From 4aab4087dc97906d0b9890035401175cdaab32d4 Mon Sep 17 00:00:00 2001 From: blackhao <13851610112@163.com> Date: Fri, 22 Aug 2025 02:51:50 -0500 Subject: 2.0 --- .github/workflows/build-dataset.yml | 70 + .github/workflows/update-and-deploy.yml | 34 - .gitignore | 7 - .venv/bin/Activate.ps1 | 247 + .venv/bin/activate | 70 + .venv/bin/activate.csh | 27 + .venv/bin/activate.fish | 69 + .venv/bin/f2py | 7 + .venv/bin/jsonschema | 7 + .venv/bin/normalizer | 7 + .venv/bin/numpy-config | 7 + .venv/bin/python | 1 + .venv/bin/python3 | 1 + .venv/bin/python3.12 | 1 + .../__pycache__/typing_extensions.cpython-312.pyc | Bin 0 -> 162195 bytes .../lib/python3.12/site-packages/attr/__init__.py | 104 + .../lib/python3.12/site-packages/attr/__init__.pyi | 389 + .../attr/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 2618 bytes .../attr/__pycache__/_cmp.cpython-312.pyc | Bin 0 -> 5082 bytes .../attr/__pycache__/_compat.cpython-312.pyc | Bin 0 -> 3508 bytes .../attr/__pycache__/_config.cpython-312.pyc | Bin 0 -> 1114 bytes .../attr/__pycache__/_funcs.cpython-312.pyc | Bin 0 -> 13637 bytes .../attr/__pycache__/_make.cpython-312.pyc | Bin 0 -> 99492 bytes .../attr/__pycache__/_next_gen.cpython-312.pyc | Bin 0 -> 24360 bytes .../attr/__pycache__/_version_info.cpython-312.pyc | Bin 0 -> 3128 bytes .../attr/__pycache__/converters.cpython-312.pyc | Bin 0 -> 4632 bytes .../attr/__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 3585 bytes .../attr/__pycache__/filters.cpython-312.pyc | Bin 0 -> 2999 bytes .../attr/__pycache__/setters.cpython-312.pyc | Bin 0 -> 1919 bytes .../attr/__pycache__/validators.cpython-312.pyc | Bin 0 -> 26229 bytes .venv/lib/python3.12/site-packages/attr/_cmp.py | 160 + .venv/lib/python3.12/site-packages/attr/_cmp.pyi | 13 + .venv/lib/python3.12/site-packages/attr/_compat.py | 94 + .venv/lib/python3.12/site-packages/attr/_config.py | 31 + .venv/lib/python3.12/site-packages/attr/_funcs.py | 468 + .venv/lib/python3.12/site-packages/attr/_make.py | 3123 + .../lib/python3.12/site-packages/attr/_next_gen.py | 623 + .../site-packages/attr/_typing_compat.pyi | 15 + .../python3.12/site-packages/attr/_version_info.py | 86 + .../site-packages/attr/_version_info.pyi | 9 + .../python3.12/site-packages/attr/converters.py | 162 + .../python3.12/site-packages/attr/converters.pyi | 19 + .../python3.12/site-packages/attr/exceptions.py | 95 + .../python3.12/site-packages/attr/exceptions.pyi | 17 + .venv/lib/python3.12/site-packages/attr/filters.py | 72 + .../lib/python3.12/site-packages/attr/filters.pyi | 6 + .venv/lib/python3.12/site-packages/attr/py.typed | 0 .venv/lib/python3.12/site-packages/attr/setters.py | 79 + .../lib/python3.12/site-packages/attr/setters.pyi | 20 + .../python3.12/site-packages/attr/validators.py | 710 + .../python3.12/site-packages/attr/validators.pyi | 86 + .../site-packages/attrs-25.3.0.dist-info/INSTALLER | 1 + .../site-packages/attrs-25.3.0.dist-info/METADATA | 232 + .../site-packages/attrs-25.3.0.dist-info/RECORD | 55 + .../site-packages/attrs-25.3.0.dist-info/WHEEL | 4 + .../attrs-25.3.0.dist-info/licenses/LICENSE | 21 + .../lib/python3.12/site-packages/attrs/__init__.py | 69 + .../python3.12/site-packages/attrs/__init__.pyi | 263 + .../attrs/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1138 bytes .../attrs/__pycache__/converters.cpython-312.pyc | Bin 0 -> 223 bytes .../attrs/__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 223 bytes .../attrs/__pycache__/filters.cpython-312.pyc | Bin 0 -> 217 bytes .../attrs/__pycache__/setters.cpython-312.pyc | Bin 0 -> 217 bytes 
.../attrs/__pycache__/validators.cpython-312.pyc | Bin 0 -> 223 bytes .../python3.12/site-packages/attrs/converters.py | 3 + .../python3.12/site-packages/attrs/exceptions.py | 3 + .../lib/python3.12/site-packages/attrs/filters.py | 3 + .venv/lib/python3.12/site-packages/attrs/py.typed | 0 .../lib/python3.12/site-packages/attrs/setters.py | 3 + .../python3.12/site-packages/attrs/validators.py | 3 + .../certifi-2025.8.3.dist-info/INSTALLER | 1 + .../certifi-2025.8.3.dist-info/METADATA | 77 + .../certifi-2025.8.3.dist-info/RECORD | 14 + .../site-packages/certifi-2025.8.3.dist-info/WHEEL | 5 + .../certifi-2025.8.3.dist-info/licenses/LICENSE | 20 + .../certifi-2025.8.3.dist-info/top_level.txt | 1 + .../python3.12/site-packages/certifi/__init__.py | 4 + .../python3.12/site-packages/certifi/__main__.py | 12 + .../certifi/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 311 bytes .../certifi/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 626 bytes .../certifi/__pycache__/core.cpython-312.pyc | Bin 0 -> 2058 bytes .../python3.12/site-packages/certifi/cacert.pem | 4738 + .venv/lib/python3.12/site-packages/certifi/core.py | 83 + .../lib/python3.12/site-packages/certifi/py.typed | 0 .../charset_normalizer-3.4.3.dist-info/INSTALLER | 1 + .../charset_normalizer-3.4.3.dist-info/METADATA | 750 + .../charset_normalizer-3.4.3.dist-info/RECORD | 35 + .../charset_normalizer-3.4.3.dist-info/WHEEL | 7 + .../entry_points.txt | 2 + .../licenses/LICENSE | 21 + .../top_level.txt | 1 + .../site-packages/charset_normalizer/__init__.py | 48 + .../site-packages/charset_normalizer/__main__.py | 6 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1777 bytes .../__pycache__/__main__.cpython-312.pyc | Bin 0 -> 352 bytes .../__pycache__/api.cpython-312.pyc | Bin 0 -> 18188 bytes .../__pycache__/cd.cpython-312.pyc | Bin 0 -> 13293 bytes .../__pycache__/constant.cpython-312.pyc | Bin 0 -> 40807 bytes .../__pycache__/legacy.cpython-312.pyc | Bin 0 -> 3007 bytes .../__pycache__/md.cpython-312.pyc | Bin 0 -> 24344 bytes .../__pycache__/models.cpython-312.pyc | Bin 0 -> 17124 bytes .../__pycache__/utils.cpython-312.pyc | Bin 0 -> 13753 bytes .../__pycache__/version.cpython-312.pyc | Bin 0 -> 379 bytes .../site-packages/charset_normalizer/api.py | 669 + .../site-packages/charset_normalizer/cd.py | 395 + .../charset_normalizer/cli/__init__.py | 8 + .../charset_normalizer/cli/__main__.py | 381 + .../cli/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 340 bytes .../cli/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 14401 bytes .../site-packages/charset_normalizer/constant.py | 2015 + .../site-packages/charset_normalizer/legacy.py | 80 + .../md.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 15912 bytes .../site-packages/charset_normalizer/md.py | 635 + .../md__mypyc.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 289536 bytes .../site-packages/charset_normalizer/models.py | 360 + .../site-packages/charset_normalizer/py.typed | 0 .../site-packages/charset_normalizer/utils.py | 414 + .../site-packages/charset_normalizer/version.py | 8 + .../site-packages/idna-3.10.dist-info/INSTALLER | 1 + .../site-packages/idna-3.10.dist-info/LICENSE.md | 31 + .../site-packages/idna-3.10.dist-info/METADATA | 250 + .../site-packages/idna-3.10.dist-info/RECORD | 22 + .../site-packages/idna-3.10.dist-info/WHEEL | 4 + .../lib/python3.12/site-packages/idna/__init__.py | 45 + .../idna/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 879 bytes .../idna/__pycache__/codec.cpython-312.pyc | Bin 0 -> 4979 bytes .../idna/__pycache__/compat.cpython-312.pyc | Bin 0 -> 
883 bytes .../idna/__pycache__/core.cpython-312.pyc | Bin 0 -> 16169 bytes .../idna/__pycache__/idnadata.cpython-312.pyc | Bin 0 -> 99469 bytes .../idna/__pycache__/intranges.cpython-312.pyc | Bin 0 -> 2631 bytes .../idna/__pycache__/package_data.cpython-312.pyc | Bin 0 -> 210 bytes .../idna/__pycache__/uts46data.cpython-312.pyc | Bin 0 -> 158839 bytes .venv/lib/python3.12/site-packages/idna/codec.py | 122 + .venv/lib/python3.12/site-packages/idna/compat.py | 15 + .venv/lib/python3.12/site-packages/idna/core.py | 437 + .../lib/python3.12/site-packages/idna/idnadata.py | 4243 + .../lib/python3.12/site-packages/idna/intranges.py | 57 + .../python3.12/site-packages/idna/package_data.py | 1 + .venv/lib/python3.12/site-packages/idna/py.typed | 0 .../lib/python3.12/site-packages/idna/uts46data.py | 8681 ++ .../jsonschema-4.25.1.dist-info/INSTALLER | 1 + .../jsonschema-4.25.1.dist-info/METADATA | 170 + .../jsonschema-4.25.1.dist-info/RECORD | 81 + .../jsonschema-4.25.1.dist-info/REQUESTED | 0 .../jsonschema-4.25.1.dist-info/WHEEL | 4 + .../jsonschema-4.25.1.dist-info/entry_points.txt | 2 + .../jsonschema-4.25.1.dist-info/licenses/COPYING | 19 + .../site-packages/jsonschema/__init__.py | 120 + .../site-packages/jsonschema/__main__.py | 6 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 3782 bytes .../__pycache__/__main__.cpython-312.pyc | Bin 0 -> 342 bytes .../jsonschema/__pycache__/_format.cpython-312.pyc | Bin 0 -> 18367 bytes .../__pycache__/_keywords.cpython-312.pyc | Bin 0 -> 19628 bytes .../__pycache__/_legacy_keywords.cpython-312.pyc | Bin 0 -> 17150 bytes .../jsonschema/__pycache__/_types.cpython-312.pyc | Bin 0 -> 7115 bytes .../jsonschema/__pycache__/_typing.cpython-312.pyc | Bin 0 -> 1351 bytes .../jsonschema/__pycache__/_utils.cpython-312.pyc | Bin 0 -> 14257 bytes .../jsonschema/__pycache__/cli.cpython-312.pyc | Bin 0 -> 12227 bytes .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 21690 bytes .../__pycache__/protocols.cpython-312.pyc | Bin 0 -> 6849 bytes .../__pycache__/validators.cpython-312.pyc | Bin 0 -> 52946 bytes .../python3.12/site-packages/jsonschema/_format.py | 546 + .../site-packages/jsonschema/_keywords.py | 449 + .../site-packages/jsonschema/_legacy_keywords.py | 449 + .../python3.12/site-packages/jsonschema/_types.py | 204 + .../python3.12/site-packages/jsonschema/_typing.py | 29 + .../python3.12/site-packages/jsonschema/_utils.py | 355 + .../jsonschema/benchmarks/__init__.py | 5 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 278 bytes .../__pycache__/const_vs_enum.cpython-312.pyc | Bin 0 -> 1899 bytes .../__pycache__/contains.cpython-312.pyc | Bin 0 -> 1925 bytes .../__pycache__/issue232.cpython-312.pyc | Bin 0 -> 903 bytes .../json_schema_test_suite.cpython-312.pyc | Bin 0 -> 648 bytes .../__pycache__/nested_schemas.cpython-312.pyc | Bin 0 -> 2478 bytes .../__pycache__/subcomponents.cpython-312.pyc | Bin 0 -> 2275 bytes .../__pycache__/unused_registry.cpython-312.pyc | Bin 0 -> 1589 bytes .../useless_applicator_schemas.cpython-312.pyc | Bin 0 -> 3568 bytes .../__pycache__/useless_keywords.cpython-312.pyc | Bin 0 -> 2079 bytes .../__pycache__/validator_creation.cpython-312.pyc | Bin 0 -> 568 bytes .../jsonschema/benchmarks/const_vs_enum.py | 30 + .../jsonschema/benchmarks/contains.py | 28 + .../jsonschema/benchmarks/issue232.py | 25 + .../jsonschema/benchmarks/issue232/issue.json | 2653 + .../benchmarks/json_schema_test_suite.py | 12 + .../jsonschema/benchmarks/nested_schemas.py | 56 + .../jsonschema/benchmarks/subcomponents.py | 42 + 
.../jsonschema/benchmarks/unused_registry.py | 35 + .../benchmarks/useless_applicator_schemas.py | 106 + .../jsonschema/benchmarks/useless_keywords.py | 32 + .../jsonschema/benchmarks/validator_creation.py | 14 + .../lib/python3.12/site-packages/jsonschema/cli.py | 292 + .../site-packages/jsonschema/exceptions.py | 490 + .../site-packages/jsonschema/protocols.py | 230 + .../site-packages/jsonschema/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 193 bytes .../tests/__pycache__/_suite.cpython-312.pyc | Bin 0 -> 13098 bytes .../__pycache__/fuzz_validate.cpython-312.pyc | Bin 0 -> 2342 bytes .../tests/__pycache__/test_cli.cpython-312.pyc | Bin 0 -> 33392 bytes .../__pycache__/test_deprecations.cpython-312.pyc | Bin 0 -> 25292 bytes .../__pycache__/test_exceptions.cpython-312.pyc | Bin 0 -> 33409 bytes .../tests/__pycache__/test_format.cpython-312.pyc | Bin 0 -> 6469 bytes .../test_jsonschema_test_suite.cpython-312.pyc | Bin 0 -> 10529 bytes .../tests/__pycache__/test_types.cpython-312.pyc | Bin 0 -> 11897 bytes .../tests/__pycache__/test_utils.cpython-312.pyc | Bin 0 -> 7908 bytes .../__pycache__/test_validators.cpython-312.pyc | Bin 0 -> 132036 bytes .../site-packages/jsonschema/tests/_suite.py | 285 + .../jsonschema/tests/fuzz_validate.py | 50 + .../site-packages/jsonschema/tests/test_cli.py | 904 + .../jsonschema/tests/test_deprecations.py | 432 + .../jsonschema/tests/test_exceptions.py | 759 + .../site-packages/jsonschema/tests/test_format.py | 91 + .../jsonschema/tests/test_jsonschema_test_suite.py | 262 + .../site-packages/jsonschema/tests/test_types.py | 221 + .../site-packages/jsonschema/tests/test_utils.py | 138 + .../jsonschema/tests/test_validators.py | 2575 + .../jsonschema/tests/typing/__init__.py | 0 .../typing/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 200 bytes ...crete_validators_match_protocol.cpython-312.pyc | Bin 0 -> 1000 bytes .../test_all_concrete_validators_match_protocol.py | 38 + .../site-packages/jsonschema/validators.py | 1410 + .../INSTALLER | 1 + .../METADATA | 54 + .../RECORD | 33 + .../WHEEL | 4 + .../licenses/COPYING | 19 + .../jsonschema_specifications/__init__.py | 12 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 557 bytes .../__pycache__/_core.cpython-312.pyc | Bin 0 -> 1477 bytes .../jsonschema_specifications/_core.py | 38 + .../schemas/draft201909/metaschema.json | 42 + .../schemas/draft201909/vocabularies/applicator | 56 + .../schemas/draft201909/vocabularies/content | 17 + .../schemas/draft201909/vocabularies/core | 57 + .../schemas/draft201909/vocabularies/meta-data | 37 + .../schemas/draft201909/vocabularies/validation | 98 + .../schemas/draft202012/metaschema.json | 58 + .../schemas/draft202012/vocabularies/applicator | 48 + .../schemas/draft202012/vocabularies/content | 17 + .../schemas/draft202012/vocabularies/core | 51 + .../schemas/draft202012/vocabularies/format | 14 + .../draft202012/vocabularies/format-annotation | 14 + .../draft202012/vocabularies/format-assertion | 14 + .../schemas/draft202012/vocabularies/meta-data | 37 + .../schemas/draft202012/vocabularies/unevaluated | 15 + .../schemas/draft202012/vocabularies/validation | 98 + .../schemas/draft3/metaschema.json | 172 + .../schemas/draft4/metaschema.json | 149 + .../schemas/draft6/metaschema.json | 153 + .../schemas/draft7/metaschema.json | 166 + .../jsonschema_specifications/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 208 bytes .../test_jsonschema_specifications.cpython-312.pyc | Bin 0 -> 1952 bytes 
.../tests/test_jsonschema_specifications.py | 41 + .../site-packages/networkx-3.5.dist-info/INSTALLER | 1 + .../site-packages/networkx-3.5.dist-info/METADATA | 167 + .../site-packages/networkx-3.5.dist-info/RECORD | 1171 + .../site-packages/networkx-3.5.dist-info/REQUESTED | 0 .../site-packages/networkx-3.5.dist-info/WHEEL | 5 + .../networkx-3.5.dist-info/entry_points.txt | 2 + .../networkx-3.5.dist-info/licenses/LICENSE.txt | 37 + .../networkx-3.5.dist-info/top_level.txt | 1 + .../python3.12/site-packages/networkx/__init__.py | 53 + .../networkx/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1474 bytes .../networkx/__pycache__/conftest.cpython-312.pyc | Bin 0 -> 8423 bytes .../networkx/__pycache__/convert.cpython-312.pyc | Bin 0 -> 18890 bytes .../__pycache__/convert_matrix.cpython-312.pyc | Bin 0 -> 50841 bytes .../networkx/__pycache__/exception.cpython-312.pyc | Bin 0 -> 5399 bytes .../__pycache__/lazy_imports.cpython-312.pyc | Bin 0 -> 7338 bytes .../networkx/__pycache__/relabel.cpython-312.pyc | Bin 0 -> 13304 bytes .../site-packages/networkx/algorithms/__init__.py | 133 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 5784 bytes .../__pycache__/asteroidal.cpython-312.pyc | Bin 0 -> 6192 bytes .../__pycache__/boundary.cpython-312.pyc | Bin 0 -> 5963 bytes .../algorithms/__pycache__/bridges.cpython-312.pyc | Bin 0 -> 7483 bytes .../__pycache__/broadcasting.cpython-312.pyc | Bin 0 -> 7952 bytes .../algorithms/__pycache__/chains.cpython-312.pyc | Bin 0 -> 6146 bytes .../algorithms/__pycache__/chordal.cpython-312.pyc | Bin 0 -> 16071 bytes .../algorithms/__pycache__/clique.cpython-312.pyc | Bin 0 -> 31785 bytes .../algorithms/__pycache__/cluster.cpython-312.pyc | Bin 0 -> 27390 bytes .../communicability_alg.cpython-312.pyc | Bin 0 -> 5569 bytes .../algorithms/__pycache__/core.cpython-312.pyc | Bin 0 -> 20200 bytes .../__pycache__/covering.cpython-312.pyc | Bin 0 -> 5617 bytes .../algorithms/__pycache__/cuts.cpython-312.pyc | Bin 0 -> 11959 bytes .../algorithms/__pycache__/cycles.cpython-312.pyc | Bin 0 -> 49324 bytes .../__pycache__/d_separation.cpython-312.pyc | Bin 0 -> 27738 bytes .../algorithms/__pycache__/dag.cpython-312.pyc | Bin 0 -> 53794 bytes .../__pycache__/distance_measures.cpython-312.pyc | Bin 0 -> 41152 bytes .../__pycache__/distance_regular.cpython-312.pyc | Bin 0 -> 8161 bytes .../__pycache__/dominance.cpython-312.pyc | Bin 0 -> 4796 bytes .../__pycache__/dominating.cpython-312.pyc | Bin 0 -> 9202 bytes .../efficiency_measures.cpython-312.pyc | Bin 0 -> 5591 bytes .../algorithms/__pycache__/euler.cpython-312.pyc | Bin 0 -> 16922 bytes .../__pycache__/graph_hashing.cpython-312.pyc | Bin 0 -> 18524 bytes .../__pycache__/graphical.cpython-312.pyc | Bin 0 -> 16323 bytes .../__pycache__/hierarchy.cpython-312.pyc | Bin 0 -> 2636 bytes .../algorithms/__pycache__/hybrid.cpython-312.pyc | Bin 0 -> 5894 bytes .../algorithms/__pycache__/isolate.cpython-312.pyc | Bin 0 -> 3313 bytes .../__pycache__/link_prediction.cpython-312.pyc | Bin 0 -> 26570 bytes .../lowest_common_ancestors.cpython-312.pyc | Bin 0 -> 9563 bytes .../__pycache__/matching.cpython-312.pyc | Bin 0 -> 32805 bytes .../algorithms/__pycache__/mis.cpython-312.pyc | Bin 0 -> 3249 bytes .../algorithms/__pycache__/moral.cpython-312.pyc | Bin 0 -> 2083 bytes .../node_classification.cpython-312.pyc | Bin 0 -> 8268 bytes .../__pycache__/non_randomness.cpython-312.pyc | Bin 0 -> 4213 bytes .../__pycache__/planar_drawing.cpython-312.pyc | Bin 0 -> 14721 bytes .../__pycache__/planarity.cpython-312.pyc | Bin 0 -> 61022 bytes 
.../__pycache__/polynomials.cpython-312.pyc | Bin 0 -> 12635 bytes .../__pycache__/reciprocity.cpython-312.pyc | Bin 0 -> 3276 bytes .../algorithms/__pycache__/regular.cpython-312.pyc | Bin 0 -> 11379 bytes .../__pycache__/richclub.cpython-312.pyc | Bin 0 -> 5880 bytes .../__pycache__/similarity.cpython-312.pyc | Bin 0 -> 67243 bytes .../__pycache__/simple_paths.cpython-312.pyc | Bin 0 -> 32164 bytes .../__pycache__/smallworld.cpython-312.pyc | Bin 0 -> 14654 bytes .../algorithms/__pycache__/smetric.cpython-312.pyc | Bin 0 -> 1434 bytes .../__pycache__/sparsifiers.cpython-312.pyc | Bin 0 -> 9486 bytes .../__pycache__/structuralholes.cpython-312.pyc | Bin 0 -> 14716 bytes .../__pycache__/summarization.cpython-312.pyc | Bin 0 -> 24989 bytes .../algorithms/__pycache__/swap.cpython-312.pyc | Bin 0 -> 14523 bytes .../__pycache__/threshold.cpython-312.pyc | Bin 0 -> 35148 bytes .../__pycache__/time_dependent.cpython-312.pyc | Bin 0 -> 7359 bytes .../__pycache__/tournament.cpython-312.pyc | Bin 0 -> 15111 bytes .../algorithms/__pycache__/triads.cpython-312.pyc | Bin 0 -> 16027 bytes .../__pycache__/vitality.cpython-312.pyc | Bin 0 -> 2762 bytes .../algorithms/__pycache__/voronoi.cpython-312.pyc | Bin 0 -> 3298 bytes .../algorithms/__pycache__/walks.cpython-312.pyc | Bin 0 -> 3018 bytes .../algorithms/__pycache__/wiener.cpython-312.pyc | Bin 0 -> 9631 bytes .../networkx/algorithms/approximation/__init__.py | 26 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1469 bytes .../__pycache__/clique.cpython-312.pyc | Bin 0 -> 8988 bytes .../clustering_coefficient.cpython-312.pyc | Bin 0 -> 2848 bytes .../__pycache__/connectivity.cpython-312.pyc | Bin 0 -> 13719 bytes .../__pycache__/density.cpython-312.pyc | Bin 0 -> 15521 bytes .../__pycache__/distance_measures.cpython-312.pyc | Bin 0 -> 6027 bytes .../__pycache__/dominating_set.cpython-312.pyc | Bin 0 -> 4991 bytes .../__pycache__/kcomponents.cpython-312.pyc | Bin 0 -> 18598 bytes .../__pycache__/matching.cpython-312.pyc | Bin 0 -> 1566 bytes .../__pycache__/maxcut.cpython-312.pyc | Bin 0 -> 5322 bytes .../__pycache__/ramsey.cpython-312.pyc | Bin 0 -> 2259 bytes .../__pycache__/steinertree.cpython-312.pyc | Bin 0 -> 11860 bytes .../__pycache__/traveling_salesman.cpython-312.pyc | Bin 0 -> 59806 bytes .../__pycache__/treewidth.cpython-312.pyc | Bin 0 -> 8895 bytes .../__pycache__/vertex_cover.cpython-312.pyc | Bin 0 -> 3264 bytes .../networkx/algorithms/approximation/clique.py | 259 + .../approximation/clustering_coefficient.py | 71 + .../algorithms/approximation/connectivity.py | 412 + .../networkx/algorithms/approximation/density.py | 396 + .../algorithms/approximation/distance_measures.py | 150 + .../algorithms/approximation/dominating_set.py | 149 + .../algorithms/approximation/kcomponents.py | 367 + .../networkx/algorithms/approximation/matching.py | 44 + .../networkx/algorithms/approximation/maxcut.py | 143 + .../networkx/algorithms/approximation/ramsey.py | 53 + .../algorithms/approximation/steinertree.py | 248 + .../algorithms/approximation/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 216 bytes .../test_approx_clust_coeff.cpython-312.pyc | Bin 0 -> 2427 bytes .../tests/__pycache__/test_clique.cpython-312.pyc | Bin 0 -> 6360 bytes .../__pycache__/test_connectivity.cpython-312.pyc | Bin 0 -> 11042 bytes .../tests/__pycache__/test_density.cpython-312.pyc | Bin 0 -> 7389 bytes .../test_distance_measures.cpython-312.pyc | Bin 0 -> 4280 bytes .../test_dominating_set.cpython-312.pyc | Bin 0 -> 3870 bytes 
.../__pycache__/test_kcomponents.cpython-312.pyc | Bin 0 -> 17838 bytes .../__pycache__/test_matching.cpython-312.pyc | Bin 0 -> 652 bytes .../tests/__pycache__/test_maxcut.cpython-312.pyc | Bin 0 -> 5584 bytes .../tests/__pycache__/test_ramsey.cpython-312.pyc | Bin 0 -> 2065 bytes .../__pycache__/test_steinertree.cpython-312.pyc | Bin 0 -> 13867 bytes .../test_traveling_salesman.cpython-312.pyc | Bin 0 -> 46085 bytes .../__pycache__/test_treewidth.cpython-312.pyc | Bin 0 -> 13249 bytes .../__pycache__/test_vertex_cover.cpython-312.pyc | Bin 0 -> 4459 bytes .../approximation/tests/test_approx_clust_coeff.py | 41 + .../algorithms/approximation/tests/test_clique.py | 112 + .../approximation/tests/test_connectivity.py | 199 + .../algorithms/approximation/tests/test_density.py | 138 + .../approximation/tests/test_distance_measures.py | 59 + .../approximation/tests/test_dominating_set.py | 78 + .../approximation/tests/test_kcomponents.py | 303 + .../approximation/tests/test_matching.py | 8 + .../algorithms/approximation/tests/test_maxcut.py | 94 + .../algorithms/approximation/tests/test_ramsey.py | 31 + .../approximation/tests/test_steinertree.py | 265 + .../approximation/tests/test_traveling_salesman.py | 1013 + .../approximation/tests/test_treewidth.py | 274 + .../approximation/tests/test_vertex_cover.py | 68 + .../algorithms/approximation/traveling_salesman.py | 1508 + .../networkx/algorithms/approximation/treewidth.py | 255 + .../algorithms/approximation/vertex_cover.py | 83 + .../networkx/algorithms/assortativity/__init__.py | 5 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 511 bytes .../__pycache__/connectivity.cpython-312.pyc | Bin 0 -> 5216 bytes .../__pycache__/correlation.cpython-312.pyc | Bin 0 -> 10840 bytes .../__pycache__/mixing.cpython-312.pyc | Bin 0 -> 8436 bytes .../__pycache__/neighbor_degree.cpython-312.pyc | Bin 0 -> 6150 bytes .../__pycache__/pairs.cpython-312.pyc | Bin 0 -> 4836 bytes .../algorithms/assortativity/connectivity.py | 122 + .../algorithms/assortativity/correlation.py | 302 + .../networkx/algorithms/assortativity/mixing.py | 255 + .../algorithms/assortativity/neighbor_degree.py | 160 + .../networkx/algorithms/assortativity/pairs.py | 127 + .../algorithms/assortativity/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 216 bytes .../tests/__pycache__/base_test.cpython-312.pyc | Bin 0 -> 5394 bytes .../__pycache__/test_connectivity.cpython-312.pyc | Bin 0 -> 7984 bytes .../__pycache__/test_correlation.cpython-312.pyc | Bin 0 -> 10722 bytes .../tests/__pycache__/test_mixing.cpython-312.pyc | Bin 0 -> 12463 bytes .../test_neighbor_degree.cpython-312.pyc | Bin 0 -> 6668 bytes .../tests/__pycache__/test_pairs.cpython-312.pyc | Bin 0 -> 5358 bytes .../algorithms/assortativity/tests/base_test.py | 81 + .../assortativity/tests/test_connectivity.py | 143 + .../assortativity/tests/test_correlation.py | 122 + .../algorithms/assortativity/tests/test_mixing.py | 174 + .../assortativity/tests/test_neighbor_degree.py | 107 + .../algorithms/assortativity/tests/test_pairs.py | 87 + .../networkx/algorithms/asteroidal.py | 164 + .../networkx/algorithms/bipartite/__init__.py | 88 + .../bipartite/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4114 bytes .../bipartite/__pycache__/basic.cpython-312.pyc | Bin 0 -> 10489 bytes .../__pycache__/centrality.cpython-312.pyc | Bin 0 -> 10724 bytes .../bipartite/__pycache__/cluster.cpython-312.pyc | Bin 0 -> 9432 bytes .../bipartite/__pycache__/covering.cpython-312.pyc | Bin 0 -> 2515 bytes 
.../bipartite/__pycache__/edgelist.cpython-312.pyc | Bin 0 -> 12978 bytes .../__pycache__/extendability.cpython-312.pyc | Bin 0 -> 5027 bytes .../__pycache__/generators.cpython-312.pyc | Bin 0 -> 25022 bytes .../__pycache__/link_analysis.cpython-312.pyc | Bin 0 -> 13839 bytes .../bipartite/__pycache__/matching.cpython-312.pyc | Bin 0 -> 18868 bytes .../bipartite/__pycache__/matrix.cpython-312.pyc | Bin 0 -> 7967 bytes .../__pycache__/projection.cpython-312.pyc | Bin 0 -> 21883 bytes .../__pycache__/redundancy.cpython-312.pyc | Bin 0 -> 4436 bytes .../bipartite/__pycache__/spectral.cpython-312.pyc | Bin 0 -> 2601 bytes .../networkx/algorithms/bipartite/basic.py | 322 + .../networkx/algorithms/bipartite/centrality.py | 290 + .../networkx/algorithms/bipartite/cluster.py | 289 + .../networkx/algorithms/bipartite/covering.py | 57 + .../networkx/algorithms/bipartite/edgelist.py | 360 + .../networkx/algorithms/bipartite/extendability.py | 105 + .../networkx/algorithms/bipartite/generators.py | 603 + .../networkx/algorithms/bipartite/link_analysis.py | 316 + .../networkx/algorithms/bipartite/matching.py | 590 + .../networkx/algorithms/bipartite/matrix.py | 168 + .../networkx/algorithms/bipartite/projection.py | 526 + .../networkx/algorithms/bipartite/redundancy.py | 112 + .../networkx/algorithms/bipartite/spectral.py | 69 + .../algorithms/bipartite/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 212 bytes .../tests/__pycache__/test_basic.cpython-312.pyc | Bin 0 -> 9929 bytes .../__pycache__/test_centrality.cpython-312.pyc | Bin 0 -> 8280 bytes .../tests/__pycache__/test_cluster.cpython-312.pyc | Bin 0 -> 5661 bytes .../__pycache__/test_covering.cpython-312.pyc | Bin 0 -> 2709 bytes .../__pycache__/test_edgelist.cpython-312.pyc | Bin 0 -> 15058 bytes .../__pycache__/test_extendability.cpython-312.pyc | Bin 0 -> 7486 bytes .../__pycache__/test_generators.cpython-312.pyc | Bin 0 -> 21773 bytes .../__pycache__/test_link_analysis.cpython-312.pyc | Bin 0 -> 9206 bytes .../__pycache__/test_matching.cpython-312.pyc | Bin 0 -> 20154 bytes .../tests/__pycache__/test_matrix.cpython-312.pyc | Bin 0 -> 7479 bytes .../tests/__pycache__/test_project.cpython-312.pyc | Bin 0 -> 25983 bytes .../__pycache__/test_redundancy.cpython-312.pyc | Bin 0 -> 1942 bytes .../test_spectral_bipartivity.cpython-312.pyc | Bin 0 -> 4657 bytes .../algorithms/bipartite/tests/test_basic.py | 125 + .../algorithms/bipartite/tests/test_centrality.py | 192 + .../algorithms/bipartite/tests/test_cluster.py | 84 + .../algorithms/bipartite/tests/test_covering.py | 33 + .../algorithms/bipartite/tests/test_edgelist.py | 240 + .../bipartite/tests/test_extendability.py | 334 + .../algorithms/bipartite/tests/test_generators.py | 407 + .../bipartite/tests/test_link_analysis.py | 218 + .../algorithms/bipartite/tests/test_matching.py | 327 + .../algorithms/bipartite/tests/test_matrix.py | 82 + .../algorithms/bipartite/tests/test_project.py | 407 + .../algorithms/bipartite/tests/test_redundancy.py | 35 + .../bipartite/tests/test_spectral_bipartivity.py | 80 + .../site-packages/networkx/algorithms/boundary.py | 168 + .../site-packages/networkx/algorithms/bridges.py | 205 + .../networkx/algorithms/broadcasting.py | 155 + .../networkx/algorithms/centrality/__init__.py | 20 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 752 bytes .../__pycache__/betweenness.cpython-312.pyc | Bin 0 -> 16288 bytes .../__pycache__/betweenness_subset.cpython-312.pyc | Bin 0 -> 9920 bytes .../__pycache__/closeness.cpython-312.pyc | Bin 0 
-> 10639 bytes .../current_flow_betweenness.cpython-312.pyc | Bin 0 -> 14347 bytes ...current_flow_betweenness_subset.cpython-312.pyc | Bin 0 -> 9633 bytes .../current_flow_closeness.cpython-312.pyc | Bin 0 -> 4278 bytes .../__pycache__/degree_alg.cpython-312.pyc | Bin 0 -> 4990 bytes .../__pycache__/dispersion.cpython-312.pyc | Bin 0 -> 3844 bytes .../__pycache__/eigenvector.cpython-312.pyc | Bin 0 -> 15177 bytes .../__pycache__/flow_matrix.cpython-312.pyc | Bin 0 -> 8577 bytes .../centrality/__pycache__/group.cpython-312.pyc | Bin 0 -> 29373 bytes .../__pycache__/harmonic.cpython-312.pyc | Bin 0 -> 3474 bytes .../centrality/__pycache__/katz.cpython-312.pyc | Bin 0 -> 12833 bytes .../__pycache__/laplacian.cpython-312.pyc | Bin 0 -> 6374 bytes .../centrality/__pycache__/load.cpython-312.pyc | Bin 0 -> 7293 bytes .../__pycache__/percolation.cpython-312.pyc | Bin 0 -> 4899 bytes .../__pycache__/reaching.cpython-312.pyc | Bin 0 -> 8465 bytes .../__pycache__/second_order.cpython-312.pyc | Bin 0 -> 6885 bytes .../__pycache__/subgraph_alg.cpython-312.pyc | Bin 0 -> 11022 bytes .../centrality/__pycache__/trophic.cpython-312.pyc | Bin 0 -> 6636 bytes .../__pycache__/voterank_alg.cpython-312.pyc | Bin 0 -> 4063 bytes .../networkx/algorithms/centrality/betweenness.py | 469 + .../algorithms/centrality/betweenness_subset.py | 275 + .../networkx/algorithms/centrality/closeness.py | 282 + .../centrality/current_flow_betweenness.py | 342 + .../centrality/current_flow_betweenness_subset.py | 227 + .../centrality/current_flow_closeness.py | 96 + .../networkx/algorithms/centrality/degree_alg.py | 150 + .../networkx/algorithms/centrality/dispersion.py | 107 + .../networkx/algorithms/centrality/eigenvector.py | 357 + .../networkx/algorithms/centrality/flow_matrix.py | 130 + .../networkx/algorithms/centrality/group.py | 787 + .../networkx/algorithms/centrality/harmonic.py | 89 + .../networkx/algorithms/centrality/katz.py | 331 + .../networkx/algorithms/centrality/laplacian.py | 150 + .../networkx/algorithms/centrality/load.py | 200 + .../networkx/algorithms/centrality/percolation.py | 128 + .../networkx/algorithms/centrality/reaching.py | 209 + .../networkx/algorithms/centrality/second_order.py | 141 + .../networkx/algorithms/centrality/subgraph_alg.py | 342 + .../algorithms/centrality/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 213 bytes .../test_betweenness_centrality.cpython-312.pyc | Bin 0 -> 39839 bytes ...t_betweenness_centrality_subset.cpython-312.pyc | Bin 0 -> 19533 bytes .../test_closeness_centrality.cpython-312.pyc | Bin 0 -> 14105 bytes ...ent_flow_betweenness_centrality.cpython-312.pyc | Bin 0 -> 13638 bytes ...w_betweenness_centrality_subset.cpython-312.pyc | Bin 0 -> 9317 bytes .../test_current_flow_closeness.cpython-312.pyc | Bin 0 -> 3150 bytes .../test_degree_centrality.cpython-312.pyc | Bin 0 -> 7345 bytes .../__pycache__/test_dispersion.cpython-312.pyc | Bin 0 -> 3161 bytes .../test_eigenvector_centrality.cpython-312.pyc | Bin 0 -> 10478 bytes .../tests/__pycache__/test_group.cpython-312.pyc | Bin 0 -> 14888 bytes .../test_harmonic_centrality.cpython-312.pyc | Bin 0 -> 8289 bytes .../test_katz_centrality.cpython-312.pyc | Bin 0 -> 17842 bytes .../test_laplacian_centrality.cpython-312.pyc | Bin 0 -> 9836 bytes .../test_load_centrality.cpython-312.pyc | Bin 0 -> 15498 bytes .../test_percolation_centrality.cpython-312.pyc | Bin 0 -> 4303 bytes .../__pycache__/test_reaching.cpython-312.pyc | Bin 0 -> 10903 bytes 
.../test_second_order_centrality.cpython-312.pyc | Bin 0 -> 4645 bytes .../__pycache__/test_subgraph.cpython-312.pyc | Bin 0 -> 5089 bytes .../tests/__pycache__/test_trophic.cpython-312.pyc | Bin 0 -> 14583 bytes .../__pycache__/test_voterank.cpython-312.pyc | Bin 0 -> 3057 bytes .../tests/test_betweenness_centrality.py | 903 + .../tests/test_betweenness_centrality_subset.py | 340 + .../centrality/tests/test_closeness_centrality.py | 274 + .../test_current_flow_betweenness_centrality.py | 197 + ...t_current_flow_betweenness_centrality_subset.py | 147 + .../tests/test_current_flow_closeness.py | 43 + .../centrality/tests/test_degree_centrality.py | 144 + .../algorithms/centrality/tests/test_dispersion.py | 73 + .../tests/test_eigenvector_centrality.py | 186 + .../algorithms/centrality/tests/test_group.py | 277 + .../centrality/tests/test_harmonic_centrality.py | 122 + .../centrality/tests/test_katz_centrality.py | 345 + .../centrality/tests/test_laplacian_centrality.py | 220 + .../centrality/tests/test_load_centrality.py | 344 + .../tests/test_percolation_centrality.py | 87 + .../algorithms/centrality/tests/test_reaching.py | 140 + .../tests/test_second_order_centrality.py | 82 + .../algorithms/centrality/tests/test_subgraph.py | 110 + .../algorithms/centrality/tests/test_trophic.py | 302 + .../algorithms/centrality/tests/test_voterank.py | 64 + .../networkx/algorithms/centrality/trophic.py | 181 + .../networkx/algorithms/centrality/voterank_alg.py | 95 + .../site-packages/networkx/algorithms/chains.py | 172 + .../site-packages/networkx/algorithms/chordal.py | 443 + .../site-packages/networkx/algorithms/clique.py | 757 + .../site-packages/networkx/algorithms/cluster.py | 658 + .../networkx/algorithms/coloring/__init__.py | 4 + .../coloring/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 406 bytes .../__pycache__/equitable_coloring.cpython-312.pyc | Bin 0 -> 15409 bytes .../__pycache__/greedy_coloring.cpython-312.pyc | Bin 0 -> 22306 bytes .../algorithms/coloring/equitable_coloring.py | 505 + .../algorithms/coloring/greedy_coloring.py | 565 + .../networkx/algorithms/coloring/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 211 bytes .../__pycache__/test_coloring.cpython-312.pyc | Bin 0 -> 28198 bytes .../algorithms/coloring/tests/test_coloring.py | 863 + .../networkx/algorithms/communicability_alg.py | 163 + .../networkx/algorithms/community/__init__.py | 28 + .../community/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1510 bytes .../__pycache__/asyn_fluid.cpython-312.pyc | Bin 0 -> 5639 bytes .../__pycache__/centrality.cpython-312.pyc | Bin 0 -> 7109 bytes .../__pycache__/community_utils.cpython-312.pyc | Bin 0 -> 1511 bytes .../community/__pycache__/divisive.cpython-312.pyc | Bin 0 -> 7650 bytes .../community/__pycache__/kclique.cpython-312.pyc | Bin 0 -> 3149 bytes .../__pycache__/kernighan_lin.cpython-312.pyc | Bin 0 -> 6770 bytes .../__pycache__/label_propagation.cpython-312.pyc | Bin 0 -> 13404 bytes .../community/__pycache__/leiden.cpython-312.pyc | Bin 0 -> 7515 bytes .../community/__pycache__/local.cpython-312.pyc | Bin 0 -> 8368 bytes .../community/__pycache__/louvain.cpython-312.pyc | Bin 0 -> 17264 bytes .../community/__pycache__/lukes.cpython-312.pyc | Bin 0 -> 10590 bytes .../__pycache__/modularity_max.cpython-312.pyc | Bin 0 -> 17217 bytes .../community/__pycache__/quality.cpython-312.pyc | Bin 0 -> 14013 bytes .../networkx/algorithms/community/asyn_fluid.py | 151 + .../networkx/algorithms/community/centrality.py | 171 + 
.../algorithms/community/community_utils.py | 30 + .../networkx/algorithms/community/divisive.py | 216 + .../networkx/algorithms/community/kclique.py | 79 + .../networkx/algorithms/community/kernighan_lin.py | 139 + .../algorithms/community/label_propagation.py | 338 + .../networkx/algorithms/community/leiden.py | 162 + .../networkx/algorithms/community/local.py | 220 + .../networkx/algorithms/community/louvain.py | 384 + .../networkx/algorithms/community/lukes.py | 227 + .../algorithms/community/modularity_max.py | 451 + .../networkx/algorithms/community/quality.py | 347 + .../algorithms/community/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 212 bytes .../__pycache__/test_asyn_fluid.cpython-312.pyc | Bin 0 -> 5899 bytes .../__pycache__/test_centrality.cpython-312.pyc | Bin 0 -> 5317 bytes .../__pycache__/test_divisive.cpython-312.pyc | Bin 0 -> 7781 bytes .../tests/__pycache__/test_kclique.cpython-312.pyc | Bin 0 -> 4733 bytes .../__pycache__/test_kernighan_lin.cpython-312.pyc | Bin 0 -> 4755 bytes .../test_label_propagation.cpython-312.pyc | Bin 0 -> 15638 bytes .../tests/__pycache__/test_leiden.cpython-312.pyc | Bin 0 -> 8583 bytes .../tests/__pycache__/test_local.cpython-312.pyc | Bin 0 -> 3551 bytes .../tests/__pycache__/test_louvain.cpython-312.pyc | Bin 0 -> 12610 bytes .../tests/__pycache__/test_lukes.cpython-312.pyc | Bin 0 -> 6297 bytes .../test_modularity_max.cpython-312.pyc | Bin 0 -> 12582 bytes .../tests/__pycache__/test_quality.cpython-312.pyc | Bin 0 -> 8412 bytes .../tests/__pycache__/test_utils.cpython-312.pyc | Bin 0 -> 1875 bytes .../algorithms/community/tests/test_asyn_fluid.py | 136 + .../algorithms/community/tests/test_centrality.py | 85 + .../algorithms/community/tests/test_divisive.py | 106 + .../algorithms/community/tests/test_kclique.py | 91 + .../community/tests/test_kernighan_lin.py | 92 + .../community/tests/test_label_propagation.py | 241 + .../algorithms/community/tests/test_leiden.py | 138 + .../algorithms/community/tests/test_local.py | 76 + .../algorithms/community/tests/test_louvain.py | 264 + .../algorithms/community/tests/test_lukes.py | 152 + .../community/tests/test_modularity_max.py | 340 + .../algorithms/community/tests/test_quality.py | 139 + .../algorithms/community/tests/test_utils.py | 26 + .../networkx/algorithms/components/__init__.py | 6 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 381 bytes .../__pycache__/attracting.cpython-312.pyc | Bin 0 -> 3746 bytes .../__pycache__/biconnected.cpython-312.pyc | Bin 0 -> 13082 bytes .../__pycache__/connected.cpython-312.pyc | Bin 0 -> 5900 bytes .../__pycache__/semiconnected.cpython-312.pyc | Bin 0 -> 2860 bytes .../__pycache__/strongly_connected.cpython-312.pyc | Bin 0 -> 11415 bytes .../__pycache__/weakly_connected.cpython-312.pyc | Bin 0 -> 5561 bytes .../networkx/algorithms/components/attracting.py | 115 + .../networkx/algorithms/components/biconnected.py | 394 + .../networkx/algorithms/components/connected.py | 216 + .../algorithms/components/semiconnected.py | 71 + .../algorithms/components/strongly_connected.py | 351 + .../algorithms/components/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 213 bytes .../__pycache__/test_attracting.cpython-312.pyc | Bin 0 -> 4692 bytes .../__pycache__/test_biconnected.cpython-312.pyc | Bin 0 -> 10795 bytes .../__pycache__/test_connected.cpython-312.pyc | Bin 0 -> 8483 bytes .../__pycache__/test_semiconnected.cpython-312.pyc | Bin 0 -> 5170 bytes 
.../test_strongly_connected.cpython-312.pyc | Bin 0 -> 11650 bytes .../test_weakly_connected.cpython-312.pyc | Bin 0 -> 6520 bytes .../algorithms/components/tests/test_attracting.py | 70 + .../components/tests/test_biconnected.py | 248 + .../algorithms/components/tests/test_connected.py | 138 + .../components/tests/test_semiconnected.py | 55 + .../components/tests/test_strongly_connected.py | 193 + .../components/tests/test_weakly_connected.py | 96 + .../algorithms/components/weakly_connected.py | 197 + .../networkx/algorithms/connectivity/__init__.py | 11 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 498 bytes .../__pycache__/connectivity.cpython-312.pyc | Bin 0 -> 30140 bytes .../connectivity/__pycache__/cuts.cpython-312.pyc | Bin 0 -> 23741 bytes .../__pycache__/disjoint_paths.cpython-312.pyc | Bin 0 -> 14926 bytes .../__pycache__/edge_augmentation.cpython-312.pyc | Bin 0 -> 47654 bytes .../__pycache__/edge_kcomponents.cpython-312.pyc | Bin 0 -> 22752 bytes .../__pycache__/kcomponents.cpython-312.pyc | Bin 0 -> 10384 bytes .../__pycache__/kcutsets.cpython-312.pyc | Bin 0 -> 8810 bytes .../__pycache__/stoerwagner.cpython-312.pyc | Bin 0 -> 6244 bytes .../connectivity/__pycache__/utils.cpython-312.pyc | Bin 0 -> 4337 bytes .../algorithms/connectivity/connectivity.py | 811 + .../networkx/algorithms/connectivity/cuts.py | 616 + .../algorithms/connectivity/disjoint_paths.py | 408 + .../algorithms/connectivity/edge_augmentation.py | 1270 + .../algorithms/connectivity/edge_kcomponents.py | 592 + .../algorithms/connectivity/kcomponents.py | 223 + .../networkx/algorithms/connectivity/kcutsets.py | 235 + .../algorithms/connectivity/stoerwagner.py | 152 + .../algorithms/connectivity/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 215 bytes .../__pycache__/test_connectivity.cpython-312.pyc | Bin 0 -> 27138 bytes .../tests/__pycache__/test_cuts.cpython-312.pyc | Bin 0 -> 16841 bytes .../test_disjoint_paths.cpython-312.pyc | Bin 0 -> 14370 bytes .../test_edge_augmentation.cpython-312.pyc | Bin 0 -> 21538 bytes .../test_edge_kcomponents.cpython-312.pyc | Bin 0 -> 22419 bytes .../__pycache__/test_kcomponents.cpython-312.pyc | Bin 0 -> 11276 bytes .../__pycache__/test_kcutsets.cpython-312.pyc | Bin 0 -> 13914 bytes .../__pycache__/test_stoer_wagner.cpython-312.pyc | Bin 0 -> 6298 bytes .../connectivity/tests/test_connectivity.py | 421 + .../algorithms/connectivity/tests/test_cuts.py | 309 + .../connectivity/tests/test_disjoint_paths.py | 249 + .../connectivity/tests/test_edge_augmentation.py | 502 + .../connectivity/tests/test_edge_kcomponents.py | 488 + .../connectivity/tests/test_kcomponents.py | 296 + .../algorithms/connectivity/tests/test_kcutsets.py | 270 + .../connectivity/tests/test_stoer_wagner.py | 102 + .../networkx/algorithms/connectivity/utils.py | 88 + .../site-packages/networkx/algorithms/core.py | 588 + .../site-packages/networkx/algorithms/covering.py | 142 + .../site-packages/networkx/algorithms/cuts.py | 398 + .../site-packages/networkx/algorithms/cycles.py | 1234 + .../networkx/algorithms/d_separation.py | 677 + .../site-packages/networkx/algorithms/dag.py | 1468 + .../networkx/algorithms/distance_measures.py | 1159 + .../networkx/algorithms/distance_regular.py | 238 + .../site-packages/networkx/algorithms/dominance.py | 135 + .../networkx/algorithms/dominating.py | 268 + .../networkx/algorithms/efficiency_measures.py | 167 + .../site-packages/networkx/algorithms/euler.py | 470 + .../networkx/algorithms/flow/__init__.py | 11 + 
.../flow/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 562 bytes .../__pycache__/boykovkolmogorov.cpython-312.pyc | Bin 0 -> 14603 bytes .../__pycache__/capacityscaling.cpython-312.pyc | Bin 0 -> 17106 bytes .../flow/__pycache__/dinitz_alg.cpython-312.pyc | Bin 0 -> 9122 bytes .../flow/__pycache__/edmondskarp.cpython-312.pyc | Bin 0 -> 8963 bytes .../flow/__pycache__/gomory_hu.cpython-312.pyc | Bin 0 -> 6795 bytes .../flow/__pycache__/maxflow.cpython-312.pyc | Bin 0 -> 23253 bytes .../flow/__pycache__/mincost.cpython-312.pyc | Bin 0 -> 13941 bytes .../__pycache__/networksimplex.cpython-312.pyc | Bin 0 -> 30286 bytes .../flow/__pycache__/preflowpush.cpython-312.pyc | Bin 0 -> 15833 bytes .../shortestaugmentingpath.cpython-312.pyc | Bin 0 -> 10477 bytes .../flow/__pycache__/utils.cpython-312.pyc | Bin 0 -> 8858 bytes .../networkx/algorithms/flow/boykovkolmogorov.py | 370 + .../networkx/algorithms/flow/capacityscaling.py | 407 + .../networkx/algorithms/flow/dinitz_alg.py | 238 + .../networkx/algorithms/flow/edmondskarp.py | 241 + .../networkx/algorithms/flow/gomory_hu.py | 178 + .../networkx/algorithms/flow/maxflow.py | 611 + .../networkx/algorithms/flow/mincost.py | 356 + .../networkx/algorithms/flow/networksimplex.py | 662 + .../networkx/algorithms/flow/preflowpush.py | 425 + .../algorithms/flow/shortestaugmentingpath.py | 300 + .../networkx/algorithms/flow/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 207 bytes .../__pycache__/test_gomory_hu.cpython-312.pyc | Bin 0 -> 8857 bytes .../tests/__pycache__/test_maxflow.cpython-312.pyc | Bin 0 -> 26556 bytes .../test_maxflow_large_graph.cpython-312.pyc | Bin 0 -> 6697 bytes .../tests/__pycache__/test_mincost.cpython-312.pyc | Bin 0 -> 27852 bytes .../test_networksimplex.cpython-312.pyc | Bin 0 -> 23407 bytes .../networkx/algorithms/flow/tests/gl1.gpickle.bz2 | Bin 0 -> 44623 bytes .../networkx/algorithms/flow/tests/gw1.gpickle.bz2 | Bin 0 -> 42248 bytes .../algorithms/flow/tests/netgen-2.gpickle.bz2 | Bin 0 -> 18972 bytes .../algorithms/flow/tests/test_gomory_hu.py | 128 + .../networkx/algorithms/flow/tests/test_maxflow.py | 573 + .../flow/tests/test_maxflow_large_graph.py | 155 + .../networkx/algorithms/flow/tests/test_mincost.py | 475 + .../algorithms/flow/tests/test_networksimplex.py | 481 + .../algorithms/flow/tests/wlm3.gpickle.bz2 | Bin 0 -> 88132 bytes .../networkx/algorithms/flow/utils.py | 194 + .../networkx/algorithms/graph_hashing.py | 435 + .../site-packages/networkx/algorithms/graphical.py | 483 + .../site-packages/networkx/algorithms/hierarchy.py | 57 + .../site-packages/networkx/algorithms/hybrid.py | 196 + .../site-packages/networkx/algorithms/isolate.py | 107 + .../networkx/algorithms/isomorphism/__init__.py | 7 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 621 bytes .../isomorphism/__pycache__/ismags.cpython-312.pyc | Bin 0 -> 46618 bytes .../__pycache__/isomorph.cpython-312.pyc | Bin 0 -> 12922 bytes .../__pycache__/isomorphvf2.cpython-312.pyc | Bin 0 -> 41990 bytes .../__pycache__/matchhelpers.cpython-312.pyc | Bin 0 -> 15681 bytes .../temporalisomorphvf2.cpython-312.pyc | Bin 0 -> 13824 bytes .../__pycache__/tree_isomorphism.cpython-312.pyc | Bin 0 -> 9387 bytes .../isomorphism/__pycache__/vf2pp.cpython-312.pyc | Bin 0 -> 40600 bytes .../__pycache__/vf2userfunc.cpython-312.pyc | Bin 0 -> 7881 bytes .../networkx/algorithms/isomorphism/ismags.py | 1174 + .../networkx/algorithms/isomorphism/isomorph.py | 336 + .../networkx/algorithms/isomorphism/isomorphvf2.py | 1262 + 
.../algorithms/isomorphism/matchhelpers.py | 352 + .../algorithms/isomorphism/temporalisomorphvf2.py | 308 + .../algorithms/isomorphism/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 214 bytes .../tests/__pycache__/test_ismags.cpython-312.pyc | Bin 0 -> 14949 bytes .../__pycache__/test_isomorphism.cpython-312.pyc | Bin 0 -> 9090 bytes .../__pycache__/test_isomorphvf2.cpython-312.pyc | Bin 0 -> 19306 bytes .../__pycache__/test_match_helpers.cpython-312.pyc | Bin 0 -> 4011 bytes .../test_temporalisomorphvf2.cpython-312.pyc | Bin 0 -> 11869 bytes .../test_tree_isomorphism.cpython-312.pyc | Bin 0 -> 8350 bytes .../tests/__pycache__/test_vf2pp.cpython-312.pyc | Bin 0 -> 59414 bytes .../__pycache__/test_vf2pp_helpers.cpython-312.pyc | Bin 0 -> 98711 bytes .../__pycache__/test_vf2userfunc.cpython-312.pyc | Bin 0 -> 13085 bytes .../algorithms/isomorphism/tests/iso_r01_s80.A99 | Bin 0 -> 1442 bytes .../algorithms/isomorphism/tests/iso_r01_s80.B99 | Bin 0 -> 1442 bytes .../algorithms/isomorphism/tests/si2_b06_m200.A99 | Bin 0 -> 310 bytes .../algorithms/isomorphism/tests/si2_b06_m200.B99 | Bin 0 -> 1602 bytes .../algorithms/isomorphism/tests/test_ismags.py | 351 + .../isomorphism/tests/test_isomorphism.py | 109 + .../isomorphism/tests/test_isomorphvf2.py | 439 + .../isomorphism/tests/test_match_helpers.py | 64 + .../isomorphism/tests/test_temporalisomorphvf2.py | 212 + .../isomorphism/tests/test_tree_isomorphism.py | 202 + .../algorithms/isomorphism/tests/test_vf2pp.py | 1609 + .../isomorphism/tests/test_vf2pp_helpers.py | 3118 + .../isomorphism/tests/test_vf2userfunc.py | 200 + .../algorithms/isomorphism/tree_isomorphism.py | 264 + .../networkx/algorithms/isomorphism/vf2pp.py | 1102 + .../networkx/algorithms/isomorphism/vf2userfunc.py | 192 + .../networkx/algorithms/link_analysis/__init__.py | 2 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 335 bytes .../__pycache__/hits_alg.cpython-312.pyc | Bin 0 -> 13425 bytes .../__pycache__/pagerank_alg.cpython-312.pyc | Bin 0 -> 19874 bytes .../networkx/algorithms/link_analysis/hits_alg.py | 337 + .../algorithms/link_analysis/pagerank_alg.py | 499 + .../algorithms/link_analysis/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 216 bytes .../tests/__pycache__/test_hits.cpython-312.pyc | Bin 0 -> 5331 bytes .../__pycache__/test_pagerank.cpython-312.pyc | Bin 0 -> 12615 bytes .../algorithms/link_analysis/tests/test_hits.py | 77 + .../link_analysis/tests/test_pagerank.py | 213 + .../networkx/algorithms/link_prediction.py | 687 + .../networkx/algorithms/lowest_common_ancestors.py | 280 + .../site-packages/networkx/algorithms/matching.py | 1151 + .../networkx/algorithms/minors/__init__.py | 27 + .../minors/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 726 bytes .../minors/__pycache__/contraction.cpython-312.pyc | Bin 0 -> 24465 bytes .../networkx/algorithms/minors/contraction.py | 666 + .../__pycache__/test_contraction.cpython-312.pyc | Bin 0 -> 27511 bytes .../algorithms/minors/tests/test_contraction.py | 544 + .../site-packages/networkx/algorithms/mis.py | 78 + .../site-packages/networkx/algorithms/moral.py | 59 + .../networkx/algorithms/node_classification.py | 219 + .../networkx/algorithms/non_randomness.py | 98 + .../networkx/algorithms/operators/__init__.py | 4 + .../operators/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 414 bytes .../operators/__pycache__/all.cpython-312.pyc | Bin 0 -> 12182 bytes .../operators/__pycache__/binary.cpython-312.pyc | Bin 0 -> 15141 bytes 
.../operators/__pycache__/product.cpython-312.pyc | Bin 0 -> 25202 bytes .../operators/__pycache__/unary.cpython-312.pyc | Bin 0 -> 2721 bytes .../networkx/algorithms/operators/all.py | 324 + .../networkx/algorithms/operators/binary.py | 468 + .../networkx/algorithms/operators/product.py | 633 + .../algorithms/operators/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 212 bytes .../tests/__pycache__/test_all.cpython-312.pyc | Bin 0 -> 19502 bytes .../tests/__pycache__/test_binary.cpython-312.pyc | Bin 0 -> 29140 bytes .../tests/__pycache__/test_product.cpython-312.pyc | Bin 0 -> 28725 bytes .../tests/__pycache__/test_unary.cpython-312.pyc | Bin 0 -> 3023 bytes .../algorithms/operators/tests/test_all.py | 328 + .../algorithms/operators/tests/test_binary.py | 451 + .../algorithms/operators/tests/test_product.py | 491 + .../algorithms/operators/tests/test_unary.py | 55 + .../networkx/algorithms/operators/unary.py | 77 + .../networkx/algorithms/planar_drawing.py | 464 + .../site-packages/networkx/algorithms/planarity.py | 1463 + .../networkx/algorithms/polynomials.py | 306 + .../networkx/algorithms/reciprocity.py | 98 + .../site-packages/networkx/algorithms/regular.py | 215 + .../site-packages/networkx/algorithms/richclub.py | 138 + .../networkx/algorithms/shortest_paths/__init__.py | 5 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 503 bytes .../__pycache__/astar.cpython-312.pyc | Bin 0 -> 8625 bytes .../__pycache__/dense.cpython-312.pyc | Bin 0 -> 9334 bytes .../__pycache__/generic.cpython-312.pyc | Bin 0 -> 25786 bytes .../__pycache__/unweighted.cpython-312.pyc | Bin 0 -> 16125 bytes .../__pycache__/weighted.cpython-312.pyc | Bin 0 -> 84792 bytes .../networkx/algorithms/shortest_paths/astar.py | 239 + .../networkx/algorithms/shortest_paths/dense.py | 264 + .../networkx/algorithms/shortest_paths/generic.py | 713 + .../algorithms/shortest_paths/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 217 bytes .../tests/__pycache__/test_astar.cpython-312.pyc | Bin 0 -> 15904 bytes .../tests/__pycache__/test_dense.cpython-312.pyc | Bin 0 -> 7700 bytes .../__pycache__/test_dense_numpy.cpython-312.pyc | Bin 0 -> 4760 bytes .../tests/__pycache__/test_generic.cpython-312.pyc | Bin 0 -> 31458 bytes .../__pycache__/test_unweighted.cpython-312.pyc | Bin 0 -> 11738 bytes .../__pycache__/test_weighted.cpython-312.pyc | Bin 0 -> 56793 bytes .../algorithms/shortest_paths/tests/test_astar.py | 254 + .../algorithms/shortest_paths/tests/test_dense.py | 212 + .../shortest_paths/tests/test_dense_numpy.py | 88 + .../shortest_paths/tests/test_generic.py | 450 + .../shortest_paths/tests/test_unweighted.py | 149 + .../shortest_paths/tests/test_weighted.py | 972 + .../algorithms/shortest_paths/unweighted.py | 562 + .../networkx/algorithms/shortest_paths/weighted.py | 2516 + .../networkx/algorithms/similarity.py | 1783 + .../networkx/algorithms/simple_paths.py | 948 + .../networkx/algorithms/smallworld.py | 404 + .../site-packages/networkx/algorithms/smetric.py | 30 + .../networkx/algorithms/sparsifiers.py | 296 + .../networkx/algorithms/structuralholes.py | 374 + .../networkx/algorithms/summarization.py | 564 + .../site-packages/networkx/algorithms/swap.py | 406 + .../networkx/algorithms/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 202 bytes .../__pycache__/test_asteroidal.cpython-312.pyc | Bin 0 -> 1126 bytes .../__pycache__/test_boundary.cpython-312.pyc | Bin 0 -> 12290 bytes 
.../tests/__pycache__/test_bridges.cpython-312.pyc | Bin 0 -> 7284 bytes .../__pycache__/test_broadcasting.cpython-312.pyc | Bin 0 -> 3680 bytes .../tests/__pycache__/test_chains.cpython-312.pyc | Bin 0 -> 6398 bytes .../tests/__pycache__/test_chordal.cpython-312.pyc | Bin 0 -> 8411 bytes .../tests/__pycache__/test_clique.cpython-312.pyc | Bin 0 -> 15682 bytes .../tests/__pycache__/test_cluster.cpython-312.pyc | Bin 0 -> 31123 bytes .../test_communicability.cpython-312.pyc | Bin 0 -> 3216 bytes .../tests/__pycache__/test_core.cpython-312.pyc | Bin 0 -> 18529 bytes .../__pycache__/test_covering.cpython-312.pyc | Bin 0 -> 5992 bytes .../tests/__pycache__/test_cuts.cpython-312.pyc | Bin 0 -> 9544 bytes .../tests/__pycache__/test_cycles.cpython-312.pyc | Bin 0 -> 60383 bytes .../__pycache__/test_d_separation.cpython-312.pyc | Bin 0 -> 17254 bytes .../tests/__pycache__/test_dag.cpython-312.pyc | Bin 0 -> 52512 bytes .../test_distance_measures.cpython-312.pyc | Bin 0 -> 58067 bytes .../test_distance_regular.cpython-312.pyc | Bin 0 -> 6595 bytes .../__pycache__/test_dominance.cpython-312.pyc | Bin 0 -> 13635 bytes .../__pycache__/test_dominating.cpython-312.pyc | Bin 0 -> 6611 bytes .../__pycache__/test_efficiency.cpython-312.pyc | Bin 0 -> 3952 bytes .../tests/__pycache__/test_euler.cpython-312.pyc | Bin 0 -> 24668 bytes .../__pycache__/test_graph_hashing.cpython-312.pyc | Bin 0 -> 43056 bytes .../__pycache__/test_graphical.cpython-312.pyc | Bin 0 -> 10060 bytes .../__pycache__/test_hierarchy.cpython-312.pyc | Bin 0 -> 2913 bytes .../tests/__pycache__/test_hybrid.cpython-312.pyc | Bin 0 -> 1404 bytes .../tests/__pycache__/test_isolate.cpython-312.pyc | Bin 0 -> 1650 bytes .../test_link_prediction.cpython-312.pyc | Bin 0 -> 40745 bytes .../test_lowest_common_ancestors.cpython-312.pyc | Bin 0 -> 27839 bytes .../__pycache__/test_matching.cpython-312.pyc | Bin 0 -> 28766 bytes .../test_max_weight_clique.cpython-312.pyc | Bin 0 -> 9642 bytes .../tests/__pycache__/test_mis.cpython-312.pyc | Bin 0 -> 4398 bytes .../tests/__pycache__/test_moral.cpython-312.pyc | Bin 0 -> 1169 bytes .../test_node_classification.cpython-312.pyc | Bin 0 -> 9010 bytes .../test_non_randomness.cpython-312.pyc | Bin 0 -> 2594 bytes .../test_planar_drawing.cpython-312.pyc | Bin 0 -> 11890 bytes .../__pycache__/test_planarity.cpython-312.pyc | Bin 0 -> 25554 bytes .../__pycache__/test_polynomials.cpython-312.pyc | Bin 0 -> 3674 bytes .../__pycache__/test_reciprocity.cpython-312.pyc | Bin 0 -> 2641 bytes .../tests/__pycache__/test_regular.cpython-312.pyc | Bin 0 -> 6850 bytes .../__pycache__/test_richclub.cpython-312.pyc | Bin 0 -> 6893 bytes .../__pycache__/test_similarity.cpython-312.pyc | Bin 0 -> 45791 bytes .../__pycache__/test_simple_paths.cpython-312.pyc | Bin 0 -> 49342 bytes .../__pycache__/test_smallworld.cpython-312.pyc | Bin 0 -> 4896 bytes .../tests/__pycache__/test_smetric.cpython-312.pyc | Bin 0 -> 570 bytes .../__pycache__/test_sparsifiers.cpython-312.pyc | Bin 0 -> 6552 bytes .../test_structuralholes.cpython-312.pyc | Bin 0 -> 14011 bytes .../__pycache__/test_summarization.cpython-312.pyc | Bin 0 -> 24295 bytes .../tests/__pycache__/test_swap.cpython-312.pyc | Bin 0 -> 16071 bytes .../__pycache__/test_threshold.cpython-312.pyc | Bin 0 -> 19068 bytes .../test_time_dependent.cpython-312.pyc | Bin 0 -> 20084 bytes .../__pycache__/test_tournament.cpython-312.pyc | Bin 0 -> 8435 bytes .../tests/__pycache__/test_triads.cpython-312.pyc | Bin 0 -> 17276 bytes .../__pycache__/test_vitality.cpython-312.pyc | Bin 0 -> 3123 
bytes .../tests/__pycache__/test_voronoi.cpython-312.pyc | Bin 0 -> 5948 bytes .../tests/__pycache__/test_walks.cpython-312.pyc | Bin 0 -> 2813 bytes .../tests/__pycache__/test_wiener.cpython-312.pyc | Bin 0 -> 5181 bytes .../networkx/algorithms/tests/test_asteroidal.py | 23 + .../networkx/algorithms/tests/test_boundary.py | 154 + .../networkx/algorithms/tests/test_bridges.py | 144 + .../networkx/algorithms/tests/test_broadcasting.py | 82 + .../networkx/algorithms/tests/test_chains.py | 136 + .../networkx/algorithms/tests/test_chordal.py | 129 + .../networkx/algorithms/tests/test_clique.py | 291 + .../networkx/algorithms/tests/test_cluster.py | 584 + .../algorithms/tests/test_communicability.py | 80 + .../networkx/algorithms/tests/test_core.py | 266 + .../networkx/algorithms/tests/test_covering.py | 85 + .../networkx/algorithms/tests/test_cuts.py | 171 + .../networkx/algorithms/tests/test_cycles.py | 984 + .../networkx/algorithms/tests/test_d_separation.py | 340 + .../networkx/algorithms/tests/test_dag.py | 837 + .../algorithms/tests/test_distance_measures.py | 831 + .../algorithms/tests/test_distance_regular.py | 85 + .../networkx/algorithms/tests/test_dominance.py | 286 + .../networkx/algorithms/tests/test_dominating.py | 115 + .../networkx/algorithms/tests/test_efficiency.py | 58 + .../networkx/algorithms/tests/test_euler.py | 314 + .../algorithms/tests/test_graph_hashing.py | 872 + .../networkx/algorithms/tests/test_graphical.py | 163 + .../networkx/algorithms/tests/test_hierarchy.py | 46 + .../networkx/algorithms/tests/test_hybrid.py | 24 + .../networkx/algorithms/tests/test_isolate.py | 26 + .../algorithms/tests/test_link_prediction.py | 593 + .../tests/test_lowest_common_ancestors.py | 459 + .../networkx/algorithms/tests/test_matching.py | 548 + .../algorithms/tests/test_max_weight_clique.py | 179 + .../networkx/algorithms/tests/test_mis.py | 62 + .../networkx/algorithms/tests/test_moral.py | 15 + .../algorithms/tests/test_node_classification.py | 140 + .../algorithms/tests/test_non_randomness.py | 42 + .../algorithms/tests/test_planar_drawing.py | 274 + .../networkx/algorithms/tests/test_planarity.py | 556 + .../networkx/algorithms/tests/test_polynomials.py | 57 + .../networkx/algorithms/tests/test_reciprocity.py | 37 + .../networkx/algorithms/tests/test_regular.py | 91 + .../networkx/algorithms/tests/test_richclub.py | 149 + .../networkx/algorithms/tests/test_similarity.py | 984 + .../networkx/algorithms/tests/test_simple_paths.py | 803 + .../networkx/algorithms/tests/test_smallworld.py | 76 + .../networkx/algorithms/tests/test_smetric.py | 8 + .../networkx/algorithms/tests/test_sparsifiers.py | 138 + .../algorithms/tests/test_structuralholes.py | 191 + .../algorithms/tests/test_summarization.py | 642 + .../networkx/algorithms/tests/test_swap.py | 179 + .../networkx/algorithms/tests/test_threshold.py | 266 + .../algorithms/tests/test_time_dependent.py | 431 + .../networkx/algorithms/tests/test_tournament.py | 161 + .../networkx/algorithms/tests/test_triads.py | 248 + .../networkx/algorithms/tests/test_vitality.py | 41 + .../networkx/algorithms/tests/test_voronoi.py | 103 + .../networkx/algorithms/tests/test_walks.py | 54 + .../networkx/algorithms/tests/test_wiener.py | 123 + .../site-packages/networkx/algorithms/threshold.py | 1035 + .../networkx/algorithms/time_dependent.py | 142 + .../networkx/algorithms/tournament.py | 404 + .../networkx/algorithms/traversal/__init__.py | 5 + .../traversal/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 350 bytes 
.../__pycache__/beamsearch.cpython-312.pyc | Bin 0 -> 3426 bytes
.../breadth_first_search.cpython-312.pyc | Bin 0 -> 19426 bytes
.../__pycache__/depth_first_search.cpython-312.pyc | Bin 0 -> 17778 bytes
.../traversal/__pycache__/edgebfs.cpython-312.pyc | Bin 0 -> 7777 bytes
.../traversal/__pycache__/edgedfs.cpython-312.pyc | Bin 0 -> 7042 bytes
.../networkx/algorithms/traversal/beamsearch.py | 90 +
.../algorithms/traversal/breadth_first_search.py | 576 +
.../algorithms/traversal/depth_first_search.py | 529 +
.../networkx/algorithms/traversal/edgebfs.py | 185 +
.../networkx/algorithms/traversal/edgedfs.py | 182 +
.../algorithms/traversal/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 212 bytes
.../__pycache__/test_beamsearch.cpython-312.pyc | Bin 0 -> 1544 bytes
.../tests/__pycache__/test_bfs.cpython-312.pyc | Bin 0 -> 12714 bytes
.../tests/__pycache__/test_dfs.cpython-312.pyc | Bin 0 -> 15396 bytes
.../tests/__pycache__/test_edgebfs.cpython-312.pyc | Bin 0 -> 8849 bytes
.../tests/__pycache__/test_edgedfs.cpython-312.pyc | Bin 0 -> 7844 bytes
.../algorithms/traversal/tests/test_beamsearch.py | 25 +
.../algorithms/traversal/tests/test_bfs.py | 203 +
.../algorithms/traversal/tests/test_dfs.py | 307 +
.../algorithms/traversal/tests/test_edgebfs.py | 147 +
.../algorithms/traversal/tests/test_edgedfs.py | 131 +
.../networkx/algorithms/tree/__init__.py | 6 +
.../tree/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 351 bytes
.../tree/__pycache__/branchings.cpython-312.pyc | Bin 0 -> 34347 bytes
.../tree/__pycache__/coding.cpython-312.pyc | Bin 0 -> 15910 bytes
.../tree/__pycache__/decomposition.cpython-312.pyc | Bin 0 -> 4145 bytes
.../tree/__pycache__/mst.cpython-312.pyc | Bin 0 -> 46855 bytes
.../tree/__pycache__/operations.cpython-312.pyc | Bin 0 -> 4788 bytes
.../tree/__pycache__/recognition.cpython-312.pyc | Bin 0 -> 9849 bytes
.../networkx/algorithms/tree/branchings.py | 1042 +
.../networkx/algorithms/tree/coding.py | 413 +
.../networkx/algorithms/tree/decomposition.py | 88 +
.../site-packages/networkx/algorithms/tree/mst.py | 1283 +
.../networkx/algorithms/tree/operations.py | 106 +
.../networkx/algorithms/tree/recognition.py | 273 +
.../networkx/algorithms/tree/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 207 bytes
.../__pycache__/test_branchings.cpython-312.pyc | Bin 0 -> 23538 bytes
.../tests/__pycache__/test_coding.cpython-312.pyc | Bin 0 -> 8384 bytes
.../__pycache__/test_decomposition.cpython-312.pyc | Bin 0 -> 3028 bytes
.../tests/__pycache__/test_mst.cpython-312.pyc | Bin 0 -> 45028 bytes
.../__pycache__/test_operations.cpython-312.pyc | Bin 0 -> 4059 bytes
.../__pycache__/test_recognition.cpython-312.pyc | Bin 0 -> 11505 bytes
.../algorithms/tree/tests/test_branchings.py | 624 +
.../networkx/algorithms/tree/tests/test_coding.py | 114 +
.../algorithms/tree/tests/test_decomposition.py | 79 +
.../networkx/algorithms/tree/tests/test_mst.py | 918 +
.../algorithms/tree/tests/test_operations.py | 53 +
.../algorithms/tree/tests/test_recognition.py | 174 +
.../site-packages/networkx/algorithms/triads.py | 500 +
.../site-packages/networkx/algorithms/vitality.py | 76 +
.../site-packages/networkx/algorithms/voronoi.py | 86 +
.../site-packages/networkx/algorithms/walks.py | 79 +
.../site-packages/networkx/algorithms/wiener.py | 226 +
.../site-packages/networkx/classes/__init__.py | 13 +
.../classes/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 611 bytes
.../classes/__pycache__/coreviews.cpython-312.pyc | Bin 0 -> 23308 bytes
.../classes/__pycache__/digraph.cpython-312.pyc | Bin 0 -> 54038 bytes
.../classes/__pycache__/filters.cpython-312.pyc | Bin 0 -> 5522 bytes
.../classes/__pycache__/function.cpython-312.pyc | Bin 0 -> 49500 bytes
.../classes/__pycache__/graph.cpython-312.pyc | Bin 0 -> 79387 bytes
.../classes/__pycache__/graphviews.cpython-312.pyc | Bin 0 -> 9686 bytes
.../__pycache__/multidigraph.cpython-312.pyc | Bin 0 -> 40145 bytes
.../classes/__pycache__/multigraph.cpython-312.pyc | Bin 0 -> 51663 bytes
.../__pycache__/reportviews.cpython-312.pyc | Bin 0 -> 71836 bytes
.../site-packages/networkx/classes/coreviews.py | 435 +
.../site-packages/networkx/classes/digraph.py | 1352 +
.../site-packages/networkx/classes/filters.py | 95 +
.../site-packages/networkx/classes/function.py | 1416 +
.../site-packages/networkx/classes/graph.py | 2071 +
.../site-packages/networkx/classes/graphviews.py | 269 +
.../site-packages/networkx/classes/multidigraph.py | 966 +
.../site-packages/networkx/classes/multigraph.py | 1283 +
.../site-packages/networkx/classes/reportviews.py | 1447 +
.../networkx/classes/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 199 bytes
.../__pycache__/dispatch_interface.cpython-312.pyc | Bin 0 -> 9165 bytes
.../__pycache__/historical_tests.cpython-312.pyc | Bin 0 -> 27460 bytes
.../__pycache__/test_coreviews.cpython-312.pyc | Bin 0 -> 27633 bytes
.../tests/__pycache__/test_digraph.cpython-312.pyc | Bin 0 -> 26043 bytes
.../test_digraph_historical.cpython-312.pyc | Bin 0 -> 8702 bytes
.../tests/__pycache__/test_filters.cpython-312.pyc | Bin 0 -> 10815 bytes
.../__pycache__/test_function.cpython-312.pyc | Bin 0 -> 61327 bytes
.../tests/__pycache__/test_graph.cpython-312.pyc | Bin 0 -> 61203 bytes
.../test_graph_historical.cpython-312.pyc | Bin 0 -> 830 bytes
.../__pycache__/test_graphviews.cpython-312.pyc | Bin 0 -> 26159 bytes
.../__pycache__/test_multidigraph.cpython-312.pyc | Bin 0 -> 28784 bytes
.../__pycache__/test_multigraph.cpython-312.pyc | Bin 0 -> 32902 bytes
.../__pycache__/test_reportviews.cpython-312.pyc | Bin 0 -> 73874 bytes
.../tests/__pycache__/test_special.cpython-312.pyc | Bin 0 -> 8263 bytes
.../__pycache__/test_subgraphviews.cpython-312.pyc | Bin 0 -> 26516 bytes
.../networkx/classes/tests/dispatch_interface.py | 185 +
.../networkx/classes/tests/historical_tests.py | 475 +
.../networkx/classes/tests/test_coreviews.py | 362 +
.../networkx/classes/tests/test_digraph.py | 331 +
.../classes/tests/test_digraph_historical.py | 110 +
.../networkx/classes/tests/test_filters.py | 177 +
.../networkx/classes/tests/test_function.py | 1035 +
.../networkx/classes/tests/test_graph.py | 927 +
.../classes/tests/test_graph_historical.py | 12 +
.../networkx/classes/tests/test_graphviews.py | 349 +
.../networkx/classes/tests/test_multidigraph.py | 459 +
.../networkx/classes/tests/test_multigraph.py | 528 +
.../networkx/classes/tests/test_reportviews.py | 1421 +
.../networkx/classes/tests/test_special.py | 131 +
.../networkx/classes/tests/test_subgraphviews.py | 371 +
.../python3.12/site-packages/networkx/conftest.py | 257 +
.../python3.12/site-packages/networkx/convert.py | 502 +
.../site-packages/networkx/convert_matrix.py | 1314 +
.../site-packages/networkx/drawing/__init__.py | 7 +
.../drawing/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 336 bytes
.../drawing/__pycache__/layout.cpython-312.pyc | Bin 0 -> 74328 bytes
.../drawing/__pycache__/nx_agraph.cpython-312.pyc | Bin 0 -> 17507 bytes
.../drawing/__pycache__/nx_latex.cpython-312.pyc | Bin 0 -> 24822 bytes
.../drawing/__pycache__/nx_pydot.cpython-312.pyc | Bin 0 -> 12750 bytes
.../drawing/__pycache__/nx_pylab.cpython-312.pyc | Bin 0 -> 107766 bytes
.../site-packages/networkx/drawing/layout.py | 2028 +
.../site-packages/networkx/drawing/nx_agraph.py | 464 +
.../site-packages/networkx/drawing/nx_latex.py | 570 +
.../site-packages/networkx/drawing/nx_pydot.py | 361 +
.../site-packages/networkx/drawing/nx_pylab.py | 3021 +
.../networkx/drawing/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 199 bytes
.../tests/__pycache__/test_agraph.cpython-312.pyc | Bin 0 -> 17999 bytes
.../tests/__pycache__/test_latex.cpython-312.pyc | Bin 0 -> 10989 bytes
.../tests/__pycache__/test_layout.cpython-312.pyc | Bin 0 -> 42803 bytes
.../tests/__pycache__/test_pydot.cpython-312.pyc | Bin 0 -> 7801 bytes
.../tests/__pycache__/test_pylab.cpython-312.pyc | Bin 0 -> 92160 bytes
.../tests/baseline/test_display_complex.png | Bin 0 -> 142315 bytes
.../tests/baseline/test_display_empty_graph.png | Bin 0 -> 3257 bytes
.../baseline/test_display_house_with_colors.png | Bin 0 -> 26002 bytes
.../baseline/test_display_labels_and_colors.png | Bin 0 -> 70446 bytes
.../tests/baseline/test_display_shortest_path.png | Bin 0 -> 35897 bytes
.../tests/baseline/test_house_with_colors.png | Bin 0 -> 21918 bytes
.../networkx/drawing/tests/test_agraph.py | 240 +
.../networkx/drawing/tests/test_latex.py | 285 +
.../networkx/drawing/tests/test_layout.py | 631 +
.../networkx/drawing/tests/test_pydot.py | 146 +
.../networkx/drawing/tests/test_pylab.py | 1756 +
.../python3.12/site-packages/networkx/exception.py | 131 +
.../site-packages/networkx/generators/__init__.py | 34 +
.../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1583 bytes
.../generators/__pycache__/atlas.cpython-312.pyc | Bin 0 -> 6908 bytes
.../generators/__pycache__/classic.cpython-312.pyc | Bin 0 -> 37275 bytes
.../__pycache__/cographs.cpython-312.pyc | Bin 0 -> 2702 bytes
.../__pycache__/community.cpython-312.pyc | Bin 0 -> 39110 bytes
.../__pycache__/degree_seq.cpython-312.pyc | Bin 0 -> 34568 bytes
.../__pycache__/directed.cpython-312.pyc | Bin 0 -> 21629 bytes
.../__pycache__/duplication.cpython-312.pyc | Bin 0 -> 6380 bytes
.../generators/__pycache__/ego.cpython-312.pyc | Bin 0 -> 2315 bytes
.../__pycache__/expanders.cpython-312.pyc | Bin 0 -> 15847 bytes
.../__pycache__/geometric.cpython-312.pyc | Bin 0 -> 44278 bytes
.../__pycache__/harary_graph.cpython-312.pyc | Bin 0 -> 6681 bytes
.../__pycache__/internet_as_graphs.cpython-312.pyc | Bin 0 -> 19268 bytes
.../__pycache__/intersection.cpython-312.pyc | Bin 0 -> 5346 bytes
.../__pycache__/interval_graph.cpython-312.pyc | Bin 0 -> 2767 bytes
.../__pycache__/joint_degree_seq.cpython-312.pyc | Bin 0 -> 22680 bytes
.../generators/__pycache__/lattice.cpython-312.pyc | Bin 0 -> 20283 bytes
.../generators/__pycache__/line.cpython-312.pyc | Bin 0 -> 19717 bytes
.../__pycache__/mycielski.cpython-312.pyc | Bin 0 -> 4909 bytes
.../nonisomorphic_trees.cpython-312.pyc | Bin 0 -> 7143 bytes
.../__pycache__/random_clustered.cpython-312.pyc | Bin 0 -> 5100 bytes
.../__pycache__/random_graphs.cpython-312.pyc | Bin 0 -> 52732 bytes
.../generators/__pycache__/small.cpython-312.pyc | Bin 0 -> 31757 bytes
.../generators/__pycache__/social.cpython-312.pyc | Bin 0 -> 25287 bytes
.../spectral_graph_forge.cpython-312.pyc | Bin 0 -> 5520 bytes
.../__pycache__/stochastic.cpython-312.pyc | Bin 0 -> 2456 bytes
.../generators/__pycache__/sudoku.cpython-312.pyc | Bin 0 -> 4854 bytes
.../__pycache__/time_series.cpython-312.pyc | Bin 0 -> 3191 bytes
.../generators/__pycache__/trees.cpython-312.pyc | Bin 0 -> 40261 bytes
.../generators/__pycache__/triads.cpython-312.pyc | Bin 0 -> 2709 bytes
.../site-packages/networkx/generators/atlas.dat.gz | Bin 0 -> 8887 bytes
.../site-packages/networkx/generators/atlas.py | 227 +
.../site-packages/networkx/generators/classic.py | 1068 +
.../site-packages/networkx/generators/cographs.py | 68 +
.../site-packages/networkx/generators/community.py | 1070 +
.../networkx/generators/degree_seq.py | 867 +
.../site-packages/networkx/generators/directed.py | 572 +
.../networkx/generators/duplication.py | 174 +
.../site-packages/networkx/generators/ego.py | 66 +
.../site-packages/networkx/generators/expanders.py | 476 +
.../site-packages/networkx/generators/geometric.py | 1037 +
.../networkx/generators/harary_graph.py | 199 +
.../networkx/generators/internet_as_graphs.py | 441 +
.../networkx/generators/intersection.py | 125 +
.../networkx/generators/interval_graph.py | 70 +
.../networkx/generators/joint_degree_seq.py | 664 +
.../site-packages/networkx/generators/lattice.py | 367 +
.../site-packages/networkx/generators/line.py | 501 +
.../site-packages/networkx/generators/mycielski.py | 110 +
.../networkx/generators/nonisomorphic_trees.py | 209 +
.../networkx/generators/random_clustered.py | 117 +
.../networkx/generators/random_graphs.py | 1400 +
.../site-packages/networkx/generators/small.py | 996 +
.../site-packages/networkx/generators/social.py | 554 +
.../networkx/generators/spectral_graph_forge.py | 120 +
.../networkx/generators/stochastic.py | 54 +
.../site-packages/networkx/generators/sudoku.py | 131 +
.../networkx/generators/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 202 bytes
.../tests/__pycache__/test_atlas.cpython-312.pyc | Bin 0 -> 5318 bytes
.../tests/__pycache__/test_classic.cpython-312.pyc | Bin 0 -> 47600 bytes
.../__pycache__/test_cographs.cpython-312.pyc | Bin 0 -> 1002 bytes
.../__pycache__/test_community.cpython-312.pyc | Bin 0 -> 20428 bytes
.../__pycache__/test_degree_seq.cpython-312.pyc | Bin 0 -> 14487 bytes
.../__pycache__/test_directed.cpython-312.pyc | Bin 0 -> 11922 bytes
.../__pycache__/test_duplication.cpython-312.pyc | Bin 0 -> 7224 bytes
.../tests/__pycache__/test_ego.cpython-312.pyc | Bin 0 -> 2968 bytes
.../__pycache__/test_expanders.cpython-312.pyc | Bin 0 -> 11728 bytes
.../__pycache__/test_geometric.cpython-312.pyc | Bin 0 -> 32298 bytes
.../__pycache__/test_harary_graph.cpython-312.pyc | Bin 0 -> 5664 bytes
.../test_internet_as_graphs.cpython-312.pyc | Bin 0 -> 10345 bytes
.../__pycache__/test_intersection.cpython-312.pyc | Bin 0 -> 2040 bytes
.../test_interval_graph.cpython-312.pyc | Bin 0 -> 6515 bytes
.../test_joint_degree_seq.cpython-312.pyc | Bin 0 -> 3870 bytes
.../tests/__pycache__/test_lattice.cpython-312.pyc | Bin 0 -> 18183 bytes
.../tests/__pycache__/test_line.cpython-312.pyc | Bin 0 -> 20271 bytes
.../__pycache__/test_mycielski.cpython-312.pyc | Bin 0 -> 2624 bytes
.../test_nonisomorphic_trees.cpython-312.pyc | Bin 0 -> 4277 bytes
.../test_random_clustered.cpython-312.pyc | Bin 0 -> 3004 bytes
.../__pycache__/test_random_graphs.cpython-312.pyc | Bin 0 -> 31633 bytes
.../tests/__pycache__/test_small.cpython-312.pyc | Bin 0 -> 16038 bytes
.../test_spectral_graph_forge.cpython-312.pyc | Bin 0 -> 1786 bytes
.../__pycache__/test_stochastic.cpython-312.pyc | Bin 0 -> 4692 bytes
.../tests/__pycache__/test_sudoku.cpython-312.pyc | Bin 0 -> 2608 bytes
.../__pycache__/test_time_series.cpython-312.pyc | Bin 0 -> 3935 bytes
.../tests/__pycache__/test_trees.cpython-312.pyc | Bin 0 -> 12053 bytes
.../tests/__pycache__/test_triads.cpython-312.pyc | Bin 0 -> 1009 bytes
.../networkx/generators/tests/test_atlas.py | 75 +
.../networkx/generators/tests/test_classic.py | 639 +
.../networkx/generators/tests/test_cographs.py | 18 +
.../networkx/generators/tests/test_community.py | 362 +
.../networkx/generators/tests/test_degree_seq.py | 237 +
.../networkx/generators/tests/test_directed.py | 182 +
.../networkx/generators/tests/test_duplication.py | 103 +
.../networkx/generators/tests/test_ego.py | 39 +
.../networkx/generators/tests/test_expanders.py | 171 +
.../networkx/generators/tests/test_geometric.py | 488 +
.../networkx/generators/tests/test_harary_graph.py | 133 +
.../generators/tests/test_internet_as_graphs.py | 176 +
.../networkx/generators/tests/test_intersection.py | 28 +
.../generators/tests/test_interval_graph.py | 144 +
.../generators/tests/test_joint_degree_seq.py | 125 +
.../networkx/generators/tests/test_lattice.py | 246 +
.../networkx/generators/tests/test_line.py | 309 +
.../networkx/generators/tests/test_mycielski.py | 30 +
.../generators/tests/test_nonisomorphic_trees.py | 56 +
.../generators/tests/test_random_clustered.py | 33 +
.../generators/tests/test_random_graphs.py | 478 +
.../networkx/generators/tests/test_small.py | 202 +
.../generators/tests/test_spectral_graph_forge.py | 49 +
.../networkx/generators/tests/test_stochastic.py | 72 +
.../networkx/generators/tests/test_sudoku.py | 92 +
.../networkx/generators/tests/test_time_series.py | 64 +
.../networkx/generators/tests/test_trees.py | 195 +
.../networkx/generators/tests/test_triads.py | 15 +
.../networkx/generators/time_series.py | 74 +
.../site-packages/networkx/generators/trees.py | 1070 +
.../site-packages/networkx/generators/triads.py | 94 +
.../site-packages/networkx/lazy_imports.py | 188 +
.../site-packages/networkx/linalg/__init__.py | 13 +
.../linalg/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 754 bytes
.../algebraicconnectivity.cpython-312.pyc | Bin 0 -> 28390 bytes
.../linalg/__pycache__/attrmatrix.cpython-312.pyc | Bin 0 -> 17369 bytes
.../__pycache__/bethehessianmatrix.cpython-312.pyc | Bin 0 -> 3806 bytes
.../linalg/__pycache__/graphmatrix.cpython-312.pyc | Bin 0 -> 6479 bytes
.../__pycache__/laplacianmatrix.cpython-312.pyc | Bin 0 -> 19178 bytes
.../__pycache__/modularitymatrix.cpython-312.pyc | Bin 0 -> 5447 bytes
.../linalg/__pycache__/spectrum.cpython-312.pyc | Bin 0 -> 5615 bytes
.../networkx/linalg/algebraicconnectivity.py | 655 +
.../site-packages/networkx/linalg/attrmatrix.py | 466 +
.../networkx/linalg/bethehessianmatrix.py | 79 +
.../site-packages/networkx/linalg/graphmatrix.py | 168 +
.../networkx/linalg/laplacianmatrix.py | 520 +
.../networkx/linalg/modularitymatrix.py | 166 +
.../site-packages/networkx/linalg/spectrum.py | 186 +
.../networkx/linalg/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 198 bytes
.../test_algebraic_connectivity.cpython-312.pyc | Bin 0 -> 23532 bytes
.../__pycache__/test_attrmatrix.cpython-312.pyc | Bin 0 -> 6007 bytes
.../__pycache__/test_bethehessian.cpython-312.pyc | Bin 0 -> 2568 bytes
.../__pycache__/test_graphmatrix.cpython-312.pyc | Bin 0 -> 13770 bytes
.../__pycache__/test_laplacian.cpython-312.pyc | Bin 0 -> 16478 bytes
.../__pycache__/test_modularity.cpython-312.pyc | Bin 0 -> 4219 bytes
.../__pycache__/test_spectrum.cpython-312.pyc | Bin 0 -> 6358 bytes
.../linalg/tests/test_algebraic_connectivity.py | 400 +
.../networkx/linalg/tests/test_attrmatrix.py | 108 +
.../networkx/linalg/tests/test_bethehessian.py | 40 +
.../networkx/linalg/tests/test_graphmatrix.py | 275 +
.../networkx/linalg/tests/test_laplacian.py | 334 +
.../networkx/linalg/tests/test_modularity.py | 86 +
.../networkx/linalg/tests/test_spectrum.py | 70 +
.../site-packages/networkx/readwrite/__init__.py | 17 +
.../readwrite/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 777 bytes
.../readwrite/__pycache__/adjlist.cpython-312.pyc | Bin 0 -> 10019 bytes
.../readwrite/__pycache__/edgelist.cpython-312.pyc | Bin 0 -> 15532 bytes
.../readwrite/__pycache__/gexf.cpython-312.pyc | Bin 0 -> 45136 bytes
.../readwrite/__pycache__/gml.cpython-312.pyc | Bin 0 -> 34784 bytes
.../readwrite/__pycache__/graph6.cpython-312.pyc | Bin 0 -> 14708 bytes
.../readwrite/__pycache__/graphml.cpython-312.pyc | Bin 0 -> 43610 bytes
.../readwrite/__pycache__/leda.cpython-312.pyc | Bin 0 -> 4103 bytes
.../__pycache__/multiline_adjlist.cpython-312.pyc | Bin 0 -> 12769 bytes
.../readwrite/__pycache__/p2g.cpython-312.pyc | Bin 0 -> 4714 bytes
.../readwrite/__pycache__/pajek.cpython-312.pyc | Bin 0 -> 11301 bytes
.../readwrite/__pycache__/sparse6.cpython-312.pyc | Bin 0 -> 12611 bytes
.../readwrite/__pycache__/text.cpython-312.pyc | Bin 0 -> 25990 bytes
.../site-packages/networkx/readwrite/adjlist.py | 310 +
.../site-packages/networkx/readwrite/edgelist.py | 489 +
.../site-packages/networkx/readwrite/gexf.py | 1064 +
.../site-packages/networkx/readwrite/gml.py | 879 +
.../site-packages/networkx/readwrite/graph6.py | 427 +
.../site-packages/networkx/readwrite/graphml.py | 1053 +
.../networkx/readwrite/json_graph/__init__.py | 19 +
.../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 907 bytes
.../__pycache__/adjacency.cpython-312.pyc | Bin 0 -> 5894 bytes
.../__pycache__/cytoscape.cpython-312.pyc | Bin 0 -> 7625 bytes
.../__pycache__/node_link.cpython-312.pyc | Bin 0 -> 11310 bytes
.../json_graph/__pycache__/tree.cpython-312.pyc | Bin 0 -> 5184 bytes
.../networkx/readwrite/json_graph/adjacency.py | 156 +
.../networkx/readwrite/json_graph/cytoscape.py | 190 +
.../networkx/readwrite/json_graph/node_link.py | 333 +
.../readwrite/json_graph/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 212 bytes
.../__pycache__/test_adjacency.cpython-312.pyc | Bin 0 -> 5251 bytes
.../__pycache__/test_cytoscape.cpython-312.pyc | Bin 0 -> 4581 bytes
.../__pycache__/test_node_link.cpython-312.pyc | Bin 0 -> 12038 bytes
.../tests/__pycache__/test_tree.cpython-312.pyc | Bin 0 -> 3417 bytes
.../readwrite/json_graph/tests/test_adjacency.py | 78 +
.../readwrite/json_graph/tests/test_cytoscape.py | 78 +
.../readwrite/json_graph/tests/test_node_link.py | 175 +
.../readwrite/json_graph/tests/test_tree.py | 48 +
.../networkx/readwrite/json_graph/tree.py | 137 +
.../site-packages/networkx/readwrite/leda.py | 108 +
.../networkx/readwrite/multiline_adjlist.py | 393 +
.../site-packages/networkx/readwrite/p2g.py | 113 +
.../site-packages/networkx/readwrite/pajek.py | 286 +
.../site-packages/networkx/readwrite/sparse6.py | 379 +
.../networkx/readwrite/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 201 bytes
.../tests/__pycache__/test_adjlist.cpython-312.pyc | Bin 0 -> 18565 bytes
.../__pycache__/test_edgelist.cpython-312.pyc | Bin 0 -> 18069 bytes
.../tests/__pycache__/test_gexf.cpython-312.pyc | Bin 0 -> 34980 bytes
.../tests/__pycache__/test_gml.cpython-312.pyc | Bin 0 -> 30123 bytes
.../tests/__pycache__/test_graph6.cpython-312.pyc | Bin 0 -> 15251 bytes
.../tests/__pycache__/test_graphml.cpython-312.pyc | Bin 0 -> 90846 bytes
.../tests/__pycache__/test_leda.cpython-312.pyc | Bin 0 -> 2271 bytes
.../tests/__pycache__/test_p2g.cpython-312.pyc | Bin 0 -> 3135 bytes
.../tests/__pycache__/test_pajek.cpython-312.pyc | Bin 0 -> 7970 bytes
.../tests/__pycache__/test_sparse6.cpython-312.pyc | Bin 0 -> 9470 bytes
.../tests/__pycache__/test_text.cpython-312.pyc | Bin 0 -> 67233 bytes
.../networkx/readwrite/tests/test_adjlist.py | 262 +
.../networkx/readwrite/tests/test_edgelist.py | 314 +
.../networkx/readwrite/tests/test_gexf.py | 579 +
.../networkx/readwrite/tests/test_gml.py | 744 +
.../networkx/readwrite/tests/test_graph6.py | 181 +
.../networkx/readwrite/tests/test_graphml.py | 1531 +
.../networkx/readwrite/tests/test_leda.py | 30 +
.../networkx/readwrite/tests/test_p2g.py | 62 +
.../networkx/readwrite/tests/test_pajek.py | 126 +
.../networkx/readwrite/tests/test_sparse6.py | 166 +
.../networkx/readwrite/tests/test_text.py | 1742 +
.../site-packages/networkx/readwrite/text.py | 851 +
.../python3.12/site-packages/networkx/relabel.py | 285 +
.../site-packages/networkx/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 191 bytes
.../test_all_random_functions.cpython-312.pyc | Bin 0 -> 14370 bytes
.../tests/__pycache__/test_convert.cpython-312.pyc | Bin 0 -> 25242 bytes
.../__pycache__/test_convert_numpy.cpython-312.pyc | Bin 0 -> 33918 bytes
.../test_convert_pandas.cpython-312.pyc | Bin 0 -> 21722 bytes
.../__pycache__/test_convert_scipy.cpython-312.pyc | Bin 0 -> 19696 bytes
.../__pycache__/test_exceptions.cpython-312.pyc | Bin 0 -> 2501 bytes
.../tests/__pycache__/test_import.cpython-312.pyc | Bin 0 -> 809 bytes
.../__pycache__/test_lazy_imports.cpython-312.pyc | Bin 0 -> 4009 bytes
.../tests/__pycache__/test_relabel.cpython-312.pyc | Bin 0 -> 27631 bytes
.../networkx/tests/test_all_random_functions.py | 250 +
.../site-packages/networkx/tests/test_convert.py | 321 +
.../networkx/tests/test_convert_numpy.py | 531 +
.../networkx/tests/test_convert_pandas.py | 349 +
.../networkx/tests/test_convert_scipy.py | 281 +
.../networkx/tests/test_exceptions.py | 40 +
.../site-packages/networkx/tests/test_import.py | 11 +
.../networkx/tests/test_lazy_imports.py | 96 +
.../site-packages/networkx/tests/test_relabel.py | 347 +
.../site-packages/networkx/utils/__init__.py | 8 +
.../utils/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 500 bytes
.../utils/__pycache__/backends.cpython-312.pyc | Bin 0 -> 73020 bytes
.../utils/__pycache__/configs.cpython-312.pyc | Bin 0 -> 20531 bytes
.../utils/__pycache__/decorators.cpython-312.pyc | Bin 0 -> 45792 bytes
.../utils/__pycache__/heaps.cpython-312.pyc | Bin 0 -> 11824 bytes
.../utils/__pycache__/mapped_queue.cpython-312.pyc | Bin 0 -> 11860 bytes
.../utils/__pycache__/misc.cpython-312.pyc | Bin 0 -> 26246 bytes
.../__pycache__/random_sequence.cpython-312.pyc | Bin 0 -> 5471 bytes
.../networkx/utils/__pycache__/rcm.cpython-312.pyc | Bin 0 -> 5840 bytes
.../utils/__pycache__/union_find.cpython-312.pyc | Bin 0 -> 4403 bytes
.../site-packages/networkx/utils/backends.py | 2143 +
.../site-packages/networkx/utils/configs.py | 391 +
.../site-packages/networkx/utils/decorators.py | 1233 +
.../site-packages/networkx/utils/heaps.py | 338 +
.../site-packages/networkx/utils/mapped_queue.py | 297 +
.../site-packages/networkx/utils/misc.py | 651 +
.../networkx/utils/random_sequence.py | 164 +
.../python3.12/site-packages/networkx/utils/rcm.py | 159 +
.../site-packages/networkx/utils/tests/__init__.py | 0
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 197 bytes
.../tests/__pycache__/test__init.cpython-312.pyc | Bin 0 -> 984 bytes
.../__pycache__/test_backends.cpython-312.pyc | Bin 0 -> 9985 bytes
.../tests/__pycache__/test_config.cpython-312.pyc | Bin 0 -> 18071 bytes
.../__pycache__/test_decorators.cpython-312.pyc | Bin 0 -> 31724 bytes
.../tests/__pycache__/test_heaps.cpython-312.pyc | Bin 0 -> 5488 bytes
.../__pycache__/test_mapped_queue.cpython-312.pyc | Bin 0 -> 14549 bytes
.../tests/__pycache__/test_misc.cpython-312.pyc | Bin 0 -> 15920 bytes
.../test_random_sequence.cpython-312.pyc | Bin 0 -> 1923 bytes
.../tests/__pycache__/test_rcm.cpython-312.pyc | Bin 0 -> 2031 bytes
.../__pycache__/test_unionfind.cpython-312.pyc | Bin 0 -> 2670 bytes
.../networkx/utils/tests/test__init.py | 11 +
.../networkx/utils/tests/test_backends.py | 187 +
.../networkx/utils/tests/test_config.py | 263 +
.../networkx/utils/tests/test_decorators.py | 510 +
.../networkx/utils/tests/test_heaps.py | 131 +
.../networkx/utils/tests/test_mapped_queue.py | 268 +
.../networkx/utils/tests/test_misc.py | 268 +
.../networkx/utils/tests/test_random_sequence.py | 38 +
.../site-packages/networkx/utils/tests/test_rcm.py | 63 +
.../networkx/utils/tests/test_unionfind.py | 55 +
.../site-packages/networkx/utils/union_find.py | 106 +
.../site-packages/numpy-2.3.2.dist-info/INSTALLER | 1 +
.../numpy-2.3.2.dist-info/LICENSE.txt | 971 +
.../site-packages/numpy-2.3.2.dist-info/METADATA | 1093 +
.../site-packages/numpy-2.3.2.dist-info/RECORD | 1314 +
.../site-packages/numpy-2.3.2.dist-info/REQUESTED | 0
.../site-packages/numpy-2.3.2.dist-info/WHEEL | 6 +
.../numpy-2.3.2.dist-info/entry_points.txt | 13 +
.../libgfortran-040039e1-0352e75f.so.5.0.0 | Bin 0 -> 2833617 bytes
.../libquadmath-96973f99-934c22de.so.0.0.0 | Bin 0 -> 250985 bytes
.../numpy.libs/libscipy_openblas64_-8fb3d286.so | Bin 0 -> 25050385 bytes
.../python3.12/site-packages/numpy/__config__.py | 170 +
.../python3.12/site-packages/numpy/__config__.pyi | 102 +
.../site-packages/numpy/__init__.cython-30.pxd | 1241 +
.../python3.12/site-packages/numpy/__init__.pxd | 1154 +
.../lib/python3.12/site-packages/numpy/__init__.py | 928 +
.../python3.12/site-packages/numpy/__init__.pyi | 5387 +
.../numpy/__pycache__/__config__.cpython-312.pyc | Bin 0 -> 5102 bytes
.../numpy/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 27561 bytes
.../__pycache__/_array_api_info.cpython-312.pyc | Bin 0 -> 11098 bytes
.../numpy/__pycache__/_configtool.cpython-312.pyc | Bin 0 -> 1671 bytes
.../__pycache__/_distributor_init.cpython-312.pyc | Bin 0 -> 660 bytes
.../__pycache__/_expired_attrs_2_0.cpython-312.pyc | Bin 0 -> 4296 bytes
.../numpy/__pycache__/_globals.cpython-312.pyc | Bin 0 -> 3895 bytes
.../__pycache__/_pytesttester.cpython-312.pyc | Bin 0 -> 6728 bytes
.../numpy/__pycache__/conftest.cpython-312.pyc | Bin 0 -> 10515 bytes
.../numpy/__pycache__/dtypes.cpython-312.pyc | Bin 0 -> 1540 bytes
.../numpy/__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 8735 bytes
.../numpy/__pycache__/matlib.cpython-312.pyc | Bin 0 -> 11962 bytes
.../numpy/__pycache__/version.cpython-312.pyc | Bin 0 -> 549 bytes
.../site-packages/numpy/_array_api_info.py | 346 +
.../site-packages/numpy/_array_api_info.pyi | 207 +
.../python3.12/site-packages/numpy/_configtool.py | 39 +
.../python3.12/site-packages/numpy/_configtool.pyi | 1 +
.../site-packages/numpy/_core/__init__.py | 186 +
.../site-packages/numpy/_core/__init__.pyi | 2 +
.../_core/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 5672 bytes
.../_core/__pycache__/_add_newdocs.cpython-312.pyc | Bin 0 -> 200245 bytes
.../_add_newdocs_scalars.cpython-312.pyc | Bin 0 -> 13221 bytes
.../_core/__pycache__/_asarray.cpython-312.pyc | Bin 0 -> 4282 bytes
.../numpy/_core/__pycache__/_dtype.cpython-312.pyc | Bin 0 -> 13470 bytes
.../__pycache__/_dtype_ctypes.cpython-312.pyc | Bin 0 -> 4844 bytes
.../_core/__pycache__/_exceptions.cpython-312.pyc | Bin 0 -> 8291 bytes
.../_core/__pycache__/_internal.cpython-312.pyc | Bin 0 -> 34878 bytes
.../_core/__pycache__/_machar.cpython-312.pyc | Bin 0 -> 11687 bytes
.../_core/__pycache__/_methods.cpython-312.pyc | Bin 0 -> 11439 bytes
.../__pycache__/_string_helpers.cpython-312.pyc | Bin 0 -> 3292 bytes
.../__pycache__/_type_aliases.cpython-312.pyc | Bin 0 -> 3758 bytes
.../__pycache__/_ufunc_config.cpython-312.pyc | Bin 0 -> 16595 bytes
.../_core/__pycache__/arrayprint.cpython-312.pyc | Bin 0 -> 72498 bytes
.../_core/__pycache__/cversions.cpython-312.pyc | Bin 0 -> 632 bytes
.../_core/__pycache__/defchararray.cpython-312.pyc | Bin 0 -> 42062 bytes
.../_core/__pycache__/einsumfunc.cpython-312.pyc | Bin 0 -> 50073 bytes
.../_core/__pycache__/fromnumeric.cpython-312.pyc | Bin 0 -> 150475 bytes
.../__pycache__/function_base.cpython-312.pyc | Bin 0 -> 20803 bytes
.../_core/__pycache__/getlimits.cpython-312.pyc | Bin 0 -> 28447 bytes
.../numpy/_core/__pycache__/memmap.cpython-312.pyc | Bin 0 -> 13331 bytes
.../_core/__pycache__/multiarray.cpython-312.pyc | Bin 0 -> 58659 bytes
.../_core/__pycache__/numeric.cpython-312.pyc | Bin 0 -> 91193 bytes
.../_core/__pycache__/numerictypes.cpython-312.pyc | Bin 0 -> 17689 bytes
.../_core/__pycache__/overrides.cpython-312.pyc | Bin 0 -> 7902 bytes
.../_core/__pycache__/printoptions.cpython-312.pyc | Bin 0 -> 941 bytes
.../_core/__pycache__/records.cpython-312.pyc | Bin 0 -> 39474 bytes
.../_core/__pycache__/shape_base.cpython-312.pyc | Bin 0 -> 34456 bytes
.../_core/__pycache__/strings.cpython-312.pyc | Bin 0 -> 59926 bytes
.../numpy/_core/__pycache__/umath.cpython-312.pyc | Bin 0 -> 2010 bytes
.../site-packages/numpy/_core/_add_newdocs.py | 6967 ++
.../site-packages/numpy/_core/_add_newdocs.pyi | 3 +
.../numpy/_core/_add_newdocs_scalars.py | 390 +
.../numpy/_core/_add_newdocs_scalars.pyi | 16 +
.../site-packages/numpy/_core/_asarray.py | 134 +
.../site-packages/numpy/_core/_asarray.pyi | 41 +
.../python3.12/site-packages/numpy/_core/_dtype.py | 366 +
.../site-packages/numpy/_core/_dtype.pyi | 58 +
.../site-packages/numpy/_core/_dtype_ctypes.py | 120 +
.../site-packages/numpy/_core/_dtype_ctypes.pyi | 83 +
.../site-packages/numpy/_core/_exceptions.py | 162 +
.../site-packages/numpy/_core/_exceptions.pyi | 55 +
.../site-packages/numpy/_core/_internal.py | 958 +
.../site-packages/numpy/_core/_internal.pyi | 72 +
.../site-packages/numpy/_core/_machar.py | 355 +
.../site-packages/numpy/_core/_machar.pyi | 55 +
.../site-packages/numpy/_core/_methods.py | 255 +
.../site-packages/numpy/_core/_methods.pyi | 22 +
...ultiarray_tests.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 141888 bytes
...ultiarray_umath.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 10808937 bytes
...rand_flag_tests.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 16800 bytes
..._rational_tests.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 59592 bytes
.../_core/_simd.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 2882368 bytes
.../python3.12/site-packages/numpy/_core/_simd.pyi | 25 +
.../site-packages/numpy/_core/_string_helpers.py | 100 +
.../site-packages/numpy/_core/_string_helpers.pyi | 12 +
...uct_ufunc_tests.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 16936 bytes
.../site-packages/numpy/_core/_type_aliases.py | 119 +
.../site-packages/numpy/_core/_type_aliases.pyi | 97 +
.../site-packages/numpy/_core/_ufunc_config.py | 489 +
.../site-packages/numpy/_core/_ufunc_config.pyi | 32 +
.../_umath_tests.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 50312 bytes
.../site-packages/numpy/_core/arrayprint.py | 1775 +
.../site-packages/numpy/_core/arrayprint.pyi | 238 +
.../site-packages/numpy/_core/cversions.py | 13 +
.../site-packages/numpy/_core/defchararray.py | 1427 +
.../site-packages/numpy/_core/defchararray.pyi | 1135 +
.../site-packages/numpy/_core/einsumfunc.py | 1498 +
.../site-packages/numpy/_core/einsumfunc.pyi | 184 +
.../site-packages/numpy/_core/fromnumeric.py | 4269 +
.../site-packages/numpy/_core/fromnumeric.pyi | 1750 +
.../site-packages/numpy/_core/function_base.py | 545 +
.../site-packages/numpy/_core/function_base.pyi | 278 +
.../site-packages/numpy/_core/getlimits.py | 748 +
.../site-packages/numpy/_core/getlimits.pyi | 3 +
.../numpy/_core/include/numpy/__multiarray_api.c | 376 +
.../numpy/_core/include/numpy/__multiarray_api.h | 1622 +
.../numpy/_core/include/numpy/__ufunc_api.c | 54 +
.../numpy/_core/include/numpy/__ufunc_api.h | 341 +
.../include/numpy/_neighborhood_iterator_imp.h | 90 +
.../numpy/_core/include/numpy/_numpyconfig.h | 33 +
.../_core/include/numpy/_public_dtype_api_table.h | 86 +
.../numpy/_core/include/numpy/arrayobject.h | 7 +
.../numpy/_core/include/numpy/arrayscalars.h | 196 +
.../numpy/_core/include/numpy/dtype_api.h | 480 +
.../numpy/_core/include/numpy/halffloat.h | 70 +
.../numpy/_core/include/numpy/ndarrayobject.h | 304 +
.../numpy/_core/include/numpy/ndarraytypes.h | 1950 +
.../numpy/_core/include/numpy/npy_2_compat.h | 249 +
.../_core/include/numpy/npy_2_complexcompat.h | 28 +
.../numpy/_core/include/numpy/npy_3kcompat.h | 374 +
.../numpy/_core/include/numpy/npy_common.h | 977 +
.../numpy/_core/include/numpy/npy_cpu.h | 124 +
.../numpy/_core/include/numpy/npy_endian.h | 78 +
.../numpy/_core/include/numpy/npy_math.h | 602 +
.../_core/include/numpy/npy_no_deprecated_api.h | 20 +
.../numpy/_core/include/numpy/npy_os.h | 42 +
.../numpy/_core/include/numpy/numpyconfig.h | 182 +
.../numpy/_core/include/numpy/random/LICENSE.txt | 21 +
.../numpy/_core/include/numpy/random/bitgen.h | 20 +
.../_core/include/numpy/random/distributions.h | 209 +
.../numpy/_core/include/numpy/random/libdivide.h | 2079 +
.../numpy/_core/include/numpy/ufuncobject.h | 343 +
.../numpy/_core/include/numpy/utils.h | 37 +
.../site-packages/numpy/_core/lib/libnpymath.a | Bin 0 -> 54312 bytes
.../numpy/_core/lib/npy-pkg-config/mlib.ini | 12 +
.../numpy/_core/lib/npy-pkg-config/npymath.ini | 20 +
.../numpy/_core/lib/pkgconfig/numpy.pc | 7 +
.../python3.12/site-packages/numpy/_core/memmap.py | 363 +
.../site-packages/numpy/_core/memmap.pyi | 3 +
.../site-packages/numpy/_core/multiarray.py | 1762 +
.../site-packages/numpy/_core/multiarray.pyi | 1285 +
.../site-packages/numpy/_core/numeric.py | 2760 +
.../site-packages/numpy/_core/numeric.pyi | 882 +
.../site-packages/numpy/_core/numerictypes.py | 633 +
.../site-packages/numpy/_core/numerictypes.pyi | 192 +
.../site-packages/numpy/_core/overrides.py | 183 +
.../site-packages/numpy/_core/overrides.pyi | 48 +
.../site-packages/numpy/_core/printoptions.py | 32 +
.../site-packages/numpy/_core/printoptions.pyi | 28 +
.../site-packages/numpy/_core/records.py | 1089 +
.../site-packages/numpy/_core/records.pyi | 333 +
.../site-packages/numpy/_core/shape_base.py | 998 +
.../site-packages/numpy/_core/shape_base.pyi | 175 +
.../site-packages/numpy/_core/strings.py | 1823 +
.../site-packages/numpy/_core/strings.pyi | 511 +
.../tests/__pycache__/_locales.cpython-312.pyc | Bin 0 -> 3576 bytes
.../tests/__pycache__/_natype.cpython-312.pyc | Bin 0 -> 8145 bytes
.../__pycache__/test__exceptions.cpython-312.pyc | Bin 0 -> 5481 bytes
.../tests/__pycache__/test_abc.cpython-312.pyc | Bin 0 -> 4457 bytes
.../tests/__pycache__/test_api.cpython-312.pyc | Bin 0 -> 39732 bytes
.../__pycache__/test_argparse.cpython-312.pyc | Bin 0 -> 5156 bytes
.../test_array_api_info.cpython-312.pyc | Bin 0 -> 6002 bytes
.../test_array_coercion.cpython-312.pyc | Bin 0 -> 52255 bytes
.../test_array_interface.cpython-312.pyc | Bin 0 -> 8236 bytes
.../__pycache__/test_arraymethod.cpython-312.pyc | Bin 0 -> 5077 bytes
.../__pycache__/test_arrayobject.cpython-312.pyc | Bin 0 -> 4185 bytes
.../__pycache__/test_arrayprint.cpython-312.pyc | Bin 0 -> 74163 bytes
...st_casting_floatingpoint_errors.cpython-312.pyc | Bin 0 -> 9300 bytes
.../test_casting_unittests.cpython-312.pyc | Bin 0 -> 39301 bytes
.../test_conversion_utils.cpython-312.pyc | Bin 0 -> 12983 bytes
.../test_cpu_dispatcher.cpython-312.pyc | Bin 0 -> 1550 bytes
.../__pycache__/test_cpu_features.cpython-312.pyc | Bin 0 -> 20368 bytes
.../__pycache__/test_custom_dtypes.cpython-312.pyc | Bin 0 -> 21319 bytes
.../tests/__pycache__/test_cython.cpython-312.pyc | Bin 0 -> 18827 bytes
.../__pycache__/test_datetime.cpython-312.pyc | Bin 0 -> 174850 bytes
.../__pycache__/test_defchararray.cpython-312.pyc | Bin 0 -> 64382 bytes
.../__pycache__/test_deprecations.cpython-312.pyc | Bin 0 -> 31575 bytes
.../tests/__pycache__/test_dlpack.cpython-312.pyc | Bin 0 -> 12981 bytes
.../tests/__pycache__/test_dtype.cpython-312.pyc | Bin 0 -> 122350 bytes
.../tests/__pycache__/test_einsum.cpython-312.pyc | Bin 0 -> 78324 bytes
.../__pycache__/test_errstate.cpython-312.pyc | Bin 0 -> 8694 bytes
.../__pycache__/test_extint128.cpython-312.pyc | Bin 0 -> 10344 bytes
.../__pycache__/test_function_base.cpython-312.pyc | Bin 0 -> 29345 bytes
.../__pycache__/test_getlimits.cpython-312.pyc | Bin 0 -> 13842 bytes
.../tests/__pycache__/test_half.cpython-312.pyc | Bin 0 -> 38233 bytes
.../__pycache__/test_hashtable.cpython-312.pyc | Bin 0 -> 1766 bytes
.../__pycache__/test_indexerrors.cpython-312.pyc | Bin 0 -> 12910 bytes
.../__pycache__/test_indexing.cpython-312.pyc | Bin 0 -> 86408 bytes
.../test_item_selection.cpython-312.pyc | Bin 0 -> 10641 bytes
.../__pycache__/test_limited_api.cpython-312.pyc | Bin 0 -> 4733 bytes
.../__pycache__/test_longdouble.cpython-312.pyc | Bin 0 -> 23323 bytes
.../tests/__pycache__/test_machar.cpython-312.pyc | Bin 0 -> 1769 bytes
.../__pycache__/test_mem_overlap.cpython-312.pyc | Bin 0 -> 49249 bytes
.../__pycache__/test_mem_policy.cpython-312.pyc | Bin 0 -> 20028 bytes
.../tests/__pycache__/test_memmap.cpython-312.pyc | Bin 0 -> 14628 bytes
.../__pycache__/test_multiarray.cpython-312.pyc | Bin 0 -> 678288 bytes
.../test_multithreading.cpython-312.pyc | Bin 0 -> 14978 bytes
.../tests/__pycache__/test_nditer.cpython-312.pyc | Bin 0 -> 185737 bytes
.../test_nep50_promotions.cpython-312.pyc | Bin 0 -> 17904 bytes
.../tests/__pycache__/test_numeric.cpython-312.pyc | Bin 0 -> 280511 bytes
.../__pycache__/test_numerictypes.cpython-312.pyc | Bin 0 -> 41326 bytes
.../__pycache__/test_overrides.cpython-312.pyc | Bin 0 -> 52364 bytes
.../tests/__pycache__/test_print.cpython-312.pyc | Bin 0 -> 11602 bytes
.../__pycache__/test_protocols.cpython-312.pyc | Bin 0 -> 3258 bytes
.../tests/__pycache__/test_records.cpython-312.pyc | Bin 0 -> 39462 bytes
.../__pycache__/test_regression.cpython-312.pyc | Bin 0 -> 176141 bytes
.../__pycache__/test_scalar_ctors.cpython-312.pyc | Bin 0 -> 13993 bytes
.../test_scalar_methods.cpython-312.pyc | Bin 0 -> 17146 bytes
.../__pycache__/test_scalarbuffer.cpython-312.pyc | Bin 0 -> 9471 bytes
.../__pycache__/test_scalarinherit.cpython-312.pyc | Bin 0 -> 6196 bytes
.../__pycache__/test_scalarmath.cpython-312.pyc | Bin 0 -> 73753 bytes
.../__pycache__/test_scalarprint.cpython-312.pyc | Bin 0 -> 21426 bytes
.../__pycache__/test_shape_base.cpython-312.pyc | Bin 0 -> 50651 bytes
.../tests/__pycache__/test_simd.cpython-312.pyc | Bin 0 -> 70235 bytes
.../__pycache__/test_simd_module.cpython-312.pyc | Bin 0 -> 7046 bytes
.../__pycache__/test_stringdtype.cpython-312.pyc | Bin 0 -> 94845 bytes
.../tests/__pycache__/test_strings.cpython-312.pyc | Bin 0 -> 85552 bytes
.../tests/__pycache__/test_ufunc.cpython-312.pyc | Bin 0 -> 222129 bytes
.../tests/__pycache__/test_umath.cpython-312.pyc | Bin 0 -> 342735 bytes
.../test_umath_accuracy.cpython-312.pyc | Bin 0 -> 8685 bytes
.../__pycache__/test_umath_complex.cpython-312.pyc | Bin 0 -> 42942 bytes
.../tests/__pycache__/test_unicode.cpython-312.pyc | Bin 0 -> 18959 bytes
.../site-packages/numpy/_core/tests/_locales.py | 72 +
.../site-packages/numpy/_core/tests/_natype.py | 205 +
.../numpy/_core/tests/data/astype_copy.pkl | Bin 0 -> 716 bytes
.../tests/data/generate_umath_validation_data.cpp | 170 +
.../numpy/_core/tests/data/recarray_from_file.fits | Bin 0 -> 8640 bytes
.../tests/data/umath-validation-set-README.txt | 15 +
.../tests/data/umath-validation-set-arccos.csv | 1429 +
.../tests/data/umath-validation-set-arccosh.csv | 1429 +
.../tests/data/umath-validation-set-arcsin.csv | 1429 +
.../tests/data/umath-validation-set-arcsinh.csv | 1429 +
.../tests/data/umath-validation-set-arctan.csv | 1429 +
.../tests/data/umath-validation-set-arctanh.csv | 1429 +
.../_core/tests/data/umath-validation-set-cbrt.csv | 1429 +
.../_core/tests/data/umath-validation-set-cos.csv | 1375 +
.../_core/tests/data/umath-validation-set-cosh.csv | 1429 +
.../_core/tests/data/umath-validation-set-exp.csv | 412 +
.../_core/tests/data/umath-validation-set-exp2.csv | 1429 +
.../tests/data/umath-validation-set-expm1.csv | 1429 +
.../_core/tests/data/umath-validation-set-log.csv | 271 +
.../tests/data/umath-validation-set-log10.csv | 1629 +
.../tests/data/umath-validation-set-log1p.csv | 1429 +
.../_core/tests/data/umath-validation-set-log2.csv | 1629 +
.../_core/tests/data/umath-validation-set-sin.csv | 1370 +
.../_core/tests/data/umath-validation-set-sinh.csv | 1429 +
.../_core/tests/data/umath-validation-set-tan.csv | 1429 +
.../_core/tests/data/umath-validation-set-tanh.csv | 1429 +
.../cython/__pycache__/setup.cpython-312.pyc | Bin 0 -> 1273 bytes
.../numpy/_core/tests/examples/cython/checks.pyx | 373 +
.../numpy/_core/tests/examples/cython/meson.build | 43 +
.../numpy/_core/tests/examples/cython/setup.py | 39 +
.../limited_api/__pycache__/setup.cpython-312.pyc | Bin 0 -> 813 bytes
.../tests/examples/limited_api/limited_api1.c | 17 +
.../tests/examples/limited_api/limited_api2.pyx | 11 +
.../examples/limited_api/limited_api_latest.c | 19 +
.../_core/tests/examples/limited_api/meson.build | 59 +
.../_core/tests/examples/limited_api/setup.py | 24 +
.../numpy/_core/tests/test__exceptions.py | 90 +
.../site-packages/numpy/_core/tests/test_abc.py | 54 +
.../site-packages/numpy/_core/tests/test_api.py | 621 +
.../numpy/_core/tests/test_argparse.py | 92 +
.../numpy/_core/tests/test_array_api_info.py | 113 +
.../numpy/_core/tests/test_array_coercion.py | 911 +
.../numpy/_core/tests/test_array_interface.py | 222 +
.../numpy/_core/tests/test_arraymethod.py | 84 +
.../numpy/_core/tests/test_arrayobject.py | 75 +
.../numpy/_core/tests/test_arrayprint.py | 1328 +
.../tests/test_casting_floatingpoint_errors.py | 154 +
.../numpy/_core/tests/test_casting_unittests.py | 817 +
.../numpy/_core/tests/test_conversion_utils.py | 206 +
.../numpy/_core/tests/test_cpu_dispatcher.py | 49 +
.../numpy/_core/tests/test_cpu_features.py | 432 +
.../numpy/_core/tests/test_custom_dtypes.py | 315 +
.../site-packages/numpy/_core/tests/test_cython.py | 351 +
.../numpy/_core/tests/test_datetime.py | 2710 +
.../numpy/_core/tests/test_defchararray.py | 825 +
.../numpy/_core/tests/test_deprecations.py | 454 +
.../site-packages/numpy/_core/tests/test_dlpack.py | 190 +
.../site-packages/numpy/_core/tests/test_dtype.py | 1995 +
.../site-packages/numpy/_core/tests/test_einsum.py | 1317 +
.../numpy/_core/tests/test_errstate.py | 131 +
.../numpy/_core/tests/test_extint128.py | 217 +
.../numpy/_core/tests/test_function_base.py | 503 +
.../numpy/_core/tests/test_getlimits.py | 205 +
.../site-packages/numpy/_core/tests/test_half.py | 568 +
.../numpy/_core/tests/test_hashtable.py | 35 +
.../numpy/_core/tests/test_indexerrors.py | 125 +
.../numpy/_core/tests/test_indexing.py | 1455 +
.../numpy/_core/tests/test_item_selection.py | 167 +
.../numpy/_core/tests/test_limited_api.py | 102 +
.../numpy/_core/tests/test_longdouble.py | 369 +
.../site-packages/numpy/_core/tests/test_machar.py | 30 +
.../numpy/_core/tests/test_mem_overlap.py | 930 +
.../numpy/_core/tests/test_mem_policy.py | 452 +
.../site-packages/numpy/_core/tests/test_memmap.py | 246 +
.../numpy/_core/tests/test_multiarray.py | 10563 ++
.../numpy/_core/tests/test_multithreading.py | 292 +
.../site-packages/numpy/_core/tests/test_nditer.py | 3498 +
.../numpy/_core/tests/test_nep50_promotions.py | 287 +
.../numpy/_core/tests/test_numeric.py | 4247 +
.../numpy/_core/tests/test_numerictypes.py | 622 +
.../numpy/_core/tests/test_overrides.py | 791 +
.../site-packages/numpy/_core/tests/test_print.py | 200 +
.../numpy/_core/tests/test_protocols.py | 46 +
.../numpy/_core/tests/test_records.py | 544 +
.../numpy/_core/tests/test_regression.py | 2670 +
.../numpy/_core/tests/test_scalar_ctors.py | 207 +
.../numpy/_core/tests/test_scalar_methods.py | 246 +
.../numpy/_core/tests/test_scalarbuffer.py | 153 +
.../numpy/_core/tests/test_scalarinherit.py | 105 +
.../numpy/_core/tests/test_scalarmath.py | 1176 +
.../numpy/_core/tests/test_scalarprint.py | 403 +
.../numpy/_core/tests/test_shape_base.py | 891 +
.../site-packages/numpy/_core/tests/test_simd.py | 1341 +
.../numpy/_core/tests/test_simd_module.py | 103 +
.../numpy/_core/tests/test_stringdtype.py | 1807 +
.../numpy/_core/tests/test_strings.py | 1454 +
.../site-packages/numpy/_core/tests/test_ufunc.py | 3313 +
.../site-packages/numpy/_core/tests/test_umath.py | 4916 +
.../numpy/_core/tests/test_umath_accuracy.py | 124 +
.../numpy/_core/tests/test_umath_complex.py | 626 +
.../numpy/_core/tests/test_unicode.py | 368 +
.../python3.12/site-packages/numpy/_core/umath.py | 60 +
.../python3.12/site-packages/numpy/_core/umath.pyi | 197 +
.../site-packages/numpy/_distributor_init.py | 15 +
.../site-packages/numpy/_distributor_init.pyi | 1 +
.../site-packages/numpy/_expired_attrs_2_0.py | 79 +
.../site-packages/numpy/_expired_attrs_2_0.pyi | 62 +
.../lib/python3.12/site-packages/numpy/_globals.py | 96 +
.../python3.12/site-packages/numpy/_globals.pyi | 17 +
.../site-packages/numpy/_pyinstaller/__init__.py | 0
.../site-packages/numpy/_pyinstaller/__init__.pyi | 0
.../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 195 bytes
.../__pycache__/hook-numpy.cpython-312.pyc | Bin 0 -> 938 bytes
.../site-packages/numpy/_pyinstaller/hook-numpy.py | 36 +
.../numpy/_pyinstaller/hook-numpy.pyi | 13 +
.../numpy/_pyinstaller/tests/__init__.py | 16 +
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 568 bytes
.../__pycache__/pyinstaller-smoke.cpython-312.pyc | Bin 0 -> 2613 bytes
.../__pycache__/test_pyinstaller.cpython-312.pyc | Bin 0 -> 1963 bytes
.../numpy/_pyinstaller/tests/pyinstaller-smoke.py | 32 +
.../numpy/_pyinstaller/tests/test_pyinstaller.py | 35 +
.../site-packages/numpy/_pytesttester.py | 201 +
.../site-packages/numpy/_pytesttester.pyi | 18 +
.../site-packages/numpy/_typing/__init__.py | 148 +
.../_typing/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 5113 bytes
.../__pycache__/_add_docstring.cpython-312.pyc | Bin 0 -> 4800 bytes
.../__pycache__/_array_like.cpython-312.pyc | Bin 0 -> 5802 bytes
.../__pycache__/_char_codes.cpython-312.pyc | Bin 0 -> 7075 bytes
.../__pycache__/_dtype_like.cpython-312.pyc | Bin 0 -> 3919 bytes
.../_extended_precision.cpython-312.pyc | Bin 0 -> 784 bytes
.../_typing/__pycache__/_nbit.cpython-312.pyc | Bin 0 -> 966 bytes
.../_typing/__pycache__/_nbit_base.cpython-312.pyc | Bin 0 -> 3876 bytes
.../__pycache__/_nested_sequence.cpython-312.pyc | Bin 0 -> 3411 bytes
.../_typing/__pycache__/_scalars.cpython-312.pyc | Bin 0 -> 1426 bytes
.../_typing/__pycache__/_shape.cpython-312.pyc | Bin 0 -> 531 bytes
.../_typing/__pycache__/_ufunc.cpython-312.pyc | Bin 0 -> 372 bytes
.../site-packages/numpy/_typing/_add_docstring.py | 153 +
.../site-packages/numpy/_typing/_array_like.py | 106 +
.../site-packages/numpy/_typing/_callable.pyi | 366 +
.../site-packages/numpy/_typing/_char_codes.py | 213 +
.../site-packages/numpy/_typing/_dtype_like.py | 114 +
.../numpy/_typing/_extended_precision.py | 15 +
.../site-packages/numpy/_typing/_nbit.py | 19 +
.../site-packages/numpy/_typing/_nbit_base.py | 94 +
.../site-packages/numpy/_typing/_nbit_base.pyi | 40 +
.../numpy/_typing/_nested_sequence.py | 79 +
.../site-packages/numpy/_typing/_scalars.py | 20 +
.../site-packages/numpy/_typing/_shape.py | 8 +
.../site-packages/numpy/_typing/_ufunc.py | 7 +
.../site-packages/numpy/_typing/_ufunc.pyi | 941 +
.../site-packages/numpy/_utils/__init__.py | 95 +
.../site-packages/numpy/_utils/__init__.pyi | 30 +
.../_utils/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4182 bytes
.../__pycache__/_convertions.cpython-312.pyc | Bin 0 -> 836 bytes
.../_utils/__pycache__/_inspect.cpython-312.pyc | Bin 0 -> 9433 bytes
.../_utils/__pycache__/_pep440.cpython-312.pyc | Bin 0 -> 18748 bytes
.../site-packages/numpy/_utils/_convertions.py | 18 +
.../site-packages/numpy/_utils/_convertions.pyi | 4 +
.../site-packages/numpy/_utils/_inspect.py | 192 +
.../site-packages/numpy/_utils/_inspect.pyi | 71 +
.../site-packages/numpy/_utils/_pep440.py | 486 +
.../site-packages/numpy/_utils/_pep440.pyi | 121 +
.../site-packages/numpy/char/__init__.py | 2 +
.../site-packages/numpy/char/__init__.pyi | 111 +
.../char/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 285 bytes
.../lib/python3.12/site-packages/numpy/conftest.py | 258 +
.../site-packages/numpy/core/__init__.py | 33 +
.../site-packages/numpy/core/__init__.pyi | 0
.../core/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1163 bytes
.../numpy/core/__pycache__/_dtype.cpython-312.pyc | Bin 0 -> 621 bytes
.../core/__pycache__/_dtype_ctypes.cpython-312.pyc | Bin 0 -> 642 bytes
.../core/__pycache__/_internal.cpython-312.pyc | Bin 0 -> 961 bytes
.../__pycache__/_multiarray_umath.cpython-312.pyc | Bin 0 -> 2125 bytes
.../numpy/core/__pycache__/_utils.cpython-312.pyc | Bin 0 -> 1175 bytes
.../core/__pycache__/arrayprint.cpython-312.pyc | Bin 0 -> 633 bytes
.../core/__pycache__/defchararray.cpython-312.pyc | Bin 0 -> 639 bytes
.../core/__pycache__/einsumfunc.cpython-312.pyc | Bin 0 -> 633 bytes
.../core/__pycache__/fromnumeric.cpython-312.pyc | Bin 0 -> 636 bytes
.../core/__pycache__/function_base.cpython-312.pyc | Bin 0 -> 642 bytes
.../core/__pycache__/getlimits.cpython-312.pyc | Bin 0 -> 630 bytes
.../core/__pycache__/multiarray.cpython-312.pyc | Bin 0 -> 851 bytes
.../numpy/core/__pycache__/numeric.cpython-312.pyc | Bin 0 -> 676 bytes
.../core/__pycache__/numerictypes.cpython-312.pyc | Bin 0 -> 639 bytes
.../core/__pycache__/overrides.cpython-312.pyc | Bin 0 -> 630 bytes
.../numpy/core/__pycache__/records.cpython-312.pyc | Bin 0 -> 624 bytes
.../core/__pycache__/shape_base.cpython-312.pyc | Bin 0 -> 633 bytes
.../numpy/core/__pycache__/umath.cpython-312.pyc | Bin 0 -> 618 bytes
.../python3.12/site-packages/numpy/core/_dtype.py | 10 +
.../python3.12/site-packages/numpy/core/_dtype.pyi | 0
.../site-packages/numpy/core/_dtype_ctypes.py | 10 +
.../site-packages/numpy/core/_dtype_ctypes.pyi | 0
.../site-packages/numpy/core/_internal.py | 27 +
.../site-packages/numpy/core/_multiarray_umath.py | 57 +
.../python3.12/site-packages/numpy/core/_utils.py | 21 +
.../site-packages/numpy/core/arrayprint.py | 10 +
.../site-packages/numpy/core/defchararray.py | 10 +
.../site-packages/numpy/core/einsumfunc.py | 10 +
.../site-packages/numpy/core/fromnumeric.py | 10 +
.../site-packages/numpy/core/function_base.py | 10 +
.../site-packages/numpy/core/getlimits.py | 10 +
.../site-packages/numpy/core/multiarray.py | 25 +
.../python3.12/site-packages/numpy/core/numeric.py | 12 +
.../site-packages/numpy/core/numerictypes.py | 10 +
.../site-packages/numpy/core/overrides.py | 10 +
.../site-packages/numpy/core/overrides.pyi | 7 +
.../python3.12/site-packages/numpy/core/records.py | 10 +
.../site-packages/numpy/core/shape_base.py | 10 +
.../python3.12/site-packages/numpy/core/umath.py | 10 +
.../site-packages/numpy/ctypeslib/__init__.py | 13 +
.../site-packages/numpy/ctypeslib/__init__.pyi | 33 +
.../ctypeslib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 456 bytes
.../__pycache__/_ctypeslib.cpython-312.pyc | Bin 0 -> 22404 bytes
.../site-packages/numpy/ctypeslib/_ctypeslib.py | 603 +
.../site-packages/numpy/ctypeslib/_ctypeslib.pyi | 245 +
.../numpy/doc/__pycache__/ufuncs.cpython-312.pyc | Bin 0 -> 5610 bytes
.../python3.12/site-packages/numpy/doc/ufuncs.py | 138 +
.venv/lib/python3.12/site-packages/numpy/dtypes.py | 41 +
.../lib/python3.12/site-packages/numpy/dtypes.pyi | 631 +
.../python3.12/site-packages/numpy/exceptions.py | 247 +
.../python3.12/site-packages/numpy/exceptions.pyi | 25 +
.../site-packages/numpy/f2py/__init__.py | 86 +
.../site-packages/numpy/f2py/__init__.pyi | 6 +
.../site-packages/numpy/f2py/__main__.py | 5 +
.../f2py/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 3134 bytes
.../f2py/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 257 bytes
.../f2py/__pycache__/__version__.cpython-312.pyc | Bin 0 -> 238 bytes
.../f2py/__pycache__/_isocbind.cpython-312.pyc | Bin 0 -> 1904 bytes
.../f2py/__pycache__/_src_pyf.cpython-312.pyc | Bin 0 -> 9034 bytes
.../f2py/__pycache__/auxfuncs.cpython-312.pyc | Bin 0 -> 39134 bytes
.../f2py/__pycache__/capi_maps.cpython-312.pyc | Bin 0 -> 31112 bytes
.../f2py/__pycache__/cb_rules.cpython-312.pyc | Bin 0 -> 22505 bytes
.../numpy/f2py/__pycache__/cfuncs.cpython-312.pyc | Bin 0 -> 49018 bytes
.../f2py/__pycache__/common_rules.cpython-312.pyc | Bin 0 -> 7404 bytes
.../f2py/__pycache__/crackfortran.cpython-312.pyc | Bin 0 -> 150449 bytes
.../f2py/__pycache__/diagnose.cpython-312.pyc | Bin 0 -> 6594 bytes
.../numpy/f2py/__pycache__/f2py2e.cpython-312.pyc | Bin 0 -> 34136 bytes
.../f2py/__pycache__/f90mod_rules.cpython-312.pyc | Bin 0 -> 12356 bytes
.../f2py/__pycache__/func2subr.cpython-312.pyc | Bin 0 -> 11598 bytes
.../numpy/f2py/__pycache__/rules.cpython-312.pyc | Bin 0 -> 52771 bytes
.../f2py/__pycache__/symbolic.cpython-312.pyc | Bin 0 -> 79042 bytes
.../f2py/__pycache__/use_rules.cpython-312.pyc | Bin 0 -> 4266 bytes
.../site-packages/numpy/f2py/__version__.py | 1 +
.../site-packages/numpy/f2py/__version__.pyi | 1 +
.../site-packages/numpy/f2py/_backends/__init__.py | 9 +
.../numpy/f2py/_backends/__init__.pyi | 5 +
.../_backends/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 569 bytes
.../_backends/__pycache__/_backend.cpython-312.pyc | Bin 0 -> 1376 bytes
.../__pycache__/_distutils.cpython-312.pyc | Bin 0 -> 3560 bytes
.../_backends/__pycache__/_meson.cpython-312.pyc | Bin 0 -> 13395 bytes
.../site-packages/numpy/f2py/_backends/_backend.py | 44 +
.../numpy/f2py/_backends/_backend.pyi | 46 +
.../numpy/f2py/_backends/_distutils.py | 76 +
.../numpy/f2py/_backends/_distutils.pyi | 13 +
.../site-packages/numpy/f2py/_backends/_meson.py | 231 +
.../site-packages/numpy/f2py/_backends/_meson.pyi | 63 +
.../numpy/f2py/_backends/meson.build.template | 55 +
.../site-packages/numpy/f2py/_isocbind.py | 62 +
.../site-packages/numpy/f2py/_isocbind.pyi | 13 +
.../site-packages/numpy/f2py/_src_pyf.py | 247 +
.../site-packages/numpy/f2py/_src_pyf.pyi | 29 +
.../site-packages/numpy/f2py/auxfuncs.py | 1004 +
.../site-packages/numpy/f2py/auxfuncs.pyi | 264 +
.../site-packages/numpy/f2py/capi_maps.py | 811 +
.../site-packages/numpy/f2py/capi_maps.pyi | 33 +
.../site-packages/numpy/f2py/cb_rules.py | 665 +
.../site-packages/numpy/f2py/cb_rules.pyi | 17 +
.../python3.12/site-packages/numpy/f2py/cfuncs.py | 1563 +
.../python3.12/site-packages/numpy/f2py/cfuncs.pyi | 31 +
.../site-packages/numpy/f2py/common_rules.py | 143 +
.../site-packages/numpy/f2py/common_rules.pyi | 9 +
.../site-packages/numpy/f2py/crackfortran.py | 3725 +
.../site-packages/numpy/f2py/crackfortran.pyi | 258 +
.../site-packages/numpy/f2py/diagnose.py | 149 +
.../site-packages/numpy/f2py/diagnose.pyi | 1 +
.../python3.12/site-packages/numpy/f2py/f2py2e.py | 786 +
.../python3.12/site-packages/numpy/f2py/f2py2e.pyi | 76 +
.../site-packages/numpy/f2py/f90mod_rules.py | 269 +
.../site-packages/numpy/f2py/f90mod_rules.pyi | 16 +
.../site-packages/numpy/f2py/func2subr.py | 329 +
.../site-packages/numpy/f2py/func2subr.pyi | 7 +
.../python3.12/site-packages/numpy/f2py/rules.py | 1629 +
.../python3.12/site-packages/numpy/f2py/rules.pyi | 43 +
.../python3.12/site-packages/numpy/f2py/setup.cfg | 3 +
.../site-packages/numpy/f2py/src/fortranobject.c | 1436 +
.../site-packages/numpy/f2py/src/fortranobject.h | 173 +
.../site-packages/numpy/f2py/symbolic.py | 1516 +
.../site-packages/numpy/f2py/symbolic.pyi | 221 +
.../site-packages/numpy/f2py/tests/__init__.py | 16 +
.../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 560 bytes
.../test_abstract_interface.cpython-312.pyc | Bin 0 -> 1935 bytes
.../test_array_from_pyobj.cpython-312.pyc | Bin 0 -> 42830 bytes
.../__pycache__/test_assumed_shape.cpython-312.pyc | Bin 0 -> 3327 bytes
.../test_block_docstring.cpython-312.pyc | Bin 0 -> 1468 bytes
.../__pycache__/test_callback.cpython-312.pyc | Bin 0 -> 14755 bytes
.../__pycache__/test_character.cpython-312.pyc | Bin 0 -> 35665 bytes
.../tests/__pycache__/test_common.cpython-312.pyc | Bin 0 -> 2114 bytes
.../__pycache__/test_crackfortran.cpython-312.pyc | Bin 0 -> 25614 bytes
.../tests/__pycache__/test_data.cpython-312.pyc | Bin 0 -> 6759 bytes
.../tests/__pycache__/test_docs.cpython-312.pyc | Bin 0 -> 3551 bytes
.../tests/__pycache__/test_f2cmap.cpython-312.pyc | Bin 0 -> 1018 bytes
.../tests/__pycache__/test_f2py2e.cpython-312.pyc | Bin 0 -> 43300 bytes
.../tests/__pycache__/test_isoc.cpython-312.pyc | Bin 0 -> 2737 bytes
.../tests/__pycache__/test_kind.cpython-312.pyc | Bin 0 -> 3061 bytes
.../tests/__pycache__/test_mixed.cpython-312.pyc | Bin 0 -> 2012 bytes
.../tests/__pycache__/test_modules.cpython-312.pyc | Bin 0 -> 4612 bytes
.../__pycache__/test_parameter.cpython-312.pyc | Bin 0 -> 9268 bytes
.../tests/__pycache__/test_pyf_src.cpython-312.pyc | Bin 0 -> 1550 bytes
.../test_quoted_character.cpython-312.pyc | Bin 0 -> 1301 bytes
.../__pycache__/test_regression.cpython-312.pyc | Bin 0 -> 11898 bytes
.../test_return_character.cpython-312.pyc | Bin 0 -> 3042 bytes
.../test_return_complex.cpython-312.pyc | Bin 0 -> 4797 bytes
.../test_return_integer.cpython-312.pyc | Bin 0 -> 3470 bytes
.../test_return_logical.cpython-312.pyc | Bin 0 -> 4412 bytes
.../__pycache__/test_return_real.cpython-312.pyc | Bin 0 -> 5920 bytes
.../__pycache__/test_routines.cpython-312.pyc | Bin 0 -> 1987 bytes
.../test_semicolon_split.cpython-312.pyc | Bin 0 -> 2764 bytes
.../tests/__pycache__/test_size.cpython-312.pyc | Bin 0 -> 3149 bytes
.../tests/__pycache__/test_string.cpython-312.pyc | Bin 0 -> 5723 bytes
.../__pycache__/test_symbolic.cpython-312.pyc | Bin 0 -> 32197 bytes
.../test_value_attrspec.cpython-312.pyc | Bin 0 -> 1030 bytes
.../f2py/tests/__pycache__/util.cpython-312.pyc | Bin 0 -> 18149 bytes
.../f2py/tests/src/abstract_interface/foo.f90 | 34 +
.../tests/src/abstract_interface/gh18403_mod.f90 | 6 +
.../f2py/tests/src/array_from_pyobj/wrapmodule.c | 235 +
.../f2py/tests/src/assumed_shape/.f2py_f2cmap | 1 +
.../f2py/tests/src/assumed_shape/foo_free.f90 | 34 +
.../numpy/f2py/tests/src/assumed_shape/foo_mod.f90 | 41 +
.../numpy/f2py/tests/src/assumed_shape/foo_use.f90 | 19 +
.../f2py/tests/src/assumed_shape/precision.f90 | 4 +
.../numpy/f2py/tests/src/block_docstring/foo.f | 6 +
.../numpy/f2py/tests/src/callback/foo.f | 62 +
.../numpy/f2py/tests/src/callback/gh17797.f90 | 7 +
.../numpy/f2py/tests/src/callback/gh18335.f90 | 17 +
.../numpy/f2py/tests/src/callback/gh25211.f | 10 +
.../numpy/f2py/tests/src/callback/gh25211.pyf | 18 +
.../numpy/f2py/tests/src/callback/gh26681.f90 | 18 +
.../numpy/f2py/tests/src/cli/gh_22819.pyf | 6 +
.../site-packages/numpy/f2py/tests/src/cli/hi77.f | 3 +
.../numpy/f2py/tests/src/cli/hiworld.f90 | 3 +
.../numpy/f2py/tests/src/common/block.f | 11 +
.../numpy/f2py/tests/src/common/gh19161.f90 | 10 +
 .../f2py/tests/src/crackfortran/accesstype.f90 | 13 +
 .../tests/src/crackfortran/common_with_division.f | 17 +
 .../f2py/tests/src/crackfortran/data_common.f | 8 +
 .../f2py/tests/src/crackfortran/data_multiplier.f | 5 +
 .../f2py/tests/src/crackfortran/data_stmts.f90 | 20 +
 .../tests/src/crackfortran/data_with_comments.f | 8 +
 .../numpy/f2py/tests/src/crackfortran/foo_deps.f90 | 6 +
 .../numpy/f2py/tests/src/crackfortran/gh15035.f | 16 +
 .../numpy/f2py/tests/src/crackfortran/gh17859.f | 12 +
 .../numpy/f2py/tests/src/crackfortran/gh22648.pyf | 7 +
 .../numpy/f2py/tests/src/crackfortran/gh23533.f | 5 +
 .../numpy/f2py/tests/src/crackfortran/gh23598.f90 | 4 +
 .../f2py/tests/src/crackfortran/gh23598Warn.f90 | 11 +
 .../numpy/f2py/tests/src/crackfortran/gh23879.f90 | 20 +
 .../numpy/f2py/tests/src/crackfortran/gh27697.f90 | 12 +
 .../numpy/f2py/tests/src/crackfortran/gh2848.f90 | 13 +
 .../f2py/tests/src/crackfortran/operators.f90 | 49 +
 .../f2py/tests/src/crackfortran/privatemod.f90 | 11 +
 .../f2py/tests/src/crackfortran/publicmod.f90 | 10 +
 .../f2py/tests/src/crackfortran/pubprivmod.f90 | 10 +
 .../tests/src/crackfortran/unicode_comment.f90 | 4 +
 .../numpy/f2py/tests/src/f2cmap/.f2py_f2cmap | 1 +
 .../f2py/tests/src/f2cmap/isoFortranEnvMap.f90 | 9 +
 .../numpy/f2py/tests/src/isocintrin/isoCtests.f90 | 34 +
 .../numpy/f2py/tests/src/kind/foo.f90 | 20 +
 .../site-packages/numpy/f2py/tests/src/mixed/foo.f | 5 +
 .../numpy/f2py/tests/src/mixed/foo_fixed.f90 | 8 +
 .../numpy/f2py/tests/src/mixed/foo_free.f90 | 8 +
 .../numpy/f2py/tests/src/modules/gh25337/data.f90 | 8 +
 .../f2py/tests/src/modules/gh25337/use_data.f90 | 6 +
 .../gh26920/two_mods_with_no_public_entities.f90 | 21 +
 .../gh26920/two_mods_with_one_public_routine.f90 | 21 +
 .../tests/src/modules/module_data_docstring.f90 | 12 +
 .../numpy/f2py/tests/src/modules/use_modules.f90 | 20 +
 .../f2py/tests/src/negative_bounds/issue_20853.f90 | 7 +
 .../f2py/tests/src/parameter/constant_array.f90 | 45 +
 .../f2py/tests/src/parameter/constant_both.f90 | 57 +
 .../f2py/tests/src/parameter/constant_compound.f90 | 15 +
 .../f2py/tests/src/parameter/constant_integer.f90 | 22 +
 .../tests/src/parameter/constant_non_compound.f90 | 23 +
 .../f2py/tests/src/parameter/constant_real.f90 | 23 +
 .../numpy/f2py/tests/src/quoted_character/foo.f | 14 +
 .../numpy/f2py/tests/src/regression/AB.inc | 1 +
 .../f2py/tests/src/regression/assignOnlyModule.f90 | 25 +
 .../numpy/f2py/tests/src/regression/datonly.f90 | 17 +
 .../numpy/f2py/tests/src/regression/f77comments.f | 26 +
 .../f2py/tests/src/regression/f77fixedform.f95 | 5 +
 .../f2py/tests/src/regression/f90continuation.f90 | 9 +
 .../numpy/f2py/tests/src/regression/incfile.f90 | 5 +
 .../numpy/f2py/tests/src/regression/inout.f90 | 9 +
 .../tests/src/regression/lower_f2py_fortran.f90 | 5 +
 .../tests/src/regression/mod_derived_types.f90 | 23 +
 .../numpy/f2py/tests/src/return_character/foo77.f | 45 +
 .../f2py/tests/src/return_character/foo90.f90 | 48 +
 .../numpy/f2py/tests/src/return_complex/foo77.f | 45 +
 .../numpy/f2py/tests/src/return_complex/foo90.f90 | 48 +
 .../numpy/f2py/tests/src/return_integer/foo77.f | 56 +
 .../numpy/f2py/tests/src/return_integer/foo90.f90 | 59 +
 .../numpy/f2py/tests/src/return_logical/foo77.f | 56 +
 .../numpy/f2py/tests/src/return_logical/foo90.f90 | 59 +
 .../numpy/f2py/tests/src/return_real/foo77.f | 45 +
 .../numpy/f2py/tests/src/return_real/foo90.f90 | 48 +
 .../f2py/tests/src/routines/funcfortranname.f | 5 +
 .../f2py/tests/src/routines/funcfortranname.pyf | 11 +
 .../numpy/f2py/tests/src/routines/subrout.f | 4 +
 .../numpy/f2py/tests/src/routines/subrout.pyf | 10 +
 .../numpy/f2py/tests/src/size/foo.f90 | 44 +
 .../numpy/f2py/tests/src/string/char.f90 | 29 +
 .../numpy/f2py/tests/src/string/fixed_string.f90 | 34 +
 .../numpy/f2py/tests/src/string/gh24008.f | 8 +
 .../numpy/f2py/tests/src/string/gh24662.f90 | 7 +
 .../numpy/f2py/tests/src/string/gh25286.f90 | 14 +
 .../numpy/f2py/tests/src/string/gh25286.pyf | 12 +
 .../numpy/f2py/tests/src/string/gh25286_bc.pyf | 12 +
 .../numpy/f2py/tests/src/string/scalar_string.f90 | 9 +
 .../numpy/f2py/tests/src/string/string.f | 12 +
 .../f2py/tests/src/value_attrspec/gh21665.f90 | 9 +
 .../numpy/f2py/tests/test_abstract_interface.py | 26 +
 .../numpy/f2py/tests/test_array_from_pyobj.py | 678 +
 .../numpy/f2py/tests/test_assumed_shape.py | 50 +
 .../numpy/f2py/tests/test_block_docstring.py | 20 +
 .../numpy/f2py/tests/test_callback.py | 263 +
 .../numpy/f2py/tests/test_character.py | 641 +
 .../site-packages/numpy/f2py/tests/test_common.py | 23 +
 .../numpy/f2py/tests/test_crackfortran.py | 421 +
 .../site-packages/numpy/f2py/tests/test_data.py | 71 +
 .../site-packages/numpy/f2py/tests/test_docs.py | 64 +
 .../site-packages/numpy/f2py/tests/test_f2cmap.py | 17 +
 .../site-packages/numpy/f2py/tests/test_f2py2e.py | 964 +
 .../site-packages/numpy/f2py/tests/test_isoc.py | 56 +
 .../site-packages/numpy/f2py/tests/test_kind.py | 53 +
 .../site-packages/numpy/f2py/tests/test_mixed.py | 35 +
 .../site-packages/numpy/f2py/tests/test_modules.py | 83 +
 .../numpy/f2py/tests/test_parameter.py | 129 +
 .../site-packages/numpy/f2py/tests/test_pyf_src.py | 43 +
 .../numpy/f2py/tests/test_quoted_character.py | 18 +
 .../numpy/f2py/tests/test_regression.py | 187 +
 .../numpy/f2py/tests/test_return_character.py | 48 +
 .../numpy/f2py/tests/test_return_complex.py | 67 +
 .../numpy/f2py/tests/test_return_integer.py | 55 +
 .../numpy/f2py/tests/test_return_logical.py | 65 +
 .../numpy/f2py/tests/test_return_real.py | 109 +
 .../numpy/f2py/tests/test_routines.py | 29 +
 .../numpy/f2py/tests/test_semicolon_split.py | 75 +
 .../site-packages/numpy/f2py/tests/test_size.py | 45 +
 .../site-packages/numpy/f2py/tests/test_string.py | 100 +
 .../numpy/f2py/tests/test_symbolic.py | 495 +
 .../numpy/f2py/tests/test_value_attrspec.py | 15 +
 .../site-packages/numpy/f2py/tests/util.py | 442 +
 .../site-packages/numpy/f2py/use_rules.py | 99 +
 .../site-packages/numpy/f2py/use_rules.pyi | 9 +
 .../python3.12/site-packages/numpy/fft/__init__.py | 215 +
 .../site-packages/numpy/fft/__init__.pyi | 43 +
 .../numpy/fft/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 8405 bytes
 .../numpy/fft/__pycache__/_helper.cpython-312.pyc | Bin 0 -> 8086 bytes
 .../fft/__pycache__/_pocketfft.cpython-312.pyc | Bin 0 -> 64761 bytes
 .../numpy/fft/__pycache__/helper.cpython-312.pyc | Bin 0 -> 923 bytes
 .../python3.12/site-packages/numpy/fft/_helper.py | 235 +
 .../python3.12/site-packages/numpy/fft/_helper.pyi | 45 +
 .../site-packages/numpy/fft/_pocketfft.py | 1693 +
 .../site-packages/numpy/fft/_pocketfft.pyi | 138 +
 ...pocketfft_umath.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 539072 bytes
 .../python3.12/site-packages/numpy/fft/helper.py | 17 +
 .../python3.12/site-packages/numpy/fft/helper.pyi | 22 +
 .../site-packages/numpy/fft/tests/__init__.py | 0
 .../fft/tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 192 bytes
 .../tests/__pycache__/test_helper.cpython-312.pyc | Bin 0 -> 9021 bytes
 .../__pycache__/test_pocketfft.cpython-312.pyc | Bin 0 -> 47477 bytes
 .../site-packages/numpy/fft/tests/test_helper.py | 167 +
 .../numpy/fft/tests/test_pocketfft.py | 589 +
 .../python3.12/site-packages/numpy/lib/__init__.py | 97 +
 .../site-packages/numpy/lib/__init__.pyi | 44 +
 .../numpy/lib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 3010 bytes
 .../__pycache__/_array_utils_impl.cpython-312.pyc | Bin 0 -> 2161 bytes
 .../lib/__pycache__/_arraypad_impl.cpython-312.pyc | Bin 0 -> 28978 bytes
 .../__pycache__/_arraysetops_impl.cpython-312.pyc | Bin 0 -> 43158 bytes
 .../__pycache__/_arrayterator_impl.cpython-312.pyc | Bin 0 -> 9627 bytes
 .../lib/__pycache__/_datasource.cpython-312.pyc | Bin 0 -> 25647 bytes
 .../lib/__pycache__/_format_impl.cpython-312.pyc | Bin 0 -> 35982 bytes
 .../_function_base_impl.cpython-312.pyc | Bin 0 -> 207985 bytes
 .../__pycache__/_histograms_impl.cpython-312.pyc | Bin 0 -> 40778 bytes
 .../__pycache__/_index_tricks_impl.cpython-312.pyc | Bin 0 -> 36311 bytes
 .../numpy/lib/__pycache__/_iotools.cpython-312.pyc | Bin 0 -> 35190 bytes
 .../__pycache__/_nanfunctions_impl.cpython-312.pyc | Bin 0 -> 76016 bytes
 .../lib/__pycache__/_npyio_impl.cpython-312.pyc | Bin 0 -> 96608 bytes
 .../__pycache__/_polynomial_impl.cpython-312.pyc | Bin 0 -> 53606 bytes
 .../lib/__pycache__/_scimath_impl.cpython-312.pyc | Bin 0 -> 18335 bytes
 .../__pycache__/_shape_base_impl.cpython-312.pyc | Bin 0 -> 44015 bytes
 .../_stride_tricks_impl.cpython-312.pyc | Bin 0 -> 19596 bytes
 .../__pycache__/_twodim_base_impl.cpython-312.pyc | Bin 0 -> 38482 bytes
 .../__pycache__/_type_check_impl.cpython-312.pyc | Bin 0 -> 22208 bytes
 .../__pycache__/_ufunclike_impl.cpython-312.pyc | Bin 0 -> 7270 bytes
 .../__pycache__/_user_array_impl.cpython-312.pyc | Bin 0 -> 16244 bytes
 .../lib/__pycache__/_utils_impl.cpython-312.pyc | Bin 0 -> 25608 bytes
 .../numpy/lib/__pycache__/_version.cpython-312.pyc | Bin 0 -> 6643 bytes
 .../lib/__pycache__/array_utils.cpython-312.pyc | Bin 0 -> 350 bytes
 .../numpy/lib/__pycache__/format.cpython-312.pyc | Bin 0 -> 788 bytes
 .../lib/__pycache__/introspect.cpython-312.pyc | Bin 0 -> 3290 bytes
 .../numpy/lib/__pycache__/mixins.cpython-312.pyc | Bin 0 -> 8977 bytes
 .../numpy/lib/__pycache__/npyio.cpython-312.pyc | Bin 0 -> 271 bytes
 .../lib/__pycache__/recfunctions.cpython-312.pyc | Bin 0 -> 64014 bytes
 .../numpy/lib/__pycache__/scimath.cpython-312.pyc | Bin 0 -> 411 bytes
 .../lib/__pycache__/stride_tricks.cpython-312.pyc | Bin 0 -> 299 bytes
 .../lib/__pycache__/user_array.cpython-312.pyc | Bin 0 -> 262 bytes
 .../site-packages/numpy/lib/_array_utils_impl.py | 62 +
 .../site-packages/numpy/lib/_array_utils_impl.pyi | 26 +
 .../site-packages/numpy/lib/_arraypad_impl.py | 890 +
 .../site-packages/numpy/lib/_arraypad_impl.pyi | 89 +
 .../site-packages/numpy/lib/_arraysetops_impl.py | 1260 +
 .../site-packages/numpy/lib/_arraysetops_impl.pyi | 444 +
 .../site-packages/numpy/lib/_arrayterator_impl.py | 224 +
 .../site-packages/numpy/lib/_arrayterator_impl.pyi | 46 +
 .../site-packages/numpy/lib/_datasource.py | 700 +
 .../site-packages/numpy/lib/_datasource.pyi | 31 +
 .../site-packages/numpy/lib/_format_impl.py | 1036 +
 .../site-packages/numpy/lib/_format_impl.pyi | 26 +
 .../site-packages/numpy/lib/_function_base_impl.py | 5844 +
 .../numpy/lib/_function_base_impl.pyi | 985 +
 .../site-packages/numpy/lib/_histograms_impl.py | 1085 +
 .../site-packages/numpy/lib/_histograms_impl.pyi | 50 +
 .../site-packages/numpy/lib/_index_tricks_impl.py | 1067 +
 .../site-packages/numpy/lib/_index_tricks_impl.pyi | 196 +
 .../python3.12/site-packages/numpy/lib/_iotools.py | 900 +
 .../site-packages/numpy/lib/_iotools.pyi | 114 +
 .../site-packages/numpy/lib/_nanfunctions_impl.py | 2024 +
 .../site-packages/numpy/lib/_nanfunctions_impl.pyi | 52 +
 .../site-packages/numpy/lib/_npyio_impl.py | 2596 +
 .../site-packages/numpy/lib/_npyio_impl.pyi | 301 +
 .../site-packages/numpy/lib/_polynomial_impl.py | 1465 +
 .../site-packages/numpy/lib/_polynomial_impl.pyi | 316 +
 .../site-packages/numpy/lib/_scimath_impl.py | 642 +
 .../site-packages/numpy/lib/_scimath_impl.pyi | 93 +
 .../site-packages/numpy/lib/_shape_base_impl.py | 1301 +
 .../site-packages/numpy/lib/_shape_base_impl.pyi | 235 +
 .../site-packages/numpy/lib/_stride_tricks_impl.py | 549 +
 .../numpy/lib/_stride_tricks_impl.pyi | 74 +
 .../site-packages/numpy/lib/_twodim_base_impl.py | 1201 +
 .../site-packages/numpy/lib/_twodim_base_impl.pyi | 438 +
 .../site-packages/numpy/lib/_type_check_impl.py | 699 +
 .../site-packages/numpy/lib/_type_check_impl.pyi | 350 +
 .../site-packages/numpy/lib/_ufunclike_impl.py | 207 +
 .../site-packages/numpy/lib/_ufunclike_impl.pyi | 67 +
 .../site-packages/numpy/lib/_user_array_impl.py | 299 +
 .../site-packages/numpy/lib/_user_array_impl.pyi | 225 +
 .../site-packages/numpy/lib/_utils_impl.py | 779 +
 .../site-packages/numpy/lib/_utils_impl.pyi | 10 +
 .../python3.12/site-packages/numpy/lib/_version.py | 154 +
 .../site-packages/numpy/lib/_version.pyi | 17 +
 .../site-packages/numpy/lib/array_utils.py | 7 +
 .../site-packages/numpy/lib/array_utils.pyi | 12 +
 .../python3.12/site-packages/numpy/lib/format.py | 24 +
 .../python3.12/site-packages/numpy/lib/format.pyi | 66 +
 .../site-packages/numpy/lib/introspect.py | 95 +
 .../site-packages/numpy/lib/introspect.pyi | 3 +
 .../python3.12/site-packages/numpy/lib/mixins.py | 180 +
 .../python3.12/site-packages/numpy/lib/mixins.pyi | 75 +
 .../python3.12/site-packages/numpy/lib/npyio.py | 1 +
 .../python3.12/site-packages/numpy/lib/npyio.pyi | 9 +
 .../site-packages/numpy/lib/recfunctions.py | 1681 +
 .../site-packages/numpy/lib/recfunctions.pyi | 435 +
 .../python3.12/site-packages/numpy/lib/scimath.py | 13 +
 .../python3.12/site-packages/numpy/lib/scimath.pyi | 30 +
 .../site-packages/numpy/lib/stride_tricks.py | 1 +
 .../site-packages/numpy/lib/stride_tricks.pyi | 6 +
 .../site-packages/numpy/lib/tests/__init__.py | 0
 .../lib/tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 192 bytes
 .../__pycache__/test__datasource.cpython-312.pyc | Bin 0 -> 20906 bytes
 .../__pycache__/test__iotools.cpython-312.pyc | Bin 0 -> 18619 bytes
 .../__pycache__/test__version.cpython-312.pyc | Bin 0 -> 3482 bytes
 .../__pycache__/test_array_utils.cpython-312.pyc | Bin 0 -> 2544 bytes
 .../__pycache__/test_arraypad.cpython-312.pyc | Bin 0 -> 72097 bytes
 .../__pycache__/test_arraysetops.cpython-312.pyc | Bin 0 -> 59087 bytes
 .../__pycache__/test_arrayterator.cpython-312.pyc | Bin 0 -> 2637 bytes
 .../tests/__pycache__/test_format.cpython-312.pyc | Bin 0 -> 55254 bytes
 .../__pycache__/test_function_base.cpython-312.pyc | Bin 0 -> 301287 bytes
 .../__pycache__/test_histograms.cpython-312.pyc | Bin 0 -> 51401 bytes
 .../__pycache__/test_index_tricks.cpython-312.pyc | Bin 0 -> 35518 bytes
 .../lib/tests/__pycache__/test_io.cpython-312.pyc | Bin 0 -> 176840 bytes
 .../tests/__pycache__/test_loadtxt.cpython-312.pyc | Bin 0 -> 67059 bytes
 .../tests/__pycache__/test_mixins.cpython-312.pyc | Bin 0 -> 12707 bytes
 .../__pycache__/test_nanfunctions.cpython-312.pyc | Bin 0 -> 92780 bytes
 .../__pycache__/test_packbits.cpython-312.pyc | Bin 0 -> 23174 bytes
 .../__pycache__/test_polynomial.cpython-312.pyc | Bin 0 -> 22348 bytes
 .../__pycache__/test_recfunctions.cpython-312.pyc | Bin 0 -> 57938 bytes
 .../__pycache__/test_regression.cpython-312.pyc | Bin 0 -> 15294 bytes
 .../__pycache__/test_shape_base.cpython-312.pyc | Bin 0 -> 56388 bytes
 .../__pycache__/test_stride_tricks.cpython-312.pyc | Bin 0 -> 30055 bytes
 .../__pycache__/test_twodim_base.cpython-312.pyc | Bin 0 -> 29332 bytes
 .../__pycache__/test_type_check.cpython-312.pyc | Bin 0 -> 31613 bytes
 .../__pycache__/test_ufunclike.cpython-312.pyc | Bin 0 -> 6034 bytes
 .../tests/__pycache__/test_utils.cpython-312.pyc | Bin 0 -> 4174 bytes
 .../numpy/lib/tests/data/py2-np0-objarr.npy | Bin 0 -> 258 bytes
 .../numpy/lib/tests/data/py2-objarr.npy | Bin 0 -> 258 bytes
 .../numpy/lib/tests/data/py2-objarr.npz | Bin 0 -> 366 bytes
 .../numpy/lib/tests/data/py3-objarr.npy | Bin 0 -> 325 bytes
 .../numpy/lib/tests/data/py3-objarr.npz | Bin 0 -> 453 bytes
 .../site-packages/numpy/lib/tests/data/python3.npy | Bin 0 -> 96 bytes
 .../numpy/lib/tests/data/win64python2.npy | Bin 0 -> 96 bytes
 .../numpy/lib/tests/test__datasource.py | 352 +
 .../site-packages/numpy/lib/tests/test__iotools.py | 360 +
 .../site-packages/numpy/lib/tests/test__version.py | 64 +
 .../numpy/lib/tests/test_array_utils.py | 32 +
 .../site-packages/numpy/lib/tests/test_arraypad.py | 1415 +
 .../numpy/lib/tests/test_arraysetops.py | 1074 +
 .../numpy/lib/tests/test_arrayterator.py | 46 +
 .../site-packages/numpy/lib/tests/test_format.py | 1054 +
 .../numpy/lib/tests/test_function_base.py | 4573 +
 .../numpy/lib/tests/test_histograms.py | 855 +
 .../numpy/lib/tests/test_index_tricks.py | 568 +
 .../site-packages/numpy/lib/tests/test_io.py | 2848 +
 .../site-packages/numpy/lib/tests/test_loadtxt.py | 1101 +
 .../site-packages/numpy/lib/tests/test_mixins.py | 215 +
 .../numpy/lib/tests/test_nanfunctions.py | 1438 +
 .../site-packages/numpy/lib/tests/test_packbits.py | 376 +
 .../numpy/lib/tests/test_polynomial.py | 320 +
 .../numpy/lib/tests/test_recfunctions.py | 1052 +
 .../numpy/lib/tests/test_regression.py | 231 +
 .../numpy/lib/tests/test_shape_base.py | 813 +
 .../numpy/lib/tests/test_stride_tricks.py | 656 +
 .../numpy/lib/tests/test_twodim_base.py | 559 +
 .../numpy/lib/tests/test_type_check.py | 473 +
 .../numpy/lib/tests/test_ufunclike.py | 97 +
 .../site-packages/numpy/lib/tests/test_utils.py | 80 +
 .../site-packages/numpy/lib/user_array.py | 1 +
 .../site-packages/numpy/lib/user_array.pyi | 1 +
 .../site-packages/numpy/linalg/__init__.py | 98 +
 .../site-packages/numpy/linalg/__init__.pyi | 73 +
 .../linalg/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 2340 bytes
 .../linalg/__pycache__/_linalg.cpython-312.pyc | Bin 0 -> 126143 bytes
 .../linalg/__pycache__/linalg.cpython-312.pyc | Bin 0 -> 903 bytes
 .../site-packages/numpy/linalg/_linalg.py | 3681 +
 .../site-packages/numpy/linalg/_linalg.pyi | 482 +
 .../_umath_linalg.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 231833 bytes
 .../site-packages/numpy/linalg/_umath_linalg.pyi | 61 +
 .../lapack_lite.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 30001 bytes
 .../site-packages/numpy/linalg/lapack_lite.pyi | 141 +
 .../site-packages/numpy/linalg/linalg.py | 17 +
 .../site-packages/numpy/linalg/linalg.pyi | 69 +
 .../site-packages/numpy/linalg/tests/__init__.py | 0
 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 195 bytes
 .../__pycache__/test_deprecations.cpython-312.pyc | Bin 0 -> 1239 bytes
 .../tests/__pycache__/test_linalg.cpython-312.pyc | Bin 0 -> 144380 bytes
 .../__pycache__/test_regression.cpython-312.pyc | Bin 0 -> 10930 bytes
 .../numpy/linalg/tests/test_deprecations.py | 20 +
 .../numpy/linalg/tests/test_linalg.py | 2430 +
 .../numpy/linalg/tests/test_regression.py | 181 +
 .../site-packages/numpy/ma/API_CHANGES.txt | 135 +
 .venv/lib/python3.12/site-packages/numpy/ma/LICENSE | 24 +
 .../python3.12/site-packages/numpy/ma/README.rst | 236 +
 .../python3.12/site-packages/numpy/ma/__init__.py | 53 +
 .../python3.12/site-packages/numpy/ma/__init__.pyi | 458 +
 .../numpy/ma/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1681 bytes
 .../numpy/ma/__pycache__/core.cpython-312.pyc | Bin 0 -> 311706 bytes
 .../numpy/ma/__pycache__/extras.cpython-312.pyc | Bin 0 -> 83198 bytes
 .../numpy/ma/__pycache__/mrecords.cpython-312.pyc | Bin 0 -> 34170 bytes
 .../numpy/ma/__pycache__/testutils.cpython-312.pyc | Bin 0 -> 12731 bytes
 .../lib/python3.12/site-packages/numpy/ma/core.py | 8936 ++
 .../lib/python3.12/site-packages/numpy/ma/core.pyi | 1462 +
 .../python3.12/site-packages/numpy/ma/extras.py | 2344 +
 .../python3.12/site-packages/numpy/ma/extras.pyi | 134 +
 .../python3.12/site-packages/numpy/ma/mrecords.py | 773 +
 .../python3.12/site-packages/numpy/ma/mrecords.pyi | 96 +
 .../site-packages/numpy/ma/tests/__init__.py | 0
 .../ma/tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 191 bytes
 .../__pycache__/test_arrayobject.cpython-312.pyc | Bin 0 -> 2260 bytes
 .../ma/tests/__pycache__/test_core.cpython-312.pyc | Bin 0 -> 348758 bytes
 .../__pycache__/test_deprecations.cpython-312.pyc | Bin 0 -> 4829 bytes
 .../tests/__pycache__/test_extras.cpython-312.pyc | Bin 0 -> 128761 bytes
 .../__pycache__/test_mrecords.cpython-312.pyc | Bin 0 -> 30721 bytes
 .../tests/__pycache__/test_old_ma.cpython-312.pyc | Bin 0 -> 64345 bytes
 .../__pycache__/test_regression.cpython-312.pyc | Bin 0 -> 7501 bytes
 .../__pycache__/test_subclassing.cpython-312.pyc | Bin 0 -> 27748 bytes
 .../numpy/ma/tests/test_arrayobject.py | 40 +
 .../site-packages/numpy/ma/tests/test_core.py | 5886 +
 .../numpy/ma/tests/test_deprecations.py | 87 +
 .../site-packages/numpy/ma/tests/test_extras.py | 1998 +
 .../site-packages/numpy/ma/tests/test_mrecords.py | 497 +
 .../site-packages/numpy/ma/tests/test_old_ma.py | 942 +
 .../numpy/ma/tests/test_regression.py | 100 +
 .../numpy/ma/tests/test_subclassing.py | 469 +
 .../python3.12/site-packages/numpy/ma/testutils.py | 294 +
 .venv/lib/python3.12/site-packages/numpy/matlib.py | 380 +
 .../lib/python3.12/site-packages/numpy/matlib.pyi | 582 +
 .../site-packages/numpy/matrixlib/__init__.py | 12 +
 .../site-packages/numpy/matrixlib/__init__.pyi | 5 +
 .../matrixlib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 486 bytes
 .../__pycache__/defmatrix.cpython-312.pyc | Bin 0 -> 36117 bytes
 .../site-packages/numpy/matrixlib/defmatrix.py | 1119 +
 .../site-packages/numpy/matrixlib/defmatrix.pyi | 17 +
 .../numpy/matrixlib/tests/__init__.py | 0
 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 198 bytes
 .../__pycache__/test_defmatrix.cpython-312.pyc | Bin 0 -> 32981 bytes
 .../__pycache__/test_interaction.cpython-312.pyc | Bin 0 -> 21453 bytes
 .../__pycache__/test_masked_matrix.cpython-312.pyc | Bin 0 -> 16670 bytes
 .../__pycache__/test_matrix_linalg.cpython-312.pyc | Bin 0 -> 4291 bytes
 .../__pycache__/test_multiarray.cpython-312.pyc | Bin 0 -> 1614 bytes
 .../tests/__pycache__/test_numeric.cpython-312.pyc | Bin 0 -> 1426 bytes
 .../__pycache__/test_regression.cpython-312.pyc | Bin 0 -> 2833 bytes
 .../numpy/matrixlib/tests/test_defmatrix.py | 455 +
 .../numpy/matrixlib/tests/test_interaction.py | 360 +
 .../numpy/matrixlib/tests/test_masked_matrix.py | 240 +
 .../numpy/matrixlib/tests/test_matrix_linalg.py | 105 +
 .../numpy/matrixlib/tests/test_multiarray.py | 17 +
 .../numpy/matrixlib/tests/test_numeric.py | 18 +
 .../numpy/matrixlib/tests/test_regression.py | 31 +
 .../site-packages/numpy/polynomial/__init__.py | 187 +
 .../site-packages/numpy/polynomial/__init__.pyi | 25 +
 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 6936 bytes
 .../__pycache__/_polybase.cpython-312.pyc | Bin 0 -> 48960 bytes
 .../__pycache__/chebyshev.cpython-312.pyc | Bin 0 -> 71846 bytes
 .../polynomial/__pycache__/hermite.cpython-312.pyc | Bin 0 -> 62595 bytes
 .../__pycache__/hermite_e.cpython-312.pyc | Bin 0 -> 59967 bytes
 .../__pycache__/laguerre.cpython-312.pyc | Bin 0 -> 59726 bytes
 .../__pycache__/legendre.cpython-312.pyc | Bin 0 -> 58298 bytes
 .../__pycache__/polynomial.cpython-312.pyc | Bin 0 -> 58203 bytes
 .../__pycache__/polyutils.cpython-312.pyc | Bin 0 -> 28866 bytes
 .../site-packages/numpy/polynomial/_polybase.py | 1191 +
 .../site-packages/numpy/polynomial/_polybase.pyi | 285 +
 .../site-packages/numpy/polynomial/_polytypes.pyi | 892 +
 .../site-packages/numpy/polynomial/chebyshev.py | 2003 +
 .../site-packages/numpy/polynomial/chebyshev.pyi | 181 +
 .../site-packages/numpy/polynomial/hermite.py | 1740 +
 .../site-packages/numpy/polynomial/hermite.pyi | 107 +
 .../site-packages/numpy/polynomial/hermite_e.py | 1642 +
 .../site-packages/numpy/polynomial/hermite_e.pyi | 107 +
 .../site-packages/numpy/polynomial/laguerre.py | 1675 +
 .../site-packages/numpy/polynomial/laguerre.pyi | 100 +
 .../site-packages/numpy/polynomial/legendre.py | 1605 +
 .../site-packages/numpy/polynomial/legendre.pyi | 100 +
 .../site-packages/numpy/polynomial/polynomial.py | 1616 +
 .../site-packages/numpy/polynomial/polynomial.pyi | 89 +
 .../site-packages/numpy/polynomial/polyutils.py | 759 +
 .../site-packages/numpy/polynomial/polyutils.pyi | 423 +
 .../numpy/polynomial/tests/__init__.py | 0
 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 199 bytes
 .../__pycache__/test_chebyshev.cpython-312.pyc | Bin 0 -> 38472 bytes
 .../tests/__pycache__/test_classes.cpython-312.pyc | Bin 0 -> 33620 bytes
 .../tests/__pycache__/test_hermite.cpython-312.pyc | Bin 0 -> 34381 bytes
 .../__pycache__/test_hermite_e.cpython-312.pyc | Bin 0 -> 34560 bytes
 .../__pycache__/test_laguerre.cpython-312.pyc | Bin 0 -> 33118 bytes
 .../__pycache__/test_legendre.cpython-312.pyc | Bin 0 -> 35352 bytes
 .../__pycache__/test_polynomial.cpython-312.pyc | Bin 0 -> 41009 bytes
 .../__pycache__/test_polyutils.cpython-312.pyc | Bin 0 -> 6720 bytes
 .../__pycache__/test_printing.cpython-312.pyc | Bin 0 -> 31522 bytes
 .../tests/__pycache__/test_symbol.cpython-312.pyc | Bin 0 -> 12638 bytes
 .../numpy/polynomial/tests/test_chebyshev.py | 623 +
 .../numpy/polynomial/tests/test_classes.py | 618 +
 .../numpy/polynomial/tests/test_hermite.py | 558 +
 .../numpy/polynomial/tests/test_hermite_e.py | 559 +
 .../numpy/polynomial/tests/test_laguerre.py | 540 +
 .../numpy/polynomial/tests/test_legendre.py | 571 +
 .../numpy/polynomial/tests/test_polynomial.py | 669 +
 .../numpy/polynomial/tests/test_polyutils.py | 128 +
 .../numpy/polynomial/tests/test_printing.py | 555 +
 .../numpy/polynomial/tests/test_symbol.py | 217 +
 .venv/lib/python3.12/site-packages/numpy/py.typed | 0
 .../site-packages/numpy/random/LICENSE.md | 71 +
 .../site-packages/numpy/random/__init__.pxd | 14 +
 .../site-packages/numpy/random/__init__.py | 213 +
 .../site-packages/numpy/random/__init__.pyi | 124 +
 .../random/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 7507 bytes
 .../random/__pycache__/_pickle.cpython-312.pyc | Bin 0 -> 2923 bytes
 ...ounded_integers.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 323168 bytes
 .../numpy/random/_bounded_integers.pxd | 29 +
 .../numpy/random/_bounded_integers.pyi | 1 +
 .../random/_common.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 258912 bytes
 .../site-packages/numpy/random/_common.pxd | 107 +
 .../site-packages/numpy/random/_common.pyi | 16 +
 .../cffi/__pycache__/extending.cpython-312.pyc | Bin 0 -> 1670 bytes
 .../cffi/__pycache__/parse.cpython-312.pyc | Bin 0 -> 2297 bytes
 .../numpy/random/_examples/cffi/extending.py | 44 +
 .../numpy/random/_examples/cffi/parse.py | 53 +
 .../numpy/random/_examples/cython/extending.pyx | 77 +
 .../_examples/cython/extending_distributions.pyx | 118 +
 .../numpy/random/_examples/cython/meson.build | 53 +
 .../numba/__pycache__/extending.cpython-312.pyc | Bin 0 -> 3879 bytes
 .../extending_distributions.cpython-312.pyc | Bin 0 -> 2734 bytes
 .../numpy/random/_examples/numba/extending.py | 86 +
 .../_examples/numba/extending_distributions.py | 67 +
 .../_generator.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 993240 bytes
 .../site-packages/numpy/random/_generator.pyi | 856 +
 .../_mt19937.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 137960 bytes
 .../site-packages/numpy/random/_mt19937.pyi | 25 +
 .../random/_pcg64.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 148272 bytes
 .../site-packages/numpy/random/_pcg64.pyi | 44 +
 .../random/_philox.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 120808 bytes
 .../site-packages/numpy/random/_philox.pyi | 39 +
 .../site-packages/numpy/random/_pickle.py | 88 +
 .../site-packages/numpy/random/_pickle.pyi | 43 +
 .../random/_sfc64.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 89648 bytes
 .../site-packages/numpy/random/_sfc64.pyi | 28 +
 .../bit_generator.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 239072 bytes
 .../site-packages/numpy/random/bit_generator.pxd | 35 +
 .../site-packages/numpy/random/bit_generator.pyi | 124 +
 .../site-packages/numpy/random/c_distributions.pxd | 119 +
 .../site-packages/numpy/random/lib/libnpyrandom.a | Bin 0 -> 71702 bytes
 .../random/mtrand.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 785752 bytes
 .../site-packages/numpy/random/mtrand.pyi | 703 +
 .../site-packages/numpy/random/tests/__init__.py | 0
 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 195 bytes
 .../tests/__pycache__/test_direct.cpython-312.pyc | Bin 0 -> 35400 bytes
 .../__pycache__/test_extending.cpython-312.pyc | Bin 0 -> 6181 bytes
 .../test_generator_mt19937.cpython-312.pyc | Bin 0 -> 178178 bytes
 ...t_generator_mt19937_regressions.cpython-312.pyc | Bin 0 -> 13530 bytes
 .../tests/__pycache__/test_random.cpython-312.pyc | Bin 0 -> 113339 bytes
 .../__pycache__/test_randomstate.cpython-312.pyc | Bin 0 -> 130399 bytes
 .../test_randomstate_regression.cpython-312.pyc | Bin 0 -> 15167 bytes
 .../__pycache__/test_regression.cpython-312.pyc | Bin 0 -> 10894 bytes
 .../__pycache__/test_seed_sequence.cpython-312.pyc | Bin 0 -> 3480 bytes
 .../tests/__pycache__/test_smoke.cpython-312.pyc | Bin 0 -> 57058 bytes
 .../numpy/random/tests/data/__init__.py | 0
 .../data/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 200 bytes
 .../random/tests/data/generator_pcg64_np121.pkl.gz | Bin 0 -> 203 bytes
 .../random/tests/data/generator_pcg64_np126.pkl.gz | Bin 0 -> 208 bytes
 .../numpy/random/tests/data/mt19937-testset-1.csv | 1001 +
 .../numpy/random/tests/data/mt19937-testset-2.csv | 1001 +
 .../numpy/random/tests/data/pcg64-testset-1.csv | 1001 +
 .../numpy/random/tests/data/pcg64-testset-2.csv | 1001 +
 .../random/tests/data/pcg64dxsm-testset-1.csv | 1001 +
 .../random/tests/data/pcg64dxsm-testset-2.csv | 1001 +
 .../numpy/random/tests/data/philox-testset-1.csv | 1001 +
 .../numpy/random/tests/data/philox-testset-2.csv | 1001 +
 .../numpy/random/tests/data/sfc64-testset-1.csv | 1001 +
 .../numpy/random/tests/data/sfc64-testset-2.csv | 1001 +
 .../numpy/random/tests/data/sfc64_np126.pkl.gz | Bin 0 -> 290 bytes
 .../numpy/random/tests/test_direct.py | 592 +
 .../numpy/random/tests/test_extending.py | 127 +
 .../numpy/random/tests/test_generator_mt19937.py | 2804 +
 .../tests/test_generator_mt19937_regressions.py | 207 +
 .../numpy/random/tests/test_random.py | 1757 +
 .../numpy/random/tests/test_randomstate.py | 2130 +
 .../random/tests/test_randomstate_regression.py | 217 +
 .../numpy/random/tests/test_regression.py | 152 +
 .../numpy/random/tests/test_seed_sequence.py | 79 +
 .../site-packages/numpy/random/tests/test_smoke.py | 819 +
 .../python3.12/site-packages/numpy/rec/__init__.py | 2 +
 .../site-packages/numpy/rec/__init__.pyi | 23 +
 .../numpy/rec/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 279 bytes
 .../site-packages/numpy/strings/__init__.py | 2 +
 .../site-packages/numpy/strings/__init__.pyi | 97 +
 .../strings/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 283 bytes
 .../site-packages/numpy/testing/__init__.py | 22 +
 .../site-packages/numpy/testing/__init__.pyi | 102 +
 .../testing/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 885 bytes
 .../testing/__pycache__/overrides.cpython-312.pyc | Bin 0 -> 2720 bytes
 .../print_coercion_tables.cpython-312.pyc | Bin 0 -> 8113 bytes
 .../numpy/testing/_private/__init__.py | 0
 .../numpy/testing/_private/__init__.pyi | 0
 .../_private/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 199 bytes
 .../_private/__pycache__/extbuild.cpython-312.pyc | Bin 0 -> 9414 bytes
 .../_private/__pycache__/utils.cpython-312.pyc | Bin 0 -> 106874 bytes
 .../numpy/testing/_private/extbuild.py | 250 +
 .../numpy/testing/_private/extbuild.pyi | 25 +
 .../site-packages/numpy/testing/_private/utils.py | 2759 +
 .../site-packages/numpy/testing/_private/utils.pyi | 499 +
 .../site-packages/numpy/testing/overrides.py | 84 +
 .../site-packages/numpy/testing/overrides.pyi | 11 +
 .../numpy/testing/print_coercion_tables.py | 207 +
 .../numpy/testing/print_coercion_tables.pyi | 27 +
 .../site-packages/numpy/testing/tests/__init__.py | 0
 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 196 bytes
 .../tests/__pycache__/test_utils.cpython-312.pyc | Bin 0 -> 118885 bytes
 .../numpy/testing/tests/test_utils.py | 1917 +
 .../site-packages/numpy/tests/__init__.py | 0
 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 188 bytes
 .../tests/__pycache__/test__all__.cpython-312.pyc | Bin 0 -> 703 bytes
 .../__pycache__/test_configtool.cpython-312.pyc | Bin 0 -> 3723 bytes
 .../__pycache__/test_ctypeslib.cpython-312.pyc | Bin 0 -> 21993 bytes
 .../__pycache__/test_lazyloading.cpython-312.pyc | Bin 0 -> 1761 bytes
 .../tests/__pycache__/test_matlib.cpython-312.pyc | Bin 0 -> 4191 bytes
 .../__pycache__/test_numpy_config.cpython-312.pyc | Bin 0 -> 2869 bytes
 .../__pycache__/test_numpy_version.cpython-312.pyc | Bin 0 -> 2482 bytes
 .../__pycache__/test_public_api.cpython-312.pyc | Bin 0 -> 24556 bytes
 .../__pycache__/test_reloading.cpython-312.pyc | Bin 0 -> 3557 bytes
 .../tests/__pycache__/test_scripts.cpython-312.pyc | Bin 0 -> 2846 bytes
 .../__pycache__/test_warnings.cpython-312.pyc | Bin 0 -> 4317 bytes
 .../site-packages/numpy/tests/test__all__.py | 10 +
 .../site-packages/numpy/tests/test_configtool.py | 48 +
 .../site-packages/numpy/tests/test_ctypeslib.py | 377 +
 .../site-packages/numpy/tests/test_lazyloading.py | 38 +
 .../site-packages/numpy/tests/test_matlib.py | 59 +
 .../site-packages/numpy/tests/test_numpy_config.py | 46 +
 .../numpy/tests/test_numpy_version.py | 54 +
 .../site-packages/numpy/tests/test_public_api.py | 806 +
 .../site-packages/numpy/tests/test_reloading.py | 74 +
 .../site-packages/numpy/tests/test_scripts.py | 49 +
 .../site-packages/numpy/tests/test_warnings.py | 78 +
 .../site-packages/numpy/typing/__init__.py | 201 +
 .../typing/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 6501 bytes
 .../typing/__pycache__/mypy_plugin.cpython-312.pyc | Bin 0 -> 8502 bytes
 .../site-packages/numpy/typing/mypy_plugin.py | 195 +
 .../site-packages/numpy/typing/tests/__init__.py | 0
 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 195 bytes
 .../tests/__pycache__/test_isfile.cpython-312.pyc | Bin 0 -> 1549 bytes
 .../tests/__pycache__/test_runtime.cpython-312.pyc | Bin 0 -> 5957 bytes
 .../tests/__pycache__/test_typing.cpython-312.pyc | Bin 0 -> 9466 bytes
 .../numpy/typing/tests/data/fail/arithmetic.pyi | 126 +
 .../typing/tests/data/fail/array_constructors.pyi | 34 +
 .../numpy/typing/tests/data/fail/array_like.pyi | 15 +
 .../numpy/typing/tests/data/fail/array_pad.pyi | 6 +
 .../numpy/typing/tests/data/fail/arrayprint.pyi | 16 +
 .../numpy/typing/tests/data/fail/arrayterator.pyi | 14 +
 .../numpy/typing/tests/data/fail/bitwise_ops.pyi | 17 +
 .../numpy/typing/tests/data/fail/char.pyi | 65 +
 .../numpy/typing/tests/data/fail/chararray.pyi | 62 +
 .../numpy/typing/tests/data/fail/comparisons.pyi | 27 +
 .../numpy/typing/tests/data/fail/constants.pyi | 3 +
 .../numpy/typing/tests/data/fail/datasource.pyi | 15 +
 .../numpy/typing/tests/data/fail/dtype.pyi | 17 +
 .../numpy/typing/tests/data/fail/einsumfunc.pyi | 12 +
 .../numpy/typing/tests/data/fail/flatiter.pyi | 20 +
 .../numpy/typing/tests/data/fail/fromnumeric.pyi | 148 +
 .../numpy/typing/tests/data/fail/histograms.pyi | 12 +
 .../numpy/typing/tests/data/fail/index_tricks.pyi | 14 +
 .../typing/tests/data/fail/lib_function_base.pyi | 62 +
 .../typing/tests/data/fail/lib_polynomial.pyi | 29 +
 .../numpy/typing/tests/data/fail/lib_utils.pyi | 3 +
 .../numpy/typing/tests/data/fail/lib_version.pyi | 6 +
 .../numpy/typing/tests/data/fail/linalg.pyi | 48 +
 .../numpy/typing/tests/data/fail/ma.pyi | 143 +
 .../numpy/typing/tests/data/fail/memmap.pyi | 5 +
 .../numpy/typing/tests/data/fail/modules.pyi | 17 +
 .../numpy/typing/tests/data/fail/multiarray.pyi | 52 +
 .../numpy/typing/tests/data/fail/ndarray.pyi | 11 +
 .../numpy/typing/tests/data/fail/ndarray_misc.pyi | 36 +
 .../numpy/typing/tests/data/fail/nditer.pyi | 8 +
 .../typing/tests/data/fail/nested_sequence.pyi | 16 +
 .../numpy/typing/tests/data/fail/npyio.pyi | 24 +
 .../numpy/typing/tests/data/fail/numerictypes.pyi | 5 +
 .../numpy/typing/tests/data/fail/random.pyi | 62 +
 .../numpy/typing/tests/data/fail/rec.pyi | 17 +
 .../numpy/typing/tests/data/fail/scalars.pyi | 87 +
 .../numpy/typing/tests/data/fail/shape.pyi | 6 +
 .../numpy/typing/tests/data/fail/shape_base.pyi | 8 +
 .../numpy/typing/tests/data/fail/stride_tricks.pyi | 9 +
 .../numpy/typing/tests/data/fail/strings.pyi | 52 +
 .../numpy/typing/tests/data/fail/testing.pyi | 28 +
 .../numpy/typing/tests/data/fail/twodim_base.pyi | 32 +
 .../numpy/typing/tests/data/fail/type_check.pyi | 13 +
 .../numpy/typing/tests/data/fail/ufunc_config.pyi | 21 +
 .../numpy/typing/tests/data/fail/ufunclike.pyi | 21 +
 .../numpy/typing/tests/data/fail/ufuncs.pyi | 17 +
 .../typing/tests/data/fail/warnings_and_errors.pyi | 5 +
 .../typing/tests/data/misc/extended_precision.pyi | 9 +
 .../site-packages/numpy/typing/tests/data/mypy.ini | 9 +
 .../pass/__pycache__/arithmetic.cpython-312.pyc | Bin 0 -> 12383 bytes
 .../__pycache__/array_constructors.cpython-312.pyc | Bin 0 -> 6985 bytes
 .../pass/__pycache__/array_like.cpython-312.pyc | Bin 0 -> 2422 bytes
 .../pass/__pycache__/arrayprint.cpython-312.pyc | Bin 0 -> 1400 bytes
 .../pass/__pycache__/arrayterator.cpython-312.pyc | Bin 0 -> 1043 bytes
 .../pass/__pycache__/bitwise_ops.cpython-312.pyc | Bin 0 -> 2400 bytes
 .../pass/__pycache__/comparisons.cpython-312.pyc | Bin 0 -> 6842 bytes
 .../data/pass/__pycache__/dtype.cpython-312.pyc | Bin 0 -> 2313 bytes
 .../pass/__pycache__/einsumfunc.cpython-312.pyc | Bin 0 -> 2288 bytes
 .../data/pass/__pycache__/flatiter.cpython-312.pyc | Bin 0 -> 938 bytes
 .../pass/__pycache__/fromnumeric.cpython-312.pyc | Bin 0 -> 12819 bytes
 .../pass/__pycache__/index_tricks.cpython-312.pyc | Bin 0 -> 3285 bytes
 .../__pycache__/lib_user_array.cpython-312.pyc | Bin 0 -> 1087 bytes
 .../pass/__pycache__/lib_utils.cpython-312.pyc | Bin 0 -> 885 bytes
 .../pass/__pycache__/lib_version.cpython-312.pyc | Bin 0 -> 687 bytes
 .../data/pass/__pycache__/literal.cpython-312.pyc | Bin 0 -> 2724 bytes
 .../tests/data/pass/__pycache__/ma.cpython-312.pyc | Bin 0 -> 5121 bytes
 .../data/pass/__pycache__/mod.cpython-312.pyc | Bin 0 -> 3488 bytes
 .../data/pass/__pycache__/modules.cpython-312.pyc | Bin 0 -> 2248 bytes
 .../pass/__pycache__/multiarray.cpython-312.pyc | Bin 0 -> 3483 bytes
 .../__pycache__/ndarray_conversion.cpython-312.pyc | Bin 0 -> 3478 bytes
 .../pass/__pycache__/ndarray_misc.cpython-312.pyc | Bin 0 -> 9652 bytes
 .../ndarray_shape_manipulation.cpython-312.pyc | Bin 0 -> 1749 bytes
 .../data/pass/__pycache__/nditer.cpython-312.pyc | Bin 0 -> 365 bytes
 .../data/pass/__pycache__/numeric.cpython-312.pyc | Bin 0 -> 4676 bytes
 .../pass/__pycache__/numerictypes.cpython-312.pyc | Bin 0 -> 1089 bytes
 .../data/pass/__pycache__/random.cpython-312.pyc | Bin 0 -> 87027 bytes
 .../pass/__pycache__/recfunctions.cpython-312.pyc | Bin 0 -> 12154 bytes
 .../data/pass/__pycache__/scalars.cpython-312.pyc | Bin 0 -> 11894 bytes
 .../data/pass/__pycache__/shape.cpython-312.pyc | Bin 0 -> 1204 bytes
 .../data/pass/__pycache__/simple.cpython-312.pyc | Bin 0 -> 5037 bytes
 .../pass/__pycache__/simple_py3.cpython-312.pyc | Bin 0 -> 327 bytes
 .../pass/__pycache__/ufunc_config.cpython-312.pyc | Bin 0 -> 3548 bytes
 .../pass/__pycache__/ufunclike.cpython-312.pyc | Bin 0 -> 2672 bytes
 .../data/pass/__pycache__/ufuncs.cpython-312.pyc | Bin 0 -> 1069 bytes
 .../warnings_and_errors.cpython-312.pyc | Bin 0 -> 554 bytes
 .../numpy/typing/tests/data/pass/arithmetic.py | 612 +
 .../typing/tests/data/pass/array_constructors.py | 137 +
 .../numpy/typing/tests/data/pass/array_like.py | 43 +
 .../numpy/typing/tests/data/pass/arrayprint.py | 37 +
 .../numpy/typing/tests/data/pass/arrayterator.py | 27 +
 .../numpy/typing/tests/data/pass/bitwise_ops.py | 131 +
 .../numpy/typing/tests/data/pass/comparisons.py | 315 +
 .../numpy/typing/tests/data/pass/dtype.py | 57 +
 .../numpy/typing/tests/data/pass/einsumfunc.py | 36 +
 .../numpy/typing/tests/data/pass/flatiter.py | 19 +
 .../numpy/typing/tests/data/pass/fromnumeric.py | 272 +
 .../numpy/typing/tests/data/pass/index_tricks.py | 60 +
 .../numpy/typing/tests/data/pass/lib_user_array.py | 22 +
 .../numpy/typing/tests/data/pass/lib_utils.py | 19 +
 .../numpy/typing/tests/data/pass/lib_version.py | 18 +
 .../numpy/typing/tests/data/pass/literal.py | 51 +
 .../numpy/typing/tests/data/pass/ma.py | 174 +
 .../numpy/typing/tests/data/pass/mod.py | 149 +
 .../numpy/typing/tests/data/pass/modules.py | 45 +
 .../numpy/typing/tests/data/pass/multiarray.py | 76 +
 .../typing/tests/data/pass/ndarray_conversion.py | 87 +
 .../numpy/typing/tests/data/pass/ndarray_misc.py | 198 +
 .../tests/data/pass/ndarray_shape_manipulation.py | 47 +
 .../numpy/typing/tests/data/pass/nditer.py | 4 +
 .../numpy/typing/tests/data/pass/numeric.py | 95 +
 .../numpy/typing/tests/data/pass/numerictypes.py | 17 +
 .../numpy/typing/tests/data/pass/random.py | 1497 +
 .../numpy/typing/tests/data/pass/recfunctions.py | 161 +
 .../numpy/typing/tests/data/pass/scalars.py | 248 +
 .../numpy/typing/tests/data/pass/shape.py | 19 +
 .../numpy/typing/tests/data/pass/simple.py | 168 +
 .../numpy/typing/tests/data/pass/simple_py3.py | 6 +
 .../numpy/typing/tests/data/pass/ufunc_config.py | 64 +
 .../numpy/typing/tests/data/pass/ufunclike.py | 47 +
 .../numpy/typing/tests/data/pass/ufuncs.py | 16 +
 .../typing/tests/data/pass/warnings_and_errors.py | 6 +
 .../numpy/typing/tests/data/reveal/arithmetic.pyi | 720 +
 .../typing/tests/data/reveal/array_api_info.pyi | 70 +
 .../tests/data/reveal/array_constructors.pyi | 249 +
 .../numpy/typing/tests/data/reveal/arraypad.pyi | 22 +
 .../numpy/typing/tests/data/reveal/arrayprint.pyi | 25 +
 .../numpy/typing/tests/data/reveal/arraysetops.pyi | 74 +
 .../typing/tests/data/reveal/arrayterator.pyi | 27 +
 .../numpy/typing/tests/data/reveal/bitwise_ops.pyi | 168 +
 .../numpy/typing/tests/data/reveal/char.pyi | 224 +
 .../numpy/typing/tests/data/reveal/chararray.pyi | 137 +
 .../numpy/typing/tests/data/reveal/comparisons.pyi | 264 +
 .../numpy/typing/tests/data/reveal/constants.pyi | 14 +
 .../numpy/typing/tests/data/reveal/ctypeslib.pyi | 81 +
 .../numpy/typing/tests/data/reveal/datasource.pyi | 23 +
 .../numpy/typing/tests/data/reveal/dtype.pyi | 136 +
 .../numpy/typing/tests/data/reveal/einsumfunc.pyi | 39 +
 .../numpy/typing/tests/data/reveal/emath.pyi | 54 +
 .../numpy/typing/tests/data/reveal/fft.pyi | 37 +
 .../numpy/typing/tests/data/reveal/flatiter.pyi | 47 +
 .../numpy/typing/tests/data/reveal/fromnumeric.pyi | 347 +
 .../numpy/typing/tests/data/reveal/getlimits.pyi | 51 +
 .../numpy/typing/tests/data/reveal/histograms.pyi | 25 +
 .../typing/tests/data/reveal/index_tricks.pyi | 70 +
 .../typing/tests/data/reveal/lib_function_base.pyi | 213 +
 .../typing/tests/data/reveal/lib_polynomial.pyi | 144 +
 .../numpy/typing/tests/data/reveal/lib_utils.pyi | 17 +
 .../numpy/typing/tests/data/reveal/lib_version.pyi | 20 +
 .../numpy/typing/tests/data/reveal/linalg.pyi | 132 +
 .../numpy/typing/tests/data/reveal/ma.pyi | 369 +
 .../numpy/typing/tests/data/reveal/matrix.pyi | 73 +
 .../numpy/typing/tests/data/reveal/memmap.pyi | 19 +
 .../numpy/typing/tests/data/reveal/mod.pyi | 180 +
 .../numpy/typing/tests/data/reveal/modules.pyi | 51 +
 .../numpy/typing/tests/data/reveal/multiarray.pyi | 194 +
 .../typing/tests/data/reveal/nbit_base_example.pyi | 21 +
 .../tests/data/reveal/ndarray_assignability.pyi | 77 +
 .../tests/data/reveal/ndarray_conversion.pyi | 85 +
 .../typing/tests/data/reveal/ndarray_misc.pyi | 247 +
 .../data/reveal/ndarray_shape_manipulation.pyi | 39 +
 .../numpy/typing/tests/data/reveal/nditer.pyi | 49 +
 .../typing/tests/data/reveal/nested_sequence.pyi | 25 +
 .../numpy/typing/tests/data/reveal/npyio.pyi | 83 +
 .../numpy/typing/tests/data/reveal/numeric.pyi | 134 +
 .../typing/tests/data/reveal/numerictypes.pyi | 51 +
 .../tests/data/reveal/polynomial_polybase.pyi | 220 +
 .../tests/data/reveal/polynomial_polyutils.pyi | 219 +
 .../typing/tests/data/reveal/polynomial_series.pyi | 138 +
 .../numpy/typing/tests/data/reveal/random.pyi | 1546 +
 .../numpy/typing/tests/data/reveal/rec.pyi | 171 +
 .../numpy/typing/tests/data/reveal/scalars.pyi | 191 +
 .../numpy/typing/tests/data/reveal/shape.pyi | 13 +
 .../numpy/typing/tests/data/reveal/shape_base.pyi | 52 +
 .../typing/tests/data/reveal/stride_tricks.pyi | 27 +
 .../numpy/typing/tests/data/reveal/strings.pyi | 196 +
 .../numpy/typing/tests/data/reveal/testing.pyi | 198 +
 .../numpy/typing/tests/data/reveal/twodim_base.pyi | 145 +
 .../numpy/typing/tests/data/reveal/type_check.pyi | 67 +
 .../typing/tests/data/reveal/ufunc_config.pyi | 30 +
 .../numpy/typing/tests/data/reveal/ufunclike.pyi | 31 +
 .../numpy/typing/tests/data/reveal/ufuncs.pyi | 123 +
 .../tests/data/reveal/warnings_and_errors.pyi | 11 +
 .../numpy/typing/tests/test_isfile.py | 32 +
 .../numpy/typing/tests/test_runtime.py | 102 +
 .../numpy/typing/tests/test_typing.py | 205 +
 .../lib/python3.12/site-packages/numpy/version.py | 11 +
 .../lib/python3.12/site-packages/numpy/version.pyi | 18 +
 .../site-packages/pip-25.2.dist-info/METADATA | 112 +
 .../site-packages/pip-25.2.dist-info/RECORD | 453 +
 .../site-packages/pip-25.2.dist-info/WHEEL | 5 +
 .../pip-25.2.dist-info/entry_points.txt | 3 +
 .../pip-25.2.dist-info/licenses/AUTHORS.txt | 833 +
 .../pip-25.2.dist-info/licenses/LICENSE.txt | 20 +
 .../src/pip/_vendor/cachecontrol/LICENSE.txt | 13 +
 .../licenses/src/pip/_vendor/certifi/LICENSE | 20 +
 .../src/pip/_vendor/dependency_groups/LICENSE.txt | 9 +
 .../licenses/src/pip/_vendor/distlib/LICENSE.txt | 284 +
 .../licenses/src/pip/_vendor/distro/LICENSE | 202 +
 .../licenses/src/pip/_vendor/idna/LICENSE.md | 31 +
 .../licenses/src/pip/_vendor/msgpack/COPYING | 14 +
 .../licenses/src/pip/_vendor/packaging/LICENSE | 3 +
 .../src/pip/_vendor/packaging/LICENSE.APACHE | 177 +
 .../licenses/src/pip/_vendor/packaging/LICENSE.BSD | 23 +
 .../licenses/src/pip/_vendor/pkg_resources/LICENSE | 17 +
 .../licenses/src/pip/_vendor/platformdirs/LICENSE | 21 +
 .../licenses/src/pip/_vendor/pygments/LICENSE | 25 +
 .../src/pip/_vendor/pyproject_hooks/LICENSE | 21 +
 .../licenses/src/pip/_vendor/requests/LICENSE | 175 +
 .../licenses/src/pip/_vendor/resolvelib/LICENSE | 13 +
 .../licenses/src/pip/_vendor/rich/LICENSE | 19 +
 .../licenses/src/pip/_vendor/tomli/LICENSE | 21 +
 .../licenses/src/pip/_vendor/tomli/LICENSE-HEADER | 3 +
 .../licenses/src/pip/_vendor/tomli_w/LICENSE | 21 +
 .../licenses/src/pip/_vendor/truststore/LICENSE | 21 +
 .../licenses/src/pip/_vendor/urllib3/LICENSE.txt | 21 +
 .../site-packages/pip-25.2.dist-info/top_level.txt | 1 +
 .venv/lib/python3.12/site-packages/pip/__init__.py | 13 +
 .venv/lib/python3.12/site-packages/pip/__main__.py | 24 +
 .../python3.12/site-packages/pip/__pip-runner__.py | 50 +
 .../pip/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 653 bytes
 .../pip/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 845 bytes
 .../pip/__pycache__/__pip-runner__.cpython-312.pyc | Bin 0 -> 2208 bytes
 .../site-packages/pip/_internal/__init__.py | 18 +
 .../_internal/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 755 bytes
 .../__pycache__/build_env.cpython-312.pyc | Bin 0 -> 15832 bytes
 .../_internal/__pycache__/cache.cpython-312.pyc | Bin 0 -> 12393 bytes
 .../__pycache__/configuration.cpython-312.pyc | Bin 0 -> 18295 bytes
 .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 39631 bytes
 .../pip/_internal/__pycache__/main.cpython-312.pyc | Bin 0 -> 638 bytes
 .../__pycache__/pyproject.cpython-312.pyc | Bin 0 -> 5063 bytes
 .../self_outdated_check.cpython-312.pyc | Bin 0 -> 10256 bytes
 .../__pycache__/wheel_builder.cpython-312.pyc | Bin 0 -> 13393 bytes
 .../site-packages/pip/_internal/build_env.py | 349 +
 .../site-packages/pip/_internal/cache.py | 291 +
 .../site-packages/pip/_internal/cli/__init__.py | 3 +
 .../cli/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 279 bytes
 .../cli/__pycache__/autocompletion.cpython-312.pyc | Bin 0 -> 9129 bytes
 .../cli/__pycache__/base_command.cpython-312.pyc | Bin 0 -> 10596 bytes
 .../cli/__pycache__/cmdoptions.cpython-312.pyc | Bin 0 -> 31234 bytes
 .../__pycache__/command_context.cpython-312.pyc | Bin 0 -> 1824 bytes
 .../cli/__pycache__/index_command.cpython-312.pyc | Bin 0 -> 7212 bytes
 .../_internal/cli/__pycache__/main.cpython-312.pyc | Bin 0 -> 2263 bytes
 .../cli/__pycache__/main_parser.cpython-312.pyc | Bin 0 -> 4837 bytes
 .../cli/__pycache__/parser.cpython-312.pyc | Bin 0 -> 14863 bytes
 .../cli/__pycache__/progress_bars.cpython-312.pyc | Bin 0 -> 6108 bytes
 .../cli/__pycache__/req_command.cpython-312.pyc | Bin 0 -> 13224 bytes
 .../cli/__pycache__/spinners.cpython-312.pyc | Bin 0 -> 11266 bytes
 .../cli/__pycache__/status_codes.cpython-312.pyc | Bin 0 -> 379 bytes
 .../pip/_internal/cli/autocompletion.py | 184 +
 .../pip/_internal/cli/base_command.py | 244 +
 .../site-packages/pip/_internal/cli/cmdoptions.py | 1138 +
 .../pip/_internal/cli/command_context.py | 28 +
 .../pip/_internal/cli/index_command.py | 175 +
 .../site-packages/pip/_internal/cli/main.py | 80 +
 .../site-packages/pip/_internal/cli/main_parser.py | 134 +
 .../site-packages/pip/_internal/cli/parser.py | 298 +
 .../pip/_internal/cli/progress_bars.py | 151 +
 .../site-packages/pip/_internal/cli/req_command.py | 351 +
 .../site-packages/pip/_internal/cli/spinners.py | 235 +
 .../pip/_internal/cli/status_codes.py | 6 +
 .../pip/_internal/commands/__init__.py | 139 +
 .../commands/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4121 bytes
 .../commands/__pycache__/cache.cpython-312.pyc | Bin 0 -> 10176 bytes
 .../commands/__pycache__/check.cpython-312.pyc | Bin 0 -> 2584 bytes
 .../__pycache__/completion.cpython-312.pyc | Bin 0 -> 5429 bytes
 .../__pycache__/configuration.cpython-312.pyc | Bin 0 -> 13408 bytes
 .../commands/__pycache__/debug.cpython-312.pyc | Bin 0 -> 10011 bytes
 .../commands/__pycache__/download.cpython-312.pyc | Bin 0 -> 7477 bytes
 .../commands/__pycache__/freeze.cpython-312.pyc | Bin 0 -> 4294 bytes
 .../commands/__pycache__/hash.cpython-312.pyc | Bin 0 -> 2952 bytes
 .../commands/__pycache__/help.cpython-312.pyc | Bin 0 -> 1642 bytes
 .../commands/__pycache__/index.cpython-312.pyc | Bin 0 -> 7264 bytes
 .../commands/__pycache__/inspect.cpython-312.pyc | Bin 0 -> 3949 bytes
 .../commands/__pycache__/install.cpython-312.pyc | Bin 0 -> 29745 bytes
 .../commands/__pycache__/list.cpython-312.pyc | Bin 0 -> 17028 bytes
 .../commands/__pycache__/lock.cpython-312.pyc | Bin 0 -> 7979 bytes
 .../commands/__pycache__/search.cpython-312.pyc | Bin 0 -> 7637 bytes
 .../commands/__pycache__/show.cpython-312.pyc | Bin 0 -> 11207 bytes
 .../commands/__pycache__/uninstall.cpython-312.pyc | Bin 0 -> 4702 bytes
 .../commands/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 8766 bytes
 .../site-packages/pip/_internal/commands/cache.py | 231 +
 .../site-packages/pip/_internal/commands/check.py | 66 +
 .../pip/_internal/commands/completion.py | 135 +
 .../pip/_internal/commands/configuration.py | 288 +
 .../site-packages/pip/_internal/commands/debug.py | 203 +
 .../pip/_internal/commands/download.py | 145 +
 .../site-packages/pip/_internal/commands/freeze.py | 107 +
 .../site-packages/pip/_internal/commands/hash.py | 58 +
 .../site-packages/pip/_internal/commands/help.py | 40 +
 .../site-packages/pip/_internal/commands/index.py | 159 +
 .../pip/_internal/commands/inspect.py | 92 +
 .../pip/_internal/commands/install.py | 798 +
 .../site-packages/pip/_internal/commands/list.py | 400 +
 .../site-packages/pip/_internal/commands/lock.py | 170 +
 .../site-packages/pip/_internal/commands/search.py | 178 +
 .../site-packages/pip/_internal/commands/show.py | 231 +
 .../pip/_internal/commands/uninstall.py | 113 +
 .../site-packages/pip/_internal/commands/wheel.py | 181 +
 .../site-packages/pip/_internal/configuration.py | 397 +
 .../pip/_internal/distributions/__init__.py | 21 +
 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 945 bytes
 .../distributions/__pycache__/base.cpython-312.pyc | Bin 0 -> 2923 bytes
 .../__pycache__/installed.cpython-312.pyc | Bin 0 -> 1773 bytes
 .../__pycache__/sdist.cpython-312.pyc | Bin 0 -> 8575 bytes
 .../__pycache__/wheel.cpython-312.pyc | Bin 0 -> 2322 bytes
 .../pip/_internal/distributions/base.py | 55 +
 .../pip/_internal/distributions/installed.py | 33 +
 .../pip/_internal/distributions/sdist.py | 165 +
 .../pip/_internal/distributions/wheel.py | 44 +
 .../site-packages/pip/_internal/exceptions.py | 881 +
 .../site-packages/pip/_internal/index/__init__.py | 1 +
 .../index/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 233 bytes
 .../index/__pycache__/collector.cpython-312.pyc | Bin 0 -> 21232 bytes
 .../__pycache__/package_finder.cpython-312.pyc | Bin 0 -> 42142 bytes
 .../index/__pycache__/sources.cpython-312.pyc | Bin 0 -> 12327 bytes
 .../site-packages/pip/_internal/index/collector.py | 489 +
 .../pip/_internal/index/package_finder.py | 1059 +
 .../site-packages/pip/_internal/index/sources.py | 287 +
 .../pip/_internal/locations/__init__.py | 441 +
 .../locations/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 15321 bytes
 .../__pycache__/_distutils.cpython-312.pyc | Bin 0 -> 6778 bytes
 .../__pycache__/_sysconfig.cpython-312.pyc | Bin 0 -> 7929 bytes
 .../locations/__pycache__/base.cpython-312.pyc | Bin 0 -> 3712 bytes
 .../pip/_internal/locations/_distutils.py | 173 +
 .../pip/_internal/locations/_sysconfig.py | 215 +
 .../site-packages/pip/_internal/locations/base.py | 82 +
 .../python3.12/site-packages/pip/_internal/main.py | 12 +
 .../pip/_internal/metadata/__init__.py | 164 +
 .../metadata/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 6641 bytes
 .../metadata/__pycache__/_json.cpython-312.pyc | Bin 0 -> 2881 bytes
 .../metadata/__pycache__/base.cpython-312.pyc | Bin 0 -> 34465 bytes
 .../__pycache__/pkg_resources.cpython-312.pyc | Bin 0 -> 15809 bytes
 .../site-packages/pip/_internal/metadata/_json.py | 87 +
 .../site-packages/pip/_internal/metadata/base.py | 685 +
 .../pip/_internal/metadata/importlib/__init__.py | 6 +
 .../importlib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 362 bytes
 .../importlib/__pycache__/_compat.cpython-312.pyc | Bin 0 -> 4241 bytes
 .../importlib/__pycache__/_dists.cpython-312.pyc | Bin 0 -> 12635 bytes
 .../importlib/__pycache__/_envs.cpython-312.pyc | Bin 0 -> 8046 bytes
 .../pip/_internal/metadata/importlib/_compat.py | 87 +
 .../pip/_internal/metadata/importlib/_dists.py | 223 +
 .../pip/_internal/metadata/importlib/_envs.py | 143 +
 .../pip/_internal/metadata/pkg_resources.py | 298 +
 .../site-packages/pip/_internal/models/__init__.py | 1 +
 .../models/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 267 bytes
 .../models/__pycache__/candidate.cpython-312.pyc | Bin 0 -> 1608 bytes
 .../models/__pycache__/direct_url.cpython-312.pyc | Bin 0 -> 10502 bytes
 .../__pycache__/format_control.cpython-312.pyc | Bin 0 -> 4132 bytes
 .../models/__pycache__/index.cpython-312.pyc | Bin 0 -> 1698 bytes
 .../installation_report.cpython-312.pyc | Bin 0 -> 2289 bytes
 .../models/__pycache__/link.cpython-312.pyc | Bin 0 -> 26647 bytes
 .../models/__pycache__/pylock.cpython-312.pyc | Bin 0 -> 7863 bytes
 .../models/__pycache__/scheme.cpython-312.pyc | Bin 0 -> 1027 bytes
 .../__pycache__/search_scope.cpython-312.pyc | Bin 0 -> 4965 bytes
 .../__pycache__/selection_prefs.cpython-312.pyc | Bin 0 -> 1882 bytes
 .../__pycache__/target_python.cpython-312.pyc | Bin 0 -> 4859 bytes
 .../models/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 7465 bytes
 .../pip/_internal/models/candidate.py | 25 +
 .../pip/_internal/models/direct_url.py | 227 +
 .../pip/_internal/models/format_control.py | 78 +
 .../site-packages/pip/_internal/models/index.py | 28 +
 .../pip/_internal/models/installation_report.py | 57 +
 .../site-packages/pip/_internal/models/link.py | 613 +
 .../site-packages/pip/_internal/models/pylock.py | 188 +
 .../site-packages/pip/_internal/models/scheme.py | 25 +
 .../pip/_internal/models/search_scope.py | 126 +
 .../pip/_internal/models/selection_prefs.py | 53 +
 .../pip/_internal/models/target_python.py | 122 +
 .../site-packages/pip/_internal/models/wheel.py | 141 +
 .../pip/_internal/network/__init__.py | 1 +
 .../network/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 255 bytes
 .../network/__pycache__/auth.cpython-312.pyc | Bin 0 -> 21476 bytes
 .../network/__pycache__/cache.cpython-312.pyc | Bin 0 -> 8491 bytes
 .../network/__pycache__/download.cpython-312.pyc | Bin 0 -> 16093 bytes
 .../network/__pycache__/lazy_wheel.cpython-312.pyc | Bin 0 -> 11609 bytes
 .../network/__pycache__/session.cpython-312.pyc | Bin 0 -> 19210 bytes
 .../network/__pycache__/utils.cpython-312.pyc | Bin 0 -> 2261 bytes
 .../network/__pycache__/xmlrpc.cpython-312.pyc | Bin 0 -> 2934 bytes
 .../site-packages/pip/_internal/network/auth.py | 564 +
 .../site-packages/pip/_internal/network/cache.py | 133 +
 .../pip/_internal/network/download.py | 342 +
 .../pip/_internal/network/lazy_wheel.py | 213 +
 .../site-packages/pip/_internal/network/session.py | 528 +
 .../site-packages/pip/_internal/network/utils.py | 98 +
 .../site-packages/pip/_internal/network/xmlrpc.py | 61 +
 .../pip/_internal/operations/__init__.py | 0
 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 201 bytes
 .../operations/__pycache__/check.cpython-312.pyc | Bin 0 -> 7210 bytes
 .../operations/__pycache__/freeze.cpython-312.pyc | Bin 0 -> 10252 bytes
 .../operations/__pycache__/prepare.cpython-312.pyc | Bin 0 -> 26481 bytes
 .../pip/_internal/operations/build/__init__.py | 0
 .../build/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 207 bytes
 .../__pycache__/build_tracker.cpython-312.pyc | Bin 0 -> 7634 bytes
 .../build/__pycache__/metadata.cpython-312.pyc | Bin 0 -> 1879 bytes
 .../__pycache__/metadata_editable.cpython-312.pyc | Bin 0 -> 1933 bytes
 .../__pycache__/metadata_legacy.cpython-312.pyc | Bin 0 -> 3026 bytes
 .../build/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 1714 bytes
 .../__pycache__/wheel_editable.cpython-312.pyc | Bin 0 -> 2055 bytes
 .../build/__pycache__/wheel_legacy.cpython-312.pyc | Bin 0 -> 4415 bytes
 .../_internal/operations/build/build_tracker.py | 140 +
 .../pip/_internal/operations/build/metadata.py | 38 +
 .../operations/build/metadata_editable.py | 41 +
 .../_internal/operations/build/metadata_legacy.py | 73 +
 .../pip/_internal/operations/build/wheel.py | 38 +
 .../_internal/operations/build/wheel_editable.py | 47 +
 .../pip/_internal/operations/build/wheel_legacy.py | 119 +
 .../pip/_internal/operations/check.py | 175 +
 .../pip/_internal/operations/freeze.py | 259 +
 .../pip/_internal/operations/install/__init__.py | 1 +
 .../install/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 267 bytes
 .../__pycache__/editable_legacy.cpython-312.pyc | Bin 0 -> 1885 bytes
 .../install/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 34219 bytes
 .../operations/install/editable_legacy.py | 48 +
 .../pip/_internal/operations/install/wheel.py | 746 +
 .../pip/_internal/operations/prepare.py | 742 +
 .../site-packages/pip/_internal/pyproject.py | 182 +
 .../site-packages/pip/_internal/req/__init__.py | 105 +
 .../req/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4101 bytes
 .../req/__pycache__/constructors.cpython-312.pyc | Bin 0 -> 21093 bytes
 .../req_dependency_group.cpython-312.pyc | Bin 0 -> 4019 bytes
 .../req/__pycache__/req_file.cpython-312.pyc | Bin 0 -> 23857 bytes
 .../req/__pycache__/req_install.cpython-312.pyc | Bin 0 -> 38343 bytes
 .../req/__pycache__/req_set.cpython-312.pyc | Bin 0 -> 5428 bytes
 .../req/__pycache__/req_uninstall.cpython-312.pyc | Bin 0 -> 31762 bytes
 .../pip/_internal/req/constructors.py | 562 +
 .../pip/_internal/req/req_dependency_group.py | 75 +
 .../site-packages/pip/_internal/req/req_file.py | 620 +
 .../site-packages/pip/_internal/req/req_install.py | 937 +
 .../site-packages/pip/_internal/req/req_set.py | 81 +
 .../pip/_internal/req/req_uninstall.py | 639 +
 .../pip/_internal/resolution/__init__.py | 0
 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 201 bytes
 .../resolution/__pycache__/base.cpython-312.pyc | Bin 0 -> 1172 bytes
 .../site-packages/pip/_internal/resolution/base.py | 20 +
 .../pip/_internal/resolution/legacy/__init__.py | 0
 .../legacy/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 208 bytes
 .../legacy/__pycache__/resolver.cpython-312.pyc | Bin 0 -> 22556 bytes
 .../pip/_internal/resolution/legacy/resolver.py | 598 +
 .../_internal/resolution/resolvelib/__init__.py | 0
 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 212 bytes
 .../resolvelib/__pycache__/base.cpython-312.pyc | Bin 0 -> 7986 bytes
 .../__pycache__/candidates.cpython-312.pyc | Bin 0 -> 29015 bytes
 .../resolvelib/__pycache__/factory.cpython-312.pyc | Bin 0 -> 32637 bytes
 .../__pycache__/found_candidates.cpython-312.pyc | Bin 0 -> 6749 bytes
 .../__pycache__/provider.cpython-312.pyc | Bin 0 -> 11254 bytes
 .../__pycache__/reporter.cpython-312.pyc | Bin 0 -> 5065 bytes
 .../__pycache__/requirements.cpython-312.pyc | Bin 0 -> 14756 bytes
 .../__pycache__/resolver.cpython-312.pyc | Bin 0 -> 12660 bytes
 .../pip/_internal/resolution/resolvelib/base.py | 142 +
 .../_internal/resolution/resolvelib/candidates.py | 582 +
 .../pip/_internal/resolution/resolvelib/factory.py | 814 +
 .../resolution/resolvelib/found_candidates.py | 166 +
 .../_internal/resolution/resolvelib/provider.py | 276 +
 .../_internal/resolution/resolvelib/reporter.py | 85 +
 .../resolution/resolvelib/requirements.py | 247 +
 .../_internal/resolution/resolvelib/resolver.py | 336 +
 .../pip/_internal/self_outdated_check.py | 254 +
 .../site-packages/pip/_internal/utils/__init__.py | 0
 .../utils/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 196 bytes
 .../utils/__pycache__/_jaraco_text.cpython-312.pyc | Bin 0 -> 4536 bytes
 .../utils/__pycache__/_log.cpython-312.pyc | Bin 0 -> 1867 bytes
.../utils/__pycache__/appdirs.cpython-312.pyc | Bin 0 -> 2456 bytes .../utils/__pycache__/compat.cpython-312.pyc | Bin 0 -> 3023 bytes .../__pycache__/compatibility_tags.cpython-312.pyc | Bin 0 -> 6642 bytes .../utils/__pycache__/datetime.cpython-312.pyc | Bin 0 -> 680 bytes .../utils/__pycache__/deprecation.cpython-312.pyc | Bin 0 -> 4209 bytes .../__pycache__/direct_url_helpers.cpython-312.pyc | Bin 0 -> 3540 bytes .../utils/__pycache__/egg_link.cpython-312.pyc | Bin 0 -> 3165 bytes .../utils/__pycache__/entrypoints.cpython-312.pyc | Bin 0 -> 4056 bytes .../utils/__pycache__/filesystem.cpython-312.pyc | Bin 0 -> 7235 bytes .../utils/__pycache__/filetypes.cpython-312.pyc | Bin 0 -> 1130 bytes .../utils/__pycache__/glibc.cpython-312.pyc | Bin 0 -> 2351 bytes .../utils/__pycache__/hashes.cpython-312.pyc | Bin 0 -> 7469 bytes .../utils/__pycache__/logging.cpython-312.pyc | Bin 0 -> 13864 bytes .../utils/__pycache__/misc.cpython-312.pyc | Bin 0 -> 32701 bytes .../utils/__pycache__/packaging.cpython-312.pyc | Bin 0 -> 1886 bytes .../utils/__pycache__/retry.cpython-312.pyc | Bin 0 -> 1973 bytes .../__pycache__/setuptools_build.cpython-312.pyc | Bin 0 -> 4699 bytes .../utils/__pycache__/subprocess.cpython-312.pyc | Bin 0 -> 8533 bytes .../utils/__pycache__/temp_dir.cpython-312.pyc | Bin 0 -> 11948 bytes .../utils/__pycache__/unpacking.cpython-312.pyc | Bin 0 -> 13368 bytes .../utils/__pycache__/urls.cpython-312.pyc | Bin 0 -> 2082 bytes .../utils/__pycache__/virtualenv.cpython-312.pyc | Bin 0 -> 4395 bytes .../utils/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 5863 bytes .../pip/_internal/utils/_jaraco_text.py | 109 + .../site-packages/pip/_internal/utils/_log.py | 38 + .../site-packages/pip/_internal/utils/appdirs.py | 52 + .../site-packages/pip/_internal/utils/compat.py | 85 + .../pip/_internal/utils/compatibility_tags.py | 201 + .../site-packages/pip/_internal/utils/datetime.py | 10 + .../pip/_internal/utils/deprecation.py | 126 + .../pip/_internal/utils/direct_url_helpers.py | 87 + .../site-packages/pip/_internal/utils/egg_link.py | 81 + .../pip/_internal/utils/entrypoints.py | 88 + .../pip/_internal/utils/filesystem.py | 152 + .../site-packages/pip/_internal/utils/filetypes.py | 24 + .../site-packages/pip/_internal/utils/glibc.py | 102 + .../site-packages/pip/_internal/utils/hashes.py | 150 + .../site-packages/pip/_internal/utils/logging.py | 364 + .../site-packages/pip/_internal/utils/misc.py | 765 + .../site-packages/pip/_internal/utils/packaging.py | 44 + .../site-packages/pip/_internal/utils/retry.py | 45 + .../pip/_internal/utils/setuptools_build.py | 149 + .../pip/_internal/utils/subprocess.py | 248 + .../site-packages/pip/_internal/utils/temp_dir.py | 294 + .../site-packages/pip/_internal/utils/unpacking.py | 337 + .../site-packages/pip/_internal/utils/urls.py | 55 + .../pip/_internal/utils/virtualenv.py | 105 + .../site-packages/pip/_internal/utils/wheel.py | 132 + .../site-packages/pip/_internal/vcs/__init__.py | 15 + .../vcs/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 535 bytes .../vcs/__pycache__/bazaar.cpython-312.pyc | Bin 0 -> 5163 bytes .../_internal/vcs/__pycache__/git.cpython-312.pyc | Bin 0 -> 19922 bytes .../vcs/__pycache__/mercurial.cpython-312.pyc | Bin 0 -> 7768 bytes .../vcs/__pycache__/subversion.cpython-312.pyc | Bin 0 -> 12342 bytes .../vcs/__pycache__/versioncontrol.cpython-312.pyc | Bin 0 -> 28742 bytes .../site-packages/pip/_internal/vcs/bazaar.py | 130 + .../site-packages/pip/_internal/vcs/git.py | 571 + .../site-packages/pip/_internal/vcs/mercurial.py | 186 +
.../site-packages/pip/_internal/vcs/subversion.py | 335 + .../pip/_internal/vcs/versioncontrol.py | 693 + .../site-packages/pip/_internal/wheel_builder.py | 334 + .../site-packages/pip/_vendor/__init__.py | 117 + .../_vendor/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4598 bytes .../pip/_vendor/cachecontrol/__init__.py | 29 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 907 bytes .../cachecontrol/__pycache__/_cmd.cpython-312.pyc | Bin 0 -> 2651 bytes .../__pycache__/adapter.cpython-312.pyc | Bin 0 -> 6716 bytes .../cachecontrol/__pycache__/cache.cpython-312.pyc | Bin 0 -> 3814 bytes .../__pycache__/controller.cpython-312.pyc | Bin 0 -> 16458 bytes .../__pycache__/filewrapper.cpython-312.pyc | Bin 0 -> 4352 bytes .../__pycache__/heuristics.cpython-312.pyc | Bin 0 -> 6702 bytes .../__pycache__/serialize.cpython-312.pyc | Bin 0 -> 5270 bytes .../__pycache__/wrapper.cpython-312.pyc | Bin 0 -> 1679 bytes .../site-packages/pip/_vendor/cachecontrol/_cmd.py | 70 + .../pip/_vendor/cachecontrol/adapter.py | 168 + .../pip/_vendor/cachecontrol/cache.py | 75 + .../pip/_vendor/cachecontrol/caches/__init__.py | 8 + .../caches/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 440 bytes .../caches/__pycache__/file_cache.cpython-312.pyc | Bin 0 -> 7057 bytes .../caches/__pycache__/redis_cache.cpython-312.pyc | Bin 0 -> 2743 bytes .../pip/_vendor/cachecontrol/caches/file_cache.py | 145 + .../pip/_vendor/cachecontrol/caches/redis_cache.py | 48 + .../pip/_vendor/cachecontrol/controller.py | 511 + .../pip/_vendor/cachecontrol/filewrapper.py | 119 + .../pip/_vendor/cachecontrol/heuristics.py | 157 + .../pip/_vendor/cachecontrol/py.typed | 0 .../pip/_vendor/cachecontrol/serialize.py | 146 + .../pip/_vendor/cachecontrol/wrapper.py | 43 + .../site-packages/pip/_vendor/certifi/__init__.py | 4 + .../site-packages/pip/_vendor/certifi/__main__.py | 12 + .../certifi/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 323 bytes .../certifi/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 650 bytes .../certifi/__pycache__/core.cpython-312.pyc | Bin 0 -> 2087 bytes .../site-packages/pip/_vendor/certifi/cacert.pem | 4778 + .../site-packages/pip/_vendor/certifi/core.py | 83 + .../site-packages/pip/_vendor/certifi/py.typed | 0 .../pip/_vendor/dependency_groups/__init__.py | 13 + .../pip/_vendor/dependency_groups/__main__.py | 65 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 382 bytes .../__pycache__/__main__.cpython-312.pyc | Bin 0 -> 2704 bytes .../__pycache__/_implementation.cpython-312.pyc | Bin 0 -> 9656 bytes .../_lint_dependency_groups.cpython-312.pyc | Bin 0 -> 2869 bytes .../__pycache__/_pip_wrapper.cpython-312.pyc | Bin 0 -> 3436 bytes .../__pycache__/_toml_compat.cpython-312.pyc | Bin 0 -> 483 bytes .../_vendor/dependency_groups/_implementation.py | 209 + .../dependency_groups/_lint_dependency_groups.py | 59 + .../pip/_vendor/dependency_groups/_pip_wrapper.py | 62 + .../pip/_vendor/dependency_groups/_toml_compat.py | 9 + .../pip/_vendor/dependency_groups/py.typed | 0 .../site-packages/pip/_vendor/distlib/__init__.py | 33 + .../distlib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1274 bytes .../distlib/__pycache__/compat.cpython-312.pyc | Bin 0 -> 45605 bytes .../distlib/__pycache__/resources.cpython-312.pyc | Bin 0 -> 17330 bytes .../distlib/__pycache__/scripts.cpython-312.pyc | Bin 0 -> 19785 bytes .../distlib/__pycache__/util.cpython-312.pyc | Bin 0 -> 88279 bytes .../site-packages/pip/_vendor/distlib/compat.py | 1137 + .../site-packages/pip/_vendor/distlib/resources.py | 358 +
.../site-packages/pip/_vendor/distlib/scripts.py | 447 + .../site-packages/pip/_vendor/distlib/t32.exe | Bin 0 -> 97792 bytes .../site-packages/pip/_vendor/distlib/t64-arm.exe | Bin 0 -> 182784 bytes .../site-packages/pip/_vendor/distlib/t64.exe | Bin 0 -> 108032 bytes .../site-packages/pip/_vendor/distlib/util.py | 1984 + .../site-packages/pip/_vendor/distlib/w32.exe | Bin 0 -> 91648 bytes .../site-packages/pip/_vendor/distlib/w64-arm.exe | Bin 0 -> 168448 bytes .../site-packages/pip/_vendor/distlib/w64.exe | Bin 0 -> 101888 bytes .../site-packages/pip/_vendor/distro/__init__.py | 54 + .../site-packages/pip/_vendor/distro/__main__.py | 4 + .../distro/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 965 bytes .../distro/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 297 bytes .../distro/__pycache__/distro.cpython-312.pyc | Bin 0 -> 53850 bytes .../site-packages/pip/_vendor/distro/distro.py | 1403 + .../site-packages/pip/_vendor/distro/py.typed | 0 .../site-packages/pip/_vendor/idna/__init__.py | 45 + .../idna/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 891 bytes .../_vendor/idna/__pycache__/codec.cpython-312.pyc | Bin 0 -> 4991 bytes .../idna/__pycache__/compat.cpython-312.pyc | Bin 0 -> 895 bytes .../_vendor/idna/__pycache__/core.cpython-312.pyc | Bin 0 -> 16181 bytes .../idna/__pycache__/idnadata.cpython-312.pyc | Bin 0 -> 99481 bytes .../idna/__pycache__/intranges.cpython-312.pyc | Bin 0 -> 2643 bytes .../idna/__pycache__/package_data.cpython-312.pyc | Bin 0 -> 222 bytes .../idna/__pycache__/uts46data.cpython-312.pyc | Bin 0 -> 158851 bytes .../site-packages/pip/_vendor/idna/codec.py | 122 + .../site-packages/pip/_vendor/idna/compat.py | 15 + .../site-packages/pip/_vendor/idna/core.py | 437 + .../site-packages/pip/_vendor/idna/idnadata.py | 4243 + .../site-packages/pip/_vendor/idna/intranges.py | 57 + .../site-packages/pip/_vendor/idna/package_data.py | 1 + .../site-packages/pip/_vendor/idna/py.typed | 0 .../site-packages/pip/_vendor/idna/uts46data.py | 8681 ++ .../site-packages/pip/_vendor/msgpack/__init__.py | 55 + .../msgpack/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1744 bytes .../msgpack/__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 2030 bytes .../msgpack/__pycache__/ext.cpython-312.pyc | Bin 0 -> 8298 bytes .../msgpack/__pycache__/fallback.cpython-312.pyc | Bin 0 -> 41524 bytes .../pip/_vendor/msgpack/exceptions.py | 48 + .../site-packages/pip/_vendor/msgpack/ext.py | 170 + .../site-packages/pip/_vendor/msgpack/fallback.py | 929 + .../pip/_vendor/packaging/__init__.py | 15 + .../packaging/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 563 bytes .../packaging/__pycache__/_elffile.cpython-312.pyc | Bin 0 -> 5028 bytes .../__pycache__/_manylinux.cpython-312.pyc | Bin 0 -> 9756 bytes .../__pycache__/_musllinux.cpython-312.pyc | Bin 0 -> 4574 bytes .../packaging/__pycache__/_parser.cpython-312.pyc | Bin 0 -> 14003 bytes .../__pycache__/_structures.cpython-312.pyc | Bin 0 -> 3246 bytes .../__pycache__/_tokenizer.cpython-312.pyc | Bin 0 -> 7953 bytes .../packaging/__pycache__/markers.cpython-312.pyc | Bin 0 -> 12770 bytes .../packaging/__pycache__/metadata.cpython-312.pyc | Bin 0 -> 27255 bytes .../__pycache__/requirements.cpython-312.pyc | Bin 0 -> 4415 bytes .../__pycache__/specifiers.cpython-312.pyc | Bin 0 -> 39069 bytes .../packaging/__pycache__/tags.cpython-312.pyc | Bin 0 -> 24821 bytes .../packaging/__pycache__/utils.cpython-312.pyc | Bin 0 -> 6640 bytes .../packaging/__pycache__/version.cpython-312.pyc | Bin 0 -> 20485 bytes .../pip/_vendor/packaging/_elffile.py | 109 +
.../pip/_vendor/packaging/_manylinux.py | 262 + .../pip/_vendor/packaging/_musllinux.py | 85 + .../site-packages/pip/_vendor/packaging/_parser.py | 353 + .../pip/_vendor/packaging/_structures.py | 61 + .../pip/_vendor/packaging/_tokenizer.py | 195 + .../pip/_vendor/packaging/licenses/__init__.py | 145 + .../licenses/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4134 bytes .../licenses/__pycache__/_spdx.cpython-312.pyc | Bin 0 -> 47369 bytes .../pip/_vendor/packaging/licenses/_spdx.py | 759 + .../site-packages/pip/_vendor/packaging/markers.py | 362 + .../pip/_vendor/packaging/metadata.py | 862 + .../site-packages/pip/_vendor/packaging/py.typed | 0 .../pip/_vendor/packaging/requirements.py | 91 + .../pip/_vendor/packaging/specifiers.py | 1019 + .../site-packages/pip/_vendor/packaging/tags.py | 656 + .../site-packages/pip/_vendor/packaging/utils.py | 163 + .../site-packages/pip/_vendor/packaging/version.py | 582 + .../pip/_vendor/pkg_resources/__init__.py | 3676 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 161526 bytes .../pip/_vendor/platformdirs/__init__.py | 631 + .../pip/_vendor/platformdirs/__main__.py | 55 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 19852 bytes .../__pycache__/__main__.cpython-312.pyc | Bin 0 -> 1973 bytes .../__pycache__/android.cpython-312.pyc | Bin 0 -> 10700 bytes .../platformdirs/__pycache__/api.cpython-312.pyc | Bin 0 -> 13359 bytes .../platformdirs/__pycache__/macos.cpython-312.pyc | Bin 0 -> 8829 bytes .../platformdirs/__pycache__/unix.cpython-312.pyc | Bin 0 -> 14751 bytes .../__pycache__/version.cpython-312.pyc | Bin 0 -> 661 bytes .../__pycache__/windows.cpython-312.pyc | Bin 0 -> 13678 bytes .../pip/_vendor/platformdirs/android.py | 249 + .../site-packages/pip/_vendor/platformdirs/api.py | 299 + .../pip/_vendor/platformdirs/macos.py | 144 + .../pip/_vendor/platformdirs/py.typed | 0 .../site-packages/pip/_vendor/platformdirs/unix.py | 272 + .../pip/_vendor/platformdirs/version.py | 21 + .../pip/_vendor/platformdirs/windows.py | 272 + .../site-packages/pip/_vendor/pygments/__init__.py | 82 + .../site-packages/pip/_vendor/pygments/__main__.py | 17 + .../pygments/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 3494 bytes .../pygments/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 740 bytes .../pygments/__pycache__/console.cpython-312.pyc | Bin 0 -> 2639 bytes .../pygments/__pycache__/filter.cpython-312.pyc | Bin 0 -> 3232 bytes .../pygments/__pycache__/formatter.cpython-312.pyc | Bin 0 -> 4726 bytes .../pygments/__pycache__/lexer.cpython-312.pyc | Bin 0 -> 38471 bytes .../pygments/__pycache__/modeline.cpython-312.pyc | Bin 0 -> 1575 bytes .../pygments/__pycache__/plugin.cpython-312.pyc | Bin 0 -> 2634 bytes .../pygments/__pycache__/regexopt.cpython-312.pyc | Bin 0 -> 4087 bytes .../pygments/__pycache__/scanner.cpython-312.pyc | Bin 0 -> 4762 bytes .../pygments/__pycache__/sphinxext.cpython-312.pyc | Bin 0 -> 12147 bytes .../pygments/__pycache__/style.cpython-312.pyc | Bin 0 -> 6723 bytes .../pygments/__pycache__/token.cpython-312.pyc | Bin 0 -> 8200 bytes .../pygments/__pycache__/unistring.cpython-312.pyc | Bin 0 -> 33017 bytes .../pygments/__pycache__/util.cpython-312.pyc | Bin 0 -> 14089 bytes .../site-packages/pip/_vendor/pygments/console.py | 70 + .../site-packages/pip/_vendor/pygments/filter.py | 70 + .../pip/_vendor/pygments/filters/__init__.py | 940 + .../filters/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 37985 bytes .../pip/_vendor/pygments/formatter.py | 129 + .../pip/_vendor/pygments/formatters/__init__.py | 157 +
.../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 6959 bytes .../__pycache__/_mapping.cpython-312.pyc | Bin 0 -> 4221 bytes .../pip/_vendor/pygments/formatters/_mapping.py | 23 + .../site-packages/pip/_vendor/pygments/lexer.py | 963 + .../pip/_vendor/pygments/lexers/__init__.py | 362 + .../lexers/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 14741 bytes .../lexers/__pycache__/_mapping.cpython-312.pyc | Bin 0 -> 69850 bytes .../lexers/__pycache__/python.cpython-312.pyc | Bin 0 -> 42980 bytes .../pip/_vendor/pygments/lexers/_mapping.py | 602 + .../pip/_vendor/pygments/lexers/python.py | 1201 + .../site-packages/pip/_vendor/pygments/modeline.py | 43 + .../site-packages/pip/_vendor/pygments/plugin.py | 72 + .../site-packages/pip/_vendor/pygments/regexopt.py | 91 + .../site-packages/pip/_vendor/pygments/scanner.py | 104 + .../pip/_vendor/pygments/sphinxext.py | 247 + .../site-packages/pip/_vendor/pygments/style.py | 203 + .../pip/_vendor/pygments/styles/__init__.py | 61 + .../styles/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 2677 bytes .../styles/__pycache__/_mapping.cpython-312.pyc | Bin 0 -> 3654 bytes .../pip/_vendor/pygments/styles/_mapping.py | 54 + .../site-packages/pip/_vendor/pygments/token.py | 214 + .../pip/_vendor/pygments/unistring.py | 153 + .../site-packages/pip/_vendor/pygments/util.py | 324 + .../pip/_vendor/pyproject_hooks/__init__.py | 31 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 753 bytes .../__pycache__/_impl.cpython-312.pyc | Bin 0 -> 18085 bytes .../pip/_vendor/pyproject_hooks/_impl.py | 410 + .../pyproject_hooks/_in_process/__init__.py | 21 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1082 bytes .../__pycache__/_in_process.cpython-312.pyc | Bin 0 -> 15364 bytes .../pyproject_hooks/_in_process/_in_process.py | 389 + .../pip/_vendor/pyproject_hooks/py.typed | 0 .../site-packages/pip/_vendor/requests/__init__.py | 179 + .../requests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 5259 bytes .../__pycache__/__version__.cpython-312.pyc | Bin 0 -> 590 bytes .../__pycache__/_internal_utils.cpython-312.pyc | Bin 0 -> 2030 bytes .../requests/__pycache__/adapters.cpython-312.pyc | Bin 0 -> 28443 bytes .../requests/__pycache__/api.cpython-312.pyc | Bin 0 -> 7210 bytes .../requests/__pycache__/auth.cpython-312.pyc | Bin 0 -> 13929 bytes .../requests/__pycache__/certs.cpython-312.pyc | Bin 0 -> 684 bytes .../requests/__pycache__/compat.cpython-312.pyc | Bin 0 -> 1991 bytes .../requests/__pycache__/cookies.cpython-312.pyc | Bin 0 -> 25282 bytes .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 7604 bytes .../requests/__pycache__/help.cpython-312.pyc | Bin 0 -> 4234 bytes .../requests/__pycache__/hooks.cpython-312.pyc | Bin 0 -> 1058 bytes .../requests/__pycache__/models.cpython-312.pyc | Bin 0 -> 35579 bytes .../requests/__pycache__/packages.cpython-312.pyc | Bin 0 -> 1293 bytes .../requests/__pycache__/sessions.cpython-312.pyc | Bin 0 -> 27888 bytes .../__pycache__/status_codes.cpython-312.pyc | Bin 0 -> 6037 bytes .../__pycache__/structures.cpython-312.pyc | Bin 0 -> 5623 bytes .../requests/__pycache__/utils.cpython-312.pyc | Bin 0 -> 36196 bytes .../pip/_vendor/requests/__version__.py | 14 + .../pip/_vendor/requests/_internal_utils.py | 50 + .../site-packages/pip/_vendor/requests/adapters.py | 719 + .../site-packages/pip/_vendor/requests/api.py | 157 + .../site-packages/pip/_vendor/requests/auth.py | 314 + .../site-packages/pip/_vendor/requests/certs.py | 17 + .../site-packages/pip/_vendor/requests/compat.py | 90 + 
.../site-packages/pip/_vendor/requests/cookies.py | 561 + .../pip/_vendor/requests/exceptions.py | 151 + .../site-packages/pip/_vendor/requests/help.py | 127 + .../site-packages/pip/_vendor/requests/hooks.py | 33 + .../site-packages/pip/_vendor/requests/models.py | 1039 + .../site-packages/pip/_vendor/requests/packages.py | 25 + .../site-packages/pip/_vendor/requests/sessions.py | 831 + .../pip/_vendor/requests/status_codes.py | 128 + .../pip/_vendor/requests/structures.py | 99 + .../site-packages/pip/_vendor/requests/utils.py | 1086 + .../pip/_vendor/resolvelib/__init__.py | 27 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 640 bytes .../__pycache__/providers.cpython-312.pyc | Bin 0 -> 10131 bytes .../__pycache__/reporters.cpython-312.pyc | Bin 0 -> 3295 bytes .../resolvelib/__pycache__/structs.cpython-312.pyc | Bin 0 -> 12456 bytes .../pip/_vendor/resolvelib/providers.py | 196 + .../site-packages/pip/_vendor/resolvelib/py.typed | 0 .../pip/_vendor/resolvelib/reporters.py | 55 + .../pip/_vendor/resolvelib/resolvers/__init__.py | 27 + .../resolvers/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 745 bytes .../resolvers/__pycache__/abstract.cpython-312.pyc | Bin 0 -> 2475 bytes .../__pycache__/criterion.cpython-312.pyc | Bin 0 -> 3276 bytes .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 4086 bytes .../__pycache__/resolution.cpython-312.pyc | Bin 0 -> 25067 bytes .../pip/_vendor/resolvelib/resolvers/abstract.py | 47 + .../pip/_vendor/resolvelib/resolvers/criterion.py | 48 + .../pip/_vendor/resolvelib/resolvers/exceptions.py | 57 + .../pip/_vendor/resolvelib/resolvers/resolution.py | 622 + .../pip/_vendor/resolvelib/structs.py | 209 + .../site-packages/pip/_vendor/rich/__init__.py | 177 + .../site-packages/pip/_vendor/rich/__main__.py | 245 + .../rich/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 7021 bytes .../rich/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 9556 bytes .../rich/__pycache__/_cell_widths.cpython-312.pyc | Bin 0 -> 7878 bytes .../rich/__pycache__/_emoji_codes.cpython-312.pyc | Bin 0 -> 205982 bytes .../__pycache__/_emoji_replace.cpython-312.pyc | Bin 0 -> 1735 bytes .../__pycache__/_export_format.cpython-312.pyc | Bin 0 -> 2355 bytes .../rich/__pycache__/_extension.cpython-312.pyc | Bin 0 -> 543 bytes .../rich/__pycache__/_fileno.cpython-312.pyc | Bin 0 -> 861 bytes .../rich/__pycache__/_inspect.cpython-312.pyc | Bin 0 -> 12033 bytes .../rich/__pycache__/_log_render.cpython-312.pyc | Bin 0 -> 4153 bytes .../_vendor/rich/__pycache__/_loop.cpython-312.pyc | Bin 0 -> 1891 bytes .../rich/__pycache__/_null_file.cpython-312.pyc | Bin 0 -> 3635 bytes .../rich/__pycache__/_palettes.cpython-312.pyc | Bin 0 -> 5166 bytes .../_vendor/rich/__pycache__/_pick.cpython-312.pyc | Bin 0 -> 732 bytes .../rich/__pycache__/_ratio.cpython-312.pyc | Bin 0 -> 6436 bytes .../rich/__pycache__/_spinners.cpython-312.pyc | Bin 0 -> 13185 bytes .../rich/__pycache__/_stack.cpython-312.pyc | Bin 0 -> 971 bytes .../rich/__pycache__/_timer.cpython-312.pyc | Bin 0 -> 871 bytes .../__pycache__/_win32_console.cpython-312.pyc | Bin 0 -> 28817 bytes .../rich/__pycache__/_windows.cpython-312.pyc | Bin 0 -> 2496 bytes .../__pycache__/_windows_renderer.cpython-312.pyc | Bin 0 -> 3579 bytes .../_vendor/rich/__pycache__/_wrap.cpython-312.pyc | Bin 0 -> 3342 bytes .../_vendor/rich/__pycache__/abc.cpython-312.pyc | Bin 0 -> 1614 bytes .../_vendor/rich/__pycache__/align.cpython-312.pyc | Bin 0 -> 12285 bytes .../_vendor/rich/__pycache__/ansi.cpython-312.pyc | Bin 0 -> 9127 bytes 
.../_vendor/rich/__pycache__/bar.cpython-312.pyc | Bin 0 -> 4278 bytes .../_vendor/rich/__pycache__/box.cpython-312.pyc | Bin 0 -> 11717 bytes .../_vendor/rich/__pycache__/cells.cpython-312.pyc | Bin 0 -> 5584 bytes .../_vendor/rich/__pycache__/color.cpython-312.pyc | Bin 0 -> 26559 bytes .../rich/__pycache__/color_triplet.cpython-312.pyc | Bin 0 -> 1707 bytes .../rich/__pycache__/columns.cpython-312.pyc | Bin 0 -> 8593 bytes .../rich/__pycache__/console.cpython-312.pyc | Bin 0 -> 115306 bytes .../rich/__pycache__/constrain.cpython-312.pyc | Bin 0 -> 2264 bytes .../rich/__pycache__/containers.cpython-312.pyc | Bin 0 -> 9237 bytes .../rich/__pycache__/control.cpython-312.pyc | Bin 0 -> 10790 bytes .../__pycache__/default_styles.cpython-312.pyc | Bin 0 -> 10534 bytes .../rich/__pycache__/diagnose.cpython-312.pyc | Bin 0 -> 1526 bytes .../_vendor/rich/__pycache__/emoji.cpython-312.pyc | Bin 0 -> 4084 bytes .../rich/__pycache__/errors.cpython-312.pyc | Bin 0 -> 1851 bytes .../rich/__pycache__/file_proxy.cpython-312.pyc | Bin 0 -> 3583 bytes .../rich/__pycache__/filesize.cpython-312.pyc | Bin 0 -> 3063 bytes .../rich/__pycache__/highlighter.cpython-312.pyc | Bin 0 -> 9906 bytes .../_vendor/rich/__pycache__/json.cpython-312.pyc | Bin 0 -> 6041 bytes .../rich/__pycache__/jupyter.cpython-312.pyc | Bin 0 -> 5215 bytes .../rich/__pycache__/layout.cpython-312.pyc | Bin 0 -> 20226 bytes .../_vendor/rich/__pycache__/live.cpython-312.pyc | Bin 0 -> 20138 bytes .../rich/__pycache__/live_render.cpython-312.pyc | Bin 0 -> 4756 bytes .../rich/__pycache__/logging.cpython-312.pyc | Bin 0 -> 14080 bytes .../rich/__pycache__/markup.cpython-312.pyc | Bin 0 -> 9596 bytes .../rich/__pycache__/measure.cpython-312.pyc | Bin 0 -> 6382 bytes .../rich/__pycache__/padding.cpython-312.pyc | Bin 0 -> 6949 bytes .../_vendor/rich/__pycache__/pager.cpython-312.pyc | Bin 0 -> 1826 bytes .../rich/__pycache__/palette.cpython-312.pyc | Bin 0 -> 5320 bytes .../_vendor/rich/__pycache__/panel.cpython-312.pyc | Bin 0 -> 12735 bytes .../rich/__pycache__/pretty.cpython-312.pyc | Bin 0 -> 40639 bytes .../rich/__pycache__/progress.cpython-312.pyc | Bin 0 -> 75129 bytes .../rich/__pycache__/progress_bar.cpython-312.pyc | Bin 0 -> 10395 bytes .../rich/__pycache__/protocol.cpython-312.pyc | Bin 0 -> 1798 bytes .../rich/__pycache__/region.cpython-312.pyc | Bin 0 -> 573 bytes .../_vendor/rich/__pycache__/repr.cpython-312.pyc | Bin 0 -> 6630 bytes .../_vendor/rich/__pycache__/scope.cpython-312.pyc | Bin 0 -> 3836 bytes .../rich/__pycache__/screen.cpython-312.pyc | Bin 0 -> 2490 bytes .../rich/__pycache__/segment.cpython-312.pyc | Bin 0 -> 28589 bytes .../rich/__pycache__/spinner.cpython-312.pyc | Bin 0 -> 5931 bytes .../_vendor/rich/__pycache__/style.cpython-312.pyc | Bin 0 -> 33487 bytes .../rich/__pycache__/styled.cpython-312.pyc | Bin 0 -> 2145 bytes .../rich/__pycache__/syntax.cpython-312.pyc | Bin 0 -> 40998 bytes .../_vendor/rich/__pycache__/table.cpython-312.pyc | Bin 0 -> 43888 bytes .../__pycache__/terminal_theme.cpython-312.pyc | Bin 0 -> 3354 bytes .../_vendor/rich/__pycache__/text.cpython-312.pyc | Bin 0 -> 61260 bytes .../_vendor/rich/__pycache__/theme.cpython-312.pyc | Bin 0 -> 6338 bytes .../rich/__pycache__/themes.cpython-312.pyc | Bin 0 -> 320 bytes .../rich/__pycache__/traceback.cpython-312.pyc | Bin 0 -> 36246 bytes .../site-packages/pip/_vendor/rich/_cell_widths.py | 454 + .../site-packages/pip/_vendor/rich/_emoji_codes.py | 3610 + .../pip/_vendor/rich/_emoji_replace.py | 32 + .../pip/_vendor/rich/_export_format.py | 76 + 
.../site-packages/pip/_vendor/rich/_extension.py | 10 + .../site-packages/pip/_vendor/rich/_fileno.py | 24 + .../site-packages/pip/_vendor/rich/_inspect.py | 268 + .../site-packages/pip/_vendor/rich/_log_render.py | 94 + .../site-packages/pip/_vendor/rich/_loop.py | 43 + .../site-packages/pip/_vendor/rich/_null_file.py | 69 + .../site-packages/pip/_vendor/rich/_palettes.py | 309 + .../site-packages/pip/_vendor/rich/_pick.py | 17 + .../site-packages/pip/_vendor/rich/_ratio.py | 153 + .../site-packages/pip/_vendor/rich/_spinners.py | 482 + .../site-packages/pip/_vendor/rich/_stack.py | 16 + .../site-packages/pip/_vendor/rich/_timer.py | 19 + .../pip/_vendor/rich/_win32_console.py | 661 + .../site-packages/pip/_vendor/rich/_windows.py | 71 + .../pip/_vendor/rich/_windows_renderer.py | 56 + .../site-packages/pip/_vendor/rich/_wrap.py | 93 + .../site-packages/pip/_vendor/rich/abc.py | 33 + .../site-packages/pip/_vendor/rich/align.py | 306 + .../site-packages/pip/_vendor/rich/ansi.py | 241 + .../site-packages/pip/_vendor/rich/bar.py | 93 + .../site-packages/pip/_vendor/rich/box.py | 474 + .../site-packages/pip/_vendor/rich/cells.py | 174 + .../site-packages/pip/_vendor/rich/color.py | 621 + .../pip/_vendor/rich/color_triplet.py | 38 + .../site-packages/pip/_vendor/rich/columns.py | 187 + .../site-packages/pip/_vendor/rich/console.py | 2680 + .../site-packages/pip/_vendor/rich/constrain.py | 37 + .../site-packages/pip/_vendor/rich/containers.py | 167 + .../site-packages/pip/_vendor/rich/control.py | 219 + .../pip/_vendor/rich/default_styles.py | 193 + .../site-packages/pip/_vendor/rich/diagnose.py | 39 + .../site-packages/pip/_vendor/rich/emoji.py | 91 + .../site-packages/pip/_vendor/rich/errors.py | 34 + .../site-packages/pip/_vendor/rich/file_proxy.py | 57 + .../site-packages/pip/_vendor/rich/filesize.py | 88 + .../site-packages/pip/_vendor/rich/highlighter.py | 232 + .../site-packages/pip/_vendor/rich/json.py | 139 + .../site-packages/pip/_vendor/rich/jupyter.py | 101 + .../site-packages/pip/_vendor/rich/layout.py | 442 + .../site-packages/pip/_vendor/rich/live.py | 400 + .../site-packages/pip/_vendor/rich/live_render.py | 106 + .../site-packages/pip/_vendor/rich/logging.py | 297 + .../site-packages/pip/_vendor/rich/markup.py | 251 + .../site-packages/pip/_vendor/rich/measure.py | 151 + .../site-packages/pip/_vendor/rich/padding.py | 141 + .../site-packages/pip/_vendor/rich/pager.py | 34 + .../site-packages/pip/_vendor/rich/palette.py | 100 + .../site-packages/pip/_vendor/rich/panel.py | 317 + .../site-packages/pip/_vendor/rich/pretty.py | 1016 + .../site-packages/pip/_vendor/rich/progress.py | 1715 + .../site-packages/pip/_vendor/rich/progress_bar.py | 223 + .../site-packages/pip/_vendor/rich/prompt.py | 400 + .../site-packages/pip/_vendor/rich/protocol.py | 42 + .../site-packages/pip/_vendor/rich/py.typed | 0 .../site-packages/pip/_vendor/rich/region.py | 10 + .../site-packages/pip/_vendor/rich/repr.py | 149 + .../site-packages/pip/_vendor/rich/rule.py | 130 + .../site-packages/pip/_vendor/rich/scope.py | 86 + .../site-packages/pip/_vendor/rich/screen.py | 54 + .../site-packages/pip/_vendor/rich/segment.py | 752 + .../site-packages/pip/_vendor/rich/spinner.py | 132 + .../site-packages/pip/_vendor/rich/status.py | 131 + .../site-packages/pip/_vendor/rich/style.py | 796 + .../site-packages/pip/_vendor/rich/styled.py | 42 + .../site-packages/pip/_vendor/rich/syntax.py | 985 + .../site-packages/pip/_vendor/rich/table.py | 1006 + .../pip/_vendor/rich/terminal_theme.py | 153 + 
.../site-packages/pip/_vendor/rich/text.py | 1361 + .../site-packages/pip/_vendor/rich/theme.py | 115 + .../site-packages/pip/_vendor/rich/themes.py | 5 + .../site-packages/pip/_vendor/rich/traceback.py | 899 + .../site-packages/pip/_vendor/rich/tree.py | 257 + .../site-packages/pip/_vendor/tomli/__init__.py | 8 + .../site-packages/pip/_vendor/tomli/_parser.py | 770 + .../site-packages/pip/_vendor/tomli/_re.py | 112 + .../site-packages/pip/_vendor/tomli/_types.py | 10 + .../site-packages/pip/_vendor/tomli/py.typed | 1 + .../site-packages/pip/_vendor/tomli_w/__init__.py | 4 + .../site-packages/pip/_vendor/tomli_w/_writer.py | 229 + .../site-packages/pip/_vendor/tomli_w/py.typed | 1 + .../pip/_vendor/truststore/__init__.py | 36 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1458 bytes .../truststore/__pycache__/_api.cpython-312.pyc | Bin 0 -> 17157 bytes .../__pycache__/_openssl.cpython-312.pyc | Bin 0 -> 2221 bytes .../__pycache__/_ssl_constants.cpython-312.pyc | Bin 0 -> 1105 bytes .../site-packages/pip/_vendor/truststore/_api.py | 333 + .../site-packages/pip/_vendor/truststore/_macos.py | 571 + .../pip/_vendor/truststore/_openssl.py | 66 + .../pip/_vendor/truststore/_ssl_constants.py | 31 + .../pip/_vendor/truststore/_windows.py | 567 + .../site-packages/pip/_vendor/truststore/py.typed | 0 .../site-packages/pip/_vendor/urllib3/__init__.py | 102 + .../urllib3/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 3411 bytes .../__pycache__/_collections.cpython-312.pyc | Bin 0 -> 16494 bytes .../urllib3/__pycache__/_version.cpython-312.pyc | Bin 0 -> 224 bytes .../urllib3/__pycache__/connection.cpython-312.pyc | Bin 0 -> 20413 bytes .../__pycache__/connectionpool.cpython-312.pyc | Bin 0 -> 36549 bytes .../urllib3/__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 13499 bytes .../urllib3/__pycache__/fields.cpython-312.pyc | Bin 0 -> 10415 bytes .../urllib3/__pycache__/filepost.cpython-312.pyc | Bin 0 -> 4024 bytes .../__pycache__/poolmanager.cpython-312.pyc | Bin 0 -> 20478 bytes .../urllib3/__pycache__/request.cpython-312.pyc | Bin 0 -> 7300 bytes .../urllib3/__pycache__/response.cpython-312.pyc | Bin 0 -> 33974 bytes .../pip/_vendor/urllib3/_collections.py | 355 + .../site-packages/pip/_vendor/urllib3/_version.py | 2 + .../pip/_vendor/urllib3/connection.py | 572 + .../pip/_vendor/urllib3/connectionpool.py | 1140 + .../pip/_vendor/urllib3/contrib/__init__.py | 0 .../contrib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 204 bytes .../__pycache__/_appengine_environ.cpython-312.pyc | Bin 0 -> 1854 bytes .../contrib/__pycache__/socks.cpython-312.pyc | Bin 0 -> 7517 bytes .../_vendor/urllib3/contrib/_appengine_environ.py | 36 + .../urllib3/contrib/_securetransport/__init__.py | 0 .../urllib3/contrib/_securetransport/bindings.py | 519 + .../urllib3/contrib/_securetransport/low_level.py | 397 + .../pip/_vendor/urllib3/contrib/appengine.py | 314 + .../pip/_vendor/urllib3/contrib/ntlmpool.py | 130 + .../pip/_vendor/urllib3/contrib/pyopenssl.py | 518 + .../pip/_vendor/urllib3/contrib/securetransport.py | 920 + .../pip/_vendor/urllib3/contrib/socks.py | 216 + .../pip/_vendor/urllib3/exceptions.py | 323 + .../site-packages/pip/_vendor/urllib3/fields.py | 274 + .../site-packages/pip/_vendor/urllib3/filepost.py | 98 + .../pip/_vendor/urllib3/packages/__init__.py | 0 .../packages/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 205 bytes .../packages/__pycache__/six.cpython-312.pyc | Bin 0 -> 41325 bytes .../_vendor/urllib3/packages/backports/__init__.py | 0 
.../_vendor/urllib3/packages/backports/makefile.py | 51 + .../urllib3/packages/backports/weakref_finalize.py | 155 + .../pip/_vendor/urllib3/packages/six.py | 1076 + .../pip/_vendor/urllib3/poolmanager.py | 540 + .../site-packages/pip/_vendor/urllib3/request.py | 191 + .../site-packages/pip/_vendor/urllib3/response.py | 879 + .../pip/_vendor/urllib3/util/__init__.py | 49 + .../util/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1152 bytes .../util/__pycache__/connection.cpython-312.pyc | Bin 0 -> 4762 bytes .../urllib3/util/__pycache__/proxy.cpython-312.pyc | Bin 0 -> 1558 bytes .../urllib3/util/__pycache__/queue.cpython-312.pyc | Bin 0 -> 1358 bytes .../util/__pycache__/request.cpython-312.pyc | Bin 0 -> 4189 bytes .../util/__pycache__/response.cpython-312.pyc | Bin 0 -> 2995 bytes .../urllib3/util/__pycache__/retry.cpython-312.pyc | Bin 0 -> 21724 bytes .../urllib3/util/__pycache__/ssl_.cpython-312.pyc | Bin 0 -> 15383 bytes .../__pycache__/ssl_match_hostname.cpython-312.pyc | Bin 0 -> 5077 bytes .../util/__pycache__/ssltransport.cpython-312.pyc | Bin 0 -> 10778 bytes .../util/__pycache__/timeout.cpython-312.pyc | Bin 0 -> 11145 bytes .../urllib3/util/__pycache__/url.cpython-312.pyc | Bin 0 -> 15801 bytes .../urllib3/util/__pycache__/wait.cpython-312.pyc | Bin 0 -> 4409 bytes .../pip/_vendor/urllib3/util/connection.py | 149 + .../pip/_vendor/urllib3/util/proxy.py | 57 + .../pip/_vendor/urllib3/util/queue.py | 22 + .../pip/_vendor/urllib3/util/request.py | 137 + .../pip/_vendor/urllib3/util/response.py | 107 + .../pip/_vendor/urllib3/util/retry.py | 622 + .../site-packages/pip/_vendor/urllib3/util/ssl_.py | 504 + .../pip/_vendor/urllib3/util/ssl_match_hostname.py | 159 + .../pip/_vendor/urllib3/util/ssltransport.py | 221 + .../pip/_vendor/urllib3/util/timeout.py | 271 + .../site-packages/pip/_vendor/urllib3/util/url.py | 435 + .../site-packages/pip/_vendor/urllib3/util/wait.py | 152 + .../site-packages/pip/_vendor/vendor.txt | 19 + .venv/lib/python3.12/site-packages/pip/py.typed | 4 + .../referencing-0.36.2.dist-info/INSTALLER | 1 + .../referencing-0.36.2.dist-info/METADATA | 64 + .../referencing-0.36.2.dist-info/RECORD | 33 + .../referencing-0.36.2.dist-info/WHEEL | 4 + .../referencing-0.36.2.dist-info/licenses/COPYING | 19 + .../site-packages/referencing/__init__.py | 7 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 413 bytes .../referencing/__pycache__/_attrs.cpython-312.pyc | Bin 0 -> 1449 bytes .../referencing/__pycache__/_core.cpython-312.pyc | Bin 0 -> 30018 bytes .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 6979 bytes .../__pycache__/jsonschema.cpython-312.pyc | Bin 0 -> 17334 bytes .../__pycache__/retrieval.cpython-312.pyc | Bin 0 -> 3442 bytes .../referencing/__pycache__/typing.cpython-312.pyc | Bin 0 -> 2241 bytes .../python3.12/site-packages/referencing/_attrs.py | 31 + .../site-packages/referencing/_attrs.pyi | 20 + .../python3.12/site-packages/referencing/_core.py | 739 + .../site-packages/referencing/exceptions.py | 165 + .../site-packages/referencing/jsonschema.py | 642 + .../python3.12/site-packages/referencing/py.typed | 0 .../site-packages/referencing/retrieval.py | 92 + .../site-packages/referencing/tests/__init__.py | 0 .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 194 bytes .../tests/__pycache__/test_core.cpython-312.pyc | Bin 0 -> 60369 bytes .../__pycache__/test_exceptions.cpython-312.pyc | Bin 0 -> 2774 bytes .../__pycache__/test_jsonschema.cpython-312.pyc | Bin 0 -> 14204 bytes .../test_referencing_suite.cpython-312.pyc | Bin 0 -> 4368 bytes
.../__pycache__/test_retrieval.cpython-312.pyc | Bin 0 -> 6086 bytes .../site-packages/referencing/tests/test_core.py | 1057 + .../referencing/tests/test_exceptions.py | 34 + .../referencing/tests/test_jsonschema.py | 382 + .../referencing/tests/test_referencing_suite.py | 66 + .../referencing/tests/test_retrieval.py | 106 + .../python3.12/site-packages/referencing/typing.py | 61 + .../requests-2.32.5.dist-info/INSTALLER | 1 + .../requests-2.32.5.dist-info/METADATA | 133 + .../site-packages/requests-2.32.5.dist-info/RECORD | 43 + .../requests-2.32.5.dist-info/REQUESTED | 0 .../site-packages/requests-2.32.5.dist-info/WHEEL | 5 + .../requests-2.32.5.dist-info/licenses/LICENSE | 175 + .../requests-2.32.5.dist-info/top_level.txt | 1 + .../python3.12/site-packages/requests/__init__.py | 184 + .../requests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 5414 bytes .../__pycache__/__version__.cpython-312.pyc | Bin 0 -> 578 bytes .../__pycache__/_internal_utils.cpython-312.pyc | Bin 0 -> 2018 bytes .../requests/__pycache__/adapters.cpython-312.pyc | Bin 0 -> 27805 bytes .../requests/__pycache__/api.cpython-312.pyc | Bin 0 -> 7198 bytes .../requests/__pycache__/auth.cpython-312.pyc | Bin 0 -> 13917 bytes .../requests/__pycache__/certs.cpython-312.pyc | Bin 0 -> 660 bytes .../requests/__pycache__/compat.cpython-312.pyc | Bin 0 -> 2374 bytes .../requests/__pycache__/cookies.cpython-312.pyc | Bin 0 -> 25270 bytes .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 7579 bytes .../requests/__pycache__/help.cpython-312.pyc | Bin 0 -> 4321 bytes .../requests/__pycache__/hooks.cpython-312.pyc | Bin 0 -> 1046 bytes .../requests/__pycache__/models.cpython-312.pyc | Bin 0 -> 35495 bytes .../requests/__pycache__/packages.cpython-312.pyc | Bin 0 -> 1133 bytes .../requests/__pycache__/sessions.cpython-312.pyc | Bin 0 -> 27884 bytes .../__pycache__/status_codes.cpython-312.pyc | Bin 0 -> 6025 bytes .../__pycache__/structures.cpython-312.pyc | Bin 0 -> 5611 bytes .../requests/__pycache__/utils.cpython-312.pyc | Bin 0 -> 36172 bytes .../site-packages/requests/__version__.py | 14 + .../site-packages/requests/_internal_utils.py | 50 + .../python3.12/site-packages/requests/adapters.py | 696 + .venv/lib/python3.12/site-packages/requests/api.py | 157 + .../lib/python3.12/site-packages/requests/auth.py | 314 + .../lib/python3.12/site-packages/requests/certs.py | 17 + .../python3.12/site-packages/requests/compat.py | 106 + .../python3.12/site-packages/requests/cookies.py | 561 + .../site-packages/requests/exceptions.py | 151 + .../lib/python3.12/site-packages/requests/help.py | 134 + .../lib/python3.12/site-packages/requests/hooks.py | 33 + .../python3.12/site-packages/requests/models.py | 1039 + .../python3.12/site-packages/requests/packages.py | 23 + .../python3.12/site-packages/requests/sessions.py | 831 + .../site-packages/requests/status_codes.py | 128 + .../site-packages/requests/structures.py | 99 + .../lib/python3.12/site-packages/requests/utils.py | 1086 + .../lib/python3.12/site-packages/rpds/__init__.py | 5 + .../lib/python3.12/site-packages/rpds/__init__.pyi | 77 + .../rpds/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 340 bytes .venv/lib/python3.12/site-packages/rpds/py.typed | 0 .../rpds/rpds.cpython-312-x86_64-linux-gnu.so | Bin 0 -> 1038000 bytes .../rpds_py-0.27.0.dist-info/INSTALLER | 1 + .../rpds_py-0.27.0.dist-info/METADATA | 100 + .../site-packages/rpds_py-0.27.0.dist-info/RECORD | 10 + .../site-packages/rpds_py-0.27.0.dist-info/WHEEL | 4 + .../rpds_py-0.27.0.dist-info/licenses/LICENSE | 19 +
.../typing_extensions-4.14.1.dist-info/INSTALLER | 1 + .../typing_extensions-4.14.1.dist-info/METADATA | 68 + .../typing_extensions-4.14.1.dist-info/RECORD | 7 + .../typing_extensions-4.14.1.dist-info/WHEEL | 4 + .../licenses/LICENSE | 279 + .../python3.12/site-packages/typing_extensions.py | 4244 + .../urllib3-2.5.0.dist-info/INSTALLER | 1 + .../site-packages/urllib3-2.5.0.dist-info/METADATA | 154 + .../site-packages/urllib3-2.5.0.dist-info/RECORD | 79 + .../site-packages/urllib3-2.5.0.dist-info/WHEEL | 4 + .../urllib3-2.5.0.dist-info/licenses/LICENSE.txt | 21 + .../python3.12/site-packages/urllib3/__init__.py | 211 + .../urllib3/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 7309 bytes .../__pycache__/_base_connection.cpython-312.pyc | Bin 0 -> 6859 bytes .../__pycache__/_collections.cpython-312.pyc | Bin 0 -> 22570 bytes .../__pycache__/_request_methods.cpython-312.pyc | Bin 0 -> 10601 bytes .../urllib3/__pycache__/_version.cpython-312.pyc | Bin 0 -> 645 bytes .../urllib3/__pycache__/connection.cpython-312.pyc | Bin 0 -> 38412 bytes .../__pycache__/connectionpool.cpython-312.pyc | Bin 0 -> 39734 bytes .../urllib3/__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 16610 bytes .../urllib3/__pycache__/fields.cpython-312.pyc | Bin 0 -> 12021 bytes .../urllib3/__pycache__/filepost.cpython-312.pyc | Bin 0 -> 3488 bytes .../__pycache__/poolmanager.cpython-312.pyc | Bin 0 -> 24402 bytes .../urllib3/__pycache__/response.cpython-312.pyc | Bin 0 -> 52712 bytes .../site-packages/urllib3/_base_connection.py | 165 + .../site-packages/urllib3/_collections.py | 479 + .../site-packages/urllib3/_request_methods.py | 278 + .../python3.12/site-packages/urllib3/_version.py | 21 + .../python3.12/site-packages/urllib3/connection.py | 1093 + .../site-packages/urllib3/connectionpool.py | 1178 + .../site-packages/urllib3/contrib/__init__.py | 0 .../contrib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 192 bytes .../contrib/__pycache__/pyopenssl.cpython-312.pyc | Bin 0 -> 28215 bytes .../contrib/__pycache__/socks.cpython-312.pyc | Bin 0 -> 8170 bytes .../urllib3/contrib/emscripten/__init__.py | 16 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 900 bytes .../__pycache__/connection.cpython-312.pyc | Bin 0 -> 10241 bytes .../emscripten/__pycache__/fetch.cpython-312.pyc | Bin 0 -> 28631 bytes .../emscripten/__pycache__/request.cpython-312.pyc | Bin 0 -> 1420 bytes .../__pycache__/response.cpython-312.pyc | Bin 0 -> 12214 bytes .../urllib3/contrib/emscripten/connection.py | 255 + .../contrib/emscripten/emscripten_fetch_worker.js | 110 + .../urllib3/contrib/emscripten/fetch.py | 728 + .../urllib3/contrib/emscripten/request.py | 22 + .../urllib3/contrib/emscripten/response.py | 277 + .../site-packages/urllib3/contrib/pyopenssl.py | 564 + .../site-packages/urllib3/contrib/socks.py | 228 + .../python3.12/site-packages/urllib3/exceptions.py | 335 + .../lib/python3.12/site-packages/urllib3/fields.py | 341 + .../python3.12/site-packages/urllib3/filepost.py | 89 + .../site-packages/urllib3/http2/__init__.py | 53 + .../http2/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1745 bytes .../http2/__pycache__/connection.cpython-312.pyc | Bin 0 -> 17052 bytes .../http2/__pycache__/probe.cpython-312.pyc | Bin 0 -> 3696 bytes .../site-packages/urllib3/http2/connection.py | 356 + .../site-packages/urllib3/http2/probe.py | 87 + .../site-packages/urllib3/poolmanager.py | 653 + .../lib/python3.12/site-packages/urllib3/py.typed | 2 + .../python3.12/site-packages/urllib3/response.py | 1307 +
.../site-packages/urllib3/util/__init__.py | 42 + .../util/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1005 bytes .../util/__pycache__/connection.cpython-312.pyc | Bin 0 -> 4695 bytes .../urllib3/util/__pycache__/proxy.cpython-312.pyc | Bin 0 -> 1217 bytes .../util/__pycache__/request.cpython-312.pyc | Bin 0 -> 8317 bytes .../util/__pycache__/response.cpython-312.pyc | Bin 0 -> 2873 bytes .../urllib3/util/__pycache__/retry.cpython-312.pyc | Bin 0 -> 20286 bytes .../urllib3/util/__pycache__/ssl_.cpython-312.pyc | Bin 0 -> 17169 bytes .../__pycache__/ssl_match_hostname.cpython-312.pyc | Bin 0 -> 5557 bytes .../util/__pycache__/ssltransport.cpython-312.pyc | Bin 0 -> 13324 bytes .../util/__pycache__/timeout.cpython-312.pyc | Bin 0 -> 11689 bytes .../urllib3/util/__pycache__/url.cpython-312.pyc | Bin 0 -> 16226 bytes .../urllib3/util/__pycache__/util.cpython-312.pyc | Bin 0 -> 1994 bytes .../urllib3/util/__pycache__/wait.cpython-312.pyc | Bin 0 -> 3440 bytes .../site-packages/urllib3/util/connection.py | 137 + .../python3.12/site-packages/urllib3/util/proxy.py | 43 + .../site-packages/urllib3/util/request.py | 266 + .../site-packages/urllib3/util/response.py | 101 + .../python3.12/site-packages/urllib3/util/retry.py | 533 + .../python3.12/site-packages/urllib3/util/ssl_.py | 524 + .../urllib3/util/ssl_match_hostname.py | 159 + .../site-packages/urllib3/util/ssltransport.py | 271 + .../site-packages/urllib3/util/timeout.py | 275 + .../python3.12/site-packages/urllib3/util/url.py | 469 + .../python3.12/site-packages/urllib3/util/util.py | 42 + .../python3.12/site-packages/urllib3/util/wait.py | 124 + .../site-packages/~ip-24.0.dist-info/AUTHORS.txt | 760 + .../site-packages/~ip-24.0.dist-info/INSTALLER | 1 + .../site-packages/~ip-24.0.dist-info/LICENSE.txt | 20 + .../site-packages/~ip-24.0.dist-info/METADATA | 88 + .../site-packages/~ip-24.0.dist-info/RECORD | 1024 + .../site-packages/~ip-24.0.dist-info/REQUESTED | 0 .../site-packages/~ip-24.0.dist-info/WHEEL | 5 + .../~ip-24.0.dist-info/entry_points.txt | 4 + .../site-packages/~ip-24.0.dist-info/top_level.txt | 1 + .venv/lib/python3.12/site-packages/~ip/__init__.py | 13 + .venv/lib/python3.12/site-packages/~ip/__main__.py | 24 + .../python3.12/site-packages/~ip/__pip-runner__.py | 50 + .../~ip/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 689 bytes .../~ip/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 845 bytes .../~ip/__pycache__/__pip-runner__.cpython-312.pyc | Bin 0 -> 2208 bytes .../site-packages/~ip/_internal/__init__.py | 18 + .../_internal/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 791 bytes .../__pycache__/build_env.cpython-312.pyc | Bin 0 -> 14298 bytes .../_internal/__pycache__/cache.cpython-312.pyc | Bin 0 -> 12669 bytes .../__pycache__/configuration.cpython-312.pyc | Bin 0 -> 17670 bytes .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 33288 bytes .../~ip/_internal/__pycache__/main.cpython-312.pyc | Bin 0 -> 674 bytes .../__pycache__/pyproject.cpython-312.pyc | Bin 0 -> 4975 bytes .../self_outdated_check.cpython-312.pyc | Bin 0 -> 10556 bytes .../__pycache__/wheel_builder.cpython-312.pyc | Bin 0 -> 13653 bytes .../site-packages/~ip/_internal/build_env.py | 311 + .../site-packages/~ip/_internal/cache.py | 290 + .../site-packages/~ip/_internal/cli/__init__.py | 4 + .../cli/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 282 bytes .../cli/__pycache__/autocompletion.cpython-312.pyc | Bin 0 -> 8469 bytes .../cli/__pycache__/base_command.cpython-312.pyc | Bin 0 -> 10459 bytes 
.../cli/__pycache__/cmdoptions.cpython-312.pyc | Bin 0 -> 30378 bytes .../__pycache__/command_context.cpython-312.pyc | Bin 0 -> 1785 bytes .../_internal/cli/__pycache__/main.cpython-312.pyc | Bin 0 -> 2302 bytes .../cli/__pycache__/main_parser.cpython-312.pyc | Bin 0 -> 4909 bytes .../cli/__pycache__/parser.cpython-312.pyc | Bin 0 -> 15026 bytes .../cli/__pycache__/progress_bars.cpython-312.pyc | Bin 0 -> 2624 bytes .../cli/__pycache__/req_command.cpython-312.pyc | Bin 0 -> 18856 bytes .../cli/__pycache__/spinners.cpython-312.pyc | Bin 0 -> 7844 bytes .../cli/__pycache__/status_codes.cpython-312.pyc | Bin 0 -> 379 bytes .../~ip/_internal/cli/autocompletion.py | 172 + .../~ip/_internal/cli/base_command.py | 236 + .../site-packages/~ip/_internal/cli/cmdoptions.py | 1074 + .../~ip/_internal/cli/command_context.py | 27 + .../site-packages/~ip/_internal/cli/main.py | 79 + .../site-packages/~ip/_internal/cli/main_parser.py | 134 + .../site-packages/~ip/_internal/cli/parser.py | 294 + .../~ip/_internal/cli/progress_bars.py | 68 + .../site-packages/~ip/_internal/cli/req_command.py | 505 + .../site-packages/~ip/_internal/cli/spinners.py | 159 + .../~ip/_internal/cli/status_codes.py | 6 + .../~ip/_internal/commands/__init__.py | 132 + .../commands/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4006 bytes .../commands/__pycache__/cache.cpython-312.pyc | Bin 0 -> 9715 bytes .../commands/__pycache__/check.cpython-312.pyc | Bin 0 -> 2094 bytes .../__pycache__/completion.cpython-312.pyc | Bin 0 -> 5196 bytes .../__pycache__/configuration.cpython-312.pyc | Bin 0 -> 13216 bytes .../commands/__pycache__/debug.cpython-312.pyc | Bin 0 -> 10165 bytes .../commands/__pycache__/download.cpython-312.pyc | Bin 0 -> 7593 bytes .../commands/__pycache__/freeze.cpython-312.pyc | Bin 0 -> 4360 bytes .../commands/__pycache__/hash.cpython-312.pyc | Bin 0 -> 2987 bytes .../commands/__pycache__/help.cpython-312.pyc | Bin 0 -> 1677 bytes .../commands/__pycache__/index.cpython-312.pyc | Bin 0 -> 6724 bytes .../commands/__pycache__/inspect.cpython-312.pyc | Bin 0 -> 3979 bytes .../commands/__pycache__/install.cpython-312.pyc | Bin 0 -> 28917 bytes .../commands/__pycache__/list.cpython-312.pyc | Bin 0 -> 15404 bytes .../commands/__pycache__/search.cpython-312.pyc | Bin 0 -> 7625 bytes .../commands/__pycache__/show.cpython-312.pyc | Bin 0 -> 9732 bytes .../commands/__pycache__/uninstall.cpython-312.pyc | Bin 0 -> 4730 bytes .../commands/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 8960 bytes .../site-packages/~ip/_internal/commands/cache.py | 225 + .../site-packages/~ip/_internal/commands/check.py | 54 + .../~ip/_internal/commands/completion.py | 130 + .../~ip/_internal/commands/configuration.py | 280 + .../site-packages/~ip/_internal/commands/debug.py | 201 + .../~ip/_internal/commands/download.py | 147 + .../site-packages/~ip/_internal/commands/freeze.py | 108 + .../site-packages/~ip/_internal/commands/hash.py | 59 + .../site-packages/~ip/_internal/commands/help.py | 41 + .../site-packages/~ip/_internal/commands/index.py | 139 + .../~ip/_internal/commands/inspect.py | 92 + .../~ip/_internal/commands/install.py | 774 + .../site-packages/~ip/_internal/commands/list.py | 368 + .../site-packages/~ip/_internal/commands/search.py | 174 + .../site-packages/~ip/_internal/commands/show.py | 189 + .../~ip/_internal/commands/uninstall.py | 113 + .../site-packages/~ip/_internal/commands/wheel.py | 183 + .../site-packages/~ip/_internal/configuration.py | 383 + .../~ip/_internal/distributions/__init__.py | 21 + 
.../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 945 bytes .../distributions/__pycache__/base.cpython-312.pyc | Bin 0 -> 2866 bytes .../__pycache__/installed.cpython-312.pyc | Bin 0 -> 1704 bytes .../__pycache__/sdist.cpython-312.pyc | Bin 0 -> 8492 bytes .../__pycache__/wheel.cpython-312.pyc | Bin 0 -> 2252 bytes .../~ip/_internal/distributions/base.py | 51 + .../~ip/_internal/distributions/installed.py | 29 + .../~ip/_internal/distributions/sdist.py | 156 + .../~ip/_internal/distributions/wheel.py | 40 + .../site-packages/~ip/_internal/exceptions.py | 728 + .../site-packages/~ip/_internal/index/__init__.py | 2 + .../index/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 236 bytes .../index/__pycache__/collector.cpython-312.pyc | Bin 0 -> 21890 bytes .../__pycache__/package_finder.cpython-312.pyc | Bin 0 -> 40739 bytes .../index/__pycache__/sources.cpython-312.pyc | Bin 0 -> 12608 bytes .../site-packages/~ip/_internal/index/collector.py | 507 + .../~ip/_internal/index/package_finder.py | 1027 + .../site-packages/~ip/_internal/index/sources.py | 285 + .../~ip/_internal/locations/__init__.py | 467 + .../locations/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 16780 bytes .../__pycache__/_distutils.cpython-312.pyc | Bin 0 -> 6860 bytes .../__pycache__/_sysconfig.cpython-312.pyc | Bin 0 -> 8015 bytes .../locations/__pycache__/base.cpython-312.pyc | Bin 0 -> 3785 bytes .../~ip/_internal/locations/_distutils.py | 172 + .../~ip/_internal/locations/_sysconfig.py | 213 + .../site-packages/~ip/_internal/locations/base.py | 81 + .../python3.12/site-packages/~ip/_internal/main.py | 12 + .../~ip/_internal/metadata/__init__.py | 128 + .../metadata/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 5886 bytes .../metadata/__pycache__/_json.cpython-312.pyc | Bin 0 -> 2879 bytes .../metadata/__pycache__/base.cpython-312.pyc | Bin 0 -> 35716 bytes .../__pycache__/pkg_resources.cpython-312.pyc | Bin 0 -> 15794 bytes .../site-packages/~ip/_internal/metadata/_json.py | 84 + .../site-packages/~ip/_internal/metadata/base.py | 702 + .../~ip/_internal/metadata/importlib/__init__.py | 6 + .../importlib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 362 bytes .../importlib/__pycache__/_compat.cpython-312.pyc | Bin 0 -> 3337 bytes .../importlib/__pycache__/_dists.cpython-312.pyc | Bin 0 -> 13429 bytes .../importlib/__pycache__/_envs.cpython-312.pyc | Bin 0 -> 11184 bytes .../~ip/_internal/metadata/importlib/_compat.py | 55 + .../~ip/_internal/metadata/importlib/_dists.py | 227 + .../~ip/_internal/metadata/importlib/_envs.py | 189 + .../~ip/_internal/metadata/pkg_resources.py | 278 + .../site-packages/~ip/_internal/models/__init__.py | 2 + .../models/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 270 bytes .../models/__pycache__/candidate.cpython-312.pyc | Bin 0 -> 1909 bytes .../models/__pycache__/direct_url.cpython-312.pyc | Bin 0 -> 11203 bytes .../__pycache__/format_control.cpython-312.pyc | Bin 0 -> 4231 bytes .../models/__pycache__/index.cpython-312.pyc | Bin 0 -> 1698 bytes .../installation_report.cpython-312.pyc | Bin 0 -> 2276 bytes .../models/__pycache__/link.cpython-312.pyc | Bin 0 -> 26006 bytes .../models/__pycache__/scheme.cpython-312.pyc | Bin 0 -> 1173 bytes .../__pycache__/search_scope.cpython-312.pyc | Bin 0 -> 5092 bytes .../__pycache__/selection_prefs.cpython-312.pyc | Bin 0 -> 1855 bytes .../__pycache__/target_python.cpython-312.pyc | Bin 0 -> 4958 bytes .../models/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 5784 bytes .../~ip/_internal/models/candidate.py | 30 + 
.../~ip/_internal/models/direct_url.py | 235 + .../~ip/_internal/models/format_control.py | 78 + .../site-packages/~ip/_internal/models/index.py | 28 + .../~ip/_internal/models/installation_report.py | 56 + .../site-packages/~ip/_internal/models/link.py | 579 + .../site-packages/~ip/_internal/models/scheme.py | 31 + .../~ip/_internal/models/search_scope.py | 132 + .../~ip/_internal/models/selection_prefs.py | 51 + .../~ip/_internal/models/target_python.py | 122 + .../site-packages/~ip/_internal/models/wheel.py | 92 + .../~ip/_internal/network/__init__.py | 2 + .../network/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 258 bytes .../network/__pycache__/auth.cpython-312.pyc | Bin 0 -> 22000 bytes .../network/__pycache__/cache.cpython-312.pyc | Bin 0 -> 6522 bytes .../network/__pycache__/download.cpython-312.pyc | Bin 0 -> 8557 bytes .../network/__pycache__/lazy_wheel.cpython-312.pyc | Bin 0 -> 11667 bytes .../network/__pycache__/session.cpython-312.pyc | Bin 0 -> 18778 bytes .../network/__pycache__/utils.cpython-312.pyc | Bin 0 -> 2257 bytes .../network/__pycache__/xmlrpc.cpython-312.pyc | Bin 0 -> 2953 bytes .../site-packages/~ip/_internal/network/auth.py | 561 + .../site-packages/~ip/_internal/network/cache.py | 106 + .../~ip/_internal/network/download.py | 186 + .../~ip/_internal/network/lazy_wheel.py | 210 + .../site-packages/~ip/_internal/network/session.py | 520 + .../site-packages/~ip/_internal/network/utils.py | 96 + .../site-packages/~ip/_internal/network/xmlrpc.py | 62 + .../~ip/_internal/operations/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 201 bytes .../operations/__pycache__/check.cpython-312.pyc | Bin 0 -> 7583 bytes .../operations/__pycache__/freeze.cpython-312.pyc | Bin 0 -> 10121 bytes .../operations/__pycache__/prepare.cpython-312.pyc | Bin 0 -> 25751 bytes .../~ip/_internal/operations/build/__init__.py | 0 .../build/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 207 bytes .../__pycache__/build_tracker.cpython-312.pyc | Bin 0 -> 7827 bytes .../build/__pycache__/metadata.cpython-312.pyc | Bin 0 -> 1884 bytes .../__pycache__/metadata_editable.cpython-312.pyc | Bin 0 -> 1918 bytes .../__pycache__/metadata_legacy.cpython-312.pyc | Bin 0 -> 3069 bytes .../build/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 1688 bytes .../__pycache__/wheel_editable.cpython-312.pyc | Bin 0 -> 2029 bytes .../build/__pycache__/wheel_legacy.cpython-312.pyc | Bin 0 -> 3933 bytes .../_internal/operations/build/build_tracker.py | 139 + .../~ip/_internal/operations/build/metadata.py | 39 + .../operations/build/metadata_editable.py | 41 + .../_internal/operations/build/metadata_legacy.py | 74 + .../~ip/_internal/operations/build/wheel.py | 37 + .../_internal/operations/build/wheel_editable.py | 46 + .../~ip/_internal/operations/build/wheel_legacy.py | 102 + .../~ip/_internal/operations/check.py | 187 + .../~ip/_internal/operations/freeze.py | 255 + .../~ip/_internal/operations/install/__init__.py | 2 + .../install/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 270 bytes .../__pycache__/editable_legacy.cpython-312.pyc | Bin 0 -> 1821 bytes .../install/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 33863 bytes .../operations/install/editable_legacy.py | 46 + .../~ip/_internal/operations/install/wheel.py | 734 + .../~ip/_internal/operations/prepare.py | 730 + .../site-packages/~ip/_internal/pyproject.py | 179 + .../site-packages/~ip/_internal/req/__init__.py | 92 + .../req/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 3747 bytes .../req/__pycache__/constructors.cpython-312.pyc | Bin 0 -> 21586 bytes
.../req/__pycache__/req_file.cpython-312.pyc | Bin 0 -> 21465 bytes .../req/__pycache__/req_install.cpython-312.pyc | Bin 0 -> 38418 bytes .../req/__pycache__/req_set.cpython-312.pyc | Bin 0 -> 7222 bytes .../req/__pycache__/req_uninstall.cpython-312.pyc | Bin 0 -> 32981 bytes .../~ip/_internal/req/constructors.py | 576 + .../site-packages/~ip/_internal/req/req_file.py | 554 + .../site-packages/~ip/_internal/req/req_install.py | 923 + .../site-packages/~ip/_internal/req/req_set.py | 119 + .../~ip/_internal/req/req_uninstall.py | 649 + .../~ip/_internal/resolution/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 201 bytes .../resolution/__pycache__/base.cpython-312.pyc | Bin 0 -> 1189 bytes .../site-packages/~ip/_internal/resolution/base.py | 20 + .../~ip/_internal/resolution/legacy/__init__.py | 0 .../legacy/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 208 bytes .../legacy/__pycache__/resolver.cpython-312.pyc | Bin 0 -> 22443 bytes .../~ip/_internal/resolution/legacy/resolver.py | 598 + .../_internal/resolution/resolvelib/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 212 bytes .../resolvelib/__pycache__/base.cpython-312.pyc | Bin 0 -> 8341 bytes .../__pycache__/candidates.cpython-312.pyc | Bin 0 -> 30402 bytes .../resolvelib/__pycache__/factory.cpython-312.pyc | Bin 0 -> 32118 bytes .../__pycache__/found_candidates.cpython-312.pyc | Bin 0 -> 6212 bytes .../__pycache__/provider.cpython-312.pyc | Bin 0 -> 10382 bytes .../__pycache__/reporter.cpython-312.pyc | Bin 0 -> 4939 bytes .../__pycache__/requirements.cpython-312.pyc | Bin 0 -> 11433 bytes .../__pycache__/resolver.cpython-312.pyc | Bin 0 -> 12355 bytes .../~ip/_internal/resolution/resolvelib/base.py | 141 + .../_internal/resolution/resolvelib/candidates.py | 597 + .../~ip/_internal/resolution/resolvelib/factory.py | 812 + .../resolution/resolvelib/found_candidates.py | 155 + .../_internal/resolution/resolvelib/provider.py | 255 + .../_internal/resolution/resolvelib/reporter.py | 80 + .../resolution/resolvelib/requirements.py | 166 + .../_internal/resolution/resolvelib/resolver.py | 317 + .../~ip/_internal/self_outdated_check.py | 248 + .../site-packages/~ip/_internal/utils/__init__.py | 0 .../utils/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 196 bytes .../utils/__pycache__/_jaraco_text.cpython-312.pyc | Bin 0 -> 4537 bytes .../utils/__pycache__/_log.cpython-312.pyc | Bin 0 -> 1867 bytes .../utils/__pycache__/appdirs.cpython-312.pyc | Bin 0 -> 2411 bytes .../utils/__pycache__/compat.cpython-312.pyc | Bin 0 -> 2214 bytes .../__pycache__/compatibility_tags.cpython-312.pyc | Bin 0 -> 5562 bytes .../utils/__pycache__/datetime.cpython-312.pyc | Bin 0 -> 685 bytes .../utils/__pycache__/deprecation.cpython-312.pyc | Bin 0 -> 4187 bytes .../__pycache__/direct_url_helpers.cpython-312.pyc | Bin 0 -> 3564 bytes .../utils/__pycache__/egg_link.cpython-312.pyc | Bin 0 -> 3227 bytes .../utils/__pycache__/encoding.cpython-312.pyc | Bin 0 -> 2159 bytes .../utils/__pycache__/entrypoints.cpython-312.pyc | Bin 0 -> 3994 bytes .../utils/__pycache__/filesystem.cpython-312.pyc | Bin 0 -> 7459 bytes .../utils/__pycache__/filetypes.cpython-312.pyc | Bin 0 -> 1165 bytes .../utils/__pycache__/glibc.cpython-312.pyc | Bin 0 -> 2343 bytes .../utils/__pycache__/hashes.cpython-312.pyc | Bin 0 -> 7555 bytes .../utils/__pycache__/logging.cpython-312.pyc | Bin 0 -> 13558 bytes .../utils/__pycache__/misc.cpython-312.pyc | Bin 0 -> 34122 bytes .../utils/__pycache__/models.cpython-312.pyc | Bin 0 -> 2713 bytes
.../utils/__pycache__/packaging.cpython-312.pyc | Bin 0 -> 2584 bytes .../__pycache__/setuptools_build.cpython-312.pyc | Bin 0 -> 4551 bytes .../utils/__pycache__/subprocess.cpython-312.pyc | Bin 0 -> 8719 bytes .../utils/__pycache__/temp_dir.cpython-312.pyc | Bin 0 -> 12063 bytes .../utils/__pycache__/unpacking.cpython-312.pyc | Bin 0 -> 11109 bytes .../utils/__pycache__/urls.cpython-312.pyc | Bin 0 -> 2406 bytes .../utils/__pycache__/virtualenv.cpython-312.pyc | Bin 0 -> 4481 bytes .../utils/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 5927 bytes .../~ip/_internal/utils/_jaraco_text.py | 109 + .../site-packages/~ip/_internal/utils/_log.py | 38 + .../site-packages/~ip/_internal/utils/appdirs.py | 52 + .../site-packages/~ip/_internal/utils/compat.py | 63 + .../~ip/_internal/utils/compatibility_tags.py | 165 + .../site-packages/~ip/_internal/utils/datetime.py | 11 + .../~ip/_internal/utils/deprecation.py | 120 + .../~ip/_internal/utils/direct_url_helpers.py | 87 + .../site-packages/~ip/_internal/utils/egg_link.py | 80 + .../site-packages/~ip/_internal/utils/encoding.py | 36 + .../~ip/_internal/utils/entrypoints.py | 84 + .../~ip/_internal/utils/filesystem.py | 153 + .../site-packages/~ip/_internal/utils/filetypes.py | 27 + .../site-packages/~ip/_internal/utils/glibc.py | 88 + .../site-packages/~ip/_internal/utils/hashes.py | 151 + .../site-packages/~ip/_internal/utils/logging.py | 348 + .../site-packages/~ip/_internal/utils/misc.py | 783 + .../site-packages/~ip/_internal/utils/models.py | 39 + .../site-packages/~ip/_internal/utils/packaging.py | 57 + .../~ip/_internal/utils/setuptools_build.py | 146 + .../~ip/_internal/utils/subprocess.py | 260 + .../site-packages/~ip/_internal/utils/temp_dir.py | 296 + .../site-packages/~ip/_internal/utils/unpacking.py | 257 + .../site-packages/~ip/_internal/utils/urls.py | 62 + .../~ip/_internal/utils/virtualenv.py | 104 + .../site-packages/~ip/_internal/utils/wheel.py | 134 + .../site-packages/~ip/_internal/vcs/__init__.py | 15 + .../vcs/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 535 bytes .../vcs/__pycache__/bazaar.cpython-312.pyc | Bin 0 -> 5027 bytes .../_internal/vcs/__pycache__/git.cpython-312.pyc | Bin 0 -> 18996 bytes .../vcs/__pycache__/mercurial.cpython-312.pyc | Bin 0 -> 7616 bytes .../vcs/__pycache__/subversion.cpython-312.pyc | Bin 0 -> 12488 bytes .../vcs/__pycache__/versioncontrol.cpython-312.pyc | Bin 0 -> 29014 bytes .../site-packages/~ip/_internal/vcs/bazaar.py | 112 + .../site-packages/~ip/_internal/vcs/git.py | 526 + .../site-packages/~ip/_internal/vcs/mercurial.py | 163 + .../site-packages/~ip/_internal/vcs/subversion.py | 324 + .../~ip/_internal/vcs/versioncontrol.py | 705 + .../site-packages/~ip/_internal/wheel_builder.py | 354 + .../site-packages/~ip/_vendor/__init__.py | 121 + .../_vendor/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4697 bytes .../~ip/_vendor/__pycache__/six.cpython-312.pyc | Bin 0 -> 41274 bytes .../__pycache__/typing_extensions.cpython-312.pyc | Bin 0 -> 122054 bytes .../~ip/_vendor/cachecontrol/__init__.py | 28 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 907 bytes .../cachecontrol/__pycache__/_cmd.cpython-312.pyc | Bin 0 -> 2651 bytes .../__pycache__/adapter.cpython-312.pyc | Bin 0 -> 6469 bytes .../cachecontrol/__pycache__/cache.cpython-312.pyc | Bin 0 -> 3814 bytes .../__pycache__/controller.cpython-312.pyc | Bin 0 -> 16172 bytes .../__pycache__/filewrapper.cpython-312.pyc | Bin 0 -> 4352 bytes .../__pycache__/heuristics.cpython-312.pyc | Bin 0 -> 6699 bytes
.../__pycache__/serialize.cpython-312.pyc | Bin 0 -> 6410 bytes .../__pycache__/wrapper.cpython-312.pyc | Bin 0 -> 1679 bytes .../site-packages/~ip/_vendor/cachecontrol/_cmd.py | 70 + .../~ip/_vendor/cachecontrol/adapter.py | 161 + .../~ip/_vendor/cachecontrol/cache.py | 74 + .../~ip/_vendor/cachecontrol/caches/__init__.py | 8 + .../caches/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 440 bytes .../caches/__pycache__/file_cache.cpython-312.pyc | Bin 0 -> 7715 bytes .../caches/__pycache__/redis_cache.cpython-312.pyc | Bin 0 -> 2743 bytes .../~ip/_vendor/cachecontrol/caches/file_cache.py | 181 + .../~ip/_vendor/cachecontrol/caches/redis_cache.py | 48 + .../~ip/_vendor/cachecontrol/controller.py | 494 + .../~ip/_vendor/cachecontrol/filewrapper.py | 119 + .../~ip/_vendor/cachecontrol/heuristics.py | 154 + .../~ip/_vendor/cachecontrol/py.typed | 0 .../~ip/_vendor/cachecontrol/serialize.py | 206 + .../~ip/_vendor/cachecontrol/wrapper.py | 43 + .../site-packages/~ip/_vendor/certifi/__init__.py | 4 + .../site-packages/~ip/_vendor/certifi/__main__.py | 12 + .../certifi/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 323 bytes .../certifi/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 650 bytes .../certifi/__pycache__/core.cpython-312.pyc | Bin 0 -> 2854 bytes .../site-packages/~ip/_vendor/certifi/cacert.pem | 4635 + .../site-packages/~ip/_vendor/certifi/core.py | 108 + .../site-packages/~ip/_vendor/certifi/py.typed | 0 .../site-packages/~ip/_vendor/chardet/__init__.py | 115 + .../chardet/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4573 bytes .../chardet/__pycache__/big5freq.cpython-312.pyc | Bin 0 -> 27204 bytes .../chardet/__pycache__/big5prober.cpython-312.pyc | Bin 0 -> 1392 bytes .../__pycache__/chardistribution.cpython-312.pyc | Bin 0 -> 9643 bytes .../__pycache__/charsetgroupprober.cpython-312.pyc | Bin 0 -> 4127 bytes .../__pycache__/charsetprober.cpython-312.pyc | Bin 0 -> 5023 bytes .../__pycache__/codingstatemachine.cpython-312.pyc | Bin 0 -> 3883 bytes .../codingstatemachinedict.cpython-312.pyc | Bin 0 -> 794 bytes .../__pycache__/cp949prober.cpython-312.pyc | Bin 0 -> 1401 bytes .../chardet/__pycache__/enums.cpython-312.pyc | Bin 0 -> 3001 bytes .../chardet/__pycache__/escprober.cpython-312.pyc | Bin 0 -> 4571 bytes .../chardet/__pycache__/escsm.cpython-312.pyc | Bin 0 -> 15315 bytes .../__pycache__/eucjpprober.cpython-312.pyc | Bin 0 -> 4388 bytes .../chardet/__pycache__/euckrfreq.cpython-312.pyc | Bin 0 -> 12087 bytes .../__pycache__/euckrprober.cpython-312.pyc | Bin 0 -> 1395 bytes .../chardet/__pycache__/euctwfreq.cpython-312.pyc | Bin 0 -> 27209 bytes .../__pycache__/euctwprober.cpython-312.pyc | Bin 0 -> 1395 bytes .../chardet/__pycache__/gb2312freq.cpython-312.pyc | Bin 0 -> 19131 bytes .../__pycache__/gb2312prober.cpython-312.pyc | Bin 0 -> 1408 bytes .../__pycache__/hebrewprober.cpython-312.pyc | Bin 0 -> 5827 bytes .../chardet/__pycache__/jisfreq.cpython-312.pyc | Bin 0 -> 22160 bytes .../chardet/__pycache__/johabfreq.cpython-312.pyc | Bin 0 -> 83008 bytes .../__pycache__/johabprober.cpython-312.pyc | Bin 0 -> 1399 bytes .../chardet/__pycache__/jpcntx.cpython-312.pyc | Bin 0 -> 39554 bytes .../__pycache__/langbulgarianmodel.cpython-312.pyc | Bin 0 -> 83127 bytes .../__pycache__/langgreekmodel.cpython-312.pyc | Bin 0 -> 76993 bytes .../__pycache__/langhebrewmodel.cpython-312.pyc | Bin 0 -> 77504 bytes .../__pycache__/langhungarianmodel.cpython-312.pyc | Bin 0 -> 83081 bytes .../__pycache__/langrussianmodel.cpython-312.pyc | Bin 0 -> 105256 bytes 
.../__pycache__/langthaimodel.cpython-312.pyc | Bin 0 -> 77682 bytes .../__pycache__/langturkishmodel.cpython-312.pyc | Bin 0 -> 77521 bytes .../__pycache__/latin1prober.cpython-312.pyc | Bin 0 -> 7007 bytes .../__pycache__/macromanprober.cpython-312.pyc | Bin 0 -> 7187 bytes .../__pycache__/mbcharsetprober.cpython-312.pyc | Bin 0 -> 3908 bytes .../__pycache__/mbcsgroupprober.cpython-312.pyc | Bin 0 -> 1593 bytes .../chardet/__pycache__/mbcssm.cpython-312.pyc | Bin 0 -> 38650 bytes .../chardet/__pycache__/resultdict.cpython-312.pyc | Bin 0 -> 637 bytes .../__pycache__/sbcharsetprober.cpython-312.pyc | Bin 0 -> 6392 bytes .../__pycache__/sbcsgroupprober.cpython-312.pyc | Bin 0 -> 2362 bytes .../chardet/__pycache__/sjisprober.cpython-312.pyc | Bin 0 -> 4500 bytes .../__pycache__/universaldetector.cpython-312.pyc | Bin 0 -> 12274 bytes .../__pycache__/utf1632prober.cpython-312.pyc | Bin 0 -> 9984 bytes .../chardet/__pycache__/utf8prober.cpython-312.pyc | Bin 0 -> 3180 bytes .../chardet/__pycache__/version.cpython-312.pyc | Bin 0 -> 493 bytes .../site-packages/~ip/_vendor/chardet/big5freq.py | 386 + .../~ip/_vendor/chardet/big5prober.py | 47 + .../~ip/_vendor/chardet/chardistribution.py | 261 + .../~ip/_vendor/chardet/charsetgroupprober.py | 106 + .../~ip/_vendor/chardet/charsetprober.py | 147 + .../~ip/_vendor/chardet/cli/__init__.py | 0 .../cli/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 200 bytes .../cli/__pycache__/chardetect.cpython-312.pyc | Bin 0 -> 4017 bytes .../~ip/_vendor/chardet/cli/chardetect.py | 112 + .../~ip/_vendor/chardet/codingstatemachine.py | 90 + .../~ip/_vendor/chardet/codingstatemachinedict.py | 19 + .../~ip/_vendor/chardet/cp949prober.py | 49 + .../site-packages/~ip/_vendor/chardet/enums.py | 85 + .../site-packages/~ip/_vendor/chardet/escprober.py | 102 + .../site-packages/~ip/_vendor/chardet/escsm.py | 261 + .../~ip/_vendor/chardet/eucjpprober.py | 102 + .../site-packages/~ip/_vendor/chardet/euckrfreq.py | 196 + .../~ip/_vendor/chardet/euckrprober.py | 47 + .../site-packages/~ip/_vendor/chardet/euctwfreq.py | 388 + .../~ip/_vendor/chardet/euctwprober.py | 47 + .../~ip/_vendor/chardet/gb2312freq.py | 284 + .../~ip/_vendor/chardet/gb2312prober.py | 47 + .../~ip/_vendor/chardet/hebrewprober.py | 316 + .../site-packages/~ip/_vendor/chardet/jisfreq.py | 325 + .../site-packages/~ip/_vendor/chardet/johabfreq.py | 2382 + .../~ip/_vendor/chardet/johabprober.py | 47 + .../site-packages/~ip/_vendor/chardet/jpcntx.py | 238 + .../~ip/_vendor/chardet/langbulgarianmodel.py | 4649 + .../~ip/_vendor/chardet/langgreekmodel.py | 4397 + .../~ip/_vendor/chardet/langhebrewmodel.py | 4380 + .../~ip/_vendor/chardet/langhungarianmodel.py | 4649 + .../~ip/_vendor/chardet/langrussianmodel.py | 5725 + .../~ip/_vendor/chardet/langthaimodel.py | 4380 + .../~ip/_vendor/chardet/langturkishmodel.py | 4380 + .../~ip/_vendor/chardet/latin1prober.py | 147 + .../~ip/_vendor/chardet/macromanprober.py | 162 + .../~ip/_vendor/chardet/mbcharsetprober.py | 95 + .../~ip/_vendor/chardet/mbcsgroupprober.py | 57 + .../site-packages/~ip/_vendor/chardet/mbcssm.py | 661 + .../~ip/_vendor/chardet/metadata/__init__.py | 0 .../metadata/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 205 bytes .../metadata/__pycache__/languages.cpython-312.pyc | Bin 0 -> 9760 bytes .../~ip/_vendor/chardet/metadata/languages.py | 352 + .../site-packages/~ip/_vendor/chardet/py.typed | 0 .../~ip/_vendor/chardet/resultdict.py | 16 + .../~ip/_vendor/chardet/sbcharsetprober.py | 162 + .../~ip/_vendor/chardet/sbcsgroupprober.py | 88 + 
.../~ip/_vendor/chardet/sjisprober.py | 105 + .../~ip/_vendor/chardet/universaldetector.py | 362 + .../~ip/_vendor/chardet/utf1632prober.py | 225 + .../~ip/_vendor/chardet/utf8prober.py | 82 + .../site-packages/~ip/_vendor/chardet/version.py | 9 + .../site-packages/~ip/_vendor/colorama/__init__.py | 7 + .../colorama/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 497 bytes .../colorama/__pycache__/ansi.cpython-312.pyc | Bin 0 -> 3955 bytes .../__pycache__/ansitowin32.cpython-312.pyc | Bin 0 -> 16426 bytes .../__pycache__/initialise.cpython-312.pyc | Bin 0 -> 3555 bytes .../colorama/__pycache__/win32.cpython-312.pyc | Bin 0 -> 8131 bytes .../colorama/__pycache__/winterm.cpython-312.pyc | Bin 0 -> 9093 bytes .../site-packages/~ip/_vendor/colorama/ansi.py | 102 + .../~ip/_vendor/colorama/ansitowin32.py | 277 + .../~ip/_vendor/colorama/initialise.py | 121 + .../~ip/_vendor/colorama/tests/__init__.py | 1 + .../tests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 203 bytes .../tests/__pycache__/ansi_test.cpython-312.pyc | Bin 0 -> 5472 bytes .../__pycache__/ansitowin32_test.cpython-312.pyc | Bin 0 -> 18108 bytes .../__pycache__/initialise_test.cpython-312.pyc | Bin 0 -> 11753 bytes .../tests/__pycache__/isatty_test.cpython-312.pyc | Bin 0 -> 4909 bytes .../tests/__pycache__/utils.cpython-312.pyc | Bin 0 -> 2493 bytes .../tests/__pycache__/winterm_test.cpython-312.pyc | Bin 0 -> 6617 bytes .../~ip/_vendor/colorama/tests/ansi_test.py | 76 + .../~ip/_vendor/colorama/tests/ansitowin32_test.py | 294 + .../~ip/_vendor/colorama/tests/initialise_test.py | 189 + .../~ip/_vendor/colorama/tests/isatty_test.py | 57 + .../~ip/_vendor/colorama/tests/utils.py | 49 + .../~ip/_vendor/colorama/tests/winterm_test.py | 131 + .../site-packages/~ip/_vendor/colorama/win32.py | 180 + .../site-packages/~ip/_vendor/colorama/winterm.py | 195 + .../site-packages/~ip/_vendor/distlib/__init__.py | 33 + .../distlib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1274 bytes .../distlib/__pycache__/compat.cpython-312.pyc | Bin 0 -> 45610 bytes .../distlib/__pycache__/database.cpython-312.pyc | Bin 0 -> 66032 bytes .../distlib/__pycache__/index.cpython-312.pyc | Bin 0 -> 24371 bytes .../distlib/__pycache__/locators.cpython-312.pyc | Bin 0 -> 60163 bytes .../distlib/__pycache__/manifest.cpython-312.pyc | Bin 0 -> 15130 bytes .../distlib/__pycache__/markers.cpython-312.pyc | Bin 0 -> 7687 bytes .../distlib/__pycache__/metadata.cpython-312.pyc | Bin 0 -> 41804 bytes .../distlib/__pycache__/resources.cpython-312.pyc | Bin 0 -> 17330 bytes .../distlib/__pycache__/scripts.cpython-312.pyc | Bin 0 -> 19585 bytes .../distlib/__pycache__/util.cpython-312.pyc | Bin 0 -> 88261 bytes .../distlib/__pycache__/version.cpython-312.pyc | Bin 0 -> 30371 bytes .../distlib/__pycache__/wheel.cpython-312.pyc | Bin 0 -> 51866 bytes .../site-packages/~ip/_vendor/distlib/compat.py | 1138 + .../site-packages/~ip/_vendor/distlib/database.py | 1359 + .../site-packages/~ip/_vendor/distlib/index.py | 508 + .../site-packages/~ip/_vendor/distlib/locators.py | 1303 + .../site-packages/~ip/_vendor/distlib/manifest.py | 384 + .../site-packages/~ip/_vendor/distlib/markers.py | 167 + .../site-packages/~ip/_vendor/distlib/metadata.py | 1068 + .../site-packages/~ip/_vendor/distlib/resources.py | 358 + .../site-packages/~ip/_vendor/distlib/scripts.py | 452 + .../site-packages/~ip/_vendor/distlib/t32.exe | Bin 0 -> 97792 bytes .../site-packages/~ip/_vendor/distlib/t64-arm.exe | Bin 0 -> 182784 bytes .../site-packages/~ip/_vendor/distlib/t64.exe | Bin 0 -> 108032 bytes 
.../site-packages/~ip/_vendor/distlib/util.py | 2025 + .../site-packages/~ip/_vendor/distlib/version.py | 751 + .../site-packages/~ip/_vendor/distlib/w32.exe | Bin 0 -> 91648 bytes .../site-packages/~ip/_vendor/distlib/w64-arm.exe | Bin 0 -> 168448 bytes .../site-packages/~ip/_vendor/distlib/w64.exe | Bin 0 -> 101888 bytes .../site-packages/~ip/_vendor/distlib/wheel.py | 1099 + .../site-packages/~ip/_vendor/distro/__init__.py | 54 + .../site-packages/~ip/_vendor/distro/__main__.py | 4 + .../distro/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 965 bytes .../distro/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 297 bytes .../distro/__pycache__/distro.cpython-312.pyc | Bin 0 -> 53759 bytes .../site-packages/~ip/_vendor/distro/distro.py | 1399 + .../site-packages/~ip/_vendor/distro/py.typed | 0 .../site-packages/~ip/_vendor/idna/__init__.py | 44 + .../idna/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 886 bytes .../_vendor/idna/__pycache__/codec.cpython-312.pyc | Bin 0 -> 4638 bytes .../idna/__pycache__/compat.cpython-312.pyc | Bin 0 -> 892 bytes .../_vendor/idna/__pycache__/core.cpython-312.pyc | Bin 0 -> 16287 bytes .../idna/__pycache__/idnadata.cpython-312.pyc | Bin 0 -> 38387 bytes .../idna/__pycache__/intranges.cpython-312.pyc | Bin 0 -> 2643 bytes .../idna/__pycache__/package_data.cpython-312.pyc | Bin 0 -> 221 bytes .../idna/__pycache__/uts46data.cpython-312.pyc | Bin 0 -> 158875 bytes .../site-packages/~ip/_vendor/idna/codec.py | 112 + .../site-packages/~ip/_vendor/idna/compat.py | 13 + .../site-packages/~ip/_vendor/idna/core.py | 400 + .../site-packages/~ip/_vendor/idna/idnadata.py | 2151 + .../site-packages/~ip/_vendor/idna/intranges.py | 54 + .../site-packages/~ip/_vendor/idna/package_data.py | 2 + .../site-packages/~ip/_vendor/idna/py.typed | 0 .../site-packages/~ip/_vendor/idna/uts46data.py | 8600 ++ .../site-packages/~ip/_vendor/msgpack/__init__.py | 57 + .../msgpack/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1836 bytes .../msgpack/__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 2030 bytes .../msgpack/__pycache__/ext.cpython-312.pyc | Bin 0 -> 8673 bytes .../msgpack/__pycache__/fallback.cpython-312.pyc | Bin 0 -> 43581 bytes .../~ip/_vendor/msgpack/exceptions.py | 48 + .../site-packages/~ip/_vendor/msgpack/ext.py | 193 + .../site-packages/~ip/_vendor/msgpack/fallback.py | 1010 + .../~ip/_vendor/packaging/__about__.py | 26 + .../~ip/_vendor/packaging/__init__.py | 25 + .../__pycache__/__about__.cpython-312.pyc | Bin 0 -> 635 bytes .../packaging/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 471 bytes .../__pycache__/_manylinux.cpython-312.pyc | Bin 0 -> 12081 bytes .../__pycache__/_musllinux.cpython-312.pyc | Bin 0 -> 6915 bytes .../__pycache__/_structures.cpython-312.pyc | Bin 0 -> 3246 bytes .../packaging/__pycache__/markers.cpython-312.pyc | Bin 0 -> 14063 bytes .../__pycache__/requirements.cpython-312.pyc | Bin 0 -> 6951 bytes .../__pycache__/specifiers.cpython-312.pyc | Bin 0 -> 31252 bytes .../packaging/__pycache__/tags.cpython-312.pyc | Bin 0 -> 18961 bytes .../packaging/__pycache__/utils.cpython-312.pyc | Bin 0 -> 5873 bytes .../packaging/__pycache__/version.cpython-312.pyc | Bin 0 -> 19944 bytes .../~ip/_vendor/packaging/_manylinux.py | 301 + .../~ip/_vendor/packaging/_musllinux.py | 136 + .../~ip/_vendor/packaging/_structures.py | 61 + .../site-packages/~ip/_vendor/packaging/markers.py | 304 + .../site-packages/~ip/_vendor/packaging/py.typed | 0 .../~ip/_vendor/packaging/requirements.py | 146 + .../~ip/_vendor/packaging/specifiers.py | 802 + 
.../site-packages/~ip/_vendor/packaging/tags.py | 487 + .../site-packages/~ip/_vendor/packaging/utils.py | 136 + .../site-packages/~ip/_vendor/packaging/version.py | 504 + .../~ip/_vendor/pkg_resources/__init__.py | 3361 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 146479 bytes .../~ip/_vendor/platformdirs/__init__.py | 566 + .../~ip/_vendor/platformdirs/__main__.py | 53 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 18034 bytes .../__pycache__/__main__.cpython-312.pyc | Bin 0 -> 1951 bytes .../__pycache__/android.cpython-312.pyc | Bin 0 -> 9449 bytes .../platformdirs/__pycache__/api.cpython-312.pyc | Bin 0 -> 9677 bytes .../platformdirs/__pycache__/macos.cpython-312.pyc | Bin 0 -> 5642 bytes .../platformdirs/__pycache__/unix.cpython-312.pyc | Bin 0 -> 12446 bytes .../__pycache__/version.cpython-312.pyc | Bin 0 -> 316 bytes .../__pycache__/windows.cpython-312.pyc | Bin 0 -> 13004 bytes .../~ip/_vendor/platformdirs/android.py | 210 + .../site-packages/~ip/_vendor/platformdirs/api.py | 223 + .../~ip/_vendor/platformdirs/macos.py | 91 + .../~ip/_vendor/platformdirs/py.typed | 0 .../site-packages/~ip/_vendor/platformdirs/unix.py | 223 + .../~ip/_vendor/platformdirs/version.py | 4 + .../~ip/_vendor/platformdirs/windows.py | 255 + .../site-packages/~ip/_vendor/pygments/__init__.py | 82 + .../site-packages/~ip/_vendor/pygments/__main__.py | 17 + .../pygments/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 3494 bytes .../pygments/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 740 bytes .../pygments/__pycache__/cmdline.cpython-312.pyc | Bin 0 -> 26611 bytes .../pygments/__pycache__/console.cpython-312.pyc | Bin 0 -> 2632 bytes .../pygments/__pycache__/filter.cpython-312.pyc | Bin 0 -> 3238 bytes .../pygments/__pycache__/formatter.cpython-312.pyc | Bin 0 -> 4575 bytes .../pygments/__pycache__/lexer.cpython-312.pyc | Bin 0 -> 38335 bytes .../pygments/__pycache__/modeline.cpython-312.pyc | Bin 0 -> 1574 bytes .../pygments/__pycache__/plugin.cpython-312.pyc | Bin 0 -> 3402 bytes .../pygments/__pycache__/regexopt.cpython-312.pyc | Bin 0 -> 4087 bytes .../pygments/__pycache__/scanner.cpython-312.pyc | Bin 0 -> 4762 bytes .../pygments/__pycache__/sphinxext.cpython-312.pyc | Bin 0 -> 11052 bytes .../pygments/__pycache__/style.cpython-312.pyc | Bin 0 -> 6680 bytes .../pygments/__pycache__/token.cpython-312.pyc | Bin 0 -> 8148 bytes .../pygments/__pycache__/unistring.cpython-312.pyc | Bin 0 -> 32994 bytes .../pygments/__pycache__/util.cpython-312.pyc | Bin 0 -> 13987 bytes .../site-packages/~ip/_vendor/pygments/cmdline.py | 668 + .../site-packages/~ip/_vendor/pygments/console.py | 70 + .../site-packages/~ip/_vendor/pygments/filter.py | 71 + .../~ip/_vendor/pygments/filters/__init__.py | 940 + .../filters/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 37942 bytes .../~ip/_vendor/pygments/formatter.py | 124 + .../~ip/_vendor/pygments/formatters/__init__.py | 158 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 6932 bytes .../__pycache__/_mapping.cpython-312.pyc | Bin 0 -> 4221 bytes .../formatters/__pycache__/bbcode.cpython-312.pyc | Bin 0 -> 4200 bytes .../formatters/__pycache__/groff.cpython-312.pyc | Bin 0 -> 7270 bytes .../formatters/__pycache__/html.cpython-312.pyc | Bin 0 -> 40578 bytes .../formatters/__pycache__/img.cpython-312.pyc | Bin 0 -> 27049 bytes .../formatters/__pycache__/irc.cpython-312.pyc | Bin 0 -> 6071 bytes .../formatters/__pycache__/latex.cpython-312.pyc | Bin 0 -> 19960 bytes .../formatters/__pycache__/other.cpython-312.pyc | Bin 0 -> 6890 bytes 
.../__pycache__/pangomarkup.cpython-312.pyc | Bin 0 -> 2936 bytes .../formatters/__pycache__/rtf.cpython-312.pyc | Bin 0 -> 6132 bytes .../formatters/__pycache__/svg.cpython-312.pyc | Bin 0 -> 9072 bytes .../__pycache__/terminal.cpython-312.pyc | Bin 0 -> 5835 bytes .../__pycache__/terminal256.cpython-312.pyc | Bin 0 -> 15163 bytes .../~ip/_vendor/pygments/formatters/_mapping.py | 23 + .../~ip/_vendor/pygments/formatters/bbcode.py | 108 + .../~ip/_vendor/pygments/formatters/groff.py | 170 + .../~ip/_vendor/pygments/formatters/html.py | 989 + .../~ip/_vendor/pygments/formatters/img.py | 645 + .../~ip/_vendor/pygments/formatters/irc.py | 154 + .../~ip/_vendor/pygments/formatters/latex.py | 521 + .../~ip/_vendor/pygments/formatters/other.py | 161 + .../~ip/_vendor/pygments/formatters/pangomarkup.py | 83 + .../~ip/_vendor/pygments/formatters/rtf.py | 146 + .../~ip/_vendor/pygments/formatters/svg.py | 188 + .../~ip/_vendor/pygments/formatters/terminal.py | 127 + .../~ip/_vendor/pygments/formatters/terminal256.py | 338 + .../site-packages/~ip/_vendor/pygments/lexer.py | 943 + .../~ip/_vendor/pygments/lexers/__init__.py | 362 + .../lexers/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 14658 bytes .../lexers/__pycache__/_mapping.cpython-312.pyc | Bin 0 -> 64410 bytes .../lexers/__pycache__/python.cpython-312.pyc | Bin 0 -> 42645 bytes .../~ip/_vendor/pygments/lexers/_mapping.py | 559 + .../~ip/_vendor/pygments/lexers/python.py | 1198 + .../site-packages/~ip/_vendor/pygments/modeline.py | 43 + .../site-packages/~ip/_vendor/pygments/plugin.py | 88 + .../site-packages/~ip/_vendor/pygments/regexopt.py | 91 + .../site-packages/~ip/_vendor/pygments/scanner.py | 104 + .../~ip/_vendor/pygments/sphinxext.py | 217 + .../site-packages/~ip/_vendor/pygments/style.py | 197 + .../~ip/_vendor/pygments/styles/__init__.py | 103 + .../styles/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 4454 bytes .../site-packages/~ip/_vendor/pygments/token.py | 213 + .../~ip/_vendor/pygments/unistring.py | 153 + .../site-packages/~ip/_vendor/pygments/util.py | 330 + .../~ip/_vendor/pyparsing/__init__.py | 322 + .../pyparsing/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 7917 bytes .../pyparsing/__pycache__/actions.cpython-312.pyc | Bin 0 -> 8401 bytes .../pyparsing/__pycache__/common.cpython-312.pyc | Bin 0 -> 13420 bytes .../pyparsing/__pycache__/core.cpython-312.pyc | Bin 0 -> 267714 bytes .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 13000 bytes .../pyparsing/__pycache__/helpers.cpython-312.pyc | Bin 0 -> 48507 bytes .../pyparsing/__pycache__/results.cpython-312.pyc | Bin 0 -> 34116 bytes .../pyparsing/__pycache__/testing.cpython-312.pyc | Bin 0 -> 17194 bytes .../pyparsing/__pycache__/unicode.cpython-312.pyc | Bin 0 -> 13190 bytes .../pyparsing/__pycache__/util.cpython-312.pyc | Bin 0 -> 14910 bytes .../site-packages/~ip/_vendor/pyparsing/actions.py | 217 + .../site-packages/~ip/_vendor/pyparsing/common.py | 432 + .../site-packages/~ip/_vendor/pyparsing/core.py | 6115 ++ .../~ip/_vendor/pyparsing/diagram/__init__.py | 656 + .../diagram/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 26819 bytes .../~ip/_vendor/pyparsing/exceptions.py | 299 + .../site-packages/~ip/_vendor/pyparsing/helpers.py | 1100 + .../site-packages/~ip/_vendor/pyparsing/py.typed | 0 .../site-packages/~ip/_vendor/pyparsing/results.py | 796 + .../site-packages/~ip/_vendor/pyparsing/testing.py | 331 + .../site-packages/~ip/_vendor/pyparsing/unicode.py | 361 + .../site-packages/~ip/_vendor/pyparsing/util.py | 284 + 
.../~ip/_vendor/pyproject_hooks/__init__.py | 23 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 619 bytes .../__pycache__/_compat.cpython-312.pyc | Bin 0 -> 380 bytes .../__pycache__/_impl.cpython-312.pyc | Bin 0 -> 14731 bytes .../~ip/_vendor/pyproject_hooks/_compat.py | 8 + .../~ip/_vendor/pyproject_hooks/_impl.py | 330 + .../pyproject_hooks/_in_process/__init__.py | 18 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1086 bytes .../__pycache__/_in_process.cpython-312.pyc | Bin 0 -> 14403 bytes .../pyproject_hooks/_in_process/_in_process.py | 353 + .../site-packages/~ip/_vendor/requests/__init__.py | 182 + .../requests/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 5459 bytes .../__pycache__/__version__.cpython-312.pyc | Bin 0 -> 590 bytes .../__pycache__/_internal_utils.cpython-312.pyc | Bin 0 -> 2030 bytes .../requests/__pycache__/adapters.cpython-312.pyc | Bin 0 -> 21286 bytes .../requests/__pycache__/api.cpython-312.pyc | Bin 0 -> 7210 bytes .../requests/__pycache__/auth.cpython-312.pyc | Bin 0 -> 13929 bytes .../requests/__pycache__/certs.cpython-312.pyc | Bin 0 -> 928 bytes .../requests/__pycache__/compat.cpython-312.pyc | Bin 0 -> 1513 bytes .../requests/__pycache__/cookies.cpython-312.pyc | Bin 0 -> 25252 bytes .../__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 7053 bytes .../requests/__pycache__/help.cpython-312.pyc | Bin 0 -> 4318 bytes .../requests/__pycache__/hooks.cpython-312.pyc | Bin 0 -> 1058 bytes .../requests/__pycache__/models.cpython-312.pyc | Bin 0 -> 35454 bytes .../requests/__pycache__/packages.cpython-312.pyc | Bin 0 -> 778 bytes .../requests/__pycache__/sessions.cpython-312.pyc | Bin 0 -> 27763 bytes .../__pycache__/status_codes.cpython-312.pyc | Bin 0 -> 5965 bytes .../__pycache__/structures.cpython-312.pyc | Bin 0 -> 5623 bytes .../requests/__pycache__/utils.cpython-312.pyc | Bin 0 -> 36275 bytes .../~ip/_vendor/requests/__version__.py | 14 + .../~ip/_vendor/requests/_internal_utils.py | 50 + .../site-packages/~ip/_vendor/requests/adapters.py | 538 + .../site-packages/~ip/_vendor/requests/api.py | 157 + .../site-packages/~ip/_vendor/requests/auth.py | 315 + .../site-packages/~ip/_vendor/requests/certs.py | 24 + .../site-packages/~ip/_vendor/requests/compat.py | 67 + .../site-packages/~ip/_vendor/requests/cookies.py | 561 + .../~ip/_vendor/requests/exceptions.py | 141 + .../site-packages/~ip/_vendor/requests/help.py | 131 + .../site-packages/~ip/_vendor/requests/hooks.py | 33 + .../site-packages/~ip/_vendor/requests/models.py | 1034 + .../site-packages/~ip/_vendor/requests/packages.py | 16 + .../site-packages/~ip/_vendor/requests/sessions.py | 833 + .../~ip/_vendor/requests/status_codes.py | 128 + .../~ip/_vendor/requests/structures.py | 99 + .../site-packages/~ip/_vendor/requests/utils.py | 1094 + .../~ip/_vendor/resolvelib/__init__.py | 26 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 640 bytes .../__pycache__/providers.cpython-312.pyc | Bin 0 -> 6857 bytes .../__pycache__/reporters.cpython-312.pyc | Bin 0 -> 2660 bytes .../__pycache__/resolvers.cpython-312.pyc | Bin 0 -> 25903 bytes .../resolvelib/__pycache__/structs.cpython-312.pyc | Bin 0 -> 10512 bytes .../~ip/_vendor/resolvelib/compat/__init__.py | 0 .../compat/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 206 bytes .../__pycache__/collections_abc.cpython-312.pyc | Bin 0 -> 426 bytes .../_vendor/resolvelib/compat/collections_abc.py | 6 + .../~ip/_vendor/resolvelib/providers.py | 133 + .../site-packages/~ip/_vendor/resolvelib/py.typed | 0 .../~ip/_vendor/resolvelib/reporters.py | 
43 + .../~ip/_vendor/resolvelib/resolvers.py | 547 + .../~ip/_vendor/resolvelib/structs.py | 170 + .../site-packages/~ip/_vendor/rich/__init__.py | 177 + .../site-packages/~ip/_vendor/rich/__main__.py | 274 + .../rich/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 7021 bytes .../rich/__pycache__/__main__.cpython-312.pyc | Bin 0 -> 10310 bytes .../rich/__pycache__/_cell_widths.cpython-312.pyc | Bin 0 -> 7827 bytes .../rich/__pycache__/_emoji_codes.cpython-312.pyc | Bin 0 -> 205982 bytes .../__pycache__/_emoji_replace.cpython-312.pyc | Bin 0 -> 1735 bytes .../__pycache__/_export_format.cpython-312.pyc | Bin 0 -> 2327 bytes .../rich/__pycache__/_extension.cpython-312.pyc | Bin 0 -> 543 bytes .../rich/__pycache__/_fileno.cpython-312.pyc | Bin 0 -> 861 bytes .../rich/__pycache__/_inspect.cpython-312.pyc | Bin 0 -> 12083 bytes .../rich/__pycache__/_log_render.cpython-312.pyc | Bin 0 -> 4153 bytes .../_vendor/rich/__pycache__/_loop.cpython-312.pyc | Bin 0 -> 1891 bytes .../rich/__pycache__/_null_file.cpython-312.pyc | Bin 0 -> 3626 bytes .../rich/__pycache__/_palettes.cpython-312.pyc | Bin 0 -> 5166 bytes .../_vendor/rich/__pycache__/_pick.cpython-312.pyc | Bin 0 -> 732 bytes .../rich/__pycache__/_ratio.cpython-312.pyc | Bin 0 -> 6585 bytes .../rich/__pycache__/_spinners.cpython-312.pyc | Bin 0 -> 13185 bytes .../rich/__pycache__/_stack.cpython-312.pyc | Bin 0 -> 971 bytes .../rich/__pycache__/_timer.cpython-312.pyc | Bin 0 -> 871 bytes .../__pycache__/_win32_console.cpython-312.pyc | Bin 0 -> 28982 bytes .../rich/__pycache__/_windows.cpython-312.pyc | Bin 0 -> 2496 bytes .../__pycache__/_windows_renderer.cpython-312.pyc | Bin 0 -> 3579 bytes .../_vendor/rich/__pycache__/_wrap.cpython-312.pyc | Bin 0 -> 2366 bytes .../_vendor/rich/__pycache__/abc.cpython-312.pyc | Bin 0 -> 1614 bytes .../_vendor/rich/__pycache__/align.cpython-312.pyc | Bin 0 -> 12328 bytes .../_vendor/rich/__pycache__/ansi.cpython-312.pyc | Bin 0 -> 9112 bytes .../_vendor/rich/__pycache__/bar.cpython-312.pyc | Bin 0 -> 4278 bytes .../_vendor/rich/__pycache__/box.cpython-312.pyc | Bin 0 -> 11864 bytes .../_vendor/rich/__pycache__/cells.cpython-312.pyc | Bin 0 -> 5624 bytes .../_vendor/rich/__pycache__/color.cpython-312.pyc | Bin 0 -> 26576 bytes .../rich/__pycache__/color_triplet.cpython-312.pyc | Bin 0 -> 1707 bytes .../rich/__pycache__/columns.cpython-312.pyc | Bin 0 -> 8593 bytes .../rich/__pycache__/console.cpython-312.pyc | Bin 0 -> 113799 bytes .../rich/__pycache__/constrain.cpython-312.pyc | Bin 0 -> 2264 bytes .../rich/__pycache__/containers.cpython-312.pyc | Bin 0 -> 9232 bytes .../rich/__pycache__/control.cpython-312.pyc | Bin 0 -> 10935 bytes .../__pycache__/default_styles.cpython-312.pyc | Bin 0 -> 10379 bytes .../rich/__pycache__/diagnose.cpython-312.pyc | Bin 0 -> 1493 bytes .../_vendor/rich/__pycache__/emoji.cpython-312.pyc | Bin 0 -> 4215 bytes .../rich/__pycache__/errors.cpython-312.pyc | Bin 0 -> 1851 bytes .../rich/__pycache__/file_proxy.cpython-312.pyc | Bin 0 -> 3583 bytes .../rich/__pycache__/filesize.cpython-312.pyc | Bin 0 -> 3088 bytes .../rich/__pycache__/highlighter.cpython-312.pyc | Bin 0 -> 9904 bytes .../_vendor/rich/__pycache__/json.cpython-312.pyc | Bin 0 -> 6041 bytes .../rich/__pycache__/jupyter.cpython-312.pyc | Bin 0 -> 5215 bytes .../rich/__pycache__/layout.cpython-312.pyc | Bin 0 -> 20226 bytes .../_vendor/rich/__pycache__/live.cpython-312.pyc | Bin 0 -> 19149 bytes .../rich/__pycache__/live_render.cpython-312.pyc | Bin 0 -> 4900 bytes .../rich/__pycache__/logging.cpython-312.pyc | Bin 0 -> 
13560 bytes .../rich/__pycache__/markup.cpython-312.pyc | Bin 0 -> 9304 bytes .../rich/__pycache__/measure.cpython-312.pyc | Bin 0 -> 6382 bytes .../rich/__pycache__/padding.cpython-312.pyc | Bin 0 -> 7140 bytes .../_vendor/rich/__pycache__/pager.cpython-312.pyc | Bin 0 -> 1826 bytes .../rich/__pycache__/palette.cpython-312.pyc | Bin 0 -> 5320 bytes .../_vendor/rich/__pycache__/panel.cpython-312.pyc | Bin 0 -> 12103 bytes .../rich/__pycache__/pretty.cpython-312.pyc | Bin 0 -> 40062 bytes .../rich/__pycache__/progress.cpython-312.pyc | Bin 0 -> 75084 bytes .../rich/__pycache__/progress_bar.cpython-312.pyc | Bin 0 -> 10395 bytes .../rich/__pycache__/prompt.cpython-312.pyc | Bin 0 -> 14787 bytes .../rich/__pycache__/protocol.cpython-312.pyc | Bin 0 -> 1798 bytes .../rich/__pycache__/region.cpython-312.pyc | Bin 0 -> 573 bytes .../_vendor/rich/__pycache__/repr.cpython-312.pyc | Bin 0 -> 6632 bytes .../_vendor/rich/__pycache__/rule.cpython-312.pyc | Bin 0 -> 6574 bytes .../_vendor/rich/__pycache__/scope.cpython-312.pyc | Bin 0 -> 3836 bytes .../rich/__pycache__/screen.cpython-312.pyc | Bin 0 -> 2490 bytes .../rich/__pycache__/segment.cpython-312.pyc | Bin 0 -> 28167 bytes .../rich/__pycache__/spinner.cpython-312.pyc | Bin 0 -> 6070 bytes .../rich/__pycache__/status.cpython-312.pyc | Bin 0 -> 6074 bytes .../_vendor/rich/__pycache__/style.cpython-312.pyc | Bin 0 -> 33520 bytes .../rich/__pycache__/styled.cpython-312.pyc | Bin 0 -> 2145 bytes .../rich/__pycache__/syntax.cpython-312.pyc | Bin 0 -> 39618 bytes .../_vendor/rich/__pycache__/table.cpython-312.pyc | Bin 0 -> 43590 bytes .../__pycache__/terminal_theme.cpython-312.pyc | Bin 0 -> 3354 bytes .../_vendor/rich/__pycache__/text.cpython-312.pyc | Bin 0 -> 58969 bytes .../_vendor/rich/__pycache__/theme.cpython-312.pyc | Bin 0 -> 6346 bytes .../rich/__pycache__/themes.cpython-312.pyc | Bin 0 -> 320 bytes .../rich/__pycache__/traceback.cpython-312.pyc | Bin 0 -> 31554 bytes .../_vendor/rich/__pycache__/tree.cpython-312.pyc | Bin 0 -> 11445 bytes .../site-packages/~ip/_vendor/rich/_cell_widths.py | 451 + .../site-packages/~ip/_vendor/rich/_emoji_codes.py | 3610 + .../~ip/_vendor/rich/_emoji_replace.py | 32 + .../~ip/_vendor/rich/_export_format.py | 76 + .../site-packages/~ip/_vendor/rich/_extension.py | 10 + .../site-packages/~ip/_vendor/rich/_fileno.py | 24 + .../site-packages/~ip/_vendor/rich/_inspect.py | 270 + .../site-packages/~ip/_vendor/rich/_log_render.py | 94 + .../site-packages/~ip/_vendor/rich/_loop.py | 43 + .../site-packages/~ip/_vendor/rich/_null_file.py | 69 + .../site-packages/~ip/_vendor/rich/_palettes.py | 309 + .../site-packages/~ip/_vendor/rich/_pick.py | 17 + .../site-packages/~ip/_vendor/rich/_ratio.py | 160 + .../site-packages/~ip/_vendor/rich/_spinners.py | 482 + .../site-packages/~ip/_vendor/rich/_stack.py | 16 + .../site-packages/~ip/_vendor/rich/_timer.py | 19 + .../~ip/_vendor/rich/_win32_console.py | 662 + .../site-packages/~ip/_vendor/rich/_windows.py | 72 + .../~ip/_vendor/rich/_windows_renderer.py | 56 + .../site-packages/~ip/_vendor/rich/_wrap.py | 56 + .../site-packages/~ip/_vendor/rich/abc.py | 33 + .../site-packages/~ip/_vendor/rich/align.py | 311 + .../site-packages/~ip/_vendor/rich/ansi.py | 240 + .../site-packages/~ip/_vendor/rich/bar.py | 94 + .../site-packages/~ip/_vendor/rich/box.py | 517 + .../site-packages/~ip/_vendor/rich/cells.py | 154 + .../site-packages/~ip/_vendor/rich/color.py | 622 + .../~ip/_vendor/rich/color_triplet.py | 38 + .../site-packages/~ip/_vendor/rich/columns.py | 187 + 
.../site-packages/~ip/_vendor/rich/console.py | 2633 + .../site-packages/~ip/_vendor/rich/constrain.py | 37 + .../site-packages/~ip/_vendor/rich/containers.py | 167 + .../site-packages/~ip/_vendor/rich/control.py | 225 + .../~ip/_vendor/rich/default_styles.py | 190 + .../site-packages/~ip/_vendor/rich/diagnose.py | 37 + .../site-packages/~ip/_vendor/rich/emoji.py | 96 + .../site-packages/~ip/_vendor/rich/errors.py | 34 + .../site-packages/~ip/_vendor/rich/file_proxy.py | 57 + .../site-packages/~ip/_vendor/rich/filesize.py | 89 + .../site-packages/~ip/_vendor/rich/highlighter.py | 232 + .../site-packages/~ip/_vendor/rich/json.py | 140 + .../site-packages/~ip/_vendor/rich/jupyter.py | 101 + .../site-packages/~ip/_vendor/rich/layout.py | 443 + .../site-packages/~ip/_vendor/rich/live.py | 375 + .../site-packages/~ip/_vendor/rich/live_render.py | 113 + .../site-packages/~ip/_vendor/rich/logging.py | 289 + .../site-packages/~ip/_vendor/rich/markup.py | 246 + .../site-packages/~ip/_vendor/rich/measure.py | 151 + .../site-packages/~ip/_vendor/rich/padding.py | 141 + .../site-packages/~ip/_vendor/rich/pager.py | 34 + .../site-packages/~ip/_vendor/rich/palette.py | 100 + .../site-packages/~ip/_vendor/rich/panel.py | 308 + .../site-packages/~ip/_vendor/rich/pretty.py | 994 + .../site-packages/~ip/_vendor/rich/progress.py | 1702 + .../site-packages/~ip/_vendor/rich/progress_bar.py | 224 + .../site-packages/~ip/_vendor/rich/prompt.py | 376 + .../site-packages/~ip/_vendor/rich/protocol.py | 42 + .../site-packages/~ip/_vendor/rich/py.typed | 0 .../site-packages/~ip/_vendor/rich/region.py | 10 + .../site-packages/~ip/_vendor/rich/repr.py | 149 + .../site-packages/~ip/_vendor/rich/rule.py | 130 + .../site-packages/~ip/_vendor/rich/scope.py | 86 + .../site-packages/~ip/_vendor/rich/screen.py | 54 + .../site-packages/~ip/_vendor/rich/segment.py | 739 + .../site-packages/~ip/_vendor/rich/spinner.py | 137 + .../site-packages/~ip/_vendor/rich/status.py | 132 + .../site-packages/~ip/_vendor/rich/style.py | 796 + .../site-packages/~ip/_vendor/rich/styled.py | 42 + .../site-packages/~ip/_vendor/rich/syntax.py | 948 + .../site-packages/~ip/_vendor/rich/table.py | 1002 + .../~ip/_vendor/rich/terminal_theme.py | 153 + .../site-packages/~ip/_vendor/rich/text.py | 1307 + .../site-packages/~ip/_vendor/rich/theme.py | 115 + .../site-packages/~ip/_vendor/rich/themes.py | 5 + .../site-packages/~ip/_vendor/rich/traceback.py | 756 + .../site-packages/~ip/_vendor/rich/tree.py | 251 + .../python3.12/site-packages/~ip/_vendor/six.py | 998 + .../site-packages/~ip/_vendor/tenacity/__init__.py | 608 + .../tenacity/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 27092 bytes .../tenacity/__pycache__/_asyncio.cpython-312.pyc | Bin 0 -> 4812 bytes .../tenacity/__pycache__/_utils.cpython-312.pyc | Bin 0 -> 2321 bytes .../tenacity/__pycache__/after.cpython-312.pyc | Bin 0 -> 1630 bytes .../tenacity/__pycache__/before.cpython-312.pyc | Bin 0 -> 1470 bytes .../__pycache__/before_sleep.cpython-312.pyc | Bin 0 -> 2308 bytes .../tenacity/__pycache__/nap.cpython-312.pyc | Bin 0 -> 1418 bytes .../tenacity/__pycache__/retry.cpython-312.pyc | Bin 0 -> 14287 bytes .../tenacity/__pycache__/stop.cpython-312.pyc | Bin 0 -> 5574 bytes .../__pycache__/tornadoweb.cpython-312.pyc | Bin 0 -> 2592 bytes .../tenacity/__pycache__/wait.cpython-312.pyc | Bin 0 -> 12419 bytes .../site-packages/~ip/_vendor/tenacity/_asyncio.py | 94 + .../site-packages/~ip/_vendor/tenacity/_utils.py | 76 + .../site-packages/~ip/_vendor/tenacity/after.py | 51 + 
.../site-packages/~ip/_vendor/tenacity/before.py | 46 + .../~ip/_vendor/tenacity/before_sleep.py | 71 + .../site-packages/~ip/_vendor/tenacity/nap.py | 43 + .../site-packages/~ip/_vendor/tenacity/py.typed | 0 .../site-packages/~ip/_vendor/tenacity/retry.py | 272 + .../site-packages/~ip/_vendor/tenacity/stop.py | 103 + .../~ip/_vendor/tenacity/tornadoweb.py | 59 + .../site-packages/~ip/_vendor/tenacity/wait.py | 228 + .../site-packages/~ip/_vendor/tomli/__init__.py | 11 + .../tomli/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 390 bytes .../tomli/__pycache__/_parser.cpython-312.pyc | Bin 0 -> 26933 bytes .../_vendor/tomli/__pycache__/_re.cpython-312.pyc | Bin 0 -> 3914 bytes .../tomli/__pycache__/_types.cpython-312.pyc | Bin 0 -> 372 bytes .../site-packages/~ip/_vendor/tomli/_parser.py | 691 + .../site-packages/~ip/_vendor/tomli/_re.py | 107 + .../site-packages/~ip/_vendor/tomli/_types.py | 10 + .../site-packages/~ip/_vendor/tomli/py.typed | 1 + .../~ip/_vendor/truststore/__init__.py | 13 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 624 bytes .../truststore/__pycache__/_api.cpython-312.pyc | Bin 0 -> 15803 bytes .../truststore/__pycache__/_macos.cpython-312.pyc | Bin 0 -> 16668 bytes .../__pycache__/_openssl.cpython-312.pyc | Bin 0 -> 2221 bytes .../__pycache__/_ssl_constants.cpython-312.pyc | Bin 0 -> 1105 bytes .../__pycache__/_windows.cpython-312.pyc | Bin 0 -> 15512 bytes .../site-packages/~ip/_vendor/truststore/_api.py | 302 + .../site-packages/~ip/_vendor/truststore/_macos.py | 501 + .../~ip/_vendor/truststore/_openssl.py | 66 + .../~ip/_vendor/truststore/_ssl_constants.py | 31 + .../~ip/_vendor/truststore/_windows.py | 554 + .../site-packages/~ip/_vendor/truststore/py.typed | 0 .../site-packages/~ip/_vendor/typing_extensions.py | 3072 + .../site-packages/~ip/_vendor/urllib3/__init__.py | 102 + .../urllib3/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 3411 bytes .../__pycache__/_collections.cpython-312.pyc | Bin 0 -> 15937 bytes .../urllib3/__pycache__/_version.cpython-312.pyc | Bin 0 -> 224 bytes .../urllib3/__pycache__/connection.cpython-312.pyc | Bin 0 -> 20413 bytes .../__pycache__/connectionpool.cpython-312.pyc | Bin 0 -> 36285 bytes .../urllib3/__pycache__/exceptions.cpython-312.pyc | Bin 0 -> 13499 bytes .../urllib3/__pycache__/fields.cpython-312.pyc | Bin 0 -> 10415 bytes .../urllib3/__pycache__/filepost.cpython-312.pyc | Bin 0 -> 4024 bytes .../__pycache__/poolmanager.cpython-312.pyc | Bin 0 -> 20308 bytes .../urllib3/__pycache__/request.cpython-312.pyc | Bin 0 -> 7300 bytes .../urllib3/__pycache__/response.cpython-312.pyc | Bin 0 -> 33974 bytes .../~ip/_vendor/urllib3/_collections.py | 337 + .../site-packages/~ip/_vendor/urllib3/_version.py | 2 + .../~ip/_vendor/urllib3/connection.py | 572 + .../~ip/_vendor/urllib3/connectionpool.py | 1132 + .../~ip/_vendor/urllib3/contrib/__init__.py | 0 .../contrib/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 204 bytes .../__pycache__/_appengine_environ.cpython-312.pyc | Bin 0 -> 1854 bytes .../contrib/__pycache__/appengine.cpython-312.pyc | Bin 0 -> 11570 bytes .../contrib/__pycache__/ntlmpool.cpython-312.pyc | Bin 0 -> 5725 bytes .../contrib/__pycache__/pyopenssl.cpython-312.pyc | Bin 0 -> 24456 bytes .../__pycache__/securetransport.cpython-312.pyc | Bin 0 -> 35562 bytes .../contrib/__pycache__/socks.cpython-312.pyc | Bin 0 -> 7517 bytes .../_vendor/urllib3/contrib/_appengine_environ.py | 36 + .../urllib3/contrib/_securetransport/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 221 bytes 
.../__pycache__/bindings.cpython-312.pyc | Bin 0 -> 17433 bytes .../__pycache__/low_level.cpython-312.pyc | Bin 0 -> 14807 bytes .../urllib3/contrib/_securetransport/bindings.py | 519 + .../urllib3/contrib/_securetransport/low_level.py | 397 + .../~ip/_vendor/urllib3/contrib/appengine.py | 314 + .../~ip/_vendor/urllib3/contrib/ntlmpool.py | 130 + .../~ip/_vendor/urllib3/contrib/pyopenssl.py | 518 + .../~ip/_vendor/urllib3/contrib/securetransport.py | 921 + .../~ip/_vendor/urllib3/contrib/socks.py | 216 + .../~ip/_vendor/urllib3/exceptions.py | 323 + .../site-packages/~ip/_vendor/urllib3/fields.py | 274 + .../site-packages/~ip/_vendor/urllib3/filepost.py | 98 + .../~ip/_vendor/urllib3/packages/__init__.py | 0 .../packages/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 205 bytes .../packages/__pycache__/six.cpython-312.pyc | Bin 0 -> 41325 bytes .../_vendor/urllib3/packages/backports/__init__.py | 0 .../backports/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 215 bytes .../backports/__pycache__/makefile.cpython-312.pyc | Bin 0 -> 1831 bytes .../__pycache__/weakref_finalize.cpython-312.pyc | Bin 0 -> 7337 bytes .../_vendor/urllib3/packages/backports/makefile.py | 51 + .../urllib3/packages/backports/weakref_finalize.py | 155 + .../~ip/_vendor/urllib3/packages/six.py | 1076 + .../~ip/_vendor/urllib3/poolmanager.py | 537 + .../site-packages/~ip/_vendor/urllib3/request.py | 191 + .../site-packages/~ip/_vendor/urllib3/response.py | 879 + .../~ip/_vendor/urllib3/util/__init__.py | 49 + .../util/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1152 bytes .../util/__pycache__/connection.cpython-312.pyc | Bin 0 -> 4762 bytes .../urllib3/util/__pycache__/proxy.cpython-312.pyc | Bin 0 -> 1558 bytes .../urllib3/util/__pycache__/queue.cpython-312.pyc | Bin 0 -> 1358 bytes .../util/__pycache__/request.cpython-312.pyc | Bin 0 -> 4189 bytes .../util/__pycache__/response.cpython-312.pyc | Bin 0 -> 2995 bytes .../urllib3/util/__pycache__/retry.cpython-312.pyc | Bin 0 -> 21703 bytes .../urllib3/util/__pycache__/ssl_.cpython-312.pyc | Bin 0 -> 15109 bytes .../__pycache__/ssl_match_hostname.cpython-312.pyc | Bin 0 -> 5077 bytes .../util/__pycache__/ssltransport.cpython-312.pyc | Bin 0 -> 10778 bytes .../util/__pycache__/timeout.cpython-312.pyc | Bin 0 -> 11145 bytes .../urllib3/util/__pycache__/url.cpython-312.pyc | Bin 0 -> 15801 bytes .../urllib3/util/__pycache__/wait.cpython-312.pyc | Bin 0 -> 4409 bytes .../~ip/_vendor/urllib3/util/connection.py | 149 + .../~ip/_vendor/urllib3/util/proxy.py | 57 + .../~ip/_vendor/urllib3/util/queue.py | 22 + .../~ip/_vendor/urllib3/util/request.py | 137 + .../~ip/_vendor/urllib3/util/response.py | 107 + .../~ip/_vendor/urllib3/util/retry.py | 620 + .../site-packages/~ip/_vendor/urllib3/util/ssl_.py | 495 + .../~ip/_vendor/urllib3/util/ssl_match_hostname.py | 159 + .../~ip/_vendor/urllib3/util/ssltransport.py | 221 + .../~ip/_vendor/urllib3/util/timeout.py | 271 + .../site-packages/~ip/_vendor/urllib3/util/url.py | 435 + .../site-packages/~ip/_vendor/urllib3/util/wait.py | 152 + .../site-packages/~ip/_vendor/vendor.txt | 24 + .../~ip/_vendor/webencodings/__init__.py | 342 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 12007 bytes .../__pycache__/labels.cpython-312.pyc | Bin 0 -> 7138 bytes .../__pycache__/mklabels.cpython-312.pyc | Bin 0 -> 2705 bytes .../webencodings/__pycache__/tests.cpython-312.pyc | Bin 0 -> 9257 bytes .../__pycache__/x_user_defined.cpython-312.pyc | Bin 0 -> 3301 bytes .../~ip/_vendor/webencodings/labels.py | 231 + 
.../~ip/_vendor/webencodings/mklabels.py | 59 +
.../~ip/_vendor/webencodings/tests.py | 153 +
.../~ip/_vendor/webencodings/x_user_defined.py | 325 +
.venv/lib/python3.12/site-packages/~ip/py.typed | 4 +
.venv/lib64 | 1 +
.venv/pyvenv.cfg | 5 +
README.md | 23 -
data/analysis/course_only.json | 5879 +
data/analysis/none.json | 12536 +++
data/analysis/remaining.json | 10847 ++
data/catalog_2025_fall.json | 5 -
data/communities.json | 11605 ++
data/courses.json | 29438 +++++
data/courses_parsed.json | 101437 ++++++++++++++++++
data/courses_test.json | 950 +
data/graph.json | 42566 ++++++++
data/graph_reduced.json | 49655 +++++++++
data/parsed/course_only_parsed.json | 14011 +++
data/parsed/course_only_unparsed.json | 1 +
data/positions.json | 20494 ++++
data/positions_drl.json | 20494 ++++
data/positions_smacof.json | 20494 ++++
data/prereq_analysis.json | 236 +
index.html | 12 -
package-lock.json | 1791 -
package.json | 31 -
requirements.txt | 6 +
schema/course.schema.json | 16 +
.../parse_course_prereqs.cpython-312.pyc | Bin 0 -> 10335 bytes
scripts/analyze_prereqs.py | 120 +
scripts/build_final_parsed.py | 107 +
scripts/build_graph_assets.py | 359 +
scripts/fetch_uiuc_courses.py | 230 +
scripts/parse_course_prereqs.py | 190 +
scripts/reduce_and_cluster.py | 153 +
scripts/scrape.js | 63 -
scripts/validate_courses.py | 31 +
src/App.tsx | 12 -
src/components/Graph.tsx | 49 -
src/global.d.ts | 2 -
src/hooks/useCourseData.ts | 14 -
src/main.tsx | 9 -
tsconfig.json | 16 -
vite.config.ts | 7 -
web/index.html | 82 +
web/index.js | 249 +
web/styles.css | 11 +
4804 files changed, 1259709 insertions(+), 2075 deletions(-)
create mode 100644 .github/workflows/build-dataset.yml
delete mode 100644 .github/workflows/update-and-deploy.yml
delete mode 100644 .gitignore
create mode 100644 .venv/bin/Activate.ps1
create mode 100644 .venv/bin/activate
create mode 100644 .venv/bin/activate.csh
create mode 100644 .venv/bin/activate.fish
create mode 100755 .venv/bin/f2py
create mode 100755 .venv/bin/jsonschema
create mode 100755 .venv/bin/normalizer
create mode 100755 .venv/bin/numpy-config
create mode 120000 .venv/bin/python
create mode 120000 .venv/bin/python3
create mode 120000 .venv/bin/python3.12
create mode 100644 .venv/lib/python3.12/site-packages/__pycache__/typing_extensions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/attr/__init__.pyi
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/_cmp.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/_compat.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/_config.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/_funcs.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/_make.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/_next_gen.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/_version_info.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/converters.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/filters.cpython-312.pyc
create mode 100644
.venv/lib/python3.12/site-packages/attr/__pycache__/setters.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/attr/__pycache__/validators.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/attr/_cmp.py create mode 100644 .venv/lib/python3.12/site-packages/attr/_cmp.pyi create mode 100644 .venv/lib/python3.12/site-packages/attr/_compat.py create mode 100644 .venv/lib/python3.12/site-packages/attr/_config.py create mode 100644 .venv/lib/python3.12/site-packages/attr/_funcs.py create mode 100644 .venv/lib/python3.12/site-packages/attr/_make.py create mode 100644 .venv/lib/python3.12/site-packages/attr/_next_gen.py create mode 100644 .venv/lib/python3.12/site-packages/attr/_typing_compat.pyi create mode 100644 .venv/lib/python3.12/site-packages/attr/_version_info.py create mode 100644 .venv/lib/python3.12/site-packages/attr/_version_info.pyi create mode 100644 .venv/lib/python3.12/site-packages/attr/converters.py create mode 100644 .venv/lib/python3.12/site-packages/attr/converters.pyi create mode 100644 .venv/lib/python3.12/site-packages/attr/exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/attr/exceptions.pyi create mode 100644 .venv/lib/python3.12/site-packages/attr/filters.py create mode 100644 .venv/lib/python3.12/site-packages/attr/filters.pyi create mode 100644 .venv/lib/python3.12/site-packages/attr/py.typed create mode 100644 .venv/lib/python3.12/site-packages/attr/setters.py create mode 100644 .venv/lib/python3.12/site-packages/attr/setters.pyi create mode 100644 .venv/lib/python3.12/site-packages/attr/validators.py create mode 100644 .venv/lib/python3.12/site-packages/attr/validators.pyi create mode 100644 .venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/INSTALLER create mode 100644 .venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/METADATA create mode 100644 .venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/RECORD create mode 100644 .venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/WHEEL create mode 100644 .venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/licenses/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/attrs/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/attrs/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/attrs/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/attrs/__pycache__/converters.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/attrs/__pycache__/exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/attrs/__pycache__/filters.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/attrs/__pycache__/setters.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/attrs/__pycache__/validators.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/attrs/converters.py create mode 100644 .venv/lib/python3.12/site-packages/attrs/exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/attrs/filters.py create mode 100644 .venv/lib/python3.12/site-packages/attrs/py.typed create mode 100644 .venv/lib/python3.12/site-packages/attrs/setters.py create mode 100644 .venv/lib/python3.12/site-packages/attrs/validators.py create mode 100644 .venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/INSTALLER create mode 100644 .venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/METADATA create mode 100644 .venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/RECORD create 
mode 100644 .venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/licenses/LICENSE
create mode 100644 .venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/top_level.txt
create mode 100644 .venv/lib/python3.12/site-packages/certifi/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/certifi/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/certifi/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/certifi/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/certifi/__pycache__/core.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/certifi/cacert.pem
create mode 100644 .venv/lib/python3.12/site-packages/certifi/core.py
create mode 100644 .venv/lib/python3.12/site-packages/certifi/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/entry_points.txt
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/licenses/LICENSE
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/top_level.txt
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/api.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/cd.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/constant.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/legacy.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/md.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/models.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/version.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/api.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/cd.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/cli/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/cli/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/constant.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/legacy.py
create mode 100755 .venv/lib/python3.12/site-packages/charset_normalizer/md.cpython-312-x86_64-linux-gnu.so
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/md.py
create mode 100755 .venv/lib/python3.12/site-packages/charset_normalizer/md__mypyc.cpython-312-x86_64-linux-gnu.so
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/models.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/utils.py
create mode 100644 .venv/lib/python3.12/site-packages/charset_normalizer/version.py
create mode 100644 .venv/lib/python3.12/site-packages/idna-3.10.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/idna-3.10.dist-info/LICENSE.md
create mode 100644 .venv/lib/python3.12/site-packages/idna-3.10.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/idna-3.10.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/idna-3.10.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/idna/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/idna/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/idna/__pycache__/codec.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/idna/__pycache__/compat.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/idna/__pycache__/core.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/idna/__pycache__/idnadata.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/idna/__pycache__/intranges.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/idna/__pycache__/package_data.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/idna/__pycache__/uts46data.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/idna/codec.py
create mode 100644 .venv/lib/python3.12/site-packages/idna/compat.py
create mode 100644 .venv/lib/python3.12/site-packages/idna/core.py
create mode 100644 .venv/lib/python3.12/site-packages/idna/idnadata.py
create mode 100644 .venv/lib/python3.12/site-packages/idna/intranges.py
create mode 100644 .venv/lib/python3.12/site-packages/idna/package_data.py
create mode 100644 .venv/lib/python3.12/site-packages/idna/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/idna/uts46data.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/REQUESTED
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/entry_points.txt
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/licenses/COPYING
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/_format.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/_keywords.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/_legacy_keywords.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/_types.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/_typing.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/_utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/cli.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/protocols.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/__pycache__/validators.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/_format.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/_keywords.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/_legacy_keywords.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/_types.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/_typing.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/_utils.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/nested_schemas.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/validator_creation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/const_vs_enum.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/contains.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/issue232.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/issue232/issue.json
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/json_schema_test_suite.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/nested_schemas.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/subcomponents.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/unused_registry.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/useless_applicator_schemas.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/useless_keywords.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/benchmarks/validator_creation.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/cli.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/protocols.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/_suite.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/fuzz_validate.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_cli.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_deprecations.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_format.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_types.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_validators.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/_suite.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/fuzz_validate.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/test_cli.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/test_deprecations.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/test_exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/test_format.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/test_jsonschema_test_suite.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/test_types.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/test_utils.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/test_validators.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/typing/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/typing/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/typing/__pycache__/test_all_concrete_validators_match_protocol.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/tests/typing/test_all_concrete_validators_match_protocol.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema/validators.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/licenses/COPYING
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/__pycache__/_core.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/_core.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/applicator
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/content
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/core
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/validation
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft4/metaschema.json
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft6/metaschema.json
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft7/metaschema.json
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx-3.5.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/networkx-3.5.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/networkx-3.5.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/networkx-3.5.dist-info/REQUESTED
create mode 100644 .venv/lib/python3.12/site-packages/networkx-3.5.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/networkx-3.5.dist-info/entry_points.txt
create mode 100644 .venv/lib/python3.12/site-packages/networkx-3.5.dist-info/licenses/LICENSE.txt
create mode 100644 .venv/lib/python3.12/site-packages/networkx-3.5.dist-info/top_level.txt
create mode 100644 .venv/lib/python3.12/site-packages/networkx/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/__pycache__/conftest.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/__pycache__/convert.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/__pycache__/convert_matrix.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/__pycache__/exception.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/__pycache__/lazy_imports.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/__pycache__/relabel.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/asteroidal.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/boundary.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/bridges.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/broadcasting.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/chains.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/chordal.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/clique.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cluster.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/communicability_alg.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/core.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/covering.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cuts.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cycles.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/d_separation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dag.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/distance_measures.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/distance_regular.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dominance.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dominating.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/efficiency_measures.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/euler.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/graph_hashing.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/graphical.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/hierarchy.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/hybrid.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/isolate.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/link_prediction.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/matching.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/mis.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/moral.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/node_classification.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/non_randomness.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/planar_drawing.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/planarity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/polynomials.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/reciprocity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/regular.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/richclub.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/similarity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/smallworld.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/smetric.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/sparsifiers.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/structuralholes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/summarization.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/swap.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/threshold.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/time_dependent.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/tournament.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/triads.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/vitality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/voronoi.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/walks.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/wiener.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/density.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/clique.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/clustering_coefficient.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/connectivity.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/density.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/distance_measures.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/dominating_set.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/kcomponents.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/matching.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/maxcut.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/ramsey.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/steinertree.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_density.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_clique.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_density.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_matching.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/traveling_salesman.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/treewidth.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/approximation/vertex_cover.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/connectivity.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/correlation.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/mixing.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/neighbor_degree.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/pairs.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/base_test.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/asteroidal.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/link_analysis.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/basic.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/cluster.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/covering.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/edgelist.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/extendability.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/generators.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/link_analysis.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/matching.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/matrix.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/projection.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/redundancy.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/spectral.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_link_analysis.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_basic.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_covering.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_generators.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_link_analysis.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_matching.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_project.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/boundary.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/bridges.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/broadcasting.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness_subset.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/closeness.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/degree_alg.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/dispersion.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/eigenvector.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/flow_matrix.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/group.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/harmonic.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/katz.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/laplacian.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/load.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/percolation.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/reaching.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/second_order.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/subgraph_alg.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_group.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_reaching.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_trophic.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_voterank.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/trophic.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/voterank_alg.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/chains.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/chordal.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/clique.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/cluster.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/equitable_coloring.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/greedy_coloring.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/test_coloring.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/communicability_alg.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/divisive.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/leiden.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/local.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/asyn_fluid.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/community_utils.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/divisive.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/kclique.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/kernighan_lin.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/label_propagation.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/leiden.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/local.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/louvain.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/lukes.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/modularity_max.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/quality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_leiden.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_local.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_centrality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_divisive.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_kclique.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_label_propagation.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_leiden.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_local.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_louvain.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_lukes.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_modularity_max.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_quality.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_utils.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/attracting.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/biconnected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/connected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/semiconnected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/strongly_connected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/weakly_connected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/attracting.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/biconnected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/connected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/semiconnected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/strongly_connected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_connected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_attracting.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_biconnected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_connected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_semiconnected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/components/weakly_connected.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/connectivity.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/cuts.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/disjoint_paths.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/edge_augmentation.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/kcomponents.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/kcutsets.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/stoerwagner.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/utils.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/core.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/covering.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/cuts.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/cycles.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/d_separation.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/dag.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/distance_measures.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/distance_regular.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/dominance.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/dominating.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/efficiency_measures.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/euler.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/edmondskarp.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/boykovkolmogorov.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/capacityscaling.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/dinitz_alg.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/edmondskarp.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/gomory_hu.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/maxflow.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/mincost.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/networksimplex.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/preflowpush.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2
create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2
create mode
100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_maxflow.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_mincost.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/flow/utils.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/graph_hashing.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/graphical.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/hierarchy.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/hybrid.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isolate.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/ismags.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/ismags.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/isomorph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/matchhelpers.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_match_helpers.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_vf2pp.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tree_isomorphism.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/vf2pp.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/hits_alg.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/link_prediction.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/lowest_common_ancestors.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/matching.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/minors/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/minors/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/minors/__pycache__/contraction.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/minors/contraction.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/minors/tests/test_contraction.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/mis.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/moral.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/node_classification.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/non_randomness.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/all.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/binary.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/product.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/unary.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/all.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/binary.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/product.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_all.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_product.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_all.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_binary.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_product.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_unary.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/operators/unary.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/planar_drawing.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/planarity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/polynomials.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/reciprocity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/regular.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/richclub.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/astar.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/dense.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/generic.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py create mode 100644 
.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/unweighted.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/weighted.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/similarity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/simple_paths.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/smallworld.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/smetric.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/sparsifiers.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/structuralholes.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/summarization.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/swap.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_boundary.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_bridges.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_broadcasting.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_clique.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_communicability.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_core.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_covering.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_d_separation.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dominance.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dominating.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_efficiency.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_euler.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_isolate.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_mis.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_moral.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_node_classification.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_planarity.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_regular.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_richclub.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_similarity.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_smallworld.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_summarization.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_threshold.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_vitality.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_asteroidal.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_boundary.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_bridges.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_broadcasting.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_chains.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_chordal.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_clique.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cluster.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_communicability.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_core.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_covering.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cuts.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cycles.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_d_separation.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dag.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_distance_measures.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_distance_regular.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dominance.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dominating.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_efficiency.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_euler.py create mode 100644 
.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_graph_hashing.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_graphical.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_hierarchy.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_hybrid.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_isolate.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_link_prediction.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_matching.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_max_weight_clique.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_mis.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_moral.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_node_classification.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_non_randomness.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_planar_drawing.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_planarity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_polynomials.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_reciprocity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_regular.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_richclub.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_similarity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_simple_paths.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_smallworld.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_smetric.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_sparsifiers.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_structuralholes.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_summarization.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_swap.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_threshold.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_time_dependent.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_tournament.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_triads.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_vitality.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_voronoi.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_walks.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_wiener.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/threshold.py create mode 
100644 .venv/lib/python3.12/site-packages/networkx/algorithms/time_dependent.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tournament.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/beamsearch.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/breadth_first_search.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/depth_first_search.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/edgebfs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/edgedfs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_bfs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_dfs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/branchings.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/coding.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/decomposition.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/mst.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/operations.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/recognition.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_branchings.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_coding.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_decomposition.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_mst.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_operations.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_recognition.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/triads.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/vitality.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/voronoi.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/walks.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/algorithms/wiener.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/coreviews.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/digraph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/filters.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/function.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/graph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/graphviews.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/multidigraph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/multigraph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/__pycache__/reportviews.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/coreviews.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/digraph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/filters.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/function.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/graph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/graphviews.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/multidigraph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/multigraph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/reportviews.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/dispatch_interface.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/historical_tests.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_filters.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_reportviews.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_special.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_subgraphviews.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/dispatch_interface.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/historical_tests.py create mode 100644 
.venv/lib/python3.12/site-packages/networkx/classes/tests/test_coreviews.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_digraph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_digraph_historical.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_filters.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_function.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_graph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_graph_historical.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_graphviews.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_multidigraph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_multigraph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_reportviews.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_special.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/classes/tests/test_subgraphviews.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/conftest.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/convert.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/convert_matrix.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/layout.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_agraph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_pydot.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_pylab.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/layout.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/nx_agraph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/nx_latex.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/nx_pydot.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/nx_pylab.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_agraph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_latex.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_layout.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_pydot.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_pylab.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_complex.png create mode 100644 
.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_empty_graph.png create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_house_with_colors.png create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_labels_and_colors.png create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_shortest_path.png create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/test_agraph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/test_latex.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/test_layout.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/test_pydot.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/drawing/tests/test_pylab.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/exception.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/atlas.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/classic.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/cographs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/community.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/degree_seq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/directed.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/duplication.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/ego.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/expanders.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/geometric.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/harary_graph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/internet_as_graphs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/intersection.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/interval_graph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/joint_degree_seq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/lattice.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/line.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/mycielski.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/nonisomorphic_trees.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/generators/__pycache__/random_clustered.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/random_graphs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/small.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/social.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/spectral_graph_forge.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/stochastic.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/sudoku.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/time_series.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/trees.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/__pycache__/triads.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/atlas.dat.gz create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/atlas.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/classic.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/cographs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/community.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/degree_seq.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/directed.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/duplication.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/ego.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/expanders.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/geometric.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/harary_graph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/internet_as_graphs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/intersection.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/interval_graph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/joint_degree_seq.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/lattice.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/line.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/mycielski.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/nonisomorphic_trees.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/random_clustered.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/random_graphs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/small.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/social.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/spectral_graph_forge.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/stochastic.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/sudoku.py create mode 100644 
.venv/lib/python3.12/site-packages/networkx/generators/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_atlas.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_community.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_directed.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_duplication.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_ego.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_expanders.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_intersection.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_line.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_mycielski.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_random_clustered.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_stochastic.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_sudoku.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_time_series.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-312.pyc create 
mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_atlas.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_classic.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_cographs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_community.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_degree_seq.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_directed.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_duplication.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_ego.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_expanders.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_geometric.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_harary_graph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_internet_as_graphs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_intersection.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_interval_graph.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_joint_degree_seq.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_lattice.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_line.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_mycielski.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_nonisomorphic_trees.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_random_clustered.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_random_graphs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_small.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_spectral_graph_forge.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_stochastic.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_sudoku.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_time_series.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_trees.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/tests/test_triads.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/time_series.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/trees.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/generators/triads.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/lazy_imports.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/algebraicconnectivity.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/attrmatrix.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/bethehessianmatrix.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/graphmatrix.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/laplacianmatrix.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/modularitymatrix.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/spectrum.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/algebraicconnectivity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/attrmatrix.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/bethehessianmatrix.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/graphmatrix.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/laplacianmatrix.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/modularitymatrix.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/spectrum.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_bethehessian.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_modularity.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/test_attrmatrix.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/test_bethehessian.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/test_graphmatrix.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/test_laplacian.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/test_modularity.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/linalg/tests/test_spectrum.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/adjlist.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/gexf.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/gml.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/graph6.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/graphml.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/leda.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/p2g.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/pajek.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/text.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/adjlist.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/edgelist.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/gexf.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/gml.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/graph6.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/graphml.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/adjacency.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/cytoscape.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/node_link.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py create mode 100644 
.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_tree.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tree.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/leda.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/multiline_adjlist.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/p2g.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/pajek.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/sparse6.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_adjlist.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_edgelist.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_gexf.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_gml.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_graph6.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_graphml.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_leda.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_p2g.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_pajek.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_sparse6.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_text.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/readwrite/text.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/relabel.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_all_random_functions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_numpy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_pandas.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_scipy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_import.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_relabel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_all_random_functions.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_convert.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_convert_numpy.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_convert_pandas.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_convert_scipy.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_import.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_lazy_imports.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/tests/test_relabel.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/backends.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/configs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/decorators.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/heaps.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/mapped_queue.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/misc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/random_sequence.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/rcm.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/__pycache__/union_find.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/backends.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/configs.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/decorators.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/heaps.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/mapped_queue.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/misc.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/random_sequence.py create mode 100644 
.venv/lib/python3.12/site-packages/networkx/utils/rcm.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test__init.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_backends.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_config.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_decorators.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_heaps.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_mapped_queue.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_random_sequence.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_rcm.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_unionfind.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test__init.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_backends.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_config.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_decorators.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_heaps.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_mapped_queue.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_misc.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_random_sequence.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_rcm.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/tests/test_unionfind.py create mode 100644 .venv/lib/python3.12/site-packages/networkx/utils/union_find.py create mode 100644 .venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/INSTALLER create mode 100644 .venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/LICENSE.txt create mode 100644 .venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/METADATA create mode 100644 .venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/RECORD create mode 100644 .venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/REQUESTED create mode 100644 .venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/WHEEL create mode 100644 .venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/entry_points.txt create mode 100755 .venv/lib/python3.12/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 create mode 100755 .venv/lib/python3.12/site-packages/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 create mode 100755 .venv/lib/python3.12/site-packages/numpy.libs/libscipy_openblas64_-8fb3d286.so create mode 100644 .venv/lib/python3.12/site-packages/numpy/__config__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/__config__.pyi create mode 100644 
.venv/lib/python3.12/site-packages/numpy/__init__.cython-30.pxd create mode 100644 .venv/lib/python3.12/site-packages/numpy/__init__.pxd create mode 100644 .venv/lib/python3.12/site-packages/numpy/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/__config__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/_array_api_info.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/_configtool.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/_distributor_init.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/_globals.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/_pytesttester.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/conftest.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/dtypes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/matlib.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/__pycache__/version.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_array_api_info.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_array_api_info.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_configtool.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_configtool.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_asarray.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_internal.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_machar.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_methods.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/arrayprint.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/cversions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/defchararray.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/function_base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/getlimits.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/memmap.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/multiarray.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numeric.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numerictypes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/overrides.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/printoptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/records.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/shape_base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/strings.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/__pycache__/umath.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_asarray.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_asarray.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_dtype.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_dtype.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_exceptions.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_internal.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_internal.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_machar.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_machar.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_methods.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_methods.pyi create mode 100755 .venv/lib/python3.12/site-packages/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so create mode 100755 .venv/lib/python3.12/site-packages/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so create mode 100755 .venv/lib/python3.12/site-packages/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so create mode 100755 
.venv/lib/python3.12/site-packages/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so create mode 100755 .venv/lib/python3.12/site-packages/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_simd.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.pyi create mode 100755 .venv/lib/python3.12/site-packages/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.pyi create mode 100755 .venv/lib/python3.12/site-packages/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/arrayprint.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/arrayprint.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/cversions.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/defchararray.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/defchararray.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/function_base.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/function_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/getlimits.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/getlimits.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.c create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.c create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_numpyconfig.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_public_dtype_api_table.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayobject.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayscalars.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/dtype_api.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/halffloat.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarrayobject.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarraytypes.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_compat.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_complexcompat.h create mode 100644 
.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_3kcompat.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_common.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_cpu.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_endian.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_math.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_no_deprecated_api.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_os.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/numpyconfig.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/LICENSE.txt create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/bitgen.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/distributions.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/libdivide.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ufuncobject.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/include/numpy/utils.h create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/lib/libnpymath.a create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/mlib.ini create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/npymath.ini create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/lib/pkgconfig/numpy.pc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/memmap.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/memmap.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/multiarray.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/multiarray.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/numeric.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/numeric.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/numerictypes.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/numerictypes.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/overrides.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/overrides.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/printoptions.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/printoptions.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/records.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/records.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/shape_base.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/shape_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/strings.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/strings.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/_locales.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/_natype.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_abc.cpython-312.pyc create 
mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_api.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_argparse.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_api_info.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_coercion.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_interface.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arraymethod.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arrayobject.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arrayprint.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_casting_unittests.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_conversion_utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cpu_features.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cython.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_datetime.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_defchararray.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_deprecations.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_dlpack.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_dtype.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_einsum.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_errstate.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_extint128.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_function_base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_getlimits.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_half.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_hashtable.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_indexerrors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_indexing.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_item_selection.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_limited_api.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_longdouble.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_machar.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_mem_overlap.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_mem_policy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_memmap.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_multiarray.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_multithreading.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_nditer.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_numeric.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_numerictypes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_overrides.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_print.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_protocols.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_records.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarinherit.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarmath.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_shape_base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_simd.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_simd_module.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_stringdtype.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_strings.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_ufunc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath_complex.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_unicode.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/_locales.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/_natype.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/astype_copy.pkl create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/generate_umath_validation_data.cpp create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/recarray_from_file.fits create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-README.txt create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arccos.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arccosh.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arcsin.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arcsinh.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arctan.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arctanh.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cbrt.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cos.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cosh.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-exp.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-exp2.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-expm1.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log10.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log1p.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log2.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-sin.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-sinh.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-tan.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-tanh.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/checks.pyx create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/meson.build create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/setup.py create mode 100644 
.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api1.c create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/meson.build create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/setup.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test__exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_abc.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_api.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_argparse.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_api_info.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_coercion.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_interface.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_arraymethod.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_arrayobject.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_arrayprint.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_casting_floatingpoint_errors.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_casting_unittests.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_conversion_utils.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_cpu_dispatcher.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_cpu_features.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_custom_dtypes.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_cython.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_datetime.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_defchararray.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_deprecations.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_dlpack.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_dtype.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_einsum.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_errstate.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_extint128.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_function_base.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_getlimits.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_half.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_hashtable.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_indexerrors.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_indexing.py create mode 100644 
.venv/lib/python3.12/site-packages/numpy/_core/tests/test_item_selection.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_limited_api.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_longdouble.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_machar.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_mem_overlap.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_mem_policy.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_memmap.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_multiarray.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_multithreading.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_nditer.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_nep50_promotions.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_numeric.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_numerictypes.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_overrides.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_print.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_protocols.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_records.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_regression.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalar_ctors.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalar_methods.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarbuffer.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarinherit.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarmath.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarprint.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_shape_base.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_simd.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_simd_module.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_stringdtype.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_strings.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_ufunc.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath_accuracy.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath_complex.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/tests/test_unicode.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/umath.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_core/umath.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_distributor_init.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_distributor_init.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_expired_attrs_2_0.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_expired_attrs_2_0.pyi create 
mode 100644 .venv/lib/python3.12/site-packages/numpy/_globals.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_globals.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/hook-numpy.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/hook-numpy.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/test_pyinstaller.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pytesttester.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_pytesttester.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_array_like.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nbit.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nbit_base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_scalars.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_shape.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_ufunc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_add_docstring.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_array_like.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_callable.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_char_codes.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_dtype_like.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_extended_precision.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_nbit.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_nbit_base.py 
create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_nbit_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_nested_sequence.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_scalars.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_shape.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_ufunc.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_typing/_ufunc.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_convertions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_inspect.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_pep440.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/_convertions.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/_convertions.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/_inspect.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/_inspect.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/_pep440.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/_utils/_pep440.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/char/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/char/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/char/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/conftest.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/_dtype.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/_internal.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/_utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/arrayprint.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/defchararray.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/einsumfunc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/fromnumeric.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/function_base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/getlimits.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/multiarray.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/numeric.cpython-312.pyc create mode 100644 
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/numerictypes.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/overrides.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/records.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/shape_base.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/__pycache__/umath.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/_dtype.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/_dtype.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/_internal.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/_multiarray_umath.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/_utils.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/arrayprint.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/defchararray.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/einsumfunc.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/fromnumeric.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/function_base.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/getlimits.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/multiarray.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/numeric.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/numerictypes.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/overrides.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/overrides.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/records.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/shape_base.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/core/umath.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ctypeslib/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ctypeslib/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ctypeslib/_ctypeslib.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ctypeslib/_ctypeslib.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/doc/__pycache__/ufuncs.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/doc/ufuncs.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/dtypes.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/dtypes.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/exceptions.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/exceptions.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__main__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__main__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__version__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/_isocbind.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/common_rules.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/diagnose.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/func2subr.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/rules.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/symbolic.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/use_rules.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__version__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/__version__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/_backend.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/_backend.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/_distutils.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/_distutils.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/_meson.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/_meson.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_backends/meson.build.template
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_isocbind.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_isocbind.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_src_pyf.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/_src_pyf.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/auxfuncs.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/auxfuncs.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/capi_maps.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/capi_maps.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/cb_rules.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/cb_rules.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/cfuncs.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/cfuncs.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/common_rules.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/common_rules.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/crackfortran.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/crackfortran.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/diagnose.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/diagnose.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/f2py2e.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/f2py2e.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/f90mod_rules.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/f90mod_rules.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/func2subr.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/func2subr.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/rules.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/rules.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/setup.cfg
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/src/fortranobject.c
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/src/fortranobject.h
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/symbolic.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/symbolic.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_character.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_data.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_docs.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_f2cmap.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_f2py2e.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_isoc.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_modules.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_pyf_src.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_routines.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_symbolic.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/util.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/block_docstring/foo.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/foo.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh17797.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh18335.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh25211.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh26681.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/hi77.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/hiworld.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/common/block.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/common/gh19161.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/common_with_division.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh27697.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/kind/foo.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh25337/data.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/module_data_docstring.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/use_modules.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_array.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/quoted_character/foo.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/AB.inc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/assignOnlyModule.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/datonly.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f77comments.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f77fixedform.f95
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f90continuation.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/incfile.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/inout.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/mod_derived_types.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_character/foo77.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_character/foo90.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_complex/foo77.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_integer/foo77.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_logical/foo77.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_real/foo77.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_real/foo90.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/funcfortranname.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/funcfortranname.pyf
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/subrout.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/subrout.pyf
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/size/foo.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/char.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/fixed_string.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh24008.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh24662.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286.pyf
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/scalar_string.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/string.f
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_abstract_interface.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_array_from_pyobj.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_assumed_shape.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_block_docstring.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_callback.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_character.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_common.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_crackfortran.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_data.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_docs.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_f2cmap.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_f2py2e.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_isoc.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_kind.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_mixed.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_modules.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_parameter.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_pyf_src.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_quoted_character.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_regression.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_character.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_complex.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_integer.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_logical.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_real.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_routines.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_semicolon_split.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_size.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_string.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_symbolic.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/test_value_attrspec.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/tests/util.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/use_rules.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/f2py/use_rules.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/__pycache__/_helper.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/__pycache__/helper.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/_helper.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/_helper.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/_pocketfft.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/_pocketfft.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/helper.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/helper.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/tests/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/tests/test_helper.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/fft/tests/test_pocketfft.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_datasource.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_format_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_iotools.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_version.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/array_utils.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/format.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/introspect.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/mixins.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/npyio.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/recfunctions.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/scimath.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/user_array.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_datasource.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_datasource.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_format_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_format_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_iotools.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_iotools.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_version.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/_version.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/array_utils.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/array_utils.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/format.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/format.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/introspect.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/introspect.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/mixins.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/mixins.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/npyio.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/npyio.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/recfunctions.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/recfunctions.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/scimath.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/scimath.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npy
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npz
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npy
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npz
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/data/python3.npy
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/data/win64python2.npy
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test__datasource.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test__iotools.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test__version.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_array_utils.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraypad.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraysetops.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_arrayterator.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_format.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_function_base.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_histograms.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_index_tricks.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_io.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_loadtxt.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_mixins.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_nanfunctions.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_packbits.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_polynomial.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_recfunctions.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_regression.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_shape_base.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_stride_tricks.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_twodim_base.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_type_check.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_ufunclike.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/tests/test_utils.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/user_array.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/lib/user_array.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/__pycache__/_linalg.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/__pycache__/linalg.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/_linalg.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/_linalg.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/_umath_linalg.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/lapack_lite.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/linalg.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/linalg.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/tests/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/tests/test_deprecations.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/tests/test_linalg.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/linalg/tests/test_regression.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/API_CHANGES.txt
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/LICENSE
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/README.rst
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/__pycache__/core.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/__pycache__/extras.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/__pycache__/mrecords.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/__pycache__/testutils.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/core.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/core.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/extras.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/extras.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/mrecords.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/mrecords.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_arrayobject.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/test_arrayobject.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/test_core.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/test_deprecations.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/test_extras.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/test_mrecords.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/test_old_ma.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/test_regression.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/tests/test_subclassing.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/ma/testutils.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matlib.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matlib.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/defmatrix.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/defmatrix.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_defmatrix.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_interaction.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_masked_matrix.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_multiarray.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_numeric.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_regression.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/_polybase.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/hermite.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/legendre.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/polyutils.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/_polybase.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/_polybase.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/_polytypes.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/chebyshev.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/chebyshev.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/hermite.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/hermite.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/hermite_e.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/hermite_e.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/laguerre.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/laguerre.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/legendre.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/legendre.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/polynomial.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/polynomial.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/polyutils.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/polyutils.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_chebyshev.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_classes.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_hermite.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_hermite_e.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_laguerre.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_legendre.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_polynomial.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_polyutils.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_printing.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_symbol.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/py.typed
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/LICENSE.md
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/__init__.pxd
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/__init__.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/__pycache__/_pickle.cpython-312.pyc
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.pxd
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/_common.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_common.pxd
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_common.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/extending.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/parse.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/cython/extending.pyx
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/cython/extending_distributions.pyx
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/cython/meson.build
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/numba/extending.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_examples/numba/extending_distributions.py
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/_generator.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_generator.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_mt19937.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_pcg64.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/_philox.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_philox.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_pickle.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_pickle.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/_sfc64.pyi
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/bit_generator.pxd
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/bit_generator.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/c_distributions.pxd
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/lib/libnpyrandom.a
 create mode 100755 .venv/lib/python3.12/site-packages/numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/mtrand.pyi
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__init__.py
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/__init__.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_random.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_regression.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc
 create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/__init__.py
.venv/lib/python3.12/site-packages/numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/generator_pcg64_np121.pkl.gz create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/generator_pcg64_np126.pkl.gz create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/mt19937-testset-1.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/mt19937-testset-2.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64-testset-1.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64-testset-2.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/philox-testset-1.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/philox-testset-2.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64-testset-1.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64-testset-2.csv create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64_np126.pkl.gz create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_direct.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_extending.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_generator_mt19937.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_random.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_randomstate.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_randomstate_regression.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_regression.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_seed_sequence.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/random/tests/test_smoke.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/rec/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/rec/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/rec/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/strings/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/strings/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/strings/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/__init__.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/__pycache__/overrides.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.pyi create mode 
100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/utils.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/_private/utils.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/overrides.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/overrides.pyi create mode 100755 .venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/testing/tests/test_utils.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test__all__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_configtool.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_ctypeslib.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_lazyloading.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_matlib.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_numpy_config.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_numpy_version.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_public_api.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_reloading.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_scripts.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_warnings.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test__all__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_configtool.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_ctypeslib.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_lazyloading.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_matlib.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_numpy_config.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_numpy_version.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_public_api.py create mode 100644 
.venv/lib/python3.12/site-packages/numpy/tests/test_reloading.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_scripts.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/tests/test_warnings.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/mypy_plugin.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arithmetic.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_constructors.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_like.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_pad.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arrayprint.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arrayterator.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/bitwise_ops.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/char.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/chararray.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/comparisons.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/constants.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/datasource.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/dtype.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/einsumfunc.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/flatiter.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/fromnumeric.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/histograms.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/index_tricks.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_function_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_polynomial.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_utils.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_version.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/linalg.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ma.pyi create mode 100644 
.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/memmap.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/modules.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/multiarray.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ndarray.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ndarray_misc.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/nditer.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/nested_sequence.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/npyio.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/numerictypes.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/random.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/rec.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/scalars.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/shape.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/shape_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/stride_tricks.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/strings.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/testing.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/twodim_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/type_check.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufunc_config.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufunclike.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufuncs.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/mypy.ini create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/array_like.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/dtype.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/literal.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ma.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/mod.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/modules.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/nditer.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/numeric.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/random.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/scalars.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/shape.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/simple.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arithmetic.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/array_constructors.py create mode 100644 
.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/array_like.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arrayprint.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arrayterator.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/comparisons.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/dtype.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/einsumfunc.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/flatiter.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/fromnumeric.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/index_tricks.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_user_array.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_utils.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_version.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/literal.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ma.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/mod.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/modules.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/multiarray.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/nditer.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/numeric.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/numerictypes.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/random.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/recfunctions.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/scalars.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/shape.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/simple.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/simple_py3.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufunc_config.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufunclike.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufuncs.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/array_api_info.pyi create mode 100644 
.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arrayprint.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/char.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/chararray.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/constants.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ctypeslib.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/datasource.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/dtype.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/emath.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/fft.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/fromnumeric.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/getlimits.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/histograms.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/index_tricks.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_function_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_utils.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_version.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/linalg.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ma.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/matrix.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/memmap.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/mod.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/modules.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/multiarray.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_assignability.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi create mode 100644 
.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nditer.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nested_sequence.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/npyio.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/numeric.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/numerictypes.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_polybase.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_series.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/random.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/rec.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/scalars.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/shape.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/shape_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/stride_tricks.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/strings.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/testing.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/twodim_base.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/type_check.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufunc_config.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufunclike.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufuncs.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/test_isfile.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/test_runtime.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/typing/tests/test_typing.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/version.py create mode 100644 .venv/lib/python3.12/site-packages/numpy/version.pyi create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/METADATA create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/RECORD create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/WHEEL create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/entry_points.txt create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/AUTHORS.txt create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/LICENSE.txt create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt create mode 100644 
.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/certifi/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/distro/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/msgpack/COPYING create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pygments/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/requests/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/rich/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE-HEADER create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/truststore/LICENSE create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt create mode 100644 .venv/lib/python3.12/site-packages/pip-25.2.dist-info/top_level.txt create mode 100644 .venv/lib/python3.12/site-packages/pip/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/__main__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/__pip-runner__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/__pycache__/__main__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/__pycache__/__pip-runner__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/build_env.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/configuration.cpython-312.pyc create mode 
100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/main.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/pyproject.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/build_env.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cache.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/main.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/parser.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/autocompletion.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/base_command.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/cmdoptions.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/command_context.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/index_command.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/main.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/main_parser.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/parser.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/progress_bars.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/req_command.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/spinners.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/cli/status_codes.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/check.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/completion.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/debug.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/download.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/hash.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/help.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/index.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/install.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/list.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/lock.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/search.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/show.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/cache.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/check.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/completion.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/configuration.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/debug.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/download.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/freeze.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/hash.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/help.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/index.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/inspect.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/install.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/list.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/lock.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/search.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/show.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/commands/uninstall.py create mode 100644 
.venv/lib/python3.12/site-packages/pip/_internal/commands/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/configuration.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/base.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/installed.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/sdist.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/distributions/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/index/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/collector.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/sources.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/index/collector.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/index/package_finder.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/index/sources.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/locations/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/locations/_distutils.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/locations/_sysconfig.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/locations/base.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/main.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/pip/_internal/metadata/_json.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/base.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_compat.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_dists.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_envs.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/metadata/pkg_resources.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/candidate.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/format_control.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/index.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/link.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/pylock.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/scheme.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/target_python.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/candidate.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/direct_url.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/format_control.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/index.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/installation_report.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/link.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/pylock.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/scheme.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/search_scope.py create mode 100644 
.venv/lib/python3.12/site-packages/pip/_internal/models/selection_prefs.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/target_python.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/models/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/auth.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/download.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/session.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/auth.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/cache.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/download.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/lazy_wheel.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/session.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/utils.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/network/xmlrpc.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/check.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/pip/_internal/operations/build/build_tracker.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata_editable.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata_legacy.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel_editable.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel_legacy.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/check.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/freeze.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/install/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/editable_legacy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/install/editable_legacy.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/install/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/operations/prepare.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/pyproject.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/constructors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_file.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_install.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_set.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/constructors.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/req_dependency_group.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/req_file.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/req_install.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/req_set.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/req/req_uninstall.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/base.py create mode 100644 
.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/resolver.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/base.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/candidates.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/factory.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/provider.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/reporter.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/requirements.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/resolver.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/self_outdated_check.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_log.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compat.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/logging.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/misc.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/retry.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/urls.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/_jaraco_text.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/_log.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/appdirs.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/compat.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/compatibility_tags.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/datetime.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/deprecation.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/direct_url_helpers.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/egg_link.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/entrypoints.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/filesystem.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/filetypes.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/glibc.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/hashes.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/logging.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/misc.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/packaging.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/retry.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/setuptools_build.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/subprocess.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/temp_dir.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/unpacking.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/urls.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/virtualenv.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/utils/wheel.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/git.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/bazaar.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/git.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/mercurial.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/subversion.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/vcs/versioncontrol.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_internal/wheel_builder.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/_cmd.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/adapter.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/cache.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/controller.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/filewrapper.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/heuristics.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/serialize.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/wrapper.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/certifi/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/certifi/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/certifi/cacert.pem
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/certifi/core.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/certifi/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_implementation.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_toml_compat.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/compat.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/resources.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/scripts.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/t32.exe
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/t64-arm.exe
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/t64.exe
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/util.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/w32.exe
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/w64-arm.exe
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distlib/w64.exe
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distro/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distro/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distro/distro.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/distro/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/core.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/codec.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/compat.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/core.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/idnadata.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/intranges.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/package_data.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/idna/uts46data.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/msgpack/exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/msgpack/ext.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/msgpack/fallback.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/_elffile.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/_manylinux.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/_musllinux.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/_parser.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/_structures.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/_tokenizer.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/_spdx.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/markers.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/metadata.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/requirements.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/specifiers.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/tags.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/utils.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/packaging/version.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pkg_resources/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/android.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/api.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/macos.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/unix.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/version.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/windows.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/console.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/filter.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/filters/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatter.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/_mapping.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexer.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/_mapping.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/python.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/modeline.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/plugin.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/regexopt.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/scanner.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/sphinxext.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/style.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/_mapping.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/token.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/unistring.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pygments/util.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_impl.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/api.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/help.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/models.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/__version__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/_internal_utils.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/adapters.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/api.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/auth.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/certs.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/compat.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/cookies.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/help.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/hooks.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/models.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/packages.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/sessions.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/status_codes.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/structures.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/requests/utils.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/providers.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/reporters.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/structs.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__main__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/align.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/box.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/color.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/console.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/control.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/json.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/live.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/region.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/style.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/table.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/text.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_cell_widths.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_emoji_codes.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_emoji_replace.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_export_format.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_extension.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_fileno.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_inspect.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_log_render.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_loop.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_null_file.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_palettes.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_pick.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_ratio.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_spinners.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_stack.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_timer.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_win32_console.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_windows.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_windows_renderer.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/_wrap.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/abc.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/align.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/ansi.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/bar.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/box.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/cells.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/color.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/color_triplet.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/columns.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/console.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/constrain.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/containers.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/control.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/default_styles.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/diagnose.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/emoji.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/errors.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/file_proxy.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/filesize.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/highlighter.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/json.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/jupyter.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/layout.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/live.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/live_render.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/logging.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/markup.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/measure.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/padding.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/pager.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/palette.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/panel.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/pretty.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/progress.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/progress_bar.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/prompt.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/protocol.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/region.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/repr.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/rule.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/scope.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/screen.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/segment.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/spinner.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/status.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/style.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/styled.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/syntax.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/table.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/terminal_theme.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/text.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/theme.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/themes.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/traceback.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/rich/tree.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/tomli/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/tomli/_parser.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/tomli/_re.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/tomli/_types.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/tomli/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/tomli_w/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/tomli_w/_writer.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/tomli_w/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/_api.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/_macos.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/_openssl.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/_ssl_constants.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/_windows.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/truststore/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/_collections.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/_version.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/connection.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/connectionpool.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/appengine.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/contrib/socks.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/fields.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/filepost.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/packages/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/packages/six.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/poolmanager.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/request.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/response.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/connection.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/proxy.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/queue.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/request.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/response.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/retry.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssltransport.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/timeout.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/url.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/wait.py
create mode 100644 .venv/lib/python3.12/site-packages/pip/_vendor/vendor.txt
create mode 100644 .venv/lib/python3.12/site-packages/pip/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/referencing-0.36.2.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/referencing-0.36.2.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/referencing-0.36.2.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/referencing-0.36.2.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/referencing-0.36.2.dist-info/licenses/COPYING
create mode 100644 .venv/lib/python3.12/site-packages/referencing/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/__pycache__/_attrs.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/__pycache__/_core.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/__pycache__/jsonschema.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/__pycache__/retrieval.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/__pycache__/typing.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/_attrs.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/_attrs.pyi
create mode 100644 .venv/lib/python3.12/site-packages/referencing/_core.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/jsonschema.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/referencing/retrieval.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/__pycache__/test_core.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/__pycache__/test_exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/__pycache__/test_jsonschema.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/__pycache__/test_referencing_suite.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/__pycache__/test_retrieval.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/test_core.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/test_exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/test_jsonschema.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/test_referencing_suite.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/tests/test_retrieval.py
create mode 100644 .venv/lib/python3.12/site-packages/referencing/typing.py
create mode 100644 .venv/lib/python3.12/site-packages/requests-2.32.5.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/requests-2.32.5.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/requests-2.32.5.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/requests-2.32.5.dist-info/REQUESTED
create mode 100644 .venv/lib/python3.12/site-packages/requests-2.32.5.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/requests-2.32.5.dist-info/licenses/LICENSE
create mode 100644 .venv/lib/python3.12/site-packages/requests-2.32.5.dist-info/top_level.txt
create mode 100644 .venv/lib/python3.12/site-packages/requests/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/__version__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/_internal_utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/adapters.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/api.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/auth.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/certs.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/compat.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/cookies.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/help.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/hooks.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/models.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/packages.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/sessions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/status_codes.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/structures.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__pycache__/utils.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/requests/__version__.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/_internal_utils.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/adapters.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/api.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/auth.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/certs.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/compat.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/cookies.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/help.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/hooks.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/models.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/packages.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/sessions.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/status_codes.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/structures.py
create mode 100644 .venv/lib/python3.12/site-packages/requests/utils.py
create mode 100644 .venv/lib/python3.12/site-packages/rpds/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/rpds/__init__.pyi
create mode 100644 .venv/lib/python3.12/site-packages/rpds/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/rpds/py.typed
create mode 100755 .venv/lib/python3.12/site-packages/rpds/rpds.cpython-312-x86_64-linux-gnu.so
create mode 100644 .venv/lib/python3.12/site-packages/rpds_py-0.27.0.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/rpds_py-0.27.0.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/rpds_py-0.27.0.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/rpds_py-0.27.0.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/rpds_py-0.27.0.dist-info/licenses/LICENSE
create mode 100644 .venv/lib/python3.12/site-packages/typing_extensions-4.14.1.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/typing_extensions-4.14.1.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/typing_extensions-4.14.1.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/typing_extensions-4.14.1.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/typing_extensions-4.14.1.dist-info/licenses/LICENSE
create mode 100644 .venv/lib/python3.12/site-packages/typing_extensions.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3-2.5.0.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/urllib3-2.5.0.dist-info/METADATA
create mode 100644 .venv/lib/python3.12/site-packages/urllib3-2.5.0.dist-info/RECORD
create mode 100644 .venv/lib/python3.12/site-packages/urllib3-2.5.0.dist-info/WHEEL
create mode 100644 .venv/lib/python3.12/site-packages/urllib3-2.5.0.dist-info/licenses/LICENSE.txt
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/_base_connection.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/_collections.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/_request_methods.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/_version.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/connection.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/connectionpool.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/exceptions.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/fields.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/filepost.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/poolmanager.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/__pycache__/response.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/_base_connection.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/_collections.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/_request_methods.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/_version.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/connection.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/connectionpool.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/__pycache__/pyopenssl.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/__pycache__/socks.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__pycache__/connection.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__pycache__/fetch.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__pycache__/request.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__pycache__/response.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/connection.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/emscripten_fetch_worker.js
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/fetch.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/request.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/response.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/pyopenssl.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/contrib/socks.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/exceptions.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/fields.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/filepost.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/http2/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/http2/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/http2/__pycache__/connection.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/http2/__pycache__/probe.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/http2/connection.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/http2/probe.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/poolmanager.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/py.typed
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/response.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__init__.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/__init__.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/connection.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/proxy.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/request.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/response.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/retry.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/ssl_.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/ssl_match_hostname.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/ssltransport.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/timeout.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/url.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/util.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/__pycache__/wait.cpython-312.pyc
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/connection.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/proxy.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/request.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/response.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/retry.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/ssl_.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/ssl_match_hostname.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/ssltransport.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/timeout.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/url.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/util.py
create mode 100644 .venv/lib/python3.12/site-packages/urllib3/util/wait.py
create mode 100644 .venv/lib/python3.12/site-packages/~ip-24.0.dist-info/AUTHORS.txt
create mode 100644 .venv/lib/python3.12/site-packages/~ip-24.0.dist-info/INSTALLER
create mode 100644 .venv/lib/python3.12/site-packages/~ip-24.0.dist-info/LICENSE.txt
create mode 100644
.venv/lib/python3.12/site-packages/~ip-24.0.dist-info/METADATA create mode 100644 .venv/lib/python3.12/site-packages/~ip-24.0.dist-info/RECORD create mode 100644 .venv/lib/python3.12/site-packages/~ip-24.0.dist-info/REQUESTED create mode 100644 .venv/lib/python3.12/site-packages/~ip-24.0.dist-info/WHEEL create mode 100644 .venv/lib/python3.12/site-packages/~ip-24.0.dist-info/entry_points.txt create mode 100644 .venv/lib/python3.12/site-packages/~ip-24.0.dist-info/top_level.txt create mode 100644 .venv/lib/python3.12/site-packages/~ip/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/__main__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/__pip-runner__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/__pycache__/__main__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/__pycache__/__pip-runner__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/build_env.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/configuration.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/main.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/pyproject.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/self_outdated_check.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/__pycache__/wheel_builder.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/build_env.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cache.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/autocompletion.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/base_command.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/cmdoptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/command_context.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/main.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/main_parser.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/parser.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/progress_bars.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/req_command.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/spinners.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_internal/cli/__pycache__/status_codes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/autocompletion.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/base_command.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/cmdoptions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/command_context.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/main.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/main_parser.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/parser.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/progress_bars.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/req_command.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/spinners.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/cli/status_codes.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/check.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/completion.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/configuration.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/debug.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/download.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/freeze.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/hash.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/help.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/index.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/inspect.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/install.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/list.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/search.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/show.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/uninstall.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/cache.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/check.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/completion.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/configuration.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_internal/commands/debug.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/download.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/freeze.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/hash.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/help.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/index.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/inspect.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/install.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/list.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/search.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/show.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/uninstall.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/commands/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/configuration.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/__pycache__/installed.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/__pycache__/sdist.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/base.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/installed.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/sdist.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/distributions/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/index/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/index/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/index/__pycache__/collector.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/index/__pycache__/package_finder.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/index/__pycache__/sources.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/index/collector.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/index/package_finder.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/index/sources.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/locations/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/locations/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/locations/__pycache__/_distutils.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_internal/locations/__pycache__/_sysconfig.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/locations/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/locations/_distutils.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/locations/_sysconfig.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/locations/base.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/main.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/__pycache__/_json.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/__pycache__/pkg_resources.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/_json.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/base.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/importlib/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/importlib/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/importlib/__pycache__/_compat.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/importlib/__pycache__/_dists.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/importlib/__pycache__/_envs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/importlib/_compat.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/importlib/_dists.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/importlib/_envs.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/metadata/pkg_resources.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/candidate.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/direct_url.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/format_control.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/index.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/installation_report.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/link.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/scheme.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/search_scope.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/selection_prefs.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/target_python.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/candidate.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/direct_url.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/format_control.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/index.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/installation_report.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/link.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/scheme.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/search_scope.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/selection_prefs.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/target_python.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/models/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__pycache__/auth.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__pycache__/cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__pycache__/download.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__pycache__/lazy_wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__pycache__/session.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__pycache__/utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/__pycache__/xmlrpc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/auth.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/cache.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/download.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/lazy_wheel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/session.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/utils.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/network/xmlrpc.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/__pycache__/check.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/__pycache__/freeze.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/__pycache__/prepare.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__pycache__/__init__.cpython-312.pyc create mode 
100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__pycache__/build_tracker.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__pycache__/metadata.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__pycache__/metadata_editable.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__pycache__/metadata_legacy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__pycache__/wheel_editable.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/__pycache__/wheel_legacy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/build_tracker.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/metadata.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/metadata_editable.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/metadata_legacy.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/wheel_editable.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/build/wheel_legacy.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/check.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/freeze.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/install/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/install/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/install/__pycache__/editable_legacy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/install/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/install/editable_legacy.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/install/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/operations/prepare.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/pyproject.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/__pycache__/constructors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/__pycache__/req_file.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/__pycache__/req_install.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/__pycache__/req_set.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/__pycache__/req_uninstall.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/constructors.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/req_file.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_internal/req/req_install.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/req_set.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/req/req_uninstall.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/base.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/legacy/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/legacy/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/legacy/__pycache__/resolver.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/legacy/resolver.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/base.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/factory.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/provider.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/base.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/candidates.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/factory.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/found_candidates.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/provider.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/reporter.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/requirements.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/resolution/resolvelib/resolver.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/self_outdated_check.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/__init__.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/_jaraco_text.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/_log.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/appdirs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/compat.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/compatibility_tags.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/datetime.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/deprecation.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/direct_url_helpers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/egg_link.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/encoding.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/entrypoints.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/filesystem.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/filetypes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/glibc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/hashes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/logging.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/misc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/models.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/packaging.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/setuptools_build.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/subprocess.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/temp_dir.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/unpacking.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/urls.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/virtualenv.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/_jaraco_text.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/_log.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/appdirs.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/compat.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/compatibility_tags.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/datetime.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/deprecation.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_internal/utils/direct_url_helpers.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/egg_link.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/encoding.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/entrypoints.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/filesystem.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/filetypes.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/glibc.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/hashes.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/logging.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/misc.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/models.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/packaging.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/setuptools_build.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/subprocess.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/temp_dir.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/unpacking.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/urls.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/virtualenv.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/utils/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/__pycache__/bazaar.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/__pycache__/git.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/__pycache__/mercurial.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/__pycache__/subversion.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/__pycache__/versioncontrol.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/bazaar.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/git.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/mercurial.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/subversion.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/vcs/versioncontrol.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_internal/wheel_builder.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/__pycache__/six.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/__pycache__/typing_extensions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/__init__.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/_cmd.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/adapter.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/controller.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/heuristics.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/serialize.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/__pycache__/wrapper.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/_cmd.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/adapter.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/cache.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/caches/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/caches/file_cache.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/caches/redis_cache.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/controller.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/filewrapper.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/heuristics.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/serialize.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/cachecontrol/wrapper.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/certifi/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/certifi/__main__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/certifi/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/certifi/__pycache__/__main__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/certifi/__pycache__/core.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/certifi/cacert.pem create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/certifi/core.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/certifi/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/big5freq.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/big5prober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/chardistribution.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/charsetprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/codingstatemachine.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/codingstatemachinedict.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/cp949prober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/enums.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/escprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/escsm.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/eucjpprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/euckrfreq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/euckrprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/euctwfreq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/euctwprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/gb2312freq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/gb2312prober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/hebrewprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/jisfreq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/johabfreq.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/johabprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/jpcntx.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/langgreekmodel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/langrussianmodel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/langthaimodel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/langturkishmodel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/latin1prober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/macromanprober.cpython-312.pyc 
create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/mbcssm.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/resultdict.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/sjisprober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/universaldetector.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/utf1632prober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/utf8prober.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/__pycache__/version.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/big5freq.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/big5prober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/chardistribution.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/charsetgroupprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/charsetprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/cli/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/cli/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/cli/__pycache__/chardetect.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/cli/chardetect.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/codingstatemachine.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/codingstatemachinedict.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/cp949prober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/enums.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/escprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/escsm.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/eucjpprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/euckrfreq.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/euckrprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/euctwfreq.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/euctwprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/gb2312freq.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/gb2312prober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/hebrewprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/jisfreq.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/johabfreq.py 
create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/johabprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/jpcntx.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/langbulgarianmodel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/langgreekmodel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/langhebrewmodel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/langhungarianmodel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/langrussianmodel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/langthaimodel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/langturkishmodel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/latin1prober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/macromanprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/mbcharsetprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/mbcsgroupprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/mbcssm.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/metadata/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/metadata/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/metadata/__pycache__/languages.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/metadata/languages.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/resultdict.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/sbcharsetprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/sbcsgroupprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/sjisprober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/universaldetector.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/utf1632prober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/utf8prober.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/chardet/version.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/__pycache__/ansi.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/__pycache__/ansitowin32.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/__pycache__/initialise.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/__pycache__/win32.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/__pycache__/winterm.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/ansi.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/ansitowin32.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/initialise.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/__pycache__/ansi_test.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/__pycache__/ansitowin32_test.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/__pycache__/initialise_test.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/__pycache__/isatty_test.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/__pycache__/utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/__pycache__/winterm_test.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/ansi_test.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/ansitowin32_test.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/initialise_test.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/isatty_test.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/utils.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/tests/winterm_test.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/win32.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/colorama/winterm.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/compat.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/database.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/index.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/locators.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/manifest.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/markers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/metadata.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/resources.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/scripts.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/util.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/version.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/__pycache__/wheel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/compat.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/database.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/index.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/locators.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/distlib/manifest.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/markers.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/metadata.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/resources.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/scripts.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/t32.exe create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/t64-arm.exe create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/t64.exe create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/util.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/version.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/w32.exe create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/w64-arm.exe create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/w64.exe create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distlib/wheel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distro/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distro/__main__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distro/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distro/__pycache__/__main__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distro/__pycache__/distro.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distro/distro.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/distro/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__pycache__/codec.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__pycache__/compat.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__pycache__/core.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__pycache__/idnadata.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__pycache__/intranges.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__pycache__/package_data.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/__pycache__/uts46data.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/codec.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/compat.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/core.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/idnadata.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/intranges.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/package_data.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/idna/uts46data.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/msgpack/__init__.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/msgpack/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/msgpack/__pycache__/exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/msgpack/__pycache__/ext.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/msgpack/__pycache__/fallback.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/msgpack/exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/msgpack/ext.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/msgpack/fallback.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__about__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/__about__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/_manylinux.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/_musllinux.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/_structures.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/markers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/requirements.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/specifiers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/tags.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/__pycache__/version.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/_manylinux.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/_musllinux.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/_structures.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/markers.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/requirements.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/specifiers.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/tags.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/utils.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/packaging/version.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pkg_resources/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pkg_resources/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__main__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__pycache__/__init__.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__pycache__/__main__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__pycache__/android.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__pycache__/api.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__pycache__/macos.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__pycache__/unix.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__pycache__/version.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/__pycache__/windows.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/android.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/api.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/macos.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/unix.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/version.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/platformdirs/windows.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__main__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/__main__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/cmdline.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/console.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/filter.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/formatter.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/lexer.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/modeline.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/plugin.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/regexopt.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/scanner.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/sphinxext.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/style.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/token.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/unistring.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/__pycache__/util.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/cmdline.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/console.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/pygments/filter.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/filters/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/filters/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatter.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/bbcode.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/groff.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/html.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/img.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/irc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/latex.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/other.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/pangomarkup.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/rtf.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/svg.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/terminal.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/__pycache__/terminal256.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/_mapping.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/bbcode.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/groff.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/html.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/img.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/irc.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/latex.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/other.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/pangomarkup.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/rtf.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/svg.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/terminal.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/formatters/terminal256.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/lexer.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/pygments/lexers/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/lexers/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/lexers/__pycache__/python.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/lexers/_mapping.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/lexers/python.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/modeline.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/plugin.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/regexopt.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/scanner.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/sphinxext.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/style.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/styles/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/styles/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/token.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/unistring.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pygments/util.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/actions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/common.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/core.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/helpers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/results.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/testing.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/unicode.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/__pycache__/util.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/actions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/common.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/core.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/diagram/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/helpers.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/py.typed create mode 
100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/results.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/testing.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/unicode.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyparsing/util.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/__pycache__/_compat.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/_compat.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/_impl.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/_in_process/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/pyproject_hooks/_in_process/_in_process.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/__version__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/_internal_utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/adapters.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/api.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/auth.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/certs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/compat.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/cookies.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/help.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/hooks.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/models.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/packages.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/sessions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/status_codes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/structures.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__pycache__/utils.cpython-312.pyc create mode 
100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/__version__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/_internal_utils.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/adapters.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/api.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/auth.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/certs.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/compat.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/cookies.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/help.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/hooks.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/models.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/packages.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/sessions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/status_codes.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/structures.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/requests/utils.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/__pycache__/providers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/__pycache__/reporters.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/__pycache__/resolvers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/__pycache__/structs.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/compat/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/compat/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/compat/__pycache__/collections_abc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/compat/collections_abc.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/providers.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/reporters.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/resolvers.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/resolvelib/structs.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__main__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/__main__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_cell_widths.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_emoji_codes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_emoji_replace.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_export_format.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_extension.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_fileno.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_inspect.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_log_render.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_loop.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_null_file.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_palettes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_pick.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_ratio.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_spinners.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_stack.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_timer.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_win32_console.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_windows.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_windows_renderer.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/_wrap.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/abc.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/align.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/ansi.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/bar.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/box.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/cells.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/color.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/color_triplet.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/columns.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/console.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/constrain.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/containers.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/control.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/default_styles.cpython-312.pyc 
create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/diagnose.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/emoji.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/errors.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/file_proxy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/filesize.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/highlighter.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/json.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/jupyter.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/layout.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/live.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/live_render.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/logging.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/markup.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/measure.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/padding.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/pager.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/palette.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/panel.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/pretty.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/progress.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/progress_bar.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/prompt.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/protocol.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/region.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/repr.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/rule.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/scope.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/screen.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/segment.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/spinner.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/status.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/style.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/styled.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/syntax.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/table.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/terminal_theme.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/text.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/theme.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/themes.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/traceback.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/__pycache__/tree.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_cell_widths.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_emoji_codes.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_emoji_replace.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_export_format.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_extension.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_fileno.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_inspect.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_log_render.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_loop.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_null_file.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_palettes.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_pick.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_ratio.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_spinners.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_stack.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_timer.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_win32_console.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_windows.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_windows_renderer.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/_wrap.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/abc.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/align.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/ansi.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/bar.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/box.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/cells.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/color.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/color_triplet.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/columns.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/console.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/constrain.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/containers.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/rich/control.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/default_styles.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/diagnose.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/emoji.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/errors.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/file_proxy.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/filesize.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/highlighter.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/json.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/jupyter.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/layout.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/live.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/live_render.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/logging.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/markup.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/measure.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/padding.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/pager.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/palette.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/panel.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/pretty.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/progress.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/progress_bar.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/prompt.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/protocol.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/region.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/repr.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/rule.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/scope.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/screen.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/segment.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/spinner.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/status.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/style.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/styled.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/syntax.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/table.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/terminal_theme.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/text.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/theme.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/themes.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/rich/traceback.py create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/rich/tree.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/six.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/_asyncio.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/_utils.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/after.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/before.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/before_sleep.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/nap.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/retry.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/stop.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/tornadoweb.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/__pycache__/wait.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/_asyncio.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/_utils.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/after.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/before.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/before_sleep.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/nap.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/retry.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/stop.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/tornadoweb.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tenacity/wait.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/__pycache__/_parser.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/__pycache__/_re.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/__pycache__/_types.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/_parser.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/_re.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/_types.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/tomli/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/__pycache__/_api.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/truststore/__pycache__/_macos.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/__pycache__/_openssl.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/__pycache__/_ssl_constants.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/__pycache__/_windows.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/_api.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/_macos.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/_openssl.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/_ssl_constants.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/_windows.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/truststore/py.typed create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/typing_extensions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/_collections.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/_version.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/connection.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/connectionpool.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/exceptions.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/fields.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/filepost.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/poolmanager.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/request.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/__pycache__/response.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/_collections.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/_version.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/connection.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/connectionpool.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/__pycache__/socks.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/_appengine_environ.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/_securetransport/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/_securetransport/bindings.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/_securetransport/low_level.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/appengine.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/ntlmpool.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/pyopenssl.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/securetransport.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/contrib/socks.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/exceptions.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/fields.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/filepost.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/__pycache__/six.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/backports/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/backports/makefile.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/backports/weakref_finalize.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/packages/six.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/poolmanager.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/request.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/response.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/connection.cpython-312.pyc create mode 100644 
.venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/proxy.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/queue.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/request.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/response.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/retry.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/ssl_.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/timeout.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/url.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/__pycache__/wait.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/connection.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/proxy.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/queue.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/request.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/response.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/retry.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/ssl_.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/ssl_match_hostname.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/ssltransport.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/timeout.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/url.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/urllib3/util/wait.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/vendor.txt create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/__init__.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/__pycache__/__init__.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/__pycache__/labels.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/__pycache__/mklabels.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/__pycache__/tests.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/__pycache__/x_user_defined.cpython-312.pyc create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/labels.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/mklabels.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/tests.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/_vendor/webencodings/x_user_defined.py create mode 100644 .venv/lib/python3.12/site-packages/~ip/py.typed create mode 120000 .venv/lib64 create mode 
100644 .venv/pyvenv.cfg
 delete mode 100644 README.md
 create mode 100644 data/analysis/course_only.json
 create mode 100644 data/analysis/none.json
 create mode 100644 data/analysis/remaining.json
 delete mode 100644 data/catalog_2025_fall.json
 create mode 100644 data/communities.json
 create mode 100644 data/courses.json
 create mode 100644 data/courses_parsed.json
 create mode 100644 data/courses_test.json
 create mode 100644 data/graph.json
 create mode 100644 data/graph_reduced.json
 create mode 100644 data/parsed/course_only_parsed.json
 create mode 100644 data/parsed/course_only_unparsed.json
 create mode 100644 data/positions.json
 create mode 100644 data/positions_drl.json
 create mode 100644 data/positions_smacof.json
 create mode 100644 data/prereq_analysis.json
 delete mode 100644 index.html
 delete mode 100644 package-lock.json
 delete mode 100644 package.json
 create mode 100644 requirements.txt
 create mode 100644 schema/course.schema.json
 create mode 100644 scripts/__pycache__/parse_course_prereqs.cpython-312.pyc
 create mode 100644 scripts/analyze_prereqs.py
 create mode 100644 scripts/build_final_parsed.py
 create mode 100644 scripts/build_graph_assets.py
 create mode 100644 scripts/fetch_uiuc_courses.py
 create mode 100644 scripts/parse_course_prereqs.py
 create mode 100644 scripts/reduce_and_cluster.py
 delete mode 100644 scripts/scrape.js
 create mode 100644 scripts/validate_courses.py
 delete mode 100644 src/App.tsx
 delete mode 100644 src/components/Graph.tsx
 delete mode 100644 src/global.d.ts
 delete mode 100644 src/hooks/useCourseData.ts
 delete mode 100644 src/main.tsx
 delete mode 100644 tsconfig.json
 delete mode 100644 vite.config.ts
 create mode 100644 web/index.html
 create mode 100644 web/index.js
 create mode 100644 web/styles.css

diff --git a/.github/workflows/build-dataset.yml b/.github/workflows/build-dataset.yml
new file mode 100644
index 0000000..103000b
--- /dev/null
+++ b/.github/workflows/build-dataset.yml
@@ -0,0 +1,70 @@
+name: Build UIUC Course Dataset
+
+on:
+  workflow_dispatch:
+    inputs:
+      year:
+        description: "Catalog year (e.g., 2024)"
+        required: false
+        default: "2024"
+      term:
+        description: "Term (fall|spring|summer|winter)"
+        required: false
+        default: "fall"
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.12'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Fetch catalog -> data/courses.json
+        run: |
+          mkdir -p data
+          python scripts/fetch_uiuc_courses.py --year "${{ github.event.inputs.year }}" --term "${{ github.event.inputs.term }}" --output data/courses.json
+
+      - name: Analyze prerequisites
+        run: |
+          python scripts/analyze_prereqs.py data/courses.json --outdir data/analysis
+
+      - name: Parse course-only prerequisites
+        run: |
+          mkdir -p data/parsed
+          python scripts/parse_course_prereqs.py data/analysis/course_only.json --output data/parsed/course_only_parsed.json --unparsed-output data/parsed/course_only_unparsed.json
+
+      - name: Build final parsed dataset
+        run: |
+          python scripts/build_final_parsed.py data/courses.json --output data/courses_parsed.json
+
+      - name: Upload raw courses
+        uses: actions/upload-artifact@v4
+        with:
+          name: courses-json
+          path: data/courses.json
+
+      - name: Upload analysis
+        uses: actions/upload-artifact@v4
+        with:
+          name: analysis-json
+          path: data/analysis
+
+      - name: Upload parsed outputs
+        uses: actions/upload-artifact@v4
+        with:
+          name: parsed-json
+          path: |
+            data/parsed
+            data/courses_parsed.json
+
+
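The workflow above chains four repository scripts into one dataset build: fetch the catalog, analyze prerequisites, parse the course-only subset, then assemble the final parsed dataset. For running the same sequence outside CI, here is a minimal sketch; the script paths and command-line flags are copied verbatim from the workflow steps, while the wrapper itself (the run helper, the directory set-up, the argument handling) is assumed and is not a file the repository ships.

#!/usr/bin/env python3
# Local replay of the build-dataset.yml steps. Script names and flags are
# taken from the workflow above; this wrapper itself is an illustrative sketch.
import pathlib
import subprocess
import sys


def run(cmd):
    """Echo and run one pipeline step, failing fast like a CI step would."""
    print("->", " ".join(cmd))
    subprocess.run(cmd, check=True)


def main(year="2024", term="fall"):
    # The workflow creates these directories with `mkdir -p`.
    pathlib.Path("data/analysis").mkdir(parents=True, exist_ok=True)
    pathlib.Path("data/parsed").mkdir(parents=True, exist_ok=True)
    py = sys.executable
    run([py, "scripts/fetch_uiuc_courses.py",
         "--year", year, "--term", term, "--output", "data/courses.json"])
    run([py, "scripts/analyze_prereqs.py",
         "data/courses.json", "--outdir", "data/analysis"])
    run([py, "scripts/parse_course_prereqs.py", "data/analysis/course_only.json",
         "--output", "data/parsed/course_only_parsed.json",
         "--unparsed-output", "data/parsed/course_only_unparsed.json"])
    run([py, "scripts/build_final_parsed.py",
         "data/courses.json", "--output", "data/courses_parsed.json"])


if __name__ == "__main__":
    main(*sys.argv[1:3])

Invoked with no arguments it falls back to the same defaults as the workflow_dispatch inputs (year 2024, term fall).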
diff --git a/.github/workflows/update-and-deploy.yml b/.github/workflows/update-and-deploy.yml
deleted file mode 100644
index fb034a6..0000000
--- a/.github/workflows/update-and-deploy.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: grab-data-build-deploy
-
-on:
-  schedule:
-    - cron: "0 6 * * *"
-  push:
-    branches: [main]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-node@v4
-        with:
-          node-version: 18
-      - run: npm ci
-      - name: Scrape UIUC catalog
-        env:
-          NODE_OPTIONS: --dns-result-order=ipv4first
-        run: npm run scrape
-      - name: Commit data
-        run: |
-          git config user.name "gh-bot"
-          git config user.email "bot@users.noreply.github.com"
-          git add data || true
-          git diff --cached --quiet || git commit -m "auto: update catalog"
-      - run: git push
-      - run: npm run build
-      - name: Deploy
-        uses: peaceiris/actions-gh-pages@v4
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: ./dist
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index ce3886a..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-node_modules
-/dist
-
-# Logs
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
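The grab-data-build-deploy workflow deleted above guarded its daily commit with `git diff --cached --quiet || git commit ...`, so the bot only committed when the scraped data had actually changed. The same guard could be expressed in the project's new Python toolchain; the git commands below mirror that Commit data step, while the helper function itself is only a sketch, not part of this patch.

import subprocess


def commit_if_changed(path="data", message="auto: update catalog"):
    """Stage `path` and commit only when something is actually staged,
    mirroring `git diff --cached --quiet || git commit` from the old step."""
    subprocess.run(["git", "add", path], check=False)  # like `git add data || true`
    staged = subprocess.run(["git", "diff", "--cached", "--quiet"])
    if staged.returncode == 0:
        return False  # exit code 0 means nothing staged, so skip the commit
    subprocess.run(["git", "commit", "-m", message], check=True)
    return True

The check=False on the add mirrors the `|| true` in the original step, and the guard relies on git's exit status rather than on parsing any output.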
diff --git a/.venv/bin/Activate.ps1 b/.venv/bin/Activate.ps1
new file mode 100644
index 0000000..b49d77b
--- /dev/null
+++ b/.venv/bin/Activate.ps1
@@ -0,0 +1,247 @@
+<#
+.Synopsis
+Activate a Python virtual environment for the current PowerShell session.
+
+.Description
+Pushes the python executable for a virtual environment to the front of the
+$Env:PATH environment variable and sets the prompt to signify that you are
+in a Python virtual environment. Makes use of the command line switches as
+well as the `pyvenv.cfg` file values present in the virtual environment.
+
+.Parameter VenvDir
+Path to the directory that contains the virtual environment to activate. The
+default value for this is the parent of the directory that the Activate.ps1
+script is located within.
+
+.Parameter Prompt
+The prompt prefix to display when this virtual environment is activated. By
+default, this prompt is the name of the virtual environment folder (VenvDir)
+surrounded by parentheses and followed by a single space (ie. '(.venv) ').
+
+.Example
+Activate.ps1
+Activates the Python virtual environment that contains the Activate.ps1 script.
+
+.Example
+Activate.ps1 -Verbose
+Activates the Python virtual environment that contains the Activate.ps1 script,
+and shows extra information about the activation as it executes.
+
+.Example
+Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
+Activates the Python virtual environment located in the specified location.
+
+.Example
+Activate.ps1 -Prompt "MyPython"
+Activates the Python virtual environment that contains the Activate.ps1 script,
+and prefixes the current prompt with the specified string (surrounded in
+parentheses) while the virtual environment is active.
+
+.Notes
+On Windows, it may be required to enable this Activate.ps1 script by setting the
+execution policy for the user. You can do this by issuing the following PowerShell
+command:
+
+PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
+
+For more information on Execution Policies:
+https://go.microsoft.com/fwlink/?LinkID=135170
+
+#>
+Param(
+    [Parameter(Mandatory = $false)]
+    [String]
+    $VenvDir,
+    [Parameter(Mandatory = $false)]
+    [String]
+    $Prompt
+)
+
+<# Function declarations --------------------------------------------------- #>
+
+<#
+.Synopsis
+Remove all shell session elements added by the Activate script, including the
+addition of the virtual environment's Python executable from the beginning of
+the PATH variable.
+
+.Parameter NonDestructive
+If present, do not remove this function from the global namespace for the
+session.
+
+#>
+function global:deactivate ([switch]$NonDestructive) {
+    # Revert to original values
+
+    # The prior prompt:
+    if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
+        Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
+        Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
+    }
+
+    # The prior PYTHONHOME:
+    if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
+        Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
+        Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
+    }
+
+    # The prior PATH:
+    if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
+        Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
+        Remove-Item -Path Env:_OLD_VIRTUAL_PATH
+    }
+
+    # Just remove the VIRTUAL_ENV altogether:
+    if (Test-Path -Path Env:VIRTUAL_ENV) {
+        Remove-Item -Path env:VIRTUAL_ENV
+    }
+
+    # Just remove VIRTUAL_ENV_PROMPT altogether.
+    if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
+        Remove-Item -Path env:VIRTUAL_ENV_PROMPT
+    }
+
+    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
+    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
+        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
+    }
+
+    # Leave deactivate function in the global namespace if requested:
+    if (-not $NonDestructive) {
+        Remove-Item -Path function:deactivate
+    }
+}
+
+<#
+.Description
+Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
+given folder, and returns them in a map.
+
+For each line in the pyvenv.cfg file, if that line can be parsed into exactly
+two strings separated by `=` (with any amount of whitespace surrounding the =)
+then it is considered a `key = value` line. The left hand string is the key,
+the right hand is the value.
+
+If the value starts with a `'` or a `"` then the first and last character is
+stripped from the value before being captured.
+
+.Parameter ConfigDir
+Path to the directory that contains the `pyvenv.cfg` file.
+#>
+function Get-PyVenvConfig(
+    [String]
+    $ConfigDir
+) {
+    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
+
+    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
+    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
+
+    # An empty map will be returned if no config file is found.
+    $pyvenvConfig = @{ }
+
+    if ($pyvenvConfigPath) {
+
+        Write-Verbose "File exists, parse `key = value` lines"
+        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
+
+        $pyvenvConfigContent | ForEach-Object {
+            $keyval = $PSItem -split "\s*=\s*", 2
+            if ($keyval[0] -and $keyval[1]) {
+                $val = $keyval[1]
+
+                # Remove extraneous quotations around a string value.
+ if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. 
+$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } + $env:VIRTUAL_ENV_PROMPT = $Prompt +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/.venv/bin/activate b/.venv/bin/activate new file mode 100644 index 0000000..386b875 --- /dev/null +++ b/.venv/bin/activate @@ -0,0 +1,70 @@ +# This file must be used with "source bin/activate" *from bash* +# You cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # Call hash to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + hash -r 2> /dev/null + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +# on Windows, a path can contain colons and backslashes and has to be converted: +if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then + # transform D:\path\to\venv to /d/path/to/venv on MSYS + # and to /cygdrive/d/path/to/venv on Cygwin + export VIRTUAL_ENV=$(cygpath "/home/blackhao/uiuc-course-graph/.venv") +else + # use the path as-is + export VIRTUAL_ENV="/home/blackhao/uiuc-course-graph/.venv" +fi + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/bin:$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(.venv) ${PS1:-}" + export PS1 + VIRTUAL_ENV_PROMPT="(.venv) " + export VIRTUAL_ENV_PROMPT +fi + +# Call hash to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +hash -r 2> /dev/null diff --git a/.venv/bin/activate.csh b/.venv/bin/activate.csh new file mode 100644 index 0000000..873b40d --- /dev/null +++ b/.venv/bin/activate.csh @@ -0,0 +1,27 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. + +# Created by Davide Di Blasi . 
+# Ported to Python 3.3 venv by Andrew Svetlov + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV "/home/blackhao/uiuc-course-graph/.venv" + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = "(.venv) $prompt" + setenv VIRTUAL_ENV_PROMPT "(.venv) " +endif + +alias pydoc python -m pydoc + +rehash diff --git a/.venv/bin/activate.fish b/.venv/bin/activate.fish new file mode 100644 index 0000000..49348e8 --- /dev/null +++ b/.venv/bin/activate.fish @@ -0,0 +1,69 @@ +# This file must be used with "source /bin/activate.fish" *from fish* +# (https://fishshell.com/). You cannot run it directly. + +function deactivate -d "Exit virtual environment and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + set -e _OLD_FISH_PROMPT_OVERRIDE + # prevents error when using nested fish instances (Issue #93858) + if functions -q _old_fish_prompt + functions -e fish_prompt + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + end + + set -e VIRTUAL_ENV + set -e VIRTUAL_ENV_PROMPT + if test "$argv[1]" != "nondestructive" + # Self-destruct! + functions -e deactivate + end +end + +# Unset irrelevant variables. +deactivate nondestructive + +set -gx VIRTUAL_ENV "/home/blackhao/uiuc-course-graph/.venv" + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# Unset PYTHONHOME if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # Save the current fish_prompt function as the function _old_fish_prompt. + functions -c fish_prompt _old_fish_prompt + + # With the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command. + set -l old_status $status + + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) "(.venv) " (set_color normal) + + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. 
+ _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" + set -gx VIRTUAL_ENV_PROMPT "(.venv) " +end diff --git a/.venv/bin/f2py b/.venv/bin/f2py new file mode 100755 index 0000000..0ad2401 --- /dev/null +++ b/.venv/bin/f2py @@ -0,0 +1,7 @@ +#!/home/blackhao/uiuc-course-graph/.venv/bin/python +import sys +from numpy.f2py.f2py2e import main +if __name__ == '__main__': + if sys.argv[0].endswith('.exe'): + sys.argv[0] = sys.argv[0][:-4] + sys.exit(main()) diff --git a/.venv/bin/jsonschema b/.venv/bin/jsonschema new file mode 100755 index 0000000..a97b823 --- /dev/null +++ b/.venv/bin/jsonschema @@ -0,0 +1,7 @@ +#!/home/blackhao/uiuc-course-graph/.venv/bin/python +import sys +from jsonschema.cli import main +if __name__ == '__main__': + if sys.argv[0].endswith('.exe'): + sys.argv[0] = sys.argv[0][:-4] + sys.exit(main()) diff --git a/.venv/bin/normalizer b/.venv/bin/normalizer new file mode 100755 index 0000000..ac40f3f --- /dev/null +++ b/.venv/bin/normalizer @@ -0,0 +1,7 @@ +#!/home/blackhao/uiuc-course-graph/.venv/bin/python +import sys +from charset_normalizer.cli import cli_detect +if __name__ == '__main__': + if sys.argv[0].endswith('.exe'): + sys.argv[0] = sys.argv[0][:-4] + sys.exit(cli_detect()) diff --git a/.venv/bin/numpy-config b/.venv/bin/numpy-config new file mode 100755 index 0000000..def41d9 --- /dev/null +++ b/.venv/bin/numpy-config @@ -0,0 +1,7 @@ +#!/home/blackhao/uiuc-course-graph/.venv/bin/python +import sys +from numpy._configtool import main +if __name__ == '__main__': + if sys.argv[0].endswith('.exe'): + sys.argv[0] = sys.argv[0][:-4] + sys.exit(main()) diff --git a/.venv/bin/python b/.venv/bin/python new file mode 120000 index 0000000..b8a0adb --- /dev/null +++ b/.venv/bin/python @@ -0,0 +1 @@ +python3 \ No newline at end of file diff --git a/.venv/bin/python3 b/.venv/bin/python3 new file mode 120000 index 0000000..4d3e2b4 --- /dev/null +++ b/.venv/bin/python3 @@ -0,0 +1 @@ +/home/blackhao/miniconda3/bin/python3 \ No newline at end of file diff --git a/.venv/bin/python3.12 b/.venv/bin/python3.12 new file mode 120000 index 0000000..b8a0adb --- /dev/null +++ b/.venv/bin/python3.12 @@ -0,0 +1 @@ +python3 \ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/__pycache__/typing_extensions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/__pycache__/typing_extensions.cpython-312.pyc new file mode 100644 index 0000000..e46cd99 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/__pycache__/typing_extensions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__init__.py b/.venv/lib/python3.12/site-packages/attr/__init__.py new file mode 100644 index 0000000..5c6e065 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attr/__init__.py @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: MIT + +""" +Classes Without Boilerplate +""" + +from functools import partial +from typing import Callable, Literal, Protocol + +from . 
import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Converter, + Factory, + _Nothing, + attrib, + attrs, + evolve, + fields, + fields_dict, + make_class, + validate, +) +from ._next_gen import define, field, frozen, mutable +from ._version_info import VersionInfo + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + + +class AttrsInstance(Protocol): + pass + + +NothingType = Literal[_Nothing.NOTHING] + +__all__ = [ + "NOTHING", + "Attribute", + "AttrsInstance", + "Converter", + "Factory", + "NothingType", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "get_run_validators", + "has", + "ib", + "make_class", + "mutable", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + + +def _make_getattr(mod_name: str) -> Callable: + """ + Create a metadata proxy for packaging information that uses *mod_name* in + its warnings and errors. + """ + + def __getattr__(name: str) -> str: + if name not in ("__version__", "__version_info__"): + msg = f"module {mod_name} has no attribute {name}" + raise AttributeError(msg) + + from importlib.metadata import metadata + + meta = metadata("attrs") + + if name == "__version_info__": + return VersionInfo._from_version_string(meta["version"]) + + return meta["version"] + + return __getattr__ + + +__getattr__ = _make_getattr(__name__) diff --git a/.venv/lib/python3.12/site-packages/attr/__init__.pyi b/.venv/lib/python3.12/site-packages/attr/__init__.pyi new file mode 100644 index 0000000..133e501 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attr/__init__.pyi @@ -0,0 +1,389 @@ +import enum +import sys + +from typing import ( + Any, + Callable, + Generic, + Literal, + Mapping, + Protocol, + Sequence, + TypeVar, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . import validators as validators +from ._cmp import cmp_using as cmp_using +from ._typing_compat import AttrsInstance_ +from ._version_info import VersionInfo +from attrs import ( + define as define, + field as field, + mutable as mutable, + frozen as frozen, + _EqOrderType, + _ValidatorType, + _ConverterType, + _ReprArgType, + _OnSetAttrType, + _OnSetAttrArgType, + _FieldTransformer, + _ValidatorArgType, +) + +if sys.version_info >= (3, 10): + from typing import TypeGuard, TypeAlias +else: + from typing_extensions import TypeGuard, TypeAlias + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_FilterType = Callable[["Attribute[_T]", _T], bool] + +# We subclass this here to keep the protocol's qualified name clean. 
+class AttrsInstance(AttrsInstance_, Protocol): + pass + +_A = TypeVar("_A", bound=type[AttrsInstance]) + +class _Nothing(enum.Enum): + NOTHING = enum.auto() + +NOTHING = _Nothing.NOTHING +NothingType: TypeAlias = Literal[_Nothing.NOTHING] + +# NOTE: Factory lies about its return type to make this possible: +# `x: List[int] # = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. + +@overload +def Factory(factory: Callable[[], _T]) -> _T: ... +@overload +def Factory( + factory: Callable[[Any], _T], + takes_self: Literal[True], +) -> _T: ... +@overload +def Factory( + factory: Callable[[], _T], + takes_self: Literal[False], +) -> _T: ... + +In = TypeVar("In") +Out = TypeVar("Out") + +class Converter(Generic[In, Out]): + @overload + def __init__(self, converter: Callable[[In], Out]) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, AttrsInstance, Attribute], Out], + *, + takes_self: Literal[True], + takes_field: Literal[True], + ) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, Attribute], Out], + *, + takes_field: Literal[True], + ) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, AttrsInstance], Out], + *, + takes_self: Literal[True], + ) -> None: ... + +class Attribute(Generic[_T]): + name: str + default: _T | None + validator: _ValidatorType[_T] | None + repr: _ReprArgType + cmp: _EqOrderType + eq: _EqOrderType + order: _EqOrderType + hash: bool | None + init: bool + converter: Converter | None + metadata: dict[Any, Any] + type: type[_T] | None + kw_only: bool + on_setattr: _OnSetAttrType + alias: str | None + + def evolve(self, **changes: Any) -> "Attribute[Any]": ... + +# NOTE: We had several choices for the annotation to use for type arg: +# 1) Type[_T] +# - Pros: Handles simple cases correctly +# - Cons: Might produce less informative errors in the case of conflicting +# TypeVars e.g. `attr.ib(default='bad', type=int)` +# 2) Callable[..., _T] +# - Pros: Better error messages than #1 for conflicting TypeVars +# - Cons: Terrible error messages for validator checks. +# e.g. attr.ib(type=int, validator=validate_str) +# -> error: Cannot infer function type argument +# 3) type (and do all of the work in the mypy plugin) +# - Pros: Simple here, and we could customize the plugin with our own errors. +# - Cons: Would need to write mypy plugin code to handle all the cases. +# We chose option #1. + +# `attr` lies about its return type to make the following possible: +# attr() -> Any +# attr(8) -> int +# attr(validator=) -> Whatever the callable expects. +# This makes this type of assignments possible: +# x: int = attr(8) +# +# This form catches explicit None or no default but with no other arguments +# returns Any. +@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. 
+@overload +def attrib( + default: None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: type[_T] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: type[_T] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: _T | None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: object = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> Any: ... +@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: _C, + these: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., + unsafe_hash: bool | None = ..., +) -> _C: ... +@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., + unsafe_hash: bool | None = ..., +) -> Callable[[_C], _C]: ... 
+def fields(cls: type[AttrsInstance]) -> Any: ... +def fields_dict(cls: type[AttrsInstance]) -> dict[str, Attribute[Any]]: ... +def validate(inst: AttrsInstance) -> None: ... +def resolve_types( + cls: _A, + globalns: dict[str, Any] | None = ..., + localns: dict[str, Any] | None = ..., + attribs: list[Attribute[Any]] | None = ..., + include_extras: bool = ..., +) -> _A: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: list[str] | tuple[str, ...] | dict[str, Any], + bases: tuple[type, ...] = ..., + class_body: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + collect_by_mro: bool = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: _FilterType[Any] | None = ..., + dict_factory: type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Callable[[type, Attribute[Any], Any], Any] | None = ..., + tuple_keys: bool | None = ..., +) -> dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: _FilterType[Any] | None = ..., + tuple_factory: type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> tuple[Any, ...]: ... +def has(cls: type) -> TypeGuard[type[AttrsInstance]]: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... 
+ +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..326e7c8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/_cmp.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/_cmp.cpython-312.pyc new file mode 100644 index 0000000..157011c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/_cmp.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/_compat.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/_compat.cpython-312.pyc new file mode 100644 index 0000000..c9663a1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/_compat.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/_config.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/_config.cpython-312.pyc new file mode 100644 index 0000000..0f00a70 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/_config.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/_funcs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/_funcs.cpython-312.pyc new file mode 100644 index 0000000..8d3bd3f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/_funcs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/_make.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/_make.cpython-312.pyc new file mode 100644 index 0000000..2c5ec9e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/_make.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/_next_gen.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/_next_gen.cpython-312.pyc new file mode 100644 index 0000000..3e782bc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/_next_gen.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/_version_info.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/_version_info.cpython-312.pyc new file mode 100644 index 0000000..1af4992 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/_version_info.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/converters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/converters.cpython-312.pyc new file mode 100644 index 0000000..5be24ac Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/converters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..168a17a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/filters.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/attr/__pycache__/filters.cpython-312.pyc new file mode 100644 index 0000000..5a8f424 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/filters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/setters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/setters.cpython-312.pyc new file mode 100644 index 0000000..330a208 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/setters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/__pycache__/validators.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attr/__pycache__/validators.cpython-312.pyc new file mode 100644 index 0000000..e5580d6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attr/__pycache__/validators.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attr/_cmp.py b/.venv/lib/python3.12/site-packages/attr/_cmp.py new file mode 100644 index 0000000..09bab49 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attr/_cmp.py @@ -0,0 +1,160 @@ +# SPDX-License-Identifier: MIT + + +import functools +import types + +from ._make import __ne__ + + +_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="} + + +def cmp_using( + eq=None, + lt=None, + le=None, + gt=None, + ge=None, + require_same_type=True, + class_name="Comparable", +): + """ + Create a class that can be passed into `attrs.field`'s ``eq``, ``order``, + and ``cmp`` arguments to customize field comparison. + + The resulting class will have a full set of ordering methods if at least + one of ``{lt, le, gt, ge}`` and ``eq`` are provided. + + Args: + eq (typing.Callable | None): + Callable used to evaluate equality of two objects. + + lt (typing.Callable | None): + Callable used to evaluate whether one object is less than another + object. + + le (typing.Callable | None): + Callable used to evaluate whether one object is less than or equal + to another object. + + gt (typing.Callable | None): + Callable used to evaluate whether one object is greater than + another object. + + ge (typing.Callable | None): + Callable used to evaluate whether one object is greater than or + equal to another object. + + require_same_type (bool): + When `True`, equality and ordering methods will return + `NotImplemented` if objects are not of the same type. + + class_name (str | None): Name of class. Defaults to "Comparable". + + See `comparison` for more details. + + .. versionadded:: 21.1.0 + """ + + body = { + "__slots__": ["value"], + "__init__": _make_init(), + "_requirements": [], + "_is_comparable_to": _is_comparable_to, + } + + # Add operations. + num_order_functions = 0 + has_eq_function = False + + if eq is not None: + has_eq_function = True + body["__eq__"] = _make_operator("eq", eq) + body["__ne__"] = __ne__ + + if lt is not None: + num_order_functions += 1 + body["__lt__"] = _make_operator("lt", lt) + + if le is not None: + num_order_functions += 1 + body["__le__"] = _make_operator("le", le) + + if gt is not None: + num_order_functions += 1 + body["__gt__"] = _make_operator("gt", gt) + + if ge is not None: + num_order_functions += 1 + body["__ge__"] = _make_operator("ge", ge) + + type_ = types.new_class( + class_name, (object,), {}, lambda ns: ns.update(body) + ) + + # Add same type requirement. + if require_same_type: + type_._requirements.append(_check_same_type) + + # Add total ordering if at least one operation was defined. 
+    if 0 < num_order_functions < 4:
+        if not has_eq_function:
+            # functools.total_ordering requires __eq__ to be defined,
+            # so raise an early error here to keep a nice stack.
+            msg = "eq must be defined in order to complete ordering from lt, le, gt, ge."
+            raise ValueError(msg)
+        type_ = functools.total_ordering(type_)
+
+    return type_
+
+
+def _make_init():
+    """
+    Create __init__ method.
+    """
+
+    def __init__(self, value):
+        """
+        Initialize object with *value*.
+        """
+        self.value = value
+
+    return __init__
+
+
+def _make_operator(name, func):
+    """
+    Create operator method.
+    """
+
+    def method(self, other):
+        if not self._is_comparable_to(other):
+            return NotImplemented
+
+        result = func(self.value, other.value)
+        if result is NotImplemented:
+            return NotImplemented
+
+        return result
+
+    method.__name__ = f"__{name}__"
+    method.__doc__ = (
+        f"Return a {_operation_names[name]} b. Computed by attrs."
+    )
+
+    return method
+
+
+def _is_comparable_to(self, other):
+    """
+    Check whether `other` is comparable to `self`.
+    """
+    return all(func(self, other) for func in self._requirements)
+
+
+def _check_same_type(self, other):
+    """
+    Return True if *self* and *other* are of the same type, False otherwise.
+    """
+    return other.value.__class__ is self.value.__class__
diff --git a/.venv/lib/python3.12/site-packages/attr/_cmp.pyi b/.venv/lib/python3.12/site-packages/attr/_cmp.pyi
new file mode 100644
index 0000000..cc7893b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/_cmp.pyi
@@ -0,0 +1,13 @@
+from typing import Any, Callable
+
+_CompareWithType = Callable[[Any, Any], bool]
+
+def cmp_using(
+    eq: _CompareWithType | None = ...,
+    lt: _CompareWithType | None = ...,
+    le: _CompareWithType | None = ...,
+    gt: _CompareWithType | None = ...,
+    ge: _CompareWithType | None = ...,
+    require_same_type: bool = ...,
+    class_name: str = ...,
+) -> type: ...
diff --git a/.venv/lib/python3.12/site-packages/attr/_compat.py b/.venv/lib/python3.12/site-packages/attr/_compat.py
new file mode 100644
index 0000000..22fcd78
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/_compat.py
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: MIT
+
+import inspect
+import platform
+import sys
+import threading
+
+from collections.abc import Mapping, Sequence  # noqa: F401
+from typing import _GenericAlias
+
+
+PYPY = platform.python_implementation() == "PyPy"
+PY_3_9_PLUS = sys.version_info[:2] >= (3, 9)
+PY_3_10_PLUS = sys.version_info[:2] >= (3, 10)
+PY_3_11_PLUS = sys.version_info[:2] >= (3, 11)
+PY_3_12_PLUS = sys.version_info[:2] >= (3, 12)
+PY_3_13_PLUS = sys.version_info[:2] >= (3, 13)
+PY_3_14_PLUS = sys.version_info[:2] >= (3, 14)
+
+
+if PY_3_14_PLUS:  # pragma: no cover
+    import annotationlib
+
+    _get_annotations = annotationlib.get_annotations
+
+else:
+
+    def _get_annotations(cls):
+        """
+        Get annotations for *cls*.
+        """
+        return cls.__dict__.get("__annotations__", {})
+
+
+class _AnnotationExtractor:
+    """
+    Extract type annotations from a callable, returning None whenever there
+    is none.
+    """
+
+    __slots__ = ["sig"]
+
+    def __init__(self, callable):
+        try:
+            self.sig = inspect.signature(callable)
+        except (ValueError, TypeError):  # inspect failed
+            self.sig = None
+
+    def get_first_param_type(self):
+        """
+        Return the type annotation of the first argument if it's not empty.
+ """ + if not self.sig: + return None + + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation + + return None + + def get_return_type(self): + """ + Return the return type if it's not empty. + """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +# Thread-local global to track attrs instances which are already being repr'd. +# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() + + +def get_generic_base(cl): + """If this is a generic class (A[str]), return the generic base for it.""" + if cl.__class__ is _GenericAlias: + return cl.__origin__ + return None diff --git a/.venv/lib/python3.12/site-packages/attr/_config.py b/.venv/lib/python3.12/site-packages/attr/_config.py new file mode 100644 index 0000000..4b25772 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + +__all__ = ["get_run_validators", "set_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + msg = "'run' must be bool." + raise TypeError(msg) + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/.venv/lib/python3.12/site-packages/attr/_funcs.py b/.venv/lib/python3.12/site-packages/attr/_funcs.py new file mode 100644 index 0000000..c39fb8a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attr/_funcs.py @@ -0,0 +1,468 @@ +# SPDX-License-Identifier: MIT + + +import copy + +from ._compat import PY_3_9_PLUS, get_generic_base +from ._make import _OBJ_SETATTR, NOTHING, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the *attrs* attribute values of *inst* as a dict. + + Optionally recurse into other *attrs*-decorated classes. + + Args: + inst: Instance of an *attrs*-decorated class. + + recurse (bool): Recurse into classes that are also *attrs*-decorated. + + filter (~typing.Callable): + A callable whose return code determines whether an attribute or + element is included (`True`) or dropped (`False`). Is called with + the `attrs.Attribute` as the first argument and the value as the + second argument. 
+
+        dict_factory (~typing.Callable):
+            A callable to produce dictionaries from. For example, to produce
+            ordered dictionaries instead of normal Python dictionaries, pass in
+            ``collections.OrderedDict``.
+
+        retain_collection_types (bool):
+            Do not convert to `list` when encountering an attribute whose type
+            is `tuple` or `set`. Only meaningful if *recurse* is `True`.
+
+        value_serializer (typing.Callable | None):
+            A hook that is called for every attribute or dict key/value. It
+            receives the current instance, field and value and must return the
+            (updated) value. The hook is run *after* the optional *filter* has
+            been applied.
+
+    Returns:
+        Return type of *dict_factory*.
+
+    Raises:
+        attrs.exceptions.NotAnAttrsClassError:
+            If *cls* is not an *attrs* class.
+
+    .. versionadded:: 16.0.0 *dict_factory*
+    .. versionadded:: 16.1.0 *retain_collection_types*
+    .. versionadded:: 20.3.0 *value_serializer*
+    .. versionadded:: 21.3.0
+       If a dict has a collection for a key, it is serialized as a tuple.
+    """
+    attrs = fields(inst.__class__)
+    rv = dict_factory()
+    for a in attrs:
+        v = getattr(inst, a.name)
+        if filter is not None and not filter(a, v):
+            continue
+
+        if value_serializer is not None:
+            v = value_serializer(inst, a, v)
+
+        if recurse is True:
+            if has(v.__class__):
+                rv[a.name] = asdict(
+                    v,
+                    recurse=True,
+                    filter=filter,
+                    dict_factory=dict_factory,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                )
+            elif isinstance(v, (tuple, list, set, frozenset)):
+                cf = v.__class__ if retain_collection_types is True else list
+                items = [
+                    _asdict_anything(
+                        i,
+                        is_key=False,
+                        filter=filter,
+                        dict_factory=dict_factory,
+                        retain_collection_types=retain_collection_types,
+                        value_serializer=value_serializer,
+                    )
+                    for i in v
+                ]
+                try:
+                    rv[a.name] = cf(items)
+                except TypeError:
+                    if not issubclass(cf, tuple):
+                        raise
+                    # Workaround for TypeError: cf.__new__() missing 1 required
+                    # positional argument (which appears, for a namedtuple)
+                    rv[a.name] = cf(*items)
+            elif isinstance(v, dict):
+                df = dict_factory
+                rv[a.name] = df(
+                    (
+                        _asdict_anything(
+                            kk,
+                            is_key=True,
+                            filter=filter,
+                            dict_factory=df,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        ),
+                        _asdict_anything(
+                            vv,
+                            is_key=False,
+                            filter=filter,
+                            dict_factory=df,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        ),
+                    )
+                    for kk, vv in v.items()
+                )
+            else:
+                rv[a.name] = v
+        else:
+            rv[a.name] = v
+    return rv
+
+
+def _asdict_anything(
+    val,
+    is_key,
+    filter,
+    dict_factory,
+    retain_collection_types,
+    value_serializer,
+):
+    """
+    ``asdict`` only works on attrs instances; this works on anything.
+    """
+    if getattr(val.__class__, "__attrs_attrs__", None) is not None:
+        # Attrs class.
+ rv = asdict( + val, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(val, (tuple, list, set, frozenset)): + if retain_collection_types is True: + cf = val.__class__ + elif is_key: + cf = tuple + else: + cf = list + + rv = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in val + ] + ) + elif isinstance(val, dict): + df = dict_factory + rv = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in val.items() + ) + else: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the *attrs* attribute values of *inst* as a tuple. + + Optionally recurse into other *attrs*-decorated classes. + + Args: + inst: Instance of an *attrs*-decorated class. + + recurse (bool): + Recurse into classes that are also *attrs*-decorated. + + filter (~typing.Callable): + A callable whose return code determines whether an attribute or + element is included (`True`) or dropped (`False`). Is called with + the `attrs.Attribute` as the first argument and the value as the + second argument. + + tuple_factory (~typing.Callable): + A callable to produce tuples from. For example, to produce lists + instead of tuples. + + retain_collection_types (bool): + Do not convert to `list` or `dict` when encountering an attribute + which type is `tuple`, `dict` or `set`. Only meaningful if + *recurse* is `True`. + + Returns: + Return type of *tuple_factory* + + Raises: + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. 
:/
+    for a in attrs:
+        v = getattr(inst, a.name)
+        if filter is not None and not filter(a, v):
+            continue
+        if recurse is True:
+            if has(v.__class__):
+                rv.append(
+                    astuple(
+                        v,
+                        recurse=True,
+                        filter=filter,
+                        tuple_factory=tuple_factory,
+                        retain_collection_types=retain,
+                    )
+                )
+            elif isinstance(v, (tuple, list, set, frozenset)):
+                cf = v.__class__ if retain is True else list
+                items = [
+                    (
+                        astuple(
+                            j,
+                            recurse=True,
+                            filter=filter,
+                            tuple_factory=tuple_factory,
+                            retain_collection_types=retain,
+                        )
+                        if has(j.__class__)
+                        else j
+                    )
+                    for j in v
+                ]
+                try:
+                    rv.append(cf(items))
+                except TypeError:
+                    if not issubclass(cf, tuple):
+                        raise
+                    # Workaround for TypeError: cf.__new__() missing 1 required
+                    # positional argument (which appears, for a namedtuple)
+                    rv.append(cf(*items))
+            elif isinstance(v, dict):
+                df = v.__class__ if retain is True else dict
+                rv.append(
+                    df(
+                        (
+                            (
+                                astuple(
+                                    kk,
+                                    tuple_factory=tuple_factory,
+                                    retain_collection_types=retain,
+                                )
+                                if has(kk.__class__)
+                                else kk
+                            ),
+                            (
+                                astuple(
+                                    vv,
+                                    tuple_factory=tuple_factory,
+                                    retain_collection_types=retain,
+                                )
+                                if has(vv.__class__)
+                                else vv
+                            ),
+                        )
+                        for kk, vv in v.items()
+                    )
+                )
+            else:
+                rv.append(v)
+        else:
+            rv.append(v)
+
+    return rv if tuple_factory is list else tuple_factory(rv)
+
+
+def has(cls):
+    """
+    Check whether *cls* is a class with *attrs* attributes.
+
+    Args:
+        cls (type): Class to introspect.
+
+    Raises:
+        TypeError: If *cls* is not a class.
+
+    Returns:
+        bool:
+    """
+    attrs = getattr(cls, "__attrs_attrs__", None)
+    if attrs is not None:
+        return True
+
+    # No attrs, maybe it's a specialized generic (A[str])?
+    generic_base = get_generic_base(cls)
+    if generic_base is not None:
+        generic_attrs = getattr(generic_base, "__attrs_attrs__", None)
+        if generic_attrs is not None:
+            # Stick it on here for speed next time.
+            cls.__attrs_attrs__ = generic_attrs
+        return generic_attrs is not None
+    return False
+
+
+def assoc(inst, **changes):
+    """
+    Copy *inst* and apply *changes*.
+
+    This is different from `evolve`, which applies the changes to the
+    arguments that create the new instance.
+
+    `evolve`'s behavior is preferable, but there are `edge cases`_ where it
+    doesn't work. Therefore `assoc` is deprecated, but will not be removed.
+
+    .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251
+
+    Args:
+        inst: Instance of a class with *attrs* attributes.
+
+        changes: Keyword changes in the new copy.
+
+    Returns:
+        A copy of inst with *changes* incorporated.
+
+    Raises:
+        attrs.exceptions.AttrsAttributeNotFoundError:
+            If *attr_name* couldn't be found on *cls*.
+
+        attrs.exceptions.NotAnAttrsClassError:
+            If *cls* is not an *attrs* class.
+
+    .. deprecated:: 17.1.0
+       Use `attrs.evolve` instead if you can. This function will not be
+       removed due to the slightly different approach compared to
+       `attrs.evolve`, though.
+    """
+    new = copy.copy(inst)
+    attrs = fields(inst.__class__)
+    for k, v in changes.items():
+        a = getattr(attrs, k, NOTHING)
+        if a is NOTHING:
+            msg = f"{k} is not an attrs attribute on {new.__class__}."
+            raise AttrsAttributeNotFoundError(msg)
+        _OBJ_SETATTR(new, k, v)
+    return new
+
+
+def resolve_types(
+    cls, globalns=None, localns=None, attribs=None, include_extras=True
+):
+    """
+    Resolve any strings and forward annotations in type annotations.
+
+    This is only required if you need concrete types in :class:`Attribute`'s
+    *type* field. In other words, you don't need to resolve your types if you
+    only use them for static type checking.
+
+    With no arguments, names will be looked up in the module in which the class
+    was created. If this is not what you want, for example, if the name only
+    exists inside a method, you may pass *globalns* or *localns* to specify
+    other dictionaries in which to look up these names. See the docs of
+    `typing.get_type_hints` for more details.
+
+    Args:
+        cls (type): Class to resolve.
+
+        globalns (dict | None): Dictionary containing global variables.
+
+        localns (dict | None): Dictionary containing local variables.
+
+        attribs (list | None):
+            List of attribs for the given class. This is necessary when calling
+            from inside a ``field_transformer`` since *cls* is not an *attrs*
+            class yet.
+
+        include_extras (bool):
+            Resolve more accurately, if possible. Pass ``include_extras`` to
+            ``typing.get_type_hints``, if supported by the typing module. On
+            supported Python versions (3.9+), this resolves the types more
+            accurately.
+
+    Raises:
+        TypeError: If *cls* is not a class.
+
+        attrs.exceptions.NotAnAttrsClassError:
+            If *cls* is not an *attrs* class and you didn't pass any attribs.
+
+        NameError: If types cannot be resolved because of missing variables.
+
+    Returns:
+        *cls* so you can use this function also as a class decorator. Please
+        note that you have to apply it **after** `attrs.define`. That means the
+        decorator has to come in the line **before** `attrs.define`.
+
+    .. versionadded:: 20.1.0
+    .. versionadded:: 21.1.0 *attribs*
+    .. versionadded:: 23.1.0 *include_extras*
+    """
+    # Since calling get_type_hints is expensive we cache whether we've
+    # done it already.
+    if getattr(cls, "__attrs_types_resolved__", None) != cls:
+        import typing
+
+        kwargs = {"globalns": globalns, "localns": localns}
+
+        if PY_3_9_PLUS:
+            kwargs["include_extras"] = include_extras
+
+        hints = typing.get_type_hints(cls, **kwargs)
+        for field in fields(cls) if attribs is None else attribs:
+            if field.name in hints:
+                # Since fields have been frozen we must work around it.
+                _OBJ_SETATTR(field, "type", hints[field.name])
+        # We store the class we resolved so that subclasses know they haven't
+        # been resolved.
+        cls.__attrs_types_resolved__ = cls
+
+    # Return the class so you can use it as a decorator too.
+    return cls
diff --git a/.venv/lib/python3.12/site-packages/attr/_make.py b/.venv/lib/python3.12/site-packages/attr/_make.py
new file mode 100644
index 0000000..e84d979
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/_make.py
@@ -0,0 +1,3123 @@
+# SPDX-License-Identifier: MIT
+
+from __future__ import annotations
+
+import abc
+import contextlib
+import copy
+import enum
+import inspect
+import itertools
+import linecache
+import sys
+import types
+import unicodedata
+
+from collections.abc import Callable, Mapping
+from functools import cached_property
+from typing import Any, NamedTuple, TypeVar
+
+# We need to import _compat itself in addition to the _compat members to avoid
+# having the thread-local in the globals here.
+from . import _compat, _config, setters
+from ._compat import (
+    PY_3_10_PLUS,
+    PY_3_11_PLUS,
+    PY_3_13_PLUS,
+    _AnnotationExtractor,
+    _get_annotations,
+    get_generic_base,
+)
+from .exceptions import (
+    DefaultAlreadySetError,
+    FrozenInstanceError,
+    NotAnAttrsClassError,
+    UnannotatedAttributeError,
+)
+
+
+# This is used at least twice, so cache it here.
+_OBJ_SETATTR = object.__setattr__ +_INIT_FACTORY_PAT = "__attr_factory_%s" +_CLASSVAR_PREFIXES = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_HASH_CACHE_FIELD = "_attrs_cached_hash" + +_EMPTY_METADATA_SINGLETON = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_SENTINEL = object() + +_DEFAULT_ON_SETATTR = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(enum.Enum): + """ + Sentinel to indicate the lack of a value when `None` is ambiguous. + + If extending attrs, you can use ``typing.Literal[NOTHING]`` to show + that a value may be ``NOTHING``. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. + """ + + NOTHING = enum.auto() + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing.NOTHING +""" +Sentinel to indicate the lack of a value when `None` is ambiguous. + +When using in 3rd party code, use `attrs.NothingType` for type annotations. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since `None` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + def __reduce__(self, _none_constructor=type(None), _args=()): # noqa: B008 + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new field / attribute on a class. + + Identical to `attrs.field`, except it's not keyword-only. + + Consider using `attrs.field` in new code (``attr.ib`` will *never* go away, + though). + + .. warning:: + + Does **nothing** unless the class is also decorated with + `attr.s` (or similar)! + + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is `None` and therefore mirrors *eq* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 + *converter* as a replacement for the deprecated *convert* to achieve + consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. 
versionadded:: 22.2.0 *alias*
+    """
+    eq, eq_key, order, order_key = _determine_attrib_eq_order(
+        cmp, eq, order, True
+    )
+
+    if hash is not None and hash is not True and hash is not False:
+        msg = "Invalid value for hash. Must be True, False, or None."
+        raise TypeError(msg)
+
+    if factory is not None:
+        if default is not NOTHING:
+            msg = (
+                "The `default` and `factory` arguments are mutually exclusive."
+            )
+            raise ValueError(msg)
+        if not callable(factory):
+            msg = "The `factory` argument must be a callable."
+            raise ValueError(msg)
+        default = Factory(factory)
+
+    if metadata is None:
+        metadata = {}
+
+    # Apply syntactic sugar by auto-wrapping.
+    if isinstance(on_setattr, (list, tuple)):
+        on_setattr = setters.pipe(*on_setattr)
+
+    if validator and isinstance(validator, (list, tuple)):
+        validator = and_(*validator)
+
+    if converter and isinstance(converter, (list, tuple)):
+        converter = pipe(*converter)
+
+    return _CountingAttr(
+        default=default,
+        validator=validator,
+        repr=repr,
+        cmp=None,
+        hash=hash,
+        init=init,
+        converter=converter,
+        metadata=metadata,
+        type=type,
+        kw_only=kw_only,
+        eq=eq,
+        eq_key=eq_key,
+        order=order,
+        order_key=order_key,
+        on_setattr=on_setattr,
+        alias=alias,
+    )
+
+
+def _compile_and_eval(
+    script: str,
+    globs: dict[str, Any] | None,
+    locs: Mapping[str, object] | None = None,
+    filename: str = "",
+) -> None:
+    """
+    Evaluate the script with the given global (globs) and local (locs)
+    variables.
+    """
+    bytecode = compile(script, filename, "exec")
+    eval(bytecode, globs, locs)
+
+
+def _linecache_and_compile(
+    script: str,
+    filename: str,
+    globs: dict[str, Any] | None,
+    locals: Mapping[str, object] | None = None,
+) -> dict[str, Any]:
+    """
+    Cache the script with _linecache_, compile it and return the _locals_.
+    """
+
+    locs = {} if locals is None else locals
+
+    # In order for debuggers like PDB to be able to step through the code,
+    # we add a fake linecache entry.
+    count = 1
+    base_filename = filename
+    while True:
+        linecache_tuple = (
+            len(script),
+            None,
+            script.splitlines(True),
+            filename,
+        )
+        old_val = linecache.cache.setdefault(filename, linecache_tuple)
+        if old_val == linecache_tuple:
+            break
+
+        filename = f"{base_filename[:-1]}-{count}>"
+        count += 1
+
+    _compile_and_eval(script, globs, locs, filename)
+
+    return locs
+
+
+def _make_attr_tuple_class(cls_name: str, attr_names: list[str]) -> type:
+    """
+    Create a tuple subclass to hold `Attribute`s for an `attrs` class.
+
+    The subclass is a bare tuple with properties for names.
+
+    class MyClassAttributes(tuple):
+        __slots__ = ()
+        x = property(itemgetter(0))
+    """
+    attr_class_name = f"{cls_name}Attributes"
+    body = {}
+    for i, attr_name in enumerate(attr_names):
+
+        def getter(self, i=i):
+            return self[i]
+
+        body[attr_name] = property(getter)
+    return type(attr_class_name, (tuple,), body)
+
+
+# Tuple class for extracted attributes from a class definition.
+# `base_attrs` is a subset of `attrs`.
+class _Attributes(NamedTuple):
+    attrs: type
+    base_attrs: list[Attribute]
+    base_attrs_map: dict[str, type]
+
+
+def _is_class_var(annot):
+    """
+    Check whether *annot* is a typing.ClassVar.
+
+    The string comparison hack is used to avoid evaluating all string
+    annotations which would put attrs-based classes at a performance
+    disadvantage compared to plain old classes.
+    """
+    annot = str(annot)
+
+    # Annotation can be quoted.
+ if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_CLASSVAR_PREFIXES) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + """ + return attrib_name in cls.__dict__ + + +def _collect_base_attrs( + cls, taken_attr_names +) -> tuple[list[Attribute], dict[str, type]]: + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer +) -> _Attributes: + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + If *collect_by_mro* is True, collect them in the correct MRO order, + otherwise use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = list(these.items()) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if attr.__class__ is _CountingAttr + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if a.__class__ is not _CountingAttr: + a = attrib(a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if unannotated: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." 
+ ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if attr.__class__ is _CountingAttr + ), + key=lambda e: e[1].counter, + ) + + fca = Attribute.from_counting_attr + own_attrs = [ + fca(attr_name, ca, anns.get(attr_name)) for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + if field_transformer is not None: + attrs = tuple(field_transformer(cls, attrs)) + + # Check attr order after executing the field_transformer. + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + msg = f"No mandatory attributes allowed after an attribute with a default value or factory. Attribute in question: {a!r}" + raise ValueError(msg) + + if had_default is False and a.default is not NOTHING: + had_default = True + + # Resolve default field alias after executing field_transformer. + # This allows field_transformer to differentiate between explicit vs + # default aliases and supply their own defaults. + for a in attrs: + if not a.alias: + # Evolve is very slow, so we hold our nose and do it dirty. + _OBJ_SETATTR.__get__(a)("alias", _default_init_alias_for(a.name)) + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes(AttrsClass(attrs), base_attrs, base_attr_map) + + +def _make_cached_property_getattr(cached_properties, original_getattr, cls): + lines = [ + # Wrapped to get `__class__` into closure cell for super() + # (It will be replaced with the newly constructed class after construction). 
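+ # A sketch of the net effect (illustrative, simplified): for a class with
+ # a cached property ``x``, the generated __getattr__ computes ``x`` once
+ # on first access, stores the result via object.__setattr__ so it lands
+ # in the slot, and lets every later access bypass this hook entirely.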
+ "def wrapper(_cls):", + " __class__ = _cls", + " def __getattr__(self, item, cached_properties=cached_properties, original_getattr=original_getattr, _cached_setattr_get=_cached_setattr_get):", + " func = cached_properties.get(item)", + " if func is not None:", + " result = func(self)", + " _setter = _cached_setattr_get(self)", + " _setter(item, result)", + " return result", + ] + if original_getattr is not None: + lines.append( + " return original_getattr(self, item)", + ) + else: + lines.extend( + [ + " try:", + " return super().__getattribute__(item)", + " except AttributeError:", + " if not hasattr(super(), '__getattr__'):", + " raise", + " return super().__getattr__(item)", + " original_error = f\"'{self.__class__.__name__}' object has no attribute '{item}'\"", + " raise AttributeError(original_error)", + ] + ) + + lines.extend( + [ + " return __getattr__", + "__getattr__ = wrapper(_cls)", + ] + ) + + unique_filename = _generate_unique_filename(cls, "getattr") + + glob = { + "cached_properties": cached_properties, + "_cached_setattr_get": _OBJ_SETATTR.__get__, + "original_getattr": original_getattr, + } + + return _linecache_and_compile( + "\n".join(lines), unique_filename, glob, locals={"_cls": cls} + )["__getattr__"] + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + "__traceback__", + "__suppress_context__", + "__notes__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + if isinstance(self, BaseException) and name in ("__notes__",): + BaseException.__delattr__(self, name) + return + + raise FrozenInstanceError + + +def evolve(*args, **changes): + """ + Create a new instance, based on the first positional argument with + *changes* applied. + + .. tip:: + + On Python 3.13 and later, you can also use `copy.replace` instead. + + Args: + + inst: + Instance of a class with *attrs* attributes. *inst* must be passed + as a positional argument. + + changes: + Keyword changes in the new copy. + + Returns: + A copy of inst with *changes* incorporated. + + Raises: + TypeError: + If *attr_name* couldn't be found in the class ``__init__``. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 17.1.0 + .. deprecated:: 23.1.0 + It is now deprecated to pass the instance using the keyword argument + *inst*. It will raise a warning until at least April 2024, after which + it will become an error. Always pass the instance as a positional + argument. + .. versionchanged:: 24.1.0 + *inst* can't be passed as a keyword argument anymore. + """ + try: + (inst,) = args + except ValueError: + msg = ( + f"evolve() takes 1 positional argument, but {len(args)} were given" + ) + raise TypeError(msg) from None + + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. + init_name = a.alias + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + + return cls(**changes) + + +class _ClassBuilder: + """ + Iteratively build *one* class. 
+ """ + + __slots__ = ( + "_add_method_dunders", + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_custom_setattr", + "_has_post_init", + "_has_pre_init", + "_is_exc", + "_on_setattr", + "_pre_init_has_args", + "_repr_added", + "_script_snippets", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + ) + + def __init__( + self, + cls: type, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._pre_init_has_args = False + if self._has_pre_init: + # Check if the pre init method has more arguments than just `self` + # We want to pass arguments if pre init expects arguments + pre_init_func = cls.__attrs_pre_init__ + pre_init_signature = inspect.signature(pre_init_func) + self._pre_init_has_args = len(pre_init_signature.parameters) > 1 + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _DEFAULT_ON_SETATTR, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _DEFAULT_ON_SETATTR + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + # tuples of script, globs, hook + self._script_snippets: list[ + tuple[str, dict, Callable[[dict, dict], Any]] + ] = [] + self._repr_added = False + + # We want to only do this check once; in 99.9% of cases these + # exist. + if not hasattr(self._cls, "__module__") or not hasattr( + self._cls, "__qualname__" + ): + self._add_method_dunders = self._add_method_dunders_safe + else: + self._add_method_dunders = self._add_method_dunders_unsafe + + def __repr__(self): + return f"<_ClassBuilder(cls={self._cls.__name__})>" + + def _eval_snippets(self) -> None: + """ + Evaluate any registered snippets in one go. 
+ """ + script = "\n".join([snippet[0] for snippet in self._script_snippets]) + globs = {} + for _, snippet_globs, _ in self._script_snippets: + globs.update(snippet_globs) + + locs = _linecache_and_compile( + script, + _generate_unique_filename(self._cls, "methods"), + globs, + ) + + for _, _, hook in self._script_snippets: + hook(self._cls_dict, locs) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + self._eval_snippets() + if self._slots is True: + cls = self._create_slots_class() + else: + cls = self._patch_original_class() + if PY_3_10_PLUS: + cls = abc.update_abstractmethods(cls) + + # The method gets only called if it's not inherited from a base class. + # _has_own_attribute does NOT work properly for classmethods. + if ( + getattr(cls, "__attrs_init_subclass__", None) + and "__attrs_init_subclass__" not in cls.__dict__ + ): + cls.__attrs_init_subclass__() + + return cls + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _SENTINEL) is not _SENTINEL + ): + # An AttributeError can happen if a base class defines a + # class variable and we want to set an attribute with the + # same name by using only a type annotation. + with contextlib.suppress(AttributeError): + delattr(cls, name) + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _OBJ_SETATTR + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in (*tuple(self._attr_names), "__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = _OBJ_SETATTR + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. 
+ existing_slots = {}
+ weakref_inherited = False
+ for base_cls in self._cls.__mro__[1:-1]:
+ if base_cls.__dict__.get("__weakref__", None) is not None:
+ weakref_inherited = True
+ existing_slots.update(
+ {
+ name: getattr(base_cls, name)
+ for name in getattr(base_cls, "__slots__", [])
+ }
+ )
+
+ base_names = set(self._base_names)
+
+ names = self._attr_names
+ if (
+ self._weakref_slot
+ and "__weakref__" not in getattr(self._cls, "__slots__", ())
+ and "__weakref__" not in names
+ and not weakref_inherited
+ ):
+ names += ("__weakref__",)
+
+ cached_properties = {
+ name: cached_prop.func
+ for name, cached_prop in cd.items()
+ if isinstance(cached_prop, cached_property)
+ }
+
+ # Collect methods with a `__class__` reference that are shadowed in the
+ # new class, so we know to update them.
+ additional_closure_functions_to_update = []
+ if cached_properties:
+ class_annotations = _get_annotations(self._cls)
+ for name, func in cached_properties.items():
+ # Add cached properties to names for slotting.
+ names += (name,)
+ # Clear out function from class to avoid clashing.
+ del cd[name]
+ additional_closure_functions_to_update.append(func)
+ annotation = inspect.signature(func).return_annotation
+ if annotation is not inspect.Parameter.empty:
+ class_annotations[name] = annotation
+
+ original_getattr = cd.get("__getattr__")
+ if original_getattr is not None:
+ additional_closure_functions_to_update.append(original_getattr)
+
+ cd["__getattr__"] = _make_cached_property_getattr(
+ cached_properties, original_getattr, self._cls
+ )
+
+ # We only add the names of attributes that aren't inherited.
+ # Setting __slots__ to inherited attributes wastes memory.
+ slot_names = [name for name in names if name not in base_names]
+
+ # There are slots for attributes from current class
+ # that are defined in parent classes.
+ # As their descriptors may be overridden by a child class,
+ # we collect them here and update the class dict.
+ reused_slots = {
+ slot: slot_descriptor
+ for slot, slot_descriptor in existing_slots.items()
+ if slot in slot_names
+ }
+ slot_names = [name for name in slot_names if name not in reused_slots]
+ cd.update(reused_slots)
+ if self._cache_hash:
+ slot_names.append(_HASH_CACHE_FIELD)
+
+ cd["__slots__"] = tuple(slot_names)
+
+ cd["__qualname__"] = self._cls.__qualname__
+
+ # Create new class based on old class and our methods.
+ cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
+
+ # The following is a fix for
+ # <https://github.com/python-attrs/attrs/issues/102>.
+ # If a method mentions `__class__` or uses the no-arg super(), the
+ # compiler will bake a reference to the class in the method itself
+ # as `method.__closure__`. Since we replace the class with a
+ # clone, we rewrite these references so it keeps working.
+ for item in itertools.chain(
+ cls.__dict__.values(), additional_closure_functions_to_update
+ ):
+ if isinstance(item, (classmethod, staticmethod)):
+ # Class- and staticmethods hide their functions inside.
+ # These might need to be rewritten as well.
+ closure_cells = getattr(item.__func__, "__closure__", None)
+ elif isinstance(item, property):
+ # Workaround for property `super()` shortcut (PY3-only).
+ # There is no universal way for other descriptors.
+ closure_cells = getattr(item.fget, "__closure__", None)
+ else:
+ closure_cells = getattr(item, "__closure__", None)
+
+ if not closure_cells: # Catch None or the empty list.
+ continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # noqa: PERF203 + # ValueError: Cell is empty + pass + else: + if match: + cell.cell_contents = cls + return cls + + def add_repr(self, ns): + script, globs = _make_repr_script(self._attrs, ns) + + def _attach_repr(cls_dict, globs): + cls_dict["__repr__"] = self._add_method_dunders(globs["__repr__"]) + + self._script_snippets.append((script, globs, _attach_repr)) + self._repr_added = True + return self + + def add_str(self): + if not self._repr_added: + msg = "__str__ can only be generated if a __repr__ exists." + raise ValueError(msg) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return {name: getattr(self, name) for name in state_attr_names} + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _OBJ_SETATTR.__get__(self) + if isinstance(state, tuple): + # Backward compatibility with attrs instances pickled with + # attrs versions before v22.2.0 which stored tuples. + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + else: + for name in state_attr_names: + if name in state: + __bound_setattr(name, state[name]) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. 
+ if hash_caching_enabled: + __bound_setattr(_HASH_CACHE_FIELD, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + script, globs = _make_hash_script( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + + def attach_hash(cls_dict: dict, locs: dict) -> None: + cls_dict["__hash__"] = self._add_method_dunders(locs["__hash__"]) + + self._script_snippets.append((script, globs, attach_hash)) + + return self + + def add_init(self): + script, globs, annotations = _make_init_script( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + + def _attach_init(cls_dict, globs): + init = globs["__init__"] + init.__annotations__ = annotations + cls_dict["__init__"] = self._add_method_dunders(init) + + self._script_snippets.append((script, globs, _attach_init)) + + return self + + def add_replace(self): + self._cls_dict["__replace__"] = self._add_method_dunders( + lambda self, **changes: evolve(self, **changes) + ) + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + script, globs, annotations = _make_init_script( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + + def _attach_attrs_init(cls_dict, globs): + init = globs["__attrs_init__"] + init.__annotations__ = annotations + cls_dict["__attrs_init__"] = self._add_method_dunders(init) + + self._script_snippets.append((script, globs, _attach_attrs_init)) + + return self + + def add_eq(self): + cd = self._cls_dict + + script, globs = _make_eq_script(self._attrs) + + def _attach_eq(cls_dict, globs): + cls_dict["__eq__"] = self._add_method_dunders(globs["__eq__"]) + + self._script_snippets.append((script, globs, _attach_eq)) + + cd["__ne__"] = __ne__ + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + msg = "Can't combine custom __setattr__ with on_setattr hooks." + raise ValueError(msg) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _OBJ_SETATTR(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders_unsafe(self, method: Callable) -> Callable: + """ + Add __module__ and __qualname__ to a *method*. 
+ """ + method.__module__ = self._cls.__module__ + + method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}" + + method.__doc__ = ( + f"Method generated by attrs for class {self._cls.__qualname__}." + ) + + return method + + def _add_method_dunders_safe(self, method: Callable) -> Callable: + """ + Add __module__ and __qualname__ to a *method* if possible. + """ + with contextlib.suppress(AttributeError): + method.__module__ = self._cls.__module__ + + with contextlib.suppress(AttributeError): + method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}" + + with contextlib.suppress(AttributeError): + method.__doc__ = f"Method generated by attrs for class {self._cls.__qualname__}." + + return method + + +def _determine_attrs_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + msg = "Don't mix `cmp` with `eq' and `order`." + raise ValueError(msg) + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + return cmp, cmp + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq = default_eq + + if order is None: + order = eq + + if eq is False and order is True: + msg = "`order` can only be True if `eq` is True too." + raise ValueError(msg) + + return eq, order + + +def _determine_attrib_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + msg = "Don't mix `cmp` with `eq' and `order`." + raise ValueError(msg) + + def decide_callable_or_boolean(value): + """ + Decide whether a key function is used. + """ + if callable(value): + value, key = True, value + else: + key = None + return value, key + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + cmp, cmp_key = decide_callable_or_boolean(cmp) + return cmp, cmp_key, cmp, cmp_key + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq, eq_key = default_eq, None + else: + eq, eq_key = decide_callable_or_boolean(eq) + + if order is None: + order, order_key = eq, eq_key + else: + order, order_key = decide_callable_or_boolean(order) + + if eq is False and order is True: + msg = "`order` can only be True if `eq` is True too." + raise ValueError(msg) + + return eq, eq_key, order, order_key + + +def _determine_whether_to_implement( + cls, flag, auto_detect, dunders, default=True +): + """ + Check whether we should implement a set of methods for *cls*. + + *flag* is the argument passed into @attr.s like 'init', *auto_detect* the + same as passed into @attr.s and *dunders* is a tuple of attribute names + whose presence signal that the user has implemented it themselves. + + Return *default* if no reason for either for or against is found. + """ + if flag is True or flag is False: + return flag + + if flag is None and auto_detect is False: + return default + + # Logically, flag is None and auto_detect is True here. 
+ for dunder in dunders:
+ if _has_own_attribute(cls, dunder):
+ return False
+
+ return default
+
+
+def attrs(
+ maybe_cls=None,
+ these=None,
+ repr_ns=None,
+ repr=None,
+ cmp=None,
+ hash=None,
+ init=None,
+ slots=False,
+ frozen=False,
+ weakref_slot=True,
+ str=False,
+ auto_attribs=False,
+ kw_only=False,
+ cache_hash=False,
+ auto_exc=False,
+ eq=None,
+ order=None,
+ auto_detect=False,
+ collect_by_mro=False,
+ getstate_setstate=None,
+ on_setattr=None,
+ field_transformer=None,
+ match_args=True,
+ unsafe_hash=None,
+):
+ r"""
+ A class decorator that adds :term:`dunder methods` according to the
+ specified attributes using `attr.ib` or the *these* argument.
+
+ Consider using `attrs.define` / `attrs.frozen` in new code (``attr.s`` will
+ *never* go away, though).
+
+ Args:
+ repr_ns (str):
+ When using nested classes, there was no way in Python 2 to
+ automatically detect that. This argument allows setting a custom
+ name for a more meaningful ``repr`` output. This argument is
+ pointless in Python 3 and is therefore deprecated.
+
+ .. caution::
+ Refer to `attrs.define` for the rest of the parameters, but note that they
+ can have different defaults.
+
+ Notably, leaving *on_setattr* as `None` will **not** add any hooks.
+
+ .. versionadded:: 16.0.0 *slots*
+ .. versionadded:: 16.1.0 *frozen*
+ .. versionadded:: 16.3.0 *str*
+ .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+ .. versionchanged:: 17.1.0
+ *hash* supports `None` as value which is also the default now.
+ .. versionadded:: 17.3.0 *auto_attribs*
+ .. versionchanged:: 18.1.0
+ If *these* is passed, no attributes are deleted from the class body.
+ .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+ .. versionadded:: 18.2.0 *weakref_slot*
+ .. deprecated:: 18.2.0
+ ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+ `DeprecationWarning` if the classes compared are subclasses of
+ each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
+ to each other.
+ .. versionchanged:: 19.2.0
+ ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+ subclasses comparable anymore.
+ .. versionadded:: 18.2.0 *kw_only*
+ .. versionadded:: 18.2.0 *cache_hash*
+ .. versionadded:: 19.1.0 *auto_exc*
+ .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+ .. versionadded:: 19.2.0 *eq* and *order*
+ .. versionadded:: 20.1.0 *auto_detect*
+ .. versionadded:: 20.1.0 *collect_by_mro*
+ .. versionadded:: 20.1.0 *getstate_setstate*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionadded:: 20.3.0 *field_transformer*
+ .. versionchanged:: 21.1.0
+ ``init=False`` injects ``__attrs_init__``
+ .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
+ .. versionchanged:: 21.1.0 *cmp* undeprecated
+ .. versionadded:: 21.3.0 *match_args*
+ .. versionadded:: 22.2.0
+ *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
+ .. deprecated:: 24.1.0 *repr_ns*
+ .. versionchanged:: 24.1.0
+ Instances are not compared as tuples of attributes anymore, but using a
+ big ``and`` condition. This is faster and has more correct behavior for
+ uncomparable values like `math.nan`.
+ .. versionadded:: 24.1.0
+ If a class has an *inherited* classmethod called
+ ``__attrs_init_subclass__``, it is executed after the class is created.
+ .. deprecated:: 24.1.0 *hash* is deprecated in favor of *unsafe_hash*.
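+
+ A minimal usage sketch (illustrative):
+
+ >>> import attr
+ >>> @attr.s
+ ... class C:
+ ...     x = attr.ib(default=42)
+ >>> C()
+ C(x=42)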
+ """ + if repr_ns is not None: + import warnings + + warnings.warn( + DeprecationWarning( + "The `repr_ns` argument is deprecated and will be removed in or after August 2025." + ), + stacklevel=2, + ) + + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + + # unsafe_hash takes precedence due to PEP 681. + if unsafe_hash is not None: + hash = unsafe_hash + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + msg = "Can't freeze a class with a custom __setattr__." + raise ValueError(msg) + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + if not frozen: + builder.add_setattr() + + nonlocal hash + if ( + hash is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + msg = "Invalid value for hash. Must be True, False, or None." + raise TypeError(msg) + + if hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." + raise TypeError(msg) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." + raise TypeError(msg) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, init must be True." + raise TypeError(msg) + + if PY_3_13_PLUS and not _has_own_attribute(cls, "__replace__"): + builder.add_replace() + + if ( + PY_3_10_PLUS + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but `None` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. 
+""" + + +def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ is _frozen_setattrs + + +def _generate_unique_filename(cls: type, func_name: str) -> str: + """ + Create a "filename" suitable for a function being generated. + """ + return ( + f"" + ) + + +def _make_hash_script( + cls: type, attrs: list[Attribute], frozen: bool, cache_hash: bool +) -> tuple[str, dict]: + attrs = tuple( + a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) + ) + + tab = " " + + type_hash = hash(_generate_unique_filename(cls, "hash")) + # If eq is custom generated, we need to include the functions in globs + globs = {} + + hash_def = "def __hash__(self" + hash_func = "hash((" + closing_braces = "))" + if not cache_hash: + hash_def += "):" + else: + hash_def += ", *" + + hash_def += ", _cache_wrapper=__import__('attr._make')._make._CacheHashWrapper):" + hash_func = "_cache_wrapper(" + hash_func + closing_braces += ")" + + method_lines = [hash_def] + + def append_hash_computation_lines(prefix, indent): + """ + Generate the code for actually computing the hash code. + Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + f" {type_hash},", + ] + ) + + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + globs[cmp_name] = a.eq_key + method_lines.append( + indent + f" {cmp_name}(self.{a.name})," + ) + else: + method_lines.append(indent + f" self.{a.name},") + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + f"if self.{_HASH_CACHE_FIELD} is None:") + if frozen: + append_hash_computation_lines( + f"object.__setattr__(self, '{_HASH_CACHE_FIELD}', ", tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + f"self.{_HASH_CACHE_FIELD} = ", tab * 2 + ) + method_lines.append(tab + f"return self.{_HASH_CACHE_FIELD}") + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return script, globs + + +def _add_hash(cls: type, attrs: list[Attribute]): + """ + Add a hash method to *cls*. + """ + script, globs = _make_hash_script( + cls, attrs, frozen=False, cache_hash=False + ) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__hash__") + ) + cls.__hash__ = globs["__hash__"] + return cls + + +def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated. + """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + +def _make_eq_script(attrs: list) -> tuple[str, dict]: + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + globs = {} + if attrs: + lines.append(" return (") + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + # Add the key function to the global namespace + # of the evaluated function. 
+ globs[cmp_name] = a.eq_key + lines.append( + f" {cmp_name}(self.{a.name}) == {cmp_name}(other.{a.name})" + ) + else: + lines.append(f" self.{a.name} == other.{a.name}") + if a is not attrs[-1]: + lines[-1] = f"{lines[-1]} and" + lines.append(" )") + else: + lines.append(" return True") + + script = "\n".join(lines) + + return script, globs + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + script, globs = _make_eq_script(attrs) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__eq__") + ) + cls.__eq__ = globs["__eq__"] + cls.__ne__ = __ne__ + + return cls + + +def _make_repr_script(attrs, ns) -> tuple[str, dict]: + """ + Create the source and globs for a __repr__ and return it. + """ + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." 
+ name if i else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + f" return f'{cls_name_fragment}({repr_fragment})'", + " finally:", + " already_repring.remove(id(self))", + ] + + return "\n".join(lines), globs + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + script, globs = _make_repr_script(attrs, ns) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__repr__") + ) + cls.__repr__ = globs["__repr__"] + return cls + + +def fields(cls): + """ + Return the tuple of *attrs* attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + Returns: + tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. + .. versionchanged:: 23.1.0 Add support for generic classes. + """ + generic_base = get_generic_base(cls) + + if generic_base is None and not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + + attrs = getattr(cls, "__attrs_attrs__", None) + + if attrs is None: + if generic_base is not None: + attrs = getattr(generic_base, "__attrs_attrs__", None) + if attrs is not None: + # Even though this is global state, stick it on here to speed + # it up. We rely on `cls` being cached for this to be + # efficient. + cls.__attrs_attrs__ = attrs + return attrs + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of *attrs* attributes for a class, whose keys + are the attribute names. + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + Returns: + dict[str, attrs.Attribute]: Dict of attribute name to definition + + .. versionadded:: 18.1.0 + """ + if not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + return {a.name: a for a in attrs} + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + Args: + inst: Instance of a class with *attrs* attributes. 
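+
+ A minimal usage sketch (illustrative):
+
+ >>> import attr
+ >>> @attr.s
+ ... class C:
+ ...     x = attr.ib(validator=attr.validators.instance_of(int))
+ >>> validate(C(1))  # passes silently; an invalid value raises TypeError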
+ """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + cls = base_attr_map.get(a_name) + return cls and "__slots__" in cls.__dict__ + + +def _make_init_script( + cls, + attrs, + pre_init, + pre_init_has_args, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +) -> tuple[str, dict, dict]: + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + pre_init_has_args, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + "__attrs_init__" if attrs_init else "__init__", + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. + globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr_get"] = _OBJ_SETATTR.__get__ + + return script, globs, annotations + + +def _setattr(attr_name: str, value_var: str, has_on_setattr: bool) -> str: + """ + Use the cached object.setattr to set *attr_name* to *value_var*. + """ + return f"_setattr('{attr_name}', {value_var})" + + +def _setattr_with_converter( + attr_name: str, value_var: str, has_on_setattr: bool, converter: Converter +) -> str: + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return f"_setattr('{attr_name}', {converter._fmt_converter_call(attr_name, value_var)})" + + +def _assign(attr_name: str, value: str, has_on_setattr: bool) -> str: + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return f"self.{attr_name} = {value}" + + +def _assign_with_converter( + attr_name: str, value_var: str, has_on_setattr: bool, converter: Converter +) -> str: + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True, converter) + + return f"self.{attr_name} = {converter._fmt_converter_call(attr_name, value_var)}" + + +def _determine_setters( + frozen: bool, slots: bool, base_attr_map: dict[str, type] +): + """ + Determine the correct setter functions based on whether a class is frozen + and/or slotted. 
+ """ + if frozen is True: + if slots is True: + return (), _setattr, _setattr_with_converter + + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + + def fmt_setter( + attr_name: str, value_var: str, has_on_setattr: bool + ) -> str: + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return f"_inst_dict['{attr_name}'] = {value_var}" + + def fmt_setter_with_converter( + attr_name: str, + value_var: str, + has_on_setattr: bool, + converter: Converter, + ) -> str: + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr, converter + ) + + return f"_inst_dict['{attr_name}'] = {converter._fmt_converter_call(attr_name, value_var)}" + + return ( + ("_inst_dict = self.__dict__",), + fmt_setter, + fmt_setter_with_converter, + ) + + # Not frozen -- we can just assign directly. + return (), _assign, _assign_with_converter + + +def _attrs_to_init_script( + attrs: list[Attribute], + is_frozen: bool, + is_slotted: bool, + call_pre_init: bool, + pre_init_has_args: bool, + call_post_init: bool, + does_cache_hash: bool, + base_attr_map: dict[str, type], + is_exc: bool, + needs_cached_setattr: bool, + has_cls_on_setattr: bool, + method_name: str, +) -> tuple[str, dict, dict]: + """ + Return a script of an initializer for *attrs*, a dict of globals, and + annotations for the initializer. + + The globals are required by the generated script. + """ + lines = ["self.__attrs_pre_init__()"] if call_pre_init else [] + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. Note _setattr will be used again below if + # does_cache_hash is True. + "_setattr = _cached_setattr_get(self)" + ) + + extra_lines, fmt_setter, fmt_setter_with_converter = _determine_setters( + is_frozen, is_slotted, base_attr_map + ) + lines.extend(extra_lines) + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. 
+ names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not + # explicitly provided + arg_name = a.alias + + has_factory = isinstance(a.default, Factory) + maybe_self = "self" if has_factory and a.default.takes_self else "" + + if a.converter is not None and not isinstance(a.converter, Converter): + converter = Converter(a.converter) + else: + converter = a.converter + + if a.init is False: + if has_factory: + init_factory_name = _INIT_FACTORY_PAT % (a.name,) + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + elif converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + fmt_setter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = f"{arg_name}=attr_dict['{attr_name}'].default" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = f"{arg_name}=NOTHING" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append(f"if {arg_name} is not NOTHING:") + + init_factory_name = _INIT_FACTORY_PAT % (a.name,) + if converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and converter is None: + annotations[arg_name] = a.type + elif converter is not None and converter._first_param_type: + # Use the type from the converter if present. 
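+ # (E.g. a converter defined as ``def conv(v: str) -> int`` annotates the
+ # __init__ parameter as ``str`` -- illustrative.)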
+ annotations[arg_name] = converter._first_param_type
+
+ if attrs_to_validate: # we can skip this if there are no validators.
+ names_for_globals["_config"] = _config
+ lines.append("if _config._run_validators is True:")
+ for a in attrs_to_validate:
+ val_name = "__attr_validator_" + a.name
+ attr_name = "__attr_" + a.name
+ lines.append(f" {val_name}(self, {attr_name}, self.{a.name})")
+ names_for_globals[val_name] = a.validator
+ names_for_globals[attr_name] = a
+
+ if call_post_init:
+ lines.append("self.__attrs_post_init__()")
+
+ # Because this is set only after __attrs_post_init__ is called, a crash
+ # will result if post-init tries to access the hash code. This seemed
+ # preferable to setting this beforehand, in which case alteration to field
+ # values during post-init combined with post-init accessing the hash code
+ # would result in silent bugs.
+ if does_cache_hash:
+ if is_frozen:
+ if is_slotted:
+ init_hash_cache = f"_setattr('{_HASH_CACHE_FIELD}', None)"
+ else:
+ init_hash_cache = f"_inst_dict['{_HASH_CACHE_FIELD}'] = None"
+ else:
+ init_hash_cache = f"self.{_HASH_CACHE_FIELD} = None"
+ lines.append(init_hash_cache)
+
+ # For exceptions we rely on BaseException.__init__ for proper
+ # initialization.
+ if is_exc:
+ vals = ",".join(f"self.{a.name}" for a in attrs if a.init)
+
+ lines.append(f"BaseException.__init__(self, {vals})")
+
+ args = ", ".join(args)
+ pre_init_args = args
+ if kw_only_args:
+ # leading comma & kw_only args
+ args += f"{', ' if args else ''}*, {', '.join(kw_only_args)}"
+ pre_init_kw_only_args = ", ".join(
+ [
+ f"{kw_arg_name}={kw_arg_name}"
+ # We need to remove the defaults from the kw_only_args.
+ for kw_arg_name in (kwa.split("=")[0] for kwa in kw_only_args)
+ ]
+ )
+ pre_init_args += ", " if pre_init_args else ""
+ pre_init_args += pre_init_kw_only_args
+
+ if call_pre_init and pre_init_has_args:
+ # If pre init method has arguments, pass same arguments as `__init__`.
+ lines[0] = f"self.__attrs_pre_init__({pre_init_args})"
+
+ # Python <3.12 doesn't allow backslashes in f-strings.
+ NL = "\n "
+ return (
+ f"""def {method_name}(self, {args}):
+ {NL.join(lines) if lines else "pass"}
+""",
+ names_for_globals,
+ annotations,
+ )
+
+
+def _default_init_alias_for(name: str) -> str:
+ """
+ The default __init__ parameter name for a field.
+
+ This performs private-name adjustment via leading-underscore stripping,
+ and is the default value of Attribute.alias if not provided.
+ """
+
+ return name.lstrip("_")
+
+
+class Attribute:
+ """
+ *Read-only* representation of an attribute.
+
+ .. warning::
+
+ You should never instantiate this class yourself.
+
+ The class has *all* arguments of `attr.ib` (except for ``factory`` which is
+ only syntactic sugar for ``default=Factory(...)``) plus the following:
+
+ - ``name`` (`str`): The name of the attribute.
+ - ``alias`` (`str`): The __init__ parameter name of the attribute, after
+ any explicit overrides and default private-attribute-name handling.
+ - ``inherited`` (`bool`): Whether or not that attribute has been inherited
+ from a base class.
+ - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The
+ callables that are used for comparing and ordering objects by this
+ attribute, respectively. These are set by passing a callable to
+ `attr.ib`'s ``eq``, ``order``, or ``cmp`` arguments. See also
+ :ref:`comparison customization <custom-comparison>`.
+
+ Instances of this class are frequently used for introspection purposes
+ like:
+
+ - `fields` returns a tuple of them.
+ - Validators get them passed as the first argument.
+ - The :ref:`field transformer <transform-fields>` hook receives a list of
+ them.
+ - The ``alias`` property exposes the __init__ parameter name of the field,
+ with any overrides and default private-attribute handling applied.
+
+
+ .. versionadded:: 20.1.0 *inherited*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionchanged:: 20.2.0 *inherited* is not taken into account for
+ equality checks and hashing anymore.
+ .. versionadded:: 21.1.0 *eq_key* and *order_key*
+ .. versionadded:: 22.2.0 *alias*
+
+ For the full version history of the fields, see `attr.ib`.
+ """
+
+ # These slots must NOT be reordered because we use them later for
+ # instantiation.
+ __slots__ = ( # noqa: RUF023
+ "name",
+ "default",
+ "validator",
+ "repr",
+ "eq",
+ "eq_key",
+ "order",
+ "order_key",
+ "hash",
+ "init",
+ "metadata",
+ "type",
+ "converter",
+ "kw_only",
+ "inherited",
+ "on_setattr",
+ "alias",
+ )
+
+ def __init__(
+ self,
+ name,
+ default,
+ validator,
+ repr,
+ cmp, # XXX: unused, remove along with other cmp code.
+ hash,
+ init,
+ inherited,
+ metadata=None,
+ type=None,
+ converter=None,
+ kw_only=False,
+ eq=None,
+ eq_key=None,
+ order=None,
+ order_key=None,
+ on_setattr=None,
+ alias=None,
+ ):
+ eq, eq_key, order, order_key = _determine_attrib_eq_order(
+ cmp, eq_key or eq, order_key or order, True
+ )
+
+ # Cache this descriptor here to speed things up later.
+ bound_setattr = _OBJ_SETATTR.__get__(self)
+
+ # Despite the big red warning, people *do* instantiate `Attribute`
+ # themselves.
+ bound_setattr("name", name)
+ bound_setattr("default", default)
+ bound_setattr("validator", validator)
+ bound_setattr("repr", repr)
+ bound_setattr("eq", eq)
+ bound_setattr("eq_key", eq_key)
+ bound_setattr("order", order)
+ bound_setattr("order_key", order_key)
+ bound_setattr("hash", hash)
+ bound_setattr("init", init)
+ bound_setattr("converter", converter)
+ bound_setattr(
+ "metadata",
+ (
+ types.MappingProxyType(dict(metadata)) # Shallow copy
+ if metadata
+ else _EMPTY_METADATA_SINGLETON
+ ),
+ )
+ bound_setattr("type", type)
+ bound_setattr("kw_only", kw_only)
+ bound_setattr("inherited", inherited)
+ bound_setattr("on_setattr", on_setattr)
+ bound_setattr("alias", alias)
+
+ def __setattr__(self, name, value):
+ raise FrozenInstanceError
+
+ @classmethod
+ def from_counting_attr(cls, name: str, ca: _CountingAttr, type=None):
+ # type holds the annotated value. deal with conflicts:
+ if type is None:
+ type = ca.type
+ elif ca.type is not None:
+ msg = f"Type annotation and type argument cannot both be present for '{name}'."
+ raise ValueError(msg)
+ return cls(
+ name,
+ ca._default,
+ ca._validator,
+ ca.repr,
+ None,
+ ca.hash,
+ ca.init,
+ False,
+ ca.metadata,
+ type,
+ ca.converter,
+ ca.kw_only,
+ ca.eq,
+ ca.eq_key,
+ ca.order,
+ ca.order_key,
+ ca.on_setattr,
+ ca.alias,
+ )
+
+ # Don't use attrs.evolve since fields(Attribute) doesn't work
+ def evolve(self, **changes):
+ """
+ Copy *self* and apply *changes*.
+
+ This works similarly to `attrs.evolve` but that function does not work
+ with :class:`attrs.Attribute`.
+
+ It is mainly meant to be used for `transform-fields`.
+
+ .. versionadded:: 20.3.0
+ """
+ new = copy.copy(self)
+
+ new._setattrs(changes.items())
+
+ return new
+
+ # Don't use _add_pickle since fields(Attribute) doesn't work
+ def __getstate__(self):
+ """
+ Play nice with pickle.
+ """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _OBJ_SETATTR.__get__(self) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + ( + types.MappingProxyType(dict(value)) + if value + else _EMPTY_METADATA_SINGLETON + ), + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + alias=_default_init_alias_for(name), + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr: + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "_default", + "_validator", + "alias", + "converter", + "counter", + "eq", + "eq_key", + "hash", + "init", + "kw_only", + "metadata", + "on_setattr", + "order", + "order_key", + "repr", + "type", + ) + __attrs_attrs__ = ( + *tuple( + Attribute( + name=name, + alias=_default_init_alias_for(name), + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + "alias", + ) + ), + Attribute( + name="metadata", + alias="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + alias, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + self.alias = alias + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + Raises: + DefaultAlreadySetError: If default has been set before. + + .. 
versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + Args: + factory (typing.Callable): + A callable that takes either none or exactly one mandatory + positional argument depending on *takes_self*. + + takes_self (bool): + Pass the partially initialized instance that is being initialized + as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +class Converter: + """ + Stores a converter callable. + + Allows for the wrapped converter to take additional arguments. The + arguments are passed in the order they are documented. + + Args: + converter (Callable): A callable that converts the passed value. + + takes_self (bool): + Pass the partially initialized instance that is being initialized + as a positional argument. (default: `False`) + + takes_field (bool): + Pass the field definition (an :class:`Attribute`) into the + converter as a positional argument. (default: `False`) + + .. versionadded:: 24.1.0 + """ + + __slots__ = ( + "__call__", + "_first_param_type", + "_global_name", + "converter", + "takes_field", + "takes_self", + ) + + def __init__(self, converter, *, takes_self=False, takes_field=False): + self.converter = converter + self.takes_self = takes_self + self.takes_field = takes_field + + ex = _AnnotationExtractor(converter) + self._first_param_type = ex.get_first_param_type() + + if not (self.takes_self or self.takes_field): + self.__call__ = lambda value, _, __: self.converter(value) + elif self.takes_self and not self.takes_field: + self.__call__ = lambda value, instance, __: self.converter( + value, instance + ) + elif not self.takes_self and self.takes_field: + self.__call__ = lambda value, __, field: self.converter( + value, field + ) + else: + self.__call__ = lambda value, instance, field: self.converter( + value, instance, field + ) + + rt = ex.get_return_type() + if rt is not None: + self.__call__.__annotations__["return"] = rt + + @staticmethod + def _get_global_name(attr_name: str) -> str: + """ + Return the name that a converter for an attribute name *attr_name* + would have. + """ + return f"__attr_converter_{attr_name}" + + def _fmt_converter_call(self, attr_name: str, value_var: str) -> str: + """ + Return a string that calls the converter for an attribute name + *attr_name* and the value in variable named *value_var* according to + `self.takes_self` and `self.takes_field`. 
+ """ + if not (self.takes_self or self.takes_field): + return f"{self._get_global_name(attr_name)}({value_var})" + + if self.takes_self and self.takes_field: + return f"{self._get_global_name(attr_name)}({value_var}, self, attr_dict['{attr_name}'])" + + if self.takes_self: + return f"{self._get_global_name(attr_name)}({value_var}, self)" + + return f"{self._get_global_name(attr_name)}({value_var}, attr_dict['{attr_name}'])" + + def __getstate__(self): + """ + Return a dict containing only converter and takes_self -- the rest gets + computed when loading. + """ + return { + "converter": self.converter, + "takes_self": self.takes_self, + "takes_field": self.takes_field, + } + + def __setstate__(self, state): + """ + Load instance from state. + """ + self.__init__(**state) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in ("converter", "takes_self", "takes_field") +] + +Converter = _add_hash( + _add_eq(_add_repr(Converter, attrs=_f), attrs=_f), attrs=_f +) + + +def make_class( + name, attrs, bases=(object,), class_body=None, **attributes_arguments +): + r""" + A quick way to create a new class called *name* with *attrs*. + + .. note:: + + ``make_class()`` is a thin wrapper around `attr.s`, not `attrs.define` + which means that it doesn't come with some of the improved defaults. + + For example, if you want the same ``on_setattr`` behavior as in + `attrs.define`, you have to pass the hooks yourself: ``make_class(..., + on_setattr=setters.pipe(setters.convert, setters.validate)`` + + .. warning:: + + It is *your* duty to ensure that the class name and the attribute names + are valid identifiers. ``make_class()`` will *not* validate them for + you. + + Args: + name (str): The name for the new class. + + attrs (list | dict): + A list of names or a dictionary of mappings of names to `attr.ib`\ + s / `attrs.field`\ s. + + The order is deduced from the order of the names or attributes + inside *attrs*. Otherwise the order of the definition of the + attributes is used. + + bases (tuple[type, ...]): Classes that the new class will subclass. + + class_body (dict): + An optional dictionary of class attributes for the new class. + + attributes_arguments: Passed unmodified to `attr.s`. + + Returns: + type: A new class with *attrs*. + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + .. versionchanged:: 23.2.0 *class_body* + .. versionchanged:: 25.2.0 Class names can now be unicode. + """ + # Class identifiers are converted into the normal form NFKC while parsing + name = unicodedata.normalize("NFKC", name) + + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + msg = "attrs argument must be a dict or a list." 
+ raise TypeError(msg) + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if class_body is not None: + body.update(class_body) + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + with contextlib.suppress(AttributeError, ValueError): + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + + # We do it here for proper warnings with meaningful stacklevel. + cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + cls = _attrs(these=cls_dict, **attributes_arguments)(type_) + # Only add type annotations now or "_attrs()" will complain: + cls.__annotations__ = { + k: v.type for k, v in cls_dict.items() if v.type is not None + } + return cls + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, unsafe_hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + Args: + validators (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. + + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if they + have any. + + converters (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + return_instance = any(isinstance(c, Converter) for c in converters) + + if return_instance: + + def pipe_converter(val, inst, field): + for c in converters: + val = ( + c(val, inst, field) if isinstance(c, Converter) else c(val) + ) + + return val + + else: + + def pipe_converter(val): + for c in converters: + val = c(val) + + return val + + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = TypeVar("A") + pipe_converter.__annotations__.update({"val": A, "return": A}) + else: + # Get parameter type from first converter. + t = _AnnotationExtractor(converters[0]).get_first_param_type() + if t: + pipe_converter.__annotations__["val"] = t + + last = converters[-1] + if not PY_3_11_PLUS and isinstance(last, Converter): + last = last.__call__ + + # Get return type from last converter. 
+ rt = _AnnotationExtractor(last).get_return_type() + if rt: + pipe_converter.__annotations__["return"] = rt + + if return_instance: + return Converter(pipe_converter, takes_self=True, takes_field=True) + return pipe_converter diff --git a/.venv/lib/python3.12/site-packages/attr/_next_gen.py b/.venv/lib/python3.12/site-packages/attr/_next_gen.py new file mode 100644 index 0000000..9290664 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attr/_next_gen.py @@ -0,0 +1,623 @@ +# SPDX-License-Identifier: MIT + +""" +These are keyword-only APIs that call `attr.s` and `attr.ib` with different +default values. +""" + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + _DEFAULT_ON_SETATTR, + NOTHING, + _frozen_setattrs, + attrib, + attrs, +) +from .exceptions import UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + unsafe_hash=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + A class decorator that adds :term:`dunder methods` according to + :term:`fields ` specified using :doc:`type annotations `, + `field()` calls, or the *these* argument. + + Since *attrs* patches or replaces an existing class, you cannot use + `object.__init_subclass__` with *attrs* classes, because it runs too early. + As a replacement, you can define ``__attrs_init_subclass__`` on your class. + It will be called by *attrs* classes that subclass it after they're + created. See also :ref:`init-subclass`. + + Args: + slots (bool): + Create a :term:`slotted class ` that's more + memory-efficient. Slotted classes are generally superior to the + default dict classes, but have some gotchas you should know about, + so we encourage you to read the :term:`glossary entry `. + + auto_detect (bool): + Instead of setting the *init*, *repr*, *eq*, and *hash* arguments + explicitly, assume they are set to True **unless any** of the + involved methods for one of the arguments is implemented in the + *current* class (meaning, it is *not* inherited from some base + class). + + So, for example by implementing ``__eq__`` on a class yourself, + *attrs* will deduce ``eq=False`` and will create *neither* + ``__eq__`` *nor* ``__ne__`` (but Python classes come with a + sensible ``__ne__`` by default, so it *should* be enough to only + implement ``__eq__`` in most cases). + + Passing True or False` to *init*, *repr*, *eq*, or *hash* + overrides whatever *auto_detect* would determine. + + auto_exc (bool): + If the class subclasses `BaseException` (which implicitly includes + any subclass of any exception), the following happens to behave + like a well-behaved Python exception class: + + - the values for *eq*, *order*, and *hash* are ignored and the + instances compare and hash by the instance's ids [#]_ , + - all attributes that are either passed into ``__init__`` or have a + default value are additionally available as a tuple in the + ``args`` attribute, + - the value of *str* is ignored leaving ``__str__`` to base + classes. + + .. [#] + Note that *attrs* will *not* remove existing implementations of + ``__hash__`` or the equality methods. It just won't add own + ones. 
+ + on_setattr (~typing.Callable | list[~typing.Callable] | None | ~typing.Literal[attrs.setters.NO_OP]): + A callable that is run whenever the user attempts to set an + attribute (either by assignment like ``i.x = 42`` or by using + `setattr` like ``setattr(i, "x", 42)``). It receives the same + arguments as validators: the instance, the attribute that is being + modified, and the new value. + + If no exception is raised, the attribute is set to the return value + of the callable. + + If a list of callables is passed, they're automatically wrapped in + an `attrs.setters.pipe`. + + If left None, the default behavior is to run converters and + validators whenever an attribute is set. + + init (bool): + Create a ``__init__`` method that initializes the *attrs* + attributes. Leading underscores are stripped for the argument name, + unless an alias is set on the attribute. + + .. seealso:: + `init` shows advanced ways to customize the generated + ``__init__`` method, including executing code before and after. + + repr(bool): + Create a ``__repr__`` method with a human readable representation + of *attrs* attributes. + + str (bool): + Create a ``__str__`` method that is identical to ``__repr__``. This + is usually not necessary except for `Exception`\ s. + + eq (bool | None): + If True or None (default), add ``__eq__`` and ``__ne__`` methods + that check two instances for equality. + + .. seealso:: + `comparison` describes how to customize the comparison behavior + going as far comparing NumPy arrays. + + order (bool | None): + If True, add ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` + methods that behave like *eq* above and allow instances to be + ordered. + + They compare the instances as if they were tuples of their *attrs* + attributes if and only if the types of both classes are + *identical*. + + If `None` mirror value of *eq*. + + .. seealso:: `comparison` + + unsafe_hash (bool | None): + If None (default), the ``__hash__`` method is generated according + how *eq* and *frozen* are set. + + 1. If *both* are True, *attrs* will generate a ``__hash__`` for + you. + 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set + to None, marking it unhashable (which it is). + 3. If *eq* is False, ``__hash__`` will be left untouched meaning + the ``__hash__`` method of the base class will be used. If the + base class is `object`, this means it will fall back to id-based + hashing. + + Although not recommended, you can decide for yourself and force + *attrs* to create one (for example, if the class is immutable even + though you didn't freeze it programmatically) by passing True or + not. Both of these cases are rather special and should be used + carefully. + + .. seealso:: + + - Our documentation on `hashing`, + - Python's documentation on `object.__hash__`, + - and the `GitHub issue that led to the default \ behavior + `_ for more + details. + + hash (bool | None): + Deprecated alias for *unsafe_hash*. *unsafe_hash* takes precedence. + + cache_hash (bool): + Ensure that the object's hash code is computed only once and stored + on the object. If this is set to True, hashing must be either + explicitly or implicitly enabled for this class. If the hash code + is cached, avoid any reassignments of fields involved in hash code + computation or mutations of the objects those fields point to after + object creation. If such changes occur, the behavior of the + object's hash code is undefined. + + frozen (bool): + Make instances immutable after initialization. 
If someone attempts + to modify a frozen instance, `attrs.exceptions.FrozenInstanceError` + is raised. + + .. note:: + + 1. This is achieved by installing a custom ``__setattr__`` + method on your class, so you can't implement your own. + + 2. True immutability is impossible in Python. + + 3. This *does* have a minor a runtime performance `impact + ` when initializing new instances. In other + words: ``__init__`` is slightly slower with ``frozen=True``. + + 4. If a class is frozen, you cannot modify ``self`` in + ``__attrs_post_init__`` or a self-written ``__init__``. You + can circumvent that limitation by using + ``object.__setattr__(self, "attribute_name", value)``. + + 5. Subclasses of a frozen class are frozen too. + + kw_only (bool): + Make all attributes keyword-only in the generated ``__init__`` (if + *init* is False, this parameter is ignored). + + weakref_slot (bool): + Make instances weak-referenceable. This has no effect unless + *slots* is True. + + field_transformer (~typing.Callable | None): + A function that is called with the original class object and all + fields right before *attrs* finalizes the class. You can use this, + for example, to automatically add converters or validators to + fields based on their types. + + .. seealso:: `transform-fields` + + match_args (bool): + If True (default), set ``__match_args__`` on the class to support + :pep:`634` (*Structural Pattern Matching*). It is a tuple of all + non-keyword-only ``__init__`` parameter names on Python 3.10 and + later. Ignored on older Python versions. + + collect_by_mro (bool): + If True, *attrs* collects attributes from base classes correctly + according to the `method resolution order + `_. If False, *attrs* + will mimic the (wrong) behavior of `dataclasses` and :pep:`681`. + + See also `issue #428 + `_. + + getstate_setstate (bool | None): + .. note:: + + This is usually only interesting for slotted classes and you + should probably just set *auto_detect* to True. + + If True, ``__getstate__`` and ``__setstate__`` are generated and + attached to the class. This is necessary for slotted classes to be + pickleable. If left None, it's True by default for slotted classes + and False for dict classes. + + If *auto_detect* is True, and *getstate_setstate* is left None, and + **either** ``__getstate__`` or ``__setstate__`` is detected + directly on the class (meaning: not inherited), it is set to False + (this is usually what you want). + + auto_attribs (bool | None): + If True, look at type annotations to determine which attributes to + use, like `dataclasses`. If False, it will only look for explicit + :func:`field` class attributes, like classic *attrs*. + + If left None, it will guess: + + 1. If any attributes are annotated and no unannotated + `attrs.field`\ s are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.field`\ s. + + If *attrs* decides to look at type annotations, **all** fields + **must** be annotated. If *attrs* encounters a field that is set to + a :func:`field` / `attr.ib` but lacks a type annotation, an + `attrs.exceptions.UnannotatedAttributeError` is raised. Use + ``field_name: typing.Any = field(...)`` if you don't want to set a + type. + + .. warning:: + + For features that use the attribute name to create decorators + (for example, :ref:`validators `), you still *must* + assign :func:`field` / `attr.ib` to them. 
Otherwise Python will + either not find the name or try to use the default value to + call, for example, ``validator`` on it. + + Attributes annotated as `typing.ClassVar`, and attributes that are + neither annotated nor set to an `field()` are **ignored**. + + these (dict[str, object]): + A dictionary of name to the (private) return value of `field()` + mappings. This is useful to avoid the definition of your attributes + within the class body because you can't (for example, if you want + to add ``__repr__`` methods to Django models) or don't want to. + + If *these* is not `None`, *attrs* will *not* search the class body + for attributes and will *not* remove any attributes from it. + + The order is deduced from the order of the attributes inside + *these*. + + Arguably, this is a rather obscure feature. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + .. versionchanged:: 24.1.0 + Instances are not compared as tuples of attributes anymore, but using a + big ``and`` condition. This is faster and has more correct behavior for + uncomparable values like `math.nan`. + .. versionadded:: 24.1.0 + If a class has an *inherited* classmethod called + ``__attrs_init_subclass__``, it is executed after the class is created. + .. deprecated:: 24.1.0 *hash* is deprecated in favor of *unsafe_hash*. + .. versionadded:: 24.3.0 + Unless already present, a ``__replace__`` method is automatically + created for `copy.replace` (Python 3.13+ only). + + .. note:: + + The main differences to the classic `attr.s` are: + + - Automatically detect whether or not *auto_attribs* should be `True` + (c.f. *auto_attribs* parameter). + - Converters and validators run when attributes are set by default -- + if *frozen* is `False`. + - *slots=True* + + Usually, this has only upsides and few visible effects in everyday + programming. But it *can* lead to some surprising behaviors, so + please make sure to read :term:`slotted classes`. + + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - Some options that were only relevant on Python 2 or were kept around + for backwards-compatibility have been removed. + + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + unsafe_hash=unsafe_hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _DEFAULT_ON_SETATTR + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + msg = "Frozen classes can't use on_setattr (frozen-ness was inherited)." 
+ raise ValueError(msg) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but `None` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new :term:`field` / :term:`attribute` on a class. + + .. warning:: + + Does **nothing** unless the class is also decorated with + `attrs.define` (or similar)! + + Args: + default: + A value that is used if an *attrs*-generated ``__init__`` is used + and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will + be used to construct a new value (useful for mutable data types + like lists or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a + value *must* be supplied when instantiating; otherwise a + `TypeError` will be raised. + + .. seealso:: `defaults` + + factory (~typing.Callable): + Syntactic sugar for ``default=attr.Factory(factory)``. + + validator (~typing.Callable | list[~typing.Callable]): + Callable that is called by *attrs*-generated ``__init__`` methods + after the instance has been initialized. They receive the + initialized instance, the :func:`~attrs.Attribute`, and the passed + value. + + The return value is *not* inspected so the validator has to throw + an exception itself. + + If a `list` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + `attrs.validators.get_disabled` / `attrs.validators.set_disabled`. + + The validator can also be set using decorator notation as shown + below. + + .. seealso:: :ref:`validators` + + repr (bool | ~typing.Callable): + Include this attribute in the generated ``__repr__`` method. If + True, include the attribute; if False, omit it. By default, the + built-in ``repr()`` function is used. To override how the attribute + value is formatted, pass a ``callable`` that takes a single value + and returns a string. Note that the resulting string is used as-is, + which means it will be used directly *instead* of calling + ``repr()`` (the default). + + eq (bool | ~typing.Callable): + If True (default), include this attribute in the generated + ``__eq__`` and ``__ne__`` methods that check two instances for + equality. To override how the attribute value is compared, pass a + callable that takes a single value and returns the value to be + compared. + + .. seealso:: `comparison` + + order (bool | ~typing.Callable): + If True (default), include this attributes in the generated + ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. To + override how the attribute value is ordered, pass a callable that + takes a single value and returns the value to be ordered. + + .. seealso:: `comparison` + + hash (bool | None): + Include this attribute in the generated ``__hash__`` method. If + None (default), mirror *eq*'s value. This is the correct behavior + according the Python spec. 
Setting this value to anything else + than None is *discouraged*. + + .. seealso:: `hashing` + + init (bool): + Include this attribute in the generated ``__init__`` method. + + It is possible to set this to False and set a default value. In + that case this attributed is unconditionally initialized with the + specified default value or factory. + + .. seealso:: `init` + + converter (typing.Callable | Converter): + A callable that is called by *attrs*-generated ``__init__`` methods + to convert attribute's value to the desired format. + + If a vanilla callable is passed, it is given the passed-in value as + the only positional argument. It is possible to receive additional + arguments by wrapping the callable in a `Converter`. + + Either way, the returned value will be used as the new value of the + attribute. The value is converted before being passed to the + validator, if any. + + .. seealso:: :ref:`converters` + + metadata (dict | None): + An arbitrary mapping, to be used by third-party code. + + .. seealso:: `extending-metadata`. + + type (type): + The type of the attribute. Nowadays, the preferred method to + specify the type is using a variable annotation (see :pep:`526`). + This argument is provided for backwards-compatibility and for usage + with `make_class`. Regardless of the approach used, the type will + be stored on ``Attribute.type``. + + Please note that *attrs* doesn't do anything with this metadata by + itself. You can use it as part of your own code or for `static type + checking `. + + kw_only (bool): + Make this attribute keyword-only in the generated ``__init__`` (if + ``init`` is False, this parameter is ignored). + + on_setattr (~typing.Callable | list[~typing.Callable] | None | ~typing.Literal[attrs.setters.NO_OP]): + Allows to overwrite the *on_setattr* setting from `attr.s`. If left + None, the *on_setattr* value from `attr.s` is used. Set to + `attrs.setters.NO_OP` to run **no** `setattr` hooks for this + attribute -- regardless of the setting in `define()`. + + alias (str | None): + Override this attribute's parameter name in the generated + ``__init__`` method. If left None, default to ``name`` stripped + of leading underscores. See `private-attributes`. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionadded:: 22.2.0 *alias* + .. versionadded:: 23.1.0 + The *type* parameter has been re-added; mostly for `attrs.make_class`. + Please note that type checkers ignore this metadata. + + .. seealso:: + + `attr.ib` + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + type=type, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + alias=alias, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. 
versionadded:: 21.3.0
+    """
+    return _astuple(
+        inst=inst, recurse=recurse, filter=filter, retain_collection_types=True
+    )
diff --git a/.venv/lib/python3.12/site-packages/attr/_typing_compat.pyi b/.venv/lib/python3.12/site-packages/attr/_typing_compat.pyi
new file mode 100644
index 0000000..ca7b71e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/_typing_compat.pyi
@@ -0,0 +1,15 @@
+from typing import Any, ClassVar, Protocol
+
+# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`.
+MYPY = False
+
+if MYPY:
+    # A protocol to be able to statically accept an attrs class.
+    class AttrsInstance_(Protocol):
+        __attrs_attrs__: ClassVar[Any]
+
+else:
+    # For type checkers without plug-in support use an empty protocol that
+    # will (hopefully) be combined into a union.
+    class AttrsInstance_(Protocol):
+        pass
diff --git a/.venv/lib/python3.12/site-packages/attr/_version_info.py b/.venv/lib/python3.12/site-packages/attr/_version_info.py
new file mode 100644
index 0000000..51a1312
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/_version_info.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+
+from functools import total_ordering
+
+from ._funcs import astuple
+from ._make import attrib, attrs
+
+
+@total_ordering
+@attrs(eq=False, order=False, slots=True, frozen=True)
+class VersionInfo:
+    """
+    A version object that can be compared to tuple of length 1--4:
+
+    >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
+    True
+    >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
+    True
+    >>> vi = attr.VersionInfo(19, 2, 0, "final")
+    >>> vi < (19, 1, 1)
+    False
+    >>> vi < (19,)
+    False
+    >>> vi == (19, 2,)
+    True
+    >>> vi == (19, 2, 1)
+    False
+
+    .. versionadded:: 19.2
+    """
+
+    year = attrib(type=int)
+    minor = attrib(type=int)
+    micro = attrib(type=int)
+    releaselevel = attrib(type=str)
+
+    @classmethod
+    def _from_version_string(cls, s):
+        """
+        Parse *s* and return a _VersionInfo.
+        """
+        v = s.split(".")
+        if len(v) == 3:
+            v.append("final")
+
+        return cls(
+            year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
+        )
+
+    def _ensure_tuple(self, other):
+        """
+        Ensure *other* is a tuple of a valid length.
+
+        Returns a possibly transformed *other* and ourselves as a tuple of
+        the same length as *other*.
+        """
+
+        if self.__class__ is other.__class__:
+            other = astuple(other)
+
+        if not isinstance(other, tuple):
+            raise NotImplementedError
+
+        if not (1 <= len(other) <= 4):
+            raise NotImplementedError
+
+        return astuple(self)[: len(other)], other
+
+    def __eq__(self, other):
+        try:
+            us, them = self._ensure_tuple(other)
+        except NotImplementedError:
+            return NotImplemented
+
+        return us == them
+
+    def __lt__(self, other):
+        try:
+            us, them = self._ensure_tuple(other)
+        except NotImplementedError:
+            return NotImplemented
+
+        # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
+        # have to do anything special with releaselevel for now.
+        return us < them
diff --git a/.venv/lib/python3.12/site-packages/attr/_version_info.pyi b/.venv/lib/python3.12/site-packages/attr/_version_info.pyi
new file mode 100644
index 0000000..45ced08
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/_version_info.pyi
@@ -0,0 +1,9 @@
+class VersionInfo:
+    @property
+    def year(self) -> int: ...
+    @property
+    def minor(self) -> int: ...
+    @property
+    def micro(self) -> int: ...
+    @property
+    def releaselevel(self) -> str: ...
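[Editor's note] The snippet below is not part of the patch; it is a minimal usage sketch of the difference that the `attrs.asdict` / `attrs.astuple` wrappers defined above codify (always passing `retain_collection_types=True`), compared to the classic `attr.asdict`. The `Point` class is hypothetical.

    import attr
    import attrs

    @attrs.define
    class Point:
        coords: tuple[int, int] = (1, 2)

    p = Point()

    # Classic attr.asdict defaults to retain_collection_types=False,
    # so the tuple field comes back as a list:
    print(attr.asdict(p))    # {'coords': [1, 2]}

    # The attrs.asdict wrapper always retains collection types,
    # so the tuple survives; same for attrs.astuple:
    print(attrs.asdict(p))   # {'coords': (1, 2)}
    print(attrs.astuple(p))  # ((1, 2),)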
diff --git a/.venv/lib/python3.12/site-packages/attr/converters.py b/.venv/lib/python3.12/site-packages/attr/converters.py
new file mode 100644
index 0000000..0a79dee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/converters.py
@@ -0,0 +1,162 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful converters.
+"""
+
+import typing
+
+from ._compat import _AnnotationExtractor
+from ._make import NOTHING, Converter, Factory, pipe
+
+
+__all__ = [
+    "default_if_none",
+    "optional",
+    "pipe",
+    "to_bool",
+]
+
+
+def optional(converter):
+    """
+    A converter that allows an attribute to be optional. An optional attribute
+    is one which can be set to `None`.
+
+    Type annotations will be inferred from the wrapped converter's, if it has
+    any.
+
+    Args:
+        converter (typing.Callable):
+            the converter that is used for non-`None` values.
+
+    .. versionadded:: 17.1.0
+    """
+
+    if isinstance(converter, Converter):
+
+        def optional_converter(val, inst, field):
+            if val is None:
+                return None
+            return converter(val, inst, field)
+
+    else:
+
+        def optional_converter(val):
+            if val is None:
+                return None
+            return converter(val)
+
+    xtr = _AnnotationExtractor(converter)
+
+    t = xtr.get_first_param_type()
+    if t:
+        optional_converter.__annotations__["val"] = typing.Optional[t]
+
+    rt = xtr.get_return_type()
+    if rt:
+        optional_converter.__annotations__["return"] = typing.Optional[rt]
+
+    if isinstance(converter, Converter):
+        return Converter(optional_converter, takes_self=True, takes_field=True)
+
+    return optional_converter
+
+
+def default_if_none(default=NOTHING, factory=None):
+    """
+    A converter that allows replacing `None` values with *default* or the
+    result of *factory*.
+
+    Args:
+        default:
+            Value to be used if `None` is passed. Passing an instance of
+            `attrs.Factory` is supported, however the ``takes_self`` option is
+            *not*.
+
+        factory (typing.Callable):
+            A callable that takes no parameters whose result is used if `None`
+            is passed.
+
+    Raises:
+        TypeError: If **neither** *default* **nor** *factory* is passed.
+
+        TypeError: If **both** *default* and *factory* are passed.
+
+        ValueError:
+            If an instance of `attrs.Factory` is passed with
+            ``takes_self=True``.
+
+    .. versionadded:: 18.2.0
+    """
+    if default is NOTHING and factory is None:
+        msg = "Must pass either `default` or `factory`."
+        raise TypeError(msg)
+
+    if default is not NOTHING and factory is not None:
+        msg = "Must pass either `default` or `factory` but not both."
+        raise TypeError(msg)
+
+    if factory is not None:
+        default = Factory(factory)
+
+    if isinstance(default, Factory):
+        if default.takes_self:
+            msg = "`takes_self` is not supported by default_if_none."
+            raise ValueError(msg)
+
+        def default_if_none_converter(val):
+            if val is not None:
+                return val
+
+            return default.factory()
+
+    else:
+
+        def default_if_none_converter(val):
+            if val is not None:
+                return val
+
+            return default
+
+    return default_if_none_converter
+
+
+def to_bool(val):
+    """
+    Convert "boolean" strings (for example, from environment variables) to real
+    booleans.
+
+    Values mapping to `True`:
+
+    - ``True``
+    - ``"true"`` / ``"t"``
+    - ``"yes"`` / ``"y"``
+    - ``"on"``
+    - ``"1"``
+    - ``1``
+
+    Values mapping to `False`:
+
+    - ``False``
+    - ``"false"`` / ``"f"``
+    - ``"no"`` / ``"n"``
+    - ``"off"``
+    - ``"0"``
+    - ``0``
+
+    Raises:
+        ValueError: For any other value.
+
+    .. versionadded:: 21.3.0
+    """
+    if isinstance(val, str):
+        val = val.lower()
+
+    if val in (True, "true", "t", "yes", "y", "on", "1", 1):
+        return True
+    if val in (False, "false", "f", "no", "n", "off", "0", 0):
+        return False
+
+    msg = f"Cannot convert value to bool: {val!r}"
+    raise ValueError(msg)
diff --git a/.venv/lib/python3.12/site-packages/attr/converters.pyi b/.venv/lib/python3.12/site-packages/attr/converters.pyi
new file mode 100644
index 0000000..12bd0c4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/converters.pyi
@@ -0,0 +1,19 @@
+from typing import Callable, Any, overload
+
+from attrs import _ConverterType, _CallableConverterType
+
+@overload
+def pipe(*validators: _CallableConverterType) -> _CallableConverterType: ...
+@overload
+def pipe(*validators: _ConverterType) -> _ConverterType: ...
+@overload
+def optional(converter: _CallableConverterType) -> _CallableConverterType: ...
+@overload
+def optional(converter: _ConverterType) -> _ConverterType: ...
+@overload
+def default_if_none(default: Any) -> _CallableConverterType: ...
+@overload
+def default_if_none(
+    *, factory: Callable[[], Any]
+) -> _CallableConverterType: ...
+def to_bool(val: str | int | bool) -> bool: ...
diff --git a/.venv/lib/python3.12/site-packages/attr/exceptions.py b/.venv/lib/python3.12/site-packages/attr/exceptions.py
new file mode 100644
index 0000000..3b7abb8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/exceptions.py
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: MIT
+
+from __future__ import annotations
+
+from typing import ClassVar
+
+
+class FrozenError(AttributeError):
+    """
+    A frozen/immutable instance or attribute has been attempted to be
+    modified.
+
+    It mirrors the behavior of ``namedtuples`` by using the same error message
+    and subclassing `AttributeError`.
+
+    .. versionadded:: 20.1.0
+    """
+
+    msg = "can't set attribute"
+    args: ClassVar[tuple[str]] = [msg]
+
+
+class FrozenInstanceError(FrozenError):
+    """
+    A frozen instance has been attempted to be modified.
+
+    .. versionadded:: 16.1.0
+    """
+
+
+class FrozenAttributeError(FrozenError):
+    """
+    A frozen attribute has been attempted to be modified.
+
+    .. versionadded:: 20.1.0
+    """
+
+
+class AttrsAttributeNotFoundError(ValueError):
+    """
+    An *attrs* function couldn't find an attribute that the user asked for.
+
+    .. versionadded:: 16.2.0
+    """
+
+
+class NotAnAttrsClassError(ValueError):
+    """
+    A non-*attrs* class has been passed into an *attrs* function.
+
+    .. versionadded:: 16.2.0
+    """
+
+
+class DefaultAlreadySetError(RuntimeError):
+    """
+    A default has been set when defining the field and is attempted to be reset
+    using the decorator.
+
+    .. versionadded:: 17.1.0
+    """
+
+
+class UnannotatedAttributeError(RuntimeError):
+    """
+    A class with ``auto_attribs=True`` has a field without a type annotation.
+
+    .. versionadded:: 17.3.0
+    """
+
+
+class PythonTooOldError(RuntimeError):
+    """
+    It was attempted to use an *attrs* feature that requires a newer Python
+    version.
+
+    .. versionadded:: 18.2.0
+    """
+
+
+class NotCallableError(TypeError):
+    """
+    A field requiring a callable has been set with a value that is not
+    callable.
+
+    .. versionadded:: 19.2.0
+    """
+
+    def __init__(self, msg, value):
+        super(TypeError, self).__init__(msg, value)
+        self.msg = msg
+        self.value = value
+
+    def __str__(self):
+        return str(self.msg)
diff --git a/.venv/lib/python3.12/site-packages/attr/exceptions.pyi b/.venv/lib/python3.12/site-packages/attr/exceptions.pyi
new file mode 100644
index 0000000..f268011
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/exceptions.pyi
@@ -0,0 +1,17 @@
+from typing import Any
+
+class FrozenError(AttributeError):
+    msg: str = ...
+
+class FrozenInstanceError(FrozenError): ...
+class FrozenAttributeError(FrozenError): ...
+class AttrsAttributeNotFoundError(ValueError): ...
+class NotAnAttrsClassError(ValueError): ...
+class DefaultAlreadySetError(RuntimeError): ...
+class UnannotatedAttributeError(RuntimeError): ...
+class PythonTooOldError(RuntimeError): ...
+
+class NotCallableError(TypeError):
+    msg: str = ...
+    value: Any = ...
+    def __init__(self, msg: str, value: Any) -> None: ...
diff --git a/.venv/lib/python3.12/site-packages/attr/filters.py b/.venv/lib/python3.12/site-packages/attr/filters.py
new file mode 100644
index 0000000..689b170
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/filters.py
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful filters for `attrs.asdict` and `attrs.astuple`.
+"""
+
+from ._make import Attribute
+
+
+def _split_what(what):
+    """
+    Returns a tuple of `frozenset`s of classes, names, and attributes.
+    """
+    return (
+        frozenset(cls for cls in what if isinstance(cls, type)),
+        frozenset(cls for cls in what if isinstance(cls, str)),
+        frozenset(cls for cls in what if isinstance(cls, Attribute)),
+    )
+
+
+def include(*what):
+    """
+    Create a filter that only allows *what*.
+
+    Args:
+        what (list[type, str, attrs.Attribute]):
+            What to include. Can be a type, a name, or an attribute.
+
+    Returns:
+        Callable:
+            A callable that can be passed to `attrs.asdict`'s and
+            `attrs.astuple`'s *filter* argument.
+
+    .. versionchanged:: 23.1.0 Accept strings with field names.
+    """
+    cls, names, attrs = _split_what(what)
+
+    def include_(attribute, value):
+        return (
+            value.__class__ in cls
+            or attribute.name in names
+            or attribute in attrs
+        )
+
+    return include_
+
+
+def exclude(*what):
+    """
+    Create a filter that does **not** allow *what*.
+
+    Args:
+        what (list[type, str, attrs.Attribute]):
+            What to exclude. Can be a type, a name, or an attribute.
+
+    Returns:
+        Callable:
+            A callable that can be passed to `attrs.asdict`'s and
+            `attrs.astuple`'s *filter* argument.
+
+    .. versionchanged:: 23.3.0 Accept field name string as input argument
+    """
+    cls, names, attrs = _split_what(what)
+
+    def exclude_(attribute, value):
+        return not (
+            value.__class__ in cls
+            or attribute.name in names
+            or attribute in attrs
+        )
+
+    return exclude_
diff --git a/.venv/lib/python3.12/site-packages/attr/filters.pyi b/.venv/lib/python3.12/site-packages/attr/filters.pyi
new file mode 100644
index 0000000..974abdc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/filters.pyi
@@ -0,0 +1,6 @@
+from typing import Any
+
+from . import Attribute, _FilterType
+
+def include(*what: type | str | Attribute[Any]) -> _FilterType[Any]: ...
+def exclude(*what: type | str | Attribute[Any]) -> _FilterType[Any]: ...
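[Editor's note] The snippet below is not part of the patch; it is a minimal usage sketch of the `include`/`exclude` filters defined above, combined with `attrs.asdict`. The `User` class and its field values are hypothetical; string field names are accepted per the ``versionchanged`` notes in the docstrings.

    import attrs
    from attr import filters

    @attrs.define
    class User:
        name: str
        password: str
        age: int

    u = User("jane", "hunter2", 40)

    # Drop a sensitive field by name:
    print(attrs.asdict(u, filter=filters.exclude("password")))
    # -> {'name': 'jane', 'age': 40}

    # Keep only int-valued fields by type:
    print(attrs.asdict(u, filter=filters.include(int)))
    # -> {'age': 40}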
diff --git a/.venv/lib/python3.12/site-packages/attr/py.typed b/.venv/lib/python3.12/site-packages/attr/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/attr/setters.py b/.venv/lib/python3.12/site-packages/attr/setters.py
new file mode 100644
index 0000000..78b0839
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/setters.py
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly used hooks for on_setattr.
+"""
+
+from . import _config
+from .exceptions import FrozenAttributeError
+
+
+def pipe(*setters):
+    """
+    Run all *setters* and return the return value of the last one.
+
+    .. versionadded:: 20.1.0
+    """
+
+    def wrapped_pipe(instance, attrib, new_value):
+        rv = new_value
+
+        for setter in setters:
+            rv = setter(instance, attrib, rv)
+
+        return rv
+
+    return wrapped_pipe
+
+
+def frozen(_, __, ___):
+    """
+    Prevent an attribute from being modified.
+
+    .. versionadded:: 20.1.0
+    """
+    raise FrozenAttributeError
+
+
+def validate(instance, attrib, new_value):
+    """
+    Run *attrib*'s validator on *new_value* if it has one.
+
+    .. versionadded:: 20.1.0
+    """
+    if _config._run_validators is False:
+        return new_value
+
+    v = attrib.validator
+    if not v:
+        return new_value
+
+    v(instance, attrib, new_value)
+
+    return new_value
+
+
+def convert(instance, attrib, new_value):
+    """
+    Run *attrib*'s converter -- if it has one -- on *new_value* and return the
+    result.
+
+    .. versionadded:: 20.1.0
+    """
+    c = attrib.converter
+    if c:
+        # This can be removed once we drop 3.8 and use attrs.Converter instead.
+        from ._make import Converter
+
+        if not isinstance(c, Converter):
+            return c(new_value)
+
+        return c(new_value, instance, attrib)
+
+    return new_value
+
+
+# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes.
+# Sphinx's autodata stopped working, so the docstring is inlined in the API
+# docs.
+NO_OP = object()
diff --git a/.venv/lib/python3.12/site-packages/attr/setters.pyi b/.venv/lib/python3.12/site-packages/attr/setters.pyi
new file mode 100644
index 0000000..73abf36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/setters.pyi
@@ -0,0 +1,20 @@
+from typing import Any, NewType, NoReturn, TypeVar
+
+from . import Attribute
+from attrs import _OnSetAttrType
+
+_T = TypeVar("_T")
+
+def frozen(
+    instance: Any, attribute: Attribute[Any], new_value: Any
+) -> NoReturn: ...
+def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ...
+def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ...
+
+# convert is allowed to return Any, because they can be chained using pipe.
+def convert(
+    instance: Any, attribute: Attribute[Any], new_value: Any
+) -> Any: ...
+
+_NoOpType = NewType("_NoOpType", object)
+NO_OP: _NoOpType
diff --git a/.venv/lib/python3.12/site-packages/attr/validators.py b/.venv/lib/python3.12/site-packages/attr/validators.py
new file mode 100644
index 0000000..e7b7552
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/validators.py
@@ -0,0 +1,710 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful validators.
+""" + +import operator +import re + +from contextlib import contextmanager +from re import Pattern + +from ._config import get_run_validators, set_run_validators +from ._make import _AndValidator, and_, attrib, attrs +from .converters import default_if_none +from .exceptions import NotCallableError + + +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "disabled", + "ge", + "get_disabled", + "gt", + "in_", + "instance_of", + "is_callable", + "le", + "lt", + "matches_re", + "max_len", + "min_len", + "not_", + "optional", + "or_", + "set_disabled", +] + + +def set_disabled(disabled): + """ + Globally disable or enable running validators. + + By default, they are run. + + Args: + disabled (bool): If `True`, disable running all validators. + + .. warning:: + + This function is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(not disabled) + + +def get_disabled(): + """ + Return a bool indicating whether validators are currently disabled or not. + + Returns: + bool:`True` if validators are currently disabled. + + .. versionadded:: 21.3.0 + """ + return not get_run_validators() + + +@contextmanager +def disabled(): + """ + Context manager that disables running validators within its context. + + .. warning:: + + This context manager is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(False) + try: + yield + finally: + set_run_validators(True) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _InstanceOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not isinstance(value, self.type): + msg = f"'{attr.name}' must be {self.type!r} (got {value!r} that is a {value.__class__!r})." + raise TypeError( + msg, + attr, + self.type, + value, + ) + + def __repr__(self): + return f"" + + +def instance_of(type): + """ + A validator that raises a `TypeError` if the initializer is called with a + wrong type for this particular attribute (checks are performed using + `isinstance` therefore it's also valid to pass a tuple of types). + + Args: + type (type | tuple[type]): The type to check for. + + Raises: + TypeError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the expected type, and the value it got. + """ + return _InstanceOfValidator(type) + + +@attrs(repr=False, frozen=True, slots=True) +class _MatchesReValidator: + pattern = attrib() + match_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.match_func(value): + msg = f"'{attr.name}' must match regex {self.pattern.pattern!r} ({value!r} doesn't)" + raise ValueError( + msg, + attr, + self.pattern, + value, + ) + + def __repr__(self): + return f"" + + +def matches_re(regex, flags=0, func=None): + r""" + A validator that raises `ValueError` if the initializer is called with a + string that doesn't match *regex*. + + Args: + regex (str, re.Pattern): + A regex string or precompiled pattern to match against + + flags (int): + Flags that will be passed to the underlying re function (default 0) + + func (typing.Callable): + Which underlying `re` function to call. Valid options are + `re.fullmatch`, `re.search`, and `re.match`; the default `None` + means `re.fullmatch`. For performance reasons, the pattern is + always precompiled using `re.compile`. + + .. versionadded:: 19.2.0 + .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern. 
+ """ + valid_funcs = (re.fullmatch, None, re.search, re.match) + if func not in valid_funcs: + msg = "'func' must be one of {}.".format( + ", ".join( + sorted((e and e.__name__) or "None" for e in set(valid_funcs)) + ) + ) + raise ValueError(msg) + + if isinstance(regex, Pattern): + if flags: + msg = "'flags' can only be used with a string pattern; pass flags to re.compile() instead" + raise TypeError(msg) + pattern = regex + else: + pattern = re.compile(regex, flags) + + if func is re.match: + match_func = pattern.match + elif func is re.search: + match_func = pattern.search + else: + match_func = pattern.fullmatch + + return _MatchesReValidator(pattern, match_func) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _OptionalValidator: + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return f"" + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to `None` in addition to satisfying the requirements of + the sub-validator. + + Args: + validator + (typing.Callable | tuple[typing.Callable] | list[typing.Callable]): + A validator (or validators) that is used for non-`None` values. + + .. versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. + .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators. + """ + if isinstance(validator, (list, tuple)): + return _OptionalValidator(_AndValidator(validator)) + + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _InValidator: + options = attrib() + _original_options = attrib(hash=False) + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + msg = f"'{attr.name}' must be in {self._original_options!r} (got {value!r})" + raise ValueError( + msg, + attr, + self._original_options, + value, + ) + + def __repr__(self): + return f"" + + +def in_(options): + """ + A validator that raises a `ValueError` if the initializer is called with a + value that does not belong in the *options* provided. + + The check is performed using ``value in options``, so *options* has to + support that operation. + + To keep the validator hashable, dicts, lists, and sets are transparently + transformed into a `tuple`. + + Args: + options: Allowed options. + + Raises: + ValueError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the expected options, and the value it got. + + .. versionadded:: 17.1.0 + .. versionchanged:: 22.1.0 + The ValueError was incomplete until now and only contained the human + readable error message. Now it contains all the information that has + been promised since 17.1.0. + .. versionchanged:: 24.1.0 + *options* that are a list, dict, or a set are now transformed into a + tuple to keep the validator hashable. + """ + repr_options = options + if isinstance(options, (list, dict, set)): + options = tuple(options) + + return _InValidator(options, repr_options) + + +@attrs(repr=False, slots=False, unsafe_hash=True) +class _IsCallableValidator: + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not callable(value): + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." 
+            )
+            raise NotCallableError(
+                msg=message.format(
+                    name=attr.name, value=value, actual=value.__class__
+                ),
+                value=value,
+            )
+
+    def __repr__(self):
+        return "<is_callable validator>"
+
+
+def is_callable():
+    """
+    A validator that raises an `attrs.exceptions.NotCallableError` if the
+    initializer is called with a value for this particular attribute that is
+    not callable.
+
+    .. versionadded:: 19.1.0
+
+    Raises:
+        attrs.exceptions.NotCallableError:
+            With a human readable error message containing the attribute
+            (`attrs.Attribute`) name, and the value it got.
+    """
+    return _IsCallableValidator()
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _DeepIterable:
+    member_validator = attrib(validator=is_callable())
+    iterable_validator = attrib(
+        default=None, validator=optional(is_callable())
+    )
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if self.iterable_validator is not None:
+            self.iterable_validator(inst, attr, value)
+
+        for member in value:
+            self.member_validator(inst, attr, member)
+
+    def __repr__(self):
+        iterable_identifier = (
+            ""
+            if self.iterable_validator is None
+            else f" {self.iterable_validator!r}"
+        )
+        return (
+            f"<deep_iterable validator for{iterable_identifier}"
+            f" iterables of {self.member_validator!r}>"
+        )
+
+
+def deep_iterable(member_validator, iterable_validator=None):
+    """
+    A validator that performs deep validation of an iterable.
+
+    Args:
+        member_validator: Validator(s) to apply to iterable members.
+
+        iterable_validator:
+            Validator to apply to iterable itself (optional).
+
+    Raises:
+        TypeError: if any sub-validators fail
+
+    .. versionadded:: 19.1.0
+    """
+    if isinstance(member_validator, (list, tuple)):
+        member_validator = and_(*member_validator)
+    return _DeepIterable(member_validator, iterable_validator)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _DeepMapping:
+    key_validator = attrib(validator=is_callable())
+    value_validator = attrib(validator=is_callable())
+    mapping_validator = attrib(default=None, validator=optional(is_callable()))
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if self.mapping_validator is not None:
+            self.mapping_validator(inst, attr, value)
+
+        for key in value:
+            self.key_validator(inst, attr, key)
+            self.value_validator(inst, attr, value[key])
+
+    def __repr__(self):
+        return f"<deep_mapping validator for objects mapping {self.key_validator!r} to {self.value_validator!r}>"
+
+
+def deep_mapping(key_validator, value_validator, mapping_validator=None):
+    """
+    A validator that performs deep validation of a dictionary.
+
+    Args:
+        key_validator: Validator to apply to dictionary keys.
+
+        value_validator: Validator to apply to dictionary values.
+
+        mapping_validator:
+            Validator to apply to top-level mapping attribute (optional).
+
+    .. versionadded:: 19.1.0
+
+    Raises:
+        TypeError: if any sub-validators fail
+    """
+    return _DeepMapping(key_validator, value_validator, mapping_validator)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _NumberValidator:
+    bound = attrib()
+    compare_op = attrib()
+    compare_func = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.compare_func(value, self.bound):
+            msg = f"'{attr.name}' must be {self.compare_op} {self.bound}: {value}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<Validator for x {self.compare_op} {self.bound}>"
+
+
+def lt(val):
+    """
+    A validator that raises `ValueError` if the initializer is called with a
+    number larger or equal to *val*.
+
+    The validator uses `operator.lt` to compare the values.
+
+    Args:
+        val: Exclusive upper bound for values.
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, "<", operator.lt)
+
+
+def le(val):
+    """
+    A validator that raises `ValueError` if the initializer is called with a
+    number greater than *val*.
+
+    The validator uses `operator.le` to compare the values.
+
+    Args:
+        val: Inclusive upper bound for values.
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, "<=", operator.le)
+
+
+def ge(val):
+    """
+    A validator that raises `ValueError` if the initializer is called with a
+    number smaller than *val*.
+
+    The validator uses `operator.ge` to compare the values.
+
+    Args:
+        val: Inclusive lower bound for values.
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, ">=", operator.ge)
+
+
+def gt(val):
+    """
+    A validator that raises `ValueError` if the initializer is called with a
+    number smaller or equal to *val*.
+
+    The validator uses `operator.gt` to compare the values.
+
+    Args:
+        val: Exclusive lower bound for values.
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, ">", operator.gt)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MaxLengthValidator:
+    max_length = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if len(value) > self.max_length:
+            msg = f"Length of '{attr.name}' must be <= {self.max_length}: {len(value)}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<max_len validator for {self.max_length}>"
+
+
+def max_len(length):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a string or iterable that is longer than *length*.
+
+    Args:
+        length (int): Maximum length of the string or iterable
+
+    .. versionadded:: 21.3.0
+    """
+    return _MaxLengthValidator(length)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MinLengthValidator:
+    min_length = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if len(value) < self.min_length:
+            msg = f"Length of '{attr.name}' must be >= {self.min_length}: {len(value)}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<min_len validator for {self.min_length}>"
+
+
+def min_len(length):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a string or iterable that is shorter than *length*.
+
+    Args:
+        length (int): Minimum length of the string or iterable
+
+    .. versionadded:: 22.1.0
+    """
+    return _MinLengthValidator(length)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _SubclassOfValidator:
+    type = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not issubclass(value, self.type):
+            msg = f"'{attr.name}' must be a subclass of {self.type!r} (got {value!r})."
+            raise TypeError(
+                msg,
+                attr,
+                self.type,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<subclass_of validator for type {self.type!r}>"
+
+
+def _subclass_of(type):
+    """
+    A validator that raises a `TypeError` if the initializer is called with a
+    wrong type for this particular attribute (checks are performed using
+    `issubclass`, therefore it's also valid to pass a tuple of types).
+
+    Args:
+        type (type | tuple[type, ...]): The type(s) to check for.
+
+    Raises:
+        TypeError:
+            With a human readable error message, the attribute (of type
+            `attrs.Attribute`), the expected type, and the value it got.
+    """
+    return _SubclassOfValidator(type)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _NotValidator:
+    validator = attrib()
+    msg = attrib(
+        converter=default_if_none(
+            "not_ validator child '{validator!r}' "
+            "did not raise a captured error"
+        )
+    )
+    exc_types = attrib(
+        validator=deep_iterable(
+            member_validator=_subclass_of(Exception),
+            iterable_validator=instance_of(tuple),
+        ),
+    )
+
+    def __call__(self, inst, attr, value):
+        try:
+            self.validator(inst, attr, value)
+        except self.exc_types:
+            pass  # suppress error to invert validity
+        else:
+            raise ValueError(
+                self.msg.format(
+                    validator=self.validator,
+                    exc_types=self.exc_types,
+                ),
+                attr,
+                self.validator,
+                value,
+                self.exc_types,
+            )
+
+    def __repr__(self):
+        return (
+            f"<not_ validator wrapping {self.validator!r}, "
+            f"capturing {self.exc_types!r}>"
+        )
+
+
+def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)):
+    """
+    A validator that wraps and logically 'inverts' the validator passed to it.
+    It will raise a `ValueError` if the provided validator *doesn't* raise a
+    `ValueError` or `TypeError` (by default), and will suppress the exception
+    if the provided validator *does*.
+
+    Intended to be used with existing validators to compose logic without
+    needing to create inverted variants, for example, ``not_(in_(...))``.
+
+    Args:
+        validator: A validator to be logically inverted.
+
+        msg (str):
+            Message to raise if validator fails. Formatted with keys
+            ``exc_types`` and ``validator``.
+
+        exc_types (tuple[type, ...]):
+            Exception type(s) to capture. Other types raised by child
+            validators will not be intercepted and pass through.
+
+    Raises:
+        ValueError:
+            With a human readable error message, the attribute (of type
+            `attrs.Attribute`), the validator that failed to raise an
+            exception, the value it got, and the expected exception types.
+
+    .. versionadded:: 22.2.0
+    """
+    try:
+        exc_types = tuple(exc_types)
+    except TypeError:
+        exc_types = (exc_types,)
+    return _NotValidator(validator, msg, exc_types)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _OrValidator:
+    validators = attrib()
+
+    def __call__(self, inst, attr, value):
+        for v in self.validators:
+            try:
+                v(inst, attr, value)
+            except Exception:  # noqa: BLE001, PERF203, S112
+                continue
+            else:
+                return
+
+        msg = f"None of {self.validators!r} satisfied for value {value!r}"
+        raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<or validator wrapping {self.validators!r}>"
+
+
+def or_(*validators):
+    """
+    A validator that composes multiple validators into one.
+
+    When called on a value, it runs all wrapped validators until one of them is
+    satisfied.
+
+    Args:
+        validators (~collections.abc.Iterable[typing.Callable]):
+            Arbitrary number of validators.
+
+    Raises:
+        ValueError:
+            If no validator is satisfied. Raised with a human-readable error
+            message listing all the wrapped validators and the value that
+            failed all of them.
+
+    .. versionadded:: 24.1.0
+    """
+    vals = []
+    for v in validators:
+        vals.extend(v.validators if isinstance(v, _OrValidator) else [v])
+
+    return _OrValidator(tuple(vals))
diff --git a/.venv/lib/python3.12/site-packages/attr/validators.pyi b/.venv/lib/python3.12/site-packages/attr/validators.pyi
new file mode 100644
index 0000000..a0fdda7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/attr/validators.pyi
@@ -0,0 +1,86 @@
+from types import UnionType
+from typing import (
+    Any,
+    AnyStr,
+    Callable,
+    Container,
+    ContextManager,
+    Iterable,
+    Mapping,
+    Match,
+    Pattern,
+    TypeVar,
+    overload,
+)
+
+from attrs import _ValidatorType
+from attrs import _ValidatorArgType
+
+_T = TypeVar("_T")
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_T3 = TypeVar("_T3")
+_I = TypeVar("_I", bound=Iterable)
+_K = TypeVar("_K")
+_V = TypeVar("_V")
+_M = TypeVar("_M", bound=Mapping)
+
+def set_disabled(run: bool) -> None: ...
+def get_disabled() -> bool: ...
+def disabled() -> ContextManager[None]: ...
+
+# To be more precise on instance_of use some overloads.
+# If there are more than 3 items in the tuple then we fall back to Any
+@overload
+def instance_of(type: type[_T]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(type: tuple[type[_T]]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(
+    type: tuple[type[_T1], type[_T2]],
+) -> _ValidatorType[_T1 | _T2]: ...
+@overload
+def instance_of(
+    type: tuple[type[_T1], type[_T2], type[_T3]],
+) -> _ValidatorType[_T1 | _T2 | _T3]: ...
+@overload
+def instance_of(type: tuple[type, ...]) -> _ValidatorType[Any]: ...
+@overload
+def instance_of(type: UnionType) -> _ValidatorType[Any]: ...
+def optional(
+    validator: (
+        _ValidatorType[_T]
+        | list[_ValidatorType[_T]]
+        | tuple[_ValidatorType[_T]]
+    ),
+) -> _ValidatorType[_T | None]: ...
+def in_(options: Container[_T]) -> _ValidatorType[_T]: ...
+def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ...
+def matches_re(
+    regex: Pattern[AnyStr] | AnyStr,
+    flags: int = ...,
+    func: Callable[[AnyStr, AnyStr, int], Match[AnyStr] | None] | None = ...,
+) -> _ValidatorType[AnyStr]: ...
+def deep_iterable(
+    member_validator: _ValidatorArgType[_T],
+    iterable_validator: _ValidatorType[_I] | None = ...,
+) -> _ValidatorType[_I]: ...
+def deep_mapping(
+    key_validator: _ValidatorType[_K],
+    value_validator: _ValidatorType[_V],
+    mapping_validator: _ValidatorType[_M] | None = ...,
+) -> _ValidatorType[_M]: ...
+def is_callable() -> _ValidatorType[_T]: ...
+def lt(val: _T) -> _ValidatorType[_T]: ...
+def le(val: _T) -> _ValidatorType[_T]: ...
+def ge(val: _T) -> _ValidatorType[_T]: ...
+def gt(val: _T) -> _ValidatorType[_T]: ...
+def max_len(length: int) -> _ValidatorType[_T]: ...
+def min_len(length: int) -> _ValidatorType[_T]: ...
+def not_(
+    validator: _ValidatorType[_T],
+    *,
+    msg: str | None = None,
+    exc_types: type[Exception] | Iterable[type[Exception]] = ...,
+) -> _ValidatorType[_T]: ...
+def or_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ...
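The hunk above vendors the complete validator toolbox, so a compact sketch of how its pieces compose may help when skimming the diff. It is illustrative only and not part of the patch; `Server` and its fields are made-up names, and only the public `attr`/`attrs` APIs defined in the hunk are assumed:

```python
from attrs import define, field, validators as v

@define
class Server:  # hypothetical example class, not from the vendored code
    # instance_of() raises TypeError when the type does not match.
    host: str = field(validator=v.instance_of(str))
    # matches_re() precompiles the pattern; the default func is re.fullmatch.
    version: str = field(validator=v.matches_re(r"\d+\.\d+"))
    # not_() inverts a validator: the port must NOT be in the reserved range.
    port: int = field(validator=v.not_(v.in_(range(1024))))
    # or_() is satisfied if any wrapped validator passes.
    timeout: object = field(
        validator=v.or_(v.instance_of(int), v.instance_of(float))
    )

Server(host="db1", version="2.0", port=5432, timeout=1.5)  # validates
# port=80 would raise ValueError: in_() succeeds, so not_() fires.
```

Note how `not_()` relies on the default `exc_types=(ValueError, TypeError)` to capture `in_()`'s failure, which is exactly the composition pattern its docstring advertises.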
diff --git a/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/METADATA b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/METADATA new file mode 100644 index 0000000..029afee --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/METADATA @@ -0,0 +1,232 @@ +Metadata-Version: 2.4 +Name: attrs +Version: 25.3.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: GitHub, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.8 +Provides-Extra: benchmark +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'benchmark' +Requires-Dist: hypothesis; extra == 'benchmark' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark' +Requires-Dist: pympler; extra == 'benchmark' +Requires-Dist: pytest-codspeed; extra == 'benchmark' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark' +Requires-Dist: pytest-xdist[psutil]; extra == 'benchmark' +Requires-Dist: pytest>=4.3.0; extra == 'benchmark' +Provides-Extra: cov +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'cov' +Requires-Dist: coverage[toml]>=5.3; extra == 'cov' +Requires-Dist: hypothesis; extra == 'cov' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov' +Requires-Dist: pympler; extra == 'cov' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov' +Requires-Dist: pytest-xdist[psutil]; extra == 'cov' +Requires-Dist: pytest>=4.3.0; extra == 'cov' +Provides-Extra: dev +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'dev' +Requires-Dist: hypothesis; extra == 'dev' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev' +Requires-Dist: pre-commit-uv; extra == 'dev' +Requires-Dist: pympler; extra == 'dev' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev' +Requires-Dist: pytest-xdist[psutil]; extra == 'dev' +Requires-Dist: 
pytest>=4.3.0; extra == 'dev' +Provides-Extra: docs +Requires-Dist: cogapp; extra == 'docs' +Requires-Dist: furo; extra == 'docs' +Requires-Dist: myst-parser; extra == 'docs' +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: sphinx-notfound-page; extra == 'docs' +Requires-Dist: sphinxcontrib-towncrier; extra == 'docs' +Requires-Dist: towncrier; extra == 'docs' +Provides-Extra: tests +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'tests' +Requires-Dist: hypothesis; extra == 'tests' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests' +Requires-Dist: pympler; extra == 'tests' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests' +Requires-Dist: pytest-xdist[psutil]; extra == 'tests' +Requires-Dist: pytest>=4.3.0; extra == 'tests' +Provides-Extra: tests-mypy +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy' +Description-Content-Type: text/markdown + +

+
+[attrs logo]
+

+ + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + + + +

+[sponsor logos]


+ Please consider joining them to help make attrs’s maintenance more sustainable! +

+ + + +## Example + +*attrs* gives you a class decorator and a way to declaratively define the attributes on that class: + + + +```pycon +>>> from attrs import asdict, define, make_class, Factory + +>>> @define +... class SomeClass: +... a_number: int = 42 +... list_of_numbers: list[int] = Factory(list) +... +... def hard_math(self, another_number): +... return self.a_number + sum(self.list_of_numbers) * another_number + + +>>> sc = SomeClass(1, [1, 2, 3]) +>>> sc +SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + +>>> sc.hard_math(3) +19 +>>> sc == SomeClass(1, [1, 2, 3]) +True +>>> sc != SomeClass(2, [3, 2, 1]) +True + +>>> asdict(sc) +{'a_number': 1, 'list_of_numbers': [1, 2, 3]} + +>>> SomeClass() +SomeClass(a_number=42, list_of_numbers=[]) + +>>> C = make_class("C", ["a", "b"]) +>>> C("foo", "bar") +C(a='foo', b='bar') +``` + +After *declaring* your attributes, *attrs* gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable `__repr__`, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +--- + +This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0. +The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**. + +Check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for an in-depth explanation! + + +### Hate Type Annotations!? + +No problem! +Types are entirely **optional** with *attrs*. +Simply assign `attrs.field()` to the attributes instead of annotating them with types: + +```python +from attrs import define, field + +@define +class SomeClass: + a_number = field(default=42) + list_of_numbers = field(factory=list) +``` + + +## Data Classes + +On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*). +In practice it does a lot more and is more flexible. +For instance, it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), has a replacement for `__init_subclass__`, and allows for stepping through the generated methods using a debugger. + +For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes), but generally speaking, we are more likely to commit crimes against nature to make things work that one would expect to work, but that are quite complicated in practice. 
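+To make the "plug into the initialization process" point concrete, here is a small sketch; it is not part of the upstream README, and `Order` is a hypothetical class assuming only documented *attrs* APIs (`field(validator=...)` and the `__attrs_post_init__` hook):
+
+```python
+from attrs import define, field, validators
+
+@define
+class Order:  # hypothetical example, not from the upstream README
+    quantity: int = field(validator=validators.gt(0))
+    unit_price: float = field(validator=validators.ge(0.0))
+    total: float = field(init=False, default=0.0)
+
+    def __attrs_post_init__(self):
+        # Runs right after the generated __init__, one of several
+        # documented ways to hook into initialization.
+        self.total = self.quantity * self.unit_price
+
+print(Order(quantity=3, unit_price=2.5).total)  # 7.5
+```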
+ + +## Project Information + +- [**Changelog**](https://www.attrs.org/en/stable/changelog.html) +- [**Documentation**](https://www.attrs.org/) +- [**PyPI**](https://pypi.org/project/attrs/) +- [**Source Code**](https://github.com/python-attrs/attrs) +- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md) +- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs) +- **Get Help**: use the `python-attrs` tag on [Stack Overflow](https://stackoverflow.com/questions/tagged/python-attrs) + + +### *attrs* for Enterprise + +Available as part of the [Tidelift Subscription](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek). + +The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. + +## Release Information + +### Changes + +- Restore support for generator-based `field_transformer`s. + [#1417](https://github.com/python-attrs/attrs/issues/1417) + + + +--- + +[Full changelog →](https://www.attrs.org/en/stable/changelog.html) diff --git a/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/RECORD b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/RECORD new file mode 100644 index 0000000..b04b541 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/RECORD @@ -0,0 +1,55 @@ +attr/__init__.py,sha256=fOYIvt1eGSqQre4uCS3sJWKZ0mwAuC8UD6qba5OS9_U,2057 +attr/__init__.pyi,sha256=QIXnnHPoucmDWkbpNsWTP-cgJ1bn8le7DjyRa_wYdew,11281 +attr/__pycache__/__init__.cpython-312.pyc,, +attr/__pycache__/_cmp.cpython-312.pyc,, +attr/__pycache__/_compat.cpython-312.pyc,, +attr/__pycache__/_config.cpython-312.pyc,, +attr/__pycache__/_funcs.cpython-312.pyc,, +attr/__pycache__/_make.cpython-312.pyc,, +attr/__pycache__/_next_gen.cpython-312.pyc,, +attr/__pycache__/_version_info.cpython-312.pyc,, +attr/__pycache__/converters.cpython-312.pyc,, +attr/__pycache__/exceptions.cpython-312.pyc,, +attr/__pycache__/filters.cpython-312.pyc,, +attr/__pycache__/setters.cpython-312.pyc,, +attr/__pycache__/validators.cpython-312.pyc,, +attr/_cmp.py,sha256=3Nn1TjxllUYiX_nJoVnEkXoDk0hM1DYKj5DE7GZe4i0,4117 +attr/_cmp.pyi,sha256=U-_RU_UZOyPUEQzXE6RMYQQcjkZRY25wTH99sN0s7MM,368 +attr/_compat.py,sha256=4hlXbWhdDjQCDK6FKF1EgnZ3POiHgtpp54qE0nxaGHg,2704 +attr/_config.py,sha256=dGq3xR6fgZEF6UBt_L0T-eUHIB4i43kRmH0P28sJVw8,843 +attr/_funcs.py,sha256=5-tUKJtp3h5El55EcDl6GWXFp68fT8D8U7uCRN6497I,15854 +attr/_make.py,sha256=lBUPPmxiA1BeHzB6OlHoCEh--tVvM1ozXO8eXOa6g4c,96664 +attr/_next_gen.py,sha256=7FRkbtl_N017SuBhf_Vw3mw2c2pGZhtCGOzadgz7tp4,24395 +attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469 +attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +attr/converters.py,sha256=GlDeOzPeTFgeBBLbj9G57Ez5lAk68uhSALRYJ_exe84,3861 +attr/converters.pyi,sha256=orU2bff-VjQa2kMDyvnMQV73oJT2WRyQuw4ZR1ym1bE,643 +attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=ZBiKWLp3R0LfCZsq7X11pn9WX8NslS2wXM4jsnLOGc8,1795 +attr/filters.pyi,sha256=3J5BG-dTxltBk1_-RuNRUHrv2qu1v8v4aDNAQ7_mifA,208 
+attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=5-dcT63GQK35ONEzSgfXCkbB7pPkaR-qv15mm4PVSzQ,1617 +attr/setters.pyi,sha256=NnVkaFU1BB4JB8E4JuXyrzTUgvtMpj8p3wBdJY7uix4,584 +attr/validators.py,sha256=WaB1HLAHHqRHWsrv_K9H-sJ7ESil3H3Cmv2d8TtVZx4,20046 +attr/validators.pyi,sha256=s2WhKPqskxbsckJfKk8zOuuB088GfgpyxcCYSNFLqNU,2603 +attrs-25.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +attrs-25.3.0.dist-info/METADATA,sha256=W38cREj7s1wqNf1fg4hVwZmL1xh0AdSp4IhtTMROinw,10993 +attrs-25.3.0.dist-info/RECORD,, +attrs-25.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +attrs-25.3.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs/__init__.py,sha256=qeQJZ4O08yczSn840v9bYOaZyRE81WsVi-QCrY3krCU,1107 +attrs/__init__.pyi,sha256=nZmInocjM7tHV4AQw0vxO_fo6oJjL_PonlV9zKKW8DY,7931 +attrs/__pycache__/__init__.cpython-312.pyc,, +attrs/__pycache__/converters.cpython-312.pyc,, +attrs/__pycache__/exceptions.cpython-312.pyc,, +attrs/__pycache__/filters.cpython-312.pyc,, +attrs/__pycache__/setters.cpython-312.pyc,, +attrs/__pycache__/validators.cpython-312.pyc,, +attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76 +attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76 +attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73 +attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76 diff --git a/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/WHEEL new file mode 100644 index 0000000..12228d4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/licenses/LICENSE b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..2bd6453 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs-25.3.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/.venv/lib/python3.12/site-packages/attrs/__init__.py b/.venv/lib/python3.12/site-packages/attrs/__init__.py new file mode 100644 index 0000000..e8023ff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs/__init__.py @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + AttrsInstance, + Converter, + Factory, + NothingType, + _make_getattr, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._next_gen import asdict, astuple + +from . import converters, exceptions, filters, setters, validators + + +__all__ = [ + "NOTHING", + "Attribute", + "AttrsInstance", + "Converter", + "Factory", + "NothingType", + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "has", + "make_class", + "mutable", + "resolve_types", + "setters", + "validate", + "validators", +] + +__getattr__ = _make_getattr(__name__) diff --git a/.venv/lib/python3.12/site-packages/attrs/__init__.pyi b/.venv/lib/python3.12/site-packages/attrs/__init__.pyi new file mode 100644 index 0000000..648fa7a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs/__init__.pyi @@ -0,0 +1,263 @@ +import sys + +from typing import ( + Any, + Callable, + Mapping, + Sequence, + overload, + TypeVar, +) + +# Because we need to type our own stuff, we have to make everything from +# attr explicitly public too. +from attr import __author__ as __author__ +from attr import __copyright__ as __copyright__ +from attr import __description__ as __description__ +from attr import __email__ as __email__ +from attr import __license__ as __license__ +from attr import __title__ as __title__ +from attr import __url__ as __url__ +from attr import __version__ as __version__ +from attr import __version_info__ as __version_info__ +from attr import assoc as assoc +from attr import Attribute as Attribute +from attr import AttrsInstance as AttrsInstance +from attr import cmp_using as cmp_using +from attr import converters as converters +from attr import Converter as Converter +from attr import evolve as evolve +from attr import exceptions as exceptions +from attr import Factory as Factory +from attr import fields as fields +from attr import fields_dict as fields_dict +from attr import filters as filters +from attr import has as has +from attr import make_class as make_class +from attr import NOTHING as NOTHING +from attr import resolve_types as resolve_types +from attr import setters as setters +from attr import validate as validate +from attr import validators as validators +from attr import attrib, asdict as asdict, astuple as astuple +from attr import NothingType as NothingType + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = bool | Callable[[Any], Any] +_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any] +_CallableConverterType = Callable[[Any], Any] +_ConverterType = _CallableConverterType | Converter[Any, Any] +_ReprType = Callable[[Any], str] +_ReprArgType = bool | _ReprType +_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any] 
+_OnSetAttrArgType = _OnSetAttrType | list[_OnSetAttrType] | setters._NoOpType +_FieldTransformer = Callable[ + [type, list["Attribute[Any]"]], list["Attribute[Any]"] +] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = _ValidatorType[_T] | Sequence[_ValidatorType[_T]] + +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def field( + *, + default: None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: _T | None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> Any: ... +@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> _C: ... 
+@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define + +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: _C, + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: None = ..., + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... 
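Each decorator in the stub above is declared twice so type checkers accept both the bare `@define` form (first overload: `maybe_cls` is the class) and the parenthesized `@define(...)` form (second overload: returns a decorator), with `@frozen` additionally marked `frozen_default=True`. A sketch of what that buys at the call site, under the assumption that `Point`, `Box`, and `Color` are made-up names:

```python
from attrs import define, frozen

@define  # bare form: first overload
class Point:
    x: int
    y: int

@define(slots=False)  # parenthesized form: second overload
class Box:
    content: object

@frozen  # frozen_default=True: type checkers flag attribute mutation
class Color:
    name: str

p = Point(1, 2)
p.x = 3  # fine, define() classes are mutable
# Color("red").name = "blue"  # raises attrs.exceptions.FrozenInstanceError
```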
diff --git a/.venv/lib/python3.12/site-packages/attrs/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attrs/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..e1e6f34 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attrs/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attrs/__pycache__/converters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attrs/__pycache__/converters.cpython-312.pyc new file mode 100644 index 0000000..6f6b38c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attrs/__pycache__/converters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attrs/__pycache__/exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attrs/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..c8df6de Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attrs/__pycache__/exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attrs/__pycache__/filters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attrs/__pycache__/filters.cpython-312.pyc new file mode 100644 index 0000000..8da52ac Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attrs/__pycache__/filters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attrs/__pycache__/setters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attrs/__pycache__/setters.cpython-312.pyc new file mode 100644 index 0000000..4e24e5d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attrs/__pycache__/setters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attrs/__pycache__/validators.cpython-312.pyc b/.venv/lib/python3.12/site-packages/attrs/__pycache__/validators.cpython-312.pyc new file mode 100644 index 0000000..fd45fb9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/attrs/__pycache__/validators.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/attrs/converters.py b/.venv/lib/python3.12/site-packages/attrs/converters.py new file mode 100644 index 0000000..7821f6c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa: F403 diff --git a/.venv/lib/python3.12/site-packages/attrs/exceptions.py b/.venv/lib/python3.12/site-packages/attrs/exceptions.py new file mode 100644 index 0000000..3323f9d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa: F403 diff --git a/.venv/lib/python3.12/site-packages/attrs/filters.py b/.venv/lib/python3.12/site-packages/attrs/filters.py new file mode 100644 index 0000000..3080f48 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa: F403 diff --git a/.venv/lib/python3.12/site-packages/attrs/py.typed b/.venv/lib/python3.12/site-packages/attrs/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/attrs/setters.py b/.venv/lib/python3.12/site-packages/attrs/setters.py new file mode 100644 index 0000000..f3d73bb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa: F403 diff --git a/.venv/lib/python3.12/site-packages/attrs/validators.py 
b/.venv/lib/python3.12/site-packages/attrs/validators.py new file mode 100644 index 0000000..037e124 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa: F403 diff --git a/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/METADATA b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/METADATA new file mode 100644 index 0000000..3a7ea22 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/METADATA @@ -0,0 +1,77 @@ +Metadata-Version: 2.4 +Name: certifi +Version: 2025.8.3 +Summary: Python package for providing Mozilla's CA Bundle. +Home-page: https://github.com/certifi/python-certifi +Author: Kenneth Reitz +Author-email: me@kennethreitz.com +License: MPL-2.0 +Project-URL: Source, https://github.com/certifi/python-certifi +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) +Classifier: Natural Language :: English +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Requires-Python: >=3.7 +License-File: LICENSE +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: home-page +Dynamic: license +Dynamic: license-file +Dynamic: project-url +Dynamic: requires-python +Dynamic: summary + +Certifi: Python SSL Certificates +================================ + +Certifi provides Mozilla's carefully curated collection of Root Certificates for +validating the trustworthiness of SSL certificates while verifying the identity +of TLS hosts. It has been extracted from the `Requests`_ project. + +Installation +------------ + +``certifi`` is available on PyPI. Simply install it with ``pip``:: + + $ pip install certifi + +Usage +----- + +To reference the installed certificate authority (CA) bundle, you can use the +built-in function:: + + >>> import certifi + + >>> certifi.where() + '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem' + +Or from the command line:: + + $ python -m certifi + /usr/local/lib/python3.7/site-packages/certifi/cacert.pem + +Enjoy! + +.. _`Requests`: https://requests.readthedocs.io/en/master/ + +Addition/Removal of Certificates +-------------------------------- + +Certifi does not support any addition/removal or other modification of the +CA trust store content. This project is intended to provide a reliable and +highly portable root of trust to python deployments. Look to upstream projects +for methods to use alternate trust. 
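Beyond the `certifi.where()` call shown in the vendored README, the bundle path is typically handed to the standard library's `ssl` module. A minimal sketch, not part of the packaged docs:

```python
import ssl
import urllib.request

import certifi

# Build a TLS context that trusts exactly the Mozilla bundle vendored here.
context = ssl.create_default_context(cafile=certifi.where())
with urllib.request.urlopen("https://www.python.org", context=context) as resp:
    print(resp.status)  # 200 once the chain verifies against cacert.pem
```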
diff --git a/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/RECORD b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/RECORD new file mode 100644 index 0000000..64c2430 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/RECORD @@ -0,0 +1,14 @@ +certifi-2025.8.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +certifi-2025.8.3.dist-info/METADATA,sha256=z4sG3fosbP3nviub_TUpSb71z0bPmsp3Xa6ZIatGUe4,2422 +certifi-2025.8.3.dist-info/RECORD,, +certifi-2025.8.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +certifi-2025.8.3.dist-info/licenses/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 +certifi-2025.8.3.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 +certifi/__init__.py,sha256=0a5ro4KTYep37Oo0Z8TycCPXaDlOEtvuj2pNWZ_1t8Y,94 +certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243 +certifi/__pycache__/__init__.cpython-312.pyc,, +certifi/__pycache__/__main__.cpython-312.pyc,, +certifi/__pycache__/core.cpython-312.pyc,, +certifi/cacert.pem,sha256=kQLmo2RKBxumzb1KU2mPKRxKZLGEUKCLwEZUi221zIs,287634 +certifi/core.py,sha256=XFXycndG5pf37ayeF8N32HUuDafsyhkVMbO4BAPWHa0,3394 +certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/WHEEL new file mode 100644 index 0000000..e7fa31b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/licenses/LICENSE b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/licenses/LICENSE new file mode 100644 index 0000000..62b076c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/licenses/LICENSE @@ -0,0 +1,20 @@ +This package contains a modified version of ca-bundle.crt: + +ca-bundle.crt -- Bundle of CA Root Certificates + +This is a bundle of X.509 certificates of public Certificate Authorities +(CA). These were automatically extracted from Mozilla's root certificates +file (certdata.txt). This file can be found in the mozilla source tree: +https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt +It contains the certificates in PEM format and therefore +can be directly used with curl / libcurl / php_curl, or with +an Apache+mod_ssl webserver for SSL client authentication. +Just configure this file as the SSLCACertificateFile.# + +***** BEGIN LICENSE BLOCK ***** +This Source Code Form is subject to the terms of the Mozilla Public License, +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain +one at http://mozilla.org/MPL/2.0/. 
+ +***** END LICENSE BLOCK ***** +@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ diff --git a/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/top_level.txt b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/top_level.txt new file mode 100644 index 0000000..963eac5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi-2025.8.3.dist-info/top_level.txt @@ -0,0 +1 @@ +certifi diff --git a/.venv/lib/python3.12/site-packages/certifi/__init__.py b/.venv/lib/python3.12/site-packages/certifi/__init__.py new file mode 100644 index 0000000..fe2a891 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi/__init__.py @@ -0,0 +1,4 @@ +from .core import contents, where + +__all__ = ["contents", "where"] +__version__ = "2025.08.03" diff --git a/.venv/lib/python3.12/site-packages/certifi/__main__.py b/.venv/lib/python3.12/site-packages/certifi/__main__.py new file mode 100644 index 0000000..8945b5d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi/__main__.py @@ -0,0 +1,12 @@ +import argparse + +from certifi import contents, where + +parser = argparse.ArgumentParser() +parser.add_argument("-c", "--contents", action="store_true") +args = parser.parse_args() + +if args.contents: + print(contents()) +else: + print(where()) diff --git a/.venv/lib/python3.12/site-packages/certifi/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/certifi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..16e0ebe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/certifi/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/certifi/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/certifi/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..e91ae39 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/certifi/__pycache__/__main__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/certifi/__pycache__/core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/certifi/__pycache__/core.cpython-312.pyc new file mode 100644 index 0000000..5868d28 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/certifi/__pycache__/core.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/certifi/cacert.pem b/.venv/lib/python3.12/site-packages/certifi/cacert.pem new file mode 100644 index 0000000..2077a1e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi/cacert.pem @@ -0,0 +1,4738 @@ + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
+# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud 
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s 
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR 
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd 
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SecureTrust CA O=SecureTrust Corporation +# Subject: CN=SecureTrust CA O=SecureTrust Corporation +# Label: "SecureTrust CA" +# Serial: 17199774589125277788362757014266862032 +# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 +# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 +# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 
3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END 
CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. 
OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +# Issuer: O=certSIGN OU=certSIGN ROOT CA +# Subject: O=certSIGN OU=certSIGN ROOT CA +# Label: "certSIGN ROOT CA" +# Serial: 35210227249154 +# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17 +# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b +# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv 
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny" +# Serial: 80544274841616 +# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 +# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 +# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
+# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + 
+# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. +# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA 
OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- 
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ 
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw 
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. +# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 
9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT 
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx 
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ 
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 
8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q 
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg 
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO 
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G 
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT 
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc 
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN 
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority +# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority +# Label: "CFCA EV ROOT" +# Serial: 407555286 +# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 +# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 +# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy 
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. 
+# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc 
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: "Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 
18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: "Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG +EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr 
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: 
b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm ++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud 
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq +M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC 
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE 
WISeKey Global Root GC CA" +# Serial: 44084345621038548146064804565436152554 +# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 +# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 +# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw +CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 +bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg +Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ +BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu +ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS +b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni +eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W +p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T +rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV +57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg +Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +# Issuer: CN=UCA Global G2 Root O=UniTrust +# Subject: CN=UCA Global G2 Root O=UniTrust +# Label: "UCA Global G2 Root" +# Serial: 124779693093741543919145257850076631279 +# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 +# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a +# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH +bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x +CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds +b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr +b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 +kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm +VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R +VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc +C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj +tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY +D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv +j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl +NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 +iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP +O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV +ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj +L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl +1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU +b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV +PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj +y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb +EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg +DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI ++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy 
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS +sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi +c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO 
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L +6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB +lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v +faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign Root CA - G1" +# Serial: 235931866688319308814040 +# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac +# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c +# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67 +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD +VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU +ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH +MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO +MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv +Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz +f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO +8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq +d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM +tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt +Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB +o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x +PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM +wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d +GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH +6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby 
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign ECC Root CA - G3" +# Serial: 287880440101571086945156 +# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40 +# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1 +# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG +EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo +bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ +TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s +b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0 +WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS +fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB +zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq +hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB +CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD ++JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Label: "emSign Root CA - C1" +# Serial: 825510296613316004955058 +# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68 +# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01 +# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG +A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg +SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v +dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ +BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ +HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH +3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH +GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c +xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1 +aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq +TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87 +/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4 +kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG +YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT ++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo +WXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Label: "emSign ECC Root CA - C3" +# Serial: 582948710642506000014504 +# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5 +# SHA1 Fingerprint: 
b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66 +# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3 +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG +EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx +IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND +IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci +MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti +sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O +BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c +3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J +0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Label: "Hongkong Post Root CA 3" +# Serial: 46170865288971385588281144162979347873371282084 +# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0 +# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02 +# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6 +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ +SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n +a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5 +NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT +CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u +Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO +dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI +VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV +9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY +2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY +vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt +bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb +x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+ +l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK +TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj +Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw +DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG +7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk +MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr +gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk +GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS +3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm +Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+ +l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c +JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP +L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa 
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG +mpv0 +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft ECC Root Certificate Authority 2017" +# Serial: 136839042543790627607696632466672567020 +# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67 +# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5 +# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02 +-----BEGIN CERTIFICATE----- +MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD +VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw +MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV +UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy +b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR +ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb +hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3 +FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV +L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB +iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M= +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft RSA Root Certificate Authority 2017" +# Serial: 40975477897264996090493496164228220339 +# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47 +# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74 +# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0 +-----BEGIN CERTIFICATE----- +MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl +MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw +NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG +EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N +aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ +Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0 +ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1 +HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm +gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ +jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc +aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG +YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6 +W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K +UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH ++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q +W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC +LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC 
+gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6 +tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh +SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2 +TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3 +pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR +xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp +GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9 +dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN +AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB +RA+GsCyRxj3qrg+E +-----END CERTIFICATE----- + +# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Label: "e-Szigno Root CA 2017" +# Serial: 411379200276854331539784714 +# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98 +# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1 +# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99 +-----BEGIN CERTIFICATE----- +MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV +BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk +LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv +b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ +BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg +THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v +IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv +xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H +Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB +eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo +jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ ++efcMQ== +-----END CERTIFICATE----- + +# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Label: "certSIGN Root CA G2" +# Serial: 313609486401300475190 +# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7 +# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32 +# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV +BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g +Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ +BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ +R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF +dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw +vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ +uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp +n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs +cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW +xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P +rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF +DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx +DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy +LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C +eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ 
+d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq +kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC +b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl +qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0 +OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c +NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk +ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO +pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj +03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk +PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE +1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX +QRBdJ3NghVdJIgc= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global Certification Authority" +# Serial: 1846098327275375458322922162 +# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e +# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5 +# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8 +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw +CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x +ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1 +c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx +OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI +SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn +swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu +7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8 +1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW +80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP +JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l +RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw +hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10 +coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc +BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n +twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud +DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W +0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe +uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q +lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB +aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE +sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT +MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe +qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh +VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8 +h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9 +EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK +yeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. 
+# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P256 Certification Authority" +# Serial: 4151900041497450638097112925 +# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54 +# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf +# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4 +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG +SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN +FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w +DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw +CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh +DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P384 Certification Authority" +# Serial: 2704997926503831671788816187 +# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6 +# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2 +# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97 +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB +BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ +j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF +1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G +A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3 +AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC +MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu +Sw== +-----END CERTIFICATE----- + +# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. +# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. 
+# Label: "NAVER Global Root Certification Authority" +# Serial: 9013692873798656336226253319739695165984492813 +# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b +# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1 +# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65 +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM +BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG +T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx +CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD +b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA +iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH +38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE +HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz +kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP +szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq +vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf +nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG +YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo +0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a +CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K +AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I +36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN +qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj +cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm ++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL +hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe +lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7 +p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8 +piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR +LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX +5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO +dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul +9XXeifdy +-----END CERTIFICATE----- + +# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS" +# Serial: 131542671362353147877283741781055151509 +# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb +# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a +# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw +CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw +FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S +Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5 +MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL +DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS +QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH 
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK +Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu +SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC +MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy +v+c= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Label: "GlobalSign Root R46" +# Serial: 1552617688466950547958867513931858518042577 +# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef +# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90 +# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA +MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD +VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy +MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt +c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ +OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG +vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud +316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo +0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE +y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF +zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE ++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN +I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs +x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa +ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC +4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4 +7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti +2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk +pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF +FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt +rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk +ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5 +u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP +4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6 +N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3 +vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Label: "GlobalSign Root E46" +# Serial: 1552617690338932563915843282459653771421763 +# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f +# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84 +# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58 +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx +CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD +ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw +MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex 
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq +R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd +yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ +7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8 ++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Label: "ANF Secure Server Root CA" +# Serial: 996390341000653745 +# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96 +# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74 +# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99 +-----BEGIN CERTIFICATE----- +MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV +BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk +YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV +BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN +MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF +UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD +VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj +cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q +yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH +2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX +H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL +zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR +p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz +W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/ +SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn +LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3 +n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B +u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L +9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej +rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK +pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0 +vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq +OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ +/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9 +2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI ++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2 +MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo +tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. 
OU=Certum Certification Authority +# Label: "Certum EC-384 CA" +# Serial: 160250656287871593594747141429395092468 +# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1 +# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed +# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6 +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw +CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw +JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT +EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0 +WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT +LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX +BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE +KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm +Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8 +EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J +UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn +nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Root CA" +# Serial: 40870380103424195783807378461123655149 +# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29 +# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5 +# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6 +MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu +MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV +BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw +MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg +U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ +n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q +p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq +NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF +8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3 +HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa +mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi +7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF +ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P +qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ +v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6 +Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD +ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4 +WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo +zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR +5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ +GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf 
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq +0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D +P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM +qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP +0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf +E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Label: "TunTrust Root CA" +# Serial: 108534058042236574382096126452369648152337120275 +# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4 +# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb +# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL +BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg +Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv +b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG +EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u +IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ +n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd +2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF +VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ +GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF +li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU +r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2 +eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb +MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg +jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB +7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW +5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE +ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z +xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu +QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4 +FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH +22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP +xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn +dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5 +Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b +nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ +CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH +u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj +d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS RSA Root CA 2021" +# Serial: 76817823531813593706434026085292783742 +# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91 +# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d +# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d +-----BEGIN 
CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU +X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP +JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS ECC Root CA 2021" +# Serial: 137515985548005187474074462014555733966 +# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0 +# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48 +# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y +STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: 
CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 1977337328857672817 +# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3 +# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe +# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1 +MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc +tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd +IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC +AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw +ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m +iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF +Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ +hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P +Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE +EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV +1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t +CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR +5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw +f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9 +ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK +GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. 
+# Label: "vTrus ECC Root CA" +# Serial: 630369271402956006249506845124680065938238527194 +# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85 +# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1 +# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3 +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw +RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY +BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz +MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u +LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0 +v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd +e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw +V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA +AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG +GJTO +-----END CERTIFICATE----- + +# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Label: "vTrus Root CA" +# Serial: 387574501246983434957692974888460947164905180485 +# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc +# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7 +# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87 +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL +BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x +FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx +MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s +THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc +IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU +AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+ +GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9 +8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH +flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt +J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim +0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN +pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ +UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW +OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB +AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet +8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j +bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM +Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv +TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS +S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr +I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9 +b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB +UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P +Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven +sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s= +-----END CERTIFICATE----- + +# 
Issuer: CN=ISRG Root X2 O=Internet Security Research Group +# Subject: CN=ISRG Root X2 O=Internet Security Research Group +# Label: "ISRG Root X2" +# Serial: 87493402998870891108772069816698636114 +# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5 +# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af +# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70 +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw +CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg +R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 +MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT +ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW ++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 +ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI +zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW +tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 +/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Label: "HiPKI Root CA - G1" +# Serial: 60966262342023497858655262305426234976 +# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3 +# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60 +# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa +Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3 +YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw +qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv +Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6 +lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz +Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ +KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK +FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj +HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr +y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ +/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM +a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6 +fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG +SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc +SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza +ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc +XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg +iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho +L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF +Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr 
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+ +vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU +YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 159662223612894884239637590694 +# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc +# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28 +# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ ++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 159662320309726417404178440727 +# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40 +# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a +# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb +C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ 
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 159662449406622349769042896298 +# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc +# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94 +# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu +MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV ++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 159662495401136852707857743206 +# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73 +# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46 +# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi 
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 159662532700760215368942768210 +# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8 +# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47 +# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR +HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj +# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj +# Label: "Telia Root CA v2" +# Serial: 7288924052977061235122729490515358 +# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48 +# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd +# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx +CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE +AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1 +NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ +MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq +AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9 +vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9 +lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD +n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT +7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o +6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC +TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6 +WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R +DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI +pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj +YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy +rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi +0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM 
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS +SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K +TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF +6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er +3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt +Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT +VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW +ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA +rBPuUBQemMc= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 1 2020" +# Serial: 165870826978392376648679885835942448534 +# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed +# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67 +# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44 +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5 +NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS +zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0 +QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/ +VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW +wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV +dWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 1 2020" +# Serial: 126288379621884218666039612629459926992 +# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e +# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07 +# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5 +NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC +/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD +wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3 +OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA 
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb +gfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS ECC P384 Root G5" +# Serial: 13129116028163249804115411775095713523 +# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed +# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee +# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05 +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp +Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2 +MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ +bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS +7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp +0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS +B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49 +BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ +LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4 +DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS RSA4096 Root G5" +# Serial: 11930366277458970227240571539258396554 +# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1 +# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35 +# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75 +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN +MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT +HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN +NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs +IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+ +ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0 +2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp +wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM +pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD +nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po +sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx +Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd +Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX +KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe +XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL +tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv +TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN +AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H +PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF +O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ +REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik +AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv 
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+ +p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw +MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF +qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK +ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root R1 O=Certainly +# Subject: CN=Certainly Root R1 O=Certainly +# Label: "Certainly Root R1" +# Serial: 188833316161142517227353805653483829216 +# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12 +# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af +# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw +PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy +dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9 +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0 +YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2 +1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT +vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed +aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0 +1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5 +r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5 +cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ +wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ +6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA +2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH +Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR +eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u +d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr +PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi +1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd +rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di +taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7 +lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj +yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn +Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy +yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n +wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6 +OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root E1 O=Certainly +# Subject: CN=Certainly Root E1 O=Certainly +# Label: "Certainly Root E1" +# Serial: 8168531406727139161245376702891150584 +# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9 +# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b +# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2 +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw +CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu +bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ +BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s +eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK 
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2 +QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4 +hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm +ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG +BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Label: "Security Communication ECC RootCA1" +# Serial: 15446673492073852651 +# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86 +# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41 +# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11 +-----BEGIN CERTIFICATE----- +MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT +AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD +VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx +NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT +HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5 +IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl +dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK +ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu +9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O +be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k= +-----END CERTIFICATE----- + +# Issuer: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY +# Subject: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY +# Label: "BJCA Global Root CA1" +# Serial: 113562791157148395269083148143378328608 +# MD5 Fingerprint: 42:32:99:76:43:33:36:24:35:07:82:9b:28:f9:d0:90 +# SHA1 Fingerprint: d5:ec:8d:7b:4c:ba:79:f4:e7:e8:cb:9d:6b:ae:77:83:10:03:21:6a +# SHA256 Fingerprint: f3:89:6f:88:fe:7c:0a:88:27:66:a7:fa:6a:d2:74:9f:b5:7a:7f:3e:98:fb:76:9c:1f:a7:b0:9c:2c:44:d5:ae +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIQVW9l47TZkGobCdFsPsBsIDANBgkqhkiG9w0BAQsFADBU +MQswCQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRI +T1JJVFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0ExMB4XDTE5MTIxOTAz +MTYxN1oXDTQ0MTIxMjAzMTYxN1owVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJF +SUpJTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2Jh +bCBSb290IENBMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPFmCL3Z +xRVhy4QEQaVpN3cdwbB7+sN3SJATcmTRuHyQNZ0YeYjjlwE8R4HyDqKYDZ4/N+AZ +spDyRhySsTphzvq3Rp4Dhtczbu33RYx2N95ulpH3134rhxfVizXuhJFyV9xgw8O5 +58dnJCNPYwpj9mZ9S1WnP3hkSWkSl+BMDdMJoDIwOvqfwPKcxRIqLhy1BDPapDgR +at7GGPZHOiJBhyL8xIkoVNiMpTAK+BcWyqw3/XmnkRd4OJmtWO2y3syJfQOcs4ll +5+M7sSKGjwZteAf9kRJ/sGsciQ35uMt0WwfCyPQ10WRjeulumijWML3mG90Vr4Tq +nMfK9Q7q8l0ph49pczm+LiRvRSGsxdRpJQaDrXpIhRMsDQa4bHlW/KNnMoH1V6XK +V0Jp6VwkYe/iMBhORJhVb3rCk9gZtt58R4oRTklH2yiUAguUSiz5EtBP6DF+bHq/ +pj+bOT0CFqMYs2esWz8sgytnOYFcuX6U1WTdno9uruh8W7TXakdI136z1C2OVnZO +z2nxbkRs1CTqjSShGL+9V/6pmTW12xB3uD1IutbB5/EjPtffhZ0nPNRAvQoMvfXn +jSXWgXSHRtQpdaJCbPdzied9v3pKH9MiyRVVz99vfFXQpIsHETdfg6YmV6YBW37+ +WGgHqel62bno/1Afq8K0wM7o6v0PvY1NuLxxAgMBAAGjQjBAMB0GA1UdDgQWBBTF +7+3M2I0hxkjk49cULqcWk+WYATAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAUoKsITQfI/Ki2Pm4rzc2IInRNwPWaZ+4 
+YRC6ojGYWUfo0Q0lHhVBDOAqVdVXUsv45Mdpox1NcQJeXyFFYEhcCY5JEMEE3Kli +awLwQ8hOnThJdMkycFRtwUf8jrQ2ntScvd0g1lPJGKm1Vrl2i5VnZu69mP6u775u ++2D2/VnGKhs/I0qUJDAnyIm860Qkmss9vk/Ves6OF8tiwdneHg56/0OGNFK8YT88 +X7vZdrRTvJez/opMEi4r89fO4aL/3Xtw+zuhTaRjAv04l5U/BXCga99igUOLtFkN +SoxUnMW7gZ/NfaXvCyUeOiDbHPwfmGcCCtRzRBPbUYQaVQNW4AB+dAb/OMRyHdOo +P2gxXdMJxy6MW2Pg6Nwe0uxhHvLe5e/2mXZgLR6UcnHGCyoyx5JO1UbXHfmpGQrI ++pXObSOYqgs4rZpWDW+N8TEAiMEXnM0ZNjX+VVOg4DwzX5Ze4jLp3zO7Bkqp2IRz +znfSxqxx4VyjHQy7Ct9f4qNx2No3WqB4K/TUfet27fJhcKVlmtOJNBir+3I+17Q9 +eVzYH6Eze9mCUAyTF6ps3MKCuwJXNq+YJyo5UOGwifUll35HaBC07HPKs5fRJNz2 +YqAo07WjuGS3iGJCz51TzZm+ZGiPTx4SSPfSKcOYKMryMguTjClPPGAyzQWWYezy +r/6zcCwupvI= +-----END CERTIFICATE----- + +# Issuer: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY +# Subject: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY +# Label: "BJCA Global Root CA2" +# Serial: 58605626836079930195615843123109055211 +# MD5 Fingerprint: 5e:0a:f6:47:5f:a6:14:e8:11:01:95:3f:4d:01:eb:3c +# SHA1 Fingerprint: f4:27:86:eb:6e:b8:6d:88:31:67:02:fb:ba:66:a4:53:00:aa:7a:a6 +# SHA256 Fingerprint: 57:4d:f6:93:1e:27:80:39:66:7b:72:0a:fd:c1:60:0f:c2:7e:b6:6d:d3:09:29:79:fb:73:85:64:87:21:28:82 +-----BEGIN CERTIFICATE----- +MIICJTCCAaugAwIBAgIQLBcIfWQqwP6FGFkGz7RK6zAKBggqhkjOPQQDAzBUMQsw +CQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRIT1JJ +VFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0EyMB4XDTE5MTIxOTAzMTgy +MVoXDTQ0MTIxMjAzMTgyMVowVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJFSUpJ +TkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2JhbCBS +b290IENBMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABJ3LgJGNU2e1uVCxA/jlSR9B +IgmwUVJY1is0j8USRhTFiy8shP8sbqjV8QnjAyEUxEM9fMEsxEtqSs3ph+B99iK+ ++kpRuDCK/eHeGBIK9ke35xe/J4rUQUyWPGCWwf0VHKNCMEAwHQYDVR0OBBYEFNJK +sVF/BvDRgh9Obl+rg/xI1LCRMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMAoGCCqGSM49BAMDA2gAMGUCMBq8W9f+qdJUDkpd0m2xQNz0Q9XSSpkZElaA +94M04TVOSG0ED1cxMDAtsaqdAzjbBgIxAMvMh1PLet8gUXOQwKhbYdDFUDn9hf7B +43j4ptZLvZuHjw/l1lOWqzzIQNph91Oj9w== +-----END CERTIFICATE----- + +# Issuer: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited +# Subject: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited +# Label: "Sectigo Public Server Authentication Root E46" +# Serial: 88989738453351742415770396670917916916 +# MD5 Fingerprint: 28:23:f8:b2:98:5c:37:16:3b:3e:46:13:4e:b0:b3:01 +# SHA1 Fingerprint: ec:8a:39:6c:40:f0:2e:bc:42:75:d4:9f:ab:1c:1a:5b:67:be:d2:9a +# SHA256 Fingerprint: c9:0f:26:f0:fb:1b:40:18:b2:22:27:51:9b:5c:a2:b5:3e:2c:a5:b3:be:5c:f1:8e:fe:1b:ef:47:38:0c:53:83 +-----BEGIN CERTIFICATE----- +MIICOjCCAcGgAwIBAgIQQvLM2htpN0RfFf51KBC49DAKBggqhkjOPQQDAzBfMQsw +CQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1T +ZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwHhcN +MjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEYMBYG +A1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBT +ZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAR2+pmpbiDt+dd34wc7qNs9Xzjoq1WmVk/WSOrsfy2qw7LFeeyZYX8QeccC +WvkEN/U0NSt3zn8gj1KjAIns1aeibVvjS5KToID1AZTc8GgHHs3u/iVStSBDHBv+ +6xnOQ6OjQjBAMB0GA1UdDgQWBBTRItpMWfFLXyY4qp3W7usNw/upYTAOBgNVHQ8B +Af8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNnADBkAjAn7qRa +qCG76UeXlImldCBteU/IvZNeWBj7LRoAasm4PdCkT0RHlAFWovgzJQxC36oCMB3q +4S6ILuH5px0CMk7yn2xVdOOurvulGu7t0vzCAxHrRVxgED1cf5kDW21USAGKcw== +-----END CERTIFICATE----- + +# Issuer: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited +# Subject: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited +# 
Label: "Sectigo Public Server Authentication Root R46" +# Serial: 156256931880233212765902055439220583700 +# MD5 Fingerprint: 32:10:09:52:00:d5:7e:6c:43:df:15:c0:b1:16:93:e5 +# SHA1 Fingerprint: ad:98:f9:f3:e4:7d:75:3b:65:d4:82:b3:a4:52:17:bb:6e:f5:e4:38 +# SHA256 Fingerprint: 7b:b6:47:a6:2a:ee:ac:88:bf:25:7a:a5:22:d0:1f:fe:a3:95:e0:ab:45:c7:3f:93:f6:56:54:ec:38:f2:5a:06 +-----BEGIN CERTIFICATE----- +MIIFijCCA3KgAwIBAgIQdY39i658BwD6qSWn4cetFDANBgkqhkiG9w0BAQwFADBf +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQD +Ey1TZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYw +HhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEY +MBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1Ymxp +YyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCTvtU2UnXYASOgHEdCSe5jtrch/cSV1UgrJnwUUxDa +ef0rty2k1Cz66jLdScK5vQ9IPXtamFSvnl0xdE8H/FAh3aTPaE8bEmNtJZlMKpnz +SDBh+oF8HqcIStw+KxwfGExxqjWMrfhu6DtK2eWUAtaJhBOqbchPM8xQljeSM9xf +iOefVNlI8JhD1mb9nxc4Q8UBUQvX4yMPFF1bFOdLvt30yNoDN9HWOaEhUTCDsG3X +ME6WW5HwcCSrv0WBZEMNvSE6Lzzpng3LILVCJ8zab5vuZDCQOc2TZYEhMbUjUDM3 +IuM47fgxMMxF/mL50V0yeUKH32rMVhlATc6qu/m1dkmU8Sf4kaWD5QazYw6A3OAS +VYCmO2a0OYctyPDQ0RTp5A1NDvZdV3LFOxxHVp3i1fuBYYzMTYCQNFu31xR13NgE +SJ/AwSiItOkcyqex8Va3e0lMWeUgFaiEAin6OJRpmkkGj80feRQXEgyDet4fsZfu ++Zd4KKTIRJLpfSYFplhym3kT2BFfrsU4YjRosoYwjviQYZ4ybPUHNs2iTG7sijbt +8uaZFURww3y8nDnAtOFr94MlI1fZEoDlSfB1D++N6xybVCi0ITz8fAr/73trdf+L +HaAZBav6+CuBQug4urv7qv094PPK306Xlynt8xhW6aWWrL3DkJiy4Pmi1KZHQ3xt +zwIDAQABo0IwQDAdBgNVHQ4EFgQUVnNYZJX5khqwEioEYnmhQBWIIUkwDgYDVR0P +AQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAC9c +mTz8Bl6MlC5w6tIyMY208FHVvArzZJ8HXtXBc2hkeqK5Duj5XYUtqDdFqij0lgVQ +YKlJfp/imTYpE0RHap1VIDzYm/EDMrraQKFz6oOht0SmDpkBm+S8f74TlH7Kph52 +gDY9hAaLMyZlbcp+nv4fjFg4exqDsQ+8FxG75gbMY/qB8oFM2gsQa6H61SilzwZA +Fv97fRheORKkU55+MkIQpiGRqRxOF3yEvJ+M0ejf5lG5Nkc/kLnHvALcWxxPDkjB +JYOcCj+esQMzEhonrPcibCTRAUH4WAP+JWgiH5paPHxsnnVI84HxZmduTILA7rpX +DhjvLpr3Etiga+kFpaHpaPi8TD8SHkXoUsCjvxInebnMMTzD9joiFgOgyY9mpFui +TdaBJQbpdqQACj7LzTWb4OE4y2BThihCQRxEV+ioratF4yUQvNs+ZUH7G6aXD+u5 +dHn5HrwdVw1Hr8Mvn4dGp+smWg9WY7ViYG4A++MnESLn/pmPNPW56MORcr3Ywx65 +LvKRRFHQV80MNNVIIb/bE/FmJUNS0nAiNs2fxBx1IK1jcmMGDw4nztJqDby1ORrp +0XZ60Vzk50lJLVU3aPAaOpg+VBeHVOmmJ1CJeyAvP/+/oYtKR5j/K3tJPsMpRmAY +QqszKbrAKbkTidOIijlBO8n9pu0f9GBj39ItVQGL +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation +# Subject: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation +# Label: "SSL.com TLS RSA Root CA 2022" +# Serial: 148535279242832292258835760425842727825 +# MD5 Fingerprint: d8:4e:c6:59:30:d8:fe:a0:d6:7a:5a:2c:2c:69:78:da +# SHA1 Fingerprint: ec:2c:83:40:72:af:26:95:10:ff:0e:f2:03:ee:31:70:f6:78:9d:ca +# SHA256 Fingerprint: 8f:af:7d:2e:2c:b4:70:9b:b8:e0:b3:36:66:bf:75:a5:dd:45:b5:de:48:0f:8e:a8:d4:bf:e6:be:bc:17:f2:ed +-----BEGIN CERTIFICATE----- +MIIFiTCCA3GgAwIBAgIQb77arXO9CEDii02+1PdbkTANBgkqhkiG9w0BAQsFADBO +MQswCQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQD +DBxTU0wuY29tIFRMUyBSU0EgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzQyMloX +DTQ2MDgxOTE2MzQyMVowTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jw +b3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgUlNBIFJvb3QgQ0EgMjAyMjCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANCkCXJPQIgSYT41I57u9nTP +L3tYPc48DRAokC+X94xI2KDYJbFMsBFMF3NQ0CJKY7uB0ylu1bUJPiYYf7ISf5OY +t6/wNr/y7hienDtSxUcZXXTzZGbVXcdotL8bHAajvI9AI7YexoS9UcQbOcGV0ins +S657Lb85/bRi3pZ7QcacoOAGcvvwB5cJOYF0r/c0WRFXCsJbwST0MXMwgsadugL3 
+PnxEX4MN8/HdIGkWCVDi1FW24IBydm5MR7d1VVm0U3TZlMZBrViKMWYPHqIbKUBO +L9975hYsLfy/7PO0+r4Y9ptJ1O4Fbtk085zx7AGL0SDGD6C1vBdOSHtRwvzpXGk3 +R2azaPgVKPC506QVzFpPulJwoxJF3ca6TvvC0PeoUidtbnm1jPx7jMEWTO6Af77w +dr5BUxIzrlo4QqvXDz5BjXYHMtWrifZOZ9mxQnUjbvPNQrL8VfVThxc7wDNY8VLS ++YCk8OjwO4s4zKTGkH8PnP2L0aPP2oOnaclQNtVcBdIKQXTbYxE3waWglksejBYS +d66UNHsef8JmAOSqg+qKkK3ONkRN0VHpvB/zagX9wHQfJRlAUW7qglFA35u5CCoG +AtUjHBPW6dvbxrB6y3snm/vg1UYk7RBLY0ulBY+6uB0rpvqR4pJSvezrZ5dtmi2f +gTIFZzL7SAg/2SW4BCUvAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAU+y437uOEeicuzRk1sTN8/9REQrkwHQYDVR0OBBYEFPsuN+7jhHonLs0Z +NbEzfP/UREK5MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAjYlt +hEUY8U+zoO9opMAdrDC8Z2awms22qyIZZtM7QbUQnRC6cm4pJCAcAZli05bg4vsM +QtfhWsSWTVTNj8pDU/0quOr4ZcoBwq1gaAafORpR2eCNJvkLTqVTJXojpBzOCBvf +R4iyrT7gJ4eLSYwfqUdYe5byiB0YrrPRpgqU+tvT5TgKa3kSM/tKWTcWQA673vWJ +DPFs0/dRa1419dvAJuoSc06pkZCmF8NsLzjUo3KUQyxi4U5cMj29TH0ZR6LDSeeW +P4+a0zvkEdiLA9z2tmBVGKaBUfPhqBVq6+AL8BQx1rmMRTqoENjwuSfr98t67wVy +lrXEj5ZzxOhWc5y8aVFjvO9nHEMaX3cZHxj4HCUp+UmZKbaSPaKDN7EgkaibMOlq +bLQjk2UEqxHzDh1TJElTHaE/nUiSEeJ9DU/1172iWD54nR4fK/4huxoTtrEoZP2w +AgDHbICivRZQIA9ygV/MlP+7mea6kMvq+cYMwq7FGc4zoWtcu358NFcXrfA/rs3q +r5nsLFR+jM4uElZI7xc7P0peYNLcdDa8pUNjyw9bowJWCZ4kLOGGgYz+qxcs+sji +Mho6/4UIyYOf8kpIEFR3N+2ivEC+5BB09+Rbu7nzifmPQdjH5FCQNYA+HLhNkNPU +98OwoX6EyneSMSy4kLGCenROmxMmtNVQZlR4rmA= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation +# Subject: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation +# Label: "SSL.com TLS ECC Root CA 2022" +# Serial: 26605119622390491762507526719404364228 +# MD5 Fingerprint: 99:d7:5c:f1:51:36:cc:e9:ce:d9:19:2e:77:71:56:c5 +# SHA1 Fingerprint: 9f:5f:d9:1a:54:6d:f5:0c:71:f0:ee:7a:bd:17:49:98:84:73:e2:39 +# SHA256 Fingerprint: c3:2f:fd:9f:46:f9:36:d1:6c:36:73:99:09:59:43:4b:9a:d6:0a:af:bb:9e:7c:f3:36:54:f1:44:cc:1b:a1:43 +-----BEGIN CERTIFICATE----- +MIICOjCCAcCgAwIBAgIQFAP1q/s3ixdAW+JDsqXRxDAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxT +U0wuY29tIFRMUyBFQ0MgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzM0OFoXDTQ2 +MDgxOTE2MzM0N1owTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jwb3Jh +dGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgRUNDIFJvb3QgQ0EgMjAyMjB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABEUpNXP6wrgjzhR9qLFNoFs27iosU8NgCTWyJGYm +acCzldZdkkAZDsalE3D07xJRKF3nzL35PIXBz5SQySvOkkJYWWf9lCcQZIxPBLFN +SeR7T5v15wj4A4j3p8OSSxlUgaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSME +GDAWgBSJjy+j6CugFFR781a4Jl9nOAuc0DAdBgNVHQ4EFgQUiY8vo+groBRUe/NW +uCZfZzgLnNAwDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMDA2gAMGUCMFXjIlbp +15IkWE8elDIPDAI2wv2sdDJO4fscgIijzPvX6yv/N33w7deedWo1dlJF4AIxAMeN +b0Igj762TVntd00pxCAgRWSGOlDGxK0tk/UYfXLtqc/ErFc2KAhl3zx5Zn6g6g== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos +# Subject: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos +# Label: "Atos TrustedRoot Root CA ECC TLS 2021" +# Serial: 81873346711060652204712539181482831616 +# MD5 Fingerprint: 16:9f:ad:f1:70:ad:79:d6:ed:29:b4:d1:c5:79:70:a8 +# SHA1 Fingerprint: 9e:bc:75:10:42:b3:02:f3:81:f4:f7:30:62:d4:8f:c3:a7:51:b2:dd +# SHA256 Fingerprint: b2:fa:e5:3e:14:cc:d7:ab:92:12:06:47:01:ae:27:9c:1d:89:88:fa:cb:77:5f:a8:a0:08:91:4e:66:39:88:a8 +-----BEGIN CERTIFICATE----- +MIICFTCCAZugAwIBAgIQPZg7pmY9kGP3fiZXOATvADAKBggqhkjOPQQDAzBMMS4w +LAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgRUNDIFRMUyAyMDIxMQ0w +CwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTI2MjNaFw00MTA0 +MTcwOTI2MjJaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBDQSBF 
+Q0MgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMHYwEAYHKoZI +zj0CAQYFK4EEACIDYgAEloZYKDcKZ9Cg3iQZGeHkBQcfl+3oZIK59sRxUM6KDP/X +tXa7oWyTbIOiaG6l2b4siJVBzV3dscqDY4PMwL502eCdpO5KTlbgmClBk1IQ1SQ4 +AjJn8ZQSb+/Xxd4u/RmAo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR2 +KCXWfeBmmnoJsmo7jjPXNtNPojAOBgNVHQ8BAf8EBAMCAYYwCgYIKoZIzj0EAwMD +aAAwZQIwW5kp85wxtolrbNa9d+F851F+uDrNozZffPc8dz7kUK2o59JZDCaOMDtu +CCrCp1rIAjEAmeMM56PDr9NJLkaCI2ZdyQAUEv049OGYa3cpetskz2VAv9LcjBHo +9H1/IISpQuQo +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos +# Subject: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos +# Label: "Atos TrustedRoot Root CA RSA TLS 2021" +# Serial: 111436099570196163832749341232207667876 +# MD5 Fingerprint: d4:d3:46:b8:9a:c0:9c:76:5d:9e:3a:c3:b9:99:31:d2 +# SHA1 Fingerprint: 18:52:3b:0d:06:37:e4:d6:3a:df:23:e4:98:fb:5b:16:fb:86:74:48 +# SHA256 Fingerprint: 81:a9:08:8e:a5:9f:b3:64:c5:48:a6:f8:55:59:09:9b:6f:04:05:ef:bf:18:e5:32:4e:c9:f4:57:ba:00:11:2f +-----BEGIN CERTIFICATE----- +MIIFZDCCA0ygAwIBAgIQU9XP5hmTC/srBRLYwiqipDANBgkqhkiG9w0BAQwFADBM +MS4wLAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgUlNBIFRMUyAyMDIx +MQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTIxMTBaFw00 +MTA0MTcwOTIxMDlaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBD +QSBSU0EgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtoAOxHm9BYx9sKOdTSJNy/BBl01Z +4NH+VoyX8te9j2y3I49f1cTYQcvyAh5x5en2XssIKl4w8i1mx4QbZFc4nXUtVsYv +Ye+W/CBGvevUez8/fEc4BKkbqlLfEzfTFRVOvV98r61jx3ncCHvVoOX3W3WsgFWZ +kmGbzSoXfduP9LVq6hdKZChmFSlsAvFr1bqjM9xaZ6cF4r9lthawEO3NUDPJcFDs +GY6wx/J0W2tExn2WuZgIWWbeKQGb9Cpt0xU6kGpn8bRrZtkh68rZYnxGEFzedUln +nkL5/nWpo63/dgpnQOPF943HhZpZnmKaau1Fh5hnstVKPNe0OwANwI8f4UDErmwh +3El+fsqyjW22v5MvoVw+j8rtgI5Y4dtXz4U2OLJxpAmMkokIiEjxQGMYsluMWuPD +0xeqqxmjLBvk1cbiZnrXghmmOxYsL3GHX0WelXOTwkKBIROW1527k2gV+p2kHYzy +geBYBr3JtuP2iV2J+axEoctr+hbxx1A9JNr3w+SH1VbxT5Aw+kUJWdo0zuATHAR8 +ANSbhqRAvNncTFd+rrcztl524WWLZt+NyteYr842mIycg5kDcPOvdO3GDjbnvezB +c6eUWsuSZIKmAMFwoW4sKeFYV+xafJlrJaSQOoD0IJ2azsct+bJLKZWD6TWNp0lI +pw9MGZHQ9b8Q4HECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +dEmZ0f+0emhFdcN+tNzMzjkz2ggwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +DAUAA4ICAQAjQ1MkYlxt/T7Cz1UAbMVWiLkO3TriJQ2VSpfKgInuKs1l+NsW4AmS +4BjHeJi78+xCUvuppILXTdiK/ORO/auQxDh1MoSf/7OwKwIzNsAQkG8dnK/haZPs +o0UvFJ/1TCplQ3IM98P4lYsU84UgYt1UU90s3BiVaU+DR3BAM1h3Egyi61IxHkzJ +qM7F78PRreBrAwA0JrRUITWXAdxfG/F851X6LWh3e9NpzNMOa7pNdkTWwhWaJuyw +xfW70Xp0wmzNxbVe9kzmWy2B27O3Opee7c9GslA9hGCZcbUztVdF5kJHdWoOsAgM +rr3e97sPWD2PAzHoPYJQyi9eDF20l74gNAf0xBLh7tew2VktafcxBPTy+av5EzH4 +AXcOPUIjJsyacmdRIXrMPIWo6iFqO9taPKU0nprALN+AnCng33eU0aKAQv9qTFsR +0PXNor6uzFFcw9VUewyu1rkGd4Di7wcaaMxZUa1+XGdrudviB0JbuAEFWDlN5LuY +o7Ey7Nmj1m+UI/87tyll5gfp77YZ6ufCOB0yiJA8EytuzO+rdwY0d4RPcuSBhPm5 +dDTedk+SKlOxJTnbPP/lPqYO5Wue/9vsL3SD3460s6neFE3/MaNFcyT6lSnMEpcE +oji2jbDwN/zIIX8/syQbPYtuzE2wFg2WHYMfRsCbvUOZ58SWLs5fyQ== +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia Global Root CA G3 O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia Global Root CA G3 O=TrustAsia Technologies, Inc. 
+# Label: "TrustAsia Global Root CA G3" +# Serial: 576386314500428537169965010905813481816650257167 +# MD5 Fingerprint: 30:42:1b:b7:bb:81:75:35:e4:16:4f:53:d2:94:de:04 +# SHA1 Fingerprint: 63:cf:b6:c1:27:2b:56:e4:88:8e:1c:23:9a:b6:2e:81:47:24:c3:c7 +# SHA256 Fingerprint: e0:d3:22:6a:eb:11:63:c2:e4:8f:f9:be:3b:50:b4:c6:43:1b:e7:bb:1e:ac:c5:c3:6b:5d:5e:c5:09:03:9a:08 +-----BEGIN CERTIFICATE----- +MIIFpTCCA42gAwIBAgIUZPYOZXdhaqs7tOqFhLuxibhxkw8wDQYJKoZIhvcNAQEM +BQAwWjELMAkGA1UEBhMCQ04xJTAjBgNVBAoMHFRydXN0QXNpYSBUZWNobm9sb2dp +ZXMsIEluYy4xJDAiBgNVBAMMG1RydXN0QXNpYSBHbG9iYWwgUm9vdCBDQSBHMzAe +Fw0yMTA1MjAwMjEwMTlaFw00NjA1MTkwMjEwMTlaMFoxCzAJBgNVBAYTAkNOMSUw +IwYDVQQKDBxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMSQwIgYDVQQDDBtU +cnVzdEFzaWEgR2xvYmFsIFJvb3QgQ0EgRzMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDAMYJhkuSUGwoqZdC+BqmHO1ES6nBBruL7dOoKjbmzTNyPtxNS +T1QY4SxzlZHFZjtqz6xjbYdT8PfxObegQ2OwxANdV6nnRM7EoYNl9lA+sX4WuDqK +AtCWHwDNBSHvBm3dIZwZQ0WhxeiAysKtQGIXBsaqvPPW5vxQfmZCHzyLpnl5hkA1 +nyDvP+uLRx+PjsXUjrYsyUQE49RDdT/VP68czH5GX6zfZBCK70bwkPAPLfSIC7Ep +qq+FqklYqL9joDiR5rPmd2jE+SoZhLsO4fWvieylL1AgdB4SQXMeJNnKziyhWTXA +yB1GJ2Faj/lN03J5Zh6fFZAhLf3ti1ZwA0pJPn9pMRJpxx5cynoTi+jm9WAPzJMs +hH/x/Gr8m0ed262IPfN2dTPXS6TIi/n1Q1hPy8gDVI+lhXgEGvNz8teHHUGf59gX +zhqcD0r83ERoVGjiQTz+LISGNzzNPy+i2+f3VANfWdP3kXjHi3dqFuVJhZBFcnAv +kV34PmVACxmZySYgWmjBNb9Pp1Hx2BErW+Canig7CjoKH8GB5S7wprlppYiU5msT +f9FkPz2ccEblooV7WIQn3MSAPmeamseaMQ4w7OYXQJXZRe0Blqq/DPNL0WP3E1jA +uPP6Z92bfW1K/zJMtSU7/xxnD4UiWQWRkUF3gdCFTIcQcf+eQxuulXUtgQIDAQAB +o2MwYTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEDk5PIj7zjKsK5Xf/Ih +MBY027ySMB0GA1UdDgQWBBRA5OTyI+84yrCuV3/yITAWNNu8kjAOBgNVHQ8BAf8E +BAMCAQYwDQYJKoZIhvcNAQEMBQADggIBACY7UeFNOPMyGLS0XuFlXsSUT9SnYaP4 +wM8zAQLpw6o1D/GUE3d3NZ4tVlFEbuHGLige/9rsR82XRBf34EzC4Xx8MnpmyFq2 +XFNFV1pF1AWZLy4jVe5jaN/TG3inEpQGAHUNcoTpLrxaatXeL1nHo+zSh2bbt1S1 +JKv0Q3jbSwTEb93mPmY+KfJLaHEih6D4sTNjduMNhXJEIlU/HHzp/LgV6FL6qj6j +ITk1dImmasI5+njPtqzn59ZW/yOSLlALqbUHM/Q4X6RJpstlcHboCoWASzY9M/eV +VHUl2qzEc4Jl6VL1XP04lQJqaTDFHApXB64ipCz5xUG3uOyfT0gA+QEEVcys+TIx +xHWVBqB/0Y0n3bOppHKH/lmLmnp0Ft0WpWIp6zqW3IunaFnT63eROfjXy9mPX1on +AX1daBli2MjN9LdyR75bl87yraKZk62Uy5P2EgmVtqvXO9A/EcswFi55gORngS1d +7XB4tmBZrOFdRWOPyN9yaFvqHbgB8X7754qz41SgOAngPN5C8sLtLpvzHzW2Ntjj +gKGLzZlkD8Kqq7HK9W+eQ42EVJmzbsASZthwEPEGNTNDqJwuuhQxzhB/HIbjj9LV ++Hfsm6vxL2PZQl/gZ4FkkfGXL/xuJvYz+NO1+MRiqzFRJQJ6+N1rZdVtTTDIZbpo +FGWsJwt0ivKH +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia Global Root CA G4 O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia Global Root CA G4 O=TrustAsia Technologies, Inc. 
+# Label: "TrustAsia Global Root CA G4" +# Serial: 451799571007117016466790293371524403291602933463 +# MD5 Fingerprint: 54:dd:b2:d7:5f:d8:3e:ed:7c:e0:0b:2e:cc:ed:eb:eb +# SHA1 Fingerprint: 57:73:a5:61:5d:80:b2:e6:ac:38:82:fc:68:07:31:ac:9f:b5:92:5a +# SHA256 Fingerprint: be:4b:56:cb:50:56:c0:13:6a:52:6d:f4:44:50:8d:aa:36:a0:b5:4f:42:e4:ac:38:f7:2a:f4:70:e4:79:65:4c +-----BEGIN CERTIFICATE----- +MIICVTCCAdygAwIBAgIUTyNkuI6XY57GU4HBdk7LKnQV1tcwCgYIKoZIzj0EAwMw +WjELMAkGA1UEBhMCQ04xJTAjBgNVBAoMHFRydXN0QXNpYSBUZWNobm9sb2dpZXMs +IEluYy4xJDAiBgNVBAMMG1RydXN0QXNpYSBHbG9iYWwgUm9vdCBDQSBHNDAeFw0y +MTA1MjAwMjEwMjJaFw00NjA1MTkwMjEwMjJaMFoxCzAJBgNVBAYTAkNOMSUwIwYD +VQQKDBxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMSQwIgYDVQQDDBtUcnVz +dEFzaWEgR2xvYmFsIFJvb3QgQ0EgRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATx +s8045CVD5d4ZCbuBeaIVXxVjAd7Cq92zphtnS4CDr5nLrBfbK5bKfFJV4hrhPVbw +LxYI+hW8m7tH5j/uqOFMjPXTNvk4XatwmkcN4oFBButJ+bAp3TPsUKV/eSm4IJij +YzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUpbtKl86zK3+kMd6Xg1mD +pm9xy94wHQYDVR0OBBYEFKW7SpfOsyt/pDHel4NZg6ZvccveMA4GA1UdDwEB/wQE +AwIBBjAKBggqhkjOPQQDAwNnADBkAjBe8usGzEkxn0AAbbd+NvBNEU/zy4k6LHiR +UKNbwMp1JvK/kF0LgoxgKJ/GcJpo5PECMFxYDlZ2z1jD1xCMuo6u47xkdUfFVZDj +/bpV6wfEU6s3qe4hsiFbYI89MvHVI5TWWA== +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust ECC Root-01 O=CommScope +# Subject: CN=CommScope Public Trust ECC Root-01 O=CommScope +# Label: "CommScope Public Trust ECC Root-01" +# Serial: 385011430473757362783587124273108818652468453534 +# MD5 Fingerprint: 3a:40:a7:fc:03:8c:9c:38:79:2f:3a:a2:6c:b6:0a:16 +# SHA1 Fingerprint: 07:86:c0:d8:dd:8e:c0:80:98:06:98:d0:58:7a:ef:de:a6:cc:a2:5d +# SHA256 Fingerprint: 11:43:7c:da:7b:b4:5e:41:36:5f:45:b3:9a:38:98:6b:0d:e0:0d:ef:34:8e:0c:7b:b0:87:36:33:80:0b:c3:8b +-----BEGIN CERTIFICATE----- +MIICHTCCAaOgAwIBAgIUQ3CCd89NXTTxyq4yLzf39H91oJ4wCgYIKoZIzj0EAwMw +TjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwiQ29t +bVNjb3BlIFB1YmxpYyBUcnVzdCBFQ0MgUm9vdC0wMTAeFw0yMTA0MjgxNzM1NDNa +Fw00NjA0MjgxNzM1NDJaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21tU2Nv +cGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgRUNDIFJvb3QtMDEw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAARLNumuV16ocNfQj3Rid8NeeqrltqLxeP0C +flfdkXmcbLlSiFS8LwS+uM32ENEp7LXQoMPwiXAZu1FlxUOcw5tjnSCDPgYLpkJE +hRGnSjot6dZoL0hOUysHP029uax3OVejQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSOB2LAUN3GGQYARnQE9/OufXVNMDAKBggq +hkjOPQQDAwNoADBlAjEAnDPfQeMjqEI2Jpc1XHvr20v4qotzVRVcrHgpD7oh2MSg +2NED3W3ROT3Ek2DS43KyAjB8xX6I01D1HiXo+k515liWpDVfG2XqYZpwI7UNo5uS +Um9poIyNStDuiw7LR47QjRE= +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust ECC Root-02 O=CommScope +# Subject: CN=CommScope Public Trust ECC Root-02 O=CommScope +# Label: "CommScope Public Trust ECC Root-02" +# Serial: 234015080301808452132356021271193974922492992893 +# MD5 Fingerprint: 59:b0:44:d5:65:4d:b8:5c:55:19:92:02:b6:d1:94:b2 +# SHA1 Fingerprint: 3c:3f:ef:57:0f:fe:65:93:86:9e:a0:fe:b0:f6:ed:8e:d1:13:c7:e5 +# SHA256 Fingerprint: 2f:fb:7f:81:3b:bb:b3:c8:9a:b4:e8:16:2d:0f:16:d7:15:09:a8:30:cc:9d:73:c2:62:e5:14:08:75:d1:ad:4a +-----BEGIN CERTIFICATE----- +MIICHDCCAaOgAwIBAgIUKP2ZYEFHpgE6yhR7H+/5aAiDXX0wCgYIKoZIzj0EAwMw +TjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwiQ29t +bVNjb3BlIFB1YmxpYyBUcnVzdCBFQ0MgUm9vdC0wMjAeFw0yMTA0MjgxNzQ0NTRa +Fw00NjA0MjgxNzQ0NTNaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21tU2Nv +cGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgRUNDIFJvb3QtMDIw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAR4MIHoYx7l63FRD/cHB8o5mXxO1Q/MMDAL +j2aTPs+9xYa9+bG3tD60B8jzljHz7aRP+KNOjSkVWLjVb3/ubCK1sK9IRQq9qEmU 
+v4RDsNuESgMjGWdqb8FuvAY5N9GIIvejQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTmGHX/72DehKT1RsfeSlXjMjZ59TAKBggq +hkjOPQQDAwNnADBkAjAmc0l6tqvmSfR9Uj/UQQSugEODZXW5hYA4O9Zv5JOGq4/n +ich/m35rChJVYaoR4HkCMHfoMXGsPHED1oQmHhS48zs73u1Z/GtMMH9ZzkXpc2AV +mkzw5l4lIhVtwodZ0LKOag== +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust RSA Root-01 O=CommScope +# Subject: CN=CommScope Public Trust RSA Root-01 O=CommScope +# Label: "CommScope Public Trust RSA Root-01" +# Serial: 354030733275608256394402989253558293562031411421 +# MD5 Fingerprint: 0e:b4:15:bc:87:63:5d:5d:02:73:d4:26:38:68:73:d8 +# SHA1 Fingerprint: 6d:0a:5f:f7:b4:23:06:b4:85:b3:b7:97:64:fc:ac:75:f5:33:f2:93 +# SHA256 Fingerprint: 02:bd:f9:6e:2a:45:dd:9b:f1:8f:c7:e1:db:df:21:a0:37:9b:a3:c9:c2:61:03:44:cf:d8:d6:06:fe:c1:ed:81 +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIUPgNJgXUWdDGOTKvVxZAplsU5EN0wDQYJKoZIhvcNAQEL +BQAwTjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwi +Q29tbVNjb3BlIFB1YmxpYyBUcnVzdCBSU0EgUm9vdC0wMTAeFw0yMTA0MjgxNjQ1 +NTRaFw00NjA0MjgxNjQ1NTNaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21t +U2NvcGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgUlNBIFJvb3Qt +MDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwSGWjDR1C45FtnYSk +YZYSwu3D2iM0GXb26v1VWvZVAVMP8syMl0+5UMuzAURWlv2bKOx7dAvnQmtVzslh +suitQDy6uUEKBU8bJoWPQ7VAtYXR1HHcg0Hz9kXHgKKEUJdGzqAMxGBWBB0HW0al +DrJLpA6lfO741GIDuZNqihS4cPgugkY4Iw50x2tBt9Apo52AsH53k2NC+zSDO3Oj +WiE260f6GBfZumbCk6SP/F2krfxQapWsvCQz0b2If4b19bJzKo98rwjyGpg/qYFl +P8GMicWWMJoKz/TUyDTtnS+8jTiGU+6Xn6myY5QXjQ/cZip8UlF1y5mO6D1cv547 +KI2DAg+pn3LiLCuz3GaXAEDQpFSOm117RTYm1nJD68/A6g3czhLmfTifBSeolz7p +UcZsBSjBAg/pGG3svZwG1KdJ9FQFa2ww8esD1eo9anbCyxooSU1/ZOD6K9pzg4H/ +kQO9lLvkuI6cMmPNn7togbGEW682v3fuHX/3SZtS7NJ3Wn2RnU3COS3kuoL4b/JO +Hg9O5j9ZpSPcPYeoKFgo0fEbNttPxP/hjFtyjMcmAyejOQoBqsCyMWCDIqFPEgkB +Ea801M/XrmLTBQe0MXXgDW1XT2mH+VepuhX2yFJtocucH+X8eKg1mp9BFM6ltM6U +CBwJrVbl2rZJmkrqYxhTnCwuwwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUN12mmnQywsL5x6YVEFm45P3luG0wDQYJ +KoZIhvcNAQELBQADggIBAK+nz97/4L1CjU3lIpbfaOp9TSp90K09FlxD533Ahuh6 +NWPxzIHIxgvoLlI1pKZJkGNRrDSsBTtXAOnTYtPZKdVUvhwQkZyybf5Z/Xn36lbQ +nmhUQo8mUuJM3y+Xpi/SB5io82BdS5pYV4jvguX6r2yBS5KPQJqTRlnLX3gWsWc+ +QgvfKNmwrZggvkN80V4aCRckjXtdlemrwWCrWxhkgPut4AZ9HcpZuPN4KWfGVh2v +trV0KnahP/t1MJ+UXjulYPPLXAziDslg+MkfFoom3ecnf+slpoq9uC02EJqxWE2a +aE9gVOX2RhOOiKy8IUISrcZKiX2bwdgt6ZYD9KJ0DLwAHb/WNyVntHKLr4W96ioD +j8z7PEQkguIBpQtZtjSNMgsSDesnwv1B10A8ckYpwIzqug/xBpMu95yo9GA+o/E4 +Xo4TwbM6l4c/ksp4qRyv0LAbJh6+cOx69TOY6lz/KwsETkPdY34Op054A5U+1C0w +lREQKC6/oAI+/15Z0wUOlV9TRe9rh9VIzRamloPh37MG88EU26fsHItdkJANclHn +YfkUyq+Dj7+vsQpZXdxc1+SWrVtgHdqul7I52Qb1dgAT+GhMIbA1xNxVssnBQVoc +icCMb3SgazNNtQEo/a2tiRc7ppqEvOuM6sRxJKi6KfkIsidWNTJf6jn7MZrVGczw +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust RSA Root-02 O=CommScope +# Subject: CN=CommScope Public Trust RSA Root-02 O=CommScope +# Label: "CommScope Public Trust RSA Root-02" +# Serial: 480062499834624527752716769107743131258796508494 +# MD5 Fingerprint: e1:29:f9:62:7b:76:e2:96:6d:f3:d4:d7:0f:ae:1f:aa +# SHA1 Fingerprint: ea:b0:e2:52:1b:89:93:4c:11:68:f2:d8:9a:ac:22:4c:a3:8a:57:ae +# SHA256 Fingerprint: ff:e9:43:d7:93:42:4b:4f:7c:44:0c:1c:3d:64:8d:53:63:f3:4b:82:dc:87:aa:7a:9f:11:8f:c5:de:e1:01:f1 +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIUVBa/O345lXGN0aoApYYNK496BU4wDQYJKoZIhvcNAQEL +BQAwTjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwi +Q29tbVNjb3BlIFB1YmxpYyBUcnVzdCBSU0EgUm9vdC0wMjAeFw0yMTA0MjgxNzE2 
+NDNaFw00NjA0MjgxNzE2NDJaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21t +U2NvcGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgUlNBIFJvb3Qt +MDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDh+g77aAASyE3VrCLE +NQE7xVTlWXZjpX/rwcRqmL0yjReA61260WI9JSMZNRTpf4mnG2I81lDnNJUDMrG0 +kyI9p+Kx7eZ7Ti6Hmw0zdQreqjXnfuU2mKKuJZ6VszKWpCtYHu8//mI0SFHRtI1C +rWDaSWqVcN3SAOLMV2MCe5bdSZdbkk6V0/nLKR8YSvgBKtJjCW4k6YnS5cciTNxz +hkcAqg2Ijq6FfUrpuzNPDlJwnZXjfG2WWy09X6GDRl224yW4fKcZgBzqZUPckXk2 +LHR88mcGyYnJ27/aaL8j7dxrrSiDeS/sOKUNNwFnJ5rpM9kzXzehxfCrPfp4sOcs +n/Y+n2Dg70jpkEUeBVF4GiwSLFworA2iI540jwXmojPOEXcT1A6kHkIfhs1w/tku +FT0du7jyU1fbzMZ0KZwYszZ1OC4PVKH4kh+Jlk+71O6d6Ts2QrUKOyrUZHk2EOH5 +kQMreyBUzQ0ZGshBMjTRsJnhkB4BQDa1t/qp5Xd1pCKBXbCL5CcSD1SIxtuFdOa3 +wNemKfrb3vOTlycEVS8KbzfFPROvCgCpLIscgSjX74Yxqa7ybrjKaixUR9gqiC6v +wQcQeKwRoi9C8DfF8rhW3Q5iLc4tVn5V8qdE9isy9COoR+jUKgF4z2rDN6ieZdIs +5fq6M8EGRPbmz6UNp2YINIos8wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUR9DnsSL/nSz12Vdgs7GxcJXvYXowDQYJ +KoZIhvcNAQELBQADggIBAIZpsU0v6Z9PIpNojuQhmaPORVMbc0RTAIFhzTHjCLqB +KCh6krm2qMhDnscTJk3C2OVVnJJdUNjCK9v+5qiXz1I6JMNlZFxHMaNlNRPDk7n3 ++VGXu6TwYofF1gbTl4MgqX67tiHCpQ2EAOHyJxCDut0DgdXdaMNmEMjRdrSzbyme +APnCKfWxkxlSaRosTKCL4BWaMS/TiJVZbuXEs1DIFAhKm4sTg7GkcrI7djNB3Nyq +pgdvHSQSn8h2vS/ZjvQs7rfSOBAkNlEv41xdgSGn2rtO/+YHqP65DSdsu3BaVXoT +6fEqSWnHX4dXTEN5bTpl6TBcQe7rd6VzEojov32u5cSoHw2OHG1QAk8mGEPej1WF +sQs3BWDJVTkSBKEqz3EWnzZRSb9wO55nnPt7eck5HHisd5FUmrh1CoFSl+NmYWvt +PjgelmFV4ZFUjO2MJB+ByRCac5krFk5yAD9UG/iNuovnFNa2RU9g7Jauwy8CTl2d +lklyALKrdVwPaFsdZcJfMw8eD/A7hvWwTruc9+olBdytoptLFwG+Qt81IR2tq670 +v64fG9PiO/yzcnMcmyiQiRM9HcEARwmWmjgb3bHPDcK0RPOWlc4yOo80nOAXx17O +rg3bhzjlP1v9mxnhMUF6cKojawHhRUzNlM47ni3niAIi9G7oyOzWPPO5std3eqx7 +-----END CERTIFICATE----- + +# Issuer: CN=Telekom Security TLS ECC Root 2020 O=Deutsche Telekom Security GmbH +# Subject: CN=Telekom Security TLS ECC Root 2020 O=Deutsche Telekom Security GmbH +# Label: "Telekom Security TLS ECC Root 2020" +# Serial: 72082518505882327255703894282316633856 +# MD5 Fingerprint: c1:ab:fe:6a:10:2c:03:8d:bc:1c:22:32:c0:85:a7:fd +# SHA1 Fingerprint: c0:f8:96:c5:a9:3b:01:06:21:07:da:18:42:48:bc:e9:9d:88:d5:ec +# SHA256 Fingerprint: 57:8a:f4:de:d0:85:3f:4e:59:98:db:4a:ea:f9:cb:ea:8d:94:5f:60:b6:20:a3:8d:1a:3c:13:b2:bc:7b:a8:e1 +-----BEGIN CERTIFICATE----- +MIICQjCCAcmgAwIBAgIQNjqWjMlcsljN0AFdxeVXADAKBggqhkjOPQQDAzBjMQsw +CQYDVQQGEwJERTEnMCUGA1UECgweRGV1dHNjaGUgVGVsZWtvbSBTZWN1cml0eSBH +bWJIMSswKQYDVQQDDCJUZWxla29tIFNlY3VyaXR5IFRMUyBFQ0MgUm9vdCAyMDIw +MB4XDTIwMDgyNTA3NDgyMFoXDTQ1MDgyNTIzNTk1OVowYzELMAkGA1UEBhMCREUx +JzAlBgNVBAoMHkRldXRzY2hlIFRlbGVrb20gU2VjdXJpdHkgR21iSDErMCkGA1UE +AwwiVGVsZWtvbSBTZWN1cml0eSBUTFMgRUNDIFJvb3QgMjAyMDB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABM6//leov9Wq9xCazbzREaK9Z0LMkOsVGJDZos0MKiXrPk/O +tdKPD/M12kOLAoC+b1EkHQ9rK8qfwm9QMuU3ILYg/4gND21Ju9sGpIeQkpT0CdDP +f8iAC8GXs7s1J8nCG6NCMEAwHQYDVR0OBBYEFONyzG6VmUex5rNhTNHLq+O6zd6f +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2cA +MGQCMHVSi7ekEE+uShCLsoRbQuHmKjYC2qBuGT8lv9pZMo7k+5Dck2TOrbRBR2Di +z6fLHgIwN0GMZt9Ba9aDAEH9L1r3ULRn0SyocddDypwnJJGDSA3PzfdUga/sf+Rn +27iQ7t0l +-----END CERTIFICATE----- + +# Issuer: CN=Telekom Security TLS RSA Root 2023 O=Deutsche Telekom Security GmbH +# Subject: CN=Telekom Security TLS RSA Root 2023 O=Deutsche Telekom Security GmbH +# Label: "Telekom Security TLS RSA Root 2023" +# Serial: 44676229530606711399881795178081572759 +# MD5 Fingerprint: bf:5b:eb:54:40:cd:48:71:c4:20:8d:7d:de:0a:42:f2 +# SHA1 Fingerprint: 54:d3:ac:b3:bd:57:56:f6:85:9d:ce:e5:c3:21:e2:d4:ad:83:d0:93 +# 
SHA256 Fingerprint: ef:c6:5c:ad:bb:59:ad:b6:ef:e8:4d:a2:23:11:b3:56:24:b7:1b:3b:1e:a0:da:8b:66:55:17:4e:c8:97:86:46 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIQIZxULej27HF3+k7ow3BXlzANBgkqhkiG9w0BAQwFADBj +MQswCQYDVQQGEwJERTEnMCUGA1UECgweRGV1dHNjaGUgVGVsZWtvbSBTZWN1cml0 +eSBHbWJIMSswKQYDVQQDDCJUZWxla29tIFNlY3VyaXR5IFRMUyBSU0EgUm9vdCAy +MDIzMB4XDTIzMDMyODEyMTY0NVoXDTQ4MDMyNzIzNTk1OVowYzELMAkGA1UEBhMC +REUxJzAlBgNVBAoMHkRldXRzY2hlIFRlbGVrb20gU2VjdXJpdHkgR21iSDErMCkG +A1UEAwwiVGVsZWtvbSBTZWN1cml0eSBUTFMgUlNBIFJvb3QgMjAyMzCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAO01oYGA88tKaVvC+1GDrib94W7zgRJ9 +cUD/h3VCKSHtgVIs3xLBGYSJwb3FKNXVS2xE1kzbB5ZKVXrKNoIENqil/Cf2SfHV +cp6R+SPWcHu79ZvB7JPPGeplfohwoHP89v+1VmLhc2o0mD6CuKyVU/QBoCcHcqMA +U6DksquDOFczJZSfvkgdmOGjup5czQRxUX11eKvzWarE4GC+j4NSuHUaQTXtvPM6 +Y+mpFEXX5lLRbtLevOP1Czvm4MS9Q2QTps70mDdsipWol8hHD/BeEIvnHRz+sTug +BTNoBUGCwQMrAcjnj02r6LX2zWtEtefdi+zqJbQAIldNsLGyMcEWzv/9FIS3R/qy +8XDe24tsNlikfLMR0cN3f1+2JeANxdKz+bi4d9s3cXFH42AYTyS2dTd4uaNir73J +co4vzLuu2+QVUhkHM/tqty1LkCiCc/4YizWN26cEar7qwU02OxY2kTLvtkCJkUPg +8qKrBC7m8kwOFjQgrIfBLX7JZkcXFBGk8/ehJImr2BrIoVyxo/eMbcgByU/J7MT8 +rFEz0ciD0cmfHdRHNCk+y7AO+oMLKFjlKdw/fKifybYKu6boRhYPluV75Gp6SG12 +mAWl3G0eQh5C2hrgUve1g8Aae3g1LDj1H/1Joy7SWWO/gLCMk3PLNaaZlSJhZQNg ++y+TS/qanIA7AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtqeX +gj10hZv3PJ+TmpV5dVKMbUcwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS2 +p5eCPXSFm/c8n5OalXl1UoxtRzANBgkqhkiG9w0BAQwFAAOCAgEAqMxhpr51nhVQ +pGv7qHBFfLp+sVr8WyP6Cnf4mHGCDG3gXkaqk/QeoMPhk9tLrbKmXauw1GLLXrtm +9S3ul0A8Yute1hTWjOKWi0FpkzXmuZlrYrShF2Y0pmtjxrlO8iLpWA1WQdH6DErw +M807u20hOq6OcrXDSvvpfeWxm4bu4uB9tPcy/SKE8YXJN3nptT+/XOR0so8RYgDd +GGah2XsjX/GO1WfoVNpbOms2b/mBsTNHM3dA+VKq3dSDz4V4mZqTuXNnQkYRIer+ +CqkbGmVps4+uFrb2S1ayLfmlyOw7YqPta9BO1UAJpB+Y1zqlklkg5LB9zVtzaL1t +xKITDmcZuI1CfmwMmm6gJC3VRRvcxAIU/oVbZZfKTpBQCHpCNfnqwmbU+AGuHrS+ +w6jv/naaoqYfRvaE7fzbzsQCzndILIyy7MMAo+wsVRjBfhnu4S/yrYObnqsZ38aK +L4x35bcF7DvB7L6Gs4a8wPfc5+pbrrLMtTWGS9DiP7bY+A4A7l3j941Y/8+LN+lj +X273CXE2whJdV/LItM3z7gLfEdxquVeEHVlNjM7IDiPCtyaaEBRx/pOyiriA8A4Q +ntOoUAw3gi/q4Iqd4Sw5/7W0cwDk90imc6y/st53BIe0o82bNSQ3+pCTE4FCxpgm +dTdmQRCsu/WU48IxK63nI1bMNSWSs1A= +-----END CERTIFICATE----- + +# Issuer: CN=FIRMAPROFESIONAL CA ROOT-A WEB O=Firmaprofesional SA +# Subject: CN=FIRMAPROFESIONAL CA ROOT-A WEB O=Firmaprofesional SA +# Label: "FIRMAPROFESIONAL CA ROOT-A WEB" +# Serial: 65916896770016886708751106294915943533 +# MD5 Fingerprint: 82:b2:ad:45:00:82:b0:66:63:f8:5f:c3:67:4e:ce:a3 +# SHA1 Fingerprint: a8:31:11:74:a6:14:15:0d:ca:77:dd:0e:e4:0c:5d:58:fc:a0:72:a5 +# SHA256 Fingerprint: be:f2:56:da:f2:6e:9c:69:bd:ec:16:02:35:97:98:f3:ca:f7:18:21:a0:3e:01:82:57:c5:3c:65:61:7f:3d:4a +-----BEGIN CERTIFICATE----- +MIICejCCAgCgAwIBAgIQMZch7a+JQn81QYehZ1ZMbTAKBggqhkjOPQQDAzBuMQsw +CQYDVQQGEwJFUzEcMBoGA1UECgwTRmlybWFwcm9mZXNpb25hbCBTQTEYMBYGA1UE +YQwPVkFURVMtQTYyNjM0MDY4MScwJQYDVQQDDB5GSVJNQVBST0ZFU0lPTkFMIENB +IFJPT1QtQSBXRUIwHhcNMjIwNDA2MDkwMTM2WhcNNDcwMzMxMDkwMTM2WjBuMQsw +CQYDVQQGEwJFUzEcMBoGA1UECgwTRmlybWFwcm9mZXNpb25hbCBTQTEYMBYGA1UE +YQwPVkFURVMtQTYyNjM0MDY4MScwJQYDVQQDDB5GSVJNQVBST0ZFU0lPTkFMIENB +IFJPT1QtQSBXRUIwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARHU+osEaR3xyrq89Zf +e9MEkVz6iMYiuYMQYneEMy3pA4jU4DP37XcsSmDq5G+tbbT4TIqk5B/K6k84Si6C +cyvHZpsKjECcfIr28jlgst7L7Ljkb+qbXbdTkBgyVcUgt5SjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAUk+FDY1w8ndYn81LsF7Kpryz3dvgwHQYDVR0O +BBYEFJPhQ2NcPJ3WJ/NS7Beyqa8s93b4MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjO +PQQDAwNoADBlAjAdfKR7w4l1M+E7qUW/Runpod3JIha3RxEL2Jq68cgLcFBTApFw 
+hVmpHqTm6iMxoAACMQD94vizrxa5HnPEluPBMBnYfubDl94cT7iJLzPrSA8Z94dG +XSaQpYXFuXqUPoeovQA= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA CYBER Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA CYBER Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA CYBER Root CA" +# Serial: 85076849864375384482682434040119489222 +# MD5 Fingerprint: 0b:33:a0:97:52:95:d4:a9:fd:bb:db:6e:a3:55:5b:51 +# SHA1 Fingerprint: f6:b1:1c:1a:83:38:e9:7b:db:b3:a8:c8:33:24:e0:2d:9c:7f:26:66 +# SHA256 Fingerprint: 3f:63:bb:28:14:be:17:4e:c8:b6:43:9c:f0:8d:6d:56:f0:b7:c4:05:88:3a:56:48:a3:34:42:4d:6b:3e:c5:58 +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIQQAE0jMIAAAAAAAAAATzyxjANBgkqhkiG9w0BAQwFADBQ +MQswCQYDVQQGEwJUVzESMBAGA1UEChMJVEFJV0FOLUNBMRAwDgYDVQQLEwdSb290 +IENBMRswGQYDVQQDExJUV0NBIENZQkVSIFJvb3QgQ0EwHhcNMjIxMTIyMDY1NDI5 +WhcNNDcxMTIyMTU1OTU5WjBQMQswCQYDVQQGEwJUVzESMBAGA1UEChMJVEFJV0FO +LUNBMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJUV0NBIENZQkVSIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDG+Moe2Qkgfh1sTs6P +40czRJzHyWmqOlt47nDSkvgEs1JSHWdyKKHfi12VCv7qze33Kc7wb3+szT3vsxxF +avcokPFhV8UMxKNQXd7UtcsZyoC5dc4pztKFIuwCY8xEMCDa6pFbVuYdHNWdZsc/ +34bKS1PE2Y2yHer43CdTo0fhYcx9tbD47nORxc5zb87uEB8aBs/pJ2DFTxnk684i +JkXXYJndzk834H/nY62wuFm40AZoNWDTNq5xQwTxaWV4fPMf88oon1oglWa0zbfu +j3ikRRjpJi+NmykosaS3Om251Bw4ckVYsV7r8Cibt4LK/c/WMw+f+5eesRycnupf +Xtuq3VTpMCEobY5583WSjCb+3MX2w7DfRFlDo7YDKPYIMKoNM+HvnKkHIuNZW0CP +2oi3aQiotyMuRAlZN1vH4xfyIutuOVLF3lSnmMlLIJXcRolftBL5hSmO68gnFSDA +S9TMfAxsNAwmmyYxpjyn9tnQS6Jk/zuZQXLB4HCX8SS7K8R0IrGsayIyJNN4KsDA +oS/xUgXJP+92ZuJF2A09rZXIx4kmyA+upwMu+8Ff+iDhcK2wZSA3M2Cw1a/XDBzC +kHDXShi8fgGwsOsVHkQGzaRP6AzRwyAQ4VRlnrZR0Bp2a0JaWHY06rc3Ga4udfmW +5cFZ95RXKSWNOkyrTZpB0F8mAwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBSdhWEUfMFib5do5E83QOGt4A1WNzAd +BgNVHQ4EFgQUnYVhFHzBYm+XaORPN0DhreANVjcwDQYJKoZIhvcNAQEMBQADggIB +AGSPesRiDrWIzLjHhg6hShbNcAu3p4ULs3a2D6f/CIsLJc+o1IN1KriWiLb73y0t +tGlTITVX1olNc79pj3CjYcya2x6a4CD4bLubIp1dhDGaLIrdaqHXKGnK/nZVekZn +68xDiBaiA9a5F/gZbG0jAn/xX9AKKSM70aoK7akXJlQKTcKlTfjF/biBzysseKNn +TKkHmvPfXvt89YnNdJdhEGoHK4Fa0o635yDRIG4kqIQnoVesqlVYL9zZyvpoBJ7t +RCT5dEA7IzOrg1oYJkK2bVS1FmAwbLGg+LhBoF1JSdJlBTrq/p1hvIbZv97Tujqx +f36SNI7JAG7cmL3c7IAFrQI932XtCwP39xaEBDG6k5TY8hL4iuO/Qq+n1M0RFxbI +Qh0UqEL20kCGoE8jypZFVmAGzbdVAaYBlGX+bgUJurSkquLvWL69J1bY73NxW0Qz +8ppy6rBePm6pUlvscG21h483XjyMnM7k8M4MZ0HMzvaAq07MTFb1wWFZk7Q+ptq4 +NxKfKjLji7gh7MMrZQzvIt6IKTtM1/r+t+FHvpw+PoP7UV31aPcuIYXcv/Fa4nzX +xeSDwWrruoBa3lwtcHb4yOWHh8qgnaHlIhInD0Q9HWzq1MKLL295q39QpsQZp6F6 +t5b5wR9iWqJDB0BeJsas7a5wFsWqynKKTbDPAYsDP27X +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA12 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA12 O=Cybertrust Japan Co., Ltd. 
+# Label: "SecureSign Root CA12" +# Serial: 587887345431707215246142177076162061960426065942 +# MD5 Fingerprint: c6:89:ca:64:42:9b:62:08:49:0b:1e:7f:e9:07:3d:e8 +# SHA1 Fingerprint: 7a:22:1e:3d:de:1b:06:ac:9e:c8:47:70:16:8e:3c:e5:f7:6b:06:f4 +# SHA256 Fingerprint: 3f:03:4b:b5:70:4d:44:b2:d0:85:45:a0:20:57:de:93:eb:f3:90:5f:ce:72:1a:cb:c7:30:c0:6d:da:ee:90:4e +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUZvnHwa/swlG07VOX5uaCwysckBYwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28u +LCBMdGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExMjAeFw0yMDA0MDgw +NTM2NDZaFw00MDA0MDgwNTM2NDZaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpD +eWJlcnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBS +b290IENBMTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6OcE3emhF +KxS06+QT61d1I02PJC0W6K6OyX2kVzsqdiUzg2zqMoqUm048luT9Ub+ZyZN+v/mt +p7JIKwccJ/VMvHASd6SFVLX9kHrko+RRWAPNEHl57muTH2SOa2SroxPjcf59q5zd +J1M3s6oYwlkm7Fsf0uZlfO+TvdhYXAvA42VvPMfKWeP+bl+sg779XSVOKik71gur +FzJ4pOE+lEa+Ym6b3kaosRbnhW70CEBFEaCeVESE99g2zvVQR9wsMJvuwPWW0v4J +hscGWa5Pro4RmHvzC1KqYiaqId+OJTN5lxZJjfU+1UefNzFJM3IFTQy2VYzxV4+K +h9GtxRESOaCtAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBRXNPN0zwRL1SXm8UC2LEzZLemgrTANBgkqhkiG9w0BAQsF +AAOCAQEAPrvbFxbS8hQBICw4g0utvsqFepq2m2um4fylOqyttCg6r9cBg0krY6Ld +mmQOmFxv3Y67ilQiLUoT865AQ9tPkbeGGuwAtEGBpE/6aouIs3YIcipJQMPTw4WJ +mBClnW8Zt7vPemVV2zfrPIpyMpcemik+rY3moxtt9XUa5rBouVui7mlHJzWhhpmA +8zNL4WukJsPvdFlseqJkth5Ew1DgDzk9qTPxpfPSvWKErI4cqc1avTc7bgoitPQV +55FYxTpE05Uo2cBl6XLK0A+9H7MV2anjpEcJnuDLN/v9vZfVvhgaaaI5gdka9at/ +yOPiZwud9AzqVN/Ssq+xIvEg37xEHA== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA14 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA14 O=Cybertrust Japan Co., Ltd. +# Label: "SecureSign Root CA14" +# Serial: 575790784512929437950770173562378038616896959179 +# MD5 Fingerprint: 71:0d:72:fa:92:19:65:5e:89:04:ac:16:33:f0:bc:d5 +# SHA1 Fingerprint: dd:50:c0:f7:79:b3:64:2e:74:a2:b8:9d:9f:d3:40:dd:bb:f0:f2:4f +# SHA256 Fingerprint: 4b:00:9c:10:34:49:4f:9a:b5:6b:ba:3b:a1:d6:27:31:fc:4d:20:d8:95:5a:dc:ec:10:a9:25:60:72:61:e3:38 +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIUZNtaDCBO6Ncpd8hQJ6JaJ90t8sswDQYJKoZIhvcNAQEM +BQAwUTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28u +LCBMdGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExNDAeFw0yMDA0MDgw +NzA2MTlaFw00NTA0MDgwNzA2MTlaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpD +eWJlcnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBS +b290IENBMTQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDF0nqh1oq/ +FjHQmNE6lPxauG4iwWL3pwon71D2LrGeaBLwbCRjOfHw3xDG3rdSINVSW0KZnvOg +vlIfX8xnbacuUKLBl422+JX1sLrcneC+y9/3OPJH9aaakpUqYllQC6KxNedlsmGy +6pJxaeQp8E+BgQQ8sqVb1MWoWWd7VRxJq3qdwudzTe/NCcLEVxLbAQ4jeQkHO6Lo +/IrPj8BGJJw4J+CDnRugv3gVEOuGTgpa/d/aLIJ+7sr2KeH6caH3iGicnPCNvg9J +kdjqOvn90Ghx2+m1K06Ckm9mH+Dw3EzsytHqunQG+bOEkJTRX45zGRBdAuVwpcAQ +0BB8b8VYSbSwbprafZX1zNoCr7gsfXmPvkPx+SgojQlD+Ajda8iLLCSxjVIHvXib +y8posqTdDEx5YMaZ0ZPxMBoH064iwurO8YQJzOAUbn8/ftKChazcqRZOhaBgy/ac +18izju3Gm5h1DVXoX+WViwKkrkMpKBGk5hIwAUt1ax5mnXkvpXYvHUC0bcl9eQjs +0Wq2XSqypWa9a4X0dFbD9ed1Uigspf9mR6XU/v6eVL9lfgHWMI+lNpyiUBzuOIAB +SMbHdPTGrMNASRZhdCyvjG817XsYAFs2PJxQDcqSMxDxJklt33UkN4Ii1+iW/RVL +ApY+B3KVfqs9TC7XyvDf4Fg/LS8EmjijAQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUBpOjCl4oaTeqYR3r6/wtbyPk +86AwDQYJKoZIhvcNAQEMBQADggIBAJaAcgkGfpzMkwQWu6A6jZJOtxEaCnFxEM0E +rX+lRVAQZk5KQaID2RFPeje5S+LGjzJmdSX7684/AykmjbgWHfYfM25I5uj4V7Ib +ed87hwriZLoAymzvftAj63iP/2SbNDefNWWipAA9EiOWWF3KY4fGoweITedpdopT 
+zfFP7ELyk+OZpDc8h7hi2/DsHzc/N19DzFGdtfCXwreFamgLRB7lUe6TzktuhsHS +DCRZNhqfLJGP4xjblJUK7ZGqDpncllPjYYPGFrojutzdfhrGe0K22VoF3Jpf1d+4 +2kd92jjbrDnVHmtsKheMYc2xbXIBw8MgAGJoFjHVdqqGuw6qnsb58Nn4DSEC5MUo +FlkRudlpcyqSeLiSV5sI8jrlL5WwWLdrIBRtFO8KvH7YVdiI2i/6GaX7i+B/OfVy +K4XELKzvGUWSTLNhB9xNH27SgRNcmvMSZ4PPmz+Ln52kuaiWA3rF7iDeM9ovnhp6 +dB7h7sxaOgTdsxoEqBRjrLdHEoOabPXm6RUVkRqEGQ6UROcSjiVbgGcZ3GOTEAtl +Lor6CZpO2oYofaphNdgOpygau1LgePhsumywbrmHXumZNTfxPWQrqaA0k89jL9WB +365jJ6UeTo3cKXhZ+PmhIIynJkBugnLNeLLIjzwec+fBH7/PzqUqm9tEZDKgu39c +JRNItX+S +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA15 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA15 O=Cybertrust Japan Co., Ltd. +# Label: "SecureSign Root CA15" +# Serial: 126083514594751269499665114766174399806381178503 +# MD5 Fingerprint: 13:30:fc:c4:62:a6:a9:de:b5:c1:68:af:b5:d2:31:47 +# SHA1 Fingerprint: cb:ba:83:c8:c1:5a:5d:f1:f9:73:6f:ca:d7:ef:28:13:06:4a:07:7d +# SHA256 Fingerprint: e7:78:f0:f0:95:fe:84:37:29:cd:1a:00:82:17:9e:53:14:a9:c2:91:44:28:05:e1:fb:1d:8f:b6:b8:88:6c:3a +-----BEGIN CERTIFICATE----- +MIICIzCCAamgAwIBAgIUFhXHw9hJp75pDIqI7fBw+d23PocwCgYIKoZIzj0EAwMw +UTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28uLCBM +dGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExNTAeFw0yMDA0MDgwODMy +NTZaFw00NTA0MDgwODMyNTZaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpDeWJl +cnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBSb290 +IENBMTUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQLUHSNZDKZmbPSYAi4Io5GdCx4 +wCtELW1fHcmuS1Iggz24FG1Th2CeX2yF2wYUleDHKP+dX+Sq8bOLbe1PL0vJSpSR +ZHX+AezB2Ot6lHhWGENfa4HL9rzatAy2KZMIaY+jQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTrQciu/NWeUUj1vYv0hyCTQSvT +9DAKBggqhkjOPQQDAwNoADBlAjEA2S6Jfl5OpBEHvVnCB96rMjhTKkZEBhd6zlHp +4P9mLQlO4E/0BdGF9jVg3PVys0Z9AjBEmEYagoUeYWmJSwdLZrWeqrqgHkHZAXQ6 +bkU6iYAZezKYVWOr62Nuk22rGwlgMU4= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 2 2023 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 2 2023 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 2 2023" +# Serial: 153168538924886464690566649552453098598 +# MD5 Fingerprint: e1:09:ed:d3:60:d4:56:1b:47:1f:b7:0c:5f:1b:5f:85 +# SHA1 Fingerprint: 2d:b0:70:ee:71:94:af:69:68:17:db:79:ce:58:9f:a0:6b:96:f7:87 +# SHA256 Fingerprint: 05:52:e6:f8:3f:df:65:e8:fa:96:70:e6:66:df:28:a4:e2:13:40:b5:10:cb:e5:25:66:f9:7c:4f:b9:4b:2b:d1 +-----BEGIN CERTIFICATE----- +MIIFqTCCA5GgAwIBAgIQczswBEhb2U14LnNLyaHcZjANBgkqhkiG9w0BAQ0FADBI +MQswCQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlE +LVRSVVNUIEJSIFJvb3QgQ0EgMiAyMDIzMB4XDTIzMDUwOTA4NTYzMVoXDTM4MDUw +OTA4NTYzMFowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEi +MCAGA1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDIgMjAyMzCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAK7/CVmRgApKaOYkP7in5Mg6CjoWzckjYaCTcfKr +i3OPoGdlYNJUa2NRb0kz4HIHE304zQaSBylSa053bATTlfrdTIzZXcFhfUvnKLNE +gXtRr90zsWh81k5M/itoucpmacTsXld/9w3HnDY25QdgrMBM6ghs7wZ8T1soegj8 +k12b9py0i4a6Ibn08OhZWiihNIQaJZG2tY/vsvmA+vk9PBFy2OMvhnbFeSzBqZCT +Rphny4NqoFAjpzv2gTng7fC5v2Xx2Mt6++9zA84A9H3X4F07ZrjcjrqDy4d2A/wl +2ecjbwb9Z/Pg/4S8R7+1FhhGaRTMBffb00msa8yr5LULQyReS2tNZ9/WtT5PeB+U +cSTq3nD88ZP+npNa5JRal1QMNXtfbO4AHyTsA7oC9Xb0n9Sa7YUsOCIvx9gvdhFP +/Wxc6PWOJ4d/GUohR5AdeY0cW/jPSoXk7bNbjb7EZChdQcRurDhaTyN0dKkSw/bS +uREVMweR2Ds3OmMwBtHFIjYoYiMQ4EbMl6zWK11kJNXuHA7e+whadSr2Y23OC0K+ +0bpwHJwh5Q8xaRfX/Aq03u2AnMuStIv13lmiWAmlY0cL4UEyNEHZmrHZqLAbWt4N +DfTisl01gLmB1IRpkQLLddCNxbU9CZEJjxShFHR5PtbJFR2kWVki3PaKRT08EtY+ +XTIvAgMBAAGjgY4wgYswDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUZ5Dw1t61 
+GNVGKX5cq/ieCLxklRAwDgYDVR0PAQH/BAQDAgEGMEkGA1UdHwRCMEAwPqA8oDqG +OGh0dHA6Ly9jcmwuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3RfYnJfcm9vdF9jYV8y +XzIwMjMuY3JsMA0GCSqGSIb3DQEBDQUAA4ICAQA097N3U9swFrktpSHxQCF16+tI +FoE9c+CeJyrrd6kTpGoKWloUMz1oH4Guaf2Mn2VsNELZLdB/eBaxOqwjMa1ef67n +riv6uvw8l5VAk1/DLQOj7aRvU9f6QA4w9QAgLABMjDu0ox+2v5Eyq6+SmNMW5tTR +VFxDWy6u71cqqLRvpO8NVhTaIasgdp4D/Ca4nj8+AybmTNudX0KEPUUDAxxZiMrc +LmEkWqTqJwtzEr5SswrPMhfiHocaFpVIbVrg0M8JkiZmkdijYQ6qgYF/6FKC0ULn +4B0Y+qSFNueG4A3rvNTJ1jxD8V1Jbn6Bm2m1iWKPiFLY1/4nwSPFyysCu7Ff/vtD +hQNGvl3GyiEm/9cCnnRK3PgTFbGBVzbLZVzRHTF36SXDw7IyN9XxmAnkbWOACKsG +koHU6XCPpz+y7YaMgmo1yEJagtFSGkUPFaUA8JR7ZSdXOUPPfH/mvTWze/EZTN46 +ls/pdu4D58JDUjxqgejBWoC9EV2Ta/vH5mQ/u2kc6d0li690yVRAysuTEwrt+2aS +Ecr1wPrYg1UDfNPFIkZ1cGt5SAYqgpq/5usWDiJFAbzdNpQ0qTUmiteXue4Icr80 +knCDgKs4qllo3UCkGJCy89UDyibK79XH4I9TjvAA46jtn/mtd+ArY0+ew+43u3gJ +hJ65bvspmZDogNOfJA== +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia TLS ECC Root CA O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia TLS ECC Root CA O=TrustAsia Technologies, Inc. +# Label: "TrustAsia TLS ECC Root CA" +# Serial: 310892014698942880364840003424242768478804666567 +# MD5 Fingerprint: 09:48:04:77:d2:fc:65:93:71:66:b1:11:95:4f:06:8c +# SHA1 Fingerprint: b5:ec:39:f3:a1:66:37:ae:c3:05:94:57:e2:be:11:be:b7:a1:7f:36 +# SHA256 Fingerprint: c0:07:6b:9e:f0:53:1f:b1:a6:56:d6:7c:4e:be:97:cd:5d:ba:a4:1e:f4:45:98:ac:c2:48:98:78:c9:2d:87:11 +-----BEGIN CERTIFICATE----- +MIICMTCCAbegAwIBAgIUNnThTXxlE8msg1UloD5Sfi9QaMcwCgYIKoZIzj0EAwMw +WDELMAkGA1UEBhMCQ04xJTAjBgNVBAoTHFRydXN0QXNpYSBUZWNobm9sb2dpZXMs +IEluYy4xIjAgBgNVBAMTGVRydXN0QXNpYSBUTFMgRUNDIFJvb3QgQ0EwHhcNMjQw +NTE1MDU0MTU2WhcNNDQwNTE1MDU0MTU1WjBYMQswCQYDVQQGEwJDTjElMCMGA1UE +ChMcVHJ1c3RBc2lhIFRlY2hub2xvZ2llcywgSW5jLjEiMCAGA1UEAxMZVHJ1c3RB +c2lhIFRMUyBFQ0MgUm9vdCBDQTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLh/pVs/ +AT598IhtrimY4ZtcU5nb9wj/1WrgjstEpvDBjL1P1M7UiFPoXlfXTr4sP/MSpwDp +guMqWzJ8S5sUKZ74LYO1644xST0mYekdcouJtgq7nDM1D9rs3qlKH8kzsaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQULIVTu7FDzTLqnqOH/qKYqKaT6RAw +DgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2gAMGUCMFRH18MtYYZI9HlaVQ01 +L18N9mdsd0AaRuf4aFtOJx24mH1/k78ITcTaRTChD15KeAIxAKORh/IRM4PDwYqR +OkwrULG9IpRdNYlzg8WbGf60oenUoWa2AaU2+dhoYSi3dOGiMQ== +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia TLS RSA Root CA O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia TLS RSA Root CA O=TrustAsia Technologies, Inc. 
+# Label: "TrustAsia TLS RSA Root CA" +# Serial: 160405846464868906657516898462547310235378010780 +# MD5 Fingerprint: 3b:9e:c3:86:0f:34:3c:6b:c5:46:c4:8e:1d:e7:19:12 +# SHA1 Fingerprint: a5:46:50:c5:62:ea:95:9a:1a:a7:04:6f:17:58:c7:29:53:3d:03:fa +# SHA256 Fingerprint: 06:c0:8d:7d:af:d8:76:97:1e:b1:12:4f:e6:7f:84:7e:c0:c7:a1:58:d3:ea:53:cb:e9:40:e2:ea:97:91:f4:c3 +-----BEGIN CERTIFICATE----- +MIIFgDCCA2igAwIBAgIUHBjYz+VTPyI1RlNUJDxsR9FcSpwwDQYJKoZIhvcNAQEM +BQAwWDELMAkGA1UEBhMCQ04xJTAjBgNVBAoTHFRydXN0QXNpYSBUZWNobm9sb2dp +ZXMsIEluYy4xIjAgBgNVBAMTGVRydXN0QXNpYSBUTFMgUlNBIFJvb3QgQ0EwHhcN +MjQwNTE1MDU0MTU3WhcNNDQwNTE1MDU0MTU2WjBYMQswCQYDVQQGEwJDTjElMCMG +A1UEChMcVHJ1c3RBc2lhIFRlY2hub2xvZ2llcywgSW5jLjEiMCAGA1UEAxMZVHJ1 +c3RBc2lhIFRMUyBSU0EgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC +AgoCggIBAMMWuBtqpERz5dZO9LnPWwvB0ZqB9WOwj0PBuwhaGnrhB3YmH49pVr7+ +NmDQDIPNlOrnxS1cLwUWAp4KqC/lYCZUlviYQB2srp10Zy9U+5RjmOMmSoPGlbYJ +Q1DNDX3eRA5gEk9bNb2/mThtfWza4mhzH/kxpRkQcwUqwzIZheo0qt1CHjCNP561 +HmHVb70AcnKtEj+qpklz8oYVlQwQX1Fkzv93uMltrOXVmPGZLmzjyUT5tUMnCE32 +ft5EebuyjBza00tsLtbDeLdM1aTk2tyKjg7/D8OmYCYozza/+lcK7Fs/6TAWe8Tb +xNRkoDD75f0dcZLdKY9BWN4ArTr9PXwaqLEX8E40eFgl1oUh63kd0Nyrz2I8sMeX +i9bQn9P+PN7F4/w6g3CEIR0JwqH8uyghZVNgepBtljhb//HXeltt08lwSUq6HTrQ +UNoyIBnkiz/r1RYmNzz7dZ6wB3C4FGB33PYPXFIKvF1tjVEK2sUYyJtt3LCDs3+j +TnhMmCWr8n4uIF6CFabW2I+s5c0yhsj55NqJ4js+k8UTav/H9xj8Z7XvGCxUq0DT +bE3txci3OE9kxJRMT6DNrqXGJyV1J23G2pyOsAWZ1SgRxSHUuPzHlqtKZFlhaxP8 +S8ySpg+kUb8OWJDZgoM5pl+z+m6Ss80zDoWo8SnTq1mt1tve1CuBAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLgHkXlcBvRG/XtZylomkadFK/hT +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQwFAAOCAgEAIZtqBSBdGBanEqT3 +Rz/NyjuujsCCztxIJXgXbODgcMTWltnZ9r96nBO7U5WS/8+S4PPFJzVXqDuiGev4 +iqME3mmL5Dw8veWv0BIb5Ylrc5tvJQJLkIKvQMKtuppgJFqBTQUYo+IzeXoLH5Pt +7DlK9RME7I10nYEKqG/odv6LTytpEoYKNDbdgptvT+Bz3Ul/KD7JO6NXBNiT2Twp +2xIQaOHEibgGIOcberyxk2GaGUARtWqFVwHxtlotJnMnlvm5P1vQiJ3koP26TpUJ +g3933FEFlJ0gcXax7PqJtZwuhfG5WyRasQmr2soaB82G39tp27RIGAAtvKLEiUUj +pQ7hRGU+isFqMB3iYPg6qocJQrmBktwliJiJ8Xw18WLK7nn4GS/+X/jbh87qqA8M +pugLoDzga5SYnH+tBuYc6kIQX+ImFTw3OffXvO645e8D7r0i+yiGNFjEWn9hongP +XvPKnbwbPKfILfanIhHKA9jnZwqKDss1jjQ52MjqjZ9k4DewbNfFj8GQYSbbJIwe +SsCI3zWQzj8C9GRh3sfIB5XeMhg6j6JCQCTl1jNdfK7vsU1P1FeQNWrcrgSXSYk0 +ly4wBOeY99sLAZDBHwo/+ML+TvrbmnNzFrwFuHnYWa8G5z9nODmxfKuU4CkUpijy +323imttUQ/hHWKNddBWcwauwxzQ= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 2 2023 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 2 2023 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 2 2023" +# Serial: 139766439402180512324132425437959641711 +# MD5 Fingerprint: 96:b4:78:09:f0:09:cb:77:eb:bb:1b:4d:6f:36:bc:b6 +# SHA1 Fingerprint: a5:5b:d8:47:6c:8f:19:f7:4c:f4:6d:6b:b6:c2:79:82:22:df:54:8b +# SHA256 Fingerprint: 8e:82:21:b2:e7:d4:00:78:36:a1:67:2f:0d:cc:29:9c:33:bc:07:d3:16:f1:32:fa:1a:20:6d:58:71:50:f1:ce +-----BEGIN CERTIFICATE----- +MIIFqTCCA5GgAwIBAgIQaSYJfoBLTKCnjHhiU19abzANBgkqhkiG9w0BAQ0FADBI +MQswCQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlE +LVRSVVNUIEVWIFJvb3QgQ0EgMiAyMDIzMB4XDTIzMDUwOTA5MTAzM1oXDTM4MDUw +OTA5MTAzMlowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEi +MCAGA1UEAxMZRC1UUlVTVCBFViBSb290IENBIDIgMjAyMzCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANiOo4mAC7JXUtypU0w3uX9jFxPvp1sjW2l1sJkK +F8GLxNuo4MwxusLyzV3pt/gdr2rElYfXR8mV2IIEUD2BCP/kPbOx1sWy/YgJ25yE +7CUXFId/MHibaljJtnMoPDT3mfd/06b4HEV8rSyMlD/YZxBTfiLNTiVR8CUkNRFe +EMbsh2aJgWi6zCudR3Mfvc2RpHJqnKIbGKBv7FD0fUDCqDDPvXPIEysQEx6Lmqg6 +lHPTGGkKSv/BAQP/eX+1SH977ugpbzZMlWGG2Pmic4ruri+W7mjNPU0oQvlFKzIb 
+RlUWaqZLKfm7lVa/Rh3sHZMdwGWyH6FDrlaeoLGPaxK3YG14C8qKXO0elg6DpkiV +jTujIcSuWMYAsoS0I6SWhjW42J7YrDRJmGOVxcttSEfi8i4YHtAxq9107PncjLgc +jmgjutDzUNzPZY9zOjLHfP7KgiJPvo5iR2blzYfi6NUPGJ/lBHJLRjwQ8kTCZFZx +TnXonMkmdMV9WdEKWw9t/p51HBjGGjp82A0EzM23RWV6sY+4roRIPrN6TagD4uJ+ +ARZZaBhDM7DS3LAaQzXupdqpRlyuhoFBAUp0JuyfBr/CBTdkdXgpaP3F9ev+R/nk +hbDhezGdpn9yo7nELC7MmVcOIQxFAZRl62UJxmMiCzNJkkg8/M3OsD6Onov4/knF +NXJHAgMBAAGjgY4wgYswDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUqvyREBuH +kV8Wub9PS5FeAByxMoAwDgYDVR0PAQH/BAQDAgEGMEkGA1UdHwRCMEAwPqA8oDqG +OGh0dHA6Ly9jcmwuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3RfZXZfcm9vdF9jYV8y +XzIwMjMuY3JsMA0GCSqGSIb3DQEBDQUAA4ICAQCTy6UfmRHsmg1fLBWTxj++EI14 +QvBukEdHjqOSMo1wj/Zbjb6JzkcBahsgIIlbyIIQbODnmaprxiqgYzWRaoUlrRc4 +pZt+UPJ26oUFKidBK7GB0aL2QHWpDsvxVUjY7NHss+jOFKE17MJeNRqrphYBBo7q +3C+jisosketSjl8MmxfPy3MHGcRqwnNU73xDUmPBEcrCRbH0O1P1aa4846XerOhU +t7KR/aypH/KH5BfGSah82ApB9PI+53c0BFLd6IHyTS9URZ0V4U/M5d40VxDJI3IX +cI1QcB9WbMy5/zpaT2N6w25lBx2Eof+pDGOJbbJAiDnXH3dotfyc1dZnaVuodNv8 +ifYbMvekJKZ2t0dT741Jj6m2g1qllpBFYfXeA08mD6iL8AOWsKwV0HFaanuU5nCT +2vFp4LJiTZ6P/4mdm13NRemUAiKN4DV/6PEEeXFsVIP4M7kFMhtYVRFP0OUnR3Hs +7dpn1mKmS00PaaLJvOwiS5THaJQXfuKOKD62xur1NGyfN4gHONuGcfrNlUhDbqNP +gofXNJhuS5N5YHVpD/Aa1VP6IQzCP+k/HxiMkl14p3ZnGbuy6n/pcAlWVqOwDAst +Nl7F6cTVg8uGF5csbBNvh1qvSaYd2804BC5f4ko1Di1L+KIkBI3Y4WNeApI02phh +XBxvWHZks/wCuPWdCg== +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign RSA TLS Root CA 2022 - 1 O=SwissSign AG +# Subject: CN=SwissSign RSA TLS Root CA 2022 - 1 O=SwissSign AG +# Label: "SwissSign RSA TLS Root CA 2022 - 1" +# Serial: 388078645722908516278762308316089881486363258315 +# MD5 Fingerprint: 16:2e:e4:19:76:81:85:ba:8e:91:58:f1:15:ef:72:39 +# SHA1 Fingerprint: 81:34:0a:be:4c:cd:ce:cc:e7:7d:cc:8a:d4:57:e2:45:a0:77:5d:ce +# SHA256 Fingerprint: 19:31:44:f4:31:e0:fd:db:74:07:17:d4:de:92:6a:57:11:33:88:4b:43:60:d3:0e:27:29:13:cb:e6:60:ce:41 +-----BEGIN CERTIFICATE----- +MIIFkzCCA3ugAwIBAgIUQ/oMX04bgBhE79G0TzUfRPSA7cswDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzErMCkGA1UE +AxMiU3dpc3NTaWduIFJTQSBUTFMgUm9vdCBDQSAyMDIyIC0gMTAeFw0yMjA2MDgx +MTA4MjJaFw00NzA2MDgxMTA4MjJaMFExCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxT +d2lzc1NpZ24gQUcxKzApBgNVBAMTIlN3aXNzU2lnbiBSU0EgVExTIFJvb3QgQ0Eg +MjAyMiAtIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDLKmjiC8NX +vDVjvHClO/OMPE5Xlm7DTjak9gLKHqquuN6orx122ro10JFwB9+zBvKK8i5VUXu7 +LCTLf5ImgKO0lPaCoaTo+nUdWfMHamFk4saMla+ju45vVs9xzF6BYQ1t8qsCLqSX +5XH8irCRIFucdFJtrhUnWXjyCcplDn/L9Ovn3KlMd/YrFgSVrpxxpT8q2kFC5zyE +EPThPYxr4iuRR1VPuFa+Rd4iUU1OKNlfGUEGjw5NBuBwQCMBauTLE5tzrE0USJIt +/m2n+IdreXXhvhCxqohAWVTXz8TQm0SzOGlkjIHRI36qOTw7D59Ke4LKa2/KIj4x +0LDQKhySio/YGZxH5D4MucLNvkEM+KRHBdvBFzA4OmnczcNpI/2aDwLOEGrOyvi5 +KaM2iYauC8BPY7kGWUleDsFpswrzd34unYyzJ5jSmY0lpx+Gs6ZUcDj8fV3oT4MM +0ZPlEuRU2j7yrTrePjxF8CgPBrnh25d7mUWe3f6VWQQvdT/TromZhqwUtKiE+shd +OxtYk8EXlFXIC+OCeYSf8wCENO7cMdWP8vpPlkwGqnj73mSiI80fPsWMvDdUDrta +clXvyFu1cvh43zcgTFeRc5JzrBh3Q4IgaezprClG5QtO+DdziZaKHG29777YtvTK +wP1H8K4LWCDFyB02rpeNUIMmJCn3nTsPBQIDAQABo2MwYTAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBRvjmKLk0Ow4UD2p8P98Q+4 +DxU4pTAdBgNVHQ4EFgQUb45ii5NDsOFA9qfD/fEPuA8VOKUwDQYJKoZIhvcNAQEL +BQADggIBAKwsKUF9+lz1GpUYvyypiqkkVHX1uECry6gkUSsYP2OprphWKwVDIqO3 +10aewCoSPY6WlkDfDDOLazeROpW7OSltwAJsipQLBwJNGD77+3v1dj2b9l4wBlgz +Hqp41eZUBDqyggmNzhYzWUUo8aWjlw5DI/0LIICQ/+Mmz7hkkeUFjxOgdg3XNwwQ +iJb0Pr6VvfHDffCjw3lHC1ySFWPtUnWK50Zpy1FVCypM9fJkT6lc/2cyjlUtMoIc +gC9qkfjLvH4YoiaoLqNTKIftV+Vlek4ASltOU8liNr3CjlvrzG4ngRhZi0Rjn9UM +ZfQpZX+RLOV/fuiJz48gy20HQhFRJjKKLjpHE7iNvUcNCfAWpO2Whi4Z2L6MOuhF 
+LhG6rlrnub+xzI/goP+4s9GFe3lmozm1O2bYQL7Pt2eLSMkZJVX8vY3PXtpOpvJp +zv1/THfQwUY1mFwjmwJFQ5Ra3bxHrSL+ul4vkSkphnsh3m5kt8sNjzdbowhq6/Td +Ao9QAwKxuDdollDruF/UKIqlIgyKhPBZLtU30WHlQnNYKoH3dtvi4k0NX/a3vgW0 +rk4N3hY9A4GzJl5LuEsAz/+MF7psYC0nhzck5npgL7XTgwSqT0N1osGDsieYK7EO +gLrAhV5Cud+xYJHT6xh+cHiudoO+cVrQkOPKwRYlZ0rwtnu64ZzZ +-----END CERTIFICATE----- diff --git a/.venv/lib/python3.12/site-packages/certifi/core.py b/.venv/lib/python3.12/site-packages/certifi/core.py new file mode 100644 index 0000000..1c9661c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/certifi/core.py @@ -0,0 +1,83 @@ +""" +certifi.py +~~~~~~~~~~ + +This module returns the installation location of cacert.pem or its contents. +""" +import sys +import atexit + +def exit_cacert_ctx() -> None: + _CACERT_CTX.__exit__(None, None, None) # type: ignore[union-attr] + + +if sys.version_info >= (3, 11): + + from importlib.resources import as_file, files + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the file + # in cases where we're inside of a zipimport situation until someone + # actually calls where(), but we don't want to re-extract the file + # on every call of where(), so we'll do it once then store it in a + # global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you to + # manage the cleanup of this file, so it doesn't actually return a + # path, it returns a context manager that will give you the path + # when you enter it and will do any cleanup when you leave it. In + # the common case of not needing a temporary file, it will just + # return the file system location and the __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. + _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem")) + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + atexit.register(exit_cacert_ctx) + + return _CACERT_PATH + + def contents() -> str: + return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii") + +else: + + from importlib.resources import path as get_path, read_text + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the + # file in cases where we're inside of a zipimport situation until + # someone actually calls where(), but we don't want to re-extract + # the file on every call of where(), so we'll do it once then store + # it in a global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you + # to manage the cleanup of this file, so it doesn't actually + # return a path, it returns a context manager that will give + # you the path when you enter it and will do any cleanup when + # you leave it. In the common case of not needing a temporary + # file, it will just return the file system location and the + # __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. 
+ _CACERT_CTX = get_path("certifi", "cacert.pem") + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + atexit.register(exit_cacert_ctx) + + return _CACERT_PATH + + def contents() -> str: + return read_text("certifi", "cacert.pem", encoding="ascii") diff --git a/.venv/lib/python3.12/site-packages/certifi/py.typed b/.venv/lib/python3.12/site-packages/certifi/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/METADATA b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/METADATA new file mode 100644 index 0000000..65b5577 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/METADATA @@ -0,0 +1,750 @@ +Metadata-Version: 2.4 +Name: charset-normalizer +Version: 3.4.3 +Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet. +Author-email: "Ahmed R. TAHRI" +Maintainer-email: "Ahmed R. TAHRI" +License: MIT +Project-URL: Changelog, https://github.com/jawah/charset_normalizer/blob/master/CHANGELOG.md +Project-URL: Documentation, https://charset-normalizer.readthedocs.io/ +Project-URL: Code, https://github.com/jawah/charset_normalizer +Project-URL: Issue tracker, https://github.com/jawah/charset_normalizer/issues +Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Text Processing :: Linguistic +Classifier: Topic :: Utilities +Classifier: Typing :: Typed +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +Provides-Extra: unicode-backport +Dynamic: license-file + +
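The `where()` implementation above deliberately extracts `cacert.pem` only once and caches both the context manager and the resulting path in module-level globals, so zipimport installs pay the extraction cost a single time per process. As a minimal usage sketch (an editorial illustration, not part of this patch; it assumes `requests` is also installed in this environment):

```python
# Editorial sketch, not part of the patch: consuming the vendored certifi
# module shown above. Assumes `requests` is installed in the same venv.
import certifi
import requests

bundle_path = certifi.where()      # extracted once, then cached in module globals
print(bundle_path)                 # .../site-packages/certifi/cacert.pem
print(certifi.contents()[:64])     # the PEM bundle itself, as ASCII text

# Pin an HTTPS client explicitly to this CA bundle:
resp = requests.get("https://example.org", verify=bundle_path)
print(resp.status_code)
```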

Charset Detection, for Everyone 👋

+
+The Real First Universal Charset Detector
+
+> A library that helps you read text from an unknown charset encoding. Motivated by `chardet`,
+> I'm trying to resolve the issue by taking a new approach.
+> All IANA character set names for which the Python core library provides codecs are supported.
+
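+For instance, a minimal sketch of that idea in action (the sample text is illustrative; `from_bytes` and `best()` are the entry points the package exposes):
+
+```python
+from charset_normalizer import from_bytes
+
+# Illustrative payload: text whose encoding we pretend not to know.
+payload = "Bсеки човек има право на образование.".encode("utf_8")
+
+best_guess = from_bytes(payload).best()  # None if nothing plausible is found
+if best_guess is not None:
+    print(best_guess.encoding)  # the most plausible code page, e.g. "utf_8"
+    print(str(best_guess))      # the decoded, readable text
+```
+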

+ >>>>> 👉 Try Me Online Now, Then Adopt Me 👈 <<<<< +

+
+This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
+
+| Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
+|--------------------------------------------------|:---------------------------------------------:|:--------------------------------------------------------------------------------------------------:|:-----------------------------------------------:|
+| `Fast` | ❌ | ✅ | ✅ |
+| `Universal**` | ❌ | ✅ | ❌ |
+| `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
+| `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
+| `License` | LGPL-2.1<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
+| `Native Python` | ✅ | ✅ | ❌ |
+| `Detect spoken language` | ❌ | ✅ | N/A |
+| `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
+| `Whl Size (min)` | 193.6 kB | 42 kB | ~200 kB |
+| `Supported Encoding` | 33 | 🎉 [99](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |


+
+*\*\*: Chardet and cChardet use encoding-specific code for each encoding they support, even though that covers most of the encodings in common use.*
+
+## ⚡ Performance
+
+This package offers better performance than its counterpart Chardet. Here are some numbers.
+
+| Package | Accuracy | Mean per file (ms) | File per sec (est) |
+|-----------------------------------------------|:--------:|:------------------:|:------------------:|
+| [chardet](https://github.com/chardet/chardet) | 86 % | 63 ms | 16 file/sec |
+| charset-normalizer | **98 %** | **10 ms** | 100 file/sec |
+
+| Package | 99th percentile | 95th percentile | 50th percentile |
+|-----------------------------------------------|:---------------:|:---------------:|:---------------:|
+| [chardet](https://github.com/chardet/chardet) | 265 ms | 71 ms | 7 ms |
+| charset-normalizer | 100 ms | 50 ms | 5 ms |
+
+_Updated as of December 2024 using CPython 3.12._
+
+Chardet's performance on larger files (1 MB+) is very poor. Expect a huge difference on large payloads.
+
+> Stats are generated from 400+ files using default parameters. For more details on the files used, see the GHA workflows.
+> And yes, these results might change at any time. The dataset can be updated to include more files.
+> The actual delays depend heavily on your CPU capabilities, but the relative factors should remain the same.
+> Keep in mind that the stats are generous: Chardet's accuracy versus ours is measured using Chardet's initial capabilities
+> (e.g. its supported encodings). Challenge them if you want.
+
+## ✨ Installation
+
+Using pip:
+
+```sh
+pip install charset-normalizer -U
+```
+
+## 🚀 Basic Usage
+
+### CLI
+This package comes with a CLI.
+
+```
+usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
+                  file [file ...]
+
+The Real First Universal Charset Detector. Discover originating encoding used
+on text file. Normalize text to unicode.
+
+positional arguments:
+  files                 File(s) to be analysed
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -v, --verbose         Display complementary information about file if any.
+                        Stdout will contain logs about the detection process.
+  -a, --with-alternative
+                        Output complementary possibilities if any. Top-level
+                        JSON WILL be a list.
+  -n, --normalize       Permit to normalize input file. If not set, program
+                        does not write anything.
+  -m, --minimal         Only output the charset detected to STDOUT. Disabling
+                        JSON output.
+  -r, --replace         Replace file when trying to normalize it instead of
+                        creating a new one.
+  -f, --force           Replace file without asking if you are sure, use this
+                        flag with caution.
+  -t THRESHOLD, --threshold THRESHOLD
+                        Define a custom maximum amount of chaos allowed in
+                        decoded content. 0. <= chaos <= 1.
+  --version             Show version information and exit.
+```
+
+```bash
+normalizer ./data/sample.1.fr.srt
+```
+
+or
+
+```bash
+python -m charset_normalizer ./data/sample.1.fr.srt
+```
+
+🎉 Since version 1.4.0, the CLI produces an easily consumable JSON result on stdout.
+
+```json
+{
+    "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
+    "encoding": "cp1252",
+    "encoding_aliases": [
+        "1252",
+        "windows_1252"
+    ],
+    "alternative_encodings": [
+        "cp1254",
+        "cp1256",
+        "cp1258",
+        "iso8859_14",
+        "iso8859_15",
+        "iso8859_16",
+        "iso8859_3",
+        "iso8859_9",
+        "latin_1",
+        "mbcs"
+    ],
+    "language": "French",
+    "alphabets": [
+        "Basic Latin",
+        "Latin-1 Supplement"
+    ],
+    "has_sig_or_bom": false,
+    "chaos": 0.149,
+    "coherence": 97.152,
+    "unicode_path": null,
+    "is_preferred": true
+}
+```
+
+### Python
+*Just print out normalized text*
+```python
+from charset_normalizer import from_path
+
+results = from_path('./my_subtitle.srt')
+
+print(str(results.best()))
+```
+
+*Upgrade your code without effort*
+```python
+from charset_normalizer import detect
+```
+
+The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) backward-compatible result possible.
+
+See the docs for advanced usage: [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
+
+## 😇 Why
+
+When I started using Chardet, I noticed that it did not meet my expectations, and I wanted to propose a
+reliable alternative using a completely different method. Also! I never back down on a good challenge!
+
+I **don't care** about the **originating charset** encoding, because **two different tables** can
+produce **two identical rendered strings.**
+What I want is to get readable text, the best I can.
+
+In a way, **I'm brute forcing text decoding.** How cool is that? 😎
+
+Don't confuse the **ftfy** package with charset-normalizer or chardet: ftfy's goal is to repair broken Unicode strings, whereas charset-normalizer converts a raw file in an unknown encoding to Unicode.
+
+## 🍰 How
+
+ - Discard all charset encoding tables that could not fit the binary content.
+ - Measure the noise, or the mess, once opened (by chunks) with a corresponding charset encoding.
+ - Extract the matches with the lowest mess detected.
+ - Additionally, we measure coherence / probe for a language.
+
+**Wait a minute**, what is noise/mess and coherence according to **YOU**?
+
+*Noise:* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
+**I established** some ground rules about **what is obvious** when **it seems like** a mess (i.e. defining noise in rendered text).
+ I know that my interpretation of noise is probably incomplete; feel free to contribute in order to
+ improve or rewrite it.
+
+*Coherence:* For each language on Earth, we have computed ranked letter-appearance occurrences (as best we can). So I thought
+that intel is worth something here, and I use those records against decoded text to check whether I can detect intelligent design.
+
+## ⚡ Known limitations
+
+ - Language detection is unreliable when the text contains two or more languages sharing identical letters (e.g. HTML with English tags + Turkish content, both sharing Latin characters).
+ - Every charset detector heavily depends on sufficient content. In common cases, do not bother running detection on very tiny content.
+
+## ⚠️ About Python EOLs
+
+**If you are running:**
+
+- Python >=2.7,<3.5: Unsupported
+- Python 3.5: charset-normalizer < 2.1
+- Python 3.6: charset-normalizer < 3.1
+- Python 3.7: charset-normalizer < 4.0
+
+Upgrade your Python interpreter as soon as possible.
+
+## 👤 Contributing
+
+Contributions, issues and feature requests are very much welcome.
+Feel free to check [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute. + +## 📝 License + +Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).
+This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed. + +Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/) + +## 💼 For Enterprise + +Professional support for charset-normalizer is available as part of the [Tidelift +Subscription][1]. Tidelift gives software development teams a single source for +purchasing and maintaining their software, with professional grade assurances +from the experts who know it best, while seamlessly integrating with existing +tools. + +[1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme + +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7297/badge)](https://www.bestpractices.dev/projects/7297) + +# Changelog +All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +## [3.4.3](https://github.com/Ousret/charset_normalizer/compare/3.4.2...3.4.3) (2025-08-09) + +### Changed +- mypy(c) is no longer a required dependency at build time if `CHARSET_NORMALIZER_USE_MYPYC` isn't set to `1`. (#595) (#583) +- automatically lower confidence on small bytes samples that are not Unicode in `detect` output legacy function. (#391) + +### Added +- Custom build backend to overcome inability to mark mypy as an optional dependency in the build phase. +- Support for Python 3.14 + +### Fixed +- sdist archive contained useless directories. +- automatically fallback on valid UTF-16 or UTF-32 even if the md says it's noisy. (#633) + +### Misc +- SBOM are automatically published to the relevant GitHub release to comply with regulatory changes. + Each published wheel comes with its SBOM. We choose CycloneDX as the format. +- Prebuilt optimized wheel are no longer distributed by default for CPython 3.7 due to a change in cibuildwheel. + +## [3.4.2](https://github.com/Ousret/charset_normalizer/compare/3.4.1...3.4.2) (2025-05-02) + +### Fixed +- Addressed the DeprecationWarning in our CLI regarding `argparse.FileType` by backporting the target class into the package. (#591) +- Improved the overall reliability of the detector with CJK Ideographs. (#605) (#587) + +### Changed +- Optional mypyc compilation upgraded to version 1.15 for Python >= 3.8 + +## [3.4.1](https://github.com/Ousret/charset_normalizer/compare/3.4.0...3.4.1) (2024-12-24) + +### Changed +- Project metadata are now stored using `pyproject.toml` instead of `setup.cfg` using setuptools as the build backend. +- Enforce annotation delayed loading for a simpler and consistent types in the project. +- Optional mypyc compilation upgraded to version 1.14 for Python >= 3.8 + +### Added +- pre-commit configuration. +- noxfile. + +### Removed +- `build-requirements.txt` as per using `pyproject.toml` native build configuration. +- `bin/integration.py` and `bin/serve.py` in favor of downstream integration test (see noxfile). +- `setup.cfg` in favor of `pyproject.toml` metadata configuration. +- Unused `utils.range_scan` function. + +### Fixed +- Converting content to Unicode bytes may insert `utf_8` instead of preferred `utf-8`. 
(#572) +- Deprecation warning "'count' is passed as positional argument" when converting to Unicode bytes on Python 3.13+ + +## [3.4.0](https://github.com/Ousret/charset_normalizer/compare/3.3.2...3.4.0) (2024-10-08) + +### Added +- Argument `--no-preemptive` in the CLI to prevent the detector to search for hints. +- Support for Python 3.13 (#512) + +### Fixed +- Relax the TypeError exception thrown when trying to compare a CharsetMatch with anything else than a CharsetMatch. +- Improved the general reliability of the detector based on user feedbacks. (#520) (#509) (#498) (#407) (#537) +- Declared charset in content (preemptive detection) not changed when converting to utf-8 bytes. (#381) + +## [3.3.2](https://github.com/Ousret/charset_normalizer/compare/3.3.1...3.3.2) (2023-10-31) + +### Fixed +- Unintentional memory usage regression when using large payload that match several encoding (#376) +- Regression on some detection case showcased in the documentation (#371) + +### Added +- Noise (md) probe that identify malformed arabic representation due to the presence of letters in isolated form (credit to my wife) + +## [3.3.1](https://github.com/Ousret/charset_normalizer/compare/3.3.0...3.3.1) (2023-10-22) + +### Changed +- Optional mypyc compilation upgraded to version 1.6.1 for Python >= 3.8 +- Improved the general detection reliability based on reports from the community + +## [3.3.0](https://github.com/Ousret/charset_normalizer/compare/3.2.0...3.3.0) (2023-09-30) + +### Added +- Allow to execute the CLI (e.g. normalizer) through `python -m charset_normalizer.cli` or `python -m charset_normalizer` +- Support for 9 forgotten encoding that are supported by Python but unlisted in `encoding.aliases` as they have no alias (#323) + +### Removed +- (internal) Redundant utils.is_ascii function and unused function is_private_use_only +- (internal) charset_normalizer.assets is moved inside charset_normalizer.constant + +### Changed +- (internal) Unicode code blocks in constants are updated using the latest v15.0.0 definition to improve detection +- Optional mypyc compilation upgraded to version 1.5.1 for Python >= 3.8 + +### Fixed +- Unable to properly sort CharsetMatch when both chaos/noise and coherence were close due to an unreachable condition in \_\_lt\_\_ (#350) + +## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07) + +### Changed +- Typehint for function `from_path` no longer enforce `PathLike` as its first argument +- Minor improvement over the global detection reliability + +### Added +- Introduce function `is_binary` that relies on main capabilities, and optimized to detect binaries +- Propagate `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp` that allow a deeper control over the detection (default True) +- Explicit support for Python 3.12 + +### Fixed +- Edge case detection failure where a file would contain 'very-long' camel cased word (Issue #289) + +## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06) + +### Added +- Argument `should_rename_legacy` for legacy function `detect` and disregard any new arguments without errors (PR #262) + +### Removed +- Support for Python 3.6 (PR #260) + +### Changed +- Optional speedup provided by mypy/c 1.0.1 + +## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18) + +### Fixed +- Multi-bytes cutter/chunk generator did not always cut correctly (PR #233) + +### Changed +- Speedup provided by mypy/c 0.990 on 
Python >= 3.7 + +## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20) + +### Added +- Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results +- Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES +- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio +- `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl) + +### Changed +- Build with static metadata using 'build' frontend +- Make the language detection stricter +- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1 + +### Fixed +- CLI with opt --normalize fail when using full path for files +- TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it +- Sphinx warnings when generating the documentation + +### Removed +- Coherence detector no longer return 'Simple English' instead return 'English' +- Coherence detector no longer return 'Classical Chinese' instead return 'Chinese' +- Breaking: Method `first()` and `best()` from CharsetMatch +- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII) +- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches +- Breaking: Top-level function `normalize` +- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch +- Support for the backport `unicodedata2` + +## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18) + +### Added +- Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results +- Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES +- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio + +### Changed +- Build with static metadata using 'build' frontend +- Make the language detection stricter + +### Fixed +- CLI with opt --normalize fail when using full path for files +- TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it + +### Removed +- Coherence detector no longer return 'Simple English' instead return 'English' +- Coherence detector no longer return 'Classical Chinese' instead return 'Chinese' + +## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21) + +### Added +- `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl) + +### Removed +- Breaking: Method `first()` and `best()` from CharsetMatch +- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII) + +### Fixed +- Sphinx warnings when generating the documentation + +## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15) + +### Changed +- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1 + +### Removed +- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches +- Breaking: Top-level function 
`normalize` +- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch +- Support for the backport `unicodedata2` + +## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19) + +### Deprecated +- Function `normalize` scheduled for removal in 3.0 + +### Changed +- Removed useless call to decode in fn is_unprintable (#206) + +### Fixed +- Third-party library (i18n xgettext) crashing not recognizing utf_8 (PEP 263) with underscore from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204) + +## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19) + +### Added +- Output the Unicode table version when running the CLI with `--version` (PR #194) + +### Changed +- Re-use decoded buffer for single byte character sets from [@nijel](https://github.com/nijel) (PR #175) +- Fixing some performance bottlenecks from [@deedy5](https://github.com/deedy5) (PR #183) + +### Fixed +- Workaround potential bug in cpython with Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space (PR #175) +- CLI default threshold aligned with the API threshold from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181) + +### Removed +- Support for Python 3.5 (PR #192) + +### Deprecated +- Use of backport unicodedata from `unicodedata2` as Python is quickly catching up, scheduled for removal in 3.0 (PR #194) + +## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12) + +### Fixed +- ASCII miss-detection on rare cases (PR #170) + +## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30) + +### Added +- Explicit support for Python 3.11 (PR #164) + +### Changed +- The logging behavior have been completely reviewed, now using only TRACE and DEBUG levels (PR #163 #165) + +## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04) + +### Fixed +- Fallback match entries might lead to UnicodeDecodeError for large bytes sequence (PR #154) + +### Changed +- Skipping the language-detection (CD) on ASCII (PR #155) + +## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03) + +### Changed +- Moderating the logging impact (since 2.0.8) for specific environments (PR #147) + +### Fixed +- Wrong logging level applied when setting kwarg `explain` to True (PR #146) + +## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24) +### Changed +- Improvement over Vietnamese detection (PR #126) +- MD improvement on trailing data and long foreign (non-pure latin) data (PR #124) +- Efficiency improvements in cd/alphabet_languages from [@adbar](https://github.com/adbar) (PR #122) +- call sum() without an intermediary list following PEP 289 recommendations from [@adbar](https://github.com/adbar) (PR #129) +- Code style as refactored by Sourcery-AI (PR #131) +- Minor adjustment on the MD around european words (PR #133) +- Remove and replace SRTs from assets / tests (PR #139) +- Initialize the library logger with a `NullHandler` by default from [@nmaynes](https://github.com/nmaynes) (PR #135) +- Setting kwarg `explain` to True will add provisionally (bounded to function lifespan) a specific stream handler (PR #135) + +### Fixed +- Fix large (misleading) sequence giving UnicodeDecodeError (PR #137) +- Avoid using too insignificant chunk (PR #137) + +### Added +- Add and expose function 
`set_logging_handler` to configure a specific StreamHandler from [@nmaynes](https://github.com/nmaynes) (PR #135) +- Add `CHANGELOG.md` entries, format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141) + +## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11) +### Added +- Add support for Kazakh (Cyrillic) language detection (PR #109) + +### Changed +- Further, improve inferring the language from a given single-byte code page (PR #112) +- Vainly trying to leverage PEP263 when PEP3120 is not supported (PR #116) +- Refactoring for potential performance improvements in loops from [@adbar](https://github.com/adbar) (PR #113) +- Various detection improvement (MD+CD) (PR #117) + +### Removed +- Remove redundant logging entry about detected language(s) (PR #115) + +### Fixed +- Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102) + +## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18) +### Fixed +- Unforeseen regression with the loss of the backward-compatibility with some older minor of Python 3.5.x (PR #100) +- Fix CLI crash when using --minimal output in certain cases (PR #103) + +### Changed +- Minor improvement to the detection efficiency (less than 1%) (PR #106 #101) + +## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14) +### Changed +- The project now comply with: flake8, mypy, isort and black to ensure a better overall quality (PR #81) +- The BC-support with v1.x was improved, the old staticmethods are restored (PR #82) +- The Unicode detection is slightly improved (PR #93) +- Add syntax sugar \_\_bool\_\_ for results CharsetMatches list-container (PR #91) + +### Removed +- The project no longer raise warning on tiny content given for detection, will be simply logged as warning instead (PR #92) + +### Fixed +- In some rare case, the chunks extractor could cut in the middle of a multi-byte character and could mislead the mess detection (PR #95) +- Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96) +- The MANIFEST.in was not exhaustive (PR #78) + +## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30) +### Fixed +- The CLI no longer raise an unexpected exception when no encoding has been found (PR #70) +- Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68) +- The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72) +- Submatch factoring could be wrong in rare edge cases (PR #72) +- Multiple files given to the CLI were ignored when publishing results to STDOUT. (After the first path) (PR #72) +- Fix line endings from CRLF to LF for certain project files (PR #67) + +### Changed +- Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76) +- Allow fallback on specified encoding if any (PR #71) + +## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16) +### Changed +- Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results. Especially ASCII. (PR #63) +- According to the community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. 
(PR #64) + +## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15) +### Fixed +- Empty/Too small JSON payload miss-detection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59) + +### Changed +- Don't inject unicodedata2 into sys.modules from [@akx](https://github.com/akx) (PR #57) + +## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13) +### Fixed +- Make it work where there isn't a filesystem available, dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55) +- Using explain=False permanently disable the verbose output in the current runtime (PR #47) +- One log entry (language target preemptive) was not show in logs when using explain=True (PR #47) +- Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52) + +### Changed +- Public function normalize default args values were not aligned with from_bytes (PR #53) + +### Added +- You may now use charset aliases in cp_isolation and cp_exclusion arguments (PR #47) + +## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02) +### Changed +- 4x to 5 times faster than the previous 1.4.0 release. At least 2x faster than Chardet. +- Accent has been made on UTF-8 detection, should perform rather instantaneous. +- The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible. +- The detection mechanism has been slightly improved, now Turkish content is detected correctly (most of the time) +- The program has been rewritten to ease the readability and maintainability. (+Using static typing)+ +- utf_7 detection has been reinstated. + +### Removed +- This package no longer require anything when used with Python 3.5 (Dropped cached_property) +- Removed support for these languages: Catalan, Esperanto, Kazakh, Baque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbocroatian. +- The exception hook on UnicodeDecodeError has been removed. + +### Deprecated +- Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0 + +### Fixed +- The CLI output used the relative path of the file(s). Should be absolute. + +## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28) +### Fixed +- Logger configuration/usage no longer conflict with others (PR #44) + +## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21) +### Removed +- Using standard logging instead of using the package loguru. +- Dropping nose test framework in favor of the maintained pytest. +- Choose to not use dragonmapper package to help with gibberish Chinese/CJK text. +- Require cached_property only for Python 3.5 due to constraint. Dropping for every other interpreter version. +- Stop support for UTF-7 that does not contain a SIG. +- Dropping PrettyTable, replaced with pure JSON output in CLI. + +### Fixed +- BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present. Due to the sub-match factoring process. +- Not searching properly for the BOM when trying utf32/16 parent codec. + +### Changed +- Improving the package final size by compressing frequencies.json. +- Huge improvement over the larges payload. + +### Added +- CLI now produces JSON consumable output. +- Return ASCII if given sequences fit. 
Given reasonable confidence. + +## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13) + +### Fixed +- In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40) + +## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12) + +### Fixed +- Empty given payload for detection may cause an exception if trying to access the `alphabets` property. (PR #39) + +## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12) + +### Fixed +- The legacy detect function should return UTF-8-SIG if sig is present in the payload. (PR #38) + +## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09) + +### Changed +- Amend the previous release to allow prettytable 2.0 (PR #35) + +## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08) + +### Fixed +- Fix error while using the package with a python pre-release interpreter (PR #33) + +### Changed +- Dependencies refactoring, constraints revised. + +### Added +- Add python 3.9 and 3.10 to the supported interpreters + +MIT License + +Copyright (c) 2025 TAHRI Ahmed R. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
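+
+Putting the documented pieces together, a minimal end-to-end sketch (the file path is a placeholder; `from_path`, `is_binary` and `best()` are the entry points shown in the sections above):
+
+```python
+from charset_normalizer import from_path, is_binary
+
+# Skip charset detection when the payload is likely not text at all.
+# is_binary() reuses the same heuristics, with fallbacks disabled.
+if not is_binary("./data/sample.1.fr.srt"):  # placeholder path
+    best = from_path("./data/sample.1.fr.srt").best()
+    if best is not None:  # None means no plausible charset was found
+        print(best.encoding)  # e.g. "cp1252"
+        print(str(best))      # the normalized, readable text
+```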
diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/RECORD b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/RECORD new file mode 100644 index 0000000..404155e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/RECORD @@ -0,0 +1,35 @@ +../../../bin/normalizer,sha256=gK3ycb3IxEvyyQYFfOx8rEnBmGoMDmzmWNYZ8IlV5_0,239 +charset_normalizer-3.4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +charset_normalizer-3.4.3.dist-info/METADATA,sha256=nBNOskPUtcqHtaSPPaJafjXrlicPcPIgLFzpJQTgvaA,36700 +charset_normalizer-3.4.3.dist-info/RECORD,, +charset_normalizer-3.4.3.dist-info/WHEEL,sha256=DxRnWQz-Kp9-4a4hdDHsSv0KUC3H7sN9Nbef3-8RjXU,190 +charset_normalizer-3.4.3.dist-info/entry_points.txt,sha256=ADSTKrkXZ3hhdOVFi6DcUEHQRS0xfxDIE_pEz4wLIXA,65 +charset_normalizer-3.4.3.dist-info/licenses/LICENSE,sha256=bQ1Bv-FwrGx9wkjJpj4lTQ-0WmDVCoJX0K-SxuJJuIc,1071 +charset_normalizer-3.4.3.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19 +charset_normalizer/__init__.py,sha256=OKRxRv2Zhnqk00tqkN0c1BtJjm165fWXLydE52IKuHc,1590 +charset_normalizer/__main__.py,sha256=yzYxMR-IhKRHYwcSlavEv8oGdwxsR89mr2X09qXGdps,109 +charset_normalizer/__pycache__/__init__.cpython-312.pyc,, +charset_normalizer/__pycache__/__main__.cpython-312.pyc,, +charset_normalizer/__pycache__/api.cpython-312.pyc,, +charset_normalizer/__pycache__/cd.cpython-312.pyc,, +charset_normalizer/__pycache__/constant.cpython-312.pyc,, +charset_normalizer/__pycache__/legacy.cpython-312.pyc,, +charset_normalizer/__pycache__/md.cpython-312.pyc,, +charset_normalizer/__pycache__/models.cpython-312.pyc,, +charset_normalizer/__pycache__/utils.cpython-312.pyc,, +charset_normalizer/__pycache__/version.cpython-312.pyc,, +charset_normalizer/api.py,sha256=V07i8aVeCD8T2fSia3C-fn0i9t8qQguEBhsqszg32Ns,22668 +charset_normalizer/cd.py,sha256=WKTo1HDb-H9HfCDc3Bfwq5jzS25Ziy9SE2a74SgTq88,12522 +charset_normalizer/cli/__init__.py,sha256=D8I86lFk2-py45JvqxniTirSj_sFyE6sjaY_0-G1shc,136 +charset_normalizer/cli/__main__.py,sha256=dMaXG6IJXRvqq8z2tig7Qb83-BpWTln55ooiku5_uvg,12646 +charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc,, +charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc,, +charset_normalizer/constant.py,sha256=7UVY4ldYhmQMHUdgQ_sgZmzcQ0xxYxpBunqSZ-XJZ8U,42713 +charset_normalizer/legacy.py,sha256=sYBzSpzsRrg_wF4LP536pG64BItw7Tqtc3SMQAHvFLM,2731 +charset_normalizer/md.cpython-312-x86_64-linux-gnu.so,sha256=sZ7umtJLjKfA83NFJ7npkiDyr06zDT8cWtl6uIx2MsM,15912 +charset_normalizer/md.py,sha256=-_oN3h3_X99nkFfqamD3yu45DC_wfk5odH0Tr_CQiXs,20145 +charset_normalizer/md__mypyc.cpython-312-x86_64-linux-gnu.so,sha256=froFxeWX3QD-u-6lU-1gyOZmEo6pgm3i-HdUWz2J8ro,289536 +charset_normalizer/models.py,sha256=lKXhOnIPtiakbK3i__J9wpOfzx3JDTKj7Dn3Rg0VaRI,12394 +charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +charset_normalizer/utils.py,sha256=sTejPgrdlNsKNucZfJCxJ95lMTLA0ShHLLE3n5wpT9Q,12170 +charset_normalizer/version.py,sha256=hBN3id1io4HMVPtyDn9IIRVShbBM0kgVs3haVtppZOE,115 diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/WHEEL new file mode 100644 index 0000000..f3e8a97 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/WHEEL @@ -0,0 +1,7 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: false +Tag: 
cp312-cp312-manylinux_2_17_x86_64 +Tag: cp312-cp312-manylinux2014_x86_64 +Tag: cp312-cp312-manylinux_2_28_x86_64 + diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/entry_points.txt b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/entry_points.txt new file mode 100644 index 0000000..65619e7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +normalizer = charset_normalizer.cli:cli_detect diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/licenses/LICENSE b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/licenses/LICENSE new file mode 100644 index 0000000..9725772 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 TAHRI Ahmed R. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/top_level.txt b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/top_level.txt new file mode 100644 index 0000000..66958f0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer-3.4.3.dist-info/top_level.txt @@ -0,0 +1 @@ +charset_normalizer diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__init__.py b/.venv/lib/python3.12/site-packages/charset_normalizer/__init__.py new file mode 100644 index 0000000..0d3a379 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/__init__.py @@ -0,0 +1,48 @@ +""" +Charset-Normalizer +~~~~~~~~~~~~~~ +The Real First Universal Charset Detector. +A library that helps you read text from an unknown charset encoding. +Motivated by chardet, This package is trying to resolve the issue by taking a new approach. +All IANA character set names for which the Python core library provides codecs are supported. + +Basic usage: + >>> from charset_normalizer import from_bytes + >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8')) + >>> best_guess = results.best() + >>> str(best_guess) + 'Bсеки човек има право на образование. Oбразованието!' + +Others methods and usages are available - see the full documentation +at . +:copyright: (c) 2021 by Ahmed TAHRI +:license: MIT, see LICENSE for more details. 
+""" + +from __future__ import annotations + +import logging + +from .api import from_bytes, from_fp, from_path, is_binary +from .legacy import detect +from .models import CharsetMatch, CharsetMatches +from .utils import set_logging_handler +from .version import VERSION, __version__ + +__all__ = ( + "from_fp", + "from_path", + "from_bytes", + "is_binary", + "detect", + "CharsetMatch", + "CharsetMatches", + "__version__", + "VERSION", + "set_logging_handler", +) + +# Attach a NullHandler to the top level logger by default +# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library + +logging.getLogger("charset_normalizer").addHandler(logging.NullHandler()) diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__main__.py b/.venv/lib/python3.12/site-packages/charset_normalizer/__main__.py new file mode 100644 index 0000000..e0e76f7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/__main__.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from .cli import cli_detect + +if __name__ == "__main__": + cli_detect() diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..c5aea7f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..0d1ca0a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/__main__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/api.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/api.cpython-312.pyc new file mode 100644 index 0000000..2cd98e5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/api.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/cd.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/cd.cpython-312.pyc new file mode 100644 index 0000000..03adda0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/cd.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/constant.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/constant.cpython-312.pyc new file mode 100644 index 0000000..315ab78 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/constant.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/legacy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/legacy.cpython-312.pyc new file mode 100644 index 0000000..2b0b3fa Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/legacy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/md.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/md.cpython-312.pyc new file mode 100644 index 0000000..f420c9e Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/md.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/models.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/models.cpython-312.pyc new file mode 100644 index 0000000..86d78d5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/models.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..9a11851 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/version.cpython-312.pyc new file mode 100644 index 0000000..398292c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/__pycache__/version.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/api.py b/.venv/lib/python3.12/site-packages/charset_normalizer/api.py new file mode 100644 index 0000000..ebd9639 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/api.py @@ -0,0 +1,669 @@ +from __future__ import annotations + +import logging +from os import PathLike +from typing import BinaryIO + +from .cd import ( + coherence_ratio, + encoding_languages, + mb_encoding_languages, + merge_coherence_ratios, +) +from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE +from .md import mess_ratio +from .models import CharsetMatch, CharsetMatches +from .utils import ( + any_specified_encoding, + cut_sequence_chunks, + iana_name, + identify_sig_or_bom, + is_cp_similar, + is_multi_byte_encoding, + should_strip_sig_or_bom, +) + +logger = logging.getLogger("charset_normalizer") +explain_handler = logging.StreamHandler() +explain_handler.setFormatter( + logging.Formatter("%(asctime)s | %(levelname)s | %(message)s") +) + + +def from_bytes( + sequences: bytes | bytearray, + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.2, + cp_isolation: list[str] | None = None, + cp_exclusion: list[str] | None = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Given a raw bytes sequence, return the best possibles charset usable to render str objects. + If there is no results, it is a strong indicator that the source is binary/not text. + By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence. + And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will. + + The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page + but never take it for granted. Can improve the performance. + + You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that + purpose. + + This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32. 
+ By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain' + toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging. + Custom logging format and handler can be set manually. + """ + + if not isinstance(sequences, (bytearray, bytes)): + raise TypeError( + "Expected object of type bytes or bytearray, got: {}".format( + type(sequences) + ) + ) + + if explain: + previous_logger_level: int = logger.level + logger.addHandler(explain_handler) + logger.setLevel(TRACE) + + length: int = len(sequences) + + if length == 0: + logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.") + if explain: # Defensive: ensure exit path clean handler + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level or logging.WARNING) + return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")]) + + if cp_isolation is not None: + logger.log( + TRACE, + "cp_isolation is set. use this flag for debugging purpose. " + "limited list of encoding allowed : %s.", + ", ".join(cp_isolation), + ) + cp_isolation = [iana_name(cp, False) for cp in cp_isolation] + else: + cp_isolation = [] + + if cp_exclusion is not None: + logger.log( + TRACE, + "cp_exclusion is set. use this flag for debugging purpose. " + "limited list of encoding excluded : %s.", + ", ".join(cp_exclusion), + ) + cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion] + else: + cp_exclusion = [] + + if length <= (chunk_size * steps): + logger.log( + TRACE, + "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.", + steps, + chunk_size, + length, + ) + steps = 1 + chunk_size = length + + if steps > 1 and length / steps < chunk_size: + chunk_size = int(length / steps) + + is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE + is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE + + if is_too_small_sequence: + logger.log( + TRACE, + "Trying to detect encoding from a tiny portion of ({}) byte(s).".format( + length + ), + ) + elif is_too_large_sequence: + logger.log( + TRACE, + "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format( + length + ), + ) + + prioritized_encodings: list[str] = [] + + specified_encoding: str | None = ( + any_specified_encoding(sequences) if preemptive_behaviour else None + ) + + if specified_encoding is not None: + prioritized_encodings.append(specified_encoding) + logger.log( + TRACE, + "Detected declarative mark in sequence. Priority +1 given for %s.", + specified_encoding, + ) + + tested: set[str] = set() + tested_but_hard_failure: list[str] = [] + tested_but_soft_failure: list[str] = [] + + fallback_ascii: CharsetMatch | None = None + fallback_u8: CharsetMatch | None = None + fallback_specified: CharsetMatch | None = None + + results: CharsetMatches = CharsetMatches() + + early_stop_results: CharsetMatches = CharsetMatches() + + sig_encoding, sig_payload = identify_sig_or_bom(sequences) + + if sig_encoding is not None: + prioritized_encodings.append(sig_encoding) + logger.log( + TRACE, + "Detected a SIG or BOM mark on first %i byte(s). 
Priority +1 given for %s.", + len(sig_payload), + sig_encoding, + ) + + prioritized_encodings.append("ascii") + + if "utf_8" not in prioritized_encodings: + prioritized_encodings.append("utf_8") + + for encoding_iana in prioritized_encodings + IANA_SUPPORTED: + if cp_isolation and encoding_iana not in cp_isolation: + continue + + if cp_exclusion and encoding_iana in cp_exclusion: + continue + + if encoding_iana in tested: + continue + + tested.add(encoding_iana) + + decoded_payload: str | None = None + bom_or_sig_available: bool = sig_encoding == encoding_iana + strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom( + encoding_iana + ) + + if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available: + logger.log( + TRACE, + "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.", + encoding_iana, + ) + continue + if encoding_iana in {"utf_7"} and not bom_or_sig_available: + logger.log( + TRACE, + "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.", + encoding_iana, + ) + continue + + try: + is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana) + except (ModuleNotFoundError, ImportError): + logger.log( + TRACE, + "Encoding %s does not provide an IncrementalDecoder", + encoding_iana, + ) + continue + + try: + if is_too_large_sequence and is_multi_byte_decoder is False: + str( + ( + sequences[: int(50e4)] + if strip_sig_or_bom is False + else sequences[len(sig_payload) : int(50e4)] + ), + encoding=encoding_iana, + ) + else: + decoded_payload = str( + ( + sequences + if strip_sig_or_bom is False + else sequences[len(sig_payload) :] + ), + encoding=encoding_iana, + ) + except (UnicodeDecodeError, LookupError) as e: + if not isinstance(e, LookupError): + logger.log( + TRACE, + "Code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + tested_but_hard_failure.append(encoding_iana) + continue + + similar_soft_failure_test: bool = False + + for encoding_soft_failed in tested_but_soft_failure: + if is_cp_similar(encoding_iana, encoding_soft_failed): + similar_soft_failure_test = True + break + + if similar_soft_failure_test: + logger.log( + TRACE, + "%s is deemed too similar to code page %s and was consider unsuited already. 
Continuing!", + encoding_iana, + encoding_soft_failed, + ) + continue + + r_ = range( + 0 if not bom_or_sig_available else len(sig_payload), + length, + int(length / steps), + ) + + multi_byte_bonus: bool = ( + is_multi_byte_decoder + and decoded_payload is not None + and len(decoded_payload) < length + ) + + if multi_byte_bonus: + logger.log( + TRACE, + "Code page %s is a multi byte encoding table and it appear that at least one character " + "was encoded using n-bytes.", + encoding_iana, + ) + + max_chunk_gave_up: int = int(len(r_) / 4) + + max_chunk_gave_up = max(max_chunk_gave_up, 2) + early_stop_count: int = 0 + lazy_str_hard_failure = False + + md_chunks: list[str] = [] + md_ratios = [] + + try: + for chunk in cut_sequence_chunks( + sequences, + encoding_iana, + r_, + chunk_size, + bom_or_sig_available, + strip_sig_or_bom, + sig_payload, + is_multi_byte_decoder, + decoded_payload, + ): + md_chunks.append(chunk) + + md_ratios.append( + mess_ratio( + chunk, + threshold, + explain is True and 1 <= len(cp_isolation) <= 2, + ) + ) + + if md_ratios[-1] >= threshold: + early_stop_count += 1 + + if (early_stop_count >= max_chunk_gave_up) or ( + bom_or_sig_available and strip_sig_or_bom is False + ): + break + except ( + UnicodeDecodeError + ) as e: # Lazy str loading may have missed something there + logger.log( + TRACE, + "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + early_stop_count = max_chunk_gave_up + lazy_str_hard_failure = True + + # We might want to check the sequence again with the whole content + # Only if initial MD tests passes + if ( + not lazy_str_hard_failure + and is_too_large_sequence + and not is_multi_byte_decoder + ): + try: + sequences[int(50e3) :].decode(encoding_iana, errors="strict") + except UnicodeDecodeError as e: + logger.log( + TRACE, + "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + tested_but_hard_failure.append(encoding_iana) + continue + + mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0 + if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up: + tested_but_soft_failure.append(encoding_iana) + logger.log( + TRACE, + "%s was excluded because of initial chaos probing. Gave up %i time(s). " + "Computed mean chaos is %f %%.", + encoding_iana, + early_stop_count, + round(mean_mess_ratio * 100, ndigits=3), + ) + # Preparing those fallbacks in case we got nothing. + if ( + enable_fallback + and encoding_iana + in ["ascii", "utf_8", specified_encoding, "utf_16", "utf_32"] + and not lazy_str_hard_failure + ): + fallback_entry = CharsetMatch( + sequences, + encoding_iana, + threshold, + bom_or_sig_available, + [], + decoded_payload, + preemptive_declaration=specified_encoding, + ) + if encoding_iana == specified_encoding: + fallback_specified = fallback_entry + elif encoding_iana == "ascii": + fallback_ascii = fallback_entry + else: + fallback_u8 = fallback_entry + continue + + logger.log( + TRACE, + "%s passed initial chaos probing. 
Mean measured chaos is %f %%", + encoding_iana, + round(mean_mess_ratio * 100, ndigits=3), + ) + + if not is_multi_byte_decoder: + target_languages: list[str] = encoding_languages(encoding_iana) + else: + target_languages = mb_encoding_languages(encoding_iana) + + if target_languages: + logger.log( + TRACE, + "{} should target any language(s) of {}".format( + encoding_iana, str(target_languages) + ), + ) + + cd_ratios = [] + + # We shall skip the CD when its about ASCII + # Most of the time its not relevant to run "language-detection" on it. + if encoding_iana != "ascii": + for chunk in md_chunks: + chunk_languages = coherence_ratio( + chunk, + language_threshold, + ",".join(target_languages) if target_languages else None, + ) + + cd_ratios.append(chunk_languages) + + cd_ratios_merged = merge_coherence_ratios(cd_ratios) + + if cd_ratios_merged: + logger.log( + TRACE, + "We detected language {} using {}".format( + cd_ratios_merged, encoding_iana + ), + ) + + current_match = CharsetMatch( + sequences, + encoding_iana, + mean_mess_ratio, + bom_or_sig_available, + cd_ratios_merged, + ( + decoded_payload + if ( + is_too_large_sequence is False + or encoding_iana in [specified_encoding, "ascii", "utf_8"] + ) + else None + ), + preemptive_declaration=specified_encoding, + ) + + results.append(current_match) + + if ( + encoding_iana in [specified_encoding, "ascii", "utf_8"] + and mean_mess_ratio < 0.1 + ): + # If md says nothing to worry about, then... stop immediately! + if mean_mess_ratio == 0.0: + logger.debug( + "Encoding detection: %s is most likely the one.", + current_match.encoding, + ) + if explain: # Defensive: ensure exit path clean handler + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + return CharsetMatches([current_match]) + + early_stop_results.append(current_match) + + if ( + len(early_stop_results) + and (specified_encoding is None or specified_encoding in tested) + and "ascii" in tested + and "utf_8" in tested + ): + probable_result: CharsetMatch = early_stop_results.best() # type: ignore[assignment] + logger.debug( + "Encoding detection: %s is most likely the one.", + probable_result.encoding, + ) + if explain: # Defensive: ensure exit path clean handler + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + + return CharsetMatches([probable_result]) + + if encoding_iana == sig_encoding: + logger.debug( + "Encoding detection: %s is most likely the one as we detected a BOM or SIG within " + "the beginning of the sequence.", + encoding_iana, + ) + if explain: # Defensive: ensure exit path clean handler + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + return CharsetMatches([results[encoding_iana]]) + + if len(results) == 0: + if fallback_u8 or fallback_ascii or fallback_specified: + logger.log( + TRACE, + "Nothing got out of the detection process. 
Using ASCII/UTF-8/Specified fallback.", + ) + + if fallback_specified: + logger.debug( + "Encoding detection: %s will be used as a fallback match", + fallback_specified.encoding, + ) + results.append(fallback_specified) + elif ( + (fallback_u8 and fallback_ascii is None) + or ( + fallback_u8 + and fallback_ascii + and fallback_u8.fingerprint != fallback_ascii.fingerprint + ) + or (fallback_u8 is not None) + ): + logger.debug("Encoding detection: utf_8 will be used as a fallback match") + results.append(fallback_u8) + elif fallback_ascii: + logger.debug("Encoding detection: ascii will be used as a fallback match") + results.append(fallback_ascii) + + if results: + logger.debug( + "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.", + results.best().encoding, # type: ignore + len(results) - 1, + ) + else: + logger.debug("Encoding detection: Unable to determine any suitable charset.") + + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + + return results + + +def from_fp( + fp: BinaryIO, + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: list[str] | None = None, + cp_exclusion: list[str] | None = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Same thing than the function from_bytes but using a file pointer that is already ready. + Will not close the file pointer. + """ + return from_bytes( + fp.read(), + steps, + chunk_size, + threshold, + cp_isolation, + cp_exclusion, + preemptive_behaviour, + explain, + language_threshold, + enable_fallback, + ) + + +def from_path( + path: str | bytes | PathLike, # type: ignore[type-arg] + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: list[str] | None = None, + cp_exclusion: list[str] | None = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode. + Can raise IOError. + """ + with open(path, "rb") as fp: + return from_fp( + fp, + steps, + chunk_size, + threshold, + cp_isolation, + cp_exclusion, + preemptive_behaviour, + explain, + language_threshold, + enable_fallback, + ) + + +def is_binary( + fp_or_path_or_payload: PathLike | str | BinaryIO | bytes, # type: ignore[type-arg] + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: list[str] | None = None, + cp_exclusion: list[str] | None = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = False, +) -> bool: + """ + Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string. + Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match + are disabled to be stricter around ASCII-compatible but unlikely to be a string. 
+ """ + if isinstance(fp_or_path_or_payload, (str, PathLike)): + guesses = from_path( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + elif isinstance( + fp_or_path_or_payload, + ( + bytes, + bytearray, + ), + ): + guesses = from_bytes( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + else: + guesses = from_fp( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + + return not guesses diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/cd.py b/.venv/lib/python3.12/site-packages/charset_normalizer/cd.py new file mode 100644 index 0000000..71a3ed5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/cd.py @@ -0,0 +1,395 @@ +from __future__ import annotations + +import importlib +from codecs import IncrementalDecoder +from collections import Counter +from functools import lru_cache +from typing import Counter as TypeCounter + +from .constant import ( + FREQUENCIES, + KO_NAMES, + LANGUAGE_SUPPORTED_COUNT, + TOO_SMALL_SEQUENCE, + ZH_NAMES, +) +from .md import is_suspiciously_successive_range +from .models import CoherenceMatches +from .utils import ( + is_accentuated, + is_latin, + is_multi_byte_encoding, + is_unicode_range_secondary, + unicode_range, +) + + +def encoding_unicode_range(iana_name: str) -> list[str]: + """ + Return associated unicode ranges in a single byte code page. + """ + if is_multi_byte_encoding(iana_name): + raise OSError("Function not supported on multi-byte code page") + + decoder = importlib.import_module(f"encodings.{iana_name}").IncrementalDecoder + + p: IncrementalDecoder = decoder(errors="ignore") + seen_ranges: dict[str, int] = {} + character_count: int = 0 + + for i in range(0x40, 0xFF): + chunk: str = p.decode(bytes([i])) + + if chunk: + character_range: str | None = unicode_range(chunk) + + if character_range is None: + continue + + if is_unicode_range_secondary(character_range) is False: + if character_range not in seen_ranges: + seen_ranges[character_range] = 0 + seen_ranges[character_range] += 1 + character_count += 1 + + return sorted( + [ + character_range + for character_range in seen_ranges + if seen_ranges[character_range] / character_count >= 0.15 + ] + ) + + +def unicode_range_languages(primary_range: str) -> list[str]: + """ + Return inferred languages used with a unicode range. + """ + languages: list[str] = [] + + for language, characters in FREQUENCIES.items(): + for character in characters: + if unicode_range(character) == primary_range: + languages.append(language) + break + + return languages + + +@lru_cache() +def encoding_languages(iana_name: str) -> list[str]: + """ + Single-byte encoding language association. Some code page are heavily linked to particular language(s). + This function does the correspondence. 
+ """ + unicode_ranges: list[str] = encoding_unicode_range(iana_name) + primary_range: str | None = None + + for specified_range in unicode_ranges: + if "Latin" not in specified_range: + primary_range = specified_range + break + + if primary_range is None: + return ["Latin Based"] + + return unicode_range_languages(primary_range) + + +@lru_cache() +def mb_encoding_languages(iana_name: str) -> list[str]: + """ + Multi-byte encoding language association. Some code page are heavily linked to particular language(s). + This function does the correspondence. + """ + if ( + iana_name.startswith("shift_") + or iana_name.startswith("iso2022_jp") + or iana_name.startswith("euc_j") + or iana_name == "cp932" + ): + return ["Japanese"] + if iana_name.startswith("gb") or iana_name in ZH_NAMES: + return ["Chinese"] + if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: + return ["Korean"] + + return [] + + +@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) +def get_target_features(language: str) -> tuple[bool, bool]: + """ + Determine main aspects from a supported language if it contains accents and if is pure Latin. + """ + target_have_accents: bool = False + target_pure_latin: bool = True + + for character in FREQUENCIES[language]: + if not target_have_accents and is_accentuated(character): + target_have_accents = True + if target_pure_latin and is_latin(character) is False: + target_pure_latin = False + + return target_have_accents, target_pure_latin + + +def alphabet_languages( + characters: list[str], ignore_non_latin: bool = False +) -> list[str]: + """ + Return associated languages associated to given characters. + """ + languages: list[tuple[str, float]] = [] + + source_have_accents = any(is_accentuated(character) for character in characters) + + for language, language_characters in FREQUENCIES.items(): + target_have_accents, target_pure_latin = get_target_features(language) + + if ignore_non_latin and target_pure_latin is False: + continue + + if target_have_accents is False and source_have_accents: + continue + + character_count: int = len(language_characters) + + character_match_count: int = len( + [c for c in language_characters if c in characters] + ) + + ratio: float = character_match_count / character_count + + if ratio >= 0.2: + languages.append((language, ratio)) + + languages = sorted(languages, key=lambda x: x[1], reverse=True) + + return [compatible_language[0] for compatible_language in languages] + + +def characters_popularity_compare( + language: str, ordered_characters: list[str] +) -> float: + """ + Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. + The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). + Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) 
+ """ + if language not in FREQUENCIES: + raise ValueError(f"{language} not available") + + character_approved_count: int = 0 + FREQUENCIES_language_set = set(FREQUENCIES[language]) + + ordered_characters_count: int = len(ordered_characters) + target_language_characters_count: int = len(FREQUENCIES[language]) + + large_alphabet: bool = target_language_characters_count > 26 + + for character, character_rank in zip( + ordered_characters, range(0, ordered_characters_count) + ): + if character not in FREQUENCIES_language_set: + continue + + character_rank_in_language: int = FREQUENCIES[language].index(character) + expected_projection_ratio: float = ( + target_language_characters_count / ordered_characters_count + ) + character_rank_projection: int = int(character_rank * expected_projection_ratio) + + if ( + large_alphabet is False + and abs(character_rank_projection - character_rank_in_language) > 4 + ): + continue + + if ( + large_alphabet is True + and abs(character_rank_projection - character_rank_in_language) + < target_language_characters_count / 3 + ): + character_approved_count += 1 + continue + + characters_before_source: list[str] = FREQUENCIES[language][ + 0:character_rank_in_language + ] + characters_after_source: list[str] = FREQUENCIES[language][ + character_rank_in_language: + ] + characters_before: list[str] = ordered_characters[0:character_rank] + characters_after: list[str] = ordered_characters[character_rank:] + + before_match_count: int = len( + set(characters_before) & set(characters_before_source) + ) + + after_match_count: int = len( + set(characters_after) & set(characters_after_source) + ) + + if len(characters_before_source) == 0 and before_match_count <= 4: + character_approved_count += 1 + continue + + if len(characters_after_source) == 0 and after_match_count <= 4: + character_approved_count += 1 + continue + + if ( + before_match_count / len(characters_before_source) >= 0.4 + or after_match_count / len(characters_after_source) >= 0.4 + ): + character_approved_count += 1 + continue + + return character_approved_count / len(ordered_characters) + + +def alpha_unicode_split(decoded_sequence: str) -> list[str]: + """ + Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. + Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; + One containing the latin letters and the other hebrew. + """ + layers: dict[str, str] = {} + + for character in decoded_sequence: + if character.isalpha() is False: + continue + + character_range: str | None = unicode_range(character) + + if character_range is None: + continue + + layer_target_range: str | None = None + + for discovered_range in layers: + if ( + is_suspiciously_successive_range(discovered_range, character_range) + is False + ): + layer_target_range = discovered_range + break + + if layer_target_range is None: + layer_target_range = character_range + + if layer_target_range not in layers: + layers[layer_target_range] = character.lower() + continue + + layers[layer_target_range] += character.lower() + + return list(layers.values()) + + +def merge_coherence_ratios(results: list[CoherenceMatches]) -> CoherenceMatches: + """ + This function merge results previously given by the function coherence_ratio. + The return type is the same as coherence_ratio. 
+ """ + per_language_ratios: dict[str, list[float]] = {} + for result in results: + for sub_result in result: + language, ratio = sub_result + if language not in per_language_ratios: + per_language_ratios[language] = [ratio] + continue + per_language_ratios[language].append(ratio) + + merge = [ + ( + language, + round( + sum(per_language_ratios[language]) / len(per_language_ratios[language]), + 4, + ), + ) + for language in per_language_ratios + ] + + return sorted(merge, key=lambda x: x[1], reverse=True) + + +def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: + """ + We shall NOT return "English—" in CoherenceMatches because it is an alternative + of "English". This function only keeps the best match and remove the em-dash in it. + """ + index_results: dict[str, list[float]] = dict() + + for result in results: + language, ratio = result + no_em_name: str = language.replace("—", "") + + if no_em_name not in index_results: + index_results[no_em_name] = [] + + index_results[no_em_name].append(ratio) + + if any(len(index_results[e]) > 1 for e in index_results): + filtered_results: CoherenceMatches = [] + + for language in index_results: + filtered_results.append((language, max(index_results[language]))) + + return filtered_results + + return results + + +@lru_cache(maxsize=2048) +def coherence_ratio( + decoded_sequence: str, threshold: float = 0.1, lg_inclusion: str | None = None +) -> CoherenceMatches: + """ + Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. + A layer = Character extraction by alphabets/ranges. + """ + + results: list[tuple[str, float]] = [] + ignore_non_latin: bool = False + + sufficient_match_count: int = 0 + + lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] + if "Latin Based" in lg_inclusion_list: + ignore_non_latin = True + lg_inclusion_list.remove("Latin Based") + + for layer in alpha_unicode_split(decoded_sequence): + sequence_frequencies: TypeCounter[str] = Counter(layer) + most_common = sequence_frequencies.most_common() + + character_count: int = sum(o for c, o in most_common) + + if character_count <= TOO_SMALL_SEQUENCE: + continue + + popular_character_ordered: list[str] = [c for c, o in most_common] + + for language in lg_inclusion_list or alphabet_languages( + popular_character_ordered, ignore_non_latin + ): + ratio: float = characters_popularity_compare( + language, popular_character_ordered + ) + + if ratio < threshold: + continue + elif ratio >= 0.8: + sufficient_match_count += 1 + + results.append((language, round(ratio, 4))) + + if sufficient_match_count >= 3: + break + + return sorted( + filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True + ) diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__init__.py b/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__init__.py new file mode 100644 index 0000000..543a5a4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__init__.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from .__main__ import cli_detect, query_yes_no + +__all__ = ( + "cli_detect", + "query_yes_no", +) diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__main__.py b/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__main__.py new file mode 100644 index 0000000..cb64156 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__main__.py @@ -0,0 +1,381 @@ +from __future__ import annotations + +import 
argparse +import sys +import typing +from json import dumps +from os.path import abspath, basename, dirname, join, realpath +from platform import python_version +from unicodedata import unidata_version + +import charset_normalizer.md as md_module +from charset_normalizer import from_fp +from charset_normalizer.models import CliDetectionResult +from charset_normalizer.version import __version__ + + +def query_yes_no(question: str, default: str = "yes") -> bool: + """Ask a yes/no question via input() and return their answer. + + "question" is a string that is presented to the user. + "default" is the presumed answer if the user just hits . + It must be "yes" (the default), "no" or None (meaning + an answer is required of the user). + + The "answer" return value is True for "yes" or False for "no". + + Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input + """ + valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} + if default is None: + prompt = " [y/n] " + elif default == "yes": + prompt = " [Y/n] " + elif default == "no": + prompt = " [y/N] " + else: + raise ValueError("invalid default answer: '%s'" % default) + + while True: + sys.stdout.write(question + prompt) + choice = input().lower() + if default is not None and choice == "": + return valid[default] + elif choice in valid: + return valid[choice] + else: + sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") + + +class FileType: + """Factory for creating file object types + + Instances of FileType are typically passed as type= arguments to the + ArgumentParser add_argument() method. + + Keyword Arguments: + - mode -- A string indicating how the file is to be opened. Accepts the + same values as the builtin open() function. + - bufsize -- The file's desired buffer size. Accepts the same values as + the builtin open() function. + - encoding -- The file's encoding. Accepts the same values as the + builtin open() function. + - errors -- A string indicating how encoding and decoding errors are to + be handled. Accepts the same value as the builtin open() function. 
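+
+    Illustrative behaviour (inferred from __call__ below): FileType("rb")("-")
+    returns sys.stdin.buffer, any other string is handed to open(), and an
+    unopenable path raises argparse.ArgumentTypeError.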
+ + Backported from CPython 3.12 + """ + + def __init__( + self, + mode: str = "r", + bufsize: int = -1, + encoding: str | None = None, + errors: str | None = None, + ): + self._mode = mode + self._bufsize = bufsize + self._encoding = encoding + self._errors = errors + + def __call__(self, string: str) -> typing.IO: # type: ignore[type-arg] + # the special argument "-" means sys.std{in,out} + if string == "-": + if "r" in self._mode: + return sys.stdin.buffer if "b" in self._mode else sys.stdin + elif any(c in self._mode for c in "wax"): + return sys.stdout.buffer if "b" in self._mode else sys.stdout + else: + msg = f'argument "-" with mode {self._mode}' + raise ValueError(msg) + + # all other arguments are used as file names + try: + return open(string, self._mode, self._bufsize, self._encoding, self._errors) + except OSError as e: + message = f"can't open '{string}': {e}" + raise argparse.ArgumentTypeError(message) + + def __repr__(self) -> str: + args = self._mode, self._bufsize + kwargs = [("encoding", self._encoding), ("errors", self._errors)] + args_str = ", ".join( + [repr(arg) for arg in args if arg != -1] + + [f"{kw}={arg!r}" for kw, arg in kwargs if arg is not None] + ) + return f"{type(self).__name__}({args_str})" + + +def cli_detect(argv: list[str] | None = None) -> int: + """ + CLI assistant using ARGV and ArgumentParser + :param argv: + :return: 0 if everything is fine, anything else equal trouble + """ + parser = argparse.ArgumentParser( + description="The Real First Universal Charset Detector. " + "Discover originating encoding used on text file. " + "Normalize text to unicode." + ) + + parser.add_argument( + "files", type=FileType("rb"), nargs="+", help="File(s) to be analysed" + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + default=False, + dest="verbose", + help="Display complementary information about file if any. " + "Stdout will contain logs about the detection process.", + ) + parser.add_argument( + "-a", + "--with-alternative", + action="store_true", + default=False, + dest="alternatives", + help="Output complementary possibilities if any. Top-level JSON WILL be a list.", + ) + parser.add_argument( + "-n", + "--normalize", + action="store_true", + default=False, + dest="normalize", + help="Permit to normalize input file. If not set, program does not write anything.", + ) + parser.add_argument( + "-m", + "--minimal", + action="store_true", + default=False, + dest="minimal", + help="Only output the charset detected to STDOUT. Disabling JSON output.", + ) + parser.add_argument( + "-r", + "--replace", + action="store_true", + default=False, + dest="replace", + help="Replace file when trying to normalize it instead of creating a new one.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + dest="force", + help="Replace file without asking if you are sure, use this flag with caution.", + ) + parser.add_argument( + "-i", + "--no-preemptive", + action="store_true", + default=False, + dest="no_preemptive", + help="Disable looking at a charset declaration to hint the detector.", + ) + parser.add_argument( + "-t", + "--threshold", + action="store", + default=0.2, + type=float, + dest="threshold", + help="Define a custom maximum amount of noise allowed in decoded content. 0. 
<= noise <= 1.", + ) + parser.add_argument( + "--version", + action="version", + version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format( + __version__, + python_version(), + unidata_version, + "OFF" if md_module.__file__.lower().endswith(".py") else "ON", + ), + help="Show version information and exit.", + ) + + args = parser.parse_args(argv) + + if args.replace is True and args.normalize is False: + if args.files: + for my_file in args.files: + my_file.close() + print("Use --replace in addition of --normalize only.", file=sys.stderr) + return 1 + + if args.force is True and args.replace is False: + if args.files: + for my_file in args.files: + my_file.close() + print("Use --force in addition of --replace only.", file=sys.stderr) + return 1 + + if args.threshold < 0.0 or args.threshold > 1.0: + if args.files: + for my_file in args.files: + my_file.close() + print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr) + return 1 + + x_ = [] + + for my_file in args.files: + matches = from_fp( + my_file, + threshold=args.threshold, + explain=args.verbose, + preemptive_behaviour=args.no_preemptive is False, + ) + + best_guess = matches.best() + + if best_guess is None: + print( + 'Unable to identify originating encoding for "{}". {}'.format( + my_file.name, + ( + "Maybe try increasing maximum amount of chaos." + if args.threshold < 1.0 + else "" + ), + ), + file=sys.stderr, + ) + x_.append( + CliDetectionResult( + abspath(my_file.name), + None, + [], + [], + "Unknown", + [], + False, + 1.0, + 0.0, + None, + True, + ) + ) + else: + x_.append( + CliDetectionResult( + abspath(my_file.name), + best_guess.encoding, + best_guess.encoding_aliases, + [ + cp + for cp in best_guess.could_be_from_charset + if cp != best_guess.encoding + ], + best_guess.language, + best_guess.alphabets, + best_guess.bom, + best_guess.percent_chaos, + best_guess.percent_coherence, + None, + True, + ) + ) + + if len(matches) > 1 and args.alternatives: + for el in matches: + if el != best_guess: + x_.append( + CliDetectionResult( + abspath(my_file.name), + el.encoding, + el.encoding_aliases, + [ + cp + for cp in el.could_be_from_charset + if cp != el.encoding + ], + el.language, + el.alphabets, + el.bom, + el.percent_chaos, + el.percent_coherence, + None, + False, + ) + ) + + if args.normalize is True: + if best_guess.encoding.startswith("utf") is True: + print( + '"{}" file does not need to be normalized, as it already came from unicode.'.format( + my_file.name + ), + file=sys.stderr, + ) + if my_file.closed is False: + my_file.close() + continue + + dir_path = dirname(realpath(my_file.name)) + file_name = basename(realpath(my_file.name)) + + o_: list[str] = file_name.split(".") + + if args.replace is False: + o_.insert(-1, best_guess.encoding) + if my_file.closed is False: + my_file.close() + elif ( + args.force is False + and query_yes_no( + 'Are you sure to normalize "{}" by replacing it ?'.format( + my_file.name + ), + "no", + ) + is False + ): + if my_file.closed is False: + my_file.close() + continue + + try: + x_[0].unicode_path = join(dir_path, ".".join(o_)) + + with open(x_[0].unicode_path, "wb") as fp: + fp.write(best_guess.output()) + except OSError as e: + print(str(e), file=sys.stderr) + if my_file.closed is False: + my_file.close() + return 2 + + if my_file.closed is False: + my_file.close() + + if args.minimal is False: + print( + dumps( + [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__, + ensure_ascii=True, + indent=4, + ) + ) + else: + for my_file in 
args.files: + print( + ", ".join( + [ + el.encoding or "undefined" + for el in x_ + if el.path == abspath(my_file.name) + ] + ) + ) + + return 0 + + +if __name__ == "__main__": + cli_detect() diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..ddf3f5a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..589b85e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/constant.py b/.venv/lib/python3.12/site-packages/charset_normalizer/constant.py new file mode 100644 index 0000000..cc71a01 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/constant.py @@ -0,0 +1,2015 @@ +from __future__ import annotations + +from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE +from encodings.aliases import aliases +from re import IGNORECASE +from re import compile as re_compile + +# Contain for each eligible encoding a list of/item bytes SIG/BOM +ENCODING_MARKS: dict[str, bytes | list[bytes]] = { + "utf_8": BOM_UTF8, + "utf_7": [ + b"\x2b\x2f\x76\x38", + b"\x2b\x2f\x76\x39", + b"\x2b\x2f\x76\x2b", + b"\x2b\x2f\x76\x2f", + b"\x2b\x2f\x76\x38\x2d", + ], + "gb18030": b"\x84\x31\x95\x33", + "utf_32": [BOM_UTF32_BE, BOM_UTF32_LE], + "utf_16": [BOM_UTF16_BE, BOM_UTF16_LE], +} + +TOO_SMALL_SEQUENCE: int = 32 +TOO_BIG_SEQUENCE: int = int(10e6) + +UTF8_MAXIMAL_ALLOCATION: int = 1_112_064 + +# Up-to-date Unicode ucd/15.0.0 +UNICODE_RANGES_COMBINED: dict[str, range] = { + "Control character": range(32), + "Basic Latin": range(32, 128), + "Latin-1 Supplement": range(128, 256), + "Latin Extended-A": range(256, 384), + "Latin Extended-B": range(384, 592), + "IPA Extensions": range(592, 688), + "Spacing Modifier Letters": range(688, 768), + "Combining Diacritical Marks": range(768, 880), + "Greek and Coptic": range(880, 1024), + "Cyrillic": range(1024, 1280), + "Cyrillic Supplement": range(1280, 1328), + "Armenian": range(1328, 1424), + "Hebrew": range(1424, 1536), + "Arabic": range(1536, 1792), + "Syriac": range(1792, 1872), + "Arabic Supplement": range(1872, 1920), + "Thaana": range(1920, 1984), + "NKo": range(1984, 2048), + "Samaritan": range(2048, 2112), + "Mandaic": range(2112, 2144), + "Syriac Supplement": range(2144, 2160), + "Arabic Extended-B": range(2160, 2208), + "Arabic Extended-A": range(2208, 2304), + "Devanagari": range(2304, 2432), + "Bengali": range(2432, 2560), + "Gurmukhi": range(2560, 2688), + "Gujarati": range(2688, 2816), + "Oriya": range(2816, 2944), + "Tamil": range(2944, 3072), + "Telugu": range(3072, 3200), + "Kannada": range(3200, 3328), + "Malayalam": range(3328, 3456), + "Sinhala": range(3456, 3584), + "Thai": range(3584, 3712), + "Lao": range(3712, 3840), + "Tibetan": range(3840, 4096), + "Myanmar": range(4096, 4256), + "Georgian": range(4256, 4352), + "Hangul Jamo": range(4352, 4608), + "Ethiopic": range(4608, 4992), + "Ethiopic Supplement": range(4992, 5024), + "Cherokee": range(5024, 5120), + 
"Unified Canadian Aboriginal Syllabics": range(5120, 5760), + "Ogham": range(5760, 5792), + "Runic": range(5792, 5888), + "Tagalog": range(5888, 5920), + "Hanunoo": range(5920, 5952), + "Buhid": range(5952, 5984), + "Tagbanwa": range(5984, 6016), + "Khmer": range(6016, 6144), + "Mongolian": range(6144, 6320), + "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6400), + "Limbu": range(6400, 6480), + "Tai Le": range(6480, 6528), + "New Tai Lue": range(6528, 6624), + "Khmer Symbols": range(6624, 6656), + "Buginese": range(6656, 6688), + "Tai Tham": range(6688, 6832), + "Combining Diacritical Marks Extended": range(6832, 6912), + "Balinese": range(6912, 7040), + "Sundanese": range(7040, 7104), + "Batak": range(7104, 7168), + "Lepcha": range(7168, 7248), + "Ol Chiki": range(7248, 7296), + "Cyrillic Extended-C": range(7296, 7312), + "Georgian Extended": range(7312, 7360), + "Sundanese Supplement": range(7360, 7376), + "Vedic Extensions": range(7376, 7424), + "Phonetic Extensions": range(7424, 7552), + "Phonetic Extensions Supplement": range(7552, 7616), + "Combining Diacritical Marks Supplement": range(7616, 7680), + "Latin Extended Additional": range(7680, 7936), + "Greek Extended": range(7936, 8192), + "General Punctuation": range(8192, 8304), + "Superscripts and Subscripts": range(8304, 8352), + "Currency Symbols": range(8352, 8400), + "Combining Diacritical Marks for Symbols": range(8400, 8448), + "Letterlike Symbols": range(8448, 8528), + "Number Forms": range(8528, 8592), + "Arrows": range(8592, 8704), + "Mathematical Operators": range(8704, 8960), + "Miscellaneous Technical": range(8960, 9216), + "Control Pictures": range(9216, 9280), + "Optical Character Recognition": range(9280, 9312), + "Enclosed Alphanumerics": range(9312, 9472), + "Box Drawing": range(9472, 9600), + "Block Elements": range(9600, 9632), + "Geometric Shapes": range(9632, 9728), + "Miscellaneous Symbols": range(9728, 9984), + "Dingbats": range(9984, 10176), + "Miscellaneous Mathematical Symbols-A": range(10176, 10224), + "Supplemental Arrows-A": range(10224, 10240), + "Braille Patterns": range(10240, 10496), + "Supplemental Arrows-B": range(10496, 10624), + "Miscellaneous Mathematical Symbols-B": range(10624, 10752), + "Supplemental Mathematical Operators": range(10752, 11008), + "Miscellaneous Symbols and Arrows": range(11008, 11264), + "Glagolitic": range(11264, 11360), + "Latin Extended-C": range(11360, 11392), + "Coptic": range(11392, 11520), + "Georgian Supplement": range(11520, 11568), + "Tifinagh": range(11568, 11648), + "Ethiopic Extended": range(11648, 11744), + "Cyrillic Extended-A": range(11744, 11776), + "Supplemental Punctuation": range(11776, 11904), + "CJK Radicals Supplement": range(11904, 12032), + "Kangxi Radicals": range(12032, 12256), + "Ideographic Description Characters": range(12272, 12288), + "CJK Symbols and Punctuation": range(12288, 12352), + "Hiragana": range(12352, 12448), + "Katakana": range(12448, 12544), + "Bopomofo": range(12544, 12592), + "Hangul Compatibility Jamo": range(12592, 12688), + "Kanbun": range(12688, 12704), + "Bopomofo Extended": range(12704, 12736), + "CJK Strokes": range(12736, 12784), + "Katakana Phonetic Extensions": range(12784, 12800), + "Enclosed CJK Letters and Months": range(12800, 13056), + "CJK Compatibility": range(13056, 13312), + "CJK Unified Ideographs Extension A": range(13312, 19904), + "Yijing Hexagram Symbols": range(19904, 19968), + "CJK Unified Ideographs": range(19968, 40960), + "Yi Syllables": range(40960, 42128), + "Yi Radicals": 
range(42128, 42192), + "Lisu": range(42192, 42240), + "Vai": range(42240, 42560), + "Cyrillic Extended-B": range(42560, 42656), + "Bamum": range(42656, 42752), + "Modifier Tone Letters": range(42752, 42784), + "Latin Extended-D": range(42784, 43008), + "Syloti Nagri": range(43008, 43056), + "Common Indic Number Forms": range(43056, 43072), + "Phags-pa": range(43072, 43136), + "Saurashtra": range(43136, 43232), + "Devanagari Extended": range(43232, 43264), + "Kayah Li": range(43264, 43312), + "Rejang": range(43312, 43360), + "Hangul Jamo Extended-A": range(43360, 43392), + "Javanese": range(43392, 43488), + "Myanmar Extended-B": range(43488, 43520), + "Cham": range(43520, 43616), + "Myanmar Extended-A": range(43616, 43648), + "Tai Viet": range(43648, 43744), + "Meetei Mayek Extensions": range(43744, 43776), + "Ethiopic Extended-A": range(43776, 43824), + "Latin Extended-E": range(43824, 43888), + "Cherokee Supplement": range(43888, 43968), + "Meetei Mayek": range(43968, 44032), + "Hangul Syllables": range(44032, 55216), + "Hangul Jamo Extended-B": range(55216, 55296), + "High Surrogates": range(55296, 56192), + "High Private Use Surrogates": range(56192, 56320), + "Low Surrogates": range(56320, 57344), + "Private Use Area": range(57344, 63744), + "CJK Compatibility Ideographs": range(63744, 64256), + "Alphabetic Presentation Forms": range(64256, 64336), + "Arabic Presentation Forms-A": range(64336, 65024), + "Variation Selectors": range(65024, 65040), + "Vertical Forms": range(65040, 65056), + "Combining Half Marks": range(65056, 65072), + "CJK Compatibility Forms": range(65072, 65104), + "Small Form Variants": range(65104, 65136), + "Arabic Presentation Forms-B": range(65136, 65280), + "Halfwidth and Fullwidth Forms": range(65280, 65520), + "Specials": range(65520, 65536), + "Linear B Syllabary": range(65536, 65664), + "Linear B Ideograms": range(65664, 65792), + "Aegean Numbers": range(65792, 65856), + "Ancient Greek Numbers": range(65856, 65936), + "Ancient Symbols": range(65936, 66000), + "Phaistos Disc": range(66000, 66048), + "Lycian": range(66176, 66208), + "Carian": range(66208, 66272), + "Coptic Epact Numbers": range(66272, 66304), + "Old Italic": range(66304, 66352), + "Gothic": range(66352, 66384), + "Old Permic": range(66384, 66432), + "Ugaritic": range(66432, 66464), + "Old Persian": range(66464, 66528), + "Deseret": range(66560, 66640), + "Shavian": range(66640, 66688), + "Osmanya": range(66688, 66736), + "Osage": range(66736, 66816), + "Elbasan": range(66816, 66864), + "Caucasian Albanian": range(66864, 66928), + "Vithkuqi": range(66928, 67008), + "Linear A": range(67072, 67456), + "Latin Extended-F": range(67456, 67520), + "Cypriot Syllabary": range(67584, 67648), + "Imperial Aramaic": range(67648, 67680), + "Palmyrene": range(67680, 67712), + "Nabataean": range(67712, 67760), + "Hatran": range(67808, 67840), + "Phoenician": range(67840, 67872), + "Lydian": range(67872, 67904), + "Meroitic Hieroglyphs": range(67968, 68000), + "Meroitic Cursive": range(68000, 68096), + "Kharoshthi": range(68096, 68192), + "Old South Arabian": range(68192, 68224), + "Old North Arabian": range(68224, 68256), + "Manichaean": range(68288, 68352), + "Avestan": range(68352, 68416), + "Inscriptional Parthian": range(68416, 68448), + "Inscriptional Pahlavi": range(68448, 68480), + "Psalter Pahlavi": range(68480, 68528), + "Old Turkic": range(68608, 68688), + "Old Hungarian": range(68736, 68864), + "Hanifi Rohingya": range(68864, 68928), + "Rumi Numeral Symbols": range(69216, 69248), + "Yezidi": 
range(69248, 69312), + "Arabic Extended-C": range(69312, 69376), + "Old Sogdian": range(69376, 69424), + "Sogdian": range(69424, 69488), + "Old Uyghur": range(69488, 69552), + "Chorasmian": range(69552, 69600), + "Elymaic": range(69600, 69632), + "Brahmi": range(69632, 69760), + "Kaithi": range(69760, 69840), + "Sora Sompeng": range(69840, 69888), + "Chakma": range(69888, 69968), + "Mahajani": range(69968, 70016), + "Sharada": range(70016, 70112), + "Sinhala Archaic Numbers": range(70112, 70144), + "Khojki": range(70144, 70224), + "Multani": range(70272, 70320), + "Khudawadi": range(70320, 70400), + "Grantha": range(70400, 70528), + "Newa": range(70656, 70784), + "Tirhuta": range(70784, 70880), + "Siddham": range(71040, 71168), + "Modi": range(71168, 71264), + "Mongolian Supplement": range(71264, 71296), + "Takri": range(71296, 71376), + "Ahom": range(71424, 71504), + "Dogra": range(71680, 71760), + "Warang Citi": range(71840, 71936), + "Dives Akuru": range(71936, 72032), + "Nandinagari": range(72096, 72192), + "Zanabazar Square": range(72192, 72272), + "Soyombo": range(72272, 72368), + "Unified Canadian Aboriginal Syllabics Extended-A": range(72368, 72384), + "Pau Cin Hau": range(72384, 72448), + "Devanagari Extended-A": range(72448, 72544), + "Bhaiksuki": range(72704, 72816), + "Marchen": range(72816, 72896), + "Masaram Gondi": range(72960, 73056), + "Gunjala Gondi": range(73056, 73136), + "Makasar": range(73440, 73472), + "Kawi": range(73472, 73568), + "Lisu Supplement": range(73648, 73664), + "Tamil Supplement": range(73664, 73728), + "Cuneiform": range(73728, 74752), + "Cuneiform Numbers and Punctuation": range(74752, 74880), + "Early Dynastic Cuneiform": range(74880, 75088), + "Cypro-Minoan": range(77712, 77824), + "Egyptian Hieroglyphs": range(77824, 78896), + "Egyptian Hieroglyph Format Controls": range(78896, 78944), + "Anatolian Hieroglyphs": range(82944, 83584), + "Bamum Supplement": range(92160, 92736), + "Mro": range(92736, 92784), + "Tangsa": range(92784, 92880), + "Bassa Vah": range(92880, 92928), + "Pahawh Hmong": range(92928, 93072), + "Medefaidrin": range(93760, 93856), + "Miao": range(93952, 94112), + "Ideographic Symbols and Punctuation": range(94176, 94208), + "Tangut": range(94208, 100352), + "Tangut Components": range(100352, 101120), + "Khitan Small Script": range(101120, 101632), + "Tangut Supplement": range(101632, 101760), + "Kana Extended-B": range(110576, 110592), + "Kana Supplement": range(110592, 110848), + "Kana Extended-A": range(110848, 110896), + "Small Kana Extension": range(110896, 110960), + "Nushu": range(110960, 111360), + "Duployan": range(113664, 113824), + "Shorthand Format Controls": range(113824, 113840), + "Znamenny Musical Notation": range(118528, 118736), + "Byzantine Musical Symbols": range(118784, 119040), + "Musical Symbols": range(119040, 119296), + "Ancient Greek Musical Notation": range(119296, 119376), + "Kaktovik Numerals": range(119488, 119520), + "Mayan Numerals": range(119520, 119552), + "Tai Xuan Jing Symbols": range(119552, 119648), + "Counting Rod Numerals": range(119648, 119680), + "Mathematical Alphanumeric Symbols": range(119808, 120832), + "Sutton SignWriting": range(120832, 121520), + "Latin Extended-G": range(122624, 122880), + "Glagolitic Supplement": range(122880, 122928), + "Cyrillic Extended-D": range(122928, 123024), + "Nyiakeng Puachue Hmong": range(123136, 123216), + "Toto": range(123536, 123584), + "Wancho": range(123584, 123648), + "Nag Mundari": range(124112, 124160), + "Ethiopic Extended-B": range(124896, 
124928), + "Mende Kikakui": range(124928, 125152), + "Adlam": range(125184, 125280), + "Indic Siyaq Numbers": range(126064, 126144), + "Ottoman Siyaq Numbers": range(126208, 126288), + "Arabic Mathematical Alphabetic Symbols": range(126464, 126720), + "Mahjong Tiles": range(126976, 127024), + "Domino Tiles": range(127024, 127136), + "Playing Cards": range(127136, 127232), + "Enclosed Alphanumeric Supplement": range(127232, 127488), + "Enclosed Ideographic Supplement": range(127488, 127744), + "Miscellaneous Symbols and Pictographs": range(127744, 128512), + "Emoticons range(Emoji)": range(128512, 128592), + "Ornamental Dingbats": range(128592, 128640), + "Transport and Map Symbols": range(128640, 128768), + "Alchemical Symbols": range(128768, 128896), + "Geometric Shapes Extended": range(128896, 129024), + "Supplemental Arrows-C": range(129024, 129280), + "Supplemental Symbols and Pictographs": range(129280, 129536), + "Chess Symbols": range(129536, 129648), + "Symbols and Pictographs Extended-A": range(129648, 129792), + "Symbols for Legacy Computing": range(129792, 130048), + "CJK Unified Ideographs Extension B": range(131072, 173792), + "CJK Unified Ideographs Extension C": range(173824, 177984), + "CJK Unified Ideographs Extension D": range(177984, 178208), + "CJK Unified Ideographs Extension E": range(178208, 183984), + "CJK Unified Ideographs Extension F": range(183984, 191472), + "CJK Compatibility Ideographs Supplement": range(194560, 195104), + "CJK Unified Ideographs Extension G": range(196608, 201552), + "CJK Unified Ideographs Extension H": range(201552, 205744), + "Tags": range(917504, 917632), + "Variation Selectors Supplement": range(917760, 918000), + "Supplementary Private Use Area-A": range(983040, 1048576), + "Supplementary Private Use Area-B": range(1048576, 1114112), +} + + +UNICODE_SECONDARY_RANGE_KEYWORD: list[str] = [ + "Supplement", + "Extended", + "Extensions", + "Modifier", + "Marks", + "Punctuation", + "Symbols", + "Forms", + "Operators", + "Miscellaneous", + "Drawing", + "Block", + "Shapes", + "Supplemental", + "Tags", +] + +RE_POSSIBLE_ENCODING_INDICATION = re_compile( + r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)", + IGNORECASE, +) + +IANA_NO_ALIASES = [ + "cp720", + "cp737", + "cp856", + "cp874", + "cp875", + "cp1006", + "koi8_r", + "koi8_t", + "koi8_u", +] + +IANA_SUPPORTED: list[str] = sorted( + filter( + lambda x: x.endswith("_codec") is False + and x not in {"rot_13", "tactis", "mbcs"}, + list(set(aliases.values())) + IANA_NO_ALIASES, + ) +) + +IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED) + +# pre-computed code page that are similar using the function cp_similarity. 
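+# Illustrative reading (not normative): "cp1252" is listed below as similar to
+# "latin_1" and "iso8859_15", which lets the detector treat near-identical
+# code pages as alternatives rather than fully re-scoring each of them.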
+IANA_SUPPORTED_SIMILAR: dict[str, list[str]] = { + "cp037": ["cp1026", "cp1140", "cp273", "cp500"], + "cp1026": ["cp037", "cp1140", "cp273", "cp500"], + "cp1125": ["cp866"], + "cp1140": ["cp037", "cp1026", "cp273", "cp500"], + "cp1250": ["iso8859_2"], + "cp1251": ["kz1048", "ptcp154"], + "cp1252": ["iso8859_15", "iso8859_9", "latin_1"], + "cp1253": ["iso8859_7"], + "cp1254": ["iso8859_15", "iso8859_9", "latin_1"], + "cp1257": ["iso8859_13"], + "cp273": ["cp037", "cp1026", "cp1140", "cp500"], + "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"], + "cp500": ["cp037", "cp1026", "cp1140", "cp273"], + "cp850": ["cp437", "cp857", "cp858", "cp865"], + "cp857": ["cp850", "cp858", "cp865"], + "cp858": ["cp437", "cp850", "cp857", "cp865"], + "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"], + "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"], + "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"], + "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"], + "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"], + "cp866": ["cp1125"], + "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"], + "iso8859_11": ["tis_620"], + "iso8859_13": ["cp1257"], + "iso8859_14": [ + "iso8859_10", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_15": [ + "cp1252", + "cp1254", + "iso8859_10", + "iso8859_14", + "iso8859_16", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_16": [ + "iso8859_14", + "iso8859_15", + "iso8859_2", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"], + "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"], + "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"], + "iso8859_7": ["cp1253"], + "iso8859_9": [ + "cp1252", + "cp1254", + "cp1258", + "iso8859_10", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_4", + "latin_1", + ], + "kz1048": ["cp1251", "ptcp154"], + "latin_1": [ + "cp1252", + "cp1254", + "cp1258", + "iso8859_10", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_4", + "iso8859_9", + ], + "mac_iceland": ["mac_roman", "mac_turkish"], + "mac_roman": ["mac_iceland", "mac_turkish"], + "mac_turkish": ["mac_iceland", "mac_roman"], + "ptcp154": ["cp1251", "kz1048"], + "tis_620": ["iso8859_11"], +} + + +CHARDET_CORRESPONDENCE: dict[str, str] = { + "iso2022_kr": "ISO-2022-KR", + "iso2022_jp": "ISO-2022-JP", + "euc_kr": "EUC-KR", + "tis_620": "TIS-620", + "utf_32": "UTF-32", + "euc_jp": "EUC-JP", + "koi8_r": "KOI8-R", + "iso8859_1": "ISO-8859-1", + "iso8859_2": "ISO-8859-2", + "iso8859_5": "ISO-8859-5", + "iso8859_6": "ISO-8859-6", + "iso8859_7": "ISO-8859-7", + "iso8859_8": "ISO-8859-8", + "utf_16": "UTF-16", + "cp855": "IBM855", + "mac_cyrillic": "MacCyrillic", + "gb2312": "GB2312", + "gb18030": "GB18030", + "cp932": "CP932", + "cp866": "IBM866", + "utf_8": "utf-8", + "utf_8_sig": "UTF-8-SIG", + "shift_jis": "SHIFT_JIS", + "big5": "Big5", + "cp1250": "windows-1250", + "cp1251": "windows-1251", + "cp1252": "Windows-1252", + "cp1253": "windows-1253", + "cp1255": "windows-1255", + "cp1256": "windows-1256", + "cp1254": "Windows-1254", + "cp949": "CP949", +} + + +COMMON_SAFE_ASCII_CHARACTERS: set[str] = { + "<", + ">", + "=", + ":", + "/", + "&", + ";", + "{", + "}", + "[", + "]", + ",", + "|", + '"', + "-", + "(", + ")", +} + +# Sample character sets — replace with full lists if needed +COMMON_CHINESE_CHARACTERS = 
"的一是在不了有和人这中大为上个国我以要他时来用们生到作地于出就分对成会可主发年动同工也能下过子说产种面而方后多定行学法所民得经十三之进着等部度家电力里如水化高自二理起小物现实加量都两体制机当使点从业本去把性好应开它合还因由其些然前外天政四日那社义事平形相全表间样与关各重新线内数正心反你明看原又么利比或但质气第向道命此变条只没结解问意建月公无系军很情者最立代想已通并提直题党程展五果料象员革位入常文总次品式活设及管特件长求老头基资边流路级少图山统接知较将组见计别她手角期根论运农指几九区强放决西被干做必战先回则任取据处队南给色光门即保治北造百规热领七海口东导器压志世金增争济阶油思术极交受联什认六共权收证改清己美再采转更单风切打白教速花带安场身车例真务具万每目至达走积示议声报斗完类八离华名确才科张信马节话米整空元况今集温传土许步群广石记需段研界拉林律叫且究观越织装影算低持音众书布复容儿须际商非验连断深难近矿千周委素技备半办青省列习响约支般史感劳便团往酸历市克何除消构府太准精值号率族维划选标写存候毛亲快效斯院查江型眼王按格养易置派层片始却专状育厂京识适属圆包火住调满县局照参红细引听该铁价严龙飞" + +COMMON_JAPANESE_CHARACTERS = "日一国年大十二本中長出三時行見月分後前生五間上東四今金九入学高円子外八六下来気小七山話女北午百書先名川千水半男西電校語土木聞食車何南万毎白天母火右読友左休父雨" + +COMMON_KOREAN_CHARACTERS = "一二三四五六七八九十百千萬上下左右中人女子大小山川日月火水木金土父母天地國名年時文校學生" + +# Combine all into a set +COMMON_CJK_CHARACTERS = set( + "".join( + [ + COMMON_CHINESE_CHARACTERS, + COMMON_JAPANESE_CHARACTERS, + COMMON_KOREAN_CHARACTERS, + ] + ) +) + +KO_NAMES: set[str] = {"johab", "cp949", "euc_kr"} +ZH_NAMES: set[str] = {"big5", "cp950", "big5hkscs", "hz"} + +# Logging LEVEL below DEBUG +TRACE: int = 5 + + +# Language label that contain the em dash "—" +# character are to be considered alternative seq to origin +FREQUENCIES: dict[str, list[str]] = { + "English": [ + "e", + "a", + "t", + "i", + "o", + "n", + "s", + "r", + "h", + "l", + "d", + "c", + "u", + "m", + "f", + "p", + "g", + "w", + "y", + "b", + "v", + "k", + "x", + "j", + "z", + "q", + ], + "English—": [ + "e", + "a", + "t", + "i", + "o", + "n", + "s", + "r", + "h", + "l", + "d", + "c", + "m", + "u", + "f", + "p", + "g", + "w", + "b", + "y", + "v", + "k", + "j", + "x", + "z", + "q", + ], + "German": [ + "e", + "n", + "i", + "r", + "s", + "t", + "a", + "d", + "h", + "u", + "l", + "g", + "o", + "c", + "m", + "b", + "f", + "k", + "w", + "z", + "p", + "v", + "ü", + "ä", + "ö", + "j", + ], + "French": [ + "e", + "a", + "s", + "n", + "i", + "t", + "r", + "l", + "u", + "o", + "d", + "c", + "p", + "m", + "é", + "v", + "g", + "f", + "b", + "h", + "q", + "à", + "x", + "è", + "y", + "j", + ], + "Dutch": [ + "e", + "n", + "a", + "i", + "r", + "t", + "o", + "d", + "s", + "l", + "g", + "h", + "v", + "m", + "u", + "k", + "c", + "p", + "b", + "w", + "j", + "z", + "f", + "y", + "x", + "ë", + ], + "Italian": [ + "e", + "i", + "a", + "o", + "n", + "l", + "t", + "r", + "s", + "c", + "d", + "u", + "p", + "m", + "g", + "v", + "f", + "b", + "z", + "h", + "q", + "è", + "à", + "k", + "y", + "ò", + ], + "Polish": [ + "a", + "i", + "o", + "e", + "n", + "r", + "z", + "w", + "s", + "c", + "t", + "k", + "y", + "d", + "p", + "m", + "u", + "l", + "j", + "ł", + "g", + "b", + "h", + "ą", + "ę", + "ó", + ], + "Spanish": [ + "e", + "a", + "o", + "n", + "s", + "r", + "i", + "l", + "d", + "t", + "c", + "u", + "m", + "p", + "b", + "g", + "v", + "f", + "y", + "ó", + "h", + "q", + "í", + "j", + "z", + "á", + ], + "Russian": [ + "о", + "а", + "е", + "и", + "н", + "с", + "т", + "р", + "в", + "л", + "к", + "м", + "д", + "п", + "у", + "г", + "я", + "ы", + "з", + "б", + "й", + "ь", + "ч", + "х", + "ж", + "ц", + ], + # Jap-Kanji + "Japanese": [ + "人", + "一", + "大", + "亅", + "丁", + "丨", + "竹", + "笑", + "口", + "日", + "今", + "二", + "彳", + "行", + "十", + "土", + "丶", + "寸", + "寺", + "時", + "乙", + "丿", + "乂", + "气", + "気", + "冂", + "巾", + "亠", + "市", + "目", + "儿", + "見", + "八", + "小", + "凵", + "県", + "月", + "彐", + "門", + "間", + "木", + "東", + "山", + "出", + "本", + "中", + "刀", + "分", + "耳", + "又", + "取", + "最", + "言", + "田", + "心", + "思", + "刂", + "前", + "京", + "尹", + "事", + "生", + "厶", + "云", + "会", + "未", + "来", + "白", + "冫", + "楽", + "灬", + "馬", + "尸", + "尺", + "駅", + "明", + 
"耂", + "者", + "了", + "阝", + "都", + "高", + "卜", + "占", + "厂", + "广", + "店", + "子", + "申", + "奄", + "亻", + "俺", + "上", + "方", + "冖", + "学", + "衣", + "艮", + "食", + "自", + ], + # Jap-Katakana + "Japanese—": [ + "ー", + "ン", + "ス", + "・", + "ル", + "ト", + "リ", + "イ", + "ア", + "ラ", + "ッ", + "ク", + "ド", + "シ", + "レ", + "ジ", + "タ", + "フ", + "ロ", + "カ", + "テ", + "マ", + "ィ", + "グ", + "バ", + "ム", + "プ", + "オ", + "コ", + "デ", + "ニ", + "ウ", + "メ", + "サ", + "ビ", + "ナ", + "ブ", + "ャ", + "エ", + "ュ", + "チ", + "キ", + "ズ", + "ダ", + "パ", + "ミ", + "ェ", + "ョ", + "ハ", + "セ", + "ベ", + "ガ", + "モ", + "ツ", + "ネ", + "ボ", + "ソ", + "ノ", + "ァ", + "ヴ", + "ワ", + "ポ", + "ペ", + "ピ", + "ケ", + "ゴ", + "ギ", + "ザ", + "ホ", + "ゲ", + "ォ", + "ヤ", + "ヒ", + "ユ", + "ヨ", + "ヘ", + "ゼ", + "ヌ", + "ゥ", + "ゾ", + "ヶ", + "ヂ", + "ヲ", + "ヅ", + "ヵ", + "ヱ", + "ヰ", + "ヮ", + "ヽ", + "゠", + "ヾ", + "ヷ", + "ヿ", + "ヸ", + "ヹ", + "ヺ", + ], + # Jap-Hiragana + "Japanese——": [ + "の", + "に", + "る", + "た", + "と", + "は", + "し", + "い", + "を", + "で", + "て", + "が", + "な", + "れ", + "か", + "ら", + "さ", + "っ", + "り", + "す", + "あ", + "も", + "こ", + "ま", + "う", + "く", + "よ", + "き", + "ん", + "め", + "お", + "け", + "そ", + "つ", + "だ", + "や", + "え", + "ど", + "わ", + "ち", + "み", + "せ", + "じ", + "ば", + "へ", + "び", + "ず", + "ろ", + "ほ", + "げ", + "む", + "べ", + "ひ", + "ょ", + "ゆ", + "ぶ", + "ご", + "ゃ", + "ね", + "ふ", + "ぐ", + "ぎ", + "ぼ", + "ゅ", + "づ", + "ざ", + "ぞ", + "ぬ", + "ぜ", + "ぱ", + "ぽ", + "ぷ", + "ぴ", + "ぃ", + "ぁ", + "ぇ", + "ぺ", + "ゞ", + "ぢ", + "ぉ", + "ぅ", + "ゐ", + "ゝ", + "ゑ", + "゛", + "゜", + "ゎ", + "ゔ", + "゚", + "ゟ", + "゙", + "ゕ", + "ゖ", + ], + "Portuguese": [ + "a", + "e", + "o", + "s", + "i", + "r", + "d", + "n", + "t", + "m", + "u", + "c", + "l", + "p", + "g", + "v", + "b", + "f", + "h", + "ã", + "q", + "é", + "ç", + "á", + "z", + "í", + ], + "Swedish": [ + "e", + "a", + "n", + "r", + "t", + "s", + "i", + "l", + "d", + "o", + "m", + "k", + "g", + "v", + "h", + "f", + "u", + "p", + "ä", + "c", + "b", + "ö", + "å", + "y", + "j", + "x", + ], + "Chinese": [ + "的", + "一", + "是", + "不", + "了", + "在", + "人", + "有", + "我", + "他", + "这", + "个", + "们", + "中", + "来", + "上", + "大", + "为", + "和", + "国", + "地", + "到", + "以", + "说", + "时", + "要", + "就", + "出", + "会", + "可", + "也", + "你", + "对", + "生", + "能", + "而", + "子", + "那", + "得", + "于", + "着", + "下", + "自", + "之", + "年", + "过", + "发", + "后", + "作", + "里", + "用", + "道", + "行", + "所", + "然", + "家", + "种", + "事", + "成", + "方", + "多", + "经", + "么", + "去", + "法", + "学", + "如", + "都", + "同", + "现", + "当", + "没", + "动", + "面", + "起", + "看", + "定", + "天", + "分", + "还", + "进", + "好", + "小", + "部", + "其", + "些", + "主", + "样", + "理", + "心", + "她", + "本", + "前", + "开", + "但", + "因", + "只", + "从", + "想", + "实", + ], + "Ukrainian": [ + "о", + "а", + "н", + "і", + "и", + "р", + "в", + "т", + "е", + "с", + "к", + "л", + "у", + "д", + "м", + "п", + "з", + "я", + "ь", + "б", + "г", + "й", + "ч", + "х", + "ц", + "ї", + ], + "Norwegian": [ + "e", + "r", + "n", + "t", + "a", + "s", + "i", + "o", + "l", + "d", + "g", + "k", + "m", + "v", + "f", + "p", + "u", + "b", + "h", + "å", + "y", + "j", + "ø", + "c", + "æ", + "w", + ], + "Finnish": [ + "a", + "i", + "n", + "t", + "e", + "s", + "l", + "o", + "u", + "k", + "ä", + "m", + "r", + "v", + "j", + "h", + "p", + "y", + "d", + "ö", + "g", + "c", + "b", + "f", + "w", + "z", + ], + "Vietnamese": [ + "n", + "h", + "t", + "i", + "c", + "g", + "a", + "o", + "u", + "m", + "l", + "r", + "à", + "đ", + "s", + "e", + "v", + "p", + "b", + "y", + "ư", + "d", + "á", + "k", + "ộ", + "ế", + ], + "Czech": [ + "o", + "e", + "a", + 
"n", + "t", + "s", + "i", + "l", + "v", + "r", + "k", + "d", + "u", + "m", + "p", + "í", + "c", + "h", + "z", + "á", + "y", + "j", + "b", + "ě", + "é", + "ř", + ], + "Hungarian": [ + "e", + "a", + "t", + "l", + "s", + "n", + "k", + "r", + "i", + "o", + "z", + "á", + "é", + "g", + "m", + "b", + "y", + "v", + "d", + "h", + "u", + "p", + "j", + "ö", + "f", + "c", + ], + "Korean": [ + "이", + "다", + "에", + "의", + "는", + "로", + "하", + "을", + "가", + "고", + "지", + "서", + "한", + "은", + "기", + "으", + "년", + "대", + "사", + "시", + "를", + "리", + "도", + "인", + "스", + "일", + ], + "Indonesian": [ + "a", + "n", + "e", + "i", + "r", + "t", + "u", + "s", + "d", + "k", + "m", + "l", + "g", + "p", + "b", + "o", + "h", + "y", + "j", + "c", + "w", + "f", + "v", + "z", + "x", + "q", + ], + "Turkish": [ + "a", + "e", + "i", + "n", + "r", + "l", + "ı", + "k", + "d", + "t", + "s", + "m", + "y", + "u", + "o", + "b", + "ü", + "ş", + "v", + "g", + "z", + "h", + "c", + "p", + "ç", + "ğ", + ], + "Romanian": [ + "e", + "i", + "a", + "r", + "n", + "t", + "u", + "l", + "o", + "c", + "s", + "d", + "p", + "m", + "ă", + "f", + "v", + "î", + "g", + "b", + "ș", + "ț", + "z", + "h", + "â", + "j", + ], + "Farsi": [ + "ا", + "ی", + "ر", + "د", + "ن", + "ه", + "و", + "م", + "ت", + "ب", + "س", + "ل", + "ک", + "ش", + "ز", + "ف", + "گ", + "ع", + "خ", + "ق", + "ج", + "آ", + "پ", + "ح", + "ط", + "ص", + ], + "Arabic": [ + "ا", + "ل", + "ي", + "م", + "و", + "ن", + "ر", + "ت", + "ب", + "ة", + "ع", + "د", + "س", + "ف", + "ه", + "ك", + "ق", + "أ", + "ح", + "ج", + "ش", + "ط", + "ص", + "ى", + "خ", + "إ", + ], + "Danish": [ + "e", + "r", + "n", + "t", + "a", + "i", + "s", + "d", + "l", + "o", + "g", + "m", + "k", + "f", + "v", + "u", + "b", + "h", + "p", + "å", + "y", + "ø", + "æ", + "c", + "j", + "w", + ], + "Serbian": [ + "а", + "и", + "о", + "е", + "н", + "р", + "с", + "у", + "т", + "к", + "ј", + "в", + "д", + "м", + "п", + "л", + "г", + "з", + "б", + "a", + "i", + "e", + "o", + "n", + "ц", + "ш", + ], + "Lithuanian": [ + "i", + "a", + "s", + "o", + "r", + "e", + "t", + "n", + "u", + "k", + "m", + "l", + "p", + "v", + "d", + "j", + "g", + "ė", + "b", + "y", + "ų", + "š", + "ž", + "c", + "ą", + "į", + ], + "Slovene": [ + "e", + "a", + "i", + "o", + "n", + "r", + "s", + "l", + "t", + "j", + "v", + "k", + "d", + "p", + "m", + "u", + "z", + "b", + "g", + "h", + "č", + "c", + "š", + "ž", + "f", + "y", + ], + "Slovak": [ + "o", + "a", + "e", + "n", + "i", + "r", + "v", + "t", + "s", + "l", + "k", + "d", + "m", + "p", + "u", + "c", + "h", + "j", + "b", + "z", + "á", + "y", + "ý", + "í", + "č", + "é", + ], + "Hebrew": [ + "י", + "ו", + "ה", + "ל", + "ר", + "ב", + "ת", + "מ", + "א", + "ש", + "נ", + "ע", + "ם", + "ד", + "ק", + "ח", + "פ", + "ס", + "כ", + "ג", + "ט", + "צ", + "ן", + "ז", + "ך", + ], + "Bulgarian": [ + "а", + "и", + "о", + "е", + "н", + "т", + "р", + "с", + "в", + "л", + "к", + "д", + "п", + "м", + "з", + "г", + "я", + "ъ", + "у", + "б", + "ч", + "ц", + "й", + "ж", + "щ", + "х", + ], + "Croatian": [ + "a", + "i", + "o", + "e", + "n", + "r", + "j", + "s", + "t", + "u", + "k", + "l", + "v", + "d", + "m", + "p", + "g", + "z", + "b", + "c", + "č", + "h", + "š", + "ž", + "ć", + "f", + ], + "Hindi": [ + "क", + "र", + "स", + "न", + "त", + "म", + "ह", + "प", + "य", + "ल", + "व", + "ज", + "द", + "ग", + "ब", + "श", + "ट", + "अ", + "ए", + "थ", + "भ", + "ड", + "च", + "ध", + "ष", + "इ", + ], + "Estonian": [ + "a", + "i", + "e", + "s", + "t", + "l", + "u", + "n", + "o", + "k", + "r", + "d", + "m", + "v", + "g", + "p", + "j", + "h", + "ä", + "b", + "õ", 
+ "ü", + "f", + "c", + "ö", + "y", + ], + "Thai": [ + "า", + "น", + "ร", + "อ", + "ก", + "เ", + "ง", + "ม", + "ย", + "ล", + "ว", + "ด", + "ท", + "ส", + "ต", + "ะ", + "ป", + "บ", + "ค", + "ห", + "แ", + "จ", + "พ", + "ช", + "ข", + "ใ", + ], + "Greek": [ + "α", + "τ", + "ο", + "ι", + "ε", + "ν", + "ρ", + "σ", + "κ", + "η", + "π", + "ς", + "υ", + "μ", + "λ", + "ί", + "ό", + "ά", + "γ", + "έ", + "δ", + "ή", + "ω", + "χ", + "θ", + "ύ", + ], + "Tamil": [ + "க", + "த", + "ப", + "ட", + "ர", + "ம", + "ல", + "ன", + "வ", + "ற", + "ய", + "ள", + "ச", + "ந", + "இ", + "ண", + "அ", + "ஆ", + "ழ", + "ங", + "எ", + "உ", + "ஒ", + "ஸ", + ], + "Kazakh": [ + "а", + "ы", + "е", + "н", + "т", + "р", + "л", + "і", + "д", + "с", + "м", + "қ", + "к", + "о", + "б", + "и", + "у", + "ғ", + "ж", + "ң", + "з", + "ш", + "й", + "п", + "г", + "ө", + ], +} + +LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES) diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/legacy.py b/.venv/lib/python3.12/site-packages/charset_normalizer/legacy.py new file mode 100644 index 0000000..360a310 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/legacy.py @@ -0,0 +1,80 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any +from warnings import warn + +from .api import from_bytes +from .constant import CHARDET_CORRESPONDENCE, TOO_SMALL_SEQUENCE + +# TODO: remove this check when dropping Python 3.7 support +if TYPE_CHECKING: + from typing_extensions import TypedDict + + class ResultDict(TypedDict): + encoding: str | None + language: str + confidence: float | None + + +def detect( + byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any +) -> ResultDict: + """ + chardet legacy method + Detect the encoding of the given byte string. It should be mostly backward-compatible. + Encoding name will match Chardet own writing whenever possible. (Not on encoding name unsupported by it) + This function is deprecated and should be used to migrate your project easily, consult the documentation for + further information. Not planned for removal. + + :param byte_str: The byte sequence to examine. + :param should_rename_legacy: Should we rename legacy encodings + to their more modern equivalents? + """ + if len(kwargs): + warn( + f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()" + ) + + if not isinstance(byte_str, (bytearray, bytes)): + raise TypeError( # pragma: nocover + f"Expected object of type bytes or bytearray, got: {type(byte_str)}" + ) + + if isinstance(byte_str, bytearray): + byte_str = bytes(byte_str) + + r = from_bytes(byte_str).best() + + encoding = r.encoding if r is not None else None + language = r.language if r is not None and r.language != "Unknown" else "" + confidence = 1.0 - r.chaos if r is not None else None + + # automatically lower confidence + # on small bytes samples. + # https://github.com/jawah/charset_normalizer/issues/391 + if ( + confidence is not None + and confidence >= 0.9 + and encoding + not in { + "utf_8", + "ascii", + } + and r.bom is False # type: ignore[union-attr] + and len(byte_str) < TOO_SMALL_SEQUENCE + ): + confidence -= 0.2 + + # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process + # but chardet does return 'utf-8-sig' and it is a valid codec name. 
+    if r is not None and encoding == "utf_8" and r.bom:
+        encoding += "_sig"
+
+    if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
+        encoding = CHARDET_CORRESPONDENCE[encoding]
+
+    return {
+        "encoding": encoding,
+        "language": language,
+        "confidence": confidence,
+    }
diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/md.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/charset_normalizer/md.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 0000000..857d747
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/md.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/md.py b/.venv/lib/python3.12/site-packages/charset_normalizer/md.py
new file mode 100644
index 0000000..12ce024
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/charset_normalizer/md.py
@@ -0,0 +1,635 @@
+from __future__ import annotations
+
+from functools import lru_cache
+from logging import getLogger
+
+from .constant import (
+    COMMON_SAFE_ASCII_CHARACTERS,
+    TRACE,
+    UNICODE_SECONDARY_RANGE_KEYWORD,
+)
+from .utils import (
+    is_accentuated,
+    is_arabic,
+    is_arabic_isolated_form,
+    is_case_variable,
+    is_cjk,
+    is_emoticon,
+    is_hangul,
+    is_hiragana,
+    is_katakana,
+    is_latin,
+    is_punctuation,
+    is_separator,
+    is_symbol,
+    is_thai,
+    is_unprintable,
+    remove_accent,
+    unicode_range,
+    is_cjk_uncommon,
+)
+
+
+class MessDetectorPlugin:
+    """
+    Base abstract class used for mess detection plugins.
+    All detectors MUST extend and implement the given methods.
+    """
+
+    def eligible(self, character: str) -> bool:
+        """
+        Determine whether the given character should be fed in.
+        """
+        raise NotImplementedError  # pragma: nocover
+
+    def feed(self, character: str) -> None:
+        """
+        The main routine to be executed upon character.
+        Insert the logic by which the text would be considered chaotic.
+        """
+        raise NotImplementedError  # pragma: nocover
+
+    def reset(self) -> None:  # pragma: no cover
+        """
+        Reset the plugin to its initial state.
+        """
+        raise NotImplementedError
+
+    @property
+    def ratio(self) -> float:
+        """
+        Compute the chaos ratio based on what your feed() has seen.
+        Must NOT be lower than 0.; there is no upper bound.
+ """ + raise NotImplementedError # pragma: nocover + + +class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._punctuation_count: int = 0 + self._symbol_count: int = 0 + self._character_count: int = 0 + + self._last_printable_char: str | None = None + self._frenzy_symbol_in_word: bool = False + + def eligible(self, character: str) -> bool: + return character.isprintable() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if ( + character != self._last_printable_char + and character not in COMMON_SAFE_ASCII_CHARACTERS + ): + if is_punctuation(character): + self._punctuation_count += 1 + elif ( + character.isdigit() is False + and is_symbol(character) + and is_emoticon(character) is False + ): + self._symbol_count += 2 + + self._last_printable_char = character + + def reset(self) -> None: # Abstract + self._punctuation_count = 0 + self._character_count = 0 + self._symbol_count = 0 + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + ratio_of_punctuation: float = ( + self._punctuation_count + self._symbol_count + ) / self._character_count + + return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0 + + +class TooManyAccentuatedPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._character_count: int = 0 + self._accentuated_count: int = 0 + + def eligible(self, character: str) -> bool: + return character.isalpha() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_accentuated(character): + self._accentuated_count += 1 + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._accentuated_count = 0 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + ratio_of_accentuation: float = self._accentuated_count / self._character_count + return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0 + + +class UnprintablePlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._unprintable_count: int = 0 + self._character_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if is_unprintable(character): + self._unprintable_count += 1 + self._character_count += 1 + + def reset(self) -> None: # Abstract + self._unprintable_count = 0 + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return (self._unprintable_count * 8) / self._character_count + + +class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._successive_count: int = 0 + self._character_count: int = 0 + + self._last_latin_character: str | None = None + + def eligible(self, character: str) -> bool: + return character.isalpha() and is_latin(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + if ( + self._last_latin_character is not None + and is_accentuated(character) + and is_accentuated(self._last_latin_character) + ): + if character.isupper() and self._last_latin_character.isupper(): + self._successive_count += 1 + # Worse if its the same char duplicated with different accent. 
+ if remove_accent(character) == remove_accent(self._last_latin_character): + self._successive_count += 1 + self._last_latin_character = character + + def reset(self) -> None: # Abstract + self._successive_count = 0 + self._character_count = 0 + self._last_latin_character = None + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return (self._successive_count * 2) / self._character_count + + +class SuspiciousRange(MessDetectorPlugin): + def __init__(self) -> None: + self._suspicious_successive_range_count: int = 0 + self._character_count: int = 0 + self._last_printable_seen: str | None = None + + def eligible(self, character: str) -> bool: + return character.isprintable() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if ( + character.isspace() + or is_punctuation(character) + or character in COMMON_SAFE_ASCII_CHARACTERS + ): + self._last_printable_seen = None + return + + if self._last_printable_seen is None: + self._last_printable_seen = character + return + + unicode_range_a: str | None = unicode_range(self._last_printable_seen) + unicode_range_b: str | None = unicode_range(character) + + if is_suspiciously_successive_range(unicode_range_a, unicode_range_b): + self._suspicious_successive_range_count += 1 + + self._last_printable_seen = character + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._suspicious_successive_range_count = 0 + self._last_printable_seen = None + + @property + def ratio(self) -> float: + if self._character_count <= 13: + return 0.0 + + ratio_of_suspicious_range_usage: float = ( + self._suspicious_successive_range_count * 2 + ) / self._character_count + + return ratio_of_suspicious_range_usage + + +class SuperWeirdWordPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._word_count: int = 0 + self._bad_word_count: int = 0 + self._foreign_long_count: int = 0 + + self._is_current_word_bad: bool = False + self._foreign_long_watch: bool = False + + self._character_count: int = 0 + self._bad_character_count: int = 0 + + self._buffer: str = "" + self._buffer_accent_count: int = 0 + self._buffer_glyph_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if character.isalpha(): + self._buffer += character + if is_accentuated(character): + self._buffer_accent_count += 1 + if ( + self._foreign_long_watch is False + and (is_latin(character) is False or is_accentuated(character)) + and is_cjk(character) is False + and is_hangul(character) is False + and is_katakana(character) is False + and is_hiragana(character) is False + and is_thai(character) is False + ): + self._foreign_long_watch = True + if ( + is_cjk(character) + or is_hangul(character) + or is_katakana(character) + or is_hiragana(character) + or is_thai(character) + ): + self._buffer_glyph_count += 1 + return + if not self._buffer: + return + if ( + character.isspace() or is_punctuation(character) or is_separator(character) + ) and self._buffer: + self._word_count += 1 + buffer_length: int = len(self._buffer) + + self._character_count += buffer_length + + if buffer_length >= 4: + if self._buffer_accent_count / buffer_length >= 0.5: + self._is_current_word_bad = True + # Word/Buffer ending with an upper case accentuated letter are so rare, + # that we will consider them all as suspicious. Same weight as foreign_long suspicious. 
+ elif ( + is_accentuated(self._buffer[-1]) + and self._buffer[-1].isupper() + and all(_.isupper() for _ in self._buffer) is False + ): + self._foreign_long_count += 1 + self._is_current_word_bad = True + elif self._buffer_glyph_count == 1: + self._is_current_word_bad = True + self._foreign_long_count += 1 + if buffer_length >= 24 and self._foreign_long_watch: + camel_case_dst = [ + i + for c, i in zip(self._buffer, range(0, buffer_length)) + if c.isupper() + ] + probable_camel_cased: bool = False + + if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3): + probable_camel_cased = True + + if not probable_camel_cased: + self._foreign_long_count += 1 + self._is_current_word_bad = True + + if self._is_current_word_bad: + self._bad_word_count += 1 + self._bad_character_count += len(self._buffer) + self._is_current_word_bad = False + + self._foreign_long_watch = False + self._buffer = "" + self._buffer_accent_count = 0 + self._buffer_glyph_count = 0 + elif ( + character not in {"<", ">", "-", "=", "~", "|", "_"} + and character.isdigit() is False + and is_symbol(character) + ): + self._is_current_word_bad = True + self._buffer += character + + def reset(self) -> None: # Abstract + self._buffer = "" + self._is_current_word_bad = False + self._foreign_long_watch = False + self._bad_word_count = 0 + self._word_count = 0 + self._character_count = 0 + self._bad_character_count = 0 + self._foreign_long_count = 0 + + @property + def ratio(self) -> float: + if self._word_count <= 10 and self._foreign_long_count == 0: + return 0.0 + + return self._bad_character_count / self._character_count + + +class CjkUncommonPlugin(MessDetectorPlugin): + """ + Detect messy CJK text that probably means nothing. + """ + + def __init__(self) -> None: + self._character_count: int = 0 + self._uncommon_count: int = 0 + + def eligible(self, character: str) -> bool: + return is_cjk(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_cjk_uncommon(character): + self._uncommon_count += 1 + return + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._uncommon_count = 0 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + uncommon_form_usage: float = self._uncommon_count / self._character_count + + # we can be pretty sure it's garbage when uncommon characters are widely + # used. otherwise it could just be traditional chinese for example. 
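+        # the division keeps the contribution modest: at most 0.1 even when every character is uncommon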
+ return uncommon_form_usage / 10 if uncommon_form_usage > 0.5 else 0.0 + + +class ArchaicUpperLowerPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._buf: bool = False + + self._character_count_since_last_sep: int = 0 + + self._successive_upper_lower_count: int = 0 + self._successive_upper_lower_count_final: int = 0 + + self._character_count: int = 0 + + self._last_alpha_seen: str | None = None + self._current_ascii_only: bool = True + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + is_concerned = character.isalpha() and is_case_variable(character) + chunk_sep = is_concerned is False + + if chunk_sep and self._character_count_since_last_sep > 0: + if ( + self._character_count_since_last_sep <= 64 + and character.isdigit() is False + and self._current_ascii_only is False + ): + self._successive_upper_lower_count_final += ( + self._successive_upper_lower_count + ) + + self._successive_upper_lower_count = 0 + self._character_count_since_last_sep = 0 + self._last_alpha_seen = None + self._buf = False + self._character_count += 1 + self._current_ascii_only = True + + return + + if self._current_ascii_only is True and character.isascii() is False: + self._current_ascii_only = False + + if self._last_alpha_seen is not None: + if (character.isupper() and self._last_alpha_seen.islower()) or ( + character.islower() and self._last_alpha_seen.isupper() + ): + if self._buf is True: + self._successive_upper_lower_count += 2 + self._buf = False + else: + self._buf = True + else: + self._buf = False + + self._character_count += 1 + self._character_count_since_last_sep += 1 + self._last_alpha_seen = character + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._character_count_since_last_sep = 0 + self._successive_upper_lower_count = 0 + self._successive_upper_lower_count_final = 0 + self._last_alpha_seen = None + self._buf = False + self._current_ascii_only = True + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return self._successive_upper_lower_count_final / self._character_count + + +class ArabicIsolatedFormPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._character_count: int = 0 + self._isolated_form_count: int = 0 + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._isolated_form_count = 0 + + def eligible(self, character: str) -> bool: + return is_arabic(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_arabic_isolated_form(character): + self._isolated_form_count += 1 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + isolated_form_usage: float = self._isolated_form_count / self._character_count + + return isolated_form_usage + + +@lru_cache(maxsize=1024) +def is_suspiciously_successive_range( + unicode_range_a: str | None, unicode_range_b: str | None +) -> bool: + """ + Determine if two Unicode range seen next to each other can be considered as suspicious. + """ + if unicode_range_a is None or unicode_range_b is None: + return True + + if unicode_range_a == unicode_range_b: + return False + + if "Latin" in unicode_range_a and "Latin" in unicode_range_b: + return False + + if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b: + return False + + # Latin characters can be accompanied with a combining diacritical mark + # eg. Vietnamese. 
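+    # e.g. "e" (Basic Latin) followed by U+0301 from Combining Diacritical Marks.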
+ if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and ( + "Combining" in unicode_range_a or "Combining" in unicode_range_b + ): + return False + + keywords_range_a, keywords_range_b = ( + unicode_range_a.split(" "), + unicode_range_b.split(" "), + ) + + for el in keywords_range_a: + if el in UNICODE_SECONDARY_RANGE_KEYWORD: + continue + if el in keywords_range_b: + return False + + # Japanese Exception + range_a_jp_chars, range_b_jp_chars = ( + unicode_range_a + in ( + "Hiragana", + "Katakana", + ), + unicode_range_b in ("Hiragana", "Katakana"), + ) + if (range_a_jp_chars or range_b_jp_chars) and ( + "CJK" in unicode_range_a or "CJK" in unicode_range_b + ): + return False + if range_a_jp_chars and range_b_jp_chars: + return False + + if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b: + if "CJK" in unicode_range_a or "CJK" in unicode_range_b: + return False + if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": + return False + + # Chinese/Japanese use dedicated range for punctuation and/or separators. + if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or ( + unicode_range_a in ["Katakana", "Hiragana"] + and unicode_range_b in ["Katakana", "Hiragana"] + ): + if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b: + return False + if "Forms" in unicode_range_a or "Forms" in unicode_range_b: + return False + if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": + return False + + return True + + +@lru_cache(maxsize=2048) +def mess_ratio( + decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False +) -> float: + """ + Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier. + """ + + detectors: list[MessDetectorPlugin] = [ + md_class() for md_class in MessDetectorPlugin.__subclasses__() + ] + + length: int = len(decoded_sequence) + 1 + + mean_mess_ratio: float = 0.0 + + if length < 512: + intermediary_mean_mess_ratio_calc: int = 32 + elif length <= 1024: + intermediary_mean_mess_ratio_calc = 64 + else: + intermediary_mean_mess_ratio_calc = 128 + + for character, index in zip(decoded_sequence + "\n", range(length)): + for detector in detectors: + if detector.eligible(character): + detector.feed(character) + + if ( + index > 0 and index % intermediary_mean_mess_ratio_calc == 0 + ) or index == length - 1: + mean_mess_ratio = sum(dt.ratio for dt in detectors) + + if mean_mess_ratio >= maximum_threshold: + break + + if debug: + logger = getLogger("charset_normalizer") + + logger.log( + TRACE, + "Mess-detector extended-analysis start. 
" + f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} " + f"maximum_threshold={maximum_threshold}", + ) + + if len(decoded_sequence) > 16: + logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}") + logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}") + + for dt in detectors: + logger.log(TRACE, f"{dt.__class__}: {dt.ratio}") + + return round(mean_mess_ratio, 3) diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/md__mypyc.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/charset_normalizer/md__mypyc.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..d5091af Binary files /dev/null and b/.venv/lib/python3.12/site-packages/charset_normalizer/md__mypyc.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/models.py b/.venv/lib/python3.12/site-packages/charset_normalizer/models.py new file mode 100644 index 0000000..1042758 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/models.py @@ -0,0 +1,360 @@ +from __future__ import annotations + +from encodings.aliases import aliases +from hashlib import sha256 +from json import dumps +from re import sub +from typing import Any, Iterator, List, Tuple + +from .constant import RE_POSSIBLE_ENCODING_INDICATION, TOO_BIG_SEQUENCE +from .utils import iana_name, is_multi_byte_encoding, unicode_range + + +class CharsetMatch: + def __init__( + self, + payload: bytes, + guessed_encoding: str, + mean_mess_ratio: float, + has_sig_or_bom: bool, + languages: CoherenceMatches, + decoded_payload: str | None = None, + preemptive_declaration: str | None = None, + ): + self._payload: bytes = payload + + self._encoding: str = guessed_encoding + self._mean_mess_ratio: float = mean_mess_ratio + self._languages: CoherenceMatches = languages + self._has_sig_or_bom: bool = has_sig_or_bom + self._unicode_ranges: list[str] | None = None + + self._leaves: list[CharsetMatch] = [] + self._mean_coherence_ratio: float = 0.0 + + self._output_payload: bytes | None = None + self._output_encoding: str | None = None + + self._string: str | None = decoded_payload + + self._preemptive_declaration: str | None = preemptive_declaration + + def __eq__(self, other: object) -> bool: + if not isinstance(other, CharsetMatch): + if isinstance(other, str): + return iana_name(other) == self.encoding + return False + return self.encoding == other.encoding and self.fingerprint == other.fingerprint + + def __lt__(self, other: object) -> bool: + """ + Implemented to make sorted available upon CharsetMatches items. + """ + if not isinstance(other, CharsetMatch): + raise ValueError + + chaos_difference: float = abs(self.chaos - other.chaos) + coherence_difference: float = abs(self.coherence - other.coherence) + + # Below 1% difference --> Use Coherence + if chaos_difference < 0.01 and coherence_difference > 0.02: + return self.coherence > other.coherence + elif chaos_difference < 0.01 and coherence_difference <= 0.02: + # When having a difficult decision, use the result that decoded as many multi-byte as possible. + # preserve RAM usage! 
+ if len(self._payload) >= TOO_BIG_SEQUENCE: + return self.chaos < other.chaos + return self.multi_byte_usage > other.multi_byte_usage + + return self.chaos < other.chaos + + @property + def multi_byte_usage(self) -> float: + return 1.0 - (len(str(self)) / len(self.raw)) + + def __str__(self) -> str: + # Lazy Str Loading + if self._string is None: + self._string = str(self._payload, self._encoding, "strict") + return self._string + + def __repr__(self) -> str: + return f"" + + def add_submatch(self, other: CharsetMatch) -> None: + if not isinstance(other, CharsetMatch) or other == self: + raise ValueError( + "Unable to add instance <{}> as a submatch of a CharsetMatch".format( + other.__class__ + ) + ) + + other._string = None # Unload RAM usage; dirty trick. + self._leaves.append(other) + + @property + def encoding(self) -> str: + return self._encoding + + @property + def encoding_aliases(self) -> list[str]: + """ + Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855. + """ + also_known_as: list[str] = [] + for u, p in aliases.items(): + if self.encoding == u: + also_known_as.append(p) + elif self.encoding == p: + also_known_as.append(u) + return also_known_as + + @property + def bom(self) -> bool: + return self._has_sig_or_bom + + @property + def byte_order_mark(self) -> bool: + return self._has_sig_or_bom + + @property + def languages(self) -> list[str]: + """ + Return the complete list of possible languages found in decoded sequence. + Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'. + """ + return [e[0] for e in self._languages] + + @property + def language(self) -> str: + """ + Most probable language found in decoded sequence. If none were detected or inferred, the property will return + "Unknown". + """ + if not self._languages: + # Trying to infer the language based on the given encoding + # Its either English or we should not pronounce ourselves in certain cases. + if "ascii" in self.could_be_from_charset: + return "English" + + # doing it there to avoid circular import + from charset_normalizer.cd import encoding_languages, mb_encoding_languages + + languages = ( + mb_encoding_languages(self.encoding) + if is_multi_byte_encoding(self.encoding) + else encoding_languages(self.encoding) + ) + + if len(languages) == 0 or "Latin Based" in languages: + return "Unknown" + + return languages[0] + + return self._languages[0][0] + + @property + def chaos(self) -> float: + return self._mean_mess_ratio + + @property + def coherence(self) -> float: + if not self._languages: + return 0.0 + return self._languages[0][1] + + @property + def percent_chaos(self) -> float: + return round(self.chaos * 100, ndigits=3) + + @property + def percent_coherence(self) -> float: + return round(self.coherence * 100, ndigits=3) + + @property + def raw(self) -> bytes: + """ + Original untouched bytes. 
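+        Useful to persist the input unchanged or to retry a decode with another codec.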
+ """ + return self._payload + + @property + def submatch(self) -> list[CharsetMatch]: + return self._leaves + + @property + def has_submatch(self) -> bool: + return len(self._leaves) > 0 + + @property + def alphabets(self) -> list[str]: + if self._unicode_ranges is not None: + return self._unicode_ranges + # list detected ranges + detected_ranges: list[str | None] = [unicode_range(char) for char in str(self)] + # filter and sort + self._unicode_ranges = sorted(list({r for r in detected_ranges if r})) + return self._unicode_ranges + + @property + def could_be_from_charset(self) -> list[str]: + """ + The complete list of encoding that output the exact SAME str result and therefore could be the originating + encoding. + This list does include the encoding available in property 'encoding'. + """ + return [self._encoding] + [m.encoding for m in self._leaves] + + def output(self, encoding: str = "utf_8") -> bytes: + """ + Method to get re-encoded bytes payload using given target encoding. Default to UTF-8. + Any errors will be simply ignored by the encoder NOT replaced. + """ + if self._output_encoding is None or self._output_encoding != encoding: + self._output_encoding = encoding + decoded_string = str(self) + if ( + self._preemptive_declaration is not None + and self._preemptive_declaration.lower() + not in ["utf-8", "utf8", "utf_8"] + ): + patched_header = sub( + RE_POSSIBLE_ENCODING_INDICATION, + lambda m: m.string[m.span()[0] : m.span()[1]].replace( + m.groups()[0], + iana_name(self._output_encoding).replace("_", "-"), # type: ignore[arg-type] + ), + decoded_string[:8192], + count=1, + ) + + decoded_string = patched_header + decoded_string[8192:] + + self._output_payload = decoded_string.encode(encoding, "replace") + + return self._output_payload # type: ignore + + @property + def fingerprint(self) -> str: + """ + Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one. + """ + return sha256(self.output()).hexdigest() + + +class CharsetMatches: + """ + Container with every CharsetMatch items ordered by default from most probable to the less one. + Act like a list(iterable) but does not implements all related methods. + """ + + def __init__(self, results: list[CharsetMatch] | None = None): + self._results: list[CharsetMatch] = sorted(results) if results else [] + + def __iter__(self) -> Iterator[CharsetMatch]: + yield from self._results + + def __getitem__(self, item: int | str) -> CharsetMatch: + """ + Retrieve a single item either by its position or encoding name (alias may be used here). + Raise KeyError upon invalid index or encoding not present in results. + """ + if isinstance(item, int): + return self._results[item] + if isinstance(item, str): + item = iana_name(item, False) + for result in self._results: + if item in result.could_be_from_charset: + return result + raise KeyError + + def __len__(self) -> int: + return len(self._results) + + def __bool__(self) -> bool: + return len(self._results) > 0 + + def append(self, item: CharsetMatch) -> None: + """ + Insert a single match. Will be inserted accordingly to preserve sort. + Can be inserted as a submatch. 
+ """ + if not isinstance(item, CharsetMatch): + raise ValueError( + "Cannot append instance '{}' to CharsetMatches".format( + str(item.__class__) + ) + ) + # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage) + if len(item.raw) < TOO_BIG_SEQUENCE: + for match in self._results: + if match.fingerprint == item.fingerprint and match.chaos == item.chaos: + match.add_submatch(item) + return + self._results.append(item) + self._results = sorted(self._results) + + def best(self) -> CharsetMatch | None: + """ + Simply return the first match. Strict equivalent to matches[0]. + """ + if not self._results: + return None + return self._results[0] + + def first(self) -> CharsetMatch | None: + """ + Redundant method, call the method best(). Kept for BC reasons. + """ + return self.best() + + +CoherenceMatch = Tuple[str, float] +CoherenceMatches = List[CoherenceMatch] + + +class CliDetectionResult: + def __init__( + self, + path: str, + encoding: str | None, + encoding_aliases: list[str], + alternative_encodings: list[str], + language: str, + alphabets: list[str], + has_sig_or_bom: bool, + chaos: float, + coherence: float, + unicode_path: str | None, + is_preferred: bool, + ): + self.path: str = path + self.unicode_path: str | None = unicode_path + self.encoding: str | None = encoding + self.encoding_aliases: list[str] = encoding_aliases + self.alternative_encodings: list[str] = alternative_encodings + self.language: str = language + self.alphabets: list[str] = alphabets + self.has_sig_or_bom: bool = has_sig_or_bom + self.chaos: float = chaos + self.coherence: float = coherence + self.is_preferred: bool = is_preferred + + @property + def __dict__(self) -> dict[str, Any]: # type: ignore + return { + "path": self.path, + "encoding": self.encoding, + "encoding_aliases": self.encoding_aliases, + "alternative_encodings": self.alternative_encodings, + "language": self.language, + "alphabets": self.alphabets, + "has_sig_or_bom": self.has_sig_or_bom, + "chaos": self.chaos, + "coherence": self.coherence, + "unicode_path": self.unicode_path, + "is_preferred": self.is_preferred, + } + + def to_json(self) -> str: + return dumps(self.__dict__, ensure_ascii=True, indent=4) diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/py.typed b/.venv/lib/python3.12/site-packages/charset_normalizer/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/utils.py b/.venv/lib/python3.12/site-packages/charset_normalizer/utils.py new file mode 100644 index 0000000..6bf0384 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/utils.py @@ -0,0 +1,414 @@ +from __future__ import annotations + +import importlib +import logging +import unicodedata +from codecs import IncrementalDecoder +from encodings.aliases import aliases +from functools import lru_cache +from re import findall +from typing import Generator + +from _multibytecodec import ( # type: ignore[import-not-found,import] + MultibyteIncrementalDecoder, +) + +from .constant import ( + ENCODING_MARKS, + IANA_SUPPORTED_SIMILAR, + RE_POSSIBLE_ENCODING_INDICATION, + UNICODE_RANGES_COMBINED, + UNICODE_SECONDARY_RANGE_KEYWORD, + UTF8_MAXIMAL_ALLOCATION, + COMMON_CJK_CHARACTERS, +) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_accentuated(character: str) -> bool: + try: + description: str = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? 
+ return False + return ( + "WITH GRAVE" in description + or "WITH ACUTE" in description + or "WITH CEDILLA" in description + or "WITH DIAERESIS" in description + or "WITH CIRCUMFLEX" in description + or "WITH TILDE" in description + or "WITH MACRON" in description + or "WITH RING ABOVE" in description + ) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def remove_accent(character: str) -> str: + decomposed: str = unicodedata.decomposition(character) + if not decomposed: + return character + + codes: list[str] = decomposed.split(" ") + + return chr(int(codes[0], 16)) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def unicode_range(character: str) -> str | None: + """ + Retrieve the Unicode range official name from a single character. + """ + character_ord: int = ord(character) + + for range_name, ord_range in UNICODE_RANGES_COMBINED.items(): + if character_ord in ord_range: + return range_name + + return None + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_latin(character: str) -> bool: + try: + description: str = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + return "LATIN" in description + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_punctuation(character: str) -> bool: + character_category: str = unicodedata.category(character) + + if "P" in character_category: + return True + + character_range: str | None = unicode_range(character) + + if character_range is None: + return False + + return "Punctuation" in character_range + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_symbol(character: str) -> bool: + character_category: str = unicodedata.category(character) + + if "S" in character_category or "N" in character_category: + return True + + character_range: str | None = unicode_range(character) + + if character_range is None: + return False + + return "Forms" in character_range and character_category != "Lo" + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_emoticon(character: str) -> bool: + character_range: str | None = unicode_range(character) + + if character_range is None: + return False + + return "Emoticons" in character_range or "Pictographs" in character_range + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_separator(character: str) -> bool: + if character.isspace() or character in {"|", "+", "<", ">"}: + return True + + character_category: str = unicodedata.category(character) + + return "Z" in character_category or character_category in {"Po", "Pd", "Pc"} + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_case_variable(character: str) -> bool: + return character.islower() != character.isupper() + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_cjk(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + + return "CJK" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_hiragana(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + + return "HIRAGANA" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_katakana(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? 
+ return False + + return "KATAKANA" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_hangul(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + + return "HANGUL" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_thai(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + + return "THAI" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_arabic(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + + return "ARABIC" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_arabic_isolated_form(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + + return "ARABIC" in character_name and "ISOLATED FORM" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_cjk_uncommon(character: str) -> bool: + return character not in COMMON_CJK_CHARACTERS + + +@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED)) +def is_unicode_range_secondary(range_name: str) -> bool: + return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_unprintable(character: str) -> bool: + return ( + character.isspace() is False # includes \n \t \r \v + and character.isprintable() is False + and character != "\x1a" # Why? Its the ASCII substitute character. + and character != "\ufeff" # bug discovered in Python, + # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space. + ) + + +def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> str | None: + """ + Extract using ASCII-only decoder any specified encoding in the first n-bytes. + """ + if not isinstance(sequence, bytes): + raise TypeError + + seq_len: int = len(sequence) + + results: list[str] = findall( + RE_POSSIBLE_ENCODING_INDICATION, + sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"), + ) + + if len(results) == 0: + return None + + for specified_encoding in results: + specified_encoding = specified_encoding.lower().replace("-", "_") + + encoding_alias: str + encoding_iana: str + + for encoding_alias, encoding_iana in aliases.items(): + if encoding_alias == specified_encoding: + return encoding_iana + if encoding_iana == specified_encoding: + return encoding_iana + + return None + + +@lru_cache(maxsize=128) +def is_multi_byte_encoding(name: str) -> bool: + """ + Verify is a specific encoding is a multi byte one based on it IANA name + """ + return name in { + "utf_8", + "utf_8_sig", + "utf_16", + "utf_16_be", + "utf_16_le", + "utf_32", + "utf_32_le", + "utf_32_be", + "utf_7", + } or issubclass( + importlib.import_module(f"encodings.{name}").IncrementalDecoder, + MultibyteIncrementalDecoder, + ) + + +def identify_sig_or_bom(sequence: bytes) -> tuple[str | None, bytes]: + """ + Identify and extract SIG/BOM in given sequence. 
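+    Returns the IANA encoding name with the matched mark, or (None, b"") when
+    no known mark opens the sequence.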
+ """ + + for iana_encoding in ENCODING_MARKS: + marks: bytes | list[bytes] = ENCODING_MARKS[iana_encoding] + + if isinstance(marks, bytes): + marks = [marks] + + for mark in marks: + if sequence.startswith(mark): + return iana_encoding, mark + + return None, b"" + + +def should_strip_sig_or_bom(iana_encoding: str) -> bool: + return iana_encoding not in {"utf_16", "utf_32"} + + +def iana_name(cp_name: str, strict: bool = True) -> str: + """Returns the Python normalized encoding name (Not the IANA official name).""" + cp_name = cp_name.lower().replace("-", "_") + + encoding_alias: str + encoding_iana: str + + for encoding_alias, encoding_iana in aliases.items(): + if cp_name in [encoding_alias, encoding_iana]: + return encoding_iana + + if strict: + raise ValueError(f"Unable to retrieve IANA for '{cp_name}'") + + return cp_name + + +def cp_similarity(iana_name_a: str, iana_name_b: str) -> float: + if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b): + return 0.0 + + decoder_a = importlib.import_module(f"encodings.{iana_name_a}").IncrementalDecoder + decoder_b = importlib.import_module(f"encodings.{iana_name_b}").IncrementalDecoder + + id_a: IncrementalDecoder = decoder_a(errors="ignore") + id_b: IncrementalDecoder = decoder_b(errors="ignore") + + character_match_count: int = 0 + + for i in range(255): + to_be_decoded: bytes = bytes([i]) + if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded): + character_match_count += 1 + + return character_match_count / 254 + + +def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool: + """ + Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using + the function cp_similarity. + """ + return ( + iana_name_a in IANA_SUPPORTED_SIMILAR + and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a] + ) + + +def set_logging_handler( + name: str = "charset_normalizer", + level: int = logging.INFO, + format_string: str = "%(asctime)s | %(levelname)s | %(message)s", +) -> None: + logger = logging.getLogger(name) + logger.setLevel(level) + + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter(format_string)) + logger.addHandler(handler) + + +def cut_sequence_chunks( + sequences: bytes, + encoding_iana: str, + offsets: range, + chunk_size: int, + bom_or_sig_available: bool, + strip_sig_or_bom: bool, + sig_payload: bytes, + is_multi_byte_decoder: bool, + decoded_payload: str | None = None, +) -> Generator[str, None, None]: + if decoded_payload and is_multi_byte_decoder is False: + for i in offsets: + chunk = decoded_payload[i : i + chunk_size] + if not chunk: + break + yield chunk + else: + for i in offsets: + chunk_end = i + chunk_size + if chunk_end > len(sequences) + 8: + continue + + cut_sequence = sequences[i : i + chunk_size] + + if bom_or_sig_available and strip_sig_or_bom is False: + cut_sequence = sig_payload + cut_sequence + + chunk = cut_sequence.decode( + encoding_iana, + errors="ignore" if is_multi_byte_decoder else "strict", + ) + + # multi-byte bad cutting detector and adjustment + # not the cleanest way to perform that fix but clever enough for now. 
+ if is_multi_byte_decoder and i > 0: + chunk_partial_size_chk: int = min(chunk_size, 16) + + if ( + decoded_payload + and chunk[:chunk_partial_size_chk] not in decoded_payload + ): + for j in range(i, i - 4, -1): + cut_sequence = sequences[j:chunk_end] + + if bom_or_sig_available and strip_sig_or_bom is False: + cut_sequence = sig_payload + cut_sequence + + chunk = cut_sequence.decode(encoding_iana, errors="ignore") + + if chunk[:chunk_partial_size_chk] in decoded_payload: + break + + yield chunk diff --git a/.venv/lib/python3.12/site-packages/charset_normalizer/version.py b/.venv/lib/python3.12/site-packages/charset_normalizer/version.py new file mode 100644 index 0000000..71350e5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/charset_normalizer/version.py @@ -0,0 +1,8 @@ +""" +Expose version +""" + +from __future__ import annotations + +__version__ = "3.4.3" +VERSION = __version__.split(".") diff --git a/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/LICENSE.md b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/LICENSE.md new file mode 100644 index 0000000..19b6b45 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/LICENSE.md @@ -0,0 +1,31 @@ +BSD 3-Clause License + +Copyright (c) 2013-2024, Kim Davies and contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/METADATA b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/METADATA new file mode 100644 index 0000000..c42623e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/METADATA @@ -0,0 +1,250 @@ +Metadata-Version: 2.1 +Name: idna +Version: 3.10 +Summary: Internationalized Domain Names in Applications (IDNA) +Author-email: Kim Davies +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: Name Service (DNS) +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Utilities +Requires-Dist: ruff >= 0.6.2 ; extra == "all" +Requires-Dist: mypy >= 1.11.2 ; extra == "all" +Requires-Dist: pytest >= 8.3.2 ; extra == "all" +Requires-Dist: flake8 >= 7.1.1 ; extra == "all" +Project-URL: Changelog, https://github.com/kjd/idna/blob/master/HISTORY.rst +Project-URL: Issue tracker, https://github.com/kjd/idna/issues +Project-URL: Source, https://github.com/kjd/idna +Provides-Extra: all + +Internationalized Domain Names in Applications (IDNA) +===================================================== + +Support for the Internationalized Domain Names in +Applications (IDNA) protocol as specified in `RFC 5891 +`_. This is the latest version of +the protocol and is sometimes referred to as “IDNA 2008”. + +This library also provides support for Unicode Technical +Standard 46, `Unicode IDNA Compatibility Processing +`_. + +This acts as a suitable replacement for the “encodings.idna” +module that comes with the Python standard library, but which +only supports the older superseded IDNA specification (`RFC 3490 +`_). + +Basic functions are simply executed: + +.. code-block:: pycon + + >>> import idna + >>> idna.encode('ドメイン.テスト') + b'xn--eckwd4c7c.xn--zckzah' + >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah')) + ドメイン.テスト + + +Installation +------------ + +This package is available for installation from PyPI: + +.. code-block:: bash + + $ python3 -m pip install idna + + +Usage +----- + +For typical usage, the ``encode`` and ``decode`` functions will take a +domain name argument and perform a conversion to A-labels or U-labels +respectively. + +.. code-block:: pycon + + >>> import idna + >>> idna.encode('ドメイン.テスト') + b'xn--eckwd4c7c.xn--zckzah' + >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah')) + ドメイン.テスト + +You may use the codec encoding and decoding methods using the +``idna.codec`` module: + +.. 
code-block:: pycon + + >>> import idna.codec + >>> print('домен.испытание'.encode('idna2008')) + b'xn--d1acufc.xn--80akhbyknj4f' + >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna2008')) + домен.испытание + +Conversions can be applied at a per-label basis using the ``ulabel`` or +``alabel`` functions if necessary: + +.. code-block:: pycon + + >>> idna.alabel('测试') + b'xn--0zwm56d' + +Compatibility Mapping (UTS #46) ++++++++++++++++++++++++++++++++ + +As described in `RFC 5895 `_, the +IDNA specification does not normalize input from different potential +ways a user may input a domain name. This functionality, known as +a “mapping”, is considered by the specification to be a local +user-interface issue distinct from IDNA conversion functionality. + +This library provides one such mapping that was developed by the +Unicode Consortium. Known as `Unicode IDNA Compatibility Processing +`_, it provides for both a regular +mapping for typical applications, as well as a transitional mapping to +help migrate from older IDNA 2003 applications. Strings are +preprocessed according to Section 4.4 “Preprocessing for IDNA2008” +prior to the IDNA operations. + +For example, “Königsgäßchen” is not a permissible label as *LATIN +CAPITAL LETTER K* is not allowed (nor are capital letters in general). +UTS 46 will convert this into lower case prior to applying the IDNA +conversion. + +.. code-block:: pycon + + >>> import idna + >>> idna.encode('Königsgäßchen') + ... + idna.core.InvalidCodepoint: Codepoint U+004B at position 1 of 'Königsgäßchen' not allowed + >>> idna.encode('Königsgäßchen', uts46=True) + b'xn--knigsgchen-b4a3dun' + >>> print(idna.decode('xn--knigsgchen-b4a3dun')) + königsgäßchen + +Transitional processing provides conversions to help transition from +the older 2003 standard to the current standard. For example, in the +original IDNA specification, the *LATIN SMALL LETTER SHARP S* (ß) was +converted into two *LATIN SMALL LETTER S* (ss), whereas in the current +IDNA specification this conversion is not performed. + +.. code-block:: pycon + + >>> idna.encode('Königsgäßchen', uts46=True, transitional=True) + 'xn--knigsgsschen-lcb0w' + +Implementers should use transitional processing with caution, only in +rare cases where conversion from legacy labels to current labels must be +performed (i.e. IDNA implementations that pre-date 2008). For typical +applications that just need to convert labels, transitional processing +is unlikely to be beneficial and could produce unexpected incompatible +results. + +``encodings.idna`` Compatibility +++++++++++++++++++++++++++++++++ + +Function calls from the Python built-in ``encodings.idna`` module are +mapped to their IDNA 2008 equivalents using the ``idna.compat`` module. +Simply substitute the ``import`` clause in your code to refer to the new +module name. + +Exceptions +---------- + +All errors raised during the conversion following the specification +should raise an exception derived from the ``idna.IDNAError`` base +class. + +More specific exceptions that may be generated as ``idna.IDNABidiError`` +when the error reflects an illegal combination of left-to-right and +right-to-left characters in a label; ``idna.InvalidCodepoint`` when +a specific codepoint is an illegal character in an IDN label (i.e. +INVALID); and ``idna.InvalidCodepointContext`` when the codepoint is +illegal based on its positional context (i.e. it is CONTEXTO or CONTEXTJ +but the contextual requirements are not satisfied.) 
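+
+For instance, a label containing a disallowed codepoint such as ``*`` should
+be rejected with an exception from this hierarchy (a minimal, illustrative
+sketch):
+
+.. code-block:: pycon
+
+    >>> import idna
+    >>> try:
+    ...     idna.encode('bad*label.example')
+    ... except idna.IDNAError as exc:
+    ...     print(type(exc).__name__)
+    InvalidCodepoint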
+ +Building and Diagnostics +------------------------ + +The IDNA and UTS 46 functionality relies upon pre-calculated lookup +tables for performance. These tables are derived from computing against +eligibility criteria in the respective standards. These tables are +computed using the command-line script ``tools/idna-data``. + +This tool will fetch relevant codepoint data from the Unicode repository +and perform the required calculations to identify eligibility. There are +three main modes: + +* ``idna-data make-libdata``. Generates ``idnadata.py`` and + ``uts46data.py``, the pre-calculated lookup tables used for IDNA and + UTS 46 conversions. Implementers who wish to track this library against + a different Unicode version may use this tool to manually generate a + different version of the ``idnadata.py`` and ``uts46data.py`` files. + +* ``idna-data make-table``. Generate a table of the IDNA disposition + (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix + B.1 of RFC 5892 and the pre-computed tables published by `IANA + `_. + +* ``idna-data U+0061``. Prints debugging output on the various + properties associated with an individual Unicode codepoint (in this + case, U+0061), that are used to assess the IDNA and UTS 46 status of a + codepoint. This is helpful in debugging or analysis. + +The tool accepts a number of arguments, described using ``idna-data +-h``. Most notably, the ``--version`` argument allows the specification +of the version of Unicode to be used in computing the table data. For +example, ``idna-data --version 9.0.0 make-libdata`` will generate +library data against Unicode 9.0.0. + + +Additional Notes +---------------- + +* **Packages**. The latest tagged release version is published in the + `Python Package Index `_. + +* **Version support**. This library supports Python 3.6 and higher. + As this library serves as a low-level toolkit for a variety of + applications, many of which strive for broad compatibility with older + Python versions, there is no rush to remove older interpreter support. + Removing support for older versions should be well justified in that the + maintenance burden has become too high. + +* **Python 2**. Python 2 is supported by version 2.x of this library. + Use "idna<3" in your requirements file if you need this library for + a Python 2 application. Be advised that these versions are no longer + actively developed. + +* **Testing**. The library has a test suite based on each rule of the + IDNA specification, as well as tests that are provided as part of the + Unicode Technical Standard 46, `Unicode IDNA Compatibility Processing + `_. + +* **Emoji**. It is an occasional request to support emoji domains in + this library. Encoding of symbols like emoji is expressly prohibited by + the technical standard IDNA 2008 and emoji domains are broadly phased + out across the domain industry due to associated security risks. For + now, applications that need to support these non-compliant labels + may wish to consider trying the encode/decode operation in this library + first, and then falling back to using `encodings.idna`. See `the Github + project `_ for more discussion. 
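+
+  A minimal sketch of that fallback pattern (illustrative only; the built-in
+  ``idna`` codec implements the older IDNA 2003 rules):
+
+  .. code-block:: pycon
+
+      >>> import idna
+      >>> def encode_lenient(domain):
+      ...     try:
+      ...         return idna.encode(domain, uts46=True)
+      ...     except idna.IDNAError:
+      ...         return domain.encode('idna')  # legacy IDNA 2003 codec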
+ diff --git a/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/RECORD b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/RECORD new file mode 100644 index 0000000..9cfce7f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/RECORD @@ -0,0 +1,22 @@ +idna-3.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +idna-3.10.dist-info/LICENSE.md,sha256=pZ8LDvNjWHQQmkRhykT_enDVBpboFHZ7-vch1Mmw2w8,1541 +idna-3.10.dist-info/METADATA,sha256=URR5ZyDfQ1PCEGhkYoojqfi2Ra0tau2--lhwG4XSfjI,10158 +idna-3.10.dist-info/RECORD,, +idna-3.10.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868 +idna/__pycache__/__init__.cpython-312.pyc,, +idna/__pycache__/codec.cpython-312.pyc,, +idna/__pycache__/compat.cpython-312.pyc,, +idna/__pycache__/core.cpython-312.pyc,, +idna/__pycache__/idnadata.cpython-312.pyc,, +idna/__pycache__/intranges.cpython-312.pyc,, +idna/__pycache__/package_data.cpython-312.pyc,, +idna/__pycache__/uts46data.cpython-312.pyc,, +idna/codec.py,sha256=PEew3ItwzjW4hymbasnty2N2OXvNcgHB-JjrBuxHPYY,3422 +idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316 +idna/core.py,sha256=YJYyAMnwiQEPjVC4-Fqu_p4CJ6yKKuDGmppBNQNQpFs,13239 +idna/idnadata.py,sha256=W30GcIGvtOWYwAjZj4ZjuouUutC6ffgNuyjJy7fZ-lo,78306 +idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898 +idna/package_data.py,sha256=q59S3OXsc5VI8j6vSD0sGBMyk6zZ4vWFREE88yCJYKs,21 +idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +idna/uts46data.py,sha256=rt90K9J40gUSwppDPCrhjgi5AA6pWM65dEGRSf6rIhM,239289 diff --git a/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/WHEEL new file mode 100644 index 0000000..3b5e64b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna-3.10.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/lib/python3.12/site-packages/idna/__init__.py b/.venv/lib/python3.12/site-packages/idna/__init__.py new file mode 100644 index 0000000..cfdc030 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna/__init__.py @@ -0,0 +1,45 @@ +from .core import ( + IDNABidiError, + IDNAError, + InvalidCodepoint, + InvalidCodepointContext, + alabel, + check_bidi, + check_hyphen_ok, + check_initial_combiner, + check_label, + check_nfc, + decode, + encode, + ulabel, + uts46_remap, + valid_contextj, + valid_contexto, + valid_label_length, + valid_string_length, +) +from .intranges import intranges_contain +from .package_data import __version__ + +__all__ = [ + "__version__", + "IDNABidiError", + "IDNAError", + "InvalidCodepoint", + "InvalidCodepointContext", + "alabel", + "check_bidi", + "check_hyphen_ok", + "check_initial_combiner", + "check_label", + "check_nfc", + "decode", + "encode", + "intranges_contain", + "ulabel", + "uts46_remap", + "valid_contextj", + "valid_contexto", + "valid_label_length", + "valid_string_length", +] diff --git a/.venv/lib/python3.12/site-packages/idna/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/idna/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..e758ac9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/idna/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/idna/__pycache__/codec.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/idna/__pycache__/codec.cpython-312.pyc new file mode 100644 index 0000000..07424bf Binary files /dev/null and b/.venv/lib/python3.12/site-packages/idna/__pycache__/codec.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/idna/__pycache__/compat.cpython-312.pyc b/.venv/lib/python3.12/site-packages/idna/__pycache__/compat.cpython-312.pyc new file mode 100644 index 0000000..dcf5841 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/idna/__pycache__/compat.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/idna/__pycache__/core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/idna/__pycache__/core.cpython-312.pyc new file mode 100644 index 0000000..b9e2aeb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/idna/__pycache__/core.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/idna/__pycache__/idnadata.cpython-312.pyc b/.venv/lib/python3.12/site-packages/idna/__pycache__/idnadata.cpython-312.pyc new file mode 100644 index 0000000..f73c672 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/idna/__pycache__/idnadata.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/idna/__pycache__/intranges.cpython-312.pyc b/.venv/lib/python3.12/site-packages/idna/__pycache__/intranges.cpython-312.pyc new file mode 100644 index 0000000..b940262 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/idna/__pycache__/intranges.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/idna/__pycache__/package_data.cpython-312.pyc b/.venv/lib/python3.12/site-packages/idna/__pycache__/package_data.cpython-312.pyc new file mode 100644 index 0000000..3fe24f2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/idna/__pycache__/package_data.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/idna/__pycache__/uts46data.cpython-312.pyc b/.venv/lib/python3.12/site-packages/idna/__pycache__/uts46data.cpython-312.pyc new file mode 100644 index 0000000..90b7be6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/idna/__pycache__/uts46data.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/idna/codec.py b/.venv/lib/python3.12/site-packages/idna/codec.py new file mode 100644 index 0000000..913abfd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna/codec.py @@ -0,0 +1,122 @@ +import codecs +import re +from typing import Any, Optional, Tuple + +from .core import IDNAError, alabel, decode, encode, ulabel + +_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]") + + +class Codec(codecs.Codec): + def encode(self, data: str, errors: str = "strict") -> Tuple[bytes, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return b"", 0 + + return encode(data), len(data) + + def decode(self, data: bytes, errors: str = "strict") -> Tuple[str, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return "", 0 + + return decode(data), len(data) + + +class IncrementalEncoder(codecs.BufferedIncrementalEncoder): + def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[bytes, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return b"", 0 + + labels = _unicode_dots_re.split(data) + trailing_dot = b"" + if labels: + if not labels[-1]: + trailing_dot = b"." 
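+                # the data ended with a dot: drop the empty final label and re-append the dot after joining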
+ del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = b"." + + result = [] + size = 0 + for label in labels: + result.append(alabel(label)) + if size: + size += 1 + size += len(label) + + # Join with U+002E + result_bytes = b".".join(result) + trailing_dot + size += len(trailing_dot) + return result_bytes, size + + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def _buffer_decode(self, data: Any, errors: str, final: bool) -> Tuple[str, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return ("", 0) + + if not isinstance(data, str): + data = str(data, "ascii") + + labels = _unicode_dots_re.split(data) + trailing_dot = "" + if labels: + if not labels[-1]: + trailing_dot = "." + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = "." + + result = [] + size = 0 + for label in labels: + result.append(ulabel(label)) + if size: + size += 1 + size += len(label) + + result_str = ".".join(result) + trailing_dot + size += len(trailing_dot) + return (result_str, size) + + +class StreamWriter(Codec, codecs.StreamWriter): + pass + + +class StreamReader(Codec, codecs.StreamReader): + pass + + +def search_function(name: str) -> Optional[codecs.CodecInfo]: + if name != "idna2008": + return None + return codecs.CodecInfo( + name=name, + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) + + +codecs.register(search_function) diff --git a/.venv/lib/python3.12/site-packages/idna/compat.py b/.venv/lib/python3.12/site-packages/idna/compat.py new file mode 100644 index 0000000..1df9f2a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna/compat.py @@ -0,0 +1,15 @@ +from typing import Any, Union + +from .core import decode, encode + + +def ToASCII(label: str) -> bytes: + return encode(label) + + +def ToUnicode(label: Union[bytes, bytearray]) -> str: + return decode(label) + + +def nameprep(s: Any) -> None: + raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol") diff --git a/.venv/lib/python3.12/site-packages/idna/core.py b/.venv/lib/python3.12/site-packages/idna/core.py new file mode 100644 index 0000000..9115f12 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna/core.py @@ -0,0 +1,437 @@ +import bisect +import re +import unicodedata +from typing import Optional, Union + +from . 
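Editor's note: codec.py above registers an 'idna2008' codec search function with the standard codecs machinery, and compat.py provides thin ToASCII/ToUnicode shims (nameprep deliberately raises, since IDNA 2008 dropped it). A usage sketch, not part of the patch; the expected byte strings follow the upstream README:

    >>> import idna.codec
    >>> 'домен.испытание'.encode('idna2008')
    b'xn--d1acufc.xn--80akhbyknj4f'
    >>> b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna2008')
    'домен.испытание'
    >>> import idna.compat
    >>> idna.compat.ToASCII('ドメイン.テスト')
    b'xn--eckwd4c7c.xn--zckzah'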
import idnadata +from .intranges import intranges_contain + +_virama_combining_class = 9 +_alabel_prefix = b"xn--" +_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]") + + +class IDNAError(UnicodeError): + """Base exception for all IDNA-encoding related problems""" + + pass + + +class IDNABidiError(IDNAError): + """Exception when bidirectional requirements are not satisfied""" + + pass + + +class InvalidCodepoint(IDNAError): + """Exception when a disallowed or unallocated codepoint is used""" + + pass + + +class InvalidCodepointContext(IDNAError): + """Exception when the codepoint is not valid in the context it is used""" + + pass + + +def _combining_class(cp: int) -> int: + v = unicodedata.combining(chr(cp)) + if v == 0: + if not unicodedata.name(chr(cp)): + raise ValueError("Unknown character in unicodedata") + return v + + +def _is_script(cp: str, script: str) -> bool: + return intranges_contain(ord(cp), idnadata.scripts[script]) + + +def _punycode(s: str) -> bytes: + return s.encode("punycode") + + +def _unot(s: int) -> str: + return "U+{:04X}".format(s) + + +def valid_label_length(label: Union[bytes, str]) -> bool: + if len(label) > 63: + return False + return True + + +def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool: + if len(label) > (254 if trailing_dot else 253): + return False + return True + + +def check_bidi(label: str, check_ltr: bool = False) -> bool: + # Bidi rules should only be applied if string contains RTL characters + bidi_label = False + for idx, cp in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + if direction == "": + # String likely comes from a newer version of Unicode + raise IDNABidiError("Unknown directionality in label {} at position {}".format(repr(label), idx)) + if direction in ["R", "AL", "AN"]: + bidi_label = True + if not bidi_label and not check_ltr: + return True + + # Bidi rule 1 + direction = unicodedata.bidirectional(label[0]) + if direction in ["R", "AL"]: + rtl = True + elif direction == "L": + rtl = False + else: + raise IDNABidiError("First codepoint in label {} must be directionality L, R or AL".format(repr(label))) + + valid_ending = False + number_type: Optional[str] = None + for idx, cp in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + + if rtl: + # Bidi rule 2 + if direction not in [ + "R", + "AL", + "AN", + "EN", + "ES", + "CS", + "ET", + "ON", + "BN", + "NSM", + ]: + raise IDNABidiError("Invalid direction for codepoint at position {} in a right-to-left label".format(idx)) + # Bidi rule 3 + if direction in ["R", "AL", "EN", "AN"]: + valid_ending = True + elif direction != "NSM": + valid_ending = False + # Bidi rule 4 + if direction in ["AN", "EN"]: + if not number_type: + number_type = direction + else: + if number_type != direction: + raise IDNABidiError("Can not mix numeral types in a right-to-left label") + else: + # Bidi rule 5 + if direction not in ["L", "EN", "ES", "CS", "ET", "ON", "BN", "NSM"]: + raise IDNABidiError("Invalid direction for codepoint at position {} in a left-to-right label".format(idx)) + # Bidi rule 6 + if direction in ["L", "EN"]: + valid_ending = True + elif direction != "NSM": + valid_ending = False + + if not valid_ending: + raise IDNABidiError("Label ends with illegal codepoint directionality") + + return True + + +def check_initial_combiner(label: str) -> bool: + if unicodedata.category(label[0])[0] == "M": + raise IDNAError("Label begins with an illegal combining character") + return True + + +def check_hyphen_ok(label: str) -> bool: 
+ if label[2:4] == "--": + raise IDNAError("Label has disallowed hyphens in 3rd and 4th position") + if label[0] == "-" or label[-1] == "-": + raise IDNAError("Label must not start or end with a hyphen") + return True + + +def check_nfc(label: str) -> None: + if unicodedata.normalize("NFC", label) != label: + raise IDNAError("Label must be in Normalization Form C") + + +def valid_contextj(label: str, pos: int) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x200C: + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + + ok = False + for i in range(pos - 1, -1, -1): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord("T"): + continue + elif joining_type in [ord("L"), ord("D")]: + ok = True + break + else: + break + + if not ok: + return False + + ok = False + for i in range(pos + 1, len(label)): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord("T"): + continue + elif joining_type in [ord("R"), ord("D")]: + ok = True + break + else: + break + return ok + + if cp_value == 0x200D: + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + return False + + else: + return False + + +def valid_contexto(label: str, pos: int, exception: bool = False) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x00B7: + if 0 < pos < len(label) - 1: + if ord(label[pos - 1]) == 0x006C and ord(label[pos + 1]) == 0x006C: + return True + return False + + elif cp_value == 0x0375: + if pos < len(label) - 1 and len(label) > 1: + return _is_script(label[pos + 1], "Greek") + return False + + elif cp_value == 0x05F3 or cp_value == 0x05F4: + if pos > 0: + return _is_script(label[pos - 1], "Hebrew") + return False + + elif cp_value == 0x30FB: + for cp in label: + if cp == "\u30fb": + continue + if _is_script(cp, "Hiragana") or _is_script(cp, "Katakana") or _is_script(cp, "Han"): + return True + return False + + elif 0x660 <= cp_value <= 0x669: + for cp in label: + if 0x6F0 <= ord(cp) <= 0x06F9: + return False + return True + + elif 0x6F0 <= cp_value <= 0x6F9: + for cp in label: + if 0x660 <= ord(cp) <= 0x0669: + return False + return True + + return False + + +def check_label(label: Union[str, bytes, bytearray]) -> None: + if isinstance(label, (bytes, bytearray)): + label = label.decode("utf-8") + if len(label) == 0: + raise IDNAError("Empty Label") + + check_nfc(label) + check_hyphen_ok(label) + check_initial_combiner(label) + + for pos, cp in enumerate(label): + cp_value = ord(cp) + if intranges_contain(cp_value, idnadata.codepoint_classes["PVALID"]): + continue + elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTJ"]): + try: + if not valid_contextj(label, pos): + raise InvalidCodepointContext( + "Joiner {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label)) + ) + except ValueError: + raise IDNAError( + "Unknown codepoint adjacent to joiner {} at position {} in {}".format( + _unot(cp_value), pos + 1, repr(label) + ) + ) + elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTO"]): + if not valid_contexto(label, pos): + raise InvalidCodepointContext( + "Codepoint {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label)) + ) + else: + raise InvalidCodepoint( + "Codepoint {} at position {} of {} not allowed".format(_unot(cp_value), pos + 1, repr(label)) + ) + + check_bidi(label) + + +def alabel(label: str) -> bytes: + try: + label_bytes = label.encode("ascii") + 
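Editor's note: check_label above is the gatekeeper: it enforces NFC, the hyphen rules, no leading combining mark, the PVALID/CONTEXTJ/CONTEXTO codepoint classes, and finally the Bidi rule. A sketch of the failure modes, not part of the patch (the exception messages are taken from the format strings above; traceback bodies elided doctest-style):

    >>> from idna.core import check_label, check_hyphen_ok
    >>> check_label('bücher')          # valid label: returns None silently
    >>> check_label('a_b')             # U+005F is DISALLOWED
    Traceback (most recent call last):
      ...
    idna.core.InvalidCodepoint: Codepoint U+005F at position 2 of 'a_b' not allowed
    >>> check_hyphen_ok('ab--cd')      # positions 3 and 4 are reserved for the xn-- prefix
    Traceback (most recent call last):
      ...
    idna.core.IDNAError: Label has disallowed hyphens in 3rd and 4th position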
ulabel(label_bytes) + if not valid_label_length(label_bytes): + raise IDNAError("Label too long") + return label_bytes + except UnicodeEncodeError: + pass + + check_label(label) + label_bytes = _alabel_prefix + _punycode(label) + + if not valid_label_length(label_bytes): + raise IDNAError("Label too long") + + return label_bytes + + +def ulabel(label: Union[str, bytes, bytearray]) -> str: + if not isinstance(label, (bytes, bytearray)): + try: + label_bytes = label.encode("ascii") + except UnicodeEncodeError: + check_label(label) + return label + else: + label_bytes = label + + label_bytes = label_bytes.lower() + if label_bytes.startswith(_alabel_prefix): + label_bytes = label_bytes[len(_alabel_prefix) :] + if not label_bytes: + raise IDNAError("Malformed A-label, no Punycode eligible content found") + if label_bytes.decode("ascii")[-1] == "-": + raise IDNAError("A-label must not end with a hyphen") + else: + check_label(label_bytes) + return label_bytes.decode("ascii") + + try: + label = label_bytes.decode("punycode") + except UnicodeError: + raise IDNAError("Invalid A-label") + check_label(label) + return label + + +def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str: + """Re-map the characters in the string according to UTS46 processing.""" + from .uts46data import uts46data + + output = "" + + for pos, char in enumerate(domain): + code_point = ord(char) + try: + uts46row = uts46data[code_point if code_point < 256 else bisect.bisect_left(uts46data, (code_point, "Z")) - 1] + status = uts46row[1] + replacement: Optional[str] = None + if len(uts46row) == 3: + replacement = uts46row[2] + if ( + status == "V" + or (status == "D" and not transitional) + or (status == "3" and not std3_rules and replacement is None) + ): + output += char + elif replacement is not None and ( + status == "M" or (status == "3" and not std3_rules) or (status == "D" and transitional) + ): + output += replacement + elif status != "I": + raise IndexError() + except IndexError: + raise InvalidCodepoint( + "Codepoint {} not allowed at position {} in {}".format(_unot(code_point), pos + 1, repr(domain)) + ) + + return unicodedata.normalize("NFC", output) + + +def encode( + s: Union[str, bytes, bytearray], + strict: bool = False, + uts46: bool = False, + std3_rules: bool = False, + transitional: bool = False, +) -> bytes: + if not isinstance(s, str): + try: + s = str(s, "ascii") + except UnicodeDecodeError: + raise IDNAError("should pass a unicode string to the function rather than a byte string.") + if uts46: + s = uts46_remap(s, std3_rules, transitional) + trailing_dot = False + result = [] + if strict: + labels = s.split(".") + else: + labels = _unicode_dots_re.split(s) + if not labels or labels == [""]: + raise IDNAError("Empty domain") + if labels[-1] == "": + del labels[-1] + trailing_dot = True + for label in labels: + s = alabel(label) + if s: + result.append(s) + else: + raise IDNAError("Empty label") + if trailing_dot: + result.append(b"") + s = b".".join(result) + if not valid_string_length(s, trailing_dot): + raise IDNAError("Domain too long") + return s + + +def decode( + s: Union[str, bytes, bytearray], + strict: bool = False, + uts46: bool = False, + std3_rules: bool = False, +) -> str: + try: + if not isinstance(s, str): + s = str(s, "ascii") + except UnicodeDecodeError: + raise IDNAError("Invalid ASCII in A-label") + if uts46: + s = uts46_remap(s, std3_rules, False) + trailing_dot = False + result = [] + if not strict: + labels = _unicode_dots_re.split(s) + else: + 
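Editor's note: alabel/ulabel above convert a single label between its U-label and A-label (xn--) forms, while encode/decode drive them across a whole domain; uts46=True first runs uts46_remap, which is what makes case-folded input acceptable to the otherwise strict IDNA 2008 rules. A sketch, not part of the patch (expected values follow the upstream README):

    >>> import idna
    >>> idna.alabel('bücher')
    b'xn--bcher-kva'
    >>> idna.ulabel(b'xn--bcher-kva')
    'bücher'
    >>> idna.encode('Königsgäßchen', uts46=True)   # plain encode() would reject the capital K
    b'xn--knigsgchen-b4a3dun'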
labels = s.split(".") + if not labels or labels == [""]: + raise IDNAError("Empty domain") + if not labels[-1]: + del labels[-1] + trailing_dot = True + for label in labels: + s = ulabel(label) + if s: + result.append(s) + else: + raise IDNAError("Empty label") + if trailing_dot: + result.append("") + return ".".join(result) diff --git a/.venv/lib/python3.12/site-packages/idna/idnadata.py b/.venv/lib/python3.12/site-packages/idna/idnadata.py new file mode 100644 index 0000000..4be6004 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna/idnadata.py @@ -0,0 +1,4243 @@ +# This file is automatically generated by tools/idna-data + +__version__ = "15.1.0" +scripts = { + "Greek": ( + 0x37000000374, + 0x37500000378, + 0x37A0000037E, + 0x37F00000380, + 0x38400000385, + 0x38600000387, + 0x3880000038B, + 0x38C0000038D, + 0x38E000003A2, + 0x3A3000003E2, + 0x3F000000400, + 0x1D2600001D2B, + 0x1D5D00001D62, + 0x1D6600001D6B, + 0x1DBF00001DC0, + 0x1F0000001F16, + 0x1F1800001F1E, + 0x1F2000001F46, + 0x1F4800001F4E, + 0x1F5000001F58, + 0x1F5900001F5A, + 0x1F5B00001F5C, + 0x1F5D00001F5E, + 0x1F5F00001F7E, + 0x1F8000001FB5, + 0x1FB600001FC5, + 0x1FC600001FD4, + 0x1FD600001FDC, + 0x1FDD00001FF0, + 0x1FF200001FF5, + 0x1FF600001FFF, + 0x212600002127, + 0xAB650000AB66, + 0x101400001018F, + 0x101A0000101A1, + 0x1D2000001D246, + ), + "Han": ( + 0x2E8000002E9A, + 0x2E9B00002EF4, + 0x2F0000002FD6, + 0x300500003006, + 0x300700003008, + 0x30210000302A, + 0x30380000303C, + 0x340000004DC0, + 0x4E000000A000, + 0xF9000000FA6E, + 0xFA700000FADA, + 0x16FE200016FE4, + 0x16FF000016FF2, + 0x200000002A6E0, + 0x2A7000002B73A, + 0x2B7400002B81E, + 0x2B8200002CEA2, + 0x2CEB00002EBE1, + 0x2EBF00002EE5E, + 0x2F8000002FA1E, + 0x300000003134B, + 0x31350000323B0, + ), + "Hebrew": ( + 0x591000005C8, + 0x5D0000005EB, + 0x5EF000005F5, + 0xFB1D0000FB37, + 0xFB380000FB3D, + 0xFB3E0000FB3F, + 0xFB400000FB42, + 0xFB430000FB45, + 0xFB460000FB50, + ), + "Hiragana": ( + 0x304100003097, + 0x309D000030A0, + 0x1B0010001B120, + 0x1B1320001B133, + 0x1B1500001B153, + 0x1F2000001F201, + ), + "Katakana": ( + 0x30A1000030FB, + 0x30FD00003100, + 0x31F000003200, + 0x32D0000032FF, + 0x330000003358, + 0xFF660000FF70, + 0xFF710000FF9E, + 0x1AFF00001AFF4, + 0x1AFF50001AFFC, + 0x1AFFD0001AFFF, + 0x1B0000001B001, + 0x1B1200001B123, + 0x1B1550001B156, + 0x1B1640001B168, + ), +} +joining_types = { + 0xAD: 84, + 0x300: 84, + 0x301: 84, + 0x302: 84, + 0x303: 84, + 0x304: 84, + 0x305: 84, + 0x306: 84, + 0x307: 84, + 0x308: 84, + 0x309: 84, + 0x30A: 84, + 0x30B: 84, + 0x30C: 84, + 0x30D: 84, + 0x30E: 84, + 0x30F: 84, + 0x310: 84, + 0x311: 84, + 0x312: 84, + 0x313: 84, + 0x314: 84, + 0x315: 84, + 0x316: 84, + 0x317: 84, + 0x318: 84, + 0x319: 84, + 0x31A: 84, + 0x31B: 84, + 0x31C: 84, + 0x31D: 84, + 0x31E: 84, + 0x31F: 84, + 0x320: 84, + 0x321: 84, + 0x322: 84, + 0x323: 84, + 0x324: 84, + 0x325: 84, + 0x326: 84, + 0x327: 84, + 0x328: 84, + 0x329: 84, + 0x32A: 84, + 0x32B: 84, + 0x32C: 84, + 0x32D: 84, + 0x32E: 84, + 0x32F: 84, + 0x330: 84, + 0x331: 84, + 0x332: 84, + 0x333: 84, + 0x334: 84, + 0x335: 84, + 0x336: 84, + 0x337: 84, + 0x338: 84, + 0x339: 84, + 0x33A: 84, + 0x33B: 84, + 0x33C: 84, + 0x33D: 84, + 0x33E: 84, + 0x33F: 84, + 0x340: 84, + 0x341: 84, + 0x342: 84, + 0x343: 84, + 0x344: 84, + 0x345: 84, + 0x346: 84, + 0x347: 84, + 0x348: 84, + 0x349: 84, + 0x34A: 84, + 0x34B: 84, + 0x34C: 84, + 0x34D: 84, + 0x34E: 84, + 0x34F: 84, + 0x350: 84, + 0x351: 84, + 0x352: 84, + 0x353: 84, + 0x354: 84, + 0x355: 84, + 0x356: 84, + 0x357: 84, + 0x358: 84, + 0x359: 84, 
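Editor's note on the table layout: each entry in the scripts tuples packs a half-open codepoint range [start, end) into one integer as (start << 32) | end, the encoding that idna/intranges.py bisects over; e.g. 0x37000000374 covers U+0370 through U+0373. A quick check, not part of the patch:

    >>> hex((0x370 << 32) | 0x374)
    '0x37000000374'
    >>> from idna import idnadata
    >>> from idna.intranges import intranges_contain
    >>> intranges_contain(ord('α'), idnadata.scripts['Greek'])
    True
    >>> intranges_contain(ord('a'), idnadata.scripts['Greek'])
    False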
+ 0x35A: 84, + 0x35B: 84, + 0x35C: 84, + 0x35D: 84, + 0x35E: 84, + 0x35F: 84, + 0x360: 84, + 0x361: 84, + 0x362: 84, + 0x363: 84, + 0x364: 84, + 0x365: 84, + 0x366: 84, + 0x367: 84, + 0x368: 84, + 0x369: 84, + 0x36A: 84, + 0x36B: 84, + 0x36C: 84, + 0x36D: 84, + 0x36E: 84, + 0x36F: 84, + 0x483: 84, + 0x484: 84, + 0x485: 84, + 0x486: 84, + 0x487: 84, + 0x488: 84, + 0x489: 84, + 0x591: 84, + 0x592: 84, + 0x593: 84, + 0x594: 84, + 0x595: 84, + 0x596: 84, + 0x597: 84, + 0x598: 84, + 0x599: 84, + 0x59A: 84, + 0x59B: 84, + 0x59C: 84, + 0x59D: 84, + 0x59E: 84, + 0x59F: 84, + 0x5A0: 84, + 0x5A1: 84, + 0x5A2: 84, + 0x5A3: 84, + 0x5A4: 84, + 0x5A5: 84, + 0x5A6: 84, + 0x5A7: 84, + 0x5A8: 84, + 0x5A9: 84, + 0x5AA: 84, + 0x5AB: 84, + 0x5AC: 84, + 0x5AD: 84, + 0x5AE: 84, + 0x5AF: 84, + 0x5B0: 84, + 0x5B1: 84, + 0x5B2: 84, + 0x5B3: 84, + 0x5B4: 84, + 0x5B5: 84, + 0x5B6: 84, + 0x5B7: 84, + 0x5B8: 84, + 0x5B9: 84, + 0x5BA: 84, + 0x5BB: 84, + 0x5BC: 84, + 0x5BD: 84, + 0x5BF: 84, + 0x5C1: 84, + 0x5C2: 84, + 0x5C4: 84, + 0x5C5: 84, + 0x5C7: 84, + 0x610: 84, + 0x611: 84, + 0x612: 84, + 0x613: 84, + 0x614: 84, + 0x615: 84, + 0x616: 84, + 0x617: 84, + 0x618: 84, + 0x619: 84, + 0x61A: 84, + 0x61C: 84, + 0x620: 68, + 0x622: 82, + 0x623: 82, + 0x624: 82, + 0x625: 82, + 0x626: 68, + 0x627: 82, + 0x628: 68, + 0x629: 82, + 0x62A: 68, + 0x62B: 68, + 0x62C: 68, + 0x62D: 68, + 0x62E: 68, + 0x62F: 82, + 0x630: 82, + 0x631: 82, + 0x632: 82, + 0x633: 68, + 0x634: 68, + 0x635: 68, + 0x636: 68, + 0x637: 68, + 0x638: 68, + 0x639: 68, + 0x63A: 68, + 0x63B: 68, + 0x63C: 68, + 0x63D: 68, + 0x63E: 68, + 0x63F: 68, + 0x640: 67, + 0x641: 68, + 0x642: 68, + 0x643: 68, + 0x644: 68, + 0x645: 68, + 0x646: 68, + 0x647: 68, + 0x648: 82, + 0x649: 68, + 0x64A: 68, + 0x64B: 84, + 0x64C: 84, + 0x64D: 84, + 0x64E: 84, + 0x64F: 84, + 0x650: 84, + 0x651: 84, + 0x652: 84, + 0x653: 84, + 0x654: 84, + 0x655: 84, + 0x656: 84, + 0x657: 84, + 0x658: 84, + 0x659: 84, + 0x65A: 84, + 0x65B: 84, + 0x65C: 84, + 0x65D: 84, + 0x65E: 84, + 0x65F: 84, + 0x66E: 68, + 0x66F: 68, + 0x670: 84, + 0x671: 82, + 0x672: 82, + 0x673: 82, + 0x675: 82, + 0x676: 82, + 0x677: 82, + 0x678: 68, + 0x679: 68, + 0x67A: 68, + 0x67B: 68, + 0x67C: 68, + 0x67D: 68, + 0x67E: 68, + 0x67F: 68, + 0x680: 68, + 0x681: 68, + 0x682: 68, + 0x683: 68, + 0x684: 68, + 0x685: 68, + 0x686: 68, + 0x687: 68, + 0x688: 82, + 0x689: 82, + 0x68A: 82, + 0x68B: 82, + 0x68C: 82, + 0x68D: 82, + 0x68E: 82, + 0x68F: 82, + 0x690: 82, + 0x691: 82, + 0x692: 82, + 0x693: 82, + 0x694: 82, + 0x695: 82, + 0x696: 82, + 0x697: 82, + 0x698: 82, + 0x699: 82, + 0x69A: 68, + 0x69B: 68, + 0x69C: 68, + 0x69D: 68, + 0x69E: 68, + 0x69F: 68, + 0x6A0: 68, + 0x6A1: 68, + 0x6A2: 68, + 0x6A3: 68, + 0x6A4: 68, + 0x6A5: 68, + 0x6A6: 68, + 0x6A7: 68, + 0x6A8: 68, + 0x6A9: 68, + 0x6AA: 68, + 0x6AB: 68, + 0x6AC: 68, + 0x6AD: 68, + 0x6AE: 68, + 0x6AF: 68, + 0x6B0: 68, + 0x6B1: 68, + 0x6B2: 68, + 0x6B3: 68, + 0x6B4: 68, + 0x6B5: 68, + 0x6B6: 68, + 0x6B7: 68, + 0x6B8: 68, + 0x6B9: 68, + 0x6BA: 68, + 0x6BB: 68, + 0x6BC: 68, + 0x6BD: 68, + 0x6BE: 68, + 0x6BF: 68, + 0x6C0: 82, + 0x6C1: 68, + 0x6C2: 68, + 0x6C3: 82, + 0x6C4: 82, + 0x6C5: 82, + 0x6C6: 82, + 0x6C7: 82, + 0x6C8: 82, + 0x6C9: 82, + 0x6CA: 82, + 0x6CB: 82, + 0x6CC: 68, + 0x6CD: 82, + 0x6CE: 68, + 0x6CF: 82, + 0x6D0: 68, + 0x6D1: 68, + 0x6D2: 82, + 0x6D3: 82, + 0x6D5: 82, + 0x6D6: 84, + 0x6D7: 84, + 0x6D8: 84, + 0x6D9: 84, + 0x6DA: 84, + 0x6DB: 84, + 0x6DC: 84, + 0x6DF: 84, + 0x6E0: 84, + 0x6E1: 84, + 0x6E2: 84, + 0x6E3: 84, + 0x6E4: 84, + 0x6E7: 84, + 0x6E8: 84, + 0x6EA: 84, + 
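Editor's note: the integer values in joining_types are simply the ASCII codes of the Unicode Joining_Type letters (C=67, D=68, L=76, R=82, T=84), which is why valid_contextj in core.py compares entries against ord("T"), ord("L"), and so on. A spot check against the rows above, not part of the patch:

    >>> from idna import idnadata
    >>> idnadata.joining_types[0x628] == ord('D')   # ARABIC LETTER BEH: dual-joining
    True
    >>> idnadata.joining_types[0x627] == ord('R')   # ARABIC LETTER ALEF: right-joining
    True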
0x6EB: 84, + 0x6EC: 84, + 0x6ED: 84, + 0x6EE: 82, + 0x6EF: 82, + 0x6FA: 68, + 0x6FB: 68, + 0x6FC: 68, + 0x6FF: 68, + 0x70F: 84, + 0x710: 82, + 0x711: 84, + 0x712: 68, + 0x713: 68, + 0x714: 68, + 0x715: 82, + 0x716: 82, + 0x717: 82, + 0x718: 82, + 0x719: 82, + 0x71A: 68, + 0x71B: 68, + 0x71C: 68, + 0x71D: 68, + 0x71E: 82, + 0x71F: 68, + 0x720: 68, + 0x721: 68, + 0x722: 68, + 0x723: 68, + 0x724: 68, + 0x725: 68, + 0x726: 68, + 0x727: 68, + 0x728: 82, + 0x729: 68, + 0x72A: 82, + 0x72B: 68, + 0x72C: 82, + 0x72D: 68, + 0x72E: 68, + 0x72F: 82, + 0x730: 84, + 0x731: 84, + 0x732: 84, + 0x733: 84, + 0x734: 84, + 0x735: 84, + 0x736: 84, + 0x737: 84, + 0x738: 84, + 0x739: 84, + 0x73A: 84, + 0x73B: 84, + 0x73C: 84, + 0x73D: 84, + 0x73E: 84, + 0x73F: 84, + 0x740: 84, + 0x741: 84, + 0x742: 84, + 0x743: 84, + 0x744: 84, + 0x745: 84, + 0x746: 84, + 0x747: 84, + 0x748: 84, + 0x749: 84, + 0x74A: 84, + 0x74D: 82, + 0x74E: 68, + 0x74F: 68, + 0x750: 68, + 0x751: 68, + 0x752: 68, + 0x753: 68, + 0x754: 68, + 0x755: 68, + 0x756: 68, + 0x757: 68, + 0x758: 68, + 0x759: 82, + 0x75A: 82, + 0x75B: 82, + 0x75C: 68, + 0x75D: 68, + 0x75E: 68, + 0x75F: 68, + 0x760: 68, + 0x761: 68, + 0x762: 68, + 0x763: 68, + 0x764: 68, + 0x765: 68, + 0x766: 68, + 0x767: 68, + 0x768: 68, + 0x769: 68, + 0x76A: 68, + 0x76B: 82, + 0x76C: 82, + 0x76D: 68, + 0x76E: 68, + 0x76F: 68, + 0x770: 68, + 0x771: 82, + 0x772: 68, + 0x773: 82, + 0x774: 82, + 0x775: 68, + 0x776: 68, + 0x777: 68, + 0x778: 82, + 0x779: 82, + 0x77A: 68, + 0x77B: 68, + 0x77C: 68, + 0x77D: 68, + 0x77E: 68, + 0x77F: 68, + 0x7A6: 84, + 0x7A7: 84, + 0x7A8: 84, + 0x7A9: 84, + 0x7AA: 84, + 0x7AB: 84, + 0x7AC: 84, + 0x7AD: 84, + 0x7AE: 84, + 0x7AF: 84, + 0x7B0: 84, + 0x7CA: 68, + 0x7CB: 68, + 0x7CC: 68, + 0x7CD: 68, + 0x7CE: 68, + 0x7CF: 68, + 0x7D0: 68, + 0x7D1: 68, + 0x7D2: 68, + 0x7D3: 68, + 0x7D4: 68, + 0x7D5: 68, + 0x7D6: 68, + 0x7D7: 68, + 0x7D8: 68, + 0x7D9: 68, + 0x7DA: 68, + 0x7DB: 68, + 0x7DC: 68, + 0x7DD: 68, + 0x7DE: 68, + 0x7DF: 68, + 0x7E0: 68, + 0x7E1: 68, + 0x7E2: 68, + 0x7E3: 68, + 0x7E4: 68, + 0x7E5: 68, + 0x7E6: 68, + 0x7E7: 68, + 0x7E8: 68, + 0x7E9: 68, + 0x7EA: 68, + 0x7EB: 84, + 0x7EC: 84, + 0x7ED: 84, + 0x7EE: 84, + 0x7EF: 84, + 0x7F0: 84, + 0x7F1: 84, + 0x7F2: 84, + 0x7F3: 84, + 0x7FA: 67, + 0x7FD: 84, + 0x816: 84, + 0x817: 84, + 0x818: 84, + 0x819: 84, + 0x81B: 84, + 0x81C: 84, + 0x81D: 84, + 0x81E: 84, + 0x81F: 84, + 0x820: 84, + 0x821: 84, + 0x822: 84, + 0x823: 84, + 0x825: 84, + 0x826: 84, + 0x827: 84, + 0x829: 84, + 0x82A: 84, + 0x82B: 84, + 0x82C: 84, + 0x82D: 84, + 0x840: 82, + 0x841: 68, + 0x842: 68, + 0x843: 68, + 0x844: 68, + 0x845: 68, + 0x846: 82, + 0x847: 82, + 0x848: 68, + 0x849: 82, + 0x84A: 68, + 0x84B: 68, + 0x84C: 68, + 0x84D: 68, + 0x84E: 68, + 0x84F: 68, + 0x850: 68, + 0x851: 68, + 0x852: 68, + 0x853: 68, + 0x854: 82, + 0x855: 68, + 0x856: 82, + 0x857: 82, + 0x858: 82, + 0x859: 84, + 0x85A: 84, + 0x85B: 84, + 0x860: 68, + 0x862: 68, + 0x863: 68, + 0x864: 68, + 0x865: 68, + 0x867: 82, + 0x868: 68, + 0x869: 82, + 0x86A: 82, + 0x870: 82, + 0x871: 82, + 0x872: 82, + 0x873: 82, + 0x874: 82, + 0x875: 82, + 0x876: 82, + 0x877: 82, + 0x878: 82, + 0x879: 82, + 0x87A: 82, + 0x87B: 82, + 0x87C: 82, + 0x87D: 82, + 0x87E: 82, + 0x87F: 82, + 0x880: 82, + 0x881: 82, + 0x882: 82, + 0x883: 67, + 0x884: 67, + 0x885: 67, + 0x886: 68, + 0x889: 68, + 0x88A: 68, + 0x88B: 68, + 0x88C: 68, + 0x88D: 68, + 0x88E: 82, + 0x898: 84, + 0x899: 84, + 0x89A: 84, + 0x89B: 84, + 0x89C: 84, + 0x89D: 84, + 0x89E: 84, + 0x89F: 84, + 0x8A0: 68, + 0x8A1: 68, + 0x8A2: 68, + 
0x8A3: 68, + 0x8A4: 68, + 0x8A5: 68, + 0x8A6: 68, + 0x8A7: 68, + 0x8A8: 68, + 0x8A9: 68, + 0x8AA: 82, + 0x8AB: 82, + 0x8AC: 82, + 0x8AE: 82, + 0x8AF: 68, + 0x8B0: 68, + 0x8B1: 82, + 0x8B2: 82, + 0x8B3: 68, + 0x8B4: 68, + 0x8B5: 68, + 0x8B6: 68, + 0x8B7: 68, + 0x8B8: 68, + 0x8B9: 82, + 0x8BA: 68, + 0x8BB: 68, + 0x8BC: 68, + 0x8BD: 68, + 0x8BE: 68, + 0x8BF: 68, + 0x8C0: 68, + 0x8C1: 68, + 0x8C2: 68, + 0x8C3: 68, + 0x8C4: 68, + 0x8C5: 68, + 0x8C6: 68, + 0x8C7: 68, + 0x8C8: 68, + 0x8CA: 84, + 0x8CB: 84, + 0x8CC: 84, + 0x8CD: 84, + 0x8CE: 84, + 0x8CF: 84, + 0x8D0: 84, + 0x8D1: 84, + 0x8D2: 84, + 0x8D3: 84, + 0x8D4: 84, + 0x8D5: 84, + 0x8D6: 84, + 0x8D7: 84, + 0x8D8: 84, + 0x8D9: 84, + 0x8DA: 84, + 0x8DB: 84, + 0x8DC: 84, + 0x8DD: 84, + 0x8DE: 84, + 0x8DF: 84, + 0x8E0: 84, + 0x8E1: 84, + 0x8E3: 84, + 0x8E4: 84, + 0x8E5: 84, + 0x8E6: 84, + 0x8E7: 84, + 0x8E8: 84, + 0x8E9: 84, + 0x8EA: 84, + 0x8EB: 84, + 0x8EC: 84, + 0x8ED: 84, + 0x8EE: 84, + 0x8EF: 84, + 0x8F0: 84, + 0x8F1: 84, + 0x8F2: 84, + 0x8F3: 84, + 0x8F4: 84, + 0x8F5: 84, + 0x8F6: 84, + 0x8F7: 84, + 0x8F8: 84, + 0x8F9: 84, + 0x8FA: 84, + 0x8FB: 84, + 0x8FC: 84, + 0x8FD: 84, + 0x8FE: 84, + 0x8FF: 84, + 0x900: 84, + 0x901: 84, + 0x902: 84, + 0x93A: 84, + 0x93C: 84, + 0x941: 84, + 0x942: 84, + 0x943: 84, + 0x944: 84, + 0x945: 84, + 0x946: 84, + 0x947: 84, + 0x948: 84, + 0x94D: 84, + 0x951: 84, + 0x952: 84, + 0x953: 84, + 0x954: 84, + 0x955: 84, + 0x956: 84, + 0x957: 84, + 0x962: 84, + 0x963: 84, + 0x981: 84, + 0x9BC: 84, + 0x9C1: 84, + 0x9C2: 84, + 0x9C3: 84, + 0x9C4: 84, + 0x9CD: 84, + 0x9E2: 84, + 0x9E3: 84, + 0x9FE: 84, + 0xA01: 84, + 0xA02: 84, + 0xA3C: 84, + 0xA41: 84, + 0xA42: 84, + 0xA47: 84, + 0xA48: 84, + 0xA4B: 84, + 0xA4C: 84, + 0xA4D: 84, + 0xA51: 84, + 0xA70: 84, + 0xA71: 84, + 0xA75: 84, + 0xA81: 84, + 0xA82: 84, + 0xABC: 84, + 0xAC1: 84, + 0xAC2: 84, + 0xAC3: 84, + 0xAC4: 84, + 0xAC5: 84, + 0xAC7: 84, + 0xAC8: 84, + 0xACD: 84, + 0xAE2: 84, + 0xAE3: 84, + 0xAFA: 84, + 0xAFB: 84, + 0xAFC: 84, + 0xAFD: 84, + 0xAFE: 84, + 0xAFF: 84, + 0xB01: 84, + 0xB3C: 84, + 0xB3F: 84, + 0xB41: 84, + 0xB42: 84, + 0xB43: 84, + 0xB44: 84, + 0xB4D: 84, + 0xB55: 84, + 0xB56: 84, + 0xB62: 84, + 0xB63: 84, + 0xB82: 84, + 0xBC0: 84, + 0xBCD: 84, + 0xC00: 84, + 0xC04: 84, + 0xC3C: 84, + 0xC3E: 84, + 0xC3F: 84, + 0xC40: 84, + 0xC46: 84, + 0xC47: 84, + 0xC48: 84, + 0xC4A: 84, + 0xC4B: 84, + 0xC4C: 84, + 0xC4D: 84, + 0xC55: 84, + 0xC56: 84, + 0xC62: 84, + 0xC63: 84, + 0xC81: 84, + 0xCBC: 84, + 0xCBF: 84, + 0xCC6: 84, + 0xCCC: 84, + 0xCCD: 84, + 0xCE2: 84, + 0xCE3: 84, + 0xD00: 84, + 0xD01: 84, + 0xD3B: 84, + 0xD3C: 84, + 0xD41: 84, + 0xD42: 84, + 0xD43: 84, + 0xD44: 84, + 0xD4D: 84, + 0xD62: 84, + 0xD63: 84, + 0xD81: 84, + 0xDCA: 84, + 0xDD2: 84, + 0xDD3: 84, + 0xDD4: 84, + 0xDD6: 84, + 0xE31: 84, + 0xE34: 84, + 0xE35: 84, + 0xE36: 84, + 0xE37: 84, + 0xE38: 84, + 0xE39: 84, + 0xE3A: 84, + 0xE47: 84, + 0xE48: 84, + 0xE49: 84, + 0xE4A: 84, + 0xE4B: 84, + 0xE4C: 84, + 0xE4D: 84, + 0xE4E: 84, + 0xEB1: 84, + 0xEB4: 84, + 0xEB5: 84, + 0xEB6: 84, + 0xEB7: 84, + 0xEB8: 84, + 0xEB9: 84, + 0xEBA: 84, + 0xEBB: 84, + 0xEBC: 84, + 0xEC8: 84, + 0xEC9: 84, + 0xECA: 84, + 0xECB: 84, + 0xECC: 84, + 0xECD: 84, + 0xECE: 84, + 0xF18: 84, + 0xF19: 84, + 0xF35: 84, + 0xF37: 84, + 0xF39: 84, + 0xF71: 84, + 0xF72: 84, + 0xF73: 84, + 0xF74: 84, + 0xF75: 84, + 0xF76: 84, + 0xF77: 84, + 0xF78: 84, + 0xF79: 84, + 0xF7A: 84, + 0xF7B: 84, + 0xF7C: 84, + 0xF7D: 84, + 0xF7E: 84, + 0xF80: 84, + 0xF81: 84, + 0xF82: 84, + 0xF83: 84, + 0xF84: 84, + 0xF86: 84, + 0xF87: 84, + 0xF8D: 84, + 
0xF8E: 84, + 0xF8F: 84, + 0xF90: 84, + 0xF91: 84, + 0xF92: 84, + 0xF93: 84, + 0xF94: 84, + 0xF95: 84, + 0xF96: 84, + 0xF97: 84, + 0xF99: 84, + 0xF9A: 84, + 0xF9B: 84, + 0xF9C: 84, + 0xF9D: 84, + 0xF9E: 84, + 0xF9F: 84, + 0xFA0: 84, + 0xFA1: 84, + 0xFA2: 84, + 0xFA3: 84, + 0xFA4: 84, + 0xFA5: 84, + 0xFA6: 84, + 0xFA7: 84, + 0xFA8: 84, + 0xFA9: 84, + 0xFAA: 84, + 0xFAB: 84, + 0xFAC: 84, + 0xFAD: 84, + 0xFAE: 84, + 0xFAF: 84, + 0xFB0: 84, + 0xFB1: 84, + 0xFB2: 84, + 0xFB3: 84, + 0xFB4: 84, + 0xFB5: 84, + 0xFB6: 84, + 0xFB7: 84, + 0xFB8: 84, + 0xFB9: 84, + 0xFBA: 84, + 0xFBB: 84, + 0xFBC: 84, + 0xFC6: 84, + 0x102D: 84, + 0x102E: 84, + 0x102F: 84, + 0x1030: 84, + 0x1032: 84, + 0x1033: 84, + 0x1034: 84, + 0x1035: 84, + 0x1036: 84, + 0x1037: 84, + 0x1039: 84, + 0x103A: 84, + 0x103D: 84, + 0x103E: 84, + 0x1058: 84, + 0x1059: 84, + 0x105E: 84, + 0x105F: 84, + 0x1060: 84, + 0x1071: 84, + 0x1072: 84, + 0x1073: 84, + 0x1074: 84, + 0x1082: 84, + 0x1085: 84, + 0x1086: 84, + 0x108D: 84, + 0x109D: 84, + 0x135D: 84, + 0x135E: 84, + 0x135F: 84, + 0x1712: 84, + 0x1713: 84, + 0x1714: 84, + 0x1732: 84, + 0x1733: 84, + 0x1752: 84, + 0x1753: 84, + 0x1772: 84, + 0x1773: 84, + 0x17B4: 84, + 0x17B5: 84, + 0x17B7: 84, + 0x17B8: 84, + 0x17B9: 84, + 0x17BA: 84, + 0x17BB: 84, + 0x17BC: 84, + 0x17BD: 84, + 0x17C6: 84, + 0x17C9: 84, + 0x17CA: 84, + 0x17CB: 84, + 0x17CC: 84, + 0x17CD: 84, + 0x17CE: 84, + 0x17CF: 84, + 0x17D0: 84, + 0x17D1: 84, + 0x17D2: 84, + 0x17D3: 84, + 0x17DD: 84, + 0x1807: 68, + 0x180A: 67, + 0x180B: 84, + 0x180C: 84, + 0x180D: 84, + 0x180F: 84, + 0x1820: 68, + 0x1821: 68, + 0x1822: 68, + 0x1823: 68, + 0x1824: 68, + 0x1825: 68, + 0x1826: 68, + 0x1827: 68, + 0x1828: 68, + 0x1829: 68, + 0x182A: 68, + 0x182B: 68, + 0x182C: 68, + 0x182D: 68, + 0x182E: 68, + 0x182F: 68, + 0x1830: 68, + 0x1831: 68, + 0x1832: 68, + 0x1833: 68, + 0x1834: 68, + 0x1835: 68, + 0x1836: 68, + 0x1837: 68, + 0x1838: 68, + 0x1839: 68, + 0x183A: 68, + 0x183B: 68, + 0x183C: 68, + 0x183D: 68, + 0x183E: 68, + 0x183F: 68, + 0x1840: 68, + 0x1841: 68, + 0x1842: 68, + 0x1843: 68, + 0x1844: 68, + 0x1845: 68, + 0x1846: 68, + 0x1847: 68, + 0x1848: 68, + 0x1849: 68, + 0x184A: 68, + 0x184B: 68, + 0x184C: 68, + 0x184D: 68, + 0x184E: 68, + 0x184F: 68, + 0x1850: 68, + 0x1851: 68, + 0x1852: 68, + 0x1853: 68, + 0x1854: 68, + 0x1855: 68, + 0x1856: 68, + 0x1857: 68, + 0x1858: 68, + 0x1859: 68, + 0x185A: 68, + 0x185B: 68, + 0x185C: 68, + 0x185D: 68, + 0x185E: 68, + 0x185F: 68, + 0x1860: 68, + 0x1861: 68, + 0x1862: 68, + 0x1863: 68, + 0x1864: 68, + 0x1865: 68, + 0x1866: 68, + 0x1867: 68, + 0x1868: 68, + 0x1869: 68, + 0x186A: 68, + 0x186B: 68, + 0x186C: 68, + 0x186D: 68, + 0x186E: 68, + 0x186F: 68, + 0x1870: 68, + 0x1871: 68, + 0x1872: 68, + 0x1873: 68, + 0x1874: 68, + 0x1875: 68, + 0x1876: 68, + 0x1877: 68, + 0x1878: 68, + 0x1885: 84, + 0x1886: 84, + 0x1887: 68, + 0x1888: 68, + 0x1889: 68, + 0x188A: 68, + 0x188B: 68, + 0x188C: 68, + 0x188D: 68, + 0x188E: 68, + 0x188F: 68, + 0x1890: 68, + 0x1891: 68, + 0x1892: 68, + 0x1893: 68, + 0x1894: 68, + 0x1895: 68, + 0x1896: 68, + 0x1897: 68, + 0x1898: 68, + 0x1899: 68, + 0x189A: 68, + 0x189B: 68, + 0x189C: 68, + 0x189D: 68, + 0x189E: 68, + 0x189F: 68, + 0x18A0: 68, + 0x18A1: 68, + 0x18A2: 68, + 0x18A3: 68, + 0x18A4: 68, + 0x18A5: 68, + 0x18A6: 68, + 0x18A7: 68, + 0x18A8: 68, + 0x18A9: 84, + 0x18AA: 68, + 0x1920: 84, + 0x1921: 84, + 0x1922: 84, + 0x1927: 84, + 0x1928: 84, + 0x1932: 84, + 0x1939: 84, + 0x193A: 84, + 0x193B: 84, + 0x1A17: 84, + 0x1A18: 84, + 0x1A1B: 84, + 0x1A56: 84, + 0x1A58: 84, + 0x1A59: 84, + 
0x1A5A: 84, + 0x1A5B: 84, + 0x1A5C: 84, + 0x1A5D: 84, + 0x1A5E: 84, + 0x1A60: 84, + 0x1A62: 84, + 0x1A65: 84, + 0x1A66: 84, + 0x1A67: 84, + 0x1A68: 84, + 0x1A69: 84, + 0x1A6A: 84, + 0x1A6B: 84, + 0x1A6C: 84, + 0x1A73: 84, + 0x1A74: 84, + 0x1A75: 84, + 0x1A76: 84, + 0x1A77: 84, + 0x1A78: 84, + 0x1A79: 84, + 0x1A7A: 84, + 0x1A7B: 84, + 0x1A7C: 84, + 0x1A7F: 84, + 0x1AB0: 84, + 0x1AB1: 84, + 0x1AB2: 84, + 0x1AB3: 84, + 0x1AB4: 84, + 0x1AB5: 84, + 0x1AB6: 84, + 0x1AB7: 84, + 0x1AB8: 84, + 0x1AB9: 84, + 0x1ABA: 84, + 0x1ABB: 84, + 0x1ABC: 84, + 0x1ABD: 84, + 0x1ABE: 84, + 0x1ABF: 84, + 0x1AC0: 84, + 0x1AC1: 84, + 0x1AC2: 84, + 0x1AC3: 84, + 0x1AC4: 84, + 0x1AC5: 84, + 0x1AC6: 84, + 0x1AC7: 84, + 0x1AC8: 84, + 0x1AC9: 84, + 0x1ACA: 84, + 0x1ACB: 84, + 0x1ACC: 84, + 0x1ACD: 84, + 0x1ACE: 84, + 0x1B00: 84, + 0x1B01: 84, + 0x1B02: 84, + 0x1B03: 84, + 0x1B34: 84, + 0x1B36: 84, + 0x1B37: 84, + 0x1B38: 84, + 0x1B39: 84, + 0x1B3A: 84, + 0x1B3C: 84, + 0x1B42: 84, + 0x1B6B: 84, + 0x1B6C: 84, + 0x1B6D: 84, + 0x1B6E: 84, + 0x1B6F: 84, + 0x1B70: 84, + 0x1B71: 84, + 0x1B72: 84, + 0x1B73: 84, + 0x1B80: 84, + 0x1B81: 84, + 0x1BA2: 84, + 0x1BA3: 84, + 0x1BA4: 84, + 0x1BA5: 84, + 0x1BA8: 84, + 0x1BA9: 84, + 0x1BAB: 84, + 0x1BAC: 84, + 0x1BAD: 84, + 0x1BE6: 84, + 0x1BE8: 84, + 0x1BE9: 84, + 0x1BED: 84, + 0x1BEF: 84, + 0x1BF0: 84, + 0x1BF1: 84, + 0x1C2C: 84, + 0x1C2D: 84, + 0x1C2E: 84, + 0x1C2F: 84, + 0x1C30: 84, + 0x1C31: 84, + 0x1C32: 84, + 0x1C33: 84, + 0x1C36: 84, + 0x1C37: 84, + 0x1CD0: 84, + 0x1CD1: 84, + 0x1CD2: 84, + 0x1CD4: 84, + 0x1CD5: 84, + 0x1CD6: 84, + 0x1CD7: 84, + 0x1CD8: 84, + 0x1CD9: 84, + 0x1CDA: 84, + 0x1CDB: 84, + 0x1CDC: 84, + 0x1CDD: 84, + 0x1CDE: 84, + 0x1CDF: 84, + 0x1CE0: 84, + 0x1CE2: 84, + 0x1CE3: 84, + 0x1CE4: 84, + 0x1CE5: 84, + 0x1CE6: 84, + 0x1CE7: 84, + 0x1CE8: 84, + 0x1CED: 84, + 0x1CF4: 84, + 0x1CF8: 84, + 0x1CF9: 84, + 0x1DC0: 84, + 0x1DC1: 84, + 0x1DC2: 84, + 0x1DC3: 84, + 0x1DC4: 84, + 0x1DC5: 84, + 0x1DC6: 84, + 0x1DC7: 84, + 0x1DC8: 84, + 0x1DC9: 84, + 0x1DCA: 84, + 0x1DCB: 84, + 0x1DCC: 84, + 0x1DCD: 84, + 0x1DCE: 84, + 0x1DCF: 84, + 0x1DD0: 84, + 0x1DD1: 84, + 0x1DD2: 84, + 0x1DD3: 84, + 0x1DD4: 84, + 0x1DD5: 84, + 0x1DD6: 84, + 0x1DD7: 84, + 0x1DD8: 84, + 0x1DD9: 84, + 0x1DDA: 84, + 0x1DDB: 84, + 0x1DDC: 84, + 0x1DDD: 84, + 0x1DDE: 84, + 0x1DDF: 84, + 0x1DE0: 84, + 0x1DE1: 84, + 0x1DE2: 84, + 0x1DE3: 84, + 0x1DE4: 84, + 0x1DE5: 84, + 0x1DE6: 84, + 0x1DE7: 84, + 0x1DE8: 84, + 0x1DE9: 84, + 0x1DEA: 84, + 0x1DEB: 84, + 0x1DEC: 84, + 0x1DED: 84, + 0x1DEE: 84, + 0x1DEF: 84, + 0x1DF0: 84, + 0x1DF1: 84, + 0x1DF2: 84, + 0x1DF3: 84, + 0x1DF4: 84, + 0x1DF5: 84, + 0x1DF6: 84, + 0x1DF7: 84, + 0x1DF8: 84, + 0x1DF9: 84, + 0x1DFA: 84, + 0x1DFB: 84, + 0x1DFC: 84, + 0x1DFD: 84, + 0x1DFE: 84, + 0x1DFF: 84, + 0x200B: 84, + 0x200D: 67, + 0x200E: 84, + 0x200F: 84, + 0x202A: 84, + 0x202B: 84, + 0x202C: 84, + 0x202D: 84, + 0x202E: 84, + 0x2060: 84, + 0x2061: 84, + 0x2062: 84, + 0x2063: 84, + 0x2064: 84, + 0x206A: 84, + 0x206B: 84, + 0x206C: 84, + 0x206D: 84, + 0x206E: 84, + 0x206F: 84, + 0x20D0: 84, + 0x20D1: 84, + 0x20D2: 84, + 0x20D3: 84, + 0x20D4: 84, + 0x20D5: 84, + 0x20D6: 84, + 0x20D7: 84, + 0x20D8: 84, + 0x20D9: 84, + 0x20DA: 84, + 0x20DB: 84, + 0x20DC: 84, + 0x20DD: 84, + 0x20DE: 84, + 0x20DF: 84, + 0x20E0: 84, + 0x20E1: 84, + 0x20E2: 84, + 0x20E3: 84, + 0x20E4: 84, + 0x20E5: 84, + 0x20E6: 84, + 0x20E7: 84, + 0x20E8: 84, + 0x20E9: 84, + 0x20EA: 84, + 0x20EB: 84, + 0x20EC: 84, + 0x20ED: 84, + 0x20EE: 84, + 0x20EF: 84, + 0x20F0: 84, + 0x2CEF: 84, + 0x2CF0: 84, + 0x2CF1: 84, + 0x2D7F: 84, 
+ 0x2DE0: 84, + 0x2DE1: 84, + 0x2DE2: 84, + 0x2DE3: 84, + 0x2DE4: 84, + 0x2DE5: 84, + 0x2DE6: 84, + 0x2DE7: 84, + 0x2DE8: 84, + 0x2DE9: 84, + 0x2DEA: 84, + 0x2DEB: 84, + 0x2DEC: 84, + 0x2DED: 84, + 0x2DEE: 84, + 0x2DEF: 84, + 0x2DF0: 84, + 0x2DF1: 84, + 0x2DF2: 84, + 0x2DF3: 84, + 0x2DF4: 84, + 0x2DF5: 84, + 0x2DF6: 84, + 0x2DF7: 84, + 0x2DF8: 84, + 0x2DF9: 84, + 0x2DFA: 84, + 0x2DFB: 84, + 0x2DFC: 84, + 0x2DFD: 84, + 0x2DFE: 84, + 0x2DFF: 84, + 0x302A: 84, + 0x302B: 84, + 0x302C: 84, + 0x302D: 84, + 0x3099: 84, + 0x309A: 84, + 0xA66F: 84, + 0xA670: 84, + 0xA671: 84, + 0xA672: 84, + 0xA674: 84, + 0xA675: 84, + 0xA676: 84, + 0xA677: 84, + 0xA678: 84, + 0xA679: 84, + 0xA67A: 84, + 0xA67B: 84, + 0xA67C: 84, + 0xA67D: 84, + 0xA69E: 84, + 0xA69F: 84, + 0xA6F0: 84, + 0xA6F1: 84, + 0xA802: 84, + 0xA806: 84, + 0xA80B: 84, + 0xA825: 84, + 0xA826: 84, + 0xA82C: 84, + 0xA840: 68, + 0xA841: 68, + 0xA842: 68, + 0xA843: 68, + 0xA844: 68, + 0xA845: 68, + 0xA846: 68, + 0xA847: 68, + 0xA848: 68, + 0xA849: 68, + 0xA84A: 68, + 0xA84B: 68, + 0xA84C: 68, + 0xA84D: 68, + 0xA84E: 68, + 0xA84F: 68, + 0xA850: 68, + 0xA851: 68, + 0xA852: 68, + 0xA853: 68, + 0xA854: 68, + 0xA855: 68, + 0xA856: 68, + 0xA857: 68, + 0xA858: 68, + 0xA859: 68, + 0xA85A: 68, + 0xA85B: 68, + 0xA85C: 68, + 0xA85D: 68, + 0xA85E: 68, + 0xA85F: 68, + 0xA860: 68, + 0xA861: 68, + 0xA862: 68, + 0xA863: 68, + 0xA864: 68, + 0xA865: 68, + 0xA866: 68, + 0xA867: 68, + 0xA868: 68, + 0xA869: 68, + 0xA86A: 68, + 0xA86B: 68, + 0xA86C: 68, + 0xA86D: 68, + 0xA86E: 68, + 0xA86F: 68, + 0xA870: 68, + 0xA871: 68, + 0xA872: 76, + 0xA8C4: 84, + 0xA8C5: 84, + 0xA8E0: 84, + 0xA8E1: 84, + 0xA8E2: 84, + 0xA8E3: 84, + 0xA8E4: 84, + 0xA8E5: 84, + 0xA8E6: 84, + 0xA8E7: 84, + 0xA8E8: 84, + 0xA8E9: 84, + 0xA8EA: 84, + 0xA8EB: 84, + 0xA8EC: 84, + 0xA8ED: 84, + 0xA8EE: 84, + 0xA8EF: 84, + 0xA8F0: 84, + 0xA8F1: 84, + 0xA8FF: 84, + 0xA926: 84, + 0xA927: 84, + 0xA928: 84, + 0xA929: 84, + 0xA92A: 84, + 0xA92B: 84, + 0xA92C: 84, + 0xA92D: 84, + 0xA947: 84, + 0xA948: 84, + 0xA949: 84, + 0xA94A: 84, + 0xA94B: 84, + 0xA94C: 84, + 0xA94D: 84, + 0xA94E: 84, + 0xA94F: 84, + 0xA950: 84, + 0xA951: 84, + 0xA980: 84, + 0xA981: 84, + 0xA982: 84, + 0xA9B3: 84, + 0xA9B6: 84, + 0xA9B7: 84, + 0xA9B8: 84, + 0xA9B9: 84, + 0xA9BC: 84, + 0xA9BD: 84, + 0xA9E5: 84, + 0xAA29: 84, + 0xAA2A: 84, + 0xAA2B: 84, + 0xAA2C: 84, + 0xAA2D: 84, + 0xAA2E: 84, + 0xAA31: 84, + 0xAA32: 84, + 0xAA35: 84, + 0xAA36: 84, + 0xAA43: 84, + 0xAA4C: 84, + 0xAA7C: 84, + 0xAAB0: 84, + 0xAAB2: 84, + 0xAAB3: 84, + 0xAAB4: 84, + 0xAAB7: 84, + 0xAAB8: 84, + 0xAABE: 84, + 0xAABF: 84, + 0xAAC1: 84, + 0xAAEC: 84, + 0xAAED: 84, + 0xAAF6: 84, + 0xABE5: 84, + 0xABE8: 84, + 0xABED: 84, + 0xFB1E: 84, + 0xFE00: 84, + 0xFE01: 84, + 0xFE02: 84, + 0xFE03: 84, + 0xFE04: 84, + 0xFE05: 84, + 0xFE06: 84, + 0xFE07: 84, + 0xFE08: 84, + 0xFE09: 84, + 0xFE0A: 84, + 0xFE0B: 84, + 0xFE0C: 84, + 0xFE0D: 84, + 0xFE0E: 84, + 0xFE0F: 84, + 0xFE20: 84, + 0xFE21: 84, + 0xFE22: 84, + 0xFE23: 84, + 0xFE24: 84, + 0xFE25: 84, + 0xFE26: 84, + 0xFE27: 84, + 0xFE28: 84, + 0xFE29: 84, + 0xFE2A: 84, + 0xFE2B: 84, + 0xFE2C: 84, + 0xFE2D: 84, + 0xFE2E: 84, + 0xFE2F: 84, + 0xFEFF: 84, + 0xFFF9: 84, + 0xFFFA: 84, + 0xFFFB: 84, + 0x101FD: 84, + 0x102E0: 84, + 0x10376: 84, + 0x10377: 84, + 0x10378: 84, + 0x10379: 84, + 0x1037A: 84, + 0x10A01: 84, + 0x10A02: 84, + 0x10A03: 84, + 0x10A05: 84, + 0x10A06: 84, + 0x10A0C: 84, + 0x10A0D: 84, + 0x10A0E: 84, + 0x10A0F: 84, + 0x10A38: 84, + 0x10A39: 84, + 0x10A3A: 84, + 0x10A3F: 84, + 0x10AC0: 68, + 0x10AC1: 68, + 0x10AC2: 68, + 
0x10AC3: 68, + 0x10AC4: 68, + 0x10AC5: 82, + 0x10AC7: 82, + 0x10AC9: 82, + 0x10ACA: 82, + 0x10ACD: 76, + 0x10ACE: 82, + 0x10ACF: 82, + 0x10AD0: 82, + 0x10AD1: 82, + 0x10AD2: 82, + 0x10AD3: 68, + 0x10AD4: 68, + 0x10AD5: 68, + 0x10AD6: 68, + 0x10AD7: 76, + 0x10AD8: 68, + 0x10AD9: 68, + 0x10ADA: 68, + 0x10ADB: 68, + 0x10ADC: 68, + 0x10ADD: 82, + 0x10ADE: 68, + 0x10ADF: 68, + 0x10AE0: 68, + 0x10AE1: 82, + 0x10AE4: 82, + 0x10AE5: 84, + 0x10AE6: 84, + 0x10AEB: 68, + 0x10AEC: 68, + 0x10AED: 68, + 0x10AEE: 68, + 0x10AEF: 82, + 0x10B80: 68, + 0x10B81: 82, + 0x10B82: 68, + 0x10B83: 82, + 0x10B84: 82, + 0x10B85: 82, + 0x10B86: 68, + 0x10B87: 68, + 0x10B88: 68, + 0x10B89: 82, + 0x10B8A: 68, + 0x10B8B: 68, + 0x10B8C: 82, + 0x10B8D: 68, + 0x10B8E: 82, + 0x10B8F: 82, + 0x10B90: 68, + 0x10B91: 82, + 0x10BA9: 82, + 0x10BAA: 82, + 0x10BAB: 82, + 0x10BAC: 82, + 0x10BAD: 68, + 0x10BAE: 68, + 0x10D00: 76, + 0x10D01: 68, + 0x10D02: 68, + 0x10D03: 68, + 0x10D04: 68, + 0x10D05: 68, + 0x10D06: 68, + 0x10D07: 68, + 0x10D08: 68, + 0x10D09: 68, + 0x10D0A: 68, + 0x10D0B: 68, + 0x10D0C: 68, + 0x10D0D: 68, + 0x10D0E: 68, + 0x10D0F: 68, + 0x10D10: 68, + 0x10D11: 68, + 0x10D12: 68, + 0x10D13: 68, + 0x10D14: 68, + 0x10D15: 68, + 0x10D16: 68, + 0x10D17: 68, + 0x10D18: 68, + 0x10D19: 68, + 0x10D1A: 68, + 0x10D1B: 68, + 0x10D1C: 68, + 0x10D1D: 68, + 0x10D1E: 68, + 0x10D1F: 68, + 0x10D20: 68, + 0x10D21: 68, + 0x10D22: 82, + 0x10D23: 68, + 0x10D24: 84, + 0x10D25: 84, + 0x10D26: 84, + 0x10D27: 84, + 0x10EAB: 84, + 0x10EAC: 84, + 0x10EFD: 84, + 0x10EFE: 84, + 0x10EFF: 84, + 0x10F30: 68, + 0x10F31: 68, + 0x10F32: 68, + 0x10F33: 82, + 0x10F34: 68, + 0x10F35: 68, + 0x10F36: 68, + 0x10F37: 68, + 0x10F38: 68, + 0x10F39: 68, + 0x10F3A: 68, + 0x10F3B: 68, + 0x10F3C: 68, + 0x10F3D: 68, + 0x10F3E: 68, + 0x10F3F: 68, + 0x10F40: 68, + 0x10F41: 68, + 0x10F42: 68, + 0x10F43: 68, + 0x10F44: 68, + 0x10F46: 84, + 0x10F47: 84, + 0x10F48: 84, + 0x10F49: 84, + 0x10F4A: 84, + 0x10F4B: 84, + 0x10F4C: 84, + 0x10F4D: 84, + 0x10F4E: 84, + 0x10F4F: 84, + 0x10F50: 84, + 0x10F51: 68, + 0x10F52: 68, + 0x10F53: 68, + 0x10F54: 82, + 0x10F70: 68, + 0x10F71: 68, + 0x10F72: 68, + 0x10F73: 68, + 0x10F74: 82, + 0x10F75: 82, + 0x10F76: 68, + 0x10F77: 68, + 0x10F78: 68, + 0x10F79: 68, + 0x10F7A: 68, + 0x10F7B: 68, + 0x10F7C: 68, + 0x10F7D: 68, + 0x10F7E: 68, + 0x10F7F: 68, + 0x10F80: 68, + 0x10F81: 68, + 0x10F82: 84, + 0x10F83: 84, + 0x10F84: 84, + 0x10F85: 84, + 0x10FB0: 68, + 0x10FB2: 68, + 0x10FB3: 68, + 0x10FB4: 82, + 0x10FB5: 82, + 0x10FB6: 82, + 0x10FB8: 68, + 0x10FB9: 82, + 0x10FBA: 82, + 0x10FBB: 68, + 0x10FBC: 68, + 0x10FBD: 82, + 0x10FBE: 68, + 0x10FBF: 68, + 0x10FC1: 68, + 0x10FC2: 82, + 0x10FC3: 82, + 0x10FC4: 68, + 0x10FC9: 82, + 0x10FCA: 68, + 0x10FCB: 76, + 0x11001: 84, + 0x11038: 84, + 0x11039: 84, + 0x1103A: 84, + 0x1103B: 84, + 0x1103C: 84, + 0x1103D: 84, + 0x1103E: 84, + 0x1103F: 84, + 0x11040: 84, + 0x11041: 84, + 0x11042: 84, + 0x11043: 84, + 0x11044: 84, + 0x11045: 84, + 0x11046: 84, + 0x11070: 84, + 0x11073: 84, + 0x11074: 84, + 0x1107F: 84, + 0x11080: 84, + 0x11081: 84, + 0x110B3: 84, + 0x110B4: 84, + 0x110B5: 84, + 0x110B6: 84, + 0x110B9: 84, + 0x110BA: 84, + 0x110C2: 84, + 0x11100: 84, + 0x11101: 84, + 0x11102: 84, + 0x11127: 84, + 0x11128: 84, + 0x11129: 84, + 0x1112A: 84, + 0x1112B: 84, + 0x1112D: 84, + 0x1112E: 84, + 0x1112F: 84, + 0x11130: 84, + 0x11131: 84, + 0x11132: 84, + 0x11133: 84, + 0x11134: 84, + 0x11173: 84, + 0x11180: 84, + 0x11181: 84, + 0x111B6: 84, + 0x111B7: 84, + 0x111B8: 84, + 0x111B9: 84, + 0x111BA: 84, + 0x111BB: 84, + 
0x111BC: 84, + 0x111BD: 84, + 0x111BE: 84, + 0x111C9: 84, + 0x111CA: 84, + 0x111CB: 84, + 0x111CC: 84, + 0x111CF: 84, + 0x1122F: 84, + 0x11230: 84, + 0x11231: 84, + 0x11234: 84, + 0x11236: 84, + 0x11237: 84, + 0x1123E: 84, + 0x11241: 84, + 0x112DF: 84, + 0x112E3: 84, + 0x112E4: 84, + 0x112E5: 84, + 0x112E6: 84, + 0x112E7: 84, + 0x112E8: 84, + 0x112E9: 84, + 0x112EA: 84, + 0x11300: 84, + 0x11301: 84, + 0x1133B: 84, + 0x1133C: 84, + 0x11340: 84, + 0x11366: 84, + 0x11367: 84, + 0x11368: 84, + 0x11369: 84, + 0x1136A: 84, + 0x1136B: 84, + 0x1136C: 84, + 0x11370: 84, + 0x11371: 84, + 0x11372: 84, + 0x11373: 84, + 0x11374: 84, + 0x11438: 84, + 0x11439: 84, + 0x1143A: 84, + 0x1143B: 84, + 0x1143C: 84, + 0x1143D: 84, + 0x1143E: 84, + 0x1143F: 84, + 0x11442: 84, + 0x11443: 84, + 0x11444: 84, + 0x11446: 84, + 0x1145E: 84, + 0x114B3: 84, + 0x114B4: 84, + 0x114B5: 84, + 0x114B6: 84, + 0x114B7: 84, + 0x114B8: 84, + 0x114BA: 84, + 0x114BF: 84, + 0x114C0: 84, + 0x114C2: 84, + 0x114C3: 84, + 0x115B2: 84, + 0x115B3: 84, + 0x115B4: 84, + 0x115B5: 84, + 0x115BC: 84, + 0x115BD: 84, + 0x115BF: 84, + 0x115C0: 84, + 0x115DC: 84, + 0x115DD: 84, + 0x11633: 84, + 0x11634: 84, + 0x11635: 84, + 0x11636: 84, + 0x11637: 84, + 0x11638: 84, + 0x11639: 84, + 0x1163A: 84, + 0x1163D: 84, + 0x1163F: 84, + 0x11640: 84, + 0x116AB: 84, + 0x116AD: 84, + 0x116B0: 84, + 0x116B1: 84, + 0x116B2: 84, + 0x116B3: 84, + 0x116B4: 84, + 0x116B5: 84, + 0x116B7: 84, + 0x1171D: 84, + 0x1171E: 84, + 0x1171F: 84, + 0x11722: 84, + 0x11723: 84, + 0x11724: 84, + 0x11725: 84, + 0x11727: 84, + 0x11728: 84, + 0x11729: 84, + 0x1172A: 84, + 0x1172B: 84, + 0x1182F: 84, + 0x11830: 84, + 0x11831: 84, + 0x11832: 84, + 0x11833: 84, + 0x11834: 84, + 0x11835: 84, + 0x11836: 84, + 0x11837: 84, + 0x11839: 84, + 0x1183A: 84, + 0x1193B: 84, + 0x1193C: 84, + 0x1193E: 84, + 0x11943: 84, + 0x119D4: 84, + 0x119D5: 84, + 0x119D6: 84, + 0x119D7: 84, + 0x119DA: 84, + 0x119DB: 84, + 0x119E0: 84, + 0x11A01: 84, + 0x11A02: 84, + 0x11A03: 84, + 0x11A04: 84, + 0x11A05: 84, + 0x11A06: 84, + 0x11A07: 84, + 0x11A08: 84, + 0x11A09: 84, + 0x11A0A: 84, + 0x11A33: 84, + 0x11A34: 84, + 0x11A35: 84, + 0x11A36: 84, + 0x11A37: 84, + 0x11A38: 84, + 0x11A3B: 84, + 0x11A3C: 84, + 0x11A3D: 84, + 0x11A3E: 84, + 0x11A47: 84, + 0x11A51: 84, + 0x11A52: 84, + 0x11A53: 84, + 0x11A54: 84, + 0x11A55: 84, + 0x11A56: 84, + 0x11A59: 84, + 0x11A5A: 84, + 0x11A5B: 84, + 0x11A8A: 84, + 0x11A8B: 84, + 0x11A8C: 84, + 0x11A8D: 84, + 0x11A8E: 84, + 0x11A8F: 84, + 0x11A90: 84, + 0x11A91: 84, + 0x11A92: 84, + 0x11A93: 84, + 0x11A94: 84, + 0x11A95: 84, + 0x11A96: 84, + 0x11A98: 84, + 0x11A99: 84, + 0x11C30: 84, + 0x11C31: 84, + 0x11C32: 84, + 0x11C33: 84, + 0x11C34: 84, + 0x11C35: 84, + 0x11C36: 84, + 0x11C38: 84, + 0x11C39: 84, + 0x11C3A: 84, + 0x11C3B: 84, + 0x11C3C: 84, + 0x11C3D: 84, + 0x11C3F: 84, + 0x11C92: 84, + 0x11C93: 84, + 0x11C94: 84, + 0x11C95: 84, + 0x11C96: 84, + 0x11C97: 84, + 0x11C98: 84, + 0x11C99: 84, + 0x11C9A: 84, + 0x11C9B: 84, + 0x11C9C: 84, + 0x11C9D: 84, + 0x11C9E: 84, + 0x11C9F: 84, + 0x11CA0: 84, + 0x11CA1: 84, + 0x11CA2: 84, + 0x11CA3: 84, + 0x11CA4: 84, + 0x11CA5: 84, + 0x11CA6: 84, + 0x11CA7: 84, + 0x11CAA: 84, + 0x11CAB: 84, + 0x11CAC: 84, + 0x11CAD: 84, + 0x11CAE: 84, + 0x11CAF: 84, + 0x11CB0: 84, + 0x11CB2: 84, + 0x11CB3: 84, + 0x11CB5: 84, + 0x11CB6: 84, + 0x11D31: 84, + 0x11D32: 84, + 0x11D33: 84, + 0x11D34: 84, + 0x11D35: 84, + 0x11D36: 84, + 0x11D3A: 84, + 0x11D3C: 84, + 0x11D3D: 84, + 0x11D3F: 84, + 0x11D40: 84, + 0x11D41: 84, + 0x11D42: 84, + 0x11D43: 84, + 0x11D44: 84, + 
0x11D45: 84, + 0x11D47: 84, + 0x11D90: 84, + 0x11D91: 84, + 0x11D95: 84, + 0x11D97: 84, + 0x11EF3: 84, + 0x11EF4: 84, + 0x11F00: 84, + 0x11F01: 84, + 0x11F36: 84, + 0x11F37: 84, + 0x11F38: 84, + 0x11F39: 84, + 0x11F3A: 84, + 0x11F40: 84, + 0x11F42: 84, + 0x13430: 84, + 0x13431: 84, + 0x13432: 84, + 0x13433: 84, + 0x13434: 84, + 0x13435: 84, + 0x13436: 84, + 0x13437: 84, + 0x13438: 84, + 0x13439: 84, + 0x1343A: 84, + 0x1343B: 84, + 0x1343C: 84, + 0x1343D: 84, + 0x1343E: 84, + 0x1343F: 84, + 0x13440: 84, + 0x13447: 84, + 0x13448: 84, + 0x13449: 84, + 0x1344A: 84, + 0x1344B: 84, + 0x1344C: 84, + 0x1344D: 84, + 0x1344E: 84, + 0x1344F: 84, + 0x13450: 84, + 0x13451: 84, + 0x13452: 84, + 0x13453: 84, + 0x13454: 84, + 0x13455: 84, + 0x16AF0: 84, + 0x16AF1: 84, + 0x16AF2: 84, + 0x16AF3: 84, + 0x16AF4: 84, + 0x16B30: 84, + 0x16B31: 84, + 0x16B32: 84, + 0x16B33: 84, + 0x16B34: 84, + 0x16B35: 84, + 0x16B36: 84, + 0x16F4F: 84, + 0x16F8F: 84, + 0x16F90: 84, + 0x16F91: 84, + 0x16F92: 84, + 0x16FE4: 84, + 0x1BC9D: 84, + 0x1BC9E: 84, + 0x1BCA0: 84, + 0x1BCA1: 84, + 0x1BCA2: 84, + 0x1BCA3: 84, + 0x1CF00: 84, + 0x1CF01: 84, + 0x1CF02: 84, + 0x1CF03: 84, + 0x1CF04: 84, + 0x1CF05: 84, + 0x1CF06: 84, + 0x1CF07: 84, + 0x1CF08: 84, + 0x1CF09: 84, + 0x1CF0A: 84, + 0x1CF0B: 84, + 0x1CF0C: 84, + 0x1CF0D: 84, + 0x1CF0E: 84, + 0x1CF0F: 84, + 0x1CF10: 84, + 0x1CF11: 84, + 0x1CF12: 84, + 0x1CF13: 84, + 0x1CF14: 84, + 0x1CF15: 84, + 0x1CF16: 84, + 0x1CF17: 84, + 0x1CF18: 84, + 0x1CF19: 84, + 0x1CF1A: 84, + 0x1CF1B: 84, + 0x1CF1C: 84, + 0x1CF1D: 84, + 0x1CF1E: 84, + 0x1CF1F: 84, + 0x1CF20: 84, + 0x1CF21: 84, + 0x1CF22: 84, + 0x1CF23: 84, + 0x1CF24: 84, + 0x1CF25: 84, + 0x1CF26: 84, + 0x1CF27: 84, + 0x1CF28: 84, + 0x1CF29: 84, + 0x1CF2A: 84, + 0x1CF2B: 84, + 0x1CF2C: 84, + 0x1CF2D: 84, + 0x1CF30: 84, + 0x1CF31: 84, + 0x1CF32: 84, + 0x1CF33: 84, + 0x1CF34: 84, + 0x1CF35: 84, + 0x1CF36: 84, + 0x1CF37: 84, + 0x1CF38: 84, + 0x1CF39: 84, + 0x1CF3A: 84, + 0x1CF3B: 84, + 0x1CF3C: 84, + 0x1CF3D: 84, + 0x1CF3E: 84, + 0x1CF3F: 84, + 0x1CF40: 84, + 0x1CF41: 84, + 0x1CF42: 84, + 0x1CF43: 84, + 0x1CF44: 84, + 0x1CF45: 84, + 0x1CF46: 84, + 0x1D167: 84, + 0x1D168: 84, + 0x1D169: 84, + 0x1D173: 84, + 0x1D174: 84, + 0x1D175: 84, + 0x1D176: 84, + 0x1D177: 84, + 0x1D178: 84, + 0x1D179: 84, + 0x1D17A: 84, + 0x1D17B: 84, + 0x1D17C: 84, + 0x1D17D: 84, + 0x1D17E: 84, + 0x1D17F: 84, + 0x1D180: 84, + 0x1D181: 84, + 0x1D182: 84, + 0x1D185: 84, + 0x1D186: 84, + 0x1D187: 84, + 0x1D188: 84, + 0x1D189: 84, + 0x1D18A: 84, + 0x1D18B: 84, + 0x1D1AA: 84, + 0x1D1AB: 84, + 0x1D1AC: 84, + 0x1D1AD: 84, + 0x1D242: 84, + 0x1D243: 84, + 0x1D244: 84, + 0x1DA00: 84, + 0x1DA01: 84, + 0x1DA02: 84, + 0x1DA03: 84, + 0x1DA04: 84, + 0x1DA05: 84, + 0x1DA06: 84, + 0x1DA07: 84, + 0x1DA08: 84, + 0x1DA09: 84, + 0x1DA0A: 84, + 0x1DA0B: 84, + 0x1DA0C: 84, + 0x1DA0D: 84, + 0x1DA0E: 84, + 0x1DA0F: 84, + 0x1DA10: 84, + 0x1DA11: 84, + 0x1DA12: 84, + 0x1DA13: 84, + 0x1DA14: 84, + 0x1DA15: 84, + 0x1DA16: 84, + 0x1DA17: 84, + 0x1DA18: 84, + 0x1DA19: 84, + 0x1DA1A: 84, + 0x1DA1B: 84, + 0x1DA1C: 84, + 0x1DA1D: 84, + 0x1DA1E: 84, + 0x1DA1F: 84, + 0x1DA20: 84, + 0x1DA21: 84, + 0x1DA22: 84, + 0x1DA23: 84, + 0x1DA24: 84, + 0x1DA25: 84, + 0x1DA26: 84, + 0x1DA27: 84, + 0x1DA28: 84, + 0x1DA29: 84, + 0x1DA2A: 84, + 0x1DA2B: 84, + 0x1DA2C: 84, + 0x1DA2D: 84, + 0x1DA2E: 84, + 0x1DA2F: 84, + 0x1DA30: 84, + 0x1DA31: 84, + 0x1DA32: 84, + 0x1DA33: 84, + 0x1DA34: 84, + 0x1DA35: 84, + 0x1DA36: 84, + 0x1DA3B: 84, + 0x1DA3C: 84, + 0x1DA3D: 84, + 0x1DA3E: 84, + 0x1DA3F: 84, + 0x1DA40: 84, + 0x1DA41: 84, + 
0x1DA42: 84, + 0x1DA43: 84, + 0x1DA44: 84, + 0x1DA45: 84, + 0x1DA46: 84, + 0x1DA47: 84, + 0x1DA48: 84, + 0x1DA49: 84, + 0x1DA4A: 84, + 0x1DA4B: 84, + 0x1DA4C: 84, + 0x1DA4D: 84, + 0x1DA4E: 84, + 0x1DA4F: 84, + 0x1DA50: 84, + 0x1DA51: 84, + 0x1DA52: 84, + 0x1DA53: 84, + 0x1DA54: 84, + 0x1DA55: 84, + 0x1DA56: 84, + 0x1DA57: 84, + 0x1DA58: 84, + 0x1DA59: 84, + 0x1DA5A: 84, + 0x1DA5B: 84, + 0x1DA5C: 84, + 0x1DA5D: 84, + 0x1DA5E: 84, + 0x1DA5F: 84, + 0x1DA60: 84, + 0x1DA61: 84, + 0x1DA62: 84, + 0x1DA63: 84, + 0x1DA64: 84, + 0x1DA65: 84, + 0x1DA66: 84, + 0x1DA67: 84, + 0x1DA68: 84, + 0x1DA69: 84, + 0x1DA6A: 84, + 0x1DA6B: 84, + 0x1DA6C: 84, + 0x1DA75: 84, + 0x1DA84: 84, + 0x1DA9B: 84, + 0x1DA9C: 84, + 0x1DA9D: 84, + 0x1DA9E: 84, + 0x1DA9F: 84, + 0x1DAA1: 84, + 0x1DAA2: 84, + 0x1DAA3: 84, + 0x1DAA4: 84, + 0x1DAA5: 84, + 0x1DAA6: 84, + 0x1DAA7: 84, + 0x1DAA8: 84, + 0x1DAA9: 84, + 0x1DAAA: 84, + 0x1DAAB: 84, + 0x1DAAC: 84, + 0x1DAAD: 84, + 0x1DAAE: 84, + 0x1DAAF: 84, + 0x1E000: 84, + 0x1E001: 84, + 0x1E002: 84, + 0x1E003: 84, + 0x1E004: 84, + 0x1E005: 84, + 0x1E006: 84, + 0x1E008: 84, + 0x1E009: 84, + 0x1E00A: 84, + 0x1E00B: 84, + 0x1E00C: 84, + 0x1E00D: 84, + 0x1E00E: 84, + 0x1E00F: 84, + 0x1E010: 84, + 0x1E011: 84, + 0x1E012: 84, + 0x1E013: 84, + 0x1E014: 84, + 0x1E015: 84, + 0x1E016: 84, + 0x1E017: 84, + 0x1E018: 84, + 0x1E01B: 84, + 0x1E01C: 84, + 0x1E01D: 84, + 0x1E01E: 84, + 0x1E01F: 84, + 0x1E020: 84, + 0x1E021: 84, + 0x1E023: 84, + 0x1E024: 84, + 0x1E026: 84, + 0x1E027: 84, + 0x1E028: 84, + 0x1E029: 84, + 0x1E02A: 84, + 0x1E08F: 84, + 0x1E130: 84, + 0x1E131: 84, + 0x1E132: 84, + 0x1E133: 84, + 0x1E134: 84, + 0x1E135: 84, + 0x1E136: 84, + 0x1E2AE: 84, + 0x1E2EC: 84, + 0x1E2ED: 84, + 0x1E2EE: 84, + 0x1E2EF: 84, + 0x1E4EC: 84, + 0x1E4ED: 84, + 0x1E4EE: 84, + 0x1E4EF: 84, + 0x1E8D0: 84, + 0x1E8D1: 84, + 0x1E8D2: 84, + 0x1E8D3: 84, + 0x1E8D4: 84, + 0x1E8D5: 84, + 0x1E8D6: 84, + 0x1E900: 68, + 0x1E901: 68, + 0x1E902: 68, + 0x1E903: 68, + 0x1E904: 68, + 0x1E905: 68, + 0x1E906: 68, + 0x1E907: 68, + 0x1E908: 68, + 0x1E909: 68, + 0x1E90A: 68, + 0x1E90B: 68, + 0x1E90C: 68, + 0x1E90D: 68, + 0x1E90E: 68, + 0x1E90F: 68, + 0x1E910: 68, + 0x1E911: 68, + 0x1E912: 68, + 0x1E913: 68, + 0x1E914: 68, + 0x1E915: 68, + 0x1E916: 68, + 0x1E917: 68, + 0x1E918: 68, + 0x1E919: 68, + 0x1E91A: 68, + 0x1E91B: 68, + 0x1E91C: 68, + 0x1E91D: 68, + 0x1E91E: 68, + 0x1E91F: 68, + 0x1E920: 68, + 0x1E921: 68, + 0x1E922: 68, + 0x1E923: 68, + 0x1E924: 68, + 0x1E925: 68, + 0x1E926: 68, + 0x1E927: 68, + 0x1E928: 68, + 0x1E929: 68, + 0x1E92A: 68, + 0x1E92B: 68, + 0x1E92C: 68, + 0x1E92D: 68, + 0x1E92E: 68, + 0x1E92F: 68, + 0x1E930: 68, + 0x1E931: 68, + 0x1E932: 68, + 0x1E933: 68, + 0x1E934: 68, + 0x1E935: 68, + 0x1E936: 68, + 0x1E937: 68, + 0x1E938: 68, + 0x1E939: 68, + 0x1E93A: 68, + 0x1E93B: 68, + 0x1E93C: 68, + 0x1E93D: 68, + 0x1E93E: 68, + 0x1E93F: 68, + 0x1E940: 68, + 0x1E941: 68, + 0x1E942: 68, + 0x1E943: 68, + 0x1E944: 84, + 0x1E945: 84, + 0x1E946: 84, + 0x1E947: 84, + 0x1E948: 84, + 0x1E949: 84, + 0x1E94A: 84, + 0x1E94B: 84, + 0xE0001: 84, + 0xE0020: 84, + 0xE0021: 84, + 0xE0022: 84, + 0xE0023: 84, + 0xE0024: 84, + 0xE0025: 84, + 0xE0026: 84, + 0xE0027: 84, + 0xE0028: 84, + 0xE0029: 84, + 0xE002A: 84, + 0xE002B: 84, + 0xE002C: 84, + 0xE002D: 84, + 0xE002E: 84, + 0xE002F: 84, + 0xE0030: 84, + 0xE0031: 84, + 0xE0032: 84, + 0xE0033: 84, + 0xE0034: 84, + 0xE0035: 84, + 0xE0036: 84, + 0xE0037: 84, + 0xE0038: 84, + 0xE0039: 84, + 0xE003A: 84, + 0xE003B: 84, + 0xE003C: 84, + 0xE003D: 84, + 0xE003E: 84, + 0xE003F: 84, + 0xE0040: 84, + 
0xE0041: 84, + 0xE0042: 84, + 0xE0043: 84, + 0xE0044: 84, + 0xE0045: 84, + 0xE0046: 84, + 0xE0047: 84, + 0xE0048: 84, + 0xE0049: 84, + 0xE004A: 84, + 0xE004B: 84, + 0xE004C: 84, + 0xE004D: 84, + 0xE004E: 84, + 0xE004F: 84, + 0xE0050: 84, + 0xE0051: 84, + 0xE0052: 84, + 0xE0053: 84, + 0xE0054: 84, + 0xE0055: 84, + 0xE0056: 84, + 0xE0057: 84, + 0xE0058: 84, + 0xE0059: 84, + 0xE005A: 84, + 0xE005B: 84, + 0xE005C: 84, + 0xE005D: 84, + 0xE005E: 84, + 0xE005F: 84, + 0xE0060: 84, + 0xE0061: 84, + 0xE0062: 84, + 0xE0063: 84, + 0xE0064: 84, + 0xE0065: 84, + 0xE0066: 84, + 0xE0067: 84, + 0xE0068: 84, + 0xE0069: 84, + 0xE006A: 84, + 0xE006B: 84, + 0xE006C: 84, + 0xE006D: 84, + 0xE006E: 84, + 0xE006F: 84, + 0xE0070: 84, + 0xE0071: 84, + 0xE0072: 84, + 0xE0073: 84, + 0xE0074: 84, + 0xE0075: 84, + 0xE0076: 84, + 0xE0077: 84, + 0xE0078: 84, + 0xE0079: 84, + 0xE007A: 84, + 0xE007B: 84, + 0xE007C: 84, + 0xE007D: 84, + 0xE007E: 84, + 0xE007F: 84, + 0xE0100: 84, + 0xE0101: 84, + 0xE0102: 84, + 0xE0103: 84, + 0xE0104: 84, + 0xE0105: 84, + 0xE0106: 84, + 0xE0107: 84, + 0xE0108: 84, + 0xE0109: 84, + 0xE010A: 84, + 0xE010B: 84, + 0xE010C: 84, + 0xE010D: 84, + 0xE010E: 84, + 0xE010F: 84, + 0xE0110: 84, + 0xE0111: 84, + 0xE0112: 84, + 0xE0113: 84, + 0xE0114: 84, + 0xE0115: 84, + 0xE0116: 84, + 0xE0117: 84, + 0xE0118: 84, + 0xE0119: 84, + 0xE011A: 84, + 0xE011B: 84, + 0xE011C: 84, + 0xE011D: 84, + 0xE011E: 84, + 0xE011F: 84, + 0xE0120: 84, + 0xE0121: 84, + 0xE0122: 84, + 0xE0123: 84, + 0xE0124: 84, + 0xE0125: 84, + 0xE0126: 84, + 0xE0127: 84, + 0xE0128: 84, + 0xE0129: 84, + 0xE012A: 84, + 0xE012B: 84, + 0xE012C: 84, + 0xE012D: 84, + 0xE012E: 84, + 0xE012F: 84, + 0xE0130: 84, + 0xE0131: 84, + 0xE0132: 84, + 0xE0133: 84, + 0xE0134: 84, + 0xE0135: 84, + 0xE0136: 84, + 0xE0137: 84, + 0xE0138: 84, + 0xE0139: 84, + 0xE013A: 84, + 0xE013B: 84, + 0xE013C: 84, + 0xE013D: 84, + 0xE013E: 84, + 0xE013F: 84, + 0xE0140: 84, + 0xE0141: 84, + 0xE0142: 84, + 0xE0143: 84, + 0xE0144: 84, + 0xE0145: 84, + 0xE0146: 84, + 0xE0147: 84, + 0xE0148: 84, + 0xE0149: 84, + 0xE014A: 84, + 0xE014B: 84, + 0xE014C: 84, + 0xE014D: 84, + 0xE014E: 84, + 0xE014F: 84, + 0xE0150: 84, + 0xE0151: 84, + 0xE0152: 84, + 0xE0153: 84, + 0xE0154: 84, + 0xE0155: 84, + 0xE0156: 84, + 0xE0157: 84, + 0xE0158: 84, + 0xE0159: 84, + 0xE015A: 84, + 0xE015B: 84, + 0xE015C: 84, + 0xE015D: 84, + 0xE015E: 84, + 0xE015F: 84, + 0xE0160: 84, + 0xE0161: 84, + 0xE0162: 84, + 0xE0163: 84, + 0xE0164: 84, + 0xE0165: 84, + 0xE0166: 84, + 0xE0167: 84, + 0xE0168: 84, + 0xE0169: 84, + 0xE016A: 84, + 0xE016B: 84, + 0xE016C: 84, + 0xE016D: 84, + 0xE016E: 84, + 0xE016F: 84, + 0xE0170: 84, + 0xE0171: 84, + 0xE0172: 84, + 0xE0173: 84, + 0xE0174: 84, + 0xE0175: 84, + 0xE0176: 84, + 0xE0177: 84, + 0xE0178: 84, + 0xE0179: 84, + 0xE017A: 84, + 0xE017B: 84, + 0xE017C: 84, + 0xE017D: 84, + 0xE017E: 84, + 0xE017F: 84, + 0xE0180: 84, + 0xE0181: 84, + 0xE0182: 84, + 0xE0183: 84, + 0xE0184: 84, + 0xE0185: 84, + 0xE0186: 84, + 0xE0187: 84, + 0xE0188: 84, + 0xE0189: 84, + 0xE018A: 84, + 0xE018B: 84, + 0xE018C: 84, + 0xE018D: 84, + 0xE018E: 84, + 0xE018F: 84, + 0xE0190: 84, + 0xE0191: 84, + 0xE0192: 84, + 0xE0193: 84, + 0xE0194: 84, + 0xE0195: 84, + 0xE0196: 84, + 0xE0197: 84, + 0xE0198: 84, + 0xE0199: 84, + 0xE019A: 84, + 0xE019B: 84, + 0xE019C: 84, + 0xE019D: 84, + 0xE019E: 84, + 0xE019F: 84, + 0xE01A0: 84, + 0xE01A1: 84, + 0xE01A2: 84, + 0xE01A3: 84, + 0xE01A4: 84, + 0xE01A5: 84, + 0xE01A6: 84, + 0xE01A7: 84, + 0xE01A8: 84, + 0xE01A9: 84, + 0xE01AA: 84, + 0xE01AB: 84, + 0xE01AC: 84, + 0xE01AD: 84, + 
0xE01AE: 84, + 0xE01AF: 84, + 0xE01B0: 84, + 0xE01B1: 84, + 0xE01B2: 84, + 0xE01B3: 84, + 0xE01B4: 84, + 0xE01B5: 84, + 0xE01B6: 84, + 0xE01B7: 84, + 0xE01B8: 84, + 0xE01B9: 84, + 0xE01BA: 84, + 0xE01BB: 84, + 0xE01BC: 84, + 0xE01BD: 84, + 0xE01BE: 84, + 0xE01BF: 84, + 0xE01C0: 84, + 0xE01C1: 84, + 0xE01C2: 84, + 0xE01C3: 84, + 0xE01C4: 84, + 0xE01C5: 84, + 0xE01C6: 84, + 0xE01C7: 84, + 0xE01C8: 84, + 0xE01C9: 84, + 0xE01CA: 84, + 0xE01CB: 84, + 0xE01CC: 84, + 0xE01CD: 84, + 0xE01CE: 84, + 0xE01CF: 84, + 0xE01D0: 84, + 0xE01D1: 84, + 0xE01D2: 84, + 0xE01D3: 84, + 0xE01D4: 84, + 0xE01D5: 84, + 0xE01D6: 84, + 0xE01D7: 84, + 0xE01D8: 84, + 0xE01D9: 84, + 0xE01DA: 84, + 0xE01DB: 84, + 0xE01DC: 84, + 0xE01DD: 84, + 0xE01DE: 84, + 0xE01DF: 84, + 0xE01E0: 84, + 0xE01E1: 84, + 0xE01E2: 84, + 0xE01E3: 84, + 0xE01E4: 84, + 0xE01E5: 84, + 0xE01E6: 84, + 0xE01E7: 84, + 0xE01E8: 84, + 0xE01E9: 84, + 0xE01EA: 84, + 0xE01EB: 84, + 0xE01EC: 84, + 0xE01ED: 84, + 0xE01EE: 84, + 0xE01EF: 84, +} +codepoint_classes = { + "PVALID": ( + 0x2D0000002E, + 0x300000003A, + 0x610000007B, + 0xDF000000F7, + 0xF800000100, + 0x10100000102, + 0x10300000104, + 0x10500000106, + 0x10700000108, + 0x1090000010A, + 0x10B0000010C, + 0x10D0000010E, + 0x10F00000110, + 0x11100000112, + 0x11300000114, + 0x11500000116, + 0x11700000118, + 0x1190000011A, + 0x11B0000011C, + 0x11D0000011E, + 0x11F00000120, + 0x12100000122, + 0x12300000124, + 0x12500000126, + 0x12700000128, + 0x1290000012A, + 0x12B0000012C, + 0x12D0000012E, + 0x12F00000130, + 0x13100000132, + 0x13500000136, + 0x13700000139, + 0x13A0000013B, + 0x13C0000013D, + 0x13E0000013F, + 0x14200000143, + 0x14400000145, + 0x14600000147, + 0x14800000149, + 0x14B0000014C, + 0x14D0000014E, + 0x14F00000150, + 0x15100000152, + 0x15300000154, + 0x15500000156, + 0x15700000158, + 0x1590000015A, + 0x15B0000015C, + 0x15D0000015E, + 0x15F00000160, + 0x16100000162, + 0x16300000164, + 0x16500000166, + 0x16700000168, + 0x1690000016A, + 0x16B0000016C, + 0x16D0000016E, + 0x16F00000170, + 0x17100000172, + 0x17300000174, + 0x17500000176, + 0x17700000178, + 0x17A0000017B, + 0x17C0000017D, + 0x17E0000017F, + 0x18000000181, + 0x18300000184, + 0x18500000186, + 0x18800000189, + 0x18C0000018E, + 0x19200000193, + 0x19500000196, + 0x1990000019C, + 0x19E0000019F, + 0x1A1000001A2, + 0x1A3000001A4, + 0x1A5000001A6, + 0x1A8000001A9, + 0x1AA000001AC, + 0x1AD000001AE, + 0x1B0000001B1, + 0x1B4000001B5, + 0x1B6000001B7, + 0x1B9000001BC, + 0x1BD000001C4, + 0x1CE000001CF, + 0x1D0000001D1, + 0x1D2000001D3, + 0x1D4000001D5, + 0x1D6000001D7, + 0x1D8000001D9, + 0x1DA000001DB, + 0x1DC000001DE, + 0x1DF000001E0, + 0x1E1000001E2, + 0x1E3000001E4, + 0x1E5000001E6, + 0x1E7000001E8, + 0x1E9000001EA, + 0x1EB000001EC, + 0x1ED000001EE, + 0x1EF000001F1, + 0x1F5000001F6, + 0x1F9000001FA, + 0x1FB000001FC, + 0x1FD000001FE, + 0x1FF00000200, + 0x20100000202, + 0x20300000204, + 0x20500000206, + 0x20700000208, + 0x2090000020A, + 0x20B0000020C, + 0x20D0000020E, + 0x20F00000210, + 0x21100000212, + 0x21300000214, + 0x21500000216, + 0x21700000218, + 0x2190000021A, + 0x21B0000021C, + 0x21D0000021E, + 0x21F00000220, + 0x22100000222, + 0x22300000224, + 0x22500000226, + 0x22700000228, + 0x2290000022A, + 0x22B0000022C, + 0x22D0000022E, + 0x22F00000230, + 0x23100000232, + 0x2330000023A, + 0x23C0000023D, + 0x23F00000241, + 0x24200000243, + 0x24700000248, + 0x2490000024A, + 0x24B0000024C, + 0x24D0000024E, + 0x24F000002B0, + 0x2B9000002C2, + 0x2C6000002D2, + 0x2EC000002ED, + 0x2EE000002EF, + 0x30000000340, + 0x34200000343, + 0x3460000034F, + 
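Editor's note: codepoint_classes uses the same packed-range encoding as scripts, and these PVALID/CONTEXTJ/CONTEXTO tuples are exactly what check_label in core.py bisects with intranges_contain to classify each codepoint. A spot check, not part of the patch:

    >>> from idna import idnadata
    >>> from idna.intranges import intranges_contain
    >>> intranges_contain(ord('a'), idnadata.codepoint_classes['PVALID'])
    True
    >>> intranges_contain(ord('A'), idnadata.codepoint_classes['PVALID'])   # uppercase is not PVALID
    False
    >>> intranges_contain(0x200D, idnadata.codepoint_classes['CONTEXTJ'])   # ZERO WIDTH JOINER
    True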
0x35000000370, + 0x37100000372, + 0x37300000374, + 0x37700000378, + 0x37B0000037E, + 0x39000000391, + 0x3AC000003CF, + 0x3D7000003D8, + 0x3D9000003DA, + 0x3DB000003DC, + 0x3DD000003DE, + 0x3DF000003E0, + 0x3E1000003E2, + 0x3E3000003E4, + 0x3E5000003E6, + 0x3E7000003E8, + 0x3E9000003EA, + 0x3EB000003EC, + 0x3ED000003EE, + 0x3EF000003F0, + 0x3F3000003F4, + 0x3F8000003F9, + 0x3FB000003FD, + 0x43000000460, + 0x46100000462, + 0x46300000464, + 0x46500000466, + 0x46700000468, + 0x4690000046A, + 0x46B0000046C, + 0x46D0000046E, + 0x46F00000470, + 0x47100000472, + 0x47300000474, + 0x47500000476, + 0x47700000478, + 0x4790000047A, + 0x47B0000047C, + 0x47D0000047E, + 0x47F00000480, + 0x48100000482, + 0x48300000488, + 0x48B0000048C, + 0x48D0000048E, + 0x48F00000490, + 0x49100000492, + 0x49300000494, + 0x49500000496, + 0x49700000498, + 0x4990000049A, + 0x49B0000049C, + 0x49D0000049E, + 0x49F000004A0, + 0x4A1000004A2, + 0x4A3000004A4, + 0x4A5000004A6, + 0x4A7000004A8, + 0x4A9000004AA, + 0x4AB000004AC, + 0x4AD000004AE, + 0x4AF000004B0, + 0x4B1000004B2, + 0x4B3000004B4, + 0x4B5000004B6, + 0x4B7000004B8, + 0x4B9000004BA, + 0x4BB000004BC, + 0x4BD000004BE, + 0x4BF000004C0, + 0x4C2000004C3, + 0x4C4000004C5, + 0x4C6000004C7, + 0x4C8000004C9, + 0x4CA000004CB, + 0x4CC000004CD, + 0x4CE000004D0, + 0x4D1000004D2, + 0x4D3000004D4, + 0x4D5000004D6, + 0x4D7000004D8, + 0x4D9000004DA, + 0x4DB000004DC, + 0x4DD000004DE, + 0x4DF000004E0, + 0x4E1000004E2, + 0x4E3000004E4, + 0x4E5000004E6, + 0x4E7000004E8, + 0x4E9000004EA, + 0x4EB000004EC, + 0x4ED000004EE, + 0x4EF000004F0, + 0x4F1000004F2, + 0x4F3000004F4, + 0x4F5000004F6, + 0x4F7000004F8, + 0x4F9000004FA, + 0x4FB000004FC, + 0x4FD000004FE, + 0x4FF00000500, + 0x50100000502, + 0x50300000504, + 0x50500000506, + 0x50700000508, + 0x5090000050A, + 0x50B0000050C, + 0x50D0000050E, + 0x50F00000510, + 0x51100000512, + 0x51300000514, + 0x51500000516, + 0x51700000518, + 0x5190000051A, + 0x51B0000051C, + 0x51D0000051E, + 0x51F00000520, + 0x52100000522, + 0x52300000524, + 0x52500000526, + 0x52700000528, + 0x5290000052A, + 0x52B0000052C, + 0x52D0000052E, + 0x52F00000530, + 0x5590000055A, + 0x56000000587, + 0x58800000589, + 0x591000005BE, + 0x5BF000005C0, + 0x5C1000005C3, + 0x5C4000005C6, + 0x5C7000005C8, + 0x5D0000005EB, + 0x5EF000005F3, + 0x6100000061B, + 0x62000000640, + 0x64100000660, + 0x66E00000675, + 0x679000006D4, + 0x6D5000006DD, + 0x6DF000006E9, + 0x6EA000006F0, + 0x6FA00000700, + 0x7100000074B, + 0x74D000007B2, + 0x7C0000007F6, + 0x7FD000007FE, + 0x8000000082E, + 0x8400000085C, + 0x8600000086B, + 0x87000000888, + 0x8890000088F, + 0x898000008E2, + 0x8E300000958, + 0x96000000964, + 0x96600000970, + 0x97100000984, + 0x9850000098D, + 0x98F00000991, + 0x993000009A9, + 0x9AA000009B1, + 0x9B2000009B3, + 0x9B6000009BA, + 0x9BC000009C5, + 0x9C7000009C9, + 0x9CB000009CF, + 0x9D7000009D8, + 0x9E0000009E4, + 0x9E6000009F2, + 0x9FC000009FD, + 0x9FE000009FF, + 0xA0100000A04, + 0xA0500000A0B, + 0xA0F00000A11, + 0xA1300000A29, + 0xA2A00000A31, + 0xA3200000A33, + 0xA3500000A36, + 0xA3800000A3A, + 0xA3C00000A3D, + 0xA3E00000A43, + 0xA4700000A49, + 0xA4B00000A4E, + 0xA5100000A52, + 0xA5C00000A5D, + 0xA6600000A76, + 0xA8100000A84, + 0xA8500000A8E, + 0xA8F00000A92, + 0xA9300000AA9, + 0xAAA00000AB1, + 0xAB200000AB4, + 0xAB500000ABA, + 0xABC00000AC6, + 0xAC700000ACA, + 0xACB00000ACE, + 0xAD000000AD1, + 0xAE000000AE4, + 0xAE600000AF0, + 0xAF900000B00, + 0xB0100000B04, + 0xB0500000B0D, + 0xB0F00000B11, + 0xB1300000B29, + 0xB2A00000B31, + 0xB3200000B34, + 0xB3500000B3A, + 0xB3C00000B45, + 0xB4700000B49, + 
0xB4B00000B4E, + 0xB5500000B58, + 0xB5F00000B64, + 0xB6600000B70, + 0xB7100000B72, + 0xB8200000B84, + 0xB8500000B8B, + 0xB8E00000B91, + 0xB9200000B96, + 0xB9900000B9B, + 0xB9C00000B9D, + 0xB9E00000BA0, + 0xBA300000BA5, + 0xBA800000BAB, + 0xBAE00000BBA, + 0xBBE00000BC3, + 0xBC600000BC9, + 0xBCA00000BCE, + 0xBD000000BD1, + 0xBD700000BD8, + 0xBE600000BF0, + 0xC0000000C0D, + 0xC0E00000C11, + 0xC1200000C29, + 0xC2A00000C3A, + 0xC3C00000C45, + 0xC4600000C49, + 0xC4A00000C4E, + 0xC5500000C57, + 0xC5800000C5B, + 0xC5D00000C5E, + 0xC6000000C64, + 0xC6600000C70, + 0xC8000000C84, + 0xC8500000C8D, + 0xC8E00000C91, + 0xC9200000CA9, + 0xCAA00000CB4, + 0xCB500000CBA, + 0xCBC00000CC5, + 0xCC600000CC9, + 0xCCA00000CCE, + 0xCD500000CD7, + 0xCDD00000CDF, + 0xCE000000CE4, + 0xCE600000CF0, + 0xCF100000CF4, + 0xD0000000D0D, + 0xD0E00000D11, + 0xD1200000D45, + 0xD4600000D49, + 0xD4A00000D4F, + 0xD5400000D58, + 0xD5F00000D64, + 0xD6600000D70, + 0xD7A00000D80, + 0xD8100000D84, + 0xD8500000D97, + 0xD9A00000DB2, + 0xDB300000DBC, + 0xDBD00000DBE, + 0xDC000000DC7, + 0xDCA00000DCB, + 0xDCF00000DD5, + 0xDD600000DD7, + 0xDD800000DE0, + 0xDE600000DF0, + 0xDF200000DF4, + 0xE0100000E33, + 0xE3400000E3B, + 0xE4000000E4F, + 0xE5000000E5A, + 0xE8100000E83, + 0xE8400000E85, + 0xE8600000E8B, + 0xE8C00000EA4, + 0xEA500000EA6, + 0xEA700000EB3, + 0xEB400000EBE, + 0xEC000000EC5, + 0xEC600000EC7, + 0xEC800000ECF, + 0xED000000EDA, + 0xEDE00000EE0, + 0xF0000000F01, + 0xF0B00000F0C, + 0xF1800000F1A, + 0xF2000000F2A, + 0xF3500000F36, + 0xF3700000F38, + 0xF3900000F3A, + 0xF3E00000F43, + 0xF4400000F48, + 0xF4900000F4D, + 0xF4E00000F52, + 0xF5300000F57, + 0xF5800000F5C, + 0xF5D00000F69, + 0xF6A00000F6D, + 0xF7100000F73, + 0xF7400000F75, + 0xF7A00000F81, + 0xF8200000F85, + 0xF8600000F93, + 0xF9400000F98, + 0xF9900000F9D, + 0xF9E00000FA2, + 0xFA300000FA7, + 0xFA800000FAC, + 0xFAD00000FB9, + 0xFBA00000FBD, + 0xFC600000FC7, + 0x10000000104A, + 0x10500000109E, + 0x10D0000010FB, + 0x10FD00001100, + 0x120000001249, + 0x124A0000124E, + 0x125000001257, + 0x125800001259, + 0x125A0000125E, + 0x126000001289, + 0x128A0000128E, + 0x1290000012B1, + 0x12B2000012B6, + 0x12B8000012BF, + 0x12C0000012C1, + 0x12C2000012C6, + 0x12C8000012D7, + 0x12D800001311, + 0x131200001316, + 0x13180000135B, + 0x135D00001360, + 0x138000001390, + 0x13A0000013F6, + 0x14010000166D, + 0x166F00001680, + 0x16810000169B, + 0x16A0000016EB, + 0x16F1000016F9, + 0x170000001716, + 0x171F00001735, + 0x174000001754, + 0x17600000176D, + 0x176E00001771, + 0x177200001774, + 0x1780000017B4, + 0x17B6000017D4, + 0x17D7000017D8, + 0x17DC000017DE, + 0x17E0000017EA, + 0x18100000181A, + 0x182000001879, + 0x1880000018AB, + 0x18B0000018F6, + 0x19000000191F, + 0x19200000192C, + 0x19300000193C, + 0x19460000196E, + 0x197000001975, + 0x1980000019AC, + 0x19B0000019CA, + 0x19D0000019DA, + 0x1A0000001A1C, + 0x1A2000001A5F, + 0x1A6000001A7D, + 0x1A7F00001A8A, + 0x1A9000001A9A, + 0x1AA700001AA8, + 0x1AB000001ABE, + 0x1ABF00001ACF, + 0x1B0000001B4D, + 0x1B5000001B5A, + 0x1B6B00001B74, + 0x1B8000001BF4, + 0x1C0000001C38, + 0x1C4000001C4A, + 0x1C4D00001C7E, + 0x1CD000001CD3, + 0x1CD400001CFB, + 0x1D0000001D2C, + 0x1D2F00001D30, + 0x1D3B00001D3C, + 0x1D4E00001D4F, + 0x1D6B00001D78, + 0x1D7900001D9B, + 0x1DC000001E00, + 0x1E0100001E02, + 0x1E0300001E04, + 0x1E0500001E06, + 0x1E0700001E08, + 0x1E0900001E0A, + 0x1E0B00001E0C, + 0x1E0D00001E0E, + 0x1E0F00001E10, + 0x1E1100001E12, + 0x1E1300001E14, + 0x1E1500001E16, + 0x1E1700001E18, + 0x1E1900001E1A, + 0x1E1B00001E1C, + 0x1E1D00001E1E, + 0x1E1F00001E20, + 
0x1E2100001E22, + 0x1E2300001E24, + 0x1E2500001E26, + 0x1E2700001E28, + 0x1E2900001E2A, + 0x1E2B00001E2C, + 0x1E2D00001E2E, + 0x1E2F00001E30, + 0x1E3100001E32, + 0x1E3300001E34, + 0x1E3500001E36, + 0x1E3700001E38, + 0x1E3900001E3A, + 0x1E3B00001E3C, + 0x1E3D00001E3E, + 0x1E3F00001E40, + 0x1E4100001E42, + 0x1E4300001E44, + 0x1E4500001E46, + 0x1E4700001E48, + 0x1E4900001E4A, + 0x1E4B00001E4C, + 0x1E4D00001E4E, + 0x1E4F00001E50, + 0x1E5100001E52, + 0x1E5300001E54, + 0x1E5500001E56, + 0x1E5700001E58, + 0x1E5900001E5A, + 0x1E5B00001E5C, + 0x1E5D00001E5E, + 0x1E5F00001E60, + 0x1E6100001E62, + 0x1E6300001E64, + 0x1E6500001E66, + 0x1E6700001E68, + 0x1E6900001E6A, + 0x1E6B00001E6C, + 0x1E6D00001E6E, + 0x1E6F00001E70, + 0x1E7100001E72, + 0x1E7300001E74, + 0x1E7500001E76, + 0x1E7700001E78, + 0x1E7900001E7A, + 0x1E7B00001E7C, + 0x1E7D00001E7E, + 0x1E7F00001E80, + 0x1E8100001E82, + 0x1E8300001E84, + 0x1E8500001E86, + 0x1E8700001E88, + 0x1E8900001E8A, + 0x1E8B00001E8C, + 0x1E8D00001E8E, + 0x1E8F00001E90, + 0x1E9100001E92, + 0x1E9300001E94, + 0x1E9500001E9A, + 0x1E9C00001E9E, + 0x1E9F00001EA0, + 0x1EA100001EA2, + 0x1EA300001EA4, + 0x1EA500001EA6, + 0x1EA700001EA8, + 0x1EA900001EAA, + 0x1EAB00001EAC, + 0x1EAD00001EAE, + 0x1EAF00001EB0, + 0x1EB100001EB2, + 0x1EB300001EB4, + 0x1EB500001EB6, + 0x1EB700001EB8, + 0x1EB900001EBA, + 0x1EBB00001EBC, + 0x1EBD00001EBE, + 0x1EBF00001EC0, + 0x1EC100001EC2, + 0x1EC300001EC4, + 0x1EC500001EC6, + 0x1EC700001EC8, + 0x1EC900001ECA, + 0x1ECB00001ECC, + 0x1ECD00001ECE, + 0x1ECF00001ED0, + 0x1ED100001ED2, + 0x1ED300001ED4, + 0x1ED500001ED6, + 0x1ED700001ED8, + 0x1ED900001EDA, + 0x1EDB00001EDC, + 0x1EDD00001EDE, + 0x1EDF00001EE0, + 0x1EE100001EE2, + 0x1EE300001EE4, + 0x1EE500001EE6, + 0x1EE700001EE8, + 0x1EE900001EEA, + 0x1EEB00001EEC, + 0x1EED00001EEE, + 0x1EEF00001EF0, + 0x1EF100001EF2, + 0x1EF300001EF4, + 0x1EF500001EF6, + 0x1EF700001EF8, + 0x1EF900001EFA, + 0x1EFB00001EFC, + 0x1EFD00001EFE, + 0x1EFF00001F08, + 0x1F1000001F16, + 0x1F2000001F28, + 0x1F3000001F38, + 0x1F4000001F46, + 0x1F5000001F58, + 0x1F6000001F68, + 0x1F7000001F71, + 0x1F7200001F73, + 0x1F7400001F75, + 0x1F7600001F77, + 0x1F7800001F79, + 0x1F7A00001F7B, + 0x1F7C00001F7D, + 0x1FB000001FB2, + 0x1FB600001FB7, + 0x1FC600001FC7, + 0x1FD000001FD3, + 0x1FD600001FD8, + 0x1FE000001FE3, + 0x1FE400001FE8, + 0x1FF600001FF7, + 0x214E0000214F, + 0x218400002185, + 0x2C3000002C60, + 0x2C6100002C62, + 0x2C6500002C67, + 0x2C6800002C69, + 0x2C6A00002C6B, + 0x2C6C00002C6D, + 0x2C7100002C72, + 0x2C7300002C75, + 0x2C7600002C7C, + 0x2C8100002C82, + 0x2C8300002C84, + 0x2C8500002C86, + 0x2C8700002C88, + 0x2C8900002C8A, + 0x2C8B00002C8C, + 0x2C8D00002C8E, + 0x2C8F00002C90, + 0x2C9100002C92, + 0x2C9300002C94, + 0x2C9500002C96, + 0x2C9700002C98, + 0x2C9900002C9A, + 0x2C9B00002C9C, + 0x2C9D00002C9E, + 0x2C9F00002CA0, + 0x2CA100002CA2, + 0x2CA300002CA4, + 0x2CA500002CA6, + 0x2CA700002CA8, + 0x2CA900002CAA, + 0x2CAB00002CAC, + 0x2CAD00002CAE, + 0x2CAF00002CB0, + 0x2CB100002CB2, + 0x2CB300002CB4, + 0x2CB500002CB6, + 0x2CB700002CB8, + 0x2CB900002CBA, + 0x2CBB00002CBC, + 0x2CBD00002CBE, + 0x2CBF00002CC0, + 0x2CC100002CC2, + 0x2CC300002CC4, + 0x2CC500002CC6, + 0x2CC700002CC8, + 0x2CC900002CCA, + 0x2CCB00002CCC, + 0x2CCD00002CCE, + 0x2CCF00002CD0, + 0x2CD100002CD2, + 0x2CD300002CD4, + 0x2CD500002CD6, + 0x2CD700002CD8, + 0x2CD900002CDA, + 0x2CDB00002CDC, + 0x2CDD00002CDE, + 0x2CDF00002CE0, + 0x2CE100002CE2, + 0x2CE300002CE5, + 0x2CEC00002CED, + 0x2CEE00002CF2, + 0x2CF300002CF4, + 0x2D0000002D26, + 0x2D2700002D28, + 0x2D2D00002D2E, + 
0x2D3000002D68, + 0x2D7F00002D97, + 0x2DA000002DA7, + 0x2DA800002DAF, + 0x2DB000002DB7, + 0x2DB800002DBF, + 0x2DC000002DC7, + 0x2DC800002DCF, + 0x2DD000002DD7, + 0x2DD800002DDF, + 0x2DE000002E00, + 0x2E2F00002E30, + 0x300500003008, + 0x302A0000302E, + 0x303C0000303D, + 0x304100003097, + 0x30990000309B, + 0x309D0000309F, + 0x30A1000030FB, + 0x30FC000030FF, + 0x310500003130, + 0x31A0000031C0, + 0x31F000003200, + 0x340000004DC0, + 0x4E000000A48D, + 0xA4D00000A4FE, + 0xA5000000A60D, + 0xA6100000A62C, + 0xA6410000A642, + 0xA6430000A644, + 0xA6450000A646, + 0xA6470000A648, + 0xA6490000A64A, + 0xA64B0000A64C, + 0xA64D0000A64E, + 0xA64F0000A650, + 0xA6510000A652, + 0xA6530000A654, + 0xA6550000A656, + 0xA6570000A658, + 0xA6590000A65A, + 0xA65B0000A65C, + 0xA65D0000A65E, + 0xA65F0000A660, + 0xA6610000A662, + 0xA6630000A664, + 0xA6650000A666, + 0xA6670000A668, + 0xA6690000A66A, + 0xA66B0000A66C, + 0xA66D0000A670, + 0xA6740000A67E, + 0xA67F0000A680, + 0xA6810000A682, + 0xA6830000A684, + 0xA6850000A686, + 0xA6870000A688, + 0xA6890000A68A, + 0xA68B0000A68C, + 0xA68D0000A68E, + 0xA68F0000A690, + 0xA6910000A692, + 0xA6930000A694, + 0xA6950000A696, + 0xA6970000A698, + 0xA6990000A69A, + 0xA69B0000A69C, + 0xA69E0000A6E6, + 0xA6F00000A6F2, + 0xA7170000A720, + 0xA7230000A724, + 0xA7250000A726, + 0xA7270000A728, + 0xA7290000A72A, + 0xA72B0000A72C, + 0xA72D0000A72E, + 0xA72F0000A732, + 0xA7330000A734, + 0xA7350000A736, + 0xA7370000A738, + 0xA7390000A73A, + 0xA73B0000A73C, + 0xA73D0000A73E, + 0xA73F0000A740, + 0xA7410000A742, + 0xA7430000A744, + 0xA7450000A746, + 0xA7470000A748, + 0xA7490000A74A, + 0xA74B0000A74C, + 0xA74D0000A74E, + 0xA74F0000A750, + 0xA7510000A752, + 0xA7530000A754, + 0xA7550000A756, + 0xA7570000A758, + 0xA7590000A75A, + 0xA75B0000A75C, + 0xA75D0000A75E, + 0xA75F0000A760, + 0xA7610000A762, + 0xA7630000A764, + 0xA7650000A766, + 0xA7670000A768, + 0xA7690000A76A, + 0xA76B0000A76C, + 0xA76D0000A76E, + 0xA76F0000A770, + 0xA7710000A779, + 0xA77A0000A77B, + 0xA77C0000A77D, + 0xA77F0000A780, + 0xA7810000A782, + 0xA7830000A784, + 0xA7850000A786, + 0xA7870000A789, + 0xA78C0000A78D, + 0xA78E0000A790, + 0xA7910000A792, + 0xA7930000A796, + 0xA7970000A798, + 0xA7990000A79A, + 0xA79B0000A79C, + 0xA79D0000A79E, + 0xA79F0000A7A0, + 0xA7A10000A7A2, + 0xA7A30000A7A4, + 0xA7A50000A7A6, + 0xA7A70000A7A8, + 0xA7A90000A7AA, + 0xA7AF0000A7B0, + 0xA7B50000A7B6, + 0xA7B70000A7B8, + 0xA7B90000A7BA, + 0xA7BB0000A7BC, + 0xA7BD0000A7BE, + 0xA7BF0000A7C0, + 0xA7C10000A7C2, + 0xA7C30000A7C4, + 0xA7C80000A7C9, + 0xA7CA0000A7CB, + 0xA7D10000A7D2, + 0xA7D30000A7D4, + 0xA7D50000A7D6, + 0xA7D70000A7D8, + 0xA7D90000A7DA, + 0xA7F60000A7F8, + 0xA7FA0000A828, + 0xA82C0000A82D, + 0xA8400000A874, + 0xA8800000A8C6, + 0xA8D00000A8DA, + 0xA8E00000A8F8, + 0xA8FB0000A8FC, + 0xA8FD0000A92E, + 0xA9300000A954, + 0xA9800000A9C1, + 0xA9CF0000A9DA, + 0xA9E00000A9FF, + 0xAA000000AA37, + 0xAA400000AA4E, + 0xAA500000AA5A, + 0xAA600000AA77, + 0xAA7A0000AAC3, + 0xAADB0000AADE, + 0xAAE00000AAF0, + 0xAAF20000AAF7, + 0xAB010000AB07, + 0xAB090000AB0F, + 0xAB110000AB17, + 0xAB200000AB27, + 0xAB280000AB2F, + 0xAB300000AB5B, + 0xAB600000AB69, + 0xABC00000ABEB, + 0xABEC0000ABEE, + 0xABF00000ABFA, + 0xAC000000D7A4, + 0xFA0E0000FA10, + 0xFA110000FA12, + 0xFA130000FA15, + 0xFA1F0000FA20, + 0xFA210000FA22, + 0xFA230000FA25, + 0xFA270000FA2A, + 0xFB1E0000FB1F, + 0xFE200000FE30, + 0xFE730000FE74, + 0x100000001000C, + 0x1000D00010027, + 0x100280001003B, + 0x1003C0001003E, + 0x1003F0001004E, + 0x100500001005E, + 0x10080000100FB, + 0x101FD000101FE, + 0x102800001029D, + 
0x102A0000102D1, + 0x102E0000102E1, + 0x1030000010320, + 0x1032D00010341, + 0x103420001034A, + 0x103500001037B, + 0x103800001039E, + 0x103A0000103C4, + 0x103C8000103D0, + 0x104280001049E, + 0x104A0000104AA, + 0x104D8000104FC, + 0x1050000010528, + 0x1053000010564, + 0x10597000105A2, + 0x105A3000105B2, + 0x105B3000105BA, + 0x105BB000105BD, + 0x1060000010737, + 0x1074000010756, + 0x1076000010768, + 0x1078000010781, + 0x1080000010806, + 0x1080800010809, + 0x1080A00010836, + 0x1083700010839, + 0x1083C0001083D, + 0x1083F00010856, + 0x1086000010877, + 0x108800001089F, + 0x108E0000108F3, + 0x108F4000108F6, + 0x1090000010916, + 0x109200001093A, + 0x10980000109B8, + 0x109BE000109C0, + 0x10A0000010A04, + 0x10A0500010A07, + 0x10A0C00010A14, + 0x10A1500010A18, + 0x10A1900010A36, + 0x10A3800010A3B, + 0x10A3F00010A40, + 0x10A6000010A7D, + 0x10A8000010A9D, + 0x10AC000010AC8, + 0x10AC900010AE7, + 0x10B0000010B36, + 0x10B4000010B56, + 0x10B6000010B73, + 0x10B8000010B92, + 0x10C0000010C49, + 0x10CC000010CF3, + 0x10D0000010D28, + 0x10D3000010D3A, + 0x10E8000010EAA, + 0x10EAB00010EAD, + 0x10EB000010EB2, + 0x10EFD00010F1D, + 0x10F2700010F28, + 0x10F3000010F51, + 0x10F7000010F86, + 0x10FB000010FC5, + 0x10FE000010FF7, + 0x1100000011047, + 0x1106600011076, + 0x1107F000110BB, + 0x110C2000110C3, + 0x110D0000110E9, + 0x110F0000110FA, + 0x1110000011135, + 0x1113600011140, + 0x1114400011148, + 0x1115000011174, + 0x1117600011177, + 0x11180000111C5, + 0x111C9000111CD, + 0x111CE000111DB, + 0x111DC000111DD, + 0x1120000011212, + 0x1121300011238, + 0x1123E00011242, + 0x1128000011287, + 0x1128800011289, + 0x1128A0001128E, + 0x1128F0001129E, + 0x1129F000112A9, + 0x112B0000112EB, + 0x112F0000112FA, + 0x1130000011304, + 0x113050001130D, + 0x1130F00011311, + 0x1131300011329, + 0x1132A00011331, + 0x1133200011334, + 0x113350001133A, + 0x1133B00011345, + 0x1134700011349, + 0x1134B0001134E, + 0x1135000011351, + 0x1135700011358, + 0x1135D00011364, + 0x113660001136D, + 0x1137000011375, + 0x114000001144B, + 0x114500001145A, + 0x1145E00011462, + 0x11480000114C6, + 0x114C7000114C8, + 0x114D0000114DA, + 0x11580000115B6, + 0x115B8000115C1, + 0x115D8000115DE, + 0x1160000011641, + 0x1164400011645, + 0x116500001165A, + 0x11680000116B9, + 0x116C0000116CA, + 0x117000001171B, + 0x1171D0001172C, + 0x117300001173A, + 0x1174000011747, + 0x118000001183B, + 0x118C0000118EA, + 0x118FF00011907, + 0x119090001190A, + 0x1190C00011914, + 0x1191500011917, + 0x1191800011936, + 0x1193700011939, + 0x1193B00011944, + 0x119500001195A, + 0x119A0000119A8, + 0x119AA000119D8, + 0x119DA000119E2, + 0x119E3000119E5, + 0x11A0000011A3F, + 0x11A4700011A48, + 0x11A5000011A9A, + 0x11A9D00011A9E, + 0x11AB000011AF9, + 0x11C0000011C09, + 0x11C0A00011C37, + 0x11C3800011C41, + 0x11C5000011C5A, + 0x11C7200011C90, + 0x11C9200011CA8, + 0x11CA900011CB7, + 0x11D0000011D07, + 0x11D0800011D0A, + 0x11D0B00011D37, + 0x11D3A00011D3B, + 0x11D3C00011D3E, + 0x11D3F00011D48, + 0x11D5000011D5A, + 0x11D6000011D66, + 0x11D6700011D69, + 0x11D6A00011D8F, + 0x11D9000011D92, + 0x11D9300011D99, + 0x11DA000011DAA, + 0x11EE000011EF7, + 0x11F0000011F11, + 0x11F1200011F3B, + 0x11F3E00011F43, + 0x11F5000011F5A, + 0x11FB000011FB1, + 0x120000001239A, + 0x1248000012544, + 0x12F9000012FF1, + 0x1300000013430, + 0x1344000013456, + 0x1440000014647, + 0x1680000016A39, + 0x16A4000016A5F, + 0x16A6000016A6A, + 0x16A7000016ABF, + 0x16AC000016ACA, + 0x16AD000016AEE, + 0x16AF000016AF5, + 0x16B0000016B37, + 0x16B4000016B44, + 0x16B5000016B5A, + 0x16B6300016B78, + 0x16B7D00016B90, + 0x16E6000016E80, + 0x16F0000016F4B, + 
0x16F4F00016F88, + 0x16F8F00016FA0, + 0x16FE000016FE2, + 0x16FE300016FE5, + 0x16FF000016FF2, + 0x17000000187F8, + 0x1880000018CD6, + 0x18D0000018D09, + 0x1AFF00001AFF4, + 0x1AFF50001AFFC, + 0x1AFFD0001AFFF, + 0x1B0000001B123, + 0x1B1320001B133, + 0x1B1500001B153, + 0x1B1550001B156, + 0x1B1640001B168, + 0x1B1700001B2FC, + 0x1BC000001BC6B, + 0x1BC700001BC7D, + 0x1BC800001BC89, + 0x1BC900001BC9A, + 0x1BC9D0001BC9F, + 0x1CF000001CF2E, + 0x1CF300001CF47, + 0x1DA000001DA37, + 0x1DA3B0001DA6D, + 0x1DA750001DA76, + 0x1DA840001DA85, + 0x1DA9B0001DAA0, + 0x1DAA10001DAB0, + 0x1DF000001DF1F, + 0x1DF250001DF2B, + 0x1E0000001E007, + 0x1E0080001E019, + 0x1E01B0001E022, + 0x1E0230001E025, + 0x1E0260001E02B, + 0x1E08F0001E090, + 0x1E1000001E12D, + 0x1E1300001E13E, + 0x1E1400001E14A, + 0x1E14E0001E14F, + 0x1E2900001E2AF, + 0x1E2C00001E2FA, + 0x1E4D00001E4FA, + 0x1E7E00001E7E7, + 0x1E7E80001E7EC, + 0x1E7ED0001E7EF, + 0x1E7F00001E7FF, + 0x1E8000001E8C5, + 0x1E8D00001E8D7, + 0x1E9220001E94C, + 0x1E9500001E95A, + 0x200000002A6E0, + 0x2A7000002B73A, + 0x2B7400002B81E, + 0x2B8200002CEA2, + 0x2CEB00002EBE1, + 0x2EBF00002EE5E, + 0x300000003134B, + 0x31350000323B0,
+    ),
+    "CONTEXTJ": (0x200C0000200E,),
+    "CONTEXTO": (
+        0xB7000000B8,
+        0x37500000376,
+        0x5F3000005F5,
+        0x6600000066A,
+        0x6F0000006FA,
+        0x30FB000030FC,
+    ),
+}
diff --git a/.venv/lib/python3.12/site-packages/idna/intranges.py b/.venv/lib/python3.12/site-packages/idna/intranges.py
new file mode 100644
index 0000000..7bfaa8d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/idna/intranges.py
@@ -0,0 +1,57 @@
+"""
+Given a list of integers, made up of (hopefully) a small number of long runs
+of consecutive integers, compute a representation of the form
+((start1, end1), (start2, end2) ...). Then answer the question "was x present
+in the original list?" in time O(log(# runs)).
+"""
+
+import bisect
+from typing import List, Tuple
+
+
+def intranges_from_list(list_: List[int]) -> Tuple[int, ...]:
+    """Represent a list of integers as a sequence of ranges:
+    ((start_0, end_0), (start_1, end_1), ...), such that the original
+    integers are exactly those x such that start_i <= x < end_i for some i.
+
+    Ranges are encoded as single integers (start << 32 | end), not as tuples.
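+
+    A usage sketch (illustrative; `intranges_contain` is defined below):
+
+    >>> ranges = intranges_from_list([1, 2, 3, 10])
+    >>> intranges_contain(2, ranges)
+    True
+    >>> intranges_contain(5, ranges)
+    False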
+ """ + + sorted_list = sorted(list_) + ranges = [] + last_write = -1 + for i in range(len(sorted_list)): + if i + 1 < len(sorted_list): + if sorted_list[i] == sorted_list[i + 1] - 1: + continue + current_range = sorted_list[last_write + 1 : i + 1] + ranges.append(_encode_range(current_range[0], current_range[-1] + 1)) + last_write = i + + return tuple(ranges) + + +def _encode_range(start: int, end: int) -> int: + return (start << 32) | end + + +def _decode_range(r: int) -> Tuple[int, int]: + return (r >> 32), (r & ((1 << 32) - 1)) + + +def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool: + """Determine if `int_` falls into one of the ranges in `ranges`.""" + tuple_ = _encode_range(int_, 0) + pos = bisect.bisect_left(ranges, tuple_) + # we could be immediately ahead of a tuple (start, end) + # with start < int_ <= end + if pos > 0: + left, right = _decode_range(ranges[pos - 1]) + if left <= int_ < right: + return True + # or we could be immediately behind a tuple (int_, end) + if pos < len(ranges): + left, _ = _decode_range(ranges[pos]) + if left == int_: + return True + return False diff --git a/.venv/lib/python3.12/site-packages/idna/package_data.py b/.venv/lib/python3.12/site-packages/idna/package_data.py new file mode 100644 index 0000000..514ff7e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna/package_data.py @@ -0,0 +1 @@ +__version__ = "3.10" diff --git a/.venv/lib/python3.12/site-packages/idna/py.typed b/.venv/lib/python3.12/site-packages/idna/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/idna/uts46data.py b/.venv/lib/python3.12/site-packages/idna/uts46data.py new file mode 100644 index 0000000..eb89432 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/idna/uts46data.py @@ -0,0 +1,8681 @@ +# This file is automatically generated by tools/idna-data +# vim: set fileencoding=utf-8 : + +from typing import List, Tuple, Union + +"""IDNA Mapping Table from UTS46.""" + + +__version__ = "15.1.0" + + +def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x0, "3"), + (0x1, "3"), + (0x2, "3"), + (0x3, "3"), + (0x4, "3"), + (0x5, "3"), + (0x6, "3"), + (0x7, "3"), + (0x8, "3"), + (0x9, "3"), + (0xA, "3"), + (0xB, "3"), + (0xC, "3"), + (0xD, "3"), + (0xE, "3"), + (0xF, "3"), + (0x10, "3"), + (0x11, "3"), + (0x12, "3"), + (0x13, "3"), + (0x14, "3"), + (0x15, "3"), + (0x16, "3"), + (0x17, "3"), + (0x18, "3"), + (0x19, "3"), + (0x1A, "3"), + (0x1B, "3"), + (0x1C, "3"), + (0x1D, "3"), + (0x1E, "3"), + (0x1F, "3"), + (0x20, "3"), + (0x21, "3"), + (0x22, "3"), + (0x23, "3"), + (0x24, "3"), + (0x25, "3"), + (0x26, "3"), + (0x27, "3"), + (0x28, "3"), + (0x29, "3"), + (0x2A, "3"), + (0x2B, "3"), + (0x2C, "3"), + (0x2D, "V"), + (0x2E, "V"), + (0x2F, "3"), + (0x30, "V"), + (0x31, "V"), + (0x32, "V"), + (0x33, "V"), + (0x34, "V"), + (0x35, "V"), + (0x36, "V"), + (0x37, "V"), + (0x38, "V"), + (0x39, "V"), + (0x3A, "3"), + (0x3B, "3"), + (0x3C, "3"), + (0x3D, "3"), + (0x3E, "3"), + (0x3F, "3"), + (0x40, "3"), + (0x41, "M", "a"), + (0x42, "M", "b"), + (0x43, "M", "c"), + (0x44, "M", "d"), + (0x45, "M", "e"), + (0x46, "M", "f"), + (0x47, "M", "g"), + (0x48, "M", "h"), + (0x49, "M", "i"), + (0x4A, "M", "j"), + (0x4B, "M", "k"), + (0x4C, "M", "l"), + (0x4D, "M", "m"), + (0x4E, "M", "n"), + (0x4F, "M", "o"), + (0x50, "M", "p"), + (0x51, "M", "q"), + (0x52, "M", "r"), + (0x53, "M", "s"), + (0x54, "M", "t"), + (0x55, "M", "u"), + (0x56, "M", "v"), + (0x57, "M", "w"), + (0x58, "M", "x"), + (0x59, "M", 
"y"), + (0x5A, "M", "z"), + (0x5B, "3"), + (0x5C, "3"), + (0x5D, "3"), + (0x5E, "3"), + (0x5F, "3"), + (0x60, "3"), + (0x61, "V"), + (0x62, "V"), + (0x63, "V"), + ] + + +def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x64, "V"), + (0x65, "V"), + (0x66, "V"), + (0x67, "V"), + (0x68, "V"), + (0x69, "V"), + (0x6A, "V"), + (0x6B, "V"), + (0x6C, "V"), + (0x6D, "V"), + (0x6E, "V"), + (0x6F, "V"), + (0x70, "V"), + (0x71, "V"), + (0x72, "V"), + (0x73, "V"), + (0x74, "V"), + (0x75, "V"), + (0x76, "V"), + (0x77, "V"), + (0x78, "V"), + (0x79, "V"), + (0x7A, "V"), + (0x7B, "3"), + (0x7C, "3"), + (0x7D, "3"), + (0x7E, "3"), + (0x7F, "3"), + (0x80, "X"), + (0x81, "X"), + (0x82, "X"), + (0x83, "X"), + (0x84, "X"), + (0x85, "X"), + (0x86, "X"), + (0x87, "X"), + (0x88, "X"), + (0x89, "X"), + (0x8A, "X"), + (0x8B, "X"), + (0x8C, "X"), + (0x8D, "X"), + (0x8E, "X"), + (0x8F, "X"), + (0x90, "X"), + (0x91, "X"), + (0x92, "X"), + (0x93, "X"), + (0x94, "X"), + (0x95, "X"), + (0x96, "X"), + (0x97, "X"), + (0x98, "X"), + (0x99, "X"), + (0x9A, "X"), + (0x9B, "X"), + (0x9C, "X"), + (0x9D, "X"), + (0x9E, "X"), + (0x9F, "X"), + (0xA0, "3", " "), + (0xA1, "V"), + (0xA2, "V"), + (0xA3, "V"), + (0xA4, "V"), + (0xA5, "V"), + (0xA6, "V"), + (0xA7, "V"), + (0xA8, "3", " ̈"), + (0xA9, "V"), + (0xAA, "M", "a"), + (0xAB, "V"), + (0xAC, "V"), + (0xAD, "I"), + (0xAE, "V"), + (0xAF, "3", " ̄"), + (0xB0, "V"), + (0xB1, "V"), + (0xB2, "M", "2"), + (0xB3, "M", "3"), + (0xB4, "3", " ́"), + (0xB5, "M", "μ"), + (0xB6, "V"), + (0xB7, "V"), + (0xB8, "3", " ̧"), + (0xB9, "M", "1"), + (0xBA, "M", "o"), + (0xBB, "V"), + (0xBC, "M", "1⁄4"), + (0xBD, "M", "1⁄2"), + (0xBE, "M", "3⁄4"), + (0xBF, "V"), + (0xC0, "M", "à"), + (0xC1, "M", "á"), + (0xC2, "M", "â"), + (0xC3, "M", "ã"), + (0xC4, "M", "ä"), + (0xC5, "M", "å"), + (0xC6, "M", "æ"), + (0xC7, "M", "ç"), + ] + + +def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC8, "M", "è"), + (0xC9, "M", "é"), + (0xCA, "M", "ê"), + (0xCB, "M", "ë"), + (0xCC, "M", "ì"), + (0xCD, "M", "í"), + (0xCE, "M", "î"), + (0xCF, "M", "ï"), + (0xD0, "M", "ð"), + (0xD1, "M", "ñ"), + (0xD2, "M", "ò"), + (0xD3, "M", "ó"), + (0xD4, "M", "ô"), + (0xD5, "M", "õ"), + (0xD6, "M", "ö"), + (0xD7, "V"), + (0xD8, "M", "ø"), + (0xD9, "M", "ù"), + (0xDA, "M", "ú"), + (0xDB, "M", "û"), + (0xDC, "M", "ü"), + (0xDD, "M", "ý"), + (0xDE, "M", "þ"), + (0xDF, "D", "ss"), + (0xE0, "V"), + (0xE1, "V"), + (0xE2, "V"), + (0xE3, "V"), + (0xE4, "V"), + (0xE5, "V"), + (0xE6, "V"), + (0xE7, "V"), + (0xE8, "V"), + (0xE9, "V"), + (0xEA, "V"), + (0xEB, "V"), + (0xEC, "V"), + (0xED, "V"), + (0xEE, "V"), + (0xEF, "V"), + (0xF0, "V"), + (0xF1, "V"), + (0xF2, "V"), + (0xF3, "V"), + (0xF4, "V"), + (0xF5, "V"), + (0xF6, "V"), + (0xF7, "V"), + (0xF8, "V"), + (0xF9, "V"), + (0xFA, "V"), + (0xFB, "V"), + (0xFC, "V"), + (0xFD, "V"), + (0xFE, "V"), + (0xFF, "V"), + (0x100, "M", "ā"), + (0x101, "V"), + (0x102, "M", "ă"), + (0x103, "V"), + (0x104, "M", "ą"), + (0x105, "V"), + (0x106, "M", "ć"), + (0x107, "V"), + (0x108, "M", "ĉ"), + (0x109, "V"), + (0x10A, "M", "ċ"), + (0x10B, "V"), + (0x10C, "M", "č"), + (0x10D, "V"), + (0x10E, "M", "ď"), + (0x10F, "V"), + (0x110, "M", "đ"), + (0x111, "V"), + (0x112, "M", "ē"), + (0x113, "V"), + (0x114, "M", "ĕ"), + (0x115, "V"), + (0x116, "M", "ė"), + (0x117, "V"), + (0x118, "M", "ę"), + (0x119, "V"), + (0x11A, "M", "ě"), + (0x11B, "V"), + (0x11C, "M", "ĝ"), + (0x11D, "V"), + (0x11E, "M", "ğ"), + (0x11F, "V"), + (0x120, "M", "ġ"), + (0x121, "V"), + (0x122, 
"M", "ģ"), + (0x123, "V"), + (0x124, "M", "ĥ"), + (0x125, "V"), + (0x126, "M", "ħ"), + (0x127, "V"), + (0x128, "M", "ĩ"), + (0x129, "V"), + (0x12A, "M", "ī"), + (0x12B, "V"), + ] + + +def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x12C, "M", "ĭ"), + (0x12D, "V"), + (0x12E, "M", "į"), + (0x12F, "V"), + (0x130, "M", "i̇"), + (0x131, "V"), + (0x132, "M", "ij"), + (0x134, "M", "ĵ"), + (0x135, "V"), + (0x136, "M", "ķ"), + (0x137, "V"), + (0x139, "M", "ĺ"), + (0x13A, "V"), + (0x13B, "M", "ļ"), + (0x13C, "V"), + (0x13D, "M", "ľ"), + (0x13E, "V"), + (0x13F, "M", "l·"), + (0x141, "M", "ł"), + (0x142, "V"), + (0x143, "M", "ń"), + (0x144, "V"), + (0x145, "M", "ņ"), + (0x146, "V"), + (0x147, "M", "ň"), + (0x148, "V"), + (0x149, "M", "ʼn"), + (0x14A, "M", "ŋ"), + (0x14B, "V"), + (0x14C, "M", "ō"), + (0x14D, "V"), + (0x14E, "M", "ŏ"), + (0x14F, "V"), + (0x150, "M", "ő"), + (0x151, "V"), + (0x152, "M", "œ"), + (0x153, "V"), + (0x154, "M", "ŕ"), + (0x155, "V"), + (0x156, "M", "ŗ"), + (0x157, "V"), + (0x158, "M", "ř"), + (0x159, "V"), + (0x15A, "M", "ś"), + (0x15B, "V"), + (0x15C, "M", "ŝ"), + (0x15D, "V"), + (0x15E, "M", "ş"), + (0x15F, "V"), + (0x160, "M", "š"), + (0x161, "V"), + (0x162, "M", "ţ"), + (0x163, "V"), + (0x164, "M", "ť"), + (0x165, "V"), + (0x166, "M", "ŧ"), + (0x167, "V"), + (0x168, "M", "ũ"), + (0x169, "V"), + (0x16A, "M", "ū"), + (0x16B, "V"), + (0x16C, "M", "ŭ"), + (0x16D, "V"), + (0x16E, "M", "ů"), + (0x16F, "V"), + (0x170, "M", "ű"), + (0x171, "V"), + (0x172, "M", "ų"), + (0x173, "V"), + (0x174, "M", "ŵ"), + (0x175, "V"), + (0x176, "M", "ŷ"), + (0x177, "V"), + (0x178, "M", "ÿ"), + (0x179, "M", "ź"), + (0x17A, "V"), + (0x17B, "M", "ż"), + (0x17C, "V"), + (0x17D, "M", "ž"), + (0x17E, "V"), + (0x17F, "M", "s"), + (0x180, "V"), + (0x181, "M", "ɓ"), + (0x182, "M", "ƃ"), + (0x183, "V"), + (0x184, "M", "ƅ"), + (0x185, "V"), + (0x186, "M", "ɔ"), + (0x187, "M", "ƈ"), + (0x188, "V"), + (0x189, "M", "ɖ"), + (0x18A, "M", "ɗ"), + (0x18B, "M", "ƌ"), + (0x18C, "V"), + (0x18E, "M", "ǝ"), + (0x18F, "M", "ə"), + (0x190, "M", "ɛ"), + (0x191, "M", "ƒ"), + (0x192, "V"), + (0x193, "M", "ɠ"), + ] + + +def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x194, "M", "ɣ"), + (0x195, "V"), + (0x196, "M", "ɩ"), + (0x197, "M", "ɨ"), + (0x198, "M", "ƙ"), + (0x199, "V"), + (0x19C, "M", "ɯ"), + (0x19D, "M", "ɲ"), + (0x19E, "V"), + (0x19F, "M", "ɵ"), + (0x1A0, "M", "ơ"), + (0x1A1, "V"), + (0x1A2, "M", "ƣ"), + (0x1A3, "V"), + (0x1A4, "M", "ƥ"), + (0x1A5, "V"), + (0x1A6, "M", "ʀ"), + (0x1A7, "M", "ƨ"), + (0x1A8, "V"), + (0x1A9, "M", "ʃ"), + (0x1AA, "V"), + (0x1AC, "M", "ƭ"), + (0x1AD, "V"), + (0x1AE, "M", "ʈ"), + (0x1AF, "M", "ư"), + (0x1B0, "V"), + (0x1B1, "M", "ʊ"), + (0x1B2, "M", "ʋ"), + (0x1B3, "M", "ƴ"), + (0x1B4, "V"), + (0x1B5, "M", "ƶ"), + (0x1B6, "V"), + (0x1B7, "M", "ʒ"), + (0x1B8, "M", "ƹ"), + (0x1B9, "V"), + (0x1BC, "M", "ƽ"), + (0x1BD, "V"), + (0x1C4, "M", "dž"), + (0x1C7, "M", "lj"), + (0x1CA, "M", "nj"), + (0x1CD, "M", "ǎ"), + (0x1CE, "V"), + (0x1CF, "M", "ǐ"), + (0x1D0, "V"), + (0x1D1, "M", "ǒ"), + (0x1D2, "V"), + (0x1D3, "M", "ǔ"), + (0x1D4, "V"), + (0x1D5, "M", "ǖ"), + (0x1D6, "V"), + (0x1D7, "M", "ǘ"), + (0x1D8, "V"), + (0x1D9, "M", "ǚ"), + (0x1DA, "V"), + (0x1DB, "M", "ǜ"), + (0x1DC, "V"), + (0x1DE, "M", "ǟ"), + (0x1DF, "V"), + (0x1E0, "M", "ǡ"), + (0x1E1, "V"), + (0x1E2, "M", "ǣ"), + (0x1E3, "V"), + (0x1E4, "M", "ǥ"), + (0x1E5, "V"), + (0x1E6, "M", "ǧ"), + (0x1E7, "V"), + (0x1E8, "M", "ǩ"), + (0x1E9, "V"), + (0x1EA, "M", "ǫ"), + 
(0x1EB, "V"), + (0x1EC, "M", "ǭ"), + (0x1ED, "V"), + (0x1EE, "M", "ǯ"), + (0x1EF, "V"), + (0x1F1, "M", "dz"), + (0x1F4, "M", "ǵ"), + (0x1F5, "V"), + (0x1F6, "M", "ƕ"), + (0x1F7, "M", "ƿ"), + (0x1F8, "M", "ǹ"), + (0x1F9, "V"), + (0x1FA, "M", "ǻ"), + (0x1FB, "V"), + (0x1FC, "M", "ǽ"), + (0x1FD, "V"), + (0x1FE, "M", "ǿ"), + (0x1FF, "V"), + (0x200, "M", "ȁ"), + (0x201, "V"), + (0x202, "M", "ȃ"), + (0x203, "V"), + (0x204, "M", "ȅ"), + (0x205, "V"), + (0x206, "M", "ȇ"), + (0x207, "V"), + (0x208, "M", "ȉ"), + (0x209, "V"), + (0x20A, "M", "ȋ"), + (0x20B, "V"), + (0x20C, "M", "ȍ"), + ] + + +def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x20D, "V"), + (0x20E, "M", "ȏ"), + (0x20F, "V"), + (0x210, "M", "ȑ"), + (0x211, "V"), + (0x212, "M", "ȓ"), + (0x213, "V"), + (0x214, "M", "ȕ"), + (0x215, "V"), + (0x216, "M", "ȗ"), + (0x217, "V"), + (0x218, "M", "ș"), + (0x219, "V"), + (0x21A, "M", "ț"), + (0x21B, "V"), + (0x21C, "M", "ȝ"), + (0x21D, "V"), + (0x21E, "M", "ȟ"), + (0x21F, "V"), + (0x220, "M", "ƞ"), + (0x221, "V"), + (0x222, "M", "ȣ"), + (0x223, "V"), + (0x224, "M", "ȥ"), + (0x225, "V"), + (0x226, "M", "ȧ"), + (0x227, "V"), + (0x228, "M", "ȩ"), + (0x229, "V"), + (0x22A, "M", "ȫ"), + (0x22B, "V"), + (0x22C, "M", "ȭ"), + (0x22D, "V"), + (0x22E, "M", "ȯ"), + (0x22F, "V"), + (0x230, "M", "ȱ"), + (0x231, "V"), + (0x232, "M", "ȳ"), + (0x233, "V"), + (0x23A, "M", "ⱥ"), + (0x23B, "M", "ȼ"), + (0x23C, "V"), + (0x23D, "M", "ƚ"), + (0x23E, "M", "ⱦ"), + (0x23F, "V"), + (0x241, "M", "ɂ"), + (0x242, "V"), + (0x243, "M", "ƀ"), + (0x244, "M", "ʉ"), + (0x245, "M", "ʌ"), + (0x246, "M", "ɇ"), + (0x247, "V"), + (0x248, "M", "ɉ"), + (0x249, "V"), + (0x24A, "M", "ɋ"), + (0x24B, "V"), + (0x24C, "M", "ɍ"), + (0x24D, "V"), + (0x24E, "M", "ɏ"), + (0x24F, "V"), + (0x2B0, "M", "h"), + (0x2B1, "M", "ɦ"), + (0x2B2, "M", "j"), + (0x2B3, "M", "r"), + (0x2B4, "M", "ɹ"), + (0x2B5, "M", "ɻ"), + (0x2B6, "M", "ʁ"), + (0x2B7, "M", "w"), + (0x2B8, "M", "y"), + (0x2B9, "V"), + (0x2D8, "3", " ̆"), + (0x2D9, "3", " ̇"), + (0x2DA, "3", " ̊"), + (0x2DB, "3", " ̨"), + (0x2DC, "3", " ̃"), + (0x2DD, "3", " ̋"), + (0x2DE, "V"), + (0x2E0, "M", "ɣ"), + (0x2E1, "M", "l"), + (0x2E2, "M", "s"), + (0x2E3, "M", "x"), + (0x2E4, "M", "ʕ"), + (0x2E5, "V"), + (0x340, "M", "̀"), + (0x341, "M", "́"), + (0x342, "V"), + (0x343, "M", "̓"), + (0x344, "M", "̈́"), + (0x345, "M", "ι"), + (0x346, "V"), + (0x34F, "I"), + (0x350, "V"), + (0x370, "M", "ͱ"), + (0x371, "V"), + (0x372, "M", "ͳ"), + (0x373, "V"), + (0x374, "M", "ʹ"), + (0x375, "V"), + (0x376, "M", "ͷ"), + (0x377, "V"), + ] + + +def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x378, "X"), + (0x37A, "3", " ι"), + (0x37B, "V"), + (0x37E, "3", ";"), + (0x37F, "M", "ϳ"), + (0x380, "X"), + (0x384, "3", " ́"), + (0x385, "3", " ̈́"), + (0x386, "M", "ά"), + (0x387, "M", "·"), + (0x388, "M", "έ"), + (0x389, "M", "ή"), + (0x38A, "M", "ί"), + (0x38B, "X"), + (0x38C, "M", "ό"), + (0x38D, "X"), + (0x38E, "M", "ύ"), + (0x38F, "M", "ώ"), + (0x390, "V"), + (0x391, "M", "α"), + (0x392, "M", "β"), + (0x393, "M", "γ"), + (0x394, "M", "δ"), + (0x395, "M", "ε"), + (0x396, "M", "ζ"), + (0x397, "M", "η"), + (0x398, "M", "θ"), + (0x399, "M", "ι"), + (0x39A, "M", "κ"), + (0x39B, "M", "λ"), + (0x39C, "M", "μ"), + (0x39D, "M", "ν"), + (0x39E, "M", "ξ"), + (0x39F, "M", "ο"), + (0x3A0, "M", "π"), + (0x3A1, "M", "ρ"), + (0x3A2, "X"), + (0x3A3, "M", "σ"), + (0x3A4, "M", "τ"), + (0x3A5, "M", "υ"), + (0x3A6, "M", "φ"), + (0x3A7, "M", "χ"), + (0x3A8, "M", "ψ"), + (0x3A9, "M", 
"ω"), + (0x3AA, "M", "ϊ"), + (0x3AB, "M", "ϋ"), + (0x3AC, "V"), + (0x3C2, "D", "σ"), + (0x3C3, "V"), + (0x3CF, "M", "ϗ"), + (0x3D0, "M", "β"), + (0x3D1, "M", "θ"), + (0x3D2, "M", "υ"), + (0x3D3, "M", "ύ"), + (0x3D4, "M", "ϋ"), + (0x3D5, "M", "φ"), + (0x3D6, "M", "π"), + (0x3D7, "V"), + (0x3D8, "M", "ϙ"), + (0x3D9, "V"), + (0x3DA, "M", "ϛ"), + (0x3DB, "V"), + (0x3DC, "M", "ϝ"), + (0x3DD, "V"), + (0x3DE, "M", "ϟ"), + (0x3DF, "V"), + (0x3E0, "M", "ϡ"), + (0x3E1, "V"), + (0x3E2, "M", "ϣ"), + (0x3E3, "V"), + (0x3E4, "M", "ϥ"), + (0x3E5, "V"), + (0x3E6, "M", "ϧ"), + (0x3E7, "V"), + (0x3E8, "M", "ϩ"), + (0x3E9, "V"), + (0x3EA, "M", "ϫ"), + (0x3EB, "V"), + (0x3EC, "M", "ϭ"), + (0x3ED, "V"), + (0x3EE, "M", "ϯ"), + (0x3EF, "V"), + (0x3F0, "M", "κ"), + (0x3F1, "M", "ρ"), + (0x3F2, "M", "σ"), + (0x3F3, "V"), + (0x3F4, "M", "θ"), + (0x3F5, "M", "ε"), + (0x3F6, "V"), + (0x3F7, "M", "ϸ"), + (0x3F8, "V"), + (0x3F9, "M", "σ"), + (0x3FA, "M", "ϻ"), + (0x3FB, "V"), + (0x3FD, "M", "ͻ"), + (0x3FE, "M", "ͼ"), + (0x3FF, "M", "ͽ"), + (0x400, "M", "ѐ"), + (0x401, "M", "ё"), + (0x402, "M", "ђ"), + ] + + +def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x403, "M", "ѓ"), + (0x404, "M", "є"), + (0x405, "M", "ѕ"), + (0x406, "M", "і"), + (0x407, "M", "ї"), + (0x408, "M", "ј"), + (0x409, "M", "љ"), + (0x40A, "M", "њ"), + (0x40B, "M", "ћ"), + (0x40C, "M", "ќ"), + (0x40D, "M", "ѝ"), + (0x40E, "M", "ў"), + (0x40F, "M", "џ"), + (0x410, "M", "а"), + (0x411, "M", "б"), + (0x412, "M", "в"), + (0x413, "M", "г"), + (0x414, "M", "д"), + (0x415, "M", "е"), + (0x416, "M", "ж"), + (0x417, "M", "з"), + (0x418, "M", "и"), + (0x419, "M", "й"), + (0x41A, "M", "к"), + (0x41B, "M", "л"), + (0x41C, "M", "м"), + (0x41D, "M", "н"), + (0x41E, "M", "о"), + (0x41F, "M", "п"), + (0x420, "M", "р"), + (0x421, "M", "с"), + (0x422, "M", "т"), + (0x423, "M", "у"), + (0x424, "M", "ф"), + (0x425, "M", "х"), + (0x426, "M", "ц"), + (0x427, "M", "ч"), + (0x428, "M", "ш"), + (0x429, "M", "щ"), + (0x42A, "M", "ъ"), + (0x42B, "M", "ы"), + (0x42C, "M", "ь"), + (0x42D, "M", "э"), + (0x42E, "M", "ю"), + (0x42F, "M", "я"), + (0x430, "V"), + (0x460, "M", "ѡ"), + (0x461, "V"), + (0x462, "M", "ѣ"), + (0x463, "V"), + (0x464, "M", "ѥ"), + (0x465, "V"), + (0x466, "M", "ѧ"), + (0x467, "V"), + (0x468, "M", "ѩ"), + (0x469, "V"), + (0x46A, "M", "ѫ"), + (0x46B, "V"), + (0x46C, "M", "ѭ"), + (0x46D, "V"), + (0x46E, "M", "ѯ"), + (0x46F, "V"), + (0x470, "M", "ѱ"), + (0x471, "V"), + (0x472, "M", "ѳ"), + (0x473, "V"), + (0x474, "M", "ѵ"), + (0x475, "V"), + (0x476, "M", "ѷ"), + (0x477, "V"), + (0x478, "M", "ѹ"), + (0x479, "V"), + (0x47A, "M", "ѻ"), + (0x47B, "V"), + (0x47C, "M", "ѽ"), + (0x47D, "V"), + (0x47E, "M", "ѿ"), + (0x47F, "V"), + (0x480, "M", "ҁ"), + (0x481, "V"), + (0x48A, "M", "ҋ"), + (0x48B, "V"), + (0x48C, "M", "ҍ"), + (0x48D, "V"), + (0x48E, "M", "ҏ"), + (0x48F, "V"), + (0x490, "M", "ґ"), + (0x491, "V"), + (0x492, "M", "ғ"), + (0x493, "V"), + (0x494, "M", "ҕ"), + (0x495, "V"), + (0x496, "M", "җ"), + (0x497, "V"), + (0x498, "M", "ҙ"), + (0x499, "V"), + (0x49A, "M", "қ"), + (0x49B, "V"), + (0x49C, "M", "ҝ"), + (0x49D, "V"), + ] + + +def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x49E, "M", "ҟ"), + (0x49F, "V"), + (0x4A0, "M", "ҡ"), + (0x4A1, "V"), + (0x4A2, "M", "ң"), + (0x4A3, "V"), + (0x4A4, "M", "ҥ"), + (0x4A5, "V"), + (0x4A6, "M", "ҧ"), + (0x4A7, "V"), + (0x4A8, "M", "ҩ"), + (0x4A9, "V"), + (0x4AA, "M", "ҫ"), + (0x4AB, "V"), + (0x4AC, "M", "ҭ"), + (0x4AD, "V"), + (0x4AE, "M", "ү"), + (0x4AF, 
"V"), + (0x4B0, "M", "ұ"), + (0x4B1, "V"), + (0x4B2, "M", "ҳ"), + (0x4B3, "V"), + (0x4B4, "M", "ҵ"), + (0x4B5, "V"), + (0x4B6, "M", "ҷ"), + (0x4B7, "V"), + (0x4B8, "M", "ҹ"), + (0x4B9, "V"), + (0x4BA, "M", "һ"), + (0x4BB, "V"), + (0x4BC, "M", "ҽ"), + (0x4BD, "V"), + (0x4BE, "M", "ҿ"), + (0x4BF, "V"), + (0x4C0, "X"), + (0x4C1, "M", "ӂ"), + (0x4C2, "V"), + (0x4C3, "M", "ӄ"), + (0x4C4, "V"), + (0x4C5, "M", "ӆ"), + (0x4C6, "V"), + (0x4C7, "M", "ӈ"), + (0x4C8, "V"), + (0x4C9, "M", "ӊ"), + (0x4CA, "V"), + (0x4CB, "M", "ӌ"), + (0x4CC, "V"), + (0x4CD, "M", "ӎ"), + (0x4CE, "V"), + (0x4D0, "M", "ӑ"), + (0x4D1, "V"), + (0x4D2, "M", "ӓ"), + (0x4D3, "V"), + (0x4D4, "M", "ӕ"), + (0x4D5, "V"), + (0x4D6, "M", "ӗ"), + (0x4D7, "V"), + (0x4D8, "M", "ә"), + (0x4D9, "V"), + (0x4DA, "M", "ӛ"), + (0x4DB, "V"), + (0x4DC, "M", "ӝ"), + (0x4DD, "V"), + (0x4DE, "M", "ӟ"), + (0x4DF, "V"), + (0x4E0, "M", "ӡ"), + (0x4E1, "V"), + (0x4E2, "M", "ӣ"), + (0x4E3, "V"), + (0x4E4, "M", "ӥ"), + (0x4E5, "V"), + (0x4E6, "M", "ӧ"), + (0x4E7, "V"), + (0x4E8, "M", "ө"), + (0x4E9, "V"), + (0x4EA, "M", "ӫ"), + (0x4EB, "V"), + (0x4EC, "M", "ӭ"), + (0x4ED, "V"), + (0x4EE, "M", "ӯ"), + (0x4EF, "V"), + (0x4F0, "M", "ӱ"), + (0x4F1, "V"), + (0x4F2, "M", "ӳ"), + (0x4F3, "V"), + (0x4F4, "M", "ӵ"), + (0x4F5, "V"), + (0x4F6, "M", "ӷ"), + (0x4F7, "V"), + (0x4F8, "M", "ӹ"), + (0x4F9, "V"), + (0x4FA, "M", "ӻ"), + (0x4FB, "V"), + (0x4FC, "M", "ӽ"), + (0x4FD, "V"), + (0x4FE, "M", "ӿ"), + (0x4FF, "V"), + (0x500, "M", "ԁ"), + (0x501, "V"), + (0x502, "M", "ԃ"), + ] + + +def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x503, "V"), + (0x504, "M", "ԅ"), + (0x505, "V"), + (0x506, "M", "ԇ"), + (0x507, "V"), + (0x508, "M", "ԉ"), + (0x509, "V"), + (0x50A, "M", "ԋ"), + (0x50B, "V"), + (0x50C, "M", "ԍ"), + (0x50D, "V"), + (0x50E, "M", "ԏ"), + (0x50F, "V"), + (0x510, "M", "ԑ"), + (0x511, "V"), + (0x512, "M", "ԓ"), + (0x513, "V"), + (0x514, "M", "ԕ"), + (0x515, "V"), + (0x516, "M", "ԗ"), + (0x517, "V"), + (0x518, "M", "ԙ"), + (0x519, "V"), + (0x51A, "M", "ԛ"), + (0x51B, "V"), + (0x51C, "M", "ԝ"), + (0x51D, "V"), + (0x51E, "M", "ԟ"), + (0x51F, "V"), + (0x520, "M", "ԡ"), + (0x521, "V"), + (0x522, "M", "ԣ"), + (0x523, "V"), + (0x524, "M", "ԥ"), + (0x525, "V"), + (0x526, "M", "ԧ"), + (0x527, "V"), + (0x528, "M", "ԩ"), + (0x529, "V"), + (0x52A, "M", "ԫ"), + (0x52B, "V"), + (0x52C, "M", "ԭ"), + (0x52D, "V"), + (0x52E, "M", "ԯ"), + (0x52F, "V"), + (0x530, "X"), + (0x531, "M", "ա"), + (0x532, "M", "բ"), + (0x533, "M", "գ"), + (0x534, "M", "դ"), + (0x535, "M", "ե"), + (0x536, "M", "զ"), + (0x537, "M", "է"), + (0x538, "M", "ը"), + (0x539, "M", "թ"), + (0x53A, "M", "ժ"), + (0x53B, "M", "ի"), + (0x53C, "M", "լ"), + (0x53D, "M", "խ"), + (0x53E, "M", "ծ"), + (0x53F, "M", "կ"), + (0x540, "M", "հ"), + (0x541, "M", "ձ"), + (0x542, "M", "ղ"), + (0x543, "M", "ճ"), + (0x544, "M", "մ"), + (0x545, "M", "յ"), + (0x546, "M", "ն"), + (0x547, "M", "շ"), + (0x548, "M", "ո"), + (0x549, "M", "չ"), + (0x54A, "M", "պ"), + (0x54B, "M", "ջ"), + (0x54C, "M", "ռ"), + (0x54D, "M", "ս"), + (0x54E, "M", "վ"), + (0x54F, "M", "տ"), + (0x550, "M", "ր"), + (0x551, "M", "ց"), + (0x552, "M", "ւ"), + (0x553, "M", "փ"), + (0x554, "M", "ք"), + (0x555, "M", "օ"), + (0x556, "M", "ֆ"), + (0x557, "X"), + (0x559, "V"), + (0x587, "M", "եւ"), + (0x588, "V"), + (0x58B, "X"), + (0x58D, "V"), + (0x590, "X"), + (0x591, "V"), + (0x5C8, "X"), + (0x5D0, "V"), + (0x5EB, "X"), + (0x5EF, "V"), + (0x5F5, "X"), + (0x606, "V"), + (0x61C, "X"), + (0x61D, "V"), + ] + + +def _seg_10() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x675, "M", "اٴ"), + (0x676, "M", "وٴ"), + (0x677, "M", "ۇٴ"), + (0x678, "M", "يٴ"), + (0x679, "V"), + (0x6DD, "X"), + (0x6DE, "V"), + (0x70E, "X"), + (0x710, "V"), + (0x74B, "X"), + (0x74D, "V"), + (0x7B2, "X"), + (0x7C0, "V"), + (0x7FB, "X"), + (0x7FD, "V"), + (0x82E, "X"), + (0x830, "V"), + (0x83F, "X"), + (0x840, "V"), + (0x85C, "X"), + (0x85E, "V"), + (0x85F, "X"), + (0x860, "V"), + (0x86B, "X"), + (0x870, "V"), + (0x88F, "X"), + (0x898, "V"), + (0x8E2, "X"), + (0x8E3, "V"), + (0x958, "M", "क़"), + (0x959, "M", "ख़"), + (0x95A, "M", "ग़"), + (0x95B, "M", "ज़"), + (0x95C, "M", "ड़"), + (0x95D, "M", "ढ़"), + (0x95E, "M", "फ़"), + (0x95F, "M", "य़"), + (0x960, "V"), + (0x984, "X"), + (0x985, "V"), + (0x98D, "X"), + (0x98F, "V"), + (0x991, "X"), + (0x993, "V"), + (0x9A9, "X"), + (0x9AA, "V"), + (0x9B1, "X"), + (0x9B2, "V"), + (0x9B3, "X"), + (0x9B6, "V"), + (0x9BA, "X"), + (0x9BC, "V"), + (0x9C5, "X"), + (0x9C7, "V"), + (0x9C9, "X"), + (0x9CB, "V"), + (0x9CF, "X"), + (0x9D7, "V"), + (0x9D8, "X"), + (0x9DC, "M", "ড়"), + (0x9DD, "M", "ঢ়"), + (0x9DE, "X"), + (0x9DF, "M", "য়"), + (0x9E0, "V"), + (0x9E4, "X"), + (0x9E6, "V"), + (0x9FF, "X"), + (0xA01, "V"), + (0xA04, "X"), + (0xA05, "V"), + (0xA0B, "X"), + (0xA0F, "V"), + (0xA11, "X"), + (0xA13, "V"), + (0xA29, "X"), + (0xA2A, "V"), + (0xA31, "X"), + (0xA32, "V"), + (0xA33, "M", "ਲ਼"), + (0xA34, "X"), + (0xA35, "V"), + (0xA36, "M", "ਸ਼"), + (0xA37, "X"), + (0xA38, "V"), + (0xA3A, "X"), + (0xA3C, "V"), + (0xA3D, "X"), + (0xA3E, "V"), + (0xA43, "X"), + (0xA47, "V"), + (0xA49, "X"), + (0xA4B, "V"), + (0xA4E, "X"), + (0xA51, "V"), + (0xA52, "X"), + (0xA59, "M", "ਖ਼"), + (0xA5A, "M", "ਗ਼"), + (0xA5B, "M", "ਜ਼"), + (0xA5C, "V"), + (0xA5D, "X"), + ] + + +def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA5E, "M", "ਫ਼"), + (0xA5F, "X"), + (0xA66, "V"), + (0xA77, "X"), + (0xA81, "V"), + (0xA84, "X"), + (0xA85, "V"), + (0xA8E, "X"), + (0xA8F, "V"), + (0xA92, "X"), + (0xA93, "V"), + (0xAA9, "X"), + (0xAAA, "V"), + (0xAB1, "X"), + (0xAB2, "V"), + (0xAB4, "X"), + (0xAB5, "V"), + (0xABA, "X"), + (0xABC, "V"), + (0xAC6, "X"), + (0xAC7, "V"), + (0xACA, "X"), + (0xACB, "V"), + (0xACE, "X"), + (0xAD0, "V"), + (0xAD1, "X"), + (0xAE0, "V"), + (0xAE4, "X"), + (0xAE6, "V"), + (0xAF2, "X"), + (0xAF9, "V"), + (0xB00, "X"), + (0xB01, "V"), + (0xB04, "X"), + (0xB05, "V"), + (0xB0D, "X"), + (0xB0F, "V"), + (0xB11, "X"), + (0xB13, "V"), + (0xB29, "X"), + (0xB2A, "V"), + (0xB31, "X"), + (0xB32, "V"), + (0xB34, "X"), + (0xB35, "V"), + (0xB3A, "X"), + (0xB3C, "V"), + (0xB45, "X"), + (0xB47, "V"), + (0xB49, "X"), + (0xB4B, "V"), + (0xB4E, "X"), + (0xB55, "V"), + (0xB58, "X"), + (0xB5C, "M", "ଡ଼"), + (0xB5D, "M", "ଢ଼"), + (0xB5E, "X"), + (0xB5F, "V"), + (0xB64, "X"), + (0xB66, "V"), + (0xB78, "X"), + (0xB82, "V"), + (0xB84, "X"), + (0xB85, "V"), + (0xB8B, "X"), + (0xB8E, "V"), + (0xB91, "X"), + (0xB92, "V"), + (0xB96, "X"), + (0xB99, "V"), + (0xB9B, "X"), + (0xB9C, "V"), + (0xB9D, "X"), + (0xB9E, "V"), + (0xBA0, "X"), + (0xBA3, "V"), + (0xBA5, "X"), + (0xBA8, "V"), + (0xBAB, "X"), + (0xBAE, "V"), + (0xBBA, "X"), + (0xBBE, "V"), + (0xBC3, "X"), + (0xBC6, "V"), + (0xBC9, "X"), + (0xBCA, "V"), + (0xBCE, "X"), + (0xBD0, "V"), + (0xBD1, "X"), + (0xBD7, "V"), + (0xBD8, "X"), + (0xBE6, "V"), + (0xBFB, "X"), + (0xC00, "V"), + (0xC0D, "X"), + (0xC0E, "V"), + (0xC11, "X"), + (0xC12, "V"), + (0xC29, "X"), + (0xC2A, "V"), + ] + + +def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, 
str, str]]]: + return [ + (0xC3A, "X"), + (0xC3C, "V"), + (0xC45, "X"), + (0xC46, "V"), + (0xC49, "X"), + (0xC4A, "V"), + (0xC4E, "X"), + (0xC55, "V"), + (0xC57, "X"), + (0xC58, "V"), + (0xC5B, "X"), + (0xC5D, "V"), + (0xC5E, "X"), + (0xC60, "V"), + (0xC64, "X"), + (0xC66, "V"), + (0xC70, "X"), + (0xC77, "V"), + (0xC8D, "X"), + (0xC8E, "V"), + (0xC91, "X"), + (0xC92, "V"), + (0xCA9, "X"), + (0xCAA, "V"), + (0xCB4, "X"), + (0xCB5, "V"), + (0xCBA, "X"), + (0xCBC, "V"), + (0xCC5, "X"), + (0xCC6, "V"), + (0xCC9, "X"), + (0xCCA, "V"), + (0xCCE, "X"), + (0xCD5, "V"), + (0xCD7, "X"), + (0xCDD, "V"), + (0xCDF, "X"), + (0xCE0, "V"), + (0xCE4, "X"), + (0xCE6, "V"), + (0xCF0, "X"), + (0xCF1, "V"), + (0xCF4, "X"), + (0xD00, "V"), + (0xD0D, "X"), + (0xD0E, "V"), + (0xD11, "X"), + (0xD12, "V"), + (0xD45, "X"), + (0xD46, "V"), + (0xD49, "X"), + (0xD4A, "V"), + (0xD50, "X"), + (0xD54, "V"), + (0xD64, "X"), + (0xD66, "V"), + (0xD80, "X"), + (0xD81, "V"), + (0xD84, "X"), + (0xD85, "V"), + (0xD97, "X"), + (0xD9A, "V"), + (0xDB2, "X"), + (0xDB3, "V"), + (0xDBC, "X"), + (0xDBD, "V"), + (0xDBE, "X"), + (0xDC0, "V"), + (0xDC7, "X"), + (0xDCA, "V"), + (0xDCB, "X"), + (0xDCF, "V"), + (0xDD5, "X"), + (0xDD6, "V"), + (0xDD7, "X"), + (0xDD8, "V"), + (0xDE0, "X"), + (0xDE6, "V"), + (0xDF0, "X"), + (0xDF2, "V"), + (0xDF5, "X"), + (0xE01, "V"), + (0xE33, "M", "ํา"), + (0xE34, "V"), + (0xE3B, "X"), + (0xE3F, "V"), + (0xE5C, "X"), + (0xE81, "V"), + (0xE83, "X"), + (0xE84, "V"), + (0xE85, "X"), + (0xE86, "V"), + (0xE8B, "X"), + (0xE8C, "V"), + (0xEA4, "X"), + (0xEA5, "V"), + (0xEA6, "X"), + (0xEA7, "V"), + (0xEB3, "M", "ໍາ"), + (0xEB4, "V"), + ] + + +def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xEBE, "X"), + (0xEC0, "V"), + (0xEC5, "X"), + (0xEC6, "V"), + (0xEC7, "X"), + (0xEC8, "V"), + (0xECF, "X"), + (0xED0, "V"), + (0xEDA, "X"), + (0xEDC, "M", "ຫນ"), + (0xEDD, "M", "ຫມ"), + (0xEDE, "V"), + (0xEE0, "X"), + (0xF00, "V"), + (0xF0C, "M", "་"), + (0xF0D, "V"), + (0xF43, "M", "གྷ"), + (0xF44, "V"), + (0xF48, "X"), + (0xF49, "V"), + (0xF4D, "M", "ཌྷ"), + (0xF4E, "V"), + (0xF52, "M", "དྷ"), + (0xF53, "V"), + (0xF57, "M", "བྷ"), + (0xF58, "V"), + (0xF5C, "M", "ཛྷ"), + (0xF5D, "V"), + (0xF69, "M", "ཀྵ"), + (0xF6A, "V"), + (0xF6D, "X"), + (0xF71, "V"), + (0xF73, "M", "ཱི"), + (0xF74, "V"), + (0xF75, "M", "ཱུ"), + (0xF76, "M", "ྲྀ"), + (0xF77, "M", "ྲཱྀ"), + (0xF78, "M", "ླྀ"), + (0xF79, "M", "ླཱྀ"), + (0xF7A, "V"), + (0xF81, "M", "ཱྀ"), + (0xF82, "V"), + (0xF93, "M", "ྒྷ"), + (0xF94, "V"), + (0xF98, "X"), + (0xF99, "V"), + (0xF9D, "M", "ྜྷ"), + (0xF9E, "V"), + (0xFA2, "M", "ྡྷ"), + (0xFA3, "V"), + (0xFA7, "M", "ྦྷ"), + (0xFA8, "V"), + (0xFAC, "M", "ྫྷ"), + (0xFAD, "V"), + (0xFB9, "M", "ྐྵ"), + (0xFBA, "V"), + (0xFBD, "X"), + (0xFBE, "V"), + (0xFCD, "X"), + (0xFCE, "V"), + (0xFDB, "X"), + (0x1000, "V"), + (0x10A0, "X"), + (0x10C7, "M", "ⴧ"), + (0x10C8, "X"), + (0x10CD, "M", "ⴭ"), + (0x10CE, "X"), + (0x10D0, "V"), + (0x10FC, "M", "ნ"), + (0x10FD, "V"), + (0x115F, "X"), + (0x1161, "V"), + (0x1249, "X"), + (0x124A, "V"), + (0x124E, "X"), + (0x1250, "V"), + (0x1257, "X"), + (0x1258, "V"), + (0x1259, "X"), + (0x125A, "V"), + (0x125E, "X"), + (0x1260, "V"), + (0x1289, "X"), + (0x128A, "V"), + (0x128E, "X"), + (0x1290, "V"), + (0x12B1, "X"), + (0x12B2, "V"), + (0x12B6, "X"), + (0x12B8, "V"), + (0x12BF, "X"), + (0x12C0, "V"), + (0x12C1, "X"), + (0x12C2, "V"), + (0x12C6, "X"), + (0x12C8, "V"), + (0x12D7, "X"), + (0x12D8, "V"), + (0x1311, "X"), + (0x1312, "V"), + ] + + +def _seg_14() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1316, "X"), + (0x1318, "V"), + (0x135B, "X"), + (0x135D, "V"), + (0x137D, "X"), + (0x1380, "V"), + (0x139A, "X"), + (0x13A0, "V"), + (0x13F6, "X"), + (0x13F8, "M", "Ᏸ"), + (0x13F9, "M", "Ᏹ"), + (0x13FA, "M", "Ᏺ"), + (0x13FB, "M", "Ᏻ"), + (0x13FC, "M", "Ᏼ"), + (0x13FD, "M", "Ᏽ"), + (0x13FE, "X"), + (0x1400, "V"), + (0x1680, "X"), + (0x1681, "V"), + (0x169D, "X"), + (0x16A0, "V"), + (0x16F9, "X"), + (0x1700, "V"), + (0x1716, "X"), + (0x171F, "V"), + (0x1737, "X"), + (0x1740, "V"), + (0x1754, "X"), + (0x1760, "V"), + (0x176D, "X"), + (0x176E, "V"), + (0x1771, "X"), + (0x1772, "V"), + (0x1774, "X"), + (0x1780, "V"), + (0x17B4, "X"), + (0x17B6, "V"), + (0x17DE, "X"), + (0x17E0, "V"), + (0x17EA, "X"), + (0x17F0, "V"), + (0x17FA, "X"), + (0x1800, "V"), + (0x1806, "X"), + (0x1807, "V"), + (0x180B, "I"), + (0x180E, "X"), + (0x180F, "I"), + (0x1810, "V"), + (0x181A, "X"), + (0x1820, "V"), + (0x1879, "X"), + (0x1880, "V"), + (0x18AB, "X"), + (0x18B0, "V"), + (0x18F6, "X"), + (0x1900, "V"), + (0x191F, "X"), + (0x1920, "V"), + (0x192C, "X"), + (0x1930, "V"), + (0x193C, "X"), + (0x1940, "V"), + (0x1941, "X"), + (0x1944, "V"), + (0x196E, "X"), + (0x1970, "V"), + (0x1975, "X"), + (0x1980, "V"), + (0x19AC, "X"), + (0x19B0, "V"), + (0x19CA, "X"), + (0x19D0, "V"), + (0x19DB, "X"), + (0x19DE, "V"), + (0x1A1C, "X"), + (0x1A1E, "V"), + (0x1A5F, "X"), + (0x1A60, "V"), + (0x1A7D, "X"), + (0x1A7F, "V"), + (0x1A8A, "X"), + (0x1A90, "V"), + (0x1A9A, "X"), + (0x1AA0, "V"), + (0x1AAE, "X"), + (0x1AB0, "V"), + (0x1ACF, "X"), + (0x1B00, "V"), + (0x1B4D, "X"), + (0x1B50, "V"), + (0x1B7F, "X"), + (0x1B80, "V"), + (0x1BF4, "X"), + (0x1BFC, "V"), + (0x1C38, "X"), + (0x1C3B, "V"), + (0x1C4A, "X"), + (0x1C4D, "V"), + (0x1C80, "M", "в"), + ] + + +def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1C81, "M", "д"), + (0x1C82, "M", "о"), + (0x1C83, "M", "с"), + (0x1C84, "M", "т"), + (0x1C86, "M", "ъ"), + (0x1C87, "M", "ѣ"), + (0x1C88, "M", "ꙋ"), + (0x1C89, "X"), + (0x1C90, "M", "ა"), + (0x1C91, "M", "ბ"), + (0x1C92, "M", "გ"), + (0x1C93, "M", "დ"), + (0x1C94, "M", "ე"), + (0x1C95, "M", "ვ"), + (0x1C96, "M", "ზ"), + (0x1C97, "M", "თ"), + (0x1C98, "M", "ი"), + (0x1C99, "M", "კ"), + (0x1C9A, "M", "ლ"), + (0x1C9B, "M", "მ"), + (0x1C9C, "M", "ნ"), + (0x1C9D, "M", "ო"), + (0x1C9E, "M", "პ"), + (0x1C9F, "M", "ჟ"), + (0x1CA0, "M", "რ"), + (0x1CA1, "M", "ს"), + (0x1CA2, "M", "ტ"), + (0x1CA3, "M", "უ"), + (0x1CA4, "M", "ფ"), + (0x1CA5, "M", "ქ"), + (0x1CA6, "M", "ღ"), + (0x1CA7, "M", "ყ"), + (0x1CA8, "M", "შ"), + (0x1CA9, "M", "ჩ"), + (0x1CAA, "M", "ც"), + (0x1CAB, "M", "ძ"), + (0x1CAC, "M", "წ"), + (0x1CAD, "M", "ჭ"), + (0x1CAE, "M", "ხ"), + (0x1CAF, "M", "ჯ"), + (0x1CB0, "M", "ჰ"), + (0x1CB1, "M", "ჱ"), + (0x1CB2, "M", "ჲ"), + (0x1CB3, "M", "ჳ"), + (0x1CB4, "M", "ჴ"), + (0x1CB5, "M", "ჵ"), + (0x1CB6, "M", "ჶ"), + (0x1CB7, "M", "ჷ"), + (0x1CB8, "M", "ჸ"), + (0x1CB9, "M", "ჹ"), + (0x1CBA, "M", "ჺ"), + (0x1CBB, "X"), + (0x1CBD, "M", "ჽ"), + (0x1CBE, "M", "ჾ"), + (0x1CBF, "M", "ჿ"), + (0x1CC0, "V"), + (0x1CC8, "X"), + (0x1CD0, "V"), + (0x1CFB, "X"), + (0x1D00, "V"), + (0x1D2C, "M", "a"), + (0x1D2D, "M", "æ"), + (0x1D2E, "M", "b"), + (0x1D2F, "V"), + (0x1D30, "M", "d"), + (0x1D31, "M", "e"), + (0x1D32, "M", "ǝ"), + (0x1D33, "M", "g"), + (0x1D34, "M", "h"), + (0x1D35, "M", "i"), + (0x1D36, "M", "j"), + (0x1D37, "M", "k"), + (0x1D38, "M", "l"), + (0x1D39, "M", "m"), + (0x1D3A, "M", "n"), + (0x1D3B, "V"), + (0x1D3C, "M", "o"), + (0x1D3D, "M", 
"ȣ"), + (0x1D3E, "M", "p"), + (0x1D3F, "M", "r"), + (0x1D40, "M", "t"), + (0x1D41, "M", "u"), + (0x1D42, "M", "w"), + (0x1D43, "M", "a"), + (0x1D44, "M", "ɐ"), + (0x1D45, "M", "ɑ"), + (0x1D46, "M", "ᴂ"), + (0x1D47, "M", "b"), + (0x1D48, "M", "d"), + (0x1D49, "M", "e"), + (0x1D4A, "M", "ə"), + (0x1D4B, "M", "ɛ"), + (0x1D4C, "M", "ɜ"), + (0x1D4D, "M", "g"), + (0x1D4E, "V"), + (0x1D4F, "M", "k"), + (0x1D50, "M", "m"), + (0x1D51, "M", "ŋ"), + (0x1D52, "M", "o"), + (0x1D53, "M", "ɔ"), + ] + + +def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D54, "M", "ᴖ"), + (0x1D55, "M", "ᴗ"), + (0x1D56, "M", "p"), + (0x1D57, "M", "t"), + (0x1D58, "M", "u"), + (0x1D59, "M", "ᴝ"), + (0x1D5A, "M", "ɯ"), + (0x1D5B, "M", "v"), + (0x1D5C, "M", "ᴥ"), + (0x1D5D, "M", "β"), + (0x1D5E, "M", "γ"), + (0x1D5F, "M", "δ"), + (0x1D60, "M", "φ"), + (0x1D61, "M", "χ"), + (0x1D62, "M", "i"), + (0x1D63, "M", "r"), + (0x1D64, "M", "u"), + (0x1D65, "M", "v"), + (0x1D66, "M", "β"), + (0x1D67, "M", "γ"), + (0x1D68, "M", "ρ"), + (0x1D69, "M", "φ"), + (0x1D6A, "M", "χ"), + (0x1D6B, "V"), + (0x1D78, "M", "н"), + (0x1D79, "V"), + (0x1D9B, "M", "ɒ"), + (0x1D9C, "M", "c"), + (0x1D9D, "M", "ɕ"), + (0x1D9E, "M", "ð"), + (0x1D9F, "M", "ɜ"), + (0x1DA0, "M", "f"), + (0x1DA1, "M", "ɟ"), + (0x1DA2, "M", "ɡ"), + (0x1DA3, "M", "ɥ"), + (0x1DA4, "M", "ɨ"), + (0x1DA5, "M", "ɩ"), + (0x1DA6, "M", "ɪ"), + (0x1DA7, "M", "ᵻ"), + (0x1DA8, "M", "ʝ"), + (0x1DA9, "M", "ɭ"), + (0x1DAA, "M", "ᶅ"), + (0x1DAB, "M", "ʟ"), + (0x1DAC, "M", "ɱ"), + (0x1DAD, "M", "ɰ"), + (0x1DAE, "M", "ɲ"), + (0x1DAF, "M", "ɳ"), + (0x1DB0, "M", "ɴ"), + (0x1DB1, "M", "ɵ"), + (0x1DB2, "M", "ɸ"), + (0x1DB3, "M", "ʂ"), + (0x1DB4, "M", "ʃ"), + (0x1DB5, "M", "ƫ"), + (0x1DB6, "M", "ʉ"), + (0x1DB7, "M", "ʊ"), + (0x1DB8, "M", "ᴜ"), + (0x1DB9, "M", "ʋ"), + (0x1DBA, "M", "ʌ"), + (0x1DBB, "M", "z"), + (0x1DBC, "M", "ʐ"), + (0x1DBD, "M", "ʑ"), + (0x1DBE, "M", "ʒ"), + (0x1DBF, "M", "θ"), + (0x1DC0, "V"), + (0x1E00, "M", "ḁ"), + (0x1E01, "V"), + (0x1E02, "M", "ḃ"), + (0x1E03, "V"), + (0x1E04, "M", "ḅ"), + (0x1E05, "V"), + (0x1E06, "M", "ḇ"), + (0x1E07, "V"), + (0x1E08, "M", "ḉ"), + (0x1E09, "V"), + (0x1E0A, "M", "ḋ"), + (0x1E0B, "V"), + (0x1E0C, "M", "ḍ"), + (0x1E0D, "V"), + (0x1E0E, "M", "ḏ"), + (0x1E0F, "V"), + (0x1E10, "M", "ḑ"), + (0x1E11, "V"), + (0x1E12, "M", "ḓ"), + (0x1E13, "V"), + (0x1E14, "M", "ḕ"), + (0x1E15, "V"), + (0x1E16, "M", "ḗ"), + (0x1E17, "V"), + (0x1E18, "M", "ḙ"), + (0x1E19, "V"), + (0x1E1A, "M", "ḛ"), + (0x1E1B, "V"), + (0x1E1C, "M", "ḝ"), + (0x1E1D, "V"), + (0x1E1E, "M", "ḟ"), + (0x1E1F, "V"), + (0x1E20, "M", "ḡ"), + (0x1E21, "V"), + (0x1E22, "M", "ḣ"), + (0x1E23, "V"), + ] + + +def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E24, "M", "ḥ"), + (0x1E25, "V"), + (0x1E26, "M", "ḧ"), + (0x1E27, "V"), + (0x1E28, "M", "ḩ"), + (0x1E29, "V"), + (0x1E2A, "M", "ḫ"), + (0x1E2B, "V"), + (0x1E2C, "M", "ḭ"), + (0x1E2D, "V"), + (0x1E2E, "M", "ḯ"), + (0x1E2F, "V"), + (0x1E30, "M", "ḱ"), + (0x1E31, "V"), + (0x1E32, "M", "ḳ"), + (0x1E33, "V"), + (0x1E34, "M", "ḵ"), + (0x1E35, "V"), + (0x1E36, "M", "ḷ"), + (0x1E37, "V"), + (0x1E38, "M", "ḹ"), + (0x1E39, "V"), + (0x1E3A, "M", "ḻ"), + (0x1E3B, "V"), + (0x1E3C, "M", "ḽ"), + (0x1E3D, "V"), + (0x1E3E, "M", "ḿ"), + (0x1E3F, "V"), + (0x1E40, "M", "ṁ"), + (0x1E41, "V"), + (0x1E42, "M", "ṃ"), + (0x1E43, "V"), + (0x1E44, "M", "ṅ"), + (0x1E45, "V"), + (0x1E46, "M", "ṇ"), + (0x1E47, "V"), + (0x1E48, "M", "ṉ"), + (0x1E49, "V"), + (0x1E4A, "M", "ṋ"), + (0x1E4B, "V"), + (0x1E4C, "M", 
"ṍ"), + (0x1E4D, "V"), + (0x1E4E, "M", "ṏ"), + (0x1E4F, "V"), + (0x1E50, "M", "ṑ"), + (0x1E51, "V"), + (0x1E52, "M", "ṓ"), + (0x1E53, "V"), + (0x1E54, "M", "ṕ"), + (0x1E55, "V"), + (0x1E56, "M", "ṗ"), + (0x1E57, "V"), + (0x1E58, "M", "ṙ"), + (0x1E59, "V"), + (0x1E5A, "M", "ṛ"), + (0x1E5B, "V"), + (0x1E5C, "M", "ṝ"), + (0x1E5D, "V"), + (0x1E5E, "M", "ṟ"), + (0x1E5F, "V"), + (0x1E60, "M", "ṡ"), + (0x1E61, "V"), + (0x1E62, "M", "ṣ"), + (0x1E63, "V"), + (0x1E64, "M", "ṥ"), + (0x1E65, "V"), + (0x1E66, "M", "ṧ"), + (0x1E67, "V"), + (0x1E68, "M", "ṩ"), + (0x1E69, "V"), + (0x1E6A, "M", "ṫ"), + (0x1E6B, "V"), + (0x1E6C, "M", "ṭ"), + (0x1E6D, "V"), + (0x1E6E, "M", "ṯ"), + (0x1E6F, "V"), + (0x1E70, "M", "ṱ"), + (0x1E71, "V"), + (0x1E72, "M", "ṳ"), + (0x1E73, "V"), + (0x1E74, "M", "ṵ"), + (0x1E75, "V"), + (0x1E76, "M", "ṷ"), + (0x1E77, "V"), + (0x1E78, "M", "ṹ"), + (0x1E79, "V"), + (0x1E7A, "M", "ṻ"), + (0x1E7B, "V"), + (0x1E7C, "M", "ṽ"), + (0x1E7D, "V"), + (0x1E7E, "M", "ṿ"), + (0x1E7F, "V"), + (0x1E80, "M", "ẁ"), + (0x1E81, "V"), + (0x1E82, "M", "ẃ"), + (0x1E83, "V"), + (0x1E84, "M", "ẅ"), + (0x1E85, "V"), + (0x1E86, "M", "ẇ"), + (0x1E87, "V"), + ] + + +def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E88, "M", "ẉ"), + (0x1E89, "V"), + (0x1E8A, "M", "ẋ"), + (0x1E8B, "V"), + (0x1E8C, "M", "ẍ"), + (0x1E8D, "V"), + (0x1E8E, "M", "ẏ"), + (0x1E8F, "V"), + (0x1E90, "M", "ẑ"), + (0x1E91, "V"), + (0x1E92, "M", "ẓ"), + (0x1E93, "V"), + (0x1E94, "M", "ẕ"), + (0x1E95, "V"), + (0x1E9A, "M", "aʾ"), + (0x1E9B, "M", "ṡ"), + (0x1E9C, "V"), + (0x1E9E, "M", "ß"), + (0x1E9F, "V"), + (0x1EA0, "M", "ạ"), + (0x1EA1, "V"), + (0x1EA2, "M", "ả"), + (0x1EA3, "V"), + (0x1EA4, "M", "ấ"), + (0x1EA5, "V"), + (0x1EA6, "M", "ầ"), + (0x1EA7, "V"), + (0x1EA8, "M", "ẩ"), + (0x1EA9, "V"), + (0x1EAA, "M", "ẫ"), + (0x1EAB, "V"), + (0x1EAC, "M", "ậ"), + (0x1EAD, "V"), + (0x1EAE, "M", "ắ"), + (0x1EAF, "V"), + (0x1EB0, "M", "ằ"), + (0x1EB1, "V"), + (0x1EB2, "M", "ẳ"), + (0x1EB3, "V"), + (0x1EB4, "M", "ẵ"), + (0x1EB5, "V"), + (0x1EB6, "M", "ặ"), + (0x1EB7, "V"), + (0x1EB8, "M", "ẹ"), + (0x1EB9, "V"), + (0x1EBA, "M", "ẻ"), + (0x1EBB, "V"), + (0x1EBC, "M", "ẽ"), + (0x1EBD, "V"), + (0x1EBE, "M", "ế"), + (0x1EBF, "V"), + (0x1EC0, "M", "ề"), + (0x1EC1, "V"), + (0x1EC2, "M", "ể"), + (0x1EC3, "V"), + (0x1EC4, "M", "ễ"), + (0x1EC5, "V"), + (0x1EC6, "M", "ệ"), + (0x1EC7, "V"), + (0x1EC8, "M", "ỉ"), + (0x1EC9, "V"), + (0x1ECA, "M", "ị"), + (0x1ECB, "V"), + (0x1ECC, "M", "ọ"), + (0x1ECD, "V"), + (0x1ECE, "M", "ỏ"), + (0x1ECF, "V"), + (0x1ED0, "M", "ố"), + (0x1ED1, "V"), + (0x1ED2, "M", "ồ"), + (0x1ED3, "V"), + (0x1ED4, "M", "ổ"), + (0x1ED5, "V"), + (0x1ED6, "M", "ỗ"), + (0x1ED7, "V"), + (0x1ED8, "M", "ộ"), + (0x1ED9, "V"), + (0x1EDA, "M", "ớ"), + (0x1EDB, "V"), + (0x1EDC, "M", "ờ"), + (0x1EDD, "V"), + (0x1EDE, "M", "ở"), + (0x1EDF, "V"), + (0x1EE0, "M", "ỡ"), + (0x1EE1, "V"), + (0x1EE2, "M", "ợ"), + (0x1EE3, "V"), + (0x1EE4, "M", "ụ"), + (0x1EE5, "V"), + (0x1EE6, "M", "ủ"), + (0x1EE7, "V"), + (0x1EE8, "M", "ứ"), + (0x1EE9, "V"), + (0x1EEA, "M", "ừ"), + (0x1EEB, "V"), + (0x1EEC, "M", "ử"), + (0x1EED, "V"), + (0x1EEE, "M", "ữ"), + (0x1EEF, "V"), + (0x1EF0, "M", "ự"), + ] + + +def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EF1, "V"), + (0x1EF2, "M", "ỳ"), + (0x1EF3, "V"), + (0x1EF4, "M", "ỵ"), + (0x1EF5, "V"), + (0x1EF6, "M", "ỷ"), + (0x1EF7, "V"), + (0x1EF8, "M", "ỹ"), + (0x1EF9, "V"), + (0x1EFA, "M", "ỻ"), + (0x1EFB, "V"), + (0x1EFC, "M", "ỽ"), + (0x1EFD, "V"), + (0x1EFE, "M", 
"ỿ"), + (0x1EFF, "V"), + (0x1F08, "M", "ἀ"), + (0x1F09, "M", "ἁ"), + (0x1F0A, "M", "ἂ"), + (0x1F0B, "M", "ἃ"), + (0x1F0C, "M", "ἄ"), + (0x1F0D, "M", "ἅ"), + (0x1F0E, "M", "ἆ"), + (0x1F0F, "M", "ἇ"), + (0x1F10, "V"), + (0x1F16, "X"), + (0x1F18, "M", "ἐ"), + (0x1F19, "M", "ἑ"), + (0x1F1A, "M", "ἒ"), + (0x1F1B, "M", "ἓ"), + (0x1F1C, "M", "ἔ"), + (0x1F1D, "M", "ἕ"), + (0x1F1E, "X"), + (0x1F20, "V"), + (0x1F28, "M", "ἠ"), + (0x1F29, "M", "ἡ"), + (0x1F2A, "M", "ἢ"), + (0x1F2B, "M", "ἣ"), + (0x1F2C, "M", "ἤ"), + (0x1F2D, "M", "ἥ"), + (0x1F2E, "M", "ἦ"), + (0x1F2F, "M", "ἧ"), + (0x1F30, "V"), + (0x1F38, "M", "ἰ"), + (0x1F39, "M", "ἱ"), + (0x1F3A, "M", "ἲ"), + (0x1F3B, "M", "ἳ"), + (0x1F3C, "M", "ἴ"), + (0x1F3D, "M", "ἵ"), + (0x1F3E, "M", "ἶ"), + (0x1F3F, "M", "ἷ"), + (0x1F40, "V"), + (0x1F46, "X"), + (0x1F48, "M", "ὀ"), + (0x1F49, "M", "ὁ"), + (0x1F4A, "M", "ὂ"), + (0x1F4B, "M", "ὃ"), + (0x1F4C, "M", "ὄ"), + (0x1F4D, "M", "ὅ"), + (0x1F4E, "X"), + (0x1F50, "V"), + (0x1F58, "X"), + (0x1F59, "M", "ὑ"), + (0x1F5A, "X"), + (0x1F5B, "M", "ὓ"), + (0x1F5C, "X"), + (0x1F5D, "M", "ὕ"), + (0x1F5E, "X"), + (0x1F5F, "M", "ὗ"), + (0x1F60, "V"), + (0x1F68, "M", "ὠ"), + (0x1F69, "M", "ὡ"), + (0x1F6A, "M", "ὢ"), + (0x1F6B, "M", "ὣ"), + (0x1F6C, "M", "ὤ"), + (0x1F6D, "M", "ὥ"), + (0x1F6E, "M", "ὦ"), + (0x1F6F, "M", "ὧ"), + (0x1F70, "V"), + (0x1F71, "M", "ά"), + (0x1F72, "V"), + (0x1F73, "M", "έ"), + (0x1F74, "V"), + (0x1F75, "M", "ή"), + (0x1F76, "V"), + (0x1F77, "M", "ί"), + (0x1F78, "V"), + (0x1F79, "M", "ό"), + (0x1F7A, "V"), + (0x1F7B, "M", "ύ"), + (0x1F7C, "V"), + (0x1F7D, "M", "ώ"), + (0x1F7E, "X"), + (0x1F80, "M", "ἀι"), + (0x1F81, "M", "ἁι"), + (0x1F82, "M", "ἂι"), + (0x1F83, "M", "ἃι"), + (0x1F84, "M", "ἄι"), + (0x1F85, "M", "ἅι"), + (0x1F86, "M", "ἆι"), + (0x1F87, "M", "ἇι"), + ] + + +def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F88, "M", "ἀι"), + (0x1F89, "M", "ἁι"), + (0x1F8A, "M", "ἂι"), + (0x1F8B, "M", "ἃι"), + (0x1F8C, "M", "ἄι"), + (0x1F8D, "M", "ἅι"), + (0x1F8E, "M", "ἆι"), + (0x1F8F, "M", "ἇι"), + (0x1F90, "M", "ἠι"), + (0x1F91, "M", "ἡι"), + (0x1F92, "M", "ἢι"), + (0x1F93, "M", "ἣι"), + (0x1F94, "M", "ἤι"), + (0x1F95, "M", "ἥι"), + (0x1F96, "M", "ἦι"), + (0x1F97, "M", "ἧι"), + (0x1F98, "M", "ἠι"), + (0x1F99, "M", "ἡι"), + (0x1F9A, "M", "ἢι"), + (0x1F9B, "M", "ἣι"), + (0x1F9C, "M", "ἤι"), + (0x1F9D, "M", "ἥι"), + (0x1F9E, "M", "ἦι"), + (0x1F9F, "M", "ἧι"), + (0x1FA0, "M", "ὠι"), + (0x1FA1, "M", "ὡι"), + (0x1FA2, "M", "ὢι"), + (0x1FA3, "M", "ὣι"), + (0x1FA4, "M", "ὤι"), + (0x1FA5, "M", "ὥι"), + (0x1FA6, "M", "ὦι"), + (0x1FA7, "M", "ὧι"), + (0x1FA8, "M", "ὠι"), + (0x1FA9, "M", "ὡι"), + (0x1FAA, "M", "ὢι"), + (0x1FAB, "M", "ὣι"), + (0x1FAC, "M", "ὤι"), + (0x1FAD, "M", "ὥι"), + (0x1FAE, "M", "ὦι"), + (0x1FAF, "M", "ὧι"), + (0x1FB0, "V"), + (0x1FB2, "M", "ὰι"), + (0x1FB3, "M", "αι"), + (0x1FB4, "M", "άι"), + (0x1FB5, "X"), + (0x1FB6, "V"), + (0x1FB7, "M", "ᾶι"), + (0x1FB8, "M", "ᾰ"), + (0x1FB9, "M", "ᾱ"), + (0x1FBA, "M", "ὰ"), + (0x1FBB, "M", "ά"), + (0x1FBC, "M", "αι"), + (0x1FBD, "3", " ̓"), + (0x1FBE, "M", "ι"), + (0x1FBF, "3", " ̓"), + (0x1FC0, "3", " ͂"), + (0x1FC1, "3", " ̈͂"), + (0x1FC2, "M", "ὴι"), + (0x1FC3, "M", "ηι"), + (0x1FC4, "M", "ήι"), + (0x1FC5, "X"), + (0x1FC6, "V"), + (0x1FC7, "M", "ῆι"), + (0x1FC8, "M", "ὲ"), + (0x1FC9, "M", "έ"), + (0x1FCA, "M", "ὴ"), + (0x1FCB, "M", "ή"), + (0x1FCC, "M", "ηι"), + (0x1FCD, "3", " ̓̀"), + (0x1FCE, "3", " ̓́"), + (0x1FCF, "3", " ̓͂"), + (0x1FD0, "V"), + (0x1FD3, "M", "ΐ"), + (0x1FD4, "X"), + (0x1FD6, "V"), + 
(0x1FD8, "M", "ῐ"), + (0x1FD9, "M", "ῑ"), + (0x1FDA, "M", "ὶ"), + (0x1FDB, "M", "ί"), + (0x1FDC, "X"), + (0x1FDD, "3", " ̔̀"), + (0x1FDE, "3", " ̔́"), + (0x1FDF, "3", " ̔͂"), + (0x1FE0, "V"), + (0x1FE3, "M", "ΰ"), + (0x1FE4, "V"), + (0x1FE8, "M", "ῠ"), + (0x1FE9, "M", "ῡ"), + (0x1FEA, "M", "ὺ"), + (0x1FEB, "M", "ύ"), + (0x1FEC, "M", "ῥ"), + (0x1FED, "3", " ̈̀"), + (0x1FEE, "3", " ̈́"), + (0x1FEF, "3", "`"), + (0x1FF0, "X"), + (0x1FF2, "M", "ὼι"), + (0x1FF3, "M", "ωι"), + (0x1FF4, "M", "ώι"), + (0x1FF5, "X"), + (0x1FF6, "V"), + ] + + +def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1FF7, "M", "ῶι"), + (0x1FF8, "M", "ὸ"), + (0x1FF9, "M", "ό"), + (0x1FFA, "M", "ὼ"), + (0x1FFB, "M", "ώ"), + (0x1FFC, "M", "ωι"), + (0x1FFD, "3", " ́"), + (0x1FFE, "3", " ̔"), + (0x1FFF, "X"), + (0x2000, "3", " "), + (0x200B, "I"), + (0x200C, "D", ""), + (0x200E, "X"), + (0x2010, "V"), + (0x2011, "M", "‐"), + (0x2012, "V"), + (0x2017, "3", " ̳"), + (0x2018, "V"), + (0x2024, "X"), + (0x2027, "V"), + (0x2028, "X"), + (0x202F, "3", " "), + (0x2030, "V"), + (0x2033, "M", "′′"), + (0x2034, "M", "′′′"), + (0x2035, "V"), + (0x2036, "M", "‵‵"), + (0x2037, "M", "‵‵‵"), + (0x2038, "V"), + (0x203C, "3", "!!"), + (0x203D, "V"), + (0x203E, "3", " ̅"), + (0x203F, "V"), + (0x2047, "3", "??"), + (0x2048, "3", "?!"), + (0x2049, "3", "!?"), + (0x204A, "V"), + (0x2057, "M", "′′′′"), + (0x2058, "V"), + (0x205F, "3", " "), + (0x2060, "I"), + (0x2061, "X"), + (0x2064, "I"), + (0x2065, "X"), + (0x2070, "M", "0"), + (0x2071, "M", "i"), + (0x2072, "X"), + (0x2074, "M", "4"), + (0x2075, "M", "5"), + (0x2076, "M", "6"), + (0x2077, "M", "7"), + (0x2078, "M", "8"), + (0x2079, "M", "9"), + (0x207A, "3", "+"), + (0x207B, "M", "−"), + (0x207C, "3", "="), + (0x207D, "3", "("), + (0x207E, "3", ")"), + (0x207F, "M", "n"), + (0x2080, "M", "0"), + (0x2081, "M", "1"), + (0x2082, "M", "2"), + (0x2083, "M", "3"), + (0x2084, "M", "4"), + (0x2085, "M", "5"), + (0x2086, "M", "6"), + (0x2087, "M", "7"), + (0x2088, "M", "8"), + (0x2089, "M", "9"), + (0x208A, "3", "+"), + (0x208B, "M", "−"), + (0x208C, "3", "="), + (0x208D, "3", "("), + (0x208E, "3", ")"), + (0x208F, "X"), + (0x2090, "M", "a"), + (0x2091, "M", "e"), + (0x2092, "M", "o"), + (0x2093, "M", "x"), + (0x2094, "M", "ə"), + (0x2095, "M", "h"), + (0x2096, "M", "k"), + (0x2097, "M", "l"), + (0x2098, "M", "m"), + (0x2099, "M", "n"), + (0x209A, "M", "p"), + (0x209B, "M", "s"), + (0x209C, "M", "t"), + (0x209D, "X"), + (0x20A0, "V"), + (0x20A8, "M", "rs"), + (0x20A9, "V"), + (0x20C1, "X"), + (0x20D0, "V"), + (0x20F1, "X"), + (0x2100, "3", "a/c"), + (0x2101, "3", "a/s"), + (0x2102, "M", "c"), + (0x2103, "M", "°c"), + (0x2104, "V"), + ] + + +def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2105, "3", "c/o"), + (0x2106, "3", "c/u"), + (0x2107, "M", "ɛ"), + (0x2108, "V"), + (0x2109, "M", "°f"), + (0x210A, "M", "g"), + (0x210B, "M", "h"), + (0x210F, "M", "ħ"), + (0x2110, "M", "i"), + (0x2112, "M", "l"), + (0x2114, "V"), + (0x2115, "M", "n"), + (0x2116, "M", "no"), + (0x2117, "V"), + (0x2119, "M", "p"), + (0x211A, "M", "q"), + (0x211B, "M", "r"), + (0x211E, "V"), + (0x2120, "M", "sm"), + (0x2121, "M", "tel"), + (0x2122, "M", "tm"), + (0x2123, "V"), + (0x2124, "M", "z"), + (0x2125, "V"), + (0x2126, "M", "ω"), + (0x2127, "V"), + (0x2128, "M", "z"), + (0x2129, "V"), + (0x212A, "M", "k"), + (0x212B, "M", "å"), + (0x212C, "M", "b"), + (0x212D, "M", "c"), + (0x212E, "V"), + (0x212F, "M", "e"), + (0x2131, "M", "f"), + (0x2132, "X"), + (0x2133, 
"M", "m"), + (0x2134, "M", "o"), + (0x2135, "M", "א"), + (0x2136, "M", "ב"), + (0x2137, "M", "ג"), + (0x2138, "M", "ד"), + (0x2139, "M", "i"), + (0x213A, "V"), + (0x213B, "M", "fax"), + (0x213C, "M", "π"), + (0x213D, "M", "γ"), + (0x213F, "M", "π"), + (0x2140, "M", "∑"), + (0x2141, "V"), + (0x2145, "M", "d"), + (0x2147, "M", "e"), + (0x2148, "M", "i"), + (0x2149, "M", "j"), + (0x214A, "V"), + (0x2150, "M", "1⁄7"), + (0x2151, "M", "1⁄9"), + (0x2152, "M", "1⁄10"), + (0x2153, "M", "1⁄3"), + (0x2154, "M", "2⁄3"), + (0x2155, "M", "1⁄5"), + (0x2156, "M", "2⁄5"), + (0x2157, "M", "3⁄5"), + (0x2158, "M", "4⁄5"), + (0x2159, "M", "1⁄6"), + (0x215A, "M", "5⁄6"), + (0x215B, "M", "1⁄8"), + (0x215C, "M", "3⁄8"), + (0x215D, "M", "5⁄8"), + (0x215E, "M", "7⁄8"), + (0x215F, "M", "1⁄"), + (0x2160, "M", "i"), + (0x2161, "M", "ii"), + (0x2162, "M", "iii"), + (0x2163, "M", "iv"), + (0x2164, "M", "v"), + (0x2165, "M", "vi"), + (0x2166, "M", "vii"), + (0x2167, "M", "viii"), + (0x2168, "M", "ix"), + (0x2169, "M", "x"), + (0x216A, "M", "xi"), + (0x216B, "M", "xii"), + (0x216C, "M", "l"), + (0x216D, "M", "c"), + (0x216E, "M", "d"), + (0x216F, "M", "m"), + (0x2170, "M", "i"), + (0x2171, "M", "ii"), + (0x2172, "M", "iii"), + (0x2173, "M", "iv"), + (0x2174, "M", "v"), + (0x2175, "M", "vi"), + (0x2176, "M", "vii"), + (0x2177, "M", "viii"), + (0x2178, "M", "ix"), + (0x2179, "M", "x"), + (0x217A, "M", "xi"), + (0x217B, "M", "xii"), + (0x217C, "M", "l"), + ] + + +def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x217D, "M", "c"), + (0x217E, "M", "d"), + (0x217F, "M", "m"), + (0x2180, "V"), + (0x2183, "X"), + (0x2184, "V"), + (0x2189, "M", "0⁄3"), + (0x218A, "V"), + (0x218C, "X"), + (0x2190, "V"), + (0x222C, "M", "∫∫"), + (0x222D, "M", "∫∫∫"), + (0x222E, "V"), + (0x222F, "M", "∮∮"), + (0x2230, "M", "∮∮∮"), + (0x2231, "V"), + (0x2329, "M", "〈"), + (0x232A, "M", "〉"), + (0x232B, "V"), + (0x2427, "X"), + (0x2440, "V"), + (0x244B, "X"), + (0x2460, "M", "1"), + (0x2461, "M", "2"), + (0x2462, "M", "3"), + (0x2463, "M", "4"), + (0x2464, "M", "5"), + (0x2465, "M", "6"), + (0x2466, "M", "7"), + (0x2467, "M", "8"), + (0x2468, "M", "9"), + (0x2469, "M", "10"), + (0x246A, "M", "11"), + (0x246B, "M", "12"), + (0x246C, "M", "13"), + (0x246D, "M", "14"), + (0x246E, "M", "15"), + (0x246F, "M", "16"), + (0x2470, "M", "17"), + (0x2471, "M", "18"), + (0x2472, "M", "19"), + (0x2473, "M", "20"), + (0x2474, "3", "(1)"), + (0x2475, "3", "(2)"), + (0x2476, "3", "(3)"), + (0x2477, "3", "(4)"), + (0x2478, "3", "(5)"), + (0x2479, "3", "(6)"), + (0x247A, "3", "(7)"), + (0x247B, "3", "(8)"), + (0x247C, "3", "(9)"), + (0x247D, "3", "(10)"), + (0x247E, "3", "(11)"), + (0x247F, "3", "(12)"), + (0x2480, "3", "(13)"), + (0x2481, "3", "(14)"), + (0x2482, "3", "(15)"), + (0x2483, "3", "(16)"), + (0x2484, "3", "(17)"), + (0x2485, "3", "(18)"), + (0x2486, "3", "(19)"), + (0x2487, "3", "(20)"), + (0x2488, "X"), + (0x249C, "3", "(a)"), + (0x249D, "3", "(b)"), + (0x249E, "3", "(c)"), + (0x249F, "3", "(d)"), + (0x24A0, "3", "(e)"), + (0x24A1, "3", "(f)"), + (0x24A2, "3", "(g)"), + (0x24A3, "3", "(h)"), + (0x24A4, "3", "(i)"), + (0x24A5, "3", "(j)"), + (0x24A6, "3", "(k)"), + (0x24A7, "3", "(l)"), + (0x24A8, "3", "(m)"), + (0x24A9, "3", "(n)"), + (0x24AA, "3", "(o)"), + (0x24AB, "3", "(p)"), + (0x24AC, "3", "(q)"), + (0x24AD, "3", "(r)"), + (0x24AE, "3", "(s)"), + (0x24AF, "3", "(t)"), + (0x24B0, "3", "(u)"), + (0x24B1, "3", "(v)"), + (0x24B2, "3", "(w)"), + (0x24B3, "3", "(x)"), + (0x24B4, "3", "(y)"), + (0x24B5, "3", "(z)"), + 
(0x24B6, "M", "a"), + (0x24B7, "M", "b"), + (0x24B8, "M", "c"), + (0x24B9, "M", "d"), + (0x24BA, "M", "e"), + (0x24BB, "M", "f"), + (0x24BC, "M", "g"), + (0x24BD, "M", "h"), + (0x24BE, "M", "i"), + (0x24BF, "M", "j"), + (0x24C0, "M", "k"), + ] + + +def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x24C1, "M", "l"), + (0x24C2, "M", "m"), + (0x24C3, "M", "n"), + (0x24C4, "M", "o"), + (0x24C5, "M", "p"), + (0x24C6, "M", "q"), + (0x24C7, "M", "r"), + (0x24C8, "M", "s"), + (0x24C9, "M", "t"), + (0x24CA, "M", "u"), + (0x24CB, "M", "v"), + (0x24CC, "M", "w"), + (0x24CD, "M", "x"), + (0x24CE, "M", "y"), + (0x24CF, "M", "z"), + (0x24D0, "M", "a"), + (0x24D1, "M", "b"), + (0x24D2, "M", "c"), + (0x24D3, "M", "d"), + (0x24D4, "M", "e"), + (0x24D5, "M", "f"), + (0x24D6, "M", "g"), + (0x24D7, "M", "h"), + (0x24D8, "M", "i"), + (0x24D9, "M", "j"), + (0x24DA, "M", "k"), + (0x24DB, "M", "l"), + (0x24DC, "M", "m"), + (0x24DD, "M", "n"), + (0x24DE, "M", "o"), + (0x24DF, "M", "p"), + (0x24E0, "M", "q"), + (0x24E1, "M", "r"), + (0x24E2, "M", "s"), + (0x24E3, "M", "t"), + (0x24E4, "M", "u"), + (0x24E5, "M", "v"), + (0x24E6, "M", "w"), + (0x24E7, "M", "x"), + (0x24E8, "M", "y"), + (0x24E9, "M", "z"), + (0x24EA, "M", "0"), + (0x24EB, "V"), + (0x2A0C, "M", "∫∫∫∫"), + (0x2A0D, "V"), + (0x2A74, "3", "::="), + (0x2A75, "3", "=="), + (0x2A76, "3", "==="), + (0x2A77, "V"), + (0x2ADC, "M", "⫝̸"), + (0x2ADD, "V"), + (0x2B74, "X"), + (0x2B76, "V"), + (0x2B96, "X"), + (0x2B97, "V"), + (0x2C00, "M", "ⰰ"), + (0x2C01, "M", "ⰱ"), + (0x2C02, "M", "ⰲ"), + (0x2C03, "M", "ⰳ"), + (0x2C04, "M", "ⰴ"), + (0x2C05, "M", "ⰵ"), + (0x2C06, "M", "ⰶ"), + (0x2C07, "M", "ⰷ"), + (0x2C08, "M", "ⰸ"), + (0x2C09, "M", "ⰹ"), + (0x2C0A, "M", "ⰺ"), + (0x2C0B, "M", "ⰻ"), + (0x2C0C, "M", "ⰼ"), + (0x2C0D, "M", "ⰽ"), + (0x2C0E, "M", "ⰾ"), + (0x2C0F, "M", "ⰿ"), + (0x2C10, "M", "ⱀ"), + (0x2C11, "M", "ⱁ"), + (0x2C12, "M", "ⱂ"), + (0x2C13, "M", "ⱃ"), + (0x2C14, "M", "ⱄ"), + (0x2C15, "M", "ⱅ"), + (0x2C16, "M", "ⱆ"), + (0x2C17, "M", "ⱇ"), + (0x2C18, "M", "ⱈ"), + (0x2C19, "M", "ⱉ"), + (0x2C1A, "M", "ⱊ"), + (0x2C1B, "M", "ⱋ"), + (0x2C1C, "M", "ⱌ"), + (0x2C1D, "M", "ⱍ"), + (0x2C1E, "M", "ⱎ"), + (0x2C1F, "M", "ⱏ"), + (0x2C20, "M", "ⱐ"), + (0x2C21, "M", "ⱑ"), + (0x2C22, "M", "ⱒ"), + (0x2C23, "M", "ⱓ"), + (0x2C24, "M", "ⱔ"), + (0x2C25, "M", "ⱕ"), + (0x2C26, "M", "ⱖ"), + (0x2C27, "M", "ⱗ"), + (0x2C28, "M", "ⱘ"), + (0x2C29, "M", "ⱙ"), + (0x2C2A, "M", "ⱚ"), + (0x2C2B, "M", "ⱛ"), + (0x2C2C, "M", "ⱜ"), + ] + + +def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2C2D, "M", "ⱝ"), + (0x2C2E, "M", "ⱞ"), + (0x2C2F, "M", "ⱟ"), + (0x2C30, "V"), + (0x2C60, "M", "ⱡ"), + (0x2C61, "V"), + (0x2C62, "M", "ɫ"), + (0x2C63, "M", "ᵽ"), + (0x2C64, "M", "ɽ"), + (0x2C65, "V"), + (0x2C67, "M", "ⱨ"), + (0x2C68, "V"), + (0x2C69, "M", "ⱪ"), + (0x2C6A, "V"), + (0x2C6B, "M", "ⱬ"), + (0x2C6C, "V"), + (0x2C6D, "M", "ɑ"), + (0x2C6E, "M", "ɱ"), + (0x2C6F, "M", "ɐ"), + (0x2C70, "M", "ɒ"), + (0x2C71, "V"), + (0x2C72, "M", "ⱳ"), + (0x2C73, "V"), + (0x2C75, "M", "ⱶ"), + (0x2C76, "V"), + (0x2C7C, "M", "j"), + (0x2C7D, "M", "v"), + (0x2C7E, "M", "ȿ"), + (0x2C7F, "M", "ɀ"), + (0x2C80, "M", "ⲁ"), + (0x2C81, "V"), + (0x2C82, "M", "ⲃ"), + (0x2C83, "V"), + (0x2C84, "M", "ⲅ"), + (0x2C85, "V"), + (0x2C86, "M", "ⲇ"), + (0x2C87, "V"), + (0x2C88, "M", "ⲉ"), + (0x2C89, "V"), + (0x2C8A, "M", "ⲋ"), + (0x2C8B, "V"), + (0x2C8C, "M", "ⲍ"), + (0x2C8D, "V"), + (0x2C8E, "M", "ⲏ"), + (0x2C8F, "V"), + (0x2C90, "M", "ⲑ"), + (0x2C91, "V"), + (0x2C92, "M", "ⲓ"), + 
(0x2C93, "V"), + (0x2C94, "M", "ⲕ"), + (0x2C95, "V"), + (0x2C96, "M", "ⲗ"), + (0x2C97, "V"), + (0x2C98, "M", "ⲙ"), + (0x2C99, "V"), + (0x2C9A, "M", "ⲛ"), + (0x2C9B, "V"), + (0x2C9C, "M", "ⲝ"), + (0x2C9D, "V"), + (0x2C9E, "M", "ⲟ"), + (0x2C9F, "V"), + (0x2CA0, "M", "ⲡ"), + (0x2CA1, "V"), + (0x2CA2, "M", "ⲣ"), + (0x2CA3, "V"), + (0x2CA4, "M", "ⲥ"), + (0x2CA5, "V"), + (0x2CA6, "M", "ⲧ"), + (0x2CA7, "V"), + (0x2CA8, "M", "ⲩ"), + (0x2CA9, "V"), + (0x2CAA, "M", "ⲫ"), + (0x2CAB, "V"), + (0x2CAC, "M", "ⲭ"), + (0x2CAD, "V"), + (0x2CAE, "M", "ⲯ"), + (0x2CAF, "V"), + (0x2CB0, "M", "ⲱ"), + (0x2CB1, "V"), + (0x2CB2, "M", "ⲳ"), + (0x2CB3, "V"), + (0x2CB4, "M", "ⲵ"), + (0x2CB5, "V"), + (0x2CB6, "M", "ⲷ"), + (0x2CB7, "V"), + (0x2CB8, "M", "ⲹ"), + (0x2CB9, "V"), + (0x2CBA, "M", "ⲻ"), + (0x2CBB, "V"), + (0x2CBC, "M", "ⲽ"), + (0x2CBD, "V"), + (0x2CBE, "M", "ⲿ"), + (0x2CBF, "V"), + (0x2CC0, "M", "ⳁ"), + (0x2CC1, "V"), + (0x2CC2, "M", "ⳃ"), + (0x2CC3, "V"), + (0x2CC4, "M", "ⳅ"), + (0x2CC5, "V"), + (0x2CC6, "M", "ⳇ"), + ] + + +def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2CC7, "V"), + (0x2CC8, "M", "ⳉ"), + (0x2CC9, "V"), + (0x2CCA, "M", "ⳋ"), + (0x2CCB, "V"), + (0x2CCC, "M", "ⳍ"), + (0x2CCD, "V"), + (0x2CCE, "M", "ⳏ"), + (0x2CCF, "V"), + (0x2CD0, "M", "ⳑ"), + (0x2CD1, "V"), + (0x2CD2, "M", "ⳓ"), + (0x2CD3, "V"), + (0x2CD4, "M", "ⳕ"), + (0x2CD5, "V"), + (0x2CD6, "M", "ⳗ"), + (0x2CD7, "V"), + (0x2CD8, "M", "ⳙ"), + (0x2CD9, "V"), + (0x2CDA, "M", "ⳛ"), + (0x2CDB, "V"), + (0x2CDC, "M", "ⳝ"), + (0x2CDD, "V"), + (0x2CDE, "M", "ⳟ"), + (0x2CDF, "V"), + (0x2CE0, "M", "ⳡ"), + (0x2CE1, "V"), + (0x2CE2, "M", "ⳣ"), + (0x2CE3, "V"), + (0x2CEB, "M", "ⳬ"), + (0x2CEC, "V"), + (0x2CED, "M", "ⳮ"), + (0x2CEE, "V"), + (0x2CF2, "M", "ⳳ"), + (0x2CF3, "V"), + (0x2CF4, "X"), + (0x2CF9, "V"), + (0x2D26, "X"), + (0x2D27, "V"), + (0x2D28, "X"), + (0x2D2D, "V"), + (0x2D2E, "X"), + (0x2D30, "V"), + (0x2D68, "X"), + (0x2D6F, "M", "ⵡ"), + (0x2D70, "V"), + (0x2D71, "X"), + (0x2D7F, "V"), + (0x2D97, "X"), + (0x2DA0, "V"), + (0x2DA7, "X"), + (0x2DA8, "V"), + (0x2DAF, "X"), + (0x2DB0, "V"), + (0x2DB7, "X"), + (0x2DB8, "V"), + (0x2DBF, "X"), + (0x2DC0, "V"), + (0x2DC7, "X"), + (0x2DC8, "V"), + (0x2DCF, "X"), + (0x2DD0, "V"), + (0x2DD7, "X"), + (0x2DD8, "V"), + (0x2DDF, "X"), + (0x2DE0, "V"), + (0x2E5E, "X"), + (0x2E80, "V"), + (0x2E9A, "X"), + (0x2E9B, "V"), + (0x2E9F, "M", "母"), + (0x2EA0, "V"), + (0x2EF3, "M", "龟"), + (0x2EF4, "X"), + (0x2F00, "M", "一"), + (0x2F01, "M", "丨"), + (0x2F02, "M", "丶"), + (0x2F03, "M", "丿"), + (0x2F04, "M", "乙"), + (0x2F05, "M", "亅"), + (0x2F06, "M", "二"), + (0x2F07, "M", "亠"), + (0x2F08, "M", "人"), + (0x2F09, "M", "儿"), + (0x2F0A, "M", "入"), + (0x2F0B, "M", "八"), + (0x2F0C, "M", "冂"), + (0x2F0D, "M", "冖"), + (0x2F0E, "M", "冫"), + (0x2F0F, "M", "几"), + (0x2F10, "M", "凵"), + (0x2F11, "M", "刀"), + (0x2F12, "M", "力"), + (0x2F13, "M", "勹"), + (0x2F14, "M", "匕"), + (0x2F15, "M", "匚"), + (0x2F16, "M", "匸"), + (0x2F17, "M", "十"), + (0x2F18, "M", "卜"), + (0x2F19, "M", "卩"), + ] + + +def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F1A, "M", "厂"), + (0x2F1B, "M", "厶"), + (0x2F1C, "M", "又"), + (0x2F1D, "M", "口"), + (0x2F1E, "M", "囗"), + (0x2F1F, "M", "土"), + (0x2F20, "M", "士"), + (0x2F21, "M", "夂"), + (0x2F22, "M", "夊"), + (0x2F23, "M", "夕"), + (0x2F24, "M", "大"), + (0x2F25, "M", "女"), + (0x2F26, "M", "子"), + (0x2F27, "M", "宀"), + (0x2F28, "M", "寸"), + (0x2F29, "M", "小"), + (0x2F2A, "M", "尢"), + (0x2F2B, "M", "尸"), + (0x2F2C, "M", "屮"), + (0x2F2D, "M", 
"山"), + (0x2F2E, "M", "巛"), + (0x2F2F, "M", "工"), + (0x2F30, "M", "己"), + (0x2F31, "M", "巾"), + (0x2F32, "M", "干"), + (0x2F33, "M", "幺"), + (0x2F34, "M", "广"), + (0x2F35, "M", "廴"), + (0x2F36, "M", "廾"), + (0x2F37, "M", "弋"), + (0x2F38, "M", "弓"), + (0x2F39, "M", "彐"), + (0x2F3A, "M", "彡"), + (0x2F3B, "M", "彳"), + (0x2F3C, "M", "心"), + (0x2F3D, "M", "戈"), + (0x2F3E, "M", "戶"), + (0x2F3F, "M", "手"), + (0x2F40, "M", "支"), + (0x2F41, "M", "攴"), + (0x2F42, "M", "文"), + (0x2F43, "M", "斗"), + (0x2F44, "M", "斤"), + (0x2F45, "M", "方"), + (0x2F46, "M", "无"), + (0x2F47, "M", "日"), + (0x2F48, "M", "曰"), + (0x2F49, "M", "月"), + (0x2F4A, "M", "木"), + (0x2F4B, "M", "欠"), + (0x2F4C, "M", "止"), + (0x2F4D, "M", "歹"), + (0x2F4E, "M", "殳"), + (0x2F4F, "M", "毋"), + (0x2F50, "M", "比"), + (0x2F51, "M", "毛"), + (0x2F52, "M", "氏"), + (0x2F53, "M", "气"), + (0x2F54, "M", "水"), + (0x2F55, "M", "火"), + (0x2F56, "M", "爪"), + (0x2F57, "M", "父"), + (0x2F58, "M", "爻"), + (0x2F59, "M", "爿"), + (0x2F5A, "M", "片"), + (0x2F5B, "M", "牙"), + (0x2F5C, "M", "牛"), + (0x2F5D, "M", "犬"), + (0x2F5E, "M", "玄"), + (0x2F5F, "M", "玉"), + (0x2F60, "M", "瓜"), + (0x2F61, "M", "瓦"), + (0x2F62, "M", "甘"), + (0x2F63, "M", "生"), + (0x2F64, "M", "用"), + (0x2F65, "M", "田"), + (0x2F66, "M", "疋"), + (0x2F67, "M", "疒"), + (0x2F68, "M", "癶"), + (0x2F69, "M", "白"), + (0x2F6A, "M", "皮"), + (0x2F6B, "M", "皿"), + (0x2F6C, "M", "目"), + (0x2F6D, "M", "矛"), + (0x2F6E, "M", "矢"), + (0x2F6F, "M", "石"), + (0x2F70, "M", "示"), + (0x2F71, "M", "禸"), + (0x2F72, "M", "禾"), + (0x2F73, "M", "穴"), + (0x2F74, "M", "立"), + (0x2F75, "M", "竹"), + (0x2F76, "M", "米"), + (0x2F77, "M", "糸"), + (0x2F78, "M", "缶"), + (0x2F79, "M", "网"), + (0x2F7A, "M", "羊"), + (0x2F7B, "M", "羽"), + (0x2F7C, "M", "老"), + (0x2F7D, "M", "而"), + ] + + +def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F7E, "M", "耒"), + (0x2F7F, "M", "耳"), + (0x2F80, "M", "聿"), + (0x2F81, "M", "肉"), + (0x2F82, "M", "臣"), + (0x2F83, "M", "自"), + (0x2F84, "M", "至"), + (0x2F85, "M", "臼"), + (0x2F86, "M", "舌"), + (0x2F87, "M", "舛"), + (0x2F88, "M", "舟"), + (0x2F89, "M", "艮"), + (0x2F8A, "M", "色"), + (0x2F8B, "M", "艸"), + (0x2F8C, "M", "虍"), + (0x2F8D, "M", "虫"), + (0x2F8E, "M", "血"), + (0x2F8F, "M", "行"), + (0x2F90, "M", "衣"), + (0x2F91, "M", "襾"), + (0x2F92, "M", "見"), + (0x2F93, "M", "角"), + (0x2F94, "M", "言"), + (0x2F95, "M", "谷"), + (0x2F96, "M", "豆"), + (0x2F97, "M", "豕"), + (0x2F98, "M", "豸"), + (0x2F99, "M", "貝"), + (0x2F9A, "M", "赤"), + (0x2F9B, "M", "走"), + (0x2F9C, "M", "足"), + (0x2F9D, "M", "身"), + (0x2F9E, "M", "車"), + (0x2F9F, "M", "辛"), + (0x2FA0, "M", "辰"), + (0x2FA1, "M", "辵"), + (0x2FA2, "M", "邑"), + (0x2FA3, "M", "酉"), + (0x2FA4, "M", "釆"), + (0x2FA5, "M", "里"), + (0x2FA6, "M", "金"), + (0x2FA7, "M", "長"), + (0x2FA8, "M", "門"), + (0x2FA9, "M", "阜"), + (0x2FAA, "M", "隶"), + (0x2FAB, "M", "隹"), + (0x2FAC, "M", "雨"), + (0x2FAD, "M", "靑"), + (0x2FAE, "M", "非"), + (0x2FAF, "M", "面"), + (0x2FB0, "M", "革"), + (0x2FB1, "M", "韋"), + (0x2FB2, "M", "韭"), + (0x2FB3, "M", "音"), + (0x2FB4, "M", "頁"), + (0x2FB5, "M", "風"), + (0x2FB6, "M", "飛"), + (0x2FB7, "M", "食"), + (0x2FB8, "M", "首"), + (0x2FB9, "M", "香"), + (0x2FBA, "M", "馬"), + (0x2FBB, "M", "骨"), + (0x2FBC, "M", "高"), + (0x2FBD, "M", "髟"), + (0x2FBE, "M", "鬥"), + (0x2FBF, "M", "鬯"), + (0x2FC0, "M", "鬲"), + (0x2FC1, "M", "鬼"), + (0x2FC2, "M", "魚"), + (0x2FC3, "M", "鳥"), + (0x2FC4, "M", "鹵"), + (0x2FC5, "M", "鹿"), + (0x2FC6, "M", "麥"), + (0x2FC7, "M", "麻"), + (0x2FC8, "M", "黃"), + (0x2FC9, "M", "黍"), + (0x2FCA, "M", "黑"), + 
(0x2FCB, "M", "黹"), + (0x2FCC, "M", "黽"), + (0x2FCD, "M", "鼎"), + (0x2FCE, "M", "鼓"), + (0x2FCF, "M", "鼠"), + (0x2FD0, "M", "鼻"), + (0x2FD1, "M", "齊"), + (0x2FD2, "M", "齒"), + (0x2FD3, "M", "龍"), + (0x2FD4, "M", "龜"), + (0x2FD5, "M", "龠"), + (0x2FD6, "X"), + (0x3000, "3", " "), + (0x3001, "V"), + (0x3002, "M", "."), + (0x3003, "V"), + (0x3036, "M", "〒"), + (0x3037, "V"), + (0x3038, "M", "十"), + (0x3039, "M", "卄"), + (0x303A, "M", "卅"), + (0x303B, "V"), + (0x3040, "X"), + ] + + +def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3041, "V"), + (0x3097, "X"), + (0x3099, "V"), + (0x309B, "3", " ゙"), + (0x309C, "3", " ゚"), + (0x309D, "V"), + (0x309F, "M", "より"), + (0x30A0, "V"), + (0x30FF, "M", "コト"), + (0x3100, "X"), + (0x3105, "V"), + (0x3130, "X"), + (0x3131, "M", "ᄀ"), + (0x3132, "M", "ᄁ"), + (0x3133, "M", "ᆪ"), + (0x3134, "M", "ᄂ"), + (0x3135, "M", "ᆬ"), + (0x3136, "M", "ᆭ"), + (0x3137, "M", "ᄃ"), + (0x3138, "M", "ᄄ"), + (0x3139, "M", "ᄅ"), + (0x313A, "M", "ᆰ"), + (0x313B, "M", "ᆱ"), + (0x313C, "M", "ᆲ"), + (0x313D, "M", "ᆳ"), + (0x313E, "M", "ᆴ"), + (0x313F, "M", "ᆵ"), + (0x3140, "M", "ᄚ"), + (0x3141, "M", "ᄆ"), + (0x3142, "M", "ᄇ"), + (0x3143, "M", "ᄈ"), + (0x3144, "M", "ᄡ"), + (0x3145, "M", "ᄉ"), + (0x3146, "M", "ᄊ"), + (0x3147, "M", "ᄋ"), + (0x3148, "M", "ᄌ"), + (0x3149, "M", "ᄍ"), + (0x314A, "M", "ᄎ"), + (0x314B, "M", "ᄏ"), + (0x314C, "M", "ᄐ"), + (0x314D, "M", "ᄑ"), + (0x314E, "M", "ᄒ"), + (0x314F, "M", "ᅡ"), + (0x3150, "M", "ᅢ"), + (0x3151, "M", "ᅣ"), + (0x3152, "M", "ᅤ"), + (0x3153, "M", "ᅥ"), + (0x3154, "M", "ᅦ"), + (0x3155, "M", "ᅧ"), + (0x3156, "M", "ᅨ"), + (0x3157, "M", "ᅩ"), + (0x3158, "M", "ᅪ"), + (0x3159, "M", "ᅫ"), + (0x315A, "M", "ᅬ"), + (0x315B, "M", "ᅭ"), + (0x315C, "M", "ᅮ"), + (0x315D, "M", "ᅯ"), + (0x315E, "M", "ᅰ"), + (0x315F, "M", "ᅱ"), + (0x3160, "M", "ᅲ"), + (0x3161, "M", "ᅳ"), + (0x3162, "M", "ᅴ"), + (0x3163, "M", "ᅵ"), + (0x3164, "X"), + (0x3165, "M", "ᄔ"), + (0x3166, "M", "ᄕ"), + (0x3167, "M", "ᇇ"), + (0x3168, "M", "ᇈ"), + (0x3169, "M", "ᇌ"), + (0x316A, "M", "ᇎ"), + (0x316B, "M", "ᇓ"), + (0x316C, "M", "ᇗ"), + (0x316D, "M", "ᇙ"), + (0x316E, "M", "ᄜ"), + (0x316F, "M", "ᇝ"), + (0x3170, "M", "ᇟ"), + (0x3171, "M", "ᄝ"), + (0x3172, "M", "ᄞ"), + (0x3173, "M", "ᄠ"), + (0x3174, "M", "ᄢ"), + (0x3175, "M", "ᄣ"), + (0x3176, "M", "ᄧ"), + (0x3177, "M", "ᄩ"), + (0x3178, "M", "ᄫ"), + (0x3179, "M", "ᄬ"), + (0x317A, "M", "ᄭ"), + (0x317B, "M", "ᄮ"), + (0x317C, "M", "ᄯ"), + (0x317D, "M", "ᄲ"), + (0x317E, "M", "ᄶ"), + (0x317F, "M", "ᅀ"), + (0x3180, "M", "ᅇ"), + (0x3181, "M", "ᅌ"), + (0x3182, "M", "ᇱ"), + (0x3183, "M", "ᇲ"), + (0x3184, "M", "ᅗ"), + (0x3185, "M", "ᅘ"), + (0x3186, "M", "ᅙ"), + (0x3187, "M", "ᆄ"), + (0x3188, "M", "ᆅ"), + ] + + +def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3189, "M", "ᆈ"), + (0x318A, "M", "ᆑ"), + (0x318B, "M", "ᆒ"), + (0x318C, "M", "ᆔ"), + (0x318D, "M", "ᆞ"), + (0x318E, "M", "ᆡ"), + (0x318F, "X"), + (0x3190, "V"), + (0x3192, "M", "一"), + (0x3193, "M", "二"), + (0x3194, "M", "三"), + (0x3195, "M", "四"), + (0x3196, "M", "上"), + (0x3197, "M", "中"), + (0x3198, "M", "下"), + (0x3199, "M", "甲"), + (0x319A, "M", "乙"), + (0x319B, "M", "丙"), + (0x319C, "M", "丁"), + (0x319D, "M", "天"), + (0x319E, "M", "地"), + (0x319F, "M", "人"), + (0x31A0, "V"), + (0x31E4, "X"), + (0x31F0, "V"), + (0x3200, "3", "(ᄀ)"), + (0x3201, "3", "(ᄂ)"), + (0x3202, "3", "(ᄃ)"), + (0x3203, "3", "(ᄅ)"), + (0x3204, "3", "(ᄆ)"), + (0x3205, "3", "(ᄇ)"), + (0x3206, "3", "(ᄉ)"), + (0x3207, "3", "(ᄋ)"), + (0x3208, "3", "(ᄌ)"), + 
(0x3209, "3", "(ᄎ)"), + (0x320A, "3", "(ᄏ)"), + (0x320B, "3", "(ᄐ)"), + (0x320C, "3", "(ᄑ)"), + (0x320D, "3", "(ᄒ)"), + (0x320E, "3", "(가)"), + (0x320F, "3", "(나)"), + (0x3210, "3", "(다)"), + (0x3211, "3", "(라)"), + (0x3212, "3", "(마)"), + (0x3213, "3", "(바)"), + (0x3214, "3", "(사)"), + (0x3215, "3", "(아)"), + (0x3216, "3", "(자)"), + (0x3217, "3", "(차)"), + (0x3218, "3", "(카)"), + (0x3219, "3", "(타)"), + (0x321A, "3", "(파)"), + (0x321B, "3", "(하)"), + (0x321C, "3", "(주)"), + (0x321D, "3", "(오전)"), + (0x321E, "3", "(오후)"), + (0x321F, "X"), + (0x3220, "3", "(一)"), + (0x3221, "3", "(二)"), + (0x3222, "3", "(三)"), + (0x3223, "3", "(四)"), + (0x3224, "3", "(五)"), + (0x3225, "3", "(六)"), + (0x3226, "3", "(七)"), + (0x3227, "3", "(八)"), + (0x3228, "3", "(九)"), + (0x3229, "3", "(十)"), + (0x322A, "3", "(月)"), + (0x322B, "3", "(火)"), + (0x322C, "3", "(水)"), + (0x322D, "3", "(木)"), + (0x322E, "3", "(金)"), + (0x322F, "3", "(土)"), + (0x3230, "3", "(日)"), + (0x3231, "3", "(株)"), + (0x3232, "3", "(有)"), + (0x3233, "3", "(社)"), + (0x3234, "3", "(名)"), + (0x3235, "3", "(特)"), + (0x3236, "3", "(財)"), + (0x3237, "3", "(祝)"), + (0x3238, "3", "(労)"), + (0x3239, "3", "(代)"), + (0x323A, "3", "(呼)"), + (0x323B, "3", "(学)"), + (0x323C, "3", "(監)"), + (0x323D, "3", "(企)"), + (0x323E, "3", "(資)"), + (0x323F, "3", "(協)"), + (0x3240, "3", "(祭)"), + (0x3241, "3", "(休)"), + (0x3242, "3", "(自)"), + (0x3243, "3", "(至)"), + (0x3244, "M", "問"), + (0x3245, "M", "幼"), + (0x3246, "M", "文"), + (0x3247, "M", "箏"), + (0x3248, "V"), + (0x3250, "M", "pte"), + (0x3251, "M", "21"), + ] + + +def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3252, "M", "22"), + (0x3253, "M", "23"), + (0x3254, "M", "24"), + (0x3255, "M", "25"), + (0x3256, "M", "26"), + (0x3257, "M", "27"), + (0x3258, "M", "28"), + (0x3259, "M", "29"), + (0x325A, "M", "30"), + (0x325B, "M", "31"), + (0x325C, "M", "32"), + (0x325D, "M", "33"), + (0x325E, "M", "34"), + (0x325F, "M", "35"), + (0x3260, "M", "ᄀ"), + (0x3261, "M", "ᄂ"), + (0x3262, "M", "ᄃ"), + (0x3263, "M", "ᄅ"), + (0x3264, "M", "ᄆ"), + (0x3265, "M", "ᄇ"), + (0x3266, "M", "ᄉ"), + (0x3267, "M", "ᄋ"), + (0x3268, "M", "ᄌ"), + (0x3269, "M", "ᄎ"), + (0x326A, "M", "ᄏ"), + (0x326B, "M", "ᄐ"), + (0x326C, "M", "ᄑ"), + (0x326D, "M", "ᄒ"), + (0x326E, "M", "가"), + (0x326F, "M", "나"), + (0x3270, "M", "다"), + (0x3271, "M", "라"), + (0x3272, "M", "마"), + (0x3273, "M", "바"), + (0x3274, "M", "사"), + (0x3275, "M", "아"), + (0x3276, "M", "자"), + (0x3277, "M", "차"), + (0x3278, "M", "카"), + (0x3279, "M", "타"), + (0x327A, "M", "파"), + (0x327B, "M", "하"), + (0x327C, "M", "참고"), + (0x327D, "M", "주의"), + (0x327E, "M", "우"), + (0x327F, "V"), + (0x3280, "M", "一"), + (0x3281, "M", "二"), + (0x3282, "M", "三"), + (0x3283, "M", "四"), + (0x3284, "M", "五"), + (0x3285, "M", "六"), + (0x3286, "M", "七"), + (0x3287, "M", "八"), + (0x3288, "M", "九"), + (0x3289, "M", "十"), + (0x328A, "M", "月"), + (0x328B, "M", "火"), + (0x328C, "M", "水"), + (0x328D, "M", "木"), + (0x328E, "M", "金"), + (0x328F, "M", "土"), + (0x3290, "M", "日"), + (0x3291, "M", "株"), + (0x3292, "M", "有"), + (0x3293, "M", "社"), + (0x3294, "M", "名"), + (0x3295, "M", "特"), + (0x3296, "M", "財"), + (0x3297, "M", "祝"), + (0x3298, "M", "労"), + (0x3299, "M", "秘"), + (0x329A, "M", "男"), + (0x329B, "M", "女"), + (0x329C, "M", "適"), + (0x329D, "M", "優"), + (0x329E, "M", "印"), + (0x329F, "M", "注"), + (0x32A0, "M", "項"), + (0x32A1, "M", "休"), + (0x32A2, "M", "写"), + (0x32A3, "M", "正"), + (0x32A4, "M", "上"), + (0x32A5, "M", "中"), + (0x32A6, "M", "下"), + (0x32A7, "M", "左"), + 
(0x32A8, "M", "右"), + (0x32A9, "M", "医"), + (0x32AA, "M", "宗"), + (0x32AB, "M", "学"), + (0x32AC, "M", "監"), + (0x32AD, "M", "企"), + (0x32AE, "M", "資"), + (0x32AF, "M", "協"), + (0x32B0, "M", "夜"), + (0x32B1, "M", "36"), + (0x32B2, "M", "37"), + (0x32B3, "M", "38"), + (0x32B4, "M", "39"), + (0x32B5, "M", "40"), + ] + + +def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x32B6, "M", "41"), + (0x32B7, "M", "42"), + (0x32B8, "M", "43"), + (0x32B9, "M", "44"), + (0x32BA, "M", "45"), + (0x32BB, "M", "46"), + (0x32BC, "M", "47"), + (0x32BD, "M", "48"), + (0x32BE, "M", "49"), + (0x32BF, "M", "50"), + (0x32C0, "M", "1月"), + (0x32C1, "M", "2月"), + (0x32C2, "M", "3月"), + (0x32C3, "M", "4月"), + (0x32C4, "M", "5月"), + (0x32C5, "M", "6月"), + (0x32C6, "M", "7月"), + (0x32C7, "M", "8月"), + (0x32C8, "M", "9月"), + (0x32C9, "M", "10月"), + (0x32CA, "M", "11月"), + (0x32CB, "M", "12月"), + (0x32CC, "M", "hg"), + (0x32CD, "M", "erg"), + (0x32CE, "M", "ev"), + (0x32CF, "M", "ltd"), + (0x32D0, "M", "ア"), + (0x32D1, "M", "イ"), + (0x32D2, "M", "ウ"), + (0x32D3, "M", "エ"), + (0x32D4, "M", "オ"), + (0x32D5, "M", "カ"), + (0x32D6, "M", "キ"), + (0x32D7, "M", "ク"), + (0x32D8, "M", "ケ"), + (0x32D9, "M", "コ"), + (0x32DA, "M", "サ"), + (0x32DB, "M", "シ"), + (0x32DC, "M", "ス"), + (0x32DD, "M", "セ"), + (0x32DE, "M", "ソ"), + (0x32DF, "M", "タ"), + (0x32E0, "M", "チ"), + (0x32E1, "M", "ツ"), + (0x32E2, "M", "テ"), + (0x32E3, "M", "ト"), + (0x32E4, "M", "ナ"), + (0x32E5, "M", "ニ"), + (0x32E6, "M", "ヌ"), + (0x32E7, "M", "ネ"), + (0x32E8, "M", "ノ"), + (0x32E9, "M", "ハ"), + (0x32EA, "M", "ヒ"), + (0x32EB, "M", "フ"), + (0x32EC, "M", "ヘ"), + (0x32ED, "M", "ホ"), + (0x32EE, "M", "マ"), + (0x32EF, "M", "ミ"), + (0x32F0, "M", "ム"), + (0x32F1, "M", "メ"), + (0x32F2, "M", "モ"), + (0x32F3, "M", "ヤ"), + (0x32F4, "M", "ユ"), + (0x32F5, "M", "ヨ"), + (0x32F6, "M", "ラ"), + (0x32F7, "M", "リ"), + (0x32F8, "M", "ル"), + (0x32F9, "M", "レ"), + (0x32FA, "M", "ロ"), + (0x32FB, "M", "ワ"), + (0x32FC, "M", "ヰ"), + (0x32FD, "M", "ヱ"), + (0x32FE, "M", "ヲ"), + (0x32FF, "M", "令和"), + (0x3300, "M", "アパート"), + (0x3301, "M", "アルファ"), + (0x3302, "M", "アンペア"), + (0x3303, "M", "アール"), + (0x3304, "M", "イニング"), + (0x3305, "M", "インチ"), + (0x3306, "M", "ウォン"), + (0x3307, "M", "エスクード"), + (0x3308, "M", "エーカー"), + (0x3309, "M", "オンス"), + (0x330A, "M", "オーム"), + (0x330B, "M", "カイリ"), + (0x330C, "M", "カラット"), + (0x330D, "M", "カロリー"), + (0x330E, "M", "ガロン"), + (0x330F, "M", "ガンマ"), + (0x3310, "M", "ギガ"), + (0x3311, "M", "ギニー"), + (0x3312, "M", "キュリー"), + (0x3313, "M", "ギルダー"), + (0x3314, "M", "キロ"), + (0x3315, "M", "キログラム"), + (0x3316, "M", "キロメートル"), + (0x3317, "M", "キロワット"), + (0x3318, "M", "グラム"), + (0x3319, "M", "グラムトン"), + ] + + +def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x331A, "M", "クルゼイロ"), + (0x331B, "M", "クローネ"), + (0x331C, "M", "ケース"), + (0x331D, "M", "コルナ"), + (0x331E, "M", "コーポ"), + (0x331F, "M", "サイクル"), + (0x3320, "M", "サンチーム"), + (0x3321, "M", "シリング"), + (0x3322, "M", "センチ"), + (0x3323, "M", "セント"), + (0x3324, "M", "ダース"), + (0x3325, "M", "デシ"), + (0x3326, "M", "ドル"), + (0x3327, "M", "トン"), + (0x3328, "M", "ナノ"), + (0x3329, "M", "ノット"), + (0x332A, "M", "ハイツ"), + (0x332B, "M", "パーセント"), + (0x332C, "M", "パーツ"), + (0x332D, "M", "バーレル"), + (0x332E, "M", "ピアストル"), + (0x332F, "M", "ピクル"), + (0x3330, "M", "ピコ"), + (0x3331, "M", "ビル"), + (0x3332, "M", "ファラッド"), + (0x3333, "M", "フィート"), + (0x3334, "M", "ブッシェル"), + (0x3335, "M", "フラン"), + (0x3336, "M", "ヘクタール"), + (0x3337, "M", "ペソ"), + (0x3338, "M", "ペニヒ"), + 
(0x3339, "M", "ヘルツ"), + (0x333A, "M", "ペンス"), + (0x333B, "M", "ページ"), + (0x333C, "M", "ベータ"), + (0x333D, "M", "ポイント"), + (0x333E, "M", "ボルト"), + (0x333F, "M", "ホン"), + (0x3340, "M", "ポンド"), + (0x3341, "M", "ホール"), + (0x3342, "M", "ホーン"), + (0x3343, "M", "マイクロ"), + (0x3344, "M", "マイル"), + (0x3345, "M", "マッハ"), + (0x3346, "M", "マルク"), + (0x3347, "M", "マンション"), + (0x3348, "M", "ミクロン"), + (0x3349, "M", "ミリ"), + (0x334A, "M", "ミリバール"), + (0x334B, "M", "メガ"), + (0x334C, "M", "メガトン"), + (0x334D, "M", "メートル"), + (0x334E, "M", "ヤード"), + (0x334F, "M", "ヤール"), + (0x3350, "M", "ユアン"), + (0x3351, "M", "リットル"), + (0x3352, "M", "リラ"), + (0x3353, "M", "ルピー"), + (0x3354, "M", "ルーブル"), + (0x3355, "M", "レム"), + (0x3356, "M", "レントゲン"), + (0x3357, "M", "ワット"), + (0x3358, "M", "0点"), + (0x3359, "M", "1点"), + (0x335A, "M", "2点"), + (0x335B, "M", "3点"), + (0x335C, "M", "4点"), + (0x335D, "M", "5点"), + (0x335E, "M", "6点"), + (0x335F, "M", "7点"), + (0x3360, "M", "8点"), + (0x3361, "M", "9点"), + (0x3362, "M", "10点"), + (0x3363, "M", "11点"), + (0x3364, "M", "12点"), + (0x3365, "M", "13点"), + (0x3366, "M", "14点"), + (0x3367, "M", "15点"), + (0x3368, "M", "16点"), + (0x3369, "M", "17点"), + (0x336A, "M", "18点"), + (0x336B, "M", "19点"), + (0x336C, "M", "20点"), + (0x336D, "M", "21点"), + (0x336E, "M", "22点"), + (0x336F, "M", "23点"), + (0x3370, "M", "24点"), + (0x3371, "M", "hpa"), + (0x3372, "M", "da"), + (0x3373, "M", "au"), + (0x3374, "M", "bar"), + (0x3375, "M", "ov"), + (0x3376, "M", "pc"), + (0x3377, "M", "dm"), + (0x3378, "M", "dm2"), + (0x3379, "M", "dm3"), + (0x337A, "M", "iu"), + (0x337B, "M", "平成"), + (0x337C, "M", "昭和"), + (0x337D, "M", "大正"), + ] + + +def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x337E, "M", "明治"), + (0x337F, "M", "株式会社"), + (0x3380, "M", "pa"), + (0x3381, "M", "na"), + (0x3382, "M", "μa"), + (0x3383, "M", "ma"), + (0x3384, "M", "ka"), + (0x3385, "M", "kb"), + (0x3386, "M", "mb"), + (0x3387, "M", "gb"), + (0x3388, "M", "cal"), + (0x3389, "M", "kcal"), + (0x338A, "M", "pf"), + (0x338B, "M", "nf"), + (0x338C, "M", "μf"), + (0x338D, "M", "μg"), + (0x338E, "M", "mg"), + (0x338F, "M", "kg"), + (0x3390, "M", "hz"), + (0x3391, "M", "khz"), + (0x3392, "M", "mhz"), + (0x3393, "M", "ghz"), + (0x3394, "M", "thz"), + (0x3395, "M", "μl"), + (0x3396, "M", "ml"), + (0x3397, "M", "dl"), + (0x3398, "M", "kl"), + (0x3399, "M", "fm"), + (0x339A, "M", "nm"), + (0x339B, "M", "μm"), + (0x339C, "M", "mm"), + (0x339D, "M", "cm"), + (0x339E, "M", "km"), + (0x339F, "M", "mm2"), + (0x33A0, "M", "cm2"), + (0x33A1, "M", "m2"), + (0x33A2, "M", "km2"), + (0x33A3, "M", "mm3"), + (0x33A4, "M", "cm3"), + (0x33A5, "M", "m3"), + (0x33A6, "M", "km3"), + (0x33A7, "M", "m∕s"), + (0x33A8, "M", "m∕s2"), + (0x33A9, "M", "pa"), + (0x33AA, "M", "kpa"), + (0x33AB, "M", "mpa"), + (0x33AC, "M", "gpa"), + (0x33AD, "M", "rad"), + (0x33AE, "M", "rad∕s"), + (0x33AF, "M", "rad∕s2"), + (0x33B0, "M", "ps"), + (0x33B1, "M", "ns"), + (0x33B2, "M", "μs"), + (0x33B3, "M", "ms"), + (0x33B4, "M", "pv"), + (0x33B5, "M", "nv"), + (0x33B6, "M", "μv"), + (0x33B7, "M", "mv"), + (0x33B8, "M", "kv"), + (0x33B9, "M", "mv"), + (0x33BA, "M", "pw"), + (0x33BB, "M", "nw"), + (0x33BC, "M", "μw"), + (0x33BD, "M", "mw"), + (0x33BE, "M", "kw"), + (0x33BF, "M", "mw"), + (0x33C0, "M", "kω"), + (0x33C1, "M", "mω"), + (0x33C2, "X"), + (0x33C3, "M", "bq"), + (0x33C4, "M", "cc"), + (0x33C5, "M", "cd"), + (0x33C6, "M", "c∕kg"), + (0x33C7, "X"), + (0x33C8, "M", "db"), + (0x33C9, "M", "gy"), + (0x33CA, "M", "ha"), + (0x33CB, "M", "hp"), + 
(0x33CC, "M", "in"), + (0x33CD, "M", "kk"), + (0x33CE, "M", "km"), + (0x33CF, "M", "kt"), + (0x33D0, "M", "lm"), + (0x33D1, "M", "ln"), + (0x33D2, "M", "log"), + (0x33D3, "M", "lx"), + (0x33D4, "M", "mb"), + (0x33D5, "M", "mil"), + (0x33D6, "M", "mol"), + (0x33D7, "M", "ph"), + (0x33D8, "X"), + (0x33D9, "M", "ppm"), + (0x33DA, "M", "pr"), + (0x33DB, "M", "sr"), + (0x33DC, "M", "sv"), + (0x33DD, "M", "wb"), + (0x33DE, "M", "v∕m"), + (0x33DF, "M", "a∕m"), + (0x33E0, "M", "1日"), + (0x33E1, "M", "2日"), + ] + + +def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x33E2, "M", "3日"), + (0x33E3, "M", "4日"), + (0x33E4, "M", "5日"), + (0x33E5, "M", "6日"), + (0x33E6, "M", "7日"), + (0x33E7, "M", "8日"), + (0x33E8, "M", "9日"), + (0x33E9, "M", "10日"), + (0x33EA, "M", "11日"), + (0x33EB, "M", "12日"), + (0x33EC, "M", "13日"), + (0x33ED, "M", "14日"), + (0x33EE, "M", "15日"), + (0x33EF, "M", "16日"), + (0x33F0, "M", "17日"), + (0x33F1, "M", "18日"), + (0x33F2, "M", "19日"), + (0x33F3, "M", "20日"), + (0x33F4, "M", "21日"), + (0x33F5, "M", "22日"), + (0x33F6, "M", "23日"), + (0x33F7, "M", "24日"), + (0x33F8, "M", "25日"), + (0x33F9, "M", "26日"), + (0x33FA, "M", "27日"), + (0x33FB, "M", "28日"), + (0x33FC, "M", "29日"), + (0x33FD, "M", "30日"), + (0x33FE, "M", "31日"), + (0x33FF, "M", "gal"), + (0x3400, "V"), + (0xA48D, "X"), + (0xA490, "V"), + (0xA4C7, "X"), + (0xA4D0, "V"), + (0xA62C, "X"), + (0xA640, "M", "ꙁ"), + (0xA641, "V"), + (0xA642, "M", "ꙃ"), + (0xA643, "V"), + (0xA644, "M", "ꙅ"), + (0xA645, "V"), + (0xA646, "M", "ꙇ"), + (0xA647, "V"), + (0xA648, "M", "ꙉ"), + (0xA649, "V"), + (0xA64A, "M", "ꙋ"), + (0xA64B, "V"), + (0xA64C, "M", "ꙍ"), + (0xA64D, "V"), + (0xA64E, "M", "ꙏ"), + (0xA64F, "V"), + (0xA650, "M", "ꙑ"), + (0xA651, "V"), + (0xA652, "M", "ꙓ"), + (0xA653, "V"), + (0xA654, "M", "ꙕ"), + (0xA655, "V"), + (0xA656, "M", "ꙗ"), + (0xA657, "V"), + (0xA658, "M", "ꙙ"), + (0xA659, "V"), + (0xA65A, "M", "ꙛ"), + (0xA65B, "V"), + (0xA65C, "M", "ꙝ"), + (0xA65D, "V"), + (0xA65E, "M", "ꙟ"), + (0xA65F, "V"), + (0xA660, "M", "ꙡ"), + (0xA661, "V"), + (0xA662, "M", "ꙣ"), + (0xA663, "V"), + (0xA664, "M", "ꙥ"), + (0xA665, "V"), + (0xA666, "M", "ꙧ"), + (0xA667, "V"), + (0xA668, "M", "ꙩ"), + (0xA669, "V"), + (0xA66A, "M", "ꙫ"), + (0xA66B, "V"), + (0xA66C, "M", "ꙭ"), + (0xA66D, "V"), + (0xA680, "M", "ꚁ"), + (0xA681, "V"), + (0xA682, "M", "ꚃ"), + (0xA683, "V"), + (0xA684, "M", "ꚅ"), + (0xA685, "V"), + (0xA686, "M", "ꚇ"), + (0xA687, "V"), + (0xA688, "M", "ꚉ"), + (0xA689, "V"), + (0xA68A, "M", "ꚋ"), + (0xA68B, "V"), + (0xA68C, "M", "ꚍ"), + (0xA68D, "V"), + (0xA68E, "M", "ꚏ"), + (0xA68F, "V"), + (0xA690, "M", "ꚑ"), + (0xA691, "V"), + ] + + +def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA692, "M", "ꚓ"), + (0xA693, "V"), + (0xA694, "M", "ꚕ"), + (0xA695, "V"), + (0xA696, "M", "ꚗ"), + (0xA697, "V"), + (0xA698, "M", "ꚙ"), + (0xA699, "V"), + (0xA69A, "M", "ꚛ"), + (0xA69B, "V"), + (0xA69C, "M", "ъ"), + (0xA69D, "M", "ь"), + (0xA69E, "V"), + (0xA6F8, "X"), + (0xA700, "V"), + (0xA722, "M", "ꜣ"), + (0xA723, "V"), + (0xA724, "M", "ꜥ"), + (0xA725, "V"), + (0xA726, "M", "ꜧ"), + (0xA727, "V"), + (0xA728, "M", "ꜩ"), + (0xA729, "V"), + (0xA72A, "M", "ꜫ"), + (0xA72B, "V"), + (0xA72C, "M", "ꜭ"), + (0xA72D, "V"), + (0xA72E, "M", "ꜯ"), + (0xA72F, "V"), + (0xA732, "M", "ꜳ"), + (0xA733, "V"), + (0xA734, "M", "ꜵ"), + (0xA735, "V"), + (0xA736, "M", "ꜷ"), + (0xA737, "V"), + (0xA738, "M", "ꜹ"), + (0xA739, "V"), + (0xA73A, "M", "ꜻ"), + (0xA73B, "V"), + (0xA73C, "M", "ꜽ"), + (0xA73D, "V"), + (0xA73E, 
"M", "ꜿ"), + (0xA73F, "V"), + (0xA740, "M", "ꝁ"), + (0xA741, "V"), + (0xA742, "M", "ꝃ"), + (0xA743, "V"), + (0xA744, "M", "ꝅ"), + (0xA745, "V"), + (0xA746, "M", "ꝇ"), + (0xA747, "V"), + (0xA748, "M", "ꝉ"), + (0xA749, "V"), + (0xA74A, "M", "ꝋ"), + (0xA74B, "V"), + (0xA74C, "M", "ꝍ"), + (0xA74D, "V"), + (0xA74E, "M", "ꝏ"), + (0xA74F, "V"), + (0xA750, "M", "ꝑ"), + (0xA751, "V"), + (0xA752, "M", "ꝓ"), + (0xA753, "V"), + (0xA754, "M", "ꝕ"), + (0xA755, "V"), + (0xA756, "M", "ꝗ"), + (0xA757, "V"), + (0xA758, "M", "ꝙ"), + (0xA759, "V"), + (0xA75A, "M", "ꝛ"), + (0xA75B, "V"), + (0xA75C, "M", "ꝝ"), + (0xA75D, "V"), + (0xA75E, "M", "ꝟ"), + (0xA75F, "V"), + (0xA760, "M", "ꝡ"), + (0xA761, "V"), + (0xA762, "M", "ꝣ"), + (0xA763, "V"), + (0xA764, "M", "ꝥ"), + (0xA765, "V"), + (0xA766, "M", "ꝧ"), + (0xA767, "V"), + (0xA768, "M", "ꝩ"), + (0xA769, "V"), + (0xA76A, "M", "ꝫ"), + (0xA76B, "V"), + (0xA76C, "M", "ꝭ"), + (0xA76D, "V"), + (0xA76E, "M", "ꝯ"), + (0xA76F, "V"), + (0xA770, "M", "ꝯ"), + (0xA771, "V"), + (0xA779, "M", "ꝺ"), + (0xA77A, "V"), + (0xA77B, "M", "ꝼ"), + (0xA77C, "V"), + (0xA77D, "M", "ᵹ"), + (0xA77E, "M", "ꝿ"), + (0xA77F, "V"), + ] + + +def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA780, "M", "ꞁ"), + (0xA781, "V"), + (0xA782, "M", "ꞃ"), + (0xA783, "V"), + (0xA784, "M", "ꞅ"), + (0xA785, "V"), + (0xA786, "M", "ꞇ"), + (0xA787, "V"), + (0xA78B, "M", "ꞌ"), + (0xA78C, "V"), + (0xA78D, "M", "ɥ"), + (0xA78E, "V"), + (0xA790, "M", "ꞑ"), + (0xA791, "V"), + (0xA792, "M", "ꞓ"), + (0xA793, "V"), + (0xA796, "M", "ꞗ"), + (0xA797, "V"), + (0xA798, "M", "ꞙ"), + (0xA799, "V"), + (0xA79A, "M", "ꞛ"), + (0xA79B, "V"), + (0xA79C, "M", "ꞝ"), + (0xA79D, "V"), + (0xA79E, "M", "ꞟ"), + (0xA79F, "V"), + (0xA7A0, "M", "ꞡ"), + (0xA7A1, "V"), + (0xA7A2, "M", "ꞣ"), + (0xA7A3, "V"), + (0xA7A4, "M", "ꞥ"), + (0xA7A5, "V"), + (0xA7A6, "M", "ꞧ"), + (0xA7A7, "V"), + (0xA7A8, "M", "ꞩ"), + (0xA7A9, "V"), + (0xA7AA, "M", "ɦ"), + (0xA7AB, "M", "ɜ"), + (0xA7AC, "M", "ɡ"), + (0xA7AD, "M", "ɬ"), + (0xA7AE, "M", "ɪ"), + (0xA7AF, "V"), + (0xA7B0, "M", "ʞ"), + (0xA7B1, "M", "ʇ"), + (0xA7B2, "M", "ʝ"), + (0xA7B3, "M", "ꭓ"), + (0xA7B4, "M", "ꞵ"), + (0xA7B5, "V"), + (0xA7B6, "M", "ꞷ"), + (0xA7B7, "V"), + (0xA7B8, "M", "ꞹ"), + (0xA7B9, "V"), + (0xA7BA, "M", "ꞻ"), + (0xA7BB, "V"), + (0xA7BC, "M", "ꞽ"), + (0xA7BD, "V"), + (0xA7BE, "M", "ꞿ"), + (0xA7BF, "V"), + (0xA7C0, "M", "ꟁ"), + (0xA7C1, "V"), + (0xA7C2, "M", "ꟃ"), + (0xA7C3, "V"), + (0xA7C4, "M", "ꞔ"), + (0xA7C5, "M", "ʂ"), + (0xA7C6, "M", "ᶎ"), + (0xA7C7, "M", "ꟈ"), + (0xA7C8, "V"), + (0xA7C9, "M", "ꟊ"), + (0xA7CA, "V"), + (0xA7CB, "X"), + (0xA7D0, "M", "ꟑ"), + (0xA7D1, "V"), + (0xA7D2, "X"), + (0xA7D3, "V"), + (0xA7D4, "X"), + (0xA7D5, "V"), + (0xA7D6, "M", "ꟗ"), + (0xA7D7, "V"), + (0xA7D8, "M", "ꟙ"), + (0xA7D9, "V"), + (0xA7DA, "X"), + (0xA7F2, "M", "c"), + (0xA7F3, "M", "f"), + (0xA7F4, "M", "q"), + (0xA7F5, "M", "ꟶ"), + (0xA7F6, "V"), + (0xA7F8, "M", "ħ"), + (0xA7F9, "M", "œ"), + (0xA7FA, "V"), + (0xA82D, "X"), + (0xA830, "V"), + (0xA83A, "X"), + (0xA840, "V"), + (0xA878, "X"), + (0xA880, "V"), + (0xA8C6, "X"), + (0xA8CE, "V"), + (0xA8DA, "X"), + (0xA8E0, "V"), + (0xA954, "X"), + ] + + +def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA95F, "V"), + (0xA97D, "X"), + (0xA980, "V"), + (0xA9CE, "X"), + (0xA9CF, "V"), + (0xA9DA, "X"), + (0xA9DE, "V"), + (0xA9FF, "X"), + (0xAA00, "V"), + (0xAA37, "X"), + (0xAA40, "V"), + (0xAA4E, "X"), + (0xAA50, "V"), + (0xAA5A, "X"), + (0xAA5C, "V"), + (0xAAC3, "X"), + (0xAADB, "V"), 
+ (0xAAF7, "X"), + (0xAB01, "V"), + (0xAB07, "X"), + (0xAB09, "V"), + (0xAB0F, "X"), + (0xAB11, "V"), + (0xAB17, "X"), + (0xAB20, "V"), + (0xAB27, "X"), + (0xAB28, "V"), + (0xAB2F, "X"), + (0xAB30, "V"), + (0xAB5C, "M", "ꜧ"), + (0xAB5D, "M", "ꬷ"), + (0xAB5E, "M", "ɫ"), + (0xAB5F, "M", "ꭒ"), + (0xAB60, "V"), + (0xAB69, "M", "ʍ"), + (0xAB6A, "V"), + (0xAB6C, "X"), + (0xAB70, "M", "Ꭰ"), + (0xAB71, "M", "Ꭱ"), + (0xAB72, "M", "Ꭲ"), + (0xAB73, "M", "Ꭳ"), + (0xAB74, "M", "Ꭴ"), + (0xAB75, "M", "Ꭵ"), + (0xAB76, "M", "Ꭶ"), + (0xAB77, "M", "Ꭷ"), + (0xAB78, "M", "Ꭸ"), + (0xAB79, "M", "Ꭹ"), + (0xAB7A, "M", "Ꭺ"), + (0xAB7B, "M", "Ꭻ"), + (0xAB7C, "M", "Ꭼ"), + (0xAB7D, "M", "Ꭽ"), + (0xAB7E, "M", "Ꭾ"), + (0xAB7F, "M", "Ꭿ"), + (0xAB80, "M", "Ꮀ"), + (0xAB81, "M", "Ꮁ"), + (0xAB82, "M", "Ꮂ"), + (0xAB83, "M", "Ꮃ"), + (0xAB84, "M", "Ꮄ"), + (0xAB85, "M", "Ꮅ"), + (0xAB86, "M", "Ꮆ"), + (0xAB87, "M", "Ꮇ"), + (0xAB88, "M", "Ꮈ"), + (0xAB89, "M", "Ꮉ"), + (0xAB8A, "M", "Ꮊ"), + (0xAB8B, "M", "Ꮋ"), + (0xAB8C, "M", "Ꮌ"), + (0xAB8D, "M", "Ꮍ"), + (0xAB8E, "M", "Ꮎ"), + (0xAB8F, "M", "Ꮏ"), + (0xAB90, "M", "Ꮐ"), + (0xAB91, "M", "Ꮑ"), + (0xAB92, "M", "Ꮒ"), + (0xAB93, "M", "Ꮓ"), + (0xAB94, "M", "Ꮔ"), + (0xAB95, "M", "Ꮕ"), + (0xAB96, "M", "Ꮖ"), + (0xAB97, "M", "Ꮗ"), + (0xAB98, "M", "Ꮘ"), + (0xAB99, "M", "Ꮙ"), + (0xAB9A, "M", "Ꮚ"), + (0xAB9B, "M", "Ꮛ"), + (0xAB9C, "M", "Ꮜ"), + (0xAB9D, "M", "Ꮝ"), + (0xAB9E, "M", "Ꮞ"), + (0xAB9F, "M", "Ꮟ"), + (0xABA0, "M", "Ꮠ"), + (0xABA1, "M", "Ꮡ"), + (0xABA2, "M", "Ꮢ"), + (0xABA3, "M", "Ꮣ"), + (0xABA4, "M", "Ꮤ"), + (0xABA5, "M", "Ꮥ"), + (0xABA6, "M", "Ꮦ"), + (0xABA7, "M", "Ꮧ"), + (0xABA8, "M", "Ꮨ"), + (0xABA9, "M", "Ꮩ"), + (0xABAA, "M", "Ꮪ"), + (0xABAB, "M", "Ꮫ"), + (0xABAC, "M", "Ꮬ"), + (0xABAD, "M", "Ꮭ"), + (0xABAE, "M", "Ꮮ"), + ] + + +def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xABAF, "M", "Ꮯ"), + (0xABB0, "M", "Ꮰ"), + (0xABB1, "M", "Ꮱ"), + (0xABB2, "M", "Ꮲ"), + (0xABB3, "M", "Ꮳ"), + (0xABB4, "M", "Ꮴ"), + (0xABB5, "M", "Ꮵ"), + (0xABB6, "M", "Ꮶ"), + (0xABB7, "M", "Ꮷ"), + (0xABB8, "M", "Ꮸ"), + (0xABB9, "M", "Ꮹ"), + (0xABBA, "M", "Ꮺ"), + (0xABBB, "M", "Ꮻ"), + (0xABBC, "M", "Ꮼ"), + (0xABBD, "M", "Ꮽ"), + (0xABBE, "M", "Ꮾ"), + (0xABBF, "M", "Ꮿ"), + (0xABC0, "V"), + (0xABEE, "X"), + (0xABF0, "V"), + (0xABFA, "X"), + (0xAC00, "V"), + (0xD7A4, "X"), + (0xD7B0, "V"), + (0xD7C7, "X"), + (0xD7CB, "V"), + (0xD7FC, "X"), + (0xF900, "M", "豈"), + (0xF901, "M", "更"), + (0xF902, "M", "車"), + (0xF903, "M", "賈"), + (0xF904, "M", "滑"), + (0xF905, "M", "串"), + (0xF906, "M", "句"), + (0xF907, "M", "龜"), + (0xF909, "M", "契"), + (0xF90A, "M", "金"), + (0xF90B, "M", "喇"), + (0xF90C, "M", "奈"), + (0xF90D, "M", "懶"), + (0xF90E, "M", "癩"), + (0xF90F, "M", "羅"), + (0xF910, "M", "蘿"), + (0xF911, "M", "螺"), + (0xF912, "M", "裸"), + (0xF913, "M", "邏"), + (0xF914, "M", "樂"), + (0xF915, "M", "洛"), + (0xF916, "M", "烙"), + (0xF917, "M", "珞"), + (0xF918, "M", "落"), + (0xF919, "M", "酪"), + (0xF91A, "M", "駱"), + (0xF91B, "M", "亂"), + (0xF91C, "M", "卵"), + (0xF91D, "M", "欄"), + (0xF91E, "M", "爛"), + (0xF91F, "M", "蘭"), + (0xF920, "M", "鸞"), + (0xF921, "M", "嵐"), + (0xF922, "M", "濫"), + (0xF923, "M", "藍"), + (0xF924, "M", "襤"), + (0xF925, "M", "拉"), + (0xF926, "M", "臘"), + (0xF927, "M", "蠟"), + (0xF928, "M", "廊"), + (0xF929, "M", "朗"), + (0xF92A, "M", "浪"), + (0xF92B, "M", "狼"), + (0xF92C, "M", "郎"), + (0xF92D, "M", "來"), + (0xF92E, "M", "冷"), + (0xF92F, "M", "勞"), + (0xF930, "M", "擄"), + (0xF931, "M", "櫓"), + (0xF932, "M", "爐"), + (0xF933, "M", "盧"), + (0xF934, "M", "老"), + (0xF935, "M", "蘆"), + 
(0xF936, "M", "虜"), + (0xF937, "M", "路"), + (0xF938, "M", "露"), + (0xF939, "M", "魯"), + (0xF93A, "M", "鷺"), + (0xF93B, "M", "碌"), + (0xF93C, "M", "祿"), + (0xF93D, "M", "綠"), + (0xF93E, "M", "菉"), + (0xF93F, "M", "錄"), + (0xF940, "M", "鹿"), + (0xF941, "M", "論"), + (0xF942, "M", "壟"), + (0xF943, "M", "弄"), + (0xF944, "M", "籠"), + (0xF945, "M", "聾"), + (0xF946, "M", "牢"), + (0xF947, "M", "磊"), + (0xF948, "M", "賂"), + (0xF949, "M", "雷"), + ] + + +def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xF94A, "M", "壘"), + (0xF94B, "M", "屢"), + (0xF94C, "M", "樓"), + (0xF94D, "M", "淚"), + (0xF94E, "M", "漏"), + (0xF94F, "M", "累"), + (0xF950, "M", "縷"), + (0xF951, "M", "陋"), + (0xF952, "M", "勒"), + (0xF953, "M", "肋"), + (0xF954, "M", "凜"), + (0xF955, "M", "凌"), + (0xF956, "M", "稜"), + (0xF957, "M", "綾"), + (0xF958, "M", "菱"), + (0xF959, "M", "陵"), + (0xF95A, "M", "讀"), + (0xF95B, "M", "拏"), + (0xF95C, "M", "樂"), + (0xF95D, "M", "諾"), + (0xF95E, "M", "丹"), + (0xF95F, "M", "寧"), + (0xF960, "M", "怒"), + (0xF961, "M", "率"), + (0xF962, "M", "異"), + (0xF963, "M", "北"), + (0xF964, "M", "磻"), + (0xF965, "M", "便"), + (0xF966, "M", "復"), + (0xF967, "M", "不"), + (0xF968, "M", "泌"), + (0xF969, "M", "數"), + (0xF96A, "M", "索"), + (0xF96B, "M", "參"), + (0xF96C, "M", "塞"), + (0xF96D, "M", "省"), + (0xF96E, "M", "葉"), + (0xF96F, "M", "說"), + (0xF970, "M", "殺"), + (0xF971, "M", "辰"), + (0xF972, "M", "沈"), + (0xF973, "M", "拾"), + (0xF974, "M", "若"), + (0xF975, "M", "掠"), + (0xF976, "M", "略"), + (0xF977, "M", "亮"), + (0xF978, "M", "兩"), + (0xF979, "M", "凉"), + (0xF97A, "M", "梁"), + (0xF97B, "M", "糧"), + (0xF97C, "M", "良"), + (0xF97D, "M", "諒"), + (0xF97E, "M", "量"), + (0xF97F, "M", "勵"), + (0xF980, "M", "呂"), + (0xF981, "M", "女"), + (0xF982, "M", "廬"), + (0xF983, "M", "旅"), + (0xF984, "M", "濾"), + (0xF985, "M", "礪"), + (0xF986, "M", "閭"), + (0xF987, "M", "驪"), + (0xF988, "M", "麗"), + (0xF989, "M", "黎"), + (0xF98A, "M", "力"), + (0xF98B, "M", "曆"), + (0xF98C, "M", "歷"), + (0xF98D, "M", "轢"), + (0xF98E, "M", "年"), + (0xF98F, "M", "憐"), + (0xF990, "M", "戀"), + (0xF991, "M", "撚"), + (0xF992, "M", "漣"), + (0xF993, "M", "煉"), + (0xF994, "M", "璉"), + (0xF995, "M", "秊"), + (0xF996, "M", "練"), + (0xF997, "M", "聯"), + (0xF998, "M", "輦"), + (0xF999, "M", "蓮"), + (0xF99A, "M", "連"), + (0xF99B, "M", "鍊"), + (0xF99C, "M", "列"), + (0xF99D, "M", "劣"), + (0xF99E, "M", "咽"), + (0xF99F, "M", "烈"), + (0xF9A0, "M", "裂"), + (0xF9A1, "M", "說"), + (0xF9A2, "M", "廉"), + (0xF9A3, "M", "念"), + (0xF9A4, "M", "捻"), + (0xF9A5, "M", "殮"), + (0xF9A6, "M", "簾"), + (0xF9A7, "M", "獵"), + (0xF9A8, "M", "令"), + (0xF9A9, "M", "囹"), + (0xF9AA, "M", "寧"), + (0xF9AB, "M", "嶺"), + (0xF9AC, "M", "怜"), + (0xF9AD, "M", "玲"), + ] + + +def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xF9AE, "M", "瑩"), + (0xF9AF, "M", "羚"), + (0xF9B0, "M", "聆"), + (0xF9B1, "M", "鈴"), + (0xF9B2, "M", "零"), + (0xF9B3, "M", "靈"), + (0xF9B4, "M", "領"), + (0xF9B5, "M", "例"), + (0xF9B6, "M", "禮"), + (0xF9B7, "M", "醴"), + (0xF9B8, "M", "隸"), + (0xF9B9, "M", "惡"), + (0xF9BA, "M", "了"), + (0xF9BB, "M", "僚"), + (0xF9BC, "M", "寮"), + (0xF9BD, "M", "尿"), + (0xF9BE, "M", "料"), + (0xF9BF, "M", "樂"), + (0xF9C0, "M", "燎"), + (0xF9C1, "M", "療"), + (0xF9C2, "M", "蓼"), + (0xF9C3, "M", "遼"), + (0xF9C4, "M", "龍"), + (0xF9C5, "M", "暈"), + (0xF9C6, "M", "阮"), + (0xF9C7, "M", "劉"), + (0xF9C8, "M", "杻"), + (0xF9C9, "M", "柳"), + (0xF9CA, "M", "流"), + (0xF9CB, "M", "溜"), + (0xF9CC, "M", "琉"), + (0xF9CD, "M", "留"), + (0xF9CE, "M", "硫"), + (0xF9CF, 
"M", "紐"), + (0xF9D0, "M", "類"), + (0xF9D1, "M", "六"), + (0xF9D2, "M", "戮"), + (0xF9D3, "M", "陸"), + (0xF9D4, "M", "倫"), + (0xF9D5, "M", "崙"), + (0xF9D6, "M", "淪"), + (0xF9D7, "M", "輪"), + (0xF9D8, "M", "律"), + (0xF9D9, "M", "慄"), + (0xF9DA, "M", "栗"), + (0xF9DB, "M", "率"), + (0xF9DC, "M", "隆"), + (0xF9DD, "M", "利"), + (0xF9DE, "M", "吏"), + (0xF9DF, "M", "履"), + (0xF9E0, "M", "易"), + (0xF9E1, "M", "李"), + (0xF9E2, "M", "梨"), + (0xF9E3, "M", "泥"), + (0xF9E4, "M", "理"), + (0xF9E5, "M", "痢"), + (0xF9E6, "M", "罹"), + (0xF9E7, "M", "裏"), + (0xF9E8, "M", "裡"), + (0xF9E9, "M", "里"), + (0xF9EA, "M", "離"), + (0xF9EB, "M", "匿"), + (0xF9EC, "M", "溺"), + (0xF9ED, "M", "吝"), + (0xF9EE, "M", "燐"), + (0xF9EF, "M", "璘"), + (0xF9F0, "M", "藺"), + (0xF9F1, "M", "隣"), + (0xF9F2, "M", "鱗"), + (0xF9F3, "M", "麟"), + (0xF9F4, "M", "林"), + (0xF9F5, "M", "淋"), + (0xF9F6, "M", "臨"), + (0xF9F7, "M", "立"), + (0xF9F8, "M", "笠"), + (0xF9F9, "M", "粒"), + (0xF9FA, "M", "狀"), + (0xF9FB, "M", "炙"), + (0xF9FC, "M", "識"), + (0xF9FD, "M", "什"), + (0xF9FE, "M", "茶"), + (0xF9FF, "M", "刺"), + (0xFA00, "M", "切"), + (0xFA01, "M", "度"), + (0xFA02, "M", "拓"), + (0xFA03, "M", "糖"), + (0xFA04, "M", "宅"), + (0xFA05, "M", "洞"), + (0xFA06, "M", "暴"), + (0xFA07, "M", "輻"), + (0xFA08, "M", "行"), + (0xFA09, "M", "降"), + (0xFA0A, "M", "見"), + (0xFA0B, "M", "廓"), + (0xFA0C, "M", "兀"), + (0xFA0D, "M", "嗀"), + (0xFA0E, "V"), + (0xFA10, "M", "塚"), + (0xFA11, "V"), + (0xFA12, "M", "晴"), + ] + + +def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFA13, "V"), + (0xFA15, "M", "凞"), + (0xFA16, "M", "猪"), + (0xFA17, "M", "益"), + (0xFA18, "M", "礼"), + (0xFA19, "M", "神"), + (0xFA1A, "M", "祥"), + (0xFA1B, "M", "福"), + (0xFA1C, "M", "靖"), + (0xFA1D, "M", "精"), + (0xFA1E, "M", "羽"), + (0xFA1F, "V"), + (0xFA20, "M", "蘒"), + (0xFA21, "V"), + (0xFA22, "M", "諸"), + (0xFA23, "V"), + (0xFA25, "M", "逸"), + (0xFA26, "M", "都"), + (0xFA27, "V"), + (0xFA2A, "M", "飯"), + (0xFA2B, "M", "飼"), + (0xFA2C, "M", "館"), + (0xFA2D, "M", "鶴"), + (0xFA2E, "M", "郞"), + (0xFA2F, "M", "隷"), + (0xFA30, "M", "侮"), + (0xFA31, "M", "僧"), + (0xFA32, "M", "免"), + (0xFA33, "M", "勉"), + (0xFA34, "M", "勤"), + (0xFA35, "M", "卑"), + (0xFA36, "M", "喝"), + (0xFA37, "M", "嘆"), + (0xFA38, "M", "器"), + (0xFA39, "M", "塀"), + (0xFA3A, "M", "墨"), + (0xFA3B, "M", "層"), + (0xFA3C, "M", "屮"), + (0xFA3D, "M", "悔"), + (0xFA3E, "M", "慨"), + (0xFA3F, "M", "憎"), + (0xFA40, "M", "懲"), + (0xFA41, "M", "敏"), + (0xFA42, "M", "既"), + (0xFA43, "M", "暑"), + (0xFA44, "M", "梅"), + (0xFA45, "M", "海"), + (0xFA46, "M", "渚"), + (0xFA47, "M", "漢"), + (0xFA48, "M", "煮"), + (0xFA49, "M", "爫"), + (0xFA4A, "M", "琢"), + (0xFA4B, "M", "碑"), + (0xFA4C, "M", "社"), + (0xFA4D, "M", "祉"), + (0xFA4E, "M", "祈"), + (0xFA4F, "M", "祐"), + (0xFA50, "M", "祖"), + (0xFA51, "M", "祝"), + (0xFA52, "M", "禍"), + (0xFA53, "M", "禎"), + (0xFA54, "M", "穀"), + (0xFA55, "M", "突"), + (0xFA56, "M", "節"), + (0xFA57, "M", "練"), + (0xFA58, "M", "縉"), + (0xFA59, "M", "繁"), + (0xFA5A, "M", "署"), + (0xFA5B, "M", "者"), + (0xFA5C, "M", "臭"), + (0xFA5D, "M", "艹"), + (0xFA5F, "M", "著"), + (0xFA60, "M", "褐"), + (0xFA61, "M", "視"), + (0xFA62, "M", "謁"), + (0xFA63, "M", "謹"), + (0xFA64, "M", "賓"), + (0xFA65, "M", "贈"), + (0xFA66, "M", "辶"), + (0xFA67, "M", "逸"), + (0xFA68, "M", "難"), + (0xFA69, "M", "響"), + (0xFA6A, "M", "頻"), + (0xFA6B, "M", "恵"), + (0xFA6C, "M", "𤋮"), + (0xFA6D, "M", "舘"), + (0xFA6E, "X"), + (0xFA70, "M", "並"), + (0xFA71, "M", "况"), + (0xFA72, "M", "全"), + (0xFA73, "M", "侀"), + (0xFA74, "M", "充"), + (0xFA75, "M", 
"冀"), + (0xFA76, "M", "勇"), + (0xFA77, "M", "勺"), + (0xFA78, "M", "喝"), + (0xFA79, "M", "啕"), + (0xFA7A, "M", "喙"), + (0xFA7B, "M", "嗢"), + (0xFA7C, "M", "塚"), + ] + + +def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFA7D, "M", "墳"), + (0xFA7E, "M", "奄"), + (0xFA7F, "M", "奔"), + (0xFA80, "M", "婢"), + (0xFA81, "M", "嬨"), + (0xFA82, "M", "廒"), + (0xFA83, "M", "廙"), + (0xFA84, "M", "彩"), + (0xFA85, "M", "徭"), + (0xFA86, "M", "惘"), + (0xFA87, "M", "慎"), + (0xFA88, "M", "愈"), + (0xFA89, "M", "憎"), + (0xFA8A, "M", "慠"), + (0xFA8B, "M", "懲"), + (0xFA8C, "M", "戴"), + (0xFA8D, "M", "揄"), + (0xFA8E, "M", "搜"), + (0xFA8F, "M", "摒"), + (0xFA90, "M", "敖"), + (0xFA91, "M", "晴"), + (0xFA92, "M", "朗"), + (0xFA93, "M", "望"), + (0xFA94, "M", "杖"), + (0xFA95, "M", "歹"), + (0xFA96, "M", "殺"), + (0xFA97, "M", "流"), + (0xFA98, "M", "滛"), + (0xFA99, "M", "滋"), + (0xFA9A, "M", "漢"), + (0xFA9B, "M", "瀞"), + (0xFA9C, "M", "煮"), + (0xFA9D, "M", "瞧"), + (0xFA9E, "M", "爵"), + (0xFA9F, "M", "犯"), + (0xFAA0, "M", "猪"), + (0xFAA1, "M", "瑱"), + (0xFAA2, "M", "甆"), + (0xFAA3, "M", "画"), + (0xFAA4, "M", "瘝"), + (0xFAA5, "M", "瘟"), + (0xFAA6, "M", "益"), + (0xFAA7, "M", "盛"), + (0xFAA8, "M", "直"), + (0xFAA9, "M", "睊"), + (0xFAAA, "M", "着"), + (0xFAAB, "M", "磌"), + (0xFAAC, "M", "窱"), + (0xFAAD, "M", "節"), + (0xFAAE, "M", "类"), + (0xFAAF, "M", "絛"), + (0xFAB0, "M", "練"), + (0xFAB1, "M", "缾"), + (0xFAB2, "M", "者"), + (0xFAB3, "M", "荒"), + (0xFAB4, "M", "華"), + (0xFAB5, "M", "蝹"), + (0xFAB6, "M", "襁"), + (0xFAB7, "M", "覆"), + (0xFAB8, "M", "視"), + (0xFAB9, "M", "調"), + (0xFABA, "M", "諸"), + (0xFABB, "M", "請"), + (0xFABC, "M", "謁"), + (0xFABD, "M", "諾"), + (0xFABE, "M", "諭"), + (0xFABF, "M", "謹"), + (0xFAC0, "M", "變"), + (0xFAC1, "M", "贈"), + (0xFAC2, "M", "輸"), + (0xFAC3, "M", "遲"), + (0xFAC4, "M", "醙"), + (0xFAC5, "M", "鉶"), + (0xFAC6, "M", "陼"), + (0xFAC7, "M", "難"), + (0xFAC8, "M", "靖"), + (0xFAC9, "M", "韛"), + (0xFACA, "M", "響"), + (0xFACB, "M", "頋"), + (0xFACC, "M", "頻"), + (0xFACD, "M", "鬒"), + (0xFACE, "M", "龜"), + (0xFACF, "M", "𢡊"), + (0xFAD0, "M", "𢡄"), + (0xFAD1, "M", "𣏕"), + (0xFAD2, "M", "㮝"), + (0xFAD3, "M", "䀘"), + (0xFAD4, "M", "䀹"), + (0xFAD5, "M", "𥉉"), + (0xFAD6, "M", "𥳐"), + (0xFAD7, "M", "𧻓"), + (0xFAD8, "M", "齃"), + (0xFAD9, "M", "龎"), + (0xFADA, "X"), + (0xFB00, "M", "ff"), + (0xFB01, "M", "fi"), + (0xFB02, "M", "fl"), + (0xFB03, "M", "ffi"), + (0xFB04, "M", "ffl"), + (0xFB05, "M", "st"), + ] + + +def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFB07, "X"), + (0xFB13, "M", "մն"), + (0xFB14, "M", "մե"), + (0xFB15, "M", "մի"), + (0xFB16, "M", "վն"), + (0xFB17, "M", "մխ"), + (0xFB18, "X"), + (0xFB1D, "M", "יִ"), + (0xFB1E, "V"), + (0xFB1F, "M", "ײַ"), + (0xFB20, "M", "ע"), + (0xFB21, "M", "א"), + (0xFB22, "M", "ד"), + (0xFB23, "M", "ה"), + (0xFB24, "M", "כ"), + (0xFB25, "M", "ל"), + (0xFB26, "M", "ם"), + (0xFB27, "M", "ר"), + (0xFB28, "M", "ת"), + (0xFB29, "3", "+"), + (0xFB2A, "M", "שׁ"), + (0xFB2B, "M", "שׂ"), + (0xFB2C, "M", "שּׁ"), + (0xFB2D, "M", "שּׂ"), + (0xFB2E, "M", "אַ"), + (0xFB2F, "M", "אָ"), + (0xFB30, "M", "אּ"), + (0xFB31, "M", "בּ"), + (0xFB32, "M", "גּ"), + (0xFB33, "M", "דּ"), + (0xFB34, "M", "הּ"), + (0xFB35, "M", "וּ"), + (0xFB36, "M", "זּ"), + (0xFB37, "X"), + (0xFB38, "M", "טּ"), + (0xFB39, "M", "יּ"), + (0xFB3A, "M", "ךּ"), + (0xFB3B, "M", "כּ"), + (0xFB3C, "M", "לּ"), + (0xFB3D, "X"), + (0xFB3E, "M", "מּ"), + (0xFB3F, "X"), + (0xFB40, "M", "נּ"), + (0xFB41, "M", "סּ"), + (0xFB42, "X"), + (0xFB43, "M", "ףּ"), + 
(0xFB44, "M", "פּ"), + (0xFB45, "X"), + (0xFB46, "M", "צּ"), + (0xFB47, "M", "קּ"), + (0xFB48, "M", "רּ"), + (0xFB49, "M", "שּ"), + (0xFB4A, "M", "תּ"), + (0xFB4B, "M", "וֹ"), + (0xFB4C, "M", "בֿ"), + (0xFB4D, "M", "כֿ"), + (0xFB4E, "M", "פֿ"), + (0xFB4F, "M", "אל"), + (0xFB50, "M", "ٱ"), + (0xFB52, "M", "ٻ"), + (0xFB56, "M", "پ"), + (0xFB5A, "M", "ڀ"), + (0xFB5E, "M", "ٺ"), + (0xFB62, "M", "ٿ"), + (0xFB66, "M", "ٹ"), + (0xFB6A, "M", "ڤ"), + (0xFB6E, "M", "ڦ"), + (0xFB72, "M", "ڄ"), + (0xFB76, "M", "ڃ"), + (0xFB7A, "M", "چ"), + (0xFB7E, "M", "ڇ"), + (0xFB82, "M", "ڍ"), + (0xFB84, "M", "ڌ"), + (0xFB86, "M", "ڎ"), + (0xFB88, "M", "ڈ"), + (0xFB8A, "M", "ژ"), + (0xFB8C, "M", "ڑ"), + (0xFB8E, "M", "ک"), + (0xFB92, "M", "گ"), + (0xFB96, "M", "ڳ"), + (0xFB9A, "M", "ڱ"), + (0xFB9E, "M", "ں"), + (0xFBA0, "M", "ڻ"), + (0xFBA4, "M", "ۀ"), + (0xFBA6, "M", "ہ"), + (0xFBAA, "M", "ھ"), + (0xFBAE, "M", "ے"), + (0xFBB0, "M", "ۓ"), + (0xFBB2, "V"), + (0xFBC3, "X"), + (0xFBD3, "M", "ڭ"), + (0xFBD7, "M", "ۇ"), + (0xFBD9, "M", "ۆ"), + (0xFBDB, "M", "ۈ"), + (0xFBDD, "M", "ۇٴ"), + (0xFBDE, "M", "ۋ"), + (0xFBE0, "M", "ۅ"), + (0xFBE2, "M", "ۉ"), + (0xFBE4, "M", "ې"), + (0xFBE8, "M", "ى"), + ] + + +def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFBEA, "M", "ئا"), + (0xFBEC, "M", "ئە"), + (0xFBEE, "M", "ئو"), + (0xFBF0, "M", "ئۇ"), + (0xFBF2, "M", "ئۆ"), + (0xFBF4, "M", "ئۈ"), + (0xFBF6, "M", "ئې"), + (0xFBF9, "M", "ئى"), + (0xFBFC, "M", "ی"), + (0xFC00, "M", "ئج"), + (0xFC01, "M", "ئح"), + (0xFC02, "M", "ئم"), + (0xFC03, "M", "ئى"), + (0xFC04, "M", "ئي"), + (0xFC05, "M", "بج"), + (0xFC06, "M", "بح"), + (0xFC07, "M", "بخ"), + (0xFC08, "M", "بم"), + (0xFC09, "M", "بى"), + (0xFC0A, "M", "بي"), + (0xFC0B, "M", "تج"), + (0xFC0C, "M", "تح"), + (0xFC0D, "M", "تخ"), + (0xFC0E, "M", "تم"), + (0xFC0F, "M", "تى"), + (0xFC10, "M", "تي"), + (0xFC11, "M", "ثج"), + (0xFC12, "M", "ثم"), + (0xFC13, "M", "ثى"), + (0xFC14, "M", "ثي"), + (0xFC15, "M", "جح"), + (0xFC16, "M", "جم"), + (0xFC17, "M", "حج"), + (0xFC18, "M", "حم"), + (0xFC19, "M", "خج"), + (0xFC1A, "M", "خح"), + (0xFC1B, "M", "خم"), + (0xFC1C, "M", "سج"), + (0xFC1D, "M", "سح"), + (0xFC1E, "M", "سخ"), + (0xFC1F, "M", "سم"), + (0xFC20, "M", "صح"), + (0xFC21, "M", "صم"), + (0xFC22, "M", "ضج"), + (0xFC23, "M", "ضح"), + (0xFC24, "M", "ضخ"), + (0xFC25, "M", "ضم"), + (0xFC26, "M", "طح"), + (0xFC27, "M", "طم"), + (0xFC28, "M", "ظم"), + (0xFC29, "M", "عج"), + (0xFC2A, "M", "عم"), + (0xFC2B, "M", "غج"), + (0xFC2C, "M", "غم"), + (0xFC2D, "M", "فج"), + (0xFC2E, "M", "فح"), + (0xFC2F, "M", "فخ"), + (0xFC30, "M", "فم"), + (0xFC31, "M", "فى"), + (0xFC32, "M", "في"), + (0xFC33, "M", "قح"), + (0xFC34, "M", "قم"), + (0xFC35, "M", "قى"), + (0xFC36, "M", "قي"), + (0xFC37, "M", "كا"), + (0xFC38, "M", "كج"), + (0xFC39, "M", "كح"), + (0xFC3A, "M", "كخ"), + (0xFC3B, "M", "كل"), + (0xFC3C, "M", "كم"), + (0xFC3D, "M", "كى"), + (0xFC3E, "M", "كي"), + (0xFC3F, "M", "لج"), + (0xFC40, "M", "لح"), + (0xFC41, "M", "لخ"), + (0xFC42, "M", "لم"), + (0xFC43, "M", "لى"), + (0xFC44, "M", "لي"), + (0xFC45, "M", "مج"), + (0xFC46, "M", "مح"), + (0xFC47, "M", "مخ"), + (0xFC48, "M", "مم"), + (0xFC49, "M", "مى"), + (0xFC4A, "M", "مي"), + (0xFC4B, "M", "نج"), + (0xFC4C, "M", "نح"), + (0xFC4D, "M", "نخ"), + (0xFC4E, "M", "نم"), + (0xFC4F, "M", "نى"), + (0xFC50, "M", "ني"), + (0xFC51, "M", "هج"), + (0xFC52, "M", "هم"), + (0xFC53, "M", "هى"), + (0xFC54, "M", "هي"), + (0xFC55, "M", "يج"), + (0xFC56, "M", "يح"), + (0xFC57, "M", "يخ"), + (0xFC58, "M", "يم"), + (0xFC59, "M", "يى"), + 
(0xFC5A, "M", "يي"), + ] + + +def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFC5B, "M", "ذٰ"), + (0xFC5C, "M", "رٰ"), + (0xFC5D, "M", "ىٰ"), + (0xFC5E, "3", " ٌّ"), + (0xFC5F, "3", " ٍّ"), + (0xFC60, "3", " َّ"), + (0xFC61, "3", " ُّ"), + (0xFC62, "3", " ِّ"), + (0xFC63, "3", " ّٰ"), + (0xFC64, "M", "ئر"), + (0xFC65, "M", "ئز"), + (0xFC66, "M", "ئم"), + (0xFC67, "M", "ئن"), + (0xFC68, "M", "ئى"), + (0xFC69, "M", "ئي"), + (0xFC6A, "M", "بر"), + (0xFC6B, "M", "بز"), + (0xFC6C, "M", "بم"), + (0xFC6D, "M", "بن"), + (0xFC6E, "M", "بى"), + (0xFC6F, "M", "بي"), + (0xFC70, "M", "تر"), + (0xFC71, "M", "تز"), + (0xFC72, "M", "تم"), + (0xFC73, "M", "تن"), + (0xFC74, "M", "تى"), + (0xFC75, "M", "تي"), + (0xFC76, "M", "ثر"), + (0xFC77, "M", "ثز"), + (0xFC78, "M", "ثم"), + (0xFC79, "M", "ثن"), + (0xFC7A, "M", "ثى"), + (0xFC7B, "M", "ثي"), + (0xFC7C, "M", "فى"), + (0xFC7D, "M", "في"), + (0xFC7E, "M", "قى"), + (0xFC7F, "M", "قي"), + (0xFC80, "M", "كا"), + (0xFC81, "M", "كل"), + (0xFC82, "M", "كم"), + (0xFC83, "M", "كى"), + (0xFC84, "M", "كي"), + (0xFC85, "M", "لم"), + (0xFC86, "M", "لى"), + (0xFC87, "M", "لي"), + (0xFC88, "M", "ما"), + (0xFC89, "M", "مم"), + (0xFC8A, "M", "نر"), + (0xFC8B, "M", "نز"), + (0xFC8C, "M", "نم"), + (0xFC8D, "M", "نن"), + (0xFC8E, "M", "نى"), + (0xFC8F, "M", "ني"), + (0xFC90, "M", "ىٰ"), + (0xFC91, "M", "ير"), + (0xFC92, "M", "يز"), + (0xFC93, "M", "يم"), + (0xFC94, "M", "ين"), + (0xFC95, "M", "يى"), + (0xFC96, "M", "يي"), + (0xFC97, "M", "ئج"), + (0xFC98, "M", "ئح"), + (0xFC99, "M", "ئخ"), + (0xFC9A, "M", "ئم"), + (0xFC9B, "M", "ئه"), + (0xFC9C, "M", "بج"), + (0xFC9D, "M", "بح"), + (0xFC9E, "M", "بخ"), + (0xFC9F, "M", "بم"), + (0xFCA0, "M", "به"), + (0xFCA1, "M", "تج"), + (0xFCA2, "M", "تح"), + (0xFCA3, "M", "تخ"), + (0xFCA4, "M", "تم"), + (0xFCA5, "M", "ته"), + (0xFCA6, "M", "ثم"), + (0xFCA7, "M", "جح"), + (0xFCA8, "M", "جم"), + (0xFCA9, "M", "حج"), + (0xFCAA, "M", "حم"), + (0xFCAB, "M", "خج"), + (0xFCAC, "M", "خم"), + (0xFCAD, "M", "سج"), + (0xFCAE, "M", "سح"), + (0xFCAF, "M", "سخ"), + (0xFCB0, "M", "سم"), + (0xFCB1, "M", "صح"), + (0xFCB2, "M", "صخ"), + (0xFCB3, "M", "صم"), + (0xFCB4, "M", "ضج"), + (0xFCB5, "M", "ضح"), + (0xFCB6, "M", "ضخ"), + (0xFCB7, "M", "ضم"), + (0xFCB8, "M", "طح"), + (0xFCB9, "M", "ظم"), + (0xFCBA, "M", "عج"), + (0xFCBB, "M", "عم"), + (0xFCBC, "M", "غج"), + (0xFCBD, "M", "غم"), + (0xFCBE, "M", "فج"), + ] + + +def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFCBF, "M", "فح"), + (0xFCC0, "M", "فخ"), + (0xFCC1, "M", "فم"), + (0xFCC2, "M", "قح"), + (0xFCC3, "M", "قم"), + (0xFCC4, "M", "كج"), + (0xFCC5, "M", "كح"), + (0xFCC6, "M", "كخ"), + (0xFCC7, "M", "كل"), + (0xFCC8, "M", "كم"), + (0xFCC9, "M", "لج"), + (0xFCCA, "M", "لح"), + (0xFCCB, "M", "لخ"), + (0xFCCC, "M", "لم"), + (0xFCCD, "M", "له"), + (0xFCCE, "M", "مج"), + (0xFCCF, "M", "مح"), + (0xFCD0, "M", "مخ"), + (0xFCD1, "M", "مم"), + (0xFCD2, "M", "نج"), + (0xFCD3, "M", "نح"), + (0xFCD4, "M", "نخ"), + (0xFCD5, "M", "نم"), + (0xFCD6, "M", "نه"), + (0xFCD7, "M", "هج"), + (0xFCD8, "M", "هم"), + (0xFCD9, "M", "هٰ"), + (0xFCDA, "M", "يج"), + (0xFCDB, "M", "يح"), + (0xFCDC, "M", "يخ"), + (0xFCDD, "M", "يم"), + (0xFCDE, "M", "يه"), + (0xFCDF, "M", "ئم"), + (0xFCE0, "M", "ئه"), + (0xFCE1, "M", "بم"), + (0xFCE2, "M", "به"), + (0xFCE3, "M", "تم"), + (0xFCE4, "M", "ته"), + (0xFCE5, "M", "ثم"), + (0xFCE6, "M", "ثه"), + (0xFCE7, "M", "سم"), + (0xFCE8, "M", "سه"), + (0xFCE9, "M", "شم"), + (0xFCEA, "M", "شه"), + (0xFCEB, "M", "كل"), + (0xFCEC, 
"M", "كم"), + (0xFCED, "M", "لم"), + (0xFCEE, "M", "نم"), + (0xFCEF, "M", "نه"), + (0xFCF0, "M", "يم"), + (0xFCF1, "M", "يه"), + (0xFCF2, "M", "ـَّ"), + (0xFCF3, "M", "ـُّ"), + (0xFCF4, "M", "ـِّ"), + (0xFCF5, "M", "طى"), + (0xFCF6, "M", "طي"), + (0xFCF7, "M", "عى"), + (0xFCF8, "M", "عي"), + (0xFCF9, "M", "غى"), + (0xFCFA, "M", "غي"), + (0xFCFB, "M", "سى"), + (0xFCFC, "M", "سي"), + (0xFCFD, "M", "شى"), + (0xFCFE, "M", "شي"), + (0xFCFF, "M", "حى"), + (0xFD00, "M", "حي"), + (0xFD01, "M", "جى"), + (0xFD02, "M", "جي"), + (0xFD03, "M", "خى"), + (0xFD04, "M", "خي"), + (0xFD05, "M", "صى"), + (0xFD06, "M", "صي"), + (0xFD07, "M", "ضى"), + (0xFD08, "M", "ضي"), + (0xFD09, "M", "شج"), + (0xFD0A, "M", "شح"), + (0xFD0B, "M", "شخ"), + (0xFD0C, "M", "شم"), + (0xFD0D, "M", "شر"), + (0xFD0E, "M", "سر"), + (0xFD0F, "M", "صر"), + (0xFD10, "M", "ضر"), + (0xFD11, "M", "طى"), + (0xFD12, "M", "طي"), + (0xFD13, "M", "عى"), + (0xFD14, "M", "عي"), + (0xFD15, "M", "غى"), + (0xFD16, "M", "غي"), + (0xFD17, "M", "سى"), + (0xFD18, "M", "سي"), + (0xFD19, "M", "شى"), + (0xFD1A, "M", "شي"), + (0xFD1B, "M", "حى"), + (0xFD1C, "M", "حي"), + (0xFD1D, "M", "جى"), + (0xFD1E, "M", "جي"), + (0xFD1F, "M", "خى"), + (0xFD20, "M", "خي"), + (0xFD21, "M", "صى"), + (0xFD22, "M", "صي"), + ] + + +def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFD23, "M", "ضى"), + (0xFD24, "M", "ضي"), + (0xFD25, "M", "شج"), + (0xFD26, "M", "شح"), + (0xFD27, "M", "شخ"), + (0xFD28, "M", "شم"), + (0xFD29, "M", "شر"), + (0xFD2A, "M", "سر"), + (0xFD2B, "M", "صر"), + (0xFD2C, "M", "ضر"), + (0xFD2D, "M", "شج"), + (0xFD2E, "M", "شح"), + (0xFD2F, "M", "شخ"), + (0xFD30, "M", "شم"), + (0xFD31, "M", "سه"), + (0xFD32, "M", "شه"), + (0xFD33, "M", "طم"), + (0xFD34, "M", "سج"), + (0xFD35, "M", "سح"), + (0xFD36, "M", "سخ"), + (0xFD37, "M", "شج"), + (0xFD38, "M", "شح"), + (0xFD39, "M", "شخ"), + (0xFD3A, "M", "طم"), + (0xFD3B, "M", "ظم"), + (0xFD3C, "M", "اً"), + (0xFD3E, "V"), + (0xFD50, "M", "تجم"), + (0xFD51, "M", "تحج"), + (0xFD53, "M", "تحم"), + (0xFD54, "M", "تخم"), + (0xFD55, "M", "تمج"), + (0xFD56, "M", "تمح"), + (0xFD57, "M", "تمخ"), + (0xFD58, "M", "جمح"), + (0xFD5A, "M", "حمي"), + (0xFD5B, "M", "حمى"), + (0xFD5C, "M", "سحج"), + (0xFD5D, "M", "سجح"), + (0xFD5E, "M", "سجى"), + (0xFD5F, "M", "سمح"), + (0xFD61, "M", "سمج"), + (0xFD62, "M", "سمم"), + (0xFD64, "M", "صحح"), + (0xFD66, "M", "صمم"), + (0xFD67, "M", "شحم"), + (0xFD69, "M", "شجي"), + (0xFD6A, "M", "شمخ"), + (0xFD6C, "M", "شمم"), + (0xFD6E, "M", "ضحى"), + (0xFD6F, "M", "ضخم"), + (0xFD71, "M", "طمح"), + (0xFD73, "M", "طمم"), + (0xFD74, "M", "طمي"), + (0xFD75, "M", "عجم"), + (0xFD76, "M", "عمم"), + (0xFD78, "M", "عمى"), + (0xFD79, "M", "غمم"), + (0xFD7A, "M", "غمي"), + (0xFD7B, "M", "غمى"), + (0xFD7C, "M", "فخم"), + (0xFD7E, "M", "قمح"), + (0xFD7F, "M", "قمم"), + (0xFD80, "M", "لحم"), + (0xFD81, "M", "لحي"), + (0xFD82, "M", "لحى"), + (0xFD83, "M", "لجج"), + (0xFD85, "M", "لخم"), + (0xFD87, "M", "لمح"), + (0xFD89, "M", "محج"), + (0xFD8A, "M", "محم"), + (0xFD8B, "M", "محي"), + (0xFD8C, "M", "مجح"), + (0xFD8D, "M", "مجم"), + (0xFD8E, "M", "مخج"), + (0xFD8F, "M", "مخم"), + (0xFD90, "X"), + (0xFD92, "M", "مجخ"), + (0xFD93, "M", "همج"), + (0xFD94, "M", "همم"), + (0xFD95, "M", "نحم"), + (0xFD96, "M", "نحى"), + (0xFD97, "M", "نجم"), + (0xFD99, "M", "نجى"), + (0xFD9A, "M", "نمي"), + (0xFD9B, "M", "نمى"), + (0xFD9C, "M", "يمم"), + (0xFD9E, "M", "بخي"), + (0xFD9F, "M", "تجي"), + (0xFDA0, "M", "تجى"), + (0xFDA1, "M", "تخي"), + (0xFDA2, "M", "تخى"), + (0xFDA3, "M", "تمي"), + (0xFDA4, "M", 
"تمى"), + (0xFDA5, "M", "جمي"), + (0xFDA6, "M", "جحى"), + (0xFDA7, "M", "جمى"), + (0xFDA8, "M", "سخى"), + (0xFDA9, "M", "صحي"), + (0xFDAA, "M", "شحي"), + ] + + +def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFDAB, "M", "ضحي"), + (0xFDAC, "M", "لجي"), + (0xFDAD, "M", "لمي"), + (0xFDAE, "M", "يحي"), + (0xFDAF, "M", "يجي"), + (0xFDB0, "M", "يمي"), + (0xFDB1, "M", "ممي"), + (0xFDB2, "M", "قمي"), + (0xFDB3, "M", "نحي"), + (0xFDB4, "M", "قمح"), + (0xFDB5, "M", "لحم"), + (0xFDB6, "M", "عمي"), + (0xFDB7, "M", "كمي"), + (0xFDB8, "M", "نجح"), + (0xFDB9, "M", "مخي"), + (0xFDBA, "M", "لجم"), + (0xFDBB, "M", "كمم"), + (0xFDBC, "M", "لجم"), + (0xFDBD, "M", "نجح"), + (0xFDBE, "M", "جحي"), + (0xFDBF, "M", "حجي"), + (0xFDC0, "M", "مجي"), + (0xFDC1, "M", "فمي"), + (0xFDC2, "M", "بحي"), + (0xFDC3, "M", "كمم"), + (0xFDC4, "M", "عجم"), + (0xFDC5, "M", "صمم"), + (0xFDC6, "M", "سخي"), + (0xFDC7, "M", "نجي"), + (0xFDC8, "X"), + (0xFDCF, "V"), + (0xFDD0, "X"), + (0xFDF0, "M", "صلے"), + (0xFDF1, "M", "قلے"), + (0xFDF2, "M", "الله"), + (0xFDF3, "M", "اكبر"), + (0xFDF4, "M", "محمد"), + (0xFDF5, "M", "صلعم"), + (0xFDF6, "M", "رسول"), + (0xFDF7, "M", "عليه"), + (0xFDF8, "M", "وسلم"), + (0xFDF9, "M", "صلى"), + (0xFDFA, "3", "صلى الله عليه وسلم"), + (0xFDFB, "3", "جل جلاله"), + (0xFDFC, "M", "ریال"), + (0xFDFD, "V"), + (0xFE00, "I"), + (0xFE10, "3", ","), + (0xFE11, "M", "、"), + (0xFE12, "X"), + (0xFE13, "3", ":"), + (0xFE14, "3", ";"), + (0xFE15, "3", "!"), + (0xFE16, "3", "?"), + (0xFE17, "M", "〖"), + (0xFE18, "M", "〗"), + (0xFE19, "X"), + (0xFE20, "V"), + (0xFE30, "X"), + (0xFE31, "M", "—"), + (0xFE32, "M", "–"), + (0xFE33, "3", "_"), + (0xFE35, "3", "("), + (0xFE36, "3", ")"), + (0xFE37, "3", "{"), + (0xFE38, "3", "}"), + (0xFE39, "M", "〔"), + (0xFE3A, "M", "〕"), + (0xFE3B, "M", "【"), + (0xFE3C, "M", "】"), + (0xFE3D, "M", "《"), + (0xFE3E, "M", "》"), + (0xFE3F, "M", "〈"), + (0xFE40, "M", "〉"), + (0xFE41, "M", "「"), + (0xFE42, "M", "」"), + (0xFE43, "M", "『"), + (0xFE44, "M", "』"), + (0xFE45, "V"), + (0xFE47, "3", "["), + (0xFE48, "3", "]"), + (0xFE49, "3", " ̅"), + (0xFE4D, "3", "_"), + (0xFE50, "3", ","), + (0xFE51, "M", "、"), + (0xFE52, "X"), + (0xFE54, "3", ";"), + (0xFE55, "3", ":"), + (0xFE56, "3", "?"), + (0xFE57, "3", "!"), + (0xFE58, "M", "—"), + (0xFE59, "3", "("), + (0xFE5A, "3", ")"), + (0xFE5B, "3", "{"), + (0xFE5C, "3", "}"), + (0xFE5D, "M", "〔"), + (0xFE5E, "M", "〕"), + (0xFE5F, "3", "#"), + (0xFE60, "3", "&"), + (0xFE61, "3", "*"), + ] + + +def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFE62, "3", "+"), + (0xFE63, "M", "-"), + (0xFE64, "3", "<"), + (0xFE65, "3", ">"), + (0xFE66, "3", "="), + (0xFE67, "X"), + (0xFE68, "3", "\\"), + (0xFE69, "3", "$"), + (0xFE6A, "3", "%"), + (0xFE6B, "3", "@"), + (0xFE6C, "X"), + (0xFE70, "3", " ً"), + (0xFE71, "M", "ـً"), + (0xFE72, "3", " ٌ"), + (0xFE73, "V"), + (0xFE74, "3", " ٍ"), + (0xFE75, "X"), + (0xFE76, "3", " َ"), + (0xFE77, "M", "ـَ"), + (0xFE78, "3", " ُ"), + (0xFE79, "M", "ـُ"), + (0xFE7A, "3", " ِ"), + (0xFE7B, "M", "ـِ"), + (0xFE7C, "3", " ّ"), + (0xFE7D, "M", "ـّ"), + (0xFE7E, "3", " ْ"), + (0xFE7F, "M", "ـْ"), + (0xFE80, "M", "ء"), + (0xFE81, "M", "آ"), + (0xFE83, "M", "أ"), + (0xFE85, "M", "ؤ"), + (0xFE87, "M", "إ"), + (0xFE89, "M", "ئ"), + (0xFE8D, "M", "ا"), + (0xFE8F, "M", "ب"), + (0xFE93, "M", "ة"), + (0xFE95, "M", "ت"), + (0xFE99, "M", "ث"), + (0xFE9D, "M", "ج"), + (0xFEA1, "M", "ح"), + (0xFEA5, "M", "خ"), + (0xFEA9, "M", "د"), + (0xFEAB, "M", "ذ"), + (0xFEAD, "M", "ر"), + 
(0xFEAF, "M", "ز"), + (0xFEB1, "M", "س"), + (0xFEB5, "M", "ش"), + (0xFEB9, "M", "ص"), + (0xFEBD, "M", "ض"), + (0xFEC1, "M", "ط"), + (0xFEC5, "M", "ظ"), + (0xFEC9, "M", "ع"), + (0xFECD, "M", "غ"), + (0xFED1, "M", "ف"), + (0xFED5, "M", "ق"), + (0xFED9, "M", "ك"), + (0xFEDD, "M", "ل"), + (0xFEE1, "M", "م"), + (0xFEE5, "M", "ن"), + (0xFEE9, "M", "ه"), + (0xFEED, "M", "و"), + (0xFEEF, "M", "ى"), + (0xFEF1, "M", "ي"), + (0xFEF5, "M", "لآ"), + (0xFEF7, "M", "لأ"), + (0xFEF9, "M", "لإ"), + (0xFEFB, "M", "لا"), + (0xFEFD, "X"), + (0xFEFF, "I"), + (0xFF00, "X"), + (0xFF01, "3", "!"), + (0xFF02, "3", '"'), + (0xFF03, "3", "#"), + (0xFF04, "3", "$"), + (0xFF05, "3", "%"), + (0xFF06, "3", "&"), + (0xFF07, "3", "'"), + (0xFF08, "3", "("), + (0xFF09, "3", ")"), + (0xFF0A, "3", "*"), + (0xFF0B, "3", "+"), + (0xFF0C, "3", ","), + (0xFF0D, "M", "-"), + (0xFF0E, "M", "."), + (0xFF0F, "3", "/"), + (0xFF10, "M", "0"), + (0xFF11, "M", "1"), + (0xFF12, "M", "2"), + (0xFF13, "M", "3"), + (0xFF14, "M", "4"), + (0xFF15, "M", "5"), + (0xFF16, "M", "6"), + (0xFF17, "M", "7"), + (0xFF18, "M", "8"), + (0xFF19, "M", "9"), + (0xFF1A, "3", ":"), + (0xFF1B, "3", ";"), + (0xFF1C, "3", "<"), + (0xFF1D, "3", "="), + (0xFF1E, "3", ">"), + ] + + +def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFF1F, "3", "?"), + (0xFF20, "3", "@"), + (0xFF21, "M", "a"), + (0xFF22, "M", "b"), + (0xFF23, "M", "c"), + (0xFF24, "M", "d"), + (0xFF25, "M", "e"), + (0xFF26, "M", "f"), + (0xFF27, "M", "g"), + (0xFF28, "M", "h"), + (0xFF29, "M", "i"), + (0xFF2A, "M", "j"), + (0xFF2B, "M", "k"), + (0xFF2C, "M", "l"), + (0xFF2D, "M", "m"), + (0xFF2E, "M", "n"), + (0xFF2F, "M", "o"), + (0xFF30, "M", "p"), + (0xFF31, "M", "q"), + (0xFF32, "M", "r"), + (0xFF33, "M", "s"), + (0xFF34, "M", "t"), + (0xFF35, "M", "u"), + (0xFF36, "M", "v"), + (0xFF37, "M", "w"), + (0xFF38, "M", "x"), + (0xFF39, "M", "y"), + (0xFF3A, "M", "z"), + (0xFF3B, "3", "["), + (0xFF3C, "3", "\\"), + (0xFF3D, "3", "]"), + (0xFF3E, "3", "^"), + (0xFF3F, "3", "_"), + (0xFF40, "3", "`"), + (0xFF41, "M", "a"), + (0xFF42, "M", "b"), + (0xFF43, "M", "c"), + (0xFF44, "M", "d"), + (0xFF45, "M", "e"), + (0xFF46, "M", "f"), + (0xFF47, "M", "g"), + (0xFF48, "M", "h"), + (0xFF49, "M", "i"), + (0xFF4A, "M", "j"), + (0xFF4B, "M", "k"), + (0xFF4C, "M", "l"), + (0xFF4D, "M", "m"), + (0xFF4E, "M", "n"), + (0xFF4F, "M", "o"), + (0xFF50, "M", "p"), + (0xFF51, "M", "q"), + (0xFF52, "M", "r"), + (0xFF53, "M", "s"), + (0xFF54, "M", "t"), + (0xFF55, "M", "u"), + (0xFF56, "M", "v"), + (0xFF57, "M", "w"), + (0xFF58, "M", "x"), + (0xFF59, "M", "y"), + (0xFF5A, "M", "z"), + (0xFF5B, "3", "{"), + (0xFF5C, "3", "|"), + (0xFF5D, "3", "}"), + (0xFF5E, "3", "~"), + (0xFF5F, "M", "⦅"), + (0xFF60, "M", "⦆"), + (0xFF61, "M", "."), + (0xFF62, "M", "「"), + (0xFF63, "M", "」"), + (0xFF64, "M", "、"), + (0xFF65, "M", "・"), + (0xFF66, "M", "ヲ"), + (0xFF67, "M", "ァ"), + (0xFF68, "M", "ィ"), + (0xFF69, "M", "ゥ"), + (0xFF6A, "M", "ェ"), + (0xFF6B, "M", "ォ"), + (0xFF6C, "M", "ャ"), + (0xFF6D, "M", "ュ"), + (0xFF6E, "M", "ョ"), + (0xFF6F, "M", "ッ"), + (0xFF70, "M", "ー"), + (0xFF71, "M", "ア"), + (0xFF72, "M", "イ"), + (0xFF73, "M", "ウ"), + (0xFF74, "M", "エ"), + (0xFF75, "M", "オ"), + (0xFF76, "M", "カ"), + (0xFF77, "M", "キ"), + (0xFF78, "M", "ク"), + (0xFF79, "M", "ケ"), + (0xFF7A, "M", "コ"), + (0xFF7B, "M", "サ"), + (0xFF7C, "M", "シ"), + (0xFF7D, "M", "ス"), + (0xFF7E, "M", "セ"), + (0xFF7F, "M", "ソ"), + (0xFF80, "M", "タ"), + (0xFF81, "M", "チ"), + (0xFF82, "M", "ツ"), + ] + + +def _seg_52() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFF83, "M", "テ"), + (0xFF84, "M", "ト"), + (0xFF85, "M", "ナ"), + (0xFF86, "M", "ニ"), + (0xFF87, "M", "ヌ"), + (0xFF88, "M", "ネ"), + (0xFF89, "M", "ノ"), + (0xFF8A, "M", "ハ"), + (0xFF8B, "M", "ヒ"), + (0xFF8C, "M", "フ"), + (0xFF8D, "M", "ヘ"), + (0xFF8E, "M", "ホ"), + (0xFF8F, "M", "マ"), + (0xFF90, "M", "ミ"), + (0xFF91, "M", "ム"), + (0xFF92, "M", "メ"), + (0xFF93, "M", "モ"), + (0xFF94, "M", "ヤ"), + (0xFF95, "M", "ユ"), + (0xFF96, "M", "ヨ"), + (0xFF97, "M", "ラ"), + (0xFF98, "M", "リ"), + (0xFF99, "M", "ル"), + (0xFF9A, "M", "レ"), + (0xFF9B, "M", "ロ"), + (0xFF9C, "M", "ワ"), + (0xFF9D, "M", "ン"), + (0xFF9E, "M", "゙"), + (0xFF9F, "M", "゚"), + (0xFFA0, "X"), + (0xFFA1, "M", "ᄀ"), + (0xFFA2, "M", "ᄁ"), + (0xFFA3, "M", "ᆪ"), + (0xFFA4, "M", "ᄂ"), + (0xFFA5, "M", "ᆬ"), + (0xFFA6, "M", "ᆭ"), + (0xFFA7, "M", "ᄃ"), + (0xFFA8, "M", "ᄄ"), + (0xFFA9, "M", "ᄅ"), + (0xFFAA, "M", "ᆰ"), + (0xFFAB, "M", "ᆱ"), + (0xFFAC, "M", "ᆲ"), + (0xFFAD, "M", "ᆳ"), + (0xFFAE, "M", "ᆴ"), + (0xFFAF, "M", "ᆵ"), + (0xFFB0, "M", "ᄚ"), + (0xFFB1, "M", "ᄆ"), + (0xFFB2, "M", "ᄇ"), + (0xFFB3, "M", "ᄈ"), + (0xFFB4, "M", "ᄡ"), + (0xFFB5, "M", "ᄉ"), + (0xFFB6, "M", "ᄊ"), + (0xFFB7, "M", "ᄋ"), + (0xFFB8, "M", "ᄌ"), + (0xFFB9, "M", "ᄍ"), + (0xFFBA, "M", "ᄎ"), + (0xFFBB, "M", "ᄏ"), + (0xFFBC, "M", "ᄐ"), + (0xFFBD, "M", "ᄑ"), + (0xFFBE, "M", "ᄒ"), + (0xFFBF, "X"), + (0xFFC2, "M", "ᅡ"), + (0xFFC3, "M", "ᅢ"), + (0xFFC4, "M", "ᅣ"), + (0xFFC5, "M", "ᅤ"), + (0xFFC6, "M", "ᅥ"), + (0xFFC7, "M", "ᅦ"), + (0xFFC8, "X"), + (0xFFCA, "M", "ᅧ"), + (0xFFCB, "M", "ᅨ"), + (0xFFCC, "M", "ᅩ"), + (0xFFCD, "M", "ᅪ"), + (0xFFCE, "M", "ᅫ"), + (0xFFCF, "M", "ᅬ"), + (0xFFD0, "X"), + (0xFFD2, "M", "ᅭ"), + (0xFFD3, "M", "ᅮ"), + (0xFFD4, "M", "ᅯ"), + (0xFFD5, "M", "ᅰ"), + (0xFFD6, "M", "ᅱ"), + (0xFFD7, "M", "ᅲ"), + (0xFFD8, "X"), + (0xFFDA, "M", "ᅳ"), + (0xFFDB, "M", "ᅴ"), + (0xFFDC, "M", "ᅵ"), + (0xFFDD, "X"), + (0xFFE0, "M", "¢"), + (0xFFE1, "M", "£"), + (0xFFE2, "M", "¬"), + (0xFFE3, "3", " ̄"), + (0xFFE4, "M", "¦"), + (0xFFE5, "M", "¥"), + (0xFFE6, "M", "₩"), + (0xFFE7, "X"), + (0xFFE8, "M", "│"), + (0xFFE9, "M", "←"), + (0xFFEA, "M", "↑"), + (0xFFEB, "M", "→"), + (0xFFEC, "M", "↓"), + (0xFFED, "M", "■"), + ] + + +def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFFEE, "M", "○"), + (0xFFEF, "X"), + (0x10000, "V"), + (0x1000C, "X"), + (0x1000D, "V"), + (0x10027, "X"), + (0x10028, "V"), + (0x1003B, "X"), + (0x1003C, "V"), + (0x1003E, "X"), + (0x1003F, "V"), + (0x1004E, "X"), + (0x10050, "V"), + (0x1005E, "X"), + (0x10080, "V"), + (0x100FB, "X"), + (0x10100, "V"), + (0x10103, "X"), + (0x10107, "V"), + (0x10134, "X"), + (0x10137, "V"), + (0x1018F, "X"), + (0x10190, "V"), + (0x1019D, "X"), + (0x101A0, "V"), + (0x101A1, "X"), + (0x101D0, "V"), + (0x101FE, "X"), + (0x10280, "V"), + (0x1029D, "X"), + (0x102A0, "V"), + (0x102D1, "X"), + (0x102E0, "V"), + (0x102FC, "X"), + (0x10300, "V"), + (0x10324, "X"), + (0x1032D, "V"), + (0x1034B, "X"), + (0x10350, "V"), + (0x1037B, "X"), + (0x10380, "V"), + (0x1039E, "X"), + (0x1039F, "V"), + (0x103C4, "X"), + (0x103C8, "V"), + (0x103D6, "X"), + (0x10400, "M", "𐐨"), + (0x10401, "M", "𐐩"), + (0x10402, "M", "𐐪"), + (0x10403, "M", "𐐫"), + (0x10404, "M", "𐐬"), + (0x10405, "M", "𐐭"), + (0x10406, "M", "𐐮"), + (0x10407, "M", "𐐯"), + (0x10408, "M", "𐐰"), + (0x10409, "M", "𐐱"), + (0x1040A, "M", "𐐲"), + (0x1040B, "M", "𐐳"), + (0x1040C, "M", "𐐴"), + (0x1040D, "M", "𐐵"), + (0x1040E, "M", "𐐶"), + (0x1040F, "M", "𐐷"), + (0x10410, "M", "𐐸"), + (0x10411, 
"M", "𐐹"), + (0x10412, "M", "𐐺"), + (0x10413, "M", "𐐻"), + (0x10414, "M", "𐐼"), + (0x10415, "M", "𐐽"), + (0x10416, "M", "𐐾"), + (0x10417, "M", "𐐿"), + (0x10418, "M", "𐑀"), + (0x10419, "M", "𐑁"), + (0x1041A, "M", "𐑂"), + (0x1041B, "M", "𐑃"), + (0x1041C, "M", "𐑄"), + (0x1041D, "M", "𐑅"), + (0x1041E, "M", "𐑆"), + (0x1041F, "M", "𐑇"), + (0x10420, "M", "𐑈"), + (0x10421, "M", "𐑉"), + (0x10422, "M", "𐑊"), + (0x10423, "M", "𐑋"), + (0x10424, "M", "𐑌"), + (0x10425, "M", "𐑍"), + (0x10426, "M", "𐑎"), + (0x10427, "M", "𐑏"), + (0x10428, "V"), + (0x1049E, "X"), + (0x104A0, "V"), + (0x104AA, "X"), + (0x104B0, "M", "𐓘"), + (0x104B1, "M", "𐓙"), + (0x104B2, "M", "𐓚"), + (0x104B3, "M", "𐓛"), + (0x104B4, "M", "𐓜"), + (0x104B5, "M", "𐓝"), + (0x104B6, "M", "𐓞"), + (0x104B7, "M", "𐓟"), + (0x104B8, "M", "𐓠"), + (0x104B9, "M", "𐓡"), + ] + + +def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x104BA, "M", "𐓢"), + (0x104BB, "M", "𐓣"), + (0x104BC, "M", "𐓤"), + (0x104BD, "M", "𐓥"), + (0x104BE, "M", "𐓦"), + (0x104BF, "M", "𐓧"), + (0x104C0, "M", "𐓨"), + (0x104C1, "M", "𐓩"), + (0x104C2, "M", "𐓪"), + (0x104C3, "M", "𐓫"), + (0x104C4, "M", "𐓬"), + (0x104C5, "M", "𐓭"), + (0x104C6, "M", "𐓮"), + (0x104C7, "M", "𐓯"), + (0x104C8, "M", "𐓰"), + (0x104C9, "M", "𐓱"), + (0x104CA, "M", "𐓲"), + (0x104CB, "M", "𐓳"), + (0x104CC, "M", "𐓴"), + (0x104CD, "M", "𐓵"), + (0x104CE, "M", "𐓶"), + (0x104CF, "M", "𐓷"), + (0x104D0, "M", "𐓸"), + (0x104D1, "M", "𐓹"), + (0x104D2, "M", "𐓺"), + (0x104D3, "M", "𐓻"), + (0x104D4, "X"), + (0x104D8, "V"), + (0x104FC, "X"), + (0x10500, "V"), + (0x10528, "X"), + (0x10530, "V"), + (0x10564, "X"), + (0x1056F, "V"), + (0x10570, "M", "𐖗"), + (0x10571, "M", "𐖘"), + (0x10572, "M", "𐖙"), + (0x10573, "M", "𐖚"), + (0x10574, "M", "𐖛"), + (0x10575, "M", "𐖜"), + (0x10576, "M", "𐖝"), + (0x10577, "M", "𐖞"), + (0x10578, "M", "𐖟"), + (0x10579, "M", "𐖠"), + (0x1057A, "M", "𐖡"), + (0x1057B, "X"), + (0x1057C, "M", "𐖣"), + (0x1057D, "M", "𐖤"), + (0x1057E, "M", "𐖥"), + (0x1057F, "M", "𐖦"), + (0x10580, "M", "𐖧"), + (0x10581, "M", "𐖨"), + (0x10582, "M", "𐖩"), + (0x10583, "M", "𐖪"), + (0x10584, "M", "𐖫"), + (0x10585, "M", "𐖬"), + (0x10586, "M", "𐖭"), + (0x10587, "M", "𐖮"), + (0x10588, "M", "𐖯"), + (0x10589, "M", "𐖰"), + (0x1058A, "M", "𐖱"), + (0x1058B, "X"), + (0x1058C, "M", "𐖳"), + (0x1058D, "M", "𐖴"), + (0x1058E, "M", "𐖵"), + (0x1058F, "M", "𐖶"), + (0x10590, "M", "𐖷"), + (0x10591, "M", "𐖸"), + (0x10592, "M", "𐖹"), + (0x10593, "X"), + (0x10594, "M", "𐖻"), + (0x10595, "M", "𐖼"), + (0x10596, "X"), + (0x10597, "V"), + (0x105A2, "X"), + (0x105A3, "V"), + (0x105B2, "X"), + (0x105B3, "V"), + (0x105BA, "X"), + (0x105BB, "V"), + (0x105BD, "X"), + (0x10600, "V"), + (0x10737, "X"), + (0x10740, "V"), + (0x10756, "X"), + (0x10760, "V"), + (0x10768, "X"), + (0x10780, "V"), + (0x10781, "M", "ː"), + (0x10782, "M", "ˑ"), + (0x10783, "M", "æ"), + (0x10784, "M", "ʙ"), + (0x10785, "M", "ɓ"), + (0x10786, "X"), + (0x10787, "M", "ʣ"), + (0x10788, "M", "ꭦ"), + (0x10789, "M", "ʥ"), + (0x1078A, "M", "ʤ"), + (0x1078B, "M", "ɖ"), + (0x1078C, "M", "ɗ"), + ] + + +def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1078D, "M", "ᶑ"), + (0x1078E, "M", "ɘ"), + (0x1078F, "M", "ɞ"), + (0x10790, "M", "ʩ"), + (0x10791, "M", "ɤ"), + (0x10792, "M", "ɢ"), + (0x10793, "M", "ɠ"), + (0x10794, "M", "ʛ"), + (0x10795, "M", "ħ"), + (0x10796, "M", "ʜ"), + (0x10797, "M", "ɧ"), + (0x10798, "M", "ʄ"), + (0x10799, "M", "ʪ"), + (0x1079A, "M", "ʫ"), + (0x1079B, "M", "ɬ"), + (0x1079C, "M", "𝼄"), + (0x1079D, "M", "ꞎ"), + 
(0x1079E, "M", "ɮ"), + (0x1079F, "M", "𝼅"), + (0x107A0, "M", "ʎ"), + (0x107A1, "M", "𝼆"), + (0x107A2, "M", "ø"), + (0x107A3, "M", "ɶ"), + (0x107A4, "M", "ɷ"), + (0x107A5, "M", "q"), + (0x107A6, "M", "ɺ"), + (0x107A7, "M", "𝼈"), + (0x107A8, "M", "ɽ"), + (0x107A9, "M", "ɾ"), + (0x107AA, "M", "ʀ"), + (0x107AB, "M", "ʨ"), + (0x107AC, "M", "ʦ"), + (0x107AD, "M", "ꭧ"), + (0x107AE, "M", "ʧ"), + (0x107AF, "M", "ʈ"), + (0x107B0, "M", "ⱱ"), + (0x107B1, "X"), + (0x107B2, "M", "ʏ"), + (0x107B3, "M", "ʡ"), + (0x107B4, "M", "ʢ"), + (0x107B5, "M", "ʘ"), + (0x107B6, "M", "ǀ"), + (0x107B7, "M", "ǁ"), + (0x107B8, "M", "ǂ"), + (0x107B9, "M", "𝼊"), + (0x107BA, "M", "𝼞"), + (0x107BB, "X"), + (0x10800, "V"), + (0x10806, "X"), + (0x10808, "V"), + (0x10809, "X"), + (0x1080A, "V"), + (0x10836, "X"), + (0x10837, "V"), + (0x10839, "X"), + (0x1083C, "V"), + (0x1083D, "X"), + (0x1083F, "V"), + (0x10856, "X"), + (0x10857, "V"), + (0x1089F, "X"), + (0x108A7, "V"), + (0x108B0, "X"), + (0x108E0, "V"), + (0x108F3, "X"), + (0x108F4, "V"), + (0x108F6, "X"), + (0x108FB, "V"), + (0x1091C, "X"), + (0x1091F, "V"), + (0x1093A, "X"), + (0x1093F, "V"), + (0x10940, "X"), + (0x10980, "V"), + (0x109B8, "X"), + (0x109BC, "V"), + (0x109D0, "X"), + (0x109D2, "V"), + (0x10A04, "X"), + (0x10A05, "V"), + (0x10A07, "X"), + (0x10A0C, "V"), + (0x10A14, "X"), + (0x10A15, "V"), + (0x10A18, "X"), + (0x10A19, "V"), + (0x10A36, "X"), + (0x10A38, "V"), + (0x10A3B, "X"), + (0x10A3F, "V"), + (0x10A49, "X"), + (0x10A50, "V"), + (0x10A59, "X"), + (0x10A60, "V"), + (0x10AA0, "X"), + (0x10AC0, "V"), + (0x10AE7, "X"), + (0x10AEB, "V"), + (0x10AF7, "X"), + (0x10B00, "V"), + ] + + +def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x10B36, "X"), + (0x10B39, "V"), + (0x10B56, "X"), + (0x10B58, "V"), + (0x10B73, "X"), + (0x10B78, "V"), + (0x10B92, "X"), + (0x10B99, "V"), + (0x10B9D, "X"), + (0x10BA9, "V"), + (0x10BB0, "X"), + (0x10C00, "V"), + (0x10C49, "X"), + (0x10C80, "M", "𐳀"), + (0x10C81, "M", "𐳁"), + (0x10C82, "M", "𐳂"), + (0x10C83, "M", "𐳃"), + (0x10C84, "M", "𐳄"), + (0x10C85, "M", "𐳅"), + (0x10C86, "M", "𐳆"), + (0x10C87, "M", "𐳇"), + (0x10C88, "M", "𐳈"), + (0x10C89, "M", "𐳉"), + (0x10C8A, "M", "𐳊"), + (0x10C8B, "M", "𐳋"), + (0x10C8C, "M", "𐳌"), + (0x10C8D, "M", "𐳍"), + (0x10C8E, "M", "𐳎"), + (0x10C8F, "M", "𐳏"), + (0x10C90, "M", "𐳐"), + (0x10C91, "M", "𐳑"), + (0x10C92, "M", "𐳒"), + (0x10C93, "M", "𐳓"), + (0x10C94, "M", "𐳔"), + (0x10C95, "M", "𐳕"), + (0x10C96, "M", "𐳖"), + (0x10C97, "M", "𐳗"), + (0x10C98, "M", "𐳘"), + (0x10C99, "M", "𐳙"), + (0x10C9A, "M", "𐳚"), + (0x10C9B, "M", "𐳛"), + (0x10C9C, "M", "𐳜"), + (0x10C9D, "M", "𐳝"), + (0x10C9E, "M", "𐳞"), + (0x10C9F, "M", "𐳟"), + (0x10CA0, "M", "𐳠"), + (0x10CA1, "M", "𐳡"), + (0x10CA2, "M", "𐳢"), + (0x10CA3, "M", "𐳣"), + (0x10CA4, "M", "𐳤"), + (0x10CA5, "M", "𐳥"), + (0x10CA6, "M", "𐳦"), + (0x10CA7, "M", "𐳧"), + (0x10CA8, "M", "𐳨"), + (0x10CA9, "M", "𐳩"), + (0x10CAA, "M", "𐳪"), + (0x10CAB, "M", "𐳫"), + (0x10CAC, "M", "𐳬"), + (0x10CAD, "M", "𐳭"), + (0x10CAE, "M", "𐳮"), + (0x10CAF, "M", "𐳯"), + (0x10CB0, "M", "𐳰"), + (0x10CB1, "M", "𐳱"), + (0x10CB2, "M", "𐳲"), + (0x10CB3, "X"), + (0x10CC0, "V"), + (0x10CF3, "X"), + (0x10CFA, "V"), + (0x10D28, "X"), + (0x10D30, "V"), + (0x10D3A, "X"), + (0x10E60, "V"), + (0x10E7F, "X"), + (0x10E80, "V"), + (0x10EAA, "X"), + (0x10EAB, "V"), + (0x10EAE, "X"), + (0x10EB0, "V"), + (0x10EB2, "X"), + (0x10EFD, "V"), + (0x10F28, "X"), + (0x10F30, "V"), + (0x10F5A, "X"), + (0x10F70, "V"), + (0x10F8A, "X"), + (0x10FB0, "V"), + (0x10FCC, "X"), + (0x10FE0, 
"V"), + (0x10FF7, "X"), + (0x11000, "V"), + (0x1104E, "X"), + (0x11052, "V"), + (0x11076, "X"), + (0x1107F, "V"), + (0x110BD, "X"), + (0x110BE, "V"), + (0x110C3, "X"), + (0x110D0, "V"), + (0x110E9, "X"), + (0x110F0, "V"), + ] + + +def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x110FA, "X"), + (0x11100, "V"), + (0x11135, "X"), + (0x11136, "V"), + (0x11148, "X"), + (0x11150, "V"), + (0x11177, "X"), + (0x11180, "V"), + (0x111E0, "X"), + (0x111E1, "V"), + (0x111F5, "X"), + (0x11200, "V"), + (0x11212, "X"), + (0x11213, "V"), + (0x11242, "X"), + (0x11280, "V"), + (0x11287, "X"), + (0x11288, "V"), + (0x11289, "X"), + (0x1128A, "V"), + (0x1128E, "X"), + (0x1128F, "V"), + (0x1129E, "X"), + (0x1129F, "V"), + (0x112AA, "X"), + (0x112B0, "V"), + (0x112EB, "X"), + (0x112F0, "V"), + (0x112FA, "X"), + (0x11300, "V"), + (0x11304, "X"), + (0x11305, "V"), + (0x1130D, "X"), + (0x1130F, "V"), + (0x11311, "X"), + (0x11313, "V"), + (0x11329, "X"), + (0x1132A, "V"), + (0x11331, "X"), + (0x11332, "V"), + (0x11334, "X"), + (0x11335, "V"), + (0x1133A, "X"), + (0x1133B, "V"), + (0x11345, "X"), + (0x11347, "V"), + (0x11349, "X"), + (0x1134B, "V"), + (0x1134E, "X"), + (0x11350, "V"), + (0x11351, "X"), + (0x11357, "V"), + (0x11358, "X"), + (0x1135D, "V"), + (0x11364, "X"), + (0x11366, "V"), + (0x1136D, "X"), + (0x11370, "V"), + (0x11375, "X"), + (0x11400, "V"), + (0x1145C, "X"), + (0x1145D, "V"), + (0x11462, "X"), + (0x11480, "V"), + (0x114C8, "X"), + (0x114D0, "V"), + (0x114DA, "X"), + (0x11580, "V"), + (0x115B6, "X"), + (0x115B8, "V"), + (0x115DE, "X"), + (0x11600, "V"), + (0x11645, "X"), + (0x11650, "V"), + (0x1165A, "X"), + (0x11660, "V"), + (0x1166D, "X"), + (0x11680, "V"), + (0x116BA, "X"), + (0x116C0, "V"), + (0x116CA, "X"), + (0x11700, "V"), + (0x1171B, "X"), + (0x1171D, "V"), + (0x1172C, "X"), + (0x11730, "V"), + (0x11747, "X"), + (0x11800, "V"), + (0x1183C, "X"), + (0x118A0, "M", "𑣀"), + (0x118A1, "M", "𑣁"), + (0x118A2, "M", "𑣂"), + (0x118A3, "M", "𑣃"), + (0x118A4, "M", "𑣄"), + (0x118A5, "M", "𑣅"), + (0x118A6, "M", "𑣆"), + (0x118A7, "M", "𑣇"), + (0x118A8, "M", "𑣈"), + (0x118A9, "M", "𑣉"), + (0x118AA, "M", "𑣊"), + ] + + +def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x118AB, "M", "𑣋"), + (0x118AC, "M", "𑣌"), + (0x118AD, "M", "𑣍"), + (0x118AE, "M", "𑣎"), + (0x118AF, "M", "𑣏"), + (0x118B0, "M", "𑣐"), + (0x118B1, "M", "𑣑"), + (0x118B2, "M", "𑣒"), + (0x118B3, "M", "𑣓"), + (0x118B4, "M", "𑣔"), + (0x118B5, "M", "𑣕"), + (0x118B6, "M", "𑣖"), + (0x118B7, "M", "𑣗"), + (0x118B8, "M", "𑣘"), + (0x118B9, "M", "𑣙"), + (0x118BA, "M", "𑣚"), + (0x118BB, "M", "𑣛"), + (0x118BC, "M", "𑣜"), + (0x118BD, "M", "𑣝"), + (0x118BE, "M", "𑣞"), + (0x118BF, "M", "𑣟"), + (0x118C0, "V"), + (0x118F3, "X"), + (0x118FF, "V"), + (0x11907, "X"), + (0x11909, "V"), + (0x1190A, "X"), + (0x1190C, "V"), + (0x11914, "X"), + (0x11915, "V"), + (0x11917, "X"), + (0x11918, "V"), + (0x11936, "X"), + (0x11937, "V"), + (0x11939, "X"), + (0x1193B, "V"), + (0x11947, "X"), + (0x11950, "V"), + (0x1195A, "X"), + (0x119A0, "V"), + (0x119A8, "X"), + (0x119AA, "V"), + (0x119D8, "X"), + (0x119DA, "V"), + (0x119E5, "X"), + (0x11A00, "V"), + (0x11A48, "X"), + (0x11A50, "V"), + (0x11AA3, "X"), + (0x11AB0, "V"), + (0x11AF9, "X"), + (0x11B00, "V"), + (0x11B0A, "X"), + (0x11C00, "V"), + (0x11C09, "X"), + (0x11C0A, "V"), + (0x11C37, "X"), + (0x11C38, "V"), + (0x11C46, "X"), + (0x11C50, "V"), + (0x11C6D, "X"), + (0x11C70, "V"), + (0x11C90, "X"), + (0x11C92, "V"), + (0x11CA8, "X"), + (0x11CA9, "V"), + 
(0x11CB7, "X"), + (0x11D00, "V"), + (0x11D07, "X"), + (0x11D08, "V"), + (0x11D0A, "X"), + (0x11D0B, "V"), + (0x11D37, "X"), + (0x11D3A, "V"), + (0x11D3B, "X"), + (0x11D3C, "V"), + (0x11D3E, "X"), + (0x11D3F, "V"), + (0x11D48, "X"), + (0x11D50, "V"), + (0x11D5A, "X"), + (0x11D60, "V"), + (0x11D66, "X"), + (0x11D67, "V"), + (0x11D69, "X"), + (0x11D6A, "V"), + (0x11D8F, "X"), + (0x11D90, "V"), + (0x11D92, "X"), + (0x11D93, "V"), + (0x11D99, "X"), + (0x11DA0, "V"), + (0x11DAA, "X"), + (0x11EE0, "V"), + (0x11EF9, "X"), + (0x11F00, "V"), + (0x11F11, "X"), + (0x11F12, "V"), + (0x11F3B, "X"), + (0x11F3E, "V"), + ] + + +def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x11F5A, "X"), + (0x11FB0, "V"), + (0x11FB1, "X"), + (0x11FC0, "V"), + (0x11FF2, "X"), + (0x11FFF, "V"), + (0x1239A, "X"), + (0x12400, "V"), + (0x1246F, "X"), + (0x12470, "V"), + (0x12475, "X"), + (0x12480, "V"), + (0x12544, "X"), + (0x12F90, "V"), + (0x12FF3, "X"), + (0x13000, "V"), + (0x13430, "X"), + (0x13440, "V"), + (0x13456, "X"), + (0x14400, "V"), + (0x14647, "X"), + (0x16800, "V"), + (0x16A39, "X"), + (0x16A40, "V"), + (0x16A5F, "X"), + (0x16A60, "V"), + (0x16A6A, "X"), + (0x16A6E, "V"), + (0x16ABF, "X"), + (0x16AC0, "V"), + (0x16ACA, "X"), + (0x16AD0, "V"), + (0x16AEE, "X"), + (0x16AF0, "V"), + (0x16AF6, "X"), + (0x16B00, "V"), + (0x16B46, "X"), + (0x16B50, "V"), + (0x16B5A, "X"), + (0x16B5B, "V"), + (0x16B62, "X"), + (0x16B63, "V"), + (0x16B78, "X"), + (0x16B7D, "V"), + (0x16B90, "X"), + (0x16E40, "M", "𖹠"), + (0x16E41, "M", "𖹡"), + (0x16E42, "M", "𖹢"), + (0x16E43, "M", "𖹣"), + (0x16E44, "M", "𖹤"), + (0x16E45, "M", "𖹥"), + (0x16E46, "M", "𖹦"), + (0x16E47, "M", "𖹧"), + (0x16E48, "M", "𖹨"), + (0x16E49, "M", "𖹩"), + (0x16E4A, "M", "𖹪"), + (0x16E4B, "M", "𖹫"), + (0x16E4C, "M", "𖹬"), + (0x16E4D, "M", "𖹭"), + (0x16E4E, "M", "𖹮"), + (0x16E4F, "M", "𖹯"), + (0x16E50, "M", "𖹰"), + (0x16E51, "M", "𖹱"), + (0x16E52, "M", "𖹲"), + (0x16E53, "M", "𖹳"), + (0x16E54, "M", "𖹴"), + (0x16E55, "M", "𖹵"), + (0x16E56, "M", "𖹶"), + (0x16E57, "M", "𖹷"), + (0x16E58, "M", "𖹸"), + (0x16E59, "M", "𖹹"), + (0x16E5A, "M", "𖹺"), + (0x16E5B, "M", "𖹻"), + (0x16E5C, "M", "𖹼"), + (0x16E5D, "M", "𖹽"), + (0x16E5E, "M", "𖹾"), + (0x16E5F, "M", "𖹿"), + (0x16E60, "V"), + (0x16E9B, "X"), + (0x16F00, "V"), + (0x16F4B, "X"), + (0x16F4F, "V"), + (0x16F88, "X"), + (0x16F8F, "V"), + (0x16FA0, "X"), + (0x16FE0, "V"), + (0x16FE5, "X"), + (0x16FF0, "V"), + (0x16FF2, "X"), + (0x17000, "V"), + (0x187F8, "X"), + (0x18800, "V"), + (0x18CD6, "X"), + (0x18D00, "V"), + (0x18D09, "X"), + (0x1AFF0, "V"), + (0x1AFF4, "X"), + (0x1AFF5, "V"), + (0x1AFFC, "X"), + (0x1AFFD, "V"), + ] + + +def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1AFFF, "X"), + (0x1B000, "V"), + (0x1B123, "X"), + (0x1B132, "V"), + (0x1B133, "X"), + (0x1B150, "V"), + (0x1B153, "X"), + (0x1B155, "V"), + (0x1B156, "X"), + (0x1B164, "V"), + (0x1B168, "X"), + (0x1B170, "V"), + (0x1B2FC, "X"), + (0x1BC00, "V"), + (0x1BC6B, "X"), + (0x1BC70, "V"), + (0x1BC7D, "X"), + (0x1BC80, "V"), + (0x1BC89, "X"), + (0x1BC90, "V"), + (0x1BC9A, "X"), + (0x1BC9C, "V"), + (0x1BCA0, "I"), + (0x1BCA4, "X"), + (0x1CF00, "V"), + (0x1CF2E, "X"), + (0x1CF30, "V"), + (0x1CF47, "X"), + (0x1CF50, "V"), + (0x1CFC4, "X"), + (0x1D000, "V"), + (0x1D0F6, "X"), + (0x1D100, "V"), + (0x1D127, "X"), + (0x1D129, "V"), + (0x1D15E, "M", "𝅗𝅥"), + (0x1D15F, "M", "𝅘𝅥"), + (0x1D160, "M", "𝅘𝅥𝅮"), + (0x1D161, "M", "𝅘𝅥𝅯"), + (0x1D162, "M", "𝅘𝅥𝅰"), + (0x1D163, "M", "𝅘𝅥𝅱"), + (0x1D164, "M", "𝅘𝅥𝅲"), + 
(0x1D165, "V"), + (0x1D173, "X"), + (0x1D17B, "V"), + (0x1D1BB, "M", "𝆹𝅥"), + (0x1D1BC, "M", "𝆺𝅥"), + (0x1D1BD, "M", "𝆹𝅥𝅮"), + (0x1D1BE, "M", "𝆺𝅥𝅮"), + (0x1D1BF, "M", "𝆹𝅥𝅯"), + (0x1D1C0, "M", "𝆺𝅥𝅯"), + (0x1D1C1, "V"), + (0x1D1EB, "X"), + (0x1D200, "V"), + (0x1D246, "X"), + (0x1D2C0, "V"), + (0x1D2D4, "X"), + (0x1D2E0, "V"), + (0x1D2F4, "X"), + (0x1D300, "V"), + (0x1D357, "X"), + (0x1D360, "V"), + (0x1D379, "X"), + (0x1D400, "M", "a"), + (0x1D401, "M", "b"), + (0x1D402, "M", "c"), + (0x1D403, "M", "d"), + (0x1D404, "M", "e"), + (0x1D405, "M", "f"), + (0x1D406, "M", "g"), + (0x1D407, "M", "h"), + (0x1D408, "M", "i"), + (0x1D409, "M", "j"), + (0x1D40A, "M", "k"), + (0x1D40B, "M", "l"), + (0x1D40C, "M", "m"), + (0x1D40D, "M", "n"), + (0x1D40E, "M", "o"), + (0x1D40F, "M", "p"), + (0x1D410, "M", "q"), + (0x1D411, "M", "r"), + (0x1D412, "M", "s"), + (0x1D413, "M", "t"), + (0x1D414, "M", "u"), + (0x1D415, "M", "v"), + (0x1D416, "M", "w"), + (0x1D417, "M", "x"), + (0x1D418, "M", "y"), + (0x1D419, "M", "z"), + (0x1D41A, "M", "a"), + (0x1D41B, "M", "b"), + (0x1D41C, "M", "c"), + (0x1D41D, "M", "d"), + (0x1D41E, "M", "e"), + (0x1D41F, "M", "f"), + (0x1D420, "M", "g"), + (0x1D421, "M", "h"), + (0x1D422, "M", "i"), + (0x1D423, "M", "j"), + (0x1D424, "M", "k"), + ] + + +def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D425, "M", "l"), + (0x1D426, "M", "m"), + (0x1D427, "M", "n"), + (0x1D428, "M", "o"), + (0x1D429, "M", "p"), + (0x1D42A, "M", "q"), + (0x1D42B, "M", "r"), + (0x1D42C, "M", "s"), + (0x1D42D, "M", "t"), + (0x1D42E, "M", "u"), + (0x1D42F, "M", "v"), + (0x1D430, "M", "w"), + (0x1D431, "M", "x"), + (0x1D432, "M", "y"), + (0x1D433, "M", "z"), + (0x1D434, "M", "a"), + (0x1D435, "M", "b"), + (0x1D436, "M", "c"), + (0x1D437, "M", "d"), + (0x1D438, "M", "e"), + (0x1D439, "M", "f"), + (0x1D43A, "M", "g"), + (0x1D43B, "M", "h"), + (0x1D43C, "M", "i"), + (0x1D43D, "M", "j"), + (0x1D43E, "M", "k"), + (0x1D43F, "M", "l"), + (0x1D440, "M", "m"), + (0x1D441, "M", "n"), + (0x1D442, "M", "o"), + (0x1D443, "M", "p"), + (0x1D444, "M", "q"), + (0x1D445, "M", "r"), + (0x1D446, "M", "s"), + (0x1D447, "M", "t"), + (0x1D448, "M", "u"), + (0x1D449, "M", "v"), + (0x1D44A, "M", "w"), + (0x1D44B, "M", "x"), + (0x1D44C, "M", "y"), + (0x1D44D, "M", "z"), + (0x1D44E, "M", "a"), + (0x1D44F, "M", "b"), + (0x1D450, "M", "c"), + (0x1D451, "M", "d"), + (0x1D452, "M", "e"), + (0x1D453, "M", "f"), + (0x1D454, "M", "g"), + (0x1D455, "X"), + (0x1D456, "M", "i"), + (0x1D457, "M", "j"), + (0x1D458, "M", "k"), + (0x1D459, "M", "l"), + (0x1D45A, "M", "m"), + (0x1D45B, "M", "n"), + (0x1D45C, "M", "o"), + (0x1D45D, "M", "p"), + (0x1D45E, "M", "q"), + (0x1D45F, "M", "r"), + (0x1D460, "M", "s"), + (0x1D461, "M", "t"), + (0x1D462, "M", "u"), + (0x1D463, "M", "v"), + (0x1D464, "M", "w"), + (0x1D465, "M", "x"), + (0x1D466, "M", "y"), + (0x1D467, "M", "z"), + (0x1D468, "M", "a"), + (0x1D469, "M", "b"), + (0x1D46A, "M", "c"), + (0x1D46B, "M", "d"), + (0x1D46C, "M", "e"), + (0x1D46D, "M", "f"), + (0x1D46E, "M", "g"), + (0x1D46F, "M", "h"), + (0x1D470, "M", "i"), + (0x1D471, "M", "j"), + (0x1D472, "M", "k"), + (0x1D473, "M", "l"), + (0x1D474, "M", "m"), + (0x1D475, "M", "n"), + (0x1D476, "M", "o"), + (0x1D477, "M", "p"), + (0x1D478, "M", "q"), + (0x1D479, "M", "r"), + (0x1D47A, "M", "s"), + (0x1D47B, "M", "t"), + (0x1D47C, "M", "u"), + (0x1D47D, "M", "v"), + (0x1D47E, "M", "w"), + (0x1D47F, "M", "x"), + (0x1D480, "M", "y"), + (0x1D481, "M", "z"), + (0x1D482, "M", "a"), + (0x1D483, "M", "b"), + (0x1D484, "M", 
"c"), + (0x1D485, "M", "d"), + (0x1D486, "M", "e"), + (0x1D487, "M", "f"), + (0x1D488, "M", "g"), + ] + + +def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D489, "M", "h"), + (0x1D48A, "M", "i"), + (0x1D48B, "M", "j"), + (0x1D48C, "M", "k"), + (0x1D48D, "M", "l"), + (0x1D48E, "M", "m"), + (0x1D48F, "M", "n"), + (0x1D490, "M", "o"), + (0x1D491, "M", "p"), + (0x1D492, "M", "q"), + (0x1D493, "M", "r"), + (0x1D494, "M", "s"), + (0x1D495, "M", "t"), + (0x1D496, "M", "u"), + (0x1D497, "M", "v"), + (0x1D498, "M", "w"), + (0x1D499, "M", "x"), + (0x1D49A, "M", "y"), + (0x1D49B, "M", "z"), + (0x1D49C, "M", "a"), + (0x1D49D, "X"), + (0x1D49E, "M", "c"), + (0x1D49F, "M", "d"), + (0x1D4A0, "X"), + (0x1D4A2, "M", "g"), + (0x1D4A3, "X"), + (0x1D4A5, "M", "j"), + (0x1D4A6, "M", "k"), + (0x1D4A7, "X"), + (0x1D4A9, "M", "n"), + (0x1D4AA, "M", "o"), + (0x1D4AB, "M", "p"), + (0x1D4AC, "M", "q"), + (0x1D4AD, "X"), + (0x1D4AE, "M", "s"), + (0x1D4AF, "M", "t"), + (0x1D4B0, "M", "u"), + (0x1D4B1, "M", "v"), + (0x1D4B2, "M", "w"), + (0x1D4B3, "M", "x"), + (0x1D4B4, "M", "y"), + (0x1D4B5, "M", "z"), + (0x1D4B6, "M", "a"), + (0x1D4B7, "M", "b"), + (0x1D4B8, "M", "c"), + (0x1D4B9, "M", "d"), + (0x1D4BA, "X"), + (0x1D4BB, "M", "f"), + (0x1D4BC, "X"), + (0x1D4BD, "M", "h"), + (0x1D4BE, "M", "i"), + (0x1D4BF, "M", "j"), + (0x1D4C0, "M", "k"), + (0x1D4C1, "M", "l"), + (0x1D4C2, "M", "m"), + (0x1D4C3, "M", "n"), + (0x1D4C4, "X"), + (0x1D4C5, "M", "p"), + (0x1D4C6, "M", "q"), + (0x1D4C7, "M", "r"), + (0x1D4C8, "M", "s"), + (0x1D4C9, "M", "t"), + (0x1D4CA, "M", "u"), + (0x1D4CB, "M", "v"), + (0x1D4CC, "M", "w"), + (0x1D4CD, "M", "x"), + (0x1D4CE, "M", "y"), + (0x1D4CF, "M", "z"), + (0x1D4D0, "M", "a"), + (0x1D4D1, "M", "b"), + (0x1D4D2, "M", "c"), + (0x1D4D3, "M", "d"), + (0x1D4D4, "M", "e"), + (0x1D4D5, "M", "f"), + (0x1D4D6, "M", "g"), + (0x1D4D7, "M", "h"), + (0x1D4D8, "M", "i"), + (0x1D4D9, "M", "j"), + (0x1D4DA, "M", "k"), + (0x1D4DB, "M", "l"), + (0x1D4DC, "M", "m"), + (0x1D4DD, "M", "n"), + (0x1D4DE, "M", "o"), + (0x1D4DF, "M", "p"), + (0x1D4E0, "M", "q"), + (0x1D4E1, "M", "r"), + (0x1D4E2, "M", "s"), + (0x1D4E3, "M", "t"), + (0x1D4E4, "M", "u"), + (0x1D4E5, "M", "v"), + (0x1D4E6, "M", "w"), + (0x1D4E7, "M", "x"), + (0x1D4E8, "M", "y"), + (0x1D4E9, "M", "z"), + (0x1D4EA, "M", "a"), + (0x1D4EB, "M", "b"), + (0x1D4EC, "M", "c"), + (0x1D4ED, "M", "d"), + (0x1D4EE, "M", "e"), + (0x1D4EF, "M", "f"), + ] + + +def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D4F0, "M", "g"), + (0x1D4F1, "M", "h"), + (0x1D4F2, "M", "i"), + (0x1D4F3, "M", "j"), + (0x1D4F4, "M", "k"), + (0x1D4F5, "M", "l"), + (0x1D4F6, "M", "m"), + (0x1D4F7, "M", "n"), + (0x1D4F8, "M", "o"), + (0x1D4F9, "M", "p"), + (0x1D4FA, "M", "q"), + (0x1D4FB, "M", "r"), + (0x1D4FC, "M", "s"), + (0x1D4FD, "M", "t"), + (0x1D4FE, "M", "u"), + (0x1D4FF, "M", "v"), + (0x1D500, "M", "w"), + (0x1D501, "M", "x"), + (0x1D502, "M", "y"), + (0x1D503, "M", "z"), + (0x1D504, "M", "a"), + (0x1D505, "M", "b"), + (0x1D506, "X"), + (0x1D507, "M", "d"), + (0x1D508, "M", "e"), + (0x1D509, "M", "f"), + (0x1D50A, "M", "g"), + (0x1D50B, "X"), + (0x1D50D, "M", "j"), + (0x1D50E, "M", "k"), + (0x1D50F, "M", "l"), + (0x1D510, "M", "m"), + (0x1D511, "M", "n"), + (0x1D512, "M", "o"), + (0x1D513, "M", "p"), + (0x1D514, "M", "q"), + (0x1D515, "X"), + (0x1D516, "M", "s"), + (0x1D517, "M", "t"), + (0x1D518, "M", "u"), + (0x1D519, "M", "v"), + (0x1D51A, "M", "w"), + (0x1D51B, "M", "x"), + (0x1D51C, "M", "y"), + (0x1D51D, "X"), + 
(0x1D51E, "M", "a"), + (0x1D51F, "M", "b"), + (0x1D520, "M", "c"), + (0x1D521, "M", "d"), + (0x1D522, "M", "e"), + (0x1D523, "M", "f"), + (0x1D524, "M", "g"), + (0x1D525, "M", "h"), + (0x1D526, "M", "i"), + (0x1D527, "M", "j"), + (0x1D528, "M", "k"), + (0x1D529, "M", "l"), + (0x1D52A, "M", "m"), + (0x1D52B, "M", "n"), + (0x1D52C, "M", "o"), + (0x1D52D, "M", "p"), + (0x1D52E, "M", "q"), + (0x1D52F, "M", "r"), + (0x1D530, "M", "s"), + (0x1D531, "M", "t"), + (0x1D532, "M", "u"), + (0x1D533, "M", "v"), + (0x1D534, "M", "w"), + (0x1D535, "M", "x"), + (0x1D536, "M", "y"), + (0x1D537, "M", "z"), + (0x1D538, "M", "a"), + (0x1D539, "M", "b"), + (0x1D53A, "X"), + (0x1D53B, "M", "d"), + (0x1D53C, "M", "e"), + (0x1D53D, "M", "f"), + (0x1D53E, "M", "g"), + (0x1D53F, "X"), + (0x1D540, "M", "i"), + (0x1D541, "M", "j"), + (0x1D542, "M", "k"), + (0x1D543, "M", "l"), + (0x1D544, "M", "m"), + (0x1D545, "X"), + (0x1D546, "M", "o"), + (0x1D547, "X"), + (0x1D54A, "M", "s"), + (0x1D54B, "M", "t"), + (0x1D54C, "M", "u"), + (0x1D54D, "M", "v"), + (0x1D54E, "M", "w"), + (0x1D54F, "M", "x"), + (0x1D550, "M", "y"), + (0x1D551, "X"), + (0x1D552, "M", "a"), + (0x1D553, "M", "b"), + (0x1D554, "M", "c"), + (0x1D555, "M", "d"), + (0x1D556, "M", "e"), + ] + + +def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D557, "M", "f"), + (0x1D558, "M", "g"), + (0x1D559, "M", "h"), + (0x1D55A, "M", "i"), + (0x1D55B, "M", "j"), + (0x1D55C, "M", "k"), + (0x1D55D, "M", "l"), + (0x1D55E, "M", "m"), + (0x1D55F, "M", "n"), + (0x1D560, "M", "o"), + (0x1D561, "M", "p"), + (0x1D562, "M", "q"), + (0x1D563, "M", "r"), + (0x1D564, "M", "s"), + (0x1D565, "M", "t"), + (0x1D566, "M", "u"), + (0x1D567, "M", "v"), + (0x1D568, "M", "w"), + (0x1D569, "M", "x"), + (0x1D56A, "M", "y"), + (0x1D56B, "M", "z"), + (0x1D56C, "M", "a"), + (0x1D56D, "M", "b"), + (0x1D56E, "M", "c"), + (0x1D56F, "M", "d"), + (0x1D570, "M", "e"), + (0x1D571, "M", "f"), + (0x1D572, "M", "g"), + (0x1D573, "M", "h"), + (0x1D574, "M", "i"), + (0x1D575, "M", "j"), + (0x1D576, "M", "k"), + (0x1D577, "M", "l"), + (0x1D578, "M", "m"), + (0x1D579, "M", "n"), + (0x1D57A, "M", "o"), + (0x1D57B, "M", "p"), + (0x1D57C, "M", "q"), + (0x1D57D, "M", "r"), + (0x1D57E, "M", "s"), + (0x1D57F, "M", "t"), + (0x1D580, "M", "u"), + (0x1D581, "M", "v"), + (0x1D582, "M", "w"), + (0x1D583, "M", "x"), + (0x1D584, "M", "y"), + (0x1D585, "M", "z"), + (0x1D586, "M", "a"), + (0x1D587, "M", "b"), + (0x1D588, "M", "c"), + (0x1D589, "M", "d"), + (0x1D58A, "M", "e"), + (0x1D58B, "M", "f"), + (0x1D58C, "M", "g"), + (0x1D58D, "M", "h"), + (0x1D58E, "M", "i"), + (0x1D58F, "M", "j"), + (0x1D590, "M", "k"), + (0x1D591, "M", "l"), + (0x1D592, "M", "m"), + (0x1D593, "M", "n"), + (0x1D594, "M", "o"), + (0x1D595, "M", "p"), + (0x1D596, "M", "q"), + (0x1D597, "M", "r"), + (0x1D598, "M", "s"), + (0x1D599, "M", "t"), + (0x1D59A, "M", "u"), + (0x1D59B, "M", "v"), + (0x1D59C, "M", "w"), + (0x1D59D, "M", "x"), + (0x1D59E, "M", "y"), + (0x1D59F, "M", "z"), + (0x1D5A0, "M", "a"), + (0x1D5A1, "M", "b"), + (0x1D5A2, "M", "c"), + (0x1D5A3, "M", "d"), + (0x1D5A4, "M", "e"), + (0x1D5A5, "M", "f"), + (0x1D5A6, "M", "g"), + (0x1D5A7, "M", "h"), + (0x1D5A8, "M", "i"), + (0x1D5A9, "M", "j"), + (0x1D5AA, "M", "k"), + (0x1D5AB, "M", "l"), + (0x1D5AC, "M", "m"), + (0x1D5AD, "M", "n"), + (0x1D5AE, "M", "o"), + (0x1D5AF, "M", "p"), + (0x1D5B0, "M", "q"), + (0x1D5B1, "M", "r"), + (0x1D5B2, "M", "s"), + (0x1D5B3, "M", "t"), + (0x1D5B4, "M", "u"), + (0x1D5B5, "M", "v"), + (0x1D5B6, "M", "w"), + (0x1D5B7, "M", 
"x"), + (0x1D5B8, "M", "y"), + (0x1D5B9, "M", "z"), + (0x1D5BA, "M", "a"), + ] + + +def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D5BB, "M", "b"), + (0x1D5BC, "M", "c"), + (0x1D5BD, "M", "d"), + (0x1D5BE, "M", "e"), + (0x1D5BF, "M", "f"), + (0x1D5C0, "M", "g"), + (0x1D5C1, "M", "h"), + (0x1D5C2, "M", "i"), + (0x1D5C3, "M", "j"), + (0x1D5C4, "M", "k"), + (0x1D5C5, "M", "l"), + (0x1D5C6, "M", "m"), + (0x1D5C7, "M", "n"), + (0x1D5C8, "M", "o"), + (0x1D5C9, "M", "p"), + (0x1D5CA, "M", "q"), + (0x1D5CB, "M", "r"), + (0x1D5CC, "M", "s"), + (0x1D5CD, "M", "t"), + (0x1D5CE, "M", "u"), + (0x1D5CF, "M", "v"), + (0x1D5D0, "M", "w"), + (0x1D5D1, "M", "x"), + (0x1D5D2, "M", "y"), + (0x1D5D3, "M", "z"), + (0x1D5D4, "M", "a"), + (0x1D5D5, "M", "b"), + (0x1D5D6, "M", "c"), + (0x1D5D7, "M", "d"), + (0x1D5D8, "M", "e"), + (0x1D5D9, "M", "f"), + (0x1D5DA, "M", "g"), + (0x1D5DB, "M", "h"), + (0x1D5DC, "M", "i"), + (0x1D5DD, "M", "j"), + (0x1D5DE, "M", "k"), + (0x1D5DF, "M", "l"), + (0x1D5E0, "M", "m"), + (0x1D5E1, "M", "n"), + (0x1D5E2, "M", "o"), + (0x1D5E3, "M", "p"), + (0x1D5E4, "M", "q"), + (0x1D5E5, "M", "r"), + (0x1D5E6, "M", "s"), + (0x1D5E7, "M", "t"), + (0x1D5E8, "M", "u"), + (0x1D5E9, "M", "v"), + (0x1D5EA, "M", "w"), + (0x1D5EB, "M", "x"), + (0x1D5EC, "M", "y"), + (0x1D5ED, "M", "z"), + (0x1D5EE, "M", "a"), + (0x1D5EF, "M", "b"), + (0x1D5F0, "M", "c"), + (0x1D5F1, "M", "d"), + (0x1D5F2, "M", "e"), + (0x1D5F3, "M", "f"), + (0x1D5F4, "M", "g"), + (0x1D5F5, "M", "h"), + (0x1D5F6, "M", "i"), + (0x1D5F7, "M", "j"), + (0x1D5F8, "M", "k"), + (0x1D5F9, "M", "l"), + (0x1D5FA, "M", "m"), + (0x1D5FB, "M", "n"), + (0x1D5FC, "M", "o"), + (0x1D5FD, "M", "p"), + (0x1D5FE, "M", "q"), + (0x1D5FF, "M", "r"), + (0x1D600, "M", "s"), + (0x1D601, "M", "t"), + (0x1D602, "M", "u"), + (0x1D603, "M", "v"), + (0x1D604, "M", "w"), + (0x1D605, "M", "x"), + (0x1D606, "M", "y"), + (0x1D607, "M", "z"), + (0x1D608, "M", "a"), + (0x1D609, "M", "b"), + (0x1D60A, "M", "c"), + (0x1D60B, "M", "d"), + (0x1D60C, "M", "e"), + (0x1D60D, "M", "f"), + (0x1D60E, "M", "g"), + (0x1D60F, "M", "h"), + (0x1D610, "M", "i"), + (0x1D611, "M", "j"), + (0x1D612, "M", "k"), + (0x1D613, "M", "l"), + (0x1D614, "M", "m"), + (0x1D615, "M", "n"), + (0x1D616, "M", "o"), + (0x1D617, "M", "p"), + (0x1D618, "M", "q"), + (0x1D619, "M", "r"), + (0x1D61A, "M", "s"), + (0x1D61B, "M", "t"), + (0x1D61C, "M", "u"), + (0x1D61D, "M", "v"), + (0x1D61E, "M", "w"), + ] + + +def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D61F, "M", "x"), + (0x1D620, "M", "y"), + (0x1D621, "M", "z"), + (0x1D622, "M", "a"), + (0x1D623, "M", "b"), + (0x1D624, "M", "c"), + (0x1D625, "M", "d"), + (0x1D626, "M", "e"), + (0x1D627, "M", "f"), + (0x1D628, "M", "g"), + (0x1D629, "M", "h"), + (0x1D62A, "M", "i"), + (0x1D62B, "M", "j"), + (0x1D62C, "M", "k"), + (0x1D62D, "M", "l"), + (0x1D62E, "M", "m"), + (0x1D62F, "M", "n"), + (0x1D630, "M", "o"), + (0x1D631, "M", "p"), + (0x1D632, "M", "q"), + (0x1D633, "M", "r"), + (0x1D634, "M", "s"), + (0x1D635, "M", "t"), + (0x1D636, "M", "u"), + (0x1D637, "M", "v"), + (0x1D638, "M", "w"), + (0x1D639, "M", "x"), + (0x1D63A, "M", "y"), + (0x1D63B, "M", "z"), + (0x1D63C, "M", "a"), + (0x1D63D, "M", "b"), + (0x1D63E, "M", "c"), + (0x1D63F, "M", "d"), + (0x1D640, "M", "e"), + (0x1D641, "M", "f"), + (0x1D642, "M", "g"), + (0x1D643, "M", "h"), + (0x1D644, "M", "i"), + (0x1D645, "M", "j"), + (0x1D646, "M", "k"), + (0x1D647, "M", "l"), + (0x1D648, "M", "m"), + (0x1D649, "M", "n"), + (0x1D64A, 
"M", "o"), + (0x1D64B, "M", "p"), + (0x1D64C, "M", "q"), + (0x1D64D, "M", "r"), + (0x1D64E, "M", "s"), + (0x1D64F, "M", "t"), + (0x1D650, "M", "u"), + (0x1D651, "M", "v"), + (0x1D652, "M", "w"), + (0x1D653, "M", "x"), + (0x1D654, "M", "y"), + (0x1D655, "M", "z"), + (0x1D656, "M", "a"), + (0x1D657, "M", "b"), + (0x1D658, "M", "c"), + (0x1D659, "M", "d"), + (0x1D65A, "M", "e"), + (0x1D65B, "M", "f"), + (0x1D65C, "M", "g"), + (0x1D65D, "M", "h"), + (0x1D65E, "M", "i"), + (0x1D65F, "M", "j"), + (0x1D660, "M", "k"), + (0x1D661, "M", "l"), + (0x1D662, "M", "m"), + (0x1D663, "M", "n"), + (0x1D664, "M", "o"), + (0x1D665, "M", "p"), + (0x1D666, "M", "q"), + (0x1D667, "M", "r"), + (0x1D668, "M", "s"), + (0x1D669, "M", "t"), + (0x1D66A, "M", "u"), + (0x1D66B, "M", "v"), + (0x1D66C, "M", "w"), + (0x1D66D, "M", "x"), + (0x1D66E, "M", "y"), + (0x1D66F, "M", "z"), + (0x1D670, "M", "a"), + (0x1D671, "M", "b"), + (0x1D672, "M", "c"), + (0x1D673, "M", "d"), + (0x1D674, "M", "e"), + (0x1D675, "M", "f"), + (0x1D676, "M", "g"), + (0x1D677, "M", "h"), + (0x1D678, "M", "i"), + (0x1D679, "M", "j"), + (0x1D67A, "M", "k"), + (0x1D67B, "M", "l"), + (0x1D67C, "M", "m"), + (0x1D67D, "M", "n"), + (0x1D67E, "M", "o"), + (0x1D67F, "M", "p"), + (0x1D680, "M", "q"), + (0x1D681, "M", "r"), + (0x1D682, "M", "s"), + ] + + +def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D683, "M", "t"), + (0x1D684, "M", "u"), + (0x1D685, "M", "v"), + (0x1D686, "M", "w"), + (0x1D687, "M", "x"), + (0x1D688, "M", "y"), + (0x1D689, "M", "z"), + (0x1D68A, "M", "a"), + (0x1D68B, "M", "b"), + (0x1D68C, "M", "c"), + (0x1D68D, "M", "d"), + (0x1D68E, "M", "e"), + (0x1D68F, "M", "f"), + (0x1D690, "M", "g"), + (0x1D691, "M", "h"), + (0x1D692, "M", "i"), + (0x1D693, "M", "j"), + (0x1D694, "M", "k"), + (0x1D695, "M", "l"), + (0x1D696, "M", "m"), + (0x1D697, "M", "n"), + (0x1D698, "M", "o"), + (0x1D699, "M", "p"), + (0x1D69A, "M", "q"), + (0x1D69B, "M", "r"), + (0x1D69C, "M", "s"), + (0x1D69D, "M", "t"), + (0x1D69E, "M", "u"), + (0x1D69F, "M", "v"), + (0x1D6A0, "M", "w"), + (0x1D6A1, "M", "x"), + (0x1D6A2, "M", "y"), + (0x1D6A3, "M", "z"), + (0x1D6A4, "M", "ı"), + (0x1D6A5, "M", "ȷ"), + (0x1D6A6, "X"), + (0x1D6A8, "M", "α"), + (0x1D6A9, "M", "β"), + (0x1D6AA, "M", "γ"), + (0x1D6AB, "M", "δ"), + (0x1D6AC, "M", "ε"), + (0x1D6AD, "M", "ζ"), + (0x1D6AE, "M", "η"), + (0x1D6AF, "M", "θ"), + (0x1D6B0, "M", "ι"), + (0x1D6B1, "M", "κ"), + (0x1D6B2, "M", "λ"), + (0x1D6B3, "M", "μ"), + (0x1D6B4, "M", "ν"), + (0x1D6B5, "M", "ξ"), + (0x1D6B6, "M", "ο"), + (0x1D6B7, "M", "π"), + (0x1D6B8, "M", "ρ"), + (0x1D6B9, "M", "θ"), + (0x1D6BA, "M", "σ"), + (0x1D6BB, "M", "τ"), + (0x1D6BC, "M", "υ"), + (0x1D6BD, "M", "φ"), + (0x1D6BE, "M", "χ"), + (0x1D6BF, "M", "ψ"), + (0x1D6C0, "M", "ω"), + (0x1D6C1, "M", "∇"), + (0x1D6C2, "M", "α"), + (0x1D6C3, "M", "β"), + (0x1D6C4, "M", "γ"), + (0x1D6C5, "M", "δ"), + (0x1D6C6, "M", "ε"), + (0x1D6C7, "M", "ζ"), + (0x1D6C8, "M", "η"), + (0x1D6C9, "M", "θ"), + (0x1D6CA, "M", "ι"), + (0x1D6CB, "M", "κ"), + (0x1D6CC, "M", "λ"), + (0x1D6CD, "M", "μ"), + (0x1D6CE, "M", "ν"), + (0x1D6CF, "M", "ξ"), + (0x1D6D0, "M", "ο"), + (0x1D6D1, "M", "π"), + (0x1D6D2, "M", "ρ"), + (0x1D6D3, "M", "σ"), + (0x1D6D5, "M", "τ"), + (0x1D6D6, "M", "υ"), + (0x1D6D7, "M", "φ"), + (0x1D6D8, "M", "χ"), + (0x1D6D9, "M", "ψ"), + (0x1D6DA, "M", "ω"), + (0x1D6DB, "M", "∂"), + (0x1D6DC, "M", "ε"), + (0x1D6DD, "M", "θ"), + (0x1D6DE, "M", "κ"), + (0x1D6DF, "M", "φ"), + (0x1D6E0, "M", "ρ"), + (0x1D6E1, "M", "π"), + (0x1D6E2, "M", "α"), + 
(0x1D6E3, "M", "β"), + (0x1D6E4, "M", "γ"), + (0x1D6E5, "M", "δ"), + (0x1D6E6, "M", "ε"), + (0x1D6E7, "M", "ζ"), + (0x1D6E8, "M", "η"), + ] + + +def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D6E9, "M", "θ"), + (0x1D6EA, "M", "ι"), + (0x1D6EB, "M", "κ"), + (0x1D6EC, "M", "λ"), + (0x1D6ED, "M", "μ"), + (0x1D6EE, "M", "ν"), + (0x1D6EF, "M", "ξ"), + (0x1D6F0, "M", "ο"), + (0x1D6F1, "M", "π"), + (0x1D6F2, "M", "ρ"), + (0x1D6F3, "M", "θ"), + (0x1D6F4, "M", "σ"), + (0x1D6F5, "M", "τ"), + (0x1D6F6, "M", "υ"), + (0x1D6F7, "M", "φ"), + (0x1D6F8, "M", "χ"), + (0x1D6F9, "M", "ψ"), + (0x1D6FA, "M", "ω"), + (0x1D6FB, "M", "∇"), + (0x1D6FC, "M", "α"), + (0x1D6FD, "M", "β"), + (0x1D6FE, "M", "γ"), + (0x1D6FF, "M", "δ"), + (0x1D700, "M", "ε"), + (0x1D701, "M", "ζ"), + (0x1D702, "M", "η"), + (0x1D703, "M", "θ"), + (0x1D704, "M", "ι"), + (0x1D705, "M", "κ"), + (0x1D706, "M", "λ"), + (0x1D707, "M", "μ"), + (0x1D708, "M", "ν"), + (0x1D709, "M", "ξ"), + (0x1D70A, "M", "ο"), + (0x1D70B, "M", "π"), + (0x1D70C, "M", "ρ"), + (0x1D70D, "M", "σ"), + (0x1D70F, "M", "τ"), + (0x1D710, "M", "υ"), + (0x1D711, "M", "φ"), + (0x1D712, "M", "χ"), + (0x1D713, "M", "ψ"), + (0x1D714, "M", "ω"), + (0x1D715, "M", "∂"), + (0x1D716, "M", "ε"), + (0x1D717, "M", "θ"), + (0x1D718, "M", "κ"), + (0x1D719, "M", "φ"), + (0x1D71A, "M", "ρ"), + (0x1D71B, "M", "π"), + (0x1D71C, "M", "α"), + (0x1D71D, "M", "β"), + (0x1D71E, "M", "γ"), + (0x1D71F, "M", "δ"), + (0x1D720, "M", "ε"), + (0x1D721, "M", "ζ"), + (0x1D722, "M", "η"), + (0x1D723, "M", "θ"), + (0x1D724, "M", "ι"), + (0x1D725, "M", "κ"), + (0x1D726, "M", "λ"), + (0x1D727, "M", "μ"), + (0x1D728, "M", "ν"), + (0x1D729, "M", "ξ"), + (0x1D72A, "M", "ο"), + (0x1D72B, "M", "π"), + (0x1D72C, "M", "ρ"), + (0x1D72D, "M", "θ"), + (0x1D72E, "M", "σ"), + (0x1D72F, "M", "τ"), + (0x1D730, "M", "υ"), + (0x1D731, "M", "φ"), + (0x1D732, "M", "χ"), + (0x1D733, "M", "ψ"), + (0x1D734, "M", "ω"), + (0x1D735, "M", "∇"), + (0x1D736, "M", "α"), + (0x1D737, "M", "β"), + (0x1D738, "M", "γ"), + (0x1D739, "M", "δ"), + (0x1D73A, "M", "ε"), + (0x1D73B, "M", "ζ"), + (0x1D73C, "M", "η"), + (0x1D73D, "M", "θ"), + (0x1D73E, "M", "ι"), + (0x1D73F, "M", "κ"), + (0x1D740, "M", "λ"), + (0x1D741, "M", "μ"), + (0x1D742, "M", "ν"), + (0x1D743, "M", "ξ"), + (0x1D744, "M", "ο"), + (0x1D745, "M", "π"), + (0x1D746, "M", "ρ"), + (0x1D747, "M", "σ"), + (0x1D749, "M", "τ"), + (0x1D74A, "M", "υ"), + (0x1D74B, "M", "φ"), + (0x1D74C, "M", "χ"), + (0x1D74D, "M", "ψ"), + (0x1D74E, "M", "ω"), + ] + + +def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D74F, "M", "∂"), + (0x1D750, "M", "ε"), + (0x1D751, "M", "θ"), + (0x1D752, "M", "κ"), + (0x1D753, "M", "φ"), + (0x1D754, "M", "ρ"), + (0x1D755, "M", "π"), + (0x1D756, "M", "α"), + (0x1D757, "M", "β"), + (0x1D758, "M", "γ"), + (0x1D759, "M", "δ"), + (0x1D75A, "M", "ε"), + (0x1D75B, "M", "ζ"), + (0x1D75C, "M", "η"), + (0x1D75D, "M", "θ"), + (0x1D75E, "M", "ι"), + (0x1D75F, "M", "κ"), + (0x1D760, "M", "λ"), + (0x1D761, "M", "μ"), + (0x1D762, "M", "ν"), + (0x1D763, "M", "ξ"), + (0x1D764, "M", "ο"), + (0x1D765, "M", "π"), + (0x1D766, "M", "ρ"), + (0x1D767, "M", "θ"), + (0x1D768, "M", "σ"), + (0x1D769, "M", "τ"), + (0x1D76A, "M", "υ"), + (0x1D76B, "M", "φ"), + (0x1D76C, "M", "χ"), + (0x1D76D, "M", "ψ"), + (0x1D76E, "M", "ω"), + (0x1D76F, "M", "∇"), + (0x1D770, "M", "α"), + (0x1D771, "M", "β"), + (0x1D772, "M", "γ"), + (0x1D773, "M", "δ"), + (0x1D774, "M", "ε"), + (0x1D775, "M", "ζ"), + (0x1D776, "M", "η"), + (0x1D777, "M", 
"θ"), + (0x1D778, "M", "ι"), + (0x1D779, "M", "κ"), + (0x1D77A, "M", "λ"), + (0x1D77B, "M", "μ"), + (0x1D77C, "M", "ν"), + (0x1D77D, "M", "ξ"), + (0x1D77E, "M", "ο"), + (0x1D77F, "M", "π"), + (0x1D780, "M", "ρ"), + (0x1D781, "M", "σ"), + (0x1D783, "M", "τ"), + (0x1D784, "M", "υ"), + (0x1D785, "M", "φ"), + (0x1D786, "M", "χ"), + (0x1D787, "M", "ψ"), + (0x1D788, "M", "ω"), + (0x1D789, "M", "∂"), + (0x1D78A, "M", "ε"), + (0x1D78B, "M", "θ"), + (0x1D78C, "M", "κ"), + (0x1D78D, "M", "φ"), + (0x1D78E, "M", "ρ"), + (0x1D78F, "M", "π"), + (0x1D790, "M", "α"), + (0x1D791, "M", "β"), + (0x1D792, "M", "γ"), + (0x1D793, "M", "δ"), + (0x1D794, "M", "ε"), + (0x1D795, "M", "ζ"), + (0x1D796, "M", "η"), + (0x1D797, "M", "θ"), + (0x1D798, "M", "ι"), + (0x1D799, "M", "κ"), + (0x1D79A, "M", "λ"), + (0x1D79B, "M", "μ"), + (0x1D79C, "M", "ν"), + (0x1D79D, "M", "ξ"), + (0x1D79E, "M", "ο"), + (0x1D79F, "M", "π"), + (0x1D7A0, "M", "ρ"), + (0x1D7A1, "M", "θ"), + (0x1D7A2, "M", "σ"), + (0x1D7A3, "M", "τ"), + (0x1D7A4, "M", "υ"), + (0x1D7A5, "M", "φ"), + (0x1D7A6, "M", "χ"), + (0x1D7A7, "M", "ψ"), + (0x1D7A8, "M", "ω"), + (0x1D7A9, "M", "∇"), + (0x1D7AA, "M", "α"), + (0x1D7AB, "M", "β"), + (0x1D7AC, "M", "γ"), + (0x1D7AD, "M", "δ"), + (0x1D7AE, "M", "ε"), + (0x1D7AF, "M", "ζ"), + (0x1D7B0, "M", "η"), + (0x1D7B1, "M", "θ"), + (0x1D7B2, "M", "ι"), + (0x1D7B3, "M", "κ"), + ] + + +def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D7B4, "M", "λ"), + (0x1D7B5, "M", "μ"), + (0x1D7B6, "M", "ν"), + (0x1D7B7, "M", "ξ"), + (0x1D7B8, "M", "ο"), + (0x1D7B9, "M", "π"), + (0x1D7BA, "M", "ρ"), + (0x1D7BB, "M", "σ"), + (0x1D7BD, "M", "τ"), + (0x1D7BE, "M", "υ"), + (0x1D7BF, "M", "φ"), + (0x1D7C0, "M", "χ"), + (0x1D7C1, "M", "ψ"), + (0x1D7C2, "M", "ω"), + (0x1D7C3, "M", "∂"), + (0x1D7C4, "M", "ε"), + (0x1D7C5, "M", "θ"), + (0x1D7C6, "M", "κ"), + (0x1D7C7, "M", "φ"), + (0x1D7C8, "M", "ρ"), + (0x1D7C9, "M", "π"), + (0x1D7CA, "M", "ϝ"), + (0x1D7CC, "X"), + (0x1D7CE, "M", "0"), + (0x1D7CF, "M", "1"), + (0x1D7D0, "M", "2"), + (0x1D7D1, "M", "3"), + (0x1D7D2, "M", "4"), + (0x1D7D3, "M", "5"), + (0x1D7D4, "M", "6"), + (0x1D7D5, "M", "7"), + (0x1D7D6, "M", "8"), + (0x1D7D7, "M", "9"), + (0x1D7D8, "M", "0"), + (0x1D7D9, "M", "1"), + (0x1D7DA, "M", "2"), + (0x1D7DB, "M", "3"), + (0x1D7DC, "M", "4"), + (0x1D7DD, "M", "5"), + (0x1D7DE, "M", "6"), + (0x1D7DF, "M", "7"), + (0x1D7E0, "M", "8"), + (0x1D7E1, "M", "9"), + (0x1D7E2, "M", "0"), + (0x1D7E3, "M", "1"), + (0x1D7E4, "M", "2"), + (0x1D7E5, "M", "3"), + (0x1D7E6, "M", "4"), + (0x1D7E7, "M", "5"), + (0x1D7E8, "M", "6"), + (0x1D7E9, "M", "7"), + (0x1D7EA, "M", "8"), + (0x1D7EB, "M", "9"), + (0x1D7EC, "M", "0"), + (0x1D7ED, "M", "1"), + (0x1D7EE, "M", "2"), + (0x1D7EF, "M", "3"), + (0x1D7F0, "M", "4"), + (0x1D7F1, "M", "5"), + (0x1D7F2, "M", "6"), + (0x1D7F3, "M", "7"), + (0x1D7F4, "M", "8"), + (0x1D7F5, "M", "9"), + (0x1D7F6, "M", "0"), + (0x1D7F7, "M", "1"), + (0x1D7F8, "M", "2"), + (0x1D7F9, "M", "3"), + (0x1D7FA, "M", "4"), + (0x1D7FB, "M", "5"), + (0x1D7FC, "M", "6"), + (0x1D7FD, "M", "7"), + (0x1D7FE, "M", "8"), + (0x1D7FF, "M", "9"), + (0x1D800, "V"), + (0x1DA8C, "X"), + (0x1DA9B, "V"), + (0x1DAA0, "X"), + (0x1DAA1, "V"), + (0x1DAB0, "X"), + (0x1DF00, "V"), + (0x1DF1F, "X"), + (0x1DF25, "V"), + (0x1DF2B, "X"), + (0x1E000, "V"), + (0x1E007, "X"), + (0x1E008, "V"), + (0x1E019, "X"), + (0x1E01B, "V"), + (0x1E022, "X"), + (0x1E023, "V"), + (0x1E025, "X"), + (0x1E026, "V"), + (0x1E02B, "X"), + (0x1E030, "M", "а"), + (0x1E031, "M", "б"), + (0x1E032, "M", "в"), 
+ (0x1E033, "M", "г"), + (0x1E034, "M", "д"), + (0x1E035, "M", "е"), + (0x1E036, "M", "ж"), + ] + + +def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E037, "M", "з"), + (0x1E038, "M", "и"), + (0x1E039, "M", "к"), + (0x1E03A, "M", "л"), + (0x1E03B, "M", "м"), + (0x1E03C, "M", "о"), + (0x1E03D, "M", "п"), + (0x1E03E, "M", "р"), + (0x1E03F, "M", "с"), + (0x1E040, "M", "т"), + (0x1E041, "M", "у"), + (0x1E042, "M", "ф"), + (0x1E043, "M", "х"), + (0x1E044, "M", "ц"), + (0x1E045, "M", "ч"), + (0x1E046, "M", "ш"), + (0x1E047, "M", "ы"), + (0x1E048, "M", "э"), + (0x1E049, "M", "ю"), + (0x1E04A, "M", "ꚉ"), + (0x1E04B, "M", "ә"), + (0x1E04C, "M", "і"), + (0x1E04D, "M", "ј"), + (0x1E04E, "M", "ө"), + (0x1E04F, "M", "ү"), + (0x1E050, "M", "ӏ"), + (0x1E051, "M", "а"), + (0x1E052, "M", "б"), + (0x1E053, "M", "в"), + (0x1E054, "M", "г"), + (0x1E055, "M", "д"), + (0x1E056, "M", "е"), + (0x1E057, "M", "ж"), + (0x1E058, "M", "з"), + (0x1E059, "M", "и"), + (0x1E05A, "M", "к"), + (0x1E05B, "M", "л"), + (0x1E05C, "M", "о"), + (0x1E05D, "M", "п"), + (0x1E05E, "M", "с"), + (0x1E05F, "M", "у"), + (0x1E060, "M", "ф"), + (0x1E061, "M", "х"), + (0x1E062, "M", "ц"), + (0x1E063, "M", "ч"), + (0x1E064, "M", "ш"), + (0x1E065, "M", "ъ"), + (0x1E066, "M", "ы"), + (0x1E067, "M", "ґ"), + (0x1E068, "M", "і"), + (0x1E069, "M", "ѕ"), + (0x1E06A, "M", "џ"), + (0x1E06B, "M", "ҫ"), + (0x1E06C, "M", "ꙑ"), + (0x1E06D, "M", "ұ"), + (0x1E06E, "X"), + (0x1E08F, "V"), + (0x1E090, "X"), + (0x1E100, "V"), + (0x1E12D, "X"), + (0x1E130, "V"), + (0x1E13E, "X"), + (0x1E140, "V"), + (0x1E14A, "X"), + (0x1E14E, "V"), + (0x1E150, "X"), + (0x1E290, "V"), + (0x1E2AF, "X"), + (0x1E2C0, "V"), + (0x1E2FA, "X"), + (0x1E2FF, "V"), + (0x1E300, "X"), + (0x1E4D0, "V"), + (0x1E4FA, "X"), + (0x1E7E0, "V"), + (0x1E7E7, "X"), + (0x1E7E8, "V"), + (0x1E7EC, "X"), + (0x1E7ED, "V"), + (0x1E7EF, "X"), + (0x1E7F0, "V"), + (0x1E7FF, "X"), + (0x1E800, "V"), + (0x1E8C5, "X"), + (0x1E8C7, "V"), + (0x1E8D7, "X"), + (0x1E900, "M", "𞤢"), + (0x1E901, "M", "𞤣"), + (0x1E902, "M", "𞤤"), + (0x1E903, "M", "𞤥"), + (0x1E904, "M", "𞤦"), + (0x1E905, "M", "𞤧"), + (0x1E906, "M", "𞤨"), + (0x1E907, "M", "𞤩"), + (0x1E908, "M", "𞤪"), + (0x1E909, "M", "𞤫"), + (0x1E90A, "M", "𞤬"), + (0x1E90B, "M", "𞤭"), + (0x1E90C, "M", "𞤮"), + (0x1E90D, "M", "𞤯"), + ] + + +def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E90E, "M", "𞤰"), + (0x1E90F, "M", "𞤱"), + (0x1E910, "M", "𞤲"), + (0x1E911, "M", "𞤳"), + (0x1E912, "M", "𞤴"), + (0x1E913, "M", "𞤵"), + (0x1E914, "M", "𞤶"), + (0x1E915, "M", "𞤷"), + (0x1E916, "M", "𞤸"), + (0x1E917, "M", "𞤹"), + (0x1E918, "M", "𞤺"), + (0x1E919, "M", "𞤻"), + (0x1E91A, "M", "𞤼"), + (0x1E91B, "M", "𞤽"), + (0x1E91C, "M", "𞤾"), + (0x1E91D, "M", "𞤿"), + (0x1E91E, "M", "𞥀"), + (0x1E91F, "M", "𞥁"), + (0x1E920, "M", "𞥂"), + (0x1E921, "M", "𞥃"), + (0x1E922, "V"), + (0x1E94C, "X"), + (0x1E950, "V"), + (0x1E95A, "X"), + (0x1E95E, "V"), + (0x1E960, "X"), + (0x1EC71, "V"), + (0x1ECB5, "X"), + (0x1ED01, "V"), + (0x1ED3E, "X"), + (0x1EE00, "M", "ا"), + (0x1EE01, "M", "ب"), + (0x1EE02, "M", "ج"), + (0x1EE03, "M", "د"), + (0x1EE04, "X"), + (0x1EE05, "M", "و"), + (0x1EE06, "M", "ز"), + (0x1EE07, "M", "ح"), + (0x1EE08, "M", "ط"), + (0x1EE09, "M", "ي"), + (0x1EE0A, "M", "ك"), + (0x1EE0B, "M", "ل"), + (0x1EE0C, "M", "م"), + (0x1EE0D, "M", "ن"), + (0x1EE0E, "M", "س"), + (0x1EE0F, "M", "ع"), + (0x1EE10, "M", "ف"), + (0x1EE11, "M", "ص"), + (0x1EE12, "M", "ق"), + (0x1EE13, "M", "ر"), + (0x1EE14, "M", "ش"), + (0x1EE15, "M", 
"ت"), + (0x1EE16, "M", "ث"), + (0x1EE17, "M", "خ"), + (0x1EE18, "M", "ذ"), + (0x1EE19, "M", "ض"), + (0x1EE1A, "M", "ظ"), + (0x1EE1B, "M", "غ"), + (0x1EE1C, "M", "ٮ"), + (0x1EE1D, "M", "ں"), + (0x1EE1E, "M", "ڡ"), + (0x1EE1F, "M", "ٯ"), + (0x1EE20, "X"), + (0x1EE21, "M", "ب"), + (0x1EE22, "M", "ج"), + (0x1EE23, "X"), + (0x1EE24, "M", "ه"), + (0x1EE25, "X"), + (0x1EE27, "M", "ح"), + (0x1EE28, "X"), + (0x1EE29, "M", "ي"), + (0x1EE2A, "M", "ك"), + (0x1EE2B, "M", "ل"), + (0x1EE2C, "M", "م"), + (0x1EE2D, "M", "ن"), + (0x1EE2E, "M", "س"), + (0x1EE2F, "M", "ع"), + (0x1EE30, "M", "ف"), + (0x1EE31, "M", "ص"), + (0x1EE32, "M", "ق"), + (0x1EE33, "X"), + (0x1EE34, "M", "ش"), + (0x1EE35, "M", "ت"), + (0x1EE36, "M", "ث"), + (0x1EE37, "M", "خ"), + (0x1EE38, "X"), + (0x1EE39, "M", "ض"), + (0x1EE3A, "X"), + (0x1EE3B, "M", "غ"), + (0x1EE3C, "X"), + (0x1EE42, "M", "ج"), + (0x1EE43, "X"), + (0x1EE47, "M", "ح"), + (0x1EE48, "X"), + (0x1EE49, "M", "ي"), + (0x1EE4A, "X"), + (0x1EE4B, "M", "ل"), + (0x1EE4C, "X"), + (0x1EE4D, "M", "ن"), + (0x1EE4E, "M", "س"), + ] + + +def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EE4F, "M", "ع"), + (0x1EE50, "X"), + (0x1EE51, "M", "ص"), + (0x1EE52, "M", "ق"), + (0x1EE53, "X"), + (0x1EE54, "M", "ش"), + (0x1EE55, "X"), + (0x1EE57, "M", "خ"), + (0x1EE58, "X"), + (0x1EE59, "M", "ض"), + (0x1EE5A, "X"), + (0x1EE5B, "M", "غ"), + (0x1EE5C, "X"), + (0x1EE5D, "M", "ں"), + (0x1EE5E, "X"), + (0x1EE5F, "M", "ٯ"), + (0x1EE60, "X"), + (0x1EE61, "M", "ب"), + (0x1EE62, "M", "ج"), + (0x1EE63, "X"), + (0x1EE64, "M", "ه"), + (0x1EE65, "X"), + (0x1EE67, "M", "ح"), + (0x1EE68, "M", "ط"), + (0x1EE69, "M", "ي"), + (0x1EE6A, "M", "ك"), + (0x1EE6B, "X"), + (0x1EE6C, "M", "م"), + (0x1EE6D, "M", "ن"), + (0x1EE6E, "M", "س"), + (0x1EE6F, "M", "ع"), + (0x1EE70, "M", "ف"), + (0x1EE71, "M", "ص"), + (0x1EE72, "M", "ق"), + (0x1EE73, "X"), + (0x1EE74, "M", "ش"), + (0x1EE75, "M", "ت"), + (0x1EE76, "M", "ث"), + (0x1EE77, "M", "خ"), + (0x1EE78, "X"), + (0x1EE79, "M", "ض"), + (0x1EE7A, "M", "ظ"), + (0x1EE7B, "M", "غ"), + (0x1EE7C, "M", "ٮ"), + (0x1EE7D, "X"), + (0x1EE7E, "M", "ڡ"), + (0x1EE7F, "X"), + (0x1EE80, "M", "ا"), + (0x1EE81, "M", "ب"), + (0x1EE82, "M", "ج"), + (0x1EE83, "M", "د"), + (0x1EE84, "M", "ه"), + (0x1EE85, "M", "و"), + (0x1EE86, "M", "ز"), + (0x1EE87, "M", "ح"), + (0x1EE88, "M", "ط"), + (0x1EE89, "M", "ي"), + (0x1EE8A, "X"), + (0x1EE8B, "M", "ل"), + (0x1EE8C, "M", "م"), + (0x1EE8D, "M", "ن"), + (0x1EE8E, "M", "س"), + (0x1EE8F, "M", "ع"), + (0x1EE90, "M", "ف"), + (0x1EE91, "M", "ص"), + (0x1EE92, "M", "ق"), + (0x1EE93, "M", "ر"), + (0x1EE94, "M", "ش"), + (0x1EE95, "M", "ت"), + (0x1EE96, "M", "ث"), + (0x1EE97, "M", "خ"), + (0x1EE98, "M", "ذ"), + (0x1EE99, "M", "ض"), + (0x1EE9A, "M", "ظ"), + (0x1EE9B, "M", "غ"), + (0x1EE9C, "X"), + (0x1EEA1, "M", "ب"), + (0x1EEA2, "M", "ج"), + (0x1EEA3, "M", "د"), + (0x1EEA4, "X"), + (0x1EEA5, "M", "و"), + (0x1EEA6, "M", "ز"), + (0x1EEA7, "M", "ح"), + (0x1EEA8, "M", "ط"), + (0x1EEA9, "M", "ي"), + (0x1EEAA, "X"), + (0x1EEAB, "M", "ل"), + (0x1EEAC, "M", "م"), + (0x1EEAD, "M", "ن"), + (0x1EEAE, "M", "س"), + (0x1EEAF, "M", "ع"), + (0x1EEB0, "M", "ف"), + (0x1EEB1, "M", "ص"), + (0x1EEB2, "M", "ق"), + (0x1EEB3, "M", "ر"), + (0x1EEB4, "M", "ش"), + (0x1EEB5, "M", "ت"), + (0x1EEB6, "M", "ث"), + (0x1EEB7, "M", "خ"), + (0x1EEB8, "M", "ذ"), + ] + + +def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EEB9, "M", "ض"), + (0x1EEBA, "M", "ظ"), + (0x1EEBB, "M", "غ"), + (0x1EEBC, "X"), + (0x1EEF0, "V"), + (0x1EEF2, 
"X"), + (0x1F000, "V"), + (0x1F02C, "X"), + (0x1F030, "V"), + (0x1F094, "X"), + (0x1F0A0, "V"), + (0x1F0AF, "X"), + (0x1F0B1, "V"), + (0x1F0C0, "X"), + (0x1F0C1, "V"), + (0x1F0D0, "X"), + (0x1F0D1, "V"), + (0x1F0F6, "X"), + (0x1F101, "3", "0,"), + (0x1F102, "3", "1,"), + (0x1F103, "3", "2,"), + (0x1F104, "3", "3,"), + (0x1F105, "3", "4,"), + (0x1F106, "3", "5,"), + (0x1F107, "3", "6,"), + (0x1F108, "3", "7,"), + (0x1F109, "3", "8,"), + (0x1F10A, "3", "9,"), + (0x1F10B, "V"), + (0x1F110, "3", "(a)"), + (0x1F111, "3", "(b)"), + (0x1F112, "3", "(c)"), + (0x1F113, "3", "(d)"), + (0x1F114, "3", "(e)"), + (0x1F115, "3", "(f)"), + (0x1F116, "3", "(g)"), + (0x1F117, "3", "(h)"), + (0x1F118, "3", "(i)"), + (0x1F119, "3", "(j)"), + (0x1F11A, "3", "(k)"), + (0x1F11B, "3", "(l)"), + (0x1F11C, "3", "(m)"), + (0x1F11D, "3", "(n)"), + (0x1F11E, "3", "(o)"), + (0x1F11F, "3", "(p)"), + (0x1F120, "3", "(q)"), + (0x1F121, "3", "(r)"), + (0x1F122, "3", "(s)"), + (0x1F123, "3", "(t)"), + (0x1F124, "3", "(u)"), + (0x1F125, "3", "(v)"), + (0x1F126, "3", "(w)"), + (0x1F127, "3", "(x)"), + (0x1F128, "3", "(y)"), + (0x1F129, "3", "(z)"), + (0x1F12A, "M", "〔s〕"), + (0x1F12B, "M", "c"), + (0x1F12C, "M", "r"), + (0x1F12D, "M", "cd"), + (0x1F12E, "M", "wz"), + (0x1F12F, "V"), + (0x1F130, "M", "a"), + (0x1F131, "M", "b"), + (0x1F132, "M", "c"), + (0x1F133, "M", "d"), + (0x1F134, "M", "e"), + (0x1F135, "M", "f"), + (0x1F136, "M", "g"), + (0x1F137, "M", "h"), + (0x1F138, "M", "i"), + (0x1F139, "M", "j"), + (0x1F13A, "M", "k"), + (0x1F13B, "M", "l"), + (0x1F13C, "M", "m"), + (0x1F13D, "M", "n"), + (0x1F13E, "M", "o"), + (0x1F13F, "M", "p"), + (0x1F140, "M", "q"), + (0x1F141, "M", "r"), + (0x1F142, "M", "s"), + (0x1F143, "M", "t"), + (0x1F144, "M", "u"), + (0x1F145, "M", "v"), + (0x1F146, "M", "w"), + (0x1F147, "M", "x"), + (0x1F148, "M", "y"), + (0x1F149, "M", "z"), + (0x1F14A, "M", "hv"), + (0x1F14B, "M", "mv"), + (0x1F14C, "M", "sd"), + (0x1F14D, "M", "ss"), + (0x1F14E, "M", "ppv"), + (0x1F14F, "M", "wc"), + (0x1F150, "V"), + (0x1F16A, "M", "mc"), + (0x1F16B, "M", "md"), + (0x1F16C, "M", "mr"), + (0x1F16D, "V"), + (0x1F190, "M", "dj"), + (0x1F191, "V"), + ] + + +def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F1AE, "X"), + (0x1F1E6, "V"), + (0x1F200, "M", "ほか"), + (0x1F201, "M", "ココ"), + (0x1F202, "M", "サ"), + (0x1F203, "X"), + (0x1F210, "M", "手"), + (0x1F211, "M", "字"), + (0x1F212, "M", "双"), + (0x1F213, "M", "デ"), + (0x1F214, "M", "二"), + (0x1F215, "M", "多"), + (0x1F216, "M", "解"), + (0x1F217, "M", "天"), + (0x1F218, "M", "交"), + (0x1F219, "M", "映"), + (0x1F21A, "M", "無"), + (0x1F21B, "M", "料"), + (0x1F21C, "M", "前"), + (0x1F21D, "M", "後"), + (0x1F21E, "M", "再"), + (0x1F21F, "M", "新"), + (0x1F220, "M", "初"), + (0x1F221, "M", "終"), + (0x1F222, "M", "生"), + (0x1F223, "M", "販"), + (0x1F224, "M", "声"), + (0x1F225, "M", "吹"), + (0x1F226, "M", "演"), + (0x1F227, "M", "投"), + (0x1F228, "M", "捕"), + (0x1F229, "M", "一"), + (0x1F22A, "M", "三"), + (0x1F22B, "M", "遊"), + (0x1F22C, "M", "左"), + (0x1F22D, "M", "中"), + (0x1F22E, "M", "右"), + (0x1F22F, "M", "指"), + (0x1F230, "M", "走"), + (0x1F231, "M", "打"), + (0x1F232, "M", "禁"), + (0x1F233, "M", "空"), + (0x1F234, "M", "合"), + (0x1F235, "M", "満"), + (0x1F236, "M", "有"), + (0x1F237, "M", "月"), + (0x1F238, "M", "申"), + (0x1F239, "M", "割"), + (0x1F23A, "M", "営"), + (0x1F23B, "M", "配"), + (0x1F23C, "X"), + (0x1F240, "M", "〔本〕"), + (0x1F241, "M", "〔三〕"), + (0x1F242, "M", "〔二〕"), + (0x1F243, "M", "〔安〕"), + (0x1F244, "M", "〔点〕"), + (0x1F245, "M", "〔打〕"), 
+ (0x1F246, "M", "〔盗〕"), + (0x1F247, "M", "〔勝〕"), + (0x1F248, "M", "〔敗〕"), + (0x1F249, "X"), + (0x1F250, "M", "得"), + (0x1F251, "M", "可"), + (0x1F252, "X"), + (0x1F260, "V"), + (0x1F266, "X"), + (0x1F300, "V"), + (0x1F6D8, "X"), + (0x1F6DC, "V"), + (0x1F6ED, "X"), + (0x1F6F0, "V"), + (0x1F6FD, "X"), + (0x1F700, "V"), + (0x1F777, "X"), + (0x1F77B, "V"), + (0x1F7DA, "X"), + (0x1F7E0, "V"), + (0x1F7EC, "X"), + (0x1F7F0, "V"), + (0x1F7F1, "X"), + (0x1F800, "V"), + (0x1F80C, "X"), + (0x1F810, "V"), + (0x1F848, "X"), + (0x1F850, "V"), + (0x1F85A, "X"), + (0x1F860, "V"), + (0x1F888, "X"), + (0x1F890, "V"), + (0x1F8AE, "X"), + (0x1F8B0, "V"), + (0x1F8B2, "X"), + (0x1F900, "V"), + (0x1FA54, "X"), + (0x1FA60, "V"), + (0x1FA6E, "X"), + (0x1FA70, "V"), + (0x1FA7D, "X"), + (0x1FA80, "V"), + (0x1FA89, "X"), + ] + + +def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1FA90, "V"), + (0x1FABE, "X"), + (0x1FABF, "V"), + (0x1FAC6, "X"), + (0x1FACE, "V"), + (0x1FADC, "X"), + (0x1FAE0, "V"), + (0x1FAE9, "X"), + (0x1FAF0, "V"), + (0x1FAF9, "X"), + (0x1FB00, "V"), + (0x1FB93, "X"), + (0x1FB94, "V"), + (0x1FBCB, "X"), + (0x1FBF0, "M", "0"), + (0x1FBF1, "M", "1"), + (0x1FBF2, "M", "2"), + (0x1FBF3, "M", "3"), + (0x1FBF4, "M", "4"), + (0x1FBF5, "M", "5"), + (0x1FBF6, "M", "6"), + (0x1FBF7, "M", "7"), + (0x1FBF8, "M", "8"), + (0x1FBF9, "M", "9"), + (0x1FBFA, "X"), + (0x20000, "V"), + (0x2A6E0, "X"), + (0x2A700, "V"), + (0x2B73A, "X"), + (0x2B740, "V"), + (0x2B81E, "X"), + (0x2B820, "V"), + (0x2CEA2, "X"), + (0x2CEB0, "V"), + (0x2EBE1, "X"), + (0x2EBF0, "V"), + (0x2EE5E, "X"), + (0x2F800, "M", "丽"), + (0x2F801, "M", "丸"), + (0x2F802, "M", "乁"), + (0x2F803, "M", "𠄢"), + (0x2F804, "M", "你"), + (0x2F805, "M", "侮"), + (0x2F806, "M", "侻"), + (0x2F807, "M", "倂"), + (0x2F808, "M", "偺"), + (0x2F809, "M", "備"), + (0x2F80A, "M", "僧"), + (0x2F80B, "M", "像"), + (0x2F80C, "M", "㒞"), + (0x2F80D, "M", "𠘺"), + (0x2F80E, "M", "免"), + (0x2F80F, "M", "兔"), + (0x2F810, "M", "兤"), + (0x2F811, "M", "具"), + (0x2F812, "M", "𠔜"), + (0x2F813, "M", "㒹"), + (0x2F814, "M", "內"), + (0x2F815, "M", "再"), + (0x2F816, "M", "𠕋"), + (0x2F817, "M", "冗"), + (0x2F818, "M", "冤"), + (0x2F819, "M", "仌"), + (0x2F81A, "M", "冬"), + (0x2F81B, "M", "况"), + (0x2F81C, "M", "𩇟"), + (0x2F81D, "M", "凵"), + (0x2F81E, "M", "刃"), + (0x2F81F, "M", "㓟"), + (0x2F820, "M", "刻"), + (0x2F821, "M", "剆"), + (0x2F822, "M", "割"), + (0x2F823, "M", "剷"), + (0x2F824, "M", "㔕"), + (0x2F825, "M", "勇"), + (0x2F826, "M", "勉"), + (0x2F827, "M", "勤"), + (0x2F828, "M", "勺"), + (0x2F829, "M", "包"), + (0x2F82A, "M", "匆"), + (0x2F82B, "M", "北"), + (0x2F82C, "M", "卉"), + (0x2F82D, "M", "卑"), + (0x2F82E, "M", "博"), + (0x2F82F, "M", "即"), + (0x2F830, "M", "卽"), + (0x2F831, "M", "卿"), + (0x2F834, "M", "𠨬"), + (0x2F835, "M", "灰"), + (0x2F836, "M", "及"), + (0x2F837, "M", "叟"), + (0x2F838, "M", "𠭣"), + (0x2F839, "M", "叫"), + (0x2F83A, "M", "叱"), + (0x2F83B, "M", "吆"), + (0x2F83C, "M", "咞"), + (0x2F83D, "M", "吸"), + (0x2F83E, "M", "呈"), + (0x2F83F, "M", "周"), + (0x2F840, "M", "咢"), + ] + + +def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F841, "M", "哶"), + (0x2F842, "M", "唐"), + (0x2F843, "M", "啓"), + (0x2F844, "M", "啣"), + (0x2F845, "M", "善"), + (0x2F847, "M", "喙"), + (0x2F848, "M", "喫"), + (0x2F849, "M", "喳"), + (0x2F84A, "M", "嗂"), + (0x2F84B, "M", "圖"), + (0x2F84C, "M", "嘆"), + (0x2F84D, "M", "圗"), + (0x2F84E, "M", "噑"), + (0x2F84F, "M", "噴"), + (0x2F850, "M", "切"), + (0x2F851, "M", "壮"), + (0x2F852, "M", "城"), + (0x2F853, 
"M", "埴"), + (0x2F854, "M", "堍"), + (0x2F855, "M", "型"), + (0x2F856, "M", "堲"), + (0x2F857, "M", "報"), + (0x2F858, "M", "墬"), + (0x2F859, "M", "𡓤"), + (0x2F85A, "M", "売"), + (0x2F85B, "M", "壷"), + (0x2F85C, "M", "夆"), + (0x2F85D, "M", "多"), + (0x2F85E, "M", "夢"), + (0x2F85F, "M", "奢"), + (0x2F860, "M", "𡚨"), + (0x2F861, "M", "𡛪"), + (0x2F862, "M", "姬"), + (0x2F863, "M", "娛"), + (0x2F864, "M", "娧"), + (0x2F865, "M", "姘"), + (0x2F866, "M", "婦"), + (0x2F867, "M", "㛮"), + (0x2F868, "X"), + (0x2F869, "M", "嬈"), + (0x2F86A, "M", "嬾"), + (0x2F86C, "M", "𡧈"), + (0x2F86D, "M", "寃"), + (0x2F86E, "M", "寘"), + (0x2F86F, "M", "寧"), + (0x2F870, "M", "寳"), + (0x2F871, "M", "𡬘"), + (0x2F872, "M", "寿"), + (0x2F873, "M", "将"), + (0x2F874, "X"), + (0x2F875, "M", "尢"), + (0x2F876, "M", "㞁"), + (0x2F877, "M", "屠"), + (0x2F878, "M", "屮"), + (0x2F879, "M", "峀"), + (0x2F87A, "M", "岍"), + (0x2F87B, "M", "𡷤"), + (0x2F87C, "M", "嵃"), + (0x2F87D, "M", "𡷦"), + (0x2F87E, "M", "嵮"), + (0x2F87F, "M", "嵫"), + (0x2F880, "M", "嵼"), + (0x2F881, "M", "巡"), + (0x2F882, "M", "巢"), + (0x2F883, "M", "㠯"), + (0x2F884, "M", "巽"), + (0x2F885, "M", "帨"), + (0x2F886, "M", "帽"), + (0x2F887, "M", "幩"), + (0x2F888, "M", "㡢"), + (0x2F889, "M", "𢆃"), + (0x2F88A, "M", "㡼"), + (0x2F88B, "M", "庰"), + (0x2F88C, "M", "庳"), + (0x2F88D, "M", "庶"), + (0x2F88E, "M", "廊"), + (0x2F88F, "M", "𪎒"), + (0x2F890, "M", "廾"), + (0x2F891, "M", "𢌱"), + (0x2F893, "M", "舁"), + (0x2F894, "M", "弢"), + (0x2F896, "M", "㣇"), + (0x2F897, "M", "𣊸"), + (0x2F898, "M", "𦇚"), + (0x2F899, "M", "形"), + (0x2F89A, "M", "彫"), + (0x2F89B, "M", "㣣"), + (0x2F89C, "M", "徚"), + (0x2F89D, "M", "忍"), + (0x2F89E, "M", "志"), + (0x2F89F, "M", "忹"), + (0x2F8A0, "M", "悁"), + (0x2F8A1, "M", "㤺"), + (0x2F8A2, "M", "㤜"), + (0x2F8A3, "M", "悔"), + (0x2F8A4, "M", "𢛔"), + (0x2F8A5, "M", "惇"), + (0x2F8A6, "M", "慈"), + (0x2F8A7, "M", "慌"), + (0x2F8A8, "M", "慎"), + ] + + +def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F8A9, "M", "慌"), + (0x2F8AA, "M", "慺"), + (0x2F8AB, "M", "憎"), + (0x2F8AC, "M", "憲"), + (0x2F8AD, "M", "憤"), + (0x2F8AE, "M", "憯"), + (0x2F8AF, "M", "懞"), + (0x2F8B0, "M", "懲"), + (0x2F8B1, "M", "懶"), + (0x2F8B2, "M", "成"), + (0x2F8B3, "M", "戛"), + (0x2F8B4, "M", "扝"), + (0x2F8B5, "M", "抱"), + (0x2F8B6, "M", "拔"), + (0x2F8B7, "M", "捐"), + (0x2F8B8, "M", "𢬌"), + (0x2F8B9, "M", "挽"), + (0x2F8BA, "M", "拼"), + (0x2F8BB, "M", "捨"), + (0x2F8BC, "M", "掃"), + (0x2F8BD, "M", "揤"), + (0x2F8BE, "M", "𢯱"), + (0x2F8BF, "M", "搢"), + (0x2F8C0, "M", "揅"), + (0x2F8C1, "M", "掩"), + (0x2F8C2, "M", "㨮"), + (0x2F8C3, "M", "摩"), + (0x2F8C4, "M", "摾"), + (0x2F8C5, "M", "撝"), + (0x2F8C6, "M", "摷"), + (0x2F8C7, "M", "㩬"), + (0x2F8C8, "M", "敏"), + (0x2F8C9, "M", "敬"), + (0x2F8CA, "M", "𣀊"), + (0x2F8CB, "M", "旣"), + (0x2F8CC, "M", "書"), + (0x2F8CD, "M", "晉"), + (0x2F8CE, "M", "㬙"), + (0x2F8CF, "M", "暑"), + (0x2F8D0, "M", "㬈"), + (0x2F8D1, "M", "㫤"), + (0x2F8D2, "M", "冒"), + (0x2F8D3, "M", "冕"), + (0x2F8D4, "M", "最"), + (0x2F8D5, "M", "暜"), + (0x2F8D6, "M", "肭"), + (0x2F8D7, "M", "䏙"), + (0x2F8D8, "M", "朗"), + (0x2F8D9, "M", "望"), + (0x2F8DA, "M", "朡"), + (0x2F8DB, "M", "杞"), + (0x2F8DC, "M", "杓"), + (0x2F8DD, "M", "𣏃"), + (0x2F8DE, "M", "㭉"), + (0x2F8DF, "M", "柺"), + (0x2F8E0, "M", "枅"), + (0x2F8E1, "M", "桒"), + (0x2F8E2, "M", "梅"), + (0x2F8E3, "M", "𣑭"), + (0x2F8E4, "M", "梎"), + (0x2F8E5, "M", "栟"), + (0x2F8E6, "M", "椔"), + (0x2F8E7, "M", "㮝"), + (0x2F8E8, "M", "楂"), + (0x2F8E9, "M", "榣"), + (0x2F8EA, "M", "槪"), + (0x2F8EB, "M", "檨"), + (0x2F8EC, "M", "𣚣"), + (0x2F8ED, 
"M", "櫛"), + (0x2F8EE, "M", "㰘"), + (0x2F8EF, "M", "次"), + (0x2F8F0, "M", "𣢧"), + (0x2F8F1, "M", "歔"), + (0x2F8F2, "M", "㱎"), + (0x2F8F3, "M", "歲"), + (0x2F8F4, "M", "殟"), + (0x2F8F5, "M", "殺"), + (0x2F8F6, "M", "殻"), + (0x2F8F7, "M", "𣪍"), + (0x2F8F8, "M", "𡴋"), + (0x2F8F9, "M", "𣫺"), + (0x2F8FA, "M", "汎"), + (0x2F8FB, "M", "𣲼"), + (0x2F8FC, "M", "沿"), + (0x2F8FD, "M", "泍"), + (0x2F8FE, "M", "汧"), + (0x2F8FF, "M", "洖"), + (0x2F900, "M", "派"), + (0x2F901, "M", "海"), + (0x2F902, "M", "流"), + (0x2F903, "M", "浩"), + (0x2F904, "M", "浸"), + (0x2F905, "M", "涅"), + (0x2F906, "M", "𣴞"), + (0x2F907, "M", "洴"), + (0x2F908, "M", "港"), + (0x2F909, "M", "湮"), + (0x2F90A, "M", "㴳"), + (0x2F90B, "M", "滋"), + (0x2F90C, "M", "滇"), + ] + + +def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F90D, "M", "𣻑"), + (0x2F90E, "M", "淹"), + (0x2F90F, "M", "潮"), + (0x2F910, "M", "𣽞"), + (0x2F911, "M", "𣾎"), + (0x2F912, "M", "濆"), + (0x2F913, "M", "瀹"), + (0x2F914, "M", "瀞"), + (0x2F915, "M", "瀛"), + (0x2F916, "M", "㶖"), + (0x2F917, "M", "灊"), + (0x2F918, "M", "災"), + (0x2F919, "M", "灷"), + (0x2F91A, "M", "炭"), + (0x2F91B, "M", "𠔥"), + (0x2F91C, "M", "煅"), + (0x2F91D, "M", "𤉣"), + (0x2F91E, "M", "熜"), + (0x2F91F, "X"), + (0x2F920, "M", "爨"), + (0x2F921, "M", "爵"), + (0x2F922, "M", "牐"), + (0x2F923, "M", "𤘈"), + (0x2F924, "M", "犀"), + (0x2F925, "M", "犕"), + (0x2F926, "M", "𤜵"), + (0x2F927, "M", "𤠔"), + (0x2F928, "M", "獺"), + (0x2F929, "M", "王"), + (0x2F92A, "M", "㺬"), + (0x2F92B, "M", "玥"), + (0x2F92C, "M", "㺸"), + (0x2F92E, "M", "瑇"), + (0x2F92F, "M", "瑜"), + (0x2F930, "M", "瑱"), + (0x2F931, "M", "璅"), + (0x2F932, "M", "瓊"), + (0x2F933, "M", "㼛"), + (0x2F934, "M", "甤"), + (0x2F935, "M", "𤰶"), + (0x2F936, "M", "甾"), + (0x2F937, "M", "𤲒"), + (0x2F938, "M", "異"), + (0x2F939, "M", "𢆟"), + (0x2F93A, "M", "瘐"), + (0x2F93B, "M", "𤾡"), + (0x2F93C, "M", "𤾸"), + (0x2F93D, "M", "𥁄"), + (0x2F93E, "M", "㿼"), + (0x2F93F, "M", "䀈"), + (0x2F940, "M", "直"), + (0x2F941, "M", "𥃳"), + (0x2F942, "M", "𥃲"), + (0x2F943, "M", "𥄙"), + (0x2F944, "M", "𥄳"), + (0x2F945, "M", "眞"), + (0x2F946, "M", "真"), + (0x2F948, "M", "睊"), + (0x2F949, "M", "䀹"), + (0x2F94A, "M", "瞋"), + (0x2F94B, "M", "䁆"), + (0x2F94C, "M", "䂖"), + (0x2F94D, "M", "𥐝"), + (0x2F94E, "M", "硎"), + (0x2F94F, "M", "碌"), + (0x2F950, "M", "磌"), + (0x2F951, "M", "䃣"), + (0x2F952, "M", "𥘦"), + (0x2F953, "M", "祖"), + (0x2F954, "M", "𥚚"), + (0x2F955, "M", "𥛅"), + (0x2F956, "M", "福"), + (0x2F957, "M", "秫"), + (0x2F958, "M", "䄯"), + (0x2F959, "M", "穀"), + (0x2F95A, "M", "穊"), + (0x2F95B, "M", "穏"), + (0x2F95C, "M", "𥥼"), + (0x2F95D, "M", "𥪧"), + (0x2F95F, "X"), + (0x2F960, "M", "䈂"), + (0x2F961, "M", "𥮫"), + (0x2F962, "M", "篆"), + (0x2F963, "M", "築"), + (0x2F964, "M", "䈧"), + (0x2F965, "M", "𥲀"), + (0x2F966, "M", "糒"), + (0x2F967, "M", "䊠"), + (0x2F968, "M", "糨"), + (0x2F969, "M", "糣"), + (0x2F96A, "M", "紀"), + (0x2F96B, "M", "𥾆"), + (0x2F96C, "M", "絣"), + (0x2F96D, "M", "䌁"), + (0x2F96E, "M", "緇"), + (0x2F96F, "M", "縂"), + (0x2F970, "M", "繅"), + (0x2F971, "M", "䌴"), + (0x2F972, "M", "𦈨"), + (0x2F973, "M", "𦉇"), + ] + + +def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F974, "M", "䍙"), + (0x2F975, "M", "𦋙"), + (0x2F976, "M", "罺"), + (0x2F977, "M", "𦌾"), + (0x2F978, "M", "羕"), + (0x2F979, "M", "翺"), + (0x2F97A, "M", "者"), + (0x2F97B, "M", "𦓚"), + (0x2F97C, "M", "𦔣"), + (0x2F97D, "M", "聠"), + (0x2F97E, "M", "𦖨"), + (0x2F97F, "M", "聰"), + (0x2F980, "M", "𣍟"), + (0x2F981, "M", "䏕"), + (0x2F982, "M", "育"), + (0x2F983, "M", 
"脃"), + (0x2F984, "M", "䐋"), + (0x2F985, "M", "脾"), + (0x2F986, "M", "媵"), + (0x2F987, "M", "𦞧"), + (0x2F988, "M", "𦞵"), + (0x2F989, "M", "𣎓"), + (0x2F98A, "M", "𣎜"), + (0x2F98B, "M", "舁"), + (0x2F98C, "M", "舄"), + (0x2F98D, "M", "辞"), + (0x2F98E, "M", "䑫"), + (0x2F98F, "M", "芑"), + (0x2F990, "M", "芋"), + (0x2F991, "M", "芝"), + (0x2F992, "M", "劳"), + (0x2F993, "M", "花"), + (0x2F994, "M", "芳"), + (0x2F995, "M", "芽"), + (0x2F996, "M", "苦"), + (0x2F997, "M", "𦬼"), + (0x2F998, "M", "若"), + (0x2F999, "M", "茝"), + (0x2F99A, "M", "荣"), + (0x2F99B, "M", "莭"), + (0x2F99C, "M", "茣"), + (0x2F99D, "M", "莽"), + (0x2F99E, "M", "菧"), + (0x2F99F, "M", "著"), + (0x2F9A0, "M", "荓"), + (0x2F9A1, "M", "菊"), + (0x2F9A2, "M", "菌"), + (0x2F9A3, "M", "菜"), + (0x2F9A4, "M", "𦰶"), + (0x2F9A5, "M", "𦵫"), + (0x2F9A6, "M", "𦳕"), + (0x2F9A7, "M", "䔫"), + (0x2F9A8, "M", "蓱"), + (0x2F9A9, "M", "蓳"), + (0x2F9AA, "M", "蔖"), + (0x2F9AB, "M", "𧏊"), + (0x2F9AC, "M", "蕤"), + (0x2F9AD, "M", "𦼬"), + (0x2F9AE, "M", "䕝"), + (0x2F9AF, "M", "䕡"), + (0x2F9B0, "M", "𦾱"), + (0x2F9B1, "M", "𧃒"), + (0x2F9B2, "M", "䕫"), + (0x2F9B3, "M", "虐"), + (0x2F9B4, "M", "虜"), + (0x2F9B5, "M", "虧"), + (0x2F9B6, "M", "虩"), + (0x2F9B7, "M", "蚩"), + (0x2F9B8, "M", "蚈"), + (0x2F9B9, "M", "蜎"), + (0x2F9BA, "M", "蛢"), + (0x2F9BB, "M", "蝹"), + (0x2F9BC, "M", "蜨"), + (0x2F9BD, "M", "蝫"), + (0x2F9BE, "M", "螆"), + (0x2F9BF, "X"), + (0x2F9C0, "M", "蟡"), + (0x2F9C1, "M", "蠁"), + (0x2F9C2, "M", "䗹"), + (0x2F9C3, "M", "衠"), + (0x2F9C4, "M", "衣"), + (0x2F9C5, "M", "𧙧"), + (0x2F9C6, "M", "裗"), + (0x2F9C7, "M", "裞"), + (0x2F9C8, "M", "䘵"), + (0x2F9C9, "M", "裺"), + (0x2F9CA, "M", "㒻"), + (0x2F9CB, "M", "𧢮"), + (0x2F9CC, "M", "𧥦"), + (0x2F9CD, "M", "䚾"), + (0x2F9CE, "M", "䛇"), + (0x2F9CF, "M", "誠"), + (0x2F9D0, "M", "諭"), + (0x2F9D1, "M", "變"), + (0x2F9D2, "M", "豕"), + (0x2F9D3, "M", "𧲨"), + (0x2F9D4, "M", "貫"), + (0x2F9D5, "M", "賁"), + (0x2F9D6, "M", "贛"), + (0x2F9D7, "M", "起"), + ] + + +def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F9D8, "M", "𧼯"), + (0x2F9D9, "M", "𠠄"), + (0x2F9DA, "M", "跋"), + (0x2F9DB, "M", "趼"), + (0x2F9DC, "M", "跰"), + (0x2F9DD, "M", "𠣞"), + (0x2F9DE, "M", "軔"), + (0x2F9DF, "M", "輸"), + (0x2F9E0, "M", "𨗒"), + (0x2F9E1, "M", "𨗭"), + (0x2F9E2, "M", "邔"), + (0x2F9E3, "M", "郱"), + (0x2F9E4, "M", "鄑"), + (0x2F9E5, "M", "𨜮"), + (0x2F9E6, "M", "鄛"), + (0x2F9E7, "M", "鈸"), + (0x2F9E8, "M", "鋗"), + (0x2F9E9, "M", "鋘"), + (0x2F9EA, "M", "鉼"), + (0x2F9EB, "M", "鏹"), + (0x2F9EC, "M", "鐕"), + (0x2F9ED, "M", "𨯺"), + (0x2F9EE, "M", "開"), + (0x2F9EF, "M", "䦕"), + (0x2F9F0, "M", "閷"), + (0x2F9F1, "M", "𨵷"), + (0x2F9F2, "M", "䧦"), + (0x2F9F3, "M", "雃"), + (0x2F9F4, "M", "嶲"), + (0x2F9F5, "M", "霣"), + (0x2F9F6, "M", "𩅅"), + (0x2F9F7, "M", "𩈚"), + (0x2F9F8, "M", "䩮"), + (0x2F9F9, "M", "䩶"), + (0x2F9FA, "M", "韠"), + (0x2F9FB, "M", "𩐊"), + (0x2F9FC, "M", "䪲"), + (0x2F9FD, "M", "𩒖"), + (0x2F9FE, "M", "頋"), + (0x2FA00, "M", "頩"), + (0x2FA01, "M", "𩖶"), + (0x2FA02, "M", "飢"), + (0x2FA03, "M", "䬳"), + (0x2FA04, "M", "餩"), + (0x2FA05, "M", "馧"), + (0x2FA06, "M", "駂"), + (0x2FA07, "M", "駾"), + (0x2FA08, "M", "䯎"), + (0x2FA09, "M", "𩬰"), + (0x2FA0A, "M", "鬒"), + (0x2FA0B, "M", "鱀"), + (0x2FA0C, "M", "鳽"), + (0x2FA0D, "M", "䳎"), + (0x2FA0E, "M", "䳭"), + (0x2FA0F, "M", "鵧"), + (0x2FA10, "M", "𪃎"), + (0x2FA11, "M", "䳸"), + (0x2FA12, "M", "𪄅"), + (0x2FA13, "M", "𪈎"), + (0x2FA14, "M", "𪊑"), + (0x2FA15, "M", "麻"), + (0x2FA16, "M", "䵖"), + (0x2FA17, "M", "黹"), + (0x2FA18, "M", "黾"), + (0x2FA19, "M", "鼅"), + (0x2FA1A, "M", "鼏"), + (0x2FA1B, 
"M", "鼖"), + (0x2FA1C, "M", "鼻"), + (0x2FA1D, "M", "𪘀"), + (0x2FA1E, "X"), + (0x30000, "V"), + (0x3134B, "X"), + (0x31350, "V"), + (0x323B0, "X"), + (0xE0100, "I"), + (0xE01F0, "X"), + ] + + +uts46data = tuple( + _seg_0() + + _seg_1() + + _seg_2() + + _seg_3() + + _seg_4() + + _seg_5() + + _seg_6() + + _seg_7() + + _seg_8() + + _seg_9() + + _seg_10() + + _seg_11() + + _seg_12() + + _seg_13() + + _seg_14() + + _seg_15() + + _seg_16() + + _seg_17() + + _seg_18() + + _seg_19() + + _seg_20() + + _seg_21() + + _seg_22() + + _seg_23() + + _seg_24() + + _seg_25() + + _seg_26() + + _seg_27() + + _seg_28() + + _seg_29() + + _seg_30() + + _seg_31() + + _seg_32() + + _seg_33() + + _seg_34() + + _seg_35() + + _seg_36() + + _seg_37() + + _seg_38() + + _seg_39() + + _seg_40() + + _seg_41() + + _seg_42() + + _seg_43() + + _seg_44() + + _seg_45() + + _seg_46() + + _seg_47() + + _seg_48() + + _seg_49() + + _seg_50() + + _seg_51() + + _seg_52() + + _seg_53() + + _seg_54() + + _seg_55() + + _seg_56() + + _seg_57() + + _seg_58() + + _seg_59() + + _seg_60() + + _seg_61() + + _seg_62() + + _seg_63() + + _seg_64() + + _seg_65() + + _seg_66() + + _seg_67() + + _seg_68() + + _seg_69() + + _seg_70() + + _seg_71() + + _seg_72() + + _seg_73() + + _seg_74() + + _seg_75() + + _seg_76() + + _seg_77() + + _seg_78() + + _seg_79() + + _seg_80() + + _seg_81() +) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] diff --git a/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/METADATA b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/METADATA new file mode 100644 index 0000000..3467022 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/METADATA @@ -0,0 +1,170 @@ +Metadata-Version: 2.4 +Name: jsonschema +Version: 4.25.1 +Summary: An implementation of JSON Schema validation for Python +Project-URL: Homepage, https://github.com/python-jsonschema/jsonschema +Project-URL: Documentation, https://python-jsonschema.readthedocs.io/ +Project-URL: Issues, https://github.com/python-jsonschema/jsonschema/issues/ +Project-URL: Funding, https://github.com/sponsors/Julian +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=pypi-link +Project-URL: Changelog, https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst +Project-URL: Source, https://github.com/python-jsonschema/jsonschema +Author-email: Julian Berman +License-Expression: MIT +License-File: COPYING +Keywords: data validation,json,json schema,jsonschema,validation +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: File Formats :: JSON 
+Classifier: Topic :: File Formats :: JSON :: JSON Schema
+Requires-Python: >=3.9
+Requires-Dist: attrs>=22.2.0
+Requires-Dist: jsonschema-specifications>=2023.03.6
+Requires-Dist: referencing>=0.28.4
+Requires-Dist: rpds-py>=0.7.1
+Provides-Extra: format
+Requires-Dist: fqdn; extra == 'format'
+Requires-Dist: idna; extra == 'format'
+Requires-Dist: isoduration; extra == 'format'
+Requires-Dist: jsonpointer>1.13; extra == 'format'
+Requires-Dist: rfc3339-validator; extra == 'format'
+Requires-Dist: rfc3987; extra == 'format'
+Requires-Dist: uri-template; extra == 'format'
+Requires-Dist: webcolors>=1.11; extra == 'format'
+Provides-Extra: format-nongpl
+Requires-Dist: fqdn; extra == 'format-nongpl'
+Requires-Dist: idna; extra == 'format-nongpl'
+Requires-Dist: isoduration; extra == 'format-nongpl'
+Requires-Dist: jsonpointer>1.13; extra == 'format-nongpl'
+Requires-Dist: rfc3339-validator; extra == 'format-nongpl'
+Requires-Dist: rfc3986-validator>0.1.0; extra == 'format-nongpl'
+Requires-Dist: rfc3987-syntax>=1.1.0; extra == 'format-nongpl'
+Requires-Dist: uri-template; extra == 'format-nongpl'
+Requires-Dist: webcolors>=24.6.0; extra == 'format-nongpl'
+Description-Content-Type: text/x-rst
+
+==========
+jsonschema
+==========
+
+|PyPI| |Pythons| |CI| |ReadTheDocs| |Precommit| |Zenodo|
+
+.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema.svg
+  :alt: PyPI version
+  :target: https://pypi.org/project/jsonschema/
+
+.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema.svg
+  :alt: Supported Python versions
+  :target: https://pypi.org/project/jsonschema/
+
+.. |CI| image:: https://github.com/python-jsonschema/jsonschema/workflows/CI/badge.svg
+  :alt: Build status
+  :target: https://github.com/python-jsonschema/jsonschema/actions?query=workflow%3ACI
+
+.. |ReadTheDocs| image:: https://readthedocs.org/projects/python-jsonschema/badge/?version=stable&style=flat
+  :alt: ReadTheDocs status
+  :target: https://python-jsonschema.readthedocs.io/en/stable/
+
+.. |Precommit| image:: https://results.pre-commit.ci/badge/github/python-jsonschema/jsonschema/main.svg
+  :alt: pre-commit.ci status
+  :target: https://results.pre-commit.ci/latest/github/python-jsonschema/jsonschema/main
+
+.. |Zenodo| image:: https://zenodo.org/badge/3072629.svg
+  :alt: Zenodo DOI
+  :target: https://zenodo.org/badge/latestdoi/3072629
+
+
+``jsonschema`` is an implementation of the `JSON Schema <https://json-schema.org/>`_ specification for Python.
+
+.. code:: python
+
+    >>> from jsonschema import validate
+
+    >>> # A sample schema, like what we'd get from json.load()
+    >>> schema = {
+    ...     "type" : "object",
+    ...     "properties" : {
+    ...         "price" : {"type" : "number"},
+    ...         "name" : {"type" : "string"},
+    ...     },
+    ... }
+
+    >>> # If no exception is raised by validate(), the instance is valid.
+    >>> validate(instance={"name" : "Eggs", "price" : 34.99}, schema=schema)
+
+    >>> validate(
+    ...     instance={"name" : "Eggs", "price" : "Invalid"}, schema=schema,
+    ... )  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+        ...
+    ValidationError: 'Invalid' is not of type 'number'
+
+It can also be used from the command line by installing `check-jsonschema <https://github.com/python-jsonschema/check-jsonschema>`_.
+
+Features
+--------
+
+* Full support for Draft 2020-12, Draft 2019-09, Draft 7, Draft 6, Draft 4 and Draft 3
+
+* Lazy validation that can iteratively report *all* validation errors (see the sketch below).
+
+* Programmatic querying of which properties or items failed validation.
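+
+As a minimal sketch of those last two features (the schema and instance here
+are illustrative; ``iter_errors`` and ``best_match`` are the public APIs
+involved):
+
+.. code:: python
+
+    from jsonschema import Draft202012Validator
+    from jsonschema.exceptions import best_match
+
+    # An illustrative schema and instance, chosen only for this sketch.
+    schema = {
+        "type": "object",
+        "properties": {
+            "name": {"type": "string"},
+            "price": {"type": "number"},
+        },
+        "required": ["name", "price"],
+    }
+    instance = {"name": 12}  # wrong type for "name" *and* missing "price"
+
+    validator = Draft202012Validator(schema)
+
+    # Lazy validation: iter_errors() yields every error instead of raising
+    # on the first one, so all problems can be reported in a single pass.
+    for error in validator.iter_errors(instance):
+        print(list(error.absolute_path), error.message)
+
+    # Programmatic querying: best_match() picks the most relevant error.
+    print(best_match(validator.iter_errors(instance)).message)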
+
+
+Installation
+------------
+
+``jsonschema`` is available on `PyPI <https://pypi.org/project/jsonschema/>`_. You can install using pip:
+
+.. code:: bash
+
+    $ pip install jsonschema
+
+
+Extras
+======
+
+Two extras are available when installing the package, both currently related to ``format`` validation:
+
+    * ``format``
+    * ``format-nongpl``
+
+They can be used when installing in order to include additional dependencies, e.g.:
+
+.. code:: bash
+
+    $ pip install jsonschema'[format]'
+
+Be aware that the mere presence of these dependencies – or even the specification of ``format`` checks in a schema – does *not* activate format checks (as per the specification).
+Please read the format validation documentation for further details; a short sketch of actually enabling format checks follows the license text below.
+
+About
+-----
+
+I'm Julian Berman.
+
+``jsonschema`` is on `GitHub <https://github.com/python-jsonschema/jsonschema>`_.
+
+Get in touch, via GitHub or otherwise, if you've got something to contribute, it'd be most welcome!
+
+If you feel overwhelmingly grateful, you can also `sponsor me <https://github.com/sponsors/Julian>`_.
+
+And for companies who appreciate ``jsonschema`` and its continued support and growth, ``jsonschema`` is also now supportable via `TideLift <https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=pypi-link>`_.
+
+
+Release Information
+-------------------
+
+v4.25.1
+=======
+
+* Fix an incorrect required argument in the ``Validator`` protocol's type annotations (#1396).
diff --git a/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/RECORD b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/RECORD
new file mode 100644
index 0000000..341cd76
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/RECORD
@@ -0,0 +1,81 @@
+../../../bin/jsonschema,sha256=DJQY3bO9sMvGrlhDoCiovD15LH8M5_zmP8Ax68vimEI,219
+jsonschema-4.25.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+jsonschema-4.25.1.dist-info/METADATA,sha256=Mfg8FXnkbhr2ImnO-l2DU07xVfPuIZFPeIG71US8Elw,7608
+jsonschema-4.25.1.dist-info/RECORD,,
+jsonschema-4.25.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+jsonschema-4.25.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+jsonschema-4.25.1.dist-info/entry_points.txt,sha256=vO7rX4Fs_xIVJy2pnAtKgTSxfpnozAVQ0DjCmpMxnWE,51
+jsonschema-4.25.1.dist-info/licenses/COPYING,sha256=T5KgFaE8TRoEC-8BiqE0MLTxvHO0Gxa7hGw0Z2bedDk,1057
+jsonschema/__init__.py,sha256=p-Rw4TS_0OPHZIJyImDWsdWgmd6CPWHMXLq7BuQxTGc,3941
+jsonschema/__main__.py,sha256=iLsZf2upUB3ilBKTlMnyK-HHt2Cnnfkwwxi_c6gLvSA,115
+jsonschema/__pycache__/__init__.cpython-312.pyc,,
+jsonschema/__pycache__/__main__.cpython-312.pyc,,
+jsonschema/__pycache__/_format.cpython-312.pyc,,
+jsonschema/__pycache__/_keywords.cpython-312.pyc,,
+jsonschema/__pycache__/_legacy_keywords.cpython-312.pyc,,
+jsonschema/__pycache__/_types.cpython-312.pyc,,
+jsonschema/__pycache__/_typing.cpython-312.pyc,,
+jsonschema/__pycache__/_utils.cpython-312.pyc,,
+jsonschema/__pycache__/cli.cpython-312.pyc,,
+jsonschema/__pycache__/exceptions.cpython-312.pyc,,
+jsonschema/__pycache__/protocols.cpython-312.pyc,,
+jsonschema/__pycache__/validators.cpython-312.pyc,,
+jsonschema/_format.py,sha256=ip1N16CBBOb_8sM_iejyW0U5JY9kXoz3O_AFj1SHFl8,15581
+jsonschema/_keywords.py,sha256=r8_DrqAfn6QLwQnmXEggveiSU-UaIL2p2nuPINelfFc,14949
+jsonschema/_legacy_keywords.py,sha256=2tWuwRPWbYS7EAl8wBIC_rabGuv1J4dfYLqNEPpShhA,15191
+jsonschema/_types.py,sha256=0pYJG61cn_4ZWVnqyD24tax2QBMlnSPy0fcECCpASMk,5456
+jsonschema/_typing.py,sha256=hFfAEeFJ76LYAl_feuVa0gnHnV9VEq_UhjLJS-7axgY,630
+jsonschema/_utils.py,sha256=Xv6_wKKslBJlwyj9-j2c8JDFw-4z4aWFnVe2pX8h7U4,10659
+jsonschema/benchmarks/__init__.py,sha256=A0sQrxDBVHSyQ-8ru3L11hMXf3q9gVuB9x_YgHb4R9M,70 +jsonschema/benchmarks/__pycache__/__init__.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/contains.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/issue232.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/nested_schemas.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/subcomponents.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/unused_registry.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/useless_keywords.cpython-312.pyc,, +jsonschema/benchmarks/__pycache__/validator_creation.cpython-312.pyc,, +jsonschema/benchmarks/const_vs_enum.py,sha256=DVFi3WDqBalZFOibnjpX1uTSr3Rxa2cPgFcowd7Ukrs,830 +jsonschema/benchmarks/contains.py,sha256=gexQoUrCOwECofbt19BeosQZ7WFL6PDdkX49DWwBlOg,786 +jsonschema/benchmarks/issue232.py,sha256=3LLYLIlBGQnVuyyo2iAv-xky5P6PRFHANx4-zIIQOoE,521 +jsonschema/benchmarks/issue232/issue.json,sha256=eaPOZjMRu5u8RpKrsA9uk7ucPZS5tkKG4D_hkOTQ3Hk,117105 +jsonschema/benchmarks/json_schema_test_suite.py,sha256=PvfabpUYcF4_7csYDTcTauED8rnFEGYbdY5RqTXD08s,320 +jsonschema/benchmarks/nested_schemas.py,sha256=mo07dx-CIgmSOI62CNs4g5xu1FzHklLBpkQoDxWYcKs,1892 +jsonschema/benchmarks/subcomponents.py,sha256=fEyiMzsWeK2pd7DEGCuuY-vzGunwhHczRBWEnBRLKIo,1113 +jsonschema/benchmarks/unused_registry.py,sha256=hwRwONc9cefPtYzkoX_TYRO3GyUojriv0-YQaK3vnj0,940 +jsonschema/benchmarks/useless_applicator_schemas.py,sha256=EVm5-EtOEFoLP_Vt2j4SrCwlx05NhPqNuZQ6LIMP1Dc,3342 +jsonschema/benchmarks/useless_keywords.py,sha256=bj_zKr1oVctFlqyZaObCsYTgFjiiNgPzC0hr1Y868mE,867 +jsonschema/benchmarks/validator_creation.py,sha256=UkUQlLAnussnr_KdCIdad6xx2pXxQLmYtsXoiirKeWQ,285 +jsonschema/cli.py,sha256=av90OtpSxuiko3FAyEHtxpI-NSuX3WMtoQpIx09obJY,8445 +jsonschema/exceptions.py,sha256=b42hUDOfPFcprI4ZlNpjDeLKv8k9vOhgWct2jyDlxzk,15256 +jsonschema/protocols.py,sha256=lgyryVHqFijSFRAz95ch3VN2Fz-oTFyhj9Obcy5P_CI,7198 +jsonschema/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema/tests/__pycache__/__init__.cpython-312.pyc,, +jsonschema/tests/__pycache__/_suite.cpython-312.pyc,, +jsonschema/tests/__pycache__/fuzz_validate.cpython-312.pyc,, +jsonschema/tests/__pycache__/test_cli.cpython-312.pyc,, +jsonschema/tests/__pycache__/test_deprecations.cpython-312.pyc,, +jsonschema/tests/__pycache__/test_exceptions.cpython-312.pyc,, +jsonschema/tests/__pycache__/test_format.cpython-312.pyc,, +jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-312.pyc,, +jsonschema/tests/__pycache__/test_types.cpython-312.pyc,, +jsonschema/tests/__pycache__/test_utils.cpython-312.pyc,, +jsonschema/tests/__pycache__/test_validators.cpython-312.pyc,, +jsonschema/tests/_suite.py,sha256=2k0X91N7dOHhQc5mrYv40OKf1weioj6RMBqWgLT6-PI,8374 +jsonschema/tests/fuzz_validate.py,sha256=fUA7yTJIihaCwJplkUehZeyB84HcXEcqtY5oPJXIO7I,1114 +jsonschema/tests/test_cli.py,sha256=A89r5LOHy-peLPZA5YDkOaMTWqzQO_w2Tu8WFz_vphM,28544 +jsonschema/tests/test_deprecations.py,sha256=yG6mkRJHpTHbWoxpLC5y5H7fk8erGOs8f_9V4tCBEh8,15754 +jsonschema/tests/test_exceptions.py,sha256=lWTRyeSeOaFd5dnutqy1YG9uocnxeM_0cIEVG6GgGMI,24310 +jsonschema/tests/test_format.py,sha256=eVm5SMaWF2lOPO28bPAwNvkiQvHCQKy-MnuAgEchfEc,3188 
+jsonschema/tests/test_jsonschema_test_suite.py,sha256=tAfxknM65OR9LyDPHu1pkEaombLgjRLnJ6FPiWPdxjg,8461 +jsonschema/tests/test_types.py,sha256=cF51KTDmdsx06MrIc4fXKt0X9fIsVgw5uhT8CamVa8U,6977 +jsonschema/tests/test_utils.py,sha256=sao74o1PyYMxBfqweokQN48CFSS6yhJk5FkCfMJ5PsI,4163 +jsonschema/tests/test_validators.py,sha256=eiaigsZMzHYYsniQ1UPygaS56a1d-_7-9NC4wVXAhzs,87975 +jsonschema/tests/typing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema/tests/typing/__pycache__/__init__.cpython-312.pyc,, +jsonschema/tests/typing/__pycache__/test_all_concrete_validators_match_protocol.cpython-312.pyc,, +jsonschema/tests/typing/test_all_concrete_validators_match_protocol.py,sha256=I5XUl5ZYUSZo3APkF10l8Mnfk09oXKqvXWttGGd3sDk,1238 +jsonschema/validators.py,sha256=AI0bQrGJpvEH1RSO3ynwWcNvfkqN6WJPgyy0VN7WlSE,47147 diff --git a/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/REQUESTED b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/WHEEL new file mode 100644 index 0000000..12228d4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/entry_points.txt b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/entry_points.txt new file mode 100644 index 0000000..eecef9d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +jsonschema = jsonschema.cli:main diff --git a/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/licenses/COPYING b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/licenses/COPYING new file mode 100644 index 0000000..af9cfbd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema-4.25.1.dist-info/licenses/COPYING @@ -0,0 +1,19 @@ +Copyright (c) 2013 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
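Returning to the ``format`` extras described in the METADATA above: as that
README notes, installing the extra dependencies does not by itself turn on
``format`` checking. A check only runs when a ``FormatChecker`` is attached to
the validator, for instance via the ``FORMAT_CHECKER`` attribute that the
deprecation shims in ``jsonschema/__init__.py`` below point to. A minimal
sketch (the schema and sample value are illustrative; the ``ipv4`` checker is
backed by the stdlib ``ipaddress`` module, so this particular example needs no
extras at all):

.. code:: python

    from jsonschema import Draft202012Validator, ValidationError

    # Without format_checker=..., "format" is annotation-only and the
    # invalid string below would pass validation.
    validator = Draft202012Validator(
        {"type": "string", "format": "ipv4"},
        format_checker=Draft202012Validator.FORMAT_CHECKER,
    )

    try:
        validator.validate("not-an-ip")
    except ValidationError as error:
        print(error.message)  # 'not-an-ip' is not a 'ipv4'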
diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__init__.py b/.venv/lib/python3.12/site-packages/jsonschema/__init__.py new file mode 100644 index 0000000..d8dec8c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/__init__.py @@ -0,0 +1,120 @@ +""" +An implementation of JSON Schema for Python. + +The main functionality is provided by the validator classes for each of the +supported JSON Schema versions. + +Most commonly, `jsonschema.validators.validate` is the quickest way to simply +validate a given instance under a schema, and will create a validator +for you. +""" +import warnings + +from jsonschema._format import FormatChecker +from jsonschema._types import TypeChecker +from jsonschema.exceptions import SchemaError, ValidationError +from jsonschema.validators import ( + Draft3Validator, + Draft4Validator, + Draft6Validator, + Draft7Validator, + Draft201909Validator, + Draft202012Validator, + validate, +) + + +def __getattr__(name): + if name == "__version__": + warnings.warn( + "Accessing jsonschema.__version__ is deprecated and will be " + "removed in a future release. Use importlib.metadata directly " + "to query for jsonschema's version.", + DeprecationWarning, + stacklevel=2, + ) + + from importlib import metadata + return metadata.version("jsonschema") + elif name == "RefResolver": + from jsonschema.validators import _RefResolver + warnings.warn( + _RefResolver._DEPRECATION_MESSAGE, + DeprecationWarning, + stacklevel=2, + ) + return _RefResolver + elif name == "ErrorTree": + warnings.warn( + "Importing ErrorTree directly from the jsonschema package " + "is deprecated and will become an ImportError. Import it from " + "jsonschema.exceptions instead.", + DeprecationWarning, + stacklevel=2, + ) + from jsonschema.exceptions import ErrorTree + return ErrorTree + elif name == "FormatError": + warnings.warn( + "Importing FormatError directly from the jsonschema package " + "is deprecated and will become an ImportError. Import it from " + "jsonschema.exceptions instead.", + DeprecationWarning, + stacklevel=2, + ) + from jsonschema.exceptions import FormatError + return FormatError + elif name == "Validator": + warnings.warn( + "Importing Validator directly from the jsonschema package " + "is deprecated and will become an ImportError. Import it from " + "jsonschema.protocols instead.", + DeprecationWarning, + stacklevel=2, + ) + from jsonschema.protocols import Validator + return Validator + elif name == "RefResolutionError": + from jsonschema.exceptions import _RefResolutionError + warnings.warn( + _RefResolutionError._DEPRECATION_MESSAGE, + DeprecationWarning, + stacklevel=2, + ) + return _RefResolutionError + + format_checkers = { + "draft3_format_checker": Draft3Validator, + "draft4_format_checker": Draft4Validator, + "draft6_format_checker": Draft6Validator, + "draft7_format_checker": Draft7Validator, + "draft201909_format_checker": Draft201909Validator, + "draft202012_format_checker": Draft202012Validator, + } + ValidatorForFormat = format_checkers.get(name) + if ValidatorForFormat is not None: + warnings.warn( + f"Accessing jsonschema.{name} is deprecated and will be " + "removed in a future release. 
Instead, use the FORMAT_CHECKER " + "attribute on the corresponding Validator.", + DeprecationWarning, + stacklevel=2, + ) + return ValidatorForFormat.FORMAT_CHECKER + + raise AttributeError(f"module {__name__} has no attribute {name}") + + +__all__ = [ + "Draft3Validator", + "Draft4Validator", + "Draft6Validator", + "Draft7Validator", + "Draft201909Validator", + "Draft202012Validator", + "FormatChecker", + "SchemaError", + "TypeChecker", + "ValidationError", + "validate", +] diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__main__.py b/.venv/lib/python3.12/site-packages/jsonschema/__main__.py new file mode 100644 index 0000000..fb260ae --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/__main__.py @@ -0,0 +1,6 @@ +""" +The jsonschema CLI is now deprecated in favor of check-jsonschema. +""" +from jsonschema.cli import main + +main() diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..0ebb7e5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..9955b0d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/__main__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_format.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_format.cpython-312.pyc new file mode 100644 index 0000000..82c744c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_format.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_keywords.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_keywords.cpython-312.pyc new file mode 100644 index 0000000..bdd9317 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_keywords.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_legacy_keywords.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_legacy_keywords.cpython-312.pyc new file mode 100644 index 0000000..98dd528 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_legacy_keywords.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_types.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_types.cpython-312.pyc new file mode 100644 index 0000000..d4208fb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_types.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_typing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_typing.cpython-312.pyc new file mode 100644 index 0000000..ed8e60e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_typing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_utils.cpython-312.pyc new file mode 100644 index 0000000..045565a Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/_utils.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/cli.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/cli.cpython-312.pyc
new file mode 100644
index 0000000..0c547e7
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/cli.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/exceptions.cpython-312.pyc
new file mode 100644
index 0000000..65e2483
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/exceptions.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/protocols.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/protocols.cpython-312.pyc
new file mode 100644
index 0000000..917600f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/protocols.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/validators.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/validators.cpython-312.pyc
new file mode 100644
index 0000000..66129b7
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/__pycache__/validators.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/jsonschema/_format.py b/.venv/lib/python3.12/site-packages/jsonschema/_format.py
new file mode 100644
index 0000000..6fc7a01
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/jsonschema/_format.py
@@ -0,0 +1,546 @@
+from __future__ import annotations
+
+from contextlib import suppress
+from datetime import date, datetime
+from uuid import UUID
+import ipaddress
+import re
+import typing
+import warnings
+
+from jsonschema.exceptions import FormatError
+
+_FormatCheckCallable = typing.Callable[[object], bool]
+#: A format checker callable.
+_F = typing.TypeVar("_F", bound=_FormatCheckCallable)
+_RaisesType = typing.Union[type[Exception], tuple[type[Exception], ...]]
+
+_RE_DATE = re.compile(r"^\d{4}-\d{2}-\d{2}$", re.ASCII)
+
+
+class FormatChecker:
+    """
+    A ``format`` property checker.
+
+    JSON Schema does not mandate that the ``format`` property actually do any
+    validation. If validation is desired however, instances of this class can
+    be hooked into validators to enable format validation.
+
+    `FormatChecker` objects always return ``True`` when asked about
+    formats that they do not know how to validate.
+
+    To add a check for a custom format use the `FormatChecker.checks`
+    decorator.
+
+    Arguments:
+
+        formats:
+
+            The known formats to validate. This argument can be used to
+            limit which formats will be used during validation.
+
+    """
+
+    checkers: dict[
+        str,
+        tuple[_FormatCheckCallable, _RaisesType],
+    ] = {}  # noqa: RUF012
+
+    def __init__(self, formats: typing.Iterable[str] | None = None):
+        if formats is None:
+            formats = self.checkers.keys()
+        self.checkers = {k: self.checkers[k] for k in formats}
+
+    def __repr__(self):
+        return f"<FormatChecker checkers={sorted(self.checkers)}>"
+
+    def checks(
+        self, format: str, raises: _RaisesType = (),
+    ) -> typing.Callable[[_F], _F]:
+        """
+        Register a decorated function as validating a new format.
+
+        Arguments:
+
+            format:
+
+                The format that the decorated function will check.
+
+            raises:
+
+                The exception(s) raised by the decorated function when an
+                invalid instance is found.
+ + The exception object will be accessible as the + `jsonschema.exceptions.ValidationError.cause` attribute of the + resulting validation error. + + """ + + def _checks(func: _F) -> _F: + self.checkers[format] = (func, raises) + return func + + return _checks + + @classmethod + def cls_checks( + cls, format: str, raises: _RaisesType = (), + ) -> typing.Callable[[_F], _F]: + warnings.warn( + ( + "FormatChecker.cls_checks is deprecated. Call " + "FormatChecker.checks on a specific FormatChecker instance " + "instead." + ), + DeprecationWarning, + stacklevel=2, + ) + return cls._cls_checks(format=format, raises=raises) + + @classmethod + def _cls_checks( + cls, format: str, raises: _RaisesType = (), + ) -> typing.Callable[[_F], _F]: + def _checks(func: _F) -> _F: + cls.checkers[format] = (func, raises) + return func + + return _checks + + def check(self, instance: object, format: str) -> None: + """ + Check whether the instance conforms to the given format. + + Arguments: + + instance (*any primitive type*, i.e. str, number, bool): + + The instance to check + + format: + + The format that instance should conform to + + Raises: + + FormatError: + + if the instance does not conform to ``format`` + + """ + if format not in self.checkers: + return + + func, raises = self.checkers[format] + result, cause = None, None + try: + result = func(instance) + except raises as e: + cause = e + if not result: + raise FormatError(f"{instance!r} is not a {format!r}", cause=cause) + + def conforms(self, instance: object, format: str) -> bool: + """ + Check whether the instance conforms to the given format. + + Arguments: + + instance (*any primitive type*, i.e. str, number, bool): + + The instance to check + + format: + + The format that instance should conform to + + Returns: + + bool: whether it conformed + + """ + try: + self.check(instance, format) + except FormatError: + return False + else: + return True + + +draft3_format_checker = FormatChecker() +draft4_format_checker = FormatChecker() +draft6_format_checker = FormatChecker() +draft7_format_checker = FormatChecker() +draft201909_format_checker = FormatChecker() +draft202012_format_checker = FormatChecker() + +_draft_checkers: dict[str, FormatChecker] = dict( + draft3=draft3_format_checker, + draft4=draft4_format_checker, + draft6=draft6_format_checker, + draft7=draft7_format_checker, + draft201909=draft201909_format_checker, + draft202012=draft202012_format_checker, +) + + +def _checks_drafts( + name=None, + draft3=None, + draft4=None, + draft6=None, + draft7=None, + draft201909=None, + draft202012=None, + raises=(), +) -> typing.Callable[[_F], _F]: + draft3 = draft3 or name + draft4 = draft4 or name + draft6 = draft6 or name + draft7 = draft7 or name + draft201909 = draft201909 or name + draft202012 = draft202012 or name + + def wrap(func: _F) -> _F: + if draft3: + func = _draft_checkers["draft3"].checks(draft3, raises)(func) + if draft4: + func = _draft_checkers["draft4"].checks(draft4, raises)(func) + if draft6: + func = _draft_checkers["draft6"].checks(draft6, raises)(func) + if draft7: + func = _draft_checkers["draft7"].checks(draft7, raises)(func) + if draft201909: + func = _draft_checkers["draft201909"].checks(draft201909, raises)( + func, + ) + if draft202012: + func = _draft_checkers["draft202012"].checks(draft202012, raises)( + func, + ) + + # Oy. This is bad global state, but relied upon for now, until + # deprecation. 
See #519 and test_format_checkers_come_with_defaults + FormatChecker._cls_checks( + draft202012 or draft201909 or draft7 or draft6 or draft4 or draft3, + raises, + )(func) + return func + + return wrap + + +@_checks_drafts(name="idn-email") +@_checks_drafts(name="email") +def is_email(instance: object) -> bool: + if not isinstance(instance, str): + return True + return "@" in instance + + +@_checks_drafts( + draft3="ip-address", + draft4="ipv4", + draft6="ipv4", + draft7="ipv4", + draft201909="ipv4", + draft202012="ipv4", + raises=ipaddress.AddressValueError, +) +def is_ipv4(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(ipaddress.IPv4Address(instance)) + + +@_checks_drafts(name="ipv6", raises=ipaddress.AddressValueError) +def is_ipv6(instance: object) -> bool: + if not isinstance(instance, str): + return True + address = ipaddress.IPv6Address(instance) + return not getattr(address, "scope_id", "") + + +with suppress(ImportError): + from fqdn import FQDN + + @_checks_drafts( + draft3="host-name", + draft4="hostname", + draft6="hostname", + draft7="hostname", + draft201909="hostname", + draft202012="hostname", + # fqdn.FQDN("") raises a ValueError due to a bug + # however, it's not clear when or if that will be fixed, so catch it + # here for now + raises=ValueError, + ) + def is_host_name(instance: object) -> bool: + if not isinstance(instance, str): + return True + return FQDN(instance, min_labels=1).is_valid + + +with suppress(ImportError): + # The built-in `idna` codec only implements RFC 3890, so we go elsewhere. + import idna + + @_checks_drafts( + draft7="idn-hostname", + draft201909="idn-hostname", + draft202012="idn-hostname", + raises=(idna.IDNAError, UnicodeError), + ) + def is_idn_host_name(instance: object) -> bool: + if not isinstance(instance, str): + return True + idna.encode(instance) + return True + + +try: + import rfc3987 +except ImportError: + with suppress(ImportError): + from rfc3986_validator import validate_rfc3986 + + @_checks_drafts(name="uri") + def is_uri(instance: object) -> bool: + if not isinstance(instance, str): + return True + return validate_rfc3986(instance, rule="URI") + + @_checks_drafts( + draft6="uri-reference", + draft7="uri-reference", + draft201909="uri-reference", + draft202012="uri-reference", + raises=ValueError, + ) + def is_uri_reference(instance: object) -> bool: + if not isinstance(instance, str): + return True + return validate_rfc3986(instance, rule="URI_reference") + + with suppress(ImportError): + from rfc3987_syntax import is_valid_syntax as _rfc3987_is_valid_syntax + + @_checks_drafts( + draft7="iri", + draft201909="iri", + draft202012="iri", + raises=ValueError, + ) + def is_iri(instance: object) -> bool: + if not isinstance(instance, str): + return True + return _rfc3987_is_valid_syntax("iri", instance) + + @_checks_drafts( + draft7="iri-reference", + draft201909="iri-reference", + draft202012="iri-reference", + raises=ValueError, + ) + def is_iri_reference(instance: object) -> bool: + if not isinstance(instance, str): + return True + return _rfc3987_is_valid_syntax("iri_reference", instance) + +else: + + @_checks_drafts( + draft7="iri", + draft201909="iri", + draft202012="iri", + raises=ValueError, + ) + def is_iri(instance: object) -> bool: + if not isinstance(instance, str): + return True + return rfc3987.parse(instance, rule="IRI") + + @_checks_drafts( + draft7="iri-reference", + draft201909="iri-reference", + draft202012="iri-reference", + raises=ValueError, + ) + def 
is_iri_reference(instance: object) -> bool: + if not isinstance(instance, str): + return True + return rfc3987.parse(instance, rule="IRI_reference") + + @_checks_drafts(name="uri", raises=ValueError) + def is_uri(instance: object) -> bool: + if not isinstance(instance, str): + return True + return rfc3987.parse(instance, rule="URI") + + @_checks_drafts( + draft6="uri-reference", + draft7="uri-reference", + draft201909="uri-reference", + draft202012="uri-reference", + raises=ValueError, + ) + def is_uri_reference(instance: object) -> bool: + if not isinstance(instance, str): + return True + return rfc3987.parse(instance, rule="URI_reference") + + +with suppress(ImportError): + from rfc3339_validator import validate_rfc3339 + + @_checks_drafts(name="date-time") + def is_datetime(instance: object) -> bool: + if not isinstance(instance, str): + return True + return validate_rfc3339(instance.upper()) + + @_checks_drafts( + draft7="time", + draft201909="time", + draft202012="time", + ) + def is_time(instance: object) -> bool: + if not isinstance(instance, str): + return True + return is_datetime("1970-01-01T" + instance) + + +@_checks_drafts(name="regex", raises=re.error) +def is_regex(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(re.compile(instance)) + + +@_checks_drafts( + draft3="date", + draft7="date", + draft201909="date", + draft202012="date", + raises=ValueError, +) +def is_date(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(_RE_DATE.fullmatch(instance) and date.fromisoformat(instance)) + + +@_checks_drafts(draft3="time", raises=ValueError) +def is_draft3_time(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(datetime.strptime(instance, "%H:%M:%S")) # noqa: DTZ007 + + +with suppress(ImportError): + import webcolors + + @_checks_drafts(draft3="color", raises=(ValueError, TypeError)) + def is_css21_color(instance: object) -> bool: + if isinstance(instance, str): + try: + webcolors.name_to_hex(instance) + except ValueError: + webcolors.normalize_hex(instance.lower()) + return True + + +with suppress(ImportError): + import jsonpointer + + @_checks_drafts( + draft6="json-pointer", + draft7="json-pointer", + draft201909="json-pointer", + draft202012="json-pointer", + raises=jsonpointer.JsonPointerException, + ) + def is_json_pointer(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(jsonpointer.JsonPointer(instance)) + + # TODO: I don't want to maintain this, so it + # needs to go either into jsonpointer (pending + # https://github.com/stefankoegl/python-json-pointer/issues/34) or + # into a new external library. 
+ @_checks_drafts( + draft7="relative-json-pointer", + draft201909="relative-json-pointer", + draft202012="relative-json-pointer", + raises=jsonpointer.JsonPointerException, + ) + def is_relative_json_pointer(instance: object) -> bool: + # Definition taken from: + # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 + if not isinstance(instance, str): + return True + if not instance: + return False + + non_negative_integer, rest = [], "" + for i, character in enumerate(instance): + if character.isdigit(): + # digits with a leading "0" are not allowed + if i > 0 and int(instance[i - 1]) == 0: + return False + + non_negative_integer.append(character) + continue + + if not non_negative_integer: + return False + + rest = instance[i:] + break + return (rest == "#") or bool(jsonpointer.JsonPointer(rest)) + + +with suppress(ImportError): + import uri_template + + @_checks_drafts( + draft6="uri-template", + draft7="uri-template", + draft201909="uri-template", + draft202012="uri-template", + ) + def is_uri_template(instance: object) -> bool: + if not isinstance(instance, str): + return True + return uri_template.validate(instance) + + +with suppress(ImportError): + import isoduration + + @_checks_drafts( + draft201909="duration", + draft202012="duration", + raises=isoduration.DurationParsingException, + ) + def is_duration(instance: object) -> bool: + if not isinstance(instance, str): + return True + isoduration.parse_duration(instance) + # FIXME: See bolsote/isoduration#25 and bolsote/isoduration#21 + return instance.endswith(tuple("DMYWHMS")) + + +@_checks_drafts( + draft201909="uuid", + draft202012="uuid", + raises=ValueError, +) +def is_uuid(instance: object) -> bool: + if not isinstance(instance, str): + return True + UUID(instance) + return all(instance[position] == "-" for position in (8, 13, 18, 23)) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/_keywords.py b/.venv/lib/python3.12/site-packages/jsonschema/_keywords.py new file mode 100644 index 0000000..f30f954 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/_keywords.py @@ -0,0 +1,449 @@ +from fractions import Fraction +import re + +from jsonschema._utils import ( + ensure_list, + equal, + extras_msg, + find_additional_properties, + find_evaluated_item_indexes_by_schema, + find_evaluated_property_keys_by_schema, + uniq, +) +from jsonschema.exceptions import FormatError, ValidationError + + +def patternProperties(validator, patternProperties, instance, schema): + if not validator.is_type(instance, "object"): + return + + for pattern, subschema in patternProperties.items(): + for k, v in instance.items(): + if re.search(pattern, k): + yield from validator.descend( + v, subschema, path=k, schema_path=pattern, + ) + + +def propertyNames(validator, propertyNames, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property in instance: + yield from validator.descend(instance=property, schema=propertyNames) + + +def additionalProperties(validator, aP, instance, schema): + if not validator.is_type(instance, "object"): + return + + extras = set(find_additional_properties(instance, schema)) + + if validator.is_type(aP, "object"): + for extra in extras: + yield from validator.descend(instance[extra], aP, path=extra) + elif not aP and extras: + if "patternProperties" in schema: + verb = "does" if len(extras) == 1 else "do" + joined = ", ".join(repr(each) for each in sorted(extras)) + patterns = ", ".join( + repr(each) for each in 
sorted(schema["patternProperties"]) + ) + error = f"{joined} {verb} not match any of the regexes: {patterns}" + yield ValidationError(error) + else: + error = "Additional properties are not allowed (%s %s unexpected)" + yield ValidationError(error % extras_msg(sorted(extras, key=str))) + + +def items(validator, items, instance, schema): + if not validator.is_type(instance, "array"): + return + + prefix = len(schema.get("prefixItems", [])) + total = len(instance) + extra = total - prefix + if extra <= 0: + return + + if items is False: + rest = instance[prefix:] if extra != 1 else instance[prefix] + item = "items" if prefix != 1 else "item" + yield ValidationError( + f"Expected at most {prefix} {item} but found {extra} " + f"extra: {rest!r}", + ) + else: + for index in range(prefix, total): + yield from validator.descend( + instance=instance[index], + schema=items, + path=index, + ) + + +def const(validator, const, instance, schema): + if not equal(instance, const): + yield ValidationError(f"{const!r} was expected") + + +def contains(validator, contains, instance, schema): + if not validator.is_type(instance, "array"): + return + + matches = 0 + min_contains = schema.get("minContains", 1) + max_contains = schema.get("maxContains", len(instance)) + + contains_validator = validator.evolve(schema=contains) + + for each in instance: + if contains_validator.is_valid(each): + matches += 1 + if matches > max_contains: + yield ValidationError( + "Too many items match the given schema " + f"(expected at most {max_contains})", + validator="maxContains", + validator_value=max_contains, + ) + return + + if matches < min_contains: + if not matches: + yield ValidationError( + f"{instance!r} does not contain items " + "matching the given schema", + ) + else: + yield ValidationError( + "Too few items match the given schema (expected at least " + f"{min_contains} but only {matches} matched)", + validator="minContains", + validator_value=min_contains, + ) + + +def exclusiveMinimum(validator, minimum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if instance <= minimum: + yield ValidationError( + f"{instance!r} is less than or equal to " + f"the minimum of {minimum!r}", + ) + + +def exclusiveMaximum(validator, maximum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if instance >= maximum: + yield ValidationError( + f"{instance!r} is greater than or equal " + f"to the maximum of {maximum!r}", + ) + + +def minimum(validator, minimum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if instance < minimum: + message = f"{instance!r} is less than the minimum of {minimum!r}" + yield ValidationError(message) + + +def maximum(validator, maximum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if instance > maximum: + message = f"{instance!r} is greater than the maximum of {maximum!r}" + yield ValidationError(message) + + +def multipleOf(validator, dB, instance, schema): + if not validator.is_type(instance, "number"): + return + + if isinstance(dB, float): + quotient = instance / dB + try: + failed = int(quotient) != quotient + except OverflowError: + # When `instance` is large and `dB` is less than one, + # quotient can overflow to infinity; and then casting to int + # raises an error. + # + # In this case we fall back to Fraction logic, which is + # exact and cannot overflow. 
The performance is also + # acceptable: we try the fast all-float option first, and + # we know that fraction(dB) can have at most a few hundred + # digits in each part. The worst-case slowdown is therefore + # for already-slow enormous integers or Decimals. + failed = (Fraction(instance) / Fraction(dB)).denominator != 1 + else: + failed = instance % dB + + if failed: + yield ValidationError(f"{instance!r} is not a multiple of {dB}") + + +def minItems(validator, mI, instance, schema): + if validator.is_type(instance, "array") and len(instance) < mI: + message = "should be non-empty" if mI == 1 else "is too short" + yield ValidationError(f"{instance!r} {message}") + + +def maxItems(validator, mI, instance, schema): + if validator.is_type(instance, "array") and len(instance) > mI: + message = "is expected to be empty" if mI == 0 else "is too long" + yield ValidationError(f"{instance!r} {message}") + + +def uniqueItems(validator, uI, instance, schema): + if ( + uI + and validator.is_type(instance, "array") + and not uniq(instance) + ): + yield ValidationError(f"{instance!r} has non-unique elements") + + +def pattern(validator, patrn, instance, schema): + if ( + validator.is_type(instance, "string") + and not re.search(patrn, instance) + ): + yield ValidationError(f"{instance!r} does not match {patrn!r}") + + +def format(validator, format, instance, schema): + if validator.format_checker is not None: + try: + validator.format_checker.check(instance, format) + except FormatError as error: + yield ValidationError(error.message, cause=error.cause) + + +def minLength(validator, mL, instance, schema): + if validator.is_type(instance, "string") and len(instance) < mL: + message = "should be non-empty" if mL == 1 else "is too short" + yield ValidationError(f"{instance!r} {message}") + + +def maxLength(validator, mL, instance, schema): + if validator.is_type(instance, "string") and len(instance) > mL: + message = "is expected to be empty" if mL == 0 else "is too long" + yield ValidationError(f"{instance!r} {message}") + + +def dependentRequired(validator, dependentRequired, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependentRequired.items(): + if property not in instance: + continue + + for each in dependency: + if each not in instance: + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + + +def dependentSchemas(validator, dependentSchemas, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependentSchemas.items(): + if property not in instance: + continue + yield from validator.descend( + instance, dependency, schema_path=property, + ) + + +def enum(validator, enums, instance, schema): + if all(not equal(each, instance) for each in enums): + yield ValidationError(f"{instance!r} is not one of {enums!r}") + + +def ref(validator, ref, instance, schema): + yield from validator._validate_reference(ref=ref, instance=instance) + + +def dynamicRef(validator, dynamicRef, instance, schema): + yield from validator._validate_reference(ref=dynamicRef, instance=instance) + + +def type(validator, types, instance, schema): + types = ensure_list(types) + + if not any(validator.is_type(instance, type) for type in types): + reprs = ", ".join(repr(type) for type in types) + yield ValidationError(f"{instance!r} is not of type {reprs}") + + +def properties(validator, properties, instance, schema): + if not validator.is_type(instance, "object"): + return + + for 
property, subschema in properties.items(): + if property in instance: + yield from validator.descend( + instance[property], + subschema, + path=property, + schema_path=property, + ) + + +def required(validator, required, instance, schema): + if not validator.is_type(instance, "object"): + return + for property in required: + if property not in instance: + yield ValidationError(f"{property!r} is a required property") + + +def minProperties(validator, mP, instance, schema): + if validator.is_type(instance, "object") and len(instance) < mP: + message = ( + "should be non-empty" if mP == 1 + else "does not have enough properties" + ) + yield ValidationError(f"{instance!r} {message}") + + +def maxProperties(validator, mP, instance, schema): + if not validator.is_type(instance, "object"): + return + if validator.is_type(instance, "object") and len(instance) > mP: + message = ( + "is expected to be empty" if mP == 0 + else "has too many properties" + ) + yield ValidationError(f"{instance!r} {message}") + + +def allOf(validator, allOf, instance, schema): + for index, subschema in enumerate(allOf): + yield from validator.descend(instance, subschema, schema_path=index) + + +def anyOf(validator, anyOf, instance, schema): + all_errors = [] + for index, subschema in enumerate(anyOf): + errs = list(validator.descend(instance, subschema, schema_path=index)) + if not errs: + break + all_errors.extend(errs) + else: + yield ValidationError( + f"{instance!r} is not valid under any of the given schemas", + context=all_errors, + ) + + +def oneOf(validator, oneOf, instance, schema): + subschemas = enumerate(oneOf) + all_errors = [] + for index, subschema in subschemas: + errs = list(validator.descend(instance, subschema, schema_path=index)) + if not errs: + first_valid = subschema + break + all_errors.extend(errs) + else: + yield ValidationError( + f"{instance!r} is not valid under any of the given schemas", + context=all_errors, + ) + + more_valid = [ + each for _, each in subschemas + if validator.evolve(schema=each).is_valid(instance) + ] + if more_valid: + more_valid.append(first_valid) + reprs = ", ".join(repr(schema) for schema in more_valid) + yield ValidationError(f"{instance!r} is valid under each of {reprs}") + + +def not_(validator, not_schema, instance, schema): + if validator.evolve(schema=not_schema).is_valid(instance): + message = f"{instance!r} should not be valid under {not_schema!r}" + yield ValidationError(message) + + +def if_(validator, if_schema, instance, schema): + if validator.evolve(schema=if_schema).is_valid(instance): + if "then" in schema: + then = schema["then"] + yield from validator.descend(instance, then, schema_path="then") + elif "else" in schema: + else_ = schema["else"] + yield from validator.descend(instance, else_, schema_path="else") + + +def unevaluatedItems(validator, unevaluatedItems, instance, schema): + if not validator.is_type(instance, "array"): + return + evaluated_item_indexes = find_evaluated_item_indexes_by_schema( + validator, instance, schema, + ) + unevaluated_items = [ + item for index, item in enumerate(instance) + if index not in evaluated_item_indexes + ] + if unevaluated_items: + error = "Unevaluated items are not allowed (%s %s unexpected)" + yield ValidationError(error % extras_msg(unevaluated_items)) + + +def unevaluatedProperties(validator, unevaluatedProperties, instance, schema): + if not validator.is_type(instance, "object"): + return + evaluated_keys = find_evaluated_property_keys_by_schema( + validator, instance, schema, + ) + unevaluated_keys 
= [] + for property in instance: + if property not in evaluated_keys: + for _ in validator.descend( + instance[property], + unevaluatedProperties, + path=property, + schema_path=property, + ): + # FIXME: Include context for each unevaluated property + # indicating why it's invalid under the subschema. + unevaluated_keys.append(property) # noqa: PERF401 + + if unevaluated_keys: + if unevaluatedProperties is False: + error = "Unevaluated properties are not allowed (%s %s unexpected)" + extras = sorted(unevaluated_keys, key=str) + yield ValidationError(error % extras_msg(extras)) + else: + error = ( + "Unevaluated properties are not valid under " + "the given schema (%s %s unevaluated and invalid)" + ) + yield ValidationError(error % extras_msg(unevaluated_keys)) + + +def prefixItems(validator, prefixItems, instance, schema): + if not validator.is_type(instance, "array"): + return + + for (index, item), subschema in zip(enumerate(instance), prefixItems): + yield from validator.descend( + instance=item, + schema=subschema, + schema_path=index, + path=index, + ) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/_legacy_keywords.py b/.venv/lib/python3.12/site-packages/jsonschema/_legacy_keywords.py new file mode 100644 index 0000000..c691589 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/_legacy_keywords.py @@ -0,0 +1,449 @@ +import re + +from referencing.jsonschema import lookup_recursive_ref + +from jsonschema import _utils +from jsonschema.exceptions import ValidationError + + +def ignore_ref_siblings(schema): + """ + Ignore siblings of ``$ref`` if it is present. + + Otherwise, return all keywords. + + Suitable for use with `create`'s ``applicable_validators`` argument. + """ + ref = schema.get("$ref") + if ref is not None: + return [("$ref", ref)] + else: + return schema.items() + + +def dependencies_draft3(validator, dependencies, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependencies.items(): + if property not in instance: + continue + + if validator.is_type(dependency, "object"): + yield from validator.descend( + instance, dependency, schema_path=property, + ) + elif validator.is_type(dependency, "string"): + if dependency not in instance: + message = f"{dependency!r} is a dependency of {property!r}" + yield ValidationError(message) + else: + for each in dependency: + if each not in instance: + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + + +def dependencies_draft4_draft6_draft7( + validator, + dependencies, + instance, + schema, +): + """ + Support for the ``dependencies`` keyword from pre-draft 2019-09. + + In later drafts, the keyword was split into separate + ``dependentRequired`` and ``dependentSchemas`` validators. 
+ """ + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependencies.items(): + if property not in instance: + continue + + if validator.is_type(dependency, "array"): + for each in dependency: + if each not in instance: + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + else: + yield from validator.descend( + instance, dependency, schema_path=property, + ) + + +def disallow_draft3(validator, disallow, instance, schema): + for disallowed in _utils.ensure_list(disallow): + if validator.evolve(schema={"type": [disallowed]}).is_valid(instance): + message = f"{disallowed!r} is disallowed for {instance!r}" + yield ValidationError(message) + + +def extends_draft3(validator, extends, instance, schema): + if validator.is_type(extends, "object"): + yield from validator.descend(instance, extends) + return + for index, subschema in enumerate(extends): + yield from validator.descend(instance, subschema, schema_path=index) + + +def items_draft3_draft4(validator, items, instance, schema): + if not validator.is_type(instance, "array"): + return + + if validator.is_type(items, "object"): + for index, item in enumerate(instance): + yield from validator.descend(item, items, path=index) + else: + for (index, item), subschema in zip(enumerate(instance), items): + yield from validator.descend( + item, subschema, path=index, schema_path=index, + ) + + +def additionalItems(validator, aI, instance, schema): + if ( + not validator.is_type(instance, "array") + or validator.is_type(schema.get("items", {}), "object") + ): + return + + len_items = len(schema.get("items", [])) + if validator.is_type(aI, "object"): + for index, item in enumerate(instance[len_items:], start=len_items): + yield from validator.descend(item, aI, path=index) + elif not aI and len(instance) > len(schema.get("items", [])): + error = "Additional items are not allowed (%s %s unexpected)" + yield ValidationError( + error % _utils.extras_msg(instance[len(schema.get("items", [])):]), + ) + + +def items_draft6_draft7_draft201909(validator, items, instance, schema): + if not validator.is_type(instance, "array"): + return + + if validator.is_type(items, "array"): + for (index, item), subschema in zip(enumerate(instance), items): + yield from validator.descend( + item, subschema, path=index, schema_path=index, + ) + else: + for index, item in enumerate(instance): + yield from validator.descend(item, items, path=index) + + +def minimum_draft3_draft4(validator, minimum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if schema.get("exclusiveMinimum", False): + failed = instance <= minimum + cmp = "less than or equal to" + else: + failed = instance < minimum + cmp = "less than" + + if failed: + message = f"{instance!r} is {cmp} the minimum of {minimum!r}" + yield ValidationError(message) + + +def maximum_draft3_draft4(validator, maximum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if schema.get("exclusiveMaximum", False): + failed = instance >= maximum + cmp = "greater than or equal to" + else: + failed = instance > maximum + cmp = "greater than" + + if failed: + message = f"{instance!r} is {cmp} the maximum of {maximum!r}" + yield ValidationError(message) + + +def properties_draft3(validator, properties, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, subschema in properties.items(): + if property in instance: + yield from validator.descend( + instance[property], + 
subschema, + path=property, + schema_path=property, + ) + elif subschema.get("required", False): + error = ValidationError(f"{property!r} is a required property") + error._set( + validator="required", + validator_value=subschema["required"], + instance=instance, + schema=schema, + ) + error.path.appendleft(property) + error.schema_path.extend([property, "required"]) + yield error + + +def type_draft3(validator, types, instance, schema): + types = _utils.ensure_list(types) + + all_errors = [] + for index, type in enumerate(types): + if validator.is_type(type, "object"): + errors = list(validator.descend(instance, type, schema_path=index)) + if not errors: + return + all_errors.extend(errors) + elif validator.is_type(instance, type): + return + + reprs = [] + for type in types: + try: + reprs.append(repr(type["name"])) + except Exception: # noqa: BLE001 + reprs.append(repr(type)) + yield ValidationError( + f"{instance!r} is not of type {', '.join(reprs)}", + context=all_errors, + ) + + +def contains_draft6_draft7(validator, contains, instance, schema): + if not validator.is_type(instance, "array"): + return + + if not any( + validator.evolve(schema=contains).is_valid(element) + for element in instance + ): + yield ValidationError( + f"None of {instance!r} are valid under the given schema", + ) + + +def recursiveRef(validator, recursiveRef, instance, schema): + resolved = lookup_recursive_ref(validator._resolver) + yield from validator.descend( + instance, + resolved.contents, + resolver=resolved.resolver, + ) + + +def find_evaluated_item_indexes_by_schema(validator, instance, schema): + """ + Get all indexes of items that get evaluated under the current schema. + + Covers all keywords related to unevaluatedItems: items, prefixItems, if, + then, else, contains, unevaluatedItems, allOf, oneOf, anyOf + """ + if validator.is_type(schema, "boolean"): + return [] + evaluated_indexes = [] + + ref = schema.get("$ref") + if ref is not None: + resolved = validator._resolver.lookup(ref) + evaluated_indexes.extend( + find_evaluated_item_indexes_by_schema( + validator.evolve( + schema=resolved.contents, + _resolver=resolved.resolver, + ), + instance, + resolved.contents, + ), + ) + + if "$recursiveRef" in schema: + resolved = lookup_recursive_ref(validator._resolver) + evaluated_indexes.extend( + find_evaluated_item_indexes_by_schema( + validator.evolve( + schema=resolved.contents, + _resolver=resolved.resolver, + ), + instance, + resolved.contents, + ), + ) + + if "items" in schema: + if "additionalItems" in schema: + return list(range(len(instance))) + + if validator.is_type(schema["items"], "object"): + return list(range(len(instance))) + evaluated_indexes += list(range(len(schema["items"]))) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["then"], + ) + elif "else" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["else"], + ) + + for keyword in ["contains", "unevaluatedItems"]: + if keyword in schema: + for k, v in enumerate(instance): + if validator.evolve(schema=schema[keyword]).is_valid(v): + evaluated_indexes.append(k) + + for keyword in ["allOf", "oneOf", "anyOf"]: + if keyword in schema: + for subschema in schema[keyword]: + errs = next(validator.descend(instance, subschema), None) + if 
errs is None: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, subschema, + ) + + return evaluated_indexes + + +def unevaluatedItems_draft2019(validator, unevaluatedItems, instance, schema): + if not validator.is_type(instance, "array"): + return + evaluated_item_indexes = find_evaluated_item_indexes_by_schema( + validator, instance, schema, + ) + unevaluated_items = [ + item for index, item in enumerate(instance) + if index not in evaluated_item_indexes + ] + if unevaluated_items: + error = "Unevaluated items are not allowed (%s %s unexpected)" + yield ValidationError(error % _utils.extras_msg(unevaluated_items)) + + +def find_evaluated_property_keys_by_schema(validator, instance, schema): + if validator.is_type(schema, "boolean"): + return [] + evaluated_keys = [] + + ref = schema.get("$ref") + if ref is not None: + resolved = validator._resolver.lookup(ref) + evaluated_keys.extend( + find_evaluated_property_keys_by_schema( + validator.evolve( + schema=resolved.contents, + _resolver=resolved.resolver, + ), + instance, + resolved.contents, + ), + ) + + if "$recursiveRef" in schema: + resolved = lookup_recursive_ref(validator._resolver) + evaluated_keys.extend( + find_evaluated_property_keys_by_schema( + validator.evolve( + schema=resolved.contents, + _resolver=resolved.resolver, + ), + instance, + resolved.contents, + ), + ) + + for keyword in [ + "properties", "additionalProperties", "unevaluatedProperties", + ]: + if keyword in schema: + schema_value = schema[keyword] + if validator.is_type(schema_value, "boolean") and schema_value: + evaluated_keys += instance.keys() + + elif validator.is_type(schema_value, "object"): + for property in schema_value: + if property in instance: + evaluated_keys.append(property) + + if "patternProperties" in schema: + for property in instance: + for pattern in schema["patternProperties"]: + if re.search(pattern, property): + evaluated_keys.append(property) + + if "dependentSchemas" in schema: + for property, subschema in schema["dependentSchemas"].items(): + if property not in instance: + continue + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, subschema, + ) + + for keyword in ["allOf", "oneOf", "anyOf"]: + if keyword in schema: + for subschema in schema[keyword]: + errs = next(validator.descend(instance, subschema), None) + if errs is None: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, subschema, + ) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["then"], + ) + elif "else" in schema: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["else"], + ) + + return evaluated_keys + + +def unevaluatedProperties_draft2019(validator, uP, instance, schema): + if not validator.is_type(instance, "object"): + return + evaluated_keys = find_evaluated_property_keys_by_schema( + validator, instance, schema, + ) + unevaluated_keys = [] + for property in instance: + if property not in evaluated_keys: + for _ in validator.descend( + instance[property], + uP, + path=property, + schema_path=property, + ): + # FIXME: Include context for each unevaluated property + # indicating why it's invalid under the subschema. 
+ unevaluated_keys.append(property) # noqa: PERF401 + + if unevaluated_keys: + if uP is False: + error = "Unevaluated properties are not allowed (%s %s unexpected)" + extras = sorted(unevaluated_keys, key=str) + yield ValidationError(error % _utils.extras_msg(extras)) + else: + error = ( + "Unevaluated properties are not valid under " + "the given schema (%s %s unevaluated and invalid)" + ) + yield ValidationError(error % _utils.extras_msg(unevaluated_keys)) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/_types.py b/.venv/lib/python3.12/site-packages/jsonschema/_types.py new file mode 100644 index 0000000..d3ce9d6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/_types.py @@ -0,0 +1,204 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +import numbers + +from attrs import evolve, field, frozen +from rpds import HashTrieMap + +from jsonschema.exceptions import UndefinedTypeCheck + +if TYPE_CHECKING: + from collections.abc import Mapping + from typing import Any, Callable + + +# unfortunately, the type of HashTrieMap is generic, and if used as an attrs +# converter, the generic type is presented to mypy, which then fails to match +# the concrete type of a type checker mapping +# this "do nothing" wrapper presents the correct information to mypy +def _typed_map_converter( + init_val: Mapping[str, Callable[[TypeChecker, Any], bool]], +) -> HashTrieMap[str, Callable[[TypeChecker, Any], bool]]: + return HashTrieMap.convert(init_val) + + +def is_array(checker, instance): + return isinstance(instance, list) + + +def is_bool(checker, instance): + return isinstance(instance, bool) + + +def is_integer(checker, instance): + # bool inherits from int, so ensure bools aren't reported as ints + if isinstance(instance, bool): + return False + return isinstance(instance, int) + + +def is_null(checker, instance): + return instance is None + + +def is_number(checker, instance): + # bool inherits from int, so ensure bools aren't reported as ints + if isinstance(instance, bool): + return False + return isinstance(instance, numbers.Number) + + +def is_object(checker, instance): + return isinstance(instance, dict) + + +def is_string(checker, instance): + return isinstance(instance, str) + + +def is_any(checker, instance): + return True + + +@frozen(repr=False) +class TypeChecker: + """ + A :kw:`type` property checker. + + A `TypeChecker` performs type checking for a `Validator`, converting + between the defined JSON Schema types and some associated Python types or + objects. + + Modifying the behavior just mentioned by redefining which Python objects + are considered to be of which JSON Schema types can be done using + `TypeChecker.redefine` or `TypeChecker.redefine_many`, and types can be + removed via `TypeChecker.remove`. Each of these return a new `TypeChecker`. + + Arguments: + + type_checkers: + + The initial mapping of types to their checking functions. + + """ + + _type_checkers: HashTrieMap[ + str, Callable[[TypeChecker, Any], bool], + ] = field(default=HashTrieMap(), converter=_typed_map_converter) + + def __repr__(self): + types = ", ".join(repr(k) for k in sorted(self._type_checkers)) + return f"<{self.__class__.__name__} types={{{types}}}>" + + def is_type(self, instance, type: str) -> bool: + """ + Check if the instance is of the appropriate type. + + Arguments: + + instance: + + The instance to check + + type: + + The name of the type that is expected. 
+ + Raises: + + `jsonschema.exceptions.UndefinedTypeCheck`: + + if ``type`` is unknown to this object. + + """ + try: + fn = self._type_checkers[type] + except KeyError: + raise UndefinedTypeCheck(type) from None + + return fn(self, instance) + + def redefine(self, type: str, fn) -> TypeChecker: + """ + Produce a new checker with the given type redefined. + + Arguments: + + type: + + The name of the type to check. + + fn (collections.abc.Callable): + + A callable taking exactly two parameters - the type + checker calling the function and the instance to check. + The function should return true if instance is of this + type and false otherwise. + + """ + return self.redefine_many({type: fn}) + + def redefine_many(self, definitions=()) -> TypeChecker: + """ + Produce a new checker with the given types redefined. + + Arguments: + + definitions (dict): + + A dictionary mapping types to their checking functions. + + """ + type_checkers = self._type_checkers.update(definitions) + return evolve(self, type_checkers=type_checkers) + + def remove(self, *types) -> TypeChecker: + """ + Produce a new checker with the given types forgotten. + + Arguments: + + types: + + the names of the types to remove. + + Raises: + + `jsonschema.exceptions.UndefinedTypeCheck`: + + if any given type is unknown to this object + + """ + type_checkers = self._type_checkers + for each in types: + try: + type_checkers = type_checkers.remove(each) + except KeyError: + raise UndefinedTypeCheck(each) from None + return evolve(self, type_checkers=type_checkers) + + +draft3_type_checker = TypeChecker( + { + "any": is_any, + "array": is_array, + "boolean": is_bool, + "integer": is_integer, + "object": is_object, + "null": is_null, + "number": is_number, + "string": is_string, + }, +) +draft4_type_checker = draft3_type_checker.remove("any") +draft6_type_checker = draft4_type_checker.redefine( + "integer", + lambda checker, instance: ( + is_integer(checker, instance) + or (isinstance(instance, float) and instance.is_integer()) + ), +) +draft7_type_checker = draft6_type_checker +draft201909_type_checker = draft7_type_checker +draft202012_type_checker = draft201909_type_checker diff --git a/.venv/lib/python3.12/site-packages/jsonschema/_typing.py b/.venv/lib/python3.12/site-packages/jsonschema/_typing.py new file mode 100644 index 0000000..1d091d7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/_typing.py @@ -0,0 +1,29 @@ +""" +Some (initially private) typing helpers for jsonschema's types. +""" +from collections.abc import Iterable +from typing import Any, Callable, Protocol, Union + +import referencing.jsonschema + +from jsonschema.protocols import Validator + + +class SchemaKeywordValidator(Protocol): + def __call__( + self, + validator: Validator, + value: Any, + instance: Any, + schema: referencing.jsonschema.Schema, + ) -> None: + ... + + +id_of = Callable[[referencing.jsonschema.Schema], Union[str, None]] + + +ApplicableValidators = Callable[ + [referencing.jsonschema.Schema], + Iterable[tuple[str, Any]], +] diff --git a/.venv/lib/python3.12/site-packages/jsonschema/_utils.py b/.venv/lib/python3.12/site-packages/jsonschema/_utils.py new file mode 100644 index 0000000..84a0965 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/_utils.py @@ -0,0 +1,355 @@ +from collections.abc import Mapping, MutableMapping, Sequence +from urllib.parse import urlsplit +import itertools +import re + + +class URIDict(MutableMapping): + """ + Dictionary which uses normalized URIs as keys. 
+ """ + + def normalize(self, uri): + return urlsplit(uri).geturl() + + def __init__(self, *args, **kwargs): + self.store = dict() + self.store.update(*args, **kwargs) + + def __getitem__(self, uri): + return self.store[self.normalize(uri)] + + def __setitem__(self, uri, value): + self.store[self.normalize(uri)] = value + + def __delitem__(self, uri): + del self.store[self.normalize(uri)] + + def __iter__(self): + return iter(self.store) + + def __len__(self): # pragma: no cover -- untested, but to be removed + return len(self.store) + + def __repr__(self): # pragma: no cover -- untested, but to be removed + return repr(self.store) + + +class Unset: + """ + An as-of-yet unset attribute or unprovided default parameter. + """ + + def __repr__(self): # pragma: no cover + return "" + + +def format_as_index(container, indices): + """ + Construct a single string containing indexing operations for the indices. + + For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"] + + Arguments: + + container (str): + + A word to use for the thing being indexed + + indices (sequence): + + The indices to format. + + """ + if not indices: + return container + return f"{container}[{']['.join(repr(index) for index in indices)}]" + + +def find_additional_properties(instance, schema): + """ + Return the set of additional properties for the given ``instance``. + + Weeds out properties that should have been validated by ``properties`` and + / or ``patternProperties``. + + Assumes ``instance`` is dict-like already. + """ + properties = schema.get("properties", {}) + patterns = "|".join(schema.get("patternProperties", {})) + for property in instance: + if property not in properties: + if patterns and re.search(patterns, property): + continue + yield property + + +def extras_msg(extras): + """ + Create an error message for extra items or properties. + """ + verb = "was" if len(extras) == 1 else "were" + return ", ".join(repr(extra) for extra in extras), verb + + +def ensure_list(thing): + """ + Wrap ``thing`` in a list if it's a single str. + + Otherwise, return it unchanged. + """ + if isinstance(thing, str): + return [thing] + return thing + + +def _mapping_equal(one, two): + """ + Check if two mappings are equal using the semantics of `equal`. + """ + if len(one) != len(two): + return False + return all( + key in two and equal(value, two[key]) + for key, value in one.items() + ) + + +def _sequence_equal(one, two): + """ + Check if two sequences are equal using the semantics of `equal`. + """ + if len(one) != len(two): + return False + return all(equal(i, j) for i, j in zip(one, two)) + + +def equal(one, two): + """ + Check if two things are equal evading some Python type hierarchy semantics. + + Specifically in JSON Schema, evade `bool` inheriting from `int`, + recursing into sequences to do the same. + """ + if one is two: + return True + if isinstance(one, str) or isinstance(two, str): + return one == two + if isinstance(one, Sequence) and isinstance(two, Sequence): + return _sequence_equal(one, two) + if isinstance(one, Mapping) and isinstance(two, Mapping): + return _mapping_equal(one, two) + return unbool(one) == unbool(two) + + +def unbool(element, true=object(), false=object()): + """ + A hack to make True and 1 and False and 0 unique for ``uniq``. + """ + if element is True: + return true + elif element is False: + return false + return element + + +def uniq(container): + """ + Check if all of a container's elements are unique. 
+ + Tries to rely on the container being recursively sortable, or otherwise + falls back on (slow) brute force. + """ + try: + sort = sorted(unbool(i) for i in container) + sliced = itertools.islice(sort, 1, None) + + for i, j in zip(sort, sliced): + if equal(i, j): + return False + + except (NotImplementedError, TypeError): + seen = [] + for e in container: + e = unbool(e) + + for i in seen: + if equal(i, e): + return False + + seen.append(e) + return True + + +def find_evaluated_item_indexes_by_schema(validator, instance, schema): + """ + Get all indexes of items that get evaluated under the current schema. + + Covers all keywords related to unevaluatedItems: items, prefixItems, if, + then, else, contains, unevaluatedItems, allOf, oneOf, anyOf + """ + if validator.is_type(schema, "boolean"): + return [] + evaluated_indexes = [] + + if "items" in schema: + return list(range(len(instance))) + + ref = schema.get("$ref") + if ref is not None: + resolved = validator._resolver.lookup(ref) + evaluated_indexes.extend( + find_evaluated_item_indexes_by_schema( + validator.evolve( + schema=resolved.contents, + _resolver=resolved.resolver, + ), + instance, + resolved.contents, + ), + ) + + dynamicRef = schema.get("$dynamicRef") + if dynamicRef is not None: + resolved = validator._resolver.lookup(dynamicRef) + evaluated_indexes.extend( + find_evaluated_item_indexes_by_schema( + validator.evolve( + schema=resolved.contents, + _resolver=resolved.resolver, + ), + instance, + resolved.contents, + ), + ) + + if "prefixItems" in schema: + evaluated_indexes += list(range(len(schema["prefixItems"]))) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["then"], + ) + elif "else" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["else"], + ) + + for keyword in ["contains", "unevaluatedItems"]: + if keyword in schema: + for k, v in enumerate(instance): + if validator.evolve(schema=schema[keyword]).is_valid(v): + evaluated_indexes.append(k) + + for keyword in ["allOf", "oneOf", "anyOf"]: + if keyword in schema: + for subschema in schema[keyword]: + errs = next(validator.descend(instance, subschema), None) + if errs is None: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, subschema, + ) + + return evaluated_indexes + + +def find_evaluated_property_keys_by_schema(validator, instance, schema): + """ + Get all keys of items that get evaluated under the current schema. 
+ + Covers all keywords related to unevaluatedProperties: properties, + additionalProperties, unevaluatedProperties, patternProperties, + dependentSchemas, allOf, oneOf, anyOf, if, then, else + """ + if validator.is_type(schema, "boolean"): + return [] + evaluated_keys = [] + + ref = schema.get("$ref") + if ref is not None: + resolved = validator._resolver.lookup(ref) + evaluated_keys.extend( + find_evaluated_property_keys_by_schema( + validator.evolve( + schema=resolved.contents, + _resolver=resolved.resolver, + ), + instance, + resolved.contents, + ), + ) + + dynamicRef = schema.get("$dynamicRef") + if dynamicRef is not None: + resolved = validator._resolver.lookup(dynamicRef) + evaluated_keys.extend( + find_evaluated_property_keys_by_schema( + validator.evolve( + schema=resolved.contents, + _resolver=resolved.resolver, + ), + instance, + resolved.contents, + ), + ) + + properties = schema.get("properties") + if validator.is_type(properties, "object"): + evaluated_keys += properties.keys() & instance.keys() + + for keyword in ["additionalProperties", "unevaluatedProperties"]: + if (subschema := schema.get(keyword)) is None: + continue + evaluated_keys += ( + key + for key, value in instance.items() + if is_valid(validator.descend(value, subschema)) + ) + + if "patternProperties" in schema: + for property in instance: + for pattern in schema["patternProperties"]: + if re.search(pattern, property): + evaluated_keys.append(property) + + if "dependentSchemas" in schema: + for property, subschema in schema["dependentSchemas"].items(): + if property not in instance: + continue + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, subschema, + ) + + for keyword in ["allOf", "oneOf", "anyOf"]: + for subschema in schema.get(keyword, []): + if not is_valid(validator.descend(instance, subschema)): + continue + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, subschema, + ) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["then"], + ) + elif "else" in schema: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["else"], + ) + + return evaluated_keys + + +def is_valid(errs_it): + """Whether there are no errors in the given iterator.""" + return next(errs_it, None) is None diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__init__.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__init__.py new file mode 100644 index 0000000..e3dcc68 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__init__.py @@ -0,0 +1,5 @@ +""" +Benchmarks for validation. + +This package is *not* public API. 
+""" diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b6b4311 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-312.pyc new file mode 100644 index 0000000..fcd6465 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-312.pyc new file mode 100644 index 0000000..a431503 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-312.pyc new file mode 100644 index 0000000..dcf7021 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-312.pyc new file mode 100644 index 0000000..50c2d05 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/nested_schemas.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/nested_schemas.cpython-312.pyc new file mode 100644 index 0000000..cef1b8a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/nested_schemas.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-312.pyc new file mode 100644 index 0000000..2075b6a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-312.pyc new file mode 100644 index 0000000..4f8b140 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-312.pyc new file mode 100644 index 0000000..933ead9 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-312.pyc new file mode 100644 index 0000000..2161d0a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/validator_creation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/validator_creation.cpython-312.pyc new file mode 100644 index 0000000..a2273c0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/__pycache__/validator_creation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/const_vs_enum.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/const_vs_enum.py new file mode 100644 index 0000000..c6fecd1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/const_vs_enum.py @@ -0,0 +1,30 @@ +""" +A benchmark for comparing equivalent validation of `const` and `enum`. +""" + +from pyperf import Runner + +from jsonschema import Draft202012Validator + +value = [37] * 100 +const_schema = {"const": list(value)} +enum_schema = {"enum": [list(value)]} + +valid = list(value) +invalid = [*valid, 73] + +const = Draft202012Validator(const_schema) +enum = Draft202012Validator(enum_schema) + +assert const.is_valid(valid) +assert enum.is_valid(valid) +assert not const.is_valid(invalid) +assert not enum.is_valid(invalid) + + +if __name__ == "__main__": + runner = Runner() + runner.bench_func("const valid", lambda: const.is_valid(valid)) + runner.bench_func("const invalid", lambda: const.is_valid(invalid)) + runner.bench_func("enum valid", lambda: enum.is_valid(valid)) + runner.bench_func("enum invalid", lambda: enum.is_valid(invalid)) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/contains.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/contains.py new file mode 100644 index 0000000..739cd04 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/contains.py @@ -0,0 +1,28 @@ +""" +A benchmark for validation of the `contains` keyword. +""" + +from pyperf import Runner + +from jsonschema import Draft202012Validator + +schema = { + "type": "array", + "contains": {"const": 37}, +} +validator = Draft202012Validator(schema) + +size = 1000 +beginning = [37] + [0] * (size - 1) +middle = [0] * (size // 2) + [37] + [0] * (size // 2) +end = [0] * (size - 1) + [37] +invalid = [0] * size + + +if __name__ == "__main__": + runner = Runner() + runner.bench_func("baseline", lambda: validator.is_valid([])) + runner.bench_func("beginning", lambda: validator.is_valid(beginning)) + runner.bench_func("middle", lambda: validator.is_valid(middle)) + runner.bench_func("end", lambda: validator.is_valid(end)) + runner.bench_func("invalid", lambda: validator.is_valid(invalid)) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/issue232.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/issue232.py new file mode 100644 index 0000000..efd0715 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/issue232.py @@ -0,0 +1,25 @@ +""" +A performance benchmark using the example from issue #232. 
+ +See https://github.com/python-jsonschema/jsonschema/pull/232. +""" +from pathlib import Path + +from pyperf import Runner +from referencing import Registry + +from jsonschema.tests._suite import Version +import jsonschema + +issue232 = Version( + path=Path(__file__).parent / "issue232", + remotes=Registry(), + name="issue232", +) + + +if __name__ == "__main__": + issue232.benchmark( + runner=Runner(), + Validator=jsonschema.Draft4Validator, + ) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/issue232/issue.json b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/issue232/issue.json new file mode 100644 index 0000000..804c340 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/issue232/issue.json @@ -0,0 +1,2653 @@ +[ + { + "description": "Petstore", + "schema": { + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." 
+ }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } + }, + "tests": [ + { + "description": "Example petsore", + "data": { + "swagger": "2.0", + "info": { + "description": "This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). 
For this sample, you can use the api key `special-key` to test the authorization filters.", + "version": "1.0.0", + "title": "Swagger Petstore", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "email": "apiteam@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + } + }, + "host": "petstore.swagger.io", + "basePath": "/v2", + "tags": [ + { + "name": "pet", + "description": "Everything about your Pets", + "externalDocs": { + "description": "Find out more", + "url": "http://swagger.io" + } + }, + { + "name": "store", + "description": "Access to Petstore orders" + }, + { + "name": "user", + "description": "Operations about user", + "externalDocs": { + "description": "Find out more about our store", + "url": "http://swagger.io" + } + } + ], + "schemes": [ + "http" + ], + "paths": { + "/pet": { + "post": { + "tags": [ + "pet" + ], + "summary": "Add a new pet to the store", + "description": "", + "operationId": "addPet", + "consumes": [ + "application/json", + "application/xml" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Pet object that needs to be added to the store", + "required": true, + "schema": { + "$ref": "#/definitions/Pet" + } + } + ], + "responses": { + "405": { + "description": "Invalid input" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + }, + "put": { + "tags": [ + "pet" + ], + "summary": "Update an existing pet", + "description": "", + "operationId": "updatePet", + "consumes": [ + "application/json", + "application/xml" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Pet object that needs to be added to the store", + "required": true, + "schema": { + "$ref": "#/definitions/Pet" + } + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + }, + "405": { + "description": "Validation exception" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/findByStatus": { + "get": { + "tags": [ + "pet" + ], + "summary": "Finds Pets by status", + "description": "Multiple status values can be provided with comma separated strings", + "operationId": "findPetsByStatus", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "status", + "in": "query", + "description": "Status values that need to be considered for filter", + "required": true, + "type": "array", + "items": { + "type": "string", + "enum": [ + "available", + "pending", + "sold" + ], + "default": "available" + }, + "collectionFormat": "multi" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "400": { + "description": "Invalid status value" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/findByTags": { + "get": { + "tags": [ + "pet" + ], + "summary": "Finds Pets by tags", + "description": "Muliple tags can be provided with comma separated strings. 
Use tag1, tag2, tag3 for testing.", + "operationId": "findPetsByTags", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "tags", + "in": "query", + "description": "Tags to filter by", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "400": { + "description": "Invalid tag value" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ], + "deprecated": true + } + }, + "/pet/{petId}": { + "get": { + "tags": [ + "pet" + ], + "summary": "Find pet by ID", + "description": "Returns a single pet", + "operationId": "getPetById", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet to return", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + } + }, + "security": [ + { + "api_key": [] + } + ] + }, + "post": { + "tags": [ + "pet" + ], + "summary": "Updates a pet in the store with form data", + "description": "", + "operationId": "updatePetWithForm", + "consumes": [ + "application/x-www-form-urlencoded" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be updated", + "required": true, + "type": "integer", + "format": "int64" + }, + { + "name": "name", + "in": "formData", + "description": "Updated name of the pet", + "required": false, + "type": "string" + }, + { + "name": "status", + "in": "formData", + "description": "Updated status of the pet", + "required": false, + "type": "string" + } + ], + "responses": { + "405": { + "description": "Invalid input" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + }, + "delete": { + "tags": [ + "pet" + ], + "summary": "Deletes a pet", + "description": "", + "operationId": "deletePet", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "api_key", + "in": "header", + "required": false, + "type": "string" + }, + { + "name": "petId", + "in": "path", + "description": "Pet id to delete", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/{petId}/uploadImage": { + "post": { + "tags": [ + "pet" + ], + "summary": "uploads an image", + "description": "", + "operationId": "uploadFile", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet to update", + "required": true, + "type": "integer", + "format": "int64" + }, + { + "name": "additionalMetadata", + "in": "formData", + "description": "Additional data to pass to server", + "required": false, + "type": "string" + }, + { + "name": "file", + "in": "formData", + "description": "file to upload", + "required": false, + "type": "file" + } + 
], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/ApiResponse" + } + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/store/inventory": { + "get": { + "tags": [ + "store" + ], + "summary": "Returns pet inventories by status", + "description": "Returns a map of status codes to quantities", + "operationId": "getInventory", + "produces": [ + "application/json" + ], + "parameters": [], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "int32" + } + } + } + }, + "security": [ + { + "api_key": [] + } + ] + } + }, + "/store/order": { + "post": { + "tags": [ + "store" + ], + "summary": "Place an order for a pet", + "description": "", + "operationId": "placeOrder", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "order placed for purchasing the pet", + "required": true, + "schema": { + "$ref": "#/definitions/Order" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Order" + } + }, + "400": { + "description": "Invalid Order" + } + } + } + }, + "/store/order/{orderId}": { + "get": { + "tags": [ + "store" + ], + "summary": "Find purchase order by ID", + "description": "For valid response try integer IDs with value >= 1 and <= 10. Other values will generated exceptions", + "operationId": "getOrderById", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "orderId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "integer", + "maximum": 10.0, + "minimum": 1.0, + "format": "int64" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Order" + } + }, + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Order not found" + } + } + }, + "delete": { + "tags": [ + "store" + ], + "summary": "Delete purchase order by ID", + "description": "For valid response try integer IDs with positive integer value. 
Negative or non-integer values will generate API errors", + "operationId": "deleteOrder", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "orderId", + "in": "path", + "description": "ID of the order that needs to be deleted", + "required": true, + "type": "integer", + "minimum": 1.0, + "format": "int64" + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Order not found" + } + } + } + }, + "/user": { + "post": { + "tags": [ + "user" + ], + "summary": "Create user", + "description": "This can only be done by the logged in user.", + "operationId": "createUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Created user object", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/createWithArray": { + "post": { + "tags": [ + "user" + ], + "summary": "Creates list of users with given input array", + "description": "", + "operationId": "createUsersWithArrayInput", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "List of user object", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/User" + } + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/createWithList": { + "post": { + "tags": [ + "user" + ], + "summary": "Creates list of users with given input array", + "description": "", + "operationId": "createUsersWithListInput", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "List of user object", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/User" + } + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/login": { + "get": { + "tags": [ + "user" + ], + "summary": "Logs user into the system", + "description": "", + "operationId": "loginUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "query", + "description": "The user name for login", + "required": true, + "type": "string" + }, + { + "name": "password", + "in": "query", + "description": "The password for login in clear text", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "string" + }, + "headers": { + "X-Rate-Limit": { + "type": "integer", + "format": "int32", + "description": "calls per hour allowed by the user" + }, + "X-Expires-After": { + "type": "string", + "format": "date-time", + "description": "date in UTC when token expires" + } + } + }, + "400": { + "description": "Invalid username/password supplied" + } + } + } + }, + "/user/logout": { + "get": { + "tags": [ + "user" + ], + "summary": "Logs out current logged in user session", + "description": "", + "operationId": "logoutUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/{username}": { + "get": { + "tags": [ + "user" + ], + "summary": "Get user by user name", + "description": "", + "operationId": 
"getUserByName", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "The name that needs to be fetched. Use user1 for testing. ", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/User" + } + }, + "400": { + "description": "Invalid username supplied" + }, + "404": { + "description": "User not found" + } + } + }, + "put": { + "tags": [ + "user" + ], + "summary": "Updated user", + "description": "This can only be done by the logged in user.", + "operationId": "updateUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "name that need to be updated", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "description": "Updated user object", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "400": { + "description": "Invalid user supplied" + }, + "404": { + "description": "User not found" + } + } + }, + "delete": { + "tags": [ + "user" + ], + "summary": "Delete user", + "description": "This can only be done by the logged in user.", + "operationId": "deleteUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "The name that needs to be deleted", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "Invalid username supplied" + }, + "404": { + "description": "User not found" + } + } + } + } + }, + "securityDefinitions": { + "petstore_auth": { + "type": "oauth2", + "authorizationUrl": "http://petstore.swagger.io/oauth/dialog", + "flow": "implicit", + "scopes": { + "write:pets": "modify pets in your account", + "read:pets": "read your pets" + } + }, + "api_key": { + "type": "apiKey", + "name": "api_key", + "in": "header" + } + }, + "definitions": { + "Order": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "petId": { + "type": "integer", + "format": "int64" + }, + "quantity": { + "type": "integer", + "format": "int32" + }, + "shipDate": { + "type": "string", + "format": "date-time" + }, + "status": { + "type": "string", + "description": "Order Status", + "enum": [ + "placed", + "approved", + "delivered" + ] + }, + "complete": { + "type": "boolean", + "default": false + } + }, + "xml": { + "name": "Order" + } + }, + "Category": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + }, + "xml": { + "name": "Category" + } + }, + "User": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "username": { + "type": "string" + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + }, + "email": { + "type": "string" + }, + "password": { + "type": "string" + }, + "phone": { + "type": "string" + }, + "userStatus": { + "type": "integer", + "format": "int32", + "description": "User Status" + } + }, + "xml": { + "name": "User" + } + }, + "Tag": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + }, + "xml": { + "name": "Tag" + } + }, + "Pet": { + "type": "object", + "required": [ + "name", + "photoUrls" + ], + "properties": { + "id": { + "type": 
"integer", + "format": "int64" + }, + "category": { + "$ref": "#/definitions/Category" + }, + "name": { + "type": "string", + "example": "doggie" + }, + "photoUrls": { + "type": "array", + "xml": { + "name": "photoUrl", + "wrapped": true + }, + "items": { + "type": "string" + } + }, + "tags": { + "type": "array", + "xml": { + "name": "tag", + "wrapped": true + }, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "status": { + "type": "string", + "description": "pet status in the store", + "enum": [ + "available", + "pending", + "sold" + ] + } + }, + "xml": { + "name": "Pet" + } + }, + "ApiResponse": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string" + }, + "message": { + "type": "string" + } + } + } + }, + "externalDocs": { + "description": "Find out more about Swagger", + "url": "http://swagger.io" + } + }, + "valid": true + } + ] + } +] diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/json_schema_test_suite.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/json_schema_test_suite.py new file mode 100644 index 0000000..905fb6a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/json_schema_test_suite.py @@ -0,0 +1,12 @@ +""" +A performance benchmark using the official test suite. + +This benchmarks jsonschema using every valid example in the +JSON-Schema-Test-Suite. It will take some time to complete. +""" +from pyperf import Runner + +from jsonschema.tests._suite import Suite + +if __name__ == "__main__": + Suite().benchmark(runner=Runner()) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/nested_schemas.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/nested_schemas.py new file mode 100644 index 0000000..b025c47 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/nested_schemas.py @@ -0,0 +1,56 @@ +""" +Validating highly nested schemas shouldn't cause exponential time blowups. + +See https://github.com/python-jsonschema/jsonschema/issues/1097. +""" +from itertools import cycle + +from jsonschema.validators import validator_for + +metaschemaish = { + "$id": "https://example.com/draft/2020-12/schema/strict", + "$schema": "https://json-schema.org/draft/2020-12/schema", + + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": True, + "https://json-schema.org/draft/2020-12/vocab/applicator": True, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": True, + "https://json-schema.org/draft/2020-12/vocab/validation": True, + "https://json-schema.org/draft/2020-12/vocab/meta-data": True, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": True, + "https://json-schema.org/draft/2020-12/vocab/content": True, + }, + "$dynamicAnchor": "meta", + + "$ref": "https://json-schema.org/draft/2020-12/schema", + "unevaluatedProperties": False, +} + + +def nested_schema(levels): + """ + Produce a schema which validates deeply nested objects and arrays. 
diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/nested_schemas.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/nested_schemas.py
new file mode 100644
index 0000000..b025c47
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/nested_schemas.py
@@ -0,0 +1,56 @@
+"""
+Validating highly nested schemas shouldn't cause exponential time blowups.
+
+See https://github.com/python-jsonschema/jsonschema/issues/1097.
+"""
+from itertools import cycle
+
+from jsonschema.validators import validator_for
+
+metaschemaish = {
+    "$id": "https://example.com/draft/2020-12/schema/strict",
+    "$schema": "https://json-schema.org/draft/2020-12/schema",
+
+    "$vocabulary": {
+        "https://json-schema.org/draft/2020-12/vocab/core": True,
+        "https://json-schema.org/draft/2020-12/vocab/applicator": True,
+        "https://json-schema.org/draft/2020-12/vocab/unevaluated": True,
+        "https://json-schema.org/draft/2020-12/vocab/validation": True,
+        "https://json-schema.org/draft/2020-12/vocab/meta-data": True,
+        "https://json-schema.org/draft/2020-12/vocab/format-annotation": True,
+        "https://json-schema.org/draft/2020-12/vocab/content": True,
+    },
+    "$dynamicAnchor": "meta",
+
+    "$ref": "https://json-schema.org/draft/2020-12/schema",
+    "unevaluatedProperties": False,
+}
+
+
+def nested_schema(levels):
+    """
+    Produce a schema which validates deeply nested objects and arrays.
+    """
+
+    names = cycle(["foo", "bar", "baz", "quux", "spam", "eggs"])
+    schema = {"type": "object", "properties": {"ham": {"type": "string"}}}
+    for _, name in zip(range(levels - 1), names):
+        schema = {"type": "object", "properties": {name: schema}}
+    return schema
+
+
+validator = validator_for(metaschemaish)(metaschemaish)
+
+if __name__ == "__main__":
+    from pyperf import Runner
+    runner = Runner()
+
+    not_nested = nested_schema(levels=1)
+    runner.bench_func("not nested", lambda: validator.is_valid(not_nested))
+
+    for levels in range(1, 11, 3):
+        schema = nested_schema(levels=levels)
+        runner.bench_func(
+            f"nested * {levels}",
+            lambda schema=schema: validator.is_valid(schema),
+        )
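To make the generator above concrete: nested_schema(levels=3) starts from the innermost {"ham": ...} object and wraps it twice, drawing property names from the cycled list, so it evaluates to the value below. Note that the benchmark then validates each such schema as an instance of the strict metaschema above, so deeper nesting exercises $ref resolution and unevaluatedProperties rather than ordinary instance validation.

    {
        "type": "object",
        "properties": {
            "bar": {
                "type": "object",
                "properties": {
                    "foo": {
                        "type": "object",
                        "properties": {"ham": {"type": "string"}},
                    },
                },
            },
        },
    }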
+""" +from pyperf import Runner +from referencing import Registry +from referencing.jsonschema import DRAFT201909 + +from jsonschema import Draft201909Validator + +registry = Registry().with_resource( + "urn:example:foo", + DRAFT201909.create_resource({}), +) + +schema = {"$ref": "https://json-schema.org/draft/2019-09/schema"} +instance = {"maxLength": 4} + +no_registry = Draft201909Validator(schema) +with_useless_registry = Draft201909Validator(schema, registry=registry) + +if __name__ == "__main__": + runner = Runner() + + runner.bench_func( + "no registry", + lambda: no_registry.is_valid(instance), + ) + runner.bench_func( + "useless registry", + lambda: with_useless_registry.is_valid(instance), + ) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/useless_applicator_schemas.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/useless_applicator_schemas.py new file mode 100644 index 0000000..f3229c0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/useless_applicator_schemas.py @@ -0,0 +1,106 @@ + +""" +A benchmark for validation of applicators containing lots of useless schemas. + +Signals a small possible optimization to remove all such schemas ahead of time. +""" + +from pyperf import Runner + +from jsonschema import Draft202012Validator as Validator + +NUM_USELESS = 100000 + +subschema = {"const": 37} + +valid = 37 +invalid = 12 + +baseline = Validator(subschema) + + +# These should be indistinguishable from just `subschema` +by_name = { + "single subschema": { + "anyOf": Validator({"anyOf": [subschema]}), + "allOf": Validator({"allOf": [subschema]}), + "oneOf": Validator({"oneOf": [subschema]}), + }, + "redundant subschemas": { + "anyOf": Validator({"anyOf": [subschema] * NUM_USELESS}), + "allOf": Validator({"allOf": [subschema] * NUM_USELESS}), + }, + "useless successful subschemas (beginning)": { + "anyOf": Validator({"anyOf": [subschema, *[True] * NUM_USELESS]}), + "allOf": Validator({"allOf": [subschema, *[True] * NUM_USELESS]}), + }, + "useless successful subschemas (middle)": { + "anyOf": Validator( + { + "anyOf": [ + *[True] * (NUM_USELESS // 2), + subschema, + *[True] * (NUM_USELESS // 2), + ], + }, + ), + "allOf": Validator( + { + "allOf": [ + *[True] * (NUM_USELESS // 2), + subschema, + *[True] * (NUM_USELESS // 2), + ], + }, + ), + }, + "useless successful subschemas (end)": { + "anyOf": Validator({"anyOf": [*[True] * NUM_USELESS, subschema]}), + "allOf": Validator({"allOf": [*[True] * NUM_USELESS, subschema]}), + }, + "useless failing subschemas (beginning)": { + "anyOf": Validator({"anyOf": [subschema, *[False] * NUM_USELESS]}), + "oneOf": Validator({"oneOf": [subschema, *[False] * NUM_USELESS]}), + }, + "useless failing subschemas (middle)": { + "anyOf": Validator( + { + "anyOf": [ + *[False] * (NUM_USELESS // 2), + subschema, + *[False] * (NUM_USELESS // 2), + ], + }, + ), + "oneOf": Validator( + { + "oneOf": [ + *[False] * (NUM_USELESS // 2), + subschema, + *[False] * (NUM_USELESS // 2), + ], + }, + ), + }, + "useless failing subschemas (end)": { + "anyOf": Validator({"anyOf": [*[False] * NUM_USELESS, subschema]}), + "oneOf": Validator({"oneOf": [*[False] * NUM_USELESS, subschema]}), + }, +} + +if __name__ == "__main__": + runner = Runner() + + runner.bench_func("baseline valid", lambda: baseline.is_valid(valid)) + runner.bench_func("baseline invalid", lambda: baseline.is_valid(invalid)) + + for group, applicators in by_name.items(): + for applicator, validator in applicators.items(): + runner.bench_func( + 
f"{group}: {applicator} valid", + lambda validator=validator: validator.is_valid(valid), + ) + runner.bench_func( + f"{group}: {applicator} invalid", + lambda validator=validator: validator.is_valid(invalid), + ) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/useless_keywords.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/useless_keywords.py new file mode 100644 index 0000000..50f4359 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/useless_keywords.py @@ -0,0 +1,32 @@ +""" +A benchmark for validation of schemas containing lots of useless keywords. + +Checks we filter them out once, ahead of time. +""" + +from pyperf import Runner + +from jsonschema import Draft202012Validator + +NUM_USELESS = 100000 +schema = dict( + [ + ("not", {"const": 42}), + *((str(i), i) for i in range(NUM_USELESS)), + ("type", "integer"), + *((str(i), i) for i in range(NUM_USELESS, NUM_USELESS)), + ("minimum", 37), + ], +) +validator = Draft202012Validator(schema) + +valid = 3737 +invalid = 12 + + +if __name__ == "__main__": + runner = Runner() + runner.bench_func("beginning of schema", lambda: validator.is_valid(42)) + runner.bench_func("middle of schema", lambda: validator.is_valid("foo")) + runner.bench_func("end of schema", lambda: validator.is_valid(12)) + runner.bench_func("valid", lambda: validator.is_valid(3737)) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/validator_creation.py b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/validator_creation.py new file mode 100644 index 0000000..4baeb3a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/benchmarks/validator_creation.py @@ -0,0 +1,14 @@ +from pyperf import Runner + +from jsonschema import Draft202012Validator + +schema = { + "type": "array", + "minLength": 1, + "maxLength": 1, + "items": {"type": "integer"}, +} + + +if __name__ == "__main__": + Runner().bench_func("validator creation", Draft202012Validator, schema) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/cli.py b/.venv/lib/python3.12/site-packages/jsonschema/cli.py new file mode 100644 index 0000000..f3ca4d6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/cli.py @@ -0,0 +1,292 @@ +""" +The ``jsonschema`` command line. +""" + +from importlib import metadata +from json import JSONDecodeError +from pkgutil import resolve_name +from textwrap import dedent +import argparse +import json +import sys +import traceback +import warnings + +from attrs import define, field + +from jsonschema.exceptions import SchemaError +from jsonschema.validators import _RefResolver, validator_for + +warnings.warn( + ( + "The jsonschema CLI is deprecated and will be removed in a future " + "version. 
+        "from https://pypi.org/project/check-jsonschema/"
+    ),
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+
+class _CannotLoadFile(Exception):
+    pass
+
+
+@define
+class _Outputter:
+
+    _formatter = field()
+    _stdout = field()
+    _stderr = field()
+
+    @classmethod
+    def from_arguments(cls, arguments, stdout, stderr):
+        if arguments["output"] == "plain":
+            formatter = _PlainFormatter(arguments["error_format"])
+        elif arguments["output"] == "pretty":
+            formatter = _PrettyFormatter()
+        return cls(formatter=formatter, stdout=stdout, stderr=stderr)
+
+    def load(self, path):
+        try:
+            file = open(path)  # noqa: SIM115, PTH123
+        except FileNotFoundError as error:
+            self.filenotfound_error(path=path, exc_info=sys.exc_info())
+            raise _CannotLoadFile() from error
+
+        with file:
+            try:
+                return json.load(file)
+            except JSONDecodeError as error:
+                self.parsing_error(path=path, exc_info=sys.exc_info())
+                raise _CannotLoadFile() from error
+
+    def filenotfound_error(self, **kwargs):
+        self._stderr.write(self._formatter.filenotfound_error(**kwargs))
+
+    def parsing_error(self, **kwargs):
+        self._stderr.write(self._formatter.parsing_error(**kwargs))
+
+    def validation_error(self, **kwargs):
+        self._stderr.write(self._formatter.validation_error(**kwargs))
+
+    def validation_success(self, **kwargs):
+        self._stdout.write(self._formatter.validation_success(**kwargs))
+
+
+@define
+class _PrettyFormatter:
+
+    _ERROR_MSG = dedent(
+        """\
+        ===[{type}]===({path})===
+
+        {body}
+        -----------------------------
+        """,
+    )
+    _SUCCESS_MSG = "===[SUCCESS]===({path})===\n"
+
+    def filenotfound_error(self, path, exc_info):
+        return self._ERROR_MSG.format(
+            path=path,
+            type="FileNotFoundError",
+            body=f"{path!r} does not exist.",
+        )
+
+    def parsing_error(self, path, exc_info):
+        exc_type, exc_value, exc_traceback = exc_info
+        exc_lines = "".join(
+            traceback.format_exception(exc_type, exc_value, exc_traceback),
+        )
+        return self._ERROR_MSG.format(
+            path=path,
+            type=exc_type.__name__,
+            body=exc_lines,
+        )
+
+    def validation_error(self, instance_path, error):
+        return self._ERROR_MSG.format(
+            path=instance_path,
+            type=error.__class__.__name__,
+            body=error,
+        )
+
+    def validation_success(self, instance_path):
+        return self._SUCCESS_MSG.format(path=instance_path)
+
+
+@define
+class _PlainFormatter:
+
+    _error_format = field()
+
+    def filenotfound_error(self, path, exc_info):
+        return f"{path!r} does not exist.\n"
+
+    def parsing_error(self, path, exc_info):
+        return "Failed to parse {}: {}\n".format(
+            "<stdin>" if path == "<stdin>" else repr(path),
+            exc_info[1],
+        )
+
+    def validation_error(self, instance_path, error):
+        return self._error_format.format(file_name=instance_path, error=error)
+
+    def validation_success(self, instance_path):
+        return ""
+
+
+def _resolve_name_with_default(name):
+    if "." not in name:
+        name = "jsonschema." + name
+    return resolve_name(name)
+
+
+parser = argparse.ArgumentParser(
+    description="JSON Schema Validation CLI",
+)
+parser.add_argument(
+    "-i", "--instance",
+    action="append",
+    dest="instances",
+    help="""
+        a path to a JSON instance (i.e. filename.json) to validate (may
+        be specified multiple times). If no instances are provided via this
+        option, one will be expected on standard input.
+        """,
+)
+parser.add_argument(
+    "-F", "--error-format",
+    help="""
+        the format to use for each validation error message, specified
+        in a form suitable for str.format. This string will be passed
This string will be passed + one formatted object named 'error' for each ValidationError. + Only provide this option when using --output=plain, which is the + default. If this argument is unprovided and --output=plain is + used, a simple default representation will be used. + """, +) +parser.add_argument( + "-o", "--output", + choices=["plain", "pretty"], + default="plain", + help=""" + an output format to use. 'plain' (default) will produce minimal + text with one line for each error, while 'pretty' will produce + more detailed human-readable output on multiple lines. + """, +) +parser.add_argument( + "-V", "--validator", + type=_resolve_name_with_default, + help=""" + the fully qualified object name of a validator to use, or, for + validators that are registered with jsonschema, simply the name + of the class. + """, +) +parser.add_argument( + "--base-uri", + help=""" + a base URI to assign to the provided schema, even if it does not + declare one (via e.g. $id). This option can be used if you wish to + resolve relative references to a particular URI (or local path) + """, +) +parser.add_argument( + "--version", + action="version", + version=metadata.version("jsonschema"), +) +parser.add_argument( + "schema", + help="the path to a JSON Schema to validate with (i.e. schema.json)", +) + + +def parse_args(args): # noqa: D103 + arguments = vars(parser.parse_args(args=args or ["--help"])) + if arguments["output"] != "plain" and arguments["error_format"]: + raise parser.error( + "--error-format can only be used with --output plain", + ) + if arguments["output"] == "plain" and arguments["error_format"] is None: + arguments["error_format"] = "{error.instance}: {error.message}\n" + return arguments + + +def _validate_instance(instance_path, instance, validator, outputter): + invalid = False + for error in validator.iter_errors(instance): + invalid = True + outputter.validation_error(instance_path=instance_path, error=error) + + if not invalid: + outputter.validation_success(instance_path=instance_path) + return invalid + + +def main(args=sys.argv[1:]): # noqa: D103 + sys.exit(run(arguments=parse_args(args=args))) + + +def run(arguments, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin): # noqa: D103 + outputter = _Outputter.from_arguments( + arguments=arguments, + stdout=stdout, + stderr=stderr, + ) + + try: + schema = outputter.load(arguments["schema"]) + except _CannotLoadFile: + return 1 + + Validator = arguments["validator"] + if Validator is None: + Validator = validator_for(schema) + + try: + Validator.check_schema(schema) + except SchemaError as error: + outputter.validation_error( + instance_path=arguments["schema"], + error=error, + ) + return 1 + + if arguments["instances"]: + load, instances = outputter.load, arguments["instances"] + else: + def load(_): + try: + return json.load(stdin) + except JSONDecodeError as error: + outputter.parsing_error( + path="<stdin>", exc_info=sys.exc_info(), + ) + raise _CannotLoadFile() from error + instances = ["<stdin>"] + + resolver = _RefResolver( + base_uri=arguments["base_uri"], + referrer=schema, + ) if arguments["base_uri"] is not None else None + + validator = Validator(schema, resolver=resolver) + exit_code = 0 + for each in instances: + try: + instance = load(each) + except _CannotLoadFile: + exit_code = 1 + else: + exit_code |= _validate_instance( + instance_path=each, + instance=instance, + validator=validator, + outputter=outputter, + ) + + return exit_code diff --git a/.venv/lib/python3.12/site-packages/jsonschema/exceptions.py 
b/.venv/lib/python3.12/site-packages/jsonschema/exceptions.py new file mode 100644 index 0000000..d955e35 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/exceptions.py @@ -0,0 +1,490 @@ +""" +Validation errors, and some surrounding helpers. +""" +from __future__ import annotations + +from collections import defaultdict, deque +from pprint import pformat +from textwrap import dedent, indent +from typing import TYPE_CHECKING, Any, ClassVar +import heapq +import re +import warnings + +from attrs import define +from referencing.exceptions import Unresolvable as _Unresolvable + +from jsonschema import _utils + +if TYPE_CHECKING: + from collections.abc import Iterable, Mapping, MutableMapping, Sequence + + from jsonschema import _types + +WEAK_MATCHES: frozenset[str] = frozenset(["anyOf", "oneOf"]) +STRONG_MATCHES: frozenset[str] = frozenset() + +_JSON_PATH_COMPATIBLE_PROPERTY_PATTERN = re.compile("^[a-zA-Z][a-zA-Z0-9_]*$") + +_unset = _utils.Unset() + + +def _pretty(thing: Any, prefix: str): + """ + Format something for an error message as prettily as we currently can. + """ + return indent(pformat(thing, width=72, sort_dicts=False), prefix).lstrip() + + +def __getattr__(name): + if name == "RefResolutionError": + warnings.warn( + _RefResolutionError._DEPRECATION_MESSAGE, + DeprecationWarning, + stacklevel=2, + ) + return _RefResolutionError + raise AttributeError(f"module {__name__} has no attribute {name}") + + +class _Error(Exception): + + _word_for_schema_in_error_message: ClassVar[str] + _word_for_instance_in_error_message: ClassVar[str] + + def __init__( + self, + message: str, + validator: str = _unset, # type: ignore[assignment] + path: Iterable[str | int] = (), + cause: Exception | None = None, + context=(), + validator_value: Any = _unset, + instance: Any = _unset, + schema: Mapping[str, Any] | bool = _unset, # type: ignore[assignment] + schema_path: Iterable[str | int] = (), + parent: _Error | None = None, + type_checker: _types.TypeChecker = _unset, # type: ignore[assignment] + ) -> None: + super().__init__( + message, + validator, + path, + cause, + context, + validator_value, + instance, + schema, + schema_path, + parent, + ) + self.message = message + self.path = self.relative_path = deque(path) + self.schema_path = self.relative_schema_path = deque(schema_path) + self.context = list(context) + self.cause = self.__cause__ = cause + self.validator = validator + self.validator_value = validator_value + self.instance = instance + self.schema = schema + self.parent = parent + self._type_checker = type_checker + + for error in context: + error.parent = self + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}: {self.message!r}>" + + def __str__(self) -> str: + essential_for_verbose = ( + self.validator, self.validator_value, self.instance, self.schema, + ) + if any(m is _unset for m in essential_for_verbose): + return self.message + + schema_path = _utils.format_as_index( + container=self._word_for_schema_in_error_message, + indices=list(self.relative_schema_path)[:-1], + ) + instance_path = _utils.format_as_index( + container=self._word_for_instance_in_error_message, + indices=self.relative_path, + ) + prefix = 16 * " " + + return dedent( + f"""\ + {self.message} + + Failed validating {self.validator!r} in {schema_path}: + {_pretty(self.schema, prefix=prefix)} + + On {instance_path}: + {_pretty(self.instance, prefix=prefix)} + """.rstrip(), + ) + + @classmethod + def create_from(cls, other: _Error): + return cls(**other._contents()) + + @property + 
def absolute_path(self) -> Sequence[str | int]: + parent = self.parent + if parent is None: + return self.relative_path + + path = deque(self.relative_path) + path.extendleft(reversed(parent.absolute_path)) + return path + + @property + def absolute_schema_path(self) -> Sequence[str | int]: + parent = self.parent + if parent is None: + return self.relative_schema_path + + path = deque(self.relative_schema_path) + path.extendleft(reversed(parent.absolute_schema_path)) + return path + + @property + def json_path(self) -> str: + path = "$" + for elem in self.absolute_path: + if isinstance(elem, int): + path += "[" + str(elem) + "]" + elif _JSON_PATH_COMPATIBLE_PROPERTY_PATTERN.match(elem): + path += "." + elem + else: + escaped_elem = elem.replace("\\", "\\\\").replace("'", r"\'") + path += "['" + escaped_elem + "']" + return path + + def _set( + self, + type_checker: _types.TypeChecker | None = None, + **kwargs: Any, + ) -> None: + if type_checker is not None and self._type_checker is _unset: + self._type_checker = type_checker + + for k, v in kwargs.items(): + if getattr(self, k) is _unset: + setattr(self, k, v) + + def _contents(self): + attrs = ( + "message", "cause", "context", "validator", "validator_value", + "path", "schema_path", "instance", "schema", "parent", + ) + return {attr: getattr(self, attr) for attr in attrs} + + def _matches_type(self) -> bool: + try: + # We ignore this as we want to simply crash if this happens + expected = self.schema["type"] # type: ignore[index] + except (KeyError, TypeError): + return False + + if isinstance(expected, str): + return self._type_checker.is_type(self.instance, expected) + + return any( + self._type_checker.is_type(self.instance, expected_type) + for expected_type in expected + ) + + +class ValidationError(_Error): + """ + An instance was invalid under a provided schema. + """ + + _word_for_schema_in_error_message = "schema" + _word_for_instance_in_error_message = "instance" + + +class SchemaError(_Error): + """ + A schema was invalid under its corresponding metaschema. + """ + + _word_for_schema_in_error_message = "metaschema" + _word_for_instance_in_error_message = "schema" + + +@define(slots=False) +class _RefResolutionError(Exception): # noqa: PLW1641 + """ + A ref could not be resolved. + """ + + _DEPRECATION_MESSAGE = ( + "jsonschema.exceptions.RefResolutionError is deprecated as of version " + "4.18.0. If you wish to catch potential reference resolution errors, " + "directly catch referencing.exceptions.Unresolvable." 
+ ) + + _cause: Exception + + def __eq__(self, other): + if self.__class__ is not other.__class__: + return NotImplemented # pragma: no cover -- uncovered but deprecated # noqa: E501 + return self._cause == other._cause + + def __str__(self) -> str: + return str(self._cause) + + +class _WrappedReferencingError(_RefResolutionError, _Unresolvable): # pragma: no cover -- partially uncovered but to be removed # noqa: E501 + def __init__(self, cause: _Unresolvable): + object.__setattr__(self, "_wrapped", cause) + + def __eq__(self, other): + if other.__class__ is self.__class__: + return self._wrapped == other._wrapped + elif other.__class__ is self._wrapped.__class__: + return self._wrapped == other + return NotImplemented + + def __getattr__(self, attr): + return getattr(self._wrapped, attr) + + def __hash__(self): + return hash(self._wrapped) + + def __repr__(self): + return f"<WrappedReferencingError {self._wrapped!r}>" + + def __str__(self): + return f"{self._wrapped.__class__.__name__}: {self._wrapped}" + + +class UndefinedTypeCheck(Exception): + """ + A type checker was asked to check a type it did not have registered. + """ + + def __init__(self, type: str) -> None: + self.type = type + + def __str__(self) -> str: + return f"Type {self.type!r} is unknown to this type checker" + + +class UnknownType(Exception): + """ + A validator was asked to validate an instance against an unknown type. + """ + + def __init__(self, type, instance, schema): + self.type = type + self.instance = instance + self.schema = schema + + def __str__(self): + prefix = 16 * " " + + return dedent( + f"""\ + Unknown type {self.type!r} for validator with schema: + {_pretty(self.schema, prefix=prefix)} + + While checking instance: + {_pretty(self.instance, prefix=prefix)} + """.rstrip(), + ) + + +class FormatError(Exception): + """ + Validating a format failed. + """ + + def __init__(self, message, cause=None): + super().__init__(message, cause) + self.message = message + self.cause = self.__cause__ = cause + + def __str__(self): + return self.message + + +class ErrorTree: + """ + ErrorTrees make it easier to check which validations failed. + """ + + _instance = _unset + + def __init__(self, errors: Iterable[ValidationError] = ()): + self.errors: MutableMapping[str, ValidationError] = {} + self._contents: Mapping[str, ErrorTree] = defaultdict(self.__class__) + + for error in errors: + container = self + for element in error.path: + container = container[element] + container.errors[error.validator] = error + + container._instance = error.instance + + def __contains__(self, index: str | int): + """ + Check whether ``instance[index]`` has any errors. + """ + return index in self._contents + + def __getitem__(self, index): + """ + Retrieve the child tree one level down at the given ``index``. + + If the index is not in the instance that this tree corresponds + to and is not known by this tree, whatever error would be raised + by ``instance.__getitem__`` will be propagated (usually this is + some subclass of `LookupError`). + """ + if self._instance is not _unset and index not in self: + self._instance[index] + return self._contents[index] + + def __setitem__(self, index: str | int, value: ErrorTree): + """ + Add an error to the tree at the given ``index``. + + .. deprecated:: v4.20.0 + + Setting items on an `ErrorTree` is deprecated without replacement. + To populate a tree, provide all of its sub-errors when you + construct the tree. 
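+
+        A sketch of the replacement pattern (``validator`` and ``instance``
+        here stand for an assumed, already-created validator and the value
+        it checked):
+
+        .. code:: python
+
+            tree = ErrorTree(validator.iter_errors(instance))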
+ """ + warnings.warn( + "ErrorTree.__setitem__ is deprecated without replacement.", + DeprecationWarning, + stacklevel=2, + ) + self._contents[index] = value # type: ignore[index] + + def __iter__(self): + """ + Iterate (non-recursively) over the indices in the instance with errors. + """ + return iter(self._contents) + + def __len__(self): + """ + Return the `total_errors`. + """ + return self.total_errors + + def __repr__(self): + total = len(self) + errors = "error" if total == 1 else "errors" + return f"<{self.__class__.__name__} ({total} total {errors})>" + + @property + def total_errors(self): + """ + The total number of errors in the entire tree, including children. + """ + child_errors = sum(len(tree) for _, tree in self._contents.items()) + return len(self.errors) + child_errors + + +def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES): + """ + Create a key function that can be used to sort errors by relevance. + + Arguments: + weak (set): + a collection of validation keywords to consider to be + "weak". If there are two errors at the same level of the + instance and one is in the set of weak validation keywords, + the other error will take priority. By default, :kw:`anyOf` + and :kw:`oneOf` are considered weak keywords and will be + superseded by other same-level validation errors. + + strong (set): + a collection of validation keywords to consider to be + "strong" + + """ + + def relevance(error): + validator = error.validator + return ( # prefer errors which are ... + -len(error.path), # 'deeper' and thereby more specific + error.path, # earlier (for sibling errors) + validator not in weak, # for a non-low-priority keyword + validator in strong, # for a high priority keyword + not error._matches_type(), # at least match the instance's type + ) # otherwise we'll treat them the same + + return relevance + + +relevance = by_relevance() +""" +A key function (e.g. to use with `sorted`) which sorts errors by relevance. + +Example: + +.. code:: python + + sorted(validator.iter_errors(12), key=jsonschema.exceptions.relevance) +""" + + +def best_match(errors, key=relevance): + """ + Try to find an error that appears to be the best match among given errors. + + In general, errors that are higher up in the instance (i.e. for which + `ValidationError.path` is shorter) are considered better matches, + since they indicate "more" is wrong with the instance. + + If the resulting match is either :kw:`oneOf` or :kw:`anyOf`, the + *opposite* assumption is made -- i.e. the deepest error is picked, + since these keywords only need to match once, and any other errors + may not be relevant. + + Arguments: + errors (collections.abc.Iterable): + + the errors to select from. Do not provide a mixture of + errors from different validation attempts (i.e. from + different instances or schemas), since it won't produce + sensical output. + + key (collections.abc.Callable): + + the key to use when sorting errors. See `relevance` and + transitively `by_relevance` for more details (the default is + to sort with the defaults of that function). Changing the + default is only useful if you want to change the function + that rates errors but still want the error context descent + done by this function. + + Returns: + the best matching error, or ``None`` if the iterable was empty + + .. note:: + + This function is a heuristic. Its return value may change for a given + set of inputs from version to version if better heuristics are added. 
+ + """ + best = max(errors, key=key, default=None) + if best is None: + return + + while best.context: + # Calculate the minimum via nsmallest, because we don't recurse if + # all nested errors have the same relevance (i.e. if min == max == all) + smallest = heapq.nsmallest(2, best.context, key=key) + if len(smallest) == 2 and key(smallest[0]) == key(smallest[1]): # noqa: PLR2004 + return best + best = smallest[0] + return best diff --git a/.venv/lib/python3.12/site-packages/jsonschema/protocols.py b/.venv/lib/python3.12/site-packages/jsonschema/protocols.py new file mode 100644 index 0000000..b6288dc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/protocols.py @@ -0,0 +1,230 @@ +""" +typing.Protocol classes for jsonschema interfaces. +""" + +# for reference material on Protocols, see +# https://www.python.org/dev/peps/pep-0544/ + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, ClassVar, Protocol, runtime_checkable + +# in order for Sphinx to resolve references accurately from type annotations, +# it needs to see names like `jsonschema.TypeChecker` +# therefore, only import at type-checking time (to avoid circular references), +# but use `jsonschema` for any types which will otherwise not be resolvable +if TYPE_CHECKING: + from collections.abc import Iterable, Mapping + + import referencing.jsonschema + + from jsonschema import _typing + from jsonschema.exceptions import ValidationError + import jsonschema + import jsonschema.validators + +# For code authors working on the validator protocol, these are the three +# use-cases which should be kept in mind: +# +# 1. As a protocol class, it can be used in type annotations to describe the +# available methods and attributes of a validator +# 2. It is the source of autodoc for the validator documentation +# 3. It is runtime_checkable, meaning that it can be used in isinstance() +# checks. +# +# Since protocols are not base classes, isinstance() checking is limited in +# its capabilities. See docs on runtime_checkable for detail + + +@runtime_checkable +class Validator(Protocol): + """ + The protocol to which all validator classes adhere. + + Arguments: + + schema: + + The schema that the validator object will validate with. + It is assumed to be valid, and providing + an invalid schema can lead to undefined behavior. See + `Validator.check_schema` to validate a schema first. + + registry: + + a schema registry that will be used for looking up JSON references + + resolver: + + a resolver that will be used to resolve :kw:`$ref` + properties (JSON references). If unprovided, one will be created. + + .. deprecated:: v4.18.0 + + `RefResolver <_RefResolver>` has been deprecated in favor of + `referencing`, and with it, this argument. + + format_checker: + + if provided, a checker which will be used to assert about + :kw:`format` properties present in the schema. If unprovided, + *no* format validation is done, and the presence of format + within schemas is strictly informational. Certain formats + require additional packages to be installed in order to assert + against instances. Ensure you've installed `jsonschema` with + its `extra (optional) dependencies ` when + invoking ``pip``. + + .. deprecated:: v4.12.0 + + Subclassing validator classes now explicitly warns this is not part of + their public API. + + """ + + #: An object representing the validator's meta schema (the schema that + #: describes valid schemas in the given version). 
+ META_SCHEMA: ClassVar[Mapping] + + #: A mapping of validation keywords (`str`\s) to functions that + #: validate the keyword with that name. For more information see + #: `creating-validators`. + VALIDATORS: ClassVar[Mapping] + + #: A `jsonschema.TypeChecker` that will be used when validating + #: :kw:`type` keywords in JSON schemas. + TYPE_CHECKER: ClassVar[jsonschema.TypeChecker] + + #: A `jsonschema.FormatChecker` that will be used when validating + #: :kw:`format` keywords in JSON schemas. + FORMAT_CHECKER: ClassVar[jsonschema.FormatChecker] + + #: A function which given a schema returns its ID. + ID_OF: _typing.id_of + + #: The schema that will be used to validate instances + schema: Mapping | bool + + def __init__( + self, + schema: Mapping | bool, + resolver: Any = None, # deprecated + format_checker: jsonschema.FormatChecker | None = None, + *, + registry: referencing.jsonschema.SchemaRegistry = ..., + ) -> None: ... + + @classmethod + def check_schema(cls, schema: Mapping | bool) -> None: + """ + Validate the given schema against the validator's `META_SCHEMA`. + + Raises: + + `jsonschema.exceptions.SchemaError`: + + if the schema is invalid + + """ + + def is_type(self, instance: Any, type: str) -> bool: + """ + Check if the instance is of the given (JSON Schema) type. + + Arguments: + + instance: + + the value to check + + type: + + the name of a known (JSON Schema) type + + Returns: + + whether the instance is of the given type + + Raises: + + `jsonschema.exceptions.UnknownType`: + + if ``type`` is not a known type + + """ + + def is_valid(self, instance: Any) -> bool: + """ + Check if the instance is valid under the current `schema`. + + Returns: + + whether the instance is valid or not + + >>> schema = {"maxItems" : 2} + >>> Draft202012Validator(schema).is_valid([2, 3, 4]) + False + + """ + + def iter_errors(self, instance: Any) -> Iterable[ValidationError]: + r""" + Lazily yield each of the validation errors in the given instance. + + >>> schema = { + ... "type" : "array", + ... "items" : {"enum" : [1, 2, 3]}, + ... "maxItems" : 2, + ... } + >>> v = Draft202012Validator(schema) + >>> for error in sorted(v.iter_errors([2, 3, 4]), key=str): + ... print(error.message) + 4 is not one of [1, 2, 3] + [2, 3, 4] is too long + + .. deprecated:: v4.0.0 + + Calling this function with a second schema argument is deprecated. + Use `Validator.evolve` instead. + """ + + def validate(self, instance: Any) -> None: + """ + Check if the instance is valid under the current `schema`. + + Raises: + + `jsonschema.exceptions.ValidationError`: + + if the instance is invalid + + >>> schema = {"maxItems" : 2} + >>> Draft202012Validator(schema).validate([2, 3, 4]) + Traceback (most recent call last): + ... + ValidationError: [2, 3, 4] is too long + + """ + + def evolve(self, **kwargs) -> Validator: + """ + Create a new validator like this one, but with given changes. + + Preserves all other attributes, so can be used to e.g. create a + validator with a different schema but with the same :kw:`$ref` + resolution behavior. + + >>> validator = Draft202012Validator({}) + >>> validator.evolve(schema={"type": "number"}) + Draft202012Validator(schema={'type': 'number'}, format_checker=None) + + The returned object satisfies the validator protocol, but may not + be of the same concrete class! In particular this occurs + when a :kw:`$ref` occurs to a schema with a different + :kw:`$schema` than this one (i.e. for a different draft). + + >>> validator.evolve( + ... 
schema={"$schema": Draft7Validator.META_SCHEMA["$id"]} + ... ) + Draft7Validator(schema=..., format_checker=None) + """ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__init__.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..824a0de Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/_suite.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/_suite.cpython-312.pyc new file mode 100644 index 0000000..d3821e5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/_suite.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/fuzz_validate.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/fuzz_validate.cpython-312.pyc new file mode 100644 index 0000000..cc5e38a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/fuzz_validate.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_cli.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_cli.cpython-312.pyc new file mode 100644 index 0000000..ae0af70 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_cli.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_deprecations.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_deprecations.cpython-312.pyc new file mode 100644 index 0000000..21e833b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_deprecations.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_exceptions.cpython-312.pyc new file mode 100644 index 0000000..eeb1996 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_format.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_format.cpython-312.pyc new file mode 100644 index 0000000..b77a9c0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_format.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-312.pyc new file mode 100644 index 0000000..59fb57b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_types.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_types.cpython-312.pyc new file mode 100644 index 
0000000..c07ef7d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_types.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_utils.cpython-312.pyc new file mode 100644 index 0000000..01c67fa Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_validators.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_validators.cpython-312.pyc new file mode 100644 index 0000000..1d76a08 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/__pycache__/test_validators.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/_suite.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/_suite.py new file mode 100644 index 0000000..d61d382 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/_suite.py @@ -0,0 +1,285 @@ +""" +Python representations of the JSON Schema Test Suite tests. +""" +from __future__ import annotations + +from contextlib import suppress +from functools import partial +from pathlib import Path +from typing import TYPE_CHECKING, Any +import json +import os +import re +import sys +import unittest + +from attrs import field, frozen +from referencing import Registry +import referencing.jsonschema + +if TYPE_CHECKING: + from collections.abc import Iterable, Mapping, Sequence + + from referencing.jsonschema import Schema + import pyperf + +from jsonschema.validators import _VALIDATORS +import jsonschema + +MAGIC_REMOTE_URL = "http://localhost:1234" + +_DELIMITERS = re.compile(r"[\W\- ]+") + + +def _find_suite(): + root = os.environ.get("JSON_SCHEMA_TEST_SUITE") + if root is not None: + return Path(root) + + root = Path(jsonschema.__file__).parent.parent / "json" + if not root.is_dir(): # pragma: no cover + raise ValueError( + ( + "Can't find the JSON-Schema-Test-Suite directory. " + "Set the 'JSON_SCHEMA_TEST_SUITE' environment " + "variable or run the tests from alongside a checkout " + "of the suite." 
+ ), + ) + return root + + +@frozen +class Suite: + + _root: Path = field(factory=_find_suite) + + + def benchmark(self, runner: pyperf.Runner): # pragma: no cover + for name, Validator in _VALIDATORS.items(): + self.version(name=name).benchmark( + runner=runner, + Validator=Validator, + ) + + def version(self, name) -> Version: + Validator = _VALIDATORS[name] + uri: str = Validator.ID_OF(Validator.META_SCHEMA) # type: ignore[assignment] + specification = referencing.jsonschema.specification_with(uri) + + registry = Registry().with_contents( + remotes_in(root=self._root / "remotes", name=name, uri=uri), + default_specification=specification, + ) + return Version( + name=name, + path=self._root / "tests" / name, + remotes=registry, + ) + + +@frozen +class Version: + + _path: Path + _remotes: referencing.jsonschema.SchemaRegistry + + name: str + + def benchmark(self, **kwargs): # pragma: no cover + for case in self.cases(): + case.benchmark(**kwargs) + + def cases(self) -> Iterable[_Case]: + return self._cases_in(paths=self._path.glob("*.json")) + + def format_cases(self) -> Iterable[_Case]: + return self._cases_in(paths=self._path.glob("optional/format/*.json")) + + def optional_cases_of(self, name: str) -> Iterable[_Case]: + return self._cases_in(paths=[self._path / "optional" / f"{name}.json"]) + + def to_unittest_testcase(self, *groups, **kwargs): + name = kwargs.pop("name", "Test" + self.name.title().replace("-", "")) + methods = { + method.__name__: method + for method in ( + test.to_unittest_method(**kwargs) + for group in groups + for case in group + for test in case.tests + ) + } + cls = type(name, (unittest.TestCase,), methods) + + # We're doing crazy things, so if they go wrong, like a function + # behaving differently on some other interpreter, just make them + # not happen. + with suppress(Exception): + cls.__module__ = _someone_save_us_the_module_of_the_caller() + + return cls + + def _cases_in(self, paths: Iterable[Path]) -> Iterable[_Case]: + for path in paths: + for case in json.loads(path.read_text(encoding="utf-8")): + yield _Case.from_dict( + case, + version=self, + subject=path.stem, + remotes=self._remotes, + ) + + +@frozen +class _Case: + + version: Version + + subject: str + description: str + schema: Mapping[str, Any] | bool + tests: list[_Test] + comment: str | None = None + specification: Sequence[dict[str, str]] = () + + @classmethod + def from_dict(cls, data, remotes, **kwargs): + data.update(kwargs) + tests = [ + _Test( + version=data["version"], + subject=data["subject"], + case_description=data["description"], + schema=data["schema"], + remotes=remotes, + **test, + ) for test in data.pop("tests") + ] + return cls(tests=tests, **data) + + def benchmark(self, runner: pyperf.Runner, **kwargs): # pragma: no cover + for test in self.tests: + runner.bench_func( + test.fully_qualified_name, + partial(test.validate_ignoring_errors, **kwargs), + ) + + +def remotes_in( + root: Path, + name: str, + uri: str, +) -> Iterable[tuple[str, Schema]]: + # This messy logic is because the test suite is terrible at indicating + # what remotes are needed for what drafts, and mixes in schemas which + # have no $schema and which are invalid under earlier versions, in with + # other schemas which are needed for tests. 
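+    #
+    # As an illustration (not exhaustive): the boolean-schema ``tree.json``
+    # remote is skipped for draft3/draft4 runs, and a ``$schema``-less
+    # remote under another draft's directory is only yielded when running
+    # that draft's own suite.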
+ + for each in root.rglob("*.json"): + schema = json.loads(each.read_text()) + + relative = str(each.relative_to(root)).replace("\\", "/") + + if ( + ( # invalid boolean schema + name in {"draft3", "draft4"} + and each.stem == "tree" + ) or + ( # draft/*.json + "$schema" not in schema + and relative.startswith("draft") + and not relative.startswith(name) + ) + ): + continue + yield f"{MAGIC_REMOTE_URL}/{relative}", schema + + +@frozen(repr=False) +class _Test: + + version: Version + + subject: str + case_description: str + description: str + + data: Any + schema: Mapping[str, Any] | bool + + valid: bool + + _remotes: referencing.jsonschema.SchemaRegistry + + comment: str | None = None + + def __repr__(self): # pragma: no cover + return f"<Test {self.fully_qualified_name}>" + + @property + def fully_qualified_name(self): # pragma: no cover + return " > ".join( # noqa: FLY002 + [ + self.version.name, + self.subject, + self.case_description, + self.description, + ], + ) + + def to_unittest_method(self, skip=lambda test: None, **kwargs): + if self.valid: + def fn(this): + self.validate(**kwargs) + else: + def fn(this): + with this.assertRaises(jsonschema.ValidationError): + self.validate(**kwargs) + + fn.__name__ = "_".join( + [ + "test", + _DELIMITERS.sub("_", self.subject), + _DELIMITERS.sub("_", self.case_description), + _DELIMITERS.sub("_", self.description), + ], + ) + reason = skip(self) + if reason is None or os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0": + return fn + elif os.environ.get("JSON_SCHEMA_EXPECTED_FAILURES", "0") != "0": # pragma: no cover # noqa: E501 + return unittest.expectedFailure(fn) + else: + return unittest.skip(reason)(fn) + + def validate(self, Validator, **kwargs): + Validator.check_schema(self.schema) + validator = Validator( + schema=self.schema, + registry=self._remotes, + **kwargs, + ) + if os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0": # pragma: no cover + breakpoint() # noqa: T100 + validator.validate(instance=self.data) + + def validate_ignoring_errors(self, Validator): # pragma: no cover + with suppress(jsonschema.ValidationError): + self.validate(Validator=Validator) + + +def _someone_save_us_the_module_of_the_caller(): + """ + The FQON of the module 2nd stack frames up from here. + + This is intended to allow us to dynamically return test case classes that + are indistinguishable from being defined in the module that wants them. + + Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run + the class that really is running. + + Save us all, this is all so so so so so terrible. + """ + + return sys._getframe(2).f_globals["__name__"] diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/fuzz_validate.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/fuzz_validate.py new file mode 100644 index 0000000..c12e88b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/fuzz_validate.py @@ -0,0 +1,50 @@ +""" +Fuzzing setup for OSS-Fuzz. + +See https://github.com/google/oss-fuzz/tree/master/projects/jsonschema for the +other half of the setup here. 
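+
+A local run is roughly (a sketch -- it assumes the ``atheris`` package,
+which the OSS-Fuzz build provides, is installed):
+
+    python fuzz_validate.py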
+""" +import sys + +from hypothesis import given, strategies + +import jsonschema + +PRIM = strategies.one_of( + strategies.booleans(), + strategies.integers(), + strategies.floats(allow_nan=False, allow_infinity=False), + strategies.text(), +) +DICT = strategies.recursive( + base=strategies.one_of( + strategies.booleans(), + strategies.dictionaries(strategies.text(), PRIM), + ), + extend=lambda inner: strategies.dictionaries(strategies.text(), inner), +) + + +@given(obj1=DICT, obj2=DICT) +def test_schemas(obj1, obj2): + try: + jsonschema.validate(instance=obj1, schema=obj2) + except jsonschema.exceptions.ValidationError: + pass + except jsonschema.exceptions.SchemaError: + pass + + +def main(): + atheris.instrument_all() + atheris.Setup( + sys.argv, + test_schemas.hypothesis.fuzz_one_input, + enable_python_coverage=True, + ) + atheris.Fuzz() + + +if __name__ == "__main__": + import atheris + main() diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/test_cli.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_cli.py new file mode 100644 index 0000000..bed9f3e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_cli.py @@ -0,0 +1,904 @@ +from contextlib import redirect_stderr, redirect_stdout +from importlib import metadata +from io import StringIO +from json import JSONDecodeError +from pathlib import Path +from textwrap import dedent +from unittest import TestCase +import json +import os +import subprocess +import sys +import tempfile +import warnings + +from jsonschema import Draft4Validator, Draft202012Validator +from jsonschema.exceptions import ( + SchemaError, + ValidationError, + _RefResolutionError, +) +from jsonschema.validators import _LATEST_VERSION, validate + +with warnings.catch_warnings(): + warnings.simplefilter("ignore") + from jsonschema import cli + + +def fake_validator(*errors): + errors = list(reversed(errors)) + + class FakeValidator: + def __init__(self, *args, **kwargs): + pass + + def iter_errors(self, instance): + if errors: + return errors.pop() + return [] # pragma: no cover + + @classmethod + def check_schema(self, schema): + pass + + return FakeValidator + + +def fake_open(all_contents): + def open(path): + contents = all_contents.get(path) + if contents is None: + raise FileNotFoundError(path) + return StringIO(contents) + return open + + +def _message_for(non_json): + try: + json.loads(non_json) + except JSONDecodeError as error: + return str(error) + else: # pragma: no cover + raise RuntimeError("Tried and failed to capture a JSON dump error.") + + +class TestCLI(TestCase): + def run_cli( + self, argv, files=None, stdin=StringIO(), exit_code=0, **override, + ): + arguments = cli.parse_args(argv) + arguments.update(override) + + self.assertFalse(hasattr(cli, "open")) + cli.open = fake_open(files or {}) + try: + stdout, stderr = StringIO(), StringIO() + actual_exit_code = cli.run( + arguments, + stdin=stdin, + stdout=stdout, + stderr=stderr, + ) + finally: + del cli.open + + self.assertEqual( + actual_exit_code, exit_code, msg=dedent( + f""" + Expected an exit code of {exit_code} != {actual_exit_code}. 
+ + stdout: {stdout.getvalue()} + + stderr: {stderr.getvalue()} + """, + ), + ) + return stdout.getvalue(), stderr.getvalue() + + def assertOutputs(self, stdout="", stderr="", **kwargs): + self.assertEqual( + self.run_cli(**kwargs), + (dedent(stdout), dedent(stderr)), + ) + + def test_invalid_instance(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="12: I am an error!\n", + ) + + def test_invalid_instance_pretty_output(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["-i", "some_instance", "--output", "pretty", "some_schema"], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_instance)=== + + I am an error! + ----------------------------- + """, + ) + + def test_invalid_instance_explicit_plain_output(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["--output", "plain", "-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="12: I am an error!\n", + ) + + def test_invalid_instance_multiple_errors(self): + instance = 12 + first = ValidationError("First error", instance=instance) + second = ValidationError("Second error", instance=instance) + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(instance), + ), + validator=fake_validator([first, second]), + + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 12: First error + 12: Second error + """, + ) + + def test_invalid_instance_multiple_errors_pretty_output(self): + instance = 12 + first = ValidationError("First error", instance=instance) + second = ValidationError("Second error", instance=instance) + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(instance), + ), + validator=fake_validator([first, second]), + + argv=["-i", "some_instance", "--output", "pretty", "some_schema"], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_instance)=== + + First error + ----------------------------- + ===[ValidationError]===(some_instance)=== + + Second error + ----------------------------- + """, + ) + + def test_multiple_invalid_instances(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + 12: An error + 12: Another error + foo: BOOM + """, + ) + + def 
test_multiple_invalid_instances_pretty_output(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "--output", "pretty", + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_first_instance)=== + + An error + ----------------------------- + ===[ValidationError]===(some_first_instance)=== + + Another error + ----------------------------- + ===[ValidationError]===(some_second_instance)=== + + BOOM + ----------------------------- + """, + ) + + def test_custom_error_format(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "--error-format", ":{error.message}._-_.{error.instance}:", + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr=":An error._-_.12::Another error._-_.12::BOOM._-_.foo:", + ) + + def test_invalid_schema(self): + self.assertOutputs( + files=dict(some_schema='{"type": 12}'), + argv=["some_schema"], + + exit_code=1, + stderr="""\ + 12: 12 is not valid under any of the given schemas + """, + ) + + def test_invalid_schema_pretty_output(self): + schema = {"type": 12} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance="") + error = str(e.exception) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_schema_multiple_errors(self): + self.assertOutputs( + files=dict(some_schema='{"type": 12, "items": 57}'), + argv=["some_schema"], + + exit_code=1, + stderr="""\ + 57: 57 is not of type 'object', 'boolean' + """, + ) + + def test_invalid_schema_multiple_errors_pretty_output(self): + schema = {"type": 12, "items": 57} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance="") + error = str(e.exception) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_schema_with_invalid_instance(self): + """ + "Validating" an instance that's invalid under an invalid schema + just shows the schema error. 
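+
+        (``run`` checks the schema with ``Validator.check_schema`` before
+        loading any instance, so the instance error is never reached.)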
+ """ + self.assertOutputs( + files=dict( + some_schema='{"type": 12, "minimum": 30}', + some_instance="13", + ), + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 12: 12 is not valid under any of the given schemas + """, + ) + + def test_invalid_schema_with_invalid_instance_pretty_output(self): + instance, schema = 13, {"type": 12, "minimum": 30} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance=instance) + error = str(e.exception) + + self.assertOutputs( + files=dict( + some_schema=json.dumps(schema), + some_instance=json.dumps(instance), + ), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_instance_continues_with_the_rest(self): + self.assertOutputs( + files=dict( + some_schema='{"minimum": 30}', + first_instance="not valid JSON!", + second_instance="12", + ), + argv=[ + "-i", "first_instance", + "-i", "second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + Failed to parse 'first_instance': {} + 12: 12 is less than the minimum of 30 + """.format(_message_for("not valid JSON!")), + ) + + def test_custom_error_format_applies_to_schema_errors(self): + instance, schema = 13, {"type": 12, "minimum": 30} + + with self.assertRaises(SchemaError): + validate(schema=schema, instance=instance) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + + argv=[ + "--error-format", ":{error.message}._-_.{error.instance}:", + "some_schema", + ], + + exit_code=1, + stderr=":12 is not valid under any of the given schemas._-_.12:", + ) + + def test_instance_is_invalid_JSON(self): + instance = "not valid JSON!" + + self.assertOutputs( + files=dict(some_schema="{}", some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr=f"""\ + Failed to parse 'some_instance': {_message_for(instance)} + """, + ) + + def test_instance_is_invalid_JSON_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict( + some_schema="{}", + some_instance="not valid JSON!", + ), + + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_instance)===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_schema", stderr) + + def test_instance_is_invalid_JSON_on_stdin(self): + instance = "not valid JSON!" + + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO(instance), + + argv=["some_schema"], + + exit_code=1, + stderr=f"""\ + Failed to parse : {_message_for(instance)} + """, + ) + + def test_instance_is_invalid_JSON_on_stdin_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict(some_schema="{}"), + stdin=StringIO("not valid JSON!"), + + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "()===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_schema", stderr) + + def test_schema_is_invalid_JSON(self): + schema = "not valid JSON!" 
+ + self.assertOutputs( + files=dict(some_schema=schema), + + argv=["some_schema"], + + exit_code=1, + stderr=f"""\ + Failed to parse 'some_schema': {_message_for(schema)} + """, + ) + + def test_schema_is_invalid_JSON_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict(some_schema="not valid JSON!"), + + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_schema)===\n\nTraceback (most recent call last):\n", + stderr, + ) + + def test_schema_and_instance_are_both_invalid_JSON(self): + """ + Only the schema error is reported, as we abort immediately. + """ + schema, instance = "not valid JSON!", "also not valid JSON!" + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + + argv=["some_schema"], + + exit_code=1, + stderr=f"""\ + Failed to parse 'some_schema': {_message_for(schema)} + """, + ) + + def test_schema_and_instance_are_both_invalid_JSON_pretty_output(self): + """ + Only the schema error is reported, as we abort immediately. + """ + stdout, stderr = self.run_cli( + files=dict( + some_schema="not valid JSON!", + some_instance="also not valid JSON!", + ), + + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_schema)===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_instance", stderr) + + def test_instance_does_not_exist(self): + self.assertOutputs( + files=dict(some_schema="{}"), + argv=["-i", "nonexisting_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 'nonexisting_instance' does not exist. + """, + ) + + def test_instance_does_not_exist_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}"), + argv=[ + "--output", "pretty", + "-i", "nonexisting_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_instance)=== + + 'nonexisting_instance' does not exist. + ----------------------------- + """, + ) + + def test_schema_does_not_exist(self): + self.assertOutputs( + argv=["nonexisting_schema"], + + exit_code=1, + stderr="'nonexisting_schema' does not exist.\n", + ) + + def test_schema_does_not_exist_pretty_output(self): + self.assertOutputs( + argv=["--output", "pretty", "nonexisting_schema"], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_schema)=== + + 'nonexisting_schema' does not exist. + ----------------------------- + """, + ) + + def test_neither_instance_nor_schema_exist(self): + self.assertOutputs( + argv=["-i", "nonexisting_instance", "nonexisting_schema"], + + exit_code=1, + stderr="'nonexisting_schema' does not exist.\n", + ) + + def test_neither_instance_nor_schema_exist_pretty_output(self): + self.assertOutputs( + argv=[ + "--output", "pretty", + "-i", "nonexisting_instance", + "nonexisting_schema", + ], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_schema)=== + + 'nonexisting_schema' does not exist. 
+ ----------------------------- + """, + ) + + def test_successful_validation(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + stdout="===[SUCCESS]===(some_instance)===\n", + stderr="", + ) + + def test_successful_validation_of_stdin(self): + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO("{}"), + argv=["some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_of_stdin_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO("{}"), + argv=["--output", "pretty", "some_schema"], + stdout="===[SUCCESS]===(<stdin>)===\n", + stderr="", + ) + + def test_successful_validation_of_just_the_schema(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_of_just_the_schema_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + stdout="===[SUCCESS]===(some_instance)===\n", + stderr="", + ) + + def test_successful_validation_via_explicit_base_uri(self): + ref_schema_file = tempfile.NamedTemporaryFile(delete=False) # noqa: SIM115 + ref_schema_file.close() + self.addCleanup(os.remove, ref_schema_file.name) + + ref_path = Path(ref_schema_file.name) + ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}') + + schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}' + + self.assertOutputs( + files=dict(some_schema=schema, some_instance="1"), + argv=[ + "-i", "some_instance", + "--base-uri", ref_path.parent.as_uri() + "/", + "some_schema", + ], + stdout="", + stderr="", + ) + + def test_unsuccessful_validation_via_explicit_base_uri(self): + ref_schema_file = tempfile.NamedTemporaryFile(delete=False) # noqa: SIM115 + ref_schema_file.close() + self.addCleanup(os.remove, ref_schema_file.name) + + ref_path = Path(ref_schema_file.name) + ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}') + + schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}' + + self.assertOutputs( + files=dict(some_schema=schema, some_instance='"1"'), + argv=[ + "-i", "some_instance", + "--base-uri", ref_path.parent.as_uri() + "/", + "some_schema", + ], + exit_code=1, + stdout="", + stderr="1: '1' is not of type 'integer'\n", + ) + + def test_nonexistent_file_with_explicit_base_uri(self): + schema = '{"$ref": "someNonexistentFile.json#definitions/num"}' + instance = "1" + + with self.assertRaises(_RefResolutionError) as e: + self.assertOutputs( + files=dict( + some_schema=schema, + some_instance=instance, + ), + argv=[ + "-i", "some_instance", + "--base-uri", Path.cwd().as_uri(), + "some_schema", + ], + ) + error = str(e.exception) + self.assertIn(f"{os.sep}someNonexistentFile.json'", error) + + def test_invalid_explicit_base_uri(self): + schema = '{"$ref": "foo.json#definitions/num"}' + instance = "1" + + with self.assertRaises(_RefResolutionError) as e: + self.assertOutputs( + files=dict( + some_schema=schema, + some_instance=instance, + ), + argv=[ + "-i", "some_instance", + "--base-uri", "not@UR1", + "some_schema", + ], + ) + error = str(e.exception) + self.assertEqual( + error, 
"unknown url type: 'foo.json'", + ) + + def test_it_validates_using_the_latest_validator_when_unspecified(self): + # There isn't a better way now I can think of to ensure that the + # latest version was used, given that the call to validator_for + # is hidden inside the CLI, so guard that that's the case, and + # this test will have to be updated when versions change until + # we can think of a better way to ensure this behavior. + self.assertIs(Draft202012Validator, _LATEST_VERSION) + + self.assertOutputs( + files=dict(some_schema='{"const": "check"}', some_instance='"a"'), + argv=["-i", "some_instance", "some_schema"], + exit_code=1, + stdout="", + stderr="a: 'check' was expected\n", + ) + + def test_it_validates_using_draft7_when_specified(self): + """ + Specifically, `const` validation applies for Draft 7. + """ + schema = """ + { + "$schema": "http://json-schema.org/draft-07/schema#", + "const": "check" + } + """ + instance = '"foo"' + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + exit_code=1, + stdout="", + stderr="foo: 'check' was expected\n", + ) + + def test_it_validates_using_draft4_when_specified(self): + """ + Specifically, `const` validation *does not* apply for Draft 4. + """ + schema = """ + { + "$schema": "http://json-schema.org/draft-04/schema#", + "const": "check" + } + """ + instance = '"foo"' + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + +class TestParser(TestCase): + + FakeValidator = fake_validator() + + def test_find_validator_by_fully_qualified_object_name(self): + arguments = cli.parse_args( + [ + "--validator", + "jsonschema.tests.test_cli.TestParser.FakeValidator", + "--instance", "mem://some/instance", + "mem://some/schema", + ], + ) + self.assertIs(arguments["validator"], self.FakeValidator) + + def test_find_validator_in_jsonschema(self): + arguments = cli.parse_args( + [ + "--validator", "Draft4Validator", + "--instance", "mem://some/instance", + "mem://some/schema", + ], + ) + self.assertIs(arguments["validator"], Draft4Validator) + + def cli_output_for(self, *argv): + stdout, stderr = StringIO(), StringIO() + with redirect_stdout(stdout), redirect_stderr(stderr): # noqa: SIM117 + with self.assertRaises(SystemExit): + cli.parse_args(argv) + return stdout.getvalue(), stderr.getvalue() + + def test_unknown_output(self): + stdout, stderr = self.cli_output_for( + "--output", "foo", + "mem://some/schema", + ) + self.assertIn("invalid choice: 'foo'", stderr) + self.assertFalse(stdout) + + def test_useless_error_format(self): + stdout, stderr = self.cli_output_for( + "--output", "pretty", + "--error-format", "foo", + "mem://some/schema", + ) + self.assertIn( + "--error-format can only be used with --output plain", + stderr, + ) + self.assertFalse(stdout) + + +class TestCLIIntegration(TestCase): + def test_license(self): + our_metadata = metadata.metadata("jsonschema") + self.assertEqual(our_metadata.get("License-Expression"), "MIT") + + def test_version(self): + version = subprocess.check_output( + [sys.executable, "-W", "ignore", "-m", "jsonschema", "--version"], + stderr=subprocess.STDOUT, + ) + version = version.decode("utf-8").strip() + self.assertEqual(version, metadata.version("jsonschema")) + + def test_no_arguments_shows_usage_notes(self): + output = subprocess.check_output( + [sys.executable, "-m", "jsonschema"], + stderr=subprocess.STDOUT, + ) + output_for_help = 
subprocess.check_output( + [sys.executable, "-m", "jsonschema", "--help"], + stderr=subprocess.STDOUT, + ) + self.assertEqual(output, output_for_help) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/test_deprecations.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_deprecations.py new file mode 100644 index 0000000..a54b02f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_deprecations.py @@ -0,0 +1,432 @@ +from contextlib import contextmanager +from io import BytesIO +from unittest import TestCase, mock +import importlib.metadata +import json +import subprocess +import sys +import urllib.request + +import referencing.exceptions + +from jsonschema import FormatChecker, exceptions, protocols, validators + + +class TestDeprecations(TestCase): + def test_version(self): + """ + As of v4.0.0, __version__ is deprecated in favor of importlib.metadata. + """ + + message = "Accessing jsonschema.__version__ is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import __version__ + + self.assertEqual(__version__, importlib.metadata.version("jsonschema")) + self.assertEqual(w.filename, __file__) + + def test_validators_ErrorTree(self): + """ + As of v4.0.0, importing ErrorTree from jsonschema.validators is + deprecated in favor of doing so from jsonschema.exceptions. + """ + + message = "Importing ErrorTree from jsonschema.validators is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema.validators import ErrorTree + + self.assertEqual(ErrorTree, exceptions.ErrorTree) + self.assertEqual(w.filename, __file__) + + def test_import_ErrorTree(self): + """ + As of v4.18.0, importing ErrorTree from the package root is + deprecated in favor of doing so from jsonschema.exceptions. + """ + + message = "Importing ErrorTree directly from the jsonschema package " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import ErrorTree + + self.assertEqual(ErrorTree, exceptions.ErrorTree) + self.assertEqual(w.filename, __file__) + + def test_ErrorTree_setitem(self): + """ + As of v4.20.0, setting items on an ErrorTree is deprecated. + """ + + e = exceptions.ValidationError("some error", path=["foo"]) + tree = exceptions.ErrorTree() + subtree = exceptions.ErrorTree(errors=[e]) + + message = "ErrorTree.__setitem__ is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + tree["foo"] = subtree + + self.assertEqual(tree["foo"], subtree) + self.assertEqual(w.filename, __file__) + + def test_import_FormatError(self): + """ + As of v4.18.0, importing FormatError from the package root is + deprecated in favor of doing so from jsonschema.exceptions. + """ + + message = "Importing FormatError directly from the jsonschema package " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import FormatError + + self.assertEqual(FormatError, exceptions.FormatError) + self.assertEqual(w.filename, __file__) + + def test_import_Validator(self): + """ + As of v4.19.0, importing Validator from the package root is + deprecated in favor of doing so from jsonschema.protocols. 
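+
+ A minimal sketch of the preferred spelling (illustrative only, using
+ the public protocols module):
+
+ from jsonschema.protocols import Validator
+
+ def takes_any_validator(validator: Validator) -> None: ...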
+ """ + + message = "Importing Validator directly from the jsonschema package " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import Validator + + self.assertEqual(Validator, protocols.Validator) + self.assertEqual(w.filename, __file__) + + def test_validators_validators(self): + """ + As of v4.0.0, accessing jsonschema.validators.validators is + deprecated. + """ + + message = "Accessing jsonschema.validators.validators is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + value = validators.validators + + self.assertEqual(value, validators._VALIDATORS) + self.assertEqual(w.filename, __file__) + + def test_validators_meta_schemas(self): + """ + As of v4.0.0, accessing jsonschema.validators.meta_schemas is + deprecated. + """ + + message = "Accessing jsonschema.validators.meta_schemas is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + value = validators.meta_schemas + + self.assertEqual(value, validators._META_SCHEMAS) + self.assertEqual(w.filename, __file__) + + def test_RefResolver_in_scope(self): + """ + As of v4.0.0, RefResolver.in_scope is deprecated. + """ + + resolver = validators._RefResolver.from_schema({}) + message = "jsonschema.RefResolver.in_scope is deprecated " + with self.assertWarnsRegex(DeprecationWarning, message) as w: # noqa: SIM117 + with resolver.in_scope("foo"): + pass + + self.assertEqual(w.filename, __file__) + + def test_Validator_is_valid_two_arguments(self): + """ + As of v4.0.0, calling is_valid with two arguments (to provide a + different schema) is deprecated. + """ + + validator = validators.Draft7Validator({}) + message = "Passing a schema to Validator.is_valid is deprecated " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + result = validator.is_valid("foo", {"type": "number"}) + + self.assertFalse(result) + self.assertEqual(w.filename, __file__) + + def test_Validator_iter_errors_two_arguments(self): + """ + As of v4.0.0, calling iter_errors with two arguments (to provide a + different schema) is deprecated. + """ + + validator = validators.Draft7Validator({}) + message = "Passing a schema to Validator.iter_errors is deprecated " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + error, = validator.iter_errors("foo", {"type": "number"}) + + self.assertEqual(error.validator, "type") + self.assertEqual(w.filename, __file__) + + def test_Validator_resolver(self): + """ + As of v4.18.0, accessing Validator.resolver is deprecated. + """ + + validator = validators.Draft7Validator({}) + message = "Accessing Draft7Validator.resolver is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + self.assertIsInstance(validator.resolver, validators._RefResolver) + + self.assertEqual(w.filename, __file__) + + def test_RefResolver(self): + """ + As of v4.18.0, RefResolver is fully deprecated. + """ + + message = "jsonschema.RefResolver is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import RefResolver + self.assertEqual(w.filename, __file__) + + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema.validators import RefResolver # noqa: F401 + self.assertEqual(w.filename, __file__) + + def test_RefResolutionError(self): + """ + As of v4.18.0, RefResolutionError is deprecated in favor of directly + catching errors from the referencing library. 
+ """ + + message = "jsonschema.exceptions.RefResolutionError is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import RefResolutionError + + self.assertEqual(RefResolutionError, exceptions._RefResolutionError) + self.assertEqual(w.filename, __file__) + + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema.exceptions import RefResolutionError + + self.assertEqual(RefResolutionError, exceptions._RefResolutionError) + self.assertEqual(w.filename, __file__) + + def test_catching_Unresolvable_directly(self): + """ + This behavior is the intended behavior (i.e. it's not deprecated), but + given we do "tricksy" things in the iterim to wrap exceptions in a + multiple inheritance subclass, we need to be extra sure it works and + stays working. + """ + validator = validators.Draft202012Validator({"$ref": "urn:nothing"}) + + with self.assertRaises(referencing.exceptions.Unresolvable) as e: + validator.validate(12) + + expected = referencing.exceptions.Unresolvable(ref="urn:nothing") + self.assertEqual( + (e.exception, str(e.exception)), + (expected, "Unresolvable: urn:nothing"), + ) + + def test_catching_Unresolvable_via_RefResolutionError(self): + """ + Until RefResolutionError is removed, it is still possible to catch + exceptions from reference resolution using it, even though they may + have been raised by referencing. + """ + with self.assertWarns(DeprecationWarning): + from jsonschema import RefResolutionError + + validator = validators.Draft202012Validator({"$ref": "urn:nothing"}) + + with self.assertRaises(referencing.exceptions.Unresolvable) as u: + validator.validate(12) + + with self.assertRaises(RefResolutionError) as e: + validator.validate(12) + + self.assertEqual( + (e.exception, str(e.exception)), + (u.exception, "Unresolvable: urn:nothing"), + ) + + def test_WrappedReferencingError_hashability(self): + """ + Ensure the wrapped referencing errors are hashable when possible. + """ + with self.assertWarns(DeprecationWarning): + from jsonschema import RefResolutionError + + validator = validators.Draft202012Validator({"$ref": "urn:nothing"}) + + with self.assertRaises(referencing.exceptions.Unresolvable) as u: + validator.validate(12) + + with self.assertRaises(RefResolutionError) as e: + validator.validate(12) + + self.assertIn(e.exception, {u.exception}) + self.assertIn(u.exception, {e.exception}) + + def test_Validator_subclassing(self): + """ + As of v4.12.0, subclassing a validator class produces an explicit + deprecation warning. + + This was never intended to be public API (and some comments over the + years in issues said so, but obviously that's not a great way to make + sure it's followed). + + A future version will explicitly raise an error. + """ + + message = "Subclassing validator classes is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + class Subclass(validators.Draft202012Validator): + pass + + self.assertEqual(w.filename, __file__) + + with self.assertWarnsRegex(DeprecationWarning, message) as w: + class AnotherSubclass(validators.create(meta_schema={})): + pass + + def test_FormatChecker_cls_checks(self): + """ + As of v4.14.0, FormatChecker.cls_checks is deprecated without + replacement. 
+ """ + + self.addCleanup(FormatChecker.checkers.pop, "boom", None) + + message = "FormatChecker.cls_checks " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + FormatChecker.cls_checks("boom") + + self.assertEqual(w.filename, __file__) + + def test_draftN_format_checker(self): + """ + As of v4.16.0, accessing jsonschema.draftn_format_checker is deprecated + in favor of Validator.FORMAT_CHECKER. + """ + + message = "Accessing jsonschema.draft202012_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft202012_format_checker + + self.assertIs( + draft202012_format_checker, + validators.Draft202012Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft201909_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft201909_format_checker + + self.assertIs( + draft201909_format_checker, + validators.Draft201909Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft7_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft7_format_checker + + self.assertIs( + draft7_format_checker, + validators.Draft7Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft6_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft6_format_checker + + self.assertIs( + draft6_format_checker, + validators.Draft6Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft4_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft4_format_checker + + self.assertIs( + draft4_format_checker, + validators.Draft4Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft3_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft3_format_checker + + self.assertIs( + draft3_format_checker, + validators.Draft3Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + with self.assertRaises(ImportError): + from jsonschema import draft1234_format_checker # noqa: F401 + + def test_import_cli(self): + """ + As of v4.17.0, importing jsonschema.cli is deprecated. + """ + + message = "The jsonschema CLI is deprecated and will be removed " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + import jsonschema.cli + importlib.reload(jsonschema.cli) + + self.assertEqual(w.filename, importlib.__file__) + + def test_cli(self): + """ + As of v4.17.0, the jsonschema CLI is deprecated. + """ + + process = subprocess.run( + [sys.executable, "-m", "jsonschema"], + capture_output=True, + check=True, + ) + self.assertIn(b"The jsonschema CLI is deprecated ", process.stderr) + + def test_automatic_remote_retrieval(self): + """ + Automatic retrieval of remote references is deprecated as of v4.18.0. 
+ """ + ref = "http://bar#/$defs/baz" + schema = {"$defs": {"baz": {"type": "integer"}}} + + if "requests" in sys.modules: # pragma: no cover + self.addCleanup( + sys.modules.__setitem__, "requests", sys.modules["requests"], + ) + sys.modules["requests"] = None + + @contextmanager + def fake_urlopen(request): + self.assertIsInstance(request, urllib.request.Request) + self.assertEqual(request.full_url, "http://bar") + + # Ha ha urllib.request.Request "normalizes" header names and + # Request.get_header does not also normalize them... + (header, value), = request.header_items() + self.assertEqual(header.lower(), "user-agent") + self.assertEqual( + value, "python-jsonschema (deprecated $ref resolution)", + ) + yield BytesIO(json.dumps(schema).encode("utf8")) + + validator = validators.Draft202012Validator({"$ref": ref}) + + message = "Automatically retrieving remote references " + patch = mock.patch.object(urllib.request, "urlopen", new=fake_urlopen) + + with patch, self.assertWarnsRegex(DeprecationWarning, message): + self.assertEqual( + (validator.is_valid({}), validator.is_valid(37)), + (False, True), + ) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/test_exceptions.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_exceptions.py new file mode 100644 index 0000000..8d515a9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_exceptions.py @@ -0,0 +1,759 @@ +from unittest import TestCase +import textwrap + +import jsonpath_ng + +from jsonschema import exceptions +from jsonschema.validators import _LATEST_VERSION + + +class TestBestMatch(TestCase): + def best_match_of(self, instance, schema): + errors = list(_LATEST_VERSION(schema).iter_errors(instance)) + msg = f"No errors found for {instance} under {schema!r}!" + self.assertTrue(errors, msg=msg) + + best = exceptions.best_match(iter(errors)) + reversed_best = exceptions.best_match(reversed(errors)) + + self.assertEqual( + best._contents(), + reversed_best._contents(), + f"No consistent best match!\nGot: {best}\n\nThen: {reversed_best}", + ) + return best + + def test_shallower_errors_are_better_matches(self): + schema = { + "properties": { + "foo": { + "minProperties": 2, + "properties": {"bar": {"type": "object"}}, + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": []}}, schema=schema) + self.assertEqual(best.validator, "minProperties") + + def test_oneOf_and_anyOf_are_weak_matches(self): + """ + A property you *must* match is probably better than one you have to + match a part of. + """ + + schema = { + "minProperties": 2, + "anyOf": [{"type": "string"}, {"type": "number"}], + "oneOf": [{"type": "string"}, {"type": "number"}], + } + best = self.best_match_of(instance={}, schema=schema) + self.assertEqual(best.validator, "minProperties") + + def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self): + """ + If the most relevant error is an anyOf, then we traverse its context + and select the otherwise *least* relevant error, since in this case + that means the most specific, deep, error inside the instance. + + I.e. since only one of the schemas must match, we look for the most + relevant one. 
+ """ + + schema = { + "properties": { + "foo": { + "anyOf": [ + {"type": "string"}, + {"properties": {"bar": {"type": "array"}}}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema) + self.assertEqual(best.validator_value, "array") + + def test_no_anyOf_traversal_for_equally_relevant_errors(self): + """ + We don't traverse into an anyOf (as above) if all of its context errors + seem to be equally "wrong" against the instance. + """ + + schema = { + "anyOf": [ + {"type": "string"}, + {"type": "integer"}, + {"type": "object"}, + ], + } + best = self.best_match_of(instance=[], schema=schema) + self.assertEqual(best.validator, "anyOf") + + def test_anyOf_traversal_for_single_equally_relevant_error(self): + """ + We *do* traverse anyOf with a single nested error, even though it is + vacuously equally relevant to itself. + """ + + schema = { + "anyOf": [ + {"type": "string"}, + ], + } + best = self.best_match_of(instance=[], schema=schema) + self.assertEqual(best.validator, "type") + + def test_anyOf_traversal_for_single_sibling_errors(self): + """ + We *do* traverse anyOf with a single subschema that fails multiple + times (e.g. on multiple items). + """ + + schema = { + "anyOf": [ + {"items": {"const": 37}}, + ], + } + best = self.best_match_of(instance=[12, 12], schema=schema) + self.assertEqual(best.validator, "const") + + def test_anyOf_traversal_for_non_type_matching_sibling_errors(self): + """ + We *do* traverse anyOf with multiple subschemas when one does not type + match. + """ + + schema = { + "anyOf": [ + {"type": "object"}, + {"items": {"const": 37}}, + ], + } + best = self.best_match_of(instance=[12, 12], schema=schema) + self.assertEqual(best.validator, "const") + + def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self): + """ + If the most relevant error is an oneOf, then we traverse its context + and select the otherwise *least* relevant error, since in this case + that means the most specific, deep, error inside the instance. + + I.e. since only one of the schemas must match, we look for the most + relevant one. + """ + + schema = { + "properties": { + "foo": { + "oneOf": [ + {"type": "string"}, + {"properties": {"bar": {"type": "array"}}}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema) + self.assertEqual(best.validator_value, "array") + + def test_no_oneOf_traversal_for_equally_relevant_errors(self): + """ + We don't traverse into an oneOf (as above) if all of its context errors + seem to be equally "wrong" against the instance. + """ + + schema = { + "oneOf": [ + {"type": "string"}, + {"type": "integer"}, + {"type": "object"}, + ], + } + best = self.best_match_of(instance=[], schema=schema) + self.assertEqual(best.validator, "oneOf") + + def test_oneOf_traversal_for_single_equally_relevant_error(self): + """ + We *do* traverse oneOf with a single nested error, even though it is + vacuously equally relevant to itself. + """ + + schema = { + "oneOf": [ + {"type": "string"}, + ], + } + best = self.best_match_of(instance=[], schema=schema) + self.assertEqual(best.validator, "type") + + def test_oneOf_traversal_for_single_sibling_errors(self): + """ + We *do* traverse oneOf with a single subschema that fails multiple + times (e.g. on multiple items). 
+ """ + + schema = { + "oneOf": [ + {"items": {"const": 37}}, + ], + } + best = self.best_match_of(instance=[12, 12], schema=schema) + self.assertEqual(best.validator, "const") + + def test_oneOf_traversal_for_non_type_matching_sibling_errors(self): + """ + We *do* traverse oneOf with multiple subschemas when one does not type + match. + """ + + schema = { + "oneOf": [ + {"type": "object"}, + {"items": {"const": 37}}, + ], + } + best = self.best_match_of(instance=[12, 12], schema=schema) + self.assertEqual(best.validator, "const") + + def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self): + """ + Now, if the error is allOf, we traverse but select the *most* relevant + error from the context, because all schemas here must match anyways. + """ + + schema = { + "properties": { + "foo": { + "allOf": [ + {"type": "string"}, + {"properties": {"bar": {"type": "array"}}}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema) + self.assertEqual(best.validator_value, "string") + + def test_nested_context_for_oneOf(self): + """ + We traverse into nested contexts (a oneOf containing an error in a + nested oneOf here). + """ + + schema = { + "properties": { + "foo": { + "oneOf": [ + {"type": "string"}, + { + "oneOf": [ + {"type": "string"}, + { + "properties": { + "bar": {"type": "array"}, + }, + }, + ], + }, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema) + self.assertEqual(best.validator_value, "array") + + def test_it_prioritizes_matching_types(self): + schema = { + "properties": { + "foo": { + "anyOf": [ + {"type": "array", "minItems": 2}, + {"type": "string", "minLength": 10}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": "bar"}, schema=schema) + self.assertEqual(best.validator, "minLength") + + reordered = { + "properties": { + "foo": { + "anyOf": [ + {"type": "string", "minLength": 10}, + {"type": "array", "minItems": 2}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": "bar"}, schema=reordered) + self.assertEqual(best.validator, "minLength") + + def test_it_prioritizes_matching_union_types(self): + schema = { + "properties": { + "foo": { + "anyOf": [ + {"type": ["array", "object"], "minItems": 2}, + {"type": ["integer", "string"], "minLength": 10}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": "bar"}, schema=schema) + self.assertEqual(best.validator, "minLength") + + reordered = { + "properties": { + "foo": { + "anyOf": [ + {"type": "string", "minLength": 10}, + {"type": "array", "minItems": 2}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": "bar"}, schema=reordered) + self.assertEqual(best.validator, "minLength") + + def test_boolean_schemas(self): + schema = {"properties": {"foo": False}} + best = self.best_match_of(instance={"foo": "bar"}, schema=schema) + self.assertIsNone(best.validator) + + def test_one_error(self): + validator = _LATEST_VERSION({"minProperties": 2}) + error, = validator.iter_errors({}) + self.assertEqual( + exceptions.best_match(validator.iter_errors({})).validator, + "minProperties", + ) + + def test_no_errors(self): + validator = _LATEST_VERSION({}) + self.assertIsNone(exceptions.best_match(validator.iter_errors({}))) + + +class TestByRelevance(TestCase): + def test_short_paths_are_better_matches(self): + shallow = exceptions.ValidationError("Oh no!", path=["baz"]) + deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"]) + match = max([shallow, deep], key=exceptions.relevance) + 
self.assertIs(match, shallow) + + match = max([deep, shallow], key=exceptions.relevance) + self.assertIs(match, shallow) + + def test_global_errors_are_even_better_matches(self): + shallow = exceptions.ValidationError("Oh no!", path=[]) + deep = exceptions.ValidationError("Oh yes!", path=["foo"]) + + errors = sorted([shallow, deep], key=exceptions.relevance) + self.assertEqual( + [list(error.path) for error in errors], + [["foo"], []], + ) + + errors = sorted([deep, shallow], key=exceptions.relevance) + self.assertEqual( + [list(error.path) for error in errors], + [["foo"], []], + ) + + def test_weak_keywords_are_lower_priority(self): + weak = exceptions.ValidationError("Oh no!", path=[], validator="a") + normal = exceptions.ValidationError("Oh yes!", path=[], validator="b") + + best_match = exceptions.by_relevance(weak="a") + + match = max([weak, normal], key=best_match) + self.assertIs(match, normal) + + match = max([normal, weak], key=best_match) + self.assertIs(match, normal) + + def test_strong_keywords_are_higher_priority(self): + weak = exceptions.ValidationError("Oh no!", path=[], validator="a") + normal = exceptions.ValidationError("Oh yes!", path=[], validator="b") + strong = exceptions.ValidationError("Oh fine!", path=[], validator="c") + + best_match = exceptions.by_relevance(weak="a", strong="c") + + match = max([weak, normal, strong], key=best_match) + self.assertIs(match, strong) + + match = max([strong, normal, weak], key=best_match) + self.assertIs(match, strong) + + +class TestErrorTree(TestCase): + def test_it_knows_how_many_total_errors_it_contains(self): + # FIXME: #442 + errors = [ + exceptions.ValidationError("Something", validator=i) + for i in range(8) + ] + tree = exceptions.ErrorTree(errors) + self.assertEqual(tree.total_errors, 8) + + def test_it_contains_an_item_if_the_item_had_an_error(self): + errors = [exceptions.ValidationError("a message", path=["bar"])] + tree = exceptions.ErrorTree(errors) + self.assertIn("bar", tree) + + def test_it_does_not_contain_an_item_if_the_item_had_no_error(self): + errors = [exceptions.ValidationError("a message", path=["bar"])] + tree = exceptions.ErrorTree(errors) + self.assertNotIn("foo", tree) + + def test_keywords_that_failed_appear_in_errors_dict(self): + error = exceptions.ValidationError("a message", validator="foo") + tree = exceptions.ErrorTree([error]) + self.assertEqual(tree.errors, {"foo": error}) + + def test_it_creates_a_child_tree_for_each_nested_path(self): + errors = [ + exceptions.ValidationError("a bar message", path=["bar"]), + exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]), + ] + tree = exceptions.ErrorTree(errors) + self.assertIn(0, tree["bar"]) + self.assertNotIn(1, tree["bar"]) + + def test_children_have_their_errors_dicts_built(self): + e1, e2 = ( + exceptions.ValidationError("1", validator="foo", path=["bar", 0]), + exceptions.ValidationError("2", validator="quux", path=["bar", 0]), + ) + tree = exceptions.ErrorTree([e1, e2]) + self.assertEqual(tree["bar"][0].errors, {"foo": e1, "quux": e2}) + + def test_multiple_errors_with_instance(self): + e1, e2 = ( + exceptions.ValidationError( + "1", + validator="foo", + path=["bar", "bar2"], + instance="i1"), + exceptions.ValidationError( + "2", + validator="quux", + path=["foobar", 2], + instance="i2"), + ) + exceptions.ErrorTree([e1, e2]) + + def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self): + error = exceptions.ValidationError("123", validator="foo", instance=[]) + tree = exceptions.ErrorTree([error]) + + with 
self.assertRaises(IndexError):
+ tree[0]
+
+ def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
+ """
+ If a keyword refers to a path that isn't in the instance, the
+ tree still properly returns a subtree for that path.
+ """
+
+ error = exceptions.ValidationError(
+ "a message", validator="foo", instance={}, path=["foo"],
+ )
+ tree = exceptions.ErrorTree([error])
+ self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
+
+ def test_iter(self):
+ e1, e2 = (
+ exceptions.ValidationError(
+ "1",
+ validator="foo",
+ path=["bar", "bar2"],
+ instance="i1"),
+ exceptions.ValidationError(
+ "2",
+ validator="quux",
+ path=["foobar", 2],
+ instance="i2"),
+ )
+ tree = exceptions.ErrorTree([e1, e2])
+ self.assertEqual(set(tree), {"bar", "foobar"})
+
+ def test_repr_single(self):
+ error = exceptions.ValidationError(
+ "1",
+ validator="foo",
+ path=["bar", "bar2"],
+ instance="i1",
+ )
+ tree = exceptions.ErrorTree([error])
+ self.assertEqual(repr(tree), "<ErrorTree (1 total errors)>")
+
+ def test_repr_multiple(self):
+ e1, e2 = (
+ exceptions.ValidationError(
+ "1",
+ validator="foo",
+ path=["bar", "bar2"],
+ instance="i1"),
+ exceptions.ValidationError(
+ "2",
+ validator="quux",
+ path=["foobar", 2],
+ instance="i2"),
+ )
+ tree = exceptions.ErrorTree([e1, e2])
+ self.assertEqual(repr(tree), "<ErrorTree (2 total errors)>")
+
+ def test_repr_empty(self):
+ tree = exceptions.ErrorTree([])
+ self.assertEqual(repr(tree), "<ErrorTree (0 total errors)>")
+
+
+ class TestErrorInitReprStr(TestCase):
+ def make_error(self, **kwargs):
+ defaults = dict(
+ message="hello",
+ validator="type",
+ validator_value="string",
+ instance=5,
+ schema={"type": "string"},
+ )
+ defaults.update(kwargs)
+ return exceptions.ValidationError(**defaults)
+
+ def assertShows(self, expected, **kwargs):
+ expected = textwrap.dedent(expected).rstrip("\n")
+
+ error = self.make_error(**kwargs)
+ message_line, _, rest = str(error).partition("\n")
+ self.assertEqual(message_line, error.message)
+ self.assertEqual(rest, expected)
+
+ def test_it_calls_super_and_sets_args(self):
+ error = self.make_error()
+ self.assertGreater(len(error.args), 1)
+
+ def test_repr(self):
+ self.assertEqual(
+ repr(exceptions.ValidationError(message="Hello!")),
+ "<ValidationError: 'Hello!'>",
+ )
+
+ def test_unset_error(self):
+ error = exceptions.ValidationError("message")
+ self.assertEqual(str(error), "message")
+
+ kwargs = {
+ "validator": "type",
+ "validator_value": "string",
+ "instance": 5,
+ "schema": {"type": "string"},
+ }
+ # Just the message should show if any of the attributes are unset
+ for attr in kwargs:
+ k = dict(kwargs)
+ del k[attr]
+ error = exceptions.ValidationError("message", **k)
+ self.assertEqual(str(error), "message")
+
+ def test_empty_paths(self):
+ self.assertShows(
+ """
+ Failed validating 'type' in schema:
+ {'type': 'string'}
+
+ On instance:
+ 5
+ """,
+ path=[],
+ schema_path=[],
+ )
+
+ def test_one_item_paths(self):
+ self.assertShows(
+ """
+ Failed validating 'type' in schema:
+ {'type': 'string'}
+
+ On instance[0]:
+ 5
+ """,
+ path=[0],
+ schema_path=["items"],
+ )
+
+ def test_multiple_item_paths(self):
+ self.assertShows(
+ """
+ Failed validating 'type' in schema['items'][0]:
+ {'type': 'string'}
+
+ On instance[0]['a']:
+ 5
+ """,
+ path=[0, "a"],
+ schema_path=["items", 0, 1],
+ )
+
+ def test_uses_pprint(self):
+ self.assertShows(
+ """
+ Failed validating 'maxLength' in schema:
+ {0: 0,
+ 1: 1,
+ 2: 2,
+ 3: 3,
+ 4: 4,
+ 5: 5,
+ 6: 6,
+ 7: 7,
+ 8: 8,
+ 9: 9,
+ 10: 10,
+ 11: 11,
+ 12: 12,
+ 13: 13,
+ 14: 14,
+ 15: 15,
+ 16: 16,
+ 17: 17,
+ 18: 18,
+ 19: 19}
+
+ On instance:
+ [0,
+ 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24] + """, + instance=list(range(25)), + schema=dict(zip(range(20), range(20))), + validator="maxLength", + ) + + def test_does_not_reorder_dicts(self): + self.assertShows( + """ + Failed validating 'type' in schema: + {'do': 3, 'not': 7, 'sort': 37, 'me': 73} + + On instance: + {'here': 73, 'too': 37, 'no': 7, 'sorting': 3} + """, + schema={ + "do": 3, + "not": 7, + "sort": 37, + "me": 73, + }, + instance={ + "here": 73, + "too": 37, + "no": 7, + "sorting": 3, + }, + ) + + def test_str_works_with_instances_having_overriden_eq_operator(self): + """ + Check for #164 which rendered exceptions unusable when a + `ValidationError` involved instances with an `__eq__` method + that returned truthy values. + """ + + class DontEQMeBro: + def __eq__(this, other): # pragma: no cover + self.fail("Don't!") + + def __ne__(this, other): # pragma: no cover + self.fail("Don't!") + + instance = DontEQMeBro() + error = exceptions.ValidationError( + "a message", + validator="foo", + instance=instance, + validator_value="some", + schema="schema", + ) + self.assertIn(repr(instance), str(error)) + + +class TestHashable(TestCase): + def test_hashable(self): + {exceptions.ValidationError("")} + {exceptions.SchemaError("")} + + +class TestJsonPathRendering(TestCase): + def validate_json_path_rendering(self, property_name, expected_path): + error = exceptions.ValidationError( + path=[property_name], + message="1", + validator="foo", + instance="i1", + ) + + rendered_json_path = error.json_path + self.assertEqual(rendered_json_path, expected_path) + + re_parsed_name = jsonpath_ng.parse(rendered_json_path).right.fields[0] + self.assertEqual(re_parsed_name, property_name) + + def test_basic(self): + self.validate_json_path_rendering("x", "$.x") + + def test_empty(self): + self.validate_json_path_rendering("", "$['']") + + def test_number(self): + self.validate_json_path_rendering("1", "$['1']") + + def test_period(self): + self.validate_json_path_rendering(".", "$['.']") + + def test_single_quote(self): + self.validate_json_path_rendering("'", r"$['\'']") + + def test_space(self): + self.validate_json_path_rendering(" ", "$[' ']") + + def test_backslash(self): + self.validate_json_path_rendering("\\", r"$['\\']") + + def test_backslash_single_quote(self): + self.validate_json_path_rendering(r"\'", r"$['\\\'']") + + def test_underscore(self): + self.validate_json_path_rendering("_", r"$['_']") + + def test_double_quote(self): + self.validate_json_path_rendering('"', """$['"']""") + + def test_hyphen(self): + self.validate_json_path_rendering("-", "$['-']") + + def test_json_path_injection(self): + self.validate_json_path_rendering("a[0]", "$['a[0]']") + + def test_open_bracket(self): + self.validate_json_path_rendering("[", "$['[']") diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/test_format.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_format.py new file mode 100644 index 0000000..d829f98 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_format.py @@ -0,0 +1,91 @@ +""" +Tests for the parts of jsonschema related to the :kw:`format` keyword. 
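+
+ For example (an illustrative sketch, not one of these tests), format
+ validation only produces errors once a checker is explicitly enabled:
+
+ from jsonschema import Draft202012Validator
+
+ validator = Draft202012Validator(
+ {"format": "ipv4"},
+ format_checker=Draft202012Validator.FORMAT_CHECKER,
+ )
+ validator.validate("256.0.0.1")  # raises ValidationError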
+""" + +from unittest import TestCase + +from jsonschema import FormatChecker, ValidationError +from jsonschema.exceptions import FormatError +from jsonschema.validators import Draft4Validator + +BOOM = ValueError("Boom!") +BANG = ZeroDivisionError("Bang!") + + +def boom(thing): + if thing == "bang": + raise BANG + raise BOOM + + +class TestFormatChecker(TestCase): + def test_it_can_validate_no_formats(self): + checker = FormatChecker(formats=()) + self.assertFalse(checker.checkers) + + def test_it_raises_a_key_error_for_unknown_formats(self): + with self.assertRaises(KeyError): + FormatChecker(formats=["o noes"]) + + def test_it_can_register_cls_checkers(self): + original = dict(FormatChecker.checkers) + self.addCleanup(FormatChecker.checkers.pop, "boom") + with self.assertWarns(DeprecationWarning): + FormatChecker.cls_checks("boom")(boom) + self.assertEqual( + FormatChecker.checkers, + dict(original, boom=(boom, ())), + ) + + def test_it_can_register_checkers(self): + checker = FormatChecker() + checker.checks("boom")(boom) + self.assertEqual( + checker.checkers, + dict(FormatChecker.checkers, boom=(boom, ())), + ) + + def test_it_catches_registered_errors(self): + checker = FormatChecker() + checker.checks("boom", raises=type(BOOM))(boom) + + with self.assertRaises(FormatError) as cm: + checker.check(instance=12, format="boom") + + self.assertIs(cm.exception.cause, BOOM) + self.assertIs(cm.exception.__cause__, BOOM) + self.assertEqual(str(cm.exception), "12 is not a 'boom'") + + # Unregistered errors should not be caught + with self.assertRaises(type(BANG)): + checker.check(instance="bang", format="boom") + + def test_format_error_causes_become_validation_error_causes(self): + checker = FormatChecker() + checker.checks("boom", raises=ValueError)(boom) + validator = Draft4Validator({"format": "boom"}, format_checker=checker) + + with self.assertRaises(ValidationError) as cm: + validator.validate("BOOM") + + self.assertIs(cm.exception.cause, BOOM) + self.assertIs(cm.exception.__cause__, BOOM) + + def test_format_checkers_come_with_defaults(self): + # This is bad :/ but relied upon. + # The docs for quite awhile recommended people do things like + # validate(..., format_checker=FormatChecker()) + # We should change that, but we can't without deprecation... + checker = FormatChecker() + with self.assertRaises(FormatError): + checker.check(instance="not-an-ipv4", format="ipv4") + + def test_repr(self): + checker = FormatChecker(formats=()) + checker.checks("foo")(lambda thing: True) # pragma: no cover + checker.checks("bar")(lambda thing: True) # pragma: no cover + checker.checks("baz")(lambda thing: True) # pragma: no cover + self.assertEqual( + repr(checker), + "", + ) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/test_jsonschema_test_suite.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_jsonschema_test_suite.py new file mode 100644 index 0000000..41c9825 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_jsonschema_test_suite.py @@ -0,0 +1,262 @@ +""" +Test runner for the JSON Schema official test suite + +Tests comprehensive correctness of each draft's validator. + +See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details. 
+""" + + +from jsonschema.tests._suite import Suite +import jsonschema + +SUITE = Suite() +DRAFT3 = SUITE.version(name="draft3") +DRAFT4 = SUITE.version(name="draft4") +DRAFT6 = SUITE.version(name="draft6") +DRAFT7 = SUITE.version(name="draft7") +DRAFT201909 = SUITE.version(name="draft2019-09") +DRAFT202012 = SUITE.version(name="draft2020-12") + + +def skip(message, **kwargs): + def skipper(test): + if all(value == getattr(test, attr) for attr, value in kwargs.items()): + return message + return skipper + + +def ecmascript_regex(test): + if test.subject == "ecmascript-regex": + return "ECMA regex support will be added in #1142." + + +def missing_format(Validator): + def missing_format(test): # pragma: no cover + schema = test.schema + if ( + schema is True + or schema is False + or "format" not in schema + or schema["format"] in Validator.FORMAT_CHECKER.checkers + or test.valid + ): + return + + return f"Format checker {schema['format']!r} not found." + return missing_format + + +def complex_email_validation(test): + if test.subject != "email": + return + + message = "Complex email validation is (intentionally) unsupported." + return skip( + message=message, + description="an invalid domain", + )(test) or skip( + message=message, + description="an invalid IPv4-address-literal", + )(test) or skip( + message=message, + description="dot after local part is not valid", + )(test) or skip( + message=message, + description="dot before local part is not valid", + )(test) or skip( + message=message, + description="two subsequent dots inside local part are not valid", + )(test) + + +def leap_second(test): + message = "Leap seconds are unsupported." + return skip( + message=message, + subject="time", + description="a valid time string with leap second", + )(test) or skip( + message=message, + subject="time", + description="a valid time string with leap second, Zulu", + )(test) or skip( + message=message, + subject="time", + description="a valid time string with leap second with offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, positive time-offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, negative time-offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, large positive time-offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, large negative time-offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, zero time-offset", + )(test) or skip( + message=message, + subject="date-time", + description="a valid date-time with a leap second, UTC", + )(test) or skip( + message=message, + subject="date-time", + description="a valid date-time with a leap second, with minus offset", + )(test) + + +TestDraft3 = DRAFT3.to_unittest_testcase( + DRAFT3.cases(), + DRAFT3.format_cases(), + DRAFT3.optional_cases_of(name="bignum"), + DRAFT3.optional_cases_of(name="non-bmp-regex"), + DRAFT3.optional_cases_of(name="zeroTerminatedFloats"), + Validator=jsonschema.Draft3Validator, + format_checker=jsonschema.Draft3Validator.FORMAT_CHECKER, + skip=lambda test: ( + ecmascript_regex(test) + or missing_format(jsonschema.Draft3Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft4 = DRAFT4.to_unittest_testcase( + DRAFT4.cases(), + DRAFT4.format_cases(), + DRAFT4.optional_cases_of(name="bignum"), + DRAFT4.optional_cases_of(name="float-overflow"), + 
DRAFT4.optional_cases_of(name="id"), + DRAFT4.optional_cases_of(name="non-bmp-regex"), + DRAFT4.optional_cases_of(name="zeroTerminatedFloats"), + Validator=jsonschema.Draft4Validator, + format_checker=jsonschema.Draft4Validator.FORMAT_CHECKER, + skip=lambda test: ( + ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft4Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft6 = DRAFT6.to_unittest_testcase( + DRAFT6.cases(), + DRAFT6.format_cases(), + DRAFT6.optional_cases_of(name="bignum"), + DRAFT6.optional_cases_of(name="float-overflow"), + DRAFT6.optional_cases_of(name="id"), + DRAFT6.optional_cases_of(name="non-bmp-regex"), + Validator=jsonschema.Draft6Validator, + format_checker=jsonschema.Draft6Validator.FORMAT_CHECKER, + skip=lambda test: ( + ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft6Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft7 = DRAFT7.to_unittest_testcase( + DRAFT7.cases(), + DRAFT7.format_cases(), + DRAFT7.optional_cases_of(name="bignum"), + DRAFT7.optional_cases_of(name="cross-draft"), + DRAFT7.optional_cases_of(name="float-overflow"), + DRAFT6.optional_cases_of(name="id"), + DRAFT7.optional_cases_of(name="non-bmp-regex"), + DRAFT7.optional_cases_of(name="unknownKeyword"), + Validator=jsonschema.Draft7Validator, + format_checker=jsonschema.Draft7Validator.FORMAT_CHECKER, + skip=lambda test: ( + ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft7Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft201909 = DRAFT201909.to_unittest_testcase( + DRAFT201909.cases(), + DRAFT201909.optional_cases_of(name="anchor"), + DRAFT201909.optional_cases_of(name="bignum"), + DRAFT201909.optional_cases_of(name="cross-draft"), + DRAFT201909.optional_cases_of(name="float-overflow"), + DRAFT201909.optional_cases_of(name="id"), + DRAFT201909.optional_cases_of(name="no-schema"), + DRAFT201909.optional_cases_of(name="non-bmp-regex"), + DRAFT201909.optional_cases_of(name="refOfUnknownKeyword"), + DRAFT201909.optional_cases_of(name="unknownKeyword"), + Validator=jsonschema.Draft201909Validator, + skip=skip( + message="Vocabulary support is still in-progress.", + subject="vocabulary", + description=( + "no validation: invalid number, but it still validates" + ), + ), +) + + +TestDraft201909Format = DRAFT201909.to_unittest_testcase( + DRAFT201909.format_cases(), + name="TestDraft201909Format", + Validator=jsonschema.Draft201909Validator, + format_checker=jsonschema.Draft201909Validator.FORMAT_CHECKER, + skip=lambda test: ( + complex_email_validation(test) + or ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft201909Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft202012 = DRAFT202012.to_unittest_testcase( + DRAFT202012.cases(), + DRAFT201909.optional_cases_of(name="anchor"), + DRAFT202012.optional_cases_of(name="bignum"), + DRAFT202012.optional_cases_of(name="cross-draft"), + DRAFT202012.optional_cases_of(name="float-overflow"), + DRAFT202012.optional_cases_of(name="id"), + DRAFT202012.optional_cases_of(name="no-schema"), + DRAFT202012.optional_cases_of(name="non-bmp-regex"), + DRAFT202012.optional_cases_of(name="refOfUnknownKeyword"), + DRAFT202012.optional_cases_of(name="unknownKeyword"), + Validator=jsonschema.Draft202012Validator, + skip=skip( + message="Vocabulary support is still in-progress.", + subject="vocabulary", + description=( + "no validation: invalid number, but 
it still validates" + ), + ), +) + + +TestDraft202012Format = DRAFT202012.to_unittest_testcase( + DRAFT202012.format_cases(), + name="TestDraft202012Format", + Validator=jsonschema.Draft202012Validator, + format_checker=jsonschema.Draft202012Validator.FORMAT_CHECKER, + skip=lambda test: ( + complex_email_validation(test) + or ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft202012Validator)(test) + or complex_email_validation(test) + ), +) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/test_types.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_types.py new file mode 100644 index 0000000..bd97b18 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_types.py @@ -0,0 +1,221 @@ +""" +Tests for the `TypeChecker`-based type interface. + +The actual correctness of the type checking is handled in +`test_jsonschema_test_suite`; these tests check that TypeChecker +functions correctly at a more granular level. +""" +from collections import namedtuple +from unittest import TestCase + +from jsonschema import ValidationError, _keywords +from jsonschema._types import TypeChecker +from jsonschema.exceptions import UndefinedTypeCheck, UnknownType +from jsonschema.validators import Draft202012Validator, extend + + +def equals_2(checker, instance): + return instance == 2 + + +def is_namedtuple(instance): + return isinstance(instance, tuple) and getattr(instance, "_fields", None) + + +def is_object_or_named_tuple(checker, instance): + if Draft202012Validator.TYPE_CHECKER.is_type(instance, "object"): + return True + return is_namedtuple(instance) + + +class TestTypeChecker(TestCase): + def test_is_type(self): + checker = TypeChecker({"two": equals_2}) + self.assertEqual( + ( + checker.is_type(instance=2, type="two"), + checker.is_type(instance="bar", type="two"), + ), + (True, False), + ) + + def test_is_unknown_type(self): + with self.assertRaises(UndefinedTypeCheck) as e: + TypeChecker().is_type(4, "foobar") + self.assertIn( + "'foobar' is unknown to this type checker", + str(e.exception), + ) + self.assertTrue( + e.exception.__suppress_context__, + msg="Expected the internal KeyError to be hidden.", + ) + + def test_checks_can_be_added_at_init(self): + checker = TypeChecker({"two": equals_2}) + self.assertEqual(checker, TypeChecker().redefine("two", equals_2)) + + def test_redefine_existing_type(self): + self.assertEqual( + TypeChecker().redefine("two", object()).redefine("two", equals_2), + TypeChecker().redefine("two", equals_2), + ) + + def test_remove(self): + self.assertEqual( + TypeChecker({"two": equals_2}).remove("two"), + TypeChecker(), + ) + + def test_remove_unknown_type(self): + with self.assertRaises(UndefinedTypeCheck) as context: + TypeChecker().remove("foobar") + self.assertIn("foobar", str(context.exception)) + + def test_redefine_many(self): + self.assertEqual( + TypeChecker().redefine_many({"foo": int, "bar": str}), + TypeChecker().redefine("foo", int).redefine("bar", str), + ) + + def test_remove_multiple(self): + self.assertEqual( + TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"), + TypeChecker(), + ) + + def test_type_check_can_raise_key_error(self): + """ + Make sure no one writes: + + try: + self._type_checkers[type](...) + except KeyError: + + ignoring the fact that the function itself can raise that. 
+ """ + + error = KeyError("Stuff") + + def raises_keyerror(checker, instance): + raise error + + with self.assertRaises(KeyError) as context: + TypeChecker({"foo": raises_keyerror}).is_type(4, "foo") + + self.assertIs(context.exception, error) + + def test_repr(self): + checker = TypeChecker({"foo": is_namedtuple, "bar": is_namedtuple}) + self.assertEqual(repr(checker), "") + + +class TestCustomTypes(TestCase): + def test_simple_type_can_be_extended(self): + def int_or_str_int(checker, instance): + if not isinstance(instance, (int, str)): + return False + try: + int(instance) + except ValueError: + return False + return True + + CustomValidator = extend( + Draft202012Validator, + type_checker=Draft202012Validator.TYPE_CHECKER.redefine( + "integer", int_or_str_int, + ), + ) + validator = CustomValidator({"type": "integer"}) + + validator.validate(4) + validator.validate("4") + + with self.assertRaises(ValidationError): + validator.validate(4.4) + + with self.assertRaises(ValidationError): + validator.validate("foo") + + def test_object_can_be_extended(self): + schema = {"type": "object"} + + Point = namedtuple("Point", ["x", "y"]) + + type_checker = Draft202012Validator.TYPE_CHECKER.redefine( + "object", is_object_or_named_tuple, + ) + + CustomValidator = extend( + Draft202012Validator, + type_checker=type_checker, + ) + validator = CustomValidator(schema) + + validator.validate(Point(x=4, y=5)) + + def test_object_extensions_require_custom_validators(self): + schema = {"type": "object", "required": ["x"]} + + type_checker = Draft202012Validator.TYPE_CHECKER.redefine( + "object", is_object_or_named_tuple, + ) + + CustomValidator = extend( + Draft202012Validator, + type_checker=type_checker, + ) + validator = CustomValidator(schema) + + Point = namedtuple("Point", ["x", "y"]) + # Cannot handle required + with self.assertRaises(ValidationError): + validator.validate(Point(x=4, y=5)) + + def test_object_extensions_can_handle_custom_validators(self): + schema = { + "type": "object", + "required": ["x"], + "properties": {"x": {"type": "integer"}}, + } + + type_checker = Draft202012Validator.TYPE_CHECKER.redefine( + "object", is_object_or_named_tuple, + ) + + def coerce_named_tuple(fn): + def coerced(validator, value, instance, schema): + if is_namedtuple(instance): + instance = instance._asdict() + return fn(validator, value, instance, schema) + return coerced + + required = coerce_named_tuple(_keywords.required) + properties = coerce_named_tuple(_keywords.properties) + + CustomValidator = extend( + Draft202012Validator, + type_checker=type_checker, + validators={"required": required, "properties": properties}, + ) + + validator = CustomValidator(schema) + + Point = namedtuple("Point", ["x", "y"]) + # Can now process required and properties + validator.validate(Point(x=4, y=5)) + + with self.assertRaises(ValidationError): + validator.validate(Point(x="not an integer", y=5)) + + # As well as still handle objects. 
+ validator.validate({"x": 4, "y": 5}) + + with self.assertRaises(ValidationError): + validator.validate({"x": "not an integer", "y": 5}) + + def test_unknown_type(self): + with self.assertRaises(UnknownType) as e: + Draft202012Validator({}).is_type(12, "some unknown type") + self.assertIn("'some unknown type'", str(e.exception)) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/test_utils.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_utils.py new file mode 100644 index 0000000..d9764b0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_utils.py @@ -0,0 +1,138 @@ +from math import nan +from unittest import TestCase + +from jsonschema._utils import equal + + +class TestEqual(TestCase): + def test_none(self): + self.assertTrue(equal(None, None)) + + def test_nan(self): + self.assertTrue(equal(nan, nan)) + + +class TestDictEqual(TestCase): + def test_equal_dictionaries(self): + dict_1 = {"a": "b", "c": "d"} + dict_2 = {"c": "d", "a": "b"} + self.assertTrue(equal(dict_1, dict_2)) + + def test_equal_dictionaries_with_nan(self): + dict_1 = {"a": nan, "c": "d"} + dict_2 = {"c": "d", "a": nan} + self.assertTrue(equal(dict_1, dict_2)) + + def test_missing_key(self): + dict_1 = {"a": "b", "c": "d"} + dict_2 = {"c": "d", "x": "b"} + self.assertFalse(equal(dict_1, dict_2)) + + def test_additional_key(self): + dict_1 = {"a": "b", "c": "d"} + dict_2 = {"c": "d", "a": "b", "x": "x"} + self.assertFalse(equal(dict_1, dict_2)) + + def test_missing_value(self): + dict_1 = {"a": "b", "c": "d"} + dict_2 = {"c": "d", "a": "x"} + self.assertFalse(equal(dict_1, dict_2)) + + def test_empty_dictionaries(self): + dict_1 = {} + dict_2 = {} + self.assertTrue(equal(dict_1, dict_2)) + + def test_one_none(self): + dict_1 = None + dict_2 = {"a": "b", "c": "d"} + self.assertFalse(equal(dict_1, dict_2)) + + def test_same_item(self): + dict_1 = {"a": "b", "c": "d"} + self.assertTrue(equal(dict_1, dict_1)) + + def test_nested_equal(self): + dict_1 = {"a": {"a": "b", "c": "d"}, "c": "d"} + dict_2 = {"c": "d", "a": {"a": "b", "c": "d"}} + self.assertTrue(equal(dict_1, dict_2)) + + def test_nested_dict_unequal(self): + dict_1 = {"a": {"a": "b", "c": "d"}, "c": "d"} + dict_2 = {"c": "d", "a": {"a": "b", "c": "x"}} + self.assertFalse(equal(dict_1, dict_2)) + + def test_mixed_nested_equal(self): + dict_1 = {"a": ["a", "b", "c", "d"], "c": "d"} + dict_2 = {"c": "d", "a": ["a", "b", "c", "d"]} + self.assertTrue(equal(dict_1, dict_2)) + + def test_nested_list_unequal(self): + dict_1 = {"a": ["a", "b", "c", "d"], "c": "d"} + dict_2 = {"c": "d", "a": ["b", "c", "d", "a"]} + self.assertFalse(equal(dict_1, dict_2)) + + +class TestListEqual(TestCase): + def test_equal_lists(self): + list_1 = ["a", "b", "c"] + list_2 = ["a", "b", "c"] + self.assertTrue(equal(list_1, list_2)) + + def test_equal_lists_with_nan(self): + list_1 = ["a", nan, "c"] + list_2 = ["a", nan, "c"] + self.assertTrue(equal(list_1, list_2)) + + def test_unsorted_lists(self): + list_1 = ["a", "b", "c"] + list_2 = ["b", "b", "a"] + self.assertFalse(equal(list_1, list_2)) + + def test_first_list_larger(self): + list_1 = ["a", "b", "c"] + list_2 = ["a", "b"] + self.assertFalse(equal(list_1, list_2)) + + def test_second_list_larger(self): + list_1 = ["a", "b"] + list_2 = ["a", "b", "c"] + self.assertFalse(equal(list_1, list_2)) + + def test_list_with_none_unequal(self): + list_1 = ["a", "b", None] + list_2 = ["a", "b", "c"] + self.assertFalse(equal(list_1, list_2)) + + list_1 = ["a", "b", None] + list_2 = [None, "b", "c"] 
+ self.assertFalse(equal(list_1, list_2)) + + def test_list_with_none_equal(self): + list_1 = ["a", None, "c"] + list_2 = ["a", None, "c"] + self.assertTrue(equal(list_1, list_2)) + + def test_empty_list(self): + list_1 = [] + list_2 = [] + self.assertTrue(equal(list_1, list_2)) + + def test_one_none(self): + list_1 = None + list_2 = [] + self.assertFalse(equal(list_1, list_2)) + + def test_same_list(self): + list_1 = ["a", "b", "c"] + self.assertTrue(equal(list_1, list_1)) + + def test_equal_nested_lists(self): + list_1 = ["a", ["b", "c"], "d"] + list_2 = ["a", ["b", "c"], "d"] + self.assertTrue(equal(list_1, list_2)) + + def test_unequal_nested_lists(self): + list_1 = ["a", ["b", "c"], "d"] + list_2 = ["a", [], "c"] + self.assertFalse(equal(list_1, list_2)) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/test_validators.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_validators.py new file mode 100644 index 0000000..28cc402 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/test_validators.py @@ -0,0 +1,2575 @@ +from __future__ import annotations + +from collections import deque, namedtuple +from contextlib import contextmanager +from decimal import Decimal +from io import BytesIO +from typing import Any +from unittest import TestCase, mock +from urllib.request import pathname2url +import json +import os +import sys +import tempfile +import warnings + +from attrs import define, field +from referencing.jsonschema import DRAFT202012 +import referencing.exceptions + +from jsonschema import ( + FormatChecker, + TypeChecker, + exceptions, + protocols, + validators, +) + + +def fail(validator, errors, instance, schema): + for each in errors: + each.setdefault("message", "You told me to fail!") + yield exceptions.ValidationError(**each) + + +class TestCreateAndExtend(TestCase): + def setUp(self): + self.addCleanup( + self.assertEqual, + validators._META_SCHEMAS, + dict(validators._META_SCHEMAS), + ) + self.addCleanup( + self.assertEqual, + validators._VALIDATORS, + dict(validators._VALIDATORS), + ) + + self.meta_schema = {"$id": "some://meta/schema"} + self.validators = {"fail": fail} + self.type_checker = TypeChecker() + self.Validator = validators.create( + meta_schema=self.meta_schema, + validators=self.validators, + type_checker=self.type_checker, + ) + + def test_attrs(self): + self.assertEqual( + ( + self.Validator.VALIDATORS, + self.Validator.META_SCHEMA, + self.Validator.TYPE_CHECKER, + ), ( + self.validators, + self.meta_schema, + self.type_checker, + ), + ) + + def test_init(self): + schema = {"fail": []} + self.assertEqual(self.Validator(schema).schema, schema) + + def test_iter_errors_successful(self): + schema = {"fail": []} + validator = self.Validator(schema) + + errors = list(validator.iter_errors("hello")) + self.assertEqual(errors, []) + + def test_iter_errors_one_error(self): + schema = {"fail": [{"message": "Whoops!"}]} + validator = self.Validator(schema) + + expected_error = exceptions.ValidationError( + "Whoops!", + instance="goodbye", + schema=schema, + validator="fail", + validator_value=[{"message": "Whoops!"}], + schema_path=deque(["fail"]), + ) + + errors = list(validator.iter_errors("goodbye")) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0]._contents(), expected_error._contents()) + + def test_iter_errors_multiple_errors(self): + schema = { + "fail": [ + {"message": "First"}, + {"message": "Second!", "validator": "asdf"}, + {"message": "Third"}, + ], + } + validator = self.Validator(schema) + + 
errors = list(validator.iter_errors("goodbye")) + self.assertEqual(len(errors), 3) + + def test_if_a_version_is_provided_it_is_registered(self): + Validator = validators.create( + meta_schema={"$id": "something"}, + version="my version", + ) + self.addCleanup(validators._META_SCHEMAS.pop, "something") + self.addCleanup(validators._VALIDATORS.pop, "my version") + self.assertEqual(Validator.__name__, "MyVersionValidator") + self.assertEqual(Validator.__qualname__, "MyVersionValidator") + + def test_repr(self): + Validator = validators.create( + meta_schema={"$id": "something"}, + version="my version", + ) + self.addCleanup(validators._META_SCHEMAS.pop, "something") + self.addCleanup(validators._VALIDATORS.pop, "my version") + self.assertEqual( + repr(Validator({})), + "MyVersionValidator(schema={}, format_checker=None)", + ) + + def test_long_repr(self): + Validator = validators.create( + meta_schema={"$id": "something"}, + version="my version", + ) + self.addCleanup(validators._META_SCHEMAS.pop, "something") + self.addCleanup(validators._VALIDATORS.pop, "my version") + self.assertEqual( + repr(Validator({"a": list(range(1000))})), ( + "MyVersionValidator(schema={'a': [0, 1, 2, 3, 4, 5, ...]}, " + "format_checker=None)" + ), + ) + + def test_repr_no_version(self): + Validator = validators.create(meta_schema={}) + self.assertEqual( + repr(Validator({})), + "Validator(schema={}, format_checker=None)", + ) + + def test_dashes_are_stripped_from_validator_names(self): + Validator = validators.create( + meta_schema={"$id": "something"}, + version="foo-bar", + ) + self.addCleanup(validators._META_SCHEMAS.pop, "something") + self.addCleanup(validators._VALIDATORS.pop, "foo-bar") + self.assertEqual(Validator.__qualname__, "FooBarValidator") + + def test_if_a_version_is_not_provided_it_is_not_registered(self): + original = dict(validators._META_SCHEMAS) + validators.create(meta_schema={"id": "id"}) + self.assertEqual(validators._META_SCHEMAS, original) + + def test_validates_registers_meta_schema_id(self): + meta_schema_key = "meta schema id" + my_meta_schema = {"id": meta_schema_key} + + validators.create( + meta_schema=my_meta_schema, + version="my version", + id_of=lambda s: s.get("id", ""), + ) + self.addCleanup(validators._META_SCHEMAS.pop, meta_schema_key) + self.addCleanup(validators._VALIDATORS.pop, "my version") + + self.assertIn(meta_schema_key, validators._META_SCHEMAS) + + def test_validates_registers_meta_schema_draft6_id(self): + meta_schema_key = "meta schema $id" + my_meta_schema = {"$id": meta_schema_key} + + validators.create( + meta_schema=my_meta_schema, + version="my version", + ) + self.addCleanup(validators._META_SCHEMAS.pop, meta_schema_key) + self.addCleanup(validators._VALIDATORS.pop, "my version") + + self.assertIn(meta_schema_key, validators._META_SCHEMAS) + + def test_create_default_types(self): + Validator = validators.create(meta_schema={}, validators=()) + self.assertTrue( + all( + Validator({}).is_type(instance=instance, type=type) + for type, instance in [ + ("array", []), + ("boolean", True), + ("integer", 12), + ("null", None), + ("number", 12.0), + ("object", {}), + ("string", "foo"), + ] + ), + ) + + def test_check_schema_with_different_metaschema(self): + """ + One can create a validator class whose metaschema uses a different + dialect than itself. 
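+
+        Here the metaschema declares Draft 2020-12 via $schema, so
+        check_schema validates schemas against that dialect, while the
+        created class itself defines no validators and accepts anything.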
+ """ + + NoEmptySchemasValidator = validators.create( + meta_schema={ + "$schema": validators.Draft202012Validator.META_SCHEMA["$id"], + "not": {"const": {}}, + }, + ) + NoEmptySchemasValidator.check_schema({"foo": "bar"}) + + with self.assertRaises(exceptions.SchemaError): + NoEmptySchemasValidator.check_schema({}) + + NoEmptySchemasValidator({"foo": "bar"}).validate("foo") + + def test_check_schema_with_different_metaschema_defaults_to_self(self): + """ + A validator whose metaschema doesn't declare $schema defaults to its + own validation behavior, not the latest "normal" specification. + """ + + NoEmptySchemasValidator = validators.create( + meta_schema={"fail": [{"message": "Meta schema whoops!"}]}, + validators={"fail": fail}, + ) + with self.assertRaises(exceptions.SchemaError): + NoEmptySchemasValidator.check_schema({}) + + def test_extend(self): + original = dict(self.Validator.VALIDATORS) + new = object() + + Extended = validators.extend( + self.Validator, + validators={"new": new}, + ) + self.assertEqual( + ( + Extended.VALIDATORS, + Extended.META_SCHEMA, + Extended.TYPE_CHECKER, + self.Validator.VALIDATORS, + ), ( + dict(original, new=new), + self.Validator.META_SCHEMA, + self.Validator.TYPE_CHECKER, + original, + ), + ) + + def test_extend_idof(self): + """ + Extending a validator preserves its notion of schema IDs. + """ + def id_of(schema): + return schema.get("__test__", self.Validator.ID_OF(schema)) + correct_id = "the://correct/id/" + meta_schema = { + "$id": "the://wrong/id/", + "__test__": correct_id, + } + Original = validators.create( + meta_schema=meta_schema, + validators=self.validators, + type_checker=self.type_checker, + id_of=id_of, + ) + self.assertEqual(Original.ID_OF(Original.META_SCHEMA), correct_id) + + Derived = validators.extend(Original) + self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id) + + def test_extend_applicable_validators(self): + """ + Extending a validator preserves its notion of applicable validators. 
+ """ + + schema = { + "$defs": {"test": {"type": "number"}}, + "$ref": "#/$defs/test", + "maximum": 1, + } + + draft4 = validators.Draft4Validator(schema) + self.assertTrue(draft4.is_valid(37)) # as $ref ignores siblings + + Derived = validators.extend(validators.Draft4Validator) + self.assertTrue(Derived(schema).is_valid(37)) + + +class TestValidationErrorMessages(TestCase): + def message_for(self, instance, schema, *args, **kwargs): + cls = kwargs.pop("cls", validators._LATEST_VERSION) + cls.check_schema(schema) + validator = cls(schema, *args, **kwargs) + errors = list(validator.iter_errors(instance)) + self.assertTrue(errors, msg=f"No errors were raised for {instance!r}") + self.assertEqual( + len(errors), + 1, + msg=f"Expected exactly one error, found {errors!r}", + ) + return errors[0].message + + def test_single_type_failure(self): + message = self.message_for(instance=1, schema={"type": "string"}) + self.assertEqual(message, "1 is not of type 'string'") + + def test_single_type_list_failure(self): + message = self.message_for(instance=1, schema={"type": ["string"]}) + self.assertEqual(message, "1 is not of type 'string'") + + def test_multiple_type_failure(self): + types = "string", "object" + message = self.message_for(instance=1, schema={"type": list(types)}) + self.assertEqual(message, "1 is not of type 'string', 'object'") + + def test_object_with_named_type_failure(self): + schema = {"type": [{"name": "Foo", "minimum": 3}]} + message = self.message_for( + instance=1, + schema=schema, + cls=validators.Draft3Validator, + ) + self.assertEqual(message, "1 is not of type 'Foo'") + + def test_minimum(self): + message = self.message_for(instance=1, schema={"minimum": 2}) + self.assertEqual(message, "1 is less than the minimum of 2") + + def test_maximum(self): + message = self.message_for(instance=1, schema={"maximum": 0}) + self.assertEqual(message, "1 is greater than the maximum of 0") + + def test_dependencies_single_element(self): + depend, on = "bar", "foo" + schema = {"dependencies": {depend: on}} + message = self.message_for( + instance={"bar": 2}, + schema=schema, + cls=validators.Draft3Validator, + ) + self.assertEqual(message, "'foo' is a dependency of 'bar'") + + def test_object_without_title_type_failure_draft3(self): + type = {"type": [{"minimum": 3}]} + message = self.message_for( + instance=1, + schema={"type": [type]}, + cls=validators.Draft3Validator, + ) + self.assertEqual( + message, + "1 is not of type {'type': [{'minimum': 3}]}", + ) + + def test_dependencies_list_draft3(self): + depend, on = "bar", "foo" + schema = {"dependencies": {depend: [on]}} + message = self.message_for( + instance={"bar": 2}, + schema=schema, + cls=validators.Draft3Validator, + ) + self.assertEqual(message, "'foo' is a dependency of 'bar'") + + def test_dependencies_list_draft7(self): + depend, on = "bar", "foo" + schema = {"dependencies": {depend: [on]}} + message = self.message_for( + instance={"bar": 2}, + schema=schema, + cls=validators.Draft7Validator, + ) + self.assertEqual(message, "'foo' is a dependency of 'bar'") + + def test_additionalItems_single_failure(self): + message = self.message_for( + instance=[2], + schema={"items": [], "additionalItems": False}, + cls=validators.Draft3Validator, + ) + self.assertIn("(2 was unexpected)", message) + + def test_additionalItems_multiple_failures(self): + message = self.message_for( + instance=[1, 2, 3], + schema={"items": [], "additionalItems": False}, + cls=validators.Draft3Validator, + ) + self.assertIn("(1, 2, 3 were unexpected)", 
message) + + def test_additionalProperties_single_failure(self): + additional = "foo" + schema = {"additionalProperties": False} + message = self.message_for(instance={additional: 2}, schema=schema) + self.assertIn("('foo' was unexpected)", message) + + def test_additionalProperties_multiple_failures(self): + schema = {"additionalProperties": False} + message = self.message_for( + instance=dict.fromkeys(["foo", "bar"]), + schema=schema, + ) + + self.assertIn(repr("foo"), message) + self.assertIn(repr("bar"), message) + self.assertIn("were unexpected)", message) + + def test_const(self): + schema = {"const": 12} + message = self.message_for( + instance={"foo": "bar"}, + schema=schema, + ) + self.assertIn("12 was expected", message) + + def test_contains_draft_6(self): + schema = {"contains": {"const": 12}} + message = self.message_for( + instance=[2, {}, []], + schema=schema, + cls=validators.Draft6Validator, + ) + self.assertEqual( + message, + "None of [2, {}, []] are valid under the given schema", + ) + + def test_invalid_format_default_message(self): + checker = FormatChecker(formats=()) + checker.checks("thing")(lambda value: False) + + schema = {"format": "thing"} + message = self.message_for( + instance="bla", + schema=schema, + format_checker=checker, + ) + + self.assertIn(repr("bla"), message) + self.assertIn(repr("thing"), message) + self.assertIn("is not a", message) + + def test_additionalProperties_false_patternProperties(self): + schema = {"type": "object", + "additionalProperties": False, + "patternProperties": { + "^abc$": {"type": "string"}, + "^def$": {"type": "string"}, + }} + message = self.message_for( + instance={"zebra": 123}, + schema=schema, + cls=validators.Draft4Validator, + ) + self.assertEqual( + message, + "{} does not match any of the regexes: {}, {}".format( + repr("zebra"), repr("^abc$"), repr("^def$"), + ), + ) + message = self.message_for( + instance={"zebra": 123, "fish": 456}, + schema=schema, + cls=validators.Draft4Validator, + ) + self.assertEqual( + message, + "{}, {} do not match any of the regexes: {}, {}".format( + repr("fish"), repr("zebra"), repr("^abc$"), repr("^def$"), + ), + ) + + def test_False_schema(self): + message = self.message_for( + instance="something", + schema=False, + ) + self.assertEqual(message, "False schema does not allow 'something'") + + def test_multipleOf(self): + message = self.message_for( + instance=3, + schema={"multipleOf": 2}, + ) + self.assertEqual(message, "3 is not a multiple of 2") + + def test_minItems(self): + message = self.message_for(instance=[], schema={"minItems": 2}) + self.assertEqual(message, "[] is too short") + + def test_maxItems(self): + message = self.message_for(instance=[1, 2, 3], schema={"maxItems": 2}) + self.assertEqual(message, "[1, 2, 3] is too long") + + def test_minItems_1(self): + message = self.message_for(instance=[], schema={"minItems": 1}) + self.assertEqual(message, "[] should be non-empty") + + def test_maxItems_0(self): + message = self.message_for(instance=[1, 2, 3], schema={"maxItems": 0}) + self.assertEqual(message, "[1, 2, 3] is expected to be empty") + + def test_minLength(self): + message = self.message_for( + instance="", + schema={"minLength": 2}, + ) + self.assertEqual(message, "'' is too short") + + def test_maxLength(self): + message = self.message_for( + instance="abc", + schema={"maxLength": 2}, + ) + self.assertEqual(message, "'abc' is too long") + + def test_minLength_1(self): + message = self.message_for(instance="", schema={"minLength": 1}) + 
self.assertEqual(message, "'' should be non-empty") + + def test_maxLength_0(self): + message = self.message_for(instance="abc", schema={"maxLength": 0}) + self.assertEqual(message, "'abc' is expected to be empty") + + def test_minProperties(self): + message = self.message_for(instance={}, schema={"minProperties": 2}) + self.assertEqual(message, "{} does not have enough properties") + + def test_maxProperties(self): + message = self.message_for( + instance={"a": {}, "b": {}, "c": {}}, + schema={"maxProperties": 2}, + ) + self.assertEqual( + message, + "{'a': {}, 'b': {}, 'c': {}} has too many properties", + ) + + def test_minProperties_1(self): + message = self.message_for(instance={}, schema={"minProperties": 1}) + self.assertEqual(message, "{} should be non-empty") + + def test_maxProperties_0(self): + message = self.message_for( + instance={1: 2}, + schema={"maxProperties": 0}, + ) + self.assertEqual(message, "{1: 2} is expected to be empty") + + def test_prefixItems_with_items(self): + message = self.message_for( + instance=[1, 2, "foo"], + schema={"items": False, "prefixItems": [{}, {}]}, + ) + self.assertEqual( + message, + "Expected at most 2 items but found 1 extra: 'foo'", + ) + + def test_prefixItems_with_multiple_extra_items(self): + message = self.message_for( + instance=[1, 2, "foo", 5], + schema={"items": False, "prefixItems": [{}, {}]}, + ) + self.assertEqual( + message, + "Expected at most 2 items but found 2 extra: ['foo', 5]", + ) + + def test_pattern(self): + message = self.message_for( + instance="bbb", + schema={"pattern": "^a*$"}, + ) + self.assertEqual(message, "'bbb' does not match '^a*$'") + + def test_does_not_contain(self): + message = self.message_for( + instance=[], + schema={"contains": {"type": "string"}}, + ) + self.assertEqual( + message, + "[] does not contain items matching the given schema", + ) + + def test_contains_too_few(self): + message = self.message_for( + instance=["foo", 1], + schema={"contains": {"type": "string"}, "minContains": 2}, + ) + self.assertEqual( + message, + "Too few items match the given schema " + "(expected at least 2 but only 1 matched)", + ) + + def test_contains_too_few_both_constrained(self): + message = self.message_for( + instance=["foo", 1], + schema={ + "contains": {"type": "string"}, + "minContains": 2, + "maxContains": 4, + }, + ) + self.assertEqual( + message, + "Too few items match the given schema (expected at least 2 but " + "only 1 matched)", + ) + + def test_contains_too_many(self): + message = self.message_for( + instance=["foo", "bar", "baz"], + schema={"contains": {"type": "string"}, "maxContains": 2}, + ) + self.assertEqual( + message, + "Too many items match the given schema (expected at most 2)", + ) + + def test_contains_too_many_both_constrained(self): + message = self.message_for( + instance=["foo"] * 5, + schema={ + "contains": {"type": "string"}, + "minContains": 2, + "maxContains": 4, + }, + ) + self.assertEqual( + message, + "Too many items match the given schema (expected at most 4)", + ) + + def test_exclusiveMinimum(self): + message = self.message_for( + instance=3, + schema={"exclusiveMinimum": 5}, + ) + self.assertEqual( + message, + "3 is less than or equal to the minimum of 5", + ) + + def test_exclusiveMaximum(self): + message = self.message_for(instance=3, schema={"exclusiveMaximum": 2}) + self.assertEqual( + message, + "3 is greater than or equal to the maximum of 2", + ) + + def test_required(self): + message = self.message_for(instance={}, schema={"required": ["foo"]}) + 
self.assertEqual(message, "'foo' is a required property") + + def test_dependentRequired(self): + message = self.message_for( + instance={"foo": {}}, + schema={"dependentRequired": {"foo": ["bar"]}}, + ) + self.assertEqual(message, "'bar' is a dependency of 'foo'") + + def test_oneOf_matches_none(self): + message = self.message_for(instance={}, schema={"oneOf": [False]}) + self.assertEqual( + message, + "{} is not valid under any of the given schemas", + ) + + def test_oneOf_matches_too_many(self): + message = self.message_for(instance={}, schema={"oneOf": [True, True]}) + self.assertEqual(message, "{} is valid under each of True, True") + + def test_unevaluated_items(self): + schema = {"type": "array", "unevaluatedItems": False} + message = self.message_for(instance=["foo", "bar"], schema=schema) + self.assertIn( + message, + "Unevaluated items are not allowed ('foo', 'bar' were unexpected)", + ) + + def test_unevaluated_items_on_invalid_type(self): + schema = {"type": "array", "unevaluatedItems": False} + message = self.message_for(instance="foo", schema=schema) + self.assertEqual(message, "'foo' is not of type 'array'") + + def test_unevaluated_properties_invalid_against_subschema(self): + schema = { + "properties": {"foo": {"type": "string"}}, + "unevaluatedProperties": {"const": 12}, + } + message = self.message_for( + instance={ + "foo": "foo", + "bar": "bar", + "baz": 12, + }, + schema=schema, + ) + self.assertEqual( + message, + "Unevaluated properties are not valid under the given schema " + "('bar' was unevaluated and invalid)", + ) + + def test_unevaluated_properties_disallowed(self): + schema = {"type": "object", "unevaluatedProperties": False} + message = self.message_for( + instance={ + "foo": "foo", + "bar": "bar", + }, + schema=schema, + ) + self.assertEqual( + message, + "Unevaluated properties are not allowed " + "('bar', 'foo' were unexpected)", + ) + + def test_unevaluated_properties_on_invalid_type(self): + schema = {"type": "object", "unevaluatedProperties": False} + message = self.message_for(instance="foo", schema=schema) + self.assertEqual(message, "'foo' is not of type 'object'") + + def test_single_item(self): + schema = {"prefixItems": [{}], "items": False} + message = self.message_for( + instance=["foo", "bar", "baz"], + schema=schema, + ) + self.assertEqual( + message, + "Expected at most 1 item but found 2 extra: ['bar', 'baz']", + ) + + def test_heterogeneous_additionalItems_with_Items(self): + schema = {"items": [{}], "additionalItems": False} + message = self.message_for( + instance=["foo", "bar", 37], + schema=schema, + cls=validators.Draft7Validator, + ) + self.assertEqual( + message, + "Additional items are not allowed ('bar', 37 were unexpected)", + ) + + def test_heterogeneous_items_prefixItems(self): + schema = {"prefixItems": [{}], "items": False} + message = self.message_for( + instance=["foo", "bar", 37], + schema=schema, + ) + self.assertEqual( + message, + "Expected at most 1 item but found 2 extra: ['bar', 37]", + ) + + def test_heterogeneous_unevaluatedItems_prefixItems(self): + schema = {"prefixItems": [{}], "unevaluatedItems": False} + message = self.message_for( + instance=["foo", "bar", 37], + schema=schema, + ) + self.assertEqual( + message, + "Unevaluated items are not allowed ('bar', 37 were unexpected)", + ) + + def test_heterogeneous_properties_additionalProperties(self): + """ + Not valid deserialized JSON, but this should not blow up. 
+ """ + schema = {"properties": {"foo": {}}, "additionalProperties": False} + message = self.message_for( + instance={"foo": {}, "a": "baz", 37: 12}, + schema=schema, + ) + self.assertEqual( + message, + "Additional properties are not allowed (37, 'a' were unexpected)", + ) + + def test_heterogeneous_properties_unevaluatedProperties(self): + """ + Not valid deserialized JSON, but this should not blow up. + """ + schema = {"properties": {"foo": {}}, "unevaluatedProperties": False} + message = self.message_for( + instance={"foo": {}, "a": "baz", 37: 12}, + schema=schema, + ) + self.assertEqual( + message, + "Unevaluated properties are not allowed (37, 'a' were unexpected)", + ) + + +class TestValidationErrorDetails(TestCase): + # TODO: These really need unit tests for each individual keyword, rather + # than just these higher level tests. + def test_anyOf(self): + instance = 5 + schema = { + "anyOf": [ + {"minimum": 20}, + {"type": "string"}, + ], + } + + validator = validators.Draft4Validator(schema) + errors = list(validator.iter_errors(instance)) + self.assertEqual(len(errors), 1) + e = errors[0] + + self.assertEqual(e.validator, "anyOf") + self.assertEqual(e.validator_value, schema["anyOf"]) + self.assertEqual(e.instance, instance) + self.assertEqual(e.schema, schema) + self.assertIsNone(e.parent) + + self.assertEqual(e.path, deque([])) + self.assertEqual(e.relative_path, deque([])) + self.assertEqual(e.absolute_path, deque([])) + self.assertEqual(e.json_path, "$") + + self.assertEqual(e.schema_path, deque(["anyOf"])) + self.assertEqual(e.relative_schema_path, deque(["anyOf"])) + self.assertEqual(e.absolute_schema_path, deque(["anyOf"])) + + self.assertEqual(len(e.context), 2) + + e1, e2 = sorted_errors(e.context) + + self.assertEqual(e1.validator, "minimum") + self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"]) + self.assertEqual(e1.instance, instance) + self.assertEqual(e1.schema, schema["anyOf"][0]) + self.assertIs(e1.parent, e) + + self.assertEqual(e1.path, deque([])) + self.assertEqual(e1.absolute_path, deque([])) + self.assertEqual(e1.relative_path, deque([])) + self.assertEqual(e1.json_path, "$") + + self.assertEqual(e1.schema_path, deque([0, "minimum"])) + self.assertEqual(e1.relative_schema_path, deque([0, "minimum"])) + self.assertEqual( + e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]), + ) + + self.assertFalse(e1.context) + + self.assertEqual(e2.validator, "type") + self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"]) + self.assertEqual(e2.instance, instance) + self.assertEqual(e2.schema, schema["anyOf"][1]) + self.assertIs(e2.parent, e) + + self.assertEqual(e2.path, deque([])) + self.assertEqual(e2.relative_path, deque([])) + self.assertEqual(e2.absolute_path, deque([])) + self.assertEqual(e2.json_path, "$") + + self.assertEqual(e2.schema_path, deque([1, "type"])) + self.assertEqual(e2.relative_schema_path, deque([1, "type"])) + self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"])) + + self.assertEqual(len(e2.context), 0) + + def test_type(self): + instance = {"foo": 1} + schema = { + "type": [ + {"type": "integer"}, + { + "type": "object", + "properties": {"foo": {"enum": [2]}}, + }, + ], + } + + validator = validators.Draft3Validator(schema) + errors = list(validator.iter_errors(instance)) + self.assertEqual(len(errors), 1) + e = errors[0] + + self.assertEqual(e.validator, "type") + self.assertEqual(e.validator_value, schema["type"]) + self.assertEqual(e.instance, instance) + self.assertEqual(e.schema, schema) + 
self.assertIsNone(e.parent) + + self.assertEqual(e.path, deque([])) + self.assertEqual(e.relative_path, deque([])) + self.assertEqual(e.absolute_path, deque([])) + self.assertEqual(e.json_path, "$") + + self.assertEqual(e.schema_path, deque(["type"])) + self.assertEqual(e.relative_schema_path, deque(["type"])) + self.assertEqual(e.absolute_schema_path, deque(["type"])) + + self.assertEqual(len(e.context), 2) + + e1, e2 = sorted_errors(e.context) + + self.assertEqual(e1.validator, "type") + self.assertEqual(e1.validator_value, schema["type"][0]["type"]) + self.assertEqual(e1.instance, instance) + self.assertEqual(e1.schema, schema["type"][0]) + self.assertIs(e1.parent, e) + + self.assertEqual(e1.path, deque([])) + self.assertEqual(e1.relative_path, deque([])) + self.assertEqual(e1.absolute_path, deque([])) + self.assertEqual(e1.json_path, "$") + + self.assertEqual(e1.schema_path, deque([0, "type"])) + self.assertEqual(e1.relative_schema_path, deque([0, "type"])) + self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"])) + + self.assertFalse(e1.context) + + self.assertEqual(e2.validator, "enum") + self.assertEqual(e2.validator_value, [2]) + self.assertEqual(e2.instance, 1) + self.assertEqual(e2.schema, {"enum": [2]}) + self.assertIs(e2.parent, e) + + self.assertEqual(e2.path, deque(["foo"])) + self.assertEqual(e2.relative_path, deque(["foo"])) + self.assertEqual(e2.absolute_path, deque(["foo"])) + self.assertEqual(e2.json_path, "$.foo") + + self.assertEqual( + e2.schema_path, deque([1, "properties", "foo", "enum"]), + ) + self.assertEqual( + e2.relative_schema_path, deque([1, "properties", "foo", "enum"]), + ) + self.assertEqual( + e2.absolute_schema_path, + deque(["type", 1, "properties", "foo", "enum"]), + ) + + self.assertFalse(e2.context) + + def test_single_nesting(self): + instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"} + schema = { + "properties": { + "foo": {"type": "string"}, + "bar": {"minItems": 2}, + "baz": {"maximum": 10, "enum": [2, 4, 6, 8]}, + }, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2, e3, e4 = sorted_errors(errors) + + self.assertEqual(e1.path, deque(["bar"])) + self.assertEqual(e2.path, deque(["baz"])) + self.assertEqual(e3.path, deque(["baz"])) + self.assertEqual(e4.path, deque(["foo"])) + + self.assertEqual(e1.relative_path, deque(["bar"])) + self.assertEqual(e2.relative_path, deque(["baz"])) + self.assertEqual(e3.relative_path, deque(["baz"])) + self.assertEqual(e4.relative_path, deque(["foo"])) + + self.assertEqual(e1.absolute_path, deque(["bar"])) + self.assertEqual(e2.absolute_path, deque(["baz"])) + self.assertEqual(e3.absolute_path, deque(["baz"])) + self.assertEqual(e4.absolute_path, deque(["foo"])) + + self.assertEqual(e1.json_path, "$.bar") + self.assertEqual(e2.json_path, "$.baz") + self.assertEqual(e3.json_path, "$.baz") + self.assertEqual(e4.json_path, "$.foo") + + self.assertEqual(e1.validator, "minItems") + self.assertEqual(e2.validator, "enum") + self.assertEqual(e3.validator, "maximum") + self.assertEqual(e4.validator, "type") + + def test_multiple_nesting(self): + instance = [1, {"foo": 2, "bar": {"baz": [1]}}, "quux"] + schema = { + "type": "string", + "items": { + "type": ["string", "object"], + "properties": { + "foo": {"enum": [1, 3]}, + "bar": { + "type": "array", + "properties": { + "bar": {"required": True}, + "baz": {"minItems": 2}, + }, + }, + }, + }, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, 
e2, e3, e4, e5, e6 = sorted_errors(errors) + + self.assertEqual(e1.path, deque([])) + self.assertEqual(e2.path, deque([0])) + self.assertEqual(e3.path, deque([1, "bar"])) + self.assertEqual(e4.path, deque([1, "bar", "bar"])) + self.assertEqual(e5.path, deque([1, "bar", "baz"])) + self.assertEqual(e6.path, deque([1, "foo"])) + + self.assertEqual(e1.json_path, "$") + self.assertEqual(e2.json_path, "$[0]") + self.assertEqual(e3.json_path, "$[1].bar") + self.assertEqual(e4.json_path, "$[1].bar.bar") + self.assertEqual(e5.json_path, "$[1].bar.baz") + self.assertEqual(e6.json_path, "$[1].foo") + + self.assertEqual(e1.schema_path, deque(["type"])) + self.assertEqual(e2.schema_path, deque(["items", "type"])) + self.assertEqual( + list(e3.schema_path), ["items", "properties", "bar", "type"], + ) + self.assertEqual( + list(e4.schema_path), + ["items", "properties", "bar", "properties", "bar", "required"], + ) + self.assertEqual( + list(e5.schema_path), + ["items", "properties", "bar", "properties", "baz", "minItems"], + ) + self.assertEqual( + list(e6.schema_path), ["items", "properties", "foo", "enum"], + ) + + self.assertEqual(e1.validator, "type") + self.assertEqual(e2.validator, "type") + self.assertEqual(e3.validator, "type") + self.assertEqual(e4.validator, "required") + self.assertEqual(e5.validator, "minItems") + self.assertEqual(e6.validator, "enum") + + def test_recursive(self): + schema = { + "definitions": { + "node": { + "anyOf": [{ + "type": "object", + "required": ["name", "children"], + "properties": { + "name": { + "type": "string", + }, + "children": { + "type": "object", + "patternProperties": { + "^.*$": { + "$ref": "#/definitions/node", + }, + }, + }, + }, + }], + }, + }, + "type": "object", + "required": ["root"], + "properties": {"root": {"$ref": "#/definitions/node"}}, + } + + instance = { + "root": { + "name": "root", + "children": { + "a": { + "name": "a", + "children": { + "ab": { + "name": "ab", + # missing "children" + }, + }, + }, + }, + }, + } + validator = validators.Draft4Validator(schema) + + e, = validator.iter_errors(instance) + self.assertEqual(e.absolute_path, deque(["root"])) + self.assertEqual( + e.absolute_schema_path, deque(["properties", "root", "anyOf"]), + ) + self.assertEqual(e.json_path, "$.root") + + e1, = e.context + self.assertEqual(e1.absolute_path, deque(["root", "children", "a"])) + self.assertEqual( + e1.absolute_schema_path, deque( + [ + "properties", + "root", + "anyOf", + 0, + "properties", + "children", + "patternProperties", + "^.*$", + "anyOf", + ], + ), + ) + self.assertEqual(e1.json_path, "$.root.children.a") + + e2, = e1.context + self.assertEqual( + e2.absolute_path, deque( + ["root", "children", "a", "children", "ab"], + ), + ) + self.assertEqual( + e2.absolute_schema_path, deque( + [ + "properties", + "root", + "anyOf", + 0, + "properties", + "children", + "patternProperties", + "^.*$", + "anyOf", + 0, + "properties", + "children", + "patternProperties", + "^.*$", + "anyOf", + ], + ), + ) + self.assertEqual(e2.json_path, "$.root.children.a.children.ab") + + def test_additionalProperties(self): + instance = {"bar": "bar", "foo": 2} + schema = {"additionalProperties": {"type": "integer", "minimum": 5}} + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2 = sorted_errors(errors) + + self.assertEqual(e1.path, deque(["bar"])) + self.assertEqual(e2.path, deque(["foo"])) + + self.assertEqual(e1.json_path, "$.bar") + self.assertEqual(e2.json_path, "$.foo") + + self.assertEqual(e1.validator, 
"type") + self.assertEqual(e2.validator, "minimum") + + def test_patternProperties(self): + instance = {"bar": 1, "foo": 2} + schema = { + "patternProperties": { + "bar": {"type": "string"}, + "foo": {"minimum": 5}, + }, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2 = sorted_errors(errors) + + self.assertEqual(e1.path, deque(["bar"])) + self.assertEqual(e2.path, deque(["foo"])) + + self.assertEqual(e1.json_path, "$.bar") + self.assertEqual(e2.json_path, "$.foo") + + self.assertEqual(e1.validator, "type") + self.assertEqual(e2.validator, "minimum") + + def test_additionalItems(self): + instance = ["foo", 1] + schema = { + "items": [], + "additionalItems": {"type": "integer", "minimum": 5}, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2 = sorted_errors(errors) + + self.assertEqual(e1.path, deque([0])) + self.assertEqual(e2.path, deque([1])) + + self.assertEqual(e1.json_path, "$[0]") + self.assertEqual(e2.json_path, "$[1]") + + self.assertEqual(e1.validator, "type") + self.assertEqual(e2.validator, "minimum") + + def test_additionalItems_with_items(self): + instance = ["foo", "bar", 1] + schema = { + "items": [{}], + "additionalItems": {"type": "integer", "minimum": 5}, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2 = sorted_errors(errors) + + self.assertEqual(e1.path, deque([1])) + self.assertEqual(e2.path, deque([2])) + + self.assertEqual(e1.json_path, "$[1]") + self.assertEqual(e2.json_path, "$[2]") + + self.assertEqual(e1.validator, "type") + self.assertEqual(e2.validator, "minimum") + + def test_propertyNames(self): + instance = {"foo": 12} + schema = {"propertyNames": {"not": {"const": "foo"}}} + + validator = validators.Draft7Validator(schema) + error, = validator.iter_errors(instance) + + self.assertEqual(error.validator, "not") + self.assertEqual( + error.message, + "'foo' should not be valid under {'const': 'foo'}", + ) + self.assertEqual(error.path, deque([])) + self.assertEqual(error.json_path, "$") + self.assertEqual(error.schema_path, deque(["propertyNames", "not"])) + + def test_if_then(self): + schema = { + "if": {"const": 12}, + "then": {"const": 13}, + } + + validator = validators.Draft7Validator(schema) + error, = validator.iter_errors(12) + + self.assertEqual(error.validator, "const") + self.assertEqual(error.message, "13 was expected") + self.assertEqual(error.path, deque([])) + self.assertEqual(error.json_path, "$") + self.assertEqual(error.schema_path, deque(["then", "const"])) + + def test_if_else(self): + schema = { + "if": {"const": 12}, + "else": {"const": 13}, + } + + validator = validators.Draft7Validator(schema) + error, = validator.iter_errors(15) + + self.assertEqual(error.validator, "const") + self.assertEqual(error.message, "13 was expected") + self.assertEqual(error.path, deque([])) + self.assertEqual(error.json_path, "$") + self.assertEqual(error.schema_path, deque(["else", "const"])) + + def test_boolean_schema_False(self): + validator = validators.Draft7Validator(False) + error, = validator.iter_errors(12) + + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "False schema does not allow 12", + None, + None, + 12, + False, + deque([]), + "$", + ), + ) + + def test_ref(self): + ref, schema = "someRef", {"additionalProperties": {"type": "integer"}} + validator = 
validators.Draft7Validator( + {"$ref": ref}, + resolver=validators._RefResolver("", {}, store={ref: schema}), + ) + error, = validator.iter_errors({"foo": "notAnInteger"}) + + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "'notAnInteger' is not of type 'integer'", + "type", + "integer", + "notAnInteger", + deque(["foo"]), + {"type": "integer"}, + deque(["additionalProperties", "type"]), + "$.foo", + ), + ) + + def test_prefixItems(self): + schema = {"prefixItems": [{"type": "string"}, {}, {}, {"maximum": 3}]} + validator = validators.Draft202012Validator(schema) + type_error, min_error = validator.iter_errors([1, 2, "foo", 5]) + self.assertEqual( + ( + type_error.message, + type_error.validator, + type_error.validator_value, + type_error.instance, + type_error.absolute_path, + type_error.schema, + type_error.schema_path, + type_error.json_path, + ), + ( + "1 is not of type 'string'", + "type", + "string", + 1, + deque([0]), + {"type": "string"}, + deque(["prefixItems", 0, "type"]), + "$[0]", + ), + ) + self.assertEqual( + ( + min_error.message, + min_error.validator, + min_error.validator_value, + min_error.instance, + min_error.absolute_path, + min_error.schema, + min_error.schema_path, + min_error.json_path, + ), + ( + "5 is greater than the maximum of 3", + "maximum", + 3, + 5, + deque([3]), + {"maximum": 3}, + deque(["prefixItems", 3, "maximum"]), + "$[3]", + ), + ) + + def test_prefixItems_with_items(self): + schema = { + "items": {"type": "string"}, + "prefixItems": [{}], + } + validator = validators.Draft202012Validator(schema) + e1, e2 = validator.iter_errors(["foo", 2, "bar", 4, "baz"]) + self.assertEqual( + ( + e1.message, + e1.validator, + e1.validator_value, + e1.instance, + e1.absolute_path, + e1.schema, + e1.schema_path, + e1.json_path, + ), + ( + "2 is not of type 'string'", + "type", + "string", + 2, + deque([1]), + {"type": "string"}, + deque(["items", "type"]), + "$[1]", + ), + ) + self.assertEqual( + ( + e2.message, + e2.validator, + e2.validator_value, + e2.instance, + e2.absolute_path, + e2.schema, + e2.schema_path, + e2.json_path, + ), + ( + "4 is not of type 'string'", + "type", + "string", + 4, + deque([3]), + {"type": "string"}, + deque(["items", "type"]), + "$[3]", + ), + ) + + def test_contains_too_many(self): + """ + `contains` + `maxContains` produces only one error, even if there are + many more incorrectly matching elements. 
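+
+        The single error is attributed to maxContains and points at the
+        whole array rather than at any individual element.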
+ """ + schema = {"contains": {"type": "string"}, "maxContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors(["foo", 2, "bar", 4, "baz", "quux"]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "Too many items match the given schema (expected at most 2)", + "maxContains", + 2, + ["foo", 2, "bar", 4, "baz", "quux"], + deque([]), + {"contains": {"type": "string"}, "maxContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_contains_too_few(self): + schema = {"contains": {"type": "string"}, "minContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors(["foo", 2, 4]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + ( + "Too few items match the given schema " + "(expected at least 2 but only 1 matched)" + ), + "minContains", + 2, + ["foo", 2, 4], + deque([]), + {"contains": {"type": "string"}, "minContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_contains_none(self): + schema = {"contains": {"type": "string"}, "minContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors([2, 4]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "[2, 4] does not contain items matching the given schema", + "contains", + {"type": "string"}, + [2, 4], + deque([]), + {"contains": {"type": "string"}, "minContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_ref_sibling(self): + schema = { + "$defs": {"foo": {"required": ["bar"]}}, + "properties": { + "aprop": { + "$ref": "#/$defs/foo", + "required": ["baz"], + }, + }, + } + + validator = validators.Draft202012Validator(schema) + e1, e2 = validator.iter_errors({"aprop": {}}) + self.assertEqual( + ( + e1.message, + e1.validator, + e1.validator_value, + e1.instance, + e1.absolute_path, + e1.schema, + e1.schema_path, + e1.relative_schema_path, + e1.json_path, + ), + ( + "'bar' is a required property", + "required", + ["bar"], + {}, + deque(["aprop"]), + {"required": ["bar"]}, + deque(["properties", "aprop", "required"]), + deque(["properties", "aprop", "required"]), + "$.aprop", + ), + ) + self.assertEqual( + ( + e2.message, + e2.validator, + e2.validator_value, + e2.instance, + e2.absolute_path, + e2.schema, + e2.schema_path, + e2.relative_schema_path, + e2.json_path, + ), + ( + "'baz' is a required property", + "required", + ["baz"], + {}, + deque(["aprop"]), + {"$ref": "#/$defs/foo", "required": ["baz"]}, + deque(["properties", "aprop", "required"]), + deque(["properties", "aprop", "required"]), + "$.aprop", + ), + ) + + +class MetaSchemaTestsMixin: + # TODO: These all belong upstream + def test_invalid_properties(self): + with self.assertRaises(exceptions.SchemaError): + self.Validator.check_schema({"properties": 12}) + + def test_minItems_invalid_string(self): + with self.assertRaises(exceptions.SchemaError): + # needs to be an integer + self.Validator.check_schema({"minItems": "1"}) + + def test_enum_allows_empty_arrays(self): + """ + Technically, all the spec says is they SHOULD have elements, not MUST. + + (As of Draft 6. Previous drafts do say MUST). + + See #529. 
+ """ + if self.Validator in { + validators.Draft3Validator, + validators.Draft4Validator, + }: + with self.assertRaises(exceptions.SchemaError): + self.Validator.check_schema({"enum": []}) + else: + self.Validator.check_schema({"enum": []}) + + def test_enum_allows_non_unique_items(self): + """ + Technically, all the spec says is they SHOULD be unique, not MUST. + + (As of Draft 6. Previous drafts do say MUST). + + See #529. + """ + if self.Validator in { + validators.Draft3Validator, + validators.Draft4Validator, + }: + with self.assertRaises(exceptions.SchemaError): + self.Validator.check_schema({"enum": [12, 12]}) + else: + self.Validator.check_schema({"enum": [12, 12]}) + + def test_schema_with_invalid_regex(self): + with self.assertRaises(exceptions.SchemaError): + self.Validator.check_schema({"pattern": "*notaregex"}) + + def test_schema_with_invalid_regex_with_disabled_format_validation(self): + self.Validator.check_schema( + {"pattern": "*notaregex"}, + format_checker=None, + ) + + +class ValidatorTestMixin(MetaSchemaTestsMixin): + def test_it_implements_the_validator_protocol(self): + self.assertIsInstance(self.Validator({}), protocols.Validator) + + def test_valid_instances_are_valid(self): + schema, instance = self.valid + self.assertTrue(self.Validator(schema).is_valid(instance)) + + def test_invalid_instances_are_not_valid(self): + schema, instance = self.invalid + self.assertFalse(self.Validator(schema).is_valid(instance)) + + def test_non_existent_properties_are_ignored(self): + self.Validator({object(): object()}).validate(instance=object()) + + def test_evolve(self): + schema, format_checker = {"type": "integer"}, FormatChecker() + original = self.Validator( + schema, + format_checker=format_checker, + ) + new = original.evolve( + schema={"type": "string"}, + format_checker=self.Validator.FORMAT_CHECKER, + ) + + expected = self.Validator( + {"type": "string"}, + format_checker=self.Validator.FORMAT_CHECKER, + _resolver=new._resolver, + ) + + self.assertEqual(new, expected) + self.assertNotEqual(new, original) + + def test_evolve_with_subclass(self): + """ + Subclassing validators isn't supported public API, but some users have + done it, because we don't actually error entirely when it's done :/ + + We need to deprecate doing so first to help as many of these users + ensure they can move to supported APIs, but this test ensures that in + the interim, we haven't broken those users. 
+ """ + + with self.assertWarns(DeprecationWarning): + @define + class OhNo(self.Validator): + foo = field(factory=lambda: [1, 2, 3]) + _bar = field(default=37) + + validator = OhNo({}, bar=12) + self.assertEqual(validator.foo, [1, 2, 3]) + + new = validator.evolve(schema={"type": "integer"}) + self.assertEqual(new.foo, [1, 2, 3]) + self.assertEqual(new._bar, 12) + + def test_is_type_is_true_for_valid_type(self): + self.assertTrue(self.Validator({}).is_type("foo", "string")) + + def test_is_type_is_false_for_invalid_type(self): + self.assertFalse(self.Validator({}).is_type("foo", "array")) + + def test_is_type_evades_bool_inheriting_from_int(self): + self.assertFalse(self.Validator({}).is_type(True, "integer")) + self.assertFalse(self.Validator({}).is_type(True, "number")) + + def test_it_can_validate_with_decimals(self): + schema = {"items": {"type": "number"}} + Validator = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine( + "number", + lambda checker, thing: isinstance( + thing, (int, float, Decimal), + ) and not isinstance(thing, bool), + ), + ) + + validator = Validator(schema) + validator.validate([1, 1.1, Decimal(1) / Decimal(8)]) + + invalid = ["foo", {}, [], True, None] + self.assertEqual( + [error.instance for error in validator.iter_errors(invalid)], + invalid, + ) + + def test_it_returns_true_for_formats_it_does_not_know_about(self): + validator = self.Validator( + {"format": "carrot"}, format_checker=FormatChecker(), + ) + validator.validate("bugs") + + def test_it_does_not_validate_formats_by_default(self): + validator = self.Validator({}) + self.assertIsNone(validator.format_checker) + + def test_it_validates_formats_if_a_checker_is_provided(self): + checker = FormatChecker() + bad = ValueError("Bad!") + + @checker.checks("foo", raises=ValueError) + def check(value): + if value == "good": + return True + elif value == "bad": + raise bad + else: # pragma: no cover + self.fail(f"What is {value}? [Baby Don't Hurt Me]") + + validator = self.Validator( + {"format": "foo"}, format_checker=checker, + ) + + validator.validate("good") + with self.assertRaises(exceptions.ValidationError) as cm: + validator.validate("bad") + + # Make sure original cause is attached + self.assertIs(cm.exception.cause, bad) + + def test_non_string_custom_type(self): + non_string_type = object() + schema = {"type": [non_string_type]} + Crazy = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine( + non_string_type, + lambda checker, thing: isinstance(thing, int), + ), + ) + Crazy(schema).validate(15) + + def test_it_properly_formats_tuples_in_errors(self): + """ + A tuple instance properly formats validation errors for uniqueItems. 
+ + See #224 + """ + TupleValidator = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine( + "array", + lambda checker, thing: isinstance(thing, tuple), + ), + ) + with self.assertRaises(exceptions.ValidationError) as e: + TupleValidator({"uniqueItems": True}).validate((1, 1)) + self.assertIn("(1, 1) has non-unique elements", str(e.exception)) + + def test_check_redefined_sequence(self): + """ + Allow array to validate against another defined sequence type + """ + schema = {"type": "array", "uniqueItems": True} + MyMapping = namedtuple("MyMapping", "a, b") + Validator = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine_many( + { + "array": lambda checker, thing: isinstance( + thing, (list, deque), + ), + "object": lambda checker, thing: isinstance( + thing, (dict, MyMapping), + ), + }, + ), + ) + validator = Validator(schema) + + valid_instances = [ + deque(["a", None, "1", "", True]), + deque([[False], [0]]), + [deque([False]), deque([0])], + [[deque([False])], [deque([0])]], + [[[[[deque([False])]]]], [[[[deque([0])]]]]], + [deque([deque([False])]), deque([deque([0])])], + [MyMapping("a", 0), MyMapping("a", False)], + [ + MyMapping("a", [deque([0])]), + MyMapping("a", [deque([False])]), + ], + [ + MyMapping("a", [MyMapping("a", deque([0]))]), + MyMapping("a", [MyMapping("a", deque([False]))]), + ], + [deque(deque(deque([False]))), deque(deque(deque([0])))], + ] + + for instance in valid_instances: + validator.validate(instance) + + invalid_instances = [ + deque(["a", "b", "a"]), + deque([[False], [False]]), + [deque([False]), deque([False])], + [[deque([False])], [deque([False])]], + [[[[[deque([False])]]]], [[[[deque([False])]]]]], + [deque([deque([False])]), deque([deque([False])])], + [MyMapping("a", False), MyMapping("a", False)], + [ + MyMapping("a", [deque([False])]), + MyMapping("a", [deque([False])]), + ], + [ + MyMapping("a", [MyMapping("a", deque([False]))]), + MyMapping("a", [MyMapping("a", deque([False]))]), + ], + [deque(deque(deque([False]))), deque(deque(deque([False])))], + ] + + for instance in invalid_instances: + with self.assertRaises(exceptions.ValidationError): + validator.validate(instance) + + def test_it_creates_a_ref_resolver_if_not_provided(self): + with self.assertWarns(DeprecationWarning): + resolver = self.Validator({}).resolver + self.assertIsInstance(resolver, validators._RefResolver) + + def test_it_upconverts_from_deprecated_RefResolvers(self): + ref, schema = "someCoolRef", {"type": "integer"} + resolver = validators._RefResolver("", {}, store={ref: schema}) + validator = self.Validator({"$ref": ref}, resolver=resolver) + + with self.assertRaises(exceptions.ValidationError): + validator.validate(None) + + def test_it_upconverts_from_yet_older_deprecated_legacy_RefResolvers(self): + """ + Legacy RefResolvers support only the context manager form of + resolution. + """ + + class LegacyRefResolver: + @contextmanager + def resolving(this, ref): + self.assertEqual(ref, "the ref") + yield {"type": "integer"} + + resolver = LegacyRefResolver() + schema = {"$ref": "the ref"} + + with self.assertRaises(exceptions.ValidationError): + self.Validator(schema, resolver=resolver).validate(None) + + +class AntiDraft6LeakMixin: + """ + Make sure functionality from draft 6 doesn't leak backwards in time. 
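+
+    Boolean schemas, for instance, only became valid in Draft 6, so the
+    Draft 3 and Draft 4 validators must reject True and False as schemas.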
+ """ + + def test_True_is_not_a_schema(self): + with self.assertRaises(exceptions.SchemaError) as e: + self.Validator.check_schema(True) + self.assertIn("True is not of type", str(e.exception)) + + def test_False_is_not_a_schema(self): + with self.assertRaises(exceptions.SchemaError) as e: + self.Validator.check_schema(False) + self.assertIn("False is not of type", str(e.exception)) + + def test_True_is_not_a_schema_even_if_you_forget_to_check(self): + with self.assertRaises(Exception) as e: + self.Validator(True).validate(12) + self.assertNotIsInstance(e.exception, exceptions.ValidationError) + + def test_False_is_not_a_schema_even_if_you_forget_to_check(self): + with self.assertRaises(Exception) as e: + self.Validator(False).validate(12) + self.assertNotIsInstance(e.exception, exceptions.ValidationError) + + +class TestDraft3Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase): + Validator = validators.Draft3Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + def test_any_type_is_valid_for_type_any(self): + validator = self.Validator({"type": "any"}) + validator.validate(object()) + + def test_any_type_is_redefinable(self): + """ + Sigh, because why not. + """ + Crazy = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine( + "any", lambda checker, thing: isinstance(thing, int), + ), + ) + validator = Crazy({"type": "any"}) + validator.validate(12) + with self.assertRaises(exceptions.ValidationError): + validator.validate("foo") + + def test_is_type_is_true_for_any_type(self): + self.assertTrue(self.Validator({"type": "any"}).is_valid(object())) + + def test_is_type_does_not_evade_bool_if_it_is_being_tested(self): + self.assertTrue(self.Validator({}).is_type(True, "boolean")) + self.assertTrue(self.Validator({"type": "any"}).is_valid(True)) + + +class TestDraft4Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase): + Validator = validators.Draft4Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft6Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft6Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft7Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft7Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft201909Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft201909Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft202012Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft202012Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestLatestValidator(TestCase): + """ + These really apply to multiple versions but are easiest to test on one. 
+ """ + + def test_ref_resolvers_may_have_boolean_schemas_stored(self): + ref = "someCoolRef" + schema = {"$ref": ref} + resolver = validators._RefResolver("", {}, store={ref: False}) + validator = validators._LATEST_VERSION(schema, resolver=resolver) + + with self.assertRaises(exceptions.ValidationError): + validator.validate(None) + + +class TestValidatorFor(TestCase): + def test_draft_3(self): + schema = {"$schema": "http://json-schema.org/draft-03/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft3Validator, + ) + + schema = {"$schema": "http://json-schema.org/draft-03/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft3Validator, + ) + + def test_draft_4(self): + schema = {"$schema": "http://json-schema.org/draft-04/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft4Validator, + ) + + schema = {"$schema": "http://json-schema.org/draft-04/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft4Validator, + ) + + def test_draft_6(self): + schema = {"$schema": "http://json-schema.org/draft-06/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft6Validator, + ) + + schema = {"$schema": "http://json-schema.org/draft-06/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft6Validator, + ) + + def test_draft_7(self): + schema = {"$schema": "http://json-schema.org/draft-07/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft7Validator, + ) + + schema = {"$schema": "http://json-schema.org/draft-07/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft7Validator, + ) + + def test_draft_201909(self): + schema = {"$schema": "https://json-schema.org/draft/2019-09/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft201909Validator, + ) + + schema = {"$schema": "https://json-schema.org/draft/2019-09/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft201909Validator, + ) + + def test_draft_202012(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft202012Validator, + ) + + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft202012Validator, + ) + + def test_True(self): + self.assertIs( + validators.validator_for(True), + validators._LATEST_VERSION, + ) + + def test_False(self): + self.assertIs( + validators.validator_for(False), + validators._LATEST_VERSION, + ) + + def test_custom_validator(self): + Validator = validators.create( + meta_schema={"id": "meta schema id"}, + version="12", + id_of=lambda s: s.get("id", ""), + ) + schema = {"$schema": "meta schema id"} + self.assertIs( + validators.validator_for(schema), + Validator, + ) + + def test_custom_validator_draft6(self): + Validator = validators.create( + meta_schema={"$id": "meta schema $id"}, + version="13", + ) + schema = {"$schema": "meta schema $id"} + self.assertIs( + validators.validator_for(schema), + Validator, + ) + + def test_validator_for_jsonschema_default(self): + self.assertIs(validators.validator_for({}), validators._LATEST_VERSION) + + def test_validator_for_custom_default(self): + self.assertIs(validators.validator_for({}, default=None), None) + + def test_warns_if_meta_schema_specified_was_not_found(self): + with self.assertWarns(DeprecationWarning) as cm: + 
validators.validator_for(schema={"$schema": "unknownSchema"}) + + self.assertEqual(cm.filename, __file__) + self.assertEqual( + str(cm.warning), + "The metaschema specified by $schema was not found. " + "Using the latest draft to validate, but this will raise " + "an error in the future.", + ) + + def test_does_not_warn_if_meta_schema_is_unspecified(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + validators.validator_for(schema={}, default={}) + self.assertFalse(w) + + def test_validator_for_custom_default_with_schema(self): + schema, default = {"$schema": "mailto:foo@example.com"}, object() + self.assertIs(validators.validator_for(schema, default), default) + + +class TestValidate(TestCase): + def assertUses(self, schema, Validator): + result = [] + with mock.patch.object(Validator, "check_schema", result.append): + validators.validate({}, schema) + self.assertEqual(result, [schema]) + + def test_draft3_validator_is_chosen(self): + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-03/schema#"}, + Validator=validators.Draft3Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-03/schema"}, + Validator=validators.Draft3Validator, + ) + + def test_draft4_validator_is_chosen(self): + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-04/schema#"}, + Validator=validators.Draft4Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-04/schema"}, + Validator=validators.Draft4Validator, + ) + + def test_draft6_validator_is_chosen(self): + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-06/schema#"}, + Validator=validators.Draft6Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-06/schema"}, + Validator=validators.Draft6Validator, + ) + + def test_draft7_validator_is_chosen(self): + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-07/schema#"}, + Validator=validators.Draft7Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-07/schema"}, + Validator=validators.Draft7Validator, + ) + + def test_draft202012_validator_is_chosen(self): + self.assertUses( + schema={ + "$schema": "https://json-schema.org/draft/2020-12/schema#", + }, + Validator=validators.Draft202012Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={ + "$schema": "https://json-schema.org/draft/2020-12/schema", + }, + Validator=validators.Draft202012Validator, + ) + + def test_draft202012_validator_is_the_default(self): + self.assertUses(schema={}, Validator=validators.Draft202012Validator) + + def test_validation_error_message(self): + with self.assertRaises(exceptions.ValidationError) as e: + validators.validate(12, {"type": "string"}) + self.assertRegex( + str(e.exception), + "(?s)Failed validating '.*' in schema.*On instance", + ) + + def test_schema_error_message(self): + with self.assertRaises(exceptions.SchemaError) as e: + validators.validate(12, {"type": 12}) + self.assertRegex( + str(e.exception), + "(?s)Failed validating '.*' in metaschema.*On schema", + ) + + def test_it_uses_best_match(self): + schema = { + "oneOf": [ + {"type": "number", "minimum": 20}, + {"type": "array"}, + ], + } + with self.assertRaises(exceptions.ValidationError) as e: + 
validators.validate(12, schema) + self.assertIn("12 is less than the minimum of 20", str(e.exception)) + + +class TestThreading(TestCase): + """ + Threading-related functionality tests. + + jsonschema doesn't promise thread safety, and its validation behavior + across multiple threads may change at any time, but that means it isn't + safe to share *validators* across threads, not that anytime one has + multiple threads that jsonschema won't work (it certainly is intended to). + + These tests ensure that this minimal level of functionality continues to + work. + """ + + def test_validation_across_a_second_thread(self): + failed = [] + + def validate(): + try: + validators.validate(instance=37, schema=True) + except: # pragma: no cover # noqa: E722 + failed.append(sys.exc_info()) + + validate() # just verify it succeeds + + from threading import Thread + thread = Thread(target=validate) + thread.start() + thread.join() + self.assertEqual((thread.is_alive(), failed), (False, [])) + + +class TestReferencing(TestCase): + def test_registry_with_retrieve(self): + def retrieve(uri): + return DRAFT202012.create_resource({"type": "integer"}) + + registry = referencing.Registry(retrieve=retrieve) + schema = {"$ref": "https://example.com/"} + validator = validators.Draft202012Validator(schema, registry=registry) + + self.assertEqual( + (validator.is_valid(12), validator.is_valid("foo")), + (True, False), + ) + + def test_custom_registries_do_not_autoretrieve_remote_resources(self): + registry = referencing.Registry() + schema = {"$ref": "https://example.com/"} + validator = validators.Draft202012Validator(schema, registry=registry) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with self.assertRaises(referencing.exceptions.Unresolvable): + validator.validate(12) + self.assertFalse(w) + + +class TestRefResolver(TestCase): + + base_uri = "" + stored_uri = "foo://stored" + stored_schema = {"stored": "schema"} + + def setUp(self): + self.referrer = {} + self.store = {self.stored_uri: self.stored_schema} + self.resolver = validators._RefResolver( + self.base_uri, self.referrer, self.store, + ) + + def test_it_does_not_retrieve_schema_urls_from_the_network(self): + ref = validators.Draft3Validator.META_SCHEMA["id"] + with mock.patch.object(self.resolver, "resolve_remote") as patched: # noqa: SIM117 + with self.resolver.resolving(ref) as resolved: + pass + self.assertEqual(resolved, validators.Draft3Validator.META_SCHEMA) + self.assertFalse(patched.called) + + def test_it_resolves_local_refs(self): + ref = "#/properties/foo" + self.referrer["properties"] = {"foo": object()} + with self.resolver.resolving(ref) as resolved: + self.assertEqual(resolved, self.referrer["properties"]["foo"]) + + def test_it_resolves_local_refs_with_id(self): + schema = {"id": "http://bar/schema#", "a": {"foo": "bar"}} + resolver = validators._RefResolver.from_schema( + schema, + id_of=lambda schema: schema.get("id", ""), + ) + with resolver.resolving("#/a") as resolved: + self.assertEqual(resolved, schema["a"]) + with resolver.resolving("http://bar/schema#/a") as resolved: + self.assertEqual(resolved, schema["a"]) + + def test_it_retrieves_stored_refs(self): + with self.resolver.resolving(self.stored_uri) as resolved: + self.assertIs(resolved, self.stored_schema) + + self.resolver.store["cached_ref"] = {"foo": 12} + with self.resolver.resolving("cached_ref#/foo") as resolved: + self.assertEqual(resolved, 12) + + def test_it_retrieves_unstored_refs_via_requests(self): + ref = 
"http://bar#baz" + schema = {"baz": 12} + + if "requests" in sys.modules: # pragma: no cover + self.addCleanup( + sys.modules.__setitem__, "requests", sys.modules["requests"], + ) + sys.modules["requests"] = ReallyFakeRequests({"http://bar": schema}) + + with self.resolver.resolving(ref) as resolved: + self.assertEqual(resolved, 12) + + def test_it_retrieves_unstored_refs_via_urlopen(self): + ref = "http://bar#baz" + schema = {"baz": 12} + + if "requests" in sys.modules: # pragma: no cover + self.addCleanup( + sys.modules.__setitem__, "requests", sys.modules["requests"], + ) + sys.modules["requests"] = None + + @contextmanager + def fake_urlopen(url): + self.assertEqual(url, "http://bar") + yield BytesIO(json.dumps(schema).encode("utf8")) + + self.addCleanup(setattr, validators, "urlopen", validators.urlopen) + validators.urlopen = fake_urlopen + + with self.resolver.resolving(ref) as resolved: + pass + self.assertEqual(resolved, 12) + + def test_it_retrieves_local_refs_via_urlopen(self): + with tempfile.NamedTemporaryFile(delete=False, mode="wt") as tempf: + self.addCleanup(os.remove, tempf.name) + json.dump({"foo": "bar"}, tempf) + + ref = f"file://{pathname2url(tempf.name)}#foo" + with self.resolver.resolving(ref) as resolved: + self.assertEqual(resolved, "bar") + + def test_it_can_construct_a_base_uri_from_a_schema(self): + schema = {"id": "foo"} + resolver = validators._RefResolver.from_schema( + schema, + id_of=lambda schema: schema.get("id", ""), + ) + self.assertEqual(resolver.base_uri, "foo") + self.assertEqual(resolver.resolution_scope, "foo") + with resolver.resolving("") as resolved: + self.assertEqual(resolved, schema) + with resolver.resolving("#") as resolved: + self.assertEqual(resolved, schema) + with resolver.resolving("foo") as resolved: + self.assertEqual(resolved, schema) + with resolver.resolving("foo#") as resolved: + self.assertEqual(resolved, schema) + + def test_it_can_construct_a_base_uri_from_a_schema_without_id(self): + schema = {} + resolver = validators._RefResolver.from_schema(schema) + self.assertEqual(resolver.base_uri, "") + self.assertEqual(resolver.resolution_scope, "") + with resolver.resolving("") as resolved: + self.assertEqual(resolved, schema) + with resolver.resolving("#") as resolved: + self.assertEqual(resolved, schema) + + def test_custom_uri_scheme_handlers(self): + def handler(url): + self.assertEqual(url, ref) + return schema + + schema = {"foo": "bar"} + ref = "foo://bar" + resolver = validators._RefResolver("", {}, handlers={"foo": handler}) + with resolver.resolving(ref) as resolved: + self.assertEqual(resolved, schema) + + def test_cache_remote_on(self): + response = [object()] + + def handler(url): + try: + return response.pop() + except IndexError: # pragma: no cover + self.fail("Response must not have been cached!") + + ref = "foo://bar" + resolver = validators._RefResolver( + "", {}, cache_remote=True, handlers={"foo": handler}, + ) + with resolver.resolving(ref): + pass + with resolver.resolving(ref): + pass + + def test_cache_remote_off(self): + response = [object()] + + def handler(url): + try: + return response.pop() + except IndexError: # pragma: no cover + self.fail("Handler called twice!") + + ref = "foo://bar" + resolver = validators._RefResolver( + "", {}, cache_remote=False, handlers={"foo": handler}, + ) + with resolver.resolving(ref): + pass + + def test_if_you_give_it_junk_you_get_a_resolution_error(self): + error = ValueError("Oh no! 
What's this?") + + def handler(url): + raise error + + ref = "foo://bar" + resolver = validators._RefResolver("", {}, handlers={"foo": handler}) + with self.assertRaises(exceptions._RefResolutionError) as err: # noqa: SIM117 + with resolver.resolving(ref): + self.fail("Shouldn't get this far!") # pragma: no cover + self.assertEqual(err.exception, exceptions._RefResolutionError(error)) + + def test_helpful_error_message_on_failed_pop_scope(self): + resolver = validators._RefResolver("", {}) + resolver.pop_scope() + with self.assertRaises(exceptions._RefResolutionError) as exc: + resolver.pop_scope() + self.assertIn("Failed to pop the scope", str(exc.exception)) + + def test_pointer_within_schema_with_different_id(self): + """ + See #1085. + """ + schema = validators.Draft7Validator.META_SCHEMA + one = validators._RefResolver("", schema) + validator = validators.Draft7Validator(schema, resolver=one) + self.assertFalse(validator.is_valid({"maxLength": "foo"})) + + another = { + "allOf": [{"$ref": validators.Draft7Validator.META_SCHEMA["$id"]}], + } + two = validators._RefResolver("", another) + validator = validators.Draft7Validator(another, resolver=two) + self.assertFalse(validator.is_valid({"maxLength": "foo"})) + + def test_newly_created_validator_with_ref_resolver(self): + """ + See https://github.com/python-jsonschema/jsonschema/issues/1061#issuecomment-1624266555. + """ + + def handle(uri): + self.assertEqual(uri, "http://example.com/foo") + return {"type": "integer"} + + resolver = validators._RefResolver("", {}, handlers={"http": handle}) + Validator = validators.create( + meta_schema={}, + validators=validators.Draft4Validator.VALIDATORS, + ) + schema = {"$id": "http://example.com/bar", "$ref": "foo"} + validator = Validator(schema, resolver=resolver) + self.assertEqual( + (validator.is_valid({}), validator.is_valid(37)), + (False, True), + ) + + def test_refresolver_with_pointer_in_schema_with_no_id(self): + """ + See https://github.com/python-jsonschema/jsonschema/issues/1124#issuecomment-1632574249. 
+ """ + + schema = { + "properties": {"x": {"$ref": "#/definitions/x"}}, + "definitions": {"x": {"type": "integer"}}, + } + + validator = validators.Draft202012Validator( + schema, + resolver=validators._RefResolver("", schema), + ) + self.assertEqual( + (validator.is_valid({"x": "y"}), validator.is_valid({"x": 37})), + (False, True), + ) + + + +def sorted_errors(errors): + def key(error): + return ( + [str(e) for e in error.path], + [str(e) for e in error.schema_path], + ) + return sorted(errors, key=key) + + +@define +class ReallyFakeRequests: + + _responses: dict[str, Any] + + def get(self, url): + response = self._responses.get(url) + if url is None: # pragma: no cover + raise ValueError("Unknown URL: " + repr(url)) + return _ReallyFakeJSONResponse(json.dumps(response)) + + +@define +class _ReallyFakeJSONResponse: + + _response: str + + def json(self): + return json.loads(self._response) diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/__init__.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..9dec906 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/__pycache__/test_all_concrete_validators_match_protocol.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/__pycache__/test_all_concrete_validators_match_protocol.cpython-312.pyc new file mode 100644 index 0000000..53c0fd3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/__pycache__/test_all_concrete_validators_match_protocol.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/test_all_concrete_validators_match_protocol.py b/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/test_all_concrete_validators_match_protocol.py new file mode 100644 index 0000000..63e8bd4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/tests/typing/test_all_concrete_validators_match_protocol.py @@ -0,0 +1,38 @@ +""" +This module acts as a test that type checkers will allow each validator +class to be assigned to a variable of type `type[Validator]` + +The assignation is only valid if type checkers recognize each Validator +implementation as a valid implementer of the protocol. 
+""" +from jsonschema.protocols import Validator +from jsonschema.validators import ( + Draft3Validator, + Draft4Validator, + Draft6Validator, + Draft7Validator, + Draft201909Validator, + Draft202012Validator, +) + +my_validator: type[Validator] + +my_validator = Draft3Validator +my_validator = Draft4Validator +my_validator = Draft6Validator +my_validator = Draft7Validator +my_validator = Draft201909Validator +my_validator = Draft202012Validator + + +# in order to confirm that none of the above were incorrectly typed as 'Any' +# ensure that each of these assignments to a non-validator variable requires an +# ignore +none_var: None + +none_var = Draft3Validator # type: ignore[assignment] +none_var = Draft4Validator # type: ignore[assignment] +none_var = Draft6Validator # type: ignore[assignment] +none_var = Draft7Validator # type: ignore[assignment] +none_var = Draft201909Validator # type: ignore[assignment] +none_var = Draft202012Validator # type: ignore[assignment] diff --git a/.venv/lib/python3.12/site-packages/jsonschema/validators.py b/.venv/lib/python3.12/site-packages/jsonschema/validators.py new file mode 100644 index 0000000..dbc029f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema/validators.py @@ -0,0 +1,1410 @@ +""" +Creation and extension of validators, with implementations for existing drafts. +""" +from __future__ import annotations + +from collections import deque +from collections.abc import Iterable, Mapping, Sequence +from functools import lru_cache +from operator import methodcaller +from typing import TYPE_CHECKING +from urllib.parse import unquote, urldefrag, urljoin, urlsplit +from urllib.request import urlopen +from warnings import warn +import contextlib +import json +import reprlib +import warnings + +from attrs import define, field, fields +from jsonschema_specifications import REGISTRY as SPECIFICATIONS +from rpds import HashTrieMap +import referencing.exceptions +import referencing.jsonschema + +from jsonschema import ( + _format, + _keywords, + _legacy_keywords, + _types, + _typing, + _utils, + exceptions, +) + +if TYPE_CHECKING: + from jsonschema.protocols import Validator + +_UNSET = _utils.Unset() + +_VALIDATORS: dict[str, Validator] = {} +_META_SCHEMAS = _utils.URIDict() + + +def __getattr__(name): + if name == "ErrorTree": + warnings.warn( + "Importing ErrorTree from jsonschema.validators is deprecated. " + "Instead import it from jsonschema.exceptions.", + DeprecationWarning, + stacklevel=2, + ) + from jsonschema.exceptions import ErrorTree + return ErrorTree + elif name == "validators": + warnings.warn( + "Accessing jsonschema.validators.validators is deprecated. " + "Use jsonschema.validators.validator_for with a given schema.", + DeprecationWarning, + stacklevel=2, + ) + return _VALIDATORS + elif name == "meta_schemas": + warnings.warn( + "Accessing jsonschema.validators.meta_schemas is deprecated. " + "Use jsonschema.validators.validator_for with a given schema.", + DeprecationWarning, + stacklevel=2, + ) + return _META_SCHEMAS + elif name == "RefResolver": + warnings.warn( + _RefResolver._DEPRECATION_MESSAGE, + DeprecationWarning, + stacklevel=2, + ) + return _RefResolver + raise AttributeError(f"module {__name__} has no attribute {name}") + + +def validates(version): + """ + Register the decorated validator for a ``version`` of the specification. + + Registered validators and their meta schemas will be considered when + parsing :kw:`$schema` keywords' URIs. 
+ + Arguments: + + version (str): + + An identifier to use as the version's name + + Returns: + + collections.abc.Callable: + + a class decorator to decorate the validator with the version + + """ + + def _validates(cls): + _VALIDATORS[version] = cls + meta_schema_id = cls.ID_OF(cls.META_SCHEMA) + _META_SCHEMAS[meta_schema_id] = cls + return cls + return _validates + + +def _warn_for_remote_retrieve(uri: str): + from urllib.request import Request, urlopen + headers = {"User-Agent": "python-jsonschema (deprecated $ref resolution)"} + request = Request(uri, headers=headers) # noqa: S310 + with urlopen(request) as response: # noqa: S310 + warnings.warn( + "Automatically retrieving remote references can be a security " + "vulnerability and is discouraged by the JSON Schema " + "specifications. Relying on this behavior is deprecated " + "and will shortly become an error. If you are sure you want to " + "remotely retrieve your reference and that it is safe to do so, " + "you can find instructions for doing so via referencing.Registry " + "in the referencing documentation " + "(https://referencing.readthedocs.org).", + DeprecationWarning, + stacklevel=9, # Ha ha ha ha magic numbers :/ + ) + return referencing.Resource.from_contents( + json.load(response), + default_specification=referencing.jsonschema.DRAFT202012, + ) + + +_REMOTE_WARNING_REGISTRY = SPECIFICATIONS.combine( + referencing.Registry(retrieve=_warn_for_remote_retrieve), # type: ignore[call-arg] +) + + +def create( + meta_schema: referencing.jsonschema.ObjectSchema, + validators: ( + Mapping[str, _typing.SchemaKeywordValidator] + | Iterable[tuple[str, _typing.SchemaKeywordValidator]] + ) = (), + version: str | None = None, + type_checker: _types.TypeChecker = _types.draft202012_type_checker, + format_checker: _format.FormatChecker = _format.draft202012_format_checker, + id_of: _typing.id_of = referencing.jsonschema.DRAFT202012.id_of, + applicable_validators: _typing.ApplicableValidators = methodcaller( + "items", + ), +) -> type[Validator]: + """ + Create a new validator class. + + Arguments: + + meta_schema: + + the meta schema for the new validator class + + validators: + + a mapping from names to callables, where each callable will + validate the schema property with the given name. + + Each callable should take 4 arguments: + + 1. a validator instance, + 2. the value of the property being validated within the + instance + 3. the instance + 4. the schema + + version: + + an identifier for the version that this validator class will + validate. If provided, the returned validator class will + have its ``__name__`` set to include the version, and also + will have `jsonschema.validators.validates` automatically + called for the given version. + + type_checker: + + a type checker, used when applying the :kw:`type` keyword. + + If unprovided, a `jsonschema.TypeChecker` will be created + with a set of default types typical of JSON Schema drafts. + + format_checker: + + a format checker, used when applying the :kw:`format` keyword. + + If unprovided, a `jsonschema.FormatChecker` will be created + with a set of default formats typical of JSON Schema drafts. + + id_of: + + A function that given a schema, returns its ID. + + applicable_validators: + + A function that, given a schema, returns the list of + applicable schema keywords and associated values + which will be used to validate the instance. 
+ This is mostly used to support pre-draft 7 versions of JSON Schema + which specified behavior around ignoring keywords if they were + siblings of a ``$ref`` keyword. If you're not attempting to + implement similar behavior, you can typically ignore this argument + and leave it at its default. + + Returns: + + a new `jsonschema.protocols.Validator` class + + """ + # preemptively don't shadow the `Validator.format_checker` local + format_checker_arg = format_checker + + specification = referencing.jsonschema.specification_with( + dialect_id=id_of(meta_schema) or "urn:unknown-dialect", + default=referencing.Specification.OPAQUE, + ) + + @define + class Validator: + + VALIDATORS = dict(validators) # noqa: RUF012 + META_SCHEMA = dict(meta_schema) # noqa: RUF012 + TYPE_CHECKER = type_checker + FORMAT_CHECKER = format_checker_arg + ID_OF = staticmethod(id_of) + + _APPLICABLE_VALIDATORS = applicable_validators + _validators = field(init=False, repr=False, eq=False) + + schema: referencing.jsonschema.Schema = field(repr=reprlib.repr) + _ref_resolver = field(default=None, repr=False, alias="resolver") + format_checker: _format.FormatChecker | None = field(default=None) + # TODO: include new meta-schemas added at runtime + _registry: referencing.jsonschema.SchemaRegistry = field( + default=_REMOTE_WARNING_REGISTRY, + kw_only=True, + repr=False, + ) + _resolver = field( + alias="_resolver", + default=None, + kw_only=True, + repr=False, + ) + + def __init_subclass__(cls): + warnings.warn( + ( + "Subclassing validator classes is not intended to " + "be part of their public API. A future version " + "will make doing so an error, as the behavior of " + "subclasses isn't guaranteed to stay the same " + "between releases of jsonschema. Instead, prefer " + "composition of validators, wrapping them in an object " + "owned entirely by the downstream library." + ), + DeprecationWarning, + stacklevel=2, + ) + + def evolve(self, **changes): + cls = self.__class__ + schema = changes.setdefault("schema", self.schema) + NewValidator = validator_for(schema, default=cls) + + for field in fields(cls): # noqa: F402 + if not field.init: + continue + attr_name = field.name + init_name = field.alias + if init_name not in changes: + changes[init_name] = getattr(self, attr_name) + + return NewValidator(**changes) + + cls.evolve = evolve + + def __attrs_post_init__(self): + if self._resolver is None: + registry = self._registry + if registry is not _REMOTE_WARNING_REGISTRY: + registry = SPECIFICATIONS.combine(registry) + resource = specification.create_resource(self.schema) + self._resolver = registry.resolver_with_root(resource) + + if self.schema is True or self.schema is False: + self._validators = [] + else: + self._validators = [ + (self.VALIDATORS[k], k, v) + for k, v in applicable_validators(self.schema) + if k in self.VALIDATORS + ] + + # REMOVEME: Legacy ref resolution state management. 
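+            # Descriptive note (added): if a deprecated _RefResolver was
+            # passed in, mirror this schema's id onto its scope stack so
+            # relative $refs keep resolving as they did before the
+            # referencing library took over resolution.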
+ push_scope = getattr(self._ref_resolver, "push_scope", None) + if push_scope is not None: + id = id_of(self.schema) + if id is not None: + push_scope(id) + + @classmethod + def check_schema(cls, schema, format_checker=_UNSET): + Validator = validator_for(cls.META_SCHEMA, default=cls) + if format_checker is _UNSET: + format_checker = Validator.FORMAT_CHECKER + validator = Validator( + schema=cls.META_SCHEMA, + format_checker=format_checker, + ) + for error in validator.iter_errors(schema): + raise exceptions.SchemaError.create_from(error) + + @property + def resolver(self): + warnings.warn( + ( + f"Accessing {self.__class__.__name__}.resolver is " + "deprecated as of v4.18.0, in favor of the " + "https://github.com/python-jsonschema/referencing " + "library, which provides more compliant referencing " + "behavior as well as more flexible APIs for " + "customization." + ), + DeprecationWarning, + stacklevel=2, + ) + if self._ref_resolver is None: + self._ref_resolver = _RefResolver.from_schema( + self.schema, + id_of=id_of, + ) + return self._ref_resolver + + def evolve(self, **changes): + schema = changes.setdefault("schema", self.schema) + NewValidator = validator_for(schema, default=self.__class__) + + for (attr_name, init_name) in evolve_fields: + if init_name not in changes: + changes[init_name] = getattr(self, attr_name) + + return NewValidator(**changes) + + def iter_errors(self, instance, _schema=None): + if _schema is not None: + warnings.warn( + ( + "Passing a schema to Validator.iter_errors " + "is deprecated and will be removed in a future " + "release. Call validator.evolve(schema=new_schema)." + "iter_errors(...) instead." + ), + DeprecationWarning, + stacklevel=2, + ) + validators = [ + (self.VALIDATORS[k], k, v) + for k, v in applicable_validators(_schema) + if k in self.VALIDATORS + ] + else: + _schema, validators = self.schema, self._validators + + if _schema is True: + return + elif _schema is False: + yield exceptions.ValidationError( + f"False schema does not allow {instance!r}", + validator=None, + validator_value=None, + instance=instance, + schema=_schema, + ) + return + + for validator, k, v in validators: + errors = validator(self, v, instance, _schema) or () + for error in errors: + # set details if not already set by the called fn + error._set( + validator=k, + validator_value=v, + instance=instance, + schema=_schema, + type_checker=self.TYPE_CHECKER, + ) + if k not in {"if", "$ref"}: + error.schema_path.appendleft(k) + yield error + + def descend( + self, + instance, + schema, + path=None, + schema_path=None, + resolver=None, + ): + if schema is True: + return + elif schema is False: + yield exceptions.ValidationError( + f"False schema does not allow {instance!r}", + validator=None, + validator_value=None, + instance=instance, + schema=schema, + ) + return + + if self._ref_resolver is not None: + evolved = self.evolve(schema=schema) + else: + if resolver is None: + resolver = self._resolver.in_subresource( + specification.create_resource(schema), + ) + evolved = self.evolve(schema=schema, _resolver=resolver) + + for k, v in applicable_validators(schema): + validator = evolved.VALIDATORS.get(k) + if validator is None: + continue + + errors = validator(evolved, v, instance, schema) or () + for error in errors: + # set details if not already set by the called fn + error._set( + validator=k, + validator_value=v, + instance=instance, + schema=schema, + type_checker=evolved.TYPE_CHECKER, + ) + if k not in {"if", "$ref"}: + error.schema_path.appendleft(k) + if path is 
not None: + error.path.appendleft(path) + if schema_path is not None: + error.schema_path.appendleft(schema_path) + yield error + + def validate(self, *args, **kwargs): + for error in self.iter_errors(*args, **kwargs): + raise error + + def is_type(self, instance, type): + try: + return self.TYPE_CHECKER.is_type(instance, type) + except exceptions.UndefinedTypeCheck: + exc = exceptions.UnknownType(type, instance, self.schema) + raise exc from None + + def _validate_reference(self, ref, instance): + if self._ref_resolver is None: + try: + resolved = self._resolver.lookup(ref) + except referencing.exceptions.Unresolvable as err: + raise exceptions._WrappedReferencingError(err) from err + + return self.descend( + instance, + resolved.contents, + resolver=resolved.resolver, + ) + else: + resolve = getattr(self._ref_resolver, "resolve", None) + if resolve is None: + with self._ref_resolver.resolving(ref) as resolved: + return self.descend(instance, resolved) + else: + scope, resolved = resolve(ref) + self._ref_resolver.push_scope(scope) + + try: + return list(self.descend(instance, resolved)) + finally: + self._ref_resolver.pop_scope() + + def is_valid(self, instance, _schema=None): + if _schema is not None: + warnings.warn( + ( + "Passing a schema to Validator.is_valid is deprecated " + "and will be removed in a future release. Call " + "validator.evolve(schema=new_schema).is_valid(...) " + "instead." + ), + DeprecationWarning, + stacklevel=2, + ) + self = self.evolve(schema=_schema) + + error = next(self.iter_errors(instance), None) + return error is None + + evolve_fields = [ + (field.name, field.alias) + for field in fields(Validator) + if field.init + ] + + if version is not None: + safe = version.title().replace(" ", "").replace("-", "") + Validator.__name__ = Validator.__qualname__ = f"{safe}Validator" + Validator = validates(version)(Validator) # type: ignore[misc] + + return Validator # type: ignore[return-value] + + +def extend( + validator, + validators=(), + version=None, + type_checker=None, + format_checker=None, +): + """ + Create a new validator class by extending an existing one. + + Arguments: + + validator (jsonschema.protocols.Validator): + + an existing validator class + + validators (collections.abc.Mapping): + + a mapping of new validator callables to extend with, whose + structure is as in `create`. + + .. note:: + + Any validator callables with the same name as an + existing one will (silently) replace the old validator + callable entirely, effectively overriding any validation + done in the "parent" validator class. + + If you wish to instead extend the behavior of a parent's + validator callable, delegate and call it directly in + the new validator function by retrieving it using + ``OldValidator.VALIDATORS["validation_keyword_name"]``. + + version (str): + + a version for the new validator class + + type_checker (jsonschema.TypeChecker): + + a type checker, used when applying the :kw:`type` keyword. + + If unprovided, the type checker of the extended + `jsonschema.protocols.Validator` will be carried along. + + format_checker (jsonschema.FormatChecker): + + a format checker, used when applying the :kw:`format` keyword. + + If unprovided, the format checker of the extended + `jsonschema.protocols.Validator` will be carried along. + + Returns: + + a new `jsonschema.protocols.Validator` class extending the one + provided + + .. note:: Meta Schemas + + The new validator class will have its parent's meta schema. 
+ + If you wish to change or extend the meta schema in the new + validator class, modify ``META_SCHEMA`` directly on the returned + class. Note that no implicit copying is done, so a copy should + likely be made before modifying it, in order to not affect the + old validator. + + """ + all_validators = dict(validator.VALIDATORS) + all_validators.update(validators) + + if type_checker is None: + type_checker = validator.TYPE_CHECKER + if format_checker is None: + format_checker = validator.FORMAT_CHECKER + return create( + meta_schema=validator.META_SCHEMA, + validators=all_validators, + version=version, + type_checker=type_checker, + format_checker=format_checker, + id_of=validator.ID_OF, + applicable_validators=validator._APPLICABLE_VALIDATORS, + ) + + +Draft3Validator = create( + meta_schema=SPECIFICATIONS.contents( + "http://json-schema.org/draft-03/schema#", + ), + validators={ + "$ref": _keywords.ref, + "additionalItems": _legacy_keywords.additionalItems, + "additionalProperties": _keywords.additionalProperties, + "dependencies": _legacy_keywords.dependencies_draft3, + "disallow": _legacy_keywords.disallow_draft3, + "divisibleBy": _keywords.multipleOf, + "enum": _keywords.enum, + "extends": _legacy_keywords.extends_draft3, + "format": _keywords.format, + "items": _legacy_keywords.items_draft3_draft4, + "maxItems": _keywords.maxItems, + "maxLength": _keywords.maxLength, + "maximum": _legacy_keywords.maximum_draft3_draft4, + "minItems": _keywords.minItems, + "minLength": _keywords.minLength, + "minimum": _legacy_keywords.minimum_draft3_draft4, + "pattern": _keywords.pattern, + "patternProperties": _keywords.patternProperties, + "properties": _legacy_keywords.properties_draft3, + "type": _legacy_keywords.type_draft3, + "uniqueItems": _keywords.uniqueItems, + }, + type_checker=_types.draft3_type_checker, + format_checker=_format.draft3_format_checker, + version="draft3", + id_of=referencing.jsonschema.DRAFT3.id_of, + applicable_validators=_legacy_keywords.ignore_ref_siblings, +) + +Draft4Validator = create( + meta_schema=SPECIFICATIONS.contents( + "http://json-schema.org/draft-04/schema#", + ), + validators={ + "$ref": _keywords.ref, + "additionalItems": _legacy_keywords.additionalItems, + "additionalProperties": _keywords.additionalProperties, + "allOf": _keywords.allOf, + "anyOf": _keywords.anyOf, + "dependencies": _legacy_keywords.dependencies_draft4_draft6_draft7, + "enum": _keywords.enum, + "format": _keywords.format, + "items": _legacy_keywords.items_draft3_draft4, + "maxItems": _keywords.maxItems, + "maxLength": _keywords.maxLength, + "maxProperties": _keywords.maxProperties, + "maximum": _legacy_keywords.maximum_draft3_draft4, + "minItems": _keywords.minItems, + "minLength": _keywords.minLength, + "minProperties": _keywords.minProperties, + "minimum": _legacy_keywords.minimum_draft3_draft4, + "multipleOf": _keywords.multipleOf, + "not": _keywords.not_, + "oneOf": _keywords.oneOf, + "pattern": _keywords.pattern, + "patternProperties": _keywords.patternProperties, + "properties": _keywords.properties, + "required": _keywords.required, + "type": _keywords.type, + "uniqueItems": _keywords.uniqueItems, + }, + type_checker=_types.draft4_type_checker, + format_checker=_format.draft4_format_checker, + version="draft4", + id_of=referencing.jsonschema.DRAFT4.id_of, + applicable_validators=_legacy_keywords.ignore_ref_siblings, +) + +Draft6Validator = create( + meta_schema=SPECIFICATIONS.contents( + "http://json-schema.org/draft-06/schema#", + ), + validators={ + "$ref": _keywords.ref, + 
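+        # Descriptive note (added): keywords whose semantics changed in
+        # later drafts come from _legacy_keywords; stable ones come from
+        # _keywords.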
"additionalItems": _legacy_keywords.additionalItems, + "additionalProperties": _keywords.additionalProperties, + "allOf": _keywords.allOf, + "anyOf": _keywords.anyOf, + "const": _keywords.const, + "contains": _legacy_keywords.contains_draft6_draft7, + "dependencies": _legacy_keywords.dependencies_draft4_draft6_draft7, + "enum": _keywords.enum, + "exclusiveMaximum": _keywords.exclusiveMaximum, + "exclusiveMinimum": _keywords.exclusiveMinimum, + "format": _keywords.format, + "items": _legacy_keywords.items_draft6_draft7_draft201909, + "maxItems": _keywords.maxItems, + "maxLength": _keywords.maxLength, + "maxProperties": _keywords.maxProperties, + "maximum": _keywords.maximum, + "minItems": _keywords.minItems, + "minLength": _keywords.minLength, + "minProperties": _keywords.minProperties, + "minimum": _keywords.minimum, + "multipleOf": _keywords.multipleOf, + "not": _keywords.not_, + "oneOf": _keywords.oneOf, + "pattern": _keywords.pattern, + "patternProperties": _keywords.patternProperties, + "properties": _keywords.properties, + "propertyNames": _keywords.propertyNames, + "required": _keywords.required, + "type": _keywords.type, + "uniqueItems": _keywords.uniqueItems, + }, + type_checker=_types.draft6_type_checker, + format_checker=_format.draft6_format_checker, + version="draft6", + id_of=referencing.jsonschema.DRAFT6.id_of, + applicable_validators=_legacy_keywords.ignore_ref_siblings, +) + +Draft7Validator = create( + meta_schema=SPECIFICATIONS.contents( + "http://json-schema.org/draft-07/schema#", + ), + validators={ + "$ref": _keywords.ref, + "additionalItems": _legacy_keywords.additionalItems, + "additionalProperties": _keywords.additionalProperties, + "allOf": _keywords.allOf, + "anyOf": _keywords.anyOf, + "const": _keywords.const, + "contains": _legacy_keywords.contains_draft6_draft7, + "dependencies": _legacy_keywords.dependencies_draft4_draft6_draft7, + "enum": _keywords.enum, + "exclusiveMaximum": _keywords.exclusiveMaximum, + "exclusiveMinimum": _keywords.exclusiveMinimum, + "format": _keywords.format, + "if": _keywords.if_, + "items": _legacy_keywords.items_draft6_draft7_draft201909, + "maxItems": _keywords.maxItems, + "maxLength": _keywords.maxLength, + "maxProperties": _keywords.maxProperties, + "maximum": _keywords.maximum, + "minItems": _keywords.minItems, + "minLength": _keywords.minLength, + "minProperties": _keywords.minProperties, + "minimum": _keywords.minimum, + "multipleOf": _keywords.multipleOf, + "not": _keywords.not_, + "oneOf": _keywords.oneOf, + "pattern": _keywords.pattern, + "patternProperties": _keywords.patternProperties, + "properties": _keywords.properties, + "propertyNames": _keywords.propertyNames, + "required": _keywords.required, + "type": _keywords.type, + "uniqueItems": _keywords.uniqueItems, + }, + type_checker=_types.draft7_type_checker, + format_checker=_format.draft7_format_checker, + version="draft7", + id_of=referencing.jsonschema.DRAFT7.id_of, + applicable_validators=_legacy_keywords.ignore_ref_siblings, +) + +Draft201909Validator = create( + meta_schema=SPECIFICATIONS.contents( + "https://json-schema.org/draft/2019-09/schema", + ), + validators={ + "$recursiveRef": _legacy_keywords.recursiveRef, + "$ref": _keywords.ref, + "additionalItems": _legacy_keywords.additionalItems, + "additionalProperties": _keywords.additionalProperties, + "allOf": _keywords.allOf, + "anyOf": _keywords.anyOf, + "const": _keywords.const, + "contains": _keywords.contains, + "dependentRequired": _keywords.dependentRequired, + "dependentSchemas": 
_keywords.dependentSchemas, + "enum": _keywords.enum, + "exclusiveMaximum": _keywords.exclusiveMaximum, + "exclusiveMinimum": _keywords.exclusiveMinimum, + "format": _keywords.format, + "if": _keywords.if_, + "items": _legacy_keywords.items_draft6_draft7_draft201909, + "maxItems": _keywords.maxItems, + "maxLength": _keywords.maxLength, + "maxProperties": _keywords.maxProperties, + "maximum": _keywords.maximum, + "minItems": _keywords.minItems, + "minLength": _keywords.minLength, + "minProperties": _keywords.minProperties, + "minimum": _keywords.minimum, + "multipleOf": _keywords.multipleOf, + "not": _keywords.not_, + "oneOf": _keywords.oneOf, + "pattern": _keywords.pattern, + "patternProperties": _keywords.patternProperties, + "properties": _keywords.properties, + "propertyNames": _keywords.propertyNames, + "required": _keywords.required, + "type": _keywords.type, + "unevaluatedItems": _legacy_keywords.unevaluatedItems_draft2019, + "unevaluatedProperties": ( + _legacy_keywords.unevaluatedProperties_draft2019 + ), + "uniqueItems": _keywords.uniqueItems, + }, + type_checker=_types.draft201909_type_checker, + format_checker=_format.draft201909_format_checker, + version="draft2019-09", +) + +Draft202012Validator = create( + meta_schema=SPECIFICATIONS.contents( + "https://json-schema.org/draft/2020-12/schema", + ), + validators={ + "$dynamicRef": _keywords.dynamicRef, + "$ref": _keywords.ref, + "additionalProperties": _keywords.additionalProperties, + "allOf": _keywords.allOf, + "anyOf": _keywords.anyOf, + "const": _keywords.const, + "contains": _keywords.contains, + "dependentRequired": _keywords.dependentRequired, + "dependentSchemas": _keywords.dependentSchemas, + "enum": _keywords.enum, + "exclusiveMaximum": _keywords.exclusiveMaximum, + "exclusiveMinimum": _keywords.exclusiveMinimum, + "format": _keywords.format, + "if": _keywords.if_, + "items": _keywords.items, + "maxItems": _keywords.maxItems, + "maxLength": _keywords.maxLength, + "maxProperties": _keywords.maxProperties, + "maximum": _keywords.maximum, + "minItems": _keywords.minItems, + "minLength": _keywords.minLength, + "minProperties": _keywords.minProperties, + "minimum": _keywords.minimum, + "multipleOf": _keywords.multipleOf, + "not": _keywords.not_, + "oneOf": _keywords.oneOf, + "pattern": _keywords.pattern, + "patternProperties": _keywords.patternProperties, + "prefixItems": _keywords.prefixItems, + "properties": _keywords.properties, + "propertyNames": _keywords.propertyNames, + "required": _keywords.required, + "type": _keywords.type, + "unevaluatedItems": _keywords.unevaluatedItems, + "unevaluatedProperties": _keywords.unevaluatedProperties, + "uniqueItems": _keywords.uniqueItems, + }, + type_checker=_types.draft202012_type_checker, + format_checker=_format.draft202012_format_checker, + version="draft2020-12", +) + +_LATEST_VERSION: type[Validator] = Draft202012Validator + + +class _RefResolver: + """ + Resolve JSON References. + + Arguments: + + base_uri (str): + + The URI of the referring document + + referrer: + + The actual referring document + + store (dict): + + A mapping from URIs to documents to cache + + cache_remote (bool): + + Whether remote refs should be cached after first resolution + + handlers (dict): + + A mapping from URI schemes to functions that should be used + to retrieve them + + urljoin_cache (:func:`functools.lru_cache`): + + A cache that will be used for caching the results of joining + the resolution scope to subscopes. 
+ + remote_cache (:func:`functools.lru_cache`): + + A cache that will be used for caching the results of + resolved remote URLs. + + Attributes: + + cache_remote (bool): + + Whether remote refs should be cached after first resolution + + .. deprecated:: v4.18.0 + + ``RefResolver`` has been deprecated in favor of `referencing`. + + """ + + _DEPRECATION_MESSAGE = ( + "jsonschema.RefResolver is deprecated as of v4.18.0, in favor of the " + "https://github.com/python-jsonschema/referencing library, which " + "provides more compliant referencing behavior as well as more " + "flexible APIs for customization. A future release will remove " + "RefResolver. Please file a feature request (on referencing) if you " + "are missing an API for the kind of customization you need." + ) + + def __init__( + self, + base_uri, + referrer, + store=HashTrieMap(), + cache_remote=True, + handlers=(), + urljoin_cache=None, + remote_cache=None, + ): + if urljoin_cache is None: + urljoin_cache = lru_cache(1024)(urljoin) + if remote_cache is None: + remote_cache = lru_cache(1024)(self.resolve_from_url) + + self.referrer = referrer + self.cache_remote = cache_remote + self.handlers = dict(handlers) + + self._scopes_stack = [base_uri] + + self.store = _utils.URIDict( + (uri, each.contents) for uri, each in SPECIFICATIONS.items() + ) + self.store.update( + (id, each.META_SCHEMA) for id, each in _META_SCHEMAS.items() + ) + self.store.update(store) + self.store.update( + (schema["$id"], schema) + for schema in store.values() + if isinstance(schema, Mapping) and "$id" in schema + ) + self.store[base_uri] = referrer + + self._urljoin_cache = urljoin_cache + self._remote_cache = remote_cache + + @classmethod + def from_schema( # noqa: D417 + cls, + schema, + id_of=referencing.jsonschema.DRAFT202012.id_of, + *args, + **kwargs, + ): + """ + Construct a resolver from a JSON schema object. + + Arguments: + + schema: + + the referring schema + + Returns: + + `_RefResolver` + + """ + return cls(base_uri=id_of(schema) or "", referrer=schema, *args, **kwargs) # noqa: B026, E501 + + def push_scope(self, scope): + """ + Enter a given sub-scope. + + Treats further dereferences as being performed underneath the + given scope. + """ + self._scopes_stack.append( + self._urljoin_cache(self.resolution_scope, scope), + ) + + def pop_scope(self): + """ + Exit the most recent entered scope. + + Treats further dereferences as being performed underneath the + original scope. + + Don't call this method more times than `push_scope` has been + called. + """ + try: + self._scopes_stack.pop() + except IndexError: + raise exceptions._RefResolutionError( + "Failed to pop the scope from an empty stack. " + "`pop_scope()` should only be called once for every " + "`push_scope()`", + ) from None + + @property + def resolution_scope(self): + """ + Retrieve the current resolution scope. + """ + return self._scopes_stack[-1] + + @property + def base_uri(self): + """ + Retrieve the current base URI, not including any fragment. + """ + uri, _ = urldefrag(self.resolution_scope) + return uri + + @contextlib.contextmanager + def in_scope(self, scope): + """ + Temporarily enter the given scope for the duration of the context. + + .. 
deprecated:: v4.0.0 + """ + warnings.warn( + "jsonschema.RefResolver.in_scope is deprecated and will be " + "removed in a future release.", + DeprecationWarning, + stacklevel=3, + ) + self.push_scope(scope) + try: + yield + finally: + self.pop_scope() + + @contextlib.contextmanager + def resolving(self, ref): + """ + Resolve the given ``ref`` and enter its resolution scope. + + Exits the scope on exit of this context manager. + + Arguments: + + ref (str): + + The reference to resolve + + """ + url, resolved = self.resolve(ref) + self.push_scope(url) + try: + yield resolved + finally: + self.pop_scope() + + def _find_in_referrer(self, key): + return self._get_subschemas_cache()[key] + + @lru_cache # noqa: B019 + def _get_subschemas_cache(self): + cache = {key: [] for key in _SUBSCHEMAS_KEYWORDS} + for keyword, subschema in _search_schema( + self.referrer, _match_subschema_keywords, + ): + cache[keyword].append(subschema) + return cache + + @lru_cache # noqa: B019 + def _find_in_subschemas(self, url): + subschemas = self._get_subschemas_cache()["$id"] + if not subschemas: + return None + uri, fragment = urldefrag(url) + for subschema in subschemas: + id = subschema["$id"] + if not isinstance(id, str): + continue + target_uri = self._urljoin_cache(self.resolution_scope, id) + if target_uri.rstrip("/") == uri.rstrip("/"): + if fragment: + subschema = self.resolve_fragment(subschema, fragment) + self.store[url] = subschema + return url, subschema + return None + + def resolve(self, ref): + """ + Resolve the given reference. + """ + url = self._urljoin_cache(self.resolution_scope, ref).rstrip("/") + + match = self._find_in_subschemas(url) + if match is not None: + return match + + return url, self._remote_cache(url) + + def resolve_from_url(self, url): + """ + Resolve the given URL. + """ + url, fragment = urldefrag(url) + if not url: + url = self.base_uri + + try: + document = self.store[url] + except KeyError: + try: + document = self.resolve_remote(url) + except Exception as exc: + raise exceptions._RefResolutionError(exc) from exc + + return self.resolve_fragment(document, fragment) + + def resolve_fragment(self, document, fragment): + """ + Resolve a ``fragment`` within the referenced ``document``. + + Arguments: + + document: + + The referent document + + fragment (str): + + a URI fragment to resolve within it + + """ + fragment = fragment.lstrip("/") + + if not fragment: + return document + + if document is self.referrer: + find = self._find_in_referrer + else: + + def find(key): + yield from _search_schema(document, _match_keyword(key)) + + for keyword in ["$anchor", "$dynamicAnchor"]: + for subschema in find(keyword): + if fragment == subschema[keyword]: + return subschema + for keyword in ["id", "$id"]: + for subschema in find(keyword): + if "#" + fragment == subschema[keyword]: + return subschema + + # Resolve via path + parts = unquote(fragment).split("/") if fragment else [] + for part in parts: + part = part.replace("~1", "/").replace("~0", "~") + + if isinstance(document, Sequence): + try: # noqa: SIM105 + part = int(part) + except ValueError: + pass + try: + document = document[part] + except (TypeError, LookupError) as err: + raise exceptions._RefResolutionError( + f"Unresolvable JSON pointer: {fragment!r}", + ) from err + + return document + + def resolve_remote(self, uri): + """ + Resolve a remote ``uri``. 
+ + If called directly, does not check the store first, but after + retrieving the document at the specified URI it will be saved in + the store if :attr:`cache_remote` is True. + + .. note:: + + If the requests_ library is present, ``jsonschema`` will use it to + request the remote ``uri``, so that the correct encoding is + detected and used. + + If it isn't, or if the scheme of the ``uri`` is not ``http`` or + ``https``, UTF-8 is assumed. + + Arguments: + + uri (str): + + The URI to resolve + + Returns: + + The retrieved document + + .. _requests: https://pypi.org/project/requests/ + + """ + try: + import requests + except ImportError: + requests = None + + scheme = urlsplit(uri).scheme + + if scheme in self.handlers: + result = self.handlers[scheme](uri) + elif scheme in ["http", "https"] and requests: + # Requests has support for detecting the correct encoding of + # json over http + result = requests.get(uri).json() + else: + # Otherwise, pass off to urllib and assume utf-8 + with urlopen(uri) as url: # noqa: S310 + result = json.loads(url.read().decode("utf-8")) + + if self.cache_remote: + self.store[uri] = result + return result + + +_SUBSCHEMAS_KEYWORDS = ("$id", "id", "$anchor", "$dynamicAnchor") + + +def _match_keyword(keyword): + + def matcher(value): + if keyword in value: + yield value + + return matcher + + +def _match_subschema_keywords(value): + for keyword in _SUBSCHEMAS_KEYWORDS: + if keyword in value: + yield keyword, value + + +def _search_schema(schema, matcher): + """Breadth-first search routine.""" + values = deque([schema]) + while values: + value = values.pop() + if not isinstance(value, dict): + continue + yield from matcher(value) + values.extendleft(value.values()) + + +def validate(instance, schema, cls=None, *args, **kwargs): # noqa: D417 + """ + Validate an instance under the given schema. + + >>> validate([2, 3, 4], {"maxItems": 2}) + Traceback (most recent call last): + ... + ValidationError: [2, 3, 4] is too long + + :func:`~jsonschema.validators.validate` will first verify that the + provided schema is itself valid, since not doing so can lead to less + obvious error messages and fail in less obvious or consistent ways. + + If you know you have a valid schema already, especially + if you intend to validate multiple instances with + the same schema, you likely would prefer using the + `jsonschema.protocols.Validator.validate` method directly on a + specific validator (e.g. ``Draft202012Validator.validate``). + + + Arguments: + + instance: + + The instance to validate + + schema: + + The schema to validate with + + cls (jsonschema.protocols.Validator): + + The class that will be used to validate the instance. + + If the ``cls`` argument is not provided, two things will happen + in accordance with the specification. First, if the schema has a + :kw:`$schema` keyword containing a known meta-schema [#]_ then the + proper validator will be used. The specification recommends that + all schemas contain :kw:`$schema` properties for this reason. If no + :kw:`$schema` property is found, the default validator class is the + latest released draft. + + Any other provided positional and keyword arguments will be passed + on when instantiating the ``cls``. + + Raises: + + `jsonschema.exceptions.ValidationError`: + + if the instance is invalid + + `jsonschema.exceptions.SchemaError`: + + if the schema itself is invalid + + .. rubric:: Footnotes + .. 
[#] known by a validator registered with
+        `jsonschema.validators.validates`
+
+    """
+    if cls is None:
+        cls = validator_for(schema)
+
+    cls.check_schema(schema)
+    validator = cls(schema, *args, **kwargs)
+    error = exceptions.best_match(validator.iter_errors(instance))
+    if error is not None:
+        raise error
+
+
+def validator_for(
+    schema,
+    default: type[Validator] | _utils.Unset = _UNSET,
+) -> type[Validator]:
+    """
+    Retrieve the validator class appropriate for validating the given schema.
+
+    Uses the :kw:`$schema` keyword that should be present in the given
+    schema to look up the appropriate validator class.
+
+    Arguments:
+
+        schema (collections.abc.Mapping or bool):
+
+            the schema to look at
+
+        default:
+
+            the default to return if the appropriate validator class
+            cannot be determined.
+
+            If unprovided, the default is to return the latest supported
+            draft.
+
+    Examples:
+
+        The :kw:`$schema` JSON Schema keyword will control which validator
+        class is returned:
+
+        >>> schema = {
+        ...     "$schema": "https://json-schema.org/draft/2020-12/schema",
+        ...     "type": "integer",
+        ... }
+        >>> jsonschema.validators.validator_for(schema)
+        <class 'jsonschema.validators.Draft202012Validator'>
+
+        Here, a draft 7 schema instead will return the draft 7 validator:
+
+        >>> schema = {
+        ...     "$schema": "http://json-schema.org/draft-07/schema#",
+        ...     "type": "integer",
+        ... }
+        >>> jsonschema.validators.validator_for(schema)
+        <class 'jsonschema.validators.Draft7Validator'>
+
+        Schemas with no ``$schema`` keyword will fall back to the default
+        argument:
+
+        >>> schema = {"type": "integer"}
+        >>> jsonschema.validators.validator_for(
+        ...     schema, default=Draft7Validator,
+        ... )
+        <class 'jsonschema.validators.Draft7Validator'>
+
+        or if none is provided, to the latest version supported.
+        Always including the keyword when authoring schemas is highly
+        recommended.
+
+    """
+    DefaultValidator = _LATEST_VERSION if default is _UNSET else default
+
+    if schema is True or schema is False or "$schema" not in schema:
+        return DefaultValidator  # type: ignore[return-value]
+    if schema["$schema"] not in _META_SCHEMAS and default is _UNSET:
+        warn(
+            (
+                "The metaschema specified by $schema was not found. "
+                "Using the latest draft to validate, but this will raise "
+                "an error in the future."
+ ), + DeprecationWarning, + stacklevel=2, + ) + return _META_SCHEMAS.get(schema["$schema"], DefaultValidator) diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/METADATA b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/METADATA new file mode 100644 index 0000000..0437aaa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/METADATA @@ -0,0 +1,54 @@ +Metadata-Version: 2.4 +Name: jsonschema-specifications +Version: 2025.4.1 +Summary: The JSON Schema meta-schemas and vocabularies, exposed as a Registry +Project-URL: Documentation, https://jsonschema-specifications.readthedocs.io/ +Project-URL: Homepage, https://github.com/python-jsonschema/jsonschema-specifications +Project-URL: Issues, https://github.com/python-jsonschema/jsonschema-specifications/issues/ +Project-URL: Funding, https://github.com/sponsors/Julian +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-jsonschema-specifications?utm_source=pypi-jsonschema-specifications&utm_medium=referral&utm_campaign=pypi-link +Project-URL: Source, https://github.com/python-jsonschema/jsonschema-specifications +Author-email: Julian Berman +License-Expression: MIT +License-File: COPYING +Keywords: data validation,json,json schema,jsonschema,validation +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: File Formats :: JSON +Classifier: Topic :: File Formats :: JSON :: JSON Schema +Requires-Python: >=3.9 +Requires-Dist: referencing>=0.31.0 +Description-Content-Type: text/x-rst + +============================= +``jsonschema-specifications`` +============================= + +|PyPI| |Pythons| |CI| |ReadTheDocs| + +JSON support files from the `JSON Schema Specifications `_ (metaschemas, vocabularies, etc.), packaged for runtime access from Python as a `referencing-based Schema Registry `_. + +.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema-specifications.svg + :alt: PyPI version + :target: https://pypi.org/project/jsonschema-specifications/ + +.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema-specifications.svg + :alt: Supported Python versions + :target: https://pypi.org/project/jsonschema-specifications/ + +.. |CI| image:: https://github.com/python-jsonschema/jsonschema-specifications/workflows/CI/badge.svg + :alt: Build status + :target: https://github.com/python-jsonschema/jsonschema-specifications/actions?query=workflow%3ACI + +.. 
|ReadTheDocs| image:: https://readthedocs.org/projects/jsonschema-specifications/badge/?version=stable&style=flat + :alt: ReadTheDocs status + :target: https://jsonschema-specifications.readthedocs.io/en/stable/ diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/RECORD b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/RECORD new file mode 100644 index 0000000..3fc0c35 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/RECORD @@ -0,0 +1,33 @@ +jsonschema_specifications-2025.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jsonschema_specifications-2025.4.1.dist-info/METADATA,sha256=O5aSPJh5x5sRBhPXC-mW3OVQ7F8cSiVjwv7ZPOSOKcA,2907 +jsonschema_specifications-2025.4.1.dist-info/RECORD,, +jsonschema_specifications-2025.4.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +jsonschema_specifications-2025.4.1.dist-info/licenses/COPYING,sha256=QtzWNJX4e063x3V6-jebtVpT-Ur9el9lfZrfVyNuUVw,1057 +jsonschema_specifications/__init__.py,sha256=qoTB2DKY7qvNrGhMPH6gtmAJRLilmVQ-fFZwT6ryqw0,386 +jsonschema_specifications/__pycache__/__init__.cpython-312.pyc,, +jsonschema_specifications/__pycache__/_core.cpython-312.pyc,, +jsonschema_specifications/_core.py,sha256=tFhc1CMleJ3AJOK_bjxOpFQTdrsUClFGfFxPBU_CebM,1140 +jsonschema_specifications/schemas/draft201909/metaschema.json,sha256=e3YbPhIfCgyh6ioLjizIVrz4AWBLgmjXG6yqICvAwTs,1785 +jsonschema_specifications/schemas/draft201909/vocabularies/applicator,sha256=aJUQDplyb7sQcFhRK77D7P1LJOj9L6zuPlBe5ysNTDE,1860 +jsonschema_specifications/schemas/draft201909/vocabularies/content,sha256=m31PVaTi_bAsQwBo_f-rxzKt3OI42j8d8mkCScM1MnQ,517 +jsonschema_specifications/schemas/draft201909/vocabularies/core,sha256=taLElX9kldClCB8ECevooU5BOayyA_x0hHH47eKvWyw,1531 +jsonschema_specifications/schemas/draft201909/vocabularies/meta-data,sha256=1H4kRd1qgicaKY2DzGxsuNSuHhXg3Fa-zTehY-zwEoY,892 +jsonschema_specifications/schemas/draft201909/vocabularies/validation,sha256=HlJsHTNac0gF_ILPV5jBK5YK19olF8Zs2lobCTWcPBw,2834 +jsonschema_specifications/schemas/draft202012/metaschema.json,sha256=Qdp29a-3zgYtJI92JGOpL3ykfk4PkFsiS6av7vkd7Q8,2452 +jsonschema_specifications/schemas/draft202012/vocabularies/applicator,sha256=xKbkFHuR_vf-ptwFjLG_k0AvdBS3ZXiosWqvHa1qrO8,1659 +jsonschema_specifications/schemas/draft202012/vocabularies/content,sha256=CDQ3R3ZOSlgUJieTz01lIFenkThjxZUNQyl-jh_axbY,519 +jsonschema_specifications/schemas/draft202012/vocabularies/core,sha256=wtEqjk3RHTNt_IOj9mOqTGnwtJs76wlP_rJbUxb0gD0,1564 +jsonschema_specifications/schemas/draft202012/vocabularies/format,sha256=UOu_55BhGoSbjMQAoJwdDg-2q1wNQ6DyIgH9NiUFa_Q,403 +jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation,sha256=q8d1rf79idIjWBcNm_k_Tr0jSVY7u-3WDwK-98gSvMA,448 +jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion,sha256=xSJCuaG7eGsmw-gset1CjDH5yW5XXc6Z5W6l_qptogw,445 +jsonschema_specifications/schemas/draft202012/vocabularies/meta-data,sha256=j3bW4U9Bubku-TO3CM3FFEyLUmhlGtEZGEhfsXVPHHY,892 +jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated,sha256=Lb-8tzmUtnCwl2SSre4f_7RsIWgnhNL1pMpWH54tDLQ,506 +jsonschema_specifications/schemas/draft202012/vocabularies/validation,sha256=cBCjHlQfMtK-ch4t40jfdcmzaHaj7TBId_wKvaHTelg,2834 +jsonschema_specifications/schemas/draft3/metaschema.json,sha256=LPdfZENvtb43Si6qJ6uLfh_WUcm0ba6mxnsC_WTiRYs,2600 
+jsonschema_specifications/schemas/draft4/metaschema.json,sha256=4UidC0dV8CeTMCWR0_y48Htok6gqlPJIlfjk7fEbguI,4357 +jsonschema_specifications/schemas/draft6/metaschema.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437 +jsonschema_specifications/schemas/draft7/metaschema.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819 +jsonschema_specifications/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema_specifications/tests/__pycache__/__init__.cpython-312.pyc,, +jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-312.pyc,, +jsonschema_specifications/tests/test_jsonschema_specifications.py,sha256=WkbYRW6A6FoZ0rivShfqVLSCsAiHJ2x8TxqECJTXPTY,1106 diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/WHEEL new file mode 100644 index 0000000..12228d4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/licenses/COPYING b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/licenses/COPYING new file mode 100644 index 0000000..a9f853e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications-2025.4.1.dist-info/licenses/COPYING @@ -0,0 +1,19 @@ +Copyright (c) 2022 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/__init__.py b/.venv/lib/python3.12/site-packages/jsonschema_specifications/__init__.py new file mode 100644 index 0000000..a423574 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/__init__.py @@ -0,0 +1,12 @@ +""" +The JSON Schema meta-schemas and vocabularies, exposed as a Registry. +""" + +from referencing.jsonschema import EMPTY_REGISTRY as _EMPTY_REGISTRY + +from jsonschema_specifications._core import _schemas + +#: A `referencing.jsonschema.SchemaRegistry` containing all of the official +#: meta-schemas and vocabularies. 
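+#: Usage (exercised by the test suite below): calling
+#: REGISTRY.contents("http://json-schema.org/draft-07/schema#") returns the
+#: draft 7 meta-schema as a Mapping; the registry is already fully crawled.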
+REGISTRY = (_schemas() @ _EMPTY_REGISTRY).crawl() +__all__ = ["REGISTRY"] diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..eb2c56d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/__pycache__/_core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema_specifications/__pycache__/_core.cpython-312.pyc new file mode 100644 index 0000000..4827ac9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema_specifications/__pycache__/_core.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/_core.py b/.venv/lib/python3.12/site-packages/jsonschema_specifications/_core.py new file mode 100644 index 0000000..e67bd71 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/_core.py @@ -0,0 +1,38 @@ +""" +Load all the JSON Schema specification's official schemas. +""" + +import json + +try: + from importlib.resources import files +except ImportError: + from importlib_resources import ( # type: ignore[import-not-found, no-redef] + files, + ) + +from referencing import Resource + + +def _schemas(): + """ + All schemas we ship. + """ + # importlib.resources.abc.Traversal doesn't have nice ways to do this that + # I'm aware of... + # + # It can't recurse arbitrarily, e.g. no ``.glob()``. + # + # So this takes some liberties given the real layout of what we ship + # (only 2 levels of nesting, no directories within the second level). 
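+    # Concretely: schemas/draft202012/metaschema.json is a lone file, while
+    # schemas/draft202012/vocabularies/ holds one schema file per vocabulary
+    # (applicator, content, core, ...), which is why the loop below handles
+    # files and directories separately.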
+ + for version in files(__package__).joinpath("schemas").iterdir(): + if version.name.startswith("."): + continue + for child in version.iterdir(): + children = [child] if child.is_file() else child.iterdir() + for path in children: + if path.name.startswith("."): + continue + contents = json.loads(path.read_text(encoding="utf-8")) + yield Resource.from_contents(contents) diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json new file mode 100644 index 0000000..2248a0c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json @@ -0,0 +1,42 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/applicator b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/applicator new file mode 100644 index 0000000..24a1cc4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/applicator @@ -0,0 +1,56 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + 
"additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + "propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/content b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/content new file mode 100644 index 0000000..f6752a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/content @@ -0,0 +1,17 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/core b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/core new file mode 100644 index 0000000..eb708a5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/core @@ -0,0 +1,57 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data new file mode 100644 index 0000000..da04cff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data @@ -0,0 +1,37 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": 
"https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/validation b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/validation new file mode 100644 index 0000000..9f59677 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/validation @@ -0,0 +1,98 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json new file mode 100644 index 0000000..d5e2d31 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json @@ -0,0 +1,58 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": 
"https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator new file mode 100644 index 0000000..ca69923 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator @@ -0,0 +1,48 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { 
"$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content new file mode 100644 index 0000000..2f6e056 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content @@ -0,0 +1,17 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core new file mode 100644 index 0000000..dfc092d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core @@ -0,0 +1,51 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format new file mode 100644 index 0000000..09bbfdd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git 
a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation new file mode 100644 index 0000000..51ef7ea --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion new file mode 100644 index 0000000..5e73fd7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data new file mode 100644 index 0000000..05cbc22 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data @@ -0,0 +1,37 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + "$dynamicAnchor": "meta", + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated new file mode 100644 index 0000000..5f62a3f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + 
"properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation new file mode 100644 index 0000000..606b87b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation @@ -0,0 +1,98 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json new file mode 100644 index 0000000..8b26b1f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json @@ -0,0 +1,172 @@ +{ + "$schema" : "http://json-schema.org/draft-03/schema#", + "id" : "http://json-schema.org/draft-03/schema#", + "type" : "object", + + "properties" : { + "type" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true, + "default" : "any" + }, + + "properties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "patternProperties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalProperties" : { + "type" : [{"$ref" : "#"}, 
"boolean"], + "default" : {} + }, + + "items" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalItems" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "required" : { + "type" : "boolean", + "default" : false + }, + + "dependencies" : { + "type" : "object", + "additionalProperties" : { + "type" : ["string", "array", {"$ref" : "#"}], + "items" : { + "type" : "string" + } + }, + "default" : {} + }, + + "minimum" : { + "type" : "number" + }, + + "maximum" : { + "type" : "number" + }, + + "exclusiveMinimum" : { + "type" : "boolean", + "default" : false + }, + + "exclusiveMaximum" : { + "type" : "boolean", + "default" : false + }, + + "minItems" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxItems" : { + "type" : "integer", + "minimum" : 0 + }, + + "uniqueItems" : { + "type" : "boolean", + "default" : false + }, + + "pattern" : { + "type" : "string", + "format" : "regex" + }, + + "minLength" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxLength" : { + "type" : "integer" + }, + + "enum" : { + "type" : "array", + "minItems" : 1, + "uniqueItems" : true + }, + + "default" : { + "type" : "any" + }, + + "title" : { + "type" : "string" + }, + + "description" : { + "type" : "string" + }, + + "format" : { + "type" : "string" + }, + + "divisibleBy" : { + "type" : "number", + "minimum" : 0, + "exclusiveMinimum" : true, + "default" : 1 + }, + + "disallow" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true + }, + + "extends" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "id" : { + "type" : "string" + }, + + "$ref" : { + "type" : "string" + }, + + "$schema" : { + "type" : "string", + "format" : "uri" + } + }, + + "dependencies" : { + "exclusiveMinimum" : "minimum", + "exclusiveMaximum" : "maximum" + }, + + "default" : {} +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft4/metaschema.json b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft4/metaschema.json new file mode 100644 index 0000000..bcbb847 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft4/metaschema.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": 
"#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft6/metaschema.json b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft6/metaschema.json new file mode 100644 index 0000000..a0d2bf7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft6/metaschema.json @@ -0,0 +1,153 @@ +{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "examples": { + "type": "array", + "items": {} + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + 
"minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array" + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft7/metaschema.json b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft7/metaschema.json new file mode 100644 index 0000000..746cde9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/schemas/draft7/metaschema.json @@ -0,0 +1,166 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + 
"maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": {"$ref": "#"}, + "then": {"$ref": "#"}, + "else": {"$ref": "#"}, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +} diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__init__.py b/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..43aa478 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-312.pyc b/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-312.pyc new file mode 100644 index 0000000..6f2c27f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py b/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py new file mode 100644 index 
0000000..fd2927e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py @@ -0,0 +1,41 @@ +from collections.abc import Mapping +from pathlib import Path + +import pytest + +from jsonschema_specifications import REGISTRY + + +def test_it_contains_metaschemas(): + schema = REGISTRY.contents("http://json-schema.org/draft-07/schema#") + assert isinstance(schema, Mapping) + assert schema["$id"] == "http://json-schema.org/draft-07/schema#" + assert schema["title"] == "Core schema meta-schema" + + +def test_it_is_crawled(): + assert REGISTRY.crawl() == REGISTRY + + +@pytest.mark.parametrize( + "ignored_relative_path", + ["schemas/.DS_Store", "schemas/draft7/.DS_Store"], +) +def test_it_copes_with_dotfiles(ignored_relative_path): + """ + Ignore files like .DS_Store if someone has actually caused one to exist. + + We test here through the private interface as of course the global has + already loaded our schemas. + """ + + import jsonschema_specifications + + package = Path(jsonschema_specifications.__file__).parent + + ignored = package / ignored_relative_path + ignored.touch() + try: + list(jsonschema_specifications._schemas()) + finally: + ignored.unlink() diff --git a/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/METADATA b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/METADATA new file mode 100644 index 0000000..c5075e1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/METADATA @@ -0,0 +1,167 @@ +Metadata-Version: 2.4 +Name: networkx +Version: 3.5 +Summary: Python package for creating and manipulating graphs and networks +Author-email: Aric Hagberg +Maintainer-email: NetworkX Developers +Project-URL: Homepage, https://networkx.org/ +Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues +Project-URL: Documentation, https://networkx.org/documentation/stable/ +Project-URL: Source Code, https://github.com/networkx/networkx +Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math +Platform: Linux +Platform: Mac OSX +Platform: Windows +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Scientific/Engineering :: Bio-Informatics +Classifier: Topic :: Scientific/Engineering :: Information Analysis +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Physics +Requires-Python: >=3.11 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Provides-Extra: default +Requires-Dist: numpy>=1.25; extra == "default" +Requires-Dist: scipy>=1.11.2; extra == "default" +Requires-Dist: matplotlib>=3.8; extra == "default" 
+Requires-Dist: pandas>=2.0; extra == "default" +Provides-Extra: developer +Requires-Dist: pre-commit>=4.1; extra == "developer" +Requires-Dist: mypy>=1.15; extra == "developer" +Provides-Extra: doc +Requires-Dist: sphinx>=8.0; extra == "doc" +Requires-Dist: pydata-sphinx-theme>=0.16; extra == "doc" +Requires-Dist: sphinx-gallery>=0.18; extra == "doc" +Requires-Dist: numpydoc>=1.8.0; extra == "doc" +Requires-Dist: pillow>=10; extra == "doc" +Requires-Dist: texext>=0.6.7; extra == "doc" +Requires-Dist: myst-nb>=1.1; extra == "doc" +Requires-Dist: intersphinx-registry; extra == "doc" +Provides-Extra: example +Requires-Dist: osmnx>=2.0.0; extra == "example" +Requires-Dist: momepy>=0.7.2; extra == "example" +Requires-Dist: contextily>=1.6; extra == "example" +Requires-Dist: seaborn>=0.13; extra == "example" +Requires-Dist: cairocffi>=1.7; extra == "example" +Requires-Dist: igraph>=0.11; extra == "example" +Requires-Dist: scikit-learn>=1.5; extra == "example" +Provides-Extra: extra +Requires-Dist: lxml>=4.6; extra == "extra" +Requires-Dist: pygraphviz>=1.14; extra == "extra" +Requires-Dist: pydot>=3.0.1; extra == "extra" +Requires-Dist: sympy>=1.10; extra == "extra" +Provides-Extra: test +Requires-Dist: pytest>=7.2; extra == "test" +Requires-Dist: pytest-cov>=4.0; extra == "test" +Requires-Dist: pytest-xdist>=3.0; extra == "test" +Provides-Extra: test-extras +Requires-Dist: pytest-mpl; extra == "test-extras" +Requires-Dist: pytest-randomly; extra == "test-extras" +Dynamic: license-file + +NetworkX +======== + + +.. image:: + https://github.com/networkx/networkx/workflows/test/badge.svg?branch=main + :target: https://github.com/networkx/networkx/actions?query=workflow%3Atest + +.. image:: + https://codecov.io/gh/networkx/networkx/branch/main/graph/badge.svg? + :target: https://app.codecov.io/gh/networkx/networkx/branch/main + +.. image:: + https://img.shields.io/pypi/v/networkx.svg? + :target: https://pypi.python.org/pypi/networkx + +.. image:: + https://img.shields.io/pypi/l/networkx.svg? + :target: https://github.com/networkx/networkx/blob/main/LICENSE.txt + +.. image:: + https://img.shields.io/pypi/pyversions/networkx.svg? + :target: https://pypi.python.org/pypi/networkx + +.. image:: + https://img.shields.io/github/labels/networkx/networkx/good%20first%20issue?color=green&label=contribute + :target: https://github.com/networkx/networkx/contribute + + +NetworkX is a Python package for the creation, manipulation, +and study of the structure, dynamics, and functions +of complex networks. + +- **Website (including documentation):** https://networkx.org +- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss +- **Source:** https://github.com/networkx/networkx +- **Bug reports:** https://github.com/networkx/networkx/issues +- **Report a security vulnerability:** https://tidelift.com/security +- **Tutorial:** https://networkx.org/documentation/latest/tutorial.html +- **GitHub Discussions:** https://github.com/networkx/networkx/discussions +- **Discord (Scientific Python) invite link:** https://discord.com/invite/vur45CbwMz +- **NetworkX meetings calendar (open to all):** https://scientific-python.org/calendars/networkx.ics + +Simple example +-------------- + +Find the shortest path between two nodes in an undirected graph: + +.. 
code:: pycon
+
+    >>> import networkx as nx
+    >>> G = nx.Graph()
+    >>> G.add_edge("A", "B", weight=4)
+    >>> G.add_edge("B", "D", weight=2)
+    >>> G.add_edge("A", "C", weight=3)
+    >>> G.add_edge("C", "D", weight=4)
+    >>> nx.shortest_path(G, "A", "D", weight="weight")
+    ['A', 'B', 'D']
+
+Install
+-------
+
+Install the latest released version of NetworkX:
+
+.. code:: shell
+
+    $ pip install networkx
+
+Install with all optional dependencies:
+
+.. code:: shell
+
+    $ pip install networkx[default]
+
+For additional details,
+please see the `installation guide <https://networkx.org/documentation/stable/install.html>`_.
+
+Bugs
+----
+
+Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_.
+Or, even better, fork the repository on `GitHub <https://github.com/networkx/networkx>`_
+and create a pull request (PR). We welcome all changes, big or small, and we
+will help you make the PR if you are new to `git` (just ask on the issue and/or
+see the `contributor guide <https://github.com/networkx/networkx/blob/main/CONTRIBUTING.rst>`_).
+
+License
+-------
+
+Released under the `3-clause BSD license <https://github.com/networkx/networkx/blob/main/LICENSE.txt>`_::
+
+    Copyright (c) 2004-2025, NetworkX Developers
+    Aric Hagberg <hagberg@lanl.gov>
+    Dan Schult <dschult@colgate.edu>
+    Pieter Swart <swart@lanl.gov>
diff --git a/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/RECORD b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/RECORD
new file mode 100644
index 0000000..6f09405
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/RECORD
@@ -0,0 +1,1171 @@
+networkx-3.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+networkx-3.5.dist-info/METADATA,sha256=GLKCWKenBlMsxel1SaDxvrObp7KZwIWQQ2fEkw1Pi5Q,6336
+networkx-3.5.dist-info/RECORD,,
+networkx-3.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx-3.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+networkx-3.5.dist-info/entry_points.txt,sha256=H2jZaDsDJ_i9H2SwWpwuFel8BrZ9xHKuvh-DQAWW9lQ,94
+networkx-3.5.dist-info/licenses/LICENSE.txt,sha256=PPfDoXnYF7I7i4VoIRdp_v35N5fSB9mEuLL6JNAiCzM,1763
+networkx-3.5.dist-info/top_level.txt,sha256=s3Mk-7KOlu-kD39w8Xg_KXoP5Z_MVvgB-upkyuOE4Hk,9
+networkx/__init__.py,sha256=KS06F7sBYlCI_rdSIwN4T0N3Yh5vyiNa76MmMHZOHyE,1272
+networkx/__pycache__/__init__.cpython-312.pyc,,
+networkx/__pycache__/conftest.cpython-312.pyc,,
+networkx/__pycache__/convert.cpython-312.pyc,,
+networkx/__pycache__/convert_matrix.cpython-312.pyc,,
+networkx/__pycache__/exception.cpython-312.pyc,,
+networkx/__pycache__/lazy_imports.cpython-312.pyc,,
+networkx/__pycache__/relabel.cpython-312.pyc,,
+networkx/algorithms/__init__.py,sha256=oij1HDNcE7GhTPAtuHYT8eGZdH4K_vYaha51X5XoUCY,6559
+networkx/algorithms/__pycache__/__init__.cpython-312.pyc,,
+networkx/algorithms/__pycache__/asteroidal.cpython-312.pyc,,
+networkx/algorithms/__pycache__/boundary.cpython-312.pyc,,
+networkx/algorithms/__pycache__/bridges.cpython-312.pyc,,
+networkx/algorithms/__pycache__/broadcasting.cpython-312.pyc,,
+networkx/algorithms/__pycache__/chains.cpython-312.pyc,,
+networkx/algorithms/__pycache__/chordal.cpython-312.pyc,,
+networkx/algorithms/__pycache__/clique.cpython-312.pyc,,
+networkx/algorithms/__pycache__/cluster.cpython-312.pyc,,
+networkx/algorithms/__pycache__/communicability_alg.cpython-312.pyc,,
+networkx/algorithms/__pycache__/core.cpython-312.pyc,,
+networkx/algorithms/__pycache__/covering.cpython-312.pyc,,
+networkx/algorithms/__pycache__/cuts.cpython-312.pyc,,
+networkx/algorithms/__pycache__/cycles.cpython-312.pyc,,
+networkx/algorithms/__pycache__/d_separation.cpython-312.pyc,,
+networkx/algorithms/__pycache__/dag.cpython-312.pyc,,
+networkx/algorithms/__pycache__/distance_measures.cpython-312.pyc,,
+networkx/algorithms/__pycache__/distance_regular.cpython-312.pyc,, +networkx/algorithms/__pycache__/dominance.cpython-312.pyc,, +networkx/algorithms/__pycache__/dominating.cpython-312.pyc,, +networkx/algorithms/__pycache__/efficiency_measures.cpython-312.pyc,, +networkx/algorithms/__pycache__/euler.cpython-312.pyc,, +networkx/algorithms/__pycache__/graph_hashing.cpython-312.pyc,, +networkx/algorithms/__pycache__/graphical.cpython-312.pyc,, +networkx/algorithms/__pycache__/hierarchy.cpython-312.pyc,, +networkx/algorithms/__pycache__/hybrid.cpython-312.pyc,, +networkx/algorithms/__pycache__/isolate.cpython-312.pyc,, +networkx/algorithms/__pycache__/link_prediction.cpython-312.pyc,, +networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-312.pyc,, +networkx/algorithms/__pycache__/matching.cpython-312.pyc,, +networkx/algorithms/__pycache__/mis.cpython-312.pyc,, +networkx/algorithms/__pycache__/moral.cpython-312.pyc,, +networkx/algorithms/__pycache__/node_classification.cpython-312.pyc,, +networkx/algorithms/__pycache__/non_randomness.cpython-312.pyc,, +networkx/algorithms/__pycache__/planar_drawing.cpython-312.pyc,, +networkx/algorithms/__pycache__/planarity.cpython-312.pyc,, +networkx/algorithms/__pycache__/polynomials.cpython-312.pyc,, +networkx/algorithms/__pycache__/reciprocity.cpython-312.pyc,, +networkx/algorithms/__pycache__/regular.cpython-312.pyc,, +networkx/algorithms/__pycache__/richclub.cpython-312.pyc,, +networkx/algorithms/__pycache__/similarity.cpython-312.pyc,, +networkx/algorithms/__pycache__/simple_paths.cpython-312.pyc,, +networkx/algorithms/__pycache__/smallworld.cpython-312.pyc,, +networkx/algorithms/__pycache__/smetric.cpython-312.pyc,, +networkx/algorithms/__pycache__/sparsifiers.cpython-312.pyc,, +networkx/algorithms/__pycache__/structuralholes.cpython-312.pyc,, +networkx/algorithms/__pycache__/summarization.cpython-312.pyc,, +networkx/algorithms/__pycache__/swap.cpython-312.pyc,, +networkx/algorithms/__pycache__/threshold.cpython-312.pyc,, +networkx/algorithms/__pycache__/time_dependent.cpython-312.pyc,, +networkx/algorithms/__pycache__/tournament.cpython-312.pyc,, +networkx/algorithms/__pycache__/triads.cpython-312.pyc,, +networkx/algorithms/__pycache__/vitality.cpython-312.pyc,, +networkx/algorithms/__pycache__/voronoi.cpython-312.pyc,, +networkx/algorithms/__pycache__/walks.cpython-312.pyc,, +networkx/algorithms/__pycache__/wiener.cpython-312.pyc,, +networkx/algorithms/approximation/__init__.py,sha256=1W0c3YlSfVQtNI4-WiQdzMfPqmcrCar5hJQmrwcsVoI,1234 +networkx/algorithms/approximation/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/clique.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/connectivity.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/density.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/distance_measures.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/dominating_set.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/kcomponents.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/matching.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/maxcut.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/ramsey.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/steinertree.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-312.pyc,, 
+networkx/algorithms/approximation/__pycache__/treewidth.cpython-312.pyc,, +networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-312.pyc,, +networkx/algorithms/approximation/clique.py,sha256=b4cnWMJXmmgCyjMI8A_doHZeKS_RQbGqm2L01OpT_Jg,7691 +networkx/algorithms/approximation/clustering_coefficient.py,sha256=SWpSLEhW3DJc1n2fHlSbJSGg3wdoJkN5Y4_tnntn0Ws,2164 +networkx/algorithms/approximation/connectivity.py,sha256=aVXSfUiWEG4gUL0R1u6WZ-h-wheuLP1_suO_pRFB8M4,13118 +networkx/algorithms/approximation/density.py,sha256=hJ26O4rmyXLdsdbekoKVDhXSL1MQWIC0OU4bpqLNnsI,15250 +networkx/algorithms/approximation/distance_measures.py,sha256=UEkmKagNw9sj8kiUDdbAeYuzvZ31pgLMXqzliqMkG84,5805 +networkx/algorithms/approximation/dominating_set.py,sha256=5fC90w1CgYR4Xkpqact8iukKY0i57bMmyJW-A9CToUQ,4710 +networkx/algorithms/approximation/kcomponents.py,sha256=vNXQ2wSSYgNafDD3aHCskxvkZr5k6rEsKmxtTZODI5s,13259 +networkx/algorithms/approximation/matching.py,sha256=PFof5m9AIq9Xr5Kaa_-mYxI1IBBP7HEkjf-R9wVE3bo,1175 +networkx/algorithms/approximation/maxcut.py,sha256=eTQZqsDQAAUaufni-aDJAY2UzIcajDhRMdj-AcqVkPs,4333 +networkx/algorithms/approximation/ramsey.py,sha256=W5tX7BOQJIM_qNsBeUhCXVWMD8DFdeTycYyk08k4Sqk,1358 +networkx/algorithms/approximation/steinertree.py,sha256=nK2Icj54NLisVbOZmwqyxD-eI-D40iU2tHT_o4_m7T4,8763 +networkx/algorithms/approximation/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_density.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-312.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-312.pyc,, +networkx/algorithms/approximation/tests/test_approx_clust_coeff.py,sha256=PGOVEKf2BcJu1vvjZrgTlBBpwM8V6t7yCANjyS9nWF0,1171 +networkx/algorithms/approximation/tests/test_clique.py,sha256=s6HQB-lK3RAu_ftpe2NvIiMu0Ol8tpAdbGvWzucNL6k,3021 +networkx/algorithms/approximation/tests/test_connectivity.py,sha256=gDG6tsgP3ux7Dgu0x7r0nso7_yknIxicV42Gq0It5pc,5952 +networkx/algorithms/approximation/tests/test_density.py,sha256=GeaxHGvO21g_nQTNY4o8KtXIPOcCEluO3_vh6SbJDUA,5008 +networkx/algorithms/approximation/tests/test_distance_measures.py,sha256=axgOojplJIgXdopgkjxjAgvzGTQ1FV1oJ5NG-7ICalo,2023 +networkx/algorithms/approximation/tests/test_dominating_set.py,sha256=l4pBDY7pK7Fxw-S4tOlNcxf-j2j5GpHPJ9f4TrMs1sI,2686 
+networkx/algorithms/approximation/tests/test_kcomponents.py,sha256=tTljP1FHzXrUwi-oBz5AQcibRw1NgR4N5UE0a2OrOUA,9346 +networkx/algorithms/approximation/tests/test_matching.py,sha256=nitZncaM0605kaIu1NO6_5TFV2--nohUCO46XTD_lnM,186 +networkx/algorithms/approximation/tests/test_maxcut.py,sha256=U6CDZFSLfYDII-1nX9XB7avSz10kTx88vNazJFoLQ1k,2804 +networkx/algorithms/approximation/tests/test_ramsey.py,sha256=h36Ol39csHbIoTDBxbxMgn4371iVUGZ3a2N6l7d56lI,1143 +networkx/algorithms/approximation/tests/test_steinertree.py,sha256=-K1N4IxQS3TTJ82lnKvhO2nvJeBV13pWQ1ffCQ5x8MQ,9675 +networkx/algorithms/approximation/tests/test_traveling_salesman.py,sha256=Ssqu-gFtqJ70RjPN27svLdsKOk0ODfsZCpXx7CSBu3A,31911 +networkx/algorithms/approximation/tests/test_treewidth.py,sha256=b_79ZKiW0XX24-GYaeQJ9Zaq7ZFYQT0DcDTkACII3EY,8868 +networkx/algorithms/approximation/tests/test_vertex_cover.py,sha256=FobHNhG9CAMeB_AOEprUs-7XQdPoc1YvfmXhozDZ8pM,1942 +networkx/algorithms/approximation/traveling_salesman.py,sha256=HZU6dbPo3Hiz2-Z3QHLFm5kdGBQwkXPefkVUuW-IC2A,56210 +networkx/algorithms/approximation/treewidth.py,sha256=hfLiPlheQMN7MG6CGR5w7AscKbLu3pTLNYRS13SO0Xo,8389 +networkx/algorithms/approximation/vertex_cover.py,sha256=oIi_yg5O-IisnfmrSof1P4HD-fsZpW69RpvkR_SM5Og,2803 +networkx/algorithms/assortativity/__init__.py,sha256=ov3HRRbeYB_6Qezvxp1OTl77GBpw-EWkWGUzgfT8G9c,294 +networkx/algorithms/assortativity/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/assortativity/__pycache__/connectivity.cpython-312.pyc,, +networkx/algorithms/assortativity/__pycache__/correlation.cpython-312.pyc,, +networkx/algorithms/assortativity/__pycache__/mixing.cpython-312.pyc,, +networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-312.pyc,, +networkx/algorithms/assortativity/__pycache__/pairs.cpython-312.pyc,, +networkx/algorithms/assortativity/connectivity.py,sha256=-V0C5MTqtErl86N-gyrZ487MUyiG5x1QFEZKurOpIJA,4220 +networkx/algorithms/assortativity/correlation.py,sha256=0rc4FDi-e8eQRia7gpFrTqjIy-J7V2GtSwOb4QN6WZk,8689 +networkx/algorithms/assortativity/mixing.py,sha256=RRqqkuVwo71LosJLDbeVCVBikqC7I_XZORdsonQsf9Y,7586 +networkx/algorithms/assortativity/neighbor_degree.py,sha256=UMaQWKBkOZ0ZgC8xGt5fXEz8OL1rgwYjt2zKbKEqofI,5282 +networkx/algorithms/assortativity/pairs.py,sha256=w7xnaWxDDteluHoCsqunLlcM6nlcBenO_5Nz87oOEnE,3841 +networkx/algorithms/assortativity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-312.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-312.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-312.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-312.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-312.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-312.pyc,, +networkx/algorithms/assortativity/tests/base_test.py,sha256=MNeQMLA3oBUCM8TSyNbBQ_uW0nDc1GEZYdNdUwePAm4,2651 +networkx/algorithms/assortativity/tests/test_connectivity.py,sha256=Js841GQLYTLWvc6xZhnyqj-JtyrnS0ska1TFYntxyXA,4978 +networkx/algorithms/assortativity/tests/test_correlation.py,sha256=ddx-yqnVcOfx1dKVNUF695hS3Q-zCmFmCGzK64B7YSE,5068 +networkx/algorithms/assortativity/tests/test_mixing.py,sha256=1kkiMoQXslY-VnT1j00mFbRdj75A4d1b6OPTUOJVgaY,6802 
+networkx/algorithms/assortativity/tests/test_neighbor_degree.py,sha256=wphbir1e-h-BAq5rjvWBi4WlgWdseyQbh_KLGQvy5Pc,3934 +networkx/algorithms/assortativity/tests/test_pairs.py,sha256=t05qP_-gfkbiR6aTLtE1owYl9otBSsuJcRkuZsa63UQ,3008 +networkx/algorithms/asteroidal.py,sha256=hIGg9zI4vylRXkrY5wHverTSOeK6dt1Gn2T_JYvGNnU,5500 +networkx/algorithms/bipartite/__init__.py,sha256=FZug-pg0FUDgAdysnKXxDpi1ly8ezxf4UxBPRklqjys,3883 +networkx/algorithms/bipartite/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/basic.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/centrality.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/cluster.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/covering.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/edgelist.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/extendability.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/generators.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/link_analysis.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/matching.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/matrix.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/projection.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/redundancy.cpython-312.pyc,, +networkx/algorithms/bipartite/__pycache__/spectral.cpython-312.pyc,, +networkx/algorithms/bipartite/basic.py,sha256=JPC2gGuPvFA6q2CuI5mqLX_9QUGxrsQ8cIwcS0e9P4U,8375 +networkx/algorithms/bipartite/centrality.py,sha256=G280bAqeyXyCmes5NpRqUv2Tc-EHWrMshJ3_f4uqV9U,9156 +networkx/algorithms/bipartite/cluster.py,sha256=8aZRmlQ3g0XtzHyF1kUBBwnzMSjtduquHDH8MxKNSEI,7346 +networkx/algorithms/bipartite/covering.py,sha256=B3ITc016Kk70NBv-1lb30emXnfjlMIQJ7M-FIPCZip0,2163 +networkx/algorithms/bipartite/edgelist.py,sha256=mmiM2Bvh9CxRKCsspbs-90GZedpt7Gj5AGzvJ-aYDSM,11409 +networkx/algorithms/bipartite/extendability.py,sha256=OrYHlS4ruQST-dlQOuleiqHFKpVVNOvrG5aDNFgfckg,3989 +networkx/algorithms/bipartite/generators.py,sha256=p0xgyuJ5hp52NYPu2ryPEKZ0MEktP5VSW2HMK2VdBCo,20408 +networkx/algorithms/bipartite/link_analysis.py,sha256=eVRRQgwzMcUPPu6ccskPk72yc_lmnF5EGqNIdXe-MxA,12772 +networkx/algorithms/bipartite/matching.py,sha256=wblV_kyE7G8SHnsD5EYhOOmidrKkePHSyVLE0fn8eE0,21631 +networkx/algorithms/bipartite/matrix.py,sha256=tv-paQTsB_XYgZmtdNTdAhskgKfFeDAyDIGic0d6WVI,6164 +networkx/algorithms/bipartite/projection.py,sha256=YIUlreqQQ6IPE37OXF32zNIdzEGeyR8aY-7iUENZYVA,17252 +networkx/algorithms/bipartite/redundancy.py,sha256=Mnkz0LbNXS0haxtLQ5naorR6C2tNLUbkNS_3PANFxbg,3402 +networkx/algorithms/bipartite/spectral.py,sha256=fu2grV1the_e_G-e_lUdhk8Y9XFe6_p2tPmx3RKntFw,1902 +networkx/algorithms/bipartite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_link_analysis.cpython-312.pyc,, 
+networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-312.pyc,, +networkx/algorithms/bipartite/tests/test_basic.py,sha256=gzbtsQqPi85BznX5REdGBBJVyr9aH4nO06c3eEI4634,4291 +networkx/algorithms/bipartite/tests/test_centrality.py,sha256=PABPbrIyoAziEEQKXsZLl2jT36N8DZpNRzEO-jeu89Y,6362 +networkx/algorithms/bipartite/tests/test_cluster.py,sha256=O0VsPVt8vcY_E1FjjLJX2xaUbhVViI5MP6_gLTbEpos,2801 +networkx/algorithms/bipartite/tests/test_covering.py,sha256=EGVxYQsyLXE5yY5N5u6D4wZq2NcZe9OwlYpEuY6DF3o,1221 +networkx/algorithms/bipartite/tests/test_edgelist.py,sha256=fK35tSekG_-9Ewr5Bhl1bRdwAy247Z9zZ4dQFFDQ9xw,8471 +networkx/algorithms/bipartite/tests/test_extendability.py,sha256=XgPmg6bWiHAF1iQ75_r2NqUxExOQNZRUeYUPzlCa5-E,7043 +networkx/algorithms/bipartite/tests/test_generators.py,sha256=BehRU6SQnWzKsAFoshrN2vpxcPByLAViofGeq38v23E,13203 +networkx/algorithms/bipartite/tests/test_link_analysis.py,sha256=vJMOtYG0vxYQCif_ztnYTUm_13gApfzFiNxChRefONg,6914 +networkx/algorithms/bipartite/tests/test_matching.py,sha256=3-2DMl3tF-g4_xNHvEuY4fZW7S5cqMTO_GUpcz1gkeQ,11973 +networkx/algorithms/bipartite/tests/test_matrix.py,sha256=8-pFysS6LaHJR7IwmQt4JSSd4YGxQVdZbaouZTdg-AE,3052 +networkx/algorithms/bipartite/tests/test_project.py,sha256=FBjkys3JYYzEG4aq_CsQrtm41edZibWI_uDAQ0b4wqM,15134 +networkx/algorithms/bipartite/tests/test_redundancy.py,sha256=utxcrQaTrkcEN3kqtObgKNpLZai8B5sMAqLyXatOuUo,917 +networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py,sha256=1jGDgrIx3-TWOCNMSC4zxmZa7LHyMU69DXh3h12Bjag,2358 +networkx/algorithms/boundary.py,sha256=q3JtWssmn9yCB2mBdkjKZjkaxmBhkG9_dJOzmuJiQos,5339 +networkx/algorithms/bridges.py,sha256=CsxueHDOB9aFM5D8GP83u1ZKGzxF193XBpvmMReAcQk,6066 +networkx/algorithms/broadcasting.py,sha256=Oe13TkT7sL-q0FYsHhTgkh6y3O-DYQShWPkvQjroLaA,4892 +networkx/algorithms/centrality/__init__.py,sha256=Er3YoYoj76UfY4P6I0L-0fCQkO7mMU0b3NLsTT2RGWI,558 +networkx/algorithms/centrality/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/betweenness.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/closeness.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/degree_alg.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/dispersion.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/eigenvector.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/group.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/harmonic.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/katz.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/laplacian.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/load.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/percolation.cpython-312.pyc,, 
+networkx/algorithms/centrality/__pycache__/reaching.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/second_order.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/trophic.cpython-312.pyc,, +networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-312.pyc,, +networkx/algorithms/centrality/betweenness.py,sha256=rAzhHNFWjhYssmONVatlbObY0jajBuyjbhVbm0hhk98,16174 +networkx/algorithms/centrality/betweenness_subset.py,sha256=mkVJdEmR1G8kFoS-KN-jwhUyR_CUiB8DXneGqsqyB6U,9336 +networkx/algorithms/centrality/closeness.py,sha256=ehkntG-gApT9uhWJjGaEZQ-tEQ-hdxDT7luf-uVPNAE,10281 +networkx/algorithms/centrality/current_flow_betweenness.py,sha256=zZRqgrB06uDzgwWJ_FLUF3DSrgkER1tvakYZHX8DbSY,11848 +networkx/algorithms/centrality/current_flow_betweenness_subset.py,sha256=2qtLgf_3ft5qdDvHFrfYUt6zeQi42Nw7XBpSZRboJIA,8107 +networkx/algorithms/centrality/current_flow_closeness.py,sha256=IvecI8BZE4SgKayEXhKowIJw7S2fD_dN__N-f9TW-ME,3327 +networkx/algorithms/centrality/degree_alg.py,sha256=40KCApq4EM3UNgHH_v81V-i_mpzMVVn_Xwf-zUjS_Ic,3900 +networkx/algorithms/centrality/dispersion.py,sha256=M12L2KiVPrC2-SyCXMF0kvxLelgcmvXJkLT_cBHoCTw,3631 +networkx/algorithms/centrality/eigenvector.py,sha256=EK_xrg-eObg1CusFbL35IitLVD6OTQKgMOVDsqqhbJs,13625 +networkx/algorithms/centrality/flow_matrix.py,sha256=Y65m6VbWyYjNK0CInE_lufyEkKy9-TyPmBeXb-Gkz70,3834 +networkx/algorithms/centrality/group.py,sha256=TLlK2eWlcMX7Lvbe2wAcZrmZ9LLTvbRCz-3RbXF0Zug,27960 +networkx/algorithms/centrality/harmonic.py,sha256=jijzEWvT5PPAD0Qos-IATrPJjnC7yV_VHWVwJvpUDcU,2849 +networkx/algorithms/centrality/katz.py,sha256=vncFnHK1uGoaNtGaHEeycNwcVRo5CEg6qrZQR_y4GwA,11044 +networkx/algorithms/centrality/laplacian.py,sha256=uHtZrTExKI4WYSwaiBNzWD8WsHPilSySnWpVubt-Vlo,5556 +networkx/algorithms/centrality/load.py,sha256=M2EdPX4gJEYGjMBIJMFKRWGI9uYHbFOWYxsILeaJuOE,6859 +networkx/algorithms/centrality/percolation.py,sha256=YJB8iYgbpjJ3EYK8pl26iSnjgfFsK31ufytRHnUTYYE,4419 +networkx/algorithms/centrality/reaching.py,sha256=Q9rda_dqXBfVaHOi8FgZSYNHKpJ0SHfCbX_myMsIe2I,7255 +networkx/algorithms/centrality/second_order.py,sha256=4CTboP95B6gUtAtSKLfeeE4s9oq0_3hXsXczxL6c_g8,5012 +networkx/algorithms/centrality/subgraph_alg.py,sha256=S8TK26_yisW3hyyLK-z8GHxGZRZRX8I07-4UHJCVw68,9515 +networkx/algorithms/centrality/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-312.pyc,, 
+networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-312.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-312.pyc,, +networkx/algorithms/centrality/tests/test_betweenness_centrality.py,sha256=F20YqgwdatTm9d62PJBmqB25I1d1bAr_1jxmLsmt-fE,32505 +networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py,sha256=HrHMcgOL69Z6y679SbqZIjkQOnqrYSz24gt17AJ9q-o,12554 +networkx/algorithms/centrality/tests/test_closeness_centrality.py,sha256=-LtG4ex192Xjgy4JCTfdjdJ3Cd9Op-3XnVvG2GA3FWQ,8728 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py,sha256=VOxx1A7iSGtdEbzJYea_sW_Hv0S71-oo1CVX7Rqd5RY,7870 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py,sha256=JfRGgPuiF-vJu5fc2_pcJYREEboxcK_dmy-np39c4Aw,5839 +networkx/algorithms/centrality/tests/test_current_flow_closeness.py,sha256=vflQeoNKngrGUiRb3XNlm2X9wR4vKgMSW_sCyMUCQi8,1379 +networkx/algorithms/centrality/tests/test_degree_centrality.py,sha256=Jn_p5lThA3__ZBTDAORwo_EchjXKKkK1NwU_73HHI6M,4101 +networkx/algorithms/centrality/tests/test_dispersion.py,sha256=ROgl_5bGhcNXonNW3ylsvUcA0NCwynsQu_scic371Gw,1959 +networkx/algorithms/centrality/tests/test_eigenvector_centrality.py,sha256=ChzH0SU3nQnUenvzRGr3BIJzZ9cNl8OfPV9EVUM2zyc,5258 +networkx/algorithms/centrality/tests/test_group.py,sha256=833ME4tGlOGQZz8YANw4MSyeVPpjbyCdYh5X88GOprw,8685 +networkx/algorithms/centrality/tests/test_harmonic_centrality.py,sha256=wI7nStX_kIFJoZQY_i8DXXlZBOJzVnQfOP8yidX0PAU,3867 +networkx/algorithms/centrality/tests/test_katz_centrality.py,sha256=e0XWHIDw056SV5i2PFiHzfPF2DFauW10S17TuvI1UEs,11242 +networkx/algorithms/centrality/tests/test_laplacian_centrality.py,sha256=9Nd9CfiCn2908BgRZ-cQiMQjpOFDu4Bftod1didWyCE,5898 +networkx/algorithms/centrality/tests/test_load_centrality.py,sha256=Vv3zSW89iELN-8KNbUclmkhOe1LzKdF7U_w34nYovIo,11343 +networkx/algorithms/centrality/tests/test_percolation_centrality.py,sha256=ycQ1fvEZZcWAfqL11urT7yHiEP77usJDSG25OQiDM2s,2591 +networkx/algorithms/centrality/tests/test_reaching.py,sha256=_JVeO1Ri-KybdnGCJ_yNPtJQmT_g77z0DAkU0JYFVGQ,5090 +networkx/algorithms/centrality/tests/test_second_order_centrality.py,sha256=ce0wQ4T33lu23wskzGUnBS7X4BSODlvAX1S5KxlLzOA,1999 +networkx/algorithms/centrality/tests/test_subgraph.py,sha256=vhE9Uh-_Hlk49k-ny6ORHCgqk7LWH8OHIYOEYM96uz0,3729 +networkx/algorithms/centrality/tests/test_trophic.py,sha256=mt--0AUc_8qez2SjauEHVnetC3DMwMAlLME6kgb8Lc0,8796 +networkx/algorithms/centrality/tests/test_voterank.py,sha256=tN5u7pKAnJ_4AiwhPW6EuJZz7FLIG2jYqLKcXFi2urk,1687 +networkx/algorithms/centrality/trophic.py,sha256=7mpFrpgQhwP3Ad1plpJu1WzTGR1YWrIp_SUhM0D8Zww,5328 +networkx/algorithms/centrality/voterank_alg.py,sha256=z_1eq8rSDadEO5W5BbAg1zuOJj2di4FUCkmOwiuK12I,3231 
+networkx/algorithms/chains.py,sha256=PPiSq5-GsT1Lsf8fwtGwGDVf1hhv5ZLariWtfzkBbAw,6968 +networkx/algorithms/chordal.py,sha256=AX0dPE-w4G28kMJzEhPYZX1ZP1BdNjxvptsmJ9YFPL0,13403 +networkx/algorithms/clique.py,sha256=-nuPwjoGMJqgHZjX5dJux0mX508bDNEuiOiY7uPVuR4,25985 +networkx/algorithms/cluster.py,sha256=IRfxYlHlWtrjR8QRji-ChHHXvc29Wm8c3WjwT3pl4So,22511 +networkx/algorithms/coloring/__init__.py,sha256=P1cmqrAjcaCdObkNZ1e6Hp__ZpxBAhQx0iIipOVW8jg,182 +networkx/algorithms/coloring/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-312.pyc,, +networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-312.pyc,, +networkx/algorithms/coloring/equitable_coloring.py,sha256=uDcza6PD9qbvwVPUX1MBZbopQdrAEKNk6DpCFkc02tU,16315 +networkx/algorithms/coloring/greedy_coloring.py,sha256=GLXbwSvitsQgmxtOsbgUt4DTkURnb2k0X-7-SNsDW9I,20043 +networkx/algorithms/coloring/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-312.pyc,, +networkx/algorithms/coloring/tests/test_coloring.py,sha256=7v_d1xanjYMZCa3dq2hE2hCcyexwWBTEFV5SoLgQDv4,23697 +networkx/algorithms/communicability_alg.py,sha256=0tZvZKY-_GUUB7GsRILxabS2jEpI51Udg5ADI9ADGZw,4545 +networkx/algorithms/community/__init__.py,sha256=fD_H65kfcYDgqmPu5nP4YrqZmdUUaP6D08uUDNU59Ls,1280 +networkx/algorithms/community/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/asyn_fluid.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/centrality.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/community_utils.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/divisive.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/kclique.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/kernighan_lin.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/label_propagation.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/leiden.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/local.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/louvain.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/lukes.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/modularity_max.cpython-312.pyc,, +networkx/algorithms/community/__pycache__/quality.cpython-312.pyc,, +networkx/algorithms/community/asyn_fluid.py,sha256=0ktsoOa4JKBKiuE3wmGDcBSUgPlFdGvzNheqINtWKbk,5935 +networkx/algorithms/community/centrality.py,sha256=Yyv5kyf1hf_L7iQ_ZbG8_FAkP638Sc_3N4tCSoB6J1w,6635 +networkx/algorithms/community/community_utils.py,sha256=sUi-AcPYyGrYhnjI9ztt-vrSHLl28lKXxTJPfi5N0c8,908 +networkx/algorithms/community/divisive.py,sha256=yFcKfKkiI6FqEVlBVxLa1fbqI1Yeiqe_A5fpPnYvlAE,6655 +networkx/algorithms/community/kclique.py,sha256=DTr9iUT_XWv0S3Y79KQl6OXefjztNMc9SAHWhdFOxcU,2460 +networkx/algorithms/community/kernighan_lin.py,sha256=vPU8Mbpk7_NscMC-gorNoXhsQjkOhgK2YiKOo-u6DvY,4349 +networkx/algorithms/community/label_propagation.py,sha256=LhzAXSHFCPQ2kG_rPgXb06YKdppO7buApksCC4GI4w8,11878 +networkx/algorithms/community/leiden.py,sha256=qaSfRyYakpZCMktLC_5nV2rpLvZBMHytHkiUrf_nR8U,6962 +networkx/algorithms/community/local.py,sha256=w-LK7qlMsQ2YHbZRQP95JvqM2gHGSe4yYZi7CXNkj_M,7316 +networkx/algorithms/community/louvain.py,sha256=jscGGTF6uUnC7yGEZTcE9UFmKsLDFwlZa4ecQyVQrOU,15424 
+networkx/algorithms/community/lukes.py,sha256=gzqnup95RR2UzUiPpIt8qkepzZ9dCWqHGQSVPIJDMx8,8115 +networkx/algorithms/community/modularity_max.py,sha256=gzyZrGHNMtTZyqpLFcJHxgzzIsar1m5DktScODoUngk,18082 +networkx/algorithms/community/quality.py,sha256=oEw-RZBe62janFTTs-ak62APBiF2FpQoBiHv11_4YQY,11943 +networkx/algorithms/community/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/community/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_leiden.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_local.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_quality.cpython-312.pyc,, +networkx/algorithms/community/tests/__pycache__/test_utils.cpython-312.pyc,, +networkx/algorithms/community/tests/test_asyn_fluid.py,sha256=UzAMxJzhN74qUinehR7B1rhU_vsigJ7-cRvcE6jdKyc,3332 +networkx/algorithms/community/tests/test_centrality.py,sha256=s8q4k5aThR0OgO9CDQk_PXMxfllmf5uC1GlvyUc_8EY,2932 +networkx/algorithms/community/tests/test_divisive.py,sha256=-Ee40OR-mPDReTngTEhbpx4_uLtNI7cqFkt8cZT9t5Y,3441 +networkx/algorithms/community/tests/test_kclique.py,sha256=iA0SBqwbDfaD2u7KM6ccs6LfgAQY_xxrnW05UIT_tFA,2413 +networkx/algorithms/community/tests/test_kernighan_lin.py,sha256=rcFDI9mTq1Nwsi251PwDgi1UoxTMPXAeSy2Cp6GtUQg,2710 +networkx/algorithms/community/tests/test_label_propagation.py,sha256=IHidFEv7MI781zsdk7XT848rLvLwDk2wBK1FjL-CRv4,7985 +networkx/algorithms/community/tests/test_leiden.py,sha256=bl4jr-Z0m59AISIAl-OLsnATz1lDacNssc-ICKn72Nw,4803 +networkx/algorithms/community/tests/test_local.py,sha256=c-dy1rs1L0ahhDTQwNZP1zNbffjzRgw5IBFTCBV-fas,1809 +networkx/algorithms/community/tests/test_louvain.py,sha256=TwW1nlSKWGJeIKr9QOJ8xGehSY6R0Nz01xsnFqzt0Oo,8071 +networkx/algorithms/community/tests/test_lukes.py,sha256=f_JU-EzY6PwXEkPN8kk5_3NVg6phlX0nrj1f57M49lk,3961 +networkx/algorithms/community/tests/test_modularity_max.py,sha256=XYyPuDkxL4CYFwnpTdU_qD4GydpqgiRAIJO3CHQN_m4,10617 +networkx/algorithms/community/tests/test_quality.py,sha256=sZEy10hh3zlelUmww5r2pk5LxpZAht06PC5zCHxV1bs,5275 +networkx/algorithms/community/tests/test_utils.py,sha256=gomD6rFgAaywxT1Yjdi4ozY-1rC0ina4jgfvWeCvwGE,704 +networkx/algorithms/components/__init__.py,sha256=Dt74KZWp_cJ_j0lL5hd_S50_hia5DKcC2SjuRnubr6M,173 +networkx/algorithms/components/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/components/__pycache__/attracting.cpython-312.pyc,, +networkx/algorithms/components/__pycache__/biconnected.cpython-312.pyc,, +networkx/algorithms/components/__pycache__/connected.cpython-312.pyc,, +networkx/algorithms/components/__pycache__/semiconnected.cpython-312.pyc,, +networkx/algorithms/components/__pycache__/strongly_connected.cpython-312.pyc,, 
+networkx/algorithms/components/__pycache__/weakly_connected.cpython-312.pyc,, +networkx/algorithms/components/attracting.py,sha256=6az3lgqWhHTXaWUUuOPZfW9t7okliAhooFRotQY5JoM,2712 +networkx/algorithms/components/biconnected.py,sha256=_9GJdPZgqusGKZLzqT9tUSj1XZr2DgohiT6hcHVyil4,12782 +networkx/algorithms/components/connected.py,sha256=wWCsuTc-4pwuo2eTC2AfJSoYA4aU8dSdDjwUvBfOPZg,4471 +networkx/algorithms/components/semiconnected.py,sha256=BaBMFlQ208vuHOo5y1xeV0PDEI3yDUfH6zFb_jkcVhQ,2030 +networkx/algorithms/components/strongly_connected.py,sha256=i41vDeazdNGqG4weufAKd6axaN2nBKmMzURZBs7WsLI,9542 +networkx/algorithms/components/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/components/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-312.pyc,, +networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-312.pyc,, +networkx/algorithms/components/tests/__pycache__/test_connected.cpython-312.pyc,, +networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-312.pyc,, +networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-312.pyc,, +networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-312.pyc,, +networkx/algorithms/components/tests/test_attracting.py,sha256=b3N3ZR9E5gLSQWGgaqhcRfRs4KBW6GnnkVYeAjdxC_o,2243 +networkx/algorithms/components/tests/test_biconnected.py,sha256=N-J-dgBgI77ytYUUrXjduLxtDydH7jS-af98fyPBkYc,6036 +networkx/algorithms/components/tests/test_connected.py,sha256=KMYm55BpbFdGXk_B2WozS9rIagQROd7_k0LT3HFQmr4,4815 +networkx/algorithms/components/tests/test_semiconnected.py,sha256=q860lIxZF5M2JmDwwdzy-SGSXnrillOefMx23GcJpw0,1792 +networkx/algorithms/components/tests/test_strongly_connected.py,sha256=Zm7MgUIZbuPPJu66xZH1zfMZQ_3X1YBl2fLCOjph7NQ,6021 +networkx/algorithms/components/tests/test_weakly_connected.py,sha256=_eUx7226dxme_K2WNmvSIwZXQlKNoCuglWOOC3kFUW4,3083 +networkx/algorithms/components/weakly_connected.py,sha256=KBrE9_y8Jg5jZieK683N_tB2oRv_eisf2gbqVs3SsJM,4467 +networkx/algorithms/connectivity/__init__.py,sha256=EvYKw8LJn7wyZECHAsuEkIaSl-cV-LhymR6tqcn90p8,281 +networkx/algorithms/connectivity/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/connectivity.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/cuts.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-312.pyc,, +networkx/algorithms/connectivity/__pycache__/utils.cpython-312.pyc,, +networkx/algorithms/connectivity/connectivity.py,sha256=xck9yth1asWFAM9Hp7UP6vdUh-Kr0IOmq046plOAXPQ,29367 +networkx/algorithms/connectivity/cuts.py,sha256=o-5GRQotOVXsNq7Kx_w7yBBVuGLa57whS2YF5DVb1KQ,23199 +networkx/algorithms/connectivity/disjoint_paths.py,sha256=57ZerbGqn30B8cwomcsS0GScectvP-mgCMb0GH-RYb8,14649 +networkx/algorithms/connectivity/edge_augmentation.py,sha256=cK9S6pRnsKLyb_57guKfrAbLiXL6sMALHCf7pR4tzFM,44063 +networkx/algorithms/connectivity/edge_kcomponents.py,sha256=hqABcfCqZ-rb45I0qYE-X4NtstsKJbxl37FZzzmoXA4,20894 
+networkx/algorithms/connectivity/kcomponents.py,sha256=TtiEvpaKflkdxJ3r37Qsj1qrSzB2rtHzDcxCDO_Aq2Q,8171 +networkx/algorithms/connectivity/kcutsets.py,sha256=zYohzgkR2FODi_Ew2M9uMLb_a9ZP5fNqcXJwMYy6P7o,9371 +networkx/algorithms/connectivity/stoerwagner.py,sha256=WodsJEqKgsmTTcyUBk2u3wV_CXeon-cAzveWgIGgFmA,5431 +networkx/algorithms/connectivity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-312.pyc,, +networkx/algorithms/connectivity/tests/test_connectivity.py,sha256=eSmsi8uQk6MI591JgtSu2elIusb08bmSZS0h9gxb76I,15027 +networkx/algorithms/connectivity/tests/test_cuts.py,sha256=q8dxYBAnsGtIBBPZCjIvMszZvXOYST2cVVsCJvXbIhw,10356 +networkx/algorithms/connectivity/tests/test_disjoint_paths.py,sha256=NLHReLoXSKoA6KPBNRbjF84ktg5PEaaktIj2AII3SDY,8392 +networkx/algorithms/connectivity/tests/test_edge_augmentation.py,sha256=CQprUtEX1Z5BVTatUIURNHBVepn8qG087JfnJFx7YS8,15739 +networkx/algorithms/connectivity/tests/test_edge_kcomponents.py,sha256=Bzo77zG79Lv_-OzdI1ZZKO_c13wrnFmfv60FrkDyXI0,16453 +networkx/algorithms/connectivity/tests/test_kcomponents.py,sha256=ohoSX8GACeszRZdzTiNuWXSFitfU9DzP0hqllS2gvMU,8554 +networkx/algorithms/connectivity/tests/test_kcutsets.py,sha256=PCg1CFF0AwrURi8V9oGoN6g7xsxRzKxsUUL0NZ_xN8M,8503 +networkx/algorithms/connectivity/tests/test_stoer_wagner.py,sha256=A291C30_t2CI1erPCqN1W0DoAj3zqNA8fThPIj4Rku0,3011 +networkx/algorithms/connectivity/utils.py,sha256=gL8LmZnK4GKAZQcIPEhVNYmVi18Mqsqwg4O4j_et56s,3217 +networkx/algorithms/core.py,sha256=6SO5Wz-LSkx6t2DSq0GPmOvFALGe0gVsXdsvno9Fljs,17479 +networkx/algorithms/covering.py,sha256=I_on4DUgmwbtbo-qlBq4YWhOsNjuSHCfVGZNAL-Sefs,5278 +networkx/algorithms/cuts.py,sha256=-J5j6Yi2CrlFsrX4bK-5kFztD6i4X6gihXwxmFC1zYQ,9990 +networkx/algorithms/cycles.py,sha256=DPypxE7egEC9WIVyZl-j-7hbgnxmLOg5gtODp_tmDE8,43303 +networkx/algorithms/d_separation.py,sha256=DpOl1YaI-72_fgUJ2NI3Piuj7EQRQ8xmyAQCRdLgjTw,26089 +networkx/algorithms/dag.py,sha256=vWd5C5VFEl0-e3sdqwzgCrLcFp0wYQe7u4akKClBUNk,47248 +networkx/algorithms/distance_measures.py,sha256=-0Kz7wL1SiYvJ-mgqFdf0lx492PzWPUA61Ik89Ca8r8,39927 +networkx/algorithms/distance_regular.py,sha256=-1QCGLy7OPoNuV2bYJDY4jVot-0LGMobBQ0DubjbhGI,7053 +networkx/algorithms/dominance.py,sha256=T_z37jx_WSbY_HMfYgqZL6fT-p6PMAlZjSwEVsaLfLE,3450 +networkx/algorithms/dominating.py,sha256=yoRB4WCe0wMvJpkbKtNo2fpd0jTVq6b09JsCYcaeR_Y,8145 +networkx/algorithms/efficiency_measures.py,sha256=VKbLKJgdIbno-YnJaLaCZt7TNXXnQPdz8N99uJCo748,4741 +networkx/algorithms/euler.py,sha256=yCqKaGchFSRPTRDXq7u1fH2IXZF94wWf9S10K9-Cd6U,14205 +networkx/algorithms/flow/__init__.py,sha256=rVtMUy6dViPLewjDRntmn15QF0bQwiDdQbZZx9j7Drc,341 +networkx/algorithms/flow/__pycache__/__init__.cpython-312.pyc,, 
+networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/capacityscaling.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/edmondskarp.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/gomory_hu.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/maxflow.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/mincost.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/networksimplex.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/preflowpush.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-312.pyc,, +networkx/algorithms/flow/__pycache__/utils.cpython-312.pyc,, +networkx/algorithms/flow/boykovkolmogorov.py,sha256=qFcppmiXz4VKKFd4RbDsiWOqJODtDTHbNr9_UFTjQaU,13334 +networkx/algorithms/flow/capacityscaling.py,sha256=8rng2qO5kawNSxq2S8BNlUMmdvNSoC6R8ekiBGU8LxU,14469 +networkx/algorithms/flow/dinitz_alg.py,sha256=I5nnZVsj0aU8-9Cje0umey407epFzpd7BDJpkI6ESK4,8341 +networkx/algorithms/flow/edmondskarp.py,sha256=PEIwLftevS2VYHaTzzZMSOLPy7QSBPsWPedjx1lR6Cs,8056 +networkx/algorithms/flow/gomory_hu.py,sha256=EuibaxPl65shGM9Jxvaa9WMwMmoczDvXXc2b0E81cqM,6345 +networkx/algorithms/flow/maxflow.py,sha256=GuVA4MlPwdOeCdPRXW2QVW1OdJqgzSpPhEyASr302u4,22975 +networkx/algorithms/flow/mincost.py,sha256=GzMYInS4QcNe0yImGrVXJ0bRd7t5TSSMa9jSeenIoOk,12853 +networkx/algorithms/flow/networksimplex.py,sha256=6F1JNT1pMEMt-C27H3PsdZYF-53SrJMrHaakQ8pD7Ng,25098 +networkx/algorithms/flow/preflowpush.py,sha256=CUKZ0-7X9l7P7qH_2n2Immbf8mFm8vocH2SY0tIwjGo,15721 +networkx/algorithms/flow/shortestaugmentingpath.py,sha256=gXXdkY3nH4d0hXVn0P2-kzfC3DHcuCdrudFdxetflKI,10372 +networkx/algorithms/flow/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/flow/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-312.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-312.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-312.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-312.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-312.pyc,, +networkx/algorithms/flow/tests/gl1.gpickle.bz2,sha256=z4-BzrXqruFiGqYLiS2D5ZamFz9vZRc1m2ef89qhsPg,44623 +networkx/algorithms/flow/tests/gw1.gpickle.bz2,sha256=b3nw6Q-kxR7HkWXxWWPh7YlHdXbga8qmeuYiwmBBGTE,42248 +networkx/algorithms/flow/tests/netgen-2.gpickle.bz2,sha256=OxfmbN7ajtuNHexyYmx38fZd1GdeP3bcL8T9hKoDjjA,18972 +networkx/algorithms/flow/tests/test_gomory_hu.py,sha256=aWtbI3AHofIK6LDJnmj9UH1QOfulXsi5NyB7bNyV2Vw,4471 +networkx/algorithms/flow/tests/test_maxflow.py,sha256=a5yUEv58ETO9aZZnxjshPpkG_-k3a4h5J8lPa2IuQcA,18936 +networkx/algorithms/flow/tests/test_maxflow_large_graph.py,sha256=aEoKfeb_q3o16yPNeBjQ-ERmf7PwEr4SFC3-KZgjPC0,4614 +networkx/algorithms/flow/tests/test_mincost.py,sha256=vI61ZCLoWAzwYU4hU0AJS8Ori8vAWEPzFDCUoiloVRk,17806 +networkx/algorithms/flow/tests/test_networksimplex.py,sha256=tCw5C1hLEwbUbt_ySWgkvRyLKj-1T2wfXV9HT4Fx77Q,14162 +networkx/algorithms/flow/tests/wlm3.gpickle.bz2,sha256=zKy6Hg-_swvsNh8OSOyIyZnTR0_Npd35O9RErOF8-g4,88132 +networkx/algorithms/flow/utils.py,sha256=zXUSBR24spnkVzuURoJbLdlxFtmhTQGmr279KvL8leo,6248 +networkx/algorithms/graph_hashing.py,sha256=-gQIfh2WoPiAxglgZ9nx-115_zHCo10sLP6YpdyD97U,16919 +networkx/algorithms/graphical.py,sha256=1NdlhXuGEgUkHPo47EoNTWUMfdeTpiv7BBVM9ty2ivw,15831 
+networkx/algorithms/hierarchy.py,sha256=_KFhCF1Afr2TrkPhqx-1PXUXEtfYLhbRShC58ZKbDGE,1786 +networkx/algorithms/hybrid.py,sha256=z3sIFMOpja1wlj-lI8YI6OIbSLZWHr66uSqyVESZWXY,6209 +networkx/algorithms/isolate.py,sha256=4rDH_iGY2WM5igJS-lBcIVb11MrKdoaFhJLieLZ4BAE,2301 +networkx/algorithms/isomorphism/__init__.py,sha256=gPRQ-_X6xN2lJZPQNw86IVj4NemGmbQYTejf5yJ32N4,406 +networkx/algorithms/isomorphism/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/isomorphism/__pycache__/ismags.cpython-312.pyc,, +networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-312.pyc,, +networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-312.pyc,, +networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-312.pyc,, +networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-312.pyc,, +networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-312.pyc,, +networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-312.pyc,, +networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-312.pyc,, +networkx/algorithms/isomorphism/ismags.py,sha256=tIXZajrhj12eO7Z7bbxkfb603jd8xzirSZOeNo52jxA,43875 +networkx/algorithms/isomorphism/isomorph.py,sha256=O2TZtUPe89CsZxoNchv6FyYsGU79kWrwywGFBeonksE,10561 +networkx/algorithms/isomorphism/isomorphvf2.py,sha256=NTc9uCm2RnR9RxuKsAS_70RWD1zJYCJNPhefcJZUi5U,47637 +networkx/algorithms/isomorphism/matchhelpers.py,sha256=PaZ7PjmNNsJO9KoeRrf9JgcDHIcFr1tZckQc_ol4e9I,10884 +networkx/algorithms/isomorphism/temporalisomorphvf2.py,sha256=-1NW81l8kM9orQ2ni9tcNizQzEhOUE9BaBJXjUWqhiI,10948 +networkx/algorithms/isomorphism/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-312.pyc,, +networkx/algorithms/isomorphism/tests/iso_r01_s80.A99,sha256=hKzMtYLUR8Oqp9pmJR6RwG7qo31aNPZcnXy4KHDGhqU,1442 +networkx/algorithms/isomorphism/tests/iso_r01_s80.B99,sha256=AHx_W2xG4JEcz1xKoN5TwCHVE6-UO2PiMByynkd4TPE,1442 +networkx/algorithms/isomorphism/tests/si2_b06_m200.A99,sha256=NVnPFA52amNl3qM55G1V9eL9ZlP9NwugBlPf-zekTFU,310 +networkx/algorithms/isomorphism/tests/si2_b06_m200.B99,sha256=-clIDp05LFNRHA2BghhGTeyuXDqBBqA9XpEzpB7Ku7M,1602 +networkx/algorithms/isomorphism/tests/test_ismags.py,sha256=U3QhV8wLv7iOnb4ZwT2jNYOrovYybNyGkoaCJ9jVoGs,11433 +networkx/algorithms/isomorphism/tests/test_isomorphism.py,sha256=d5k38hQJ_Jj9QRpg2iAFslirhRMgLcCLVDfIyTnwlk8,4768 +networkx/algorithms/isomorphism/tests/test_isomorphvf2.py,sha256=HUqOFw9g0Lv_fcs0cZ3gCJbpYt6ex0hfC3mVvN-ifK4,12879 +networkx/algorithms/isomorphism/tests/test_match_helpers.py,sha256=uuTcvjgf2LPqSQzzECPIh0dezw8-a1IN0u42u8TxwAw,2483 +networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py,sha256=k8032J4ItZ4aFHeOraOpiF8y4aPm2O1g44UvUfrQJgg,7343 
+networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py,sha256=mZPWRv2sSVJ9PbpGi5MEj5GaFbrp_ha2bL1wre1-Jqo,5681 +networkx/algorithms/isomorphism/tests/test_vf2pp.py,sha256=Ce409PPfOGW5v5VMjymb47OkLelyRmM63RYysvHmPP4,49934 +networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py,sha256=J3gqQ1z73wacQIa2xRi3WVsG_7ULRQ_cvYTNFflBRWo,90317 +networkx/algorithms/isomorphism/tests/test_vf2userfunc.py,sha256=KMRPb-m3fmvRF0vt9laKIzOfwnrkxN2SueLv7JWUuXs,6625 +networkx/algorithms/isomorphism/tree_isomorphism.py,sha256=i5x7T9cLo9MLamJbBcXsjG3nqlXQSus5vmxoEEfFlh8,9039 +networkx/algorithms/isomorphism/vf2pp.py,sha256=rhYf-5CAFUzhVNy-_p2fwBGQDcK8cUHyg8bWxRktrHk,36677 +networkx/algorithms/isomorphism/vf2userfunc.py,sha256=HiPwyr7nJF1QS9w69MzKf6wGvO8cgjvdS5vW59iwCew,7371 +networkx/algorithms/link_analysis/__init__.py,sha256=UkcgTDdzsIu-jsJ4jBwP8sF2CsRPC1YcZZT-q5Wlj3I,118 +networkx/algorithms/link_analysis/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-312.pyc,, +networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-312.pyc,, +networkx/algorithms/link_analysis/hits_alg.py,sha256=OJ2DPKn_qGDBiW7Tln8_vLtJGvBkzWbOxylbHn95ne4,10421 +networkx/algorithms/link_analysis/pagerank_alg.py,sha256=tFjZ0ZhFjJF1Sy0zniBTrHTcbq_GNMrPVeE1ybehyM0,17228 +networkx/algorithms/link_analysis/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-312.pyc,, +networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-312.pyc,, +networkx/algorithms/link_analysis/tests/test_hits.py,sha256=wOeobilAxHJbLDUatd4OwiSCt-4cnyFiX4W8UvJVJBk,2548 +networkx/algorithms/link_analysis/tests/test_pagerank.py,sha256=rUJNa_2nKDSk75Fg1yygMPAqg_Bdk0gGWMDRKY8g1lk,7282 +networkx/algorithms/link_prediction.py,sha256=UYo_LJgoVXcM1iLMXswM2g4jvUJmvxln3e5bVfXxQ10,22253 +networkx/algorithms/lowest_common_ancestors.py,sha256=D1LgoX_ibv2hR-viKEx6l_qp3mWVCkW6YAlGGdoWgXQ,9286 +networkx/algorithms/matching.py,sha256=Iv3Uq3LP94boJLEa6BaEGiebdW6ZIigkndKXwiD1C8E,44518 +networkx/algorithms/minors/__init__.py,sha256=ceeKdsZ6U1H40ED-KmtVGkbADxeWMTVG07Ja8P7N_Pg,587 +networkx/algorithms/minors/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/minors/__pycache__/contraction.cpython-312.pyc,, +networkx/algorithms/minors/contraction.py,sha256=pBL2GucsXbUhLD2LXMB1ITm-nQJtG-HK1SFB7xsaUcE,24354 +networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-312.pyc,, +networkx/algorithms/minors/tests/test_contraction.py,sha256=FiLT10TawT8EoCxtdhcEgNp3zdHX_ONqCmZp-le2OEI,17644 +networkx/algorithms/mis.py,sha256=BEMv_dW8R6CjMMXJQGIhS4HpS8A8AkLJJWnz3GstuS4,2344 +networkx/algorithms/moral.py,sha256=z5lp42k4kqYk7t_FfszVj5KAC7BxXe6Adik3T2qvA6o,1535 +networkx/algorithms/node_classification.py,sha256=a2mVO7NI2IQF4Cd2Mx7TMLoTEu5HNG9RB5sEHQ19Wdw,6469 +networkx/algorithms/non_randomness.py,sha256=Uag54gFi5DR5uAQNFXyKKyORQuowTPuhq_QsjZaVMJ4,3068 +networkx/algorithms/operators/__init__.py,sha256=dJ3xOXvHxSzzM3-YcfvjGTJ_ndxULF1TybkIRzUS87Y,201 +networkx/algorithms/operators/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/operators/__pycache__/all.cpython-312.pyc,, +networkx/algorithms/operators/__pycache__/binary.cpython-312.pyc,, +networkx/algorithms/operators/__pycache__/product.cpython-312.pyc,, +networkx/algorithms/operators/__pycache__/unary.cpython-312.pyc,, 
+networkx/algorithms/operators/all.py,sha256=v_W9ZT3u4STNvT4YI9zYn1Z2PDMhqfh4vfhtO4glaIA,9718 +networkx/algorithms/operators/binary.py,sha256=hziSCLDIVIoTWPV56fED5gI1pErOdyLOQ3Z_E_Nz9As,13150 +networkx/algorithms/operators/product.py,sha256=FQkSIduOv-z1ktVzid2T40759S-BmAfTlya88VytuZc,19632 +networkx/algorithms/operators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/operators/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_all.cpython-312.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-312.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_product.cpython-312.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-312.pyc,, +networkx/algorithms/operators/tests/test_all.py,sha256=Pqjv9QiA0875Yl9D5o6c5Ml0t4KHpH2a5jbpAoZQXFc,8250 +networkx/algorithms/operators/tests/test_binary.py,sha256=k1OtCagdfAYZRSl2JOyH9I7ggwRef2ubVNaiSgG1yv0,12160 +networkx/algorithms/operators/tests/test_product.py,sha256=i4pBb5A4NmaCsllR1XizyhUToaQFMuLZ-JrywkQFdbU,15155 +networkx/algorithms/operators/tests/test_unary.py,sha256=UZdzbt5GI9hnflEizUWXihGqBWmSFJDkzjwVv6wziQE,1415 +networkx/algorithms/operators/unary.py,sha256=Eo2yeTg-F5uODGWSWR_im5VaKZQ97LyATIuKZcAFQR8,1795 +networkx/algorithms/planar_drawing.py,sha256=AXuoT3aFgEtCeMnAaUsRqjxCABdNYZ8Oo9sGOKBQto0,16254 +networkx/algorithms/planarity.py,sha256=hPrIby5SfttcdRXo-1Shi0z_PEnlv-HXsVVjAqhxr3Y,49891 +networkx/algorithms/polynomials.py,sha256=iP30_mcOlj81Vrzt4iB_ZZxYiRokubs-O1i9RW4pgTw,11278 +networkx/algorithms/reciprocity.py,sha256=1WMhLbSMkVPxRPlfUvbgO5FgVvJHn1doXQF4WuqSLQk,2855 +networkx/algorithms/regular.py,sha256=lEhYCP4Yysz8oTdxY8m40oqZcdhjKJuDsEj-P310loI,6794 +networkx/algorithms/richclub.py,sha256=kARzso3M6wnUcAJo2g8ga_ZtigL2czDNzeUDzBtRfqo,4892 +networkx/algorithms/shortest_paths/__init__.py,sha256=Rmxtsje-mPdQyeYhE8TP2NId-iZEOu4eAsWhVRm2Xqk,285 +networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/shortest_paths/__pycache__/astar.cpython-312.pyc,, +networkx/algorithms/shortest_paths/__pycache__/dense.cpython-312.pyc,, +networkx/algorithms/shortest_paths/__pycache__/generic.cpython-312.pyc,, +networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-312.pyc,, +networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-312.pyc,, +networkx/algorithms/shortest_paths/astar.py,sha256=0pEnYNzG7Z86zWqyFe0OL8HTlQO9PwjDABy_ypC30_A,8937 +networkx/algorithms/shortest_paths/dense.py,sha256=5Y8ziU-RsWZRTzxEEzg5gB7f4j9yxbNhplTbLaMliG8,8261 +networkx/algorithms/shortest_paths/generic.py,sha256=zj0adw1324M87Lh-CHR4hkS1nyXGnCHa8ddv0SpyB28,25238 +networkx/algorithms/shortest_paths/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-312.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-312.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-312.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-312.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-312.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-312.pyc,, +networkx/algorithms/shortest_paths/tests/test_astar.py,sha256=F0zXZ7R0eXP8Et4F3mrCtdtfE1-tQTGB2ATcFveDUHI,8989 
+networkx/algorithms/shortest_paths/tests/test_dense.py,sha256=ievl4gu3Exl_31hp4OKcsAGPb3g3_xFUM4t3NnvrG_A,6747 +networkx/algorithms/shortest_paths/tests/test_dense_numpy.py,sha256=IEwhjPNTlc2H1on-B3WhFoyLkDkJVdAFBjn675XALHA,2299 +networkx/algorithms/shortest_paths/tests/test_generic.py,sha256=2KU1zqUkt8ZF3ssyFdZ1E--PcymhvEQI5lHUy6WxVAs,18405 +networkx/algorithms/shortest_paths/tests/test_unweighted.py,sha256=r1F5qVEDZnzPn5yJ71Rp-Z7pq33CzJFfZmDhVBekR9Y,5879 +networkx/algorithms/shortest_paths/tests/test_weighted.py,sha256=1bmmtL2s2JsGKlnLc7UC5b3LPdL1jKCWky7KfN9dZo8,35032 +networkx/algorithms/shortest_paths/unweighted.py,sha256=qqMEVSBHCIeoDzdfHqQ5irmVWN8VOKLvVf61CcrwzP8,15527 +networkx/algorithms/shortest_paths/weighted.py,sha256=IFPx_QQll5lA2p4bjAIjp6n3rM7Jy3E1Epu8-bzR1l0,82423 +networkx/algorithms/similarity.py,sha256=4TW0bqgs7vdNE8lK-rXLQOZJwMe_HJl1Ke3APtwF4yY,60915 +networkx/algorithms/simple_paths.py,sha256=cJb2I8KMdJ3oSGVanVwpzaewsl6nm9L_sSqIkyihJvI,30298 +networkx/algorithms/smallworld.py,sha256=3xT-z2_CVdp5-Ap8vF6fsd3DiavDYtspFNZrcwcpXG0,13565 +networkx/algorithms/smetric.py,sha256=_Aj4BIMnafiXbJtLkvAfAnIEMdI9OcVvMy6kk9KKTns,770 +networkx/algorithms/sparsifiers.py,sha256=4T8pMlh-usEHA2-rZFh-CmZbBY9dcXIHjoqR-oJ2hSw,10048 +networkx/algorithms/structuralholes.py,sha256=rYaVkndSJ3kFxbjkhfQd6uMBpYO7NNlcsOPR0Xbiy7Y,12626 +networkx/algorithms/summarization.py,sha256=CygTsSthyCKHs0ZTZsCgWnyaT8annQbLpUtahmfY9Sw,23251 +networkx/algorithms/swap.py,sha256=NVZMmlnkdxgwwNw5GDrc8waNERcdCu52ydHcBdOA_hw,14744 +networkx/algorithms/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_boundary.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_bridges.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_broadcasting.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_chains.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_chordal.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_clique.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_cluster.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_communicability.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_core.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_covering.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_cuts.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_cycles.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_d_separation.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_dag.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_dominance.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_dominating.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_efficiency.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_euler.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_graphical.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_hybrid.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_isolate.cpython-312.pyc,, 
+networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_matching.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_mis.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_moral.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_node_classification.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_planarity.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_polynomials.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_regular.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_richclub.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_similarity.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_smallworld.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_smetric.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_summarization.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_swap.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_threshold.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_tournament.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_triads.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_vitality.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_voronoi.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_walks.cpython-312.pyc,, +networkx/algorithms/tests/__pycache__/test_wiener.cpython-312.pyc,, +networkx/algorithms/tests/test_asteroidal.py,sha256=DnWI5_jnaaZMxtG44XD0K690HZs8ez7HU_9dSR-p6eA,502 +networkx/algorithms/tests/test_boundary.py,sha256=1OSJh32FYFhAVYB5zqxhZGEXZLS0HPp9kvfHZvWmD3o,6227 +networkx/algorithms/tests/test_bridges.py,sha256=jSCguECho0GNHnu0vpRh1twyfGP6tWFcaYL1rgvc8mU,4026 +networkx/algorithms/tests/test_broadcasting.py,sha256=2LIMrKmSGSSWRLy5TR_NzDQD1annA2JohpsbEbVJKfE,2021 +networkx/algorithms/tests/test_chains.py,sha256=4fR5Uh2NMrTZGT1yR6sAsQEP3uy1U3UnwjcXLFk9d1Y,4196 +networkx/algorithms/tests/test_chordal.py,sha256=DPdNPY7KtqCsCwYVb4xQfnIm-z35dUJIWxNHtAiQLAQ,4438 +networkx/algorithms/tests/test_clique.py,sha256=FPIF2f8NLODsz-k_qrHt7DolClV_VdNWSh68oe8-ygI,9413 +networkx/algorithms/tests/test_cluster.py,sha256=2s23-dRp3NemK4lpknYMJoN3Y_7Ub8VluzH4F3uYeok,17037 +networkx/algorithms/tests/test_communicability.py,sha256=4KK9wU9gAUqHAAAyHwAKpq2dV9g415s_X0qd7Tt83gU,2938 +networkx/algorithms/tests/test_core.py,sha256=Q3bi_c0LpxZ-0VopHU6Lyzkb_JKseVT16H45qDuax9A,9619 +networkx/algorithms/tests/test_covering.py,sha256=EeBjQ5mxcVctgavqXZ255T8ryFocuxjxdVpIxVUNFvw,2718 +networkx/algorithms/tests/test_cuts.py,sha256=gKm9VDtnmwFli6kgwV1ktEFI_rw84p2Sg02Em6SoW5Q,5376 +networkx/algorithms/tests/test_cycles.py,sha256=CBEQ_kNlHg5PHb9vKMS9ot-lqUuvs_cAgaPtnEQn76g,34851 
+networkx/algorithms/tests/test_d_separation.py,sha256=cxoAOA_EahXQHFsFaRopz6cp3h5sMnD7D_isq4_EuWI,10714 +networkx/algorithms/tests/test_dag.py,sha256=It8wG6ltcMUhD5Z1WClFBEhFqgPNL5xo4hqBIE0Fe0c,29486 +networkx/algorithms/tests/test_distance_measures.py,sha256=kF-pR5WbVBs1d_IO-U6Dr-sf8MaRHfxc_D286sBN62E,28793 +networkx/algorithms/tests/test_distance_regular.py,sha256=w27OTUtAI0VQv7cikkOdJg4bo4q7xTNIVE8nbU_x7b8,2915 +networkx/algorithms/tests/test_dominance.py,sha256=9Vx7WCGYDmwR3BasLlsAHBPmfGh-Xqm4DHBX-cyRu10,9184 +networkx/algorithms/tests/test_dominating.py,sha256=5WwPlrQ6_pFaVx_-K4D5cHNbCZL23qp-PytgsXDuS4Q,3112 +networkx/algorithms/tests/test_efficiency.py,sha256=QKWMvyjCG1Byt-oNp7Rz_qxnVeT77Zk27lrzI1qH0mA,1894 +networkx/algorithms/tests/test_euler.py,sha256=L4L1ljHVxQxjQQludO2r6k3UZU7WAY_N6WYUjFx1fEk,11209 +networkx/algorithms/tests/test_graph_hashing.py,sha256=ribxC8ZkNxkkBDyjSZ8P2f8J6y5y4g-Ik-k5hBgZcx4,31567 +networkx/algorithms/tests/test_graphical.py,sha256=uhFjvs04odxABToY4IRig_CaUTpAC3SfZRu1p1T7FwY,5366 +networkx/algorithms/tests/test_hierarchy.py,sha256=uW8DqCdXiAeypkNPKcAYX7aW86CawYH84Q0bW4cDTXo,1184 +networkx/algorithms/tests/test_hybrid.py,sha256=kQLzaMoqZcKFaJ3D7PKbY2O-FX59XDZ1pN5un8My-tk,720 +networkx/algorithms/tests/test_isolate.py,sha256=LyR0YYHJDH5vppQzGzGiJK-aaIV17_Jmla8dMf93olg,555 +networkx/algorithms/tests/test_link_prediction.py,sha256=_I-H0TyiQ8-q_Qx5JULs6URcNUrLSVsdCkveuGoWqWk,20028 +networkx/algorithms/tests/test_lowest_common_ancestors.py,sha256=5ZT_17q-5ipw7NNgNisaVcxIxrY6XeQQaUlBXHx9k9g,14160 +networkx/algorithms/tests/test_matching.py,sha256=iHivzaOjwdRe4O1OQbc2TVoEszg4gVfB0Q68Ghug3ko,17897 +networkx/algorithms/tests/test_max_weight_clique.py,sha256=M1eoy8OtuQVZkEvNMauV9vqR6hHtOCrtq6INv2qzMyA,6739 +networkx/algorithms/tests/test_mis.py,sha256=Z2tKoqbs-AFPzEBDYO7S8U-F7usLfZJ2l6j2DpZUts4,1865 +networkx/algorithms/tests/test_moral.py,sha256=15PZgkx7O9aXQB1npQ2JNqBBkEqPPP2RfeZzKqY-GNU,452 +networkx/algorithms/tests/test_node_classification.py,sha256=NgJJKUHH1GoD1GE3F4QRYBLM3fUo_En3RNtZvhqCjlg,4663 +networkx/algorithms/tests/test_non_randomness.py,sha256=xMkJp0F91Qn45EUuMottk1WSDfOQ90TDQfZFDSJ8tkE,1000 +networkx/algorithms/tests/test_planar_drawing.py,sha256=fBTd9JzDkTqNQz7GBQCS9lShU1P3lWRQ7L-E5WhSdlA,8765 +networkx/algorithms/tests/test_planarity.py,sha256=2iy22H4APB2K0DiPcoWmiQV9BTQlJ9JSOexubN14sU0,17312 +networkx/algorithms/tests/test_polynomials.py,sha256=baI0Kua1pRngRC6Scm5gRRwi1bl0iET5_Xxo3AZTP3A,1983 +networkx/algorithms/tests/test_reciprocity.py,sha256=X_PXWFOTzuEcyMWpRdwEJfm8lJOfNE_1rb9AAybf4is,1296 +networkx/algorithms/tests/test_regular.py,sha256=Cy7nQeQ4yubZ91ey5itQF4jPyZIhZlE9qB0cFQj9xos,2610 +networkx/algorithms/tests/test_richclub.py,sha256=Wl8aE0CSswaXJiHCQ42IkyRvjWaJQZ2OQcbC7LpIgfo,3962 +networkx/algorithms/tests/test_similarity.py,sha256=rBVTpFEOOHs95xq1vVkqQdV3ASP02U__44vVcUVGwpc,34412 +networkx/algorithms/tests/test_simple_paths.py,sha256=7U9wCXz4SHK0XeYrs1k2KjYgrYVQDnts2ggQLzU18p0,25181 +networkx/algorithms/tests/test_smallworld.py,sha256=bmoNDxIuhV0bBKWh2hgFw0XYTPCc2rgg1y77uMg6rE8,2390 +networkx/algorithms/tests/test_smetric.py,sha256=VM14L4X1AABvINDL9qKXzlech_Q2g4Aee-ozWM2Qrr4,144 +networkx/algorithms/tests/test_sparsifiers.py,sha256=1GRbQy1vfmwv6eUhP4Io0aykH2VyTJfFWmncrXmTqi4,4044 +networkx/algorithms/tests/test_structuralholes.py,sha256=F0xZ1nlKJLGmQDebDiExju4fMffu79qThnyF7ZGA53A,7592 +networkx/algorithms/tests/test_summarization.py,sha256=uNyaUstobIEu6M_Hexik-3YiYTRSy_XO6LUqoE4wazw,21312 
+networkx/algorithms/tests/test_swap.py,sha256=WJtGMkSbAd1Cv06VaUeDVHosNOtdigsqEspyux0ExCs,6144 +networkx/algorithms/tests/test_threshold.py,sha256=D63eul7s_L3YazfLLlCMj7kvfSmXknkOUDrLkYzV_q8,9564 +networkx/algorithms/tests/test_time_dependent.py,sha256=NmuV2kDo4nh2MeN0hwcJf0QSDtqMD0dfSeeKSsYBtQ8,13342 +networkx/algorithms/tests/test_tournament.py,sha256=tfOPwN7YHnw0oxl9d7-ErPBFKz6KBfSc5S3tbGrGHs8,4107 +networkx/algorithms/tests/test_triads.py,sha256=WJFUC6M3Ad--I7Ua64t2TsoBJ1JLDSiCIO9_uyzNGRQ,8221 +networkx/algorithms/tests/test_vitality.py,sha256=p5lPWCtVMtbvxDw6TJUaf8vpb0zKPoz5pND722xiypQ,1380 +networkx/algorithms/tests/test_voronoi.py,sha256=M4B6JtkJUw56ULEWRs1kyVEUsroNrnb5FBq9OioAyHM,3477 +networkx/algorithms/tests/test_walks.py,sha256=X8cb-YvGHiiqbMEXuKMSdTAb9WtVtbHjIESNSqpJTmU,1499 +networkx/algorithms/tests/test_wiener.py,sha256=k9ld7wdPq5knS6cjo0hja8aWL-cdxYKGRpDU0z3cvNI,3209 +networkx/algorithms/threshold.py,sha256=P8xV0p9pzK0mdRTGs_GaOIPR56hKOwFiYaTfJbXyZgo,32858 +networkx/algorithms/time_dependent.py,sha256=PAeJ7Yt8kUqbDgvBaz_ZfUFZg-w-vf1gPC0HO6go_TI,5762 +networkx/algorithms/tournament.py,sha256=9NBu3Z2WOmve72eW4ZazFWsOqxRYiJbuxE69eUmP0D0,11604 +networkx/algorithms/traversal/__init__.py,sha256=YtFrfNjciqTOI6jGePQaJ01tRSEQXTHqTGGNhDEDb_8,142 +networkx/algorithms/traversal/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/traversal/__pycache__/beamsearch.cpython-312.pyc,, +networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-312.pyc,, +networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-312.pyc,, +networkx/algorithms/traversal/__pycache__/edgebfs.cpython-312.pyc,, +networkx/algorithms/traversal/__pycache__/edgedfs.cpython-312.pyc,, +networkx/algorithms/traversal/beamsearch.py,sha256=Vn0U4Wck8ICShIAGggv3tVtQWVW0ABEz_hcBsGrql6o,3473 +networkx/algorithms/traversal/breadth_first_search.py,sha256=7T7EVhM0Pxprd-9rKj8ZjXbhTcEJHwltKViaSseKtP8,18307 +networkx/algorithms/traversal/depth_first_search.py,sha256=2V4T3tGujcAtV3W6WcTQUjGAAe3b1rqinONowUhLsa8,16795 +networkx/algorithms/traversal/edgebfs.py,sha256=JJ1mQCmv8WydqQtIVQC1HqkqEFDMeKVzJ_hexwU8sXA,6333 +networkx/algorithms/traversal/edgedfs.py,sha256=T1xHGr8p_eC1pEUyCP-kpaYlIY_la_ms-J0-50c4EDk,6041 +networkx/algorithms/traversal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-312.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-312.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-312.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-312.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-312.pyc,, +networkx/algorithms/traversal/tests/test_beamsearch.py,sha256=bzUcswZ1qo0ecDZYSER_4enbsW6SjTpb_3Nb3fqmkAo,900 +networkx/algorithms/traversal/tests/test_bfs.py,sha256=tGQSiYhc6BxaZEuw-G3ZxVfYLN7q0zrP_BeKl6EfxkU,6447 +networkx/algorithms/traversal/tests/test_dfs.py,sha256=rQMq7GToM6CVU3WwIZfmqa-HrqlmvIT1DryRU7NbU_U,10580 +networkx/algorithms/traversal/tests/test_edgebfs.py,sha256=8oplCu0fct3QipT0JB0-292EA2aOm8zWlMkPedfe6iY,4702 +networkx/algorithms/traversal/tests/test_edgedfs.py,sha256=HGmC3GUYSn9XLMHQpdefdE6g-Uh3KqbmgEEXBcckdYc,4775 +networkx/algorithms/tree/__init__.py,sha256=wm_FjX3G7hqJfyNmeEaJsRjZI-8Kkv0Nb5jAmQNXzSc,149 +networkx/algorithms/tree/__pycache__/__init__.cpython-312.pyc,, 
+networkx/algorithms/tree/__pycache__/branchings.cpython-312.pyc,, +networkx/algorithms/tree/__pycache__/coding.cpython-312.pyc,, +networkx/algorithms/tree/__pycache__/decomposition.cpython-312.pyc,, +networkx/algorithms/tree/__pycache__/mst.cpython-312.pyc,, +networkx/algorithms/tree/__pycache__/operations.cpython-312.pyc,, +networkx/algorithms/tree/__pycache__/recognition.cpython-312.pyc,, +networkx/algorithms/tree/branchings.py,sha256=B0c_uKpcnV2SwJMZJRK0BMEz8LkvIcOhv1y0AI0gTnY,34339 +networkx/algorithms/tree/coding.py,sha256=l9P4jhmhg_9uYBVMNSmLYW8btHJH07-Sl2SIKJtO1LA,13466 +networkx/algorithms/tree/decomposition.py,sha256=lY_rqx9JxnLEkp1wiAv0mX62PGPwGQ6SW4Jp48o8aiw,3071 +networkx/algorithms/tree/mst.py,sha256=kS0XtC3KftZEMLgdY7UavdVqyzbMKJz6BanRA1VGA8A,46151 +networkx/algorithms/tree/operations.py,sha256=TjfLQpxwlaLgdOnhLkdzHfZ68yFCYz6BMzzh_d_2mJc,4048 +networkx/algorithms/tree/recognition.py,sha256=bYnaDN0ZaIWTgq0tbPEHAcdxQBWZpDvWypZarBbA334,7569 +networkx/algorithms/tree/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tree/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-312.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-312.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-312.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-312.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-312.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-312.pyc,, +networkx/algorithms/tree/tests/test_branchings.py,sha256=euky-3tFOV5CVV5v_yFawuVGCaqTuTrKtBIkCygwhiA,17731 +networkx/algorithms/tree/tests/test_coding.py,sha256=XC6SbfA2zVGH4FyJJyv6o8eOnBu7FNzNot3SKs7QmEo,3955 +networkx/algorithms/tree/tests/test_decomposition.py,sha256=vnl_xoQzi1LnlZL25vXOZWwvaWmon3-x222OKt4eDqE,1871 +networkx/algorithms/tree/tests/test_mst.py,sha256=ad6kAEpAF9PH1FyD_jHa2xnAtgBGs75sYGTY0s530BQ,31631 +networkx/algorithms/tree/tests/test_operations.py,sha256=ybU96kROTVJRTyjLG7JSJjYlPxaWmYjUVJqbXV5VGGI,1961 +networkx/algorithms/tree/tests/test_recognition.py,sha256=qeMEIvg-j2MqaU-TNIQhCcXxao8vTBy0wjpU7jr2iw8,4521 +networkx/algorithms/triads.py,sha256=dHgLUUvLo3hAcb-nfNdU2U62MS8vSN4ZykLoIHDzjxo,14229 +networkx/algorithms/vitality.py,sha256=8M1cubIydO49El2kwVCURHZ2UwCtfGVFeGS8-JYt1ko,2289 +networkx/algorithms/voronoi.py,sha256=07SnSpxLDz4k6K59Jo-VTNA-Qy5knaHfBC-y_5vAOLQ,3183 +networkx/algorithms/walks.py,sha256=0JOLhpAyeNzmF8EtlVlYOWEPJJvCIltt7tbk1Vx52dI,2427 +networkx/algorithms/wiener.py,sha256=WOUG0L5xDKpY4uspyI-oDo1hWuHxbUnTFZEe_-IAx5M,7639 +networkx/classes/__init__.py,sha256=Q9oONJrnTFs874SGpwcbV_kyJTDcrLI69GFt99MiE6I,364 +networkx/classes/__pycache__/__init__.cpython-312.pyc,, +networkx/classes/__pycache__/coreviews.cpython-312.pyc,, +networkx/classes/__pycache__/digraph.cpython-312.pyc,, +networkx/classes/__pycache__/filters.cpython-312.pyc,, +networkx/classes/__pycache__/function.cpython-312.pyc,, +networkx/classes/__pycache__/graph.cpython-312.pyc,, +networkx/classes/__pycache__/graphviews.cpython-312.pyc,, +networkx/classes/__pycache__/multidigraph.cpython-312.pyc,, +networkx/classes/__pycache__/multigraph.cpython-312.pyc,, +networkx/classes/__pycache__/reportviews.cpython-312.pyc,, +networkx/classes/coreviews.py,sha256=5Dgi8PjMZJz_Uun2SXL5a6mxtAfdbm3kPIdbgzSTXgM,13251 +networkx/classes/digraph.py,sha256=Fup1GbADCpXKLA12M67RbhA0cm6BGi_4cIxBLsjHEtc,48101 
+networkx/classes/filters.py,sha256=PCy7BsoIby8VcamqDjZQiNAe_5egI0WKUq-y5nc9unQ,2817 +networkx/classes/function.py,sha256=NugxIYb05wUo5brPlD2liCBiulw7cB8_O98Am0GShyM,39334 +networkx/classes/graph.py,sha256=uQiUwlK6pw9GhyYq75UX4NKx9ZlkMg2J2wMY3QC7_no,71659 +networkx/classes/graphviews.py,sha256=ulUTLozEK_hj_4TGHdgvxveR2-rb92Q14jjxH4oH4Go,8520 +networkx/classes/multidigraph.py,sha256=aOqjfSJ6Lx9l-1zwCIMNYRW0mW1wPDniEcRWQ8gKmYY,36351 +networkx/classes/multigraph.py,sha256=PSZR7QgyszlO5PqzhxI954LySqLHq-589OQrCOtC9pw,47248 +networkx/classes/reportviews.py,sha256=u0hNZqaWXCfLMP_lq835XCIVStkZQJ9HaQPeDPPoo88,46132 +networkx/classes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/classes/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/classes/tests/__pycache__/dispatch_interface.cpython-312.pyc,, +networkx/classes/tests/__pycache__/historical_tests.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_coreviews.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_digraph.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_digraph_historical.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_filters.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_function.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_graph.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_graph_historical.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_graphviews.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_multidigraph.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_multigraph.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_reportviews.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_special.cpython-312.pyc,, +networkx/classes/tests/__pycache__/test_subgraphviews.cpython-312.pyc,, +networkx/classes/tests/dispatch_interface.py,sha256=OA4t1XQX7Qqm3pGhTZYKno4c_zbIcvpSWstO_LXIVRo,6479 +networkx/classes/tests/historical_tests.py,sha256=nrv0ccvUMtp714VEV1I9UTwWz8ohgujbC-Xnxpc7kU8,16174 +networkx/classes/tests/test_coreviews.py,sha256=qzdozzWK8vLag-CAUqrXAM2CZZwMFN5vMu6Tdrwdf-E,12128 +networkx/classes/tests/test_digraph.py,sha256=uw0FuEu3y_YI-PSGuQCRytFpXLF7Eye2fqLJaKbXkBc,12283 +networkx/classes/tests/test_digraph_historical.py,sha256=8TbDcvmLgfllfGomTRStSpT5WIQiTHLt-rS3GyJEItU,3668 +networkx/classes/tests/test_filters.py,sha256=fBLig8z548gsBBlQw6VJdGZb4IcqJj7_0mi2Fd2ncEM,5851 +networkx/classes/tests/test_function.py,sha256=MLDKHDulOIBgPvT0q3E4MX4KStLrPAfP8VEnA2qGaxY,34997 +networkx/classes/tests/test_graph.py,sha256=Mg9SHbtS3RI_9G4E7dlDf1jpviJOmw4HIwnFuIPuQFw,31105 +networkx/classes/tests/test_graph_historical.py,sha256=EVAndjelyyZ9laQCTrj6KR6lhdmG8Y1raP2IrxkZxdA,258 +networkx/classes/tests/test_graphviews.py,sha256=aXj--KlMuicu4qgOGNHbW49tKIoXrj-VTxD_7dl2DA4,11438 +networkx/classes/tests/test_multidigraph.py,sha256=ryTKegCoYixXbAqOn3mIt9vSMb5666Dv-pfMkXEjoUE,16342 +networkx/classes/tests/test_multigraph.py,sha256=0vFQO3RCJaBpzXvnQzdWa_qYLHNo_I9DICYhPZJNUMk,18777 +networkx/classes/tests/test_reportviews.py,sha256=MgP9_cKlw4aTU5m1XbsNTM3ZjmO7roOOf-7x-Hn9xKs,41332 +networkx/classes/tests/test_special.py,sha256=wl-Idzdc55eOQ4Tn5SNqOG21AjwLb2TTp9x102Qx3Ms,4053 +networkx/classes/tests/test_subgraphviews.py,sha256=iIMkbHdtJsgDlcdphhE_uUTJ0VGTGFzKIVvMd7oamoU,13669 +networkx/conftest.py,sha256=QecHXLPmER-f6bvc_LpKSs1gaQA7jHJ7IJVzwNuN0zg,7941 +networkx/convert.py,sha256=Xmkhacuyw9CDviBTZPlH6nmD-JabSk0aPcVmGgp4LeY,16143 
+networkx/convert_matrix.py,sha256=9BXSEXzCEIte1nx8f35cWhXo-EVYjOkuW4H3Jutq-oE,45342 +networkx/drawing/__init__.py,sha256=rnTFNzLc4fis1hTAEpnWTC80neAR88-llVQ-LObN-i4,160 +networkx/drawing/__pycache__/__init__.cpython-312.pyc,, +networkx/drawing/__pycache__/layout.cpython-312.pyc,, +networkx/drawing/__pycache__/nx_agraph.cpython-312.pyc,, +networkx/drawing/__pycache__/nx_latex.cpython-312.pyc,, +networkx/drawing/__pycache__/nx_pydot.cpython-312.pyc,, +networkx/drawing/__pycache__/nx_pylab.cpython-312.pyc,, +networkx/drawing/layout.py,sha256=xXAHS00FbQc90-1JOSQL3hKxRWdzvn8pzGoQuafUBk0,66075 +networkx/drawing/nx_agraph.py,sha256=icmJvRQeCzy6Duar-0xlDZf8Q6JtrhrB_YwQjfLxcZg,13987 +networkx/drawing/nx_latex.py,sha256=PySHixo2YhGV0xbKLroaX-_XcI1L5u8IF8nP6xdV2NU,24846 +networkx/drawing/nx_pydot.py,sha256=9KbNVrVcU5gFmo1GPpB_UMBXcD1tnBYDBzxHvgg6mOs,9992 +networkx/drawing/nx_pylab.py,sha256=LhctsMIjl2Z1_wBXrWX5UcpcC3XLqW_pN2FfoYSltpw,105946 +networkx/drawing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/drawing/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/drawing/tests/__pycache__/test_agraph.cpython-312.pyc,, +networkx/drawing/tests/__pycache__/test_latex.cpython-312.pyc,, +networkx/drawing/tests/__pycache__/test_layout.cpython-312.pyc,, +networkx/drawing/tests/__pycache__/test_pydot.cpython-312.pyc,, +networkx/drawing/tests/__pycache__/test_pylab.cpython-312.pyc,, +networkx/drawing/tests/baseline/test_display_complex.png,sha256=3YPN6JEuyq1KEpRgSnH-2yECI81frU_5fw0VxHlwo9A,142315 +networkx/drawing/tests/baseline/test_display_empty_graph.png,sha256=XsOkBCZlt-55c40ElwEapBENVfmdc4TZKIF1ufs5Dzk,3257 +networkx/drawing/tests/baseline/test_display_house_with_colors.png,sha256=uiEx6_2RddHG-516OsRVM0AG7M5CGiAo4tpDgbgPKIU,26002 +networkx/drawing/tests/baseline/test_display_labels_and_colors.png,sha256=qjiFF5hVnkuofR_rXq8WfLAbL6cSt7fxMEloLCkZNtA,70446 +networkx/drawing/tests/baseline/test_display_shortest_path.png,sha256=an2msRHyeefLT8O_B210Xu21eXlWHWycPdIy6V3nvvQ,35897 +networkx/drawing/tests/baseline/test_house_with_colors.png,sha256=FQi9pIRFwjq4gvgB8cDdBHL5euQUJFw6sQlABf2kRVo,21918 +networkx/drawing/tests/test_agraph.py,sha256=G1JAjIyFGYxs4cwDekdMZjxIlsXTqfnYlESYQwfOsWY,8788 +networkx/drawing/tests/test_latex.py,sha256=YQjLxsZnx7_CX2sJ3nN0cUiJzC7p6UhyFN6ufbox5G0,8621 +networkx/drawing/tests/test_layout.py,sha256=2_wJfBcCcHwBm1JRrQiLMcnNWoaURX6F4LFcn5OPul4,23709 +networkx/drawing/tests/test_pydot.py,sha256=X9b66gWqMgdTEyRJ7Zmy5kL9cr22waI688K9BJUf4Bk,4973 +networkx/drawing/tests/test_pylab.py,sha256=066ytUBWk-HsFlKhGQ-jsrbNuwpYTJJdizxJOq0ZVLM,59298 +networkx/exception.py,sha256=hC8efPfIzOFo0jiWiQbTPaNKuNTuUwhp9RPw--pdv4U,3787 +networkx/generators/__init__.py,sha256=EoYB5c5ZE4rsNKZvl1TRQy2Vo2D3T2H-YunyD2i6sa0,1366 +networkx/generators/__pycache__/__init__.cpython-312.pyc,, +networkx/generators/__pycache__/atlas.cpython-312.pyc,, +networkx/generators/__pycache__/classic.cpython-312.pyc,, +networkx/generators/__pycache__/cographs.cpython-312.pyc,, +networkx/generators/__pycache__/community.cpython-312.pyc,, +networkx/generators/__pycache__/degree_seq.cpython-312.pyc,, +networkx/generators/__pycache__/directed.cpython-312.pyc,, +networkx/generators/__pycache__/duplication.cpython-312.pyc,, +networkx/generators/__pycache__/ego.cpython-312.pyc,, +networkx/generators/__pycache__/expanders.cpython-312.pyc,, +networkx/generators/__pycache__/geometric.cpython-312.pyc,, +networkx/generators/__pycache__/harary_graph.cpython-312.pyc,, 
+networkx/generators/__pycache__/internet_as_graphs.cpython-312.pyc,, +networkx/generators/__pycache__/intersection.cpython-312.pyc,, +networkx/generators/__pycache__/interval_graph.cpython-312.pyc,, +networkx/generators/__pycache__/joint_degree_seq.cpython-312.pyc,, +networkx/generators/__pycache__/lattice.cpython-312.pyc,, +networkx/generators/__pycache__/line.cpython-312.pyc,, +networkx/generators/__pycache__/mycielski.cpython-312.pyc,, +networkx/generators/__pycache__/nonisomorphic_trees.cpython-312.pyc,, +networkx/generators/__pycache__/random_clustered.cpython-312.pyc,, +networkx/generators/__pycache__/random_graphs.cpython-312.pyc,, +networkx/generators/__pycache__/small.cpython-312.pyc,, +networkx/generators/__pycache__/social.cpython-312.pyc,, +networkx/generators/__pycache__/spectral_graph_forge.cpython-312.pyc,, +networkx/generators/__pycache__/stochastic.cpython-312.pyc,, +networkx/generators/__pycache__/sudoku.cpython-312.pyc,, +networkx/generators/__pycache__/time_series.cpython-312.pyc,, +networkx/generators/__pycache__/trees.cpython-312.pyc,, +networkx/generators/__pycache__/triads.cpython-312.pyc,, +networkx/generators/atlas.dat.gz,sha256=c_xBbfAWSSNgd1HLdZ9K6B3rX2VQvyW-Wcht47dH5B0,8887 +networkx/generators/atlas.py,sha256=cbki_oYDi_7vniSesqJZ-PCEM3BvnzEdjSc3TA3s1xc,6955 +networkx/generators/classic.py,sha256=68lCnSeo50uV1yoc6ZvjnckR7lAbrhUdniyEogczvB4,32000 +networkx/generators/cographs.py,sha256=-WR4_yrNk_X5nj7egb7A22eKPVymOdIYM-IftSRH4WA,1891 +networkx/generators/community.py,sha256=_p_4OfItbg8nS0b3EvojCXZ8cESdC-0Gj67V5w2veuM,34911 +networkx/generators/degree_seq.py,sha256=vJE5MEo88O2p2CxjNFb-Wz_cNKVB-0oEct89CY1dlEo,30178 +networkx/generators/directed.py,sha256=oOu1mkXBvYByGxulwENnHFZBqJOm8VtvIuXndF5kiaI,18139 +networkx/generators/duplication.py,sha256=hmYAHJBez7WlfdVGGa288JFUBHoIUdVqEGCodApKOr4,5831 +networkx/generators/ego.py,sha256=ylW-UMvg9C8cqygw9vrBNLGAVZJT4U5EWbU4gaT_H1g,1894 +networkx/generators/expanders.py,sha256=4Q1pQQjvLJm00l2lYNe3r1S_1N9HM_w0NGDmUH3PcZ8,14460 +networkx/generators/geometric.py,sha256=6slgS0vraxql67AOP9lWzBrpNbx_OBZCoBwYuSkQQXg,39461 +networkx/generators/harary_graph.py,sha256=N6vzXKrW-ZU-xDc2ZTF_Gf7kb0LRQVRfK2oLBQvyVO8,6159 +networkx/generators/internet_as_graphs.py,sha256=Y_pQaGhe183X6dXH4ocqIK3DzXRz0oXE-AKwsL1yCHk,14172 +networkx/generators/intersection.py,sha256=EFm0AOjnqyp8KcT7kGWqANq-_vq9kQ0d_0DzVyQyP-o,4101 +networkx/generators/interval_graph.py,sha256=ZTmdgQbBx3M6sysGWXbGyngYYOC1TAXD3Ozkw4deQFw,2204 +networkx/generators/joint_degree_seq.py,sha256=nyp86NC_4XvzvwpwwzKrrCSz1i_4bESSDtVjWvpkWFg,24773 +networkx/generators/lattice.py,sha256=kVCvTahWPQGNbok6maXfaqGzm88UuxhP7D9BkKhGW1o,13500 +networkx/generators/line.py,sha256=dh9ywOotmHzdoxRgxJNJNpkNQkPNfslgSIzOHlyKQ9U,17542 +networkx/generators/mycielski.py,sha256=xBX2m77sCzumoH5cAGitksvEEW-ocbCnbdaN7fKUtVk,3314 +networkx/generators/nonisomorphic_trees.py,sha256=n57ywjiEOLg-aC7XoSJC7SSdFTobHbiPUxZ2uwIrhxA,5710 +networkx/generators/random_clustered.py,sha256=i_NdvvchHvsvbwgQtoWSY_pLwvhO9Lh02MSZXzgGb7c,4183 +networkx/generators/random_graphs.py,sha256=qi_AjT9Hx5M6ujgTe-DBVIsY9LwqPPZjuAHFMIaQOOc,51346 +networkx/generators/small.py,sha256=OvTPnBicfe6QyeTCwuBMS0KKZnfnmMmeZuYzG36OS5c,28298 +networkx/generators/social.py,sha256=IUVgWVMUmRaUG28U0KzB--0DtKLdCFDz54tkJ69W4ms,23437 +networkx/generators/spectral_graph_forge.py,sha256=9r6d9f5Y03KPX138sQTKWG3_RjeiYQVUJJWwCHqG4MA,4282 +networkx/generators/stochastic.py,sha256=Qg9vWm9EOug2OQVIHL_dZ5HrXc16lxnWyzX52KWNEPI,1981 
+networkx/generators/sudoku.py,sha256=kLM2AP0H4966uYiNO1oAFEmv5qBftU_bOfYucRxexM0,4288 +networkx/generators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/generators/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_atlas.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_classic.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_cographs.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_community.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_degree_seq.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_directed.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_duplication.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_ego.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_expanders.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_geometric.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_harary_graph.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_intersection.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_interval_graph.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_lattice.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_line.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_mycielski.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_random_clustered.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_random_graphs.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_small.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_stochastic.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_sudoku.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_time_series.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_trees.cpython-312.pyc,, +networkx/generators/tests/__pycache__/test_triads.cpython-312.pyc,, +networkx/generators/tests/test_atlas.py,sha256=nwXJL4O5jUqhTwqhkPxHY8s3KXHQTDEdsfbg4MsSzVQ,2530 +networkx/generators/tests/test_classic.py,sha256=I8sO6q67GyxztZuNlYNlLAe3n8rosGcE2FFngjniHPw,24218 +networkx/generators/tests/test_cographs.py,sha256=Khqvx15VNWHjNkMeEpsio3oJAi8KoiYqfTqKVbQWT9U,458 +networkx/generators/tests/test_community.py,sha256=FGcDo3Ajb-yYc5kUkFbVfOJVMG-YppbAtjgBPcVzjLc,11311 +networkx/generators/tests/test_degree_seq.py,sha256=UO-1Slh368Hm0tblVBN_M3fJu5F7Img_IdVfXOJulVI,7306 +networkx/generators/tests/test_directed.py,sha256=oF67etbl4vc0d36KlciCSfPYouQL4Uhg19idu0cwwxI,5758 +networkx/generators/tests/test_duplication.py,sha256=UdIGDF_fishanWid1xO_aH4NDfie8xpIqd26qndhOqI,3155 +networkx/generators/tests/test_ego.py,sha256=8v1Qjmkli9wIhhUuqzgqCzysr0C1Z2C3oJMCUoNvgY4,1327 +networkx/generators/tests/test_expanders.py,sha256=uRPqC4b5roUHckY1Gx82zyC9jq_HCEh32AiCdqAmjvk,5949 +networkx/generators/tests/test_geometric.py,sha256=v8alTMSjQvCCcNdqu7On8ZrEYEWBNYfuT5ywR2qz2lU,18091 +networkx/generators/tests/test_harary_graph.py,sha256=GiX5LXXJaNxzjvd-Nyw_QuARzbFGkA6zE1R1eX8mclw,4936 +networkx/generators/tests/test_internet_as_graphs.py,sha256=QmzkOnWg9bcSrv31UcaD6Cko55AV-GPLLY5Aqb_Dmvs,6795 
+networkx/generators/tests/test_intersection.py,sha256=hcIit5fKfOn3VjMhz9KqovZK9tzxZfmC6ezvA7gZAvM,819 +networkx/generators/tests/test_interval_graph.py,sha256=JYMi-QMkJQdBU9uOdfm0Xr6MEYqIbhU5oSDa6D3tSb0,4277 +networkx/generators/tests/test_joint_degree_seq.py,sha256=8TXTZI3Um2gBXtP-4yhGKf9vCi78-NVmWZw9r9WG3F8,4270 +networkx/generators/tests/test_lattice.py,sha256=q4Ri-dH9mKhfq0PNX9xMeYRUiP0JlPBr7piSruZlFlg,9290 +networkx/generators/tests/test_line.py,sha256=vXncJuny2j5ulCJyT01Rt1tTwPib4XelS3dJDdJXjx0,10378 +networkx/generators/tests/test_mycielski.py,sha256=fwZLO1ybcltRy6TzCel8tPBil1oZWv9QSXs779H6Xt0,946 +networkx/generators/tests/test_nonisomorphic_trees.py,sha256=WLuYrjujRstHd48EtM-sWG0HjI6YY2fv03S9z6MPeDg,2034 +networkx/generators/tests/test_random_clustered.py,sha256=SalHqWvpnXA3QrDRMjLx15dk2c4Us8Ck52clUERoUI8,1297 +networkx/generators/tests/test_random_graphs.py,sha256=RTrKahiDHdXIb2ScFzQk3vrxncnMOE3W5LyJfIPvuKc,18925 +networkx/generators/tests/test_small.py,sha256=-gnVq45YpfMk_qIn7lGT1HiJIGuAiC6V_yeVclFuMe0,7103 +networkx/generators/tests/test_spectral_graph_forge.py,sha256=x4jyTiQiydaUPWYaGsNFsIB47PAzSSwQYCNXGa2B4SU,1594 +networkx/generators/tests/test_stochastic.py,sha256=f-5KD3RpoQf369gXHH7KGebE19g5lCkXR_alcwmFm_s,2179 +networkx/generators/tests/test_sudoku.py,sha256=dgOmk-B7MxCVkbHdZzsLZppQ61FAArVy4McSVL8Afzo,1968 +networkx/generators/tests/test_time_series.py,sha256=rgmFcitlKa_kF6TzJ2ze91lSmNJlqjhvgrYet0AUZx8,2230 +networkx/generators/tests/test_trees.py,sha256=Pvh0MvTKaRuZuwWL-wpJIC0zlBAcnTirpSLJi-9c7qc,7006 +networkx/generators/tests/test_triads.py,sha256=K8anVEP8R90Y172IrKIOrYRWRJBGeqxNqU9isX7Ybxs,333 +networkx/generators/time_series.py,sha256=_DMiY9X95O_9sK2BSeeTb2yMWfStBwKFWwn6FUOXN4Q,2439 +networkx/generators/trees.py,sha256=KvKYhZwwSl1KKMyKBe_6G_UTpLwLLeqdWwRkWJ2Xxz4,36517 +networkx/generators/triads.py,sha256=7kScTf3ITDi3qsSa-IvGMpa9diEaFwQnRuIf3Tv4UBI,2452 +networkx/lazy_imports.py,sha256=pZWn3f6NCY-lGtViKQh11NJ5Cn-cXBKhvacMycg3844,5764 +networkx/linalg/__init__.py,sha256=7iyNZ_YYBnlsW8zSfhUgvEkywOrUWfpIuyS86ZOKlG8,568 +networkx/linalg/__pycache__/__init__.cpython-312.pyc,, +networkx/linalg/__pycache__/algebraicconnectivity.cpython-312.pyc,, +networkx/linalg/__pycache__/attrmatrix.cpython-312.pyc,, +networkx/linalg/__pycache__/bethehessianmatrix.cpython-312.pyc,, +networkx/linalg/__pycache__/graphmatrix.cpython-312.pyc,, +networkx/linalg/__pycache__/laplacianmatrix.cpython-312.pyc,, +networkx/linalg/__pycache__/modularitymatrix.cpython-312.pyc,, +networkx/linalg/__pycache__/spectrum.cpython-312.pyc,, +networkx/linalg/algebraicconnectivity.py,sha256=OsgczsN7t-P60HRENN1Ge7VOEUE27i1dlxtCU-BOG3M,21118 +networkx/linalg/attrmatrix.py,sha256=gG2cZZm4N7PQhXuq5nxZq1S_OmW61641rdRnrDiX_ws,15900 +networkx/linalg/bethehessianmatrix.py,sha256=Ii4NX6mo90W3MppCRcYn9dRW_MsEkVdA9TH6x7JhX8o,2697 +networkx/linalg/graphmatrix.py,sha256=NIs2uWGS_8lJJ5IQ8Og9aIWHawghtlCDWifqOIKV2-c,5623 +networkx/linalg/laplacianmatrix.py,sha256=CaV0FquZVTVbA7mLKf3tPPc0qGhdM6HP-I9gd4FOsKE,17360 +networkx/linalg/modularitymatrix.py,sha256=R_VITtgIkGenxlsCLN4u6CYxj3_HiPXfeU29yarntRo,4706 +networkx/linalg/spectrum.py,sha256=aRY7ApYv5HxrO_4O8brxpZRw3SJU3fYzlgMwhEIXcrc,4215 +networkx/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/linalg/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-312.pyc,, +networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-312.pyc,, 
+networkx/linalg/tests/__pycache__/test_bethehessian.cpython-312.pyc,, +networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-312.pyc,, +networkx/linalg/tests/__pycache__/test_laplacian.cpython-312.pyc,, +networkx/linalg/tests/__pycache__/test_modularity.cpython-312.pyc,, +networkx/linalg/tests/__pycache__/test_spectrum.cpython-312.pyc,, +networkx/linalg/tests/test_algebraic_connectivity.py,sha256=uXq7h1fCmvcUyJhh3DujyZvJH2SGlgo-JRdLbff3bKI,13735 +networkx/linalg/tests/test_attrmatrix.py,sha256=I8p_ME5Cq__SZJoxla1y5ynGaa_b4UruZRcuzPOh_wE,2833 +networkx/linalg/tests/test_bethehessian.py,sha256=WQM3H_y1GWrtM2B659HYQ2vvYGs-4H9V1qeo3Bx75B4,1268 +networkx/linalg/tests/test_graphmatrix.py,sha256=6y-DSezX5slSms8LtEJcoggI4wP_duBqodNYWKKvM4w,8655 +networkx/linalg/tests/test_laplacian.py,sha256=sIJ64KVVCUcK3b4ohF-443222vR64myIlV6addwwfWs,13953 +networkx/linalg/tests/test_modularity.py,sha256=Xs7iaUr_QqcxG0t0xHx54iYbM2VyQNmFeANovTMJomE,3056 +networkx/linalg/tests/test_spectrum.py,sha256=Hrl3FE0Z0u4urwY5eNMx0VcH4IRTYgudT04EmfhkosA,2769 +networkx/readwrite/__init__.py,sha256=TvSbnGEHQ5F9CY2tkpjWYOyrUj6BeW3sc6P4_IczbKA,561 +networkx/readwrite/__pycache__/__init__.cpython-312.pyc,, +networkx/readwrite/__pycache__/adjlist.cpython-312.pyc,, +networkx/readwrite/__pycache__/edgelist.cpython-312.pyc,, +networkx/readwrite/__pycache__/gexf.cpython-312.pyc,, +networkx/readwrite/__pycache__/gml.cpython-312.pyc,, +networkx/readwrite/__pycache__/graph6.cpython-312.pyc,, +networkx/readwrite/__pycache__/graphml.cpython-312.pyc,, +networkx/readwrite/__pycache__/leda.cpython-312.pyc,, +networkx/readwrite/__pycache__/multiline_adjlist.cpython-312.pyc,, +networkx/readwrite/__pycache__/p2g.cpython-312.pyc,, +networkx/readwrite/__pycache__/pajek.cpython-312.pyc,, +networkx/readwrite/__pycache__/sparse6.cpython-312.pyc,, +networkx/readwrite/__pycache__/text.cpython-312.pyc,, +networkx/readwrite/adjlist.py,sha256=0_GZdm55sfgW1JBEVNedOAyD92aNTBf_VS1c2mssuJM,8438 +networkx/readwrite/edgelist.py,sha256=XwemCOs-xNet3mVIGpxol3RgFVJVa1iXsgBonAHDmug,14292 +networkx/readwrite/gexf.py,sha256=Gu7FNoAsZF-9FzAKddpgOaCtMMv-yekjKvTHIYX6WVw,39638 +networkx/readwrite/gml.py,sha256=JcP0hsDKwpZOPzZeXdoEiKBBNd94Ox8y7_cjT5AVBT4,31193 +networkx/readwrite/graph6.py,sha256=dED6yWelHARFvyKniX8xSTtqj1-asDs1vUBWuZOG9cA,11684 +networkx/readwrite/graphml.py,sha256=sb-D5I5gmqyNj_TSNUvLn-96NBXtOAPFMEKvT3mkdTw,39326 +networkx/readwrite/json_graph/__init__.py,sha256=37XJPMmilcwwo8KqouLWUly7Yv5tZ7IKraMHbBRx3fI,677 +networkx/readwrite/json_graph/__pycache__/__init__.cpython-312.pyc,, +networkx/readwrite/json_graph/__pycache__/adjacency.cpython-312.pyc,, +networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-312.pyc,, +networkx/readwrite/json_graph/__pycache__/node_link.cpython-312.pyc,, +networkx/readwrite/json_graph/__pycache__/tree.cpython-312.pyc,, +networkx/readwrite/json_graph/adjacency.py,sha256=WM6fdncV87WDLPOfF-IbOlOOBMX0utUjJ09UsxtwRAo,4716 +networkx/readwrite/json_graph/cytoscape.py,sha256=NaHGbwLuQCx_GB5R7Uag2ACKSmrpCFQKsRk3y9hE6Io,5841 +networkx/readwrite/json_graph/node_link.py,sha256=qhMRYPlqAMsVBofkbT0exdn78Ew3z-6ZJtlX9QbpIFg,10829 +networkx/readwrite/json_graph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-312.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-312.pyc,, 
+networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-312.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-312.pyc,, +networkx/readwrite/json_graph/tests/test_adjacency.py,sha256=jueQE3Z_W5BZuCjr0hEsOWSfoQ2fP51p0o0m7IcXUuE,2456 +networkx/readwrite/json_graph/tests/test_cytoscape.py,sha256=vFoDzcSRI9THlmp4Fu2HHhIF9AUmECWs5mftVWjaWWs,2044 +networkx/readwrite/json_graph/tests/test_node_link.py,sha256=q0mqy5fqZFxxHQb18tmFXUOOp_oTP1Ye5bEWzTnXEFo,6468 +networkx/readwrite/json_graph/tests/test_tree.py,sha256=zBXv3_db2XGxFs3XQ35btNf_ku52aLXXiHZmmX4ixAs,1352 +networkx/readwrite/json_graph/tree.py,sha256=K4rF4Kds4g0JhgcPTrrR_I3Pswpze8yCVH4M-WF9nn0,3851 +networkx/readwrite/leda.py,sha256=mtuIRKecfykiInePt-oIw91JB1vIgNXuUEbuBInO7Hs,2852 +networkx/readwrite/multiline_adjlist.py,sha256=TyvVZih3DIYaCQ6MC7XF8GuGBfcYIX-q-ccYu2Qs05M,11384 +networkx/readwrite/p2g.py,sha256=wDPg_3u42rlsv8pFtVLbZIjo-BN6dd95jCxFAoEC_6E,3253 +networkx/readwrite/pajek.py,sha256=0Xli27YbjyW5Ny6-qqVOtBmhZkPj4i3cJPvwjU3mMjM,8746 +networkx/readwrite/sparse6.py,sha256=kGEbDmnK1qcR5I6LZnPKYXLe7Qp6esT6gftkwSt1xcs,10442 +networkx/readwrite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_adjlist.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_edgelist.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_gexf.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_gml.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_graph6.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_graphml.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_leda.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_p2g.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_pajek.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_sparse6.cpython-312.pyc,, +networkx/readwrite/tests/__pycache__/test_text.cpython-312.pyc,, +networkx/readwrite/tests/test_adjlist.py,sha256=t5RL85eDQFPUIdh8W4kozY_P7PMJU2LwSXjWZGE-4Aw,10134 +networkx/readwrite/tests/test_edgelist.py,sha256=cmOqVSpVO-FTdFRUAz40_e2sSmmB9xV6uYmfvw5cNhQ,10113 +networkx/readwrite/tests/test_gexf.py,sha256=q5LnCgZuxUCqAYNcyi1OyWwSgkPJdxMQkBWgA6S-d8U,20262 +networkx/readwrite/tests/test_gml.py,sha256=SIuCCSoH9psZdjKkrJfdGHbMROhUsfms70B71rnozUU,21399 +networkx/readwrite/tests/test_graph6.py,sha256=WMQmTgrI_VDcli1Gl5cjM17EFKc8_AkwoR14FLS1sXQ,6559 +networkx/readwrite/tests/test_graphml.py,sha256=TTKhxCVD5bJBlfxUEqZuuqy9YMOePoFgTIN1JxaDfSs,67573 +networkx/readwrite/tests/test_leda.py,sha256=_5F4nLLQ1oAZQMZtTQoFncZL0Oc-IsztFBglEdQeH3k,1392 +networkx/readwrite/tests/test_p2g.py,sha256=drsdod5amV9TGCk-qE2RwsvAop78IKEI1WguVFfd9rs,1320 +networkx/readwrite/tests/test_pajek.py,sha256=-bT-y26OmWgpLcvk-qvVfOEa-DTcQPwV2qKB99roOrk,4629 +networkx/readwrite/tests/test_sparse6.py,sha256=zpd0hUPtOqwAJiuke4FXWFaQVa6Fa47dgIdHpCWXQ6U,5288 +networkx/readwrite/tests/test_text.py,sha256=x1N97hD31HPkj9Wn2PYti5-gcwaFNnStkaN_38HKnIg,55319 +networkx/readwrite/text.py,sha256=u62a-zG_7NpJD4Bh6kBZbUntORVM4XZId9zGX9wJBGQ,29140 +networkx/relabel.py,sha256=0HptAQOBToKhLZzxscd6FQpzVCNMlYmiHjHul69ct8o,10300 +networkx/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/tests/__pycache__/test_all_random_functions.cpython-312.pyc,, +networkx/tests/__pycache__/test_convert.cpython-312.pyc,, 
+networkx/tests/__pycache__/test_convert_numpy.cpython-312.pyc,, +networkx/tests/__pycache__/test_convert_pandas.cpython-312.pyc,, +networkx/tests/__pycache__/test_convert_scipy.cpython-312.pyc,, +networkx/tests/__pycache__/test_exceptions.cpython-312.pyc,, +networkx/tests/__pycache__/test_import.cpython-312.pyc,, +networkx/tests/__pycache__/test_lazy_imports.cpython-312.pyc,, +networkx/tests/__pycache__/test_relabel.cpython-312.pyc,, +networkx/tests/test_all_random_functions.py,sha256=Jc2tEOEEqTHwt1ApsH13CPxtin3X6HB654hALnIuByM,8623 +networkx/tests/test_convert.py,sha256=SoIVrqJFF9Gu9Jff_apfbpqg8QhkfC6QW4qzoSM-ukM,12731 +networkx/tests/test_convert_numpy.py,sha256=wivMBF5pmDd2FdrHX72J4Xt26IxTIfb12G-t-TWch40,19032 +networkx/tests/test_convert_pandas.py,sha256=2LrQrGkxdlvEZxKmMvyptUvOsTsAcbo8u6siSVbnV3M,13346 +networkx/tests/test_convert_scipy.py,sha256=IO9Xfke5Vk0SC4c7UR4YbQMjVZBuADQBt_wlMaOxTSQ,10381 +networkx/tests/test_exceptions.py,sha256=XYkpPzqMepSw3MPRUJN5LcFsUsy3YT_fiRDhm0OeAeQ,927 +networkx/tests/test_import.py,sha256=Gm4ujfH9JkQtDrSjOlwXXXUuubI057wskKLCkF6Z92k,220 +networkx/tests/test_lazy_imports.py,sha256=PylxEvRtD8ekwH-1M1sjnV_4uyhCYQl4WvlyF6ah0wo,2675 +networkx/tests/test_relabel.py,sha256=dffbjiW_VUAQe7iD8knFS_KepUITt0F6xuwf7daWwKw,14517 +networkx/utils/__init__.py,sha256=7pxleRNpBWuL3FEQz3CzKLn17b6_eSwkM7dqnL1okDk,302 +networkx/utils/__pycache__/__init__.cpython-312.pyc,, +networkx/utils/__pycache__/backends.cpython-312.pyc,, +networkx/utils/__pycache__/configs.cpython-312.pyc,, +networkx/utils/__pycache__/decorators.cpython-312.pyc,, +networkx/utils/__pycache__/heaps.cpython-312.pyc,, +networkx/utils/__pycache__/mapped_queue.cpython-312.pyc,, +networkx/utils/__pycache__/misc.cpython-312.pyc,, +networkx/utils/__pycache__/random_sequence.cpython-312.pyc,, +networkx/utils/__pycache__/rcm.cpython-312.pyc,, +networkx/utils/__pycache__/union_find.cpython-312.pyc,, +networkx/utils/backends.py,sha256=twjFxZL0-xSYULILSdEpJ79iA35nD1TndgxJ0r0gUNw,95569 +networkx/utils/configs.py,sha256=B5dlLGW_UvUVr7Ui5eM3Fbk2qeCmzZ4nUDq_2lWC-dM,15234 +networkx/utils/decorators.py,sha256=rRgAZw6glxOBYN6n5kkbQ7RS43EH3eVbJ5F91-uk0IM,44713 +networkx/utils/heaps.py,sha256=98tdylWKqrLQKdNlpjT2NvIpuOF2mT2Lhh5OzVg3Hu8,10355 +networkx/utils/mapped_queue.py,sha256=WdIRk27D_ArmPs9tdpvQLQCV4Tmus212BQhxsFIMYgk,10184 +networkx/utils/misc.py,sha256=TxqWVmAgVNeYEdVOJC2td3mLfqLBzNM8vWrWWxSJOYE,21257 +networkx/utils/random_sequence.py,sha256=KzKh0BRMri0MBZlzxHNMl3qRTy2DnBexW3eDzmxKab4,4237 +networkx/utils/rcm.py,sha256=t2LJq6BG39JnPl_353nM8eDfGWy3LVL8BdrBZCcAy2g,4618 +networkx/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/utils/tests/__pycache__/__init__.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test__init.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_backends.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_config.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_decorators.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_heaps.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_mapped_queue.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_misc.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_random_sequence.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_rcm.cpython-312.pyc,, +networkx/utils/tests/__pycache__/test_unionfind.cpython-312.pyc,, +networkx/utils/tests/test__init.py,sha256=QE0i-lNE4pG2eYjB2mZ0uw7jPD-7TdL7Y9p73JoWQmo,363 
+networkx/utils/tests/test_backends.py,sha256=cXUJv9SJgW2xcQGyNe3BnVBu_ECILVORIvM0ANEVkVo,6523 +networkx/utils/tests/test_config.py,sha256=eBAiwADWJgCAIyJjgF3-zn89VPIGftiWMWyccpFnyMU,8717 +networkx/utils/tests/test_decorators.py,sha256=dm3b5yiQPlnlT_4pSm0FwK-xBGV9dcnhv14Vh9Jiz1o,14050 +networkx/utils/tests/test_heaps.py,sha256=qCuWMzpcMH1Gwu014CAams78o151QD5YL0mB1fz16Yw,3711 +networkx/utils/tests/test_mapped_queue.py,sha256=l1Nguzz68Fv91FnAT7y7B0GXSoje9uoWiObHo7TliGM,7354 +networkx/utils/tests/test_misc.py,sha256=76R5wevLwATA8BCQwGwP1_U4asXKYntW4m-Z6fBeYbc,8683 +networkx/utils/tests/test_random_sequence.py,sha256=Ou-IeCFybibZuycoin5gUQzzC-iy5yanZFmrqvdGt6Q,925 +networkx/utils/tests/test_rcm.py,sha256=UvUAkgmQMGk_Nn94TJyQsle4A5SLQFqMQWld1tiQ2lk,1421 +networkx/utils/tests/test_unionfind.py,sha256=j-DF5XyeJzq1hoeAgN5Nye2Au7EPD040t8oS4Aw2IwU,1579 +networkx/utils/union_find.py,sha256=NxKlBlyS71A1Wlnt28L-wyZoI9ExZvJth_0e2XSVris,3338 diff --git a/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/REQUESTED b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/WHEEL new file mode 100644 index 0000000..e7fa31b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/entry_points.txt b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/entry_points.txt new file mode 100644 index 0000000..377282a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[networkx.backends] +nx_loopback = networkx.classes.tests.dispatch_interface:backend_interface diff --git a/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/licenses/LICENSE.txt b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000..0bf9a8f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/licenses/LICENSE.txt @@ -0,0 +1,37 @@ +NetworkX is distributed with the 3-clause BSD license. + +:: + + Copyright (c) 2004-2025, NetworkX Developers + Aric Hagberg + Dan Schult + Pieter Swart + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NetworkX Developers nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/top_level.txt b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/top_level.txt new file mode 100644 index 0000000..4d07dfe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/top_level.txt @@ -0,0 +1 @@ +networkx diff --git a/.venv/lib/python3.12/site-packages/networkx/__init__.py b/.venv/lib/python3.12/site-packages/networkx/__init__.py new file mode 100644 index 0000000..bed45be --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/__init__.py @@ -0,0 +1,53 @@ +""" +NetworkX +======== + +NetworkX is a Python package for the creation, manipulation, and study of the +structure, dynamics, and functions of complex networks. + +See https://networkx.org for complete documentation. +""" + +__version__ = "3.5" + + +# These are imported in order as listed +from networkx.lazy_imports import _lazy_import + +from networkx.exception import * + +from networkx import utils +from networkx.utils import _clear_cache, _dispatchable + +# load_and_call entry_points, set configs +config = utils.backends._set_configs_from_environment() +utils.config = utils.configs.config = config # type: ignore[attr-defined] + +from networkx import classes +from networkx.classes import filters +from networkx.classes import * + +from networkx import convert +from networkx.convert import * + +from networkx import convert_matrix +from networkx.convert_matrix import * + +from networkx import relabel +from networkx.relabel import * + +from networkx import generators +from networkx.generators import * + +from networkx import readwrite +from networkx.readwrite import * + +# Need to test with SciPy, when available +from networkx import algorithms +from networkx.algorithms import * + +from networkx import linalg +from networkx.linalg import * + +from networkx import drawing +from networkx.drawing import * diff --git a/.venv/lib/python3.12/site-packages/networkx/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..cf3c8b8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/__pycache__/conftest.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/__pycache__/conftest.cpython-312.pyc new file mode 100644 index 0000000..2668f9e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/__pycache__/conftest.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/__pycache__/convert.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/__pycache__/convert.cpython-312.pyc new file mode 100644 index 0000000..f1900e5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/__pycache__/convert.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/__pycache__/convert_matrix.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/__pycache__/convert_matrix.cpython-312.pyc new file mode 100644 index 0000000..73f4e50 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/__pycache__/convert_matrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/__pycache__/exception.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/__pycache__/exception.cpython-312.pyc new file mode 100644 index 0000000..8ece7bc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/__pycache__/exception.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/__pycache__/lazy_imports.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/__pycache__/lazy_imports.cpython-312.pyc new file mode 100644 index 0000000..97cee14 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/__pycache__/lazy_imports.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/__pycache__/relabel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/__pycache__/relabel.cpython-312.pyc new file mode 100644 index 0000000..6b85571 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/__pycache__/relabel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/__init__.py new file mode 100644 index 0000000..56bfb14 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/__init__.py @@ -0,0 +1,133 @@ +from networkx.algorithms.assortativity import * +from networkx.algorithms.asteroidal import * +from networkx.algorithms.boundary import * +from networkx.algorithms.broadcasting import * +from networkx.algorithms.bridges import * +from networkx.algorithms.chains import * +from networkx.algorithms.centrality import * +from networkx.algorithms.chordal import * +from networkx.algorithms.cluster import * +from networkx.algorithms.clique import * +from networkx.algorithms.communicability_alg import * +from networkx.algorithms.components import * +from networkx.algorithms.coloring import * +from networkx.algorithms.core import * +from networkx.algorithms.covering import * +from networkx.algorithms.cycles import * +from networkx.algorithms.cuts import * +from networkx.algorithms.d_separation import * +from networkx.algorithms.dag import * +from networkx.algorithms.distance_measures import * +from networkx.algorithms.distance_regular import * +from networkx.algorithms.dominance import * +from networkx.algorithms.dominating import * +from networkx.algorithms.efficiency_measures import * +from networkx.algorithms.euler import * +from networkx.algorithms.graphical import * +from networkx.algorithms.hierarchy import * +from networkx.algorithms.hybrid import * +from networkx.algorithms.link_analysis import * +from networkx.algorithms.link_prediction import * +from networkx.algorithms.lowest_common_ancestors import * +from networkx.algorithms.isolate import * +from networkx.algorithms.matching import * +from networkx.algorithms.minors import * +from networkx.algorithms.mis import * +from networkx.algorithms.moral import * +from networkx.algorithms.non_randomness import * +from networkx.algorithms.operators import * +from networkx.algorithms.planarity import * +from networkx.algorithms.planar_drawing import * +from networkx.algorithms.polynomials import * +from networkx.algorithms.reciprocity import * +from networkx.algorithms.regular import * +from 
networkx.algorithms.richclub import * +from networkx.algorithms.shortest_paths import * +from networkx.algorithms.similarity import * +from networkx.algorithms.graph_hashing import * +from networkx.algorithms.simple_paths import * +from networkx.algorithms.smallworld import * +from networkx.algorithms.smetric import * +from networkx.algorithms.structuralholes import * +from networkx.algorithms.sparsifiers import * +from networkx.algorithms.summarization import * +from networkx.algorithms.swap import * +from networkx.algorithms.time_dependent import * +from networkx.algorithms.traversal import * +from networkx.algorithms.triads import * +from networkx.algorithms.vitality import * +from networkx.algorithms.voronoi import * +from networkx.algorithms.walks import * +from networkx.algorithms.wiener import * + +# Make certain subpackages available to the user as direct imports from +# the `networkx` namespace. +from networkx.algorithms import approximation +from networkx.algorithms import assortativity +from networkx.algorithms import bipartite +from networkx.algorithms import node_classification +from networkx.algorithms import centrality +from networkx.algorithms import chordal +from networkx.algorithms import cluster +from networkx.algorithms import clique +from networkx.algorithms import components +from networkx.algorithms import connectivity +from networkx.algorithms import community +from networkx.algorithms import coloring +from networkx.algorithms import flow +from networkx.algorithms import isomorphism +from networkx.algorithms import link_analysis +from networkx.algorithms import lowest_common_ancestors +from networkx.algorithms import operators +from networkx.algorithms import shortest_paths +from networkx.algorithms import tournament +from networkx.algorithms import traversal +from networkx.algorithms import tree + +# Make certain functions from some of the previous subpackages available +# to the user as direct imports from the `networkx` namespace. 
+from networkx.algorithms.bipartite import complete_bipartite_graph +from networkx.algorithms.bipartite import is_bipartite +from networkx.algorithms.bipartite import projected_graph +from networkx.algorithms.connectivity import all_pairs_node_connectivity +from networkx.algorithms.connectivity import all_node_cuts +from networkx.algorithms.connectivity import average_node_connectivity +from networkx.algorithms.connectivity import edge_connectivity +from networkx.algorithms.connectivity import edge_disjoint_paths +from networkx.algorithms.connectivity import k_components +from networkx.algorithms.connectivity import k_edge_components +from networkx.algorithms.connectivity import k_edge_subgraphs +from networkx.algorithms.connectivity import k_edge_augmentation +from networkx.algorithms.connectivity import is_k_edge_connected +from networkx.algorithms.connectivity import minimum_edge_cut +from networkx.algorithms.connectivity import minimum_node_cut +from networkx.algorithms.connectivity import node_connectivity +from networkx.algorithms.connectivity import node_disjoint_paths +from networkx.algorithms.connectivity import stoer_wagner +from networkx.algorithms.flow import capacity_scaling +from networkx.algorithms.flow import cost_of_flow +from networkx.algorithms.flow import gomory_hu_tree +from networkx.algorithms.flow import max_flow_min_cost +from networkx.algorithms.flow import maximum_flow +from networkx.algorithms.flow import maximum_flow_value +from networkx.algorithms.flow import min_cost_flow +from networkx.algorithms.flow import min_cost_flow_cost +from networkx.algorithms.flow import minimum_cut +from networkx.algorithms.flow import minimum_cut_value +from networkx.algorithms.flow import network_simplex +from networkx.algorithms.isomorphism import could_be_isomorphic +from networkx.algorithms.isomorphism import fast_could_be_isomorphic +from networkx.algorithms.isomorphism import faster_could_be_isomorphic +from networkx.algorithms.isomorphism import is_isomorphic +from networkx.algorithms.isomorphism.vf2pp import * +from networkx.algorithms.tree.branchings import maximum_branching +from networkx.algorithms.tree.branchings import maximum_spanning_arborescence +from networkx.algorithms.tree.branchings import minimum_branching +from networkx.algorithms.tree.branchings import minimum_spanning_arborescence +from networkx.algorithms.tree.branchings import ArborescenceIterator +from networkx.algorithms.tree.coding import * +from networkx.algorithms.tree.decomposition import * +from networkx.algorithms.tree.mst import * +from networkx.algorithms.tree.operations import * +from networkx.algorithms.tree.recognition import * +from networkx.algorithms.tournament import is_tournament diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..add085d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/asteroidal.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/asteroidal.cpython-312.pyc new file mode 100644 index 0000000..5a8296a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/asteroidal.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/boundary.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/boundary.cpython-312.pyc new file mode 100644 index 0000000..6cc35fb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/boundary.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/bridges.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/bridges.cpython-312.pyc new file mode 100644 index 0000000..ff01790 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/bridges.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/broadcasting.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/broadcasting.cpython-312.pyc new file mode 100644 index 0000000..09fa4e3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/broadcasting.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/chains.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/chains.cpython-312.pyc new file mode 100644 index 0000000..ac33ea7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/chains.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/chordal.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/chordal.cpython-312.pyc new file mode 100644 index 0000000..057ef97 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/chordal.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/clique.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/clique.cpython-312.pyc new file mode 100644 index 0000000..32144d8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/clique.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cluster.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cluster.cpython-312.pyc new file mode 100644 index 0000000..92872f5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cluster.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/communicability_alg.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/communicability_alg.cpython-312.pyc new file mode 100644 index 0000000..4f644c0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/communicability_alg.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/core.cpython-312.pyc new file mode 100644 index 0000000..98402bb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/core.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/covering.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/covering.cpython-312.pyc new file mode 
100644 index 0000000..bd0cbee Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/covering.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cuts.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cuts.cpython-312.pyc new file mode 100644 index 0000000..4ee1dd2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cuts.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cycles.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cycles.cpython-312.pyc new file mode 100644 index 0000000..5029675 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/cycles.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/d_separation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/d_separation.cpython-312.pyc new file mode 100644 index 0000000..a53d1a6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/d_separation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dag.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dag.cpython-312.pyc new file mode 100644 index 0000000..7b8576f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dag.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/distance_measures.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/distance_measures.cpython-312.pyc new file mode 100644 index 0000000..ff1029e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/distance_measures.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/distance_regular.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/distance_regular.cpython-312.pyc new file mode 100644 index 0000000..829b376 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/distance_regular.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dominance.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dominance.cpython-312.pyc new file mode 100644 index 0000000..7128df6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dominance.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dominating.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dominating.cpython-312.pyc new file mode 100644 index 0000000..5633734 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/dominating.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/efficiency_measures.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/efficiency_measures.cpython-312.pyc new file mode 100644 index 0000000..4fbae28 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/efficiency_measures.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/euler.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/euler.cpython-312.pyc new file mode 100644 index 0000000..c4fe65f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/euler.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/graph_hashing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/graph_hashing.cpython-312.pyc new file mode 100644 index 0000000..469e35e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/graph_hashing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/graphical.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/graphical.cpython-312.pyc new file mode 100644 index 0000000..abe31e0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/graphical.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/hierarchy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/hierarchy.cpython-312.pyc new file mode 100644 index 0000000..ce9a787 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/hierarchy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/hybrid.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/hybrid.cpython-312.pyc new file mode 100644 index 0000000..5aeff0d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/hybrid.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/isolate.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/isolate.cpython-312.pyc new file mode 100644 index 0000000..d78b896 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/isolate.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/link_prediction.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/link_prediction.cpython-312.pyc new file mode 100644 index 0000000..82b4114 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/link_prediction.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-312.pyc new file mode 100644 index 0000000..2c2c01d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/matching.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/matching.cpython-312.pyc new file mode 100644 index 0000000..c9316a0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/matching.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/mis.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/mis.cpython-312.pyc new file mode 100644 index 0000000..3c1c8ce Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/mis.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/moral.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/moral.cpython-312.pyc new file mode 100644 index 0000000..0b5ae00 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/moral.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/node_classification.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/node_classification.cpython-312.pyc new file mode 100644 index 0000000..c023823 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/node_classification.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/non_randomness.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/non_randomness.cpython-312.pyc new file mode 100644 index 0000000..6e0798b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/non_randomness.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/planar_drawing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/planar_drawing.cpython-312.pyc new file mode 100644 index 0000000..026a588 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/planar_drawing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/planarity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/planarity.cpython-312.pyc new file mode 100644 index 0000000..25c9afa Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/planarity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/polynomials.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/polynomials.cpython-312.pyc new file mode 100644 index 0000000..d25f863 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/polynomials.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/reciprocity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/reciprocity.cpython-312.pyc new file mode 100644 index 0000000..9c439a1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/reciprocity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/regular.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/regular.cpython-312.pyc new file mode 100644 index 0000000..ceac357 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/regular.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/richclub.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/richclub.cpython-312.pyc new file mode 100644 index 0000000..6038a8b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/richclub.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/similarity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/similarity.cpython-312.pyc new file mode 100644 index 0000000..6e4b450 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/similarity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-312.pyc new file mode 100644 index 0000000..4dfa8b5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/smallworld.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/smallworld.cpython-312.pyc new file mode 100644 index 0000000..35c1609 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/smallworld.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/smetric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/smetric.cpython-312.pyc new file mode 100644 index 0000000..1fd52f1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/smetric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/sparsifiers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/sparsifiers.cpython-312.pyc new file mode 100644 index 0000000..0fe826e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/sparsifiers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/structuralholes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/structuralholes.cpython-312.pyc new file mode 100644 index 0000000..de1a936 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/structuralholes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/summarization.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/summarization.cpython-312.pyc new file mode 100644 index 0000000..ff7191d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/summarization.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/swap.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/swap.cpython-312.pyc new file mode 100644 index 0000000..9eacf1c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/swap.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/threshold.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/threshold.cpython-312.pyc new file mode 100644 index 0000000..edb9fcb Binary files 
/dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/threshold.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/time_dependent.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/time_dependent.cpython-312.pyc new file mode 100644 index 0000000..ce55937 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/time_dependent.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/tournament.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/tournament.cpython-312.pyc new file mode 100644 index 0000000..60c5b3f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/tournament.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/triads.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/triads.cpython-312.pyc new file mode 100644 index 0000000..c49a4d3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/triads.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/vitality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/vitality.cpython-312.pyc new file mode 100644 index 0000000..3892b1c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/vitality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/voronoi.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/voronoi.cpython-312.pyc new file mode 100644 index 0000000..916f901 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/voronoi.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/walks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/walks.cpython-312.pyc new file mode 100644 index 0000000..d48b47e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/walks.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/wiener.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/wiener.cpython-312.pyc new file mode 100644 index 0000000..4e0af60 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/__pycache__/wiener.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__init__.py new file mode 100644 index 0000000..b0b4015 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__init__.py @@ -0,0 +1,26 @@ +"""Approximations of graph properties and heuristic methods for optimization. + +The functions in this package are not imported into the top-level ``networkx`` +namespace, so the easiest way to use them is with:: + + >>> from networkx.algorithms import approximation + +Another option is to import the specific function with +``from networkx.algorithms.approximation import function_name``.
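As a quick illustration of the import convention just described, a minimal sketch (the Petersen graph and the chosen function are arbitrary examples):

from networkx.algorithms import approximation
import networkx as nx

G = nx.petersen_graph()
# Any routine re-exported by the subpackage is reachable this way.
print(approximation.maximum_independent_set(G))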
+ +""" + +from networkx.algorithms.approximation.clustering_coefficient import * +from networkx.algorithms.approximation.clique import * +from networkx.algorithms.approximation.connectivity import * +from networkx.algorithms.approximation.distance_measures import * +from networkx.algorithms.approximation.dominating_set import * +from networkx.algorithms.approximation.kcomponents import * +from networkx.algorithms.approximation.matching import * +from networkx.algorithms.approximation.ramsey import * +from networkx.algorithms.approximation.steinertree import * +from networkx.algorithms.approximation.traveling_salesman import * +from networkx.algorithms.approximation.treewidth import * +from networkx.algorithms.approximation.vertex_cover import * +from networkx.algorithms.approximation.maxcut import * +from networkx.algorithms.approximation.density import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..402d9a2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-312.pyc new file mode 100644 index 0000000..b55667e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-312.pyc new file mode 100644 index 0000000..1f1b589 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-312.pyc new file mode 100644 index 0000000..8322446 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/density.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/density.cpython-312.pyc new file mode 100644 index 0000000..e3bd7ca Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/density.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-312.pyc new file mode 100644 index 0000000..d4e7565 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-312.pyc new file mode 100644 index 0000000..3095786 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-312.pyc new file mode 100644 index 0000000..183ce72 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-312.pyc new file mode 100644 index 0000000..3a4f150 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-312.pyc new file mode 100644 index 0000000..7eace4d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-312.pyc new file mode 100644 index 0000000..2e19871 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-312.pyc new file mode 100644 index 0000000..162e363 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-312.pyc new file mode 100644 index 0000000..22b41fc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-312.pyc new file mode 100644 index 0000000..8cffcef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-312.pyc new file mode 100644 index 0000000..e7a2c29 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/clique.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/clique.py new file mode 100644 index 0000000..ed0f350 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/clique.py @@ -0,0 +1,259 @@ +"""Functions for computing large cliques and maximum independent sets.""" + +import networkx as nx +from networkx.algorithms.approximation import ramsey +from networkx.utils import not_implemented_for + +__all__ = [ + "clique_removal", + "max_clique", + "large_clique_size", + "maximum_independent_set", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def maximum_independent_set(G): + """Returns an approximate maximum independent set. + + An independent set (or stable set) is a set of vertices in a graph, no two of + which are adjacent. That is, it is a set I of vertices such that for every + two vertices in I, there is no edge connecting the two. Equivalently, each + edge in the graph has at most one endpoint in I. The size of an independent + set is the number of vertices it contains [1]_. + + A maximum independent set is a largest independent set for a given graph G, + and its size is denoted $\\alpha(G)$. The problem of finding such a set is called + the maximum independent set problem and is an NP-hard optimization problem. + As such, it is unlikely that there exists an efficient algorithm for finding + a maximum independent set of a graph. + + The Independent Set algorithm is based on [2]_. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + iset : set + The approximate maximum independent set + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.maximum_independent_set(G) + {0, 2, 4, 6, 9} + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + Finds an independent set within a worst-case approximation factor of + $O(|V|/(\log|V|)^2)$. + + References + ---------- + .. [1] `Wikipedia: Independent set + <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_ + .. [2] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + """ + iset, _ = clique_removal(G) + return iset + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def max_clique(G): + r"""Find the maximum clique. + + Finds an approximate maximum clique/independent set within a worst-case + factor of $O(|V|/(\log|V|)^2)$. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + clique : set + The approximate maximum clique of the graph + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.max_clique(G) + {8, 9} + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + A clique in an undirected graph G = (V, E) is a subset of the vertex set + `C \subseteq V` such that for every two vertices in C there exists an edge + connecting the two. This is equivalent to saying that the subgraph + induced by C is complete (in some cases, the term clique may also refer + to the subgraph). + + A maximum clique is a clique of the largest possible size in a given graph. + The clique number `\omega(G)` of a graph G is the number of + vertices in a maximum clique in G.
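A short sanity check of the two functions documented above; the expected sets are copied from the doctests in this file:

import networkx as nx

G = nx.path_graph(10)
# Values taken verbatim from the doctest examples above.
assert nx.approximation.maximum_independent_set(G) == {0, 2, 4, 6, 9}
assert nx.approximation.max_clique(G) == {8, 9}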
The intersection number of + G is the smallest number of cliques that together cover all edges of G. + + https://en.wikipedia.org/wiki/Maximum_clique + + References + ---------- + .. [1] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + doi:10.1007/BF01994876 + """ + # finding the maximum clique in a graph is equivalent to finding + # the independent set in the complementary graph + cgraph = nx.complement(G) + iset, _ = clique_removal(cgraph) + return iset + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def clique_removal(G): + r"""Repeatedly remove cliques from the graph. + + Results in a $O(|V|/(\log |V|)^2)$ approximation of maximum clique + and independent set. Returns the largest independent set found, along + with found maximal cliques. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + max_ind_cliques : (set, list) tuple + 2-tuple of Maximal Independent Set and list of maximal cliques (sets). + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.clique_removal(G) + ({0, 2, 4, 6, 9}, [{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}]) + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + References + ---------- + .. [1] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + """ + graph = G.copy() + c_i, i_i = ramsey.ramsey_R2(graph) + cliques = [c_i] + isets = [i_i] + while graph: + graph.remove_nodes_from(c_i) + c_i, i_i = ramsey.ramsey_R2(graph) + if c_i: + cliques.append(c_i) + if i_i: + isets.append(i_i) + # Determine the largest independent set as measured by cardinality. + maxiset = max(isets, key=len) + return maxiset, cliques + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def large_clique_size(G): + """Find the size of a large clique in a graph. + + A *clique* is a subset of nodes in which each pair of nodes is + adjacent. This function is a heuristic for finding the size of a + large clique in the graph. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + k: integer + The size of a large clique in the graph. + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.large_clique_size(G) + 2 + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + This implementation is from [1]_. Its worst case time complexity is + :math:`O(n d^2)`, where *n* is the number of nodes in the graph and + *d* is the maximum degree. + + This function is a heuristic, which means it may work well in + practice, but there is no rigorous mathematical guarantee on the + ratio between the returned number and the actual largest clique size + in the graph. + + References + ---------- + .. [1] Pattabiraman, Bharath, et al. + "Fast Algorithms for the Maximum Clique Problem on Massive Graphs + with Applications to Overlapping Community Detection." + *Internet Mathematics* 11.4-5 (2015): 421--448. + + + See also + -------- + + :func:`networkx.algorithms.approximation.clique.max_clique` + A function that returns an approximate maximum clique with a + guarantee on the approximation ratio. + + :mod:`networkx.algorithms.clique` + Functions for finding the exact maximum clique in a graph. 
+ + """ + degrees = G.degree + + def _clique_heuristic(G, U, size, best_size): + if not U: + return max(best_size, size) + u = max(U, key=degrees) + U.remove(u) + N_prime = {v for v in G[u] if degrees[v] >= best_size} + return _clique_heuristic(G, U & N_prime, size + 1, best_size) + + best_size = 0 + nodes = (u for u in G if degrees[u] >= best_size) + for u in nodes: + neighbors = {v for v in G[u] if degrees[v] >= best_size} + best_size = _clique_heuristic(G, neighbors, 1, best_size) + return best_size diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/clustering_coefficient.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/clustering_coefficient.py new file mode 100644 index 0000000..545fc65 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/clustering_coefficient.py @@ -0,0 +1,71 @@ +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["average_clustering"] + + +@not_implemented_for("directed") +@py_random_state(2) +@nx._dispatchable(name="approximate_average_clustering") +def average_clustering(G, trials=1000, seed=None): + r"""Estimates the average clustering coefficient of G. + + The local clustering of each node in `G` is the fraction of triangles + that actually exist over all possible triangles in its neighborhood. + The average clustering coefficient of a graph `G` is the mean of + local clusterings. + + This function finds an approximate average clustering coefficient + for G by repeating the following experiment `trials` times: choose + a node at random, choose two of its neighbors + at random, and check if they are connected. The approximate + coefficient is the fraction of triangles found over the number + of trials [1]_. + + Parameters + ---------- + G : NetworkX graph + + trials : integer + Number of trials to perform (default 1000). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + c : float + Approximated average clustering coefficient. + + Examples + -------- + >>> from networkx.algorithms import approximation + >>> G = nx.erdos_renyi_graph(10, 0.2, seed=10) + >>> approximation.average_clustering(G, trials=1000, seed=10) + 0.214 + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + References + ---------- + .. [1] Schank, Thomas, and Dorothea Wagner. Approximating clustering + coefficient and transitivity. Universität Karlsruhe, Fakultät für + Informatik, 2004. + https://doi.org/10.5445/IR/1000001239 + + """ + n = len(G) + triangles = 0 + nodes = list(G) + for i in [int(seed.random() * n) for i in range(trials)]: + nbrs = list(G[nodes[i]]) + if len(nbrs) < 2: + continue + u, v = seed.sample(nbrs, 2) + if u in G[v]: + triangles += 1 + return triangles / trials
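The sampling experiment described in the docstring above is small enough to restate standalone. The helper below is an assumed paraphrase for illustration, not this module's code:

import random

import networkx as nx

def sampled_average_clustering(G, trials=1000, rng=None):
    # Pick a random node, two of its random neighbors, and test whether
    # they are adjacent; the hit rate estimates the average clustering.
    rng = rng or random.Random(0)
    nodes = list(G)
    hits = 0
    for _ in range(trials):
        nbrs = list(G[rng.choice(nodes)])
        if len(nbrs) < 2:
            continue
        u, v = rng.sample(nbrs, 2)
        hits += u in G[v]
    return hits / trials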
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/connectivity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/connectivity.py new file mode 100644 index 0000000..0b596fd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/connectivity.py @@ -0,0 +1,412 @@ +"""Fast approximation for node connectivity""" + +import itertools +from operator import itemgetter + +import networkx as nx + +__all__ = [ + "local_node_connectivity", + "node_connectivity", + "all_pairs_node_connectivity", +] + + +@nx._dispatchable(name="approximate_local_node_connectivity") +def local_node_connectivity(G, source, target, cutoff=None): + """Compute node connectivity between source and target. + + Pairwise or local node connectivity between two distinct and nonadjacent + nodes is the minimum number of nodes that must be removed (minimum + separating cutset) to disconnect them. By Menger's theorem, this is equal + to the number of node-independent paths (paths that share no nodes other + than source and target), which is what we compute in this function. + + This algorithm is a fast approximation that gives a strict lower + bound on the actual number of node-independent paths between two nodes [1]_. + It works for both directed and undirected graphs. + + Parameters + ---------- + + G : NetworkX graph + + source : node + Starting node for node connectivity + + target : node + Ending node for node connectivity + + cutoff : integer + Maximum node connectivity to consider. If None, the minimum degree + of source or target is used as a cutoff. Default value None. + + Returns + ------- + k: integer + pairwise node connectivity + + Examples + -------- + >>> # Platonic octahedral graph has node connectivity 4 + >>> # for each non-adjacent node pair + >>> from networkx.algorithms import approximation as approx + >>> G = nx.octahedral_graph() + >>> approx.local_node_connectivity(G, 0, 5) + 4 + + Notes + ----- + This algorithm [1]_ finds node-independent paths between two nodes by + computing their shortest path using BFS, marking the nodes of the path + found as 'used' and then searching other shortest paths excluding the + nodes marked as used until no more paths exist. It is not exact because + a shortest path could use nodes that, if the path were longer, may belong + to two different node-independent paths. Thus it only guarantees a + strict lower bound on node connectivity. + + Note that the authors propose a further refinement, losing accuracy and + gaining speed, which is not implemented yet. + + See also + -------- + all_pairs_node_connectivity + node_connectivity + + References + ---------- + .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + + """ + if target == source: + raise nx.NetworkXError("source and target have to be different nodes.") + + # Maximum possible node-independent paths + if G.is_directed(): + possible = min(G.out_degree(source), G.in_degree(target)) + else: + possible = min(G.degree(source), G.degree(target)) + + K = 0 + if not possible: + return K + + if cutoff is None: + cutoff = float("inf") + + exclude = set() + for i in range(min(possible, cutoff)): + try: + path = _bidirectional_shortest_path(G, source, target, exclude) + exclude.update(set(path)) + K += 1 + except nx.NetworkXNoPath: + break + + return K + + +@nx._dispatchable(name="approximate_node_connectivity") +def node_connectivity(G, s=None, t=None): + r"""Returns an approximation for node connectivity for a graph or digraph G. + + Node connectivity is equal to the minimum number of nodes that + must be removed to disconnect G or render it trivial. By Menger's theorem, + this is equal to the number of node-independent paths (paths that + share no nodes other than source and target). + + If source and target nodes are provided, this function returns the + local node connectivity: the minimum number of nodes that must be + removed to break all paths from source to target in G. + + This algorithm is based on a fast approximation that gives a strict lower + bound on the actual number of node-independent paths between two nodes [1]_. + It works for both directed and undirected graphs. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + Returns + ------- + K : integer + Node connectivity of G, or local node connectivity if source + and target are provided. + + Examples + -------- + >>> # Platonic octahedral graph is 4-node-connected + >>> from networkx.algorithms import approximation as approx + >>> G = nx.octahedral_graph() + >>> approx.node_connectivity(G) + 4 + + Notes + ----- + This algorithm [1]_ finds node-independent paths between two nodes by + computing their shortest path using BFS, marking the nodes of the path + found as 'used' and then searching other shortest paths excluding the + nodes marked as used until no more paths exist. It is not exact because + a shortest path could use nodes that, if the path were longer, may belong + to two different node-independent paths. Thus it only guarantees a + strict lower bound on node connectivity. + + See also + -------- + all_pairs_node_connectivity + local_node_connectivity + + References + ---------- + .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # Local node connectivity + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return local_node_connectivity(G, s, t) + + # Global node connectivity + if G.is_directed(): + connected_func = nx.is_weakly_connected + iter_func = itertools.permutations + + def neighbors(v): + return itertools.chain(G.predecessors(v), G.successors(v)) + + else: + connected_func = nx.is_connected + iter_func = itertools.combinations + neighbors = G.neighbors + + if not connected_func(G): + return 0 + + # Choose a node with minimum degree + v, minimum_degree = min(G.degree(), key=itemgetter(1)) + # Node connectivity is bounded by minimum degree + K = minimum_degree + # compute local node connectivity with all non-neighbor nodes + # and store the minimum + for w in set(G) - set(neighbors(v)) - {v}: + K = min(K, local_node_connectivity(G, v, w, cutoff=K)) + # Same for non-adjacent pairs of neighbors of v + for x, y in iter_func(neighbors(v), 2): + if y not in G[x] and x != y: + K = min(K, local_node_connectivity(G, x, y, cutoff=K)) + return K + + +@nx._dispatchable(name="approximate_all_pairs_node_connectivity") +def all_pairs_node_connectivity(G, nbunch=None, cutoff=None): + """Compute node connectivity between all pairs of nodes. + + Pairwise or local node connectivity between two distinct and nonadjacent + nodes is the minimum number of nodes that must be removed (minimum + separating cutset) to disconnect them. By Menger's theorem, this is equal + to the number of node-independent paths (paths that share no nodes other + than source and target), which is what we compute in this function. + + This algorithm is a fast approximation that gives a strict lower + bound on the actual number of node-independent paths between two nodes [1]_. + It works for both directed and undirected graphs. + + + Parameters + ---------- + G : NetworkX graph + + nbunch: container + Container of nodes. If provided, node connectivity will be computed + only over pairs of nodes in nbunch. + + cutoff : integer + Maximum node connectivity to consider. If None, the minimum degree + of source or target is used as a cutoff in each pair of nodes. + Default value None. + + Returns + ------- + K : dictionary + Dictionary, keyed by source and target, of pairwise node connectivity + + Examples + -------- + A 3-node cycle with one extra node attached has connectivity 2 between all + nodes in the cycle and connectivity 1 between the extra node and the rest: + + >>> G = nx.cycle_graph(3) + >>> G.add_edge(2, 3) + >>> import pprint  # for nice dictionary formatting + >>> pprint.pprint(nx.all_pairs_node_connectivity(G)) + {0: {1: 2, 2: 2, 3: 1}, + 1: {0: 2, 2: 2, 3: 1}, + 2: {0: 2, 1: 2, 3: 1}, + 3: {0: 1, 1: 1, 2: 1}} + + See Also + -------- + local_node_connectivity + node_connectivity + + References + ---------- + .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for + Node-Independent Paths.
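A brief usage sketch for the three public functions in this file, reusing the octahedral-graph values from the doctests above (illustrative only):

import networkx as nx
from networkx.algorithms import approximation as approx

G = nx.octahedral_graph()
assert approx.local_node_connectivity(G, 0, 5) == 4
assert approx.node_connectivity(G) == 4
# Restrict the all-pairs variant to a few nodes to keep it cheap.
lower_bounds = approx.all_pairs_node_connectivity(G, nbunch=[0, 1, 5])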
Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + """ + if nbunch is None: + nbunch = G + else: + nbunch = set(nbunch) + + directed = G.is_directed() + if directed: + iter_func = itertools.permutations + else: + iter_func = itertools.combinations + + all_pairs = {n: {} for n in nbunch} + + for u, v in iter_func(nbunch, 2): + k = local_node_connectivity(G, u, v, cutoff=cutoff) + all_pairs[u][v] = k + if not directed: + all_pairs[v][u] = k + + return all_pairs + + +def _bidirectional_shortest_path(G, source, target, exclude): + """Returns shortest path between source and target ignoring nodes in the + container 'exclude'. + + Parameters + ---------- + + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + exclude: container + Container for nodes to exclude from the search for shortest paths + + Returns + ------- + path: list + Shortest path between source and target ignoring nodes in 'exclude' + + Raises + ------ + NetworkXNoPath + If there is no path or if nodes are adjacent and have only one path + between them + + Notes + ----- + This function and its helper are originally from + networkx.algorithms.shortest_paths.unweighted and are modified to + accept the extra parameter 'exclude', which is a container for nodes + already used in other paths that should be ignored. + + References + ---------- + .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + + """ + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target, exclude) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from source to w + while w is not None: + path.append(w) + w = pred[w] + path.reverse() + # from w to target + w = succ[path[-1]] + while w is not None: + path.append(w) + w = succ[w] + + return path + + +def _bidirectional_pred_succ(G, source, target, exclude): + # does BFS from both source and target and meets in the middle + # excludes nodes in the container "exclude" from the search + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # predecessor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + level = 0 + + while forward_fringe and reverse_fringe: + # Make sure that we iterate one step forward and one step backwards + # thus source and target will only trigger "found path" when they are + # adjacent and then they can be safely included in the container 'exclude' + level += 1 + if level % 2 != 0: + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc(v): + if w in exclude: + continue + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: + return pred, succ, w # found path + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred(v): + if w in exclude: + continue + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: + return pred, succ, w # found path + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/density.py 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/density.py new file mode 100644 index 0000000..7eb744c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/density.py @@ -0,0 +1,396 @@ +"""Fast algorithms for the densest subgraph problem""" + +import math + +import networkx as nx + +__all__ = ["densest_subgraph"] + + +def _greedy_plus_plus(G, iterations): + if G.number_of_edges() == 0: + return 0.0, set() + if iterations < 1: + raise ValueError( + f"The number of iterations must be an integer >= 1. Provided: {iterations}" + ) + + loads = dict.fromkeys(G.nodes, 0) # Load vector for Greedy++. + best_density = 0.0 # Highest density encountered. + best_subgraph = set() # Nodes of the best subgraph found. + + for _ in range(iterations): + # Initialize heap for fast access to minimum weighted degree. + heap = nx.utils.BinaryHeap() + + # Compute initial weighted degrees and add nodes to the heap. + for node, degree in G.degree: + heap.insert(node, loads[node] + degree) + # Set up tracking for current graph state. + remaining_nodes = set(G.nodes) + num_edges = G.number_of_edges() + current_degrees = dict(G.degree) + + while remaining_nodes: + num_nodes = len(remaining_nodes) + + # Current density of the (implicit) graph + current_density = num_edges / num_nodes + + # Update the best density. + if current_density > best_density: + best_density = current_density + best_subgraph = set(remaining_nodes) + + # Pop the node with the smallest weighted degree. + node, _ = heap.pop() + if node not in remaining_nodes: + continue # Skip nodes already removed. + + # Update the load of the popped node. + loads[node] += current_degrees[node] + + # Update neighbors' degrees and the heap. + for neighbor in G.neighbors(node): + if neighbor in remaining_nodes: + current_degrees[neighbor] -= 1 + num_edges -= 1 + heap.insert(neighbor, loads[neighbor] + current_degrees[neighbor]) + + # Remove the node from the remaining nodes. + remaining_nodes.remove(node) + + return best_density, best_subgraph + + +def _fractional_peeling(G, b, x, node_to_idx, edge_to_idx): + """ + Optimized fractional peeling using NumPy arrays. + + Parameters + ---------- + G : networkx.Graph + The input graph. + b : numpy.ndarray + Induced load vector. + x : numpy.ndarray + Fractional edge values. + node_to_idx : dict + Mapping from node to index. + edge_to_idx : dict + Mapping from edge to index. + + Returns + ------- + best_density : float + The best density found. + best_subgraph : set + The subset of nodes defining the densest subgraph. 
+ """ + heap = nx.utils.BinaryHeap() + + remaining_nodes = set(G.nodes) + + # Initialize heap with b values + for idx in remaining_nodes: + heap.insert(idx, b[idx]) + + num_edges = G.number_of_edges() + + best_density = 0.0 + best_subgraph = set() + + while remaining_nodes: + num_nodes = len(remaining_nodes) + current_density = num_edges / num_nodes + + if current_density > best_density: + best_density = current_density + best_subgraph = set(remaining_nodes) + + # Pop the node with the smallest b + node, _ = heap.pop() + while node not in remaining_nodes: + node, _ = heap.pop()  # Clean the heap of stale values + + # Update neighbors' b values by subtracting the fractional x value + for neighbor in G.neighbors(node): + if neighbor in remaining_nodes: + neighbor_idx = node_to_idx[neighbor] + # Take off the fractional value + b[neighbor_idx] -= x[edge_to_idx[(neighbor, node)]] + num_edges -= 1 + heap.insert(neighbor, b[neighbor_idx]) + + remaining_nodes.remove(node)  # peel off the node + + return best_density, best_subgraph + + +def _fista(G, iterations): + if G.number_of_edges() == 0: + return 0.0, set() + if iterations < 1: + raise ValueError( + f"The number of iterations must be an integer >= 1. Provided: {iterations}" + ) + import numpy as np + + # 1. Node Mapping: Assign a unique index to each node + node_to_idx = {node: idx for idx, node in enumerate(G)} + num_nodes = G.number_of_nodes() + num_undirected_edges = G.number_of_edges() + + # 2. Edge Mapping: Assign a unique index to each bidirectional edge + bidirectional_edges = [(u, v) for u, v in G.edges] + [(v, u) for u, v in G.edges] + edge_to_idx = {edge: idx for idx, edge in enumerate(bidirectional_edges)} + + num_edges = len(bidirectional_edges) + + # 3. Reverse Edge Mapping: Map each (bidirectional) edge to its reverse edge index + reverse_edge_idx = np.empty(num_edges, dtype=np.int32) + for idx in range(num_undirected_edges): + reverse_edge_idx[idx] = num_undirected_edges + idx + for idx in range(num_undirected_edges, 2 * num_undirected_edges): + reverse_edge_idx[idx] = idx - num_undirected_edges + + # 4. Initialize Variables as NumPy Arrays + x = np.full(num_edges, 0.5, dtype=np.float32) + y = x.copy() + z = np.zeros(num_edges, dtype=np.float32) + b = np.zeros(num_nodes, dtype=np.float32)  # Induced load vector + tk = 1.0  # Momentum term + + # 5. Precompute Edge Source Indices + edge_src_indices = np.array( + [node_to_idx[u] for u, _ in bidirectional_edges], dtype=np.int32 + ) + + # 6. Compute Learning Rate + max_degree = max(deg for _, deg in G.degree) + # 0.9 guards against floating-point errors when max_degree is very large + learning_rate = 0.9 / max_degree + + # 7. Iterative Updates + for _ in range(iterations): + # 7a. Update b: sum y over outgoing edges for each node + b[:] = 0.0  # Reset b to zero + np.add.at(b, edge_src_indices, y)  # b_u = \sum_{v : (u,v) \in E(G)} y_{uv} + + # 7b. Compute z, z_{uv} = y_{uv} - 2 * learning_rate * b_u + z = y - 2.0 * learning_rate * b[edge_src_indices] + + # 7c. Update Momentum Term + tknew = (1.0 + math.sqrt(1 + 4.0 * tk**2)) / 2.0 + + # 7d. Update x in a vectorized manner, x_{uv} = (z_{uv} - z_{vu} + 1.0) / 2.0 + new_xuv = (z - z[reverse_edge_idx] + 1.0) / 2.0 + clamped_x = np.clip(new_xuv, 0.0, 1.0)  # Clamp x_{uv} between 0 and 1 + + # Update y using the FISTA update formula (similar to gradient descent) + y = ( + clamped_x + + ((tk - 1.0) / tknew) * (clamped_x - x) + + (tk / tknew) * (clamped_x - y) + ) + + # Update x + x = clamped_x + + # Update tk, the momentum term + tk = tknew + + # Rebalance the b values; otherwise performance is slightly suboptimal. + b[:] = 0.0 + np.add.at(b, edge_src_indices, x)  # b_u = \sum_{v : (u,v) \in E(G)} x_{uv} + + # Extract the actual (approximate) dense subgraph. + return _fractional_peeling(G, b, x, node_to_idx, edge_to_idx) + + +ALGORITHMS = {"greedy++": _greedy_plus_plus, "fista": _fista} + + +@nx.utils.not_implemented_for("directed") +@nx.utils.not_implemented_for("multigraph") +@nx._dispatchable +def densest_subgraph(G, iterations=1, *, method="fista"): + r"""Returns an approximate densest subgraph for a graph `G`. + + This function runs an iterative algorithm to find the densest subgraph, + and returns both the density and the subgraph. For a discussion on the + notion of density used and the different algorithms available in + networkx, please see the Notes section below. + + Parameters + ---------- + G : NetworkX graph + Undirected graph. + + iterations : int, optional (default=1) + Number of iterations to use for the iterative algorithm. Can be + specified positionally or as a keyword argument. + + method : string, optional (default='fista') + The algorithm to use to approximate the densest subgraph. Supported + options: 'greedy++' by Boob et al. [2]_ and 'fista' by Harb et al. [3]_. + Must be specified as a keyword argument. Other inputs produce a + ValueError. + + Returns + ------- + d : float + The density of the approximate subgraph found. + + S : set + The subset of nodes defining the approximate densest subgraph. + + Examples + -------- + >>> G = nx.star_graph(4) + >>> nx.approximation.densest_subgraph(G, iterations=1) + (0.8, {0, 1, 2, 3, 4}) + + Notes + ----- + **Problem Definition:** + The densest subgraph problem (DSG) asks to find the subgraph + $S \subseteq V(G)$ with maximum density. For a subset of the nodes of + $G$, $S \subseteq V(G)$, define $E(S) = \{ (u,v) : (u,v)\in E(G), + u\in S, v\in S \}$ as the set of edges with both endpoints in $S$. + The density of $S$ is defined as $|E(S)|/|S|$, the ratio between the + edges in the subgraph $G[S]$ and the number of nodes in that subgraph. + Note that this is different from the standard graph theoretic definition + of density, defined as $\frac{2|E(S)|}{|S|(|S|-1)}$, for historical + reasons. + + **Exact Algorithms:** + The densest subgraph problem is polynomial-time solvable using maximum + flow, commonly referred to as Goldberg's algorithm. However, the + algorithm is quite involved. It first binary searches on the optimal + density, $d^\ast$. For a guess of the density $d$, it sets up a flow + network $G'$ with size $O(m)$. The maximum flow solution either + informs the algorithm that no subgraph with density $d$ exists, or it + provides a subgraph with density at least $d$. However, this is + inherently bottlenecked by the maximum flow algorithm. For example, [2]_ + notes that Goldberg’s algorithm was not feasible on many large graphs + even though they used a highly optimized maximum flow library.
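A small sanity check of `densest_subgraph` as defined above (assuming the subpackage re-export shown in the `__init__` earlier in this patch): on a complete graph the densest subgraph is the whole graph, so a single greedy++ iteration already recovers it.

import networkx as nx

G = nx.complete_graph(5)  # 10 edges over 5 nodes: density 10/5 = 2.0
density, S = nx.approximation.densest_subgraph(G, iterations=1, method="greedy++")
assert density == 2.0 and S == set(G)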
+ + **Charikar's Greedy Peeling:** + While exact solution algorithms are quite involved, there are several + known approximation algorithms for the densest subgraph problem. + + Charikar [1]_ described a very simple 1/2-approximation algorithm for DSG + known as the greedy "peeling" algorithm. The algorithm creates an + ordering of the nodes as follows. The first node $v_1$ is the one with + the smallest degree in $G$ (ties broken arbitrarily). It selects + $v_2$ to be the smallest degree node in $G \setminus v_1$. Letting + $G_i$ be the graph after removing $v_1, ..., v_i$ (with $G_0=G$), + the algorithm returns the graph among $G_0, ..., G_n$ with the highest + density. + + **Greedy++:** + Boob et al. [2]_ generalized this algorithm into Greedy++, an iterative + algorithm that runs several rounds of "peeling". In fact, Greedy++ with 1 + iteration is precisely Charikar's algorithm. The algorithm converges to a + $(1-\epsilon)$ approximate densest subgraph in $O(\Delta(G)\log + n/\epsilon^2)$ iterations, where $\Delta(G)$ is the maximum degree, + and $n$ is the number of nodes in $G$. The algorithm also has other + desirable properties as shown by [4]_ and [5]_. + + **FISTA Algorithm:** + Harb et al. [3]_ gave a faster and more scalable algorithm using ideas + from quadratic programming for the densest subgraph, which is based on the + fast iterative shrinkage-thresholding algorithm (FISTA). It is + known that computing the densest subgraph can be formulated as the + following convex optimization problem: + + Minimize $\sum_{u \in V(G)} b_u^2$ + + Subject to: + + $b_u = \sum_{v: \{u,v\} \in E(G)} x_{uv}$ for all $u \in V(G)$ + + $x_{uv} + x_{vu} = 1.0$ for all $\{u,v\} \in E(G)$ + + $x_{uv} \geq 0, x_{vu} \geq 0$ for all $\{u,v\} \in E(G)$ + + Here, $x_{uv}$ represents the fraction of edge $\{u,v\}$ assigned to + $u$, and $x_{vu}$ to $v$. + + The FISTA algorithm efficiently solves this convex program using gradient + descent with projections. For a learning rate $\alpha$, the algorithm + does: + + 1. **Initialization**: Set $x^{(0)}_{uv} = x^{(0)}_{vu} = 0.5$ for all + edges as a feasible solution. + + 2. **Gradient Update**: For iteration $k\geq 1$, set + $x^{(k+1)}_{uv} = x^{(k)}_{uv} - 2 \alpha \sum_{v: \{u,v\} \in E(G)} + x^{(k)}_{uv}$. However, now $x^{(k+1)}_{uv}$ might be infeasible! + To ensure feasibility, we project $x^{(k+1)}_{uv}$. + + 3. **Projection to the Feasible Set**: Compute + $b^{(k+1)}_u = \sum_{v: \{u,v\} \in E(G)} x^{(k)}_{uv}$ for all + nodes $u$. Define $z^{(k+1)}_{uv} = x^{(k+1)}_{uv} - 2 \alpha + b^{(k+1)}_u$. Update $x^{(k+1)}_{uv} = + CLAMP((z^{(k+1)}_{uv} - z^{(k+1)}_{vu} + 1.0) / 2.0)$, where + $CLAMP(x) = \max(0, \min(1, x))$. + + With a learning rate of $\alpha=1/\Delta(G)$, where $\Delta(G)$ is + the maximum degree, the algorithm converges to the optimum solution of + the convex program. + + **Fractional Peeling:** + To obtain a **discrete** subgraph, we use fractional peeling, an + adaptation of the standard peeling algorithm which peels the minimum + degree vertex in each iteration, and returns the densest subgraph found + along the way. Here, we instead peel the vertex with the smallest + induced load $b_u$: + + 1. Compute $b_u$ and $x_{uv}$. + + 2. Iteratively remove the vertex with the smallest $b_u$, updating its + neighbors' load by $x_{vu}$. + + Fractional peeling transforms the approximately optimal fractional + values $b_u, x_{uv}$ into a discrete subgraph.
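For reference, the "traditional" peeling pass that fractional peeling is contrasted with just below can be written compactly. This is an illustrative sketch, not this module's implementation:

import networkx as nx

def one_peeling_pass(G):
    # Charikar's greedy (Greedy++ with a single iteration): repeatedly
    # delete a minimum-degree node, keeping the densest graph seen so far.
    H = G.copy()
    best_density, best_nodes = 0.0, set()
    while H:
        density = H.number_of_edges() / H.number_of_nodes()
        if density > best_density:
            best_density, best_nodes = density, set(H)
        v = min(H.degree, key=lambda nd: nd[1])[0]
        H.remove_node(v)
    return best_density, best_nodes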
Unlike traditional + peeling, which removes the lowest-degree node, this method accounts for + fractional edge contributions from the convex program. + + This approach is both scalable and theoretically sound, ensuring a quick + approximation of the densest subgraph while leveraging fractional load + balancing. + + References + ---------- + .. [1] Charikar, Moses. "Greedy approximation algorithms for finding dense + components in a graph." In International workshop on approximation + algorithms for combinatorial optimization, pp. 84-95. Berlin, Heidelberg: + Springer Berlin Heidelberg, 2000. + + .. [2] Boob, Digvijay, Yu Gao, Richard Peng, Saurabh Sawlani, Charalampos + Tsourakakis, Di Wang, and Junxing Wang. "Flowless: Extracting densest + subgraphs without flow computations." In Proceedings of The Web Conference + 2020, pp. 573-583. 2020. + + .. [3] Harb, Elfarouk, Kent Quanrud, and Chandra Chekuri. "Faster and scalable + algorithms for densest subgraph and decomposition." Advances in Neural + Information Processing Systems 35 (2022): 26966-26979. + + .. [4] Harb, Elfarouk, Kent Quanrud, and Chandra Chekuri. "Convergence to + lexicographically optimal base in a (contra) polymatroid and applications + to densest subgraph and tree packing." arXiv preprint arXiv:2305.02987 + (2023). + + .. [5] Chekuri, Chandra, Kent Quanrud, and Manuel R. Torres. "Densest + subgraph: Supermodularity, iterative peeling, and flow." In Proceedings of + the 2022 Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), pp. + 1531-1555. Society for Industrial and Applied Mathematics, 2022. + """ + try: + algo = ALGORITHMS[method] + except KeyError as e: + raise ValueError(f"{method} is not a valid choice for an algorithm.") from e + + return algo(G, iterations) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/distance_measures.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/distance_measures.py new file mode 100644 index 0000000..d5847e6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/distance_measures.py @@ -0,0 +1,150 @@ +"""Approximated distance measures.""" + +import networkx as nx +from networkx.utils.decorators import py_random_state + +__all__ = ["diameter"] + + +@py_random_state(1) +@nx._dispatchable(name="approximate_diameter") +def diameter(G, seed=None): + """Returns a lower bound on the diameter of the graph G. + + The function computes a lower bound on the diameter (i.e., the maximum eccentricity) + of a directed or undirected graph G. The procedure used varies depending on the graph + being directed or not. + + If G is an `undirected` graph, then the function uses the `2-sweep` algorithm [1]_. + The main idea is to pick the farthest node from a random node and return its eccentricity. + + Otherwise, if G is a `directed` graph, the function uses the `2-dSweep` algorithm [2]_. + The procedure starts by selecting a random source node $s$ from which it performs a + forward and a backward BFS. Let $a_1$ and $a_2$ be the farthest nodes in the forward and + backward cases, respectively. Then, it computes the backward eccentricity of $a_1$ using + a backward BFS and the forward eccentricity of $a_2$ using a forward BFS. + Finally, it returns the best lower bound between the two. + + In both cases, the time complexity is linear with respect to the size of G.
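The undirected 2-sweep procedure described above fits in a few lines. This helper is an assumed illustration for a connected undirected graph, not the module's private implementation:

import random

import networkx as nx

def two_sweep_lower_bound(G, seed=0):
    # BFS from a random node, take a farthest node, and return that node's
    # eccentricity: a lower bound on (and often equal to) the diameter.
    rng = random.Random(seed)
    source = rng.choice(list(G))
    distances = nx.shortest_path_length(G, source)
    farthest = max(distances, key=distances.get)
    return nx.eccentricity(G, farthest)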
+ + Parameters + ---------- + G : NetworkX graph + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + d : integer + Lower Bound on the Diameter of G + + Examples + -------- + >>> G = nx.path_graph(10) # undirected graph + >>> nx.diameter(G) + 9 + >>> G = nx.cycle_graph(3, create_using=nx.DiGraph) # directed graph + >>> nx.diameter(G) + 2 + + Raises + ------ + NetworkXError + If the graph is empty or + If the graph is undirected and not connected or + If the graph is directed and not strongly connected. + + See Also + -------- + networkx.algorithms.distance_measures.diameter + + References + ---------- + .. [1] Magnien, Clémence, Matthieu Latapy, and Michel Habib. + *Fast computation of empirically tight bounds for the diameter of massive graphs.* + Journal of Experimental Algorithmics (JEA), 2009. + https://arxiv.org/pdf/0904.2728.pdf + .. [2] Crescenzi, Pierluigi, Roberto Grossi, Leonardo Lanzi, and Andrea Marino. + *On computing the diameter of real-world directed (weighted) graphs.* + International Symposium on Experimental Algorithms. Springer, Berlin, Heidelberg, 2012. + https://courses.cs.ut.ee/MTAT.03.238/2014_fall/uploads/Main/diameter.pdf + """ + # if G is empty + if not G: + raise nx.NetworkXError("Expected non-empty NetworkX graph!") + # if there's only a node + if G.number_of_nodes() == 1: + return 0 + # if G is directed + if G.is_directed(): + return _two_sweep_directed(G, seed) + # else if G is undirected + return _two_sweep_undirected(G, seed) + + +def _two_sweep_undirected(G, seed): + """Helper function for finding a lower bound on the diameter + for undirected Graphs. + + The idea is to pick the farthest node from a random node + and return its eccentricity. + + ``G`` is a NetworkX undirected graph. + + .. note:: + + ``seed`` is a random.Random or numpy.random.RandomState instance + """ + # select a random source node + source = seed.choice(list(G)) + # get the distances to the other nodes + distances = nx.shortest_path_length(G, source) + # if some nodes have not been visited, then the graph is not connected + if len(distances) != len(G): + raise nx.NetworkXError("Graph not connected.") + # take a node that is (one of) the farthest nodes from the source + *_, node = distances + # return the eccentricity of the node + return nx.eccentricity(G, node) + + +def _two_sweep_directed(G, seed): + """Helper function for finding a lower bound on the diameter + for directed Graphs. + + It implements 2-dSweep, the directed version of the 2-sweep algorithm. + The algorithm follows the following steps. + 1. Select a source node $s$ at random. + 2. Perform a forward BFS from $s$ to select a node $a_1$ at the maximum + distance from the source, and compute $LB_1$, the backward eccentricity of $a_1$. + 3. Perform a backward BFS from $s$ to select a node $a_2$ at the maximum + distance from the source, and compute $LB_2$, the forward eccentricity of $a_2$. + 4. Return the maximum between $LB_1$ and $LB_2$. + + ``G`` is a NetworkX directed graph. + + .. 
note::
+
+        ``seed`` is a random.Random or numpy.random.RandomState instance
+    """
+    # get a new digraph G' with the edges reversed
+    G_reversed = G.reverse()
+    # select a random source node
+    source = seed.choice(list(G))
+    # compute forward distances from source
+    forward_distances = nx.shortest_path_length(G, source)
+    # compute backward distances from source
+    backward_distances = nx.shortest_path_length(G_reversed, source)
+    # if either the source can't reach every node or not every node
+    # can reach the source, then the graph is not strongly connected
+    n = len(G)
+    if len(forward_distances) != n or len(backward_distances) != n:
+        raise nx.NetworkXError("DiGraph not strongly connected.")
+    # take a node a_1 at the maximum distance from the source in G
+    *_, a_1 = forward_distances
+    # take a node a_2 at the maximum distance from the source in G_reversed
+    *_, a_2 = backward_distances
+    # return the max between the backward eccentricity of a_1 and the
+    # forward eccentricity of a_2
+    return max(nx.eccentricity(G_reversed, a_1), nx.eccentricity(G, a_2))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/dominating_set.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/dominating_set.py
new file mode 100644
index 0000000..e568a82
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/dominating_set.py
@@ -0,0 +1,149 @@
+"""Functions for finding node and edge dominating sets.
+
+A `dominating set`_ for an undirected graph *G* with vertex set *V*
+and edge set *E* is a subset *D* of *V* such that every vertex not in
+*D* is adjacent to at least one member of *D*. An `edge dominating set`_
+is a subset *F* of *E* such that every edge not in *F* is
+incident to an endpoint of at least one edge in *F*.
+
+.. _dominating set: https://en.wikipedia.org/wiki/Dominating_set
+.. _edge dominating set: https://en.wikipedia.org/wiki/Edge_dominating_set
+
+"""
+
+import networkx as nx
+
+from ...utils import not_implemented_for
+from ..matching import maximal_matching
+
+__all__ = ["min_weighted_dominating_set", "min_edge_dominating_set"]
+
+
+# TODO Why doesn't this algorithm work for directed graphs?
+@not_implemented_for("directed")
+@nx._dispatchable(node_attrs="weight")
+def min_weighted_dominating_set(G, weight=None):
+    r"""Returns a dominating set that approximates the minimum weight node
+    dominating set.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph.
+
+    weight : string
+        The node attribute storing the weight of a node. If provided,
+        the node attribute with this key must be a number for each
+        node. If not provided, each node is assumed to have weight one.
+
+    Returns
+    -------
+    min_weight_dominating_set : set
+        A set of nodes, the sum of whose weights is no more than `(\log
+        w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of
+        each node in the graph and `w(V^*)` denotes the sum of the
+        weights of each node in the minimum weight dominating set.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 4), (1, 4), (1, 2), (2, 3), (3, 4), (2, 5)])
+    >>> nx.approximation.min_weighted_dominating_set(G)
+    {1, 2, 4}
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is directed.
+
+    Notes
+    -----
+    This algorithm computes an approximate minimum weighted dominating
+    set for the graph `G`.
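+
+    For example (a sketch; the ``cost`` attribute name is arbitrary):
+
+    >>> G = nx.path_graph(3)
+    >>> nx.set_node_attributes(G, {0: 5, 1: 1, 2: 5}, "cost")
+    >>> min_weighted_dominating_set(G, weight="cost")
+    {1}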
The returned solution has weight `(\log + w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of each + node in the graph and `w(V^*)` denotes the sum of the weights of + each node in the minimum weight dominating set for the graph. + + This implementation of the algorithm runs in $O(m)$ time, where $m$ + is the number of edges in the graph. + + References + ---------- + .. [1] Vazirani, Vijay V. + *Approximation Algorithms*. + Springer Science & Business Media, 2001. + + """ + # The unique dominating set for the null graph is the empty set. + if len(G) == 0: + return set() + + # This is the dominating set that will eventually be returned. + dom_set = set() + + def _cost(node_and_neighborhood): + """Returns the cost-effectiveness of greedily choosing the given + node. + + `node_and_neighborhood` is a two-tuple comprising a node and its + closed neighborhood. + + """ + v, neighborhood = node_and_neighborhood + return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set) + + # This is a set of all vertices not already covered by the + # dominating set. + vertices = set(G) + # This is a dictionary mapping each node to the closed neighborhood + # of that node. + neighborhoods = {v: {v} | set(G[v]) for v in G} + + # Continue until all vertices are adjacent to some node in the + # dominating set. + while vertices: + # Find the most cost-effective node to add, along with its + # closed neighborhood. + dom_node, min_set = min(neighborhoods.items(), key=_cost) + # Add the node to the dominating set and reduce the remaining + # set of nodes to cover. + dom_set.add(dom_node) + del neighborhoods[dom_node] + vertices -= min_set + + return dom_set + + +@nx._dispatchable +def min_edge_dominating_set(G): + r"""Returns minimum cardinality edge dominating set. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + min_edge_dominating_set : set + Returns a set of dominating edges whose size is no more than 2 * OPT. + + Examples + -------- + >>> G = nx.petersen_graph() + >>> nx.approximation.min_edge_dominating_set(G) + {(0, 1), (4, 9), (6, 8), (5, 7), (2, 3)} + + Raises + ------ + ValueError + If the input graph `G` is empty. + + Notes + ----- + The algorithm computes an approximate solution to the edge dominating set + problem. The result is no more than 2 * OPT in terms of size of the set. + Runtime of the algorithm is $O(|E|)$. + """ + if not G: + raise ValueError("Expected non-empty NetworkX graph!") + return maximal_matching(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/kcomponents.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/kcomponents.py new file mode 100644 index 0000000..fe52e39 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/kcomponents.py @@ -0,0 +1,367 @@ +"""Fast approximation for k-component structure""" + +import itertools +from collections import defaultdict +from collections.abc import Mapping +from functools import cached_property + +import networkx as nx +from networkx.algorithms.approximation import local_node_connectivity +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for + +__all__ = ["k_components"] + + +@not_implemented_for("directed") +@nx._dispatchable(name="approximate_k_components") +def k_components(G, min_density=0.95): + r"""Returns the approximate k-component structure of a graph G. 
+
+    A `k`-component is a maximal subgraph of a graph G that has node
+    connectivity at least `k`: we need to remove at least `k` nodes to break
+    it into more components. `k`-components have an inherent hierarchical
+    structure because they are nested in terms of connectivity: a connected
+    graph can contain several 2-components, each of which can contain
+    one or more 3-components, and so forth.
+
+    This implementation is based on fast heuristics for approximating the
+    `k`-component structure of a graph [1]_, which, in turn, are based on a
+    fast approximation algorithm for finding good lower bounds on the number
+    of node-independent paths between two nodes [2]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    min_density : float
+        Density relaxation threshold. Default value 0.95
+
+    Returns
+    -------
+    k_components : dict
+        Dictionary with connectivity level `k` as key and a list of
+        sets of nodes that form a k-component of level `k` as values.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is directed.
+
+    Examples
+    --------
+    >>> # Petersen graph has 10 nodes and it is triconnected, thus all
+    >>> # nodes are in a single component on all three connectivity levels
+    >>> from networkx.algorithms import approximation as apxa
+    >>> G = nx.petersen_graph()
+    >>> k_components = apxa.k_components(G)
+
+    Notes
+    -----
+    The logic of the approximation algorithm for computing the `k`-component
+    structure [1]_ is based on repeatedly applying simple and fast algorithms
+    for `k`-cores and biconnected components in order to narrow down the
+    number of pairs of nodes over which we have to compute White and Newman's
+    approximation algorithm for finding node-independent paths [2]_. More
+    formally, this algorithm is based on Whitney's theorem, which states
+    an inclusion relation among node connectivity, edge connectivity, and
+    minimum degree for any graph G. This theorem implies that every
+    `k`-component is nested inside a `k`-edge-component, which in turn,
+    is contained in a `k`-core. Thus, this algorithm computes node-independent
+    paths among pairs of nodes in each biconnected part of each `k`-core,
+    and repeats this procedure for each `k` from 3 to the maximal core number
+    of a node in the input graph.
+
+    Because, in practice, many nodes of the core of level `k` inside a
+    bicomponent actually are part of a component of level `k`, the auxiliary
+    graph needed for the algorithm is likely to be very dense. Thus, we use
+    a complement graph data structure (see `AntiGraph`) to save memory.
+    AntiGraph only stores information about the edges that are *not* present
+    in the actual auxiliary graph. When applying algorithms to this
+    complement graph data structure, it behaves as if it were the dense
+    version.
+
+    See also
+    --------
+    k_components
+
+    References
+    ----------
+    .. [1] Torrents, J. and F. Ferraro (2015) Structural Cohesion:
+       Visualization and Heuristics for Fast Computation.
+       https://arxiv.org/pdf/1503.04476v1
+
+    .. [2] White, Douglas R., and Mark Newman (2001) A Fast Algorithm for
+       Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+       https://www.santafe.edu/research/results/working-papers/fast-approximation-algorithms-for-finding-node-ind
+
+    .. [3] Moody, J. and D. White (2003). Social cohesion and embeddedness:
+       A hierarchical conception of social groups.
+       American Sociological Review 68(1), 103--28.
+ https://doi.org/10.2307/3088904 + + """ + # Dictionary with connectivity level (k) as keys and a list of + # sets of nodes that form a k-component as values + k_components = defaultdict(list) + # make a few functions local for speed + node_connectivity = local_node_connectivity + k_core = nx.k_core + core_number = nx.core_number + biconnected_components = nx.biconnected_components + combinations = itertools.combinations + # Exact solution for k = {1,2} + # There is a linear time algorithm for triconnectivity, if we had an + # implementation available we could start from k = 4. + for component in nx.connected_components(G): + # isolated nodes have connectivity 0 + comp = set(component) + if len(comp) > 1: + k_components[1].append(comp) + for bicomponent in nx.biconnected_components(G): + # avoid considering dyads as bicomponents + bicomp = set(bicomponent) + if len(bicomp) > 2: + k_components[2].append(bicomp) + # There is no k-component of k > maximum core number + # \kappa(G) <= \lambda(G) <= \delta(G) + g_cnumber = core_number(G) + max_core = max(g_cnumber.values()) + for k in range(3, max_core + 1): + C = k_core(G, k, core_number=g_cnumber) + for nodes in biconnected_components(C): + # Build a subgraph SG induced by the nodes that are part of + # each biconnected component of the k-core subgraph C. + if len(nodes) < k: + continue + SG = G.subgraph(nodes) + # Build auxiliary graph + H = _AntiGraph() + H.add_nodes_from(SG.nodes()) + for u, v in combinations(SG, 2): + K = node_connectivity(SG, u, v, cutoff=k) + if k > K: + H.add_edge(u, v) + for h_nodes in biconnected_components(H): + if len(h_nodes) <= k: + continue + SH = H.subgraph(h_nodes) + for Gc in _cliques_heuristic(SG, SH, k, min_density): + for k_nodes in biconnected_components(Gc): + Gk = nx.k_core(SG.subgraph(k_nodes), k) + if len(Gk) <= k: + continue + k_components[k].append(set(Gk)) + return k_components + + +def _cliques_heuristic(G, H, k, min_density): + h_cnumber = nx.core_number(H) + for i, c_value in enumerate(sorted(set(h_cnumber.values()), reverse=True)): + cands = {n for n, c in h_cnumber.items() if c == c_value} + # Skip checking for overlap for the highest core value + if i == 0: + overlap = False + else: + overlap = set.intersection( + *[{x for x in H[n] if x not in cands} for n in cands] + ) + if overlap and len(overlap) < k: + SH = H.subgraph(cands | overlap) + else: + SH = H.subgraph(cands) + sh_cnumber = nx.core_number(SH) + SG = nx.k_core(G.subgraph(SH), k) + while not (_same(sh_cnumber) and nx.density(SH) >= min_density): + # This subgraph must be writable => .copy() + SH = H.subgraph(SG).copy() + if len(SH) <= k: + break + sh_cnumber = nx.core_number(SH) + sh_deg = dict(SH.degree()) + min_deg = min(sh_deg.values()) + SH.remove_nodes_from(n for n, d in sh_deg.items() if d == min_deg) + SG = nx.k_core(G.subgraph(SH), k) + else: + yield SG + + +def _same(measure, tol=0): + vals = set(measure.values()) + if (max(vals) - min(vals)) <= tol: + return True + return False + + +class _AntiGraph(nx.Graph): + """ + Class for complement graphs. + + The main goal is to be able to work with big and dense graphs with + a low memory footprint. + + In this class you add the edges that *do not exist* in the dense graph, + the report methods of the class return the neighbors, the edges and + the degree as if it was the dense graph. Thus it's possible to use + an instance of this class with some of NetworkX functions. In this + case we only use k-core, connected_components, and biconnected_components. 
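+
+    A small sketch of the complement behavior (direct use of this private
+    class, for illustration only):
+
+    >>> A = _AntiGraph()
+    >>> A.add_nodes_from([0, 1, 2, 3])  # no anti-edges stored yet
+    >>> sorted(A[0])  # so, in the dense view, 0 neighbors every other node
+    [1, 2, 3]
+    >>> A.add_edge(0, 1)  # record that {0, 1} is *absent* from the dense graph
+    >>> sorted(A[0])
+    [2, 3]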
+ """ + + all_edge_dict = {"weight": 1} + + def single_edge_dict(self): + return self.all_edge_dict + + edge_attr_dict_factory = single_edge_dict # type: ignore[assignment] + + def __getitem__(self, n): + """Returns a dict of neighbors of node n in the dense graph. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + """ + all_edge_dict = self.all_edge_dict + return dict.fromkeys(set(self._adj) - set(self._adj[n]) - {n}, all_edge_dict) + + def neighbors(self, n): + """Returns an iterator over all neighbors of node n in the + dense graph. + """ + try: + return iter(set(self._adj) - set(self._adj[n]) - {n}) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the graph.") from err + + class AntiAtlasView(Mapping): + """An adjacency inner dict for AntiGraph""" + + def __init__(self, graph, node): + self._graph = graph + self._atlas = graph._adj[node] + self._node = node + + def __len__(self): + return len(self._graph) - len(self._atlas) - 1 + + def __iter__(self): + return (n for n in self._graph if n not in self._atlas and n != self._node) + + def __getitem__(self, nbr): + nbrs = set(self._graph._adj) - set(self._atlas) - {self._node} + if nbr in nbrs: + return self._graph.all_edge_dict + raise KeyError(nbr) + + class AntiAdjacencyView(AntiAtlasView): + """An adjacency outer dict for AntiGraph""" + + def __init__(self, graph): + self._graph = graph + self._atlas = graph._adj + + def __len__(self): + return len(self._atlas) + + def __iter__(self): + return iter(self._graph) + + def __getitem__(self, node): + if node not in self._graph: + raise KeyError(node) + return self._graph.AntiAtlasView(self._graph, node) + + @cached_property + def adj(self): + return self.AntiAdjacencyView(self) + + def subgraph(self, nodes): + """This subgraph method returns a full AntiGraph. Not a View""" + nodes = set(nodes) + G = _AntiGraph() + G.add_nodes_from(nodes) + for n in G: + Gnbrs = G.adjlist_inner_dict_factory() + G._adj[n] = Gnbrs + for nbr, d in self._adj[n].items(): + if nbr in G._adj: + Gnbrs[nbr] = d + G._adj[nbr][n] = d + G.graph = self.graph + return G + + class AntiDegreeView(nx.reportviews.DegreeView): + def __iter__(self): + all_nodes = set(self._succ) + for n in self._nodes: + nbrs = all_nodes - set(self._succ[n]) - {n} + yield (n, len(nbrs)) + + def __getitem__(self, n): + nbrs = set(self._succ) - set(self._succ[n]) - {n} + # AntiGraph is a ThinGraph so all edges have weight 1 + return len(nbrs) + (n in nbrs) + + @cached_property + def degree(self): + """Returns an iterator for (node, degree) and degree for single node. + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + deg: + Degree of the node, if a single node is passed as argument. + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). 
+ + See Also + -------- + degree + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1])) + [(0, 1), (1, 2)] + + """ + return self.AntiDegreeView(self) + + def adjacency(self): + """Returns an iterator of (node, adjacency set) tuples for all nodes + in the dense graph. + + This is the fastest way to look at every edge. + For directed graphs, only outgoing adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator of (node, adjacency set) for all nodes in + the graph. + + """ + for n in self._adj: + yield (n, set(self._adj) - set(self._adj[n]) - {n}) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/matching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/matching.py new file mode 100644 index 0000000..dc08919 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/matching.py @@ -0,0 +1,44 @@ +""" +************** +Graph Matching +************** + +Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent +edges; that is, no two edges share a common vertex. + +`Wikipedia: Matching `_ +""" + +import networkx as nx + +__all__ = ["min_maximal_matching"] + + +@nx._dispatchable +def min_maximal_matching(G): + r"""Returns the minimum maximal matching of G. That is, out of all maximal + matchings of the graph G, the smallest is returned. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + min_maximal_matching : set + Returns a set of edges such that no two edges share a common endpoint + and every edge not in the set shares some common endpoint in the set. + Cardinality will be 2*OPT in the worst case. + + Notes + ----- + The algorithm computes an approximate solution for the minimum maximal + cardinality matching problem. The solution is no more than 2 * OPT in size. + Runtime is $O(|E|)$. + + References + ---------- + .. [1] Vazirani, Vijay Approximation Algorithms (2001) + """ + return nx.maximal_matching(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/maxcut.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/maxcut.py new file mode 100644 index 0000000..f4e1da8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/maxcut.py @@ -0,0 +1,143 @@ +import networkx as nx +from networkx.utils.decorators import not_implemented_for, py_random_state + +__all__ = ["randomized_partitioning", "one_exchange"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(1) +@nx._dispatchable(edge_attrs="weight") +def randomized_partitioning(G, seed=None, p=0.5, weight=None): + """Compute a random partitioning of the graph nodes and its cut value. + + A partitioning is calculated by observing each node + and deciding to add it to the partition with probability `p`, + returning a random cut and its corresponding value (the + sum of weights of edges connecting different partitions). + + Parameters + ---------- + G : NetworkX graph + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + p : scalar + Probability for each node to be part of the first partition. + Should be in [0,1] + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + cut_size : scalar + Value of the minimum cut. 
+ + partition : pair of node sets + A partitioning of the nodes that defines a minimum cut. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> cut_size, partition = nx.approximation.randomized_partitioning(G, seed=1) + >>> cut_size + 6 + >>> partition + ({0, 3, 4}, {1, 2}) + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + """ + cut = {node for node in G.nodes() if seed.random() < p} + cut_size = nx.algorithms.cut_size(G, cut, weight=weight) + partition = (cut, G.nodes - cut) + return cut_size, partition + + +def _swap_node_partition(cut, node): + return cut - {node} if node in cut else cut.union({node}) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(2) +@nx._dispatchable(edge_attrs="weight") +def one_exchange(G, initial_cut=None, seed=None, weight=None): + """Compute a partitioning of the graphs nodes and the corresponding cut value. + + Use a greedy one exchange strategy to find a locally maximal cut + and its value, it works by finding the best node (one that gives + the highest gain to the cut value) to add to the current cut + and repeats this process until no improvement can be made. + + Parameters + ---------- + G : networkx Graph + Graph to find a maximum cut for. + + initial_cut : set + Cut to use as a starting point. If not supplied the algorithm + starts with an empty cut. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + cut_value : scalar + Value of the maximum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a maximum cut. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> curr_cut_size, partition = nx.approximation.one_exchange(G, seed=1) + >>> curr_cut_size + 6 + >>> partition + ({0, 2}, {1, 3, 4}) + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + """ + if initial_cut is None: + initial_cut = set() + cut = set(initial_cut) + current_cut_size = nx.algorithms.cut_size(G, cut, weight=weight) + while True: + nodes = list(G.nodes()) + # Shuffling the nodes ensures random tie-breaks in the following call to max + seed.shuffle(nodes) + best_node_to_swap = max( + nodes, + key=lambda v: nx.algorithms.cut_size( + G, _swap_node_partition(cut, v), weight=weight + ), + default=None, + ) + potential_cut = _swap_node_partition(cut, best_node_to_swap) + potential_cut_size = nx.algorithms.cut_size(G, potential_cut, weight=weight) + + if potential_cut_size > current_cut_size: + cut = potential_cut + current_cut_size = potential_cut_size + else: + break + + partition = (cut, G.nodes - cut) + return current_cut_size, partition diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/ramsey.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/ramsey.py new file mode 100644 index 0000000..0552e4a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/ramsey.py @@ -0,0 +1,53 @@ +""" +Ramsey numbers. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +from ...utils import arbitrary_element + +__all__ = ["ramsey_R2"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def ramsey_R2(G): + r"""Compute the largest clique and largest independent set in `G`. 
+ + This can be used to estimate bounds for the 2-color + Ramsey number `R(2;s,t)` for `G`. + + This is a recursive implementation which could run into trouble + for large recursions. Note that self-loop edges are ignored. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + max_pair : (set, set) tuple + Maximum clique, Maximum independent set. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + """ + if not G: + return set(), set() + + node = arbitrary_element(G) + nbrs = (nbr for nbr in nx.all_neighbors(G, node) if nbr != node) + nnbrs = nx.non_neighbors(G, node) + c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy()) + c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy()) + + c_1.add(node) + i_2.add(node) + # Choose the larger of the two cliques and the larger of the two + # independent sets, according to cardinality. + return max(c_1, c_2, key=len), max(i_1, i_2, key=len) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/steinertree.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/steinertree.py new file mode 100644 index 0000000..4bee11c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/steinertree.py @@ -0,0 +1,248 @@ +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = ["metric_closure", "steiner_tree"] + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight", returns_graph=True) +def metric_closure(G, weight="weight"): + """Return the metric closure of a graph. + + The metric closure of a graph *G* is the complete graph in which each edge + is weighted by the shortest path distance between the nodes in *G* . + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + NetworkX graph + Metric closure of the graph `G`. + + """ + M = nx.Graph() + + Gnodes = set(G) + + # check for connected graph while processing first node + all_paths_iter = nx.all_pairs_dijkstra(G, weight=weight) + u, (distance, path) = next(all_paths_iter) + if len(G) != len(distance): + msg = "G is not a connected graph. metric_closure is not defined." + raise nx.NetworkXError(msg) + Gnodes.remove(u) + for v in Gnodes: + M.add_edge(u, v, distance=distance[v], path=path[v]) + + # first node done -- now process the rest + for u, (distance, path) in all_paths_iter: + Gnodes.remove(u) + for v in Gnodes: + M.add_edge(u, v, distance=distance[v], path=path[v]) + + return M + + +def _mehlhorn_steiner_tree(G, terminal_nodes, weight): + paths = nx.multi_source_dijkstra_path(G, terminal_nodes) + + d_1 = {} + s = {} + for v in G.nodes(): + s[v] = paths[v][0] + d_1[(v, s[v])] = len(paths[v]) - 1 + + # G1-G4 names match those from the Mehlhorn 1988 paper. 
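+    # Each original edge {u, v} yields a candidate edge between the nearest
+    # terminals s(u) and s(v), weighted d_1(u, s(u)) + w(u, v) + d_1(v, s(v));
+    # G_1_prime keeps the cheapest candidate for every terminal pair.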
+ G_1_prime = nx.Graph() + for u, v, data in G.edges(data=True): + su, sv = s[u], s[v] + weight_here = d_1[(u, su)] + data.get(weight, 1) + d_1[(v, sv)] + if not G_1_prime.has_edge(su, sv): + G_1_prime.add_edge(su, sv, weight=weight_here) + else: + new_weight = min(weight_here, G_1_prime[su][sv]["weight"]) + G_1_prime.add_edge(su, sv, weight=new_weight) + + G_2 = nx.minimum_spanning_edges(G_1_prime, data=True) + + G_3 = nx.Graph() + for u, v, d in G_2: + path = nx.shortest_path(G, u, v, weight) + for n1, n2 in pairwise(path): + G_3.add_edge(n1, n2) + + G_3_mst = list(nx.minimum_spanning_edges(G_3, data=False)) + if G.is_multigraph(): + G_3_mst = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in G_3_mst + ) + G_4 = G.edge_subgraph(G_3_mst).copy() + _remove_nonterminal_leaves(G_4, terminal_nodes) + return G_4.edges() + + +def _kou_steiner_tree(G, terminal_nodes, weight): + # Compute the metric closure only for terminal nodes + # Create a complete graph H from the metric edges + H = nx.Graph() + unvisited_terminals = set(terminal_nodes) + + # check for connected graph while processing first node + u = unvisited_terminals.pop() + distances, paths = nx.single_source_dijkstra(G, source=u, weight=weight) + if len(G) != len(distances): + msg = "G is not a connected graph." + raise nx.NetworkXError(msg) + for v in unvisited_terminals: + H.add_edge(u, v, distance=distances[v], path=paths[v]) + + # first node done -- now process the rest + for u in unvisited_terminals.copy(): + distances, paths = nx.single_source_dijkstra(G, source=u, weight=weight) + unvisited_terminals.remove(u) + for v in unvisited_terminals: + H.add_edge(u, v, distance=distances[v], path=paths[v]) + + # Use the 'distance' attribute of each edge provided by H. + mst_edges = nx.minimum_spanning_edges(H, weight="distance", data=True) + + # Create an iterator over each edge in each shortest path; repeats are okay + mst_all_edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges) + if G.is_multigraph(): + mst_all_edges = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) + for u, v in mst_all_edges + ) + + # Find the MST again, over this new set of edges + G_S = G.edge_subgraph(mst_all_edges) + T_S = nx.minimum_spanning_edges(G_S, weight="weight", data=False) + + # Leaf nodes that are not terminal might still remain; remove them here + T_H = G.edge_subgraph(T_S).copy() + _remove_nonterminal_leaves(T_H, terminal_nodes) + + return T_H.edges() + + +def _remove_nonterminal_leaves(G, terminals): + terminal_set = set(terminals) + leaves = {n for n in G if len(set(G[n]) - {n}) == 1} + nonterminal_leaves = leaves - terminal_set + + while nonterminal_leaves: + # Removing a node may create new non-terminal leaves, so we limit + # search for candidate non-terminal nodes to neighbors of current + # non-terminal nodes + candidate_leaves = set.union(*(set(G[n]) for n in nonterminal_leaves)) + candidate_leaves -= nonterminal_leaves | terminal_set + # Remove current set of non-terminal nodes + G.remove_nodes_from(nonterminal_leaves) + # Find any new non-terminal nodes from the set of candidates + leaves = {n for n in candidate_leaves if len(set(G[n]) - {n}) == 1} + nonterminal_leaves = leaves - terminal_set + + +ALGORITHMS = { + "kou": _kou_steiner_tree, + "mehlhorn": _mehlhorn_steiner_tree, +} + + +@not_implemented_for("directed") +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def steiner_tree(G, terminal_nodes, weight="weight", method=None): + r"""Return an approximation to the minimum 
Steiner tree of a graph.
+
+    The minimum Steiner tree of `G` with respect to a set of `terminal_nodes`
+    (also *S*) is a tree within `G` that spans those nodes and has minimum
+    size (sum of edge weights) among all such trees.
+
+    The approximation algorithm is specified with the `method` keyword
+    argument. Both available algorithms produce a tree whose weight is
+    within a ``(2 - (2 / l))`` factor of the weight of the optimal Steiner
+    tree, where ``l`` is the minimum number of leaf nodes across all possible
+    Steiner trees.
+
+    * ``"kou"`` [2]_ (runtime $O(|S| |V|^2)$) computes the minimum spanning tree of
+      the subgraph of the metric closure of *G* induced by the terminal nodes,
+      where the metric closure of *G* is the complete graph in which each edge is
+      weighted by the shortest path distance between the nodes in *G*.
+
+    * ``"mehlhorn"`` [3]_ (runtime $O(|E|+|V|\log|V|)$) modifies Kou et al.'s
+      algorithm, beginning by finding the closest terminal node for each
+      non-terminal. This data is used to create a complete graph containing only
+      the terminal nodes, in which each edge is weighted with the shortest path
+      distance between them. The algorithm then proceeds in the same way as Kou
+      et al.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    terminal_nodes : list
+        A list of terminal nodes for which the minimum Steiner tree is
+        to be found.
+
+    weight : string (default = 'weight')
+        Use the edge attribute specified by this string as the edge weight.
+        Any edge attribute not present defaults to 1.
+
+    method : string, optional (default = 'mehlhorn')
+        The algorithm to use to approximate the Steiner tree.
+        Supported options: 'kou', 'mehlhorn'.
+        Other inputs produce a ValueError.
+
+    Returns
+    -------
+    NetworkX graph
+        Approximation to the minimum Steiner tree of `G` induced by
+        `terminal_nodes`.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If `G` is directed.
+
+    ValueError
+        If the specified `method` is not supported.
+
+    Notes
+    -----
+    For multigraphs, the edge between two nodes with minimum weight is the
+    edge put into the Steiner tree.
+
+
+    References
+    ----------
+    .. [1] Steiner_tree_problem on Wikipedia.
+       https://en.wikipedia.org/wiki/Steiner_tree_problem
+    .. [2] Kou, L., G. Markowsky, and L. Berman. 1981.
+       ‘A Fast Algorithm for Steiner Trees’.
+       Acta Informatica 15 (2): 141–45.
+       https://doi.org/10.1007/BF00288961.
+    .. [3] Mehlhorn, Kurt. 1988.
+       ‘A Faster Approximation Algorithm for the Steiner Problem in Graphs’.
+       Information Processing Letters 27 (3): 125–28.
+       https://doi.org/10.1016/0020-0190(88)90066-X.
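+
+    A usage sketch with unit edge weights (illustrative only):
+
+    >>> G = nx.cycle_graph(5)
+    >>> T = nx.approximation.steiner_tree(G, [0, 2])
+    >>> sorted(T.edges)
+    [(0, 1), (1, 2)]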
+ """ + if method is None: + method = "mehlhorn" + + try: + algo = ALGORITHMS[method] + except KeyError as e: + raise ValueError(f"{method} is not a valid choice for an algorithm.") from e + + edges = algo(G, terminal_nodes, weight) + # For multigraph we should add the minimal weight edge keys + if G.is_multigraph(): + edges = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges + ) + T = G.edge_subgraph(edges) + return T diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b1f63d1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-312.pyc new file mode 100644 index 0000000..008b3b7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-312.pyc new file mode 100644 index 0000000..be73c6b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-312.pyc new file mode 100644 index 0000000..586f21b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_density.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_density.cpython-312.pyc new file mode 100644 index 0000000..abc1758 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_density.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-312.pyc new file mode 100644 index 0000000..8172c9d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-312.pyc new file mode 100644 index 0000000..6323a73 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-312.pyc new file mode 100644 index 0000000..db785cb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-312.pyc new file mode 100644 index 0000000..ce3d8b4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-312.pyc new file mode 100644 index 0000000..18824e5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-312.pyc new file mode 100644 index 0000000..3009c3b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-312.pyc new file mode 100644 index 0000000..adde4dd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-312.pyc new file mode 100644 index 0000000..5d07e2b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-312.pyc new file mode 100644 index 0000000..a6a7b59 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-312.pyc new file mode 100644 index 0000000..4ee18de Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py new file mode 100644 index 0000000..5eab5c1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py @@ -0,0 +1,41 @@ +import networkx as nx +from networkx.algorithms.approximation import average_clustering + +# This approximation has to be exact in regular graphs +# with no triangles or with all possible triangles. + + +def test_petersen(): + # Actual coefficient is 0 + G = nx.petersen_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_petersen_seed(): + # Actual coefficient is 0 + G = nx.petersen_graph() + assert average_clustering(G, trials=len(G) // 2, seed=1) == nx.average_clustering(G) + + +def test_tetrahedral(): + # Actual coefficient is 1 + G = nx.tetrahedral_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_dodecahedral(): + # Actual coefficient is 0 + G = nx.dodecahedral_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_empty(): + G = nx.empty_graph(5) + assert average_clustering(G, trials=len(G) // 2) == 0 + + +def test_complete(): + G = nx.complete_graph(5) + assert average_clustering(G, trials=len(G) // 2) == 1 + G = nx.complete_graph(7) + assert average_clustering(G, trials=len(G) // 2) == 1 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_clique.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_clique.py new file mode 100644 index 0000000..b40dcb9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_clique.py @@ -0,0 +1,112 @@ +"""Unit tests for the :mod:`networkx.algorithms.approximation.clique` module.""" + +import networkx as nx +from networkx.algorithms.approximation import ( + clique_removal, + large_clique_size, + max_clique, + maximum_independent_set, +) + + +def is_independent_set(G, nodes): + """Returns True if and only if `nodes` is a clique in `G`. + + `G` is a NetworkX graph. `nodes` is an iterable of nodes in + `G`. + + """ + return G.subgraph(nodes).number_of_edges() == 0 + + +def is_clique(G, nodes): + """Returns True if and only if `nodes` is an independent set + in `G`. + + `G` is an undirected simple graph. `nodes` is an iterable of + nodes in `G`. + + """ + H = G.subgraph(nodes) + n = len(H) + return H.number_of_edges() == n * (n - 1) // 2 + + +class TestCliqueRemoval: + """Unit tests for the + :func:`~networkx.algorithms.approximation.clique_removal` function. + + """ + + def test_trivial_graph(self): + G = nx.trivial_graph() + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + # In fact, we should only have 1-cliques, that is, singleton nodes. 
+ assert all(len(clique) == 1 for clique in cliques) + + def test_complete_graph(self): + G = nx.complete_graph(10) + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + + def test_barbell_graph(self): + G = nx.barbell_graph(10, 5) + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + + +class TestMaxClique: + """Unit tests for the :func:`networkx.algorithms.approximation.max_clique` + function. + + """ + + def test_null_graph(self): + G = nx.null_graph() + assert len(max_clique(G)) == 0 + + def test_complete_graph(self): + graph = nx.complete_graph(30) + # this should return the entire graph + mc = max_clique(graph) + assert 30 == len(mc) + + def test_maximal_by_cardinality(self): + """Tests that the maximal clique is computed according to maximum + cardinality of the sets. + + For more information, see pull request #1531. + + """ + G = nx.complete_graph(5) + G.add_edge(4, 5) + clique = max_clique(G) + assert len(clique) > 1 + + G = nx.lollipop_graph(30, 2) + clique = max_clique(G) + assert len(clique) > 2 + + +def test_large_clique_size(): + G = nx.complete_graph(9) + nx.add_cycle(G, [9, 10, 11]) + G.add_edge(8, 9) + G.add_edge(1, 12) + G.add_node(13) + + assert large_clique_size(G) == 9 + G.remove_node(5) + assert large_clique_size(G) == 8 + G.remove_edge(2, 3) + assert large_clique_size(G) == 7 + + +def test_independent_set(): + # smoke test + G = nx.Graph() + assert len(maximum_independent_set(G)) == 0 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py new file mode 100644 index 0000000..887db20 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py @@ -0,0 +1,199 @@ +import pytest + +import networkx as nx +from networkx.algorithms import approximation as approx + + +def test_global_node_connectivity(): + # Figure 1 chapter on Connectivity + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + assert 2 == approx.local_node_connectivity(G, 1, 11) + assert 2 == approx.node_connectivity(G) + assert 2 == approx.node_connectivity(G, 1, 11) + + +def test_white_harary1(): + # Figure 1b white and harary (2001) + # A graph with high adhesion (edge connectivity) and low cohesion + # (node connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + assert 1 == approx.node_connectivity(G) + + +def test_complete_graphs(): + for n in range(5, 25, 5): + G = nx.complete_graph(n) + assert n - 1 == approx.node_connectivity(G) + assert n - 1 == approx.node_connectivity(G, 0, 3) + + +def test_empty_graphs(): + for k in range(5, 25, 5): + G = nx.empty_graph(k) + assert 0 == approx.node_connectivity(G) + assert 0 == approx.node_connectivity(G, 0, 3) + + +def test_petersen(): + G = nx.petersen_graph() + assert 3 == approx.node_connectivity(G) + assert 3 == approx.node_connectivity(G, 0, 5) + + +# 
Approximation fails with tutte graph +# def test_tutte(): +# G = nx.tutte_graph() +# assert_equal(3, approx.node_connectivity(G)) + + +def test_dodecahedral(): + G = nx.dodecahedral_graph() + assert 3 == approx.node_connectivity(G) + assert 3 == approx.node_connectivity(G, 0, 5) + + +def test_octahedral(): + G = nx.octahedral_graph() + assert 4 == approx.node_connectivity(G) + assert 4 == approx.node_connectivity(G, 0, 5) + + +# Approximation can fail with icosahedral graph depending +# on iteration order. +# def test_icosahedral(): +# G=nx.icosahedral_graph() +# assert_equal(5, approx.node_connectivity(G)) +# assert_equal(5, approx.node_connectivity(G, 0, 5)) + + +def test_only_source(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, s=0) + + +def test_only_target(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, t=0) + + +def test_missing_source(): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 10, 1) + + +def test_missing_target(): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 1, 10) + + +def test_source_equals_target(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.local_node_connectivity, G, 0, 0) + + +def test_directed_node_connectivity(): + G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction + D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges + assert 1 == approx.node_connectivity(G) + assert 1 == approx.node_connectivity(G, 1, 4) + assert 2 == approx.node_connectivity(D) + assert 2 == approx.node_connectivity(D, 1, 4) + + +class TestAllPairsNodeConnectivityApprox: + @classmethod + def setup_class(cls): + cls.path = nx.path_graph(7) + cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph()) + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.gnp = nx.gnp_random_graph(30, 0.1) + cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True) + cls.K20 = nx.complete_graph(20) + cls.K10 = nx.complete_graph(10) + cls.K5 = nx.complete_graph(5) + cls.G_list = [ + cls.path, + cls.directed_path, + cls.cycle, + cls.directed_cycle, + cls.gnp, + cls.directed_gnp, + cls.K10, + cls.K5, + cls.K20, + ] + + def test_cycles(self): + K_undir = approx.all_pairs_node_connectivity(self.cycle) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 2 + K_dir = approx.all_pairs_node_connectivity(self.directed_cycle) + for source in K_dir: + for target, k in K_dir[source].items(): + assert k == 1 + + def test_complete(self): + for G in [self.K10, self.K5, self.K20]: + K = approx.all_pairs_node_connectivity(G) + for source in K: + for target, k in K[source].items(): + assert k == len(G) - 1 + + def test_paths(self): + K_undir = approx.all_pairs_node_connectivity(self.path) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 1 + K_dir = approx.all_pairs_node_connectivity(self.directed_path) + for source in K_dir: + for target, k in K_dir[source].items(): + if source < target: + assert k == 1 + else: + assert k == 0 + + def test_cutoff(self): + for G in [self.K10, self.K5, self.K20]: + for mp in [2, 3, 4]: + paths = approx.all_pairs_node_connectivity(G, cutoff=mp) + for source in paths: + for target, K in paths[source].items(): + assert K == mp + + def test_all_pairs_connectivity_nbunch(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + C = approx.all_pairs_node_connectivity(G, 
nbunch=nbunch) + assert len(C) == len(nbunch) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_density.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_density.py new file mode 100644 index 0000000..5cf1dc5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_density.py @@ -0,0 +1,138 @@ +import pytest + +import networkx as nx +import networkx.algorithms.approximation as approx + + +def close_cliques_example(d=12, D=300, h=24, k=2): + """ + Hard example from Harb, Elfarouk, Kent Quanrud, and Chandra Chekuri. + "Faster and scalable algorithms for densest subgraph and decomposition." + Advances in Neural Information Processing Systems 35 (2022): 26966-26979. + """ + Kh = nx.complete_graph(h) + KdD = nx.complete_bipartite_graph(d, D) + G = nx.disjoint_union_all([KdD] + [Kh for _ in range(k)]) + best_density = d * D / (d + D) # of the complete bipartite graph + return G, best_density, set(KdD.nodes) + + +@pytest.mark.parametrize("iterations", (1, 3)) +@pytest.mark.parametrize("n", range(4, 7)) +@pytest.mark.parametrize("method", ("greedy++", "fista")) +def test_star(n, iterations, method): + if method == "fista": + pytest.importorskip("numpy") + + G = nx.star_graph(n) + # The densest subgraph of a star network is the entire graph. + # The peeling algorithm would peel all the vertices with degree 1, + # and so should discover the densest subgraph in one iteration! + d, S = approx.densest_subgraph(G, iterations=iterations, method=method) + + assert d == pytest.approx(G.number_of_edges() / G.number_of_nodes()) + assert S == set(G) # The entire graph! + + +@pytest.mark.parametrize("method", ("greedy++", "fista")) +def test_greedy_plus_plus_complete_graph(method): + if method == "fista": + pytest.importorskip("numpy") + + G = nx.complete_graph(4) + # The density of a complete graph network is the entire graph: C(4, 2)/4 + # where C(n, 2) is n*(n-1)//2. The peeling algorithm would find + # the densest subgraph in one iteration! + d, S = approx.densest_subgraph(G, iterations=1, method=method) + + assert d == pytest.approx(6 / 4) # The density, 4/5=0.8. + assert S == {0, 1, 2, 3} # The entire graph! + + +def test_greedy_plus_plus_close_cliques(): + G, best_density, densest_set = close_cliques_example() + # NOTE: iterations=185 fails to ID the densest subgraph + greedy_pp, S_pp = approx.densest_subgraph(G, iterations=186, method="greedy++") + + assert greedy_pp == pytest.approx(best_density) + assert S_pp == densest_set + + +def test_fista_close_cliques(): + pytest.importorskip("numpy") + G, best_density, best_set = close_cliques_example() + # NOTE: iterations=12 fails to ID the densest subgraph + density, dense_set = approx.densest_subgraph(G, iterations=13, method="fista") + + assert density == pytest.approx(best_density) + assert dense_set == best_set + + +def bipartite_and_clique_example(d=5, D=200, k=2): + """ + Hard example from: Boob, Digvijay, Yu Gao, Richard Peng, Saurabh Sawlani, + Charalampos Tsourakakis, Di Wang, and Junxing Wang. "Flowless: Extracting + densest subgraphs without flow computations." In Proceedings of The Web + Conference 2020, pp. 573-583. 2020. 
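+
+    A single peeling round returns the whole graph here; additional
+    Greedy++ rounds (or FISTA) are needed to isolate the planted bipartite
+    block, as the tests below verify.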
+ """ + B = nx.complete_bipartite_graph(d, D) + H = [nx.complete_graph(d + 2) for _ in range(k)] + G = nx.disjoint_union_all([B] + H) + + best_density = d * D / (d + D) # of the complete bipartite graph + correct_one_round_density = (2 * d * D + (d + 1) * (d + 2) * k) / ( + 2 * d + 2 * D + 2 * k * (d + 2) + ) + best_subgraph = set(B.nodes) + return G, best_density, best_subgraph, correct_one_round_density + + +def test_greedy_plus_plus_bipartite_and_clique(): + G, best_density, best_subgraph, correct_one_iter_density = ( + bipartite_and_clique_example() + ) + one_round_density, S_one = approx.densest_subgraph( + G, iterations=1, method="greedy++" + ) + assert one_round_density == pytest.approx(correct_one_iter_density) + assert S_one == set(G.nodes) + + ten_round_density, S_ten = approx.densest_subgraph( + G, iterations=10, method="greedy++" + ) + assert ten_round_density == pytest.approx(best_density) + assert S_ten == best_subgraph + + +def test_fista_bipartite_and_clique(): + pytest.importorskip("numpy") + G, best_density, best_subgraph, _ = bipartite_and_clique_example() + + ten_round_density, S_ten = approx.densest_subgraph(G, iterations=10, method="fista") + assert ten_round_density == pytest.approx(best_density) + assert S_ten == best_subgraph + + +def test_fista_big_dataset(): + pytest.importorskip("numpy") + G, best_density, best_subgraph = close_cliques_example(d=30, D=2000, h=60, k=20) + + # Note: iterations=12 fails to identify densest subgraph + density, dense_set = approx.densest_subgraph(G, iterations=13, method="fista") + + assert density == pytest.approx(best_density) + assert dense_set == best_subgraph + + +@pytest.mark.parametrize("iterations", (1, 3)) +def test_greedy_plus_plus_edgeless_cornercase(iterations): + G = nx.Graph() + assert approx.densest_subgraph(G, iterations=iterations, method="greedy++") == ( + 0, + set(), + ) + G.add_nodes_from(range(4)) + assert approx.densest_subgraph(G, iterations=iterations, method="greedy++") == ( + 0, + set(), + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py new file mode 100644 index 0000000..3809a8f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py @@ -0,0 +1,59 @@ +"""Unit tests for the :mod:`networkx.algorithms.approximation.distance_measures` module.""" + +import pytest + +import networkx as nx +from networkx.algorithms.approximation import diameter + + +class TestDiameter: + """Unit tests for the approximate diameter function + :func:`~networkx.algorithms.approximation.distance_measures.diameter`. + """ + + def test_null_graph(self): + """Test empty graph.""" + G = nx.null_graph() + with pytest.raises( + nx.NetworkXError, match="Expected non-empty NetworkX graph!" 
+ ): + diameter(G) + + def test_undirected_non_connected(self): + """Test an undirected disconnected graph.""" + graph = nx.path_graph(10) + graph.remove_edge(3, 4) + with pytest.raises(nx.NetworkXError, match="Graph not connected."): + diameter(graph) + + def test_directed_non_strongly_connected(self): + """Test a directed non strongly connected graph.""" + graph = nx.path_graph(10, create_using=nx.DiGraph()) + with pytest.raises(nx.NetworkXError, match="DiGraph not strongly connected."): + diameter(graph) + + def test_complete_undirected_graph(self): + """Test a complete undirected graph.""" + graph = nx.complete_graph(10) + assert diameter(graph) == 1 + + def test_complete_directed_graph(self): + """Test a complete directed graph.""" + graph = nx.complete_graph(10, create_using=nx.DiGraph()) + assert diameter(graph) == 1 + + def test_undirected_path_graph(self): + """Test an undirected path graph with 10 nodes.""" + graph = nx.path_graph(10) + assert diameter(graph) == 9 + + def test_directed_path_graph(self): + """Test a directed path graph with 10 nodes.""" + graph = nx.path_graph(10).to_directed() + assert diameter(graph) == 9 + + def test_single_node(self): + """Test a graph which contains just a node.""" + graph = nx.Graph() + graph.add_node(1) + assert diameter(graph) == 0 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py new file mode 100644 index 0000000..6b90d85 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py @@ -0,0 +1,78 @@ +import pytest + +import networkx as nx +from networkx.algorithms.approximation import ( + min_edge_dominating_set, + min_weighted_dominating_set, +) + + +class TestMinWeightDominatingSet: + def test_min_weighted_dominating_set(self): + graph = nx.Graph() + graph.add_edge(1, 2) + graph.add_edge(1, 5) + graph.add_edge(2, 3) + graph.add_edge(2, 5) + graph.add_edge(3, 4) + graph.add_edge(3, 6) + graph.add_edge(5, 6) + + vertices = {1, 2, 3, 4, 5, 6} + # due to ties, this might be hard to test tight bounds + dom_set = min_weighted_dominating_set(graph) + for vertex in vertices - dom_set: + neighbors = set(graph.neighbors(vertex)) + assert len(neighbors & dom_set) > 0, "Non dominating set found!" + + def test_star_graph(self): + """Tests that an approximate dominating set for the star graph, + even when the center node does not have the smallest integer + label, gives just the center node. + + For more information, see #1527. + + """ + # Create a star graph in which the center node has the highest + # label instead of the lowest. + G = nx.star_graph(10) + G = nx.relabel_nodes(G, {0: 9, 9: 0}) + assert min_weighted_dominating_set(G) == {9} + + def test_null_graph(self): + """Tests that the unique dominating set for the null graph is an empty set""" + G = nx.Graph() + assert min_weighted_dominating_set(G) == set() + + def test_min_edge_dominating_set(self): + graph = nx.path_graph(5) + dom_set = min_edge_dominating_set(graph) + + # this is a crappy way to test, but good enough for now. + for edge in graph.edges(): + if edge in dom_set: + continue + else: + u, v = edge + found = False + for dom_edge in dom_set: + found |= u == dom_edge[0] or u == dom_edge[1] + assert found, "Non adjacent edge found!" + + graph = nx.complete_graph(10) + dom_set = min_edge_dominating_set(graph) + + # this is a crappy way to test, but good enough for now. 
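+        # Every edge outside dom_set must share an endpoint with some
+        # dominating edge; otherwise dom_set is not edge dominating.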
+ for edge in graph.edges(): + if edge in dom_set: + continue + else: + u, v = edge + found = False + for dom_edge in dom_set: + found |= u == dom_edge[0] or u == dom_edge[1] + assert found, "Non adjacent edge found!" + + graph = nx.Graph() # empty Networkx graph + with pytest.raises(ValueError, match="Expected non-empty NetworkX graph!"): + min_edge_dominating_set(graph) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py new file mode 100644 index 0000000..65ba802 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py @@ -0,0 +1,303 @@ +# Test for approximation to k-components algorithm +import pytest + +import networkx as nx +from networkx.algorithms.approximation import k_components +from networkx.algorithms.approximation.kcomponents import _AntiGraph, _same + + +def build_k_number_dict(k_components): + k_num = {} + for k, comps in sorted(k_components.items()): + for comp in comps: + for node in comp: + k_num[node] = k + return k_num + + +## +# Some nice synthetic graphs +## + + +def graph_example_1(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [ + (labels[(0, 0)], labels[(1, 0)]), + (labels[(0, 4)], labels[(1, 4)]), + (labels[(3, 0)], labels[(4, 0)]), + (labels[(3, 4)], labels[(4, 4)]), + ]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + G.add_edge(new_node + 16, new_node + 5) + return G + + +def torrents_and_ferraro_graph(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # Commenting this makes the graph not biconnected !! 
+        # This stupid mistake made one reviewer very angry :P
+        G.add_edge(new_node + 16, new_node + 8)
+
+    for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]:
+        new_node = G.order() + 1
+        # Petersen graph is triconnected
+        P = nx.petersen_graph()
+        G = nx.disjoint_union(G, P)
+        # Add two edges between the grid and P
+        G.add_edge(new_node + 1, nodes[0])
+        G.add_edge(new_node, nodes[1])
+        # K5 is 4-connected
+        K = nx.complete_graph(5)
+        G = nx.disjoint_union(G, K)
+        # Add three edges between P and K5
+        G.add_edge(new_node + 2, new_node + 11)
+        G.add_edge(new_node + 3, new_node + 12)
+        G.add_edge(new_node + 4, new_node + 13)
+        # Add another K5 sharing two nodes
+        G = nx.disjoint_union(G, K)
+        nbrs = G[new_node + 10]
+        G.remove_node(new_node + 10)
+        for nbr in nbrs:
+            G.add_edge(new_node + 17, nbr)
+        nbrs2 = G[new_node + 9]
+        G.remove_node(new_node + 9)
+        for nbr in nbrs2:
+            G.add_edge(new_node + 18, nbr)
+    return G
+
+
+# Helper function
+
+
+def _check_connectivity(G):
+    result = k_components(G)
+    for k, components in result.items():
+        if k < 3:
+            continue
+        for component in components:
+            C = G.subgraph(component)
+            K = nx.node_connectivity(C)
+            assert K >= k
+
+
+def test_torrents_and_ferraro_graph():
+    G = torrents_and_ferraro_graph()
+    _check_connectivity(G)
+
+
+def test_example_1():
+    G = graph_example_1()
+    _check_connectivity(G)
+
+
+def test_karate_0():
+    G = nx.karate_club_graph()
+    _check_connectivity(G)
+
+
+def test_karate_1():
+    karate_k_num = {
+        0: 4,
+        1: 4,
+        2: 4,
+        3: 4,
+        4: 3,
+        5: 3,
+        6: 3,
+        7: 4,
+        8: 4,
+        9: 2,
+        10: 3,
+        11: 1,
+        12: 2,
+        13: 4,
+        14: 2,
+        15: 2,
+        16: 2,
+        17: 2,
+        18: 2,
+        19: 3,
+        20: 2,
+        21: 2,
+        22: 2,
+        23: 3,
+        24: 3,
+        25: 3,
+        26: 2,
+        27: 3,
+        28: 3,
+        29: 3,
+        30: 4,
+        31: 3,
+        32: 4,
+        33: 4,
+    }
+    approx_karate_k_num = karate_k_num.copy()
+    approx_karate_k_num[24] = 2
+    approx_karate_k_num[25] = 2
+    G = nx.karate_club_graph()
+    k_comps = k_components(G)
+    k_num = build_k_number_dict(k_comps)
+    assert k_num in (karate_k_num, approx_karate_k_num)
+
+
+def test_example_1_detail_3_and_4():
+    G = graph_example_1()
+    result = k_components(G)
+    # In this example graph there are 8 3-components, 4 with 15 nodes
+    # and 4 with 5 nodes.
+    assert len(result[3]) == 8
+    assert len([c for c in result[3] if len(c) == 15]) == 4
+    assert len([c for c in result[3] if len(c) == 5]) == 4
+    # There are also 8 4-components all with 5 nodes.
+    assert len(result[4]) == 8
+    assert all(len(c) == 5 for c in result[4])
+    # Finally, check that the detected k-components actually have node
+    # connectivity >= k.
+ for k, components in result.items(): + if k < 3: + continue + for component in components: + K = nx.node_connectivity(G.subgraph(component)) + assert K >= k + + +def test_directed(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.gnp_random_graph(10, 0.4, directed=True) + kc = k_components(G) + + +def test_same(): + equal = {"A": 2, "B": 2, "C": 2} + slightly_different = {"A": 2, "B": 1, "C": 2} + different = {"A": 2, "B": 8, "C": 18} + assert _same(equal) + assert not _same(slightly_different) + assert _same(slightly_different, tol=1) + assert not _same(different) + assert not _same(different, tol=4) + + +class TestAntiGraph: + @classmethod + def setup_class(cls): + cls.Gnp = nx.gnp_random_graph(20, 0.8, seed=42) + cls.Anp = _AntiGraph(nx.complement(cls.Gnp)) + cls.Gd = nx.davis_southern_women_graph() + cls.Ad = _AntiGraph(nx.complement(cls.Gd)) + cls.Gk = nx.karate_club_graph() + cls.Ak = _AntiGraph(nx.complement(cls.Gk)) + cls.GA = [(cls.Gnp, cls.Anp), (cls.Gd, cls.Ad), (cls.Gk, cls.Ak)] + + def test_size(self): + for G, A in self.GA: + n = G.order() + s = len(list(G.edges())) + len(list(A.edges())) + assert s == (n * (n - 1)) / 2 + + def test_degree(self): + for G, A in self.GA: + assert sorted(G.degree()) == sorted(A.degree()) + + def test_core_number(self): + for G, A in self.GA: + assert nx.core_number(G) == nx.core_number(A) + + def test_connected_components(self): + # ccs are same unless isolated nodes or any node has degree=len(G)-1 + # graphs in self.GA avoid this problem + for G, A in self.GA: + gc = [set(c) for c in nx.connected_components(G)] + ac = [set(c) for c in nx.connected_components(A)] + for comp in ac: + assert comp in gc + + def test_adj(self): + for G, A in self.GA: + for n, nbrs in G.adj.items(): + a_adj = sorted((n, sorted(ad)) for n, ad in A.adj.items()) + g_adj = sorted((n, sorted(ad)) for n, ad in G.adj.items()) + assert a_adj == g_adj + + def test_adjacency(self): + for G, A in self.GA: + a_adj = list(A.adjacency()) + for n, nbrs in G.adjacency(): + assert (n, set(nbrs)) in a_adj + + def test_neighbors(self): + for G, A in self.GA: + node = list(G.nodes())[0] + assert set(G.neighbors(node)) == set(A.neighbors(node)) + + def test_node_not_in_graph(self): + for G, A in self.GA: + node = "non_existent_node" + pytest.raises(nx.NetworkXError, A.neighbors, node) + pytest.raises(nx.NetworkXError, G.neighbors, node) + + def test_degree_thingraph(self): + for G, A in self.GA: + node = list(G.nodes())[0] + nodes = list(G.nodes())[1:4] + assert G.degree(node) == A.degree(node) + assert sum(d for n, d in G.degree()) == sum(d for n, d in A.degree()) + # AntiGraph is a ThinGraph, so all the weights are 1 + assert sum(d for n, d in A.degree()) == sum( + d for n, d in A.degree(weight="weight") + ) + assert sum(d for n, d in G.degree(nodes)) == sum( + d for n, d in A.degree(nodes) + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_matching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_matching.py new file mode 100644 index 0000000..f50da3d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_matching.py @@ -0,0 +1,8 @@ +import networkx as nx +import networkx.algorithms.approximation as a + + +def test_min_maximal_matching(): + # smoke test + G = nx.Graph() + assert len(a.min_maximal_matching(G)) == 0 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py new file mode 100644 index 0000000..ef04244 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py @@ -0,0 +1,94 @@ +import random + +import pytest + +import networkx as nx +from networkx.algorithms.approximation import maxcut + + +@pytest.mark.parametrize( + "f", (nx.approximation.randomized_partitioning, nx.approximation.one_exchange) +) +@pytest.mark.parametrize("graph_constructor", (nx.DiGraph, nx.MultiGraph)) +def test_raises_on_directed_and_multigraphs(f, graph_constructor): + G = graph_constructor([(0, 1), (1, 2)]) + with pytest.raises(nx.NetworkXNotImplemented): + f(G) + + +def _is_valid_cut(G, set1, set2): + union = set1.union(set2) + assert union == set(G.nodes) + assert len(set1) + len(set2) == G.number_of_nodes() + + +def _cut_is_locally_optimal(G, cut_size, set1): + # test if cut can be locally improved + for i, node in enumerate(set1): + cut_size_without_node = nx.algorithms.cut_size( + G, set1 - {node}, weight="weight" + ) + assert cut_size_without_node <= cut_size + + +def test_random_partitioning(): + G = nx.complete_graph(5) + _, (set1, set2) = maxcut.randomized_partitioning(G, seed=5) + _is_valid_cut(G, set1, set2) + + +def test_random_partitioning_all_to_one(): + G = nx.complete_graph(5) + _, (set1, set2) = maxcut.randomized_partitioning(G, p=1) + _is_valid_cut(G, set1, set2) + assert len(set1) == G.number_of_nodes() + assert len(set2) == 0 + + +def test_one_exchange_basic(): + G = nx.complete_graph(5) + random.seed(5) + for u, v, w in G.edges(data=True): + w["weight"] = random.randrange(-100, 100, 1) / 10 + + initial_cut = set(random.sample(sorted(G.nodes()), k=5)) + cut_size, (set1, set2) = maxcut.one_exchange( + G, initial_cut, weight="weight", seed=5 + ) + + _is_valid_cut(G, set1, set2) + _cut_is_locally_optimal(G, cut_size, set1) + + +def test_one_exchange_optimal(): + # Greedy one exchange should find the optimal solution for this graph (14) + G = nx.Graph() + G.add_edge(1, 2, weight=3) + G.add_edge(1, 3, weight=3) + G.add_edge(1, 4, weight=3) + G.add_edge(1, 5, weight=3) + G.add_edge(2, 3, weight=5) + + cut_size, (set1, set2) = maxcut.one_exchange(G, weight="weight", seed=5) + + _is_valid_cut(G, set1, set2) + _cut_is_locally_optimal(G, cut_size, set1) + # check global optimality + assert cut_size == 14 + + +def test_negative_weights(): + G = nx.complete_graph(5) + random.seed(5) + for u, v, w in G.edges(data=True): + w["weight"] = -1 * random.random() + + initial_cut = set(random.sample(sorted(G.nodes()), k=5)) + cut_size, (set1, set2) = maxcut.one_exchange(G, initial_cut, weight="weight") + + # make sure it is a valid cut + _is_valid_cut(G, set1, set2) + # check local optimality + _cut_is_locally_optimal(G, cut_size, set1) + # test that all nodes are in the same partition + assert len(set1) == len(G.nodes) or len(set2) == len(G.nodes) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py new file mode 100644 index 0000000..32fe1fb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py @@ -0,0 +1,31 @@ +import networkx as nx +import networkx.algorithms.approximation as apxa + + +def test_ramsey(): + # this should only find the complete graph + graph = nx.complete_graph(10) + c, i = apxa.ramsey_R2(graph) + cdens = 
nx.density(graph.subgraph(c)) + assert cdens == 1.0, "clique not correctly found by ramsey!" + idens = nx.density(graph.subgraph(i)) + assert idens == 0.0, "i-set not correctly found by ramsey!" + + # this trivial graph has no cliques. should just find i-sets + graph = nx.trivial_graph() + c, i = apxa.ramsey_R2(graph) + assert c == {0}, "clique not correctly found by ramsey!" + assert i == {0}, "i-set not correctly found by ramsey!" + + graph = nx.barbell_graph(10, 5, nx.Graph()) + c, i = apxa.ramsey_R2(graph) + cdens = nx.density(graph.subgraph(c)) + assert cdens == 1.0, "clique not correctly found by ramsey!" + idens = nx.density(graph.subgraph(i)) + assert idens == 0.0, "i-set not correctly found by ramsey!" + + # add self-loops and test again + graph.add_edges_from([(n, n) for n in range(0, len(graph), 2)]) + cc, ii = apxa.ramsey_R2(graph) + assert cc == c + assert ii == i diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py new file mode 100644 index 0000000..246e6f8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py @@ -0,0 +1,265 @@ +import pytest + +import networkx as nx +from networkx.algorithms.approximation.steinertree import ( + _remove_nonterminal_leaves, + metric_closure, + steiner_tree, +) +from networkx.utils import edges_equal + + +class TestSteinerTree: + @classmethod + def setup_class(cls): + G1 = nx.Graph() + G1.add_edge(1, 2, weight=10) + G1.add_edge(2, 3, weight=10) + G1.add_edge(3, 4, weight=10) + G1.add_edge(4, 5, weight=10) + G1.add_edge(5, 6, weight=10) + G1.add_edge(2, 7, weight=1) + G1.add_edge(7, 5, weight=1) + + G2 = nx.Graph() + G2.add_edge(0, 5, weight=6) + G2.add_edge(1, 2, weight=2) + G2.add_edge(1, 5, weight=3) + G2.add_edge(2, 4, weight=4) + G2.add_edge(3, 5, weight=5) + G2.add_edge(4, 5, weight=1) + + G3 = nx.Graph() + G3.add_edge(1, 2, weight=8) + G3.add_edge(1, 9, weight=3) + G3.add_edge(1, 8, weight=6) + G3.add_edge(1, 10, weight=2) + G3.add_edge(1, 14, weight=3) + G3.add_edge(2, 3, weight=6) + G3.add_edge(3, 4, weight=3) + G3.add_edge(3, 10, weight=2) + G3.add_edge(3, 11, weight=1) + G3.add_edge(4, 5, weight=1) + G3.add_edge(4, 11, weight=1) + G3.add_edge(5, 6, weight=4) + G3.add_edge(5, 11, weight=2) + G3.add_edge(5, 12, weight=1) + G3.add_edge(5, 13, weight=3) + G3.add_edge(6, 7, weight=2) + G3.add_edge(6, 12, weight=3) + G3.add_edge(6, 13, weight=1) + G3.add_edge(7, 8, weight=3) + G3.add_edge(7, 9, weight=3) + G3.add_edge(7, 11, weight=5) + G3.add_edge(7, 13, weight=2) + G3.add_edge(7, 14, weight=4) + G3.add_edge(8, 9, weight=2) + G3.add_edge(9, 14, weight=1) + G3.add_edge(10, 11, weight=2) + G3.add_edge(10, 14, weight=1) + G3.add_edge(11, 12, weight=1) + G3.add_edge(11, 14, weight=7) + G3.add_edge(12, 14, weight=3) + G3.add_edge(12, 15, weight=1) + G3.add_edge(13, 14, weight=4) + G3.add_edge(13, 15, weight=1) + G3.add_edge(14, 15, weight=2) + + cls.G1 = G1 + cls.G2 = G2 + cls.G3 = G3 + cls.G1_term_nodes = [1, 2, 3, 4, 5] + cls.G2_term_nodes = [0, 2, 3] + cls.G3_term_nodes = [1, 3, 5, 6, 8, 10, 11, 12, 13] + + cls.methods = ["kou", "mehlhorn"] + + def test_connected_metric_closure(self): + G = self.G1.copy() + G.add_node(100) + pytest.raises(nx.NetworkXError, metric_closure, G) + + def test_metric_closure(self): + M = metric_closure(self.G1) + mc = [ + (1, 2, {"distance": 10, "path": [1, 2]}), + (1, 3, {"distance": 20, "path": [1, 2, 
3]}), + (1, 4, {"distance": 22, "path": [1, 2, 7, 5, 4]}), + (1, 5, {"distance": 12, "path": [1, 2, 7, 5]}), + (1, 6, {"distance": 22, "path": [1, 2, 7, 5, 6]}), + (1, 7, {"distance": 11, "path": [1, 2, 7]}), + (2, 3, {"distance": 10, "path": [2, 3]}), + (2, 4, {"distance": 12, "path": [2, 7, 5, 4]}), + (2, 5, {"distance": 2, "path": [2, 7, 5]}), + (2, 6, {"distance": 12, "path": [2, 7, 5, 6]}), + (2, 7, {"distance": 1, "path": [2, 7]}), + (3, 4, {"distance": 10, "path": [3, 4]}), + (3, 5, {"distance": 12, "path": [3, 2, 7, 5]}), + (3, 6, {"distance": 22, "path": [3, 2, 7, 5, 6]}), + (3, 7, {"distance": 11, "path": [3, 2, 7]}), + (4, 5, {"distance": 10, "path": [4, 5]}), + (4, 6, {"distance": 20, "path": [4, 5, 6]}), + (4, 7, {"distance": 11, "path": [4, 5, 7]}), + (5, 6, {"distance": 10, "path": [5, 6]}), + (5, 7, {"distance": 1, "path": [5, 7]}), + (6, 7, {"distance": 11, "path": [6, 5, 7]}), + ] + assert edges_equal(list(M.edges(data=True)), mc) + + def test_steiner_tree(self): + valid_steiner_trees = [ + [ + [ + (1, 2, {"weight": 10}), + (2, 3, {"weight": 10}), + (2, 7, {"weight": 1}), + (3, 4, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + [ + (1, 2, {"weight": 10}), + (2, 7, {"weight": 1}), + (3, 4, {"weight": 10}), + (4, 5, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + [ + (1, 2, {"weight": 10}), + (2, 3, {"weight": 10}), + (2, 7, {"weight": 1}), + (4, 5, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + ], + [ + [ + (0, 5, {"weight": 6}), + (1, 2, {"weight": 2}), + (1, 5, {"weight": 3}), + (3, 5, {"weight": 5}), + ], + [ + (0, 5, {"weight": 6}), + (4, 2, {"weight": 4}), + (4, 5, {"weight": 1}), + (3, 5, {"weight": 5}), + ], + ], + [ + [ + (1, 10, {"weight": 2}), + (3, 10, {"weight": 2}), + (3, 11, {"weight": 1}), + (5, 12, {"weight": 1}), + (6, 13, {"weight": 1}), + (8, 9, {"weight": 2}), + (9, 14, {"weight": 1}), + (10, 14, {"weight": 1}), + (11, 12, {"weight": 1}), + (12, 15, {"weight": 1}), + (13, 15, {"weight": 1}), + ] + ], + ] + for method in self.methods: + for G, term_nodes, valid_trees in zip( + [self.G1, self.G2, self.G3], + [self.G1_term_nodes, self.G2_term_nodes, self.G3_term_nodes], + valid_steiner_trees, + ): + S = steiner_tree(G, term_nodes, method=method) + assert any( + edges_equal(list(S.edges(data=True)), valid_tree) + for valid_tree in valid_trees + ) + + def test_multigraph_steiner_tree(self): + G = nx.MultiGraph() + G.add_edges_from( + [ + (1, 2, 0, {"weight": 1}), + (2, 3, 0, {"weight": 999}), + (2, 3, 1, {"weight": 1}), + (3, 4, 0, {"weight": 1}), + (3, 5, 0, {"weight": 1}), + ] + ) + terminal_nodes = [2, 4, 5] + expected_edges = [ + (2, 3, 1, {"weight": 1}), # edge with key 1 has lower weight + (3, 4, 0, {"weight": 1}), + (3, 5, 0, {"weight": 1}), + ] + for method in self.methods: + S = steiner_tree(G, terminal_nodes, method=method) + assert edges_equal(S.edges(data=True, keys=True), expected_edges) + + def test_remove_nonterminal_leaves(self): + G = nx.path_graph(10) + _remove_nonterminal_leaves(G, [4, 5, 6]) + + assert list(G) == [4, 5, 6] # only the terminal nodes are left + + +@pytest.mark.parametrize("method", ("kou", "mehlhorn")) +def test_steiner_tree_weight_attribute(method): + G = nx.star_graph(4) + # Add an edge attribute that is named something other than "weight" + nx.set_edge_attributes(G, dict.fromkeys(G.edges, 10), name="distance") + H = nx.approximation.steiner_tree(G, [1, 3], method=method, weight="distance") + assert nx.utils.edges_equal(H.edges, [(0, 1), (0, 3)]) + + +@pytest.mark.parametrize("method", ("kou", "mehlhorn")) +def 
test_steiner_tree_multigraph_weight_attribute(method): + G = nx.cycle_graph(3, create_using=nx.MultiGraph) + nx.set_edge_attributes(G, dict.fromkeys(G.edges, 10), name="distance") + G.add_edge(2, 0, distance=5) + H = nx.approximation.steiner_tree(G, list(G), method=method, weight="distance") + assert len(H.edges) == 2 and H.has_edge(2, 0, key=1) + assert sum(dist for *_, dist in H.edges(data="distance")) == 15 + + +@pytest.mark.parametrize("method", (None, "mehlhorn", "kou")) +def test_steiner_tree_methods(method): + G = nx.star_graph(4) + expected = nx.Graph([(0, 1), (0, 3)]) + st = nx.approximation.steiner_tree(G, [1, 3], method=method) + assert nx.utils.edges_equal(st.edges, expected.edges) + + +def test_steiner_tree_method_invalid(): + G = nx.star_graph(4) + with pytest.raises( + ValueError, match="invalid_method is not a valid choice for an algorithm." + ): + nx.approximation.steiner_tree(G, terminal_nodes=[1, 3], method="invalid_method") + + +def test_steiner_tree_remove_non_terminal_leaves_self_loop_edges(): + # To verify that the last step of the steiner tree approximation + # behaves in the case where a non-terminal leaf has a self loop edge + G = nx.path_graph(10) + + # Add self loops to the terminal nodes + G.add_edges_from([(2, 2), (3, 3), (4, 4), (7, 7), (8, 8)]) + + # Remove non-terminal leaves + _remove_nonterminal_leaves(G, [4, 5, 6, 7]) + + # The terminal nodes should be left + assert list(G) == [4, 5, 6, 7] # only the terminal nodes are left + + +def test_steiner_tree_non_terminal_leaves_multigraph_self_loop_edges(): + # To verify that the last step of the steiner tree approximation + # behaves in the case where a non-terminal leaf has a self loop edge + G = nx.MultiGraph() + G.add_edges_from([(i, i + 1) for i in range(10)]) + G.add_edges_from([(2, 2), (3, 3), (4, 4), (4, 4), (7, 7)]) + + # Remove non-terminal leaves + _remove_nonterminal_leaves(G, [4, 5, 6, 7]) + + # Only the terminal nodes should be left + assert list(G) == [4, 5, 6, 7] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py new file mode 100644 index 0000000..d512428 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py @@ -0,0 +1,1013 @@ +"""Unit tests for the traveling_salesman module.""" + +import random + +import pytest + +import networkx as nx +import networkx.algorithms.approximation as nx_app + +pairwise = nx.utils.pairwise + + +def test_christofides_hamiltonian(): + random.seed(42) + G = nx.complete_graph(20) + for u, v in G.edges(): + G[u][v]["weight"] = random.randint(0, 10) + + H = nx.Graph() + H.add_edges_from(pairwise(nx_app.christofides(G))) + H.remove_edges_from(nx.find_cycle(H)) + assert len(H.edges) == 0 + + tree = nx.minimum_spanning_tree(G, weight="weight") + H = nx.Graph() + H.add_edges_from(pairwise(nx_app.christofides(G, tree))) + H.remove_edges_from(nx.find_cycle(H)) + assert len(H.edges) == 0 + + +def test_christofides_incomplete_graph(): + G = nx.complete_graph(10) + G.remove_edge(0, 1) + pytest.raises(nx.NetworkXError, nx_app.christofides, G) + + +def test_christofides_ignore_selfloops(): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = nx_app.christofides(G) + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + +# set up graphs for other tests +class TestBase: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + 
cls.DG.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "A", 3), + ("B", "C", 12), + ("B", "D", 16), + ("C", "A", 13), + ("C", "B", 12), + ("C", "D", 4), + ("D", "A", 14), + ("D", "B", 15), + ("D", "C", 2), + } + ) + cls.DG_cycle = ["D", "C", "B", "A", "D"] + cls.DG_cost = 31.0 + + cls.DG2 = nx.DiGraph() + cls.DG2.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "A", 30), + ("B", "C", 2), + ("B", "D", 16), + ("C", "A", 33), + ("C", "B", 32), + ("C", "D", 34), + ("D", "A", 14), + ("D", "B", 15), + ("D", "C", 2), + } + ) + cls.DG2_cycle = ["D", "A", "B", "C", "D"] + cls.DG2_cost = 53.0 + + cls.unweightedUG = nx.complete_graph(5, nx.Graph()) + cls.unweightedDG = nx.complete_graph(5, nx.DiGraph()) + + cls.incompleteUG = nx.Graph() + cls.incompleteUG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)}) + cls.incompleteDG = nx.DiGraph() + cls.incompleteDG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)}) + + cls.UG = nx.Graph() + cls.UG.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "C", 12), + ("B", "D", 16), + ("C", "D", 4), + } + ) + cls.UG_cycle = ["D", "C", "B", "A", "D"] + cls.UG_cost = 33.0 + + cls.UG2 = nx.Graph() + cls.UG2.add_weighted_edges_from( + { + ("A", "B", 1), + ("A", "C", 15), + ("A", "D", 5), + ("B", "C", 16), + ("B", "D", 8), + ("C", "D", 3), + } + ) + cls.UG2_cycle = ["D", "C", "B", "A", "D"] + cls.UG2_cost = 25.0 + + +def validate_solution(soln, cost, exp_soln, exp_cost): + assert soln == exp_soln + assert cost == exp_cost + + +def validate_symmetric_solution(soln, cost, exp_soln, exp_cost): + assert soln == exp_soln or soln == exp_soln[::-1] + assert cost == exp_cost + + +class TestGreedyTSP(TestBase): + def test_greedy(self): + cycle = nx_app.greedy_tsp(self.DG, source="D") + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 31.0) + + cycle = nx_app.greedy_tsp(self.DG2, source="D") + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 78.0) + + cycle = nx_app.greedy_tsp(self.UG, source="D") + cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 33.0) + + cycle = nx_app.greedy_tsp(self.UG2, source="D") + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "A", "B", "D"], 27.0) + + def test_not_complete_graph(self): + pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteUG) + pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteDG) + + def test_not_weighted_graph(self): + nx_app.greedy_tsp(self.unweightedUG) + nx_app.greedy_tsp(self.unweightedDG) + + def test_two_nodes(self): + G = nx.Graph() + G.add_weighted_edges_from({(1, 2, 1)}) + cycle = nx_app.greedy_tsp(G) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + def test_ignore_selfloops(self): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = nx_app.greedy_tsp(G) + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + +class TestSimulatedAnnealingTSP(TestBase): + tsp = staticmethod(nx_app.simulated_annealing_tsp) + + def test_simulated_annealing_directed(self): + cycle = self.tsp(self.DG, "greedy", source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, 
self.DG_cycle, self.DG_cost) + + initial_sol = ["D", "B", "A", "C", "D"] + cycle = self.tsp(self.DG, initial_sol, source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + initial_sol = ["D", "A", "C", "B", "D"] + cycle = self.tsp(self.DG, initial_sol, move="1-0", source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + cycle = self.tsp(self.DG2, "greedy", source="D", seed=42) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost) + + cycle = self.tsp(self.DG2, "greedy", move="1-0", source="D", seed=42) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost) + + def test_simulated_annealing_undirected(self): + cycle = self.tsp(self.UG, "greedy", source="D", seed=42) + cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.UG_cycle, self.UG_cost) + + cycle = self.tsp(self.UG2, "greedy", source="D", seed=42) + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost) + + cycle = self.tsp(self.UG2, "greedy", move="1-0", source="D", seed=42) + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost) + + def test_error_on_input_order_mistake(self): + # see issue #4846 https://github.com/networkx/networkx/issues/4846 + pytest.raises(TypeError, self.tsp, self.UG, weight="weight") + pytest.raises(nx.NetworkXError, self.tsp, self.UG, "weight") + + def test_not_complete_graph(self): + pytest.raises(nx.NetworkXError, self.tsp, self.incompleteUG, "greedy", source=0) + pytest.raises(nx.NetworkXError, self.tsp, self.incompleteDG, "greedy", source=0) + + def test_ignore_selfloops(self): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = self.tsp(G, "greedy") + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + def test_not_weighted_graph(self): + self.tsp(self.unweightedUG, "greedy") + self.tsp(self.unweightedDG, "greedy") + + def test_two_nodes(self): + G = nx.Graph() + G.add_weighted_edges_from({(1, 2, 1)}) + + cycle = self.tsp(G, "greedy", source=1, seed=42) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + cycle = self.tsp(G, [1, 2, 1], source=1, seed=42) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + def test_failure_of_costs_too_high_when_iterations_low(self): + # Simulated Annealing Version: + # set number of moves low and alpha high + cycle = self.tsp( + self.DG2, "greedy", source="D", move="1-0", alpha=1, N_inner=1, seed=42 + ) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG2_cost + + # Try with an incorrect initial guess + initial_sol = ["D", "A", "B", "C", "D"] + cycle = self.tsp( + self.DG, + initial_sol, + source="D", + move="1-0", + alpha=0.1, + N_inner=1, + max_iterations=1, + seed=42, + ) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG_cost + + +class TestThresholdAcceptingTSP(TestSimulatedAnnealingTSP): + tsp = staticmethod(nx_app.threshold_accepting_tsp) + + def 
test_failure_of_costs_too_high_when_iterations_low(self): + # Threshold Version: + # set number of moves low and number of iterations low + cycle = self.tsp( + self.DG2, + "greedy", + source="D", + move="1-0", + N_inner=1, + max_iterations=1, + seed=4, + ) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG2_cost + + # set threshold too low + initial_sol = ["D", "A", "B", "C", "D"] + cycle = self.tsp( + self.DG, initial_sol, source="D", move="1-0", threshold=-3, seed=42 + ) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG_cost + + +# Tests for function traveling_salesman_problem +def test_TSP_method(): + G = nx.cycle_graph(9) + G[4][5]["weight"] = 10 + + # Test using the old currying method + def sa_tsp(G, weight): + return nx_app.simulated_annealing_tsp(G, "greedy", weight, source=4, seed=1) + + path = nx_app.traveling_salesman_problem( + G, + method=sa_tsp, + cycle=False, + ) + assert path == [4, 3, 2, 1, 0, 8, 7, 6, 5] + + +def test_TSP_unweighted(): + G = nx.cycle_graph(9) + path = nx_app.traveling_salesman_problem(G, nodes=[3, 6], cycle=False) + assert path in ([3, 4, 5, 6], [6, 5, 4, 3]) + + cycle = nx_app.traveling_salesman_problem(G, nodes=[3, 6]) + assert cycle in ([3, 4, 5, 6, 5, 4, 3], [6, 5, 4, 3, 4, 5, 6]) + + +def test_TSP_weighted(): + G = nx.cycle_graph(9) + G[0][1]["weight"] = 2 + G[1][2]["weight"] = 2 + G[2][3]["weight"] = 2 + G[3][4]["weight"] = 4 + G[4][5]["weight"] = 5 + G[5][6]["weight"] = 4 + G[6][7]["weight"] = 2 + G[7][8]["weight"] = 2 + G[8][0]["weight"] = 2 + tsp = nx_app.traveling_salesman_problem + + # path between 3 and 6 + expected_paths = ([3, 2, 1, 0, 8, 7, 6], [6, 7, 8, 0, 1, 2, 3]) + # cycle between 3 and 6 + expected_cycles = ( + [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3], + [6, 7, 8, 0, 1, 2, 3, 2, 1, 0, 8, 7, 6], + ) + # path through all nodes + expected_tourpaths = ([5, 6, 7, 8, 0, 1, 2, 3, 4], [4, 3, 2, 1, 0, 8, 7, 6, 5]) + + # Check default method + cycle = tsp(G, nodes=[3, 6], weight="weight") + assert cycle in expected_cycles + + path = tsp(G, nodes=[3, 6], weight="weight", cycle=False) + assert path in expected_paths + + tourpath = tsp(G, weight="weight", cycle=False) + assert tourpath in expected_tourpaths + + # Check all methods + methods = [ + (nx_app.christofides, {}), + (nx_app.greedy_tsp, {}), + ( + nx_app.simulated_annealing_tsp, + {"init_cycle": "greedy"}, + ), + ( + nx_app.threshold_accepting_tsp, + {"init_cycle": "greedy"}, + ), + ] + for method, kwargs in methods: + cycle = tsp(G, nodes=[3, 6], weight="weight", method=method, **kwargs) + assert cycle in expected_cycles + + path = tsp( + G, nodes=[3, 6], weight="weight", method=method, cycle=False, **kwargs + ) + assert path in expected_paths + + tourpath = tsp(G, weight="weight", method=method, cycle=False, **kwargs) + assert tourpath in expected_tourpaths + + +def test_TSP_incomplete_graph_short_path(): + G = nx.cycle_graph(9) + G.add_edges_from([(4, 9), (9, 10), (10, 11), (11, 0)]) + G[4][5]["weight"] = 5 + + cycle = nx_app.traveling_salesman_problem(G) + assert len(cycle) == 17 and len(set(cycle)) == 12 + + # make sure that cutting one edge out of complete graph formulation + # cuts out many edges out of the path of the TSP + path = nx_app.traveling_salesman_problem(G, cycle=False) + assert len(path) == 13 and len(set(path)) == 12 + + +def test_TSP_alternate_weight(): + G = nx.complete_graph(9) + G[0][1]["weight"] = 2 + G[1][2]["weight"] = 2 + G[2][3]["weight"] = 2 + G[3][4]["weight"] = 4 + 
G[4][5]["weight"] = 5 + G[5][6]["weight"] = 4 + G[6][7]["weight"] = 2 + G[7][8]["weight"] = 2 + G[8][0]["weight"] = 2 + + H = nx.complete_graph(9) + H[0][1]["distance"] = 2 + H[1][2]["distance"] = 2 + H[2][3]["distance"] = 2 + H[3][4]["distance"] = 4 + H[4][5]["distance"] = 5 + H[5][6]["distance"] = 4 + H[6][7]["distance"] = 2 + H[7][8]["distance"] = 2 + H[8][0]["distance"] = 2 + + assert nx_app.traveling_salesman_problem( + G, weight="weight" + ) == nx_app.traveling_salesman_problem(H, weight="distance") + + +def test_held_karp_ascent(): + """ + Test the Held-Karp relaxation with the ascent method + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + # Adjacency matrix from page 1153 of the 1970 Held and Karp paper + # which have been edited to be directional, but also symmetric + G_array = np.array( + [ + [0, 97, 60, 73, 17, 52], + [97, 0, 41, 52, 90, 30], + [60, 41, 0, 21, 35, 41], + [73, 52, 21, 0, 95, 46], + [17, 90, 35, 95, 0, 81], + [52, 30, 41, 46, 81, 0], + ] + ) + + solution_edges = [(1, 3), (2, 4), (3, 2), (4, 0), (5, 1), (0, 5)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 207.00 + # Check that the z_stars are the same + solution = nx.DiGraph() + solution.add_edges_from(solution_edges) + assert nx.utils.edges_equal(z_star.edges, solution.edges) + + +def test_ascent_fractional_solution(): + """ + Test the ascent method using a modified version of Figure 2 on page 1140 + in 'The Traveling Salesman Problem and Minimum Spanning Trees' by Held and + Karp + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + # This version of Figure 2 has all of the edge weights multiplied by 100 + # and is a complete directed graph with infinite edge weights for the + # edges not listed in the original graph + G_array = np.array( + [ + [0, 100, 100, 100000, 100000, 1], + [100, 0, 100, 100000, 1, 100000], + [100, 100, 0, 1, 100000, 100000], + [100000, 100000, 1, 0, 100, 100], + [100000, 1, 100000, 100, 0, 100], + [1, 100000, 100000, 100, 100, 0], + ] + ) + + solution_z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 1 / 3, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 1 / 3, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 1 / 3, + (3, 5): 1 / 2, + (4, 1): 5 / 6, + (4, 3): 1 / 3, + (4, 5): 1 / 2, + (5, 0): 5 / 6, + (5, 3): 1 / 2, + (5, 4): 1 / 2, + } + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 303.00 + # Check that the z_stars are the same + assert {key: round(z_star[key], 4) for key in z_star} == { + key: round(solution_z_star[key], 4) for key in solution_z_star + } + + +def test_ascent_method_asymmetric(): + """ + Tests the ascent method using a truly asymmetric graph for which the + solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 26, 63, 59, 69, 31, 41], + [62, 0, 91, 53, 75, 87, 47], + [47, 82, 0, 90, 15, 9, 18], + [68, 19, 5, 0, 58, 34, 93], + [11, 58, 53, 55, 0, 61, 79], + [88, 75, 13, 76, 98, 0, 40], + [41, 61, 55, 88, 46, 45, 0], + ] + ) + + 
solution_edges = [(0, 1), (1, 3), (3, 2), (2, 5), (5, 6), (4, 0), (6, 4)]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 190.00
+    # Check that the z_stars match.
+    solution = nx.DiGraph()
+    solution.add_edges_from(solution_edges)
+    assert nx.utils.edges_equal(z_star.edges, solution.edges)
+
+
+def test_ascent_method_asymmetric_2():
+    """
+    Tests the ascent method using a truly asymmetric graph for which the
+    solution has been brute forced.
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    G_array = np.array(
+        [
+            [0, 45, 39, 92, 29, 31],
+            [72, 0, 4, 12, 21, 60],
+            [81, 6, 0, 98, 70, 53],
+            [49, 71, 59, 0, 98, 94],
+            [74, 95, 24, 43, 0, 47],
+            [56, 43, 3, 65, 22, 0],
+        ]
+    )
+
+    solution_edges = [(0, 5), (5, 4), (1, 3), (3, 0), (2, 1), (4, 2)]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 144.00
+    # Check that the z_stars match.
+    solution = nx.DiGraph()
+    solution.add_edges_from(solution_edges)
+    assert nx.utils.edges_equal(z_star.edges, solution.edges)
+
+
+def test_held_karp_ascent_asymmetric_3():
+    """
+    Tests the ascent method using a truly asymmetric graph with a fractional
+    solution for which the solution has been brute forced.
+
+    In this graph there are two different optimal, integral solutions (which
+    are also the overall atsp solutions) to the Held Karp relaxation, because
+    the graph has two different tours of optimal value and the possible
+    solutions in the held_karp_ascent function are not stored in an
+    ordered data structure.
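+
+    Both tours have total weight 13, so the assertion below accepts either
+    of the two corresponding z_star edge sets.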
+ """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 1, 5, 2, 7, 4], + [7, 0, 7, 7, 1, 4], + [4, 7, 0, 9, 2, 1], + [7, 2, 7, 0, 4, 4], + [5, 5, 4, 4, 0, 3], + [3, 9, 1, 3, 4, 0], + ] + ) + + solution1_edges = [(0, 3), (1, 4), (2, 5), (3, 1), (4, 2), (5, 0)] + + solution2_edges = [(0, 3), (3, 1), (1, 4), (4, 5), (2, 0), (5, 2)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + assert round(opt_hk, 2) == 13.00 + # Check that the z_stars are the same + solution1 = nx.DiGraph() + solution1.add_edges_from(solution1_edges) + solution2 = nx.DiGraph() + solution2.add_edges_from(solution2_edges) + assert nx.utils.edges_equal(z_star.edges, solution1.edges) or nx.utils.edges_equal( + z_star.edges, solution2.edges + ) + + +def test_held_karp_ascent_fractional_asymmetric(): + """ + Tests the ascent method using a truly asymmetric graph with a fractional + solution for which the solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 100, 150, 100000, 100000, 1], + [150, 0, 100, 100000, 1, 100000], + [100, 150, 0, 1, 100000, 100000], + [100000, 100000, 1, 0, 150, 100], + [100000, 2, 100000, 100, 0, 150], + [2, 100000, 100000, 150, 100, 0], + ] + ) + + solution_z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 5 / 12, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 5 / 12, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 5 / 12, + (3, 5): 5 / 12, + (4, 1): 5 / 6, + (4, 3): 5 / 12, + (4, 5): 5 / 12, + (5, 0): 5 / 6, + (5, 3): 5 / 12, + (5, 4): 5 / 12, + } + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 304.00 + # Check that the z_stars are the same + assert {key: round(z_star[key], 4) for key in z_star} == { + key: round(solution_z_star[key], 4) for key in solution_z_star + } + + +def test_spanning_tree_distribution(): + """ + Test that we can create an exponential distribution of spanning trees such + that the probability of each tree is proportional to the product of edge + weights. + + Results of this test have been confirmed with hypothesis testing from the + created distribution. + + This test uses the symmetric, fractional Held Karp solution. 
+ """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + pytest.importorskip("numpy") + pytest.importorskip("scipy") + + z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 1 / 3, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 1 / 3, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 1 / 3, + (3, 5): 1 / 2, + (4, 1): 5 / 6, + (4, 3): 1 / 3, + (4, 5): 1 / 2, + (5, 0): 5 / 6, + (5, 3): 1 / 2, + (5, 4): 1 / 2, + } + + solution_gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of z_star + G = nx.MultiGraph() + for u, v in z_star: + if (u, v) in G.edges or (v, u) in G.edges: + continue + G.add_edge(u, v) + + gamma = tsp.spanning_tree_distribution(G, z_star) + + assert {key: round(gamma[key], 4) for key in gamma} == solution_gamma + + +def test_asadpour_tsp(): + """ + Test the complete asadpour tsp algorithm with the fractional, symmetric + Held Karp solution. This test also uses an incomplete graph as input. + """ + # This version of Figure 2 has all of the edge weights multiplied by 100 + # and the 0 weight edges have a weight of 1. + pytest.importorskip("numpy") + pytest.importorskip("scipy") + + edge_list = [ + (0, 1, 100), + (0, 2, 100), + (0, 5, 1), + (1, 2, 100), + (1, 4, 1), + (2, 3, 1), + (3, 4, 100), + (3, 5, 100), + (4, 5, 100), + (1, 0, 100), + (2, 0, 100), + (5, 0, 1), + (2, 1, 100), + (4, 1, 1), + (3, 2, 1), + (4, 3, 100), + (5, 3, 100), + (5, 4, 100), + ] + + G = nx.DiGraph() + G.add_weighted_edges_from(edge_list) + + tour = nx_app.traveling_salesman_problem( + G, weight="weight", method=nx_app.asadpour_atsp, seed=19 + ) + + # Check that the returned list is a valid tour. Because this is an + # incomplete graph, the conditions are not as strict. We need the tour to + # + # Start and end at the same node + # Pass through every vertex at least once + # Have a total cost at most ln(6) / ln(ln(6)) = 3.0723 times the optimal + # + # For the second condition it is possible to have the tour pass through the + # same vertex more then. Imagine that the tour on the complete version takes + # an edge not in the original graph. In the output this is substituted with + # the shortest path between those vertices, allowing vertices to appear more + # than once. + # + # Even though we are using a fixed seed, multiple tours have been known to + # be returned. The first two are from the original development of this test, + # and the third one from issue #5913 on GitHub. If other tours are returned, + # add it on the list of expected tours. + expected_tours = [ + [1, 4, 5, 0, 2, 3, 2, 1], + [3, 2, 0, 1, 4, 5, 3], + [3, 2, 1, 0, 5, 4, 3], + ] + + assert tour in expected_tours + + +def test_asadpour_real_world(): + """ + This test uses airline prices between the six largest cities in the US. + + * New York City -> JFK + * Los Angeles -> LAX + * Chicago -> ORD + * Houston -> IAH + * Phoenix -> PHX + * Philadelphia -> PHL + + Flight prices from August 2021 using Delta or American airlines to get + nonstop flight. The brute force solution found the optimal tour to cost $872 + + This test also uses the `source` keyword argument to ensure that the tour + always starts at city 0. 
+ """ + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + # JFK LAX ORD IAH PHX PHL + [0, 243, 199, 208, 169, 183], # JFK + [277, 0, 217, 123, 127, 252], # LAX + [297, 197, 0, 197, 123, 177], # ORD + [303, 169, 197, 0, 117, 117], # IAH + [257, 127, 160, 117, 0, 319], # PHX + [183, 332, 217, 117, 319, 0], # PHL + ] + ) + + node_list = ["JFK", "LAX", "ORD", "IAH", "PHX", "PHL"] + + expected_tours = [ + ["JFK", "LAX", "PHX", "ORD", "IAH", "PHL", "JFK"], + ["JFK", "ORD", "PHX", "LAX", "IAH", "PHL", "JFK"], + ] + + G = nx.from_numpy_array(G_array, nodelist=node_list, create_using=nx.DiGraph) + + tour = nx_app.traveling_salesman_problem( + G, weight="weight", method=nx_app.asadpour_atsp, seed=37, source="JFK" + ) + + assert tour in expected_tours + + +def test_asadpour_real_world_path(): + """ + This test uses airline prices between the six largest cities in the US. This + time using a path, not a cycle. + + * New York City -> JFK + * Los Angeles -> LAX + * Chicago -> ORD + * Houston -> IAH + * Phoenix -> PHX + * Philadelphia -> PHL + + Flight prices from August 2021 using Delta or American airlines to get + nonstop flight. The brute force solution found the optimal tour to cost $872 + """ + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + # JFK LAX ORD IAH PHX PHL + [0, 243, 199, 208, 169, 183], # JFK + [277, 0, 217, 123, 127, 252], # LAX + [297, 197, 0, 197, 123, 177], # ORD + [303, 169, 197, 0, 117, 117], # IAH + [257, 127, 160, 117, 0, 319], # PHX + [183, 332, 217, 117, 319, 0], # PHL + ] + ) + + node_list = ["JFK", "LAX", "ORD", "IAH", "PHX", "PHL"] + + expected_paths = [ + ["ORD", "PHX", "LAX", "IAH", "PHL", "JFK"], + ["JFK", "PHL", "IAH", "ORD", "PHX", "LAX"], + ] + + G = nx.from_numpy_array(G_array, nodelist=node_list, create_using=nx.DiGraph) + + path = nx_app.traveling_salesman_problem( + G, weight="weight", cycle=False, method=nx_app.asadpour_atsp, seed=56 + ) + + assert path in expected_paths + + +def test_asadpour_disconnected_graph(): + """ + Test that the proper exception is raised when asadpour_atsp is given an + disconnected graph. 
+ """ + + G = nx.complete_graph(4, create_using=nx.DiGraph) + # have to set edge weights so that if the exception is not raised, the + # function will complete and we will fail the test + nx.set_edge_attributes(G, 1, "weight") + G.add_node(5) + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_incomplete_graph(): + """ + Test that the proper exception is raised when asadpour_atsp is given an + incomplete graph + """ + + G = nx.complete_graph(4, create_using=nx.DiGraph) + # have to set edge weights so that if the exception is not raised, the + # function will complete and we will fail the test + nx.set_edge_attributes(G, 1, "weight") + G.remove_edge(0, 1) + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_empty_graph(): + """ + Test the asadpour_atsp function with an empty graph + """ + G = nx.DiGraph() + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_small_graphs(): + # 1 node + G = nx.path_graph(1, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError, match="at least two nodes"): + nx_app.asadpour_atsp(G) + + # 2 nodes + G = nx.DiGraph() + G.add_weighted_edges_from([(0, 1, 7), (1, 0, 8)]) + assert nx_app.asadpour_atsp(G) in [[0, 1], [1, 0]] + assert nx_app.asadpour_atsp(G, source=1) == [1, 0] + assert nx_app.asadpour_atsp(G, source=0) == [0, 1] + + +@pytest.mark.slow +def test_asadpour_integral_held_karp(): + """ + This test uses an integral held karp solution and the held karp function + will return a graph rather than a dict, bypassing most of the asadpour + algorithm. + + At first glance, this test probably doesn't look like it ensures that we + skip the rest of the asadpour algorithm, but it does. We are not fixing a + see for the random number generator, so if we sample any spanning trees + the approximation would be different basically every time this test is + executed but it is not since held karp is deterministic and we do not + reach the portion of the code with the dependence on random numbers. 
+ """ + np = pytest.importorskip("numpy") + + G_array = np.array( + [ + [0, 26, 63, 59, 69, 31, 41], + [62, 0, 91, 53, 75, 87, 47], + [47, 82, 0, 90, 15, 9, 18], + [68, 19, 5, 0, 58, 34, 93], + [11, 58, 53, 55, 0, 61, 79], + [88, 75, 13, 76, 98, 0, 40], + [41, 61, 55, 88, 46, 45, 0], + ] + ) + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + for _ in range(2): + tour = nx_app.traveling_salesman_problem(G, method=nx_app.asadpour_atsp) + + assert [1, 3, 2, 5, 2, 6, 4, 0, 1] == tour + + +def test_directed_tsp_impossible(): + """ + Test the asadpour algorithm with a graph without a hamiltonian circuit + """ + pytest.importorskip("numpy") + + # In this graph, once we leave node 0 we cannot return + edges = [ + (0, 1, 10), + (0, 2, 11), + (0, 3, 12), + (1, 2, 4), + (1, 3, 6), + (2, 1, 3), + (2, 3, 2), + (3, 1, 5), + (3, 2, 1), + ] + + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + + pytest.raises(nx.NetworkXError, nx_app.traveling_salesman_problem, G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py new file mode 100644 index 0000000..c40910e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py @@ -0,0 +1,274 @@ +import itertools + +import networkx as nx +from networkx.algorithms.approximation import ( + treewidth_min_degree, + treewidth_min_fill_in, +) +from networkx.algorithms.approximation.treewidth import ( + MinDegreeHeuristic, + min_fill_in_heuristic, +) + + +def is_tree_decomp(graph, decomp): + """Check if the given tree decomposition is valid.""" + for x in graph.nodes(): + appear_once = False + for bag in decomp.nodes(): + if x in bag: + appear_once = True + break + assert appear_once + + # Check if each connected pair of nodes are at least once together in a bag + for x, y in graph.edges(): + appear_together = False + for bag in decomp.nodes(): + if x in bag and y in bag: + appear_together = True + break + assert appear_together + + # Check if the nodes associated with vertex v form a connected subset of T + for v in graph.nodes(): + subset = [] + for bag in decomp.nodes(): + if v in bag: + subset.append(bag) + sub_graph = decomp.subgraph(subset) + assert nx.is_connected(sub_graph) + + +class TestTreewidthMinDegree: + """Unit tests for the min_degree function""" + + @classmethod + def setup_class(cls): + """Setup for different kinds of trees""" + cls.complete = nx.Graph() + cls.complete.add_edge(1, 2) + cls.complete.add_edge(2, 3) + cls.complete.add_edge(1, 3) + + cls.small_tree = nx.Graph() + cls.small_tree.add_edge(1, 3) + cls.small_tree.add_edge(4, 3) + cls.small_tree.add_edge(2, 3) + cls.small_tree.add_edge(3, 5) + cls.small_tree.add_edge(5, 6) + cls.small_tree.add_edge(5, 7) + cls.small_tree.add_edge(6, 7) + + cls.deterministic_graph = nx.Graph() + cls.deterministic_graph.add_edge(0, 1) # deg(0) = 1 + + cls.deterministic_graph.add_edge(1, 2) # deg(1) = 2 + + cls.deterministic_graph.add_edge(2, 3) + cls.deterministic_graph.add_edge(2, 4) # deg(2) = 3 + + cls.deterministic_graph.add_edge(3, 4) + cls.deterministic_graph.add_edge(3, 5) + cls.deterministic_graph.add_edge(3, 6) # deg(3) = 4 + + cls.deterministic_graph.add_edge(4, 5) + cls.deterministic_graph.add_edge(4, 6) + cls.deterministic_graph.add_edge(4, 7) # deg(4) = 5 + + cls.deterministic_graph.add_edge(5, 6) + cls.deterministic_graph.add_edge(5, 7) + cls.deterministic_graph.add_edge(5, 8) + 
cls.deterministic_graph.add_edge(5, 9) # deg(5) = 6 + + cls.deterministic_graph.add_edge(6, 7) + cls.deterministic_graph.add_edge(6, 8) + cls.deterministic_graph.add_edge(6, 9) # deg(6) = 6 + + cls.deterministic_graph.add_edge(7, 8) + cls.deterministic_graph.add_edge(7, 9) # deg(7) = 5 + + cls.deterministic_graph.add_edge(8, 9) # deg(8) = 4 + + def test_petersen_graph(self): + """Test Petersen graph tree decomposition result""" + G = nx.petersen_graph() + _, decomp = treewidth_min_degree(G) + is_tree_decomp(G, decomp) + + def test_small_tree_treewidth(self): + """Test small tree + + Test if the computed treewidth of the known self.small_tree is 2. + As we know which value we can expect from our heuristic, values other + than two are regressions + """ + G = self.small_tree + # the order of removal should be [1,2,4]3[5,6,7] + # (with [] denoting any order of the containing nodes) + # resulting in treewidth 2 for the heuristic + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 2 + + def test_heuristic_abort(self): + """Test heuristic abort condition for fully connected graph""" + graph = {} + for u in self.complete: + graph[u] = set() + for v in self.complete[u]: + if u != v: # ignore self-loop + graph[u].add(v) + + deg_heuristic = MinDegreeHeuristic(graph) + node = deg_heuristic.best_node(graph) + if node is None: + pass + else: + assert False + + def test_empty_graph(self): + """Test empty graph""" + G = nx.Graph() + _, _ = treewidth_min_degree(G) + + def test_two_component_graph(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + treewidth, _ = treewidth_min_degree(G) + assert treewidth == 0 + + def test_not_sortable_nodes(self): + G = nx.Graph([(0, "a")]) + treewidth_min_degree(G) + + def test_heuristic_first_steps(self): + """Test first steps of min_degree heuristic""" + graph = { + n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph + } + deg_heuristic = MinDegreeHeuristic(graph) + elim_node = deg_heuristic.best_node(graph) + steps = [] + + while elim_node is not None: + steps.append(elim_node) + nbrs = graph[elim_node] + + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + for u in graph: + if elim_node in graph[u]: + graph[u].remove(elim_node) + + del graph[elim_node] + elim_node = deg_heuristic.best_node(graph) + + # check only the first 5 elements for equality + assert steps[:5] == [0, 1, 2, 3, 4] + + +class TestTreewidthMinFillIn: + """Unit tests for the treewidth_min_fill_in function.""" + + @classmethod + def setup_class(cls): + """Setup for different kinds of trees""" + cls.complete = nx.Graph() + cls.complete.add_edge(1, 2) + cls.complete.add_edge(2, 3) + cls.complete.add_edge(1, 3) + + cls.small_tree = nx.Graph() + cls.small_tree.add_edge(1, 2) + cls.small_tree.add_edge(2, 3) + cls.small_tree.add_edge(3, 4) + cls.small_tree.add_edge(1, 4) + cls.small_tree.add_edge(2, 4) + cls.small_tree.add_edge(4, 5) + cls.small_tree.add_edge(5, 6) + cls.small_tree.add_edge(5, 7) + cls.small_tree.add_edge(6, 7) + + cls.deterministic_graph = nx.Graph() + cls.deterministic_graph.add_edge(1, 2) + cls.deterministic_graph.add_edge(1, 3) + cls.deterministic_graph.add_edge(3, 4) + cls.deterministic_graph.add_edge(2, 4) + cls.deterministic_graph.add_edge(3, 5) + cls.deterministic_graph.add_edge(4, 5) + cls.deterministic_graph.add_edge(3, 6) + cls.deterministic_graph.add_edge(5, 6) + + def test_petersen_graph(self): + """Test Petersen graph tree decomposition result""" + G = nx.petersen_graph() + _, decomp = 
treewidth_min_fill_in(G) + is_tree_decomp(G, decomp) + + def test_small_tree_treewidth(self): + """Test if the computed treewidth of the known self.small_tree is 2""" + G = self.small_tree + # the order of removal should be [1,2,4]3[5,6,7] + # (with [] denoting any order of the containing nodes) + # resulting in treewidth 2 for the heuristic + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 2 + + def test_heuristic_abort(self): + """Test if min_fill_in returns None for fully connected graph""" + graph = {} + for u in self.complete: + graph[u] = set() + for v in self.complete[u]: + if u != v: # ignore self-loop + graph[u].add(v) + next_node = min_fill_in_heuristic(graph) + if next_node is None: + pass + else: + assert False + + def test_empty_graph(self): + """Test empty graph""" + G = nx.Graph() + _, _ = treewidth_min_fill_in(G) + + def test_two_component_graph(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 0 + + def test_not_sortable_nodes(self): + G = nx.Graph([(0, "a")]) + treewidth_min_fill_in(G) + + def test_heuristic_first_steps(self): + """Test first steps of min_fill_in heuristic""" + graph = { + n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph + } + elim_node = min_fill_in_heuristic(graph) + steps = [] + + while elim_node is not None: + steps.append(elim_node) + nbrs = graph[elim_node] + + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + for u in graph: + if elim_node in graph[u]: + graph[u].remove(elim_node) + + del graph[elim_node] + elim_node = min_fill_in_heuristic(graph) + + # check only the first 2 elements for equality + assert steps[:2] == [6, 5] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py new file mode 100644 index 0000000..5cc5a38 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py @@ -0,0 +1,68 @@ +import networkx as nx +from networkx.algorithms.approximation import min_weighted_vertex_cover + + +def is_cover(G, node_cover): + return all({u, v} & node_cover for u, v in G.edges()) + + +class TestMWVC: + """Unit tests for the approximate minimum weighted vertex cover + function, + :func:`~networkx.algorithms.approximation.vertex_cover.min_weighted_vertex_cover`. + + """ + + def test_unweighted_directed(self): + # Create a star graph in which half the nodes are directed in + # and half are directed out. 
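+        # Every edge added below has node 0 as one endpoint, so {0} alone is
+        # a valid cover; asserting a size-1 cover checks that the
+        # approximation finds the hub. A minimal commented sketch of the same
+        # idea on a smaller graph (hypothetical, not part of the test):
+        #
+        #     H = nx.DiGraph([(0, 1), (2, 0)])
+        #     is_cover(H, {0})  # -> True, since 0 touches both edges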
+ G = nx.DiGraph() + G.add_edges_from((0, v) for v in range(1, 26)) + G.add_edges_from((v, 0) for v in range(26, 51)) + cover = min_weighted_vertex_cover(G) + assert 1 == len(cover) + assert is_cover(G, cover) + + def test_unweighted_undirected(self): + # create a simple star graph + size = 50 + sg = nx.star_graph(size) + cover = min_weighted_vertex_cover(sg) + assert 1 == len(cover) + assert is_cover(sg, cover) + + def test_weighted(self): + wg = nx.Graph() + wg.add_node(0, weight=10) + wg.add_node(1, weight=1) + wg.add_node(2, weight=1) + wg.add_node(3, weight=1) + wg.add_node(4, weight=1) + + wg.add_edge(0, 1) + wg.add_edge(0, 2) + wg.add_edge(0, 3) + wg.add_edge(0, 4) + + wg.add_edge(1, 2) + wg.add_edge(2, 3) + wg.add_edge(3, 4) + wg.add_edge(4, 1) + + cover = min_weighted_vertex_cover(wg, weight="weight") + csum = sum(wg.nodes[node]["weight"] for node in cover) + assert 4 == csum + assert is_cover(wg, cover) + + def test_unweighted_self_loop(self): + slg = nx.Graph() + slg.add_node(0) + slg.add_node(1) + slg.add_node(2) + + slg.add_edge(0, 1) + slg.add_edge(2, 2) + + cover = min_weighted_vertex_cover(slg) + assert 2 == len(cover) + assert is_cover(slg, cover) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/traveling_salesman.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/traveling_salesman.py new file mode 100644 index 0000000..0c6dd52 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/traveling_salesman.py @@ -0,0 +1,1508 @@ +""" +================================= +Travelling Salesman Problem (TSP) +================================= + +Implementation of approximate algorithms +for solving and approximating the TSP problem. + +Categories of algorithms which are implemented: + +- Christofides (provides a 3/2-approximation of TSP) +- Greedy +- Simulated Annealing (SA) +- Threshold Accepting (TA) +- Asadpour Asymmetric Traveling Salesman Algorithm + +The Travelling Salesman Problem tries to find, given the weight +(distance) between all points where a salesman has to visit, the +route so that: + +- The total distance (cost) which the salesman travels is minimized. +- The salesman returns to the starting point. +- Note that for a complete graph, the salesman visits each point once. + +The function `travelling_salesman_problem` allows for incomplete +graphs by finding all-pairs shortest paths, effectively converting +the problem to a complete graph problem. It calls one of the +approximate methods on that problem and then converts the result +back to the original graph using the previously found shortest paths. + +TSP is an NP-hard problem in combinatorial optimization, +important in operations research and theoretical computer science. + +http://en.wikipedia.org/wiki/Travelling_salesman_problem +""" + +import math + +import networkx as nx +from networkx.algorithms.tree.mst import random_spanning_tree +from networkx.utils import not_implemented_for, pairwise, py_random_state + +__all__ = [ + "traveling_salesman_problem", + "christofides", + "asadpour_atsp", + "greedy_tsp", + "simulated_annealing_tsp", + "threshold_accepting_tsp", +] + + +def swap_two_nodes(soln, seed): + """Swap two nodes in `soln` to give a neighbor solution. + + Parameters + ---------- + soln : list of nodes + Current cycle of nodes + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + The solution after move is applied. 
(A neighbor solution.) + + Notes + ----- + This function assumes that the incoming list `soln` is a cycle + (that the first and last element are the same) and also that + we don't want any move to change the first node in the list + (and thus not the last node either). + + The input list is changed as well as returned. Make a copy if needed. + + See Also + -------- + move_one_node + """ + a, b = seed.sample(range(1, len(soln) - 1), k=2) + soln[a], soln[b] = soln[b], soln[a] + return soln + + +def move_one_node(soln, seed): + """Move one node to another position to give a neighbor solution. + + The node to move and the position to move to are chosen randomly. + The first and last nodes are left untouched as soln must be a cycle + starting at that node. + + Parameters + ---------- + soln : list of nodes + Current cycle of nodes + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + The solution after move is applied. (A neighbor solution.) + + Notes + ----- + This function assumes that the incoming list `soln` is a cycle + (that the first and last element are the same) and also that + we don't want any move to change the first node in the list + (and thus not the last node either). + + The input list is changed as well as returned. Make a copy if needed. + + See Also + -------- + swap_two_nodes + """ + a, b = seed.sample(range(1, len(soln) - 1), k=2) + soln.insert(b, soln.pop(a)) + return soln + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def christofides(G, weight="weight", tree=None): + """Approximate a solution of the traveling salesman problem + + Compute a 3/2-approximation of the traveling salesman problem + in a complete undirected graph using Christofides [1]_ algorithm. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted undirected graph. + The distance between all pairs of nodes should be included. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + tree : NetworkX graph or None (default: None) + A minimum spanning tree of G. Or, if None, the minimum spanning + tree is computed using :func:`networkx.minimum_spanning_tree` + + Returns + ------- + list + List of nodes in `G` along a cycle with a 3/2-approximation of + the minimal Hamiltonian cycle. + + References + ---------- + .. [1] Christofides, Nicos. "Worst-case analysis of a new heuristic for + the travelling salesman problem." No. RR-388. Carnegie-Mellon Univ + Pittsburgh Pa Management Sciences Research Group, 1976. + """ + # Remove selfloops if necessary + loop_nodes = nx.nodes_with_selfloops(G) + try: + node = next(loop_nodes) + except StopIteration: + pass + else: + G = G.copy() + G.remove_edge(node, node) + G.remove_edges_from((n, n) for n in loop_nodes) + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. 
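+    # In a complete simple graph on len(G) nodes, every node is adjacent to
+    # exactly N = len(G) - 1 others, so comparing each adjacency size with N
+    # detects any missing edge. For instance (hypothetical): in
+    # nx.complete_graph(4) every nbrdict has length 3, and removing a single
+    # edge drops two of those lengths to 2, triggering the error below.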
+ if any(len(nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if tree is None: + tree = nx.minimum_spanning_tree(G, weight=weight) + L = G.copy() + L.remove_nodes_from([v for v, degree in tree.degree if not (degree % 2)]) + MG = nx.MultiGraph() + MG.add_edges_from(tree.edges) + edges = nx.min_weight_matching(L, weight=weight) + MG.add_edges_from(edges) + return _shortcutting(nx.eulerian_circuit(MG)) + + +def _shortcutting(circuit): + """Remove duplicate nodes in the path""" + nodes = [] + for u, v in circuit: + if v in nodes: + continue + if not nodes: + nodes.append(u) + nodes.append(v) + nodes.append(nodes[0]) + return nodes + + +@nx._dispatchable(edge_attrs="weight") +def traveling_salesman_problem( + G, weight="weight", nodes=None, cycle=True, method=None, **kwargs +): + """Find the shortest path in `G` connecting specified nodes + + This function allows approximate solution to the traveling salesman + problem on networks that are not complete graphs and/or where the + salesman does not need to visit all nodes. + + This function proceeds in two steps. First, it creates a complete + graph using the all-pairs shortest_paths between nodes in `nodes`. + Edge weights in the new graph are the lengths of the paths + between each pair of nodes in the original graph. + Second, an algorithm (default: `christofides` for undirected and + `asadpour_atsp` for directed) is used to approximate the minimal Hamiltonian + cycle on this new graph. The available algorithms are: + + - christofides + - greedy_tsp + - simulated_annealing_tsp + - threshold_accepting_tsp + - asadpour_atsp + + Once the Hamiltonian Cycle is found, this function post-processes to + accommodate the structure of the original graph. If `cycle` is ``False``, + the biggest weight edge is removed to make a Hamiltonian path. + Then each edge on the new complete graph used for that analysis is + replaced by the shortest_path between those nodes on the original graph. + If the input graph `G` includes edges with weights that do not adhere to + the triangle inequality, such as when `G` is not a complete graph (i.e + length of non-existent edges is infinity), then the returned path may + contain some repeating nodes (other than the starting node). + + Parameters + ---------- + G : NetworkX graph + A possibly weighted graph + + nodes : collection of nodes (default=G.nodes) + collection (list, set, etc.) of nodes to visit + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + cycle : bool (default: True) + Indicates whether a cycle should be returned, or a path. + Note: the cycle is the approximate minimal cycle. + The path simply removes the biggest edge in that cycle. + + method : function (default: None) + A function that returns a cycle on all nodes and approximates + the solution to the traveling salesman problem on a complete + graph. The returned cycle is then used to find a corresponding + solution on `G`. `method` should be callable; take inputs + `G`, and `weight`; and return a list of nodes along the cycle. + + Provided options include :func:`christofides`, :func:`greedy_tsp`, + :func:`simulated_annealing_tsp` and :func:`threshold_accepting_tsp`. + + If `method is None`: use :func:`christofides` for undirected `G` and + :func:`asadpour_atsp` for directed `G`. + + **kwargs : dict + Other keyword arguments to be passed to the `method` function passed in. 
+ + Returns + ------- + list + List of nodes in `G` along a path with an approximation of the minimal + path through `nodes`. + + Raises + ------ + NetworkXError + If `G` is a directed graph it has to be strongly connected or the + complete version cannot be generated. + + Examples + -------- + >>> tsp = nx.approximation.traveling_salesman_problem + >>> G = nx.cycle_graph(9) + >>> G[4][5]["weight"] = 5 # all other weights are 1 + >>> tsp(G, nodes=[3, 6]) + [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3] + >>> path = tsp(G, cycle=False) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + + While no longer required, you can still build (curry) your own function + to provide parameter values to the methods. + + >>> SA_tsp = nx.approximation.simulated_annealing_tsp + >>> method = lambda G, weight: SA_tsp(G, "greedy", weight=weight, temp=500) + >>> path = tsp(G, cycle=False, method=method) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + + Otherwise, pass other keyword arguments directly into the tsp function. + + >>> path = tsp( + ... G, + ... cycle=False, + ... method=nx.approximation.simulated_annealing_tsp, + ... init_cycle="greedy", + ... temp=500, + ... ) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + """ + if method is None: + if G.is_directed(): + method = asadpour_atsp + else: + method = christofides + if nodes is None: + nodes = list(G.nodes) + + dist = {} + path = {} + for n, (d, p) in nx.all_pairs_dijkstra(G, weight=weight): + dist[n] = d + path[n] = p + + if G.is_directed(): + # If the graph is not strongly connected, raise an exception + if not nx.is_strongly_connected(G): + raise nx.NetworkXError("G is not strongly connected") + GG = nx.DiGraph() + else: + GG = nx.Graph() + for u in nodes: + for v in nodes: + if u == v: + continue + # Ensure that the weight attribute on GG has the + # same name as the input graph + GG.add_edge(u, v, **{weight: dist[u][v]}) + + best_GG = method(GG, weight=weight, **kwargs) + + if not cycle: + # find and remove the biggest edge + (u, v) = max(pairwise(best_GG), key=lambda x: dist[x[0]][x[1]]) + pos = best_GG.index(u) + 1 + while best_GG[pos] != v: + pos = best_GG[pos:].index(u) + 1 + best_GG = best_GG[pos:-1] + best_GG[:pos] + + best_path = [] + for u, v in pairwise(best_GG): + best_path.extend(path[u][v][:-1]) + best_path.append(v) + return best_path + + +@not_implemented_for("undirected") +@py_random_state(2) +@nx._dispatchable(edge_attrs="weight", mutates_input=True) +def asadpour_atsp(G, weight="weight", seed=None, source=None): + """ + Returns an approximate solution to the traveling salesman problem. + + This approximate solution is one of the best known approximations for the + asymmetric traveling salesman problem developed by Asadpour et al, + [1]_. The algorithm first solves the Held-Karp relaxation to find a lower + bound for the weight of the cycle. Next, it constructs an exponential + distribution of undirected spanning trees where the probability of an + edge being in the tree corresponds to the weight of that edge using a + maximum entropy rounding scheme. Next we sample that distribution + $2 \\lceil \\ln n \\rceil$ times and save the minimum sampled tree once the + direction of the arcs is added back to the edges. Finally, we augment + then short circuit that graph to find the approximate tour for the + salesman. + + Parameters + ---------- + G : nx.DiGraph + The graph should be a complete weighted directed graph. 
The
+        distance between all pairs of nodes should be included and the triangle
+        inequality should hold. That is, the direct edge between any two nodes
+        should be the path of least cost.
+
+    weight : string, optional (default="weight")
+        Edge data key corresponding to the edge weight.
+        If any edge does not have this attribute the weight is set to 1.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    source : node label (default=`None`)
+        If given, return the cycle starting and ending at the given node.
+
+    Returns
+    -------
+    cycle : list of nodes
+        Returns the cycle (list of nodes) that a salesman can follow to minimize
+        the total weight of the trip.
+
+    Raises
+    ------
+    NetworkXError
+        If `G` is not complete or has fewer than two nodes, the algorithm raises
+        an exception.
+
+    NetworkXError
+        If `source` is not `None` and is not a node in `G`, the algorithm raises
+        an exception.
+
+    NetworkXNotImplemented
+        If `G` is an undirected graph.
+
+    References
+    ----------
+    .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi,
+       An o(log n/log log n)-approximation algorithm for the asymmetric
+       traveling salesman problem, Operations research, 65 (2017),
+       pp. 1043–1061
+
+    Examples
+    --------
+    >>> import networkx as nx
+    >>> import networkx.algorithms.approximation as approx
+    >>> G = nx.complete_graph(3, create_using=nx.DiGraph)
+    >>> nx.set_edge_attributes(
+    ...     G,
+    ...     {(0, 1): 2, (1, 2): 2, (2, 0): 2, (0, 2): 1, (2, 1): 1, (1, 0): 1},
+    ...     "weight",
+    ... )
+    >>> tour = approx.asadpour_atsp(G, source=0)
+    >>> tour
+    [0, 2, 1, 0]
+    """
+    from math import ceil, exp
+    from math import log as ln
+
+    # Check that G is a complete graph
+    N = len(G) - 1
+    if N < 1:
+        raise nx.NetworkXError("G must have at least two nodes")
+    # This check ignores selfloops which is what we want here.
+    if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
+        raise nx.NetworkXError("G is not a complete DiGraph")
+    # Check that the source vertex, if given, is in the graph
+    if source is not None and source not in G.nodes:
+        raise nx.NetworkXError("Given source node not in G.")
+    # handle 2 node case
+    if N == 1:
+        if source is None:
+            return list(G)
+        return [source, next(n for n in G if n != source)]
+
+    opt_hk, z_star = held_karp_ascent(G, weight)
+
+    # Test to see if the ascent method found an integer solution or a fractional
+    # solution.
+    # If it is integral then z_star is an nx.Graph, otherwise it is a dict.
+    if not isinstance(z_star, dict):
+        # Here we are using the shortcutting method to go from the list of edges
+        # returned from eulerian_circuit to a list of nodes
+        return _shortcutting(nx.eulerian_circuit(z_star, source=source))
+
+    # Create the undirected support of z_star
+    z_support = nx.MultiGraph()
+    for u, v in z_star:
+        if (u, v) not in z_support.edges:
+            edge_weight = min(G[u][v][weight], G[v][u][weight])
+            z_support.add_edge(u, v, **{weight: edge_weight})
+
+    # Create the exponential distribution of spanning trees
+    gamma = spanning_tree_distribution(z_support, z_star)
+
+    # Write the lambda values to the edges of z_support
+    z_support = nx.Graph(z_support)
+    lambda_dict = {(u, v): exp(gamma[(u, v)]) for u, v in z_support.edges()}
+    nx.set_edge_attributes(z_support, lambda_dict, "weight")
+    del gamma, lambda_dict
+
+    # Sample 2 * ceil( ln(n) ) spanning trees and record the minimum one
+    minimum_sampled_tree = None
+    minimum_sampled_tree_weight = math.inf
+    for _ in range(2 * ceil(ln(G.number_of_nodes()))):
+        sampled_tree = random_spanning_tree(z_support, "weight", seed=seed)
+        sampled_tree_weight = sampled_tree.size(weight)
+        if sampled_tree_weight < minimum_sampled_tree_weight:
+            minimum_sampled_tree = sampled_tree.copy()
+            minimum_sampled_tree_weight = sampled_tree_weight
+
+    # Orient the edges in that tree to keep the cost of the tree the same.
+    t_star = nx.MultiDiGraph()
+    for u, v, d in minimum_sampled_tree.edges(data=weight):
+        if d == G[u][v][weight]:
+            t_star.add_edge(u, v, **{weight: d})
+        else:
+            t_star.add_edge(v, u, **{weight: d})
+
+    # Find the node demands needed to neutralize the flow of t_star in G
+    node_demands = {n: t_star.out_degree(n) - t_star.in_degree(n) for n in t_star}
+    nx.set_node_attributes(G, node_demands, "demand")
+
+    # Find the min_cost_flow
+    flow_dict = nx.min_cost_flow(G, "demand")
+
+    # Build the flow into t_star (the loop variable is named u rather than
+    # source so that it does not shadow the source parameter used below)
+    for u, values in flow_dict.items():
+        for target in values:
+            if (u, target) not in t_star.edges and values[target] > 0:
+                # If values[target] > 0 we have to add that many edges
+                for _ in range(values[target]):
+                    t_star.add_edge(u, target)
+
+    # Return the shortcut Eulerian circuit
+    circuit = nx.eulerian_circuit(t_star, source=source)
+    return _shortcutting(circuit)
+
+
+@nx._dispatchable(edge_attrs="weight", mutates_input=True, returns_graph=True)
+def held_karp_ascent(G, weight="weight"):
+    """
+    Minimizes the Held-Karp relaxation of the TSP for `G`
+
+    Solves the Held-Karp relaxation of the input complete digraph and scales
+    the output solution for use in the Asadpour [1]_ ATSP algorithm.
+
+    The Held-Karp relaxation defines the lower bound for solutions to the
+    ATSP, although it may return a fractional solution. This is used in the
+    Asadpour algorithm as an initial solution which is later rounded to an
+    integral tree within the spanning tree polytopes. This function solves
+    the relaxation with the branch and bound method in [2]_.
+
+    Parameters
+    ----------
+    G : nx.DiGraph
+        The graph should be a complete weighted directed graph.
+        The distance between all pairs of nodes should be included.
+
+    weight : string, optional (default="weight")
+        Edge data key corresponding to the edge weight.
+        If any edge does not have this attribute the weight is set to 1.
+
+    Returns
+    -------
+    OPT : float
+        The cost for the optimal solution to the Held-Karp relaxation
+    z : dict or nx.Graph
+        A symmetrized and scaled version of the optimal solution to the
+        Held-Karp relaxation for use in the Asadpour algorithm.
+
+        If an integral solution is found, then that is an optimal solution for
+        the ATSP problem and that is returned instead.
+
+    References
+    ----------
+    .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi,
+       An o(log n/log log n)-approximation algorithm for the asymmetric
+       traveling salesman problem, Operations research, 65 (2017),
+       pp. 1043–1061
+
+    .. [2] M. Held, R. M. Karp, The traveling-salesman problem and minimum
+       spanning trees, Operations Research, 1970-11-01, Vol. 18 (6),
+       pp. 1138-1162
+    """
+    import numpy as np
+    import scipy as sp
+
+    def k_pi():
+        """
+        Find the set of minimum 1-Arborescences for G at point pi.
+
+        Returns
+        -------
+        Set
+            The set of minimum 1-Arborescences
+        """
+        # Create a copy of G without vertex 1.
+        G_1 = G.copy()
+        minimum_1_arborescences = set()
+        minimum_1_arborescence_weight = math.inf
+
+        # node is node '1' in the Held and Karp paper
+        n = next(G.__iter__())
+        G_1.remove_node(n)
+
+        # Iterate over the spanning arborescences of the graph until we know
+        # that we have found the minimum 1-arborescences. My proposed strategy
+        # is to find the most expensive root to connect to from 'node 1' and
+        # the least expensive one. We then iterate over arborescences until
+        # the cost of the basic arborescence is the cost of the minimum one
+        # plus the difference between the most and least expensive roots,
+        # that way the cost of connecting 'node 1' will by definition not be
+        # minimum
+        min_root = {"node": None, weight: math.inf}
+        max_root = {"node": None, weight: -math.inf}
+        for u, v, d in G.edges(n, data=True):
+            if d[weight] < min_root[weight]:
+                min_root = {"node": v, weight: d[weight]}
+            if d[weight] > max_root[weight]:
+                max_root = {"node": v, weight: d[weight]}
+
+        min_in_edge = min(G.in_edges(n, data=True), key=lambda x: x[2][weight])
+        min_root[weight] = min_root[weight] + min_in_edge[2][weight]
+        max_root[weight] = max_root[weight] + min_in_edge[2][weight]
+
+        min_arb_weight = math.inf
+        for arb in nx.ArborescenceIterator(G_1):
+            arb_weight = arb.size(weight)
+            if min_arb_weight == math.inf:
+                min_arb_weight = arb_weight
+            elif arb_weight > min_arb_weight + max_root[weight] - min_root[weight]:
+                break
+            # We have to pick the root node of the arborescence for the out
+            # edge of the first vertex as that is the only node without an
+            # edge directed into it.
+            for N, deg in arb.in_degree:
+                if deg == 0:
+                    # root found
+                    arb.add_edge(n, N, **{weight: G[n][N][weight]})
+                    arb_weight += G[n][N][weight]
+                    break
+
+            # We can pick the minimum weight in-edge for the vertex with
+            # a cycle. If there are multiple edges with the same minimum
+            # weight, we need to add all of them.
+            #
+            # Delete the edge (N, n) so that we cannot pick it.
+            edge_data = G[N][n]
+            G.remove_edge(N, n)
+            min_weight = min(G.in_edges(n, data=weight), key=lambda x: x[2])[2]
+            min_edges = [
+                (u, v, d) for u, v, d in G.in_edges(n, data=weight) if d == min_weight
+            ]
+            for u, v, d in min_edges:
+                new_arb = arb.copy()
+                new_arb.add_edge(u, v, **{weight: d})
+                new_arb_weight = arb_weight + d
+                # Check the weight of the new arborescence: if it is a
+                # new minimum, clear all of the old potential minimum
+                # 1-arborescences and add this as the only one. If its
+                # weight is above the known minimum, do not add it.
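+                # A hedged restatement of the bookkeeping below: a strictly
+                # smaller weight first clears the candidate set and lowers the
+                # recorded minimum, after which the equality test admits the
+                # new arborescence; a tie is admitted without clearing.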
+                if new_arb_weight < minimum_1_arborescence_weight:
+                    minimum_1_arborescences.clear()
+                    minimum_1_arborescence_weight = new_arb_weight
+                # We have a 1-arborescence, add it to the set
+                if new_arb_weight == minimum_1_arborescence_weight:
+                    minimum_1_arborescences.add(new_arb)
+            G.add_edge(N, n, **edge_data)
+
+        return minimum_1_arborescences
+
+    def direction_of_ascent():
+        """
+        Find the direction of ascent at point pi.
+
+        See [1]_ for more information.
+
+        Returns
+        -------
+        dict
+            A mapping from the nodes of the graph representing the direction
+            of ascent.
+
+        References
+        ----------
+        .. [1] M. Held, R. M. Karp, The traveling-salesman problem and minimum
+           spanning trees, Operations Research, 1970-11-01, Vol. 18 (6),
+           pp. 1138-1162
+        """
+        # 1. Set d equal to the zero n-vector.
+        d = {}
+        for n in G:
+            d[n] = 0
+        del n
+        # 2. Find a 1-Arborescence T^k such that k is in K(pi, d).
+        minimum_1_arborescences = k_pi()
+        while True:
+            # Reduce K(pi) to K(pi, d)
+            # Find the arborescence in K(pi) which increases the least in
+            # direction d
+            min_k_d_weight = math.inf
+            min_k_d = None
+            for arborescence in minimum_1_arborescences:
+                weighted_cost = 0
+                for n, deg in arborescence.degree:
+                    weighted_cost += d[n] * (deg - 2)
+                if weighted_cost < min_k_d_weight:
+                    min_k_d_weight = weighted_cost
+                    min_k_d = arborescence
+
+            # 3. If sum of d_i * v_{i, k} is greater than zero, terminate
+            if min_k_d_weight > 0:
+                return d, min_k_d
+            # 4. d_i = d_i + v_{i, k}
+            for n, deg in min_k_d.degree:
+                d[n] += deg - 2
+            # Check that we do not need to terminate because the direction
+            # of ascent does not exist. This is done with linear
+            # programming.
+            c = np.full(len(minimum_1_arborescences), -1, dtype=int)
+            a_eq = np.empty((len(G) + 1, len(minimum_1_arborescences)), dtype=int)
+            b_eq = np.zeros(len(G) + 1, dtype=int)
+            b_eq[len(G)] = 1
+            for arb_count, arborescence in enumerate(minimum_1_arborescences):
+                n_count = len(G) - 1
+                for n, deg in arborescence.degree:
+                    a_eq[n_count][arb_count] = deg - 2
+                    n_count -= 1
+                a_eq[len(G)][arb_count] = 1
+            program_result = sp.optimize.linprog(
+                c, A_eq=a_eq, b_eq=b_eq, method="highs-ipm"
+            )
+            # If the linear program is feasible, then the direction of ascent
+            # does not exist
+            if program_result.success:
+                # There is no direction of ascent
+                return None, minimum_1_arborescences
+
+            # 5. GO TO 2
+
+    def find_epsilon(k, d):
+        """
+        Given the direction of ascent at pi, find the maximum distance we can go
+        in that direction.
+
+        Parameters
+        ----------
+        k : nx.DiGraph
+            The 1-arborescence which has the minimum rate of increase
+            in the direction of ascent
+
+        d : dict
+            The direction of ascent
+
+        Returns
+        -------
+        float
+            The distance we can travel in direction `d`
+        """
+        min_epsilon = math.inf
+        for e_u, e_v, e_w in G.edges(data=weight):
+            if (e_u, e_v) in k.edges:
+                continue
+            # Now, I have found a condition which MUST be true for an edge to
+            # be a valid substitute. The edge in the graph which is the
+            # substitute is the one with the same terminal end. This can be
+            # checked rather simply.
+            #
+            # Find the edge within k which is the substitute. Because k is a
+            # 1-arborescence, we know that there is only one such edge
+            # leading into every vertex.
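+            # Guard for the 1-arborescence invariant assumed above: every
+            # vertex has in-degree at most one, so the substitute edge into
+            # e_v is unique; anything else indicates a corrupted k.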
+            if len(k.in_edges(e_v, data=weight)) > 1:
+                raise Exception("k is not a valid 1-arborescence")
+            sub_u, sub_v, sub_w = next(k.in_edges(e_v, data=weight).__iter__())
+            k.add_edge(e_u, e_v, **{weight: e_w})
+            k.remove_edge(sub_u, sub_v)
+            if (
+                max(d for n, d in k.in_degree()) <= 1
+                and len(G) == k.number_of_edges()
+                and nx.is_weakly_connected(k)
+            ):
+                # Ascent method calculation
+                if d[sub_u] == d[e_u] or sub_w == e_w:
+                    # Revert to the original graph
+                    k.remove_edge(e_u, e_v)
+                    k.add_edge(sub_u, sub_v, **{weight: sub_w})
+                    continue
+                epsilon = (sub_w - e_w) / (d[e_u] - d[sub_u])
+                if 0 < epsilon < min_epsilon:
+                    min_epsilon = epsilon
+            # Revert to the original graph
+            k.remove_edge(e_u, e_v)
+            k.add_edge(sub_u, sub_v, **{weight: sub_w})
+
+        return min_epsilon
+
+    # I have to know that the elements in pi correspond to the correct elements
+    # in the direction of ascent, even if the node labels are not integers.
+    # Thus, I will use dictionaries to make that mapping.
+    pi_dict = {}
+    for n in G:
+        pi_dict[n] = 0
+    del n
+    original_edge_weights = {}
+    for u, v, d in G.edges(data=True):
+        original_edge_weights[(u, v)] = d[weight]
+    dir_ascent, k_d = direction_of_ascent()
+    while dir_ascent is not None:
+        max_distance = find_epsilon(k_d, dir_ascent)
+        for n, v in dir_ascent.items():
+            pi_dict[n] += max_distance * v
+        for u, v, d in G.edges(data=True):
+            d[weight] = original_edge_weights[(u, v)] + pi_dict[u]
+        dir_ascent, k_d = direction_of_ascent()
+        nx._clear_cache(G)
+    # k_d is no longer an individual 1-arborescence but rather a set of
+    # minimal 1-arborescences at the maximum point of the polytope and should
+    # be reflected as such
+    k_max = k_d
+
+    # Search for a cycle within k_max. If a cycle exists, return it as the
+    # solution
+    for k in k_max:
+        if len([n for n in k if k.degree(n) == 2]) == G.order():
+            # Tour found
+            # TODO: this branch does not restore original_edge_weights of G!
+            return k.size(weight), k
+
+    # Write the original edge weights back to G and every member of k_max at
+    # the maximum point. Also average the number of times that edge appears in
+    # the set of minimal 1-arborescences.
+    x_star = {}
+    size_k_max = len(k_max)
+    for u, v, d in G.edges(data=True):
+        edge_count = 0
+        d[weight] = original_edge_weights[(u, v)]
+        for k in k_max:
+            if (u, v) in k.edges():
+                edge_count += 1
+                k[u][v][weight] = original_edge_weights[(u, v)]
+        x_star[(u, v)] = edge_count / size_k_max
+    # Now symmetrize the edges in x_star and scale them according to (5) in
+    # reference [1]
+    z_star = {}
+    scale_factor = (G.order() - 1) / G.order()
+    for u, v in x_star:
+        frequency = x_star[(u, v)] + x_star[(v, u)]
+        if frequency > 0:
+            z_star[(u, v)] = scale_factor * frequency
+    del x_star
+    # Return the optimal weight and the z dict
+    return next(k_max.__iter__()).size(weight), z_star
+
+
+@nx._dispatchable
+def spanning_tree_distribution(G, z):
+    """
+    Find the Asadpour exponential distribution of spanning trees.
+
+    Solves the Maximum Entropy Convex Program in the Asadpour algorithm [1]_
+    using the approach in section 7 to build an exponential distribution of
+    undirected spanning trees.
+
+    This algorithm ensures that the probability of any edge in a spanning
+    tree is proportional to the sum of the probabilities of the trees
+    containing that edge over the sum of the probabilities of all spanning
+    trees of the graph.
+
+    Parameters
+    ----------
+    G : nx.MultiGraph
+        The undirected support graph for the Held-Karp relaxation
+
+    z : dict
+        The output of `held_karp_ascent()`, a scaled version of the Held-Karp
+        solution.
+
+    Returns
+    -------
+    gamma : dict
+        The probability distribution which approximately preserves the marginal
+        probabilities of `z`.
+    """
+    from math import exp
+    from math import log as ln
+
+    def q(e):
+        """
+        The value of q(e), as described in the Asadpour paper, is "the
+        probability that edge e will be included in a spanning tree T that is
+        chosen with probability proportional to exp(gamma(T))", which
+        basically means that it is the total probability of the edge appearing
+        across the whole distribution.
+
+        Parameters
+        ----------
+        e : tuple
+            The `(u, v)` tuple describing the edge we are interested in
+
+        Returns
+        -------
+        float
+            The probability that a spanning tree chosen according to the
+            current values of gamma will include edge `e`.
+        """
+        # Create the Laplacian matrices
+        for u, v, d in G.edges(data=True):
+            d[lambda_key] = exp(gamma[(u, v)])
+        G_Kirchhoff = nx.number_of_spanning_trees(G, weight=lambda_key)
+        G_e = nx.contracted_edge(G, e, self_loops=False)
+        G_e_Kirchhoff = nx.number_of_spanning_trees(G_e, weight=lambda_key)
+
+        # Multiply by the weight of the contracted edge since it is not included
+        # in the total weight of the contracted graph.
+        return exp(gamma[(e[0], e[1])]) * G_e_Kirchhoff / G_Kirchhoff
+
+    # initialize gamma to the zero dict
+    gamma = {}
+    for u, v, _ in G.edges:
+        gamma[(u, v)] = 0
+
+    # set epsilon
+    EPSILON = 0.2
+
+    # pick an edge attribute name that is unlikely to be in the graph
+    lambda_key = "spanning_tree_distribution's secret attribute name for lambda"
+
+    while True:
+        # We need to know that no values of q_e are greater than
+        # (1 + epsilon) * z_e; however, changing one gamma value can increase
+        # the value of a different q_e, so we have to complete the for loop
+        # without changing anything for the condition to be met
+        in_range_count = 0
+        # Search for an edge with q_e > (1 + epsilon) * z_e
+        for u, v in gamma:
+            e = (u, v)
+            q_e = q(e)
+            z_e = z[e]
+            if q_e > (1 + EPSILON) * z_e:
+                delta = ln(
+                    (q_e * (1 - (1 + EPSILON / 2) * z_e))
+                    / ((1 - q_e) * (1 + EPSILON / 2) * z_e)
+                )
+                gamma[e] -= delta
+                # Check that delta had the desired effect
+                new_q_e = q(e)
+                desired_q_e = (1 + EPSILON / 2) * z_e
+                if round(new_q_e, 8) != round(desired_q_e, 8):
+                    raise nx.NetworkXError(
+                        f"Unable to modify probability for edge ({u}, {v})"
+                    )
+            else:
+                in_range_count += 1
+        # Check if the for loop terminated without changing any gamma
+        if in_range_count == len(gamma):
+            break
+
+    # Remove the new edge attributes
+    for _, _, d in G.edges(data=True):
+        if lambda_key in d:
+            del d[lambda_key]
+
+    return gamma
+
+
+@nx._dispatchable(edge_attrs="weight")
+def greedy_tsp(G, weight="weight", source=None):
+    """Return a low cost cycle starting at `source`.
+
+    This approximates a solution to the traveling salesman problem.
+    It finds a cycle through all the nodes that a salesman can visit
+    while keeping the total distance small.
+    It uses a simple greedy algorithm.
+    In essence, this function returns a large cycle given a source point
+    for which the total cost of the cycle is minimized.
+
+    Parameters
+    ----------
+    G : Graph
+        The Graph should be a complete weighted undirected graph.
+        The distance between all pairs of nodes should be included.
+
+    weight : string, optional (default="weight")
+        Edge data key corresponding to the edge weight.
+        If any edge does not have this attribute the weight is set to 1.
+
+    source : node, optional (default: first node in list(G))
+        Starting node.
If None, defaults to ``next(iter(G))`` + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete, the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from( + ... { + ... ("A", "B", 3), + ... ("A", "C", 17), + ... ("A", "D", 14), + ... ("B", "A", 3), + ... ("B", "C", 12), + ... ("B", "D", 16), + ... ("C", "A", 13), + ... ("C", "B", 12), + ... ("C", "D", 4), + ... ("D", "A", 14), + ... ("D", "B", 15), + ... ("D", "C", 2), + ... } + ... ) + >>> cycle = approx.greedy_tsp(G, source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + + Notes + ----- + This implementation of a greedy algorithm is based on the following: + + - The algorithm adds a node to the solution at every iteration. + - The algorithm selects a node not already in the cycle whose connection + to the previous node adds the least cost to the cycle. + + A greedy algorithm does not always give the best solution. + However, it can construct a first feasible solution which can + be passed as a parameter to an iterative improvement algorithm such + as Simulated Annealing, or Threshold Accepting. + + Time complexity: It has a running time $O(|V|^2)$ + """ + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if source is None: + source = nx.utils.arbitrary_element(G) + + if G.number_of_nodes() == 2: + neighbor = next(G.neighbors(source)) + return [source, neighbor, source] + + nodeset = set(G) + nodeset.remove(source) + cycle = [source] + next_node = source + while nodeset: + nbrdict = G[next_node] + next_node = min(nodeset, key=lambda n: nbrdict[n].get(weight, 1)) + cycle.append(next_node) + nodeset.remove(next_node) + cycle.append(cycle[0]) + return cycle + + +@py_random_state(9) +@nx._dispatchable(edge_attrs="weight") +def simulated_annealing_tsp( + G, + init_cycle, + weight="weight", + source=None, + temp=100, + move="1-1", + max_iterations=10, + N_inner=100, + alpha=0.01, + seed=None, +): + """Returns an approximate solution to the traveling salesman problem. + + This function uses simulated annealing to approximate the minimal cost + cycle through the nodes. Starting from a suboptimal solution, simulated + annealing perturbs that solution, occasionally accepting changes that make + the solution worse to escape from a locally optimal solution. The chance + of accepting such changes decreases over the iterations to encourage + an optimal result. In summary, the function returns a cycle starting + at `source` for which the total cost is minimized. It also returns the cost. + + The chance of accepting a proposed change is related to a parameter called + the temperature (annealing has a physical analogue of steel hardening + as it cools). As the temperature is reduced, the chance of moves that + increase cost goes down. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted graph. + The distance between all pairs of nodes should be included. + + init_cycle : list of all nodes or "greedy" + The initial solution (a cycle through all nodes returning to the start). 
+ This argument has no default to make you think about it. + If "greedy", use `greedy_tsp(G, weight)`. + Other common starting cycles are `list(G) + [next(iter(G))]` or the final + result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + temp : int, optional (default=100) + The algorithm's temperature parameter. It represents the initial + value of temperature + + move : "1-1" or "1-0" or function, optional (default="1-1") + Indicator of what move to use when finding new trial solutions. + Strings indicate two special built-in moves: + + - "1-1": 1-1 exchange which transposes the position + of two elements of the current solution. + The function called is :func:`swap_two_nodes`. + For example if we apply 1-1 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can get the following by the transposition of 1 and 4 elements: + ``A' = [3, 2, 4, 1, 3]`` + - "1-0": 1-0 exchange which moves an node in the solution + to a new position. + The function called is :func:`move_one_node`. + For example if we apply 1-0 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can transfer the fourth element to the second position: + ``A' = [3, 4, 2, 1, 3]`` + + You may provide your own functions to enact a move from + one solution to a neighbor solution. The function must take + the solution as input along with a `seed` input to control + random number generation (see the `seed` input here). + Your function should maintain the solution as a cycle with + equal first and last node and all others appearing once. + Your function should return the new solution. + + max_iterations : int, optional (default=10) + Declared done when this number of consecutive iterations of + the outer loop occurs without any change in the best cost solution. + + N_inner : int, optional (default=100) + The number of iterations of the inner loop. + + alpha : float between (0, 1), optional (default=0.01) + Percentage of temperature decrease in each iteration + of outer loop + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from( + ... { + ... ("A", "B", 3), + ... ("A", "C", 17), + ... ("A", "D", 14), + ... ("B", "A", 3), + ... ("B", "C", 12), + ... ("B", "D", 16), + ... ("C", "A", 13), + ... ("C", "B", 12), + ... ("C", "D", 4), + ... ("D", "A", 14), + ... ("D", "B", 15), + ... ("D", "C", 2), + ... } + ... 
)
+    >>> cycle = approx.simulated_annealing_tsp(G, "greedy", source="D")
+    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
+    >>> cycle
+    ['D', 'C', 'B', 'A', 'D']
+    >>> cost
+    31
+    >>> incycle = ["D", "B", "A", "C", "D"]
+    >>> cycle = approx.simulated_annealing_tsp(G, incycle, source="D")
+    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
+    >>> cycle
+    ['D', 'C', 'B', 'A', 'D']
+    >>> cost
+    31
+
+    Notes
+    -----
+    Simulated Annealing is a metaheuristic local search algorithm.
+    The main characteristic of this algorithm is that it accepts
+    even solutions which lead to the increase of the cost in order
+    to escape from low quality local optimal solutions.
+
+    This algorithm needs an initial solution. If not provided, it is
+    constructed by a simple greedy algorithm. At every iteration, the
+    algorithm carefully selects a neighbor solution.
+    Consider $c(x)$, the cost of the current solution, and $c(x')$, the
+    cost of a neighbor solution.
+    If $c(x') - c(x) <= 0$ then the neighbor solution becomes the current
+    solution for the next iteration. Otherwise, the algorithm accepts
+    the neighbor solution with probability $p = \exp(-(c(x') - c(x)) / temp)$
+    and retains the current solution otherwise.
+
+    `temp` is a parameter of the algorithm and represents temperature.
+
+    Time complexity:
+    For $N_i$ iterations of the inner loop and $N_o$ iterations of the
+    outer loop, this algorithm has running time $O(N_i * N_o * |V|)$.
+
+    For more information and how the algorithm is inspired see:
+    http://en.wikipedia.org/wiki/Simulated_annealing
+    """
+    if move == "1-1":
+        move = swap_two_nodes
+    elif move == "1-0":
+        move = move_one_node
+    if init_cycle == "greedy":
+        # Construct an initial solution using a greedy algorithm.
+        cycle = greedy_tsp(G, weight=weight, source=source)
+        if G.number_of_nodes() == 2:
+            return cycle
+
+    else:
+        cycle = list(init_cycle)
+        if source is None:
+            source = cycle[0]
+        elif source != cycle[0]:
+            raise nx.NetworkXError("source must be first node in init_cycle")
+        if cycle[0] != cycle[-1]:
+            raise nx.NetworkXError("init_cycle must be a cycle. (return to start)")
+
+        if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G):
+            raise nx.NetworkXError("init_cycle should be a cycle over all nodes in G.")
+
+        # Check that G is a complete graph
+        N = len(G) - 1
+        # This check ignores selfloops which is what we want here.
+        if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
+            raise nx.NetworkXError("G must be a complete graph.")
+
+        if G.number_of_nodes() == 2:
+            neighbor = next(G.neighbors(source))
+            return [source, neighbor, source]
+
+    # Find the cost of initial solution
+    cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle))
+
+    count = 0
+    best_cycle = cycle.copy()
+    best_cost = cost
+    while count <= max_iterations and temp > 0:
+        count += 1
+        for i in range(N_inner):
+            adj_sol = move(cycle, seed)
+            adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol))
+            delta = adj_cost - cost
+            if delta <= 0:
+                # Set the current solution to the adjacent solution.
+                cycle = adj_sol
+                cost = adj_cost
+
+                if cost < best_cost:
+                    count = 0
+                    best_cycle = cycle.copy()
+                    best_cost = cost
+            else:
+                # Accept even a worse solution with probability p.
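+                # Metropolis-style acceptance: p decays exponentially in
+                # delta and rises with temp. With hypothetical numbers,
+                # delta=10 at temp=100 gives p = exp(-0.1) ~ 0.90, while the
+                # same delta at temp=10 gives p = exp(-1) ~ 0.37, so uphill
+                # moves grow rarer as the temperature anneals.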
+ p = math.exp(-delta / temp) + if p >= seed.random(): + cycle = adj_sol + cost = adj_cost + temp -= temp * alpha + + return best_cycle + + +@py_random_state(9) +@nx._dispatchable(edge_attrs="weight") +def threshold_accepting_tsp( + G, + init_cycle, + weight="weight", + source=None, + threshold=1, + move="1-1", + max_iterations=10, + N_inner=100, + alpha=0.1, + seed=None, +): + """Returns an approximate solution to the traveling salesman problem. + + This function uses threshold accepting methods to approximate the minimal cost + cycle through the nodes. Starting from a suboptimal solution, threshold + accepting methods perturb that solution, accepting any changes that make + the solution no worse than increasing by a threshold amount. Improvements + in cost are accepted, but so are changes leading to small increases in cost. + This allows the solution to leave suboptimal local minima in solution space. + The threshold is decreased slowly as iterations proceed helping to ensure + an optimum. In summary, the function returns a cycle starting at `source` + for which the total cost is minimized. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted graph. + The distance between all pairs of nodes should be included. + + init_cycle : list or "greedy" + The initial solution (a cycle through all nodes returning to the start). + This argument has no default to make you think about it. + If "greedy", use `greedy_tsp(G, weight)`. + Other common starting cycles are `list(G) + [next(iter(G))]` or the final + result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + threshold : int, optional (default=1) + The algorithm's threshold parameter. It represents the initial + threshold's value + + move : "1-1" or "1-0" or function, optional (default="1-1") + Indicator of what move to use when finding new trial solutions. + Strings indicate two special built-in moves: + + - "1-1": 1-1 exchange which transposes the position + of two elements of the current solution. + The function called is :func:`swap_two_nodes`. + For example if we apply 1-1 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can get the following by the transposition of 1 and 4 elements: + ``A' = [3, 2, 4, 1, 3]`` + - "1-0": 1-0 exchange which moves an node in the solution + to a new position. + The function called is :func:`move_one_node`. + For example if we apply 1-0 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can transfer the fourth element to the second position: + ``A' = [3, 4, 2, 1, 3]`` + + You may provide your own functions to enact a move from + one solution to a neighbor solution. The function must take + the solution as input along with a `seed` input to control + random number generation (see the `seed` input here). + Your function should maintain the solution as a cycle with + equal first and last node and all others appearing once. + Your function should return the new solution. + + max_iterations : int, optional (default=10) + Declared done when this number of consecutive iterations of + the outer loop occurs without any change in the best cost solution. + + N_inner : int, optional (default=100) + The number of iterations of the inner loop. 
+ + alpha : float between (0, 1), optional (default=0.1) + Percentage of threshold decrease when there is at + least one acceptance of a neighbor solution. + If no inner loop moves are accepted the threshold remains unchanged. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from( + ... { + ... ("A", "B", 3), + ... ("A", "C", 17), + ... ("A", "D", 14), + ... ("B", "A", 3), + ... ("B", "C", 12), + ... ("B", "D", 16), + ... ("C", "A", 13), + ... ("C", "B", 12), + ... ("C", "D", 4), + ... ("D", "A", 14), + ... ("D", "B", 15), + ... ("D", "C", 2), + ... } + ... ) + >>> cycle = approx.threshold_accepting_tsp(G, "greedy", source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + >>> incycle = ["D", "B", "A", "C", "D"] + >>> cycle = approx.threshold_accepting_tsp(G, incycle, source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + + Notes + ----- + Threshold Accepting is a metaheuristic local search algorithm. + The main characteristic of this algorithm is that it accepts + even solutions which lead to the increase of the cost in order + to escape from low quality local optimal solutions. + + This algorithm needs an initial solution. This solution can be + constructed by a simple greedy algorithm. At every iteration, it + selects thoughtfully a neighbor solution. + Consider $c(x)$ cost of current solution and $c(x')$ cost of + neighbor solution. + If $c(x') - c(x) <= threshold$ then the neighbor solution becomes the current + solution for the next iteration, where the threshold is named threshold. + + In comparison to the Simulated Annealing algorithm, the Threshold + Accepting algorithm does not accept very low quality solutions + (due to the presence of the threshold value). In the case of + Simulated Annealing, even a very low quality solution can + be accepted with probability $p$. + + Time complexity: + It has a running time $O(m * n * |V|)$ where $m$ and $n$ are the number + of times the outer and inner loop run respectively. + + For more information and how algorithm is inspired see: + https://doi.org/10.1016/0021-9991(90)90201-B + + See Also + -------- + simulated_annealing_tsp + + """ + if move == "1-1": + move = swap_two_nodes + elif move == "1-0": + move = move_one_node + if init_cycle == "greedy": + # Construct an initial solution using a greedy algorithm. + cycle = greedy_tsp(G, weight=weight, source=source) + if G.number_of_nodes() == 2: + return cycle + + else: + cycle = list(init_cycle) + if source is None: + source = cycle[0] + elif source != cycle[0]: + raise nx.NetworkXError("source must be first node in init_cycle") + if cycle[0] != cycle[-1]: + raise nx.NetworkXError("init_cycle must be a cycle. 
(return to start)") + + if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G): + raise nx.NetworkXError("init_cycle is not all and only nodes.") + + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if G.number_of_nodes() == 2: + neighbor = list(G.neighbors(source))[0] + return [source, neighbor, source] + + # Find the cost of initial solution + cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle)) + + count = 0 + best_cycle = cycle.copy() + best_cost = cost + while count <= max_iterations: + count += 1 + accepted = False + for i in range(N_inner): + adj_sol = move(cycle, seed) + adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol)) + delta = adj_cost - cost + if delta <= threshold: + accepted = True + + # Set current solution the adjacent solution. + cycle = adj_sol + cost = adj_cost + + if cost < best_cost: + count = 0 + best_cycle = cycle.copy() + best_cost = cost + if accepted: + threshold -= threshold * alpha + + return best_cycle diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/treewidth.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/treewidth.py new file mode 100644 index 0000000..ef1b2f7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/treewidth.py @@ -0,0 +1,255 @@ +"""Functions for computing treewidth decomposition. + +Treewidth of an undirected graph is a number associated with the graph. +It can be defined as the size of the largest vertex set (bag) in a tree +decomposition of the graph minus one. + +`Wikipedia: Treewidth `_ + +The notions of treewidth and tree decomposition have gained their +attractiveness partly because many graph and network problems that are +intractable (e.g., NP-hard) on arbitrary graphs become efficiently +solvable (e.g., with a linear time algorithm) when the treewidth of the +input graphs is bounded by a constant [1]_ [2]_. + +There are two different functions for computing a tree decomposition: +:func:`treewidth_min_degree` and :func:`treewidth_min_fill_in`. + +.. [1] Hans L. Bodlaender and Arie M. C. A. Koster. 2010. "Treewidth + computations I.Upper bounds". Inf. Comput. 208, 3 (March 2010),259-275. + http://dx.doi.org/10.1016/j.ic.2009.03.008 + +.. [2] Hans L. Bodlaender. "Discovering Treewidth". Institute of Information + and Computing Sciences, Utrecht University. + Technical Report UU-CS-2005-018. + http://www.cs.uu.nl + +.. [3] K. Wang, Z. Lu, and J. Hicks *Treewidth*. + https://web.archive.org/web/20210507025929/http://web.eecs.utk.edu/~cphill25/cs594_spring2015_projects/treewidth.pdf + +""" + +import itertools +import sys +from heapq import heapify, heappop, heappush + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["treewidth_min_degree", "treewidth_min_fill_in"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(returns_graph=True) +def treewidth_min_degree(G): + """Returns a treewidth decomposition using the Minimum Degree heuristic. + + The heuristic chooses the nodes according to their degree, i.e., first + the node with the lowest degree is chosen, then the graph is updated + and the corresponding node is removed. Next, a new node with the lowest + degree is chosen, and so on. 
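+
+    A small doctest-style sketch of typical usage (the heuristic happens to
+    attain the exact treewidth of a 4-cycle, which is 2):
+
+    >>> G = nx.cycle_graph(4)
+    >>> treewidth, decomp = treewidth_min_degree(G)
+    >>> treewidth
+    2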
+ + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. + """ + deg_heuristic = MinDegreeHeuristic(G) + return treewidth_decomp(G, lambda graph: deg_heuristic.best_node(graph)) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(returns_graph=True) +def treewidth_min_fill_in(G): + """Returns a treewidth decomposition using the Minimum Fill-in heuristic. + + The heuristic chooses a node from the graph, where the number of edges + added turning the neighborhood of the chosen node into a clique is as + small as possible. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. + """ + return treewidth_decomp(G, min_fill_in_heuristic) + + +class MinDegreeHeuristic: + """Implements the Minimum Degree heuristic. + + The heuristic chooses the nodes according to their degree + (number of neighbors), i.e., first the node with the lowest degree is + chosen, then the graph is updated and the corresponding node is + removed. Next, a new node with the lowest degree is chosen, and so on. + """ + + def __init__(self, graph): + self._graph = graph + + # nodes that have to be updated in the heap before each iteration + self._update_nodes = [] + + self._degreeq = [] # a heapq with 3-tuples (degree,unique_id,node) + self.count = itertools.count() + + # build heap with initial degrees + for n in graph: + self._degreeq.append((len(graph[n]), next(self.count), n)) + heapify(self._degreeq) + + def best_node(self, graph): + # update nodes in self._update_nodes + for n in self._update_nodes: + # insert changed degrees into degreeq + heappush(self._degreeq, (len(graph[n]), next(self.count), n)) + + # get the next valid (minimum degree) node + while self._degreeq: + (min_degree, _, elim_node) = heappop(self._degreeq) + if elim_node not in graph or len(graph[elim_node]) != min_degree: + # outdated entry in degreeq + continue + elif min_degree == len(graph) - 1: + # fully connected: abort condition + return None + + # remember to update nodes in the heap before getting the next node + self._update_nodes = graph[elim_node] + return elim_node + + # the heap is empty: abort + return None + + +def min_fill_in_heuristic(graph_dict): + """Implements the Minimum Fill-in heuristic. + + graph_dict: dict keyed by node to sets of neighbors (no self-loops) + + Returns the node from the graph, where the number of edges added when + turning the neighborhood of the chosen node into a clique is as small as + possible. This algorithm chooses the nodes using the Minimum Fill-In + heuristic. The running time of the algorithm is :math:`O(V^3)` and it uses + additional constant memory. 
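+ + Examples + -------- + A minimal, illustrative sketch (this adjacency dict is hypothetical, not + part of the original docstring): eliminating a leaf of a path graph adds + no fill-in edges, so a leaf is returned. + + >>> adj = {0: {1, 2}, 1: {0}, 2: {0}} # the path 1 - 0 - 2 + >>> min_fill_in_heuristic(adj) + 1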
+ """ + + if len(graph_dict) == 0: + return None + + min_fill_in_node = None + + min_fill_in = sys.maxsize + + # sort nodes by degree + nodes_by_degree = sorted(graph_dict, key=lambda x: len(graph_dict[x])) + min_degree = len(graph_dict[nodes_by_degree[0]]) + + # abort condition (handle complete graph) + if min_degree == len(graph_dict) - 1: + return None + + for node in nodes_by_degree: + num_fill_in = 0 + nbrs = graph_dict[node] + for nbr in nbrs: + # count how many nodes in nbrs current nbr is not connected to + # subtract 1 for the node itself + num_fill_in += len(nbrs - graph_dict[nbr]) - 1 + if num_fill_in >= 2 * min_fill_in: + break + + num_fill_in /= 2 # divide by 2 because of double counting + + if num_fill_in < min_fill_in: # update min-fill-in node + if num_fill_in == 0: + return node + min_fill_in = num_fill_in + min_fill_in_node = node + + return min_fill_in_node + + +@nx._dispatchable(returns_graph=True) +def treewidth_decomp(G, heuristic=min_fill_in_heuristic): + """Returns a treewidth decomposition using the passed heuristic. + + Parameters + ---------- + G : NetworkX graph + heuristic : heuristic function + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. + """ + + # make dict-of-sets structure + graph_dict = {n: set(G[n]) - {n} for n in G} + + # stack containing nodes and neighbors in the order from the heuristic + node_stack = [] + + # get first node from heuristic + elim_node = heuristic(graph_dict) + while elim_node is not None: + # connect all neighbors with each other + nbrs = graph_dict[elim_node] + for u, v in itertools.permutations(nbrs, 2): + if v not in graph_dict[u]: + graph_dict[u].add(v) + + # push node and its current neighbors on stack + node_stack.append((elim_node, nbrs)) + + # remove node from graph_dict + for u in graph_dict[elim_node]: + graph_dict[u].remove(elim_node) + + del graph_dict[elim_node] + elim_node = heuristic(graph_dict) + + # the abort condition is met; put all remaining nodes into one bag + decomp = nx.Graph() + first_bag = frozenset(graph_dict.keys()) + decomp.add_node(first_bag) + + treewidth = len(first_bag) - 1 + + while node_stack: + # get node and its neighbors from the stack + (curr_node, nbrs) = node_stack.pop() + + # find a bag all neighbors are in + old_bag = None + for bag in decomp.nodes: + if nbrs <= bag: + old_bag = bag + break + + if old_bag is None: + # no old_bag was found: just connect to the first_bag + old_bag = first_bag + + # create new node for decomposition + nbrs.add(curr_node) + new_bag = frozenset(nbrs) + + # update treewidth + treewidth = max(treewidth, len(new_bag) - 1) + + # add edge to decomposition (implicitly also adds the new node) + decomp.add_edge(old_bag, new_bag) + + return treewidth, decomp diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/vertex_cover.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/vertex_cover.py new file mode 100644 index 0000000..13d7167 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/approximation/vertex_cover.py @@ -0,0 +1,83 @@ +"""Functions for computing an approximate minimum weight vertex cover. + +A |vertex cover|_ is a subset of nodes such that each edge in the graph +is incident to at least one node in the subset. + +.. _vertex cover: https://en.wikipedia.org/wiki/Vertex_cover +.. 
|vertex cover| replace:: *vertex cover* + +""" + +import networkx as nx + +__all__ = ["min_weighted_vertex_cover"] + + +@nx._dispatchable(node_attrs="weight") +def min_weighted_vertex_cover(G, weight=None): + r"""Returns an approximate minimum weighted vertex cover. + + The set of nodes returned by this function is guaranteed to be a + vertex cover, and the total weight of the set is guaranteed to be at + most twice the total weight of the minimum weight vertex cover. In + other words, + + .. math:: + + w(S) \leq 2 * w(S^*), + + where $S$ is the vertex cover returned by this function, + $S^*$ is the vertex cover of minimum weight out of all vertex + covers of the graph, and $w$ is the function that computes the + sum of the weights of each node in that given set. + + Parameters + ---------- + G : NetworkX graph + + weight : string, optional (default = None) + If None, every node has weight 1. If a string, use this node + attribute as the node weight. A node without this attribute is + assumed to have weight 1. + + Returns + ------- + min_weighted_cover : set + Returns a set of nodes whose weight sum is no more than twice + the weight sum of the minimum weight vertex cover. + + Notes + ----- + For a directed graph, a vertex cover has the same definition: a set + of nodes such that each edge in the graph is incident to at least + one node in the set. Whether the node is the head or tail of the + directed edge is ignored. + + This is the local-ratio algorithm for computing an approximate + vertex cover. The algorithm greedily reduces the costs over edges, + iteratively building a cover. The worst-case runtime of this + implementation is $O(m \log n)$, where $n$ is the number + of nodes and $m$ the number of edges in the graph. + + References + ---------- + .. [1] Bar-Yehuda, R., and Even, S. (1985). "A local-ratio theorem for + approximating the weighted vertex cover problem." + *Annals of Discrete Mathematics*, 25, 27–46 + + + """ + cost = dict(G.nodes(data=weight, default=1)) + # While there are uncovered edges, choose an uncovered edge and update + # the remaining costs of its endpoints. 
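+ # (Editorial sketch of the local-ratio step below, using the names from + # the code: for each still-uncovered edge (u, v), the smaller remaining + # endpoint cost is subtracted from the larger one, and the endpoint whose + # remaining cost is exhausted joins the cover. Any vertex cover must also + # pay that charge on (u, v), which is what gives the factor-2 bound + # promised in the docstring.)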
+ cover = set() + for u, v in G.edges(): + if u in cover or v in cover: + continue + if cost[u] <= cost[v]: + cover.add(u) + cost[v] -= cost[u] + else: + cover.add(v) + cost[u] -= cost[v] + return cover diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__init__.py new file mode 100644 index 0000000..4d98886 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__init__.py @@ -0,0 +1,5 @@ +from networkx.algorithms.assortativity.connectivity import * +from networkx.algorithms.assortativity.correlation import * +from networkx.algorithms.assortativity.mixing import * +from networkx.algorithms.assortativity.neighbor_degree import * +from networkx.algorithms.assortativity.pairs import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..2dfc570 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-312.pyc new file mode 100644 index 0000000..b4324e3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-312.pyc new file mode 100644 index 0000000..9c71a36 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-312.pyc new file mode 100644 index 0000000..ed22d16 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-312.pyc new file mode 100644 index 0000000..57534f9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-312.pyc new file mode 100644 index 0000000..b00d7ab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/connectivity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/connectivity.py new file mode 100644 
index 0000000..c3fde0d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/connectivity.py @@ -0,0 +1,122 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["average_degree_connectivity"] + + +@nx._dispatchable(edge_attrs="weight") +def average_degree_connectivity( + G, source="in+out", target="in+out", nodes=None, weight=None +): + r"""Compute the average degree connectivity of a graph. + + The average degree connectivity is the average nearest neighbor degree of + nodes with degree k. For weighted graphs, an analogous measure can + be computed using the weighted average neighbors degree defined in + [1]_, for a node `i`, as + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, + `w_{ij}` is the weight of the edge that links `i` and `j`, + and `N(i)` are the neighbors of node `i`. + + Parameters + ---------- + G : NetworkX graph + + source : "in"|"out"|"in+out" (default:"in+out") + Directed graphs only. Use "in"- or "out"-degree for source node. + + target : "in"|"out"|"in+out" (default:"in+out") + Directed graphs only. Use "in"- or "out"-degree for target node. + + nodes : list or iterable (optional) + Compute neighbor connectivity for these nodes. The default is all + nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + d : dict + A dictionary keyed by degree k with the value of average connectivity. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', + 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[1, 2]["weight"] = 3 + >>> nx.average_degree_connectivity(G) + {1: 2.0, 2: 1.5} + >>> nx.average_degree_connectivity(G, weight="weight") + {1: 2.0, 2: 1.75} + + See Also + -------- + average_neighbor_degree + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). + """ + # First, determine the type of neighbors and the type of degree to use. + if G.is_directed(): + if source not in ("in", "out", "in+out"): + raise nx.NetworkXError('source must be one of "in", "out", or "in+out"') + if target not in ("in", "out", "in+out"): + raise nx.NetworkXError('target must be one of "in", "out", or "in+out"') + direction = {"out": G.out_degree, "in": G.in_degree, "in+out": G.degree} + neighbor_funcs = { + "out": G.successors, + "in": G.predecessors, + "in+out": G.neighbors, + } + source_degree = direction[source] + target_degree = direction[target] + neighbors = neighbor_funcs[source] + # `reverse` indicates whether to look at the in-edge when + # computing the weight of an edge. + reverse = source == "in" + else: + if source != "in+out" or target != "in+out": + raise nx.NetworkXError( + "source and target arguments are only supported for directed graphs" + ) + source_degree = G.degree + target_degree = G.degree + neighbors = G.neighbors + reverse = False + dsum = defaultdict(int) + dnorm = defaultdict(int) + # Check if `source_nodes` is actually a single node in the graph. 
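+ # (Editorial note: for a container of nodes, `source_degree(nodes)` yields + # (node, degree) pairs, but for a single node it returns a plain int, so + # the branch below re-wraps the single-node case as a one-element list of + # pairs before the loop.)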
+ source_nodes = source_degree(nodes) + if nodes in G: + source_nodes = [(nodes, source_degree(nodes))] + for n, k in source_nodes: + nbrdeg = target_degree(neighbors(n)) + if weight is None: + s = sum(d for n, d in nbrdeg) + else: # weight nbr degree by weight of (n,nbr) edge + if reverse: + s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg) + else: + s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg) + dnorm[k] += source_degree(n, weight=weight) + dsum[k] += s + + # normalize + return {k: avg if dnorm[k] == 0 else avg / dnorm[k] for k, avg in dsum.items()} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/correlation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/correlation.py new file mode 100644 index 0000000..52ae7a1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/correlation.py @@ -0,0 +1,302 @@ +"""Node assortativity coefficients and correlation measures.""" + +import networkx as nx +from networkx.algorithms.assortativity.mixing import ( + attribute_mixing_matrix, + degree_mixing_matrix, +) +from networkx.algorithms.assortativity.pairs import node_degree_xy + +__all__ = [ + "degree_pearson_correlation_coefficient", + "degree_assortativity_coefficient", + "attribute_assortativity_coefficient", + "numeric_assortativity_coefficient", +] + + +@nx._dispatchable(edge_attrs="weight") +def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute degree assortativity only for nodes in container. + The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_assortativity_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + See Also + -------- + attribute_assortativity_coefficient + numeric_assortativity_coefficient + degree_mixing_dict + degree_mixing_matrix + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , where e is the joint + probability distribution (mixing matrix) of the degrees. If G is + directed, then the matrix e is the joint probability of the + user-specified degree type for the source and target. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). 
+ """ + if nodes is None: + nodes = G.nodes + + degrees = None + + if G.is_directed(): + indeg = ( + {d for _, d in G.in_degree(nodes, weight=weight)} + if "in" in (x, y) + else set() + ) + outdeg = ( + {d for _, d in G.out_degree(nodes, weight=weight)} + if "out" in (x, y) + else set() + ) + degrees = set.union(indeg, outdeg) + else: + degrees = {d for _, d in G.degree(nodes, weight=weight)} + + mapping = {d: i for i, d in enumerate(degrees)} + M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping) + + return _numeric_ac(M, mapping=mapping) + + +@nx._dispatchable(edge_attrs="weight") +def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + This is the same as degree_assortativity_coefficient but uses the + potentially faster scipy.stats.pearsonr function. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute pearson correlation of degrees only for specified nodes. + The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_pearson_correlation_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + Notes + ----- + This calls scipy.stats.pearsonr. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + import scipy as sp + + xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + x, y = zip(*xy) + return float(sp.stats.pearsonr(x, y)[0]) + + +@nx._dispatchable(node_attrs="attribute") +def attribute_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key + + nodes: list or iterable (optional) + Compute attribute assortativity for nodes in container. + The default is all nodes. + + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.attribute_assortativity_coefficient(G, "color")) + 1.0 + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)), + where M is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. 
Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + M = attribute_mixing_matrix(G, attribute, nodes) + return attribute_ac(M) + + +@nx._dispatchable(node_attrs="attribute") +def numeric_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for numerical node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given numeric attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Compute numeric assortativity only for attributes of nodes in + container. The default is all nodes. + + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], size=2) + >>> G.add_nodes_from([2, 3], size=3) + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.numeric_assortativity_coefficient(G, "size")) + 1.0 + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation + coefficient of the specified (scalar valued) attribute across edges. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + """ + if nodes is None: + nodes = G.nodes + vals = {G.nodes[n][attribute] for n in nodes} + mapping = {d: i for i, d in enumerate(vals)} + M = attribute_mixing_matrix(G, attribute, nodes, mapping) + return _numeric_ac(M, mapping) + + +def attribute_ac(M): + """Compute assortativity for attribute matrix M. + + Parameters + ---------- + M : numpy.ndarray + 2D ndarray representing the attribute mixing matrix. + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)), + where e is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + if M.sum() != 1.0: + M = M / M.sum() + s = (M @ M).sum() + t = M.trace() + r = (t - s) / (1 - s) + return float(r) + + +def _numeric_ac(M, mapping): + # M is a 2D numpy array + # numeric assortativity coefficient, pearsonr + import numpy as np + + if M.sum() != 1.0: + M = M / M.sum() + x = np.array(list(mapping.keys())) + y = x # x and y have the same support + idx = list(mapping.values()) + a = M.sum(axis=0) + b = M.sum(axis=1) + vara = (a[idx] * x**2).sum() - ((a[idx] * x).sum()) ** 2 + varb = (b[idx] * y**2).sum() - ((b[idx] * y).sum()) ** 2 + xy = np.outer(x, y) + ab = np.outer(a[idx], b[idx]) + return float((xy * (M - ab)).sum() / np.sqrt(vara * varb)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/mixing.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/mixing.py new file mode 100644 index 0000000..1762d4e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/mixing.py @@ -0,0 +1,255 @@ +""" +Mixing matrices for node attributes and degree. +""" + +import networkx as nx +from networkx.algorithms.assortativity.pairs import node_attribute_xy, node_degree_xy +from networkx.utils import dict_to_numpy_array + +__all__ = [ + "attribute_mixing_matrix", + "attribute_mixing_dict", + "degree_mixing_matrix", + "degree_mixing_dict", + "mixing_dict", +] + + +@nx._dispatchable(node_attrs="attribute") +def attribute_mixing_dict(G, attribute, nodes=None, normalized=False): + """Returns dictionary representation of mixing matrix for attribute. 
+ + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Use nodes in container to build the dict. The default is all nodes. + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edge(1, 3) + >>> d = nx.attribute_mixing_dict(G, "color") + >>> print(d["red"]["blue"]) + 1 + >>> print(d["blue"]["red"])  # d symmetric for undirected graphs + 1 + + Returns + ------- + d : dictionary + Counts or joint probability of occurrence of attribute pairs. + """ + xy_iter = node_attribute_xy(G, attribute, nodes) + return mixing_dict(xy_iter, normalized=normalized) + + +@nx._dispatchable(node_attrs="attribute") +def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None, normalized=True): + """Returns mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Use only nodes in container to build the matrix. The default is + all nodes. + + mapping : dictionary, optional + Mapping from node attribute to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + Returns + ------- + m: numpy array + Counts or joint probability of occurrence of attribute pairs. + + Notes + ----- + If each node has a unique attribute value, the unnormalized mixing matrix + will be equal to the adjacency matrix. To get a denser mixing matrix, + the rounding can be performed to form groups of nodes with equal values. + For example, the exact height of persons in cm (180.79155222, 163.9080892, + 163.30095355, 167.99016217, 168.21590163, ...) can be rounded to (180, 163, + 163, 168, 168, ...). + + Definitions of attribute mixing matrix vary on whether the matrix + should include rows for attribute values that don't arise. Here we + do not include such empty rows. But you can force them to appear + by inputting a `mapping` that includes those values. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> gender = {0: "male", 1: "female", 2: "female"} + >>> nx.set_node_attributes(G, gender, "gender") + >>> mapping = {"male": 0, "female": 1} + >>> mix_mat = nx.attribute_mixing_matrix(G, "gender", mapping=mapping) + >>> mix_mat + array([[0.  , 0.25], + [0.25, 0.5 ]]) + """ + d = attribute_mixing_dict(G, attribute, nodes) + a = dict_to_numpy_array(d, mapping=mapping) + if normalized: + a = a / a.sum() + return a + + +@nx._dispatchable(edge_attrs="weight") +def degree_mixing_dict(G, x="out", y="in", weight=None, nodes=None, normalized=False): + """Returns dictionary representation of mixing matrix for degree. + + Parameters + ---------- + G : graph + NetworkX graph object. + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + normalized : bool (default=False) + Return counts if False or probabilities if True. 
+ + Returns + ------- + d: dictionary + Counts or joint probability of occurrence of degree pairs. + """ + xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + return mixing_dict(xy_iter, normalized=normalized) + + +@nx._dispatchable(edge_attrs="weight") +def degree_mixing_matrix( + G, x="out", y="in", weight=None, nodes=None, normalized=True, mapping=None +): + """Returns mixing matrix for degree. + + Parameters + ---------- + G : graph + NetworkX graph object. + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + nodes: list or iterable (optional) + Build the matrix using only nodes in container. + The default is all nodes. + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + mapping : dictionary, optional + Mapping from node degree to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + Returns + ------- + m: numpy array + Counts, or joint probability, of occurrence of degree pairs. + + Notes + ----- + Definitions of degree mixing matrix vary on whether the matrix + should include rows for degree values that don't arise. Here we + do not include such empty rows. But you can force them to appear + by inputting a `mapping` that includes those values. See examples. + + Examples + -------- + >>> G = nx.star_graph(3) + >>> mix_mat = nx.degree_mixing_matrix(G) + >>> mix_mat + array([[0. , 0.5], + [0.5, 0. ]]) + + If you want every possible degree to appear as a row, even if no nodes + have that degree, use `mapping` as follows, + + >>> max_degree = max(deg for n, deg in G.degree) + >>> mapping = {x: x for x in range(max_degree + 1)}  # identity mapping + >>> mix_mat = nx.degree_mixing_matrix(G, mapping=mapping) + >>> mix_mat + array([[0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0.5], + [0. , 0. , 0. , 0. ], + [0. , 0.5, 0. , 0. ]]) + """ + d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight) + a = dict_to_numpy_array(d, mapping=mapping) + if normalized: + a = a / a.sum() + return a + + +def mixing_dict(xy, normalized=False): + """Returns a dictionary representation of mixing matrix. + + Parameters + ---------- + xy : list or container of two-tuples + Pairs of (x,y) items. + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Returns + ------- + d: dictionary + Counts or joint probability of occurrence of values in xy. 
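+ + Examples + -------- + A small, illustrative sketch (the input pairs are hypothetical): + + >>> mixing_dict([("a", "b"), ("b", "a")]) + {'a': {'b': 1}, 'b': {'a': 1}} + >>> mixing_dict([("a", "b"), ("b", "a")], normalized=True) + {'a': {'b': 0.5}, 'b': {'a': 0.5}}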
+ """ + d = {} + psum = 0.0 + for x, y in xy: + if x not in d: + d[x] = {} + if y not in d: + d[y] = {} + v = d[x].get(y, 0) + d[x][y] = v + 1 + psum += 1 + + if normalized: + for _, jdict in d.items(): + for j in jdict: + jdict[j] /= psum + return d diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/neighbor_degree.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/neighbor_degree.py new file mode 100644 index 0000000..6488d04 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/neighbor_degree.py @@ -0,0 +1,160 @@ +import networkx as nx + +__all__ = ["average_neighbor_degree"] + + +@nx._dispatchable(edge_attrs="weight") +def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None): + r"""Returns the average degree of the neighborhood of each node. + + In an undirected graph, the neighborhood `N(i)` of node `i` contains the + nodes that are connected to `i` by an edge. + + For directed graphs, `N(i)` is defined according to the parameter `source`: + + - if source is 'in', then `N(i)` consists of predecessors of node `i`. + - if source is 'out', then `N(i)` consists of successors of node `i`. + - if source is 'in+out', then `N(i)` is both predecessors and successors. + + The average neighborhood degree of a node `i` is + + .. math:: + + k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j + + where `N(i)` are the neighbors of node `i` and `k_j` is + the degree of node `j` which belongs to `N(i)`. For weighted + graphs, an analogous measure can be defined [1]_, + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, `w_{ij}` + is the weight of the edge that links `i` and `j` and + `N(i)` are the neighbors of node `i`. + + + Parameters + ---------- + G : NetworkX graph + + source : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-neighbors of source node. + + target : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-degree for target node. + + nodes : list or iterable, optional (default=G.nodes) + Compute neighbor degree only for specified nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + d: dict + A dictionary keyed by node to the average degree of its neighbors. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[0, 1]["weight"] = 5 + >>> G.edges[2, 3]["weight"] = 3 + + >>> nx.average_neighbor_degree(G) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0} + >>> nx.average_neighbor_degree(G, weight="weight") + {0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0} + + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.average_neighbor_degree(G, source="in", target="in") + {0: 0.0, 1: 0.0, 2: 1.0, 3: 1.0} + + >>> nx.average_neighbor_degree(G, source="out", target="out") + {0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0} + + See Also + -------- + average_degree_connectivity + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). 
+ """ + if G.is_directed(): + if source == "in": + source_degree = G.in_degree + elif source == "out": + source_degree = G.out_degree + elif source == "in+out": + source_degree = G.degree + else: + raise nx.NetworkXError( + f"source argument {source} must be 'in', 'out' or 'in+out'" + ) + + if target == "in": + target_degree = G.in_degree + elif target == "out": + target_degree = G.out_degree + elif target == "in+out": + target_degree = G.degree + else: + raise nx.NetworkXError( + f"target argument {target} must be 'in', 'out' or 'in+out'" + ) + else: + if source != "out" or target != "out": + raise nx.NetworkXError( + f"source and target arguments are only supported for directed graphs" + ) + source_degree = target_degree = G.degree + + # precompute target degrees -- should *not* be weighted degree + t_deg = dict(target_degree()) + + # Set up both predecessor and successor neighbor dicts leaving empty if not needed + G_P = G_S = {n: {} for n in G} + if G.is_directed(): + # "in" or "in+out" cases: G_P contains predecessors + if "in" in source: + G_P = G.pred + # "out" or "in+out" cases: G_S contains successors + if "out" in source: + G_S = G.succ + else: + # undirected leave G_P empty but G_S is the adjacency + G_S = G.adj + + # Main loop: Compute average degree of neighbors + avg = {} + for n, deg in source_degree(nodes, weight=weight): + # handle degree zero average + if deg == 0: + avg[n] = 0.0 + continue + + # we sum over both G_P and G_S, but one of the two is usually empty. + if weight is None: + avg[n] = ( + sum(t_deg[nbr] for nbr in G_S[n]) + sum(t_deg[nbr] for nbr in G_P[n]) + ) / deg + else: + avg[n] = ( + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_S[n].items()) + + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_P[n].items()) + ) / deg + return avg diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/pairs.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/pairs.py new file mode 100644 index 0000000..ea5fd28 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/pairs.py @@ -0,0 +1,127 @@ +"""Generators of x-y pairs of node data.""" + +import networkx as nx + +__all__ = ["node_attribute_xy", "node_degree_xy"] + + +@nx._dispatchable(node_attrs="attribute") +def node_attribute_xy(G, attribute, nodes=None): + """Yields 2-tuples of node attribute values for all edges in `G`. + + This generator yields, for each edge in `G` incident to a node in `nodes`, + a 2-tuple of form ``(attribute value, attribute value)`` for the parameter + specified node-attribute. + + Parameters + ---------- + G: NetworkX graph + + attribute: key + The node attribute key. + + nodes: list or iterable (optional) + Use only edges that are incident to specified nodes. + The default is all nodes. + + Yields + ------ + (x, y): 2-tuple + Generates 2-tuple of (attribute, attribute) values. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_node(1, color="red") + >>> G.add_node(2, color="blue") + >>> G.add_node(3, color="green") + >>> G.add_edge(1, 2) + >>> list(nx.node_attribute_xy(G, "color")) + [('red', 'blue')] + + Notes + ----- + For undirected graphs, each edge is produced twice, once for each edge + representation (u, v) and (v, u), with the exception of self-loop edges + which only appear once. 
+ """ + if nodes is None: + nodes = set(G) + else: + nodes = set(nodes) + Gnodes = G.nodes + for u, nbrsdict in G.adjacency(): + if u not in nodes: + continue + uattr = Gnodes[u].get(attribute, None) + if G.is_multigraph(): + for v, keys in nbrsdict.items(): + vattr = Gnodes[v].get(attribute, None) + for _ in keys: + yield (uattr, vattr) + else: + for v in nbrsdict: + vattr = Gnodes[v].get(attribute, None) + yield (uattr, vattr) + + +@nx._dispatchable(edge_attrs="weight") +def node_degree_xy(G, x="out", y="in", weight=None, nodes=None): + """Yields 2-tuples of ``(degree, degree)`` values for edges in `G`. + + This generator yields, for each edge in `G` incident to a node in `nodes`, + a 2-tuple of form ``(degree, degree)``. The node degrees are weighted + when a `weight` attribute is specified. + + Parameters + ---------- + G: NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Use only edges that are adjacency to specified nodes. + The default is all nodes. + + Yields + ------ + (x, y): 2-tuple + Generates 2-tuple of (degree, degree) values. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> list(nx.node_degree_xy(G, x="out", y="in")) + [(1, 1)] + >>> list(nx.node_degree_xy(G, x="in", y="out")) + [(0, 0)] + + Notes + ----- + For undirected graphs, each edge is produced twice, once for each edge + representation (u, v) and (v, u), with the exception of self-loop edges + which only appear once. 
+ """ + nodes = set(G) if nodes is None else set(nodes) + if G.is_directed(): + direction = {"out": G.out_degree, "in": G.in_degree} + xdeg = direction[x] + ydeg = direction[y] + else: + xdeg = ydeg = G.degree + + for u, degu in xdeg(nodes, weight=weight): + # use G.edges to treat multigraphs correctly + neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes) + for _, degv in ydeg(neighbors, weight=weight): + yield degu, degv diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..f3f3ec9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-312.pyc new file mode 100644 index 0000000..8e79b9b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-312.pyc new file mode 100644 index 0000000..dc8b7ef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-312.pyc new file mode 100644 index 0000000..8686d50 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-312.pyc new file mode 100644 index 0000000..9362482 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-312.pyc new file mode 100644 index 0000000..416a845 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-312.pyc new file mode 100644 index 0000000..c660a39 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/base_test.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/base_test.py new file mode 100644 index 0000000..46d6300 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/base_test.py @@ -0,0 +1,81 @@ +import networkx as nx + + +class BaseTestAttributeMixing: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_nodes_from([0, 1], fish="one") + G.add_nodes_from([2, 3], fish="two") + G.add_nodes_from([4], fish="red") + G.add_nodes_from([5], fish="blue") + G.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.G = G + + D = nx.DiGraph() + D.add_nodes_from([0, 1], fish="one") + D.add_nodes_from([2, 3], fish="two") + D.add_nodes_from([4], fish="red") + D.add_nodes_from([5], fish="blue") + D.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.D = D + + M = nx.MultiGraph() + M.add_nodes_from([0, 1], fish="one") + M.add_nodes_from([2, 3], fish="two") + M.add_nodes_from([4], fish="red") + M.add_nodes_from([5], fish="blue") + M.add_edges_from([(0, 1), (0, 1), (2, 3)]) + cls.M = M + + S = nx.Graph() + S.add_nodes_from([0, 1], fish="one") + S.add_nodes_from([2, 3], fish="two") + S.add_nodes_from([4], fish="red") + S.add_nodes_from([5], fish="blue") + S.add_edge(0, 0) + S.add_edge(2, 2) + cls.S = S + + N = nx.Graph() + N.add_nodes_from([0, 1], margin=-2) + N.add_nodes_from([2, 3], margin=-2) + N.add_nodes_from([4], margin=-3) + N.add_nodes_from([5], margin=-4) + N.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.N = N + + F = nx.Graph() + F.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + F.add_edge(0, 2, weight=1) + nx.set_node_attributes(F, dict(F.degree(weight="weight")), "margin") + cls.F = F + + K = nx.Graph() + K.add_nodes_from([1, 2], margin=-1) + K.add_nodes_from([3], margin=1) + K.add_nodes_from([4], margin=2) + K.add_edges_from([(3, 4), (1, 2), (1, 3)]) + cls.K = K + + +class BaseTestDegreeMixing: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.D = nx.DiGraph() + cls.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)]) + cls.D2 = nx.DiGraph() + cls.D2.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)]) + cls.M = nx.MultiGraph() + nx.add_path(cls.M, range(4)) + cls.M.add_edge(0, 1) + cls.S = nx.Graph() + cls.S.add_edges_from([(0, 0), (1, 1)]) + cls.W = nx.Graph() + cls.W.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + cls.W.add_edge(0, 2, weight=1) + S1 = nx.star_graph(4) + S2 = nx.star_graph(4) + cls.DS = nx.disjoint_union(S1, S2) + cls.DS.add_edge(4, 5) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py new file mode 100644 index 0000000..21c6287 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py @@ -0,0 +1,143 @@ +from itertools import permutations + +import pytest + +import networkx as nx + + +class TestNeighborConnectivity: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {1: 2.0, 2: 1.5} + nd = 
nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.5} + nd = nx.average_degree_connectivity(D) + assert nd == answer + + answer = {1: 2.0, 2: 1.5} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="weight") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="weight") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, weight="weight", source="in", target="in" + ) + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, source="in", target="out", weight="weight" + ) + assert nd == answer + + def test_weight_keyword(self): + G = nx.path_graph(4) + G[1][2]["other"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="other") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G, weight=None) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="other") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_degree_connectivity(G)[5] + assert nd == 1.8 + nd = nx.average_degree_connectivity(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_zero_deg(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(1, 4) + c = nx.average_degree_connectivity(G) + assert c == {1: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="in", target="in") + assert c == {0: 0, 1: 0} + c = nx.average_degree_connectivity(G, source="in", target="out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="in", target="in+out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="out", target="out") + assert c == {0: 0, 3: 0} + c = nx.average_degree_connectivity(G, source="out", target="in") + assert c == {0: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="out", target="in+out") + assert c == {0: 0, 3: 1} + + def test_in_out_weight(self): + G = nx.DiGraph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=1) + G.add_edge(3, 1, weight=1) + for s, t in permutations(["in", "out", "in+out"], 2): + c = nx.average_degree_connectivity(G, source=s, target=t) + cw = nx.average_degree_connectivity(G, source=s, target=t, weight="weight") + assert c == cw + + def test_invalid_source(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, source="bogus") + + def test_invalid_target(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, target="bogus") + + def 
test_invalid_undirected_graph(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, target="bogus") + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, source="bogus") + + def test_single_node(self): + # TODO Is this really the intended behavior for providing a + # single node as the argument `nodes`? Shouldn't the function + # just return the connectivity value itself? + G = nx.trivial_graph() + conn = nx.average_degree_connectivity(G, nodes=0) + assert conn == {0: 0} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py new file mode 100644 index 0000000..147c837 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py @@ -0,0 +1,122 @@ +import pytest + +import networkx as nx +from networkx.algorithms.assortativity.correlation import attribute_ac + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestDegreeMixingCorrelation(BaseTestDegreeMixing): + def test_degree_assortativity_undirected(self): + r = nx.degree_assortativity_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_assortativity_node_kwargs(self): + G = nx.Graph() + edges = [(0, 1), (0, 3), (1, 2), (1, 3), (1, 4), (5, 9), (9, 0)] + G.add_edges_from(edges) + r = nx.degree_assortativity_coefficient(G, nodes=[1, 2, 4]) + np.testing.assert_almost_equal(r, -1.0, decimal=4) + + def test_degree_assortativity_directed(self): + r = nx.degree_assortativity_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_assortativity_directed2(self): + """Test degree assortativity for a directed graph where the set of + in/out degree does not equal the total degree.""" + r = nx.degree_assortativity_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_assortativity_multigraph(self): + r = nx.degree_assortativity_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_pearson_assortativity_undirected(self): + r = nx.degree_pearson_correlation_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_pearson_assortativity_directed(self): + r = nx.degree_pearson_correlation_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_pearson_assortativity_directed2(self): + """Test degree assortativity with Pearson for a directed graph where + the set of in/out degree does not equal the total degree.""" + r = nx.degree_pearson_correlation_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_pearson_assortativity_multigraph(self): + r = nx.degree_pearson_correlation_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_assortativity_weighted(self): + r = nx.degree_assortativity_coefficient(self.W, weight="weight") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_degree_assortativity_double_star(self): + r = nx.degree_assortativity_coefficient(self.DS) + np.testing.assert_almost_equal(r, -0.9339, decimal=4) + + +class TestAttributeMixingCorrelation(BaseTestAttributeMixing): + def test_attribute_assortativity_undirected(self): + 
r = nx.attribute_assortativity_coefficient(self.G, "fish") + assert r == 6.0 / 22.0 + + def test_attribute_assortativity_directed(self): + r = nx.attribute_assortativity_coefficient(self.D, "fish") + assert r == 1.0 / 3.0 + + def test_attribute_assortativity_multigraph(self): + r = nx.attribute_assortativity_coefficient(self.M, "fish") + assert r == 1.0 + + def test_attribute_assortativity_coefficient(self): + # from "Mixing patterns in networks" + # fmt: off + a = np.array([[0.258, 0.016, 0.035, 0.013], + [0.012, 0.157, 0.058, 0.019], + [0.013, 0.023, 0.306, 0.035], + [0.005, 0.007, 0.024, 0.016]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.623, decimal=3) + + def test_attribute_assortativity_coefficient2(self): + # fmt: off + a = np.array([[0.18, 0.02, 0.01, 0.03], + [0.02, 0.20, 0.03, 0.02], + [0.01, 0.03, 0.16, 0.01], + [0.03, 0.02, 0.01, 0.22]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.68, decimal=2) + + def test_attribute_assortativity(self): + a = np.array([[50, 50, 0], [50, 50, 0], [0, 0, 2]]) + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.029, decimal=3) + + def test_attribute_assortativity_negative(self): + r = nx.numeric_assortativity_coefficient(self.N, "margin") + np.testing.assert_almost_equal(r, -0.2903, decimal=4) + + def test_assortativity_node_kwargs(self): + G = nx.Graph() + G.add_nodes_from([0, 1], size=2) + G.add_nodes_from([2, 3], size=3) + G.add_edges_from([(0, 1), (2, 3)]) + r = nx.numeric_assortativity_coefficient(G, "size", nodes=[0, 3]) + np.testing.assert_almost_equal(r, 1.0, decimal=4) + + def test_attribute_assortativity_float(self): + r = nx.numeric_assortativity_coefficient(self.F, "margin") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_attribute_assortativity_mixed(self): + r = nx.numeric_assortativity_coefficient(self.K, "margin") + np.testing.assert_almost_equal(r, 0.4340, decimal=4) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py new file mode 100644 index 0000000..589c102 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py @@ -0,0 +1,174 @@ +import pytest + +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + +np = pytest.importorskip("numpy") + + +class TestDegreeMixingDict(BaseTestDegreeMixing): + def test_degree_mixing_dict_undirected(self): + d = nx.degree_mixing_dict(self.P4) + d_result = {1: {2: 2}, 2: {1: 2, 2: 2}} + assert d == d_result + + def test_degree_mixing_dict_undirected_normalized(self): + d = nx.degree_mixing_dict(self.P4, normalized=True) + d_result = {1: {2: 1.0 / 3}, 2: {1: 1.0 / 3, 2: 1.0 / 3}} + assert d == d_result + + def test_degree_mixing_dict_directed(self): + d = nx.degree_mixing_dict(self.D) + d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}} + assert d == d_result + + def test_degree_mixing_dict_multigraph(self): + d = nx.degree_mixing_dict(self.M) + d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}} + assert d == d_result + + def test_degree_mixing_dict_weighted(self): + d = nx.degree_mixing_dict(self.W, weight="weight") + d_result = {0.5: {1.5: 1}, 1.5: {1.5: 6, 0.5: 1}} + assert d == d_result + + +class TestDegreeMixingMatrix(BaseTestDegreeMixing): + def test_degree_mixing_matrix_undirected(self): + # fmt: off + a_result = np.array([[0, 2], + [2, 2]] + ) + # fmt: on + a = 
nx.degree_mixing_matrix(self.P4, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.P4) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_directed(self): + # fmt: off + a_result = np.array([[0, 0, 2], + [1, 0, 1], + [0, 0, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.D, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.D) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_multigraph(self): + # fmt: off + a_result = np.array([[0, 1, 0], + [1, 0, 3], + [0, 3, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.M, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.M) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_selfloop(self): + # fmt: off + a_result = np.array([[2]]) + # fmt: on + a = nx.degree_mixing_matrix(self.S, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.S) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_weighted(self): + a_result = np.array([[0.0, 1.0], [1.0, 6.0]]) + a = nx.degree_mixing_matrix(self.W, weight="weight", normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.W, weight="weight") + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_degree_mixing_matrix_mapping(self): + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + mapping = {0.5: 1, 1.5: 0} + a = nx.degree_mixing_matrix( + self.W, weight="weight", normalized=False, mapping=mapping + ) + np.testing.assert_equal(a, a_result) + + +class TestAttributeMixingDict(BaseTestAttributeMixing): + def test_attribute_mixing_dict_undirected(self): + d = nx.attribute_mixing_dict(self.G, "fish") + d_result = { + "one": {"one": 2, "red": 1}, + "two": {"two": 2, "blue": 1}, + "red": {"one": 1}, + "blue": {"two": 1}, + } + assert d == d_result + + def test_attribute_mixing_dict_directed(self): + d = nx.attribute_mixing_dict(self.D, "fish") + d_result = { + "one": {"one": 1, "red": 1}, + "two": {"two": 1, "blue": 1}, + "red": {}, + "blue": {}, + } + assert d == d_result + + def test_attribute_mixing_dict_multigraph(self): + d = nx.attribute_mixing_dict(self.M, "fish") + d_result = {"one": {"one": 4}, "two": {"two": 2}} + assert d == d_result + + +class TestAttributeMixingMatrix(BaseTestAttributeMixing): + def test_attribute_mixing_matrix_undirected(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.G, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_directed(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.D, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_multigraph(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 
0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.M, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_negative(self): + mapping = {-2: 0, -3: 1, -4: 2} + a_result = np.array([[4.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) + a = nx.attribute_mixing_matrix( + self.N, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.N, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_attribute_mixing_matrix_float(self): + mapping = {0.5: 1, 1.5: 0} + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + a = nx.attribute_mixing_matrix( + self.F, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.F, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py new file mode 100644 index 0000000..92421ed --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py @@ -0,0 +1,107 @@ +import pytest + +import networkx as nx + + +class TestAverageNeighbor: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {0: 2, 1: 1.5, 2: 1.5, 3: 2} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + nd = nx.average_neighbor_degree(D) + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "out") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {0: 2, 1: 1.8, 2: 1.8, 3: 2} + nd = nx.average_neighbor_degree(G, weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "out", "out", weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in", weight="weight") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "in", "out", weight="weight") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in", weight="weight") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, source="in+out", weight="weight") + assert nd == {0: 1.0, 1: 1.0, 2: 0.8, 3: 1.0} + nd = nx.average_neighbor_degree(D, target="in+out", weight="weight") + assert nd == {0: 2.0, 1: 2.0, 2: 1.0, 3: 0.0} + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + nd = nx.average_neighbor_degree(D, source="out", target="out", weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in", 
weight="weight") + assert nd == answer + + def test_degree_k4(self): + G = nx.complete_graph(4) + answer = {0: 3, 1: 3, 2: 3, 3: 3} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in") + assert nd == answer + + def test_degree_k4_nodes(self): + G = nx.complete_graph(4) + answer = {1: 3.0, 2: 3.0} + nd = nx.average_neighbor_degree(G, nodes=[1, 2]) + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_neighbor_degree(G)[5] + assert nd == 1.8 + nd = nx.average_neighbor_degree(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_error_invalid_source_target(self): + G = nx.path_graph(4) + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") + G = G.to_directed() + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py new file mode 100644 index 0000000..3984292 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py @@ -0,0 +1,87 @@ +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestAttributeMixingXY(BaseTestAttributeMixing): + def test_node_attribute_xy_undirected(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish")) + attrxy_result = sorted( + [ + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ("one", "red"), + ("red", "one"), + ("blue", "two"), + ("two", "blue"), + ] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_undirected_nodes(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish", nodes=["one", "yellow"])) + attrxy_result = sorted([]) + assert attrxy == attrxy_result + + def test_node_attribute_xy_directed(self): + attrxy = sorted(nx.node_attribute_xy(self.D, "fish")) + attrxy_result = sorted( + [("one", "one"), ("two", "two"), ("one", "red"), ("two", "blue")] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_multigraph(self): + attrxy = sorted(nx.node_attribute_xy(self.M, "fish")) + attrxy_result = [ + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ] + assert attrxy == attrxy_result + + def test_node_attribute_xy_selfloop(self): + attrxy = sorted(nx.node_attribute_xy(self.S, "fish")) + attrxy_result = [("one", "one"), ("two", "two")] + assert attrxy == attrxy_result + + +class TestDegreeMixingXY(BaseTestDegreeMixing): + def test_node_degree_xy_undirected(self): + xy = sorted(nx.node_degree_xy(self.P4)) + xy_result = sorted([(1, 2), (2, 1), (2, 2), (2, 2), (1, 2), (2, 1)]) + assert xy == xy_result + + def test_node_degree_xy_undirected_nodes(self): + xy = sorted(nx.node_degree_xy(self.P4, nodes=[0, 1, -1])) + xy_result = sorted([(1, 2), (2, 1)]) + assert xy == xy_result + + def test_node_degree_xy_directed(self): + xy = 
sorted(nx.node_degree_xy(self.D)) + xy_result = sorted([(2, 1), (2, 3), (1, 3), (1, 3)]) + assert xy == xy_result + + def test_node_degree_xy_multigraph(self): + xy = sorted(nx.node_degree_xy(self.M)) + xy_result = sorted( + [(2, 3), (2, 3), (3, 2), (3, 2), (2, 3), (3, 2), (1, 2), (2, 1)] + ) + assert xy == xy_result + + def test_node_degree_xy_selfloop(self): + xy = sorted(nx.node_degree_xy(self.S)) + xy_result = sorted([(2, 2), (2, 2)]) + assert xy == xy_result + + def test_node_degree_xy_weighted(self): + G = nx.Graph() + G.add_edge(1, 2, weight=7) + G.add_edge(2, 3, weight=10) + xy = sorted(nx.node_degree_xy(G, weight="weight")) + xy_result = sorted([(7, 17), (17, 10), (17, 7), (10, 17)]) + assert xy == xy_result diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/asteroidal.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/asteroidal.py new file mode 100644 index 0000000..b308392 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/asteroidal.py @@ -0,0 +1,164 @@ +""" +Algorithms for asteroidal triples and asteroidal numbers in graphs. + +An asteroidal triple in a graph G is a set of three pairwise non-adjacent vertices +u, v and w such that there exists a path between any two of them that avoids the +closed neighborhood of the third. More formally, v_j and v_k belong to the same +connected component of G - N[v_i], where N[v_i] denotes the closed neighborhood +of v_i. A graph which does not contain any asteroidal triples is called +an AT-free graph. The class of AT-free graphs is a graph class for which +many NP-complete problems are solvable in polynomial time, among them +independent set and coloring. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["is_at_free", "find_asteroidal_triple"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def find_asteroidal_triple(G): + r"""Find an asteroidal triple in the given graph. + + An asteroidal triple is a triple of pairwise non-adjacent vertices such that + there exists a path between any two of them which avoids the closed + neighborhood of the third. The function checks every independent triple of + vertices and reports whether it forms an asteroidal triple. This is done with + the help of a data structure called a component structure. + A component structure encodes information about which vertices belong to + the same connected component when the closed neighborhood of a given vertex + is removed from the graph. The algorithm used is the trivial + one, outlined in [1]_, which has a runtime of + :math:`O(|V||\overline{E}| + |V||E|)`, where the second term is the + creation of the component structure. + + Parameters + ---------- + G : NetworkX Graph + The graph to check for asteroidal triples. + + Returns + ------- + list or None + An asteroidal triple is returned as a list of nodes. If no asteroidal + triple exists, i.e. the graph is AT-free, then None is returned. + + Notes + ----- + The component structure and the algorithm are described in [1]_. The current + implementation uses the trivial algorithm for simple graphs. + + References + ---------- + .. [1] Ekkehard Köhler, + "Recognizing Graphs without asteroidal triples", + Journal of Discrete Algorithms 2, pages 439-452, 2004. + https://www.sciencedirect.com/science/article/pii/S157086670400019X + """ + V = set(G.nodes) + + if len(V) < 6: + # An asteroidal triple cannot exist in a graph with 5 or fewer vertices.
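+ # (The triple itself uses three vertices, and the three paths that must + # each avoid the closed neighborhood of the remaining vertex cannot all + # be routed through only two further vertices.)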
+ return None + + component_structure = create_component_structure(G) + + for u, v in nx.non_edges(G): + u_neighborhood = set(G[u]).union([u]) + v_neighborhood = set(G[v]).union([v]) + union_of_neighborhoods = u_neighborhood.union(v_neighborhood) + for w in V - union_of_neighborhoods: + # Check for each pair of vertices whether they belong to the + # same connected component when the closed neighborhood of the + # third is removed. + if ( + component_structure[u][v] == component_structure[u][w] + and component_structure[v][u] == component_structure[v][w] + and component_structure[w][u] == component_structure[w][v] + ): + return [u, v, w] + return None + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def is_at_free(G): + """Check if a graph is AT-free. + + This function uses `find_asteroidal_triple` to recognize + an AT-free graph. If no asteroidal triple is found the graph is + AT-free and True is returned. If at least one asteroidal triple is + found the graph is not AT-free and False is returned. + + Parameters + ---------- + G : NetworkX Graph + The graph to check. + + Returns + ------- + bool + True if G is AT-free and False otherwise. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + >>> nx.is_at_free(G) + True + + >>> G = nx.cycle_graph(6) + >>> nx.is_at_free(G) + False + """ + return find_asteroidal_triple(G) is None + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def create_component_structure(G): + r"""Create component structure for G. + + A *component structure* is an :math:`n \times n` array, denoted `c`, where + `n` is the number of vertices and each row and column corresponds to a vertex. + + .. math:: + c_{uv} = \begin{cases} 0, & \text{if } v \in N[u] \\ + k, & \text{if } v \text{ is in component } k \text{ of } G \setminus N[u] \end{cases} + + Here `k` is an arbitrary label for each component. The structure is used + to simplify the detection of asteroidal triples. + + Parameters + ---------- + G : NetworkX Graph + Undirected, simple graph. + + Returns + ------- + component_structure : dictionary + A dictionary of dictionaries: `component_structure[u][v]` is 0 if `v` + lies in the closed neighborhood `N[u]`, and otherwise the label of the + connected component of `G - N[u]` that contains `v`. + + """ + V = set(G.nodes) + component_structure = {} + for v in V: + label = 0 + closed_neighborhood = set(G[v]).union({v}) + row_dict = {} + for u in closed_neighborhood: + row_dict[u] = 0 + + G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood) + for cc in nx.connected_components(G_reduced): + label += 1 + for u in cc: + row_dict[u] = label + + component_structure[v] = row_dict + + return component_structure diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__init__.py new file mode 100644 index 0000000..edc66b4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__init__.py @@ -0,0 +1,88 @@ +r"""This module provides functions and operations for bipartite +graphs. Bipartite graphs `B = (U, V, E)` have two node sets `U,V` and edges in +`E` that only connect nodes from opposite sets. It is common in the literature +to use a spatial analogy referring to the two node sets as top and bottom nodes.
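+ +For example, even cycles are bipartite while odd cycles are not; a quick +check (``nx.is_bipartite`` is one of the few bipartite helpers also exposed +at the top level): + +>>> nx.is_bipartite(nx.cycle_graph(4)) +True +>>> nx.is_bipartite(nx.cycle_graph(5)) +False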
+ +The bipartite algorithms are not imported into the networkx namespace +at the top level so the easiest way to use them is with: + +>>> from networkx.algorithms import bipartite + +NetworkX does not have a custom bipartite graph class but the Graph() +or DiGraph() classes can be used to represent bipartite graphs. However, +you have to keep track of which set each node belongs to, and make +sure that there is no edge between nodes of the same set. The convention used +in NetworkX is to use a node attribute named `bipartite` with values 0 or 1 to +identify the sets each node belongs to. This convention is not enforced in +the source code of bipartite functions; it is only a recommendation. + +For example: + +>>> B = nx.Graph() +>>> # Add nodes with the node attribute "bipartite" +>>> B.add_nodes_from([1, 2, 3, 4], bipartite=0) +>>> B.add_nodes_from(["a", "b", "c"], bipartite=1) +>>> # Add edges only between nodes of opposite node sets +>>> B.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + +Many algorithms of the bipartite module of NetworkX require, as an argument, a +container with all the nodes that belong to one set, in addition to the bipartite +graph `B`. The functions in the bipartite package do not check that the node set +is actually correct nor that the input graph is actually bipartite. +If `B` is connected, you can find the two node sets using a two-coloring +algorithm: + +>>> nx.is_connected(B) +True +>>> bottom_nodes, top_nodes = bipartite.sets(B) + +However, if the input graph is not connected, there is more than one possible +coloring. This is why we require the user to pass a container +with all nodes of one bipartite node set as an argument to most bipartite +functions. In the face of ambiguity, we refuse the temptation to guess and +raise an :exc:`AmbiguousSolution` +exception if the input graph for +:func:`bipartite.sets` +is disconnected. + +Using the `bipartite` node attribute, you can easily get the two node sets: + +>>> top_nodes = {n for n, d in B.nodes(data=True) if d["bipartite"] == 0} +>>> bottom_nodes = set(B) - top_nodes + +So you can easily use the bipartite algorithms that require, as an argument, a +container with all nodes that belong to one node set: + +>>> print(round(bipartite.density(B, bottom_nodes), 2)) +0.5 +>>> G = bipartite.projected_graph(B, top_nodes) + +All bipartite graph generators in NetworkX build bipartite graphs with the +`bipartite` node attribute. Thus, you can use the same approach: + +>>> RB = bipartite.random_graph(5, 7, 0.2) +>>> RB_top = {n for n, d in RB.nodes(data=True) if d["bipartite"] == 0} +>>> RB_bottom = set(RB) - RB_top +>>> list(RB_top) +[0, 1, 2, 3, 4] +>>> list(RB_bottom) +[5, 6, 7, 8, 9, 10, 11] + +For other bipartite graph generators see +:mod:`Generators`.
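+ +As noted above, calling :func:`bipartite.sets` on a disconnected graph without +a container of one node set raises an exception; a small illustration (the +graph ``D`` here is made up for this example): + +>>> D = nx.Graph([(0, "a")]) +>>> D.add_node("b")  # an isolated node makes the two-coloring ambiguous +>>> bipartite.sets(D) +Traceback (most recent call last): + ... +networkx.exception.AmbiguousSolution: Disconnected graph: Ambiguous solution for bipartite sets.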
+ +""" + +from networkx.algorithms.bipartite.basic import * +from networkx.algorithms.bipartite.centrality import * +from networkx.algorithms.bipartite.cluster import * +from networkx.algorithms.bipartite.covering import * +from networkx.algorithms.bipartite.edgelist import * +from networkx.algorithms.bipartite.matching import * +from networkx.algorithms.bipartite.matrix import * +from networkx.algorithms.bipartite.projection import * +from networkx.algorithms.bipartite.redundancy import * +from networkx.algorithms.bipartite.spectral import * +from networkx.algorithms.bipartite.generators import * +from networkx.algorithms.bipartite.extendability import * +from networkx.algorithms.bipartite.link_analysis import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..51dcb33 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-312.pyc new file mode 100644 index 0000000..c7ede75 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-312.pyc new file mode 100644 index 0000000..6c6254b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-312.pyc new file mode 100644 index 0000000..98571e6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-312.pyc new file mode 100644 index 0000000..450685b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-312.pyc new file mode 100644 index 0000000..7c13ed6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-312.pyc new file mode 100644 index 0000000..c63af11 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-312.pyc new file mode 100644 index 0000000..b4a9c67 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/link_analysis.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/link_analysis.cpython-312.pyc new file mode 100644 index 0000000..241cce4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/link_analysis.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-312.pyc new file mode 100644 index 0000000..334bac8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-312.pyc new file mode 100644 index 0000000..8e50ebe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-312.pyc new file mode 100644 index 0000000..6fa995d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-312.pyc new file mode 100644 index 0000000..7d1a278 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-312.pyc new file mode 100644 index 0000000..3ff7dd2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/basic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/basic.py new file mode 100644 index 0000000..8d9a4d5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/basic.py @@ -0,0 +1,322 @@ +""" +========================== +Bipartite Graph Algorithms +========================== +""" + +import networkx as nx +from networkx.algorithms.components import connected_components +from networkx.exception import AmbiguousSolution + +__all__ = [ + "is_bipartite", + "is_bipartite_node_set", + "color", + "sets", + "density", + "degrees", +] + + +@nx._dispatchable +def color(G): + """Returns a two-coloring of 
the graph. + + Raises an exception if the graph is not bipartite. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + color : dictionary + A dictionary keyed by node with a 1 or 0 as data for each node color. + + Raises + ------ + NetworkXError + If the graph is not two-colorable. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> c = bipartite.color(G) + >>> print(c) + {0: 1, 1: 0, 2: 1, 3: 0} + + You can use this to set a node attribute indicating the bipartite set: + + >>> nx.set_node_attributes(G, c, "bipartite") + >>> print(G.nodes[0]["bipartite"]) + 1 + >>> print(G.nodes[1]["bipartite"]) + 0 + """ + if G.is_directed(): + import itertools + + def neighbors(v): + return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)]) + + else: + neighbors = G.neighbors + + color = {} + for n in G: # handle disconnected graphs + if n in color or len(G[n]) == 0: # skip isolates + continue + queue = [n] + color[n] = 1 # nodes seen with color (1 or 0) + while queue: + v = queue.pop() + c = 1 - color[v] # opposite color of node v + for w in neighbors(v): + if w in color: + if color[w] == color[v]: + raise nx.NetworkXError("Graph is not bipartite.") + else: + color[w] = c + queue.append(w) + # color isolates with 0 + color.update(dict.fromkeys(nx.isolates(G), 0)) + return color + + +@nx._dispatchable +def is_bipartite(G): + """Returns True if graph G is bipartite, False if not. + + Parameters + ---------- + G : NetworkX graph + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> print(bipartite.is_bipartite(G)) + True + + See Also + -------- + color, is_bipartite_node_set + """ + try: + color(G) + return True + except nx.NetworkXError: + return False + + +@nx._dispatchable +def is_bipartite_node_set(G, nodes): + """Returns True if nodes and G/nodes are a bipartition of G. + + Parameters + ---------- + G : NetworkX graph + + nodes: list or container + Container with the nodes to check as one bipartite node set of G. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> X = set([1, 3]) + >>> bipartite.is_bipartite_node_set(G, X) + True + + Notes + ----- + An exception is raised if the input nodes are not distinct, because in this + case some bipartite algorithms will yield incorrect results. + For connected graphs the bipartite sets are unique. This function handles + disconnected graphs. + """ + S = set(nodes) + + if len(S) < len(nodes): + # this should maybe just return False? + raise AmbiguousSolution( + "The input node set contains duplicates.\n" + "This may lead to incorrect results when using it in bipartite algorithms.\n" + "Consider using set(nodes) as the input" + ) + + for CC in (G.subgraph(c).copy() for c in connected_components(G)): + X, Y = sets(CC) + if not ( + (X.issubset(S) and Y.isdisjoint(S)) or (Y.issubset(S) and X.isdisjoint(S)) + ): + return False + return True + + +@nx._dispatchable +def sets(G, top_nodes=None): + """Returns bipartite node sets of graph G. + + Raises an exception if the graph is not bipartite or if the input + graph is disconnected and thus more than one valid solution exists. + See :mod:`bipartite documentation` + for further details on how bipartite graphs are handled in NetworkX. + + Parameters + ---------- + G : NetworkX graph + + top_nodes : container, optional + Container with all nodes in one bipartite node set. If not supplied + it will be computed.
But if more than one solution exists an exception + will be raised. + + Returns + ------- + X : set + Nodes from one side of the bipartite graph. + Y : set + Nodes from the other side. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + NetworkXError + Raised if the input graph is not bipartite. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> X, Y = bipartite.sets(G) + >>> list(X) + [0, 2] + >>> list(Y) + [1, 3] + + See Also + -------- + color + + """ + if G.is_directed(): + is_connected = nx.is_weakly_connected + else: + is_connected = nx.is_connected + if top_nodes is not None: + X = set(top_nodes) + Y = set(G) - X + else: + if not is_connected(G): + msg = "Disconnected graph: Ambiguous solution for bipartite sets." + raise nx.AmbiguousSolution(msg) + c = color(G) + X = {n for n, is_top in c.items() if is_top} + Y = {n for n, is_top in c.items() if not is_top} + return (X, Y) + + +@nx._dispatchable(graphs="B") +def density(B, nodes): + """Returns density of bipartite graph B. + + Parameters + ---------- + B : NetworkX graph + + nodes: list or container + Nodes in one node set of the bipartite graph. + + Returns + ------- + d : float + The bipartite density + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.complete_bipartite_graph(3, 2) + >>> X = set([0, 1, 2]) + >>> bipartite.density(G, X) + 1.0 + >>> Y = set([3, 4]) + >>> bipartite.density(G, Y) + 1.0 + + Notes + ----- + The container of nodes passed as argument must contain all nodes + in one of the two bipartite node sets to avoid ambiguity in the + case of disconnected graphs. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + color + """ + n = len(B) + m = nx.number_of_edges(B) + nb = len(nodes) + nt = n - nb + if m == 0: # includes cases n==0 and n==1 + d = 0.0 + else: + if B.is_directed(): + d = m / (2 * nb * nt) + else: + d = m / (nb * nt) + return d + + +@nx._dispatchable(graphs="B", edge_attrs="weight") +def degrees(B, nodes, weight=None): + """Returns the degrees of the two node sets in the bipartite graph B. + + Parameters + ---------- + B : NetworkX graph + + nodes: list or container + Nodes in one node set of the bipartite graph. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + (degX,degY) : tuple of dictionaries + The degrees of the two bipartite sets as dictionaries keyed by node. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.complete_bipartite_graph(3, 2) + >>> Y = set([3, 4]) + >>> degX, degY = bipartite.degrees(G, Y) + >>> dict(degX) + {0: 2, 1: 2, 2: 2} + + Notes + ----- + The container of nodes passed as argument must contain all nodes + in one of the two bipartite node sets to avoid ambiguity in the + case of disconnected graphs. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. 
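+ + The second dictionary in the returned tuple holds the degrees of the + other node set (here ``Y``); continuing the example above: + + >>> dict(degY) + {3: 3, 4: 3}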
+ + See Also + -------- + color, density + """ + bottom = set(nodes) + top = set(B) - bottom + return (B.degree(top, weight), B.degree(bottom, weight)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/centrality.py new file mode 100644 index 0000000..42d7270 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/centrality.py @@ -0,0 +1,290 @@ +import networkx as nx + +__all__ = ["degree_centrality", "betweenness_centrality", "closeness_centrality"] + + +@nx._dispatchable(name="bipartite_degree_centrality") +def degree_centrality(G, nodes): + r"""Compute the degree centrality for nodes in a bipartite network. + + The degree centrality for a node `v` is the fraction of nodes + connected to it. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : list or container + Container with all nodes in one bipartite node set. + + Returns + ------- + centrality : dictionary + Dictionary keyed by node with bipartite degree centrality as the value. + + Examples + -------- + >>> G = nx.wheel_graph(5) + >>> top_nodes = {0, 1, 2} + >>> nx.bipartite.degree_centrality(G, nodes=top_nodes) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 1.0, 4: 1.0} + + See Also + -------- + betweenness_centrality + closeness_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both bipartite node + sets. See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + For unipartite networks, the degree centrality values are + normalized by dividing by the maximum possible degree (which is + `n-1` where `n` is the number of nodes in G). + + In the bipartite case, the maximum possible degree of a node in a + bipartite node set is the number of nodes in the opposite node set + [1]_. The degree centrality for a node `v` in the bipartite + sets `U` with `n` nodes and `V` with `m` nodes is + + .. math:: + + d_{v} = \frac{deg(v)}{m}, \mbox{for} v \in U , + + d_{v} = \frac{deg(v)}{n}, \mbox{for} v \in V , + + + where `deg(v)` is the degree of node `v`. + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + https://dx.doi.org/10.4135/9781446294413.n28 + """ + top = set(nodes) + bottom = set(G) - top + s = 1.0 / len(bottom) + centrality = {n: d * s for n, d in G.degree(top)} + s = 1.0 / len(top) + centrality.update({n: d * s for n, d in G.degree(bottom)}) + return centrality + + +@nx._dispatchable(name="bipartite_betweenness_centrality") +def betweenness_centrality(G, nodes): + r"""Compute betweenness centrality for nodes in a bipartite network. + + Betweenness centrality of a node `v` is the sum of the + fraction of all-pairs shortest paths that pass through `v`. + + Values of betweenness are normalized by the maximum possible + value which for bipartite graphs is limited by the relative size + of the two node sets [1]_. + + Let `n` be the number of nodes in the node set `U` and + `m` be the number of nodes in the node set `V`, then + nodes in `U` are normalized by dividing by + + .. 
math:: + + \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] , + + where + + .. math:: + + s = (n - 1) \div m , t = (n - 1) \mod m , + + and nodes in `V` are normalized by dividing by + + .. math:: + + \frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] , + + where + + .. math:: + + p = (m - 1) \div n , r = (m - 1) \mod n . + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or container + Container with all nodes in one bipartite node set. + + Returns + ------- + betweenness : dictionary + Dictionary keyed by node with bipartite betweenness centrality + as the value. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> top_nodes = {1, 2} + >>> nx.bipartite.betweenness_centrality(G, nodes=top_nodes) + {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + + See Also + -------- + degree_centrality + closeness_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both node sets. + See :mod:`bipartite documentation` + for further details on how bipartite graphs are handled in NetworkX. + + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + https://dx.doi.org/10.4135/9781446294413.n28 + """ + top = set(nodes) + bottom = set(G) - top + n = len(top) + m = len(bottom) + s, t = divmod(n - 1, m) + bet_max_top = ( + ((m**2) * ((s + 1) ** 2)) + + (m * (s + 1) * (2 * t - s - 1)) + - (t * ((2 * s) - t + 3)) + ) / 2.0 + p, r = divmod(m - 1, n) + bet_max_bot = ( + ((n**2) * ((p + 1) ** 2)) + + (n * (p + 1) * (2 * r - p - 1)) + - (r * ((2 * p) - r + 3)) + ) / 2.0 + betweenness = nx.betweenness_centrality(G, normalized=False, weight=None) + for node in top: + betweenness[node] /= bet_max_top + for node in bottom: + betweenness[node] /= bet_max_bot + return betweenness + + +@nx._dispatchable(name="bipartite_closeness_centrality") +def closeness_centrality(G, nodes, normalized=True): + r"""Compute the closeness centrality for nodes in a bipartite network. + + The closeness of a node is the distance to all other nodes in the + graph, or, if the graph is not connected, the distance to all other + nodes in the connected component containing that node. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : list or container + Container with all nodes in one bipartite node set. + + normalized : bool, optional + If True (default) normalize by connected component size. + + Returns + ------- + closeness : dictionary + Dictionary keyed by node with bipartite closeness centrality + as the value. + + Examples + -------- + >>> G = nx.wheel_graph(5) + >>> top_nodes = {0, 1, 2} + >>> nx.bipartite.closeness_centrality(G, nodes=top_nodes) + {0: 1.5, 1: 1.2, 2: 1.2, 3: 1.0, 4: 1.0} + + See Also + -------- + betweenness_centrality + degree_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both node sets. + See :mod:`bipartite documentation` + for further details on how bipartite graphs are handled in NetworkX.
+ + + Closeness centrality is normalized by the minimum distance possible. + In the bipartite case the minimum distance for a node in one bipartite + node set is 1 from all nodes in the other node set and 2 from all + other nodes in its own set [1]_. Thus the closeness centrality + for node `v` in the two bipartite sets `U` with + `n` nodes and `V` with `m` nodes is + + .. math:: + + c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{ for } v \in U, + + c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{ for } v \in V, + + where `d` is the sum of the distances from `v` to all + other nodes. + + Higher values of closeness indicate higher centrality. + + As in the unipartite case, setting normalized=True further normalizes + the values by multiplying by :math:`(n - 1) / (|G| - 1)`, where `n` is the + number of nodes in the connected component containing the + node. If the graph is not completely connected, this algorithm + computes the closeness centrality for each connected part + separately. + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + https://dx.doi.org/10.4135/9781446294413.n28 + """ + closeness = {} + path_length = nx.single_source_shortest_path_length + top = set(nodes) + bottom = set(G) - top + n = len(top) + m = len(bottom) + for node in top: + sp = dict(path_length(G, node)) + totsp = sum(sp.values()) + if totsp > 0.0 and len(G) > 1: + closeness[node] = (m + 2 * (n - 1)) / totsp + if normalized: + s = (len(sp) - 1) / (len(G) - 1) + closeness[node] *= s + else: + closeness[node] = 0.0 + for node in bottom: + sp = dict(path_length(G, node)) + totsp = sum(sp.values()) + if totsp > 0.0 and len(G) > 1: + closeness[node] = (n + 2 * (m - 1)) / totsp + if normalized: + s = (len(sp) - 1) / (len(G) - 1) + closeness[node] *= s + else: + closeness[node] = 0.0 + return closeness diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/cluster.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/cluster.py new file mode 100644 index 0000000..78b3c0f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/cluster.py @@ -0,0 +1,289 @@ +"""Functions for computing clustering of pairs""" + +import itertools + +import networkx as nx + +__all__ = [ + "clustering", + "average_clustering", + "latapy_clustering", + "robins_alexander_clustering", +] + + +def cc_dot(nu, nv): + return len(nu & nv) / len(nu | nv) + + +def cc_max(nu, nv): + return len(nu & nv) / max(len(nu), len(nv)) + + +def cc_min(nu, nv): + return len(nu & nv) / min(len(nu), len(nv)) + + +modes = {"dot": cc_dot, "min": cc_min, "max": cc_max} + + +@nx._dispatchable +def latapy_clustering(G, nodes=None, mode="dot"): + r"""Compute a bipartite clustering coefficient for nodes. + + The bipartite clustering coefficient is a measure of local density + of connections defined as [1]_: + + .. math:: + + c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|} + + where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, + and `c_{uv}` is the pairwise clustering coefficient between nodes + `u` and `v`. + + The mode selects the function for `c_{uv}` which can be: + + `dot`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|} + + `min`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)} + + `max`: + + ..
math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)} + + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or iterable (optional) + Compute bipartite clustering for these nodes. The default + is all nodes in G. + + mode : string + The pairwise bipartite clustering method to be used in the computation. + It must be "dot", "max", or "min". + + Returns + ------- + clustering : dictionary + A dictionary keyed by node with the clustering coefficient value. + + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) # path graphs are bipartite + >>> c = bipartite.clustering(G) + >>> c[0] + 0.5 + >>> c = bipartite.clustering(G, mode="min") + >>> c[0] + 1.0 + + See Also + -------- + robins_alexander_clustering + average_clustering + networkx.algorithms.cluster.square_clustering + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. + """ + if not nx.algorithms.bipartite.is_bipartite(G): + raise nx.NetworkXError("Graph is not bipartite") + + try: + cc_func = modes[mode] + except KeyError as err: + raise nx.NetworkXError( + "Mode for bipartite clustering must be: dot, min or max" + ) from err + + if nodes is None: + nodes = G + ccs = {} + for v in nodes: + cc = 0.0 + nbrs2 = {u for nbr in G[v] for u in G[nbr]} - {v} + for u in nbrs2: + cc += cc_func(set(G[u]), set(G[v])) + if cc > 0.0: # len(nbrs2)>0 + cc /= len(nbrs2) + ccs[v] = cc + return ccs + + +clustering = latapy_clustering + + +@nx._dispatchable(name="bipartite_average_clustering") +def average_clustering(G, nodes=None, mode="dot"): + r"""Compute the average bipartite clustering coefficient. + + A clustering coefficient for the whole graph is the average, + + .. math:: + + C = \frac{1}{n}\sum_{v \in G} c_v, + + where `n` is the number of nodes in `G`. + + Similar measures for the two bipartite sets can be defined [1]_ + + .. math:: + + C_X = \frac{1}{|X|}\sum_{v \in X} c_v, + + where `X` is a bipartite set of `G`. + + Parameters + ---------- + G : graph + a bipartite graph + + nodes : list or iterable, optional + A container of nodes to use in computing the average. + The nodes should be either the entire graph (the default) or one of the + bipartite sets. + + mode : string + The pairwise bipartite clustering method. + It must be "dot", "max", or "min" + + Returns + ------- + clustering : float + The average bipartite clustering for the given set of nodes or the + entire graph if no nodes are specified. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.star_graph(3) # star graphs are bipartite + >>> bipartite.average_clustering(G) + 0.75 + >>> X, Y = bipartite.sets(G) + >>> bipartite.average_clustering(G, X) + 0.0 + >>> bipartite.average_clustering(G, Y) + 1.0 + + See Also + -------- + clustering + + Notes + ----- + The container of nodes passed to this function must contain all of the nodes + in one of the bipartite sets ("top" or "bottom") in order to compute + the correct average bipartite clustering coefficients. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. 
+ """ + if nodes is None: + nodes = G + ccs = latapy_clustering(G, nodes=nodes, mode=mode) + return sum(ccs[v] for v in nodes) / len(nodes) + + +@nx._dispatchable +def robins_alexander_clustering(G): + r"""Compute the bipartite clustering of G. + + Robins and Alexander [1]_ defined bipartite clustering coefficient as + four times the number of four cycles `C_4` divided by the number of + three paths `L_3` in a bipartite graph: + + .. math:: + + CC_4 = \frac{4 * C_4}{L_3} + + Parameters + ---------- + G : graph + a bipartite graph + + Returns + ------- + clustering : float + The Robins and Alexander bipartite clustering for the input graph. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.davis_southern_women_graph() + >>> print(round(bipartite.robins_alexander_clustering(G), 3)) + 0.468 + + See Also + -------- + latapy_clustering + networkx.algorithms.cluster.square_clustering + + References + ---------- + .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking + directors: Network structure and distance in bipartite graphs. + Computational & Mathematical Organization Theory 10(1), 69–94. + + """ + if G.order() < 4 or G.size() < 3: + return 0 + L_3 = _threepaths(G) + if L_3 == 0: + return 0 + C_4 = _four_cycles(G) + return (4.0 * C_4) / L_3 + + +def _four_cycles(G): + # Also see `square_clustering` which counts squares in a similar way + cycles = 0 + seen = set() + G_adj = G._adj + for v in G: + seen.add(v) + v_neighbors = set(G_adj[v]) + if len(v_neighbors) < 2: + # Can't form a square without at least two neighbors + continue + two_hop_neighbors = set().union(*(G_adj[u] for u in v_neighbors)) + two_hop_neighbors -= seen + for x in two_hop_neighbors: + p2 = len(v_neighbors.intersection(G_adj[x])) + cycles += p2 * (p2 - 1) + return cycles / 4 + + +def _threepaths(G): + paths = 0 + for v in G: + for u in G[v]: + for w in set(G[u]) - {v}: + paths += len(set(G[w]) - {v, u}) + # Divide by two because we count each three path twice + # one for each possible starting point + return paths / 2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/covering.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/covering.py new file mode 100644 index 0000000..f937903 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/covering.py @@ -0,0 +1,57 @@ +"""Functions related to graph covers.""" + +import networkx as nx +from networkx.algorithms.bipartite.matching import hopcroft_karp_matching +from networkx.algorithms.covering import min_edge_cover as _min_edge_cover +from networkx.utils import not_implemented_for + +__all__ = ["min_edge_cover"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(name="bipartite_min_edge_cover") +def min_edge_cover(G, matching_algorithm=None): + """Returns a set of edges which constitutes + the minimum edge cover of the graph. + + The smallest edge cover can be found in polynomial time by finding + a maximum matching and extending it greedily so that all nodes + are covered. + + Parameters + ---------- + G : NetworkX graph + An undirected bipartite graph. + + matching_algorithm : function + A function that returns a maximum cardinality matching in a + given bipartite graph. The function must take one input, the + graph ``G``, and return a dictionary mapping each node to its + mate. If not specified, + :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching` + will be used. 
Other possibilities include + :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`. + + Returns + ------- + set + A set of the edges in a minimum edge cover of the graph, given as + pairs of nodes. For each edge `(u, v)` in the cover, the set also + contains the reversed pair `(v, u)`. + + Notes + ----- + An edge cover of a graph is a set of edges such that every node of + the graph is incident to at least one edge of the set. + A minimum edge cover is an edge covering of smallest cardinality. + + Due to its implementation, the worst-case running time of this algorithm + is bounded by the worst-case running time of the function + ``matching_algorithm``. + """ + if G.order() == 0: # Special case for the empty graph + return set() + if matching_algorithm is None: + matching_algorithm = hopcroft_karp_matching + return _min_edge_cover(G, matching_algorithm=matching_algorithm) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/edgelist.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/edgelist.py new file mode 100644 index 0000000..c2c6b9c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/edgelist.py @@ -0,0 +1,360 @@ +""" +******************** +Bipartite Edge Lists +******************** +Read and write NetworkX graphs as bipartite edge lists. + +Format +------ +You can read or write three formats of edge lists with these functions. + +Node pairs with no data:: + + 1 2 + +Python dictionary as data:: + + 1 2 {'weight':7, 'color':'green'} + +Arbitrary data:: + + 1 2 7 green + +For each edge (u, v) the node u is assigned to part 0 and the node v to part 1. +""" + +__all__ = ["generate_edgelist", "write_edgelist", "parse_edgelist", "read_edgelist"] + +import networkx as nx +from networkx.utils import not_implemented_for, open_file + + +@open_file(1, mode="wb") +def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"): + """Write a bipartite graph as a list of edges. + + Parameters + ---------- + G : Graph + A NetworkX bipartite graph + path : file or string + File or filename to write. If a file is provided, it must be + opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed. + comments : string, optional + The character used to indicate the start of a comment + delimiter : string, optional + The string used to separate values. The default is whitespace. + data : bool or list, optional + If False write no edge data. + If True write a string representation of the edge data dictionary. + If a list (or other iterable) is provided, write the keys specified + in the list. + encoding: string, optional + Specify which encoding to use when writing the file.
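+ + For instance, to write comma-separated node pairs, pass ``delimiter=","`` + (a minimal sketch; the graph and filename here are arbitrary, and + ``nx.write_edgelist`` is the top-level helper also used in the examples below): + + >>> K = nx.complete_bipartite_graph(2, 2)  # generators set the "bipartite" attribute + >>> nx.write_edgelist(K, "test.csv", delimiter=",", data=False)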
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> nx.write_edgelist(G, "test.edgelist") + >>> fh = open("test.edgelist_open", "wb") + >>> nx.write_edgelist(G, fh) + >>> nx.write_edgelist(G, "test.edgelist.gz") + >>> nx.write_edgelist(G, "test.edgelist_nodata.gz", data=False) + + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=7, color="red") + >>> nx.write_edgelist(G, "test.edgelist_bigger_nodata", data=False) + >>> nx.write_edgelist(G, "test.edgelist_color", data=["color"]) + >>> nx.write_edgelist(G, "test.edgelist_color_weight", data=["color", "weight"]) + + See Also + -------- + read_edgelist + generate_edgelist + """ + for line in generate_edgelist(G, delimiter, data): + line += "\n" + path.write(line.encode(encoding)) + + +@not_implemented_for("directed") +def generate_edgelist(G, delimiter=" ", data=True): + """Generate lines of the bipartite graph G in edge list format. + + Parameters + ---------- + G : NetworkX graph + The graph is assumed to have the node attribute `bipartite` set to + 0 or 1, identifying the two graph parts + + delimiter : string, optional + Separator for node labels + + data : bool or list of keys + If False generate no edge data. If True use a dictionary + representation of edge data. If a list of keys use a list of data + values corresponding to the keys. + + Returns + ------- + lines : string + Lines of data in edge list format. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> G[1][2]["weight"] = 3 + >>> G[2][3]["capacity"] = 12 + >>> for line in bipartite.generate_edgelist(G, data=False): + ...     print(line) + 0 1 + 2 1 + 2 3 + + >>> for line in bipartite.generate_edgelist(G): + ...     print(line) + 0 1 {} + 2 1 {'weight': 3} + 2 3 {'capacity': 12} + + >>> for line in bipartite.generate_edgelist(G, data=["weight"]): + ...     print(line) + 0 1 + 2 1 3 + 2 3 + """ + try: + part0 = [n for n, d in G.nodes.items() if d["bipartite"] == 0] + except BaseException as err: + raise AttributeError("Missing node attribute `bipartite`") from err + if data is True or data is False: + for n in part0: + for edge in G.edges(n, data=data): + yield delimiter.join(map(str, edge)) + else: + for n in part0: + for u, v, d in G.edges(n, data=True): + edge = [u, v] + try: + edge.extend(d[k] for k in data) + except KeyError: + pass # missing data for this edge, should warn? + yield delimiter.join(map(str, edge)) + + +@nx._dispatchable(name="bipartite_parse_edgelist", graphs=None, returns_graph=True) +def parse_edgelist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True +): + """Parse lines of an edge list representation of a bipartite graph. + + Parameters + ---------- + lines : list or iterator of strings + Input data in edgelist format + comments : string, optional + Marker for comment lines + delimiter : string, optional + Separator for node labels + create_using: NetworkX graph container, optional + Use given NetworkX graph for holding nodes or edges. + nodetype : Python type, optional + Convert nodes to this type. + data : bool or list of (label,type) tuples + If False, generate no edge data. If True, use a dictionary + representation of edge data. If a list of (label, type) tuples, use it + to specify dictionary key names and types for edge data.
+
+    Returns
+    -------
+    G: NetworkX Graph
+        The bipartite graph corresponding to lines
+
+    Examples
+    --------
+    Edgelist with no data:
+
+    >>> from networkx.algorithms import bipartite
+    >>> lines = ["1 2", "2 3", "3 4"]
+    >>> G = bipartite.parse_edgelist(lines, nodetype=int)
+    >>> sorted(G.nodes())
+    [1, 2, 3, 4]
+    >>> sorted(G.nodes(data=True))
+    [(1, {'bipartite': 0}), (2, {'bipartite': 0}), (3, {'bipartite': 0}), (4, {'bipartite': 1})]
+    >>> sorted(G.edges())
+    [(1, 2), (2, 3), (3, 4)]
+
+    Edgelist with data in Python dictionary representation:
+
+    >>> lines = ["1 2 {'weight':3}", "2 3 {'weight':27}", "3 4 {'weight':3.0}"]
+    >>> G = bipartite.parse_edgelist(lines, nodetype=int)
+    >>> sorted(G.nodes())
+    [1, 2, 3, 4]
+    >>> sorted(G.edges(data=True))
+    [(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})]
+
+    Edgelist with data in a list:
+
+    >>> lines = ["1 2 3", "2 3 27", "3 4 3.0"]
+    >>> G = bipartite.parse_edgelist(lines, nodetype=int, data=(("weight", float),))
+    >>> sorted(G.nodes())
+    [1, 2, 3, 4]
+    >>> sorted(G.edges(data=True))
+    [(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})]
+
+    See Also
+    --------
+    read_edgelist
+    """
+    from ast import literal_eval
+
+    G = nx.empty_graph(0, create_using)
+    for line in lines:
+        p = line.find(comments)
+        if p >= 0:
+            line = line[:p]
+        if not len(line):
+            continue
+        # split line, should have 2 or more
+        s = line.rstrip("\n").split(delimiter)
+        if len(s) < 2:
+            continue
+        u = s.pop(0)
+        v = s.pop(0)
+        d = s
+        if nodetype is not None:
+            try:
+                u = nodetype(u)
+                v = nodetype(v)
+            except BaseException as err:
+                raise TypeError(
+                    f"Failed to convert nodes {u},{v} to type {nodetype}."
+                ) from err
+
+        if len(d) == 0 or data is False:
+            # no data or data type specified
+            edgedata = {}
+        elif data is True:
+            # no edge types specified
+            try:  # try to evaluate as dictionary
+                edgedata = dict(literal_eval(" ".join(d)))
+            except BaseException as err:
+                raise TypeError(
+                    f"Failed to convert edge data ({d}) to dictionary."
+                ) from err
+        else:
+            # convert edge data to dictionary with specified keys and type
+            if len(d) != len(data):
+                raise IndexError(
+                    f"Edge data {d} and data_keys {data} are not the same length"
+                )
+            edgedata = {}
+            for (edge_key, edge_type), edge_value in zip(data, d):
+                try:
+                    edge_value = edge_type(edge_value)
+                except BaseException as err:
+                    raise TypeError(
+                        f"Failed to convert {edge_key} data "
+                        f"{edge_value} to type {edge_type}."
+                    ) from err
+                edgedata.update({edge_key: edge_value})
+        G.add_node(u, bipartite=0)
+        G.add_node(v, bipartite=1)
+        G.add_edge(u, v, **edgedata)
+    return G
+
+
+@open_file(0, mode="rb")
+@nx._dispatchable(name="bipartite_read_edgelist", graphs=None, returns_graph=True)
+def read_edgelist(
+    path,
+    comments="#",
+    delimiter=None,
+    create_using=None,
+    nodetype=None,
+    data=True,
+    edgetype=None,
+    encoding="utf-8",
+):
+    """Read a bipartite graph from a list of edges.
+
+    Parameters
+    ----------
+    path : file or string
+       File or filename to read. If a file is provided, it must be
+       opened in 'rb' mode.
+       Filenames ending in .gz or .bz2 will be decompressed.
+    comments : string, optional
+       The character used to indicate the start of a comment.
+    delimiter : string, optional
+       The string used to separate values. The default is whitespace.
+    create_using : Graph container, optional
+       Use specified container to build graph. The default is networkx.Graph,
+       an undirected graph.
+    nodetype : int, float, str, Python type, optional
+       Convert node data from strings to specified type
+    data : bool or list of (label,type) tuples
+       Tuples specifying dictionary key names and types for edge data
+    edgetype : int, float, str, Python type, optional OBSOLETE
+       Convert edge data from strings to specified type and use as 'weight'
+    encoding: string, optional
+       Specify which encoding to use when reading file.
+
+    Returns
+    -------
+    G : graph
+       A networkx Graph or other type specified with create_using
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.path_graph(4)
+    >>> G.add_nodes_from([0, 2], bipartite=0)
+    >>> G.add_nodes_from([1, 3], bipartite=1)
+    >>> bipartite.write_edgelist(G, "test.edgelist")
+    >>> G = bipartite.read_edgelist("test.edgelist")
+
+    >>> fh = open("test.edgelist", "rb")
+    >>> G = bipartite.read_edgelist(fh)
+    >>> fh.close()
+
+    >>> G = bipartite.read_edgelist("test.edgelist", nodetype=int)
+
+    Edgelist with data in a list:
+
+    >>> textline = "1 2 3"
+    >>> fh = open("test.edgelist", "w")
+    >>> d = fh.write(textline)
+    >>> fh.close()
+    >>> G = bipartite.read_edgelist(
+    ...     "test.edgelist", nodetype=int, data=(("weight", float),)
+    ... )
+    >>> list(G)
+    [1, 2]
+    >>> list(G.edges(data=True))
+    [(1, 2, {'weight': 3.0})]
+
+    See parse_edgelist() for more examples of formatting.
+
+    See Also
+    --------
+    parse_edgelist
+
+    Notes
+    -----
+    Since nodes must be hashable, the function nodetype must return hashable
+    types (e.g. int, float, str, frozenset - or tuples of those, etc.)
+    """
+    lines = (line.decode(encoding) for line in path)
+    return parse_edgelist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+        data=data,
+    )
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/extendability.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/extendability.py
new file mode 100644
index 0000000..61d8d06
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/extendability.py
@@ -0,0 +1,105 @@
+"""Provides a function for computing the extendability of a graph which is
+undirected, simple, connected and bipartite and contains at least one perfect matching."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["maximal_extendability"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def maximal_extendability(G):
+    """Computes the extendability of a graph.
+
+    The extendability of a graph is defined as the maximum $k$ for which `G`
+    is $k$-extendable. Graph `G` is $k$-extendable if and only if `G` has a
+    perfect matching and every set of $k$ independent edges can be extended
+    to a perfect matching in `G`.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        A connected bipartite graph without self-loops
+
+    Returns
+    -------
+    extendability : int
+
+    Raises
+    ------
+    NetworkXError
+       If the graph `G` is disconnected.
+       If the graph `G` is not bipartite.
+       If the graph `G` does not contain a perfect matching.
+       If the residual graph of `G` is not strongly connected.
+
+    Notes
+    -----
+    Definition:
+    Let `G` be a simple, connected, undirected and bipartite graph with a perfect
+    matching M and bipartition (U,V). The residual graph of `G`, denoted by $G_M$,
+    is the graph obtained from G by directing the edges of M from V to U and the
+    edges that do not belong to M from U to V.
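+
+    For example, for the path graph ``1-2-3-4`` with bipartition U = {1, 3},
+    V = {2, 4} and perfect matching M = {(1, 2), (3, 4)}, the residual graph
+    has the directed edges (2, 1), (4, 3) and (3, 2).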
+
+    Lemma [1]_ :
+    Let M be a perfect matching of `G`. `G` is $k$-extendable if and only if its residual
+    graph $G_M$ is strongly connected and there are $k$ vertex-disjoint directed
+    paths between every vertex of U and every vertex of V.
+
+    Assuming that input graph `G` is undirected, simple, connected, bipartite and contains
+    a perfect matching M, this function constructs the residual graph $G_M$ of G and
+    returns the minimum, over all pairs of vertices $u \in U$, $v \in V$, of the
+    maximum number of vertex-disjoint directed paths between $u$ and $v$ in $G_M$.
+    By combining the definition and the lemma, this value is the extendability
+    of the graph `G`.
+
+    The time complexity is $O(n^3 m^2)$, where $n$ is the number of vertices
+    and $m$ is the number of edges.
+
+    References
+    ----------
+    .. [1] "A polynomial algorithm for the extendability problem in bipartite graphs",
+          J. Lakhal, L. Litzler, Information Processing Letters, 1998.
+    .. [2] "On n-extendible graphs", M. D. Plummer, Discrete Mathematics, 31:201–210, 1980
+          https://doi.org/10.1016/0012-365X(80)90037-0
+
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph G is not connected")
+
+    if not nx.bipartite.is_bipartite(G):
+        raise nx.NetworkXError("Graph G is not bipartite")
+
+    U, V = nx.bipartite.sets(G)
+
+    maximum_matching = nx.bipartite.hopcroft_karp_matching(G)
+
+    if not nx.is_perfect_matching(G, maximum_matching):
+        raise nx.NetworkXError("Graph G does not contain a perfect matching")
+
+    # list of edges in perfect matching, directed from V to U
+    pm = [(node, maximum_matching[node]) for node in V & maximum_matching.keys()]
+
+    # Direct all the edges of G, from V to U if in matching, else from U to V
+    directed_edges = [
+        (x, y) if (x in V and (x, y) in pm) or (x in U and (y, x) not in pm) else (y, x)
+        for x, y in G.edges
+    ]
+
+    # Construct the residual graph of G
+    residual_G = nx.DiGraph()
+    residual_G.add_nodes_from(G)
+    residual_G.add_edges_from(directed_edges)
+
+    if not nx.is_strongly_connected(residual_G):
+        raise nx.NetworkXError("The residual graph of G is not strongly connected")
+
+    # For node-pairs between V & U, keep min of max number of node-disjoint paths
+    # Variable $k$ stands for the extendability of graph G
+    k = float("inf")
+    for u in U:
+        for v in V:
+            num_paths = sum(1 for _ in nx.node_disjoint_paths(residual_G, u, v))
+            k = k if k < num_paths else num_paths
+    return k
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/generators.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/generators.py
new file mode 100644
index 0000000..6e73a4d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/generators.py
@@ -0,0 +1,603 @@
+"""
+Generators and functions for bipartite graphs.
+"""
+
+import math
+import numbers
+from functools import reduce
+
+import networkx as nx
+from networkx.utils import nodes_or_number, py_random_state
+
+__all__ = [
+    "configuration_model",
+    "havel_hakimi_graph",
+    "reverse_havel_hakimi_graph",
+    "alternating_havel_hakimi_graph",
+    "preferential_attachment_graph",
+    "random_graph",
+    "gnmk_random_graph",
+    "complete_bipartite_graph",
+]
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+@nodes_or_number([0, 1])
+def complete_bipartite_graph(n1, n2, create_using=None):
+    """Returns the complete bipartite graph `K_{n_1,n_2}`.
+
+    The graph is composed of two partitions with nodes 0 to (n1 - 1)
+    in the first and nodes n1 to (n1 + n2 - 1) in the second.
+    Each node in the first is connected to each node in the second.
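+
+    For example, ``complete_bipartite_graph(2, 3)`` has nodes ``0, 1`` in the
+    first partition, nodes ``2, 3, 4`` in the second, and all six edges
+    between them.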
+
+    Parameters
+    ----------
+    n1, n2 : integer or iterable container of nodes
+        If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`.
+        If a container, the elements are the nodes.
+    create_using : NetworkX graph instance, optional (default: nx.Graph)
+       Return graph of this type.
+
+    Notes
+    -----
+    Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are
+    containers of nodes. If only one of n1 or n2 are integers, that
+    integer is replaced by `range` of that integer.
+
+    The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+    to indicate which bipartite set the node belongs to.
+
+    This function is not imported in the main namespace.
+    To use it use nx.bipartite.complete_bipartite_graph
+    """
+    G = nx.empty_graph(0, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    n1, top = n1
+    n2, bottom = n2
+    if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral):
+        bottom = [n1 + i for i in bottom]
+    G.add_nodes_from(top, bipartite=0)
+    G.add_nodes_from(bottom, bipartite=1)
+    if len(G) != len(top) + len(bottom):
+        raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes")
+    G.add_edges_from((u, v) for u in top for v in bottom)
+    G.graph["name"] = f"complete_bipartite_graph({len(top)}, {len(bottom)})"
+    return G
+
+
+@py_random_state(3)
+@nx._dispatchable(name="bipartite_configuration_model", graphs=None, returns_graph=True)
+def configuration_model(aseq, bseq, create_using=None, seed=None):
+    """Returns a random bipartite graph from two given degree sequences.
+
+    Parameters
+    ----------
+    aseq : list
+       Degree sequence for node set A.
+    bseq : list
+       Degree sequence for node set B.
+    create_using : NetworkX graph instance, optional
+       Return graph of this type.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    The graph is composed of two partitions. Set A has nodes 0 to
+    (len(aseq) - 1) and set B has nodes len(aseq) to
+    (len(aseq) + len(bseq) - 1). Nodes from set A are connected to nodes
+    in set B by choosing randomly from the possible free stubs, one in A
+    and one in B.
+
+    Notes
+    -----
+    The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
+    If no graph type is specified use MultiGraph with parallel edges.
+    If you want a graph with no parallel edges use create_using=Graph()
+    but then the resulting degree sequences might not be exact.
+
+    The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+    to indicate which bipartite set the node belongs to.
+
+    This function is not imported in the main namespace.
+    To use it use nx.bipartite.configuration_model
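+
+    Examples
+    --------
+    A minimal sketch with two short degree sequences whose sums match; since
+    a multigraph is returned, the requested degrees are realized exactly no
+    matter the seed:
+
+    >>> G = nx.bipartite.configuration_model([1, 2], [2, 1], seed=42)
+    >>> sorted(d for n, d in G.degree())
+    [1, 1, 2, 2]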
+    """
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # length and sum of each sequence
+    lena = len(aseq)
+    lenb = len(bseq)
+    suma = sum(aseq)
+    sumb = sum(bseq)
+
+    if not suma == sumb:
+        raise nx.NetworkXError(
+            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
+        )
+
+    G = _add_nodes_with_bipartite_label(G, lena, lenb)
+
+    if len(aseq) == 0 or max(aseq) == 0:
+        return G  # done if no edges
+
+    # build lists of degree-repeated vertex numbers
+    stubs = [[v] * aseq[v] for v in range(lena)]
+    astubs = [x for subseq in stubs for x in subseq]
+
+    stubs = [[v] * bseq[v - lena] for v in range(lena, lena + lenb)]
+    bstubs = [x for subseq in stubs for x in subseq]
+
+    # shuffle lists
+    seed.shuffle(astubs)
+    seed.shuffle(bstubs)
+
+    G.add_edges_from([astubs[i], bstubs[i]] for i in range(suma))
+
+    G.name = "bipartite_configuration_model"
+    return G
+
+
+@nx._dispatchable(name="bipartite_havel_hakimi_graph", graphs=None, returns_graph=True)
+def havel_hakimi_graph(aseq, bseq, create_using=None):
+    """Returns a bipartite graph from two given degree sequences using a
+    Havel-Hakimi style construction.
+
+    The graph is composed of two partitions. Set A has nodes 0 to
+    (len(aseq) - 1) and set B has nodes len(aseq) to
+    (len(aseq) + len(bseq) - 1). Nodes from the set A are connected to
+    nodes in the set B by connecting the highest degree nodes in set A to
+    the highest degree nodes in set B until all stubs are connected.
+
+    Parameters
+    ----------
+    aseq : list
+       Degree sequence for node set A.
+    bseq : list
+       Degree sequence for node set B.
+    create_using : NetworkX graph instance, optional
+       Return graph of this type.
+
+    Notes
+    -----
+    The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
+    If no graph type is specified use MultiGraph with parallel edges.
+    If you want a graph with no parallel edges use create_using=Graph()
+    but then the resulting degree sequences might not be exact.
+
+    The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+    to indicate which bipartite set the node belongs to.
+
+    This function is not imported in the main namespace.
+    To use it use nx.bipartite.havel_hakimi_graph
+    """
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # length of each sequence
+    naseq = len(aseq)
+    nbseq = len(bseq)
+
+    suma = sum(aseq)
+    sumb = sum(bseq)
+
+    if not suma == sumb:
+        raise nx.NetworkXError(
+            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
+        )
+
+    G = _add_nodes_with_bipartite_label(G, naseq, nbseq)
+
+    if len(aseq) == 0 or max(aseq) == 0:
+        return G  # done if no edges
+
+    # build list of degree-repeated vertex numbers
+    astubs = [[aseq[v], v] for v in range(naseq)]
+    bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)]
+    astubs.sort()
+    while astubs:
+        (degree, u) = astubs.pop()  # take the largest degree node in the a set
+        if degree == 0:
+            break  # done, all are zero
+        # connect the source to largest degree nodes in the b set
+        bstubs.sort()
+        for target in bstubs[-degree:]:
+            v = target[1]
+            G.add_edge(u, v)
+            target[0] -= 1  # note this updates bstubs too.
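+            # a target whose remaining stub count reaches zero is dropped below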
+            if target[0] == 0:
+                bstubs.remove(target)
+
+    G.name = "bipartite_havel_hakimi_graph"
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def reverse_havel_hakimi_graph(aseq, bseq, create_using=None):
+    """Returns a bipartite graph from two given degree sequences using a
+    Havel-Hakimi style construction.
+
+    The graph is composed of two partitions. Set A has nodes 0 to
+    (len(aseq) - 1) and set B has nodes len(aseq) to
+    (len(aseq) + len(bseq) - 1). Nodes from set A are connected to nodes
+    in the set B by connecting the highest degree nodes in set A to the
+    lowest degree nodes in set B until all stubs are connected.
+
+    Parameters
+    ----------
+    aseq : list
+       Degree sequence for node set A.
+    bseq : list
+       Degree sequence for node set B.
+    create_using : NetworkX graph instance, optional
+       Return graph of this type.
+
+    Notes
+    -----
+    The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
+    If no graph type is specified use MultiGraph with parallel edges.
+    If you want a graph with no parallel edges use create_using=Graph()
+    but then the resulting degree sequences might not be exact.
+
+    The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+    to indicate which bipartite set the node belongs to.
+
+    This function is not imported in the main namespace.
+    To use it use nx.bipartite.reverse_havel_hakimi_graph
+    """
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # length of each sequence
+    lena = len(aseq)
+    lenb = len(bseq)
+    suma = sum(aseq)
+    sumb = sum(bseq)
+
+    if not suma == sumb:
+        raise nx.NetworkXError(
+            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
+        )
+
+    G = _add_nodes_with_bipartite_label(G, lena, lenb)
+
+    if len(aseq) == 0 or max(aseq) == 0:
+        return G  # done if no edges
+
+    # build list of degree-repeated vertex numbers
+    astubs = [[aseq[v], v] for v in range(lena)]
+    bstubs = [[bseq[v - lena], v] for v in range(lena, lena + lenb)]
+    astubs.sort()
+    bstubs.sort()
+    while astubs:
+        (degree, u) = astubs.pop()  # take the largest degree node in the a set
+        if degree == 0:
+            break  # done, all are zero
+        # connect the source to the smallest degree nodes in the b set
+        for target in bstubs[0:degree]:
+            v = target[1]
+            G.add_edge(u, v)
+            target[0] -= 1  # note this updates bstubs too.
+            if target[0] == 0:
+                bstubs.remove(target)
+
+    G.name = "bipartite_reverse_havel_hakimi_graph"
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def alternating_havel_hakimi_graph(aseq, bseq, create_using=None):
+    """Returns a bipartite graph from two given degree sequences using
+    an alternating Havel-Hakimi style construction.
+
+    The graph is composed of two partitions. Set A has nodes 0 to
+    (len(aseq) - 1) and set B has nodes len(aseq) to
+    (len(aseq) + len(bseq) - 1). Nodes from the set A are connected to
+    nodes in the set B by connecting the highest degree nodes in set A
+    alternately to the highest and the lowest degree nodes in set B until
+    all stubs are connected.
+
+    Parameters
+    ----------
+    aseq : list
+       Degree sequence for node set A.
+    bseq : list
+       Degree sequence for node set B.
+    create_using : NetworkX graph instance, optional
+       Return graph of this type.
+
+    Notes
+    -----
+    The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
+    If no graph type is specified use MultiGraph with parallel edges.
+    If you want a graph with no parallel edges use create_using=Graph()
+    but then the resulting degree sequences might not be exact.
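+
+    For example (the construction itself is deterministic, so the realized
+    degrees can be checked directly):
+
+    >>> G = nx.bipartite.alternating_havel_hakimi_graph([2, 2], [2, 2])
+    >>> sorted(d for n, d in G.degree())
+    [2, 2, 2, 2]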
+
+    The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+    to indicate which bipartite set the node belongs to.
+
+    This function is not imported in the main namespace.
+    To use it use nx.bipartite.alternating_havel_hakimi_graph
+    """
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # length of each sequence
+    naseq = len(aseq)
+    nbseq = len(bseq)
+    suma = sum(aseq)
+    sumb = sum(bseq)
+
+    if not suma == sumb:
+        raise nx.NetworkXError(
+            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
+        )
+
+    G = _add_nodes_with_bipartite_label(G, naseq, nbseq)
+
+    if len(aseq) == 0 or max(aseq) == 0:
+        return G  # done if no edges
+    # build list of degree-repeated vertex numbers
+    astubs = [[aseq[v], v] for v in range(naseq)]
+    bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)]
+    while astubs:
+        astubs.sort()
+        (degree, u) = astubs.pop()  # take the largest degree node in the a set
+        if degree == 0:
+            break  # done, all are zero
+        bstubs.sort()
+        small = bstubs[0 : degree // 2]  # add these low degree targets
+        large = bstubs[(-degree + degree // 2) :]  # now high degree targets
+        stubs = [x for z in zip(large, small) for x in z]  # interleave large and small
+        if len(stubs) < len(small) + len(large):  # check for zip truncation
+            stubs.append(large.pop())
+        for target in stubs:
+            v = target[1]
+            G.add_edge(u, v)
+            target[0] -= 1  # note this updates bstubs too.
+            if target[0] == 0:
+                bstubs.remove(target)
+
+    G.name = "bipartite_alternating_havel_hakimi_graph"
+    return G
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def preferential_attachment_graph(aseq, p, create_using=None, seed=None):
+    """Create a bipartite graph with a preferential attachment model from
+    a given single degree sequence.
+
+    The graph is composed of two partitions. Set A has nodes 0 to
+    (len(aseq) - 1) and set B has nodes starting with node len(aseq).
+    The number of nodes in set B is random.
+
+    Parameters
+    ----------
+    aseq : list
+       Degree sequence for node set A.
+    p : float
+       Probability that a new bottom node is added.
+    create_using : NetworkX graph instance, optional
+       Return graph of this type.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    References
+    ----------
+    .. [1] Guillaume, J.L. and Latapy, M.,
+       Bipartite graphs as models of complex networks.
+       Physica A: Statistical Mechanics and its Applications,
+       2006, 371(2), pp.795-813.
+    .. [2] Jean-Loup Guillaume and Matthieu Latapy,
+       Bipartite structure of all complex networks,
+       Inf. Process. Lett. 90, 2004, pg. 215-221
+       https://doi.org/10.1016/j.ipl.2004.03.007
+
+    Notes
+    -----
+    The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+    to indicate which bipartite set the node belongs to.
+
+    This function is not imported in the main namespace.
+ To use it use nx.bipartite.preferential_attachment_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + if p > 1: + raise nx.NetworkXError(f"probability {p} > 1") + + naseq = len(aseq) + G = _add_nodes_with_bipartite_label(G, naseq, 0) + vv = [[v] * aseq[v] for v in range(naseq)] + while vv: + while vv[0]: + source = vv[0][0] + vv[0].remove(source) + if seed.random() < p or len(G) == naseq: + target = len(G) + G.add_node(target, bipartite=1) + G.add_edge(source, target) + else: + bb = [[b] * G.degree(b) for b in range(naseq, len(G))] + # flatten the list of lists into a list. + bbstubs = reduce(lambda x, y: x + y, bb) + # choose preferentially a bottom node. + target = seed.choice(bbstubs) + G.add_node(target, bipartite=1) + G.add_edge(source, target) + vv.remove(vv[0]) + G.name = "bipartite_preferential_attachment_model" + return G + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_graph(n, m, p, seed=None, directed=False): + """Returns a bipartite random graph. + + This is a bipartite version of the binomial (Erdős-Rényi) graph. + The graph is composed of two partitions. Set A has nodes 0 to + (n - 1) and set B has nodes n to (n + m - 1). + + Parameters + ---------- + n : int + The number of nodes in the first bipartite set. + m : int + The number of nodes in the second bipartite set. + p : float + Probability for edge creation. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + + Notes + ----- + The bipartite random graph algorithm chooses each of the n*m (undirected) + or 2*nm (directed) possible edges with probability p. + + This algorithm is $O(n+m)$ where $m$ is the expected number of edges. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.random_graph + + See Also + -------- + gnp_random_graph, configuration_model + + References + ---------- + .. [1] Vladimir Batagelj and Ulrik Brandes, + "Efficient generation of large random networks", + Phys. Rev. E, 71, 036113, 2005. + """ + G = nx.Graph() + G = _add_nodes_with_bipartite_label(G, n, m) + if directed: + G = nx.DiGraph(G) + G.name = f"fast_gnp_random_graph({n},{m},{p})" + + if p <= 0: + return G + if p >= 1: + return nx.complete_bipartite_graph(n, m) + + lp = math.log(1.0 - p) + + v = 0 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= m and v < n: + w = w - m + v = v + 1 + if v < n: + G.add_edge(v, n + w) + + if directed: + # use the same algorithm to + # add edges from the "m" to "n" set + v = 0 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= m and v < n: + w = w - m + v = v + 1 + if v < n: + G.add_edge(n + w, v) + + return G + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def gnmk_random_graph(n, m, k, seed=None, directed=False): + """Returns a random bipartite graph G_{n,m,k}. + + Produces a bipartite graph chosen randomly out of the set of all graphs + with n top nodes, m bottom nodes, and k edges. + The graph is composed of two sets of nodes. + Set A has nodes 0 to (n - 1) and set B has nodes n to (n + m - 1). 
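+    Each edge joins a node of set A to a node of set B, so at most `n * m`
+    distinct edges are possible; larger `k` yields the complete bipartite
+    graph.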
+ + Parameters + ---------- + n : int + The number of nodes in the first bipartite set. + m : int + The number of nodes in the second bipartite set. + k : int + The number of edges + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + + Examples + -------- + >>> G = nx.bipartite.gnmk_random_graph(10, 20, 50) + + See Also + -------- + gnm_random_graph + + Notes + ----- + If k > m * n then a complete bipartite graph is returned. + + This graph is a bipartite version of the `G_{nm}` random graph model. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.gnmk_random_graph + """ + G = nx.Graph() + G = _add_nodes_with_bipartite_label(G, n, m) + if directed: + G = nx.DiGraph(G) + G.name = f"bipartite_gnm_random_graph({n},{m},{k})" + if n == 1 or m == 1: + return G + max_edges = n * m # max_edges for bipartite networks + if k >= max_edges: # Maybe we should raise an exception here + return nx.complete_bipartite_graph(n, m, create_using=G) + + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + bottom = list(set(G) - set(top)) + edge_count = 0 + while edge_count < k: + # generate random edge,u,v + u = seed.choice(top) + v = seed.choice(bottom) + if v in G[u]: + continue + else: + G.add_edge(u, v) + edge_count += 1 + return G + + +def _add_nodes_with_bipartite_label(G, lena, lenb): + G.add_nodes_from(range(lena + lenb)) + b = dict(zip(range(lena), [0] * lena)) + b.update(dict(zip(range(lena, lena + lenb), [1] * lenb))) + nx.set_node_attributes(G, b, "bipartite") + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/link_analysis.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/link_analysis.py new file mode 100644 index 0000000..7238b1e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/link_analysis.py @@ -0,0 +1,316 @@ +import itertools + +import networkx as nx + +__all__ = ["birank"] + + +@nx._dispatchable(edge_attrs="weight") +def birank( + G, + nodes, + *, + alpha=None, + beta=None, + top_personalization=None, + bottom_personalization=None, + max_iter=100, + tol=1.0e-6, + weight="weight", +): + r"""Compute the BiRank score for nodes in a bipartite network. + + Given the bipartite sets $U$ and $P$, the BiRank algorithm seeks to satisfy + the following recursive relationships between the scores of nodes $j \in P$ + and $i \in U$: + + .. math:: + + p_j = \alpha \sum_{i \in U} \frac{w_{ij}}{\sqrt{d_i}\sqrt{d_j}} u_i + + (1 - \alpha) p_j^0 + + u_i = \beta \sum_{j \in P} \frac{w_{ij}}{\sqrt{d_i}\sqrt{d_j}} p_j + + (1 - \beta) u_i^0 + + where + + * $p_j$ and $u_i$ are the BiRank scores of nodes $j \in P$ and $i \in U$. + * $w_{ij}$ is the weight of the edge between nodes $i \in U$ and $j \in P$ + (With a value of 0 if no edge exists). + * $d_i$ and $d_j$ are the weighted degrees of nodes $i \in U$ and $j \in P$, + respectively. + * $p_j^0$ and $u_i^0$ are personalization values that can encode a priori + weights for the nodes $j \in P$ and $i \in U$, respectively. Akin to the + personalization vector used by PageRank. + * $\alpha$ and $\beta$ are damping hyperparameters applying to nodes in $P$ + and $U$ respectively. 
They can take values in the interval $[0, 1]$, and + are analogous to those used by PageRank. + + Below are two use cases for this algorithm. + + 1. Personalized Recommendation System + Given a bipartite graph representing users and items, BiRank can be used + as a collaborative filtering algorithm to recommend items to users. + Previous ratings are encoded as edge weights, and the specific ratings + of an individual user on a set of items is used as the personalization + vector over items. See the example below for an implementation of this + on a toy dataset provided in [1]_. + + 2. Popularity Prediction + Given a bipartite graph representing user interactions with items, e.g. + commits to a GitHub repository, BiRank can be used to predict the + popularity of a given item. Edge weights should encode the strength of + the interaction signal. This could be a raw count, or weighted by a time + decay function like that specified in Eq. (15) of [1]_. The + personalization vectors can be used to encode existing popularity + signals, for example, the monthly download count of a repository's + package. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : iterable of nodes + Container with all nodes belonging to the first bipartite node set + ('top'). The nodes in this set use the hyperparameter `alpha`, and the + personalization dictionary `top_personalization`. The nodes in the second + bipartite node set ('bottom') are automatically determined by taking the + complement of 'top' with respect to the graph `G`. + + alpha : float, optional (default=0.80 if top_personalization not empty, else 1) + Damping factor for the 'top' nodes. Must be in the interval $[0, 1]$. + Larger alpha and beta generally reduce the effect of the personalizations + and increase the number of iterations before convergence. Choice of value + is largely dependent on use case, and experimentation is recommended. + + beta : float, optional (default=0.80 if bottom_personalization not empty, else 1) + Damping factor for the 'bottom' nodes. Must be in the interval $[0, 1]$. + Larger alpha and beta generally reduce the effect of the personalizations + and increase the number of iterations before convergence. Choice of value + is largely dependent on use case, and experimentation is recommended. + + top_personalization : dict, optional (default=None) + Dictionary keyed by nodes in 'top' to that node's personalization value. + Unspecified nodes in 'top' will be assigned a personalization value of 0. + Personalization values are used to encode a priori weights for a given node, + and should be non-negative. + + bottom_personalization : dict, optional (default=None) + Dictionary keyed by nodes in 'bottom' to that node's personalization value. + Unspecified nodes in 'bottom' will be assigned a personalization value of 0. + Personalization values are used to encode a priori weights for a given node, + and should be non-negative. + + max_iter : int, optional (default=100) + Maximum number of iterations in power method eigenvalue solver. + + tol : float, optional (default=1.0e-6) + Error tolerance used to check convergence in power method solver. The + iteration will stop after a tolerance of both ``len(top) * tol`` and + ``len(bottom) * tol`` is reached for nodes in 'top' and 'bottom' + respectively. + + weight : string or None, optional (default='weight') + Edge data key to use as weight. + + Returns + ------- + birank : dictionary + Dictionary keyed by node to that node's BiRank score. 
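+        The dictionary contains scores for the nodes of both bipartite sets.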
+
+    Raises
+    ------
+    NetworkXAlgorithmError
+        If the parameters `alpha` or `beta` are not in the interval [0, 1],
+        if either of the bipartite sets are empty, or if negative values are
+        provided in the personalization dictionaries.
+
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    Examples
+    --------
+    Construct a bipartite graph with user-item ratings and use BiRank to
+    recommend items to a user (user 1). The example below uses the `rating`
+    edge attribute as the weight of the edges. The `top_personalization` vector
+    is used to encode the user's previous ratings on items.
+
+    Creation of graph, bipartite sets for the example.
+
+    >>> elist = [
+    ...     ("u1", "p1", 5),
+    ...     ("u2", "p1", 5),
+    ...     ("u2", "p2", 4),
+    ...     ("u3", "p1", 3),
+    ...     ("u3", "p3", 2),
+    ... ]
+    >>> G = nx.Graph()
+    >>> G.add_weighted_edges_from(elist, weight="rating")
+    >>> product_nodes = ("p1", "p2", "p3")
+    >>> user = "u1"
+
+    First, we create a personalization vector for the user based on their
+    ratings of past items. In this case they have only rated one item (p1, with
+    a rating of 5) in the past.
+
+    >>> user_personalization = {
+    ...     product: rating
+    ...     for _, product, rating in G.edges(nbunch=user, data="rating")
+    ... }
+    >>> user_personalization
+    {'p1': 5}
+
+    Calculate the BiRank score of all nodes in the graph, filter for the items
+    that the user has not rated yet, and sort the results by score.
+
+    >>> user_birank_results = nx.bipartite.birank(
+    ...     G, product_nodes, top_personalization=user_personalization, weight="rating"
+    ... )
+    >>> user_birank_results = filter(
+    ...     lambda item: item[0][0] == "p" and user not in G.neighbors(item[0]),
+    ...     user_birank_results.items(),
+    ... )
+    >>> user_birank_results = sorted(
+    ...     user_birank_results, key=lambda item: item[1], reverse=True
+    ... )
+    >>> user_recommendations = {
+    ...     product: round(score, 5) for product, score in user_birank_results
+    ... }
+    >>> user_recommendations
+    {'p2': 1.44818, 'p3': 1.04811}
+
+    We find that user 1 should be recommended item p2 over item p3. This is
+    because user 2 also rated p1 highly, while user 3 did not. Thus user 2's
+    tastes are inferred to be similar to user 1's, and carry more weight in
+    the recommendation.
+
+    See Also
+    --------
+    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
+    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`
+    :func:`~networkx.algorithms.bipartite.centrality.betweenness_centrality`
+    :func:`~networkx.algorithms.bipartite.basic.sets`
+    :func:`~networkx.algorithms.bipartite.basic.is_bipartite`
+
+    Notes
+    -----
+    The `nodes` input parameter must contain all nodes in one bipartite
+    node set, but the dictionary returned contains all nodes from both
+    bipartite node sets. See :mod:`bipartite documentation
+    <networkx.algorithms.bipartite>` for further details on how
+    bipartite graphs are handled in NetworkX.
+
+    In the case a personalization dictionary is not provided for top (bottom),
+    `alpha` (`beta`) will default to 1. This is because a damping factor
+    without a non-zero entry in the personalization vector will lead to the
+    algorithm converging to the zero vector.
+
+    References
+    ----------
+    .. [1] Xiangnan He, Ming Gao, Min-Yen Kan, and Dingxian Wang. 2017.
+       BiRank: Towards Ranking on Bipartite Graphs. IEEE Trans. on Knowl.
+       and Data Eng. 29, 1 (January 2017), 57–71.
+       https://arxiv.org/pdf/1708.04396
+
+    """
+    import numpy as np
+    import scipy as sp
+
+    # Initialize the sets of top and bottom nodes
+    top = set(nodes)
+    bottom = set(G) - top
+    top_count = len(top)
+    bottom_count = len(bottom)
+
+    if top_count == 0 or bottom_count == 0:
+        raise nx.NetworkXAlgorithmError(
+            "The BiRank algorithm requires a bipartite graph with at least "
+            "one node in each set."
+        )
+
+    # Clean the personalization dictionaries
+    top_personalization = _clean_personalization_dict(top_personalization)
+    bottom_personalization = _clean_personalization_dict(bottom_personalization)
+
+    # Set default values for alpha and beta if not provided
+    if alpha is None:
+        alpha = 0.8 if top_personalization else 1
+    if beta is None:
+        beta = 0.8 if bottom_personalization else 1
+
+    if alpha < 0 or alpha > 1:
+        raise nx.NetworkXAlgorithmError("alpha must be in the interval [0, 1]")
+    if beta < 0 or beta > 1:
+        raise nx.NetworkXAlgorithmError("beta must be in the interval [0, 1]")
+
+    # Initialize query vectors
+    p0 = np.array([top_personalization.get(n, 0) for n in top], dtype=float)
+    u0 = np.array([bottom_personalization.get(n, 0) for n in bottom], dtype=float)
+
+    # Construct degree normalized biadjacency matrix `S` and its transpose
+    W = nx.bipartite.biadjacency_matrix(G, bottom, top, weight=weight, dtype=float)
+    p_degrees = W.sum(axis=0, dtype=float)
+    # Handle case where the node is disconnected - avoids warning
+    p_degrees[p_degrees == 0] = 1.0
+    D_p = sp.sparse.dia_array(
+        ([1.0 / np.sqrt(p_degrees)], [0]),
+        shape=(top_count, top_count),
+        dtype=float,
+    )
+    u_degrees = W.sum(axis=1, dtype=float)
+    u_degrees[u_degrees == 0] = 1.0
+    D_u = sp.sparse.dia_array(
+        ([1.0 / np.sqrt(u_degrees)], [0]),
+        shape=(bottom_count, bottom_count),
+        dtype=float,
+    )
+    S = D_u.tocsr() @ W @ D_p.tocsr()
+    S_T = S.T
+
+    # Initialize birank vectors for iteration
+    p = np.ones(top_count, dtype=float) / top_count
+    u = beta * (S @ p) + (1 - beta) * u0
+
+    # Iterate until convergence
+    for _ in range(max_iter):
+        p_last = p
+        u_last = u
+        p = alpha * (S_T @ u) + (1 - alpha) * p0
+        u = beta * (S @ p) + (1 - beta) * u0
+
+        # Continue iterating if the error (absolute if less than 1, relative otherwise)
+        # is above the tolerance threshold for either p or u
+        err_u = np.absolute((u_last - u) / np.maximum(1.0, u_last)).sum()
+        if err_u >= len(u) * tol:
+            continue
+        err_p = np.absolute((p_last - p) / np.maximum(1.0, p_last)).sum()
+        if err_p >= len(p) * tol:
+            continue
+
+        # Handle the edge case where both alpha and beta are 1: the scale is
+        # indeterminate, so normalization is required to return consistent results
+        if alpha == 1 and beta == 1:
+            p = p / np.linalg.norm(p, 1)
+            u = u / np.linalg.norm(u, 1)
+
+        # If both error thresholds pass, return a single dictionary mapping
+        # nodes to their scores
+        return dict(
+            zip(itertools.chain(top, bottom), map(float, itertools.chain(p, u)))
+        )
+
+    # If we reach this point, we have not converged
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+def _clean_personalization_dict(personalization):
+    """Filter out zero values from the personalization dictionary,
+    handle case where None is passed, ensure values are non-negative."""
+    if personalization is None:
+        return {}
+    if any(value < 0 for value in personalization.values()):
+        raise nx.NetworkXAlgorithmError("Personalization values must be non-negative.")
+    return {node: value for node, value in personalization.items() if value != 0}
diff --git
a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/matching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/matching.py new file mode 100644 index 0000000..6b577d6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/matching.py @@ -0,0 +1,590 @@ +# This module uses material from the Wikipedia article Hopcroft--Karp algorithm +# , accessed on +# January 3, 2015, which is released under the Creative Commons +# Attribution-Share-Alike License 3.0 +# . That article includes +# pseudocode, which has been translated into the corresponding Python code. +# +# Portions of this module use code from David Eppstein's Python Algorithms and +# Data Structures (PADS) library, which is dedicated to the public domain (for +# proof, see ). +"""Provides functions for computing maximum cardinality matchings and minimum +weight full matchings in a bipartite graph. + +If you don't care about the particular implementation of the maximum matching +algorithm, simply use the :func:`maximum_matching`. If you do care, you can +import one of the named maximum matching algorithms directly. + +For example, to find a maximum matching in the complete bipartite graph with +two vertices on the left and three vertices on the right: + +>>> G = nx.complete_bipartite_graph(2, 3) +>>> left, right = nx.bipartite.sets(G) +>>> list(left) +[0, 1] +>>> list(right) +[2, 3, 4] +>>> nx.bipartite.maximum_matching(G) +{0: 2, 1: 3, 2: 0, 3: 1} + +The dictionary returned by :func:`maximum_matching` includes a mapping for +vertices in both the left and right vertex sets. + +Similarly, :func:`minimum_weight_full_matching` produces, for a complete +weighted bipartite graph, a matching whose cardinality is the cardinality of +the smaller of the two partitions, and for which the sum of the weights of the +edges included in the matching is minimal. + +""" + +import collections +import itertools + +import networkx as nx +from networkx.algorithms.bipartite import sets as bipartite_sets +from networkx.algorithms.bipartite.matrix import biadjacency_matrix + +__all__ = [ + "maximum_matching", + "hopcroft_karp_matching", + "eppstein_matching", + "to_vertex_cover", + "minimum_weight_full_matching", +] + +INFINITY = float("inf") + + +@nx._dispatchable +def hopcroft_karp_matching(G, top_nodes=None): + """Returns the maximum cardinality matching of the bipartite graph `G`. + + A matching is a set of edges that do not share any nodes. A maximum + cardinality matching is a matching with the most edges possible. It + is not always unique. Finding a matching in a bipartite graph can be + treated as a networkx flow problem. + + The functions ``hopcroft_karp_matching`` and ``maximum_matching`` + are aliases of the same function. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container of nodes + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matches`, such that + ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matches`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. 
When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented with the `Hopcroft--Karp matching algorithm + `_ for + bipartite graphs. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + maximum_matching + hopcroft_karp_matching + eppstein_matching + + References + ---------- + .. [1] John E. Hopcroft and Richard M. Karp. "An n^{5 / 2} Algorithm for + Maximum Matchings in Bipartite Graphs" In: **SIAM Journal of Computing** + 2.4 (1973), pp. 225--231. . + + """ + + # First we define some auxiliary search functions. + # + # If you are a human reading these auxiliary search functions, the "global" + # variables `leftmatches`, `rightmatches`, `distances`, etc. are defined + # below the functions, so that they are initialized close to the initial + # invocation of the search functions. + def breadth_first_search(): + for v in left: + if leftmatches[v] is None: + distances[v] = 0 + queue.append(v) + else: + distances[v] = INFINITY + distances[None] = INFINITY + while queue: + v = queue.popleft() + if distances[v] < distances[None]: + for u in G[v]: + if distances[rightmatches[u]] is INFINITY: + distances[rightmatches[u]] = distances[v] + 1 + queue.append(rightmatches[u]) + return distances[None] is not INFINITY + + def depth_first_search(v): + if v is not None: + for u in G[v]: + if distances[rightmatches[u]] == distances[v] + 1: + if depth_first_search(rightmatches[u]): + rightmatches[u] = v + leftmatches[v] = u + return True + distances[v] = INFINITY + return False + return True + + # Initialize the "global" variables that maintain state during the search. + left, right = bipartite_sets(G, top_nodes) + leftmatches = dict.fromkeys(left) + rightmatches = dict.fromkeys(right) + distances = {} + queue = collections.deque() + + # Implementation note: this counter is incremented as pairs are matched but + # it is currently not used elsewhere in the computation. + num_matched_pairs = 0 + while breadth_first_search(): + for v in left: + if leftmatches[v] is None: + if depth_first_search(v): + num_matched_pairs += 1 + + # Strip the entries matched to `None`. + leftmatches = {k: v for k, v in leftmatches.items() if v is not None} + rightmatches = {k: v for k, v in rightmatches.items() if v is not None} + + # At this point, the left matches and the right matches are inverses of one + # another. In other words, + # + # leftmatches == {v, k for k, v in rightmatches.items()} + # + # Finally, we combine both the left matches and right matches. + return dict(itertools.chain(leftmatches.items(), rightmatches.items())) + + +@nx._dispatchable +def eppstein_matching(G, top_nodes=None): + """Returns the maximum cardinality matching of the bipartite graph `G`. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matching`, such that + ``matching[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matching`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. 
When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented with David Eppstein's version of the algorithm + Hopcroft--Karp algorithm (see :func:`hopcroft_karp_matching`), which + originally appeared in the `Python Algorithms and Data Structures library + (PADS) `_. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + + hopcroft_karp_matching + + """ + # Due to its original implementation, a directed graph is needed + # so that the two sets of bipartite nodes can be distinguished + left, right = bipartite_sets(G, top_nodes) + G = nx.DiGraph(G.edges(left)) + # initialize greedy matching (redundant, but faster than full search) + matching = {} + for u in G: + for v in G[u]: + if v not in matching: + matching[v] = u + break + while True: + # structure residual graph into layers + # pred[u] gives the neighbor in the previous layer for u in U + # preds[v] gives a list of neighbors in the previous layer for v in V + # unmatched gives a list of unmatched vertices in final layer of V, + # and is also used as a flag value for pred[u] when u is in the first + # layer + preds = {} + unmatched = [] + pred = dict.fromkeys(G, unmatched) + for v in matching: + del pred[matching[v]] + layer = list(pred) + + # repeatedly extend layering structure by another pair of layers + while layer and not unmatched: + newLayer = {} + for u in layer: + for v in G[u]: + if v not in preds: + newLayer.setdefault(v, []).append(u) + layer = [] + for v in newLayer: + preds[v] = newLayer[v] + if v in matching: + layer.append(matching[v]) + pred[matching[v]] = v + else: + unmatched.append(v) + + # did we finish layering without finding any alternating paths? + if not unmatched: + # TODO - The lines between --- were unused and were thus commented + # out. This whole commented chunk should be reviewed to determine + # whether it should be built upon or completely removed. + # --- + # unlayered = {} + # for u in G: + # # TODO Why is extra inner loop necessary? + # for v in G[u]: + # if v not in preds: + # unlayered[v] = None + # --- + # TODO Originally, this function returned a three-tuple: + # + # return (matching, list(pred), list(unlayered)) + # + # For some reason, the documentation for this function + # indicated that the second and third elements of the returned + # three-tuple would be the vertices in the left and right vertex + # sets, respectively, that are also in the maximum independent set. + # However, what I think the author meant was that the second + # element is the list of vertices that were unmatched and the third + # element was the list of vertices that were matched. Since that + # seems to be the case, they don't really need to be returned, + # since that information can be inferred from the matching + # dictionary. 
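+
+            # (Up to here `matching` maps each matched right-side vertex to
+            # its left-side partner only; the loop below adds the reverse
+            # direction so both endpoints appear as keys.)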
+ + # All the matched nodes must be a key in the dictionary + for key in matching.copy(): + matching[matching[key]] = key + return matching + + # recursively search backward through layers to find alternating paths + # recursion returns true if found path, false otherwise + def recurse(v): + if v in preds: + L = preds.pop(v) + for u in L: + if u in pred: + pu = pred.pop(u) + if pu is unmatched or recurse(pu): + matching[v] = u + return True + return False + + for v in unmatched: + recurse(v) + + +def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges, targets): + """Returns True if and only if the vertex `v` is connected to one of + the target vertices by an alternating path in `G`. + + An *alternating path* is a path in which every other edge is in the + specified maximum matching (and the remaining edges in the path are not in + the matching). An alternating path may have matched edges in the even + positions or in the odd positions, as long as the edges alternate between + 'matched' and 'unmatched'. + + `G` is an undirected bipartite NetworkX graph. + + `v` is a vertex in `G`. + + `matched_edges` is a set of edges present in a maximum matching in `G`. + + `unmatched_edges` is a set of edges not present in a maximum + matching in `G`. + + `targets` is a set of vertices. + + """ + + def _alternating_dfs(u, along_matched=True): + """Returns True if and only if `u` is connected to one of the + targets by an alternating path. + + `u` is a vertex in the graph `G`. + + If `along_matched` is True, this step of the depth-first search + will continue only through edges in the given matching. Otherwise, it + will continue only through edges *not* in the given matching. + + """ + visited = set() + # Follow matched edges when depth is even, + # and follow unmatched edges when depth is odd. + initial_depth = 0 if along_matched else 1 + stack = [(u, iter(G[u]), initial_depth)] + while stack: + parent, children, depth = stack[-1] + valid_edges = matched_edges if depth % 2 else unmatched_edges + try: + child = next(children) + if child not in visited: + if (parent, child) in valid_edges or (child, parent) in valid_edges: + if child in targets: + return True + visited.add(child) + stack.append((child, iter(G[child]), depth + 1)) + except StopIteration: + stack.pop() + return False + + # Check for alternating paths starting with edges in the matching, then + # check for alternating paths starting with edges not in the + # matching. + return _alternating_dfs(v, along_matched=True) or _alternating_dfs( + v, along_matched=False + ) + + +def _connected_by_alternating_paths(G, matching, targets): + """Returns the set of vertices that are connected to one of the target + vertices by an alternating path in `G` or are themselves a target. + + An *alternating path* is a path in which every other edge is in the + specified maximum matching (and the remaining edges in the path are not in + the matching). An alternating path may have matched edges in the even + positions or in the odd positions, as long as the edges alternate between + 'matched' and 'unmatched'. + + `G` is an undirected bipartite NetworkX graph. + + `matching` is a dictionary representing a maximum matching in `G`, as + returned by, for example, :func:`maximum_matching`. + + `targets` is a set of vertices. + + """ + # Get the set of matched edges and the set of unmatched edges. Only include + # one version of each undirected edge (for example, include edge (1, 2) but + # not edge (2, 1)). 
Using frozensets as an intermediary step we do not + # require nodes to be orderable. + edge_sets = {frozenset((u, v)) for u, v in matching.items()} + matched_edges = {tuple(edge) for edge in edge_sets} + unmatched_edges = { + (u, v) for (u, v) in G.edges() if frozenset((u, v)) not in edge_sets + } + + return { + v + for v in G + if v in targets + or _is_connected_by_alternating_path( + G, v, matched_edges, unmatched_edges, targets + ) + } + + +@nx._dispatchable +def to_vertex_cover(G, matching, top_nodes=None): + """Returns the minimum vertex cover corresponding to the given maximum + matching of the bipartite graph `G`. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + matching : dictionary + + A dictionary whose keys are vertices in `G` and whose values are the + distinct neighbors comprising the maximum matching for `G`, as returned + by, for example, :func:`maximum_matching`. The dictionary *must* + represent the maximum matching. + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + vertex_cover : :class:`set` + + The minimum vertex cover in `G`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented using the procedure guaranteed by `Konig's + theorem + `_, + which proves an equivalence between a maximum matching and a minimum vertex + cover in bipartite graphs. + + Since a minimum vertex cover is the complement of a maximum independent set + for any graph, one can compute the maximum independent set of a bipartite + graph this way: + + >>> G = nx.complete_bipartite_graph(2, 3) + >>> matching = nx.bipartite.maximum_matching(G) + >>> vertex_cover = nx.bipartite.to_vertex_cover(G, matching) + >>> independent_set = set(G) - vertex_cover + >>> print(list(independent_set)) + [2, 3, 4] + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + """ + # This is a Python implementation of the algorithm described at + # . + L, R = bipartite_sets(G, top_nodes) + # Let U be the set of unmatched vertices in the left vertex set. + unmatched_vertices = set(G) - set(matching) + U = unmatched_vertices & L + # Let Z be the set of vertices that are either in U or are connected to U + # by alternating paths. + Z = _connected_by_alternating_paths(G, matching, U) + # At this point, every edge either has a right endpoint in Z or a left + # endpoint not in Z. This gives us the vertex cover. + return (L - Z) | (R & Z) + + +#: Returns the maximum cardinality matching in the given bipartite graph. +#: +#: This function is simply an alias for :func:`hopcroft_karp_matching`. +maximum_matching = hopcroft_karp_matching + + +@nx._dispatchable(edge_attrs="weight") +def minimum_weight_full_matching(G, top_nodes=None, weight="weight"): + r"""Returns a minimum weight full matching of the bipartite graph `G`. + + Let :math:`G = ((U, V), E)` be a weighted bipartite graph with real weights + :math:`w : E \to \mathbb{R}`. This function then produces a matching + :math:`M \subseteq E` with cardinality + + .. 
math:: + \lvert M \rvert = \min(\lvert U \rvert, \lvert V \rvert), + + which minimizes the sum of the weights of the edges included in the + matching, :math:`\sum_{e \in M} w(e)`, or raises an error if no such + matching exists. + + When :math:`\lvert U \rvert = \lvert V \rvert`, this is commonly + referred to as a perfect matching; here, since we allow + :math:`\lvert U \rvert` and :math:`\lvert V \rvert` to differ, we + follow Karp [1]_ and refer to the matching as *full*. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. + + weight : string, optional (default='weight') + + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matches`, such that + ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matches`. + + Raises + ------ + ValueError + Raised if no full matching exists. + + ImportError + Raised if SciPy is not available. + + Notes + ----- + The problem of determining a minimum weight full matching is also known as + the rectangular linear assignment problem. This implementation defers the + calculation of the assignment to SciPy. + + References + ---------- + .. [1] Richard Manning Karp: + An algorithm to Solve the m x n Assignment Problem in Expected Time + O(mn log n). + Networks, 10(2):143–152, 1980. + + """ + import numpy as np + import scipy as sp + + left, right = nx.bipartite.sets(G, top_nodes) + U = list(left) + V = list(right) + # We explicitly create the biadjacency matrix having infinities + # where edges are missing (as opposed to zeros, which is what one would + # get by using toarray on the sparse matrix). + weights_sparse = biadjacency_matrix( + G, row_order=U, column_order=V, weight=weight, format="coo" + ) + weights = np.full(weights_sparse.shape, np.inf) + weights[weights_sparse.row, weights_sparse.col] = weights_sparse.data + left_matches = sp.optimize.linear_sum_assignment(weights) + d = {U[u]: V[v] for u, v in zip(*left_matches)} + # d will contain the matching from edges in left to right; we need to + # add the ones from right to left as well. + d.update({v: u for u, v in d.items()}) + return d diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/matrix.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/matrix.py new file mode 100644 index 0000000..c9143da --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/matrix.py @@ -0,0 +1,168 @@ +""" +==================== +Biadjacency matrices +==================== +""" + +import itertools + +import networkx as nx +from networkx.convert_matrix import _generate_weighted_edges + +__all__ = ["biadjacency_matrix", "from_biadjacency_matrix"] + + +@nx._dispatchable(edge_attrs="weight") +def biadjacency_matrix( + G, row_order, column_order=None, dtype=None, weight="weight", format="csr" +): + r"""Returns the biadjacency matrix of the bipartite graph G. + + Let `G = (U, V, E)` be a bipartite graph with node sets + `U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency + matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1` + if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is + not `None` and matches the name of an edge attribute, its value is + used instead of 1. 
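+
+    A minimal sketch of the row/column correspondence (an editor's
+    illustration, not part of the upstream docstring; assumes SciPy is
+    installed, and the node labels ``u1``/``u2``/``v1``/``v2`` are
+    hypothetical):
+
+    >>> import networkx as nx
+    >>> B = nx.Graph([("u1", "v1"), ("u1", "v2"), ("u2", "v2")])
+    >>> M = nx.bipartite.biadjacency_matrix(
+    ...     B, row_order=["u1", "u2"], column_order=["v1", "v2"]
+    ... )
+    >>> M.toarray()
+    array([[1, 1],
+           [0, 1]])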
+ + Parameters + ---------- + G : graph + A NetworkX graph + + row_order : list of nodes + The rows of the matrix are ordered according to the list of nodes. + + column_order : list, optional + The columns of the matrix are ordered according to the list of nodes. + If column_order is None, then the ordering of columns is arbitrary. + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None, optional (default='weight') + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + format : str in {'dense', 'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The type of the matrix to be returned (default 'csr'). For + some algorithms different implementations of sparse matrices + can perform better. See [2]_ for details. + + Returns + ------- + M : SciPy sparse array + Biadjacency matrix representation of the bipartite graph G. + + Notes + ----- + No attempt is made to check that the input graph is bipartite. + + For directed bipartite graphs only successors are considered as neighbors. + To obtain an adjacency matrix with ones (or weight values) for both + predecessors and successors you have to generate two biadjacency matrices + where the rows of one of them are the columns of the other, and then add + one to the transpose of the other. + + See Also + -------- + adjacency_matrix + from_biadjacency_matrix + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph + .. [2] Scipy Dev. References, "Sparse Matrices", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + + nlen = len(row_order) + if nlen == 0: + raise nx.NetworkXError("row_order is empty list") + if len(row_order) != len(set(row_order)): + msg = "Ambiguous ordering: `row_order` contained duplicates." + raise nx.NetworkXError(msg) + if column_order is None: + column_order = list(set(G) - set(row_order)) + mlen = len(column_order) + if len(column_order) != len(set(column_order)): + msg = "Ambiguous ordering: `column_order` contained duplicates." + raise nx.NetworkXError(msg) + + row_index = dict(zip(row_order, itertools.count())) + col_index = dict(zip(column_order, itertools.count())) + + if G.number_of_edges() == 0: + row, col, data = [], [], [] + else: + row, col, data = zip( + *( + (row_index[u], col_index[v], d.get(weight, 1)) + for u, v, d in G.edges(row_order, data=True) + if u in row_index and v in col_index + ) + ) + A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, mlen), dtype=dtype) + try: + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse array format: {format}") from err + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_biadjacency_matrix(A, create_using=None, edge_attribute="weight"): + r"""Creates a new bipartite graph from a biadjacency matrix given as a + SciPy sparse array. + + Parameters + ---------- + A: scipy sparse array + A biadjacency matrix representation of a graph + + create_using: NetworkX graph + Use specified graph for result. The default is Graph() + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + The nodes are labeled with the attribute `bipartite` set to an integer + 0 or 1 representing membership in part 0 or part 1 of the bipartite graph. 
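+
+    As a small sketch of the labeling (an editor's illustration, not part of
+    the upstream docstring; ``coo_array`` requires SciPy >= 1.8): rows map to
+    nodes ``0..n-1`` and columns to nodes ``n..n+m-1``.
+
+    >>> import networkx as nx
+    >>> import scipy.sparse as sp
+    >>> A = sp.coo_array([[1, 0], [0, 2]])
+    >>> G = nx.bipartite.from_biadjacency_matrix(A)
+    >>> G.number_of_nodes(), G.number_of_edges()
+    (4, 2)
+    >>> [G.nodes[n]["bipartite"] for n in G]
+    [0, 0, 1, 1]
+    >>> int(G.edges[1, 3]["weight"])
+    2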
+ + If `create_using` is an instance of :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph` and the entries of `A` are of + type :class:`int`, then this function returns a multigraph (of the same + type as `create_using`) with parallel edges. In this case, `edge_attribute` + will be ignored. + + See Also + -------- + biadjacency_matrix + from_numpy_array + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n), bipartite=0) + G.add_nodes_from(range(n, n + m), bipartite=1) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = ((u, n + v, d) for (u, v, d) in _generate_weighted_edges(A)) + # If the entries in the adjacency matrix are integers and the graph is a + # multigraph, then create parallel edges, each with weight 1, for each + # entry in the adjacency matrix. Otherwise, create one edge for each + # positive entry in the adjacency matrix and set the weight of that edge to + # be the entry in the matrix. + if A.dtype.kind in ("i", "u") and G.is_multigraph(): + chain = itertools.chain.from_iterable + triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples) + G.add_weighted_edges_from(triples, weight=edge_attribute) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/projection.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/projection.py new file mode 100644 index 0000000..7c2a26c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/projection.py @@ -0,0 +1,526 @@ +"""One-mode (unipartite) projections of bipartite graphs.""" + +import networkx as nx +from networkx.exception import NetworkXAlgorithmError +from networkx.utils import not_implemented_for + +__all__ = [ + "projected_graph", + "weighted_projected_graph", + "collaboration_weighted_projected_graph", + "overlap_weighted_projected_graph", + "generic_weighted_projected_graph", +] + + +@nx._dispatchable( + graphs="B", preserve_node_attrs=True, preserve_graph_attrs=True, returns_graph=True +) +def projected_graph(B, nodes, multigraph=False): + r"""Returns the projection of B onto one of its node sets. + + Returns the graph G that is the projection of the bipartite graph B + onto the specified nodes. They retain their attributes and are connected + in G if they have a common neighbor in B. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + multigraph: bool (default=False) + If True return a multigraph where the multiple edges represent multiple + shared neighbors. The edge key in the multigraph is assigned to the + label of the shared neighbor. + + Returns + ------- + Graph : NetworkX graph or multigraph + A graph that is the projection onto the given nodes. 
+ + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(4) + >>> G = bipartite.projected_graph(B, [1, 3]) + >>> list(G) + [1, 3] + >>> list(G.edges()) + [(1, 3)] + + If nodes `a` and `b` are connected through both nodes 1 and 2 then + building a multigraph results in two edges in the projection onto + [`a`, `b`]: + + >>> B = nx.Graph() + >>> B.add_edges_from([("a", 1), ("b", 1), ("a", 2), ("b", 2)]) + >>> G = bipartite.projected_graph(B, ["a", "b"], multigraph=True) + >>> print([sorted((u, v)) for u, v in G.edges()]) + [['a', 'b'], ['a', 'b']] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + Returns a simple graph that is the projection of the bipartite graph B + onto the set of nodes given in list nodes. If multigraph=True then + a multigraph is returned with an edge for every shared neighbor. + + Directed graphs are allowed as input. The output will then also + be a directed graph with edges if there is a directed path between + the nodes. + + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph + """ + if B.is_multigraph(): + raise nx.NetworkXError("not defined for multigraphs") + if B.is_directed(): + directed = True + if multigraph: + G = nx.MultiDiGraph() + else: + G = nx.DiGraph() + else: + directed = False + if multigraph: + G = nx.MultiGraph() + else: + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + nbrs2 = {v for nbr in B[u] for v in B[nbr] if v != u} + if multigraph: + for n in nbrs2: + if directed: + links = set(B[u]) & set(B.pred[n]) + else: + links = set(B[u]) & set(B[n]) + for l in links: + if not G.has_edge(u, n, l): + G.add_edge(u, n, key=l) + else: + G.add_edges_from((u, n) for n in nbrs2) + return G + + +@not_implemented_for("multigraph") +@nx._dispatchable(graphs="B", returns_graph=True) +def weighted_projected_graph(B, nodes, ratio=False): + r"""Returns a weighted projection of B onto one of its node sets. + + The weighted projected graph is the projection of the bipartite + network B onto the specified nodes with weights representing the + number of shared neighbors or the ratio between actual shared + neighbors and possible shared neighbors if ``ratio is True`` [1]_. + The nodes retain their attributes and are connected in the resulting + graph if they have an edge to a common node in the original graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Distinct nodes to project onto (the "bottom" nodes). + + ratio: Bool (default=False) + If True, edge weight is the ratio between actual shared neighbors + and maximum possible shared neighbors (i.e., the size of the other + node set). If False, edge weight is the number of shared neighbors. + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. 
+ + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(4) + >>> G = bipartite.weighted_projected_graph(B, [1, 3]) + >>> list(G) + [1, 3] + >>> list(G.edges(data=True)) + [(1, 3, {'weight': 1})] + >>> G = bipartite.weighted_projected_graph(B, [1, 3], ratio=True) + >>> list(G.edges(data=True)) + [(1, 3, {'weight': 0.5})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite, or that + the input nodes are distinct. However, if the number of input nodes is + greater than or equal to the number of nodes in the graph B, an exception + is raised. If the nodes are not distinct and this error is not raised, the + output weights will be incorrect. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + n_top = len(B) - len(nodes) + + if n_top < 1: + raise NetworkXAlgorithmError( + f"the size of the nodes to project onto ({len(nodes)}) is >= the graph size ({len(B)}).\n" + "They are either not a valid bipartite partition or contain duplicates" + ) + + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + common = unbrs & vnbrs + if not ratio: + weight = len(common) + else: + weight = len(common) / n_top + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +@nx._dispatchable(graphs="B", returns_graph=True) +def collaboration_weighted_projected_graph(B, nodes): + r"""Newman's weighted projection of B onto one of its node sets. + + The collaboration weighted projection is the projection of the + bipartite network B onto the specified nodes with weights assigned + using Newman's collaboration model [1]_: + + .. math:: + + w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1} + + where `u` and `v` are nodes from the bottom bipartite node set, + and `k` is a node of the top node set. + The value `d_k` is the degree of node `k` in the bipartite + network and `\delta_{u}^{k}` is 1 if node `u` is + linked to node `k` in the original bipartite graph or 0 otherwise. + + The nodes retain their attributes and are connected in the resulting + graph if they have an edge to a common node in the original bipartite + graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> B.add_edge(1, 5) + >>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5]) + >>> list(G) + [0, 2, 4, 5] + >>> for edge in sorted(G.edges(data=True)): + ... 
print(edge) + (0, 2, {'weight': 0.5}) + (0, 5, {'weight': 0.5}) + (2, 4, {'weight': 1.0}) + (2, 5, {'weight': 0.5}) + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Scientific collaboration networks: II. + Shortest paths, weighted networks, and centrality, + M. E. J. Newman, Phys. Rev. E 64, 016132 (2001). + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr] if n != u} + for v in nbrs2: + vnbrs = set(pred[v]) + common_degree = (len(B[n]) for n in unbrs & vnbrs) + weight = sum(1.0 / (deg - 1) for deg in common_degree if deg > 1) + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +@nx._dispatchable(graphs="B", returns_graph=True) +def overlap_weighted_projected_graph(B, nodes, jaccard=True): + r"""Overlap weighted projection of B onto one of its node sets. + + The overlap weighted projection is the projection of the bipartite + network B onto the specified nodes with weights representing + the Jaccard index between the neighborhoods of the two nodes in the + original bipartite network [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|} + + or, if the parameter 'jaccard' is False, the number of common + neighbors divided by the minimum degree of the two nodes in the + original bipartite graph [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{\min(|N(u)|, |N(v)|)} + + The nodes retain their attributes and are connected in the resulting + graph if they have an edge to a common node in the original bipartite graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + jaccard: Bool (default=True) + If True, use the Jaccard index as the edge weight; if False, use the + number of common neighbors divided by the minimum degree. + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> nodes = [0, 2, 4] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes) + >>> list(G) + [0, 2, 4] + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes, jaccard=False) + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation + Networks. In Carrington, P. and Scott, J. 
(eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + if jaccard: + wt = len(unbrs & vnbrs) / len(unbrs | vnbrs) + else: + wt = len(unbrs & vnbrs) / min(len(unbrs), len(vnbrs)) + G.add_edge(u, v, weight=wt) + return G + + +@not_implemented_for("multigraph") +@nx._dispatchable(graphs="B", preserve_all_attrs=True, returns_graph=True) +def generic_weighted_projected_graph(B, nodes, weight_function=None): + r"""Weighted projection of B with a user-specified weight function. + + The bipartite network B is projected onto the specified nodes + with weights computed by a user-specified function. This function + must accept the graph and two nodes as parameters and return an + integer or a float. + + The nodes retain their attributes and are connected in the resulting graph + if they have an edge to a common node in the original graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + weight_function : function + This function must accept as parameters the same input graph that + this function receives, and two nodes; it must return an integer or a + float. The default function computes the number of shared neighbors. + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> # Define some custom weight functions + >>> def jaccard(G, u, v): + ... unbrs = set(G[u]) + ... vnbrs = set(G[v]) + ... return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs) + >>> def my_weight(G, u, v, weight="weight"): + ... w = 0 + ... for nbr in set(G[u]) & set(G[v]): + ... w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1) + ... return w + >>> # A complete bipartite graph with 4 nodes and 4 edges + >>> B = nx.complete_bipartite_graph(2, 2) + >>> # Add some arbitrary weight to the edges + >>> for i, (u, v) in enumerate(B.edges()): + ... B.edges[u, v]["weight"] = i + 1 + >>> for edge in B.edges(data=True): + ... print(edge) + (0, 2, {'weight': 1}) + (0, 3, {'weight': 2}) + (1, 2, {'weight': 3}) + (1, 3, {'weight': 4}) + >>> # By default, the weight is the number of shared neighbors + >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1]) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 2})] + >>> # To specify a custom weight function use the weight_function parameter + >>> G = bipartite.generic_weighted_projected_graph( + ... B, [0, 1], weight_function=jaccard + ... ) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 1.0})] + >>> G = bipartite.generic_weighted_projected_graph( + ... B, [0, 1], weight_function=my_weight + ... ) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 10})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. 
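+
+    For a directed bipartite graph, the default weight function intersects
+    the successors of ``u`` with the predecessors of ``v``. A minimal sketch
+    (the tiny directed graph below is an editor's illustration, not part of
+    the upstream docstring):
+
+    >>> import networkx as nx
+    >>> from networkx.algorithms import bipartite
+    >>> D = nx.DiGraph([(0, 2), (2, 1)])  # bottom nodes 0 and 1, top node 2
+    >>> G = bipartite.generic_weighted_projected_graph(D, [0, 1])
+    >>> list(G.edges(data=True))
+    [(0, 1, {'weight': 1})]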
+ + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + projected_graph + + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + if weight_function is None: + + def weight_function(G, u, v): + # Notice that we use set(pred[v]) for handling the directed case. + return len(set(G[u]) & set(pred[v])) + + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + nbrs2 = {n for nbr in set(B[u]) for n in B[nbr]} - {u} + for v in nbrs2: + weight = weight_function(B, u, v) + G.add_edge(u, v, weight=weight) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/redundancy.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/redundancy.py new file mode 100644 index 0000000..b622b97 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/redundancy.py @@ -0,0 +1,112 @@ +"""Node redundancy for bipartite graphs.""" + +from itertools import combinations + +import networkx as nx +from networkx import NetworkXError + +__all__ = ["node_redundancy"] + + +@nx._dispatchable +def node_redundancy(G, nodes=None): + r"""Computes the node redundancy coefficients for the nodes in the bipartite + graph `G`. + + The redundancy coefficient of a node `v` is the fraction of pairs of + neighbors of `v` that are both linked to other nodes. In a one-mode + projection these nodes would be linked together even if `v` were + not there. + + More formally, for any vertex `v`, the *redundancy coefficient of `v`* is + defined by + + .. math:: + + rc(v) = \frac{|\{\{u, w\} \subseteq N(v), + \: \exists v' \neq v,\: (v',u) \in E\: + \mathrm{and}\: (v',w) \in E\}|}{ \frac{|N(v)|(|N(v)|-1)}{2}}, + + where `N(v)` is the set of neighbors of `v` in `G`. + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or iterable (optional) + Compute redundancy for these nodes. The default is all nodes in G. + + Returns + ------- + redundancy : dictionary + A dictionary keyed by node with the node redundancy value. + + Examples + -------- + Compute the redundancy coefficient of each node in a graph:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> rc[0] + 1.0 + + Compute the average redundancy for the graph:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> sum(rc.values()) / len(G) + 1.0 + + Compute the average redundancy for a set of nodes:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> nodes = [0, 2] + >>> sum(rc[n] for n in nodes) / len(nodes) + 1.0 + + Raises + ------ + NetworkXError + If any of the nodes in the graph (or in `nodes`, if specified) has + (out-)degree less than two (which would result in division by zero, + according to the definition of the redundancy coefficient). + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. + + """ + if nodes is None: + nodes = G + if any(len(G[v]) < 2 for v in nodes): + raise NetworkXError( + "Cannot compute redundancy coefficient for a node" + " that has fewer than two neighbors." 
+ ) + # TODO This can be trivially parallelized. + return {v: _node_redundancy(G, v) for v in nodes} + + +def _node_redundancy(G, v): + """Returns the redundancy of the node `v` in the bipartite graph `G`. + + If `G` is a graph with `n` nodes, the redundancy of a node is the ratio + of the "overlap" of `v` to the maximum possible overlap of `v` + according to its degree. The overlap of `v` is the number of pairs of + neighbors that have mutual neighbors themselves, other than `v`. + + `v` must have at least two neighbors in `G`. + + """ + n = len(G[v]) + overlap = sum( + 1 for (u, w) in combinations(G[v], 2) if (set(G[u]) & set(G[w])) - {v} + ) + return (2 * overlap) / (n * (n - 1)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/spectral.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/spectral.py new file mode 100644 index 0000000..cb9388f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/spectral.py @@ -0,0 +1,69 @@ +""" +Spectral bipartivity measure. +""" + +import networkx as nx + +__all__ = ["spectral_bipartivity"] + + +@nx._dispatchable(edge_attrs="weight") +def spectral_bipartivity(G, nodes=None, weight="weight"): + """Returns the spectral bipartivity. + + Parameters + ---------- + G : NetworkX graph + + nodes : list or container optional(default is all nodes) + Nodes to return value of spectral bipartivity contribution. + + weight : string or None optional (default = 'weight') + Edge data key to use for edge weights. If None, weights set to 1. + + Returns + ------- + sb : float or dict + A single number if the keyword nodes is not specified, or + a dictionary keyed by node with the spectral bipartivity contribution + of that node as the value. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> bipartite.spectral_bipartivity(G) + 1.0 + + Notes + ----- + This implementation uses Numpy (dense) matrices which are not efficient + for storing large sparse graphs. + + See Also + -------- + color + + References + ---------- + .. [1] E. Estrada and J. A. 
Rodríguez-Velázquez, "Spectral measures of + bipartivity in complex networks", PhysRev E 72, 046105 (2005) + """ + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist, weight=weight) + expA = sp.linalg.expm(A) + expmA = sp.linalg.expm(-A) + coshA = 0.5 * (expA + expmA) + if nodes is None: + # return single number for entire graph + return float(coshA.diagonal().sum() / expA.diagonal().sum()) + else: + # contribution for individual nodes + index = dict(zip(nodelist, range(len(nodelist)))) + sb = {} + for n in nodes: + i = index[n] + sb[n] = coshA.item(i, i) / expA.item(i, i) + return sb diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..1eada55 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-312.pyc new file mode 100644 index 0000000..8f66226 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-312.pyc new file mode 100644 index 0000000..9ed5225 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-312.pyc new file mode 100644 index 0000000..5edfdfc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-312.pyc new file mode 100644 index 0000000..c786d98 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-312.pyc new file mode 100644 index 0000000..128226d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-312.pyc new file mode 100644 index 0000000..fc99923 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-312.pyc new file mode 100644 index 0000000..5bf4809 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_link_analysis.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_link_analysis.cpython-312.pyc new file mode 100644 index 0000000..4bf9472 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_link_analysis.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-312.pyc new file mode 100644 index 0000000..32497d5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-312.pyc new file mode 100644 index 0000000..21c350a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-312.pyc new file mode 100644 index 0000000..1f027e9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-312.pyc new file mode 100644 index 0000000..458f917 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-312.pyc new file mode 100644 index 0000000..3b303fd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_basic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_basic.py new file mode 100644 index 0000000..655506b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_basic.py @@ -0,0 +1,125 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteBasic: + def test_is_bipartite(self): + assert bipartite.is_bipartite(nx.path_graph(4)) + assert bipartite.is_bipartite(nx.DiGraph([(1, 0)])) + assert not bipartite.is_bipartite(nx.complete_graph(3)) + + def test_bipartite_color(self): + G = nx.path_graph(4) + c = bipartite.color(G) + assert c == {0: 1, 1: 0, 2: 1, 3: 0} + + def test_not_bipartite_color(self): + with pytest.raises(nx.NetworkXError): + c = bipartite.color(nx.complete_graph(4)) + + def test_bipartite_directed(self): + G = bipartite.random_graph(10, 10, 0.1, directed=True) + assert bipartite.is_bipartite(G) + + def test_bipartite_sets(self): + G = nx.path_graph(4) + X, Y = bipartite.sets(G) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_directed(self): + G = nx.path_graph(4) + D = G.to_directed() + X, Y = bipartite.sets(D) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_given_top_nodes(self): + G = nx.path_graph(4) + top_nodes = [0, 2] + X, Y = bipartite.sets(G, top_nodes) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + G = nx.path_graph(4) + G.add_edges_from([(5, 6), (6, 7)]) + X, Y = bipartite.sets(G) + + def test_is_bipartite_node_set(self): + G = nx.path_graph(4) + + with pytest.raises(nx.AmbiguousSolution): + bipartite.is_bipartite_node_set(G, [1, 1, 2, 3]) + + assert bipartite.is_bipartite_node_set(G, [0, 2]) + assert bipartite.is_bipartite_node_set(G, [1, 3]) + assert not bipartite.is_bipartite_node_set(G, [1, 2]) + G.add_edge(10, 20) + assert bipartite.is_bipartite_node_set(G, [0, 2, 10]) + assert bipartite.is_bipartite_node_set(G, [0, 2, 20]) + assert bipartite.is_bipartite_node_set(G, [1, 3, 10]) + assert bipartite.is_bipartite_node_set(G, [1, 3, 20]) + + def test_bipartite_density(self): + G = nx.path_graph(5) + X, Y = bipartite.sets(G) + density = len(list(G.edges())) / (len(X) * len(Y)) + assert bipartite.density(G, X) == density + D = nx.DiGraph(G.edges()) + assert bipartite.density(D, X) == density / 2.0 + assert bipartite.density(nx.Graph(), {}) == 0.0 + + def test_bipartite_degrees(self): + G = nx.path_graph(5) + X = {1, 3} + Y = {0, 2, 4} + u, d = bipartite.degrees(G, Y) + assert dict(u) == {1: 2, 3: 2} + assert dict(d) == {0: 1, 2: 2, 4: 1} + + def test_bipartite_weighted_degrees(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=0.1, other=0.2) + X = {1, 3} + Y = {0, 2, 4} + u, d = bipartite.degrees(G, Y, weight="weight") + assert dict(u) == {1: 1.1, 3: 2} + assert dict(d) == {0: 0.1, 2: 2, 4: 1} + u, d = bipartite.degrees(G, Y, weight="other") + assert dict(u) == {1: 1.2, 3: 2} + assert dict(d) == {0: 0.2, 2: 2, 4: 1} + + def test_biadjacency_matrix_weight(self): + pytest.importorskip("scipy") + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2, other=4) + X = [1, 3] + Y = [0, 2, 4] + M = bipartite.biadjacency_matrix(G, X, weight="weight") + assert M[0, 0] == 2 + M = bipartite.biadjacency_matrix(G, X, weight="other") + assert M[0, 0] == 4 + + def test_biadjacency_matrix(self): + pytest.importorskip("scipy") + tops = [2, 5, 10] + 
bots = [5, 10, 15] + for i in range(len(tops)): + G = bipartite.random_graph(tops[i], bots[i], 0.2) + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + M = bipartite.biadjacency_matrix(G, top) + assert M.shape[0] == tops[i] + assert M.shape[1] == bots[i] + + def test_biadjacency_matrix_order(self): + pytest.importorskip("scipy") + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2) + X = [3, 1] + Y = [4, 2, 0] + M = bipartite.biadjacency_matrix(G, X, Y, weight="weight") + assert M[1, 2] == 2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py new file mode 100644 index 0000000..19fb5d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py @@ -0,0 +1,192 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteCentrality: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.K3 = nx.complete_bipartite_graph(3, 3) + cls.C4 = nx.cycle_graph(4) + cls.davis = nx.davis_southern_women_graph() + cls.top_nodes = [ + n for n, d in cls.davis.nodes(data=True) if d["bipartite"] == 0 + ] + + def test_degree_centrality(self): + d = bipartite.degree_centrality(self.P4, [1, 3]) + answer = {0: 0.5, 1: 1.0, 2: 1.0, 3: 0.5} + assert d == answer + d = bipartite.degree_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert d == answer + d = bipartite.degree_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert d == answer + + def test_betweenness_centrality(self): + c = bipartite.betweenness_centrality(self.P4, [1, 3]) + answer = {0: 0.0, 1: 1.0, 2: 1.0, 3: 0.0} + assert c == answer + c = bipartite.betweenness_centrality(self.K3, [0, 1, 2]) + answer = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125} + assert c == answer + c = bipartite.betweenness_centrality(self.C4, [0, 2]) + answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + assert c == answer + + def test_closeness_centrality(self): + c = bipartite.closeness_centrality(self.P4, [1, 3]) + answer = {0: 2.0 / 3, 1: 1.0, 2: 1.0, 3: 2.0 / 3} + assert c == answer + c = bipartite.closeness_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert c == answer + c = bipartite.closeness_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert c == answer + G = nx.Graph() + G.add_node(0) + G.add_node(1) + c = bipartite.closeness_centrality(G, [0]) + assert c == {0: 0.0, 1: 0.0} + c = bipartite.closeness_centrality(G, [1]) + assert c == {0: 0.0, 1: 0.0} + + def test_bipartite_closeness_centrality_unconnected(self): + G = nx.complete_bipartite_graph(3, 3) + G.add_edge(6, 7) + c = bipartite.closeness_centrality(G, [0, 2, 4, 6], normalized=False) + answer = { + 0: 10.0 / 7, + 2: 10.0 / 7, + 4: 10.0 / 7, + 6: 10.0, + 1: 10.0 / 7, + 3: 10.0 / 7, + 5: 10.0 / 7, + 7: 10.0, + } + assert c == answer + + def test_davis_degree_centrality(self): + G = self.davis + deg = bipartite.degree_centrality(G, self.top_nodes) + answer = { + "E8": 0.78, + "E9": 0.67, + "E7": 0.56, + "Nora Fayette": 0.57, + "Evelyn Jefferson": 0.57, + "Theresa Anderson": 0.57, + "E6": 0.44, + "Sylvia Avondale": 0.50, + "Laura Mandeville": 0.50, + "Brenda Rogers": 0.50, + "Katherina Rogers": 0.43, + "E5": 0.44, + "Helen Lloyd": 0.36, + "E3": 0.33, + "Ruth DeSand": 0.29, + "Verne 
Sanderson": 0.29, + "E12": 0.33, + "Myra Liddel": 0.29, + "E11": 0.22, + "Eleanor Nye": 0.29, + "Frances Anderson": 0.29, + "Pearl Oglethorpe": 0.21, + "E4": 0.22, + "Charlotte McDowd": 0.29, + "E10": 0.28, + "Olivia Carleton": 0.14, + "Flora Price": 0.14, + "E2": 0.17, + "E1": 0.17, + "Dorothy Murchison": 0.14, + "E13": 0.17, + "E14": 0.17, + } + for node, value in answer.items(): + assert value == pytest.approx(deg[node], abs=1e-2) + + def test_davis_betweenness_centrality(self): + G = self.davis + bet = bipartite.betweenness_centrality(G, self.top_nodes) + answer = { + "E8": 0.24, + "E9": 0.23, + "E7": 0.13, + "Nora Fayette": 0.11, + "Evelyn Jefferson": 0.10, + "Theresa Anderson": 0.09, + "E6": 0.07, + "Sylvia Avondale": 0.07, + "Laura Mandeville": 0.05, + "Brenda Rogers": 0.05, + "Katherina Rogers": 0.05, + "E5": 0.04, + "Helen Lloyd": 0.04, + "E3": 0.02, + "Ruth DeSand": 0.02, + "Verne Sanderson": 0.02, + "E12": 0.02, + "Myra Liddel": 0.02, + "E11": 0.02, + "Eleanor Nye": 0.01, + "Frances Anderson": 0.01, + "Pearl Oglethorpe": 0.01, + "E4": 0.01, + "Charlotte McDowd": 0.01, + "E10": 0.01, + "Olivia Carleton": 0.01, + "Flora Price": 0.01, + "E2": 0.00, + "E1": 0.00, + "Dorothy Murchison": 0.00, + "E13": 0.00, + "E14": 0.00, + } + for node, value in answer.items(): + assert value == pytest.approx(bet[node], abs=1e-2) + + def test_davis_closeness_centrality(self): + G = self.davis + clos = bipartite.closeness_centrality(G, self.top_nodes) + answer = { + "E8": 0.85, + "E9": 0.79, + "E7": 0.73, + "Nora Fayette": 0.80, + "Evelyn Jefferson": 0.80, + "Theresa Anderson": 0.80, + "E6": 0.69, + "Sylvia Avondale": 0.77, + "Laura Mandeville": 0.73, + "Brenda Rogers": 0.73, + "Katherina Rogers": 0.73, + "E5": 0.59, + "Helen Lloyd": 0.73, + "E3": 0.56, + "Ruth DeSand": 0.71, + "Verne Sanderson": 0.71, + "E12": 0.56, + "Myra Liddel": 0.69, + "E11": 0.54, + "Eleanor Nye": 0.67, + "Frances Anderson": 0.67, + "Pearl Oglethorpe": 0.67, + "E4": 0.54, + "Charlotte McDowd": 0.60, + "E10": 0.55, + "Olivia Carleton": 0.59, + "Flora Price": 0.59, + "E2": 0.52, + "E1": 0.52, + "Dorothy Murchison": 0.65, + "E13": 0.52, + "E14": 0.52, + } + for node, value in answer.items(): + assert value == pytest.approx(clos[node], abs=1e-2) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py new file mode 100644 index 0000000..72e2dba --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py @@ -0,0 +1,84 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.algorithms.bipartite.cluster import cc_dot, cc_max, cc_min + + +def test_pairwise_bipartite_cc_functions(): + # Test functions for different kinds of bipartite clustering coefficients + # between pairs of nodes using 3 example graphs from figure 5 p. 
40 + # Latapy et al (2008) + G1 = nx.Graph([(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7)]) + G2 = nx.Graph([(0, 2), (0, 3), (0, 4), (1, 3), (1, 4), (1, 5)]) + G3 = nx.Graph( + [(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9)] + ) + result = { + 0: [1 / 3.0, 2 / 3.0, 2 / 5.0], + 1: [1 / 2.0, 2 / 3.0, 2 / 3.0], + 2: [2 / 8.0, 2 / 5.0, 2 / 5.0], + } + for i, G in enumerate([G1, G2, G3]): + assert bipartite.is_bipartite(G) + assert cc_dot(set(G[0]), set(G[1])) == result[i][0] + assert cc_min(set(G[0]), set(G[1])) == result[i][1] + assert cc_max(set(G[0]), set(G[1])) == result[i][2] + + +def test_star_graph(): + G = nx.star_graph(3) + # all modes are the same + answer = {0: 0, 1: 1, 2: 1, 3: 1} + assert bipartite.clustering(G, mode="dot") == answer + assert bipartite.clustering(G, mode="min") == answer + assert bipartite.clustering(G, mode="max") == answer + + +def test_not_bipartite(): + with pytest.raises(nx.NetworkXError): + bipartite.clustering(nx.complete_graph(4)) + + +def test_bad_mode(): + with pytest.raises(nx.NetworkXError): + bipartite.clustering(nx.path_graph(4), mode="foo") + + +def test_path_graph(): + G = nx.path_graph(4) + answer = {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5} + assert bipartite.clustering(G, mode="dot") == answer + assert bipartite.clustering(G, mode="max") == answer + answer = {0: 1, 1: 1, 2: 1, 3: 1} + assert bipartite.clustering(G, mode="min") == answer + + +def test_average_path_graph(): + G = nx.path_graph(4) + assert bipartite.average_clustering(G, mode="dot") == 0.5 + assert bipartite.average_clustering(G, mode="max") == 0.5 + assert bipartite.average_clustering(G, mode="min") == 1 + + +def test_ra_clustering_davis(): + G = nx.davis_southern_women_graph() + cc4 = round(bipartite.robins_alexander_clustering(G), 3) + assert cc4 == 0.468 + + +def test_ra_clustering_square(): + G = nx.path_graph(4) + G.add_edge(0, 3) + assert bipartite.robins_alexander_clustering(G) == 1.0 + + +def test_ra_clustering_zero(): + G = nx.Graph() + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_nodes_from(range(4)) + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_edges_from([(0, 1), (2, 3), (3, 4)]) + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_edge(1, 2) + assert bipartite.robins_alexander_clustering(G) == 0 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_covering.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_covering.py new file mode 100644 index 0000000..9507e13 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_covering.py @@ -0,0 +1,33 @@ +import networkx as nx +from networkx.algorithms import bipartite + + +class TestMinEdgeCover: + """Tests for :func:`networkx.algorithms.bipartite.min_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert bipartite.min_edge_cover(G) == set() + + def test_graph_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert bipartite.min_edge_cover(G) == {(0, 1), (1, 0)} + + def test_bipartite_default(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + min_cover = bipartite.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 + + def test_bipartite_explicit(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + 
G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + min_cover = bipartite.min_edge_cover(G, bipartite.eppstein_matching) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py new file mode 100644 index 0000000..66be8a2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py @@ -0,0 +1,240 @@ +""" +Unit tests for bipartite edgelists. +""" + +import io + +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestEdgelist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_nodes_from(["a", "c", "e"], bipartite=0) + cls.G.add_nodes_from(["b", "d", "f"], bipartite=1) + cls.G.add_node("g", bipartite=0) + cls.DG = nx.DiGraph(cls.G) + cls.MG = nx.MultiGraph() + cls.MG.add_edges_from([(1, 2), (1, 2), (1, 2)]) + cls.MG.add_node(1, bipartite=0) + cls.MG.add_node(2, bipartite=1) + + def test_read_edgelist_1(self): + s = b"""\ +# comment line +1 2 +# comment line +2 3 +""" + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + def test_read_edgelist_3(self): + s = b"""\ +# comment line +1 2 {'weight':2.0} +# comment line +2 3 {'weight':3.0} +""" + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int, data=False) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int, data=True) + assert edges_equal( + G.edges(data=True), [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})] + ) + + def test_write_edgelist_1(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=False) + fh.seek(0) + assert fh.read() == b"1 2\n3 2\n" + + def test_write_edgelist_2(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {}\n3 2 {}\n" + + def test_write_edgelist_3(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {'weight': 2.0}\n3 2 {'weight': 3.0}\n" + + def test_write_edgelist_4(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=[("weight")]) + fh.seek(0) + assert fh.read() == b"1 2 2.0\n3 2 3.0\n" + + def test_unicode(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + 
G.add_node("Radiohead", bipartite=1) + + fname = tmp_path / "edgelist.txt" + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname) + assert graphs_equal(G, H) + + def test_latin1_issue(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + + fname = tmp_path / "edgelist.txt" + with pytest.raises(UnicodeEncodeError): + bipartite.write_edgelist(G, fname, encoding="latin-1") + + def test_latin1(self, tmp_path): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + + fname = tmp_path / "edgelist.txt" + bipartite.write_edgelist(G, fname, encoding="latin-1") + H = bipartite.read_edgelist(fname, encoding="latin-1") + assert graphs_equal(G, H) + + def test_edgelist_graph(self, tmp_path): + G = self.G + fname = tmp_path / "edgelist.txt" + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname) + H2 = bipartite.read_edgelist(fname) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_integers(self, tmp_path): + G = nx.convert_node_labels_to_integers(self.G) + fname = tmp_path / "edgelist.txt" + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname, nodetype=int) + # isolated nodes are not written in edgelist + G.remove_nodes_from(list(nx.isolates(G))) + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_multigraph(self, tmp_path): + G = self.MG + fname = tmp_path / "edgelist.txt" + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_empty_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + bytesIO = io.BytesIO() + bipartite.write_edgelist(nx.DiGraph(), bytesIO) + + def test_raise_attribute(self): + with pytest.raises(AttributeError): + G = nx.path_graph(4) + bytesIO = io.BytesIO() + bipartite.write_edgelist(G, bytesIO) + + def test_parse_edgelist(self): + """Tests for conditions specific to + parse_edge_list method""" + + # ignore strings of length less than 2 + lines = ["1 2", "2 3", "3 1", "4", " "] + G = bipartite.parse_edgelist(lines, nodetype=int) + assert list(G.nodes) == [1, 2, 3] + + # Exception raised when node is not convertible + # to specified data type + with pytest.raises(TypeError, match=".*Failed to convert nodes"): + lines = ["a b", "b c", "c a"] + G = bipartite.parse_edgelist(lines, nodetype=int) + + # Exception raised when format of data is not + # convertible to dictionary object + with pytest.raises(TypeError, match=".*Failed to convert edge data"): + lines = ["1 2 3", "2 3 4", "3 1 2"] + G = bipartite.parse_edgelist(lines, nodetype=int) + + # Exception raised when edge data and data + # keys are not of same length + with pytest.raises(IndexError): + lines = ["1 2 3 4", "2 3 4"] + G = bipartite.parse_edgelist( + lines, nodetype=int, 
data=[("weight", int), ("key", int)] + ) + + # Exception raised when edge data is not + # convertible to specified data type + with pytest.raises(TypeError, match=".*Failed to convert key data"): + lines = ["1 2 3 a", "2 3 4 b"] + G = bipartite.parse_edgelist( + lines, nodetype=int, data=[("weight", int), ("key", int)] + ) + + +def test_bipartite_edgelist_consistent_strip_handling(): + """See gh-7462 + + Input when printed looks like: + + A B interaction 2 + B C interaction 4 + C A interaction + + Note the trailing \\t in the last line, which indicates the existence of + an empty data field. + """ + lines = io.StringIO( + "A\tB\tinteraction\t2\nB\tC\tinteraction\t4\nC\tA\tinteraction\t" + ) + descr = [("type", str), ("weight", str)] + # Should not raise + G = nx.bipartite.parse_edgelist(lines, delimiter="\t", data=descr) + expected = [("A", "B", "2"), ("A", "C", ""), ("B", "C", "4")] + assert sorted(G.edges(data="weight")) == expected diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py new file mode 100644 index 0000000..17b7124 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py @@ -0,0 +1,334 @@ +import pytest + +import networkx as nx + + +def test_selfloops_raises(): + G = nx.ladder_graph(3) + G.add_edge(0, 0) + with pytest.raises(nx.NetworkXError, match=".*not bipartite"): + nx.bipartite.maximal_extendability(G) + + +def test_disconnected_raises(): + G = nx.ladder_graph(3) + G.add_node("a") + with pytest.raises(nx.NetworkXError, match=".*not connected"): + nx.bipartite.maximal_extendability(G) + + +def test_not_bipartite_raises(): + G = nx.complete_graph(5) + with pytest.raises(nx.NetworkXError, match=".*not bipartite"): + nx.bipartite.maximal_extendability(G) + + +def test_no_perfect_matching_raises(): + G = nx.Graph([(0, 1), (0, 2)]) + with pytest.raises(nx.NetworkXError, match=".*not contain a perfect matching"): + nx.bipartite.maximal_extendability(G) + + +def test_residual_graph_not_strongly_connected_raises(): + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + with pytest.raises( + nx.NetworkXError, match="The residual graph of G is not strongly connected" + ): + nx.bipartite.maximal_extendability(G) + + +def test_ladder_graph_is_1(): + G = nx.ladder_graph(3) + assert nx.bipartite.maximal_extendability(G) == 1 + + +def test_cubical_graph_is_2(): + G = nx.cubical_graph() + assert nx.bipartite.maximal_extendability(G) == 2 + + +def test_k_is_3(): + G = nx.Graph( + [ + (1, 6), + (1, 7), + (1, 8), + (1, 9), + (2, 6), + (2, 7), + (2, 8), + (2, 10), + (3, 6), + (3, 8), + (3, 9), + (3, 10), + (4, 7), + (4, 8), + (4, 9), + (4, 10), + (5, 6), + (5, 7), + (5, 9), + (5, 10), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 3 + + +def test_k_is_4(): + G = nx.Graph( + [ + (8, 1), + (8, 2), + (8, 3), + (8, 4), + (8, 5), + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 7), + (10, 1), + (10, 2), + (10, 3), + (10, 4), + (10, 6), + (11, 1), + (11, 2), + (11, 5), + (11, 6), + (11, 7), + (12, 1), + (12, 3), + (12, 5), + (12, 6), + (12, 7), + (13, 2), + (13, 4), + (13, 5), + (13, 6), + (13, 7), + (14, 3), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 4 + + +def test_k_is_5(): + G = nx.Graph( + [ + (8, 1), + (8, 2), + (8, 3), + (8, 4), + (8, 5), + (8, 6), + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 5), + (9, 7), + (10, 1), + (10, 2), + (10, 
3), + (10, 4), + (10, 6), + (10, 7), + (11, 1), + (11, 2), + (11, 3), + (11, 5), + (11, 6), + (11, 7), + (12, 1), + (12, 2), + (12, 4), + (12, 5), + (12, 6), + (12, 7), + (13, 1), + (13, 3), + (13, 4), + (13, 5), + (13, 6), + (13, 7), + (14, 2), + (14, 3), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 5 + + +def test_k_is_6(): + G = nx.Graph( + [ + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 5), + (9, 6), + (9, 7), + (10, 1), + (10, 2), + (10, 3), + (10, 4), + (10, 5), + (10, 6), + (10, 8), + (11, 1), + (11, 2), + (11, 3), + (11, 4), + (11, 5), + (11, 7), + (11, 8), + (12, 1), + (12, 2), + (12, 3), + (12, 4), + (12, 6), + (12, 7), + (12, 8), + (13, 1), + (13, 2), + (13, 3), + (13, 5), + (13, 6), + (13, 7), + (13, 8), + (14, 1), + (14, 2), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + (14, 8), + (15, 1), + (15, 3), + (15, 4), + (15, 5), + (15, 6), + (15, 7), + (15, 8), + (16, 2), + (16, 3), + (16, 4), + (16, 5), + (16, 6), + (16, 7), + (16, 8), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 6 + + +def test_k_is_7(): + G = nx.Graph( + [ + (1, 11), + (1, 12), + (1, 13), + (1, 14), + (1, 15), + (1, 16), + (1, 17), + (1, 18), + (2, 11), + (2, 12), + (2, 13), + (2, 14), + (2, 15), + (2, 16), + (2, 17), + (2, 19), + (3, 11), + (3, 12), + (3, 13), + (3, 14), + (3, 15), + (3, 16), + (3, 17), + (3, 20), + (4, 11), + (4, 12), + (4, 13), + (4, 14), + (4, 15), + (4, 16), + (4, 17), + (4, 18), + (4, 19), + (4, 20), + (5, 11), + (5, 12), + (5, 13), + (5, 14), + (5, 15), + (5, 16), + (5, 17), + (5, 18), + (5, 19), + (5, 20), + (6, 11), + (6, 12), + (6, 13), + (6, 14), + (6, 15), + (6, 16), + (6, 17), + (6, 18), + (6, 19), + (6, 20), + (7, 11), + (7, 12), + (7, 13), + (7, 14), + (7, 15), + (7, 16), + (7, 17), + (7, 18), + (7, 19), + (7, 20), + (8, 11), + (8, 12), + (8, 13), + (8, 14), + (8, 15), + (8, 16), + (8, 17), + (8, 18), + (8, 19), + (8, 20), + (9, 11), + (9, 12), + (9, 13), + (9, 14), + (9, 15), + (9, 16), + (9, 17), + (9, 18), + (9, 19), + (9, 20), + (10, 11), + (10, 12), + (10, 13), + (10, 14), + (10, 15), + (10, 16), + (10, 17), + (10, 18), + (10, 19), + (10, 20), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 7 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_generators.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_generators.py new file mode 100644 index 0000000..3c394db --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_generators.py @@ -0,0 +1,407 @@ +import numbers + +import pytest + +import networkx as nx + +from ..generators import ( + alternating_havel_hakimi_graph, + complete_bipartite_graph, + configuration_model, + gnmk_random_graph, + havel_hakimi_graph, + preferential_attachment_graph, + random_graph, + reverse_havel_hakimi_graph, +) + +""" +Generators - Bipartite +---------------------- +""" + + +class TestGeneratorsBipartite: + def test_complete_bipartite_graph(self): + G = complete_bipartite_graph(0, 0) + assert nx.is_isomorphic(G, nx.null_graph()) + + for i in [1, 5]: + G = complete_bipartite_graph(i, 0) + assert nx.is_isomorphic(G, nx.empty_graph(i)) + G = complete_bipartite_graph(0, i) + assert nx.is_isomorphic(G, nx.empty_graph(i)) + + G = complete_bipartite_graph(2, 2) + assert nx.is_isomorphic(G, nx.cycle_graph(4)) + + G = complete_bipartite_graph(1, 5) + assert nx.is_isomorphic(G, nx.star_graph(5)) + + G = complete_bipartite_graph(5, 1) + assert nx.is_isomorphic(G, 
nx.star_graph(5)) + + # complete_bipartite_graph(m1,m2) is a connected graph with + # m1+m2 nodes and m1*m2 edges + for m1, m2 in [(5, 11), (7, 3)]: + G = complete_bipartite_graph(m1, m2) + assert nx.number_of_nodes(G) == m1 + m2 + assert nx.number_of_edges(G) == m1 * m2 + + with pytest.raises(nx.NetworkXError): + complete_bipartite_graph(7, 3, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError): + complete_bipartite_graph(7, 3, create_using=nx.MultiDiGraph) + + mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph) + assert mG.is_multigraph() + assert sorted(mG.edges()) == sorted(G.edges()) + + mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph) + assert mG.is_multigraph() + assert sorted(mG.edges()) == sorted(G.edges()) + + mG = complete_bipartite_graph(7, 3) # default to Graph + assert sorted(mG.edges()) == sorted(G.edges()) + assert not mG.is_multigraph() + assert not mG.is_directed() + + # specify nodes rather than number of nodes + for n1, n2 in [([1, 2], "ab"), (3, 2), (3, "ab"), ("ab", 3)]: + G = complete_bipartite_graph(n1, n2) + if isinstance(n1, numbers.Integral): + if isinstance(n2, numbers.Integral): + n2 = range(n1, n1 + n2) + n1 = range(n1) + elif isinstance(n2, numbers.Integral): + n2 = range(n2) + edges = {(u, v) for u in n1 for v in n2} + assert edges == set(G.edges) + assert G.size() == len(edges) + + # raise when node sets are not distinct + for n1, n2 in [([1, 2], 3), (3, [1, 2]), ("abc", "bcd")]: + pytest.raises(nx.NetworkXError, complete_bipartite_graph, n1, n2) + + def test_configuration_model(self): + aseq = [] + bseq = [] + G = configuration_model(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = configuration_model(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, configuration_model, aseq, bseq) + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2, 2] + G = configuration_model(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = configuration_model(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = configuration_model(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph() + ) + pytest.raises( + nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, + configuration_model, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = havel_hakimi_graph(aseq, bseq) + assert 
sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 4 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, + havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_reverse_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, reverse_havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_alternating_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, alternating_havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = 
nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_preferential_attachment(self): + aseq = [3, 2, 1, 1] + G = preferential_attachment_graph(aseq, 0.5) + assert G.is_multigraph() + assert not G.is_directed() + + G = preferential_attachment_graph(aseq, 0.5, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + + def test_random_graph(self): + n = 10 + m = 20 + G = random_graph(n, m, 0.9) + assert len(G) == 30 + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + + def test_random_digraph(self): + n = 10 + m = 20 + G = random_graph(n, m, 0.9, directed=True) + assert len(G) == 30 + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + + def test_gnmk_random_graph(self): + n = 10 + m = 20 + edges = 100 + # set seed because sometimes it is not connected + # which raises an error in bipartite.sets(G) below. + G = gnmk_random_graph(n, m, edges, seed=1234) + assert len(G) == n + m + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + assert edges == len(list(G.edges())) + + def test_gnmk_random_graph_complete(self): + n = 10 + m = 20 + edges = 200 + G = gnmk_random_graph(n, m, edges) + assert len(G) == n + m + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + assert edges == len(list(G.edges())) + + @pytest.mark.parametrize("n", (4, range(4), {0, 1, 2, 3})) + @pytest.mark.parametrize("m", (range(4, 7), {4, 5, 6})) + def test_complete_bipartite_graph_str(self, n, m): + """Ensure G.name is consistent for all inputs accepted by nodes_or_number. 
+ See gh-7396""" + G = nx.complete_bipartite_graph(n, m) + ans = "Graph named 'complete_bipartite_graph(4, 3)' with 7 nodes and 12 edges" + assert str(G) == ans diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_link_analysis.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_link_analysis.py new file mode 100644 index 0000000..6e46056 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_link_analysis.py @@ -0,0 +1,218 @@ +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + +pytest.importorskip("scipy") + + +class TestBipartiteLinkAnalysis: + @classmethod + def setup_class(cls): + cls.davis_southern_women_graph = nx.davis_southern_women_graph() + cls.women_bipartite_set = { + node + for node, bipartite in cls.davis_southern_women_graph.nodes( + data="bipartite" + ) + if bipartite == 0 + } + cls.gnmk_random_graph = nx.bipartite.generators.gnmk_random_graph( + 5 * 10**2, 10**2, 5 * 10**2, seed=27 + ) + cls.gnmk_random_graph_top_nodes = { + node + for node, bipartite in cls.gnmk_random_graph.nodes(data="bipartite") + if bipartite == 0 + } + + def test_collaborative_filtering_birank(self): + elist = [ + ("u1", "p1", 5), + ("u2", "p1", 5), + ("u2", "p2", 4), + ("u3", "p1", 3), + ("u3", "p3", 2), + ] + item_recommendation_graph = nx.DiGraph() + item_recommendation_graph.add_weighted_edges_from(elist, weight="rating") + product_nodes = ("p1", "p2", "p3") + u1_query = { + product: rating + for _, product, rating in item_recommendation_graph.edges( + nbunch="u1", data="rating" + ) + } + u1_birank_results = bipartite.birank( + item_recommendation_graph, + product_nodes, + alpha=0.8, + beta=1.0, + top_personalization=u1_query, + weight="rating", + ) + + assert u1_birank_results["p2"] > u1_birank_results["p3"] + + u1_birank_results_unweighted = bipartite.birank( + item_recommendation_graph, + product_nodes, + alpha=0.8, + beta=1.0, + top_personalization=u1_query, + weight=None, + ) + + assert u1_birank_results_unweighted["p2"] == pytest.approx( + u1_birank_results_unweighted["p3"], rel=2e-6 + ) + + def test_davis_birank(self): + scores = bipartite.birank( + self.davis_southern_women_graph, self.women_bipartite_set + ) + answer = { + "Laura Mandeville": 0.07, + "Olivia Carleton": 0.04, + "Frances Anderson": 0.05, + "Pearl Oglethorpe": 0.04, + "Katherina Rogers": 0.06, + "Flora Price": 0.04, + "Dorothy Murchison": 0.04, + "Helen Lloyd": 0.06, + "Theresa Anderson": 0.07, + "Eleanor Nye": 0.05, + "Evelyn Jefferson": 0.07, + "Sylvia Avondale": 0.07, + "Charlotte McDowd": 0.05, + "Verne Sanderson": 0.05, + "Myra Liddel": 0.05, + "Brenda Rogers": 0.07, + "Ruth DeSand": 0.05, + "Nora Fayette": 0.07, + "E8": 0.11, + "E7": 0.09, + "E10": 0.07, + "E9": 0.1, + "E13": 0.05, + "E3": 0.07, + "E12": 0.07, + "E11": 0.06, + "E2": 0.05, + "E5": 0.08, + "E6": 0.08, + "E14": 0.05, + "E4": 0.06, + "E1": 0.05, + } + + for node, value in answer.items(): + assert scores[node] == pytest.approx(value, abs=1e-2) + + def test_davis_birank_with_personalization(self): + women_personalization = {"Laura Mandeville": 1} + scores = bipartite.birank( + self.davis_southern_women_graph, + self.women_bipartite_set, + top_personalization=women_personalization, + ) + answer = { + "Laura Mandeville": 0.29, + "Olivia Carleton": 0.02, + "Frances Anderson": 0.06, + "Pearl Oglethorpe": 0.04, + "Katherina Rogers": 0.04, + "Flora Price": 0.02, + "Dorothy Murchison": 0.03, + "Helen Lloyd": 
0.04, + "Theresa Anderson": 0.08, + "Eleanor Nye": 0.05, + "Evelyn Jefferson": 0.09, + "Sylvia Avondale": 0.05, + "Charlotte McDowd": 0.06, + "Verne Sanderson": 0.04, + "Myra Liddel": 0.03, + "Brenda Rogers": 0.08, + "Ruth DeSand": 0.05, + "Nora Fayette": 0.05, + "E8": 0.11, + "E7": 0.1, + "E10": 0.04, + "E9": 0.07, + "E13": 0.03, + "E3": 0.11, + "E12": 0.04, + "E11": 0.03, + "E2": 0.1, + "E5": 0.11, + "E6": 0.1, + "E14": 0.03, + "E4": 0.06, + "E1": 0.1, + } + + for node, value in answer.items(): + assert scores[node] == pytest.approx(value, abs=1e-2) + + def test_birank_empty_bipartite_set(self): + G = nx.Graph() + all_nodes = [1, 2, 3] + G.add_nodes_from(all_nodes) + + # Test with empty bipartite set + with pytest.raises(nx.NetworkXAlgorithmError): + bipartite.birank(G, all_nodes) + + @pytest.mark.parametrize( + "damping_factor,value", itertools.product(["alpha", "beta"], [-0.1, 1.1]) + ) + def test_birank_invalid_alpha_beta(self, damping_factor, value): + kwargs = {damping_factor: value} + with pytest.raises(nx.NetworkXAlgorithmError): + bipartite.birank( + self.davis_southern_women_graph, self.women_bipartite_set, **kwargs + ) + + def test_birank_power_iteration_failed_convergence(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + bipartite.birank( + self.davis_southern_women_graph, self.women_bipartite_set, max_iter=1 + ) + + @pytest.mark.parametrize( + "personalization,alpha,beta", + itertools.product( + [ + # Concentrated case + lambda x: 1000 if x == 0 else 0, + # Uniform case + lambda x: 5, + # Zero case + lambda x: 0, + ], + [i / 2 for i in range(3)], + [i / 2 for i in range(3)], + ), + ) + def test_gnmk_convergence_birank(self, personalization, alpha, beta): + top_personalization_dict = { + node: personalization(node) for node in self.gnmk_random_graph_top_nodes + } + bipartite.birank( + self.gnmk_random_graph, + self.gnmk_random_graph_top_nodes, + top_personalization=top_personalization_dict, + alpha=alpha, + beta=beta, + ) + + def test_negative_personalization(self): + top_personalization_dict = {0: -1} + with pytest.raises(nx.NetworkXAlgorithmError): + bipartite.birank( + self.gnmk_random_graph, + self.gnmk_random_graph_top_nodes, + top_personalization=top_personalization_dict, + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_matching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_matching.py new file mode 100644 index 0000000..c24659e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_matching.py @@ -0,0 +1,327 @@ +"""Unit tests for the :mod:`networkx.algorithms.bipartite.matching` module.""" + +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms.bipartite.matching import ( + eppstein_matching, + hopcroft_karp_matching, + maximum_matching, + minimum_weight_full_matching, + to_vertex_cover, +) + + +class TestMatching: + """Tests for bipartite matching algorithms.""" + + def setup_method(self): + """Creates a bipartite graph for use in testing matching algorithms. + + The bipartite graph has a maximum cardinality matching that leaves + vertex 1 and vertex 10 unmatched. The first six numbers are the left + vertices and the next six numbers are the right vertices. 
+ + """ + self.simple_graph = nx.complete_bipartite_graph(2, 3) + self.simple_solution = {0: 2, 1: 3, 2: 0, 3: 1} + + edges = [(0, 7), (0, 8), (2, 6), (2, 9), (3, 8), (4, 8), (4, 9), (5, 11)] + self.top_nodes = set(range(6)) + self.graph = nx.Graph() + self.graph.add_nodes_from(range(12)) + self.graph.add_edges_from(edges) + + # Example bipartite graph from issue 2127 + G = nx.Graph() + G.add_nodes_from( + [ + (1, "C"), + (1, "B"), + (0, "G"), + (1, "F"), + (1, "E"), + (0, "C"), + (1, "D"), + (1, "I"), + (0, "A"), + (0, "D"), + (0, "F"), + (0, "E"), + (0, "H"), + (1, "G"), + (1, "A"), + (0, "I"), + (0, "B"), + (1, "H"), + ] + ) + G.add_edge((1, "C"), (0, "A")) + G.add_edge((1, "B"), (0, "A")) + G.add_edge((0, "G"), (1, "I")) + G.add_edge((0, "G"), (1, "H")) + G.add_edge((1, "F"), (0, "A")) + G.add_edge((1, "F"), (0, "C")) + G.add_edge((1, "F"), (0, "E")) + G.add_edge((1, "E"), (0, "A")) + G.add_edge((1, "E"), (0, "C")) + G.add_edge((0, "C"), (1, "D")) + G.add_edge((0, "C"), (1, "I")) + G.add_edge((0, "C"), (1, "G")) + G.add_edge((0, "C"), (1, "H")) + G.add_edge((1, "D"), (0, "A")) + G.add_edge((1, "I"), (0, "A")) + G.add_edge((1, "I"), (0, "E")) + G.add_edge((0, "A"), (1, "G")) + G.add_edge((0, "A"), (1, "H")) + G.add_edge((0, "E"), (1, "G")) + G.add_edge((0, "E"), (1, "H")) + self.disconnected_graph = G + + def check_match(self, matching): + """Asserts that the matching is what we expect from the bipartite graph + constructed in the :meth:`setup` fixture. + + """ + # For the sake of brevity, rename `matching` to `M`. + M = matching + matched_vertices = frozenset(itertools.chain(*M.items())) + # Assert that the maximum number of vertices (10) is matched. + assert matched_vertices == frozenset(range(12)) - {1, 10} + # Assert that no vertex appears in two edges, or in other words, that + # the matching (u, v) and (v, u) both appear in the matching + # dictionary. + assert all(u == M[M[u]] for u in range(12) if u in M) + + def check_vertex_cover(self, vertices): + """Asserts that the given set of vertices is the vertex cover we + expected from the bipartite graph constructed in the :meth:`setup` + fixture. + + """ + # By Konig's theorem, the number of edges in a maximum matching equals + # the number of vertices in a minimum vertex cover. + assert len(vertices) == 5 + # Assert that the set is truly a vertex cover. + for u, v in self.graph.edges(): + assert u in vertices or v in vertices + # TODO Assert that the vertices are the correct ones. + + def test_eppstein_matching(self): + """Tests that David Eppstein's implementation of the Hopcroft--Karp + algorithm produces a maximum cardinality matching. + + """ + self.check_match(eppstein_matching(self.graph, self.top_nodes)) + + def test_hopcroft_karp_matching(self): + """Tests that the Hopcroft--Karp algorithm produces a maximum + cardinality matching in a bipartite graph. 
+ + """ + self.check_match(hopcroft_karp_matching(self.graph, self.top_nodes)) + + def test_to_vertex_cover(self): + """Test for converting a maximum matching to a minimum vertex cover.""" + matching = maximum_matching(self.graph, self.top_nodes) + vertex_cover = to_vertex_cover(self.graph, matching, self.top_nodes) + self.check_vertex_cover(vertex_cover) + + def test_eppstein_matching_simple(self): + match = eppstein_matching(self.simple_graph) + assert match == self.simple_solution + + def test_hopcroft_karp_matching_simple(self): + match = hopcroft_karp_matching(self.simple_graph) + assert match == self.simple_solution + + def test_eppstein_matching_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + match = eppstein_matching(self.disconnected_graph) + + def test_hopcroft_karp_matching_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + match = hopcroft_karp_matching(self.disconnected_graph) + + def test_issue_2127(self): + """Test from issue 2127""" + # Build the example DAG + G = nx.DiGraph() + G.add_edge("A", "C") + G.add_edge("A", "B") + G.add_edge("C", "E") + G.add_edge("C", "D") + G.add_edge("E", "G") + G.add_edge("E", "F") + G.add_edge("G", "I") + G.add_edge("G", "H") + + tc = nx.transitive_closure(G) + btc = nx.Graph() + + # Create a bipartite graph based on the transitive closure of G + for v in tc.nodes(): + btc.add_node((0, v)) + btc.add_node((1, v)) + + for u, v in tc.edges(): + btc.add_edge((0, u), (1, v)) + + top_nodes = {n for n in btc if n[0] == 0} + matching = hopcroft_karp_matching(btc, top_nodes) + vertex_cover = to_vertex_cover(btc, matching, top_nodes) + independent_set = set(G) - {v for _, v in vertex_cover} + assert {"B", "D", "F", "I", "H"} == independent_set + + def test_vertex_cover_issue_2384(self): + G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + def test_vertex_cover_issue_3306(self): + G = nx.Graph() + edges = [(0, 2), (1, 0), (1, 1), (1, 2), (2, 2)] + G.add_edges_from([((i, "L"), (j, "R")) for i, j in edges]) + + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + def test_unorderable_nodes(self): + a = object() + b = object() + c = object() + d = object() + e = object() + G = nx.Graph([(a, d), (b, d), (b, e), (c, d)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + +def test_eppstein_matching(): + """Test in accordance to issue #1927""" + G = nx.Graph() + G.add_nodes_from(["a", 2, 3, 4], bipartite=0) + G.add_nodes_from([1, "b", "c"], bipartite=1) + G.add_edges_from([("a", 1), ("a", "b"), (2, "b"), (2, "c"), (3, "c"), (4, 1)]) + matching = eppstein_matching(G) + assert len(matching) == len(maximum_matching(G)) + assert all(x in set(matching.keys()) for x in set(matching.values())) + + +class TestMinimumWeightFullMatching: + @classmethod + def setup_class(cls): + pytest.importorskip("scipy") + + def test_minimum_weight_full_matching_incomplete_graph(self): + B = nx.Graph() + B.add_nodes_from([1, 2], bipartite=0) + B.add_nodes_from([3, 4], bipartite=1) + B.add_edge(1, 4, weight=100) + B.add_edge(2, 3, weight=100) + B.add_edge(2, 4, weight=50) + matching = minimum_weight_full_matching(B) + assert matching == {1: 4, 2: 3, 4: 1, 3: 2} + + def 
test_minimum_weight_full_matching_with_no_full_matching(self): + B = nx.Graph() + B.add_nodes_from([1, 2, 3], bipartite=0) + B.add_nodes_from([4, 5, 6], bipartite=1) + B.add_edge(1, 4, weight=100) + B.add_edge(2, 4, weight=100) + B.add_edge(3, 4, weight=50) + B.add_edge(3, 5, weight=50) + B.add_edge(3, 6, weight=50) + with pytest.raises(ValueError): + minimum_weight_full_matching(B) + + def test_minimum_weight_full_matching_square(self): + G = nx.complete_bipartite_graph(3, 3) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=300) + matching = minimum_weight_full_matching(G) + assert matching == {0: 4, 1: 3, 2: 5, 4: 0, 3: 1, 5: 2} + + def test_minimum_weight_full_matching_smaller_left(self): + G = nx.complete_bipartite_graph(3, 4) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=1) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(1, 6, weight=2) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=290) + G.add_edge(2, 6, weight=3) + matching = minimum_weight_full_matching(G) + assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1} + + def test_minimum_weight_full_matching_smaller_top_nodes_right(self): + G = nx.complete_bipartite_graph(3, 4) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=1) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(1, 6, weight=2) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=290) + G.add_edge(2, 6, weight=3) + matching = minimum_weight_full_matching(G, top_nodes=[3, 4, 5, 6]) + assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1} + + def test_minimum_weight_full_matching_smaller_right(self): + G = nx.complete_bipartite_graph(4, 3) + G.add_edge(0, 4, weight=400) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=300) + G.add_edge(1, 4, weight=150) + G.add_edge(1, 5, weight=450) + G.add_edge(1, 6, weight=225) + G.add_edge(2, 4, weight=400) + G.add_edge(2, 5, weight=600) + G.add_edge(2, 6, weight=290) + G.add_edge(3, 4, weight=1) + G.add_edge(3, 5, weight=2) + G.add_edge(3, 6, weight=3) + matching = minimum_weight_full_matching(G) + assert matching == {1: 4, 2: 6, 3: 5, 4: 1, 5: 3, 6: 2} + + def test_minimum_weight_full_matching_negative_weights(self): + G = nx.complete_bipartite_graph(2, 2) + G.add_edge(0, 2, weight=-2) + G.add_edge(0, 3, weight=0.2) + G.add_edge(1, 2, weight=-2) + G.add_edge(1, 3, weight=0.3) + matching = minimum_weight_full_matching(G) + assert matching == {0: 3, 1: 2, 2: 1, 3: 0} + + def test_minimum_weight_full_matching_different_weight_key(self): + G = nx.complete_bipartite_graph(2, 2) + G.add_edge(0, 2, mass=2) + G.add_edge(0, 3, mass=0.2) + G.add_edge(1, 2, mass=1) + G.add_edge(1, 3, mass=2) + matching = minimum_weight_full_matching(G, weight="mass") + assert matching == {0: 3, 1: 2, 2: 1, 3: 0} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py new file mode 100644 index 0000000..fc8dbc7 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py @@ -0,0 +1,82 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + + +class TestBiadjacencyMatrix: + def test_biadjacency_matrix_weight(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2, other=4) + X = [1, 3] + Y = [0, 2, 4] + M = bipartite.biadjacency_matrix(G, X, weight="weight") + assert M[0, 0] == 2 + M = bipartite.biadjacency_matrix(G, X, weight="other") + assert M[0, 0] == 4 + + def test_biadjacency_matrix(self): + tops = [2, 5, 10] + bots = [5, 10, 15] + for i in range(len(tops)): + G = bipartite.random_graph(tops[i], bots[i], 0.2) + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + M = bipartite.biadjacency_matrix(G, top) + assert M.shape[0] == tops[i] + assert M.shape[1] == bots[i] + + def test_biadjacency_matrix_order(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2) + X = [3, 1] + Y = [4, 2, 0] + M = bipartite.biadjacency_matrix(G, X, Y, weight="weight") + assert M[1, 2] == 2 + + def test_biadjacency_matrix_empty_graph(self): + G = nx.empty_graph(2) + M = nx.bipartite.biadjacency_matrix(G, [0]) + assert np.array_equal(M.toarray(), np.array([[0]])) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph(), []) + + def test_empty_graph(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), []) + + def test_duplicate_row(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [1, 1]) + + def test_duplicate_col(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], [1, 1]) + + def test_format_keyword(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], format="foo") + + def test_from_biadjacency_roundtrip(self): + B1 = nx.path_graph(5) + M = bipartite.biadjacency_matrix(B1, [0, 2, 4]) + B2 = bipartite.from_biadjacency_matrix(M) + assert nx.is_isomorphic(B1, B2) + + def test_from_biadjacency_weight(self): + M = sp.sparse.csc_array([[1, 2], [0, 3]]) + B = bipartite.from_biadjacency_matrix(M) + assert edges_equal(B.edges(), [(0, 2), (0, 3), (1, 3)]) + B = bipartite.from_biadjacency_matrix(M, edge_attribute="weight") + e = [(0, 2, {"weight": 1}), (0, 3, {"weight": 2}), (1, 3, {"weight": 3})] + assert edges_equal(B.edges(data=True), e) + + def test_from_biadjacency_multigraph(self): + M = sp.sparse.csc_array([[1, 2], [0, 3]]) + B = bipartite.from_biadjacency_matrix(M, create_using=nx.MultiGraph()) + assert edges_equal(B.edges(), [(0, 2), (0, 3), (0, 3), (1, 3), (1, 3), (1, 3)]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_project.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_project.py new file mode 100644 index 0000000..076bb42 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_project.py @@ -0,0 +1,407 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, nodes_equal + + +class TestBipartiteProject: + def test_path_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P = 
bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(nx.NetworkXError, match="not defined for multigraphs"): + bipartite.projected_graph(G, [0]) + + def test_path_projected_properties_graph(self): + G = nx.path_graph(4) + G.add_node(1, name="one") + G.add_node(2, name="two") + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + assert P.nodes[1]["name"] == G.nodes[1]["name"] + P = bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + assert P.nodes[2]["name"] == G.nodes[2]["name"] + + def test_path_collaboration_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_directed_path_collaboration_projected_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_path_weighted_projected_graph(self): + G = nx.path_graph(4) + + with pytest.raises(nx.NetworkXAlgorithmError): + bipartite.weighted_projected_graph(G, [1, 2, 3, 3]) + + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_digraph_weighted_projection(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + P = bipartite.overlap_weighted_projected_graph(G, [1, 3]) + assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0} + assert len(P) == 2 + + def test_path_weighted_projected_directed_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_star_projected_graph(self): + G = nx.star_graph(3) + P = bipartite.projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + P = bipartite.weighted_projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + + P = bipartite.projected_graph(G, [0]) + assert nodes_equal(list(P), [0]) + assert edges_equal(list(P.edges()), []) + + def test_project_multigraph(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("a", 2) + G.add_edge("b", 2) + P = bipartite.projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = 
bipartite.weighted_projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = bipartite.projected_graph(G, "ab", multigraph=True) + assert edges_equal(list(P.edges()), [("a", "b"), ("a", "b")]) + + def test_project_collaboration(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("b", 2) + G.add_edge("c", 2) + G.add_edge("c", 3) + G.add_edge("c", 4) + G.add_edge("b", 4) + P = bipartite.collaboration_weighted_projected_graph(G, "abc") + assert P["a"]["b"]["weight"] == 1 + assert P["b"]["c"]["weight"] == 2 + + def test_directed_projection(self): + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge("B", 2) + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 1 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B")]) + + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge(2, "B") + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 2 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B"), ("A", "B")]) + + +class TestBipartiteWeightedProjection: + @classmethod + def setup_class(cls): + # Tore Opsahl's example + # http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/ + cls.G = nx.Graph() + cls.G.add_edge("A", 1) + cls.G.add_edge("A", 2) + cls.G.add_edge("B", 1) + cls.G.add_edge("B", 2) + cls.G.add_edge("B", 3) + cls.G.add_edge("B", 4) + cls.G.add_edge("B", 5) + cls.G.add_edge("C", 1) + cls.G.add_edge("D", 3) + cls.G.add_edge("E", 4) + cls.G.add_edge("E", 5) + cls.G.add_edge("E", 6) + cls.G.add_edge("F", 6) + # Graph based on figure 6 from Newman (2001) + cls.N = nx.Graph() + cls.N.add_edge("A", 1) + cls.N.add_edge("A", 2) + cls.N.add_edge("A", 3) + cls.N.add_edge("B", 1) + cls.N.add_edge("B", 2) + cls.N.add_edge("B", 3) + cls.N.add_edge("C", 1) + cls.N.add_edge("D", 1) + cls.N.add_edge("E", 3) + + def test_project_weighted_shared(self): + edges = [ + ("A", "B", 2), + ("A", "C", 1), + ("B", "C", 1), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3), + ("A", "E", 1), + ("A", "C", 1), + ("A", "D", 1), + ("B", "E", 1), + ("B", "C", 1), + ("B", "D", 1), + ("C", "D", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_newman(self): + edges = [ + ("A", "B", 1.5), + ("A", "C", 0.5), + ("B", "C", 0.5), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) 
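+        # also verify the Newman collaboration weights edge by edge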
+ for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 11 / 6.0), + ("A", "E", 1 / 2.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 2.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_ratio(self): + edges = [ + ("A", "B", 2 / 6.0), + ("A", "C", 1 / 6.0), + ("B", "C", 1 / 6.0), + ("B", "D", 1 / 6.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 6.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_overlap(self): + edges = [ + ("A", "B", 2 / 2.0), + ("A", "C", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("B", "E", 2 / 3.0), + ("E", "F", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 1.0), + ("A", "C", 1 / 1.0), + ("A", "D", 1 / 1.0), + ("B", "E", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_jaccard(self): + edges = [ + ("A", "B", 2 / 5.0), + ("A", "C", 1 / 2.0), + ("B", "C", 1 / 5.0), + ("B", "D", 1 / 5.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in P.edges(): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_generic_weighted_projected_graph_simple(self): + def shared(G, u, v): + return len(set(G[u]) & set(G[v])) + + B = 
nx.path_graph(5) + G = bipartite.generic_weighted_projected_graph( + B, [0, 2, 4], weight_function=shared + ) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + B = nx.DiGraph() + nx.add_path(B, range(5)) + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})] + ) + + def test_generic_weighted_projected_graph_custom(self): + def jaccard(G, u, v): + unbrs = set(G[u]) + vnbrs = set(G[v]) + return len(unbrs & vnbrs) / len(unbrs | vnbrs) + + def my_weight(G, u, v, weight="weight"): + w = 0 + for nbr in set(G[u]) & set(G[v]): + w += G.edges[u, nbr].get(weight, 1) + G.edges[v, nbr].get(weight, 1) + return w + + B = nx.bipartite.complete_bipartite_graph(2, 2) + for i, (u, v) in enumerate(B.edges()): + B.edges[u, v]["weight"] = i + 1 + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=jaccard + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 1.0})]) + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=my_weight + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 10})]) + G = bipartite.generic_weighted_projected_graph(B, [0, 1]) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 2})]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py new file mode 100644 index 0000000..8d979db --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py @@ -0,0 +1,35 @@ +"""Unit tests for the :mod:`networkx.algorithms.bipartite.redundancy` module.""" + +import pytest + +from networkx import NetworkXError, cycle_graph +from networkx.algorithms.bipartite import complete_bipartite_graph, node_redundancy + + +def test_no_redundant_nodes(): + G = complete_bipartite_graph(2, 2) + + # when nodes is None + rc = node_redundancy(G) + assert all(redundancy == 1 for redundancy in rc.values()) + + # when set of nodes is specified + rc = node_redundancy(G, (2, 3)) + assert rc == {2: 1.0, 3: 1.0} + + +def test_redundant_nodes(): + G = cycle_graph(6) + edge = {0, 3} + G.add_edge(*edge) + redundancy = node_redundancy(G) + for v in edge: + assert redundancy[v] == 2 / 3 + for v in set(G) - edge: + assert redundancy[v] == 1 + + +def test_not_enough_neighbors(): + with pytest.raises(NetworkXError): + G = complete_bipartite_graph(1, 2) + node_redundancy(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py new file mode 100644 index 0000000..b940649 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py @@ -0,0 +1,80 @@ +import pytest + +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.bipartite import spectral_bipartivity as sb + +# Examples from Figure 1 +# E. Estrada and J. A. 
Rodríguez-Velázquez, "Spectral measures of +# bipartivity in complex networks", PhysRev E 72, 046105 (2005) + + +class TestSpectralBipartivity: + def test_star_like(self): + # star-like + + G = nx.star_graph(2) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.843, abs=1e-3) + + G = nx.star_graph(3) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.871, abs=1e-3) + + G = nx.star_graph(4) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.890, abs=1e-3) + + def test_k23_like(self): + # K2,3-like + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.769, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + assert sb(G) == pytest.approx(0.829, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + assert sb(G) == pytest.approx(0.731, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + G.add_edge(2, 4) + assert sb(G) == pytest.approx(0.692, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.645, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(2, 3) + assert sb(G) == pytest.approx(0.645, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(2, 3) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.597, abs=1e-3) + + def test_single_nodes(self): + # single nodes + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + sbn = sb(G, nodes=[1, 2]) + assert sbn[1] == pytest.approx(0.85, abs=1e-2) + assert sbn[2] == pytest.approx(0.77, abs=1e-2) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + sbn = sb(G, nodes=[1, 2]) + assert sbn[1] == pytest.approx(0.73, abs=1e-2) + assert sbn[2] == pytest.approx(0.82, abs=1e-2) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/boundary.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/boundary.py new file mode 100644 index 0000000..ba05d80 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/boundary.py @@ -0,0 +1,168 @@ +"""Routines to find the boundary of a set of nodes. + +An edge boundary is a set of edges, each of which has exactly one +endpoint in a given set of nodes (or, in the case of directed graphs, +the set of edges whose source node is in the set). + +A node boundary of a set *S* of nodes is the set of (out-)neighbors of +nodes in *S* that are outside *S*. + +""" + +from itertools import chain + +import networkx as nx + +__all__ = ["edge_boundary", "node_boundary"] + + +@nx._dispatchable(edge_attrs={"data": "default"}, preserve_edge_attrs="data") +def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default=None): + """Returns the edge boundary of `nbunch1`. + + The *edge boundary* of a set *S* with respect to a set *T* is the + set of edges (*u*, *v*) such that *u* is in *S* and *v* is in *T*. + If *T* is not specified, it is assumed to be the set of all nodes + not in *S*. + + Parameters + ---------- + G : NetworkX graph + + nbunch1 : iterable + Iterable of nodes in the graph representing the set of nodes + whose edge boundary will be returned. (This is the set *S* from + the definition above.) + + nbunch2 : iterable + Iterable of nodes representing the target (or "exterior") set of + nodes. (This is the set *T* from the definition above.) If not + specified, this is assumed to be the set of all nodes in `G` + not in `nbunch1`. 
+ + keys : bool + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + data : bool or object + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + default : object + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + Returns + ------- + iterator + An iterator over the edges in the boundary of `nbunch1` with + respect to `nbunch2`. If `keys`, `data`, or `default` + are specified and `G` is a multigraph, then edges are returned + with keys and/or data, as in :meth:`MultiGraph.edges`. + + Examples + -------- + >>> G = nx.wheel_graph(6) + + When nbunch2=None: + + >>> list(nx.edge_boundary(G, (1, 3))) + [(1, 0), (1, 2), (1, 5), (3, 0), (3, 2), (3, 4)] + + When nbunch2 is given: + + >>> list(nx.edge_boundary(G, (1, 3), (2, 0))) + [(1, 0), (1, 2), (3, 0), (3, 2)] + + Notes + ----- + Any element of `nbunch` that is not in the graph `G` will be + ignored. + + `nbunch1` and `nbunch2` are usually meant to be disjoint, but in + the interest of speed and generality, that is not required here. + + """ + nset1 = {n for n in nbunch1 if n in G} + # Here we create an iterator over edges incident to nodes in the set + # `nset1`. The `Graph.edges()` method does not provide a guarantee + # on the orientation of the edges, so our algorithm below must + # handle the case in which exactly one orientation, either (u, v) or + # (v, u), appears in this iterable. + if G.is_multigraph(): + edges = G.edges(nset1, data=data, keys=keys, default=default) + else: + edges = G.edges(nset1, data=data, default=default) + # If `nbunch2` is not provided, then it is assumed to be the set + # complement of `nbunch1`. For the sake of efficiency, this is + # implemented by using the `not in` operator, instead of by creating + # an additional set and using the `in` operator. + if nbunch2 is None: + return (e for e in edges if (e[0] in nset1) ^ (e[1] in nset1)) + nset2 = set(nbunch2) + return ( + e + for e in edges + if (e[0] in nset1 and e[1] in nset2) or (e[1] in nset1 and e[0] in nset2) + ) + + +@nx._dispatchable +def node_boundary(G, nbunch1, nbunch2=None): + """Returns the node boundary of `nbunch1`. + + The *node boundary* of a set *S* with respect to a set *T* is the + set of nodes *v* in *T* such that for some *u* in *S*, there is an + edge joining *u* to *v*. If *T* is not specified, it is assumed to + be the set of all nodes not in *S*. + + Parameters + ---------- + G : NetworkX graph + + nbunch1 : iterable + Iterable of nodes in the graph representing the set of nodes + whose node boundary will be returned. (This is the set *S* from + the definition above.) + + nbunch2 : iterable + Iterable of nodes representing the target (or "exterior") set of + nodes. (This is the set *T* from the definition above.) If not + specified, this is assumed to be the set of all nodes in `G` + not in `nbunch1`. + + Returns + ------- + set + The node boundary of `nbunch1` with respect to `nbunch2`. + + Examples + -------- + >>> G = nx.wheel_graph(6) + + When nbunch2=None: + + >>> list(nx.node_boundary(G, (3, 4))) + [0, 2, 5] + + When nbunch2 is given: + + >>> list(nx.node_boundary(G, (3, 4), (0, 1, 5))) + [0, 5] + + Notes + ----- + Any element of `nbunch` that is not in the graph `G` will be + ignored. + + `nbunch1` and `nbunch2` are usually meant to be disjoint, but in + the interest of speed and generality, that is not required here. 
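+
+    For directed graphs, the neighbor lookup `G[v]` yields successors,
+    so the node boundary consists of the out-neighbors of the nodes in
+    `nbunch1`. For example:
+
+    >>> D = nx.DiGraph([(0, 1), (2, 0)])
+    >>> nx.node_boundary(D, {0})
+    {1}
+
+    Note that node 2, an in-neighbor of node 0, is not in the boundary.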
+ + """ + nset1 = {n for n in nbunch1 if n in G} + bdy = set(chain.from_iterable(G[v] for v in nset1)) - nset1 + # If `nbunch2` is not specified, it is assumed to be the set + # complement of `nbunch1`. + if nbunch2 is not None: + bdy &= set(nbunch2) + return bdy diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/bridges.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/bridges.py new file mode 100644 index 0000000..eaa6fd3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/bridges.py @@ -0,0 +1,205 @@ +"""Bridge-finding algorithms.""" + +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["bridges", "has_bridges", "local_bridges"] + + +@not_implemented_for("directed") +@nx._dispatchable +def bridges(G, root=None): + """Generate all bridges in a graph. + + A *bridge* in a graph is an edge whose removal causes the number of + connected components of the graph to increase. Equivalently, a bridge is an + edge that does not belong to any cycle. Bridges are also known as cut-edges, + isthmuses, or cut arcs. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the bridges in the + connected component containing this node will be returned. + + Yields + ------ + e : edge + An edge in the graph whose removal disconnects the graph (or + causes the number of connected components to increase). + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + NetworkXNotImplemented + If `G` is a directed graph. + + Examples + -------- + The barbell graph with parameter zero has a single bridge: + + >>> G = nx.barbell_graph(10, 0) + >>> list(nx.bridges(G)) + [(9, 10)] + + Notes + ----- + This is an implementation of the algorithm described in [1]_. An edge is a + bridge if and only if it is not contained in any chain. Chains are found + using the :func:`networkx.chain_decomposition` function. + + The algorithm described in [1]_ requires a simple graph. If the provided + graph is a multigraph, we convert it to a simple graph and verify that any + bridges discovered by the chain decomposition algorithm are not multi-edges. + + Ignoring polylogarithmic factors, the worst-case time complexity is the + same as the :func:`networkx.chain_decomposition` function, + $O(m + n)$, where $n$ is the number of nodes in the graph and $m$ is + the number of edges. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions + """ + multigraph = G.is_multigraph() + H = nx.Graph(G) if multigraph else G + chains = nx.chain_decomposition(H, root=root) + chain_edges = set(chain.from_iterable(chains)) + if root is not None: + H = H.subgraph(nx.node_connected_component(H, root)).copy() + for u, v in H.edges(): + if (u, v) not in chain_edges and (v, u) not in chain_edges: + if multigraph and len(G[u][v]) > 1: + continue + yield u, v + + +@not_implemented_for("directed") +@nx._dispatchable +def has_bridges(G, root=None): + """Decide whether a graph has any bridges. + + A *bridge* in a graph is an edge whose removal causes the number of + connected components of the graph to increase. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the bridges in the + connected component containing this node will be considered. 
+ + Returns + ------- + bool + Whether the graph (or the connected component containing `root`) + has any bridges. + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + NetworkXNotImplemented + If `G` is a directed graph. + + Examples + -------- + The barbell graph with parameter zero has a single bridge:: + + >>> G = nx.barbell_graph(10, 0) + >>> nx.has_bridges(G) + True + + On the other hand, the cycle graph has no bridges:: + + >>> G = nx.cycle_graph(5) + >>> nx.has_bridges(G) + False + + Notes + ----- + This implementation uses the :func:`networkx.bridges` function, so + it shares its worst-case time complexity, $O(m + n)$, ignoring + polylogarithmic factors, where $n$ is the number of nodes in the + graph and $m$ is the number of edges. + + """ + try: + next(bridges(G, root=root)) + except StopIteration: + return False + else: + return True + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def local_bridges(G, with_span=True, weight=None): + """Iterate over local bridges of `G` optionally computing the span + + A *local bridge* is an edge whose endpoints have no common neighbors. + That is, the edge is not part of a triangle in the graph. + + The *span* of a *local bridge* is the shortest path length between + the endpoints if the local bridge is removed. + + Parameters + ---------- + G : undirected graph + + with_span : bool + If True, yield a 3-tuple `(u, v, span)` + + weight : function, string or None (default: None) + If function, used to compute edge weights for the span. + If string, the edge data attribute used in calculating span. + If None, all edges have weight 1. + + Yields + ------ + e : edge + The local bridges as an edge 2-tuple of nodes `(u, v)` or + as a 3-tuple `(u, v, span)` when `with_span is True`. + + Raises + ------ + NetworkXNotImplemented + If `G` is a directed graph or multigraph. + + Examples + -------- + A cycle graph has every edge a local bridge with span N-1. + + >>> G = nx.cycle_graph(9) + >>> (0, 8, 8) in set(nx.local_bridges(G)) + True + """ + if with_span is not True: + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + yield u, v + else: + wt = nx.weighted._weight_function(G, weight) + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + enodes = {u, v} + + def hide_edge(n, nbr, d): + if n not in enodes or nbr not in enodes: + return wt(n, nbr, d) + return None + + try: + span = nx.shortest_path_length(G, u, v, weight=hide_edge) + yield u, v, span + except nx.NetworkXNoPath: + yield u, v, float("inf") diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/broadcasting.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/broadcasting.py new file mode 100644 index 0000000..db8553b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/broadcasting.py @@ -0,0 +1,155 @@ +"""Routines to calculate the broadcast time of certain graphs. + +Broadcasting is an information dissemination problem in which a node in a graph, +called the originator, must distribute a message to all other nodes by placing +a series of calls along the edges of the graph. Once informed, other nodes aid +the originator in distributing the message. + +The broadcasting must be completed as quickly as possible subject to the +following constraints: +- Each call requires one unit of time. +- A node can only participate in one call per unit of time. +- Each call only involves two adjacent nodes: a sender and a receiver. 
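[Editor's note] To make these constraints concrete, an editor's sketch hand-checked on a 4-node path, assuming the two functions defined below are exposed at the top level as the __all__ list suggests: an inner originator finishes in 2 time units (inform one side, then both informed nodes call outward), while a leaf needs 3.

import networkx as nx

G = nx.path_graph(4)  # 0 - 1 - 2 - 3
assert nx.tree_broadcast_center(G) == (2, {1, 2})
assert nx.tree_broadcast_time(G, 1) == 2  # inner originator
assert nx.tree_broadcast_time(G, 0) == 3  # leaf originator
assert nx.tree_broadcast_time(G) == 3  # worst case over all originators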
+""" + +import networkx as nx +from networkx import NetworkXError +from networkx.utils import not_implemented_for + +__all__ = [ + "tree_broadcast_center", + "tree_broadcast_time", +] + + +def _get_max_broadcast_value(G, U, v, values): + adj = sorted(set(G.neighbors(v)) & U, key=values.get, reverse=True) + return max(values[u] + i for i, u in enumerate(adj, start=1)) + + +def _get_broadcast_centers(G, v, values, target): + adj = sorted(G.neighbors(v), key=values.get, reverse=True) + j = next(i for i, u in enumerate(adj, start=1) if values[u] + i == target) + return set([v] + adj[:j]) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def tree_broadcast_center(G): + """Return the Broadcast Center of the tree `G`. + + The broadcast center of a graph G denotes the set of nodes having + minimum broadcast time [1]_. This is a linear algorithm for determining + the broadcast center of a tree with ``N`` nodes, as a by-product it also + determines the broadcast time from the broadcast center. + + Parameters + ---------- + G : undirected graph + The graph should be an undirected tree + + Returns + ------- + BC : (int, set) tuple + minimum broadcast number of the tree, set of broadcast centers + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + References + ---------- + .. [1] Slater, P.J., Cockayne, E.J., Hedetniemi, S.T, + Information dissemination in trees. SIAM J.Comput. 10(4), 692–701 (1981) + """ + # Assert that the graph G is a tree + if not nx.is_tree(G): + NetworkXError("Input graph is not a tree") + # step 0 + if G.number_of_nodes() == 2: + return 1, set(G.nodes()) + if G.number_of_nodes() == 1: + return 0, set(G.nodes()) + + # step 1 + U = {node for node, deg in G.degree if deg == 1} + values = dict.fromkeys(U, 0) + T = G.copy() + T.remove_nodes_from(U) + + # step 2 + W = {node for node, deg in T.degree if deg == 1} + values.update((w, G.degree[w] - 1) for w in W) + + # step 3 + while T.number_of_nodes() >= 2: + # step 4 + w = min(W, key=lambda n: values[n]) + v = next(T.neighbors(w)) + + # step 5 + U.add(w) + W.remove(w) + T.remove_node(w) + + # step 6 + if T.degree(v) == 1: + # update t(v) + values.update({v: _get_max_broadcast_value(G, U, v, values)}) + W.add(v) + + # step 7 + v = nx.utils.arbitrary_element(T) + b_T = _get_max_broadcast_value(G, U, v, values) + return b_T, _get_broadcast_centers(G, v, values, b_T) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def tree_broadcast_time(G, node=None): + """Return the Broadcast Time of the tree `G`. + + The minimum broadcast time of a node is defined as the minimum amount + of time required to complete broadcasting starting from the + originator. The broadcast time of a graph is the maximum over + all nodes of the minimum broadcast time from that node [1]_. + This function returns the minimum broadcast time of `node`. + If `node` is None the broadcast time for the graph is returned. + + Parameters + ---------- + G : undirected graph + The graph should be an undirected tree + node: int, optional + index of starting node. If `None`, the algorithm returns the broadcast + time of the tree. + + Returns + ------- + BT : int + Broadcast Time of a node in a tree + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + References + ---------- + .. [1] Harutyunyan, H. A. and Li, Z. + "A Simple Construction of Broadcast Graphs." + In Computing and Combinatorics. COCOON 2019 + (Ed. 
D. Z. Du and C. Tian.) Springer, pp. 240-253, 2019. + """ + b_T, b_C = tree_broadcast_center(G) + if node is not None: + return b_T + min(nx.shortest_path_length(G, node, u) for u in b_C) + dist_from_center = dict.fromkeys(G, len(G)) + for u in b_C: + for v, dist in nx.shortest_path_length(G, u).items(): + if dist < dist_from_center[v]: + dist_from_center[v] = dist + return b_T + max(dist_from_center.values()) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__init__.py new file mode 100644 index 0000000..c91a904 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__init__.py @@ -0,0 +1,20 @@ +from .betweenness import * +from .betweenness_subset import * +from .closeness import * +from .current_flow_betweenness import * +from .current_flow_betweenness_subset import * +from .current_flow_closeness import * +from .degree_alg import * +from .dispersion import * +from .eigenvector import * +from .group import * +from .harmonic import * +from .katz import * +from .load import * +from .percolation import * +from .reaching import * +from .second_order import * +from .subgraph_alg import * +from .trophic import * +from .voterank_alg import * +from .laplacian import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..1cffea1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-312.pyc new file mode 100644 index 0000000..e366a71 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-312.pyc new file mode 100644 index 0000000..53326ad Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-312.pyc new file mode 100644 index 0000000..74e31e5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-312.pyc new file mode 100644 index 0000000..0f5d33b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-312.pyc new file mode 100644 index 0000000..8665d30 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-312.pyc new file mode 100644 index 0000000..af8691f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-312.pyc new file mode 100644 index 0000000..d88df27 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-312.pyc new file mode 100644 index 0000000..24d876e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-312.pyc new file mode 100644 index 0000000..aff74c6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-312.pyc new file mode 100644 index 0000000..90ea07e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-312.pyc new file mode 100644 index 0000000..c83a821 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-312.pyc new file mode 100644 index 0000000..dc9ba66 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-312.pyc new file mode 100644 index 0000000..98c57b2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-312.pyc new file mode 100644 index 0000000..47c112a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-312.pyc new file mode 100644 index 0000000..3f9cca9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-312.pyc new file mode 100644 index 0000000..979d20f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-312.pyc new file mode 100644 index 0000000..669353c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-312.pyc new file mode 100644 index 0000000..a031846 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-312.pyc new file mode 100644 index 0000000..1e81b6a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-312.pyc new file mode 100644 index 0000000..8998922 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-312.pyc new file mode 100644 index 0000000..09ce940 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-312.pyc differ diff 
--git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness.py new file mode 100644 index 0000000..d945a55 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness.py @@ -0,0 +1,469 @@ +"""Betweenness centrality measures.""" + +import math +from collections import deque +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function +from networkx.utils import py_random_state +from networkx.utils.decorators import not_implemented_for + +__all__ = ["betweenness_centrality", "edge_betweenness_centrality"] + + +@py_random_state(5) +@nx._dispatchable(edge_attrs="weight") +def betweenness_centrality( + G, k=None, normalized=True, weight=None, endpoints=False, seed=None +): + r"""Compute the shortest-path betweenness centrality for nodes. + + Betweenness centrality of a node $v$ is the sum of the + fraction of all-pairs shortest paths that pass through $v$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|v)$ is the number of + those paths passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, and if $v \in {s, t}$, + $\sigma(s, t|v) = 0$ [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int, optional (default=None) + If k is not None use k node samples to estimate betweenness. + The value of k <= n where n is the number of nodes in the graph. + Higher values give better approximation. + + normalized : bool, optional + If True the betweenness values are normalized by `2/((n-1)(n-2))` + for graphs, and `1/((n-1)(n-2))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + endpoints : bool, optional + If True include the endpoints in the shortest path counts. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Note that this is only used if k is not None. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The algorithm is from Ulrik Brandes [1]_. + See [4]_ for the original first published version and [2]_ for details on + algorithms for variations and related metrics. + + For approximate betweenness calculations set k=#samples to use + k nodes ("pivots") to estimate the betweenness values. For an estimate + of the number of pivots needed see [3]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. 
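[Editor's note] A hand-checkable instance of the formula and normalization above, as an editor's sketch: in a 5-node path, node 2 lies on the four shortest paths (0,3), (0,4), (1,3), (1,4), each with sigma = 1, and the undirected normalization factor is 2/((n-1)(n-2)) = 1/6 per unordered pair.

import networkx as nx

bc = nx.betweenness_centrality(nx.path_graph(5))
assert bc[0] == 0.0  # endpoints lie on no interior shortest paths
assert abs(bc[1] - 3 / 6) < 1e-12  # (0,2), (0,3), (0,4)
assert abs(bc[2] - 4 / 6) < 1e-12  # (0,3), (0,4), (1,3), (1,4)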
+ + For betweenness_centrality_subset the reporting is different. + If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + This algorithm is not guaranteed to be correct if edge weights + are floating point numbers. As a workaround you can use integer + numbers by multiplying the relevant edge attributes by a convenient + constant factor (eg 100) and converting to integers. + + References + ---------- + .. [1] Ulrik Brandes: + A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: + On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + .. [3] Ulrik Brandes and Christian Pich: + Centrality Estimation in Large Networks. + International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007. + https://dx.doi.org/10.1142/S0218127407018403 + .. [4] Linton C. Freeman: + A set of measures of centrality based on betweenness. + Sociometry 40: 35–41, 1977 + https://doi.org/10.2307/3033543 + """ + betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + if k == len(G): + # This is done for performance; the result is the same regardless. + k = None + if k is None: + nodes = G + else: + nodes = seed.sample(list(G.nodes()), k) + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight) + # accumulation + if endpoints: + betweenness, _ = _accumulate_endpoints(betweenness, S, P, sigma, s) + else: + betweenness, _ = _accumulate_basic(betweenness, S, P, sigma, s) + # rescaling + betweenness = _rescale( + betweenness, + len(G), + normalized=normalized, + directed=G.is_directed(), + k=k, + endpoints=endpoints, + sampled_nodes=nodes, + ) + return betweenness + + +@py_random_state(4) +@nx._dispatchable(edge_attrs="weight") +def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None): + r"""Compute betweenness centrality for edges. + + Betweenness centrality of an edge $e$ is the sum of the + fraction of all-pairs shortest paths that pass through $e$ + + .. math:: + + c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|e)$ is the number of + those paths passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int, optional (default=None) + If k is not None use k node samples to estimate betweenness. + The value of k <= n where n is the number of nodes in the graph. + Higher values give better approximation. + + normalized : bool, optional + If True the betweenness values are normalized by $2/(n(n-1))$ + for graphs, and $1/(n(n-1))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. 
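[Editor's note] The integer-weight workaround mentioned in the Notes above can be sketched as follows; the attribute name "w" and the factor 100 are arbitrary choices. In raw floats, 0.1 + 0.2 != 0.3, so the tie between the two routes from 0 to 2 would be missed; after scaling, 10 + 20 == 30 holds exactly.

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 0.1), (1, 2, 0.2), (0, 2, 0.3)], weight="w")
for u, v, d in G.edges(data=True):
    d["w"] = round(d["w"] * 100)  # 10, 20, 30 -- the tie is now exact
bc = nx.betweenness_centrality(G, weight="w")
assert abs(bc[1] - 0.5) < 1e-12  # node 1 carries half of the tied 0-2 traffic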
+ Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Note that this is only used if k is not None. + + Returns + ------- + edges : dictionary + Dictionary of edges with betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The algorithm is from Ulrik Brandes [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes, + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + # b[e]=0 for e in G.edges() + betweenness.update(dict.fromkeys(G.edges(), 0.0)) + if k is None: + nodes = G + else: + nodes = seed.sample(list(G.nodes()), k) + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight) + # accumulation + betweenness = _accumulate_edges(betweenness, S, P, sigma, s) + # rescaling + for n in G: # remove nodes to only return edges + del betweenness[n] + betweenness = _rescale_e( + betweenness, len(G), normalized=normalized, directed=G.is_directed() + ) + if G.is_multigraph(): + betweenness = _add_edge_keys(G, betweenness, weight=weight) + return betweenness + + +# helpers for betweenness centrality + + +def _single_source_shortest_path_basic(G, s): + S = [] + P = {} + for v in G: + P[v] = [] + sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G + D = {} + sigma[s] = 1.0 + D[s] = 0 + Q = deque([s]) + while Q: # use BFS to find shortest paths + v = Q.popleft() + S.append(v) + Dv = D[v] + sigmav = sigma[v] + for w in G[v]: + if w not in D: + Q.append(w) + D[w] = Dv + 1 + if D[w] == Dv + 1: # this is a shortest path, count paths + sigma[w] += sigmav + P[w].append(v) # predecessors + return S, P, sigma, D + + +def _single_source_dijkstra_path_basic(G, s, weight): + weight = _weight_function(G, weight) + # modified from Eppstein + S = [] + P = {} + for v in G: + P[v] = [] + sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G + D = {} + sigma[s] = 1.0 + seen = {s: 0} + c = count() + Q = [] # use Q as heap with (distance,node id) tuples + heappush(Q, (0, next(c), s, s)) + while Q: + (dist, _, pred, v) = heappop(Q) + if v in D: + continue # already searched this node. 
+ sigma[v] += sigma[pred] # count paths + S.append(v) + D[v] = dist + for w, edgedata in G[v].items(): + vw_dist = dist + weight(v, w, edgedata) + if w not in D and (w not in seen or vw_dist < seen[w]): + seen[w] = vw_dist + heappush(Q, (vw_dist, next(c), v, w)) + sigma[w] = 0.0 + P[w] = [v] + elif vw_dist == seen[w]: # handle equal paths + sigma[w] += sigma[v] + P[w].append(v) + return S, P, sigma, D + + +def _accumulate_basic(betweenness, S, P, sigma, s): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness, delta + + +def _accumulate_endpoints(betweenness, S, P, sigma, s): + betweenness[s] += len(S) - 1 + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + 1 + return betweenness, delta + + +def _accumulate_edges(betweenness, S, P, sigma, s): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + c = sigma[v] * coeff + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, *, normalized, directed, k, endpoints, sampled_nodes): + # N is used to count the number of valid (s, t) pairs where s != t that + # could have a path pass through v. If endpoints is False, then v must + # not be the target t, hence why we subtract by 1. + N = n if endpoints else n - 1 + if N < 2: + # No rescaling necessary: b=0 for all nodes + return betweenness + + K_source = N if k is None else k + + if k is None or endpoints: + # No sampling adjustment needed + if normalized: + # Divide by the number of valid (s, t) node pairs that could have + # a path through v where s != t. + scale = 1 / (K_source * (N - 1)) + else: + # Scale to the full BC + if not directed: + # The non-normalized BC values are computed the same way for + # directed and undirected graphs: shortest paths are computed and + # counted for each *ordered* (s, t) pair. Undirected graphs should + # only count valid *unordered* node pairs {s, t}; that is, (s, t) + # and (t, s) should be counted only once. We correct for this here. + correction = 2 + else: + correction = 1 + scale = N / (K_source * correction) + + if scale != 1: + for v in betweenness: + betweenness[v] *= scale + return betweenness + + # Sampling adjustment needed when excluding endpoints when using k. In this + # case, we need to handle source nodes differently from non-source nodes, + # because source nodes can't include themselves since endpoints are excluded. + # Without this, k == n would be a special case that would violate the + # assumption that node `v` is not one of the (s, t) node pairs. 
+ if normalized: + # NaN for undefined 0/0; there is no data for source node when k=1 + scale_source = 1 / ((K_source - 1) * (N - 1)) if K_source > 1 else math.nan + scale_nonsource = 1 / (K_source * (N - 1)) + else: + correction = 1 if directed else 2 + scale_source = N / ((K_source - 1) * correction) if K_source > 1 else math.nan + scale_nonsource = N / (K_source * correction) + + sampled_nodes = set(sampled_nodes) + for v in betweenness: + betweenness[v] *= scale_source if v in sampled_nodes else scale_nonsource + return betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False, k=None): + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + if k is not None: + scale = scale * n / k + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +@not_implemented_for("graph") +def _add_edge_keys(G, betweenness, weight=None): + r"""Adds the corrected betweenness centrality (BC) values for multigraphs. + + Parameters + ---------- + G : NetworkX graph. + + betweenness : dictionary + Dictionary mapping adjacent node tuples to betweenness centrality values. + + weight : string or function + See `_weight_function` for details. Defaults to `None`. + + Returns + ------- + edges : dictionary + The parameter `betweenness` including edges with keys and their + betweenness centrality values. + + The BC value is divided among edges of equal weight. + """ + _weight = _weight_function(G, weight) + + edge_bc = dict.fromkeys(G.edges, 0.0) + for u, v in betweenness: + d = G[u][v] + wt = _weight(u, v, d) + keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt] + bc = betweenness[(u, v)] / len(keys) + for k in keys: + edge_bc[(u, v, k)] = bc + + return edge_bc diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness_subset.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness_subset.py new file mode 100644 index 0000000..b9e9936 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness_subset.py @@ -0,0 +1,275 @@ +"""Betweenness centrality measures for subsets of nodes.""" + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _add_edge_keys, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = [ + "betweenness_centrality_subset", + "edge_betweenness_centrality_subset", +] + + +@nx._dispatchable(edge_attrs="weight") +def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None): + r"""Compute betweenness centrality for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|v)$ is the number of those paths + passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, + and if $v \in {s, t}$, $\sigma(s, t|v) = 0$ [2]_. + + + Parameters + ---------- + G : graph + A NetworkX graph. 
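[Editor's note] The Notes further down state that with sources and targets both set to all nodes this function reproduces betweenness_centrality; that claim can be spot-checked directly (editor's sketch; note this variant defaults to normalized=False, so it is passed explicitly).

import networkx as nx

G = nx.path_graph(5)
full = nx.betweenness_centrality(G, normalized=True)
sub = nx.betweenness_centrality_subset(
    G, sources=list(G), targets=list(G), normalized=True
)
assert all(abs(full[v] - sub[v]) < 1e-12 for v in G)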
+ + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by $2/((n-1)(n-2))$ + for graphs, and $1/((n-1)(n-2))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is + designed to make betweenness_centrality(G) be the same as + betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. + If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_subset(b, S, P, sigma, s, targets) + b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed()) + return b + + +@nx._dispatchable(edge_attrs="weight") +def edge_betweenness_centrality_subset( + G, sources, targets, normalized=False, weight=None +): + r"""Compute betweenness centrality for edges for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|e)$ is the number of those paths + passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A networkx graph. 
+ + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by `2/(n(n-1))` + for graphs, and `1/(n(n-1))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + edges : dictionary + Dictionary of edges with Betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is the same + as in edge_betweenness_centrality() and is designed to make + edge_betweenness_centrality(G) be the same as + edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + b.update(dict.fromkeys(G.edges(), 0.0)) # b[e] for e in G.edges() + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_edges_subset(b, S, P, sigma, s, targets) + for n in G: # remove nodes to only return edges + del b[n] + b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed()) + if G.is_multigraph(): + b = _add_edge_keys(G, b, weight=weight) + return b + + +def _accumulate_subset(betweenness, S, P, sigma, s, targets): + delta = dict.fromkeys(S, 0.0) + target_set = set(targets) - {s} + while S: + w = S.pop() + if w in target_set: + coeff = (delta[w] + 1.0) / sigma[w] + else: + coeff = delta[w] / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets): + """edge_betweenness_centrality_subset helper.""" + delta = dict.fromkeys(S, 0) + target_set = set(targets) + while S: + w = S.pop() + for v in P[w]: + if w in target_set: + c = (sigma[v] / sigma[w]) * (1.0 + delta[w]) + else: + c = delta[w] / len(P[w]) + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False): + """betweenness_centrality_subset helper.""" + if normalized: + if n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return 
betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False): + """edge_betweenness_centrality_subset helper.""" + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/closeness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/closeness.py new file mode 100644 index 0000000..1cc2f95 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/closeness.py @@ -0,0 +1,282 @@ +""" +Closeness centrality measures. +""" + +import functools + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils.decorators import not_implemented_for + +__all__ = ["closeness_centrality", "incremental_closeness_centrality"] + + +@nx._dispatchable(edge_attrs="distance") +def closeness_centrality(G, u=None, distance=None, wf_improved=True): + r"""Compute closeness centrality for nodes. + + Closeness centrality [1]_ of a node `u` is the reciprocal of the + average shortest path distance to `u` over all `n-1` reachable nodes. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n-1` is the number of nodes reachable from `u`. Notice that the + closeness distance function computes the incoming distance to `u` + for directed graphs. To use outward distance, act on `G.reverse()`. + + Notice that higher values of closeness indicate higher centrality. + + Wasserman and Faust propose an improved formula for graphs with + more than one connected component. The result is "a ratio of the + fraction of actors in the group who are reachable, to the average + distance" from the reachable actors [2]_. You might think this + scale factor is inverted but it is not. As is, nodes from small + components receive a smaller closeness value. Letting `N` denote + the number of nodes in the graph, + + .. math:: + + C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + Parameters + ---------- + G : graph + A NetworkX graph + + u : node, optional + Return only the value for node u + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None` (the default) all edges have a distance of 1. + Absent edge attributes are assigned a distance of 1. Note that no check + is performed to ensure that edges have the provided attribute. + + wf_improved : bool, optional (default=True) + If True, scale by the fraction of nodes reachable. This gives the + Wasserman and Faust improved formula. For single component graphs + it is the same as the original formula. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with closeness centrality as the value. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.closeness_centrality(G) + {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75} + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, incremental_closeness_centrality + + Notes + ----- + The closeness centrality is normalized to `(n-1)/(|G|-1)` where + `n` is the number of nodes in the connected part of graph + containing the node. 
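[Editor's note] A hand-checked instance of the formula above, as an editor's sketch: a 4-node path is connected, so the Wasserman-Faust factor is 1 and the plain reciprocal-of-average formula applies.

import networkx as nx

cc = nx.closeness_centrality(nx.path_graph(4))  # 0 - 1 - 2 - 3
assert abs(cc[1] - 3 / 4) < 1e-12  # distances 1 + 1 + 2 = 4
assert abs(cc[0] - 3 / 6) < 1e-12  # distances 1 + 2 + 3 = 6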
If the graph is not completely connected,
+    this algorithm computes the closeness centrality for each
+    connected part separately scaled by that part's size.
+
+    If the 'distance' keyword is set to an edge attribute key then the
+    shortest-path length will be computed using Dijkstra's algorithm with
+    that edge attribute as the edge weight.
+
+    The closeness centrality uses *inward* distance to a node, not outward.
+    If you want to use outward distances, apply the function to `G.reverse()`.
+
+    In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the
+    outward distance rather than the inward distance. If you use a 'distance'
+    keyword and a DiGraph, your results will change between v2.2 and v2.3.
+
+    References
+    ----------
+    .. [1] Linton C. Freeman: Centrality in networks: I.
+       Conceptual clarification. Social Networks 1:215-239, 1979.
+       https://doi.org/10.1016/0378-8733(78)90021-7
+    .. [2] pg. 201 of Wasserman, S. and Faust, K.,
+       Social Network Analysis: Methods and Applications, 1994,
+       Cambridge University Press.
+    """
+    if G.is_directed():
+        G = G.reverse()  # create a reversed graph view
+
+    if distance is not None:
+        # use Dijkstra's algorithm with specified attribute as edge weight
+        path_length = functools.partial(
+            nx.single_source_dijkstra_path_length, weight=distance
+        )
+    else:
+        path_length = nx.single_source_shortest_path_length
+
+    if u is None:
+        nodes = G.nodes
+    else:
+        nodes = [u]
+    closeness_dict = {}
+    for n in nodes:
+        sp = path_length(G, n)
+        totsp = sum(sp.values())
+        len_G = len(G)
+        _closeness_centrality = 0.0
+        if totsp > 0.0 and len_G > 1:
+            _closeness_centrality = (len(sp) - 1.0) / totsp
+            # normalize to number of nodes-1 in connected part
+            if wf_improved:
+                s = (len(sp) - 1.0) / (len_G - 1)
+                _closeness_centrality *= s
+        closeness_dict[n] = _closeness_centrality
+    if u is not None:
+        return closeness_dict[u]
+    return closeness_dict
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(mutates_input=True)
+def incremental_closeness_centrality(
+    G, edge, prev_cc=None, insertion=True, wf_improved=True
+):
+    r"""Incremental closeness centrality for nodes.
+
+    Compute closeness centrality for nodes using level-based work filtering
+    as described in Incremental Algorithms for Closeness Centrality by
+    Sariyuce et al.
+
+    Level-based work filtering detects unnecessary updates to the closeness
+    centrality and filters them out.
+
+    ---
+    From "Incremental Algorithms for Closeness Centrality":
+
+    Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V
+    such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)`.
+    Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`.
+
+    Where :math:`dG(u, v)` denotes the length of the shortest path between
+    two vertices u, v in a graph G, cc[s] is the closeness centrality for a
+    vertex s in V, and cc'[s] is the closeness centrality for a
+    vertex s in V, with the (u, v) edge added.
+    ---
+
+    We use Theorem 1 to filter out updates when adding or removing an edge.
+    When adding an edge (u, v), we compute the shortest path lengths from all
+    other nodes to u and to v before the edge is added. When removing an edge,
+    we compute the shortest path lengths after the edge is removed. Then we
+    apply Theorem 1 to use previously computed closeness centrality for nodes
+    where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for
+    undirected, unweighted graphs; the distance argument is not supported.
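[Editor's note] By the theorem above, the filtered update must agree with a full recomputation on an unweighted, undirected graph; an editor's consistency sketch follows. The helper leaves G as it found it, so the edge is added manually afterwards for the reference computation.

import networkx as nx

G = nx.path_graph(5)
prev = nx.closeness_centrality(G)
inc = nx.incremental_closeness_centrality(G, (0, 2), prev_cc=prev, insertion=True)
G.add_edge(0, 2)
full = nx.closeness_centrality(G)
assert all(abs(inc[v] - full[v]) < 1e-12 for v in G)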
+
+    Closeness centrality [1]_ of a node `u` is the reciprocal of the
+    sum of the shortest path distances from `u` to all `n-1` other nodes.
+    Since the sum of distances depends on the number of nodes in the
+    graph, closeness is normalized by the sum of minimum possible
+    distances `n-1`.
+
+    .. math::
+
+        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
+
+    where `d(v, u)` is the shortest-path distance between `v` and `u`,
+    and `n` is the number of nodes in the graph.
+
+    Notice that higher values of closeness indicate higher centrality.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    edge : tuple
+        The modified edge (u, v) in the graph.
+
+    prev_cc : dictionary
+        The previous closeness centrality for all nodes in the graph.
+
+    insertion : bool, optional
+        If True (default) the edge was inserted, otherwise it was deleted
+        from the graph.
+
+    wf_improved : bool, optional (default=True)
+        If True, scale by the fraction of nodes reachable. This gives the
+        Wasserman and Faust improved formula. For single component graphs
+        it is the same as the original formula.
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary of nodes with closeness centrality as the value.
+
+    See Also
+    --------
+    betweenness_centrality, load_centrality, eigenvector_centrality,
+    degree_centrality, closeness_centrality
+
+    Notes
+    -----
+    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
+    `n` is the number of nodes in the connected part of graph
+    containing the node. If the graph is not completely connected,
+    this algorithm computes the closeness centrality for each
+    connected part separately.
+
+    References
+    ----------
+    .. [1] Freeman, L.C., 1979. Centrality in networks: I.
+       Conceptual clarification. Social Networks 1, 215--239.
+       https://doi.org/10.1016/0378-8733(78)90021-7
+    .. [2] Sariyuce, A.E.; Kaya, K.; Saule, E.; Catalyürek, U.V. Incremental
+       Algorithms for Closeness Centrality.
2013 IEEE International Conference on Big Data + http://sariyuce.com/papers/bigdata13.pdf + """ + if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()): + raise NetworkXError("prev_cc and G do not have the same nodes") + + # Unpack edge + (u, v) = edge + path_length = nx.single_source_shortest_path_length + + if insertion: + # For edge insertion, we want shortest paths before the edge is inserted + du = path_length(G, u) + dv = path_length(G, v) + + G.add_edge(u, v) + else: + G.remove_edge(u, v) + + # For edge removal, we want shortest paths after the edge is removed + du = path_length(G, u) + dv = path_length(G, v) + + if prev_cc is None: + return nx.closeness_centrality(G) + + nodes = G.nodes() + closeness_dict = {} + for n in nodes: + if n in du and n in dv and abs(du[n] - dv[n]) <= 1: + closeness_dict[n] = prev_cc[n] + else: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + + # Leave the graph as we found it + if insertion: + G.remove_edge(u, v) + else: + G.add_edge(u, v) + + return closeness_dict diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py new file mode 100644 index 0000000..bfde279 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py @@ -0,0 +1,342 @@ +"""Current-flow betweenness centrality measures.""" + +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import ( + CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, + flow_matrix_row, +) +from networkx.utils import ( + not_implemented_for, + py_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "current_flow_betweenness_centrality", + "approximate_current_flow_betweenness_centrality", + "edge_current_flow_betweenness_centrality", +] + + +@not_implemented_for("directed") +@py_random_state(7) +@nx._dispatchable(edge_attrs="weight") +def approximate_current_flow_betweenness_centrality( + G, + normalized=True, + weight=None, + dtype=float, + solver="full", + epsilon=0.5, + kmax=10000, + seed=None, +): + r"""Compute the approximate current-flow betweenness centrality for nodes. + + Approximates the current-flow betweenness centrality within absolute + error of epsilon with high probability [1]_. + + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + epsilon: float + Absolute error tolerance. + + kmax: int + Maximum number of sample node pairs to use for approximation. 
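[Editor's note] A usage sketch for the approximation, an editor's illustration that assumes NumPy/SciPy are installed: with a fixed seed the run is deterministic, and the maximum deviation from the exact values should stay below epsilon with high probability.

import networkx as nx

G = nx.grid_2d_graph(4, 4)
exact = nx.current_flow_betweenness_centrality(G)
approx = nx.approximate_current_flow_betweenness_centrality(G, epsilon=0.5, seed=42)
print(max(abs(exact[v] - approx[v]) for v in G))  # expected to be below 0.5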
+ + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + current_flow_betweenness_centrality + + Notes + ----- + The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$ + and the space required is $O(m)$ for $n$ nodes and $m$ edges. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer: + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + betweenness = dict.fromkeys(H, 0.0) + nb = (n - 1.0) * (n - 2.0) # normalization factor + cstar = n * (n - 1) / nb + l = 1 # parameter in approximation, adjustable + k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n))) + if k > kmax: + msg = f"Number random pairs k>kmax ({k}>{kmax}) " + raise nx.NetworkXError(msg, "Increase kmax or epsilon") + cstar2k = cstar / (2 * k) + for _ in range(k): + s, t = pair = seed.sample(range(n), 2) + b = np.zeros(n, dtype=dtype) + b[s] = 1 + b[t] = -1 + p = C.solve(b) + for v in H: + if v in pair: + continue + for nbr in H[v]: + w = H[v][nbr].get(weight, 1.0) + betweenness[v] += float(w * np.abs(p[v] - p[nbr]) * cstar2k) + if normalized: + factor = 1.0 + else: + factor = nb / 2.0 + # remap to original node names and "unnormalize" if required + return {ordering[k]: v * factor for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. 
+ Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + N = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(N)))) + betweenness = dict.fromkeys(H, 0.0) # b[n]=0 for n in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(N))) + for i in range(N): + betweenness[s] += (i - pos[i]) * row.item(i) + betweenness[t] += (N - i - 1 - pos[i]) * row.item(i) + if normalized: + nb = (N - 1.0) * (N - 2.0) # normalization factor + else: + nb = 2.0 + return {ordering[n]: (b - n) * 2.0 / nb for n, b in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def edge_current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for edges. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (default=float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of edge tuples with betweenness centrality as the value. 
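A quick usage sketch for the edge variant (an illustration, not part of the module; it assumes SciPy is installed, which the Laplacian and flow-matrix machinery require). In a cycle every edge is equivalent by symmetry, so all edge scores coincide::

    >>> import networkx as nx
    >>> G = nx.cycle_graph(4)
    >>> ecf = nx.edge_current_flow_betweenness_centrality(G)
    >>> len({round(v, 8) for v in ecf.values()})  # one distinct score
    1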
+ + Raises + ------ + NetworkXError + The algorithm does not support DiGraphs. + If the input graph is an instance of DiGraph class, NetworkXError + is raised. + + See Also + -------- + betweenness_centrality + edge_betweenness_centrality + current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + N = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(N)))) + edges = (tuple(sorted((u, v))) for u, v in H.edges()) + betweenness = dict.fromkeys(edges, 0.0) + if normalized: + nb = (N - 1.0) * (N - 2.0) # normalization factor + else: + nb = 2.0 + for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(1, N + 1))) + for i in range(N): + betweenness[e] += (i + 1 - pos[i]) * row.item(i) + betweenness[e] += (N - i - pos[i]) * row.item(i) + betweenness[e] /= nb + return {(ordering[s], ordering[t]): b for (s, t), b in betweenness.items()} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py new file mode 100644 index 0000000..911718c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py @@ -0,0 +1,227 @@ +"""Current-flow betweenness centrality measures for subsets of nodes.""" + +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import flow_matrix_row +from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering + +__all__ = [ + "current_flow_betweenness_centrality_subset", + "edge_current_flow_betweenness_centrality_subset", +] + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def current_flow_betweenness_centrality_subset( + G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu" +): + r"""Compute current-flow betweenness centrality for subsets of nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. 
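A minimal call sketch (an illustration; only the node keys, not the scores, are asserted): with a single source and a single sink at the ends of a path, all current is forced through the interior nodes::

    >>> import networkx as nx
    >>> G = nx.path_graph(5)
    >>> cb = nx.current_flow_betweenness_centrality_subset(
    ...     G, sources=[0], targets=[4]
    ... )
    >>> sorted(cb)  # scores are keyed by node
    [0, 1, 2, 3, 4]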
+ + Parameters + ---------- + G : graph + A NetworkX graph + + sources: list of nodes + Nodes to use as sources for current + + targets: list of nodes + Nodes to use as sinks for current + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + import numpy as np + + from networkx.utils import reverse_cuthill_mckee_ordering + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + N = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + mapping = dict(zip(ordering, range(N))) + H = nx.relabel_nodes(G, mapping) + betweenness = dict.fromkeys(H, 0.0) # b[n]=0 for n in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + for ss in sources: + i = mapping[ss] + for tt in targets: + j = mapping[tt] + betweenness[s] += 0.5 * abs(row.item(i) - row.item(j)) + betweenness[t] += 0.5 * abs(row.item(i) - row.item(j)) + if normalized: + nb = (N - 1.0) * (N - 2.0) # normalization factor + else: + nb = 2.0 + for node in H: + betweenness[node] = betweenness[node] / nb + 1.0 / (2 - N) + return {ordering[node]: value for node, value in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def edge_current_flow_betweenness_centrality_subset( + G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu" +): + r"""Compute current-flow betweenness centrality for edges using subsets + of nodes. 
+ + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + sources: list of nodes + Nodes to use as sources for current + + targets: list of nodes + Nodes to use as sinks for current + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dict + Dictionary of edge tuples with betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_betweenness_centrality + current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). 
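A small self-check (an assumed example, not module code; SciPy is required for the default "lu" solver): with one source and one sink on a path graph, the same unit current crosses every edge, so all edge scores agree::

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> ecb = nx.edge_current_flow_betweenness_centrality_subset(
    ...     G, sources=[0], targets=[3]
    ... )
    >>> len({round(v, 8) for v in ecb.values()})  # one distinct score
    1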
+ """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + N = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + mapping = dict(zip(ordering, range(N))) + H = nx.relabel_nodes(G, mapping) + edges = (tuple(sorted((u, v))) for u, v in H.edges()) + betweenness = dict.fromkeys(edges, 0.0) + if normalized: + nb = (N - 1.0) * (N - 2.0) # normalization factor + else: + nb = 2.0 + for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + for ss in sources: + i = mapping[ss] + for tt in targets: + j = mapping[tt] + betweenness[e] += 0.5 * abs(row.item(i) - row.item(j)) + betweenness[e] /= nb + return {(ordering[s], ordering[t]): value for (s, t), value in betweenness.items()} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_closeness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_closeness.py new file mode 100644 index 0000000..67f8639 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_closeness.py @@ -0,0 +1,96 @@ +"""Current-flow closeness centrality measures.""" + +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import ( + CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, +) +from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering + +__all__ = ["current_flow_closeness_centrality", "information_centrality"] + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"): + """Compute current-flow closeness centrality for nodes. + + Current-flow closeness centrality is variant of closeness + centrality based on effective resistance between nodes in + a network. This metric is also known as information centrality. + + Parameters + ---------- + G : graph + A NetworkX graph. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (default=float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with current flow closeness centrality as the value. + + See Also + -------- + closeness_centrality + + Notes + ----- + The algorithm is from Brandes [1]_. + + See also [2]_ for the original definition of information centrality. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer, + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] Karen Stephenson and Marvin Zelen: + Rethinking centrality: Methods and examples. + Social Networks 11(1):1-37, 1989. 
+ https://doi.org/10.1016/0378-8733(89)90016-6 + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + N = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(N)))) + betweenness = dict.fromkeys(H, 0.0) # b[n]=0 for n in H + N = H.number_of_nodes() + L = nx.laplacian_matrix(H, nodelist=range(N), weight=weight).asformat("csc") + L = L.astype(dtype) + C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver + for v in H: + col = C2.get_row(v) + for w in H: + betweenness[v] += col.item(v) - 2 * col.item(w) + betweenness[w] += col.item(v) + return {ordering[node]: 1 / value for node, value in betweenness.items()} + + +information_centrality = current_flow_closeness_centrality diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/degree_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/degree_alg.py new file mode 100644 index 0000000..395f1ac --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/degree_alg.py @@ -0,0 +1,150 @@ +"""Degree centrality measures.""" + +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"] + + +@nx._dispatchable +def degree_centrality(G): + """Compute the degree centrality for nodes. + + The degree centrality for a node v is the fraction of nodes it + is connected to. + + Parameters + ---------- + G : graph + A networkx graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with degree centrality as the value. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.degree_centrality(G) + {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666} + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return dict.fromkeys(G, 1) + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.degree()} + return centrality + + +@not_implemented_for("undirected") +@nx._dispatchable +def in_degree_centrality(G): + """Compute the in-degree centrality for nodes. + + The in-degree centrality for a node v is the fraction of nodes its + incoming edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with in-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. 
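The computation is simply each node's in-degree scaled by 1/(n-1); a quick equivalence check (an illustrative sketch)::

    >>> import networkx as nx
    >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    >>> s = 1 / (len(G) - 1)
    >>> {n: d * s for n, d in G.in_degree()} == nx.in_degree_centrality(G)
    True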
+ + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.in_degree_centrality(G) + {0: 0.0, 1: 0.3333333333333333, 2: 0.6666666666666666, 3: 0.6666666666666666} + + See Also + -------- + degree_centrality, out_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return dict.fromkeys(G, 1) + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.in_degree()} + return centrality + + +@not_implemented_for("undirected") +@nx._dispatchable +def out_degree_centrality(G): + """Compute the out-degree centrality for nodes. + + The out-degree centrality for a node v is the fraction of nodes its + outgoing edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with out-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.out_degree_centrality(G) + {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0} + + See Also + -------- + degree_centrality, in_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return dict.fromkeys(G, 1) + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.out_degree()} + return centrality diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/dispersion.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/dispersion.py new file mode 100644 index 0000000..a3fa685 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/dispersion.py @@ -0,0 +1,107 @@ +from itertools import combinations + +import networkx as nx + +__all__ = ["dispersion"] + + +@nx._dispatchable +def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0): + r"""Calculate dispersion between `u` and `v` in `G`. + + A link between two actors (`u` and `v`) has a high dispersion when their + mutual ties (`s` and `t`) are not well connected with each other. + + Parameters + ---------- + G : graph + A NetworkX graph. + u : node, optional + The source for the dispersion score (e.g. ego node of the network). + v : node, optional + The target of the dispersion score if specified. + normalized : bool + If True (default) normalize by the embeddedness of the nodes (u and v). + alpha, b, c : float + Parameters for the normalization procedure. When `normalized` is True, + the dispersion value is normalized by:: + + result = ((dispersion + b) ** alpha) / (embeddedness + c) + + as long as the denominator is nonzero. + + Returns + ------- + nodes : dictionary + If u (v) is specified, returns a dictionary of nodes with dispersion + score for all "target" ("source") nodes. If neither u nor v is + specified, returns a dictionary of dictionaries for all nodes 'u' in the + graph with a dispersion score for each node 'v'. 
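For concreteness, a tiny worked case (an illustration, not part of the module): u and v share two mutual ties s and t that are neither adjacent nor linked through a common third neighbor, so the raw dispersion is 1 and the embeddedness is 2, giving ``(1 + b) ** alpha / (2 + c) = 0.5`` with the default parameters::

    >>> import networkx as nx
    >>> G = nx.Graph([("u", "v"), ("u", "s"), ("u", "t"), ("v", "s"), ("v", "t")])
    >>> nx.dispersion(G, u="u", v="v")
    0.5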
+ + Notes + ----- + This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical + usage would be to run dispersion on the ego network $G_u$ if $u$ were + specified. Running :func:`dispersion` with neither $u$ nor $v$ specified + can take some time to complete. + + References + ---------- + .. [1] Romantic Partnerships and the Dispersion of Social Ties: + A Network Analysis of Relationship Status on Facebook. + Lars Backstrom, Jon Kleinberg. + https://arxiv.org/pdf/1310.6753v1.pdf + + """ + + def _dispersion(G_u, u, v): + """dispersion for all nodes 'v' in a ego network G_u of node 'u'""" + u_nbrs = set(G_u[u]) + ST = {n for n in G_u[v] if n in u_nbrs} + set_uv = {u, v} + # all possible ties of connections that u and b share + possib = combinations(ST, 2) + total = 0 + for s, t in possib: + # neighbors of s that are in G_u, not including u and v + nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv + # s and t are not directly connected + if t not in nbrs_s: + # s and t do not share a connection + if nbrs_s.isdisjoint(G_u[t]): + # tick for disp(u, v) + total += 1 + # neighbors that u and v share + embeddedness = len(ST) + + dispersion_val = total + if normalized: + dispersion_val = (total + b) ** alpha + if embeddedness + c != 0: + dispersion_val /= embeddedness + c + + return dispersion_val + + if u is None: + # v and u are not specified + if v is None: + results = {n: {} for n in G} + for u in G: + for v in G[u]: + results[u][v] = _dispersion(G, u, v) + # u is not specified, but v is + else: + results = dict.fromkeys(G[v], {}) + for u in G[v]: + results[u] = _dispersion(G, v, u) + else: + # u is specified with no target v + if v is None: + results = dict.fromkeys(G[u], {}) + for v in G[u]: + results[v] = _dispersion(G, u, v) + # both u and v are specified + else: + results = _dispersion(G, u, v) + + return results diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/eigenvector.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/eigenvector.py new file mode 100644 index 0000000..0bfb974 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/eigenvector.py @@ -0,0 +1,357 @@ +"""Functions for computing eigenvector centrality.""" + +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["eigenvector_centrality", "eigenvector_centrality_numpy"] + + +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, weight=None): + r"""Compute the eigenvector centrality for the graph G. + + Eigenvector centrality computes the centrality for a node by adding + the centrality of its predecessors. The centrality for node $i$ is the + $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$ + of maximum modulus that is positive. Such an eigenvector $x$ is + defined up to a multiplicative constant by the equation + + .. math:: + + \lambda x^T = x^T A, + + where $A$ is the adjacency matrix of the graph G. By definition of + row-column product, the equation above is equivalent to + + .. math:: + + \lambda x_i = \sum_{j\to i}x_j. + + That is, adding the eigenvector centralities of the predecessors of + $i$ one obtains the eigenvector centrality of $i$ multiplied by + $\lambda$. In the case of undirected graphs, $x$ also solves the familiar + right-eigenvector equation $Ax = \lambda x$. 
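A compact NumPy sketch of this fixed point (an illustration, separate from the routine itself): for the path on three nodes, power iteration on A + I, the shifted matrix discussed in the Notes below, converges to the dominant eigenvector (1, sqrt(2), 1)/2::

    >>> import numpy as np
    >>> A = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])  # path 0-1-2
    >>> x = np.ones(3)
    >>> for _ in range(100):
    ...     x = (A + np.eye(3)) @ x  # shifted power step
    ...     x /= np.linalg.norm(x)
    >>> bool(np.allclose(x, [0.5, np.sqrt(2) / 2, 0.5]))
    True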
+ + By virtue of the Perron–Frobenius theorem [1]_, if G is strongly + connected there is a unique eigenvector $x$, and all its entries + are strictly positive. + + If G is not strongly connected there might be several left + eigenvectors associated with $\lambda$, and some of their elements + might be zero. + + Parameters + ---------- + G : graph + A networkx graph. + + max_iter : integer, optional (default=100) + Maximum number of power iterations. + + tol : float, optional (default=1.0e-6) + Error tolerance (in Euclidean norm) used to check convergence in + power iteration. + + nstart : dictionary, optional (default=None) + Starting value of power iteration for each node. Must have a nonzero + projection on the desired eigenvector for the power method to converge. + If None, this implementation uses an all-ones vector, which is a safe + choice. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. Otherwise holds the + name of the edge attribute used as weight. In this measure the + weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with eigenvector centrality as the value. The + associated vector has unit Euclidean norm and the values are + nonegative. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality(G) + >>> sorted((v, f"{c:0.2f}") for v, c in centrality.items()) + [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')] + + Raises + ------ + NetworkXPointlessConcept + If the graph G is the null graph. + + NetworkXError + If each value in `nstart` is zero. + + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + See Also + -------- + eigenvector_centrality_numpy + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Eigenvector centrality was introduced by Landau [2]_ for chess + tournaments. It was later rediscovered by Wei [3]_ and then + popularized by Kendall [4]_ in the context of sport ranking. Berge + introduced a general definition for graphs based on social connections + [5]_. Bonacich [6]_ reintroduced again eigenvector centrality and made + it popular in link analysis. + + This function computes the left dominant eigenvector, which corresponds + to adding the centrality of predecessors: this is the usual approach. + To add the centrality of successors first reverse the graph with + ``G.reverse()``. + + The implementation uses power iteration [7]_ to compute a dominant + eigenvector starting from the provided vector `nstart`. Convergence is + guaranteed as long as `nstart` has a nonzero projection on a dominant + eigenvector, which certainly happens using the default value. + + The method stops when the change in the computed vector between two + iterations is smaller than an error tolerance of ``G.number_of_nodes() + * tol`` or after ``max_iter`` iterations, but in the second case it + raises an exception. + + This implementation uses $(A + I)$ rather than the adjacency matrix + $A$ because the change preserves eigenvectors, but it shifts the + spectrum, thus guaranteeing convergence even for networks with + negative eigenvalues of maximum modulus. + + References + ---------- + .. [1] Abraham Berman and Robert J. Plemmons. + "Nonnegative Matrices in the Mathematical Sciences." 
+ Classics in Applied Mathematics. SIAM, 1994. + + .. [2] Edmund Landau. + "Zur relativen Wertbemessung der Turnierresultate." + Deutsches Wochenschach, 11:366–369, 1895. + + .. [3] Teh-Hsing Wei. + "The Algebraic Foundations of Ranking Theory." + PhD thesis, University of Cambridge, 1952. + + .. [4] Maurice G. Kendall. + "Further contributions to the theory of paired comparisons." + Biometrics, 11(1):43–62, 1955. + https://www.jstor.org/stable/3001479 + + .. [5] Claude Berge + "Théorie des graphes et ses applications." + Dunod, Paris, France, 1958. + + .. [6] Phillip Bonacich. + "Technique for analyzing overlapping memberships." + Sociological Methodology, 4:176–185, 1972. + https://www.jstor.org/stable/270732 + + .. [7] Power iteration:: https://en.wikipedia.org/wiki/Power_iteration + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "cannot compute centrality for the null graph" + ) + # If no initial vector is provided, start with the all-ones vector. + if nstart is None: + nstart = dict.fromkeys(G, 1) + if all(v == 0 for v in nstart.values()): + raise nx.NetworkXError("initial vector cannot have all zero values") + # Normalize the initial vector so that each entry is in [0, 1]. This is + # guaranteed to never have a divide-by-zero error by the previous line. + nstart_sum = sum(nstart.values()) + x = {k: v / nstart_sum for k, v in nstart.items()} + nnodes = G.number_of_nodes() + # make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = xlast.copy() # Start with xlast times I to iterate with (A+I) + # do the multiplication y^T = x^T A (left eigenvector) + for n in x: + for nbr in G[n]: + w = G[n][nbr].get(weight, 1) if weight else 1 + x[nbr] += xlast[n] * w + # Normalize the vector. The normalization denominator `norm` + # should never be zero by the Perron--Frobenius + # theorem. However, in case it is due to numerical error, we + # assume the norm to be one instead. + norm = math.hypot(*x.values()) or 1 + x = {k: v / norm for k, v in x.items()} + # Check for convergence (in the L_1 norm). + if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol: + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +@nx._dispatchable(edge_attrs="weight") +def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0): + r"""Compute the eigenvector centrality for the graph `G`. + + Eigenvector centrality computes the centrality for a node by adding + the centrality of its predecessors. The centrality for node $i$ is the + $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$ + of maximum modulus that is positive. Such an eigenvector $x$ is + defined up to a multiplicative constant by the equation + + .. math:: + + \lambda x^T = x^T A, + + where $A$ is the adjacency matrix of the graph `G`. By definition of + row-column product, the equation above is equivalent to + + .. math:: + + \lambda x_i = \sum_{j\to i}x_j. + + That is, adding the eigenvector centralities of the predecessors of + $i$ one obtains the eigenvector centrality of $i$ multiplied by + $\lambda$. In the case of undirected graphs, $x$ also solves the familiar + right-eigenvector equation $Ax = \lambda x$. + + By virtue of the Perron--Frobenius theorem [1]_, if `G` is (strongly) + connected, there is a unique eigenvector $x$, and all its entries + are strictly positive. + + However, if `G` is not (strongly) connected, there might be several left + eigenvectors associated with $\lambda$, and some of their elements + might be zero. 
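As a sanity check, the iterative and the ARPACK-based routines should agree on a connected graph to well within the iteration tolerance (a sketch assuming SciPy is available)::

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> it = nx.eigenvector_centrality(G, tol=1e-10)
    >>> ar = nx.eigenvector_centrality_numpy(G)
    >>> all(abs(it[n] - ar[n]) < 1e-5 for n in G)
    True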
+ Depending on the method used to choose eigenvectors, round-off error can affect + which of the infinitely many eigenvectors is reported. + This can lead to inconsistent results for the same graph, + which the underlying implementation is not robust to. + For this reason, only (strongly) connected graphs are accepted. + + Parameters + ---------- + G : graph + A connected NetworkX graph. + + weight : None or string, optional (default=None) + If ``None``, all edge weights are considered equal. Otherwise holds the + name of the edge attribute used as weight. In this measure the + weight is interpreted as the connection strength. + + max_iter : integer, optional (default=50) + Maximum number of Arnoldi update iterations allowed. + + tol : float, optional (default=0) + Relative accuracy for eigenvalues (stopping criterion). + The default value of 0 implies machine precision. + + Returns + ------- + nodes : dict of nodes + Dictionary of nodes with eigenvector centrality as the value. The + associated vector has unit Euclidean norm and the values are + nonnegative. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality_numpy(G) + >>> print([f"{node} {centrality[node]:0.2f}" for node in centrality]) + ['0 0.37', '1 0.60', '2 0.60', '3 0.37'] + + Raises + ------ + NetworkXPointlessConcept + If the graph `G` is the null graph. + + ArpackNoConvergence + When the requested convergence is not obtained. The currently + converged eigenvalues and eigenvectors can be found as + eigenvalues and eigenvectors attributes of the exception object. + + AmbiguousSolution + If `G` is not connected. + + See Also + -------- + :func:`scipy.sparse.linalg.eigs` + eigenvector_centrality + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Eigenvector centrality was introduced by Landau [2]_ for chess + tournaments. It was later rediscovered by Wei [3]_ and then + popularized by Kendall [4]_ in the context of sport ranking. Berge + introduced a general definition for graphs based on social connections + [5]_. Bonacich [6]_ reintroduced again eigenvector centrality and made + it popular in link analysis. + + This function computes the left dominant eigenvector, which corresponds + to adding the centrality of predecessors: this is the usual approach. + To add the centrality of successors first reverse the graph with + ``G.reverse()``. + + This implementation uses the + :func:`SciPy sparse eigenvalue solver` (ARPACK) + to find the largest eigenvalue/eigenvector pair using Arnoldi iterations + [7]_. + + References + ---------- + .. [1] Abraham Berman and Robert J. Plemmons. + "Nonnegative Matrices in the Mathematical Sciences". + Classics in Applied Mathematics. SIAM, 1994. + + .. [2] Edmund Landau. + "Zur relativen Wertbemessung der Turnierresultate". + Deutsches Wochenschach, 11:366--369, 1895. + + .. [3] Teh-Hsing Wei. + "The Algebraic Foundations of Ranking Theory". + PhD thesis, University of Cambridge, 1952. + + .. [4] Maurice G. Kendall. + "Further contributions to the theory of paired comparisons". + Biometrics, 11(1):43--62, 1955. + https://www.jstor.org/stable/3001479 + + .. [5] Claude Berge. + "Théorie des graphes et ses applications". + Dunod, Paris, France, 1958. + + .. [6] Phillip Bonacich. + "Technique for analyzing overlapping memberships". + Sociological Methodology, 4:176--185, 1972. + https://www.jstor.org/stable/270732 + + .. [7] Arnoldi, W. E. (1951). 
+ "The principle of minimized iterations in the solution of the matrix eigenvalue problem". + Quarterly of Applied Mathematics. 9 (1): 17--29. + https://doi.org/10.1090/qam/42792 + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "cannot compute centrality for the null graph" + ) + connected = nx.is_strongly_connected(G) if G.is_directed() else nx.is_connected(G) + if not connected: # See gh-6888. + raise nx.AmbiguousSolution( + "`eigenvector_centrality_numpy` does not give consistent results for disconnected graphs" + ) + M = nx.to_scipy_sparse_array(G, nodelist=list(G), weight=weight, dtype=float) + _, eigenvector = sp.sparse.linalg.eigs( + M.T, k=1, which="LR", maxiter=max_iter, tol=tol + ) + largest = eigenvector.flatten().real + norm = np.sign(largest.sum()) * sp.linalg.norm(largest) + return dict(zip(G, (largest / norm).tolist())) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/flow_matrix.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/flow_matrix.py new file mode 100644 index 0000000..e72b5e9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/flow_matrix.py @@ -0,0 +1,130 @@ +# Helpers for current-flow betweenness and current-flow closeness +# Lazy computations for inverse Laplacian and flow-matrix rows. +import networkx as nx + + +@nx._dispatchable(edge_attrs="weight") +def flow_matrix_row(G, weight=None, dtype=float, solver="lu"): + # Generate a row of the current-flow matrix + import numpy as np + + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + L = nx.laplacian_matrix(G, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + w = C.w # w is the Laplacian matrix width + # row-by-row flow matrix + for u, v in sorted(sorted((u, v)) for u, v in G.edges()): + B = np.zeros(w, dtype=dtype) + c = G[u][v].get(weight, 1.0) + B[u % w] = c + B[v % w] = -c + # get only the rows needed in the inverse laplacian + # and multiply to get the flow matrix row + row = B @ C.get_rows(u, v) + yield row, (u, v) + + +# Class to compute the inverse laplacian only for specified rows +# Allows computation of the current-flow matrix without storing entire +# inverse laplacian matrix +class InverseLaplacian: + def __init__(self, L, width=None, dtype=None): + global np + import numpy as np + + (n, n) = L.shape + self.dtype = dtype + self.n = n + if width is None: + self.w = self.width(L) + else: + self.w = width + self.C = np.zeros((self.w, n), dtype=dtype) + self.L1 = L[1:, 1:] + self.init_solver(L) + + def init_solver(self, L): + pass + + def solve(self, r): + raise nx.NetworkXError("Implement solver") + + def solve_inverse(self, r): + raise nx.NetworkXError("Implement solver") + + def get_rows(self, r1, r2): + for r in range(r1, r2 + 1): + self.C[r % self.w, 1:] = self.solve_inverse(r) + return self.C + + def get_row(self, r): + self.C[r % self.w, 1:] = self.solve_inverse(r) + return self.C[r % self.w] + + def width(self, L): + m = 0 + for i, row in enumerate(L): + w = 0 + y = np.nonzero(row)[-1] + if len(y) > 0: + v = y - i + w = v.max() - v.min() + 1 + m = max(w, m) + return m + + +class FullInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + self.IL = np.zeros(L.shape, dtype=self.dtype) + self.IL[1:, 1:] = np.linalg.inv(self.L1.todense()) + + def solve(self, rhs): + s = 
np.zeros(rhs.shape, dtype=self.dtype) + s = self.IL @ rhs + return s + + def solve_inverse(self, r): + return self.IL[r, 1:] + + +class SuperLUInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + import scipy as sp + + self.lusolve = sp.sparse.linalg.factorized(self.L1.tocsc()) + + def solve_inverse(self, r): + rhs = np.zeros(self.n, dtype=self.dtype) + rhs[r] = 1 + return self.lusolve(rhs[1:]) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s[1:] = self.lusolve(rhs[1:]) + return s + + +class CGInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + global sp + import scipy as sp + + ilu = sp.sparse.linalg.spilu(self.L1.tocsc()) + n = self.n - 1 + self.M = sp.sparse.linalg.LinearOperator(shape=(n, n), matvec=ilu.solve) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s[1:] = sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0] + return s + + def solve_inverse(self, r): + rhs = np.zeros(self.n, self.dtype) + rhs[r] = 1 + return sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/group.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/group.py new file mode 100644 index 0000000..cff1d47 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/group.py @@ -0,0 +1,787 @@ +"""Group centrality measures.""" + +from copy import deepcopy + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _accumulate_endpoints, + _single_source_dijkstra_path_basic, + _single_source_shortest_path_basic, +) +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "group_betweenness_centrality", + "group_closeness_centrality", + "group_degree_centrality", + "group_in_degree_centrality", + "group_out_degree_centrality", + "prominent_group", +] + + +@nx._dispatchable(edge_attrs="weight") +def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False): + r"""Compute the group betweenness centrality for a group of nodes. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + C : list or set or list of lists or list of sets + A group or a list of groups containing nodes which belong to G, for which group betweenness + centrality is to be calculated. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))` + where `|V|` is the number of nodes in G and `|C|` is the number of nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. 
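A usage sketch (an illustration; the expected value follows from the definition above rather than from an official example): in a five-node path the group {1, 3} lies on every shortest path between the remaining nodes, so its normalized group betweenness is maximal::

    >>> import networkx as nx
    >>> G = nx.path_graph(5)
    >>> round(nx.group_betweenness_centrality(G, [1, 3]), 6)
    1.0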
+ + Returns + ------- + betweenness : list of floats or float + If C is a single group then return a float. If C is a list with + several groups then return a list of group betweenness centralities. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The initial implementation of the algorithm is mentioned in [2]_. This function uses + an improved algorithm presented in [4]_. + + The number of nodes in the group must be a maximum of n - 2 where `n` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Ulrik Brandes: + On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf + .. [3] Sourav Medya et. al.: + Group Centrality Maximization via Network Design. + SIAM International Conference on Data Mining, SDM 2018, 126–134. + https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf + .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. + "Fast algorithm for successive computation of group betweenness centrality." 
+ https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 + + """ + GBC = [] # initialize betweenness + list_of_groups = True + # check weather C contains one or many groups + if any(el in G for el in C): + C = [C] + list_of_groups = False + set_v = {node for group in C for node in group} + if set_v - G.nodes: # element(s) of C not in G + raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.") + + # pre-processing + PB, sigma, D = _group_preprocessing(G, set_v, weight) + + # the algorithm for each group + for group in C: + group = set(group) # set of nodes in group + # initialize the matrices of the sigma and the PB + GBC_group = 0 + sigma_m = deepcopy(sigma) + PB_m = deepcopy(PB) + sigma_m_v = deepcopy(sigma_m) + PB_m_v = deepcopy(PB_m) + for v in group: + GBC_group += PB_m[v][v] + for x in group: + for y in group: + dxvy = 0 + dxyv = 0 + dvxy = 0 + if not ( + sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0 + ): + if D[x][v] == D[x][y] + D[y][v]: + dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v] + if D[x][y] == D[x][v] + D[v][y]: + dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y] + if D[v][y] == D[v][x] + D[x][y]: + dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y] + sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy) + PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy + if y != v: + PB_m_v[x][y] -= PB_m[x][v] * dxyv + if x != v: + PB_m_v[x][y] -= PB_m[v][y] * dvxy + sigma_m, sigma_m_v = sigma_m_v, sigma_m + PB_m, PB_m_v = PB_m_v, PB_m + + # endpoints + v, c = len(G), len(group) + if not endpoints: + scale = 0 + # if the graph is connected then subtract the endpoints from + # the count for all the nodes in the graph. else count how many + # nodes are connected to the group's nodes and subtract that. + if nx.is_directed(G): + if nx.is_strongly_connected(G): + scale = c * (2 * v - c - 1) + elif nx.is_connected(G): + scale = c * (2 * v - c - 1) + if scale == 0: + for group_node1 in group: + for node in D[group_node1]: + if node != group_node1: + if node in group: + scale += 1 + else: + scale += 2 + GBC_group -= scale + + # normalized + if normalized: + scale = 1 / ((v - c) * (v - c - 1)) + GBC_group *= scale + + # If undirected than count only the undirected edges + elif not G.is_directed(): + GBC_group /= 2 + + GBC.append(GBC_group) + if list_of_groups: + return GBC + return GBC[0] + + +def _group_preprocessing(G, set_v, weight): + sigma = {} + delta = {} + D = {} + betweenness = dict.fromkeys(G, 0) + for s in G: + if weight is None: # use BFS + S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight) + betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s) + for i in delta[s]: # add the paths from s to i and rescale sigma + if s != i: + delta[s][i] += 1 + if weight is not None: + sigma[s][i] = sigma[s][i] / 2 + # building the path betweenness matrix only for nodes that appear in the group + PB = dict.fromkeys(G) + for group_node1 in set_v: + PB[group_node1] = dict.fromkeys(G, 0.0) + for group_node2 in set_v: + if group_node2 not in D[group_node1]: + continue + for node in G: + # if node is connected to the two group nodes than continue + if group_node2 in D[node] and group_node1 in D[node]: + if ( + D[node][group_node2] + == D[node][group_node1] + D[group_node1][group_node2] + ): + PB[group_node1][group_node2] += ( + delta[node][group_node2] + * sigma[node][group_node1] + * sigma[group_node1][group_node2] + / 
sigma[node][group_node2] + ) + return PB, sigma, D + + +@nx._dispatchable(edge_attrs="weight") +def prominent_group( + G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False +): + r"""Find the prominent group of size $k$ in graph $G$. The prominence of the + group is evaluated by the group betweenness centrality. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int + The number of nodes in the group. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by ``1/((|V|-|C|)(|V|-|C|-1))`` + where ``|V|`` is the number of nodes in G and ``|C|`` is the number of + nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + C : list or set, optional (default=None) + list of nodes which won't be candidates of the prominent group. + + greedy : bool, optional (default=False) + Using a naive greedy algorithm in order to find non-optimal prominent + group. For scale free networks the results are negligibly below the optimal + results. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. + + Returns + ------- + max_GBC : float + The group betweenness centrality of the prominent group. + + max_group : list + The list of nodes in the prominent group. + + See Also + -------- + betweenness_centrality, group_betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The algorithm is described in [2]_ and is based on techniques mentioned in [4]_. + + The number of nodes in the group must be a maximum of ``n - 2`` where ``n`` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Rami Puzis, Yuval Elovici, and Shlomi Dolev: + "Finding the Most Prominent Group in Complex Networks" + AI communications 20(4): 287-296, 2007. + https://www.researchgate.net/profile/Rami_Puzis2/publication/220308855 + .. [3] Sourav Medya et. 
al.: + Group Centrality Maximization via Network Design. + SIAM International Conference on Data Mining, SDM 2018, 126–134. + https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf + .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. + "Fast algorithm for successive computation of group betweenness centrality." + https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 + """ + import numpy as np + import pandas as pd + + if C is not None: + C = set(C) + if C - G.nodes: # element(s) of C not in G + raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.") + nodes = list(G.nodes - C) + else: + nodes = list(G.nodes) + DF_tree = nx.Graph() + DF_tree.__networkx_cache__ = None # Disable caching + PB, sigma, D = _group_preprocessing(G, nodes, weight) + betweenness = pd.DataFrame.from_dict(PB) + if C is not None: + for node in C: + # remove from the betweenness all the nodes not part of the group + betweenness = betweenness.drop(index=node) + betweenness = betweenness.drop(columns=node) + CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)] + max_GBC = 0 + max_group = [] + DF_tree.add_node( + 1, + CL=CL, + betweenness=betweenness, + GBC=0, + GM=[], + sigma=sigma, + cont=dict(zip(nodes, np.diag(betweenness))), + ) + + # the algorithm + DF_tree.nodes[1]["heu"] = 0 + for i in range(k): + DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]] + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy + ) + + v = len(G) + if not endpoints: + scale = 0 + # if the graph is connected then subtract the endpoints from + # the count for all the nodes in the graph. else count how many + # nodes are connected to the group's nodes and subtract that. + if nx.is_directed(G): + if nx.is_strongly_connected(G): + scale = k * (2 * v - k - 1) + elif nx.is_connected(G): + scale = k * (2 * v - k - 1) + if scale == 0: + for group_node1 in max_group: + for node in D[group_node1]: + if node != group_node1: + if node in max_group: + scale += 1 + else: + scale += 2 + max_GBC -= scale + + # normalized + if normalized: + scale = 1 / ((v - k) * (v - k - 1)) + max_GBC *= scale + + # If undirected then count only the undirected edges + elif not G.is_directed(): + max_GBC /= 2 + max_GBC = float(f"{max_GBC:.2f}") + return max_GBC, max_group + + +def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy): + # stopping condition - if we found a group of size k and with higher GBC then prune + if len(DF_tree.nodes[root]["GM"]) == k and DF_tree.nodes[root]["GBC"] > max_GBC: + return DF_tree.nodes[root]["GBC"], DF_tree, DF_tree.nodes[root]["GM"] + # stopping condition - if the size of group members equal to k or there are less than + # k - |GM| in the candidate list or the heuristic function plus the GBC is below the + # maximal GBC found then prune + if ( + len(DF_tree.nodes[root]["GM"]) == k + or len(DF_tree.nodes[root]["CL"]) <= k - len(DF_tree.nodes[root]["GM"]) + or DF_tree.nodes[root]["GBC"] + DF_tree.nodes[root]["heu"] <= max_GBC + ): + return max_GBC, DF_tree, max_group + + # finding the heuristic of both children + node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy) + + # finding the child with the bigger heuristic + GBC and expand + # that node first if greedy then only expand the plus node + if greedy: + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + + elif ( + DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"] + > 
DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"] + ): + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy + ) + else: + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy + ) + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + return max_GBC, DF_tree, max_group + + +def _heuristic(k, root, DF_tree, D, nodes, greedy): + import numpy as np + + # This helper function add two nodes to DF_tree - one left son and the + # other right son, finds their heuristic, CL, GBC, and GM + node_p = DF_tree.number_of_nodes() + 1 + node_m = DF_tree.number_of_nodes() + 2 + added_node = DF_tree.nodes[root]["CL"][0] + + # adding the plus node + DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))]) + DF_tree.nodes[node_p]["GM"].append(added_node) + DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node] + root_node = DF_tree.nodes[root] + for x in nodes: + for y in nodes: + dxvy = 0 + dxyv = 0 + dvxy = 0 + if not ( + root_node["sigma"][x][y] == 0 + or root_node["sigma"][x][added_node] == 0 + or root_node["sigma"][added_node][y] == 0 + ): + if D[x][added_node] == D[x][y] + D[y][added_node]: + dxyv = ( + root_node["sigma"][x][y] + * root_node["sigma"][y][added_node] + / root_node["sigma"][x][added_node] + ) + if D[x][y] == D[x][added_node] + D[added_node][y]: + dxvy = ( + root_node["sigma"][x][added_node] + * root_node["sigma"][added_node][y] + / root_node["sigma"][x][y] + ) + if D[added_node][y] == D[added_node][x] + D[x][y]: + dvxy = ( + root_node["sigma"][added_node][x] + * root_node["sigma"][x][y] + / root_node["sigma"][added_node][y] + ) + DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy) + DF_tree.nodes[node_p]["betweenness"].loc[y, x] = ( + root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy + ) + if y != added_node: + DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= ( + root_node["betweenness"][x][added_node] * dxyv + ) + if x != added_node: + DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= ( + root_node["betweenness"][added_node][y] * dvxy + ) + + DF_tree.nodes[node_p]["CL"] = [ + node + for _, node in sorted( + zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True + ) + if node not in DF_tree.nodes[node_p]["GM"] + ] + DF_tree.nodes[node_p]["cont"] = dict( + zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"])) + ) + DF_tree.nodes[node_p]["heu"] = 0 + for i in range(k - len(DF_tree.nodes[node_p]["GM"])): + DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][ + DF_tree.nodes[node_p]["CL"][i] + ] + + # adding the minus node - don't insert the first node in the CL to GM + # Insert minus node only if isn't greedy type algorithm + if not greedy: + DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))]) + DF_tree.nodes[node_m]["CL"].pop(0) + DF_tree.nodes[node_m]["cont"].pop(added_node) + DF_tree.nodes[node_m]["heu"] = 0 + for i in range(k - len(DF_tree.nodes[node_m]["GM"])): + DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][ + DF_tree.nodes[node_m]["CL"][i] + ] + else: + node_m = None + + return node_p, node_m, DF_tree + + +@nx._dispatchable(edge_attrs="weight") +def group_closeness_centrality(G, S, weight=None): + r"""Compute the group closeness centrality for a group of nodes. 
+ + Group closeness centrality of a group of nodes $S$ is a measure + of how close the group is to the other nodes in the graph. + + .. math:: + + c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}} + + d_{S, v} = \min_{u \in S} (d_{u, v}) + + where $V$ is the set of nodes, $d_{S, v}$ is the distance of + the group $S$ from $v$ defined as above. ($V-S$ is the set of nodes + in $V$ that are not in $S$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group closeness + centrality is to be calculated. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + Raises + ------ + NodeNotFound + If node(s) in S are not present in G. + + Returns + ------- + closeness : float + Group closeness centrality of the group S. + + See Also + -------- + closeness_centrality + + Notes + ----- + The measure was introduced in [1]_. + The formula implemented here is described in [2]_. + + Higher values of closeness indicate greater centrality. + + It is assumed that 1 / 0 is 0 (required in the case of directed graphs, + or when a shortest path length is 0). + + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + For directed graphs, the incoming distance is utilized here. To use the + outward distance, apply the function to `G.reverse()`. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] J. Zhao et al.: + Measuring and Maximizing Group Closeness Centrality over + Disk Resident Graphs. + WWW Conference Proceedings, 2014, 689-694. + https://doi.org/10.1145/2567948.2579356 + """ + if G.is_directed(): + G = G.reverse() # reverse view + closeness = 0 # initialize to 0 + V = set(G) # set of nodes in G + S = set(S) # set of nodes in group S + V_S = V - S # set of nodes in V but not S + shortest_path_lengths = nx.multi_source_dijkstra_path_length(G, S, weight=weight) + # accumulation + for v in V_S: + try: + closeness += shortest_path_lengths[v] + except KeyError: # no path exists + closeness += 0 + try: + closeness = len(V_S) / closeness + except ZeroDivisionError: # 1 / 0 assumed as 0 + closeness = 0 + return closeness + + +@nx._dispatchable +def group_degree_centrality(G, S): + """Compute the group degree centrality for a group of nodes. + + Group degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group degree + centrality is to be calculated. + + Raises + ------ + NetworkXError + If node(s) in S are not in G. + + Returns + ------- + centrality : float + Group degree centrality of the group S. + + See Also + -------- + degree_centrality + group_in_degree_centrality + group_out_degree_centrality + + Notes + ----- + The measure was introduced in [1]_.
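As a quick check of the group closeness formula above, here is a minimal usage sketch; the path graph and the chosen group are arbitrary illustrative choices, not part of the library:

import networkx as nx

G = nx.path_graph(5)  # nodes 0-1-2-3-4
S = {1, 3}

# d(S, v) = min over u in S of d(u, v), for every node outside the group
dist = {v: min(nx.shortest_path_length(G, u, v) for u in S) for v in set(G) - S}
manual = len(dist) / sum(dist.values())  # |V - S| / sum of group distances

assert abs(manual - nx.group_closeness_centrality(G, S)) < 1e-12
print(manual)  # 1.0 here: every non-group node is adjacent to the group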
+ + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + """ + centrality = len(set().union(*[set(G.neighbors(i)) for i in S]) - set(S)) + centrality /= len(G.nodes()) - len(S) + return centrality + + +@not_implemented_for("undirected") +@nx._dispatchable +def group_in_degree_centrality(G, S): + """Compute the group in-degree centrality for a group of nodes. + + Group in-degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members by incoming edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group in-degree + centrality is to be calculated. + + Returns + ------- + centrality : float + Group in-degree centrality of the group S. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + NodeNotFound + If node(s) in S are not in G. + + See Also + -------- + degree_centrality + group_degree_centrality + group_out_degree_centrality + + Notes + ----- + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + In a DiGraph, `G.neighbors(i)` gives the nodes reached by an outward edge from i, + so for group in-degree centrality the reverse graph is used. + """ + return group_degree_centrality(G.reverse(), S) + + +@not_implemented_for("undirected") +@nx._dispatchable +def group_out_degree_centrality(G, S): + """Compute the group out-degree centrality for a group of nodes. + + Group out-degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members by outgoing edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group out-degree + centrality is to be calculated. + + Returns + ------- + centrality : float + Group out-degree centrality of the group S. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + NodeNotFound + If node(s) in S are not in G. + + See Also + -------- + degree_centrality + group_degree_centrality + group_in_degree_centrality + + Notes + ----- + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + In a DiGraph, `G.neighbors(i)` gives the nodes reached by an outward edge from i, + so for group out-degree centrality the graph itself is used. + """ + return group_degree_centrality(G, S) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/harmonic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/harmonic.py new file mode 100644 index 0000000..26702a6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/harmonic.py @@ -0,0 +1,89 @@ +"""Functions for computing the harmonic centrality of a graph.""" + +from functools import partial + +import networkx as nx + +__all__ = ["harmonic_centrality"] + + +@nx._dispatchable(edge_attrs="distance") +def harmonic_centrality(G, nbunch=None, distance=None, sources=None): + r"""Compute harmonic centrality for nodes. + + Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal + of the shortest path distances from all other nodes to `u` + + ..
math:: + + C(u) = \sum_{v \neq u} \frac{1}{d(v, u)} + + where `d(v, u)` is the shortest-path distance between `v` and `u`. + + If `sources` is given as an argument, the returned harmonic centrality + values are calculated as the sum of the reciprocals of the shortest + path distances from the nodes specified in `sources` to `u` instead + of from all nodes to `u`. + + Notice that higher values indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : container (default: all nodes in G) + Container of nodes for which harmonic centrality values are calculated. + + sources : container (default: all nodes in G) + Container of nodes `v` over which reciprocal distances are computed. + Nodes not in `G` are silently ignored. + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None`, then each edge will have distance equal to 1. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with harmonic centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + If the 'distance' keyword is set to an edge attribute key then the + shortest-path length will be computed using Dijkstra's algorithm with + that edge attribute as the edge weight. + + References + ---------- + .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality." + Internet Mathematics 10.3-4 (2014): 222-262. + """ + + nbunch = set(G.nbunch_iter(nbunch) if nbunch is not None else G.nodes) + sources = set(G.nbunch_iter(sources) if sources is not None else G.nodes) + + centrality = dict.fromkeys(nbunch, 0) + + transposed = False + if len(nbunch) < len(sources): + transposed = True + nbunch, sources = sources, nbunch + if nx.is_directed(G): + G = nx.reverse(G, copy=False) + + spl = partial(nx.shortest_path_length, G, weight=distance) + for v in sources: + dist = spl(v) + for u in nbunch.intersection(dist): + d = dist[u] + if d == 0: # handle u == v and edges with 0 weight + continue + centrality[v if transposed else u] += 1 / d + + return centrality diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/katz.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/katz.py new file mode 100644 index 0000000..c4ec9e0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/katz.py @@ -0,0 +1,331 @@ +"""Katz centrality.""" + +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["katz_centrality", "katz_centrality_numpy"] + + +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def katz_centrality( + G, + alpha=0.1, + beta=1.0, + max_iter=1000, + tol=1.0e-6, + nstart=None, + normalized=True, + weight=None, +): + r"""Compute the Katz centrality for the nodes of the graph G. + + Katz centrality computes the centrality for a node based on the centrality + of its neighbors. It is a generalization of the eigenvector centrality. The + Katz centrality for node $i$ is + + .. math:: + + x_i = \alpha \sum_{j} A_{ij} x_j + \beta, + + where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$. + + The parameter $\beta$ controls the initial centrality and + + .. math:: + + \alpha < \frac{1}{\lambda_{\max}}. 
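Before moving on, a short sketch of choosing `alpha` under the convergence bound just stated; the graph and the 0.9 safety margin are arbitrary illustrative choices:

import networkx as nx
import numpy as np

G = nx.karate_club_graph()  # arbitrary example graph

# The spectral radius of the adjacency matrix bounds the admissible alpha.
lambda_max = max(abs(np.linalg.eigvals(nx.to_numpy_array(G))))
alpha = 0.9 / lambda_max  # strictly below 1 / lambda_max, with a margin

centrality = nx.katz_centrality(G, alpha=alpha)
print(f"lambda_max = {lambda_max:.3f}, alpha = {alpha:.4f}")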
+ + Katz centrality computes the relative influence of a node within a + network by measuring the number of the immediate neighbors (first + degree nodes) and also all other nodes in the network that connect + to the node under consideration through these immediate neighbors. + + Extra weight can be provided to immediate neighbors through the + parameter $\beta$. Connections made with distant neighbors + are, however, penalized by an attenuation factor $\alpha$ which + should be strictly less than the inverse of the largest eigenvalue of the + adjacency matrix in order for the Katz centrality to be computed + correctly. More information is provided in [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + alpha : float, optional (default=0.1) + Attenuation factor + + beta : scalar or dictionary, optional (default=1.0) + Weight attributed to the immediate neighborhood. If not a scalar, the + dictionary must have a value for every node. + + max_iter : integer, optional (default=1000) + Maximum number of iterations in power method. + + tol : float, optional (default=1.0e-6) + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional + Starting value of Katz iteration for each node. + + normalized : bool, optional (default=True) + If True normalize the resulting values. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value. + + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node. + + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01) + >>> for n, c in sorted(centrality.items()): + ... print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality_numpy + eigenvector_centrality + eigenvector_centrality_numpy + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm uses the power method to find the eigenvector + corresponding to the largest eigenvalue of the adjacency matrix of ``G``. + The parameter ``alpha`` should be strictly less than the inverse of the largest + eigenvalue of the adjacency matrix for the algorithm to converge. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$, the largest + eigenvalue of the adjacency matrix. + The iteration will stop after ``max_iter`` iterations or once an error tolerance of + ``number_of_nodes(G) * tol`` has been reached. + + For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$, + Katz centrality approaches the results for eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which correspond + to the in-edges in the graph. For out-edge Katz centrality, + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction.
Oxford University Press, USA, 2010, p. 720. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index. + Psychometrika 18(1):39–43, 1953. + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + if len(G) == 0: + return {} + + nnodes = G.number_of_nodes() + + if nstart is None: + # choose starting vector with entries of 0 + x = dict.fromkeys(G, 0) + else: + x = nstart + + try: + b = dict.fromkeys(G, float(beta)) + except (TypeError, ValueError, AttributeError) as err: + b = beta + if set(beta) != set(G): + raise nx.NetworkXError( + "beta dictionary must have a value for every node" + ) from err + + # make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = dict.fromkeys(xlast, 0) + # do the multiplication y^T = Alpha * x^T A + Beta + for n in x: + for nbr in G[n]: + x[nbr] += xlast[n] * G[n][nbr].get(weight, 1) + for n in x: + x[n] = alpha * x[n] + b[n] + + # check convergence + error = sum(abs(x[n] - xlast[n]) for n in x) + if error < nnodes * tol: + if normalized: + # normalize vector + try: + s = 1.0 / math.hypot(*x.values()) + except ZeroDivisionError: + s = 1.0 + else: + s = 1 + for n in x: + x[n] *= s + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None): + r"""Compute the Katz centrality for the graph G. + + Katz centrality computes the centrality for a node based on the centrality + of its neighbors. It is a generalization of the eigenvector centrality. The + Katz centrality for node $i$ is + + .. math:: + + x_i = \alpha \sum_{j} A_{ij} x_j + \beta, + + where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$. + + The parameter $\beta$ controls the initial centrality and + + .. math:: + + \alpha < \frac{1}{\lambda_{\max}}. + + Katz centrality computes the relative influence of a node within a + network by measuring the number of the immediate neighbors (first + degree nodes) and also all other nodes in the network that connect + to the node under consideration through these immediate neighbors. + + Extra weight can be provided to immediate neighbors through the + parameter $\beta$. Connections made with distant neighbors + are, however, penalized by an attenuation factor $\alpha$ which + should be strictly less than the inverse of the largest eigenvalue of the + adjacency matrix in order for the Katz centrality to be computed + correctly. More information is provided in [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + alpha : float + Attenuation factor + + beta : scalar or dictionary, optional (default=1.0) + Weight attributed to the immediate neighborhood. If not a scalar, the + dictionary must have a value for every node. + + normalized : bool + If True, normalize the resulting values. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value.
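A quick numerical check, illustrative only, that the power iteration above converges to the fixed point x = alpha * A x + beta on an undirected graph; the graph and parameter values are arbitrary:

import networkx as nx
import numpy as np

G = nx.path_graph(4)
alpha, beta = 0.1, 1.0

x = nx.katz_centrality(G, alpha=alpha, beta=beta, normalized=False)
A = nx.to_numpy_array(G)
xv = np.array([x[n] for n in G])

# The residual of x = alpha * A x + beta should be on the order of `tol`.
print(np.max(np.abs(alpha * A @ xv + beta - xv)))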
+ + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node. + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality_numpy(G, 1 / phi) + >>> for n, c in sorted(centrality.items()): + ... print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality + eigenvector_centrality_numpy + eigenvector_centrality + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm uses a direct linear solver to solve the above equation. + The parameter ``alpha`` should be strictly less than the inverse of the largest + eigenvalue of the adjacency matrix for there to be a solution. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$, the largest + eigenvalue of the adjacency matrix. + + For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$, + Katz centrality approaches the results for eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which correspond + to the in-edges in the graph. For out-edge Katz centrality, + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, p. 173. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index. + Psychometrika 18(1):39–43, 1953. + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + import numpy as np + + if len(G) == 0: + return {} + try: + nodelist = beta.keys() + if set(nodelist) != set(G): + raise nx.NetworkXError("beta dictionary must have a value for every node") + b = np.array(list(beta.values()), dtype=float) + except AttributeError: + nodelist = list(G) + try: + b = np.ones((len(nodelist), 1)) * beta + except (TypeError, ValueError, AttributeError) as err: + raise nx.NetworkXError("beta must be a number") from err + + A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T + n = A.shape[0] + centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b).squeeze() + + # Normalize: rely on truediv to cast to float, then tolist to make Python numbers + norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) if normalized else 1 + return dict(zip(nodelist, (centrality / norm).tolist())) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/laplacian.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/laplacian.py new file mode 100644 index 0000000..2fa95d2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/laplacian.py @@ -0,0 +1,150 @@ +""" +Laplacian centrality measures. +""" + +import networkx as nx + +__all__ = ["laplacian_centrality"] + + +@nx._dispatchable(edge_attrs="weight") +def laplacian_centrality( + G, normalized=True, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Compute the Laplacian centrality for nodes in the graph `G`. + + The Laplacian Centrality of a node ``i`` is measured by the drop in the + Laplacian Energy after deleting node ``i`` from the graph. The Laplacian Energy + is the sum of the squared eigenvalues of a graph's Laplacian matrix. + + ..
math:: + + C_L(u_i,G) = \frac{(\Delta E)_i}{E_L (G)} = \frac{E_L (G)-E_L (G_i)}{E_L (G)} + + E_L (G) = \sum_{i=0}^n \lambda_i^2 + + where $E_L (G)$ is the Laplacian energy of graph `G`, + $E_L (G_i)$ is the Laplacian energy of graph `G` after deleting node ``i``, + and $\lambda_i$ are the eigenvalues of `G`'s Laplacian matrix. + This formula shows the normalized value. Without normalization, + the numerator on the right side is returned. + + Parameters + ---------- + G : graph + A networkx graph + + normalized : bool (default = True) + If True the centrality score is scaled so the sum over all nodes is 1. + If False the centrality score for each node is the drop in Laplacian + energy when that node is removed. + + nodelist : list, optional (default = None) + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default="weight") + Optional parameter `weight` to compute the Laplacian matrix. + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + Optional parameter `walk_type` used when calling + :func:`directed_laplacian_matrix <networkx.linalg.laplacianmatrix.directed_laplacian_matrix>`. + One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None`` + (the default), then a value is selected according to the properties of `G`: + - ``walk_type="random"`` if `G` is strongly connected and aperiodic + - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic + - ``walk_type="pagerank"`` for all other cases. + + alpha : real (default = 0.95) + Optional parameter `alpha` used when calling + :func:`directed_laplacian_matrix <networkx.linalg.laplacianmatrix.directed_laplacian_matrix>`. + (1 - alpha) is the teleportation probability used with pagerank. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Laplacian centrality as the value. + + Examples + -------- + >>> G = nx.Graph() + >>> edges = [(0, 1, 4), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2), (4, 5, 1)] + >>> G.add_weighted_edges_from(edges) + >>> sorted((v, f"{c:0.2f}") for v, c in laplacian_centrality(G).items()) + [(0, '0.70'), (1, '0.90'), (2, '0.28'), (3, '0.22'), (4, '0.26'), (5, '0.04')] + + Notes + ----- + The algorithm is implemented based on [1]_ with an extension to directed graphs + using the ``directed_laplacian_matrix`` function. + + Raises + ------ + NetworkXPointlessConcept + If the graph `G` is the null graph. + ZeroDivisionError + If the graph `G` has no edges (is empty) and normalization is requested. + + References + ---------- + .. [1] Qi, X., Fuller, E., Wu, Q., Wu, Y., and Zhang, C.-Q. (2012). + Laplacian centrality: A new centrality measure for weighted networks. + Information Sciences, 194:240-253.
https://math.wvu.edu/~cqzhang/Publication-files/my-paper/INS-2012-Laplacian-W.pdf + + See Also + -------- + :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix` + :func:`~networkx.linalg.laplacianmatrix.laplacian_matrix` + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXPointlessConcept("null graph has no centrality defined") + if G.size(weight=weight) == 0: + if normalized: + raise ZeroDivisionError("graph with no edges has zero full energy") + return dict.fromkeys(G, 0) + + if nodelist is not None: + nodeset = set(G.nbunch_iter(nodelist)) + if len(nodeset) != len(nodelist): + raise nx.NetworkXError("nodelist has duplicate nodes or nodes not in G") + nodes = nodelist + [n for n in G if n not in nodeset] + else: + nodelist = nodes = list(G) + + if G.is_directed(): + lap_matrix = nx.directed_laplacian_matrix(G, nodes, weight, walk_type, alpha) + else: + lap_matrix = nx.laplacian_matrix(G, nodes, weight).toarray() + + full_energy = np.sum(lap_matrix**2) + + # calculate laplacian centrality + laplace_centralities_dict = {} + for i, node in enumerate(nodelist): + # remove row and col i from lap_matrix + all_but_i = list(np.arange(lap_matrix.shape[0])) + all_but_i.remove(i) + A_2 = lap_matrix[all_but_i, :][:, all_but_i] + + # Adjust diagonal for removed row + new_diag = lap_matrix.diagonal() - abs(lap_matrix[:, i]) + np.fill_diagonal(A_2, new_diag[all_but_i]) + + if len(all_but_i) > 0: # catches degenerate case of single node + new_energy = np.sum(A_2**2) + else: + new_energy = 0.0 + + lapl_cent = full_energy - new_energy + if normalized: + lapl_cent = lapl_cent / full_energy + + laplace_centralities_dict[node] = float(lapl_cent) + + return laplace_centralities_dict diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/load.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/load.py new file mode 100644 index 0000000..fc46edd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/load.py @@ -0,0 +1,200 @@ +"""Load centrality.""" + +from operator import itemgetter + +import networkx as nx + +__all__ = ["load_centrality", "edge_load_centrality"] + + +@nx._dispatchable(edge_attrs="weight") +def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True, weight=None): + """Compute load centrality for nodes. + + The load centrality of a node is the fraction of all shortest + paths that pass through that node. + + Parameters + ---------- + G : graph + A networkx graph. + + v : node, optional (default=None) + If specified, return only the load centrality of the node `v` + instead of a dictionary for the whole graph. + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b = b / ((n - 1)(n - 2)) where + n is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + cutoff : integer, optional (default=None) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with centrality as the value. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Load centrality is slightly different from betweenness. It was originally + introduced by [2]_. For this load algorithm see [1]_. + + References + ---------- + .. [1] Mark E. J. Newman: + Scientific collaboration networks. II. + Shortest paths, weighted networks, and centrality. + Physical Review E 64, 016132, 2001.
http://journals.aps.org/pre/abstract/10.1103/PhysRevE.64.016132 + .. [2] Kwang-Il Goh, Byungnam Kahng and Doochul Kim + Universal behavior of Load Distribution in Scale-Free Networks. + Physical Review Letters 87(27):1–4, 2001. + https://doi.org/10.1103/PhysRevLett.87.278701 + """ + if v is not None: # only one node + betweenness = 0.0 + for source in G: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + betweenness += ubetween[v] if v in ubetween else 0 + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + betweenness *= 1.0 / ((order - 1) * (order - 2)) + else: + betweenness = {}.fromkeys(G, 0.0) + for source in betweenness: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + for vk in ubetween: + betweenness[vk] += ubetween[vk] + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + scale = 1.0 / ((order - 1) * (order - 2)) + for v in betweenness: + betweenness[v] *= scale + return betweenness # all nodes + + +def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None): + """Node betweenness_centrality helper: + + See betweenness_centrality for what you probably want. + This actually computes "load" and not betweenness. + See https://networkx.lanl.gov/ticket/103 + + This calculates the load of each node for paths from a single source. + (The fraction of the number of shortest paths from the source that go + through each node.) + + To get the load for a node you need to do all-pairs shortest paths. + + If weight is not None then use Dijkstra for finding shortest paths. + """ + # get the predecessor and path length data + if weight is None: + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + else: + (pred, length) = nx.dijkstra_predecessor_and_distance(G, source, cutoff, weight) + + # order the nodes by path length + onodes = [(l, vert) for (vert, l) in length.items()] + onodes.sort() + onodes[:] = [vert for (l, vert) in onodes if l > 0] + + # initialize betweenness + between = {}.fromkeys(length, 1.0) + + while onodes: + v = onodes.pop() + if v in pred: + num_paths = len(pred[v]) # Discount betweenness if more than + for x in pred[v]: # one shortest path. + if x == source: # stop if hit source because all remaining v + break # also have pred[v]==[source] + between[x] += between[v] / num_paths + # remove source + for v in between: + between[v] -= 1 + # rescale to be between 0 and 1 + if normalized: + l = len(between) + if l > 2: + # scale by 1/the number of possible paths + scale = 1 / ((l - 1) * (l - 2)) + for v in between: + between[v] *= scale + return between + + +load_centrality = newman_betweenness_centrality + + +@nx._dispatchable +def edge_load_centrality(G, cutoff=False): + """Compute edge load. + + WARNING: This concept of edge load has not been analysed + or discussed outside of NetworkX that we know of. + It is based loosely on load_centrality in the sense that + it counts the number of shortest paths which cross each edge. + This function is for demonstration and testing purposes. + + Parameters + ---------- + G : graph + A networkx graph + + cutoff : integer or False, optional (default=False) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + A dict keyed by edge 2-tuple to the number of shortest paths + which use that edge. Where more than one path is shortest + the count is divided equally among paths.
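An illustrative comparison of load and betweenness centrality; the kite graph is an arbitrary choice, and the two measures often coincide on small graphs even though they are computed differently:

import networkx as nx

G = nx.krackhardt_kite_graph()  # arbitrary small example
load = nx.load_centrality(G)
betw = nx.betweenness_centrality(G)

# Load redistributes "flow" at each node; betweenness counts path fractions.
for v in sorted(G):
    print(v, f"{load[v]:.3f}", f"{betw[v]:.3f}")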
+ """ + betweenness = {} + for u, v in G.edges(): + betweenness[(u, v)] = 0.0 + betweenness[(v, u)] = 0.0 + + for source in G: + ubetween = _edge_betweenness(G, source, cutoff=cutoff) + for e, ubetweenv in ubetween.items(): + betweenness[e] += ubetweenv # cumulative total + return betweenness + + +def _edge_betweenness(G, source, nodes=None, cutoff=False): + """Edge betweenness helper.""" + # get the predecessor data + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + # order the nodes by path length + onodes = [n for n, d in sorted(length.items(), key=itemgetter(1))] + # initialize betweenness, doesn't account for any edge weights + between = {} + for u, v in G.edges(nodes): + between[(u, v)] = 1.0 + between[(v, u)] = 1.0 + + while onodes: # work through all paths + v = onodes.pop() + if v in pred: + # Discount betweenness if more than one shortest path. + num_paths = len(pred[v]) + for w in pred[v]: + if w in pred: + # Discount betweenness, mult path + num_paths = len(pred[w]) + for x in pred[w]: + between[(w, x)] += between[(v, w)] / num_paths + between[(x, w)] += between[(w, v)] / num_paths + return between diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/percolation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/percolation.py new file mode 100644 index 0000000..0d4c871 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/percolation.py @@ -0,0 +1,128 @@ +"""Percolation centrality measures.""" + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = ["percolation_centrality"] + + +@nx._dispatchable(node_attrs="attribute", edge_attrs="weight") +def percolation_centrality(G, attribute="percolation", states=None, weight=None): + r"""Compute the percolation centrality for nodes. + + Percolation centrality of a node $v$, at a given time, is defined + as the proportion of ‘percolated paths’ that go through that node. + + This measure quantifies relative impact of nodes based on their + topological connectivity, as well as their percolation states. + + Percolation states of nodes are used to depict network percolation + scenarios (such as during infection transmission in a social network + of individuals, spreading of computer viruses on computer networks, or + transmission of disease over a network of towns) over time. In this + measure usually the percolation state is expressed as a decimal + between 0.0 and 1.0. + + When all nodes are in the same percolated state this measure is + equivalent to betweenness centrality. + + Parameters + ---------- + G : graph + A NetworkX graph. + + attribute : None or string, optional (default='percolation') + Name of the node attribute to use for percolation state, used + if `states` is None. If a node does not set the attribute the + state of that node will be set to the default value of 1. + If all nodes do not have the attribute all nodes will be set to + 1 and the centrality measure will be equivalent to betweenness centrality. + + states : None or dict, optional (default=None) + Specify percolation states for the nodes, nodes as keys states + as values. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. 
+ The weight of an edge is treated as the length or distance between the two sides. + + + Returns + ------- + nodes : dictionary + Dictionary of nodes with percolation centrality as the value. + + See Also + -------- + betweenness_centrality + + Notes + ----- + The algorithm is from Mahendra Piraveenan, Mikhail Prokopenko, and + Liaquat Hossain [1]_. + Pair dependencies are calculated and accumulated using [2]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] Mahendra Piraveenan, Mikhail Prokopenko, Liaquat Hossain + Percolation Centrality: Quantifying Graph-Theoretic Impact of Nodes + during Percolation in Networks + http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053095 + .. [2] Ulrik Brandes: + A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + """ + percolation = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + + nodes = G + + if states is None: + states = nx.get_node_attributes(nodes, attribute, default=1) + + # sum of all percolation states + p_sigma_x_t = 0.0 + for v in states.values(): + p_sigma_x_t += v + + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + # accumulation + percolation = _accumulate_percolation( + percolation, S, P, sigma, s, states, p_sigma_x_t + ) + + n = len(G) + + for v in percolation: + percolation[v] *= 1 / (n - 2) + + return percolation + + +def _accumulate_percolation(percolation, S, P, sigma, s, states, p_sigma_x_t): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + # percolation weight + pw_s_w = states[s] / (p_sigma_x_t - states[w]) + percolation[w] += delta[w] * pw_s_w + return percolation diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/reaching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/reaching.py new file mode 100644 index 0000000..23018af --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/reaching.py @@ -0,0 +1,209 @@ +"""Functions for computing reaching centrality of a node or a graph.""" + +import networkx as nx +from networkx.utils import pairwise + +__all__ = ["global_reaching_centrality", "local_reaching_centrality"] + + +def _average_weight(G, path, weight=None): + """Returns the average weight of an edge in a weighted path. + + Parameters + ---------- + G : graph + A networkx graph. + + path : list + A list of vertices that define the path. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. Then the average weight of an edge + is assumed to be the multiplicative inverse of the length of the path. + Otherwise holds the name of the edge attribute used as weight. + """ + path_length = len(path) - 1 + if path_length <= 0: + return 0 + if weight is None: + return 1 / path_length + total_weight = sum(G.edges[i, j][weight] for i, j in pairwise(path)) + return total_weight / path_length + + +@nx._dispatchable(edge_attrs="weight") +def global_reaching_centrality(G, weight=None, normalized=True): + """Returns the global reaching centrality of a directed graph.
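A small sketch of `percolation_centrality` with explicit states, plus the uniform-state case reducing to betweenness as noted in the docstring above; the graph and state values are arbitrary, hypothetical choices:

import networkx as nx

G = nx.path_graph(5)  # arbitrary example
states = {0: 1.0, 1: 0.2, 2: 0.2, 3: 0.2, 4: 0.2}  # hypothetical infection states
print(nx.percolation_centrality(G, states=states))

# With identical states the measure reduces to betweenness centrality.
uniform = nx.percolation_centrality(G, states=dict.fromkeys(G, 0.5))
b = nx.betweenness_centrality(G)
print(all(abs(uniform[v] - b[v]) < 1e-12 for v in G))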
+ + The *global reaching centrality* of a weighted directed graph is the + average over all nodes of the difference between the local reaching + centrality of the node and the greatest local reaching centrality of + any node in the graph [1]_. For more information on the local + reaching centrality, see :func:`local_reaching_centrality`. + Informally, the local reaching centrality is the proportion of the + graph that is reachable from the neighbors of the node. + + Parameters + ---------- + G : DiGraph + A networkx DiGraph. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If ``None``, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The global reaching centrality of the graph. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> G.add_edge(1, 3) + >>> nx.global_reaching_centrality(G) + 1.0 + >>> G.add_edge(3, 2) + >>> nx.global_reaching_centrality(G) + 0.75 + + See also + -------- + local_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + # If provided, weights must be interpreted as connection strength + # (so higher weights are more likely to be chosen). However, the + # shortest path algorithms in NetworkX assume the provided "weight" + # is actually a distance (so edges with higher weight are less + # likely to be chosen). Therefore we need to invert the weights when + # computing shortest paths. + # + # If weight is None, we leave it as-is so that the shortest path + # algorithm can use a faster, unweighted algorithm. + if weight is not None: + + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + shortest_paths = dict(nx.shortest_path(G, weight=as_distance)) + else: + shortest_paths = dict(nx.shortest_path(G)) + + centrality = local_reaching_centrality + # TODO This can be trivially parallelized. + lrc = [ + centrality(G, node, paths=paths, weight=weight, normalized=normalized) + for node, paths in shortest_paths.items() + ] + + max_lrc = max(lrc) + return sum(max_lrc - c for c in lrc) / (len(G) - 1) + + +@nx._dispatchable(edge_attrs="weight") +def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True): + """Returns the local reaching centrality of a node in a directed + graph. + + The *local reaching centrality* of a node in a directed graph is the + proportion of other nodes reachable from that node [1]_. + + Parameters + ---------- + G : DiGraph + A NetworkX DiGraph. + + v : node + A node in the directed graph `G`. + + paths : dictionary (default=None) + If this is not `None` it must be a dictionary representation + of single-source shortest paths, as computed by, for example, + :func:`networkx.shortest_path` with source node `v`. Use this + keyword argument if you intend to invoke this function many + times but don't want the paths to be recomputed each time. 
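A sketch of the weighted case, where edge weights are connection strengths and are inverted internally as the comment above describes; the three-node hierarchy is an arbitrary example:

import networkx as nx

G = nx.DiGraph()
G.add_edge("boss", "a", weight=3)  # strong tie: short effective distance
G.add_edge("boss", "b", weight=1)
G.add_edge("a", "c", weight=1)

print(nx.global_reaching_centrality(G, weight="weight"))
print(nx.global_reaching_centrality(G))  # unweighted variant, for comparison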
+ + weight : None or string, optional (default=None) + Attribute to use for edge weights. If `None`, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The local reaching centrality of the node ``v`` in the graph + ``G``. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (1, 3)]) + >>> nx.local_reaching_centrality(G, 3) + 0.0 + >>> G.add_edge(3, 2) + >>> nx.local_reaching_centrality(G, 3) + 0.5 + + See also + -------- + global_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + # Corner case: graph with single node containing a self-loop + if (total_weight := G.size(weight=weight)) > 0 and len(G) == 1: + raise nx.NetworkXError( + "local_reaching_centrality of a single node with self-loop not well-defined" + ) + if paths is None: + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + if weight is not None: + # Interpret weights as lengths. + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + paths = nx.shortest_path(G, source=v, weight=as_distance) + else: + paths = nx.shortest_path(G, source=v) + # If the graph is unweighted, simply return the proportion of nodes + # reachable from the source node ``v``. + if weight is None and G.is_directed(): + return (len(paths) - 1) / (len(G) - 1) + if normalized and weight is not None: + norm = G.size(weight=weight) / G.size() + else: + norm = 1 + # TODO This can be trivially parallelized. + avgw = (_average_weight(G, path, weight=weight) for path in paths.values()) + sum_avg_weight = sum(avgw) / norm + return sum_avg_weight / (len(G) - 1) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/second_order.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/second_order.py new file mode 100644 index 0000000..35583cd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/second_order.py @@ -0,0 +1,141 @@ +"""Copyright (c) 2015 – Thomson Licensing, SAS + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +* Neither the name of Thomson Licensing, or Technicolor, nor the names +of its contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +# Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com) + +__all__ = ["second_order_centrality"] + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def second_order_centrality(G, weight="weight"): + """Compute the second order centrality for nodes of G. + + The second order centrality of a given node is the standard deviation of + the return times to that node of a perpetual random walk on G. + + Parameters + ---------- + G : graph + A NetworkX connected and undirected graph. + + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + + Returns + ------- + nodes : dictionary + Dictionary keyed by node with second order centrality as the value. + + Examples + -------- + >>> G = nx.star_graph(10) + >>> soc = nx.second_order_centrality(G) + >>> print(sorted(soc.items(), key=lambda x: x[1])[0][0]) # pick first id + 0 + + Raises + ------ + NetworkXException + If the graph G is empty, not connected, or has negative weights. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Lower values of second order centrality indicate higher centrality. + + The algorithm is from Kermarrec, Le Merrer, Sericola and Trédan [1]_. + + This code implements the analytical version of the algorithm, i.e., + there is no simulation of a random walk process involved. The random walk + is here unbiased (corresponding to eq 6 of the paper [1]_), thus the + centrality values are the standard deviations for random walk return times + on the transformed input graph G (equal in-degree at each node, obtained + by adding self-loops). + + The complexity of this implementation, made to run locally on a single + machine, is O(n^3), with n the size of G, which makes it viable only for + small graphs. + + References + ---------- + .. [1] Anne-Marie Kermarrec, Erwan Le Merrer, Bruno Sericola, Gilles Trédan + "Second order centrality: Distributed assessment of nodes criticity in + complex networks", Elsevier Computer Communications 34(5):619-628, 2011.
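A small sketch (arbitrary graph) of the inverted reading of this measure, where the most central node has the *smallest* value:

import networkx as nx

G = nx.path_graph(7)  # arbitrary example
soc = nx.second_order_centrality(G)

# Lower standard deviation of return times means higher centrality; the
# middle node of the path is expected to come out first.
print(sorted(soc, key=soc.get)[0])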
+ """ + import numpy as np + + n = len(G) + + if n == 0: + raise nx.NetworkXException("Empty graph.") + if not nx.is_connected(G): + raise nx.NetworkXException("Non connected graph.") + if any(d.get(weight, 0) < 0 for u, v, d in G.edges(data=True)): + raise nx.NetworkXException("Graph has negative edge weights.") + + # balancing G for Metropolis-Hastings random walks + G = nx.DiGraph(G) + in_deg = dict(G.in_degree(weight=weight)) + d_max = max(in_deg.values()) + for i, deg in in_deg.items(): + if deg < d_max: + G.add_edge(i, i, weight=d_max - deg) + + P = nx.to_numpy_array(G) + P /= P.sum(axis=1)[:, np.newaxis] # to transition probability matrix + + def _Qj(P, j): + P = P.copy() + P[:, j] = 0 + return P + + M = np.empty([n, n]) + + for i in range(n): + M[:, i] = np.linalg.solve( + np.identity(n) - _Qj(P, i), np.ones([n, 1])[:, 0] + ) # eq 3 + + return dict( + zip( + G.nodes, + (float(np.sqrt(2 * np.sum(M[:, i]) - n * (n + 1))) for i in range(n)), + ) + ) # eq 6 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/subgraph_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/subgraph_alg.py new file mode 100644 index 0000000..9295963 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/subgraph_alg.py @@ -0,0 +1,342 @@ +""" +Subraph centrality and communicability betweenness. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "subgraph_centrality_exp", + "subgraph_centrality", + "communicability_betweenness_centrality", + "estrada_index", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def subgraph_centrality_exp(G): + r"""Returns the subgraph centrality for each node of G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes:dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm exponentiates the adjacency matrix. + + The subgraph centrality of a node `u` in G can be found using + the matrix exponential of the adjacency matrix of G [1]_, + + .. math:: + + SC(u)=(e^A)_{uu} . + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + Examples + -------- + (Example from [1]_) + + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... (2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... 
) + >>> sc = nx.subgraph_centrality_exp(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + """ + # alternative implementation that calculates the matrix exponential + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + expA = sp.linalg.expm(A) + # convert diagonal to dictionary keyed by node + sc = dict(zip(nodelist, map(float, expA.diagonal()))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def subgraph_centrality(G): + r"""Returns subgraph centrality for each node in G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality_exp: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm computes eigenvalues and eigenvectors + of the adjacency matrix. + + Subgraph centrality of a node `u` in G can be found using + a spectral decomposition of the adjacency matrix [1]_, + + .. math:: + + SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}}, + + where `v_j` is an eigenvector of the adjacency matrix `A` of G + corresponding to the eigenvalue `\lambda_j`. + + Examples + -------- + (Example from [1]_) + + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... (2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... ) + >>> sc = nx.subgraph_centrality(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + """ + import numpy as np + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[np.nonzero(A)] = 1 + w, v = np.linalg.eigh(A) + vsquare = np.array(v) ** 2 + expw = np.exp(w) + xg = vsquare @ expw + # convert vector dictionary keyed by node + sc = dict(zip(nodelist, map(float, xg))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def communicability_betweenness_centrality(G): + r"""Returns subgraph communicability for all pairs of nodes in G. + + Communicability betweenness measure makes use of the number of walks + connecting every pair of nodes as the basis of a betweenness centrality + measure. + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with communicability betweenness as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + Notes + ----- + Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges, + and `A` denote the adjacency matrix of `G`. 
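Since `subgraph_centrality_exp` (matrix exponential) and `subgraph_centrality` (eigendecomposition) compute the same quantity by different linear-algebra routes, a quick agreement check on an arbitrary small graph:

import networkx as nx

G = nx.cycle_graph(6)  # arbitrary example
sc_exp = nx.subgraph_centrality_exp(G)
sc_eig = nx.subgraph_centrality(G)

# Both routes should agree to floating-point accuracy.
print(max(abs(sc_exp[v] - sc_eig[v]) for v in G))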
+ + Let `G(r)=(V,E(r))` be the graph resulting from + removing all edges connected to node `r` but not the node itself. + + The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros + only in row and column `r`. + + The subgraph betweenness of a node `r` is [1]_ + + .. math:: + + \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}}, + p\neq q, q\neq r, + + where + `G_{prq}=(e^{A})_{pq} - (e^{A+E(r)})_{pq}` is the number of walks + involving node r, + `G_{pq}=(e^{A})_{pq}` is the number of walks starting + at node `p` and ending at node `q`, + and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the + number of terms in the sum. + + The resulting `\omega_{r}` takes values between zero and one. + The lower bound cannot be attained for a connected + graph, and the upper bound is attained in the star graph. + + References + ---------- + .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano, + "Communicability Betweenness in Complex Networks" + Physica A 388 (2009) 764-774. + https://arxiv.org/abs/0905.4102 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> cbc = nx.communicability_betweenness_centrality(G) + >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)]) + ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03'] + """ + import numpy as np + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + n = len(nodelist) + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[np.nonzero(A)] = 1 + expA = sp.linalg.expm(A) + mapping = dict(zip(nodelist, range(n))) + cbc = {} + for v in G: + # remove row and col of node v + i = mapping[v] + row = A[i, :].copy() + col = A[:, i].copy() + A[i, :] = 0 + A[:, i] = 0 + B = (expA - sp.linalg.expm(A)) / expA + # sum with row/col of node v and diag set to zero + B[i, :] = 0 + B[:, i] = 0 + B -= np.diag(np.diag(B)) + cbc[v] = float(B.sum()) + # put row and col back + A[i, :] = row + A[:, i] = col + # rescale when more than two nodes + order = len(cbc) + if order > 2: + scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0)) + cbc = {node: value * scale for node, value in cbc.items()} + return cbc + + +@nx._dispatchable +def estrada_index(G): + r"""Returns the Estrada index of the graph G. + + The Estrada Index is a topological index of folding or 3D "compactness" ([1]_). + + Parameters + ---------- + G : graph + + Returns + ------- + estrada index : float + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + Notes + ----- + Let `G=(V,E)` be a simple undirected graph with `n` nodes and let + `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}` + be a non-decreasing ordering of the eigenvalues of its adjacency + matrix `A`. The Estrada index is ([1]_, [2]_) + + .. math:: + EE(G)=\sum_{j=1}^n e^{\lambda _j}. + + References + ---------- + .. [1] E. Estrada, "Characterization of 3D molecular structure", + Chem. Phys. Lett. 319, 713 (2000). + https://doi.org/10.1016/S0009-2614(00)00158-5 + .. [2] José Antonio de la Peña, Ivan Gutman, Juan Rada, + "Estimating the Estrada index", + Linear Algebra and its Applications. 427, 1 (2007).
+ https://doi.org/10.1016/j.laa.2007.06.020 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> ei = nx.estrada_index(G) + >>> print(f"{ei:0.5}") + 20.55 + """ + return sum(subgraph_centrality(G).values()) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..90f41b5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-312.pyc new file mode 100644 index 0000000..52e42c7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-312.pyc new file mode 100644 index 0000000..384077a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-312.pyc new file mode 100644 index 0000000..8e79f13 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-312.pyc new file mode 100644 index 0000000..62d6fe2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-312.pyc new file mode 100644 index 0000000..3348594 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-312.pyc new file mode 100644 index 0000000..c8175eb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-312.pyc new file mode 100644 index 0000000..2914dd1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-312.pyc new file mode 100644 index 0000000..a93c7f8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-312.pyc new file mode 100644 index 0000000..5d133b7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-312.pyc new file mode 100644 index 0000000..10d39a7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-312.pyc new file mode 100644 index 0000000..517674c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-312.pyc new file mode 100644 index 0000000..cc48685 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-312.pyc new file mode 100644 index 0000000..2916871 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-312.pyc new file mode 100644 index 0000000..0554db0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-312.pyc new file mode 100644 index 0000000..8d27d56 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-312.pyc new file mode 100644 index 0000000..56f1754 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-312.pyc new file mode 100644 index 0000000..1bf8409 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-312.pyc new file mode 100644 index 0000000..dbd82cf Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-312.pyc new file mode 100644 index 0000000..75a4295 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-312.pyc new file mode 100644 index 0000000..5beb85f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py new file mode 100644 index 
0000000..5c988ae --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py @@ -0,0 +1,903 @@ +import math + +import pytest + +import networkx as nx + + +def weighted_G(): + G = nx.Graph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + return G + + +class TestBetweennessCentrality: + def test_K5(self): + """Betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_K5_endpoints(self): + """Betweenness centrality: K5 endpoints""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + b_answer = {0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + b_answer = {0: 0.4, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3_normalized(self): + """Betweenness centrality: P3 normalized""" + G = nx.path_graph(3) + b = nx.betweenness_centrality(G, weight=None, normalized=True) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Betweenness centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_sample_from_P3(self): + """Betweenness centrality: P3 sample""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, k=3, weight=None, normalized=False, seed=1) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.betweenness_centrality(G, k=2, weight=None, normalized=False, seed=1) + # python versions give different results with same seed + b_approx1 = {0: 0.0, 1: 1.0, 2: 0.0} + b_approx2 = {0: 0.0, 1: 0.5, 2: 0.0} + for n in sorted(G): + assert b[n] in (b_approx1[n], b_approx2[n]) + + def test_P3_endpoints(self): + """Betweenness centrality: P3 endpoints""" + G = nx.path_graph(3) + b_answer = {0: 2.0, 1: 3.0, 2: 2.0} + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b_answer = {0: 2 / 3, 1: 1.0, 2: 2 / 3} + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_krackhardt_kite_graph(self): + """Betweenness centrality: Krackhardt kite graph""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def 
test_krackhardt_kite_graph_normalized(self): + """Betweenness centrality: Krackhardt kite graph normalized""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_florentine_families_graph(self): + """Betweenness centrality: Florentine families graph""" + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_les_miserables_graph(self): + """Betweenness centrality: Les Miserables graph""" + G = nx.les_miserables_graph() + b_answer = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.570, + "Labarre": 0.000, + "Marguerite": 0.000, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.041, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.130, + "MmeThenardier": 0.029, + "Thenardier": 0.075, + "Cosette": 0.024, + "Javert": 0.054, + "Fauchelevent": 0.026, + "Bamatabois": 0.008, + "Perpetue": 0.000, + "Simplice": 0.009, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.007, + "Boulatruelle": 0.000, + "Eponine": 0.011, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.165, + "Gillenormand": 0.020, + "Magnon": 0.000, + "MlleGillenormand": 0.048, + "MmePontmercy": 0.000, + "MlleVaubois": 0.000, + "LtGillenormand": 0.000, + "Marius": 0.132, + "BaronessT": 0.000, + "Mabeuf": 0.028, + "Enjolras": 0.043, + "Combeferre": 0.001, + "Prouvaire": 0.000, + "Feuilly": 0.001, + "Courfeyrac": 0.005, + "Bahorel": 0.002, + "Bossuet": 0.031, + "Joly": 0.002, + "Grantaire": 0.000, + "MotherPlutarch": 0.000, + "Gueulemer": 0.005, + "Babet": 0.005, + "Claquesous": 0.005, + "Montparnasse": 0.004, + "Toussaint": 0.000, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.000, + "MmeHucheloup": 0.000, + } + + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_ladder_graph(self): + """Betweenness centrality: Ladder graph""" + G = nx.Graph() # ladder_graph(3) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667} + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_disconnected_path(self): + """Betweenness centrality: disconnected path""" + G = nx.Graph() 
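+        # (Editorial note, not part of upstream: the graph built below has two
+        # components, a P3 and a P4. In the P4 component 3-4-5-6, node 4 is
+        # interior to the 3-5 and 3-6 shortest paths and node 5 to the 3-6 and
+        # 4-6 paths, which is where the expected scores of 2.0 come from.)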
+ nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5, 6]) + b_answer = {0: 0, 1: 1, 2: 0, 3: 0, 4: 2, 5: 2, 6: 0} + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_disconnected_path_endpoints(self): + """Betweenness centrality: disconnected path endpoints""" + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5, 6]) + b_answer = {0: 2, 1: 3, 2: 2, 3: 3, 4: 5, 5: 5, 6: 3} + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n] / 21, abs=1e-7) + + def test_directed_path(self): + """Betweenness centrality: directed path""" + G = nx.DiGraph() + nx.add_path(G, [0, 1, 2]) + b = nx.betweenness_centrality(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_directed_path_normalized(self): + """Betweenness centrality: directed path normalized""" + G = nx.DiGraph() + nx.add_path(G, [0, 1, 2]) + b = nx.betweenness_centrality(G, weight=None, normalized=True) + b_answer = {0: 0.0, 1: 0.5, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + @pytest.mark.parametrize( + ("normalized", "endpoints", "is_directed", "k", "expected"), + [ + (True, True, True, None, {0: 1.0, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4}), + (True, True, True, 1, {0: 1.0, 1: 1.0, 2: 0.25, 3: 0.25, 4: 0.25}), + (True, True, False, None, {0: 1.0, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4}), + (True, True, False, 1, {0: 1.0, 1: 1.0, 2: 0.25, 3: 0.25, 4: 0.25}), + (True, False, True, None, {0: 1.0, 1: 0, 2: 0.0, 3: 0.0, 4: 0.0}), + (True, False, True, 1, {0: 1.0, 1: math.nan, 2: 0.0, 3: 0.0, 4: 0.0}), + (True, False, False, None, {0: 1.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}), + (True, False, False, 1, {0: 1.0, 1: math.nan, 2: 0.0, 3: 0.0, 4: 0.0}), + (False, True, True, None, {0: 20.0, 1: 8.0, 2: 8.0, 3: 8.0, 4: 8.0}), + (False, True, True, 1, {0: 20.0, 1: 20.0, 2: 5.0, 3: 5.0, 4: 5.0}), + (False, True, False, None, {0: 10.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0}), + (False, True, False, 1, {0: 10.0, 1: 10.0, 2: 2.5, 3: 2.5, 4: 2.5}), + (False, False, True, None, {0: 12.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}), + (False, False, True, 1, {0: 12.0, 1: math.nan, 2: 0.0, 3: 0.0, 4: 0.0}), + (False, False, False, None, {0: 6.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}), + (False, False, False, 1, {0: 6.0, 1: math.nan, 2: 0.0, 3: 0.0, 4: 0.0}), + ], + ) + def test_scale_with_k_on_star_graph( + self, normalized, endpoints, is_directed, k, expected + ): + # seed=1 selects node 1 as the initial node when using k=1. + # Recall node 0 is the center of the star graph. 
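+        # (Editorial note, not part of upstream: in the expected dicts below,
+        # the sampled source, node 1, comes out as math.nan whenever k=1 with
+        # endpoints=False; its rescaling denominator appears to degenerate to
+        # zero when it is the only sampled source.)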
+ G = nx.star_graph(4) + if is_directed: + G = G.to_directed() + b = nx.betweenness_centrality( + G, k=k, seed=1, endpoints=endpoints, normalized=normalized + ) + assert b == pytest.approx(expected, nan_ok=True) + + @pytest.mark.parametrize( + ("normalized", "endpoints", "is_directed", "k", "expected"), + [ + ( + *(True, True, True, None), # Use *() splatting for better autoformat + {0: 14 / 20, 1: 14 / 20, 2: 14 / 20, 3: 14 / 20, 4: 14 / 20}, + ), + ( + *(True, True, True, 3), + {0: 9 / 12, 1: 11 / 12, 2: 9 / 12, 3: 6 / 12, 4: 7 / 12}, + ), + ( + *(True, True, False, None), + {0: 10 / 20, 1: 10 / 20, 2: 10 / 20, 3: 10 / 20, 4: 10 / 20}, + ), + ( + *(True, True, False, 3), + {0: 8 / 12, 1: 7 / 12, 2: 4 / 12, 3: 4 / 12, 4: 7 / 12}, + ), + ( + *(True, False, True, None), + {0: 6 / 12, 1: 6 / 12, 2: 6 / 12, 3: 6 / 12, 4: 6 / 12}, + ), + ( + *(True, False, True, 3), + # Use 6 instead of 9 for denominator for source nodes 0, 1, and 4 + {0: 3 / 6, 1: 5 / 6, 2: 6 / 9, 3: 3 / 9, 4: 1 / 6}, + ), + ( + *(True, False, False, None), + {0: 2 / 12, 1: 2 / 12, 2: 2 / 12, 3: 2 / 12, 4: 2 / 12}, + ), + ( + *(True, False, False, 3), + # Use 6 instead of 9 for denominator for source nodes 0, 1, and 4 + {0: 2 / 6, 1: 1 / 6, 2: 1 / 9, 3: 1 / 9, 4: 1 / 6}, + ), + (False, True, True, None, {0: 14, 1: 14, 2: 14, 3: 14, 4: 14}), + ( + *(False, True, True, 3), + {0: 9 * 5 / 3, 1: 11 * 5 / 3, 2: 9 * 5 / 3, 3: 6 * 5 / 3, 4: 7 * 5 / 3}, + ), + (False, True, False, None, {0: 5, 1: 5, 2: 5, 3: 5, 4: 5}), + ( + *(False, True, False, 3), + {0: 8 * 5 / 6, 1: 7 * 5 / 6, 2: 4 * 5 / 6, 3: 4 * 5 / 6, 4: 7 * 5 / 6}, + ), + (False, False, True, None, {0: 6, 1: 6, 2: 6, 3: 6, 4: 6}), + ( + *(False, False, True, 3), + # Use 2 instead of 3 for denominator for source nodes 0, 1, and 4 + {0: 3 * 4 / 2, 1: 5 * 4 / 2, 2: 6 * 4 / 3, 3: 3 * 4 / 3, 4: 1 * 4 / 2}, + ), + (False, False, False, None, {0: 1, 1: 1, 2: 1, 3: 1, 4: 1}), + ( + *(False, False, False, 3), + # Use 4 instead of 6 for denominator for source nodes 0, 1, and 4 + {0: 2 * 4 / 4, 1: 1 * 4 / 4, 2: 1 * 4 / 6, 3: 1 * 4 / 6, 4: 1 * 4 / 4}, + ), + ], + ) + def test_scale_with_k_on_cycle_graph( + self, normalized, endpoints, is_directed, k, expected + ): + # seed=1 selects nodes 0, 1, and 4 as the initial nodes when using k=3. + G = nx.cycle_graph(5, create_using=nx.DiGraph if is_directed else nx.Graph) + b = nx.betweenness_centrality( + G, k=k, seed=1, endpoints=endpoints, normalized=normalized + ) + assert b == pytest.approx(expected) + + def test_k_out_of_bounds_raises(self): + G = nx.cycle_graph(4) + with pytest.raises(ValueError, match="larger"): + nx.betweenness_centrality(G, k=5) + with pytest.raises(ValueError, match="negative"): + nx.betweenness_centrality(G, k=-1) + with pytest.raises(ZeroDivisionError): + nx.betweenness_centrality(G, k=0) + with pytest.raises(ZeroDivisionError): + nx.betweenness_centrality(G, k=0, normalized=False) + # Test edge case: use full population when k == len(G) + # Should we warn or raise instead? 
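+        # (Editorial note, not part of upstream: with k == len(G) every node is
+        # sampled exactly once, so the subsampled estimate should reproduce the
+        # exact computation, which the equality asserted below verifies.)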
+ b1 = nx.betweenness_centrality(G, k=4, endpoints=False) + b2 = nx.betweenness_centrality(G, endpoints=False) + assert b1 == b2 + + +class TestWeightedBetweennessCentrality: + def test_K5(self): + """Weighted betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3_normalized(self): + """Weighted betweenness centrality: P3 normalized""" + G = nx.path_graph(3) + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Weighted betweenness centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_krackhardt_kite_graph(self): + """Weighted betweenness centrality: Krackhardt kite graph""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + for b in b_answer: + b_answer[b] /= 2 + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_krackhardt_kite_graph_normalized(self): + """Weighted betweenness centrality: + Krackhardt kite graph normalized + """ + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_florentine_families_graph(self): + """Weighted betweenness centrality: + Florentine families graph""" + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_les_miserables_graph(self): + """Weighted betweenness centrality: Les Miserables graph""" + G = nx.les_miserables_graph() + b_answer = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.454, + "Labarre": 0.000, + "Marguerite": 0.009, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.066, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.114, + "MmeThenardier": 0.046, + "Thenardier": 0.129, + "Cosette": 0.075, + "Javert": 0.193, + "Fauchelevent": 0.026, + "Bamatabois": 0.080, + "Perpetue": 0.000, + "Simplice": 0.001, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 
0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.023, + "Boulatruelle": 0.000, + "Eponine": 0.023, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.285, + "Gillenormand": 0.024, + "Magnon": 0.005, + "MlleGillenormand": 0.036, + "MmePontmercy": 0.005, + "MlleVaubois": 0.000, + "LtGillenormand": 0.015, + "Marius": 0.072, + "BaronessT": 0.004, + "Mabeuf": 0.089, + "Enjolras": 0.003, + "Combeferre": 0.000, + "Prouvaire": 0.000, + "Feuilly": 0.004, + "Courfeyrac": 0.001, + "Bahorel": 0.007, + "Bossuet": 0.028, + "Joly": 0.000, + "Grantaire": 0.036, + "MotherPlutarch": 0.000, + "Gueulemer": 0.025, + "Babet": 0.015, + "Claquesous": 0.042, + "Montparnasse": 0.050, + "Toussaint": 0.011, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.002, + "MmeHucheloup": 0.034, + } + + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_ladder_graph(self): + """Weighted betweenness centrality: Ladder graph""" + G = nx.Graph() # ladder_graph(3) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667} + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_G(self): + """Weighted betweenness centrality: G""" + G = weighted_G() + b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G2(self): + """Weighted betweenness centrality: G2""" + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + + b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0} + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G3(self): + """Weighted betweenness centrality: G3""" + G = nx.MultiGraph(weighted_G()) + es = list(G.edges(data=True))[::2] # duplicate every other edge + G.add_edges_from(es) + b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G4(self): + """Weighted betweenness centrality: G4""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("s", "x", 6), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("x", "y", 3), + ("y", "s", 7), + ("y", "v", 6), + ("y", "v", 6), + ] + ) + + b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0} + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestEdgeBetweennessCentrality: + def test_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = dict.fromkeys(G.edges(), 1) + for n in sorted(G.edges()): + assert b[n] == 
pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = dict.fromkeys(G.edges(), 1 / 10) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7) + + def test_balanced_tree(self): + """Edge betweenness centrality: balanced tree""" + G = nx.balanced_tree(r=2, h=2) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestWeightedEdgeBetweennessCentrality: + def test_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = dict.fromkeys(G.edges(), 1) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_balanced_tree(self): + """Edge betweenness centrality: balanced tree""" + G = nx.balanced_tree(r=2, h=2) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Edge betweenness centrality: weighted""" + eList = [ + (0, 1, 5), + (0, 2, 4), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 4, 3), + (2, 4, 5), + (3, 4, 4), + ] + G = nx.Graph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = { + (0, 1): 0.0, + (0, 2): 1.0, + (0, 3): 2.0, + (0, 4): 1.0, + (1, 2): 2.0, + (1, 3): 3.5, + (1, 4): 1.5, + (2, 4): 1.0, + (3, 4): 0.5, + } + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_weighted_graph(self): + """Edge betweenness centrality: normalized 
weighted""" + eList = [ + (0, 1, 5), + (0, 2, 4), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 4, 3), + (2, 4, 5), + (3, 4, 4), + ] + G = nx.Graph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True) + b_answer = { + (0, 1): 0.0, + (0, 2): 1.0, + (0, 3): 2.0, + (0, 4): 1.0, + (1, 2): 2.0, + (1, 3): 3.5, + (1, 4): 1.5, + (2, 4): 1.0, + (3, 4): 0.5, + } + norm = len(G) * (len(G) - 1) / 2 + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7) + + def test_weighted_multigraph(self): + """Edge betweenness centrality: weighted multigraph""" + eList = [ + (0, 1, 5), + (0, 1, 4), + (0, 2, 4), + (0, 3, 3), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 3, 2), + (1, 4, 3), + (1, 4, 4), + (2, 4, 5), + (3, 4, 4), + (3, 4, 4), + ] + G = nx.MultiGraph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = { + (0, 1, 0): 0.0, + (0, 1, 1): 0.5, + (0, 2, 0): 1.0, + (0, 3, 0): 0.75, + (0, 3, 1): 0.75, + (0, 4, 0): 1.0, + (1, 2, 0): 2.0, + (1, 3, 0): 3.0, + (1, 3, 1): 0.0, + (1, 4, 0): 1.5, + (1, 4, 1): 0.0, + (2, 4, 0): 1.0, + (3, 4, 0): 0.25, + (3, 4, 1): 0.25, + } + for n in sorted(G.edges(keys=True)): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_weighted_multigraph(self): + """Edge betweenness centrality: normalized weighted multigraph""" + eList = [ + (0, 1, 5), + (0, 1, 4), + (0, 2, 4), + (0, 3, 3), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 3, 2), + (1, 4, 3), + (1, 4, 4), + (2, 4, 5), + (3, 4, 4), + (3, 4, 4), + ] + G = nx.MultiGraph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True) + b_answer = { + (0, 1, 0): 0.0, + (0, 1, 1): 0.5, + (0, 2, 0): 1.0, + (0, 3, 0): 0.75, + (0, 3, 1): 0.75, + (0, 4, 0): 1.0, + (1, 2, 0): 2.0, + (1, 3, 0): 3.0, + (1, 3, 1): 0.0, + (1, 4, 0): 1.5, + (1, 4, 1): 0.0, + (2, 4, 0): 1.0, + (3, 4, 0): 0.25, + (3, 4, 1): 0.25, + } + norm = len(G) * (len(G) - 1) / 2 + for n in sorted(G.edges(keys=True)): + assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py new file mode 100644 index 0000000..a35a401 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py @@ -0,0 +1,340 @@ +import pytest + +import networkx as nx + + +class TestSubsetBetweennessCentrality: + def test_K5(self): + """Betweenness Centrality Subset: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[1, 3], weight=None + ) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_directed(self): + """Betweenness Centrality Subset: P5 directed""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5(self): + """Betweenness Centrality Subset: P5""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0, 4: 0, 5: 0} + b = 
nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_multiple_target(self): + """Betweenness Centrality Subset: P5 multiple target""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1, 2: 1, 3: 0.5, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box(self): + """Betweenness Centrality Subset: box""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + b_answer = {0: 0, 1: 0.25, 2: 0.25, 3: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path(self): + """Betweenness Centrality Subset: box and path""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)]) + b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path2(self): + """Betweenness Centrality Subset: box and path multiple target""" + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)]) + b_answer = {0: 0, 1: 1.0, 2: 0.5, 20: 0.5, 3: 0.5, 4: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_diamond_multi_path(self): + """Betweenness Centrality Subset: Diamond Multi Path""" + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 10), + (10, 11), + (11, 12), + (12, 9), + (2, 6), + (3, 6), + (4, 6), + (5, 7), + (7, 8), + (6, 8), + (8, 9), + ] + ) + b = nx.betweenness_centrality_subset(G, sources=[1], targets=[9], weight=None) + + expected_b = { + 1: 0, + 2: 1.0 / 10, + 3: 1.0 / 10, + 4: 1.0 / 10, + 5: 1.0 / 10, + 6: 3.0 / 10, + 7: 1.0 / 10, + 8: 4.0 / 10, + 9: 0, + 10: 1.0 / 10, + 11: 1.0 / 10, + 12: 1.0 / 10, + } + + for n in sorted(G): + assert b[n] == pytest.approx(expected_b[n], abs=1e-7) + + def test_normalized_p2(self): + """ + Betweenness Centrality Subset: Normalized P2 + if n <= 2: no normalization, betweenness centrality should be 0 for all nodes. 
+ """ + G = nx.Graph() + nx.add_path(G, range(2)) + b_answer = {0: 0, 1: 0.0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[1], normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P5_directed(self): + """Betweenness Centrality Subset: Normalized Directed P5""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1.0 / 12.0, 2: 1.0 / 12.0, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3], normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Betweenness Centrality Subset: Weighted Graph""" + G = nx.DiGraph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + b_answer = {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[5], normalized=False, weight="weight" + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestEdgeSubsetBetweennessCentrality: + def test_K5(self): + """Edge betweenness subset centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[1, 3], weight=None + ) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 3)] = b_answer[(0, 1)] = 0.5 + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_directed(self): + """Edge betweenness subset centrality: P5 directed""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5(self): + """Edge betweenness subset centrality: P5""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_multiple_target(self): + """Edge betweenness subset centrality: P5 multiple target""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box(self): + """Edge betweenness subset centrality: box""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(0, 2)] = 0.25 + b_answer[(1, 3)] = b_answer[(2, 3)] = 0.25 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path(self): + """Edge betweenness subset centrality: box and path""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 
2), (1, 3), (2, 3), (3, 4), (4, 5)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(0, 2)] = 0.5 + b_answer[(1, 3)] = b_answer[(2, 3)] = 0.5 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path2(self): + """Edge betweenness subset centrality: box and path multiple target""" + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = 1.0 + b_answer[(1, 20)] = b_answer[(3, 20)] = 0.5 + b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_diamond_multi_path(self): + """Edge betweenness subset centrality: Diamond Multi Path""" + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 10), + (10, 11), + (11, 12), + (12, 9), + (2, 6), + (3, 6), + (4, 6), + (5, 7), + (7, 8), + (6, 8), + (8, 9), + ] + ) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(8, 9)] = 0.4 + b_answer[(6, 8)] = b_answer[(7, 8)] = 0.2 + b_answer[(2, 6)] = b_answer[(3, 6)] = b_answer[(4, 6)] = 0.2 / 3.0 + b_answer[(1, 2)] = b_answer[(1, 3)] = b_answer[(1, 4)] = 0.2 / 3.0 + b_answer[(5, 7)] = 0.2 + b_answer[(1, 5)] = 0.2 + b_answer[(9, 12)] = 0.1 + b_answer[(11, 12)] = b_answer[(10, 11)] = b_answer[(1, 10)] = 0.1 + b = nx.edge_betweenness_centrality_subset( + G, sources=[1], targets=[9], weight=None + ) + for n in G.edges(): + sort_n = tuple(sorted(n)) + assert b[n] == pytest.approx(b_answer[sort_n], abs=1e-7) + + def test_normalized_p1(self): + """ + Edge betweenness subset centrality: P1 + if n <= 1: no normalization b=0 for all nodes + """ + G = nx.Graph() + nx.add_path(G, range(1)) + b_answer = dict.fromkeys(G.edges(), 0) + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[0], normalized=True, weight=None + ) + for n in G.edges(): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P5_directed(self): + """Edge betweenness subset centrality: Normalized Directed P5""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.05 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], normalized=True, weight=None + ) + for n in G.edges(): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Edge betweenness subset centrality: Weighted Graph""" + G = nx.DiGraph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 2)] = b_answer[(2, 4)] = b_answer[(4, 5)] = 0.5 + b_answer[(0, 3)] = b_answer[(3, 5)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[5], normalized=False, weight="weight" + ) + for n in G.edges(): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py new file mode 100644 index 0000000..5419560 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py @@ -0,0 +1,274 @@ +""" +Tests for closeness centrality. +""" + +import pytest + +import networkx as nx + + +@pytest.fixture() +def undirected_G(): + G = nx.fast_gnp_random_graph(n=100, p=0.6, seed=123) + cc = nx.closeness_centrality(G) + return G, cc + + +class TestClosenessCentrality: + def test_wf_improved(self): + G = nx.union(nx.path_graph(4), nx.path_graph([4, 5, 6])) + c = nx.closeness_centrality(G) + cwf = nx.closeness_centrality(G, wf_improved=False) + res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25, 4: 0.222, 5: 0.333, 6: 0.222} + wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5, 4: 0.667, 5: 1.0, 6: 0.667} + for n in G: + assert c[n] == pytest.approx(res[n], abs=1e-3) + assert cwf[n] == pytest.approx(wf_res[n], abs=1e-3) + + def test_digraph(self): + G = nx.path_graph(3, create_using=nx.DiGraph) + c = nx.closeness_centrality(G) + cr = nx.closeness_centrality(G.reverse()) + d = {0: 0.0, 1: 0.500, 2: 0.667} + dr = {0: 0.667, 1: 0.500, 2: 0.0} + for n in G: + assert c[n] == pytest.approx(d[n], abs=1e-3) + assert cr[n] == pytest.approx(dr[n], abs=1e-3) + + def test_k5_closeness(self): + G = nx.complete_graph(5) + c = nx.closeness_centrality(G) + d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000} + for n in G: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_closeness(self): + G = nx.path_graph(3) + c = nx.closeness_centrality(G) + d = {0: 0.667, 1: 1.000, 2: 0.667} + for n in G: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_krackhardt_closeness(self): + G = nx.krackhardt_kite_graph() + c = nx.closeness_centrality(G) + d = { + 0: 0.529, + 1: 0.529, + 2: 0.500, + 3: 0.600, + 4: 0.500, + 5: 0.643, + 6: 0.643, + 7: 0.600, + 8: 0.429, + 9: 0.310, + } + for n in G: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_florentine_families_closeness(self): + G = nx.florentine_families_graph() + c = nx.closeness_centrality(G) + d = { + "Acciaiuoli": 0.368, + "Albizzi": 0.483, + "Barbadori": 0.4375, + "Bischeri": 0.400, + "Castellani": 0.389, + "Ginori": 0.333, + "Guadagni": 0.467, + "Lamberteschi": 0.326, + "Medici": 0.560, + "Pazzi": 0.286, + "Peruzzi": 0.368, + "Ridolfi": 0.500, + "Salviati": 0.389, + "Strozzi": 0.4375, + "Tornabuoni": 0.483, + } + for n in G: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_les_miserables_closeness(self): + G = nx.les_miserables_graph() + c = nx.closeness_centrality(G) + d = { + "Napoleon": 0.302, + "Myriel": 0.429, + "MlleBaptistine": 0.413, + "MmeMagloire": 0.413, + "CountessDeLo": 0.302, + "Geborand": 0.302, + "Champtercier": 0.302, + "Cravatte": 0.302, + "Count": 0.302, + "OldMan": 0.302, + "Valjean": 0.644, + "Labarre": 0.394, + "Marguerite": 0.413, + "MmeDeR": 0.394, + "Isabeau": 0.394, + "Gervais": 0.394, + "Listolier": 0.341, + "Tholomyes": 0.392, + "Fameuil": 0.341, + "Blacheville": 0.341, + "Favourite": 0.341, + "Dahlia": 0.341, + "Zephine": 0.341, + "Fantine": 0.461, + "MmeThenardier": 0.461, + "Thenardier": 0.517, + "Cosette": 0.478, + "Javert": 0.517, + "Fauchelevent": 0.402, + "Bamatabois": 0.427, + "Perpetue": 0.318, + "Simplice": 0.418, + "Scaufflaire": 0.394, + "Woman1": 0.396, + "Judge": 0.404, + "Champmathieu": 0.404, + "Brevet": 0.404, + "Chenildieu": 0.404, + "Cochepaille": 0.404, + "Pontmercy": 0.373, + "Boulatruelle": 0.342, + 
"Eponine": 0.396, + "Anzelma": 0.352, + "Woman2": 0.402, + "MotherInnocent": 0.398, + "Gribier": 0.288, + "MmeBurgon": 0.344, + "Jondrette": 0.257, + "Gavroche": 0.514, + "Gillenormand": 0.442, + "Magnon": 0.335, + "MlleGillenormand": 0.442, + "MmePontmercy": 0.315, + "MlleVaubois": 0.308, + "LtGillenormand": 0.365, + "Marius": 0.531, + "BaronessT": 0.352, + "Mabeuf": 0.396, + "Enjolras": 0.481, + "Combeferre": 0.392, + "Prouvaire": 0.357, + "Feuilly": 0.392, + "Courfeyrac": 0.400, + "Bahorel": 0.394, + "Bossuet": 0.475, + "Joly": 0.394, + "Grantaire": 0.358, + "MotherPlutarch": 0.285, + "Gueulemer": 0.463, + "Babet": 0.463, + "Claquesous": 0.452, + "Montparnasse": 0.458, + "Toussaint": 0.402, + "Child1": 0.342, + "Child2": 0.342, + "Brujon": 0.380, + "MmeHucheloup": 0.353, + } + for n in G: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_closeness(self): + edges = [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + XG = nx.Graph() + XG.add_weighted_edges_from(edges) + c = nx.closeness_centrality(XG, distance="weight") + d = {"y": 0.200, "x": 0.286, "s": 0.138, "u": 0.235, "v": 0.200} + for n in sorted(XG): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + +class TestIncrementalClosenessCentrality: + @staticmethod + def pick_add_edge(G): + u = nx.utils.arbitrary_element(G) + possible_nodes = set(G) - (set(G.neighbors(u)) | {u}) + v = nx.utils.arbitrary_element(possible_nodes) + return (u, v) + + @staticmethod + def pick_remove_edge(G): + u = nx.utils.arbitrary_element(G) + possible_nodes = list(G.neighbors(u)) + v = nx.utils.arbitrary_element(possible_nodes) + return (u, v) + + def test_directed_raises(self): + dir_G = nx.gn_graph(n=5) + prev_cc = None + edge = self.pick_add_edge(dir_G) + with pytest.raises(nx.NetworkXNotImplemented): + nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insertion=True) + + def test_wrong_size_prev_cc_raises(self, undirected_G): + G, prev_cc = undirected_G + edge = self.pick_add_edge(G) + prev_cc.pop(0) + with pytest.raises(nx.NetworkXError): + nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=True) + + def test_wrong_nodes_prev_cc_raises(self, undirected_G): + G, prev_cc = undirected_G + + edge = self.pick_add_edge(G) + num_nodes = len(prev_cc) + prev_cc.pop(0) + prev_cc[num_nodes] = 0.5 + with pytest.raises(nx.NetworkXError): + nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=True) + + def test_zero_centrality(self): + G = nx.path_graph(3) + prev_cc = nx.closeness_centrality(G) + edge = self.pick_remove_edge(G) + test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False) + G.remove_edges_from([edge]) + real_cc = nx.closeness_centrality(G) + shared_items = set(test_cc.items()) & set(real_cc.items()) + assert len(shared_items) == len(real_cc) + assert 0 in test_cc.values() + + def test_incremental(self, undirected_G): + # Check that incremental and regular give same output + G, _ = undirected_G + prev_cc = None + for i in range(5): + if i % 2 == 0: + # Remove an edge + insert = False + edge = self.pick_remove_edge(G) + else: + # Add an edge + insert = True + edge = self.pick_add_edge(G) + + test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + + if insert: + G.add_edges_from([edge]) + else: + G.remove_edges_from([edge]) + + real_cc = nx.closeness_centrality(G) + + assert set(test_cc.items()) == set(real_cc.items()) + + prev_cc = 
test_cc diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py new file mode 100644 index 0000000..4e3d438 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py @@ -0,0 +1,197 @@ +import pytest + +import networkx as nx +from networkx import approximate_current_flow_betweenness_centrality as approximate_cfbc +from networkx import edge_current_flow_betweenness_centrality as edge_current_flow + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + wb_answer = {0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="weight") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + wb_answer = {0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="other") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0, 1: 2.0 / 3, 2: 2.0 / 3, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + b_answer = {0: 0, 1: 2, 2: 2, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {"a": 1.0, "b": 0.0, "c": 0.0, "d": 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_solvers2(self): + """Betweenness centrality: alternate solvers""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestApproximateFlowBetweennessCentrality: + def test_K4_normalized(self): + "Approximate current-flow betweenness centrality: K4 normalized" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = 
approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
+
+    def test_K4(self):
+        "Approximate current-flow betweenness centrality: K4"
+        G = nx.complete_graph(4)
+        b = nx.current_flow_betweenness_centrality(G, normalized=False)
+        epsilon = 0.1
+        ba = approximate_cfbc(G, normalized=False, epsilon=0.5 * epsilon)
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], ba[n], atol=epsilon * len(G) ** 2)
+
+    def test_star(self):
+        "Approximate current-flow betweenness centrality: star"
+        G = nx.Graph()
+        nx.add_star(G, ["a", "b", "c", "d"])
+        b = nx.current_flow_betweenness_centrality(G, normalized=True)
+        epsilon = 0.1
+        ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
+
+    def test_grid(self):
+        "Approximate current-flow betweenness centrality: 2d grid"
+        G = nx.grid_2d_graph(4, 4)
+        b = nx.current_flow_betweenness_centrality(G, normalized=True)
+        epsilon = 0.1
+        ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
+
+    def test_seed(self):
+        G = nx.complete_graph(4)
+        b = approximate_cfbc(G, normalized=False, epsilon=0.05, seed=1)
+        b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], b_answer[n], atol=0.1)
+
+    def test_solvers(self):
+        "Approximate current-flow betweenness centrality: solvers"
+        G = nx.complete_graph(4)
+        epsilon = 0.1
+        for solver in ["full", "lu", "cg"]:
+            b = approximate_cfbc(
+                G, normalized=False, solver=solver, epsilon=0.5 * epsilon
+            )
+            b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+            for n in sorted(G):
+                np.testing.assert_allclose(b[n], b_answer[n], atol=epsilon)
+
+    def test_lower_kmax(self):
+        G = nx.complete_graph(4)
+        with pytest.raises(nx.NetworkXError, match="Increase kmax or epsilon"):
+            nx.approximate_current_flow_betweenness_centrality(G, kmax=4)
+
+
+class TestWeightedFlowBetweennessCentrality:
+    pass
+
+
+class TestEdgeFlowBetweennessCentrality:
+    def test_K4_normalized(self):
+        """Edge flow betweenness centrality: K4 (normalized)"""
+        G = nx.complete_graph(4)
+        b = edge_current_flow(G, normalized=True)
+        b_answer = dict.fromkeys(G.edges(), 0.25)
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_K4(self):
+        """Edge flow betweenness centrality: K4 (unnormalized)"""
+        G = nx.complete_graph(4)
+        b = edge_current_flow(G, normalized=False)
+        b_answer = dict.fromkeys(G.edges(), 0.75)
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_C4(self):
+        """Edge flow betweenness centrality: C4"""
+        G = nx.cycle_graph(4)
+        b = edge_current_flow(G, normalized=False)
+        b_answer = {(0, 1): 1.25, (0, 3): 1.25, (1, 2): 1.25, (2, 3): 1.25}
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_P4(self):
+        """Edge flow betweenness centrality: P4"""
+        G = nx.path_graph(4)
+        b = edge_current_flow(G, normalized=False)
+        b_answer = {(0, 1): 1.5, (1, 2): 2.0, (2, 3): 1.5}
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+
+@pytest.mark.parametrize(
+    "centrality_func",
+    (
+        nx.current_flow_betweenness_centrality,
+        nx.edge_current_flow_betweenness_centrality,
+
nx.approximate_current_flow_betweenness_centrality, + ), +) +def test_unconnected_graphs_betweenness_centrality(centrality_func): + G = nx.Graph([(1, 2), (3, 4)]) + G.add_node(5) + with pytest.raises(nx.NetworkXError, match="Graph not connected"): + centrality_func(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py new file mode 100644 index 0000000..7b1611b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py @@ -0,0 +1,147 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx import edge_current_flow_betweenness_centrality as edge_current_flow +from networkx import ( + edge_current_flow_betweenness_centrality_subset as edge_current_flow_subset, +) + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # test weighted network + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True, weight="other" + ) + b_answer = nx.current_flow_betweenness_centrality( + G, normalized=True, weight="other" + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + 
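+# Note: every test in the class above checks an identity rather than
+# hard-coded values: calling the subset variant with
+# sources == targets == list(G) should reproduce the plain current-flow
+# betweenness of the same graph.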
+# class TestWeightedFlowBetweennessCentrality(): +# pass + + +class TestEdgeFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=False) + b_answer = edge_current_flow(G, normalized=False) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + # test weighted network + G.add_edge(0, 1, weight=0.5, other=0.3) + b = edge_current_flow_subset(G, list(G), list(G), normalized=False, weight=None) + # weight is None => same as unweighted network + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + b = edge_current_flow_subset(G, list(G), list(G), normalized=False) + b_answer = edge_current_flow(G, normalized=False) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + b = edge_current_flow_subset( + G, list(G), list(G), normalized=False, weight="other" + ) + b_answer = edge_current_flow(G, normalized=False, weight="other") + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py new file mode 100644 index 0000000..2528d62 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py @@ -0,0 +1,43 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +class TestFlowClosenessCentrality: + def test_K4(self): + """Closeness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_closeness_centrality(G) + b_answer = {0: 2.0 / 3, 1: 2.0 / 3, 2: 2.0 / 3, 3: 2.0 / 3} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Closeness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_closeness_centrality(G) + b_answer = {0: 1.0 / 6, 1: 1.0 / 4, 2: 1.0 / 4, 3: 1.0 / 6} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Closeness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_closeness_centrality(G) + b_answer = {"a": 1.0 / 3, "b": 0.6 / 3, "c": 0.6 / 3, "d": 0.6 / 3} + 
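+        # Note: these expected values are consistent with reading
+        # current-flow closeness as the reciprocal of summed effective
+        # resistances, C(v) = 1 / sum_u R_eff(v, u): the hub "a" sees three
+        # unit resistances (1 / 3), while each leaf sees 1 to the hub and 2
+        # to each of the other two leaves, giving 1 / 5 = 0.6 / 3.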
for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_current_flow_closeness_centrality_not_connected(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + with pytest.raises(nx.NetworkXError): + nx.current_flow_closeness_centrality(G) + + +class TestWeightedFlowClosenessCentrality: + pass diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py new file mode 100644 index 0000000..e39aa3b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py @@ -0,0 +1,144 @@ +""" +Unit tests for degree centrality. +""" + +import pytest + +import networkx as nx + + +class TestDegreeCentrality: + def setup_method(self): + self.K = nx.krackhardt_kite_graph() + self.P3 = nx.path_graph(3) + self.K5 = nx.complete_graph(5) + + F = nx.Graph() # Florentine families + F.add_edge("Acciaiuoli", "Medici") + F.add_edge("Castellani", "Peruzzi") + F.add_edge("Castellani", "Strozzi") + F.add_edge("Castellani", "Barbadori") + F.add_edge("Medici", "Barbadori") + F.add_edge("Medici", "Ridolfi") + F.add_edge("Medici", "Tornabuoni") + F.add_edge("Medici", "Albizzi") + F.add_edge("Medici", "Salviati") + F.add_edge("Salviati", "Pazzi") + F.add_edge("Peruzzi", "Strozzi") + F.add_edge("Peruzzi", "Bischeri") + F.add_edge("Strozzi", "Ridolfi") + F.add_edge("Strozzi", "Bischeri") + F.add_edge("Ridolfi", "Tornabuoni") + F.add_edge("Tornabuoni", "Guadagni") + F.add_edge("Albizzi", "Ginori") + F.add_edge("Albizzi", "Guadagni") + F.add_edge("Bischeri", "Guadagni") + F.add_edge("Guadagni", "Lamberteschi") + self.F = F + + G = nx.DiGraph() + G.add_edge(0, 5) + G.add_edge(1, 5) + G.add_edge(2, 5) + G.add_edge(3, 5) + G.add_edge(4, 5) + G.add_edge(5, 6) + G.add_edge(5, 7) + G.add_edge(5, 8) + self.G = G + + def test_degree_centrality_1(self): + d = nx.degree_centrality(self.K5) + exact = dict(zip(range(5), [1] * 5)) + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_degree_centrality_2(self): + d = nx.degree_centrality(self.P3) + exact = {0: 0.5, 1: 1, 2: 0.5} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_degree_centrality_3(self): + d = nx.degree_centrality(self.K) + exact = { + 0: 0.444, + 1: 0.444, + 2: 0.333, + 3: 0.667, + 4: 0.333, + 5: 0.556, + 6: 0.556, + 7: 0.333, + 8: 0.222, + 9: 0.111, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7) + + def test_degree_centrality_4(self): + d = nx.degree_centrality(self.F) + names = sorted(self.F.nodes()) + dcs = [ + 0.071, + 0.214, + 0.143, + 0.214, + 0.214, + 0.071, + 0.286, + 0.071, + 0.429, + 0.071, + 0.214, + 0.214, + 0.143, + 0.286, + 0.214, + ] + exact = dict(zip(names, dcs)) + for n, dc in d.items(): + assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7) + + def test_indegree_centrality(self): + d = nx.in_degree_centrality(self.G) + exact = { + 0: 0.0, + 1: 0.0, + 2: 0.0, + 3: 0.0, + 4: 0.0, + 5: 0.625, + 6: 0.125, + 7: 0.125, + 8: 0.125, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_outdegree_centrality(self): + d = nx.out_degree_centrality(self.G) + exact = { + 0: 0.125, + 1: 0.125, + 2: 0.125, + 3: 0.125, + 4: 0.125, + 5: 0.375, + 6: 0.0, + 7: 0.0, + 8: 0.0, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def 
test_small_graph_centrality(self): + G = nx.empty_graph(create_using=nx.DiGraph) + assert {} == nx.degree_centrality(G) + assert {} == nx.out_degree_centrality(G) + assert {} == nx.in_degree_centrality(G) + + G = nx.empty_graph(1, create_using=nx.DiGraph) + assert {0: 1} == nx.degree_centrality(G) + assert {0: 1} == nx.out_degree_centrality(G) + assert {0: 1} == nx.in_degree_centrality(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py new file mode 100644 index 0000000..05de1c4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py @@ -0,0 +1,73 @@ +import networkx as nx + + +def small_ego_G(): + """The sample network from https://arxiv.org/pdf/1310.6753v1.pdf""" + edges = [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("b", "f"), + ("c", "d"), + ("c", "f"), + ("c", "h"), + ("d", "f"), + ("e", "f"), + ("f", "h"), + ("h", "j"), + ("h", "k"), + ("i", "j"), + ("i", "k"), + ("j", "k"), + ("u", "a"), + ("u", "b"), + ("u", "c"), + ("u", "d"), + ("u", "e"), + ("u", "f"), + ("u", "g"), + ("u", "h"), + ("u", "i"), + ("u", "j"), + ("u", "k"), + ] + G = nx.Graph() + G.add_edges_from(edges) + + return G + + +class TestDispersion: + def test_article(self): + """our algorithm matches article's""" + G = small_ego_G() + disp_uh = nx.dispersion(G, "u", "h", normalized=False) + disp_ub = nx.dispersion(G, "u", "b", normalized=False) + assert disp_uh == 4 + assert disp_ub == 1 + + def test_results_length(self): + """there is a result for every node""" + G = small_ego_G() + disp = nx.dispersion(G) + disp_Gu = nx.dispersion(G, "u") + disp_uv = nx.dispersion(G, "u", "h") + assert len(disp) == len(G) + assert len(disp_Gu) == len(G) - 1 + assert isinstance(disp_uv, float) + + def test_dispersion_v_only(self): + G = small_ego_G() + disp_G_h = nx.dispersion(G, v="h", normalized=False) + disp_G_h_normalized = nx.dispersion(G, v="h", normalized=True) + assert disp_G_h == {"c": 0, "f": 0, "j": 0, "k": 0, "u": 4} + assert disp_G_h_normalized == {"c": 0.0, "f": 0.0, "j": 0.0, "k": 0.0, "u": 1.0} + + def test_impossible_things(self): + G = nx.karate_club_graph() + disp = nx.dispersion(G) + for u in disp: + for v in disp[u]: + assert disp[u][v] >= 0 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py new file mode 100644 index 0000000..4a73e1a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py @@ -0,0 +1,186 @@ +import math + +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestEigenvectorCentrality: + def test_K5(self): + """Eigenvector centrality: K5""" + G = nx.complete_graph(5) + b = nx.eigenvector_centrality(G) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + nstart = dict.fromkeys(G, 1) + b = nx.eigenvector_centrality(G, nstart=nstart) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3(self): + """Eigenvector centrality: P3""" + G = 
nx.path_graph(3) + b_answer = {0: 0.5, 1: 0.7071, 2: 0.5} + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + b = nx.eigenvector_centrality(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_P3_unweighted(self): + """Eigenvector centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.5, 1: 0.7071, 2: 0.5} + b = nx.eigenvector_centrality_numpy(G, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_maxiter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + G = nx.path_graph(3) + nx.eigenvector_centrality(G, max_iter=0) + + +class TestEigenvectorCentralityDirected: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + + G.add_edges_from(edges, weight=2.0) + cls.G = G.reverse() + cls.G.evc = [ + 0.25368793, + 0.19576478, + 0.32817092, + 0.40430835, + 0.48199885, + 0.15724483, + 0.51346196, + 0.32475403, + ] + + H = nx.DiGraph() + + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + + G.add_edges_from(edges) + cls.H = G.reverse() + cls.H.evc = [ + 0.25368793, + 0.19576478, + 0.32817092, + 0.40430835, + 0.48199885, + 0.15724483, + 0.51346196, + 0.32475403, + ] + + def test_eigenvector_centrality_weighted(self): + G = self.G + p = nx.eigenvector_centrality(G) + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-4) + + def test_eigenvector_centrality_weighted_numpy(self): + G = self.G + p = nx.eigenvector_centrality_numpy(G) + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_eigenvector_centrality_unweighted(self): + G = self.H + p = nx.eigenvector_centrality(G) + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-4) + + def test_eigenvector_centrality_unweighted_numpy(self): + G = self.H + p = nx.eigenvector_centrality_numpy(G) + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestEigenvectorCentralityExceptions: + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality(nx.MultiGraph()) + + def test_multigraph_numpy(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality_numpy(nx.MultiGraph()) + + def test_null(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality(nx.Graph()) + + def test_null_numpy(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality_numpy(nx.Graph()) + + @pytest.mark.parametrize( + "G", + [ + nx.empty_graph(3), + nx.DiGraph([(0, 1), (1, 2)]), + ], + ) + def test_disconnected_numpy(self, G): + msg = "does not give consistent results for disconnected" + with pytest.raises(nx.AmbiguousSolution, match=msg): + nx.eigenvector_centrality_numpy(G) + + def test_zero_nstart(self): + G = nx.Graph([(1, 2), (1, 3), (2, 3)]) + with pytest.raises( + nx.NetworkXException, match="initial vector cannot have all zero values" + ): + nx.eigenvector_centrality(G, nstart=dict.fromkeys(G, 0)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_group.py 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_group.py new file mode 100644 index 0000000..82343f2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_group.py @@ -0,0 +1,277 @@ +""" +Tests for Group Centrality Measures +""" + +import pytest + +import networkx as nx + + +class TestGroupBetweennessCentrality: + def test_group_betweenness_single_node(self): + """ + Group betweenness centrality for single node group + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=False + ) + b_answer = 3.0 + assert b == b_answer + + def test_group_betweenness_with_endpoints(self): + """ + Group betweenness centrality for single node group + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=True + ) + b_answer = 7.0 + assert b == b_answer + + def test_group_betweenness_normalized(self): + """ + Group betweenness centrality for group with more than + 1 node and normalized + """ + G = nx.path_graph(5) + C = [1, 3] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=True, endpoints=False + ) + b_answer = 1.0 + assert b == b_answer + + def test_two_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 + """ + G = nx.cycle_graph(7) + C = [[0, 1, 6], [0, 1, 5]] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = [0.0, 3.0] + assert b == b_answer + + def test_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 + """ + G = nx.cycle_graph(6) + C = [0, 1, 5] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_disconnected_graph(self): + """ + Group betweenness centrality in a disconnected graph + """ + G = nx.path_graph(5) + G.remove_edge(0, 1) + C = [1] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.group_betweenness_centrality(nx.path_graph(5), [4, 7, 8]) + + def test_group_betweenness_directed_weighted(self): + """ + Group betweenness centrality in a directed and weighted graph + """ + G = nx.DiGraph() + G.add_edge(1, 0, weight=1) + G.add_edge(0, 2, weight=2) + G.add_edge(1, 2, weight=3) + G.add_edge(3, 1, weight=4) + G.add_edge(2, 3, weight=1) + G.add_edge(4, 3, weight=6) + G.add_edge(2, 4, weight=7) + C = [1, 2] + b = nx.group_betweenness_centrality(G, C, weight="weight", normalized=False) + b_answer = 5.0 + assert b == b_answer + + +class TestProminentGroup: + np = pytest.importorskip("numpy") + pd = pytest.importorskip("pandas") + + def test_prominent_group_single_node(self): + """ + Prominent group for single node + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, endpoints=False) + b_answer, g_answer = 4.0, [2] + assert b == b_answer and g == g_answer + + def test_prominent_group_with_c(self): + """ + Prominent group without some nodes + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, C=[2]) + b_answer, g_answer = 3.0, [1] + assert b == b_answer and g == g_answer + + def test_prominent_group_normalized_endpoints(self): + """ + Prominent group with normalized result, 
with endpoints
+        """
+        G = nx.cycle_graph(7)
+        k = 2
+        b, g = nx.prominent_group(G, k, normalized=True, endpoints=True)
+        b_answer, g_answer = 1.7, [2, 5]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_disconnected_graph(self):
+        """
+        Prominent group of a disconnected graph
+        """
+        G = nx.path_graph(6)
+        G.remove_edge(0, 1)
+        k = 1
+        b, g = nx.prominent_group(G, k, weight=None, normalized=False)
+        b_answer, g_answer = 4.0, [3]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_node_not_in_graph(self):
+        """
+        Node(s) in C not in graph, raises NodeNotFound exception
+        """
+        with pytest.raises(nx.NodeNotFound):
+            nx.prominent_group(nx.path_graph(5), 1, C=[10])
+
+    def test_prominent_group_directed_weighted(self):
+        """
+        Prominent group in a directed and weighted graph
+        """
+        G = nx.DiGraph()
+        G.add_edge(1, 0, weight=1)
+        G.add_edge(0, 2, weight=2)
+        G.add_edge(1, 2, weight=3)
+        G.add_edge(3, 1, weight=4)
+        G.add_edge(2, 3, weight=1)
+        G.add_edge(4, 3, weight=6)
+        G.add_edge(2, 4, weight=7)
+        k = 2
+        b, g = nx.prominent_group(G, k, weight="weight", normalized=False)
+        b_answer, g_answer = 5.0, [1, 2]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_greedy_algorithm(self):
+        """
+        Prominent group computed with the greedy algorithm
+        """
+        G = nx.cycle_graph(7)
+        k = 2
+        b, g = nx.prominent_group(G, k, normalized=True, endpoints=True, greedy=True)
+        b_answer, g_answer = 1.7, [6, 3]
+        assert b == b_answer and g == g_answer
+
+
+class TestGroupClosenessCentrality:
+    def test_group_closeness_single_node(self):
+        """
+        Group closeness centrality for a single node group
+        """
+        G = nx.path_graph(5)
+        c = nx.group_closeness_centrality(G, [1])
+        c_answer = nx.closeness_centrality(G, 1)
+        assert c == c_answer
+
+    def test_group_closeness_disconnected(self):
+        """
+        Group closeness centrality for a disconnected graph
+        """
+        G = nx.Graph()
+        G.add_nodes_from([1, 2, 3, 4])
+        c = nx.group_closeness_centrality(G, [1, 2])
+        c_answer = 0
+        assert c == c_answer
+
+    def test_group_closeness_multiple_node(self):
+        """
+        Group closeness centrality for a group with more than
+        1 node
+        """
+        G = nx.path_graph(4)
+        c = nx.group_closeness_centrality(G, [1, 2])
+        c_answer = 1
+        assert c == c_answer
+
+    def test_group_closeness_node_not_in_graph(self):
+        """
+        Node(s) in S not in graph, raises NodeNotFound exception
+        """
+        with pytest.raises(nx.NodeNotFound):
+            nx.group_closeness_centrality(nx.path_graph(5), [6, 7, 8])
+
+
+class TestGroupDegreeCentrality:
+    def test_group_degree_centrality_single_node(self):
+        """
+        Group degree centrality for a single node group
+        """
+        G = nx.path_graph(4)
+        d = nx.group_degree_centrality(G, [1])
+        d_answer = nx.degree_centrality(G)[1]
+        assert d == d_answer
+
+    def test_group_degree_centrality_multiple_node(self):
+        """
+        Group degree centrality for a group with more than
+        1 node
+        """
+        G = nx.Graph()
+        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
+        G.add_edges_from(
+            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
+        )
+        d = nx.group_degree_centrality(G, [1, 2])
+        d_answer = 1
+        assert d == d_answer
+
+    def test_group_in_degree_centrality(self):
+        """
+        Group in-degree centrality in a DiGraph
+        """
+        G = nx.DiGraph()
+        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
+        G.add_edges_from(
+            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
+        )
+        d = nx.group_in_degree_centrality(G, [1, 2])
+        d_answer = 0
+        assert d == d_answer
+
+    def test_group_out_degree_centrality(self):
""" + Group out-degree centrality in a DiGraph + """ + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_out_degree_centrality(G, [1, 2]) + d_answer = 1 + assert d == d_answer + + def test_group_degree_centrality_node_not_in_graph(self): + """ + Node(s) in S not in graph, raises NetworkXError + """ + with pytest.raises(nx.NetworkXError): + nx.group_degree_centrality(nx.path_graph(5), [6, 7, 8]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py new file mode 100644 index 0000000..4b3dc4a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py @@ -0,0 +1,122 @@ +""" +Tests for degree centrality. +""" + +import pytest + +import networkx as nx +from networkx.algorithms.centrality import harmonic_centrality + + +class TestClosenessCentrality: + @classmethod + def setup_class(cls): + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + + cls.C4 = nx.cycle_graph(4) + cls.C4_directed = nx.cycle_graph(4, create_using=nx.DiGraph) + + cls.C5 = nx.cycle_graph(5) + + cls.T = nx.balanced_tree(r=2, h=2) + + cls.Gb = nx.DiGraph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (0, 4), (2, 1), (2, 3), (4, 3)]) + + def test_p3_harmonic(self): + c = harmonic_centrality(self.P3) + d = {0: 1.5, 1: 2, 2: 1.5} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_harmonic(self): + c = harmonic_centrality(self.P4) + d = {0: 1.8333333, 1: 2.5, 2: 2.5, 3: 1.8333333} + for n in sorted(self.P4): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_clique_complete(self): + c = harmonic_centrality(self.K5) + d = {0: 4, 1: 4, 2: 4, 3: 4, 4: 4} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_cycle_C4(self): + c = harmonic_centrality(self.C4) + d = {0: 2.5, 1: 2.5, 2: 2.5, 3: 2.5} + for n in sorted(self.C4): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_cycle_C5(self): + c = harmonic_centrality(self.C5) + d = {0: 3, 1: 3, 2: 3, 3: 3, 4: 3, 5: 4} + for n in sorted(self.C5): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_bal_tree(self): + c = harmonic_centrality(self.T) + d = {0: 4.0, 1: 4.1666, 2: 4.1666, 3: 2.8333, 4: 2.8333, 5: 2.8333, 6: 2.8333} + for n in sorted(self.T): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_exampleGraph(self): + c = harmonic_centrality(self.Gb) + d = {0: 0, 1: 2, 2: 1, 3: 2.5, 4: 1} + for n in sorted(self.Gb): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_harmonic(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("a", "b", 10), + ("d", "c", 5), + ("a", "c", 1), + ("e", "f", 2), + ("f", "c", 1), + ("a", "f", 3), + ] + ) + c = harmonic_centrality(XG, distance="weight") + d = {"a": 0, "b": 0.1, "c": 2.533, "d": 0, "e": 0, "f": 0.83333} + for n in sorted(XG): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_empty(self): + G = nx.DiGraph() + c = harmonic_centrality(G, distance="weight") + d = {} + assert c == d + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + c = harmonic_centrality(G, distance="weight") + d = {0: 0} + assert c == d + + def test_cycle_c4_directed(self): + c = harmonic_centrality(self.C4_directed, nbunch=[0, 1], sources=[1, 2]) 
+ d = {0: 0.833, 1: 0.333} + for n in [0, 1]: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_cycle_c4_directed_subset(self): + c = harmonic_centrality(self.C4_directed, nbunch=[0, 1]) + d = 1.833 + for n in [0, 1]: + assert c[n] == pytest.approx(d, abs=1e-3) + + def test_p3_harmonic_subset(self): + c = harmonic_centrality(self.P3, sources=[0, 1]) + d = {0: 1, 1: 1, 2: 1.5} + for n in self.P3: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_harmonic_subset(self): + c = harmonic_centrality(self.P4, nbunch=[2, 3], sources=[0, 1]) + d = {2: 1.5, 3: 0.8333333} + for n in [2, 3]: + assert c[n] == pytest.approx(d[n], abs=1e-3) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py new file mode 100644 index 0000000..361a370 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py @@ -0,0 +1,345 @@ +import math + +import pytest + +import networkx as nx + + +class TestKatzCentrality: + def test_K5(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + nstart = dict.fromkeys(G, 1) + b = nx.katz_centrality(G, alpha, nstart=nstart) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_maxiter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.katz_centrality(nx.path_graph(3), 0.1, max_iter=0) + + def test_beta_as_scalar(self): + alpha = 0.1 + beta = 0.1 + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_dict(self): + alpha = 0.1 + beta = {0: 1.0, 1: 1.0, 2: 1.0} + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_multiple_alpha(self): + alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6] + for alpha in alpha_list: + b_answer = { + 0.1: { + 0: 0.5598852584152165, + 1: 0.6107839182711449, + 2: 0.5598852584152162, + }, + 0.2: { + 0: 0.5454545454545454, + 1: 0.6363636363636365, + 2: 0.5454545454545454, + }, + 0.3: { + 0: 0.5333964609104419, + 1: 0.6564879518897746, + 2: 0.5333964609104419, + }, + 0.4: { + 0: 0.5232045649263551, + 1: 0.6726915834767423, + 2: 0.5232045649263551, + }, + 0.5: { + 0: 0.5144957746691622, + 1: 0.6859943117075809, + 2: 0.5144957746691622, + }, + 0.6: { + 0: 0.5069794004195823, + 1: 0.6970966755769258, + 2: 0.5069794004195823, + }, + } + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.katz_centrality(nx.MultiGraph(), 0.1) + + def test_empty(self): + e = nx.katz_centrality(nx.Graph(), 0.1) + assert e 
== {} + + def test_bad_beta(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + beta = {0: 77} + nx.katz_centrality(G, 0.1, beta=beta) + + def test_bad_beta_number(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + nx.katz_centrality(G, 0.1, beta="foo") + + +class TestKatzCentralityNumpy: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_K5(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality_numpy(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_scalar(self): + alpha = 0.1 + beta = 0.1 + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_dict(self): + alpha = 0.1 + beta = {0: 1.0, 1: 1.0, 2: 1.0} + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_multiple_alpha(self): + alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6] + for alpha in alpha_list: + b_answer = { + 0.1: { + 0: 0.5598852584152165, + 1: 0.6107839182711449, + 2: 0.5598852584152162, + }, + 0.2: { + 0: 0.5454545454545454, + 1: 0.6363636363636365, + 2: 0.5454545454545454, + }, + 0.3: { + 0: 0.5333964609104419, + 1: 0.6564879518897746, + 2: 0.5333964609104419, + }, + 0.4: { + 0: 0.5232045649263551, + 1: 0.6726915834767423, + 2: 0.5232045649263551, + }, + 0.5: { + 0: 0.5144957746691622, + 1: 0.6859943117075809, + 2: 0.5144957746691622, + }, + 0.6: { + 0: 0.5069794004195823, + 1: 0.6970966755769258, + 2: 0.5069794004195823, + }, + } + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.katz_centrality(nx.MultiGraph(), 0.1) + + def test_empty(self): + e = nx.katz_centrality(nx.Graph(), 0.1) + assert e == {} + + def test_bad_beta(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + beta = {0: 77} + nx.katz_centrality_numpy(G, 0.1, beta=beta) + + def test_bad_beta_numbe(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + nx.katz_centrality_numpy(G, 0.1, beta="foo") + + def test_K5_unweighted(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha, weight=None) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.eigenvector_centrality_numpy(G, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3_unweighted(self): + """Katz centrality: P3""" + alpha = 0.1 + G = 
nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality_numpy(G, alpha, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + +class TestKatzCentralityDirected: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + G.add_edges_from(edges, weight=2.0) + cls.G = G.reverse() + cls.G.alpha = 0.1 + cls.G.evc = [ + 0.3289589783189635, + 0.2832077296243516, + 0.3425906003685471, + 0.3970420865198392, + 0.41074871061646284, + 0.272257430756461, + 0.4201989685435462, + 0.34229059218038554, + ] + + H = nx.DiGraph(edges) + cls.H = G.reverse() + cls.H.alpha = 0.1 + cls.H.evc = [ + 0.3289589783189635, + 0.2832077296243516, + 0.3425906003685471, + 0.3970420865198392, + 0.41074871061646284, + 0.272257430756461, + 0.4201989685435462, + 0.34229059218038554, + ] + + def test_katz_centrality_weighted(self): + G = self.G + alpha = self.G.alpha + p = nx.katz_centrality(G, alpha, weight="weight") + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_katz_centrality_unweighted(self): + H = self.H + alpha = self.H.alpha + p = nx.katz_centrality(H, alpha, weight="weight") + for a, b in zip(list(p.values()), self.H.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestKatzCentralityDirectedNumpy(TestKatzCentralityDirected): + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + super().setup_class() + + def test_katz_centrality_weighted(self): + G = self.G + alpha = self.G.alpha + p = nx.katz_centrality_numpy(G, alpha, weight="weight") + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_katz_centrality_unweighted(self): + H = self.H + alpha = self.H.alpha + p = nx.katz_centrality_numpy(H, alpha, weight="weight") + for a, b in zip(list(p.values()), self.H.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestKatzEigenvectorVKatz: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_eigenvector_v_katz_random(self): + G = nx.gnp_random_graph(10, 0.5, seed=1234) + l = max(np.linalg.eigvals(nx.adjacency_matrix(G).todense())) + e = nx.eigenvector_centrality_numpy(G) + k = nx.katz_centrality_numpy(G, 1.0 / l) + for n in G: + assert e[n] == pytest.approx(k[n], abs=1e-7) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py new file mode 100644 index 0000000..c2a2915 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py @@ -0,0 +1,220 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + + +def test_laplacian_centrality_null_graph(): + G = nx.Graph() + with pytest.raises(nx.NetworkXPointlessConcept): + d = nx.laplacian_centrality(G, normalized=False) + + +def test_laplacian_centrality_single_node(): + """See gh-6571""" + G = nx.empty_graph(1) + assert nx.laplacian_centrality(G, normalized=False) == {0: 0} + with pytest.raises(ZeroDivisionError): + nx.laplacian_centrality(G, 
normalized=True) + + +def test_laplacian_centrality_unconnected_nodes(): + """laplacian_centrality on a unconnected node graph should return 0 + + For graphs without edges, the Laplacian energy is 0 and is unchanged with + node removal, so:: + + LC(v) = LE(G) - LE(G - v) = 0 - 0 = 0 + """ + G = nx.empty_graph(3) + assert nx.laplacian_centrality(G, normalized=False) == {0: 0, 1: 0, 2: 0} + + +def test_laplacian_centrality_empty_graph(): + G = nx.empty_graph(3) + with pytest.raises(ZeroDivisionError): + d = nx.laplacian_centrality(G, normalized=True) + + +def test_laplacian_centrality_E(): + E = nx.Graph() + E.add_weighted_edges_from( + [(0, 1, 4), (4, 5, 1), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2)] + ) + d = nx.laplacian_centrality(E) + exact = { + 0: 0.700000, + 1: 0.900000, + 2: 0.280000, + 3: 0.220000, + 4: 0.260000, + 5: 0.040000, + } + + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 200 + dnn = nx.laplacian_centrality(E, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-7) + + # Check unweighted not-normalized version + duw_nn = nx.laplacian_centrality(E, normalized=False, weight=None) + exact_uw_nn = { + 0: 18, + 1: 34, + 2: 18, + 3: 10, + 4: 16, + 5: 6, + } + for n, dc in duw_nn.items(): + assert exact_uw_nn[n] == pytest.approx(dc, abs=1e-7) + + # Check unweighted version + duw = nx.laplacian_centrality(E, weight=None) + full_energy = 42 + for n, dc in duw.items(): + assert exact_uw_nn[n] / full_energy == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_KC(): + KC = nx.karate_club_graph() + d = nx.laplacian_centrality(KC) + exact = { + 0: 0.2543593, + 1: 0.1724524, + 2: 0.2166053, + 3: 0.0964646, + 4: 0.0350344, + 5: 0.0571109, + 6: 0.0540713, + 7: 0.0788674, + 8: 0.1222204, + 9: 0.0217565, + 10: 0.0308751, + 11: 0.0215965, + 12: 0.0174372, + 13: 0.118861, + 14: 0.0366341, + 15: 0.0548712, + 16: 0.0172772, + 17: 0.0191969, + 18: 0.0225564, + 19: 0.0331147, + 20: 0.0279955, + 21: 0.0246361, + 22: 0.0382339, + 23: 0.1294193, + 24: 0.0227164, + 25: 0.0644697, + 26: 0.0281555, + 27: 0.075188, + 28: 0.0364742, + 29: 0.0707087, + 30: 0.0708687, + 31: 0.131019, + 32: 0.2370821, + 33: 0.3066709, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 12502 + dnn = nx.laplacian_centrality(KC, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3) + + +def test_laplacian_centrality_K(): + K = nx.krackhardt_kite_graph() + d = nx.laplacian_centrality(K) + exact = { + 0: 0.3010753, + 1: 0.3010753, + 2: 0.2258065, + 3: 0.483871, + 4: 0.2258065, + 5: 0.3870968, + 6: 0.3870968, + 7: 0.1935484, + 8: 0.0752688, + 9: 0.0322581, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 186 + dnn = nx.laplacian_centrality(K, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3) + + +def test_laplacian_centrality_P3(): + P3 = nx.path_graph(3) + d = nx.laplacian_centrality(P3) + exact = {0: 0.6, 1: 1.0, 2: 0.6} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_K5(): + K5 = nx.complete_graph(5) + d = nx.laplacian_centrality(K5) + exact = {0: 0.52, 1: 0.52, 2: 0.52, 3: 0.52, 4: 0.52} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def 
test_laplacian_centrality_FF(): + FF = nx.florentine_families_graph() + d = nx.laplacian_centrality(FF) + exact = { + "Acciaiuoli": 0.0804598, + "Medici": 0.4022989, + "Castellani": 0.1724138, + "Peruzzi": 0.183908, + "Strozzi": 0.2528736, + "Barbadori": 0.137931, + "Ridolfi": 0.2183908, + "Tornabuoni": 0.2183908, + "Albizzi": 0.1954023, + "Salviati": 0.1149425, + "Pazzi": 0.0344828, + "Bischeri": 0.1954023, + "Guadagni": 0.2298851, + "Ginori": 0.045977, + "Lamberteschi": 0.0574713, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_DG(): + DG = nx.DiGraph([(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 6), (5, 7), (5, 8)]) + d = nx.laplacian_centrality(DG) + exact = { + 0: 0.2123352, + 5: 0.515391, + 1: 0.2123352, + 2: 0.2123352, + 3: 0.2123352, + 4: 0.2123352, + 6: 0.2952031, + 7: 0.2952031, + 8: 0.2952031, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 9.50704 + dnn = nx.laplacian_centrality(DG, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-4) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py new file mode 100644 index 0000000..bf09603 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py @@ -0,0 +1,344 @@ +import pytest + +import networkx as nx + + +class TestLoadCentrality: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + cls.G = G + cls.exact_weighted = {0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} + cls.K = nx.krackhardt_kite_graph() + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + cls.P2 = nx.path_graph(2) + + cls.C4 = nx.cycle_graph(4) + cls.T = nx.balanced_tree(r=2, h=2) + cls.Gb = nx.Graph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + cls.F = nx.florentine_families_graph() + cls.LM = nx.les_miserables_graph() + cls.D = nx.cycle_graph(3, create_using=nx.DiGraph()) + cls.D.add_edges_from([(3, 0), (4, 3)]) + + def test_not_strongly_connected(self): + b = nx.load_centrality(self.D) + result = {0: 5.0 / 12, 1: 1.0 / 4, 2: 1.0 / 12, 3: 1.0 / 4, 4: 0.000} + for n in sorted(self.D): + assert result[n] == pytest.approx(b[n], abs=1e-3) + assert result[n] == pytest.approx(nx.load_centrality(self.D, n), abs=1e-3) + + def test_P2_normalized_load(self): + G = self.P2 + c = nx.load_centrality(G, normalized=True) + d = {0: 0.000, 1: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_load(self): + b = nx.load_centrality(self.G, weight="weight", normalized=False) + for n in sorted(self.G): + assert b[n] == self.exact_weighted[n] + + def test_k5_load(self): + G = self.K5 + c = nx.load_centrality(G) + d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_load(self): + G = self.P3 + c = nx.load_centrality(G) + d = {0: 0.000, 1: 1.000, 2: 0.000} + for n in sorted(G): + assert c[n] == 
pytest.approx(d[n], abs=1e-3) + c = nx.load_centrality(G, v=1) + assert c == pytest.approx(1.0, abs=1e-7) + c = nx.load_centrality(G, v=1, normalized=True) + assert c == pytest.approx(1.0, abs=1e-7) + + def test_p2_load(self): + G = nx.path_graph(2) + c = nx.load_centrality(G) + d = {0: 0.000, 1: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_krackhardt_load(self): + G = self.K + c = nx.load_centrality(G) + d = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_florentine_families_load(self): + G = self.F + c = nx.load_centrality(G) + d = { + "Acciaiuoli": 0.000, + "Albizzi": 0.211, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.251, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.117, + "Salviati": 0.143, + "Strozzi": 0.106, + "Tornabuoni": 0.090, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_les_miserables_load(self): + G = self.LM + c = nx.load_centrality(G) + d = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.567, + "Labarre": 0.000, + "Marguerite": 0.000, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.043, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.128, + "MmeThenardier": 0.029, + "Thenardier": 0.075, + "Cosette": 0.024, + "Javert": 0.054, + "Fauchelevent": 0.026, + "Bamatabois": 0.008, + "Perpetue": 0.000, + "Simplice": 0.009, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.007, + "Boulatruelle": 0.000, + "Eponine": 0.012, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.164, + "Gillenormand": 0.021, + "Magnon": 0.000, + "MlleGillenormand": 0.047, + "MmePontmercy": 0.000, + "MlleVaubois": 0.000, + "LtGillenormand": 0.000, + "Marius": 0.133, + "BaronessT": 0.000, + "Mabeuf": 0.028, + "Enjolras": 0.041, + "Combeferre": 0.001, + "Prouvaire": 0.000, + "Feuilly": 0.001, + "Courfeyrac": 0.006, + "Bahorel": 0.002, + "Bossuet": 0.032, + "Joly": 0.002, + "Grantaire": 0.000, + "MotherPlutarch": 0.000, + "Gueulemer": 0.005, + "Babet": 0.005, + "Claquesous": 0.005, + "Montparnasse": 0.004, + "Toussaint": 0.000, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.000, + "MmeHucheloup": 0.000, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_k5_load(self): + G = self.K5 + c = nx.load_centrality(G, normalized=False) + d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_p3_load(self): + G = self.P3 + c = nx.load_centrality(G, normalized=False) + d = {0: 0.000, 1: 2.000, 2: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_krackhardt_load(self): + G = self.K + c = nx.load_centrality(G, normalized=False) + d = { + 0: 1.667, + 1: 1.667, + 2: 
0.000,
+            3: 7.333,
+            4: 0.000,
+            5: 16.667,
+            6: 16.667,
+            7: 28.000,
+            8: 16.000,
+            9: 0.000,
+        }
+
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_unnormalized_florentine_families_load(self):
+        G = self.F
+        c = nx.load_centrality(G, normalized=False)
+
+        d = {
+            "Acciaiuoli": 0.000,
+            "Albizzi": 38.333,
+            "Barbadori": 17.000,
+            "Bischeri": 19.000,
+            "Castellani": 10.000,
+            "Ginori": 0.000,
+            "Guadagni": 45.667,
+            "Lamberteschi": 0.000,
+            "Medici": 95.000,
+            "Pazzi": 0.000,
+            "Peruzzi": 4.000,
+            "Ridolfi": 21.333,
+            "Salviati": 26.000,
+            "Strozzi": 19.333,
+            "Tornabuoni": 16.333,
+        }
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_load_betweenness_difference(self):
+        # Difference Between Load and Betweenness
+        # ---------------------------------------
+        # The smallest graph that shows the difference between load and
+        # betweenness is G = ladder_graph(3) (Graph B below).
+
+        # Graphs A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong
+        # Wang: Comment on "Scientific collaboration
+        # networks. II. Shortest paths, weighted networks, and
+        # centrality". https://arxiv.org/pdf/physics/0511084
+
+        # Notice that unlike here, their calculation adds 1 to the
+        # betweenness of every node i for every path from i to every
+        # other node. This is exactly what it should be, based on
+        # Eqn. (1) in their paper: B(v) = \sum_{s \neq t, s \neq v}
+        # \frac{\sigma_{st}(v)}{\sigma_{st}}; that is, they allow v to
+        # be the target node.
+
+        # We follow Brandes 2001, who follows Freeman 1977, and make the
+        # sum for the betweenness of v exclude paths where v is either
+        # the source or the target node. To agree with their numbers, we
+        # must additionally remove edge (4, 8) from the graph; see the AC
+        # example following (there is a mistake in the figure in their
+        # paper - personal communication).
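+        # The mechanical difference: load centrality forwards one unit of
+        # flow that splits evenly among all shortest-path next hops at each
+        # intermediate node, whereas betweenness credits a node with the
+        # fraction of shortest paths passing through it; the two can
+        # disagree when shortest-path counts are unbalanced, as in the
+        # ladder graph below.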
+ + # A = nx.Graph() + # A.add_edges_from([(0,1), (1,2), (1,3), (2,4), + # (3,5), (4,6), (4,7), (4,8), + # (5,8), (6,9), (7,9), (8,9)]) + B = nx.Graph() # ladder_graph(3) + B.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + c = nx.load_centrality(B, normalized=False) + d = {0: 1.750, 1: 1.750, 2: 6.500, 3: 6.500, 4: 1.750, 5: 1.750} + for n in sorted(B): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_c4_edge_load(self): + G = self.C4 + c = nx.edge_load_centrality(G) + d = {(0, 1): 6.000, (0, 3): 6.000, (1, 2): 6.000, (2, 3): 6.000} + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_edge_load(self): + G = self.P4 + c = nx.edge_load_centrality(G) + d = {(0, 1): 6.000, (1, 2): 8.000, (2, 3): 6.000} + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_k5_edge_load(self): + G = self.K5 + c = nx.edge_load_centrality(G) + d = { + (0, 1): 5.000, + (0, 2): 5.000, + (0, 3): 5.000, + (0, 4): 5.000, + (1, 2): 5.000, + (1, 3): 5.000, + (1, 4): 5.000, + (2, 3): 5.000, + (2, 4): 5.000, + (3, 4): 5.000, + } + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_tree_edge_load(self): + G = self.T + c = nx.edge_load_centrality(G) + d = { + (0, 1): 24.000, + (0, 2): 24.000, + (1, 3): 12.000, + (1, 4): 12.000, + (2, 5): 12.000, + (2, 6): 12.000, + } + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py new file mode 100644 index 0000000..0cb8f52 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py @@ -0,0 +1,87 @@ +import pytest + +import networkx as nx + + +def example1a_G(): + G = nx.Graph() + G.add_node(1, percolation=0.1) + G.add_node(2, percolation=0.2) + G.add_node(3, percolation=0.2) + G.add_node(4, percolation=0.2) + G.add_node(5, percolation=0.3) + G.add_node(6, percolation=0.2) + G.add_node(7, percolation=0.5) + G.add_node(8, percolation=0.5) + G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)]) + return G + + +def example1b_G(): + G = nx.Graph() + G.add_node(1, percolation=0.3) + G.add_node(2, percolation=0.5) + G.add_node(3, percolation=0.5) + G.add_node(4, percolation=0.2) + G.add_node(5, percolation=0.3) + G.add_node(6, percolation=0.2) + G.add_node(7, percolation=0.1) + G.add_node(8, percolation=0.1) + G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)]) + return G + + +def test_percolation_example1a(): + """percolation centrality: example 1a""" + G = example1a_G() + p = nx.percolation_centrality(G) + p_answer = {4: 0.625, 6: 0.667} + for n, k in p_answer.items(): + assert p[n] == pytest.approx(k, abs=1e-3) + + +def test_percolation_example1b(): + """percolation centrality: example 1a""" + G = example1b_G() + p = nx.percolation_centrality(G) + p_answer = {4: 0.825, 6: 0.4} + for n, k in p_answer.items(): + assert p[n] == pytest.approx(k, abs=1e-3) + + +def test_converge_to_betweenness(): + """percolation centrality: should converge to betweenness + centrality when all nodes are percolated the same""" + # taken from betweenness test test_florentine_families_graph + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + 
"Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + # If no initial state is provided, state for + # every node defaults to 1 + p_answer = nx.percolation_centrality(G) + assert p_answer == pytest.approx(b_answer, abs=1e-3) + + p_states = {k: 0.3 for k, v in b_answer.items()} + p_answer = nx.percolation_centrality(G, states=p_states) + assert p_answer == pytest.approx(b_answer, abs=1e-3) + + +def test_default_percolation(): + G = nx.erdos_renyi_graph(42, 0.42, seed=42) + assert nx.percolation_centrality(G) == pytest.approx(nx.betweenness_centrality(G)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_reaching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_reaching.py new file mode 100644 index 0000000..35d50e7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_reaching.py @@ -0,0 +1,140 @@ +"""Unit tests for the :mod:`networkx.algorithms.centrality.reaching` module.""" + +import pytest + +import networkx as nx + + +class TestGlobalReachingCentrality: + """Unit tests for the global reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.global_reaching_centrality(G, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.global_reaching_centrality(G, weight="weight") + + def test_directed_star(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 0.5), (1, 3, 0.5)]) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight="weight") == 0.5 + assert grc(G) == 1 + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight=None) == 0.25 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight="weight") == 0.375 + + def test_cycle_directed_unweighted(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_undirected_unweighted(self): + G = nx.Graph() + G.add_edge(1, 2) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_directed_weighted(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 1, 1)]) + assert nx.global_reaching_centrality(G) == 0 + + def test_cycle_undirected_weighted(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False) == 0 + + def test_directed_weighted(self): + G = nx.DiGraph() + G.add_edge("A", "B", weight=5) + G.add_edge("B", "C", weight=1) + G.add_edge("B", "D", weight=0.25) + G.add_edge("D", "E", weight=1) + + denom = len(G) - 1 + A_local = sum([5, 3, 2.625, 2.0833333333333]) / denom + B_local = sum([1, 0.25, 0.625]) / denom + C_local = 0 + D_local = sum([1]) / denom + E_local = 0 + + local_reach_ctrs = [A_local, C_local, B_local, D_local, E_local] + max_local = max(local_reach_ctrs) + expected = sum(max_local - lrc for lrc in local_reach_ctrs) / denom + grc = nx.global_reaching_centrality + actual = grc(G, normalized=False, weight="weight") + assert 
expected == pytest.approx(actual, abs=1e-7) + + def test_single_node_with_cycle(self): + G = nx.DiGraph([(1, 1)]) + with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"): + nx.global_reaching_centrality(G) + + def test_single_node_with_weighted_cycle(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 1, 2)]) + with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"): + nx.global_reaching_centrality(G, weight="weight") + + +class TestLocalReachingCentrality: + """Unit tests for the local reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + G.add_weighted_edges_from([(0, 1, 0)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.local_reaching_centrality + assert grc(G, 1, weight=None, normalized=False) == 0.75 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=False, weight="weight" + ) + assert centrality == 1.5 + + def test_undirected_weighted_normalized(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=True, weight="weight" + ) + assert centrality == 1.0 + + def test_single_node_with_cycle(self): + G = nx.DiGraph([(1, 1)]) + with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"): + nx.local_reaching_centrality(G, 1) + + def test_single_node_with_weighted_cycle(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 1, 2)]) + with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"): + nx.local_reaching_centrality(G, 1, weight="weight") diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py new file mode 100644 index 0000000..cc30478 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py @@ -0,0 +1,82 @@ +""" +Tests for second order centrality. 
+""" + +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +def test_empty(): + with pytest.raises(nx.NetworkXException): + G = nx.empty_graph() + nx.second_order_centrality(G) + + +def test_non_connected(): + with pytest.raises(nx.NetworkXException): + G = nx.Graph() + G.add_node(0) + G.add_node(1) + nx.second_order_centrality(G) + + +def test_non_negative_edge_weights(): + with pytest.raises(nx.NetworkXException): + G = nx.path_graph(2) + G.add_edge(0, 1, weight=-1) + nx.second_order_centrality(G) + + +def test_weight_attribute(): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 3.5)], weight="w") + expected = {0: 3.431, 1: 3.082, 2: 5.612} + b = nx.second_order_centrality(G, weight="w") + + for n in sorted(G): + assert b[n] == pytest.approx(expected[n], abs=1e-2) + + +def test_one_node_graph(): + """Second order centrality: single node""" + G = nx.Graph() + G.add_node(0) + G.add_edge(0, 0) + assert nx.second_order_centrality(G)[0] == 0 + + +def test_P3(): + """Second order centrality: line graph, as defined in paper""" + G = nx.path_graph(3) + b_answer = {0: 3.741, 1: 1.414, 2: 3.741} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + +def test_K3(): + """Second order centrality: complete graph, as defined in paper""" + G = nx.complete_graph(3) + b_answer = {0: 1.414, 1: 1.414, 2: 1.414} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + +def test_ring_graph(): + """Second order centrality: ring graph, as defined in paper""" + G = nx.cycle_graph(5) + b_answer = {0: 4.472, 1: 4.472, 2: 4.472, 3: 4.472, 4: 4.472} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py new file mode 100644 index 0000000..7109275 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py @@ -0,0 +1,110 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.centrality.subgraph_alg import ( + communicability_betweenness_centrality, + estrada_index, + subgraph_centrality, + subgraph_centrality_exp, +) + + +class TestSubgraph: + def test_subgraph_centrality(self): + answer = {0: 1.5430806348152433, 1: 1.5430806348152433} + result = subgraph_centrality(nx.path_graph(2)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + answer1 = { + "1": 1.6445956054135658, + "Albert": 2.4368257358712189, + "Aric": 2.4368257358712193, + "Dan": 3.1306328496328168, + "Franck": 2.3876142275231915, + } + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + result1 = subgraph_centrality(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + result1 = subgraph_centrality_exp(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + + def test_subgraph_centrality_big_graph(self): + g199 = nx.complete_graph(199) + g200 = nx.complete_graph(200) + + comm199 = nx.subgraph_centrality(g199) + comm199_exp = nx.subgraph_centrality_exp(g199) + + comm200 = 
nx.subgraph_centrality(g200) + comm200_exp = nx.subgraph_centrality_exp(g200) + + def test_communicability_betweenness_centrality_small(self): + result = communicability_betweenness_centrality(nx.path_graph(2)) + assert result == {0: 0, 1: 0} + + result = communicability_betweenness_centrality(nx.path_graph(1)) + assert result == {0: 0} + + result = communicability_betweenness_centrality(nx.path_graph(0)) + assert result == {} + + answer = {0: 0.1411224421177313, 1: 1.0, 2: 0.1411224421177313} + result = communicability_betweenness_centrality(nx.path_graph(3)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + result = communicability_betweenness_centrality(nx.complete_graph(3)) + for k, v in result.items(): + assert 0.49786143366223296 == pytest.approx(v, abs=1e-7) + + def test_communicability_betweenness_centrality(self): + answer = { + 0: 0.07017447951484615, + 1: 0.71565598701107991, + 2: 0.71565598701107991, + 3: 0.07017447951484615, + } + result = communicability_betweenness_centrality(nx.path_graph(4)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + answer1 = { + "1": 0.060039074193949521, + "Albert": 0.315470761661372, + "Aric": 0.31547076166137211, + "Dan": 0.68297778678316201, + "Franck": 0.21977926617449497, + } + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + result1 = communicability_betweenness_centrality(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + + def test_estrada_index(self): + answer = 1041.2470334195475 + result = estrada_index(nx.karate_club_graph()) + assert answer == pytest.approx(result, abs=1e-7) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_trophic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_trophic.py new file mode 100644 index 0000000..8f7d74d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_trophic.py @@ -0,0 +1,302 @@ +"""Test trophic levels, trophic differences and trophic coherence""" + +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +def test_trophic_levels(): + """Trivial example""" + G = nx.DiGraph() + G.add_edge("a", "b") + G.add_edge("b", "c") + + d = nx.trophic_levels(G) + assert d == {"a": 1, "b": 2, "c": 3} + + +def test_trophic_levels_levine(): + """Example from Figure 5 in Stephen Levine (1980) J. theor. Biol. 
83, + 195-207 + """ + S = nx.DiGraph() + S.add_edge(1, 2, weight=1.0) + S.add_edge(1, 3, weight=0.2) + S.add_edge(1, 4, weight=0.8) + S.add_edge(2, 3, weight=0.2) + S.add_edge(2, 5, weight=0.3) + S.add_edge(4, 3, weight=0.6) + S.add_edge(4, 5, weight=0.7) + S.add_edge(5, 4, weight=0.2) + + # save copy for later, test intermediate implementation details first + S2 = S.copy() + + # drop nodes of in-degree zero + z = [nid for nid, d in S.in_degree if d == 0] + for nid in z: + S.remove_node(nid) + + # find adjacency matrix + q = nx.linalg.graphmatrix.adjacency_matrix(S).T + + # fmt: off + expected_q = np.array([ + [0, 0, 0., 0], + [0.2, 0, 0.6, 0], + [0, 0, 0, 0.2], + [0.3, 0, 0.7, 0] + ]) + # fmt: on + assert np.array_equal(q.todense(), expected_q) + + # must be square, size of number of nodes + assert len(q.shape) == 2 + assert q.shape[0] == q.shape[1] + assert q.shape[0] == len(S) + + nn = q.shape[0] + + i = np.eye(nn) + n = np.linalg.inv(i - q) + y = np.asarray(n) @ np.ones(nn) + + expected_y = np.array([1, 2.07906977, 1.46511628, 2.3255814]) + assert np.allclose(y, expected_y) + + expected_d = {1: 1, 2: 2, 3: 3.07906977, 4: 2.46511628, 5: 3.3255814} + + d = nx.trophic_levels(S2) + + for nid, level in d.items(): + expected_level = expected_d[nid] + assert expected_level == pytest.approx(level, abs=1e-7) + + +def test_trophic_levels_simple(): + matrix_a = np.array([[0, 0], [1, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + assert d[0] == pytest.approx(2, abs=1e-7) + assert d[1] == pytest.approx(1, abs=1e-7) + + +def test_trophic_levels_more_complex(): + # fmt: off + matrix = np.array([ + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + expected_result = [1, 2, 3, 4] + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + # fmt: off + matrix = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + + expected_result = [1, 2, 2.5, 3.25] + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + +def test_trophic_levels_even_more_complex(): + # fmt: off + # Another, bigger matrix + matrix = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # Generated this linear system using pen and paper: + K = np.array([ + [1, 0, -1, 0, 0], + [0, 0.5, 0, -0.5, 0], + [0, 0, 1, 0, 0], + [0, -0.5, 0, 1, -0.5], + [0, 0, 0, 0, 1], + ]) + # fmt: on + result_1 = np.ravel(np.linalg.inv(K) @ np.ones(5)) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + result_2 = nx.trophic_levels(G) + + for ind in range(5): + assert result_1[ind] == pytest.approx(result_2[ind], abs=1e-7) + + +def test_trophic_levels_singular_matrix(): + """Should raise an error with graphs with only non-basal nodes""" + matrix = np.identity(4) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError, match="no basal nodes"): + nx.trophic_levels(G) + + +def test_trophic_levels_singular_with_basal(): + """Should fail to compute if there are any parts of the graph which are not + reachable from any basal node (with in-degree zero). 
+
+    """
+    G = nx.DiGraph()
+    # a has in-degree zero
+    G.add_edge("a", "b")
+
+    # b is one level above a, c and d
+    G.add_edge("c", "b")
+    G.add_edge("d", "b")
+
+    # c and d form a loop, neither is reachable from a
+    G.add_edge("c", "d")
+    G.add_edge("d", "c")
+
+    with pytest.raises(nx.NetworkXError) as e:
+        nx.trophic_levels(G)
+    msg = (
+        "Trophic levels are only defined for graphs where every node "
+        + "has a path from a basal node (basal nodes are nodes with no "
+        + "incoming edges)."
+    )
+    assert msg in str(e.value)
+
+    # if self-loops are allowed, smaller example:
+    G = nx.DiGraph()
+    G.add_edge("a", "b")  # a has in-degree zero
+    G.add_edge("c", "b")  # b is one level above a and c
+    G.add_edge("c", "c")  # c has a self-loop
+    with pytest.raises(nx.NetworkXError) as e:
+        nx.trophic_levels(G)
+    msg = (
+        "Trophic levels are only defined for graphs where every node "
+        + "has a path from a basal node (basal nodes are nodes with no "
+        + "incoming edges)."
+    )
+    assert msg in str(e.value)
+
+
+def test_trophic_differences():
+    matrix_a = np.array([[0, 1], [0, 0]])
+    G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph)
+    diffs = nx.trophic_differences(G)
+    assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7)
+
+    # fmt: off
+    matrix_b = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph)
+    diffs = nx.trophic_differences(G)
+
+    assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7)
+    assert diffs[(0, 2)] == pytest.approx(1.5, abs=1e-7)
+    assert diffs[(1, 2)] == pytest.approx(0.5, abs=1e-7)
+    assert diffs[(1, 3)] == pytest.approx(1.25, abs=1e-7)
+    assert diffs[(2, 3)] == pytest.approx(0.75, abs=1e-7)
+
+
+def test_trophic_incoherence_parameter_no_cannibalism():
+    matrix_a = np.array([[0, 1], [0, 0]])
+    G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=False)
+    assert q == pytest.approx(0, abs=1e-7)
+
+    # fmt: off
+    matrix_b = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=False)
+    assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7)
+
+    # fmt: off
+    matrix_c = np.array([
+        [0, 1, 1, 0],
+        [0, 1, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 1]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=False)
+    # Ignore the self-links at nodes 1 and 3
+    assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7)
+
+    # no self-loops case
+    # fmt: off
+    matrix_d = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_d, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=False)
+    # No self-links to ignore here
+    assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7)
+
+
+def test_trophic_incoherence_parameter_cannibalism():
+    matrix_a = np.array([[0, 1], [0, 0]])
+    G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=True)
+    assert q == pytest.approx(0, abs=1e-7)
+
+    # fmt: off
+    matrix_b = np.array([
+        [0, 0, 0, 0, 0],
+        [0, 1, 0, 1, 0],
+        [1, 0, 0, 0, 0],
+        [0, 1, 0, 0, 0],
+        [0, 0, 0, 1, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=True)
+    assert q == pytest.approx(2, abs=1e-7)
+
+    # fmt: off
+    matrix_c = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=True)
+    # No self-links here, so cannibalism=True gives the same result
+    assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7)
+
+
+def test_no_basal_node():
+    G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])  # No basal node, should raise an error
+    with pytest.raises(nx.NetworkXError, match="no basal node"):
+        nx.trophic_levels(G)
+    G.add_node(4)  # add basal node, but not connected
+    with pytest.raises(nx.NetworkXError, match="every node .* path from a basal node"):
+        nx.trophic_levels(G)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_voterank.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_voterank.py
new file mode 100644
index 0000000..a5cfb61
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_voterank.py
@@ -0,0 +1,64 @@
+"""
+Unit tests for VoteRank.
+"""
+
+import networkx as nx
+
+
+class TestVoteRankCentrality:
+    # Example graph present in the reference paper
+    def test_voterank_centrality_1(self):
+        G = nx.Graph()
+        G.add_edges_from(
+            [
+                (7, 8),
+                (7, 5),
+                (7, 9),
+                (5, 0),
+                (0, 1),
+                (0, 2),
+                (0, 3),
+                (0, 4),
+                (1, 6),
+                (2, 6),
+                (3, 6),
+                (4, 6),
+            ]
+        )
+        assert [0, 7, 6] == nx.voterank(G)
+
+    def test_voterank_emptygraph(self):
+        G = nx.Graph()
+        assert [] == nx.voterank(G)
+
+    # Graph unit test
+    def test_voterank_centrality_2(self):
+        G = nx.florentine_families_graph()
+        d = nx.voterank(G, 4)
+        exact = ["Medici", "Strozzi", "Guadagni", "Castellani"]
+        assert exact == d
+
+    # DiGraph unit test
+    def test_voterank_centrality_3(self):
+        G = nx.gnc_graph(10, seed=7)
+        d = nx.voterank(G, 4)
+        exact = [3, 6, 8]
+        assert exact == d
+
+    # MultiGraph unit test
+    def test_voterank_centrality_4(self):
+        G = nx.MultiGraph()
+        G.add_edges_from(
+            [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
+        )
+        exact = [2, 1, 5, 4]
+        assert exact == nx.voterank(G)
+
+    # MultiDiGraph unit test
+    def test_voterank_centrality_5(self):
+        G = nx.MultiDiGraph()
+        G.add_edges_from(
+            [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
+        )
+        exact = [2, 0, 5, 4]
+        assert exact == nx.voterank(G)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/trophic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/trophic.py
new file mode 100644
index 0000000..608c110
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/trophic.py
@@ -0,0 +1,181 @@
+"""Trophic levels"""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["trophic_levels", "trophic_differences", "trophic_incoherence_parameter"]
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_levels(G, weight="weight"):
+    r"""Compute the trophic levels of nodes.
+
+    The trophic level of a node $i$ is
+
+    .. math::
+
+        s_i = 1 + \frac{1}{k^{in}_i} \sum_{j} a_{ij} s_j
+
+    where $k^{in}_i$ is the in-degree of node $i$
+
+    .. math::
+
+        k^{in}_i = \sum_{j} a_{ij}
+
+    and nodes with $k^{in}_i = 0$ have $s_i = 1$ by convention.
+
+    These are calculated using the method outlined in Levine [1]_.
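+
+    As an illustrative sketch of the formula: in the chain ``a -> b -> c``
+    the node ``a`` is basal, so $s_a = 1$ by convention, and each remaining
+    node sits one level above its only prey,
+
+    .. math::
+
+        s_b = 1 + s_a = 2, \qquad s_c = 1 + s_b = 3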
+
+    Parameters
+    ----------
+    G : DiGraph
+        A directed networkx graph
+
+    Returns
+    -------
+    nodes : dict
+        Dictionary of nodes with trophic level as the value.
+
+    References
+    ----------
+    .. [1] Stephen Levine (1980) J. theor. Biol. 83, 195-207
+    """
+
+    basal_nodes = [n for n, deg in G.in_degree if deg == 0]
+    if not basal_nodes:
+        raise nx.NetworkXError(
+            "This graph has no basal nodes (nodes with no incoming edges). "
+            "Trophic levels are not defined without at least one basal node."
+        )
+
+    reachable_nodes = {
+        node for layer in nx.bfs_layers(G, sources=basal_nodes) for node in layer
+    }
+
+    if len(reachable_nodes) != len(G.nodes):
+        raise nx.NetworkXError(
+            "Trophic levels are only defined for graphs where every node has a path "
+            "from a basal node (basal nodes are nodes with no incoming edges)."
+        )
+
+    import numpy as np
+
+    # find adjacency matrix
+    a = nx.adjacency_matrix(G, weight=weight).T.toarray()
+
+    # drop rows/columns where in-degree is zero
+    rowsum = np.sum(a, axis=1)
+    p = a[rowsum != 0][:, rowsum != 0]
+    # normalise so sum of in-degree weights is 1 along each row
+    p = p / rowsum[rowsum != 0][:, np.newaxis]
+
+    # calculate trophic levels
+    nn = p.shape[0]
+    i = np.eye(nn)
+    try:
+        n = np.linalg.inv(i - p)
+    except np.linalg.LinAlgError as err:
+        # LinAlgError is raised when there is a non-basal node
+        msg = (
+            "Trophic levels are only defined for graphs where every "
+            + "node has a path from a basal node (basal nodes are nodes "
+            + "with no incoming edges)."
+        )
+        raise nx.NetworkXError(msg) from err
+    y = n.sum(axis=1) + 1
+
+    levels = {}
+
+    # all nodes with in-degree zero have trophic level == 1
+    zero_node_ids = (node_id for node_id, degree in G.in_degree if degree == 0)
+    for node_id in zero_node_ids:
+        levels[node_id] = 1
+
+    # all other nodes have levels as calculated
+    nonzero_node_ids = (node_id for node_id, degree in G.in_degree if degree != 0)
+    for i, node_id in enumerate(nonzero_node_ids):
+        levels[node_id] = y.item(i)
+
+    return levels
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_differences(G, weight="weight"):
+    r"""Compute the trophic differences of the edges of a directed graph.
+
+    The trophic difference $x_{ij}$ for each edge is defined in Johnson et al.
+    [1]_ as:
+
+    .. math::
+        x_{ij} = s_j - s_i
+
+    where $s_i$ is the trophic level of node $i$.
+
+    Parameters
+    ----------
+    G : DiGraph
+        A directed networkx graph
+
+    Returns
+    -------
+    diffs : dict
+        Dictionary of edges with trophic differences as the value.
+
+    References
+    ----------
+    .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
+       Munoz (2014) PNAS "Trophic coherence determines food-web stability"
+    """
+    levels = trophic_levels(G, weight=weight)
+    diffs = {}
+    for u, v in G.edges:
+        diffs[(u, v)] = levels[v] - levels[u]
+    return diffs
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_incoherence_parameter(G, weight="weight", cannibalism=False):
+    r"""Compute the trophic incoherence parameter of a graph.
+
+    Trophic coherence is defined as the homogeneity of the distribution of
+    trophic distances: the more similar, the more coherent. This is measured
+    by the standard deviation of the trophic differences and referred to as
+    the trophic incoherence parameter $q$ by [1]_.
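+
+    For example, every edge of a directed path has trophic difference
+    exactly 1, so the standard deviation of the differences, and with it
+    $q$, is 0 for a path: the maximally coherent case.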
+
+    Parameters
+    ----------
+    G : DiGraph
+        A directed networkx graph
+
+    cannibalism: Boolean
+        If set to False, self edges are not considered in the calculation
+
+    Returns
+    -------
+    trophic_incoherence_parameter : float
+        The trophic incoherence parameter of the graph
+
+    References
+    ----------
+    .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
+       Munoz (2014) PNAS "Trophic coherence determines food-web stability"
+    """
+    import numpy as np
+
+    if cannibalism:
+        diffs = trophic_differences(G, weight=weight)
+    else:
+        # If no cannibalism, remove self-edges
+        self_loops = list(nx.selfloop_edges(G))
+        if self_loops:
+            # Make a copy so we do not change G's edges in memory
+            G_2 = G.copy()
+            G_2.remove_edges_from(self_loops)
+        else:
+            # Avoid copy otherwise
+            G_2 = G
+        diffs = trophic_differences(G_2, weight=weight)
+    return float(np.std(list(diffs.values())))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/voterank_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/voterank_alg.py
new file mode 100644
index 0000000..9b510b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/voterank_alg.py
@@ -0,0 +1,95 @@
+"""Algorithm to select influential nodes in a graph using VoteRank."""
+
+import networkx as nx
+
+__all__ = ["voterank"]
+
+
+@nx._dispatchable
+def voterank(G, number_of_nodes=None):
+    """Select a list of influential nodes in a graph using the VoteRank algorithm.
+
+    VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
+    voting scheme. With VoteRank, all nodes vote for each of their in-neighbors
+    and the node with the highest number of votes is elected iteratively. The
+    voting ability of the out-neighbors of elected nodes is decreased in
+    subsequent turns.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+
+    number_of_nodes : integer, optional
+        Number of ranked nodes to extract (default all nodes).
+
+    Returns
+    -------
+    voterank : list
+        Ordered list of computed seeds.
+        Only nodes with a positive number of votes are returned.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)])
+    >>> nx.voterank(G)
+    [0, 1]
+
+    The algorithm can be used both for undirected and directed graphs.
+    However, the directed version is different in two ways:
+    (i) nodes only vote for their in-neighbors and
+    (ii) only the voting ability of the elected node and its out-neighbors
+    is updated:
+
+    >>> G = nx.DiGraph([(0, 1), (2, 1), (2, 3), (3, 4)])
+    >>> nx.voterank(G)
+    [2, 3]
+
+    Notes
+    -----
+    Each edge is treated independently in case of multigraphs.
+
+    References
+    ----------
+    .. [1] Zhang, J.-X. et al. (2016).
+       Identifying a set of influential spreaders in complex networks.
+       Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
+    """
+    influential_nodes = []
+    vote_rank = {}
+    if len(G) == 0:
+        return influential_nodes
+    if number_of_nodes is None or number_of_nodes > len(G):
+        number_of_nodes = len(G)
+    if G.is_directed():
+        # For directed graphs compute average out-degree
+        avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
+    else:
+        # For undirected graphs compute average degree
+        avgDegree = sum(deg for _, deg in G.degree()) / len(G)
+    # step 1 - initiate all nodes to (0,1) (score, voting ability)
+    for n in G.nodes():
+        vote_rank[n] = [0, 1]
+    # Repeat steps 1b to 4 until `number_of_nodes` nodes are elected.
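+    # Roughly, each pass below: rebuild every node's score from the
+    # current voting ability of its voters, elect the highest-scoring
+    # node, zero out the elected node's score and voting ability, and
+    # reduce the voting ability of its (out-)neighbors by 1/avgDegree
+    # (never below zero), so later rounds favor nodes far from the
+    # already-elected spreaders.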
+    for _ in range(number_of_nodes):
+        # step 1b - reset rank
+        for n in G.nodes():
+            vote_rank[n][0] = 0
+        # step 2 - vote
+        for n, nbr in G.edges():
+            # In directed graphs nodes only vote for their in-neighbors
+            vote_rank[n][0] += vote_rank[nbr][1]
+            if not G.is_directed():
+                vote_rank[nbr][0] += vote_rank[n][1]
+        for n in influential_nodes:
+            vote_rank[n][0] = 0
+        # step 3 - select top node
+        n = max(G.nodes, key=lambda x: vote_rank[x][0])
+        if vote_rank[n][0] == 0:
+            return influential_nodes
+        influential_nodes.append(n)
+        # weaken the selected node
+        vote_rank[n] = [0, 0]
+        # step 4 - update voterank properties
+        for _, nbr in G.edges(n):
+            vote_rank[nbr][1] -= 1 / avgDegree
+            vote_rank[nbr][1] = max(vote_rank[nbr][1], 0)
+    return influential_nodes
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/chains.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/chains.py
new file mode 100644
index 0000000..ae342d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/chains.py
@@ -0,0 +1,172 @@
+"""Functions for finding chains in a graph."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["chain_decomposition"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def chain_decomposition(G, root=None):
+    """Returns the chain decomposition of a graph.
+
+    The *chain decomposition* of a graph with respect to a depth-first
+    search tree is a set of cycles or paths derived from the set of
+    fundamental cycles of the tree in the following manner. Consider
+    each fundamental cycle with respect to the given tree, represented
+    as a list of edges beginning with the nontree edge oriented away
+    from the root of the tree. For each fundamental cycle, if it
+    overlaps with any previous fundamental cycle, just take the initial
+    non-overlapping segment, which is a path instead of a cycle. Each
+    cycle or path is called a *chain*. For more information, see [1]_.
+
+    Parameters
+    ----------
+    G : undirected graph
+
+    root : node (optional)
+        A node in the graph `G`. If specified, only the chain
+        decomposition for the connected component containing this node
+        will be returned. This node indicates the root of the depth-first
+        search tree.
+
+    Yields
+    ------
+    chain : list
+        A list of edges representing a chain. There is no guarantee on
+        the orientation of the edges in each chain (for example, if a
+        chain includes the edge joining nodes 1 and 2, the chain may
+        include either (1, 2) or (2, 1)).
+
+    Raises
+    ------
+    NodeNotFound
+        If `root` is not in the graph `G`.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (1, 4), (3, 4), (3, 5), (4, 5)])
+    >>> list(nx.chain_decomposition(G))
+    [[(4, 5), (5, 3), (3, 4)]]
+
+    Notes
+    -----
+    The worst-case running time of this implementation is linear in the
+    number of nodes and number of edges [1]_.
+
+    References
+    ----------
+    .. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex-
+       and 2-edge-connectivity." *Information Processing Letters*,
+       113, 241–244. Elsevier.
+
+    """
+
+    def _dfs_cycle_forest(G, root=None):
+        """Builds a directed graph composed of cycles from the given graph.
+
+        `G` is an undirected simple graph. `root` is a node in the graph
+        from which the depth-first search is started.
+
+        This function returns both the depth-first search cycle graph
+        (as a :class:`~networkx.DiGraph`) and the list of nodes in
+        depth-first preorder.
+        The depth-first search cycle graph is a directed graph whose
+        edges are the edges of `G` oriented toward the root if the edge
+        is a tree edge and away from the root if the edge is a non-tree
+        edge. If `root` is not specified, this performs a depth-first
+        search on each connected component of `G` and returns a directed
+        forest instead.
+
+        If `root` is not in the graph, this raises :exc:`KeyError`.
+
+        """
+        # Create a directed graph from the depth-first search tree with
+        # root node `root` in which tree edges are directed toward the
+        # root and nontree edges are directed away from the root. For
+        # each node with an incident nontree edge, this creates a
+        # directed cycle starting with the nontree edge and returning to
+        # that node.
+        #
+        # The `parent` node attribute stores the parent of each node in
+        # the DFS tree. The `nontree` edge attribute indicates whether
+        # the edge is a tree edge or a nontree edge.
+        #
+        # We also store the order of the nodes found in the depth-first
+        # search in the `nodes` list.
+        H = nx.DiGraph()
+        nodes = []
+        for u, v, d in nx.dfs_labeled_edges(G, source=root):
+            if d == "forward":
+                # `dfs_labeled_edges()` yields (root, root, 'forward')
+                # if it is beginning the search on a new connected
+                # component.
+                if u == v:
+                    H.add_node(v, parent=None)
+                    nodes.append(v)
+                else:
+                    H.add_node(v, parent=u)
+                    H.add_edge(v, u, nontree=False)
+                    nodes.append(v)
+            # `dfs_labeled_edges` considers nontree edges in both
+            # orientations, so we need to not add the edge if its
+            # other orientation has already been added.
+            elif d == "nontree" and v not in H[u]:
+                H.add_edge(v, u, nontree=True)
+            else:
+                # Do nothing on 'reverse' edges; we only care about
+                # forward and nontree edges.
+                pass
+        return H, nodes
+
+    def _build_chain(G, u, v, visited):
+        """Generate the chain starting from the given nontree edge.
+
+        `G` is a DFS cycle graph as constructed by
+        :func:`_dfs_cycle_forest`. The edge (`u`, `v`) is a nontree edge
+        that begins a chain. `visited` is a set representing the nodes
+        in `G` that have already been visited.
+
+        This function yields the edges in an initial segment of the
+        fundamental cycle of `G` starting with the nontree edge (`u`,
+        `v`) that includes all the edges up until the first node that
+        appears in `visited`. The tree edges are given by the 'parent'
+        node attribute. The `visited` set is updated to add each node in
+        an edge yielded by this function.
+
+        """
+        while v not in visited:
+            yield u, v
+            visited.add(v)
+            u, v = v, G.nodes[v]["parent"]
+        yield u, v
+
+    # Check if the root is in the graph G. If not, raise NodeNotFound
+    if root is not None and root not in G:
+        raise nx.NodeNotFound(f"Root node {root} is not in graph")
+
+    # Create a directed version of H that has the DFS edges directed
+    # toward the root and the nontree edges directed away from the root
+    # (in each connected component).
+    H, nodes = _dfs_cycle_forest(G, root)
+
+    # Visit the nodes again in DFS order. For each node, and for each
+    # nontree edge leaving that node, compute the fundamental cycle for
+    # that nontree edge starting with that edge. If the fundamental
+    # cycle overlaps with any visited nodes, just take the prefix of the
+    # cycle up to the point of visited nodes.
+    #
+    # We repeat this process for each connected component (implicitly,
+    # since `nodes` already has a list of the nodes grouped by connected
+    # component).
+    visited = set()
+    for u in nodes:
+        visited.add(u)
+        # For each nontree edge going out of node u...
+ edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d) + for u, v in edges: + # Create the cycle or cycle prefix starting with the + # nontree edge. + chain = list(_build_chain(H, u, v, visited)) + yield chain diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/chordal.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/chordal.py new file mode 100644 index 0000000..deb6ed0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/chordal.py @@ -0,0 +1,443 @@ +""" +Algorithms for chordal graphs. + +A graph is chordal if every cycle of length at least 4 has a chord +(an edge joining two nodes not adjacent in the cycle). +https://en.wikipedia.org/wiki/Chordal_graph +""" + +import sys + +import networkx as nx +from networkx.algorithms.components import connected_components +from networkx.utils import arbitrary_element, not_implemented_for + +__all__ = [ + "is_chordal", + "find_induced_nodes", + "chordal_graph_cliques", + "chordal_graph_treewidth", + "NetworkXTreewidthBoundExceeded", + "complete_to_chordal_graph", +] + + +class NetworkXTreewidthBoundExceeded(nx.NetworkXException): + """Exception raised when a treewidth bound has been provided and it has + been exceeded""" + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def is_chordal(G): + """Checks whether G is a chordal graph. + + A graph is chordal if every cycle of length at least 4 has a chord + (an edge joining two nodes not adjacent in the cycle). + + Parameters + ---------- + G : graph + A NetworkX graph. + + Returns + ------- + chordal : bool + True if G is a chordal graph and False otherwise. + + Raises + ------ + NetworkXNotImplemented + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... ] + >>> G = nx.Graph(e) + >>> nx.is_chordal(G) + True + + Notes + ----- + The routine tries to go through every node following maximum cardinality + search. It returns False when it finds that the separator for any node + is not a clique. Based on the algorithms in [1]_. + + Self loops are ignored. + + References + ---------- + .. [1] R. E. Tarjan and M. Yannakakis, Simple linear-time algorithms + to test chordality of graphs, test acyclicity of hypergraphs, and + selectively reduce acyclic hypergraphs, SIAM J. Comput., 13 (1984), + pp. 566–579. + """ + if len(G.nodes) <= 3: + return True + return len(_find_chordality_breaker(G)) == 0 + + +@nx._dispatchable +def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize): + """Returns the set of induced nodes in the path from s to t. + + Parameters + ---------- + G : graph + A chordal NetworkX graph + s : node + Source node to look for induced nodes + t : node + Destination node to look for induced nodes + treewidth_bound: float + Maximum treewidth acceptable for the graph H. The search + for induced nodes will end as soon as the treewidth_bound is exceeded. + + Returns + ------- + induced_nodes : Set of nodes + The set of induced nodes in the path from s to t in G + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + If the input graph is an instance of one of these classes, a + :exc:`NetworkXError` is raised. + The algorithm can only be applied to chordal graphs. If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. 
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G = nx.generators.classic.path_graph(10)
+    >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2)
+    >>> sorted(induced_nodes)
+    [1, 2, 3, 4, 5, 6, 7, 8, 9]
+
+    Notes
+    -----
+    G must be a chordal graph and (s,t) an edge that is not in G.
+
+    If a treewidth_bound is provided, the search for induced nodes will end
+    as soon as the treewidth_bound is exceeded.
+
+    The algorithm is inspired by Algorithm 4 in [1]_.
+    A formal definition of induced node can also be found in that reference.
+
+    Self loops are ignored.
+
+    References
+    ----------
+    .. [1] Learning Bounded Treewidth Bayesian Networks.
+       Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008.
+       http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf
+    """
+    if not is_chordal(G):
+        raise nx.NetworkXError("Input graph is not chordal.")
+
+    H = nx.Graph(G)
+    H.add_edge(s, t)
+    induced_nodes = set()
+    triplet = _find_chordality_breaker(H, s, treewidth_bound)
+    while triplet:
+        (u, v, w) = triplet
+        induced_nodes.update(triplet)
+        for n in triplet:
+            if n != s:
+                H.add_edge(s, n)
+        triplet = _find_chordality_breaker(H, s, treewidth_bound)
+    if induced_nodes:
+        # Add t and the second node in the induced path from s to t.
+        induced_nodes.add(t)
+        for u in G[s]:
+            if len(induced_nodes & set(G[u])) == 2:
+                induced_nodes.add(u)
+                break
+    return induced_nodes
+
+
+@nx._dispatchable
+def chordal_graph_cliques(G):
+    """Returns all maximal cliques of a chordal graph.
+
+    The algorithm breaks the graph into connected components and performs a
+    maximum cardinality search in each component to get the cliques.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    Yields
+    ------
+    frozenset of nodes
+        Maximal cliques, each of which is a frozenset of
+        nodes in `G`. The order of cliques is arbitrary.
+
+    Raises
+    ------
+    NetworkXError
+        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
+        The algorithm can only be applied to chordal graphs. If the input
+        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.
+
+    Examples
+    --------
+    >>> e = [
+    ...     (1, 2),
+    ...     (1, 3),
+    ...     (2, 3),
+    ...     (2, 4),
+    ...     (3, 4),
+    ...     (3, 5),
+    ...     (3, 6),
+    ...     (4, 5),
+    ...     (4, 6),
+    ...     (5, 6),
+    ...     (7, 8),
+    ... ]
+    >>> G = nx.Graph(e)
+    >>> G.add_node(9)
+    >>> cliques = [c for c in chordal_graph_cliques(G)]
+    >>> cliques[0]
+    frozenset({1, 2, 3})
+    """
+    for C in (G.subgraph(c).copy() for c in connected_components(G)):
+        if C.number_of_nodes() == 1:
+            if nx.number_of_selfloops(C) > 0:
+                raise nx.NetworkXError("Input graph is not chordal.")
+            yield frozenset(C.nodes())
+        else:
+            unnumbered = set(C.nodes())
+            v = arbitrary_element(C)
+            unnumbered.remove(v)
+            numbered = {v}
+            clique_wanna_be = {v}
+            while unnumbered:
+                v = _max_cardinality_node(C, unnumbered, numbered)
+                unnumbered.remove(v)
+                numbered.add(v)
+                new_clique_wanna_be = set(C.neighbors(v)) & numbered
+                sg = C.subgraph(clique_wanna_be)
+                if _is_complete_graph(sg):
+                    new_clique_wanna_be.add(v)
+                    if not new_clique_wanna_be >= clique_wanna_be:
+                        yield frozenset(clique_wanna_be)
+                    clique_wanna_be = new_clique_wanna_be
+                else:
+                    raise nx.NetworkXError("Input graph is not chordal.")
+            yield frozenset(clique_wanna_be)
+
+
+@nx._dispatchable
+def chordal_graph_treewidth(G):
+    """Returns the treewidth of the chordal graph G.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    Returns
+    -------
+    treewidth : int
+        The size of the largest clique in the graph minus one.
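+        For example, a complete graph on n nodes is chordal, its only
+        maximal clique contains all n nodes, and so its treewidth is n - 1.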
+
+    Raises
+    ------
+    NetworkXError
+        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
+        The algorithm can only be applied to chordal graphs. If the input
+        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.
+
+    Examples
+    --------
+    >>> e = [
+    ...     (1, 2),
+    ...     (1, 3),
+    ...     (2, 3),
+    ...     (2, 4),
+    ...     (3, 4),
+    ...     (3, 5),
+    ...     (3, 6),
+    ...     (4, 5),
+    ...     (4, 6),
+    ...     (5, 6),
+    ...     (7, 8),
+    ... ]
+    >>> G = nx.Graph(e)
+    >>> G.add_node(9)
+    >>> nx.chordal_graph_treewidth(G)
+    3
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Tree_decomposition#Treewidth
+    """
+    if not is_chordal(G):
+        raise nx.NetworkXError("Input graph is not chordal.")
+
+    max_clique = -1
+    for clique in nx.chordal_graph_cliques(G):
+        max_clique = max(max_clique, len(clique))
+    return max_clique - 1
+
+
+def _is_complete_graph(G):
+    """Returns True if G is a complete graph."""
+    if nx.number_of_selfloops(G) > 0:
+        raise nx.NetworkXError("Self loop found in _is_complete_graph()")
+    n = G.number_of_nodes()
+    if n < 2:
+        return True
+    e = G.number_of_edges()
+    max_edges = (n * (n - 1)) / 2
+    return e == max_edges
+
+
+def _find_missing_edge(G):
+    """Given a non-complete graph G, returns a missing edge."""
+    nodes = set(G)
+    for u in G:
+        missing = nodes - set(list(G[u].keys()) + [u])
+        if missing:
+            return (u, missing.pop())
+
+
+def _max_cardinality_node(G, choices, wanna_connect):
+    """Returns the node in choices that has the most connections in G
+    to nodes in wanna_connect.
+    """
+    max_number = -1
+    for x in choices:
+        number = len([y for y in G[x] if y in wanna_connect])
+        if number > max_number:
+            max_number = number
+            max_cardinality_node = x
+    return max_cardinality_node
+
+
+def _find_chordality_breaker(G, s=None, treewidth_bound=sys.maxsize):
+    """Given a graph G, starts a max cardinality search
+    (starting from s if s is given and from an arbitrary node otherwise)
+    trying to find a non-chordal cycle.
+
+    If it does find one, it returns (u,v,w) where u,v,w are the three
+    nodes that together with s are involved in the cycle.
+
+    It ignores any self loops.
+    """
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
+    unnumbered = set(G)
+    if s is None:
+        s = arbitrary_element(G)
+    unnumbered.remove(s)
+    numbered = {s}
+    current_treewidth = -1
+    while unnumbered:  # and current_treewidth <= treewidth_bound:
+        v = _max_cardinality_node(G, unnumbered, numbered)
+        unnumbered.remove(v)
+        numbered.add(v)
+        clique_wanna_be = set(G[v]) & numbered
+        sg = G.subgraph(clique_wanna_be)
+        if _is_complete_graph(sg):
+            # The graph seems to be chordal by now. We update the treewidth
+            current_treewidth = max(current_treewidth, len(clique_wanna_be))
+            if current_treewidth > treewidth_bound:
+                raise nx.NetworkXTreewidthBoundExceeded(
+                    f"treewidth_bound exceeded: {current_treewidth}"
+                )
+        else:
+            # sg is not a clique,
+            # look for an edge that is not included in sg
+            (u, w) = _find_missing_edge(sg)
+            return (u, v, w)
+    return ()
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(returns_graph=True)
+def complete_to_chordal_graph(G):
+    """Return a copy of G completed to a chordal graph.
+
+    Adds edges to a copy of G to create a chordal graph. A graph G=(V,E) is
+    called chordal if for each cycle of length greater than 3, there exist
+    two non-adjacent nodes connected by an edge (called a chord).
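+
+    For example, a 4-cycle is not chordal, and adding either one of its
+    diagonals as a chord completes it to a chordal graph.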
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    Returns
+    -------
+    H : NetworkX graph
+        The chordal enhancement of G
+    alpha : Dictionary
+        The elimination ordering of nodes of G
+
+    Notes
+    -----
+    There are different approaches to calculate the chordal
+    enhancement of a graph. The algorithm used here is called
+    MCS-M and gives at least a minimal (local) triangulation of the graph.
+    Note that this triangulation is not necessarily a global minimum.
+
+    https://en.wikipedia.org/wiki/Chordal_graph
+
+    References
+    ----------
+    .. [1] Berry, Anne & Blair, Jean & Heggernes, Pinar & Peyton, Barry. (2004)
+       Maximum Cardinality Search for Computing Minimal Triangulations of
+       Graphs. Algorithmica. 39. 287-298. 10.1007/s00453-004-1084-3.
+
+    Examples
+    --------
+    >>> from networkx.algorithms.chordal import complete_to_chordal_graph
+    >>> G = nx.wheel_graph(10)
+    >>> H, alpha = complete_to_chordal_graph(G)
+    """
+    H = G.copy()
+    alpha = dict.fromkeys(H, 0)
+    if nx.is_chordal(H):
+        return H, alpha
+    chords = set()
+    weight = dict.fromkeys(H.nodes(), 0)
+    unnumbered_nodes = list(H.nodes())
+    for i in range(len(H.nodes()), 0, -1):
+        # get the node in unnumbered_nodes with the maximum weight
+        z = max(unnumbered_nodes, key=lambda node: weight[node])
+        unnumbered_nodes.remove(z)
+        alpha[z] = i
+        update_nodes = []
+        for y in unnumbered_nodes:
+            if G.has_edge(y, z):
+                update_nodes.append(y)
+            else:
+                # y_weight will be bigger than node weights between y and z
+                y_weight = weight[y]
+                lower_nodes = [
+                    node for node in unnumbered_nodes if weight[node] < y_weight
+                ]
+                if nx.has_path(H.subgraph(lower_nodes + [z, y]), y, z):
+                    update_nodes.append(y)
+                    chords.add((z, y))
+        # during calculation of paths the weights should not be updated
+        for node in update_nodes:
+            weight[node] += 1
+    H.add_edges_from(chords)
+    return H, alpha
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/clique.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/clique.py
new file mode 100644
index 0000000..fe961a9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/clique.py
@@ -0,0 +1,757 @@
+"""Functions for finding and manipulating cliques.
+
+Finding the largest clique in a graph is an NP-complete problem, so most of
+these algorithms have an exponential running time; for more information,
+see the Wikipedia article on the clique problem [1]_.
+
+.. [1] clique problem: https://en.wikipedia.org/wiki/Clique_problem
+
+"""
+
+from collections import Counter, defaultdict, deque
+from itertools import chain, combinations, islice
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+    "find_cliques",
+    "find_cliques_recursive",
+    "make_max_clique_graph",
+    "make_clique_bipartite",
+    "node_clique_number",
+    "number_of_cliques",
+    "enumerate_all_cliques",
+    "max_weight_clique",
+]
+
+
+@not_implemented_for("directed")
+@nx._dispatchable
+def enumerate_all_cliques(G):
+    """Returns all cliques in an undirected graph.
+
+    This function returns an iterator over cliques, each of which is a
+    list of nodes. The iteration is ordered by cardinality of the
+    cliques: first all cliques of size one, then all cliques of size
+    two, etc.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    Returns
+    -------
+    iterator
+        An iterator over cliques, each of which is a list of nodes in
+        `G`. The cliques are ordered according to size.
+
+    Notes
+    -----
+    To obtain a list of all cliques, use
+    `list(enumerate_all_cliques(G))`.
However, be aware that in the + worst-case, the length of this list can be exponential in the number + of nodes in the graph (for example, when the graph is the complete + graph). This function avoids storing all cliques in memory by only + keeping current candidate node lists in memory during its search. + + The implementation is adapted from the algorithm by Zhang, et + al. (2005) [1]_ to output all cliques discovered. + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Yun Zhang, Abu-Khzam, F.N., Baldwin, N.E., Chesler, E.J., + Langston, M.A., Samatova, N.F., + "Genome-Scale Computational Approaches to Memory-Intensive + Applications in Systems Biology". + *Supercomputing*, 2005. Proceedings of the ACM/IEEE SC 2005 + Conference, pp. 12, 12--18 Nov. 2005. + . + + """ + index = {} + nbrs = {} + for u in G: + index[u] = len(index) + # Neighbors of u that appear after u in the iteration order of G. + nbrs[u] = {v for v in G[u] if v not in index} + + queue = deque(([u], sorted(nbrs[u], key=index.__getitem__)) for u in G) + # Loop invariants: + # 1. len(base) is nondecreasing. + # 2. (base + cnbrs) is sorted with respect to the iteration order of G. + # 3. cnbrs is a set of common neighbors of nodes in base. + while queue: + base, cnbrs = map(list, queue.popleft()) + yield base + for i, u in enumerate(cnbrs): + # Use generators to reduce memory consumption. + queue.append( + ( + chain(base, [u]), + filter(nbrs[u].__contains__, islice(cnbrs, i + 1, None)), + ) + ) + + +@not_implemented_for("directed") +@nx._dispatchable +def find_cliques(G, nodes=None): + """Returns all maximal cliques in an undirected graph. + + For each node *n*, a *maximal clique for n* is a largest complete + subgraph containing *n*. The largest maximal clique is sometimes + called the *maximum clique*. + + This function returns an iterator over cliques, each of which is a + list of nodes. It is an iterative implementation, so should not + suffer from recursion depth issues. + + This function accepts a list of `nodes` and only the maximal cliques + containing all of these `nodes` are returned. It can considerably speed up + the running time if some specific cliques are desired. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + nodes : list, optional (default=None) + If provided, only yield *maximal cliques* containing all nodes in `nodes`. + If `nodes` isn't a clique itself, a ValueError is raised. + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a list of + nodes in `G`. If `nodes` is provided, only the maximal cliques + containing all the nodes in `nodes` are returned. The order of + cliques is arbitrary. + + Raises + ------ + ValueError + If `nodes` is not a clique. + + Examples + -------- + >>> from pprint import pprint # For nice dict formatting + >>> G = nx.karate_club_graph() + >>> sum(1 for c in nx.find_cliques(G)) # The number of maximal cliques in G + 36 + >>> max(nx.find_cliques(G), key=len) # The largest maximal clique in G + [0, 1, 2, 3, 13] + + The size of the largest maximal clique is known as the *clique number* of + the graph, which can be found directly with: + + >>> max(len(c) for c in nx.find_cliques(G)) + 5 + + One can also compute the number of maximal cliques in `G` that contain a given + node. 
The following produces a dictionary keyed by node whose + values are the number of maximal cliques in `G` that contain the node: + + >>> from collections import Counter + >>> from itertools import chain + >>> counts = Counter(chain.from_iterable(nx.find_cliques(G))) + >>> pprint(dict(counts)) + {0: 13, + 1: 6, + 2: 7, + 3: 3, + 4: 2, + 5: 3, + 6: 3, + 7: 1, + 8: 3, + 9: 2, + 10: 2, + 11: 1, + 12: 1, + 13: 2, + 14: 1, + 15: 1, + 16: 1, + 17: 1, + 18: 1, + 19: 2, + 20: 1, + 21: 1, + 22: 1, + 23: 3, + 24: 2, + 25: 2, + 26: 1, + 27: 3, + 28: 2, + 29: 2, + 30: 2, + 31: 4, + 32: 9, + 33: 14} + + Or, similarly, the maximal cliques in `G` that contain a given node. + For example, the 4 maximal cliques that contain node 31: + + >>> [c for c in nx.find_cliques(G) if 31 in c] + [[0, 31], [33, 32, 31], [33, 28, 31], [24, 25, 31]] + + See Also + -------- + find_cliques_recursive + A recursive version of the same algorithm. + + Notes + ----- + To obtain a list of all maximal cliques, use + `list(find_cliques(G))`. However, be aware that in the worst-case, + the length of this list can be exponential in the number of nodes in + the graph. This function avoids storing all cliques in memory by + only keeping current candidate node lists in memory during its search. + + This implementation is based on the algorithm published by Bron and + Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi + (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. It + essentially unrolls the recursion used in the references to avoid + issues of recursion stack depth (for a recursive implementation, see + :func:`find_cliques_recursive`). + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Bron, C. and Kerbosch, J. + "Algorithm 457: finding all cliques of an undirected graph". + *Communications of the ACM* 16, 9 (Sep. 1973), 575--577. + + + .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi, + "The worst-case time complexity for generating all maximal + cliques and computational experiments", + *Theoretical Computer Science*, Volume 363, Issue 1, + Computing and Combinatorics, + 10th Annual International Conference on + Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42 + + + .. [3] F. Cazals, C. Karande, + "A note on the problem of reporting maximal cliques", + *Theoretical Computer Science*, + Volume 407, Issues 1--3, 6 November 2008, Pages 564--568, + + + """ + if len(G) == 0: + return + + adj = {u: {v for v in G[u] if v != u} for u in G} + + # Initialize Q with the given nodes and subg, cand with their nbrs + Q = nodes[:] if nodes is not None else [] + cand = set(G) + for node in Q: + if node not in cand: + raise ValueError(f"The given `nodes` {nodes} do not form a clique") + cand &= adj[node] + + if not cand: + yield Q[:] + return + + subg = cand.copy() + stack = [] + Q.append(None) + + u = max(subg, key=lambda u: len(cand & adj[u])) + ext_u = cand - adj[u] + + try: + while True: + if ext_u: + q = ext_u.pop() + cand.remove(q) + Q[-1] = q + adj_q = adj[q] + subg_q = subg & adj_q + if not subg_q: + yield Q[:] + else: + cand_q = cand & adj_q + if cand_q: + stack.append((subg, cand, ext_u)) + Q.append(None) + subg = subg_q + cand = cand_q + u = max(subg, key=lambda u: len(cand & adj[u])) + ext_u = cand - adj[u] + else: + Q.pop() + subg, cand, ext_u = stack.pop() + except IndexError: + pass + + +# TODO Should this also be not implemented for directed graphs? 
+@nx._dispatchable +def find_cliques_recursive(G, nodes=None): + """Returns all maximal cliques in a graph. + + For each node *v*, a *maximal clique for v* is a largest complete + subgraph containing *v*. The largest maximal clique is sometimes + called the *maximum clique*. + + This function returns an iterator over cliques, each of which is a + list of nodes. It is a recursive implementation, so may suffer from + recursion depth issues, but is included for pedagogical reasons. + For a non-recursive implementation, see :func:`find_cliques`. + + This function accepts a list of `nodes` and only the maximal cliques + containing all of these `nodes` are returned. It can considerably speed up + the running time if some specific cliques are desired. + + Parameters + ---------- + G : NetworkX graph + + nodes : list, optional (default=None) + If provided, only yield *maximal cliques* containing all nodes in `nodes`. + If `nodes` isn't a clique itself, a ValueError is raised. + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a list of + nodes in `G`. If `nodes` is provided, only the maximal cliques + containing all the nodes in `nodes` are yielded. The order of + cliques is arbitrary. + + Raises + ------ + ValueError + If `nodes` is not a clique. + + See Also + -------- + find_cliques + An iterative version of the same algorithm. See docstring for examples. + + Notes + ----- + To obtain a list of all maximal cliques, use + `list(find_cliques_recursive(G))`. However, be aware that in the + worst-case, the length of this list can be exponential in the number + of nodes in the graph. This function avoids storing all cliques in memory + by only keeping current candidate node lists in memory during its search. + + This implementation is based on the algorithm published by Bron and + Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi + (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. For a + non-recursive implementation, see :func:`find_cliques`. + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Bron, C. and Kerbosch, J. + "Algorithm 457: finding all cliques of an undirected graph". + *Communications of the ACM* 16, 9 (Sep. 1973), 575--577. + + + .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi, + "The worst-case time complexity for generating all maximal + cliques and computational experiments", + *Theoretical Computer Science*, Volume 363, Issue 1, + Computing and Combinatorics, + 10th Annual International Conference on + Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42 + + + .. [3] F. Cazals, C. 
Karande, + "A note on the problem of reporting maximal cliques", + *Theoretical Computer Science*, + Volume 407, Issues 1--3, 6 November 2008, Pages 564--568, + + + """ + if len(G) == 0: + return iter([]) + + adj = {u: {v for v in G[u] if v != u} for u in G} + + # Initialize Q with the given nodes and subg, cand with their nbrs + Q = nodes[:] if nodes is not None else [] + cand_init = set(G) + for node in Q: + if node not in cand_init: + raise ValueError(f"The given `nodes` {nodes} do not form a clique") + cand_init &= adj[node] + + if not cand_init: + return iter([Q]) + + subg_init = cand_init.copy() + + def expand(subg, cand): + u = max(subg, key=lambda u: len(cand & adj[u])) + for q in cand - adj[u]: + cand.remove(q) + Q.append(q) + adj_q = adj[q] + subg_q = subg & adj_q + if not subg_q: + yield Q[:] + else: + cand_q = cand & adj_q + if cand_q: + yield from expand(subg_q, cand_q) + Q.pop() + + return expand(subg_init, cand_init) + + +@nx._dispatchable(returns_graph=True) +def make_max_clique_graph(G, create_using=None): + """Returns the maximal clique graph of the given graph. + + The nodes of the maximal clique graph of `G` are the cliques of + `G` and an edge joins two cliques if the cliques are not disjoint. + + Parameters + ---------- + G : NetworkX graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + A graph whose nodes are the cliques of `G` and whose edges + join two cliques if they are not disjoint. + + Notes + ----- + This function behaves like the following code:: + + import networkx as nx + + G = nx.make_clique_bipartite(G) + cliques = [v for v in G.nodes() if G.nodes[v]["bipartite"] == 0] + G = nx.bipartite.projected_graph(G, cliques) + G = nx.relabel_nodes(G, {-v: v - 1 for v in G}) + + It should be faster, though, since it skips all the intermediate + steps. + + """ + if create_using is None: + B = G.__class__() + else: + B = nx.empty_graph(0, create_using) + cliques = list(enumerate(set(c) for c in find_cliques(G))) + # Add a numbered node for each clique. + B.add_nodes_from(i for i, c in cliques) + # Join cliques by an edge if they share a node. + clique_pairs = combinations(cliques, 2) + B.add_edges_from((i, j) for (i, c1), (j, c2) in clique_pairs if c1 & c2) + return B + + +@nx._dispatchable(returns_graph=True) +def make_clique_bipartite(G, fpos=None, create_using=None, name=None): + """Returns the bipartite clique graph corresponding to `G`. + + In the returned bipartite graph, the "bottom" nodes are the nodes of + `G` and the "top" nodes represent the maximal cliques of `G`. + There is an edge from node *v* to clique *C* in the returned graph + if and only if *v* is an element of *C*. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + fpos : bool + If True or not None, the returned graph will have an + additional attribute, `pos`, a dictionary mapping node to + position in the Euclidean plane. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + A bipartite graph whose "bottom" set is the nodes of the graph + `G`, whose "top" set is the cliques of `G`, and whose edges + join nodes of `G` to the cliques that contain them. 
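+
+        The nodes representing cliques are labelled with consecutive
+        negative integers: the clique yielded at position *i* by
+        :func:`find_cliques` receives the label ``-i - 1``.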
+ + The nodes of the graph `G` have the node attribute + 'bipartite' set to 1 and the nodes representing cliques + have the node attribute 'bipartite' set to 0, as is the + convention for bipartite graphs in NetworkX. + + """ + B = nx.empty_graph(0, create_using) + B.clear() + # The "bottom" nodes in the bipartite graph are the nodes of the + # original graph, G. + B.add_nodes_from(G, bipartite=1) + for i, cl in enumerate(find_cliques(G)): + # The "top" nodes in the bipartite graph are the cliques. These + # nodes get negative numbers as labels. + name = -i - 1 + B.add_node(name, bipartite=0) + B.add_edges_from((v, name) for v in cl) + return B + + +@nx._dispatchable +def node_clique_number(G, nodes=None, cliques=None, separate_nodes=False): + """Returns the size of the largest maximal clique containing each given node. + + Returns a single or list depending on input nodes. + An optional list of cliques can be input if already computed. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + cliques : list, optional (default=None) + A list of cliques, each of which is itself a list of nodes. + If not specified, the list of all cliques will be computed + using :func:`find_cliques`. + + Returns + ------- + int or dict + If `nodes` is a single node, returns the size of the + largest maximal clique in `G` containing that node. + Otherwise return a dict keyed by node to the size + of the largest maximal clique containing that node. + + See Also + -------- + find_cliques + find_cliques yields the maximal cliques of G. + It accepts a `nodes` argument which restricts consideration to + maximal cliques containing all the given `nodes`. + The search for the cliques is optimized for `nodes`. + """ + if cliques is None: + if nodes is not None: + # Use ego_graph to decrease size of graph + # check for single node + if nodes in G: + return max(len(c) for c in find_cliques(nx.ego_graph(G, nodes))) + # handle multiple nodes + return { + n: max(len(c) for c in find_cliques(nx.ego_graph(G, n))) for n in nodes + } + + # nodes is None--find all cliques + cliques = list(find_cliques(G)) + + # single node requested + if nodes in G: + return max(len(c) for c in cliques if nodes in c) + + # multiple nodes requested + # preprocess all nodes (faster than one at a time for even 2 nodes) + size_for_n = defaultdict(int) + for c in cliques: + size_of_c = len(c) + for n in c: + if size_for_n[n] < size_of_c: + size_for_n[n] = size_of_c + if nodes is None: + return size_for_n + return {n: size_for_n[n] for n in nodes} + + +def number_of_cliques(G, nodes=None, cliques=None): + """Returns the number of maximal cliques for each node. + + Returns a single or list depending on input nodes. + Optional list of cliques can be input if already computed. + """ + if cliques is None: + cliques = find_cliques(G) + + if nodes is None: + nodes = list(G.nodes()) # none, get entire graph + + if not isinstance(nodes, list): # check for a list + v = nodes + # assume it is a single value + numcliq = sum(1 for c in cliques if v in c) + else: + numcliq = Counter(chain.from_iterable(cliques)) + numcliq = {v: numcliq[v] for v in nodes} # return a dict + return numcliq + + +class MaxWeightClique: + """A class for the maximum weight clique algorithm. + + This class is a helper for the `max_weight_clique` function. The class + should not normally be used directly. 
+ + Parameters + ---------- + G : NetworkX graph + The undirected graph for which a maximum weight clique is sought + weight : string or None, optional (default='weight') + The node attribute that holds the integer value used as a weight. + If None, then each node has weight 1. + + Attributes + ---------- + G : NetworkX graph + The undirected graph for which a maximum weight clique is sought + node_weights: dict + The weight of each node + incumbent_nodes : list + The nodes of the incumbent clique (the best clique found so far) + incumbent_weight: int + The weight of the incumbent clique + """ + + def __init__(self, G, weight): + self.G = G + self.incumbent_nodes = [] + self.incumbent_weight = 0 + + if weight is None: + self.node_weights = dict.fromkeys(G.nodes(), 1) + else: + for v in G.nodes(): + if weight not in G.nodes[v]: + errmsg = f"Node {v!r} does not have the requested weight field." + raise KeyError(errmsg) + if not isinstance(G.nodes[v][weight], int): + errmsg = f"The {weight!r} field of node {v!r} is not an integer." + raise ValueError(errmsg) + self.node_weights = {v: G.nodes[v][weight] for v in G.nodes()} + + def update_incumbent_if_improved(self, C, C_weight): + """Update the incumbent if the node set C has greater weight. + + C is assumed to be a clique. + """ + if C_weight > self.incumbent_weight: + self.incumbent_nodes = C[:] + self.incumbent_weight = C_weight + + def greedily_find_independent_set(self, P): + """Greedily find an independent set of nodes from a set of + nodes P.""" + independent_set = [] + P = P[:] + while P: + v = P[0] + independent_set.append(v) + P = [w for w in P if v != w and not self.G.has_edge(v, w)] + return independent_set + + def find_branching_nodes(self, P, target): + """Find a set of nodes to branch on.""" + residual_wt = {v: self.node_weights[v] for v in P} + total_wt = 0 + P = P[:] + while P: + independent_set = self.greedily_find_independent_set(P) + min_wt_in_class = min(residual_wt[v] for v in independent_set) + total_wt += min_wt_in_class + if total_wt > target: + break + for v in independent_set: + residual_wt[v] -= min_wt_in_class + P = [v for v in P if residual_wt[v] != 0] + return P + + def expand(self, C, C_weight, P): + """Look for the best clique that contains all the nodes in C and zero or + more of the nodes in P, backtracking if it can be shown that no such + clique has greater weight than the incumbent. + """ + self.update_incumbent_if_improved(C, C_weight) + branching_nodes = self.find_branching_nodes(P, self.incumbent_weight - C_weight) + while branching_nodes: + v = branching_nodes.pop() + P.remove(v) + new_C = C + [v] + new_C_weight = C_weight + self.node_weights[v] + new_P = [w for w in P if self.G.has_edge(v, w)] + self.expand(new_C, new_C_weight, new_P) + + def find_max_weight_clique(self): + """Find a maximum weight clique.""" + # Sort nodes in reverse order of degree for speed + nodes = sorted(self.G.nodes(), key=lambda v: self.G.degree(v), reverse=True) + nodes = [v for v in nodes if self.node_weights[v] > 0] + self.expand([], 0, nodes) + + +@not_implemented_for("directed") +@nx._dispatchable(node_attrs="weight") +def max_weight_clique(G, weight="weight"): + """Find a maximum weight clique in G. + + A *clique* in a graph is a set of nodes such that every two distinct nodes + are adjacent. The *weight* of a clique is the sum of the weights of its + nodes. A *maximum weight clique* of graph G is a clique C in G such that + no clique in G has weight greater than the weight of C. 
+ + Parameters + ---------- + G : NetworkX graph + Undirected graph + weight : string or None, optional (default='weight') + The node attribute that holds the integer value used as a weight. + If None, then each node has weight 1. + + Returns + ------- + clique : list + the nodes of a maximum weight clique + weight : int + the weight of a maximum weight clique + + Notes + ----- + The implementation is recursive, and therefore it may run into recursion + depth issues if G contains a clique whose number of nodes is close to the + recursion depth limit. + + At each search node, the algorithm greedily constructs a weighted + independent set cover of part of the graph in order to find a small set of + nodes on which to branch. The algorithm is very similar to the algorithm + of Tavares et al. [1]_, other than the fact that the NetworkX version does + not use bitsets. This style of algorithm for maximum weight clique (and + maximum weight independent set, which is the same problem but on the + complement graph) has a decades-long history. See Algorithm B of Warren + and Hicks [2]_ and the references in that paper. + + References + ---------- + .. [1] Tavares, W.A., Neto, M.B.C., Rodrigues, C.D., Michelon, P.: Um + algoritmo de branch and bound para o problema da clique máxima + ponderada. Proceedings of XLVII SBPO 1 (2015). + + .. [2] Warren, Jeffrey S, Hicks, Illya V.: Combinatorial Branch-and-Bound + for the Maximum Weight Independent Set Problem. Technical Report, + Texas A&M University (2016). + """ + + mwc = MaxWeightClique(G, weight) + mwc.find_max_weight_clique() + return mwc.incumbent_nodes, mwc.incumbent_weight diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/cluster.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/cluster.py new file mode 100644 index 0000000..ceb0c4e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/cluster.py @@ -0,0 +1,658 @@ +"""Algorithms to characterize the number of triangles in a graph.""" + +from collections import Counter +from itertools import chain, combinations + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "triangles", + "average_clustering", + "clustering", + "transitivity", + "square_clustering", + "generalized_degree", +] + + +@not_implemented_for("directed") +@nx._dispatchable +def triangles(G, nodes=None): + """Compute the number of triangles. + + Finds the number of triangles that include a node as one vertex. + + Parameters + ---------- + G : graph + A networkx graph + + nodes : node, iterable of nodes, or None (default=None) + If a singleton node, return the number of triangles for that node. + If an iterable, compute the number of triangles for each of those nodes. + If `None` (the default) compute the number of triangles for all nodes in `G`. + + Returns + ------- + out : dict or int + If `nodes` is a container of nodes, returns number of triangles keyed by node (dict). + If `nodes` is a specific node, returns number of triangles for the node (int). + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.triangles(G, 0)) + 6 + >>> print(nx.triangles(G)) + {0: 6, 1: 6, 2: 6, 3: 6, 4: 6} + >>> print(list(nx.triangles(G, [0, 1]).values())) + [6, 6] + + Notes + ----- + Self loops are ignored. 
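+
+    Every triangle is counted at each of its three nodes, so the
+    per-node counts sum to three times the number of triangles in the
+    graph:
+
+    >>> sum(nx.triangles(G).values()) // 3
+    10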
+
+    """
+    if nodes is not None:
+        # If `nodes` represents a single node, return only its number of triangles
+        if nodes in G:
+            return next(_triangles_and_degree_iter(G, nodes))[2] // 2
+
+        # if `nodes` is a container of nodes, then return a
+        # dictionary mapping node to number of triangles.
+        return {v: t // 2 for v, d, t, _ in _triangles_and_degree_iter(G, nodes)}
+
+    # if nodes is None, then compute triangles for the entire graph
+
+    # dict used to avoid visiting the same nodes twice
+    # this allows calculating/counting each triangle only once
+    later_nbrs = {}
+
+    # iterate over the nodes in a graph
+    for node, neighbors in G.adjacency():
+        later_nbrs[node] = {n for n in neighbors if n not in later_nbrs and n != node}
+
+    # instantiate Counter for each node to include isolated nodes
+    # add 1 to the count if a node's neighbor's neighbor is also a neighbor
+    triangle_counts = Counter(dict.fromkeys(G, 0))
+    for node1, neighbors in later_nbrs.items():
+        for node2 in neighbors:
+            third_nodes = neighbors & later_nbrs[node2]
+            m = len(third_nodes)
+            triangle_counts[node1] += m
+            triangle_counts[node2] += m
+            triangle_counts.update(third_nodes)
+
+    return dict(triangle_counts)
+
+
+@not_implemented_for("multigraph")
+def _triangles_and_degree_iter(G, nodes=None):
+    """Return an iterator of (node, degree, triangles, generalized degree).
+
+    This double counts triangles so you may want to divide by 2.
+    See degree(), triangles() and generalized_degree() for definitions
+    and details.
+
+    """
+    if nodes is None:
+        nodes_nbrs = G.adj.items()
+    else:
+        nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes))
+
+    for v, v_nbrs in nodes_nbrs:
+        vs = set(v_nbrs) - {v}
+        gen_degree = Counter(len(vs & (set(G[w]) - {w})) for w in vs)
+        ntriangles = sum(k * val for k, val in gen_degree.items())
+        yield (v, len(vs), ntriangles, gen_degree)
+
+
+@not_implemented_for("multigraph")
+def _weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"):
+    """Return an iterator of (node, degree, weighted_triangles).
+
+    Used for weighted clustering.
+    Note: this returns the geometric average weight of edges in the triangle.
+    Also, each triangle is counted twice (each direction).
+    So you may want to divide by 2.
+
+    """
+    import numpy as np
+
+    if weight is None or G.number_of_edges() == 0:
+        max_weight = 1
+    else:
+        max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True))
+    if nodes is None:
+        nodes_nbrs = G.adj.items()
+    else:
+        nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes))
+
+    def wt(u, v):
+        return G[u][v].get(weight, 1) / max_weight
+
+    for i, nbrs in nodes_nbrs:
+        inbrs = set(nbrs) - {i}
+        weighted_triangles = 0
+        seen = set()
+        for j in inbrs:
+            seen.add(j)
+            # This avoids counting twice -- we double at the end.
+            jnbrs = set(G[j]) - seen
+            # Only compute the edge weight once, before the innermost loop.
+            wij = wt(i, j)
+            weighted_triangles += np.cbrt(
+                [(wij * wt(j, k) * wt(k, i)) for k in inbrs & jnbrs]
+            ).sum()
+        yield (i, len(inbrs), 2 * float(weighted_triangles))
+
+
+@not_implemented_for("multigraph")
+def _directed_triangles_and_degree_iter(G, nodes=None):
+    """Return an iterator of
+    (node, total_degree, reciprocal_degree, directed_triangles).
+
+    Used for directed clustering.
+    Note that unlike `_triangles_and_degree_iter()`, this function counts
+    directed triangles so does not count triangles twice.
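+
+    In matrix terms, the count yielded for node :math:`i` equals
+    :math:`((A + A^{T})^{3})_{ii}`, i.e. twice Fagiolo's directed
+    triangle count; the matching factor of 2 appears in the denominator
+    of the directed clustering formula in :func:`clustering`.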
+ + """ + nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes)) + + for i, preds, succs in nodes_nbrs: + ipreds = set(preds) - {i} + isuccs = set(succs) - {i} + + directed_triangles = 0 + for j in chain(ipreds, isuccs): + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + 1 + for k in chain( + (ipreds & jpreds), + (ipreds & jsuccs), + (isuccs & jpreds), + (isuccs & jsuccs), + ) + ) + dtotal = len(ipreds) + len(isuccs) + dbidirectional = len(ipreds & isuccs) + yield (i, dtotal, dbidirectional, directed_triangles) + + +@not_implemented_for("multigraph") +def _directed_weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"): + """Return an iterator of + (node, total_degree, reciprocal_degree, directed_weighted_triangles). + + Used for directed weighted clustering. + Note that unlike `_weighted_triangles_and_degree_iter()`, this function counts + directed triangles so does not count triangles twice. + + """ + import numpy as np + + if weight is None or G.number_of_edges() == 0: + max_weight = 1 + else: + max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True)) + + nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes)) + + def wt(u, v): + return G[u][v].get(weight, 1) / max_weight + + for i, preds, succs in nodes_nbrs: + ipreds = set(preds) - {i} + isuccs = set(succs) - {i} + + directed_triangles = 0 + for j in ipreds: + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += np.cbrt( + [(wt(j, i) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds] + ).sum() + directed_triangles += np.cbrt( + [(wt(j, i) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs] + ).sum() + directed_triangles += np.cbrt( + [(wt(j, i) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds] + ).sum() + directed_triangles += np.cbrt( + [(wt(j, i) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs] + ).sum() + + for j in isuccs: + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += np.cbrt( + [(wt(i, j) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds] + ).sum() + directed_triangles += np.cbrt( + [(wt(i, j) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs] + ).sum() + directed_triangles += np.cbrt( + [(wt(i, j) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds] + ).sum() + directed_triangles += np.cbrt( + [(wt(i, j) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs] + ).sum() + + dtotal = len(ipreds) + len(isuccs) + dbidirectional = len(ipreds & isuccs) + yield (i, dtotal, dbidirectional, float(directed_triangles)) + + +@nx._dispatchable(edge_attrs="weight") +def average_clustering(G, nodes=None, weight=None, count_zeros=True): + r"""Compute the average clustering coefficient for the graph G. + + The clustering coefficient for the graph is the average, + + .. math:: + + C = \frac{1}{n}\sum_{v \in G} c_v, + + where :math:`n` is the number of nodes in `G`. + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute average clustering for nodes in this container. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + count_zeros : bool + If False include only the nodes with nonzero clustering in the average. 
+
+    Returns
+    -------
+    avg : float
+        Average clustering
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> print(nx.average_clustering(G))
+    1.0
+
+    Notes
+    -----
+    This is a space saving routine; it might be faster
+    to use the clustering function to get a list and then take the average.
+
+    Self loops are ignored.
+
+    References
+    ----------
+    .. [1] Generalizations of the clustering coefficient to weighted
+       complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
+       K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
+       http://jponnela.com/web_documents/a9.pdf
+    .. [2] Marcus Kaiser, Mean clustering coefficients: the role of isolated
+       nodes and leafs on clustering measures for small-world networks.
+       https://arxiv.org/abs/0802.2512
+    """
+    c = clustering(G, nodes, weight=weight).values()
+    if not count_zeros:
+        c = [v for v in c if abs(v) > 0]
+    return sum(c) / len(c)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def clustering(G, nodes=None, weight=None):
+    r"""Compute the clustering coefficient for nodes.
+
+    For unweighted graphs, the clustering of a node :math:`u`
+    is the fraction of possible triangles through that node that exist,
+
+    .. math::
+
+      c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)},
+
+    where :math:`T(u)` is the number of triangles through node :math:`u` and
+    :math:`deg(u)` is the degree of :math:`u`.
+
+    For weighted graphs, there are several ways to define clustering [1]_.
+    The one used here is defined
+    as the geometric average of the subgraph edge weights [2]_,
+
+    .. math::
+
+       c_u = \frac{1}{deg(u)(deg(u)-1)}
+             \sum_{vw} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3}.
+
+    The edge weights :math:`\hat{w}_{uv}` are normalized by the maximum weight
+    in the network :math:`\hat{w}_{uv} = w_{uv}/\max(w)`.
+
+    The value of :math:`c_u` is set to 0 if :math:`deg(u) < 2`.
+
+    Additionally, this weighted definition has been generalized to support negative edge weights [3]_.
+
+    For directed graphs, the clustering is similarly defined as the fraction
+    of all possible directed triangles or geometric average of the subgraph
+    edge weights for unweighted and weighted directed graphs, respectively [4]_.
+
+    .. math::
+
+       c_u = \frac{T(u)}{2(deg^{tot}(u)(deg^{tot}(u)-1) - 2deg^{\leftrightarrow}(u))},
+
+    where :math:`T(u)` is the number of directed triangles through node
+    :math:`u`, :math:`deg^{tot}(u)` is the sum of in degree and out degree of
+    :math:`u` and :math:`deg^{\leftrightarrow}(u)` is the reciprocal degree of
+    :math:`u`.
+
+
+    Parameters
+    ----------
+    G : graph
+
+    nodes : node, iterable of nodes, or None (default=None)
+        If a singleton node, return the clustering of that node.
+        If an iterable, compute the clustering for each of those nodes.
+        If `None` (the default) compute the clustering for all nodes in `G`.
+
+    weight : string or None, optional (default=None)
+        The edge attribute that holds the numerical value used as a weight.
+        If None, then each edge has weight 1.
+
+    Returns
+    -------
+    out : float, or dictionary
+        Clustering coefficient at specified nodes
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> print(nx.clustering(G, 0))
+    1.0
+    >>> print(nx.clustering(G))
+    {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}
+
+    Notes
+    -----
+    Self loops are ignored.
+
+    References
+    ----------
+    .. [1] Generalizations of the clustering coefficient to weighted
+       complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
+       K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
+ http://jponnela.com/web_documents/a9.pdf + .. [2] Intensity and coherence of motifs in weighted complex + networks by J. P. Onnela, J. Saramäki, J. Kertész, and K. Kaski, + Physical Review E, 71(6), 065103 (2005). + .. [3] Generalization of Clustering Coefficients to Signed Correlation Networks + by G. Costantini and M. Perugini, PloS one, 9(2), e88669 (2014). + .. [4] Clustering in complex directed networks by G. Fagiolo, + Physical Review E, 76(2), 026107 (2007). + """ + if G.is_directed(): + if weight is not None: + td_iter = _directed_weighted_triangles_and_degree_iter(G, nodes, weight) + clusterc = { + v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2) + for v, dt, db, t in td_iter + } + else: + td_iter = _directed_triangles_and_degree_iter(G, nodes) + clusterc = { + v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2) + for v, dt, db, t in td_iter + } + else: + # The formula 2*T/(d*(d-1)) from docs is t/(d*(d-1)) here b/c t==2*T + if weight is not None: + td_iter = _weighted_triangles_and_degree_iter(G, nodes, weight) + clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t in td_iter} + else: + td_iter = _triangles_and_degree_iter(G, nodes) + clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t, _ in td_iter} + if nodes in G: + # Return the value of the sole entry in the dictionary. + return clusterc[nodes] + return clusterc + + +@nx._dispatchable +def transitivity(G): + r"""Compute graph transitivity, the fraction of all possible triangles + present in G. + + Possible triangles are identified by the number of "triads" + (two edges with a shared vertex). + + The transitivity is + + .. math:: + + T = 3\frac{\#triangles}{\#triads}. + + Parameters + ---------- + G : graph + + Returns + ------- + out : float + Transitivity + + Notes + ----- + Self loops are ignored. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.transitivity(G)) + 1.0 + """ + triangles_contri = [ + (t, d * (d - 1)) for v, d, t, _ in _triangles_and_degree_iter(G) + ] + # If the graph is empty + if len(triangles_contri) == 0: + return 0 + triangles, contri = map(sum, zip(*triangles_contri)) + return 0 if triangles == 0 else triangles / contri + + +@nx._dispatchable +def square_clustering(G, nodes=None): + r"""Compute the squares clustering coefficient for nodes. + + For each node return the fraction of possible squares that exist at + the node [1]_ + + .. math:: + C_4(v) = \frac{ \sum_{u=1}^{k_v} + \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v} + \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]}, + + where :math:`q_v(u,w)` are the number of common neighbors of :math:`u` and + :math:`w` other than :math:`v` (ie squares), and :math:`a_v(u,w) = (k_u - + (1+q_v(u,w)+\theta_{uv})) + (k_w - (1+q_v(u,w)+\theta_{uw}))`, where + :math:`\theta_{uw} = 1` if :math:`u` and :math:`w` are connected and 0 + otherwise. [2]_ + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute clustering for nodes in this container. + + Returns + ------- + c4 : dictionary + A dictionary keyed by node with the square clustering coefficient value. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.square_clustering(G, 0)) + 1.0 + >>> print(nx.square_clustering(G)) + {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} + + Notes + ----- + Self loops are ignored. 
+ + While :math:`C_3(v)` (triangle clustering) gives the probability that + two neighbors of node v are connected with each other, :math:`C_4(v)` is + the probability that two neighbors of node v share a common + neighbor different from v. This algorithm can be applied to both + bipartite and unipartite networks. + + References + ---------- + .. [1] Pedro G. Lind, Marta C. González, and Hans J. Herrmann. 2005 + Cycles and clustering in bipartite networks. + Physical Review E (72) 056127. + .. [2] Zhang, Peng et al. Clustering Coefficient and Community Structure of + Bipartite Networks. Physica A: Statistical Mechanics and its Applications 387.27 (2008): 6869–6875. + https://arxiv.org/abs/0710.0117v1 + """ + if nodes is None: + node_iter = G + else: + node_iter = G.nbunch_iter(nodes) + clustering = {} + _G_adj = G._adj + + class GAdj(dict): + """Calculate (and cache) node neighbor sets excluding self-loops.""" + + def __missing__(self, v): + v_neighbors = self[v] = set(_G_adj[v]) + v_neighbors.discard(v) # Ignore self-loops + return v_neighbors + + G_adj = GAdj() # Values are sets of neighbors (no self-loops) + + for v in node_iter: + v_neighbors = G_adj[v] + v_degrees_m1 = len(v_neighbors) - 1 # degrees[v] - 1 (used below) + if v_degrees_m1 <= 0: + # Can't form a square without at least two neighbors + clustering[v] = 0 + continue + + # Count squares with nodes u-v-w-x from the current node v. + # Terms of the denominator: potential = uw_degrees - uw_count - triangles - squares + # uw_degrees: degrees[u] + degrees[w] for each u-w combo + uw_degrees = 0 + # uw_count: 1 for each u and 1 for each w for all combos (degrees * (degrees - 1)) + uw_count = len(v_neighbors) * v_degrees_m1 + # triangles: 1 for each edge where u-w or w-u are connected (i.e. triangles) + triangles = 0 + # squares: the number of squares (also the numerator) + squares = 0 + + # Iterate over all neighbors + for u in v_neighbors: + u_neighbors = G_adj[u] + uw_degrees += len(u_neighbors) * v_degrees_m1 + # P2 from https://arxiv.org/abs/2007.11111 + p2 = len(u_neighbors & v_neighbors) + # triangles is C_3, sigma_4 from https://arxiv.org/abs/2007.11111 + # This double-counts triangles compared to `triangles` function + triangles += p2 + # squares is C_4, sigma_12 from https://arxiv.org/abs/2007.11111 + # Include this term, b/c a neighbor u can also be a neighbor of neighbor x + squares += p2 * (p2 - 1) # Will divide by 2 later + + # And iterate over all neighbors of neighbors. + # These nodes x may be the corners opposite v in squares u-v-w-x. + two_hop_neighbors = set.union(*(G_adj[u] for u in v_neighbors)) + two_hop_neighbors -= v_neighbors # Neighbors already counted above + two_hop_neighbors.discard(v) + for x in two_hop_neighbors: + p2 = len(v_neighbors & G_adj[x]) + squares += p2 * (p2 - 1) # Will divide by 2 later + + squares //= 2 + potential = uw_degrees - uw_count - triangles - squares + if potential > 0: + clustering[v] = squares / potential + else: + clustering[v] = 0 + if nodes in G: + # Return the value of the sole entry in the dictionary. + return clustering[nodes] + return clustering + + +@not_implemented_for("directed") +@nx._dispatchable +def generalized_degree(G, nodes=None): + r"""Compute the generalized degree for nodes. + + For each node, the generalized degree shows how many edges of given + triangle multiplicity the node is connected to. The triangle multiplicity + of an edge is the number of triangles an edge participates in. 
The + generalized degree of node :math:`i` can be written as a vector + :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, k_i^{(N-2)})` where + :math:`k_i^{(j)}` is the number of edges attached to node :math:`i` that + participate in :math:`j` triangles. + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute the generalized degree for nodes in this container. + + Returns + ------- + out : Counter, or dictionary of Counters + Generalized degree of specified nodes. The Counter is keyed by edge + triangle multiplicity. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.generalized_degree(G, 0)) + Counter({3: 4}) + >>> print(nx.generalized_degree(G)) + {0: Counter({3: 4}), 1: Counter({3: 4}), 2: Counter({3: 4}), 3: Counter({3: 4}), 4: Counter({3: 4})} + + To recover the number of triangles attached to a node: + + >>> k1 = nx.generalized_degree(G, 0) + >>> sum([k * v for k, v in k1.items()]) / 2 == nx.triangles(G, 0) + True + + Notes + ----- + Self loops are ignored. + + In a network of N nodes, the highest triangle multiplicity an edge can have + is N-2. + + The return value does not include a `zero` entry if no edges of a + particular triangle multiplicity are present. + + The number of triangles node :math:`i` is attached to can be recovered from + the generalized degree :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, + k_i^{(N-2)})` by :math:`(k_i^{(1)}+2k_i^{(2)}+\dotsc +(N-2)k_i^{(N-2)})/2`. + + References + ---------- + .. [1] Networks with arbitrary edge multiplicities by V. Zlatić, + D. Garlaschelli and G. Caldarelli, EPL (Europhysics Letters), + Volume 97, Number 2 (2012). + https://iopscience.iop.org/article/10.1209/0295-5075/97/28005 + """ + if nodes in G: + return next(_triangles_and_degree_iter(G, nodes))[3] + return {v: gd for v, d, t, gd in _triangles_and_degree_iter(G, nodes)} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__init__.py new file mode 100644 index 0000000..39381d9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__init__.py @@ -0,0 +1,4 @@ +from networkx.algorithms.coloring.greedy_coloring import * +from networkx.algorithms.coloring.equitable_coloring import equitable_color + +__all__ = ["greedy_color", "equitable_color"] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b1f482b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-312.pyc new file mode 100644 index 0000000..004ec5b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-312.pyc new file mode 100644 index 0000000..c387513 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/equitable_coloring.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/equitable_coloring.py new file mode 100644 index 0000000..e464a07 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/equitable_coloring.py @@ -0,0 +1,505 @@ +""" +Equitable coloring of graphs with bounded degree. +""" + +from collections import defaultdict + +import networkx as nx + +__all__ = ["equitable_color"] + + +@nx._dispatchable +def is_coloring(G, coloring): + """Determine if the coloring is a valid coloring for the graph G.""" + # Verify that the coloring is valid. + return all(coloring[s] != coloring[d] for s, d in G.edges) + + +@nx._dispatchable +def is_equitable(G, coloring, num_colors=None): + """Determines if the coloring is valid and equitable for the graph G.""" + + if not is_coloring(G, coloring): + return False + + # Verify whether it is equitable. + color_set_size = defaultdict(int) + for color in coloring.values(): + color_set_size[color] += 1 + + if num_colors is not None: + for color in range(num_colors): + if color not in color_set_size: + # These colors do not have any vertices attached to them. + color_set_size[color] = 0 + + # If there are more than 2 distinct values, the coloring cannot be equitable + all_set_sizes = set(color_set_size.values()) + if len(all_set_sizes) == 0 and num_colors is None: # Was an empty graph + return True + elif len(all_set_sizes) == 1: + return True + elif len(all_set_sizes) == 2: + a, b = list(all_set_sizes) + return abs(a - b) <= 1 + else: # len(all_set_sizes) > 2: + return False + + +def make_C_from_F(F): + C = defaultdict(list) + for node, color in F.items(): + C[color].append(node) + + return C + + +def make_N_from_L_C(L, C): + nodes = L.keys() + colors = C.keys() + return { + (node, color): sum(1 for v in L[node] if v in C[color]) + for node in nodes + for color in colors + } + + +def make_H_from_C_N(C, N): + return { + (c1, c2): sum(1 for node in C[c1] if N[(node, c2)] == 0) for c1 in C for c2 in C + } + + +def change_color(u, X, Y, N, H, F, C, L): + """Change the color of 'u' from X to Y and update N, H, F, C.""" + assert F[u] == X and X != Y + + # Change the class of 'u' from X to Y + F[u] = Y + + for k in C: + # 'u' witnesses an edge from k -> Y instead of from k -> X now. + if N[u, k] == 0: + H[(X, k)] -= 1 + H[(Y, k)] += 1 + + for v in L[u]: + # 'v' has lost a neighbor in X and gained one in Y + N[(v, X)] -= 1 + N[(v, Y)] += 1 + + if N[(v, X)] == 0: + # 'v' witnesses F[v] -> X + H[(F[v], X)] += 1 + + if N[(v, Y)] == 1: + # 'v' no longer witnesses F[v] -> Y + H[(F[v], Y)] -= 1 + + C[X].remove(u) + C[Y].append(u) + + +def move_witnesses(src_color, dst_color, N, H, F, C, T_cal, L): + """Move witness along a path from src_color to dst_color.""" + X = src_color + while X != dst_color: + Y = T_cal[X] + # Move _any_ witness from X to Y = T_cal[X] + w = next(x for x in C[X] if N[(x, Y)] == 0) + change_color(w, X, Y, N=N, H=H, F=F, C=C, L=L) + X = Y + + +@nx._dispatchable(mutates_input=True) +def pad_graph(G, num_colors): + """Add a disconnected complete clique K_p such that the number of nodes in + the graph becomes a multiple of `num_colors`. + + Assumes that the graph's nodes are labelled using integers. + + Returns the number of nodes with each color. 
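+
+    For example (a sketch; note this helper mutates `G` in place and
+    assumes integer node labels): padding a 10-node graph for
+    ``num_colors=4`` adds a disconnected :math:`K_2`, giving 12 nodes:
+
+    >>> G = nx.path_graph(10)
+    >>> pad_graph(G, 4)
+    3
+    >>> len(G)
+    12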
+ """ + + n_ = len(G) + r = num_colors - 1 + + # Ensure that the number of nodes in G is a multiple of (r + 1) + s = n_ // (r + 1) + if n_ != s * (r + 1): + p = (r + 1) - n_ % (r + 1) + s += 1 + + # Complete graph K_p between (imaginary) nodes [n_, ... , n_ + p] + K = nx.relabel_nodes(nx.complete_graph(p), {idx: idx + n_ for idx in range(p)}) + G.add_edges_from(K.edges) + + return s + + +def procedure_P(V_minus, V_plus, N, H, F, C, L, excluded_colors=None): + """Procedure P as described in the paper.""" + + if excluded_colors is None: + excluded_colors = set() + + A_cal = set() + T_cal = {} + R_cal = [] + + # BFS to determine A_cal, i.e. colors reachable from V- + reachable = [V_minus] + marked = set(reachable) + idx = 0 + + while idx < len(reachable): + pop = reachable[idx] + idx += 1 + + A_cal.add(pop) + R_cal.append(pop) + + # TODO: Checking whether a color has been visited can be made faster by + # using a look-up table instead of testing for membership in a set by a + # logarithmic factor. + next_layer = [] + for k in C: + if ( + H[(k, pop)] > 0 + and k not in A_cal + and k not in excluded_colors + and k not in marked + ): + next_layer.append(k) + + for dst in next_layer: + # Record that `dst` can reach `pop` + T_cal[dst] = pop + + marked.update(next_layer) + reachable.extend(next_layer) + + # Variables for the algorithm + b = len(C) - len(A_cal) + + if V_plus in A_cal: + # Easy case: V+ is in A_cal + # Move one node from V+ to V- using T_cal to find the parents. + move_witnesses(V_plus, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L) + else: + # If there is a solo edge, we can resolve the situation by + # moving witnesses from B to A, making G[A] equitable and then + # recursively balancing G[B - w] with a different V_minus and + # but the same V_plus. + + A_0 = set() + A_cal_0 = set() + num_terminal_sets_found = 0 + made_equitable = False + + for W_1 in R_cal[::-1]: + for v in C[W_1]: + X = None + + for U in C: + if N[(v, U)] == 0 and U in A_cal and U != W_1: + X = U + + # v does not witness an edge in H[A_cal] + if X is None: + continue + + for U in C: + # Note: Departing from the paper here. + if N[(v, U)] >= 1 and U not in A_cal: + X_prime = U + w = v + + try: + # Finding the solo neighbor of w in X_prime + y = next( + node + for node in L[w] + if F[node] == X_prime and N[(node, W_1)] == 1 + ) + except StopIteration: + pass + else: + W = W_1 + + # Move w from W to X, now X has one extra node. + change_color(w, W, X, N=N, H=H, F=F, C=C, L=L) + + # Move witness from X to V_minus, making the coloring + # equitable. + move_witnesses( + src_color=X, + dst_color=V_minus, + N=N, + H=H, + F=F, + C=C, + T_cal=T_cal, + L=L, + ) + + # Move y from X_prime to W, making W the correct size. + change_color(y, X_prime, W, N=N, H=H, F=F, C=C, L=L) + + # Then call the procedure on G[B - y] + procedure_P( + V_minus=X_prime, + V_plus=V_plus, + N=N, + H=H, + C=C, + F=F, + L=L, + excluded_colors=excluded_colors.union(A_cal), + ) + made_equitable = True + break + + if made_equitable: + break + else: + # No node in W_1 was found such that + # it had a solo-neighbor. + A_cal_0.add(W_1) + A_0.update(C[W_1]) + num_terminal_sets_found += 1 + + if num_terminal_sets_found == b: + # Otherwise, construct the maximal independent set and find + # a pair of z_1, z_2 as in Case II. 
+ + # BFS to determine B_cal': the set of colors reachable from V+ + B_cal_prime = set() + T_cal_prime = {} + + reachable = [V_plus] + marked = set(reachable) + idx = 0 + while idx < len(reachable): + pop = reachable[idx] + idx += 1 + + B_cal_prime.add(pop) + + # No need to check for excluded_colors here because + # they only exclude colors from A_cal + next_layer = [ + k + for k in C + if H[(pop, k)] > 0 and k not in B_cal_prime and k not in marked + ] + + for dst in next_layer: + T_cal_prime[pop] = dst + + marked.update(next_layer) + reachable.extend(next_layer) + + # Construct the independent set of G[B'] + I_set = set() + I_covered = set() + W_covering = {} + + B_prime = [node for k in B_cal_prime for node in C[k]] + + # Add the nodes in V_plus to I first. + for z in C[V_plus] + B_prime: + if z in I_covered or F[z] not in B_cal_prime: + continue + + I_set.add(z) + I_covered.add(z) + I_covered.update(list(L[z])) + + for w in L[z]: + if F[w] in A_cal_0 and N[(z, F[w])] == 1: + if w not in W_covering: + W_covering[w] = z + else: + # Found z1, z2 which have the same solo + # neighbor in some W + z_1 = W_covering[w] + # z_2 = z + + Z = F[z_1] + W = F[w] + + # shift nodes along W, V- + move_witnesses( + W, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L + ) + + # shift nodes along V+ to Z + move_witnesses( + V_plus, + Z, + N=N, + H=H, + F=F, + C=C, + T_cal=T_cal_prime, + L=L, + ) + + # change color of z_1 to W + change_color(z_1, Z, W, N=N, H=H, F=F, C=C, L=L) + + # change color of w to some color in B_cal + W_plus = next( + k for k in C if N[(w, k)] == 0 and k not in A_cal + ) + change_color(w, W, W_plus, N=N, H=H, F=F, C=C, L=L) + + # recurse with G[B \cup W*] + excluded_colors.update( + [k for k in C if k != W and k not in B_cal_prime] + ) + procedure_P( + V_minus=W, + V_plus=W_plus, + N=N, + H=H, + C=C, + F=F, + L=L, + excluded_colors=excluded_colors, + ) + + made_equitable = True + break + + if made_equitable: + break + else: + assert False, ( + "Must find a w which is the solo neighbor " + "of two vertices in B_cal_prime." + ) + + if made_equitable: + break + + +@nx._dispatchable +def equitable_color(G, num_colors): + """Provides an equitable coloring for nodes of `G`. + + Attempts to color a graph using `num_colors` colors, where no neighbors of + a node can have same color as the node itself and the number of nodes with + each color differ by at most 1. `num_colors` must be greater than the + maximum degree of `G`. The algorithm is described in [1]_ and has + complexity O(num_colors * n**2). + + Parameters + ---------- + G : networkX graph + The nodes of this graph will be colored. + + num_colors : number of colors to use + This number must be at least one more than the maximum degree of nodes + in the graph. + + Returns + ------- + A dictionary with keys representing nodes and values representing + corresponding coloring. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> nx.coloring.equitable_color(G, num_colors=3) # doctest: +SKIP + {0: 2, 1: 1, 2: 2, 3: 0} + + Raises + ------ + NetworkXAlgorithmError + If `num_colors` is not at least the maximum degree of the graph `G` + + References + ---------- + .. [1] Kierstead, H. A., Kostochka, A. V., Mydlarz, M., & Szemerédi, E. + (2010). A fast algorithm for equitable coloring. Combinatorica, 30(2), + 217-224. + """ + + # Map nodes to integers for simplicity later. 
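+    # (Integer labels are what `pad_graph` below assumes: the padding
+    # clique is built on the integer labels n_, ..., n_ + p - 1.)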
+ nodes_to_int = {} + int_to_nodes = {} + + for idx, node in enumerate(G.nodes): + nodes_to_int[node] = idx + int_to_nodes[idx] = node + + G = nx.relabel_nodes(G, nodes_to_int, copy=True) + + # Basic graph statistics and sanity check. + if len(G.nodes) > 0: + r_ = max(G.degree(node) for node in G.nodes) + else: + r_ = 0 + + if r_ >= num_colors: + raise nx.NetworkXAlgorithmError( + f"Graph has maximum degree {r_}, needs " + f"{r_ + 1} (> {num_colors}) colors for guaranteed coloring." + ) + + # Ensure that the number of nodes in G is a multiple of (r + 1) + pad_graph(G, num_colors) + + # Starting the algorithm. + # L = {node: list(G.neighbors(node)) for node in G.nodes} + L_ = {node: [] for node in G.nodes} + + # Arbitrary equitable allocation of colors to nodes. + F = {node: idx % num_colors for idx, node in enumerate(G.nodes)} + + C = make_C_from_F(F) + + # The neighborhood is empty initially. + N = make_N_from_L_C(L_, C) + + # Currently all nodes witness all edges. + H = make_H_from_C_N(C, N) + + # Start of algorithm. + edges_seen = set() + + for u in sorted(G.nodes): + for v in sorted(G.neighbors(u)): + # Do not double count edges if (v, u) has already been seen. + if (v, u) in edges_seen: + continue + + edges_seen.add((u, v)) + + L_[u].append(v) + L_[v].append(u) + + N[(u, F[v])] += 1 + N[(v, F[u])] += 1 + + if F[u] != F[v]: + # Were 'u' and 'v' witnesses for F[u] -> F[v] or F[v] -> F[u]? + if N[(u, F[v])] == 1: + H[F[u], F[v]] -= 1 # u cannot witness an edge between F[u], F[v] + + if N[(v, F[u])] == 1: + H[F[v], F[u]] -= 1 # v cannot witness an edge between F[v], F[u] + + if N[(u, F[u])] != 0: + # Find the first color where 'u' does not have any neighbors. + Y = next(k for k in C if N[(u, k)] == 0) + X = F[u] + change_color(u, X, Y, N=N, H=H, F=F, C=C, L=L_) + + # Procedure P + procedure_P(V_minus=X, V_plus=Y, N=N, H=H, F=F, C=C, L=L_) + + return {int_to_nodes[x]: F[x] for x in int_to_nodes} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/greedy_coloring.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/greedy_coloring.py new file mode 100644 index 0000000..311bc3a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/greedy_coloring.py @@ -0,0 +1,565 @@ +""" +Greedy graph coloring using various strategies. +""" + +import itertools +from collections import defaultdict, deque + +import networkx as nx +from networkx.utils import arbitrary_element, py_random_state + +__all__ = [ + "greedy_color", + "strategy_connected_sequential", + "strategy_connected_sequential_bfs", + "strategy_connected_sequential_dfs", + "strategy_independent_set", + "strategy_largest_first", + "strategy_random_sequential", + "strategy_saturation_largest_first", + "strategy_smallest_last", +] + + +def strategy_largest_first(G, colors): + """Returns a list of the nodes of ``G`` in decreasing order by + degree. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + return sorted(G, key=G.degree, reverse=True) + + +@py_random_state(2) +def strategy_random_sequential(G, colors, seed=None): + """Returns a random permutation of the nodes of ``G`` as a list. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + """ + nodes = list(G) + seed.shuffle(nodes) + return nodes + + +def strategy_smallest_last(G, colors): + """Returns a deque of the nodes of ``G``, "smallest" last. 
+ + Specifically, the degrees of each node are tracked in a bucket queue. + From this, the node of minimum degree is repeatedly popped from the + graph, updating its neighbors' degrees. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + This implementation of the strategy runs in $O(n + m)$ time + (ignoring polylogarithmic factors), where $n$ is the number of nodes + and $m$ is the number of edges. + + This strategy is related to :func:`strategy_independent_set`: if we + interpret each node removed as an independent set of size one, then + this strategy chooses an independent set of size one instead of a + maximal independent set. + + """ + H = G.copy() + result = deque() + + # Build initial degree list (i.e. the bucket queue data structure) + degrees = defaultdict(set) # set(), for fast random-access removals + lbound = float("inf") + for node, d in H.degree(): + degrees[d].add(node) + lbound = min(lbound, d) # Lower bound on min-degree. + + def find_min_degree(): + # Save time by starting the iterator at `lbound`, not 0. + # The value that we find will be our new `lbound`, which we set later. + return next(d for d in itertools.count(lbound) if d in degrees) + + for _ in G: + # Pop a min-degree node and add it to the list. + min_degree = find_min_degree() + u = degrees[min_degree].pop() + if not degrees[min_degree]: # Clean up the degree list. + del degrees[min_degree] + result.appendleft(u) + + # Update degrees of removed node's neighbors. + for v in H[u]: + degree = H.degree(v) + degrees[degree].remove(v) + if not degrees[degree]: # Clean up the degree list. + del degrees[degree] + degrees[degree - 1].add(v) + + # Finally, remove the node. + H.remove_node(u) + lbound = min_degree - 1 # Subtract 1 in case of tied neighbors. + + return result + + +def _maximal_independent_set(G): + """Returns a maximal independent set of nodes in ``G`` by repeatedly + choosing an independent node of minimum degree (with respect to the + subgraph of unchosen nodes). + + """ + result = set() + remaining = set(G) + while remaining: + G = G.subgraph(remaining) + v = min(remaining, key=G.degree) + result.add(v) + remaining -= set(G[v]) | {v} + return result + + +def strategy_independent_set(G, colors): + """Uses a greedy independent set removal strategy to determine the + colors. + + This function updates ``colors`` **in-place** and return ``None``, + unlike the other strategy functions in this module. + + This algorithm repeatedly finds and removes a maximal independent + set, assigning each node in the set an unused color. + + ``G`` is a NetworkX graph. + + This strategy is related to :func:`strategy_smallest_last`: in that + strategy, an independent set of size one is chosen at each step + instead of a maximal independent set. + + """ + remaining_nodes = set(G) + while len(remaining_nodes) > 0: + nodes = _maximal_independent_set(G.subgraph(remaining_nodes)) + remaining_nodes -= nodes + yield from nodes + + +def strategy_connected_sequential_bfs(G, colors): + """Returns an iterable over nodes in ``G`` in the order given by a + breadth-first traversal. + + The generated sequence has the property that for each node except + the first, at least one neighbor appeared earlier in the sequence. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + return strategy_connected_sequential(G, colors, "bfs") + + +def strategy_connected_sequential_dfs(G, colors): + """Returns an iterable over nodes in ``G`` in the order given by a + depth-first traversal. 
+ + The generated sequence has the property that for each node except + the first, at least one neighbor appeared earlier in the sequence. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + return strategy_connected_sequential(G, colors, "dfs") + + +def strategy_connected_sequential(G, colors, traversal="bfs"): + """Returns an iterable over nodes in ``G`` in the order given by a + breadth-first or depth-first traversal. + + ``traversal`` must be one of the strings ``'dfs'`` or ``'bfs'``, + representing depth-first traversal or breadth-first traversal, + respectively. + + The generated sequence has the property that for each node except + the first, at least one neighbor appeared earlier in the sequence. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + if traversal == "bfs": + traverse = nx.bfs_edges + elif traversal == "dfs": + traverse = nx.dfs_edges + else: + raise nx.NetworkXError( + "Please specify one of the strings 'bfs' or" + " 'dfs' for connected sequential ordering" + ) + for component in nx.connected_components(G): + source = arbitrary_element(component) + # Yield the source node, then all the nodes in the specified + # traversal order. + yield source + for _, end in traverse(G.subgraph(component), source): + yield end + + +def strategy_saturation_largest_first(G, colors): + """Iterates over all the nodes of ``G`` in "saturation order" (also + known as "DSATUR"). + + ``G`` is a NetworkX graph. ``colors`` is a dictionary mapping nodes of + ``G`` to colors, for those nodes that have already been colored. + + """ + distinct_colors = {v: set() for v in G} + + # Add the node color assignments given in colors to the + # distinct colors set for each neighbor of that node + for node, color in colors.items(): + for neighbor in G[node]: + distinct_colors[neighbor].add(color) + + # Check that the color assignments in colors are valid + # i.e. no neighboring nodes have the same color + if len(colors) >= 2: + for node, color in colors.items(): + if color in distinct_colors[node]: + raise nx.NetworkXError("Neighboring nodes must have different colors") + + # If 0 nodes have been colored, simply choose the node of highest degree. + if not colors: + node = max(G, key=G.degree) + yield node + # Add the color 0 to the distinct colors set for each + # neighbor of that node. + for v in G[node]: + distinct_colors[v].add(0) + + while len(G) != len(colors): + # Update the distinct color sets for the neighbors. + for node, color in colors.items(): + for neighbor in G[node]: + distinct_colors[neighbor].add(color) + + # Compute the maximum saturation and the set of nodes that + # achieve that saturation. + saturation = {v: len(c) for v, c in distinct_colors.items() if v not in colors} + # Yield the node with the highest saturation, and break ties by + # degree. + node = max(saturation, key=lambda v: (saturation[v], G.degree(v))) + yield node + + +#: Dictionary mapping name of a strategy as a string to the strategy function. 
+STRATEGIES = {
+    "largest_first": strategy_largest_first,
+    "random_sequential": strategy_random_sequential,
+    "smallest_last": strategy_smallest_last,
+    "independent_set": strategy_independent_set,
+    "connected_sequential_bfs": strategy_connected_sequential_bfs,
+    "connected_sequential_dfs": strategy_connected_sequential_dfs,
+    "connected_sequential": strategy_connected_sequential,
+    "saturation_largest_first": strategy_saturation_largest_first,
+    "DSATUR": strategy_saturation_largest_first,
+}
+
+
+@nx._dispatchable
+def greedy_color(G, strategy="largest_first", interchange=False):
+    """Color a graph using various strategies of greedy graph coloring.
+
+    Attempts to color a graph using as few colors as possible, where no
+    two adjacent nodes may receive the same color. The given strategy
+    determines the order in which nodes are colored.
+
+    The strategies are described in [1]_, and smallest-last is based on
+    [2]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    strategy : string or function(G, colors)
+        A function (or a string representing a function) that provides
+        the coloring strategy, by returning nodes in the ordering they
+        should be colored. ``G`` is the graph, and ``colors`` is a
+        dictionary of the currently assigned colors, keyed by nodes. The
+        function must return an iterable over all the nodes in ``G``.
+
+        If the strategy function is a generator (that is, a function
+        with ``yield`` statements), keep in mind that the ``colors``
+        dictionary will be updated after each ``yield``, since this
+        function chooses colors greedily.
+
+        If ``strategy`` is a string, it must be one of the following,
+        each of which represents one of the built-in strategy functions.
+
+        * ``'largest_first'``
+        * ``'random_sequential'``
+        * ``'smallest_last'``
+        * ``'independent_set'``
+        * ``'connected_sequential_bfs'``
+        * ``'connected_sequential_dfs'``
+        * ``'connected_sequential'`` (alias for the previous strategy)
+        * ``'saturation_largest_first'``
+        * ``'DSATUR'`` (alias for the previous strategy)
+
+    interchange : bool
+        Will use the color interchange algorithm described by [3]_ if set
+        to ``True``.
+
+        Note that ``saturation_largest_first`` and ``independent_set``
+        do not work with interchange. Furthermore, if you use
+        interchange with your own strategy function, you cannot rely
+        on the values in the ``colors`` argument.
+
+    Returns
+    -------
+    A dictionary mapping each node of ``G`` to the color assigned to it.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(4)
+    >>> d = nx.coloring.greedy_color(G, strategy="largest_first")
+    >>> d in [{0: 0, 1: 1, 2: 0, 3: 1}, {0: 1, 1: 0, 2: 1, 3: 0}]
+    True
+
+    Raises
+    ------
+    NetworkXPointlessConcept
+        If ``strategy`` is ``saturation_largest_first`` or
+        ``independent_set`` and ``interchange`` is ``True``.
+
+    References
+    ----------
+    .. [1] Adrian Kosowski, and Krzysztof Manuszewski,
+       Classical Coloring of Graphs, Graph Colorings, 2-19, 2004.
+       ISBN 0-8218-3458-4.
+    .. [2] David W. Matula, and Leland L. Beck, "Smallest-last
+       ordering and clustering and graph coloring algorithms." *J. ACM* 30,
+       3 (July 1983), 417–427.
+    .. [3] Maciej M. Sysło, Narsingh Deo, Janusz S. Kowalik,
+       Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983.
+       ISBN 0-486-45353-7.
+
+    """
+    if len(G) == 0:
+        return {}
+    # Determine the strategy provided by the caller.
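+    # ``strategy`` may be a string naming a built-in strategy or a
+    # user-supplied callable; strings resolve through the STRATEGIES
+    # table, and anything left unresolved must itself be callable.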
+    strategy = STRATEGIES.get(strategy, strategy)
+    if not callable(strategy):
+        raise nx.NetworkXError(
+            f"strategy must be callable or a valid string. {strategy} not valid."
+        )
+    # Perform some validation on the arguments before executing any
+    # strategy functions.
+    if interchange:
+        if strategy is strategy_independent_set:
+            msg = "interchange cannot be used with independent_set"
+            raise nx.NetworkXPointlessConcept(msg)
+        if strategy is strategy_saturation_largest_first:
+            msg = "interchange cannot be used with saturation_largest_first"
+            raise nx.NetworkXPointlessConcept(msg)
+    colors = {}
+    nodes = strategy(G, colors)
+    if interchange:
+        return _greedy_coloring_with_interchange(G, nodes)
+    for u in nodes:
+        # Set to keep track of colors of neighbors
+        nbr_colors = {colors[v] for v in G[u] if v in colors}
+        # Find the first unused color.
+        for color in itertools.count():
+            if color not in nbr_colors:
+                break
+        # Assign the new color to the current node.
+        colors[u] = color
+    return colors
+
+
+# Tools for coloring with interchanges
+class _Node:
+    __slots__ = ["node_id", "color", "adj_list", "adj_color"]
+
+    def __init__(self, node_id, n):
+        self.node_id = node_id
+        self.color = -1
+        self.adj_list = None
+        self.adj_color = [None for _ in range(n)]
+
+    def __repr__(self):
+        return (
+            f"Node_id: {self.node_id}, Color: {self.color}, "
+            f"Adj_list: ({self.adj_list}), adj_color: ({self.adj_color})"
+        )
+
+    def assign_color(self, adj_entry, color):
+        adj_entry.col_prev = None
+        adj_entry.col_next = self.adj_color[color]
+        self.adj_color[color] = adj_entry
+        if adj_entry.col_next is not None:
+            adj_entry.col_next.col_prev = adj_entry
+
+    def clear_color(self, adj_entry, color):
+        if adj_entry.col_prev is None:
+            self.adj_color[color] = adj_entry.col_next
+        else:
+            adj_entry.col_prev.col_next = adj_entry.col_next
+        if adj_entry.col_next is not None:
+            adj_entry.col_next.col_prev = adj_entry.col_prev
+
+    def iter_neighbors(self):
+        adj_node = self.adj_list
+        while adj_node is not None:
+            yield adj_node
+            adj_node = adj_node.next
+
+    def iter_neighbors_color(self, color):
+        adj_color_node = self.adj_color[color]
+        while adj_color_node is not None:
+            yield adj_color_node.node_id
+            adj_color_node = adj_color_node.col_next
+
+
+class _AdjEntry:
+    __slots__ = ["node_id", "next", "mate", "col_next", "col_prev"]
+
+    def __init__(self, node_id):
+        self.node_id = node_id
+        self.next = None
+        self.mate = None
+        self.col_next = None
+        self.col_prev = None
+
+    def __repr__(self):
+        col_next = None if self.col_next is None else self.col_next.node_id
+        col_prev = None if self.col_prev is None else self.col_prev.node_id
+        return (
+            f"Node_id: {self.node_id}, Next: ({self.next}), "
+            f"Mate: ({self.mate.node_id}), "
+            f"col_next: ({col_next}), col_prev: ({col_prev})"
+        )
+
+
+def _greedy_coloring_with_interchange(G, nodes):
+    """Return a coloring for ``G`` using the interchange approach.
+
+    This procedure is an adaptation of the algorithm described by [1]_,
+    and is an implementation of coloring with interchange. Be advised
+    that the data structures used are rather complex, because they are
+    optimized to minimize the time spent identifying subcomponents of
+    the graph, which are possible candidates for color interchange.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph to be colored
+
+    nodes : list
+        Nodes ordered using the strategy of choice
+
+    Returns
+    -------
+    dict :
+        A dictionary keyed by node to a color value
+
+    References
+    ----------
+    .. [1] Maciej M.
Syslo, Narsingh Deo, Janusz S. Kowalik, + Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983. + ISBN 0-486-45353-7. + """ + n = len(G) + + graph = {node: _Node(node, n) for node in G} + + for node1, node2 in G.edges(): + adj_entry1 = _AdjEntry(node2) + adj_entry2 = _AdjEntry(node1) + adj_entry1.mate = adj_entry2 + adj_entry2.mate = adj_entry1 + node1_head = graph[node1].adj_list + adj_entry1.next = node1_head + graph[node1].adj_list = adj_entry1 + node2_head = graph[node2].adj_list + adj_entry2.next = node2_head + graph[node2].adj_list = adj_entry2 + + k = 0 + for node in nodes: + # Find the smallest possible, unused color + neighbors = graph[node].iter_neighbors() + col_used = {graph[adj_node.node_id].color for adj_node in neighbors} + col_used.discard(-1) + k1 = next(itertools.dropwhile(lambda x: x in col_used, itertools.count())) + + # k1 is now the lowest available color + if k1 > k: + connected = True + visited = set() + col1 = -1 + col2 = -1 + while connected and col1 < k: + col1 += 1 + neighbor_cols = graph[node].iter_neighbors_color(col1) + col1_adj = list(neighbor_cols) + + col2 = col1 + while connected and col2 < k: + col2 += 1 + visited = set(col1_adj) + frontier = list(col1_adj) + i = 0 + while i < len(frontier): + search_node = frontier[i] + i += 1 + col_opp = col2 if graph[search_node].color == col1 else col1 + neighbor_cols = graph[search_node].iter_neighbors_color(col_opp) + + for neighbor in neighbor_cols: + if neighbor not in visited: + visited.add(neighbor) + frontier.append(neighbor) + + # Search if node is not adj to any col2 vertex + connected = ( + len( + visited.intersection(graph[node].iter_neighbors_color(col2)) + ) + > 0 + ) + + # If connected is false then we can swap !!! + if not connected: + # Update all the nodes in the component + for search_node in visited: + graph[search_node].color = ( + col2 if graph[search_node].color == col1 else col1 + ) + col2_adj = graph[search_node].adj_color[col2] + graph[search_node].adj_color[col2] = graph[search_node].adj_color[ + col1 + ] + graph[search_node].adj_color[col1] = col2_adj + + # Update all the neighboring nodes + for search_node in visited: + col = graph[search_node].color + col_opp = col1 if col == col2 else col2 + for adj_node in graph[search_node].iter_neighbors(): + if graph[adj_node.node_id].color != col_opp: + # Direct reference to entry + adj_mate = adj_node.mate + graph[adj_node.node_id].clear_color(adj_mate, col_opp) + graph[adj_node.node_id].assign_color(adj_mate, col) + k1 = col1 + + # We can color this node color k1 + graph[node].color = k1 + k = max(k1, k) + + # Update the neighbors of this node + for adj_node in graph[node].iter_neighbors(): + adj_mate = adj_node.mate + graph[adj_node.node_id].assign_color(adj_mate, k1) + + return {node.node_id: node.color for node in graph.values()} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..df32ca1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-312.pyc new file mode 100644 index 0000000..2bb34f0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/test_coloring.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/test_coloring.py new file mode 100644 index 0000000..1e5a913 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/coloring/tests/test_coloring.py @@ -0,0 +1,863 @@ +"""Greedy coloring test suite.""" + +import itertools + +import pytest + +import networkx as nx + +is_coloring = nx.algorithms.coloring.equitable_coloring.is_coloring +is_equitable = nx.algorithms.coloring.equitable_coloring.is_equitable + + +ALL_STRATEGIES = [ + "largest_first", + "random_sequential", + "smallest_last", + "independent_set", + "connected_sequential_bfs", + "connected_sequential_dfs", + "connected_sequential", + "saturation_largest_first", + "DSATUR", +] + +# List of strategies where interchange=True results in an error +INTERCHANGE_INVALID = ["independent_set", "saturation_largest_first", "DSATUR"] + + +class TestColoring: + def test_basic_cases(self): + def check_basic_case(graph_func, n_nodes, strategy, interchange): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + assert verify_length(coloring, n_nodes) + assert verify_coloring(graph, coloring) + + for graph_func, n_nodes in BASIC_TEST_CASES.items(): + for interchange in [True, False]: + for strategy in ALL_STRATEGIES: + check_basic_case(graph_func, n_nodes, strategy, False) + if strategy not in INTERCHANGE_INVALID: + check_basic_case(graph_func, n_nodes, strategy, True) + + def test_special_cases(self): + def check_special_case(strategy, graph_func, interchange, colors): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + if not hasattr(colors, "__len__"): + colors = [colors] + assert any(verify_length(coloring, n_colors) for n_colors in colors) + assert verify_coloring(graph, coloring) + + for strategy, arglist in SPECIAL_TEST_CASES.items(): + for args in arglist: + check_special_case(strategy, args[0], args[1], args[2]) + + def test_interchange_invalid(self): + graph = one_node_graph() + for strategy in INTERCHANGE_INVALID: + pytest.raises( + nx.NetworkXPointlessConcept, + nx.coloring.greedy_color, + graph, + strategy=strategy, + interchange=True, + ) + + def test_bad_inputs(self): + graph = one_node_graph() + pytest.raises( + nx.NetworkXError, + nx.coloring.greedy_color, + graph, + strategy="invalid strategy", + ) + + def test_strategy_as_function(self): + graph = lf_shc() + colors_1 = nx.coloring.greedy_color(graph, "largest_first") + colors_2 = nx.coloring.greedy_color(graph, nx.coloring.strategy_largest_first) + assert colors_1 == colors_2 + + def test_seed_argument(self): + graph = lf_shc() + rs = nx.coloring.strategy_random_sequential + c1 = nx.coloring.greedy_color(graph, lambda g, c: rs(g, c, seed=1)) + for u, v in graph.edges: + assert c1[u] != c1[v] + + def test_is_coloring(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_coloring(G, coloring) + + 
coloring[0] = 1 + assert not is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_is_equitable(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_equitable(G, coloring) + + G.add_edges_from([(2, 3), (2, 4), (2, 5)]) + coloring[3] = 1 + coloring[4] = 1 + coloring[5] = 1 + assert is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_num_colors(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (0, 3)]) + pytest.raises(nx.NetworkXAlgorithmError, nx.coloring.equitable_color, G, 2) + + def test_equitable_color(self): + G = nx.fast_gnp_random_graph(n=10, p=0.2, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_empty(self): + G = nx.empty_graph() + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_large(self): + G = nx.fast_gnp_random_graph(100, 0.1, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring, num_colors=max_degree(G) + 1) + + def test_case_V_plus_not_in_A_cal(self): + # Hand crafted case to avoid the easy case. + L = { + 0: [2, 5], + 1: [3, 4], + 2: [0, 8], + 3: [1, 7], + 4: [1, 6], + 5: [0, 6], + 6: [4, 5], + 7: [3], + 8: [2], + } + + F = { + # Color 0 + 0: 0, + 1: 0, + # Color 1 + 2: 1, + 3: 1, + 4: 1, + 5: 1, + # Color 2 + 6: 2, + 7: 2, + 8: 2, + } + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_cast_no_solo(self): + L = { + 0: [8, 9], + 1: [10, 11], + 2: [8], + 3: [9], + 4: [10, 11], + 5: [8], + 6: [9], + 7: [10, 11], + 8: [0, 2, 5], + 9: [0, 3, 6], + 10: [1, 4, 7], + 11: [1, 4, 7], + } + + F = {0: 0, 1: 0, 2: 2, 3: 2, 4: 2, 5: 3, 6: 3, 7: 3, 8: 1, 9: 1, 10: 1, 11: 1} + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_hard_prob(self): + # Tests for two levels of recursion. + num_colors, s = 5, 5 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 10), + (0, 11), + (0, 12), + (0, 23), + (10, 4), + (10, 9), + (10, 20), + (11, 4), + (11, 8), + (11, 16), + (12, 9), + (12, 22), + (12, 23), + (23, 7), + (1, 17), + (1, 18), + (1, 19), + (1, 24), + (17, 5), + (17, 13), + (17, 22), + (18, 5), + (19, 5), + (19, 6), + (19, 8), + (24, 7), + (24, 16), + (2, 4), + (2, 13), + (2, 14), + (2, 15), + (4, 6), + (13, 5), + (13, 21), + (14, 6), + (14, 15), + (15, 6), + (15, 21), + (3, 16), + (3, 20), + (3, 21), + (3, 22), + (16, 8), + (20, 8), + (21, 9), + (22, 7), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_hardest_prob(self): + # Tests for two levels of recursion. 
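+        # 10 color classes of size s=4; reassigning node s-1 = 3 to the
+        # last class leaves class 0 deficient and class 9 overfull, the
+        # imbalance that procedure_P must repair (see the F override below).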
+ num_colors, s = 10, 4 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 19), + (0, 24), + (0, 29), + (0, 30), + (0, 35), + (19, 3), + (19, 7), + (19, 9), + (19, 15), + (19, 21), + (19, 24), + (19, 30), + (19, 38), + (24, 5), + (24, 11), + (24, 13), + (24, 20), + (24, 30), + (24, 37), + (24, 38), + (29, 6), + (29, 10), + (29, 13), + (29, 15), + (29, 16), + (29, 17), + (29, 20), + (29, 26), + (30, 6), + (30, 10), + (30, 15), + (30, 22), + (30, 23), + (30, 39), + (35, 6), + (35, 9), + (35, 14), + (35, 18), + (35, 22), + (35, 23), + (35, 25), + (35, 27), + (1, 20), + (1, 26), + (1, 31), + (1, 34), + (1, 38), + (20, 4), + (20, 8), + (20, 14), + (20, 18), + (20, 28), + (20, 33), + (26, 7), + (26, 10), + (26, 14), + (26, 18), + (26, 21), + (26, 32), + (26, 39), + (31, 5), + (31, 8), + (31, 13), + (31, 16), + (31, 17), + (31, 21), + (31, 25), + (31, 27), + (34, 7), + (34, 8), + (34, 13), + (34, 18), + (34, 22), + (34, 23), + (34, 25), + (34, 27), + (38, 4), + (38, 9), + (38, 12), + (38, 14), + (38, 21), + (38, 27), + (2, 3), + (2, 18), + (2, 21), + (2, 28), + (2, 32), + (2, 33), + (2, 36), + (2, 37), + (2, 39), + (3, 5), + (3, 9), + (3, 13), + (3, 22), + (3, 23), + (3, 25), + (3, 27), + (18, 6), + (18, 11), + (18, 15), + (18, 39), + (21, 4), + (21, 10), + (21, 14), + (21, 36), + (28, 6), + (28, 10), + (28, 14), + (28, 16), + (28, 17), + (28, 25), + (28, 27), + (32, 5), + (32, 10), + (32, 12), + (32, 16), + (32, 17), + (32, 22), + (32, 23), + (33, 7), + (33, 10), + (33, 12), + (33, 16), + (33, 17), + (33, 25), + (33, 27), + (36, 5), + (36, 8), + (36, 15), + (36, 16), + (36, 17), + (36, 25), + (36, 27), + (37, 5), + (37, 11), + (37, 15), + (37, 16), + (37, 17), + (37, 22), + (37, 23), + (39, 7), + (39, 8), + (39, 15), + (39, 22), + (39, 23), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 # V- = 0, V+ = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_strategy_saturation_largest_first(self): + def color_remaining_nodes( + G, + colored_nodes, + full_color_assignment=None, + nodes_to_add_between_calls=1, + ): + color_assignments = [] + aux_colored_nodes = colored_nodes.copy() + + node_iterator = nx.algorithms.coloring.greedy_coloring.strategy_saturation_largest_first( + G, aux_colored_nodes + ) + + for u in node_iterator: + # Set to keep track of colors of neighbors + nbr_colors = { + aux_colored_nodes[v] for v in G[u] if v in aux_colored_nodes + } + # Find the first unused color. + for color in itertools.count(): + if color not in nbr_colors: + break + aux_colored_nodes[u] = color + color_assignments.append((u, color)) + + # Color nodes between iterations + for i in range(nodes_to_add_between_calls - 1): + if not len(color_assignments) + len(colored_nodes) >= len( + full_color_assignment + ): + full_color_assignment_node, color = full_color_assignment[ + len(color_assignments) + len(colored_nodes) + ] + + # Assign the new color to the current node. 
+ aux_colored_nodes[full_color_assignment_node] = color + color_assignments.append((full_color_assignment_node, color)) + + return color_assignments, aux_colored_nodes + + for G, _, _ in SPECIAL_TEST_CASES["saturation_largest_first"]: + G = G() + + # Check that function still works when nodes are colored between iterations + for nodes_to_add_between_calls in range(1, 5): + # Get a full color assignment, (including the order in which nodes were colored) + colored_nodes = {} + full_color_assignment, full_colored_nodes = color_remaining_nodes( + G, colored_nodes + ) + + # For each node in the color assignment, add it to colored_nodes and re-run the function + for ind, (node, color) in enumerate(full_color_assignment): + colored_nodes[node] = color + + ( + partial_color_assignment, + partial_colored_nodes, + ) = color_remaining_nodes( + G, + colored_nodes, + full_color_assignment=full_color_assignment, + nodes_to_add_between_calls=nodes_to_add_between_calls, + ) + + # Check that the color assignment and order of remaining nodes are the same + assert full_color_assignment[ind + 1 :] == partial_color_assignment + assert full_colored_nodes == partial_colored_nodes + + +# ############################ Utility functions ############################ +def verify_coloring(graph, coloring): + for node in graph.nodes(): + if node not in coloring: + return False + + color = coloring[node] + for neighbor in graph.neighbors(node): + if coloring[neighbor] == color: + return False + + return True + + +def verify_length(coloring, expected): + coloring = dict_to_sets(coloring) + return len(coloring) == expected + + +def dict_to_sets(colors): + if len(colors) == 0: + return [] + + k = max(colors.values()) + 1 + sets = [set() for _ in range(k)] + + for node, color in colors.items(): + sets[color].add(node) + + return sets + + +# ############################ Graph Generation ############################ + + +def empty_graph(): + return nx.Graph() + + +def one_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1]) + return graph + + +def two_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1, 2]) + graph.add_edges_from([(1, 2)]) + return graph + + +def three_node_clique(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (1, 3), (2, 3)]) + return graph + + +def disconnected(): + graph = nx.Graph() + graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)]) + return graph + + +def rs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def slf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def slf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 4), + (2, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 8), + (7, 8), + ] + ) + return graph + + +def lf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(6, 1), (1, 4), (4, 3), (3, 2), (2, 5)]) + return graph + + +def lf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 7), + (1, 6), + (1, 3), + (1, 4), + (7, 2), + (2, 6), + (2, 3), + (2, 5), + (5, 3), + (5, 4), + (4, 3), + ] + ) + return graph + + +def sl_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + 
graph.add_edges_from( + [(1, 2), (1, 3), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def sl_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 4), + (2, 8), + (8, 4), + (8, 6), + (8, 7), + (7, 5), + (7, 6), + (3, 4), + (4, 6), + (6, 5), + (5, 3), + ] + ) + return graph + + +def gis_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def gis_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(1, 5), (2, 5), (3, 6), (4, 6), (5, 6)]) + return graph + + +def cs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5]) + graph.add_edges_from([(1, 2), (1, 5), (2, 3), (2, 4), (2, 5), (3, 4), (4, 5)]) + return graph + + +def rsi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (3, 4), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 5), + (1, 6), + (1, 7), + (2, 3), + (2, 8), + (2, 9), + (3, 4), + (3, 8), + (3, 9), + (4, 5), + (4, 6), + (4, 7), + (5, 6), + ] + ) + return graph + + +def sli_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 6), + (3, 4), + (4, 5), + (4, 6), + (5, 7), + (6, 7), + ] + ) + return graph + + +def sli_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 7), + (2, 8), + (2, 9), + (3, 6), + (3, 7), + (3, 9), + (4, 5), + (4, 6), + (4, 8), + (4, 9), + (5, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 9), + (7, 8), + (8, 9), + ] + ) + return graph + + +# -------------------------------------------------------------------------- +# Basic tests for all strategies +# For each basic graph function, specify the number of expected colors. +BASIC_TEST_CASES = { + empty_graph: 0, + one_node_graph: 1, + two_node_graph: 2, + disconnected: 2, + three_node_clique: 3, +} + + +# -------------------------------------------------------------------------- +# Special test cases. 
Each strategy has a list of tuples of the form +# (graph function, interchange, valid # of colors) +SPECIAL_TEST_CASES = { + "random_sequential": [ + (rs_shc, False, (2, 3)), + (rs_shc, True, 2), + (rsi_shc, True, (3, 4)), + ], + "saturation_largest_first": [(slf_shc, False, (3, 4)), (slf_hc, False, 4)], + "largest_first": [ + (lf_shc, False, (2, 3)), + (lf_hc, False, 4), + (lf_shc, True, 2), + (lf_hc, True, 3), + (lfi_shc, True, (3, 4)), + (lfi_hc, True, 4), + ], + "smallest_last": [ + (sl_shc, False, (3, 4)), + (sl_hc, False, 5), + (sl_shc, True, 3), + (sl_hc, True, 4), + (sli_shc, True, (3, 4)), + (sli_hc, True, 5), + ], + "independent_set": [(gis_shc, False, (2, 3)), (gis_hc, False, 3)], + "connected_sequential": [(cs_shc, False, (3, 4)), (cs_shc, True, 3)], + "connected_sequential_dfs": [(cs_shc, False, (3, 4))], +} + + +# -------------------------------------------------------------------------- +# Helper functions to test +# (graph function, interchange, valid # of colors) + + +def check_state(L, N, H, F, C): + s = len(C[0]) + num_colors = len(C.keys()) + + assert all(u in L[v] for u in L for v in L[u]) + assert all(F[u] != F[v] for u in L for v in L[u]) + assert all(len(L[u]) < num_colors for u in L) + assert all(len(C[x]) == s for x in C) + assert all(H[(c1, c2)] >= 0 for c1 in C for c2 in C) + assert all(N[(u, F[u])] == 0 for u in F) + + +def max_degree(G): + """Get the maximum degree of any node in G.""" + return max(G.degree(node) for node in G.nodes) if len(G.nodes) > 0 else 0 + + +def make_params_from_graph(G, F): + """Returns {N, L, H, C} from the given graph.""" + num_nodes = len(G) + L = {u: [] for u in range(num_nodes)} + for u, v in G.edges: + L[u].append(v) + L[v].append(u) + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + return {"N": N, "F": F, "C": C, "H": H, "L": L} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/communicability_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/communicability_alg.py new file mode 100644 index 0000000..dea156b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/communicability_alg.py @@ -0,0 +1,163 @@ +""" +Communicability. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["communicability", "communicability_exp"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def communicability(G): + r"""Returns communicability between all pairs of nodes in G. + + The communicability between pairs of nodes in G is the sum of + walks of different lengths starting at node u and ending at node v. + + Parameters + ---------- + G: graph + + Returns + ------- + comm: dictionary of dictionaries + Dictionary of dictionaries keyed by nodes with communicability + as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + communicability_exp: + Communicability between all pairs of nodes in G using spectral + decomposition. + communicability_betweenness_centrality: + Communicability betweenness centrality for each node in G. + + Notes + ----- + This algorithm uses a spectral decomposition of the adjacency matrix. + Let G=(V,E) be a simple undirected graph. 
Using the connection between
+    the powers of the adjacency matrix and the number of walks in the graph,
+    the communicability between nodes `u` and `v` based on the graph spectrum
+    is [1]_
+
+    .. math::
+        C(u,v)=\sum_{j=1}^{n}\phi_{j}(u)\phi_{j}(v)e^{\lambda_{j}},
+
+    where `\phi_{j}(u)` is the `u\rm{th}` element of the `j\rm{th}` orthonormal
+    eigenvector of the adjacency matrix associated with the eigenvalue
+    `\lambda_{j}`.
+
+    References
+    ----------
+    .. [1] Ernesto Estrada, Naomichi Hatano,
+       "Communicability in complex networks",
+       Phys. Rev. E 77, 036111 (2008).
+       https://arxiv.org/abs/0707.0756
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
+    >>> c = nx.communicability(G)
+    """
+    import numpy as np
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[A != 0.0] = 1
+    w, vec = np.linalg.eigh(A)
+    expw = np.exp(w)
+    mapping = dict(zip(nodelist, range(len(nodelist))))
+    c = {}
+    # computing communicabilities
+    for u in G:
+        c[u] = {}
+        for v in G:
+            s = 0
+            p = mapping[u]
+            q = mapping[v]
+            for j in range(len(nodelist)):
+                s += vec[:, j][p] * vec[:, j][q] * expw[j]
+            c[u][v] = float(s)
+    return c
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def communicability_exp(G):
+    r"""Returns communicability between all pairs of nodes in G.
+
+    The communicability between a pair of nodes (u, v) in G is the sum
+    of walks of different lengths starting at node u and ending at
+    node v.
+
+    Parameters
+    ----------
+    G: graph
+
+    Returns
+    -------
+    comm: dictionary of dictionaries
+        Dictionary of dictionaries keyed by nodes with communicability
+        as the value.
+
+    Raises
+    ------
+    NetworkXError
+        If the graph is not undirected and simple.
+
+    See Also
+    --------
+    communicability:
+        Communicability between pairs of nodes in G.
+    communicability_betweenness_centrality:
+        Communicability betweenness centrality for each node in G.
+
+    Notes
+    -----
+    This algorithm uses matrix exponentiation of the adjacency matrix.
+
+    Let G=(V,E) be a simple undirected graph. Using the connection between
+    the powers of the adjacency matrix and the number of walks in the graph,
+    the communicability between nodes u and v is [1]_,
+
+    .. math::
+        C(u,v) = (e^A)_{uv},
+
+    where `A` is the adjacency matrix of G.
+
+    References
+    ----------
+    .. [1] Ernesto Estrada, Naomichi Hatano,
+       "Communicability in complex networks",
+       Phys. Rev. E 77, 036111 (2008).
+       https://arxiv.org/abs/0707.0756
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
+    >>> c = nx.communicability_exp(G)
+    """
+    import scipy as sp
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[A != 0.0] = 1
+    # communicability matrix
+    expA = sp.linalg.expm(A)
+    mapping = dict(zip(nodelist, range(len(nodelist))))
+    c = {}
+    for u in G:
+        c[u] = {}
+        for v in G:
+            c[u][v] = float(expA[mapping[u], mapping[v]])
+    return c
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__init__.py
new file mode 100644
index 0000000..494a8e1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__init__.py
@@ -0,0 +1,28 @@
+"""Functions for computing and measuring community structure.
+ +The ``community`` subpackage can be accessed by using :mod:`networkx.community`, then accessing the +functions as attributes of ``community``. For example:: + + >>> import networkx as nx + >>> G = nx.barbell_graph(5, 1) + >>> communities_generator = nx.community.girvan_newman(G) + >>> top_level_communities = next(communities_generator) + >>> next_level_communities = next(communities_generator) + >>> sorted(map(sorted, next_level_communities)) + [[0, 1, 2, 3, 4], [5], [6, 7, 8, 9, 10]] + +""" + +from networkx.algorithms.community.asyn_fluid import * +from networkx.algorithms.community.centrality import * +from networkx.algorithms.community.divisive import * +from networkx.algorithms.community.kclique import * +from networkx.algorithms.community.kernighan_lin import * +from networkx.algorithms.community.label_propagation import * +from networkx.algorithms.community.lukes import * +from networkx.algorithms.community.modularity_max import * +from networkx.algorithms.community.quality import * +from networkx.algorithms.community.community_utils import * +from networkx.algorithms.community.louvain import * +from networkx.algorithms.community.leiden import * +from networkx.algorithms.community.local import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..44a3729 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-312.pyc new file mode 100644 index 0000000..1f02db1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-312.pyc new file mode 100644 index 0000000..1861b95 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-312.pyc new file mode 100644 index 0000000..20ac92d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/divisive.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/divisive.cpython-312.pyc new file mode 100644 index 0000000..879caeb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/divisive.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-312.pyc new file mode 100644 index 0000000..1f5b123 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-312.pyc new file mode 100644 index 0000000..9e23c05 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-312.pyc new file mode 100644 index 0000000..b5dca23 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/leiden.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/leiden.cpython-312.pyc new file mode 100644 index 0000000..18422f6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/leiden.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/local.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/local.cpython-312.pyc new file mode 100644 index 0000000..b007766 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/local.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-312.pyc new file mode 100644 index 0000000..9becd97 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-312.pyc new file mode 100644 index 0000000..e92cdb8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-312.pyc new file mode 100644 index 0000000..4f1485e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-312.pyc new file mode 100644 index 0000000..b12fe19 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/asyn_fluid.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/asyn_fluid.py new file mode 100644 index 
0000000..fea72c1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/asyn_fluid.py
@@ -0,0 +1,151 @@
+"""Asynchronous Fluid Communities algorithm for community detection."""
+
+from collections import Counter
+
+import networkx as nx
+from networkx.algorithms.components import is_connected
+from networkx.exception import NetworkXError
+from networkx.utils import groups, not_implemented_for, py_random_state
+
+__all__ = ["asyn_fluidc"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@py_random_state(3)
+@nx._dispatchable
+def asyn_fluidc(G, k, max_iter=100, seed=None):
+    """Returns communities in `G` as detected by Fluid Communities algorithm.
+
+    The asynchronous fluid communities algorithm is described in
+    [1]_. The algorithm is based on the simple idea of fluids interacting
+    in an environment, expanding and pushing each other. Its initialization
+    is random, so the communities found may vary across executions.
+
+    The algorithm proceeds as follows. First, each of the k initial
+    communities is seeded at a random vertex of the graph. Then the
+    algorithm iterates over all vertices in a random order, updating the
+    community of each vertex based on its own community and the
+    communities of its neighbors. This process is repeated until
+    convergence. At all times, each community has a total density of 1,
+    which is equally distributed among the vertices it contains. If a
+    vertex changes community, the vertex densities of the affected
+    communities are adjusted immediately. When a complete iteration over
+    all vertices is done, such that no vertex changes the community it
+    belongs to, the algorithm has converged and returns.
+
+    This is the original version of the algorithm described in [1]_.
+    Unfortunately, it does not support weighted graphs yet.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Graph must be simple and undirected.
+
+    k : integer
+        The number of communities to be found.
+
+    max_iter : integer
+        The maximum number of iterations allowed. By default 100.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    communities : iterable
+        Iterable of communities given as sets of nodes.
+
+    Notes
+    -----
+    The ``k`` argument is required; it is not optional.
+
+    References
+    ----------
+    .. [1] Parés F., Garcia-Gasulla D. et al. "Fluid Communities: A
+       Competitive and Highly Scalable Community Detection Algorithm".
+       [https://arxiv.org/pdf/1703.09307.pdf].
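+
+    Examples
+    --------
+    An illustrative run; the exact membership varies with ``seed``, but
+    exactly ``k`` communities are returned:
+
+    >>> G = nx.karate_club_graph()
+    >>> communities = list(nx.community.asyn_fluidc(G, 2, seed=7))
+    >>> len(communities)
+    2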
+ """ + # Initial checks + if not isinstance(k, int): + raise NetworkXError("k must be an integer.") + if not k > 0: + raise NetworkXError("k must be greater than 0.") + if not is_connected(G): + raise NetworkXError("Fluid Communities require connected Graphs.") + if len(G) < k: + raise NetworkXError("k cannot be bigger than the number of nodes.") + # Initialization + max_density = 1.0 + vertices = list(G) + seed.shuffle(vertices) + communities = {n: i for i, n in enumerate(vertices[:k])} + density = {} + com_to_numvertices = {} + for vertex in communities: + com_to_numvertices[communities[vertex]] = 1 + density[communities[vertex]] = max_density + # Set up control variables and start iterating + iter_count = 0 + cont = True + while cont: + cont = False + iter_count += 1 + # Loop over all vertices in graph in a random order + vertices = list(G) + seed.shuffle(vertices) + for vertex in vertices: + # Updating rule + com_counter = Counter() + # Take into account self vertex community + try: + com_counter.update({communities[vertex]: density[communities[vertex]]}) + except KeyError: + pass + # Gather neighbor vertex communities + for v in G[vertex]: + try: + com_counter.update({communities[v]: density[communities[v]]}) + except KeyError: + continue + # Check which is the community with highest density + new_com = -1 + if len(com_counter.keys()) > 0: + max_freq = max(com_counter.values()) + best_communities = [ + com + for com, freq in com_counter.items() + if (max_freq - freq) < 0.0001 + ] + # If actual vertex com in best communities, it is preserved + try: + if communities[vertex] in best_communities: + new_com = communities[vertex] + except KeyError: + pass + # If vertex community changes... + if new_com == -1: + # Set flag of non-convergence + cont = True + # Randomly chose a new community from candidates + new_com = seed.choice(best_communities) + # Update previous community status + try: + com_to_numvertices[communities[vertex]] -= 1 + density[communities[vertex]] = ( + max_density / com_to_numvertices[communities[vertex]] + ) + except KeyError: + pass + # Update new community status + communities[vertex] = new_com + com_to_numvertices[communities[vertex]] += 1 + density[communities[vertex]] = ( + max_density / com_to_numvertices[communities[vertex]] + ) + # If maximum iterations reached --> output actual results + if iter_count > max_iter: + break + # Return results by grouping communities as list of vertices + return iter(groups(communities).values()) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/centrality.py new file mode 100644 index 0000000..4328170 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/centrality.py @@ -0,0 +1,171 @@ +"""Functions for computing communities based on centrality notions.""" + +import networkx as nx + +__all__ = ["girvan_newman"] + + +@nx._dispatchable(preserve_edge_attrs="most_valuable_edge") +def girvan_newman(G, most_valuable_edge=None): + """Finds communities in a graph using the Girvan–Newman method. + + Parameters + ---------- + G : NetworkX graph + + most_valuable_edge : function + Function that takes a graph as input and outputs an edge. The + edge returned by this function will be recomputed and removed at + each iteration of the algorithm. + + If not specified, the edge with the highest + :func:`networkx.edge_betweenness_centrality` will be used. 
+ + Returns + ------- + iterator + Iterator over tuples of sets of nodes in `G`. Each set of node + is a community, each tuple is a sequence of communities at a + particular level of the algorithm. + + Examples + -------- + To get the first pair of communities:: + + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To get only the first *k* tuples of communities, use + :func:`itertools.islice`:: + + >>> import itertools + >>> G = nx.path_graph(8) + >>> k = 2 + >>> comp = nx.community.girvan_newman(G) + >>> for communities in itertools.islice(comp, k): + ... print(tuple(sorted(c) for c in communities)) + ... + ([0, 1, 2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5, 6, 7]) + + To stop getting tuples of communities once the number of communities + is greater than *k*, use :func:`itertools.takewhile`:: + + >>> import itertools + >>> G = nx.path_graph(8) + >>> k = 4 + >>> comp = nx.community.girvan_newman(G) + >>> limited = itertools.takewhile(lambda c: len(c) <= k, comp) + >>> for communities in limited: + ... print(tuple(sorted(c) for c in communities)) + ... + ([0, 1, 2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5], [6, 7]) + + To just choose an edge to remove based on the weight:: + + >>> from operator import itemgetter + >>> G = nx.path_graph(10) + >>> edges = G.edges() + >>> nx.set_edge_attributes(G, {(u, v): v for u, v in edges}, "weight") + >>> def heaviest(G): + ... u, v, w = max(G.edges(data="weight"), key=itemgetter(2)) + ... return (u, v) + ... + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=heaviest) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4, 5, 6, 7, 8], [9]) + + To utilize edge weights when choosing an edge with, for example, the + highest betweenness centrality:: + + >>> from networkx import edge_betweenness_centrality as betweenness + >>> def most_central_edge(G): + ... centrality = betweenness(G, weight="weight") + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=most_central_edge) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To specify a different ranking algorithm for edges, use the + `most_valuable_edge` keyword argument:: + + >>> from networkx import edge_betweenness_centrality + >>> from random import random + >>> def most_central_edge(G): + ... centrality = edge_betweenness_centrality(G) + ... max_cent = max(centrality.values()) + ... # Scale the centrality values so they are between 0 and 1, + ... # and add some random noise. + ... centrality = {e: c / max_cent for e, c in centrality.items()} + ... # Add some random noise. + ... centrality = {e: c + random() for e, c in centrality.items()} + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=most_central_edge) + + Notes + ----- + The Girvan–Newman algorithm detects communities by progressively + removing edges from the original graph. The algorithm removes the + "most valuable" edge, traditionally the edge with the highest + betweenness centrality, at each step. As the graph breaks down into + pieces, the tightly knit community structure is exposed and the + result can be depicted as a dendrogram. + + """ + # If the graph is already empty, simply return its connected + # components. 
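+    # ("Empty" means edgeless here: every node is then its own community,
+    # e.g. nx.empty_graph(3) yields one tuple of three singleton sets.)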
+ if G.number_of_edges() == 0: + yield tuple(nx.connected_components(G)) + return + # If no function is provided for computing the most valuable edge, + # use the edge betweenness centrality. + if most_valuable_edge is None: + + def most_valuable_edge(G): + """Returns the edge with the highest betweenness centrality + in the graph `G`. + + """ + # We have guaranteed that the graph is non-empty, so this + # dictionary will never be empty. + betweenness = nx.edge_betweenness_centrality(G) + return max(betweenness, key=betweenness.get) + + # The copy of G here must include the edge weight data. + g = G.copy().to_undirected() + # Self-loops must be removed because their removal has no effect on + # the connected components of the graph. + g.remove_edges_from(nx.selfloop_edges(g)) + while g.number_of_edges() > 0: + yield _without_most_central_edges(g, most_valuable_edge) + + +def _without_most_central_edges(G, most_valuable_edge): + """Returns the connected components of the graph that results from + repeatedly removing the most "valuable" edge in the graph. + + `G` must be a non-empty graph. This function modifies the graph `G` + in-place; that is, it removes edges on the graph `G`. + + `most_valuable_edge` is a function that takes the graph `G` as input + (or a subgraph with one or more edges of `G` removed) and returns an + edge. That edge will be removed and this process will be repeated + until the number of connected components in the graph increases. + + """ + original_num_components = nx.number_connected_components(G) + num_new_components = original_num_components + while num_new_components <= original_num_components: + edge = most_valuable_edge(G) + G.remove_edge(*edge) + new_components = tuple(nx.connected_components(G)) + num_new_components = len(new_components) + return new_components diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/community_utils.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/community_utils.py new file mode 100644 index 0000000..ba73a6b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/community_utils.py @@ -0,0 +1,30 @@ +"""Helper functions for community-finding algorithms.""" + +import networkx as nx + +__all__ = ["is_partition"] + + +@nx._dispatchable +def is_partition(G, communities): + """Returns *True* if `communities` is a partition of the nodes of `G`. + + A partition of a universe set is a family of pairwise disjoint sets + whose union is the entire universe set. + + Parameters + ---------- + G : NetworkX graph. + + communities : list or iterable of sets of nodes + If not a list, the iterable is converted internally to a list. + If it is an iterator it is exhausted. 
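+
+    Examples
+    --------
+    A minimal sketch: the first two sets below are pairwise disjoint and
+    cover every node of the path graph, so they form a partition; the
+    second pair overlaps at node 1, so it does not:
+
+    >>> G = nx.path_graph(4)
+    >>> nx.community.is_partition(G, [{0, 1}, {2, 3}])
+    True
+    >>> nx.community.is_partition(G, [{0, 1}, {1, 2, 3}])
+    False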
+ + """ + # Alternate implementation: + # return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G) + if not isinstance(communities, list): + communities = list(communities) + nodes = {n for c in communities for n in c if n in G} + + return len(G) == len(nodes) == sum(len(c) for c in communities) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/divisive.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/divisive.py new file mode 100644 index 0000000..be3c7d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/divisive.py @@ -0,0 +1,216 @@ +import functools + +import networkx as nx + +__all__ = [ + "edge_betweenness_partition", + "edge_current_flow_betweenness_partition", +] + + +@nx._dispatchable(edge_attrs="weight") +def edge_betweenness_partition(G, number_of_sets, *, weight=None): + """Partition created by iteratively removing the highest edge betweenness edge. + + This algorithm works by calculating the edge betweenness for all + edges and removing the edge with the highest value. It is then + determined whether the graph has been broken into at least + `number_of_sets` connected components. + If not the process is repeated. + + Parameters + ---------- + G : NetworkX Graph, DiGraph or MultiGraph + Graph to be partitioned + + number_of_sets : int + Number of sets in the desired partition of the graph + + weight : key, optional, default=None + The key to use if using weights for edge betweenness calculation + + Returns + ------- + C : list of sets + Partition of the nodes of G + + Raises + ------ + NetworkXError + If number_of_sets is <= 0 or if number_of_sets > len(G) + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> part = nx.community.edge_betweenness_partition(G, 2) + >>> {0, 1, 3, 4, 5, 6, 7, 10, 11, 12, 13, 16, 17, 19, 21} in part + True + >>> { + ... 2, + ... 8, + ... 9, + ... 14, + ... 15, + ... 18, + ... 20, + ... 22, + ... 23, + ... 24, + ... 25, + ... 26, + ... 27, + ... 28, + ... 29, + ... 30, + ... 31, + ... 32, + ... 33, + ... } in part + True + + See Also + -------- + edge_current_flow_betweenness_partition + + Notes + ----- + This algorithm is fairly slow, as both the calculation of connected + components and edge betweenness relies on all pairs shortest + path algorithms. They could potentially be combined to cut down + on overall computation time. + + References + ---------- + .. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports + Volume 486, Issue 3-5 p. 75-174 + http://arxiv.org/abs/0906.0612 + """ + if number_of_sets <= 0: + raise nx.NetworkXError("number_of_sets must be >0") + if number_of_sets == 1: + return [set(G)] + if number_of_sets == len(G): + return [{n} for n in G] + if number_of_sets > len(G): + raise nx.NetworkXError("number_of_sets must be <= len(G)") + + H = G.copy() + partition = list(nx.connected_components(H)) + while len(partition) < number_of_sets: + ranking = nx.edge_betweenness_centrality(H, weight=weight) + edge = max(ranking, key=ranking.get) + H.remove_edge(*edge) + partition = list(nx.connected_components(H)) + return partition + + +@nx._dispatchable(edge_attrs="weight") +def edge_current_flow_betweenness_partition(G, number_of_sets, *, weight=None): + """Partition created by removing the highest edge current flow betweenness edge. + + This algorithm works by calculating the edge current flow + betweenness for all edges and removing the edge with the + highest value. 
It is then determined whether the graph has + been broken into at least `number_of_sets` connected + components. If not the process is repeated. + + Parameters + ---------- + G : NetworkX Graph, DiGraph or MultiGraph + Graph to be partitioned + + number_of_sets : int + Number of sets in the desired partition of the graph + + weight : key, optional (default=None) + The edge attribute key to use as weights for + edge current flow betweenness calculations + + Returns + ------- + C : list of sets + Partition of G + + Raises + ------ + NetworkXError + If number_of_sets is <= 0 or number_of_sets > len(G) + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> part = nx.community.edge_current_flow_betweenness_partition(G, 2) + >>> {0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 16, 17, 19, 21} in part + True + >>> {8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33} in part + True + + + See Also + -------- + edge_betweenness_partition + + Notes + ----- + This algorithm is extremely slow, as the recalculation of the edge + current flow betweenness is extremely slow. + + References + ---------- + .. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports + Volume 486, Issue 3-5 p. 75-174 + http://arxiv.org/abs/0906.0612 + """ + if number_of_sets <= 0: + raise nx.NetworkXError("number_of_sets must be >0") + elif number_of_sets == 1: + return [set(G)] + elif number_of_sets == len(G): + return [{n} for n in G] + elif number_of_sets > len(G): + raise nx.NetworkXError("number_of_sets must be <= len(G)") + + rank = functools.partial( + nx.edge_current_flow_betweenness_centrality, normalized=False, weight=weight + ) + + # current flow requires a connected network so we track the components explicitly + H = G.copy() + partition = list(nx.connected_components(H)) + if len(partition) > 1: + Hcc_subgraphs = [H.subgraph(cc).copy() for cc in partition] + else: + Hcc_subgraphs = [H] + + ranking = {} + for Hcc in Hcc_subgraphs: + ranking.update(rank(Hcc)) + + while len(partition) < number_of_sets: + edge = max(ranking, key=ranking.get) + for cc, Hcc in zip(partition, Hcc_subgraphs): + if edge[0] in cc: + Hcc.remove_edge(*edge) + del ranking[edge] + splitcc_list = list(nx.connected_components(Hcc)) + if len(splitcc_list) > 1: + # there are 2 connected components. split off smaller one + cc_new = min(splitcc_list, key=len) + Hcc_new = Hcc.subgraph(cc_new).copy() + # update edge rankings for Hcc_new + newranks = rank(Hcc_new) + for e, r in newranks.items(): + ranking[e if e in ranking else e[::-1]] = r + # append new cc and Hcc to their lists. + partition.append(cc_new) + Hcc_subgraphs.append(Hcc_new) + + # leave existing cc and Hcc in their lists, but shrink them + Hcc.remove_nodes_from(cc_new) + cc.difference_update(cc_new) + # update edge rankings for Hcc whether it was split or not + newranks = rank(Hcc) + for e, r in newranks.items(): + ranking[e if e in ranking else e[::-1]] = r + break + return partition diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/kclique.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/kclique.py new file mode 100644 index 0000000..c724910 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/kclique.py @@ -0,0 +1,79 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["k_clique_communities"] + + +@nx._dispatchable +def k_clique_communities(G, k, cliques=None): + """Find k-clique communities in graph using the percolation method. 
+
+    A k-clique community is the union of all cliques of size k that
+    can be reached through adjacent (sharing k-1 nodes) k-cliques.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    k : int
+        Size of smallest clique
+
+    cliques : list or generator
+        Precomputed cliques (use networkx.find_cliques(G))
+
+    Returns
+    -------
+    Yields sets of nodes, one for each k-clique community.
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> K5 = nx.convert_node_labels_to_integers(G, first_label=2)
+    >>> G.add_edges_from(K5.edges())
+    >>> c = list(nx.community.k_clique_communities(G, 4))
+    >>> sorted(list(c[0]))
+    [0, 1, 2, 3, 4, 5, 6]
+    >>> list(nx.community.k_clique_communities(G, 6))
+    []
+
+    References
+    ----------
+    .. [1] Gergely Palla, Imre Derényi, Illés Farkas, and Tamás Vicsek,
+       Uncovering the overlapping community structure of complex networks
+       in nature and society Nature 435, 814-818, 2005,
+       doi:10.1038/nature03607
+    """
+    if k < 2:
+        raise nx.NetworkXError(f"k={k}, k must be greater than 1.")
+    if cliques is None:
+        cliques = nx.find_cliques(G)
+    cliques = [frozenset(c) for c in cliques if len(c) >= k]
+
+    # First index which nodes are in which cliques
+    membership_dict = defaultdict(list)
+    for clique in cliques:
+        for node in clique:
+            membership_dict[node].append(clique)
+
+    # For each clique, see which adjacent cliques percolate
+    perc_graph = nx.Graph()
+    perc_graph.add_nodes_from(cliques)
+    for clique in cliques:
+        for adj_clique in _get_adjacent_cliques(clique, membership_dict):
+            if len(clique.intersection(adj_clique)) >= (k - 1):
+                perc_graph.add_edge(clique, adj_clique)
+
+    # Connected components of clique graph with perc edges
+    # are the percolated cliques
+    for component in nx.connected_components(perc_graph):
+        yield (frozenset.union(*component))
+
+
+def _get_adjacent_cliques(clique, membership_dict):
+    adjacent_cliques = set()
+    for n in clique:
+        for adj_clique in membership_dict[n]:
+            if clique != adj_clique:
+                adjacent_cliques.add(adj_clique)
+    return adjacent_cliques
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/kernighan_lin.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/kernighan_lin.py
new file mode 100644
index 0000000..f6397d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/kernighan_lin.py
@@ -0,0 +1,139 @@
+"""Functions for computing the Kernighan–Lin bipartition algorithm."""
+
+from itertools import count
+
+import networkx as nx
+from networkx.algorithms.community.community_utils import is_partition
+from networkx.utils import BinaryHeap, not_implemented_for, py_random_state
+
+__all__ = ["kernighan_lin_bisection"]
+
+
+def _kernighan_lin_sweep(edges, side):
+    """
+    This is a modified form of Kernighan-Lin, which moves single nodes at a
+    time, alternating between sides to keep the bisection balanced.  We keep
+    two min-heaps of swap costs to make optimal-next-move selection fast.
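+
+    For intuition, a sketch of the expected inputs (illustrative only): for a
+    unit-weight triangle whose third node starts on the opposite side,
+
+    >>> edges = [[(1, 1), (2, 1)], [(0, 1), (2, 1)], [(0, 1), (1, 1)]]
+    >>> side = [0, 0, 1]
+    >>> list(_kernighan_lin_sweep(edges, side))  # doctest: +SKIP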
+ """ + costs0, costs1 = costs = BinaryHeap(), BinaryHeap() + for u, side_u, edges_u in zip(count(), side, edges): + cost_u = sum(w if side[v] else -w for v, w in edges_u) + costs[side_u].insert(u, cost_u if side_u else -cost_u) + + def _update_costs(costs_x, x): + for y, w in edges[x]: + costs_y = costs[side[y]] + cost_y = costs_y.get(y) + if cost_y is not None: + cost_y += 2 * (-w if costs_x is costs_y else w) + costs_y.insert(y, cost_y, True) + + i = 0 + totcost = 0 + while costs0 and costs1: + u, cost_u = costs0.pop() + _update_costs(costs0, u) + v, cost_v = costs1.pop() + _update_costs(costs1, v) + totcost += cost_u + cost_v + i += 1 + yield totcost, i, (u, v) + + +@not_implemented_for("directed") +@py_random_state(4) +@nx._dispatchable(edge_attrs="weight") +def kernighan_lin_bisection(G, partition=None, max_iter=10, weight="weight", seed=None): + """Partition a graph into two blocks using the Kernighan–Lin + algorithm. + + This algorithm partitions a network into two sets by iteratively + swapping pairs of nodes to reduce the edge cut between the two sets. The + pairs are chosen according to a modified form of Kernighan-Lin [1]_, which + moves node individually, alternating between sides to keep the bisection + balanced. + + Parameters + ---------- + G : NetworkX graph + Graph must be undirected. + + partition : tuple + Pair of iterables containing an initial partition. If not + specified, a random balanced partition is used. + + max_iter : int + Maximum number of times to attempt swaps to find an + improvement before giving up. + + weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Only used if partition is None + + Returns + ------- + partition : tuple + A pair of sets of nodes representing the bipartition. + + Raises + ------ + NetworkXError + If partition is not a valid partition of the nodes of the graph. + + References + ---------- + .. [1] Kernighan, B. W.; Lin, Shen (1970). + "An efficient heuristic procedure for partitioning graphs." + *Bell Systems Technical Journal* 49: 291--307. + Oxford University Press 2011. 
+ + """ + n = len(G) + labels = list(G) + seed.shuffle(labels) + index = {v: i for i, v in enumerate(labels)} + + if partition is None: + side = [0] * (n // 2) + [1] * ((n + 1) // 2) + else: + try: + A, B = partition + except (TypeError, ValueError) as err: + raise nx.NetworkXError("partition must be two sets") from err + if not is_partition(G, (A, B)): + raise nx.NetworkXError("partition invalid") + side = [0] * n + for a in A: + side[index[a]] = 1 + + if G.is_multigraph(): + edges = [ + [ + (index[u], sum(e.get(weight, 1) for e in d.values())) + for u, d in G[v].items() + ] + for v in labels + ] + else: + edges = [ + [(index[u], e.get(weight, 1)) for u, e in G[v].items()] for v in labels + ] + + for i in range(max_iter): + costs = list(_kernighan_lin_sweep(edges, side)) + min_cost, min_i, _ = min(costs) + if min_cost >= 0: + break + + for _, _, (u, v) in costs[:min_i]: + side[u] = 1 + side[v] = 0 + + A = {u for u, s in zip(labels, side) if s == 0} + B = {u for u, s in zip(labels, side) if s == 1} + return A, B diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/label_propagation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/label_propagation.py new file mode 100644 index 0000000..7488028 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/label_propagation.py @@ -0,0 +1,338 @@ +""" +Label propagation community detection algorithms. +""" + +from collections import Counter, defaultdict, deque + +import networkx as nx +from networkx.utils import groups, not_implemented_for, py_random_state + +__all__ = [ + "label_propagation_communities", + "asyn_lpa_communities", + "fast_label_propagation_communities", +] + + +@py_random_state("seed") +@nx._dispatchable(edge_attrs="weight") +def fast_label_propagation_communities(G, *, weight=None, seed=None): + """Returns communities in `G` as detected by fast label propagation. + + The fast label propagation algorithm is described in [1]_. The algorithm is + probabilistic and the found communities may vary in different executions. + + The algorithm operates as follows. First, the community label of each node is + set to a unique label. The algorithm then repeatedly updates the labels of + the nodes to the most frequent label in their neighborhood. In case of ties, + a random label is chosen from the most frequent labels. + + The algorithm maintains a queue of nodes that still need to be processed. + Initially, all nodes are added to the queue in a random order. Then the nodes + are removed from the queue one by one and processed. If a node updates its label, + all its neighbors that have a different label are added to the queue (if not + already in the queue). The algorithm stops when the queue is empty. + + Parameters + ---------- + G : Graph, DiGraph, MultiGraph, or MultiDiGraph + Any NetworkX graph. + + weight : string, or None (default) + The edge attribute representing a non-negative weight of an edge. If None, + each edge is assumed to have weight one. The weight of an edge is used in + determining the frequency with which a label appears among the neighbors of + a node (edge with weight `w` is equivalent to `w` unweighted edges). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + Edge directions are ignored for directed graphs. + Edge weights must be non-negative numbers. 
+
+    References
+    ----------
+    .. [1] Vincent A. Traag & Lovro Šubelj. "Large network community detection by
+       fast label propagation." Scientific Reports 13 (2023): 2701.
+       https://doi.org/10.1038/s41598-023-29610-z
+    """
+
+    # Queue of nodes to be processed.
+    nodes_queue = deque(G)
+    seed.shuffle(nodes_queue)
+
+    # Set of nodes in the queue.
+    nodes_set = set(G)
+
+    # Assign unique label to each node.
+    comms = {node: i for i, node in enumerate(G)}
+
+    while nodes_queue:
+        # Remove next node from the queue to process.
+        node = nodes_queue.popleft()
+        nodes_set.remove(node)
+
+        # Isolated nodes retain their initial label.
+        if G.degree(node) > 0:
+            # Compute frequency of labels in node's neighborhood.
+            label_freqs = _fast_label_count(G, comms, node, weight)
+            max_freq = max(label_freqs.values())
+
+            # Always sample new label from most frequent labels.
+            comm = seed.choice(
+                [comm for comm in label_freqs if label_freqs[comm] == max_freq]
+            )
+
+            if comms[node] != comm:
+                comms[node] = comm
+
+                # Add neighbors that have different label to the queue.
+                for nbr in nx.all_neighbors(G, node):
+                    if comms[nbr] != comm and nbr not in nodes_set:
+                        nodes_queue.append(nbr)
+                        nodes_set.add(nbr)
+
+    yield from groups(comms).values()
+
+
+def _fast_label_count(G, comms, node, weight=None):
+    """Computes the frequency of labels in the neighborhood of a node.
+
+    Returns a dictionary keyed by label to the frequency of that label.
+    """
+
+    if weight is None:
+        # Unweighted (un)directed simple graph.
+        if not G.is_multigraph():
+            label_freqs = Counter(map(comms.get, nx.all_neighbors(G, node)))
+
+        # Unweighted (un)directed multigraph.
+        else:
+            label_freqs = defaultdict(int)
+            for nbr in G[node]:
+                label_freqs[comms[nbr]] += len(G[node][nbr])
+
+            if G.is_directed():
+                for nbr in G.pred[node]:
+                    label_freqs[comms[nbr]] += len(G.pred[node][nbr])
+
+    else:
+        # Weighted undirected simple/multigraph.
+        label_freqs = defaultdict(float)
+        for _, nbr, w in G.edges(node, data=weight, default=1):
+            label_freqs[comms[nbr]] += w
+
+        # Weighted directed simple/multigraph.
+        if G.is_directed():
+            for nbr, _, w in G.in_edges(node, data=weight, default=1):
+                label_freqs[comms[nbr]] += w
+
+    return label_freqs
+
+
+@py_random_state(2)
+@nx._dispatchable(edge_attrs="weight")
+def asyn_lpa_communities(G, weight=None, seed=None):
+    """Returns communities in `G` as detected by asynchronous label
+    propagation.
+
+    The asynchronous label propagation algorithm is described in
+    [1]_. The algorithm is probabilistic and the found communities may
+    vary in different executions.
+
+    The algorithm proceeds as follows. After initializing each node with
+    a unique label, the algorithm repeatedly sets the label of a node to
+    be the label that appears most frequently among that node's
+    neighbors. The algorithm halts when each node has the label that
+    appears most frequently among its neighbors. The algorithm is
+    asynchronous because each node is updated without waiting for
+    updates on the remaining nodes.
+
+    This generalized version of the algorithm in [1]_ accepts edge
+    weights.
+
+    Parameters
+    ----------
+    G : Graph
+
+    weight : string
+        The edge attribute representing the weight of an edge.
+        If None, each edge is assumed to have weight one. In this
+        algorithm, the weight of an edge is used in determining the
+        frequency with which a label appears among the neighbors of a
+        node: a higher weight means the label appears more often.
+ + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + Edge weight attributes must be numerical. + + References + ---------- + .. [1] Raghavan, Usha Nandini, Réka Albert, and Soundar Kumara. "Near + linear time algorithm to detect community structures in large-scale + networks." Physical Review E 76.3 (2007): 036106. + """ + + labels = {n: i for i, n in enumerate(G)} + cont = True + + while cont: + cont = False + nodes = list(G) + seed.shuffle(nodes) + + for node in nodes: + if not G[node]: + continue + + # Get label frequencies among adjacent nodes. + # Depending on the order they are processed in, + # some nodes will be in iteration t and others in t-1, + # making the algorithm asynchronous. + if weight is None: + # initialising a Counter from an iterator of labels is + # faster for getting unweighted label frequencies + label_freq = Counter(map(labels.get, G[node])) + else: + # updating a defaultdict is substantially faster + # for getting weighted label frequencies + label_freq = defaultdict(float) + for _, v, wt in G.edges(node, data=weight, default=1): + label_freq[labels[v]] += wt + + # Get the labels that appear with maximum frequency. + max_freq = max(label_freq.values()) + best_labels = [ + label for label, freq in label_freq.items() if freq == max_freq + ] + + # If the node does not have one of the maximum frequency labels, + # randomly choose one of them and update the node's label. + # Continue the iteration as long as at least one node + # doesn't have a maximum frequency label. + if labels[node] not in best_labels: + labels[node] = seed.choice(best_labels) + cont = True + + yield from groups(labels).values() + + +@not_implemented_for("directed") +@nx._dispatchable +def label_propagation_communities(G): + """Generates community sets determined by label propagation + + Finds communities in `G` using a semi-synchronous label propagation + method [1]_. This method combines the advantages of both the synchronous + and asynchronous models. Not implemented for directed graphs. + + Parameters + ---------- + G : graph + An undirected NetworkX graph. + + Returns + ------- + communities : iterable + A dict_values object that contains a set of nodes for each community. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed + + References + ---------- + .. [1] Cordasco, G., & Gargano, L. (2010, December). Community detection + via semi-synchronous label propagation algorithms. In Business + Applications of Social Network Analysis (BASNA), 2010 IEEE International + Workshop on (pp. 1-8). IEEE. + """ + coloring = _color_network(G) + # Create a unique label for each node in the graph + labeling = {v: k for k, v in enumerate(G)} + while not _labeling_complete(labeling, G): + # Update the labels of every node with the same color. + for color, nodes in coloring.items(): + for n in nodes: + _update_label(n, labeling, G) + + clusters = defaultdict(set) + for node, label in labeling.items(): + clusters[label].add(node) + return clusters.values() + + +def _color_network(G): + """Colors the network so that neighboring nodes all have distinct colors. + + Returns a dict keyed by color to a set of nodes with that color. 
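+
+    A small sketch (the exact color assignment depends on the greedy coloring
+    strategy):
+
+    >>> _color_network(nx.path_graph(3))  # doctest: +SKIP
+    {0: {1}, 1: {0, 2}}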
+ """ + coloring = {} # color => set(node) + colors = nx.coloring.greedy_color(G) + for node, color in colors.items(): + if color in coloring: + coloring[color].add(node) + else: + coloring[color] = {node} + return coloring + + +def _labeling_complete(labeling, G): + """Determines whether or not LPA is done. + + Label propagation is complete when all nodes have a label that is + in the set of highest frequency labels amongst its neighbors. + + Nodes with no neighbors are considered complete. + """ + return all( + labeling[v] in _most_frequent_labels(v, labeling, G) for v in G if len(G[v]) > 0 + ) + + +def _most_frequent_labels(node, labeling, G): + """Returns a set of all labels with maximum frequency in `labeling`. + + Input `labeling` should be a dict keyed by node to labels. + """ + if not G[node]: + # Nodes with no neighbors are themselves a community and are labeled + # accordingly, hence the immediate if statement. + return {labeling[node]} + + # Compute the frequencies of all neighbors of node + freqs = Counter(labeling[q] for q in G[node]) + max_freq = max(freqs.values()) + return {label for label, freq in freqs.items() if freq == max_freq} + + +def _update_label(node, labeling, G): + """Updates the label of a node using the Prec-Max tie breaking algorithm + + The algorithm is explained in: 'Community Detection via Semi-Synchronous + Label Propagation Algorithms' Cordasco and Gargano, 2011 + """ + high_labels = _most_frequent_labels(node, labeling, G) + if len(high_labels) == 1: + labeling[node] = high_labels.pop() + elif len(high_labels) > 1: + # Prec-Max + if labeling[node] not in high_labels: + labeling[node] = max(high_labels) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/leiden.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/leiden.py new file mode 100644 index 0000000..f79c012 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/leiden.py @@ -0,0 +1,162 @@ +"""Functions for detecting communities based on Leiden Community Detection +algorithm. + +These functions do not have NetworkX implementations. +They may only be run with an installable :doc:`backend ` +that supports them. +""" + +import itertools +from collections import deque + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["leiden_communities", "leiden_partitions"] + + +@not_implemented_for("directed") +@py_random_state("seed") +@nx._dispatchable(edge_attrs="weight", implemented_by_nx=False) +def leiden_communities(G, weight="weight", resolution=1, max_level=None, seed=None): + r"""Find a best partition of `G` using Leiden Community Detection (backend required) + + Leiden Community Detection is an algorithm to extract the community structure + of a network based on modularity optimization. It is an improvement upon the + Louvain Community Detection algorithm. See :any:`louvain_communities`. + + Unlike the Louvain algorithm, it guarantees that communities are well connected in addition + to being faster and uncovering better partitions. [1]_ + + The algorithm works in 3 phases. On the first phase, it adds the nodes to a queue randomly + and assigns every node to be in its own community. For each node it tries to find the + maximum positive modularity gain by moving each node to all of its neighbor communities. 
+    If a node is moved from its community, it adds to the rear of the queue all neighbors of
+    the node that do not belong to the node’s new community and that are not in the queue.
+
+    The first phase continues until the queue is empty.
+
+    The second phase consists in refining the partition $P$ obtained from the first phase. It starts
+    with a singleton partition $P_{refined}$. Then it merges nodes locally in $P_{refined}$ within
+    each community of the partition $P$. Nodes are merged with a community in $P_{refined}$ only if
+    both are sufficiently well connected to their community in $P$. This means that after the
+    refinement phase is concluded, communities in $P$ sometimes will have been split into multiple
+    communities.
+
+    The third phase consists of aggregating the network by building a new network whose nodes are
+    now the communities found in the second phase. However, the non-refined partition is used to create
+    an initial partition for the aggregate network.
+
+    Once this phase is complete it is possible to reapply the first and second phases creating bigger
+    communities with increased modularity.
+
+    The above three phases are executed until no modularity gain is achieved or `max_level` number
+    of iterations have been performed.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value
+        used as a weight. If None then each edge has weight 1.
+    resolution : float, optional (default=1)
+        If resolution is less than 1, the algorithm favors larger communities.
+        Greater than 1 favors smaller communities.
+    max_level : int or None, optional (default=None)
+        The maximum number of levels (steps of the algorithm) to compute.
+        Must be a positive integer or None. If None, then there is no max
+        level and the algorithm will run until converged.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    list
+        A list of disjoint sets (partition of `G`). Each set represents one community.
+        All communities together contain all the nodes in `G`.
+
+    Examples
+    --------
+    >>> import networkx as nx
+    >>> G = nx.petersen_graph()
+    >>> nx.community.leiden_communities(G, backend="example_backend")  # doctest: +SKIP
+    [{2, 3, 5, 7, 8}, {0, 1, 4, 6, 9}]
+
+    Notes
+    -----
+    The order in which the nodes are considered can affect the final output. In the algorithm
+    the ordering happens using a random shuffle.
+
+    References
+    ----------
+    .. [1] Traag, V.A., Waltman, L. & van Eck, N.J. From Louvain to Leiden: guaranteeing
+       well-connected communities. Sci Rep 9, 5233 (2019). https://doi.org/10.1038/s41598-019-41695-z
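+
+    Equivalently, the result is the last partition yielded by
+    :any:`leiden_partitions` (a sketch; requires a backend implementing Leiden):
+
+    >>> from collections import deque
+    >>> parts = nx.community.leiden_partitions(G, seed=123)  # doctest: +SKIP
+    >>> deque(parts, maxlen=1).pop()  # doctest: +SKIP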
+
+    See Also
+    --------
+    leiden_partitions
+    :any:`louvain_communities`
+    """
+    partitions = leiden_partitions(G, weight, resolution, seed)
+    if max_level is not None:
+        if max_level <= 0:
+            raise ValueError("max_level argument must be a positive integer or None")
+        partitions = itertools.islice(partitions, max_level)
+    final_partition = deque(partitions, maxlen=1)
+    return final_partition.pop()
+
+
+@not_implemented_for("directed")
+@py_random_state("seed")
+@nx._dispatchable(edge_attrs="weight", implemented_by_nx=False)
+def leiden_partitions(G, weight="weight", resolution=1, seed=None):
+    """Yield partitions for each level of Leiden Community Detection (backend required)
+
+    Leiden Community Detection is an algorithm to extract the community
+    structure of a network based on modularity optimization.
+
+    The partitions across levels (steps of the algorithm) form a dendrogram
+    of communities. A dendrogram is a diagram representing a tree and each
+    level represents a partition of the G graph. The top level contains the
+    smallest communities and as you traverse to the bottom of the tree the
+    communities get bigger and the overall modularity increases making
+    the partition better.
+
+    Each level is generated by executing the three phases of the Leiden Community
+    Detection algorithm. See :any:`leiden_communities`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value
+        used as a weight. If None then each edge has weight 1.
+    resolution : float, optional (default=1)
+        If resolution is less than 1, the algorithm favors larger communities.
+        Greater than 1 favors smaller communities.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Yields
+    ------
+    list
+        A list of disjoint sets (partition of `G`). Each set represents one community.
+        All communities together contain all the nodes in `G`. The yielded partitions
+        increase modularity with each iteration.
+
+    References
+    ----------
+    .. [1] Traag, V.A., Waltman, L. & van Eck, N.J. From Louvain to Leiden: guaranteeing
+       well-connected communities. Sci Rep 9, 5233 (2019). https://doi.org/10.1038/s41598-019-41695-z
+
+    See Also
+    --------
+    leiden_communities
+    :any:`louvain_partitions`
+    """
+    raise NotImplementedError(
+        "'leiden_partitions' is not implemented by networkx. "
+        "Please try a different backend."
+    )
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/local.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/local.py
new file mode 100644
index 0000000..68fc06a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/local.py
@@ -0,0 +1,220 @@
+"""
+Local Community Detection Algorithms
+
+Local Community Detection (LCD) aims to detect one or a few communities
+starting from certain source nodes in the network. This differs from Global
+Community Detection (GCD), which aims to partition an entire network into
+communities.
+
+LCD is often useful when only a portion of the graph is known or the
+graph is large enough that GCD is infeasible.
+
+[1]_ gives a good introduction and overview of LCD.
+
+References
+----------
+.. [1] Baltsou, Georgia, Konstantinos Christopoulos, and Konstantinos Tsichlas.
+   Local community detection: A survey. IEEE Access 10 (2022): 110701-110726.
+ https://doi.org/10.1109/ACCESS.2022.3213980 + + +""" + +__all__ = ["greedy_source_expansion"] + + +def _clauset_greedy_source_expansion(G, *, source, cutoff=None): + if cutoff is None: + cutoff = float("inf") + C = {source} + B = {source} + U = G[source].keys() - C + T = {frozenset([node, nbr]) for node in B for nbr in G.neighbors(node)} + I = {edge for edge in T if all(node in C for node in edge)} + + R_value = 0 + while len(C) < cutoff: + if not U: + break + + max_R = 0 + best_node = None + best_node_B = best_node_T = best_node_I = set() + + for v in U: + R_tmp, B_tmp, T_tmp, I_tmp = _calculate_local_modularity_for_candidate( + G, v, C, B, T, I + ) + if R_tmp > max_R: + max_R = R_tmp + best_node = v + best_node_B = B_tmp + best_node_T = T_tmp + best_node_I = I_tmp + + C = C | {best_node} + U.update(G[best_node].keys() - C) + U.remove(best_node) + B = best_node_B + T = best_node_T + I = best_node_I + if max_R < R_value: + break + R_value = max_R + + return C + + +def _calculate_local_modularity_for_candidate(G, v, C, B, T, I): + """ + Compute the local modularity R and updated variables when adding node v to the community. + + Parameters + ---------- + G : NetworkX graph + The input graph. + v : node + The candidate node to add to the community. + C : set + The current set of community nodes. + B : set + The current set of boundary nodes. + T : set of frozenset + The current set of boundary edges. + I : set of frozenset + The current set of internal boundary edges. + + Returns + ------- + R_tmp : float + The local modularity after adding node v. + B_tmp : set + The updated set of boundary nodes. + T_tmp : set of frozenset + The updated set of boundary edges. + I_tmp : set of frozenset + The updated set of internal boundary edges. + """ + C_tmp = C | {v} + B_tmp = B.copy() + T_tmp = T.copy() + I_tmp = I.copy() + removed_B_nodes = set() + + # Update boundary nodes and edges + for nbr in G[v]: + if nbr not in C_tmp: + # v has nbrs not in the community, so it remains a boundary node + B_tmp.add(v) + # Add edge between v and nbr to boundary edges + T_tmp.add(frozenset([v, nbr])) + + if nbr in B: + # Check if nbr should be removed from boundary nodes + # Go through nbrs nbrs to see if it is still a boundary node + nbr_still_in_B = any(nbr_nbr not in C_tmp for nbr_nbr in G[nbr]) + if not nbr_still_in_B: + B_tmp.remove(nbr) + removed_B_nodes.add(nbr) + + if nbr in C_tmp: + # Add edge between v and nbr to internal edges + I_tmp.add(frozenset([v, nbr])) + + # Remove edges no longer in the boundary + for removed_node in removed_B_nodes: + for removed_node_nbr in G[removed_node]: + if removed_node_nbr not in B_tmp: + T_tmp.discard(frozenset([removed_node_nbr, removed_node])) + I_tmp.discard(frozenset([removed_node_nbr, removed_node])) + + R_tmp = len(I_tmp) / len(T_tmp) if len(T_tmp) > 0 else 1 + return R_tmp, B_tmp, T_tmp, I_tmp + + +ALGORITHMS = { + "clauset": _clauset_greedy_source_expansion, +} + + +def greedy_source_expansion(G, *, source, cutoff=None, method="clauset"): + r"""Find the local community around a source node. + + Find the local community around a source node using Greedy Source + Expansion. Greedy Source Expansion generally identifies a local community + starting from the source node and expands it based on the criteria of the + chosen algorithm. + + The algorithm is specified with the `method` keyword argument. + + * `"clauset"` [1]_ uses local modularity gain to determine local communities. 
+      The algorithm adds neighboring nodes that maximize local modularity to the
+      community iteratively, stopping when no additional nodes improve the modularity
+      or when a predefined cutoff is reached.
+
+    Local modularity measures the density of edges within a community relative
+    to the total graph. By focusing on local modularity, the algorithm efficiently
+    uncovers communities around a specific node without requiring global
+    optimization over the entire graph.
+
+    The algorithm assumes that the graph $G$ consists of a known community $C$ and
+    an unknown set of nodes $U$, which are adjacent to $C$. The boundary of the
+    community, $B$, consists of nodes in $C$ that have at least one neighbor in $U$.
+
+    Mathematically, the local modularity is expressed as:
+
+    .. math::
+        R = \frac{I}{T}
+
+    where $T$ is the number of edges with one or more endpoints in $B$, and $I$ is the
+    number of those edges with neither endpoint in $U$.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The input graph.
+
+    source : node
+        The source node from which the community expansion begins.
+
+    cutoff : int, optional (default=None)
+        The maximum number of nodes to include in the community. If None, the algorithm
+        expands until no further modularity gain can be made.
+
+    method : string, optional (default='clauset')
+        The algorithm to use to carry out greedy source expansion.
+        Supported options: 'clauset'. Other inputs produce a ValueError.
+
+    Returns
+    -------
+    set
+        A set of nodes representing the local community around the source node.
+
+    Examples
+    --------
+    >>> G = nx.karate_club_graph()
+    >>> nx.community.greedy_source_expansion(G, source=16)
+    {16, 0, 4, 5, 6, 10}
+
+    Notes
+    -----
+    This algorithm is designed for detecting local communities around a specific node,
+    which is useful for large networks where global community detection is computationally
+    expensive.
+
+    The result of the algorithm may vary based on the structure of the graph, the choice of
+    the source node, and the presence of ties between nodes during the greedy expansion process.
+
+    References
+    ----------
+    .. [1] Clauset, Aaron. Finding local community structure in networks.
+       Physical Review E—Statistical, Nonlinear, and Soft Matter Physics 72, no. 2 (2005): 026132.
+       https://arxiv.org/pdf/physics/0503036
+
+    """
+    try:
+        algo = ALGORITHMS[method]
+    except KeyError as e:
+        raise ValueError(f"{method} is not a valid choice for an algorithm.") from e
+
+    return algo(G, source=source, cutoff=cutoff)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/louvain.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/louvain.py
new file mode 100644
index 0000000..c8407a8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/louvain.py
@@ -0,0 +1,384 @@
+"""Functions for detecting communities based on Louvain Community Detection
+Algorithm"""
+
+import itertools
+from collections import defaultdict, deque
+
+import networkx as nx
+from networkx.algorithms.community import modularity
+from networkx.utils import py_random_state
+
+__all__ = ["louvain_communities", "louvain_partitions"]
+
+
+@py_random_state("seed")
+@nx._dispatchable(edge_attrs="weight")
+def louvain_communities(
+    G, weight="weight", resolution=1, threshold=0.0000001, max_level=None, seed=None
+):
+    r"""Find the best partition of a graph using the Louvain Community Detection
+    Algorithm.
+
+    Louvain Community Detection Algorithm is a simple method to extract the community
+    structure of a network. This is a heuristic method based on modularity optimization. [1]_
+
+    The algorithm works in 2 steps. On the first step it assigns every node to be
+    in its own community and then for each node it tries to find the maximum positive
+    modularity gain by moving each node to all of its neighbor communities. If no positive
+    gain is achieved, the node remains in its original community.
+
+    The modularity gain obtained by moving an isolated node $i$ into a community $C$ can
+    easily be calculated by the following formula (combining [1]_ [2]_ and some algebra):
+
+    .. math::
+        \Delta Q = \frac{k_{i,in}}{2m} - \gamma\frac{ \Sigma_{tot} \cdot k_i}{2m^2}
+
+    where $m$ is the size of the graph, $k_{i,in}$ is the sum of the weights of the links
+    from $i$ to nodes in $C$, $k_i$ is the sum of the weights of the links incident to node $i$,
+    $\Sigma_{tot}$ is the sum of the weights of the links incident to nodes in $C$ and $\gamma$
+    is the resolution parameter.
+
+    For the directed case, the modularity gain can be computed using this formula according to [3]_
+
+    .. math::
+        \Delta Q = \frac{k_{i,in}}{m}
+        - \gamma\frac{k_i^{out} \cdot\Sigma_{tot}^{in} + k_i^{in} \cdot \Sigma_{tot}^{out}}{m^2}
+
+    where $k_i^{out}$, $k_i^{in}$ are the outer and inner weighted degrees of node $i$ and
+    $\Sigma_{tot}^{in}$, $\Sigma_{tot}^{out}$ are the sum of in-going and out-going links incident
+    to nodes in $C$.
+
+    The first phase continues until no individual move can improve the modularity.
+
+    The second phase consists in building a new network whose nodes are now the communities
+    found in the first phase. To do so, the weights of the links between the new nodes are given by
+    the sum of the weight of the links between nodes in the corresponding two communities. Once this
+    phase is complete it is possible to reapply the first phase creating bigger communities with
+    increased modularity.
+
+    The above two phases are executed until no modularity gain is achieved (or is less than
+    the `threshold`, or until `max_level` is reached).
+
+    Be careful with self-loops in the input graph. These are treated as
+    previously reduced communities -- as if the process had been started
+    in the middle of the algorithm. Large self-loop edge weights thus
+    represent strong communities and in practice may be hard to add
+    other nodes to. If your input graph edge weights for self-loops
+    do not represent already reduced communities you may want to remove
+    the self-loops before inputting that graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value
+        used as a weight. If None then each edge has weight 1.
+    resolution : float, optional (default=1)
+        If resolution is less than 1, the algorithm favors larger communities.
+        Greater than 1 favors smaller communities
+    threshold : float, optional (default=0.0000001)
+        Modularity gain threshold for each level. If the gain of modularity
+        between 2 levels of the algorithm is less than the given threshold
+        then the algorithm stops and returns the resulting communities.
+    max_level : int or None, optional (default=None)
+        The maximum number of levels (steps of the algorithm) to compute.
+        Must be a positive integer or None. If None, then there is no max
+        level and the threshold parameter determines the stopping condition.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+ + Returns + ------- + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + Examples + -------- + >>> import networkx as nx + >>> G = nx.petersen_graph() + >>> nx.community.louvain_communities(G, seed=123) + [{0, 4, 5, 7, 9}, {1, 2, 3, 6, 8}] + + Notes + ----- + The order in which the nodes are considered can affect the final output. In the algorithm + the ordering happens using a random shuffle. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. Mech 10008, 1-12(2008). https://doi.org/10.1088/1742-5468/2008/10/P10008 + .. [2] Traag, V.A., Waltman, L. & van Eck, N.J. From Louvain to Leiden: guaranteeing + well-connected communities. Sci Rep 9, 5233 (2019). https://doi.org/10.1038/s41598-019-41695-z + .. [3] Nicolas Dugué, Anthony Perez. Directed Louvain : maximizing modularity in directed networks. + [Research Report] Université d’Orléans. 2015. hal-01231784. https://hal.archives-ouvertes.fr/hal-01231784 + + See Also + -------- + louvain_partitions + :any:`leiden_communities` + """ + + partitions = louvain_partitions(G, weight, resolution, threshold, seed) + if max_level is not None: + if max_level <= 0: + raise ValueError("max_level argument must be a positive integer or None") + partitions = itertools.islice(partitions, max_level) + final_partition = deque(partitions, maxlen=1) + return final_partition.pop() + + +@py_random_state("seed") +@nx._dispatchable(edge_attrs="weight") +def louvain_partitions( + G, weight="weight", resolution=1, threshold=0.0000001, seed=None +): + """Yield partitions for each level of the Louvain Community Detection Algorithm + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. This is a heuristic method based on modularity optimization. [1]_ + + The partitions at each level (step of the algorithm) form a dendrogram of communities. + A dendrogram is a diagram representing a tree and each level represents + a partition of the G graph. The top level contains the smallest communities + and as you traverse to the bottom of the tree the communities get bigger + and the overall modularity increases making the partition better. + + Each level is generated by executing the two phases of the Louvain Community + Detection Algorithm. + + Be careful with self-loops in the input graph. These are treated as + previously reduced communities -- as if the process had been started + in the middle of the algorithm. Large self-loop edge weights thus + represent strong communities and in practice may be hard to add + other nodes to. If your input graph edge weights for self-loops + do not represent already reduced communities you may want to remove + the self-loops before inputting that graph. + + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. 
+ seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Yields + ------ + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. Mech 10008, 1-12(2008) + + See Also + -------- + louvain_communities + :any:`leiden_partitions` + """ + + partition = [{u} for u in G.nodes()] + if nx.is_empty(G): + yield partition + return + mod = modularity(G, partition, resolution=resolution, weight=weight) + is_directed = G.is_directed() + if G.is_multigraph(): + graph = _convert_multigraph(G, weight, is_directed) + else: + graph = G.__class__() + graph.add_nodes_from(G) + graph.add_weighted_edges_from(G.edges(data=weight, default=1)) + + m = graph.size(weight="weight") + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + improvement = True + while improvement: + # gh-5901 protect the sets in the yielded list from further manipulation here + yield [s.copy() for s in partition] + new_mod = modularity( + graph, inner_partition, resolution=resolution, weight="weight" + ) + if new_mod - mod <= threshold: + return + mod = new_mod + graph = _gen_graph(graph, inner_partition) + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + + +def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None): + """Calculate one level of the Louvain partitions tree + + Parameters + ---------- + G : NetworkX Graph/DiGraph + The graph from which to detect communities + m : number + The size of the graph `G`. + partition : list of sets of nodes + A valid partition of the graph `G` + resolution : positive number + The resolution parameter for computing the modularity of a partition + is_directed : bool + True if `G` is a directed graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
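+
+    A worked instance of the undirected gain terms (a sketch with hypothetical
+    numbers): with ``m = 10``, ``resolution = 1``, a node of degree ``k_u = 3``
+    whose edges into a candidate community sum to ``wt = 2``, and ``Stot = 8``
+    for that community, the quantity added to ``remove_cost`` is
+    ``2 / 10 - 1 * (8 * 3) / (2 * 10**2) = 0.08``.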
+ + """ + node2com = {u: i for i, u in enumerate(G.nodes())} + inner_partition = [{u} for u in G.nodes()] + if is_directed: + in_degrees = dict(G.in_degree(weight="weight")) + out_degrees = dict(G.out_degree(weight="weight")) + Stot_in = list(in_degrees.values()) + Stot_out = list(out_degrees.values()) + # Calculate weights for both in and out neighbors without considering self-loops + nbrs = {} + for u in G: + nbrs[u] = defaultdict(float) + for _, n, wt in G.out_edges(u, data="weight"): + if u != n: + nbrs[u][n] += wt + for n, _, wt in G.in_edges(u, data="weight"): + if u != n: + nbrs[u][n] += wt + else: + degrees = dict(G.degree(weight="weight")) + Stot = list(degrees.values()) + nbrs = {u: {v: data["weight"] for v, data in G[u].items() if v != u} for u in G} + rand_nodes = list(G.nodes) + seed.shuffle(rand_nodes) + nb_moves = 1 + improvement = False + while nb_moves > 0: + nb_moves = 0 + for u in rand_nodes: + best_mod = 0 + best_com = node2com[u] + weights2com = _neighbor_weights(nbrs[u], node2com) + if is_directed: + in_degree = in_degrees[u] + out_degree = out_degrees[u] + Stot_in[best_com] -= in_degree + Stot_out[best_com] -= out_degree + remove_cost = ( + -weights2com[best_com] / m + + resolution + * (out_degree * Stot_in[best_com] + in_degree * Stot_out[best_com]) + / m**2 + ) + else: + degree = degrees[u] + Stot[best_com] -= degree + remove_cost = -weights2com[best_com] / m + resolution * ( + Stot[best_com] * degree + ) / (2 * m**2) + for nbr_com, wt in weights2com.items(): + if is_directed: + gain = ( + remove_cost + + wt / m + - resolution + * ( + out_degree * Stot_in[nbr_com] + + in_degree * Stot_out[nbr_com] + ) + / m**2 + ) + else: + gain = ( + remove_cost + + wt / m + - resolution * (Stot[nbr_com] * degree) / (2 * m**2) + ) + if gain > best_mod: + best_mod = gain + best_com = nbr_com + if is_directed: + Stot_in[best_com] += in_degree + Stot_out[best_com] += out_degree + else: + Stot[best_com] += degree + if best_com != node2com[u]: + com = G.nodes[u].get("nodes", {u}) + partition[node2com[u]].difference_update(com) + inner_partition[node2com[u]].remove(u) + partition[best_com].update(com) + inner_partition[best_com].add(u) + improvement = True + nb_moves += 1 + node2com[u] = best_com + partition = list(filter(len, partition)) + inner_partition = list(filter(len, inner_partition)) + return partition, inner_partition, improvement + + +def _neighbor_weights(nbrs, node2com): + """Calculate weights between node and its neighbor communities. + + Parameters + ---------- + nbrs : dictionary + Dictionary with nodes' neighbors as keys and their edge weight as value. + node2com : dictionary + Dictionary with all graph's nodes as keys and their community index as value. 
+ + """ + weights = defaultdict(float) + for nbr, wt in nbrs.items(): + weights[node2com[nbr]] += wt + return weights + + +def _gen_graph(G, partition): + """Generate a new graph based on the partitions of a given graph""" + H = G.__class__() + node2com = {} + for i, part in enumerate(partition): + nodes = set() + for node in part: + node2com[node] = i + nodes.update(G.nodes[node].get("nodes", {node})) + H.add_node(i, nodes=nodes) + + for node1, node2, wt in G.edges(data=True): + wt = wt["weight"] + com1 = node2com[node1] + com2 = node2com[node2] + temp = H.get_edge_data(com1, com2, {"weight": 0})["weight"] + H.add_edge(com1, com2, weight=wt + temp) + return H + + +def _convert_multigraph(G, weight, is_directed): + """Convert a Multigraph to normal Graph""" + if is_directed: + H = nx.DiGraph() + else: + H = nx.Graph() + H.add_nodes_from(G) + for u, v, wt in G.edges(data=weight, default=1): + if H.has_edge(u, v): + H[u][v]["weight"] += wt + else: + H.add_edge(u, v, weight=wt) + return H diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/lukes.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/lukes.py new file mode 100644 index 0000000..08dd7cd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/lukes.py @@ -0,0 +1,227 @@ +"""Lukes Algorithm for exact optimal weighted tree partitioning.""" + +from copy import deepcopy +from functools import lru_cache +from random import choice + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["lukes_partitioning"] + +D_EDGE_W = "weight" +D_EDGE_VALUE = 1.0 +D_NODE_W = "weight" +D_NODE_VALUE = 1 +PKEY = "partitions" +CLUSTER_EVAL_CACHE_SIZE = 2048 + + +def _split_n_from(n, min_size_of_first_part): + # splits j in two parts of which the first is at least + # the second argument + assert n >= min_size_of_first_part + for p1 in range(min_size_of_first_part, n + 1): + yield p1, n - p1 + + +@nx._dispatchable(node_attrs="node_weight", edge_attrs="edge_weight") +def lukes_partitioning(G, max_size, node_weight=None, edge_weight=None): + """Optimal partitioning of a weighted tree using the Lukes algorithm. + + This algorithm partitions a connected, acyclic graph featuring integer + node weights and float edge weights. The resulting clusters are such + that the total weight of the nodes in each cluster does not exceed + max_size and that the weight of the edges that are cut by the partition + is minimum. The algorithm is based on [1]_. + + Parameters + ---------- + G : NetworkX graph + + max_size : int + Maximum weight a partition can have in terms of sum of + node_weight for all nodes in the partition + + edge_weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + node_weight : key + Node data key to use as weight. If None, the weights are all + set to one. The data must be int. + + Returns + ------- + partition : list + A list of sets of nodes representing the clusters of the + partition. + + Raises + ------ + NotATree + If G is not a tree. + TypeError + If any of the values of node_weight is not int. + + References + ---------- + .. [1] Lukes, J. A. (1974). + "Efficient Algorithm for the Partitioning of Trees." + IBM Journal of Research and Development, 18(3), 217–224. 
+ + """ + # First sanity check and tree preparation + if not nx.is_tree(G): + raise nx.NotATree("lukes_partitioning works only on trees") + else: + if nx.is_directed(G): + root = [n for n, d in G.in_degree() if d == 0] + assert len(root) == 1 + root = root[0] + t_G = deepcopy(G) + else: + root = choice(list(G.nodes)) + # this has the desirable side effect of not inheriting attributes + t_G = nx.dfs_tree(G, root) + + # Since we do not want to screw up the original graph, + # if we have a blank attribute, we make a deepcopy + if edge_weight is None or node_weight is None: + safe_G = deepcopy(G) + if edge_weight is None: + nx.set_edge_attributes(safe_G, D_EDGE_VALUE, D_EDGE_W) + edge_weight = D_EDGE_W + if node_weight is None: + nx.set_node_attributes(safe_G, D_NODE_VALUE, D_NODE_W) + node_weight = D_NODE_W + else: + safe_G = G + + # Second sanity check + # The values of node_weight MUST BE int. + # I cannot see any room for duck typing without incurring serious + # danger of subtle bugs. + all_n_attr = nx.get_node_attributes(safe_G, node_weight).values() + for x in all_n_attr: + if not isinstance(x, int): + raise TypeError( + "lukes_partitioning needs integer " + f"values for node_weight ({node_weight})" + ) + + # SUBROUTINES ----------------------- + # these functions are defined here for two reasons: + # - brevity: we can leverage global "safe_G" + # - caching: signatures are hashable + + @not_implemented_for("undirected") + # this is intended to be called only on t_G + def _leaves(gr): + for x in gr.nodes: + if not nx.descendants(gr, x): + yield x + + @not_implemented_for("undirected") + def _a_parent_of_leaves_only(gr): + tleaves = set(_leaves(gr)) + for n in set(gr.nodes) - tleaves: + if all(x in tleaves for x in nx.descendants(gr, n)): + return n + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _value_of_cluster(cluster): + valid_edges = [e for e in safe_G.edges if e[0] in cluster and e[1] in cluster] + return sum(safe_G.edges[e][edge_weight] for e in valid_edges) + + def _value_of_partition(partition): + return sum(_value_of_cluster(frozenset(c)) for c in partition) + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _weight_of_cluster(cluster): + return sum(safe_G.nodes[n][node_weight] for n in cluster) + + def _pivot(partition, node): + ccx = [c for c in partition if node in c] + assert len(ccx) == 1 + return ccx[0] + + def _concatenate_or_merge(partition_1, partition_2, x, i, ref_weight): + ccx = _pivot(partition_1, x) + cci = _pivot(partition_2, i) + merged_xi = ccx.union(cci) + + # We first check if we can do the merge. 
+ # If so, we do the actual calculations, otherwise we concatenate + if _weight_of_cluster(frozenset(merged_xi)) <= ref_weight: + cp1 = list(filter(lambda x: x != ccx, partition_1)) + cp2 = list(filter(lambda x: x != cci, partition_2)) + + option_2 = [merged_xi] + cp1 + cp2 + return option_2, _value_of_partition(option_2) + else: + option_1 = partition_1 + partition_2 + return option_1, _value_of_partition(option_1) + + # INITIALIZATION ----------------------- + leaves = set(_leaves(t_G)) + for lv in leaves: + t_G.nodes[lv][PKEY] = {} + slot = safe_G.nodes[lv][node_weight] + t_G.nodes[lv][PKEY][slot] = [{lv}] + t_G.nodes[lv][PKEY][0] = [{lv}] + + for inner in [x for x in t_G.nodes if x not in leaves]: + t_G.nodes[inner][PKEY] = {} + slot = safe_G.nodes[inner][node_weight] + t_G.nodes[inner][PKEY][slot] = [{inner}] + nx._clear_cache(t_G) + + # CORE ALGORITHM ----------------------- + while True: + x_node = _a_parent_of_leaves_only(t_G) + weight_of_x = safe_G.nodes[x_node][node_weight] + best_value = 0 + best_partition = None + bp_buffer = {} + x_descendants = nx.descendants(t_G, x_node) + for i_node in x_descendants: + for j in range(weight_of_x, max_size + 1): + for a, b in _split_n_from(j, weight_of_x): + if ( + a not in t_G.nodes[x_node][PKEY] + or b not in t_G.nodes[i_node][PKEY] + ): + # it's not possible to form this particular weight sum + continue + + part1 = t_G.nodes[x_node][PKEY][a] + part2 = t_G.nodes[i_node][PKEY][b] + part, value = _concatenate_or_merge(part1, part2, x_node, i_node, j) + + if j not in bp_buffer or bp_buffer[j][1] < value: + # we annotate in the buffer the best partition for j + bp_buffer[j] = part, value + + # we also keep track of the overall best partition + if best_value <= value: + best_value = value + best_partition = part + + # as illustrated in Lukes, once we finished a child, we can + # discharge the partitions we found into the graph + # (the key phrase is make all x == x') + # so that they are used by the subsequent children + for w, (best_part_for_vl, vl) in bp_buffer.items(): + t_G.nodes[x_node][PKEY][w] = best_part_for_vl + bp_buffer.clear() + + # the absolute best partition for this node + # across all weights has to be stored at 0 + t_G.nodes[x_node][PKEY][0] = best_partition + t_G.remove_nodes_from(x_descendants) + + if x_node == root: + # the 0-labeled partition of root + # is the optimal one for the whole tree + return t_G.nodes[root][PKEY][0] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/modularity_max.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/modularity_max.py new file mode 100644 index 0000000..f465e01 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/modularity_max.py @@ -0,0 +1,451 @@ +"""Functions for detecting communities based on modularity.""" + +from collections import defaultdict + +import networkx as nx +from networkx.algorithms.community.quality import modularity +from networkx.utils import not_implemented_for +from networkx.utils.mapped_queue import MappedQueue + +__all__ = [ + "greedy_modularity_communities", + "naive_greedy_modularity_communities", +] + + +def _greedy_modularity_communities_generator(G, weight=None, resolution=1): + r"""Yield community partitions of G and the modularity change at each step. 
+ + This function performs Clauset-Newman-Moore greedy modularity maximization [2]_ + At each step of the process it yields the change in modularity that will occur in + the next step followed by yielding the new community partition after that step. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until one community contains all nodes (the partition has one set). + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + Yields + ------ + Alternating yield statements produce the following two objects: + + communities: dict_values + A dict_values of frozensets of nodes, one for each community. + This represents a partition of the nodes of the graph into communities. + The first yield is the partition with each node in its own community. + + dq: float + The change in modularity when merging the next two communities + that leads to the largest modularity. + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + directed = G.is_directed() + N = G.number_of_nodes() + + # Count edges (or the sum of edge-weights for weighted graphs) + m = G.size(weight) + q0 = 1 / m + + # Calculate degrees (notation from the papers) + # a : the fraction of (weighted) out-degree for each node + # b : the fraction of (weighted) in-degree for each node + if directed: + a = {node: deg_out * q0 for node, deg_out in G.out_degree(weight=weight)} + b = {node: deg_in * q0 for node, deg_in in G.in_degree(weight=weight)} + else: + a = b = {node: deg * q0 * 0.5 for node, deg in G.degree(weight=weight)} + + # this preliminary step collects the edge weights for each node pair + # It handles multigraph and digraph and works fine for graph. 
+ dq_dict = defaultdict(lambda: defaultdict(float)) + for u, v, wt in G.edges(data=weight, default=1): + if u == v: + continue + dq_dict[u][v] += wt + dq_dict[v][u] += wt + + # now scale and subtract the expected edge-weights term + for u, nbrdict in dq_dict.items(): + for v, wt in nbrdict.items(): + dq_dict[u][v] = q0 * wt - resolution * (a[u] * b[v] + b[u] * a[v]) + + # Use -dq to get a max_heap instead of a min_heap + # dq_heap holds a heap for each node's neighbors + dq_heap = {u: MappedQueue({(u, v): -dq for v, dq in dq_dict[u].items()}) for u in G} + # H -> all_dq_heap holds a heap with the best items for each node + H = MappedQueue([dq_heap[n].heap[0] for n in G if len(dq_heap[n]) > 0]) + + # Initialize single-node communities + communities = {n: frozenset([n]) for n in G} + yield communities.values() + + # Merge the two communities that lead to the largest modularity + while len(H) > 1: + # Find best merge + # Remove from heap of row maxes + # Ties will be broken by choosing the pair with lowest min community id + try: + negdq, u, v = H.pop() + except IndexError: + break + dq = -negdq + yield dq + # Remove best merge from row u heap + dq_heap[u].pop() + # Push new row max onto H + if len(dq_heap[u]) > 0: + H.push(dq_heap[u].heap[0]) + # If this element was also at the root of row v, we need to remove the + # duplicate entry from H + if dq_heap[v].heap[0] == (v, u): + H.remove((v, u)) + # Remove best merge from row v heap + dq_heap[v].remove((v, u)) + # Push new row max onto H + if len(dq_heap[v]) > 0: + H.push(dq_heap[v].heap[0]) + else: + # Duplicate wasn't in H, just remove from row v heap + dq_heap[v].remove((v, u)) + + # Perform merge + communities[v] = frozenset(communities[u] | communities[v]) + del communities[u] + + # Get neighbor communities connected to the merged communities + u_nbrs = set(dq_dict[u]) + v_nbrs = set(dq_dict[v]) + all_nbrs = (u_nbrs | v_nbrs) - {u, v} + both_nbrs = u_nbrs & v_nbrs + # Update dq for merge of u into v + for w in all_nbrs: + # Calculate new dq value + if w in both_nbrs: + dq_vw = dq_dict[v][w] + dq_dict[u][w] + elif w in v_nbrs: + dq_vw = dq_dict[v][w] - resolution * (a[u] * b[w] + a[w] * b[u]) + else: # w in u_nbrs + dq_vw = dq_dict[u][w] - resolution * (a[v] * b[w] + a[w] * b[v]) + # Update rows v and w + for row, col in [(v, w), (w, v)]: + dq_heap_row = dq_heap[row] + # Update dict for v,w only (u is removed below) + dq_dict[row][col] = dq_vw + # Save old max of per-row heap + if len(dq_heap_row) > 0: + d_oldmax = dq_heap_row.heap[0] + else: + d_oldmax = None + # Add/update heaps + d = (row, col) + d_negdq = -dq_vw + # Save old value for finding heap index + if w in v_nbrs: + # Update existing element in per-row heap + dq_heap_row.update(d, d, priority=d_negdq) + else: + # We're creating a new nonzero element, add to heap + dq_heap_row.push(d, priority=d_negdq) + # Update heap of row maxes if necessary + if d_oldmax is None: + # No entries previously in this row, push new max + H.push(d, priority=d_negdq) + else: + # We've updated an entry in this row, has the max changed? 
+ row_max = dq_heap_row.heap[0] + if d_oldmax != row_max or d_oldmax.priority != row_max.priority: + H.update(d_oldmax, row_max) + + # Remove row/col u from dq_dict matrix + for w in dq_dict[u]: + # Remove from dict + dq_old = dq_dict[w][u] + del dq_dict[w][u] + # Remove from heaps if we haven't already + if w != v: + # Remove both row and column + for row, col in [(w, u), (u, w)]: + dq_heap_row = dq_heap[row] + # Check if replaced dq is row max + d_old = (row, col) + if dq_heap_row.heap[0] == d_old: + # Update per-row heap and heap of row maxes + dq_heap_row.remove(d_old) + H.remove(d_old) + # Update row max + if len(dq_heap_row) > 0: + H.push(dq_heap_row.heap[0]) + else: + # Only update per-row heap + dq_heap_row.remove(d_old) + + del dq_dict[u] + # Mark row u as deleted, but keep placeholder + dq_heap[u] = MappedQueue() + # Merge u into v and update a + a[v] += a[u] + a[u] = 0 + if directed: + b[v] += b[u] + b[u] = 0 + + yield communities.values() + + +@nx._dispatchable(edge_attrs="weight") +def greedy_modularity_communities( + G, + weight=None, + resolution=1, + cutoff=1, + best_n=None, +): + r"""Find communities in G using greedy modularity maximization. + + This function uses Clauset-Newman-Moore greedy modularity maximization [2]_ + to find the community partition with the largest modularity. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until no further increase in modularity is possible (a maximum). + Two keyword arguments adjust the stopping condition. `cutoff` is a lower + limit on the number of communities so you can stop the process before + reaching a maximum (used to save computation time). `best_n` is an upper + limit on the number of communities so you can make the process continue + until at most n communities remain even if the maximum modularity occurs + for more. To obtain exactly n communities, set both `cutoff` and `best_n` to n. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float, optional (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + cutoff : int, optional (default=1) + A minimum number of communities below which the merging process stops. + The process stops at this number of communities even if modularity + is not maximized. The goal is to let the user stop the process early. + The process stops before the cutoff if it finds a maximum of modularity. + + best_n : int or None, optional (default=None) + A maximum number of communities above which the merging process will + not stop. This forces community merging to continue after modularity + starts to decrease until `best_n` communities remain. + If ``None``, don't force it to continue beyond a maximum. + + Raises + ------ + ValueError : If the `cutoff` or `best_n` value is not in the range + ``[1, G.number_of_nodes()]``, or if `best_n` < `cutoff`. + + Returns + ------- + communities: list + A list of frozensets of nodes, one for each community. 
+        Sorted by length with largest communities first.
+
+    Examples
+    --------
+    >>> G = nx.karate_club_graph()
+    >>> c = nx.community.greedy_modularity_communities(G)
+    >>> sorted(c[0])
+    [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]
+
+    See Also
+    --------
+    modularity
+
+    References
+    ----------
+    .. [1] Newman, M. E. J. "Networks: An Introduction", page 224.
+       Oxford University Press, 2011.
+    .. [2] Clauset, A., Newman, M. E., & Moore, C.
+       "Finding community structure in very large networks."
+       Physical Review E 70(6), 2004.
+    .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community
+       Detection" Phys. Rev. E 74, 2006.
+    .. [4] Newman, M. E. J. "Analysis of weighted networks"
+       Physical Review E 70(5 Pt 2):056131, 2004.
+    """
+    if not G.size():
+        return [{n} for n in G]
+
+    if (cutoff < 1) or (cutoff > G.number_of_nodes()):
+        raise ValueError(f"cutoff must be between 1 and {len(G)}. Got {cutoff}.")
+    if best_n is not None:
+        if (best_n < 1) or (best_n > G.number_of_nodes()):
+            raise ValueError(f"best_n must be between 1 and {len(G)}. Got {best_n}.")
+        if best_n < cutoff:
+            raise ValueError(f"Must have best_n >= cutoff. Got {best_n} < {cutoff}")
+        if best_n == 1:
+            return [set(G)]
+    else:
+        best_n = G.number_of_nodes()
+
+    # retrieve generator object to construct output
+    community_gen = _greedy_modularity_communities_generator(
+        G, weight=weight, resolution=resolution
+    )
+
+    # construct the first partition: each node in its own community
+    communities = next(community_gen)
+
+    # continue merging communities until one of the breaking criteria is satisfied
+    while len(communities) > cutoff:
+        try:
+            dq = next(community_gen)
+        # StopIteration occurs when communities are the connected components
+        except StopIteration:
+            communities = sorted(communities, key=len, reverse=True)
+            # if best_n requires more merging, merge big sets for highest modularity
+            while len(communities) > best_n:
+                comm1, comm2, *rest = communities
+                # the communities are disjoint, so symmetric difference
+                # is the same as union here
+                communities = [comm1 ^ comm2]
+                communities.extend(rest)
+            return communities
+
+        # keep going unless max_mod is reached or best_n says to merge more
+        if dq < 0 and len(communities) <= best_n:
+            break
+        communities = next(community_gen)
+
+    return sorted(communities, key=len, reverse=True)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def naive_greedy_modularity_communities(G, resolution=1, weight=None):
+    r"""Find communities in G using greedy modularity maximization.
+
+    This implementation is O(n^4), much slower than alternatives, but it is
+    provided as an easy-to-understand reference implementation.
+
+    Greedy modularity maximization begins with each node in its own community
+    and joins the pair of communities that most increases modularity until no
+    such pair exists.
+
+    This function maximizes the generalized modularity, where `resolution`
+    is the resolution parameter, often expressed as $\gamma$.
+    See :func:`~networkx.algorithms.community.quality.modularity`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Graph must be simple and undirected.
+
+    resolution : float (default=1)
+        If resolution is less than 1, modularity favors larger communities.
+        Greater than 1 favors smaller communities.
+
+    weight : string or None, optional (default=None)
+        The name of an edge attribute that holds the numerical value used
+        as a weight. If None, then each edge has weight 1.
+        The degree is the sum of the edge weights adjacent to the node.
+ + Returns + ------- + list + A list of sets of nodes, one for each community. + Sorted by length with largest communities first. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> c = nx.community.naive_greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + greedy_modularity_communities + modularity + """ + # First create one community for each node + communities = [frozenset([u]) for u in G.nodes()] + # Track merges + merges = [] + # Greedily merge communities until no improvement is possible + old_modularity = None + new_modularity = modularity(G, communities, resolution=resolution, weight=weight) + while old_modularity is None or new_modularity > old_modularity: + # Save modularity for comparison + old_modularity = new_modularity + # Find best pair to merge + trial_communities = list(communities) + to_merge = None + for i, u in enumerate(communities): + for j, v in enumerate(communities): + # Skip i==j and empty communities + if j <= i or len(u) == 0 or len(v) == 0: + continue + # Merge communities u and v + trial_communities[j] = u | v + trial_communities[i] = frozenset([]) + trial_modularity = modularity( + G, trial_communities, resolution=resolution, weight=weight + ) + if trial_modularity >= new_modularity: + # Check if strictly better or tie + if trial_modularity > new_modularity: + # Found new best, save modularity and group indexes + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]): + # Break ties by choosing pair with lowest min id + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + # Un-merge + trial_communities[i] = u + trial_communities[j] = v + if to_merge is not None: + # If the best merge improves modularity, use it + merges.append(to_merge) + i, j, dq = to_merge + u, v = communities[i], communities[j] + communities[j] = u | v + communities[i] = frozenset([]) + # Remove empty communities and sort + return sorted((c for c in communities if len(c) > 0), key=len, reverse=True) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/quality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/quality.py new file mode 100644 index 0000000..4b4dcbb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/quality.py @@ -0,0 +1,347 @@ +"""Functions for measuring the quality of a partition (into +communities). + +""" + +from itertools import combinations + +import networkx as nx +from networkx import NetworkXError +from networkx.algorithms.community.community_utils import is_partition +from networkx.utils.decorators import argmap + +__all__ = ["modularity", "partition_quality"] + + +class NotAPartition(NetworkXError): + """Raised if a given collection is not a partition.""" + + def __init__(self, G, collection): + msg = f"{collection} is not a valid partition of the graph {G}" + super().__init__(msg) + + +def _require_partition(G, partition): + """Decorator to check that a valid partition is input to a function + + Raises :exc:`networkx.NetworkXError` if the partition is not valid. + + This decorator should be used on functions whose first two arguments + are a graph and a partition of the nodes of that graph (in that + order):: + + >>> @require_partition + ... def foo(G, partition): + ... print("partition is valid!") + ... 
+        >>> G = nx.complete_graph(5)
+        >>> partition = [{0, 1}, {2, 3}, {4}]
+        >>> foo(G, partition)
+        partition is valid!
+        >>> partition = [{0}, {2, 3}, {4}]
+        >>> foo(G, partition)
+        Traceback (most recent call last):
+        ...
+        networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G
+        >>> partition = [{0, 1}, {1, 2, 3}, {4}]
+        >>> foo(G, partition)
+        Traceback (most recent call last):
+        ...
+        networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G
+
+    """
+    if is_partition(G, partition):
+        return G, partition
+    raise nx.NetworkXError("`partition` is not a valid partition of the nodes of G")
+
+
+require_partition = argmap(_require_partition, (0, 1))
+
+
+@nx._dispatchable
+def intra_community_edges(G, partition):
+    """Returns the number of intra-community edges for a partition of `G`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    partition : iterable of sets of nodes
+        This must be a partition of the nodes of `G`.
+
+    The *intra-community edges* are those edges joining a pair of nodes
+    in the same block of the partition.
+
+    """
+    return sum(G.subgraph(block).size() for block in partition)
+
+
+@nx._dispatchable
+def inter_community_edges(G, partition):
+    """Returns the number of inter-community edges according to the given
+    partition of the nodes of `G`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    partition : iterable of sets of nodes
+        This must be a partition of the nodes of `G`.
+
+    The *inter-community edges* are those edges joining a pair of nodes
+    in different blocks of the partition.
+
+    Implementation note: this function creates an intermediate graph
+    that may require the same amount of memory as that of `G`.
+
+    """
+    # Alternate implementation that does not require constructing a new
+    # graph object (but does require constructing an affiliation
+    # dictionary):
+    #
+    #     aff = dict(chain.from_iterable(((v, block) for v in block)
+    #                                    for block in partition))
+    #     return sum(1 for u, v in G.edges() if aff[u] != aff[v])
+    #
+    MG = nx.MultiDiGraph if G.is_directed() else nx.MultiGraph
+    return nx.quotient_graph(G, partition, create_using=MG).size()
+
+
+@nx._dispatchable
+def inter_community_non_edges(G, partition):
+    """Returns the number of inter-community non-edges according to the
+    given partition of the nodes of `G`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    partition : iterable of sets of nodes
+        This must be a partition of the nodes of `G`.
+
+    A *non-edge* is a pair of nodes (undirected if `G` is undirected)
+    that are not adjacent in `G`. The *inter-community non-edges* are
+    those non-edges on a pair of nodes in different blocks of the
+    partition.
+
+    Implementation note: this function creates two intermediate graphs,
+    which may require up to twice the amount of memory as required to
+    store `G`.
+
+    """
+    # Alternate implementation that does not require constructing two
+    # new graph objects (but does require constructing an affiliation
+    # dictionary):
+    #
+    #     aff = dict(chain.from_iterable(((v, block) for v in block)
+    #                                    for block in partition))
+    #     return sum(1 for u, v in nx.non_edges(G) if aff[u] != aff[v])
+    #
+    return inter_community_edges(nx.complement(G), partition)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def modularity(G, communities, weight="weight", resolution=1):
+    r"""Returns the modularity of the given partition of the graph.
+
+    Modularity is defined in [1]_ as
+
+    .. math::
+        Q = \frac{1}{2m} \sum_{ij} \left( A_{ij} - \gamma\frac{k_ik_j}{2m}\right)
+            \delta(c_i,c_j)
+
+    where $m$ is the number of edges (or sum of all edge weights as in [5]_),
+    $A$ is the adjacency matrix of `G`, $k_i$ is the (weighted) degree of $i$,
+    $\gamma$ is the resolution parameter, and $\delta(c_i, c_j)$ is 1 if $i$ and
+    $j$ are in the same community else 0.
+
+    According to [2]_ (and verified by some algebra) this can be reduced to
+
+    .. math::
+        Q = \sum_{c=1}^{n}
+            \left[ \frac{L_c}{m} - \gamma\left( \frac{k_c}{2m} \right) ^2 \right]
+
+    where the sum iterates over all communities $c$, $m$ is the number of edges,
+    $L_c$ is the number of intra-community links for community $c$,
+    $k_c$ is the sum of degrees of the nodes in community $c$,
+    and $\gamma$ is the resolution parameter.
+
+    The resolution parameter sets an arbitrary tradeoff between intra-group
+    edges and inter-group edges. More complex grouping patterns can be
+    discovered by analyzing the same network with multiple values of gamma
+    and then combining the results [3]_. That said, it is very common to
+    simply use gamma=1. More on the choice of gamma is in [4]_.
+
+    The second formula is the one actually used in the calculation of modularity.
+    For directed graphs the second formula replaces $k_c$ with $k^{in}_c k^{out}_c$.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    communities : list or iterable of set of nodes
+        These node sets must represent a partition of G's nodes.
+
+    weight : string or None, optional (default="weight")
+        The edge attribute that holds the numerical value used
+        as a weight. If None or an edge does not have that attribute,
+        then that edge has weight 1.
+
+    resolution : float (default=1)
+        If resolution is less than 1, modularity favors larger communities.
+        Greater than 1 favors smaller communities.
+
+    Returns
+    -------
+    Q : float
+        The modularity of the partition.
+
+    Raises
+    ------
+    NotAPartition
+        If `communities` is not a partition of the nodes of `G`.
+
+    Examples
+    --------
+    >>> G = nx.barbell_graph(3, 0)
+    >>> nx.community.modularity(G, [{0, 1, 2}, {3, 4, 5}])
+    0.35714285714285715
+    >>> nx.community.modularity(G, nx.community.label_propagation_communities(G))
+    0.35714285714285715
+
+    References
+    ----------
+    .. [1] M. E. J. Newman "Networks: An Introduction", page 224.
+       Oxford University Press, 2011.
+    .. [2] Clauset, Aaron, Mark EJ Newman, and Cristopher Moore.
+       "Finding community structure in very large networks."
+       Phys. Rev. E 70.6 (2004).
+    .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community Detection"
+       Phys. Rev. E 74, 016110, 2006. https://doi.org/10.1103/PhysRevE.74.016110
+    .. [4] M. E. J. Newman, "Equivalence between modularity optimization and
+       maximum likelihood methods for community detection"
+       Phys. Rev. E 94, 052315, 2016. https://doi.org/10.1103/PhysRevE.94.052315
+    .. [5] Blondel, V.D. et al. "Fast unfolding of communities in large
+       networks" J. Stat. Mech 10008, 1-12 (2008).
+       https://doi.org/10.1088/1742-5468/2008/10/P10008
+    """
+    if not isinstance(communities, list):
+        communities = list(communities)
+    if not is_partition(G, communities):
+        raise NotAPartition(G, communities)
+
+    directed = G.is_directed()
+    if directed:
+        out_degree = dict(G.out_degree(weight=weight))
+        in_degree = dict(G.in_degree(weight=weight))
+        m = sum(out_degree.values())
+        norm = 1 / m**2
+    else:
+        out_degree = in_degree = dict(G.degree(weight=weight))
+        deg_sum = sum(out_degree.values())
+        m = deg_sum / 2
+        norm = 1 / deg_sum**2
+
+    def community_contribution(community):
+        comm = set(community)
+        L_c = sum(wt for u, v, wt in G.edges(comm, data=weight, default=1) if v in comm)
+
+        out_degree_sum = sum(out_degree[u] for u in comm)
+        in_degree_sum = sum(in_degree[u] for u in comm) if directed else out_degree_sum
+
+        return L_c / m - resolution * out_degree_sum * in_degree_sum * norm
+
+    return sum(map(community_contribution, communities))
+
+
+@require_partition
+@nx._dispatchable
+def partition_quality(G, partition):
+    """Returns the coverage and performance of a partition of G.
+
+    The *coverage* of a partition is the ratio of the number of
+    intra-community edges to the total number of edges in the graph.
+
+    The *performance* of a partition is the number of
+    intra-community edges plus inter-community non-edges divided by the total
+    number of potential edges.
+
+    This algorithm has complexity $O(C^2 + L)$ where C is the number of
+    communities and L is the number of links.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    partition : sequence
+        Partition of the nodes of `G`, represented as a sequence of
+        sets of nodes (blocks). Each block of the partition represents a
+        community.
+
+    Returns
+    -------
+    (float, float)
+        The (coverage, performance) tuple of the partition, as defined above.
+
+    Raises
+    ------
+    NetworkXError
+        If `partition` is not a valid partition of the nodes of `G`.
+
+    Notes
+    -----
+    If `G` is a multigraph:
+
+    - for coverage, the multiplicity of edges is counted
+    - for performance, the result is -1 (the total number of possible edges
+      is not defined)
+
+    References
+    ----------
+    .. [1] Santo Fortunato.
+       "Community Detection in Graphs".
+       *Physics Reports*, Volume 486, Issue 3--5 pp.
75--174 + + """ + + node_community = {} + for i, community in enumerate(partition): + for node in community: + node_community[node] = i + + # `performance` is not defined for multigraphs + if not G.is_multigraph(): + # Iterate over the communities, quadratic, to calculate `possible_inter_community_edges` + possible_inter_community_edges = sum( + len(p1) * len(p2) for p1, p2 in combinations(partition, 2) + ) + + if G.is_directed(): + possible_inter_community_edges *= 2 + else: + possible_inter_community_edges = 0 + + # Compute the number of edges in the complete graph -- `n` nodes, + # directed or undirected, depending on `G` + n = len(G) + total_pairs = n * (n - 1) + if not G.is_directed(): + total_pairs //= 2 + + intra_community_edges = 0 + inter_community_non_edges = possible_inter_community_edges + + # Iterate over the links to count `intra_community_edges` and `inter_community_non_edges` + for e in G.edges(): + if node_community[e[0]] == node_community[e[1]]: + intra_community_edges += 1 + else: + inter_community_non_edges -= 1 + + coverage = intra_community_edges / len(G.edges) + + if G.is_multigraph(): + performance = -1.0 + else: + performance = (intra_community_edges + inter_community_non_edges) / total_pairs + + return coverage, performance diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..71639e7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-312.pyc new file mode 100644 index 0000000..e71797d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-312.pyc new file mode 100644 index 0000000..47ffd00 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-312.pyc new file mode 100644 index 0000000..c7caca4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-312.pyc new file mode 100644 index 0000000..9c5fda4 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-312.pyc new file mode 100644 index 0000000..36eaaf9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-312.pyc new file mode 100644 index 0000000..48ed8e6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_leiden.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_leiden.cpython-312.pyc new file mode 100644 index 0000000..aec189c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_leiden.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_local.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_local.cpython-312.pyc new file mode 100644 index 0000000..ee8c26b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_local.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-312.pyc new file mode 100644 index 0000000..7ab8ea8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-312.pyc new file mode 100644 index 0000000..be2a2fc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-312.pyc new file mode 100644 index 0000000..ae18219 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-312.pyc new file mode 100644 index 0000000..65c9ece Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-312.pyc new file mode 100644 index 0000000..f304f2c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py new file mode 100644 index 0000000..6c023be --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py @@ -0,0 +1,136 @@ +import pytest + +import networkx as nx +from networkx import Graph, NetworkXError +from networkx.algorithms.community import asyn_fluidc + + +@pytest.mark.parametrize("graph_constructor", (nx.DiGraph, nx.MultiGraph)) +def test_raises_on_directed_and_multigraphs(graph_constructor): + G = graph_constructor([(0, 1), (1, 2)]) + with pytest.raises(nx.NetworkXNotImplemented): + nx.community.asyn_fluidc(G, 1) + + +def test_exceptions(): + test = Graph() + test.add_node("a") + pytest.raises(NetworkXError, asyn_fluidc, test, "hi") + pytest.raises(NetworkXError, asyn_fluidc, test, -1) + pytest.raises(NetworkXError, asyn_fluidc, test, 3) + test.add_node("b") + pytest.raises(NetworkXError, asyn_fluidc, test, 1) + + +def test_single_node(): + test = Graph() + + test.add_node("a") + + # ground truth + ground_truth = {frozenset(["a"])} + + communities = asyn_fluidc(test, 1) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_two_nodes(): + test = Graph() + + test.add_edge("a", "b") + + # ground truth + ground_truth = {frozenset(["a"]), frozenset(["b"])} + + communities = asyn_fluidc(test, 2) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_two_clique_communities(): + test = Graph() + + # c1 + test.add_edge("a", "b") + test.add_edge("a", "c") + test.add_edge("b", "c") + + # connection + test.add_edge("c", "d") + + # c2 + test.add_edge("d", "e") + test.add_edge("d", "f") + test.add_edge("f", "e") + + # ground truth + ground_truth = {frozenset(["a", "c", "b"]), frozenset(["e", "d", "f"])} + + communities = asyn_fluidc(test, 2, seed=7) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_five_clique_ring(): + test = Graph() + + # c1 + test.add_edge("1a", "1b") + test.add_edge("1a", "1c") + test.add_edge("1a", "1d") + test.add_edge("1b", "1c") + test.add_edge("1b", "1d") + test.add_edge("1c", "1d") + + # c2 + test.add_edge("2a", "2b") + test.add_edge("2a", "2c") + test.add_edge("2a", "2d") + test.add_edge("2b", "2c") + test.add_edge("2b", "2d") + test.add_edge("2c", "2d") + + # c3 + test.add_edge("3a", "3b") + test.add_edge("3a", "3c") + test.add_edge("3a", "3d") + test.add_edge("3b", "3c") + test.add_edge("3b", "3d") + test.add_edge("3c", "3d") + + # c4 + test.add_edge("4a", "4b") + test.add_edge("4a", "4c") + test.add_edge("4a", "4d") + test.add_edge("4b", "4c") + test.add_edge("4b", "4d") + test.add_edge("4c", "4d") + + # c5 + test.add_edge("5a", "5b") + test.add_edge("5a", "5c") + test.add_edge("5a", "5d") + test.add_edge("5b", "5c") + test.add_edge("5b", "5d") + test.add_edge("5c", 
"5d") + + # connections + test.add_edge("1a", "2c") + test.add_edge("2a", "3c") + test.add_edge("3a", "4c") + test.add_edge("4a", "5c") + test.add_edge("5a", "1c") + + # ground truth + ground_truth = { + frozenset(["1a", "1b", "1c", "1d"]), + frozenset(["2a", "2b", "2c", "2d"]), + frozenset(["3a", "3b", "3c", "3d"]), + frozenset(["4a", "4b", "4c", "4d"]), + frozenset(["5a", "5b", "5c", "5d"]), + } + + communities = asyn_fluidc(test, 5, seed=9) + result = {frozenset(c) for c in communities} + assert result == ground_truth diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_centrality.py new file mode 100644 index 0000000..1a8982f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_centrality.py @@ -0,0 +1,85 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.centrality` +module. + +""" + +from operator import itemgetter + +import networkx as nx + + +def set_of_sets(iterable): + return set(map(frozenset, iterable)) + + +def validate_communities(result, expected): + assert set_of_sets(result) == set_of_sets(expected) + + +def validate_possible_communities(result, *expected): + assert any(set_of_sets(result) == set_of_sets(p) for p in expected) + + +class TestGirvanNewman: + """Unit tests for the + :func:`networkx.algorithms.community.centrality.girvan_newman` + function. + + """ + + def test_no_edges(self): + G = nx.empty_graph(3) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 1 + validate_communities(communities[0], [{0}, {1}, {2}]) + + def test_undirected(self): + # Start with the graph .-.-.-. + G = nx.path_graph(4) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + # After one removal, we get the graph .-. .-. + validate_communities(communities[0], [{0, 1}, {2, 3}]) + # After the next, we get the graph .-. . ., but there are two + # symmetric possible versions. + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + # After the last removal, we always get the empty graph. + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_directed(self): + G = nx.DiGraph(nx.path_graph(4)) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + validate_communities(communities[0], [{0, 1}, {2, 3}]) + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_selfloops(self): + G = nx.path_graph(4) + G.add_edge(0, 0) + G.add_edge(2, 2) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + validate_communities(communities[0], [{0, 1}, {2, 3}]) + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_most_valuable_edge(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 3), (1, 2, 2), (2, 3, 1)]) + # Let the most valuable edge be the one with the highest weight. 
+ + def heaviest(G): + return max(G.edges(data="weight"), key=itemgetter(2))[:2] + + communities = list(nx.community.girvan_newman(G, heaviest)) + assert len(communities) == 3 + validate_communities(communities[0], [{0}, {1, 2, 3}]) + validate_communities(communities[1], [{0}, {1}, {2, 3}]) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_divisive.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_divisive.py new file mode 100644 index 0000000..6331503 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_divisive.py @@ -0,0 +1,106 @@ +import pytest + +import networkx as nx + + +def test_edge_betweenness_partition(): + G = nx.barbell_graph(3, 0) + C = nx.community.edge_betweenness_partition(G, 2) + answer = [{0, 1, 2}, {3, 4, 5}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + G = nx.barbell_graph(3, 1) + C = nx.community.edge_betweenness_partition(G, 3) + answer = [{0, 1, 2}, {4, 5, 6}, {3}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_betweenness_partition(G, 7) + answer = [{n} for n in G] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_betweenness_partition(G, 1) + assert C == [set(G)] + + C = nx.community.edge_betweenness_partition(G, 1, weight="weight") + assert C == [set(G)] + + with pytest.raises(nx.NetworkXError): + nx.community.edge_betweenness_partition(G, 0) + + with pytest.raises(nx.NetworkXError): + nx.community.edge_betweenness_partition(G, -1) + + with pytest.raises(nx.NetworkXError): + nx.community.edge_betweenness_partition(G, 10) + + +def test_edge_current_flow_betweenness_partition(): + pytest.importorskip("scipy") + + G = nx.barbell_graph(3, 0) + C = nx.community.edge_current_flow_betweenness_partition(G, 2) + answer = [{0, 1, 2}, {3, 4, 5}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + G = nx.barbell_graph(3, 1) + C = nx.community.edge_current_flow_betweenness_partition(G, 2) + answers = [[{0, 1, 2, 3}, {4, 5, 6}], [{0, 1, 2}, {3, 4, 5, 6}]] + assert len(C) == len(answers[0]) + assert any(all(s in answer for s in C) for answer in answers) + + C = nx.community.edge_current_flow_betweenness_partition(G, 3) + answer = [{0, 1, 2}, {4, 5, 6}, {3}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_current_flow_betweenness_partition(G, 4) + answers = [[{1, 2}, {4, 5, 6}, {3}, {0}], [{0, 1, 2}, {5, 6}, {3}, {4}]] + assert len(C) == len(answers[0]) + assert any(all(s in answer for s in C) for answer in answers) + + C = nx.community.edge_current_flow_betweenness_partition(G, 5) + answer = [{1, 2}, {5, 6}, {3}, {0}, {4}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_current_flow_betweenness_partition(G, 6) + answers = [[{2}, {5, 6}, {3}, {0}, {4}, {1}], [{1, 2}, {6}, {3}, {0}, {4}, {5}]] + assert len(C) == len(answers[0]) + assert any(all(s in answer for s in C) for answer in answers) + + C = nx.community.edge_current_flow_betweenness_partition(G, 7) + answer = [{n} for n in G] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_current_flow_betweenness_partition(G, 1) + assert C == [set(G)] + + C = nx.community.edge_current_flow_betweenness_partition(G, 1, weight="weight") + assert C == [set(G)] + + with pytest.raises(nx.NetworkXError): + 
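+        # requesting 0 communities falls outside the valid range [1, len(G)]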
nx.community.edge_current_flow_betweenness_partition(G, 0) + + with pytest.raises(nx.NetworkXError): + nx.community.edge_current_flow_betweenness_partition(G, -1) + + with pytest.raises(nx.NetworkXError): + nx.community.edge_current_flow_betweenness_partition(G, 10) + + N = 10 + G = nx.empty_graph(N) + for i in range(2, N - 1): + C = nx.community.edge_current_flow_betweenness_partition(G, i) + assert C == [{n} for n in G] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_kclique.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_kclique.py new file mode 100644 index 0000000..aa0b7e8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_kclique.py @@ -0,0 +1,91 @@ +from itertools import combinations + +import pytest + +import networkx as nx + + +def test_overlapping_K5(): + G = nx.Graph() + G.add_edges_from(combinations(range(5), 2)) # Add a five clique + G.add_edges_from(combinations(range(2, 7), 2)) # Add another five clique + c = list(nx.community.k_clique_communities(G, 4)) + assert c == [frozenset(range(7))] + c = set(nx.community.k_clique_communities(G, 5)) + assert c == {frozenset(range(5)), frozenset(range(2, 7))} + + +def test_isolated_K5(): + G = nx.Graph() + G.add_edges_from(combinations(range(5), 2)) # Add a five clique + G.add_edges_from(combinations(range(5, 10), 2)) # Add another five clique + c = set(nx.community.k_clique_communities(G, 5)) + assert c == {frozenset(range(5)), frozenset(range(5, 10))} + + +class TestZacharyKarateClub: + def setup_method(self): + self.G = nx.karate_club_graph() + + def _check_communities(self, k, expected): + communities = set(nx.community.k_clique_communities(self.G, k)) + assert communities == expected + + def test_k2(self): + # clique percolation with k=2 is just connected components + expected = {frozenset(self.G)} + self._check_communities(2, expected) + + def test_k3(self): + comm1 = [ + 0, + 1, + 2, + 3, + 7, + 8, + 12, + 13, + 14, + 15, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + ] + comm2 = [0, 4, 5, 6, 10, 16] + comm3 = [24, 25, 31] + expected = {frozenset(comm1), frozenset(comm2), frozenset(comm3)} + self._check_communities(3, expected) + + def test_k4(self): + expected = { + frozenset([0, 1, 2, 3, 7, 13]), + frozenset([8, 32, 30, 33]), + frozenset([32, 33, 29, 23]), + } + self._check_communities(4, expected) + + def test_k5(self): + expected = {frozenset([0, 1, 2, 3, 7, 13])} + self._check_communities(5, expected) + + def test_k6(self): + expected = set() + self._check_communities(6, expected) + + +def test_bad_k(): + with pytest.raises(nx.NetworkXError): + list(nx.community.k_clique_communities(nx.Graph(), 1)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py new file mode 100644 index 0000000..25d53d5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py @@ -0,0 +1,92 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.kernighan_lin` +module. 
+""" + +from itertools import permutations + +import pytest + +import networkx as nx +from networkx.algorithms.community import kernighan_lin_bisection + + +def assert_partition_equal(x, y): + assert set(map(frozenset, x)) == set(map(frozenset, y)) + + +def test_partition(): + G = nx.barbell_graph(3, 0) + C = kernighan_lin_bisection(G) + assert_partition_equal(C, [{0, 1, 2}, {3, 4, 5}]) + + +def test_partition_argument(): + G = nx.barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + C = kernighan_lin_bisection(G, partition) + assert_partition_equal(C, partition) + + +def test_partition_argument_non_integer_nodes(): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + partition = ({"A", "B"}, {"C", "D"}) + C = kernighan_lin_bisection(G, partition) + assert_partition_equal(C, partition) + + +def test_seed_argument(): + G = nx.barbell_graph(3, 0) + C = kernighan_lin_bisection(G, seed=1) + assert_partition_equal(C, [{0, 1, 2}, {3, 4, 5}]) + + +def test_non_disjoint_partition(): + with pytest.raises(nx.NetworkXError): + G = nx.barbell_graph(3, 0) + partition = ({0, 1, 2}, {2, 3, 4, 5}) + kernighan_lin_bisection(G, partition) + + +def test_too_many_blocks(): + with pytest.raises(nx.NetworkXError): + G = nx.barbell_graph(3, 0) + partition = ({0, 1}, {2}, {3, 4, 5}) + kernighan_lin_bisection(G, partition) + + +def test_multigraph(): + G = nx.cycle_graph(4) + M = nx.MultiGraph(G.edges()) + M.add_edges_from(G.edges()) + M.remove_edge(1, 2) + for labels in permutations(range(4)): + mapping = dict(zip(M, labels)) + A, B = kernighan_lin_bisection(nx.relabel_nodes(M, mapping), seed=0) + assert_partition_equal( + [A, B], [{mapping[0], mapping[1]}, {mapping[2], mapping[3]}] + ) + + +def test_max_iter_argument(): + G = nx.Graph( + [ + ("A", "B", {"weight": 1}), + ("A", "C", {"weight": 2}), + ("A", "D", {"weight": 3}), + ("A", "E", {"weight": 2}), + ("A", "F", {"weight": 4}), + ("B", "C", {"weight": 1}), + ("B", "D", {"weight": 4}), + ("B", "E", {"weight": 2}), + ("B", "F", {"weight": 1}), + ("C", "D", {"weight": 3}), + ("C", "E", {"weight": 2}), + ("C", "F", {"weight": 1}), + ("D", "E", {"weight": 4}), + ("D", "F", {"weight": 3}), + ("E", "F", {"weight": 2}), + ] + ) + partition = ({"A", "B", "C"}, {"D", "E", "F"}) + C = kernighan_lin_bisection(G, partition, max_iter=1) + assert_partition_equal(C, ({"A", "F", "C"}, {"D", "E", "B"})) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_label_propagation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_label_propagation.py new file mode 100644 index 0000000..4be72db --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_label_propagation.py @@ -0,0 +1,241 @@ +from itertools import chain, combinations + +import pytest + +import networkx as nx + + +def test_directed_not_supported(): + with pytest.raises(nx.NetworkXNotImplemented): + # not supported for directed graphs + test = nx.DiGraph() + test.add_edge("a", "b") + test.add_edge("a", "c") + test.add_edge("b", "d") + result = nx.community.label_propagation_communities(test) + + +def test_iterator_vs_iterable(): + G = nx.empty_graph("a") + assert list(nx.community.label_propagation_communities(G)) == [{"a"}] + for community in nx.community.label_propagation_communities(G): + assert community == {"a"} + pytest.raises(TypeError, next, nx.community.label_propagation_communities(G)) + + +def test_one_node(): + test = nx.Graph() + test.add_node("a") + + # The expected communities are: + 
ground_truth = {frozenset(["a"])} + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_unconnected_communities(): + test = nx.Graph() + # community 1 + test.add_edge("a", "c") + test.add_edge("a", "d") + test.add_edge("d", "c") + # community 2 + test.add_edge("b", "e") + test.add_edge("e", "f") + test.add_edge("f", "b") + + # The expected communities are: + ground_truth = {frozenset(["a", "c", "d"]), frozenset(["b", "e", "f"])} + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_connected_communities(): + test = nx.Graph() + # community 1 + test.add_edge("a", "b") + test.add_edge("c", "a") + test.add_edge("c", "b") + test.add_edge("d", "a") + test.add_edge("d", "b") + test.add_edge("d", "c") + test.add_edge("e", "a") + test.add_edge("e", "b") + test.add_edge("e", "c") + test.add_edge("e", "d") + # community 2 + test.add_edge("1", "2") + test.add_edge("3", "1") + test.add_edge("3", "2") + test.add_edge("4", "1") + test.add_edge("4", "2") + test.add_edge("4", "3") + test.add_edge("5", "1") + test.add_edge("5", "2") + test.add_edge("5", "3") + test.add_edge("5", "4") + # edge between community 1 and 2 + test.add_edge("a", "1") + # community 3 + test.add_edge("x", "y") + # community 4 with only a single node + test.add_node("z") + + # The expected communities are: + ground_truth1 = { + frozenset(["a", "b", "c", "d", "e"]), + frozenset(["1", "2", "3", "4", "5"]), + frozenset(["x", "y"]), + frozenset(["z"]), + } + ground_truth2 = { + frozenset(["a", "b", "c", "d", "e", "1", "2", "3", "4", "5"]), + frozenset(["x", "y"]), + frozenset(["z"]), + } + ground_truth = (ground_truth1, ground_truth2) + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result in ground_truth + + +def test_termination(): + # ensure termination of asyn_lpa_communities in two cases + # that led to an endless loop in a previous version + test1 = nx.karate_club_graph() + test2 = nx.caveman_graph(2, 10) + test2.add_edges_from([(0, 20), (20, 10)]) + nx.community.asyn_lpa_communities(test1) + nx.community.asyn_lpa_communities(test2) + + +class TestAsynLpaCommunities: + def _check_communities(self, G, expected): + """Checks that the communities computed from the given graph ``G`` + using the :func:`~networkx.asyn_lpa_communities` function match + the set of nodes given in ``expected``. + + ``expected`` must be a :class:`set` of :class:`frozenset` + instances, each element of which is a node in the graph. + + """ + communities = nx.community.asyn_lpa_communities(G) + result = {frozenset(c) for c in communities} + assert result == expected + + def test_null_graph(self): + G = nx.null_graph() + ground_truth = set() + self._check_communities(G, ground_truth) + + def test_single_node(self): + G = nx.empty_graph(1) + ground_truth = {frozenset([0])} + self._check_communities(G, ground_truth) + + def test_simple_communities(self): + # This graph is the disjoint union of two triangles. 
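+        # (each two-character string denotes one edge, e.g. "ab" == ("a", "b"))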
+ G = nx.Graph(["ab", "ac", "bc", "de", "df", "fe"]) + ground_truth = {frozenset("abc"), frozenset("def")} + self._check_communities(G, ground_truth) + + def test_seed_argument(self): + G = nx.Graph(["ab", "ac", "bc", "de", "df", "fe"]) + ground_truth = {frozenset("abc"), frozenset("def")} + communities = nx.community.asyn_lpa_communities(G, seed=1) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + def test_several_communities(self): + # This graph is the disjoint union of five triangles. + ground_truth = {frozenset(range(3 * i, 3 * (i + 1))) for i in range(5)} + edges = chain.from_iterable(combinations(c, 2) for c in ground_truth) + G = nx.Graph(edges) + self._check_communities(G, ground_truth) + + +class TestFastLabelPropagationCommunities: + N = 100 # number of nodes + K = 15 # average node degree + + def _check_communities(self, G, truth, weight=None, seed=42): + C = nx.community.fast_label_propagation_communities(G, weight=weight, seed=seed) + assert {frozenset(c) for c in C} == truth + + def test_null_graph(self): + G = nx.null_graph() + truth = set() + self._check_communities(G, truth) + + def test_empty_graph(self): + G = nx.empty_graph(self.N) + truth = {frozenset([i]) for i in G} + self._check_communities(G, truth) + + def test_star_graph(self): + G = nx.star_graph(self.N) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_complete_graph(self): + G = nx.complete_graph(self.N) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_bipartite_graph(self): + G = nx.complete_bipartite_graph(self.N // 2, self.N // 2) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_random_graph(self): + G = nx.gnm_random_graph(self.N, self.N * self.K // 2, seed=42) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_disjoin_cliques(self): + G = nx.Graph(["ab", "AB", "AC", "BC", "12", "13", "14", "23", "24", "34"]) + truth = {frozenset("ab"), frozenset("ABC"), frozenset("1234")} + self._check_communities(G, truth) + + def test_ring_of_cliques(self): + N, K = self.N, self.K + G = nx.ring_of_cliques(N, K) + truth = {frozenset([K * i + k for k in range(K)]) for i in range(N)} + self._check_communities(G, truth) + + def test_larger_graph(self): + G = nx.gnm_random_graph(100 * self.N, 50 * self.N * self.K, seed=42) + nx.community.fast_label_propagation_communities(G) + + def test_graph_type(self): + G1 = nx.complete_graph(self.N, nx.MultiDiGraph()) + G2 = nx.MultiGraph(G1) + G3 = nx.DiGraph(G1) + G4 = nx.Graph(G1) + truth = {frozenset(G1)} + self._check_communities(G1, truth) + self._check_communities(G2, truth) + self._check_communities(G3, truth) + self._check_communities(G4, truth) + + def test_weight_argument(self): + G = nx.MultiDiGraph() + G.add_edge(1, 2, weight=1.41) + G.add_edge(2, 1, weight=1.41) + G.add_edge(2, 3) + G.add_edge(3, 4, weight=3.14) + truth = {frozenset({1, 2}), frozenset({3, 4})} + self._check_communities(G, truth, weight="weight") + + def test_seed_argument(self): + G = nx.karate_club_graph() + C = nx.community.fast_label_propagation_communities(G, seed=2023) + truth = {frozenset(c) for c in C} + self._check_communities(G, truth, seed=2023) + # smoke test that seed=None works + C = nx.community.fast_label_propagation_communities(G, seed=None) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_leiden.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_leiden.py new file mode 100644 index 
0000000..7e61b94 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_leiden.py @@ -0,0 +1,138 @@ +import pytest + +import networkx as nx +from networkx.algorithms.community import leiden_communities, leiden_partitions + +# Leiden is not yet implemented by networkx, so only run tests in this file for +# backends that implement Leiden. +no_backends_for_leiden_communities = ( + "not set(nx.config.backend_priority.algos) & leiden_communities.backends" +) + +no_backends_for_leiden_partitions = ( + "not set(nx.config.backend_priority.algos) & leiden_partitions.backends" +) + + +def test_leiden_with_nx_backend(): + G = nx.karate_club_graph() + with pytest.raises(NotImplementedError): + nx.community.leiden_partitions(G, backend="networkx") + with pytest.raises(NotImplementedError): + nx.community.leiden_communities(G, backend="networkx") + + +@pytest.mark.skipif(no_backends_for_leiden_communities) +def test_modularity_increase(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition = [{u} for u in G.nodes()] + mod = nx.community.modularity(G, partition) + partition = nx.community.leiden_communities(G) + + assert nx.community.modularity(G, partition) > mod + + +@pytest.mark.skipif(no_backends_for_leiden_communities) +def test_valid_partition(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition = nx.community.leiden_communities(G) + + assert nx.community.is_partition(G, partition) + + +@pytest.mark.skipif(no_backends_for_leiden_partitions) +def test_partition_iterator(): + G = nx.path_graph(15) + parts_iter = nx.community.leiden_partitions(G, seed=42) + first_part = next(parts_iter) + first_copy = [s.copy() for s in first_part] + + # check 1st part stays fixed even after 2nd iteration (like gh-5901 in louvain) + assert first_copy[0] == first_part[0] + second_part = next(parts_iter) + assert first_copy[0] == first_part[0] + + +@pytest.mark.skipif(no_backends_for_leiden_communities) +def test_none_weight_param(): + G = nx.karate_club_graph() + nx.set_edge_attributes( + G, {edge: i * i for i, edge in enumerate(G.edges)}, name="foo" + ) + + partition1 = nx.community.leiden_communities(G, weight=None, seed=2) + partition2 = nx.community.leiden_communities(G, weight="foo", seed=2) + partition3 = nx.community.leiden_communities(G, weight="weight", seed=2) + + assert partition1 != partition2 + assert partition2 != partition3 + + +@pytest.mark.skipif(no_backends_for_leiden_communities) +def test_quality(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = nx.MultiGraph(G) + + partition = nx.community.leiden_communities(G) + partition2 = nx.community.leiden_communities(H) + + quality = nx.community.partition_quality(G, partition)[0] + quality2 = nx.community.partition_quality(H, partition2)[0] + + assert quality >= 0.65 + assert quality2 >= 0.65 + + +@pytest.mark.skipif(no_backends_for_leiden_communities) +def test_resolution(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + + partition1 = nx.community.leiden_communities(G, resolution=0.5, seed=12) + partition2 = nx.community.leiden_communities(G, seed=12) + partition3 = nx.community.leiden_communities(G, resolution=2, seed=12) + + assert len(partition1) <= len(partition2) + assert len(partition2) <= len(partition3) + + +@pytest.mark.skipif(no_backends_for_leiden_communities) +def 
test_empty_graph(): + G = nx.Graph() + G.add_nodes_from(range(5)) + expected = [{0}, {1}, {2}, {3}, {4}] + assert nx.community.leiden_communities(G) == expected + + +@pytest.mark.skipif(no_backends_for_leiden_communities) +def test_directed_not_implemented(): + G = nx.cycle_graph(4, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXNotImplemented): + nx.community.leiden_communities(G) + + +@pytest.mark.skipif(no_backends_for_leiden_partitions) +@pytest.mark.skipif(no_backends_for_leiden_communities) +def test_max_level(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + parts_iter = nx.community.leiden_partitions(G, seed=42) + for max_level, expected in enumerate(parts_iter, 1): + partition = nx.community.leiden_communities(G, max_level=max_level, seed=42) + assert partition == expected + assert max_level > 1 # Ensure we are actually testing max_level + # max_level is an upper limit; it's okay if we stop before it's hit. + partition = nx.community.leiden_communities(G, max_level=max_level + 1, seed=42) + assert partition == expected + with pytest.raises( + ValueError, match="max_level argument must be a positive integer" + ): + nx.community.leiden_communities(G, max_level=0) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_local.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_local.py new file mode 100644 index 0000000..d0ec43f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_local.py @@ -0,0 +1,76 @@ +import pytest + +import networkx as nx + + +def test_greedy_source_expansion_karate_club(): + G = nx.karate_club_graph() + + community = nx.community.greedy_source_expansion(G, source=16) + + expected = {0, 4, 5, 6, 10, 16} + + assert community == expected + + +def test_greedy_source_expansion_cutoff(): + G = nx.karate_club_graph() + + community = nx.community.greedy_source_expansion(G, source=16, cutoff=3) + + assert community == {5, 6, 16} + + +def test_greedy_source_expansion_invalid_method(): + G = nx.karate_club_graph() + + with pytest.raises(ValueError): + nx.community.greedy_source_expansion(G, source=16, cutoff=3, method="invalid") + + +def test_greedy_source_expansion_connected_component(): + G_edges = [(0, 2), (0, 1), (1, 0), (2, 1), (2, 0), (3, 4), (4, 3)] + G = nx.Graph(G_edges) + expected = {0, 1, 2} + community = nx.community.greedy_source_expansion(G, source=0) + assert community == expected + + +def test_greedy_source_expansion_directed_graph(): + G_edges = [ + (0, 2), + (0, 1), + (1, 0), + (2, 1), + (2, 0), + (3, 4), + (4, 3), + (4, 5), + (5, 3), + (5, 6), + (0, 6), + ] + G = nx.DiGraph(G_edges) + + expected = {0, 1, 2, 6} + community = nx.community.greedy_source_expansion(G, source=0) + assert community == expected + + +def test_greedy_source_expansion_multigraph(): + G = nx.MultiGraph(nx.karate_club_graph()) + G.add_edge(0, 1) + G.add_edge(0, 9) + + expected = {0, 4, 5, 6, 10, 16} + + community = nx.community.greedy_source_expansion(G, source=16) + + assert community == expected + + +def test_greedy_source_expansion_empty_graph(): + G = nx.Graph() + G.add_nodes_from(range(5)) + expected = {0} + assert nx.community.greedy_source_expansion(G, source=0) == expected diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_louvain.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_louvain.py new file mode 100644 index 0000000..816e6f1 --- 
/dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_louvain.py @@ -0,0 +1,264 @@ +import pytest + +import networkx as nx + + +def test_modularity_increase(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition = [{u} for u in G.nodes()] + mod = nx.community.modularity(G, partition) + partition = nx.community.louvain_communities(G) + + assert nx.community.modularity(G, partition) > mod + + +def test_valid_partition(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = G.to_directed() + partition = nx.community.louvain_communities(G) + partition2 = nx.community.louvain_communities(H) + + assert nx.community.is_partition(G, partition) + assert nx.community.is_partition(H, partition2) + + +def test_karate_club_partition(): + G = nx.karate_club_graph() + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + partition = nx.community.louvain_communities(G, seed=2, weight=None) + + assert part == partition + + +def test_partition_iterator(): + G = nx.path_graph(15) + parts_iter = nx.community.louvain_partitions(G, seed=42) + first_part = next(parts_iter) + first_copy = [s.copy() for s in first_part] + + # gh-5901 reports sets changing after next partition is yielded + assert first_copy[0] == first_part[0] + second_part = next(parts_iter) + assert first_copy[0] == first_part[0] + + +def test_undirected_selfloops(): + G = nx.karate_club_graph() + expected_partition = nx.community.louvain_communities(G, seed=2, weight=None) + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + assert expected_partition == part + + G.add_weighted_edges_from([(i, i, i * 1000) for i in range(9)]) + # large self-loop weight impacts partition + partition = nx.community.louvain_communities(G, seed=2, weight="weight") + assert part != partition + + # small self-loop weights aren't enough to impact partition in this graph + partition = nx.community.louvain_communities(G, seed=2, weight=None) + assert part == partition + + +def test_directed_selfloops(): + G = nx.DiGraph() + G.add_nodes_from(range(11)) + G_edges = [ + (0, 2), + (0, 1), + (1, 0), + (2, 1), + (2, 0), + (3, 4), + (4, 3), + (7, 8), + (8, 7), + (9, 10), + (10, 9), + ] + G.add_edges_from(G_edges) + G_expected_partition = nx.community.louvain_communities(G, seed=123, weight=None) + + G.add_weighted_edges_from([(i, i, i * 1000) for i in range(3)]) + # large self-loop weight impacts partition + G_partition = nx.community.louvain_communities(G, seed=123, weight="weight") + assert G_partition != G_expected_partition + + # small self-loop weights aren't enough to impact partition in this graph + G_partition = nx.community.louvain_communities(G, seed=123, weight=None) + assert G_partition == G_expected_partition + + +def test_directed_partition(): + """ + Test 2 cases that were looping infinitely + from issues #5175 and #5704 + """ + G = nx.DiGraph() + H = nx.DiGraph() + G.add_nodes_from(range(10)) + H.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G_edges = [ + (0, 2), + (0, 1), + (1, 0), + (2, 1), + (2, 0), + (3, 4), + (4, 3), + (7, 8), + (8, 7), + (9, 10), + (10, 9), + ] + H_edges = [ + (1, 2), + (1, 6), + (1, 9), + (2, 3), + (2, 4), + (2, 5), + (3, 4), + (4, 3), + (4, 5), + (5, 4), + (6, 7), + (6, 8), + 
(9, 10), + (9, 11), + (10, 11), + (11, 10), + ] + G.add_edges_from(G_edges) + H.add_edges_from(H_edges) + + G_expected_partition = [{0, 1, 2}, {3, 4}, {5}, {6}, {8, 7}, {9, 10}] + G_partition = nx.community.louvain_communities(G, seed=123, weight=None) + + H_expected_partition = [{2, 3, 4, 5}, {8, 1, 6, 7}, {9, 10, 11}] + H_partition = nx.community.louvain_communities(H, seed=123, weight=None) + + assert G_partition == G_expected_partition + assert H_partition == H_expected_partition + + +def test_none_weight_param(): + G = nx.karate_club_graph() + nx.set_edge_attributes( + G, {edge: i * i for i, edge in enumerate(G.edges)}, name="foo" + ) + + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + partition1 = nx.community.louvain_communities(G, weight=None, seed=2) + partition2 = nx.community.louvain_communities(G, weight="foo", seed=2) + partition3 = nx.community.louvain_communities(G, weight="weight", seed=2) + + assert part == partition1 + assert part != partition2 + assert part != partition3 + assert partition2 != partition3 + + +def test_quality(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = nx.gn_graph(200, seed=1234) + I = nx.MultiGraph(G) + J = nx.MultiDiGraph(H) + + partition = nx.community.louvain_communities(G) + partition2 = nx.community.louvain_communities(H) + partition3 = nx.community.louvain_communities(I) + partition4 = nx.community.louvain_communities(J) + + quality = nx.community.partition_quality(G, partition)[0] + quality2 = nx.community.partition_quality(H, partition2)[0] + quality3 = nx.community.partition_quality(I, partition3)[0] + quality4 = nx.community.partition_quality(J, partition4)[0] + + assert quality >= 0.65 + assert quality2 >= 0.65 + assert quality3 >= 0.65 + assert quality4 >= 0.65 + + +def test_multigraph(): + G = nx.karate_club_graph() + H = nx.MultiGraph(G) + G.add_edge(0, 1, weight=10) + H.add_edge(0, 1, weight=9) + G.add_edge(0, 9, foo=20) + H.add_edge(0, 9, foo=20) + + partition1 = nx.community.louvain_communities(G, seed=1234) + partition2 = nx.community.louvain_communities(H, seed=1234) + partition3 = nx.community.louvain_communities(H, weight="foo", seed=1234) + + assert partition1 == partition2 != partition3 + + +def test_resolution(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + + partition1 = nx.community.louvain_communities(G, resolution=0.5, seed=12) + partition2 = nx.community.louvain_communities(G, seed=12) + partition3 = nx.community.louvain_communities(G, resolution=2, seed=12) + + assert len(partition1) <= len(partition2) <= len(partition3) + + +def test_threshold(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition1 = nx.community.louvain_communities(G, threshold=0.3, seed=2) + partition2 = nx.community.louvain_communities(G, seed=2) + mod1 = nx.community.modularity(G, partition1) + mod2 = nx.community.modularity(G, partition2) + + assert mod1 <= mod2 + + +def test_empty_graph(): + G = nx.Graph() + G.add_nodes_from(range(5)) + expected = [{0}, {1}, {2}, {3}, {4}] + assert nx.community.louvain_communities(G) == expected + + +def test_max_level(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + parts_iter = nx.community.louvain_partitions(G, seed=42) + for max_level, expected in enumerate(parts_iter, 1): 
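+ # louvain_partitions yields one partition per aggregation level; the k-th + # yielded partition should equal louvain_communities with max_level=k.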
+ partition = nx.community.louvain_communities(G, max_level=max_level, seed=42) + assert partition == expected + assert max_level > 1 # Ensure we are actually testing max_level + # max_level is an upper limit; it's okay if we stop before it's hit. + partition = nx.community.louvain_communities(G, max_level=max_level + 1, seed=42) + assert partition == expected + with pytest.raises( + ValueError, match="max_level argument must be a positive integer" + ): + nx.community.louvain_communities(G, max_level=0) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_lukes.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_lukes.py new file mode 100644 index 0000000..cfa48f0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_lukes.py @@ -0,0 +1,152 @@ +from itertools import product + +import pytest + +import networkx as nx + +EWL = "e_weight" +NWL = "n_weight" + + +# first test from the Lukes original paper +def paper_1_case(float_edge_wt=False, explicit_node_wt=True, directed=False): + # problem-specific constants + limit = 3 + + # configuration + if float_edge_wt: + shift = 0.001 + else: + shift = 0 + + if directed: + example_1 = nx.DiGraph() + else: + example_1 = nx.Graph() + + # graph creation + example_1.add_edge(1, 2, **{EWL: 3 + shift}) + example_1.add_edge(1, 4, **{EWL: 2 + shift}) + example_1.add_edge(2, 3, **{EWL: 4 + shift}) + example_1.add_edge(2, 5, **{EWL: 6 + shift}) + + # node weights + if explicit_node_wt: + nx.set_node_attributes(example_1, 1, NWL) + wtu = NWL + else: + wtu = None + + # partitioning + clusters_1 = { + frozenset(x) + for x in nx.community.lukes_partitioning( + example_1, limit, node_weight=wtu, edge_weight=EWL + ) + } + + return clusters_1 + + +# second test from the Lukes original paper +def paper_2_case(explicit_edge_wt=True, directed=False): + # problem specific constants + byte_block_size = 32 + + # configuration + if directed: + example_2 = nx.DiGraph() + else: + example_2 = nx.Graph() + + if explicit_edge_wt: + edic = {EWL: 1} + wtu = EWL + else: + edic = {} + wtu = None + + # graph creation + example_2.add_edge("name", "home_address", **edic) + example_2.add_edge("name", "education", **edic) + example_2.add_edge("education", "bs", **edic) + example_2.add_edge("education", "ms", **edic) + example_2.add_edge("education", "phd", **edic) + example_2.add_edge("name", "telephone", **edic) + example_2.add_edge("telephone", "home", **edic) + example_2.add_edge("telephone", "office", **edic) + example_2.add_edge("office", "no1", **edic) + example_2.add_edge("office", "no2", **edic) + + example_2.nodes["name"][NWL] = 20 + example_2.nodes["education"][NWL] = 10 + example_2.nodes["bs"][NWL] = 1 + example_2.nodes["ms"][NWL] = 1 + example_2.nodes["phd"][NWL] = 1 + example_2.nodes["home_address"][NWL] = 8 + example_2.nodes["telephone"][NWL] = 8 + example_2.nodes["home"][NWL] = 8 + example_2.nodes["office"][NWL] = 4 + example_2.nodes["no1"][NWL] = 1 + example_2.nodes["no2"][NWL] = 1 + + # partitioning + clusters_2 = { + frozenset(x) + for x in nx.community.lukes_partitioning( + example_2, byte_block_size, node_weight=NWL, edge_weight=wtu + ) + } + + return clusters_2 + + +def test_paper_1_case(): + ground_truth = {frozenset([1, 4]), frozenset([2, 3, 5])} + + tf = (True, False) + for flt, nwt, drc in product(tf, tf, tf): + part = paper_1_case(flt, nwt, drc) + assert part == ground_truth + + +def test_paper_2_case(): + ground_truth = { + frozenset(["education", "bs", 
"ms", "phd"]), + frozenset(["name", "home_address"]), + frozenset(["telephone", "home", "office", "no1", "no2"]), + } + + tf = (True, False) + for ewt, drc in product(tf, tf): + part = paper_2_case(ewt, drc) + assert part == ground_truth + + +def test_mandatory_tree(): + not_a_tree = nx.complete_graph(4) + + with pytest.raises(nx.NotATree): + nx.community.lukes_partitioning(not_a_tree, 5) + + +def test_mandatory_integrality(): + byte_block_size = 32 + + ex_1_broken = nx.DiGraph() + + ex_1_broken.add_edge(1, 2, **{EWL: 3.2}) + ex_1_broken.add_edge(1, 4, **{EWL: 2.4}) + ex_1_broken.add_edge(2, 3, **{EWL: 4.0}) + ex_1_broken.add_edge(2, 5, **{EWL: 6.3}) + + ex_1_broken.nodes[1][NWL] = 1.2 # ! + ex_1_broken.nodes[2][NWL] = 1 + ex_1_broken.nodes[3][NWL] = 1 + ex_1_broken.nodes[4][NWL] = 1 + ex_1_broken.nodes[5][NWL] = 2 + + with pytest.raises(TypeError): + nx.community.lukes_partitioning( + ex_1_broken, byte_block_size, node_weight=NWL, edge_weight=EWL + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_modularity_max.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_modularity_max.py new file mode 100644 index 0000000..0121367 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_modularity_max.py @@ -0,0 +1,340 @@ +import pytest + +import networkx as nx +from networkx.algorithms.community import ( + greedy_modularity_communities, + naive_greedy_modularity_communities, +) + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities(func): + G = nx.karate_club_graph() + john_a = frozenset( + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + ) + mr_hi = frozenset([0, 4, 5, 6, 10, 11, 16, 19]) + overlap = frozenset([1, 2, 3, 7, 9, 12, 13, 17, 21]) + expected = {john_a, overlap, mr_hi} + assert set(func(G, weight=None)) == expected + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities_categorical_labels(func): + # Using other than 0-starting contiguous integers as node-labels. 
+ G = nx.Graph( + [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), # inter-community edge + ("d", "e"), + ("d", "f"), + ("d", "g"), + ("f", "g"), + ("d", "e"), + ("f", "e"), + ] + ) + expected = {frozenset({"f", "g", "e", "d"}), frozenset({"a", "b", "c"})} + assert set(func(G)) == expected + + +def test_greedy_modularity_communities_components(): + # Test for gh-5530 + G = nx.Graph([(0, 1), (2, 3), (4, 5), (5, 6)]) + # usual case with 3 components + assert greedy_modularity_communities(G) == [{4, 5, 6}, {0, 1}, {2, 3}] + # best_n can make the algorithm continue even when modularity goes down + assert greedy_modularity_communities(G, best_n=3) == [{4, 5, 6}, {0, 1}, {2, 3}] + assert greedy_modularity_communities(G, best_n=2) == [{0, 1, 4, 5, 6}, {2, 3}] + assert greedy_modularity_communities(G, best_n=1) == [{0, 1, 2, 3, 4, 5, 6}] + + +def test_greedy_modularity_communities_relabeled(): + # Test for gh-4966 + G = nx.balanced_tree(2, 2) + mapping = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 7: "h"} + G = nx.relabel_nodes(G, mapping) + expected = [frozenset({"e", "d", "a", "b"}), frozenset({"c", "f", "g"})] + assert greedy_modularity_communities(G) == expected + + +def test_greedy_modularity_communities_directed(): + G = nx.DiGraph( + [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), # inter-community edge + ("d", "e"), + ("d", "f"), + ("d", "g"), + ("f", "g"), + ("d", "e"), + ("f", "e"), + ] + ) + expected = [frozenset({"f", "g", "e", "d"}), frozenset({"a", "b", "c"})] + assert greedy_modularity_communities(G) == expected + + # with loops + G = nx.DiGraph() + G.add_edges_from( + [(1, 1), (1, 2), (1, 3), (2, 3), (1, 4), (4, 4), (5, 5), (4, 5), (4, 6), (5, 6)] + ) + expected = [frozenset({1, 2, 3}), frozenset({4, 5, 6})] + assert greedy_modularity_communities(G) == expected + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities_weighted(func): + G = nx.balanced_tree(2, 3) + for a, b in G.edges: + if ((a == 1) or (a == 2)) and (b != 0): + G[a][b]["weight"] = 10.0 + else: + G[a][b]["weight"] = 1.0 + + expected = [{0, 1, 3, 4, 7, 8, 9, 10}, {2, 5, 6, 11, 12, 13, 14}] + + assert func(G, weight="weight") == expected + assert func(G, weight="weight", resolution=0.9) == expected + assert func(G, weight="weight", resolution=0.3) == expected + assert func(G, weight="weight", resolution=1.1) != expected + + +def test_modularity_communities_floating_point(): + # check for floating point error when used as key in the mapped_queue dict. + # Test for gh-4992 and gh-5000 + G = nx.Graph() + G.add_weighted_edges_from( + [(0, 1, 12), (1, 4, 71), (2, 3, 15), (2, 4, 10), (3, 6, 13)] + ) + expected = [{0, 1, 4}, {2, 3, 6}] + assert greedy_modularity_communities(G, weight="weight") == expected + assert ( + greedy_modularity_communities(G, weight="weight", resolution=0.99) == expected + ) + + +def test_modularity_communities_directed_weighted(): + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 3, 3), + (2, 3, 6), + (2, 6, 1), + (1, 4, 1), + (4, 5, 3), + (4, 6, 7), + (5, 6, 2), + (5, 7, 5), + (5, 8, 4), + (6, 8, 3), + ] + ) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # A large weight of the edge (2, 6) causes 6 to change group, even if it shares + # only one connection with the new group and 3 with the old one. 
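+ # Modularity aggregates edge weights rather than edge counts, so a single + # heavy edge can outweigh several unit-weight ones.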
+ G[2][6]["weight"] = 20 + expected = [frozenset({1, 2, 3, 6}), frozenset({4, 5, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greedy_modularity_communities_multigraph(): + G = nx.MultiGraph() + G.add_edges_from( + [ + (1, 2), + (1, 2), + (1, 3), + (2, 3), + (1, 4), + (2, 4), + (4, 5), + (5, 6), + (5, 7), + (5, 7), + (6, 7), + (7, 8), + (5, 8), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G) == expected + + # Converting (4, 5) into a multi-edge causes node 4 to change group. + G.add_edge(4, 5) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G) == expected + + +def test_greedy_modularity_communities_multigraph_weighted(): + G = nx.MultiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 2, 3), + (1, 3, 6), + (1, 3, 6), + (2, 3, 4), + (1, 4, 1), + (1, 4, 1), + (2, 4, 3), + (2, 4, 3), + (4, 5, 1), + (5, 6, 3), + (5, 6, 7), + (5, 6, 4), + (5, 7, 9), + (5, 7, 9), + (6, 7, 8), + (7, 8, 2), + (7, 8, 2), + (5, 8, 6), + (5, 8, 6), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # Adding multi-edge (4, 5, 16) causes node 4 to change group. + G.add_edge(4, 5, weight=16) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # Increasing the weight of edge (1, 4) causes node 4 to return to the former group. + G[1][4][1]["weight"] = 3 + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greed_modularity_communities_multidigraph(): + G = nx.MultiDiGraph() + G.add_edges_from( + [ + (1, 2), + (1, 2), + (3, 1), + (2, 3), + (2, 3), + (3, 2), + (1, 4), + (2, 4), + (4, 2), + (4, 5), + (5, 6), + (5, 6), + (6, 5), + (5, 7), + (6, 7), + (7, 8), + (5, 8), + (8, 4), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greed_modularity_communities_multidigraph_weighted(): + G = nx.MultiDiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 2, 3), + (3, 1, 6), + (1, 3, 6), + (3, 2, 4), + (1, 4, 2), + (1, 4, 5), + (2, 4, 3), + (3, 2, 8), + (4, 2, 3), + (4, 3, 5), + (4, 5, 2), + (5, 6, 3), + (5, 6, 7), + (6, 5, 4), + (5, 7, 9), + (5, 7, 9), + (7, 6, 8), + (7, 8, 2), + (8, 7, 2), + (5, 8, 6), + (5, 8, 6), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_resolution_parameter_impact(): + G = nx.barbell_graph(5, 3) + + gamma = 1 + expected = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + gamma = 2.5 + expected = [{0, 1, 2, 3}, {9, 10, 11, 12}, {5, 6, 7}, {4}, {8}] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + gamma = 0.3 + expected = [frozenset(range(8)), frozenset(range(8, 13))] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + +def test_cutoff_parameter(): + G = 
nx.circular_ladder_graph(4) + + # No aggregation: + expected = [{k} for k in range(8)] + assert greedy_modularity_communities(G, cutoff=8) == expected + + # Aggregation to half order (number of nodes) + expected = [{k, k + 1} for k in range(0, 8, 2)] + assert greedy_modularity_communities(G, cutoff=4) == expected + + # Default aggregation case (here, 2 communities emerge) + expected = [frozenset(range(4)), frozenset(range(4, 8))] + assert greedy_modularity_communities(G, cutoff=1) == expected + + +def test_best_n(): + G = nx.barbell_graph(5, 3) + + # Same result as without enforcing cutoff: + best_n = 3 + expected = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + # One additional merging step: + best_n = 2 + expected = [frozenset(range(8)), frozenset(range(8, 13))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + # Two additional merging steps: + best_n = 1 + expected = [frozenset(range(13))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + +def test_greedy_modularity_communities_corner_cases(): + G = nx.empty_graph() + assert nx.community.greedy_modularity_communities(G) == [] + G.add_nodes_from(range(3)) + assert nx.community.greedy_modularity_communities(G) == [{0}, {1}, {2}] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_quality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_quality.py new file mode 100644 index 0000000..c502c7e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_quality.py @@ -0,0 +1,139 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.quality` +module. + +""" + +import pytest + +import networkx as nx +from networkx import barbell_graph +from networkx.algorithms.community import modularity, partition_quality +from networkx.algorithms.community.quality import inter_community_edges + + +class TestPerformance: + """Unit tests for the :func:`performance` function.""" + + def test_bad_partition(self): + """Tests that a poor partition has a low performance measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 4}, {2, 3, 5}] + assert 8 / 15 == pytest.approx(partition_quality(G, partition)[1], abs=1e-7) + + def test_good_partition(self): + """Tests that a good partition has a high performance measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + assert 14 / 15 == pytest.approx(partition_quality(G, partition)[1], abs=1e-7) + + +class TestCoverage: + """Unit tests for the :func:`coverage` function.""" + + def test_bad_partition(self): + """Tests that a poor partition has a low coverage measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 4}, {2, 3, 5}] + assert 3 / 7 == pytest.approx(partition_quality(G, partition)[0], abs=1e-7) + + def test_good_partition(self): + """Tests that a good partition has a high coverage measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + assert 6 / 7 == pytest.approx(partition_quality(G, partition)[0], abs=1e-7) + + +def test_modularity(): + G = nx.barbell_graph(3, 0) + C = [{0, 1, 4}, {2, 3, 5}] + assert (-16 / (14**2)) == pytest.approx(modularity(G, C), abs=1e-7) + C = [{0, 1, 2}, {3, 4, 5}] + assert (35 * 2) / (14**2) == pytest.approx(modularity(G, C), abs=1e-7) + + n = 1000 + G = nx.erdos_renyi_graph(n, 0.09, seed=42, directed=True) + C = [set(range(n // 2)), set(range(n // 2, n))] + assert 0.00017154251389292754 
== pytest.approx(modularity(G, C), abs=1e-7) + + G = nx.margulis_gabber_galil_graph(10) + mid_value = G.number_of_nodes() // 2 + nodes = list(G.nodes) + C = [set(nodes[:mid_value]), set(nodes[mid_value:])] + assert 0.13 == pytest.approx(modularity(G, C), abs=1e-7) + + G = nx.DiGraph() + G.add_edges_from([(2, 1), (2, 3), (3, 4)]) + C = [{1, 2}, {3, 4}] + assert 2 / 9 == pytest.approx(modularity(G, C), abs=1e-7) + + +def test_modularity_resolution(): + G = nx.barbell_graph(3, 0) + C = [{0, 1, 4}, {2, 3, 5}] + assert modularity(G, C) == pytest.approx(3 / 7 - 100 / 14**2) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(3 / 7 - gamma * 100 / 14**2) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(3 / 7 - gamma * 100 / 14**2) + + C = [{0, 1, 2}, {3, 4, 5}] + assert modularity(G, C) == pytest.approx(6 / 7 - 98 / 14**2) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(6 / 7 - gamma * 98 / 14**2) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(6 / 7 - gamma * 98 / 14**2) + + G = nx.barbell_graph(5, 3) + C = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + gamma = 1 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=1: modularity = 0.518229 + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + + C = [{0, 1, 2, 3}, {9, 10, 11, 12}, {5, 6, 7}, {4}, {8}] + gamma = 1 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + gamma = 2.5 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=2.5: modularity = -0.06553819 + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + + C = [frozenset(range(8)), frozenset(range(8, 13))] + gamma = 1 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + gamma = 0.3 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=0.3: modularity = 0.805990 + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + + +def test_inter_community_edges_with_digraphs(): + G = nx.complete_graph(2, create_using=nx.DiGraph()) + partition = [{0}, {1}] + assert inter_community_edges(G, partition) == 2 + + G = nx.complete_graph(10, create_using=nx.DiGraph()) + partition = [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}] + assert inter_community_edges(G, partition) == 70 + + G = nx.cycle_graph(4, create_using=nx.DiGraph()) + partition = [{0, 1}, {2, 3}] + assert inter_community_edges(G, partition) == 2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_utils.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_utils.py new file mode 100644 index 0000000..ea019db --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/community/tests/test_utils.py @@ -0,0 +1,26 @@ +"""Unit 
tests for the :mod:`networkx.algorithms.community.utils` module.""" + +import networkx as nx + + +def test_is_partition(): + G = nx.empty_graph(3) + assert nx.community.is_partition(G, [{0, 1}, {2}]) + assert nx.community.is_partition(G, ({0, 1}, {2})) + assert nx.community.is_partition(G, ([0, 1], [2])) + assert nx.community.is_partition(G, [[0, 1], [2]]) + + +def test_not_covering(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0}, {1}]) + + +def test_not_disjoint(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0, 1}, {1, 2}]) + + +def test_not_node(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0, 1}, {3}]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__init__.py new file mode 100644 index 0000000..f9ae2ca --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__init__.py @@ -0,0 +1,6 @@ +from .connected import * +from .strongly_connected import * +from .weakly_connected import * +from .attracting import * +from .biconnected import * +from .semiconnected import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b2485a3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/attracting.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/attracting.cpython-312.pyc new file mode 100644 index 0000000..af32275 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/attracting.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/biconnected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/biconnected.cpython-312.pyc new file mode 100644 index 0000000..097cfe7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/biconnected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/connected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/connected.cpython-312.pyc new file mode 100644 index 0000000..109900d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/connected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/semiconnected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/semiconnected.cpython-312.pyc new file mode 100644 index 0000000..d166efb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/semiconnected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/strongly_connected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/strongly_connected.cpython-312.pyc new file mode 100644 index 0000000..46d3a20 Binary files 
/dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/strongly_connected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/weakly_connected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/weakly_connected.cpython-312.pyc new file mode 100644 index 0000000..1cacc39 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/__pycache__/weakly_connected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/attracting.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/attracting.py new file mode 100644 index 0000000..3d77cd9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/attracting.py @@ -0,0 +1,115 @@ +"""Attracting components.""" + +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_attracting_components", + "attracting_components", + "is_attracting_component", +] + + +@not_implemented_for("undirected") +@nx._dispatchable +def attracting_components(G): + """Generates the attracting components in `G`. + + An attracting component in a directed graph `G` is a strongly connected + component with the property that a random walker on the graph will never + leave the component, once it enters the component. + + The nodes in attracting components can also be thought of as recurrent + nodes. If a random walker enters the attractor containing the node, then + the node will be visited infinitely often. + + To obtain induced subgraphs on each component use: + ``(G.subgraph(c).copy() for c in attracting_components(G))`` + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. + + Returns + ------- + attractors : generator of sets + A generator of sets of nodes, one for each attracting component of G. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + See Also + -------- + number_attracting_components + is_attracting_component + + """ + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + for n in cG: + if cG.out_degree(n) == 0: + yield scc[n] + + +@not_implemented_for("undirected") +@nx._dispatchable +def number_attracting_components(G): + """Returns the number of attracting components in `G`. + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. + + Returns + ------- + n : int + The number of attracting components in G. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + See Also + -------- + attracting_components + is_attracting_component + + """ + return sum(1 for ac in attracting_components(G)) + + +@not_implemented_for("undirected") +@nx._dispatchable +def is_attracting_component(G): + """Returns True if `G` consists of a single attracting component. + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. + + Returns + ------- + attracting : bool + True if `G` has a single attracting component. Otherwise, False. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. 
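+ + Examples + -------- + A two-node cycle forms a single attracting component that covers the + whole graph; an extra edge leaving the cycle moves the attractor + elsewhere. + + >>> G = nx.DiGraph([(0, 1), (1, 0)]) + >>> nx.is_attracting_component(G) + True + >>> G.add_edge(1, 2) + >>> nx.is_attracting_component(G) + False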
+ + See Also + -------- + attracting_components + number_attracting_components + + """ + ac = list(attracting_components(G)) + if len(ac) == 1: + return len(ac[0]) == len(G) + return False diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/biconnected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/biconnected.py new file mode 100644 index 0000000..fd0f386 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/biconnected.py @@ -0,0 +1,394 @@ +"""Biconnected components and articulation points.""" + +from itertools import chain + +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "biconnected_components", + "biconnected_component_edges", + "is_biconnected", + "articulation_points", +] + + +@not_implemented_for("directed") +@nx._dispatchable +def is_biconnected(G): + """Returns True if the graph is biconnected, False otherwise. + + A graph is biconnected if, and only if, it cannot be disconnected by + removing only one node (and all edges incident on that node). If + removing a node increases the number of disconnected components + in the graph, that node is called an articulation point, or cut + vertex. A biconnected graph has no articulation points. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + biconnected : bool + True if the graph is biconnected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> print(nx.is_biconnected(G)) + False + >>> G.add_edge(0, 3) + >>> print(nx.is_biconnected(G)) + True + + See Also + -------- + biconnected_components + articulation_points + biconnected_component_edges + is_strongly_connected + is_weakly_connected + is_connected + is_semiconnected + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + bccs = biconnected_components(G) + try: + bcc = next(bccs) + except StopIteration: + # No bicomponents (empty graph?) + return False + try: + next(bccs) + except StopIteration: + # Only one bicomponent + return len(bcc) == len(G) + else: + # Multiple bicomponents + return False + + +@not_implemented_for("directed") +@nx._dispatchable +def biconnected_component_edges(G): + """Returns a generator of lists of edges, one list for each biconnected + component of the input graph. + + Biconnected components are maximal subgraphs such that the removal of a + node (and all edges incident on that node) will not disconnect the + subgraph. Note that nodes may be part of more than one biconnected + component. Those nodes are articulation points, or cut vertices. 
+ However, each edge belongs to one, and only one, biconnected component. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + edges : generator of lists + Generator of lists of edges, one list for each bicomponent. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.barbell_graph(4, 2) + >>> print(nx.is_biconnected(G)) + False + >>> bicomponents_edges = list(nx.biconnected_component_edges(G)) + >>> len(bicomponents_edges) + 5 + >>> G.add_edge(2, 8) + >>> print(nx.is_biconnected(G)) + True + >>> bicomponents_edges = list(nx.biconnected_component_edges(G)) + >>> len(bicomponents_edges) + 1 + + See Also + -------- + is_biconnected, + biconnected_components, + articulation_points, + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + yield from _biconnected_dfs(G, components=True) + + +@not_implemented_for("directed") +@nx._dispatchable +def biconnected_components(G): + """Returns a generator of sets of nodes, one set for each biconnected + component of the graph + + Biconnected components are maximal subgraphs such that the removal of a + node (and all edges incident on that node) will not disconnect the + subgraph. Note that nodes may be part of more than one biconnected + component. Those nodes are articulation points, or cut vertices. The + removal of articulation points will increase the number of connected + components of the graph. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + nodes : generator + Generator of sets of nodes, one set for each biconnected component. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.lollipop_graph(5, 1) + >>> print(nx.is_biconnected(G)) + False + >>> bicomponents = list(nx.biconnected_components(G)) + >>> len(bicomponents) + 2 + >>> G.add_edge(0, 5) + >>> print(nx.is_biconnected(G)) + True + >>> bicomponents = list(nx.biconnected_components(G)) + >>> len(bicomponents) + 1 + + You can generate a sorted list of biconnected components, largest + first, using sort. + + >>> G.remove_edge(0, 5) + >>> [len(c) for c in sorted(nx.biconnected_components(G), key=len, reverse=True)] + [5, 2] + + If you only want the largest connected component, it's more + efficient to use max instead of sort. 
+ + >>> Gc = max(nx.biconnected_components(G), key=len) + + To create the components as subgraphs use: + ``(G.subgraph(c).copy() for c in biconnected_components(G))`` + + See Also + -------- + is_biconnected + articulation_points + biconnected_component_edges + k_components : this function is a special case where k=2 + bridge_components : similar to this function, but is defined using + 2-edge-connectivity instead of 2-node-connectivity. + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + for comp in _biconnected_dfs(G, components=True): + yield set(chain.from_iterable(comp)) + + +@not_implemented_for("directed") +@nx._dispatchable +def articulation_points(G): + """Yield the articulation points, or cut vertices, of a graph. + + An articulation point or cut vertex is any node whose removal (along with + all its incident edges) increases the number of connected components of + a graph. An undirected connected graph without articulation points is + biconnected. Articulation points belong to more than one biconnected + component of a graph. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Yields + ------ + node + An articulation point in the graph. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + + >>> G = nx.barbell_graph(4, 2) + >>> print(nx.is_biconnected(G)) + False + >>> len(list(nx.articulation_points(G))) + 4 + >>> G.add_edge(2, 8) + >>> print(nx.is_biconnected(G)) + True + >>> len(list(nx.articulation_points(G))) + 0 + + See Also + -------- + is_biconnected + biconnected_components + biconnected_component_edges + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. 
doi:10.1145/362248.362272 + + """ + seen = set() + for articulation in _biconnected_dfs(G, components=False): + if articulation not in seen: + seen.add(articulation) + yield articulation + + +@not_implemented_for("directed") +def _biconnected_dfs(G, components=True): + # depth-first search algorithm to generate articulation points + # and biconnected components + visited = set() + for start in G: + if start in visited: + continue + discovery = {start: 0} # time of first discovery of node during search + low = {start: 0} + root_children = 0 + visited.add(start) + edge_stack = [] + stack = [(start, start, iter(G[start]))] + edge_index = {} + while stack: + grandparent, parent, children = stack[-1] + try: + child = next(children) + if grandparent == child: + continue + if child in visited: + if discovery[child] <= discovery[parent]: # back edge + low[parent] = min(low[parent], discovery[child]) + if components: + edge_index[parent, child] = len(edge_stack) + edge_stack.append((parent, child)) + else: + low[child] = discovery[child] = len(discovery) + visited.add(child) + stack.append((parent, child, iter(G[child]))) + if components: + edge_index[parent, child] = len(edge_stack) + edge_stack.append((parent, child)) + + except StopIteration: + stack.pop() + if len(stack) > 1: + if low[parent] >= discovery[grandparent]: + if components: + ind = edge_index[grandparent, parent] + yield edge_stack[ind:] + del edge_stack[ind:] + + else: + yield grandparent + low[grandparent] = min(low[parent], low[grandparent]) + elif stack: # length 1 so grandparent is root + root_children += 1 + if components: + ind = edge_index[grandparent, parent] + yield edge_stack[ind:] + del edge_stack[ind:] + if not components: + # root node is articulation point if it has more than 1 child + if root_children > 1: + yield start diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/connected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/connected.py new file mode 100644 index 0000000..1884789 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/connected.py @@ -0,0 +1,216 @@ +"""Connected components.""" + +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +from ...utils import arbitrary_element + +__all__ = [ + "number_connected_components", + "connected_components", + "is_connected", + "node_connected_component", +] + + +@not_implemented_for("directed") +@nx._dispatchable +def connected_components(G): + """Generate connected components. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each component of G. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + Generate a sorted list of connected components, largest first. + + >>> G = nx.path_graph(4) + >>> nx.add_path(G, [10, 11, 12]) + >>> [len(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)] + [4, 3] + + If you only want the largest connected component, it's more + efficient to use max instead of sort. + + >>> largest_cc = max(nx.connected_components(G), key=len) + + To create the induced subgraph of each component use: + + >>> S = [G.subgraph(c).copy() for c in nx.connected_components(G)] + + See Also + -------- + strongly_connected_components + weakly_connected_components + + Notes + ----- + For undirected graphs only. 
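+ The implementation runs a breadth-first search from every not-yet-seen + node, so the total running time is linear in the number of nodes and + edges.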
+ + """ + seen = set() + n = len(G) + for v in G: + if v not in seen: + c = _plain_bfs(G, n - len(seen), v) + seen.update(c) + yield c + + +@not_implemented_for("directed") +@nx._dispatchable +def number_connected_components(G): + """Returns the number of connected components. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + n : integer + Number of connected components + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (5, 6), (3, 4)]) + >>> nx.number_connected_components(G) + 3 + + See Also + -------- + connected_components + number_weakly_connected_components + number_strongly_connected_components + + Notes + ----- + For undirected graphs only. + + """ + return sum(1 for cc in connected_components(G)) + + +@not_implemented_for("directed") +@nx._dispatchable +def is_connected(G): + """Returns True if the graph is connected, False otherwise. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + connected : bool + True if the graph is connected, false otherwise. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> print(nx.is_connected(G)) + True + + See Also + -------- + is_strongly_connected + is_weakly_connected + is_semiconnected + is_biconnected + connected_components + + Notes + ----- + For undirected graphs only. + + """ + n = len(G) + if n == 0: + raise nx.NetworkXPointlessConcept( + "Connectivity is undefined for the null graph." + ) + return sum(1 for node in _plain_bfs(G, n, arbitrary_element(G))) == len(G) + + +@not_implemented_for("directed") +@nx._dispatchable +def node_connected_component(G, n): + """Returns the set of nodes in the component of graph containing node n. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + n : node label + A node in G + + Returns + ------- + comp : set + A set of nodes in the component of G containing node n. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (5, 6), (3, 4)]) + >>> nx.node_connected_component(G, 0) # nodes of component that contains node 0 + {0, 1, 2} + + See Also + -------- + connected_components + + Notes + ----- + For undirected graphs only. + + """ + return _plain_bfs(G, len(G), n) + + +def _plain_bfs(G, n, source): + """A fast BFS node generator""" + adj = G._adj + seen = {source} + nextlevel = [source] + while nextlevel: + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in adj[v]: + if w not in seen: + seen.add(w) + nextlevel.append(w) + if len(seen) == n: + return seen + return seen diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/semiconnected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/semiconnected.py new file mode 100644 index 0000000..9ca5d76 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/semiconnected.py @@ -0,0 +1,71 @@ +"""Semiconnectedness.""" + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = ["is_semiconnected"] + + +@not_implemented_for("undirected") +@nx._dispatchable +def is_semiconnected(G): + r"""Returns True if the graph is semiconnected, False otherwise. + + A graph is semiconnected if and only if for any pair of nodes, either one + is reachable from the other, or they are mutually reachable. 
+ + This function uses the theorem that a DAG is semiconnected if and only + if, for any topological sort, every pair of consecutive nodes $v_i$ and + $v_{i+1}$ in that sort is joined by an edge $(v_i, v_{i+1})$. That allows + us to check if a non-DAG `G` is semiconnected by condensing the graph: + i.e. constructing a new graph `H` with nodes being the strongly connected + components of `G`, and edges (scc_1, scc_2) if there is an edge + $(v_1, v_2)$ in `G` for some $v_1 \in scc_1$ and $v_2 \in scc_2$. That + results in a DAG, so we compute the topological sort of `H` and check if + for every $n$ there is an edge + $(scc_n, scc_{n+1})$. + + Parameters + ---------- + G : NetworkX graph + A directed graph. + + Returns + ------- + semiconnected : bool + True if the graph is semiconnected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + NetworkXPointlessConcept + If the graph is empty. + + Examples + -------- + >>> G = nx.path_graph(4, create_using=nx.DiGraph()) + >>> print(nx.is_semiconnected(G)) + True + >>> G = nx.DiGraph([(1, 2), (3, 2)]) + >>> print(nx.is_semiconnected(G)) + False + + See Also + -------- + is_strongly_connected + is_weakly_connected + is_connected + is_biconnected + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "Connectivity is undefined for the null graph." + ) + + if not nx.is_weakly_connected(G): + return False + + H = nx.condensation(G) + + return all(H.has_edge(u, v) for u, v in pairwise(nx.topological_sort(H))) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/strongly_connected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/strongly_connected.py new file mode 100644 index 0000000..393728f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/strongly_connected.py @@ -0,0 +1,351 @@ +"""Strongly connected components.""" + +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_strongly_connected_components", + "strongly_connected_components", + "is_strongly_connected", + "kosaraju_strongly_connected_components", + "condensation", +] + + +@not_implemented_for("undirected") +@nx._dispatchable +def strongly_connected_components(G): + """Generate nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each strongly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of strongly connected components, largest first. + + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) + >>> nx.add_cycle(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort. + + >>> largest = max(nx.strongly_connected_components(G), key=len) + + See Also + -------- + connected_components + weakly_connected_components + kosaraju_strongly_connected_components + + Notes + ----- + Uses Tarjan's algorithm [1]_ with Nuutila's modifications [2]_. + This is a nonrecursive version of the algorithm. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal on Computing 1(2):146-160, (1972). + + .. [2] On finding the strongly connected components in a directed graph. + E. Nuutila and E.
Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994).. + + """ + preorder = {} + lowlink = {} + scc_found = set() + scc_queue = [] + i = 0 # Preorder counter + neighbors = {v: iter(G[v]) for v in G} + for source in G: + if source not in scc_found: + queue = [source] + while queue: + v = queue[-1] + if v not in preorder: + i = i + 1 + preorder[v] = i + done = True + for w in neighbors[v]: + if w not in preorder: + queue.append(w) + done = False + break + if done: + lowlink[v] = preorder[v] + for w in G[v]: + if w not in scc_found: + if preorder[w] > preorder[v]: + lowlink[v] = min([lowlink[v], lowlink[w]]) + else: + lowlink[v] = min([lowlink[v], preorder[w]]) + queue.pop() + if lowlink[v] == preorder[v]: + scc = {v} + while scc_queue and preorder[scc_queue[-1]] > preorder[v]: + k = scc_queue.pop() + scc.add(k) + scc_found.update(scc) + yield scc + else: + scc_queue.append(v) + + +@not_implemented_for("undirected") +@nx._dispatchable +def kosaraju_strongly_connected_components(G, source=None): + """Generate nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each strongly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of strongly connected components, largest first. + + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) + >>> nx.add_cycle(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted( + ... nx.kosaraju_strongly_connected_components(G), key=len, reverse=True + ... ) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort. + + >>> largest = max(nx.kosaraju_strongly_connected_components(G), key=len) + + See Also + -------- + strongly_connected_components + + Notes + ----- + Uses Kosaraju's algorithm. + + """ + post = list(nx.dfs_postorder_nodes(G.reverse(copy=False), source=source)) + + seen = set() + while post: + r = post.pop() + if r in seen: + continue + c = nx.dfs_preorder_nodes(G, r) + new = {v for v in c if v not in seen} + seen.update(new) + yield new + + +@not_implemented_for("undirected") +@nx._dispatchable +def number_strongly_connected_components(G): + """Returns number of strongly connected components in graph. + + Parameters + ---------- + G : NetworkX graph + A directed graph. + + Returns + ------- + n : integer + Number of strongly connected components + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph( + ... [(0, 1), (1, 2), (2, 0), (2, 3), (4, 5), (3, 4), (5, 6), (6, 3), (6, 7)] + ... ) + >>> nx.number_strongly_connected_components(G) + 3 + + See Also + -------- + strongly_connected_components + number_connected_components + number_weakly_connected_components + + Notes + ----- + For directed graphs only. + """ + return sum(1 for scc in strongly_connected_components(G)) + + +@not_implemented_for("undirected") +@nx._dispatchable +def is_strongly_connected(G): + """Test directed graph for strong connectivity. + + A directed graph is strongly connected if and only if every vertex in + the graph is reachable from every other vertex. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + connected : bool + True if the graph is strongly connected, False otherwise. 
+ + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0), (2, 4), (4, 2)]) + >>> nx.is_strongly_connected(G) + True + >>> G.remove_edge(2, 3) + >>> nx.is_strongly_connected(G) + False + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + See Also + -------- + is_weakly_connected + is_semiconnected + is_connected + is_biconnected + strongly_connected_components + + Notes + ----- + For directed graphs only. + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + """Connectivity is undefined for the null graph.""" + ) + + return len(next(strongly_connected_components(G))) == len(G) + + +@not_implemented_for("undirected") +@nx._dispatchable(returns_graph=True) +def condensation(G, scc=None): + """Returns the condensation of G. + + The condensation of G is the graph with each of the strongly connected + components contracted into a single node. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph. + + scc: list or generator (optional, default=None) + Strongly connected components. If provided, the elements in + `scc` must partition the nodes in `G`. If not provided, it will be + calculated as scc=nx.strongly_connected_components(G). + + Returns + ------- + C : NetworkX DiGraph + The condensation graph C of G. The node labels are integers + corresponding to the index of the component in the list of + strongly connected components of G. C has a graph attribute named + 'mapping' with a dictionary mapping the original nodes to the + nodes in C to which they belong. Each node in C also has a node + attribute 'members' with the set of original nodes in G that + form the SCC that the node in C represents. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Contracting two sets of strongly connected nodes into two distinct SCC + using the barbell graph. + + >>> G = nx.barbell_graph(4, 0) + >>> G.remove_edge(3, 4) + >>> G = nx.DiGraph(G) + >>> H = nx.condensation(G) + >>> H.nodes.data() + NodeDataView({0: {'members': {0, 1, 2, 3}}, 1: {'members': {4, 5, 6, 7}}}) + >>> H.graph["mapping"] + {0: 0, 1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 1, 7: 1} + + Contracting a complete graph into one single SCC. + + >>> G = nx.complete_graph(7, create_using=nx.DiGraph) + >>> H = nx.condensation(G) + >>> H.nodes + NodeView((0,)) + >>> H.nodes.data() + NodeDataView({0: {'members': {0, 1, 2, 3, 4, 5, 6}}}) + + Notes + ----- + After contracting all strongly connected components to a single node, + the resulting graph is a directed acyclic graph. + + """ + if scc is None: + scc = nx.strongly_connected_components(G) + mapping = {} + members = {} + C = nx.DiGraph() + # Add mapping dict as graph attribute + C.graph["mapping"] = mapping + if len(G) == 0: + return C + for i, component in enumerate(scc): + members[i] = component + mapping.update((n, i) for n in component) + number_of_components = i + 1 + C.add_nodes_from(range(number_of_components)) + C.add_edges_from( + (mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v] + ) + # Add a list of members (ie original nodes) to each node (ie scc) in C. 
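+    # Each condensation node i thus carries the full node set of its SCC,
+    # complementing the per-node 'mapping' stored as a graph attribute above.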
+ nx.set_node_attributes(C, members, "members") + return C diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..dfb4e80 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-312.pyc new file mode 100644 index 0000000..97ebb26 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-312.pyc new file mode 100644 index 0000000..5de7b41 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_connected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_connected.cpython-312.pyc new file mode 100644 index 0000000..37dc6a5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_connected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-312.pyc new file mode 100644 index 0000000..c14733f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-312.pyc new file mode 100644 index 0000000..ea37d71 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-312.pyc new file mode 100644 index 0000000..16ea897 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_attracting.py 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_attracting.py new file mode 100644 index 0000000..336c40d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_attracting.py @@ -0,0 +1,70 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestAttractingComponents: + @classmethod + def setup_class(cls): + cls.G1 = nx.DiGraph() + cls.G1.add_edges_from( + [ + (5, 11), + (11, 2), + (11, 9), + (11, 10), + (7, 11), + (7, 8), + (8, 9), + (3, 8), + (3, 10), + ] + ) + cls.G2 = nx.DiGraph() + cls.G2.add_edges_from([(0, 1), (0, 2), (1, 1), (1, 2), (2, 1)]) + + cls.G3 = nx.DiGraph() + cls.G3.add_edges_from([(0, 1), (1, 2), (2, 1), (0, 3), (3, 4), (4, 3)]) + + cls.G4 = nx.DiGraph() + + def test_attracting_components(self): + ac = list(nx.attracting_components(self.G1)) + assert {2} in ac + assert {9} in ac + assert {10} in ac + + ac = list(nx.attracting_components(self.G2)) + ac = [tuple(sorted(x)) for x in ac] + assert ac == [(1, 2)] + + ac = list(nx.attracting_components(self.G3)) + ac = [tuple(sorted(x)) for x in ac] + assert (1, 2) in ac + assert (3, 4) in ac + assert len(ac) == 2 + + ac = list(nx.attracting_components(self.G4)) + assert ac == [] + + def test_number_attacting_components(self): + assert nx.number_attracting_components(self.G1) == 3 + assert nx.number_attracting_components(self.G2) == 1 + assert nx.number_attracting_components(self.G3) == 2 + assert nx.number_attracting_components(self.G4) == 0 + + def test_is_attracting_component(self): + assert not nx.is_attracting_component(self.G1) + assert not nx.is_attracting_component(self.G2) + assert not nx.is_attracting_component(self.G3) + g2 = self.G3.subgraph([1, 2]) + assert nx.is_attracting_component(g2) + assert not nx.is_attracting_component(self.G4) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.attracting_components(G)) + pytest.raises(NetworkXNotImplemented, nx.number_attracting_components, G) + pytest.raises(NetworkXNotImplemented, nx.is_attracting_component, G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_biconnected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_biconnected.py new file mode 100644 index 0000000..19d2d88 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_biconnected.py @@ -0,0 +1,248 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +def assert_components_edges_equal(x, y): + sx = {frozenset(frozenset(e) for e in c) for c in x} + sy = {frozenset(frozenset(e) for e in c) for c in y} + assert sx == sy + + +def assert_components_equal(x, y): + sx = {frozenset(c) for c in x} + sy = {frozenset(c) for c in y} + assert sx == sy + + +def test_barbell(): + G = nx.barbell_graph(8, 4) + nx.add_path(G, [7, 20, 21, 22]) + nx.add_cycle(G, [22, 23, 24, 25]) + pts = set(nx.articulation_points(G)) + assert pts == {7, 8, 9, 10, 11, 12, 20, 21, 22} + + answer = [ + {12, 13, 14, 15, 16, 17, 18, 19}, + {0, 1, 2, 3, 4, 5, 6, 7}, + {22, 23, 24, 25}, + {11, 12}, + {10, 11}, + {9, 10}, + {8, 9}, + {7, 8}, + {21, 22}, + {20, 21}, + {7, 20}, + ] + assert_components_equal(list(nx.biconnected_components(G)), answer) + + G.add_edge(2, 17) + pts = set(nx.articulation_points(G)) + assert pts == {7, 20, 21, 22} + + +def test_articulation_points_repetitions(): + G = nx.Graph() + G.add_edges_from([(0, 
1), (1, 2), (1, 3)]) + assert list(nx.articulation_points(G)) == [1] + + +def test_articulation_points_cycle(): + G = nx.cycle_graph(3) + nx.add_cycle(G, [1, 3, 4]) + pts = set(nx.articulation_points(G)) + assert pts == {1} + + +def test_is_biconnected(): + G = nx.cycle_graph(3) + assert nx.is_biconnected(G) + nx.add_cycle(G, [1, 3, 4]) + assert not nx.is_biconnected(G) + + +def test_empty_is_biconnected(): + G = nx.empty_graph(5) + assert not nx.is_biconnected(G) + G.add_edge(0, 1) + assert not nx.is_biconnected(G) + + +def test_biconnected_components_cycle(): + G = nx.cycle_graph(3) + nx.add_cycle(G, [1, 3, 4]) + answer = [{0, 1, 2}, {1, 3, 4}] + assert_components_equal(list(nx.biconnected_components(G)), answer) + + +def test_biconnected_components1(): + # graph example from + # https://web.archive.org/web/20121229123447/http://www.ibluemojo.com/school/articul_algorithm.html + edges = [ + (0, 1), + (0, 5), + (0, 6), + (0, 14), + (1, 5), + (1, 6), + (1, 14), + (2, 4), + (2, 10), + (3, 4), + (3, 15), + (4, 6), + (4, 7), + (4, 10), + (5, 14), + (6, 14), + (7, 9), + (8, 9), + (8, 12), + (8, 13), + (10, 15), + (11, 12), + (11, 13), + (12, 13), + ] + G = nx.Graph(edges) + pts = set(nx.articulation_points(G)) + assert pts == {4, 6, 7, 8, 9} + comps = list(nx.biconnected_component_edges(G)) + answer = [ + [(3, 4), (15, 3), (10, 15), (10, 4), (2, 10), (4, 2)], + [(13, 12), (13, 8), (11, 13), (12, 11), (8, 12)], + [(9, 8)], + [(7, 9)], + [(4, 7)], + [(6, 4)], + [(14, 0), (5, 1), (5, 0), (14, 5), (14, 1), (6, 14), (6, 0), (1, 6), (0, 1)], + ] + assert_components_edges_equal(comps, answer) + + +def test_biconnected_components2(): + G = nx.Graph() + nx.add_cycle(G, "ABC") + nx.add_cycle(G, "CDE") + nx.add_cycle(G, "FIJHG") + nx.add_cycle(G, "GIJ") + G.add_edge("E", "G") + comps = list(nx.biconnected_component_edges(G)) + answer = [ + [ + tuple("GF"), + tuple("FI"), + tuple("IG"), + tuple("IJ"), + tuple("JG"), + tuple("JH"), + tuple("HG"), + ], + [tuple("EG")], + [tuple("CD"), tuple("DE"), tuple("CE")], + [tuple("AB"), tuple("BC"), tuple("AC")], + ] + assert_components_edges_equal(comps, answer) + + +def test_biconnected_davis(): + D = nx.davis_southern_women_graph() + bcc = list(nx.biconnected_components(D))[0] + assert set(D) == bcc # All nodes in a giant bicomponent + # So no articulation points + assert len(list(nx.articulation_points(D))) == 0 + + +def test_biconnected_karate(): + K = nx.karate_club_graph() + answer = [ + { + 0, + 1, + 2, + 3, + 7, + 8, + 9, + 12, + 13, + 14, + 15, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + }, + {0, 4, 5, 6, 10, 16}, + {0, 11}, + ] + bcc = list(nx.biconnected_components(K)) + assert_components_equal(bcc, answer) + assert set(nx.articulation_points(K)) == {0} + + +def test_biconnected_eppstein(): + # tests from http://www.ics.uci.edu/~eppstein/PADS/Biconnectivity.py + G1 = nx.Graph( + { + 0: [1, 2, 5], + 1: [0, 5], + 2: [0, 3, 4], + 3: [2, 4, 5, 6], + 4: [2, 3, 5, 6], + 5: [0, 1, 3, 4], + 6: [3, 4], + } + ) + G2 = nx.Graph( + { + 0: [2, 5], + 1: [3, 8], + 2: [0, 3, 5], + 3: [1, 2, 6, 8], + 4: [7], + 5: [0, 2], + 6: [3, 8], + 7: [4], + 8: [1, 3, 6], + } + ) + assert nx.is_biconnected(G1) + assert not nx.is_biconnected(G2) + answer_G2 = [{1, 3, 6, 8}, {0, 2, 5}, {2, 3}, {4, 7}] + bcc = list(nx.biconnected_components(G2)) + assert_components_equal(bcc, answer_G2) + + +def test_null_graph(): + G = nx.Graph() + assert not nx.is_biconnected(G) + assert list(nx.biconnected_components(G)) == [] + assert 
list(nx.biconnected_component_edges(G)) == [] + assert list(nx.articulation_points(G)) == [] + + +def test_connected_raise(): + DG = nx.DiGraph() + with pytest.raises(NetworkXNotImplemented): + next(nx.biconnected_components(DG)) + with pytest.raises(NetworkXNotImplemented): + next(nx.biconnected_component_edges(DG)) + with pytest.raises(NetworkXNotImplemented): + next(nx.articulation_points(DG)) + pytest.raises(NetworkXNotImplemented, nx.is_biconnected, DG) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_connected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_connected.py new file mode 100644 index 0000000..207214c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_connected.py @@ -0,0 +1,138 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented +from networkx import convert_node_labels_to_integers as cnlti +from networkx.classes.tests import dispatch_interface + + +class TestConnected: + @classmethod + def setup_class(cls): + G1 = cnlti(nx.grid_2d_graph(2, 2), first_label=0, ordering="sorted") + G2 = cnlti(nx.lollipop_graph(3, 3), first_label=4, ordering="sorted") + G3 = cnlti(nx.house_graph(), first_label=10, ordering="sorted") + cls.G = nx.union(G1, G2) + cls.G = nx.union(cls.G, G3) + cls.DG = nx.DiGraph([(1, 2), (1, 3), (2, 3)]) + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1) + + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = [[3, 4, 5, 7], [1, 2, 8], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = [[2, 3, 4], [1]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = [[1, 2, 3]] + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + C = [[0], [1], [2], [3], [4], [5], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]}) + C = [[0, 1, 2], [3, 4]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + C = [] + cls.gc.append((G, C)) + + def test_connected_components(self): + # Test duplicated below + cc = nx.connected_components + G = self.G + C = { + frozenset([0, 1, 2, 3]), + frozenset([4, 5, 6, 7, 8, 9]), + frozenset([10, 11, 12, 13, 14]), + } + assert {frozenset(g) for g in cc(G)} == C + + def test_connected_components_nx_loopback(self): + # This tests the @nx._dispatchable mechanism, treating nx.connected_components + # as if it were a re-implementation from another package. + # Test duplicated from above + cc = nx.connected_components + G = dispatch_interface.convert(self.G) + C = { + frozenset([0, 1, 2, 3]), + frozenset([4, 5, 6, 7, 8, 9]), + frozenset([10, 11, 12, 13, 14]), + } + if "nx_loopback" in nx.config.backends or not nx.config.backends: + # If `nx.config.backends` is empty, then `_dispatchable.__call__` takes a + # "fast path" and does not check graph inputs, so using an unknown backend + # here will still work. + assert {frozenset(g) for g in cc(G)} == C + else: + # This raises, because "nx_loopback" is not registered as a backend. 
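+            # (The dispatch layer inspects the backend associated with the
+            # converted graph before running, hence the ImportError below.)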
+ with pytest.raises( + ImportError, match="'nx_loopback' backend is not installed" + ): + cc(G) + + def test_number_connected_components(self): + ncc = nx.number_connected_components + assert ncc(self.G) == 3 + + def test_number_connected_components2(self): + ncc = nx.number_connected_components + assert ncc(self.grid) == 1 + + def test_connected_components2(self): + cc = nx.connected_components + G = self.grid + C = {frozenset([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])} + assert {frozenset(g) for g in cc(G)} == C + + def test_node_connected_components(self): + ncc = nx.node_connected_component + G = self.grid + C = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + assert ncc(G, 1) == C + + def test_is_connected(self): + assert nx.is_connected(self.grid) + G = nx.Graph() + G.add_nodes_from([1, 2]) + assert not nx.is_connected(G) + + def test_connected_raise(self): + with pytest.raises(NetworkXNotImplemented): + next(nx.connected_components(self.DG)) + pytest.raises(NetworkXNotImplemented, nx.number_connected_components, self.DG) + pytest.raises(NetworkXNotImplemented, nx.node_connected_component, self.DG, 1) + pytest.raises(NetworkXNotImplemented, nx.is_connected, self.DG) + pytest.raises(nx.NetworkXPointlessConcept, nx.is_connected, nx.Graph()) + + def test_connected_mutability(self): + G = self.grid + seen = set() + for component in nx.connected_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_semiconnected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_semiconnected.py new file mode 100644 index 0000000..6376bbf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_semiconnected.py @@ -0,0 +1,55 @@ +from itertools import chain + +import pytest + +import networkx as nx + + +class TestIsSemiconnected: + def test_undirected(self): + pytest.raises(nx.NetworkXNotImplemented, nx.is_semiconnected, nx.Graph()) + pytest.raises(nx.NetworkXNotImplemented, nx.is_semiconnected, nx.MultiGraph()) + + def test_empty(self): + pytest.raises(nx.NetworkXPointlessConcept, nx.is_semiconnected, nx.DiGraph()) + pytest.raises( + nx.NetworkXPointlessConcept, nx.is_semiconnected, nx.MultiDiGraph() + ) + + def test_single_node_graph(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.is_semiconnected(G) + + def test_path(self): + G = nx.path_graph(100, create_using=nx.DiGraph()) + assert nx.is_semiconnected(G) + G.add_edge(100, 99) + assert not nx.is_semiconnected(G) + + def test_cycle(self): + G = nx.cycle_graph(100, create_using=nx.DiGraph()) + assert nx.is_semiconnected(G) + G = nx.path_graph(100, create_using=nx.DiGraph()) + G.add_edge(0, 99) + assert nx.is_semiconnected(G) + + def test_tree(self): + G = nx.DiGraph() + G.add_edges_from( + chain.from_iterable([(i, 2 * i + 1), (i, 2 * i + 2)] for i in range(100)) + ) + assert not nx.is_semiconnected(G) + + def test_dumbbell(self): + G = nx.cycle_graph(100, create_using=nx.DiGraph()) + G.add_edges_from((i + 100, (i + 1) % 100 + 100) for i in range(100)) + assert not nx.is_semiconnected(G) # G is disconnected. 
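+        # One edge from the second cycle into the first is enough: the
+        # condensation becomes a two-node path, which is semiconnected.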
+ G.add_edge(100, 99) + assert nx.is_semiconnected(G) + + def test_alternating_path(self): + G = nx.DiGraph( + chain.from_iterable([(i, i - 1), (i, i + 1)] for i in range(0, 100, 2)) + ) + assert not nx.is_semiconnected(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py new file mode 100644 index 0000000..27f4098 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py @@ -0,0 +1,193 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestStronglyConnected: + @classmethod + def setup_class(cls): + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = {frozenset([3, 4, 5, 7]), frozenset([1, 2, 8]), frozenset([6])} + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = {frozenset([2, 3, 4]), frozenset([1])} + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = {frozenset([1, 2, 3])} + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + C = { + frozenset([0]), + frozenset([1]), + frozenset([2]), + frozenset([3]), + frozenset([4]), + frozenset([5]), + frozenset([6]), + } + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]}) + C = {frozenset([0, 1, 2]), frozenset([3, 4])} + cls.gc.append((G, C)) + + def test_tarjan(self): + scc = nx.strongly_connected_components + for G, C in self.gc: + assert {frozenset(g) for g in scc(G)} == C + + def test_kosaraju(self): + scc = nx.kosaraju_strongly_connected_components + for G, C in self.gc: + assert {frozenset(g) for g in scc(G)} == C + + def test_number_strongly_connected_components(self): + ncc = nx.number_strongly_connected_components + for G, C in self.gc: + assert ncc(G) == len(C) + + def test_is_strongly_connected(self): + for G, C in self.gc: + if len(C) == 1: + assert nx.is_strongly_connected(G) + else: + assert not nx.is_strongly_connected(G) + + def test_contract_scc1(self): + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 11), + (2, 12), + (3, 4), + (4, 3), + (4, 5), + (5, 6), + (6, 5), + (6, 7), + (7, 8), + (7, 9), + (7, 10), + (8, 9), + (9, 7), + (10, 6), + (11, 2), + (11, 4), + (11, 6), + (12, 6), + (12, 11), + ] + ) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + # DAG + assert nx.is_directed_acyclic_graph(cG) + # nodes + assert sorted(cG.nodes()) == [0, 1, 2, 3] + # edges + mapping = {} + for i, component in enumerate(scc): + for n in component: + mapping[n] = i + edge = (mapping[2], mapping[3]) + assert cG.has_edge(*edge) + edge = (mapping[2], mapping[5]) + assert cG.has_edge(*edge) + edge = (mapping[3], mapping[5]) + assert cG.has_edge(*edge) + + def test_contract_scc_isolate(self): + # Bug found and fixed in [1687]. 
+ G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + assert list(cG.nodes()) == [0] + assert list(cG.edges()) == [] + + def test_contract_scc_edge(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + G.add_edge(2, 3) + G.add_edge(3, 4) + G.add_edge(4, 3) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + assert sorted(cG.nodes()) == [0, 1] + if 1 in scc[0]: + edge = (0, 1) + else: + edge = (1, 0) + assert list(cG.edges()) == [edge] + + def test_condensation_mapping_and_members(self): + G, C = self.gc[1] + C = sorted(C, key=len, reverse=True) + cG = nx.condensation(G) + mapping = cG.graph["mapping"] + assert all(n in G for n in mapping) + assert all(0 == cN for n, cN in mapping.items() if n in C[0]) + assert all(1 == cN for n, cN in mapping.items() if n in C[1]) + for n, d in cG.nodes(data=True): + assert set(C[n]) == cG.nodes[n]["members"] + + def test_null_graph(self): + G = nx.DiGraph() + assert list(nx.strongly_connected_components(G)) == [] + assert list(nx.kosaraju_strongly_connected_components(G)) == [] + assert len(nx.condensation(G)) == 0 + pytest.raises( + nx.NetworkXPointlessConcept, nx.is_strongly_connected, nx.DiGraph() + ) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.strongly_connected_components(G)) + with pytest.raises(NetworkXNotImplemented): + next(nx.kosaraju_strongly_connected_components(G)) + pytest.raises(NetworkXNotImplemented, nx.is_strongly_connected, G) + pytest.raises(NetworkXNotImplemented, nx.condensation, G) + + strong_cc_methods = ( + nx.strongly_connected_components, + nx.kosaraju_strongly_connected_components, + ) + + @pytest.mark.parametrize("get_components", strong_cc_methods) + def test_connected_mutability(self, get_components): + DG = nx.path_graph(5, create_using=nx.DiGraph) + G = nx.disjoint_union(DG, DG) + seen = set() + for component in get_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py new file mode 100644 index 0000000..f014478 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py @@ -0,0 +1,96 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestWeaklyConnected: + @classmethod + def setup_class(cls): + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = [[3, 4, 5, 7], [1, 2, 8], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = [[2, 3, 4], [1]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = [[1, 2, 3]] + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + C = [[0], [1], [2], [3], [4], [5], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]}) + C = [[0, 1, 2], [3, 4]] + cls.gc.append((G, C)) + + def test_weakly_connected_components(self): + for G, C in self.gc: + U = G.to_undirected() + w = {frozenset(g) 
for g in nx.weakly_connected_components(G)} + c = {frozenset(g) for g in nx.connected_components(U)} + assert w == c + + def test_number_weakly_connected_components(self): + for G, C in self.gc: + U = G.to_undirected() + w = nx.number_weakly_connected_components(G) + c = nx.number_connected_components(U) + assert w == c + + def test_is_weakly_connected(self): + for G, C in self.gc: + U = G.to_undirected() + assert nx.is_weakly_connected(G) == nx.is_connected(U) + + def test_null_graph(self): + G = nx.DiGraph() + assert list(nx.weakly_connected_components(G)) == [] + assert nx.number_weakly_connected_components(G) == 0 + with pytest.raises(nx.NetworkXPointlessConcept): + next(nx.is_weakly_connected(G)) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.weakly_connected_components(G)) + pytest.raises(NetworkXNotImplemented, nx.number_weakly_connected_components, G) + pytest.raises(NetworkXNotImplemented, nx.is_weakly_connected, G) + + def test_connected_mutability(self): + DG = nx.path_graph(5, create_using=nx.DiGraph) + G = nx.disjoint_union(DG, DG) + seen = set() + for component in nx.weakly_connected_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() + + +def test_is_weakly_connected_empty_graph_raises(): + G = nx.DiGraph() + with pytest.raises(nx.NetworkXPointlessConcept, match="Connectivity is undefined"): + nx.is_weakly_connected(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/components/weakly_connected.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/weakly_connected.py new file mode 100644 index 0000000..564adb9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/components/weakly_connected.py @@ -0,0 +1,197 @@ +"""Weakly connected components.""" + +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_weakly_connected_components", + "weakly_connected_components", + "is_weakly_connected", +] + + +@not_implemented_for("undirected") +@nx._dispatchable +def weakly_connected_components(G): + """Generate weakly connected components of G. + + Parameters + ---------- + G : NetworkX graph + A directed graph + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each weakly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of weakly connected components, largest first. + + >>> G = nx.path_graph(4, create_using=nx.DiGraph()) + >>> nx.add_path(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted(nx.weakly_connected_components(G), key=len, reverse=True) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort: + + >>> largest_cc = max(nx.weakly_connected_components(G), key=len) + + See Also + -------- + connected_components + strongly_connected_components + + Notes + ----- + For directed graphs only. + + """ + seen = set() + n = len(G) # must be outside the loop to avoid performance hit with graph views + for v in G: + if v not in seen: + c = set(_plain_bfs(G, n - len(seen), v)) + seen.update(c) + yield c + + +@not_implemented_for("undirected") +@nx._dispatchable +def number_weakly_connected_components(G): + """Returns the number of weakly connected components in G. + + Parameters + ---------- + G : NetworkX graph + A directed graph. 
+ + Returns + ------- + n : integer + Number of weakly connected components + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (2, 1), (3, 4)]) + >>> nx.number_weakly_connected_components(G) + 2 + + See Also + -------- + weakly_connected_components + number_connected_components + number_strongly_connected_components + + Notes + ----- + For directed graphs only. + + """ + return sum(1 for wcc in weakly_connected_components(G)) + + +@not_implemented_for("undirected") +@nx._dispatchable +def is_weakly_connected(G): + """Test directed graph for weak connectivity. + + A directed graph is weakly connected if and only if the graph + is connected when the direction of the edge between nodes is ignored. + + Note that if a graph is strongly connected (i.e. the graph is connected + even when we account for directionality), it is by definition weakly + connected as well. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + connected : bool + True if the graph is weakly connected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (2, 1)]) + >>> G.add_node(3) + >>> nx.is_weakly_connected(G) # node 3 is not connected to the graph + False + >>> G.add_edge(2, 3) + >>> nx.is_weakly_connected(G) + True + + See Also + -------- + is_strongly_connected + is_semiconnected + is_connected + is_biconnected + weakly_connected_components + + Notes + ----- + For directed graphs only. + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + """Connectivity is undefined for the null graph.""" + ) + + return len(next(weakly_connected_components(G))) == len(G) + + +def _plain_bfs(G, n, source): + """A fast BFS node generator + + The direction of the edge between nodes is ignored. + + For directed graphs only. 
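+
+    The caller passes ``n``, the number of nodes not yet assigned to any
+    component; the search returns early once that many nodes have been seen.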
+ + """ + Gsucc = G._succ + Gpred = G._pred + seen = {source} + nextlevel = [source] + + yield source + while nextlevel: + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in Gsucc[v]: + if w not in seen: + seen.add(w) + nextlevel.append(w) + yield w + for w in Gpred[v]: + if w not in seen: + seen.add(w) + nextlevel.append(w) + yield w + if len(seen) == n: + return diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__init__.py new file mode 100644 index 0000000..d08a360 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__init__.py @@ -0,0 +1,11 @@ +"""Connectivity and cut algorithms""" + +from .connectivity import * +from .cuts import * +from .edge_augmentation import * +from .edge_kcomponents import * +from .disjoint_paths import * +from .kcomponents import * +from .kcutsets import * +from .stoerwagner import * +from .utils import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..91bacd7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-312.pyc new file mode 100644 index 0000000..474e390 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-312.pyc new file mode 100644 index 0000000..14d7804 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-312.pyc new file mode 100644 index 0000000..978a51d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-312.pyc new file mode 100644 index 0000000..43d67ed Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-312.pyc new file mode 100644 index 0000000..914ddfe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-312.pyc differ diff 
--git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-312.pyc new file mode 100644 index 0000000..fff1b09 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-312.pyc new file mode 100644 index 0000000..17b46cd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-312.pyc new file mode 100644 index 0000000..8234335 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..b4a2013 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/connectivity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/connectivity.py new file mode 100644 index 0000000..210413f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/connectivity.py @@ -0,0 +1,811 @@ +""" +Flow based connectivity algorithms +""" + +import itertools +from operator import itemgetter + +import networkx as nx + +# Define the default maximum flow function to use in all flow based +# connectivity algorithms. +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_residual_network, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity + +default_flow_func = edmonds_karp + +__all__ = [ + "average_node_connectivity", + "local_node_connectivity", + "node_connectivity", + "local_edge_connectivity", + "edge_connectivity", + "all_pairs_node_connectivity", +] + + +@nx._dispatchable(graphs={"G": 0, "auxiliary?": 4}, preserve_graph_attrs={"auxiliary"}) +def local_node_connectivity( + G, s, t, flow_func=None, auxiliary=None, residual=None, cutoff=None +): + r"""Computes local node connectivity for nodes s and t. + + Local node connectivity for two non adjacent nodes s and t is the + minimum number of nodes that must be removed (along with their incident + edges) to disconnect them. + + This is a flow based implementation of node connectivity. We compute the + maximum flow on an auxiliary digraph build from the original input + graph (see below for details). + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + s : node + Source node + + t : node + Target node + + flow_func : function + A function for computing the maximum flow among a pair of nodes. 
+ The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The choice + of the default function may change from version to version and + should not be relied on. Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + cutoff : integer, float, or None (default: None) + If specified, the maximum flow algorithm will terminate when the + flow value reaches or exceeds the cutoff. This only works for flows + that support the cutoff parameter (most do) and is ignored otherwise. + + Returns + ------- + K : integer + local node connectivity for nodes s and t + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import local_node_connectivity + + We use in this example the platonic icosahedral graph, which has node + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> local_node_connectivity(G, 0, 6) + 5 + + If you need to compute local connectivity on several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for node connectivity, and the residual + network for the underlying maximum flow computation. + + Example of how to compute local node connectivity among + all pairs of nodes of the platonic icosahedral graph reusing + the data structures. + + >>> import itertools + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity + >>> H = build_auxiliary_node_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> result = dict.fromkeys(G, dict()) + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as parameters + >>> for u, v in itertools.combinations(G, 2): + ... k = local_node_connectivity(G, u, v, auxiliary=H, residual=R) + ... result[u][v] = k + >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) + True + + You can also use alternative flow algorithms for computing node + connectivity. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. 
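+
+    For instance, Dinitz' algorithm (another routine available in the flow
+    package) reports the same value here:
+
+    >>> from networkx.algorithms.flow import dinitz
+    >>> local_node_connectivity(G, 0, 6, flow_func=dinitz)
+    5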
+ + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> local_node_connectivity(G, 0, 6, flow_func=shortest_augmenting_path) + 5 + + Notes + ----- + This is a flow based implementation of node connectivity. We compute the + maximum flow using, by default, the :meth:`edmonds_karp` algorithm (see: + :meth:`maximum_flow`) on an auxiliary digraph build from the original + input graph: + + For an undirected graph G having `n` nodes and `m` edges we derive a + directed graph H with `2n` nodes and `2m+n` arcs by replacing each + original node `v` with two nodes `v_A`, `v_B` linked by an (internal) + arc in H. Then for each edge (`u`, `v`) in G we add two arcs + (`u_B`, `v_A`) and (`v_B`, `u_A`) in H. Finally we set the attribute + capacity = 1 for each arc in H [1]_ . + + For a directed graph G having `n` nodes and `m` arcs we derive a + directed graph H with `2n` nodes and `m+n` arcs by replacing each + original node `v` with two nodes `v_A`, `v_B` linked by an (internal) + arc (`v_A`, `v_B`) in H. Then for each arc (`u`, `v`) in G we add one arc + (`u_B`, `v_A`) in H. Finally we set the attribute capacity = 1 for + each arc in H. + + This is equal to the local node connectivity because the value of + a maximum s-t-flow is equal to the capacity of a minimum s-t-cut. + + See also + -------- + :meth:`local_edge_connectivity` + :meth:`node_connectivity` + :meth:`minimum_node_cut` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. in Brandes and + Erlebach, 'Network Analysis: Methodological Foundations', Lecture + Notes in Computer Science, Volume 3418, Springer-Verlag, 2005. + http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + + """ + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_node_connectivity(G) + else: + H = auxiliary + + mapping = H.graph.get("mapping", None) + if mapping is None: + raise nx.NetworkXError("Invalid auxiliary digraph.") + + kwargs = {"flow_func": flow_func, "residual": residual} + + if flow_func is not preflow_push: + kwargs["cutoff"] = cutoff + + if flow_func is shortest_augmenting_path: + kwargs["two_phase"] = True + + return nx.maximum_flow_value(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs) + + +@nx._dispatchable +def node_connectivity(G, s=None, t=None, flow_func=None): + r"""Returns node connectivity for a graph or digraph G. + + Node connectivity is equal to the minimum number of nodes that + must be removed to disconnect G or render it trivial. If source + and target nodes are provided, this function returns the local node + connectivity: the minimum number of nodes that must be removed to break + all paths from source to target in G. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. 
The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + K : integer + Node connectivity of G, or local node connectivity if source + and target are provided. + + Examples + -------- + >>> # Platonic icosahedral graph is 5-node-connected + >>> G = nx.icosahedral_graph() + >>> nx.node_connectivity(G) + 5 + + You can use alternative flow algorithms for the underlying maximum + flow computation. In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. Alternative + flow functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> nx.node_connectivity(G, flow_func=shortest_augmenting_path) + 5 + + If you specify a pair of nodes (source and target) as parameters, + this function returns the value of local node connectivity. + + >>> nx.node_connectivity(G, 3, 7) + 5 + + If you need to perform several local computations among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`local_node_connectivity` for details. + + Notes + ----- + This is a flow based implementation of node connectivity. The + algorithm works by solving $O((n-\delta-1+\delta(\delta-1)/2))$ + maximum flow problems on an auxiliary digraph. Where $\delta$ + is the minimum degree of G. For details about the auxiliary + digraph and the computation of local node connectivity see + :meth:`local_node_connectivity`. This implementation is based + on algorithm 11 in [1]_. + + See also + -------- + :meth:`local_node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # Local node connectivity + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return local_node_connectivity(G, s, t, flow_func=flow_func) + + # Global node connectivity + if G.is_directed(): + if not nx.is_weakly_connected(G): + return 0 + iter_func = itertools.permutations + # It is necessary to consider both predecessors + # and successors for directed graphs + + def neighbors(v): + return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)]) + + else: + if not nx.is_connected(G): + return 0 + iter_func = itertools.combinations + neighbors = G.neighbors + + # Reuse the auxiliary digraph and the residual network + H = build_auxiliary_node_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R} + + # Pick a node with minimum degree + # Node connectivity is bounded by degree. 
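+    # Its degree seeds the running upper bound K, and each local computation
+    # below passes the current K as a cutoff so the flow solver can stop early.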
+ v, K = min(G.degree(), key=itemgetter(1)) + # compute local node connectivity with all its non-neighbors nodes + for w in set(G) - set(neighbors(v)) - {v}: + kwargs["cutoff"] = K + K = min(K, local_node_connectivity(G, v, w, **kwargs)) + # Also for non adjacent pairs of neighbors of v + for x, y in iter_func(neighbors(v), 2): + if y in G[x]: + continue + kwargs["cutoff"] = K + K = min(K, local_node_connectivity(G, x, y, **kwargs)) + + return K + + +@nx._dispatchable +def average_node_connectivity(G, flow_func=None): + r"""Returns the average connectivity of a graph G. + + The average connectivity `\bar{\kappa}` of a graph G is the average + of local node connectivity over all pairs of nodes of G [1]_ . + + .. math:: + + \bar{\kappa}(G) = \frac{\sum_{u,v} \kappa_{G}(u,v)}{{n \choose 2}} + + Parameters + ---------- + + G : NetworkX graph + Undirected graph + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See :meth:`local_node_connectivity` + for details. The choice of the default function may change from + version to version and should not be relied on. Default value: None. + + Returns + ------- + K : float + Average node connectivity + + See also + -------- + :meth:`local_node_connectivity` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Beineke, L., O. Oellermann, and R. Pippert (2002). The average + connectivity of a graph. Discrete mathematics 252(1-3), 31-45. + http://www.sciencedirect.com/science/article/pii/S0012365X01001807 + + """ + if G.is_directed(): + iter_func = itertools.permutations + else: + iter_func = itertools.combinations + + # Reuse the auxiliary digraph and the residual network + H = build_auxiliary_node_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R} + + num, den = 0, 0 + for u, v in iter_func(G, 2): + num += local_node_connectivity(G, u, v, **kwargs) + den += 1 + + if den == 0: # Null Graph + return 0 + return num / den + + +@nx._dispatchable +def all_pairs_node_connectivity(G, nbunch=None, flow_func=None): + """Compute node connectivity between all pairs of nodes of G. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + nbunch: container + Container of nodes. If provided node connectivity will be computed + only over pairs of nodes in nbunch. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + all_pairs : dict + A dictionary with node connectivity between all pairs of nodes + in G, or in nbunch if provided. 
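+
+    Examples
+    --------
+    A small sketch (an undirected 4-cycle, in which every pair of distinct
+    nodes is joined by two internally node-disjoint paths):
+
+    >>> G = nx.cycle_graph(4)
+    >>> K = nx.all_pairs_node_connectivity(G)
+    >>> K[0][2]
+    2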
+ + See also + -------- + :meth:`local_node_connectivity` + :meth:`edge_connectivity` + :meth:`local_edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + """ + if nbunch is None: + nbunch = G + else: + nbunch = set(nbunch) + + directed = G.is_directed() + if directed: + iter_func = itertools.permutations + else: + iter_func = itertools.combinations + + all_pairs = {n: {} for n in nbunch} + + # Reuse auxiliary digraph and residual network + H = build_auxiliary_node_connectivity(G) + mapping = H.graph["mapping"] + R = build_residual_network(H, "capacity") + kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R} + + for u, v in iter_func(nbunch, 2): + K = local_node_connectivity(G, u, v, **kwargs) + all_pairs[u][v] = K + if not directed: + all_pairs[v][u] = K + + return all_pairs + + +@nx._dispatchable(graphs={"G": 0, "auxiliary?": 4}) +def local_edge_connectivity( + G, s, t, flow_func=None, auxiliary=None, residual=None, cutoff=None +): + r"""Returns local edge connectivity for nodes s and t in G. + + Local edge connectivity for two nodes s and t is the minimum number + of edges that must be removed to disconnect them. + + This is a flow based implementation of edge connectivity. We compute the + maximum flow on an auxiliary digraph build from the original + network (see below for details). This is equal to the local edge + connectivity because the value of a maximum s-t-flow is equal to the + capacity of a minimum s-t-cut (Ford and Fulkerson theorem) [1]_ . + + Parameters + ---------- + G : NetworkX graph + Undirected or directed graph + + s : node + Source node + + t : node + Target node + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph for computing flow based edge connectivity. If + provided it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + cutoff : integer, float, or None (default: None) + If specified, the maximum flow algorithm will terminate when the + flow value reaches or exceeds the cutoff. This only works for flows + that support the cutoff parameter (most do) and is ignored otherwise. + + Returns + ------- + K : integer + local edge connectivity for nodes s and t. + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import local_edge_connectivity + + We use in this example the platonic icosahedral graph, which has edge + connectivity 5. 
+ + >>> G = nx.icosahedral_graph() + >>> local_edge_connectivity(G, 0, 6) + 5 + + If you need to compute local connectivity on several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for edge connectivity, and the residual + network for the underlying maximum flow computation. + + Example of how to compute local edge connectivity among + all pairs of nodes of the platonic icosahedral graph reusing + the data structures. + + >>> import itertools + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity + >>> H = build_auxiliary_edge_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> result = dict.fromkeys(G, dict()) + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as parameters + >>> for u, v in itertools.combinations(G, 2): + ... k = local_edge_connectivity(G, u, v, auxiliary=H, residual=R) + ... result[u][v] = k + >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) + True + + You can also use alternative flow algorithms for computing edge + connectivity. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> local_edge_connectivity(G, 0, 6, flow_func=shortest_augmenting_path) + 5 + + Notes + ----- + This is a flow based implementation of edge connectivity. We compute the + maximum flow using, by default, the :meth:`edmonds_karp` algorithm on an + auxiliary digraph build from the original input graph: + + If the input graph is undirected, we replace each edge (`u`,`v`) with + two reciprocal arcs (`u`, `v`) and (`v`, `u`) and then we set the attribute + 'capacity' for each arc to 1. If the input graph is directed we simply + add the 'capacity' attribute. This is an implementation of algorithm 1 + in [1]_. + + The maximum flow in the auxiliary network is equal to the local edge + connectivity because the value of a maximum s-t-flow is equal to the + capacity of a minimum s-t-cut (Ford and Fulkerson theorem). + + See also + -------- + :meth:`edge_connectivity` + :meth:`local_node_connectivity` + :meth:`node_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. 
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_edge_connectivity(G) + else: + H = auxiliary + + kwargs = {"flow_func": flow_func, "residual": residual} + + if flow_func is not preflow_push: + kwargs["cutoff"] = cutoff + + if flow_func is shortest_augmenting_path: + kwargs["two_phase"] = True + + return nx.maximum_flow_value(H, s, t, **kwargs) + + +@nx._dispatchable +def edge_connectivity(G, s=None, t=None, flow_func=None, cutoff=None): + r"""Returns the edge connectivity of the graph or digraph G. + + The edge connectivity is equal to the minimum number of edges that + must be removed to disconnect G or render it trivial. If source + and target nodes are provided, this function returns the local edge + connectivity: the minimum number of edges that must be removed to + break all paths from source to target in G. + + Parameters + ---------- + G : NetworkX graph + Undirected or directed graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + cutoff : integer, float, or None (default: None) + If specified, the maximum flow algorithm will terminate when the + flow value reaches or exceeds the cutoff. This only works for flows + that support the cutoff parameter (most do) and is ignored otherwise. + + Returns + ------- + K : integer + Edge connectivity for G, or local edge connectivity if source + and target were provided + + Examples + -------- + >>> # Platonic icosahedral graph is 5-edge-connected + >>> G = nx.icosahedral_graph() + >>> nx.edge_connectivity(G) + 5 + + You can use alternative flow algorithms for the underlying + maximum flow computation. In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. + Alternative flow functions have to be explicitly imported + from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> nx.edge_connectivity(G, flow_func=shortest_augmenting_path) + 5 + + If you specify a pair of nodes (source and target) as parameters, + this function returns the value of local edge connectivity. + + >>> nx.edge_connectivity(G, 3, 7) + 5 + + If you need to perform several local computations among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`local_edge_connectivity` for details. + + Notes + ----- + This is a flow based implementation of global edge connectivity. + For undirected graphs the algorithm works by finding a 'small' + dominating set of nodes of G (see algorithm 7 in [1]_ ) and + computing local maximum flow (see :meth:`local_edge_connectivity`) + between an arbitrary node in the dominating set and the rest of + nodes in it. 
This is an implementation of algorithm 6 in [1]_ . + For directed graphs, the algorithm does n calls to the maximum + flow function. This is an implementation of algorithm 8 in [1]_ . + + See also + -------- + :meth:`local_edge_connectivity` + :meth:`local_node_connectivity` + :meth:`node_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + :meth:`k_edge_components` + :meth:`k_edge_subgraphs` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # Local edge connectivity + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return local_edge_connectivity(G, s, t, flow_func=flow_func, cutoff=cutoff) + + # Global edge connectivity + # reuse auxiliary digraph and residual network + H = build_auxiliary_edge_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R} + + if G.is_directed(): + # Algorithm 8 in [1] + if not nx.is_weakly_connected(G): + return 0 + + # initial value for \lambda is minimum degree + L = min(d for n, d in G.degree()) + nodes = list(G) + n = len(nodes) + + if cutoff is not None: + L = min(cutoff, L) + + for i in range(n): + kwargs["cutoff"] = L + try: + L = min(L, local_edge_connectivity(G, nodes[i], nodes[i + 1], **kwargs)) + except IndexError: # last node! + L = min(L, local_edge_connectivity(G, nodes[i], nodes[0], **kwargs)) + return L + else: # undirected + # Algorithm 6 in [1] + if not nx.is_connected(G): + return 0 + + # initial value for \lambda is minimum degree + L = min(d for n, d in G.degree()) + + if cutoff is not None: + L = min(cutoff, L) + + # A dominating set is \lambda-covering + # We need a dominating set with at least two nodes + for node in G: + D = nx.dominating_set(G, start_with=node) + v = D.pop() + if D: + break + else: + # in complete graphs the dominating sets will always be of one node + # thus we return min degree + return L + + for w in D: + kwargs["cutoff"] = L + L = min(L, local_edge_connectivity(G, v, w, **kwargs)) + + return L diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/cuts.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/cuts.py new file mode 100644 index 0000000..e7806e1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/cuts.py @@ -0,0 +1,616 @@ +""" +Flow based cut algorithms +""" + +import itertools + +import networkx as nx + +# Define the default maximum flow function to use in all flow based +# cut algorithms. +from networkx.algorithms.flow import build_residual_network, edmonds_karp + +from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity + +default_flow_func = edmonds_karp + +__all__ = [ + "minimum_st_node_cut", + "minimum_node_cut", + "minimum_st_edge_cut", + "minimum_edge_cut", +] + + +@nx._dispatchable( + graphs={"G": 0, "auxiliary?": 4}, + preserve_edge_attrs={"auxiliary": {"capacity": float("inf")}}, + preserve_graph_attrs={"auxiliary"}, +) +def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None, residual=None): + """Returns the edges of the cut-set of a minimum (s, t)-cut. 
+
+    This function returns the set of edges of minimum cardinality that,
+    if removed, would destroy all paths among source and target in G.
+    Edge weights are not considered. See :meth:`minimum_cut` for
+    computing minimum cuts considering edge weights.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node for the flow.
+
+    t : node
+        Sink node for the flow.
+
+    auxiliary : NetworkX DiGraph
+        Auxiliary digraph to compute flow based edge connectivity. If
+        provided it will be reused instead of recreated. Default value: None.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See :meth:`node_connectivity` for
+        details. The choice of the default function may change from version
+        to version and should not be relied on. Default value: None.
+
+    residual : NetworkX DiGraph
+        Residual network to compute maximum flow. If provided it will be
+        reused instead of recreated. Default value: None.
+
+    Returns
+    -------
+    cutset : set
+        Set of edges that, if removed from the graph, will disconnect s from t.
+
+    See also
+    --------
+    :meth:`minimum_cut`
+    :meth:`minimum_node_cut`
+    :meth:`minimum_edge_cut`
+    :meth:`stoer_wagner`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Examples
+    --------
+    This function is not imported in the base NetworkX namespace, so you
+    have to explicitly import it from the connectivity package:
+
+    >>> from networkx.algorithms.connectivity import minimum_st_edge_cut
+
+    We use in this example the platonic icosahedral graph, which has edge
+    connectivity 5.
+
+    >>> G = nx.icosahedral_graph()
+    >>> len(minimum_st_edge_cut(G, 0, 6))
+    5
+
+    If you need to compute local edge cuts on several pairs of
+    nodes in the same graph, it is recommended that you reuse the
+    data structures that NetworkX uses in the computation: the
+    auxiliary digraph for edge connectivity, and the residual
+    network for the underlying maximum flow computation.
+
+    Example of how to compute local edge cuts among all pairs of
+    nodes of the platonic icosahedral graph reusing the data
+    structures.
+
+    >>> import itertools
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity
+    >>> H = build_auxiliary_edge_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> result = {n: {} for n in G}
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as parameters
+    >>> for u, v in itertools.combinations(G, 2):
+    ...     k = len(minimum_st_edge_cut(G, u, v, auxiliary=H, residual=R))
+    ...     result[u][v] = k
+    >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
+    True
+
+    You can also use alternative flow algorithms for computing edge
+    cuts. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp` which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(minimum_st_edge_cut(G, 0, 6, flow_func=shortest_augmenting_path))
+    5
+
+    """
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if auxiliary is None:
+        H = build_auxiliary_edge_connectivity(G)
+    else:
+        H = auxiliary
+
+    kwargs = {"capacity": "capacity", "flow_func": flow_func, "residual": residual}
+
+    cut_value, partition = nx.minimum_cut(H, s, t, **kwargs)
+    reachable, non_reachable = partition
+    # Any edge in the original graph linking the two sets in the
+    # partition is part of the edge cutset
+    cutset = set()
+    for u, nbrs in ((n, G[n]) for n in reachable):
+        cutset.update((u, v) for v in nbrs if v in non_reachable)
+
+    return cutset
+
+
+@nx._dispatchable(
+    graphs={"G": 0, "auxiliary?": 4},
+    preserve_node_attrs={"auxiliary": {"id": None}},
+    preserve_graph_attrs={"auxiliary"},
+)
+def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None):
+    r"""Returns a set of nodes of minimum cardinality that disconnect source
+    from target in G.
+
+    This function returns the set of nodes of minimum cardinality that,
+    if removed, would destroy all paths among source and target in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node.
+
+    t : node
+        Target node.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The choice
+        of the default function may change from version to version and
+        should not be relied on. Default value: None.
+
+    auxiliary : NetworkX DiGraph
+        Auxiliary digraph to compute flow based node connectivity. It has
+        to have a graph attribute called mapping with a dictionary mapping
+        node names in G and in the auxiliary digraph. If provided
+        it will be reused instead of recreated. Default value: None.
+
+    residual : NetworkX DiGraph
+        Residual network to compute maximum flow. If provided it will be
+        reused instead of recreated. Default value: None.
+
+    Returns
+    -------
+    cutset : set
+        Set of nodes that, if removed, would destroy all paths between
+        source and target in G.
+
+        Returns an empty set if source and target are either in different
+        components or are directly connected by an edge, as no node removal
+        can separate them.
+
+    Examples
+    --------
+    This function is not imported in the base NetworkX namespace, so you
+    have to explicitly import it from the connectivity package:
+
+    >>> from networkx.algorithms.connectivity import minimum_st_node_cut
+
+    We use in this example the platonic icosahedral graph, which has node
+    connectivity 5.
+
+    >>> G = nx.icosahedral_graph()
+    >>> len(minimum_st_node_cut(G, 0, 6))
+    5
+
+    If you need to compute local st cuts between several pairs of
+    nodes in the same graph, it is recommended that you reuse the
+    data structures that NetworkX uses in the computation: the
+    auxiliary digraph for node connectivity and node cuts, and the
+    residual network for the underlying maximum flow computation.
+
+    Example of how to compute local st node cuts reusing the data
+    structures:
+
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity
+    >>> H = build_auxiliary_node_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as parameters
+    >>> len(minimum_st_node_cut(G, 0, 6, auxiliary=H, residual=R))
+    5
+
+    You can also use alternative flow algorithms for computing minimum st
+    node cuts. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp` which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(minimum_st_node_cut(G, 0, 6, flow_func=shortest_augmenting_path))
+    5
+
+    Notes
+    -----
+    This is a flow based implementation of minimum node cut. The algorithm
+    is based on solving a number of maximum flow computations to determine
+    the capacity of the minimum cut on an auxiliary directed network that
+    corresponds to the minimum node cut of G. It handles both directed
+    and undirected graphs. This implementation is based on algorithm 11
+    in [1]_.
+
+    See also
+    --------
+    :meth:`minimum_node_cut`
+    :meth:`minimum_edge_cut`
+    :meth:`stoer_wagner`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+       http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+    """
+    if auxiliary is None:
+        H = build_auxiliary_node_connectivity(G)
+    else:
+        H = auxiliary
+
+    mapping = H.graph.get("mapping", None)
+    if mapping is None:
+        raise nx.NetworkXError("Invalid auxiliary digraph.")
+    if G.has_edge(s, t) or G.has_edge(t, s):
+        return set()
+    kwargs = {"flow_func": flow_func, "residual": residual, "auxiliary": H}
+
+    # The edge cut in the auxiliary digraph corresponds to the node cut in the
+    # original graph.
+    edge_cut = minimum_st_edge_cut(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs)
+    # Each node in the original graph maps to two nodes of the auxiliary graph
+    node_cut = {H.nodes[node]["id"] for edge in edge_cut for node in edge}
+    return node_cut - {s, t}
+
+
+@nx._dispatchable
+def minimum_node_cut(G, s=None, t=None, flow_func=None):
+    r"""Returns a set of nodes of minimum cardinality that disconnects G.
+
+    If source and target nodes are provided, this function returns the
+    set of nodes of minimum cardinality that, if removed, would destroy
+    all paths among source and target in G. If not, it returns a set
+    of nodes of minimum cardinality that disconnects G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node. Optional. Default value: None.
+
+    t : node
+        Target node. Optional. Default value: None.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The
+        choice of the default function may change from version
+        to version and should not be relied on. Default value: None.
+
+    Returns
+    -------
+    cutset : set
+        Set of nodes that, if removed, would disconnect G. If source
+        and target nodes are provided, the set contains the nodes that,
+        if removed, would destroy all paths between source and target.
+
+    Examples
+    --------
+    >>> # Platonic icosahedral graph has node connectivity 5
+    >>> G = nx.icosahedral_graph()
+    >>> node_cut = nx.minimum_node_cut(G)
+    >>> len(node_cut)
+    5
+
+    You can use alternative flow algorithms for the underlying maximum
+    flow computation. In dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better
+    than the default :meth:`edmonds_karp`, which is faster for
+    sparse networks with highly skewed degree distributions. Alternative
+    flow functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> node_cut == nx.minimum_node_cut(G, flow_func=shortest_augmenting_path)
+    True
+
+    If you specify a pair of nodes (source and target) as parameters,
+    this function returns a local st node cut.
+
+    >>> len(nx.minimum_node_cut(G, 3, 7))
+    5
+
+    If you need to perform several local st cuts among different
+    pairs of nodes on the same graph, it is recommended that you reuse
+    the data structures used in the maximum flow computations. See
+    :meth:`minimum_st_node_cut` for details.
+
+    Notes
+    -----
+    This is a flow based implementation of minimum node cut. The algorithm
+    is based on solving a number of maximum flow computations to determine
+    the capacity of the minimum cut on an auxiliary directed network that
+    corresponds to the minimum node cut of G. It handles both directed
+    and undirected graphs. This implementation is based on algorithm 11
+    in [1]_.
+
+    See also
+    --------
+    :meth:`minimum_st_node_cut`
+    :meth:`minimum_cut`
+    :meth:`minimum_edge_cut`
+    :meth:`stoer_wagner`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+       http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+    """
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local minimum node cut.
+ if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return minimum_st_node_cut(G, s, t, flow_func=flow_func) + + # Global minimum node cut. + # Analog to the algorithm 11 for global node connectivity in [1]. + if G.is_directed(): + if not nx.is_weakly_connected(G): + raise nx.NetworkXError("Input graph is not connected") + iter_func = itertools.permutations + + def neighbors(v): + return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)]) + + else: + if not nx.is_connected(G): + raise nx.NetworkXError("Input graph is not connected") + iter_func = itertools.combinations + neighbors = G.neighbors + + # Reuse the auxiliary digraph and the residual network. + H = build_auxiliary_node_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R} + + # Choose a node with minimum degree. + v = min(G, key=G.degree) + # Initial node cutset is all neighbors of the node with minimum degree. + min_cut = set(G[v]) + # Compute st node cuts between v and all its non-neighbors nodes in G. + for w in set(G) - set(neighbors(v)) - {v}: + this_cut = minimum_st_node_cut(G, v, w, **kwargs) + if len(min_cut) >= len(this_cut): + min_cut = this_cut + # Also for non adjacent pairs of neighbors of v. + for x, y in iter_func(neighbors(v), 2): + if y in G[x]: + continue + this_cut = minimum_st_node_cut(G, x, y, **kwargs) + if len(min_cut) >= len(this_cut): + min_cut = this_cut + + return min_cut + + +@nx._dispatchable +def minimum_edge_cut(G, s=None, t=None, flow_func=None): + r"""Returns a set of edges of minimum cardinality that disconnects G. + + If source and target nodes are provided, this function returns the + set of edges of minimum cardinality that, if removed, would break + all paths among source and target in G. If not, it returns a set of + edges of minimum cardinality that disconnects G. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + cutset : set + Set of edges that, if removed, would disconnect G. If source + and target nodes are provided, the set contains the edges that + if removed, would destroy all paths between source and target. + + Examples + -------- + >>> # Platonic icosahedral graph has edge connectivity 5 + >>> G = nx.icosahedral_graph() + >>> len(nx.minimum_edge_cut(G)) + 5 + + You can use alternative flow algorithms for the underlying + maximum flow computation. In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. + Alternative flow functions have to be explicitly imported + from the flow package. 
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(nx.minimum_edge_cut(G, flow_func=shortest_augmenting_path))
+    5
+
+    If you specify a pair of nodes (source and target) as parameters,
+    this function returns a local minimum st edge cut.
+
+    >>> len(nx.minimum_edge_cut(G, 3, 7))
+    5
+
+    If you need to perform several local computations among different
+    pairs of nodes on the same graph, it is recommended that you reuse
+    the data structures used in the maximum flow computations. See
+    :meth:`local_edge_connectivity` for details.
+
+    Notes
+    -----
+    This is a flow based implementation of minimum edge cut. For
+    undirected graphs the algorithm works by finding a 'small' dominating
+    set of nodes of G (see algorithm 7 in [1]_) and computing the maximum
+    flow between an arbitrary node in the dominating set and the rest of
+    nodes in it. This is an implementation of algorithm 6 in [1]_. For
+    directed graphs, the algorithm does n calls to the max flow function.
+    The function raises an error if the directed graph is not weakly
+    connected, and returns an empty set if it is weakly but not strongly
+    connected. It is an implementation of algorithm 8 in [1]_.
+
+    See also
+    --------
+    :meth:`minimum_st_edge_cut`
+    :meth:`minimum_node_cut`
+    :meth:`stoer_wagner`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+       http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+    """
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # reuse auxiliary digraph and residual network
+    H = build_auxiliary_edge_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "residual": R, "auxiliary": H}
+
+    # Local minimum edge cut if s and t are not None
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return minimum_st_edge_cut(H, s, t, **kwargs)
+
+    # Global minimum edge cut
+    # Analogous to the algorithm for global edge connectivity
+    if G.is_directed():
+        # Based on algorithm 8 in [1]
+        if not nx.is_weakly_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+
+        # Initial cutset is all edges of a node with minimum degree
+        node = min(G, key=G.degree)
+        min_cut = set(G.edges(node))
+        nodes = list(G)
+        n = len(nodes)
+        for i in range(n):
+            try:
+                this_cut = minimum_st_edge_cut(H, nodes[i], nodes[i + 1], **kwargs)
+                if len(this_cut) <= len(min_cut):
+                    min_cut = this_cut
+            except IndexError:  # Last node!
+                this_cut = minimum_st_edge_cut(H, nodes[i], nodes[0], **kwargs)
+                if len(this_cut) <= len(min_cut):
+                    min_cut = this_cut
+
+        return min_cut
+
+    else:  # undirected
+        # Based on algorithm 6 in [1]
+        if not nx.is_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+
+        # Initial cutset is all edges of a node with minimum degree
+        node = min(G, key=G.degree)
+        min_cut = set(G.edges(node))
+        # A dominating set is \lambda-covering
+        # We need a dominating set with at least two nodes
+        for node in G:
+            D = nx.dominating_set(G, start_with=node)
+            v = D.pop()
+            if D:
+                break
+        else:
+            # in complete graphs the dominating set will always be of one node
+            # thus we return min_cut, which now contains the edges of a node
+            # with minimum degree
+            return min_cut
+        for w in D:
+            this_cut = minimum_st_edge_cut(H, v, w, **kwargs)
+            if len(this_cut) <= len(min_cut):
+                min_cut = this_cut
+
+        return min_cut
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/disjoint_paths.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/disjoint_paths.py
new file mode 100644
index 0000000..fdd8522
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/disjoint_paths.py
@@ -0,0 +1,408 @@
+"""Flow based node and edge disjoint paths."""
+
+from itertools import filterfalse as _filterfalse
+
+import networkx as nx
+
+# Define the default maximum flow function to use for the underlying
+# maximum flow computations
+from networkx.algorithms.flow import (
+    edmonds_karp,
+    preflow_push,
+    shortest_augmenting_path,
+)
+from networkx.exception import NetworkXNoPath
+
+# Functions to build auxiliary data structures.
+from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity
+
+__all__ = ["edge_disjoint_paths", "node_disjoint_paths"]
+default_flow_func = edmonds_karp
+
+
+@nx._dispatchable(
+    graphs={"G": 0, "auxiliary?": 5},
+    preserve_edge_attrs={"auxiliary": {"capacity": float("inf")}},
+)
+def edge_disjoint_paths(
+    G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
+):
+    """Returns the edge disjoint paths between source and target.
+
+    Edge disjoint paths are paths that do not share any edge. The
+    number of edge disjoint paths between source and target is equal
+    to their edge connectivity.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node for the flow.
+
+    t : node
+        Sink node for the flow.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. The choice of the default function
+        may change from version to version and should not be relied on.
+        Default value: None.
+
+    cutoff : integer or None (default: None)
+        Maximum number of paths to yield. If specified, the maximum flow
+        algorithm will terminate when the flow value reaches or exceeds the
+        cutoff. This only works for flows that support the cutoff parameter
+        (most do) and is ignored otherwise.
+
+    auxiliary : NetworkX DiGraph
+        Auxiliary digraph to compute flow based edge connectivity. If
+        provided it will be reused instead of recreated. Default value: None.
+
+    residual : NetworkX DiGraph
+        Residual network to compute maximum flow. If provided it will be
+        reused instead of recreated. Default value: None.
+
+    Returns
+    -------
+    paths : generator
+        A generator of edge independent paths.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If there is no path between source and target.
+
+    NetworkXError
+        If source or target are not in the graph G.
+
+    See also
+    --------
+    :meth:`node_disjoint_paths`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Examples
+    --------
+    We use in this example the platonic icosahedral graph, which has edge
+    connectivity 5, thus there are 5 edge disjoint paths between any
+    pair of nodes.
+
+    >>> G = nx.icosahedral_graph()
+    >>> len(list(nx.edge_disjoint_paths(G, 0, 6)))
+    5
+
+    If you need to compute edge disjoint paths on several pairs of
+    nodes in the same graph, it is recommended that you reuse the
+    data structures that NetworkX uses in the computation: the
+    auxiliary digraph for edge connectivity, and the residual
+    network for the underlying maximum flow computation.
+
+    Example of how to compute edge disjoint paths among all pairs of
+    nodes of the platonic icosahedral graph reusing the data
+    structures.
+
+    >>> import itertools
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity
+    >>> H = build_auxiliary_edge_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> result = {n: {} for n in G}
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as arguments
+    >>> for u, v in itertools.combinations(G, 2):
+    ...     k = len(list(nx.edge_disjoint_paths(G, u, v, auxiliary=H, residual=R)))
+    ...     result[u][v] = k
+    >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
+    True
+
+    You can also use alternative flow algorithms for computing edge disjoint
+    paths. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp` which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(list(nx.edge_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
+    5
+
+    Notes
+    -----
+    This is a flow based implementation of edge disjoint paths. We compute
+    the maximum flow between source and target on an auxiliary directed
+    network. The saturated edges in the residual network after running the
+    maximum flow algorithm correspond to edge disjoint paths between source
+    and target in the original network. This function handles both directed
+    and undirected graphs, and can use all flow algorithms from NetworkX flow
+    package.
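+
+    As a small additional sketch (reusing the graph ``G`` from the examples
+    above), the ``cutoff`` parameter described above bounds how many paths
+    are yielded:
+
+    >>> len(list(nx.edge_disjoint_paths(G, 0, 6, cutoff=2)))
+    2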
+
+    """
+    if s not in G:
+        raise nx.NetworkXError(f"node {s} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {t} not in graph")
+
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if auxiliary is None:
+        H = build_auxiliary_edge_connectivity(G)
+    else:
+        H = auxiliary
+
+    # Maximum possible edge disjoint paths
+    possible = min(H.out_degree(s), H.in_degree(t))
+    if not possible:
+        raise NetworkXNoPath
+
+    if cutoff is None:
+        cutoff = possible
+    else:
+        cutoff = min(cutoff, possible)
+
+    # Compute maximum flow between source and target. Flow functions in
+    # NetworkX return a residual network.
+    kwargs = {
+        "capacity": "capacity",
+        "residual": residual,
+        "cutoff": cutoff,
+        "value_only": True,
+    }
+    if flow_func is preflow_push:
+        del kwargs["cutoff"]
+    if flow_func is shortest_augmenting_path:
+        kwargs["two_phase"] = True
+    R = flow_func(H, s, t, **kwargs)
+
+    if R.graph["flow_value"] == 0:
+        raise NetworkXNoPath
+
+    # Saturated edges in the residual network form the edge disjoint paths
+    # between source and target
+    cutset = [
+        (u, v)
+        for u, v, d in R.edges(data=True)
+        if d["capacity"] == d["flow"] and d["flow"] > 0
+    ]
+    # This is equivalent to what flow.utils.build_flow_dict returns, but
+    # only for the nodes with saturated edges and without reporting 0 flows.
+    flow_dict = {n: {} for edge in cutset for n in edge}
+    for u, v in cutset:
+        flow_dict[u][v] = 1
+
+    # Rebuild the edge disjoint paths from the flow dictionary.
+    paths_found = 0
+    for v in list(flow_dict[s]):
+        if paths_found >= cutoff:
+            # preflow_push does not support cutoff: we have to
+            # keep track of the paths found and stop at cutoff.
+            break
+        path = [s]
+        if v == t:
+            path.append(v)
+            yield path
+            continue
+        u = v
+        while u != t:
+            path.append(u)
+            try:
+                u, _ = flow_dict[u].popitem()
+            except KeyError:
+                break
+        else:
+            path.append(t)
+            yield path
+        paths_found += 1
+
+
+@nx._dispatchable(
+    graphs={"G": 0, "auxiliary?": 5},
+    preserve_node_attrs={"auxiliary": {"id": None}},
+    preserve_graph_attrs={"auxiliary"},
+)
+def node_disjoint_paths(
+    G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
+):
+    r"""Computes node disjoint paths between source and target.
+
+    Node disjoint paths are paths that only share their first and last
+    nodes. The number of node independent paths between two nodes is
+    equal to their local node connectivity.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node.
+
+    t : node
+        Target node.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The choice
+        of the default function may change from version to version and
+        should not be relied on. Default value: None.
+
+    cutoff : integer or None (default: None)
+        Maximum number of paths to yield. If specified, the maximum flow
+        algorithm will terminate when the flow value reaches or exceeds the
+        cutoff. This only works for flows that support the cutoff parameter
+        (most do) and is ignored otherwise.
+
+    auxiliary : NetworkX DiGraph
+        Auxiliary digraph to compute flow based node connectivity. It has
+        to have a graph attribute called mapping with a dictionary mapping
+        node names in G and in the auxiliary digraph. If provided
+        it will be reused instead of recreated. Default value: None.
+
+    residual : NetworkX DiGraph
+        Residual network to compute maximum flow. If provided it will be
+        reused instead of recreated. Default value: None.
+
+    Returns
+    -------
+    paths : generator
+        Generator of node disjoint paths.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If there is no path between source and target.
+
+    NetworkXError
+        If source or target are not in the graph G.
+
+    Examples
+    --------
+    We use in this example the platonic icosahedral graph, which has node
+    connectivity 5, thus there are 5 node disjoint paths between any pair
+    of non-neighbor nodes.
+
+    >>> G = nx.icosahedral_graph()
+    >>> len(list(nx.node_disjoint_paths(G, 0, 6)))
+    5
+
+    If you need to compute node disjoint paths between several pairs of
+    nodes in the same graph, it is recommended that you reuse the
+    data structures that NetworkX uses in the computation: the
+    auxiliary digraph for node connectivity and node cuts, and the
+    residual network for the underlying maximum flow computation.
+
+    Example of how to compute node disjoint paths reusing the data
+    structures:
+
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity
+    >>> H = build_auxiliary_node_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as arguments
+    >>> len(list(nx.node_disjoint_paths(G, 0, 6, auxiliary=H, residual=R)))
+    5
+
+    You can also use alternative flow algorithms for computing node disjoint
+    paths. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp` which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(list(nx.node_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
+    5
+
+    Notes
+    -----
+    This is a flow based implementation of node disjoint paths. We compute
+    the maximum flow between source and target on an auxiliary directed
+    network. The saturated edges in the residual network after running the
+    maximum flow algorithm correspond to node disjoint paths between source
+    and target in the original network. This function handles both directed
+    and undirected graphs, and can use all flow algorithms from NetworkX flow
+    package.
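+
+    As a small additional check (reusing ``G`` from the examples above,
+    after materializing the generator into a list), the returned paths
+    share no inner nodes, which is what node disjointness means:
+
+    >>> import itertools
+    >>> paths = list(nx.node_disjoint_paths(G, 0, 6))
+    >>> inner = [set(p[1:-1]) for p in paths]
+    >>> all(a.isdisjoint(b) for a, b in itertools.combinations(inner, 2))
+    True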
+
+    See also
+    --------
+    :meth:`edge_disjoint_paths`
+    :meth:`node_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    """
+    if s not in G:
+        raise nx.NetworkXError(f"node {s} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {t} not in graph")
+
+    if auxiliary is None:
+        H = build_auxiliary_node_connectivity(G)
+    else:
+        H = auxiliary
+
+    mapping = H.graph.get("mapping", None)
+    if mapping is None:
+        raise nx.NetworkXError("Invalid auxiliary digraph.")
+
+    # Maximum possible node disjoint paths
+    possible = min(H.out_degree(f"{mapping[s]}B"), H.in_degree(f"{mapping[t]}A"))
+    if not possible:
+        raise NetworkXNoPath
+
+    if cutoff is None:
+        cutoff = possible
+    else:
+        cutoff = min(cutoff, possible)
+
+    kwargs = {
+        "flow_func": flow_func,
+        "residual": residual,
+        "auxiliary": H,
+        "cutoff": cutoff,
+    }
+
+    # The edge disjoint paths in the auxiliary digraph correspond to the node
+    # disjoint paths in the original graph.
+    paths_edges = edge_disjoint_paths(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs)
+    for path in paths_edges:
+        # Each node in the original graph maps to two nodes in auxiliary graph
+        yield list(_unique_everseen(H.nodes[node]["id"] for node in path))
+
+
+def _unique_everseen(iterable):
+    # Adapted from https://docs.python.org/3/library/itertools.html examples
+    "List unique elements, preserving order. Remember all elements ever seen."
+    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+    seen = set()
+    seen_add = seen.add
+    for element in _filterfalse(seen.__contains__, iterable):
+        seen_add(element)
+        yield element
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/edge_augmentation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/edge_augmentation.py
new file mode 100644
index 0000000..6dfe014
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/edge_augmentation.py
@@ -0,0 +1,1270 @@
+"""
+Algorithms for finding k-edge-augmentations
+
+A k-edge-augmentation is a set of edges that, once added to a graph, ensures
+that the graph is k-edge-connected; i.e. the graph cannot be disconnected
+unless k or more edges are removed. Typically, the goal is to find the
+augmentation with minimum weight. In general, it is not guaranteed that a
+k-edge-augmentation exists.
+
+See Also
+--------
+:mod:`edge_kcomponents` : algorithms for finding k-edge-connected components
+:mod:`connectivity` : algorithms for determining edge connectivity.
+"""
+
+import itertools as it
+import math
+from collections import defaultdict, namedtuple
+
+import networkx as nx
+from networkx.utils import not_implemented_for, py_random_state
+
+__all__ = ["k_edge_augmentation", "is_k_edge_connected", "is_locally_k_edge_connected"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def is_k_edge_connected(G, k):
+    """Tests to see if a graph is k-edge-connected.
+
+    Is it impossible to disconnect the graph by removing fewer than k edges?
+    If so, then G is k-edge-connected.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    k : integer
+        edge connectivity to test for
+
+    Returns
+    -------
+    boolean
+        True if G is k-edge-connected.
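+
+    Raises
+    ------
+    ValueError
+        If k is less than 1.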
+
+    See Also
+    --------
+    :func:`is_locally_k_edge_connected`
+
+    Examples
+    --------
+    >>> G = nx.barbell_graph(10, 0)
+    >>> nx.is_k_edge_connected(G, k=1)
+    True
+    >>> nx.is_k_edge_connected(G, k=2)
+    False
+    """
+    if k < 1:
+        raise ValueError(f"k must be positive, not {k}")
+    # First try to quickly determine if G is not k-edge-connected
+    if G.number_of_nodes() < k + 1:
+        return False
+    elif any(d < k for n, d in G.degree()):
+        return False
+    else:
+        # Otherwise perform the full check
+        if k == 1:
+            return nx.is_connected(G)
+        elif k == 2:
+            return nx.is_connected(G) and not nx.has_bridges(G)
+        else:
+            return nx.edge_connectivity(G, cutoff=k) >= k
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def is_locally_k_edge_connected(G, s, t, k):
+    """Tests to see if an edge in a graph is locally k-edge-connected.
+
+    Is it impossible to disconnect s and t by removing fewer than k edges?
+    If so, then s and t are locally k-edge-connected in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    s : node
+        Source node
+
+    t : node
+        Target node
+
+    k : integer
+        local edge connectivity for nodes s and t
+
+    Returns
+    -------
+    boolean
+        True if s and t are locally k-edge-connected in G.
+
+    See Also
+    --------
+    :func:`is_k_edge_connected`
+
+    Examples
+    --------
+    >>> from networkx.algorithms.connectivity import is_locally_k_edge_connected
+    >>> G = nx.barbell_graph(10, 0)
+    >>> is_locally_k_edge_connected(G, 5, 15, k=1)
+    True
+    >>> is_locally_k_edge_connected(G, 5, 15, k=2)
+    False
+    >>> is_locally_k_edge_connected(G, 1, 5, k=2)
+    True
+    """
+    if k < 1:
+        raise ValueError(f"k must be positive, not {k}")
+
+    # First try to quickly determine if s, t is not k-locally-edge-connected in G
+    if G.degree(s) < k or G.degree(t) < k:
+        return False
+    else:
+        # Otherwise perform the full check
+        if k == 1:
+            return nx.has_path(G, s, t)
+        else:
+            localk = nx.connectivity.local_edge_connectivity(G, s, t, cutoff=k)
+            return localk >= k
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def k_edge_augmentation(G, k, avail=None, weight=None, partial=False):
+    """Finds a set of edges to k-edge-connect G.
+
+    Adding edges from the augmentation to G makes it impossible to disconnect
+    G unless k or more edges are removed. This function uses the most efficient
+    function available (depending on the value of k and if the problem is
+    weighted or unweighted) to search for a minimum weight subset of available
+    edges that k-edge-connects G. In general, finding a k-edge-augmentation is
+    NP-hard, so solutions are not guaranteed to be minimal. Furthermore, a
+    k-edge-augmentation may not exist.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    k : integer
+        Desired edge connectivity
+
+    avail : dict or a set of 2 or 3 tuples
+        The available edges that can be used in the augmentation.
+
+        If unspecified, then all edges in the complement of G are available.
+        Otherwise, each item is an available edge (with an optional weight).
+
+        In the unweighted case, each item is an edge ``(u, v)``.
+
+        In the weighted case, each item is a 3-tuple ``(u, v, d)`` or a dict
+        with items ``(u, v): d``. The third item, ``d``, can be a dictionary
+        or a real number. If ``d`` is a dictionary, ``d[weight]``
+        corresponds to the weight.
+
+    weight : string
+        key to use to find weights if ``avail`` is a set of 3-tuples where the
+        third item in each tuple is a dictionary.
+
+    partial : boolean
+        If partial is True and no feasible k-edge-augmentation exists, then
+        a partial k-edge-augmentation is generated. Adding the edges in a
+        partial augmentation to G minimizes the number of k-edge-connected
+        components and maximizes the edge connectivity between those
+        components. For details, see :func:`partial_k_edge_augmentation`.
+
+    Yields
+    ------
+    edge : tuple
+        Edges that, once added to G, would cause G to become k-edge-connected.
+        If partial is False, an error is raised if this is not possible.
+        Otherwise, generated edges form a partial augmentation, which
+        k-edge-connects any part of G where it is possible, and maximally
+        connects the remaining parts.
+
+    Raises
+    ------
+    NetworkXUnfeasible
+        If partial is False and no k-edge-augmentation exists.
+
+    NetworkXNotImplemented
+        If the input graph is directed or a multigraph.
+
+    ValueError
+        If k is less than 1.
+
+    Notes
+    -----
+    When k=1 this returns an optimal solution.
+
+    When k=2 and ``avail`` is None, this returns an optimal solution.
+    Otherwise when k=2, this returns a 2-approximation of the optimal solution.
+
+    For k>3, this problem is NP-hard and this uses a randomized algorithm that
+    produces a feasible solution, but provides no guarantees on the
+    solution weight.
+
+    Examples
+    --------
+    >>> # Unweighted cases
+    >>> G = nx.path_graph((1, 2, 3, 4))
+    >>> G.add_node(5)
+    >>> sorted(nx.k_edge_augmentation(G, k=1))
+    [(1, 5)]
+    >>> sorted(nx.k_edge_augmentation(G, k=2))
+    [(1, 5), (5, 4)]
+    >>> sorted(nx.k_edge_augmentation(G, k=3))
+    [(1, 4), (1, 5), (2, 5), (3, 5), (4, 5)]
+    >>> complement = list(nx.k_edge_augmentation(G, k=5, partial=True))
+    >>> G.add_edges_from(complement)
+    >>> nx.edge_connectivity(G)
+    4
+
+    >>> # Weighted cases
+    >>> G = nx.path_graph((1, 2, 3, 4))
+    >>> G.add_node(5)
+    >>> # avail can be a tuple with a dict
+    >>> avail = [(1, 5, {"weight": 11}), (2, 5, {"weight": 10})]
+    >>> sorted(nx.k_edge_augmentation(G, k=1, avail=avail, weight="weight"))
+    [(2, 5)]
+    >>> # or avail can be a 3-tuple with a real number
+    >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)]
+    >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail))
+    [(1, 5), (2, 5), (4, 5)]
+    >>> # or avail can be a dict
+    >>> avail = {(1, 5): 11, (2, 5): 10, (4, 3): 1, (4, 5): 51}
+    >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail))
+    [(1, 5), (2, 5), (4, 5)]
+    >>> # If augmentation is infeasible, then a partial solution can be found
+    >>> avail = {(1, 5): 11}
+    >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail, partial=True))
+    [(1, 5)]
+    """
+    try:
+        if k <= 0:
+            raise ValueError(f"k must be a positive integer, not {k}")
+        elif G.number_of_nodes() < k + 1:
+            msg = f"impossible to {k} connect in graph with less than {k + 1} nodes"
+            raise nx.NetworkXUnfeasible(msg)
+        elif avail is not None and len(avail) == 0:
+            if not nx.is_k_edge_connected(G, k):
+                raise nx.NetworkXUnfeasible("no available edges")
+            aug_edges = []
+        elif k == 1:
+            aug_edges = one_edge_augmentation(
+                G, avail=avail, weight=weight, partial=partial
+            )
+        elif k == 2:
+            aug_edges = bridge_augmentation(G, avail=avail, weight=weight)
+        else:
+            # raise NotImplementedError(f'not implemented for k>2. k={k}')
+            aug_edges = greedy_k_edge_augmentation(
+                G, k=k, avail=avail, weight=weight, seed=0
+            )
+        # Do eager evaluation so we can catch any exceptions
+        # before executing partial code.
+ yield from list(aug_edges) + except nx.NetworkXUnfeasible: + if partial: + # Return all available edges + if avail is None: + aug_edges = complement_edges(G) + else: + # If we can't k-edge-connect the entire graph, try to + # k-edge-connect as much as possible + aug_edges = partial_k_edge_augmentation( + G, k=k, avail=avail, weight=weight + ) + yield from aug_edges + else: + raise + + +@nx._dispatchable +def partial_k_edge_augmentation(G, k, avail, weight=None): + """Finds augmentation that k-edge-connects as much of the graph as possible. + + When a k-edge-augmentation is not possible, we can still try to find a + small set of edges that partially k-edge-connects as much of the graph as + possible. All possible edges are generated between remaining parts. + This minimizes the number of k-edge-connected subgraphs in the resulting + graph and maximizes the edge connectivity between those subgraphs. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + k : integer + Desired edge connectivity + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + Yields + ------ + edge : tuple + Edges in the partial augmentation of G. These edges k-edge-connect any + part of G where it is possible, and maximally connects the remaining + parts. In other words, all edges from avail are generated except for + those within subgraphs that have already become k-edge-connected. + + Notes + ----- + Construct H that augments G with all edges in avail. + Find the k-edge-subgraphs of H. + For each k-edge-subgraph, if the number of nodes is more than k, then find + the k-edge-augmentation of that graph and add it to the solution. Then add + all edges in avail between k-edge subgraphs to the solution. 
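+
+    As an illustrative sketch of the first two steps (using the same graph
+    and ``avail`` list as the example below, and ignoring the weight
+    bookkeeping the implementation attaches to H), the k-edge-connected
+    subgraphs of the augmented helper graph delimit the parts of G that
+    can still be k-edge-connected:
+
+    >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
+    >>> avail = [(1, 3), (1, 4), (1, 5), (2, 4), (2, 5), (3, 5), (1, 8)]
+    >>> H = G.copy()
+    >>> H.add_edges_from(avail)
+    >>> sorted(map(sorted, nx.k_edge_subgraphs(H, k=2)))
+    [[1, 2, 3, 4, 5], [6], [7], [8]]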
+ + See Also + -------- + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> G.add_node(8) + >>> avail = [(1, 3), (1, 4), (1, 5), (2, 4), (2, 5), (3, 5), (1, 8)] + >>> sorted(partial_k_edge_augmentation(G, k=2, avail=avail)) + [(1, 5), (1, 8)] + """ + + def _edges_between_disjoint(H, only1, only2): + """finds edges between disjoint nodes""" + only1_adj = {u: set(H.adj[u]) for u in only1} + for u, neighbs in only1_adj.items(): + # Find the neighbors of u in only1 that are also in only2 + neighbs12 = neighbs.intersection(only2) + for v in neighbs12: + yield (u, v) + + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G) + + # Find which parts of the graph can be k-edge-connected + H = G.copy() + H.add_edges_from( + ( + (u, v, {"weight": w, "generator": (u, v)}) + for (u, v), w in zip(avail, avail_w) + ) + ) + k_edge_subgraphs = list(nx.k_edge_subgraphs(H, k=k)) + + # Generate edges to k-edge-connect internal subgraphs + for nodes in k_edge_subgraphs: + if len(nodes) > 1: + # Get the k-edge-connected subgraph + C = H.subgraph(nodes).copy() + # Find the internal edges that were available + sub_avail = { + d["generator"]: d["weight"] + for (u, v, d) in C.edges(data=True) + if "generator" in d + } + # Remove potential augmenting edges + C.remove_edges_from(sub_avail.keys()) + # Find a subset of these edges that makes the component + # k-edge-connected and ignore the rest + yield from nx.k_edge_augmentation(C, k=k, avail=sub_avail) + + # Generate all edges between CCs that could not be k-edge-connected + for cc1, cc2 in it.combinations(k_edge_subgraphs, 2): + for u, v in _edges_between_disjoint(H, cc1, cc2): + d = H.get_edge_data(u, v) + edge = d.get("generator", None) + if edge is not None: + yield edge + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatchable +def one_edge_augmentation(G, avail=None, weight=None, partial=False): + """Finds minimum weight set of edges to connect G. + + Equivalent to :func:`k_edge_augmentation` when k=1. Adding the resulting + edges to G will make it 1-edge-connected. The solution is optimal for both + weighted and non-weighted variants. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + partial : boolean + If partial is True and no feasible k-edge-augmentation exists, then the + augmenting edges minimize the number of connected components. + + Yields + ------ + edge : tuple + Edges in the one-augmentation of G + + Raises + ------ + NetworkXUnfeasible + If partial is False and no one-edge-augmentation exists. + + Notes + ----- + Uses either :func:`unconstrained_one_edge_augmentation` or + :func:`weighted_one_edge_augmentation` depending on whether ``avail`` is + specified. Both algorithms are based on finding a minimum spanning tree. + As such both algorithms find optimal solutions and run in linear time. 
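+
+    A short usage sketch (a graph with three components, so two augmenting
+    edges suffice; the exact edges chosen may vary):
+
+    >>> G = nx.Graph([(1, 2), (3, 4)])
+    >>> G.add_node(5)
+    >>> aug = list(one_edge_augmentation(G))
+    >>> len(aug)
+    2
+    >>> G.add_edges_from(aug)
+    >>> nx.is_connected(G)
+    True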
+
+    See Also
+    --------
+    :func:`k_edge_augmentation`
+    """
+    if avail is None:
+        return unconstrained_one_edge_augmentation(G)
+    else:
+        return weighted_one_edge_augmentation(
+            G, avail=avail, weight=weight, partial=partial
+        )
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatchable
+def bridge_augmentation(G, avail=None, weight=None):
+    """Finds a set of edges that bridge connects G.
+
+    Equivalent to :func:`k_edge_augmentation` when k=2, and partial=False.
+    Adding the resulting edges to G will make it 2-edge-connected. If no
+    constraints are specified the returned set of edges is minimum and
+    optimal, otherwise the solution is approximated.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    avail : dict or a set of 2 or 3 tuples
+        For more details, see :func:`k_edge_augmentation`.
+
+    weight : string
+        key to use to find weights if ``avail`` is a set of 3-tuples.
+        For more details, see :func:`k_edge_augmentation`.
+
+    Yields
+    ------
+    edge : tuple
+        Edges in the bridge-augmentation of G
+
+    Raises
+    ------
+    NetworkXUnfeasible
+        If no bridge-augmentation exists.
+
+    Notes
+    -----
+    If there are no constraints the solution can be computed in linear time
+    using :func:`unconstrained_bridge_augmentation`. Otherwise, the problem
+    becomes NP-hard and the solution is approximated by
+    :func:`weighted_bridge_augmentation`.
+
+    See Also
+    --------
+    :func:`k_edge_augmentation`
+    """
+    if G.number_of_nodes() < 3:
+        raise nx.NetworkXUnfeasible("impossible to bridge connect less than 3 nodes")
+    if avail is None:
+        return unconstrained_bridge_augmentation(G)
+    else:
+        return weighted_bridge_augmentation(G, avail, weight=weight)
+
+
+# --- Algorithms and Helpers ---
+
+
+def _ordered(u, v):
+    """Returns the nodes in an undirected edge in lower-triangular order"""
+    return (u, v) if u < v else (v, u)
+
+
+def _unpack_available_edges(avail, weight=None, G=None):
+    """Helper to separate avail into edges and corresponding weights"""
+    if weight is None:
+        weight = "weight"
+    if isinstance(avail, dict):
+        avail_uv = list(avail.keys())
+        avail_w = list(avail.values())
+    else:
+
+        def _try_getitem(d):
+            try:
+                return d[weight]
+            except TypeError:
+                return d
+
+        avail_uv = [tup[0:2] for tup in avail]
+        avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1]) for tup in avail]
+
+    if G is not None:
+        # Edges already in the graph are filtered
+        flags = [not G.has_edge(u, v) for u, v in avail_uv]
+        avail_uv = list(it.compress(avail_uv, flags))
+        avail_w = list(it.compress(avail_w, flags))
+    return avail_uv, avail_w
+
+
+MetaEdge = namedtuple("MetaEdge", ("meta_uv", "uv", "w"))
+
+
+def _lightest_meta_edges(mapping, avail_uv, avail_w):
+    """Maps available edges in the original graph to edges in the metagraph.
+
+    Parameters
+    ----------
+    mapping : dict
+        mapping produced by :func:`collapse`, that maps each node in the
+        original graph to a node in the meta graph
+
+    avail_uv : list
+        list of edges
+
+    avail_w : list
+        list of edge weights
+
+    Notes
+    -----
+    Each node in the metagraph is a k-edge-connected component in the original
+    graph. We don't care about any edge within the same k-edge-connected
+    component, so we ignore self edges. We also are only interested in the
+    minimum weight edge bridging each k-edge-connected component, so we group
+    the edges by meta-edge and take the lightest in each group.
+ + Examples + -------- + >>> # Each group represents a meta-node + >>> groups = ([1, 2, 3], [4, 5], [6]) + >>> mapping = {n: meta_n for meta_n, ns in enumerate(groups) for n in ns} + >>> avail_uv = [(1, 2), (3, 6), (1, 4), (5, 2), (6, 1), (2, 6), (3, 1)] + >>> avail_w = [20, 99, 20, 15, 50, 99, 20] + >>> sorted(_lightest_meta_edges(mapping, avail_uv, avail_w)) + [MetaEdge(meta_uv=(0, 1), uv=(5, 2), w=15), MetaEdge(meta_uv=(0, 2), uv=(6, 1), w=50)] + """ + grouped_wuv = defaultdict(list) + for w, (u, v) in zip(avail_w, avail_uv): + # Order the meta-edge so it can be used as a dict key + meta_uv = _ordered(mapping[u], mapping[v]) + # Group each available edge using the meta-edge as a key + grouped_wuv[meta_uv].append((w, u, v)) + + # Now that all available edges are grouped, choose one per group + for (mu, mv), choices_wuv in grouped_wuv.items(): + # Ignore available edges within the same meta-node + if mu != mv: + # Choose the lightest available edge belonging to each meta-edge + w, u, v = min(choices_wuv) + yield MetaEdge((mu, mv), (u, v), w) + + +@nx._dispatchable +def unconstrained_one_edge_augmentation(G): + """Finds the smallest set of edges to connect G. + + This is a variant of the unweighted MST problem. + If G is not empty, a feasible solution always exists. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Yields + ------ + edge : tuple + Edges in the one-edge-augmentation of G + + See Also + -------- + :func:`one_edge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)]) + >>> G.add_nodes_from([6, 7, 8]) + >>> sorted(unconstrained_one_edge_augmentation(G)) + [(1, 4), (4, 6), (6, 7), (7, 8)] + """ + ccs1 = list(nx.connected_components(G)) + C = collapse(G, ccs1) + # When we are not constrained, we can just make a meta graph tree. + meta_nodes = list(C.nodes()) + # build a path in the metagraph + meta_aug = list(zip(meta_nodes, meta_nodes[1:])) + # map that path to the original graph + inverse = defaultdict(list) + for k, v in C.graph["mapping"].items(): + inverse[v].append(k) + for mu, mv in meta_aug: + yield (inverse[mu][0], inverse[mv][0]) + + +@nx._dispatchable +def weighted_one_edge_augmentation(G, avail, weight=None, partial=False): + """Finds the minimum weight set of edges to connect G if one exists. + + This is a variant of the weighted MST problem. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + partial : boolean + If partial is True and no feasible k-edge-augmentation exists, then the + augmenting edges minimize the number of connected components. + + Yields + ------ + edge : tuple + Edges in the subset of avail chosen to connect G. 
+
+ See Also
+ --------
+ :func:`one_edge_augmentation`
+ :func:`k_edge_augmentation`
+
+ Examples
+ --------
+ >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)])
+ >>> G.add_nodes_from([6, 7, 8])
+ >>> # any edge not in avail has an implicit weight of infinity
+ >>> avail = [(1, 3), (1, 5), (4, 7), (4, 8), (6, 1), (8, 1), (8, 2)]
+ >>> sorted(weighted_one_edge_augmentation(G, avail))
+ [(1, 5), (4, 7), (6, 1), (8, 1)]
+ >>> # find another solution by giving large weights to edges in the
+ >>> # previous solution (note some of the old edges must be used)
+ >>> avail = [(1, 3), (1, 5, 99), (4, 7, 9), (6, 1, 99), (8, 1, 99), (8, 2)]
+ >>> sorted(weighted_one_edge_augmentation(G, avail))
+ [(1, 5), (4, 7), (6, 1), (8, 2)]
+ """
+ avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
+ # Collapse CCs in the original graph into nodes in a metagraph
+ # Then find an MST of the metagraph instead of the original graph
+ C = collapse(G, nx.connected_components(G))
+ mapping = C.graph["mapping"]
+ # Assign each available edge to an edge in the metagraph
+ candidate_mapping = _lightest_meta_edges(mapping, avail_uv, avail_w)
+ # nx.set_edge_attributes(C, name='weight', values=0)
+ C.add_edges_from(
+ (mu, mv, {"weight": w, "generator": uv})
+ for (mu, mv), uv, w in candidate_mapping
+ )
+ # Find MST of the meta graph
+ meta_mst = nx.minimum_spanning_tree(C)
+ if not partial and not nx.is_connected(meta_mst):
+ raise nx.NetworkXUnfeasible("Not possible to connect G with available edges")
+ # Yield the edge that generated the meta-edge
+ for mu, mv, d in meta_mst.edges(data=True):
+ if "generator" in d:
+ edge = d["generator"]
+ yield edge
+
+
+@nx._dispatchable
+def unconstrained_bridge_augmentation(G):
+ """Finds an optimal 2-edge-augmentation of G using the fewest edges.
+
+ This is an implementation of the algorithm detailed in [1]_.
+ The basic idea is to construct a meta-graph of bridge-ccs, connect leaf
+ nodes of the trees to connect the entire graph, and finally connect the
+ leafs of the tree in dfs-preorder to bridge connect the entire graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ An undirected graph.
+
+ Yields
+ ------
+ edge : tuple
+ Edges in the bridge augmentation of G
+
+ Notes
+ -----
+ Input: a graph G.
+ First find the bridge components of G and collapse each bridge-cc into a
+ node of a metagraph C, which is guaranteed to be a forest of trees.
+
+ C contains p "leafs" --- nodes with exactly one incident edge.
+ C contains q "isolated nodes" --- nodes with no incident edges.
+
+ Theorem: If p + q > 1, then at least :math:`ceil(p / 2) + q` edges are
+ needed to bridge connect C. This algorithm achieves this min number.
+
+ The method first adds enough edges to make G into a tree and then pairs
+ leafs in a simple fashion.
+
+ Let n be the number of trees in C. Let v(i) be an isolated vertex in the
+ i-th tree if one exists, otherwise it is a pair of distinct leaf nodes
+ in the i-th tree. Alternating edges from these sets (i.e. adding edges
+ A1 = [(v(i)[0], v(i + 1)[1]), (v(i + 1)[0], v(i + 2)[1]), ...]) connects C
+ into a tree T. This tree has p' = p + 2q - 2(n - 1) leafs and no isolated
+ vertices. A1 has n - 1 edges. The next step finds ceil(p' / 2) edges to
+ biconnect any tree with p' leafs.
+
+ Convert T into an arborescence T' by picking an arbitrary root node with
+ degree >= 2 and directing all edges away from the root. Note the
+ implementation implicitly constructs T'.
+
+ The leafs of T' are the nodes with no outgoing edges in T'.
+ Order the leafs of T' by DFS preorder. Then break this list in half
+ and add the zipped pairs to A2.
+
+ The set A = A1 + A2 is the minimum augmentation in the metagraph.
+
+ To convert this to edges in the original graph, each meta-edge is mapped
+ back to an arbitrary pair of nodes from the two bridge components it
+ joins (this is what the implementation below does).
+
+ References
+ ----------
+ .. [1] Eswaran, Kapali P., and R. Endre Tarjan. (1975) Augmentation problems.
+ http://epubs.siam.org/doi/abs/10.1137/0205044
+
+ See Also
+ --------
+ :func:`bridge_augmentation`
+ :func:`k_edge_augmentation`
+
+ Examples
+ --------
+ >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
+ >>> sorted(unconstrained_bridge_augmentation(G))
+ [(1, 7)]
+ >>> G = nx.path_graph((1, 2, 3, 2, 4, 5, 6, 7))
+ >>> sorted(unconstrained_bridge_augmentation(G))
+ [(1, 3), (3, 7)]
+ >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)])
+ >>> G.add_node(4)
+ >>> sorted(unconstrained_bridge_augmentation(G))
+ [(1, 4), (4, 0)]
+ """
+ # -----
+ # Mapping of terms from (Eswaran and Tarjan):
+ # G = G_0 - the input graph
+ # C = G_0' - the bridge condensation of G. (This is a forest of trees)
+ # A1 = A_1 - the edges to connect the forest into a tree
+ # leaf = pendant - a node with degree of 1
+
+ # alpha(v) = maps the node v in G to its meta-node in C
+ # beta(x) = maps the meta-node x in C to any node in the bridge
+ # component of G corresponding to x.
+
+ # find the 2-edge-connected components of G
+ bridge_ccs = list(nx.connectivity.bridge_components(G))
+ # condense G into a forest C
+ C = collapse(G, bridge_ccs)
+
+ # Choose pairs of distinct leaf nodes in each tree. If this is not
+ # possible then make a pair using the single isolated node in the tree.
+ vset1 = [
+ tuple(cc) * 2 # case1: an isolated node
+ if len(cc) == 1
+ else sorted(cc, key=C.degree)[0:2] # case2: pair of leaf nodes
+ for cc in nx.connected_components(C)
+ ]
+ if len(vset1) > 1:
+ # Use this set to construct edges that connect C into a tree.
+ nodes1 = [vs[0] for vs in vset1]
+ nodes2 = [vs[1] for vs in vset1]
+ A1 = list(zip(nodes1[1:], nodes2))
+ else:
+ A1 = []
+ # Connect each tree in the forest to construct an arborescence
+ T = C.copy()
+ T.add_edges_from(A1)
+
+ # If there are only two leaf nodes, we simply connect them.
+ leafs = [n for n, d in T.degree() if d == 1]
+ if len(leafs) == 1:
+ A2 = []
+ if len(leafs) == 2:
+ A2 = [tuple(leafs)]
+ else:
+ # Choose an arbitrary non-leaf root
+ try:
+ root = next(n for n, d in T.degree() if d > 1)
+ except StopIteration: # no nodes found with degree > 1
+ return
+ # order the leaves of C by (induced directed) preorder
+ v2 = [n for n in nx.dfs_preorder_nodes(T, root) if T.degree(n) == 1]
+ # connecting first half of the leafs in pre-order to the second
+ # half will bridge connect the tree with the fewest edges.
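+ # (illustrative) with 5 preorder leaves [a, b, c, d, e]: half = 3, so
+ # A2 = [(a, c), (b, d), (c, e)]; when len(v2) is odd the middle leaf
+ # appears on both sides, which is harmless.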
+ half = math.ceil(len(v2) / 2) + A2 = list(zip(v2[:half], v2[-half:])) + + # collect the edges used to augment the original forest + aug_tree_edges = A1 + A2 + + # Construct the mapping (beta) from meta-nodes to regular nodes + inverse = defaultdict(list) + for k, v in C.graph["mapping"].items(): + inverse[v].append(k) + # sort so we choose minimum degree nodes first + inverse = { + mu: sorted(mapped, key=lambda u: (G.degree(u), u)) + for mu, mapped in inverse.items() + } + + # For each meta-edge, map back to an arbitrary pair in the original graph + G2 = G.copy() + for mu, mv in aug_tree_edges: + # Find the first available edge that doesn't exist and return it + for u, v in it.product(inverse[mu], inverse[mv]): + if not G2.has_edge(u, v): + G2.add_edge(u, v) + yield u, v + break + + +@nx._dispatchable +def weighted_bridge_augmentation(G, avail, weight=None): + """Finds an approximate min-weight 2-edge-augmentation of G. + + This is an implementation of the approximation algorithm detailed in [1]_. + It chooses a set of edges from avail to add to G that renders it + 2-edge-connected if such a subset exists. This is done by finding a + minimum spanning arborescence of a specially constructed metagraph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : set of 2 or 3 tuples. + candidate edges (with optional weights) to choose from + + weight : string + key to use to find weights if avail is a set of 3-tuples where the + third item in each tuple is a dictionary. + + Yields + ------ + edge : tuple + Edges in the subset of avail chosen to bridge augment G. + + Notes + ----- + Finding a weighted 2-edge-augmentation is NP-hard. + Any edge not in ``avail`` is considered to have a weight of infinity. + The approximation factor is 2 if ``G`` is connected and 3 if it is not. + Runs in :math:`O(m + n log(n))` time + + References + ---------- + .. [1] Khuller, Samir, and Ramakrishna Thurimella. (1993) Approximation + algorithms for graph augmentation. + http://www.sciencedirect.com/science/article/pii/S0196677483710102 + + See Also + -------- + :func:`bridge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> # When the weights are equal, (1, 4) is the best + >>> avail = [(1, 4, 1), (1, 3, 1), (2, 4, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail)) + [(1, 4)] + >>> # Giving (1, 4) a high weight makes the two edge solution the best. + >>> avail = [(1, 4, 1000), (1, 3, 1), (2, 4, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail)) + [(1, 3), (2, 4)] + >>> # ------ + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> G.add_node(5) + >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail=avail)) + [(1, 5), (4, 5)] + >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)] + >>> sorted(weighted_bridge_augmentation(G, avail=avail)) + [(1, 5), (2, 5), (4, 5)] + """ + + if weight is None: + weight = "weight" + + # If input G is not connected the approximation factor increases to 3 + if not nx.is_connected(G): + H = G.copy() + connectors = list(one_edge_augmentation(H, avail=avail, weight=weight)) + H.add_edges_from(connectors) + + yield from connectors + else: + connectors = [] + H = G + + if len(avail) == 0: + if nx.has_bridges(H): + raise nx.NetworkXUnfeasible("no augmentation possible") + + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=H) + + # Collapse input into a metagraph. 
Meta nodes are bridge-ccs
+ bridge_ccs = nx.connectivity.bridge_components(H)
+ C = collapse(H, bridge_ccs)
+
+ # Use the meta graph to shrink avail to a small feasible subset
+ mapping = C.graph["mapping"]
+ # Choose the minimum weight feasible edge in each group
+ meta_to_wuv = {
+ (mu, mv): (w, uv)
+ for (mu, mv), uv, w in _lightest_meta_edges(mapping, avail_uv, avail_w)
+ }
+
+ # Mapping of terms from (Khuller and Thurimella):
+ # C : G_0 = (V, E^0)
+ # This is the metagraph where each node is a 2-edge-cc in G.
+ # The edges in C represent bridges in the original graph.
+ # (mu, mv) : E - E^0 # they group both avail and given edges in E
+ # T : \Gamma
+ # D : G^D = (V, E_D)
+
+ # The paper uses ancestor because children point to parents, which is
+ # contrary to networkx standards. So, we actually need to run
+ # nx.tree_all_pairs_lowest_common_ancestor on the reversed Tree.
+
+ # Pick an arbitrary leaf from C as the root
+ try:
+ root = next(n for n, d in C.degree() if d == 1)
+ except StopIteration: # no nodes found with degree == 1
+ return
+ # Root C into a tree TR by directing all edges away from the root
+ # Note in their paper T directs edges towards the root
+ TR = nx.dfs_tree(C, root)
+
+ # Add to D the directed edges of T and set their weight to zero
+ # This indicates that it costs nothing to use edges that were given.
+ D = nx.reverse(TR).copy()
+
+ nx.set_edge_attributes(D, name="weight", values=0)
+
+ # The LCA of mu and mv in T is the shared ancestor of mu and mv that is
+ # located farthest from the root.
+ lca_gen = nx.tree_all_pairs_lowest_common_ancestor(
+ TR, root=root, pairs=meta_to_wuv.keys()
+ )
+
+ for (mu, mv), lca in lca_gen:
+ w, uv = meta_to_wuv[(mu, mv)]
+ if lca == mu:
+ # If u is an ancestor of v in TR, then add edge u->v to D
+ D.add_edge(lca, mv, weight=w, generator=uv)
+ elif lca == mv:
+ # If v is an ancestor of u in TR, then add edge v->u to D
+ D.add_edge(lca, mu, weight=w, generator=uv)
+ else:
+ # If neither u nor v is an ancestor of the other in TR
+ # let t = lca(TR, u, v) and add edges t->u and t->v
+ # Track the original edge that GENERATED these edges.
+ D.add_edge(lca, mu, weight=w, generator=uv)
+ D.add_edge(lca, mv, weight=w, generator=uv)
+
+ # Then compute a minimum rooted branching
+ try:
+ # Note the original edges must be directed towards the root for the
+ # branching to give us a bridge-augmentation.
+ A = _minimum_rooted_branching(D, root)
+ except nx.NetworkXException as err:
+ # If there is no branching then augmentation is not possible
+ raise nx.NetworkXUnfeasible("no 2-edge-augmentation possible") from err
+
+ # For each edge e in the branching that did not belong to the directed
+ # tree T, add the corresponding edge that **GENERATED** it (this is not
+ # necessarily e itself!)
+
+ # ensure the third case does not generate edges twice
+ bridge_connectors = set()
+ for mu, mv in A.edges():
+ data = D.get_edge_data(mu, mv)
+ if "generator" in data:
+ # Add the avail edge that generated the branching edge.
+ edge = data["generator"]
+ bridge_connectors.add(edge)
+
+ yield from bridge_connectors
+
+
+def _minimum_rooted_branching(D, root):
+ """Helper function to compute a minimum rooted branching (aka rooted
+ arborescence)
+
+ Before the branching can be computed, the directed graph must be rooted by
+ removing the predecessors of root.
+
+ A branching / arborescence of a rooted graph G is a subgraph that contains
+ a directed path from the root to every other vertex. It is the directed
+ analog of the minimum spanning tree problem.
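+
+ For example (an illustrative sketch with made-up weights; the cheap
+ chain through node 1 beats the direct edge to node 2, and the edge
+ into the root is discarded before the branching is computed)::
+
+ >>> D = nx.DiGraph()
+ >>> D.add_weighted_edges_from([(0, 1, 1), (0, 2, 5), (1, 2, 1), (2, 0, 9)])
+ >>> A = _minimum_rooted_branching(D, root=0)
+ >>> sorted(A.edges())
+ [(0, 1), (1, 2)]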
+ + References + ---------- + [1] Khuller, Samir (2002) Advanced Algorithms Lecture 24 Notes. + https://web.archive.org/web/20121030033722/https://www.cs.umd.edu/class/spring2011/cmsc651/lec07.pdf + """ + rooted = D.copy() + # root the graph by removing all predecessors to `root`. + rooted.remove_edges_from([(u, root) for u in D.predecessors(root)]) + # Then compute the branching / arborescence. + A = nx.minimum_spanning_arborescence(rooted) + return A + + +@nx._dispatchable(returns_graph=True) +def collapse(G, grouped_nodes): + """Collapses each group of nodes into a single node. + + This is similar to condensation, but works on undirected graphs. + + Parameters + ---------- + G : NetworkX Graph + + grouped_nodes: list or generator + Grouping of nodes to collapse. The grouping must be disjoint. + If grouped_nodes are strongly_connected_components then this is + equivalent to :func:`condensation`. + + Returns + ------- + C : NetworkX Graph + The collapsed graph C of G with respect to the node grouping. The node + labels are integers corresponding to the index of the component in the + list of grouped_nodes. C has a graph attribute named 'mapping' with a + dictionary mapping the original nodes to the nodes in C to which they + belong. Each node in C also has a node attribute 'members' with the set + of original nodes in G that form the group that the node in C + represents. + + Examples + -------- + >>> # Collapses a graph using disjoint groups, but not necessarily connected + >>> G = nx.Graph([(1, 0), (2, 3), (3, 1), (3, 4), (4, 5), (5, 6), (5, 7)]) + >>> G.add_node("A") + >>> grouped_nodes = [{0, 1, 2, 3}, {5, 6, 7}] + >>> C = collapse(G, grouped_nodes) + >>> members = nx.get_node_attributes(C, "members") + >>> sorted(members.keys()) + [0, 1, 2, 3] + >>> member_values = set(map(frozenset, members.values())) + >>> assert {0, 1, 2, 3} in member_values + >>> assert {4} in member_values + >>> assert {5, 6, 7} in member_values + >>> assert {"A"} in member_values + """ + mapping = {} + members = {} + C = G.__class__() + i = 0 # required if G is empty + remaining = set(G.nodes()) + for i, group in enumerate(grouped_nodes): + group = set(group) + assert remaining.issuperset(group), ( + "grouped nodes must exist in G and be disjoint" + ) + remaining.difference_update(group) + members[i] = group + mapping.update((n, i) for n in group) + # remaining nodes are in their own group + for i, node in enumerate(remaining, start=i + 1): + group = {node} + members[i] = group + mapping.update((n, i) for n in group) + number_of_groups = i + 1 + C.add_nodes_from(range(number_of_groups)) + C.add_edges_from( + (mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v] + ) + # Add a list of members (ie original nodes) to each node (ie scc) in C. 
+ nx.set_node_attributes(C, name="members", values=members)
+ # Add mapping dict as graph attribute
+ C.graph["mapping"] = mapping
+ return C
+
+
+@nx._dispatchable
+def complement_edges(G):
+ """Returns only the edges in the complement of G
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ Yields
+ ------
+ edge : tuple
+ Edges in the complement of G
+
+ Examples
+ --------
+ >>> G = nx.path_graph((1, 2, 3, 4))
+ >>> sorted(complement_edges(G))
+ [(1, 3), (1, 4), (2, 4)]
+ >>> G = nx.path_graph((1, 2, 3, 4), nx.DiGraph())
+ >>> sorted(complement_edges(G))
+ [(1, 3), (1, 4), (2, 1), (2, 4), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)]
+ >>> G = nx.complete_graph(1000)
+ >>> sorted(complement_edges(G))
+ []
+ """
+ G_adj = G._adj # Store as a variable to eliminate attribute lookup
+ if G.is_directed():
+ for u, v in it.combinations(G.nodes(), 2):
+ if v not in G_adj[u]:
+ yield (u, v)
+ if u not in G_adj[v]:
+ yield (v, u)
+ else:
+ for u, v in it.combinations(G.nodes(), 2):
+ if v not in G_adj[u]:
+ yield (u, v)
+
+
+def _compat_shuffle(rng, input):
+ """wrapper around rng.shuffle for Python 2 compatibility reasons"""
+ rng.shuffle(input)
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@py_random_state(4)
+@nx._dispatchable
+def greedy_k_edge_augmentation(G, k, avail=None, weight=None, seed=None):
+ """Greedy algorithm for finding a k-edge-augmentation
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ An undirected graph.
+
+ k : integer
+ Desired edge connectivity
+
+ avail : dict or a set of 2 or 3 tuples
+ For more details, see :func:`k_edge_augmentation`.
+
+ weight : string
+ key to use to find weights if ``avail`` is a set of 3-tuples.
+ For more details, see :func:`k_edge_augmentation`.
+
+ seed : integer, random_state, or None (default)
+ Indicator of random number generation state.
+ See :ref:`Randomness`.
+
+ Yields
+ ------
+ edge : tuple
+ Edges in the greedy augmentation of G
+
+ Notes
+ -----
+ The algorithm is simple. Edges are incrementally added between parts of the
+ graph that are not yet locally k-edge-connected. Then edges from the
+ augmenting set are pruned as long as local edge-connectivity is not broken.
+
+ This algorithm is greedy and does not provide optimality guarantees. It
+ exists only to provide :func:`k_edge_augmentation` with the ability to
+ generate a feasible solution for arbitrary k.
+
+ See Also
+ --------
+ :func:`k_edge_augmentation`
+
+ Examples
+ --------
+ >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
+ >>> sorted(greedy_k_edge_augmentation(G, k=2))
+ [(1, 7)]
+ >>> sorted(greedy_k_edge_augmentation(G, k=1, avail=[]))
+ []
+ >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
+ >>> avail = {(u, v): 1 for (u, v) in complement_edges(G)}
+ >>> # the randomized pruning process can produce different solutions
+ >>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=2))
+ [(1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (2, 4), (2, 6), (3, 7), (5, 7)]
+ >>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=3))
+ [(1, 3), (1, 5), (1, 6), (2, 4), (2, 6), (3, 7), (4, 7), (5, 7)]
+ """
+ # Result set
+ aug_edges = []
+
+ done = is_k_edge_connected(G, k)
+ if done:
+ return
+ if avail is None:
+ # all edges are available
+ avail_uv = list(complement_edges(G))
+ avail_w = [1] * len(avail_uv)
+ else:
+ # Get the unique set of unweighted edges
+ avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
+
+ # Greedy: order lightest edges.
Use degree sum to tie-break
+ tiebreaker = [sum(map(G.degree, uv)) for uv in avail_uv]
+ avail_wduv = sorted(zip(avail_w, tiebreaker, avail_uv))
+ avail_uv = [uv for w, d, uv in avail_wduv]
+
+ # Incrementally add edges until we are k-edge-connected
+ H = G.copy()
+ for u, v in avail_uv:
+ done = False
+ if not is_locally_k_edge_connected(H, u, v, k=k):
+ # Only add edges in parts that are not yet locally k-edge-connected
+ aug_edges.append((u, v))
+ H.add_edge(u, v)
+ # Did adding this edge help?
+ if H.degree(u) >= k and H.degree(v) >= k:
+ done = is_k_edge_connected(H, k)
+ if done:
+ break
+
+ # Check for feasibility
+ if not done:
+ raise nx.NetworkXUnfeasible("not able to k-edge-connect with available edges")
+
+ # Randomized attempt to reduce the size of the solution
+ _compat_shuffle(seed, aug_edges)
+ for u, v in list(aug_edges):
+ # Don't remove if we know it would break connectivity
+ if H.degree(u) <= k or H.degree(v) <= k:
+ continue
+ H.remove_edge(u, v)
+ aug_edges.remove((u, v))
+ if not is_k_edge_connected(H, k=k):
+ # If removing this edge breaks feasibility, undo
+ H.add_edge(u, v)
+ aug_edges.append((u, v))
+
+ # Generate results
+ yield from aug_edges
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py
new file mode 100644
index 0000000..96886f2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py
@@ -0,0 +1,592 @@
+"""
+Algorithms for finding k-edge-connected components and subgraphs.
+
+A k-edge-connected component (k-edge-cc) is a maximal set of nodes in G, such
+that all pairs of nodes have an edge-connectivity of at least k.
+
+A k-edge-connected subgraph (k-edge-subgraph) is a maximal set of nodes in G,
+such that the subgraph of G defined by the nodes has an edge-connectivity of
+at least k.
+"""
+
+import itertools as it
+from functools import partial
+
+import networkx as nx
+from networkx.utils import arbitrary_element, not_implemented_for
+
+__all__ = [
+ "k_edge_components",
+ "k_edge_subgraphs",
+ "bridge_components",
+ "EdgeComponentAuxGraph",
+]
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def k_edge_components(G, k):
+ """Generates nodes in each maximal k-edge-connected component in G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ k : Integer
+ Desired edge connectivity
+
+ Returns
+ -------
+ k_edge_components : a generator of k-edge-ccs. Each set of returned nodes
+ will have k-edge-connectivity in the graph G.
+
+ See Also
+ --------
+ :func:`local_edge_connectivity`
+ :func:`k_edge_subgraphs` : similar to this function, but the subgraph
+ defined by the nodes must also have k-edge-connectivity.
+ :func:`k_components` : similar to this function, but uses node-connectivity
+ instead of edge-connectivity
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If the input graph is a multigraph.
+
+ ValueError:
+ If k is less than 1
+
+ Notes
+ -----
+ Attempts to use the most efficient implementation available based on k.
+ If k=1, this is simply strongly connected components for directed graphs
+ and connected components for undirected graphs.
+ If k=2 and the graph is undirected, an efficient bridge-connected-component
+ algorithm from [1]_, based on the chain decomposition, is run.
+ Otherwise, the algorithm from [2]_ is used.
+
+ Examples
+ --------
+ >>> import itertools as it
+ >>> from networkx.utils import pairwise
+ >>> paths = [
+ ... (1, 2, 4, 3, 1, 4),
+ ...
(5, 6, 7, 8, 5, 7, 8, 6),
+ ... ]
+ >>> G = nx.Graph()
+ >>> G.add_nodes_from(it.chain(*paths))
+ >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
+ >>> # note this returns {1, 4} unlike k_edge_subgraphs
+ >>> sorted(map(sorted, nx.k_edge_components(G, k=3)))
+ [[1, 4], [2], [3], [5, 6, 7, 8]]
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29
+ .. [2] Wang, Tianhao, et al. (2015) A simple algorithm for finding all
+ k-edge-connected components.
+ http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
+ """
+ # Compute k-edge-ccs using the most efficient algorithms available.
+ if k < 1:
+ raise ValueError("k cannot be less than 1")
+ if G.is_directed():
+ if k == 1:
+ return nx.strongly_connected_components(G)
+ else:
+ # TODO: investigate https://arxiv.org/abs/1412.6466 for k=2
+ aux_graph = EdgeComponentAuxGraph.construct(G)
+ return aux_graph.k_edge_components(k)
+ else:
+ if k == 1:
+ return nx.connected_components(G)
+ elif k == 2:
+ return bridge_components(G)
+ else:
+ aux_graph = EdgeComponentAuxGraph.construct(G)
+ return aux_graph.k_edge_components(k)
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def k_edge_subgraphs(G, k):
+ """Generates nodes in each maximal k-edge-connected subgraph in G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ k : Integer
+ Desired edge connectivity
+
+ Returns
+ -------
+ k_edge_subgraphs : a generator of k-edge-subgraphs
+ Each k-edge-subgraph is a maximal set of nodes that defines a subgraph
+ of G that is k-edge-connected.
+
+ See Also
+ --------
+ :func:`edge_connectivity`
+ :func:`k_edge_components` : similar to this function, but nodes only
+ need to have k-edge-connectivity within the graph G and the subgraphs
+ might not be k-edge-connected.
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If the input graph is a multigraph.
+
+ ValueError:
+ If k is less than 1
+
+ Notes
+ -----
+ Attempts to use the most efficient implementation available based on k.
+ If k=1, or k=2 and the graph is undirected, then this simply calls
+ `k_edge_components`. Otherwise the algorithm from [1]_ is used.
+
+ Examples
+ --------
+ >>> import itertools as it
+ >>> from networkx.utils import pairwise
+ >>> paths = [
+ ... (1, 2, 4, 3, 1, 4),
+ ... (5, 6, 7, 8, 5, 7, 8, 6),
+ ... ]
+ >>> G = nx.Graph()
+ >>> G.add_nodes_from(it.chain(*paths))
+ >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
+ >>> # note this does not return {1, 4} unlike k_edge_components
+ >>> sorted(map(sorted, nx.k_edge_subgraphs(G, k=3)))
+ [[1], [2], [3], [4], [5, 6, 7, 8]]
+
+ References
+ ----------
+ .. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs
+ from a large graph. ACM International Conference on Extending Database
+ Technology 2012 480--491.
+ https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf
+ """
+ if k < 1:
+ raise ValueError("k cannot be less than 1")
+ if G.is_directed():
+ if k <= 1:
+ # For directed graphs,
+ # when k == 1, k-edge-ccs and k-edge-subgraphs are the same
+ return k_edge_components(G, k)
+ else:
+ return _k_edge_subgraphs_nodes(G, k)
+ else:
+ if k <= 2:
+ # For undirected graphs,
+ # when k <= 2, k-edge-ccs and k-edge-subgraphs are the same
+ return k_edge_components(G, k)
+ else:
+ return _k_edge_subgraphs_nodes(G, k)
+
+
+def _k_edge_subgraphs_nodes(G, k):
+ """Helper to get the nodes from the subgraphs.
+
+ This allows k_edge_subgraphs to return a generator.
+
+ """
+ for C in general_k_edge_subgraphs(G, k):
+ yield set(C.nodes())
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def bridge_components(G):
+ """Finds all bridge-connected components of G.
+
+ Parameters
+ ----------
+ G : NetworkX undirected graph
+
+ Returns
+ -------
+ bridge_components : a generator of 2-edge-connected components
+
+
+ See Also
+ --------
+ :func:`k_edge_subgraphs` : this function is a special case for an
+ undirected graph where k=2.
+ :func:`biconnected_components` : similar to this function, but is defined
+ using 2-node-connectivity instead of 2-edge-connectivity.
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If the input graph is directed or a multigraph.
+
+ Notes
+ -----
+ Bridge-connected components are also known as 2-edge-connected components.
+
+ Examples
+ --------
+ >>> # The barbell graph with parameter zero has a single bridge
+ >>> G = nx.barbell_graph(5, 0)
+ >>> from networkx.algorithms.connectivity.edge_kcomponents import bridge_components
+ >>> sorted(map(sorted, bridge_components(G)))
+ [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
+ """
+ H = G.copy()
+ H.remove_edges_from(nx.bridges(G))
+ yield from nx.connected_components(H)
+
+
+class EdgeComponentAuxGraph:
+ r"""A simple algorithm to find all k-edge-connected components in a graph.
+
+ Constructing the auxiliary graph (which may take some time) allows for the
+ k-edge-ccs to be found in linear time for arbitrary k.
+
+ Notes
+ -----
+ This implementation is based on [1]_. The idea is to construct an auxiliary
+ graph from which the k-edge-ccs can be extracted in linear time. The
+ auxiliary graph is constructed in $O(|V|\cdot F)$ operations, where F is the
+ complexity of max flow. Querying the components takes an additional $O(|V|)$
+ operations. This algorithm can be slow for large graphs, but it handles an
+ arbitrary k and works for both directed and undirected inputs.
+
+ The undirected case for k=1 is exactly connected components.
+ The undirected case for k=2 is exactly bridge connected components.
+ The directed case for k=1 is exactly strongly connected components.
+
+ References
+ ----------
+ .. [1] Wang, Tianhao, et al. (2015) A simple algorithm for finding all
+ k-edge-connected components.
+ http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
+
+ Examples
+ --------
+ >>> import itertools as it
+ >>> from networkx.utils import pairwise
+ >>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph
+ >>> # Build an interesting graph with multiple levels of k-edge-ccs
+ >>> paths = [
+ ... (1, 2, 3, 4, 1, 3, 4, 2), # a 3-edge-cc (a 4 clique)
+ ... (5, 6, 7, 5), # a 2-edge-cc (a 3 clique)
+ ... (1, 5), # combine first two ccs into a 1-edge-cc
+ ... (0,), # add an additional disconnected 1-edge-cc
+ ...
]
+ >>> G = nx.Graph()
+ >>> G.add_nodes_from(it.chain(*paths))
+ >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
+ >>> # Constructing the AuxGraph takes about O(n ** 4)
+ >>> aux_graph = EdgeComponentAuxGraph.construct(G)
+ >>> # Once constructed, querying takes O(n)
+ >>> sorted(map(sorted, aux_graph.k_edge_components(k=1)))
+ [[0], [1, 2, 3, 4, 5, 6, 7]]
+ >>> sorted(map(sorted, aux_graph.k_edge_components(k=2)))
+ [[0], [1, 2, 3, 4], [5, 6, 7]]
+ >>> sorted(map(sorted, aux_graph.k_edge_components(k=3)))
+ [[0], [1, 2, 3, 4], [5], [6], [7]]
+ >>> sorted(map(sorted, aux_graph.k_edge_components(k=4)))
+ [[0], [1], [2], [3], [4], [5], [6], [7]]
+
+ The auxiliary graph is primarily used for k-edge-ccs but it
+ can also speed up the queries of k-edge-subgraphs by refining the
+ search space.
+
+ >>> import itertools as it
+ >>> from networkx.utils import pairwise
+ >>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph
+ >>> paths = [
+ ... (1, 2, 4, 3, 1, 4),
+ ... ]
+ >>> G = nx.Graph()
+ >>> G.add_nodes_from(it.chain(*paths))
+ >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
+ >>> aux_graph = EdgeComponentAuxGraph.construct(G)
+ >>> sorted(map(sorted, aux_graph.k_edge_subgraphs(k=3)))
+ [[1], [2], [3], [4]]
+ >>> sorted(map(sorted, aux_graph.k_edge_components(k=3)))
+ [[1, 4], [2], [3]]
+ """
+
+ # @not_implemented_for('multigraph') # TODO: fix decor for classmethods
+ @classmethod
+ def construct(EdgeComponentAuxGraph, G):
+ """Builds an auxiliary graph encoding edge-connectivity between nodes.
+
+ Notes
+ -----
+ Given G=(V, E), initialize an empty auxiliary graph A.
+ Choose an arbitrary source node s. Initialize a set N of available
+ nodes (that can be used as the sink). The algorithm picks an
+ arbitrary node t from N - {s}, and then computes the minimum st-cut
+ (S, T) with value w. If G is directed the minimum of the st-cut or
+ the ts-cut is used instead. Then, the edge (s, t) is added to the
+ auxiliary graph with weight w. The algorithm is called recursively
+ first using S as the available nodes and s as the source, and then
+ using T and t. Recursion stops when the source is the only available
+ node.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ """
+ # workaround for classmethod decorator
+ not_implemented_for("multigraph")(lambda G: G)(G)
+
+ def _recursive_build(H, A, source, avail):
+ # Terminate once the flow has been computed to every node.
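+ # (note) each recursive call strictly shrinks ``avail``: the sink is
+ # absorbed by one side of the cut, so at most |V| - 1 cuts are ever
+ # computed, one per edge of the auxiliary tree A.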
+
+ if {source} == avail:
+ return
+ # pick an arbitrary node as the sink
+ sink = arbitrary_element(avail - {source})
+ # find the minimum cut and its weight
+ value, (S, T) = nx.minimum_cut(H, source, sink)
+ if H.is_directed():
+ # check if the reverse direction has a smaller cut
+ value_, (T_, S_) = nx.minimum_cut(H, sink, source)
+ if value_ < value:
+ value, S, T = value_, S_, T_
+ # add edge with weight of cut to the aux graph
+ A.add_edge(source, sink, weight=value)
+ # recursively call until all but one node is used
+ _recursive_build(H, A, source, avail.intersection(S))
+ _recursive_build(H, A, sink, avail.intersection(T))
+
+ # Copy input to ensure all edges have unit capacity
+ H = G.__class__()
+ H.add_nodes_from(G.nodes())
+ H.add_edges_from(G.edges(), capacity=1)
+
+ # A is the auxiliary graph to be constructed
+ # It is a weighted undirected tree
+ A = nx.Graph()
+
+ # Pick an arbitrary node as the source
+ if H.number_of_nodes() > 0:
+ source = arbitrary_element(H.nodes())
+ # Initialize a set of elements that can be chosen as the sink
+ avail = set(H.nodes())
+
+ # This constructs A
+ _recursive_build(H, A, source, avail)
+
+ # This class is a container that holds the auxiliary graph A and
+ # provides access to the k_edge_components function.
+ self = EdgeComponentAuxGraph()
+ self.A = A
+ self.H = H
+ return self
+
+ def k_edge_components(self, k):
+ """Queries the auxiliary graph for k-edge-connected components.
+
+ Parameters
+ ----------
+ k : Integer
+ Desired edge connectivity
+
+ Returns
+ -------
+ k_edge_components : a generator of k-edge-ccs
+
+ Notes
+ -----
+ Given the auxiliary graph, the k-edge-connected components can be
+ determined in linear time by removing all edges with weights less than
+ k from the auxiliary graph. The resulting connected components are the
+ k-edge-ccs in the original graph.
+ """
+ if k < 1:
+ raise ValueError("k cannot be less than 1")
+ A = self.A
+ # "traverse the auxiliary graph A and delete all edges with weights less
+ # than k"
+ aux_weights = nx.get_edge_attributes(A, "weight")
+ # Create a relevant graph with the auxiliary edges with weights >= k
+ R = nx.Graph()
+ R.add_nodes_from(A.nodes())
+ R.add_edges_from(e for e, w in aux_weights.items() if w >= k)
+
+ # Return the nodes that are k-edge-connected in the original graph
+ yield from nx.connected_components(R)
+
+ def k_edge_subgraphs(self, k):
+ """Queries the auxiliary graph for k-edge-connected subgraphs.
+
+ Parameters
+ ----------
+ k : Integer
+ Desired edge connectivity
+
+ Returns
+ -------
+ k_edge_subgraphs : a generator of k-edge-subgraphs
+
+ Notes
+ -----
+ Refines the k-edge-ccs into k-edge-subgraphs. The running time is more
+ than $O(|V|)$.
+
+ For single values of k it is faster to use `nx.k_edge_subgraphs`.
+ But for multiple values of k, it can be faster to build the AuxGraph
+ once and then use this method.
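+
+ A small sketch (hypothetical path graph) showing one construction
+ reused for two values of k::
+
+ >>> G = nx.path_graph(4)
+ >>> aux = EdgeComponentAuxGraph.construct(G)
+ >>> sorted(map(sorted, aux.k_edge_subgraphs(k=1)))
+ [[0, 1, 2, 3]]
+ >>> sorted(map(sorted, aux.k_edge_subgraphs(k=2)))
+ [[0], [1], [2], [3]]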
+ """ + if k < 1: + raise ValueError("k cannot be less than 1") + H = self.H + A = self.A + # "traverse the auxiliary graph A and delete all edges with weights less + # than k" + aux_weights = nx.get_edge_attributes(A, "weight") + # Create a relevant graph with the auxiliary edges with weights >= k + R = nx.Graph() + R.add_nodes_from(A.nodes()) + R.add_edges_from(e for e, w in aux_weights.items() if w >= k) + + # Return the components whose subgraphs are k-edge-connected + for cc in nx.connected_components(R): + if len(cc) < k: + # Early return optimization + for node in cc: + yield {node} + else: + # Call subgraph solution to refine the results + C = H.subgraph(cc) + yield from k_edge_subgraphs(C, k) + + +def _low_degree_nodes(G, k, nbunch=None): + """Helper for finding nodes with degree less than k.""" + # Nodes with degree less than k cannot be k-edge-connected. + if G.is_directed(): + # Consider both in and out degree in the directed case + seen = set() + for node, degree in G.out_degree(nbunch): + if degree < k: + seen.add(node) + yield node + for node, degree in G.in_degree(nbunch): + if node not in seen and degree < k: + seen.add(node) + yield node + else: + # Only the degree matters in the undirected case + for node, degree in G.degree(nbunch): + if degree < k: + yield node + + +def _high_degree_components(G, k): + """Helper for filtering components that can't be k-edge-connected. + + Removes and generates each node with degree less than k. Then generates + remaining components where all nodes have degree at least k. + """ + # Iteratively remove parts of the graph that are not k-edge-connected + H = G.copy() + singletons = set(_low_degree_nodes(H, k)) + while singletons: + # Only search neighbors of removed nodes + nbunch = set(it.chain.from_iterable(map(H.neighbors, singletons))) + nbunch.difference_update(singletons) + H.remove_nodes_from(singletons) + for node in singletons: + yield {node} + singletons = set(_low_degree_nodes(H, k, nbunch)) + + # Note: remaining connected components may not be k-edge-connected + if G.is_directed(): + yield from nx.strongly_connected_components(H) + else: + yield from nx.connected_components(H) + + +@nx._dispatchable(returns_graph=True) +def general_k_edge_subgraphs(G, k): + """General algorithm to find all maximal k-edge-connected subgraphs in `G`. + + Parameters + ---------- + G : nx.Graph + Graph in which all maximal k-edge-connected subgraphs will be found. + + k : int + + Yields + ------ + k_edge_subgraphs : Graph instances that are k-edge-subgraphs + Each k-edge-subgraph contains a maximal set of nodes that defines a + subgraph of `G` that is k-edge-connected. + + Notes + ----- + Implementation of the basic algorithm from [1]_. The basic idea is to find + a global minimum cut of the graph. If the cut value is at least k, then the + graph is a k-edge-connected subgraph and can be added to the results. + Otherwise, the cut is used to split the graph in two and the procedure is + applied recursively. If the graph is just a single node, then it is also + added to the results. At the end, each result is either guaranteed to be + a single node or a subgraph of G that is k-edge-connected. + + This implementation contains optimizations for reducing the number of calls + to max-flow, but there are other optimizations in [1]_ that could be + implemented. + + References + ---------- + .. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs + from a large graph. 
ACM International Conference on Extending Database + Technology 2012 480-–491. + https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf + + Examples + -------- + >>> from networkx.utils import pairwise + >>> paths = [ + ... (11, 12, 13, 14, 11, 13, 14, 12), # a 4-clique + ... (21, 22, 23, 24, 21, 23, 24, 22), # another 4-clique + ... # connect the cliques with high degree but low connectivity + ... (50, 13), + ... (12, 50, 22), + ... (13, 102, 23), + ... (14, 101, 24), + ... ] + >>> G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + >>> sorted(len(k_sg) for k_sg in k_edge_subgraphs(G, k=3)) + [1, 1, 1, 4, 4] + """ + if k < 1: + raise ValueError("k cannot be less than 1") + + # Node pruning optimization (incorporates early return) + # find_ccs is either connected_components/strongly_connected_components + find_ccs = partial(_high_degree_components, k=k) + + # Quick return optimization + if G.number_of_nodes() < k: + for node in G.nodes(): + yield G.subgraph([node]).copy() + return + + # Intermediate results + R0 = {G.subgraph(cc).copy() for cc in find_ccs(G)} + # Subdivide CCs in the intermediate results until they are k-conn + while R0: + G1 = R0.pop() + if G1.number_of_nodes() == 1: + yield G1 + else: + # Find a global minimum cut + cut_edges = nx.minimum_edge_cut(G1) + cut_value = len(cut_edges) + if cut_value < k: + # G1 is not k-edge-connected, so subdivide it + G1.remove_edges_from(cut_edges) + for cc in find_ccs(G1): + R0.add(G1.subgraph(cc).copy()) + else: + # Otherwise we found a k-edge-connected subgraph + yield G1 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/kcomponents.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/kcomponents.py new file mode 100644 index 0000000..e2f1ba2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/kcomponents.py @@ -0,0 +1,223 @@ +""" +Moody and White algorithm for k-components +""" + +from collections import defaultdict +from itertools import combinations +from operator import itemgetter + +import networkx as nx + +# Define the default maximum flow function. +from networkx.algorithms.flow import edmonds_karp +from networkx.utils import not_implemented_for + +default_flow_func = edmonds_karp + +__all__ = ["k_components"] + + +@not_implemented_for("directed") +@nx._dispatchable +def k_components(G, flow_func=None): + r"""Returns the k-component structure of a graph G. + + A `k`-component is a maximal subgraph of a graph G that has, at least, + node connectivity `k`: we need to remove at least `k` nodes to break it + into more components. `k`-components have an inherent hierarchical + structure because they are nested in terms of connectivity: a connected + graph can contain several 2-components, each of which can contain + one or more 3-components, and so forth. + + Parameters + ---------- + G : NetworkX graph + + flow_func : function + Function to perform the underlying flow computations. Default value + :meth:`edmonds_karp`. This function performs better in sparse graphs with + right tailed degree distributions. :meth:`shortest_augmenting_path` will + perform better in denser graphs. + + Returns + ------- + k_components : dict + Dictionary with all connectivity levels `k` in the input Graph as keys + and a list of sets of nodes that form a k-component of level `k` as + values. + + Raises + ------ + NetworkXNotImplemented + If the input graph is directed. 
+ + Examples + -------- + >>> # Petersen graph has 10 nodes and it is triconnected, thus all + >>> # nodes are in a single component on all three connectivity levels + >>> G = nx.petersen_graph() + >>> k_components = nx.k_components(G) + + Notes + ----- + Moody and White [1]_ (appendix A) provide an algorithm for identifying + k-components in a graph, which is based on Kanevsky's algorithm [2]_ + for finding all minimum-size node cut-sets of a graph (implemented in + :meth:`all_node_cuts` function): + + 1. Compute node connectivity, k, of the input graph G. + + 2. Identify all k-cutsets at the current level of connectivity using + Kanevsky's algorithm. + + 3. Generate new graph components based on the removal of + these cutsets. Nodes in a cutset belong to both sides + of the induced cut. + + 4. If the graph is neither complete nor trivial, return to 1; + else end. + + This implementation also uses some heuristics (see [3]_ for details) + to speed up the computation. + + See also + -------- + node_connectivity + all_node_cuts + biconnected_components : special case of this function when k=2 + k_edge_components : similar to this function, but uses edge-connectivity + instead of node-connectivity + + References + ---------- + .. [1] Moody, J. and D. White (2003). Social cohesion and embeddedness: + A hierarchical conception of social groups. + American Sociological Review 68(1), 103--28. + http://www2.asanet.org/journals/ASRFeb03MoodyWhite.pdf + + .. [2] Kanevsky, A. (1993). Finding all minimum-size separating vertex + sets in a graph. Networks 23(6), 533--541. + http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract + + .. [3] Torrents, J. and F. Ferraro (2015). Structural Cohesion: + Visualization and Heuristics for Fast Computation. + https://arxiv.org/pdf/1503.04476v1 + + """ + # Dictionary with connectivity level (k) as keys and a list of + # sets of nodes that form a k-component as values. Note that + # k-components can overlap (but only k - 1 nodes). + k_components = defaultdict(list) + # Define default flow function + if flow_func is None: + flow_func = default_flow_func + # Bicomponents as a base to check for higher order k-components + for component in nx.connected_components(G): + # isolated nodes have connectivity 0 + comp = set(component) + if len(comp) > 1: + k_components[1].append(comp) + bicomponents = [G.subgraph(c) for c in nx.biconnected_components(G)] + for bicomponent in bicomponents: + bicomp = set(bicomponent) + # avoid considering dyads as bicomponents + if len(bicomp) > 2: + k_components[2].append(bicomp) + for B in bicomponents: + if len(B) <= 2: + continue + k = nx.node_connectivity(B, flow_func=flow_func) + if k > 2: + k_components[k].append(set(B)) + # Perform cuts in a DFS like order. + cuts = list(nx.all_node_cuts(B, k=k, flow_func=flow_func)) + stack = [(k, _generate_partition(B, cuts, k))] + while stack: + (parent_k, partition) = stack[-1] + try: + nodes = next(partition) + C = B.subgraph(nodes) + this_k = nx.node_connectivity(C, flow_func=flow_func) + if this_k > parent_k and this_k > 2: + k_components[this_k].append(set(C)) + cuts = list(nx.all_node_cuts(C, k=this_k, flow_func=flow_func)) + if cuts: + stack.append((this_k, _generate_partition(C, cuts, this_k))) + except StopIteration: + stack.pop() + + # This is necessary because k-components may only be reported at their + # maximum k level. 
But we want to return a dictionary in which keys are
+ # connectivity levels and the values are lists of sets of components,
+ # without skipping any connectivity level. Also, it's possible that
+ # subsets of an already detected k-component appear at a level k.
+ # Checking for this in the while loop above penalizes the common case.
+ # Thus we also have to _consolidate all connectivity levels in
+ # _reconstruct_k_components.
+ return _reconstruct_k_components(k_components)
+
+
+def _consolidate(sets, k):
+ """Merge sets that share k or more elements.
+
+ See: http://rosettacode.org/wiki/Set_consolidation
+
+ The iterative python implementation posted there is
+ faster than this because of the overhead of building a
+ Graph and calling nx.connected_components, but it's not
+ clear to us if we can use it in NetworkX because there
+ is no licence for the code.
+
+ """
+ G = nx.Graph()
+ nodes = dict(enumerate(sets))
+ G.add_nodes_from(nodes)
+ G.add_edges_from(
+ (u, v) for u, v in combinations(nodes, 2) if len(nodes[u] & nodes[v]) >= k
+ )
+ for component in nx.connected_components(G):
+ yield set.union(*[nodes[n] for n in component])
+
+
+def _generate_partition(G, cuts, k):
+ def has_nbrs_in_partition(G, node, partition):
+ return any(n in partition for n in G[node])
+
+ components = []
+ nodes = {n for n, d in G.degree() if d > k} - {n for cut in cuts for n in cut}
+ H = G.subgraph(nodes)
+ for cc in nx.connected_components(H):
+ component = set(cc)
+ for cut in cuts:
+ for node in cut:
+ if has_nbrs_in_partition(G, node, cc):
+ component.add(node)
+ if len(component) < G.order():
+ components.append(component)
+ yield from _consolidate(components, k + 1)
+
+
+def _reconstruct_k_components(k_comps):
+ result = {}
+ max_k = max(k_comps)
+ for k in reversed(range(1, max_k + 1)):
+ if k == max_k:
+ result[k] = list(_consolidate(k_comps[k], k))
+ elif k not in k_comps:
+ result[k] = list(_consolidate(result[k + 1], k))
+ else:
+ nodes_at_k = set.union(*k_comps[k])
+ to_add = [c for c in result[k + 1] if any(n not in nodes_at_k for n in c)]
+ if to_add:
+ result[k] = list(_consolidate(k_comps[k] + to_add, k))
+ else:
+ result[k] = list(_consolidate(k_comps[k], k))
+ return result
+
+
+def build_k_number_dict(kcomps):
+ result = {}
+ for k, comps in sorted(kcomps.items(), key=itemgetter(0)):
+ for comp in comps:
+ for node in comp:
+ result[node] = k
+ return result
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/kcutsets.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/kcutsets.py
new file mode 100644
index 0000000..de26f4c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/kcutsets.py
@@ -0,0 +1,235 @@
+"""
+Kanevsky's algorithm for finding all minimum-size node k-cutsets.
+"""
+
+import copy
+from collections import defaultdict
+from itertools import combinations
+from operator import itemgetter
+
+import networkx as nx
+from networkx.algorithms.flow import (
+ build_residual_network,
+ edmonds_karp,
+ shortest_augmenting_path,
+)
+
+from .utils import build_auxiliary_node_connectivity
+
+default_flow_func = edmonds_karp
+
+
+__all__ = ["all_node_cuts"]
+
+
+@nx._dispatchable
+def all_node_cuts(G, k=None, flow_func=None):
+ r"""Returns all minimum k cutsets of an undirected graph G.
+
+ This implementation is based on Kanevsky's algorithm [1]_ for finding all
+ minimum-size node cut-sets of an undirected graph G, i.e., the sets of
+ nodes of cardinality equal to the node connectivity of G.
Such a set,
+ if removed, breaks G into two or more connected components.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ k : Integer
+ Node connectivity of the input graph. If k is None, then it is
+ computed. Default value: None.
+
+ flow_func : function
+ Function to perform the underlying flow computations. Default value is
+ :func:`~networkx.algorithms.flow.edmonds_karp`. This function performs
+ better in sparse graphs with right tailed degree distributions.
+ :func:`~networkx.algorithms.flow.shortest_augmenting_path` will
+ perform better in denser graphs.
+
+
+ Returns
+ -------
+ cuts : a generator of node cutsets
+ Each node cutset has cardinality equal to the node connectivity of
+ the input graph.
+
+ Examples
+ --------
+ >>> # A two-dimensional grid graph has 4 cutsets of cardinality 2
+ >>> G = nx.grid_2d_graph(5, 5)
+ >>> cutsets = list(nx.all_node_cuts(G))
+ >>> len(cutsets)
+ 4
+ >>> all(2 == len(cutset) for cutset in cutsets)
+ True
+ >>> nx.node_connectivity(G)
+ 2
+
+ Notes
+ -----
+ This implementation is based on the sequential algorithm for finding all
+ minimum-size separating vertex sets in a graph [1]_. The main idea is to
+ compute minimum cuts using local maximum flow computations among a set
+ of nodes of highest degree and all other non-adjacent nodes in the Graph.
+ Once we find a minimum cut, we add an edge between the high degree
+ node and the target node of the local maximum flow computation to make
+ sure that we will not find that minimum cut again.
+
+ See also
+ --------
+ node_connectivity
+ edmonds_karp
+ shortest_augmenting_path
+
+ References
+ ----------
+ .. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex
+ sets in a graph. Networks 23(6), 533--541.
+ http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract
+
+ """
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Input graph is disconnected.")
+
+ # Address some corner cases first.
+ # For complete graphs
+
+ if nx.density(G) == 1:
+ yield from ()
+ return
+
+ # Initialize data structures.
+ # Keep track of the cuts already computed so we do not repeat them.
+ seen = []
+ # The Even-Tarjan reduction is what we call the auxiliary digraph
+ # for node connectivity.
+ H = build_auxiliary_node_connectivity(G)
+ H_nodes = H.nodes # for speed
+ mapping = H.graph["mapping"]
+ # Keep a copy of original predecessors, H will be modified later.
+ # Shallow copy is enough.
+
+ original_H_pred = copy.copy(H._pred)
+ R = build_residual_network(H, "capacity")
+ kwargs = {"capacity": "capacity", "residual": R}
+ # Define default flow function
+ if flow_func is None:
+ flow_func = default_flow_func
+ if flow_func is shortest_augmenting_path:
+ kwargs["two_phase"] = True
+ # Begin the actual algorithm
+ # step 1: Find node connectivity k of G
+ if k is None:
+ k = nx.node_connectivity(G, flow_func=flow_func)
+ # step 2:
+ # Find k nodes with top degree, call it X:
+ X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]}
+ # Check if X is a k-node-cutset
+ if _is_separating_set(G, X):
+ seen.append(X)
+ yield X
+
+ for x in X:
+ # step 3: Compute local connectivity flow of x with all other
+ # non-adjacent nodes in G
+ non_adjacent = set(G) - {x} - set(G[x])
+ for v in non_adjacent:
+ # step 4: compute maximum flow in an Even-Tarjan reduction H of G
+ # and step 5: build the associated residual network R
+ R = flow_func(H, f"{mapping[x]}B", f"{mapping[v]}A", **kwargs)
+ flow_value = R.graph["flow_value"]
+
+ if flow_value == k:
+ # Find the nodes incident to the flow.
+ E1 = flowed_edges = [
+ (u, w) for (u, w, d) in R.edges(data=True) if d["flow"] != 0
+ ]
+ VE1 = incident_nodes = {n for edge in E1 for n in edge}
+ # Remove saturated edges from the residual network.
+ # Note that reversed edges are introduced with capacity 0
+ # in the residual graph and they need to be removed too.
+ saturated_edges = [
+ (u, w, d)
+ for (u, w, d) in R.edges(data=True)
+ if d["capacity"] == d["flow"] or d["capacity"] == 0
+ ]
+ R.remove_edges_from(saturated_edges)
+ R_closure = nx.transitive_closure(R)
+ # step 6: shrink the strongly connected components of
+ # residual flow network R and call it L.
+ L = nx.condensation(R)
+ cmap = L.graph["mapping"]
+ inv_cmap = defaultdict(list)
+ for n, scc in cmap.items():
+ inv_cmap[scc].append(n)
+ # Find the incident nodes in the condensed graph.
+ VE1 = {cmap[n] for n in VE1}
+ # step 7: Compute all antichains of L;
+ # they map to closed sets in H.
+ # Any edge in H that links a closed set is part of a cutset.
+ for antichain in nx.antichains(L):
+ # Only antichains that are subsets of incident nodes count.
+ # Lemma 8 in reference.
+ if not set(antichain).issubset(VE1):
+ continue
+ # Nodes in an antichain of the condensation graph of
+ # the residual network map to a closed set of nodes that
+ # define a node partition of the auxiliary digraph H
+ # by taking all of the antichain's predecessors in the
+ # transitive closure.
+ S = set()
+ for scc in antichain:
+ S.update(inv_cmap[scc])
+ S_ancestors = set()
+ for n in S:
+ S_ancestors.update(R_closure._pred[n])
+ S.update(S_ancestors)
+ if f"{mapping[x]}B" not in S or f"{mapping[v]}A" in S:
+ continue
+ # Find the cutset that links the node partition (S,~S) in H
+ cutset = set()
+ for u in S:
+ cutset.update((u, w) for w in original_H_pred[u] if w not in S)
+ # The edges in H that form the cutset are internal edges
+ # (ie edges that represent a node of the original graph G)
+ if any(H_nodes[u]["id"] != H_nodes[w]["id"] for u, w in cutset):
+ continue
+ node_cut = {H_nodes[u]["id"] for u, _ in cutset}
+
+ if len(node_cut) == k:
+ # The cut is invalid if it includes internal edges of
+ # end nodes. The other half of Lemma 8 in ref.
+ if x in node_cut or v in node_cut:
+ continue
+ if node_cut not in seen:
+ yield node_cut
+ seen.append(node_cut)
+
+ # Add an edge (x, v) to make sure that we do not
+ # find this cutset again.
This is equivalent + # to adding the edge in the input graph + # G.add_edge(x, v) and then regenerating H and R: + # Add edges to the auxiliary digraph. + # See build_residual_network for the convention used + # in residual graphs. + H.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1) + H.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1) + # Add edges to the residual network. + R.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1) + R.add_edge(f"{mapping[v]}A", f"{mapping[x]}B", capacity=0) + R.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1) + R.add_edge(f"{mapping[x]}A", f"{mapping[v]}B", capacity=0) + + # Add the saturated edges back so the residual network can be reused + R.add_edges_from(saturated_edges) + + +def _is_separating_set(G, cut): + """Assumes that the input graph is connected""" + if len(cut) == len(G) - 1: + return True + + H = nx.restricted_view(G, cut, []) + if nx.is_connected(H): + return False + return True diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/stoerwagner.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/stoerwagner.py new file mode 100644 index 0000000..29604b1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/stoerwagner.py @@ -0,0 +1,152 @@ +""" +Stoer-Wagner minimum cut algorithm. +""" + +from itertools import islice + +import networkx as nx + +from ...utils import BinaryHeap, arbitrary_element, not_implemented_for + +__all__ = ["stoer_wagner"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def stoer_wagner(G, weight="weight", heap=BinaryHeap): + r"""Returns the weighted minimum edge cut using the Stoer-Wagner algorithm. + + Determine the minimum edge cut of a connected graph using the + Stoer-Wagner algorithm. In weighted cases, all weights must be + nonnegative. + + The running time of the algorithm depends on the type of heap used: + + ============== ============================================= + Type of heap Running time + ============== ============================================= + Binary heap $O(n (m + n) \log n)$ + Fibonacci heap $O(nm + n^2 \log n)$ + Pairing heap $O(2^{2 \sqrt{\log \log n}} nm + n^2 \log n)$ + ============== ============================================= + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute named by the + weight parameter below. If this attribute is not present, the edge is + considered to have unit weight. + + weight : string + Name of the weight attribute of the edges. If the attribute is not + present, unit weight is assumed. Default value: 'weight'. + + heap : class + Type of heap to be used in the algorithm. It should be a subclass of + :class:`MinHeap` or implement a compatible interface. + + If a stock heap implementation is to be used, :class:`BinaryHeap` is + recommended over :class:`PairingHeap` for Python implementations without + optimized attribute accesses (e.g., CPython) despite a slower + asymptotic running time. For Python implementations with optimized + attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better + performance. Default value: :class:`BinaryHeap`. + + Returns + ------- + cut_value : integer or float + The sum of weights of edges in a minimum cut. + + partition : pair of node lists + A partitioning of the nodes that defines a minimum cut. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or a multigraph.
+ + NetworkXError + If the graph has fewer than two nodes, is not connected, or has a + negative-weighted edge. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge("x", "a", weight=3) + >>> G.add_edge("x", "b", weight=1) + >>> G.add_edge("a", "c", weight=3) + >>> G.add_edge("b", "c", weight=5) + >>> G.add_edge("b", "d", weight=4) + >>> G.add_edge("d", "e", weight=2) + >>> G.add_edge("c", "y", weight=2) + >>> G.add_edge("e", "y", weight=3) + >>> cut_value, partition = nx.stoer_wagner(G) + >>> cut_value + 4 + """ + n = len(G) + if n < 2: + raise nx.NetworkXError("graph has less than two nodes.") + if not nx.is_connected(G): + raise nx.NetworkXError("graph is not connected.") + + # Make a copy of the graph for internal use. + G = nx.Graph( + (u, v, {"weight": e.get(weight, 1)}) for u, v, e in G.edges(data=True) if u != v + ) + G.__networkx_cache__ = None # Disable caching + + for u, v, e in G.edges(data=True): + if e["weight"] < 0: + raise nx.NetworkXError("graph has a negative-weighted edge.") + + cut_value = float("inf") + nodes = set(G) + contractions = [] # contracted node pairs + + # Repeatedly pick a pair of nodes to contract until only one node is left. + for i in range(n - 1): + # Pick an arbitrary node u and create a set A = {u}. + u = arbitrary_element(G) + A = {u} + # Repeatedly pick the node "most tightly connected" to A and add it to + # A. The tightness of connectivity of a node not in A is defined by the + # total weight of the edges connecting it to nodes in A. + h = heap() # min-heap emulating a max-heap + for v, e in G[u].items(): + h.insert(v, -e["weight"]) + # Repeat until all but one node has been added to A. + for j in range(n - i - 2): + u = h.pop()[0] + A.add(u) + for v, e in G[u].items(): + if v not in A: + h.insert(v, h.get(v, 0) - e["weight"]) + # A and the remaining node v define a "cut of the phase". There is a + # minimum cut of the original graph that is also a cut of the phase. + # Due to contractions in earlier phases, v may in fact represent + # multiple nodes in the original graph. + v, w = h.min() + w = -w + if w < cut_value: + cut_value = w + best_phase = i + # Contract v and the last node added to A. + contractions.append((u, v)) + for w, e in G[v].items(): + if w != u: + if w not in G[u]: + G.add_edge(u, w, weight=e["weight"]) + else: + G[u][w]["weight"] += e["weight"] + G.remove_node(v) + + # Recover the optimal partitioning from the contractions.
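+ # contractions[:best_phase] holds every merge performed before the best + # cut-of-the-phase was found, and contractions[best_phase][1] is the node + # swallowed at that phase. Rebuilding a graph from those earlier merges + # and taking everything reachable from that node yields one side of the + # minimum cut; the remaining original nodes form the other side.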
+ G = nx.Graph(islice(contractions, best_phase)) + v = contractions[best_phase][1] + G.add_node(v) + reachable = set(nx.single_source_shortest_path_length(G, v)) + partition = (list(reachable), list(nodes - reachable)) + + return cut_value, partition diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..28b7cde Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-312.pyc new file mode 100644 index 0000000..a3997b6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-312.pyc new file mode 100644 index 0000000..9e0bdbb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-312.pyc new file mode 100644 index 0000000..2176140 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-312.pyc new file mode 100644 index 0000000..e8d510e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-312.pyc new file mode 100644 index 0000000..922ec56 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-312.pyc new file mode 100644 index 0000000..f0d4bf2 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-312.pyc new file mode 100644 index 0000000..9c0a4c6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-312.pyc new file mode 100644 index 0000000..f1551d7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py new file mode 100644 index 0000000..7aef247 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py @@ -0,0 +1,421 @@ +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity import ( + local_edge_connectivity, + local_node_connectivity, +) + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +# helper functions for tests + + +def _generate_no_biconnected(max_attempts=50): + attempts = 0 + while True: + G = nx.fast_gnp_random_graph(100, 0.0575, seed=42) + if nx.is_connected(G) and not nx.is_biconnected(G): + attempts = 0 + yield G + else: + if attempts >= max_attempts: + msg = f"Tried {max_attempts} times: no suitable Graph." + raise Exception(msg) + else: + attempts += 1 + + +def test_average_connectivity(): + # figure 1 from: + # Beineke, L., O. Oellermann, and R. Pippert (2002). The average + # connectivity of a graph. 
Discrete mathematics 252(1-3), 31-45 + # http://www.sciencedirect.com/science/article/pii/S0012365X01001807 + G1 = nx.path_graph(3) + G1.add_edges_from([(1, 3), (1, 4)]) + G2 = nx.path_graph(3) + G2.add_edges_from([(1, 3), (1, 4), (0, 3), (0, 4), (3, 4)]) + G3 = nx.Graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.average_node_connectivity(G1, **kwargs) == 1, errmsg + assert nx.average_node_connectivity(G2, **kwargs) == 2.2, errmsg + assert nx.average_node_connectivity(G3, **kwargs) == 0, errmsg + + +def test_average_connectivity_directed(): + G = nx.DiGraph([(1, 3), (1, 4), (1, 5)]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.average_node_connectivity(G) == 0.25, errmsg + + +def test_articulation_points(): + Ggen = _generate_no_biconnected() + for flow_func in flow_funcs: + for i in range(3): + G = next(Ggen) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G, flow_func=flow_func) == 1, errmsg + + +def test_brandes_erlebach(): + # Figure 1 chapter 7: Connectivity + # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == local_edge_connectivity(G, 1, 11, **kwargs), errmsg + assert 3 == nx.edge_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == local_node_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == nx.node_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == nx.edge_connectivity(G, **kwargs), errmsg + assert 2 == nx.node_connectivity(G, **kwargs), errmsg + if flow_func is flow.preflow_push: + assert 3 == nx.edge_connectivity(G, 1, 11, cutoff=2, **kwargs), errmsg + else: + assert 2 == nx.edge_connectivity(G, 1, 11, cutoff=2, **kwargs), errmsg + + +def test_white_harary_1(): + # Figure 1b white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + # A graph with high adhesion (edge connectivity) and low cohesion + # (vertex connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_white_harary_2(): + # Figure 8 white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.add_edge(0, 4) + # kappa <= lambda <= delta + assert 3 == min(nx.core_number(G).values()) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_complete_graphs(): + for n in range(5, 20, 5): + for flow_func in flow_funcs: + G = nx.complete_graph(n) + errmsg = f"Assertion 
failed in function: {flow_func.__name__}" + assert n - 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert n - 1 == nx.node_connectivity( + G.to_directed(), flow_func=flow_func + ), errmsg + assert n - 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + assert n - 1 == nx.edge_connectivity( + G.to_directed(), flow_func=flow_func + ), errmsg + + +def test_empty_graphs(): + for k in range(5, 25, 5): + G = nx.empty_graph(k) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 0 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 0 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_petersen(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_tutte(): + G = nx.tutte_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_dodecahedral(): + G = nx.dodecahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_octahedral(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 4 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 4 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_icosahedral(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 5 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 5 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_missing_source(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.node_connectivity, G, 10, 1, flow_func=flow_func + ) + + +def test_missing_target(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.node_connectivity, G, 1, 10, flow_func=flow_func + ) + + +def test_edge_missing_source(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.edge_connectivity, G, 10, 1, flow_func=flow_func + ) + + +def test_edge_missing_target(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.edge_connectivity, G, 1, 10, flow_func=flow_func + ) + + +def test_not_weakly_connected(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G) == 0, errmsg + assert nx.edge_connectivity(G) == 0, errmsg + + +def test_not_connected(): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G) == 0, errmsg + assert nx.edge_connectivity(G) == 0, errmsg + + +def test_directed_edge_connectivity(): + G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction + D = 
nx.cycle_graph(10).to_directed() # 2 reciprocal edges + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + assert 1 == local_edge_connectivity(G, 1, 4, flow_func=flow_func), errmsg + assert 1 == nx.edge_connectivity(G, 1, 4, flow_func=flow_func), errmsg + assert 2 == nx.edge_connectivity(D, flow_func=flow_func), errmsg + assert 2 == local_edge_connectivity(D, 1, 4, flow_func=flow_func), errmsg + assert 2 == nx.edge_connectivity(D, 1, 4, flow_func=flow_func), errmsg + + +def test_cutoff(): + G = nx.complete_graph(5) + for local_func in [local_edge_connectivity, local_node_connectivity]: + for flow_func in flow_funcs: + if flow_func is flow.preflow_push: + # cutoff is not supported by preflow_push + continue + for cutoff in [3, 2, 1]: + result = local_func(G, 0, 4, flow_func=flow_func, cutoff=cutoff) + assert cutoff == result, f"cutoff error in {flow_func.__name__}" + + +def test_invalid_auxiliary(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, local_node_connectivity, G, 0, 3, auxiliary=G) + + +def test_interface_only_source(): + G = nx.complete_graph(5) + for interface_func in [nx.node_connectivity, nx.edge_connectivity]: + pytest.raises(nx.NetworkXError, interface_func, G, s=0) + + +def test_interface_only_target(): + G = nx.complete_graph(5) + for interface_func in [nx.node_connectivity, nx.edge_connectivity]: + pytest.raises(nx.NetworkXError, interface_func, G, t=3) + + +def test_edge_connectivity_flow_vs_stoer_wagner(): + graph_funcs = [nx.icosahedral_graph, nx.octahedral_graph, nx.dodecahedral_graph] + for graph_func in graph_funcs: + G = graph_func() + assert nx.stoer_wagner(G)[0] == nx.edge_connectivity(G) + + +class TestAllPairsNodeConnectivity: + @classmethod + def setup_class(cls): + cls.path = nx.path_graph(7) + cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph()) + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.gnp = nx.gnp_random_graph(30, 0.1, seed=42) + cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True, seed=42) + cls.K20 = nx.complete_graph(20) + cls.K10 = nx.complete_graph(10) + cls.K5 = nx.complete_graph(5) + cls.G_list = [ + cls.path, + cls.directed_path, + cls.cycle, + cls.directed_cycle, + cls.gnp, + cls.directed_gnp, + cls.K10, + cls.K5, + cls.K20, + ] + + def test_cycles(self): + K_undir = nx.all_pairs_node_connectivity(self.cycle) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 2 + K_dir = nx.all_pairs_node_connectivity(self.directed_cycle) + for source in K_dir: + for target, k in K_dir[source].items(): + assert k == 1 + + def test_complete(self): + for G in [self.K10, self.K5, self.K20]: + K = nx.all_pairs_node_connectivity(G) + for source in K: + for target, k in K[source].items(): + assert k == len(G) - 1 + + def test_paths(self): + K_undir = nx.all_pairs_node_connectivity(self.path) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 1 + K_dir = nx.all_pairs_node_connectivity(self.directed_path) + for source in K_dir: + for target, k in K_dir[source].items(): + if source < target: + assert k == 1 + else: + assert k == 0 + + def test_all_pairs_connectivity_nbunch(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + C = nx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert len(C) == len(nbunch) + + def test_all_pairs_connectivity_icosahedral(self): + G = nx.icosahedral_graph() + C = 
nx.all_pairs_node_connectivity(G) + assert all(5 == C[u][v] for u, v in itertools.combinations(G, 2)) + + def test_all_pairs_connectivity(self): + G = nx.Graph() + nodes = [0, 1, 2, 3] + nx.add_path(G, nodes) + A = {n: {} for n in G} + for u, v in itertools.combinations(nodes, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_directed(self): + G = nx.DiGraph() + nodes = [0, 1, 2, 3] + nx.add_path(G, nodes) + A = {n: {} for n in G} + for u, v in itertools.permutations(nodes, 2): + A[u][v] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_nbunch_combinations(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + A = {n: {} for n in nbunch} + for u, v in itertools.combinations(nbunch, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_nbunch_iter(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + A = {n: {} for n in nbunch} + for u, v in itertools.combinations(nbunch, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G, nbunch=iter(nbunch)) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py new file mode 100644 index 0000000..964aff9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py @@ -0,0 +1,309 @@ +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity import minimum_st_edge_cut, minimum_st_node_cut +from networkx.utils import arbitrary_element + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + +# Tests for node and edge cutsets + + +def _generate_no_biconnected(max_attempts=50): + attempts = 0 + while True: + G = nx.fast_gnp_random_graph(100, 0.0575, seed=42) + if nx.is_connected(G) and not nx.is_biconnected(G): + attempts = 0 + yield G + else: + if attempts >= max_attempts: + msg = f"Tried {attempts} times: no suitable Graph." + raise Exception(msg) + else: + attempts += 1 + + +def test_articulation_points(): + Ggen = _generate_no_biconnected() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(1): # change 1 to 3 or more for more realizations. 
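+ # A connected graph that is not biconnected has node connectivity 1, + # so the minimum node cut found below should be a single node, and + # that node should be one of the graph's articulation points.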
+ G = next(Ggen) + cut = nx.minimum_node_cut(G, flow_func=flow_func) + assert len(cut) == 1, errmsg + assert cut.pop() in set(nx.articulation_points(G)), errmsg + + +def test_brandes_erlebach_book(): + # Figure 1 chapter 7: Connectivity + # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cutsets + assert 3 == len(nx.minimum_edge_cut(G, 1, 11, **kwargs)), errmsg + edge_cut = nx.minimum_edge_cut(G, **kwargs) + # Node 5 has only two edges + assert 2 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + assert {6, 7} == minimum_st_node_cut(G, 1, 11, **kwargs), errmsg + assert {6, 7} == nx.minimum_node_cut(G, 1, 11, **kwargs), errmsg + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 2 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_white_harary_paper(): + # Figure 1b white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + # A graph with high adhesion (edge connectivity) and low cohesion + # (node connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 3 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert {0} == node_cut, errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_petersen_cutset(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 3 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 3 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_octahedral_cutset(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 4 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 4 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_icosahedral_cutset(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: 
{flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 5 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 5 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_node_cutset_exception(): + G = nx.Graph() + G.add_edges_from([(1, 2), (3, 4)]) + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, nx.minimum_node_cut, G, flow_func=flow_func) + + +def test_node_cutset_random_graphs(): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(3): + G = nx.fast_gnp_random_graph(50, 0.25, seed=42) + if not nx.is_connected(G): + ccs = iter(nx.connected_components(G)) + start = arbitrary_element(next(ccs)) + G.add_edges_from((start, arbitrary_element(c)) for c in ccs) + cutset = nx.minimum_node_cut(G, flow_func=flow_func) + assert nx.node_connectivity(G) == len(cutset), errmsg + G.remove_nodes_from(cutset) + assert not nx.is_connected(G), errmsg + + +def test_edge_cutset_random_graphs(): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(3): + G = nx.fast_gnp_random_graph(50, 0.25, seed=42) + if not nx.is_connected(G): + ccs = iter(nx.connected_components(G)) + start = arbitrary_element(next(ccs)) + G.add_edges_from((start, arbitrary_element(c)) for c in ccs) + cutset = nx.minimum_edge_cut(G, flow_func=flow_func) + assert nx.edge_connectivity(G) == len(cutset), errmsg + G.remove_edges_from(cutset) + assert not nx.is_connected(G), errmsg + + +def test_empty_graphs(): + G = nx.Graph() + D = nx.DiGraph() + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXPointlessConcept, interface_func, G, flow_func=flow_func + ) + pytest.raises( + nx.NetworkXPointlessConcept, interface_func, D, flow_func=flow_func + ) + + +def test_unbounded(): + G = nx.complete_graph(5) + for flow_func in flow_funcs: + assert 4 == len(minimum_st_edge_cut(G, 1, 4, flow_func=flow_func)) + + +def test_missing_source(): + G = nx.path_graph(4) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 10, 1, flow_func=flow_func + ) + + +def test_missing_target(): + G = nx.path_graph(4) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 1, 10, flow_func=flow_func + ) + + +def test_not_weakly_connected(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, interface_func, G, flow_func=flow_func) + + +def test_not_connected(): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, interface_func, G, flow_func=flow_func) + + +def tests_min_cut_complete(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + assert 4 == len(interface_func(G, flow_func=flow_func)) + + +def tests_min_cut_complete_directed(): + G = nx.complete_graph(5) + G = 
G.to_directed() + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + assert 4 == len(interface_func(G, flow_func=flow_func)) + + +def tests_minimum_st_node_cut(): + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3, 7, 8, 11, 12]) + G.add_edges_from([(7, 11), (1, 11), (1, 12), (12, 8), (0, 1)]) + nodelist = minimum_st_node_cut(G, 7, 11) + assert nodelist == set() + + +def test_invalid_auxiliary(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, minimum_st_node_cut, G, 0, 3, auxiliary=G) + + +def test_interface_only_source(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + pytest.raises(nx.NetworkXError, interface_func, G, s=0) + + +def test_interface_only_target(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + pytest.raises(nx.NetworkXError, interface_func, G, t=3) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py new file mode 100644 index 0000000..0c0fad9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py @@ -0,0 +1,249 @@ +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.utils import pairwise + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.edmonds_karp, + flow.dinitz, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +def is_path(G, path): + return all(v in G[u] for u, v in pairwise(path)) + + +def are_edge_disjoint_paths(G, paths): + if not paths: + return False + for path in paths: + assert is_path(G, path) + paths_edges = [list(pairwise(p)) for p in paths] + num_of_edges = sum(len(e) for e in paths_edges) + num_unique_edges = len(set.union(*[set(es) for es in paths_edges])) + if num_of_edges == num_unique_edges: + return True + return False + + +def are_node_disjoint_paths(G, paths): + if not paths: + return False + for path in paths: + assert is_path(G, path) + # first and last nodes are source and target + st = {paths[0][0], paths[0][-1]} + num_of_nodes = len([n for path in paths for n in path if n not in st]) + num_unique_nodes = len({n for path in paths for n in path if n not in st}) + if num_of_nodes == num_unique_nodes: + return True + return False + + +def test_graph_from_pr_2053(): + G = nx.Graph() + G.add_edges_from( + [ + ("A", "B"), + ("A", "D"), + ("A", "F"), + ("A", "G"), + ("B", "C"), + ("B", "D"), + ("B", "G"), + ("C", "D"), + ("C", "E"), + ("C", "Z"), + ("D", "E"), + ("D", "F"), + ("E", "F"), + ("E", "Z"), + ("F", "Z"), + ("G", "Z"), + ] + ) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_paths = list(nx.edge_disjoint_paths(G, "A", "Z", **kwargs)) + assert are_edge_disjoint_paths(G, edge_paths), errmsg + assert nx.edge_connectivity(G, "A", "Z") == len(edge_paths), errmsg + # node disjoint paths + node_paths = list(nx.node_disjoint_paths(G, "A", "Z", **kwargs)) + assert are_node_disjoint_paths(G, node_paths), errmsg + assert nx.node_connectivity(G, "A", "Z") == len(node_paths), errmsg + + +def test_florentine_families(): + G = nx.florentine_families_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = 
list(nx.edge_disjoint_paths(G, "Medici", "Strozzi", **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert nx.edge_connectivity(G, "Medici", "Strozzi") == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, "Medici", "Strozzi", **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert nx.node_connectivity(G, "Medici", "Strozzi") == len(node_dpaths), errmsg + + +def test_karate(): + G = nx.karate_club_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 33, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert nx.edge_connectivity(G, 0, 33) == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 33, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert nx.node_connectivity(G, 0, 33) == len(node_dpaths), errmsg + + +def test_petersen_disjoint_paths(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 3 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 3 == len(node_dpaths), errmsg + + +def test_octahedral_disjoint_paths(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 5, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 4 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 5, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 4 == len(node_dpaths), errmsg + + +def test_icosahedral_disjoint_paths(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 5 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 5 == len(node_dpaths), errmsg + + +def test_cutoff_disjoint_paths(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for cutoff in [2, 4]: + kwargs["cutoff"] = cutoff + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert cutoff == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert cutoff == len(node_dpaths), errmsg + + +def test_missing_source_edge_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.edge_disjoint_paths(G, 10, 1)) + + +def 
test_missing_source_node_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.node_disjoint_paths(G, 10, 1)) + + +def test_missing_target_edge_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.edge_disjoint_paths(G, 1, 10)) + + +def test_missing_target_node_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.node_disjoint_paths(G, 1, 10)) + + +def test_not_weakly_connected_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_not_weakly_connected_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_not_connected_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_not_connected_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_isolated_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + G.add_node(1) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_isolated_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + G.add_node(1) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_invalid_auxiliary(): + with pytest.raises(nx.NetworkXError): + G = nx.complete_graph(5) + list(nx.node_disjoint_paths(G, 0, 3, auxiliary=G)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py new file mode 100644 index 0000000..b580e5b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py @@ -0,0 +1,502 @@ +import itertools as it +import random + +import pytest + +import networkx as nx +from networkx.algorithms.connectivity import k_edge_augmentation +from networkx.algorithms.connectivity.edge_augmentation import ( + _unpack_available_edges, + collapse, + complement_edges, + is_k_edge_connected, + is_locally_k_edge_connected, +) +from networkx.utils import pairwise + +# This should be set to the largest k for which an efficient algorithm is +# explicitly defined. +MAX_EFFICIENT_K = 2 + + +def tarjan_bridge_graph(): + # graph from tarjan paper + # RE Tarjan - "A note on finding the bridges of a graph" + # Information Processing Letters, 1974 - Elsevier + # doi:10.1016/0020-0190(74)90003-9. 
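+ # Each tuple in ccs traces a closed walk; its pairwise edges form one + # 2-edge-connected block, and the three bridge edges join the blocks + # into a single connected graph.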
+ # define 2-connected components and bridges + ccs = [ + (1, 2, 4, 3, 1, 4), + (5, 6, 7, 5), + (8, 9, 10, 8), + (17, 18, 16, 15, 17), + (11, 12, 14, 13, 11, 14), + ] + bridges = [(4, 8), (3, 5), (3, 17)] + G = nx.Graph(it.chain(*(pairwise(path) for path in ccs + bridges))) + return G + + +def test_weight_key(): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + G.add_edges_from([(3, 8), (1, 2), (2, 3)]) + impossible = {(3, 6), (3, 9)} + rng = random.Random(0) + avail_uv = list(set(complement_edges(G)) - impossible) + avail = [(u, v, {"cost": rng.random()}) for u, v in avail_uv] + + _augment_and_check(G, k=1) + _augment_and_check(G, k=1, avail=avail_uv) + _augment_and_check(G, k=1, avail=avail, weight="cost") + + _check_augmentations(G, avail, weight="cost") + + +def test_is_locally_k_edge_connected_exceptions(): + pytest.raises(nx.NetworkXNotImplemented, is_k_edge_connected, nx.DiGraph(), k=0) + pytest.raises(nx.NetworkXNotImplemented, is_k_edge_connected, nx.MultiGraph(), k=0) + pytest.raises(ValueError, is_k_edge_connected, nx.Graph(), k=0) + + +def test_is_k_edge_connected(): + G = nx.barbell_graph(10, 0) + assert is_k_edge_connected(G, k=1) + assert not is_k_edge_connected(G, k=2) + + G = nx.Graph() + G.add_nodes_from([5, 15]) + assert not is_k_edge_connected(G, k=1) + assert not is_k_edge_connected(G, k=2) + + G = nx.complete_graph(5) + assert is_k_edge_connected(G, k=1) + assert is_k_edge_connected(G, k=2) + assert is_k_edge_connected(G, k=3) + assert is_k_edge_connected(G, k=4) + + G = nx.compose(nx.complete_graph([0, 1, 2]), nx.complete_graph([3, 4, 5])) + assert not is_k_edge_connected(G, k=1) + assert not is_k_edge_connected(G, k=2) + assert not is_k_edge_connected(G, k=3) + + +def test_is_k_edge_connected_exceptions(): + pytest.raises( + nx.NetworkXNotImplemented, is_locally_k_edge_connected, nx.DiGraph(), 1, 2, k=0 + ) + pytest.raises( + nx.NetworkXNotImplemented, + is_locally_k_edge_connected, + nx.MultiGraph(), + 1, + 2, + k=0, + ) + pytest.raises(ValueError, is_locally_k_edge_connected, nx.Graph(), 1, 2, k=0) + + +def test_is_locally_k_edge_connected(): + G = nx.barbell_graph(10, 0) + assert is_locally_k_edge_connected(G, 5, 15, k=1) + assert not is_locally_k_edge_connected(G, 5, 15, k=2) + + G = nx.Graph() + G.add_nodes_from([5, 15]) + assert not is_locally_k_edge_connected(G, 5, 15, k=2) + + +def test_null_graph(): + G = nx.Graph() + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_cliques(): + for n in range(1, 10): + G = nx.complete_graph(n) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_clique_and_node(): + for n in range(1, 10): + G = nx.complete_graph(n) + G.add_node(n + 1) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_point_graph(): + G = nx.Graph() + G.add_node(1) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_edgeless_graph(): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + _check_augmentations(G) + + +def test_invalid_k(): + G = nx.Graph() + pytest.raises(ValueError, list, k_edge_augmentation(G, k=-1)) + pytest.raises(ValueError, list, k_edge_augmentation(G, k=0)) + + +def test_unfeasible(): + G = tarjan_bridge_graph() + pytest.raises(nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=1, avail=[])) + + pytest.raises(nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=2, avail=[])) + + pytest.raises( + nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=2, avail=[(7, 9)]) + ) + + # partial solutions should not error if real solutions are infeasible + aug_edges = 
list(k_edge_augmentation(G, k=2, avail=[(7, 9)], partial=True)) + assert aug_edges == [(7, 9)] + + _check_augmentations(G, avail=[], max_k=MAX_EFFICIENT_K + 2) + + _check_augmentations(G, avail=[(7, 9)], max_k=MAX_EFFICIENT_K + 2) + + +def test_tarjan(): + G = tarjan_bridge_graph() + + aug_edges = set(_augment_and_check(G, k=2)[0]) + print(f"aug_edges = {aug_edges!r}") + # can't assert exact edge equality due to non-deterministic edge order + # but we do know the size of the solution must be 3 + assert len(aug_edges) == 3 + + avail = [ + (9, 7), + (8, 5), + (2, 10), + (6, 13), + (11, 18), + (1, 17), + (2, 3), + (16, 17), + (18, 14), + (15, 14), + ] + aug_edges = set(_augment_and_check(G, avail=avail, k=2)[0]) + + # Can't assert exact length since approximation depends on the order of a + # dict traversal. + assert len(aug_edges) <= 3 * 2 + + _check_augmentations(G, avail) + + +def test_configuration(): + # seeds = [2718183590, 2470619828, 1694705158, 3001036531, 2401251497] + seeds = [1001, 1002, 1003, 1004] + for seed in seeds: + deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000) + G = nx.Graph(nx.configuration_model(deg_seq, seed=seed)) + G.remove_edges_from(nx.selfloop_edges(G)) + _check_augmentations(G) + + +def test_shell(): + # seeds = [2057382236, 3331169846, 1840105863, 476020778, 2247498425] + seeds = [18] + for seed in seeds: + constructor = [(12, 70, 0.8), (15, 40, 0.6)] + G = nx.random_shell_graph(constructor, seed=seed) + _check_augmentations(G) + + +def test_karate(): + G = nx.karate_club_graph() + _check_augmentations(G) + + +def test_star(): + G = nx.star_graph(3) + _check_augmentations(G) + + G = nx.star_graph(5) + _check_augmentations(G) + + G = nx.star_graph(10) + _check_augmentations(G) + + +def test_barbell(): + G = nx.barbell_graph(5, 0) + _check_augmentations(G) + + G = nx.barbell_graph(5, 2) + _check_augmentations(G) + + G = nx.barbell_graph(5, 3) + _check_augmentations(G) + + G = nx.barbell_graph(5, 4) + _check_augmentations(G) + + +def test_bridge(): + G = nx.Graph([(2393, 2257), (2393, 2685), (2685, 2257), (1758, 2257)]) + _check_augmentations(G) + + +def test_gnp_augmentation(): + rng = random.Random(0) + G = nx.gnp_random_graph(30, 0.005, seed=0) + # Randomly make edges available + avail = { + (u, v): 1 + rng.random() for u, v in complement_edges(G) if rng.random() < 0.25 + } + _check_augmentations(G, avail) + + +def _assert_solution_properties(G, aug_edges, avail_dict=None): + """Checks that aug_edges are consistently formatted""" + if avail_dict is not None: + assert all(e in avail_dict for e in aug_edges), ( + "when avail is specified aug-edges should be in avail" + ) + + # Normalize edges to sorted tuples so duplicates collapse in the set. + unique_aug = set(map(tuple, map(sorted, aug_edges))) + assert len(aug_edges) == len(unique_aug), "edges should be unique" + + assert not any(u == v for u, v in unique_aug), "should be no self-edges" + + assert not any(G.has_edge(u, v) for u, v in unique_aug), ( + "aug edges and G.edges should be disjoint" + ) + + +def _augment_and_check( + G, k, avail=None, weight=None, verbose=False, orig_k=None, max_aug_k=None +): + """ + Does one specific augmentation and checks for properties of the result + """ + if orig_k is None: + try: + orig_k = nx.edge_connectivity(G) + except nx.NetworkXPointlessConcept: + orig_k = 0 + info = {} + try: + if avail is not None: + # ensure avail is in dict form + avail_dict = dict(zip(*_unpack_available_edges(avail, weight=weight))) + else: + avail_dict = None + try: + # Find the augmentation
if possible + generator = nx.k_edge_augmentation(G, k=k, weight=weight, avail=avail) + assert not isinstance(generator, list), "should always return an iter" + aug_edges = [] + for edge in generator: + aug_edges.append(edge) + except nx.NetworkXUnfeasible: + infeasible = True + info["infeasible"] = True + assert len(aug_edges) == 0, "should not generate anything if unfeasible" + + if avail is None: + n_nodes = G.number_of_nodes() + assert n_nodes <= k, ( + "unconstrained cases are only unfeasible if |V| <= k. " + f"Got |V|={n_nodes} and k={k}" + ) + else: + if max_aug_k is None: + G_aug_all = G.copy() + G_aug_all.add_edges_from(avail_dict.keys()) + try: + max_aug_k = nx.edge_connectivity(G_aug_all) + except nx.NetworkXPointlessConcept: + max_aug_k = 0 + + assert max_aug_k < k, ( + "avail should only be unfeasible if using all edges " + "does not achieve k-edge-connectivity" + ) + + # Test for a partial solution + partial_edges = list( + nx.k_edge_augmentation(G, k=k, weight=weight, partial=True, avail=avail) + ) + + info["n_partial_edges"] = len(partial_edges) + + if avail_dict is None: + assert set(partial_edges) == set(complement_edges(G)), ( + "unweighted partial solutions should be the complement" + ) + elif len(avail_dict) > 0: + H = G.copy() + + # Find the partial / full augmented connectivity + H.add_edges_from(partial_edges) + partial_conn = nx.edge_connectivity(H) + + H.add_edges_from(set(avail_dict.keys())) + full_conn = nx.edge_connectivity(H) + + # Full connectivity should be no better than our partial + # solution. + assert partial_conn == full_conn, ( + "adding more edges should not increase k-conn" + ) + + # Find the new edge-connectivity after adding the augmenting edges + aug_edges = partial_edges + else: + infeasible = False + + # Find the weight of the augmentation + num_edges = len(aug_edges) + if avail is not None: + total_weight = sum(avail_dict[e] for e in aug_edges) + else: + total_weight = num_edges + + info["total_weight"] = total_weight + info["num_edges"] = num_edges + + # Find the new edge-connectivity after adding the augmenting edges + G_aug = G.copy() + G_aug.add_edges_from(aug_edges) + try: + aug_k = nx.edge_connectivity(G_aug) + except nx.NetworkXPointlessConcept: + aug_k = 0 + info["aug_k"] = aug_k + + # Do checks + if not infeasible and orig_k < k: + assert info["aug_k"] >= k, f"connectivity should increase to k={k} or more" + + assert info["aug_k"] >= orig_k, "augmenting should never reduce connectivity" + + _assert_solution_properties(G, aug_edges, avail_dict) + + except Exception: + info["failed"] = True + print(f"edges = {list(G.edges())}") + print(f"nodes = {list(G.nodes())}") + print(f"aug_edges = {list(aug_edges)}") + print(f"info = {info}") + raise + else: + if verbose: + print(f"info = {info}") + + if infeasible: + aug_edges = None + return aug_edges, info + + +def _check_augmentations(G, avail=None, max_k=None, weight=None, verbose=False): + """Helper to check weighted/unweighted cases with multiple values of k""" + # Using all available edges, find the maximum edge-connectivity + try: + orig_k = nx.edge_connectivity(G) + except nx.NetworkXPointlessConcept: + orig_k = 0 + + if avail is not None: + all_aug_edges = _unpack_available_edges(avail, weight=weight)[0] + G_aug_all = G.copy() + G_aug_all.add_edges_from(all_aug_edges) + try: + max_aug_k = nx.edge_connectivity(G_aug_all) + except nx.NetworkXPointlessConcept: + max_aug_k = 0 + else: + max_aug_k = G.number_of_nodes() - 1 + + if max_k is None: + max_k = min(4, max_aug_k) + + avail_uniform = 
dict.fromkeys(complement_edges(G), 1) + + if verbose: + print("\n=== CHECK_AUGMENTATION ===") + print(f"G.number_of_nodes = {G.number_of_nodes()!r}") + print(f"G.number_of_edges = {G.number_of_edges()!r}") + print(f"max_k = {max_k!r}") + print(f"max_aug_k = {max_aug_k!r}") + print(f"orig_k = {orig_k!r}") + + # check augmentation for multiple values of k + for k in range(1, max_k + 1): + if verbose: + print("---------------") + print(f"Checking k = {k}") + + # Check the unweighted version + if verbose: + print("unweighted case") + aug_edges1, info1 = _augment_and_check(G, k=k, verbose=verbose, orig_k=orig_k) + + # Check that the weighted version with all available edges and uniform + # weights gives a similar solution to the unweighted case. + if verbose: + print("weighted uniform case") + aug_edges2, info2 = _augment_and_check( + G, + k=k, + avail=avail_uniform, + verbose=verbose, + orig_k=orig_k, + max_aug_k=G.number_of_nodes() - 1, + ) + + # Check the weighted version + if avail is not None: + if verbose: + print("weighted case") + aug_edges3, info3 = _augment_and_check( + G, + k=k, + avail=avail, + weight=weight, + verbose=verbose, + max_aug_k=max_aug_k, + orig_k=orig_k, + ) + + if aug_edges1 is not None: + # Check approximation ratios + if k == 1: + # when k=1, both solutions should be optimal + assert info2["total_weight"] == info1["total_weight"] + if k == 2: + # when k=2, the weighted version is an approximation + if orig_k == 0: + # the approximation ratio is 3 if G is not connected + assert info2["total_weight"] <= info1["total_weight"] * 3 + else: + # the approximation ratio is 2 if G was connected + assert info2["total_weight"] <= info1["total_weight"] * 2 + _check_unconstrained_bridge_property(G, info1) + + +def _check_unconstrained_bridge_property(G, info1): + # Check Theorem 5 from Eswaran and Tarjan (1975), Augmentation problems + import math + + bridge_ccs = list(nx.connectivity.bridge_components(G)) + # condense G into a forest C + C = collapse(G, bridge_ccs) + + p = len([n for n, d in C.degree() if d == 1]) # leaf nodes + q = len([n for n, d in C.degree() if d == 0]) # isolated nodes + if p + q > 1: + size_target = math.ceil(p / 2) + q + size_aug = info1["num_edges"] + assert size_aug == size_target, ( + "augmentation size is different from what theory predicts" + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py new file mode 100644 index 0000000..f14ed64 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py @@ -0,0 +1,488 @@ +import itertools as it + +import pytest + +import networkx as nx +from networkx.algorithms.connectivity import EdgeComponentAuxGraph, bridge_components +from networkx.algorithms.connectivity.edge_kcomponents import general_k_edge_subgraphs +from networkx.utils import pairwise + +# ---------------- +# Helper functions +# ---------------- + + +def fset(list_of_sets): + """allows == to be used for list of sets""" + return set(map(frozenset, list_of_sets)) + + +def _assert_subgraph_edge_connectivity(G, ccs_subgraph, k): + """ + tests properties of k-edge-connected subgraphs + + the actual edge connectivity should be no less than k unless the cc is a + single node.
+ """ + for cc in ccs_subgraph: + C = G.subgraph(cc) + if len(cc) > 1: + connectivity = nx.edge_connectivity(C) + assert connectivity >= k + + +def _memo_connectivity(G, u, v, memo): + edge = (u, v) + if edge in memo: + return memo[edge] + if not G.is_directed(): + redge = (v, u) + if redge in memo: + return memo[redge] + memo[edge] = nx.edge_connectivity(G, *edge) + return memo[edge] + + +def _all_pairs_connectivity(G, cc, k, memo): + # Brute force check + for u, v in it.combinations(cc, 2): + # Use a memoization dict to save on computation + connectivity = _memo_connectivity(G, u, v, memo) + if G.is_directed(): + connectivity = min(connectivity, _memo_connectivity(G, v, u, memo)) + assert connectivity >= k + + +def _assert_local_cc_edge_connectivity(G, ccs_local, k, memo): + """ + tests properties of k-edge-connected components + + the local edge connectivity between each pair of nodes in the original + graph should be no less than k unless the cc is a single node. + """ + for cc in ccs_local: + if len(cc) > 1: + # Strategy for testing a bit faster: If the subgraph has high edge + # connectivity then it must have local connectivity + C = G.subgraph(cc) + connectivity = nx.edge_connectivity(C) + if connectivity < k: + # Otherwise do the brute force (with memoization) check + _all_pairs_connectivity(G, cc, k, memo) + + +# Helper function +def _check_edge_connectivity(G): + """ + Helper - generates all k-edge-components using the aux graph. Checks + both the local and subgraph edge connectivity of each cc. Also checks that + alternate methods of computing the k-edge-ccs generate the same result. + """ + # Construct the auxiliary graph that can be used to make each k-cc or k-sub + aux_graph = EdgeComponentAuxGraph.construct(G) + + # memoize the local connectivity in this graph + memo = {} + + for k in it.count(1): + # Test "local" k-edge-components and k-edge-subgraphs + ccs_local = fset(aux_graph.k_edge_components(k)) + ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k)) + + # Check connectivity properties that should be guaranteed by the + # algorithms. + _assert_local_cc_edge_connectivity(G, ccs_local, k, memo) + _assert_subgraph_edge_connectivity(G, ccs_subgraph, k) + + if k == 1 or k == 2 and not G.is_directed(): + assert ccs_local == ccs_subgraph, ( + "Subgraphs and components should be the same when k == 1 or (k == 2 and not G.is_directed())" + ) + + if G.is_directed(): + # Test special case methods are the same as the aux graph + if k == 1: + alt_sccs = fset(nx.strongly_connected_components(G)) + assert alt_sccs == ccs_local, "k=1 failed alt" + assert alt_sccs == ccs_subgraph, "k=1 failed alt" + else: + # Test special case methods are the same as the aux graph + if k == 1: + alt_ccs = fset(nx.connected_components(G)) + assert alt_ccs == ccs_local, "k=1 failed alt" + assert alt_ccs == ccs_subgraph, "k=1 failed alt" + elif k == 2: + alt_bridge_ccs = fset(bridge_components(G)) + assert alt_bridge_ccs == ccs_local, "k=2 failed alt" + assert alt_bridge_ccs == ccs_subgraph, "k=2 failed alt" + # if new methods for k == 3 or k == 4 are implemented add them here + + # Check the general subgraph method works by itself + alt_subgraph_ccs = fset( + [set(C.nodes()) for C in general_k_edge_subgraphs(G, k=k)] + ) + assert alt_subgraph_ccs == ccs_subgraph, "alt subgraph method failed" + + # Stop once k is larger than all special case methods + # and we cannot break down ccs any further.
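+ # Once every local component is a singleton, raising k cannot split + # anything further, so the sweep over k can end here.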
+ if k > 2 and all(len(cc) == 1 for cc in ccs_local): + break + + +# ---------------- +# Misc tests +# ---------------- + + +def test_zero_k_exception(): + G = nx.Graph() + # functions that return generators error immediately + pytest.raises(ValueError, nx.k_edge_components, G, k=0) + pytest.raises(ValueError, nx.k_edge_subgraphs, G, k=0) + + # actual generators only error when you get the first item + aux_graph = EdgeComponentAuxGraph.construct(G) + pytest.raises(ValueError, list, aux_graph.k_edge_components(k=0)) + pytest.raises(ValueError, list, aux_graph.k_edge_subgraphs(k=0)) + + pytest.raises(ValueError, list, general_k_edge_subgraphs(G, k=0)) + + +def test_empty_input(): + G = nx.Graph() + assert [] == list(nx.k_edge_components(G, k=5)) + assert [] == list(nx.k_edge_subgraphs(G, k=5)) + + G = nx.DiGraph() + assert [] == list(nx.k_edge_components(G, k=5)) + assert [] == list(nx.k_edge_subgraphs(G, k=5)) + + +def test_not_implemented(): + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, EdgeComponentAuxGraph.construct, G) + pytest.raises(nx.NetworkXNotImplemented, nx.k_edge_components, G, k=2) + pytest.raises(nx.NetworkXNotImplemented, nx.k_edge_subgraphs, G, k=2) + with pytest.raises(nx.NetworkXNotImplemented): + next(bridge_components(G)) + with pytest.raises(nx.NetworkXNotImplemented): + next(bridge_components(nx.DiGraph())) + + +def test_general_k_edge_subgraph_quick_return(): + # tests quick return optimization + G = nx.Graph() + G.add_node(0) + subgraphs = list(general_k_edge_subgraphs(G, k=1)) + assert len(subgraphs) == 1 + for subgraph in subgraphs: + assert subgraph.number_of_nodes() == 1 + + G.add_node(1) + subgraphs = list(general_k_edge_subgraphs(G, k=1)) + assert len(subgraphs) == 2 + for subgraph in subgraphs: + assert subgraph.number_of_nodes() == 1 + + +# ---------------- +# Undirected tests +# ---------------- + + +def test_random_gnp(): + # seeds = [1550709854, 1309423156, 4208992358, 2785630813, 1915069929] + seeds = [12, 13] + + for seed in seeds: + G = nx.gnp_random_graph(20, 0.2, seed=seed) + _check_edge_connectivity(G) + + +def test_configuration(): + # seeds = [2718183590, 2470619828, 1694705158, 3001036531, 2401251497] + seeds = [14, 15] + for seed in seeds: + deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000) + G = nx.Graph(nx.configuration_model(deg_seq, seed=seed)) + G.remove_edges_from(nx.selfloop_edges(G)) + _check_edge_connectivity(G) + + +def test_shell(): + # seeds = [2057382236, 3331169846, 1840105863, 476020778, 2247498425] + seeds = [20] + for seed in seeds: + constructor = [(12, 70, 0.8), (15, 40, 0.6)] + G = nx.random_shell_graph(constructor, seed=seed) + _check_edge_connectivity(G) + + +def test_karate(): + G = nx.karate_club_graph() + _check_edge_connectivity(G) + + +def test_tarjan_bridge(): + # graph from tarjan paper + # RE Tarjan - "A note on finding the bridges of a graph" + # Information Processing Letters, 1974 - Elsevier + # doi:10.1016/0020-0190(74)90003-9. 
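+    # Each tuple below is a closed walk; `pairwise` expands it into its
+    # consecutive edges, e.g. pairwise((5, 6, 7, 5)) -> (5, 6), (6, 7),
+    # (7, 5), a 2-edge-connected triangle.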
+ # define 2-connected components and bridges + ccs = [ + (1, 2, 4, 3, 1, 4), + (5, 6, 7, 5), + (8, 9, 10, 8), + (17, 18, 16, 15, 17), + (11, 12, 14, 13, 11, 14), + ] + bridges = [(4, 8), (3, 5), (3, 17)] + G = nx.Graph(it.chain(*(pairwise(path) for path in ccs + bridges))) + _check_edge_connectivity(G) + + +def test_bridge_cc(): + # define 2-connected components and bridges + cc2 = [(1, 2, 4, 3, 1, 4), (8, 9, 10, 8), (11, 12, 13, 11)] + bridges = [(4, 8), (3, 5), (20, 21), (22, 23, 24)] + G = nx.Graph(it.chain(*(pairwise(path) for path in cc2 + bridges))) + bridge_ccs = fset(bridge_components(G)) + target_ccs = fset( + [{1, 2, 3, 4}, {5}, {8, 9, 10}, {11, 12, 13}, {20}, {21}, {22}, {23}, {24}] + ) + assert bridge_ccs == target_ccs + _check_edge_connectivity(G) + + +def test_undirected_aux_graph(): + # Graph similar to the one in + # http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264 + a, b, c, d, e, f, g, h, i = "abcdefghi" + paths = [ + (a, d, b, f, c), + (a, e, b), + (a, e, b, c, g, b, a), + (c, b), + (f, g, f), + (h, i), + ] + G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + aux_graph = EdgeComponentAuxGraph.construct(G) + + components_1 = fset(aux_graph.k_edge_subgraphs(k=1)) + target_1 = fset([{a, b, c, d, e, f, g}, {h, i}]) + assert target_1 == components_1 + + # Check that the undirected case for k=1 agrees with CCs + alt_1 = fset(nx.k_edge_subgraphs(G, k=1)) + assert alt_1 == components_1 + + components_2 = fset(aux_graph.k_edge_subgraphs(k=2)) + target_2 = fset([{a, b, c, d, e, f, g}, {h}, {i}]) + assert target_2 == components_2 + + # Check that the undirected case for k=2 agrees with bridge components + alt_2 = fset(nx.k_edge_subgraphs(G, k=2)) + assert alt_2 == components_2 + + components_3 = fset(aux_graph.k_edge_subgraphs(k=3)) + target_3 = fset([{a}, {b, c, f, g}, {d}, {e}, {h}, {i}]) + assert target_3 == components_3 + + components_4 = fset(aux_graph.k_edge_subgraphs(k=4)) + target_4 = fset([{a}, {b}, {c}, {d}, {e}, {f}, {g}, {h}, {i}]) + assert target_4 == components_4 + + _check_edge_connectivity(G) + + +def test_local_subgraph_difference(): + paths = [ + (11, 12, 13, 14, 11, 13, 14, 12), # first 4-clique + (21, 22, 23, 24, 21, 23, 24, 22), # second 4-clique + # paths connecting each node of the 4 cliques + (11, 101, 21), + (12, 102, 22), + (13, 103, 23), + (14, 104, 24), + ] + G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + aux_graph = EdgeComponentAuxGraph.construct(G) + + # Each clique is returned separately in k-edge-subgraphs + subgraph_ccs = fset(aux_graph.k_edge_subgraphs(3)) + subgraph_target = fset( + [{101}, {102}, {103}, {104}, {21, 22, 23, 24}, {11, 12, 13, 14}] + ) + assert subgraph_ccs == subgraph_target + + # But in k-edge-ccs they are returned together + # because they are locally 3-edge-connected + local_ccs = fset(aux_graph.k_edge_components(3)) + local_target = fset([{101}, {102}, {103}, {104}, {11, 12, 13, 14, 21, 22, 23, 24}]) + assert local_ccs == local_target + + +def test_local_subgraph_difference_directed(): + dipaths = [(1, 2, 3, 4, 1), (1, 3, 1)] + G = nx.DiGraph(it.chain(*[pairwise(path) for path in dipaths])) + + assert fset(nx.k_edge_components(G, k=1)) == fset(nx.k_edge_subgraphs(G, k=1)) + + # Unlike undirected graphs, when k=2, for directed graphs there is a case + # where the k-edge-ccs are not the same as the k-edge-subgraphs. 
+    # (in undirected graphs, by contrast, ccs and subgraphs agree when k=2)
+    assert fset(nx.k_edge_components(G, k=2)) != fset(nx.k_edge_subgraphs(G, k=2))
+
+    assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3))
+
+    _check_edge_connectivity(G)
+
+
+def test_triangles():
+    paths = [
+        (11, 12, 13, 11),  # first 3-clique
+        (21, 22, 23, 21),  # second 3-clique
+        (11, 21),  # connected by an edge
+    ]
+    G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
+
+    # subgraph and ccs are the same in all cases here
+    assert fset(nx.k_edge_components(G, k=1)) == fset(nx.k_edge_subgraphs(G, k=1))
+
+    assert fset(nx.k_edge_components(G, k=2)) == fset(nx.k_edge_subgraphs(G, k=2))
+
+    assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3))
+
+    _check_edge_connectivity(G)
+
+
+def test_four_clique():
+    paths = [
+        (11, 12, 13, 14, 11, 13, 14, 12),  # first 4-clique
+        (21, 22, 23, 24, 21, 23, 24, 22),  # second 4-clique
+        # paths connecting the 4-cliques such that they are
+        # 3-connected in G, but not in the subgraph.
+        # Case where the nodes bridging them do not have degree less than 3.
+        (100, 13),
+        (12, 100, 22),
+        (13, 200, 23),
+        (14, 300, 24),
+    ]
+    G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
+
+    # The subgraphs and ccs are different for k=3
+    local_ccs = fset(nx.k_edge_components(G, k=3))
+    subgraphs = fset(nx.k_edge_subgraphs(G, k=3))
+    assert local_ccs != subgraphs
+
+    # The cliques are in the same cc
+    clique1 = frozenset(paths[0])
+    clique2 = frozenset(paths[1])
+    assert clique1.union(clique2).union({100}) in local_ccs
+
+    # but different subgraphs
+    assert clique1 in subgraphs
+    assert clique2 in subgraphs
+
+    assert G.degree(100) == 3
+
+    _check_edge_connectivity(G)
+
+
+def test_five_clique():
+    # Make a graph that can be disconnected by removing fewer than 4 edges,
+    # but in which no node has degree less than 4.
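+    # (disjoint_union relabels the two K5s onto nodes 0-4 and 5-9; the aux
+    # paths below attach helper nodes 100 and 200 so every node keeps
+    # degree >= 4 while the cut between the cliques stays small.)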
+    G = nx.disjoint_union(nx.complete_graph(5), nx.complete_graph(5))
+    paths = [
+        # add aux-connections
+        (1, 100, 6),
+        (2, 100, 7),
+        (3, 200, 8),
+        (4, 200, 100),
+    ]
+    G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
+    assert min(dict(nx.degree(G)).values()) == 4
+
+    # For k=3 they are the same
+    assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3))
+
+    # For k=4 they are different
+    # the aux nodes are in the same CC as clique 1 but not in the same subgraph
+    assert fset(nx.k_edge_components(G, k=4)) != fset(nx.k_edge_subgraphs(G, k=4))
+
+    # For k=5 they are not the same
+    assert fset(nx.k_edge_components(G, k=5)) != fset(nx.k_edge_subgraphs(G, k=5))
+
+    # For k=6 they are the same
+    assert fset(nx.k_edge_components(G, k=6)) == fset(nx.k_edge_subgraphs(G, k=6))
+    _check_edge_connectivity(G)
+
+
+# ----------------
+# Directed tests
+# ----------------
+
+
+def test_directed_aux_graph():
+    # Graph similar to the one in
+    # http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
+    a, b, c, d, e, f, g, h, i = "abcdefghi"
+    dipaths = [
+        (a, d, b, f, c),
+        (a, e, b),
+        (a, e, b, c, g, b, a),
+        (c, b),
+        (f, g, f),
+        (h, i),
+    ]
+    G = nx.DiGraph(it.chain(*[pairwise(path) for path in dipaths]))
+    aux_graph = EdgeComponentAuxGraph.construct(G)
+
+    components_1 = fset(aux_graph.k_edge_subgraphs(k=1))
+    target_1 = fset([{a, b, c, d, e, f, g}, {h}, {i}])
+    assert target_1 == components_1
+
+    # Check that the directed case for k=1 agrees with SCCs
+    alt_1 = fset(nx.strongly_connected_components(G))
+    assert alt_1 == components_1
+
+    components_2 = fset(aux_graph.k_edge_subgraphs(k=2))
+    target_2 = fset([{i}, {e}, {d}, {b, c, f, g}, {h}, {a}])
+    assert target_2 == components_2
+
+    components_3 = fset(aux_graph.k_edge_subgraphs(k=3))
+    target_3 = fset([{a}, {b}, {c}, {d}, {e}, {f}, {g}, {h}, {i}])
+    assert target_3 == components_3
+
+
+def test_random_gnp_directed():
+    # seeds = [3894723670, 500186844, 267231174, 2181982262, 1116750056]
+    seeds = [21]
+    for seed in seeds:
+        G = nx.gnp_random_graph(20, 0.2, directed=True, seed=seed)
+        _check_edge_connectivity(G)
+
+
+def test_configuration_directed():
+    # seeds = [671221681, 2403749451, 124433910, 672335939, 1193127215]
+    seeds = [67]
+    for seed in seeds:
+        deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000)
+        G = nx.DiGraph(nx.configuration_model(deg_seq, seed=seed))
+        G.remove_edges_from(nx.selfloop_edges(G))
+        _check_edge_connectivity(G)
+
+
+def test_shell_directed():
+    # seeds = [3134027055, 4079264063, 1350769518, 1405643020, 530038094]
+    seeds = [31]
+    for seed in seeds:
+        constructor = [(12, 70, 0.8), (15, 40, 0.6)]
+        G = nx.random_shell_graph(constructor, seed=seed).to_directed()
+        _check_edge_connectivity(G)
+
+
+def test_karate_directed():
+    G = nx.karate_club_graph().to_directed()
+    _check_edge_connectivity(G)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py
new file mode 100644
index 0000000..f4436ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py
@@ -0,0 +1,296 @@
+# Test for Moody and White k-components algorithm
+import pytest
+
+import networkx as nx
+from networkx.algorithms.connectivity.kcomponents import (
+    _consolidate,
+    build_k_number_dict,
+)
+
+##
+# A nice synthetic graph
+##
+
+
+def torrents_and_ferraro_graph():
+    # Graph from
https://arxiv.org/pdf/1503.04476v1 p.26 + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # This edge makes the graph biconnected; it's + # needed because K5s share only one node. + G.add_edge(new_node + 16, new_node + 8) + + for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + nbrs2 = G[new_node + 9] + G.remove_node(new_node + 9) + for nbr in nbrs2: + G.add_edge(new_node + 18, nbr) + return G + + +def test_directed(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.gnp_random_graph(10, 0.2, directed=True, seed=42) + nx.k_components(G) + + +# Helper function +def _check_connectivity(G, k_components): + for k, components in k_components.items(): + if k < 3: + continue + # check that k-components have node connectivity >= k. + for component in components: + C = G.subgraph(component) + K = nx.node_connectivity(C) + assert K >= k + + +@pytest.mark.slow +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + # In this example graph there are 8 3-components, 4 with 15 nodes + # and 4 with 5 nodes. + assert len(result[3]) == 8 + assert len([c for c in result[3] if len(c) == 15]) == 4 + assert len([c for c in result[3] if len(c) == 5]) == 4 + # There are also 8 4-components all with 5 nodes. 
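+    # nx.k_components returns a dict keyed by the connectivity level k,
+    # so result[4] is the list of node sets forming the 4-components.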
+ assert len(result[4]) == 8 + assert all(len(c) == 5 for c in result[4]) + + +@pytest.mark.slow +def test_random_gnp(): + G = nx.gnp_random_graph(50, 0.2, seed=42) + result = nx.k_components(G) + _check_connectivity(G, result) + + +@pytest.mark.slow +def test_shell(): + constructor = [(20, 80, 0.8), (80, 180, 0.6)] + G = nx.random_shell_graph(constructor, seed=42) + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_configuration(): + deg_seq = nx.random_powerlaw_tree_sequence(100, tries=5, seed=72) + G = nx.Graph(nx.configuration_model(deg_seq)) + G.remove_edges_from(nx.selfloop_edges(G)) + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_karate(): + G = nx.karate_club_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_karate_component_number(): + karate_k_num = { + 0: 4, + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 3, + 7: 4, + 8: 4, + 9: 2, + 10: 3, + 11: 1, + 12: 2, + 13: 4, + 14: 2, + 15: 2, + 16: 2, + 17: 2, + 18: 2, + 19: 3, + 20: 2, + 21: 2, + 22: 2, + 23: 3, + 24: 3, + 25: 3, + 26: 2, + 27: 3, + 28: 3, + 29: 3, + 30: 4, + 31: 3, + 32: 4, + 33: 4, + } + G = nx.karate_club_graph() + k_components = nx.k_components(G) + k_num = build_k_number_dict(k_components) + assert karate_k_num == k_num + + +def test_davis_southern_women(): + G = nx.davis_southern_women_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_davis_southern_women_detail_3_and_4(): + solution = { + 3: [ + { + "Nora Fayette", + "E10", + "Myra Liddel", + "E12", + "E14", + "Frances Anderson", + "Evelyn Jefferson", + "Ruth DeSand", + "Helen Lloyd", + "Eleanor Nye", + "E9", + "E8", + "E5", + "E4", + "E7", + "E6", + "E1", + "Verne Sanderson", + "E3", + "E2", + "Theresa Anderson", + "Pearl Oglethorpe", + "Katherina Rogers", + "Brenda Rogers", + "E13", + "Charlotte McDowd", + "Sylvia Avondale", + "Laura Mandeville", + } + ], + 4: [ + { + "Nora Fayette", + "E10", + "Verne Sanderson", + "E12", + "Frances Anderson", + "Evelyn Jefferson", + "Ruth DeSand", + "Helen Lloyd", + "Eleanor Nye", + "E9", + "E8", + "E5", + "E4", + "E7", + "E6", + "Myra Liddel", + "E3", + "Theresa Anderson", + "Katherina Rogers", + "Brenda Rogers", + "Charlotte McDowd", + "Sylvia Avondale", + "Laura Mandeville", + } + ], + } + G = nx.davis_southern_women_graph() + result = nx.k_components(G) + for k, components in result.items(): + if k < 3: + continue + assert len(components) == len(solution[k]) + for component in components: + assert component in solution[k] + + +def test_set_consolidation_rosettacode(): + # Tests from http://rosettacode.org/wiki/Set_consolidation + def list_of_sets_equal(result, solution): + assert {frozenset(s) for s in result} == {frozenset(s) for s in solution} + + question = [{"A", "B"}, {"C", "D"}] + solution = [{"A", "B"}, {"C", "D"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"A", "B"}, {"B", "C"}] + solution = [{"A", "B", "C"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"A", "B"}, {"C", "D"}, {"D", "B"}] + solution = [{"A", "C", "B", "D"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"H", "I", "K"}, {"A", "B"}, {"C", "D"}, {"D", "B"}, {"F", "G", "H"}] + solution = [{"A", "C", "B", "D"}, {"G", "F", "I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [ + {"A", "H"}, + {"H", "I", "K"}, + {"A", "B"}, + {"C", "D"}, + {"D", "B"}, + {"F", "G", "H"}, + ] + solution = [{"A", "C", "B", "D", "G", "F", 
"I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [ + {"H", "I", "K"}, + {"A", "B"}, + {"C", "D"}, + {"D", "B"}, + {"F", "G", "H"}, + {"A", "H"}, + ] + solution = [{"A", "C", "B", "D", "G", "F", "I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py new file mode 100644 index 0000000..67c0bfd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py @@ -0,0 +1,270 @@ +# Jordi Torrents +# Test for k-cutsets +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity.kcutsets import _is_separating_set + +MAX_CUTSETS_TO_TEST = 4 # originally 100. cut to decrease testing time + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +## +# Some nice synthetic graphs +## +def graph_example_1(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [ + (labels[(0, 0)], labels[(1, 0)]), + (labels[(0, 4)], labels[(1, 4)]), + (labels[(3, 0)], labels[(4, 0)]), + (labels[(3, 4)], labels[(4, 4)]), + ]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + G.add_edge(new_node + 16, new_node + 5) + return G + + +def torrents_and_ferraro_graph(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # Commenting this makes the graph not biconnected !! 
+        # This stupid mistake made one reviewer very angry :P
+        G.add_edge(new_node + 16, new_node + 8)
+
+    for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]:
+        new_node = G.order() + 1
+        # Petersen graph is triconnected
+        P = nx.petersen_graph()
+        G = nx.disjoint_union(G, P)
+        # Add two edges between the grid and P
+        G.add_edge(new_node + 1, nodes[0])
+        G.add_edge(new_node, nodes[1])
+        # K5 is 4-connected
+        K = nx.complete_graph(5)
+        G = nx.disjoint_union(G, K)
+        # Add three edges between P and K5
+        G.add_edge(new_node + 2, new_node + 11)
+        G.add_edge(new_node + 3, new_node + 12)
+        G.add_edge(new_node + 4, new_node + 13)
+        # Add another K5 sharing two nodes
+        G = nx.disjoint_union(G, K)
+        nbrs = G[new_node + 10]
+        G.remove_node(new_node + 10)
+        for nbr in nbrs:
+            G.add_edge(new_node + 17, nbr)
+        nbrs2 = G[new_node + 9]
+        G.remove_node(new_node + 9)
+        for nbr in nbrs2:
+            G.add_edge(new_node + 18, nbr)
+    return G
+
+
+# Helper function
+def _check_separating_sets(G):
+    for cc in nx.connected_components(G):
+        if len(cc) < 3:
+            continue
+        Gc = G.subgraph(cc)
+        node_conn = nx.node_connectivity(Gc)
+        all_cuts = nx.all_node_cuts(Gc)
+        # Only test a limited number of cut sets to reduce test time.
+        for cut in itertools.islice(all_cuts, MAX_CUTSETS_TO_TEST):
+            assert node_conn == len(cut)
+            assert not nx.is_connected(nx.restricted_view(G, cut, []))
+
+
+@pytest.mark.slow
+def test_torrents_and_ferraro_graph():
+    G = torrents_and_ferraro_graph()
+    _check_separating_sets(G)
+
+
+def test_example_1():
+    G = graph_example_1()
+    _check_separating_sets(G)
+
+
+def test_random_gnp():
+    G = nx.gnp_random_graph(100, 0.1, seed=42)
+    _check_separating_sets(G)
+
+
+def test_shell():
+    constructor = [(20, 80, 0.8), (80, 180, 0.6)]
+    G = nx.random_shell_graph(constructor, seed=42)
+    _check_separating_sets(G)
+
+
+def test_configuration():
+    deg_seq = nx.random_powerlaw_tree_sequence(100, tries=5, seed=72)
+    G = nx.Graph(nx.configuration_model(deg_seq))
+    G.remove_edges_from(nx.selfloop_edges(G))
+    _check_separating_sets(G)
+
+
+def test_karate():
+    G = nx.karate_club_graph()
+    _check_separating_sets(G)
+
+
+def _generate_no_biconnected(max_attempts=50):
+    attempts = 0
+    while True:
+        G = nx.fast_gnp_random_graph(100, 0.0575, seed=42)
+        if nx.is_connected(G) and not nx.is_biconnected(G):
+            attempts = 0
+            yield G
+        else:
+            if attempts >= max_attempts:
+                msg = f"Tried {attempts} times: no suitable graph."
+                raise Exception(msg)
+            else:
+                attempts += 1
+
+
+def test_articulation_points():
+    Ggen = _generate_no_biconnected()
+    for i in range(1):  # change 1 to 3 or more for more realizations.
+        G = next(Ggen)
+        articulation_points = [{a} for a in nx.articulation_points(G)]
+        for cut in nx.all_node_cuts(G):
+            assert cut in articulation_points
+
+
+def test_grid_2d_graph():
+    # All minimum node cuts of a 2d grid
+    # are the four pairs of nodes that are
+    # neighbors of the four corner nodes.
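+    # e.g. removing {(0, 1), (1, 0)} isolates the corner node (0, 0).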
+ G = nx.grid_2d_graph(5, 5) + solution = [{(0, 1), (1, 0)}, {(3, 0), (4, 1)}, {(3, 4), (4, 3)}, {(0, 3), (1, 4)}] + for cut in nx.all_node_cuts(G): + assert cut in solution + + +def test_disconnected_graph(): + G = nx.fast_gnp_random_graph(100, 0.01, seed=42) + cuts = nx.all_node_cuts(G) + pytest.raises(nx.NetworkXError, next, cuts) + + +@pytest.mark.slow +def test_alternative_flow_functions(): + graphs = [nx.grid_2d_graph(4, 4), nx.cycle_graph(5)] + for G in graphs: + node_conn = nx.node_connectivity(G) + for flow_func in flow_funcs: + all_cuts = nx.all_node_cuts(G, flow_func=flow_func) + # Only test a limited number of cut sets to reduce test time. + for cut in itertools.islice(all_cuts, MAX_CUTSETS_TO_TEST): + assert node_conn == len(cut) + assert not nx.is_connected(nx.restricted_view(G, cut, [])) + + +def test_is_separating_set_complete_graph(): + G = nx.complete_graph(5) + assert _is_separating_set(G, {0, 1, 2, 3}) + + +def test_is_separating_set(): + for i in [5, 10, 15]: + G = nx.star_graph(i) + max_degree_node = max(G, key=G.degree) + assert _is_separating_set(G, {max_degree_node}) + + +def test_non_repeated_cuts(): + # The algorithm was repeating the cut {0, 1} for the giant biconnected + # component of the Karate club graph. + K = nx.karate_club_graph() + bcc = max(list(nx.biconnected_components(K)), key=len) + G = K.subgraph(bcc) + solution = [{32, 33}, {2, 33}, {0, 3}, {0, 1}, {29, 33}] + cuts = list(nx.all_node_cuts(G)) + assert len(solution) == len(cuts) + for cut in cuts: + assert cut in solution + + +def test_cycle_graph(): + G = nx.cycle_graph(5) + solution = [{0, 2}, {0, 3}, {1, 3}, {1, 4}, {2, 4}] + cuts = list(nx.all_node_cuts(G)) + assert len(solution) == len(cuts) + for cut in cuts: + assert cut in solution + + +def test_complete_graph(): + G = nx.complete_graph(5) + assert nx.node_connectivity(G) == 4 + assert list(nx.all_node_cuts(G)) == [] + + +def test_all_node_cuts_simple_case(): + G = nx.complete_graph(5) + G.remove_edges_from([(0, 1), (3, 4)]) + expected = [{0, 1, 2}, {2, 3, 4}] + actual = list(nx.all_node_cuts(G)) + assert len(actual) == len(expected) + for cut in actual: + assert cut in expected diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py new file mode 100644 index 0000000..2b9e2ba --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py @@ -0,0 +1,102 @@ +from itertools import chain + +import pytest + +import networkx as nx + + +def _check_partition(G, cut_value, partition, weight): + assert isinstance(partition, tuple) + assert len(partition) == 2 + assert isinstance(partition[0], list) + assert isinstance(partition[1], list) + assert len(partition[0]) > 0 + assert len(partition[1]) > 0 + assert sum(map(len, partition)) == len(G) + assert set(chain.from_iterable(partition)) == set(G) + partition = tuple(map(set, partition)) + w = 0 + for u, v, e in G.edges(data=True): + if (u in partition[0]) == (v in partition[1]): + w += e.get(weight, 1) + assert w == cut_value + + +def _test_stoer_wagner(G, answer, weight="weight"): + cut_value, partition = nx.stoer_wagner(G, weight, heap=nx.utils.PairingHeap) + assert cut_value == answer + _check_partition(G, cut_value, partition, weight) + cut_value, partition = nx.stoer_wagner(G, weight, heap=nx.utils.BinaryHeap) + assert cut_value == answer + _check_partition(G, cut_value, partition, weight) + + +def 
test_graph1(): + G = nx.Graph() + G.add_edge("x", "a", weight=3) + G.add_edge("x", "b", weight=1) + G.add_edge("a", "c", weight=3) + G.add_edge("b", "c", weight=5) + G.add_edge("b", "d", weight=4) + G.add_edge("d", "e", weight=2) + G.add_edge("c", "y", weight=2) + G.add_edge("e", "y", weight=3) + _test_stoer_wagner(G, 4) + + +def test_graph2(): + G = nx.Graph() + G.add_edge("x", "a") + G.add_edge("x", "b") + G.add_edge("a", "c") + G.add_edge("b", "c") + G.add_edge("b", "d") + G.add_edge("d", "e") + G.add_edge("c", "y") + G.add_edge("e", "y") + _test_stoer_wagner(G, 2) + + +def test_graph3(): + # Source: + # Stoer, M. and Wagner, F. (1997). "A simple min-cut algorithm". Journal of + # the ACM 44 (4), 585-591. + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 5, weight=3) + G.add_edge(2, 3, weight=3) + G.add_edge(2, 5, weight=2) + G.add_edge(2, 6, weight=2) + G.add_edge(3, 4, weight=4) + G.add_edge(3, 7, weight=2) + G.add_edge(4, 7, weight=2) + G.add_edge(4, 8, weight=2) + G.add_edge(5, 6, weight=3) + G.add_edge(6, 7, weight=1) + G.add_edge(7, 8, weight=3) + _test_stoer_wagner(G, 4) + + +def test_weight_name(): + G = nx.Graph() + G.add_edge(1, 2, weight=1, cost=8) + G.add_edge(1, 3, cost=2) + G.add_edge(2, 3, cost=4) + _test_stoer_wagner(G, 6, weight="cost") + + +def test_exceptions(): + G = nx.Graph() + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_node(1) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_node(2) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_edge(1, 2, weight=-2) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) + G = nx.MultiDiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/utils.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/utils.py new file mode 100644 index 0000000..7bf9994 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/connectivity/utils.py @@ -0,0 +1,88 @@ +""" +Utilities for connectivity package +""" + +import networkx as nx + +__all__ = ["build_auxiliary_node_connectivity", "build_auxiliary_edge_connectivity"] + + +@nx._dispatchable(returns_graph=True) +def build_auxiliary_node_connectivity(G): + r"""Creates a directed graph D from an undirected graph G to compute flow + based node connectivity. + + For an undirected graph G having `n` nodes and `m` edges we derive a + directed graph D with `2n` nodes and `2m+n` arcs by replacing each + original node `v` with two nodes `vA`, `vB` linked by an (internal) + arc in D. Then for each edge (`u`, `v`) in G we add two arcs (`uB`, `vA`) + and (`vB`, `uA`) in D. Finally we set the attribute capacity = 1 for each + arc in D [1]_. + + For a directed graph having `n` nodes and `m` arcs we derive a + directed graph D with `2n` nodes and `m+n` arcs by replacing each + original node `v` with two nodes `vA`, `vB` linked by an (internal) + arc (`vA`, `vB`) in D. Then for each arc (`u`, `v`) in G we add one + arc (`uB`, `vA`) in D. Finally we set the attribute capacity = 1 for + each arc in D. + + A dictionary with a mapping between nodes in the original graph and the + auxiliary digraph is stored as a graph attribute: D.graph['mapping']. + + References + ---------- + .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. 
in Brandes and + Erlebach, 'Network Analysis: Methodological Foundations', Lecture + Notes in Computer Science, Volume 3418, Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31955-9_7 + + """ + directed = G.is_directed() + + mapping = {} + H = nx.DiGraph() + + for i, node in enumerate(G): + mapping[node] = i + H.add_node(f"{i}A", id=node) + H.add_node(f"{i}B", id=node) + H.add_edge(f"{i}A", f"{i}B", capacity=1) + + edges = [] + for source, target in G.edges(): + edges.append((f"{mapping[source]}B", f"{mapping[target]}A")) + if not directed: + edges.append((f"{mapping[target]}B", f"{mapping[source]}A")) + H.add_edges_from(edges, capacity=1) + + # Store mapping as graph attribute + H.graph["mapping"] = mapping + return H + + +@nx._dispatchable(returns_graph=True) +def build_auxiliary_edge_connectivity(G): + """Auxiliary digraph for computing flow based edge connectivity + + If the input graph is undirected, we replace each edge (`u`,`v`) with + two reciprocal arcs (`u`, `v`) and (`v`, `u`) and then we set the attribute + 'capacity' for each arc to 1. If the input graph is directed we simply + add the 'capacity' attribute. Part of algorithm 1 in [1]_ . + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. (this is a + chapter, look for the reference of the book). + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + """ + if G.is_directed(): + H = nx.DiGraph() + H.add_nodes_from(G.nodes()) + H.add_edges_from(G.edges(), capacity=1) + return H + else: + H = nx.DiGraph() + H.add_nodes_from(G.nodes()) + for source, target in G.edges(): + H.add_edges_from([(source, target), (target, source)], capacity=1) + return H diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/core.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/core.py new file mode 100644 index 0000000..fec26ec --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/core.py @@ -0,0 +1,588 @@ +""" +Find the k-cores of a graph. + +The k-core is found by recursively pruning nodes with degrees less than k. + +See the following references for details: + +An O(m) Algorithm for Cores Decomposition of Networks +Vladimir Batagelj and Matjaz Zaversnik, 2003. +https://arxiv.org/abs/cs.DS/0310049 + +Generalized Cores +Vladimir Batagelj and Matjaz Zaversnik, 2002. +https://arxiv.org/pdf/cs/0202039 + +For directed graphs a more general notion is that of D-cores which +looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core +is the k-core. + +D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy +Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011. +http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf + +Multi-scale structure and topological anomaly detection via a new network \ +statistic: The onion decomposition +L. Hébert-Dufresne, J. A. Grochow, and A. Allard +Scientific Reports 6, 31708 (2016) +http://doi.org/10.1038/srep31708 + +""" + +import networkx as nx + +__all__ = [ + "core_number", + "k_core", + "k_shell", + "k_crust", + "k_corona", + "k_truss", + "onion_layers", +] + + +@nx.utils.not_implemented_for("multigraph") +@nx._dispatchable +def core_number(G): + """Returns the core number for each node. + + A k-core is a maximal subgraph that contains nodes of degree k or more. + + The core number of a node is the largest value k of a k-core containing + that node. 
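+
+    For example, in a three-node path graph every node survives pruning at
+    k=1 but not at k=2 (removing the two endpoints leaves the middle node
+    with degree 0), so all three nodes have core number 1.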
+ + Parameters + ---------- + G : NetworkX graph + An undirected or directed graph + + Returns + ------- + core_number : dictionary + A dictionary keyed by node to the core number. + + Raises + ------ + NetworkXNotImplemented + If `G` is a multigraph or contains self loops. + + Notes + ----- + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Examples + -------- + >>> degrees = [0, 1, 2, 2, 2, 2, 3] + >>> H = nx.havel_hakimi_graph(degrees) + >>> nx.core_number(H) + {0: 1, 1: 2, 2: 2, 3: 2, 4: 1, 5: 2, 6: 0} + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)]) + >>> nx.core_number(G) + {1: 2, 2: 2, 3: 2, 4: 2} + + References + ---------- + .. [1] An O(m) Algorithm for Cores Decomposition of Networks + Vladimir Batagelj and Matjaz Zaversnik, 2003. + https://arxiv.org/abs/cs.DS/0310049 + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph has self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise nx.NetworkXNotImplemented(msg) + degrees = dict(G.degree()) + # Sort nodes by degree. + nodes = sorted(degrees, key=degrees.get) + bin_boundaries = [0] + curr_degree = 0 + for i, v in enumerate(nodes): + if degrees[v] > curr_degree: + bin_boundaries.extend([i] * (degrees[v] - curr_degree)) + curr_degree = degrees[v] + node_pos = {v: pos for pos, v in enumerate(nodes)} + # The initial guess for the core number of a node is its degree. + core = degrees + nbrs = {v: list(nx.all_neighbors(G, v)) for v in G} + for v in nodes: + for u in nbrs[v]: + if core[u] > core[v]: + nbrs[u].remove(v) + pos = node_pos[u] + bin_start = bin_boundaries[core[u]] + node_pos[u] = bin_start + node_pos[nodes[bin_start]] = pos + nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start] + bin_boundaries[core[u]] += 1 + core[u] -= 1 + return core + + +def _core_subgraph(G, k_filter, k=None, core=None): + """Returns the subgraph induced by nodes passing filter `k_filter`. + + Parameters + ---------- + G : NetworkX graph + The graph or directed graph to process + k_filter : filter function + This function filters the nodes chosen. It takes three inputs: + A node of G, the filter's cutoff, and the core dict of the graph. + The function should return a Boolean value. + k : int, optional + The order of the core. If not specified use the max core number. + This value is used as the cutoff for the filter. + core : dict, optional + Precomputed core numbers keyed by node for the graph `G`. + If not specified, the core numbers will be computed from `G`. + + """ + if core is None: + core = core_number(G) + if k is None: + k = max(core.values()) + nodes = (v for v in core if k_filter(v, k, core)) + return G.subgraph(nodes).copy() + + +@nx.utils.not_implemented_for("multigraph") +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def k_core(G, k=None, core_number=None): + """Returns the k-core of G. + + A k-core is a maximal subgraph that contains nodes of degree `k` or more. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + k : int, optional + The order of the core. If not specified return the main core. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-core subgraph + + Raises + ------ + NetworkXNotImplemented + The k-core is not defined for multigraphs or graphs with self loops. + + Notes + ----- + The main core is the core with `k` as the largest core_number. 
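+    Equivalently, when `k` is not specified this function returns the k-core
+    for the largest core number present in the graph.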
+ + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + Examples + -------- + >>> degrees = [0, 1, 2, 2, 2, 2, 3] + >>> H = nx.havel_hakimi_graph(degrees) + >>> H.degree + DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0}) + >>> nx.k_core(H).nodes + NodeView((1, 2, 3, 5)) + + See Also + -------- + core_number + + References + ---------- + .. [1] An O(m) Algorithm for Cores Decomposition of Networks + Vladimir Batagelj and Matjaz Zaversnik, 2003. + https://arxiv.org/abs/cs.DS/0310049 + """ + + def k_filter(v, k, c): + return c[v] >= k + + return _core_subgraph(G, k_filter, k, core_number) + + +@nx.utils.not_implemented_for("multigraph") +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def k_shell(G, k=None, core_number=None): + """Returns the k-shell of G. + + The k-shell is the subgraph induced by nodes with core number k. + That is, nodes in the k-core that are not in the (k+1)-core. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph. + k : int, optional + The order of the shell. If not specified return the outer shell. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + + Returns + ------- + G : NetworkX graph + The k-shell subgraph + + Raises + ------ + NetworkXNotImplemented + The k-shell is not implemented for multigraphs or graphs with self loops. + + Notes + ----- + This is similar to k_corona but in that case only neighbors in the + k-core are considered. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + Examples + -------- + >>> degrees = [0, 1, 2, 2, 2, 2, 3] + >>> H = nx.havel_hakimi_graph(degrees) + >>> H.degree + DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0}) + >>> nx.k_shell(H, k=1).nodes + NodeView((0, 4)) + + See Also + -------- + core_number + k_corona + + + References + ---------- + .. [1] A model of Internet topology using k-shell decomposition + Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt, + and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 + http://www.pnas.org/content/104/27/11150.full + """ + + def k_filter(v, k, c): + return c[v] == k + + return _core_subgraph(G, k_filter, k, core_number) + + +@nx.utils.not_implemented_for("multigraph") +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def k_crust(G, k=None, core_number=None): + """Returns the k-crust of G. + + The k-crust is the graph G with the edges of the k-core removed + and isolated nodes found after the removal of edges are also removed. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph. + k : int, optional + The order of the shell. If not specified return the main crust. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-crust subgraph + + Raises + ------ + NetworkXNotImplemented + The k-crust is not implemented for multigraphs or graphs with self loops. + + Notes + ----- + This definition of k-crust is different than the definition in [1]_. + The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. 
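+
+    When `k` is not specified, the main crust is returned: the nodes whose
+    core number is strictly smaller than the maximum, i.e. the complement
+    of the main core.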
+ + Examples + -------- + >>> degrees = [0, 1, 2, 2, 2, 2, 3] + >>> H = nx.havel_hakimi_graph(degrees) + >>> H.degree + DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0}) + >>> nx.k_crust(H, k=1).nodes + NodeView((0, 4, 6)) + + See Also + -------- + core_number + + References + ---------- + .. [1] A model of Internet topology using k-shell decomposition + Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt, + and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 + http://www.pnas.org/content/104/27/11150.full + """ + # Default for k is one less than in _core_subgraph, so just inline. + # Filter is c[v] <= k + if core_number is None: + core_number = nx.core_number(G) + if k is None: + k = max(core_number.values()) - 1 + nodes = (v for v in core_number if core_number[v] <= k) + return G.subgraph(nodes).copy() + + +@nx.utils.not_implemented_for("multigraph") +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def k_corona(G, k, core_number=None): + """Returns the k-corona of G. + + The k-corona is the subgraph of nodes in the k-core which have + exactly k neighbors in the k-core. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + k : int + The order of the corona. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-corona subgraph + + Raises + ------ + NetworkXNotImplemented + The k-corona is not defined for multigraphs or graphs with self loops. + + Notes + ----- + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + Examples + -------- + >>> degrees = [0, 1, 2, 2, 2, 2, 3] + >>> H = nx.havel_hakimi_graph(degrees) + >>> H.degree + DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0}) + >>> nx.k_corona(H, k=2).nodes + NodeView((1, 2, 3, 5)) + + See Also + -------- + core_number + + References + ---------- + .. [1] k -core (bootstrap) percolation on complex networks: + Critical phenomena and nonlocal effects, + A. V. Goltsev, S. N. Dorogovtsev, and J. F. F. Mendes, + Phys. Rev. E 73, 056101 (2006) + http://link.aps.org/doi/10.1103/PhysRevE.73.056101 + """ + + def func(v, k, c): + return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k) + + return _core_subgraph(G, func, k, core_number) + + +@nx.utils.not_implemented_for("directed") +@nx.utils.not_implemented_for("multigraph") +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def k_truss(G, k): + """Returns the k-truss of `G`. + + The k-truss is the maximal induced subgraph of `G` which contains at least + three vertices where every edge is incident to at least `k-2` triangles. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + k : int + The order of the truss + + Returns + ------- + H : NetworkX graph + The k-truss subgraph + + Raises + ------ + NetworkXNotImplemented + If `G` is a multigraph or directed graph or if it contains self loops. + + Notes + ----- + A k-clique is a (k-2)-truss and a k-truss is a (k+1)-core. + + Graph, node, and edge attributes are copied to the subgraph. + + K-trusses were originally defined in [2] which states that the k-truss + is the maximal induced subgraph where each edge belongs to at least + `k-2` triangles. A more recent paper, [1], uses a slightly different + definition requiring that each edge belong to at least `k` triangles. + This implementation uses the original definition of `k-2` triangles. 
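+    In particular, for `k=2` every edge must belong to at least zero
+    triangles, so the 2-truss is simply the graph with its isolated nodes
+    removed, as the example below shows.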
+ + Examples + -------- + >>> degrees = [0, 1, 2, 2, 2, 2, 3] + >>> H = nx.havel_hakimi_graph(degrees) + >>> H.degree + DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0}) + >>> nx.k_truss(H, k=2).nodes + NodeView((0, 1, 2, 3, 4, 5)) + + References + ---------- + .. [1] Bounds and Algorithms for k-truss. Paul Burkhardt, Vance Faber, + David G. Harris, 2018. https://arxiv.org/abs/1806.05523v2 + .. [2] Trusses: Cohesive Subgraphs for Social Network Analysis. Jonathan + Cohen, 2005. + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph has self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise nx.NetworkXNotImplemented(msg) + + H = G.copy() + + n_dropped = 1 + while n_dropped > 0: + n_dropped = 0 + to_drop = [] + seen = set() + for u in H: + nbrs_u = set(H[u]) + seen.add(u) + new_nbrs = [v for v in nbrs_u if v not in seen] + for v in new_nbrs: + if len(nbrs_u & set(H[v])) < (k - 2): + to_drop.append((u, v)) + H.remove_edges_from(to_drop) + n_dropped = len(to_drop) + H.remove_nodes_from(list(nx.isolates(H))) + + return H + + +@nx.utils.not_implemented_for("multigraph") +@nx.utils.not_implemented_for("directed") +@nx._dispatchable +def onion_layers(G): + """Returns the layer of each vertex in an onion decomposition of the graph. + + The onion decomposition refines the k-core decomposition by providing + information on the internal organization of each k-shell. It is usually + used alongside the `core numbers`. + + Parameters + ---------- + G : NetworkX graph + An undirected graph without self loops. + + Returns + ------- + od_layers : dictionary + A dictionary keyed by node to the onion layer. The layers are + contiguous integers starting at 1. + + Raises + ------ + NetworkXNotImplemented + If `G` is a multigraph or directed graph or if it contains self loops. + + Examples + -------- + >>> degrees = [0, 1, 2, 2, 2, 2, 3] + >>> H = nx.havel_hakimi_graph(degrees) + >>> H.degree + DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0}) + >>> nx.onion_layers(H) + {6: 1, 0: 2, 4: 3, 1: 4, 2: 4, 3: 4, 5: 4} + + See Also + -------- + core_number + + References + ---------- + .. [1] Multi-scale structure and topological anomaly detection via a new + network statistic: The onion decomposition + L. Hébert-Dufresne, J. A. Grochow, and A. Allard + Scientific Reports 6, 31708 (2016) + http://doi.org/10.1038/srep31708 + .. [2] Percolation and the effective structure of complex networks + A. Allard and L. Hébert-Dufresne + Physical Review X 9, 011023 (2019) + http://doi.org/10.1103/PhysRevX.9.011023 + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph contains self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise nx.NetworkXNotImplemented(msg) + # Dictionaries to register the k-core/onion decompositions. + od_layers = {} + # Adjacency list + neighbors = {v: list(nx.all_neighbors(G, v)) for v in G} + # Effective degree of nodes. + degrees = dict(G.degree()) + # Performs the onion decomposition. + current_core = 1 + current_layer = 1 + # Sets vertices of degree 0 to layer 1, if any. + isolated_nodes = list(nx.isolates(G)) + if len(isolated_nodes) > 0: + for v in isolated_nodes: + od_layers[v] = current_layer + degrees.pop(v) + current_layer = 2 + # Finds the layer for the remaining nodes. + while len(degrees) > 0: + # Sets the order for looking at nodes. + nodes = sorted(degrees, key=degrees.get) + # Sets properly the current core. 
min_degree = degrees[nodes[0]]
+        if min_degree > current_core:
+            current_core = min_degree
+        # Identifies vertices in the current layer.
+        this_layer = []
+        for n in nodes:
+            if degrees[n] > current_core:
+                break
+            this_layer.append(n)
+        # Identifies the core/layer of the vertices in the current layer.
+        for v in this_layer:
+            od_layers[v] = current_layer
+            for n in neighbors[v]:
+                neighbors[n].remove(v)
+                degrees[n] = degrees[n] - 1
+            degrees.pop(v)
+        # Updates the layer count.
+        current_layer = current_layer + 1
+    # Returns the dictionary containing the onion layer of each vertex.
+    return od_layers
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/covering.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/covering.py
new file mode 100644
index 0000000..cdf607b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/covering.py
@@ -0,0 +1,142 @@
+"""Functions related to graph covers."""
+
+from functools import partial
+from itertools import chain
+
+import networkx as nx
+from networkx.utils import arbitrary_element, not_implemented_for
+
+__all__ = ["min_edge_cover", "is_edge_cover"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def min_edge_cover(G, matching_algorithm=None):
+    """Returns a minimum cardinality edge cover of the graph as a set of edges.
+
+    A smallest edge cover can be found in polynomial time by finding
+    a maximum matching and extending it greedily so that all nodes
+    are covered. This function follows that process. A maximum matching
+    algorithm can be specified for the first step of the algorithm.
+    The resulting set contains one 2-tuple for each edge (the usual case)
+    or both 2-tuples `(u, v)` and `(v, u)` for each edge. The latter
+    occurs only when a bipartite matching algorithm is specified as
+    `matching_algorithm`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    matching_algorithm : function
+        A function that returns a maximum cardinality matching for `G`.
+        The function must take one input, the graph `G`, and return
+        either a set of edges (with only one direction for the pair of nodes)
+        or a dictionary mapping each node to its mate. If not specified,
+        :func:`~networkx.algorithms.matching.max_weight_matching` is used.
+        Common bipartite matching functions include
+        :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
+        or
+        :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`.
+
+    Returns
+    -------
+    min_cover : set
+
+        A set of the edges in a minimum edge cover in the form of tuples.
+        It contains only one of the equivalent 2-tuples `(u, v)` and `(v, u)`
+        for each edge. If a bipartite method is used to compute the matching,
+        the returned set contains both the 2-tuples `(u, v)` and `(v, u)`
+        for each edge of a minimum edge cover.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> sorted(nx.min_edge_cover(G))
+    [(2, 1), (3, 0)]
+
+    Notes
+    -----
+    An edge cover of a graph is a set of edges such that every node of
+    the graph is incident to at least one edge of the set.
+    The minimum edge cover is an edge covering of smallest cardinality.
+
+    Due to its implementation, the worst-case running time of this algorithm
+    is bounded by the worst-case running time of the function
+    ``matching_algorithm``.
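+
+    For instance, in a three-node path graph a maximum matching covers two
+    of the three nodes; the greedy extension then adds one edge incident to
+    the remaining node, giving a cover of two edges.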
+
+    Minimum edge cover for `G` can also be found using
+    :func:`~networkx.algorithms.bipartite.covering.min_edge_covering` which is
+    simply this function with a default matching algorithm of
+    :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
+    """
+    if len(G) == 0:
+        return set()
+    if nx.number_of_isolates(G) > 0:
+        # ``min_cover`` does not exist as there is an isolated node
+        raise nx.NetworkXException(
+            "Graph has a node with no edge incident on it, so no edge cover exists."
+        )
+    if matching_algorithm is None:
+        matching_algorithm = partial(nx.max_weight_matching, maxcardinality=True)
+    maximum_matching = matching_algorithm(G)
+    # ``min_cover`` is superset of ``maximum_matching``
+    try:
+        # bipartite matching algs return dict so convert if needed
+        min_cover = set(maximum_matching.items())
+        bipartite_cover = True
+    except AttributeError:
+        min_cover = maximum_matching
+        bipartite_cover = False
+    # iterate for uncovered nodes
+    uncovered_nodes = set(G) - {v for u, v in min_cover} - {u for u, v in min_cover}
+    for v in uncovered_nodes:
+        # Since `v` is uncovered, each edge incident to `v` will join it
+        # with a covered node (otherwise, if there were an edge joining
+        # uncovered nodes `u` and `v`, the maximum matching algorithm
+        # would have found it), so we can choose an arbitrary edge
+        # incident to `v`. (This applies only in a simple graph, not a
+        # multigraph.)
+        u = arbitrary_element(G[v])
+        min_cover.add((u, v))
+        if bipartite_cover:
+            min_cover.add((v, u))
+    return min_cover
+
+
+@not_implemented_for("directed")
+@nx._dispatchable
+def is_edge_cover(G, cover):
+    """Decides whether a set of edges is a valid edge cover of the graph.
+
+    Given a set of edges, we can decide whether it is an edge cover by
+    checking that every node of the graph is incident to at least one
+    edge of the set.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    cover : set
+        Set of edges to be checked.
+
+    Returns
+    -------
+    bool
+        Whether the set of edges is a valid edge cover of the graph.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> cover = {(2, 1), (3, 0)}
+    >>> nx.is_edge_cover(G, cover)
+    True
+
+    Notes
+    -----
+    An edge cover of a graph is a set of edges such that every node of
+    the graph is incident to at least one edge of the set.
+    """
+    return set(G) <= set(chain.from_iterable(cover))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/cuts.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/cuts.py
new file mode 100644
index 0000000..e951431
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/cuts.py
@@ -0,0 +1,398 @@
+"""Functions for finding and evaluating cuts in a graph."""
+
+from itertools import chain
+
+import networkx as nx
+
+__all__ = [
+    "boundary_expansion",
+    "conductance",
+    "cut_size",
+    "edge_expansion",
+    "mixing_expansion",
+    "node_expansion",
+    "normalized_cut_size",
+    "volume",
+]
+
+
+# TODO STILL NEED TO UPDATE ALL THE DOCUMENTATION!
+
+
+@nx._dispatchable(edge_attrs="weight")
+def cut_size(G, S, T=None, weight=None):
+    """Returns the size of the cut between two sets of nodes.
+
+    A *cut* is a partition of the nodes of a graph into two sets. The
+    *cut size* is the sum of the weights of the edges "between" the two
+    sets of nodes.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    S : collection
+        A collection of nodes in `G`.
+
+    T : collection
+        A collection of nodes in `G`.
If not specified, this is taken to + be the set complement of `S`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + Total weight of all edges from nodes in set `S` to nodes in + set `T` (and, in the case of directed graphs, all edges from + nodes in `T` to nodes in `S`). + + Examples + -------- + In the graph with two cliques joined by a single edges, the natural + bipartition of the graph into two blocks, one for each clique, + yields a cut of weight one:: + + >>> G = nx.barbell_graph(3, 0) + >>> S = {0, 1, 2} + >>> T = {3, 4, 5} + >>> nx.cut_size(G, S, T) + 1 + + Each parallel edge in a multigraph is counted when determining the + cut size:: + + >>> G = nx.MultiGraph(["ab", "ab"]) + >>> S = {"a"} + >>> T = {"b"} + >>> nx.cut_size(G, S, T) + 2 + + Notes + ----- + In a multigraph, the cut size is the total weight of edges including + multiplicity. + + """ + edges = nx.edge_boundary(G, S, T, data=weight, default=1) + if G.is_directed(): + edges = chain(edges, nx.edge_boundary(G, T, S, data=weight, default=1)) + return sum(weight for u, v, weight in edges) + + +@nx._dispatchable(edge_attrs="weight") +def volume(G, S, weight=None): + """Returns the volume of a set of nodes. + + The *volume* of a set *S* is the sum of the (out-)degrees of nodes + in *S* (taking into account parallel edges in multigraphs). [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The volume of the set of nodes represented by `S` in the graph + `G`. + + See also + -------- + conductance + cut_size + edge_expansion + edge_boundary + normalized_cut_size + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + degree = G.out_degree if G.is_directed() else G.degree + return sum(d for v, d in degree(S, weight=weight)) + + +@nx._dispatchable(edge_attrs="weight") +def normalized_cut_size(G, S, T=None, weight=None): + """Returns the normalized size of the cut between two sets of nodes. + + The *normalized cut size* is the cut size times the sum of the + reciprocal sizes of the volumes of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The normalized cut size between the two sets `S` and `T`. + + Notes + ----- + In a multigraph, the cut size is the total weight of edges including + multiplicity. + + See also + -------- + conductance + cut_size + edge_expansion + volume + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T=T, weight=weight) + volume_S = volume(G, S, weight=weight) + volume_T = volume(G, T, weight=weight) + return num_cut_edges * ((1 / volume_S) + (1 / volume_T)) + + +@nx._dispatchable(edge_attrs="weight") +def conductance(G, S, T=None, weight=None): + """Returns the conductance of two sets of nodes. + + The *conductance* is the quotient of the cut size and the smaller of + the volumes of the two sets. 
[1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The conductance between the two sets `S` and `T`. + + See also + -------- + cut_size + edge_expansion + normalized_cut_size + volume + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T, weight=weight) + volume_S = volume(G, S, weight=weight) + volume_T = volume(G, T, weight=weight) + return num_cut_edges / min(volume_S, volume_T) + + +@nx._dispatchable(edge_attrs="weight") +def edge_expansion(G, S, T=None, weight=None): + """Returns the edge expansion between two node sets. + + The *edge expansion* is the quotient of the cut size and the smaller + of the cardinalities of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The edge expansion between the two sets `S` and `T`. + + See also + -------- + boundary_expansion + mixing_expansion + node_expansion + + References + ---------- + .. [1] Fan Chung. + *Spectral Graph Theory*. + (CBMS Regional Conference Series in Mathematics, No. 92), + American Mathematical Society, 1997, ISBN 0-8218-0315-8 + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T=T, weight=weight) + return num_cut_edges / min(len(S), len(T)) + + +@nx._dispatchable(edge_attrs="weight") +def mixing_expansion(G, S, T=None, weight=None): + """Returns the mixing expansion between two node sets. + + The *mixing expansion* is the quotient of the cut size and twice the + number of edges in the graph. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The mixing expansion between the two sets `S` and `T`. + + See also + -------- + boundary_expansion + edge_expansion + node_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends + in Theoretical Computer Science* 7.1–3 (2011): 1–336. + + + """ + num_cut_edges = cut_size(G, S, T=T, weight=weight) + num_total_edges = G.number_of_edges() + return num_cut_edges / (2 * num_total_edges) + + +# TODO What is the generalization to two arguments, S and T? Does the +# denominator become `min(len(S), len(T))`? +@nx._dispatchable +def node_expansion(G, S): + """Returns the node expansion of the set `S`. + + The *node expansion* is the quotient of the size of the node + boundary of *S* and the cardinality of *S*. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + Returns + ------- + number + The node expansion of the set `S`. + + See also + -------- + boundary_expansion + edge_expansion + mixing_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends + in Theoretical Computer Science* 7.1–3 (2011): 1–336. 
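+
+    Examples
+    --------
+    A small illustrative sketch: in the 6-cycle, the union of the
+    neighborhoods of ``S = {0, 1, 2}`` is ``{0, 1, 2, 3, 5}``, which has
+    five nodes, so the expansion is ``5 / 3``.
+
+    >>> G = nx.cycle_graph(6)
+    >>> nx.node_expansion(G, {0, 1, 2})
+    1.6666666666666667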
+ + + """ + neighborhood = set(chain.from_iterable(G.neighbors(v) for v in S)) + return len(neighborhood) / len(S) + + +# TODO What is the generalization to two arguments, S and T? Does the +# denominator become `min(len(S), len(T))`? +@nx._dispatchable +def boundary_expansion(G, S): + """Returns the boundary expansion of the set `S`. + + The *boundary expansion* is the quotient of the size + of the node boundary and the cardinality of *S*. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + Returns + ------- + number + The boundary expansion of the set `S`. + + See also + -------- + edge_expansion + mixing_expansion + node_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends in Theoretical Computer Science* + 7.1–3 (2011): 1–336. + + + """ + return len(nx.node_boundary(G, S)) / len(S) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/cycles.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/cycles.py new file mode 100644 index 0000000..3890a20 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/cycles.py @@ -0,0 +1,1234 @@ +""" +======================== +Cycle finding algorithms +======================== +""" + +from collections import defaultdict +from itertools import combinations, product +from math import inf + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "cycle_basis", + "simple_cycles", + "recursive_simple_cycles", + "find_cycle", + "minimum_cycle_basis", + "chordless_cycles", + "girth", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def cycle_basis(G, root=None): + """Returns a list of cycles which form a basis for cycles of G. + + A basis for cycles of a network is a minimal collection of + cycles such that any cycle in the network can be written + as a sum of cycles in the basis. Here summation of cycles + is defined as "exclusive or" of the edges. Cycle bases are + useful, e.g. when deriving equations for electric circuits + using Kirchhoff's Laws. + + Parameters + ---------- + G : NetworkX Graph + root : node, optional + Specify starting node for basis. + + Returns + ------- + A list of cycle lists. Each cycle list is a list of nodes + which forms a cycle (loop) in G. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [0, 3, 4, 5]) + >>> nx.cycle_basis(G, 0) + [[3, 4, 5, 0], [1, 2, 3, 0]] + + Notes + ----- + This is adapted from algorithm CACM 491 [1]_. + + References + ---------- + .. [1] Paton, K. An algorithm for finding a fundamental set of + cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518. 
+ + See Also + -------- + simple_cycles + minimum_cycle_basis + """ + gnodes = dict.fromkeys(G) # set-like object that maintains node order + cycles = [] + while gnodes: # loop over connected components + if root is None: + root = gnodes.popitem()[0] + stack = [root] + pred = {root: root} + used = {root: set()} + while stack: # walk the spanning tree finding cycles + z = stack.pop() # use last-in so cycles easier to find + zused = used[z] + for nbr in G[z]: + if nbr not in used: # new node + pred[nbr] = z + stack.append(nbr) + used[nbr] = {z} + elif nbr == z: # self loops + cycles.append([z]) + elif nbr not in zused: # found a cycle + pn = used[nbr] + cycle = [nbr, z] + p = pred[z] + while p not in pn: + cycle.append(p) + p = pred[p] + cycle.append(p) + cycles.append(cycle) + used[nbr].add(z) + for node in pred: + gnodes.pop(node, None) + root = None + return cycles + + +@nx._dispatchable +def simple_cycles(G, length_bound=None): + """Find simple cycles (elementary circuits) of a graph. + + A "simple cycle", or "elementary circuit", is a closed path where + no node appears twice. In a directed graph, two simple cycles are distinct + if they are not cyclic permutations of each other. In an undirected graph, + two simple cycles are distinct if they are not cyclic permutations of each + other nor of the other's reversal. + + Optionally, the cycles are bounded in length. In the unbounded case, we use + a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. In + the bounded case, we use a version of the algorithm of Gupta and + Suzumura [2]_. There may be better algorithms for some cases [3]_ [4]_ [5]_. + + The algorithms of Johnson, and Gupta and Suzumura, are enhanced by some + well-known preprocessing techniques. When `G` is directed, we restrict our + attention to strongly connected components of `G`, generate all simple cycles + containing a certain node, remove that node, and further decompose the + remainder into strongly connected components. When `G` is undirected, we + restrict our attention to biconnected components, generate all simple cycles + containing a particular edge, remove that edge, and further decompose the + remainder into biconnected components. + + Note that multigraphs are supported by this function -- and in undirected + multigraphs, a pair of parallel edges is considered a cycle of length 2. + Likewise, self-loops are considered to be cycles of length 1. We define + cycles as sequences of nodes; so the presence of loops and parallel edges + does not change the number of simple cycles in a graph. + + Parameters + ---------- + G : NetworkX Graph + A networkx graph. Undirected, directed, and multigraphs are all supported. + + length_bound : int or None, optional (default=None) + If `length_bound` is an int, generate all simple cycles of `G` with length at + most `length_bound`. Otherwise, generate all simple cycles of `G`. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + Examples + -------- + >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]) + >>> sorted(nx.simple_cycles(G)) + [[0], [0, 1, 2], [0, 2], [1, 2], [2]] + + To filter the cycles so that they don't include certain nodes or edges, + copy your graph and eliminate those nodes or edges before calling. 
+ For example, to exclude self-loops from the above example: + + >>> H = G.copy() + >>> H.remove_edges_from(nx.selfloop_edges(G)) + >>> sorted(nx.simple_cycles(H)) + [[0, 1, 2], [0, 2], [1, 2]] + + Notes + ----- + When `length_bound` is None, the time complexity is $O((n+e)(c+1))$ for $n$ + nodes, $e$ edges and $c$ simple circuits. Otherwise, when ``length_bound > 1``, + the time complexity is $O((c+n)(k-1)d^k)$ where $d$ is the average degree of + the nodes of `G` and $k$ = `length_bound`. + + Raises + ------ + ValueError + when ``length_bound < 0``. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + .. [2] Finding All Bounded-Length Simple Cycles in a Directed Graph + A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094 + .. [3] Enumerating the cycles of a digraph: a new preprocessing strategy. + G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982. + .. [4] A search strategy for the elementary cycles of a directed graph. + J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS, + v. 16, no. 2, 192-204, 1976. + .. [5] Optimal Listing of Cycles and st-Paths in Undirected Graphs + R. Ferreira and R. Grossi and A. Marino and N. Pisanti and R. Rizzi and + G. Sacomoto https://arxiv.org/abs/1205.2766 + + See Also + -------- + cycle_basis + chordless_cycles + """ + + if length_bound is not None: + if length_bound == 0: + return + elif length_bound < 0: + raise ValueError("length bound must be non-negative") + + directed = G.is_directed() + yield from ([v] for v, Gv in G.adj.items() if v in Gv) + + if length_bound is not None and length_bound == 1: + return + + if G.is_multigraph() and not directed: + visited = set() + for u, Gu in G.adj.items(): + multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited) + yield from ([u, v] for v, m in multiplicity if m > 1) + visited.add(u) + + # explicitly filter out loops; implicitly filter out parallel edges + if directed: + G = nx.DiGraph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u) + else: + G = nx.Graph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u) + + # this case is not strictly necessary but improves performance + if length_bound is not None and length_bound == 2: + if directed: + visited = set() + for u, Gu in G.adj.items(): + yield from ( + [v, u] for v in visited.intersection(Gu) if G.has_edge(v, u) + ) + visited.add(u) + return + + if directed: + yield from _directed_cycle_search(G, length_bound) + else: + yield from _undirected_cycle_search(G, length_bound) + + +def _directed_cycle_search(G, length_bound): + """A dispatch function for `simple_cycles` for directed graphs. + + We generate all cycles of G through binary partition. + + 1. Pick a node v in G which belongs to at least one cycle + a. Generate all cycles of G which contain the node v. + b. Recursively generate all cycles of G \\ v. + + This is accomplished through the following: + + 1. Compute the strongly connected components SCC of G. + 2. Select and remove a biconnected component C from BCC. Select a + non-tree edge (u, v) of a depth-first search of G[C]. + 3. For each simple cycle P containing v in G[C], yield P. + 4. Add the biconnected components of G[C \\ v] to BCC. + + If the parameter length_bound is not None, then step 3 will be limited to + simple cycles of length at most length_bound. 
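+
+    For instance, through the public entry point (an illustrative
+    sketch; the node order within each cycle is implementation-defined,
+    so the cycles are sorted before comparing): bounding the length at 2
+    keeps only the digon of this small digraph.
+
+    >>> D = nx.DiGraph([(0, 1), (1, 2), (2, 0), (0, 2)])
+    >>> sorted(sorted(c) for c in nx.simple_cycles(D, length_bound=2))
+    [[0, 2]]
+    >>> sorted(sorted(c) for c in nx.simple_cycles(D))
+    [[0, 1, 2], [0, 2]]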
+ + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + length_bound : int or None + If length_bound is an int, generate all simple cycles of G with length at most length_bound. + Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + """ + + scc = nx.strongly_connected_components + components = [c for c in scc(G) if len(c) >= 2] + while components: + c = components.pop() + Gc = G.subgraph(c) + v = next(iter(c)) + if length_bound is None: + yield from _johnson_cycle_search(Gc, [v]) + else: + yield from _bounded_cycle_search(Gc, [v], length_bound) + # delete v after searching G, to make sure we can find v + G.remove_node(v) + components.extend(c for c in scc(Gc) if len(c) >= 2) + + +def _undirected_cycle_search(G, length_bound): + """A dispatch function for `simple_cycles` for undirected graphs. + + We generate all cycles of G through binary partition. + + 1. Pick an edge (u, v) in G which belongs to at least one cycle + a. Generate all cycles of G which contain the edge (u, v) + b. Recursively generate all cycles of G \\ (u, v) + + This is accomplished through the following: + + 1. Compute the biconnected components BCC of G. + 2. Select and remove a biconnected component C from BCC. Select a + non-tree edge (u, v) of a depth-first search of G[C]. + 3. For each (v -> u) path P remaining in G[C] \\ (u, v), yield P. + 4. Add the biconnected components of G[C] \\ (u, v) to BCC. + + If the parameter length_bound is not None, then step 3 will be limited to simple paths + of length at most length_bound. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph + + length_bound : int or None + If length_bound is an int, generate all simple cycles of G with length at most length_bound. + Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + """ + + bcc = nx.biconnected_components + components = [c for c in bcc(G) if len(c) >= 3] + while components: + c = components.pop() + Gc = G.subgraph(c) + uv = list(next(iter(Gc.edges))) + G.remove_edge(*uv) + # delete (u, v) before searching G, to avoid fake 3-cycles [u, v, u] + if length_bound is None: + yield from _johnson_cycle_search(Gc, uv) + else: + yield from _bounded_cycle_search(Gc, uv, length_bound) + components.extend(c for c in bcc(Gc) if len(c) >= 3) + + +class _NeighborhoodCache(dict): + """Very lightweight graph wrapper which caches neighborhoods as list. + + This dict subclass uses the __missing__ functionality to query graphs for + their neighborhoods, and store the result as a list. This is used to avoid + the performance penalty incurred by subgraph views. + """ + + def __init__(self, G): + self.G = G + + def __missing__(self, v): + Gv = self[v] = list(self.G[v]) + return Gv + + +def _johnson_cycle_search(G, path): + """The main loop of the cycle-enumeration algorithm of Johnson. + + Parameters + ---------- + G : NetworkX Graph or DiGraph + A graph + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. 
+ https://doi.org/10.1137/0204007 + + """ + + G = _NeighborhoodCache(G) + blocked = set(path) + B = defaultdict(set) # graph portions that yield no elementary circuit + start = path[0] + stack = [iter(G[path[-1]])] + closed = [False] + while stack: + nbrs = stack[-1] + for w in nbrs: + if w == start: + yield path[:] + closed[-1] = True + elif w not in blocked: + path.append(w) + closed.append(False) + stack.append(iter(G[w])) + blocked.add(w) + break + else: # no more nbrs + stack.pop() + v = path.pop() + if closed.pop(): + if closed: + closed[-1] = True + unblock_stack = {v} + while unblock_stack: + u = unblock_stack.pop() + if u in blocked: + blocked.remove(u) + unblock_stack.update(B[u]) + B[u].clear() + else: + for w in G[v]: + B[w].add(v) + + +def _bounded_cycle_search(G, path, length_bound): + """The main loop of the cycle-enumeration algorithm of Gupta and Suzumura. + + Parameters + ---------- + G : NetworkX Graph or DiGraph + A graph + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + length_bound: int + A length bound. All cycles generated will have length at most length_bound. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Finding All Bounded-Length Simple Cycles in a Directed Graph + A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094 + + """ + G = _NeighborhoodCache(G) + lock = dict.fromkeys(path, 0) + B = defaultdict(set) + start = path[0] + stack = [iter(G[path[-1]])] + blen = [length_bound] + while stack: + nbrs = stack[-1] + for w in nbrs: + if w == start: + yield path[:] + blen[-1] = 1 + elif len(path) < lock.get(w, length_bound): + path.append(w) + blen.append(length_bound) + lock[w] = len(path) + stack.append(iter(G[w])) + break + else: + stack.pop() + v = path.pop() + bl = blen.pop() + if blen: + blen[-1] = min(blen[-1], bl) + if bl < length_bound: + relax_stack = [(bl, v)] + while relax_stack: + bl, u = relax_stack.pop() + if lock.get(u, length_bound) < length_bound - bl + 1: + lock[u] = length_bound - bl + 1 + relax_stack.extend((bl + 1, w) for w in B[u].difference(path)) + else: + for w in G[v]: + B[w].add(v) + + +@nx._dispatchable +def chordless_cycles(G, length_bound=None): + """Find simple chordless cycles of a graph. + + A `simple cycle` is a closed path where no node appears twice. In a simple + cycle, a `chord` is an additional edge between two nodes in the cycle. A + `chordless cycle` is a simple cycle without chords. Said differently, a + chordless cycle is a cycle C in a graph G where the number of edges in the + induced graph G[C] is equal to the length of `C`. + + Note that some care must be taken in the case that G is not a simple graph + nor a simple digraph. Some authors limit the definition of chordless cycles + to have a prescribed minimum length; we do not. + + 1. We interpret self-loops to be chordless cycles, except in multigraphs + with multiple loops in parallel. Likewise, in a chordless cycle of + length greater than 1, there can be no nodes with self-loops. + + 2. We interpret directed two-cycles to be chordless cycles, except in + multi-digraphs when any edge in a two-cycle has a parallel copy. + + 3. We interpret parallel pairs of undirected edges as two-cycles, except + when a third (or more) parallel edge exists between the two nodes. + + 4. Generalizing the above, edges with parallel clones may not occur in + chordless cycles. 
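+
+    As a short sketch of points 3 and 4 above: a doubled undirected edge
+    forms a 2-cycle, while tripling it removes the pair from every
+    chordless cycle.
+
+    >>> G2 = nx.MultiGraph([(0, 1), (0, 1)])
+    >>> [sorted(c) for c in nx.chordless_cycles(G2)]
+    [[0, 1]]
+    >>> G3 = nx.MultiGraph([(0, 1), (0, 1), (0, 1)])
+    >>> list(nx.chordless_cycles(G3))
+    []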
+ + In a directed graph, two chordless cycles are distinct if they are not + cyclic permutations of each other. In an undirected graph, two chordless + cycles are distinct if they are not cyclic permutations of each other nor of + the other's reversal. + + Optionally, the cycles are bounded in length. + + We use an algorithm strongly inspired by that of Dias et al [1]_. It has + been modified in the following ways: + + 1. Recursion is avoided, per Python's limitations + + 2. The labeling function is not necessary, because the starting paths + are chosen (and deleted from the host graph) to prevent multiple + occurrences of the same path + + 3. The search is optionally bounded at a specified length + + 4. Support for directed graphs is provided by extending cycles along + forward edges, and blocking nodes along forward and reverse edges + + 5. Support for multigraphs is provided by omitting digons from the set + of forward edges + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + length_bound : int or None, optional (default=None) + If length_bound is an int, generate all simple cycles of G with length at + most length_bound. Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + Examples + -------- + >>> sorted(list(nx.chordless_cycles(nx.complete_graph(4)))) + [[1, 0, 2], [1, 0, 3], [2, 0, 3], [2, 1, 3]] + + Notes + ----- + When length_bound is None, and the graph is simple, the time complexity is + $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ chordless cycles. + + Raises + ------ + ValueError + when length_bound < 0. + + References + ---------- + .. [1] Efficient enumeration of chordless cycles + E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi + https://arxiv.org/abs/1309.1051 + + See Also + -------- + simple_cycles + """ + + if length_bound is not None: + if length_bound == 0: + return + elif length_bound < 0: + raise ValueError("length bound must be non-negative") + + directed = G.is_directed() + multigraph = G.is_multigraph() + + if multigraph: + yield from ([v] for v, Gv in G.adj.items() if len(Gv.get(v, ())) == 1) + else: + yield from ([v] for v, Gv in G.adj.items() if v in Gv) + + if length_bound is not None and length_bound == 1: + return + + # Nodes with loops cannot belong to longer cycles. Let's delete them here. + # also, we implicitly reduce the multiplicity of edges down to 1 in the case + # of multiedges. + loops = set(nx.nodes_with_selfloops(G)) + edges = ((u, v) for u in G if u not in loops for v in G._adj[u] if v not in loops) + if directed: + F = nx.DiGraph(edges) + B = F.to_undirected(as_view=False) + else: + F = nx.Graph(edges) + B = None + + # If we're given a multigraph, we have a few cases to consider with parallel + # edges. + # + # 1. If we have 2 or more edges in parallel between the nodes (u, v), we + # must not construct longer cycles along (u, v). + # 2. If G is not directed, then a pair of parallel edges between (u, v) is a + # chordless cycle unless there exists a third (or more) parallel edge. + # 3. If G is directed, then parallel edges do not form cycles, but do + # preclude back-edges from forming cycles (handled in the next section), + # Thus, if an edge (u, v) is duplicated and the reverse (v, u) is also + # present, then we remove both from F. + # + # In directed graphs, we need to consider both directions that edges can + # take, so iterate over all edges (u, v) and possibly (v, u). 
In undirected + # graphs, we need to be a little careful to only consider every edge once, + # so we use a "visited" set to emulate node-order comparisons. + + if multigraph: + if not directed: + B = F.copy() + visited = set() + for u, Gu in G.adj.items(): + if u in loops: + continue + if directed: + multiplicity = ((v, len(Guv)) for v, Guv in Gu.items()) + for v, m in multiplicity: + if m > 1: + F.remove_edges_from(((u, v), (v, u))) + else: + multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited) + for v, m in multiplicity: + if m == 2: + yield [u, v] + if m > 1: + F.remove_edge(u, v) + visited.add(u) + + # If we're given a directed graphs, we need to think about digons. If we + # have two edges (u, v) and (v, u), then that's a two-cycle. If either edge + # was duplicated above, then we removed both from F. So, any digons we find + # here are chordless. After finding digons, we remove their edges from F + # to avoid traversing them in the search for chordless cycles. + if directed: + for u, Fu in F.adj.items(): + digons = [[u, v] for v in Fu if F.has_edge(v, u)] + yield from digons + F.remove_edges_from(digons) + F.remove_edges_from(e[::-1] for e in digons) + + if length_bound is not None and length_bound == 2: + return + + # Now, we prepare to search for cycles. We have removed all cycles of + # lengths 1 and 2, so F is a simple graph or simple digraph. We repeatedly + # separate digraphs into their strongly connected components, and undirected + # graphs into their biconnected components. For each component, we pick a + # node v, search for chordless cycles based at each "stem" (u, v, w), and + # then remove v from that component before separating the graph again. + if directed: + separate = nx.strongly_connected_components + + # Directed stems look like (u -> v -> w), so we use the product of + # predecessors of v with successors of v. + def stems(C, v): + for u, w in product(C.pred[v], C.succ[v]): + if not G.has_edge(u, w): # omit stems with acyclic chords + yield [u, v, w], F.has_edge(w, u) + + else: + separate = nx.biconnected_components + + # Undirected stems look like (u ~ v ~ w), but we must not also search + # (w ~ v ~ u), so we use combinations of v's neighbors of length 2. + def stems(C, v): + yield from (([u, v, w], F.has_edge(w, u)) for u, w in combinations(C[v], 2)) + + components = [c for c in separate(F) if len(c) > 2] + while components: + c = components.pop() + v = next(iter(c)) + Fc = F.subgraph(c) + Fcc = Bcc = None + for S, is_triangle in stems(Fc, v): + if is_triangle: + yield S + else: + if Fcc is None: + Fcc = _NeighborhoodCache(Fc) + Bcc = Fcc if B is None else _NeighborhoodCache(B.subgraph(c)) + yield from _chordless_cycle_search(Fcc, Bcc, S, length_bound) + + components.extend(c for c in separate(F.subgraph(c - {v})) if len(c) > 2) + + +def _chordless_cycle_search(F, B, path, length_bound): + """The main loop for chordless cycle enumeration. + + This algorithm is strongly inspired by that of Dias et al [1]_. It has been + modified in the following ways: + + 1. Recursion is avoided, per Python's limitations + + 2. The labeling function is not necessary, because the starting paths + are chosen (and deleted from the host graph) to prevent multiple + occurrences of the same path + + 3. The search is optionally bounded at a specified length + + 4. Support for directed graphs is provided by extending cycles along + forward edges, and blocking nodes along forward and reverse edges + + 5. 
Support for multigraphs is provided by omitting digons from the set + of forward edges + + Parameters + ---------- + F : _NeighborhoodCache + A graph of forward edges to follow in constructing cycles + + B : _NeighborhoodCache + A graph of blocking edges to prevent the production of chordless cycles + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + length_bound : int + A length bound. All cycles generated will have length at most length_bound. + + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Efficient enumeration of chordless cycles + E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi + https://arxiv.org/abs/1309.1051 + + """ + blocked = defaultdict(int) + target = path[0] + blocked[path[1]] = 1 + for w in path[1:]: + for v in B[w]: + blocked[v] += 1 + + stack = [iter(F[path[2]])] + while stack: + nbrs = stack[-1] + for w in nbrs: + if blocked[w] == 1 and (length_bound is None or len(path) < length_bound): + Fw = F[w] + if target in Fw: + yield path + [w] + else: + Bw = B[w] + if target in Bw: + continue + for v in Bw: + blocked[v] += 1 + path.append(w) + stack.append(iter(Fw)) + break + else: + stack.pop() + for v in B[path.pop()]: + blocked[v] -= 1 + + +@not_implemented_for("undirected") +@nx._dispatchable(mutates_input=True) +def recursive_simple_cycles(G): + """Find simple cycles (elementary circuits) of a directed graph. + + A `simple cycle`, or `elementary circuit`, is a closed path where + no node appears twice. Two elementary circuits are distinct if they + are not cyclic permutations of each other. + + This version uses a recursive algorithm to build a list of cycles. + You should probably use the iterator version called simple_cycles(). + Warning: This recursive version uses lots of RAM! + It appears in NetworkX for pedagogical value. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + A list of cycles, where each cycle is represented by a list of nodes + along the cycle. + + Example: + + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) + >>> nx.recursive_simple_cycles(G) + [[0], [2], [0, 1, 2], [0, 2], [1, 2]] + + Notes + ----- + The implementation follows pp. 79-80 in [1]_. + + The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ + elementary circuits. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + + See Also + -------- + simple_cycles, cycle_basis + """ + + # Jon Olav Vik, 2010-08-09 + def _unblock(thisnode): + """Recursively unblock and remove nodes from B[thisnode].""" + if blocked[thisnode]: + blocked[thisnode] = False + while B[thisnode]: + _unblock(B[thisnode].pop()) + + def circuit(thisnode, startnode, component): + closed = False # set to True if elementary path is closed + path.append(thisnode) + blocked[thisnode] = True + for nextnode in component[thisnode]: # direct successors of thisnode + if nextnode == startnode: + result.append(path[:]) + closed = True + elif not blocked[nextnode]: + if circuit(nextnode, startnode, component): + closed = True + if closed: + _unblock(thisnode) + else: + for nextnode in component[thisnode]: + if thisnode not in B[nextnode]: # TODO: use set for speedup? 
+ B[nextnode].append(thisnode) + path.pop() # remove thisnode from path + return closed + + path = [] # stack of nodes in current path + blocked = defaultdict(bool) # vertex: blocked from search? + B = defaultdict(list) # graph portions that yield no elementary circuit + result = [] # list to accumulate the circuits found + + # Johnson's algorithm exclude self cycle edges like (v, v) + # To be backward compatible, we record those cycles in advance + # and then remove from subG + for v in G: + if G.has_edge(v, v): + result.append([v]) + G.remove_edge(v, v) + + # Johnson's algorithm requires some ordering of the nodes. + # They might not be sortable so we assign an arbitrary ordering. + ordering = dict(zip(G, range(len(G)))) + for s in ordering: + # Build the subgraph induced by s and following nodes in the ordering + subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s]) + # Find the strongly connected component in the subgraph + # that contains the least node according to the ordering + strongcomp = nx.strongly_connected_components(subgraph) + mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns)) + component = G.subgraph(mincomp) + if len(component) > 1: + # smallest node in the component according to the ordering + startnode = min(component, key=ordering.__getitem__) + for node in component: + blocked[node] = False + B[node][:] = [] + dummy = circuit(startnode, startnode, component) + return result + + +@nx._dispatchable +def find_cycle(G, source=None, orientation=None): + """Returns a cycle found via depth-first traversal. + + The cycle is a list of edges indicating the cyclic path. + Orientation of directed edges is controlled by `orientation`. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Returns + ------- + edges : directed edges + A list of directed edges indicating the path taken for the loop. + If no cycle is found, then an exception is raised. + For graphs, an edge is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, an edge is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. + + Raises + ------ + NetworkXNoCycle + If no cycle was found. + + Examples + -------- + In this example, we construct a DAG and find, in the first call, that there + are no directed cycles, and so an exception is raised. 
In the second call, + we ignore edge orientations and find that there is an undirected cycle. + Note that the second call finds a directed cycle while effectively + traversing an undirected graph, and so, we found an "undirected cycle". + This means that this DAG structure does not form a directed tree (which + is also known as a polytree). + + >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) + >>> nx.find_cycle(G, orientation="original") + Traceback (most recent call last): + ... + networkx.exception.NetworkXNoCycle: No cycle found. + >>> list(nx.find_cycle(G, orientation="ignore")) + [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')] + + See Also + -------- + simple_cycles + """ + if not G.is_directed() or orientation in (None, "original"): + + def tailhead(edge): + return edge[:2] + + elif orientation == "reverse": + + def tailhead(edge): + return edge[1], edge[0] + + elif orientation == "ignore": + + def tailhead(edge): + if edge[-1] == "reverse": + return edge[1], edge[0] + return edge[:2] + + explored = set() + cycle = [] + final_node = None + for start_node in G.nbunch_iter(source): + if start_node in explored: + # No loop is possible. + continue + + edges = [] + # All nodes seen in this iteration of edge_dfs + seen = {start_node} + # Nodes in active path. + active_nodes = {start_node} + previous_head = None + + for edge in nx.edge_dfs(G, start_node, orientation): + # Determine if this edge is a continuation of the active path. + tail, head = tailhead(edge) + if head in explored: + # Then we've already explored it. No loop is possible. + continue + if previous_head is not None and tail != previous_head: + # This edge results from backtracking. + # Pop until we get a node whose head equals the current tail. + # So for example, we might have: + # (0, 1), (1, 2), (2, 3), (1, 4) + # which must become: + # (0, 1), (1, 4) + while True: + try: + popped_edge = edges.pop() + except IndexError: + edges = [] + active_nodes = {tail} + break + else: + popped_head = tailhead(popped_edge)[1] + active_nodes.remove(popped_head) + + if edges: + last_head = tailhead(edges[-1])[1] + if tail == last_head: + break + edges.append(edge) + + if head in active_nodes: + # We have a loop! + cycle.extend(edges) + final_node = head + break + else: + seen.add(head) + active_nodes.add(head) + previous_head = head + + if cycle: + break + else: + explored.update(seen) + + else: + assert len(cycle) == 0 + raise nx.exception.NetworkXNoCycle("No cycle found.") + + # We now have a list of edges which ends on a cycle. + # So we need to remove from the beginning edges that are not relevant. + + for i, edge in enumerate(cycle): + tail, head = tailhead(edge) + if tail == final_node: + break + + return cycle[i:] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def minimum_cycle_basis(G, weight=None): + """Returns a minimum weight cycle basis for G + + Minimum weight means a cycle basis for which the total weight + (length for unweighted graphs) of all the cycles is minimum. + + Parameters + ---------- + G : NetworkX Graph + weight: string + name of the edge attribute to use for edge weights + + Returns + ------- + A list of cycle lists. Each cycle list is a list of nodes + which forms a cycle (loop) in G. 
Note that the nodes are not
+    necessarily returned in the order in which they appear in the cycle.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> nx.add_cycle(G, [0, 1, 2, 3])
+    >>> nx.add_cycle(G, [0, 3, 4, 5])
+    >>> nx.minimum_cycle_basis(G)
+    [[5, 4, 3, 0], [3, 2, 1, 0]]
+
+    References:
+        [1] Kavitha, Telikepalli, et al. "An O(m^2n) Algorithm for
+        Minimum Cycle Basis of Graphs."
+        http://link.springer.com/article/10.1007/s00453-007-9064-z
+        [2] de Pina, J. 1995. Applications of shortest path methods.
+        Ph.D. thesis, University of Amsterdam, Netherlands
+
+    See Also
+    --------
+    simple_cycles, cycle_basis
+    """
+    # We first split the graph into connected subgraphs
+    return sum(
+        (_min_cycle_basis(G.subgraph(c), weight) for c in nx.connected_components(G)),
+        [],
+    )
+
+
+def _min_cycle_basis(G, weight):
+    cb = []
+    # We extract the edges not in a spanning tree. We do not really need a
+    # *minimum* spanning tree. That is why we call the next function with
+    # weight=None. Depending on the implementation, it may be faster as well.
+    tree_edges = list(nx.minimum_spanning_edges(G, weight=None, data=False))
+    chords = G.edges - tree_edges - {(v, u) for u, v in tree_edges}
+
+    # We maintain a set of vectors orthogonal to the cycles found so far
+    set_orth = [{edge} for edge in chords]
+    while set_orth:
+        base = set_orth.pop()
+        # the kth cycle is "parallel" to the kth vector in set_orth
+        cycle_edges = _min_cycle(G, base, weight)
+        cb.append([v for u, v in cycle_edges])
+
+        # now update set_orth so that the (k+1)-th, (k+2)-th, ... elements are
+        # orthogonal to the newly found cycle, as per [p. 336, 1]
+        set_orth = [
+            (
+                {e for e in orth if e not in base if e[::-1] not in base}
+                | {e for e in base if e not in orth if e[::-1] not in orth}
+            )
+            if sum((e in orth or e[::-1] in orth) for e in cycle_edges) % 2
+            else orth
+            for orth in set_orth
+        ]
+    return cb
+
+
+def _min_cycle(G, orth, weight):
+    """
+    Computes the minimum weight cycle in G,
+    orthogonal to the vector orth as per [p. 338, 1].
+    Use (u, 1) to indicate the lifted copy of u (denoted u' in the paper).
+    """
+    Gi = nx.Graph()
+
+    # Add 2 copies of each edge in G to Gi.
+ # If edge is in orth, add cross edge; otherwise in-plane edge + for u, v, wt in G.edges(data=weight, default=1): + if (u, v) in orth or (v, u) in orth: + Gi.add_edges_from([(u, (v, 1)), ((u, 1), v)], Gi_weight=wt) + else: + Gi.add_edges_from([(u, v), ((u, 1), (v, 1))], Gi_weight=wt) + + # find the shortest length in Gi between n and (n, 1) for each n + # Note: Use "Gi_weight" for name of weight attribute + spl = nx.shortest_path_length + lift = {n: spl(Gi, source=n, target=(n, 1), weight="Gi_weight") for n in G} + + # Now compute that short path in Gi, which translates to a cycle in G + start = min(lift, key=lift.get) + end = (start, 1) + min_path_i = nx.shortest_path(Gi, source=start, target=end, weight="Gi_weight") + + # Now we obtain the actual path, re-map nodes in Gi to those in G + min_path = [n if n in G else n[0] for n in min_path_i] + + # Now remove the edges that occur two times + # two passes: flag which edges get kept, then build it + edgelist = list(pairwise(min_path)) + edgeset = set() + for e in edgelist: + if e in edgeset: + edgeset.remove(e) + elif e[::-1] in edgeset: + edgeset.remove(e[::-1]) + else: + edgeset.add(e) + + min_edgelist = [] + for e in edgelist: + if e in edgeset: + min_edgelist.append(e) + edgeset.remove(e) + elif e[::-1] in edgeset: + min_edgelist.append(e[::-1]) + edgeset.remove(e[::-1]) + + return min_edgelist + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def girth(G): + """Returns the girth of the graph. + + The girth of a graph is the length of its shortest cycle, or infinity if + the graph is acyclic. The algorithm follows the description given on the + Wikipedia page [1]_, and runs in time O(mn) on a graph with m edges and n + nodes. + + Parameters + ---------- + G : NetworkX Graph + + Returns + ------- + int or math.inf + + Examples + -------- + All examples below (except P_5) can easily be checked using Wikipedia, + which has a page for each of these famous graphs. + + >>> nx.girth(nx.chvatal_graph()) + 4 + >>> nx.girth(nx.tutte_graph()) + 4 + >>> nx.girth(nx.petersen_graph()) + 5 + >>> nx.girth(nx.heawood_graph()) + 6 + >>> nx.girth(nx.pappus_graph()) + 6 + >>> nx.girth(nx.path_graph(5)) + inf + + References + ---------- + .. [1] `Wikipedia: Girth `_ + + """ + girth = depth_limit = inf + tree_edge = nx.algorithms.traversal.breadth_first_search.TREE_EDGE + level_edge = nx.algorithms.traversal.breadth_first_search.LEVEL_EDGE + for n in G: + # run a BFS from source n, keeping track of distances; since we want + # the shortest cycle, no need to explore beyond the current minimum length + depth = {n: 0} + for u, v, label in nx.bfs_labeled_edges(G, n): + du = depth[u] + if du > depth_limit: + break + if label is tree_edge: + depth[v] = du + 1 + else: + # if (u, v) is a level edge, the length is du + du + 1 (odd) + # otherwise, it's a forward edge; length is du + (du + 1) + 1 (even) + delta = label is level_edge + length = du + du + 2 - delta + if length < girth: + girth = length + depth_limit = du - delta + + return girth diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/d_separation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/d_separation.py new file mode 100644 index 0000000..3d85a2c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/d_separation.py @@ -0,0 +1,677 @@ +""" +Algorithm for testing d-separation in DAGs. + +*d-separation* is a test for conditional independence in probability +distributions that can be factorized using DAGs. 
It is a purely +graphical test that uses the underlying graph and makes no reference +to the actual distribution parameters. See [1]_ for a formal +definition. + +The implementation is based on the conceptually simple linear time +algorithm presented in [2]_. Refer to [3]_, [4]_ for a couple of +alternative algorithms. + +The functional interface in NetworkX consists of three functions: + +- `find_minimal_d_separator` returns a minimal d-separator set ``z``. + That is, removing any node or nodes from it makes it no longer a d-separator. +- `is_d_separator` checks if a given set is a d-separator. +- `is_minimal_d_separator` checks if a given set is a minimal d-separator. + +D-separators +------------ + +Here, we provide a brief overview of d-separation and related concepts that +are relevant for understanding it: + +The ideas of d-separation and d-connection relate to paths being open or blocked. + +- A "path" is a sequence of nodes connected in order by edges. Unlike for most + graph theory analysis, the direction of the edges is ignored. Thus the path + can be thought of as a traditional path on the undirected version of the graph. +- A "candidate d-separator" ``z`` is a set of nodes being considered as + possibly blocking all paths between two prescribed sets ``x`` and ``y`` of nodes. + We refer to each node in the candidate d-separator as "known". +- A "collider" node on a path is a node that is a successor of its two neighbor + nodes on the path. That is, ``c`` is a collider if the edge directions + along the path look like ``... u -> c <- v ...``. +- If a collider node or any of its descendants are "known", the collider + is called an "open collider". Otherwise it is a "blocking collider". +- Any path can be "blocked" in two ways. If the path contains a "known" node + that is not a collider, the path is blocked. Also, if the path contains a + collider that is not a "known" node, the path is blocked. +- A path is "open" if it is not blocked. That is, it is open if every node is + either an open collider or not a "known". Said another way, every + "known" in the path is a collider and every collider is open (has a + "known" as a inclusive descendant). The concept of "open path" is meant to + demonstrate a probabilistic conditional dependence between two nodes given + prescribed knowledge ("known" nodes). +- Two sets ``x`` and ``y`` of nodes are "d-separated" by a set of nodes ``z`` + if all paths between nodes in ``x`` and nodes in ``y`` are blocked. That is, + if there are no open paths from any node in ``x`` to any node in ``y``. + Such a set ``z`` is a "d-separator" of ``x`` and ``y``. +- A "minimal d-separator" is a d-separator ``z`` for which no node or subset + of nodes can be removed with it still being a d-separator. + +The d-separator blocks some paths between ``x`` and ``y`` but opens others. +Nodes in the d-separator block paths if the nodes are not colliders. +But if a collider or its descendant nodes are in the d-separation set, the +colliders are open, allowing a path through that collider. + +Illustration of D-separation with examples +------------------------------------------ + +A pair of two nodes, ``u`` and ``v``, are d-connected if there is a path +from ``u`` to ``v`` that is not blocked. That means, there is an open +path from ``u`` to ``v``. + +For example, if the d-separating set is the empty set, then the following paths are +open between ``u`` and ``v``: + +- u <- n -> v +- u -> w -> ... 
-> n -> v
+
+If on the other hand, ``n`` is in the d-separating set, then ``n`` blocks
+those paths between ``u`` and ``v``.
+
+Colliders block a path if they and their descendants are not included
+in the d-separating set. An example of a path that is blocked when the
+d-separating set is empty is:
+
+- u -> w -> ... -> n <- v
+
+The node ``n`` is a collider in this path and is not in the d-separating set.
+So ``n`` blocks this path. However, if ``n`` or a descendant of ``n`` is
+included in the d-separating set, then the path through the collider
+at ``n`` (... -> n <- ...) is "open".
+
+D-separation is concerned with blocking all paths between nodes from ``x`` to ``y``.
+A d-separating set between ``x`` and ``y`` is one where all paths are blocked.
+
+D-separation and its applications in probability
+------------------------------------------------
+
+D-separation is commonly used in probabilistic causal-graph models. D-separation
+connects the idea of probabilistic "dependence" with separation in a graph. If
+one assumes the causal Markov condition [5]_ (every node is conditionally
+independent of its non-descendants, given its parents), then d-separation implies
+conditional independence in probability distributions.
+Symmetrically, d-connection implies dependence.
+
+The intuition is as follows. The edges on a causal graph indicate which nodes
+influence the outcome of other nodes directly. An edge from ``u`` to ``v``
+implies that the outcome of event ``u`` influences the probabilities for
+the outcome of event ``v``. Certainly knowing ``u`` changes predictions for ``v``.
+But also knowing ``v`` changes predictions for ``u``. The outcomes are dependent.
+Furthermore, an edge from ``v`` to ``w`` would mean that ``w`` and ``v`` are dependent
+and thus that ``u`` could indirectly influence ``w``.
+
+Without any knowledge about the system (candidate d-separating set is empty)
+a causal graph ``u -> v -> w`` allows all three nodes to be dependent. But
+if we know the outcome of ``v``, the conditional probabilities of outcomes for
+``u`` and ``w`` are independent of each other. That is, once we know the outcome
+for ``v``, the probabilities for ``w`` do not depend on the outcome for ``u``.
+This is the idea behind ``v`` blocking the path if it is "known" (in the candidate
+d-separating set).
+
+The same argument works when both edges are left-going (``u <- v <- w``) and
+when both arrows head out from the middle (``u <- v -> w``). Having a "known"
+node on a path blocks the collider-free path because those relationships
+make the conditional probabilities independent.
+
+The direction of the causal edges does impact dependence precisely in the
+case of a collider, e.g. ``u -> v <- w``. In that situation, both ``u`` and ``w``
+influence ``v``. But they do not directly influence each other. So without any
+knowledge of any outcomes, ``u`` and ``w`` are independent. That is the idea behind
+colliders blocking the path. But, if ``v`` is known, the conditional probabilities
+of ``u`` and ``w`` can be dependent. This is the heart of Berkson's Paradox [6]_.
+For example, suppose ``u`` and ``w`` are boolean events (they either happen or do not)
+and ``v`` represents the outcome "at least one of ``u`` and ``w`` occur". Then knowing
+``v`` is true makes the conditional probabilities of ``u`` and ``w`` dependent.
+Essentially, knowing that at least one of them is true raises the probability of
+each. But further knowledge that ``w`` is true (or false) changes the conditional
+probability of ``u`` to either the original value or 1. So the conditional
+probability of ``u`` depends on the outcome of ``w`` even though there is no
+causal relationship between them. When a collider is known, dependence can
+occur across paths through that collider. This is the reason open colliders
+do not block paths.
+
+Furthermore, even if ``v`` is not "known", if one of its descendants is "known"
+we can use that information to know more about ``v``, which again makes
+``u`` and ``w`` potentially dependent. Suppose the chance of ``n`` occurring
+is much higher when ``v`` occurs ("at least one of ``u`` and ``w`` occur").
+Then if we know ``n`` occurred, it is more likely that ``v`` occurred, and that
+makes the chance of ``u`` and ``w`` dependent. This is the idea behind why
+a collider does not block a path if any descendant of the collider is "known".
+
+When two sets of nodes ``x`` and ``y`` are d-separated by a set ``z``,
+it means that given the outcomes of the nodes in ``z``, the probabilities
+of outcomes of the nodes in ``x`` are independent of the outcomes of the
+nodes in ``y`` and vice versa.
+
+Examples
+--------
+A Hidden Markov Model with 5 observed states and 5 hidden states,
+where the hidden states are causally linked in a path, results in
+the following causal network. We check that early states along the
+path are separated from late states in the path by the d-separator
+of the middle hidden state. Thus if we condition on the middle
+hidden state, the early state probabilities are independent of the
+late state outcomes.
+
+>>> G = nx.DiGraph()
+>>> G.add_edges_from(
+...     [
+...         ("H1", "H2"),
+...         ("H2", "H3"),
+...         ("H3", "H4"),
+...         ("H4", "H5"),
+...         ("H1", "O1"),
+...         ("H2", "O2"),
+...         ("H3", "O3"),
+...         ("H4", "O4"),
+...         ("H5", "O5"),
+...     ]
+... )
+>>> x, y, z = ({"H1", "O1"}, {"H5", "O5"}, {"H3"})
+>>> nx.is_d_separator(G, x, y, z)
+True
+>>> nx.is_minimal_d_separator(G, x, y, z)
+True
+>>> nx.is_minimal_d_separator(G, x, y, z | {"O3"})
+False
+>>> z = nx.find_minimal_d_separator(G, x | y, {"O2", "O3", "O4"})
+>>> z == {"H2", "H4"}
+True
+
+If no minimal d-separator exists, `None` is returned
+
+>>> other_z = nx.find_minimal_d_separator(G, x | y, {"H2", "H3"})
+>>> other_z is None
+True
+
+
+References
+----------
+
+.. [1] Pearl, J. (2009). Causality. Cambridge: Cambridge University Press.
+
+.. [2] Darwiche, A. (2009). Modeling and reasoning with Bayesian networks.
+   Cambridge: Cambridge University Press.
+
+.. [3] Shachter, Ross D. "Bayes-ball: The rational pastime (for
+   determining irrelevance and requisite information in belief networks
+   and influence diagrams)." In Proceedings of the Fourteenth Conference
+   on Uncertainty in Artificial Intelligence (UAI), (pp. 480–487). 1998.
+
+.. [4] Koller, D., & Friedman, N. (2009).
+   Probabilistic graphical models: principles and techniques. The MIT Press.
+
+.. [5] https://en.wikipedia.org/wiki/Causal_Markov_condition
+
+.. [6] https://en.wikipedia.org/wiki/Berkson%27s_paradox
+
+"""
+
+from collections import deque
+from itertools import chain
+
+import networkx as nx
+from networkx.utils import UnionFind, not_implemented_for
+
+__all__ = [
+    "is_d_separator",
+    "is_minimal_d_separator",
+    "find_minimal_d_separator",
+]
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def is_d_separator(G, x, y, z):
+    """Return whether node sets `x` and `y` are d-separated by `z`.
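+
+    For example (a small illustrative sketch): conditioning on the middle
+    node of a chain blocks the only path, while conditioning on a
+    collider opens it.
+
+    >>> G = nx.DiGraph([("a", "b"), ("b", "c")])
+    >>> nx.is_d_separator(G, "a", "c", {"b"})
+    True
+    >>> nx.is_d_separator(G, "a", "c", set())
+    False
+    >>> H = nx.DiGraph([("a", "c"), ("b", "c")])
+    >>> nx.is_d_separator(H, "a", "b", set())
+    True
+    >>> nx.is_d_separator(H, "a", "b", {"c"})
+    False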
+ + Parameters + ---------- + G : nx.DiGraph + A NetworkX DAG. + + x : node or set of nodes + First node or set of nodes in `G`. + + y : node or set of nodes + Second node or set of nodes in `G`. + + z : node or set of nodes + Potential separator (set of conditioning nodes in `G`). Can be empty set. + + Returns + ------- + b : bool + A boolean that is true if `x` is d-separated from `y` given `z` in `G`. + + Raises + ------ + NetworkXError + The *d-separation* test is commonly used on disjoint sets of + nodes in acyclic directed graphs. Accordingly, the algorithm + raises a :exc:`NetworkXError` if the node sets are not + disjoint or if the input graph is not a DAG. + + NodeNotFound + If any of the input nodes are not found in the graph, + a :exc:`NodeNotFound` exception is raised + + Notes + ----- + A d-separating set in a DAG is a set of nodes that + blocks all paths between the two sets. Nodes in `z` + block a path if they are part of the path and are not a collider, + or a descendant of a collider. Also colliders that are not in `z` + block a path. A collider structure along a path + is ``... -> c <- ...`` where ``c`` is the collider node. + + https://en.wikipedia.org/wiki/Bayesian_network#d-separation + """ + try: + x = {x} if x in G else x + y = {y} if y in G else y + z = {z} if z in G else z + + intersection = x & y or x & z or y & z + if intersection: + raise nx.NetworkXError( + f"The sets are not disjoint, with intersection {intersection}" + ) + + set_v = x | y | z + if set_v - G.nodes: + raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are not found in G") + except TypeError: + raise nx.NodeNotFound("One of x, y, or z is not a node or a set of nodes in G") + + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("graph should be directed acyclic") + + # contains -> and <-> edges from starting node T + forward_deque = deque([]) + forward_visited = set() + + # contains <- and - edges from starting node T + backward_deque = deque(x) + backward_visited = set() + + ancestors_or_z = set().union(*[nx.ancestors(G, node) for node in x]) | z | x + + while forward_deque or backward_deque: + if backward_deque: + node = backward_deque.popleft() + backward_visited.add(node) + if node in y: + return False + if node in z: + continue + + # add <- edges to backward deque + backward_deque.extend(G.pred[node].keys() - backward_visited) + # add -> edges to forward deque + forward_deque.extend(G.succ[node].keys() - forward_visited) + + if forward_deque: + node = forward_deque.popleft() + forward_visited.add(node) + if node in y: + return False + + # Consider if -> node <- is opened due to ancestor of node in z + if node in ancestors_or_z: + # add <- edges to backward deque + backward_deque.extend(G.pred[node].keys() - backward_visited) + if node not in z: + # add -> edges to forward deque + forward_deque.extend(G.succ[node].keys() - forward_visited) + + return True + + +@not_implemented_for("undirected") +@nx._dispatchable +def find_minimal_d_separator(G, x, y, *, included=None, restricted=None): + """Returns a minimal d-separating set between `x` and `y` if possible + + A d-separating set in a DAG is a set of nodes that blocks all + paths between the two sets of nodes, `x` and `y`. This function + constructs a d-separating set that is "minimal", meaning no nodes can + be removed without it losing the d-separating property for `x` and `y`. + If no d-separating sets exist for `x` and `y`, this returns `None`. 
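+
+    For example (an illustrative sketch): with a collider ``u -> v <- w``,
+    the empty set is already a minimal d-separator of ``u`` and ``w``,
+    while the adjacent pair ``u``, ``v`` cannot be d-separated at all.
+
+    >>> G = nx.DiGraph([("u", "v"), ("w", "v")])
+    >>> nx.find_minimal_d_separator(G, "u", "w")
+    set()
+    >>> nx.find_minimal_d_separator(G, "u", "v") is None
+    True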
+ + In a DAG there may be more than one minimal d-separator between two + sets of nodes. Minimal d-separators are not always unique. This function + returns one minimal d-separator, or `None` if no d-separator exists. + + Uses the algorithm presented in [1]_. The complexity of the algorithm + is :math:`O(m)`, where :math:`m` stands for the number of edges in + the subgraph of G consisting of only the ancestors of `x` and `y`. + For full details, see [1]_. + + Parameters + ---------- + G : graph + A networkx DAG. + x : set | node + A node or set of nodes in the graph. + y : set | node + A node or set of nodes in the graph. + included : set | node | None + A node or set of nodes which must be included in the found separating set, + default is None, which means the empty set. + restricted : set | node | None + Restricted node or set of nodes to consider. Only these nodes can be in + the found separating set, default is None meaning all nodes in ``G``. + + Returns + ------- + z : set | None + The minimal d-separating set, if at least one d-separating set exists, + otherwise None. + + Raises + ------ + NetworkXError + Raises a :exc:`NetworkXError` if the input graph is not a DAG + or if node sets `x`, `y`, and `included` are not disjoint. + + NodeNotFound + If any of the input nodes are not found in the graph, + a :exc:`NodeNotFound` exception is raised. + + References + ---------- + .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding + minimal d-separators in linear time and applications." In + Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020. + """ + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("graph should be directed acyclic") + + try: + x = {x} if x in G else x + y = {y} if y in G else y + + if included is None: + included = set() + elif included in G: + included = {included} + + if restricted is None: + restricted = set(G) + elif restricted in G: + restricted = {restricted} + + set_y = x | y | included | restricted + if set_y - G.nodes: + raise nx.NodeNotFound(f"The node(s) {set_y - G.nodes} are not found in G") + except TypeError: + raise nx.NodeNotFound( + "One of x, y, included or restricted is not a node or set of nodes in G" + ) + + if not included <= restricted: + raise nx.NetworkXError( + f"Included nodes {included} must be in restricted nodes {restricted}" + ) + + intersection = x & y or x & included or y & included + if intersection: + raise nx.NetworkXError( + f"The sets x, y, included are not disjoint. Overlap: {intersection}" + ) + + nodeset = x | y | included + ancestors_x_y_included = nodeset.union(*[nx.ancestors(G, node) for node in nodeset]) + + z_init = restricted & (ancestors_x_y_included - (x | y)) + + x_closure = _reachable(G, x, ancestors_x_y_included, z_init) + if x_closure & y: + return None + + z_updated = z_init & (x_closure | included) + y_closure = _reachable(G, y, ancestors_x_y_included, z_updated) + return z_updated & (y_closure | included) + + +@not_implemented_for("undirected") +@nx._dispatchable +def is_minimal_d_separator(G, x, y, z, *, included=None, restricted=None): + """Determine if `z` is a minimal d-separator for `x` and `y`. + + A d-separator, `z`, in a DAG is a set of nodes that blocks + all paths from nodes in set `x` to nodes in set `y`. + A minimal d-separator is a d-separator `z` such that removing + any subset of nodes makes it no longer a d-separator. + + Note: This function checks whether `z` is a d-separator AND is + minimal. 
One can use the function `is_d_separator` to only check if + `z` is a d-separator. See examples below. + + Parameters + ---------- + G : nx.DiGraph + A NetworkX DAG. + x : node | set + A node or set of nodes in the graph. + y : node | set + A node or set of nodes in the graph. + z : node | set + The node or set of nodes to check if it is a minimal d-separating set. + The function :func:`is_d_separator` is called inside this function + to verify that `z` is in fact a d-separator. + included : set | node | None + A node or set of nodes which must be included in the found separating set, + default is ``None``, which means the empty set. + restricted : set | node | None + Restricted node or set of nodes to consider. Only these nodes can be in + the found separating set, default is ``None`` meaning all nodes in ``G``. + + Returns + ------- + bool + Whether or not the set `z` is a minimal d-separator subject to + `restricted` nodes and `included` node constraints. + + Examples + -------- + >>> G = nx.path_graph([0, 1, 2, 3], create_using=nx.DiGraph) + >>> G.add_node(4) + >>> nx.is_minimal_d_separator(G, 0, 2, {1}) + True + >>> # since {1} is the minimal d-separator, {1, 3, 4} is not minimal + >>> nx.is_minimal_d_separator(G, 0, 2, {1, 3, 4}) + False + >>> # alternatively, if we only want to check that {1, 3, 4} is a d-separator + >>> nx.is_d_separator(G, 0, 2, {1, 3, 4}) + True + + Raises + ------ + NetworkXError + Raises a :exc:`NetworkXError` if the input graph is not a DAG. + + NodeNotFound + If any of the input nodes are not found in the graph, + a :exc:`NodeNotFound` exception is raised. + + References + ---------- + .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding + minimal d-separators in linear time and applications." In + Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020. + + Notes + ----- + This function works on verifying that a set is minimal and + d-separating between two nodes. Uses criterion (a), (b), (c) on + page 4 of [1]_. a) closure(`x`) and `y` are disjoint. b) `z` contains + all nodes from `included` and is contained in the `restricted` + nodes and in the union of ancestors of `x`, `y`, and `included`. + c) the nodes in `z` not in `included` are contained in both + closure(x) and closure(y). The closure of a set is the set of nodes + connected to the set by a directed path in G. + + The complexity is :math:`O(m)`, where :math:`m` stands for the + number of edges in the subgraph of G consisting of only the + ancestors of `x` and `y`. + + For full details, see [1]_. 
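+
+ A short sketch of the keyword constraints (illustrative): forcing node 1 into
+ the separator via ``included`` leaves ``{1, 2}`` non-minimal, since ``{1}``
+ alone already d-separates 0 and 3 in a path DAG:
+
+ >>> G = nx.path_graph([0, 1, 2, 3], create_using=nx.DiGraph)
+ >>> nx.is_minimal_d_separator(G, 0, 3, {1, 2}, included={1})
+ False
+ >>> nx.is_minimal_d_separator(G, 0, 3, {1}, included={1})
+ True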
+ """ + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("graph should be directed acyclic") + + try: + x = {x} if x in G else x + y = {y} if y in G else y + z = {z} if z in G else z + + if included is None: + included = set() + elif included in G: + included = {included} + + if restricted is None: + restricted = set(G) + elif restricted in G: + restricted = {restricted} + + set_y = x | y | included | restricted + if set_y - G.nodes: + raise nx.NodeNotFound(f"The node(s) {set_y - G.nodes} are not found in G") + except TypeError: + raise nx.NodeNotFound( + "One of x, y, z, included or restricted is not a node or set of nodes in G" + ) + + if not included <= z: + raise nx.NetworkXError( + f"Included nodes {included} must be in proposed separating set z {x}" + ) + if not z <= restricted: + raise nx.NetworkXError( + f"Separating set {z} must be contained in restricted set {restricted}" + ) + + intersection = x.intersection(y) or x.intersection(z) or y.intersection(z) + if intersection: + raise nx.NetworkXError( + f"The sets are not disjoint, with intersection {intersection}" + ) + + nodeset = x | y | included + ancestors_x_y_included = nodeset.union(*[nx.ancestors(G, n) for n in nodeset]) + + # criterion (a) -- check that z is actually a separator + x_closure = _reachable(G, x, ancestors_x_y_included, z) + if x_closure & y: + return False + + # criterion (b) -- basic constraint; included and restricted already checked above + if not (z <= ancestors_x_y_included): + return False + + # criterion (c) -- check that z is minimal + y_closure = _reachable(G, y, ancestors_x_y_included, z) + if not ((z - included) <= (x_closure & y_closure)): + return False + return True + + +@not_implemented_for("undirected") +def _reachable(G, x, a, z): + """Modified Bayes-Ball algorithm for finding d-connected nodes. + + Find all nodes in `a` that are d-connected to those in `x` by + those in `z`. This is an implementation of the function + `REACHABLE` in [1]_ (which is itself a modification of the + Bayes-Ball algorithm [2]_) when restricted to DAGs. + + Parameters + ---------- + G : nx.DiGraph + A NetworkX DAG. + x : node | set + A node in the DAG, or a set of nodes. + a : node | set + A (set of) node(s) in the DAG containing the ancestors of `x`. + z : node | set + The node or set of nodes conditioned on when checking d-connectedness. + + Returns + ------- + w : set + The closure of `x` in `a` with respect to d-connectedness + given `z`. + + References + ---------- + .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding + minimal d-separators in linear time and applications." In + Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020. + + .. [2] Shachter, Ross D. "Bayes-ball: The rational pastime + (for determining irrelevance and requisite information in + belief networks and influence diagrams)." In Proceedings of the + Fourteenth Conference on Uncertainty in Artificial Intelligence + (UAI), (pp. 480–487). 1998. + """ + + def _pass(e, v, f, n): + """Whether a ball entering node `v` along edge `e` passes to `n` along `f`. + + Boolean function defined on page 6 of [1]_. + + Parameters + ---------- + e : bool + Directed edge by which the ball got to node `v`; `True` iff directed into `v`. + v : node + Node where the ball is. + f : bool + Directed edge connecting nodes `v` and `n`; `True` iff directed `n`. + n : node + Checking whether the ball passes to this node. + + Returns + ------- + b : bool + Whether the ball passes or not. + + References + ---------- + .. 
[1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding + minimal d-separators in linear time and applications." In + Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020. + """ + is_element_of_A = n in a + # almost_definite_status = True # always true for DAGs; not so for RCGs + collider_if_in_Z = v not in z or (e and not f) + return is_element_of_A and collider_if_in_Z # and almost_definite_status + + queue = deque([]) + for node in x: + if bool(G.pred[node]): + queue.append((True, node)) + if bool(G.succ[node]): + queue.append((False, node)) + processed = queue.copy() + + while any(queue): + e, v = queue.popleft() + preds = ((False, n) for n in G.pred[v]) + succs = ((True, n) for n in G.succ[v]) + f_n_pairs = chain(preds, succs) + for f, n in f_n_pairs: + if (f, n) not in processed and _pass(e, v, f, n): + queue.append((f, n)) + processed.append((f, n)) + + return {w for (_, w) in processed} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/dag.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/dag.py new file mode 100644 index 0000000..130b4dc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/dag.py @@ -0,0 +1,1468 @@ +"""Algorithms for directed acyclic graphs (DAGs). + +Note that most of these functions are only guaranteed to work for DAGs. +In general, these functions do not check for acyclic-ness, so it is up +to the user to check for that. +""" + +import heapq +from collections import deque +from functools import partial +from itertools import chain, combinations, product, starmap +from math import gcd + +import networkx as nx +from networkx.utils import arbitrary_element, not_implemented_for, pairwise + +__all__ = [ + "descendants", + "ancestors", + "topological_sort", + "lexicographical_topological_sort", + "all_topological_sorts", + "topological_generations", + "is_directed_acyclic_graph", + "is_aperiodic", + "transitive_closure", + "transitive_closure_dag", + "transitive_reduction", + "antichains", + "dag_longest_path", + "dag_longest_path_length", + "dag_to_branching", + "compute_v_structures", +] + +chaini = chain.from_iterable + + +@nx._dispatchable +def descendants(G, source): + """Returns all nodes reachable from `source` in `G`. + + Parameters + ---------- + G : NetworkX Graph + source : node in `G` + + Returns + ------- + set() + The descendants of `source` in `G` + + Raises + ------ + NetworkXError + If node `source` is not in `G`. + + Examples + -------- + >>> DG = nx.path_graph(5, create_using=nx.DiGraph) + >>> sorted(nx.descendants(DG, 2)) + [3, 4] + + The `source` node is not a descendant of itself, but can be included manually: + + >>> sorted(nx.descendants(DG, 2) | {2}) + [2, 3, 4] + + See also + -------- + ancestors + """ + return {child for parent, child in nx.bfs_edges(G, source)} + + +@nx._dispatchable +def ancestors(G, source): + """Returns all nodes having a path to `source` in `G`. + + Parameters + ---------- + G : NetworkX Graph + source : node in `G` + + Returns + ------- + set() + The ancestors of `source` in `G` + + Raises + ------ + NetworkXError + If node `source` is not in `G`. 
+
+ Examples
+ --------
+ >>> DG = nx.path_graph(5, create_using=nx.DiGraph)
+ >>> sorted(nx.ancestors(DG, 2))
+ [0, 1]
+
+ The `source` node is not an ancestor of itself, but can be included manually:
+
+ >>> sorted(nx.ancestors(DG, 2) | {2})
+ [0, 1, 2]
+
+ See also
+ --------
+ descendants
+ """
+ return {child for parent, child in nx.bfs_edges(G, source, reverse=True)}
+
+
+@nx._dispatchable
+def has_cycle(G):
+ """Decides whether the directed graph has a cycle."""
+ try:
+ # Feed the entire iterator into a zero-length deque.
+ deque(topological_sort(G), maxlen=0)
+ except nx.NetworkXUnfeasible:
+ return True
+ else:
+ return False
+
+
+@nx._dispatchable
+def is_directed_acyclic_graph(G):
+ """Returns True if the graph `G` is a directed acyclic graph (DAG) or
+ False if not.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ Returns
+ -------
+ bool
+ True if `G` is a DAG, False otherwise
+
+ Examples
+ --------
+ Undirected graph::
+
+ >>> G = nx.Graph([(1, 2), (2, 3)])
+ >>> nx.is_directed_acyclic_graph(G)
+ False
+
+ Directed graph with cycle::
+
+ >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
+ >>> nx.is_directed_acyclic_graph(G)
+ False
+
+ Directed acyclic graph::
+
+ >>> G = nx.DiGraph([(1, 2), (2, 3)])
+ >>> nx.is_directed_acyclic_graph(G)
+ True
+
+ See also
+ --------
+ topological_sort
+ """
+ return G.is_directed() and not has_cycle(G)
+
+
+@nx._dispatchable
+def topological_generations(G):
+ """Stratifies a DAG into generations.
+
+ A topological generation is a node collection in which ancestors of a node in each
+ generation are guaranteed to be in a previous generation, and any descendants of
+ a node are guaranteed to be in a following generation. Nodes are guaranteed to
+ be in the earliest possible generation that they can belong to.
+
+ Parameters
+ ----------
+ G : NetworkX digraph
+ A directed acyclic graph (DAG)
+
+ Yields
+ ------
+ sets of nodes
+ Yields sets of nodes representing each generation.
+
+ Raises
+ ------
+ NetworkXError
+ Generations are defined for directed graphs only. If the graph
+ `G` is undirected, a :exc:`NetworkXError` is raised.
+
+ NetworkXUnfeasible
+ If `G` is not a directed acyclic graph (DAG) no topological generations
+ exist and a :exc:`NetworkXUnfeasible` exception is raised. This can also
+ be raised if `G` is changed while the returned iterator is being processed
+
+ RuntimeError
+ If `G` is changed while the returned iterator is being processed.
+
+ Examples
+ --------
+ >>> DG = nx.DiGraph([(2, 1), (3, 1)])
+ >>> [sorted(generation) for generation in nx.topological_generations(DG)]
+ [[2, 3], [1]]
+
+ Notes
+ -----
+ The generation in which a node resides can also be determined by the length
+ of the longest path to that node from any root (in-degree zero) node. That value can
+ be obtained with this function using `enumerate(topological_generations(G))`.
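+
+ A small sketch of that pattern (illustrative):
+
+ >>> DG = nx.DiGraph([(2, 1), (3, 1)])
+ >>> {node: gen for gen, nodes in enumerate(nx.topological_generations(DG)) for node in nodes}
+ {2: 0, 3: 0, 1: 1}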
+ + See also + -------- + topological_sort + """ + if not G.is_directed(): + raise nx.NetworkXError("Topological sort not defined on undirected graphs.") + + multigraph = G.is_multigraph() + indegree_map = {v: d for v, d in G.in_degree() if d > 0} + zero_indegree = [v for v, d in G.in_degree() if d == 0] + + while zero_indegree: + this_generation = zero_indegree + zero_indegree = [] + for node in this_generation: + if node not in G: + raise RuntimeError("Graph changed during iteration") + for child in G.neighbors(node): + try: + indegree_map[child] -= len(G[node][child]) if multigraph else 1 + except KeyError as err: + raise RuntimeError("Graph changed during iteration") from err + if indegree_map[child] == 0: + zero_indegree.append(child) + del indegree_map[child] + yield this_generation + + if indegree_map: + raise nx.NetworkXUnfeasible( + "Graph contains a cycle or graph changed during iteration" + ) + + +@nx._dispatchable +def topological_sort(G): + """Returns a generator of nodes in topologically sorted order. + + A topological sort is a nonunique permutation of the nodes of a + directed graph such that an edge from u to v implies that u + appears before v in the topological sort order. This ordering is + valid only if the graph has no directed cycles. + + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + Yields + ------ + nodes + Yields the nodes in topological sorted order. + + Raises + ------ + NetworkXError + Topological sort is defined for directed graphs only. If the graph `G` + is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + Examples + -------- + To get the reverse order of the topological sort: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> list(reversed(list(nx.topological_sort(DG)))) + [3, 2, 1] + + If your DiGraph naturally has the edges representing tasks/inputs + and nodes representing people/processes that initiate tasks, then + topological_sort is not quite what you need. You will have to change + the tasks to nodes with dependence reflected by edges. The result is + a kind of topological sort of the edges. This can be done + with :func:`networkx.line_graph` as follows: + + >>> list(nx.topological_sort(nx.line_graph(DG))) + [(1, 2), (2, 3)] + + Notes + ----- + This algorithm is based on a description and proof in + "Introduction to Algorithms: A Creative Approach" [1]_ . + + See also + -------- + is_directed_acyclic_graph, lexicographical_topological_sort + + References + ---------- + .. [1] Manber, U. (1989). + *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. + """ + for generation in nx.topological_generations(G): + yield from generation + + +@nx._dispatchable +def lexicographical_topological_sort(G, key=None): + """Generate the nodes in the unique lexicographical topological sort order. + + Generates a unique ordering of nodes by first sorting topologically (for which there are often + multiple valid orderings) and then additionally by sorting lexicographically. + + A topological sort arranges the nodes of a directed graph so that the + upstream node of each directed edge precedes the downstream node. + It is always possible to find a solution for directed graphs that have no cycles. 
+ There may be more than one valid solution.
+
+ Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the
+ topological sort and to determine a single, unique ordering. This can be useful in comparing
+ sort results.
+
+ The lexicographical order can be customized by providing a function to the `key=` parameter.
+ The definition of the key function is the same as used in Python's built-in `sort()`.
+ The function takes a single argument and returns a key to use for sorting purposes.
+
+ Lexicographical sorting can fail if the node names are un-sortable. See the example below.
+ The solution is to provide a function to the `key=` argument that returns sortable keys.
+
+ Parameters
+ ----------
+ G : NetworkX digraph
+ A directed acyclic graph (DAG)
+
+ key : function, optional
+ A function of one argument that converts a node name to a comparison key.
+ It defines and resolves ambiguities in the sort order. Defaults to the identity function.
+
+ Yields
+ ------
+ nodes
+ Yields the nodes of G in lexicographical topological sort order.
+
+ Raises
+ ------
+ NetworkXError
+ Topological sort is defined for directed graphs only. If the graph `G`
+ is undirected, a :exc:`NetworkXError` is raised.
+
+ NetworkXUnfeasible
+ If `G` is not a directed acyclic graph (DAG) no topological sort exists
+ and a :exc:`NetworkXUnfeasible` exception is raised. This can also be
+ raised if `G` is changed while the returned iterator is being processed
+
+ RuntimeError
+ If `G` is changed while the returned iterator is being processed.
+
+ TypeError
+ Results from un-sortable node names.
+ Consider using `key=` parameter to resolve ambiguities in the sort order.
+
+ Examples
+ --------
+ >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)])
+ >>> list(nx.lexicographical_topological_sort(DG))
+ [2, 1, 3, 5, 4]
+ >>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x))
+ [2, 5, 1, 4, 3]
+
+ The sort will fail for any graph that mixes integer and string nodes, because
+ comparison of integers to strings is not defined in Python. Is 3 greater or less than 'red'?
+
+ >>> DG = nx.DiGraph([(1, "red"), (3, "red"), (1, "green"), (2, "blue")])
+ >>> list(nx.lexicographical_topological_sort(DG))
+ Traceback (most recent call last):
+ ...
+ TypeError: '<' not supported between instances of 'str' and 'int'
+ ...
+
+ Incomparable nodes can be resolved using a `key` function. This example function
+ allows comparison of integers and strings by returning a tuple where the first
+ element is True for `str`, False otherwise. The second element is the node name.
+ This groups the strings and integers separately so they can be compared only among themselves.
+
+ >>> key = lambda node: (isinstance(node, str), node)
+ >>> list(nx.lexicographical_topological_sort(DG, key=key))
+ [1, 2, 3, 'blue', 'green', 'red']
+
+ Notes
+ -----
+ This algorithm is based on a description and proof in
+ "Introduction to Algorithms: A Creative Approach" [1]_ .
+
+ See also
+ --------
+ topological_sort
+
+ References
+ ----------
+ .. [1] Manber, U. (1989).
+ *Introduction to Algorithms - A Creative Approach.* Addison-Wesley.
+ """
+ if not G.is_directed():
+ msg = "Topological sort not defined on undirected graphs."
+ raise nx.NetworkXError(msg) + + if key is None: + + def key(node): + return node + + nodeid_map = {n: i for i, n in enumerate(G)} + + def create_tuple(node): + return key(node), nodeid_map[node], node + + indegree_map = {v: d for v, d in G.in_degree() if d > 0} + # These nodes have zero indegree and ready to be returned. + zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0] + heapq.heapify(zero_indegree) + + while zero_indegree: + _, _, node = heapq.heappop(zero_indegree) + + if node not in G: + raise RuntimeError("Graph changed during iteration") + for _, child in G.edges(node): + try: + indegree_map[child] -= 1 + except KeyError as err: + raise RuntimeError("Graph changed during iteration") from err + if indegree_map[child] == 0: + try: + heapq.heappush(zero_indegree, create_tuple(child)) + except TypeError as err: + raise TypeError( + f"{err}\nConsider using `key=` parameter to resolve ambiguities in the sort order." + ) + del indegree_map[child] + + yield node + + if indegree_map: + msg = "Graph contains a cycle or graph changed during iteration" + raise nx.NetworkXUnfeasible(msg) + + +@not_implemented_for("undirected") +@nx._dispatchable +def all_topological_sorts(G): + """Returns a generator of _all_ topological sorts of the directed graph G. + + A topological sort is a nonunique permutation of the nodes such that an + edge from u to v implies that u appears before v in the topological sort + order. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Yields + ------ + topological_sort_order : list + a list of nodes in `G`, representing one of the topological sort orders + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + NetworkXUnfeasible + If `G` is not acyclic + + Examples + -------- + To enumerate all topological sorts of directed graph: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + >>> list(nx.all_topological_sorts(DG)) + [[1, 2, 4, 3], [1, 2, 3, 4]] + + Notes + ----- + Implements an iterative version of the algorithm given in [1]. + + References + ---------- + .. [1] Knuth, Donald E., Szwarcfiter, Jayme L. (1974). + "A Structured Program to Generate All Topological Sorting Arrangements" + Information Processing Letters, Volume 2, Issue 6, 1974, Pages 153-157, + ISSN 0020-0190, + https://doi.org/10.1016/0020-0190(74)90001-5. 
+ Elsevier (North-Holland), Amsterdam
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("Topological sort not defined on undirected graphs.")
+
+ # the names of count and D are chosen to match the global variables in [1]
+ # count[v] is the number of edges ending at vertex v (its current in-degree)
+ count = dict(G.in_degree())
+ # vertices with indegree 0
+ D = deque([v for v, d in G.in_degree() if d == 0])
+ # stack of first value chosen at a position k in the topological sort
+ bases = []
+ current_sort = []
+
+ # do-while construct
+ while True:
+ assert all(count[v] == 0 for v in D)
+
+ if len(current_sort) == len(G):
+ yield list(current_sort)
+
+ # clean-up stack
+ while len(current_sort) > 0:
+ assert len(bases) == len(current_sort)
+ q = current_sort.pop()
+
+ # "restores" all edges (q, x)
+ # NOTE: it is important to iterate over edges instead
+ # of successors, so count is updated correctly in multigraphs
+ for _, j in G.out_edges(q):
+ count[j] += 1
+ assert count[j] >= 0
+ # remove entries from D
+ while len(D) > 0 and count[D[-1]] > 0:
+ D.pop()
+
+ # corresponds to a circular shift of the values in D
+ # if the first value chosen (the base) is in the first
+ # position of D again, we are done and need to consider the
+ # previous condition
+ D.appendleft(q)
+ if D[-1] == bases[-1]:
+ # all possible values have been chosen at current position
+ # remove corresponding marker
+ bases.pop()
+ else:
+ # there are still elements that have not been fixed
+ # at the current position in the topological sort
+ # stop removing elements, escape inner loop
+ break
+
+ else:
+ if len(D) == 0:
+ raise nx.NetworkXUnfeasible("Graph contains a cycle.")
+
+ # choose next node
+ q = D.pop()
+ # "erase" all edges (q, x)
+ # NOTE: it is important to iterate over edges instead
+ # of successors, so count is updated correctly in multigraphs
+ for _, j in G.out_edges(q):
+ count[j] -= 1
+ assert count[j] >= 0
+ if count[j] == 0:
+ D.append(j)
+ current_sort.append(q)
+
+ # base for current position might _not_ be fixed yet
+ if len(bases) < len(current_sort):
+ bases.append(q)
+
+ if len(bases) == 0:
+ break
+
+
+@nx._dispatchable
+def is_aperiodic(G):
+ """Returns True if `G` is aperiodic.
+
+ A strongly connected directed graph is aperiodic if there is no integer ``k > 1``
+ that divides the length of every cycle in the graph.
+
+ This function requires the graph `G` to be strongly connected and will raise
+ an error if it's not. For graphs that are not strongly connected, you should
+ first identify their strongly connected components
+ (using :func:`~networkx.algorithms.components.strongly_connected_components`)
+ or attracting components
+ (using :func:`~networkx.algorithms.components.attracting_components`),
+ and then apply this function to those individual components.
+
+ Parameters
+ ----------
+ G : NetworkX DiGraph
+ A directed graph
+
+ Returns
+ -------
+ bool
+ True if the graph is aperiodic, False otherwise
+
+ Raises
+ ------
+ NetworkXError
+ If `G` is not directed
+ NetworkXError
+ If `G` is not strongly connected
+ NetworkXPointlessConcept
+ If `G` has no nodes
+
+ Examples
+ --------
+ A graph consisting of one cycle, the length of which is 2. Therefore ``k = 2``
+ divides the length of every cycle in the graph and thus the graph
+ is *not aperiodic*::
+
+ >>> DG = nx.DiGraph([(1, 2), (2, 1)])
+ >>> nx.is_aperiodic(DG)
+ False
+
+ A graph consisting of two cycles: one of length 2 and the other of length 3.
+ The cycle lengths are coprime, so there is no single value of k where ``k > 1`` + that divides each cycle length and therefore the graph is *aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1), (1, 4), (4, 1)]) + >>> nx.is_aperiodic(DG) + True + + A graph created from cycles of the same length can still be aperiodic since + the cycles can overlap and form new cycles of different lengths. For example, + the following graph contains a cycle ``[4, 2, 3, 1]`` of length 4, which is coprime + with the explicitly added cycles of length 3, so the graph is aperiodic:: + + >>> DG = nx.DiGraph() + >>> nx.add_cycle(DG, [1, 2, 3]) + >>> nx.add_cycle(DG, [2, 1, 4]) + >>> nx.is_aperiodic(DG) + True + + A single-node graph's aperiodicity depends on whether it has a self-loop: + it is aperiodic if a self-loop exists, and periodic otherwise:: + + >>> G = nx.DiGraph() + >>> G.add_node(1) + >>> nx.is_aperiodic(G) + False + >>> G.add_edge(1, 1) + >>> nx.is_aperiodic(G) + True + + A Markov chain can be modeled as a directed graph, with nodes representing + states and edges representing transitions with non-zero probability. + Aperiodicity is typically considered for irreducible Markov chains, + which are those that are *strongly connected* as graphs. + + The following Markov chain is irreducible and aperiodic, and thus + ergodic. It is guaranteed to have a unique stationary distribution:: + + >>> G = nx.DiGraph() + >>> nx.add_cycle(G, [1, 2, 3, 4]) + >>> G.add_edge(1, 3) + >>> nx.is_aperiodic(G) + True + + Reducible Markov chains can sometimes have a unique stationary distribution. + This occurs if the chain has exactly one closed communicating class and + that class itself is aperiodic (see [1]_). You can use + :func:`~networkx.algorithms.components.attracting_components` + to find these closed communicating classes:: + + >>> G = nx.DiGraph([(1, 3), (2, 3)]) + >>> nx.add_cycle(G, [3, 4, 5, 6]) + >>> nx.add_cycle(G, [3, 5, 6]) + >>> communicating_classes = list(nx.strongly_connected_components(G)) + >>> len(communicating_classes) + 3 + >>> closed_communicating_classes = list(nx.attracting_components(G)) + >>> len(closed_communicating_classes) + 1 + >>> nx.is_aperiodic(G.subgraph(closed_communicating_classes[0])) + True + + Notes + ----- + This uses the method outlined in [1]_, which runs in $O(m)$ time + given $m$ edges in `G`. + + References + ---------- + .. [1] Jarvis, J. P.; Shier, D. R. (1996), + "Graph-theoretic analysis of finite Markov chains," + in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling: + A Multidisciplinary Approach, CRC Press. 
+ """ + if not G.is_directed(): + raise nx.NetworkXError("is_aperiodic not defined for undirected graphs") + if len(G) == 0: + raise nx.NetworkXPointlessConcept("Graph has no nodes.") + if not nx.is_strongly_connected(G): + raise nx.NetworkXError("Graph is not strongly connected.") + s = arbitrary_element(G) + levels = {s: 0} + this_level = [s] + g = 0 + lev = 1 + while this_level: + next_level = [] + for u in this_level: + for v in G[u]: + if v in levels: # Non-Tree Edge + g = gcd(g, levels[u] - levels[v] + 1) + else: # Tree Edge + next_level.append(v) + levels[v] = lev + this_level = next_level + lev += 1 + return g == 1 + + +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def transitive_closure(G, reflexive=False): + """Returns transitive closure of a graph + + The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that + for all v, w in V there is an edge (v, w) in E+ if and only if there + is a path from v to w in G. + + Handling of paths from v to v has some flexibility within this definition. + A reflexive transitive closure creates a self-loop for the path + from v to v of length 0. The usual transitive closure creates a + self-loop only if a cycle exists (a path from v to v with length > 0). + We also allow an option for no self-loops. + + Parameters + ---------- + G : NetworkX Graph + A directed/undirected graph/multigraph. + reflexive : Bool or None, optional (default: False) + Determines when cycles create self-loops in the Transitive Closure. + If True, trivial cycles (length 0) create self-loops. The result + is a reflexive transitive closure of G. + If False (the default) non-trivial cycles create self-loops. + If None, self-loops are not created. + + Returns + ------- + NetworkX graph + The transitive closure of `G` + + Raises + ------ + NetworkXError + If `reflexive` not in `{None, True, False}` + + Examples + -------- + The treatment of trivial (i.e. length 0) cycles is controlled by the + `reflexive` parameter. + + Trivial (i.e. length 0) cycles do not create self-loops when + ``reflexive=False`` (the default):: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure(DG, reflexive=False) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3)]) + + However, nontrivial (i.e. length greater than 0) cycles create self-loops + when ``reflexive=False`` (the default):: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> TC = nx.transitive_closure(DG, reflexive=False) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (1, 1), (2, 3), (2, 1), (2, 2), (3, 1), (3, 2), (3, 3)]) + + Trivial cycles (length 0) create self-loops when ``reflexive=True``:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure(DG, reflexive=True) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 1), (1, 3), (2, 3), (2, 2), (3, 3)]) + + And the third option is not to create self-loops at all when ``reflexive=None``:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> TC = nx.transitive_closure(DG, reflexive=None) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3), (2, 1), (3, 1), (3, 2)]) + + References + ---------- + .. 
[1] https://www.ics.uci.edu/~eppstein/PADS/PartialOrder.py + """ + TC = G.copy() + + if reflexive not in {None, True, False}: + raise nx.NetworkXError("Incorrect value for the parameter `reflexive`") + + for v in G: + if reflexive is None: + TC.add_edges_from((v, u) for u in nx.descendants(G, v) if u not in TC[v]) + elif reflexive is True: + TC.add_edges_from( + (v, u) for u in nx.descendants(G, v) | {v} if u not in TC[v] + ) + elif reflexive is False: + TC.add_edges_from((v, e[1]) for e in nx.edge_bfs(G, v) if e[1] not in TC[v]) + + return TC + + +@not_implemented_for("undirected") +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def transitive_closure_dag(G, topo_order=None): + """Returns the transitive closure of a directed acyclic graph. + + This function is faster than the function `transitive_closure`, but fails + if the graph has a cycle. + + The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that + for all v, w in V there is an edge (v, w) in E+ if and only if there + is a non-null path from v to w in G. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Returns + ------- + NetworkX DiGraph + The transitive closure of `G` + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + NetworkXUnfeasible + If `G` has a cycle + + Examples + -------- + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure_dag(DG) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3)]) + + Notes + ----- + This algorithm is probably simple enough to be well-known but I didn't find + a mention in the literature. + """ + if topo_order is None: + topo_order = list(topological_sort(G)) + + TC = G.copy() + + # idea: traverse vertices following a reverse topological order, connecting + # each vertex to its descendants at distance 2 as we go + for v in reversed(topo_order): + TC.add_edges_from((v, u) for u in nx.descendants_at_distance(TC, v, 2)) + + return TC + + +@not_implemented_for("undirected") +@nx._dispatchable(returns_graph=True) +def transitive_reduction(G): + """Returns transitive reduction of a directed graph + + The transitive reduction of G = (V,E) is a graph G- = (V,E-) such that + for all v,w in V there is an edge (v,w) in E- if and only if (v,w) is + in E and there is no path from v to w in G with length greater than 1. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + Returns + ------- + NetworkX DiGraph + The transitive reduction of `G` + + Raises + ------ + NetworkXError + If `G` is not a directed acyclic graph (DAG) transitive reduction is + not uniquely defined and a :exc:`NetworkXError` exception is raised. + + Examples + -------- + To perform transitive reduction on a DiGraph: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (1, 3)]) + >>> TR = nx.transitive_reduction(DG) + >>> list(TR.edges) + [(1, 2), (2, 3)] + + To avoid unnecessary data copies, this implementation does not return a + DiGraph with node/edge data. 
+ To perform transitive reduction on a DiGraph and transfer node/edge data: + + >>> DG = nx.DiGraph() + >>> DG.add_edges_from([(1, 2), (2, 3), (1, 3)], color="red") + >>> TR = nx.transitive_reduction(DG) + >>> TR.add_nodes_from(DG.nodes(data=True)) + >>> TR.add_edges_from((u, v, DG.edges[u, v]) for u, v in TR.edges) + >>> list(TR.edges(data=True)) + [(1, 2, {'color': 'red'}), (2, 3, {'color': 'red'})] + + References + ---------- + https://en.wikipedia.org/wiki/Transitive_reduction + + """ + if not is_directed_acyclic_graph(G): + msg = "Directed Acyclic Graph required for transitive_reduction" + raise nx.NetworkXError(msg) + TR = nx.DiGraph() + TR.add_nodes_from(G.nodes()) + descendants = {} + # count before removing set stored in descendants + check_count = dict(G.in_degree) + for u in G: + u_nbrs = set(G[u]) + for v in G[u]: + if v in u_nbrs: + if v not in descendants: + descendants[v] = {y for x, y in nx.dfs_edges(G, v)} + u_nbrs -= descendants[v] + check_count[v] -= 1 + if check_count[v] == 0: + del descendants[v] + TR.add_edges_from((u, v) for v in u_nbrs) + return TR + + +@not_implemented_for("undirected") +@nx._dispatchable +def antichains(G, topo_order=None): + """Generates antichains from a directed acyclic graph (DAG). + + An antichain is a subset of a partially ordered set such that any + two elements in the subset are incomparable. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Yields + ------ + antichain : list + a list of nodes in `G` representing an antichain + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + NetworkXUnfeasible + If `G` contains a cycle + + Examples + -------- + >>> DG = nx.DiGraph([(1, 2), (1, 3)]) + >>> list(nx.antichains(DG)) + [[], [3], [2], [2, 3], [1]] + + Notes + ----- + This function was originally developed by Peter Jipsen and Franco Saliola + for the SAGE project. It's included in NetworkX with permission from the + authors. Original SAGE code at: + + https://github.com/sagemath/sage/blob/master/src/sage/combinat/posets/hasse_diagram.py + + References + ---------- + .. [1] Free Lattices, by R. Freese, J. Jezek and J. B. Nation, + AMS, Vol 42, 1995, p. 226. + """ + if topo_order is None: + topo_order = list(nx.topological_sort(G)) + + TC = nx.transitive_closure_dag(G, topo_order) + antichains_stacks = [([], list(reversed(topo_order)))] + + while antichains_stacks: + (antichain, stack) = antichains_stacks.pop() + # Invariant: + # - the elements of antichain are independent + # - the elements of stack are independent from those of antichain + yield antichain + while stack: + x = stack.pop() + new_antichain = antichain + [x] + new_stack = [t for t in stack if not ((t in TC[x]) or (x in TC[t]))] + antichains_stacks.append((new_antichain, new_stack)) + + +@not_implemented_for("undirected") +@nx._dispatchable(edge_attrs={"weight": "default_weight"}) +def dag_longest_path(G, weight="weight", default_weight=1, topo_order=None): + """Returns the longest path in a directed acyclic graph (DAG). + + If `G` has edges with `weight` attribute the edge data are used as + weight values. 
+ + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + weight : str, optional + Edge data key to use for weight + + default_weight : int, optional + The weight of edges that do not have a weight attribute + + topo_order: list or tuple, optional + A topological order for `G` (if None, the function will compute one) + + Returns + ------- + list + Longest path + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + Examples + -------- + >>> DG = nx.DiGraph( + ... [(0, 1, {"cost": 1}), (1, 2, {"cost": 1}), (0, 2, {"cost": 42})] + ... ) + >>> list(nx.all_simple_paths(DG, 0, 2)) + [[0, 1, 2], [0, 2]] + >>> nx.dag_longest_path(DG) + [0, 1, 2] + >>> nx.dag_longest_path(DG, weight="cost") + [0, 2] + + In the case where multiple valid topological orderings exist, `topo_order` + can be used to specify a specific ordering: + + >>> DG = nx.DiGraph([(0, 1), (0, 2)]) + >>> sorted(nx.all_topological_sorts(DG)) # Valid topological orderings + [[0, 1, 2], [0, 2, 1]] + >>> nx.dag_longest_path(DG, topo_order=[0, 1, 2]) + [0, 1] + >>> nx.dag_longest_path(DG, topo_order=[0, 2, 1]) + [0, 2] + + See also + -------- + dag_longest_path_length + + """ + if not G: + return [] + + if topo_order is None: + topo_order = nx.topological_sort(G) + + dist = {} # stores {v : (length, u)} + for v in topo_order: + us = [ + ( + dist[u][0] + + ( + max(data.values(), key=lambda x: x.get(weight, default_weight)) + if G.is_multigraph() + else data + ).get(weight, default_weight), + u, + ) + for u, data in G.pred[v].items() + ] + + # Use the best predecessor if there is one and its distance is + # non-negative, otherwise terminate. + maxu = max(us, key=lambda x: x[0]) if us else (0, v) + dist[v] = maxu if maxu[0] >= 0 else (0, v) + + u = None + v = max(dist, key=lambda x: dist[x][0]) + path = [] + while u != v: + path.append(v) + u = v + v = dist[v][1] + + path.reverse() + return path + + +@not_implemented_for("undirected") +@nx._dispatchable(edge_attrs={"weight": "default_weight"}) +def dag_longest_path_length(G, weight="weight", default_weight=1): + """Returns the longest path length in a DAG + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + weight : string, optional + Edge data key to use for weight + + default_weight : int, optional + The weight of edges that do not have a weight attribute + + Returns + ------- + int + Longest path length + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + Examples + -------- + >>> DG = nx.DiGraph( + ... [(0, 1, {"cost": 1}), (1, 2, {"cost": 1}), (0, 2, {"cost": 42})] + ... ) + >>> list(nx.all_simple_paths(DG, 0, 2)) + [[0, 1, 2], [0, 2]] + >>> nx.dag_longest_path_length(DG) + 2 + >>> nx.dag_longest_path_length(DG, weight="cost") + 42 + + See also + -------- + dag_longest_path + """ + path = nx.dag_longest_path(G, weight, default_weight) + path_length = 0 + if G.is_multigraph(): + for u, v in pairwise(path): + i = max(G[u][v], key=lambda x: G[u][v][x].get(weight, default_weight)) + path_length += G[u][v][i].get(weight, default_weight) + else: + for u, v in pairwise(path): + path_length += G[u][v].get(weight, default_weight) + + return path_length + + +@nx._dispatchable +def root_to_leaf_paths(G): + """Yields root-to-leaf paths in a directed acyclic graph. + + `G` must be a directed acyclic graph. If not, the behavior of this + function is undefined. A "root" in this graph is a node of in-degree + zero and a "leaf" a node of out-degree zero. 
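+
+ For instance (an illustrative doctest; this helper is not exported in
+ ``__all__``, so it is referenced through its module here):
+
+ >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 3)])
+ >>> sorted(nx.algorithms.dag.root_to_leaf_paths(G))
+ [[0, 1, 3], [0, 2]]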
+ + When invoked, this function iterates over each path from any root to + any leaf. A path is a list of nodes. + + """ + roots = (v for v, d in G.in_degree() if d == 0) + leaves = (v for v, d in G.out_degree() if d == 0) + all_paths = partial(nx.all_simple_paths, G) + # TODO In Python 3, this would be better as `yield from ...`. + return chaini(starmap(all_paths, product(roots, leaves))) + + +@not_implemented_for("multigraph") +@not_implemented_for("undirected") +@nx._dispatchable(returns_graph=True) +def dag_to_branching(G): + """Returns a branching representing all (overlapping) paths from + root nodes to leaf nodes in the given directed acyclic graph. + + As described in :mod:`networkx.algorithms.tree.recognition`, a + *branching* is a directed forest in which each node has at most one + parent. In other words, a branching is a disjoint union of + *arborescences*. For this function, each node of in-degree zero in + `G` becomes a root of one of the arborescences, and there will be + one leaf node for each distinct path from that root to a leaf node + in `G`. + + Each node `v` in `G` with *k* parents becomes *k* distinct nodes in + the returned branching, one for each parent, and the sub-DAG rooted + at `v` is duplicated for each copy. The algorithm then recurses on + the children of each copy of `v`. + + Parameters + ---------- + G : NetworkX graph + A directed acyclic graph. + + Returns + ------- + DiGraph + The branching in which there is a bijection between root-to-leaf + paths in `G` (in which multiple paths may share the same leaf) + and root-to-leaf paths in the branching (in which there is a + unique path from a root to a leaf). + + Each node has an attribute 'source' whose value is the original + node to which this node corresponds. No other graph, node, or + edge attributes are copied into this new graph. + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed, or if `G` is a multigraph. + + HasACycle + If `G` is not acyclic. + + Examples + -------- + To examine which nodes in the returned branching were produced by + which original node in the directed acyclic graph, we can collect + the mapping from source node to new nodes into a dictionary. For + example, consider the directed diamond graph:: + + >>> from collections import defaultdict + >>> from operator import itemgetter + >>> + >>> G = nx.DiGraph(nx.utils.pairwise("abd")) + >>> G.add_edges_from(nx.utils.pairwise("acd")) + >>> B = nx.dag_to_branching(G) + >>> + >>> sources = defaultdict(set) + >>> for v, source in B.nodes(data="source"): + ... sources[source].add(v) + >>> len(sources["a"]) + 1 + >>> len(sources["d"]) + 2 + + To copy node attributes from the original graph to the new graph, + you can use a dictionary like the one constructed in the above + example:: + + >>> for source, nodes in sources.items(): + ... for v in nodes: + ... B.nodes[v].update(G.nodes[source]) + + Notes + ----- + This function is not idempotent in the sense that the node labels in + the returned branching may be uniquely generated each time the + function is invoked. In fact, the node labels may not be integers; + in order to relabel the nodes to be more readable, you can use the + :func:`networkx.convert_node_labels_to_integers` function. + + The current implementation of this function uses + :func:`networkx.prefix_tree`, so it is subject to the limitations of + that function. 
+ + """ + if has_cycle(G): + msg = "dag_to_branching is only defined for acyclic graphs" + raise nx.HasACycle(msg) + paths = root_to_leaf_paths(G) + B = nx.prefix_tree(paths) + # Remove the synthetic `root`(0) and `NIL`(-1) nodes from the tree + B.remove_node(0) + B.remove_node(-1) + return B + + +@not_implemented_for("undirected") +@nx._dispatchable +def compute_v_structures(G): + """Yields 3-node tuples that represent the v-structures in `G`. + + .. deprecated:: 3.4 + + `compute_v_structures` actually yields colliders. It will be removed in + version 3.6. Use `nx.dag.v_structures` or `nx.dag.colliders` instead. + + Colliders are triples in the directed acyclic graph (DAG) where two parent nodes + point to the same child node. V-structures are colliders where the two parent + nodes are not adjacent. In a causal graph setting, the parents do not directly + depend on each other, but conditioning on the child node provides an association. + + Parameters + ---------- + G : graph + A networkx `~networkx.DiGraph`. + + Yields + ------ + A 3-tuple representation of a v-structure + Each v-structure is a 3-tuple with the parent, collider, and other parent. + + Raises + ------ + NetworkXNotImplemented + If `G` is an undirected graph. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (0, 4), (3, 1), (2, 4), (0, 5), (4, 5), (1, 5)]) + >>> nx.is_directed_acyclic_graph(G) + True + >>> list(nx.compute_v_structures(G)) + [(0, 4, 2), (0, 5, 4), (0, 5, 1), (4, 5, 1)] + + See Also + -------- + v_structures + colliders + + Notes + ----- + This function was written to be used on DAGs, however it works on cyclic graphs + too. Since colliders are referred to in the cyclic causal graph literature + [2]_ we allow cyclic graphs in this function. It is suggested that you test if + your input graph is acyclic as in the example if you want that property. + + References + ---------- + .. [1] `Pearl's PRIMER `_ + Ch-2 page 50: v-structures def. + .. [2] A Hyttinen, P.O. Hoyer, F. Eberhardt, M J ̈arvisalo, (2013) + "Discovering cyclic causal models with latent variables: + a general SAT-based procedure", UAI'13: Proceedings of the Twenty-Ninth + Conference on Uncertainty in Artificial Intelligence, pg 301–310, + `doi:10.5555/3023638.3023669 `_ + """ + import warnings + + warnings.warn( + ( + "\n\n`compute_v_structures` actually yields colliders. It will be\n" + "removed in version 3.6. Use `nx.dag.v_structures` or `nx.dag.colliders`\n" + "instead.\n" + ), + category=DeprecationWarning, + stacklevel=5, + ) + + return colliders(G) + + +@not_implemented_for("undirected") +@nx._dispatchable +def v_structures(G): + """Yields 3-node tuples that represent the v-structures in `G`. + + Colliders are triples in the directed acyclic graph (DAG) where two parent nodes + point to the same child node. V-structures are colliders where the two parent + nodes are not adjacent. In a causal graph setting, the parents do not directly + depend on each other, but conditioning on the child node provides an association. + + Parameters + ---------- + G : graph + A networkx `~networkx.DiGraph`. + + Yields + ------ + A 3-tuple representation of a v-structure + Each v-structure is a 3-tuple with the parent, collider, and other parent. + + Raises + ------ + NetworkXNotImplemented + If `G` is an undirected graph. 
+
+ Examples
+ --------
+ >>> G = nx.DiGraph([(1, 2), (0, 4), (3, 1), (2, 4), (0, 5), (4, 5), (1, 5)])
+ >>> nx.is_directed_acyclic_graph(G)
+ True
+ >>> list(nx.dag.v_structures(G))
+ [(0, 4, 2), (0, 5, 1), (4, 5, 1)]
+
+ See Also
+ --------
+ colliders
+
+ Notes
+ -----
+ This function was written to be used on DAGs; however, it also works on cyclic
+ graphs. Since colliders are referred to in the cyclic causal graph literature
+ [2]_, we allow cyclic graphs in this function. It is suggested that you test
+ whether your input graph is acyclic, as in the example, if you want that property.
+
+ References
+ ----------
+ .. [1] `Pearl's PRIMER `_
+ Ch-2 page 50: v-structures def.
+ .. [2] A Hyttinen, P.O. Hoyer, F. Eberhardt, M. Järvisalo, (2013)
+ "Discovering cyclic causal models with latent variables:
+ a general SAT-based procedure", UAI'13: Proceedings of the Twenty-Ninth
+ Conference on Uncertainty in Artificial Intelligence, pg 301–310,
+ `doi:10.5555/3023638.3023669 `_
+ """
+ for p1, c, p2 in colliders(G):
+ if not (G.has_edge(p1, p2) or G.has_edge(p2, p1)):
+ yield (p1, c, p2)
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def colliders(G):
+ """Yields 3-node tuples that represent the colliders in `G`.
+
+ In a Directed Acyclic Graph (DAG), if you have three nodes A, B, and C, and
+ there are edges from A to C and from B to C, then C is a collider [1]_ . In
+ a causal graph setting, this means that both events A and B are "causing" C,
+ and conditioning on C provides an association between A and B even if
+ no direct causal relationship exists between A and B.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx `~networkx.DiGraph`.
+
+ Yields
+ ------
+ A 3-tuple representation of a collider
+ Each collider is a 3-tuple with the parent, collider, and other parent.
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If `G` is an undirected graph.
+
+ Examples
+ --------
+ >>> G = nx.DiGraph([(1, 2), (0, 4), (3, 1), (2, 4), (0, 5), (4, 5), (1, 5)])
+ >>> nx.is_directed_acyclic_graph(G)
+ True
+ >>> list(nx.dag.colliders(G))
+ [(0, 4, 2), (0, 5, 4), (0, 5, 1), (4, 5, 1)]
+
+ See Also
+ --------
+ v_structures
+
+ Notes
+ -----
+ This function was written to be used on DAGs; however, it also works on cyclic
+ graphs. Since colliders are referred to in the cyclic causal graph literature
+ [2]_, we allow cyclic graphs in this function. It is suggested that you test
+ whether your input graph is acyclic, as in the example, if you want that property.
+
+ References
+ ----------
+ .. [1] `Wikipedia: Collider in causal graphs `_
+ .. [2] A Hyttinen, P.O. Hoyer, F.
Eberhardt, M. Järvisalo, (2013)
+ "Discovering cyclic causal models with latent variables:
+ a general SAT-based procedure", UAI'13: Proceedings of the Twenty-Ninth
+ Conference on Uncertainty in Artificial Intelligence, pg 301–310,
+ `doi:10.5555/3023638.3023669 `_
+ """
+ for node in G.nodes:
+ for p1, p2 in combinations(G.predecessors(node), 2):
+ yield (p1, node, p2)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/distance_measures.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/distance_measures.py
new file mode 100644
index 0000000..8ce3fb0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/distance_measures.py
@@ -0,0 +1,1159 @@
+"""Graph diameter, radius, eccentricity and other properties."""
+
+import math
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+ "eccentricity",
+ "diameter",
+ "harmonic_diameter",
+ "radius",
+ "periphery",
+ "center",
+ "barycenter",
+ "resistance_distance",
+ "kemeny_constant",
+ "effective_graph_resistance",
+]
+
+
+@not_implemented_for("directed")
+def _tree_center(G):
+ """Returns the center of an undirected tree graph.
+
+ The center of a tree consists of nodes that minimize the maximum eccentricity.
+ That is, these nodes minimize the maximum distance to all other nodes.
+ This implementation currently only works for unweighted edges.
+
+ If the input graph is not a tree, results are not guaranteed to be correct,
+ and while some non-trees will raise a ``NetworkXError``, not all non-trees
+ will be detected. Thus, this function should not be used if the caller is
+ unsure whether the input graph is a tree. Use ``networkx.is_tree(G)`` to check.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A tree graph (undirected, acyclic graph).
+
+ Returns
+ -------
+ center : list
+ A list of nodes in the center of the tree. This could be one or two nodes.
+
+ Raises
+ ------
+ NetworkXError
+ If the algorithm detects that the input graph is not a tree. There is no
+ guarantee this error will be raised for every non-tree input.
+
+ Notes
+ -----
+ This algorithm iteratively removes leaves (nodes with degree 1) from the tree until
+ there are only 1 or 2 nodes left. The remaining nodes form the center of the tree.
+
+ This algorithm's time complexity is O(N) where N is the number of nodes in the tree.
+
+ Examples
+ --------
+ >>> G = nx.Graph([(1, 2), (1, 3), (2, 4), (2, 5)])
+ >>> _tree_center(G)
+ [1, 2]
+
+ >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 5)])
+ >>> _tree_center(G)
+ [3]
+ """
+ center_candidates_degree = dict(G.degree)
+ leaves = {node for node, degree in center_candidates_degree.items() if degree == 1}
+
+ # It's better to fail than to loop forever, so check `leaves` to ensure progress
+ while len(center_candidates_degree) > 2 and leaves:
+ new_leaves = set()
+ for leaf in leaves:
+ del center_candidates_degree[leaf]
+ for neighbor in G.neighbors(leaf):
+ if neighbor not in center_candidates_degree:
+ continue
+ center_candidates_degree[neighbor] -= 1
+ if center_candidates_degree[neighbor] == 1:
+ new_leaves.add(neighbor)
+ leaves = new_leaves
+
+ if not leaves and len(center_candidates_degree) >= 2:
+ # We detected the graph is not a tree. This check does not cover all cases.
+ # For example, it does not cover the case where we have two islands (A-B) and (B-C)
+ # where we might eliminate (B-C) leaves and return [A, B] as centers.
+ raise nx.NetworkXError("Input graph is not a tree") + + return list(center_candidates_degree) + + +def _extrema_bounding(G, compute="diameter", weight=None): + """Compute requested extreme distance metric of undirected graph G + + Computation is based on smart lower and upper bounds, and in practice + linear in the number of nodes, rather than quadratic (except for some + border cases such as complete graphs or circle shaped graphs). + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + compute : string denoting the requesting metric + "diameter" for the maximal eccentricity value, + "radius" for the minimal eccentricity value, + "periphery" for the set of nodes with eccentricity equal to the diameter, + "center" for the set of nodes with eccentricity equal to the radius, + "eccentricities" for the maximum distance from each node to all other nodes in G + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + value : value of the requested metric + int for "diameter" and "radius" or + list of nodes for "center" and "periphery" or + dictionary of eccentricity values keyed by node for "eccentricities" + + Raises + ------ + NetworkXError + If the graph consists of multiple components + ValueError + If `compute` is not one of "diameter", "radius", "periphery", "center", or "eccentricities". + + Notes + ----- + This algorithm was proposed in [1]_ and discussed further in [2]_ and [3]_. + + References + ---------- + .. [1] F. W. Takes, W. A. Kosters, + "Determining the diameter of small world networks." + Proceedings of the 20th ACM international conference on Information and + knowledge management, 2011 + https://dl.acm.org/doi/abs/10.1145/2063576.2063748 + .. [2] F. W. Takes, W. A. Kosters, + "Computing the Eccentricity Distribution of Large Graphs." + Algorithms, 2013 + https://www.mdpi.com/1999-4893/6/1/100 + .. [3] M. Borassi, P. Crescenzi, M. Habib, W. A. Kosters, A. Marino, F. W. Takes, + "Fast diameter and radius BFS-based computation in (weakly connected) + real-world graphs: With an application to the six degrees of separation + games." 
+ Theoretical Computer Science, 2015 + https://www.sciencedirect.com/science/article/pii/S0304397515001644 + """ + # init variables + degrees = dict(G.degree()) # start with the highest degree node + minlowernode = max(degrees, key=degrees.get) + N = len(degrees) # number of nodes + # alternate between smallest lower and largest upper bound + high = False + # status variables + ecc_lower = dict.fromkeys(G, 0) + ecc_upper = dict.fromkeys(G, math.inf) + candidates = set(G) + + # (re)set bound extremes + minlower = math.inf + maxlower = 0 + minupper = math.inf + maxupper = 0 + + # repeat the following until there are no more candidates + while candidates: + if high: + current = maxuppernode # select node with largest upper bound + else: + current = minlowernode # select node with smallest lower bound + high = not high + + # get distances from/to current node and derive eccentricity + dist = nx.shortest_path_length(G, source=current, weight=weight) + + if len(dist) != N: + msg = "Cannot compute metric because graph is not connected." + raise nx.NetworkXError(msg) + current_ecc = max(dist.values()) + + # print status update + # print ("ecc of " + str(current) + " (" + str(ecc_lower[current]) + "/" + # + str(ecc_upper[current]) + ", deg: " + str(dist[current]) + ") is " + # + str(current_ecc)) + # print(ecc_upper) + + # (re)set bound extremes + maxuppernode = None + minlowernode = None + + # update node bounds + for i in candidates: + # update eccentricity bounds + d = dist[i] + ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d))) + ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d) + + # update min/max values of lower and upper bounds + minlower = min(ecc_lower[i], minlower) + maxlower = max(ecc_lower[i], maxlower) + minupper = min(ecc_upper[i], minupper) + maxupper = max(ecc_upper[i], maxupper) + + # update candidate set + if compute == "diameter": + ruled_out = { + i + for i in candidates + if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper + } + elif compute == "radius": + ruled_out = { + i + for i in candidates + if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower + } + elif compute == "periphery": + ruled_out = { + i + for i in candidates + if ecc_upper[i] < maxlower + and (maxlower == maxupper or ecc_lower[i] > maxupper) + } + elif compute == "center": + ruled_out = { + i + for i in candidates + if ecc_lower[i] > minupper + and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower) + } + elif compute == "eccentricities": + ruled_out = set() + else: + msg = "compute must be one of 'diameter', 'radius', 'periphery', 'center', 'eccentricities'" + raise ValueError(msg) + + ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i]) + candidates -= ruled_out + + # for i in ruled_out: + # print("removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"% + # (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper)) + # print("node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"% + # (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper)) + # print("NODE 4: %g"%(ecc_upper[4] <= maxlower)) + # print("NODE 4: %g"%(2 * ecc_lower[4] >= maxupper)) + # print("NODE 4: %g"%(ecc_upper[4] <= maxlower + # and 2 * ecc_lower[4] >= maxupper)) + + # updating maxuppernode and minlowernode for selection in next round + for i in candidates: + if ( + minlowernode is None + or ( + ecc_lower[i] == ecc_lower[minlowernode] + and degrees[i] > degrees[minlowernode] + ) + or (ecc_lower[i] < ecc_lower[minlowernode]) + ): + minlowernode = i + + if ( + maxuppernode is None + or ( + ecc_upper[i] 
== ecc_upper[maxuppernode] + and degrees[i] > degrees[maxuppernode] + ) + or (ecc_upper[i] > ecc_upper[maxuppernode]) + ): + maxuppernode = i + + # print status update + # print (" min=" + str(minlower) + "/" + str(minupper) + + # " max=" + str(maxlower) + "/" + str(maxupper) + + # " candidates: " + str(len(candidates))) + # print("cand:",candidates) + # print("ecc_l",ecc_lower) + # print("ecc_u",ecc_upper) + # wait = input("press Enter to continue") + + # return the correct value of the requested metric + if compute == "diameter": + return maxlower + if compute == "radius": + return minupper + if compute == "periphery": + p = [v for v in G if ecc_lower[v] == maxlower] + return p + if compute == "center": + c = [v for v in G if ecc_upper[v] == minupper] + return c + if compute == "eccentricities": + return ecc_lower + return None + + +@nx._dispatchable(edge_attrs="weight") +def eccentricity(G, v=None, sp=None, weight=None): + """Returns the eccentricity of nodes in G. + + The eccentricity of a node v is the maximum distance from v to + all other nodes in G. + + Parameters + ---------- + G : NetworkX graph + A graph + + v : node, optional + Return value of specified node + + sp : dict of dicts, optional + All pairs shortest path lengths as a dictionary of dictionaries + + weight : string, function, or None (default=None) + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + ecc : dictionary + A dictionary of eccentricity values keyed by node. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> dict(nx.eccentricity(G)) + {1: 2, 2: 3, 3: 2, 4: 2, 5: 3} + + >>> dict( + ... nx.eccentricity(G, v=[1, 5]) + ... ) # This returns the eccentricity of node 1 & 5 + {1: 2, 5: 3} + + """ + # if v is None: # none, use entire graph + # nodes=G.nodes() + # elif v in G: # is v a single node + # nodes=[v] + # else: # assume v is a container of nodes + # nodes=v + order = G.order() + e = {} + for n in G.nbunch_iter(v): + if sp is None: + length = nx.shortest_path_length(G, source=n, weight=weight) + + L = len(length) + else: + try: + length = sp[n] + L = len(length) + except TypeError as err: + raise nx.NetworkXError('Format of "sp" is invalid.') from err + if L != order: + if G.is_directed(): + msg = ( + "Found infinite path length because the digraph is not" + " strongly connected" + ) + else: + msg = "Found infinite path length because the graph is not connected" + raise nx.NetworkXError(msg) + + e[n] = max(length.values()) + + if v in G: + return e[v] # return single value + return e + + +@nx._dispatchable(edge_attrs="weight") +def diameter(G, e=None, usebounds=False, weight=None): + """Returns the diameter of the graph G. + + The diameter is the maximum eccentricity. 
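+
+    For instance (an illustrative sketch using the standard ``nx.path_graph``
+    generator): the path graph on four nodes has diameter 3, the distance
+    between its two endpoints.
+
+    >>> nx.diameter(nx.path_graph(4))
+    3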
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A graph
+
+    e : eccentricity dictionary, optional
+        A precomputed dictionary of eccentricities.
+
+    usebounds : bool, optional
+        If `True`, use extrema bounding (see Notes) when computing the diameter
+        for undirected graphs. Extrema bounding may accelerate the
+        distance calculation for some graphs. `usebounds` is ignored if `G` is
+        directed or if `e` is not `None`. Default is `False`.
+
+    weight : string, function, or None
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number.
+
+        If this is None, every edge has weight/distance/cost 1.
+
+        Weights stored as floating point values can lead to small round-off
+        errors in distances. Use integer weights to avoid this.
+
+        Weights should be positive, since they are distances.
+
+    Returns
+    -------
+    d : integer
+       Diameter of graph
+
+    Notes
+    -----
+    When ``usebounds=True``, the computation makes use of smart lower
+    and upper bounds and is often linear in the number of nodes, rather than
+    quadratic (except for some border cases such as complete graphs or circle
+    shaped-graphs).
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
+    >>> nx.diameter(G)
+    3
+
+    See Also
+    --------
+    eccentricity
+    """
+    if usebounds is True and e is None and not G.is_directed():
+        return _extrema_bounding(G, compute="diameter", weight=weight)
+    if e is None:
+        e = eccentricity(G, weight=weight)
+    return max(e.values())
+
+
+@nx._dispatchable(edge_attrs="weight")
+def harmonic_diameter(G, sp=None, *, weight=None):
+    """Returns the harmonic diameter of the graph G.
+
+    The harmonic diameter of a graph is the harmonic mean of the distances
+    between all pairs of distinct vertices. Graphs that are not strongly
+    connected have infinite diameter and mean distance, so those measures
+    are not useful for them. Restricting the diameter or mean distance to
+    finite distances yields paradoxical values (e.g., a perfect matching
+    would have diameter one). The harmonic mean handles infinite distances
+    gracefully (e.g., a perfect matching has harmonic diameter equal
+    to the number of vertices minus one), making it possible to assign a
+    meaningful value to all graphs.
+
+    Note that in [1]_ the harmonic diameter is called "connectivity length":
+    however, "harmonic diameter" is a more standard name from the
+    theory of metric spaces. The name "harmonic mean distance" is perhaps
+    more descriptive, but it is not used in the literature, so we use the
+    name "harmonic diameter" here.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+       A graph
+
+    sp : dict of dicts, optional
+       All-pairs shortest path lengths as a dictionary of dictionaries
+
+    weight : string, function, or None (default=None)
+        If None, every edge has weight/distance 1.
+        If a string, use this edge attribute as the edge weight.
+        Any edge attribute not present defaults to 1.
+        If a function, the weight of an edge is the value returned by the function.
+ The function must accept exactly three positional arguments: + the two endpoints of an edge and the dictionary of edge attributes for + that edge. The function must return a number. + + Returns + ------- + hd : float + Harmonic diameter of graph + + References + ---------- + .. [1] Massimo Marchiori and Vito Latora, "Harmony in the small-world". + *Physica A: Statistical Mechanics and Its Applications* + 285(3-4), pages 539-546, 2000. + + """ + order = G.order() + + sum_invd = 0 + for n in G: + if sp is None: + length = nx.single_source_dijkstra_path_length(G, n, weight=weight) + else: + try: + length = sp[n] + L = len(length) + except TypeError as err: + raise nx.NetworkXError('Format of "sp" is invalid.') from err + + for d in length.values(): + # Note that this will skip the zero distance from n to itself, + # as it should be, but also zero-weight paths in weighted graphs. + if d != 0: + sum_invd += 1 / d + + if sum_invd != 0: + return order * (order - 1) / sum_invd + if order > 1: + return math.inf + return math.nan + + +@nx._dispatchable(edge_attrs="weight") +def periphery(G, e=None, usebounds=False, weight=None): + """Returns the periphery of the graph G. + + The periphery is the set of nodes with eccentricity equal to the diameter. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + usebounds : bool, optional + If `True`, use extrema bounding (see Notes) when computing the periphery + for undirected graphs. Extrema bounding may accelerate the + distance calculation for some graphs. `usebounds` is ignored if `G` is + directed or if `e` is not `None`. Default is `False`. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + p : list + List of nodes in periphery + + Notes + ----- + When ``usebounds=True``, the computation makes use of smart lower + and upper bounds and is often linear in the number of nodes, rather than + quadratic (except for some border cases such as complete graphs or circle + shaped-graphs). + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.periphery(G) + [2, 5] + + See Also + -------- + barycenter + center + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="periphery", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + diameter = max(e.values()) + p = [v for v in e if e[v] == diameter] + return p + + +@nx._dispatchable(edge_attrs="weight") +def radius(G, e=None, usebounds=False, weight=None): + """Returns the radius of the graph G. + + The radius is the minimum eccentricity. 
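+
+    For instance (an illustrative sketch using the standard ``nx.path_graph``
+    generator): in the path graph on four nodes, the two middle nodes have
+    eccentricity 2, so the radius is 2.
+
+    >>> nx.radius(nx.path_graph(4))
+    2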
+ + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + usebounds : bool, optional + If `True`, use extrema bounding (see Notes) when computing the radius + for undirected graphs. Extrema bounding may accelerate the + distance calculation for some graphs. `usebounds` is ignored if `G` is + directed or if `e` is not `None`. Default is `False`. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + r : integer + Radius of graph + + Notes + ----- + When ``usebounds=True``, the computation makes use of smart lower + and upper bounds and is often linear in the number of nodes, rather than + quadratic (except for some border cases such as complete graphs or circle + shaped-graphs). + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.radius(G) + 2 + + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="radius", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + return min(e.values()) + + +@nx._dispatchable(edge_attrs="weight") +def center(G, e=None, usebounds=False, weight=None): + """Returns the center of the graph G. + + The center is the set of nodes with eccentricity equal to radius. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + usebounds : bool, optional + If `True`, use extrema bounding (see Notes) when computing the center + for undirected graphs. Extrema bounding may accelerate the + distance calculation for some graphs. `usebounds` is ignored if `G` is + directed or if `e` is not `None`. Default is `False`. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. 
+
+    Returns
+    -------
+    c : list
+       List of nodes in center
+
+    Notes
+    -----
+    When ``usebounds=True``, the computation makes use of smart lower
+    and upper bounds and is often linear in the number of nodes, rather than
+    quadratic (except for some border cases such as complete graphs or circle
+    shaped-graphs).
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
+    >>> list(nx.center(G))
+    [1, 3, 4]
+
+    See Also
+    --------
+    barycenter
+    periphery
+    """
+    if usebounds is True and e is None and not G.is_directed():
+        return _extrema_bounding(G, compute="center", weight=weight)
+    if e is None and weight is None and not G.is_directed() and nx.is_tree(G):
+        return _tree_center(G)
+    if e is None:
+        e = eccentricity(G, weight=weight)
+    radius = min(e.values())
+    p = [v for v in e if e[v] == radius]
+    return p
+
+
+@nx._dispatchable(edge_attrs="weight", mutates_input={"attr": 2})
+def barycenter(G, weight=None, attr=None, sp=None):
+    r"""Calculate barycenter of a connected graph, optionally with edge weights.
+
+    The :dfn:`barycenter` of a
+    :func:`connected <networkx.algorithms.components.is_connected>` graph
+    :math:`G` is the subgraph induced by the set of its nodes :math:`v`
+    minimizing the objective function
+
+    .. math::
+
+        \sum_{u \in V(G)} d_G(u, v),
+
+    where :math:`d_G` is the (possibly weighted) :func:`path length
+    <networkx.algorithms.shortest_paths.generic.shortest_path_length>`.
+    The barycenter is also called the :dfn:`median`. See [West01]_, p. 78.
+
+    Parameters
+    ----------
+    G : :class:`networkx.Graph`
+        The connected graph :math:`G`.
+    weight : :class:`str`, optional
+        Passed through to
+        :func:`~networkx.algorithms.shortest_paths.generic.shortest_path_length`.
+    attr : :class:`str`, optional
+        If given, write the value of the objective function to each node's
+        `attr` attribute. Otherwise do not store the value.
+    sp : dict of dicts, optional
+        All pairs shortest path lengths as a dictionary of dictionaries
+
+    Returns
+    -------
+    list
+        Nodes of `G` that induce the barycenter of `G`.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If `G` is disconnected. `G` may appear disconnected to
+        :func:`barycenter` if `sp` is given but is missing shortest path
+        lengths for any pairs.
+    ValueError
+        If `sp` and `weight` are both given.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
+    >>> nx.barycenter(G)
+    [1, 3, 4]
+
+    See Also
+    --------
+    center
+    periphery
+    """
+    if sp is None:
+        sp = nx.shortest_path_length(G, weight=weight)
+    else:
+        sp = sp.items()
+        if weight is not None:
+            raise ValueError("Cannot use both sp, weight arguments together")
+    smallest, barycenter_vertices, n = float("inf"), [], len(G)
+    for v, dists in sp:
+        if len(dists) < n:
+            raise nx.NetworkXNoPath(
+                f"Input graph {G} is disconnected, so every induced subgraph "
+                "has infinite barycentricity."
+            )
+        barycentricity = sum(dists.values())
+        if attr is not None:
+            G.nodes[v][attr] = barycentricity
+        if barycentricity < smallest:
+            smallest = barycentricity
+            barycenter_vertices = [v]
+        elif barycentricity == smallest:
+            barycenter_vertices.append(v)
+    if attr is not None:
+        nx._clear_cache(G)
+    return barycenter_vertices
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def resistance_distance(G, nodeA=None, nodeB=None, weight=None, invert_weight=True):
+    """Returns the resistance distance between pairs of nodes in graph G.
+
+    The resistance distance between two nodes of a graph is akin to treating
+    the graph as a grid of resistors with a resistance equal to the provided
+    weight [1]_, [2]_.
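+
+    For instance (an illustrative sketch using the standard ``nx.cycle_graph``
+    generator): in a cycle on four nodes, two opposite nodes are joined by two
+    parallel paths of resistance 2 each, so their resistance distance is 1.
+
+    >>> round(nx.resistance_distance(nx.cycle_graph(4), 0, 2), 10)
+    1.0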
+ + If weight is not provided, then a weight of 1 is used for all edges. + + If two nodes are the same, the resistance distance is zero. + + Parameters + ---------- + G : NetworkX graph + A graph + + nodeA : node or None, optional (default=None) + A node within graph G. + If None, compute resistance distance using all nodes as source nodes. + + nodeB : node or None, optional (default=None) + A node within graph G. + If None, compute resistance distance using all nodes as target nodes. + + weight : string or None, optional (default=None) + The edge data key used to compute the resistance distance. + If None, then each edge has weight 1. + + invert_weight : boolean (default=True) + Proper calculation of resistance distance requires building the + Laplacian matrix with the reciprocal of the weight. Not required + if the weight is already inverted. Weight cannot be zero. + + Returns + ------- + rd : dict or float + If `nodeA` and `nodeB` are given, resistance distance between `nodeA` + and `nodeB`. If `nodeA` or `nodeB` is unspecified (the default), a + dictionary of nodes with resistance distances as the value. + + Raises + ------ + NetworkXNotImplemented + If `G` is a directed graph. + + NetworkXError + If `G` is not connected, or contains no nodes, + or `nodeA` is not in `G` or `nodeB` is not in `G`. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> round(nx.resistance_distance(G, 1, 3), 10) + 0.625 + + Notes + ----- + The implementation is based on Theorem A in [2]_. Self-loops are ignored. + Multi-edges are contracted in one edge with weight equal to the harmonic sum of the weights. + + References + ---------- + .. [1] Wikipedia + "Resistance distance." + https://en.wikipedia.org/wiki/Resistance_distance + .. [2] D. J. Klein and M. Randic. + Resistance distance. + J. of Math. Chem. 12:81-95, 1993. 
+ """ + import numpy as np + + if len(G) == 0: + raise nx.NetworkXError("Graph G must contain at least one node.") + if not nx.is_connected(G): + raise nx.NetworkXError("Graph G must be strongly connected.") + if nodeA is not None and nodeA not in G: + raise nx.NetworkXError("Node A is not in graph G.") + if nodeB is not None and nodeB not in G: + raise nx.NetworkXError("Node B is not in graph G.") + + G = G.copy() + node_list = list(G) + + # Invert weights + if invert_weight and weight is not None: + if G.is_multigraph(): + for u, v, k, d in G.edges(keys=True, data=True): + d[weight] = 1 / d[weight] + else: + for u, v, d in G.edges(data=True): + d[weight] = 1 / d[weight] + + # Compute resistance distance using the Pseudo-inverse of the Laplacian + # Self-loops are ignored + L = nx.laplacian_matrix(G, weight=weight).todense() + Linv = np.linalg.pinv(L, hermitian=True) + + # Return relevant distances + if nodeA is not None and nodeB is not None: + i = node_list.index(nodeA) + j = node_list.index(nodeB) + return Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i) + + elif nodeA is not None: + i = node_list.index(nodeA) + d = {} + for n in G: + j = node_list.index(n) + d[n] = Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i) + return d + + elif nodeB is not None: + j = node_list.index(nodeB) + d = {} + for n in G: + i = node_list.index(n) + d[n] = Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i) + return d + + else: + d = {} + for n in G: + i = node_list.index(n) + d[n] = {} + for n2 in G: + j = node_list.index(n2) + d[n][n2] = ( + Linv.item(i, i) + + Linv.item(j, j) + - Linv.item(i, j) + - Linv.item(j, i) + ) + return d + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def effective_graph_resistance(G, weight=None, invert_weight=True): + """Returns the Effective graph resistance of G. + + Also known as the Kirchhoff index. + + The effective graph resistance is defined as the sum + of the resistance distance of every node pair in G [1]_. + + If weight is not provided, then a weight of 1 is used for all edges. + + The effective graph resistance of a disconnected graph is infinite. + + Parameters + ---------- + G : NetworkX graph + A graph + + weight : string or None, optional (default=None) + The edge data key used to compute the effective graph resistance. + If None, then each edge has weight 1. + + invert_weight : boolean (default=True) + Proper calculation of resistance distance requires building the + Laplacian matrix with the reciprocal of the weight. Not required + if the weight is already inverted. Weight cannot be zero. + + Returns + ------- + RG : float + The effective graph resistance of `G`. + + Raises + ------ + NetworkXNotImplemented + If `G` is a directed graph. + + NetworkXError + If `G` does not contain any nodes. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> round(nx.effective_graph_resistance(G), 10) + 10.25 + + Notes + ----- + The implementation is based on Theorem 2.2 in [2]_. Self-loops are ignored. + Multi-edges are contracted in one edge with weight equal to the harmonic sum of the weights. + + References + ---------- + .. [1] Wolfram + "Kirchhoff Index." + https://mathworld.wolfram.com/KirchhoffIndex.html + .. [2] W. Ellens, F. M. Spieksma, P. Van Mieghem, A. Jamakovic, R. E. Kooij. + Effective graph resistance. + Lin. Alg. Appl. 435:2491-2506, 2011. 
+ """ + import numpy as np + + if len(G) == 0: + raise nx.NetworkXError("Graph G must contain at least one node.") + + # Disconnected graphs have infinite Effective graph resistance + if not nx.is_connected(G): + return float("inf") + + # Invert weights + G = G.copy() + if invert_weight and weight is not None: + if G.is_multigraph(): + for u, v, k, d in G.edges(keys=True, data=True): + d[weight] = 1 / d[weight] + else: + for u, v, d in G.edges(data=True): + d[weight] = 1 / d[weight] + + # Get Laplacian eigenvalues + mu = np.sort(nx.laplacian_spectrum(G, weight=weight)) + + # Compute Effective graph resistance based on spectrum of the Laplacian + # Self-loops are ignored + return float(np.sum(1 / mu[1:]) * G.number_of_nodes()) + + +@nx.utils.not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def kemeny_constant(G, *, weight=None): + """Returns the Kemeny constant of the given graph. + + The *Kemeny constant* (or Kemeny's constant) of a graph `G` + can be computed by regarding the graph as a Markov chain. + The Kemeny constant is then the expected number of time steps + to transition from a starting state i to a random destination state + sampled from the Markov chain's stationary distribution. + The Kemeny constant is independent of the chosen initial state [1]_. + + The Kemeny constant measures the time needed for spreading + across a graph. Low values indicate a closely connected graph + whereas high values indicate a spread-out graph. + + If weight is not provided, then a weight of 1 is used for all edges. + + Since `G` represents a Markov chain, the weights must be positive. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The edge data key used to compute the Kemeny constant. + If None, then each edge has weight 1. + + Returns + ------- + float + The Kemeny constant of the graph `G`. + + Raises + ------ + NetworkXNotImplemented + If the graph `G` is directed. + + NetworkXError + If the graph `G` is not connected, or contains no nodes, + or has edges with negative weights. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> round(nx.kemeny_constant(G), 10) + 3.2 + + Notes + ----- + The implementation is based on equation (3.3) in [2]_. + Self-loops are allowed and indicate a Markov chain where + the state can remain the same. Multi-edges are contracted + in one edge with weight equal to the sum of the weights. + + References + ---------- + .. [1] Wikipedia + "Kemeny's constant." + https://en.wikipedia.org/wiki/Kemeny%27s_constant + .. [2] Lovász L. + Random walks on graphs: A survey. + Paul Erdös is Eighty, vol. 2, Bolyai Society, + Mathematical Studies, Keszthely, Hungary (1993), pp. 
1-46 + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXError("Graph G must contain at least one node.") + if not nx.is_connected(G): + raise nx.NetworkXError("Graph G must be connected.") + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("The weights of graph G must be nonnegative.") + + # Compute matrix H = D^-1/2 A D^-1/2 + A = nx.adjacency_matrix(G, weight=weight) + n, m = A.shape + diags = A.sum(axis=1) + with np.errstate(divide="ignore"): + diags_sqrt = 1.0 / np.sqrt(diags) + diags_sqrt[np.isinf(diags_sqrt)] = 0 + DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr")) + H = DH @ (A @ DH) + + # Compute eigenvalues of H + eig = np.sort(sp.linalg.eigvalsh(H.todense())) + + # Compute the Kemeny constant + return float(np.sum(1 / (1 - eig[:-1]))) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/distance_regular.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/distance_regular.py new file mode 100644 index 0000000..27b4d02 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/distance_regular.py @@ -0,0 +1,238 @@ +""" +======================= +Distance-regular graphs +======================= +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +from .distance_measures import diameter + +__all__ = [ + "is_distance_regular", + "is_strongly_regular", + "intersection_array", + "global_parameters", +] + + +@nx._dispatchable +def is_distance_regular(G): + """Returns True if the graph is distance regular, False otherwise. + + A connected graph G is distance-regular if for any nodes x,y + and any integers i,j=0,1,...,d (where d is the graph + diameter), the number of vertices at distance i from x and + distance j from y depends only on i,j and the graph distance + between x and y, independently of the choice of x and y. + + Parameters + ---------- + G: Networkx graph (undirected) + + Returns + ------- + bool + True if the graph is Distance Regular, False otherwise + + Examples + -------- + >>> G = nx.hypercube_graph(6) + >>> nx.is_distance_regular(G) + True + + See Also + -------- + intersection_array, global_parameters + + Notes + ----- + For undirected and simple graphs only + + References + ---------- + .. [1] Brouwer, A. E.; Cohen, A. M.; and Neumaier, A. + Distance-Regular Graphs. New York: Springer-Verlag, 1989. + .. [2] Weisstein, Eric W. "Distance-Regular Graph." + http://mathworld.wolfram.com/Distance-RegularGraph.html + + """ + try: + intersection_array(G) + return True + except nx.NetworkXError: + return False + + +def global_parameters(b, c): + """Returns global parameters for a given intersection array. + + Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d + such that for any 2 vertices x,y in G at a distance i=d(x,y), there + are exactly c_i neighbors of y at a distance of i-1 from x and b_i + neighbors of y at a distance of i+1 from x. + + Thus, a distance regular graph has the global parameters, + [[c_0,a_0,b_0],[c_1,a_1,b_1],......,[c_d,a_d,b_d]] for the + intersection array [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d] + where a_i+b_i+c_i=k , k= degree of every vertex. + + Parameters + ---------- + b : list + + c : list + + Returns + ------- + iterable + An iterable over three tuples. 
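+       Each tuple has the form ``(c_i, a_i, b_i)``, where
+       ``a_i = k - b_i - c_i`` and ``k`` is the common degree of the nodes,
+       matching the intersection-array description above.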
+ + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> b, c = nx.intersection_array(G) + >>> list(nx.global_parameters(b, c)) + [(0, 0, 3), (1, 0, 2), (1, 1, 1), (1, 1, 1), (2, 0, 1), (3, 0, 0)] + + References + ---------- + .. [1] Weisstein, Eric W. "Global Parameters." + From MathWorld--A Wolfram Web Resource. + http://mathworld.wolfram.com/GlobalParameters.html + + See Also + -------- + intersection_array + """ + return ((y, b[0] - x - y, x) for x, y in zip(b + [0], [0] + c)) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def intersection_array(G): + """Returns the intersection array of a distance-regular graph. + + Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d + such that for any 2 vertices x,y in G at a distance i=d(x,y), there + are exactly c_i neighbors of y at a distance of i-1 from x and b_i + neighbors of y at a distance of i+1 from x. + + A distance regular graph's intersection array is given by, + [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d] + + Parameters + ---------- + G: Networkx graph (undirected) + + Returns + ------- + b,c: tuple of lists + + Examples + -------- + >>> G = nx.icosahedral_graph() + >>> nx.intersection_array(G) + ([5, 2, 1], [1, 2, 5]) + + References + ---------- + .. [1] Weisstein, Eric W. "Intersection Array." + From MathWorld--A Wolfram Web Resource. + http://mathworld.wolfram.com/IntersectionArray.html + + See Also + -------- + global_parameters + """ + # test for regular graph (all degrees must be equal) + if len(G) == 0: + raise nx.NetworkXPointlessConcept("Graph has no nodes.") + degree = iter(G.degree()) + (_, k) = next(degree) + for _, knext in degree: + if knext != k: + raise nx.NetworkXError("Graph is not distance regular.") + k = knext + path_length = dict(nx.all_pairs_shortest_path_length(G)) + diameter = max(max(path_length[n].values()) for n in path_length) + bint = {} # 'b' intersection array + cint = {} # 'c' intersection array + for u in G: + for v in G: + try: + i = path_length[u][v] + except KeyError as err: # graph must be connected + raise nx.NetworkXError("Graph is not distance regular.") from err + # number of neighbors of v at a distance of i-1 from u + c = len([n for n in G[v] if path_length[n][u] == i - 1]) + # number of neighbors of v at a distance of i+1 from u + b = len([n for n in G[v] if path_length[n][u] == i + 1]) + # b,c are independent of u and v + if cint.get(i, c) != c or bint.get(i, b) != b: + raise nx.NetworkXError("Graph is not distance regular") + bint[i] = b + cint[i] = c + return ( + [bint.get(j, 0) for j in range(diameter)], + [cint.get(j + 1, 0) for j in range(diameter)], + ) + + +# TODO There is a definition for directed strongly regular graphs. +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def is_strongly_regular(G): + """Returns True if and only if the given graph is strongly + regular. + + An undirected graph is *strongly regular* if + + * it is regular, + * each pair of adjacent vertices has the same number of neighbors in + common, + * each pair of nonadjacent vertices has the same number of neighbors + in common. + + Each strongly regular graph is a distance-regular graph. + Conversely, if a distance-regular graph has diameter two, then it is + a strongly regular graph. For more information on distance-regular + graphs, see :func:`is_distance_regular`. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + bool + Whether `G` is strongly regular. 
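+
+    Notes
+    -----
+    In the literature, strongly regular graphs are often described by the
+    parameter tuple ``srg(n, k, lambda, mu)``: ``n`` nodes of common degree
+    ``k``, with every adjacent pair of nodes sharing ``lambda`` common
+    neighbors and every nonadjacent pair sharing ``mu`` common neighbors.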
+ + Examples + -------- + + The cycle graph on five vertices is strongly regular. It is + two-regular, each pair of adjacent vertices has no shared neighbors, + and each pair of nonadjacent vertices has one shared neighbor:: + + >>> G = nx.cycle_graph(5) + >>> nx.is_strongly_regular(G) + True + + """ + # Here is an alternate implementation based directly on the + # definition of strongly regular graphs: + # + # return (all_equal(G.degree().values()) + # and all_equal(len(common_neighbors(G, u, v)) + # for u, v in G.edges()) + # and all_equal(len(common_neighbors(G, u, v)) + # for u, v in non_edges(G))) + # + # We instead use the fact that a distance-regular graph of diameter + # two is strongly regular. + return is_distance_regular(G) and diameter(G) == 2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/dominance.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/dominance.py new file mode 100644 index 0000000..30cb811 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/dominance.py @@ -0,0 +1,135 @@ +""" +Dominance algorithms. +""" + +from functools import reduce + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["immediate_dominators", "dominance_frontiers"] + + +@not_implemented_for("undirected") +@nx._dispatchable +def immediate_dominators(G, start): + """Returns the immediate dominators of all nodes of a directed graph. + + Parameters + ---------- + G : a DiGraph or MultiDiGraph + The graph where dominance is to be computed. + + start : node + The start node of dominance computation. + + Returns + ------- + idom : dict keyed by nodes + A dict containing the immediate dominators of each node reachable from + `start`. + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + NetworkXError + If `start` is not in `G`. + + Notes + ----- + Except for `start`, the immediate dominators are the parents of their + corresponding nodes in the dominator tree. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)]) + >>> sorted(nx.immediate_dominators(G, 1).items()) + [(1, 1), (2, 1), (3, 1), (4, 3), (5, 1)] + + References + ---------- + .. [1] Cooper, Keith D., Harvey, Timothy J. and Kennedy, Ken. + "A simple, fast dominance algorithm." (2006). + https://hdl.handle.net/1911/96345 + """ + if start not in G: + raise nx.NetworkXError("start is not in G") + + idom = {start: start} + + order = list(nx.dfs_postorder_nodes(G, start)) + dfn = {u: i for i, u in enumerate(order)} + order.pop() + order.reverse() + + def intersect(u, v): + while u != v: + while dfn[u] < dfn[v]: + u = idom[u] + while dfn[u] > dfn[v]: + v = idom[v] + return u + + changed = True + while changed: + changed = False + for u in order: + new_idom = reduce(intersect, (v for v in G.pred[u] if v in idom)) + if u not in idom or idom[u] != new_idom: + idom[u] = new_idom + changed = True + + return idom + + +@nx._dispatchable +def dominance_frontiers(G, start): + """Returns the dominance frontiers of all nodes of a directed graph. + + Parameters + ---------- + G : a DiGraph or MultiDiGraph + The graph where dominance is to be computed. + + start : node + The start node of dominance computation. + + Returns + ------- + df : dict keyed by nodes + A dict containing the dominance frontiers of each node reachable from + `start` as lists. + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + NetworkXError + If `start` is not in `G`. 
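+
+    Notes
+    -----
+    The dominance frontier of a node ``u`` is the set of nodes ``w`` such
+    that ``u`` dominates an immediate predecessor of ``w`` but does not
+    strictly dominate ``w`` itself.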
+ + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)]) + >>> sorted((u, sorted(df)) for u, df in nx.dominance_frontiers(G, 1).items()) + [(1, []), (2, [5]), (3, [5]), (4, [5]), (5, [])] + + References + ---------- + .. [1] Cooper, Keith D., Harvey, Timothy J. and Kennedy, Ken. + "A simple, fast dominance algorithm." (2006). + https://hdl.handle.net/1911/96345 + """ + idom = nx.immediate_dominators(G, start) + + df = {u: set() for u in idom} + for u in idom: + if len(G.pred[u]) >= 2: + for v in G.pred[u]: + if v in idom: + while v != idom[u]: + df[v].add(u) + v = idom[v] + return df diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/dominating.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/dominating.py new file mode 100644 index 0000000..41c102a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/dominating.py @@ -0,0 +1,268 @@ +"""Functions for computing dominating sets in a graph.""" + +import math +from heapq import heappop, heappush +from itertools import chain, count + +import networkx as nx + +__all__ = [ + "dominating_set", + "is_dominating_set", + "connected_dominating_set", + "is_connected_dominating_set", +] + + +@nx._dispatchable +def dominating_set(G, start_with=None): + r"""Finds a dominating set for the graph G. + + A *dominating set* for a graph with node set *V* is a subset *D* of + *V* such that every node not in *D* is adjacent to at least one + member of *D* [1]_. + + Parameters + ---------- + G : NetworkX graph + + start_with : node (default=None) + Node to use as a starting point for the algorithm. + + Returns + ------- + D : set + A dominating set for G. + + Notes + ----- + This function is an implementation of algorithm 7 in [2]_ which + finds some dominating set, not necessarily the smallest one. + + See also + -------- + is_dominating_set + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dominating_set + + .. [2] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + all_nodes = set(G) + if start_with is None: + start_with = nx.utils.arbitrary_element(all_nodes) + if start_with not in G: + raise nx.NetworkXError(f"node {start_with} is not in G") + dominating_set = {start_with} + dominated_nodes = set(G[start_with]) + remaining_nodes = all_nodes - dominated_nodes - dominating_set + while remaining_nodes: + # Choose an arbitrary node and determine its undominated neighbors. + v = remaining_nodes.pop() + undominated_nbrs = set(G[v]) - dominating_set + # Add the node to the dominating set and the neighbors to the + # dominated set. Finally, remove all of those nodes from the set + # of remaining nodes. + dominating_set.add(v) + dominated_nodes |= undominated_nbrs + remaining_nodes -= undominated_nbrs + return dominating_set + + +@nx._dispatchable +def is_dominating_set(G, nbunch): + """Checks if `nbunch` is a dominating set for `G`. + + A *dominating set* for a graph with node set *V* is a subset *D* of + *V* such that every node not in *D* is adjacent to at least one + member of *D* [1]_. + + Parameters + ---------- + G : NetworkX graph + + nbunch : iterable + An iterable of nodes in the graph `G`. + + Returns + ------- + dominating : bool + True if `nbunch` is a dominating set of `G`, false otherwise. + + See also + -------- + dominating_set + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Dominating_set
+
+    """
+    testset = {n for n in nbunch if n in G}
+    nbrs = set(chain.from_iterable(G[n] for n in testset))
+    return len(set(G) - testset - nbrs) == 0
+
+
+@nx.utils.not_implemented_for("directed")
+@nx._dispatchable
+def connected_dominating_set(G):
+    """Returns a connected dominating set.
+
+    A *dominating set* for a graph *G* with node set *V* is a subset *D* of *V*
+    such that every node not in *D* is adjacent to at least one member of *D*
+    [1]_. A *connected dominating set* is a dominating set *C* that induces a
+    connected subgraph of *G* [2]_.
+    Note that connected dominating sets are not unique in general, so other
+    connected dominating sets may exist.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected connected graph.
+
+    Returns
+    -------
+    connected_dominating_set : set
+        A dominating set of nodes which induces a connected subgraph of G.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is directed.
+
+    NetworkXError
+        If G is disconnected.
+
+    Examples
+    --------
+    >>> G = nx.Graph(
+    ...     [
+    ...         (1, 2),
+    ...         (1, 3),
+    ...         (1, 4),
+    ...         (1, 5),
+    ...         (1, 6),
+    ...         (2, 7),
+    ...         (3, 8),
+    ...         (4, 9),
+    ...         (5, 10),
+    ...         (6, 11),
+    ...         (7, 12),
+    ...         (8, 12),
+    ...         (9, 12),
+    ...         (10, 12),
+    ...         (11, 12),
+    ...     ]
+    ... )
+    >>> nx.connected_dominating_set(G)
+    {1, 2, 3, 4, 5, 6, 7}
+
+    Notes
+    -----
+    This function implements Algorithm I in its basic version as described
+    in [3]_. The idea behind the algorithm is the following: grow a tree *T*,
+    starting from a node with maximum degree. Throughout the growing process,
+    nonleaf nodes in *T* are our connected dominating set (CDS), leaf nodes in
+    *T* are marked as "seen" and nodes in G that are not yet in *T* are marked as
+    "unseen". We maintain a max-heap of all "seen" nodes, and track the number
+    of "unseen" neighbors for each node. At each step we pop the heap top -- a
+    "seen" (leaf) node with maximal number of "unseen" neighbors, add it to the
+    CDS and mark all its "unseen" neighbors as "seen". For each one of the newly
+    created "seen" nodes, we also decrement the number of "unseen" neighbors for
+    all its neighbors. The algorithm terminates when there are no more "unseen"
+    nodes.
+    Runtime complexity of this implementation is $O(|E|*log|V|)$ (amortized).
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Dominating_set
+    .. [2] https://en.wikipedia.org/wiki/Connected_dominating_set
+    .. [3] Guha, S. and Khuller, S.
+       *Approximation Algorithms for Connected Dominating Sets*,
+       Algorithmica, 20, 374-387, 1998.
+
+    """
+    if len(G) == 0:
+        return set()
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("G must be a connected graph")
+
+    if len(G) == 1:
+        return set(G)
+
+    G_succ = G._adj  # For speed-up
+
+    # Use the count c to avoid comparing nodes
+    c = count()
+
+    # Keep track of the number of unseen nodes adjacent to each node
+    unseen_degree = dict(G.degree)
+
+    # Find node with highest degree and update its neighbors
+    (max_deg_node, max_deg) = max(unseen_degree.items(), key=lambda x: x[1])
+    for nbr in G_succ[max_deg_node]:
+        unseen_degree[nbr] -= 1
+
+    # Initially all nodes except max_deg_node are unseen
+    unseen = set(G) - {max_deg_node}
+
+    # We want a max-heap of the unseen-degree using heapq, which is a min-heap
+    # So we store the negative of the unseen-degree
+    seen = [(-max_deg, next(c), max_deg_node)]
+
+    connected_dominating_set = set()
+
+    # Main loop
+    while unseen:
+        (neg_deg, cnt, u) = heappop(seen)
+        # Check if u's unseen-degree changed while in the heap
+        if -neg_deg > unseen_degree[u]:
+            heappush(seen, (-unseen_degree[u], cnt, u))
+            continue
+        # Mark all u's unseen neighbors as seen and add them to the heap
+        for v in G_succ[u]:
+            if v in unseen:
+                unseen.remove(v)
+                for nbr in G_succ[v]:
+                    unseen_degree[nbr] -= 1
+                heappush(seen, (-unseen_degree[v], next(c), v))
+        # Add u to the dominating set
+        connected_dominating_set.add(u)
+
+    return connected_dominating_set
+
+
+@nx.utils.not_implemented_for("directed")
+@nx._dispatchable
+def is_connected_dominating_set(G, nbunch):
+    """Checks if `nbunch` is a connected dominating set for `G`.
+
+    A *dominating set* for a graph *G* with node set *V* is a subset *D* of
+    *V* such that every node not in *D* is adjacent to at least one
+    member of *D* [1]_. A *connected dominating set* is a dominating
+    set *C* that induces a connected subgraph of *G* [2]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph.
+
+    nbunch : iterable
+        An iterable of nodes in the graph `G`.
+
+    Returns
+    -------
+    connected_dominating : bool
+        True if `nbunch` is a connected dominating set of `G`, false otherwise.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Dominating_set
+    .. [2] https://en.wikipedia.org/wiki/Connected_dominating_set
+
+    """
+    return nx.is_dominating_set(G, nbunch) and nx.is_connected(nx.subgraph(G, nbunch))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/efficiency_measures.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/efficiency_measures.py
new file mode 100644
index 0000000..b8e9d7a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/efficiency_measures.py
@@ -0,0 +1,167 @@
+"""Provides functions for computing the efficiency of nodes and graphs."""
+
+import networkx as nx
+from networkx.exception import NetworkXNoPath
+
+from ..utils import not_implemented_for
+
+__all__ = ["efficiency", "local_efficiency", "global_efficiency"]
+
+
+@not_implemented_for("directed")
+@nx._dispatchable
+def efficiency(G, u, v):
+    """Returns the efficiency of a pair of nodes in a graph.
+
+    The *efficiency* of a pair of nodes is the multiplicative inverse of the
+    shortest path distance between the nodes [1]_. Returns 0 if there is no
+    path between the nodes.
+
+    Parameters
+    ----------
+    G : :class:`networkx.Graph`
+        An undirected graph in which to compute the efficiency of the given
+        pair of nodes.
+    u, v : node
+        Nodes in the graph ``G``.
+
+    Returns
+    -------
+    float
+        Multiplicative inverse of the shortest path distance between the nodes.
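+        Formally ``1 / d(u, v)``, where ``d(u, v)`` is the shortest-path
+        distance between ``u`` and ``v``; taken to be ``0`` when no path
+        exists.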
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.efficiency(G, 2, 3) # this gives efficiency for node 2 and 3 + 0.5 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + local_efficiency + global_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + try: + eff = 1 / nx.shortest_path_length(G, u, v) + except NetworkXNoPath: + eff = 0 + return eff + + +@not_implemented_for("directed") +@nx._dispatchable +def global_efficiency(G): + """Returns the average global efficiency of the graph. + + The *efficiency* of a pair of nodes in a graph is the multiplicative + inverse of the shortest path distance between the nodes. The *average + global efficiency* of a graph is the average efficiency of all pairs of + nodes [1]_. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average global efficiency. + + Returns + ------- + float + The average global efficiency of the graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> round(nx.global_efficiency(G), 12) + 0.916666666667 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + local_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + n = len(G) + denom = n * (n - 1) + if denom != 0: + lengths = nx.all_pairs_shortest_path_length(G) + g_eff = 0 + for source, targets in lengths: + for target, distance in targets.items(): + if distance > 0: + g_eff += 1 / distance + g_eff /= denom + # g_eff = sum(1 / d for s, tgts in lengths + # for t, d in tgts.items() if d > 0) / denom + else: + g_eff = 0 + # TODO This can be made more efficient by computing all pairs shortest + # path lengths in parallel. + return g_eff + + +@not_implemented_for("directed") +@nx._dispatchable +def local_efficiency(G): + """Returns the average local efficiency of the graph. + + The *efficiency* of a pair of nodes in a graph is the multiplicative + inverse of the shortest path distance between the nodes. The *local + efficiency* of a node in the graph is the average global efficiency of the + subgraph induced by the neighbors of the node. The *average local + efficiency* is the average of the local efficiencies of each node [1]_. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average local efficiency. + + Returns + ------- + float + The average local efficiency of the graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.local_efficiency(G) + 0.9166666666666667 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + global_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. 
+
+
+    """
+    efficiency_list = (global_efficiency(G.subgraph(G[v])) for v in G)
+    return sum(efficiency_list) / len(G)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/euler.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/euler.py
new file mode 100644
index 0000000..2c308e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/euler.py
@@ -0,0 +1,470 @@
+"""
+Eulerian circuits and graphs.
+"""
+
+from itertools import combinations
+
+import networkx as nx
+
+from ..utils import arbitrary_element, not_implemented_for
+
+__all__ = [
+    "is_eulerian",
+    "eulerian_circuit",
+    "eulerize",
+    "is_semieulerian",
+    "has_eulerian_path",
+    "eulerian_path",
+]
+
+
+@nx._dispatchable
+def is_eulerian(G):
+    """Returns True if and only if `G` is Eulerian.
+
+    A graph is *Eulerian* if it has an Eulerian circuit. An *Eulerian
+    circuit* is a closed walk that includes each edge of a graph exactly
+    once.
+
+    Graphs with isolated vertices (i.e. vertices with zero degree) are not
+    considered to have Eulerian circuits. Therefore, if the graph is not
+    connected (or not strongly connected, for directed graphs), this function
+    returns False.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+       A graph, either directed or undirected.
+
+    Examples
+    --------
+    >>> nx.is_eulerian(nx.DiGraph({0: [3], 1: [2], 2: [3], 3: [0, 1]}))
+    True
+    >>> nx.is_eulerian(nx.complete_graph(5))
+    True
+    >>> nx.is_eulerian(nx.petersen_graph())
+    False
+
+    If you prefer to allow graphs with isolated vertices to have Eulerian circuits,
+    you can first remove such vertices and then call `is_eulerian` as the example below shows.
+
+    >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)])
+    >>> G.add_node(3)
+    >>> nx.is_eulerian(G)
+    False
+
+    >>> G.remove_nodes_from(list(nx.isolates(G)))
+    >>> nx.is_eulerian(G)
+    True
+
+
+    """
+    if G.is_directed():
+        # Every node must have equal in degree and out degree and the
+        # graph must be strongly connected
+        return all(
+            G.in_degree(n) == G.out_degree(n) for n in G
+        ) and nx.is_strongly_connected(G)
+    # An undirected Eulerian graph has no vertices of odd degree and
+    # must be connected.
+    return all(d % 2 == 0 for v, d in G.degree()) and nx.is_connected(G)
+
+
+@nx._dispatchable
+def is_semieulerian(G):
+    """Return True iff `G` is semi-Eulerian.
+
+    G is semi-Eulerian if it has an Eulerian path but no Eulerian circuit.
+
+    See Also
+    --------
+    has_eulerian_path
+    is_eulerian
+    """
+    return has_eulerian_path(G) and not is_eulerian(G)
+
+
+def _find_path_start(G):
+    """Return a suitable starting vertex for an Eulerian path.
+
+    If no path exists, return None.
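+
+    If the graph is Eulerian, any node can serve as the start, so an
+    arbitrary one is returned. Otherwise the start must be an endpoint of
+    the path: in the directed case the node with
+    ``out_degree - in_degree == 1``, in the undirected case one of the two
+    odd-degree nodes.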
+ """ + if not has_eulerian_path(G): + return None + + if is_eulerian(G): + return arbitrary_element(G) + + if G.is_directed(): + v1, v2 = (v for v in G if G.in_degree(v) != G.out_degree(v)) + # Determines which is the 'start' node (as opposed to the 'end') + if G.out_degree(v1) > G.in_degree(v1): + return v1 + else: + return v2 + + else: + # In an undirected graph randomly choose one of the possibilities + start = [v for v in G if G.degree(v) % 2 != 0][0] + return start + + +def _simplegraph_eulerian_circuit(G, source): + if G.is_directed(): + degree = G.out_degree + edges = G.out_edges + else: + degree = G.degree + edges = G.edges + vertex_stack = [source] + last_vertex = None + while vertex_stack: + current_vertex = vertex_stack[-1] + if degree(current_vertex) == 0: + if last_vertex is not None: + yield (last_vertex, current_vertex) + last_vertex = current_vertex + vertex_stack.pop() + else: + _, next_vertex = arbitrary_element(edges(current_vertex)) + vertex_stack.append(next_vertex) + G.remove_edge(current_vertex, next_vertex) + + +def _multigraph_eulerian_circuit(G, source): + if G.is_directed(): + degree = G.out_degree + edges = G.out_edges + else: + degree = G.degree + edges = G.edges + vertex_stack = [(source, None)] + last_vertex = None + last_key = None + while vertex_stack: + current_vertex, current_key = vertex_stack[-1] + if degree(current_vertex) == 0: + if last_vertex is not None: + yield (last_vertex, current_vertex, last_key) + last_vertex, last_key = current_vertex, current_key + vertex_stack.pop() + else: + triple = arbitrary_element(edges(current_vertex, keys=True)) + _, next_vertex, next_key = triple + vertex_stack.append((next_vertex, next_key)) + G.remove_edge(current_vertex, next_vertex, next_key) + + +@nx._dispatchable +def eulerian_circuit(G, source=None, keys=False): + """Returns an iterator over the edges of an Eulerian circuit in `G`. + + An *Eulerian circuit* is a closed walk that includes each edge of a + graph exactly once. + + Parameters + ---------- + G : NetworkX graph + A graph, either directed or undirected. + + source : node, optional + Starting node for circuit. + + keys : bool + If False, edges generated by this function will be of the form + ``(u, v)``. Otherwise, edges will be of the form ``(u, v, k)``. + This option is ignored unless `G` is a multigraph. + + Returns + ------- + edges : iterator + An iterator over edges in the Eulerian circuit. + + Raises + ------ + NetworkXError + If the graph is not Eulerian. + + See Also + -------- + is_eulerian + + Notes + ----- + This is a linear time implementation of an algorithm adapted from [1]_. + + For general information about Euler tours, see [2]_. + + References + ---------- + .. [1] J. Edmonds, E. L. Johnson. + Matching, Euler tours and the Chinese postman. + Mathematical programming, Volume 5, Issue 1 (1973), 111-114. + .. 
[2] https://en.wikipedia.org/wiki/Eulerian_path
+
+    Examples
+    --------
+    To get an Eulerian circuit in an undirected graph::
+
+    >>> G = nx.complete_graph(3)
+    >>> list(nx.eulerian_circuit(G))
+    [(0, 2), (2, 1), (1, 0)]
+    >>> list(nx.eulerian_circuit(G, source=1))
+    [(1, 2), (2, 0), (0, 1)]
+
+    To get the sequence of vertices in an Eulerian circuit::
+
+    >>> [u for u, v in nx.eulerian_circuit(G)]
+    [0, 2, 1]
+
+    """
+    if not is_eulerian(G):
+        raise nx.NetworkXError("G is not Eulerian.")
+    if G.is_directed():
+        G = G.reverse()
+    else:
+        G = G.copy()
+    if source is None:
+        source = arbitrary_element(G)
+    if G.is_multigraph():
+        for u, v, k in _multigraph_eulerian_circuit(G, source):
+            if keys:
+                yield u, v, k
+            else:
+                yield u, v
+    else:
+        yield from _simplegraph_eulerian_circuit(G, source)
+
+
+@nx._dispatchable
+def has_eulerian_path(G, source=None):
+    """Return True iff `G` has an Eulerian path.
+
+    An Eulerian path is a path in a graph which uses each edge of a graph
+    exactly once. If `source` is specified, then this function checks
+    whether an Eulerian path that starts at node `source` exists.
+
+    A directed graph has an Eulerian path iff:
+        - at most one vertex has out_degree - in_degree = 1,
+        - at most one vertex has in_degree - out_degree = 1,
+        - every other vertex has equal in_degree and out_degree,
+        - and all of its vertices belong to a single connected
+          component of the underlying undirected graph.
+
+    If `source` is not None, an Eulerian path starting at `source` exists if no
+    other node has out_degree - in_degree = 1. This is equivalent to either
+    there exists an Eulerian circuit or `source` has out_degree - in_degree = 1
+    and the conditions above hold.
+
+    An undirected graph has an Eulerian path iff:
+        - exactly zero or two vertices have odd degree,
+        - and all of its vertices belong to a single connected component.
+
+    If `source` is not None, an Eulerian path starting at `source` exists if
+    either there exists an Eulerian circuit or `source` has an odd degree and the
+    conditions above hold.
+
+    Graphs with isolated vertices (i.e. vertices with zero degree) are not considered
+    to have an Eulerian path. Therefore, if the graph is not connected (or not strongly
+    connected, for directed graphs), this function returns False.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        The graph in which to find an Eulerian path.
+
+    source : node, optional
+        Starting node for path.
+
+    Returns
+    -------
+    Bool : True if G has an Eulerian path.
+
+    Examples
+    --------
+    If you prefer to allow graphs with isolated vertices to have an Eulerian path,
+    you can first remove such vertices and then call `has_eulerian_path` as the example below shows.
+
+    >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)])
+    >>> G.add_node(3)
+    >>> nx.has_eulerian_path(G)
+    False
+
+    >>> G.remove_nodes_from(list(nx.isolates(G)))
+    >>> nx.has_eulerian_path(G)
+    True
+
+    See Also
+    --------
+    is_eulerian
+    eulerian_path
+    """
+    if nx.is_eulerian(G):
+        return True
+
+    if G.is_directed():
+        ins = G.in_degree
+        outs = G.out_degree
+        # Since we know it is not eulerian, outs - ins must be 1 for source
+        if source is not None and outs[source] - ins[source] != 1:
+            return False
+
+        unbalanced_ins = 0
+        unbalanced_outs = 0
+        for v in G:
+            if ins[v] - outs[v] == 1:
+                unbalanced_ins += 1
+            elif outs[v] - ins[v] == 1:
+                unbalanced_outs += 1
+            elif ins[v] != outs[v]:
+                return False
+
+        return (
+            unbalanced_ins <= 1 and unbalanced_outs <= 1 and nx.is_weakly_connected(G)
+        )
+    else:
+        # We know it is not eulerian, so degree of source must be odd.
+        if source is not None and G.degree[source] % 2 != 1:
+            return False
+
+        # Sum is 2 since we know it is not eulerian (which implies sum is 0)
+        return sum(d % 2 == 1 for v, d in G.degree()) == 2 and nx.is_connected(G)
+
+
+@nx._dispatchable
+def eulerian_path(G, source=None, keys=False):
+    """Return an iterator over the edges of an Eulerian path in `G`.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        The graph in which to look for an Eulerian path.
+    source : node or None (default: None)
+        The node at which to start the search. None means search over all
+        starting nodes.
+    keys : Bool (default: False)
+        Indicates whether to yield edge 3-tuples (u, v, edge_key).
+        The default yields edge 2-tuples.
+
+    Yields
+    ------
+    Edge tuples along the Eulerian path.
+
+    Warning: If the `source` provided is not the start node of an Eulerian
+    path, an error is raised even if an Eulerian path exists.
+    """
+    if not has_eulerian_path(G, source):
+        raise nx.NetworkXError("Graph has no Eulerian paths.")
+    if G.is_directed():
+        G = G.reverse()
+        if source is None or nx.is_eulerian(G) is False:
+            source = _find_path_start(G)
+        if G.is_multigraph():
+            for u, v, k in _multigraph_eulerian_circuit(G, source):
+                if keys:
+                    yield u, v, k
+                else:
+                    yield u, v
+        else:
+            yield from _simplegraph_eulerian_circuit(G, source)
+    else:
+        G = G.copy()
+        if source is None:
+            source = _find_path_start(G)
+        if G.is_multigraph():
+            if keys:
+                yield from reversed(
+                    [(v, u, k) for u, v, k in _multigraph_eulerian_circuit(G, source)]
+                )
+            else:
+                yield from reversed(
+                    [(v, u) for u, v, k in _multigraph_eulerian_circuit(G, source)]
+                )
+        else:
+            yield from reversed(
+                [(v, u) for u, v in _simplegraph_eulerian_circuit(G, source)]
+            )
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(returns_graph=True)
+def eulerize(G):
+    """Transforms a graph into an Eulerian graph.
+
+    If `G` is Eulerian the result is `G` as a MultiGraph, otherwise the result is a smallest
+    (in terms of the number of edges) multigraph whose underlying simple graph is `G`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+       An undirected graph
+
+    Returns
+    -------
+    G : NetworkX multigraph
+
+    Raises
+    ------
+    NetworkXError
+       If the graph is not connected.
+
+    See Also
+    --------
+    is_eulerian
+    eulerian_circuit
+
+    References
+    ----------
+    .. [1] J. Edmonds, E. L. Johnson.
+       Matching, Euler tours and the Chinese postman.
+       Mathematical programming, Volume 5, Issue 1 (1973), 111-114.
+    .. [2] https://en.wikipedia.org/wiki/Eulerian_path
+    ..
[3] http://web.math.princeton.edu/math_alive/5/Notes1.pdf + + Examples + -------- + >>> G = nx.complete_graph(10) + >>> H = nx.eulerize(G) + >>> nx.is_eulerian(H) + True + + """ + if G.order() == 0: + raise nx.NetworkXPointlessConcept("Cannot Eulerize null graph") + if not nx.is_connected(G): + raise nx.NetworkXError("G is not connected") + odd_degree_nodes = [n for n, d in G.degree() if d % 2 == 1] + G = nx.MultiGraph(G) + if len(odd_degree_nodes) == 0: + return G + + # get all shortest paths between vertices of odd degree + odd_deg_pairs_paths = [ + (m, {n: nx.shortest_path(G, source=m, target=n)}) + for m, n in combinations(odd_degree_nodes, 2) + ] + + # use the number of vertices in a graph + 1 as an upper bound on + # the maximum length of a path in G + upper_bound_on_max_path_length = len(G) + 1 + + # use "len(G) + 1 - len(P)", + # where P is a shortest path between vertices n and m, + # as edge-weights in a new graph + # store the paths in the graph for easy indexing later + Gp = nx.Graph() + for n, Ps in odd_deg_pairs_paths: + for m, P in Ps.items(): + if n != m: + Gp.add_edge( + m, n, weight=upper_bound_on_max_path_length - len(P), path=P + ) + + # find the minimum weight matching of edges in the weighted graph + best_matching = nx.Graph(list(nx.max_weight_matching(Gp))) + + # duplicate each edge along each path in the set of paths in Gp + for m, n in best_matching.edges(): + path = Gp[m][n]["path"] + G.add_edges_from(nx.utils.pairwise(path)) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__init__.py new file mode 100644 index 0000000..c5d19ab --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__init__.py @@ -0,0 +1,11 @@ +from .maxflow import * +from .mincost import * +from .boykovkolmogorov import * +from .dinitz_alg import * +from .edmondskarp import * +from .gomory_hu import * +from .preflowpush import * +from .shortestaugmentingpath import * +from .capacityscaling import * +from .networksimplex import * +from .utils import build_flow_dict, build_residual_network diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..3ac682c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-312.pyc new file mode 100644 index 0000000..f1493ed Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-312.pyc new file mode 100644 index 0000000..554874e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-312.pyc new file mode 100644 index 0000000..b876aa9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/edmondskarp.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/edmondskarp.cpython-312.pyc new file mode 100644 index 0000000..9fa503c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/edmondskarp.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-312.pyc new file mode 100644 index 0000000..6427f65 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-312.pyc new file mode 100644 index 0000000..8a6c735 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-312.pyc new file mode 100644 index 0000000..174eb07 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-312.pyc new file mode 100644 index 0000000..b6f819c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-312.pyc new file mode 100644 index 0000000..8d9260f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-312.pyc new file mode 100644 index 0000000..eea7e89 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..2490dbc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/boykovkolmogorov.py 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/boykovkolmogorov.py new file mode 100644 index 0000000..30899c6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/boykovkolmogorov.py @@ -0,0 +1,370 @@ +""" +Boykov-Kolmogorov algorithm for maximum flow problems. +""" + +from collections import deque +from operator import itemgetter + +import networkx as nx +from networkx.algorithms.flow.utils import build_residual_network + +__all__ = ["boykov_kolmogorov"] + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def boykov_kolmogorov( + G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None +): + r"""Find a maximum single-commodity flow using Boykov-Kolmogorov algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has worse case complexity $O(n^2 m |C|)$ for $n$ nodes, $m$ + edges, and $|C|$ the cost of the minimum cut [1]_. This implementation + uses the marking heuristic defined in [2]_ which improves its running + time in many practical problems. + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. 
This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import boykov_kolmogorov + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = boykov_kolmogorov(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + A nice feature of the Boykov-Kolmogorov algorithm is that a partition + of the nodes that defines a minimum cut can be easily computed based + on the search trees used during the algorithm. These trees are stored + in the graph attribute `trees` of the residual network. + + >>> source_tree, target_tree = R.graph["trees"] + >>> partition = (set(source_tree), set(G) - set(source_tree)) + + Or equivalently: + + >>> partition = (set(G) - set(target_tree), set(target_tree)) + + References + ---------- + .. [1] Boykov, Y., & Kolmogorov, V. (2004). An experimental comparison + of min-cut/max-flow algorithms for energy minimization in vision. + Pattern Analysis and Machine Intelligence, IEEE Transactions on, + 26(9), 1124-1137. + https://doi.org/10.1109/TPAMI.2004.60 + + .. [2] Vladimir Kolmogorov. Graph-based Algorithms for Multi-camera + Reconstruction Problem. PhD thesis, Cornell University, CS Department, + 2003. pp. 109-114. + https://web.archive.org/web/20170809091249/https://pub.ist.ac.at/~vnk/papers/thesis.pdf + + """ + R = boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "boykov_kolmogorov" + nx._clear_cache(R) + return R + + +def boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff): + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + # This is way too slow + # nx.set_edge_attributes(R, 0, 'flow') + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + # Use an arbitrary high value as infinite. It is computed + # when building the residual network. + INF = R.graph["inf"] + + if cutoff is None: + cutoff = INF + + R_succ = R.succ + R_pred = R.pred + + def grow(): + """Bidirectional breadth-first search for the growth stage. + + Returns a connecting edge, that is and edge that connects + a node from the source search tree with a node from the + target search tree. 
+ The first node in the connecting edge is always from the + source tree and the last node from the target tree. + """ + while active: + u = active[0] + if u in source_tree: + this_tree = source_tree + other_tree = target_tree + neighbors = R_succ + else: + this_tree = target_tree + other_tree = source_tree + neighbors = R_pred + for v, attr in neighbors[u].items(): + if attr["capacity"] - attr["flow"] > 0: + if v not in this_tree: + if v in other_tree: + return (u, v) if this_tree is source_tree else (v, u) + this_tree[v] = u + dist[v] = dist[u] + 1 + timestamp[v] = timestamp[u] + active.append(v) + elif v in this_tree and _is_closer(u, v): + this_tree[v] = u + dist[v] = dist[u] + 1 + timestamp[v] = timestamp[u] + _ = active.popleft() + return None, None + + def augment(u, v): + """Augmentation stage. + + Reconstruct path and determine its residual capacity. + We start from a connecting edge, which links a node + from the source tree to a node from the target tree. + The connecting edge is the output of the grow function + and the input of this function. + """ + attr = R_succ[u][v] + flow = min(INF, attr["capacity"] - attr["flow"]) + path = [u] + # Trace a path from u to s in source_tree. + w = u + while w != s: + n = w + w = source_tree[n] + attr = R_pred[n][w] + flow = min(flow, attr["capacity"] - attr["flow"]) + path.append(w) + path.reverse() + # Trace a path from v to t in target_tree. + path.append(v) + w = v + while w != t: + n = w + w = target_tree[n] + attr = R_succ[n][w] + flow = min(flow, attr["capacity"] - attr["flow"]) + path.append(w) + # Augment flow along the path and check for saturated edges. + it = iter(path) + u = next(it) + these_orphans = [] + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + if R_succ[u][v]["flow"] == R_succ[u][v]["capacity"]: + if v in source_tree: + source_tree[v] = None + these_orphans.append(v) + if u in target_tree: + target_tree[u] = None + these_orphans.append(u) + u = v + orphans.extend(sorted(these_orphans, key=dist.get)) + return flow + + def adopt(): + """Adoption stage. + + Reconstruct search trees by adopting or discarding orphans. + During augmentation stage some edges got saturated and thus + the source and target search trees broke down to forests, with + orphans as roots of some of its trees. We have to reconstruct + the search trees rooted to source and target before we can grow + them again. 
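+
+        Orphans are re-attached to the first admissible neighbor found
+        (one with remaining residual capacity and a valid root); candidate
+        neighbors are scanned in order of increasing distance label, which
+        tends to keep the rebuilt trees shallow.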
+ """ + while orphans: + u = orphans.popleft() + if u in source_tree: + tree = source_tree + neighbors = R_pred + else: + tree = target_tree + neighbors = R_succ + nbrs = ((n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree) + for v, attr, d in sorted(nbrs, key=itemgetter(2)): + if attr["capacity"] - attr["flow"] > 0: + if _has_valid_root(v, tree): + tree[u] = v + dist[u] = dist[v] + 1 + timestamp[u] = time + break + else: + nbrs = ( + (n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree + ) + for v, attr, d in sorted(nbrs, key=itemgetter(2)): + if attr["capacity"] - attr["flow"] > 0: + if v not in active: + active.append(v) + if tree[v] == u: + tree[v] = None + orphans.appendleft(v) + if u in active: + active.remove(u) + del tree[u] + + def _has_valid_root(n, tree): + path = [] + v = n + while v is not None: + path.append(v) + if v in (s, t): + base_dist = 0 + break + elif timestamp[v] == time: + base_dist = dist[v] + break + v = tree[v] + else: + return False + length = len(path) + for i, u in enumerate(path, 1): + dist[u] = base_dist + length - i + timestamp[u] = time + return True + + def _is_closer(u, v): + return timestamp[v] <= timestamp[u] and dist[v] > dist[u] + 1 + + source_tree = {s: None} + target_tree = {t: None} + active = deque([s, t]) + orphans = deque() + flow_value = 0 + # data structures for the marking heuristic + time = 1 + timestamp = {s: time, t: time} + dist = {s: 0, t: 0} + while flow_value < cutoff: + # Growth stage + u, v = grow() + if u is None: + break + time += 1 + # Augmentation stage + flow_value += augment(u, v) + # Adoption stage + adopt() + + if flow_value * 2 > INF: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + + # Add source and target tree in a graph attribute. + # A partition that defines a minimum cut can be directly + # computed from the search trees as explained in the docstrings. + R.graph["trees"] = (source_tree, target_tree) + # Add the standard flow_value graph attribute. + R.graph["flow_value"] = flow_value + return R diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/capacityscaling.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/capacityscaling.py new file mode 100644 index 0000000..bf68565 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/capacityscaling.py @@ -0,0 +1,407 @@ +""" +Capacity scaling minimum cost flow algorithm. +""" + +__all__ = ["capacity_scaling"] + +from itertools import chain +from math import log + +import networkx as nx + +from ...utils import BinaryHeap, arbitrary_element, not_implemented_for + + +def _detect_unboundedness(R): + """Detect infinite-capacity negative cycles.""" + G = nx.DiGraph() + G.add_nodes_from(R) + + # Value simulating infinity. + inf = R.graph["inf"] + # True infinity. + f_inf = float("inf") + for u in R: + for v, e in R[u].items(): + # Compute the minimum weight of infinite-capacity (u, v) edges. + w = f_inf + for k, e in e.items(): + if e["capacity"] == inf: + w = min(w, e["weight"]) + if w != f_inf: + G.add_edge(u, v, weight=w) + + if nx.negative_edge_cycle(G): + raise nx.NetworkXUnbounded( + "Negative cost cycle of infinite capacity found. " + "Min cost flow may be unbounded below." 
+ ) + + +@not_implemented_for("undirected") +def _build_residual_network(G, demand, capacity, weight): + """Build a residual network and initialize a zero flow.""" + if sum(G.nodes[u].get(demand, 0) for u in G) != 0: + raise nx.NetworkXUnfeasible("Sum of the demands should be 0.") + + R = nx.MultiDiGraph() + R.add_nodes_from( + (u, {"excess": -G.nodes[u].get(demand, 0), "potential": 0}) for u in G + ) + + inf = float("inf") + # Detect selfloops with infinite capacities and negative weights. + for u, v, e in nx.selfloop_edges(G, data=True): + if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf: + raise nx.NetworkXUnbounded( + "Negative cost cycle of infinite capacity found. " + "Min cost flow may be unbounded below." + ) + + # Extract edges with positive capacities. Self loops excluded. + if G.is_multigraph(): + edge_list = [ + (u, v, k, e) + for u, v, k, e in G.edges(data=True, keys=True) + if u != v and e.get(capacity, inf) > 0 + ] + else: + edge_list = [ + (u, v, 0, e) + for u, v, e in G.edges(data=True) + if u != v and e.get(capacity, inf) > 0 + ] + # Simulate infinity with the larger of the sum of absolute node imbalances + # the sum of finite edge capacities or any positive value if both sums are + # zero. This allows the infinite-capacity edges to be distinguished for + # unboundedness detection and directly participate in residual capacity + # calculation. + inf = ( + max( + sum(abs(R.nodes[u]["excess"]) for u in R), + 2 + * sum( + e[capacity] + for u, v, k, e in edge_list + if capacity in e and e[capacity] != inf + ), + ) + or 1 + ) + for u, v, k, e in edge_list: + r = min(e.get(capacity, inf), inf) + w = e.get(weight, 0) + # Add both (u, v) and (v, u) into the residual network marked with the + # original key. (key[1] == True) indicates the (u, v) is in the + # original network. + R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0) + R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0) + + # Record the value simulating infinity. + R.graph["inf"] = inf + + _detect_unboundedness(R) + + return R + + +def _build_flow_dict(G, R, capacity, weight): + """Build a flow dictionary from a residual network.""" + inf = float("inf") + flow_dict = {} + if G.is_multigraph(): + for u in G: + flow_dict[u] = {} + for v, es in G[u].items(): + flow_dict[u][v] = { + # Always saturate negative selfloops. + k: ( + 0 + if ( + u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0 + ) + else e[capacity] + ) + for k, e in es.items() + } + for v, es in R[u].items(): + if v in flow_dict[u]: + flow_dict[u][v].update( + (k[0], e["flow"]) for k, e in es.items() if e["flow"] > 0 + ) + else: + for u in G: + flow_dict[u] = { + # Always saturate negative selfloops. + v: ( + 0 + if (u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0) + else e[capacity] + ) + for v, e in G[u].items() + } + flow_dict[u].update( + (v, e["flow"]) + for v, es in R[u].items() + for e in es.values() + if e["flow"] > 0 + ) + return flow_dict + + +@nx._dispatchable( + node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0} +) +def capacity_scaling( + G, demand="demand", capacity="capacity", weight="weight", heap=BinaryHeap +): + r"""Find a minimum cost flow satisfying all demands in digraph G. + + This is a capacity scaling successive shortest augmenting path algorithm. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. 
A negative demand means that the node wants to send flow, a
+    positive demand means that the node wants to receive flow. A flow on
+    the digraph G satisfies all demands if the net flow into each node
+    is equal to the demand of that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph or MultiDiGraph on which a minimum cost flow satisfying all
+        demands is to be found.
+
+    demand : string
+        Nodes of the graph G are expected to have an attribute demand
+        that indicates how much flow a node wants to send (negative
+        demand) or receive (positive demand). Note that the sum of the
+        demands should be 0; otherwise the problem is not feasible. If
+        this attribute is not present, a node is considered to have 0
+        demand. Default value: 'demand'.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    heap : class
+        Type of heap to be used in the algorithm. It should be a subclass of
+        :class:`MinHeap` or implement a compatible interface.
+
+        If a stock heap implementation is to be used, :class:`BinaryHeap` is
+        recommended over :class:`PairingHeap` for Python implementations
+        without optimized attribute accesses (e.g., CPython) despite a slower
+        asymptotic running time. For Python implementations with optimized
+        attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better
+        performance. Default value: :class:`BinaryHeap`.
+
+    Returns
+    -------
+    flowCost : integer
+        Cost of a minimum cost flow satisfying all demands.
+
+    flowDict : dictionary
+        If G is a digraph, a dict-of-dicts keyed by nodes such that
+        flowDict[u][v] is the flow on edge (u, v).
+        If G is a MultiDiGraph, a dict-of-dicts-of-dicts keyed by nodes
+        so that flowDict[u][v][key] is the flow on edge (u, v, key).
+
+    Raises
+    ------
+    NetworkXError
+        This exception is raised if the input graph is not directed or
+        not connected.
+
+    NetworkXUnfeasible
+        This exception is raised in the following situations:
+
+        * The sum of the demands is not zero. Then, there is no
+          flow satisfying all demands.
+        * There is no flow satisfying all demands.
+
+    NetworkXUnbounded
+        This exception is raised if the digraph G has a cycle of
+        negative cost and infinite capacity. Then, the cost of a flow
+        satisfying all demands is unbounded below.
+
+    Notes
+    -----
+    This algorithm does not work if edge weights are floating-point numbers.
+
+    See also
+    --------
+    :meth:`network_simplex`
+
+    Examples
+    --------
+    A simple example of a min cost flow problem.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowCost, flowDict = nx.capacity_scaling(G)
+    >>> flowCost
+    24
+    >>> flowDict
+    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
+
+    It is possible to change the name of the attributes used for the
+    algorithm.
+ + >>> G = nx.DiGraph() + >>> G.add_node("p", spam=-4) + >>> G.add_node("q", spam=2) + >>> G.add_node("a", spam=-2) + >>> G.add_node("d", spam=-1) + >>> G.add_node("t", spam=2) + >>> G.add_node("w", spam=3) + >>> G.add_edge("p", "q", cost=7, vacancies=5) + >>> G.add_edge("p", "a", cost=1, vacancies=4) + >>> G.add_edge("q", "d", cost=2, vacancies=3) + >>> G.add_edge("t", "q", cost=1, vacancies=2) + >>> G.add_edge("a", "t", cost=2, vacancies=4) + >>> G.add_edge("d", "w", cost=3, vacancies=4) + >>> G.add_edge("t", "w", cost=4, vacancies=1) + >>> flowCost, flowDict = nx.capacity_scaling( + ... G, demand="spam", capacity="vacancies", weight="cost" + ... ) + >>> flowCost + 37 + >>> flowDict + {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}} + """ + R = _build_residual_network(G, demand, capacity, weight) + + inf = float("inf") + # Account cost of negative selfloops. + flow_cost = sum( + 0 + if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0 + else e[capacity] * e[weight] + for u, v, e in nx.selfloop_edges(G, data=True) + ) + + # Determine the maximum edge capacity. + wmax = max(chain([-inf], (e["capacity"] for u, v, e in R.edges(data=True)))) + if wmax == -inf: + # Residual network has no edges. + return flow_cost, _build_flow_dict(G, R, capacity, weight) + + R_nodes = R.nodes + R_succ = R.succ + + delta = 2 ** int(log(wmax, 2)) + while delta >= 1: + # Saturate Δ-residual edges with negative reduced costs to achieve + # Δ-optimality. + for u in R: + p_u = R_nodes[u]["potential"] + for v, es in R_succ[u].items(): + for k, e in es.items(): + flow = e["capacity"] - e["flow"] + if e["weight"] - p_u + R_nodes[v]["potential"] < 0: + flow = e["capacity"] - e["flow"] + if flow >= delta: + e["flow"] += flow + R_succ[v][u][(k[0], not k[1])]["flow"] -= flow + R_nodes[u]["excess"] -= flow + R_nodes[v]["excess"] += flow + # Determine the Δ-active nodes. + S = set() + T = set() + S_add = S.add + S_remove = S.remove + T_add = T.add + T_remove = T.remove + for u in R: + excess = R_nodes[u]["excess"] + if excess >= delta: + S_add(u) + elif excess <= -delta: + T_add(u) + # Repeatedly augment flow from S to T along shortest paths until + # Δ-feasibility is achieved. + while S and T: + s = arbitrary_element(S) + t = None + # Search for a shortest path in terms of reduce costs from s to + # any t in T in the Δ-residual network. + d = {} + pred = {s: None} + h = heap() + h_insert = h.insert + h_get = h.get + h_insert(s, 0) + while h: + u, d_u = h.pop() + d[u] = d_u + if u in T: + # Path found. + t = u + break + p_u = R_nodes[u]["potential"] + for v, es in R_succ[u].items(): + if v in d: + continue + wmin = inf + # Find the minimum-weighted (u, v) Δ-residual edge. + for k, e in es.items(): + if e["capacity"] - e["flow"] >= delta: + w = e["weight"] + if w < wmin: + wmin = w + kmin = k + emin = e + if wmin == inf: + continue + # Update the distance label of v. + d_v = d_u + wmin - p_u + R_nodes[v]["potential"] + if h_insert(v, d_v): + pred[v] = (u, kmin, emin) + if t is not None: + # Augment Δ units of flow from s to t. + while u != s: + v = u + u, k, e = pred[v] + e["flow"] += delta + R_succ[v][u][(k[0], not k[1])]["flow"] -= delta + # Account node excess and deficit. + R_nodes[s]["excess"] -= delta + R_nodes[t]["excess"] += delta + if R_nodes[s]["excess"] < delta: + S_remove(s) + if R_nodes[t]["excess"] > -delta: + T_remove(t) + # Update node potentials. + d_t = d[t] + for u, d_u in d.items(): + R_nodes[u]["potential"] -= d_u - d_t + else: + # Path not found. 
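+                # No node in T is reachable from s in the Δ-residual
+                # network, so s cannot shed its excess during this phase;
+                # drop it from the Δ-active set and let a smaller Δ
+                # handle the remaining excess.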
+ S_remove(s) + delta //= 2 + + if any(R.nodes[u]["excess"] != 0 for u in R): + raise nx.NetworkXUnfeasible("No flow satisfying all demands.") + + # Calculate the flow cost. + for u in R: + for v, es in R_succ[u].items(): + for e in es.values(): + flow = e["flow"] + if flow > 0: + flow_cost += flow * e["weight"] + + return flow_cost, _build_flow_dict(G, R, capacity, weight) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/dinitz_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/dinitz_alg.py new file mode 100644 index 0000000..f369642 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/dinitz_alg.py @@ -0,0 +1,238 @@ +""" +Dinitz' algorithm for maximum flow problems. +""" + +from collections import deque + +import networkx as nx +from networkx.algorithms.flow.utils import build_residual_network +from networkx.utils import pairwise + +__all__ = ["dinitz"] + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def dinitz(G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None): + """Find a maximum single-commodity flow using Dinitz' algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$ + edges [1]_. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. 
+ + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import dinitz + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = dinitz(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + References + ---------- + .. [1] Dinitz' Algorithm: The Original Version and Even's Version. + 2006. Yefim Dinitz. In Theoretical Computer Science. Lecture + Notes in Computer Science. Volume 3895. pp 218-240. + https://doi.org/10.1007/11685654_10 + + """ + R = dinitz_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "dinitz" + nx._clear_cache(R) + return R + + +def dinitz_impl(G, s, t, capacity, residual, cutoff): + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + # Use an arbitrary high value as infinite. It is computed + # when building the residual network. 
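+    # (The stand-in is stored in R.graph["inf"] by build_residual_network
+    # and is chosen large enough that an augmenting flow exceeding INF / 2
+    # can only come from infinite-capacity edges; the unboundedness check
+    # below relies on this.)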
+    INF = R.graph["inf"]
+
+    if cutoff is None:
+        cutoff = INF
+
+    R_succ = R.succ
+    R_pred = R.pred
+
+    def breadth_first_search():
+        parents = {}
+        vertex_dist = {s: 0}
+        queue = deque([(s, 0)])
+        # Record all the potential edges of shortest augmenting paths
+        while queue:
+            if t in parents:
+                break
+            u, dist = queue.popleft()
+            for v, attr in R_succ[u].items():
+                if attr["capacity"] - attr["flow"] > 0:
+                    if v in parents:
+                        if vertex_dist[v] == dist + 1:
+                            parents[v].append(u)
+                    else:
+                        parents[v] = deque([u])
+                        vertex_dist[v] = dist + 1
+                        queue.append((v, dist + 1))
+        return parents
+
+    def depth_first_search(parents):
+        """Build a path using DFS starting from the sink."""
+        # DFS to find all the shortest augmenting paths
+        total_flow = 0
+        u = t
+        # path also functions as a stack
+        path = [u]
+        # The loop ends when no augmenting path is left in the layered graph
+        while True:
+            if len(parents[u]) > 0:
+                v = parents[u][0]
+                path.append(v)
+            else:
+                path.pop()
+                if len(path) == 0:
+                    break
+                v = path[-1]
+                parents[v].popleft()
+            # Augment the flow along the path found
+            if v == s:
+                flow = INF
+                for u, v in pairwise(path):
+                    flow = min(flow, R_pred[u][v]["capacity"] - R_pred[u][v]["flow"])
+                for u, v in pairwise(reversed(path)):
+                    R_pred[v][u]["flow"] += flow
+                    R_pred[u][v]["flow"] -= flow
+                    # Find the proper node to continue the search
+                    if R_pred[v][u]["capacity"] - R_pred[v][u]["flow"] == 0:
+                        parents[v].popleft()
+                        while path[-1] != v:
+                            path.pop()
+                total_flow += flow
+                v = path[-1]
+            u = v
+        return total_flow
+
+    flow_value = 0
+    while flow_value < cutoff:
+        parents = breadth_first_search()
+        if t not in parents:
+            break
+        this_flow = depth_first_search(parents)
+        if this_flow * 2 > INF:
+            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+        flow_value += this_flow
+
+    R.graph["flow_value"] = flow_value
+    return R
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/edmondskarp.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/edmondskarp.py
new file mode 100644
index 0000000..5006326
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/edmondskarp.py
@@ -0,0 +1,241 @@
+"""
+Edmonds-Karp algorithm for maximum flow problems.
+"""
+
+import networkx as nx
+from networkx.algorithms.flow.utils import build_residual_network
+
+__all__ = ["edmonds_karp"]
+
+
+def edmonds_karp_core(R, s, t, cutoff):
+    """Implementation of the Edmonds-Karp algorithm."""
+    R_nodes = R.nodes
+    R_pred = R.pred
+    R_succ = R.succ
+
+    inf = R.graph["inf"]
+
+    def augment(path):
+        """Augment flow along a path from s to t."""
+        # Determine the path residual capacity.
+        flow = inf
+        it = iter(path)
+        u = next(it)
+        for v in it:
+            attr = R_succ[u][v]
+            flow = min(flow, attr["capacity"] - attr["flow"])
+            u = v
+        if flow * 2 > inf:
+            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+        # Augment flow along the path.
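+        # (Pushing flow on (u, v) while subtracting it from (v, u) keeps
+        # the skew-symmetry invariant R[u][v]['flow'] == -R[v][u]['flow']
+        # described in the Notes.)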
+ it = iter(path) + u = next(it) + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + u = v + return flow + + def bidirectional_bfs(): + """Bidirectional breadth-first search for an augmenting path.""" + pred = {s: None} + q_s = [s] + succ = {t: None} + q_t = [t] + while True: + q = [] + if len(q_s) <= len(q_t): + for u in q_s: + for v, attr in R_succ[u].items(): + if v not in pred and attr["flow"] < attr["capacity"]: + pred[v] = u + if v in succ: + return v, pred, succ + q.append(v) + if not q: + return None, None, None + q_s = q + else: + for u in q_t: + for v, attr in R_pred[u].items(): + if v not in succ and attr["flow"] < attr["capacity"]: + succ[v] = u + if v in pred: + return v, pred, succ + q.append(v) + if not q: + return None, None, None + q_t = q + + # Look for shortest augmenting paths using breadth-first search. + flow_value = 0 + while flow_value < cutoff: + v, pred, succ = bidirectional_bfs() + if pred is None: + break + path = [v] + # Trace a path from s to v. + u = v + while u != s: + u = pred[u] + path.append(u) + path.reverse() + # Trace a path from v to t. + u = v + while u != t: + u = succ[u] + path.append(u) + flow_value += augment(path) + + return flow_value + + +def edmonds_karp_impl(G, s, t, capacity, residual, cutoff): + """Implementation of the Edmonds-Karp algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + if cutoff is None: + cutoff = float("inf") + R.graph["flow_value"] = edmonds_karp_core(R, s, t, cutoff) + + return R + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def edmonds_karp( + G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None +): + """Find a maximum single-commodity flow using the Edmonds-Karp algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n m^2)$ for $n$ nodes and $m$ + edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. 
+ + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import edmonds_karp + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = edmonds_karp(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + """ + R = edmonds_karp_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "edmonds_karp" + nx._clear_cache(R) + return R diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/gomory_hu.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/gomory_hu.py new file mode 100644 index 0000000..69913da --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/gomory_hu.py @@ -0,0 +1,178 @@ +""" +Gomory-Hu tree of undirected Graphs. 
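+
+A Gomory-Hu tree encodes the minimum s-t cuts of all pairs of nodes of an
+undirected graph while using only n - 1 maximum-flow computations.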
+""" + +import networkx as nx +from networkx.utils import not_implemented_for + +from .edmondskarp import edmonds_karp +from .utils import build_residual_network + +default_flow_func = edmonds_karp + +__all__ = ["gomory_hu_tree"] + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def gomory_hu_tree(G, capacity="capacity", flow_func=None): + r"""Returns the Gomory-Hu tree of an undirected graph G. + + A Gomory-Hu tree of an undirected graph with capacities is a + weighted tree that represents the minimum s-t cuts for all s-t + pairs in the graph. + + It only requires `n-1` minimum cut computations instead of the + obvious `n(n-1)/2`. The tree represents all s-t cuts as the + minimum cut value among any pair of nodes is the minimum edge + weight in the shortest path between the two nodes in the + Gomory-Hu tree. + + The Gomory-Hu tree also has the property that removing the + edge with the minimum weight in the shortest path between + any two nodes leaves two connected components that form + a partition of the nodes in G that defines the minimum s-t + cut. + + See Examples section below for details. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + Function to perform the underlying flow computations. Default value + :func:`edmonds_karp`. This function performs better in sparse graphs + with right tailed degree distributions. + :func:`shortest_augmenting_path` will perform better in denser + graphs. + + Returns + ------- + Tree : NetworkX graph + A NetworkX graph representing the Gomory-Hu tree of the input graph. + + Raises + ------ + NetworkXNotImplemented + Raised if the input graph is directed. + + NetworkXError + Raised if the input graph is an empty Graph. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> nx.set_edge_attributes(G, 1, "capacity") + >>> T = nx.gomory_hu_tree(G) + >>> # The value of the minimum cut between any pair + ... # of nodes in G is the minimum edge weight in the + ... # shortest path between the two nodes in the + ... # Gomory-Hu tree. + ... def minimum_edge_weight_in_shortest_path(T, u, v): + ... path = nx.shortest_path(T, u, v, weight="weight") + ... return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:])) + >>> u, v = 0, 33 + >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v) + >>> cut_value + 10 + >>> nx.minimum_cut_value(G, u, v) + 10 + >>> # The Gomory-Hu tree also has the property that removing the + ... # edge with the minimum weight in the shortest path between + ... # any two nodes leaves two connected components that form + ... # a partition of the nodes in G that defines the minimum s-t + ... # cut. + ... cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v) + >>> T.remove_edge(*edge) + >>> U, V = list(nx.connected_components(T)) + >>> # Thus U and V form a partition that defines a minimum cut + ... # between u and v in G. You can compute the edge cut set, + ... # that is, the set of edges that if removed from G will + ... # disconnect u from v in G, with this information: + ... cutset = set() + >>> for x, nbrs in ((n, G[n]) for n in U): + ... 
cutset.update((x, y) for y in nbrs if y in V)
+    >>> # Because we have set the capacities of all edges to 1
+    ... # the cutset contains ten edges
+    ... len(cutset)
+    10
+    >>> # You can use any maximum flow algorithm for the underlying
+    ... # flow computations using the argument flow_func
+    ... from networkx.algorithms import flow
+    >>> T = nx.gomory_hu_tree(G, flow_func=flow.boykov_kolmogorov)
+    >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v)
+    >>> cut_value
+    10
+    >>> nx.minimum_cut_value(G, u, v, flow_func=flow.boykov_kolmogorov)
+    10
+
+    Notes
+    -----
+    This implementation is based on Gusfield's approach [1]_ to compute
+    Gomory-Hu trees, which does not require node contractions and has
+    the same computational complexity as the original method.
+
+    See also
+    --------
+    :func:`minimum_cut`
+    :func:`maximum_flow`
+
+    References
+    ----------
+    .. [1] Gusfield D: Very simple methods for all pairs network flow analysis.
+       SIAM J Comput 19(1):143-155, 1990.
+
+    """
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if len(G) == 0:  # empty graph
+        msg = "Empty Graph does not have a Gomory-Hu tree representation"
+        raise nx.NetworkXError(msg)
+
+    # Start the tree as a star graph with an arbitrary node at the center
+    tree = {}
+    labels = {}
+    iter_nodes = iter(G)
+    root = next(iter_nodes)
+    for n in iter_nodes:
+        tree[n] = root
+
+    # Reuse the residual network
+    R = build_residual_network(G, capacity)
+
+    # For all the leaves in the star graph tree (that is, n - 1 nodes).
+    for source in tree:
+        # Find the neighbor in the tree
+        target = tree[source]
+        # compute the minimum cut
+        cut_value, partition = nx.minimum_cut(
+            G, source, target, capacity=capacity, flow_func=flow_func, residual=R
+        )
+        labels[(source, target)] = cut_value
+        # Update the tree
+        # Source will always be in partition[0] and target in partition[1]
+        for node in partition[0]:
+            if node != source and node in tree and tree[node] == target:
+                tree[node] = source
+                labels[node, source] = labels.get((node, target), cut_value)
+        #
+        if target != root and tree[target] in partition[0]:
+            labels[source, tree[target]] = labels[target, tree[target]]
+            labels[target, source] = cut_value
+            tree[source] = tree[target]
+            tree[target] = source
+
+    # Build the tree
+    T = nx.Graph()
+    T.add_nodes_from(G)
+    T.add_weighted_edges_from((u, v, labels[u, v]) for u, v in tree.items())
+    return T
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/maxflow.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/maxflow.py
new file mode 100644
index 0000000..93497a4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/maxflow.py
@@ -0,0 +1,611 @@
+"""
+Maximum flow (and minimum cut) algorithms on capacitated graphs.
+"""
+
+import networkx as nx
+
+from .boykovkolmogorov import boykov_kolmogorov
+from .dinitz_alg import dinitz
+from .edmondskarp import edmonds_karp
+from .preflowpush import preflow_push
+from .shortestaugmentingpath import shortest_augmenting_path
+from .utils import build_flow_dict
+
+# Define the default flow function for computing maximum flow.
+default_flow_func = preflow_push
+
+__all__ = ["maximum_flow", "maximum_flow_value", "minimum_cut", "minimum_cut_value"]
+
+
+@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
+def maximum_flow(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
+    """Find a maximum single-commodity flow.
+ + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + flow_value : integer, float + Value of the maximum flow, i.e., net outflow from the source. + + flow_dict : dict + A dictionary containing the value of the flow that went through + each edge. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow_value` + :meth:`minimum_cut` + :meth:`minimum_cut_value` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should supports an optional boolean parameter value_only. When + True, it can optionally terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined. 
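+
+    As a sketch, any function honoring this contract can be plugged in
+    through ``flow_func``; for instance, a thin wrapper around
+    :meth:`edmonds_karp` (the wrapper name here is illustrative only)
+    already qualifies::
+
+        >>> from networkx.algorithms.flow import edmonds_karp
+        >>> def my_flow_func(G, s, t, capacity="capacity", value_only=False, **kwargs):
+        ...     # Delegates to a stock algorithm that returns a residual
+        ...     # network with R.graph["flow_value"] set, as required.
+        ...     return edmonds_karp(G, s, t, capacity=capacity, **kwargs)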
+ + Note that the resulting maximum flow may contain flow cycles, + back-flow to the source, or some flow exiting the sink. + These are possible if there are cycles in the network. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + maximum_flow returns both the value of the maximum flow and a + dictionary with all flows. + + >>> flow_value, flow_dict = nx.maximum_flow(G, "x", "y") + >>> flow_value + 3.0 + >>> print(flow_dict["x"]["b"]) + 1.0 + + You can also use alternative algorithms for computing the + maximum flow by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> flow_value == nx.maximum_flow(G, "x", "y", flow_func=shortest_augmenting_path)[ + ... 0 + ... ] + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=False, **kwargs) + flow_dict = build_flow_dict(flowG, R) + + return (R.graph["flow_value"], flow_dict) + + +@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")}) +def maximum_flow_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Find the value of maximum single-commodity flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + flow_value : integer, float + Value of the maximum flow, i.e., net outflow from the source. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. 
+
+    See also
+    --------
+    :meth:`maximum_flow`
+    :meth:`minimum_cut`
+    :meth:`minimum_cut_value`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Notes
+    -----
+    The function used in the flow_func parameter has to return a residual
+    network that follows NetworkX conventions:
+
+    The residual network :samp:`R` from an input graph :samp:`G` has the
+    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
+    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
+    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
+    in :samp:`G`.
+
+    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
+    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
+    in :samp:`G` or zero otherwise. If the capacity is infinite,
+    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
+    that does not affect the solution of the problem. This value is stored in
+    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
+    only edges :samp:`(u, v)` such that
+    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    Specific algorithms may store extra data in :samp:`R`.
+
+    The function should support an optional boolean parameter value_only. When
+    True, it may terminate the algorithm as soon as the maximum flow
+    value and the minimum cut can be determined.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge("x", "a", capacity=3.0)
+    >>> G.add_edge("x", "b", capacity=1.0)
+    >>> G.add_edge("a", "c", capacity=3.0)
+    >>> G.add_edge("b", "c", capacity=5.0)
+    >>> G.add_edge("b", "d", capacity=4.0)
+    >>> G.add_edge("d", "e", capacity=2.0)
+    >>> G.add_edge("c", "y", capacity=2.0)
+    >>> G.add_edge("e", "y", capacity=3.0)
+
+    maximum_flow_value computes only the value of the
+    maximum flow:
+
+    >>> flow_value = nx.maximum_flow_value(G, "x", "y")
+    >>> flow_value
+    3.0
+
+    You can also use alternative algorithms for computing the
+    maximum flow by using the flow_func parameter.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> flow_value == nx.maximum_flow_value(
+    ...     G, "x", "y", flow_func=shortest_augmenting_path
+    ... )
+    True
+
+    """
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")
+
+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
+
+    return R.graph["flow_value"]
+
+
+@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
+def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
+    """Compute the value and the node partition of a minimum (s, t)-cut.
+
+    Use the max-flow min-cut theorem, i.e., the capacity of a minimum
+    capacity cut is equal to the flow value of a maximum flow.
+
+    Parameters
+    ----------
+    flowG : NetworkX graph
+        Edges of the graph are expected to have an attribute called
+        'capacity'. If this attribute is not present, the edge is
+        considered to have infinite capacity.
+
+    _s : node
+        Source node for the flow.
+
+    _t : node
+        Sink node for the flow.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes
+        in a capacitated graph. The function has to accept at least three
+        parameters: a Graph or Digraph, a source node, and a target node,
+        and return a residual network that follows NetworkX conventions
+        (see Notes). If flow_func is None, the default maximum
+        flow function (:meth:`preflow_push`) is used. See below for
+        alternative algorithms. The choice of the default function may change
+        from version to version and should not be relied on. Default value:
+        None.
+
+    kwargs : Any other keyword parameter is passed to the function that
+        computes the maximum flow.
+
+    Returns
+    -------
+    cut_value : integer, float
+        Value of the minimum cut.
+
+    partition : pair of node sets
+        A partitioning of the nodes that defines a minimum cut.
+
+    Raises
+    ------
+    NetworkXUnbounded
+        If the graph has a path of infinite capacity, all cuts have
+        infinite capacity and the function raises a NetworkXUnbounded.
+
+    See also
+    --------
+    :meth:`maximum_flow`
+    :meth:`maximum_flow_value`
+    :meth:`minimum_cut_value`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Notes
+    -----
+    The function used in the flow_func parameter has to return a residual
+    network that follows NetworkX conventions:
+
+    The residual network :samp:`R` from an input graph :samp:`G` has the
+    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
+    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
+    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
+    in :samp:`G`.
+
+    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
+    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
+    in :samp:`G` or zero otherwise. If the capacity is infinite,
+    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
+    that does not affect the solution of the problem. This value is stored in
+    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
+    only edges :samp:`(u, v)` such that
+    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    Specific algorithms may store extra data in :samp:`R`.
+
+    The function should support an optional boolean parameter value_only. When
+    True, it may terminate the algorithm as soon as the maximum flow
+    value and the minimum cut can be determined.
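+
+    The returned partition is recovered from the residual network: nodes that
+    can still reach :samp:`t` along unsaturated edges form the :samp:`t` side
+    of the cut, and the remaining nodes form the :samp:`s` side.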
+ + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + minimum_cut computes both the value of the + minimum cut and the node partition: + + >>> cut_value, partition = nx.minimum_cut(G, "x", "y") + >>> reachable, non_reachable = partition + + 'partition' here is a tuple with the two sets of nodes that define + the minimum cut. You can compute the cut set of edges that induce + the minimum cut as follows: + + >>> cutset = set() + >>> for u, nbrs in ((n, G[n]) for n in reachable): + ... cutset.update((u, v) for v in nbrs if v in non_reachable) + >>> print(sorted(cutset)) + [('c', 'y'), ('x', 'b')] + >>> cut_value == sum(G.edges[u, v]["capacity"] for (u, v) in cutset) + True + + You can also use alternative algorithms for computing the + minimum cut by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> cut_value == nx.minimum_cut(G, "x", "y", flow_func=shortest_augmenting_path)[0] + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + if kwargs.get("cutoff") is not None and flow_func is preflow_push: + raise nx.NetworkXError("cutoff should not be specified.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs) + # Remove saturated edges from the residual network + cutset = [(u, v, d) for u, v, d in R.edges(data=True) if d["flow"] == d["capacity"]] + R.remove_edges_from(cutset) + + # Then, reachable and non reachable nodes from source in the + # residual network form the node partition that defines + # the minimum cut. + non_reachable = set(nx.shortest_path_length(R, target=_t)) + partition = (set(flowG) - non_reachable, non_reachable) + # Finally add again cutset edges to the residual network to make + # sure that it is reusable. + R.add_edges_from(cutset) + return (R.graph["flow_value"], partition) + + +@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")}) +def minimum_cut_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Compute the value of a minimum (s, t)-cut. + + Use the max-flow min-cut theorem, i.e., the capacity of a minimum + capacity cut is equal to the flow value of a maximum flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. 
+        It must return a residual network that follows NetworkX conventions
+        (see Notes). If flow_func is None, the default maximum
+        flow function (:meth:`preflow_push`) is used. See below for
+        alternative algorithms. The choice of the default function may change
+        from version to version and should not be relied on. Default value:
+        None.
+
+    kwargs : Any other keyword parameter is passed to the function that
+        computes the maximum flow.
+
+    Returns
+    -------
+    cut_value : integer, float
+        Value of the minimum cut.
+
+    Raises
+    ------
+    NetworkXUnbounded
+        If the graph has a path of infinite capacity, all cuts have
+        infinite capacity and the function raises a NetworkXUnbounded.
+
+    See also
+    --------
+    :meth:`maximum_flow`
+    :meth:`maximum_flow_value`
+    :meth:`minimum_cut`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Notes
+    -----
+    The function used in the flow_func parameter has to return a residual
+    network that follows NetworkX conventions:
+
+    The residual network :samp:`R` from an input graph :samp:`G` has the
+    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
+    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
+    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
+    in :samp:`G`.
+
+    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
+    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
+    in :samp:`G` or zero otherwise. If the capacity is infinite,
+    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
+    that does not affect the solution of the problem. This value is stored in
+    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
+    only edges :samp:`(u, v)` such that
+    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    Specific algorithms may store extra data in :samp:`R`.
+
+    The function should support an optional boolean parameter value_only. When
+    True, it may terminate the algorithm as soon as the maximum flow
+    value and the minimum cut can be determined.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge("x", "a", capacity=3.0)
+    >>> G.add_edge("x", "b", capacity=1.0)
+    >>> G.add_edge("a", "c", capacity=3.0)
+    >>> G.add_edge("b", "c", capacity=5.0)
+    >>> G.add_edge("b", "d", capacity=4.0)
+    >>> G.add_edge("d", "e", capacity=2.0)
+    >>> G.add_edge("c", "y", capacity=2.0)
+    >>> G.add_edge("e", "y", capacity=3.0)
+
+    minimum_cut_value computes only the value of the
+    minimum cut:
+
+    >>> cut_value = nx.minimum_cut_value(G, "x", "y")
+    >>> cut_value
+    3.0
+
+    You can also use alternative algorithms for computing the
+    minimum cut by using the flow_func parameter.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> cut_value == nx.minimum_cut_value(
+    ...     G, "x", "y", flow_func=shortest_augmenting_path
+    ... )
+    True
+
+    """
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")
+
+    if kwargs.get("cutoff") is not None and flow_func is preflow_push:
+        raise nx.NetworkXError("cutoff should not be specified.")
+
+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
+
+    return R.graph["flow_value"]
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/mincost.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/mincost.py
new file mode 100644
index 0000000..2f9390d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/mincost.py
@@ -0,0 +1,356 @@
+"""
+Minimum cost flow algorithms on directed connected graphs.
+"""
+
+__all__ = ["min_cost_flow_cost", "min_cost_flow", "cost_of_flow", "max_flow_min_cost"]
+
+import networkx as nx
+
+
+@nx._dispatchable(
+    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
+)
+def min_cost_flow_cost(G, demand="demand", capacity="capacity", weight="weight"):
+    r"""Find the cost of a minimum cost flow satisfying all demands in digraph G.
+
+    G is a digraph with edge costs and capacities and in which nodes
+    have demand, i.e., they want to send or receive some amount of
+    flow. A negative demand means that the node wants to send flow, a
+    positive demand means that the node wants to receive flow. A flow on
+    the digraph G satisfies all demand if the net flow into each node
+    is equal to the demand of that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph on which a minimum cost flow satisfying all demands is
+        to be found.
+
+    demand : string
+        Nodes of the graph G are expected to have an attribute demand
+        that indicates how much flow a node wants to send (negative
+        demand) or receive (positive demand). Note that the sum of the
+        demands should be 0; otherwise the problem is not feasible. If
+        this attribute is not present, a node is considered to have 0
+        demand. Default value: 'demand'.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    Returns
+    -------
+    flowCost : integer, float
+        Cost of a minimum cost flow satisfying all demands.
+
+    Raises
+    ------
+    NetworkXError
+        This exception is raised if the input graph is not directed or
+        not connected.
+
+    NetworkXUnfeasible
+        This exception is raised in the following situations:
+
+            * The sum of the demands is not zero. Then, there is no
+              flow satisfying all demands.
+            * There is no flow satisfying all demands.
+
+    NetworkXUnbounded
+        This exception is raised if the digraph G has a cycle of
+        negative cost and infinite capacity. Then, the cost of a flow
+        satisfying all demands is unbounded below.
+
+    See also
+    --------
+    cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex
+
+    Notes
+    -----
+    This algorithm is not guaranteed to work if edge weights or demands
+    are floating point numbers (overflows and roundoff errors can
+    cause problems). As a workaround you can use integer numbers by
+    multiplying the relevant edge attributes by a convenient
+    constant factor (e.g., 100).
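+
+    This function is a thin wrapper around :meth:`network_simplex`; it
+    returns only the cost component of that function's result.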
+
+    Examples
+    --------
+    A simple example of a min cost flow problem.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowCost = nx.min_cost_flow_cost(G)
+    >>> flowCost
+    24
+    """
+    return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[0]
+
+
+@nx._dispatchable(
+    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
+)
+def min_cost_flow(G, demand="demand", capacity="capacity", weight="weight"):
+    r"""Returns a minimum cost flow satisfying all demands in digraph G.
+
+    G is a digraph with edge costs and capacities and in which nodes
+    have demand, i.e., they want to send or receive some amount of
+    flow. A negative demand means that the node wants to send flow, a
+    positive demand means that the node wants to receive flow. A flow on
+    the digraph G satisfies all demand if the net flow into each node
+    is equal to the demand of that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph on which a minimum cost flow satisfying all demands is
+        to be found.
+
+    demand : string
+        Nodes of the graph G are expected to have an attribute demand
+        that indicates how much flow a node wants to send (negative
+        demand) or receive (positive demand). Note that the sum of the
+        demands should be 0; otherwise the problem is not feasible. If
+        this attribute is not present, a node is considered to have 0
+        demand. Default value: 'demand'.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    Returns
+    -------
+    flowDict : dictionary
+        Dictionary of dictionaries keyed by nodes such that
+        flowDict[u][v] is the flow on edge (u, v).
+
+    Raises
+    ------
+    NetworkXError
+        This exception is raised if the input graph is not directed or
+        not connected.
+
+    NetworkXUnfeasible
+        This exception is raised in the following situations:
+
+            * The sum of the demands is not zero. Then, there is no
+              flow satisfying all demands.
+            * There is no flow satisfying all demands.
+
+    NetworkXUnbounded
+        This exception is raised if the digraph G has a cycle of
+        negative cost and infinite capacity. Then, the cost of a flow
+        satisfying all demands is unbounded below.
+
+    See also
+    --------
+    cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex
+
+    Notes
+    -----
+    This algorithm is not guaranteed to work if edge weights or demands
+    are floating point numbers (overflows and roundoff errors can
+    cause problems). As a workaround you can use integer numbers by
+    multiplying the relevant edge attributes by a convenient
+    constant factor (e.g., 100).
+
+    Examples
+    --------
+    A simple example of a min cost flow problem.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowDict = nx.min_cost_flow(G)
+    >>> flowDict
+    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
+    """
+    return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[1]
+
+
+@nx._dispatchable(edge_attrs={"weight": 0})
+def cost_of_flow(G, flowDict, weight="weight"):
+    """Compute the cost of the flow given by flowDict on graph G.
+
+    Note that this function does not check for the validity of the
+    flow flowDict. This function will fail if the graph G and the
+    flow don't have the same edge set.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph over which the flow given by flowDict is defined.
+
+    flowDict : dictionary
+        Dictionary of dictionaries keyed by nodes such that
+        flowDict[u][v] is the flow on edge (u, v).
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    Returns
+    -------
+    cost : integer, float
+        The total cost of the flow. This is given by the sum over all
+        edges of the product of the edge's flow and the edge's weight.
+
+    See also
+    --------
+    max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex
+
+    Notes
+    -----
+    This algorithm is not guaranteed to work if edge weights or demands
+    are floating point numbers (overflows and roundoff errors can
+    cause problems). As a workaround you can use integer numbers by
+    multiplying the relevant edge attributes by a convenient
+    constant factor (e.g., 100).
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowDict = nx.min_cost_flow(G)
+    >>> flowDict
+    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
+    >>> nx.cost_of_flow(G, flowDict)
+    24
+    """
+    return sum((flowDict[u][v] * d.get(weight, 0) for u, v, d in G.edges(data=True)))
+
+
+@nx._dispatchable(edge_attrs={"capacity": float("inf"), "weight": 0})
+def max_flow_min_cost(G, s, t, capacity="capacity", weight="weight"):
+    """Returns a maximum (s, t)-flow of minimum cost.
+
+    G is a digraph with edge costs and capacities. There is a source
+    node s and a sink node t. This function finds a maximum flow from
+    s to t whose total cost is minimized.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph on which a maximum (s, t)-flow of minimum cost is
+        to be found.
+
+    s: node label
+        Source of the flow.
+
+    t: node label
+        Destination of the flow.
+
+    capacity: string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    weight: string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    Returns
+    -------
+    flowDict: dictionary
+        Dictionary of dictionaries keyed by nodes such that
+        flowDict[u][v] is the flow on edge (u, v).
+
+    Raises
+    ------
+    NetworkXError
+        This exception is raised if the input graph is not directed or
+        not connected.
+
+    NetworkXUnbounded
+        This exception is raised if there is an infinite capacity path
+        from s to t in G. In this case there is no maximum flow. This
+        exception is also raised if the digraph G has a cycle of
+        negative cost and infinite capacity. Then, the cost of a flow
+        is unbounded below.
+
+    See also
+    --------
+    cost_of_flow, min_cost_flow, min_cost_flow_cost, network_simplex
+
+    Notes
+    -----
+    This algorithm is not guaranteed to work if edge weights or demands
+    are floating point numbers (overflows and roundoff errors can
+    cause problems). As a workaround you can use integer numbers by
+    multiplying the relevant edge attributes by a convenient
+    constant factor (e.g., 100).
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edges_from(
+    ...     [
+    ...         (1, 2, {"capacity": 12, "weight": 4}),
+    ...         (1, 3, {"capacity": 20, "weight": 6}),
+    ...         (2, 3, {"capacity": 6, "weight": -3}),
+    ...         (2, 6, {"capacity": 14, "weight": 1}),
+    ...         (3, 4, {"weight": 9}),
+    ...         (3, 5, {"capacity": 10, "weight": 5}),
+    ...         (4, 2, {"capacity": 19, "weight": 13}),
+    ...         (4, 5, {"capacity": 4, "weight": 0}),
+    ...         (5, 7, {"capacity": 28, "weight": 2}),
+    ...         (6, 5, {"capacity": 11, "weight": 1}),
+    ...         (6, 7, {"weight": 8}),
+    ...         (7, 4, {"capacity": 6, "weight": 6}),
+    ...     ]
+    ... )
+    >>> mincostFlow = nx.max_flow_min_cost(G, 1, 7)
+    >>> mincost = nx.cost_of_flow(G, mincostFlow)
+    >>> mincost
+    373
+    >>> from networkx.algorithms.flow import maximum_flow
+    >>> maxFlow = maximum_flow(G, 1, 7)[1]
+    >>> nx.cost_of_flow(G, maxFlow) >= mincost
+    True
+    >>> mincostFlowValue = sum((mincostFlow[u][7] for u in G.predecessors(7))) - sum(
+    ...     (mincostFlow[7][v] for v in G.successors(7))
+    ... )
+    >>> mincostFlowValue == nx.maximum_flow_value(G, 1, 7)
+    True
+
+    """
+    maxFlow = nx.maximum_flow_value(G, s, t, capacity=capacity)
+    H = nx.DiGraph(G)
+    H.add_node(s, demand=-maxFlow)
+    H.add_node(t, demand=maxFlow)
+    return min_cost_flow(H, capacity=capacity, weight=weight)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/networksimplex.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/networksimplex.py
new file mode 100644
index 0000000..5baa976
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/networksimplex.py
@@ -0,0 +1,662 @@
+"""
+Minimum cost flow algorithms on directed connected graphs.
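+
+This module implements a primal network simplex method that uses the
+leaving arc rule to prevent cycling.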
+""" + +__all__ = ["network_simplex"] + +from itertools import chain, islice, repeat +from math import ceil, sqrt + +import networkx as nx +from networkx.utils import not_implemented_for + + +class _DataEssentialsAndFunctions: + def __init__( + self, G, multigraph, demand="demand", capacity="capacity", weight="weight" + ): + # Number all nodes and edges and hereafter reference them using ONLY their numbers + self.node_list = list(G) # nodes + self.node_indices = {u: i for i, u in enumerate(self.node_list)} # node indices + self.node_demands = [ + G.nodes[u].get(demand, 0) for u in self.node_list + ] # node demands + + self.edge_sources = [] # edge sources + self.edge_targets = [] # edge targets + if multigraph: + self.edge_keys = [] # edge keys + self.edge_indices = {} # edge indices + self.edge_capacities = [] # edge capacities + self.edge_weights = [] # edge weights + + if not multigraph: + edges = G.edges(data=True) + else: + edges = G.edges(data=True, keys=True) + + inf = float("inf") + edges = (e for e in edges if e[0] != e[1] and e[-1].get(capacity, inf) != 0) + for i, e in enumerate(edges): + self.edge_sources.append(self.node_indices[e[0]]) + self.edge_targets.append(self.node_indices[e[1]]) + if multigraph: + self.edge_keys.append(e[2]) + self.edge_indices[e[:-1]] = i + self.edge_capacities.append(e[-1].get(capacity, inf)) + self.edge_weights.append(e[-1].get(weight, 0)) + + # spanning tree specific data to be initialized + + self.edge_count = None # number of edges + self.edge_flow = None # edge flows + self.node_potentials = None # node potentials + self.parent = None # parent nodes + self.parent_edge = None # edges to parents + self.subtree_size = None # subtree sizes + self.next_node_dft = None # next nodes in depth-first thread + self.prev_node_dft = None # previous nodes in depth-first thread + self.last_descendent_dft = None # last descendants in depth-first thread + self._spanning_tree_initialized = ( + False # False until initialize_spanning_tree() is called + ) + + def initialize_spanning_tree(self, n, faux_inf): + self.edge_count = len(self.edge_indices) # number of edges + self.edge_flow = list( + chain(repeat(0, self.edge_count), (abs(d) for d in self.node_demands)) + ) # edge flows + self.node_potentials = [ + faux_inf if d <= 0 else -faux_inf for d in self.node_demands + ] # node potentials + self.parent = list(chain(repeat(-1, n), [None])) # parent nodes + self.parent_edge = list( + range(self.edge_count, self.edge_count + n) + ) # edges to parents + self.subtree_size = list(chain(repeat(1, n), [n + 1])) # subtree sizes + self.next_node_dft = list( + chain(range(1, n), [-1, 0]) + ) # next nodes in depth-first thread + self.prev_node_dft = list(range(-1, n)) # previous nodes in depth-first thread + self.last_descendent_dft = list( + chain(range(n), [n - 1]) + ) # last descendants in depth-first thread + self._spanning_tree_initialized = True # True only if all the assignments pass + + def find_apex(self, p, q): + """ + Find the lowest common ancestor of nodes p and q in the spanning tree. 
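+
+        The search repeatedly ascends from whichever node currently has the
+        smaller subtree, so both walks meet exactly at the apex.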
+ """ + size_p = self.subtree_size[p] + size_q = self.subtree_size[q] + while True: + while size_p < size_q: + p = self.parent[p] + size_p = self.subtree_size[p] + while size_p > size_q: + q = self.parent[q] + size_q = self.subtree_size[q] + if size_p == size_q: + if p != q: + p = self.parent[p] + size_p = self.subtree_size[p] + q = self.parent[q] + size_q = self.subtree_size[q] + else: + return p + + def trace_path(self, p, w): + """ + Returns the nodes and edges on the path from node p to its ancestor w. + """ + Wn = [p] + We = [] + while p != w: + We.append(self.parent_edge[p]) + p = self.parent[p] + Wn.append(p) + return Wn, We + + def find_cycle(self, i, p, q): + """ + Returns the nodes and edges on the cycle containing edge i == (p, q) + when the latter is added to the spanning tree. + + The cycle is oriented in the direction from p to q. + """ + w = self.find_apex(p, q) + Wn, We = self.trace_path(p, w) + Wn.reverse() + We.reverse() + if We != [i]: + We.append(i) + WnR, WeR = self.trace_path(q, w) + del WnR[-1] + Wn += WnR + We += WeR + return Wn, We + + def augment_flow(self, Wn, We, f): + """ + Augment f units of flow along a cycle represented by Wn and We. + """ + for i, p in zip(We, Wn): + if self.edge_sources[i] == p: + self.edge_flow[i] += f + else: + self.edge_flow[i] -= f + + def trace_subtree(self, p): + """ + Yield the nodes in the subtree rooted at a node p. + """ + yield p + l = self.last_descendent_dft[p] + while p != l: + p = self.next_node_dft[p] + yield p + + def remove_edge(self, s, t): + """ + Remove an edge (s, t) where parent[t] == s from the spanning tree. + """ + size_t = self.subtree_size[t] + prev_t = self.prev_node_dft[t] + last_t = self.last_descendent_dft[t] + next_last_t = self.next_node_dft[last_t] + # Remove (s, t). + self.parent[t] = None + self.parent_edge[t] = None + # Remove the subtree rooted at t from the depth-first thread. + self.next_node_dft[prev_t] = next_last_t + self.prev_node_dft[next_last_t] = prev_t + self.next_node_dft[last_t] = t + self.prev_node_dft[t] = last_t + # Update the subtree sizes and last descendants of the (old) ancestors + # of t. + while s is not None: + self.subtree_size[s] -= size_t + if self.last_descendent_dft[s] == last_t: + self.last_descendent_dft[s] = prev_t + s = self.parent[s] + + def make_root(self, q): + """ + Make a node q the root of its containing subtree. + """ + ancestors = [] + while q is not None: + ancestors.append(q) + q = self.parent[q] + ancestors.reverse() + for p, q in zip(ancestors, islice(ancestors, 1, None)): + size_p = self.subtree_size[p] + last_p = self.last_descendent_dft[p] + prev_q = self.prev_node_dft[q] + last_q = self.last_descendent_dft[q] + next_last_q = self.next_node_dft[last_q] + # Make p a child of q. + self.parent[p] = q + self.parent[q] = None + self.parent_edge[p] = self.parent_edge[q] + self.parent_edge[q] = None + self.subtree_size[p] = size_p - self.subtree_size[q] + self.subtree_size[q] = size_p + # Remove the subtree rooted at q from the depth-first thread. + self.next_node_dft[prev_q] = next_last_q + self.prev_node_dft[next_last_q] = prev_q + self.next_node_dft[last_q] = q + self.prev_node_dft[q] = last_q + if last_p == last_q: + self.last_descendent_dft[p] = prev_q + last_p = prev_q + # Add the remaining parts of the subtree rooted at p as a subtree + # of q in the depth-first thread. 
+ self.prev_node_dft[p] = last_q + self.next_node_dft[last_q] = p + self.next_node_dft[last_p] = q + self.prev_node_dft[q] = last_p + self.last_descendent_dft[q] = last_p + + def add_edge(self, i, p, q): + """ + Add an edge (p, q) to the spanning tree where q is the root of a subtree. + """ + last_p = self.last_descendent_dft[p] + next_last_p = self.next_node_dft[last_p] + size_q = self.subtree_size[q] + last_q = self.last_descendent_dft[q] + # Make q a child of p. + self.parent[q] = p + self.parent_edge[q] = i + # Insert the subtree rooted at q into the depth-first thread. + self.next_node_dft[last_p] = q + self.prev_node_dft[q] = last_p + self.prev_node_dft[next_last_p] = last_q + self.next_node_dft[last_q] = next_last_p + # Update the subtree sizes and last descendants of the (new) ancestors + # of q. + while p is not None: + self.subtree_size[p] += size_q + if self.last_descendent_dft[p] == last_p: + self.last_descendent_dft[p] = last_q + p = self.parent[p] + + def update_potentials(self, i, p, q): + """ + Update the potentials of the nodes in the subtree rooted at a node + q connected to its parent p by an edge i. + """ + if q == self.edge_targets[i]: + d = self.node_potentials[p] - self.edge_weights[i] - self.node_potentials[q] + else: + d = self.node_potentials[p] + self.edge_weights[i] - self.node_potentials[q] + for q in self.trace_subtree(q): + self.node_potentials[q] += d + + def reduced_cost(self, i): + """Returns the reduced cost of an edge i.""" + c = ( + self.edge_weights[i] + - self.node_potentials[self.edge_sources[i]] + + self.node_potentials[self.edge_targets[i]] + ) + return c if self.edge_flow[i] == 0 else -c + + def find_entering_edges(self): + """Yield entering edges until none can be found.""" + if self.edge_count == 0: + return + + # Entering edges are found by combining Dantzig's rule and Bland's + # rule. The edges are cyclically grouped into blocks of size B. Within + # each block, Dantzig's rule is applied to find an entering edge. The + # blocks to search is determined following Bland's rule. + B = int(ceil(sqrt(self.edge_count))) # pivot block size + M = (self.edge_count + B - 1) // B # number of blocks needed to cover all edges + m = 0 # number of consecutive blocks without eligible + # entering edges + f = 0 # first edge in block + while m < M: + # Determine the next block of edges. + l = f + B + if l <= self.edge_count: + edges = range(f, l) + else: + l -= self.edge_count + edges = chain(range(f, self.edge_count), range(l)) + f = l + # Find the first edge with the lowest reduced cost. + i = min(edges, key=self.reduced_cost) + c = self.reduced_cost(i) + if c >= 0: + # No entering edge found in the current block. + m += 1 + else: + # Entering edge found. + if self.edge_flow[i] == 0: + p = self.edge_sources[i] + q = self.edge_targets[i] + else: + p = self.edge_targets[i] + q = self.edge_sources[i] + yield i, p, q + m = 0 + # All edges have nonnegative reduced costs. The current flow is + # optimal. + + def residual_capacity(self, i, p): + """Returns the residual capacity of an edge i in the direction away + from its endpoint p. 
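+
+        In the forward direction (p is the source of edge i) this is
+        capacity minus flow; in the reverse direction it is the flow itself.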
+        """
+        return (
+            self.edge_capacities[i] - self.edge_flow[i]
+            if self.edge_sources[i] == p
+            else self.edge_flow[i]
+        )
+
+    def find_leaving_edge(self, Wn, We):
+        """Returns the leaving edge in a cycle represented by Wn and We."""
+        j, s = min(
+            zip(reversed(We), reversed(Wn)),
+            key=lambda i_p: self.residual_capacity(*i_p),
+        )
+        t = self.edge_targets[j] if self.edge_sources[j] == s else self.edge_sources[j]
+        return j, s, t
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(
+    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
+)
+def network_simplex(G, demand="demand", capacity="capacity", weight="weight"):
+    r"""Find a minimum cost flow satisfying all demands in digraph G.
+
+    This is a primal network simplex algorithm that uses the leaving
+    arc rule to prevent cycling.
+
+    G is a digraph with edge costs and capacities and in which nodes
+    have demand, i.e., they want to send or receive some amount of
+    flow. A negative demand means that the node wants to send flow, a
+    positive demand means that the node wants to receive flow. A flow on
+    the digraph G satisfies all demand if the net flow into each node
+    is equal to the demand of that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph on which a minimum cost flow satisfying all demands is
+        to be found.
+
+    demand : string
+        Nodes of the graph G are expected to have an attribute demand
+        that indicates how much flow a node wants to send (negative
+        demand) or receive (positive demand). Note that the sum of the
+        demands should be 0; otherwise the problem is not feasible. If
+        this attribute is not present, a node is considered to have 0
+        demand. Default value: 'demand'.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    Returns
+    -------
+    flowCost : integer, float
+        Cost of a minimum cost flow satisfying all demands.
+
+    flowDict : dictionary
+        Dictionary of dictionaries keyed by nodes such that
+        flowDict[u][v] is the flow on edge (u, v).
+
+    Raises
+    ------
+    NetworkXError
+        This exception is raised if the input graph is not directed or
+        not connected.
+
+    NetworkXUnfeasible
+        This exception is raised in the following situations:
+
+            * The sum of the demands is not zero. Then, there is no
+              flow satisfying all demands.
+            * There is no flow satisfying all demands.
+
+    NetworkXUnbounded
+        This exception is raised if the digraph G has a cycle of
+        negative cost and infinite capacity. Then, the cost of a flow
+        satisfying all demands is unbounded below.
+
+    Notes
+    -----
+    This algorithm is not guaranteed to work if edge weights or demands
+    are floating point numbers (overflows and roundoff errors can
+    cause problems). As a workaround you can use integer numbers by
+    multiplying the relevant edge attributes by a convenient
+    constant factor (e.g., 100).
+
+    See also
+    --------
+    cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost
+
+    Examples
+    --------
+    A simple example of a min cost flow problem.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowCost, flowDict = nx.network_simplex(G)
+    >>> flowCost
+    24
+    >>> flowDict
+    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
+
+    The mincost flow algorithm can also be used to solve shortest path
+    problems. To find the shortest path between two nodes u and v,
+    give all edges an infinite capacity, give node u a demand of -1 and
+    node v a demand of 1. Then run the network simplex. The value of a
+    min cost flow will be the distance between u and v, and edges
+    carrying positive flow will indicate the path.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from(
+    ...     [
+    ...         ("s", "u", 10),
+    ...         ("s", "x", 5),
+    ...         ("u", "v", 1),
+    ...         ("u", "x", 2),
+    ...         ("v", "y", 1),
+    ...         ("x", "u", 3),
+    ...         ("x", "v", 5),
+    ...         ("x", "y", 2),
+    ...         ("y", "s", 7),
+    ...         ("y", "v", 6),
+    ...     ]
+    ... )
+    >>> G.add_node("s", demand=-1)
+    >>> G.add_node("v", demand=1)
+    >>> flowCost, flowDict = nx.network_simplex(G)
+    >>> flowCost == nx.shortest_path_length(G, "s", "v", weight="weight")
+    True
+    >>> sorted([(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0])
+    [('s', 'x'), ('u', 'v'), ('x', 'u')]
+    >>> nx.shortest_path(G, "s", "v", weight="weight")
+    ['s', 'x', 'u', 'v']
+
+    It is possible to change the name of the attributes used for the
+    algorithm.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("p", spam=-4)
+    >>> G.add_node("q", spam=2)
+    >>> G.add_node("a", spam=-2)
+    >>> G.add_node("d", spam=-1)
+    >>> G.add_node("t", spam=2)
+    >>> G.add_node("w", spam=3)
+    >>> G.add_edge("p", "q", cost=7, vacancies=5)
+    >>> G.add_edge("p", "a", cost=1, vacancies=4)
+    >>> G.add_edge("q", "d", cost=2, vacancies=3)
+    >>> G.add_edge("t", "q", cost=1, vacancies=2)
+    >>> G.add_edge("a", "t", cost=2, vacancies=4)
+    >>> G.add_edge("d", "w", cost=3, vacancies=4)
+    >>> G.add_edge("t", "w", cost=4, vacancies=1)
+    >>> flowCost, flowDict = nx.network_simplex(
+    ...     G, demand="spam", capacity="vacancies", weight="cost"
+    ... )
+    >>> flowCost
+    37
+    >>> flowDict
+    {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
+
+    References
+    ----------
+    .. [1] Z. Kiraly, P. Kovacs.
+           Efficient implementation of minimum-cost flow algorithms.
+           Acta Universitatis Sapientiae, Informatica 4(1):67--118. 2012.
+    .. [2] R. Barr, F. Glover, D. Klingman.
+           Enhancement of spanning tree labeling procedures for network
+           optimization.
+           INFOR 17(1):16--34. 1979.
+    """
+    ###########################################################################
+    # Problem essentials extraction and sanity check
+    ###########################################################################
+
+    if len(G) == 0:
+        raise nx.NetworkXError("graph has no nodes")
+
+    multigraph = G.is_multigraph()
+
+    # extracting data essential to problem
+    DEAF = _DataEssentialsAndFunctions(
+        G, multigraph, demand=demand, capacity=capacity, weight=weight
+    )
+
+    ###########################################################################
+    # Quick Error Detection
+    ###########################################################################
+
+    inf = float("inf")
+    for u, d in zip(DEAF.node_list, DEAF.node_demands):
+        if abs(d) == inf:
+            raise nx.NetworkXError(f"node {u!r} has infinite demand")
+    for e, w in zip(DEAF.edge_indices, DEAF.edge_weights):
+        if abs(w) == inf:
+            raise nx.NetworkXError(f"edge {e!r} has infinite weight")
+    if not multigraph:
+        edges = nx.selfloop_edges(G, data=True)
+    else:
+        edges = nx.selfloop_edges(G, data=True, keys=True)
+    for e in edges:
+        if abs(e[-1].get(weight, 0)) == inf:
+            raise nx.NetworkXError(f"edge {e[:-1]!r} has infinite weight")
+
+    ###########################################################################
+    # Quick Infeasibility Detection
+    ###########################################################################
+
+    if sum(DEAF.node_demands) != 0:
+        raise nx.NetworkXUnfeasible("total node demand is not zero")
+    for e, c in zip(DEAF.edge_indices, DEAF.edge_capacities):
+        if c < 0:
+            raise nx.NetworkXUnfeasible(f"edge {e!r} has negative capacity")
+    if not multigraph:
+        edges = nx.selfloop_edges(G, data=True)
+    else:
+        edges = nx.selfloop_edges(G, data=True, keys=True)
+    for e in edges:
+        if e[-1].get(capacity, inf) < 0:
+            raise nx.NetworkXUnfeasible(f"edge {e[:-1]!r} has negative capacity")
+
+    ###########################################################################
+    # Initialization
+    ###########################################################################
+
+    # Add a dummy node -1 and connect all existing nodes to it with infinite-
+    # capacity dummy edges. Node -1 will serve as the root of the
+    # spanning tree of the network simplex method. The new edges will be used
+    # to trivially satisfy the node demands and create an initial strongly
+    # feasible spanning tree.
+    for i, d in enumerate(DEAF.node_demands):
+        # Must be greater-than here. Zero-demand nodes must have
+        # edges pointing towards the root to ensure strong feasibility.
+        if d > 0:
+            DEAF.edge_sources.append(-1)
+            DEAF.edge_targets.append(i)
+        else:
+            DEAF.edge_sources.append(i)
+            DEAF.edge_targets.append(-1)
+    faux_inf = (
+        3
+        * max(
+            sum(c for c in DEAF.edge_capacities if c < inf),
+            sum(abs(w) for w in DEAF.edge_weights),
+            sum(abs(d) for d in DEAF.node_demands),
+        )
+        or 1
+    )
+
+    n = len(DEAF.node_list)  # number of nodes
+    DEAF.edge_weights.extend(repeat(faux_inf, n))
+    DEAF.edge_capacities.extend(repeat(faux_inf, n))
+
+    # Construct the initial spanning tree.
+    DEAF.initialize_spanning_tree(n, faux_inf)
+
+    ###########################################################################
+    # Pivot loop
+    ###########################################################################
+
+    for i, p, q in DEAF.find_entering_edges():
+        Wn, We = DEAF.find_cycle(i, p, q)
+        j, s, t = DEAF.find_leaving_edge(Wn, We)
+        DEAF.augment_flow(Wn, We, DEAF.residual_capacity(j, s))
+        # Do nothing more if the entering edge is the same as the leaving edge.
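+        # Otherwise update the spanning tree: remove the leaving edge, re-root
+        # the detached subtree at q, reattach it via the entering edge, and
+        # update the node potentials of the moved subtree.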
+ if i != j: + if DEAF.parent[t] != s: + # Ensure that s is the parent of t. + s, t = t, s + if We.index(i) > We.index(j): + # Ensure that q is in the subtree rooted at t. + p, q = q, p + DEAF.remove_edge(s, t) + DEAF.make_root(q) + DEAF.add_edge(i, p, q) + DEAF.update_potentials(i, p, q) + + ########################################################################### + # Infeasibility and unboundedness detection + ########################################################################### + + if any(DEAF.edge_flow[i] != 0 for i in range(-n, 0)): + raise nx.NetworkXUnfeasible("no flow satisfies all node demands") + + if any(DEAF.edge_flow[i] * 2 >= faux_inf for i in range(DEAF.edge_count)) or any( + e[-1].get(capacity, inf) == inf and e[-1].get(weight, 0) < 0 + for e in nx.selfloop_edges(G, data=True) + ): + raise nx.NetworkXUnbounded("negative cycle with infinite capacity found") + + ########################################################################### + # Flow cost calculation and flow dict construction + ########################################################################### + + del DEAF.edge_flow[DEAF.edge_count :] + flow_cost = sum(w * x for w, x in zip(DEAF.edge_weights, DEAF.edge_flow)) + flow_dict = {n: {} for n in DEAF.node_list} + + def add_entry(e): + """Add a flow dict entry.""" + d = flow_dict[e[0]] + for k in e[1:-2]: + try: + d = d[k] + except KeyError: + t = {} + d[k] = t + d = t + d[e[-2]] = e[-1] + + DEAF.edge_sources = ( + DEAF.node_list[s] for s in DEAF.edge_sources + ) # Use original nodes. + DEAF.edge_targets = ( + DEAF.node_list[t] for t in DEAF.edge_targets + ) # Use original nodes. + if not multigraph: + for e in zip(DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_flow): + add_entry(e) + edges = G.edges(data=True) + else: + for e in zip( + DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_keys, DEAF.edge_flow + ): + add_entry(e) + edges = G.edges(data=True, keys=True) + for e in edges: + if e[0] != e[1]: + if e[-1].get(capacity, inf) == 0: + add_entry(e[:-1] + (0,)) + else: + w = e[-1].get(weight, 0) + if w >= 0: + add_entry(e[:-1] + (0,)) + else: + c = e[-1][capacity] + flow_cost += w * c + add_entry(e[:-1] + (c,)) + + return flow_cost, flow_dict diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/preflowpush.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/preflowpush.py new file mode 100644 index 0000000..42cadc2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/preflowpush.py @@ -0,0 +1,425 @@ +""" +Highest-label preflow-push algorithm for maximum flow problems. 
+""" + +from collections import deque +from itertools import islice + +import networkx as nx + +from ...utils import arbitrary_element +from .utils import ( + CurrentEdge, + GlobalRelabelThreshold, + Level, + build_residual_network, + detect_unboundedness, +) + +__all__ = ["preflow_push"] + + +def preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only): + """Implementation of the highest-label preflow-push algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if global_relabel_freq is None: + global_relabel_freq = 0 + if global_relabel_freq < 0: + raise nx.NetworkXError("global_relabel_freq must be nonnegative.") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + detect_unboundedness(R, s, t) + + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + # Initialize/reset the residual network. + for u in R: + R_nodes[u]["excess"] = 0 + for e in R_succ[u].values(): + e["flow"] = 0 + + def reverse_bfs(src): + """Perform a reverse breadth-first search from src in the residual + network. + """ + heights = {src: 0} + q = deque([(src, 0)]) + while q: + u, height = q.popleft() + height += 1 + for v, attr in R_pred[u].items(): + if v not in heights and attr["flow"] < attr["capacity"]: + heights[v] = height + q.append((v, height)) + return heights + + # Initialize heights of the nodes. + heights = reverse_bfs(t) + + if s not in heights: + # t is not reachable from s in the residual network. The maximum flow + # must be zero. + R.graph["flow_value"] = 0 + return R + + n = len(R) + # max_height represents the height of the highest level below level n with + # at least one active node. + max_height = max(heights[u] for u in heights if u != s) + heights[s] = n + + grt = GlobalRelabelThreshold(n, R.size(), global_relabel_freq) + + # Initialize heights and 'current edge' data structures of the nodes. + for u in R: + R_nodes[u]["height"] = heights[u] if u in heights else n + 1 + R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u]) + + def push(u, v, flow): + """Push flow units of flow from u to v.""" + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + R_nodes[u]["excess"] -= flow + R_nodes[v]["excess"] += flow + + # The maximum flow must be nonzero now. Initialize the preflow by + # saturating all edges emanating from s. + for u, attr in R_succ[s].items(): + flow = attr["capacity"] + if flow > 0: + push(s, u, flow) + + # Partition nodes into levels. + levels = [Level() for i in range(2 * n)] + for u in R: + if u != s and u != t: + level = levels[R_nodes[u]["height"]] + if R_nodes[u]["excess"] > 0: + level.active.add(u) + else: + level.inactive.add(u) + + def activate(v): + """Move a node from the inactive set to the active set of its level.""" + if v != s and v != t: + level = levels[R_nodes[v]["height"]] + if v in level.inactive: + level.inactive.remove(v) + level.active.add(v) + + def relabel(u): + """Relabel a node to create an admissible edge.""" + grt.add_work(len(R_succ[u])) + return ( + min( + R_nodes[v]["height"] + for v, attr in R_succ[u].items() + if attr["flow"] < attr["capacity"] + ) + + 1 + ) + + def discharge(u, is_phase1): + """Discharge a node until it becomes inactive or, during phase 1 (see + below), its height reaches at least n. The node is known to have the + largest height among active nodes. 
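+
+        Returns the height of the next level to examine once this node has
+        been discharged.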
+ """ + height = R_nodes[u]["height"] + curr_edge = R_nodes[u]["curr_edge"] + # next_height represents the next height to examine after discharging + # the current node. During phase 1, it is capped to below n. + next_height = height + levels[height].active.remove(u) + while True: + v, attr = curr_edge.get() + if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]: + flow = min(R_nodes[u]["excess"], attr["capacity"] - attr["flow"]) + push(u, v, flow) + activate(v) + if R_nodes[u]["excess"] == 0: + # The node has become inactive. + levels[height].inactive.add(u) + break + try: + curr_edge.move_to_next() + except StopIteration: + # We have run off the end of the adjacency list, and there can + # be no more admissible edges. Relabel the node to create one. + height = relabel(u) + if is_phase1 and height >= n - 1: + # Although the node is still active, with a height at least + # n - 1, it is now known to be on the s side of the minimum + # s-t cut. Stop processing it until phase 2. + levels[height].active.add(u) + break + # The first relabel operation after global relabeling may not + # increase the height of the node since the 'current edge' data + # structure is not rewound. Use height instead of (height - 1) + # in case other active nodes at the same level are missed. + next_height = height + R_nodes[u]["height"] = height + return next_height + + def gap_heuristic(height): + """Apply the gap heuristic.""" + # Move all nodes at levels (height + 1) to max_height to level n + 1. + for level in islice(levels, height + 1, max_height + 1): + for u in level.active: + R_nodes[u]["height"] = n + 1 + for u in level.inactive: + R_nodes[u]["height"] = n + 1 + levels[n + 1].active.update(level.active) + level.active.clear() + levels[n + 1].inactive.update(level.inactive) + level.inactive.clear() + + def global_relabel(from_sink): + """Apply the global relabeling heuristic.""" + src = t if from_sink else s + heights = reverse_bfs(src) + if not from_sink: + # s must be reachable from t. Remove t explicitly. + del heights[t] + max_height = max(heights.values()) + if from_sink: + # Also mark nodes from which t is unreachable for relabeling. This + # serves the same purpose as the gap heuristic. + for u in R: + if u not in heights and R_nodes[u]["height"] < n: + heights[u] = n + 1 + else: + # Shift the computed heights because the height of s is n. + for u in heights: + heights[u] += n + max_height += n + del heights[src] + for u, new_height in heights.items(): + old_height = R_nodes[u]["height"] + if new_height != old_height: + if u in levels[old_height].active: + levels[old_height].active.remove(u) + levels[new_height].active.add(u) + else: + levels[old_height].inactive.remove(u) + levels[new_height].inactive.add(u) + R_nodes[u]["height"] = new_height + return max_height + + # Phase 1: Find the maximum preflow by pushing as much flow as possible to + # t. + + height = max_height + while height > 0: + # Discharge active nodes in the current level. + while True: + level = levels[height] + if not level.active: + # All active nodes in the current level have been discharged. + # Move to the next lower level. + height -= 1 + break + # Record the old height and level for the gap heuristic. + old_height = height + old_level = level + u = arbitrary_element(level.active) + height = discharge(u, True) + if grt.is_reached(): + # Global relabeling heuristic: Recompute the exact heights of + # all nodes. 
+ height = global_relabel(True) + max_height = height + grt.clear_work() + elif not old_level.active and not old_level.inactive: + # Gap heuristic: If the level at old_height is empty (a 'gap'), + # a minimum cut has been identified. All nodes with heights + # above old_height can have their heights set to n + 1 and not + # be further processed before a maximum preflow is found. + gap_heuristic(old_height) + height = old_height - 1 + max_height = height + else: + # Update the height of the highest level with at least one + # active node. + max_height = max(max_height, height) + + # A maximum preflow has been found. The excess at t is the maximum flow + # value. + if value_only: + R.graph["flow_value"] = R_nodes[t]["excess"] + return R + + # Phase 2: Convert the maximum preflow into a maximum flow by returning the + # excess to s. + + # Relabel all nodes so that they have accurate heights. + height = global_relabel(False) + grt.clear_work() + + # Continue to discharge the active nodes. + while height > n: + # Discharge active nodes in the current level. + while True: + level = levels[height] + if not level.active: + # All active nodes in the current level have been discharged. + # Move to the next lower level. + height -= 1 + break + u = arbitrary_element(level.active) + height = discharge(u, False) + if grt.is_reached(): + # Global relabeling heuristic. + height = global_relabel(False) + grt.clear_work() + + R.graph["flow_value"] = R_nodes[t]["excess"] + return R + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def preflow_push( + G, s, t, capacity="capacity", residual=None, global_relabel_freq=1, value_only=False +): + r"""Find a maximum single-commodity flow using the highest-label + preflow-push algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 \sqrt{m})$ for $n$ nodes and + $m$ edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + global_relabel_freq : integer, float + Relative frequency of applying the global relabeling heuristic to speed + up the algorithm. If it is None, the heuristic is disabled. Default + value: 1. + + value_only : bool + If False, compute a maximum flow; otherwise, compute a maximum preflow + which is enough for computing the maximum flow value. Default value: + False. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. 
+ + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. For each node :samp:`u` in :samp:`R`, + :samp:`R.nodes[u]['excess']` represents the difference between flow into + :samp:`u` and flow out of :samp:`u`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import preflow_push + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = preflow_push(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value == R.graph["flow_value"] + True + >>> # preflow_push also stores the maximum flow value + >>> # in the excess attribute of the sink node t + >>> flow_value == R.nodes["y"]["excess"] + True + >>> # For some problems, you might only want to compute a + >>> # maximum preflow. + >>> R = preflow_push(G, "x", "y", value_only=True) + >>> flow_value == R.graph["flow_value"] + True + >>> flow_value == R.nodes["y"]["excess"] + True + + """ + R = preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only) + R.graph["algorithm"] = "preflow_push" + nx._clear_cache(R) + return R diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py new file mode 100644 index 0000000..9f1193f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py @@ -0,0 +1,300 @@ +""" +Shortest augmenting path algorithm for maximum flow problems. 
+""" + +from collections import deque + +import networkx as nx + +from .edmondskarp import edmonds_karp_core +from .utils import CurrentEdge, build_residual_network + +__all__ = ["shortest_augmenting_path"] + + +def shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff): + """Implementation of the shortest augmenting path algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + # Initialize/reset the residual network. + for u in R: + for e in R_succ[u].values(): + e["flow"] = 0 + + # Initialize heights of the nodes. + heights = {t: 0} + q = deque([(t, 0)]) + while q: + u, height = q.popleft() + height += 1 + for v, attr in R_pred[u].items(): + if v not in heights and attr["flow"] < attr["capacity"]: + heights[v] = height + q.append((v, height)) + + if s not in heights: + # t is not reachable from s in the residual network. The maximum flow + # must be zero. + R.graph["flow_value"] = 0 + return R + + n = len(G) + m = R.size() / 2 + + # Initialize heights and 'current edge' data structures of the nodes. + for u in R: + R_nodes[u]["height"] = heights[u] if u in heights else n + R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u]) + + # Initialize counts of nodes in each level. + counts = [0] * (2 * n - 1) + for u in R: + counts[R_nodes[u]["height"]] += 1 + + inf = R.graph["inf"] + + def augment(path): + """Augment flow along a path from s to t.""" + # Determine the path residual capacity. + flow = inf + it = iter(path) + u = next(it) + for v in it: + attr = R_succ[u][v] + flow = min(flow, attr["capacity"] - attr["flow"]) + u = v + if flow * 2 > inf: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + # Augment flow along the path. + it = iter(path) + u = next(it) + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + u = v + return flow + + def relabel(u): + """Relabel a node to create an admissible edge.""" + height = n - 1 + for v, attr in R_succ[u].items(): + if attr["flow"] < attr["capacity"]: + height = min(height, R_nodes[v]["height"]) + return height + 1 + + if cutoff is None: + cutoff = float("inf") + + # Phase 1: Look for shortest augmenting paths using depth-first search. + + flow_value = 0 + path = [s] + u = s + d = n if not two_phase else int(min(m**0.5, 2 * n ** (2.0 / 3))) + done = R_nodes[s]["height"] >= d + while not done: + height = R_nodes[u]["height"] + curr_edge = R_nodes[u]["curr_edge"] + # Depth-first search for the next node on the path to t. + while True: + v, attr = curr_edge.get() + if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]: + # Advance to the next node following an admissible edge. + path.append(v) + u = v + break + try: + curr_edge.move_to_next() + except StopIteration: + counts[height] -= 1 + if counts[height] == 0: + # Gap heuristic: If relabeling causes a level to become + # empty, a minimum cut has been identified. The algorithm + # can now be terminated. + R.graph["flow_value"] = flow_value + return R + height = relabel(u) + if u == s and height >= d: + if not two_phase: + # t is disconnected from s in the residual network. No + # more augmenting paths exist. 
+ R.graph["flow_value"] = flow_value + return R + else: + # t is at least d steps away from s. End of phase 1. + done = True + break + counts[height] += 1 + R_nodes[u]["height"] = height + if u != s: + # After relabeling, the last edge on the path is no longer + # admissible. Retreat one step to look for an alternative. + path.pop() + u = path[-1] + break + if u == t: + # t is reached. Augment flow along the path and reset it for a new + # depth-first search. + flow_value += augment(path) + if flow_value >= cutoff: + R.graph["flow_value"] = flow_value + return R + path = [s] + u = s + + # Phase 2: Look for shortest augmenting paths using breadth-first search. + flow_value += edmonds_karp_core(R, s, t, cutoff - flow_value) + + R.graph["flow_value"] = flow_value + return R + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def shortest_augmenting_path( + G, + s, + t, + capacity="capacity", + residual=None, + value_only=False, + two_phase=False, + cutoff=None, +): + r"""Find a maximum single-commodity flow using the shortest augmenting path + algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$ + edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + two_phase : bool + If True, a two-phase variant is used. The two-phase variant improves + the running time on unit-capacity networks from $O(nm)$ to + $O(\min(n^{2/3}, m^{1/2}) m)$. Default value: False. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`preflow_push` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. 
:samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import shortest_augmenting_path + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = shortest_augmenting_path(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + """ + R = shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff) + R.graph["algorithm"] = "shortest_augmenting_path" + nx._clear_cache(R) + return R diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..054250d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-312.pyc new file mode 100644 index 0000000..d418f3c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-312.pyc new file mode 100644 index 0000000..b78334a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-312.pyc differ diff 
--git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-312.pyc new file mode 100644 index 0000000..2e6f63d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-312.pyc new file mode 100644 index 0000000..a798a04 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-312.pyc new file mode 100644 index 0000000..4f99e47 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 new file mode 100644 index 0000000..e6ed574 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 new file mode 100644 index 0000000..abd0e8a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 new file mode 100644 index 0000000..cd3ea80 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py new file mode 100644 index 0000000..1649ec8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py @@ -0,0 +1,128 @@ +from itertools import combinations + +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + + +class TestGomoryHuTree: + def minimum_edge_weight(self, T, u, v): + path = nx.shortest_path(T, u, v, weight="weight") + return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:])) + + def compute_cutset(self, G, T_orig, edge): + T = T_orig.copy() + T.remove_edge(*edge) + U, V = list(nx.connected_components(T)) + cutset = set() + for x, nbrs in ((n, G[n]) for n in U): + cutset.update((x, y) for y in nbrs if y in V) + return cutset + + def test_default_flow_function_karate_club_graph(self): + G = nx.karate_club_graph() + 
nx.set_edge_attributes(G, 1, "capacity") + T = nx.gomory_hu_tree(G) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_karate_club_graph(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_davis_southern_women_graph(self): + G = nx.davis_southern_women_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_florentine_families_graph(self): + G = nx.florentine_families_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + @pytest.mark.slow + def test_les_miserables_graph_cutset(self): + G = nx.les_miserables_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_karate_club_graph_cutset(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + T = nx.gomory_hu_tree(G) + assert nx.is_tree(T) + u, v = 0, 33 + cut_value, edge = self.minimum_edge_weight(T, u, v) + cutset = self.compute_cutset(G, T, edge) + assert cut_value == len(cutset) + + def test_wikipedia_example(self): + # Example from https://en.wikipedia.org/wiki/Gomory%E2%80%93Hu_tree + G = nx.Graph() + G.add_weighted_edges_from( + ( + (0, 1, 1), + (0, 2, 7), + (1, 2, 1), + (1, 3, 3), + (1, 4, 2), + (2, 4, 4), + (3, 4, 1), + (3, 5, 6), + (4, 5, 2), + ) + ) + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, capacity="weight", flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v, capacity="weight") == cut_value + + def test_directed_raises(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + T = nx.gomory_hu_tree(G) + + def test_empty_raises(self): + with pytest.raises(nx.NetworkXError): + G = nx.empty_graph() + T = nx.gomory_hu_tree(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_maxflow.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_maxflow.py new file mode 100644 index 0000000..71381ff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_maxflow.py @@ -0,0 +1,573 @@ +"""Maximum flow algorithms test suite.""" + +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_flow_dict, + build_residual_network, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = { + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +} 
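+
+# flow_funcs collects the five residual-network backends under test; the
+# interface functions below (maximum_flow, minimum_cut, and their *_value
+# variants) accept any of them through their `flow_func` keyword argument.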
+ +max_min_funcs = {nx.maximum_flow, nx.minimum_cut} +flow_value_funcs = {nx.maximum_flow_value, nx.minimum_cut_value} +interface_funcs = max_min_funcs | flow_value_funcs +all_funcs = flow_funcs | interface_funcs + + +def compute_cutset(G, partition): + reachable, non_reachable = partition + cutset = set() + for u, nbrs in ((n, G[n]) for n in reachable): + cutset.update((u, v) for v in nbrs if v in non_reachable) + return cutset + + +def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func): + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert set(G) == set(flowDict), errmsg + for u in G: + assert set(G[u]) == set(flowDict[u]), errmsg + excess = dict.fromkeys(flowDict, 0) + for u in flowDict: + for v, flow in flowDict[u].items(): + if capacity in G[u][v]: + assert flow <= G[u][v][capacity] + assert flow >= 0, errmsg + excess[u] -= flow + excess[v] += flow + for u, exc in excess.items(): + if u == s: + assert exc == -solnValue, errmsg + elif u == t: + assert exc == solnValue, errmsg + else: + assert exc == 0, errmsg + + +def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func): + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert all(n in G for n in partition[0]), errmsg + assert all(n in G for n in partition[1]), errmsg + cutset = compute_cutset(G, partition) + assert all(G.has_edge(u, v) for (u, v) in cutset), errmsg + assert solnValue == sum(G[u][v][capacity] for (u, v) in cutset), errmsg + H = G.copy() + H.remove_edges_from(cutset) + if not G.is_directed(): + assert not nx.is_connected(H), errmsg + else: + assert not nx.is_strongly_connected(H), errmsg + + +def compare_flows_and_cuts(G, s, t, solnValue, capacity="capacity"): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + R = flow_func(G, s, t, capacity) + # Test both legacy and new implementations. 
+ flow_value = R.graph["flow_value"] + flow_dict = build_flow_dict(G, R) + assert flow_value == solnValue, errmsg + validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func) + # Minimum cut + cut_value, partition = nx.minimum_cut( + G, s, t, capacity=capacity, flow_func=flow_func + ) + validate_cuts(G, s, t, solnValue, partition, capacity, flow_func) + + +class TestMaxflowMinCutCommon: + def test_graph1(self): + # Trivial undirected graph + G = nx.Graph() + G.add_edge(1, 2, capacity=1.0) + + # solution flows + # {1: {2: 1.0}, 2: {1: 1.0}} + + compare_flows_and_cuts(G, 1, 2, 1.0) + + def test_graph2(self): + # A more complex undirected graph + # adapted from https://web.archive.org/web/20220815055650/https://www.topcoder.com/thrive/articles/Maximum%20Flow:%20Part%20One + G = nx.Graph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + + # H + # { + # "x": {"a": 3, "b": 1}, + # "a": {"c": 3, "x": 3}, + # "b": {"c": 1, "d": 2, "x": 1}, + # "c": {"a": 3, "b": 1, "y": 2}, + # "d": {"b": 2, "e": 2}, + # "e": {"d": 2, "y": 2}, + # "y": {"c": 2, "e": 2}, + # } + + compare_flows_and_cuts(G, "x", "y", 4.0) + + def test_digraph1(self): + # The classic directed graph example + G = nx.DiGraph() + G.add_edge("a", "b", capacity=1000.0) + G.add_edge("a", "c", capacity=1000.0) + G.add_edge("b", "c", capacity=1.0) + G.add_edge("b", "d", capacity=1000.0) + G.add_edge("c", "d", capacity=1000.0) + + # H + # { + # "a": {"b": 1000.0, "c": 1000.0}, + # "b": {"c": 0, "d": 1000.0}, + # "c": {"d": 1000.0}, + # "d": {}, + # } + + compare_flows_and_cuts(G, "a", "d", 2000.0) + + def test_digraph2(self): + # An example in which some edges end up with zero flow. + G = nx.DiGraph() + G.add_edge("s", "b", capacity=2) + G.add_edge("s", "c", capacity=1) + G.add_edge("c", "d", capacity=1) + G.add_edge("d", "a", capacity=1) + G.add_edge("b", "a", capacity=2) + G.add_edge("a", "t", capacity=2) + + # H + # { + # "s": {"b": 2, "c": 0}, + # "c": {"d": 0}, + # "d": {"a": 0}, + # "b": {"a": 2}, + # "a": {"t": 2}, + # "t": {}, + # } + + compare_flows_and_cuts(G, "s", "t", 2) + + def test_digraph3(self): + # A directed graph example from Cormen et al. 
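+ # ("Introduction to Algorithms"; its maximum flow value is 23,
+ # as checked below.)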
+ G = nx.DiGraph() + G.add_edge("s", "v1", capacity=16.0) + G.add_edge("s", "v2", capacity=13.0) + G.add_edge("v1", "v2", capacity=10.0) + G.add_edge("v2", "v1", capacity=4.0) + G.add_edge("v1", "v3", capacity=12.0) + G.add_edge("v3", "v2", capacity=9.0) + G.add_edge("v2", "v4", capacity=14.0) + G.add_edge("v4", "v3", capacity=7.0) + G.add_edge("v3", "t", capacity=20.0) + G.add_edge("v4", "t", capacity=4.0) + + # H + # { + # "s": {"v1": 12.0, "v2": 11.0}, + # "v2": {"v1": 0, "v4": 11.0}, + # "v1": {"v2": 0, "v3": 12.0}, + # "v3": {"v2": 0, "t": 19.0}, + # "v4": {"v3": 7.0, "t": 4.0}, + # "t": {}, + # } + + compare_flows_and_cuts(G, "s", "t", 23.0) + + def test_digraph4(self): + # A more complex directed graph + # from https://web.archive.org/web/20220815055650/https://www.topcoder.com/thrive/articles/Maximum%20Flow:%20Part%20One + G = nx.DiGraph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + + # H + # { + # "x": {"a": 2.0, "b": 1.0}, + # "a": {"c": 2.0}, + # "b": {"c": 0, "d": 1.0}, + # "c": {"y": 2.0}, + # "d": {"e": 1.0}, + # "e": {"y": 1.0}, + # "y": {}, + # } + + compare_flows_and_cuts(G, "x", "y", 3.0) + + def test_wikipedia_dinitz_example(self): + # Nice example from https://en.wikipedia.org/wiki/Dinic's_algorithm + G = nx.DiGraph() + G.add_edge("s", 1, capacity=10) + G.add_edge("s", 2, capacity=10) + G.add_edge(1, 3, capacity=4) + G.add_edge(1, 4, capacity=8) + G.add_edge(1, 2, capacity=2) + G.add_edge(2, 4, capacity=9) + G.add_edge(3, "t", capacity=10) + G.add_edge(4, 3, capacity=6) + G.add_edge(4, "t", capacity=10) + + # solution flows + # { + # 1: {2: 0, 3: 4, 4: 6}, + # 2: {4: 9}, + # 3: {"t": 9}, + # 4: {3: 5, "t": 10}, + # "s": {1: 10, 2: 9}, + # "t": {}, + # } + + compare_flows_and_cuts(G, "s", "t", 19) + + def test_optional_capacity(self): + # Test optional capacity parameter. 
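+ # (Same network as test_digraph4, but with capacities stored under
+ # the edge attribute 'spam' instead of the default 'capacity'.)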
+ G = nx.DiGraph() + G.add_edge("x", "a", spam=3.0) + G.add_edge("x", "b", spam=1.0) + G.add_edge("a", "c", spam=3.0) + G.add_edge("b", "c", spam=5.0) + G.add_edge("b", "d", spam=4.0) + G.add_edge("d", "e", spam=2.0) + G.add_edge("c", "y", spam=2.0) + G.add_edge("e", "y", spam=3.0) + + # solution flows + # { + # "x": {"a": 2.0, "b": 1.0}, + # "a": {"c": 2.0}, + # "b": {"c": 0, "d": 1.0}, + # "c": {"y": 2.0}, + # "d": {"e": 1.0}, + # "e": {"y": 1.0}, + # "y": {}, + # } + solnValue = 3.0 + s = "x" + t = "y" + + compare_flows_and_cuts(G, s, t, solnValue, capacity="spam") + + def test_digraph_infcap_edges(self): + # DiGraph with infinite capacity edges + G = nx.DiGraph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c", capacity=25) + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + # H + # { + # "s": {"a": 85, "b": 12}, + # "a": {"c": 25, "t": 60}, + # "b": {"c": 12}, + # "c": {"t": 37}, + # "t": {}, + # } + + compare_flows_and_cuts(G, "s", "t", 97) + + # DiGraph with infinite capacity digon + G = nx.DiGraph() + G.add_edge("s", "a", capacity=85) + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c") + G.add_edge("c", "a") + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t", capacity=37) + + # H + # { + # "s": {"a": 85, "b": 12}, + # "a": {"c": 25, "t": 60}, + # "c": {"a": 0, "t": 37}, + # "b": {"c": 12}, + # "t": {}, + # } + + compare_flows_and_cuts(G, "s", "t", 97) + + def test_digraph_infcap_path(self): + # Graph with infinite capacity (s, t)-path + G = nx.DiGraph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c") + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + for flow_func in all_funcs: + pytest.raises(nx.NetworkXUnbounded, flow_func, G, "s", "t") + + def test_graph_infcap_edges(self): + # Undirected graph with infinite capacity edges + G = nx.Graph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c", capacity=25) + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + # H + # { + # "s": {"a": 85, "b": 12}, + # "a": {"c": 25, "s": 85, "t": 60}, + # "b": {"c": 12, "s": 12}, + # "c": {"a": 25, "b": 12, "t": 37}, + # "t": {"a": 60, "c": 37}, + # } + + compare_flows_and_cuts(G, "s", "t", 97) + + def test_digraph5(self): + # From ticket #429 by mfrasca. 
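+ # (Max flow is 4: node a forwards one surplus unit to b over the
+ # a->b edge, as the flow solution below shows.)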
+ G = nx.DiGraph() + G.add_edge("s", "a", capacity=2) + G.add_edge("s", "b", capacity=2) + G.add_edge("a", "b", capacity=5) + G.add_edge("a", "t", capacity=1) + G.add_edge("b", "a", capacity=1) + G.add_edge("b", "t", capacity=3) + # flow solution + # { + # "a": {"b": 1, "t": 1}, + # "b": {"a": 0, "t": 3}, + # "s": {"a": 2, "b": 2}, + # "t": {}, + # } + compare_flows_and_cuts(G, "s", "t", 4) + + def test_disconnected(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(1) + assert nx.maximum_flow_value(G, 0, 3) == 0 + # flow solution + # {0: {}, 2: {3: 0}, 3: {2: 0}} + compare_flows_and_cuts(G, 0, 3, 0) + + def test_source_target_not_in_graph(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(0) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 3) + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(3) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 3) + + def test_source_target_coincide(self): + G = nx.Graph() + G.add_node(0) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 0) + + def test_multigraphs_raise(self): + G = nx.MultiGraph() + M = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (1, 0)], capacity=True) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 0) + + +class TestMaxFlowMinCutInterface: + def setup_method(self): + G = nx.DiGraph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + self.G = G + H = nx.DiGraph() + H.add_edge(0, 1, capacity=1.0) + H.add_edge(1, 2, capacity=1.0) + self.H = H + + def test_flow_func_not_callable(self): + elements = ["this_should_be_callable", 10, {1, 2, 3}] + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + for flow_func in interface_funcs: + for element in elements: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element) + pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element) + + def test_flow_func_parameters(self): + G = self.G + fv = 3.0 + for interface_func in interface_funcs: + for flow_func in flow_funcs: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + result = interface_func(G, "x", "y", flow_func=flow_func) + if interface_func in max_min_funcs: + result = result[0] + assert fv == result, errmsg + + def test_minimum_cut_no_cutoff(self): + G = self.G + pytest.raises( + nx.NetworkXError, + nx.minimum_cut, + G, + "x", + "y", + flow_func=preflow_push, + cutoff=1.0, + ) + pytest.raises( + nx.NetworkXError, + nx.minimum_cut_value, + G, + "x", + "y", + flow_func=preflow_push, + cutoff=1.0, + ) + + def test_kwargs(self): + G = self.H + fv = 1.0 + to_test = ( + (shortest_augmenting_path, {"two_phase": True}), + (preflow_push, {"global_relabel_freq": 5}), + ) + for interface_func in interface_funcs: + for flow_func, kwargs in to_test: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs) + if interface_func in 
max_min_funcs: + result = result[0] + assert fv == result, errmsg + + def test_kwargs_default_flow_func(self): + G = self.H + for interface_func in interface_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 0, 1, global_relabel_freq=2 + ) + + def test_reusing_residual(self): + G = self.G + fv = 3.0 + s, t = "x", "y" + R = build_residual_network(G, "capacity") + for interface_func in interface_funcs: + for flow_func in flow_funcs: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + for i in range(3): + result = interface_func( + G, "x", "y", flow_func=flow_func, residual=R + ) + if interface_func in max_min_funcs: + result = result[0] + assert fv == result, errmsg + + +# Tests specific to one algorithm +def test_preflow_push_global_relabel_freq(): + G = nx.DiGraph() + G.add_edge(1, 2, capacity=1) + R = preflow_push(G, 1, 2, global_relabel_freq=None) + assert R.graph["flow_value"] == 1 + pytest.raises(nx.NetworkXError, preflow_push, G, 1, 2, global_relabel_freq=-1) + + +def test_preflow_push_makes_enough_space(): + # From ticket #1542 + G = nx.DiGraph() + nx.add_path(G, [0, 1, 3], capacity=1) + nx.add_path(G, [1, 2, 3], capacity=1) + R = preflow_push(G, 0, 3, value_only=False) + assert R.graph["flow_value"] == 1 + + +def test_shortest_augmenting_path_two_phase(): + k = 5 + p = 1000 + G = nx.DiGraph() + for i in range(k): + G.add_edge("s", (i, 0), capacity=1) + nx.add_path(G, ((i, j) for j in range(p)), capacity=1) + G.add_edge((i, p - 1), "t", capacity=1) + R = shortest_augmenting_path(G, "s", "t", two_phase=True) + assert R.graph["flow_value"] == k + R = shortest_augmenting_path(G, "s", "t", two_phase=False) + assert R.graph["flow_value"] == k + + +class TestCutoff: + def test_cutoff(self): + k = 5 + p = 1000 + G = nx.DiGraph() + for i in range(k): + G.add_edge("s", (i, 0), capacity=2) + nx.add_path(G, ((i, j) for j in range(p)), capacity=2) + G.add_edge((i, p - 1), "t", capacity=2) + R = shortest_augmenting_path(G, "s", "t", two_phase=True, cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = shortest_augmenting_path(G, "s", "t", two_phase=False, cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = edmonds_karp(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = dinitz(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = boykov_kolmogorov(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + + def test_complete_graph_cutoff(self): + G = nx.complete_graph(5) + nx.set_edge_attributes(G, dict.fromkeys(G.edges(), 1), "capacity") + for flow_func in [ + shortest_augmenting_path, + edmonds_karp, + dinitz, + boykov_kolmogorov, + ]: + for cutoff in [3, 2, 1]: + result = nx.maximum_flow_value( + G, 0, 4, flow_func=flow_func, cutoff=cutoff + ) + assert cutoff == result, f"cutoff error in {flow_func.__name__}" diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py new file mode 100644 index 0000000..fdc4e3d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py @@ -0,0 +1,155 @@ +"""Maximum flow algorithms test suite on large graphs.""" + +import bz2 +import importlib.resources +import pickle + +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_flow_dict, + 
build_residual_network, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + + +def gen_pyramid(N): + # This graph admits a flow of value 1 for which every arc is at + # capacity (except the arcs incident to the sink which have + # infinite capacity). + G = nx.DiGraph() + + for i in range(N - 1): + cap = 1.0 / (i + 2) + for j in range(i + 1): + G.add_edge((i, j), (i + 1, j), capacity=cap) + cap = 1.0 / (i + 1) - cap + G.add_edge((i, j), (i + 1, j + 1), capacity=cap) + cap = 1.0 / (i + 2) - cap + + for j in range(N): + G.add_edge((N - 1, j), "t") + + return G + + +def read_graph(name): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / f"{name}.gpickle.bz2" + ) + + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + return G + + +def validate_flows(G, s, t, soln_value, R, flow_func): + flow_value = R.graph["flow_value"] + flow_dict = build_flow_dict(G, R) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert soln_value == flow_value, errmsg + assert set(G) == set(flow_dict), errmsg + for u in G: + assert set(G[u]) == set(flow_dict[u]), errmsg + excess = dict.fromkeys(flow_dict, 0) + for u in flow_dict: + for v, flow in flow_dict[u].items(): + assert flow <= G[u][v].get("capacity", float("inf")), errmsg + assert flow >= 0, errmsg + excess[u] -= flow + excess[v] += flow + for u, exc in excess.items(): + if u == s: + assert exc == -soln_value, errmsg + elif u == t: + assert exc == soln_value, errmsg + else: + assert exc == 0, errmsg + + +class TestMaxflowLargeGraph: + def test_complete_graph(self): + N = 50 + G = nx.complete_graph(N) + nx.set_edge_attributes(G, 5, "capacity") + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + kwargs["flow_func"] = flow_func + errmsg = f"Assertion failed in function: {flow_func.__name__}" + flow_value = nx.maximum_flow_value(G, 1, 2, **kwargs) + assert flow_value == 5 * (N - 1), errmsg + + def test_pyramid(self): + N = 10 + # N = 100 # this gives a graph with 5051 nodes + G = gen_pyramid(N) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + kwargs["flow_func"] = flow_func + errmsg = f"Assertion failed in function: {flow_func.__name__}" + flow_value = nx.maximum_flow_value(G, (0, 0), "t", **kwargs) + assert flow_value == pytest.approx(1.0, abs=1e-7) + + def test_gl1(self): + G = read_graph("gl1") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + # do one flow_func to save time + flow_func = flow_funcs[0] + validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), flow_func) + + # for flow_func in flow_funcs: + # validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), + # flow_func) + + @pytest.mark.slow + def test_gw1(self): + G = read_graph("gw1") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + validate_flows(G, s, t, 1202018, flow_func(G, s, t, **kwargs), flow_func) + + def test_wlm3(self): + G = read_graph("wlm3") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + # do one flow_func to save time + flow_func = flow_funcs[0] + validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs), flow_func) + + # for flow_func in flow_funcs: + # validate_flows(G, s, t, 11875108, flow_func(G, s, t, 
**kwargs), + # flow_func) + + def test_preflow_push_global_relabel(self): + G = read_graph("gw1") + R = preflow_push(G, 1, len(G), global_relabel_freq=50) + assert R.graph["flow_value"] == 1202018 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_mincost.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_mincost.py new file mode 100644 index 0000000..edc6262 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_mincost.py @@ -0,0 +1,475 @@ +import bz2 +import importlib.resources +import pickle + +import pytest + +import networkx as nx + + +class TestMinCostFlow: + def test_simple_digraph(self): + G = nx.DiGraph() + G.add_node("a", demand=-5) + G.add_node("d", demand=5) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + flowCost, H = nx.network_simplex(G) + soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}} + assert flowCost == 24 + assert nx.min_cost_flow_cost(G) == 24 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 24 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 24 + assert nx.cost_of_flow(G, H) == 24 + assert H == soln + + def test_negcycle_infcap(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("c", "a", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("d", "c", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + + def test_sum_demands_not_zero(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=4) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_no_flow_satisfying_demands(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_transshipment(self): + G = nx.DiGraph() + G.add_node("a", demand=1) + G.add_node("b", demand=-2) + G.add_node("c", demand=-2) + G.add_node("d", demand=3) + G.add_node("e", demand=-4) + G.add_node("f", demand=-4) + G.add_node("g", demand=3) + G.add_node("h", demand=2) + G.add_node("r", demand=3) + G.add_edge("a", "c", weight=3) + G.add_edge("r", "a", weight=2) + G.add_edge("b", "a", weight=9) + G.add_edge("r", "c", weight=0) + G.add_edge("b", "r", weight=-6) + G.add_edge("c", "d", weight=5) + G.add_edge("e", "r", weight=4) + G.add_edge("e", "f", weight=3) + G.add_edge("h", "b", weight=4) + G.add_edge("f", "d", weight=7) + G.add_edge("f", "h", weight=12) + G.add_edge("g", "d", weight=12) + G.add_edge("f", "g", weight=-1) + G.add_edge("h", "g", weight=-10) + flowCost, H = 
nx.network_simplex(G) + soln = { + "a": {"c": 0}, + "b": {"a": 0, "r": 2}, + "c": {"d": 3}, + "d": {}, + "e": {"r": 3, "f": 1}, + "f": {"d": 0, "g": 3, "h": 2}, + "g": {"d": 0}, + "h": {"b": 0, "g": 0}, + "r": {"a": 1, "c": 1}, + } + assert flowCost == 41 + assert nx.min_cost_flow_cost(G) == 41 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 41 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 41 + assert nx.cost_of_flow(G, H) == 41 + assert H == soln + + def test_max_flow_min_cost(self): + G = nx.DiGraph() + G.add_edge("s", "a", bandwidth=6) + G.add_edge("s", "c", bandwidth=10, cost=10) + G.add_edge("a", "b", cost=6) + G.add_edge("b", "d", bandwidth=8, cost=7) + G.add_edge("c", "d", cost=10) + G.add_edge("d", "t", bandwidth=5, cost=5) + soln = { + "s": {"a": 5, "c": 0}, + "a": {"b": 5}, + "b": {"d": 5}, + "c": {"d": 0}, + "d": {"t": 5}, + "t": {}, + } + flow = nx.max_flow_min_cost(G, "s", "t", capacity="bandwidth", weight="cost") + assert flow == soln + assert nx.cost_of_flow(G, flow, weight="cost") == 90 + + G.add_edge("t", "s", cost=-100) + flowCost, flow = nx.capacity_scaling(G, capacity="bandwidth", weight="cost") + G.remove_edge("t", "s") + assert flowCost == -410 + assert flow["t"]["s"] == 5 + del flow["t"]["s"] + assert flow == soln + assert nx.cost_of_flow(G, flow, weight="cost") == 90 + + def test_digraph1(self): + # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied + # Mathematical Programming. Addison-Wesley, 1977. + G = nx.DiGraph() + G.add_node(1, demand=-20) + G.add_node(4, demand=5) + G.add_node(5, demand=15) + G.add_edges_from( + [ + (1, 2, {"capacity": 15, "weight": 4}), + (1, 3, {"capacity": 8, "weight": 4}), + (2, 3, {"weight": 2}), + (2, 4, {"capacity": 4, "weight": 2}), + (2, 5, {"capacity": 10, "weight": 6}), + (3, 4, {"capacity": 15, "weight": 1}), + (3, 5, {"capacity": 5, "weight": 3}), + (4, 5, {"weight": 2}), + (5, 3, {"capacity": 4, "weight": 1}), + ] + ) + flowCost, H = nx.network_simplex(G) + soln = { + 1: {2: 12, 3: 8}, + 2: {3: 8, 4: 4, 5: 0}, + 3: {4: 11, 5: 5}, + 4: {5: 10}, + 5: {3: 0}, + } + assert flowCost == 150 + assert nx.min_cost_flow_cost(G) == 150 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 150 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 150 + assert H == soln + assert nx.cost_of_flow(G, H) == 150 + + def test_digraph2(self): + # Example from ticket #430 from mfrasca. Original source: + # http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11. 
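+ # (A max-flow-min-cost instance: the optimal flow ships 32 units
+ # from s to t at a total cost of 193, as asserted below.)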
+ G = nx.DiGraph() + G.add_edge("s", 1, capacity=12) + G.add_edge("s", 2, capacity=6) + G.add_edge("s", 3, capacity=14) + G.add_edge(1, 2, capacity=11, weight=4) + G.add_edge(2, 3, capacity=9, weight=6) + G.add_edge(1, 4, capacity=5, weight=5) + G.add_edge(1, 5, capacity=2, weight=12) + G.add_edge(2, 5, capacity=4, weight=4) + G.add_edge(2, 6, capacity=2, weight=6) + G.add_edge(3, 6, capacity=31, weight=3) + G.add_edge(4, 5, capacity=18, weight=4) + G.add_edge(5, 6, capacity=9, weight=5) + G.add_edge(4, "t", capacity=3) + G.add_edge(5, "t", capacity=7) + G.add_edge(6, "t", capacity=22) + flow = nx.max_flow_min_cost(G, "s", "t") + soln = { + 1: {2: 6, 4: 5, 5: 1}, + 2: {3: 6, 5: 4, 6: 2}, + 3: {6: 20}, + 4: {5: 2, "t": 3}, + 5: {6: 0, "t": 7}, + 6: {"t": 22}, + "s": {1: 12, 2: 6, 3: 14}, + "t": {}, + } + assert flow == soln + + G.add_edge("t", "s", weight=-100) + flowCost, flow = nx.capacity_scaling(G) + G.remove_edge("t", "s") + assert flow["t"]["s"] == 32 + assert flowCost == -3007 + del flow["t"]["s"] + assert flow == soln + assert nx.cost_of_flow(G, flow) == 193 + + def test_digraph3(self): + """Combinatorial Optimization: Algorithms and Complexity, + Papadimitriou Steiglitz at page 140 has an example, 7.1, but that + admits multiple solutions, so I alter it a bit. From ticket #430 + by mfrasca.""" + + G = nx.DiGraph() + G.add_edge("s", "a") + G["s"]["a"].update({0: 2, 1: 4}) + G.add_edge("s", "b") + G["s"]["b"].update({0: 2, 1: 1}) + G.add_edge("a", "b") + G["a"]["b"].update({0: 5, 1: 2}) + G.add_edge("a", "t") + G["a"]["t"].update({0: 1, 1: 5}) + G.add_edge("b", "a") + G["b"]["a"].update({0: 1, 1: 3}) + G.add_edge("b", "t") + G["b"]["t"].update({0: 3, 1: 2}) + + "PS.ex.7.1: testing main function" + sol = nx.max_flow_min_cost(G, "s", "t", capacity=0, weight=1) + flow = sum(v for v in sol["s"].values()) + assert 4 == flow + assert 23 == nx.cost_of_flow(G, sol, weight=1) + assert sol["s"] == {"a": 2, "b": 2} + assert sol["a"] == {"b": 1, "t": 1} + assert sol["b"] == {"a": 0, "t": 3} + assert sol["t"] == {} + + G.add_edge("t", "s") + G["t"]["s"].update({1: -100}) + flowCost, sol = nx.capacity_scaling(G, capacity=0, weight=1) + G.remove_edge("t", "s") + flow = sum(v for v in sol["s"].values()) + assert 4 == flow + assert sol["t"]["s"] == 4 + assert flowCost == -377 + del sol["t"]["s"] + assert sol["s"] == {"a": 2, "b": 2} + assert sol["a"] == {"b": 1, "t": 1} + assert sol["b"] == {"a": 0, "t": 3} + assert sol["t"] == {} + assert nx.cost_of_flow(G, sol, weight=1) == 23 + + def test_zero_capacity_edges(self): + """Address issue raised in ticket #617 by arv.""" + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2, {"capacity": 1, "weight": 1}), + (1, 5, {"capacity": 1, "weight": 1}), + (2, 3, {"capacity": 0, "weight": 1}), + (2, 5, {"capacity": 1, "weight": 1}), + (5, 3, {"capacity": 2, "weight": 1}), + (5, 4, {"capacity": 0, "weight": 1}), + (3, 4, {"capacity": 2, "weight": 1}), + ] + ) + G.nodes[1]["demand"] = -1 + G.nodes[2]["demand"] = -1 + G.nodes[4]["demand"] = 2 + + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} + assert flowCost == 6 + assert nx.min_cost_flow_cost(G) == 6 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 6 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 6 + assert H == soln + assert nx.cost_of_flow(G, H) == 6 + + def test_digon(self): + """Check if digons are handled properly. 
Taken from ticket + #618 by arv.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"capacity": 3, "weight": 600000}), + (2, 1, {"capacity": 2, "weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} + assert flowCost == 2857140 + assert nx.min_cost_flow_cost(G) == 2857140 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 2857140 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 2857140 + assert H == soln + assert nx.cost_of_flow(G, H) == 2857140 + + def test_deadend(self): + """Check if one-node cycles are handled properly. Taken from ticket + #2906 from @sshraven.""" + G = nx.DiGraph() + + G.add_nodes_from(range(5), demand=0) + G.nodes[4]["demand"] = -13 + G.nodes[3]["demand"] = 13 + + G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1) + pytest.raises(nx.NetworkXUnfeasible, nx.min_cost_flow, G) + + def test_infinite_capacity_neg_digon(self): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"weight": -600}), + (2, 1, {"weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + + def test_finite_capacity_neg_digon(self): + """The digon should receive the maximum amount of flow it can handle. + Taken from ticket #749 by @chuongdo.""" + G = nx.DiGraph() + G.add_edge("a", "b", capacity=1, weight=-1) + G.add_edge("b", "a", capacity=1, weight=-1) + min_cost = -2 + assert nx.min_cost_flow_cost(G) == min_cost + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {"a": {"b": 1}, "b": {"a": 1}} + assert nx.cost_of_flow(G, H) == -2 + + def test_multidigraph(self): + """Multidigraphs are acceptable.""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity") + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + def test_negative_selfloops(self): + """Negative selfloops should cause an exception if uncapacitated and + always be saturated otherwise. 
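+ (Below, the capacitated selfloop has capacity 2 and weight -1, so
+ both solvers saturate it for a total cost of -2.)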
+ """ + G = nx.DiGraph() + G.add_edge(1, 1, weight=-1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + G[1][1]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + + G = nx.MultiDiGraph() + G.add_edge(1, 1, "x", weight=-1) + G.add_edge(1, 1, "y", weight=1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + G[1][1]["x"]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + + def test_bone_shaped(self): + # From #1283 + G = nx.DiGraph() + G.add_node(0, demand=-4) + G.add_node(1, demand=2) + G.add_node(2, demand=2) + G.add_node(3, demand=4) + G.add_node(4, demand=-2) + G.add_node(5, demand=-2) + G.add_edge(0, 1, capacity=4) + G.add_edge(0, 2, capacity=4) + G.add_edge(4, 3, capacity=4) + G.add_edge(5, 3, capacity=4) + G.add_edge(0, 3, capacity=0) + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + + def test_exceptions(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G) + G.add_node(0, demand=float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G.nodes[0]["demand"] = 0 + G.add_node(1, demand=0) + G.add_edge(0, 1, weight=-float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G[0][1]["weight"] = 0 + G.add_edge(0, 0, weight=float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G) + G[0][0]["weight"] = 0 + G[0][1]["capacity"] = -1 + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G[0][1]["capacity"] = 0 + G[0][0]["capacity"] = -1 + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_large(self): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / "netgen-2.gpickle.bz2" + ) + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + flowCost, flowDict = nx.network_simplex(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) + flowCost, flowDict = nx.capacity_scaling(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py new file mode 100644 index 
0000000..9a37fc6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py @@ -0,0 +1,481 @@ +import bz2 +import importlib.resources +import pickle + +import pytest + +import networkx as nx + + +@pytest.fixture +def simple_flow_graph(): + G = nx.DiGraph() + G.add_node("a", demand=0) + G.add_node("b", demand=-5) + G.add_node("c", demand=50000000) + G.add_node("d", demand=-49999995) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + return G + + +@pytest.fixture +def simple_no_flow_graph(): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + return G + + +def get_flowcost_from_flowdict(G, flowDict): + """Returns flow cost calculated from flow dictionary""" + flowCost = 0 + for u in flowDict: + for v in flowDict[u]: + flowCost += flowDict[u][v] * G[u][v]["weight"] + return flowCost + + +def test_infinite_demand_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + node_name = "a" + nx.set_node_attributes(G, {node_name: {"demand": inf}}) + with pytest.raises( + nx.NetworkXError, + match=f"node '{node_name}' has infinite demand", + ): + nx.network_simplex(G) + + +def test_neg_infinite_demand_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + node_name = "a" + nx.set_node_attributes(G, {node_name: {"demand": -inf}}) + with pytest.raises( + nx.NetworkXError, + match=f"node '{node_name}' has infinite demand", + ): + nx.network_simplex(G) + + +def test_infinite_weight_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_edge_attributes( + G, {("a", "b"): {"weight": inf}, ("b", "d"): {"weight": inf}} + ) + with pytest.raises( + nx.NetworkXError, + match=r"edge .* has infinite weight", + ): + nx.network_simplex(G) + + +def test_nonzero_net_demand_raise(simple_flow_graph): + G = simple_flow_graph + nx.set_node_attributes(G, {"b": {"demand": -4}}) + with pytest.raises( + nx.NetworkXUnfeasible, + match="total node demand is not zero", + ): + nx.network_simplex(G) + + +def test_negative_capacity_raise(simple_flow_graph): + G = simple_flow_graph + nx.set_edge_attributes(G, {("a", "b"): {"weight": 1}, ("b", "d"): {"capacity": -9}}) + with pytest.raises( + nx.NetworkXUnfeasible, + match=r"edge .* has negative capacity", + ): + nx.network_simplex(G) + + +def test_no_flow_satisfying_demands(simple_no_flow_graph): + G = simple_no_flow_graph + with pytest.raises( + nx.NetworkXUnfeasible, + match="no flow satisfies all node demands", + ): + nx.network_simplex(G) + + +def test_sum_demands_not_zero(simple_no_flow_graph): + G = simple_no_flow_graph + nx.set_node_attributes(G, {"t": {"demand": 4}}) + with pytest.raises( + nx.NetworkXUnfeasible, + match="total node demand is not zero", + ): + nx.network_simplex(G) + + +def test_google_or_tools_example(): + """ + https://developers.google.com/optimization/flow/mincostflow + """ + G = nx.DiGraph() + start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4] + end_nodes = [1, 2, 2, 3, 4, 3, 4, 4, 2] + capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5] + unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3] + supplies = [20, 0, 0, -5, -15] + answer = 150 + + for i in range(len(supplies)): + G.add_node(i, 
demand=(-1) * supplies[i]) # supplies are negative of demand + + for i in range(len(start_nodes)): + G.add_edge( + start_nodes[i], end_nodes[i], weight=unit_costs[i], capacity=capacities[i] + ) + + flowCost, flowDict = nx.network_simplex(G) + assert flowCost == answer + assert flowCost == get_flowcost_from_flowdict(G, flowDict) + + +def test_google_or_tools_example2(): + """ + https://developers.google.com/optimization/flow/mincostflow + """ + G = nx.DiGraph() + start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4, 3] + end_nodes = [1, 2, 2, 3, 4, 3, 4, 4, 2, 5] + capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5, 10] + unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3, 4] + supplies = [23, 0, 0, -5, -15, -3] + answer = 183 + + for i in range(len(supplies)): + G.add_node(i, demand=(-1) * supplies[i]) # supplies are negative of demand + + for i in range(len(start_nodes)): + G.add_edge( + start_nodes[i], end_nodes[i], weight=unit_costs[i], capacity=capacities[i] + ) + + flowCost, flowDict = nx.network_simplex(G) + assert flowCost == answer + assert flowCost == get_flowcost_from_flowdict(G, flowDict) + + +def test_large(): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / "netgen-2.gpickle.bz2" + ) + + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + flowCost, flowDict = nx.network_simplex(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) + + +def test_simple_digraph(): + G = nx.DiGraph() + G.add_node("a", demand=-5) + G.add_node("d", demand=5) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + flowCost, H = nx.network_simplex(G) + soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}} + assert flowCost == 24 + assert nx.min_cost_flow_cost(G) == 24 + assert H == soln + + +def test_negcycle_infcap(): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("c", "a", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("d", "c", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_transshipment(): + G = nx.DiGraph() + G.add_node("a", demand=1) + G.add_node("b", demand=-2) + G.add_node("c", demand=-2) + G.add_node("d", demand=3) + G.add_node("e", demand=-4) + G.add_node("f", demand=-4) + G.add_node("g", demand=3) + G.add_node("h", demand=2) + G.add_node("r", demand=3) + G.add_edge("a", "c", weight=3) + G.add_edge("r", "a", weight=2) + G.add_edge("b", "a", weight=9) + G.add_edge("r", "c", weight=0) + G.add_edge("b", "r", weight=-6) + G.add_edge("c", "d", weight=5) + G.add_edge("e", "r", weight=4) + G.add_edge("e", "f", weight=3) + G.add_edge("h", "b", weight=4) + G.add_edge("f", "d", weight=7) + G.add_edge("f", "h", weight=12) + G.add_edge("g", "d", weight=12) + G.add_edge("f", "g", weight=-1) + G.add_edge("h", "g", weight=-10) + flowCost, H = nx.network_simplex(G) + soln = { + "a": {"c": 0}, + "b": {"a": 0, "r": 2}, + "c": {"d": 3}, + "d": {}, + "e": {"r": 3, "f": 1}, + "f": {"d": 0, "g": 3, "h": 2}, + "g": {"d": 0}, + "h": {"b": 0, "g": 0}, + "r": {"a": 1, "c": 1}, + } + assert flowCost == 41 + assert H == soln + + +def test_digraph1(): + # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied + # Mathematical Programming. Addison-Wesley, 1977. 
+ G = nx.DiGraph() + G.add_node(1, demand=-20) + G.add_node(4, demand=5) + G.add_node(5, demand=15) + G.add_edges_from( + [ + (1, 2, {"capacity": 15, "weight": 4}), + (1, 3, {"capacity": 8, "weight": 4}), + (2, 3, {"weight": 2}), + (2, 4, {"capacity": 4, "weight": 2}), + (2, 5, {"capacity": 10, "weight": 6}), + (3, 4, {"capacity": 15, "weight": 1}), + (3, 5, {"capacity": 5, "weight": 3}), + (4, 5, {"weight": 2}), + (5, 3, {"capacity": 4, "weight": 1}), + ] + ) + flowCost, H = nx.network_simplex(G) + soln = { + 1: {2: 12, 3: 8}, + 2: {3: 8, 4: 4, 5: 0}, + 3: {4: 11, 5: 5}, + 4: {5: 10}, + 5: {3: 0}, + } + assert flowCost == 150 + assert nx.min_cost_flow_cost(G) == 150 + assert H == soln + + +def test_zero_capacity_edges(): + """Address issue raised in ticket #617 by arv.""" + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2, {"capacity": 1, "weight": 1}), + (1, 5, {"capacity": 1, "weight": 1}), + (2, 3, {"capacity": 0, "weight": 1}), + (2, 5, {"capacity": 1, "weight": 1}), + (5, 3, {"capacity": 2, "weight": 1}), + (5, 4, {"capacity": 0, "weight": 1}), + (3, 4, {"capacity": 2, "weight": 1}), + ] + ) + G.nodes[1]["demand"] = -1 + G.nodes[2]["demand"] = -1 + G.nodes[4]["demand"] = 2 + + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} + assert flowCost == 6 + assert nx.min_cost_flow_cost(G) == 6 + assert H == soln + + +def test_digon(): + """Check if digons are handled properly. Taken from ticket + #618 by arv.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"capacity": 3, "weight": 600000}), + (2, 1, {"capacity": 2, "weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} + assert flowCost == 2857140 + + +def test_deadend(): + """Check if one-node cycles are handled properly. Taken from ticket + #2906 from @sshraven.""" + G = nx.DiGraph() + + G.add_nodes_from(range(5), demand=0) + G.nodes[4]["demand"] = -13 + G.nodes[3]["demand"] = 13 + + G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_infinite_capacity_neg_digon(): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"weight": -600}), + (2, 1, {"weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + +def test_multidigraph(): + """Multidigraphs are acceptable.""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity") + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + +def test_negative_selfloops(): + """Negative selfloops should cause an exception if uncapacitated and + always be saturated otherwise. 
+ """ + G = nx.DiGraph() + G.add_edge(1, 1, weight=-1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + G[1][1]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + + G = nx.MultiDiGraph() + G.add_edge(1, 1, "x", weight=-1) + G.add_edge(1, 1, "y", weight=1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + G[1][1]["x"]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + + +def test_bone_shaped(): + # From #1283 + G = nx.DiGraph() + G.add_node(0, demand=-4) + G.add_node(1, demand=2) + G.add_node(2, demand=2) + G.add_node(3, demand=4) + G.add_node(4, demand=-2) + G.add_node(5, demand=-2) + G.add_edge(0, 1, capacity=4) + G.add_edge(0, 2, capacity=4) + G.add_edge(4, 3, capacity=4) + G.add_edge(5, 3, capacity=4) + G.add_edge(0, 3, capacity=0) + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + + +def test_graphs_type_exceptions(): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +@pytest.fixture() +def faux_inf_example(): + """Base test graph for probing faux_infinity bound. See gh-7562""" + G = nx.DiGraph() + + # Add nodes with demands + G.add_node("s0", demand=-4) + G.add_node("s1", demand=-4) + G.add_node("ns", demand=0) + G.add_node("nc", demand=0) + G.add_node("c0", demand=4) + G.add_node("c1", demand=4) + + # Uniformly weighted edges + G.add_edge("s0", "ns", weight=1) + G.add_edge("s1", "ns", weight=1) + G.add_edge("ns", "nc", weight=1) + G.add_edge("nc", "c0", weight=1) + G.add_edge("nc", "c1", weight=1) + + return G + + +@pytest.mark.parametrize("large_capacity", [True, False]) +@pytest.mark.parametrize("large_demand", [True, False]) +@pytest.mark.parametrize("large_weight", [True, False]) +def test_network_simplex_faux_infinity( + faux_inf_example, large_capacity, large_demand, large_weight +): + """network_simplex should not raise an exception as a result of faux_infinity + for these cases. 
See gh-7562""" + G = faux_inf_example + lv = 1_000_000_000 + + # Modify the base graph with combinations of large values for capacity, + # demand, and weight to probe faux_inifity + if large_capacity: + G["s0"]["ns"]["capacity"] = lv + if large_demand: + G.nodes["s0"]["demand"] = -lv + G.nodes["c1"]["demand"] = lv + if large_weight: + G["s1"]["ns"]["weight"] = lv + + # Execute without raising + fc, fd = nx.network_simplex(G) + + +def test_network_simplex_unbounded_flow(): + G = nx.DiGraph() + # Add nodes + G.add_node("A") + G.add_node("B") + G.add_node("C") + + # Add edges forming a negative cycle + G.add_weighted_edges_from([("A", "B", -5), ("B", "C", -5), ("C", "A", -5)]) + + with pytest.raises( + nx.NetworkXUnbounded, + match="negative cycle with infinite capacity found", + ): + nx.network_simplex(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 new file mode 100644 index 0000000..8ce935a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/utils.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/utils.py new file mode 100644 index 0000000..9780746 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/flow/utils.py @@ -0,0 +1,194 @@ +""" +Utility classes and functions for network flow algorithms. +""" + +from collections import deque + +import networkx as nx + +__all__ = [ + "CurrentEdge", + "Level", + "GlobalRelabelThreshold", + "build_residual_network", + "detect_unboundedness", + "build_flow_dict", +] + + +class CurrentEdge: + """Mechanism for iterating over out-edges incident to a node in a circular + manner. StopIteration exception is raised when wraparound occurs. + """ + + __slots__ = ("_edges", "_it", "_curr") + + def __init__(self, edges): + self._edges = edges + if self._edges: + self._rewind() + + def get(self): + return self._curr + + def move_to_next(self): + try: + self._curr = next(self._it) + except StopIteration: + self._rewind() + raise + + def _rewind(self): + self._it = iter(self._edges.items()) + self._curr = next(self._it) + + def __eq__(self, other): + return (getattr(self, "_curr", None), self._edges) == ( + (getattr(other, "_curr", None), other._edges) + ) + + +class Level: + """Active and inactive nodes in a level.""" + + __slots__ = ("active", "inactive") + + def __init__(self): + self.active = set() + self.inactive = set() + + +class GlobalRelabelThreshold: + """Measurement of work before the global relabeling heuristic should be + applied. + """ + + def __init__(self, n, m, freq): + self._threshold = (n + m) / freq if freq else float("inf") + self._work = 0 + + def add_work(self, work): + self._work += work + + def is_reached(self): + return self._work >= self._threshold + + def clear_work(self): + self._work = 0 + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def build_residual_network(G, capacity): + """Build a residual network and initialize a zero flow. + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. 
+
+    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
+    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
+    in :samp:`G` or zero otherwise. If the capacity is infinite,
+    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
+    that does not affect the solution of the problem. This value is stored in
+    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not
+    specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such
+    that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    """
+    if G.is_multigraph():
+        raise nx.NetworkXError("MultiGraph and MultiDiGraph not supported (yet).")
+
+    R = nx.DiGraph()
+    R.__networkx_cache__ = None  # Disable caching
+    R.add_nodes_from(G)
+
+    inf = float("inf")
+    # Extract edges with positive capacities. Self loops excluded.
+    edge_list = [
+        (u, v, attr)
+        for u, v, attr in G.edges(data=True)
+        if u != v and attr.get(capacity, inf) > 0
+    ]
+    # Simulate infinity with three times the sum of the finite edge capacities
+    # or any positive value if the sum is zero. This allows the
+    # infinite-capacity edges to be distinguished for unboundedness detection
+    # and directly participate in residual capacity calculation. If the maximum
+    # flow is finite, these edges cannot appear in the minimum cut and thus
+    # guarantee correctness. Since the residual capacity of an
+    # infinite-capacity edge is always at least 2/3 of inf, while that of a
+    # finite-capacity edge is at most 1/3 of inf, if an operation moves more
+    # than 1/3 of inf units of flow to t, there must be an infinite-capacity
+    # s-t path in G.
+    inf = (
+        3
+        * sum(
+            attr[capacity]
+            for u, v, attr in edge_list
+            if capacity in attr and attr[capacity] != inf
+        )
+        or 1
+    )
+    if G.is_directed():
+        for u, v, attr in edge_list:
+            r = min(attr.get(capacity, inf), inf)
+            if not R.has_edge(u, v):
+                # Both (u, v) and (v, u) must be present in the residual
+                # network.
+                R.add_edge(u, v, capacity=r)
+                R.add_edge(v, u, capacity=0)
+            else:
+                # The edge (u, v) was added when (v, u) was visited.
+                R[u][v]["capacity"] = r
+    else:
+        for u, v, attr in edge_list:
+            # Add a pair of edges with equal residual capacities.
+            r = min(attr.get(capacity, inf), inf)
+            R.add_edge(u, v, capacity=r)
+            R.add_edge(v, u, capacity=r)
+
+    # Record the value simulating infinity.
+    R.graph["inf"] = inf
+
+    return R
+
+
+@nx._dispatchable(
+    graphs="R",
+    preserve_edge_attrs={"R": {"capacity": float("inf")}},
+    preserve_graph_attrs=True,
+)
+def detect_unboundedness(R, s, t):
+    """Detect an infinite-capacity s-t path in R."""
+    q = deque([s])
+    seen = {s}
+    inf = R.graph["inf"]
+    while q:
+        u = q.popleft()
+        for v, attr in R[u].items():
+            if attr["capacity"] == inf and v not in seen:
+                if v == t:
+                    raise nx.NetworkXUnbounded(
+                        "Infinite capacity path, flow unbounded above."
+                    )
+                seen.add(v)
+                q.append(v)
+
+
+@nx._dispatchable(graphs={"G": 0, "R": 1}, preserve_edge_attrs={"R": {"flow": None}})
+def build_flow_dict(G, R):
+    """Build a flow dictionary from a residual network."""
+    flow_dict = {}
+    for u in G:
+        flow_dict[u] = dict.fromkeys(G[u], 0)
+        flow_dict[u].update(
+            (v, attr["flow"]) for v, attr in R[u].items() if attr["flow"] > 0
+        )
+    return flow_dict
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/graph_hashing.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/graph_hashing.py
new file mode 100644
index 0000000..92a69cc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/graph_hashing.py
@@ -0,0 +1,435 @@
+"""
+Functions for hashing graphs to strings.
+Isomorphic graphs should be assigned identical hashes.
+For now, only Weisfeiler-Lehman hashing is implemented.
+"""
+
+import warnings
+from collections import Counter, defaultdict
+from hashlib import blake2b
+
+import networkx as nx
+
+__all__ = ["weisfeiler_lehman_graph_hash", "weisfeiler_lehman_subgraph_hashes"]
+
+
+def _hash_label(label, digest_size):
+    return blake2b(label.encode("ascii"), digest_size=digest_size).hexdigest()
+
+
+def _init_node_labels(G, edge_attr, node_attr):
+    if node_attr:
+        return {u: str(dd[node_attr]) for u, dd in G.nodes(data=True)}
+    elif edge_attr:
+        return dict.fromkeys(G, "")
+    else:
+        warnings.warn(
+            "The hashes produced for graphs without node or edge attributes "
+            "changed in v3.5 due to a bugfix (see documentation).",
+            UserWarning,
+            stacklevel=2,
+        )
+        if nx.is_directed(G):
+            return {u: str(G.in_degree(u)) + "_" + str(G.out_degree(u)) for u in G}
+        else:
+            return {u: str(deg) for u, deg in G.degree()}
+
+
+def _neighborhood_aggregate_undirected(G, node, node_labels, edge_attr=None):
+    """
+    Compute new labels for given node in an undirected graph by aggregating
+    the labels of each node's neighbors.
+    """
+    label_list = []
+    for nbr in G.neighbors(node):
+        prefix = "" if edge_attr is None else str(G[node][nbr][edge_attr])
+        label_list.append(prefix + node_labels[nbr])
+    return node_labels[node] + "".join(sorted(label_list))
+
+
+def _neighborhood_aggregate_directed(G, node, node_labels, edge_attr=None):
+    """
+    Compute new labels for given node in a directed graph by aggregating
+    the labels of each node's neighbors.
+    """
+    successor_labels = []
+    for nbr in G.successors(node):
+        # Parenthesized so the "s_" marker is kept when edge_attr is given.
+        prefix = "s_" + ("" if edge_attr is None else str(G[node][nbr][edge_attr]))
+        successor_labels.append(prefix + node_labels[nbr])
+
+    predecessor_labels = []
+    for nbr in G.predecessors(node):
+        # Parenthesized so the "p_" marker is kept when edge_attr is given.
+        prefix = "p_" + ("" if edge_attr is None else str(G[nbr][node][edge_attr]))
+        predecessor_labels.append(prefix + node_labels[nbr])
+    return (
+        node_labels[node]
+        + "".join(sorted(successor_labels))
+        + "".join(sorted(predecessor_labels))
+    )
+
+
+@nx.utils.not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
+def weisfeiler_lehman_graph_hash(
+    G, edge_attr=None, node_attr=None, iterations=3, digest_size=16
+):
+    """Return Weisfeiler Lehman (WL) graph hash.
+
+    .. Warning:: Hash values for directed graphs and graphs without edge or
+       node attributes have changed in v3.5. In previous versions,
+       directed graphs did not distinguish in- and outgoing edges. Also,
+       graphs without attributes set initial states such that effectively
+       one extra iteration of WL occurred beyond the number indicated by
+       `iterations`.
+       For undirected graphs without node or edge labels, the old
+       hashes can be obtained by increasing the iteration count by one.
+       For more details, see `issue #7806
+       <https://github.com/networkx/networkx/issues/7806>`_.
+
+    The function iteratively aggregates and hashes neighborhoods of each node.
+    After each node's neighbors are hashed to obtain updated node labels,
+    a hashed histogram of resulting labels is returned as the final hash.
+
+    Hashes are identical for isomorphic graphs and there exist strong
+    guarantees that non-isomorphic graphs will get different hashes.
+    See [1]_ for details.
+
+    If no node or edge attributes are provided, the degree of each node
+    is used as its initial label.
+    Otherwise, node and/or edge labels are used to compute the hash.
+
+    Parameters
+    ----------
+    G : graph
+        The graph to be hashed.
+        Can have node and/or edge attributes. Can also have no attributes.
+    edge_attr : string, optional (default=None)
+        The key in edge attribute dictionary to be used for hashing.
+        If None, edge labels are ignored.
+    node_attr : string, optional (default=None)
+        The key in node attribute dictionary to be used for hashing.
+        If None, and no edge_attr given, use the degrees of the nodes as labels.
+    iterations : int, optional (default=3)
+        Number of neighbor aggregations to perform.
+        Should be larger for larger graphs.
+    digest_size : int, optional (default=16)
+        Size (in bytes) of blake2b hash digest to use for hashing node labels.
+
+    Returns
+    -------
+    h : string
+        Hexadecimal string corresponding to hash of `G` (length ``2 * digest_size``).
+
+    Raises
+    ------
+    ValueError
+        If `iterations` is not a positive number.
+
+    Examples
+    --------
+    Two graphs with edge attributes that are isomorphic, except for
+    differences in the edge labels.
+
+    >>> G1 = nx.Graph()
+    >>> G1.add_edges_from(
+    ...     [
+    ...         (1, 2, {"label": "A"}),
+    ...         (2, 3, {"label": "A"}),
+    ...         (3, 1, {"label": "A"}),
+    ...         (1, 4, {"label": "B"}),
+    ...     ]
+    ... )
+    >>> G2 = nx.Graph()
+    >>> G2.add_edges_from(
+    ...     [
+    ...         (5, 6, {"label": "B"}),
+    ...         (6, 7, {"label": "A"}),
+    ...         (7, 5, {"label": "A"}),
+    ...         (7, 8, {"label": "A"}),
+    ...     ]
+    ... )
+
+    Omitting the `edge_attr` option results in identical hashes.
+
+    >>> nx.weisfeiler_lehman_graph_hash(G1)
+    'c045439172215f49e0bef8c3d26c6b61'
+    >>> nx.weisfeiler_lehman_graph_hash(G2)
+    'c045439172215f49e0bef8c3d26c6b61'
+
+    With edge labels, the graphs are no longer assigned
+    the same hash digest.
+
+    >>> nx.weisfeiler_lehman_graph_hash(G1, edge_attr="label")
+    'c653d85538bcf041d88c011f4f905f10'
+    >>> nx.weisfeiler_lehman_graph_hash(G2, edge_attr="label")
+    '3dcd84af1ca855d0eff3c978d88e7ec7'
+
+    Notes
+    -----
+    To return the WL hashes of each subgraph of a graph, use
+    `weisfeiler_lehman_subgraph_hashes`.
+
+    Similarity between hashes does not imply similarity between graphs.
+
+    References
+    ----------
+    .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen,
+       Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman
+       Graph Kernels. Journal of Machine Learning Research. 2011.
+       http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf
+
+    See also
+    --------
+    weisfeiler_lehman_subgraph_hashes
+    """
+
+    if G.is_directed():
+        _neighborhood_aggregate = _neighborhood_aggregate_directed
+        warnings.warn(
+            "The hashes produced for directed graphs changed in version v3.5"
+            " due to a bugfix to track in and out edges separately (see documentation).",
+            UserWarning,
+            stacklevel=2,
+        )
+    else:
+        _neighborhood_aggregate = _neighborhood_aggregate_undirected
+
+    def weisfeiler_lehman_step(G, labels, edge_attr=None):
+        """
+        Apply neighborhood aggregation to each node
+        in the graph.
+        Computes a dictionary with labels for each node.
+        """
+        new_labels = {}
+        for node in G.nodes():
+            label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr)
+            new_labels[node] = _hash_label(label, digest_size)
+        return new_labels
+
+    if iterations <= 0:
+        raise ValueError("The WL algorithm requires that `iterations` be positive")
+
+    # set initial node labels
+    node_labels = _init_node_labels(G, edge_attr, node_attr)
+
+    # If the graph has no attributes, initial labels are the nodes' degrees.
+    # This is equivalent to doing the first iteration of WL.
+    if not edge_attr and not node_attr:
+        iterations -= 1
+
+    subgraph_hash_counts = []
+    for _ in range(iterations):
+        node_labels = weisfeiler_lehman_step(G, node_labels, edge_attr=edge_attr)
+        counter = Counter(node_labels.values())
+        # sort the counter, extend total counts
+        subgraph_hash_counts.extend(sorted(counter.items(), key=lambda x: x[0]))
+
+    # hash the final counter
+    return _hash_label(str(tuple(subgraph_hash_counts)), digest_size)
+
+
+@nx.utils.not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
+def weisfeiler_lehman_subgraph_hashes(
+    G,
+    edge_attr=None,
+    node_attr=None,
+    iterations=3,
+    digest_size=16,
+    include_initial_labels=False,
+):
+    """
+    Return a dictionary of subgraph hashes by node.
+
+    .. Warning:: Hash values for directed graphs have changed in version
+       v3.5. In previous versions, directed graphs did not distinguish in-
+       and outgoing edges.
+       Graphs without attributes previously performed an extra iteration of
+       WL at initialisation, which was not visible in the output of this
+       function. This hash value is now included in the returned dictionary,
+       shifting the other calculated hashes one position to the right. To
+       obtain the same last subgraph hash, increase the number of iterations
+       by one.
+       For more details, see `issue #7806
+       <https://github.com/networkx/networkx/issues/7806>`_.
+
+    Dictionary keys are nodes in `G`, and values are a list of hashes.
+    Each hash corresponds to a subgraph rooted at a given node u in `G`.
+    Lists of subgraph hashes are sorted in increasing order of depth from
+    their root node, with the hash at index i corresponding to a subgraph
+    of nodes at most i-hops (i edges) distance from u. Thus, each list will
+    contain `iterations` elements - a hash for a subgraph at each depth. If
+    `include_initial_labels` is set to `True`, each list will additionally
+    contain a hash of the initial node label (or equivalently a
+    subgraph of depth 0) prepended, totalling ``iterations + 1`` elements.
+
+    The function iteratively aggregates and hashes neighborhoods of each node.
+    This is achieved for each step by replacing for each node its label from
+    the previous iteration with its hashed 1-hop neighborhood aggregate.
+    The new node label is then appended to a list of node labels for each
+    node.
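+
+    For instance, after the first aggregation each node's list holds one hash
+    summarising its immediate (1-hop) neighborhood; after the second, a hash
+    covering everything within two hops is appended, and so on.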
+
+    To aggregate neighborhoods for a node $u$ at each step, all labels of
+    nodes adjacent to $u$ are concatenated. If the `edge_attr` parameter is set,
+    labels for each neighboring node are prefixed with the value of this attribute
+    along the connecting edge from this neighbor to node $u$. The resulting string
+    is then hashed to compress this information into a fixed digest size.
+
+    Thus, at the i-th iteration, nodes within i hops influence any given
+    hashed node label. We can therefore say that at depth $i$ for node $u$
+    we have a hash for a subgraph induced by the i-hop neighborhood of $u$.
+
+    The output can be used to create general Weisfeiler-Lehman graph kernels,
+    or generate features for graphs or nodes - for example to generate 'words' in
+    a graph as seen in the 'graph2vec' algorithm.
+    See [1]_ & [2]_ respectively for details.
+
+    Hashes are identical for isomorphic subgraphs and there exist strong
+    guarantees that non-isomorphic graphs will get different hashes.
+    See [1]_ for details.
+
+    If no node or edge attributes are provided, the degree of each node
+    is used as its initial label.
+    Otherwise, node and/or edge labels are used to compute the hash.
+
+    Parameters
+    ----------
+    G : graph
+        The graph to be hashed.
+        Can have node and/or edge attributes. Can also have no attributes.
+    edge_attr : string, optional (default=None)
+        The key in edge attribute dictionary to be used for hashing.
+        If None, edge labels are ignored.
+    node_attr : string, optional (default=None)
+        The key in node attribute dictionary to be used for hashing.
+        If None, and no edge_attr given, use the degrees of the nodes as labels.
+        If None, and edge_attr is given, each node starts with an identical label.
+    iterations : int, optional (default=3)
+        Number of neighbor aggregations to perform.
+        Should be larger for larger graphs.
+    digest_size : int, optional (default=16)
+        Size (in bytes) of blake2b hash digest to use for hashing node labels.
+        The default size is 16 bytes.
+    include_initial_labels : bool, optional (default=False)
+        If True, include the hashed initial node label as the first subgraph
+        hash for each node.
+
+    Returns
+    -------
+    node_subgraph_hashes : dict
+        A dictionary with each key given by a node in G, and each value given
+        by the subgraph hashes in order of depth from the key node.
+        Hashes are hexadecimal strings (hence ``2 * digest_size`` long).
+
+    Raises
+    ------
+    ValueError
+        If `iterations` is not a positive number.
+
+    Examples
+    --------
+    Finding similar nodes in different graphs:
+
+    >>> G1 = nx.Graph()
+    >>> G1.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 7)])
+    >>> G2 = nx.Graph()
+    >>> G2.add_edges_from([(1, 3), (2, 3), (1, 6), (1, 5), (4, 6)])
+    >>> g1_hashes = nx.weisfeiler_lehman_subgraph_hashes(
+    ...     G1, iterations=4, digest_size=8
+    ... )
+    >>> g2_hashes = nx.weisfeiler_lehman_subgraph_hashes(
+    ...     G2, iterations=4, digest_size=8
+    ... )
+
+    Even though G1 and G2 are not isomorphic (they have different numbers of edges),
+    the hash sequences of depth 3 for node 1 in G1 and node 5 in G2 are similar:
+
+    >>> g1_hashes[1]
+    ['f6fc42039fba3776', 'a93b64973cfc8897', 'db1b43ae35a1878f', '57872a7d2059c1c0']
+    >>> g2_hashes[5]
+    ['f6fc42039fba3776', 'a93b64973cfc8897', 'db1b43ae35a1878f', '1716d2a4012fa4bc']
+
+    The first 3 WL subgraph hashes match. From this we can conclude that it's very
+    likely the 3-hop neighborhoods around these nodes are isomorphic.
+
+    However, the 4-hop neighborhoods of ``G1`` and ``G2`` are not isomorphic since the
+    4th hashes in the lists above are not equal.
+
+    These nodes may be candidates to be classified together since their local topology
+    is similar.
+
+    Notes
+    -----
+    To hash the full graph when subgraph hashes are not needed, use
+    `weisfeiler_lehman_graph_hash` for efficiency.
+
+    Similarity between hashes does not imply similarity between graphs.
+
+    References
+    ----------
+    .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen,
+       Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman
+       Graph Kernels. Journal of Machine Learning Research. 2011.
+       http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf
+    .. [2] Annamalai Narayanan, Mahinthan Chandramohan, Rajasekar Venkatesan,
+       Lihui Chen, Yang Liu and Shantanu Jaiswal. graph2vec: Learning
+       Distributed Representations of Graphs. arXiv. 2017.
+       https://arxiv.org/pdf/1707.05005.pdf
+
+    See also
+    --------
+    weisfeiler_lehman_graph_hash
+    """
+
+    if G.is_directed():
+        _neighborhood_aggregate = _neighborhood_aggregate_directed
+        warnings.warn(
+            "The hashes produced for directed graphs changed in v3.5"
+            " due to a bugfix (see documentation).",
+            UserWarning,
+            stacklevel=2,
+        )
+    else:
+        _neighborhood_aggregate = _neighborhood_aggregate_undirected
+
+    def weisfeiler_lehman_step(G, labels, node_subgraph_hashes, edge_attr=None):
+        """
+        Apply neighborhood aggregation to each node
+        in the graph.
+        Computes a dictionary with labels for each node.
+        Appends the new hashed label to the dictionary of subgraph hashes
+        originating from and indexed by each node in G
+        """
+        new_labels = {}
+        for node in G.nodes():
+            label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr)
+            hashed_label = _hash_label(label, digest_size)
+            new_labels[node] = hashed_label
+            node_subgraph_hashes[node].append(hashed_label)
+        return new_labels
+
+    if iterations <= 0:
+        raise ValueError("The WL algorithm requires that `iterations` be positive")
+
+    node_labels = _init_node_labels(G, edge_attr, node_attr)
+
+    if include_initial_labels:
+        node_subgraph_hashes = {
+            k: [_hash_label(v, digest_size)] for k, v in node_labels.items()
+        }
+    else:
+        node_subgraph_hashes = defaultdict(list)
+
+    # If the graph has no attributes, initial labels are the nodes' degrees.
+    # This is equivalent to doing the first iteration of WL.
+    if not edge_attr and not node_attr:
+        iterations -= 1
+        for node in G.nodes():
+            hashed_label = _hash_label(node_labels[node], digest_size)
+            node_subgraph_hashes[node].append(hashed_label)
+
+    for _ in range(iterations):
+        node_labels = weisfeiler_lehman_step(
+            G, node_labels, node_subgraph_hashes, edge_attr
+        )
+
+    return dict(node_subgraph_hashes)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/graphical.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/graphical.py
new file mode 100644
index 0000000..d5d82de
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/graphical.py
@@ -0,0 +1,483 @@
+"""Test sequences for graphiness."""
+
+import heapq
+
+import networkx as nx
+
+__all__ = [
+    "is_graphical",
+    "is_multigraphical",
+    "is_pseudographical",
+    "is_digraphical",
+    "is_valid_degree_sequence_erdos_gallai",
+    "is_valid_degree_sequence_havel_hakimi",
+]
+
+
+@nx._dispatchable(graphs=None)
+def is_graphical(sequence, method="eg"):
+    """Returns True if sequence is a valid degree sequence.
+
+    A degree sequence is valid if some graph can realize it.
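+    For example, ``[3, 3, 3, 1]`` is not graphical: in a simple graph on four
+    nodes, a node of degree 3 is adjacent to every other node, so three such
+    nodes would force the fourth node to have degree 3 rather than 1.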
+ + Parameters + ---------- + sequence : list or iterable container + A sequence of integer node degrees + + method : "eg" | "hh" (default: 'eg') + The method used to validate the degree sequence. + "eg" corresponds to the Erdős-Gallai algorithm + [EG1960]_, [choudum1986]_, and + "hh" to the Havel-Hakimi algorithm + [havel1955]_, [hakimi1962]_, [CL1996]_. + + Returns + ------- + valid : bool + True if the sequence is a valid degree sequence and False if not. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> sequence = (d for n, d in G.degree()) + >>> nx.is_graphical(sequence) + True + + To test a non-graphical sequence: + >>> sequence_list = [d for n, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_graphical(sequence_list) + False + + References + ---------- + .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960. + .. [choudum1986] S.A. Choudum. "A simple proof of the Erdős-Gallai theorem on + graph sequences." Bulletin of the Australian Mathematical Society, 33, + pp 67-70, 1986. https://doi.org/10.1017/S0004972700002872 + .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs" + Casopis Pest. Mat. 80, 477-480, 1955. + .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as + Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962. + .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs", + Chapman and Hall/CRC, 1996. + """ + if method == "eg": + valid = is_valid_degree_sequence_erdos_gallai(list(sequence)) + elif method == "hh": + valid = is_valid_degree_sequence_havel_hakimi(list(sequence)) + else: + msg = "`method` must be 'eg' or 'hh'" + raise nx.NetworkXException(msg) + return valid + + +def _basic_graphical_tests(deg_sequence): + # Sort and perform some simple tests on the sequence + deg_sequence = nx.utils.make_list_of_ints(deg_sequence) + p = len(deg_sequence) + num_degs = [0] * p + dmax, dmin, dsum, n = 0, p, 0, 0 + for d in deg_sequence: + # Reject if degree is negative or larger than the sequence length + if d < 0 or d >= p: + raise nx.NetworkXUnfeasible + # Process only the non-zero integers + elif d > 0: + dmax, dmin, dsum, n = max(dmax, d), min(dmin, d), dsum + d, n + 1 + num_degs[d] += 1 + # Reject sequence if it has odd sum or is oversaturated + if dsum % 2 or dsum > n * (n - 1): + raise nx.NetworkXUnfeasible + return dmax, dmin, dsum, n, num_degs + + +@nx._dispatchable(graphs=None) +def is_valid_degree_sequence_havel_hakimi(deg_sequence): + r"""Returns True if deg_sequence can be realized by a simple graph. + + The validation proceeds using the Havel-Hakimi theorem + [havel1955]_, [hakimi1962]_, [CL1996]_. + Worst-case run time is $O(s)$ where $s$ is the sum of the sequence. + + Parameters + ---------- + deg_sequence : list + A list of integers where each element specifies the degree of a node + in a graph. + + Returns + ------- + valid : bool + True if deg_sequence is graphical and False if not. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_valid_degree_sequence_havel_hakimi(sequence) + True + + To test a non-valid sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_valid_degree_sequence_havel_hakimi(sequence_list) + False + + Notes + ----- + The ZZ condition says that for the sequence d if + + .. math:: + |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)} + + then d is graphical. This was shown in Theorem 6 in [1]_. 
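+
+    As a worked illustration of the Havel-Hakimi reduction (not part of the
+    implementation below, which operates on degree counts instead): the
+    sequence ``[3, 3, 2, 2, 1, 1]`` reduces to ``[2, 1, 1, 1, 1]``, then to
+    ``[1, 1, 0, 0]``, then to ``[0, 0, 0]``, so it is graphical.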
+
+    References
+    ----------
+    .. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
+       of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
+    .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs"
+       Casopis Pest. Mat. 80, 477-480, 1955.
+    .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as
+       Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962.
+    .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
+       Chapman and Hall/CRC, 1996.
+    """
+    try:
+        dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
+    except nx.NetworkXUnfeasible:
+        return False
+    # Accept if sequence has no non-zero degrees or passes the ZZ condition
+    if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
+        return True
+
+    modstubs = [0] * (dmax + 1)
+    # Successively reduce degree sequence by removing the maximum degree
+    while n > 0:
+        # Retrieve the maximum degree in the sequence
+        while num_degs[dmax] == 0:
+            dmax -= 1
+        # If there are not enough stubs to connect to, then the sequence is
+        # not graphical
+        if dmax > n - 1:
+            return False
+
+        # Remove largest stub in list
+        num_degs[dmax], n = num_degs[dmax] - 1, n - 1
+        # Reduce the next dmax largest stubs
+        mslen = 0
+        k = dmax
+        for i in range(dmax):
+            while num_degs[k] == 0:
+                k -= 1
+            num_degs[k], n = num_degs[k] - 1, n - 1
+            if k > 1:
+                modstubs[mslen] = k - 1
+                mslen += 1
+        # Add back to the list any non-zero stubs that were removed
+        for i in range(mslen):
+            stub = modstubs[i]
+            num_degs[stub], n = num_degs[stub] + 1, n + 1
+    return True
+
+
+@nx._dispatchable(graphs=None)
+def is_valid_degree_sequence_erdos_gallai(deg_sequence):
+    r"""Returns True if deg_sequence can be realized by a simple graph.
+
+    The validation is done using the Erdős-Gallai theorem [EG1960]_.
+
+    Parameters
+    ----------
+    deg_sequence : list
+        A list of integers
+
+    Returns
+    -------
+    valid : bool
+        True if deg_sequence is graphical and False if not.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
+    >>> sequence = (d for _, d in G.degree())
+    >>> nx.is_valid_degree_sequence_erdos_gallai(sequence)
+    True
+
+    To test a non-valid sequence:
+    >>> sequence_list = [d for _, d in G.degree()]
+    >>> sequence_list[-1] += 1
+    >>> nx.is_valid_degree_sequence_erdos_gallai(sequence_list)
+    False
+
+    Notes
+    -----
+
+    This implementation uses an equivalent form of the Erdős-Gallai criterion.
+    Worst-case run time is $O(n)$ where $n$ is the length of the sequence.
+
+    Specifically, a sequence d is graphical if and only if the
+    sum of the sequence is even and for all strong indices k in the sequence,
+
+    .. math::
+
+        \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_j,k)
+              = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )
+
+    A strong index k is any index where d_k >= k and the value n_j is the
+    number of occurrences of j in d. The maximal strong index is called the
+    Durfee index.
+
+    This particular rearrangement comes from the proof of Theorem 3 in [2]_.
+
+    The ZZ condition says that for the sequence d if
+
+    .. math::
+        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
+
+    then d is graphical. This was shown in Theorem 6 in [2]_.
+
+    References
+    ----------
+    .. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
+       Discrete Mathematics, 265, pp. 417-420 (2003).
+    .. [2] I.E. Zverovich and V.E. Zverovich.
"Contributions to the theory + of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992). + .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960. + """ + try: + dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence) + except nx.NetworkXUnfeasible: + return False + # Accept if sequence has no non-zero degrees or passes the ZZ condition + if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1): + return True + + # Perform the EG checks using the reformulation of Zverovich and Zverovich + k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0 + for dk in range(dmax, dmin - 1, -1): + if dk < k + 1: # Check if already past Durfee index + return True + if num_degs[dk] > 0: + run_size = num_degs[dk] # Process a run of identical-valued degrees + if dk < k + run_size: # Check if end of run is past Durfee index + run_size = dk - k # Adjust back to Durfee index + sum_deg += run_size * dk + for v in range(run_size): + sum_nj += num_degs[k + v] + sum_jnj += (k + v) * num_degs[k + v] + k += run_size + if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj: + return False + return True + + +@nx._dispatchable(graphs=None) +def is_multigraphical(sequence): + """Returns True if some multigraph can realize the sequence. + + Parameters + ---------- + sequence : list + A list of integers + + Returns + ------- + valid : bool + True if deg_sequence is a multigraphic degree sequence and False if not. + + Examples + -------- + >>> G = nx.MultiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_multigraphical(sequence) + True + + To test a non-multigraphical sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_multigraphical(sequence_list) + False + + Notes + ----- + The worst-case run time is $O(n)$ where $n$ is the length of the sequence. + + References + ---------- + .. [1] S. L. Hakimi. "On the realizability of a set of integers as + degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506 + (1962). + """ + try: + deg_sequence = nx.utils.make_list_of_ints(sequence) + except nx.NetworkXError: + return False + dsum, dmax = 0, 0 + for d in deg_sequence: + if d < 0: + return False + dsum, dmax = dsum + d, max(dmax, d) + if dsum % 2 or dsum < 2 * dmax: + return False + return True + + +@nx._dispatchable(graphs=None) +def is_pseudographical(sequence): + """Returns True if some pseudograph can realize the sequence. + + Every nonnegative integer sequence with an even sum is pseudographical + (see [1]_). + + Parameters + ---------- + sequence : list or iterable container + A sequence of integer node degrees + + Returns + ------- + valid : bool + True if the sequence is a pseudographic degree sequence and False if not. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_pseudographical(sequence) + True + + To test a non-pseudographical sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_pseudographical(sequence_list) + False + + Notes + ----- + The worst-case run time is $O(n)$ where n is the length of the sequence. + + References + ---------- + .. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs + and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12), + pp. 778-782 (1976). 
+ """ + try: + deg_sequence = nx.utils.make_list_of_ints(sequence) + except nx.NetworkXError: + return False + return sum(deg_sequence) % 2 == 0 and min(deg_sequence) >= 0 + + +@nx._dispatchable(graphs=None) +def is_digraphical(in_sequence, out_sequence): + r"""Returns True if some directed graph can realize the in- and out-degree + sequences. + + Parameters + ---------- + in_sequence : list or iterable container + A sequence of integer node in-degrees + + out_sequence : list or iterable container + A sequence of integer node out-degrees + + Returns + ------- + valid : bool + True if in and out-sequences are digraphic False if not. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> in_seq = (d for n, d in G.in_degree()) + >>> out_seq = (d for n, d in G.out_degree()) + >>> nx.is_digraphical(in_seq, out_seq) + True + + To test a non-digraphical scenario: + >>> in_seq_list = [d for n, d in G.in_degree()] + >>> in_seq_list[-1] += 1 + >>> nx.is_digraphical(in_seq_list, out_seq) + False + + Notes + ----- + This algorithm is from Kleitman and Wang [1]_. + The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the + sum and length of the sequences respectively. + + References + ---------- + .. [1] D.J. Kleitman and D.L. Wang + Algorithms for Constructing Graphs and Digraphs with Given Valences + and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973) + """ + try: + in_deg_sequence = nx.utils.make_list_of_ints(in_sequence) + out_deg_sequence = nx.utils.make_list_of_ints(out_sequence) + except nx.NetworkXError: + return False + # Process the sequences and form two heaps to store degree pairs with + # either zero or non-zero out degrees + sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence) + maxn = max(nin, nout) + maxin = 0 + if maxn == 0: + return True + stubheap, zeroheap = [], [] + for n in range(maxn): + in_deg, out_deg = 0, 0 + if n < nout: + out_deg = out_deg_sequence[n] + if n < nin: + in_deg = in_deg_sequence[n] + if in_deg < 0 or out_deg < 0: + return False + sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg) + if in_deg > 0: + stubheap.append((-1 * out_deg, -1 * in_deg)) + elif out_deg > 0: + zeroheap.append(-1 * out_deg) + if sumin != sumout: + return False + heapq.heapify(stubheap) + heapq.heapify(zeroheap) + + modstubs = [(0, 0)] * (maxin + 1) + # Successively reduce degree sequence by removing the maximum out degree + while stubheap: + # Take the first value in the sequence with non-zero in degree + (freeout, freein) = heapq.heappop(stubheap) + freein *= -1 + if freein > len(stubheap) + len(zeroheap): + return False + + # Attach out stubs to the nodes with the most in stubs + mslen = 0 + for i in range(freein): + if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]): + stubout = heapq.heappop(zeroheap) + stubin = 0 + else: + (stubout, stubin) = heapq.heappop(stubheap) + if stubout == 0: + return False + # Check if target is now totally connected + if stubout + 1 < 0 or stubin < 0: + modstubs[mslen] = (stubout + 1, stubin) + mslen += 1 + + # Add back the nodes to the heap that still have available stubs + for i in range(mslen): + stub = modstubs[i] + if stub[1] < 0: + heapq.heappush(stubheap, stub) + else: + heapq.heappush(zeroheap, stub[0]) + if freeout < 0: + heapq.heappush(zeroheap, freeout) + return True diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/hierarchy.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/hierarchy.py 
new file mode 100644 index 0000000..d5a0552 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/hierarchy.py @@ -0,0 +1,57 @@ +""" +Flow Hierarchy. +""" + +import networkx as nx + +__all__ = ["flow_hierarchy"] + + +@nx._dispatchable(edge_attrs="weight") +def flow_hierarchy(G, weight=None): + """Returns the flow hierarchy of a directed network. + + Flow hierarchy is defined as the fraction of edges not participating + in cycles in a directed graph [1]_. + + Parameters + ---------- + G : DiGraph or MultiDiGraph + A directed graph + + weight : string, optional (default=None) + Attribute to use for edge weights. If None the weight defaults to 1. + + Returns + ------- + h : float + Flow hierarchy value + + Raises + ------ + NetworkXError + If `G` is not a directed graph or if `G` has no edges. + + Notes + ----- + The algorithm described in [1]_ computes the flow hierarchy through + exponentiation of the adjacency matrix. This function implements an + alternative approach that finds strongly connected components. + An edge is in a cycle if and only if it is in a strongly connected + component, which can be found in $O(m)$ time using Tarjan's algorithm. + + References + ---------- + .. [1] Luo, J.; Magee, C.L. (2011), + Detecting evolving patterns of self-organizing networks by flow + hierarchy measurement, Complexity, Volume 16 Issue 6 53-61. + DOI: 10.1002/cplx.20368 + http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf + """ + # corner case: G has no edges + if nx.is_empty(G): + raise nx.NetworkXError("flow_hierarchy not applicable to empty graphs") + if not G.is_directed(): + raise nx.NetworkXError("G must be a digraph in flow_hierarchy") + scc = nx.strongly_connected_components(G) + return 1 - sum(G.subgraph(c).size(weight) for c in scc) / G.size(weight) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/hybrid.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/hybrid.py new file mode 100644 index 0000000..9d3dd30 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/hybrid.py @@ -0,0 +1,196 @@ +""" +Provides functions for finding and testing for locally `(k, l)`-connected +graphs. + +""" + +import copy + +import networkx as nx + +__all__ = ["kl_connected_subgraph", "is_kl_connected"] + + +@nx._dispatchable(returns_graph=True) +def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False): + """Returns the maximum locally `(k, l)`-connected subgraph of `G`. + + A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the + graph there are at least `l` edge-disjoint paths of length at most `k` + joining `u` to `v`. + + Parameters + ---------- + G : NetworkX graph + The graph in which to find a maximum locally `(k, l)`-connected + subgraph. + + k : integer + The maximum length of paths to consider. A higher number means a looser + connectivity requirement. + + l : integer + The number of edge-disjoint paths. A higher number means a stricter + connectivity requirement. + + low_memory : bool + If this is True, this function uses an algorithm that uses slightly + more time but less memory. + + same_as_graph : bool + If True then return a tuple of the form `(H, is_same)`, + where `H` is the maximum locally `(k, l)`-connected subgraph and + `is_same` is a Boolean representing whether `G` is locally `(k, + l)`-connected (and hence, whether `H` is simply a copy of the input + graph `G`). 
+
+    Returns
+    -------
+    NetworkX graph or two-tuple
+        If `same_as_graph` is True, then this function returns a
+        two-tuple as described above. Otherwise, it returns only the maximum
+        locally `(k, l)`-connected subgraph.
+
+    See also
+    --------
+    is_kl_connected
+
+    References
+    ----------
+    .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
+       Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
+       2004. 89--104.
+
+    """
+    H = copy.deepcopy(G)  # subgraph we construct by removing from G
+
+    graphOK = True
+    deleted_some = True  # hack to start off the while loop
+    while deleted_some:
+        deleted_some = False
+        # We use `for edge in list(H.edges()):` instead of
+        # `for edge in H.edges():` because we edit the graph `H` in
+        # the loop. Hence using an iterator will result in
+        # `RuntimeError: dictionary changed size during iteration`
+        for edge in list(H.edges()):
+            (u, v) = edge
+            # Get copy of graph needed for this search
+            if low_memory:
+                verts = {u, v}
+                for i in range(k):
+                    for w in verts.copy():
+                        verts.update(G[w])
+                G2 = G.subgraph(verts).copy()
+            else:
+                G2 = copy.deepcopy(G)
+            ###
+            path = [u, v]
+            cnt = 0
+            accept = 0
+            while path:
+                cnt += 1  # Found a path
+                if cnt >= l:
+                    accept = 1
+                    break
+                # remove the edges along this path so the next search
+                # finds an edge-disjoint one
+                prev = u
+                for w in path:
+                    if prev != w:
+                        G2.remove_edge(prev, w)
+                    prev = w
+                # path = shortest_path(G2, u, v, k)  # ??? should "Cutoff" be k+1?
+                try:
+                    path = nx.shortest_path(G2, u, v)  # ??? should "Cutoff" be k+1?
+                except nx.NetworkXNoPath:
+                    path = False
+            # No Other Paths
+            if accept == 0:
+                H.remove_edge(u, v)
+                deleted_some = True
+                if graphOK:
+                    graphOK = False
+    # We looked through all edges and removed none of them.
+    # So, H is the maximal (k,l)-connected subgraph of G
+    if same_as_graph:
+        return (H, graphOK)
+    return H
+
+
+@nx._dispatchable
+def is_kl_connected(G, k, l, low_memory=False):
+    """Returns True if and only if `G` is locally `(k, l)`-connected.
+
+    A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
+    graph there are at least `l` edge-disjoint paths of length at most `k`
+    joining `u` to `v`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph to test for local `(k, l)`-connectedness.
+
+    k : integer
+        The maximum length of paths to consider. A higher number means a looser
+        connectivity requirement.
+
+    l : integer
+        The number of edge-disjoint paths. A higher number means a stricter
+        connectivity requirement.
+
+    low_memory : bool
+        If this is True, this function uses an algorithm that uses slightly
+        more time but less memory.
+
+    Returns
+    -------
+    bool
+        Whether the graph is locally `(k, l)`-connected.
+
+    See also
+    --------
+    kl_connected_subgraph
+
+    References
+    ----------
+    .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
+       Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
+       2004. 89--104.
+
+    """
+    graphOK = True
+    for edge in G.edges():
+        (u, v) = edge
+        # Get copy of graph needed for this search
+        if low_memory:
+            verts = {u, v}
+            for i in range(k):
+                for w in verts.copy():
+                    verts.update(G.neighbors(w))
+            G2 = G.subgraph(verts)
+        else:
+            G2 = copy.deepcopy(G)
+        ###
+        path = [u, v]
+        cnt = 0
+        accept = 0
+        while path:
+            cnt += 1  # Found a path
+            if cnt >= l:
+                accept = 1
+                break
+            # remove the edges along this path so the next search
+            # finds an edge-disjoint one
+            prev = u
+            for w in path:
+                if w != prev:
+                    G2.remove_edge(prev, w)
+                prev = w
+            # path = shortest_path(G2, u, v, k)  # ??? should "Cutoff" be k+1?
+            try:
+                path = nx.shortest_path(G2, u, v)  # ??? should "Cutoff" be k+1?
+            except nx.NetworkXNoPath:
+                path = False
+        # No Other Paths
+        if accept == 0:
+            graphOK = False
+            break
+    # return status
+    return graphOK
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isolate.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isolate.py
new file mode 100644
index 0000000..1ea8abe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isolate.py
@@ -0,0 +1,107 @@
+"""
+Functions for identifying isolate (degree zero) nodes.
+"""
+
+import networkx as nx
+
+__all__ = ["is_isolate", "isolates", "number_of_isolates"]
+
+
+@nx._dispatchable
+def is_isolate(G, n):
+    """Determines whether a node is an isolate.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    n : node
+        A node in `G`.
+
+    Returns
+    -------
+    is_isolate : bool
+        True if and only if `n` has no neighbors.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G.add_edge(1, 2)
+    >>> G.add_node(3)
+    >>> nx.is_isolate(G, 2)
+    False
+    >>> nx.is_isolate(G, 3)
+    True
+    """
+    return G.degree(n) == 0
+
+
+@nx._dispatchable
+def isolates(G):
+    """Iterator over isolates in the graph.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    iterator
+        An iterator over the isolates of `G`.
+
+    Examples
+    --------
+    To get a list of all isolates of a graph, use the :class:`list`
+    constructor::
+
+        >>> G = nx.Graph()
+        >>> G.add_edge(1, 2)
+        >>> G.add_node(3)
+        >>> list(nx.isolates(G))
+        [3]
+
+    To remove all isolates in the graph, first create a list of the
+    isolates, then use :meth:`Graph.remove_nodes_from`::
+
+        >>> G.remove_nodes_from(list(nx.isolates(G)))
+        >>> list(G)
+        [1, 2]
+
+    For digraphs, isolates have zero in-degree and zero out-degree::
+
+        >>> G = nx.DiGraph([(0, 1), (1, 2)])
+        >>> G.add_node(3)
+        >>> list(nx.isolates(G))
+        [3]
+
+    """
+    return (n for n, d in G.degree() if d == 0)
+
+
+@nx._dispatchable
+def number_of_isolates(G):
+    """Returns the number of isolates in the graph.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    int
+        The number of degree zero nodes in the graph `G`.
+ + """ + return sum(1 for v in isolates(G)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__init__.py new file mode 100644 index 0000000..58c2268 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__init__.py @@ -0,0 +1,7 @@ +from networkx.algorithms.isomorphism.isomorph import * +from networkx.algorithms.isomorphism.vf2userfunc import * +from networkx.algorithms.isomorphism.matchhelpers import * +from networkx.algorithms.isomorphism.temporalisomorphvf2 import * +from networkx.algorithms.isomorphism.ismags import * +from networkx.algorithms.isomorphism.tree_isomorphism import * +from networkx.algorithms.isomorphism.vf2pp import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..d6f0ec7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/ismags.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/ismags.cpython-312.pyc new file mode 100644 index 0000000..f815ef4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/ismags.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-312.pyc new file mode 100644 index 0000000..eeb2497 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-312.pyc new file mode 100644 index 0000000..cb8fb10 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-312.pyc new file mode 100644 index 0000000..7f4fd19 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-312.pyc new file mode 100644 index 0000000..795b2f2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-312.pyc new file mode 100644 index 
0000000..eee53ec Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-312.pyc new file mode 100644 index 0000000..2af9ef0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-312.pyc new file mode 100644 index 0000000..6c7903a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/ismags.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/ismags.py new file mode 100644 index 0000000..0387dbe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/ismags.py @@ -0,0 +1,1174 @@ +""" +ISMAGS Algorithm +================ + +Provides a Python implementation of the ISMAGS algorithm. [1]_ + +It is capable of finding (subgraph) isomorphisms between two graphs, taking the +symmetry of the subgraph into account. In most cases the VF2 algorithm is +faster (at least on small graphs) than this implementation, but in some cases +there is an exponential number of isomorphisms that are symmetrically +equivalent. In that case, the ISMAGS algorithm will provide only one solution +per symmetry group. + +>>> petersen = nx.petersen_graph() +>>> ismags = nx.isomorphism.ISMAGS(petersen, petersen) +>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=False)) +>>> len(isomorphisms) +120 +>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=True)) +>>> answer = [{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}] +>>> answer == isomorphisms +True + +In addition, this implementation also provides an interface to find the +largest common induced subgraph [2]_ between any two graphs, again taking +symmetry into account. Given `graph` and `subgraph` the algorithm will remove +nodes from the `subgraph` until `subgraph` is isomorphic to a subgraph of +`graph`. Since only the symmetry of `subgraph` is taken into account it is +worth thinking about how you provide your graphs: + +>>> graph1 = nx.path_graph(4) +>>> graph2 = nx.star_graph(3) +>>> ismags = nx.isomorphism.ISMAGS(graph1, graph2) +>>> ismags.is_isomorphic() +False +>>> largest_common_subgraph = list(ismags.largest_common_subgraph()) +>>> answer = [{1: 0, 0: 1, 2: 2}, {2: 0, 1: 1, 3: 2}] +>>> answer == largest_common_subgraph +True +>>> ismags2 = nx.isomorphism.ISMAGS(graph2, graph1) +>>> largest_common_subgraph = list(ismags2.largest_common_subgraph()) +>>> answer = [ +... {1: 0, 0: 1, 2: 2}, +... {1: 0, 0: 1, 3: 2}, +... {2: 0, 0: 1, 1: 2}, +... {2: 0, 0: 1, 3: 2}, +... {3: 0, 0: 1, 1: 2}, +... {3: 0, 0: 1, 2: 2}, +... ] +>>> answer == largest_common_subgraph +True + +However, when not taking symmetry into account, it doesn't matter: + +>>> largest_common_subgraph = list(ismags.largest_common_subgraph(symmetry=False)) +>>> answer = [ +... {1: 0, 0: 1, 2: 2}, +... {1: 0, 2: 1, 0: 2}, +... {2: 0, 1: 1, 3: 2}, +... 
{2: 0, 3: 1, 1: 2}, +... {1: 0, 0: 1, 2: 3}, +... {1: 0, 2: 1, 0: 3}, +... {2: 0, 1: 1, 3: 3}, +... {2: 0, 3: 1, 1: 3}, +... {1: 0, 0: 2, 2: 3}, +... {1: 0, 2: 2, 0: 3}, +... {2: 0, 1: 2, 3: 3}, +... {2: 0, 3: 2, 1: 3}, +... ] +>>> answer == largest_common_subgraph +True +>>> largest_common_subgraph = list(ismags2.largest_common_subgraph(symmetry=False)) +>>> answer = [ +... {1: 0, 0: 1, 2: 2}, +... {1: 0, 0: 1, 3: 2}, +... {2: 0, 0: 1, 1: 2}, +... {2: 0, 0: 1, 3: 2}, +... {3: 0, 0: 1, 1: 2}, +... {3: 0, 0: 1, 2: 2}, +... {1: 1, 0: 2, 2: 3}, +... {1: 1, 0: 2, 3: 3}, +... {2: 1, 0: 2, 1: 3}, +... {2: 1, 0: 2, 3: 3}, +... {3: 1, 0: 2, 1: 3}, +... {3: 1, 0: 2, 2: 3}, +... ] +>>> answer == largest_common_subgraph +True + +Notes +----- +- The current implementation works for undirected graphs only. The algorithm + in general should work for directed graphs as well though. +- Node keys for both provided graphs need to be fully orderable as well as + hashable. +- Node and edge equality is assumed to be transitive: if A is equal to B, and + B is equal to C, then A is equal to C. + +References +---------- +.. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle, + M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General + Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph + Enumeration", PLoS One 9(5): e97896, 2014. + https://doi.org/10.1371/journal.pone.0097896 +.. [2] https://en.wikipedia.org/wiki/Maximum_common_induced_subgraph +""" + +__all__ = ["ISMAGS"] + +import itertools +from collections import Counter, defaultdict +from functools import reduce, wraps + + +def are_all_equal(iterable): + """ + Returns ``True`` if and only if all elements in `iterable` are equal; and + ``False`` otherwise. + + Parameters + ---------- + iterable: collections.abc.Iterable + The container whose elements will be checked. + + Returns + ------- + bool + ``True`` iff all elements in `iterable` compare equal, ``False`` + otherwise. + """ + try: + shape = iterable.shape + except AttributeError: + pass + else: + if len(shape) > 1: + message = "The function does not work on multidimensional arrays." + raise NotImplementedError(message) from None + + iterator = iter(iterable) + first = next(iterator, None) + return all(item == first for item in iterator) + + +def make_partitions(items, test): + """ + Partitions items into sets based on the outcome of ``test(item1, item2)``. + Pairs of items for which `test` returns `True` end up in the same set. + + Parameters + ---------- + items : collections.abc.Iterable[collections.abc.Hashable] + Items to partition + test : collections.abc.Callable[collections.abc.Hashable, collections.abc.Hashable] + A function that will be called with 2 arguments, taken from items. + Should return `True` if those 2 items need to end up in the same + partition, and `False` otherwise. + + Returns + ------- + list[set] + A list of sets, with each set containing part of the items in `items`, + such that ``all(test(*pair) for pair in itertools.combinations(set, 2)) + == True`` + + Notes + ----- + The function `test` is assumed to be transitive: if ``test(a, b)`` and + ``test(b, c)`` return ``True``, then ``test(a, c)`` must also be ``True``.
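+
+    Examples
+    --------
+    A minimal sketch, grouping integers by parity (any transitive `test`
+    behaves analogously)::
+
+        >>> make_partitions([1, 2, 3, 4], lambda a, b: a % 2 == b % 2)
+        [{1, 3}, {2, 4}]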
+ """ + partitions = [] + for item in items: + for partition in partitions: + p_item = next(iter(partition)) + if test(item, p_item): + partition.add(item) + break + else: # No break + partitions.append({item}) + return partitions + + +def partition_to_color(partitions): + """ + Creates a dictionary that maps each item in each partition to the index of + the partition to which it belongs. + + Parameters + ---------- + partitions: collections.abc.Sequence[collections.abc.Iterable] + As returned by :func:`make_partitions`. + + Returns + ------- + dict + """ + colors = {} + for color, keys in enumerate(partitions): + for key in keys: + colors[key] = color + return colors + + +def intersect(collection_of_sets): + """ + Given an collection of sets, returns the intersection of those sets. + + Parameters + ---------- + collection_of_sets: collections.abc.Collection[set] + A collection of sets. + + Returns + ------- + set + An intersection of all sets in `collection_of_sets`. Will have the same + type as the item initially taken from `collection_of_sets`. + """ + collection_of_sets = list(collection_of_sets) + first = collection_of_sets.pop() + out = reduce(set.intersection, collection_of_sets, set(first)) + return type(first)(out) + + +class ISMAGS: + """ + Implements the ISMAGS subgraph matching algorithm. [1]_ ISMAGS stands for + "Index-based Subgraph Matching Algorithm with General Symmetries". As the + name implies, it is symmetry aware and will only generate non-symmetric + isomorphisms. + + Notes + ----- + The implementation imposes additional conditions compared to the VF2 + algorithm on the graphs provided and the comparison functions + (:attr:`node_equality` and :attr:`edge_equality`): + + - Node keys in both graphs must be orderable as well as hashable. + - Equality must be transitive: if A is equal to B, and B is equal to C, + then A must be equal to C. + + Attributes + ---------- + graph: networkx.Graph + subgraph: networkx.Graph + node_equality: collections.abc.Callable + The function called to see if two nodes should be considered equal. + It's signature looks like this: + ``f(graph1: networkx.Graph, node1, graph2: networkx.Graph, node2) -> bool``. + `node1` is a node in `graph1`, and `node2` a node in `graph2`. + Constructed from the argument `node_match`. + edge_equality: collections.abc.Callable + The function called to see if two edges should be considered equal. + It's signature looks like this: + ``f(graph1: networkx.Graph, edge1, graph2: networkx.Graph, edge2) -> bool``. + `edge1` is an edge in `graph1`, and `edge2` an edge in `graph2`. + Constructed from the argument `edge_match`. + + References + ---------- + .. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle, + M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General + Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph + Enumeration", PLoS One 9(5): e97896, 2014. + https://doi.org/10.1371/journal.pone.0097896 + """ + + def __init__(self, graph, subgraph, node_match=None, edge_match=None, cache=None): + """ + Parameters + ---------- + graph: networkx.Graph + subgraph: networkx.Graph + node_match: collections.abc.Callable or None + Function used to determine whether two nodes are equivalent. Its + signature should look like ``f(n1: dict, n2: dict) -> bool``, with + `n1` and `n2` node property dicts. See also + :func:`~networkx.algorithms.isomorphism.categorical_node_match` and + friends. + If `None`, all nodes are considered equal. 
+ edge_match: collections.abc.Callable or None + Function used to determine whether two edges are equivalent. Its + signature should look like ``f(e1: dict, e2: dict) -> bool``, with + `e1` and `e2` edge property dicts. See also + :func:`~networkx.algorithms.isomorphism.categorical_edge_match` and + friends. + If `None`, all edges are considered equal. + cache: collections.abc.Mapping + A cache used for caching graph symmetries. + """ + # TODO: graph and subgraph setter methods that invalidate the caches. + # TODO: allow for precomputed partitions and colors + self.graph = graph + self.subgraph = subgraph + self._symmetry_cache = cache + # Naming conventions are taken from the original paper. For your + # sanity: + # sg: subgraph + # g: graph + # e: edge(s) + # n: node(s) + # So: sgn means "subgraph nodes". + self._sgn_partitions_ = None + self._sge_partitions_ = None + + self._sgn_colors_ = None + self._sge_colors_ = None + + self._gn_partitions_ = None + self._ge_partitions_ = None + + self._gn_colors_ = None + self._ge_colors_ = None + + self._node_compat_ = None + self._edge_compat_ = None + + if node_match is None: + self.node_equality = self._node_match_maker(lambda n1, n2: True) + self._sgn_partitions_ = [set(self.subgraph.nodes)] + self._gn_partitions_ = [set(self.graph.nodes)] + self._node_compat_ = {0: 0} + else: + self.node_equality = self._node_match_maker(node_match) + if edge_match is None: + self.edge_equality = self._edge_match_maker(lambda e1, e2: True) + self._sge_partitions_ = [set(self.subgraph.edges)] + self._ge_partitions_ = [set(self.graph.edges)] + self._edge_compat_ = {0: 0} + else: + self.edge_equality = self._edge_match_maker(edge_match) + + @property + def _sgn_partitions(self): + if self._sgn_partitions_ is None: + + def nodematch(node1, node2): + return self.node_equality(self.subgraph, node1, self.subgraph, node2) + + self._sgn_partitions_ = make_partitions(self.subgraph.nodes, nodematch) + return self._sgn_partitions_ + + @property + def _sge_partitions(self): + if self._sge_partitions_ is None: + + def edgematch(edge1, edge2): + return self.edge_equality(self.subgraph, edge1, self.subgraph, edge2) + + self._sge_partitions_ = make_partitions(self.subgraph.edges, edgematch) + return self._sge_partitions_ + + @property + def _gn_partitions(self): + if self._gn_partitions_ is None: + + def nodematch(node1, node2): + return self.node_equality(self.graph, node1, self.graph, node2) + + self._gn_partitions_ = make_partitions(self.graph.nodes, nodematch) + return self._gn_partitions_ + + @property + def _ge_partitions(self): + if self._ge_partitions_ is None: + + def edgematch(edge1, edge2): + return self.edge_equality(self.graph, edge1, self.graph, edge2) + + self._ge_partitions_ = make_partitions(self.graph.edges, edgematch) + return self._ge_partitions_ + + @property + def _sgn_colors(self): + if self._sgn_colors_ is None: + self._sgn_colors_ = partition_to_color(self._sgn_partitions) + return self._sgn_colors_ + + @property + def _sge_colors(self): + if self._sge_colors_ is None: + self._sge_colors_ = partition_to_color(self._sge_partitions) + return self._sge_colors_ + + @property + def _gn_colors(self): + if self._gn_colors_ is None: + self._gn_colors_ = partition_to_color(self._gn_partitions) + return self._gn_colors_ + + @property + def _ge_colors(self): + if self._ge_colors_ is None: + self._ge_colors_ = partition_to_color(self._ge_partitions) + return self._ge_colors_ + + @property + def _node_compatibility(self): + if self._node_compat_ is not None: + 
return self._node_compat_ + self._node_compat_ = {} + for sgn_part_color, gn_part_color in itertools.product( + range(len(self._sgn_partitions)), range(len(self._gn_partitions)) + ): + sgn = next(iter(self._sgn_partitions[sgn_part_color])) + gn = next(iter(self._gn_partitions[gn_part_color])) + if self.node_equality(self.subgraph, sgn, self.graph, gn): + self._node_compat_[sgn_part_color] = gn_part_color + return self._node_compat_ + + @property + def _edge_compatibility(self): + if self._edge_compat_ is not None: + return self._edge_compat_ + self._edge_compat_ = {} + for sge_part_color, ge_part_color in itertools.product( + range(len(self._sge_partitions)), range(len(self._ge_partitions)) + ): + sge = next(iter(self._sge_partitions[sge_part_color])) + ge = next(iter(self._ge_partitions[ge_part_color])) + if self.edge_equality(self.subgraph, sge, self.graph, ge): + self._edge_compat_[sge_part_color] = ge_part_color + return self._edge_compat_ + + @staticmethod + def _node_match_maker(cmp): + @wraps(cmp) + def comparer(graph1, node1, graph2, node2): + return cmp(graph1.nodes[node1], graph2.nodes[node2]) + + return comparer + + @staticmethod + def _edge_match_maker(cmp): + @wraps(cmp) + def comparer(graph1, edge1, graph2, edge2): + return cmp(graph1.edges[edge1], graph2.edges[edge2]) + + return comparer + + def find_isomorphisms(self, symmetry=True): + """Find all subgraph isomorphisms between subgraph and graph + + Finds isomorphisms where :attr:`subgraph` <= :attr:`graph`. + + Parameters + ---------- + symmetry: bool + Whether symmetry should be taken into account. If False, found + isomorphisms may be symmetrically equivalent. + + Yields + ------ + dict + The found isomorphism mappings of {graph_node: subgraph_node}. + """ + # The networkx VF2 algorithm is slightly funny in when it yields an + # empty dict and when not. + if not self.subgraph: + yield {} + return + elif not self.graph: + return + elif len(self.graph) < len(self.subgraph): + return + + if symmetry: + _, cosets = self.analyze_symmetry( + self.subgraph, self._sgn_partitions, self._sge_colors + ) + constraints = self._make_constraints(cosets) + else: + constraints = [] + + candidates = self._find_nodecolor_candidates() + la_candidates = self._get_lookahead_candidates() + for sgn in self.subgraph: + extra_candidates = la_candidates[sgn] + if extra_candidates: + candidates[sgn] = candidates[sgn] | {frozenset(extra_candidates)} + + if any(candidates.values()): + start_sgn = min(candidates, key=lambda n: min(candidates[n], key=len)) + candidates[start_sgn] = (intersect(candidates[start_sgn]),) + yield from self._map_nodes(start_sgn, candidates, constraints) + else: + return + + @staticmethod + def _find_neighbor_color_count(graph, node, node_color, edge_color): + """ + For `node` in `graph`, count the number of edges of a specific color + it has to nodes of a specific color. + """ + counts = Counter() + neighbors = graph[node] + for neighbor in neighbors: + n_color = node_color[neighbor] + if (node, neighbor) in edge_color: + e_color = edge_color[node, neighbor] + else: + e_color = edge_color[neighbor, node] + counts[e_color, n_color] += 1 + return counts + + def _get_lookahead_candidates(self): + """ + Returns a mapping of {subgraph node: collection of graph nodes} for + which the graph nodes are feasible candidates for the subgraph node, as + determined by looking ahead one edge. 
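+
+        For example, with the path/star pair from the module docstring, the
+        star's degree-3 hub admits no candidate in a path graph (whose nodes
+        have at most two neighbors), while the star's leaves match every path
+        node. A small sketch of that behavior::
+
+            >>> ismags = ISMAGS(nx.path_graph(4), nx.star_graph(3))
+            >>> candidates = ismags._get_lookahead_candidates()
+            >>> sorted(candidates[0]), sorted(candidates[1])
+            ([], [0, 1, 2, 3])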
+ """ + g_counts = {} + for gn in self.graph: + g_counts[gn] = self._find_neighbor_color_count( + self.graph, gn, self._gn_colors, self._ge_colors + ) + candidates = defaultdict(set) + for sgn in self.subgraph: + sg_count = self._find_neighbor_color_count( + self.subgraph, sgn, self._sgn_colors, self._sge_colors + ) + new_sg_count = Counter() + for (sge_color, sgn_color), count in sg_count.items(): + try: + ge_color = self._edge_compatibility[sge_color] + gn_color = self._node_compatibility[sgn_color] + except KeyError: + pass + else: + new_sg_count[ge_color, gn_color] = count + + for gn, g_count in g_counts.items(): + if all(new_sg_count[x] <= g_count[x] for x in new_sg_count): + # Valid candidate + candidates[sgn].add(gn) + return candidates + + def largest_common_subgraph(self, symmetry=True): + """ + Find the largest common induced subgraphs between :attr:`subgraph` and + :attr:`graph`. + + Parameters + ---------- + symmetry: bool + Whether symmetry should be taken into account. If False, found + largest common subgraphs may be symmetrically equivalent. + + Yields + ------ + dict + The found isomorphism mappings of {graph_node: subgraph_node}. + """ + # The networkx VF2 algorithm is slightly funny in when it yields an + # empty dict and when not. + if not self.subgraph: + yield {} + return + elif not self.graph: + return + + if symmetry: + _, cosets = self.analyze_symmetry( + self.subgraph, self._sgn_partitions, self._sge_colors + ) + constraints = self._make_constraints(cosets) + else: + constraints = [] + + candidates = self._find_nodecolor_candidates() + + if any(candidates.values()): + yield from self._largest_common_subgraph(candidates, constraints) + else: + return + + def analyze_symmetry(self, graph, node_partitions, edge_colors): + """ + Find a minimal set of permutations and corresponding co-sets that + describe the symmetry of `graph`, given the node and edge equalities + given by `node_partitions` and `edge_colors`, respectively. + + Parameters + ---------- + graph : networkx.Graph + The graph whose symmetry should be analyzed. + node_partitions : list of sets + A list of sets containing node keys. Node keys in the same set + are considered equivalent. Every node key in `graph` should be in + exactly one of the sets. If all nodes are equivalent, this should + be ``[set(graph.nodes)]``. + edge_colors : dict mapping edges to their colors + A dict mapping every edge in `graph` to its corresponding color. + Edges with the same color are considered equivalent. If all edges + are equivalent, this should be ``{e: 0 for e in graph.edges}``. + + + Returns + ------- + set[frozenset] + The found permutations. This is a set of frozensets of pairs of node + keys which can be exchanged without changing :attr:`subgraph`. + dict[collections.abc.Hashable, set[collections.abc.Hashable]] + The found co-sets. The co-sets is a dictionary of + ``{node key: set of node keys}``. + Every key-value pair describes which ``values`` can be interchanged + without changing nodes less than ``key``. 
+ """ + if self._symmetry_cache is not None: + key = hash( + ( + tuple(graph.nodes), + tuple(graph.edges), + tuple(map(tuple, node_partitions)), + tuple(edge_colors.items()), + ) + ) + if key in self._symmetry_cache: + return self._symmetry_cache[key] + node_partitions = list( + self._refine_node_partitions(graph, node_partitions, edge_colors) + ) + assert len(node_partitions) == 1 + node_partitions = node_partitions[0] + permutations, cosets = self._process_ordered_pair_partitions( + graph, node_partitions, node_partitions, edge_colors + ) + if self._symmetry_cache is not None: + self._symmetry_cache[key] = permutations, cosets + return permutations, cosets + + def is_isomorphic(self, symmetry=False): + """ + Returns True if :attr:`graph` is isomorphic to :attr:`subgraph` and + False otherwise. + + Returns + ------- + bool + """ + return len(self.subgraph) == len(self.graph) and self.subgraph_is_isomorphic( + symmetry + ) + + def subgraph_is_isomorphic(self, symmetry=False): + """ + Returns True if a subgraph of :attr:`graph` is isomorphic to + :attr:`subgraph` and False otherwise. + + Returns + ------- + bool + """ + # symmetry=False, since we only need to know whether there is any + # example; figuring out all symmetry elements probably costs more time + # than it gains. + isom = next(self.subgraph_isomorphisms_iter(symmetry=symmetry), None) + return isom is not None + + def isomorphisms_iter(self, symmetry=True): + """ + Does the same as :meth:`find_isomorphisms` if :attr:`graph` and + :attr:`subgraph` have the same number of nodes. + """ + if len(self.graph) == len(self.subgraph): + yield from self.subgraph_isomorphisms_iter(symmetry=symmetry) + + def subgraph_isomorphisms_iter(self, symmetry=True): + """Alternative name for :meth:`find_isomorphisms`.""" + return self.find_isomorphisms(symmetry) + + def _find_nodecolor_candidates(self): + """ + Per node in subgraph find all nodes in graph that have the same color. + """ + candidates = defaultdict(set) + for sgn in self.subgraph.nodes: + sgn_color = self._sgn_colors[sgn] + if sgn_color in self._node_compatibility: + gn_color = self._node_compatibility[sgn_color] + candidates[sgn].add(frozenset(self._gn_partitions[gn_color])) + else: + candidates[sgn].add(frozenset()) + candidates = dict(candidates) + for sgn, options in candidates.items(): + candidates[sgn] = frozenset(options) + return candidates + + @staticmethod + def _make_constraints(cosets): + """ + Turn cosets into constraints. + """ + constraints = [] + for node_i, node_ts in cosets.items(): + for node_t in node_ts: + if node_i != node_t: + # Node i must be smaller than node t. + constraints.append((node_i, node_t)) + return constraints + + @staticmethod + def _find_node_edge_color(graph, node_colors, edge_colors): + """ + For every node in graph, come up with a color that combines 1) the + color of the node, and 2) the number of edges of a color to each type + of node. 
+ """ + counts = defaultdict(lambda: defaultdict(int)) + for node1, node2 in graph.edges: + if (node1, node2) in edge_colors: + # FIXME directed graphs + ecolor = edge_colors[node1, node2] + else: + ecolor = edge_colors[node2, node1] + # Count per node how many edges it has of what color to nodes of + # what color + counts[node1][ecolor, node_colors[node2]] += 1 + counts[node2][ecolor, node_colors[node1]] += 1 + + node_edge_colors = {} + for node in graph.nodes: + node_edge_colors[node] = node_colors[node], set(counts[node].items()) + + return node_edge_colors + + @staticmethod + def _get_permutations_by_length(items): + """ + Get all permutations of items, but only permute items with the same + length. + + >>> found = list(ISMAGS._get_permutations_by_length([[1], [2], [3, 4], [4, 5]])) + >>> answer = [ + ... (([1], [2]), ([3, 4], [4, 5])), + ... (([1], [2]), ([4, 5], [3, 4])), + ... (([2], [1]), ([3, 4], [4, 5])), + ... (([2], [1]), ([4, 5], [3, 4])), + ... ] + >>> found == answer + True + """ + by_len = defaultdict(list) + for item in items: + by_len[len(item)].append(item) + + yield from itertools.product( + *(itertools.permutations(by_len[l]) for l in sorted(by_len)) + ) + + @classmethod + def _refine_node_partitions(cls, graph, node_partitions, edge_colors, branch=False): + """ + Given a partition of nodes in graph, make the partitions smaller such + that all nodes in a partition have 1) the same color, and 2) the same + number of edges to specific other partitions. + """ + + def equal_color(node1, node2): + return node_edge_colors[node1] == node_edge_colors[node2] + + node_partitions = list(node_partitions) + node_colors = partition_to_color(node_partitions) + node_edge_colors = cls._find_node_edge_color(graph, node_colors, edge_colors) + if all( + are_all_equal(node_edge_colors[node] for node in partition) + for partition in node_partitions + ): + yield node_partitions + return + + new_partitions = [] + output = [new_partitions] + for partition in node_partitions: + if not are_all_equal(node_edge_colors[node] for node in partition): + refined = make_partitions(partition, equal_color) + if ( + branch + and len(refined) != 1 + and len({len(r) for r in refined}) != len([len(r) for r in refined]) + ): + # This is where it breaks. There are multiple new cells + # in refined with the same length, and their order + # matters. + # So option 1) Hit it with a big hammer and simply make all + # orderings. + permutations = cls._get_permutations_by_length(refined) + new_output = [] + for n_p in output: + for permutation in permutations: + new_output.append(n_p + list(permutation[0])) + output = new_output + else: + for n_p in output: + n_p.extend(sorted(refined, key=len)) + else: + for n_p in output: + n_p.append(partition) + for n_p in output: + yield from cls._refine_node_partitions(graph, n_p, edge_colors, branch) + + def _edges_of_same_color(self, sgn1, sgn2): + """ + Returns all edges in :attr:`graph` that have the same colour as the + edge between sgn1 and sgn2 in :attr:`subgraph`. + """ + if (sgn1, sgn2) in self._sge_colors: + # FIXME directed graphs + sge_color = self._sge_colors[sgn1, sgn2] + else: + sge_color = self._sge_colors[sgn2, sgn1] + if sge_color in self._edge_compatibility: + ge_color = self._edge_compatibility[sge_color] + g_edges = self._ge_partitions[ge_color] + else: + g_edges = [] + return g_edges + + def _map_nodes(self, sgn, candidates, constraints, mapping=None, to_be_mapped=None): + """ + Find all subgraph isomorphisms honoring constraints. 
+ """ + if mapping is None: + mapping = {} + else: + mapping = mapping.copy() + if to_be_mapped is None: + to_be_mapped = set(self.subgraph.nodes) + + # Note, we modify candidates here. Doesn't seem to affect results, but + # remember this. + # candidates = candidates.copy() + sgn_candidates = intersect(candidates[sgn]) + candidates[sgn] = frozenset([sgn_candidates]) + for gn in sgn_candidates: + # We're going to try to map sgn to gn. + if gn in mapping.values() or sgn not in to_be_mapped: + # gn is already mapped to something + continue # pragma: no cover + + # REDUCTION and COMBINATION + mapping[sgn] = gn + # BASECASE + if to_be_mapped == set(mapping.keys()): + yield {v: k for k, v in mapping.items()} + continue + left_to_map = to_be_mapped - set(mapping.keys()) + + new_candidates = candidates.copy() + sgn_nbrs = set(self.subgraph[sgn]) + not_gn_nbrs = set(self.graph.nodes) - set(self.graph[gn]) + for sgn2 in left_to_map: + if sgn2 not in sgn_nbrs: + gn2_options = not_gn_nbrs + else: + # Get all edges to gn of the right color: + g_edges = self._edges_of_same_color(sgn, sgn2) + # FIXME directed graphs + # And all nodes involved in those which are connected to gn + gn2_options = {n for e in g_edges for n in e if gn in e} + # Node color compatibility should be taken care of by the + # initial candidate lists made by find_subgraphs + + # Add gn2_options to the right collection. Since new_candidates + # is a dict of frozensets of frozensets of node indices it's + # a bit clunky. We can't do .add, and + also doesn't work. We + # could do |, but I deem union to be clearer. + new_candidates[sgn2] = new_candidates[sgn2].union( + [frozenset(gn2_options)] + ) + + if (sgn, sgn2) in constraints: + gn2_options = {gn2 for gn2 in self.graph if gn2 > gn} + elif (sgn2, sgn) in constraints: + gn2_options = {gn2 for gn2 in self.graph if gn2 < gn} + else: + continue # pragma: no cover + new_candidates[sgn2] = new_candidates[sgn2].union( + [frozenset(gn2_options)] + ) + + # The next node is the one that is unmapped and has fewest + # candidates + next_sgn = min(left_to_map, key=lambda n: min(new_candidates[n], key=len)) + yield from self._map_nodes( + next_sgn, + new_candidates, + constraints, + mapping=mapping, + to_be_mapped=to_be_mapped, + ) + # Unmap sgn-gn. Strictly not necessary since it'd get overwritten + # when making a new mapping for sgn. + # del mapping[sgn] + + def _largest_common_subgraph(self, candidates, constraints, to_be_mapped=None): + """ + Find all largest common subgraphs honoring constraints. + """ + if to_be_mapped is None: + to_be_mapped = {frozenset(self.subgraph.nodes)} + + # The LCS problem is basically a repeated subgraph isomorphism problem + # with smaller and smaller subgraphs. We store the nodes that are + # "part of" the subgraph in to_be_mapped, and we make it a little + # smaller every iteration. + + current_size = len(next(iter(to_be_mapped), [])) + + found_iso = False + if current_size <= len(self.graph): + # There's no point in trying to find isomorphisms of + # graph >= subgraph if subgraph has more nodes than graph. + + # Try the isomorphism first with the nodes with lowest ID. So sort + # them. Those are more likely to be part of the final + # correspondence. This makes finding the first answer(s) faster. In + # theory. 
+ for nodes in sorted(to_be_mapped, key=sorted): + # Find the isomorphism between subgraph[to_be_mapped] <= graph + next_sgn = min(nodes, key=lambda n: min(candidates[n], key=len)) + isomorphs = self._map_nodes( + next_sgn, candidates, constraints, to_be_mapped=nodes + ) + + # This is effectively `yield from isomorphs`, except that we look + # whether an item was yielded. + try: + item = next(isomorphs) + except StopIteration: + pass + else: + yield item + yield from isomorphs + found_iso = True + + # BASECASE + if found_iso or current_size == 1: + # Shrinking has no point because either 1) we end up with a smaller + # common subgraph (and we want the largest), or 2) there'll be no + # more subgraph. + return + + left_to_be_mapped = set() + for nodes in to_be_mapped: + for sgn in nodes: + # We're going to remove sgn from to_be_mapped, but subject to + # symmetry constraints. We know that for every constraint we + # have those subgraph nodes are equal. So whenever we would + # remove the lower part of a constraint, remove the higher + # instead. This is all dealt with by _remove_node. And because + # left_to_be_mapped is a set, we don't do double work. + + # And finally, make the subgraph one node smaller. + # REDUCTION + new_nodes = self._remove_node(sgn, nodes, constraints) + left_to_be_mapped.add(new_nodes) + # COMBINATION + yield from self._largest_common_subgraph( + candidates, constraints, to_be_mapped=left_to_be_mapped + ) + + @staticmethod + def _remove_node(node, nodes, constraints): + """ + Returns a new set where node has been removed from nodes, subject to + symmetry constraints. We know that for every constraint we have + those subgraph nodes are equal. So whenever we would remove the + lower part of a constraint, remove the higher instead. + """ + while True: + for low, high in constraints: + if low == node and high in nodes: + node = high + break + else: # no break, couldn't find node in constraints + break + return frozenset(nodes - {node}) + + @staticmethod + def _find_permutations(top_partitions, bottom_partitions): + """ + Return the pairs of top/bottom partitions where the partitions are + different. Ensures that all partitions in both top and bottom + partitions have size 1. + """ + # Find permutations + permutations = set() + for top, bot in zip(top_partitions, bottom_partitions): + # top and bot have only one element + if len(top) != 1 or len(bot) != 1: + raise IndexError( + "Not all nodes are coupled. This is" + f" impossible: {top_partitions}, {bottom_partitions}" + ) + if top != bot: + permutations.add(frozenset((next(iter(top)), next(iter(bot))))) + return permutations + + @staticmethod + def _update_orbits(orbits, permutations): + """ + Update orbits based on permutations. Orbits is modified in place. + For every pair of items in permutations their respective orbits are + merged. + """ + for permutation in permutations: + node, node2 = permutation + # Find the orbits that contain node and node2, and replace the + # orbit containing node with the union + first = second = None + for idx, orbit in enumerate(orbits): + if first is not None and second is not None: + break + if node in orbit: + first = idx + if node2 in orbit: + second = idx + if first != second: + orbits[first].update(orbits[second]) + del orbits[second] + + def _couple_nodes( + self, + top_partitions, + bottom_partitions, + pair_idx, + t_node, + b_node, + graph, + edge_colors, + ): + """ + Generate new partitions from top and bottom_partitions where t_node is + coupled to b_node.
pair_idx is the index of the partitions where t_ and + b_node can be found. + """ + t_partition = top_partitions[pair_idx] + b_partition = bottom_partitions[pair_idx] + assert t_node in t_partition and b_node in b_partition + # Couple node to node2. This means they get their own partition + new_top_partitions = [top.copy() for top in top_partitions] + new_bottom_partitions = [bot.copy() for bot in bottom_partitions] + new_t_groups = {t_node}, t_partition - {t_node} + new_b_groups = {b_node}, b_partition - {b_node} + # Replace the old partitions with the coupled ones + del new_top_partitions[pair_idx] + del new_bottom_partitions[pair_idx] + new_top_partitions[pair_idx:pair_idx] = new_t_groups + new_bottom_partitions[pair_idx:pair_idx] = new_b_groups + + new_top_partitions = self._refine_node_partitions( + graph, new_top_partitions, edge_colors + ) + new_bottom_partitions = self._refine_node_partitions( + graph, new_bottom_partitions, edge_colors, branch=True + ) + new_top_partitions = list(new_top_partitions) + assert len(new_top_partitions) == 1 + new_top_partitions = new_top_partitions[0] + for bot in new_bottom_partitions: + yield list(new_top_partitions), bot + + def _process_ordered_pair_partitions( + self, + graph, + top_partitions, + bottom_partitions, + edge_colors, + orbits=None, + cosets=None, + ): + """ + Processes ordered pair partitions as per the reference paper. Finds and + returns all permutations and cosets that leave the graph unchanged. + """ + if orbits is None: + orbits = [{node} for node in graph.nodes] + else: + # Note that we don't copy orbits when we are given one. This means + # we leak information between the recursive branches. This is + # intentional! + orbits = orbits + if cosets is None: + cosets = {} + else: + cosets = cosets.copy() + + if not all( + len(t_p) == len(b_p) for t_p, b_p in zip(top_partitions, bottom_partitions) + ): + # This used to be an assertion, but it gets tripped in rare cases: + # 5 - 4 \ / 12 - 13 + # 0 - 3 + # 9 - 8 / \ 16 - 17 + # Assume 0 and 3 are coupled and no longer equivalent. At that point + # {4, 8} and {12, 16} are no longer equivalent, and neither are + # {5, 9} and {13, 17}. Coupling 4 and refinement results in 5 and 9 + # getting their own partitions, *but not 13 and 17*. Further + # iterations will attempt to couple 5 to {13, 17}, which cannot + # result in more symmetries? 
+ return [], cosets + + # BASECASE + if all(len(top) == 1 for top in top_partitions): + # All nodes are mapped + permutations = self._find_permutations(top_partitions, bottom_partitions) + self._update_orbits(orbits, permutations) + if permutations: + return [permutations], cosets + else: + return [], cosets + + permutations = [] + unmapped_nodes = { + (node, idx) + for idx, t_partition in enumerate(top_partitions) + for node in t_partition + if len(t_partition) > 1 + } + node, pair_idx = min(unmapped_nodes) + b_partition = bottom_partitions[pair_idx] + + for node2 in sorted(b_partition): + if len(b_partition) == 1: + # Can never result in symmetry + continue + if node != node2 and any( + node in orbit and node2 in orbit for orbit in orbits + ): + # Orbit prune branch + continue + # REDUCTION + # Couple node to node2 + partitions = self._couple_nodes( + top_partitions, + bottom_partitions, + pair_idx, + node, + node2, + graph, + edge_colors, + ) + for opp in partitions: + new_top_partitions, new_bottom_partitions = opp + + new_perms, new_cosets = self._process_ordered_pair_partitions( + graph, + new_top_partitions, + new_bottom_partitions, + edge_colors, + orbits, + cosets, + ) + # COMBINATION + permutations += new_perms + cosets.update(new_cosets) + + mapped = { + k + for top, bottom in zip(top_partitions, bottom_partitions) + for k in top + if len(top) == 1 and top == bottom + } + ks = {k for k in graph.nodes if k < node} + # Have all nodes with ID < node been mapped? + find_coset = ks <= mapped and node not in cosets + if find_coset: + # Find the orbit that contains node + for orbit in orbits: + if node in orbit: + cosets[node] = orbit.copy() + return permutations, cosets diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/isomorph.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/isomorph.py new file mode 100644 index 0000000..f49594a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/isomorph.py @@ -0,0 +1,336 @@ +""" +Graph isomorphism functions. +""" + +import itertools +from collections import Counter + +import networkx as nx +from networkx.exception import NetworkXError + +__all__ = [ + "could_be_isomorphic", + "fast_could_be_isomorphic", + "faster_could_be_isomorphic", + "is_isomorphic", +] + + +@nx._dispatchable(graphs={"G1": 0, "G2": 1}) +def could_be_isomorphic(G1, G2, *, properties="dtc"): + """Returns False if graphs are definitely not isomorphic. + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs `G1` and `G2` must be the same type. + + properties : str, default="dtc" + Determines which properties of the graph are checked. Each character + indicates a particular property as follows: + + - if ``"d"`` in `properties`: degree of each node + - if ``"t"`` in `properties`: number of triangles for each node + - if ``"c"`` in `properties`: number of maximal cliques for each node + + Unrecognized characters are ignored. The default is ``"dtc"``, which + compares the sequence of ``(degree, num_triangles, num_cliques)`` properties + between `G1` and `G2`. Generally, ``properties="dt"`` would be faster, and + ``properties="d"`` faster still. See Notes for additional details on + property selection. + + Returns + ------- + bool + A Boolean value representing whether `G1` could be isomorphic with `G2` + according to the specified `properties`. + + Notes + ----- + The triangle sequence contains the number of triangles each node is part of.
The clique sequence contains for each node the number of maximal cliques + involving that node. + + Some properties are faster to compute than others. And there are other + properties we could include and don't. But of the three properties listed here, + comparing the degree distributions is the fastest. The "triangles" property + is slower (and also a stricter version of "could") and the "maximal cliques" + property is slower still, but usually faster than doing a full isomorphism + check. + """ + + # Check global properties + if G1.order() != G2.order(): + return False + + properties_to_check = set(properties) + G1_props, G2_props = [], [] + + def _properties_consistent(): + # Ravel the properties into a table with # nodes rows and # properties columns + G1_ptable = [tuple(p[n] for p in G1_props) for n in G1] + G2_ptable = [tuple(p[n] for p in G2_props) for n in G2] + + return sorted(G1_ptable) == sorted(G2_ptable) + + # The property table is built and checked as each individual property is + # added. The reason for this is that building/checking the property table + # is in general much faster than computing the properties, making it + # worthwhile to check multiple times to enable early termination when + # a subset of properties doesn't match. + + # Degree sequence + if "d" in properties_to_check: + G1_props.append(G1.degree()) + G2_props.append(G2.degree()) + if not _properties_consistent(): + return False + # Sequence of triangles per node + if "t" in properties_to_check: + G1_props.append(nx.triangles(G1)) + G2_props.append(nx.triangles(G2)) + if not _properties_consistent(): + return False + # Sequence of maximal cliques per node + if "c" in properties_to_check: + G1_props.append(Counter(itertools.chain.from_iterable(nx.find_cliques(G1)))) + G2_props.append(Counter(itertools.chain.from_iterable(nx.find_cliques(G2)))) + if not _properties_consistent(): + return False + + # All checked conditions passed + return True + + +def graph_could_be_isomorphic(G1, G2): + """ + .. deprecated:: 3.5 + + `graph_could_be_isomorphic` is a deprecated alias for `could_be_isomorphic`. + Use `could_be_isomorphic` instead. + """ + import warnings + + warnings.warn( + "graph_could_be_isomorphic is deprecated, use `could_be_isomorphic` instead.", + category=DeprecationWarning, + stacklevel=2, + ) + return could_be_isomorphic(G1, G2) + + +@nx._dispatchable(graphs={"G1": 0, "G2": 1}) +def fast_could_be_isomorphic(G1, G2): + """Returns False if graphs are definitely not isomorphic. + + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs G1 and G2 must be the same type. + + Notes + ----- + Checks for matching degree and triangle sequences. The triangle + sequence contains the number of triangles each node is part of. + """ + # Check global properties + if G1.order() != G2.order(): + return False + + # Check local properties + d1 = G1.degree() + t1 = nx.triangles(G1) + props1 = [[d, t1[v]] for v, d in d1] + props1.sort() + + d2 = G2.degree() + t2 = nx.triangles(G2) + props2 = [[d, t2[v]] for v, d in d2] + props2.sort() + + if props1 != props2: + return False + + # OK... + return True + + +def fast_graph_could_be_isomorphic(G1, G2): + """ + .. deprecated:: 3.5 + + `fast_graph_could_be_isomorphic` is a deprecated alias for + `fast_could_be_isomorphic`. Use `fast_could_be_isomorphic` instead.
+ """ + import warnings + + warnings.warn( + "fast_graph_could_be_isomorphic is deprecated, use fast_could_be_isomorphic instead", + category=DeprecationWarning, + stacklevel=2, + ) + return fast_could_be_isomorphic(G1, G2) + + +@nx._dispatchable(graphs={"G1": 0, "G2": 1}) +def faster_could_be_isomorphic(G1, G2): + """Returns False if graphs are definitely not isomorphic. + + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs G1 and G2 must be the same type. + + Notes + ----- + Checks for matching degree sequences. + """ + # Check global properties + if G1.order() != G2.order(): + return False + + # Check local properties + d1 = sorted(d for n, d in G1.degree()) + d2 = sorted(d for n, d in G2.degree()) + + if d1 != d2: + return False + + # OK... + return True + + +def faster_graph_could_be_isomorphic(G1, G2): + """ + .. deprecated:: 3.5 + + `faster_graph_could_be_isomorphic` is a deprecated alias for + `faster_could_be_isomorphic`. Use `faster_could_be_isomorphic` instead. + """ + import warnings + + warnings.warn( + "faster_graph_could_be_isomorphic is deprecated, use faster_could_be_isomorphic instead", + category=DeprecationWarning, + stacklevel=2, + ) + return faster_could_be_isomorphic(G1, G2) + + +@nx._dispatchable( + graphs={"G1": 0, "G2": 1}, + preserve_edge_attrs="edge_match", + preserve_node_attrs="node_match", +) +def is_isomorphic(G1, G2, node_match=None, edge_match=None): + """Returns True if the graphs G1 and G2 are isomorphic and False otherwise. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 should + be considered equal during the isomorphism test. + If node_match is not specified then node attributes are not considered. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute dictionaries + for n1 and n2 as inputs. + + edge_match : callable + A function that returns True if the edge attribute dictionary + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during the isomorphism test. If edge_match is + not specified then edge attributes are not considered. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. + + Notes + ----- + Uses the vf2 algorithm [1]_. 
+ + Examples + -------- + >>> import networkx.algorithms.isomorphism as iso + + For digraphs G1 and G2, using 'weight' edge attribute (default: 1) + + >>> G1 = nx.DiGraph() + >>> G2 = nx.DiGraph() + >>> nx.add_path(G1, [1, 2, 3, 4], weight=1) + >>> nx.add_path(G2, [10, 20, 30, 40], weight=2) + >>> em = iso.numerical_edge_match("weight", 1) + >>> nx.is_isomorphic(G1, G2) # no weights considered + True + >>> nx.is_isomorphic(G1, G2, edge_match=em) # match weights + False + + For multidigraphs G1 and G2, using 'fill' node attribute (default: '') + + >>> G1 = nx.MultiDiGraph() + >>> G2 = nx.MultiDiGraph() + >>> G1.add_nodes_from([1, 2, 3], fill="red") + >>> G2.add_nodes_from([10, 20, 30, 40], fill="red") + >>> nx.add_path(G1, [1, 2, 3, 4], weight=3, linewidth=2.5) + >>> nx.add_path(G2, [10, 20, 30, 40], weight=3) + >>> nm = iso.categorical_node_match("fill", "red") + >>> nx.is_isomorphic(G1, G2, node_match=nm) + True + + For multidigraphs G1 and G2, using 'weight' edge attribute (default: 7) + + >>> G1.add_edge(1, 2, weight=7) + 1 + >>> G2.add_edge(10, 20) + 1 + >>> em = iso.numerical_multiedge_match("weight", 7, rtol=1e-6) + >>> nx.is_isomorphic(G1, G2, edge_match=em) + True + + For multigraphs G1 and G2, using 'weight' and 'linewidth' edge attributes + with default values 7 and 2.5. Also using 'fill' node attribute with + default value 'red'. + + >>> em = iso.numerical_multiedge_match(["weight", "linewidth"], [7, 2.5]) + >>> nm = iso.categorical_node_match("fill", "red") + >>> nx.is_isomorphic(G1, G2, edge_match=em, node_match=nm) + True + + See Also + -------- + numerical_node_match, numerical_edge_match, numerical_multiedge_match + categorical_node_match, categorical_edge_match, categorical_multiedge_match + + References + ---------- + .. [1] L. P. Cordella, P. Foggia, C. Sansone, M. Vento, + "An Improved Algorithm for Matching Large Graphs", + 3rd IAPR-TC15 Workshop on Graph-based Representations in + Pattern Recognition, Cuen, pp. 149-159, 2001. + https://www.researchgate.net/publication/200034365_An_Improved_Algorithm_for_Matching_Large_Graphs + """ + if G1.is_directed() and G2.is_directed(): + GM = nx.algorithms.isomorphism.DiGraphMatcher + elif (not G1.is_directed()) and (not G2.is_directed()): + GM = nx.algorithms.isomorphism.GraphMatcher + else: + raise NetworkXError("Graphs G1 and G2 are not of the same type.") + + gm = GM(G1, G2, node_match=node_match, edge_match=edge_match) + + return gm.is_isomorphic() diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py new file mode 100644 index 0000000..587503a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py @@ -0,0 +1,1262 @@ +""" +************* +VF2 Algorithm +************* + +An implementation of VF2 algorithm for graph isomorphism testing. + +The simplest interface to use this module is to call the +:func:`is_isomorphic ` +function. + +Introduction +------------ + +The GraphMatcher and DiGraphMatcher are responsible for matching +graphs or directed graphs in a predetermined manner. This +usually means a check for an isomorphism, though other checks +are also possible. For example, a subgraph of one graph +can be checked for isomorphism to a second graph. + +Matching is done via syntactic feasibility. It is also possible +to check for semantic feasibility. Feasibility, then, is defined +as the logical AND of the two functions. 
+ +To include a semantic check, the (Di)GraphMatcher class should be +subclassed, and the +:meth:`semantic_feasibility ` +function should be redefined. By default, the semantic feasibility function always +returns ``True``. The effect of this is that semantics are not +considered in the matching of G1 and G2. + +Examples +-------- + +Suppose G1 and G2 are isomorphic graphs. Verification is as follows: + +>>> from networkx.algorithms import isomorphism +>>> G1 = nx.path_graph(4) +>>> G2 = nx.path_graph(4) +>>> GM = isomorphism.GraphMatcher(G1, G2) +>>> GM.is_isomorphic() +True + +GM.mapping stores the isomorphism mapping from G1 to G2. + +>>> GM.mapping +{0: 0, 1: 1, 2: 2, 3: 3} + + +Suppose G1 and G2 are isomorphic directed graphs. +Verification is as follows: + +>>> G1 = nx.path_graph(4, create_using=nx.DiGraph) +>>> G2 = nx.path_graph(4, create_using=nx.DiGraph) +>>> DiGM = isomorphism.DiGraphMatcher(G1, G2) +>>> DiGM.is_isomorphic() +True + +DiGM.mapping stores the isomorphism mapping from G1 to G2. + +>>> DiGM.mapping +{0: 0, 1: 1, 2: 2, 3: 3} + + + +Subgraph Isomorphism +-------------------- +Graph theory literature can be ambiguous about the meaning of the +above statement, and we seek to clarify it now. + +In the VF2 literature, a mapping ``M`` is said to be a graph-subgraph +isomorphism iff ``M`` is an isomorphism between ``G2`` and a subgraph of ``G1``. +Thus, to say that ``G1`` and ``G2`` are graph-subgraph isomorphic is to say +that a subgraph of ``G1`` is isomorphic to ``G2``. + +Other literature uses the phrase 'subgraph isomorphic' as in '``G1`` does +not have a subgraph isomorphic to ``G2``'. Another use is as an adverb +for isomorphic. Thus, to say that ``G1`` and ``G2`` are subgraph isomorphic +is to say that a subgraph of ``G1`` is isomorphic to ``G2``. + +Finally, the term 'subgraph' can have multiple meanings. In this +context, 'subgraph' always means a 'node-induced subgraph'. Edge-induced +subgraph isomorphisms are not directly supported, but one should be +able to perform the check by making use of +:func:`line_graph `. For +subgraphs which are not induced, the term 'monomorphism' is preferred +over 'isomorphism'. + +Let ``G = (N, E)`` be a graph with a set of nodes ``N`` and set of edges ``E``. + +If ``G' = (N', E')`` is a subgraph, then: + ``N'`` is a subset of ``N`` and + ``E'`` is a subset of ``E``. + +If ``G' = (N', E')`` is a node-induced subgraph, then: + ``N'`` is a subset of ``N`` and + ``E'`` is the subset of edges in ``E`` relating nodes in ``N'``. + +If ``G' = (N', E')`` is an edge-induced subgraph, then: + ``N'`` is the subset of nodes in ``N`` related by edges in ``E'`` and + ``E'`` is a subset of ``E``. + +If ``G' = (N', E')`` is a monomorphism, then: + ``N'`` is a subset of ``N`` and + ``E'`` is a subset of the set of edges in ``E`` relating nodes in ``N'``. + +Note that if ``G'`` is a node-induced subgraph of ``G``, then it is always a +subgraph monomorphism of ``G``, but the opposite is not always true, as a +monomorphism can have fewer edges. + +References +---------- +[1] Luigi P. Cordella, Pasquale Foggia, Carlo Sansone, Mario Vento, + "A (Sub)Graph Isomorphism Algorithm for Matching Large Graphs", + IEEE Transactions on Pattern Analysis and Machine Intelligence, + vol. 26, no. 10, pp. 1367-1372, Oct., 2004. + http://ieeexplore.ieee.org/iel5/34/29305/01323804.pdf + +[2] L. P. Cordella, P. Foggia, C. Sansone, M.
Vento, "An Improved + Algorithm for Matching Large Graphs", 3rd IAPR-TC15 Workshop + on Graph-based Representations in Pattern Recognition, Cuen, + pp. 149-159, 2001. + https://www.researchgate.net/publication/200034365_An_Improved_Algorithm_for_Matching_Large_Graphs + +See Also +-------- +:meth:`semantic_feasibility ` +:meth:`syntactic_feasibility ` + +Notes +----- + +The implementation handles both directed and undirected graphs as well +as multigraphs. + +In general, the subgraph isomorphism problem is NP-complete whereas the +graph isomorphism problem is most likely not NP-complete (although no +polynomial-time algorithm is known to exist). + +""" + +# This work was originally coded by Christopher Ellison +# as part of the Computational Mechanics Python (CMPy) project. +# James P. Crutchfield, principal investigator. +# Complexity Sciences Center and Physics Department, UC Davis. + +import sys + +import networkx as nx + +__all__ = ["GraphMatcher", "DiGraphMatcher"] + + +class GraphMatcher: + """Implementation of VF2 algorithm for matching undirected graphs. + + Suitable for Graph and MultiGraph instances. + """ + + def __init__(self, G1, G2): + """Initialize GraphMatcher. + + Parameters + ---------- + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism. + + Examples + -------- + To create a GraphMatcher which checks for syntactic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> G1 = nx.path_graph(4) + >>> G2 = nx.path_graph(4) + >>> GM = isomorphism.GraphMatcher(G1, G2) + """ + if G1.is_directed() != G2.is_directed(): + raise nx.NetworkXError("G1 and G2 must have the same directedness") + + is_directed_matcher = self._is_directed_matcher() + if not is_directed_matcher and (G1.is_directed() or G2.is_directed()): + raise nx.NetworkXError( + "(Multi-)GraphMatcher() not defined for directed graphs. " + "Use (Multi-)DiGraphMatcher() instead." + ) + + if is_directed_matcher and not (G1.is_directed() and G2.is_directed()): + raise nx.NetworkXError( + "(Multi-)DiGraphMatcher() not defined for undirected graphs. " + "Use (Multi-)GraphMatcher() instead." + ) + + self.G1 = G1 + self.G2 = G2 + self.G1_nodes = set(G1.nodes()) + self.G2_nodes = set(G2.nodes()) + self.G2_node_order = {n: i for i, n in enumerate(G2)} + + # Set recursion limit. + self.old_recursion_limit = sys.getrecursionlimit() + expected_max_recursion_level = len(self.G2) + if self.old_recursion_limit < 1.5 * expected_max_recursion_level: + # Give some breathing room. + sys.setrecursionlimit(int(1.5 * expected_max_recursion_level)) + + # Declare that we will be searching for a graph-graph isomorphism. + self.test = "graph" + + # Initialize state + self.initialize() + + def _is_directed_matcher(self): + return False + + def reset_recursion_limit(self): + """Restores the recursion limit.""" + # TODO: + # Currently, we use recursion and set the recursion level higher. + # It would be nice to restore the level, but because the + # (Di)GraphMatcher classes make use of cyclic references, garbage + # collection will never happen when we define __del__() to + # restore the recursion level. The result is a memory leak. + # So for now, we do not automatically restore the recursion level, + # and instead provide a method to do this manually. Eventually, + # we should turn this into a non-recursive implementation. 
+ sys.setrecursionlimit(self.old_recursion_limit) + + def candidate_pairs_iter(self): + """Iterator over candidate pairs of nodes in G1 and G2.""" + + # All computations are done using the current state! + + G1_nodes = self.G1_nodes + G2_nodes = self.G2_nodes + min_key = self.G2_node_order.__getitem__ + + # First we compute the inout-terminal sets. + T1_inout = [node for node in self.inout_1 if node not in self.core_1] + T2_inout = [node for node in self.inout_2 if node not in self.core_2] + + # If T1_inout and T2_inout are both nonempty. + # P(s) = T1_inout x {min T2_inout} + if T1_inout and T2_inout: + node_2 = min(T2_inout, key=min_key) + for node_1 in T1_inout: + yield node_1, node_2 + + else: + # If T1_inout and T2_inout were both empty.... + # P(s) = (N_1 - M_1) x {min (N_2 - M_2)} + # if not (T1_inout or T2_inout): # as suggested by [2], incorrect + if 1: # as inferred from [1], correct + # First we determine the candidate node for G2 + other_node = min(G2_nodes - set(self.core_2), key=min_key) + for node in self.G1: + if node not in self.core_1: + yield node, other_node + + # For all other cases, we don't have any candidate pairs. + + def initialize(self): + """Reinitializes the state of the algorithm. + + This method should be redefined if using something other than GMState. + If only subclassing GraphMatcher, a redefinition is not necessary. + + """ + + # core_1[n] contains the index of the node paired with n, which is m, + # provided n is in the mapping. + # core_2[m] contains the index of the node paired with m, which is n, + # provided m is in the mapping. + self.core_1 = {} + self.core_2 = {} + + # See the paper for definitions of M_x and T_x^{y} + + # inout_1[n] is non-zero if n is in M_1 or in T_1^{inout} + # inout_2[m] is non-zero if m is in M_2 or in T_2^{inout} + # + # The value stored is the depth of the SSR tree when the node became + # part of the corresponding set. + self.inout_1 = {} + self.inout_2 = {} + # Practically, these sets simply store the nodes in the subgraph. + + self.state = GMState(self) + + # Provide a convenient way to access the isomorphism mapping. + self.mapping = self.core_1.copy() + + def is_isomorphic(self): + """Returns True if G1 and G2 are isomorphic graphs.""" + + # Let's do two very quick checks! + # QUESTION: Should we call faster_graph_could_be_isomorphic(G1,G2)? + # For now, I just copy the code. + + # Check global properties + if self.G1.order() != self.G2.order(): + return False + + # Check local properties + d1 = sorted(d for n, d in self.G1.degree()) + d2 = sorted(d for n, d in self.G2.degree()) + if d1 != d2: + return False + + try: + x = next(self.isomorphisms_iter()) + return True + except StopIteration: + return False + + def isomorphisms_iter(self): + """Generator over isomorphisms between G1 and G2.""" + # Declare that we are looking for a graph-graph isomorphism. + self.test = "graph" + self.initialize() + yield from self.match() + + def match(self): + """Extends the isomorphism mapping. + + This function is called recursively to determine if a complete + isomorphism can be found between G1 and G2. It cleans up the class + variables after each recursive call. If an isomorphism is found, + we yield the mapping. + + """ + if len(self.core_1) == len(self.G2): + # Save the final mapping, otherwise garbage collection deletes it. + self.mapping = self.core_1.copy() + # The mapping is complete. 
+            yield self.mapping
+        else:
+            for G1_node, G2_node in self.candidate_pairs_iter():
+                if self.syntactic_feasibility(G1_node, G2_node):
+                    if self.semantic_feasibility(G1_node, G2_node):
+                        # Recursive call, adding the feasible state.
+                        newstate = self.state.__class__(self, G1_node, G2_node)
+                        yield from self.match()
+
+                        # restore data structures
+                        newstate.restore()
+
+    def semantic_feasibility(self, G1_node, G2_node):
+        """Returns True if adding (G1_node, G2_node) is semantically feasible.
+
+        The semantic feasibility function should return True if it is
+        acceptable to add the candidate pair (G1_node, G2_node) to the current
+        partial isomorphism mapping. The logic should focus on semantic
+        information contained in the edge data or a formalized node class.
+
+        By acceptable, we mean that the subsequent mapping can still become a
+        complete isomorphism mapping. Thus, if adding the candidate pair
+        definitely makes it so that the subsequent mapping cannot become a
+        complete isomorphism mapping, then this function must return False.
+
+        The default semantic feasibility function always returns True. The
+        effect is that semantics are not considered in the matching of G1
+        and G2.
+
+        The semantic checks might differ based on what type of test is
+        being performed. A keyword description of the test is stored in
+        self.test. Here is a quick description of the currently implemented
+        tests::
+
+            test='graph'
+                Indicates that the graph matcher is looking for a graph-graph
+                isomorphism.
+
+            test='subgraph'
+                Indicates that the graph matcher is looking for a subgraph-graph
+                isomorphism such that a subgraph of G1 is isomorphic to G2.
+
+            test='mono'
+                Indicates that the graph matcher is looking for a subgraph-graph
+                monomorphism such that a subgraph of G1 is monomorphic to G2.
+
+        Any subclass which redefines semantic_feasibility() must maintain
+        the above form to keep the match() method functional. Implementations
+        should consider multigraphs.
+        """
+        return True
+
+    def subgraph_is_isomorphic(self):
+        """Returns `True` if a subgraph of ``G1`` is isomorphic to ``G2``.
+
+        Examples
+        --------
+        When creating the `GraphMatcher`, the order of the arguments is important:
+
+        >>> G = nx.Graph([("A", "B"), ("B", "C"), ("A", "C")])
+        >>> H = nx.Graph([(0, 1), (1, 2), (0, 2), (1, 3), (0, 4)])
+
+        Check whether a subgraph of G is isomorphic to H:
+
+        >>> isomatcher = nx.isomorphism.GraphMatcher(G, H)
+        >>> isomatcher.subgraph_is_isomorphic()
+        False
+
+        Check whether a subgraph of H is isomorphic to G:
+
+        >>> isomatcher = nx.isomorphism.GraphMatcher(H, G)
+        >>> isomatcher.subgraph_is_isomorphic()
+        True
+        """
+        try:
+            x = next(self.subgraph_isomorphisms_iter())
+            return True
+        except StopIteration:
+            return False
+
+    def subgraph_is_monomorphic(self):
+        """Returns `True` if a subgraph of ``G1`` is monomorphic to ``G2``.
+
+        Examples
+        --------
+        When creating the `GraphMatcher`, the order of the arguments is important.
+
+        >>> G = nx.Graph([("A", "B"), ("B", "C")])
+        >>> H = nx.Graph([(0, 1), (1, 2), (0, 2)])
+
+        Check whether a subgraph of G is monomorphic to H:
+
+        >>> isomatcher = nx.isomorphism.GraphMatcher(G, H)
+        >>> isomatcher.subgraph_is_monomorphic()
+        False
+
+        Check whether a subgraph of H is monomorphic to G:
+
+        >>> isomatcher = nx.isomorphism.GraphMatcher(H, G)
+        >>> isomatcher.subgraph_is_monomorphic()
+        True
+        """
+        try:
+            x = next(self.subgraph_monomorphisms_iter())
+            return True
+        except StopIteration:
+            return False
+
+    def subgraph_isomorphisms_iter(self):
+        """Generator over isomorphisms between a subgraph of ``G1`` and ``G2``.
+
+        Examples
+        --------
+        When creating the `GraphMatcher`, the order of the arguments is important:
+
+        >>> G = nx.Graph([("A", "B"), ("B", "C"), ("A", "C")])
+        >>> H = nx.Graph([(0, 1), (1, 2), (0, 2), (1, 3), (0, 4)])
+
+        Yield isomorphic mappings between ``H`` and subgraphs of ``G``:
+
+        >>> isomatcher = nx.isomorphism.GraphMatcher(G, H)
+        >>> list(isomatcher.subgraph_isomorphisms_iter())
+        []
+
+        Yield isomorphic mappings between ``G`` and subgraphs of ``H``:
+
+        >>> isomatcher = nx.isomorphism.GraphMatcher(H, G)
+        >>> next(isomatcher.subgraph_isomorphisms_iter())
+        {0: 'A', 1: 'B', 2: 'C'}
+
+        """
+        # Declare that we are looking for graph-subgraph isomorphism.
+        self.test = "subgraph"
+        self.initialize()
+        yield from self.match()
+
+    def subgraph_monomorphisms_iter(self):
+        """Generator over monomorphisms between a subgraph of ``G1`` and ``G2``.
+
+        Examples
+        --------
+        When creating the `GraphMatcher`, the order of the arguments is important.
+
+        >>> G = nx.Graph([("A", "B"), ("B", "C")])
+        >>> H = nx.Graph([(0, 1), (1, 2), (0, 2)])
+
+        Yield monomorphic mappings between ``H`` and subgraphs of ``G``:
+
+        >>> isomatcher = nx.isomorphism.GraphMatcher(G, H)
+        >>> list(isomatcher.subgraph_monomorphisms_iter())
+        []
+
+        Yield monomorphic mappings between ``G`` and subgraphs of ``H``:
+
+        >>> isomatcher = nx.isomorphism.GraphMatcher(H, G)
+        >>> next(isomatcher.subgraph_monomorphisms_iter())
+        {0: 'A', 1: 'B', 2: 'C'}
+        """
+        # Declare that we are looking for graph-subgraph monomorphism.
+        self.test = "mono"
+        self.initialize()
+        yield from self.match()
+
+    def syntactic_feasibility(self, G1_node, G2_node):
+        """Returns True if adding (G1_node, G2_node) is syntactically feasible.
+
+        This function returns True if adding the candidate pair
+        to the current partial isomorphism/monomorphism mapping is allowable.
+        The addition is allowable if the inclusion of the candidate pair does
+        not make it impossible for an isomorphism/monomorphism to be found.
+        """
+
+        # The VF2 algorithm was designed to work with graphs having, at most,
+        # one edge connecting any two nodes. This is not the case when
+        # dealing with MultiGraphs.
+        #
+        # Basically, when we test the look-ahead rules R_neighbor, we will
+        # make sure that the number of edges is checked. We also add
+        # an R_self check to verify that the number of self-loops is acceptable.
+        #
+        # Users might be comparing Graph instances with MultiGraph instances.
+        # So the generic GraphMatcher class must work with MultiGraphs.
+        # Care must be taken since the value in the innermost dictionary is a
+        # singlet for Graph instances. For MultiGraphs, the value in the
+        # innermost dictionary is a list.
+
+        ###
+        # Test at each step to get a return value as soon as possible.
+        ###
+
+        # Look ahead 0
+
+        # R_self
+
+        # The number of self-loops for G1_node must equal the number of
+        # self-loops for G2_node.
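+        # For instance (a hypothetical pair of multigraphs), if G1_node has
+        # two self-loops and G2_node has one, an isomorphism or subgraph test
+        # fails here, while a monomorphism test still passes since 2 >= 1.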
+        # Without this self-loop check, we would fail on R_neighbor at the
+        # next recursion level. But it is good to prune the search tree now.
+
+        if self.test == "mono":
+            if self.G1.number_of_edges(G1_node, G1_node) < self.G2.number_of_edges(
+                G2_node, G2_node
+            ):
+                return False
+        else:
+            if self.G1.number_of_edges(G1_node, G1_node) != self.G2.number_of_edges(
+                G2_node, G2_node
+            ):
+                return False
+
+        # R_neighbor
+
+        # For each neighbor n' of n in the partial mapping, the corresponding
+        # node m' is a neighbor of m, and vice versa. Also, the number of
+        # edges must be equal.
+        if self.test != "mono":
+            for neighbor in self.G1[G1_node]:
+                if neighbor in self.core_1:
+                    if self.core_1[neighbor] not in self.G2[G2_node]:
+                        return False
+                    elif self.G1.number_of_edges(
+                        neighbor, G1_node
+                    ) != self.G2.number_of_edges(self.core_1[neighbor], G2_node):
+                        return False
+
+        for neighbor in self.G2[G2_node]:
+            if neighbor in self.core_2:
+                if self.core_2[neighbor] not in self.G1[G1_node]:
+                    return False
+                elif self.test == "mono":
+                    if self.G1.number_of_edges(
+                        self.core_2[neighbor], G1_node
+                    ) < self.G2.number_of_edges(neighbor, G2_node):
+                        return False
+                else:
+                    if self.G1.number_of_edges(
+                        self.core_2[neighbor], G1_node
+                    ) != self.G2.number_of_edges(neighbor, G2_node):
+                        return False
+
+        if self.test != "mono":
+            # Look ahead 1
+
+            # R_terminout
+            # The number of neighbors of n in T_1^{inout} is equal to the
+            # number of neighbors of m that are in T_2^{inout}, and vice versa.
+            num1 = 0
+            for neighbor in self.G1[G1_node]:
+                if (neighbor in self.inout_1) and (neighbor not in self.core_1):
+                    num1 += 1
+            num2 = 0
+            for neighbor in self.G2[G2_node]:
+                if (neighbor in self.inout_2) and (neighbor not in self.core_2):
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+            # Look ahead 2
+
+            # R_new
+
+            # The number of neighbors of n that are neither in core_1 nor
+            # T_1^{inout} is equal to the number of neighbors of m
+            # that are neither in core_2 nor T_2^{inout}.
+            num1 = 0
+            for neighbor in self.G1[G1_node]:
+                if neighbor not in self.inout_1:
+                    num1 += 1
+            num2 = 0
+            for neighbor in self.G2[G2_node]:
+                if neighbor not in self.inout_2:
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+        # Otherwise, this node pair is syntactically feasible!
+        return True
+
+
+class DiGraphMatcher(GraphMatcher):
+    """Implementation of VF2 algorithm for matching directed graphs.
+
+    Suitable for DiGraph and MultiDiGraph instances.
+    """
+
+    def __init__(self, G1, G2):
+        """Initialize DiGraphMatcher.
+
+        G1 and G2 should be nx.DiGraph or nx.MultiDiGraph instances.
+
+        Examples
+        --------
+        To create a DiGraphMatcher which checks for syntactic feasibility:
+
+        >>> from networkx.algorithms import isomorphism
+        >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph()))
+        >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph()))
+        >>> DiGM = isomorphism.DiGraphMatcher(G1, G2)
+        """
+        super().__init__(G1, G2)
+
+    def _is_directed_matcher(self):
+        return True
+
+    def candidate_pairs_iter(self):
+        """Iterator over candidate pairs of nodes in G1 and G2."""
+
+        # All computations are done using the current state!
+
+        G1_nodes = self.G1_nodes
+        G2_nodes = self.G2_nodes
+        min_key = self.G2_node_order.__getitem__
+
+        # First we compute the out-terminal sets.
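+        # (T1_out/T2_out are the nodes reachable by out-edges from the
+        # partially mapped nodes but not yet mapped themselves; restricting
+        # candidate pairs to these sets keeps the search connected, as in [1].)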
+        T1_out = [node for node in self.out_1 if node not in self.core_1]
+        T2_out = [node for node in self.out_2 if node not in self.core_2]
+
+        # If T1_out and T2_out are both nonempty.
+        # P(s) = T1_out x {min T2_out}
+        if T1_out and T2_out:
+            node_2 = min(T2_out, key=min_key)
+            for node_1 in T1_out:
+                yield node_1, node_2
+
+        # If T1_out and T2_out were both empty....
+        # We compute the in-terminal sets.
+
+        # elif not (T1_out or T2_out):   # as suggested by [2], incorrect
+        else:  # as suggested by [1], correct
+            T1_in = [node for node in self.in_1 if node not in self.core_1]
+            T2_in = [node for node in self.in_2 if node not in self.core_2]
+
+            # If T1_in and T2_in are both nonempty.
+            # P(s) = T1_in x {min T2_in}
+            if T1_in and T2_in:
+                node_2 = min(T2_in, key=min_key)
+                for node_1 in T1_in:
+                    yield node_1, node_2
+
+            # If all terminal sets are empty...
+            # P(s) = (N_1 - M_1) x {min (N_2 - M_2)}
+
+            # elif not (T1_in or T2_in):   # as suggested by [2], incorrect
+            else:  # as inferred from [1], correct
+                node_2 = min(G2_nodes - set(self.core_2), key=min_key)
+                for node_1 in G1_nodes:
+                    if node_1 not in self.core_1:
+                        yield node_1, node_2
+
+        # For all other cases, we don't have any candidate pairs.
+
+    def initialize(self):
+        """Reinitializes the state of the algorithm.
+
+        This method should be redefined if using something other than DiGMState.
+        If only subclassing GraphMatcher, a redefinition is not necessary.
+        """
+
+        # core_1[n] contains the index of the node paired with n, which is m,
+        # provided n is in the mapping.
+        # core_2[m] contains the index of the node paired with m, which is n,
+        # provided m is in the mapping.
+        self.core_1 = {}
+        self.core_2 = {}
+
+        # See the paper for definitions of M_x and T_x^{y}
+
+        # in_1[n] is non-zero if n is in M_1 or in T_1^{in}
+        # out_1[n] is non-zero if n is in M_1 or in T_1^{out}
+        #
+        # in_2[m] is non-zero if m is in M_2 or in T_2^{in}
+        # out_2[m] is non-zero if m is in M_2 or in T_2^{out}
+        #
+        # The value stored is the depth of the search tree when the node became
+        # part of the corresponding set.
+        self.in_1 = {}
+        self.in_2 = {}
+        self.out_1 = {}
+        self.out_2 = {}
+
+        self.state = DiGMState(self)
+
+        # Provide a convenient way to access the isomorphism mapping.
+        self.mapping = self.core_1.copy()
+
+    def syntactic_feasibility(self, G1_node, G2_node):
+        """Returns True if adding (G1_node, G2_node) is syntactically feasible.
+
+        This function returns True if adding the candidate pair
+        to the current partial isomorphism/monomorphism mapping is allowable.
+        The addition is allowable if the inclusion of the candidate pair does
+        not make it impossible for an isomorphism/monomorphism to be found.
+        """
+
+        # The VF2 algorithm was designed to work with graphs having, at most,
+        # one edge connecting any two nodes. This is not the case when
+        # dealing with MultiGraphs.
+        #
+        # Basically, when we test the look-ahead rules R_pred and R_succ, we
+        # will make sure that the number of edges is checked. We also add
+        # an R_self check to verify that the number of self-loops is acceptable.
+
+        # Users might be comparing DiGraph instances with MultiDiGraph
+        # instances. So the generic DiGraphMatcher class must work with
+        # MultiDiGraphs. Care must be taken since the value in the innermost
+        # dictionary is a singlet for DiGraph instances. For MultiDiGraphs,
+        # the value in the innermost dictionary is a list.
+
+        ###
+        # Test at each step to get a return value as soon as possible.
+ ### + + # Look ahead 0 + + # R_self + + # The number of selfloops for G1_node must equal the number of + # self-loops for G2_node. Without this check, we would fail on R_pred + # at the next recursion level. This should prune the tree even further. + if self.test == "mono": + if self.G1.number_of_edges(G1_node, G1_node) < self.G2.number_of_edges( + G2_node, G2_node + ): + return False + else: + if self.G1.number_of_edges(G1_node, G1_node) != self.G2.number_of_edges( + G2_node, G2_node + ): + return False + + # R_pred + + # For each predecessor n' of n in the partial mapping, the + # corresponding node m' is a predecessor of m, and vice versa. Also, + # the number of edges must be equal + if self.test != "mono": + for predecessor in self.G1.pred[G1_node]: + if predecessor in self.core_1: + if self.core_1[predecessor] not in self.G2.pred[G2_node]: + return False + elif self.G1.number_of_edges( + predecessor, G1_node + ) != self.G2.number_of_edges(self.core_1[predecessor], G2_node): + return False + + for predecessor in self.G2.pred[G2_node]: + if predecessor in self.core_2: + if self.core_2[predecessor] not in self.G1.pred[G1_node]: + return False + elif self.test == "mono": + if self.G1.number_of_edges( + self.core_2[predecessor], G1_node + ) < self.G2.number_of_edges(predecessor, G2_node): + return False + else: + if self.G1.number_of_edges( + self.core_2[predecessor], G1_node + ) != self.G2.number_of_edges(predecessor, G2_node): + return False + + # R_succ + + # For each successor n' of n in the partial mapping, the corresponding + # node m' is a successor of m, and vice versa. Also, the number of + # edges must be equal. + if self.test != "mono": + for successor in self.G1[G1_node]: + if successor in self.core_1: + if self.core_1[successor] not in self.G2[G2_node]: + return False + elif self.G1.number_of_edges( + G1_node, successor + ) != self.G2.number_of_edges(G2_node, self.core_1[successor]): + return False + + for successor in self.G2[G2_node]: + if successor in self.core_2: + if self.core_2[successor] not in self.G1[G1_node]: + return False + elif self.test == "mono": + if self.G1.number_of_edges( + G1_node, self.core_2[successor] + ) < self.G2.number_of_edges(G2_node, successor): + return False + else: + if self.G1.number_of_edges( + G1_node, self.core_2[successor] + ) != self.G2.number_of_edges(G2_node, successor): + return False + + if self.test != "mono": + # Look ahead 1 + + # R_termin + # The number of predecessors of n that are in T_1^{in} is equal to the + # number of predecessors of m that are in T_2^{in}. + num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor in self.in_1) and (predecessor not in self.core_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor in self.in_2) and (predecessor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are in T_1^{in} is equal to the + # number of successors of m that are in T_2^{in}. 
+ num1 = 0 + for successor in self.G1[G1_node]: + if (successor in self.in_1) and (successor not in self.core_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor in self.in_2) and (successor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # R_termout + + # The number of predecessors of n that are in T_1^{out} is equal to the + # number of predecessors of m that are in T_2^{out}. + num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor in self.out_1) and (predecessor not in self.core_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor in self.out_2) and (predecessor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are in T_1^{out} is equal to the + # number of successors of m that are in T_2^{out}. + num1 = 0 + for successor in self.G1[G1_node]: + if (successor in self.out_1) and (successor not in self.core_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor in self.out_2) and (successor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Look ahead 2 + + # R_new + + # The number of predecessors of n that are neither in the core_1 nor + # T_1^{in} nor T_1^{out} is equal to the number of predecessors of m + # that are neither in core_2 nor T_2^{in} nor T_2^{out}. + num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor not in self.in_1) and (predecessor not in self.out_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor not in self.in_2) and (predecessor not in self.out_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are neither in the core_1 nor + # T_1^{in} nor T_1^{out} is equal to the number of successors of m + # that are neither in core_2 nor T_2^{in} nor T_2^{out}. + num1 = 0 + for successor in self.G1[G1_node]: + if (successor not in self.in_1) and (successor not in self.out_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor not in self.in_2) and (successor not in self.out_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Otherwise, this node pair is syntactically feasible! + return True + + def subgraph_is_isomorphic(self): + """Returns `True` if a subgraph of ``G1`` is isomorphic to ``G2``. 
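+
+        Here ``G1`` is the host graph and ``G2`` is the pattern: the test asks
+        whether some node-induced subgraph of ``G1`` is isomorphic to ``G2``.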
+
+        Examples
+        --------
+        When creating the `DiGraphMatcher`, the order of the arguments is important:
+
+        >>> G = nx.DiGraph([("A", "B"), ("B", "A"), ("B", "C"), ("C", "B")])
+        >>> H = nx.DiGraph(nx.path_graph(5))
+
+        Check whether a subgraph of G is isomorphic to H:
+
+        >>> isomatcher = nx.isomorphism.DiGraphMatcher(G, H)
+        >>> isomatcher.subgraph_is_isomorphic()
+        False
+
+        Check whether a subgraph of H is isomorphic to G:
+
+        >>> isomatcher = nx.isomorphism.DiGraphMatcher(H, G)
+        >>> isomatcher.subgraph_is_isomorphic()
+        True
+        """
+        return super().subgraph_is_isomorphic()
+
+    def subgraph_is_monomorphic(self):
+        """Returns `True` if a subgraph of ``G1`` is monomorphic to ``G2``.
+
+        Examples
+        --------
+        When creating the `DiGraphMatcher`, the order of the arguments is important.
+
+        >>> G = nx.DiGraph([("A", "B"), ("C", "B"), ("D", "C")])
+        >>> H = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 2)])
+
+        Check whether a subgraph of G is monomorphic to H:
+
+        >>> isomatcher = nx.isomorphism.DiGraphMatcher(G, H)
+        >>> isomatcher.subgraph_is_monomorphic()
+        False
+
+        Check whether a subgraph of H is monomorphic to G:
+
+        >>> isomatcher = nx.isomorphism.DiGraphMatcher(H, G)
+        >>> isomatcher.subgraph_is_monomorphic()
+        True
+        """
+        return super().subgraph_is_monomorphic()
+
+    def subgraph_isomorphisms_iter(self):
+        """Generator over isomorphisms between a subgraph of ``G1`` and ``G2``.
+
+        Examples
+        --------
+        When creating the `DiGraphMatcher`, the order of the arguments is important:
+
+        >>> G = nx.DiGraph([("B", "C"), ("C", "B"), ("C", "D"), ("D", "C")])
+        >>> H = nx.DiGraph(nx.path_graph(5))
+
+        Yield isomorphic mappings between ``H`` and subgraphs of ``G``:
+
+        >>> isomatcher = nx.isomorphism.DiGraphMatcher(G, H)
+        >>> list(isomatcher.subgraph_isomorphisms_iter())
+        []
+
+        Yield isomorphic mappings between ``G`` and subgraphs of ``H``:
+
+        >>> isomatcher = nx.isomorphism.DiGraphMatcher(H, G)
+        >>> next(isomatcher.subgraph_isomorphisms_iter())
+        {0: 'B', 1: 'C', 2: 'D'}
+        """
+        return super().subgraph_isomorphisms_iter()
+
+    def subgraph_monomorphisms_iter(self):
+        """Generator over monomorphisms between a subgraph of ``G1`` and ``G2``.
+
+        Examples
+        --------
+        When creating the `DiGraphMatcher`, the order of the arguments is important.
+
+        >>> G = nx.DiGraph([("A", "B"), ("C", "B"), ("D", "C")])
+        >>> H = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 2)])
+
+        Yield monomorphic mappings between ``H`` and subgraphs of ``G``:
+
+        >>> isomatcher = nx.isomorphism.DiGraphMatcher(G, H)
+        >>> list(isomatcher.subgraph_monomorphisms_iter())
+        []
+
+        Yield monomorphic mappings between ``G`` and subgraphs of ``H``:
+
+        >>> isomatcher = nx.isomorphism.DiGraphMatcher(H, G)
+        >>> next(isomatcher.subgraph_monomorphisms_iter())
+        {3: 'A', 2: 'B', 1: 'C', 0: 'D'}
+        """
+        return super().subgraph_monomorphisms_iter()
+
+
+class GMState:
+    """Internal representation of state for the GraphMatcher class.
+
+    This class is used internally by the GraphMatcher class. It is used
+    only to store state-specific data. There will be at most G2.order() of
+    these objects in memory at a time, due to the depth-first search
+    strategy employed by the VF2 algorithm.
+    """
+
+    def __init__(self, GM, G1_node=None, G2_node=None):
+        """Initializes GMState object.
+
+        Pass in the GraphMatcher to which this GMState belongs and the
+        new node pair that will be added to the GraphMatcher's current
+        isomorphism mapping.
+        """
+        self.GM = GM
+
+        # Initialize the last stored node pair.
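+        # (Sketch of the protocol used by match(): it constructs
+        # GMState(GM, u, v) to push a candidate pair u -> v onto the mapping,
+        # recurses, then calls restore() to backtrack; u and v stand for any
+        # feasible candidate pair.)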
+ self.G1_node = None + self.G2_node = None + self.depth = len(GM.core_1) + + if G1_node is None or G2_node is None: + # Then we reset the class variables + GM.core_1 = {} + GM.core_2 = {} + GM.inout_1 = {} + GM.inout_2 = {} + + # Watch out! G1_node == 0 should evaluate to True. + if G1_node is not None and G2_node is not None: + # Add the node pair to the isomorphism mapping. + GM.core_1[G1_node] = G2_node + GM.core_2[G2_node] = G1_node + + # Store the node that was added last. + self.G1_node = G1_node + self.G2_node = G2_node + + # Now we must update the other two vectors. + # We will add only if it is not in there already! + self.depth = len(GM.core_1) + + # First we add the new nodes... + if G1_node not in GM.inout_1: + GM.inout_1[G1_node] = self.depth + if G2_node not in GM.inout_2: + GM.inout_2[G2_node] = self.depth + + # Now we add every other node... + + # Updates for T_1^{inout} + new_nodes = set() + for node in GM.core_1: + new_nodes.update( + [neighbor for neighbor in GM.G1[node] if neighbor not in GM.core_1] + ) + for node in new_nodes: + if node not in GM.inout_1: + GM.inout_1[node] = self.depth + + # Updates for T_2^{inout} + new_nodes = set() + for node in GM.core_2: + new_nodes.update( + [neighbor for neighbor in GM.G2[node] if neighbor not in GM.core_2] + ) + for node in new_nodes: + if node not in GM.inout_2: + GM.inout_2[node] = self.depth + + def restore(self): + """Deletes the GMState object and restores the class variables.""" + # First we remove the node that was added from the core vectors. + # Watch out! G1_node == 0 should evaluate to True. + if self.G1_node is not None and self.G2_node is not None: + del self.GM.core_1[self.G1_node] + del self.GM.core_2[self.G2_node] + + # Now we revert the other two vectors. + # Thus, we delete all entries which have this depth level. + for vector in (self.GM.inout_1, self.GM.inout_2): + for node in list(vector.keys()): + if vector[node] == self.depth: + del vector[node] + + +class DiGMState: + """Internal representation of state for the DiGraphMatcher class. + + This class is used internally by the DiGraphMatcher class. It is used + only to store state specific data. There will be at most G2.order() of + these objects in memory at a time, due to the depth-first search + strategy employed by the VF2 algorithm. + + """ + + def __init__(self, GM, G1_node=None, G2_node=None): + """Initializes DiGMState object. + + Pass in the DiGraphMatcher to which this DiGMState belongs and the + new node pair that will be added to the GraphMatcher's current + isomorphism mapping. + """ + self.GM = GM + + # Initialize the last stored node pair. + self.G1_node = None + self.G2_node = None + self.depth = len(GM.core_1) + + if G1_node is None or G2_node is None: + # Then we reset the class variables + GM.core_1 = {} + GM.core_2 = {} + GM.in_1 = {} + GM.in_2 = {} + GM.out_1 = {} + GM.out_2 = {} + + # Watch out! G1_node == 0 should evaluate to True. + if G1_node is not None and G2_node is not None: + # Add the node pair to the isomorphism mapping. + GM.core_1[G1_node] = G2_node + GM.core_2[G2_node] = G1_node + + # Store the node that was added last. + self.G1_node = G1_node + self.G2_node = G2_node + + # Now we must update the other four vectors. + # We will add only if it is not in there already! + self.depth = len(GM.core_1) + + # First we add the new nodes... 
+        for vector in (GM.in_1, GM.out_1):
+            if G1_node not in vector:
+                vector[G1_node] = self.depth
+        for vector in (GM.in_2, GM.out_2):
+            if G2_node not in vector:
+                vector[G2_node] = self.depth
+
+        # Now we add every other node...
+
+        # Updates for T_1^{in}
+        new_nodes = set()
+        for node in GM.core_1:
+            new_nodes.update(
+                [
+                    predecessor
+                    for predecessor in GM.G1.predecessors(node)
+                    if predecessor not in GM.core_1
+                ]
+            )
+        for node in new_nodes:
+            if node not in GM.in_1:
+                GM.in_1[node] = self.depth
+
+        # Updates for T_2^{in}
+        new_nodes = set()
+        for node in GM.core_2:
+            new_nodes.update(
+                [
+                    predecessor
+                    for predecessor in GM.G2.predecessors(node)
+                    if predecessor not in GM.core_2
+                ]
+            )
+        for node in new_nodes:
+            if node not in GM.in_2:
+                GM.in_2[node] = self.depth
+
+        # Updates for T_1^{out}
+        new_nodes = set()
+        for node in GM.core_1:
+            new_nodes.update(
+                [
+                    successor
+                    for successor in GM.G1.successors(node)
+                    if successor not in GM.core_1
+                ]
+            )
+        for node in new_nodes:
+            if node not in GM.out_1:
+                GM.out_1[node] = self.depth
+
+        # Updates for T_2^{out}
+        new_nodes = set()
+        for node in GM.core_2:
+            new_nodes.update(
+                [
+                    successor
+                    for successor in GM.G2.successors(node)
+                    if successor not in GM.core_2
+                ]
+            )
+        for node in new_nodes:
+            if node not in GM.out_2:
+                GM.out_2[node] = self.depth
+
+    def restore(self):
+        """Deletes the DiGMState object and restores the class variables."""
+
+        # First we remove the node that was added from the core vectors.
+        # Watch out! G1_node == 0 should evaluate to True.
+        if self.G1_node is not None and self.G2_node is not None:
+            del self.GM.core_1[self.G1_node]
+            del self.GM.core_2[self.G2_node]
+
+        # Now we revert the other four vectors.
+        # Thus, we delete all entries which have this depth level.
+        for vector in (self.GM.in_1, self.GM.in_2, self.GM.out_1, self.GM.out_2):
+            for node in list(vector.keys()):
+                if vector[node] == self.depth:
+                    del vector[node]
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/matchhelpers.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/matchhelpers.py
new file mode 100644
index 0000000..b48820d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/matchhelpers.py
@@ -0,0 +1,352 @@
+"""Functions which help end users define custom node_match and
+edge_match functions to use during isomorphism checks.
+"""
+
+import math
+import types
+from itertools import permutations
+
+__all__ = [
+    "categorical_node_match",
+    "categorical_edge_match",
+    "categorical_multiedge_match",
+    "numerical_node_match",
+    "numerical_edge_match",
+    "numerical_multiedge_match",
+    "generic_node_match",
+    "generic_edge_match",
+    "generic_multiedge_match",
+]
+
+
+def copyfunc(f, name=None):
+    """Returns a deepcopy of a function."""
+    return types.FunctionType(
+        f.__code__, f.__globals__, name or f.__name__, f.__defaults__, f.__closure__
+    )
+
+
+def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
+    """Returns True if x and y are sufficiently close, elementwise.
+
+    Parameters
+    ----------
+    rtol : float
+        The relative error tolerance.
+    atol : float
+        The absolute error tolerance.
+
+    """
+    # assume finite weights, see numpy.allclose() for reference
+    return all(math.isclose(xi, yi, rel_tol=rtol, abs_tol=atol) for xi, yi in zip(x, y))
+
+
+categorical_doc = """
+Returns a comparison function for a categorical node attribute.
+ +The value(s) of the attr(s) must be hashable and comparable via the == +operator since they are placed into a set([]) object. If the sets from +G1 and G2 are the same, then the constructed function returns True. + +Parameters +---------- +attr : string | list + The categorical node attribute to compare, or a list of categorical + node attributes to compare. +default : value | list + The default value for the categorical node attribute, or a list of + default values for the categorical node attributes. + +Returns +------- +match : function + The customized, categorical `node_match` function. + +Examples +-------- +>>> import networkx.algorithms.isomorphism as iso +>>> nm = iso.categorical_node_match("size", 1) +>>> nm = iso.categorical_node_match(["color", "size"], ["red", 2]) + +""" + + +def categorical_node_match(attr, default): + if isinstance(attr, str): + + def match(data1, data2): + return data1.get(attr, default) == data2.get(attr, default) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(data1, data2): + return all(data1.get(attr, d) == data2.get(attr, d) for attr, d in attrs) + + return match + + +categorical_edge_match = copyfunc(categorical_node_match, "categorical_edge_match") + + +def categorical_multiedge_match(attr, default): + if isinstance(attr, str): + + def match(datasets1, datasets2): + values1 = {data.get(attr, default) for data in datasets1.values()} + values2 = {data.get(attr, default) for data in datasets2.values()} + return values1 == values2 + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(datasets1, datasets2): + values1 = set() + for data1 in datasets1.values(): + x = tuple(data1.get(attr, d) for attr, d in attrs) + values1.add(x) + values2 = set() + for data2 in datasets2.values(): + x = tuple(data2.get(attr, d) for attr, d in attrs) + values2.add(x) + return values1 == values2 + + return match + + +# Docstrings for categorical functions. +categorical_node_match.__doc__ = categorical_doc +categorical_edge_match.__doc__ = categorical_doc.replace("node", "edge") +tmpdoc = categorical_doc.replace("node", "edge") +tmpdoc = tmpdoc.replace("categorical_edge_match", "categorical_multiedge_match") +categorical_multiedge_match.__doc__ = tmpdoc + + +numerical_doc = """ +Returns a comparison function for a numerical node attribute. + +The value(s) of the attr(s) must be numerical and sortable. If the +sorted list of values from G1 and G2 are the same within some +tolerance, then the constructed function returns True. + +Parameters +---------- +attr : string | list + The numerical node attribute to compare, or a list of numerical + node attributes to compare. +default : value | list + The default value for the numerical node attribute, or a list of + default values for the numerical node attributes. +rtol : float + The relative error tolerance. +atol : float + The absolute error tolerance. + +Returns +------- +match : function + The customized, numerical `node_match` function. 
+ +Examples +-------- +>>> import networkx.algorithms.isomorphism as iso +>>> nm = iso.numerical_node_match("weight", 1.0) +>>> nm = iso.numerical_node_match(["weight", "linewidth"], [0.25, 0.5]) + +""" + + +def numerical_node_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08): + if isinstance(attr, str): + + def match(data1, data2): + return math.isclose( + data1.get(attr, default), + data2.get(attr, default), + rel_tol=rtol, + abs_tol=atol, + ) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(data1, data2): + values1 = [data1.get(attr, d) for attr, d in attrs] + values2 = [data2.get(attr, d) for attr, d in attrs] + return allclose(values1, values2, rtol=rtol, atol=atol) + + return match + + +numerical_edge_match = copyfunc(numerical_node_match, "numerical_edge_match") + + +def numerical_multiedge_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08): + if isinstance(attr, str): + + def match(datasets1, datasets2): + values1 = sorted(data.get(attr, default) for data in datasets1.values()) + values2 = sorted(data.get(attr, default) for data in datasets2.values()) + return allclose(values1, values2, rtol=rtol, atol=atol) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(datasets1, datasets2): + values1 = [] + for data1 in datasets1.values(): + x = tuple(data1.get(attr, d) for attr, d in attrs) + values1.append(x) + values2 = [] + for data2 in datasets2.values(): + x = tuple(data2.get(attr, d) for attr, d in attrs) + values2.append(x) + values1.sort() + values2.sort() + for xi, yi in zip(values1, values2): + if not allclose(xi, yi, rtol=rtol, atol=atol): + return False + else: + return True + + return match + + +# Docstrings for numerical functions. +numerical_node_match.__doc__ = numerical_doc +numerical_edge_match.__doc__ = numerical_doc.replace("node", "edge") +tmpdoc = numerical_doc.replace("node", "edge") +tmpdoc = tmpdoc.replace("numerical_edge_match", "numerical_multiedge_match") +numerical_multiedge_match.__doc__ = tmpdoc + + +generic_doc = """ +Returns a comparison function for a generic attribute. + +The value(s) of the attr(s) are compared using the specified +operators. If all the attributes are equal, then the constructed +function returns True. + +Parameters +---------- +attr : string | list + The node attribute to compare, or a list of node attributes + to compare. +default : value | list + The default value for the node attribute, or a list of + default values for the node attributes. +op : callable | list + The operator to use when comparing attribute values, or a list + of operators to use when comparing values for each attribute. + +Returns +------- +match : function + The customized, generic `node_match` function. 
+
+Examples
+--------
+>>> from operator import eq
+>>> from math import isclose
+>>> from networkx.algorithms.isomorphism import generic_node_match
+>>> nm = generic_node_match("weight", 1.0, isclose)
+>>> nm = generic_node_match("color", "red", eq)
+>>> nm = generic_node_match(["weight", "color"], [1.0, "red"], [isclose, eq])
+
+"""
+
+
+def generic_node_match(attr, default, op):
+    if isinstance(attr, str):
+
+        def match(data1, data2):
+            return op(data1.get(attr, default), data2.get(attr, default))
+
+    else:
+        attrs = list(zip(attr, default, op))  # Python 3
+
+        def match(data1, data2):
+            for attr, d, operator in attrs:
+                if not operator(data1.get(attr, d), data2.get(attr, d)):
+                    return False
+            else:
+                return True
+
+    return match
+
+
+generic_edge_match = copyfunc(generic_node_match, "generic_edge_match")
+
+
+def generic_multiedge_match(attr, default, op):
+    """Returns a comparison function for a generic attribute.
+
+    The value(s) of the attr(s) are compared using the specified
+    operators. If all the attributes are equal, then the constructed
+    function returns True. Potentially, the constructed edge_match
+    function can be slow since it must verify that no isomorphism
+    exists between the multiedges before it returns False.
+
+    Parameters
+    ----------
+    attr : string | list
+        The edge attribute to compare, or a list of edge attributes
+        to compare.
+    default : value | list
+        The default value for the edge attribute, or a list of
+        default values for the edge attributes.
+    op : callable | list
+        The operator to use when comparing attribute values, or a list
+        of operators to use when comparing values for each attribute.
+
+    Returns
+    -------
+    match : function
+        The customized, generic `edge_match` function.
+
+    Examples
+    --------
+    >>> from operator import eq
+    >>> from math import isclose
+    >>> from networkx.algorithms.isomorphism import generic_multiedge_match
+    >>> nm = generic_multiedge_match("weight", 1.0, isclose)
+    >>> nm = generic_multiedge_match("color", "red", eq)
+    >>> nm = generic_multiedge_match(["weight", "color"], [1.0, "red"], [isclose, eq])
+
+    """
+
+    # This is slow, but generic.
+    # We must test every possible isomorphism between the edges.
+    if isinstance(attr, str):
+        attr = [attr]
+        default = [default]
+        op = [op]
+    attrs = list(zip(attr, default))  # Python 3
+
+    def match(datasets1, datasets2):
+        values1 = []
+        for data1 in datasets1.values():
+            x = tuple(data1.get(attr, d) for attr, d in attrs)
+            values1.append(x)
+        values2 = []
+        for data2 in datasets2.values():
+            x = tuple(data2.get(attr, d) for attr, d in attrs)
+            values2.append(x)
+        for vals2 in permutations(values2):
+            for xi, yi in zip(values1, vals2):
+                if not all(map(lambda x, y, z: z(x, y), xi, yi, op)):
+                    # This is not an isomorphism, go to next permutation.
+                    break
+            else:
+                # Then we found an isomorphism.
+                return True
+        else:
+            # Then there are no isomorphisms between the multiedges.
+            return False
+
+    return match
+
+
+# Docstrings for generic functions.
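+# A usage sketch (G1 and G2 are placeholder graphs): matchers built by these
+# factories plug into the VF2 interfaces through the node_match/edge_match
+# keywords, e.g.
+#
+#     em = generic_edge_match("weight", 1.0, isclose)
+#     nx.is_isomorphic(G1, G2, edge_match=em)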
+generic_node_match.__doc__ = generic_doc
+generic_edge_match.__doc__ = generic_doc.replace("node", "edge")
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py
new file mode 100644
index 0000000..62cacc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py
@@ -0,0 +1,308 @@
+"""
+*****************************
+Time-respecting VF2 Algorithm
+*****************************
+
+An extension of the VF2 algorithm for time-respecting graph isomorphism
+testing in temporal graphs.
+
+A temporal graph is one in which edges contain a datetime attribute,
+denoting when interaction occurred between the incident nodes. A
+time-respecting subgraph of a temporal graph is a subgraph such that
+all interactions incident to a node occurred within a time threshold,
+delta, of each other. A directed time-respecting subgraph has the
+added constraint that incoming interactions to a node must precede
+outgoing interactions from the same node; this enforces a sense of
+directed flow.
+
+Introduction
+------------
+
+The TimeRespectingGraphMatcher and TimeRespectingDiGraphMatcher
+extend the GraphMatcher and DiGraphMatcher classes, respectively,
+to include temporal constraints on matches. This is achieved through
+a semantic check, via the semantic_feasibility() function.
+
+As well as including G1 (the graph in which to seek embeddings) and
+G2 (the subgraph structure of interest), the name of the temporal
+attribute on the edges and the time threshold, delta, must be supplied
+as arguments to the matching constructors.
+
+A delta of zero is the strictest temporal constraint on the match;
+only embeddings in which all interactions occur at the same time will
+be returned. A delta of one day will allow embeddings in which
+adjacent interactions occur up to a day apart.
+
+Examples
+--------
+
+Examples will be provided when the datetime type has been incorporated.
+
+
+Temporal Subgraph Isomorphism
+-----------------------------
+
+A brief discussion of the somewhat diverse current literature will be
+included here.
+
+References
+----------
+
+[1] Redmond, U. and Cunningham, P. Temporal subgraph isomorphism. In:
+The 2013 IEEE/ACM International Conference on Advances in Social
+Networks Analysis and Mining (ASONAM). Niagara Falls, Canada; 2013:
+pages 1451 - 1452.
+
+For a discussion of the literature on temporal networks:
+
+[2] P. Holme and J. Saramaki. Temporal networks. Physics Reports,
+519(3):97–125, 2012.
+
+Notes
+-----
+
+Handles directed and undirected graphs and graphs with parallel edges.
+
+"""
+
+import networkx as nx
+
+from .isomorphvf2 import DiGraphMatcher, GraphMatcher
+
+__all__ = ["TimeRespectingGraphMatcher", "TimeRespectingDiGraphMatcher"]
+
+
+class TimeRespectingGraphMatcher(GraphMatcher):
+    def __init__(self, G1, G2, temporal_attribute_name, delta):
+        """Initialize TimeRespectingGraphMatcher.
+
+        G1 and G2 should be nx.Graph or nx.MultiGraph instances.
+
+        Examples
+        --------
+        To create a TimeRespectingGraphMatcher which checks for
+        syntactic and semantic feasibility:
+
+        >>> from networkx.algorithms import isomorphism
+        >>> from datetime import timedelta
+        >>> G1 = nx.Graph(nx.path_graph(4, create_using=nx.Graph()))
+
+        >>> G2 = nx.Graph(nx.path_graph(4, create_using=nx.Graph()))
+
+        >>> GM = isomorphism.TimeRespectingGraphMatcher(
+        ...     G1, G2, "date", timedelta(days=1)
+        ...
) + """ + self.temporal_attribute_name = temporal_attribute_name + self.delta = delta + super().__init__(G1, G2) + + def one_hop(self, Gx, Gx_node, neighbors): + """ + Edges one hop out from a node in the mapping should be + time-respecting with respect to each other. + """ + dates = [] + for n in neighbors: + if isinstance(Gx, nx.Graph): # Graph G[u][v] returns the data dictionary. + dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for edge in Gx[Gx_node][ + n + ].values(): # Iterates all edges between node pair. + dates.append(edge[self.temporal_attribute_name]) + if any(x is None for x in dates): + raise ValueError("Datetime not supplied for at least one edge.") + return not dates or max(dates) - min(dates) <= self.delta + + def two_hop(self, Gx, core_x, Gx_node, neighbors): + """ + Paths of length 2 from Gx_node should be time-respecting. + """ + return all( + self.one_hop(Gx, v, [n for n in Gx[v] if n in core_x] + [Gx_node]) + for v in neighbors + ) + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if adding (G1_node, G2_node) is semantically + feasible. + + Any subclass which redefines semantic_feasibility() must + maintain the self.tests if needed, to keep the match() method + functional. Implementations should consider multigraphs. + """ + neighbors = [n for n in self.G1[G1_node] if n in self.core_1] + if not self.one_hop(self.G1, G1_node, neighbors): # Fail fast on first node. + return False + if not self.two_hop(self.G1, self.core_1, G1_node, neighbors): + return False + # Otherwise, this node is semantically feasible! + return True + + +class TimeRespectingDiGraphMatcher(DiGraphMatcher): + def __init__(self, G1, G2, temporal_attribute_name, delta): + """Initialize TimeRespectingDiGraphMatcher. + + G1 and G2 should be nx.DiGraph or nx.MultiDiGraph instances. + + Examples + -------- + To create a TimeRespectingDiGraphMatcher which checks for + syntactic and semantic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> from datetime import timedelta + >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + + >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + + >>> GM = isomorphism.TimeRespectingDiGraphMatcher( + ... G1, G2, "date", timedelta(days=1) + ... ) + """ + self.temporal_attribute_name = temporal_attribute_name + self.delta = delta + super().__init__(G1, G2) + + def get_pred_dates(self, Gx, Gx_node, core_x, pred): + """ + Get the dates of edges from predecessors. + """ + pred_dates = [] + if isinstance(Gx, nx.DiGraph): # Graph G[u][v] returns the data dictionary. + for n in pred: + pred_dates.append(Gx[n][Gx_node][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for n in pred: + for edge in Gx[n][ + Gx_node + ].values(): # Iterates all edge data between node pair. + pred_dates.append(edge[self.temporal_attribute_name]) + return pred_dates + + def get_succ_dates(self, Gx, Gx_node, core_x, succ): + """ + Get the dates of edges to successors. + """ + succ_dates = [] + if isinstance(Gx, nx.DiGraph): # Graph G[u][v] returns the data dictionary. + for n in succ: + succ_dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for n in succ: + for edge in Gx[Gx_node][ + n + ].values(): # Iterates all edge data between node pair. 
+                    succ_dates.append(edge[self.temporal_attribute_name])
+        return succ_dates
+
+    def one_hop(self, Gx, Gx_node, core_x, pred, succ):
+        """
+        Apply the temporal tests to the edges incident to the ego node.
+        """
+        pred_dates = self.get_pred_dates(Gx, Gx_node, core_x, pred)
+        succ_dates = self.get_succ_dates(Gx, Gx_node, core_x, succ)
+        return self.test_one(pred_dates, succ_dates) and self.test_two(
+            pred_dates, succ_dates
+        )
+
+    def two_hop_pred(self, Gx, Gx_node, core_x, pred):
+        """
+        Apply the one-hop tests to each mapped predecessor of the ego node.
+        """
+        return all(
+            self.one_hop(
+                Gx,
+                p,
+                core_x,
+                self.preds(Gx, core_x, p),
+                self.succs(Gx, core_x, p, Gx_node),
+            )
+            for p in pred
+        )
+
+    def two_hop_succ(self, Gx, Gx_node, core_x, succ):
+        """
+        Apply the one-hop tests to each mapped successor of the ego node.
+        """
+        return all(
+            self.one_hop(
+                Gx,
+                s,
+                core_x,
+                self.preds(Gx, core_x, s, Gx_node),
+                self.succs(Gx, core_x, s),
+            )
+            for s in succ
+        )
+
+    def preds(self, Gx, core_x, v, Gx_node=None):
+        pred = [n for n in Gx.predecessors(v) if n in core_x]
+        if Gx_node is not None:  # Gx_node may legitimately be the node 0.
+            pred.append(Gx_node)
+        return pred
+
+    def succs(self, Gx, core_x, v, Gx_node=None):
+        succ = [n for n in Gx.successors(v) if n in core_x]
+        if Gx_node is not None:  # Gx_node may legitimately be the node 0.
+            succ.append(Gx_node)
+        return succ
+
+    def test_one(self, pred_dates, succ_dates):
+        """
+        Edges one hop out from Gx_node in the mapping should be
+        time-respecting with respect to each other, regardless of
+        direction.
+        """
+        time_respecting = True
+        dates = pred_dates + succ_dates
+
+        if any(x is None for x in dates):
+            raise ValueError("Date or datetime not supplied for at least one edge.")
+
+        dates.sort()  # Small to large.
+        if 0 < len(dates) and not (dates[-1] - dates[0] <= self.delta):
+            time_respecting = False
+        return time_respecting
+
+    def test_two(self, pred_dates, succ_dates):
+        """
+        Edges into and out of Gx_node in the mapping should be ordered in
+        a time-respecting manner: incoming interactions must not occur
+        after outgoing ones.
+        """
+        time_respecting = True
+        pred_dates.sort()
+        succ_dates.sort()
+        # Fails if the first outgoing interaction precedes the last incoming
+        # one, i.e., the negation of the time-respecting condition.
+        if (
+            0 < len(succ_dates)
+            and 0 < len(pred_dates)
+            and succ_dates[0] < pred_dates[-1]
+        ):
+            time_respecting = False
+        return time_respecting
+
+    def semantic_feasibility(self, G1_node, G2_node):
+        """Returns True if adding (G1_node, G2_node) is semantically
+        feasible.
+
+        Any subclass which redefines semantic_feasibility() must
+        maintain the semantic tests if needed, to keep the match() method
+        functional. Implementations should consider multigraphs.
+        """
+        pred, succ = (
+            [n for n in self.G1.predecessors(G1_node) if n in self.core_1],
+            [n for n in self.G1.successors(G1_node) if n in self.core_1],
+        )
+        if not self.one_hop(
+            self.G1, G1_node, self.core_1, pred, succ
+        ):  # Fail fast on first node.
+            return False
+        if not self.two_hop_pred(self.G1, G1_node, self.core_1, pred):
+            return False
+        if not self.two_hop_succ(self.G1, G1_node, self.core_1, succ):
+            return False
+        # Otherwise, this node is semantically feasible!
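+        # (All adjacent interaction times around G1_node fit within delta,
+        # and incoming interactions do not come after outgoing ones.)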
+ return True diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..5e6989c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-312.pyc new file mode 100644 index 0000000..3bca9da Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-312.pyc new file mode 100644 index 0000000..abae016 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-312.pyc new file mode 100644 index 0000000..2a1ec58 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-312.pyc new file mode 100644 index 0000000..5956427 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-312.pyc new file mode 100644 index 0000000..79545e2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-312.pyc new file mode 100644 index 0000000..91d771f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-312.pyc new file mode 100644 index 0000000..3c4e9c9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-312.pyc new file mode 100644 index 0000000..23fb45d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-312.pyc new file mode 100644 index 0000000..0a53986 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 new file mode 100644 index 0000000..dac54f0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 new file mode 100644 index 0000000..6c6af68 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 new file mode 100644 index 0000000..60c3a3c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 new file mode 100644 index 0000000..0236872 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py new file mode 100644 index 0000000..46e5cea --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py @@ -0,0 +1,351 @@ +""" +Tests for ISMAGS isomorphism algorithm. +""" + +import pytest + +import networkx as nx +from networkx.algorithms import isomorphism as iso + + +def _matches_to_sets(matches): + """ + Helper function to facilitate comparing collections of dictionaries in + which order does not matter. 
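+
+    For example, ``_matches_to_sets([{1: "a"}, {1: "b"}])`` gives
+    ``{frozenset({(1, "a")}), frozenset({(1, "b")})}``.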
+ """ + return {frozenset(m.items()) for m in matches} + + +class TestSelfIsomorphism: + data = [ + ( + [ + (0, {"name": "a"}), + (1, {"name": "a"}), + (2, {"name": "b"}), + (3, {"name": "b"}), + (4, {"name": "a"}), + (5, {"name": "a"}), + ], + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], + ), + (range(1, 5), [(1, 2), (2, 4), (4, 3), (3, 1)]), + ( + [], + [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 5), + (5, 0), + (0, 6), + (6, 7), + (2, 8), + (8, 9), + (4, 10), + (10, 11), + ], + ), + ([], [(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 6)]), + ( + # 5 - 4 \ / 12 - 13 + # 0 - 3 + # 9 - 8 / \ 16 - 17 + # Assume 0 and 3 are coupled and no longer equivalent. + # Coupling node 4 to 8 means that 5 and 9 + # are no longer equivalent, pushing them in their own partitions. + # However, {5, 9} was considered equivalent to {13, 17}, which is *not* + # taken into account in the second refinement, tripping a (former) + # assertion failure. Note that this is actually the minimal failing + # example. + [], + [ + (0, 3), + (0, 4), + (4, 5), + (0, 8), + (8, 9), + (3, 12), + (12, 13), + (3, 16), + (16, 17), + ], + ), + ] + + def test_self_isomorphism(self): + """ + For some small, symmetric graphs, make sure that 1) they are isomorphic + to themselves, and 2) that only the identity mapping is found. + """ + for node_data, edge_data in self.data: + graph = nx.Graph() + graph.add_nodes_from(node_data) + graph.add_edges_from(edge_data) + + ismags = iso.ISMAGS( + graph, graph, node_match=iso.categorical_node_match("name", None) + ) + assert ismags.is_isomorphic() + assert ismags.subgraph_is_isomorphic() + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in graph.nodes} + ] + + def test_edgecase_self_isomorphism(self): + """ + This edgecase is one of the cases in which it is hard to find all + symmetry elements. + """ + graph = nx.Graph() + nx.add_path(graph, range(5)) + graph.add_edges_from([(2, 5), (5, 6)]) + + ismags = iso.ISMAGS(graph, graph) + ismags_answer = list(ismags.find_isomorphisms(True)) + assert ismags_answer == [{n: n for n in graph.nodes}] + + graph = nx.relabel_nodes(graph, {0: 0, 1: 1, 2: 2, 3: 3, 4: 6, 5: 4, 6: 5}) + ismags = iso.ISMAGS(graph, graph) + ismags_answer = list(ismags.find_isomorphisms(True)) + assert ismags_answer == [{n: n for n in graph.nodes}] + + def test_directed_self_isomorphism(self): + """ + For some small, directed, symmetric graphs, make sure that 1) they are + isomorphic to themselves, and 2) that only the identity mapping is + found. 
+ """ + for node_data, edge_data in self.data: + graph = nx.Graph() + graph.add_nodes_from(node_data) + graph.add_edges_from(edge_data) + + ismags = iso.ISMAGS( + graph, graph, node_match=iso.categorical_node_match("name", None) + ) + assert ismags.is_isomorphic() + assert ismags.subgraph_is_isomorphic() + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in graph.nodes} + ] + + +class TestSubgraphIsomorphism: + def test_isomorphism(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(4)) + + g2 = nx.Graph() + nx.add_cycle(g2, range(4)) + g2.add_edges_from(list(zip(g2, range(4, 8)))) + ismags = iso.ISMAGS(g2, g1) + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in g1.nodes} + ] + + def test_isomorphism2(self): + g1 = nx.Graph() + nx.add_path(g1, range(3)) + + g2 = g1.copy() + g2.add_edge(1, 3) + + ismags = iso.ISMAGS(g2, g1) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [ + {0: 0, 1: 1, 2: 2}, + {0: 0, 1: 1, 3: 2}, + {2: 0, 1: 1, 3: 2}, + ] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [ + {0: 2, 1: 1, 2: 0}, + {0: 2, 1: 1, 3: 0}, + {2: 2, 1: 1, 3: 0}, + ] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + def test_labeled_nodes(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(3)) + g1.nodes[1]["attr"] = True + + g2 = g1.copy() + g2.add_edge(1, 3) + ismags = iso.ISMAGS(g2, g1, node_match=lambda x, y: x == y) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [{0: 0, 1: 1, 2: 2}] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [{0: 2, 1: 1, 2: 0}] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + def test_labeled_edges(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(3)) + g1.edges[1, 2]["attr"] = True + + g2 = g1.copy() + g2.add_edge(1, 3) + ismags = iso.ISMAGS(g2, g1, edge_match=lambda x, y: x == y) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [{0: 0, 1: 1, 2: 2}] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [{1: 2, 0: 0, 2: 1}] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + +class TestWikipediaExample: + # Nodes 'a', 'b', 'c' and 'd' form a column. + # Nodes 'g', 'h', 'i' and 'j' form a column. + g1edges = [ + ["a", "g"], + ["a", "h"], + ["a", "i"], + ["b", "g"], + ["b", "h"], + ["b", "j"], + ["c", "g"], + ["c", "i"], + ["c", "j"], + ["d", "h"], + ["d", "i"], + ["d", "j"], + ] + + # Nodes 1,2,3,4 form the clockwise corners of a large square. 
+ # Nodes 5,6,7,8 form the clockwise corners of a small square + g2edges = [ + [1, 2], + [2, 3], + [3, 4], + [4, 1], + [5, 6], + [6, 7], + [7, 8], + [8, 5], + [1, 5], + [2, 6], + [3, 7], + [4, 8], + ] + + def test_graph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + gm = iso.ISMAGS(g1, g2) + assert gm.is_isomorphic() + + +class TestLargestCommonSubgraph: + def test_mcis(self): + # Example graphs from DOI: 10.1002/spe.588 + graph1 = nx.Graph() + graph1.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 4), (4, 5)]) + graph1.nodes[1]["color"] = 0 + + graph2 = nx.Graph() + graph2.add_edges_from( + [(1, 2), (2, 3), (2, 4), (3, 4), (3, 5), (5, 6), (5, 7), (6, 7)] + ) + graph2.nodes[1]["color"] = 1 + graph2.nodes[6]["color"] = 2 + graph2.nodes[7]["color"] = 2 + + ismags = iso.ISMAGS( + graph1, graph2, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags.subgraph_isomorphisms_iter(True)) == [] + assert list(ismags.subgraph_isomorphisms_iter(False)) == [] + found_mcis = _matches_to_sets(ismags.largest_common_subgraph()) + expected = _matches_to_sets( + [{2: 2, 3: 4, 4: 3, 5: 5}, {2: 4, 3: 2, 4: 3, 5: 5}] + ) + assert expected == found_mcis + + ismags = iso.ISMAGS( + graph2, graph1, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags.subgraph_isomorphisms_iter(True)) == [] + assert list(ismags.subgraph_isomorphisms_iter(False)) == [] + found_mcis = _matches_to_sets(ismags.largest_common_subgraph()) + # Same answer, but reversed. + expected = _matches_to_sets( + [{2: 2, 3: 4, 4: 3, 5: 5}, {4: 2, 2: 3, 3: 4, 5: 5}] + ) + assert expected == found_mcis + + def test_symmetry_mcis(self): + graph1 = nx.Graph() + nx.add_path(graph1, range(4)) + + graph2 = nx.Graph() + nx.add_path(graph2, range(3)) + graph2.add_edge(1, 3) + + # Only the symmetry of graph2 is taken into account here. + ismags1 = iso.ISMAGS( + graph1, graph2, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags1.subgraph_isomorphisms_iter(True)) == [] + found_mcis = _matches_to_sets(ismags1.largest_common_subgraph()) + expected = _matches_to_sets([{0: 0, 1: 1, 2: 2}, {1: 0, 3: 2, 2: 1}]) + assert expected == found_mcis + + # Only the symmetry of graph1 is taken into account here. 
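+        # ISMAGS deduplicates matches using the symmetries of its second
+        # argument (the pattern graph), so swapping the arguments changes
+        # which automorphisms are factored out of the results.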
+ ismags2 = iso.ISMAGS( + graph2, graph1, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags2.subgraph_isomorphisms_iter(True)) == [] + found_mcis = _matches_to_sets(ismags2.largest_common_subgraph()) + expected = _matches_to_sets( + [ + {3: 2, 0: 0, 1: 1}, + {2: 0, 0: 2, 1: 1}, + {3: 0, 0: 2, 1: 1}, + {3: 0, 1: 1, 2: 2}, + {0: 0, 1: 1, 2: 2}, + {2: 0, 3: 2, 1: 1}, + ] + ) + + assert expected == found_mcis + + found_mcis1 = _matches_to_sets(ismags1.largest_common_subgraph(False)) + found_mcis2 = ismags2.largest_common_subgraph(False) + found_mcis2 = [{v: k for k, v in d.items()} for d in found_mcis2] + found_mcis2 = _matches_to_sets(found_mcis2) + + expected = _matches_to_sets( + [ + {3: 2, 1: 3, 2: 1}, + {2: 0, 0: 2, 1: 1}, + {1: 2, 3: 3, 2: 1}, + {3: 0, 1: 3, 2: 1}, + {0: 2, 2: 3, 1: 1}, + {3: 0, 1: 2, 2: 1}, + {2: 0, 0: 3, 1: 1}, + {0: 0, 2: 3, 1: 1}, + {1: 0, 3: 3, 2: 1}, + {1: 0, 3: 2, 2: 1}, + {0: 3, 1: 1, 2: 2}, + {0: 0, 1: 1, 2: 2}, + ] + ) + assert expected == found_mcis1 + assert expected == found_mcis2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py new file mode 100644 index 0000000..806c337 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py @@ -0,0 +1,109 @@ +from functools import partial + +import pytest + +import networkx as nx +from networkx.algorithms import isomorphism as iso + +# Convenience functions for testing that the behavior of `could_be_isomorphic` +# with the "properties" kwarg is equivalent to the corresponding function (i.e. +# nx.fast_could_be_isomorphic or nx.faster_could_be_isomorphic) +fast_cbi = partial(nx.could_be_isomorphic, properties="dt") +faster_cbi = partial(nx.could_be_isomorphic, properties="d") + + +def test_graph_could_be_isomorphic_variants_deprecated(): + G1 = nx.Graph([(1, 2), (1, 3), (1, 5), (2, 3)]) + G2 = nx.Graph([(10, 20), (20, 30), (10, 30), (10, 50)]) + with pytest.deprecated_call(): # graph_could_be_isomorphic + result = nx.isomorphism.isomorph.graph_could_be_isomorphic(G1, G2) + assert nx.could_be_isomorphic(G1, G2) == result + with pytest.deprecated_call(): # fast_graph_could_be_isomorphic + result = nx.isomorphism.isomorph.fast_graph_could_be_isomorphic(G1, G2) + assert nx.fast_could_be_isomorphic(G1, G2) == result + with pytest.deprecated_call(): + result = nx.isomorphism.isomorph.faster_graph_could_be_isomorphic(G1, G2) + assert nx.faster_could_be_isomorphic(G1, G2) == result + + +@pytest.mark.parametrize("atlas_ids", [(699, 706), (864, 870)]) +def test_could_be_isomorphic_combined_properties(atlas_ids): + """There are two pairs of graphs from the graph atlas that have the same + combined degree-triangle distribution, but a different maximal clique + distribution. 
See gh-7852.""" + G, H = (nx.graph_atlas(idx) for idx in atlas_ids) + + assert not nx.is_isomorphic(G, H) + + # Degree only + assert nx.faster_could_be_isomorphic(G, H) + assert nx.could_be_isomorphic(G, H, properties="d") + # Degrees & triangles + assert nx.fast_could_be_isomorphic(G, H) + assert nx.could_be_isomorphic(G, H, properties="dt") + # Full properties table (degrees, triangles, cliques) + assert not nx.could_be_isomorphic(G, H) + assert not nx.could_be_isomorphic(G, H, properties="dtc") + # For these two cases, the clique distribution alone is enough to verify + # the graphs can't be isomorphic + assert not nx.could_be_isomorphic(G, H, properties="c") + + +def test_could_be_isomorphic_individual_vs_combined_dt(): + """A test case where G and H have identical degree and triangle distributions, + but are different when compared together""" + G = nx.Graph([(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (3, 4), (4, 5), (4, 6)]) + H = G.copy() + # Modify graphs to produce different clique distributions + G.add_edge(0, 7) + H.add_edge(4, 7) + assert nx.could_be_isomorphic(G, H, properties="d") + assert nx.could_be_isomorphic(G, H, properties="t") + assert not nx.could_be_isomorphic(G, H, properties="dt") + assert not nx.could_be_isomorphic(G, H, properties="c") + + +class TestIsomorph: + @classmethod + def setup_class(cls): + cls.G1 = nx.Graph() + cls.G2 = nx.Graph() + cls.G3 = nx.Graph() + cls.G4 = nx.Graph() + cls.G5 = nx.Graph() + cls.G6 = nx.Graph() + cls.G1.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 3]]) + cls.G2.add_edges_from([[10, 20], [20, 30], [10, 30], [10, 50]]) + cls.G3.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 5]]) + cls.G4.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 4]]) + cls.G5.add_edges_from([[1, 2], [1, 3]]) + cls.G6.add_edges_from([[10, 20], [20, 30], [10, 30], [10, 50], [20, 50]]) + + def test_could_be_isomorphic(self): + assert iso.could_be_isomorphic(self.G1, self.G2) + assert iso.could_be_isomorphic(self.G1, self.G3) + assert not iso.could_be_isomorphic(self.G1, self.G4) + assert iso.could_be_isomorphic(self.G3, self.G2) + assert not iso.could_be_isomorphic(self.G1, self.G6) + + @pytest.mark.parametrize("fn", (iso.fast_could_be_isomorphic, fast_cbi)) + def test_fast_could_be_isomorphic(self, fn): + assert fn(self.G3, self.G2) + assert not fn(self.G3, self.G5) + assert not fn(self.G1, self.G6) + + @pytest.mark.parametrize("fn", (iso.faster_could_be_isomorphic, faster_cbi)) + def test_faster_could_be_isomorphic(self, fn): + assert fn(self.G3, self.G2) + assert not fn(self.G3, self.G5) + assert not fn(self.G1, self.G6) + + def test_is_isomorphic(self): + assert iso.is_isomorphic(self.G1, self.G2) + assert not iso.is_isomorphic(self.G1, self.G4) + assert iso.is_isomorphic(self.G1.to_directed(), self.G2.to_directed()) + assert not iso.is_isomorphic(self.G1.to_directed(), self.G4.to_directed()) + with pytest.raises( + nx.NetworkXError, match="Graphs G1 and G2 are not of the same type." + ): + iso.is_isomorphic(self.G1.to_directed(), self.G1) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py new file mode 100644 index 0000000..9a351ed --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py @@ -0,0 +1,439 @@ +""" +Tests for VF2 isomorphism algorithm. 
+""" + +import importlib.resources +import random +import struct + +import pytest + +import networkx as nx +from networkx.algorithms import isomorphism as iso + + +class TestWikipediaExample: + # Source: https://en.wikipedia.org/wiki/Graph_isomorphism + + # Nodes 'a', 'b', 'c' and 'd' form a column. + # Nodes 'g', 'h', 'i' and 'j' form a column. + g1edges = [ + ["a", "g"], + ["a", "h"], + ["a", "i"], + ["b", "g"], + ["b", "h"], + ["b", "j"], + ["c", "g"], + ["c", "i"], + ["c", "j"], + ["d", "h"], + ["d", "i"], + ["d", "j"], + ] + + # Nodes 1,2,3,4 form the clockwise corners of a large square. + # Nodes 5,6,7,8 form the clockwise corners of a small square + g2edges = [ + [1, 2], + [2, 3], + [3, 4], + [4, 1], + [5, 6], + [6, 7], + [7, 8], + [8, 5], + [1, 5], + [2, 6], + [3, 7], + [4, 8], + ] + + def test_graph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + gm = iso.GraphMatcher(g1, g2) + assert gm.is_isomorphic() + # Just testing some cases + assert gm.subgraph_is_monomorphic() + + mapping = sorted(gm.mapping.items()) + + # this mapping is only one of the possibilities + # so this test needs to be reconsidered + # isomap = [('a', 1), ('b', 6), ('c', 3), ('d', 8), + # ('g', 2), ('h', 5), ('i', 4), ('j', 7)] + # assert_equal(mapping, isomap) + + def test_subgraph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + g3 = g2.subgraph([1, 2, 3, 4]) + gm = iso.GraphMatcher(g1, g3) + assert gm.subgraph_is_isomorphic() + + def test_subgraph_mono(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from([[1, 2], [2, 3], [3, 4]]) + gm = iso.GraphMatcher(g1, g2) + assert gm.subgraph_is_monomorphic() + + +class TestVF2GraphDB: + # https://web.archive.org/web/20090303210205/http://amalfi.dis.unina.it/graph/db/ + + @staticmethod + def create_graph(filename): + """Creates a Graph instance from the filename.""" + + # The file is assumed to be in the format from the VF2 graph database. + # Each file is composed of 16-bit numbers (unsigned short int). + # So we will want to read 2 bytes at a time. 
+
+        # We can read the number as follows:
+        #   number = struct.unpack('<H', f.read(2))
+            if dT.out_degree(v) > 0:
+                # get all the pairs of labels and nodes of children and sort by labels
+                # reverse=True to preserve DFS order, see gh-7945
+                s = sorted(((label[u], u) for u in dT.successors(v)), reverse=True)
+
+                # invert to give a list of two tuples
+                # the sorted labels, and the corresponding children
+                ordered_labels[v], ordered_children[v] = list(zip(*s))
+
+        # now collect and sort the sorted ordered_labels
+        # for all nodes in L[i], carrying along the node
+        forlabel = sorted((ordered_labels[v], v) for v in L[i])
+
+        # now assign labels to these nodes, according to the sorted order
+        # starting from 0, where identical ordered_labels get the same label
+        current = 0
+        for i, (ol, v) in enumerate(forlabel):
+            # advance to next label if not 0, and different from previous
+            if (i != 0) and (ol != forlabel[i - 1][0]):
+                current += 1
+            label[v] = current
+
+    # they are isomorphic if the labels of newroot1 and newroot2 are 0
+    isomorphism = []
+    if label[newroot1] == 0 and label[newroot2] == 0:
+        # now let's get the isomorphism by walking the ordered_children
+        stack = [(newroot1, newroot2)]
+        while stack:
+            curr_v, curr_w = stack.pop()
+            isomorphism.append((curr_v, curr_w))
+            stack.extend(zip(ordered_children[curr_v], ordered_children[curr_w]))
+
+        # get the mapping back in terms of the old names
+        # return in sorted order for neatness
+        isomorphism = [(namemap[u], namemap[v]) for (u, v) in isomorphism]
+
+    return isomorphism
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(graphs={"t1": 0, "t2": 1})
+def tree_isomorphism(t1, t2):
+    """
+    Return an isomorphic mapping between two trees `t1` and `t2`.
+
+    If `t1` and `t2` are not isomorphic, an empty list is returned.
+    Note that two trees may have more than one isomorphism, and this routine just
+    returns one valid mapping.
+
+    Parameters
+    ----------
+    t1 : undirected NetworkX graph
+        One of the trees being compared
+
+    t2 : undirected NetworkX graph
+        The other tree being compared
+
+    Returns
+    -------
+    isomorphism : list
+        A list of pairs in which the left element is a node in `t1`
+        and the right element is a node in `t2`. The pairs are in
+        arbitrary order. If the nodes in one tree are mapped to the names in
+        the other, the trees will be identical. Note that an isomorphism
+        will not necessarily be unique.
+
+        If `t1` and `t2` are not isomorphic, then it returns the empty list.
+
+    Raises
+    ------
+    NetworkXError
+        If either `t1` or `t2` is not a tree
+
+    Notes
+    -----
+    This runs in ``O(n*log(n))`` time for trees with ``n`` nodes.
+    """
+    if not nx.is_tree(t1):
+        raise nx.NetworkXError("t1 is not a tree")
+    if not nx.is_tree(t2):
+        raise nx.NetworkXError("t2 is not a tree")
+
+    # To be isomorphic, t1 and t2 must have the same number of nodes and sorted
+    # degree sequences
+    if not nx.faster_could_be_isomorphic(t1, t2):
+        return []
+
+    # A tree can have either 1 or 2 centers.
+    # If the number doesn't match then t1 and t2 are not isomorphic.
+    center1 = nx.center(t1)
+    center2 = nx.center(t2)
+
+    if len(center1) != len(center2):
+        return []
+
+    # If there is only 1 center in each, then use it.
+    if len(center1) == 1:
+        return rooted_tree_isomorphism(t1, center1[0], t2, center2[0])
+
+    # If they both have 2 centers, then try the first for t1
+    # with the first for t2.
+    attempts = rooted_tree_isomorphism(t1, center1[0], t2, center2[0])
+
+    # If that worked we're done.
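+    # (Any isomorphism must map centers to centers, so fixing t1's root at
+    # center1[0] and trying both of t2's centers as its image covers all
+    # cases; t1's second center never needs to be tried separately.)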
+    if len(attempts) > 0:
+        return attempts
+
+    # Otherwise, try center1[0] with center2[1] and see if that works
+    return rooted_tree_isomorphism(t1, center1[0], t2, center2[1])
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/vf2pp.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/vf2pp.py
new file mode 100644
index 0000000..f8d18ef
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/vf2pp.py
@@ -0,0 +1,1102 @@
+"""
+***************
+VF2++ Algorithm
+***************
+
+An implementation of the VF2++ algorithm [1]_ for graph isomorphism testing.
+
+The simplest interface to use this module is to call:
+
+`vf2pp_is_isomorphic`: to check whether two graphs are isomorphic.
+`vf2pp_isomorphism`: to obtain the node mapping between two graphs,
+in case they are isomorphic.
+`vf2pp_all_isomorphisms`: to generate all possible mappings between two graphs,
+if isomorphic.
+
+Introduction
+------------
+The VF2++ algorithm follows a similar logic to that of VF2, while also
+introducing new easy-to-check cutting rules and determining the optimal access
+order of nodes. It is also implemented in a non-recursive manner, which saves
+both time and space when compared to its recursive counterpart.
+
+The optimal node ordering is obtained by taking into consideration both the
+degree and the label rarity of each node.
+This way we place the nodes that are more likely to match first in the order,
+thus examining the most promising branches at the beginning.
+The rules also consider node labels, making it easier to prune unfruitful
+branches early in the process.
+
+Examples
+--------
+
+Suppose G1 and G2 are isomorphic graphs. Verification is as follows:
+
+Without node labels:
+
+>>> import networkx as nx
+>>> G1 = nx.path_graph(4)
+>>> G2 = nx.path_graph(4)
+>>> nx.vf2pp_is_isomorphic(G1, G2, node_label=None)
+True
+>>> nx.vf2pp_isomorphism(G1, G2, node_label=None)
+{1: 1, 2: 2, 0: 0, 3: 3}
+
+With node labels:
+
+>>> G1 = nx.path_graph(4)
+>>> G2 = nx.path_graph(4)
+>>> mapped = {1: 1, 2: 2, 3: 3, 0: 0}
+>>> nx.set_node_attributes(
+...     G1, dict(zip(G1, ["blue", "red", "green", "yellow"])), "label"
+... )
+>>> nx.set_node_attributes(
+...     G2,
+...     dict(zip([mapped[u] for u in G1], ["blue", "red", "green", "yellow"])),
+...     "label",
+... )
+>>> nx.vf2pp_is_isomorphic(G1, G2, node_label="label")
+True
+>>> nx.vf2pp_isomorphism(G1, G2, node_label="label")
+{1: 1, 2: 2, 0: 0, 3: 3}
+
+References
+----------
+.. [1] Jüttner, Alpár & Madarasi, Péter. (2018). "VF2++—An improved subgraph
+   isomorphism algorithm". Discrete Applied Mathematics. 242.
+   https://doi.org/10.1016/j.dam.2018.02.018
+
+"""
+
+import collections
+
+import networkx as nx
+
+__all__ = ["vf2pp_isomorphism", "vf2pp_is_isomorphic", "vf2pp_all_isomorphisms"]
+
+_GraphParameters = collections.namedtuple(
+    "_GraphParameters",
+    [
+        "G1",
+        "G2",
+        "G1_labels",
+        "G2_labels",
+        "nodes_of_G1Labels",
+        "nodes_of_G2Labels",
+        "G2_nodes_of_degree",
+    ],
+)
+
+_StateParameters = collections.namedtuple(
+    "_StateParameters",
+    [
+        "mapping",
+        "reverse_mapping",
+        "T1",
+        "T1_in",
+        "T1_tilde",
+        "T1_tilde_in",
+        "T2",
+        "T2_in",
+        "T2_tilde",
+        "T2_tilde_in",
+    ],
+)
+
+
+@nx._dispatchable(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"})
+def vf2pp_isomorphism(G1, G2, node_label=None, default_label=None):
+    """Return an isomorphic mapping between `G1` and `G2` if it exists.
+ + Parameters + ---------- + G1, G2 : NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism. + + node_label : str, optional + The name of the node attribute to be used when comparing nodes. + The default is `None`, meaning node attributes are not considered + in the comparison. Any node that doesn't have the `node_label` + attribute uses `default_label` instead. + + default_label : scalar + Default value to use when a node doesn't have an attribute + named `node_label`. Default is `None`. + + Returns + ------- + dict or None + Node mapping if the two graphs are isomorphic. None otherwise. + """ + try: + mapping = next(vf2pp_all_isomorphisms(G1, G2, node_label, default_label)) + return mapping + except StopIteration: + return None + + +@nx._dispatchable(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"}) +def vf2pp_is_isomorphic(G1, G2, node_label=None, default_label=None): + """Examines whether G1 and G2 are isomorphic. + + Parameters + ---------- + G1, G2 : NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism. + + node_label : str, optional + The name of the node attribute to be used when comparing nodes. + The default is `None`, meaning node attributes are not considered + in the comparison. Any node that doesn't have the `node_label` + attribute uses `default_label` instead. + + default_label : scalar + Default value to use when a node doesn't have an attribute + named `node_label`. Default is `None`. + + Returns + ------- + bool + True if the two graphs are isomorphic, False otherwise. + """ + if vf2pp_isomorphism(G1, G2, node_label, default_label) is not None: + return True + return False + + +@nx._dispatchable(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"}) +def vf2pp_all_isomorphisms(G1, G2, node_label=None, default_label=None): + """Yields all the possible mappings between G1 and G2. + + Parameters + ---------- + G1, G2 : NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism. + + node_label : str, optional + The name of the node attribute to be used when comparing nodes. + The default is `None`, meaning node attributes are not considered + in the comparison. Any node that doesn't have the `node_label` + attribute uses `default_label` instead. + + default_label : scalar + Default value to use when a node doesn't have an attribute + named `node_label`. Default is `None`. + + Yields + ------ + dict + Isomorphic mapping between the nodes in `G1` and `G2`. 
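+
+    Examples
+    --------
+    A path graph has exactly two automorphisms, the identity and the
+    reversal:
+
+    >>> G = nx.path_graph(3)
+    >>> sorted(sorted(m.items()) for m in nx.vf2pp_all_isomorphisms(G, G))
+    [[(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)]]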
+ """ + if G1.number_of_nodes() == 0 or G2.number_of_nodes() == 0: + return False + + # Create the degree dicts based on graph type + if G1.is_directed(): + G1_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(G1.in_degree, G1.out_degree) + } + G2_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(G2.in_degree, G2.out_degree) + } + else: + G1_degree = dict(G1.degree) + G2_degree = dict(G2.degree) + + if not G1.is_directed(): + find_candidates = _find_candidates + restore_Tinout = _restore_Tinout + else: + find_candidates = _find_candidates_Di + restore_Tinout = _restore_Tinout_Di + + # Check that both graphs have the same number of nodes and degree sequence + if G1.order() != G2.order(): + return False + if sorted(G1_degree.values()) != sorted(G2_degree.values()): + return False + + # Initialize parameters and cache necessary information about degree and labels + graph_params, state_params = _initialize_parameters( + G1, G2, G2_degree, node_label, default_label + ) + + # Check if G1 and G2 have the same labels, and that number of nodes per label + # is equal between the two graphs + if not _precheck_label_properties(graph_params): + return False + + # Calculate the optimal node ordering + node_order = _matching_order(graph_params) + + # Initialize the stack + stack = [] + candidates = iter( + find_candidates(node_order[0], graph_params, state_params, G1_degree) + ) + stack.append((node_order[0], candidates)) + + mapping = state_params.mapping + reverse_mapping = state_params.reverse_mapping + + # Index of the node from the order, currently being examined + matching_node = 1 + + while stack: + current_node, candidate_nodes = stack[-1] + + try: + candidate = next(candidate_nodes) + except StopIteration: + # If no remaining candidates, return to a previous state, and follow another branch + stack.pop() + matching_node -= 1 + if stack: + # Pop the previously added u-v pair, and look for a different candidate _v for u + popped_node1, _ = stack[-1] + popped_node2 = mapping[popped_node1] + mapping.pop(popped_node1) + reverse_mapping.pop(popped_node2) + restore_Tinout(popped_node1, popped_node2, graph_params, state_params) + continue + + if _feasibility(current_node, candidate, graph_params, state_params): + # Terminate if mapping is extended to its full + if len(mapping) == G2.number_of_nodes() - 1: + cp_mapping = mapping.copy() + cp_mapping[current_node] = candidate + yield cp_mapping + continue + + # Feasibility rules pass, so extend the mapping and update the parameters + mapping[current_node] = candidate + reverse_mapping[candidate] = current_node + _update_Tinout(current_node, candidate, graph_params, state_params) + # Append the next node and its candidates to the stack + candidates = iter( + find_candidates( + node_order[matching_node], graph_params, state_params, G1_degree + ) + ) + stack.append((node_order[matching_node], candidates)) + matching_node += 1 + + +def _precheck_label_properties(graph_params): + G1, G2, G1_labels, G2_labels, nodes_of_G1Labels, nodes_of_G2Labels, _ = graph_params + if any( + label not in nodes_of_G1Labels or len(nodes_of_G1Labels[label]) != len(nodes) + for label, nodes in nodes_of_G2Labels.items() + ): + return False + return True + + +def _initialize_parameters(G1, G2, G2_degree, node_label=None, default_label=-1): + """Initializes all the necessary parameters for VF2++ + + Parameters + ---------- + G1,G2: NetworkX Graph or MultiGraph instances. 
+ The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + Returns + ------- + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2 + G1_labels,G2_labels: dict + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. + It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes + that are not in the mapping, but are neighbors of nodes that are. + + T1_out, T2_out: set + Ti_out contains all the nodes from Gi, that are neither in the mapping + nor in Ti + """ + G1_labels = dict(G1.nodes(data=node_label, default=default_label)) + G2_labels = dict(G2.nodes(data=node_label, default=default_label)) + + graph_params = _GraphParameters( + G1, + G2, + G1_labels, + G2_labels, + nx.utils.groups(G1_labels), + nx.utils.groups(G2_labels), + nx.utils.groups(G2_degree), + ) + + T1, T1_in = set(), set() + T2, T2_in = set(), set() + if G1.is_directed(): + T1_tilde, T1_tilde_in = ( + set(G1.nodes()), + set(), + ) # todo: do we need Ti_tilde_in? What nodes does it have? + T2_tilde, T2_tilde_in = set(G2.nodes()), set() + else: + T1_tilde, T1_tilde_in = set(G1.nodes()), set() + T2_tilde, T2_tilde_in = set(G2.nodes()), set() + + state_params = _StateParameters( + {}, + {}, + T1, + T1_in, + T1_tilde, + T1_tilde_in, + T2, + T2_in, + T2_tilde, + T2_tilde_in, + ) + + return graph_params, state_params + + +def _matching_order(graph_params): + """The node ordering as introduced in VF2++. + + Notes + ----- + Taking into account the structure of the Graph and the node labeling, the + nodes are placed in an order such that, most of the unfruitful/infeasible + branches of the search space can be pruned on high levels, significantly + decreasing the number of visited states. The premise is that, the algorithm + will be able to recognize inconsistencies early, proceeding to go deep into + the search tree only if it's needed. + + Parameters + ---------- + graph_params: namedtuple + Contains: + + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism. + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively. + + Returns + ------- + node_order: list + The ordering of the nodes. 
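+
+    Notes
+    -----
+    Each BFS sweep is rooted at a node whose label is rarest in G2, breaking
+    ties by highest degree. Within a BFS layer, nodes with the most
+    already-ordered neighbors are placed first, then those of higher degree,
+    then those of rarer label.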
+ """ + G1, G2, G1_labels, _, _, nodes_of_G2Labels, _ = graph_params + if not G1 and not G2: + return {} + + if G1.is_directed(): + G1 = G1.to_undirected(as_view=True) + + V1_unordered = set(G1.nodes()) + label_rarity = {label: len(nodes) for label, nodes in nodes_of_G2Labels.items()} + used_degrees = dict.fromkeys(G1, 0) + node_order = [] + + while V1_unordered: + max_rarity = min(label_rarity[G1_labels[x]] for x in V1_unordered) + rarest_nodes = [ + n for n in V1_unordered if label_rarity[G1_labels[n]] == max_rarity + ] + max_node = max(rarest_nodes, key=G1.degree) + + for dlevel_nodes in nx.bfs_layers(G1, max_node): + nodes_to_add = dlevel_nodes.copy() + while nodes_to_add: + max_used_degree = max(used_degrees[n] for n in nodes_to_add) + max_used_degree_nodes = [ + n for n in nodes_to_add if used_degrees[n] == max_used_degree + ] + max_degree = max(G1.degree[n] for n in max_used_degree_nodes) + max_degree_nodes = [ + n for n in max_used_degree_nodes if G1.degree[n] == max_degree + ] + next_node = min( + max_degree_nodes, key=lambda x: label_rarity[G1_labels[x]] + ) + + node_order.append(next_node) + for node in G1.neighbors(next_node): + used_degrees[node] += 1 + + nodes_to_add.remove(next_node) + label_rarity[G1_labels[next_node]] -= 1 + V1_unordered.discard(next_node) + + return node_order + + +def _find_candidates( + u, graph_params, state_params, G1_degree +): # todo: make the 4th argument the degree of u + """Given node u of G1, finds the candidates of u from G2. + + Parameters + ---------- + u: Graph node + The node from G1 for which to find the candidates from G2. + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes + of G1. It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes + that are not in the mapping, but are neighbors of nodes that are. + + T1_tilde, T2_tilde: set + Ti_tilde contains all the nodes from Gi, that are neither in the + mapping nor in Ti + + Returns + ------- + candidates: set + The nodes from G2 which are candidates for u. 
+ """ + G1, G2, G1_labels, _, _, nodes_of_G2Labels, G2_nodes_of_degree = graph_params + mapping, reverse_mapping, _, _, _, _, _, _, T2_tilde, _ = state_params + + covered_nbrs = [nbr for nbr in G1[u] if nbr in mapping] + if not covered_nbrs: + candidates = set(nodes_of_G2Labels[G1_labels[u]]) + candidates.intersection_update(G2_nodes_of_degree[G1_degree[u]]) + candidates.intersection_update(T2_tilde) + candidates.difference_update(reverse_mapping) + if G1.is_multigraph(): + candidates.difference_update( + { + node + for node in candidates + if G1.number_of_edges(u, u) != G2.number_of_edges(node, node) + } + ) + return candidates + + nbr1 = covered_nbrs[0] + common_nodes = set(G2[mapping[nbr1]]) + + for nbr1 in covered_nbrs[1:]: + common_nodes.intersection_update(G2[mapping[nbr1]]) + + common_nodes.difference_update(reverse_mapping) + common_nodes.intersection_update(G2_nodes_of_degree[G1_degree[u]]) + common_nodes.intersection_update(nodes_of_G2Labels[G1_labels[u]]) + if G1.is_multigraph(): + common_nodes.difference_update( + { + node + for node in common_nodes + if G1.number_of_edges(u, u) != G2.number_of_edges(node, node) + } + ) + return common_nodes + + +def _find_candidates_Di(u, graph_params, state_params, G1_degree): + G1, G2, G1_labels, _, _, nodes_of_G2Labels, G2_nodes_of_degree = graph_params + mapping, reverse_mapping, _, _, _, _, _, _, T2_tilde, _ = state_params + + covered_successors = [succ for succ in G1[u] if succ in mapping] + covered_predecessors = [pred for pred in G1.pred[u] if pred in mapping] + + if not (covered_successors or covered_predecessors): + candidates = set(nodes_of_G2Labels[G1_labels[u]]) + candidates.intersection_update(G2_nodes_of_degree[G1_degree[u]]) + candidates.intersection_update(T2_tilde) + candidates.difference_update(reverse_mapping) + if G1.is_multigraph(): + candidates.difference_update( + { + node + for node in candidates + if G1.number_of_edges(u, u) != G2.number_of_edges(node, node) + } + ) + return candidates + + if covered_successors: + succ1 = covered_successors[0] + common_nodes = set(G2.pred[mapping[succ1]]) + + for succ1 in covered_successors[1:]: + common_nodes.intersection_update(G2.pred[mapping[succ1]]) + else: + pred1 = covered_predecessors.pop() + common_nodes = set(G2[mapping[pred1]]) + + for pred1 in covered_predecessors: + common_nodes.intersection_update(G2[mapping[pred1]]) + + common_nodes.difference_update(reverse_mapping) + common_nodes.intersection_update(G2_nodes_of_degree[G1_degree[u]]) + common_nodes.intersection_update(nodes_of_G2Labels[G1_labels[u]]) + if G1.is_multigraph(): + common_nodes.difference_update( + { + node + for node in common_nodes + if G1.number_of_edges(u, u) != G2.number_of_edges(node, node) + } + ) + return common_nodes + + +def _feasibility(node1, node2, graph_params, state_params): + """Given a candidate pair of nodes u and v from G1 and G2 respectively, + checks if it's feasible to extend the mapping, i.e. if u and v can be matched. + + Notes + ----- + This function performs all the necessary checking by applying both consistency + and cutting rules. + + Parameters + ---------- + node1, node2: Graph node + The candidate pair of nodes being checked for matching + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. 
+ The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes + of G1. It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes + that are not in the mapping, but are neighbors of nodes that are. + + T1_out, T2_out: set + Ti_out contains all the nodes from Gi, that are neither in the mapping + nor in Ti + + Returns + ------- + True if all checks are successful, False otherwise. + """ + G1 = graph_params.G1 + + if _cut_PT(node1, node2, graph_params, state_params): + return False + + if G1.is_multigraph(): + if not _consistent_PT(node1, node2, graph_params, state_params): + return False + + return True + + +def _cut_PT(u, v, graph_params, state_params): + """Implements the cutting rules for the ISO problem. + + Parameters + ---------- + u, v: Graph node + The two candidate nodes being examined. + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes + of G1. It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes + that are not in the mapping, but are neighbors of nodes that are. + + T1_tilde, T2_tilde: set + Ti_out contains all the nodes from Gi, that are neither in the + mapping nor in Ti + + Returns + ------- + True if we should prune this branch, i.e. the node pair failed the cutting checks. False otherwise. + """ + G1, G2, G1_labels, G2_labels, _, _, _ = graph_params + ( + _, + _, + T1, + T1_in, + T1_tilde, + _, + T2, + T2_in, + T2_tilde, + _, + ) = state_params + + u_labels_predecessors, v_labels_predecessors = {}, {} + if G1.is_directed(): + u_labels_predecessors = nx.utils.groups( + {n1: G1_labels[n1] for n1 in G1.pred[u]} + ) + v_labels_predecessors = nx.utils.groups( + {n2: G2_labels[n2] for n2 in G2.pred[v]} + ) + + if set(u_labels_predecessors.keys()) != set(v_labels_predecessors.keys()): + return True + + u_labels_successors = nx.utils.groups({n1: G1_labels[n1] for n1 in G1[u]}) + v_labels_successors = nx.utils.groups({n2: G2_labels[n2] for n2 in G2[v]}) + + # if the neighbors of u, do not have the same labels as those of v, NOT feasible. 
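+    # Beyond equal label sets, each label class of the neighborhood must have
+    # the same number of members inside the frontier (T1/T2) and inside the
+    # unexplored region (T1_tilde/T2_tilde) on both sides; see the checks below.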
+ if set(u_labels_successors.keys()) != set(v_labels_successors.keys()): + return True + + for label, G1_nbh in u_labels_successors.items(): + G2_nbh = v_labels_successors[label] + + if G1.is_multigraph(): + # Check for every neighbor in the neighborhood, if u-nbr1 has same edges as v-nbr2 + u_nbrs_edges = sorted(G1.number_of_edges(u, x) for x in G1_nbh) + v_nbrs_edges = sorted(G2.number_of_edges(v, x) for x in G2_nbh) + if any( + u_nbr_edges != v_nbr_edges + for u_nbr_edges, v_nbr_edges in zip(u_nbrs_edges, v_nbrs_edges) + ): + return True + + if len(T1.intersection(G1_nbh)) != len(T2.intersection(G2_nbh)): + return True + if len(T1_tilde.intersection(G1_nbh)) != len(T2_tilde.intersection(G2_nbh)): + return True + if G1.is_directed() and len(T1_in.intersection(G1_nbh)) != len( + T2_in.intersection(G2_nbh) + ): + return True + + if not G1.is_directed(): + return False + + for label, G1_pred in u_labels_predecessors.items(): + G2_pred = v_labels_predecessors[label] + + if G1.is_multigraph(): + # Check for every neighbor in the neighborhood, if u-nbr1 has same edges as v-nbr2 + u_pred_edges = sorted(G1.number_of_edges(u, x) for x in G1_pred) + v_pred_edges = sorted(G2.number_of_edges(v, x) for x in G2_pred) + if any( + u_nbr_edges != v_nbr_edges + for u_nbr_edges, v_nbr_edges in zip(u_pred_edges, v_pred_edges) + ): + return True + + if len(T1.intersection(G1_pred)) != len(T2.intersection(G2_pred)): + return True + if len(T1_tilde.intersection(G1_pred)) != len(T2_tilde.intersection(G2_pred)): + return True + if len(T1_in.intersection(G1_pred)) != len(T2_in.intersection(G2_pred)): + return True + + return False + + +def _consistent_PT(u, v, graph_params, state_params): + """Checks the consistency of extending the mapping using the current node pair. + + Parameters + ---------- + u, v: Graph node + The two candidate nodes being examined. + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. + It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes + that are not in the mapping, but are neighbors of nodes that are. + + T1_out, T2_out: set + Ti_out contains all the nodes from Gi, that are neither in the mapping + nor in Ti + + Returns + ------- + True if the pair passes all the consistency checks successfully. False otherwise. 
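+
+    Notes
+    -----
+    The extension is consistent when every edge joining ``u`` to an
+    already-mapped node is matched by an edge of equal multiplicity joining
+    ``v`` to that node's image, and vice versa; for directed graphs the same
+    must hold for predecessor edges.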
+ """ + G1, G2 = graph_params.G1, graph_params.G2 + mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping + + for neighbor in G1[u]: + if neighbor in mapping: + if G1.number_of_edges(u, neighbor) != G2.number_of_edges( + v, mapping[neighbor] + ): + return False + + for neighbor in G2[v]: + if neighbor in reverse_mapping: + if G1.number_of_edges(u, reverse_mapping[neighbor]) != G2.number_of_edges( + v, neighbor + ): + return False + + if not G1.is_directed(): + return True + + for predecessor in G1.pred[u]: + if predecessor in mapping: + if G1.number_of_edges(predecessor, u) != G2.number_of_edges( + mapping[predecessor], v + ): + return False + + for predecessor in G2.pred[v]: + if predecessor in reverse_mapping: + if G1.number_of_edges( + reverse_mapping[predecessor], u + ) != G2.number_of_edges(predecessor, v): + return False + + return True + + +def _update_Tinout(new_node1, new_node2, graph_params, state_params): + """Updates the Ti/Ti_out (i=1,2) when a new node pair u-v is added to the mapping. + + Notes + ----- + This function should be called right after the feasibility checks are passed, + and node1 is mapped to node2. The purpose of this function is to avoid brute + force computing of Ti/Ti_out by iterating over all nodes of the graph and + checking which nodes satisfy the necessary conditions. Instead, in every step + of the algorithm we focus exclusively on the two nodes that are being added + to the mapping, incrementally updating Ti/Ti_out. + + Parameters + ---------- + new_node1, new_node2: Graph node + The two new nodes, added to the mapping. + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. + It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes + that are not in the mapping, but are neighbors of nodes that are. 
+ + T1_tilde, T2_tilde: set + Ti_out contains all the nodes from Gi, that are neither in the mapping nor in Ti + """ + G1, G2, _, _, _, _, _ = graph_params + ( + mapping, + reverse_mapping, + T1, + T1_in, + T1_tilde, + T1_tilde_in, + T2, + T2_in, + T2_tilde, + T2_tilde_in, + ) = state_params + + uncovered_successors_G1 = {succ for succ in G1[new_node1] if succ not in mapping} + uncovered_successors_G2 = { + succ for succ in G2[new_node2] if succ not in reverse_mapping + } + + # Add the uncovered neighbors of node1 and node2 in T1 and T2 respectively + T1.update(uncovered_successors_G1) + T2.update(uncovered_successors_G2) + T1.discard(new_node1) + T2.discard(new_node2) + + T1_tilde.difference_update(uncovered_successors_G1) + T2_tilde.difference_update(uncovered_successors_G2) + T1_tilde.discard(new_node1) + T2_tilde.discard(new_node2) + + if not G1.is_directed(): + return + + uncovered_predecessors_G1 = { + pred for pred in G1.pred[new_node1] if pred not in mapping + } + uncovered_predecessors_G2 = { + pred for pred in G2.pred[new_node2] if pred not in reverse_mapping + } + + T1_in.update(uncovered_predecessors_G1) + T2_in.update(uncovered_predecessors_G2) + T1_in.discard(new_node1) + T2_in.discard(new_node2) + + T1_tilde.difference_update(uncovered_predecessors_G1) + T2_tilde.difference_update(uncovered_predecessors_G2) + T1_tilde.discard(new_node1) + T2_tilde.discard(new_node2) + + +def _restore_Tinout(popped_node1, popped_node2, graph_params, state_params): + """Restores the previous version of Ti/Ti_out when a node pair is deleted + from the mapping. + + Parameters + ---------- + popped_node1, popped_node2: Graph node + The two nodes deleted from the mapping. + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. + It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes + that are not in the mapping, but are neighbors of nodes that are. + + T1_tilde, T2_tilde: set + Ti_out contains all the nodes from Gi, that are neither in the mapping + nor in Ti + """ + # If the node we want to remove from the mapping, has at least one covered + # neighbor, add it to T1. + G1, G2, _, _, _, _, _ = graph_params + ( + mapping, + reverse_mapping, + T1, + T1_in, + T1_tilde, + T1_tilde_in, + T2, + T2_in, + T2_tilde, + T2_tilde_in, + ) = state_params + + is_added = False + for neighbor in G1[popped_node1]: + if neighbor in mapping: + # if a neighbor of the excluded node1 is in the mapping, keep node1 in T1 + is_added = True + T1.add(popped_node1) + else: + # check if its neighbor has another connection with a covered node. + # If not, only then exclude it from T1 + if any(nbr in mapping for nbr in G1[neighbor]): + continue + T1.discard(neighbor) + T1_tilde.add(neighbor) + + # Case where the node is not present in neither the mapping nor T1. 
+ # By definition, it should belong to T1_tilde + if not is_added: + T1_tilde.add(popped_node1) + + is_added = False + for neighbor in G2[popped_node2]: + if neighbor in reverse_mapping: + is_added = True + T2.add(popped_node2) + else: + if any(nbr in reverse_mapping for nbr in G2[neighbor]): + continue + T2.discard(neighbor) + T2_tilde.add(neighbor) + + if not is_added: + T2_tilde.add(popped_node2) + + +def _restore_Tinout_Di(popped_node1, popped_node2, graph_params, state_params): + # If the node we want to remove from the mapping, has at least one covered neighbor, add it to T1. + G1, G2, _, _, _, _, _ = graph_params + ( + mapping, + reverse_mapping, + T1, + T1_in, + T1_tilde, + T1_tilde_in, + T2, + T2_in, + T2_tilde, + T2_tilde_in, + ) = state_params + + is_added = False + for successor in G1[popped_node1]: + if successor in mapping: + # if a neighbor of the excluded node1 is in the mapping, keep node1 in T1 + is_added = True + T1_in.add(popped_node1) + else: + # check if its neighbor has another connection with a covered node. + # If not, only then exclude it from T1 + if not any(pred in mapping for pred in G1.pred[successor]): + T1.discard(successor) + + if not any(succ in mapping for succ in G1[successor]): + T1_in.discard(successor) + + if successor not in T1: + if successor not in T1_in: + T1_tilde.add(successor) + + for predecessor in G1.pred[popped_node1]: + if predecessor in mapping: + # if a neighbor of the excluded node1 is in the mapping, keep node1 in T1 + is_added = True + T1.add(popped_node1) + else: + # check if its neighbor has another connection with a covered node. + # If not, only then exclude it from T1 + if not any(pred in mapping for pred in G1.pred[predecessor]): + T1.discard(predecessor) + + if not any(succ in mapping for succ in G1[predecessor]): + T1_in.discard(predecessor) + + if not (predecessor in T1 or predecessor in T1_in): + T1_tilde.add(predecessor) + + # Case where the node is not present in neither the mapping nor T1. + # By definition it should belong to T1_tilde + if not is_added: + T1_tilde.add(popped_node1) + + is_added = False + for successor in G2[popped_node2]: + if successor in reverse_mapping: + is_added = True + T2_in.add(popped_node2) + else: + if not any(pred in reverse_mapping for pred in G2.pred[successor]): + T2.discard(successor) + + if not any(succ in reverse_mapping for succ in G2[successor]): + T2_in.discard(successor) + + if successor not in T2: + if successor not in T2_in: + T2_tilde.add(successor) + + for predecessor in G2.pred[popped_node2]: + if predecessor in reverse_mapping: + # if a neighbor of the excluded node1 is in the mapping, keep node1 in T1 + is_added = True + T2.add(popped_node2) + else: + # check if its neighbor has another connection with a covered node. 
+ # If not, only then exclude it from T1 + if not any(pred in reverse_mapping for pred in G2.pred[predecessor]): + T2.discard(predecessor) + + if not any(succ in reverse_mapping for succ in G2[predecessor]): + T2_in.discard(predecessor) + + if not (predecessor in T2 or predecessor in T2_in): + T2_tilde.add(predecessor) + + if not is_added: + T2_tilde.add(popped_node2) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py new file mode 100644 index 0000000..6fcf8a1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py @@ -0,0 +1,192 @@ +""" +Module to simplify the specification of user-defined equality functions for +node and edge attributes during isomorphism checks. + +During the construction of an isomorphism, the algorithm considers two +candidate nodes n1 in G1 and n2 in G2. The graphs G1 and G2 are then +compared with respect to properties involving n1 and n2, and if the outcome +is good, then the candidate nodes are considered isomorphic. NetworkX +provides a simple mechanism for users to extend the comparisons to include +node and edge attributes. + +Node attributes are handled by the node_match keyword. When considering +n1 and n2, the algorithm passes their node attribute dictionaries to +node_match, and if it returns False, then n1 and n2 cannot be +considered to be isomorphic. + +Edge attributes are handled by the edge_match keyword. When considering +n1 and n2, the algorithm must verify that outgoing edges from n1 are +commensurate with the outgoing edges for n2. If the graph is directed, +then a similar check is also performed for incoming edges. + +Focusing only on outgoing edges, we consider pairs of nodes (n1, v1) from +G1 and (n2, v2) from G2. For graphs and digraphs, there is only one edge +between (n1, v1) and only one edge between (n2, v2). Those edge attribute +dictionaries are passed to edge_match, and if it returns False, then +n1 and n2 cannot be considered isomorphic. For multigraphs and +multidigraphs, there can be multiple edges between (n1, v1) and also +multiple edges between (n2, v2). Now, there must exist an isomorphism +from "all the edges between (n1, v1)" to "all the edges between (n2, v2)". +So, all of the edge attribute dictionaries are passed to edge_match, and +it must determine if there is an isomorphism between the two sets of edges. +""" + +from . 
import isomorphvf2 as vf2 + +__all__ = ["GraphMatcher", "DiGraphMatcher", "MultiGraphMatcher", "MultiDiGraphMatcher"] + + +def _semantic_feasibility(self, G1_node, G2_node): + """Returns True if mapping G1_node to G2_node is semantically feasible.""" + # Make sure the nodes match + if self.node_match is not None: + nm = self.node_match(self.G1.nodes[G1_node], self.G2.nodes[G2_node]) + if not nm: + return False + + # Make sure the edges match + if self.edge_match is not None: + # Cached lookups + G1nbrs = self.G1_adj[G1_node] + G2nbrs = self.G2_adj[G2_node] + core_1 = self.core_1 + edge_match = self.edge_match + + for neighbor in G1nbrs: + # G1_node is not in core_1, so we must handle R_self separately + if neighbor == G1_node: + if G2_node in G2nbrs and not edge_match( + G1nbrs[G1_node], G2nbrs[G2_node] + ): + return False + elif neighbor in core_1: + G2_nbr = core_1[neighbor] + if G2_nbr in G2nbrs and not edge_match( + G1nbrs[neighbor], G2nbrs[G2_nbr] + ): + return False + # syntactic check has already verified that neighbors are symmetric + + return True + + +class GraphMatcher(vf2.GraphMatcher): + """VF2 isomorphism checker for undirected graphs.""" + + def __init__(self, G1, G2, node_match=None, edge_match=None): + """Initialize graph matcher. + + Parameters + ---------- + G1, G2: graph + The graphs to be tested. + + node_match: callable + A function that returns True iff node n1 in G1 and n2 in G2 + should be considered equal during the isomorphism test. The + function will be called like:: + + node_match(G1.nodes[n1], G2.nodes[n2]) + + That is, the function will receive the node attribute dictionaries + of the nodes under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + edge_match: callable + A function that returns True iff the edge attribute dictionary for + the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be + considered equal during the isomorphism test. The function will be + called like:: + + edge_match(G1[u1][v1], G2[u2][v2]) + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + """ + vf2.GraphMatcher.__init__(self, G1, G2) + + self.node_match = node_match + self.edge_match = edge_match + + # These will be modified during checks to minimize code repeat. + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + semantic_feasibility = _semantic_feasibility + + +class DiGraphMatcher(vf2.DiGraphMatcher): + """VF2 isomorphism checker for directed graphs.""" + + def __init__(self, G1, G2, node_match=None, edge_match=None): + """Initialize graph matcher. + + Parameters + ---------- + G1, G2 : graph + The graphs to be tested. + + node_match : callable + A function that returns True iff node n1 in G1 and n2 in G2 + should be considered equal during the isomorphism test. The + function will be called like:: + + node_match(G1.nodes[n1], G2.nodes[n2]) + + That is, the function will receive the node attribute dictionaries + of the nodes under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + edge_match : callable + A function that returns True iff the edge attribute dictionary for + the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be + considered equal during the isomorphism test. 
The function will be + called like:: + + edge_match(G1[u1][v1], G2[u2][v2]) + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + """ + vf2.DiGraphMatcher.__init__(self, G1, G2) + + self.node_match = node_match + self.edge_match = edge_match + + # These will be modified during checks to minimize code repeat. + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if mapping G1_node to G2_node is semantically feasible.""" + + # Test node_match and also test edge_match on successors + feasible = _semantic_feasibility(self, G1_node, G2_node) + if not feasible: + return False + + # Test edge_match on predecessors + self.G1_adj = self.G1.pred + self.G2_adj = self.G2.pred + feasible = _semantic_feasibility(self, G1_node, G2_node) + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + return feasible + + +# The "semantics" of edge_match are different for multi(di)graphs, but +# the implementation is the same. So, technically we do not need to +# provide "multi" versions, but we do so to match NetworkX's base classes. + + +class MultiGraphMatcher(GraphMatcher): + """VF2 isomorphism checker for undirected multigraphs.""" + + +class MultiDiGraphMatcher(DiGraphMatcher): + """VF2 isomorphism checker for directed multigraphs.""" diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__init__.py new file mode 100644 index 0000000..6009f00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__init__.py @@ -0,0 +1,2 @@ +from networkx.algorithms.link_analysis.hits_alg import * +from networkx.algorithms.link_analysis.pagerank_alg import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..02973b3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-312.pyc new file mode 100644 index 0000000..5895347 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-312.pyc new file mode 100644 index 0000000..154dfb6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/hits_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/hits_alg.py new file mode 100644 index 0000000..d9e3069 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/hits_alg.py @@ -0,0 +1,337 @@ +"""Hubs and authorities analysis of graph structure.""" 
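+
+# In the SciPy-based implementation below, the dominant singular triplet of
+# the adjacency matrix is computed with `scipy.sparse.linalg.svds`: the right
+# singular vector gives the authority scores, and the hub scores follow as
+# ``A @ a`` (both normalized to sum to 1 when ``normalized=True``).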
+ +import networkx as nx + +__all__ = ["hits"] + + +@nx._dispatchable(preserve_edge_attrs={"G": {"weight": 1}}) +def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): + """Returns HITS hubs and authorities values for nodes. + + The HITS algorithm computes two numbers for a node. + Authorities estimates the node value based on the incoming links. + Hubs estimates the node value based on outgoing links. + + Parameters + ---------- + G : graph + A NetworkX graph + + max_iter : integer, optional + Maximum number of iterations in power method. + + tol : float, optional + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional + Starting value of each node for power method iteration. + + normalized : bool (default=True) + Normalize results by the sum of all of the values. + + Returns + ------- + (hubs,authorities) : two-tuple of dictionaries + Two dictionaries keyed by node containing the hub and authority + values. + + Raises + ------ + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> h, a = nx.hits(G) + + Notes + ----- + The eigenvector calculation is done by the power iteration method + and has no guarantee of convergence. The iteration will stop + after max_iter iterations or an error tolerance of + number_of_nodes(G)*tol has been reached. + + The HITS algorithm was designed for directed graphs but this + algorithm does not check if the input graph is directed and will + execute on undirected graphs. + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Jon Kleinberg, + Authoritative sources in a hyperlinked environment + Journal of the ACM 46 (5): 604-32, 1999. + doi:10.1145/324133.324140. + http://www.cs.cornell.edu/home/kleinber/auth.pdf. 
+ """ + import numpy as np + import scipy as sp + + if len(G) == 0: + return {}, {} + A = nx.adjacency_matrix(G, nodelist=list(G), dtype=float) + + if nstart is not None: + nstart = np.array(list(nstart.values())) + if max_iter <= 0: + raise nx.PowerIterationFailedConvergence(max_iter) + try: + _, _, vt = sp.sparse.linalg.svds(A, k=1, v0=nstart, maxiter=max_iter, tol=tol) + except sp.sparse.linalg.ArpackNoConvergence as exc: + raise nx.PowerIterationFailedConvergence(max_iter) from exc + + a = vt.flatten().real + h = A @ a + if normalized: + h /= h.sum() + a /= a.sum() + hubs = dict(zip(G, map(float, h))) + authorities = dict(zip(G, map(float, a))) + return hubs, authorities + + +def _hits_python(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): + if isinstance(G, nx.MultiGraph | nx.MultiDiGraph): + raise Exception("hits() not defined for graphs with multiedges.") + if len(G) == 0: + return {}, {} + # choose fixed starting vector if not given + if nstart is None: + h = dict.fromkeys(G, 1.0 / G.number_of_nodes()) + else: + h = nstart + # normalize starting vector + s = 1.0 / sum(h.values()) + for k in h: + h[k] *= s + for _ in range(max_iter): # power iteration: make up to max_iter iterations + hlast = h + h = dict.fromkeys(hlast.keys(), 0) + a = dict.fromkeys(hlast.keys(), 0) + # this "matrix multiply" looks odd because it is + # doing a left multiply a^T=hlast^T*G + for n in h: + for nbr in G[n]: + a[nbr] += hlast[n] * G[n][nbr].get("weight", 1) + # now multiply h=Ga + for n in h: + for nbr in G[n]: + h[n] += a[nbr] * G[n][nbr].get("weight", 1) + # normalize vector + s = 1.0 / max(h.values()) + for n in h: + h[n] *= s + # normalize vector + s = 1.0 / max(a.values()) + for n in a: + a[n] *= s + # check convergence, l1 norm + err = sum(abs(h[n] - hlast[n]) for n in h) + if err < tol: + break + else: + raise nx.PowerIterationFailedConvergence(max_iter) + if normalized: + s = 1.0 / sum(a.values()) + for n in a: + a[n] *= s + s = 1.0 / sum(h.values()) + for n in h: + h[n] *= s + return h, a + + +def _hits_numpy(G, normalized=True): + """Returns HITS hubs and authorities values for nodes. + + The HITS algorithm computes two numbers for a node. + Authorities estimates the node value based on the incoming links. + Hubs estimates the node value based on outgoing links. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool (default=True) + Normalize results by the sum of all of the values. + + Returns + ------- + (hubs,authorities) : two-tuple of dictionaries + Two dictionaries keyed by node containing the hub and authority + values. + + Examples + -------- + >>> G = nx.path_graph(4) + + The `hubs` and `authorities` are given by the eigenvectors corresponding to the + maximum eigenvalues of the hubs_matrix and the authority_matrix, respectively. + + The ``hubs`` and ``authority`` matrices are computed from the adjacency + matrix: + + >>> adj_ary = nx.to_numpy_array(G) + >>> hubs_matrix = adj_ary @ adj_ary.T + >>> authority_matrix = adj_ary.T @ adj_ary + + `_hits_numpy` maps the eigenvector corresponding to the maximum eigenvalue + of the respective matrices to the nodes in `G`: + + >>> from networkx.algorithms.link_analysis.hits_alg import _hits_numpy + >>> hubs, authority = _hits_numpy(G) + + Notes + ----- + The eigenvector calculation uses NumPy's interface to LAPACK. + + The HITS algorithm was designed for directed graphs but this + algorithm does not check if the input graph is directed and will + execute on undirected graphs. 
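The dict-based loops in _hits_python above implement the two alternating HITS updates, a <- A^T h and h <- A a. A compact NumPy sketch of the same iteration on an illustrative four-edge digraph, cross-checked against nx.hits:

import networkx as nx
import numpy as np

G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 1)])
A = nx.to_numpy_array(G)

h = np.ones(len(G)) / len(G)
for _ in range(50):
    a = A.T @ h        # authorities accumulate incoming hub weight
    h = A @ a          # hubs accumulate outgoing authority weight
    a /= a.max()       # rescale each round to keep values bounded
    h /= h.max()
a /= a.sum()           # final normalization, as with normalized=True
h /= h.sum()

hubs, auths = nx.hits(G)
assert np.allclose(h, [hubs[n] for n in G], atol=1e-4)
assert np.allclose(a, [auths[n] for n in G], atol=1e-4)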
+ + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Jon Kleinberg, + Authoritative sources in a hyperlinked environment + Journal of the ACM 46 (5): 604-32, 1999. + doi:10.1145/324133.324140. + http://www.cs.cornell.edu/home/kleinber/auth.pdf. + """ + import numpy as np + + if len(G) == 0: + return {}, {} + adj_ary = nx.to_numpy_array(G) + # Hub matrix + H = adj_ary @ adj_ary.T + e, ev = np.linalg.eig(H) + h = ev[:, np.argmax(e)] # eigenvector corresponding to the maximum eigenvalue + # Authority matrix + A = adj_ary.T @ adj_ary + e, ev = np.linalg.eig(A) + a = ev[:, np.argmax(e)] # eigenvector corresponding to the maximum eigenvalue + if normalized: + h /= h.sum() + a /= a.sum() + else: + h /= h.max() + a /= a.max() + hubs = dict(zip(G, map(float, h))) + authorities = dict(zip(G, map(float, a))) + return hubs, authorities + + +def _hits_scipy(G, max_iter=100, tol=1.0e-6, nstart=None, normalized=True): + """Returns HITS hubs and authorities values for nodes. + + + The HITS algorithm computes two numbers for a node. + Authorities estimates the node value based on the incoming links. + Hubs estimates the node value based on outgoing links. + + Parameters + ---------- + G : graph + A NetworkX graph + + max_iter : integer, optional + Maximum number of iterations in power method. + + tol : float, optional + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional + Starting value of each node for power method iteration. + + normalized : bool (default=True) + Normalize results by the sum of all of the values. + + Returns + ------- + (hubs,authorities) : two-tuple of dictionaries + Two dictionaries keyed by node containing the hub and authority + values. + + Examples + -------- + >>> from networkx.algorithms.link_analysis.hits_alg import _hits_scipy + >>> G = nx.path_graph(4) + >>> h, a = _hits_scipy(G) + + Notes + ----- + This implementation uses SciPy sparse matrices. + + The eigenvector calculation is done by the power iteration method + and has no guarantee of convergence. The iteration will stop + after max_iter iterations or an error tolerance of + number_of_nodes(G)*tol has been reached. + + The HITS algorithm was designed for directed graphs but this + algorithm does not check if the input graph is directed and will + execute on undirected graphs. + + Raises + ------ + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Jon Kleinberg, + Authoritative sources in a hyperlinked environment + Journal of the ACM 46 (5): 604-632, 1999. + doi:10.1145/324133.324140. + http://www.cs.cornell.edu/home/kleinber/auth.pdf. 
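Because the hub matrix adj_ary @ adj_ary.T is symmetric, the same dominant eigenvector can also be obtained with the symmetric solver np.linalg.eigh, which returns eigenvalues in ascending order; a short sketch on the path graph used in the docstring above:

import networkx as nx
import numpy as np

G = nx.path_graph(4)
adj = nx.to_numpy_array(G)
w, V = np.linalg.eigh(adj @ adj.T)  # eigenvalues come back in ascending order
h = np.abs(V[:, -1])                # dominant eigenvector; its sign is arbitrary
h /= h.sum()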
+ """ + import numpy as np + + if len(G) == 0: + return {}, {} + A = nx.to_scipy_sparse_array(G, nodelist=list(G)) + (n, _) = A.shape # should be square + ATA = A.T @ A # authority matrix + # choose fixed starting vector if not given + if nstart is None: + x = np.ones((n, 1)) / n + else: + x = np.array([nstart.get(n, 0) for n in list(G)], dtype=float) + x /= x.sum() + + # power iteration on authority matrix + i = 0 + while True: + xlast = x + x = ATA @ x + x /= x.max() + # check convergence, l1 norm + err = np.absolute(x - xlast).sum() + if err < tol: + break + if i > max_iter: + raise nx.PowerIterationFailedConvergence(max_iter) + i += 1 + + a = x.flatten() + h = A @ a + if normalized: + h /= h.sum() + a /= a.sum() + hubs = dict(zip(G, map(float, h))) + authorities = dict(zip(G, map(float, a))) + return hubs, authorities diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py new file mode 100644 index 0000000..2ab0d86 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py @@ -0,0 +1,499 @@ +"""PageRank analysis of graph structure.""" + +import networkx as nx + +__all__ = ["pagerank", "google_matrix"] + + +@nx._dispatchable(edge_attrs="weight") +def pagerank( + G, + alpha=0.85, + personalization=None, + max_iter=100, + tol=1.0e-6, + nstart=None, + weight="weight", + dangling=None, +): + """Returns the PageRank of the nodes in the graph. + + PageRank computes a ranking of the nodes in the graph G based on + the structure of the incoming links. It was originally designed as + an algorithm to rank web pages. + + Parameters + ---------- + G : graph + A NetworkX graph. Undirected graphs will be converted to a directed + graph with two directed edges for each undirected edge. + + alpha : float, optional + Damping parameter for PageRank, default=0.85. + + personalization: dict, optional + The "personalization vector" consisting of a dictionary with a + key some subset of graph nodes and personalization value each of those. + At least one personalization value must be non-zero. + If not specified, a nodes personalization value will be zero. + By default, a uniform distribution is used. + + max_iter : integer, optional + Maximum number of iterations in power method eigenvalue solver. + + tol : float, optional + Error tolerance used to check convergence in power method solver. + The iteration will stop after a tolerance of ``len(G) * tol`` is reached. + + nstart : dictionary, optional + Starting value of PageRank iteration for each node. + + weight : key, optional + Edge data key to use as weight. If None weights are set to 1. + + dangling: dict, optional + The outedges to be assigned to any "dangling" nodes, i.e., nodes without + any outedges. The dict key is the node the outedge points to and the dict + value is the weight of that outedge. By default, dangling nodes are given + outedges according to the personalization vector (uniform if not + specified). This must be selected to result in an irreducible transition + matrix (see notes under google_matrix). It may be common to have the + dangling dict to be the same as the personalization dict. 
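An illustrative call exercising the two dictionary arguments just described (the graph and the weights are arbitrary example values):

import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3)])  # node 3 has no out-edges, i.e. it is dangling
pr = nx.pagerank(
    G,
    alpha=0.85,
    personalization={1: 1.0},  # teleport only to node 1
    dangling={1: 1.0},         # route dangling mass back to node 1 as well
)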
+ + + Returns + ------- + pagerank : dictionary + Dictionary of nodes with PageRank as value + + Examples + -------- + >>> G = nx.DiGraph(nx.path_graph(4)) + >>> pr = nx.pagerank(G, alpha=0.9) + + Notes + ----- + The eigenvector calculation is done by the power iteration method + and has no guarantee of convergence. The iteration will stop after + an error tolerance of ``len(G) * tol`` has been reached. If the + number of iterations exceed `max_iter`, a + :exc:`networkx.exception.PowerIterationFailedConvergence` exception + is raised. + + The PageRank algorithm was designed for directed graphs but this + algorithm does not check if the input graph is directed and will + execute on undirected graphs by converting each edge in the + directed graph to two edges. + + See Also + -------- + google_matrix + :func:`~networkx.algorithms.bipartite.link_analysis.birank` + + Raises + ------ + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry, + The PageRank citation ranking: Bringing order to the Web. 1999 + http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf + + """ + return _pagerank_scipy( + G, alpha, personalization, max_iter, tol, nstart, weight, dangling + ) + + +def _pagerank_python( + G, + alpha=0.85, + personalization=None, + max_iter=100, + tol=1.0e-6, + nstart=None, + weight="weight", + dangling=None, +): + if len(G) == 0: + return {} + + D = G.to_directed() + + # Create a copy in (right) stochastic form + W = nx.stochastic_graph(D, weight=weight) + N = W.number_of_nodes() + + # Choose fixed starting vector if not given + if nstart is None: + x = dict.fromkeys(W, 1.0 / N) + else: + # Normalized nstart vector + s = sum(nstart.values()) + x = {k: v / s for k, v in nstart.items()} + + if personalization is None: + # Assign uniform personalization vector if not given + p = dict.fromkeys(W, 1.0 / N) + else: + s = sum(personalization.values()) + p = {k: v / s for k, v in personalization.items()} + + if dangling is None: + # Use personalization vector if dangling vector not specified + dangling_weights = p + else: + s = sum(dangling.values()) + dangling_weights = {k: v / s for k, v in dangling.items()} + dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0] + + # power iteration: make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = dict.fromkeys(xlast.keys(), 0) + danglesum = alpha * sum(xlast[n] for n in dangling_nodes) + for n in x: + # this matrix multiply looks odd because it is + # doing a left multiply x^T=xlast^T*W + for _, nbr, wt in W.edges(n, data=weight): + x[nbr] += alpha * xlast[n] * wt + x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0) + # check convergence, l1 norm + err = sum(abs(x[n] - xlast[n]) for n in x) + if err < N * tol: + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +@nx._dispatchable(edge_attrs="weight") +def google_matrix( + G, alpha=0.85, personalization=None, nodelist=None, weight="weight", dangling=None +): + """Returns the Google matrix of the graph. + + Parameters + ---------- + G : graph + A NetworkX graph. 
Undirected graphs will be converted to a directed + graph with two directed edges for each undirected edge. + + alpha : float + The damping factor. + + personalization: dict, optional + The "personalization vector" consisting of a dictionary with a + key some subset of graph nodes and personalization value each of those. + At least one personalization value must be non-zero. + If not specified, a nodes personalization value will be zero. + By default, a uniform distribution is used. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : key, optional + Edge data key to use as weight. If None weights are set to 1. + + dangling: dict, optional + The outedges to be assigned to any "dangling" nodes, i.e., nodes without + any outedges. The dict key is the node the outedge points to and the dict + value is the weight of that outedge. By default, dangling nodes are given + outedges according to the personalization vector (uniform if not + specified) This must be selected to result in an irreducible transition + matrix (see notes below). It may be common to have the dangling dict to + be the same as the personalization dict. + + Returns + ------- + A : 2D NumPy ndarray + Google matrix of the graph + + Notes + ----- + The array returned represents the transition matrix that describes the + Markov chain used in PageRank. For PageRank to converge to a unique + solution (i.e., a unique stationary distribution in a Markov chain), the + transition matrix must be irreducible. In other words, it must be that + there exists a path between every pair of nodes in the graph, or else there + is the potential of "rank sinks." + + This implementation works with Multi(Di)Graphs. For multigraphs the + weight between two nodes is set to be the sum of all edge weights + between those nodes. + + See Also + -------- + pagerank + """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + + A = nx.to_numpy_array(G, nodelist=nodelist, weight=weight) + N = len(G) + if N == 0: + return A + + # Personalization vector + if personalization is None: + p = np.repeat(1.0 / N, N) + else: + p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float) + if p.sum() == 0: + raise ZeroDivisionError + p /= p.sum() + + # Dangling nodes + if dangling is None: + dangling_weights = p + else: + # Convert the dangling dictionary into an array in nodelist order + dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float) + dangling_weights /= dangling_weights.sum() + dangling_nodes = np.where(A.sum(axis=1) == 0)[0] + + # Assign dangling_weights to any dangling nodes (nodes with no out links) + A[dangling_nodes] = dangling_weights + + A /= A.sum(axis=1)[:, np.newaxis] # Normalize rows to sum to 1 + + return alpha * A + (1 - alpha) * p + + +def _pagerank_numpy( + G, alpha=0.85, personalization=None, weight="weight", dangling=None +): + """Returns the PageRank of the nodes in the graph. + + PageRank computes a ranking of the nodes in the graph G based on + the structure of the incoming links. It was originally designed as + an algorithm to rank web pages. + + Parameters + ---------- + G : graph + A NetworkX graph. Undirected graphs will be converted to a directed + graph with two directed edges for each undirected edge. + + alpha : float, optional + Damping parameter for PageRank, default=0.85. 
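google_matrix, as implemented above, returns a row-stochastic matrix whose stationary distribution is the PageRank vector; a sketch verifying both properties on a small illustrative digraph:

import networkx as nx
import numpy as np

G = nx.DiGraph([(1, 2), (2, 1), (2, 3)])
M = nx.google_matrix(G, alpha=0.85)
assert np.allclose(M.sum(axis=1), 1.0)  # each row is a probability distribution

x = np.repeat(1.0 / len(G), len(G))
for _ in range(100):
    x = x @ M                           # left-multiply: x^T <- x^T M
pr = nx.pagerank(G, alpha=0.85)
assert np.allclose(x, [pr[n] for n in G], atol=1e-5)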
+ + personalization: dict, optional + The "personalization vector" consisting of a dictionary with a + key some subset of graph nodes and personalization value each of those. + At least one personalization value must be non-zero. + If not specified, a nodes personalization value will be zero. + By default, a uniform distribution is used. + + weight : key, optional + Edge data key to use as weight. If None weights are set to 1. + + dangling: dict, optional + The outedges to be assigned to any "dangling" nodes, i.e., nodes without + any outedges. The dict key is the node the outedge points to and the dict + value is the weight of that outedge. By default, dangling nodes are given + outedges according to the personalization vector (uniform if not + specified) This must be selected to result in an irreducible transition + matrix (see notes under google_matrix). It may be common to have the + dangling dict to be the same as the personalization dict. + + Returns + ------- + pagerank : dictionary + Dictionary of nodes with PageRank as value. + + Examples + -------- + >>> from networkx.algorithms.link_analysis.pagerank_alg import _pagerank_numpy + >>> G = nx.DiGraph(nx.path_graph(4)) + >>> pr = _pagerank_numpy(G, alpha=0.9) + + Notes + ----- + The eigenvector calculation uses NumPy's interface to the LAPACK + eigenvalue solvers. This will be the fastest and most accurate + for small graphs. + + This implementation works with Multi(Di)Graphs. For multigraphs the + weight between two nodes is set to be the sum of all edge weights + between those nodes. + + See Also + -------- + pagerank, google_matrix + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry, + The PageRank citation ranking: Bringing order to the Web. 1999 + http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf + """ + import numpy as np + + if len(G) == 0: + return {} + M = google_matrix( + G, alpha, personalization=personalization, weight=weight, dangling=dangling + ) + # use numpy LAPACK solver + eigenvalues, eigenvectors = np.linalg.eig(M.T) + ind = np.argmax(eigenvalues) + # eigenvector of largest eigenvalue is at ind, normalized + largest = np.array(eigenvectors[:, ind]).flatten().real + norm = largest.sum() + return dict(zip(G, map(float, largest / norm))) + + +def _pagerank_scipy( + G, + alpha=0.85, + personalization=None, + max_iter=100, + tol=1.0e-6, + nstart=None, + weight="weight", + dangling=None, +): + """Returns the PageRank of the nodes in the graph. + + PageRank computes a ranking of the nodes in the graph G based on + the structure of the incoming links. It was originally designed as + an algorithm to rank web pages. + + Parameters + ---------- + G : graph + A NetworkX graph. Undirected graphs will be converted to a directed + graph with two directed edges for each undirected edge. + + alpha : float, optional + Damping parameter for PageRank, default=0.85. + + personalization: dict, optional + The "personalization vector" consisting of a dictionary with a + key some subset of graph nodes and personalization value each of those. + At least one personalization value must be non-zero. + If not specified, a nodes personalization value will be zero. + By default, a uniform distribution is used. + + max_iter : integer, optional + Maximum number of iterations in power method eigenvalue solver. 
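A quick sanity check on the eigendecomposition route taken by _pagerank_numpy above: a row-stochastic Google matrix has dominant eigenvalue 1, and the matching left eigenvector, made nonnegative and normalized, is the PageRank vector (same illustrative digraph as before):

import networkx as nx
import numpy as np

G = nx.DiGraph([(1, 2), (2, 1), (2, 3)])
M = nx.google_matrix(G)
eigenvalues, eigenvectors = np.linalg.eig(M.T)
ind = np.argmax(eigenvalues)
assert np.isclose(eigenvalues[ind].real, 1.0)
v = np.abs(eigenvectors[:, ind].real)   # Perron vector; fix the arbitrary sign
pr_eig = v / v.sum()
pr = nx.pagerank(G)
assert np.allclose(pr_eig, [pr[n] for n in G], atol=1e-5)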
+ + tol : float, optional + Error tolerance used to check convergence in power method solver. + The iteration will stop after a tolerance of ``len(G) * tol`` is reached. + + nstart : dictionary, optional + Starting value of PageRank iteration for each node. + + weight : key, optional + Edge data key to use as weight. If None weights are set to 1. + + dangling: dict, optional + The outedges to be assigned to any "dangling" nodes, i.e., nodes without + any outedges. The dict key is the node the outedge points to and the dict + value is the weight of that outedge. By default, dangling nodes are given + outedges according to the personalization vector (uniform if not + specified) This must be selected to result in an irreducible transition + matrix (see notes under google_matrix). It may be common to have the + dangling dict to be the same as the personalization dict. + + Returns + ------- + pagerank : dictionary + Dictionary of nodes with PageRank as value + + Examples + -------- + >>> from networkx.algorithms.link_analysis.pagerank_alg import _pagerank_scipy + >>> G = nx.DiGraph(nx.path_graph(4)) + >>> pr = _pagerank_scipy(G, alpha=0.9) + + Notes + ----- + The eigenvector calculation uses power iteration with a SciPy + sparse matrix representation. + + This implementation works with Multi(Di)Graphs. For multigraphs the + weight between two nodes is set to be the sum of all edge weights + between those nodes. + + See Also + -------- + pagerank + + Raises + ------ + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry, + The PageRank citation ranking: Bringing order to the Web. 
1999 + http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf + """ + import numpy as np + import scipy as sp + + N = len(G) + if N == 0: + return {} + + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, dtype=float) + S = A.sum(axis=1) + S[S != 0] = 1.0 / S[S != 0] + # TODO: csr_array + Q = sp.sparse.csr_array(sp.sparse.spdiags(S.T, 0, *A.shape)) + A = Q @ A + + # initial vector + if nstart is None: + x = np.repeat(1.0 / N, N) + else: + x = np.array([nstart.get(n, 0) for n in nodelist], dtype=float) + x /= x.sum() + + # Personalization vector + if personalization is None: + p = np.repeat(1.0 / N, N) + else: + p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float) + if p.sum() == 0: + raise ZeroDivisionError + p /= p.sum() + # Dangling nodes + if dangling is None: + dangling_weights = p + else: + # Convert the dangling dictionary into an array in nodelist order + dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float) + dangling_weights /= dangling_weights.sum() + is_dangling = np.where(S == 0)[0] + + # power iteration: make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = alpha * (x @ A + sum(x[is_dangling]) * dangling_weights) + (1 - alpha) * p + # check convergence, l1 norm + err = np.absolute(x - xlast).sum() + if err < N * tol: + return dict(zip(nodelist, map(float, x))) + raise nx.PowerIterationFailedConvergence(max_iter) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..6b5a85d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-312.pyc new file mode 100644 index 0000000..be8f89b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-312.pyc new file mode 100644 index 0000000..91978ec Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py new file mode 100644 index 0000000..fedd59c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py @@ -0,0 +1,77 @@ +import pytest + +import networkx as nx +from networkx.algorithms.link_analysis.hits_alg import ( + _hits_numpy, + _hits_python, + _hits_scipy, +) + +np = 
pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + +# Example from +# A. Langville and C. Meyer, "A survey of eigenvector methods of web +# information retrieval." http://citeseer.ist.psu.edu/713792.html + + +class TestHITS: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + + edges = [(1, 3), (1, 5), (2, 1), (3, 5), (5, 4), (5, 3), (6, 5)] + + G.add_edges_from(edges, weight=1) + cls.G = G + cls.G.a = dict( + zip(sorted(G), [0.000000, 0.000000, 0.366025, 0.133975, 0.500000, 0.000000]) + ) + cls.G.h = dict( + zip(sorted(G), [0.366025, 0.000000, 0.211325, 0.000000, 0.211325, 0.211325]) + ) + + def test_hits_numpy(self): + G = self.G + h, a = _hits_numpy(G) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + + @pytest.mark.parametrize("hits_alg", (nx.hits, _hits_python, _hits_scipy)) + def test_hits(self, hits_alg): + G = self.G + h, a = hits_alg(G, tol=1.0e-08) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + nstart = dict.fromkeys(G, 1.0 / 2) + h, a = hits_alg(G, nstart=nstart) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + + def test_empty(self): + G = nx.Graph() + assert nx.hits(G) == ({}, {}) + assert _hits_numpy(G) == ({}, {}) + assert _hits_python(G) == ({}, {}) + assert _hits_scipy(G) == ({}, {}) + + def test_hits_not_convergent(self): + G = nx.path_graph(50) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_scipy(G, max_iter=1) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_python(G, max_iter=1) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_scipy(G, max_iter=0) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_python(G, max_iter=0) + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.hits(G, max_iter=0) + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.hits(G, max_iter=1) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py new file mode 100644 index 0000000..fb08423 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py @@ -0,0 +1,213 @@ +import random + +import pytest + +import networkx as nx +from networkx.algorithms.link_analysis.pagerank_alg import ( + _pagerank_numpy, + _pagerank_python, + _pagerank_scipy, +) +from networkx.classes.tests import dispatch_interface + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +# Example from +# A. Langville and C. Meyer, "A survey of eigenvector methods of web +# information retrieval." 
http://citeseer.ist.psu.edu/713792.html + + +class TestPageRank: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + edges = [ + (1, 2), + (1, 3), + # 2 is a dangling node + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ] + G.add_edges_from(edges) + cls.G = G + cls.G.pagerank = dict( + zip( + sorted(G), + [ + 0.03721197, + 0.05395735, + 0.04150565, + 0.37508082, + 0.20599833, + 0.28624589, + ], + ) + ) + cls.dangling_node_index = 1 + cls.dangling_edges = {1: 2, 2: 3, 3: 0, 4: 0, 5: 0, 6: 0} + cls.G.dangling_pagerank = dict( + zip( + sorted(G), + [0.10844518, 0.18618601, 0.0710892, 0.2683668, 0.15919783, 0.20671497], + ) + ) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_pagerank(self, alg): + G = self.G + p = alg(G, alpha=0.9, tol=1.0e-08) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + nstart = {n: random.random() for n in G} + p = alg(G, alpha=0.9, tol=1.0e-08, nstart=nstart) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_pagerank_max_iter(self, alg): + with pytest.raises(nx.PowerIterationFailedConvergence): + alg(self.G, max_iter=0) + + def test_numpy_pagerank(self): + G = self.G + p = _pagerank_numpy(G, alpha=0.9) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + def test_google_matrix(self): + G = self.G + M = nx.google_matrix(G, alpha=0.9, nodelist=sorted(G)) + _, ev = np.linalg.eig(M.T) + p = ev[:, 0] / ev[:, 0].sum() + for a, b in zip(p, self.G.pagerank.values()): + assert a == pytest.approx(b, abs=1e-7) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, _pagerank_numpy)) + def test_personalization(self, alg): + G = nx.complete_graph(4) + personalize = {0: 1, 1: 1, 2: 4, 3: 4} + answer = { + 0: 0.23246732615667579, + 1: 0.23246732615667579, + 2: 0.267532673843324, + 3: 0.2675326738433241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, nx.google_matrix)) + def test_zero_personalization_vector(self, alg): + G = nx.complete_graph(4) + personalize = {0: 0, 1: 0, 2: 0, 3: 0} + pytest.raises(ZeroDivisionError, alg, G, personalization=personalize) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_one_nonzero_personalization_value(self, alg): + G = nx.complete_graph(4) + personalize = {0: 0, 1: 0, 2: 0, 3: 1} + answer = { + 0: 0.22077931820379187, + 1: 0.22077931820379187, + 2: 0.22077931820379187, + 3: 0.3376620453886241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_incomplete_personalization(self, alg): + G = nx.complete_graph(4) + personalize = {3: 1} + answer = { + 0: 0.22077931820379187, + 1: 0.22077931820379187, + 2: 0.22077931820379187, + 3: 0.3376620453886241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + def test_dangling_matrix(self): + """ + Tests that the google_matrix doesn't change except for the dangling + nodes. 
+ """ + G = self.G + dangling = self.dangling_edges + dangling_sum = sum(dangling.values()) + M1 = nx.google_matrix(G, personalization=dangling) + M2 = nx.google_matrix(G, personalization=dangling, dangling=dangling) + for i in range(len(G)): + for j in range(len(G)): + if i == self.dangling_node_index and (j + 1) in dangling: + assert M2[i, j] == pytest.approx( + dangling[j + 1] / dangling_sum, abs=1e-4 + ) + else: + assert M2[i, j] == pytest.approx(M1[i, j], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, _pagerank_numpy)) + def test_dangling_pagerank(self, alg): + pr = alg(self.G, dangling=self.dangling_edges) + for n in self.G: + assert pr[n] == pytest.approx(self.G.dangling_pagerank[n], abs=1e-4) + + def test_empty(self): + G = nx.Graph() + assert nx.pagerank(G) == {} + assert _pagerank_python(G) == {} + assert _pagerank_numpy(G) == {} + assert nx.google_matrix(G).shape == (0, 0) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_multigraph(self, alg): + G = nx.MultiGraph() + G.add_edges_from([(1, 2), (1, 2), (1, 2), (2, 3), (2, 3), ("3", 3), ("3", 3)]) + answer = { + 1: 0.21066048614468322, + 2: 0.3395308825985378, + 3: 0.28933951385531687, + "3": 0.16046911740146227, + } + p = alg(G) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + +class TestPageRankScipy(TestPageRank): + def test_scipy_pagerank(self): + G = self.G + p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + personalize = {n: random.random() for n in G} + p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08, personalization=personalize) + + nstart = {n: random.random() for n in G} + p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08, nstart=nstart) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + def test_scipy_pagerank_max_iter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + _pagerank_scipy(self.G, max_iter=0) + + def test_dangling_scipy_pagerank(self): + pr = _pagerank_scipy(self.G, dangling=self.dangling_edges) + for n in self.G: + assert pr[n] == pytest.approx(self.G.dangling_pagerank[n], abs=1e-4) + + def test_empty_scipy(self): + G = nx.Graph() + assert _pagerank_scipy(G) == {} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/link_prediction.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_prediction.py new file mode 100644 index 0000000..3615f26 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/link_prediction.py @@ -0,0 +1,687 @@ +""" +Link prediction algorithms. +""" + +from math import log + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "resource_allocation_index", + "jaccard_coefficient", + "adamic_adar_index", + "preferential_attachment", + "cn_soundarajan_hopcroft", + "ra_index_soundarajan_hopcroft", + "within_inter_cluster", + "common_neighbor_centrality", +] + + +def _apply_prediction(G, func, ebunch=None): + """Applies the given function to each edge in the specified iterable + of edges. + + `G` is an instance of :class:`networkx.Graph`. + + `func` is a function on two inputs, each of which is a node in the + graph. The function can return anything, but it should return a + value representing a prediction of the likelihood of a "link" + joining the two nodes. + + `ebunch` is an iterable of pairs of nodes. If not specified, all + non-edges in the graph `G` will be used. 
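The generator pattern above is easy to reuse when prototyping ad-hoc scores outside the library; a hypothetical common_degree score, purely for illustration:

import networkx as nx

G = nx.path_graph(4)

def common_degree(u, v):  # hypothetical toy score, not a library function
    return G.degree(u) + G.degree(v)

preds = ((u, v, common_degree(u, v)) for u, v in nx.non_edges(G))
for u, v, p in preds:     # non-edges of the path: (0, 2), (0, 3), (1, 3)
    print(f"({u}, {v}) -> {p}")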
+ + """ + if ebunch is None: + ebunch = nx.non_edges(G) + else: + for u, v in ebunch: + if u not in G: + raise nx.NodeNotFound(f"Node {u} not in G.") + if v not in G: + raise nx.NodeNotFound(f"Node {v} not in G.") + return ((u, v, func(u, v)) for u, v in ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def resource_allocation_index(G, ebunch=None): + r"""Compute the resource allocation index of all node pairs in ebunch. + + Resource allocation index of `u` and `v` is defined as + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Resource allocation index will be computed for each pair of + nodes given in the iterable. The pairs must be given as + 2-tuples (u, v) where u and v are nodes in the graph. If ebunch + is None then all nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their resource allocation index. + + Raises + ------ + NetworkXNotImplemented + If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`. + + NodeNotFound + If `ebunch` has a node that is not in `G`. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.resource_allocation_index(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 0.75000000 + (2, 3) -> 0.75000000 + + References + ---------- + .. [1] T. Zhou, L. Lu, Y.-C. Zhang. + Predicting missing links via local information. + Eur. Phys. J. B 71 (2009) 623. + https://arxiv.org/pdf/0901.0553.pdf + """ + + def predict(u, v): + return sum(1 / G.degree(w) for w in nx.common_neighbors(G, u, v)) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def jaccard_coefficient(G, ebunch=None): + r"""Compute the Jaccard coefficient of all node pairs in ebunch. + + Jaccard coefficient of nodes `u` and `v` is defined as + + .. math:: + + \frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Jaccard coefficient will be computed for each pair of nodes + given in the iterable. The pairs must be given as 2-tuples + (u, v) where u and v are nodes in the graph. If ebunch is None + then all nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their Jaccard coefficient. + + Raises + ------ + NetworkXNotImplemented + If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`. + + NodeNotFound + If `ebunch` has a node that is not in `G`. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 0.60000000 + (2, 3) -> 0.60000000 + + References + ---------- + .. [1] D. Liben-Nowell, J. Kleinberg. + The Link Prediction Problem for Social Networks (2004). 
+ http://www.cs.cornell.edu/home/kleinber/link-pred.pdf + """ + + def predict(u, v): + union_size = len(set(G[u]) | set(G[v])) + if union_size == 0: + return 0 + return len(nx.common_neighbors(G, u, v)) / union_size + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def adamic_adar_index(G, ebunch=None): + r"""Compute the Adamic-Adar index of all node pairs in ebunch. + + Adamic-Adar index of `u` and `v` is defined as + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + This index leads to zero-division for nodes only connected via self-loops. + It is intended to be used when no self-loops are present. + + Parameters + ---------- + G : graph + NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Adamic-Adar index will be computed for each pair of nodes given + in the iterable. The pairs must be given as 2-tuples (u, v) + where u and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their Adamic-Adar index. + + Raises + ------ + NetworkXNotImplemented + If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`. + + NodeNotFound + If `ebunch` has a node that is not in `G`. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.adamic_adar_index(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 2.16404256 + (2, 3) -> 2.16404256 + + References + ---------- + .. [1] D. Liben-Nowell, J. Kleinberg. + The Link Prediction Problem for Social Networks (2004). + http://www.cs.cornell.edu/home/kleinber/link-pred.pdf + """ + + def predict(u, v): + return sum(1 / log(G.degree(w)) for w in nx.common_neighbors(G, u, v)) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def common_neighbor_centrality(G, ebunch=None, alpha=0.8): + r"""Return the CCPA score for each pair of nodes. + + Compute the Common Neighbor and Centrality based Parameterized Algorithm(CCPA) + score of all node pairs in ebunch. + + CCPA score of `u` and `v` is defined as + + .. math:: + + \alpha \cdot (|\Gamma (u){\cap }^{}\Gamma (v)|)+(1-\alpha )\cdot \frac{N}{{d}_{uv}} + + where $\Gamma(u)$ denotes the set of neighbors of $u$, $\Gamma(v)$ denotes the + set of neighbors of $v$, $\alpha$ is parameter varies between [0,1], $N$ denotes + total number of nodes in the Graph and ${d}_{uv}$ denotes shortest distance + between $u$ and $v$. + + This algorithm is based on two vital properties of nodes, namely the number + of common neighbors and their centrality. Common neighbor refers to the common + nodes between two nodes. Centrality refers to the prestige that a node enjoys + in a network. + + .. seealso:: + + :func:`common_neighbors` + + Parameters + ---------- + G : graph + NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Preferential attachment score will be computed for each pair of + nodes given in the iterable. The pairs must be given as + 2-tuples (u, v) where u and v are nodes in the graph. If ebunch + is None then all nonexistent edges in the graph will be used. + Default value: None. 
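The three neighborhood indices defined so far are easy to verify by hand on the complete graph used in the docstrings: every pair of distinct nodes in K5 shares the remaining three vertices, each of degree 4.

import networkx as nx
from math import log

G = nx.complete_graph(5)
u, v = 0, 1
cn = set(G[u]) & set(G[v])                    # common neighbors: {2, 3, 4}

ra = sum(1 / G.degree(w) for w in cn)         # 3 * 1/4  = 0.75
jc = len(cn) / len(set(G[u]) | set(G[v]))     # 3 / 5    = 0.6
aa = sum(1 / log(G.degree(w)) for w in cn)    # 3/log(4) ~ 2.16404256

These agree with the values shown in the Examples sections above.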
+ + alpha : Parameter defined for participation of Common Neighbor + and Centrality Algorithm share. Values for alpha should + normally be between 0 and 1. Default value set to 0.8 + because author found better performance at 0.8 for all the + dataset. + Default value: 0.8 + + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their Common Neighbor and Centrality based + Parameterized Algorithm(CCPA) score. + + Raises + ------ + NetworkXNotImplemented + If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`. + + NetworkXAlgorithmError + If self loops exist in `ebunch` or in `G` (if `ebunch` is `None`). + + NodeNotFound + If `ebunch` has a node that is not in `G`. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.common_neighbor_centrality(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p}") + (0, 1) -> 3.4000000000000004 + (2, 3) -> 3.4000000000000004 + + References + ---------- + .. [1] Ahmad, I., Akhtar, M.U., Noor, S. et al. + Missing Link Prediction using Common Neighbor and Centrality based Parameterized Algorithm. + Sci Rep 10, 364 (2020). + https://doi.org/10.1038/s41598-019-57304-y + """ + + # When alpha == 1, the CCPA score simplifies to the number of common neighbors. + if alpha == 1: + + def predict(u, v): + if u == v: + raise nx.NetworkXAlgorithmError("Self loops are not supported") + + return len(nx.common_neighbors(G, u, v)) + + else: + spl = dict(nx.shortest_path_length(G)) + inf = float("inf") + + def predict(u, v): + if u == v: + raise nx.NetworkXAlgorithmError("Self loops are not supported") + path_len = spl[u].get(v, inf) + + n_nbrs = len(nx.common_neighbors(G, u, v)) + return alpha * n_nbrs + (1 - alpha) * len(G) / path_len + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def preferential_attachment(G, ebunch=None): + r"""Compute the preferential attachment score of all node pairs in ebunch. + + Preferential attachment score of `u` and `v` is defined as + + .. math:: + + |\Gamma(u)| |\Gamma(v)| + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Preferential attachment score will be computed for each pair of + nodes given in the iterable. The pairs must be given as + 2-tuples (u, v) where u and v are nodes in the graph. If ebunch + is None then all nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their preferential attachment score. + + Raises + ------ + NetworkXNotImplemented + If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`. + + NodeNotFound + If `ebunch` has a node that is not in `G`. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.preferential_attachment(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p}") + (0, 1) -> 16 + (2, 3) -> 16 + + References + ---------- + .. [1] D. Liben-Nowell, J. Kleinberg. + The Link Prediction Problem for Social Networks (2004). 
+ http://www.cs.cornell.edu/home/kleinber/link-pred.pdf + """ + + def predict(u, v): + return G.degree(u) * G.degree(v) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(node_attrs="community") +def cn_soundarajan_hopcroft(G, ebunch=None, community="community"): + r"""Count the number of common neighbors of all node pairs in ebunch + using community information. + + For two nodes $u$ and $v$, this function computes the number of + common neighbors and bonus one for each common neighbor belonging to + the same community as $u$ and $v$. Mathematically, + + .. math:: + + |\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w) + + where $f(w)$ equals 1 if $w$ belongs to the same community as $u$ + and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of + neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The score will be computed for each pair of nodes given in the + iterable. The pairs must be given as 2-tuples (u, v) where u + and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their score. + + Raises + ------ + NetworkXNotImplemented + If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`. + + NetworkXAlgorithmError + If no community information is available for a node in `ebunch` or in `G` (if `ebunch` is `None`). + + NodeNotFound + If `ebunch` has a node that is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 0 + >>> G.nodes[2]["community"] = 0 + >>> preds = nx.cn_soundarajan_hopcroft(G, [(0, 2)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p}") + (0, 2) -> 2 + + References + ---------- + .. [1] Sucheta Soundarajan and John Hopcroft. + Using community information to improve the precision of link + prediction methods. + In Proceedings of the 21st international conference companion on + World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608. + http://doi.acm.org/10.1145/2187980.2188150 + """ + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + cnbors = nx.common_neighbors(G, u, v) + neighbors = ( + sum(_community(G, w, community) == Cu for w in cnbors) if Cu == Cv else 0 + ) + return len(cnbors) + neighbors + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(node_attrs="community") +def ra_index_soundarajan_hopcroft(G, ebunch=None, community="community"): + r"""Compute the resource allocation index of all node pairs in + ebunch using community information. + + For two nodes $u$ and $v$, this function computes the resource + allocation index considering only common neighbors belonging to the + same community as $u$ and $v$. Mathematically, + + .. 
math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{f(w)}{|\Gamma(w)|} + + where $f(w)$ equals 1 if $w$ belongs to the same community as $u$ + and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of + neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The score will be computed for each pair of nodes given in the + iterable. The pairs must be given as 2-tuples (u, v) where u + and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their score. + + Raises + ------ + NetworkXNotImplemented + If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`. + + NetworkXAlgorithmError + If no community information is available for a node in `ebunch` or in `G` (if `ebunch` is `None`). + + NodeNotFound + If `ebunch` has a node that is not in `G`. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 0 + >>> G.nodes[2]["community"] = 1 + >>> G.nodes[3]["community"] = 0 + >>> preds = nx.ra_index_soundarajan_hopcroft(G, [(0, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 3) -> 0.50000000 + + References + ---------- + .. [1] Sucheta Soundarajan and John Hopcroft. + Using community information to improve the precision of link + prediction methods. + In Proceedings of the 21st international conference companion on + World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608. + http://doi.acm.org/10.1145/2187980.2188150 + """ + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + if Cu != Cv: + return 0 + cnbors = nx.common_neighbors(G, u, v) + return sum(1 / G.degree(w) for w in cnbors if _community(G, w, community) == Cu) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(node_attrs="community") +def within_inter_cluster(G, ebunch=None, delta=0.001, community="community"): + """Compute the ratio of within- and inter-cluster common neighbors + of all node pairs in ebunch. + + For two nodes `u` and `v`, if a common neighbor `w` belongs to the + same community as them, `w` is considered as within-cluster common + neighbor of `u` and `v`. Otherwise, it is considered as + inter-cluster common neighbor of `u` and `v`. The ratio between the + size of the set of within- and inter-cluster common neighbors is + defined as the WIC measure. [1]_ + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The WIC measure will be computed for each pair of nodes given in + the iterable. The pairs must be given as 2-tuples (u, v) where + u and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + delta : float, optional (default = 0.001) + Value to prevent division by zero in case there is no + inter-cluster common neighbor between two nodes. 
See [1]_ for + details. Default value: 0.001. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their WIC measure. + + Raises + ------ + NetworkXNotImplemented + If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`. + + NetworkXAlgorithmError + - If `delta` is less than or equal to zero. + - If no community information is available for a node in `ebunch` or in `G` (if `ebunch` is `None`). + + NodeNotFound + If `ebunch` has a node that is not in `G`. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)]) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 1 + >>> G.nodes[2]["community"] = 0 + >>> G.nodes[3]["community"] = 0 + >>> G.nodes[4]["community"] = 0 + >>> preds = nx.within_inter_cluster(G, [(0, 4)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 4) -> 1.99800200 + >>> preds = nx.within_inter_cluster(G, [(0, 4)], delta=0.5) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 4) -> 1.33333333 + + References + ---------- + .. [1] Jorge Carlos Valverde-Rebaza and Alneu de Andrade Lopes. + Link prediction in complex networks based on cluster information. + In Proceedings of the 21st Brazilian conference on Advances in + Artificial Intelligence (SBIA'12) + https://doi.org/10.1007/978-3-642-34459-6_10 + """ + if delta <= 0: + raise nx.NetworkXAlgorithmError("Delta must be greater than zero") + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + if Cu != Cv: + return 0 + cnbors = nx.common_neighbors(G, u, v) + within = {w for w in cnbors if _community(G, w, community) == Cu} + inter = cnbors - within + return len(within) / (len(inter) + delta) + + return _apply_prediction(G, predict, ebunch) + + +def _community(G, u, community): + """Get the community of the given node.""" + node_u = G.nodes[u] + try: + return node_u[community] + except KeyError as err: + raise nx.NetworkXAlgorithmError( + f"No community information available for Node {u}" + ) from err diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/lowest_common_ancestors.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/lowest_common_ancestors.py new file mode 100644 index 0000000..963a783 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/lowest_common_ancestors.py @@ -0,0 +1,280 @@ +"""Algorithms for finding the lowest common ancestor of trees and DAGs.""" + +from collections import defaultdict +from collections.abc import Mapping, Set +from itertools import combinations_with_replacement + +import networkx as nx +from networkx.utils import UnionFind, arbitrary_element, not_implemented_for + +__all__ = [ + "all_pairs_lowest_common_ancestor", + "tree_all_pairs_lowest_common_ancestor", + "lowest_common_ancestor", +] + + +@not_implemented_for("undirected") +@nx._dispatchable +def all_pairs_lowest_common_ancestor(G, pairs=None): + """Return the lowest common ancestor of all pairs or the provided pairs + + Parameters + ---------- + G : NetworkX directed graph + + pairs : iterable of pairs of nodes, optional (default: all pairs) + The pairs of nodes of interest. 
+ If None, will find the LCA of all pairs of nodes. + + Yields + ------ + ((node1, node2), lca) : 2-tuple + Where lca is least common ancestor of node1 and node2. + Note that for the default case, the order of the node pair is not considered, + e.g. you will not get both ``(a, b)`` and ``(b, a)`` + + Raises + ------ + NetworkXPointlessConcept + If `G` is null. + NetworkXError + If `G` is not a DAG. + + Examples + -------- + >>> from pprint import pprint + + The default behavior is to yield the lowest common ancestor for all + possible combinations of nodes in `G`, including self-pairings: + + >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)]) + >>> pprint(dict(nx.all_pairs_lowest_common_ancestor(G))) + {(0, 0): 0, + (0, 1): 0, + (0, 2): 0, + (0, 3): 0, + (1, 1): 1, + (1, 2): 1, + (1, 3): 0, + (2, 2): 2, + (3, 2): 0, + (3, 3): 3} + + The pairs argument can be used to limit the output to only the + specified node pairings: + + >>> dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)])) + {(1, 2): 1, (2, 3): 0} + + Notes + ----- + Only defined on non-null directed acyclic graphs. + + See Also + -------- + lowest_common_ancestor + """ + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("LCA only defined on directed acyclic graphs.") + if len(G) == 0: + raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.") + + if pairs is None: + pairs = combinations_with_replacement(G, 2) + else: + # Convert iterator to iterable, if necessary. Trim duplicates. + pairs = dict.fromkeys(pairs) + # Verify that each of the nodes in the provided pairs is in G + nodeset = set(G) + for pair in pairs: + if set(pair) - nodeset: + raise nx.NodeNotFound( + f"Node(s) {set(pair) - nodeset} from pair {pair} not in G." + ) + + # Once input validation is done, construct the generator + def generate_lca_from_pairs(G, pairs): + ancestor_cache = {} + + for v, w in pairs: + if v not in ancestor_cache: + ancestor_cache[v] = nx.ancestors(G, v) + ancestor_cache[v].add(v) + if w not in ancestor_cache: + ancestor_cache[w] = nx.ancestors(G, w) + ancestor_cache[w].add(w) + + common_ancestors = ancestor_cache[v] & ancestor_cache[w] + + if common_ancestors: + common_ancestor = next(iter(common_ancestors)) + while True: + successor = None + for lower_ancestor in G.successors(common_ancestor): + if lower_ancestor in common_ancestors: + successor = lower_ancestor + break + if successor is None: + break + common_ancestor = successor + yield ((v, w), common_ancestor) + + return generate_lca_from_pairs(G, pairs) + + +@not_implemented_for("undirected") +@nx._dispatchable +def lowest_common_ancestor(G, node1, node2, default=None): + """Compute the lowest common ancestor of the given pair of nodes. + + Parameters + ---------- + G : NetworkX directed graph + + node1, node2 : nodes in the graph. + + default : object + Returned if no common ancestor between `node1` and `node2` + + Returns + ------- + The lowest common ancestor of node1 and node2, + or default if they have no common ancestors. 
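+    If several lowest common ancestors exist, an arbitrary one is returned.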
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> nx.add_path(G, (0, 1, 2, 3))
+    >>> nx.add_path(G, (0, 4, 3))
+    >>> nx.lowest_common_ancestor(G, 2, 4)
+    0
+
+    See Also
+    --------
+    all_pairs_lowest_common_ancestor"""
+
+    ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))
+    if ans:
+        assert len(ans) == 1
+        return ans[0][1]
+    return default
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None):
+    r"""Yield the lowest common ancestor for sets of pairs in a tree.
+
+    Parameters
+    ----------
+    G : NetworkX directed graph (must be a tree)
+
+    root : node, optional (default: None)
+        The root of the subtree to operate on.
+        If None, assume the entire graph has exactly one source and use that.
+
+    pairs : iterable or iterator of pairs of nodes, optional (default: None)
+        The pairs of interest. If None, defaults to all pairs of nodes
+        under `root` that have a lowest common ancestor.
+
+    Returns
+    -------
+    lcas : generator of tuples `((u, v), lca)` where `u` and `v` are nodes
+        in `pairs` and `lca` is their lowest common ancestor.
+
+    Examples
+    --------
+    >>> import pprint
+    >>> G = nx.DiGraph([(1, 3), (2, 4), (1, 2)])
+    >>> pprint.pprint(dict(nx.tree_all_pairs_lowest_common_ancestor(G)))
+    {(1, 1): 1,
+     (2, 1): 1,
+     (2, 2): 2,
+     (3, 1): 1,
+     (3, 2): 1,
+     (3, 3): 3,
+     (3, 4): 1,
+     (4, 1): 1,
+     (4, 2): 2,
+     (4, 4): 4}
+
+    We can also use the `pairs` argument to specify the pairs of nodes for
+    which we want to compute lowest common ancestors. Here is an example:
+
+    >>> dict(nx.tree_all_pairs_lowest_common_ancestor(G, pairs=[(1, 4), (2, 3)]))
+    {(2, 3): 1, (1, 4): 1}
+
+    Notes
+    -----
+    Only defined on non-null trees represented with directed edges from
+    parents to children. Uses Tarjan's off-line lowest-common-ancestors
+    algorithm. Runs in $O(4 \times (V + E + P))$ time, where 4 is the largest
+    value of the inverse Ackermann function likely to ever come up in actual
+    use, and $P$ is the number of pairs requested (or $V^2$ if all are needed).
+
+    Tarjan, R. E. (1979), "Applications of path compression on balanced trees",
+    Journal of the ACM 26 (4): 690-715, doi:10.1145/322154.322161.
+
+    See Also
+    --------
+    all_pairs_lowest_common_ancestor: similar routine for general DAGs
+    lowest_common_ancestor: just a single pair for general DAGs
+    """
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")
+
+    # Index pairs of interest for efficient lookup from either side.
+    if pairs is not None:
+        pair_dict = defaultdict(set)
+        # See note on all_pairs_lowest_common_ancestor.
+        if not isinstance(pairs, Mapping | Set):
+            pairs = set(pairs)
+        for u, v in pairs:
+            for n in (u, v):
+                if n not in G:
+                    msg = f"The node {str(n)} is not in the digraph."
+                    raise nx.NodeNotFound(msg)
+            pair_dict[u].add(v)
+            pair_dict[v].add(u)
+
+    # If root is not specified, find the unique node with in-degree 0 and
+    # use it. Raise an error if there is no such node, or more than one.
+    # Also check for any node with in-degree larger than 1, which would
+    # imply G is not a tree.
+    if root is None:
+        for n, deg in G.in_degree:
+            if deg == 0:
+                if root is not None:
+                    msg = "No root specified and tree has multiple sources."
+                    raise nx.NetworkXError(msg)
+                root = n
+            # checking deg>1 is not sufficient for MultiDiGraphs
+            elif deg > 1 and len(G.pred[n]) > 1:
+                msg = "Tree LCA only defined on trees; use DAG routine."
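+                # A node with two distinct parents rules out every possible
+                # root choice, so G cannot be a tree.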
+ raise nx.NetworkXError(msg) + if root is None: + raise nx.NetworkXError("Graph contains a cycle.") + + # Iterative implementation of Tarjan's offline lca algorithm + # as described in CLRS on page 521 (2nd edition)/page 584 (3rd edition) + uf = UnionFind() + ancestors = {} + for node in G: + ancestors[node] = uf[node] + + colors = defaultdict(bool) + for node in nx.dfs_postorder_nodes(G, root): + colors[node] = True + for v in pair_dict[node] if pairs is not None else G: + if colors[v]: + # If the user requested both directions of a pair, give it. + # Otherwise, just give one. + if pairs is not None and (node, v) in pairs: + yield (node, v), ancestors[uf[v]] + if pairs is None or (v, node) in pairs: + yield (v, node), ancestors[uf[v]] + if node != root: + parent = arbitrary_element(G.pred[node]) + uf.union(parent, node) + ancestors[uf[parent]] = parent diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/matching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/matching.py new file mode 100644 index 0000000..7d9fc58 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/matching.py @@ -0,0 +1,1151 @@ +"""Functions for computing and verifying matchings in a graph.""" + +from itertools import combinations, repeat + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "is_matching", + "is_maximal_matching", + "is_perfect_matching", + "max_weight_matching", + "min_weight_matching", + "maximal_matching", +] + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatchable +def maximal_matching(G): + r"""Find a maximal matching in the graph. + + A matching is a subset of edges in which no node occurs more than once. + A maximal matching cannot add more edges and still be a matching. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + matching : set + A maximal matching of the graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)]) + >>> sorted(nx.maximal_matching(G)) + [(1, 2), (3, 5)] + + Notes + ----- + The algorithm greedily selects a maximal matching M of the graph G + (i.e. no superset of M exists). It runs in $O(|E|)$ time. + """ + matching = set() + nodes = set() + for edge in G.edges(): + # If the edge isn't covered, add it to the matching + # then remove neighborhood of u and v from consideration. + u, v = edge + if u not in nodes and v not in nodes and u != v: + matching.add(edge) + nodes.update(edge) + return matching + + +def matching_dict_to_set(matching): + """Converts matching dict format to matching set format + + Converts a dictionary representing a matching (as returned by + :func:`max_weight_matching`) to a set representing a matching (as + returned by :func:`maximal_matching`). + + In the definition of maximal matching adopted by NetworkX, + self-loops are not allowed, so the provided dictionary is expected + to never have any mapping from a key to itself. However, the + dictionary is expected to have mirrored key/value pairs, for + example, key ``u`` with value ``v`` and key ``v`` with value ``u``. 
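+    Only one direction of each mirrored pair appears in the returned set.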
+
+    """
+    edges = set()
+    for edge in matching.items():
+        u, v = edge
+        if (v, u) in edges or edge in edges:
+            continue
+        if u == v:
+            raise nx.NetworkXError(f"Selfloops cannot appear in matchings {edge}")
+        edges.add(edge)
+    return edges
+
+
+@nx._dispatchable
+def is_matching(G, matching):
+    """Return True if ``matching`` is a valid matching of ``G``
+
+    A *matching* in a graph is a set of edges in which no two distinct
+    edges share a common endpoint. Each node is incident to at most one
+    edge in the matching. The edges are said to be independent.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    matching : dict or set
+        A dictionary or set representing a matching. If a dictionary, it
+        must have ``matching[u] == v`` and ``matching[v] == u`` for each
+        edge ``(u, v)`` in the matching. If a set, it must have elements
+        of the form ``(u, v)``, where ``(u, v)`` is an edge in the
+        matching.
+
+    Returns
+    -------
+    bool
+        Whether the given set or dictionary represents a valid matching
+        in the graph.
+
+    Raises
+    ------
+    NetworkXError
+        If the proposed matching has an edge to a node not in G, or if
+        the matching is not a collection of 2-tuple edges.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)])
+    >>> nx.is_matching(G, {1: 3, 2: 4})  # using dict to represent matching
+    True
+
+    >>> nx.is_matching(G, {(1, 3), (2, 4)})  # using set to represent matching
+    True
+
+    """
+    if isinstance(matching, dict):
+        matching = matching_dict_to_set(matching)
+
+    nodes = set()
+    for edge in matching:
+        if len(edge) != 2:
+            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
+        u, v = edge
+        if u not in G or v not in G:
+            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
+        if u == v:
+            return False
+        if not G.has_edge(u, v):
+            return False
+        if u in nodes or v in nodes:
+            return False
+        nodes.update(edge)
+    return True
+
+
+@nx._dispatchable
+def is_maximal_matching(G, matching):
+    """Return True if ``matching`` is a maximal matching of ``G``
+
+    A *maximal matching* in a graph is a matching in which adding any
+    edge would cause the set to no longer be a valid matching.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    matching : dict or set
+        A dictionary or set representing a matching. If a dictionary, it
+        must have ``matching[u] == v`` and ``matching[v] == u`` for each
+        edge ``(u, v)`` in the matching. If a set, it must have elements
+        of the form ``(u, v)``, where ``(u, v)`` is an edge in the
+        matching.
+
+    Returns
+    -------
+    bool
+        Whether the given set or dictionary represents a valid maximal
+        matching in the graph.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)])
+    >>> nx.is_maximal_matching(G, {(1, 2), (3, 4)})
+    True
+
+    """
+    if isinstance(matching, dict):
+        matching = matching_dict_to_set(matching)
+    # If the given set is not a matching, then it is not a maximal matching.
+    edges = set()
+    nodes = set()
+    for edge in matching:
+        if len(edge) != 2:
+            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
+        u, v = edge
+        if u not in G or v not in G:
+            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
+        if u == v:
+            return False
+        if not G.has_edge(u, v):
+            return False
+        if u in nodes or v in nodes:
+            return False
+        nodes.update(edge)
+        edges.add(edge)
+        edges.add((v, u))
+    # A matching is maximal if adding any new edge from G to it
+    # causes the resulting set to match some node twice.
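+    # (equivalently, every edge of G must have at least one matched endpoint).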
+    # Be careful to check for adding selfloops
+    for u, v in G.edges:
+        if (u, v) not in edges:
+            # could add edge (u, v) to edges and have a bigger matching
+            if u not in nodes and v not in nodes and u != v:
+                return False
+    return True
+
+
+@nx._dispatchable
+def is_perfect_matching(G, matching):
+    """Return True if ``matching`` is a perfect matching for ``G``
+
+    A *perfect matching* in a graph is a matching in which exactly one edge
+    is incident upon each vertex.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    matching : dict or set
+        A dictionary or set representing a matching. If a dictionary, it
+        must have ``matching[u] == v`` and ``matching[v] == u`` for each
+        edge ``(u, v)`` in the matching. If a set, it must have elements
+        of the form ``(u, v)``, where ``(u, v)`` is an edge in the
+        matching.
+
+    Returns
+    -------
+    bool
+        Whether the given set or dictionary represents a valid perfect
+        matching in the graph.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5), (4, 6)])
+    >>> my_match = {1: 2, 3: 5, 4: 6}
+    >>> nx.is_perfect_matching(G, my_match)
+    True
+
+    """
+    if isinstance(matching, dict):
+        matching = matching_dict_to_set(matching)
+
+    nodes = set()
+    for edge in matching:
+        if len(edge) != 2:
+            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
+        u, v = edge
+        if u not in G or v not in G:
+            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
+        if u == v:
+            return False
+        if not G.has_edge(u, v):
+            return False
+        if u in nodes or v in nodes:
+            return False
+        nodes.update(edge)
+    return len(nodes) == len(G)
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def min_weight_matching(G, weight="weight"):
+    """Compute a minimum-weight maximal matching of G.
+
+    Use the maximum-weight algorithm with edge weights subtracted
+    from the maximum weight of all edges.
+
+    A matching is a subset of edges in which no node occurs more than once.
+    The weight of a matching is the sum of the weights of its edges.
+    A maximal matching cannot add more edges and still be a matching.
+    The cardinality of a matching is the number of matched edges.
+
+    This method replaces the edge weights with 1 plus the maximum edge weight
+    minus the original edge weight.
+
+    new_weight = (max_weight + 1) - edge_weight
+
+    then runs :func:`max_weight_matching` with the new weights.
+    The max weight matching with these new weights corresponds
+    to the min weight matching using the original weights.
+    Adding 1 to the max edge weight keeps all edge weights positive
+    and as integers if they started as integers.
+
+    You might worry that adding 1 to each weight would make the algorithm
+    favor matchings with more edges. But we use the parameter
+    `maxcardinality=True` in `max_weight_matching` to ensure that the
+    number of edges in the competing matchings is the same and thus
+    the optimum does not change due to changes in the number of edges.
+
+    Read the documentation of `max_weight_matching` for more information.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+      Undirected graph
+
+    weight: string, optional (default='weight')
+       Edge data key corresponding to the edge weight.
+       If key not found, uses 1 as weight.
+
+    Returns
+    -------
+    matching : set
+        A minimum-weight maximal matching of the graph.
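+
+    Examples
+    --------
+    A small sketch (the graph and weights below are chosen arbitrarily for
+    illustration; tuple orientation in the result may vary, hence the sorting):
+
+    >>> G = nx.Graph()
+    >>> G.add_weighted_edges_from([(1, 2, 2), (2, 3, 1), (3, 4, 2)])
+    >>> sorted(sorted(e) for e in nx.min_weight_matching(G))
+    [[1, 2], [3, 4]]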
+ + See Also + -------- + max_weight_matching + """ + if len(G.edges) == 0: + return max_weight_matching(G, maxcardinality=True, weight=weight) + G_edges = G.edges(data=weight, default=1) + max_weight = 1 + max(w for _, _, w in G_edges) + InvG = nx.Graph() + edges = ((u, v, max_weight - w) for u, v, w in G_edges) + InvG.add_weighted_edges_from(edges, weight=weight) + return max_weight_matching(InvG, maxcardinality=True, weight=weight) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def max_weight_matching(G, maxcardinality=False, weight="weight"): + """Compute a maximum-weighted matching of G. + + A matching is a subset of edges in which no node occurs more than once. + The weight of a matching is the sum of the weights of its edges. + A maximal matching cannot add more edges and still be a matching. + The cardinality of a matching is the number of matched edges. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + maxcardinality: bool, optional (default=False) + If maxcardinality is True, compute the maximum-cardinality matching + with maximum weight among all maximum-cardinality matchings. + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + If key not found, uses 1 as weight. + + + Returns + ------- + matching : set + A maximal matching of the graph. + + Examples + -------- + >>> G = nx.Graph() + >>> edges = [(1, 2, 6), (1, 3, 2), (2, 3, 1), (2, 4, 7), (3, 5, 9), (4, 5, 3)] + >>> G.add_weighted_edges_from(edges) + >>> sorted(nx.max_weight_matching(G)) + [(2, 4), (5, 3)] + + Notes + ----- + If G has edges with weight attributes the edge data are used as + weight values else the weights are assumed to be 1. + + This function takes time O(number_of_nodes ** 3). + + If all edge weights are integers, the algorithm uses only integer + computations. If floating point weights are used, the algorithm + could return a slightly suboptimal matching due to numeric + precision errors. + + This method is based on the "blossom" method for finding augmenting + paths and the "primal-dual" method for finding a matching of maximum + weight, both methods invented by Jack Edmonds [1]_. + + Bipartite graphs can also be matched using the functions present in + :mod:`networkx.algorithms.bipartite.matching`. + + References + ---------- + .. [1] "Efficient Algorithms for Finding Maximum Matching in Graphs", + Zvi Galil, ACM Computing Surveys, 1986. + """ + # + # The algorithm is taken from "Efficient Algorithms for Finding Maximum + # Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986. + # It is based on the "blossom" method for finding augmenting paths and + # the "primal-dual" method for finding a matching of maximum weight, both + # methods invented by Jack Edmonds. + # + # A C program for maximum weight matching by Ed Rothberg was used + # extensively to validate this new code. + # + # Many terms used in the code comments are explained in the paper + # by Galil. You will probably need the paper to make sense of this code. + # + + class NoNode: + """Dummy value which is different from any node.""" + + class Blossom: + """Representation of a non-trivial blossom or sub-blossom.""" + + __slots__ = ["childs", "edges", "mybestedges"] + + # b.childs is an ordered list of b's sub-blossoms, starting with + # the base and going round the blossom. 
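+        # (The list is cyclic: the edge after childs[-1] leads back to
+        # childs[0], which is why b.edges below uses wrap(i+1).)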
+ + # b.edges is the list of b's connecting edges, such that + # b.edges[i] = (v, w) where v is a vertex in b.childs[i] + # and w is a vertex in b.childs[wrap(i+1)]. + + # If b is a top-level S-blossom, + # b.mybestedges is a list of least-slack edges to neighboring + # S-blossoms, or None if no such list has been computed yet. + # This is used for efficient computation of delta3. + + # Generate the blossom's leaf vertices. + def leaves(self): + stack = [*self.childs] + while stack: + t = stack.pop() + if isinstance(t, Blossom): + stack.extend(t.childs) + else: + yield t + + # Get a list of vertices. + gnodes = list(G) + if not gnodes: + return set() # don't bother with empty graphs + + # Find the maximum edge weight. + maxweight = 0 + allinteger = True + for i, j, d in G.edges(data=True): + wt = d.get(weight, 1) + if i != j and wt > maxweight: + maxweight = wt + allinteger = allinteger and (str(type(wt)).split("'")[1] in ("int", "long")) + + # If v is a matched vertex, mate[v] is its partner vertex. + # If v is a single vertex, v does not occur as a key in mate. + # Initially all vertices are single; updated during augmentation. + mate = {} + + # If b is a top-level blossom, + # label.get(b) is None if b is unlabeled (free), + # 1 if b is an S-blossom, + # 2 if b is a T-blossom. + # The label of a vertex is found by looking at the label of its top-level + # containing blossom. + # If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable + # from an S-vertex outside the blossom. + # Labels are assigned during a stage and reset after each augmentation. + label = {} + + # If b is a labeled top-level blossom, + # labeledge[b] = (v, w) is the edge through which b obtained its label + # such that w is a vertex in b, or None if b's base vertex is single. + # If w is a vertex inside a T-blossom and label[w] == 2, + # labeledge[w] = (v, w) is an edge through which w is reachable from + # outside the blossom. + labeledge = {} + + # If v is a vertex, inblossom[v] is the top-level blossom to which v + # belongs. + # If v is a top-level vertex, inblossom[v] == v since v is itself + # a (trivial) top-level blossom. + # Initially all vertices are top-level trivial blossoms. + inblossom = dict(zip(gnodes, gnodes)) + + # If b is a sub-blossom, + # blossomparent[b] is its immediate parent (sub-)blossom. + # If b is a top-level blossom, blossomparent[b] is None. + blossomparent = dict(zip(gnodes, repeat(None))) + + # If b is a (sub-)blossom, + # blossombase[b] is its base VERTEX (i.e. recursive sub-blossom). + blossombase = dict(zip(gnodes, gnodes)) + + # If w is a free vertex (or an unreached vertex inside a T-blossom), + # bestedge[w] = (v, w) is the least-slack edge from an S-vertex, + # or None if there is no such edge. + # If b is a (possibly trivial) top-level S-blossom, + # bestedge[b] = (v, w) is the least-slack edge to a different S-blossom + # (v inside b), or None if there is no such edge. + # This is used for efficient computation of delta2 and delta3. + bestedge = {} + + # If v is a vertex, + # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual + # optimization problem (if all edge weights are integers, multiplication + # by two ensures that all values remain integers throughout the algorithm). + # Initially, u(v) = maxweight / 2. + dualvar = dict(zip(gnodes, repeat(maxweight))) + + # If b is a non-trivial blossom, + # blossomdual[b] = z(b) where z(b) is b's variable in the dual + # optimization problem. 
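+    # No non-trivial blossoms exist at the start, so the mapping is empty.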
+ blossomdual = {} + + # If (v, w) in allowedge or (w, v) in allowedg, then the edge + # (v, w) is known to have zero slack in the optimization problem; + # otherwise the edge may or may not have zero slack. + allowedge = {} + + # Queue of newly discovered S-vertices. + queue = [] + + # Return 2 * slack of edge (v, w) (does not work inside blossoms). + def slack(v, w): + return dualvar[v] + dualvar[w] - 2 * G[v][w].get(weight, 1) + + # Assign label t to the top-level blossom containing vertex w, + # coming through an edge from vertex v. + def assignLabel(w, t, v): + b = inblossom[w] + assert label.get(w) is None and label.get(b) is None + label[w] = label[b] = t + if v is not None: + labeledge[w] = labeledge[b] = (v, w) + else: + labeledge[w] = labeledge[b] = None + bestedge[w] = bestedge[b] = None + if t == 1: + # b became an S-vertex/blossom; add it(s vertices) to the queue. + if isinstance(b, Blossom): + queue.extend(b.leaves()) + else: + queue.append(b) + elif t == 2: + # b became a T-vertex/blossom; assign label S to its mate. + # (If b is a non-trivial blossom, its base is the only vertex + # with an external mate.) + base = blossombase[b] + assignLabel(mate[base], 1, base) + + # Trace back from vertices v and w to discover either a new blossom + # or an augmenting path. Return the base vertex of the new blossom, + # or NoNode if an augmenting path was found. + def scanBlossom(v, w): + # Trace back from v and w, placing breadcrumbs as we go. + path = [] + base = NoNode + while v is not NoNode: + # Look for a breadcrumb in v's blossom or put a new breadcrumb. + b = inblossom[v] + if label[b] & 4: + base = blossombase[b] + break + assert label[b] == 1 + path.append(b) + label[b] = 5 + # Trace one step back. + if labeledge[b] is None: + # The base of blossom b is single; stop tracing this path. + assert blossombase[b] not in mate + v = NoNode + else: + assert labeledge[b][0] == mate[blossombase[b]] + v = labeledge[b][0] + b = inblossom[v] + assert label[b] == 2 + # b is a T-blossom; trace one more step back. + v = labeledge[b][0] + # Swap v and w so that we alternate between both paths. + if w is not NoNode: + v, w = w, v + # Remove breadcrumbs. + for b in path: + label[b] = 1 + # Return base vertex, if we found one. + return base + + # Construct a new blossom with given base, through S-vertices v and w. + # Label the new blossom as S; set its dual variable to zero; + # relabel its T-vertices to S and add them to the queue. + def addBlossom(base, v, w): + bb = inblossom[base] + bv = inblossom[v] + bw = inblossom[w] + # Create blossom. + b = Blossom() + blossombase[b] = base + blossomparent[b] = None + blossomparent[bb] = b + # Make list of sub-blossoms and their interconnecting edge endpoints. + b.childs = path = [] + b.edges = edgs = [(v, w)] + # Trace back from v to base. + while bv != bb: + # Add bv to the new blossom. + blossomparent[bv] = b + path.append(bv) + edgs.append(labeledge[bv]) + assert label[bv] == 2 or ( + label[bv] == 1 and labeledge[bv][0] == mate[blossombase[bv]] + ) + # Trace one step back. + v = labeledge[bv][0] + bv = inblossom[v] + # Add base sub-blossom; reverse lists. + path.append(bb) + path.reverse() + edgs.reverse() + # Trace back from w to base. + while bw != bb: + # Add bw to the new blossom. + blossomparent[bw] = b + path.append(bw) + edgs.append((labeledge[bw][1], labeledge[bw][0])) + assert label[bw] == 2 or ( + label[bw] == 1 and labeledge[bw][0] == mate[blossombase[bw]] + ) + # Trace one step back. 
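+            # (follow the labelling edge one step closer to the base).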
+ w = labeledge[bw][0] + bw = inblossom[w] + # Set label to S. + assert label[bb] == 1 + label[b] = 1 + labeledge[b] = labeledge[bb] + # Set dual variable to zero. + blossomdual[b] = 0 + # Relabel vertices. + for v in b.leaves(): + if label[inblossom[v]] == 2: + # This T-vertex now turns into an S-vertex because it becomes + # part of an S-blossom; add it to the queue. + queue.append(v) + inblossom[v] = b + # Compute b.mybestedges. + bestedgeto = {} + for bv in path: + if isinstance(bv, Blossom): + if bv.mybestedges is not None: + # Walk this subblossom's least-slack edges. + nblist = bv.mybestedges + # The sub-blossom won't need this data again. + bv.mybestedges = None + else: + # This subblossom does not have a list of least-slack + # edges; get the information from the vertices. + nblist = [ + (v, w) for v in bv.leaves() for w in G.neighbors(v) if v != w + ] + else: + nblist = [(bv, w) for w in G.neighbors(bv) if bv != w] + for k in nblist: + (i, j) = k + if inblossom[j] == b: + i, j = j, i + bj = inblossom[j] + if ( + bj != b + and label.get(bj) == 1 + and ((bj not in bestedgeto) or slack(i, j) < slack(*bestedgeto[bj])) + ): + bestedgeto[bj] = k + # Forget about least-slack edge of the subblossom. + bestedge[bv] = None + b.mybestedges = list(bestedgeto.values()) + # Select bestedge[b]. + mybestedge = None + bestedge[b] = None + for k in b.mybestedges: + kslack = slack(*k) + if mybestedge is None or kslack < mybestslack: + mybestedge = k + mybestslack = kslack + bestedge[b] = mybestedge + + # Expand the given top-level blossom. + def expandBlossom(b, endstage): + # This is an obnoxiously complicated recursive function for the sake of + # a stack-transformation. So, we hack around the complexity by using + # a trampoline pattern. By yielding the arguments to each recursive + # call, we keep the actual callstack flat. + + def _recurse(b, endstage): + # Convert sub-blossoms into top-level blossoms. + for s in b.childs: + blossomparent[s] = None + if isinstance(s, Blossom): + if endstage and blossomdual[s] == 0: + # Recursively expand this sub-blossom. + yield s + else: + for v in s.leaves(): + inblossom[v] = s + else: + inblossom[s] = s + # If we expand a T-blossom during a stage, its sub-blossoms must be + # relabeled. + if (not endstage) and label.get(b) == 2: + # Start at the sub-blossom through which the expanding + # blossom obtained its label, and relabel sub-blossoms untili + # we reach the base. + # Figure out through which sub-blossom the expanding blossom + # obtained its label initially. + entrychild = inblossom[labeledge[b][1]] + # Decide in which direction we will go round the blossom. + j = b.childs.index(entrychild) + if j & 1: + # Start index is odd; go forward and wrap. + j -= len(b.childs) + jstep = 1 + else: + # Start index is even; go backward. + jstep = -1 + # Move along the blossom until we get to the base. + v, w = labeledge[b] + while j != 0: + # Relabel the T-sub-blossom. + if jstep == 1: + p, q = b.edges[j] + else: + q, p = b.edges[j - 1] + label[w] = None + label[q] = None + assignLabel(w, 2, v) + # Step to the next S-sub-blossom and note its forward edge. + allowedge[(p, q)] = allowedge[(q, p)] = True + j += jstep + if jstep == 1: + v, w = b.edges[j] + else: + w, v = b.edges[j - 1] + # Step to the next T-sub-blossom. + allowedge[(v, w)] = allowedge[(w, v)] = True + j += jstep + # Relabel the base T-sub-blossom WITHOUT stepping through to + # its mate (so don't call assignLabel). 
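+                # Only the base sub-blossom itself receives label T here.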
+ bw = b.childs[j] + label[w] = label[bw] = 2 + labeledge[w] = labeledge[bw] = (v, w) + bestedge[bw] = None + # Continue along the blossom until we get back to entrychild. + j += jstep + while b.childs[j] != entrychild: + # Examine the vertices of the sub-blossom to see whether + # it is reachable from a neighboring S-vertex outside the + # expanding blossom. + bv = b.childs[j] + if label.get(bv) == 1: + # This sub-blossom just got label S through one of its + # neighbors; leave it be. + j += jstep + continue + if isinstance(bv, Blossom): + for v in bv.leaves(): + if label.get(v): + break + else: + v = bv + # If the sub-blossom contains a reachable vertex, assign + # label T to the sub-blossom. + if label.get(v): + assert label[v] == 2 + assert inblossom[v] == bv + label[v] = None + label[mate[blossombase[bv]]] = None + assignLabel(v, 2, labeledge[v][0]) + j += jstep + # Remove the expanded blossom entirely. + label.pop(b, None) + labeledge.pop(b, None) + bestedge.pop(b, None) + del blossomparent[b] + del blossombase[b] + del blossomdual[b] + + # Now, we apply the trampoline pattern. We simulate a recursive + # callstack by maintaining a stack of generators, each yielding a + # sequence of function arguments. We grow the stack by appending a call + # to _recurse on each argument tuple, and shrink the stack whenever a + # generator is exhausted. + stack = [_recurse(b, endstage)] + while stack: + top = stack[-1] + for s in top: + stack.append(_recurse(s, endstage)) + break + else: + stack.pop() + + # Swap matched/unmatched edges over an alternating path through blossom b + # between vertex v and the base vertex. Keep blossom bookkeeping + # consistent. + def augmentBlossom(b, v): + # This is an obnoxiously complicated recursive function for the sake of + # a stack-transformation. So, we hack around the complexity by using + # a trampoline pattern. By yielding the arguments to each recursive + # call, we keep the actual callstack flat. + + def _recurse(b, v): + # Bubble up through the blossom tree from vertex v to an immediate + # sub-blossom of b. + t = v + while blossomparent[t] != b: + t = blossomparent[t] + # Recursively deal with the first sub-blossom. + if isinstance(t, Blossom): + yield (t, v) + # Decide in which direction we will go round the blossom. + i = j = b.childs.index(t) + if i & 1: + # Start index is odd; go forward and wrap. + j -= len(b.childs) + jstep = 1 + else: + # Start index is even; go backward. + jstep = -1 + # Move along the blossom until we get to the base. + while j != 0: + # Step to the next sub-blossom and augment it recursively. + j += jstep + t = b.childs[j] + if jstep == 1: + w, x = b.edges[j] + else: + x, w = b.edges[j - 1] + if isinstance(t, Blossom): + yield (t, w) + # Step to the next sub-blossom and augment it recursively. + j += jstep + t = b.childs[j] + if isinstance(t, Blossom): + yield (t, x) + # Match the edge connecting those sub-blossoms. + mate[w] = x + mate[x] = w + # Rotate the list of sub-blossoms to put the new base at the front. + b.childs = b.childs[i:] + b.childs[:i] + b.edges = b.edges[i:] + b.edges[:i] + blossombase[b] = blossombase[b.childs[0]] + assert blossombase[b] == v + + # Now, we apply the trampoline pattern. We simulate a recursive + # callstack by maintaining a stack of generators, each yielding a + # sequence of function arguments. We grow the stack by appending a call + # to _recurse on each argument tuple, and shrink the stack whenever a + # generator is exhausted. 
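+        # The stack never grows deeper than the nesting depth of the blossoms.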
+ stack = [_recurse(b, v)] + while stack: + top = stack[-1] + for args in top: + stack.append(_recurse(*args)) + break + else: + stack.pop() + + # Swap matched/unmatched edges over an alternating path between two + # single vertices. The augmenting path runs through S-vertices v and w. + def augmentMatching(v, w): + for s, j in ((v, w), (w, v)): + # Match vertex s to vertex j. Then trace back from s + # until we find a single vertex, swapping matched and unmatched + # edges as we go. + while 1: + bs = inblossom[s] + assert label[bs] == 1 + assert (labeledge[bs] is None and blossombase[bs] not in mate) or ( + labeledge[bs][0] == mate[blossombase[bs]] + ) + # Augment through the S-blossom from s to base. + if isinstance(bs, Blossom): + augmentBlossom(bs, s) + # Update mate[s] + mate[s] = j + # Trace one step back. + if labeledge[bs] is None: + # Reached single vertex; stop. + break + t = labeledge[bs][0] + bt = inblossom[t] + assert label[bt] == 2 + # Trace one more step back. + s, j = labeledge[bt] + # Augment through the T-blossom from j to base. + assert blossombase[bt] == t + if isinstance(bt, Blossom): + augmentBlossom(bt, j) + # Update mate[j] + mate[j] = s + + # Verify that the optimum solution has been reached. + def verifyOptimum(): + if maxcardinality: + # Vertices may have negative dual; + # find a constant non-negative number to add to all vertex duals. + vdualoffset = max(0, -min(dualvar.values())) + else: + vdualoffset = 0 + # 0. all dual variables are non-negative + assert min(dualvar.values()) + vdualoffset >= 0 + assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0 + # 0. all edges have non-negative slack and + # 1. all matched edges have zero slack; + for i, j, d in G.edges(data=True): + wt = d.get(weight, 1) + if i == j: + continue # ignore self-loops + s = dualvar[i] + dualvar[j] - 2 * wt + iblossoms = [i] + jblossoms = [j] + while blossomparent[iblossoms[-1]] is not None: + iblossoms.append(blossomparent[iblossoms[-1]]) + while blossomparent[jblossoms[-1]] is not None: + jblossoms.append(blossomparent[jblossoms[-1]]) + iblossoms.reverse() + jblossoms.reverse() + for bi, bj in zip(iblossoms, jblossoms): + if bi != bj: + break + s += 2 * blossomdual[bi] + assert s >= 0 + if mate.get(i) == j or mate.get(j) == i: + assert mate[i] == j and mate[j] == i + assert s == 0 + # 2. all single vertices have zero dual value; + for v in gnodes: + assert (v in mate) or dualvar[v] + vdualoffset == 0 + # 3. all blossoms with positive dual value are full. + for b in blossomdual: + if blossomdual[b] > 0: + assert len(b.edges) % 2 == 1 + for i, j in b.edges[1::2]: + assert mate[i] == j and mate[j] == i + # Ok. + + # Main loop: continue until no further improvement is possible. + while 1: + # Each iteration of this loop is a "stage". + # A stage finds an augmenting path and uses that to improve + # the matching. + + # Remove labels from top-level blossoms/vertices. + label.clear() + labeledge.clear() + + # Forget all about least-slack edges. + bestedge.clear() + for b in blossomdual: + b.mybestedges = None + + # Loss of labeling means that we can not be sure that currently + # allowable edges remain allowable throughout this stage. + allowedge.clear() + + # Make queue empty. + queue[:] = [] + + # Label single blossoms/vertices with S and put them in the queue. + for v in gnodes: + if (v not in mate) and label.get(inblossom[v]) is None: + assignLabel(v, 1, None) + + # Loop until we succeed in augmenting the matching. 
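+        # `augmented` is set to 1 as soon as an augmenting path is applied.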
+ augmented = 0 + while 1: + # Each iteration of this loop is a "substage". + # A substage tries to find an augmenting path; + # if found, the path is used to improve the matching and + # the stage ends. If there is no augmenting path, the + # primal-dual method is used to pump some slack out of + # the dual variables. + + # Continue labeling until all vertices which are reachable + # through an alternating path have got a label. + while queue and not augmented: + # Take an S vertex from the queue. + v = queue.pop() + assert label[inblossom[v]] == 1 + + # Scan its neighbors: + for w in G.neighbors(v): + if w == v: + continue # ignore self-loops + # w is a neighbor to v + bv = inblossom[v] + bw = inblossom[w] + if bv == bw: + # this edge is internal to a blossom; ignore it + continue + if (v, w) not in allowedge: + kslack = slack(v, w) + if kslack <= 0: + # edge k has zero slack => it is allowable + allowedge[(v, w)] = allowedge[(w, v)] = True + if (v, w) in allowedge: + if label.get(bw) is None: + # (C1) w is a free vertex; + # label w with T and label its mate with S (R12). + assignLabel(w, 2, v) + elif label.get(bw) == 1: + # (C2) w is an S-vertex (not in the same blossom); + # follow back-links to discover either an + # augmenting path or a new blossom. + base = scanBlossom(v, w) + if base is not NoNode: + # Found a new blossom; add it to the blossom + # bookkeeping and turn it into an S-blossom. + addBlossom(base, v, w) + else: + # Found an augmenting path; augment the + # matching and end this stage. + augmentMatching(v, w) + augmented = 1 + break + elif label.get(w) is None: + # w is inside a T-blossom, but w itself has not + # yet been reached from outside the blossom; + # mark it as reached (we need this to relabel + # during T-blossom expansion). + assert label[bw] == 2 + label[w] = 2 + labeledge[w] = (v, w) + elif label.get(bw) == 1: + # keep track of the least-slack non-allowable edge to + # a different S-blossom. + if bestedge.get(bv) is None or kslack < slack(*bestedge[bv]): + bestedge[bv] = (v, w) + elif label.get(w) is None: + # w is a free vertex (or an unreached vertex inside + # a T-blossom) but we can not reach it yet; + # keep track of the least-slack edge that reaches w. + if bestedge.get(w) is None or kslack < slack(*bestedge[w]): + bestedge[w] = (v, w) + + if augmented: + break + + # There is no augmenting path under these constraints; + # compute delta and reduce slack in the optimization problem. + # (Note that our vertex dual variables, edge slacks and delta's + # are pre-multiplied by two.) + deltatype = -1 + delta = deltaedge = deltablossom = None + + # Compute delta1: the minimum value of any vertex dual. + if not maxcardinality: + deltatype = 1 + delta = min(dualvar.values()) + + # Compute delta2: the minimum slack on any edge between + # an S-vertex and a free vertex. + for v in G.nodes(): + if label.get(inblossom[v]) is None and bestedge.get(v) is not None: + d = slack(*bestedge[v]) + if deltatype == -1 or d < delta: + delta = d + deltatype = 2 + deltaedge = bestedge[v] + + # Compute delta3: half the minimum slack on any edge between + # a pair of S-blossoms. + for b in blossomparent: + if ( + blossomparent[b] is None + and label.get(b) == 1 + and bestedge.get(b) is not None + ): + kslack = slack(*bestedge[b]) + if allinteger: + assert (kslack % 2) == 0 + d = kslack // 2 + else: + d = kslack / 2.0 + if deltatype == -1 or d < delta: + delta = d + deltatype = 3 + deltaedge = bestedge[b] + + # Compute delta4: minimum z variable of any T-blossom. 
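+            # (such a blossom can be expanded once its dual variable reaches zero).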
+ for b in blossomdual: + if ( + blossomparent[b] is None + and label.get(b) == 2 + and (deltatype == -1 or blossomdual[b] < delta) + ): + delta = blossomdual[b] + deltatype = 4 + deltablossom = b + + if deltatype == -1: + # No further improvement possible; max-cardinality optimum + # reached. Do a final delta update to make the optimum + # verifiable. + assert maxcardinality + deltatype = 1 + delta = max(0, min(dualvar.values())) + + # Update dual variables according to delta. + for v in gnodes: + if label.get(inblossom[v]) == 1: + # S-vertex: 2*u = 2*u - 2*delta + dualvar[v] -= delta + elif label.get(inblossom[v]) == 2: + # T-vertex: 2*u = 2*u + 2*delta + dualvar[v] += delta + for b in blossomdual: + if blossomparent[b] is None: + if label.get(b) == 1: + # top-level S-blossom: z = z + 2*delta + blossomdual[b] += delta + elif label.get(b) == 2: + # top-level T-blossom: z = z - 2*delta + blossomdual[b] -= delta + + # Take action at the point where minimum delta occurred. + if deltatype == 1: + # No further improvement possible; optimum reached. + break + elif deltatype == 2: + # Use the least-slack edge to continue the search. + (v, w) = deltaedge + assert label[inblossom[v]] == 1 + allowedge[(v, w)] = allowedge[(w, v)] = True + queue.append(v) + elif deltatype == 3: + # Use the least-slack edge to continue the search. + (v, w) = deltaedge + allowedge[(v, w)] = allowedge[(w, v)] = True + assert label[inblossom[v]] == 1 + queue.append(v) + elif deltatype == 4: + # Expand the least-z blossom. + expandBlossom(deltablossom, False) + + # End of a this substage. + + # Paranoia check that the matching is symmetric. + for v in mate: + assert mate[mate[v]] == v + + # Stop when no more augmenting path can be found. + if not augmented: + break + + # End of a stage; expand all S-blossoms which have zero dual. + for b in list(blossomdual.keys()): + if b not in blossomdual: + continue # already expanded + if blossomparent[b] is None and label.get(b) == 1 and blossomdual[b] == 0: + expandBlossom(b, True) + + # Verify that we reached the optimum solution (only for integer weights). + if allinteger: + verifyOptimum() + + return matching_dict_to_set(mate) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__init__.py new file mode 100644 index 0000000..cf15ddb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__init__.py @@ -0,0 +1,27 @@ +""" +Subpackages related to graph-minor problems. + +In graph theory, an undirected graph H is called a minor of the graph G if H +can be formed from G by deleting edges and vertices and by contracting edges +[1]_. + +References +---------- +.. 
[1] https://en.wikipedia.org/wiki/Graph_minor +""" + +from networkx.algorithms.minors.contraction import ( + contracted_edge, + contracted_nodes, + equivalence_classes, + identified_nodes, + quotient_graph, +) + +__all__ = [ + "contracted_edge", + "contracted_nodes", + "equivalence_classes", + "identified_nodes", + "quotient_graph", +] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..c9107eb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__pycache__/contraction.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__pycache__/contraction.cpython-312.pyc new file mode 100644 index 0000000..d0828bb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/__pycache__/contraction.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/contraction.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/contraction.py new file mode 100644 index 0000000..0d27c5c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/contraction.py @@ -0,0 +1,666 @@ +"""Provides functions for computing minors of a graph.""" + +from itertools import chain, combinations, permutations, product + +import networkx as nx +from networkx import density +from networkx.exception import NetworkXException +from networkx.utils import arbitrary_element + +__all__ = [ + "contracted_edge", + "contracted_nodes", + "equivalence_classes", + "identified_nodes", + "quotient_graph", +] + +chaini = chain.from_iterable + + +def equivalence_classes(iterable, relation): + """Returns equivalence classes of `relation` when applied to `iterable`. + + The equivalence classes, or blocks, consist of objects from `iterable` + which are all equivalent. They are defined to be equivalent if the + `relation` function returns `True` when passed any two objects from that + class, and `False` otherwise. To define an equivalence relation the + function must be reflexive, symmetric and transitive. + + Parameters + ---------- + iterable : list, tuple, or set + An iterable of elements/nodes. + + relation : function + A Boolean-valued function that implements an equivalence relation + (reflexive, symmetric, transitive binary relation) on the elements + of `iterable` - it must take two elements and return `True` if + they are related, or `False` if not. + + Returns + ------- + set of frozensets + A set of frozensets representing the partition induced by the equivalence + relation function `relation` on the elements of `iterable`. Each + member set in the return set represents an equivalence class, or + block, of the partition. + + Duplicate elements will be ignored so it makes the most sense for + `iterable` to be a :class:`set`. + + Notes + ----- + This function does not check that `relation` represents an equivalence + relation. You can check that your equivalence classes provide a partition + using `is_partition`. 
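+
+    For example (a small sketch; the relation and graph here are arbitrary),
+    an odd/even relation on the integers 0-3 yields two classes that
+    partition the nodes of the corresponding path graph:
+
+    >>> parts = equivalence_classes(range(4), lambda x, y: (x - y) % 2 == 0)
+    >>> nx.community.is_partition(nx.path_graph(4), parts)
+    True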
+ + Examples + -------- + Let `X` be the set of integers from `0` to `9`, and consider an equivalence + relation `R` on `X` of congruence modulo `3`: this means that two integers + `x` and `y` in `X` are equivalent under `R` if they leave the same + remainder when divided by `3`, i.e. `(x - y) mod 3 = 0`. + + The equivalence classes of this relation are `{0, 3, 6, 9}`, `{1, 4, 7}`, + `{2, 5, 8}`: `0`, `3`, `6`, `9` are all divisible by `3` and leave zero + remainder; `1`, `4`, `7` leave remainder `1`; while `2`, `5` and `8` leave + remainder `2`. We can see this by calling `equivalence_classes` with + `X` and a function implementation of `R`. + + >>> X = set(range(10)) + >>> def mod3(x, y): + ... return (x - y) % 3 == 0 + >>> equivalence_classes(X, mod3) # doctest: +SKIP + {frozenset({1, 4, 7}), frozenset({8, 2, 5}), frozenset({0, 9, 3, 6})} + """ + # For simplicity of implementation, we initialize the return value as a + # list of lists, then convert it to a set of sets at the end of the + # function. + blocks = [] + # Determine the equivalence class for each element of the iterable. + for y in iterable: + # Each element y must be in *exactly one* equivalence class. + # + # Each block is guaranteed to be non-empty + for block in blocks: + x = arbitrary_element(block) + if relation(x, y): + block.append(y) + break + else: + # If the element y is not part of any known equivalence class, it + # must be in its own, so we create a new singleton equivalence + # class for it. + blocks.append([y]) + return {frozenset(block) for block in blocks} + + +@nx._dispatchable(edge_attrs="weight", returns_graph=True) +def quotient_graph( + G, + partition, + edge_relation=None, + node_data=None, + edge_data=None, + weight="weight", + relabel=False, + create_using=None, +): + """Returns the quotient graph of `G` under the specified equivalence + relation on nodes. + + Parameters + ---------- + G : NetworkX graph + The graph for which to return the quotient graph with the + specified node relation. + + partition : function, or dict or list of lists, tuples or sets + If a function, this function must represent an equivalence + relation on the nodes of `G`. It must take two arguments *u* + and *v* and return True exactly when *u* and *v* are in the + same equivalence class. The equivalence classes form the nodes + in the returned graph. + + If a dict of lists/tuples/sets, the keys can be any meaningful + block labels, but the values must be the block lists/tuples/sets + (one list/tuple/set per block), and the blocks must form a valid + partition of the nodes of the graph. That is, each node must be + in exactly one block of the partition. + + If a list of sets, the list must form a valid partition of + the nodes of the graph. That is, each node must be in exactly + one block of the partition. + + edge_relation : Boolean function with two arguments + This function must represent an edge relation on the *blocks* of + the `partition` of `G`. It must take two arguments, *B* and *C*, + each one a set of nodes, and return True exactly when there should be + an edge joining block *B* to block *C* in the returned graph. + + If `edge_relation` is not specified, it is assumed to be the + following relation. Block *B* is related to block *C* if and + only if some node in *B* is adjacent to some node in *C*, + according to the edge set of `G`. 
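+        That is, the default places an edge between two blocks whenever at
+        least one edge of `G` runs between them.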
+ + node_data : function + This function takes one argument, *B*, a set of nodes in `G`, + and must return a dictionary representing the node data + attributes to set on the node representing *B* in the quotient graph. + If None, the following node attributes will be set: + + * 'graph', the subgraph of the graph `G` that this block + represents, + * 'nnodes', the number of nodes in this block, + * 'nedges', the number of edges within this block, + * 'density', the density of the subgraph of `G` that this + block represents. + + edge_data : function + This function takes two arguments, *B* and *C*, each one a set + of nodes, and must return a dictionary representing the edge + data attributes to set on the edge joining *B* and *C*, should + there be an edge joining *B* and *C* in the quotient graph (if + no such edge occurs in the quotient graph as determined by + `edge_relation`, then the output of this function is ignored). + + If the quotient graph would be a multigraph, this function is + not applied, since the edge data from each edge in the graph + `G` appears in the edges of the quotient graph. + + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + + relabel : bool + If True, relabel the nodes of the quotient graph to be + nonnegative integers. Otherwise, the nodes are identified with + :class:`frozenset` instances representing the blocks given in + `partition`. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + The quotient graph of `G` under the equivalence relation + specified by `partition`. If the partition were given as a + list of :class:`set` instances and `relabel` is False, + each node will be a :class:`frozenset` corresponding to the same + :class:`set`. + + Raises + ------ + NetworkXException + If the given partition is not a valid partition of the nodes of + `G`. + + Examples + -------- + The quotient graph of the complete bipartite graph under the "same + neighbors" equivalence relation is `K_2`. Under this relation, two nodes + are equivalent if they are not adjacent but have the same neighbor set. + + >>> G = nx.complete_bipartite_graph(2, 3) + >>> same_neighbors = lambda u, v: (u not in G[v] and v not in G[u] and G[u] == G[v]) + >>> Q = nx.quotient_graph(G, same_neighbors) + >>> K2 = nx.complete_graph(2) + >>> nx.is_isomorphic(Q, K2) + True + + The quotient graph of a directed graph under the "same strongly connected + component" equivalence relation is the condensation of the graph (see + :func:`condensation`). This example comes from the Wikipedia article + *`Strongly connected component`_*. + + >>> G = nx.DiGraph() + >>> edges = [ + ... "ab", + ... "be", + ... "bf", + ... "bc", + ... "cg", + ... "cd", + ... "dc", + ... "dh", + ... "ea", + ... "ef", + ... "fg", + ... "gf", + ... "hd", + ... "hf", + ... 
]
+    >>> G.add_edges_from(tuple(x) for x in edges)
+    >>> components = list(nx.strongly_connected_components(G))
+    >>> sorted(sorted(component) for component in components)
+    [['a', 'b', 'e'], ['c', 'd', 'h'], ['f', 'g']]
+    >>>
+    >>> C = nx.condensation(G, components)
+    >>> component_of = C.graph["mapping"]
+    >>> same_component = lambda u, v: component_of[u] == component_of[v]
+    >>> Q = nx.quotient_graph(G, same_component)
+    >>> nx.is_isomorphic(C, Q)
+    True
+
+    Node identification can be represented as the quotient of a graph under the
+    equivalence relation that places the two nodes in one block and each other
+    node in its own singleton block.
+
+    >>> K24 = nx.complete_bipartite_graph(2, 4)
+    >>> K34 = nx.complete_bipartite_graph(3, 4)
+    >>> C = nx.contracted_nodes(K34, 1, 2)
+    >>> nodes = {1, 2}
+    >>> is_contracted = lambda u, v: u in nodes and v in nodes
+    >>> Q = nx.quotient_graph(K34, is_contracted)
+    >>> nx.is_isomorphic(Q, C)
+    True
+    >>> nx.is_isomorphic(Q, K24)
+    True
+
+    The blockmodeling technique described in [1]_ can be implemented as a
+    quotient graph.
+
+    >>> G = nx.path_graph(6)
+    >>> partition = [{0, 1}, {2, 3}, {4, 5}]
+    >>> M = nx.quotient_graph(G, partition, relabel=True)
+    >>> list(M.edges())
+    [(0, 1), (1, 2)]
+
+    Here is the same example, but using `partition` as a dict of block sets.
+
+    >>> G = nx.path_graph(6)
+    >>> partition = {0: {0, 1}, 2: {2, 3}, 4: {4, 5}}
+    >>> M = nx.quotient_graph(G, partition, relabel=True)
+    >>> list(M.edges())
+    [(0, 1), (1, 2)]
+
+    Partitions can be represented in various ways:
+
+    0. a list/tuple/set of block lists/tuples/sets
+    1. a dict with block labels as keys and block lists/tuples/sets as values
+    2. a dict with block lists/tuples/sets as keys and block labels as values
+    3. a function from nodes in the original iterable to block labels
+    4. an equivalence relation function on the target iterable
+
+    As `quotient_graph` is designed to accept partitions represented as (0), (1) or
+    (4) only, the `equivalence_classes` function can be used to get the partitions
+    in the right form, in order to call `quotient_graph`.
+
+    .. _Strongly connected component: https://en.wikipedia.org/wiki/Strongly_connected_component
+
+    References
+    ----------
+    .. [1] Patrick Doreian, Vladimir Batagelj, and Anuska Ferligoj.
+       *Generalized Blockmodeling*.
+       Cambridge University Press, 2004.
+
+    """
+    # If the user provided an equivalence relation as a function, compute
+    # the blocks of the partition on the nodes of G induced by the
+    # equivalence relation.
+    if callable(partition):
+        # equivalence_classes always returns a partition of the whole of G.
+        partition = equivalence_classes(G, partition)
+        if not nx.community.is_partition(G, partition):
+            raise nx.NetworkXException(
+                "Input `partition` is not an equivalence relation for nodes of G"
+            )
+        return _quotient_graph(
+            G,
+            partition,
+            edge_relation,
+            node_data,
+            edge_data,
+            weight,
+            relabel,
+            create_using,
+        )
+
+    # If the partition is a dict, it is assumed to be one where the keys are
+    # user-defined block labels, and values are block lists, tuples or sets.
+    if isinstance(partition, dict):
+        partition = list(partition.values())
+
+    # If the user provided the partition as a collection of sets, we need to
+    # check whether it covers all of G's nodes. If it does not, we restrict
+    # the computation to a suitable subgraph view.
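+    # The subgraph view is cheap: it shares node and edge data with G.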
+ partition_nodes = set().union(*partition) + if len(partition_nodes) != len(G): + G = G.subgraph(partition_nodes) + # Each node in the graph/subgraph must be in exactly one block. + if not nx.community.is_partition(G, partition): + raise NetworkXException("each node must be in exactly one part of `partition`") + return _quotient_graph( + G, + partition, + edge_relation, + node_data, + edge_data, + weight, + relabel, + create_using, + ) + + +def _quotient_graph( + G, partition, edge_relation, node_data, edge_data, weight, relabel, create_using +): + """Construct the quotient graph assuming input has been checked""" + if create_using is None: + H = G.__class__() + else: + H = nx.empty_graph(0, create_using) + # By default set some basic information about the subgraph that each block + # represents on the nodes in the quotient graph. + if node_data is None: + + def node_data(b): + S = G.subgraph(b) + return { + "graph": S, + "nnodes": len(S), + "nedges": S.number_of_edges(), + "density": density(S), + } + + # Each block of the partition becomes a node in the quotient graph. + partition = [frozenset(b) for b in partition] + H.add_nodes_from((b, node_data(b)) for b in partition) + # By default, the edge relation is the relation defined as follows. B is + # adjacent to C if a node in B is adjacent to a node in C, according to the + # edge set of G. + # + # This is not a particularly efficient implementation of this relation: + # there are O(n^2) pairs to check and each check may require O(log n) time + # (to check set membership). This can certainly be parallelized. + if edge_relation is None: + + def edge_relation(b, c): + return any(v in G[u] for u, v in product(b, c)) + + # By default, sum the weights of the edges joining pairs of nodes across + # blocks to get the weight of the edge joining those two blocks. + if edge_data is None: + + def edge_data(b, c): + edgedata = ( + d + for u, v, d in G.edges(b | c, data=True) + if (u in b and v in c) or (u in c and v in b) + ) + return {"weight": sum(d.get(weight, 1) for d in edgedata)} + + block_pairs = permutations(H, 2) if H.is_directed() else combinations(H, 2) + # In a multigraph, add one edge in the quotient graph for each edge + # in the original graph. + if H.is_multigraph(): + edges = chaini( + ( + (b, c, G.get_edge_data(u, v, default={})) + for u, v in product(b, c) + if v in G[u] + ) + for b, c in block_pairs + if edge_relation(b, c) + ) + # In a simple graph, apply the edge data function to each pair of + # blocks to determine the edge data attributes to apply to each edge + # in the quotient graph. + else: + edges = ( + (b, c, edge_data(b, c)) for (b, c) in block_pairs if edge_relation(b, c) + ) + H.add_edges_from(edges) + # If requested by the user, relabel the nodes to be integers, + # numbered in increasing order from zero in the same order as the + # iteration order of `partition`. + if relabel: + # Can't use nx.convert_node_labels_to_integers() here since we + # want the order of iteration to be the same for backward + # compatibility with the nx.blockmodel() function. + labels = {b: i for i, b in enumerate(partition)} + H = nx.relabel_nodes(H, labels) + return H + + +@nx._dispatchable( + preserve_all_attrs=True, mutates_input={"not copy": 4}, returns_graph=True +) +def contracted_nodes( + G, u, v, self_loops=True, copy=True, *, store_contraction_as="contraction" +): + """Returns the graph that results from contracting `u` and `v`. 
+
+    Node contraction identifies the two nodes as a single node incident to any
+    edge that was incident to the original two nodes.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph whose nodes will be contracted.
+
+    u, v : nodes
+        Must be nodes in `G`.
+
+    self_loops : Boolean
+        If this is True, any edges joining `u` and `v` in `G` become
+        self-loops on the new node in the returned graph.
+
+    copy : Boolean
+        If this is True (default True), make a copy of
+        `G` and return that instead of directly changing `G`.
+
+    store_contraction_as : str or None, default="contraction"
+        Name of the node/edge attribute where information about the contraction
+        should be stored. By default, information about the contracted node and
+        any contracted edges is stored in a ``"contraction"`` attribute on the
+        resulting node and edge. If `None`, information about the contracted
+        nodes/edges and their data is not stored.
+
+    Returns
+    -------
+    NetworkX graph
+        If copy is True,
+            A new graph object of the same type as `G` (leaving `G` unmodified)
+            with `u` and `v` identified in a single node. The right node `v`
+            will be merged into the node `u`, so only `u` will appear in the
+            returned graph.
+        If copy is False,
+            Modifies `G` with `u` and `v` identified in a single node.
+            The right node `v` will be merged into the node `u`, so
+            only `u` will appear in the returned graph.
+
+    Notes
+    -----
+    For multigraphs, the edge keys for the realigned edges may
+    not be the same as the edge keys for the old edges. This is
+    natural because edge keys are unique only within each pair of nodes.
+
+    For non-multigraphs where `u` and `v` are adjacent to a third node
+    `w`, the edge (`v`, `w`) will be contracted into the edge (`u`,
+    `w`), with its attributes stored in the attribute named by
+    `store_contraction_as` (``"contraction"`` by default).
+
+    This function is also available as `identified_nodes`.
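+
+    A minimal illustration of the stored attributes (assuming the default
+    ``store_contraction_as="contraction"``):
+
+    >>> G = nx.Graph([("u", "w"), ("v", "w")])
+    >>> H = nx.contracted_nodes(G, "u", "v")
+    >>> H.nodes["u"]["contraction"]
+    {'v': {}}
+    >>> H.edges["u", "w"]["contraction"]
+    {('v', 'w'): {}}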
+
+    Examples
+    --------
+    Contracting two nonadjacent nodes of the cycle graph on four nodes `C_4`
+    yields the path graph (ignoring parallel edges):
+
+    >>> G = nx.cycle_graph(4)
+    >>> M = nx.contracted_nodes(G, 1, 3)
+    >>> P3 = nx.path_graph(3)
+    >>> nx.is_isomorphic(M, P3)
+    True
+
+    >>> G = nx.MultiGraph(P3)
+    >>> M = nx.contracted_nodes(G, 0, 2)
+    >>> M.edges
+    MultiEdgeView([(0, 1, 0), (0, 1, 1)])
+
+    >>> G = nx.Graph([(1, 2), (2, 2)])
+    >>> H = nx.contracted_nodes(G, 1, 2, self_loops=False)
+    >>> list(H.nodes())
+    [1]
+    >>> list(H.edges())
+    [(1, 1)]
+
+    In a ``MultiDiGraph``, a self-loop on the contracted node is seen both as
+    an in-edge and as an out-edge, so contracting a node that has a self-loop
+    adds multiple edges:
+
+    >>> G = nx.MultiDiGraph([(1, 2), (2, 2)])
+    >>> H = nx.contracted_nodes(G, 1, 2)
+    >>> list(H.edges())  # edges 1->2, 2->2, 2<-2 from the original graph G
+    [(1, 1), (1, 1), (1, 1)]
+    >>> H = nx.contracted_nodes(G, 1, 2, self_loops=False)
+    >>> list(H.edges())  # edges 2->2, 2<-2 from the original graph G
+    [(1, 1), (1, 1)]
+
+    See Also
+    --------
+    contracted_edge
+    quotient_graph
+
+    """
+    # Copying has significant overhead and can be disabled if needed
+    H = G.copy() if copy else G
+
+    # edge code uses G.edges(v) instead of G.adj[v] to handle multiedges
+    if H.is_directed():
+        edges_to_remap = chain(G.in_edges(v, data=True), G.out_edges(v, data=True))
+    else:
+        edges_to_remap = G.edges(v, data=True)
+
+    # If H is G (copy=False), the generators would change as H is mutated.
+    # Materializing the list makes edges_to_remap independent of H.
+    if not copy:
+        edges_to_remap = list(edges_to_remap)
+
+    v_data = H.nodes[v]
+    H.remove_node(v)
+
+    # A bit of input munging to extract whether contraction info should be
+    # stored, and if so bind to a shorter name
+    if _store_contraction := (store_contraction_as is not None):
+        contraction = store_contraction_as
+
+    for prev_w, prev_x, d in edges_to_remap:
+        w = prev_w if prev_w != v else u
+        x = prev_x if prev_x != v else u
+
+        if ({prev_w, prev_x} == {u, v}) and not self_loops:
+            continue
+
+        if not H.has_edge(w, x) or G.is_multigraph():
+            H.add_edge(w, x, **d)
+            continue
+
+        # Store information about the contracted edge if `store_contraction_as` is not None
+        if _store_contraction:
+            if contraction in H.edges[(w, x)]:
+                H.edges[(w, x)][contraction][(prev_w, prev_x)] = d
+            else:
+                H.edges[(w, x)][contraction] = {(prev_w, prev_x): d}
+
+    # Store information about the contracted node if `store_contraction_as` is not None
+    if _store_contraction:
+        if contraction in H.nodes[u]:
+            H.nodes[u][contraction][v] = v_data
+        else:
+            H.nodes[u][contraction] = {v: v_data}
+
+    return H
+
+
+identified_nodes = contracted_nodes
+
+
+@nx._dispatchable(
+    preserve_all_attrs=True, mutates_input={"not copy": 3}, returns_graph=True
+)
+def contracted_edge(
+    G, edge, self_loops=True, copy=True, *, store_contraction_as="contraction"
+):
+    """Returns the graph that results from contracting the specified edge.
+
+    Edge contraction identifies the two endpoints of the edge as a single node
+    incident to any edge that was incident to the original two nodes. A graph
+    that results from edge contraction is called a *minor* of the original
+    graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph whose edge will be contracted.
+
+    edge : tuple
+        Must be a pair of nodes in `G`.
+
+    self_loops : Boolean
+        If this is True, any edges (including `edge`) joining the
+        endpoints of `edge` in `G` become self-loops on the new node in the
+        returned graph.
+
+    copy : Boolean (default True)
+        If this is True, the contraction will be performed on a copy of `G`,
+        otherwise the contraction will happen in place.
+
+    store_contraction_as : str or None, default="contraction"
+        Name of the node/edge attribute where information about the contraction
+        should be stored. By default, information about the contracted node and
+        any contracted edges is stored in a ``"contraction"`` attribute on the
+        resulting node and edge. If `None`, information about the contracted
+        nodes/edges and their data is not stored.
+
+    Returns
+    -------
+    NetworkX graph
+        A new graph object of the same type as `G` (leaving `G` unmodified)
+        with endpoints of `edge` identified in a single node. The right node
+        of `edge` will be merged into the left one, so only the left one will
+        appear in the returned graph.
+
+    Raises
+    ------
+    ValueError
+        If `edge` is not an edge in `G`.
+
+    Examples
+    --------
+    Attempting to contract two nonadjacent nodes yields an error:
+
+    >>> G = nx.cycle_graph(4)
+    >>> nx.contracted_edge(G, (1, 3))
+    Traceback (most recent call last):
+      ...
+    ValueError: Edge (1, 3) does not exist in graph G; cannot contract it
+
+    Contracting two adjacent nodes in the cycle graph on *n* nodes yields the
+    cycle graph on *n - 1* nodes:
+
+    >>> C5 = nx.cycle_graph(5)
+    >>> C4 = nx.cycle_graph(4)
+    >>> M = nx.contracted_edge(C5, (0, 1), self_loops=False)
+    >>> nx.is_isomorphic(M, C4)
+    True
+
+    See Also
+    --------
+    contracted_nodes
+    quotient_graph
+
+    """
+    u, v = edge[:2]
+    if not G.has_edge(u, v):
+        raise ValueError(f"Edge {edge} does not exist in graph G; cannot contract it")
+    return contracted_nodes(
+        G,
+        u,
+        v,
+        self_loops=self_loops,
+        copy=copy,
+        store_contraction_as=store_contraction_as,
+    )
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-312.pyc
new file mode 100644
index 0000000..a227de4
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/tests/test_contraction.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/tests/test_contraction.py
new file mode 100644
index 0000000..2cecfa1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/minors/tests/test_contraction.py
@@ -0,0 +1,544 @@
+"""Unit tests for the :mod:`networkx.algorithms.minors.contraction` module."""
+
+import pytest
+
+import networkx as nx
+from networkx.utils import arbitrary_element, edges_equal, nodes_equal
+
+
+def test_quotient_graph_complete_multipartite():
+    """Tests that the quotient graph of the complete *n*-partite graph
+    under the "same neighbors" node relation is the complete graph on *n*
+    nodes.
+
+    """
+    G = nx.complete_multipartite_graph(2, 3, 4)
+    # Two nodes are equivalent if they are not adjacent but have the same
+    # neighbor set.
+
+    def same_neighbors(u, v):
+        return u not in G[v] and v not in G[u] and G[u] == G[v]
+
+    expected = nx.complete_graph(3)
+    actual = nx.quotient_graph(G, same_neighbors)
+    # It won't take too long to run a graph isomorphism algorithm on such
+    # small graphs.
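+    # ("same neighbors" is an equivalence relation here: two nodes of a
+    # complete multipartite graph are nonadjacent with identical neighbor
+    # sets exactly when they belong to the same partite set.)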
+ assert nx.is_isomorphic(expected, actual) + + +def test_quotient_graph_complete_bipartite(): + """Tests that the quotient graph of the complete bipartite graph under + the "same neighbors" node relation is `K_2`. + + """ + G = nx.complete_bipartite_graph(2, 3) + # Two nodes are equivalent if they are not adjacent but have the same + # neighbor set. + + def same_neighbors(u, v): + return u not in G[v] and v not in G[u] and G[u] == G[v] + + expected = nx.complete_graph(2) + actual = nx.quotient_graph(G, same_neighbors) + # It won't take too long to run a graph isomorphism algorithm on such + # small graphs. + assert nx.is_isomorphic(expected, actual) + + +def test_quotient_graph_edge_relation(): + """Tests for specifying an alternate edge relation for the quotient + graph. + + """ + G = nx.path_graph(5) + + def identity(u, v): + return u == v + + def same_parity(b, c): + return arbitrary_element(b) % 2 == arbitrary_element(c) % 2 + + actual = nx.quotient_graph(G, identity, same_parity) + expected = nx.Graph() + expected.add_edges_from([(0, 2), (0, 4), (2, 4)]) + expected.add_edge(1, 3) + assert nx.is_isomorphic(actual, expected) + + +def test_condensation_as_quotient(): + """This tests that the condensation of a graph can be viewed as the + quotient graph under the "in the same connected component" equivalence + relation. + + """ + # This example graph comes from the file `test_strongly_connected.py`. + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 11), + (2, 12), + (3, 4), + (4, 3), + (4, 5), + (5, 6), + (6, 5), + (6, 7), + (7, 8), + (7, 9), + (7, 10), + (8, 9), + (9, 7), + (10, 6), + (11, 2), + (11, 4), + (11, 6), + (12, 6), + (12, 11), + ] + ) + scc = list(nx.strongly_connected_components(G)) + C = nx.condensation(G, scc) + component_of = C.graph["mapping"] + # Two nodes are equivalent if they are in the same connected component. 
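+    # (for this directed graph, "connected component" means *strongly*
+    # connected component)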
+ + def same_component(u, v): + return component_of[u] == component_of[v] + + Q = nx.quotient_graph(G, same_component) + assert nx.is_isomorphic(C, Q) + + +def test_path(): + G = nx.path_graph(6) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_path__partition_provided_as_dict_of_lists(): + G = nx.path_graph(6) + partition = {0: [0, 1], 2: [2, 3], 4: [4, 5]} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_path__partition_provided_as_dict_of_tuples(): + G = nx.path_graph(6) + partition = {0: (0, 1), 2: (2, 3), 4: (4, 5)} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_path__partition_provided_as_dict_of_sets(): + G = nx.path_graph(6) + partition = {0: {0, 1}, 2: {2, 3}, 4: {4, 5}} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_multigraph_path(): + G = nx.MultiGraph(nx.path_graph(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_directed_path(): + G = nx.DiGraph() + nx.add_path(G, range(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 0.5 + + +def test_directed_multigraph_path(): + G = nx.MultiDiGraph() + nx.add_path(G, range(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 0.5 + + +def test_overlapping_blocks(): + with pytest.raises(nx.NetworkXException): + G = nx.path_graph(6) + partition = [{0, 1, 2}, {2, 3}, {4, 5}] + nx.quotient_graph(G, partition) + + +def test_weighted_path(): + G = nx.path_graph(6) + for i in range(5): + G[i][i + 1]["w"] = i + 1 + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, weight="w", relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + assert M[0][1]["weight"] == 2 + assert M[1][2]["weight"] == 4 + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_barbell(): + G = nx.barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + M = 
nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1]) + assert edges_equal(M.edges(), [(0, 1)]) + for n in M: + assert M.nodes[n]["nedges"] == 3 + assert M.nodes[n]["nnodes"] == 3 + assert M.nodes[n]["density"] == 1 + + +def test_barbell_plus(): + G = nx.barbell_graph(3, 0) + # Add an extra edge joining the bells. + G.add_edge(0, 5) + partition = [{0, 1, 2}, {3, 4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1]) + assert edges_equal(M.edges(), [(0, 1)]) + assert M[0][1]["weight"] == 2 + for n in M: + assert M.nodes[n]["nedges"] == 3 + assert M.nodes[n]["nnodes"] == 3 + assert M.nodes[n]["density"] == 1 + + +def test_blockmodel(): + G = nx.path_graph(6) + partition = [[0, 1], [2, 3], [4, 5]] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M.nodes(), [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M.nodes(): + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1.0 + + +def test_multigraph_blockmodel(): + G = nx.MultiGraph(nx.path_graph(6)) + partition = [[0, 1], [2, 3], [4, 5]] + M = nx.quotient_graph(G, partition, create_using=nx.MultiGraph(), relabel=True) + assert nodes_equal(M.nodes(), [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M.nodes(): + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1.0 + + +def test_quotient_graph_incomplete_partition(): + G = nx.path_graph(6) + partition = [] + H = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(H.nodes(), []) + assert edges_equal(H.edges(), []) + + partition = [[0, 1], [2, 3], [5]] + H = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(H.nodes(), [0, 1, 2]) + assert edges_equal(H.edges(), [(0, 1)]) + + +@pytest.mark.parametrize("store_contraction_as", ("contraction", "c", None)) +@pytest.mark.parametrize("copy", (True, False)) +@pytest.mark.parametrize("selfloops", (True, False)) +def test_undirected_node_contraction(store_contraction_as, copy, selfloops): + """Tests for node contraction in an undirected graph.""" + G = nx.cycle_graph(4) + actual = nx.contracted_nodes( + G, + 0, + 1, + copy=copy, + self_loops=selfloops, + store_contraction_as=store_contraction_as, + ) + + expected = nx.cycle_graph(3) + if selfloops: + expected.add_edge(0, 0) + + assert nx.is_isomorphic(actual, expected) + + if not copy: + assert actual is G + + # Test contracted node attributes + if store_contraction_as is not None: + assert actual.nodes[0][store_contraction_as] == {1: {}} + else: + assert actual.nodes[0] == {} + # There should be no contracted edges for this case + assert all(d == {} for _, _, d in actual.edges(data=True)) + + +@pytest.mark.parametrize("store_contraction_as", ("contraction", "c", None)) +@pytest.mark.parametrize("copy", (True, False)) +@pytest.mark.parametrize("selfloops", (True, False)) +def test_directed_node_contraction(store_contraction_as, copy, selfloops): + """Tests for node contraction in a directed graph.""" + G = nx.DiGraph(nx.cycle_graph(4)) + actual = nx.contracted_nodes( + G, + 0, + 1, + copy=copy, + self_loops=selfloops, + store_contraction_as=store_contraction_as, + ) + + expected = nx.DiGraph(nx.cycle_graph(3)) + if selfloops: + expected.add_edge(0, 0) + + assert nx.is_isomorphic(actual, expected) + + if not copy: + assert actual is G + # Test contracted node attributes + if store_contraction_as is not None: + assert 
actual.nodes[0][store_contraction_as] == {1: {}} + else: + assert actual.nodes[0] == {} + # Test contracted edge attributes (only relevant if self loops is enabled) + if selfloops and store_contraction_as: + assert actual.edges[(0, 0)][store_contraction_as] == {(1, 0): {}} + else: + assert all(d == {} for _, _, d in actual.edges(data=True)) + + +@pytest.mark.parametrize("store_contraction_as", ("contraction", "c", None)) +@pytest.mark.parametrize("copy", (True, False)) +@pytest.mark.parametrize("selfloops", (True, False)) +def test_contracted_nodes_multigraph(store_contraction_as, copy, selfloops): + """Tests that using a MultiGraph creates multiple edges. `store_contraction_as` + has no effect for multigraphs.""" + G = nx.path_graph(3, create_using=nx.MultiGraph) + G.add_edges_from([(0, 1), (0, 0), (0, 2)]) + actual = nx.contracted_nodes( + G, + 0, + 2, + copy=copy, + self_loops=selfloops, + store_contraction_as=store_contraction_as, + ) + # Two (0, 1) edges from G, another from the contraction of edge (1, 2) + expected = nx.MultiGraph([(0, 1), (0, 1), (0, 1), (0, 0)]) + # One (0, 0) edge from G, another from the contraction of edge (0, 2), but + # only if `selfloops` is True + if selfloops: + expected.add_edge(0, 0) + + assert edges_equal(actual.edges, expected.edges) + if not copy: + assert actual is G + + +def test_multigraph_keys(): + """Tests that multiedge keys are reset in new graph.""" + G = nx.path_graph(3, create_using=nx.MultiGraph()) + G.add_edge(0, 1, 5) + G.add_edge(0, 0, 0) + G.add_edge(0, 2, 5) + actual = nx.contracted_nodes(G, 0, 2) + expected = nx.MultiGraph() + expected.add_edge(0, 1, 0) + expected.add_edge(0, 1, 5) + expected.add_edge(0, 1, 2) # keyed as 2 b/c 2 edges already in G + expected.add_edge(0, 0, 0) + expected.add_edge(0, 0, 1) # this comes from (0, 2, 5) + assert edges_equal(actual.edges, expected.edges) + + +@pytest.mark.parametrize("store_contraction_as", ("contraction", "c", None)) +@pytest.mark.parametrize("copy", (True, False)) +@pytest.mark.parametrize("selfloops", (True, False)) +def test_node_attributes(store_contraction_as, copy, selfloops): + """Tests that node contraction preserves node attributes.""" + G = nx.cycle_graph(4) + # Add some data to the two nodes being contracted. + G.nodes[0]["foo"] = "bar" + G.nodes[1]["baz"] = "xyzzy" + actual = nx.contracted_nodes( + G, + 0, + 1, + copy=copy, + self_loops=selfloops, + store_contraction_as=store_contraction_as, + ) + # We expect that contracting the nodes 0 and 1 in C_4 yields K_3, but + # with nodes labeled 0, 2, and 3. + expected = nx.complete_graph(3) + expected = nx.relabel_nodes(expected, {1: 2, 2: 3}) + expected.nodes[0]["foo"] = "bar" + # ... 
and a self-loop (0, 0), if self_loops=True + if selfloops: + expected.add_edge(0, 0) + + if store_contraction_as: + cdict = {1: {"baz": "xyzzy"}} + expected.nodes[0].update({"foo": "bar", store_contraction_as: cdict}) + + assert nx.is_isomorphic(actual, expected) + assert actual.nodes(data=True) == expected.nodes(data=True) + if not copy: + assert actual is G + + +@pytest.mark.parametrize("store_contraction_as", ("contraction", "c", None)) +def test_edge_attributes(store_contraction_as): + """Tests that node contraction preserves edge attributes.""" + # Shape: src1 --> dest <-- src2 + G = nx.DiGraph([("src1", "dest"), ("src2", "dest")]) + G["src1"]["dest"]["value"] = "src1-->dest" + G["src2"]["dest"]["value"] = "src2-->dest" + + # New Shape: src1 --> dest + H = nx.contracted_nodes( + G, "src1", "src2", store_contraction_as=store_contraction_as + ) + assert H.edges[("src1", "dest")]["value"] == "src1-->dest" # Should be unchanged + if store_contraction_as: + assert ( + H.edges[("src1", "dest")][store_contraction_as][("src2", "dest")]["value"] + == "src2-->dest" + ) + else: + assert store_contraction_as not in H.edges[("src1", "dest")] + + G = nx.MultiDiGraph(G) + # New Shape: src1 -(x2)-> dest + H = nx.contracted_nodes( + G, "src1", "src2", store_contraction_as=store_contraction_as + ) + # store_contraction should not affect multigraphs + assert len(H.edges(("src1", "dest"))) == 2 + assert H.edges[("src1", "dest", 0)]["value"] == "src1-->dest" + assert H.edges[("src1", "dest", 1)]["value"] == "src2-->dest" + + +def test_contract_loop_graph(): + """Tests for node contraction when nodes have loops.""" + G = nx.cycle_graph(4) + G.add_edge(0, 0) + actual = nx.contracted_nodes(G, 0, 1) + expected = nx.complete_graph([0, 2, 3]) + expected.add_edge(0, 0) + assert edges_equal(actual.edges, expected.edges) + actual = nx.contracted_nodes(G, 1, 0) + expected = nx.complete_graph([1, 2, 3]) + expected.add_edge(1, 1) + assert edges_equal(actual.edges, expected.edges) + + +@pytest.mark.parametrize("store_contraction_as", ("contraction", "c", None)) +@pytest.mark.parametrize("copy", (True, False)) +@pytest.mark.parametrize("selfloops", (True, False)) +def test_undirected_edge_contraction(store_contraction_as, copy, selfloops): + """Tests for node contraction in an undirected graph.""" + G = nx.cycle_graph(4) + actual = nx.contracted_edge( + G, + (0, 1), + copy=copy, + self_loops=selfloops, + store_contraction_as=store_contraction_as, + ) + + expected = nx.cycle_graph(3) + if selfloops: + expected.add_edge(0, 0) + + assert nx.is_isomorphic(actual, expected) + + if not copy: + assert actual is G + + # Test contracted node attributes + if store_contraction_as is not None: + assert actual.nodes[0][store_contraction_as] == {1: {}} + else: + assert actual.nodes[0] == {} + # There should be no contracted edges for this case + assert all(d == {} for _, _, d in actual.edges(data=True)) + + +@pytest.mark.parametrize("edge", [(0, 1), (0, 1, 0)]) +@pytest.mark.parametrize("store_contraction_as", ("contraction", "c", None)) +@pytest.mark.parametrize("copy", [True, False]) +@pytest.mark.parametrize("selfloops", [True, False]) +def test_multigraph_edge_contraction(edge, store_contraction_as, copy, selfloops): + """Tests for edge contraction in a multigraph""" + G = nx.cycle_graph(4, create_using=nx.MultiGraph) + actual = nx.contracted_edge( + G, + edge, + copy=copy, + self_loops=selfloops, + store_contraction_as=store_contraction_as, + ) + expected = nx.relabel_nodes( + nx.complete_graph(3, create_using=nx.MultiGraph), 
{0: 0, 1: 2, 2: 3} + ) + if selfloops: + expected.add_edge(0, 0) + + assert edges_equal(actual.edges, expected.edges) + if not copy: + assert actual is G + + +def test_nonexistent_edge(): + """Tests that attempting to contract a nonexistent edge raises an + exception. + + """ + G = nx.cycle_graph(4) + with pytest.raises(ValueError): + nx.contracted_edge(G, (0, 2)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/mis.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/mis.py new file mode 100644 index 0000000..0652ac4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/mis.py @@ -0,0 +1,78 @@ +""" +Algorithm to find a maximal (not maximum) independent set. + +""" + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["maximal_independent_set"] + + +@not_implemented_for("directed") +@py_random_state(2) +@nx._dispatchable +def maximal_independent_set(G, nodes=None, seed=None): + """Returns a random maximal independent set guaranteed to contain + a given set of nodes. + + An independent set is a set of nodes such that the subgraph + of G induced by these nodes contains no edges. A maximal + independent set is an independent set such that it is not possible + to add a new node and still get an independent set. + + Parameters + ---------- + G : NetworkX graph + + nodes : list or iterable + Nodes that must be part of the independent set. This set of nodes + must be independent. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + indep_nodes : list + List of nodes that are part of a maximal independent set. + + Raises + ------ + NetworkXUnfeasible + If the nodes in the provided list are not part of the graph or + do not form an independent set, an exception is raised. + + NetworkXNotImplemented + If `G` is directed. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.maximal_independent_set(G) # doctest: +SKIP + [4, 0, 2] + >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP + [1, 3] + + Notes + ----- + This algorithm does not solve the maximum independent set problem. + + """ + if not nodes: + nodes = {seed.choice(list(G))} + else: + nodes = set(nodes) + if not nodes.issubset(G): + raise nx.NetworkXUnfeasible(f"{nodes} is not a subset of the nodes of G") + neighbors = set.union(*[set(G.adj[v]) for v in nodes]) + if set.intersection(neighbors, nodes): + raise nx.NetworkXUnfeasible(f"{nodes} is not an independent set of G") + indep_nodes = list(nodes) + available_nodes = set(G.nodes()).difference(neighbors.union(nodes)) + while available_nodes: + node = seed.choice(list(available_nodes)) + indep_nodes.append(node) + available_nodes.difference_update(list(G.adj[node]) + [node]) + return indep_nodes diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/moral.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/moral.py new file mode 100644 index 0000000..e2acf80 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/moral.py @@ -0,0 +1,59 @@ +r"""Function for computing the moral graph of a directed graph.""" + +import itertools + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["moral_graph"] + + +@not_implemented_for("undirected") +@nx._dispatchable(returns_graph=True) +def moral_graph(G): + r"""Return the Moral Graph + + Returns the moralized graph of a given directed graph. 
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Directed graph
+
+    Returns
+    -------
+    H : NetworkX graph
+        The undirected moralized graph of G
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If `G` is undirected.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (2, 3), (2, 5), (3, 4), (4, 3)])
+    >>> G_moral = nx.moral_graph(G)
+    >>> G_moral.edges()
+    EdgeView([(1, 2), (2, 3), (2, 5), (2, 4), (3, 4)])
+
+    Notes
+    -----
+    A moral graph is an undirected graph H = (V, E) generated from a
+    directed graph: if a node has more than one parent node, edges
+    between these parent nodes are inserted and all directed edges become
+    undirected.
+
+    https://en.wikipedia.org/wiki/Moral_graph
+
+    References
+    ----------
+    .. [1] Wray L. Buntine. 1995. Chain graphs for learning.
+           In Proceedings of the Eleventh conference on Uncertainty
+           in artificial intelligence (UAI'95)
+    """
+    H = G.to_undirected()
+    for preds in G.pred.values():
+        predecessors_combinations = itertools.combinations(preds, r=2)
+        H.add_edges_from(predecessors_combinations)
+    return H
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/node_classification.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/node_classification.py
new file mode 100644
index 0000000..b69a6c9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/node_classification.py
@@ -0,0 +1,219 @@
+"""This module provides functions for the node classification problem.
+
+The functions in this module are not imported
+into the top level `networkx` namespace.
+You can access these functions by importing
+the `networkx.algorithms.node_classification` module,
+then accessing the functions as attributes of `node_classification`.
+For example:
+
+    >>> from networkx.algorithms import node_classification
+    >>> G = nx.path_graph(4)
+    >>> G.edges()
+    EdgeView([(0, 1), (1, 2), (2, 3)])
+    >>> G.nodes[0]["label"] = "A"
+    >>> G.nodes[3]["label"] = "B"
+    >>> node_classification.harmonic_function(G)
+    ['A', 'A', 'B', 'B']
+
+References
+----------
+Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August).
+Semi-supervised learning using gaussian fields and harmonic functions.
+In ICML (Vol. 3, pp. 912-919).
+"""
+
+import networkx as nx
+
+__all__ = ["harmonic_function", "local_and_global_consistency"]
+
+
+@nx.utils.not_implemented_for("directed")
+@nx._dispatchable(node_attrs="label_name")
+def harmonic_function(G, max_iter=30, label_name="label"):
+    """Node classification by Harmonic function
+
+    Function for computing Harmonic function algorithm by Zhu et al.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+    max_iter : int
+        Maximum number of iterations allowed
+    label_name : string
+        Name of target labels to predict
+
+    Returns
+    -------
+    predicted : list
+        List of length ``len(G)`` with the predicted labels for each node.
+
+    Raises
+    ------
+    NetworkXError
+        If no nodes in `G` have attribute `label_name`.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import node_classification
+    >>> G = nx.path_graph(4)
+    >>> G.nodes[0]["label"] = "A"
+    >>> G.nodes[3]["label"] = "B"
+    >>> G.nodes(data=True)
+    NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}})
+    >>> G.edges()
+    EdgeView([(0, 1), (1, 2), (2, 3)])
+    >>> predicted = node_classification.harmonic_function(G)
+    >>> predicted
+    ['A', 'A', 'B', 'B']
+
+    References
+    ----------
+    Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August).
+    Semi-supervised learning using gaussian fields and harmonic functions.
+    In ICML (Vol. 3, pp. 912-919).
+ """ + import numpy as np + import scipy as sp + + X = nx.to_scipy_sparse_array(G) # adjacency matrix + labels, label_dict = _get_label_info(G, label_name) + + if labels.shape[0] == 0: + raise nx.NetworkXError( + f"No node on the input graph is labeled by '{label_name}'." + ) + + n_samples = X.shape[0] + n_classes = label_dict.shape[0] + F = np.zeros((n_samples, n_classes)) + + # Build propagation matrix + degrees = X.sum(axis=0) + degrees[degrees == 0] = 1 # Avoid division by 0 + # TODO: csr_array + D = sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0)) + P = (D @ X).tolil() + P[labels[:, 0]] = 0 # labels[:, 0] indicates IDs of labeled nodes + # Build base matrix + B = np.zeros((n_samples, n_classes)) + B[labels[:, 0], labels[:, 1]] = 1 + + for _ in range(max_iter): + F = (P @ F) + B + + return label_dict[np.argmax(F, axis=1)].tolist() + + +@nx.utils.not_implemented_for("directed") +@nx._dispatchable(node_attrs="label_name") +def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name="label"): + """Node classification by Local and Global Consistency + + Function for computing Local and global consistency algorithm by Zhou et al. + + Parameters + ---------- + G : NetworkX Graph + alpha : float + Clamping factor + max_iter : int + Maximum number of iterations allowed + label_name : string + Name of target labels to predict + + Returns + ------- + predicted : list + List of length ``len(G)`` with the predicted labels for each node. + + Raises + ------ + NetworkXError + If no nodes in `G` have attribute `label_name`. + + Examples + -------- + >>> from networkx.algorithms import node_classification + >>> G = nx.path_graph(4) + >>> G.nodes[0]["label"] = "A" + >>> G.nodes[3]["label"] = "B" + >>> G.nodes(data=True) + NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}}) + >>> G.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + >>> predicted = node_classification.local_and_global_consistency(G) + >>> predicted + ['A', 'A', 'B', 'B'] + + References + ---------- + Zhou, D., Bousquet, O., Lal, T. N., Weston, J., & Schölkopf, B. (2004). + Learning with local and global consistency. + Advances in neural information processing systems, 16(16), 321-328. + """ + import numpy as np + import scipy as sp + + X = nx.to_scipy_sparse_array(G) # adjacency matrix + labels, label_dict = _get_label_info(G, label_name) + + if labels.shape[0] == 0: + raise nx.NetworkXError( + f"No node on the input graph is labeled by '{label_name}'." 
+        )
+
+    n_samples = X.shape[0]
+    n_classes = label_dict.shape[0]
+    F = np.zeros((n_samples, n_classes))
+
+    # Build propagation matrix
+    degrees = X.sum(axis=0)
+    degrees[degrees == 0] = 1  # Avoid division by 0
+    # TODO: csr_array
+    D2 = np.sqrt(sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0)))
+    P = alpha * ((D2 @ X) @ D2)
+    # Build base matrix
+    B = np.zeros((n_samples, n_classes))
+    B[labels[:, 0], labels[:, 1]] = 1 - alpha
+
+    for _ in range(max_iter):
+        F = (P @ F) + B
+
+    return label_dict[np.argmax(F, axis=1)].tolist()
+
+
+def _get_label_info(G, label_name):
+    """Get and return label information from the input graph
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    label_name : string
+        Name of the target label
+
+    Returns
+    -------
+    labels : numpy array, shape = [n_labeled_samples, 2]
+        Array of pairs of labeled node ID and label ID
+    label_dict : numpy array, shape = [n_classes]
+        Array of labels
+        The i-th element contains the label corresponding to label ID `i`
+    """
+    import numpy as np
+
+    labels = []
+    label_to_id = {}
+    lid = 0
+    for i, n in enumerate(G.nodes(data=True)):
+        if label_name in n[1]:
+            label = n[1][label_name]
+            if label not in label_to_id:
+                label_to_id[label] = lid
+                lid += 1
+            labels.append([i, label_to_id[label]])
+    labels = np.array(labels)
+    label_dict = np.array(
+        [label for label, _ in sorted(label_to_id.items(), key=lambda x: x[1])]
+    )
+    return (labels, label_dict)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/non_randomness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/non_randomness.py
new file mode 100644
index 0000000..1379911
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/non_randomness.py
@@ -0,0 +1,98 @@
+r"""Computation of graph non-randomness"""
+
+import math
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["non_randomness"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def non_randomness(G, k=None, weight="weight"):
+    """Compute the non-randomness of graph G.
+
+    The first returned value nr is the sum of non-randomness values of all
+    edges within the graph (where the non-randomness of an edge tends to be
+    small when the two nodes linked by that edge are from two different
+    communities).
+
+    The second computed value nr_rd is a relative measure that indicates
+    to what extent graph G is different from random graphs in terms
+    of probability. When it is close to 0, the graph tends to be more
+    likely generated by an Erdos Renyi model.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Graph must be symmetric, connected, and without self-loops.
+
+    k : int
+        The number of communities in G.
+        If k is not set, it defaults to the number of communities found
+        by label propagation (``nx.community.label_propagation_communities``).
+
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value used
+        as a weight. If None, then each edge has weight 1, i.e., the graph is
+        binary.
+
+    Returns
+    -------
+    non-randomness : (float, float) tuple
+        Non-randomness, Relative non-randomness w.r.t.
+        Erdos Renyi random graphs.
+
+    Raises
+    ------
+    NetworkXException
+        If the input graph is not connected.
+    NetworkXError
+        If the input graph contains self-loops or has no edges.
+ + Examples + -------- + >>> G = nx.karate_club_graph() + >>> nr, nr_rd = nx.non_randomness(G, 2) + >>> nr, nr_rd = nx.non_randomness(G, 2, "weight") + + Notes + ----- + This computes Eq. (4.4) and (4.5) in Ref. [1]_. + + If a weight field is passed, this algorithm will use the eigenvalues + of the weighted adjacency matrix to compute Eq. (4.4) and (4.5). + + References + ---------- + .. [1] Xiaowei Ying and Xintao Wu, + On Randomness Measures for Social Networks, + SIAM International Conference on Data Mining. 2009 + """ + import numpy as np + + # corner case: graph has no edges + if nx.is_empty(G): + raise nx.NetworkXError("non_randomness not applicable to empty graphs") + if not nx.is_connected(G): + raise nx.NetworkXException("Non connected graph.") + if len(list(nx.selfloop_edges(G))) > 0: + raise nx.NetworkXError("Graph must not contain self-loops") + + if k is None: + k = len(tuple(nx.community.label_propagation_communities(G))) + + # eq. 4.4 + eigenvalues = np.linalg.eigvals(nx.to_numpy_array(G, weight=weight)) + nr = float(np.real(np.sum(eigenvalues[:k]))) + + n = G.number_of_nodes() + m = G.number_of_edges() + p = (2 * k * m) / (n * (n - k)) + + # eq. 4.5 + nr_rd = (nr - ((n - 2 * k) * p + k)) / math.sqrt(2 * k * p * (1 - p)) + + return nr, nr_rd diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__init__.py new file mode 100644 index 0000000..0ebc6ab --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__init__.py @@ -0,0 +1,4 @@ +from networkx.algorithms.operators.all import * +from networkx.algorithms.operators.binary import * +from networkx.algorithms.operators.product import * +from networkx.algorithms.operators.unary import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..317f1e1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/all.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/all.cpython-312.pyc new file mode 100644 index 0000000..5667d04 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/all.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/binary.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/binary.cpython-312.pyc new file mode 100644 index 0000000..8c613d1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/binary.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/product.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/product.cpython-312.pyc new file mode 100644 index 0000000..5275f54 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/product.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/unary.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/unary.cpython-312.pyc
new file mode 100644
index 0000000..b661ae9
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/__pycache__/unary.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/all.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/all.py
new file mode 100644
index 0000000..322a15a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/all.py
@@ -0,0 +1,324 @@
+"""Operations on many graphs."""
+
+from itertools import chain, repeat
+
+import networkx as nx
+
+__all__ = ["union_all", "compose_all", "disjoint_union_all", "intersection_all"]
+
+
+@nx._dispatchable(graphs="[graphs]", preserve_all_attrs=True, returns_graph=True)
+def union_all(graphs, rename=()):
+    """Returns the union of all graphs.
+
+    The graphs must be disjoint, otherwise an exception is raised.
+
+    Parameters
+    ----------
+    graphs : iterable
+        Iterable of NetworkX graphs
+
+    rename : iterable, optional
+        Node names of graphs can be changed by specifying the tuple
+        rename=('G-','H-') (for example). Node "u" in G is then renamed
+        "G-u" and "v" in H is renamed "H-v". Infinite generators (like itertools.count)
+        are also supported.
+
+    Returns
+    -------
+    U : a graph with the same type as the first graph in list
+
+    Raises
+    ------
+    ValueError
+        If `graphs` is an empty list.
+
+    NetworkXError
+        In case of mixed type graphs, like MultiGraph and Graph, or directed and undirected graphs.
+
+    Notes
+    -----
+    For operating on mixed type graphs, they should be converted to the same type.
+
+    >>> G = nx.Graph()
+    >>> H = nx.DiGraph()
+    >>> GH = union_all([nx.DiGraph(G), H])
+
+    To force a disjoint union with node relabeling, use
+    disjoint_union_all([G, H]) or convert_node_labels_to_integers().
+
+    Graph, edge, and node attributes are propagated to the union graph.
+    If a graph attribute is present in multiple graphs, then the value
+    from the last graph in the list with that attribute is used.
+
+    Examples
+    --------
+    >>> G1 = nx.Graph([(1, 2), (2, 3)])
+    >>> G2 = nx.Graph([(4, 5), (5, 6)])
+    >>> result_graph = nx.union_all([G1, G2])
+    >>> result_graph.nodes()
+    NodeView((1, 2, 3, 4, 5, 6))
+    >>> result_graph.edges()
+    EdgeView([(1, 2), (2, 3), (4, 5), (5, 6)])
+
+    See Also
+    --------
+    union
+    disjoint_union_all
+    """
+    R = None
+    seen_nodes = set()
+
+    # rename graph to obtain disjoint node labels
+    def add_prefix(graph, prefix):
+        if prefix is None:
+            return graph
+
+        def label(x):
+            return f"{prefix}{x}"
+
+        return nx.relabel_nodes(graph, label)
+
+    rename = chain(rename, repeat(None))
+    graphs = (add_prefix(G, name) for G, name in zip(graphs, rename))
+
+    for i, G in enumerate(graphs):
+        G_nodes_set = set(G.nodes)
+        if i == 0:
+            # Union is the same type as first graph
+            R = G.__class__()
+        elif G.is_directed() != R.is_directed():
+            raise nx.NetworkXError("All graphs must be directed or undirected.")
+        elif G.is_multigraph() != R.is_multigraph():
+            raise nx.NetworkXError("All graphs must be graphs or multigraphs.")
+        elif not seen_nodes.isdisjoint(G_nodes_set):
+            raise nx.NetworkXError(
+                "The node sets of the graphs are not disjoint.\n"
+                "Use `rename` to specify prefixes for the graphs or use\n"
+                "disjoint_union(G1, G2, ..., GN)."
+ ) + + seen_nodes |= G_nodes_set + R.graph.update(G.graph) + R.add_nodes_from(G.nodes(data=True)) + R.add_edges_from( + G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True) + ) + + if R is None: + raise ValueError("cannot apply union_all to an empty list") + + return R + + +@nx._dispatchable(graphs="[graphs]", preserve_all_attrs=True, returns_graph=True) +def disjoint_union_all(graphs): + """Returns the disjoint union of all graphs. + + This operation forces distinct integer node labels starting with 0 + for the first graph in the list and numbering consecutively. + + Parameters + ---------- + graphs : iterable + Iterable of NetworkX graphs + + Returns + ------- + U : A graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. + + NetworkXError + In case of mixed type graphs, like MultiGraph and Graph, or directed and undirected graphs. + + Examples + -------- + >>> G1 = nx.Graph([(1, 2), (2, 3)]) + >>> G2 = nx.Graph([(4, 5), (5, 6)]) + >>> U = nx.disjoint_union_all([G1, G2]) + >>> list(U.nodes()) + [0, 1, 2, 3, 4, 5] + >>> list(U.edges()) + [(0, 1), (1, 2), (3, 4), (4, 5)] + + Notes + ----- + For operating on mixed type graphs, they should be converted to the same type. + + Graph, edge, and node attributes are propagated to the union graph. + If a graph attribute is present in multiple graphs, then the value + from the last graph in the list with that attribute is used. + """ + + def yield_relabeled(graphs): + first_label = 0 + for G in graphs: + yield nx.convert_node_labels_to_integers(G, first_label=first_label) + first_label += len(G) + + R = union_all(yield_relabeled(graphs)) + + return R + + +@nx._dispatchable(graphs="[graphs]", preserve_all_attrs=True, returns_graph=True) +def compose_all(graphs): + """Returns the composition of all graphs. + + Composition is the simple union of the node sets and edge sets. + The node sets of the supplied graphs need not be disjoint. + + Parameters + ---------- + graphs : iterable + Iterable of NetworkX graphs + + Returns + ------- + C : A graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. + + NetworkXError + In case of mixed type graphs, like MultiGraph and Graph, or directed and undirected graphs. + + Examples + -------- + >>> G1 = nx.Graph([(1, 2), (2, 3)]) + >>> G2 = nx.Graph([(3, 4), (5, 6)]) + >>> C = nx.compose_all([G1, G2]) + >>> list(C.nodes()) + [1, 2, 3, 4, 5, 6] + >>> list(C.edges()) + [(1, 2), (2, 3), (3, 4), (5, 6)] + + Notes + ----- + For operating on mixed type graphs, they should be converted to the same type. + + Graph, edge, and node attributes are propagated to the union graph. + If a graph attribute is present in multiple graphs, then the value + from the last graph in the list with that attribute is used. 
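+
+    For instance, a graph attribute present in several input graphs takes its
+    value from the last graph in the list (a small illustration of the rule
+    above):
+
+    >>> G1 = nx.Graph(name="first")
+    >>> G2 = nx.Graph(name="second")
+    >>> nx.compose_all([G1, G2]).graph["name"]
+    'second'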
+ """ + R = None + + # add graph attributes, H attributes take precedent over G attributes + for i, G in enumerate(graphs): + if i == 0: + # create new graph + R = G.__class__() + elif G.is_directed() != R.is_directed(): + raise nx.NetworkXError("All graphs must be directed or undirected.") + elif G.is_multigraph() != R.is_multigraph(): + raise nx.NetworkXError("All graphs must be graphs or multigraphs.") + + R.graph.update(G.graph) + R.add_nodes_from(G.nodes(data=True)) + R.add_edges_from( + G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True) + ) + + if R is None: + raise ValueError("cannot apply compose_all to an empty list") + + return R + + +@nx._dispatchable(graphs="[graphs]", returns_graph=True) +def intersection_all(graphs): + """Returns a new graph that contains only the nodes and the edges that exist in + all graphs. + + Parameters + ---------- + graphs : iterable + Iterable of NetworkX graphs + + Returns + ------- + R : A new graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. + + NetworkXError + In case of mixed type graphs, like MultiGraph and Graph, or directed and undirected graphs. + + Notes + ----- + For operating on mixed type graphs, they should be converted to the same type. + + Attributes from the graph, nodes, and edges are not copied to the new + graph. + + The resulting graph can be updated with attributes if desired. + For example, code which adds the minimum attribute for each node across all + graphs could work:: + + >>> g = nx.Graph() + >>> g.add_node(0, capacity=4) + >>> g.add_node(1, capacity=3) + >>> g.add_edge(0, 1) + + >>> h = g.copy() + >>> h.nodes[0]["capacity"] = 2 + + >>> gh = nx.intersection_all([g, h]) + + >>> new_node_attr = { + ... n: min(*(anyG.nodes[n].get("capacity", float("inf")) for anyG in [g, h])) + ... for n in gh + ... } + >>> nx.set_node_attributes(gh, new_node_attr, "new_capacity") + >>> gh.nodes(data=True) + NodeDataView({0: {'new_capacity': 2}, 1: {'new_capacity': 3}}) + + Examples + -------- + >>> G1 = nx.Graph([(1, 2), (2, 3)]) + >>> G2 = nx.Graph([(2, 3), (3, 4)]) + >>> R = nx.intersection_all([G1, G2]) + >>> list(R.nodes()) + [2, 3] + >>> list(R.edges()) + [(2, 3)] + + """ + R = None + + for i, G in enumerate(graphs): + G_nodes_set = set(G.nodes) + G_edges_set = set(G.edges) + if not G.is_directed(): + if G.is_multigraph(): + G_edges_set.update((v, u, k) for u, v, k in list(G_edges_set)) + else: + G_edges_set.update((v, u) for u, v in list(G_edges_set)) + if i == 0: + # create new graph + R = G.__class__() + node_intersection = G_nodes_set + edge_intersection = G_edges_set + elif G.is_directed() != R.is_directed(): + raise nx.NetworkXError("All graphs must be directed or undirected.") + elif G.is_multigraph() != R.is_multigraph(): + raise nx.NetworkXError("All graphs must be graphs or multigraphs.") + else: + node_intersection &= G_nodes_set + edge_intersection &= G_edges_set + + if R is None: + raise ValueError("cannot apply intersection_all to an empty list") + + R.add_nodes_from(node_intersection) + R.add_edges_from(edge_intersection) + + return R diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/binary.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/binary.py new file mode 100644 index 0000000..c121292 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/binary.py @@ -0,0 +1,468 @@ +""" +Operations on graphs including union, intersection, difference. 
+""" + +import networkx as nx + +__all__ = [ + "union", + "compose", + "disjoint_union", + "intersection", + "difference", + "symmetric_difference", + "full_join", +] +_G_H = {"G": 0, "H": 1} + + +@nx._dispatchable(graphs=_G_H, preserve_all_attrs=True, returns_graph=True) +def union(G, H, rename=()): + """Combine graphs G and H. The names of nodes must be unique. + + A name collision between the graphs will raise an exception. + + A renaming facility is provided to avoid name collisions. + + + Parameters + ---------- + G, H : graph + A NetworkX graph + + rename : iterable , optional + Node names of G and H can be changed by specifying the tuple + rename=('G-','H-') (for example). Node "u" in G is then renamed + "G-u" and "v" in H is renamed "H-v". + + Returns + ------- + U : A union graph with the same type as G. + + See Also + -------- + compose + :func:`~networkx.Graph.update` + disjoint_union + + Notes + ----- + To combine graphs that have common nodes, consider compose(G, H) + or the method, Graph.update(). + + disjoint_union() is similar to union() except that it avoids name clashes + by relabeling the nodes with sequential integers. + + Edge and node attributes are propagated from G and H to the union graph. + Graph attributes are also propagated, but if they are present in both G and H, + then the value from H is used. + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 1), (0, 3), (1, 3), (1, 2)]) + >>> U = nx.union(G, H, rename=("G", "H")) + >>> U.nodes + NodeView(('G0', 'G1', 'G2', 'H0', 'H1', 'H3', 'H2')) + >>> edgelist = list(U.edges) + >>> pprint(edgelist) + [('G0', 'G1'), + ('G0', 'G2'), + ('G1', 'G2'), + ('H0', 'H1'), + ('H0', 'H3'), + ('H1', 'H3'), + ('H1', 'H2')] + + + """ + return nx.union_all([G, H], rename) + + +@nx._dispatchable(graphs=_G_H, preserve_all_attrs=True, returns_graph=True) +def disjoint_union(G, H): + """Combine graphs G and H. The nodes are assumed to be unique (disjoint). + + This algorithm automatically relabels nodes to avoid name collisions. + + Parameters + ---------- + G,H : graph + A NetworkX graph + + Returns + ------- + U : A union graph with the same type as G. + + See Also + -------- + union + compose + :func:`~networkx.Graph.update` + + Notes + ----- + A new graph is created, of the same class as G. It is recommended + that G and H be either both directed or both undirected. + + The nodes of G are relabeled 0 to len(G)-1, and the nodes of H are + relabeled len(G) to len(G)+len(H)-1. + + Renumbering forces G and H to be disjoint, so no exception is ever raised for a name collision. + To preserve the check for common nodes, use union(). + + Edge and node attributes are propagated from G and H to the union graph. + Graph attributes are also propagated, but if they are present in both G and H, + then the value from H is used. + + To combine graphs that have common nodes, consider compose(G, H) + or the method, Graph.update(). 
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 3), (1, 2), (2, 3)]) + >>> G.nodes[0]["key1"] = 5 + >>> H.nodes[0]["key2"] = 10 + >>> U = nx.disjoint_union(G, H) + >>> U.nodes(data=True) + NodeDataView({0: {'key1': 5}, 1: {}, 2: {}, 3: {'key2': 10}, 4: {}, 5: {}, 6: {}}) + >>> U.edges + EdgeView([(0, 1), (0, 2), (1, 2), (3, 4), (4, 6), (5, 6)]) + """ + return nx.disjoint_union_all([G, H]) + + +@nx._dispatchable(graphs=_G_H, returns_graph=True) +def intersection(G, H): + """Returns a new graph that contains only the nodes and the edges that exist in + both G and H. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H can have different node sets but must be both graphs or both multigraphs. + + Raises + ------ + NetworkXError + If one is a MultiGraph and the other one is a graph. + + Returns + ------- + GH : A new graph with the same type as G. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. If you want a new graph of the intersection of G and H + with the attributes (including edge data) from G use remove_nodes_from() + as follows + + >>> G = nx.path_graph(3) + >>> H = nx.path_graph(5) + >>> R = G.copy() + >>> R.remove_nodes_from(n for n in G if n not in H) + >>> R.remove_edges_from(e for e in G.edges if e not in H.edges) + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 3), (1, 2), (2, 3)]) + >>> R = nx.intersection(G, H) + >>> R.nodes + NodeView((0, 1, 2)) + >>> R.edges + EdgeView([(1, 2)]) + """ + return nx.intersection_all([G, H]) + + +@nx._dispatchable(graphs=_G_H, returns_graph=True) +def difference(G, H): + """Returns a new graph that contains the edges that exist in G but not in H. + + The node sets of H and G must be the same. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H must have the same node sets. + + Returns + ------- + D : A new graph with the same type as G. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. If you want a new graph of the difference of G and H with + the attributes (including edge data) from G use remove_nodes_from() + as follows: + + >>> G = nx.path_graph(3) + >>> H = nx.path_graph(5) + >>> R = G.copy() + >>> R.remove_nodes_from(n for n in G if n in H) + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3)]) + >>> H = nx.Graph([(0, 1), (1, 2), (0, 3)]) + >>> R = nx.difference(G, H) + >>> R.nodes + NodeView((0, 1, 2, 3)) + >>> R.edges + EdgeView([(0, 2), (1, 3)]) + """ + # create new graph + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError("G and H must both be graphs or multigraphs.") + R = nx.create_empty_copy(G, with_data=False) + + if set(G) != set(H): + raise nx.NetworkXError("Node sets of graphs not equal") + + if G.is_multigraph(): + edges = G.edges(keys=True) + else: + edges = G.edges() + for e in edges: + if not H.has_edge(*e): + R.add_edge(*e) + return R + + +@nx._dispatchable(graphs=_G_H, returns_graph=True) +def symmetric_difference(G, H): + """Returns new graph with edges that exist in either G or H but not both. + + The node sets of H and G must be the same. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H must have the same node sets. + + Returns + ------- + D : A new graph with the same type as G. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. 
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3)])
+    >>> H = nx.Graph([(0, 1), (1, 2), (0, 3)])
+    >>> R = nx.symmetric_difference(G, H)
+    >>> R.nodes
+    NodeView((0, 1, 2, 3))
+    >>> R.edges
+    EdgeView([(0, 2), (0, 3), (1, 3)])
+    """
+    # create new graph
+    if not G.is_multigraph() == H.is_multigraph():
+        raise nx.NetworkXError("G and H must both be graphs or multigraphs.")
+    R = nx.create_empty_copy(G, with_data=False)
+
+    if set(G) != set(H):
+        raise nx.NetworkXError("Node sets of graphs not equal")
+
+    gnodes = set(G)  # set of nodes in G
+    hnodes = set(H)  # set of nodes in H
+    nodes = gnodes.symmetric_difference(hnodes)
+    R.add_nodes_from(nodes)
+
+    if G.is_multigraph():
+        edges = G.edges(keys=True)
+    else:
+        edges = G.edges()
+    # we could copy the data here but then this function doesn't
+    # match intersection and difference
+    for e in edges:
+        if not H.has_edge(*e):
+            R.add_edge(*e)
+
+    if H.is_multigraph():
+        edges = H.edges(keys=True)
+    else:
+        edges = H.edges()
+    for e in edges:
+        if not G.has_edge(*e):
+            R.add_edge(*e)
+    return R
+
+
+@nx._dispatchable(graphs=_G_H, preserve_all_attrs=True, returns_graph=True)
+def compose(G, H):
+    """Compose graph G with H by combining nodes and edges into a single graph.
+
+    The node sets and edges sets do not need to be disjoint.
+
+    Composing preserves the attributes of nodes and edges.
+    Attribute values from H take precedence over attribute values from G.
+
+    Parameters
+    ----------
+    G, H : graph
+        A NetworkX graph
+
+    Returns
+    -------
+    C : A new graph with the same type as G
+
+    See Also
+    --------
+    :func:`~networkx.Graph.update`
+    union
+    disjoint_union
+
+    Notes
+    -----
+    It is recommended that G and H be either both directed or both undirected.
+
+    For MultiGraphs, the edges are identified by incident nodes AND edge-key.
+    This can cause surprises (i.e., edge `(1, 2)` may or may not be the same
+    in two graphs) if you use MultiGraph without keeping track of edge keys.
+
+    If combining the attributes of common nodes is not desired, consider union(),
+    which raises an exception for name collisions.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2)])
+    >>> H = nx.Graph([(0, 1), (1, 2)])
+    >>> R = nx.compose(G, H)
+    >>> R.nodes
+    NodeView((0, 1, 2))
+    >>> R.edges
+    EdgeView([(0, 1), (0, 2), (1, 2)])
+
+    By default, the attributes from `H` take precedence over attributes from `G`.
+    If you prefer another way of combining attributes, you can update them after the compose operation:
+
+    >>> G = nx.Graph([(0, 1, {"weight": 2.0}), (3, 0, {"weight": 100.0})])
+    >>> H = nx.Graph([(0, 1, {"weight": 10.0}), (1, 2, {"weight": -1.0})])
+    >>> nx.set_node_attributes(G, {0: "dark", 1: "light", 3: "black"}, name="color")
+    >>> nx.set_node_attributes(H, {0: "green", 1: "orange", 2: "yellow"}, name="color")
+    >>> GcomposeH = nx.compose(G, H)
+
+    Normally, color attribute values of nodes of GcomposeH come from H. We can
+    work around this as follows:
+
+    >>> node_data = {
+    ...     n: G.nodes[n]["color"] + " " + H.nodes[n]["color"]
+    ...     for n in G.nodes & H.nodes
+    ... }
+    >>> nx.set_node_attributes(GcomposeH, node_data, "color")
+    >>> print(GcomposeH.nodes[0]["color"])
+    dark green
+
+    >>> print(GcomposeH.nodes[3]["color"])
+    black
+
+    Similarly, we can update edge attributes after the compose operation in a way we prefer:
+
+    >>> edge_data = {
+    ...     e: G.edges[e]["weight"] * H.edges[e]["weight"] for e in G.edges & H.edges
+    ... }
} + >>> nx.set_edge_attributes(GcomposeH, edge_data, "weight") + >>> print(GcomposeH.edges[(0, 1)]["weight"]) + 20.0 + + >>> print(GcomposeH.edges[(3, 0)]["weight"]) + 100.0 + """ + return nx.compose_all([G, H]) + + +@nx._dispatchable(graphs=_G_H, preserve_all_attrs=True, returns_graph=True) +def full_join(G, H, rename=(None, None)): + """Returns the full join of graphs G and H. + + Full join is the union of G and H in which all edges between + G and H are added. + The node sets of G and H must be disjoint, + otherwise an exception is raised. + + Parameters + ---------- + G, H : graph + A NetworkX graph + + rename : tuple , default=(None, None) + Node names of G and H can be changed by specifying the tuple + rename=('G-','H-') (for example). Node "u" in G is then renamed + "G-u" and "v" in H is renamed "H-v". + + Returns + ------- + U : The full join graph with the same type as G. + + Notes + ----- + It is recommended that G and H be either both directed or both undirected. + + If G is directed, then edges from G to H are added as well as from H to G. + + Note that full_join() does not produce parallel edges for MultiGraphs. + + The full join operation of graphs G and H is the same as getting + their complement, performing a disjoint union, and finally getting + the complement of the resulting graph. + + Graph, edge, and node attributes are propagated from G and H + to the union graph. If a graph attribute is present in both + G and H the value from H is used. + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> H = nx.Graph([(3, 4)]) + >>> R = nx.full_join(G, H, rename=("G", "H")) + >>> R.nodes + NodeView(('G0', 'G1', 'G2', 'H3', 'H4')) + >>> edgelist = list(R.edges) + >>> pprint(edgelist) + [('G0', 'G1'), + ('G0', 'G2'), + ('G0', 'H3'), + ('G0', 'H4'), + ('G1', 'H3'), + ('G1', 'H4'), + ('G2', 'H3'), + ('G2', 'H4'), + ('H3', 'H4')] + + See Also + -------- + union + disjoint_union + """ + R = union(G, H, rename) + + def add_prefix(graph, prefix): + if prefix is None: + return graph + + def label(x): + return f"{prefix}{x}" + + return nx.relabel_nodes(graph, label) + + G = add_prefix(G, rename[0]) + H = add_prefix(H, rename[1]) + + for i in G: + for j in H: + R.add_edge(i, j) + if R.is_directed(): + for i in H: + for j in G: + R.add_edge(i, j) + + return R diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/product.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/product.py new file mode 100644 index 0000000..28ca78b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/product.py @@ -0,0 +1,633 @@ +""" +Graph products. 
+""" + +from itertools import product + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "tensor_product", + "cartesian_product", + "lexicographic_product", + "strong_product", + "power", + "rooted_product", + "corona_product", + "modular_product", +] +_G_H = {"G": 0, "H": 1} + + +def _dict_product(d1, d2): + return {k: (d1.get(k), d2.get(k)) for k in set(d1) | set(d2)} + + +# Generators for producing graph products +def _node_product(G, H): + for u, v in product(G, H): + yield ((u, v), _dict_product(G.nodes[u], H.nodes[v])) + + +def _directed_edges_cross_edges(G, H): + if not G.is_multigraph() and not H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, d in H.edges(data=True): + yield (u, x), (v, y), _dict_product(c, d) + if not G.is_multigraph() and H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (u, x), (v, y), k, _dict_product(c, d) + if G.is_multigraph() and not H.is_multigraph(): + for u, v, k, c in G.edges(data=True, keys=True): + for x, y, d in H.edges(data=True): + yield (u, x), (v, y), k, _dict_product(c, d) + if G.is_multigraph() and H.is_multigraph(): + for u, v, j, c in G.edges(data=True, keys=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (u, x), (v, y), (j, k), _dict_product(c, d) + + +def _undirected_edges_cross_edges(G, H): + if not G.is_multigraph() and not H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, d in H.edges(data=True): + yield (v, x), (u, y), _dict_product(c, d) + if not G.is_multigraph() and H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (v, x), (u, y), k, _dict_product(c, d) + if G.is_multigraph() and not H.is_multigraph(): + for u, v, k, c in G.edges(data=True, keys=True): + for x, y, d in H.edges(data=True): + yield (v, x), (u, y), k, _dict_product(c, d) + if G.is_multigraph() and H.is_multigraph(): + for u, v, j, c in G.edges(data=True, keys=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (v, x), (u, y), (j, k), _dict_product(c, d) + + +def _edges_cross_nodes(G, H): + if G.is_multigraph(): + for u, v, k, d in G.edges(data=True, keys=True): + for x in H: + yield (u, x), (v, x), k, d + else: + for u, v, d in G.edges(data=True): + for x in H: + if H.is_multigraph(): + yield (u, x), (v, x), None, d + else: + yield (u, x), (v, x), d + + +def _nodes_cross_edges(G, H): + if H.is_multigraph(): + for x in G: + for u, v, k, d in H.edges(data=True, keys=True): + yield (x, u), (x, v), k, d + else: + for x in G: + for u, v, d in H.edges(data=True): + if G.is_multigraph(): + yield (x, u), (x, v), None, d + else: + yield (x, u), (x, v), d + + +def _edges_cross_nodes_and_nodes(G, H): + if G.is_multigraph(): + for u, v, k, d in G.edges(data=True, keys=True): + for x in H: + for y in H: + yield (u, x), (v, y), k, d + else: + for u, v, d in G.edges(data=True): + for x in H: + for y in H: + if H.is_multigraph(): + yield (u, x), (v, y), None, d + else: + yield (u, x), (v, y), d + + +def _init_product_graph(G, H): + if G.is_directed() != H.is_directed(): + msg = "G and H must be both directed or both undirected" + raise nx.NetworkXError(msg) + if G.is_multigraph() or H.is_multigraph(): + GH = nx.MultiGraph() + else: + GH = nx.Graph() + if G.is_directed(): + GH = GH.to_directed() + return GH + + +@nx._dispatchable(graphs=_G_H, preserve_node_attrs=True, returns_graph=True) +def tensor_product(G, H): + r"""Returns the tensor product of G 
and H.
+
+    The tensor product $P$ of the graphs $G$ and $H$ has a node set that
+    is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
+    $P$ has an edge $((u,v), (x,y))$ if and only if $(u,x)$ is an edge in $G$
+    and $(v,y)$ is an edge in $H$.
+
+    Tensor product is sometimes also referred to as the categorical product,
+    direct product, cardinal product or conjunction.
+
+    Parameters
+    ----------
+    G, H: graphs
+       NetworkX graphs.
+
+    Returns
+    -------
+    P: NetworkX graph
+       The tensor product of G and H. P will be a multi-graph if either G
+       or H is a multi-graph, directed if G and H are directed, and
+       undirected if G and H are undirected.
+
+    Raises
+    ------
+    NetworkXError
+       If G and H are not both directed or both undirected.
+
+    Notes
+    -----
+    Node attributes in P are two-tuples of the G and H node attributes.
+    Missing attributes are assigned None.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> H = nx.Graph()
+    >>> G.add_node(0, a1=True)
+    >>> H.add_node("a", a2="Spam")
+    >>> P = nx.tensor_product(G, H)
+    >>> list(P)
+    [(0, 'a')]
+
+    Edge attributes and edge keys (for multigraphs) are also copied to the
+    new product graph.
+    """
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+    GH.add_edges_from(_directed_edges_cross_edges(G, H))
+    if not GH.is_directed():
+        GH.add_edges_from(_undirected_edges_cross_edges(G, H))
+    return GH
+
+
+@nx._dispatchable(graphs=_G_H, preserve_node_attrs=True, returns_graph=True)
+def cartesian_product(G, H):
+    r"""Returns the Cartesian product of G and H.
+
+    The Cartesian product $P$ of the graphs $G$ and $H$ has a node set that
+    is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
+    $P$ has an edge $((u,v),(x,y))$ if and only if either $u$ is equal to $x$
+    and both $v$ and $y$ are adjacent in $H$ or if $v$ is equal to $y$ and
+    both $u$ and $x$ are adjacent in $G$.
+
+    Parameters
+    ----------
+    G, H: graphs
+       NetworkX graphs.
+
+    Returns
+    -------
+    P: NetworkX graph
+       The Cartesian product of G and H. P will be a multi-graph if either G
+       or H is a multi-graph. Will be directed if G and H are directed,
+       and undirected if G and H are undirected.
+
+    Raises
+    ------
+    NetworkXError
+       If G and H are not both directed or both undirected.
+
+    Notes
+    -----
+    Node attributes in P are two-tuples of the G and H node attributes.
+    Missing attributes are assigned None.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> H = nx.Graph()
+    >>> G.add_node(0, a1=True)
+    >>> H.add_node("a", a2="Spam")
+    >>> P = nx.cartesian_product(G, H)
+    >>> list(P)
+    [(0, 'a')]
+
+    Edge attributes and edge keys (for multigraphs) are also copied to the
+    new product graph.
+    """
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+    GH.add_edges_from(_edges_cross_nodes(G, H))
+    GH.add_edges_from(_nodes_cross_edges(G, H))
+    return GH
+
+
+@nx._dispatchable(graphs=_G_H, preserve_node_attrs=True, returns_graph=True)
+def lexicographic_product(G, H):
+    r"""Returns the lexicographic product of G and H.
+
+    The lexicographical product $P$ of the graphs $G$ and $H$ has a node set
+    that is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
+    $P$ has an edge $((u,v), (x,y))$ if and only if $(u,x)$ is an edge in $G$
+    or $u==x$ and $(v,y)$ is an edge in $H$.
+
+    Parameters
+    ----------
+    G, H: graphs
+       NetworkX graphs.
+
+    Returns
+    -------
+    P: NetworkX graph
+       The lexicographic product of G and H. P will be a multi-graph if
+       either G or H is a multi-graph. Will be directed if G and H are
+       directed, and undirected if G and H are undirected.
+
+    Raises
+    ------
+    NetworkXError
+       If G and H are not both directed or both undirected.
+
+    Notes
+    -----
+    Node attributes in P are two-tuples of the G and H node attributes.
+    Missing attributes are assigned None.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> H = nx.Graph()
+    >>> G.add_node(0, a1=True)
+    >>> H.add_node("a", a2="Spam")
+    >>> P = nx.lexicographic_product(G, H)
+    >>> list(P)
+    [(0, 'a')]
+
+    Edge attributes and edge keys (for multigraphs) are also copied to the
+    new product graph.
+    """
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+    # Edges in G connect (u, x) to (v, y) for all x, y in H
+    GH.add_edges_from(_edges_cross_nodes_and_nodes(G, H))
+    # For equal first coordinates, connect only if there is an edge in H
+    GH.add_edges_from(_nodes_cross_edges(G, H))
+    return GH
+
+
+@nx._dispatchable(graphs=_G_H, preserve_node_attrs=True, returns_graph=True)
+def strong_product(G, H):
+    r"""Returns the strong product of G and H.
+
+    The strong product $P$ of the graphs $G$ and $H$ has a node set that
+    is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
+    $P$ has an edge $((u,x), (v,y))$ if any of the following conditions
+    are met:
+
+    - $u=v$ and $(x,y)$ is an edge in $H$
+    - $x=y$ and $(u,v)$ is an edge in $G$
+    - $(u,v)$ is an edge in $G$ and $(x,y)$ is an edge in $H$
+
+    Parameters
+    ----------
+    G, H: graphs
+       NetworkX graphs.
+
+    Returns
+    -------
+    P: NetworkX graph
+       The strong product of G and H. P will be a multi-graph if either G
+       or H is a multi-graph. Will be directed if G and H are directed,
+       and undirected if G and H are undirected.
+
+    Raises
+    ------
+    NetworkXError
+       If G and H are not both directed or both undirected.
+
+    Notes
+    -----
+    Node attributes in P are two-tuples of the G and H node attributes.
+    Missing attributes are assigned None.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> H = nx.Graph()
+    >>> G.add_node(0, a1=True)
+    >>> H.add_node("a", a2="Spam")
+    >>> P = nx.strong_product(G, H)
+    >>> list(P)
+    [(0, 'a')]
+
+    Edge attributes and edge keys (for multigraphs) are also copied to the
+    new product graph.
+    """
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+    GH.add_edges_from(_nodes_cross_edges(G, H))
+    GH.add_edges_from(_edges_cross_nodes(G, H))
+    GH.add_edges_from(_directed_edges_cross_edges(G, H))
+    if not GH.is_directed():
+        GH.add_edges_from(_undirected_edges_cross_edges(G, H))
+    return GH
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(returns_graph=True)
+def power(G, k):
+    """Returns the specified power of a graph.
+
+    The $k$th power of a simple graph $G$, denoted $G^k$, is a
+    graph on the same set of nodes in which two distinct nodes $u$ and
+    $v$ are adjacent in $G^k$ if and only if the shortest path
+    distance between $u$ and $v$ in $G$ is at most $k$.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX simple graph object.
+
+    k : positive integer
+        The power to which to raise the graph `G`.
+
+    Returns
+    -------
+    NetworkX simple graph
+        `G` to the power `k`.
+
+    Raises
+    ------
+    ValueError
+        If the exponent `k` is not positive.
+
+    NetworkXNotImplemented
+        If `G` is not a simple graph.
+
+    Examples
+    --------
+    The number of edges will never decrease when taking successive
+    powers:
+
+    >>> G = nx.path_graph(4)
+    >>> list(nx.power(G, 2).edges)
+    [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]
+    >>> list(nx.power(G, 3).edges)
+    [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
+
+    The `k` th power of a cycle graph on *n* nodes is the complete graph
+    on *n* nodes, if `k` is at least ``n // 2``:
+
+    >>> G = nx.cycle_graph(5)
+    >>> H = nx.complete_graph(5)
+    >>> nx.is_isomorphic(nx.power(G, 2), H)
+    True
+    >>> G = nx.cycle_graph(8)
+    >>> H = nx.complete_graph(8)
+    >>> nx.is_isomorphic(nx.power(G, 4), H)
+    True
+
+    References
+    ----------
+    .. [1] J. A. Bondy, U. S. R. Murty, *Graph Theory*. Springer, 2008.
+
+    Notes
+    -----
+    This definition of "power graph" comes from Exercise 3.1.6 of
+    *Graph Theory* by Bondy and Murty [1]_.
+
+    """
+    if k <= 0:
+        raise ValueError("k must be a positive integer")
+    H = nx.Graph()
+    H.add_nodes_from(G)
+    # Depth-limited BFS from each node; self-loops are ignored.
+    for n in G:
+        seen = {}  # level (number of hops) when seen in BFS
+        level = 1  # the current level
+        nextlevel = G[n]
+        while nextlevel:
+            thislevel = nextlevel  # advance to next level
+            nextlevel = {}  # and start a new list (fringe)
+            for v in thislevel:
+                if v == n:  # avoid self loop
+                    continue
+                if v not in seen:
+                    seen[v] = level  # set the level of vertex v
+                    nextlevel.update(G[v])  # add neighbors of v
+            if k <= level:
+                break
+            level += 1
+        H.add_edges_from((n, nbr) for nbr in seen)
+    return H
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(graphs=_G_H, returns_graph=True)
+def rooted_product(G, H, root):
+    """Return the rooted product of graphs G and H rooted at root in H.
+
+    A new graph is constructed representing the rooted product of
+    the input graphs, G and H, with a root in H.
+    A rooted product duplicates H for each node in G with the root
+    of H corresponding to the node in G. Nodes are renamed as the direct
+    product of G and H. The result is a subgraph of the Cartesian product.
+
+    Parameters
+    ----------
+    G,H : graph
+       A NetworkX graph
+    root : node
+       A node in H
+
+    Returns
+    -------
+    R : The rooted product of G and H with a specified root in H
+
+    Raises
+    ------
+    NodeNotFound
+        If ``root`` is not a node in H.
+
+    Notes
+    -----
+    The nodes of R are the Cartesian product of the nodes of G and H.
+    The nodes of G and H are not relabeled.
+    """
+    if root not in H:
+        raise nx.NodeNotFound("root must be a vertex in H")
+
+    R = nx.Graph()
+    R.add_nodes_from(product(G, H))
+
+    R.add_edges_from(((e[0], root), (e[1], root)) for e in G.edges())
+    R.add_edges_from(((g, e[0]), (g, e[1])) for g in G for e in H.edges())
+
+    return R
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(graphs=_G_H, returns_graph=True)
+def corona_product(G, H):
+    r"""Returns the Corona product of G and H.
+
+    The corona product of $G$ and $H$ is the graph $C = G \circ H$ obtained by
+    taking one copy of $G$, called the center graph, $|V(G)|$ copies of $H$,
+    called the outer graph, and making the $i$-th vertex of $G$ adjacent to
+    every vertex of the $i$-th copy of $H$, where $1 \leq i \leq |V(G)|$.
+
+    Parameters
+    ----------
+    G, H: NetworkX graphs
+       The graphs to take the corona product of.
+       `G` is the center graph and `H` is the outer graph.
+
+    Returns
+    -------
+    C: NetworkX graph
+       The Corona product of G and H.
+
+    Raises
+    ------
+    NetworkXError
+        If G and H are not both directed or both undirected.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(4)
+    >>> H = nx.path_graph(2)
+    >>> C = nx.corona_product(G, H)
+    >>> list(C)
+    [0, 1, 2, 3, (0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)]
+    >>> print(C)
+    Graph with 12 nodes and 16 edges
+
+    References
+    ----------
+    .. [1] M. Tavakoli, F. Rahbarnia, and A. R. Ashrafi,
+       "Studying the corona product of graphs under some graph invariants,"
+       Transactions on Combinatorics, vol. 3, no. 3, pp. 43–49, Sep. 2014,
+       doi: 10.22108/toc.2014.5542.
+    .. [2] A. Faraji, "Corona Product in Graph Theory," Ali Faraji, May 11, 2021.
+       https://blog.alifaraji.ir/math/graph-theory/corona-product.html (accessed Dec. 07, 2021).
+    """
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(G)
+    GH.add_edges_from(G.edges)
+
+    for G_node in G:
+        # copy the nodes of H into GH; this copy is H_i
+        GH.add_nodes_from((G_node, v) for v in H)
+
+        # copy the edges of H into the copy H_i
+        GH.add_edges_from(
+            ((G_node, e0), (G_node, e1), d) for e0, e1, d in H.edges.data()
+        )
+
+        # connect G_node to every node of its copy H_i
+        GH.add_edges_from((G_node, (G_node, H_node)) for H_node in H)
+
+    return GH
+
+
+@nx._dispatchable(
+    graphs=_G_H, preserve_edge_attrs=True, preserve_node_attrs=True, returns_graph=True
+)
+def modular_product(G, H):
+    r"""Returns the Modular product of G and H.
+
+    The modular product of `G` and `H` is the graph $M = G \nabla H$,
+    consisting of the node set $V(M) = V(G) \times V(H)$ that is the Cartesian
+    product of the node sets of `G` and `H`. Further, M contains an edge ((u, v), (x, y)):
+
+    - if u is adjacent to x in `G` and v is adjacent to y in `H`, or
+    - if u is not adjacent to x in `G` and v is not adjacent to y in `H`.
+
+    More formally::
+
+        E(M) = {((u, v), (x, y)) | ((u, x) in E(G) and (v, y) in E(H)) or
+                ((u, x) not in E(G) and (v, y) not in E(H))}
+
+    Parameters
+    ----------
+    G, H: NetworkX graphs
+        The graphs to take the modular product of.
+
+    Returns
+    -------
+    M: NetworkX graph
+        The Modular product of `G` and `H`.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If `G` or `H` is a directed graph or a multigraph.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(4)
+    >>> H = nx.path_graph(2)
+    >>> M = nx.modular_product(G, H)
+    >>> list(M)
+    [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)]
+    >>> print(M)
+    Graph with 8 nodes and 8 edges
+
+    Notes
+    -----
+    The *modular product* is defined in [1]_ and was first
+    introduced as the *weak modular product*.
+
+    The modular product reduces the problem of counting isomorphic subgraphs
+    in `G` and `H` to the problem of counting cliques in M. The subgraphs of
+    `G` and `H` that are induced by the nodes of a clique in M are
+    isomorphic [2]_ [3]_.
+
+    References
+    ----------
+    .. [1] R. Hammack, W. Imrich, and S. Klavžar,
+       "Handbook of Product Graphs", CRC Press, 2011.
+
+    .. [2] H. G. Barrow and R. M. Burstall,
+       "Subgraph isomorphism, matching relational structures and maximal
+       cliques", Information Processing Letters, vol. 4, issue 4, pp. 83-84,
+       1976, https://doi.org/10.1016/0020-0190(76)90049-1.
+
+    .. [3] V. G. Vizing, "Reduction of the problem of isomorphism and isomorphic
+       entrance to the task of finding the nondensity of a graph." Proc. Third
+       All-Union Conference on Problems of Theoretical Cybernetics. 1974.
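+
+    For a small illustration of that correspondence: two triangles have the
+    triangle itself as their largest common induced subgraph, so the largest
+    clique in their modular product has three nodes (``nx.find_cliques``
+    enumerates maximal cliques).
+
+    >>> G = nx.cycle_graph(3)
+    >>> H = nx.cycle_graph(3)
+    >>> M = nx.modular_product(G, H)
+    >>> max(len(c) for c in nx.find_cliques(M))
+    3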
+ """ + if G.is_directed() or H.is_directed(): + raise nx.NetworkXNotImplemented( + "Modular product not implemented for directed graphs" + ) + if G.is_multigraph() or H.is_multigraph(): + raise nx.NetworkXNotImplemented( + "Modular product not implemented for multigraphs" + ) + + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + + for u, v, c in G.edges(data=True): + for x, y, d in H.edges(data=True): + GH.add_edge((u, x), (v, y), **_dict_product(c, d)) + GH.add_edge((v, x), (u, y), **_dict_product(c, d)) + + G = nx.complement(G) + H = nx.complement(H) + + for u, v, c in G.edges(data=True): + for x, y, d in H.edges(data=True): + GH.add_edge((u, x), (v, y), **_dict_product(c, d)) + GH.add_edge((v, x), (u, y), **_dict_product(c, d)) + + return GH diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..4cd30ce Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_all.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_all.cpython-312.pyc new file mode 100644 index 0000000..9737b35 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_all.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-312.pyc new file mode 100644 index 0000000..1b30347 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_product.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_product.cpython-312.pyc new file mode 100644 index 0000000..22eba41 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_product.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-312.pyc new file mode 100644 index 0000000..2497925 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_all.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_all.py new file mode 100644 index 0000000..8ec29c1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_all.py @@ -0,0 +1,328 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + 
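+
+# Sanity check of the relabeling documented for disjoint_union_all: node labels
+# of the inputs become consecutive integers, starting with the first graph.
+def test_disjoint_union_all_integer_relabeling():
+    G = nx.Graph([(0, 1)])
+    H = nx.Graph([("a", "b")])
+    U = nx.disjoint_union_all([G, H])
+    assert sorted(U.nodes()) == [0, 1, 2, 3]
+    assert sorted(U.edges()) == [(0, 1), (2, 3)]
+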
+ +def test_union_all_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + j = g.copy() + j.graph["name"] = "j" + j.graph["attr"] = "attr" + j.nodes[0]["x"] = 7 + + ghj = nx.union_all([g, h, j], rename=("g", "h", "j")) + assert set(ghj.nodes()) == {"h0", "h1", "g0", "g1", "j0", "j1"} + for n in ghj: + graph, node = n + assert ghj.nodes[n] == eval(graph).nodes[int(node)] + + assert ghj.graph["attr"] == "attr" + assert ghj.graph["name"] == "j" # j graph attributes take precedent + + +def test_intersection_all(): + G = nx.Graph() + H = nx.Graph() + R = nx.Graph(awesome=True) + G.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + R.add_nodes_from([1, 2, 3, 4]) + R.add_edge(2, 3) + R.add_edge(4, 1) + I = nx.intersection_all([G, H, R]) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + assert I.graph == {} + + +def test_intersection_all_different_node_sets(): + G = nx.Graph() + H = nx.Graph() + R = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 6, 7]) + G.add_edge(1, 2) + G.add_edge(2, 3) + G.add_edge(6, 7) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + R.add_nodes_from([1, 2, 3, 4, 8, 9]) + R.add_edge(2, 3) + R.add_edge(4, 1) + R.add_edge(8, 9) + I = nx.intersection_all([G, H, R]) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_all_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_all_attributes_different_node_sets(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + g.add_node(2) + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_all_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_intersection_all_multigraph_attributes_different_node_sets(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + g.add_edge(1, 2, key=1) + g.add_edge(1, 2, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=2) + h.add_edge(0, 1, key=3) + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1), (0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0), (0, 1, 2)] + + +def test_intersection_all_digraph(): + g = nx.DiGraph() + g.add_edges_from([(1, 2), (2, 3)]) + h = nx.DiGraph() + h.add_edges_from([(2, 1), (2, 3)]) + gh = nx.intersection_all([g, 
h]) + assert sorted(gh.edges()) == [(2, 3)] + + +def test_union_all_and_compose_all(): + K3 = nx.complete_graph(3) + P3 = nx.path_graph(3) + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G2 = nx.DiGraph() + G2.add_edge("1", "2") + G2.add_edge("1", "3") + G2.add_edge("1", "4") + + G = nx.union_all([G1, G2]) + H = nx.compose_all([G1, G2]) + assert edges_equal(G.edges(), H.edges()) + assert not G.has_edge("A", "1") + pytest.raises(nx.NetworkXError, nx.union, K3, P3) + H1 = nx.union_all([H, G1], rename=("H", "G1")) + assert sorted(H1.nodes()) == [ + "G1A", + "G1B", + "G1C", + "G1D", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + H2 = nx.union_all([H, G2], rename=("H", "")) + assert sorted(H2.nodes()) == [ + "1", + "2", + "3", + "4", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + assert not H1.has_edge("NB", "NA") + + G = nx.compose_all([G, G]) + assert edges_equal(G.edges(), H.edges()) + + G2 = nx.union_all([G2, G2], rename=("", "copy")) + assert sorted(G2.nodes()) == [ + "1", + "2", + "3", + "4", + "copy1", + "copy2", + "copy3", + "copy4", + ] + + assert sorted(G2.neighbors("copy4")) == [] + assert sorted(G2.neighbors("copy1")) == ["copy2", "copy3", "copy4"] + assert len(G) == 8 + assert nx.number_of_edges(G) == 6 + + E = nx.disjoint_union_all([G, G]) + assert len(E) == 16 + assert nx.number_of_edges(E) == 12 + + E = nx.disjoint_union_all([G1, G2]) + assert sorted(E.nodes()) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G2 = nx.DiGraph() + G2.add_edge(1, 2) + G3 = nx.DiGraph() + G3.add_edge(11, 22) + G4 = nx.union_all([G1, G2, G3], rename=("G1", "G2", "G3")) + assert sorted(G4.nodes()) == ["G1A", "G1B", "G21", "G22", "G311", "G322"] + + +def test_union_all_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.union_all([G, H]) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_input_output(): + l = [nx.Graph([(1, 2)]), nx.Graph([(3, 4)], awesome=True)] + U = nx.disjoint_union_all(l) + assert len(l) == 2 + assert U.graph["awesome"] + C = nx.compose_all(l) + assert len(l) == 2 + l = [nx.Graph([(1, 2)]), nx.Graph([(1, 2)])] + R = nx.intersection_all(l) + assert len(l) == 2 + + +def test_mixed_type_union(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.union_all([G, H, I]) + with pytest.raises(nx.NetworkXError): + X = nx.Graph() + Y = nx.DiGraph() + XY = nx.union_all([X, Y]) + + +def test_mixed_type_disjoint_union(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.disjoint_union_all([G, H, I]) + with pytest.raises(nx.NetworkXError): + X = nx.Graph() + Y = nx.DiGraph() + XY = nx.disjoint_union_all([X, Y]) + + +def test_mixed_type_intersection(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.intersection_all([G, H, I]) + with pytest.raises(nx.NetworkXError): + X = nx.Graph() + Y = nx.DiGraph() + XY = nx.intersection_all([X, Y]) + + +def test_mixed_type_compose(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.compose_all([G, H, I]) + with pytest.raises(nx.NetworkXError): + X = nx.Graph() + Y = nx.DiGraph() + XY = 
nx.compose_all([X, Y]) + + +def test_empty_union(): + with pytest.raises(ValueError): + nx.union_all([]) + + +def test_empty_disjoint_union(): + with pytest.raises(ValueError): + nx.disjoint_union_all([]) + + +def test_empty_compose_all(): + with pytest.raises(ValueError): + nx.compose_all([]) + + +def test_empty_intersection_all(): + with pytest.raises(ValueError): + nx.intersection_all([]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_binary.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_binary.py new file mode 100644 index 0000000..e8e534e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_binary.py @@ -0,0 +1,451 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +def test_union_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.union(g, h, rename=("g", "h")) + assert set(gh.nodes()) == {"h0", "h1", "g0", "g1"} + for n in gh: + graph, node = n + assert gh.nodes[n] == eval(graph).nodes[int(node)] + + assert gh.graph["attr"] == "attr" + assert gh.graph["name"] == "h" # h graph attributes take precedent + + +def test_intersection(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + I = nx.intersection(G, H) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_node_sets_different(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 7]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4, 5, 6]) + H.add_edge(2, 3) + H.add_edge(3, 4) + H.add_edge(5, 6) + I = nx.intersection(G, H) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + gh = nx.intersection(g, h) + + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_attributes_node_sets_different(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_node(2, x=3) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + h.remove_node(2) + + gh = nx.intersection(g, h) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_intersection_multigraph_attributes_node_set_different(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + g.add_edge(0, 2, key=2) + g.add_edge(0, 2, key=1) + h = nx.MultiGraph() + h.add_edge(0, 1, 
key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection(g, h) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_difference(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + D = nx.difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(1, 2)] + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(3, 4)] + D = nx.symmetric_difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(1, 2), (3, 4)] + + +def test_difference2(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + H.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + H.add_edge(1, 2) + G.add_edge(2, 3) + D = nx.difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(2, 3)] + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [] + H.add_edge(3, 4) + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(3, 4)] + + +def test_difference_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [] + # node and graph data should not be copied over + assert gh.nodes.data() != g.nodes.data() + assert gh.graph != g.graph + + +def test_difference_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1), (0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 1), (0, 1, 2)] + + +def test_difference_raise(): + G = nx.path_graph(4) + H = nx.path_graph(3) + pytest.raises(nx.NetworkXError, nx.difference, G, H) + pytest.raises(nx.NetworkXError, nx.symmetric_difference, G, H) + + +def test_symmetric_difference_multigraph(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.symmetric_difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == 3 * [(0, 1)] + assert sorted(sorted(e) for e in gh.edges(keys=True)) == [ + [0, 1, 1], + [0, 1, 2], + [0, 1, 3], + ] + + +def test_union_and_compose(): + K3 = nx.complete_graph(3) + P3 = nx.path_graph(3) + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G2 = nx.DiGraph() + G2.add_edge("1", "2") + G2.add_edge("1", "3") + G2.add_edge("1", "4") + + G = nx.union(G1, G2) + H = nx.compose(G1, G2) + assert edges_equal(G.edges(), H.edges()) + assert not G.has_edge("A", 1) + pytest.raises(nx.NetworkXError, nx.union, K3, P3) + H1 = nx.union(H, G1, rename=("H", "G1")) + assert sorted(H1.nodes()) == [ + "G1A", + "G1B", + "G1C", + "G1D", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + H2 = nx.union(H, G2, 
rename=("H", "")) + assert sorted(H2.nodes()) == [ + "1", + "2", + "3", + "4", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + assert not H1.has_edge("NB", "NA") + + G = nx.compose(G, G) + assert edges_equal(G.edges(), H.edges()) + + G2 = nx.union(G2, G2, rename=("", "copy")) + assert sorted(G2.nodes()) == [ + "1", + "2", + "3", + "4", + "copy1", + "copy2", + "copy3", + "copy4", + ] + + assert sorted(G2.neighbors("copy4")) == [] + assert sorted(G2.neighbors("copy1")) == ["copy2", "copy3", "copy4"] + assert len(G) == 8 + assert nx.number_of_edges(G) == 6 + + E = nx.disjoint_union(G, G) + assert len(E) == 16 + assert nx.number_of_edges(E) == 12 + + E = nx.disjoint_union(G1, G2) + assert sorted(E.nodes()) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([(1, {"a1": 1})]) + H.add_nodes_from([(1, {"b1": 1})]) + R = nx.compose(G, H) + assert R.nodes == {1: {"a1": 1, "b1": 1}} + + +def test_union_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.union(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_disjoint_union_multigraph(): + G = nx.MultiGraph() + G.add_edge(0, 1, key=0) + G.add_edge(0, 1, key=1) + H = nx.MultiGraph() + H.add_edge(2, 3, key=0) + H.add_edge(2, 3, key=1) + GH = nx.disjoint_union(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_compose_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.compose(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + H.add_edge(1, 2, key=2) + GH = nx.compose(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_full_join_graph(): + # Simple Graphs + G = nx.Graph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.Graph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # Rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # Rename graphs with string-like nodes + G = nx.Graph() + G.add_node("a") + G.add_edge("b", "c") + H = nx.Graph() + H.add_edge("d", "e") + + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"ga", "gb", "gc", "hd", "he"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # DiGraphs + G = nx.DiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.DiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + # DiGraphs Rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + +def 
test_full_join_multigraph(): + # MultiGraphs + G = nx.MultiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.MultiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # MultiGraphs rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # MultiDiGraphs + G = nx.MultiDiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.MultiDiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + # MultiDiGraphs rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + +def test_mixed_type_union(): + G = nx.Graph() + H = nx.MultiGraph() + pytest.raises(nx.NetworkXError, nx.union, G, H) + pytest.raises(nx.NetworkXError, nx.disjoint_union, G, H) + pytest.raises(nx.NetworkXError, nx.intersection, G, H) + pytest.raises(nx.NetworkXError, nx.difference, G, H) + pytest.raises(nx.NetworkXError, nx.symmetric_difference, G, H) + pytest.raises(nx.NetworkXError, nx.compose, G, H) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_product.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_product.py new file mode 100644 index 0000000..8ee54b9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_product.py @@ -0,0 +1,491 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +def test_tensor_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.tensor_product(nx.DiGraph(), nx.Graph()) + + +def test_tensor_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.tensor_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
+ G = nx.tensor_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_tensor_product_size(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + + G = nx.tensor_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_tensor_product_combinations(): + # basic smoke test, more realistic tests would be useful + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.tensor_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + G = nx.tensor_product(nx.DiGraph(P5), nx.DiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + +def test_tensor_product_classic_result(): + K2 = nx.complete_graph(2) + G = nx.petersen_graph() + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.desargues_graph()) + + G = nx.cycle_graph(5) + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.cycle_graph(10)) + + G = nx.tetrahedral_graph() + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.cubical_graph()) + + +def test_tensor_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.tensor_product(G, H) + + for u_G, u_H in GH.nodes(): + for v_G, v_H in GH.nodes(): + if H.has_edge(u_H, v_H) and G.has_edge(u_G, v_G): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_cartesian_product_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.cartesian_product(G, H) + assert set(GH) == {(1, 3), (2, 3), (2, 4), (1, 4)} + assert {(frozenset([u, v]), k) for u, v, k in GH.edges(keys=True)} == { + (frozenset([u, v]), k) + for u, v, k in [ + ((1, 3), (2, 3), 0), + ((1, 3), (2, 3), 1), + ((1, 3), (1, 4), 0), + ((1, 3), (1, 4), 1), + ((2, 3), (2, 4), 0), + ((2, 3), (2, 4), 1), + ((2, 4), (1, 4), 0), + ((2, 4), (1, 4), 1), + ] + } + + +def test_cartesian_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.cartesian_product(nx.DiGraph(), nx.Graph()) + + +def test_cartesian_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.cartesian_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
+ G = nx.cartesian_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_cartesian_product_size(): + # order(GXH)=order(G)*order(H) + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.cartesian_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + assert nx.number_of_edges(G) == nx.number_of_edges(P5) * nx.number_of_nodes( + K3 + ) + nx.number_of_edges(K3) * nx.number_of_nodes(P5) + G = nx.cartesian_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + assert nx.number_of_edges(G) == nx.number_of_edges(K5) * nx.number_of_nodes( + K3 + ) + nx.number_of_edges(K3) * nx.number_of_nodes(K5) + + +def test_cartesian_product_classic(): + # test some classic product graphs + P2 = nx.path_graph(2) + P3 = nx.path_graph(3) + # cube = 2-path X 2-path + G = nx.cartesian_product(P2, P2) + G = nx.cartesian_product(P2, G) + assert nx.is_isomorphic(G, nx.cubical_graph()) + + # 3x3 grid + G = nx.cartesian_product(P3, P3) + assert nx.is_isomorphic(G, nx.grid_2d_graph(3, 3)) + + +def test_cartesian_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.cartesian_product(G, H) + + for u_G, u_H in GH.nodes(): + for v_G, v_H in GH.nodes(): + if (u_G == v_G and H.has_edge(u_H, v_H)) or ( + u_H == v_H and G.has_edge(u_G, v_G) + ): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_lexicographic_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.lexicographic_product(nx.DiGraph(), nx.Graph()) + + +def test_lexicographic_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.lexicographic_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
+ G = nx.lexicographic_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_lexicographic_product_size(): + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.lexicographic_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_lexicographic_product_combinations(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.lexicographic_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + # No classic easily found classic results for lexicographic product + + +def test_lexicographic_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.lexicographic_product(G, H) + + for u_G, u_H in GH.nodes(): + for v_G, v_H in GH.nodes(): + if G.has_edge(u_G, v_G) or (u_G == v_G and H.has_edge(u_H, v_H)): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_strong_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.strong_product(nx.DiGraph(), nx.Graph()) + + +def test_strong_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.strong_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
+ G = nx.strong_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_strong_product_size(): + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.strong_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_strong_product_combinations(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.strong_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + # No classic easily found classic results for strong product + + +def test_strong_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.strong_product(G, H) + + for u_G, u_H in GH.nodes(): + for v_G, v_H in GH.nodes(): + if ( + (u_G == v_G and H.has_edge(u_H, v_H)) + or (u_H == v_H and G.has_edge(u_G, v_G)) + or (G.has_edge(u_G, v_G) and H.has_edge(u_H, v_H)) + ): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_graph_power_raises(): + with pytest.raises(nx.NetworkXNotImplemented): + nx.power(nx.MultiDiGraph(), 2) + + +def test_graph_power(): + # wikipedia example for graph power + G = nx.cycle_graph(7) + G.add_edge(6, 7) + G.add_edge(7, 8) + G.add_edge(8, 9) + G.add_edge(9, 2) + H = nx.power(G, 2) + + assert edges_equal( + list(H.edges()), + [ + (0, 1), + (0, 2), + (0, 5), + (0, 6), + (0, 7), + (1, 9), + (1, 2), + (1, 3), + (1, 6), + (2, 3), + (2, 4), + (2, 8), + (2, 9), + (3, 4), + (3, 5), + (3, 9), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (6, 7), + (6, 8), + (7, 8), + (7, 9), + (8, 9), + ], + ) + + +def test_graph_power_negative(): + with pytest.raises(ValueError): + nx.power(nx.Graph(), -1) + + +def test_rooted_product_raises(): + with pytest.raises(nx.NodeNotFound): + nx.rooted_product(nx.Graph(), nx.path_graph(2), 10) + + +def test_rooted_product(): + G = nx.cycle_graph(5) + H = nx.Graph() + H.add_edges_from([("a", "b"), ("b", "c"), ("b", "d")]) + R = nx.rooted_product(G, H, "a") + assert len(R) == len(G) * len(H) + assert R.size() == G.size() + len(G) * H.size() + + +def test_corona_product(): + G = nx.cycle_graph(3) + H = nx.path_graph(2) + C = nx.corona_product(G, H) + assert len(C) == (len(G) * len(H)) + len(G) + assert C.size() == G.size() + len(G) * H.size() + len(G) * len(H) + + +def test_modular_product(): + G = nx.path_graph(3) + H = nx.path_graph(4) + M = nx.modular_product(G, H) + assert len(M) == len(G) * len(H) + + assert edges_equal( + list(M.edges()), + [ + ((0, 0), (1, 1)), + ((0, 0), (2, 2)), + ((0, 0), (2, 3)), + ((0, 1), (1, 0)), + ((0, 1), (1, 2)), + ((0, 
1), (2, 3)), + ((0, 2), (1, 1)), + ((0, 2), (1, 3)), + ((0, 2), (2, 0)), + ((0, 3), (1, 2)), + ((0, 3), (2, 0)), + ((0, 3), (2, 1)), + ((1, 0), (2, 1)), + ((1, 1), (2, 0)), + ((1, 1), (2, 2)), + ((1, 2), (2, 1)), + ((1, 2), (2, 3)), + ((1, 3), (2, 2)), + ], + ) + + +def test_modular_product_raises(): + G = nx.Graph([(0, 1), (1, 2), (2, 0)]) + H = nx.Graph([(0, 1), (1, 2), (2, 0)]) + DG = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + DH = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + with pytest.raises(nx.NetworkXNotImplemented): + nx.modular_product(G, DH) + with pytest.raises(nx.NetworkXNotImplemented): + nx.modular_product(DG, H) + with pytest.raises(nx.NetworkXNotImplemented): + nx.modular_product(DG, DH) + + MG = nx.MultiGraph([(0, 1), (1, 2), (2, 0), (0, 1)]) + MH = nx.MultiGraph([(0, 1), (1, 2), (2, 0), (0, 1)]) + with pytest.raises(nx.NetworkXNotImplemented): + nx.modular_product(G, MH) + with pytest.raises(nx.NetworkXNotImplemented): + nx.modular_product(MG, H) + with pytest.raises(nx.NetworkXNotImplemented): + nx.modular_product(MG, MH) + with pytest.raises(nx.NetworkXNotImplemented): + # check multigraph with no multiedges + nx.modular_product(nx.MultiGraph(G), H) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_unary.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_unary.py new file mode 100644 index 0000000..d68e55c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/tests/test_unary.py @@ -0,0 +1,55 @@ +import pytest + +import networkx as nx + + +def test_complement(): + null = nx.null_graph() + empty1 = nx.empty_graph(1) + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + K10 = nx.complete_graph(10) + P2 = nx.path_graph(2) + P3 = nx.path_graph(3) + P5 = nx.path_graph(5) + P10 = nx.path_graph(10) + # complement of the complete graph is empty + + G = nx.complement(K3) + assert nx.is_isomorphic(G, nx.empty_graph(3)) + G = nx.complement(K5) + assert nx.is_isomorphic(G, nx.empty_graph(5)) + # for any G, G=complement(complement(G)) + P3cc = nx.complement(nx.complement(P3)) + assert nx.is_isomorphic(P3, P3cc) + nullcc = nx.complement(nx.complement(null)) + assert nx.is_isomorphic(null, nullcc) + b = nx.bull_graph() + bcc = nx.complement(nx.complement(b)) + assert nx.is_isomorphic(b, bcc) + + +def test_complement_2(): + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G1C = nx.complement(G1) + assert sorted(G1C.edges()) == [ + ("B", "A"), + ("B", "C"), + ("B", "D"), + ("C", "A"), + ("C", "B"), + ("C", "D"), + ("D", "A"), + ("D", "B"), + ("D", "C"), + ] + + +def test_reverse1(): + # Other tests for reverse are done by the DiGraph and MultiDigraph. + G1 = nx.Graph() + pytest.raises(nx.NetworkXError, nx.reverse, G1) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/unary.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/unary.py new file mode 100644 index 0000000..79e44d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/operators/unary.py @@ -0,0 +1,77 @@ +"""Unary operations on graphs""" + +import networkx as nx + +__all__ = ["complement", "reverse"] + + +@nx._dispatchable(returns_graph=True) +def complement(G): + """Returns the graph complement of G. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + GC : A new graph. 
+ + Notes + ----- + Note that `complement` does not create self-loops and also + does not produce parallel edges for MultiGraphs. + + Graph, node, and edge data are not propagated to the new graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> G_complement = nx.complement(G) + >>> G_complement.edges() # This shows the edges of the complemented graph + EdgeView([(1, 4), (1, 5), (2, 4), (2, 5), (4, 5)]) + + """ + R = G.__class__() + R.add_nodes_from(G) + R.add_edges_from( + ((n, n2) for n, nbrs in G.adjacency() for n2 in G if n2 not in nbrs if n != n2) + ) + return R + + +@nx._dispatchable(returns_graph=True) +def reverse(G, copy=True): + """Returns the reverse directed graph of G. + + Parameters + ---------- + G : directed graph + A NetworkX directed graph + copy : bool + If True, then a new graph is returned. If False, then the graph is + reversed in place. + + Returns + ------- + H : directed graph + The reversed G. + + Raises + ------ + NetworkXError + If graph is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> G_reversed = nx.reverse(G) + >>> G_reversed.edges() + OutEdgeView([(2, 1), (3, 1), (3, 2), (4, 3), (5, 3)]) + + """ + if not G.is_directed(): + raise nx.NetworkXError("Cannot reverse an undirected graph.") + else: + return G.reverse(copy=copy) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/planar_drawing.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/planar_drawing.py new file mode 100644 index 0000000..ea25809 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/planar_drawing.py @@ -0,0 +1,464 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["combinatorial_embedding_to_pos"] + + +def combinatorial_embedding_to_pos(embedding, fully_triangulate=False): + """Assigns every node a (x, y) position based on the given embedding + + The algorithm iteratively inserts nodes of the input graph in a certain + order and rearranges previously inserted nodes so that the planar drawing + stays valid. This is done efficiently by only maintaining relative + positions during the node placements and calculating the absolute positions + at the end. For more information see [1]_. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + This defines the order of the edges + + fully_triangulate : bool + If set to True the algorithm adds edges to a copy of the input + embedding and makes it chordal. + + Returns + ------- + pos : dict + Maps each node to a tuple that defines the (x, y) position + + References + ---------- + .. [1] M. Chrobak and T.H. Payne: + A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677 + + """ + if len(embedding.nodes()) < 4: + # Position the node in any triangle + default_positions = [(0, 0), (2, 0), (1, 1)] + pos = {} + for i, v in enumerate(embedding.nodes()): + pos[v] = default_positions[i] + return pos + + embedding, outer_face = triangulate_embedding(embedding, fully_triangulate) + + # The following dicts map a node to another node + # If a node is not in the key set it means that the node is not yet in G_k + # If a node maps to None then the corresponding subtree does not exist + left_t_child = {} + right_t_child = {} + + # The following dicts map a node to an integer + delta_x = {} + y_coordinate = {} + + node_list = get_canonical_ordering(embedding, outer_face) + + # 1. 
Phase: Compute relative positions + + # Initialization + v1, v2, v3 = node_list[0][0], node_list[1][0], node_list[2][0] + + delta_x[v1] = 0 + y_coordinate[v1] = 0 + right_t_child[v1] = v3 + left_t_child[v1] = None + + delta_x[v2] = 1 + y_coordinate[v2] = 0 + right_t_child[v2] = None + left_t_child[v2] = None + + delta_x[v3] = 1 + y_coordinate[v3] = 1 + right_t_child[v3] = v2 + left_t_child[v3] = None + + for k in range(3, len(node_list)): + vk, contour_nbrs = node_list[k] + wp = contour_nbrs[0] + wp1 = contour_nbrs[1] + wq = contour_nbrs[-1] + wq1 = contour_nbrs[-2] + adds_mult_tri = len(contour_nbrs) > 2 + + # Stretch gaps: + delta_x[wp1] += 1 + delta_x[wq] += 1 + + delta_x_wp_wq = sum(delta_x[x] for x in contour_nbrs[1:]) + + # Adjust offsets + delta_x[vk] = (-y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2 + y_coordinate[vk] = (y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2 + delta_x[wq] = delta_x_wp_wq - delta_x[vk] + if adds_mult_tri: + delta_x[wp1] -= delta_x[vk] + + # Install v_k: + right_t_child[wp] = vk + right_t_child[vk] = wq + if adds_mult_tri: + left_t_child[vk] = wp1 + right_t_child[wq1] = None + else: + left_t_child[vk] = None + + # 2. Phase: Set absolute positions + pos = {} + pos[v1] = (0, y_coordinate[v1]) + remaining_nodes = [v1] + while remaining_nodes: + parent_node = remaining_nodes.pop() + + # Calculate position for left child + set_position( + parent_node, left_t_child, remaining_nodes, delta_x, y_coordinate, pos + ) + # Calculate position for right child + set_position( + parent_node, right_t_child, remaining_nodes, delta_x, y_coordinate, pos + ) + return pos + + +def set_position(parent, tree, remaining_nodes, delta_x, y_coordinate, pos): + """Helper method to calculate the absolute position of nodes.""" + child = tree[parent] + parent_node_x = pos[parent][0] + if child is not None: + # Calculate pos of child + child_x = parent_node_x + delta_x[child] + pos[child] = (child_x, y_coordinate[child]) + # Remember to calculate pos of its children + remaining_nodes.append(child) + + +def get_canonical_ordering(embedding, outer_face): + """Returns a canonical ordering of the nodes + + The canonical ordering of nodes (v1, ..., vn) must fulfill the following + conditions: + (See Lemma 1 in [2]_) + + - For the subgraph G_k of the input graph induced by v1, ..., vk it holds: + - 2-connected + - internally triangulated + - the edge (v1, v2) is part of the outer face + - For a node v(k+1) the following holds: + - The node v(k+1) is part of the outer face of G_k + - It has at least two neighbors in G_k + - All neighbors of v(k+1) in G_k lie consecutively on the outer face of + G_k (excluding the edge (v1, v2)). + + The algorithm used here starts with G_n (containing all nodes). It first + selects the nodes v1 and v2. And then tries to find the order of the other + nodes by checking which node can be removed in order to fulfill the + conditions mentioned above. This is done by calculating the number of + chords of nodes on the outer face. For more information see [1]_. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + The embedding must be triangulated + outer_face : list + The nodes on the outer face of the graph + + Returns + ------- + ordering : list + A list of tuples `(vk, wp_wq)`. Here `vk` is the node at this position + in the canonical ordering. The element `wp_wq` is a list of nodes that + make up the outer face of G_k. + + References + ---------- + .. [1] Steven Chaplick. 
+ Canonical Orders of Planar Graphs and (some of) Their Applications 2015 + https://wuecampus2.uni-wuerzburg.de/moodle/pluginfile.php/545727/mod_resource/content/0/vg-ss15-vl03-canonical-orders-druckversion.pdf + .. [2] M. Chrobak and T.H. Payne: + A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677 + + """ + v1 = outer_face[0] + v2 = outer_face[1] + chords = defaultdict(int) # Maps nodes to the number of their chords + marked_nodes = set() + ready_to_pick = set(outer_face) + + # Initialize outer_face_ccw_nbr (do not include v1 -> v2) + outer_face_ccw_nbr = {} + prev_nbr = v2 + for idx in range(2, len(outer_face)): + outer_face_ccw_nbr[prev_nbr] = outer_face[idx] + prev_nbr = outer_face[idx] + outer_face_ccw_nbr[prev_nbr] = v1 + + # Initialize outer_face_cw_nbr (do not include v2 -> v1) + outer_face_cw_nbr = {} + prev_nbr = v1 + for idx in range(len(outer_face) - 1, 0, -1): + outer_face_cw_nbr[prev_nbr] = outer_face[idx] + prev_nbr = outer_face[idx] + + def is_outer_face_nbr(x, y): + if x not in outer_face_ccw_nbr: + return outer_face_cw_nbr[x] == y + if x not in outer_face_cw_nbr: + return outer_face_ccw_nbr[x] == y + return outer_face_ccw_nbr[x] == y or outer_face_cw_nbr[x] == y + + def is_on_outer_face(x): + return x not in marked_nodes and (x in outer_face_ccw_nbr or x == v1) + + # Initialize number of chords + for v in outer_face: + for nbr in embedding.neighbors_cw_order(v): + if is_on_outer_face(nbr) and not is_outer_face_nbr(v, nbr): + chords[v] += 1 + ready_to_pick.discard(v) + + # Initialize canonical_ordering + canonical_ordering = [None] * len(embedding.nodes()) + canonical_ordering[0] = (v1, []) + canonical_ordering[1] = (v2, []) + ready_to_pick.discard(v1) + ready_to_pick.discard(v2) + + for k in range(len(embedding.nodes()) - 1, 1, -1): + # 1. 
Pick v from ready_to_pick
+        v = ready_to_pick.pop()
+        marked_nodes.add(v)
+
+        # v has exactly two neighbors on the outer face (wp and wq)
+        wp = None
+        wq = None
+        # Iterate over neighbors of v to find wp and wq
+        nbr_iterator = iter(embedding.neighbors_cw_order(v))
+        while True:
+            nbr = next(nbr_iterator)
+            if nbr in marked_nodes:
+                # Only consider nodes that are not yet removed
+                continue
+            if is_on_outer_face(nbr):
+                # nbr is either wp or wq
+                if nbr == v1:
+                    wp = v1
+                elif nbr == v2:
+                    wq = v2
+                else:
+                    if outer_face_cw_nbr[nbr] == v:
+                        # nbr is wp
+                        wp = nbr
+                    else:
+                        # nbr is wq
+                        wq = nbr
+            if wp is not None and wq is not None:
+                # We don't need to iterate any further
+                break
+
+        # Obtain new nodes on outer face (neighbors of v from wp to wq)
+        wp_wq = [wp]
+        nbr = wp
+        while nbr != wq:
+            # Get next neighbor (clockwise on the outer face)
+            next_nbr = embedding[v][nbr]["ccw"]
+            wp_wq.append(next_nbr)
+            # Update outer face
+            outer_face_cw_nbr[nbr] = next_nbr
+            outer_face_ccw_nbr[next_nbr] = nbr
+            # Move to next neighbor of v
+            nbr = next_nbr
+
+        if len(wp_wq) == 2:
+            # There was a chord between wp and wq, decrease number of chords
+            chords[wp] -= 1
+            if chords[wp] == 0:
+                ready_to_pick.add(wp)
+            chords[wq] -= 1
+            if chords[wq] == 0:
+                ready_to_pick.add(wq)
+        else:
+            # Update all chords involving w_(p+1) to w_(q-1)
+            new_face_nodes = set(wp_wq[1:-1])
+            for w in new_face_nodes:
+                # If we do not find a chord for w later we can pick it next
+                ready_to_pick.add(w)
+                for nbr in embedding.neighbors_cw_order(w):
+                    if is_on_outer_face(nbr) and not is_outer_face_nbr(w, nbr):
+                        # There is a chord involving w
+                        chords[w] += 1
+                        ready_to_pick.discard(w)
+                        if nbr not in new_face_nodes:
+                            # Also increase chord for the neighbor
+                            # We only iterate over new_face_nodes
+                            chords[nbr] += 1
+                            ready_to_pick.discard(nbr)
+        # Set the canonical ordering node and the list of contour neighbors
+        canonical_ordering[k] = (v, wp_wq)
+
+    return canonical_ordering
+
+
+def triangulate_face(embedding, v1, v2):
+    """Triangulates the face given by the half-edge (v1, v2)
+
+    Parameters
+    ----------
+    embedding : nx.PlanarEmbedding
+    v1 : node
+        The half-edge (v1, v2) belongs to the face that gets triangulated
+    v2 : node
+    """
+    _, v3 = embedding.next_face_half_edge(v1, v2)
+    _, v4 = embedding.next_face_half_edge(v2, v3)
+    if v1 in (v2, v3):
+        # The component has fewer than 3 nodes
+        return
+    while v1 != v4:
+        # Add edge if not already present on other side
+        if embedding.has_edge(v1, v3):
+            # Cannot triangulate at this position
+            v1, v2, v3 = v2, v3, v4
+        else:
+            # Add edge for triangulation
+            embedding.add_half_edge(v1, v3, ccw=v2)
+            embedding.add_half_edge(v3, v1, cw=v2)
+            v1, v2, v3 = v1, v3, v4
+        # Get next node
+        _, v4 = embedding.next_face_half_edge(v2, v3)
+
+
+def triangulate_embedding(embedding, fully_triangulate=True):
+    """Triangulates the embedding.
+
+    Traverses the faces of the embedding and adds edges to a copy of the
+    embedding to triangulate it.
+    The method also ensures that the resulting graph is 2-connected by adding
+    edges if the same vertex is contained twice on a path around a face.
+
+    Parameters
+    ----------
+    embedding : nx.PlanarEmbedding
+        The input graph must contain at least 3 nodes.
+
+    fully_triangulate : bool
+        If set to False the face with the most nodes is chosen as the outer
+        face. This outer face does not get triangulated.
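+
+    For example (an editorial sketch; a 4-cycle embedding has two
+    quadrilateral faces, and only the non-outer one gains a chord when
+    `fully_triangulate` is False):
+
+    >>> emb = nx.PlanarEmbedding()
+    >>> emb.set_data({0: [1, 3], 1: [2, 0], 2: [3, 1], 3: [0, 2]})
+    >>> T, outer = triangulate_embedding(emb, fully_triangulate=False)
+    >>> T.number_of_edges() > emb.number_of_edges()
+    True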
+ + Returns + ------- + (embedding, outer_face) : (nx.PlanarEmbedding, list) tuple + The element `embedding` is a new embedding containing all edges from + the input embedding and the additional edges to triangulate the graph. + The element `outer_face` is a list of nodes that lie on the outer face. + If the graph is fully triangulated these are three arbitrary connected + nodes. + + """ + if len(embedding.nodes) <= 1: + return embedding, list(embedding.nodes) + embedding = nx.PlanarEmbedding(embedding) + + # Get a list with a node for each connected component + component_nodes = [next(iter(x)) for x in nx.connected_components(embedding)] + + # 1. Make graph a single component (add edge between components) + for i in range(len(component_nodes) - 1): + v1 = component_nodes[i] + v2 = component_nodes[i + 1] + embedding.connect_components(v1, v2) + + # 2. Calculate faces, ensure 2-connectedness and determine outer face + outer_face = [] # A face with the most number of nodes + face_list = [] + edges_visited = set() # Used to keep track of already visited faces + for v in embedding.nodes(): + for w in embedding.neighbors_cw_order(v): + new_face = make_bi_connected(embedding, v, w, edges_visited) + if new_face: + # Found a new face + face_list.append(new_face) + if len(new_face) > len(outer_face): + # The face is a candidate to be the outer face + outer_face = new_face + + # 3. Triangulate (internal) faces + for face in face_list: + if face is not outer_face or fully_triangulate: + # Triangulate this face + triangulate_face(embedding, face[0], face[1]) + + if fully_triangulate: + v1 = outer_face[0] + v2 = outer_face[1] + v3 = embedding[v2][v1]["ccw"] + outer_face = [v1, v2, v3] + + return embedding, outer_face + + +def make_bi_connected(embedding, starting_node, outgoing_node, edges_counted): + """Triangulate a face and make it 2-connected + + This method also adds all edges on the face to `edges_counted`. 
+
+    Parameters
+    ----------
+    embedding: nx.PlanarEmbedding
+        The embedding that defines the faces
+    starting_node : node
+        A node on the face
+    outgoing_node : node
+        A node such that the half-edge (starting_node, outgoing_node) belongs
+        to the face
+    edges_counted: set
+        Set of all half-edges that belong to a face that have been visited
+
+    Returns
+    -------
+    face_nodes: list
+        A list of all nodes at the border of this face
+    """
+
+    # Check if the face has already been calculated
+    if (starting_node, outgoing_node) in edges_counted:
+        # This face was already counted
+        return []
+    edges_counted.add((starting_node, outgoing_node))
+
+    # Add all edges to edges_counted which have this face to their left
+    v1 = starting_node
+    v2 = outgoing_node
+    face_list = [starting_node]  # List of nodes around the face
+    face_set = set(face_list)  # Set for faster queries
+    _, v3 = embedding.next_face_half_edge(v1, v2)
+
+    # Move the nodes v1, v2, v3 around the face:
+    while v2 != starting_node or v3 != outgoing_node:
+        if v1 == v2:
+            raise nx.NetworkXException("Invalid half-edge")
+        # cycle is not completed yet
+        if v2 in face_set:
+            # v2 encountered twice: Add edge to ensure 2-connectedness
+            embedding.add_half_edge(v1, v3, ccw=v2)
+            embedding.add_half_edge(v3, v1, cw=v2)
+            edges_counted.add((v2, v3))
+            edges_counted.add((v3, v1))
+            v2 = v1
+        else:
+            face_set.add(v2)
+            face_list.append(v2)
+
+        # set next edge
+        v1 = v2
+        v2, v3 = embedding.next_face_half_edge(v2, v3)
+
+        # remember that this edge has been counted
+        edges_counted.add((v1, v2))
+
+    return face_list
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/planarity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/planarity.py
new file mode 100644
index 0000000..41778f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/planarity.py
@@ -0,0 +1,1463 @@
+from collections import defaultdict
+from copy import deepcopy
+
+import networkx as nx
+
+__all__ = ["check_planarity", "is_planar", "PlanarEmbedding"]
+
+
+@nx._dispatchable
+def is_planar(G):
+    """Returns True if and only if `G` is planar.
+
+    A graph is *planar* iff it can be drawn in a plane without
+    any edge intersections.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    bool
+        Whether the graph is planar.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2)])
+    >>> nx.is_planar(G)
+    True
+    >>> nx.is_planar(nx.complete_graph(5))
+    False
+
+    See Also
+    --------
+    check_planarity :
+        Check if graph is planar *and* return a `PlanarEmbedding` instance if True.
+    """
+
+    return check_planarity(G, counterexample=False)[0]
+
+
+@nx._dispatchable(returns_graph=True)
+def check_planarity(G, counterexample=False):
+    """Check if a graph is planar and return a counterexample or an embedding.
+
+    A graph is planar iff it can be drawn in a plane without
+    any edge intersections.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    counterexample : bool
+        A Kuratowski subgraph (to prove non-planarity) is only returned if set
+        to True.
+
+    Returns
+    -------
+    (is_planar, certificate) : (bool, NetworkX graph) tuple
+        is_planar is True if the graph is planar.
+        If the graph is planar `certificate` is a PlanarEmbedding
+        otherwise it is a Kuratowski subgraph.
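+
+    When a counterexample is requested, a certificate of non-planarity is
+    returned instead (sketch; `K_5` is non-planar and is its own Kuratowski
+    subgraph):
+
+    >>> is_planar, K = nx.check_planarity(nx.complete_graph(5), counterexample=True)
+    >>> is_planar
+    False
+    >>> len(K)
+    5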
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> is_planar, P = nx.check_planarity(G) + >>> print(is_planar) + True + + When `G` is planar, a `PlanarEmbedding` instance is returned: + + >>> P.get_data() + {0: [1, 2], 1: [0], 2: [0]} + + Notes + ----- + A (combinatorial) embedding consists of cyclic orderings of the incident + edges at each vertex. Given such an embedding there are multiple approaches + discussed in literature to drawing the graph (subject to various + constraints, e.g. integer coordinates), see e.g. [2]. + + The planarity check algorithm and extraction of the combinatorial embedding + is based on the Left-Right Planarity Test [1]. + + A counterexample is only generated if the corresponding parameter is set, + because the complexity of the counterexample generation is higher. + + See also + -------- + is_planar : + Check for planarity without creating a `PlanarEmbedding` or counterexample. + + References + ---------- + .. [1] Ulrik Brandes: + The Left-Right Planarity Test + 2009 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208 + .. [2] Takao Nishizeki, Md Saidur Rahman: + Planar graph drawing + Lecture Notes Series on Computing: Volume 12 + 2004 + """ + + planarity_state = LRPlanarity(G) + embedding = planarity_state.lr_planarity() + if embedding is None: + # graph is not planar + if counterexample: + return False, get_counterexample(G) + else: + return False, None + else: + # graph is planar + return True, embedding + + +@nx._dispatchable(returns_graph=True) +def check_planarity_recursive(G, counterexample=False): + """Recursive version of :meth:`check_planarity`.""" + planarity_state = LRPlanarity(G) + embedding = planarity_state.lr_planarity_recursive() + if embedding is None: + # graph is not planar + if counterexample: + return False, get_counterexample_recursive(G) + else: + return False, None + else: + # graph is planar + return True, embedding + + +@nx._dispatchable(returns_graph=True) +def get_counterexample(G): + """Obtains a Kuratowski subgraph. + + Raises nx.NetworkXException if G is planar. + + The function removes edges such that the graph is still not planar. + At some point the removal of any edge would make the graph planar. + This subgraph must be a Kuratowski subgraph. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + subgraph : NetworkX graph + A Kuratowski subgraph that proves that G is not planar. + + """ + # copy graph + G = nx.Graph(G) + + if check_planarity(G)[0]: + raise nx.NetworkXException("G is planar - no counter example.") + + # find Kuratowski subgraph + subgraph = nx.Graph() + for u in G: + nbrs = list(G[u]) + for v in nbrs: + G.remove_edge(u, v) + if check_planarity(G)[0]: + G.add_edge(u, v) + subgraph.add_edge(u, v) + + return subgraph + + +@nx._dispatchable(returns_graph=True) +def get_counterexample_recursive(G): + """Recursive version of :meth:`get_counterexample`.""" + + # copy graph + G = nx.Graph(G) + + if check_planarity_recursive(G)[0]: + raise nx.NetworkXException("G is planar - no counter example.") + + # find Kuratowski subgraph + subgraph = nx.Graph() + for u in G: + nbrs = list(G[u]) + for v in nbrs: + G.remove_edge(u, v) + if check_planarity_recursive(G)[0]: + G.add_edge(u, v) + subgraph.add_edge(u, v) + + return subgraph + + +class Interval: + """Represents a set of return edges. 
+
+    All return edges in an interval induce the same constraint on the
+    contained edges, which means that all edges must either have a left
+    orientation or all edges must have a right orientation.
+    """
+
+    def __init__(self, low=None, high=None):
+        self.low = low
+        self.high = high
+
+    def empty(self):
+        """Check if the interval is empty"""
+        return self.low is None and self.high is None
+
+    def copy(self):
+        """Returns a copy of this interval"""
+        return Interval(self.low, self.high)
+
+    def conflicting(self, b, planarity_state):
+        """Returns True if interval I conflicts with edge b"""
+        return (
+            not self.empty()
+            and planarity_state.lowpt[self.high] > planarity_state.lowpt[b]
+        )
+
+
+class ConflictPair:
+    """Represents a constraint between two intervals.
+
+    The edges in the left interval must have a different orientation than
+    those in the right interval.
+    """
+
+    def __init__(self, left=Interval(), right=Interval()):
+        self.left = left
+        self.right = right
+
+    def swap(self):
+        """Swap left and right intervals"""
+        temp = self.left
+        self.left = self.right
+        self.right = temp
+
+    def lowest(self, planarity_state):
+        """Returns the lowest lowpoint of a conflict pair"""
+        if self.left.empty():
+            return planarity_state.lowpt[self.right.low]
+        if self.right.empty():
+            return planarity_state.lowpt[self.left.low]
+        return min(
+            planarity_state.lowpt[self.left.low], planarity_state.lowpt[self.right.low]
+        )
+
+
+def top_of_stack(l):
+    """Returns the element on top of the stack."""
+    if not l:
+        return None
+    return l[-1]
+
+
+class LRPlanarity:
+    """A class to maintain the state during planarity check."""
+
+    __slots__ = [
+        "G",
+        "roots",
+        "height",
+        "lowpt",
+        "lowpt2",
+        "nesting_depth",
+        "parent_edge",
+        "DG",
+        "adjs",
+        "ordered_adjs",
+        "ref",
+        "side",
+        "S",
+        "stack_bottom",
+        "lowpt_edge",
+        "left_ref",
+        "right_ref",
+        "embedding",
+    ]
+
+    def __init__(self, G):
+        # copy G without adding self-loops
+        self.G = nx.Graph()
+        self.G.add_nodes_from(G.nodes)
+        for e in G.edges:
+            if e[0] != e[1]:
+                self.G.add_edge(e[0], e[1])
+
+        self.roots = []
+
+        # distance from tree root
+        self.height = defaultdict(lambda: None)
+
+        self.lowpt = {}  # height of lowest return point of an edge
+        self.lowpt2 = {}  # height of second lowest return point
+        self.nesting_depth = {}  # for nesting order
+
+        # None -> missing edge
+        self.parent_edge = defaultdict(lambda: None)
+
+        # oriented DFS graph
+        self.DG = nx.DiGraph()
+        self.DG.add_nodes_from(G.nodes)
+
+        self.adjs = {}
+        self.ordered_adjs = {}
+
+        self.ref = defaultdict(lambda: None)
+        self.side = defaultdict(lambda: 1)
+
+        # stack of conflict pairs
+        self.S = []
+        self.stack_bottom = {}
+        self.lowpt_edge = {}
+
+        self.left_ref = {}
+        self.right_ref = {}
+
+        self.embedding = PlanarEmbedding()
+
+    def lr_planarity(self):
+        """Execute the LR planarity test.
+
+        Returns
+        -------
+        embedding : dict
+            If the graph is planar an embedding is returned. Otherwise None.
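+
+        A rough sanity check (editorial sketch; `K_4` is planar while `K_5`
+        is not):
+
+        >>> LRPlanarity(nx.complete_graph(4)).lr_planarity() is None
+        False
+        >>> LRPlanarity(nx.complete_graph(5)).lr_planarity() is None
+        True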
+ """ + if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: + # graph is not planar + return None + + # make adjacency lists for dfs + for v in self.G: + self.adjs[v] = list(self.G[v]) + + # orientation of the graph by depth first search traversal + for v in self.G: + if self.height[v] is None: + self.height[v] = 0 + self.roots.append(v) + self.dfs_orientation(v) + + # Free no longer used variables + self.G = None + self.lowpt2 = None + self.adjs = None + + # testing + for v in self.DG: # sort the adjacency lists by nesting depth + # note: this sorting leads to non linear time + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + for v in self.roots: + if not self.dfs_testing(v): + return None + + # Free no longer used variables + self.height = None + self.lowpt = None + self.S = None + self.stack_bottom = None + self.lowpt_edge = None + + for e in self.DG.edges: + self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e] + + self.embedding.add_nodes_from(self.DG.nodes) + for v in self.DG: + # sort the adjacency lists again + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + # initialize the embedding + previous_node = None + for w in self.ordered_adjs[v]: + self.embedding.add_half_edge(v, w, ccw=previous_node) + previous_node = w + + # Free no longer used variables + self.DG = None + self.nesting_depth = None + self.ref = None + + # compute the complete embedding + for v in self.roots: + self.dfs_embedding(v) + + # Free no longer used variables + self.roots = None + self.parent_edge = None + self.ordered_adjs = None + self.left_ref = None + self.right_ref = None + self.side = None + + return self.embedding + + def lr_planarity_recursive(self): + """Recursive version of :meth:`lr_planarity`.""" + if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: + # graph is not planar + return None + + # orientation of the graph by depth first search traversal + for v in self.G: + if self.height[v] is None: + self.height[v] = 0 + self.roots.append(v) + self.dfs_orientation_recursive(v) + + # Free no longer used variable + self.G = None + + # testing + for v in self.DG: # sort the adjacency lists by nesting depth + # note: this sorting leads to non linear time + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + for v in self.roots: + if not self.dfs_testing_recursive(v): + return None + + for e in self.DG.edges: + self.nesting_depth[e] = self.sign_recursive(e) * self.nesting_depth[e] + + self.embedding.add_nodes_from(self.DG.nodes) + for v in self.DG: + # sort the adjacency lists again + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + # initialize the embedding + previous_node = None + for w in self.ordered_adjs[v]: + self.embedding.add_half_edge(v, w, ccw=previous_node) + previous_node = w + + # compute the complete embedding + for v in self.roots: + self.dfs_embedding_recursive(v) + + return self.embedding + + def dfs_orientation(self, v): + """Orient the graph by DFS, compute lowpoints and nesting order.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + # boolean to indicate whether to skip the initial work for an edge + skip_init = defaultdict(lambda: False) + + while dfs_stack: + v = dfs_stack.pop() + e = self.parent_edge[v] + + for w in self.adjs[v][ind[v] :]: + vw = (v, w) + + if not skip_init[vw]: + if (v, w) 
in self.DG.edges or (w, v) in self.DG.edges: + ind[v] += 1 + continue # the edge was already oriented + + self.DG.add_edge(v, w) # orient the edge + + self.lowpt[vw] = self.height[v] + self.lowpt2[vw] = self.height[v] + if self.height[w] is None: # (v, w) is a tree edge + self.parent_edge[w] = vw + self.height[w] = self.height[v] + 1 + + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + skip_init[vw] = True # don't redo this block + break # handle next node in dfs_stack (i.e. w) + else: # (v, w) is a back edge + self.lowpt[vw] = self.height[w] + + # determine nesting graph + self.nesting_depth[vw] = 2 * self.lowpt[vw] + if self.lowpt2[vw] < self.height[v]: # chordal + self.nesting_depth[vw] += 1 + + # update lowpoints of parent edge e + if e is not None: + if self.lowpt[vw] < self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) + self.lowpt[e] = self.lowpt[vw] + elif self.lowpt[vw] > self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) + else: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) + + ind[v] += 1 + + def dfs_orientation_recursive(self, v): + """Recursive version of :meth:`dfs_orientation`.""" + e = self.parent_edge[v] + for w in self.G[v]: + if (v, w) in self.DG.edges or (w, v) in self.DG.edges: + continue # the edge was already oriented + vw = (v, w) + self.DG.add_edge(v, w) # orient the edge + + self.lowpt[vw] = self.height[v] + self.lowpt2[vw] = self.height[v] + if self.height[w] is None: # (v, w) is a tree edge + self.parent_edge[w] = vw + self.height[w] = self.height[v] + 1 + self.dfs_orientation_recursive(w) + else: # (v, w) is a back edge + self.lowpt[vw] = self.height[w] + + # determine nesting graph + self.nesting_depth[vw] = 2 * self.lowpt[vw] + if self.lowpt2[vw] < self.height[v]: # chordal + self.nesting_depth[vw] += 1 + + # update lowpoints of parent edge e + if e is not None: + if self.lowpt[vw] < self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) + self.lowpt[e] = self.lowpt[vw] + elif self.lowpt[vw] > self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) + else: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) + + def dfs_testing(self, v): + """Test for LR partition.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + # boolean to indicate whether to skip the initial work for an edge + skip_init = defaultdict(lambda: False) + + while dfs_stack: + v = dfs_stack.pop() + e = self.parent_edge[v] + # to indicate whether to skip the final block after the for loop + skip_final = False + + for w in self.ordered_adjs[v][ind[v] :]: + ei = (v, w) + + if not skip_init[ei]: + self.stack_bottom[ei] = top_of_stack(self.S) + + if ei == self.parent_edge[w]: # tree edge + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + skip_init[ei] = True # don't redo this block + skip_final = True # skip final work after breaking + break # handle next node in dfs_stack (i.e. 
w) + else: # back edge + self.lowpt_edge[ei] = ei + self.S.append(ConflictPair(right=Interval(ei, ei))) + + # integrate new return edges + if self.lowpt[ei] < self.height[v]: + if w == self.ordered_adjs[v][0]: # e_i has return edge + self.lowpt_edge[e] = self.lowpt_edge[ei] + else: # add constraints of e_i + if not self.add_constraints(ei, e): + # graph is not planar + return False + + ind[v] += 1 + + if not skip_final: + # remove back edges returning to parent + if e is not None: # v isn't root + self.remove_back_edges(e) + + return True + + def dfs_testing_recursive(self, v): + """Recursive version of :meth:`dfs_testing`.""" + e = self.parent_edge[v] + for w in self.ordered_adjs[v]: + ei = (v, w) + self.stack_bottom[ei] = top_of_stack(self.S) + if ei == self.parent_edge[w]: # tree edge + if not self.dfs_testing_recursive(w): + return False + else: # back edge + self.lowpt_edge[ei] = ei + self.S.append(ConflictPair(right=Interval(ei, ei))) + + # integrate new return edges + if self.lowpt[ei] < self.height[v]: + if w == self.ordered_adjs[v][0]: # e_i has return edge + self.lowpt_edge[e] = self.lowpt_edge[ei] + else: # add constraints of e_i + if not self.add_constraints(ei, e): + # graph is not planar + return False + + # remove back edges returning to parent + if e is not None: # v isn't root + self.remove_back_edges(e) + return True + + def add_constraints(self, ei, e): + P = ConflictPair() + # merge return edges of e_i into P.right + while True: + Q = self.S.pop() + if not Q.left.empty(): + Q.swap() + if not Q.left.empty(): # not planar + return False + if self.lowpt[Q.right.low] > self.lowpt[e]: + # merge intervals + if P.right.empty(): # topmost interval + P.right = Q.right.copy() + else: + self.ref[P.right.low] = Q.right.high + P.right.low = Q.right.low + else: # align + self.ref[Q.right.low] = self.lowpt_edge[e] + if top_of_stack(self.S) == self.stack_bottom[ei]: + break + # merge conflicting return edges of e_1,...,e_i-1 into P.L + while top_of_stack(self.S).left.conflicting(ei, self) or top_of_stack( + self.S + ).right.conflicting(ei, self): + Q = self.S.pop() + if Q.right.conflicting(ei, self): + Q.swap() + if Q.right.conflicting(ei, self): # not planar + return False + # merge interval below lowpt(e_i) into P.R + self.ref[P.right.low] = Q.right.high + if Q.right.low is not None: + P.right.low = Q.right.low + + if P.left.empty(): # topmost interval + P.left = Q.left.copy() + else: + self.ref[P.left.low] = Q.left.high + P.left.low = Q.left.low + + if not (P.left.empty() and P.right.empty()): + self.S.append(P) + return True + + def remove_back_edges(self, e): + u = e[0] + # trim back edges ending at parent u + # drop entire conflict pairs + while self.S and top_of_stack(self.S).lowest(self) == self.height[u]: + P = self.S.pop() + if P.left.low is not None: + self.side[P.left.low] = -1 + + if self.S: # one more conflict pair to consider + P = self.S.pop() + # trim left interval + while P.left.high is not None and P.left.high[1] == u: + P.left.high = self.ref[P.left.high] + if P.left.high is None and P.left.low is not None: + # just emptied + self.ref[P.left.low] = P.right.low + self.side[P.left.low] = -1 + P.left.low = None + # trim right interval + while P.right.high is not None and P.right.high[1] == u: + P.right.high = self.ref[P.right.high] + if P.right.high is None and P.right.low is not None: + # just emptied + self.ref[P.right.low] = P.left.low + self.side[P.right.low] = -1 + P.right.low = None + self.S.append(P) + + # side of e is side of a highest return edge + if 
self.lowpt[e] < self.height[u]: # e has return edge + hl = top_of_stack(self.S).left.high + hr = top_of_stack(self.S).right.high + + if hl is not None and (hr is None or self.lowpt[hl] > self.lowpt[hr]): + self.ref[e] = hl + else: + self.ref[e] = hr + + def dfs_embedding(self, v): + """Completes the embedding.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + + while dfs_stack: + v = dfs_stack.pop() + + for w in self.ordered_adjs[v][ind[v] :]: + ind[v] += 1 + ei = (v, w) + + if ei == self.parent_edge[w]: # tree edge + self.embedding.add_half_edge_first(w, v) + self.left_ref[v] = w + self.right_ref[v] = w + + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + break # handle next node in dfs_stack (i.e. w) + else: # back edge + if self.side[ei] == 1: + self.embedding.add_half_edge(w, v, ccw=self.right_ref[w]) + else: + self.embedding.add_half_edge(w, v, cw=self.left_ref[w]) + self.left_ref[w] = v + + def dfs_embedding_recursive(self, v): + """Recursive version of :meth:`dfs_embedding`.""" + for w in self.ordered_adjs[v]: + ei = (v, w) + if ei == self.parent_edge[w]: # tree edge + self.embedding.add_half_edge_first(w, v) + self.left_ref[v] = w + self.right_ref[v] = w + self.dfs_embedding_recursive(w) + else: # back edge + if self.side[ei] == 1: + # place v directly after right_ref[w] in embed. list of w + self.embedding.add_half_edge(w, v, ccw=self.right_ref[w]) + else: + # place v directly before left_ref[w] in embed. list of w + self.embedding.add_half_edge(w, v, cw=self.left_ref[w]) + self.left_ref[w] = v + + def sign(self, e): + """Resolve the relative side of an edge to the absolute side.""" + # the recursion stack + dfs_stack = [e] + # dict to remember reference edges + old_ref = defaultdict(lambda: None) + + while dfs_stack: + e = dfs_stack.pop() + + if self.ref[e] is not None: + dfs_stack.append(e) # revisit e after finishing self.ref[e] + dfs_stack.append(self.ref[e]) # visit self.ref[e] next + old_ref[e] = self.ref[e] # remember value of self.ref[e] + self.ref[e] = None + else: + self.side[e] *= self.side[old_ref[e]] + + return self.side[e] + + def sign_recursive(self, e): + """Recursive version of :meth:`sign`.""" + if self.ref[e] is not None: + self.side[e] = self.side[e] * self.sign_recursive(self.ref[e]) + self.ref[e] = None + return self.side[e] + + +class PlanarEmbedding(nx.DiGraph): + """Represents a planar graph with its planar embedding. + + The planar embedding is given by a `combinatorial embedding + `_. + + .. note:: `check_planarity` is the preferred way to check if a graph is planar. + + **Neighbor ordering:** + + In comparison to a usual graph structure, the embedding also stores the + order of all neighbors for every vertex. + The order of the neighbors can be given in clockwise (cw) direction or + counterclockwise (ccw) direction. This order is stored as edge attributes + in the underlying directed graph. For the edge (u, v) the edge attribute + 'cw' is set to the neighbor of u that follows immediately after v in + clockwise direction. + + In order for a PlanarEmbedding to be valid it must fulfill multiple + conditions. It is possible to check if these conditions are fulfilled with + the method :meth:`check_structure`. + The conditions are: + + * Edges must go in both directions (because the edge attributes differ) + * Every edge must have a 'cw' and 'ccw' attribute which corresponds to a + correct planar embedding. 
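+
+    For a freshly added first half-edge both attributes point back to the
+    only neighbor (a small sketch of the internal representation):
+
+    >>> G = nx.PlanarEmbedding()
+    >>> G.add_half_edge(0, 1)
+    >>> G[0][1]["cw"], G[0][1]["ccw"]
+    (1, 1)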
+ + As long as a PlanarEmbedding is invalid only the following methods should + be called: + + * :meth:`add_half_edge` + * :meth:`connect_components` + + Even though the graph is a subclass of nx.DiGraph, it can still be used + for algorithms that require undirected graphs, because the method + :meth:`is_directed` is overridden. This is possible, because a valid + PlanarGraph must have edges in both directions. + + **Half edges:** + + In methods like `add_half_edge` the term "half-edge" is used, which is + a term that is used in `doubly connected edge lists + `_. It is used + to emphasize that the edge is only in one direction and there exists + another half-edge in the opposite direction. + While conventional edges always have two faces (including outer face) next + to them, it is possible to assign each half-edge *exactly one* face. + For a half-edge (u, v) that is oriented such that u is below v then the + face that belongs to (u, v) is to the right of this half-edge. + + See Also + -------- + is_planar : + Preferred way to check if an existing graph is planar. + + check_planarity : + A convenient way to create a `PlanarEmbedding`. If not planar, + it returns a subgraph that shows this. + + Examples + -------- + + Create an embedding of a star graph (compare `nx.star_graph(3)`): + + >>> G = nx.PlanarEmbedding() + >>> G.add_half_edge(0, 1) + >>> G.add_half_edge(0, 2, ccw=1) + >>> G.add_half_edge(0, 3, ccw=2) + >>> G.add_half_edge(1, 0) + >>> G.add_half_edge(2, 0) + >>> G.add_half_edge(3, 0) + + Alternatively the same embedding can also be defined in counterclockwise + orientation. The following results in exactly the same PlanarEmbedding: + + >>> G = nx.PlanarEmbedding() + >>> G.add_half_edge(0, 1) + >>> G.add_half_edge(0, 3, cw=1) + >>> G.add_half_edge(0, 2, cw=3) + >>> G.add_half_edge(1, 0) + >>> G.add_half_edge(2, 0) + >>> G.add_half_edge(3, 0) + + After creating a graph, it is possible to validate that the PlanarEmbedding + object is correct: + + >>> G.check_structure() + + """ + + def __init__(self, incoming_graph_data=None, **attr): + super().__init__(incoming_graph_data=incoming_graph_data, **attr) + self.add_edge = self.__forbidden + self.add_edges_from = self.__forbidden + self.add_weighted_edges_from = self.__forbidden + + def __forbidden(self, *args, **kwargs): + """Forbidden operation + + Any edge additions to a PlanarEmbedding should be done using + method `add_half_edge`. + """ + raise NotImplementedError( + "Use `add_half_edge` method to add edges to a PlanarEmbedding." + ) + + def get_data(self): + """Converts the adjacency structure into a better readable structure. + + Returns + ------- + embedding : dict + A dict mapping all nodes to a list of neighbors sorted in + clockwise order. + + See Also + -------- + set_data + + """ + embedding = {} + for v in self: + embedding[v] = list(self.neighbors_cw_order(v)) + return embedding + + def set_data(self, data): + """Inserts edges according to given sorted neighbor list. + + The input format is the same as the output format of get_data(). + + Parameters + ---------- + data : dict + A dict mapping all nodes to a list of neighbors sorted in + clockwise order. + + See Also + -------- + get_data + + """ + for v in data: + ref = None + for w in reversed(data[v]): + self.add_half_edge(v, w, cw=ref) + ref = w + + def remove_node(self, n): + """Remove node n. + + Removes the node n and all adjacent edges, updating the + PlanarEmbedding to account for any resulting edge removal. 
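+
+        For example (editorial sketch; removing a leaf of a small star):
+
+        >>> G = nx.PlanarEmbedding()
+        >>> G.set_data({0: [1, 2], 1: [0], 2: [0]})
+        >>> G.remove_node(1)
+        >>> G.get_data()
+        {0: [2], 2: [0]}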
+ Attempting to remove a non-existent node will raise an exception. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + remove_nodes_from + + """ + try: + for u in self._pred[n]: + succs_u = self._succ[u] + un_cw = succs_u[n]["cw"] + un_ccw = succs_u[n]["ccw"] + del succs_u[n] + del self._pred[u][n] + if n != un_cw: + succs_u[un_cw]["ccw"] = un_ccw + succs_u[un_ccw]["cw"] = un_cw + del self._node[n] + del self._succ[n] + del self._pred[n] + except KeyError as err: # NetworkXError if n not in self + raise nx.NetworkXError( + f"The node {n} is not in the planar embedding." + ) from err + nx._clear_cache(self) + + def remove_nodes_from(self, nodes): + """Remove multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). If a node + in the container is not in the graph it is silently ignored. + + See Also + -------- + remove_node + + Notes + ----- + When removing nodes from an iterator over the graph you are changing, + a `RuntimeError` will be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.remove_nodes_from`. + + """ + for n in nodes: + if n in self._node: + self.remove_node(n) + # silently skip non-existing nodes + + def neighbors_cw_order(self, v): + """Generator for the neighbors of v in clockwise order. + + Parameters + ---------- + v : node + + Yields + ------ + node + + """ + succs = self._succ[v] + if not succs: + # v has no neighbors + return + start_node = next(reversed(succs)) + yield start_node + current_node = succs[start_node]["cw"] + while start_node != current_node: + yield current_node + current_node = succs[current_node]["cw"] + + def add_half_edge(self, start_node, end_node, *, cw=None, ccw=None): + """Adds a half-edge from `start_node` to `end_node`. + + If the half-edge is not the first one out of `start_node`, a reference + node must be provided either in the clockwise (parameter `cw`) or in + the counterclockwise (parameter `ccw`) direction. Only one of `cw`/`ccw` + can be specified (or neither in the case of the first edge). + Note that specifying a reference in the clockwise (`cw`) direction means + inserting the new edge in the first counterclockwise position with + respect to the reference (and vice-versa). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + cw, ccw: node + End node of reference edge. + Omit or pass `None` if adding the first out-half-edge of `start_node`. + + + Raises + ------ + NetworkXException + If the `cw` or `ccw` node is not a successor of `start_node`. + If `start_node` has successors, but neither `cw` or `ccw` is provided. + If both `cw` and `ccw` are specified. 
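+
+        For instance, building a path `1 - 0 - 2` (editorial sketch; with only
+        two neighbors around `0` the cyclic order is unambiguous):
+
+        >>> G = nx.PlanarEmbedding()
+        >>> G.add_half_edge(0, 1)
+        >>> G.add_half_edge(0, 2, ccw=1)
+        >>> G.add_half_edge(1, 0)
+        >>> G.add_half_edge(2, 0)
+        >>> sorted(G.neighbors_cw_order(0))
+        [1, 2]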
+ + See Also + -------- + connect_components + """ + + succs = self._succ.get(start_node) + if succs: + # there is already some edge out of start_node + leftmost_nbr = next(reversed(self._succ[start_node])) + if cw is not None: + if cw not in succs: + raise nx.NetworkXError("Invalid clockwise reference node.") + if ccw is not None: + raise nx.NetworkXError("Only one of cw/ccw can be specified.") + ref_ccw = succs[cw]["ccw"] + super().add_edge(start_node, end_node, cw=cw, ccw=ref_ccw) + succs[ref_ccw]["cw"] = end_node + succs[cw]["ccw"] = end_node + # when (cw == leftmost_nbr), the newly added neighbor is + # already at the end of dict self._succ[start_node] and + # takes the place of the former leftmost_nbr + move_leftmost_nbr_to_end = cw != leftmost_nbr + elif ccw is not None: + if ccw not in succs: + raise nx.NetworkXError("Invalid counterclockwise reference node.") + ref_cw = succs[ccw]["cw"] + super().add_edge(start_node, end_node, cw=ref_cw, ccw=ccw) + succs[ref_cw]["ccw"] = end_node + succs[ccw]["cw"] = end_node + move_leftmost_nbr_to_end = True + else: + raise nx.NetworkXError( + "Node already has out-half-edge(s), either cw or ccw reference node required." + ) + if move_leftmost_nbr_to_end: + # LRPlanarity (via self.add_half_edge_first()) requires that + # we keep track of the leftmost neighbor, which we accomplish + # by keeping it as the last key in dict self._succ[start_node] + succs[leftmost_nbr] = succs.pop(leftmost_nbr) + + else: + if cw is not None or ccw is not None: + raise nx.NetworkXError("Invalid reference node.") + # adding the first edge out of start_node + super().add_edge(start_node, end_node, ccw=end_node, cw=end_node) + + def check_structure(self): + """Runs without exceptions if this object is valid. + + Checks that the following properties are fulfilled: + + * Edges go in both directions (because the edge attributes differ). + * Every edge has a 'cw' and 'ccw' attribute which corresponds to a + correct planar embedding. + + Running this method verifies that the underlying Graph must be planar. + + Raises + ------ + NetworkXException + This exception is raised with a short explanation if the + PlanarEmbedding is invalid. + """ + # Check fundamental structure + for v in self: + try: + sorted_nbrs = set(self.neighbors_cw_order(v)) + except KeyError as err: + msg = f"Bad embedding. Missing orientation for a neighbor of {v}" + raise nx.NetworkXException(msg) from err + + unsorted_nbrs = set(self[v]) + if sorted_nbrs != unsorted_nbrs: + msg = "Bad embedding. Edge orientations not set correctly." + raise nx.NetworkXException(msg) + for w in self[v]: + # Check if opposite half-edge exists + if not self.has_edge(w, v): + msg = "Bad embedding. Opposite half-edge is missing." + raise nx.NetworkXException(msg) + + # Check planarity + counted_half_edges = set() + for component in nx.connected_components(self): + if len(component) == 1: + # Don't need to check single node component + continue + num_nodes = len(component) + num_half_edges = 0 + num_faces = 0 + for v in component: + for w in self.neighbors_cw_order(v): + num_half_edges += 1 + if (v, w) not in counted_half_edges: + # We encountered a new face + num_faces += 1 + # Mark all half-edges belonging to this face + self.traverse_face(v, w, counted_half_edges) + num_edges = num_half_edges // 2 # num_half_edges is even + if num_nodes - num_edges + num_faces != 2: + # The result does not match Euler's formula + msg = "Bad embedding. 
The graph does not match Euler's formula" + raise nx.NetworkXException(msg) + + def add_half_edge_ccw(self, start_node, end_node, reference_neighbor): + """Adds a half-edge from start_node to end_node. + + The half-edge is added counter clockwise next to the existing half-edge + (start_node, reference_neighbor). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + reference_neighbor: node + End node of reference edge. + + Raises + ------ + NetworkXException + If the reference_neighbor does not exist. + + See Also + -------- + add_half_edge + add_half_edge_cw + connect_components + + """ + self.add_half_edge(start_node, end_node, cw=reference_neighbor) + + def add_half_edge_cw(self, start_node, end_node, reference_neighbor): + """Adds a half-edge from start_node to end_node. + + The half-edge is added clockwise next to the existing half-edge + (start_node, reference_neighbor). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + reference_neighbor: node + End node of reference edge. + + Raises + ------ + NetworkXException + If the reference_neighbor does not exist. + + See Also + -------- + add_half_edge + add_half_edge_ccw + connect_components + """ + self.add_half_edge(start_node, end_node, ccw=reference_neighbor) + + def remove_edge(self, u, v): + """Remove the edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove the half-edges (u, v) and (v, u) and update the + edge ordering around the removed edge. + + Raises + ------ + NetworkXError + If there is not an edge between u and v. + + See Also + -------- + remove_edges_from : remove a collection of edges + """ + try: + succs_u = self._succ[u] + succs_v = self._succ[v] + uv_cw = succs_u[v]["cw"] + uv_ccw = succs_u[v]["ccw"] + vu_cw = succs_v[u]["cw"] + vu_ccw = succs_v[u]["ccw"] + del succs_u[v] + del self._pred[v][u] + del succs_v[u] + del self._pred[u][v] + if v != uv_cw: + succs_u[uv_cw]["ccw"] = uv_ccw + succs_u[uv_ccw]["cw"] = uv_cw + if u != vu_cw: + succs_v[vu_cw]["ccw"] = vu_ccw + succs_v[vu_ccw]["cw"] = vu_cw + except KeyError as err: + raise nx.NetworkXError( + f"The edge {u}-{v} is not in the planar embedding." + ) from err + nx._clear_cache(self) + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each pair of half-edges between the nodes given in the tuples + will be removed from the graph. The nodes can be passed as: + + - 2-tuples (u, v) half-edges (u, v) and (v, u). + - 3-tuples (u, v, k) where k is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> ebunch = [(1, 2), (2, 3)] + >>> G.remove_edges_from(ebunch) + """ + for e in ebunch: + u, v = e[:2] # ignore edge data + # assuming that the PlanarEmbedding is valid, if the half_edge + # (u, v) is in the graph, then so is half_edge (v, u) + if u in self._succ and v in self._succ[u]: + self.remove_edge(u, v) + + def connect_components(self, v, w): + """Adds half-edges for (v, w) and (w, v) at some position. + + This method should only be called if v and w are in different + components, or it might break the embedding. 
+ This especially means that if `connect_components(v, w)` + is called it is not allowed to call `connect_components(w, v)` + afterwards. The neighbor orientations in both directions are + all set correctly after the first call. + + Parameters + ---------- + v : node + w : node + + See Also + -------- + add_half_edge + """ + if v in self._succ and self._succ[v]: + ref = next(reversed(self._succ[v])) + else: + ref = None + self.add_half_edge(v, w, cw=ref) + if w in self._succ and self._succ[w]: + ref = next(reversed(self._succ[w])) + else: + ref = None + self.add_half_edge(w, v, cw=ref) + + def add_half_edge_first(self, start_node, end_node): + """Add a half-edge and set end_node as start_node's leftmost neighbor. + + The new edge is inserted counterclockwise with respect to the current + leftmost neighbor, if there is one. + + Parameters + ---------- + start_node : node + end_node : node + + See Also + -------- + add_half_edge + connect_components + """ + succs = self._succ.get(start_node) + # the leftmost neighbor is the last entry in the + # self._succ[start_node] dict + leftmost_nbr = next(reversed(succs)) if succs else None + self.add_half_edge(start_node, end_node, cw=leftmost_nbr) + + def next_face_half_edge(self, v, w): + """Returns the following half-edge left of a face. + + Parameters + ---------- + v : node + w : node + + Returns + ------- + half-edge : tuple + """ + new_node = self[w][v]["ccw"] + return w, new_node + + def traverse_face(self, v, w, mark_half_edges=None): + """Returns nodes on the face that belong to the half-edge (v, w). + + The face that is traversed lies to the right of the half-edge (in an + orientation where v is below w). + + Optionally it is possible to pass a set to which all encountered half + edges are added. Before calling this method, this set must not include + any half-edges that belong to the face. + + Parameters + ---------- + v : node + Start node of half-edge. + w : node + End node of half-edge. + mark_half_edges: set, optional + Set to which all encountered half-edges are added. + + Returns + ------- + face : list + A list of nodes that lie on this face. + """ + if mark_half_edges is None: + mark_half_edges = set() + + face_nodes = [v] + mark_half_edges.add((v, w)) + prev_node = v + cur_node = w + # Last half-edge is (incoming_node, v) + incoming_node = self[v][w]["cw"] + + while cur_node != v or prev_node != incoming_node: + face_nodes.append(cur_node) + prev_node, cur_node = self.next_face_half_edge(prev_node, cur_node) + if (prev_node, cur_node) in mark_half_edges: + raise nx.NetworkXException("Bad planar embedding. Impossible face.") + mark_half_edges.add((prev_node, cur_node)) + + return face_nodes + + def is_directed(self): + """A valid PlanarEmbedding is undirected. + + All reverse edges are contained, i.e. for every existing + half-edge (v, w) the half-edge in the opposite direction (w, v) is also + contained. + """ + return False + + def copy(self, as_view=False): + if as_view is True: + return nx.graphviews.generic_graph_view(self) + G = self.__class__() + G.graph.update(self.graph) + G.add_nodes_from((n, d.copy()) for n, d in self._node.items()) + super(self.__class__, G).add_edges_from( + (u, v, datadict.copy()) + for u, nbrs in self._adj.items() + for v, datadict in nbrs.items() + ) + return G + + def to_undirected(self, reciprocal=False, as_view=False): + """ + Returns a non-embedding undirected representation of the graph. 
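+
+        A minimal sketch of the conversion (the bookkeeping attributes are
+        dropped, as described below):
+
+        >>> G = nx.PlanarEmbedding()
+        >>> G.set_data({0: [1], 1: [0]})
+        >>> list(G.to_undirected().edges(data=True))
+        [(0, 1, {})]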
+ + This method strips the planar embedding information and provides + a simple undirected graph representation. While creating the undirected graph, + all edge attributes are retained except the ``"cw"`` and ``"ccw"`` attributes + which are removed from the edge data. Those attributes are specific to + the requirements of planar embeddings. + + Parameters + ---------- + reciprocal : bool (optional) + Not supported for PlanarEmbedding. This parameter raises an exception + if used. All valid embeddings include reciprocal half-edges by definition, + making this parameter unnecessary. + as_view : bool (optional, default=False) + Not supported for PlanarEmbedding. This parameter raises an exception + if used. + + Returns + ------- + G : Graph + An undirected graph with the same name and nodes as the PlanarEmbedding. + Edges are included with their data, except for the ``"cw"`` and ``"ccw"`` + attributes, which are omitted. + + + Notes + ----- + - If edges exist in both directions ``(u, v)`` and ``(v, u)`` in the PlanarEmbedding, + attributes for the resulting undirected edge will be combined, excluding ``"cw"`` + and ``"ccw"``. + - A deep copy is made of the other edge attributes as well as the + node and graph attributes, ensuring independence of the resulting graph. + - Subclass-specific data structures used in the original graph may not transfer + to the undirected graph. The resulting graph will be of type ``nx.Graph``. + """ + + if reciprocal: + raise ValueError( + "'reciprocal=True' is not supported for PlanarEmbedding.\n" + "All valid embeddings include reciprocal half-edges by definition,\n" + "making this parameter unnecessary." + ) + + if as_view: + raise ValueError("'as_view=True' is not supported for PlanarEmbedding.") + + graph_class = self.to_undirected_class() + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, {k: deepcopy(v) for k, v in d.items() if k not in {"cw", "ccw"}}) + for u, nbrs in self._adj.items() + for v, d in nbrs.items() + ) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/polynomials.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/polynomials.py new file mode 100644 index 0000000..7ebc755 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/polynomials.py @@ -0,0 +1,306 @@ +"""Provides algorithms supporting the computation of graph polynomials. + +Graph polynomials are polynomial-valued graph invariants that encode a wide +variety of structural information. Examples include the Tutte polynomial, +chromatic polynomial, characteristic polynomial, and matching polynomial. An +extensive treatment is provided in [1]_. + +For a simple example, the `~sympy.matrices.matrices.MatrixDeterminant.charpoly` +method can be used to compute the characteristic polynomial from the adjacency +matrix of a graph. Consider the complete graph ``K_4``: + +>>> import sympy +>>> x = sympy.Symbol("x") +>>> G = nx.complete_graph(4) +>>> A = nx.to_numpy_array(G, dtype=int) +>>> M = sympy.SparseMatrix(A) +>>> M.charpoly(x).as_expr() +x**4 - 6*x**2 - 8*x - 3 + + +.. [1] Y. Shi, M. Dehmer, X. Li, I. 
Gutman,
+   "Graph Polynomials"
+"""
+
+from collections import deque
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["tutte_polynomial", "chromatic_polynomial"]
+
+
+@not_implemented_for("directed")
+@nx._dispatchable
+def tutte_polynomial(G):
+    r"""Returns the Tutte polynomial of `G`
+
+    This function computes the Tutte polynomial via an iterative version of
+    the deletion-contraction algorithm.
+
+    The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant
+    in two variables. It encodes a wide array of information related to the
+    edge-connectivity of a graph; "Many problems about graphs can be reduced to
+    problems of finding and evaluating the Tutte polynomial at certain values" [1]_.
+    In fact, every deletion-contraction-expressible feature of a graph is a
+    specialization of the Tutte polynomial [2]_ (see Notes for examples).
+
+    There are several equivalent definitions; here are three:
+
+    Def 1 (rank-nullity expansion): For `G` an undirected graph, `n(G)` the
+    number of vertices of `G`, `E` the edge set of `G`, `V` the vertex set of
+    `G`, and `c(A)` the number of connected components of the graph with vertex
+    set `V` and edge set `A` [3]_:
+
+    .. math::
+
+        T_G(x, y) = \sum_{A \subseteq E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)}
+
+    Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning
+    tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict
+    linear order `L`. Let `B_e` be the unique minimal nonempty edge cut in
+    $E \setminus T \cup \{e\}$. An edge `e` is internally active with respect to
+    `T` and `L` if `e` is the least edge in `B_e` according to the linear order
+    `L`. The internal activity of `T` (denoted `i(T)`) is the number of edges
+    in $T$ that are internally active with respect to `T` and `L`.
+    Let `P_e` be the unique path in $T \cup \{e\}$ whose source and target vertex
+    are the same (that is, the fundamental cycle of `e`). An edge `e` is
+    externally active with respect to `T` and `L` if `e` is the least edge in
+    `P_e` according to the linear order `L`. The external activity of `T`
+    (denoted `e(T)`) is the number of edges in $E \setminus T$ that are
+    externally active with respect to `T` and `L`. Then [4]_ [5]_:
+
+    .. math::
+
+        T_G(x, y) = \sum_{T \text{ a spanning tree of } G} x^{i(T)} y^{e(T)}
+
+    Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e`
+    the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained
+    from `G` by contracting edge `e`, `k(G)` the number of cut-edges of `G`,
+    and `l(G)` the number of self-loops of `G`:
+
+    .. math::
+        T_G(x, y) = \begin{cases}
+            x^{k(G)} y^{l(G)}, & \text{if all edges are cut-edges or self-loops} \\
+            T_{G-e}(x, y) + T_{G/e}(x, y), & \text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}
+        \end{cases}
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    instance of `sympy.core.add.Add`
+        A Sympy expression representing the Tutte polynomial for `G`.
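+
+    A convenient way to sanity-check a returned polynomial is to evaluate
+    one of the specializations listed in the Notes below (an illustrative
+    check, not part of the original examples): ``T_G(1, 1)`` counts spanning
+    trees, and the 5-cycle has exactly five of them.
+
+    >>> T = nx.tutte_polynomial(nx.cycle_graph(5))
+    >>> T.subs({"x": 1, "y": 1})
+    5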
+
+    Examples
+    --------
+    >>> C = nx.cycle_graph(5)
+    >>> nx.tutte_polynomial(C)
+    x**4 + x**3 + x**2 + x + y
+
+    >>> D = nx.diamond_graph()
+    >>> nx.tutte_polynomial(D)
+    x**3 + 2*x**2 + 2*x*y + x + y**2 + y
+
+    Notes
+    -----
+    Some specializations of the Tutte polynomial:
+
+    - `T_G(1, 1)` counts the number of spanning trees of `G`
+    - `T_G(1, 2)` counts the number of connected spanning subgraphs of `G`
+    - `T_G(2, 1)` counts the number of spanning forests in `G`
+    - `T_G(0, 2)` counts the number of strong orientations of `G`
+    - `T_G(2, 0)` counts the number of acyclic orientations of `G`
+
+    Edge contraction is defined and deletion-contraction is introduced in [6]_.
+    The combinatorial meaning of the coefficients is introduced in [7]_.
+    Universality, properties, and applications are discussed in [8]_.
+
+    Practically, up-front computation of the Tutte polynomial may be useful when
+    users wish to repeatedly calculate edge-connectivity-related information
+    about one or more graphs.
+
+    References
+    ----------
+    .. [1] M. Brandt,
+       "The Tutte Polynomial."
+       Talking About Combinatorial Objects Seminar, 2015
+       https://math.berkeley.edu/~brandtm/talks/tutte.pdf
+    .. [2] A. Björklund, T. Husfeldt, P. Kaski, M. Koivisto,
+       "Computing the Tutte polynomial in vertex-exponential time"
+       49th Annual IEEE Symposium on Foundations of Computer Science, 2008
+       https://ieeexplore.ieee.org/abstract/document/4691000
+    .. [3] Y. Shi, M. Dehmer, X. Li, I. Gutman,
+       "Graph Polynomials," p. 14
+    .. [4] Y. Shi, M. Dehmer, X. Li, I. Gutman,
+       "Graph Polynomials," p. 46
+    .. [5] A. Nešetril, J. Goodall,
+       "Graph invariants, homomorphisms, and the Tutte polynomial"
+       https://iuuk.mff.cuni.cz/~andrew/Tutte.pdf
+    .. [6] D. B. West,
+       "Introduction to Graph Theory," p. 84
+    .. [7] G. Coutinho,
+       "A brief introduction to the Tutte polynomial"
+       Structural Analysis of Complex Networks, 2011
+       https://homepages.dcc.ufmg.br/~gabriel/seminars/coutinho_tuttepolynomial_seminar.pdf
+    .. [8] J. A. Ellis-Monaghan, C. Merino,
+       "Graph polynomials and their applications I: The Tutte polynomial"
+       Structural Analysis of Complex Networks, 2011
+       https://arxiv.org/pdf/0803.3079.pdf
+    """
+    import sympy
+
+    x = sympy.Symbol("x")
+    y = sympy.Symbol("y")
+    stack = deque()
+    stack.append(nx.MultiGraph(G))
+
+    polynomial = 0
+    while stack:
+        G = stack.pop()
+        bridges = set(nx.bridges(G))
+
+        # Find an edge that is neither a bridge nor a self-loop; only such
+        # edges can be used for the deletion-contraction step.
+        e = None
+        for i in G.edges:
+            if (i[0], i[1]) not in bridges and i[0] != i[1]:
+                e = i
+                break
+        if not e:
+            # Base case: every remaining edge is a bridge or a self-loop.
+            loops = list(nx.selfloop_edges(G, keys=True))
+            polynomial += x ** len(bridges) * y ** len(loops)
+        else:
+            # deletion-contraction
+            C = nx.contracted_edge(G, e, self_loops=True)
+            C.remove_edge(e[0], e[0])
+            G.remove_edge(*e)
+            stack.append(G)
+            stack.append(C)
+    return sympy.simplify(polynomial)
+
+
+@not_implemented_for("directed")
+@nx._dispatchable
+def chromatic_polynomial(G):
+    r"""Returns the chromatic polynomial of `G`
+
+    This function computes the chromatic polynomial via an iterative version of
+    the deletion-contraction algorithm.
+
+    The chromatic polynomial `X_G(x)` is a fundamental graph polynomial
+    invariant in one variable. Evaluating `X_G(k)` for a natural number `k`
+    enumerates the proper k-colorings of `G`.
+
+    There are several equivalent definitions; here are three:
+
+    Def 1 (explicit formula):
+    For `G` an undirected graph, `c(G)` the number of connected components of
+    `G`, `E` the edge set of `G`, and `G(S)` the spanning subgraph of `G` with
+    edge set `S` [1]_:
+
+    .. math::
+
+        X_G(x) = \sum_{S \subseteq E} (-1)^{|S|} x^{c(G(S))}
+
+
+    Def 2 (interpolating polynomial):
+    For `G` an undirected graph, `n(G)` the number of vertices of `G`, `k_0 = 0`,
+    and `k_i` the number of distinct ways to color the vertices of `G` with `i`
+    unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the
+    unique Lagrange interpolating polynomial of degree `n(G)` through the points
+    `(0, k_0), (1, k_1), \dots, (n(G), k_{n(G)})` [2]_.
+
+
+    Def 3 (chromatic recurrence):
+    For `G` an undirected graph, `G-e` the graph obtained from `G` by deleting
+    edge `e`, `G/e` the graph obtained from `G` by contracting edge `e`, `n(G)`
+    the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_:
+
+    .. math::
+        X_G(x) = \begin{cases}
+            x^{n(G)}, & \text{if $e(G)=0$} \\
+            X_{G-e}(x) - X_{G/e}(x), & \text{otherwise, for an arbitrary edge $e$}
+        \end{cases}
+
+    This formulation is also known as the Fundamental Reduction Theorem [4]_.
+
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    instance of `sympy.core.add.Add`
+        A Sympy expression representing the chromatic polynomial for `G`.
+
+    Examples
+    --------
+    >>> C = nx.cycle_graph(5)
+    >>> nx.chromatic_polynomial(C)
+    x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x
+
+    >>> G = nx.complete_graph(4)
+    >>> nx.chromatic_polynomial(G)
+    x**4 - 6*x**3 + 11*x**2 - 6*x
+
+    Notes
+    -----
+    Interpretation of the coefficients is discussed in [5]_. Several special
+    cases are listed in [2]_.
+
+    The chromatic polynomial is a specialization of the Tutte polynomial; in
+    particular, ``X_G(x) = (-1)^{n(G) - c(G)} x^{c(G)} T_G(1 - x, 0)`` [6]_.
+
+    The chromatic polynomial may take negative arguments, though evaluations
+    may not have chromatic interpretations. For instance,
+    ``(-1)^{n(G)} X_G(-1)`` counts the acyclic orientations of `G` [7]_.
+
+    References
+    ----------
+    .. [1] D. B. West,
+       "Introduction to Graph Theory," p. 222
+    .. [2] E. W. Weisstein
+       "Chromatic Polynomial"
+       MathWorld--A Wolfram Web Resource
+       https://mathworld.wolfram.com/ChromaticPolynomial.html
+    .. [3] D. B. West,
+       "Introduction to Graph Theory," p. 221
+    .. [4] J. Zhang, J. Goodall,
+       "An Introduction to Chromatic Polynomials"
+       https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf
+    .. [5] R. C. Read,
+       "An Introduction to Chromatic Polynomials"
+       Journal of Combinatorial Theory, 1968
+       https://math.berkeley.edu/~mrklug/ReadChromatic.pdf
+    .. [6] W. T. Tutte,
+       "Graph-polynomials"
+       Advances in Applied Mathematics, 2004
+       https://www.sciencedirect.com/science/article/pii/S0196885803000411
+    .. [7] R. P. Stanley,
+       "Acyclic orientations of graphs"
+       Discrete Mathematics, 2006
+       https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf
+    """
+    import sympy
+
+    x = sympy.Symbol("x")
+    stack = deque()
+    # `contraction_idx` records how many contractions produced this graph;
+    # each base-case contribution is signed by (-1) ** contraction_idx,
+    # matching the subtraction in the chromatic recurrence.
+    stack.append(nx.MultiGraph(G, contraction_idx=0))
+
+    polynomial = 0
+    while stack:
+        G = stack.pop()
+        edges = list(G.edges)
+        if not edges:
+            polynomial += (-1) ** G.graph["contraction_idx"] * x ** len(G)
+        else:
+            e = edges[0]
+            C = nx.contracted_edge(G, e, self_loops=True)
+            C.graph["contraction_idx"] = G.graph["contraction_idx"] + 1
+            C.remove_edge(e[0], e[0])
+            G.remove_edge(*e)
+            stack.append(G)
+            stack.append(C)
+    return polynomial
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/reciprocity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/reciprocity.py
new file mode 100644
index 0000000..5ea7ed2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/reciprocity.py
@@ -0,0 +1,98 @@
+"""Algorithms to calculate reciprocity in a directed graph."""
+
+import networkx as nx
+from networkx import NetworkXError
+
+from ..utils import not_implemented_for
+
+__all__ = ["reciprocity", "overall_reciprocity"]
+
+
+@not_implemented_for("undirected", "multigraph")
+@nx._dispatchable
+def reciprocity(G, nodes=None):
+    r"""Compute the reciprocity in a directed graph.
+
+    The reciprocity of a directed graph is defined as the ratio
+    of the number of edges pointing in both directions to the total
+    number of edges in the graph.
+    Formally, $r = |\{(u,v) \in G \mid (v,u) \in G\}| / |\{(u,v) \in G\}|$.
+
+    The reciprocity of a single node `u` is defined similarly: it is the
+    ratio of the number of edges in both directions to the total number
+    of edges attached to node `u`.
+
+    Parameters
+    ----------
+    G : graph
+       A networkx directed graph
+    nodes : container of nodes, optional (default=whole graph)
+       Compute reciprocity for nodes in this container.
+
+    Returns
+    -------
+    out : dictionary
+       Reciprocity keyed by node label.
+
+    Notes
+    -----
+    The reciprocity is not defined for isolated nodes.
+    In such cases this function will return None.
+
+    """
+    # If `nodes` is not specified, calculate the reciprocity of the graph.
+    if nodes is None:
+        return overall_reciprocity(G)
+
+    # If `nodes` represents a single node in the graph, return only its
+    # reciprocity.
+    if nodes in G:
+        reciprocity = next(_reciprocity_iter(G, nodes))[1]
+        if reciprocity is None:
+            raise NetworkXError("Not defined for isolated nodes.")
+        else:
+            return reciprocity
+
+    # Otherwise, `nodes` represents an iterable of nodes, so return a
+    # dictionary mapping node to its reciprocity.
+    return dict(_reciprocity_iter(G, nodes))
+
+
+def _reciprocity_iter(G, nodes):
+    """Return an iterator of (node, reciprocity)."""
+    n = G.nbunch_iter(nodes)
+    for node in n:
+        pred = set(G.predecessors(node))
+        succ = set(G.successors(node))
+        overlap = pred & succ
+        n_total = len(pred) + len(succ)
+
+        # Reciprocity is not defined for isolated nodes.
+        # Return None.
+        if n_total == 0:
+            yield (node, None)
+        else:
+            reciprocity = 2 * len(overlap) / n_total
+            yield (node, reciprocity)
+
+
+@not_implemented_for("undirected", "multigraph")
+@nx._dispatchable
+def overall_reciprocity(G):
+    """Compute the reciprocity for the whole graph.
+
+    See the documentation of `reciprocity` for the definition.
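+
+    For example (an illustrative check, not part of the original
+    docstring): a directed graph with edges ``1 -> 2``, ``2 -> 1`` and
+    ``1 -> 3`` has three edges, two of which are reciprocated, so its
+    overall reciprocity is ``2 / 3``.
+
+    >>> G = nx.DiGraph([(1, 2), (2, 1), (1, 3)])
+    >>> nx.overall_reciprocity(G)
+    0.6666666666666666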
+
+    Parameters
+    ----------
+    G : graph
+       A networkx directed graph
+
+    """
+    n_all_edge = G.number_of_edges()
+    # Converting to undirected merges each reciprocated pair of directed
+    # edges into a single edge, so the difference counts reciprocated
+    # pairs; doubling it recovers the number of reciprocated directed edges.
+    n_overlap_edge = (n_all_edge - G.to_undirected().number_of_edges()) * 2
+
+    if n_all_edge == 0:
+        raise NetworkXError("Not defined for empty graphs")
+
+    return n_overlap_edge / n_all_edge
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/regular.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/regular.py
new file mode 100644
index 0000000..f483ef3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/regular.py
@@ -0,0 +1,215 @@
+"""Functions for computing and verifying regular graphs."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["is_regular", "is_k_regular", "k_factor"]
+
+
+@nx._dispatchable
+def is_regular(G):
+    """Determines whether the graph ``G`` is a regular graph.
+
+    A regular graph is a graph where each vertex has the same degree. A
+    regular digraph is a graph where the indegree and outdegree of each
+    vertex are equal.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    bool
+        Whether the given graph or digraph is regular.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 1)])
+    >>> nx.is_regular(G)
+    True
+
+    """
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
+    n1 = nx.utils.arbitrary_element(G)
+    if not G.is_directed():
+        d1 = G.degree(n1)
+        return all(d1 == d for _, d in G.degree)
+    else:
+        d_in = G.in_degree(n1)
+        in_regular = all(d_in == d for _, d in G.in_degree)
+        d_out = G.out_degree(n1)
+        out_regular = all(d_out == d for _, d in G.out_degree)
+        return in_regular and out_regular
+
+
+@not_implemented_for("directed")
+@nx._dispatchable
+def is_k_regular(G, k):
+    """Determines whether the graph ``G`` is a k-regular graph.
+
+    A k-regular graph is a graph where each vertex has degree k.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    k : integer
+        The degree to check for.
+
+    Returns
+    -------
+    bool
+        Whether the given graph is k-regular.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 1)])
+    >>> nx.is_k_regular(G, k=3)
+    False
+
+    """
+    return all(d == k for n, d in G.degree)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(preserve_edge_attrs=True, returns_graph=True)
+def k_factor(G, k, matching_weight="weight"):
+    """Compute a k-factor of G
+
+    A k-factor of a graph is a spanning k-regular subgraph.
+    A spanning k-regular subgraph of G is a subgraph that contains
+    each vertex of G and a subset of the edges of G such that each
+    vertex has degree k.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+      Undirected graph
+
+    k : integer
+      The degree of each vertex in the returned subgraph.
+
+    matching_weight : string, optional (default='weight')
+      Edge data key corresponding to the edge weight.
+      Used for finding the max-weighted perfect matching.
+      If key not found, uses 1 as weight.
+
+    Returns
+    -------
+    G2 : NetworkX graph
+        A k-factor of G
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 1)])
+    >>> G2 = nx.k_factor(G, k=1)
+    >>> G2.edges()
+    EdgeView([(1, 2), (3, 4)])
+
+    References
+    ----------
+    .. [1] "An algorithm for computing simple k-factors.",
+       Meijer, Henk, Yurai Núñez-Rodríguez, and David Rappaport,
+       Information processing letters, 2009.
+ """ + + from networkx.algorithms.matching import is_perfect_matching, max_weight_matching + + class LargeKGadget: + def __init__(self, k, degree, node, g): + self.original = node + self.g = g + self.k = k + self.degree = degree + + self.outer_vertices = [(node, x) for x in range(degree)] + self.core_vertices = [(node, x + degree) for x in range(degree - k)] + + def replace_node(self): + adj_view = self.g[self.original] + neighbors = list(adj_view.keys()) + edge_attrs = list(adj_view.values()) + for outer, neighbor, edge_attrs in zip( + self.outer_vertices, neighbors, edge_attrs + ): + self.g.add_edge(outer, neighbor, **edge_attrs) + for core in self.core_vertices: + for outer in self.outer_vertices: + self.g.add_edge(core, outer) + self.g.remove_node(self.original) + + def restore_node(self): + self.g.add_node(self.original) + for outer in self.outer_vertices: + adj_view = self.g[outer] + for neighbor, edge_attrs in list(adj_view.items()): + if neighbor not in self.core_vertices: + self.g.add_edge(self.original, neighbor, **edge_attrs) + break + g.remove_nodes_from(self.outer_vertices) + g.remove_nodes_from(self.core_vertices) + + class SmallKGadget: + def __init__(self, k, degree, node, g): + self.original = node + self.k = k + self.degree = degree + self.g = g + + self.outer_vertices = [(node, x) for x in range(degree)] + self.inner_vertices = [(node, x + degree) for x in range(degree)] + self.core_vertices = [(node, x + 2 * degree) for x in range(k)] + + def replace_node(self): + adj_view = self.g[self.original] + for outer, inner, (neighbor, edge_attrs) in zip( + self.outer_vertices, self.inner_vertices, list(adj_view.items()) + ): + self.g.add_edge(outer, inner) + self.g.add_edge(outer, neighbor, **edge_attrs) + for core in self.core_vertices: + for inner in self.inner_vertices: + self.g.add_edge(core, inner) + self.g.remove_node(self.original) + + def restore_node(self): + self.g.add_node(self.original) + for outer in self.outer_vertices: + adj_view = self.g[outer] + for neighbor, edge_attrs in adj_view.items(): + if neighbor not in self.core_vertices: + self.g.add_edge(self.original, neighbor, **edge_attrs) + break + self.g.remove_nodes_from(self.outer_vertices) + self.g.remove_nodes_from(self.inner_vertices) + self.g.remove_nodes_from(self.core_vertices) + + # Step 1 + if any(d < k for _, d in G.degree): + raise nx.NetworkXUnfeasible("Graph contains a vertex with degree less than k") + g = G.copy() + + # Step 2 + gadgets = [] + for node, degree in list(g.degree): + if k < degree / 2.0: + gadget = SmallKGadget(k, degree, node, g) + else: + gadget = LargeKGadget(k, degree, node, g) + gadget.replace_node() + gadgets.append(gadget) + + # Step 3 + matching = max_weight_matching(g, maxcardinality=True, weight=matching_weight) + + # Step 4 + if not is_perfect_matching(g, matching): + raise nx.NetworkXUnfeasible( + "Cannot find k-factor because no perfect matching exists" + ) + + for edge in g.edges(): + if edge not in matching and (edge[1], edge[0]) not in matching: + g.remove_edge(edge[0], edge[1]) + + for gadget in gadgets: + gadget.restore_node() + + return g diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/richclub.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/richclub.py new file mode 100644 index 0000000..445b27d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/richclub.py @@ -0,0 +1,138 @@ +"""Functions for computing rich-club coefficients.""" + +from itertools import accumulate + +import networkx as nx +from 
+
+__all__ = ["rich_club_coefficient"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def rich_club_coefficient(G, normalized=True, Q=100, seed=None):
+    r"""Returns the rich-club coefficient of the graph `G`.
+
+    For each degree *k*, the *rich-club coefficient* is the ratio of the
+    number of actual to the number of potential edges for nodes with
+    degree greater than *k*:
+
+    .. math::
+
+        \phi(k) = \frac{2 E_k}{N_k (N_k - 1)}
+
+    where `N_k` is the number of nodes with degree larger than *k*, and
+    `E_k` is the number of edges among those nodes.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph with neither parallel edges nor self-loops.
+    normalized : bool (optional)
+        Normalize using randomized network as in [1]_
+    Q : float (optional, default=100)
+        If `normalized` is True, perform `Q * m` double-edge
+        swaps, where `m` is the number of edges in `G`, to use as a
+        null-model for normalization.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    rc : dictionary
+       A dictionary, keyed by degree, with rich-club coefficient values.
+
+    Raises
+    ------
+    NetworkXError
+        If `G` has fewer than four nodes and ``normalized=True``.
+        A randomly sampled graph for normalization cannot be generated in
+        this case. Also raised if `G` contains self-loops.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+    >>> rc = nx.rich_club_coefficient(G, normalized=False, seed=42)
+    >>> rc[0]
+    0.4
+
+    Notes
+    -----
+    The rich club definition and algorithm are found in [1]_. This
+    algorithm ignores any edge weights and is not defined for directed
+    graphs or graphs with parallel edges or self loops.
+
+    Normalization is done by computing the rich club coefficient for a randomly
+    sampled graph with the same degree distribution as `G` by
+    repeatedly swapping the endpoints of existing edges. For graphs with fewer than 4
+    nodes, it is not possible to generate a random graph with a prescribed
+    degree distribution, as the degree distribution fully determines the graph
+    (hence making the coefficients trivially normalized to 1).
+    This function raises an exception in this case.
+
+    Estimates for appropriate values of `Q` are found in [2]_.
+
+    References
+    ----------
+    .. [1] Julian J. McAuley, Luciano da Fontoura Costa,
+       and Tibério S. Caetano,
+       "The rich-club phenomenon across complex network hierarchies",
+       Applied Physics Letters Vol 91 Issue 8, August 2007.
+       https://arxiv.org/abs/physics/0701290
+    .. [2] R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon,
+       "Uniform generation of random graphs with arbitrary degree
+       sequences", 2006. https://arxiv.org/abs/cond-mat/0312028
+    """
+    if nx.number_of_selfloops(G) > 0:
+        raise nx.NetworkXError(
+            "rich_club_coefficient is not implemented for graphs with self loops."
+        )
+    rc = _compute_rc(G)
+    if normalized:
+        # make R a copy of G, randomize with Q*|E| double edge swaps
+        # and use rich_club coefficient of R to normalize
+        R = G.copy()
+        E = R.number_of_edges()
+        nx.double_edge_swap(R, Q * E, max_tries=Q * E * 10, seed=seed)
+        rcran = _compute_rc(R)
+        rc = {k: v / rcran[k] for k, v in rc.items()}
+    return rc
+
+
+def _compute_rc(G):
+    """Returns the rich-club coefficient for each degree in the graph
+    `G`.
+
+    `G` is an undirected graph without multiedges.
+
+    Returns a dictionary mapping degree to rich-club coefficient for
+    that degree.
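+
+    A small worked check (illustrative, not part of the original
+    docstring): in the graph below four nodes have degree greater than 1,
+    and four edges join them, so the coefficient for degree 1 is
+    ``2 * 4 / (4 * 3)``.
+
+    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+    >>> _compute_rc(G)[1]
+    0.6666666666666666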
+ + """ + deghist = nx.degree_histogram(G) + total = sum(deghist) + # Compute the number of nodes with degree greater than `k`, for each + # degree `k` (omitting the last entry, which is zero). + nks = (total - cs for cs in accumulate(deghist) if total - cs > 1) + # Create a sorted list of pairs of edge endpoint degrees. + # + # The list is sorted in reverse order so that we can pop from the + # right side of the list later, instead of popping from the left + # side of the list, which would have a linear time cost. + edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()), reverse=True) + ek = G.number_of_edges() + if ek == 0: + return {} + + k1, k2 = edge_degrees.pop() + rc = {} + for d, nk in enumerate(nks): + while k1 <= d: + if len(edge_degrees) == 0: + ek = 0 + break + k1, k2 = edge_degrees.pop() + ek -= 1 + rc[d] = 2 * ek / (nk * (nk - 1)) + return rc diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__init__.py new file mode 100644 index 0000000..eb0d91c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__init__.py @@ -0,0 +1,5 @@ +from networkx.algorithms.shortest_paths.generic import * +from networkx.algorithms.shortest_paths.unweighted import * +from networkx.algorithms.shortest_paths.weighted import * +from networkx.algorithms.shortest_paths.astar import * +from networkx.algorithms.shortest_paths.dense import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..c0968b2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-312.pyc new file mode 100644 index 0000000..56b6d08 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-312.pyc new file mode 100644 index 0000000..d7a3f44 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-312.pyc new file mode 100644 index 0000000..941e6fa Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-312.pyc new file mode 100644 index 0000000..76c0471 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-312.pyc
new file mode 100644
index 0000000..e8b002b
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/astar.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/astar.py
new file mode 100644
index 0000000..1182297
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/astar.py
@@ -0,0 +1,239 @@
+"""Shortest paths and path lengths using the A* ("A star") algorithm."""
+
+from heapq import heappop, heappush
+from itertools import count
+
+import networkx as nx
+from networkx.algorithms.shortest_paths.weighted import _weight_function
+
+__all__ = ["astar_path", "astar_path_length"]
+
+
+@nx._dispatchable(edge_attrs="weight", preserve_node_attrs="heuristic")
+def astar_path(G, source, target, heuristic=None, weight="weight", *, cutoff=None):
+    """Returns a list of nodes in a shortest path between source and target
+    using the A* ("A-star") algorithm.
+
+    There may be more than one shortest path. This returns only one.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+       Starting node for path
+
+    target : node
+       Ending node for path
+
+    heuristic : function
+       A function to evaluate the estimate of the distance
+       from a node to the target. The function takes
+       two node arguments and must return a number.
+       If the heuristic is inadmissible (if it might
+       overestimate the cost of reaching the goal from a node),
+       the result may not be a shortest path.
+       The algorithm does not support updating heuristic
+       values for the same node due to caching the first
+       heuristic calculation per node.
+
+    weight : string or function
+       If this is a string, then edge weights will be accessed via the
+       edge attribute with this key (that is, the weight of the edge
+       joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+       such edge attribute exists, the weight of the edge is assumed to
+       be one.
+       If this is a function, the weight of an edge is the value
+       returned by the function. The function must accept exactly three
+       positional arguments: the two endpoints of an edge and the
+       dictionary of edge attributes for that edge. The function must
+       return a number or None to indicate a hidden edge.
+
+    cutoff : float, optional
+       If this is provided, the search will be bounded to this value. I.e. if
+       the evaluation function surpasses this value for a node n, the node will not
+       be expanded further and will be ignored. More formally, let h'(n) be the
+       heuristic function, and g(n) be the cost of reaching n from the source node. Then,
+       if g(n) + h'(n) > cutoff, the node will not be explored further.
+       Note that if the heuristic is inadmissible, it is possible that paths
+       are ignored even though they satisfy the cutoff.
+
+    Raises
+    ------
+    NodeNotFound
+       If `source` or `target` is not in `G`.
+
+    NetworkXNoPath
+        If no path exists between source and target.
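+
+    The `cutoff` bound prunes any node whose evaluation exceeds it (an
+    illustrative example, not from the original docstring): with the
+    default zero heuristic, the weight-4 path below is rejected by a
+    cutoff of 3.
+
+    >>> G = nx.path_graph(5)
+    >>> nx.astar_path(G, 0, 4, cutoff=3)
+    Traceback (most recent call last):
+        ...
+    networkx.exception.NetworkXNoPath: Node 4 not reachable from 0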
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> print(nx.astar_path(G, 0, 4))
+    [0, 1, 2, 3, 4]
+    >>> G = nx.grid_graph(dim=[3, 3])  # nodes are two-tuples (x,y)
+    >>> nx.set_edge_attributes(G, {e: e[1][0] * 2 for e in G.edges()}, "cost")
+    >>> def dist(a, b):
+    ...     (x1, y1) = a
+    ...     (x2, y2) = b
+    ...     return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
+    >>> print(nx.astar_path(G, (0, 0), (2, 2), heuristic=dist, weight="cost"))
+    [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    The weight function can be used to hide edges by returning None.
+    So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
+    will find the shortest red path.
+
+    See Also
+    --------
+    shortest_path, dijkstra_path
+
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"Source {source} is not in G")
+
+    if target not in G:
+        raise nx.NodeNotFound(f"Target {target} is not in G")
+
+    if heuristic is None:
+        # The default heuristic is h=0 - same as Dijkstra's algorithm
+        def heuristic(u, v):
+            return 0
+
+    weight = _weight_function(G, weight)
+
+    G_succ = G._adj  # For speed-up (and works for both directed and undirected graphs)
+
+    # The queue stores priority, node, cost to reach, and parent.
+    # Uses Python heapq to keep in priority order.
+    # Add a counter to the queue to prevent the underlying heap from
+    # attempting to compare the nodes themselves. The counter breaks
+    # ties in the priority and is guaranteed unique for all entries.
+    c = count()
+    queue = [(0, next(c), source, 0, None)]
+
+    # Maps enqueued nodes to distance of discovered paths and the
+    # computed heuristics to target. We avoid computing the heuristics
+    # more than once and inserting the node into the queue too many times.
+    enqueued = {}
+    # Maps explored nodes to parent closest to the source.
+    explored = {}
+
+    while queue:
+        # Pop the smallest item from queue.
+        _, __, curnode, dist, parent = heappop(queue)
+
+        if curnode == target:
+            path = [curnode]
+            node = parent
+            while node is not None:
+                path.append(node)
+                node = explored[node]
+            path.reverse()
+            return path
+
+        if curnode in explored:
+            # Do not override the parent of starting node
+            if explored[curnode] is None:
+                continue
+
+            # Skip bad paths that were enqueued before finding a better one
+            qcost, h = enqueued[curnode]
+            if qcost < dist:
+                continue
+
+        explored[curnode] = parent
+
+        for neighbor, w in G_succ[curnode].items():
+            cost = weight(curnode, neighbor, w)
+            if cost is None:
+                continue
+            ncost = dist + cost
+            if neighbor in enqueued:
+                qcost, h = enqueued[neighbor]
+                # if qcost <= ncost, a less costly path from the
+                # neighbor to the source was already determined.
+                # Therefore, we won't attempt to push this neighbor
+                # to the queue
+                if qcost <= ncost:
+                    continue
+            else:
+                h = heuristic(neighbor, target)
+
+            # Use an explicit None check so that a cutoff of 0 is honored.
+            if cutoff is not None and ncost + h > cutoff:
+                continue
+
+            enqueued[neighbor] = ncost, h
+            heappush(queue, (ncost + h, next(c), neighbor, ncost, curnode))
+
+    raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}")
+
+
+@nx._dispatchable(edge_attrs="weight", preserve_node_attrs="heuristic")
+def astar_path_length(
+    G, source, target, heuristic=None, weight="weight", *, cutoff=None
+):
+    """Returns the length of the shortest path between source and target using
+    the A* ("A-star") algorithm.
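+
+    For instance (an illustrative example, not from the original
+    docstring), on an unweighted path graph the length equals the hop
+    count:
+
+    >>> G = nx.path_graph(5)
+    >>> nx.astar_path_length(G, 0, 4)
+    4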
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+       Starting node for path
+
+    target : node
+       Ending node for path
+
+    heuristic : function
+       A function to evaluate the estimate of the distance
+       from a node to the target. The function takes
+       two node arguments and must return a number.
+       If the heuristic is inadmissible (if it might
+       overestimate the cost of reaching the goal from a node),
+       the result may not be a shortest path.
+       The algorithm does not support updating heuristic
+       values for the same node due to caching the first
+       heuristic calculation per node.
+
+    weight : string or function
+       If this is a string, then edge weights will be accessed via the
+       edge attribute with this key (that is, the weight of the edge
+       joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+       such edge attribute exists, the weight of the edge is assumed to
+       be one.
+       If this is a function, the weight of an edge is the value
+       returned by the function. The function must accept exactly three
+       positional arguments: the two endpoints of an edge and the
+       dictionary of edge attributes for that edge. The function must
+       return a number or None to indicate a hidden edge.
+
+    cutoff : float, optional
+       If this is provided, the search will be bounded to this value. I.e. if
+       the evaluation function surpasses this value for a node n, the node will not
+       be expanded further and will be ignored. More formally, let h'(n) be the
+       heuristic function, and g(n) be the cost of reaching n from the source node. Then,
+       if g(n) + h'(n) > cutoff, the node will not be explored further.
+       Note that if the heuristic is inadmissible, it is possible that paths
+       are ignored even though they satisfy the cutoff.
+
+    Raises
+    ------
+    NodeNotFound
+       If `source` or `target` is not in `G`.
+
+    NetworkXNoPath
+        If no path exists between source and target.
+
+    See Also
+    --------
+    astar_path
+
+    """
+    if source not in G or target not in G:
+        msg = f"Either source {source} or target {target} is not in G"
+        raise nx.NodeNotFound(msg)
+
+    weight = _weight_function(G, weight)
+    path = astar_path(G, source, target, heuristic, weight, cutoff=cutoff)
+    return sum(weight(u, v, G[u][v]) for u, v in zip(path[:-1], path[1:]))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/dense.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/dense.py
new file mode 100644
index 0000000..d259d8c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/dense.py
@@ -0,0 +1,264 @@
+"""Floyd-Warshall algorithm for shortest paths."""
+
+import networkx as nx
+
+__all__ = [
+    "floyd_warshall",
+    "floyd_warshall_predecessor_and_distance",
+    "reconstruct_path",
+    "floyd_warshall_numpy",
+]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def floyd_warshall_numpy(G, nodelist=None, weight="weight"):
+    """Find all-pairs shortest path lengths using Floyd's algorithm.
+
+    This algorithm for finding shortest paths takes advantage of
+    matrix representations of a graph and works well for dense
+    graphs where all-pairs shortest path lengths are desired.
+    The results are returned as a NumPy array, distance[i, j],
+    where i and j are the indexes of two nodes in nodelist.
+    The entry distance[i, j] is the distance along a shortest
+    path from i to j. If no path exists the distance is Inf.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    nodelist : list, optional (default=G.nodes)
+        The rows and columns are ordered by the nodes in nodelist.
+        If nodelist is None then the ordering is produced by G.nodes.
+        Nodelist should include all nodes in G.
+
+    weight : string, optional (default='weight')
+        Edge data key corresponding to the edge weight.
+
+    Returns
+    -------
+    distance : 2D numpy.ndarray
+        A numpy array of shortest path distances between nodes.
+        If there is no path between two nodes the value is Inf.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from(
+    ...     [(0, 1, 5), (1, 2, 2), (2, 3, -3), (1, 3, 10), (3, 2, 8)]
+    ... )
+    >>> nx.floyd_warshall_numpy(G)
+    array([[ 0.,  5.,  7.,  4.],
+           [inf,  0.,  2., -1.],
+           [inf, inf,  0., -3.],
+           [inf, inf,  8.,  0.]])
+
+    Notes
+    -----
+    Floyd's algorithm is appropriate for finding shortest paths in
+    dense graphs or graphs with negative weights when Dijkstra's
+    algorithm fails. This algorithm can still fail if there are negative
+    cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.
+
+    Raises
+    ------
+    NetworkXError
+        If nodelist is not a list of the nodes in G.
+    """
+    import numpy as np
+
+    if nodelist is not None:
+        if not (len(nodelist) == len(G) == len(set(nodelist))):
+            raise nx.NetworkXError(
+                "nodelist must contain every node in G with no repeats. "
+                "If you wanted a subgraph of G use G.subgraph(nodelist)"
+            )
+
+    # To handle cases when an edge has weight=0, we must make sure that
+    # nonedges are not given the value 0 as well.
+    A = nx.to_numpy_array(
+        G, nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf
+    )
+    n, m = A.shape
+    np.fill_diagonal(A, 0)  # diagonal elements should be zero
+    for i in range(n):
+        # The second term has the same shape as A due to broadcasting
+        A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis])
+    return A
+
+
+@nx._dispatchable(edge_attrs="weight")
+def floyd_warshall_predecessor_and_distance(G, weight="weight"):
+    """Find all-pairs shortest path lengths using Floyd's algorithm.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    weight : string, optional (default='weight')
+       Edge data key corresponding to the edge weight.
+
+    Returns
+    -------
+    predecessor,distance : dictionaries
+       Dictionaries, keyed by source and target, of predecessors and distances
+       in the shortest path.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from(
+    ...     [
+    ...         ("s", "u", 10),
+    ...         ("s", "x", 5),
+    ...         ("u", "v", 1),
+    ...         ("u", "x", 2),
+    ...         ("v", "y", 1),
+    ...         ("x", "u", 3),
+    ...         ("x", "v", 5),
+    ...         ("x", "y", 2),
+    ...         ("y", "s", 7),
+    ...         ("y", "v", 6),
+    ...     ]
+    ... )
+    >>> predecessors, _ = nx.floyd_warshall_predecessor_and_distance(G)
+    >>> print(nx.reconstruct_path("s", "v", predecessors))
+    ['s', 'x', 'u', 'v']
+
+    Notes
+    -----
+    Floyd's algorithm is appropriate for finding shortest paths
+    in dense graphs or graphs with negative weights when Dijkstra's algorithm
+    fails. This algorithm can still fail if there are negative cycles.
+    It has running time $O(n^3)$ with running space of $O(n^2)$.
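+
+    A quick check of the distance dictionary (illustrative, not part of
+    the original docstring); edge weights default to 1.0, so the values
+    are floats:
+
+    >>> G = nx.path_graph(3)
+    >>> _, dist = nx.floyd_warshall_predecessor_and_distance(G)
+    >>> dist[0][2]
+    2.0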
+
+    See Also
+    --------
+    floyd_warshall
+    floyd_warshall_numpy
+    all_pairs_shortest_path
+    all_pairs_shortest_path_length
+    """
+    from collections import defaultdict
+
+    # dictionary-of-dictionaries representation for dist and pred
+    # use some defaultdict magic here
+    # for dist the default is the floating point inf value
+    dist = defaultdict(lambda: defaultdict(lambda: float("inf")))
+    for u in G:
+        dist[u][u] = 0
+    pred = defaultdict(dict)
+    # initialize path distance dictionary to be the adjacency matrix
+    # (the zero diagonal was already set above)
+    undirected = not G.is_directed()
+    for u, v, d in G.edges(data=True):
+        e_weight = d.get(weight, 1.0)
+        dist[u][v] = min(e_weight, dist[u][v])
+        pred[u][v] = u
+        if undirected:
+            dist[v][u] = min(e_weight, dist[v][u])
+            pred[v][u] = v
+    for w in G:
+        dist_w = dist[w]  # save recomputation
+        for u in G:
+            dist_u = dist[u]  # save recomputation
+            for v in G:
+                d = dist_u[w] + dist_w[v]
+                if dist_u[v] > d:
+                    dist_u[v] = d
+                    pred[u][v] = pred[w][v]
+    return dict(pred), dict(dist)
+
+
+@nx._dispatchable(graphs=None)
+def reconstruct_path(source, target, predecessors):
+    """Reconstruct a path from source to target using the predecessors
+    dict as returned by floyd_warshall_predecessor_and_distance
+
+    Parameters
+    ----------
+    source : node
+       Starting node for path
+
+    target : node
+       Ending node for path
+
+    predecessors : dictionary
+       Dictionary, keyed by source and target, of predecessors in the
+       shortest path, as returned by floyd_warshall_predecessor_and_distance
+
+    Returns
+    -------
+    path : list
+       A list of nodes containing the shortest path from source to target
+
+       If source and target are the same, an empty list is returned
+
+    Notes
+    -----
+    This function is meant to give more applicability to the
+    floyd_warshall_predecessor_and_distance function
+
+    See Also
+    --------
+    floyd_warshall_predecessor_and_distance
+    """
+    if source == target:
+        return []
+    prev = predecessors[source]
+    curr = prev[target]
+    path = [target, curr]
+    while curr != source:
+        curr = prev[curr]
+        path.append(curr)
+    return list(reversed(path))
+
+
+@nx._dispatchable(edge_attrs="weight")
+def floyd_warshall(G, weight="weight"):
+    """Find all-pairs shortest path lengths using Floyd's algorithm.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    weight : string, optional (default='weight')
+       Edge data key corresponding to the edge weight.
+
+    Returns
+    -------
+    distance : dict
+       A dictionary, keyed by source and target, of shortest paths distances
+       between nodes.
+
+    Examples
+    --------
+    >>> from pprint import pprint
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from(
+    ...     [(0, 1, 5), (1, 2, 2), (2, 3, -3), (1, 3, 10), (3, 2, 8)]
+    ... )
+    >>> fw = nx.floyd_warshall(G, weight="weight")
+    >>> results = {a: dict(b) for a, b in fw.items()}
+    >>> pprint(results)
+    {0: {0: 0, 1: 5, 2: 7, 3: 4},
+     1: {0: inf, 1: 0, 2: 2, 3: -1},
+     2: {0: inf, 1: inf, 2: 0, 3: -3},
+     3: {0: inf, 1: inf, 2: 8, 3: 0}}
+
+    Notes
+    -----
+    Floyd's algorithm is appropriate for finding shortest paths
+    in dense graphs or graphs with negative weights when Dijkstra's algorithm
+    fails. This algorithm can still fail if there are negative cycles.
+    It has running time $O(n^3)$ with running space of $O(n^2)$.
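+
+    A minimal undirected check (illustrative, not part of the original
+    docstring):
+
+    >>> nx.floyd_warshall(nx.path_graph(3))[0][2]
+    2.0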
+ + See Also + -------- + floyd_warshall_predecessor_and_distance + floyd_warshall_numpy + all_pairs_shortest_path + all_pairs_shortest_path_length + """ + # could make this its own function to reduce memory costs + return floyd_warshall_predecessor_and_distance(G, weight=weight)[1] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/generic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/generic.py new file mode 100644 index 0000000..660032f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/generic.py @@ -0,0 +1,713 @@ +""" +Compute the shortest paths and path lengths between nodes in the graph. + +These algorithms work with undirected and directed graphs. + +""" + +import networkx as nx + +__all__ = [ + "shortest_path", + "all_shortest_paths", + "single_source_all_shortest_paths", + "all_pairs_all_shortest_paths", + "shortest_path_length", + "average_shortest_path_length", + "has_path", +] + + +@nx._dispatchable +def has_path(G, source, target): + """Returns *True* if *G* has a path from *source* to *target*. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + """ + try: + nx.shortest_path(G, source, target) + except nx.NetworkXNoPath: + return False + return True + + +@nx._dispatchable(edge_attrs="weight") +def shortest_path(G, source=None, target=None, weight=None, method="dijkstra"): + """Compute shortest paths in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Starting node for path. If not specified, compute shortest + paths for each possible starting node. + + target : node, optional + Ending node for path. If not specified, compute shortest + paths to all possible nodes. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + path: list or dictionary or iterator + All returned paths include both the source and target in the path. + + If the source and target are both specified, return a single list + of nodes in a shortest path from the source to the target. + + If only the source is specified, return a dictionary keyed by + targets with a list of nodes in a shortest path from the source + to one of the targets. + + If only the target is specified, return a dictionary keyed by + sources with a list of nodes in a shortest path from one of the + sources to the target. + + If neither the source nor target are specified, return an iterator + over (source, dictionary) where dictionary is keyed by target to + list of nodes in a shortest path from the source to the target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + ValueError + If `method` is not among the supported options. 
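+
+    For instance, an unsupported `method` fails up front (an illustrative
+    example, not from the original docstring):
+
+    >>> nx.shortest_path(nx.path_graph(3), 0, 2, method="spam")
+    Traceback (most recent call last):
+        ...
+    ValueError: method not supported: spam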
+ + Examples + -------- + >>> G = nx.path_graph(5) + >>> print(nx.shortest_path(G, source=0, target=4)) + [0, 1, 2, 3, 4] + >>> p = nx.shortest_path(G, source=0) # target not specified + >>> p[3] # shortest path from source=0 to target=3 + [0, 1, 2, 3] + >>> p = nx.shortest_path(G, target=4) # source not specified + >>> p[1] # shortest path from source=1 to target=4 + [1, 2, 3, 4] + >>> p = dict(nx.shortest_path(G)) # source, target not specified + >>> p[2][4] # shortest path from source=2 to target=4 + [2, 3, 4] + + Notes + ----- + There may be more than one shortest path between a source and target. + This returns only one of them. + + See Also + -------- + all_pairs_shortest_path + all_pairs_dijkstra_path + all_pairs_bellman_ford_path + single_source_shortest_path + single_source_dijkstra_path + single_source_bellman_ford_path + """ + if method not in ("dijkstra", "bellman-ford"): + # so we don't need to check in each branch later + raise ValueError(f"method not supported: {method}") + method = "unweighted" if weight is None else method + if source is None: + if target is None: + # Find paths between all pairs. Iterator of dicts. + if method == "unweighted": + paths = nx.all_pairs_shortest_path(G) + elif method == "dijkstra": + paths = nx.all_pairs_dijkstra_path(G, weight=weight) + else: # method == 'bellman-ford': + paths = nx.all_pairs_bellman_ford_path(G, weight=weight) + else: + # Find paths from all nodes co-accessible to the target. + if G.is_directed(): + G = G.reverse(copy=False) + if method == "unweighted": + paths = nx.single_source_shortest_path(G, target) + elif method == "dijkstra": + paths = nx.single_source_dijkstra_path(G, target, weight=weight) + else: # method == 'bellman-ford': + paths = nx.single_source_bellman_ford_path(G, target, weight=weight) + # Now flip the paths so they go from a source to the target. + for target in paths: + paths[target] = list(reversed(paths[target])) + else: + if target is None: + # Find paths to all nodes accessible from the source. + if method == "unweighted": + paths = nx.single_source_shortest_path(G, source) + elif method == "dijkstra": + paths = nx.single_source_dijkstra_path(G, source, weight=weight) + else: # method == 'bellman-ford': + paths = nx.single_source_bellman_ford_path(G, source, weight=weight) + else: + # Find shortest source-target path. + if method == "unweighted": + paths = nx.bidirectional_shortest_path(G, source, target) + elif method == "dijkstra": + _, paths = nx.bidirectional_dijkstra(G, source, target, weight) + else: # method == 'bellman-ford': + paths = nx.bellman_ford_path(G, source, target, weight) + return paths + + +@nx._dispatchable(edge_attrs="weight") +def shortest_path_length(G, source=None, target=None, weight=None, method="dijkstra"): + """Compute shortest path lengths in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Starting node for path. + If not specified, compute shortest path lengths using all nodes as + source nodes. + + target : node, optional + Ending node for path. + If not specified, compute shortest path lengths using all nodes as + target nodes. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. 
The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path length. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + length: number or iterator + If the source and target are both specified, return the length of + the shortest path from the source to the target. + + If only the source is specified, return a dict keyed by target + to the shortest path length from the source to that target. + + If only the target is specified, return a dict keyed by source + to the shortest path length from that source to the target. + + If neither the source nor target are specified, return an iterator + over (source, dictionary) where dictionary is keyed by target to + shortest path length from source to that target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.shortest_path_length(G, source=0, target=4) + 4 + >>> p = nx.shortest_path_length(G, source=0) # target not specified + >>> p[4] + 4 + >>> p = nx.shortest_path_length(G, target=4) # source not specified + >>> p[0] + 4 + >>> p = dict(nx.shortest_path_length(G)) # source,target not specified + >>> p[0][4] + 4 + + Notes + ----- + The length of the path is always 1 less than the number of nodes involved + in the path since the length measures the number of edges followed. + + For digraphs this returns the shortest directed path length. To find path + lengths in the reverse direction use G.reverse(copy=False) first to flip + the edge orientation. + + See Also + -------- + all_pairs_shortest_path_length + all_pairs_dijkstra_path_length + all_pairs_bellman_ford_path_length + single_source_shortest_path_length + single_source_dijkstra_path_length + single_source_bellman_ford_path_length + """ + if method not in ("dijkstra", "bellman-ford"): + # so we don't need to check in each branch later + raise ValueError(f"method not supported: {method}") + method = "unweighted" if weight is None else method + if source is None: + if target is None: + # Find paths between all pairs. + if method == "unweighted": + paths = nx.all_pairs_shortest_path_length(G) + elif method == "dijkstra": + paths = nx.all_pairs_dijkstra_path_length(G, weight=weight) + else: # method == 'bellman-ford': + paths = nx.all_pairs_bellman_ford_path_length(G, weight=weight) + else: + # Find paths from all nodes co-accessible to the target. + if G.is_directed(): + G = G.reverse(copy=False) + if method == "unweighted": + path_length = nx.single_source_shortest_path_length + paths = path_length(G, target) + elif method == "dijkstra": + path_length = nx.single_source_dijkstra_path_length + paths = path_length(G, target, weight=weight) + else: # method == 'bellman-ford': + path_length = nx.single_source_bellman_ford_path_length + paths = path_length(G, target, weight=weight) + else: + if target is None: + # Find paths to all nodes accessible from the source. 
+            if method == "unweighted":
+                paths = nx.single_source_shortest_path_length(G, source)
+            elif method == "dijkstra":
+                path_length = nx.single_source_dijkstra_path_length
+                paths = path_length(G, source, weight=weight)
+            else:  # method == 'bellman-ford':
+                path_length = nx.single_source_bellman_ford_path_length
+                paths = path_length(G, source, weight=weight)
+        else:
+            # Find shortest source-target path.
+            if method == "unweighted":
+                p = nx.bidirectional_shortest_path(G, source, target)
+                paths = len(p) - 1
+            elif method == "dijkstra":
+                paths = nx.dijkstra_path_length(G, source, target, weight)
+            else:  # method == 'bellman-ford':
+                paths = nx.bellman_ford_path_length(G, source, target, weight)
+    return paths
+
+
+@nx._dispatchable(edge_attrs="weight")
+def average_shortest_path_length(G, weight=None, method=None):
+    r"""Returns the average shortest path length.
+
+    The average shortest path length is
+
+    .. math::
+
+        a = \sum_{\substack{s,t \in V \\ s\neq t}} \frac{d(s, t)}{n(n-1)}
+
+    where `V` is the set of nodes in `G`,
+    `d(s, t)` is the length of the shortest path from `s` to `t`,
+    and `n` is the number of nodes in `G`.
+
+    .. versionchanged:: 3.0
+       An exception is raised for directed graphs that are not strongly
+       connected.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    weight : None, string or function, optional (default = None)
+        If None, every edge has weight/distance/cost 1.
+        If a string, use this edge attribute as the edge weight.
+        Any edge attribute not present defaults to 1.
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly
+        three positional arguments: the two endpoints of an edge and
+        the dictionary of edge attributes for that edge.
+        The function must return a number.
+
+    method : string, optional (default = 'unweighted' or 'dijkstra')
+        The algorithm to use to compute the path lengths.
+        Supported options are 'unweighted', 'dijkstra', 'bellman-ford',
+        'floyd-warshall' and 'floyd-warshall-numpy'.
+        Other method values produce a ValueError.
+        The default method is 'unweighted' if `weight` is None,
+        otherwise the default method is 'dijkstra'.
+
+    Raises
+    ------
+    NetworkXPointlessConcept
+        If `G` is the null graph (that is, the graph on zero nodes).
+
+    NetworkXError
+        If `G` is not connected (or not strongly connected, in the case
+        of a directed graph).
+
+    ValueError
+        If `method` is not among the supported options.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> nx.average_shortest_path_length(G)
+    2.0
+
+    For disconnected graphs, you can compute the average shortest path
+    length for each component
+
+    >>> G = nx.Graph([(1, 2), (3, 4)])
+    >>> for C in (G.subgraph(c).copy() for c in nx.connected_components(G)):
+    ...     print(nx.average_shortest_path_length(C))
+    1.0
+    1.0
+
+    """
+    single_source_methods = ["unweighted", "dijkstra", "bellman-ford"]
+    all_pairs_methods = ["floyd-warshall", "floyd-warshall-numpy"]
+    supported_methods = single_source_methods + all_pairs_methods
+
+    if method is None:
+        method = "unweighted" if weight is None else "dijkstra"
+    if method not in supported_methods:
+        raise ValueError(f"method not supported: {method}")
+
+    n = len(G)
+    # For the special case of the null graph, raise an exception, since
+    # there are no paths in the null graph.
+    if n == 0:
+        msg = (
+            "the null graph has no paths, thus there is no average shortest path length"
+        )
+        raise nx.NetworkXPointlessConcept(msg)
+    # For the special case of the trivial graph, return zero immediately.
+ if n == 1: + return 0 + # Shortest path length is undefined if the graph is not strongly connected. + if G.is_directed() and not nx.is_strongly_connected(G): + raise nx.NetworkXError("Graph is not strongly connected.") + # Shortest path length is undefined if the graph is not connected. + if not G.is_directed() and not nx.is_connected(G): + raise nx.NetworkXError("Graph is not connected.") + + # Compute all-pairs shortest paths. + def path_length(v): + if method == "unweighted": + return nx.single_source_shortest_path_length(G, v) + elif method == "dijkstra": + return nx.single_source_dijkstra_path_length(G, v, weight=weight) + elif method == "bellman-ford": + return nx.single_source_bellman_ford_path_length(G, v, weight=weight) + + if method in single_source_methods: + # Sum the distances for each (ordered) pair of source and target node. + s = sum(l for u in G for l in path_length(u).values()) + else: + if method == "floyd-warshall": + all_pairs = nx.floyd_warshall(G, weight=weight) + s = sum(sum(t.values()) for t in all_pairs.values()) + elif method == "floyd-warshall-numpy": + s = float(nx.floyd_warshall_numpy(G, weight=weight).sum()) + return s / (n * (n - 1)) + + +@nx._dispatchable(edge_attrs="weight") +def all_shortest_paths(G, source, target, weight=None, method="dijkstra"): + """Compute all shortest simple paths in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. + + target : node + Ending node for path. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path lengths. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + paths : generator of lists + A generator of all paths between source and target. + + Raises + ------ + ValueError + If `method` is not among the supported options. + + NetworkXNoPath + If `target` cannot be reached from `source`. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2]) + >>> nx.add_path(G, [0, 10, 2]) + >>> print([p for p in nx.all_shortest_paths(G, source=0, target=2)]) + [[0, 1, 2], [0, 10, 2]] + + Notes + ----- + There may be many shortest paths between the source and target. If G + contains zero-weight cycles, this function will not produce all shortest + paths because doing so would produce infinitely many paths of unbounded + length -- instead, we only produce the shortest simple paths. 
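+
+    A weighted variant (an illustrative example, not from the original
+    docstring); ``sorted`` is used only to make the output order
+    deterministic:
+
+    >>> G = nx.Graph()
+    >>> G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (0, 2, 2)])
+    >>> sorted(nx.all_shortest_paths(G, 0, 2, weight="weight"))
+    [[0, 1, 2], [0, 2]]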
+ + See Also + -------- + shortest_path + single_source_shortest_path + all_pairs_shortest_path + """ + method = "unweighted" if weight is None else method + if method == "unweighted": + pred = nx.predecessor(G, source) + elif method == "dijkstra": + pred, dist = nx.dijkstra_predecessor_and_distance(G, source, weight=weight) + elif method == "bellman-ford": + pred, dist = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight) + else: + raise ValueError(f"method not supported: {method}") + + return _build_paths_from_predecessors({source}, target, pred) + + +@nx._dispatchable(edge_attrs="weight") +def single_source_all_shortest_paths(G, source, weight=None, method="dijkstra"): + """Compute all shortest simple paths from the given source in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path lengths. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + paths : generator of dictionary + A generator of all paths between source and all nodes in the graph. + + Raises + ------ + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3, 0]) + >>> dict(nx.single_source_all_shortest_paths(G, source=0)) + {0: [[0]], 1: [[0, 1]], 3: [[0, 3]], 2: [[0, 1, 2], [0, 3, 2]]} + + Notes + ----- + There may be many shortest paths between the source and target. If G + contains zero-weight cycles, this function will not produce all shortest + paths because doing so would produce infinitely many paths of unbounded + length -- instead, we only produce the shortest simple paths. + + See Also + -------- + shortest_path + all_shortest_paths + single_source_shortest_path + all_pairs_shortest_path + all_pairs_all_shortest_paths + """ + method = "unweighted" if weight is None else method + if method == "unweighted": + pred = nx.predecessor(G, source) + elif method == "dijkstra": + pred, dist = nx.dijkstra_predecessor_and_distance(G, source, weight=weight) + elif method == "bellman-ford": + pred, dist = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight) + else: + raise ValueError(f"method not supported: {method}") + for n in pred: + yield n, list(_build_paths_from_predecessors({source}, n, pred)) + + +@nx._dispatchable(edge_attrs="weight") +def all_pairs_all_shortest_paths(G, weight=None, method="dijkstra"): + """Compute all shortest paths between all nodes. + + Parameters + ---------- + G : NetworkX graph + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. 
The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path lengths. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + paths : generator of dictionary + Dictionary of arrays, keyed by source and target, of all shortest paths. + + Raises + ------ + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> dict(nx.all_pairs_all_shortest_paths(G))[0][2] + [[0, 1, 2], [0, 3, 2]] + >>> dict(nx.all_pairs_all_shortest_paths(G))[0][3] + [[0, 3]] + + Notes + ----- + There may be multiple shortest paths with equal lengths. Unlike + all_pairs_shortest_path, this method returns all shortest paths. + + See Also + -------- + all_pairs_shortest_path + single_source_all_shortest_paths + """ + for n in G: + yield ( + n, + dict(single_source_all_shortest_paths(G, n, weight=weight, method=method)), + ) + + +def _build_paths_from_predecessors(sources, target, pred): + """Compute all simple paths to target, given the predecessors found in + pred, terminating when any source in sources is found. + + Parameters + ---------- + sources : set + Starting nodes for path. + + target : node + Ending node for path. + + pred : dict + A dictionary of predecessor lists, keyed by node + + Returns + ------- + paths : generator of lists + A generator of all paths between source and target. + + Raises + ------ + NetworkXNoPath + If `target` cannot be reached from `source`. + + Notes + ----- + There may be many paths between the sources and target. If there are + cycles among the predecessors, this function will not produce all + possible paths because doing so would produce infinitely many paths + of unbounded length -- instead, we only produce simple paths. 
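+
+    A minimal sketch, assuming `pred` as produced by `nx.predecessor` on the
+    path graph 0-1-2-3 with source 0:
+
+    >>> pred = {0: [], 1: [0], 2: [1], 3: [2]}
+    >>> sorted(_build_paths_from_predecessors({0}, 3, pred))
+    [[0, 1, 2, 3]]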
+ + See Also + -------- + shortest_path + single_source_shortest_path + all_pairs_shortest_path + all_shortest_paths + bellman_ford_path + """ + if target not in pred: + raise nx.NetworkXNoPath(f"Target {target} cannot be reached from given sources") + + seen = {target} + stack = [[target, 0]] + top = 0 + while top >= 0: + node, i = stack[top] + if node in sources: + yield [p for p, n in reversed(stack[: top + 1])] + if len(pred[node]) > i: + stack[top][1] = i + 1 + next = pred[node][i] + if next in seen: + continue + else: + seen.add(next) + top += 1 + if top == len(stack): + stack.append([next, 0]) + else: + stack[top][:] = [next, 0] + else: + seen.discard(node) + top -= 1 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..e0440a4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-312.pyc new file mode 100644 index 0000000..4207308 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-312.pyc new file mode 100644 index 0000000..11b1b04 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-312.pyc new file mode 100644 index 0000000..7140d46 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-312.pyc new file mode 100644 index 0000000..ea4f258 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-312.pyc new file mode 100644 index 0000000..03635e9 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-312.pyc new file mode 100644 index 0000000..a940c65 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py new file mode 100644 index 0000000..3abf211 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py @@ -0,0 +1,254 @@ +import pytest + +import networkx as nx +from networkx.utils import pairwise + + +class TestAStar: + @classmethod + def setup_class(cls): + edges = [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + cls.XG = nx.DiGraph() + cls.XG.add_weighted_edges_from(edges) + + def test_multiple_optimal_paths(self): + """Tests that A* algorithm finds any of multiple optimal paths""" + heuristic_values = {"a": 1.35, "b": 1.18, "c": 0.67, "d": 0} + + def h(u, v): + return heuristic_values[u] + + graph = nx.Graph() + points = ["a", "b", "c", "d"] + edges = [("a", "b", 0.18), ("a", "c", 0.68), ("b", "c", 0.50), ("c", "d", 0.67)] + + graph.add_nodes_from(points) + graph.add_weighted_edges_from(edges) + + path1 = ["a", "c", "d"] + path2 = ["a", "b", "c", "d"] + assert nx.astar_path(graph, "a", "d", h) in (path1, path2) + + def test_astar_directed(self): + assert nx.astar_path(self.XG, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(self.XG, "s", "v") == 9 + + def test_astar_directed_weight_function(self): + def w1(u, v, d): + return d["weight"] + + assert nx.astar_path(self.XG, "x", "u", weight=w1) == ["x", "u"] + assert nx.astar_path_length(self.XG, "x", "u", weight=w1) == 3 + assert nx.astar_path(self.XG, "s", "v", weight=w1) == ["s", "x", "u", "v"] + assert nx.astar_path_length(self.XG, "s", "v", weight=w1) == 9 + + def w2(u, v, d): + return None if (u, v) == ("x", "u") else d["weight"] + + assert nx.astar_path(self.XG, "x", "u", weight=w2) == ["x", "y", "s", "u"] + assert nx.astar_path_length(self.XG, "x", "u", weight=w2) == 19 + assert nx.astar_path(self.XG, "s", "v", weight=w2) == ["s", "x", "v"] + assert nx.astar_path_length(self.XG, "s", "v", weight=w2) == 10 + + def w3(u, v, d): + return d["weight"] + 10 + + assert nx.astar_path(self.XG, "x", "u", weight=w3) == ["x", "u"] + assert nx.astar_path_length(self.XG, "x", "u", weight=w3) == 13 + assert nx.astar_path(self.XG, "s", "v", weight=w3) == ["s", "x", "v"] + assert nx.astar_path_length(self.XG, "s", "v", weight=w3) == 30 + + def test_astar_multigraph(self): + G = nx.MultiDiGraph(self.XG) + G.add_weighted_edges_from((u, v, 1000) for (u, v) in list(G.edges())) + assert nx.astar_path(G, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(G, "s", "v") == 9 + + def test_astar_undirected(self): + GG = self.XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + 
GG["y"]["v"]["weight"] = 2 + assert nx.astar_path(GG, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(GG, "s", "v") == 8 + + def test_astar_directed2(self): + XG2 = nx.DiGraph() + edges = [ + (1, 4, 1), + (4, 5, 1), + (5, 6, 1), + (6, 3, 1), + (1, 3, 50), + (1, 2, 100), + (2, 3, 100), + ] + XG2.add_weighted_edges_from(edges) + assert nx.astar_path(XG2, 1, 3) == [1, 4, 5, 6, 3] + + def test_astar_undirected2(self): + XG3 = nx.Graph() + edges = [(0, 1, 2), (1, 2, 12), (2, 3, 1), (3, 4, 5), (4, 5, 1), (5, 0, 10)] + XG3.add_weighted_edges_from(edges) + assert nx.astar_path(XG3, 0, 3) == [0, 1, 2, 3] + assert nx.astar_path_length(XG3, 0, 3) == 15 + + def test_astar_undirected3(self): + XG4 = nx.Graph() + edges = [ + (0, 1, 2), + (1, 2, 2), + (2, 3, 1), + (3, 4, 1), + (4, 5, 1), + (5, 6, 1), + (6, 7, 1), + (7, 0, 1), + ] + XG4.add_weighted_edges_from(edges) + assert nx.astar_path(XG4, 0, 2) == [0, 1, 2] + assert nx.astar_path_length(XG4, 0, 2) == 4 + + """ Tests that A* finds correct path when multiple paths exist + and the best one is not expanded first (GH issue #3464) + """ + + def test_astar_directed3(self): + heuristic_values = {"n5": 36, "n2": 4, "n1": 0, "n0": 0} + + def h(u, v): + return heuristic_values[u] + + edges = [("n5", "n1", 11), ("n5", "n2", 9), ("n2", "n1", 1), ("n1", "n0", 32)] + graph = nx.DiGraph() + graph.add_weighted_edges_from(edges) + answer = ["n5", "n2", "n1", "n0"] + assert nx.astar_path(graph, "n5", "n0", h) == answer + + """ Tests that parent is not wrongly overridden when a node + is re-explored multiple times. + """ + + def test_astar_directed4(self): + edges = [ + ("a", "b", 1), + ("a", "c", 1), + ("b", "d", 2), + ("c", "d", 1), + ("d", "e", 1), + ] + graph = nx.DiGraph() + graph.add_weighted_edges_from(edges) + assert nx.astar_path(graph, "a", "e") == ["a", "c", "d", "e"] + + # >>> MXG4=NX.MultiGraph(XG4) + # >>> MXG4.add_edge(0,1,3) + # >>> NX.dijkstra_path(MXG4,0,2) + # [0, 1, 2] + + def test_astar_w1(self): + G = nx.DiGraph() + G.add_edges_from( + [ + ("s", "u"), + ("s", "x"), + ("u", "v"), + ("u", "x"), + ("v", "y"), + ("x", "u"), + ("x", "w"), + ("w", "v"), + ("x", "y"), + ("y", "s"), + ("y", "v"), + ] + ) + assert nx.astar_path(G, "s", "v") == ["s", "u", "v"] + assert nx.astar_path_length(G, "s", "v") == 2 + + def test_astar_nopath(self): + with pytest.raises(nx.NodeNotFound): + nx.astar_path(self.XG, "s", "moon") + + def test_astar_cutoff(self): + with pytest.raises(nx.NetworkXNoPath): + # optimal path_length in XG is 9 + nx.astar_path(self.XG, "s", "v", cutoff=8.0) + with pytest.raises(nx.NetworkXNoPath): + nx.astar_path_length(self.XG, "s", "v", cutoff=8.0) + + def test_astar_admissible_heuristic_with_cutoff(self): + heuristic_values = {"s": 36, "y": 4, "x": 0, "u": 0, "v": 0} + + def h(u, v): + return heuristic_values[u] + + assert nx.astar_path_length(self.XG, "s", "v") == 9 + assert nx.astar_path_length(self.XG, "s", "v", heuristic=h) == 9 + assert nx.astar_path_length(self.XG, "s", "v", heuristic=h, cutoff=12) == 9 + assert nx.astar_path_length(self.XG, "s", "v", heuristic=h, cutoff=9) == 9 + with pytest.raises(nx.NetworkXNoPath): + nx.astar_path_length(self.XG, "s", "v", heuristic=h, cutoff=8) + + def test_astar_inadmissible_heuristic_with_cutoff(self): + heuristic_values = {"s": 36, "y": 14, "x": 10, "u": 10, "v": 0} + + def h(u, v): + return heuristic_values[u] + + # optimal path_length in XG is 9. This heuristic gives over-estimate. 
+ assert nx.astar_path_length(self.XG, "s", "v", heuristic=h) == 10 + assert nx.astar_path_length(self.XG, "s", "v", heuristic=h, cutoff=15) == 10 + with pytest.raises(nx.NetworkXNoPath): + nx.astar_path_length(self.XG, "s", "v", heuristic=h, cutoff=9) + with pytest.raises(nx.NetworkXNoPath): + nx.astar_path_length(self.XG, "s", "v", heuristic=h, cutoff=12) + + def test_astar_cutoff2(self): + assert nx.astar_path(self.XG, "s", "v", cutoff=10.0) == ["s", "x", "u", "v"] + assert nx.astar_path_length(self.XG, "s", "v") == 9 + + def test_cycle(self): + C = nx.cycle_graph(7) + assert nx.astar_path(C, 0, 3) == [0, 1, 2, 3] + assert nx.dijkstra_path(C, 0, 4) == [0, 6, 5, 4] + + def test_unorderable_nodes(self): + """Tests that A* accommodates nodes that are not orderable. + + For more information, see issue #554. + + """ + # Create the cycle graph on four nodes, with nodes represented + # as (unorderable) Python objects. + nodes = [object() for n in range(4)] + G = nx.Graph() + G.add_edges_from(pairwise(nodes, cyclic=True)) + path = nx.astar_path(G, nodes[0], nodes[2]) + assert len(path) == 3 + + def test_astar_NetworkXNoPath(self): + """Tests that exception is raised when there exists no + path between source and target""" + G = nx.gnp_random_graph(10, 0.2, seed=10) + with pytest.raises(nx.NetworkXNoPath): + nx.astar_path(G, 4, 9) + + def test_astar_NodeNotFound(self): + """Tests that exception is raised when either + source or target is not in graph""" + G = nx.gnp_random_graph(10, 0.2, seed=10) + with pytest.raises(nx.NodeNotFound): + nx.astar_path_length(G, 11, 9) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py new file mode 100644 index 0000000..6923bfe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py @@ -0,0 +1,212 @@ +import pytest + +import networkx as nx + + +class TestFloyd: + @classmethod + def setup_class(cls): + pass + + def test_floyd_warshall_predecessor_and_distance(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG) + assert dist["s"]["v"] == 9 + assert path["s"]["v"] == "u" + assert dist == { + "y": {"y": 0, "x": 12, "s": 7, "u": 15, "v": 6}, + "x": {"y": 2, "x": 0, "s": 9, "u": 3, "v": 4}, + "s": {"y": 7, "x": 5, "s": 0, "u": 8, "v": 9}, + "u": {"y": 2, "x": 2, "s": 9, "u": 0, "v": 1}, + "v": {"y": 1, "x": 13, "s": 8, "u": 16, "v": 0}, + } + + GG = XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + path, dist = nx.floyd_warshall_predecessor_and_distance(GG) + assert dist["s"]["v"] == 8 + # skip this test, could be alternate path s-u-v + # assert_equal(path['s']['v'],'y') + + G = nx.DiGraph() # no weights + G.add_edges_from( + [ + ("s", "u"), + ("s", "x"), + ("u", "v"), + ("u", "x"), + ("v", "y"), + ("x", "u"), + ("x", "v"), + ("x", "y"), + ("y", "s"), + ("y", "v"), + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(G) + assert dist["s"]["v"] == 2 + # skip this test, could be alternate path s-u-v + # assert_equal(path['s']['v'],'x') + + # alternate interface + dist = nx.floyd_warshall(G) + assert dist["s"]["v"] == 
2
+
+        # floyd_warshall_predecessor_and_distance returns
+        # dicts-of-defaultdicts
+        # make sure we don't get empty dictionary
+        XG = nx.DiGraph()
+        XG.add_weighted_edges_from(
+            [("v", "x", 5.0), ("y", "x", 5.0), ("v", "y", 6.0), ("x", "u", 2.0)]
+        )
+        path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
+        inf = float("inf")
+        assert dist == {
+            "v": {"v": 0, "x": 5.0, "y": 6.0, "u": 7.0},
+            "x": {"x": 0, "u": 2.0, "v": inf, "y": inf},
+            "y": {"y": 0, "x": 5.0, "v": inf, "u": 7.0},
+            "u": {"u": 0, "v": inf, "x": inf, "y": inf},
+        }
+        assert path == {
+            "v": {"x": "v", "y": "v", "u": "x"},
+            "x": {"u": "x"},
+            "y": {"x": "y", "u": "x"},
+        }
+
+    def test_reconstruct_path(self):
+        with pytest.raises(KeyError):
+            XG = nx.DiGraph()
+            XG.add_weighted_edges_from(
+                [
+                    ("s", "u", 10),
+                    ("s", "x", 5),
+                    ("u", "v", 1),
+                    ("u", "x", 2),
+                    ("v", "y", 1),
+                    ("x", "u", 3),
+                    ("x", "v", 5),
+                    ("x", "y", 2),
+                    ("y", "s", 7),
+                    ("y", "v", 6),
+                ]
+            )
+            predecessors, _ = nx.floyd_warshall_predecessor_and_distance(XG)
+
+            path = nx.reconstruct_path("s", "v", predecessors)
+            assert path == ["s", "x", "u", "v"]
+
+            path = nx.reconstruct_path("s", "s", predecessors)
+            assert path == []
+
+            # this part raises the KeyError
+            nx.reconstruct_path("1", "2", predecessors)
+
+    def test_cycle(self):
+        path, dist = nx.floyd_warshall_predecessor_and_distance(nx.cycle_graph(7))
+        assert dist[0][3] == 3
+        assert path[0][3] == 2
+        assert dist[0][4] == 3
+
+    def test_weighted(self):
+        XG3 = nx.Graph()
+        XG3.add_weighted_edges_from(
+            [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]]
+        )
+        path, dist = nx.floyd_warshall_predecessor_and_distance(XG3)
+        assert dist[0][3] == 15
+        assert path[0][3] == 2
+
+    def test_weighted2(self):
+        XG4 = nx.Graph()
+        XG4.add_weighted_edges_from(
+            [
+                [0, 1, 2],
+                [1, 2, 2],
+                [2, 3, 1],
+                [3, 4, 1],
+                [4, 5, 1],
+                [5, 6, 1],
+                [6, 7, 1],
+                [7, 0, 1],
+            ]
+        )
+        path, dist = nx.floyd_warshall_predecessor_and_distance(XG4)
+        assert dist[0][2] == 4
+        assert path[0][2] == 1
+
+    def test_weight_parameter(self):
+        XG4 = nx.Graph()
+        XG4.add_edges_from(
+            [
+                (0, 1, {"heavy": 2}),
+                (1, 2, {"heavy": 2}),
+                (2, 3, {"heavy": 1}),
+                (3, 4, {"heavy": 1}),
+                (4, 5, {"heavy": 1}),
+                (5, 6, {"heavy": 1}),
+                (6, 7, {"heavy": 1}),
+                (7, 0, {"heavy": 1}),
+            ]
+        )
+        path, dist = nx.floyd_warshall_predecessor_and_distance(XG4, weight="heavy")
+        assert dist[0][2] == 4
+        assert path[0][2] == 1
+
+    def test_zero_distance(self):
+        XG = nx.DiGraph()
+        XG.add_weighted_edges_from(
+            [
+                ("s", "u", 10),
+                ("s", "x", 5),
+                ("u", "v", 1),
+                ("u", "x", 2),
+                ("v", "y", 1),
+                ("x", "u", 3),
+                ("x", "v", 5),
+                ("x", "y", 2),
+                ("y", "s", 7),
+                ("y", "v", 6),
+            ]
+        )
+        path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
+
+        for u in XG:
+            assert dist[u][u] == 0
+
+        GG = XG.to_undirected()
+        # make sure we get lower weight
+        # to_undirected might choose either edge with weight 2 or weight 3
+        GG["u"]["x"]["weight"] = 2
+        path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
+
+        for u in GG:
+            assert dist[u][u] == 0
+
+    def test_zero_weight(self):
+        G = nx.DiGraph()
+        edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1), (5, 4, 0), (4, 3, -5), (2, 5, -7)]
+        G.add_weighted_edges_from(edges)
+        dist = nx.floyd_warshall(G)
+        assert dist[1][3] == -14
+
+        G = nx.MultiDiGraph()
+        edges.append((2, 5, -7))
+        G.add_weighted_edges_from(edges)
+        dist = nx.floyd_warshall(G)
+        assert dist[1][3] == -14
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py
b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py new file mode 100644 index 0000000..6eb95a5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py @@ -0,0 +1,88 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") + + +def test_cycle_numpy(): + dist = nx.floyd_warshall_numpy(nx.cycle_graph(7)) + assert dist[0, 3] == 3 + assert dist[0, 4] == 3 + + +def test_weighted_numpy_three_edges(): + XG3 = nx.Graph() + XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + dist = nx.floyd_warshall_numpy(XG3) + assert dist[0, 3] == 15 + + +def test_weighted_numpy_two_edges(): + XG4 = nx.Graph() + XG4.add_weighted_edges_from( + [ + [0, 1, 2], + [1, 2, 2], + [2, 3, 1], + [3, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 7, 1], + [7, 0, 1], + ] + ) + dist = nx.floyd_warshall_numpy(XG4) + assert dist[0, 2] == 4 + + +def test_weight_parameter_numpy(): + XG4 = nx.Graph() + XG4.add_edges_from( + [ + (0, 1, {"heavy": 2}), + (1, 2, {"heavy": 2}), + (2, 3, {"heavy": 1}), + (3, 4, {"heavy": 1}), + (4, 5, {"heavy": 1}), + (5, 6, {"heavy": 1}), + (6, 7, {"heavy": 1}), + (7, 0, {"heavy": 1}), + ] + ) + dist = nx.floyd_warshall_numpy(XG4, weight="heavy") + assert dist[0, 2] == 4 + + +def test_directed_cycle_numpy(): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + pred, dist = nx.floyd_warshall_predecessor_and_distance(G) + D = nx.utils.dict_to_numpy_array(dist) + np.testing.assert_equal(nx.floyd_warshall_numpy(G), D) + + +def test_zero_weight(): + G = nx.DiGraph() + edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1), (5, 4, 0), (4, 3, -5), (2, 5, -7)] + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall_numpy(G) + assert int(np.min(dist)) == -14 + + G = nx.MultiDiGraph() + edges.append((2, 5, -7)) + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall_numpy(G) + assert int(np.min(dist)) == -14 + + +def test_nodelist(): + G = nx.path_graph(7) + dist = nx.floyd_warshall_numpy(G, nodelist=[3, 5, 4, 6, 2, 1, 0]) + assert dist[0, 3] == 3 + assert dist[0, 1] == 2 + assert dist[6, 2] == 4 + pytest.raises(nx.NetworkXError, nx.floyd_warshall_numpy, G, [1, 3]) + pytest.raises(nx.NetworkXError, nx.floyd_warshall_numpy, G, list(range(9))) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py new file mode 100644 index 0000000..578c83b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py @@ -0,0 +1,450 @@ +import pytest + +import networkx as nx + + +def validate_grid_path(r, c, s, t, p): + assert isinstance(p, list) + assert p[0] == s + assert p[-1] == t + s = ((s - 1) // c, (s - 1) % c) + t = ((t - 1) // c, (t - 1) % c) + assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1 + p = [((u - 1) // c, (u - 1) % c) for u in p] + for u in p: + assert 0 <= u[0] < r + assert 0 <= u[1] < c + for u, v in zip(p[:-1], p[1:]): + assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)] + + +class TestGenericPath: + @classmethod + def setup_class(cls): + from networkx import convert_node_labels_to_integers as cnlti + + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.neg_weights = nx.DiGraph() + 
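# Fixture with one negative edge (2 -> 3, weight -2): the cheapest
+        # 0 -> 3 route is 0-2-3 (total 3 - 2 = 1), which Bellman-Ford finds
+        # but Dijkstra may not (exercised in test_shortest_path below).
+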
cls.neg_weights.add_edge(0, 1, weight=1) + cls.neg_weights.add_edge(0, 2, weight=3) + cls.neg_weights.add_edge(1, 3, weight=1) + cls.neg_weights.add_edge(2, 3, weight=-2) + + def test_shortest_path(self): + assert nx.shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3] + assert nx.shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4] + validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12)) + assert nx.shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3] + # now with weights + assert nx.shortest_path(self.cycle, 0, 3, weight="weight") == [0, 1, 2, 3] + assert nx.shortest_path(self.cycle, 0, 4, weight="weight") == [0, 6, 5, 4] + validate_grid_path( + 4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12, weight="weight") + ) + assert nx.shortest_path(self.directed_cycle, 0, 3, weight="weight") == [ + 0, + 1, + 2, + 3, + ] + # weights and method specified + assert nx.shortest_path( + self.directed_cycle, 0, 3, weight="weight", method="dijkstra" + ) == [0, 1, 2, 3] + assert nx.shortest_path( + self.directed_cycle, 0, 3, weight="weight", method="bellman-ford" + ) == [0, 1, 2, 3] + # when Dijkstra's will probably (depending on precise implementation) + # incorrectly return [0, 1, 3] instead + assert nx.shortest_path( + self.neg_weights, 0, 3, weight="weight", method="bellman-ford" + ) == [0, 2, 3] + # confirm bad method rejection + pytest.raises(ValueError, nx.shortest_path, self.cycle, method="SPAM") + # confirm absent source rejection + pytest.raises(nx.NodeNotFound, nx.shortest_path, self.cycle, 8) + + def test_shortest_path_target(self): + answer = {0: [0, 1], 1: [1], 2: [2, 1]} + sp = nx.shortest_path(nx.path_graph(3), target=1) + assert sp == answer + # with weights + sp = nx.shortest_path(nx.path_graph(3), target=1, weight="weight") + assert sp == answer + # weights and method specified + sp = nx.shortest_path( + nx.path_graph(3), target=1, weight="weight", method="dijkstra" + ) + assert sp == answer + sp = nx.shortest_path( + nx.path_graph(3), target=1, weight="weight", method="bellman-ford" + ) + assert sp == answer + + def test_shortest_path_length(self): + assert nx.shortest_path_length(self.cycle, 0, 3) == 3 + assert nx.shortest_path_length(self.grid, 1, 12) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4 + # now with weights + assert nx.shortest_path_length(self.cycle, 0, 3, weight="weight") == 3 + assert nx.shortest_path_length(self.grid, 1, 12, weight="weight") == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight="weight") == 4 + # weights and method specified + assert ( + nx.shortest_path_length( + self.cycle, 0, 3, weight="weight", method="dijkstra" + ) + == 3 + ) + assert ( + nx.shortest_path_length( + self.cycle, 0, 3, weight="weight", method="bellman-ford" + ) + == 3 + ) + # confirm bad method rejection + pytest.raises(ValueError, nx.shortest_path_length, self.cycle, method="SPAM") + # confirm absent source rejection + pytest.raises(nx.NodeNotFound, nx.shortest_path_length, self.cycle, 8) + + def test_shortest_path_length_target(self): + answer = {0: 1, 1: 0, 2: 1} + sp = nx.shortest_path_length(nx.path_graph(3), target=1) + assert sp == answer + # with weights + sp = nx.shortest_path_length(nx.path_graph(3), target=1, weight="weight") + assert sp == answer + # weights and method specified + sp = nx.shortest_path_length( + nx.path_graph(3), target=1, weight="weight", method="dijkstra" + ) + assert sp == answer + sp = nx.shortest_path_length( + nx.path_graph(3), target=1, weight="weight", method="bellman-ford" + ) + assert sp == answer + 
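+    # A callable weight also works with the target-only form above; a
+    # hypothetical sketch:
+    #     lengths = nx.shortest_path_length(nx.path_graph(3), target=1,
+    #                                       weight=lambda u, v, d: 2)
+    #     assert lengths == {0: 2, 1: 0, 2: 2}
+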
+ def test_single_source_shortest_path(self): + p = nx.shortest_path(self.cycle, 0) + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_shortest_path(self.cycle, 0) + p = nx.shortest_path(self.grid, 1) + validate_grid_path(4, 4, 1, 12, p[12]) + # now with weights + p = nx.shortest_path(self.cycle, 0, weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_dijkstra_path(self.cycle, 0) + p = nx.shortest_path(self.grid, 1, weight="weight") + validate_grid_path(4, 4, 1, 12, p[12]) + # weights and method specified + p = nx.shortest_path(self.cycle, 0, method="dijkstra", weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_shortest_path(self.cycle, 0) + p = nx.shortest_path(self.cycle, 0, method="bellman-ford", weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_shortest_path(self.cycle, 0) + + def test_single_source_shortest_path_length(self): + ans = nx.shortest_path_length(self.cycle, 0) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == nx.single_source_shortest_path_length(self.cycle, 0) + ans = nx.shortest_path_length(self.grid, 1) + assert ans[16] == 6 + # now with weights + ans = nx.shortest_path_length(self.cycle, 0, weight="weight") + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == nx.single_source_dijkstra_path_length(self.cycle, 0) + ans = nx.shortest_path_length(self.grid, 1, weight="weight") + assert ans[16] == 6 + # weights and method specified + ans = dict( + nx.shortest_path_length(self.cycle, 0, weight="weight", method="dijkstra") + ) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == nx.single_source_dijkstra_path_length(self.cycle, 0) + ans = dict( + nx.shortest_path_length( + self.cycle, 0, weight="weight", method="bellman-ford" + ) + ) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == nx.single_source_bellman_ford_path_length(self.cycle, 0) + + def test_single_source_all_shortest_paths(self): + cycle_ans = {0: [[0]], 1: [[0, 1]], 2: [[0, 1, 2], [0, 3, 2]], 3: [[0, 3]]} + ans = dict(nx.single_source_all_shortest_paths(nx.cycle_graph(4), 0)) + assert sorted(ans[2]) == cycle_ans[2] + ans = dict(nx.single_source_all_shortest_paths(self.grid, 1)) + grid_ans = [ + [1, 2, 3, 7, 11], + [1, 2, 6, 7, 11], + [1, 2, 6, 10, 11], + [1, 5, 6, 7, 11], + [1, 5, 6, 10, 11], + [1, 5, 9, 10, 11], + ] + assert sorted(ans[11]) == grid_ans + ans = dict( + nx.single_source_all_shortest_paths(nx.cycle_graph(4), 0, weight="weight") + ) + assert sorted(ans[2]) == cycle_ans[2] + ans = dict( + nx.single_source_all_shortest_paths( + nx.cycle_graph(4), 0, method="bellman-ford", weight="weight" + ) + ) + assert sorted(ans[2]) == cycle_ans[2] + ans = dict(nx.single_source_all_shortest_paths(self.grid, 1, weight="weight")) + assert sorted(ans[11]) == grid_ans + ans = dict( + nx.single_source_all_shortest_paths( + self.grid, 1, method="bellman-ford", weight="weight" + ) + ) + assert sorted(ans[11]) == grid_ans + G = nx.cycle_graph(4) + G.add_node(4) + ans = dict(nx.single_source_all_shortest_paths(G, 0)) + assert sorted(ans[2]) == [[0, 1, 2], [0, 3, 2]] + ans = dict(nx.single_source_all_shortest_paths(G, 4)) + assert sorted(ans[4]) == [[4]] + + def test_all_pairs_shortest_path(self): + # shortest_path w/o source and target returns a generator instead of + # a dict beginning in version 3.5. Only the first call needed changed here. 
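+        # (Wrapping in dict() keeps these assertions valid whether
+        # nx.shortest_path returns a dict or a generator of pairs.)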
+        p = dict(nx.shortest_path(self.cycle))
+        assert p[0][3] == [0, 1, 2, 3]
+        assert p == dict(nx.all_pairs_shortest_path(self.cycle))
+        p = dict(nx.shortest_path(self.grid))
+        validate_grid_path(4, 4, 1, 12, p[1][12])
+        # now with weights
+        p = dict(nx.shortest_path(self.cycle, weight="weight"))
+        assert p[0][3] == [0, 1, 2, 3]
+        assert p == dict(nx.all_pairs_dijkstra_path(self.cycle))
+        p = dict(nx.shortest_path(self.grid, weight="weight"))
+        validate_grid_path(4, 4, 1, 12, p[1][12])
+        # weights and method specified
+        p = dict(nx.shortest_path(self.cycle, weight="weight", method="dijkstra"))
+        assert p[0][3] == [0, 1, 2, 3]
+        assert p == dict(nx.all_pairs_dijkstra_path(self.cycle))
+        p = dict(nx.shortest_path(self.cycle, weight="weight", method="bellman-ford"))
+        assert p[0][3] == [0, 1, 2, 3]
+        assert p == dict(nx.all_pairs_bellman_ford_path(self.cycle))
+
+    def test_all_pairs_shortest_path_length(self):
+        ans = dict(nx.shortest_path_length(self.cycle))
+        assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
+        assert ans == dict(nx.all_pairs_shortest_path_length(self.cycle))
+        ans = dict(nx.shortest_path_length(self.grid))
+        assert ans[1][16] == 6
+        # now with weights
+        ans = dict(nx.shortest_path_length(self.cycle, weight="weight"))
+        assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
+        assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
+        ans = dict(nx.shortest_path_length(self.grid, weight="weight"))
+        assert ans[1][16] == 6
+        # weights and method specified
+        ans = dict(
+            nx.shortest_path_length(self.cycle, weight="weight", method="dijkstra")
+        )
+        assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
+        assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
+        ans = dict(
+            nx.shortest_path_length(self.cycle, weight="weight", method="bellman-ford")
+        )
+        assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
+        assert ans == dict(nx.all_pairs_bellman_ford_path_length(self.cycle))
+
+    def test_all_pairs_all_shortest_paths(self):
+        ans = dict(nx.all_pairs_all_shortest_paths(nx.cycle_graph(4)))
+        assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
+        ans = dict(nx.all_pairs_all_shortest_paths(nx.cycle_graph(4), weight="weight"))
+        assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
+        ans = dict(
+            nx.all_pairs_all_shortest_paths(
+                nx.cycle_graph(4), method="bellman-ford", weight="weight"
+            )
+        )
+        assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
+        G = nx.cycle_graph(4)
+        G.add_node(4)
+        ans = dict(nx.all_pairs_all_shortest_paths(G))
+        assert sorted(ans[4][4]) == [[4]]
+
+    def test_has_path(self):
+        G = nx.Graph()
+        nx.add_path(G, range(3))
+        nx.add_path(G, range(3, 5))
+        assert nx.has_path(G, 0, 2)
+        assert not nx.has_path(G, 0, 4)
+
+    def test_has_path_singleton(self):
+        G = nx.empty_graph(1)
+        assert nx.has_path(G, 0, 0)
+
+    def test_all_shortest_paths(self):
+        G = nx.Graph()
+        nx.add_path(G, [0, 1, 2, 3])
+        nx.add_path(G, [0, 10, 20, 3])
+        assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(nx.all_shortest_paths(G, 0, 3))
+        # with weights
+        G = nx.Graph()
+        nx.add_path(G, [0, 1, 2, 3])
+        nx.add_path(G, [0, 10, 20, 3])
+        assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(
+            nx.all_shortest_paths(G, 0, 3, weight="weight")
+        )
+        # weights and method specified
+        G = nx.Graph()
+        nx.add_path(G, [0, 1, 2, 3])
+        nx.add_path(G, [0, 10, 20, 3])
+        assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(
+            nx.all_shortest_paths(G, 0, 3, weight="weight", method="dijkstra")
+        )
+        G = nx.Graph()
+        nx.add_path(G, [0, 1, 2, 3])
+        nx.add_path(G, [0, 10, 20, 3])
+
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted( + nx.all_shortest_paths(G, 0, 3, weight="weight", method="bellman-ford") + ) + + def test_all_shortest_paths_raise(self): + with pytest.raises(nx.NetworkXNoPath): + G = nx.path_graph(4) + G.add_node(4) + list(nx.all_shortest_paths(G, 0, 4)) + + def test_bad_method(self): + with pytest.raises(ValueError): + G = nx.path_graph(2) + list(nx.all_shortest_paths(G, 0, 1, weight="weight", method="SPAM")) + + def test_single_source_all_shortest_paths_bad_method(self): + with pytest.raises(ValueError): + G = nx.path_graph(2) + dict( + nx.single_source_all_shortest_paths( + G, 0, weight="weight", method="SPAM" + ) + ) + + def test_all_shortest_paths_zero_weight_edge(self): + g = nx.Graph() + nx.add_path(g, [0, 1, 3]) + nx.add_path(g, [0, 1, 2, 3]) + g.edges[1, 2]["weight"] = 0 + paths30d = list( + nx.all_shortest_paths(g, 3, 0, weight="weight", method="dijkstra") + ) + paths03d = list( + nx.all_shortest_paths(g, 0, 3, weight="weight", method="dijkstra") + ) + paths30b = list( + nx.all_shortest_paths(g, 3, 0, weight="weight", method="bellman-ford") + ) + paths03b = list( + nx.all_shortest_paths(g, 0, 3, weight="weight", method="bellman-ford") + ) + assert sorted(paths03d) == sorted(p[::-1] for p in paths30d) + assert sorted(paths03d) == sorted(p[::-1] for p in paths30b) + assert sorted(paths03b) == sorted(p[::-1] for p in paths30b) + + +class TestAverageShortestPathLength: + def test_cycle_graph(self): + ans = nx.average_shortest_path_length(nx.cycle_graph(7)) + assert ans == pytest.approx(2, abs=1e-7) + + def test_path_graph(self): + ans = nx.average_shortest_path_length(nx.path_graph(5)) + assert ans == pytest.approx(2, abs=1e-7) + + def test_weighted(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight") + assert ans == pytest.approx(4, abs=1e-7) + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight") + assert ans == pytest.approx(4, abs=1e-7) + + def test_specified_methods(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall" + ) + assert ans == pytest.approx(4, abs=1e-7) + + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall" + ) + assert ans == pytest.approx(4, abs=1e-7) + + def test_directed_not_strongly_connected(self): + G = nx.DiGraph([(0, 1)]) + with pytest.raises(nx.NetworkXError, match="Graph is not strongly connected"): + nx.average_shortest_path_length(G) + + def test_undirected_not_connected(self): + g = nx.Graph() + g.add_nodes_from(range(3)) + g.add_edge(0, 1) + pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g) + + def test_trivial_graph(self): + """Tests that the trivial graph has average path length zero, + since there is exactly one path of length zero in the trivial + graph. + + For more information, see issue #1960. 
+ + """ + G = nx.trivial_graph() + assert nx.average_shortest_path_length(G) == 0 + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.average_shortest_path_length(nx.null_graph()) + + def test_bad_method(self): + with pytest.raises(ValueError): + G = nx.path_graph(2) + nx.average_shortest_path_length(G, weight="weight", method="SPAM") + + +class TestAverageShortestPathLengthNumpy: + @classmethod + def setup_class(cls): + global np + import pytest + + np = pytest.importorskip("numpy") + + def test_specified_methods_numpy(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall-numpy" + ) + np.testing.assert_almost_equal(ans, 4) + + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall-numpy" + ) + np.testing.assert_almost_equal(ans, 4) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py new file mode 100644 index 0000000..4adbd84 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py @@ -0,0 +1,149 @@ +import pytest + +import networkx as nx + + +def validate_grid_path(r, c, s, t, p): + assert isinstance(p, list) + assert p[0] == s + assert p[-1] == t + s = ((s - 1) // c, (s - 1) % c) + t = ((t - 1) // c, (t - 1) % c) + assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1 + p = [((u - 1) // c, (u - 1) % c) for u in p] + for u in p: + assert 0 <= u[0] < r + assert 0 <= u[1] < c + for u, v in zip(p[:-1], p[1:]): + assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)] + + +class TestUnweightedPath: + @classmethod + def setup_class(cls): + from networkx import convert_node_labels_to_integers as cnlti + + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + + def test_bidirectional_shortest_path(self): + assert nx.bidirectional_shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3] + assert nx.bidirectional_shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4] + validate_grid_path( + 4, 4, 1, 12, nx.bidirectional_shortest_path(self.grid, 1, 12) + ) + assert nx.bidirectional_shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3] + # test source = target + assert nx.bidirectional_shortest_path(self.cycle, 3, 3) == [3] + + @pytest.mark.parametrize( + ("src", "tgt"), + ( + (8, 3), # source not in graph + (3, 8), # target not in graph + (8, 10), # neither source nor target in graph + (8, 8), # src == tgt, neither in graph - tests order of input checks + ), + ) + def test_bidirectional_shortest_path_src_tgt_not_in_graph(self, src, tgt): + with pytest.raises( + nx.NodeNotFound, + match=f"(Source {src}|Target {tgt}) is not in G", + ): + nx.bidirectional_shortest_path(self.cycle, src, tgt) + + def test_shortest_path_length(self): + assert nx.shortest_path_length(self.cycle, 0, 3) == 3 + assert nx.shortest_path_length(self.grid, 1, 12) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4 + # now with weights + assert nx.shortest_path_length(self.cycle, 0, 3, weight=True) == 3 + assert nx.shortest_path_length(self.grid, 1, 12, weight=True) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight=True) == 4 + + def 
test_single_source_shortest_path(self): + p = nx.single_source_shortest_path(self.directed_cycle, 3) + assert p[0] == [3, 4, 5, 6, 0] + p = nx.single_source_shortest_path(self.cycle, 0) + assert p[3] == [0, 1, 2, 3] + p = nx.single_source_shortest_path(self.cycle, 0, cutoff=0) + assert p == {0: [0]} + + def test_single_source_shortest_path_length(self): + pl = nx.single_source_shortest_path_length + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert dict(pl(self.cycle, 0)) == lengths + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6} + assert dict(pl(self.directed_cycle, 0)) == lengths + + def test_single_target_shortest_path(self): + p = nx.single_target_shortest_path(self.directed_cycle, 0) + assert p[3] == [3, 4, 5, 6, 0] + p = nx.single_target_shortest_path(self.cycle, 0) + assert p[3] == [3, 2, 1, 0] + p = nx.single_target_shortest_path(self.cycle, 0, cutoff=0) + assert p == {0: [0]} + # test missing targets + target = 8 + with pytest.raises(nx.NodeNotFound, match=f"Target {target} not in G"): + nx.single_target_shortest_path(self.cycle, target) + + def test_single_target_shortest_path_length(self): + pl = nx.single_target_shortest_path_length + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert pl(self.cycle, 0) == lengths + lengths = {0: 0, 1: 6, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + assert pl(self.directed_cycle, 0) == lengths + # test missing targets + target = 8 + with pytest.raises(nx.NodeNotFound, match=f"Target {target} is not in G"): + nx.single_target_shortest_path_length(self.cycle, target) + + def test_all_pairs_shortest_path(self): + p = dict(nx.all_pairs_shortest_path(self.cycle)) + assert p[0][3] == [0, 1, 2, 3] + p = dict(nx.all_pairs_shortest_path(self.grid)) + validate_grid_path(4, 4, 1, 12, p[1][12]) + + def test_all_pairs_shortest_path_length(self): + l = dict(nx.all_pairs_shortest_path_length(self.cycle)) + assert l[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + l = dict(nx.all_pairs_shortest_path_length(self.grid)) + assert l[1][16] == 6 + + def test_predecessor_path(self): + G = nx.path_graph(4) + assert nx.predecessor(G, 0) == {0: [], 1: [0], 2: [1], 3: [2]} + assert nx.predecessor(G, 0, 3) == [2] + + def test_predecessor_cycle(self): + G = nx.cycle_graph(4) + pred = nx.predecessor(G, 0) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == [0] + + def test_predecessor_cutoff(self): + G = nx.path_graph(4) + p = nx.predecessor(G, 0, 3) + assert 4 not in p + + def test_predecessor_target(self): + G = nx.path_graph(4) + p = nx.predecessor(G, 0, 3) + assert p == [2] + p = nx.predecessor(G, 0, 3, cutoff=2) + assert p == [] + p, s = nx.predecessor(G, 0, 3, return_seen=True) + assert p == [2] + assert s == 3 + p, s = nx.predecessor(G, 0, 3, cutoff=2, return_seen=True) + assert p == [] + assert s == -1 + + def test_predecessor_missing_source(self): + source = 8 + with pytest.raises(nx.NodeNotFound, match=f"Source {source} not in G"): + nx.predecessor(self.cycle, source) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py new file mode 100644 index 0000000..30bb07e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py @@ -0,0 +1,972 @@ +import pytest + +import networkx as nx +from networkx.utils import pairwise + + +def validate_path(G, s, t, soln_len, path, weight="weight"): + assert path[0] 
== s + assert path[-1] == t + + if callable(weight): + weight_f = weight + else: + if G.is_multigraph(): + + def weight_f(u, v, d): + return min(e.get(weight, 1) for e in d.values()) + + else: + + def weight_f(u, v, d): + return d.get(weight, 1) + + computed = sum(weight_f(u, v, G[u][v]) for u, v in pairwise(path)) + assert soln_len == computed + + +def validate_length_path(G, s, t, soln_len, length, path, weight="weight"): + assert soln_len == length + validate_path(G, s, t, length, path, weight=weight) + + +class WeightedTestBase: + """Base class for test classes that test functions for computing + shortest paths in weighted graphs. + + """ + + def setup_method(self): + """Creates some graphs for use in the unit tests.""" + cnlti = nx.convert_node_labels_to_integers + self.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + self.cycle = nx.cycle_graph(7) + self.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + self.XG = nx.DiGraph() + self.XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + self.MXG = nx.MultiDiGraph(self.XG) + self.MXG.add_edge("s", "u", weight=15) + self.XG2 = nx.DiGraph() + self.XG2.add_weighted_edges_from( + [ + [1, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 3, 1], + [1, 3, 50], + [1, 2, 100], + [2, 3, 100], + ] + ) + + self.XG3 = nx.Graph() + self.XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + + self.XG4 = nx.Graph() + self.XG4.add_weighted_edges_from( + [ + [0, 1, 2], + [1, 2, 2], + [2, 3, 1], + [3, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 7, 1], + [7, 0, 1], + ] + ) + self.MXG4 = nx.MultiGraph(self.XG4) + self.MXG4.add_edge(0, 1, weight=3) + self.G = nx.DiGraph() # no weights + self.G.add_edges_from( + [ + ("s", "u"), + ("s", "x"), + ("u", "v"), + ("u", "x"), + ("v", "y"), + ("x", "u"), + ("x", "v"), + ("x", "y"), + ("y", "s"), + ("y", "v"), + ] + ) + + +class TestWeightedPath(WeightedTestBase): + def test_dijkstra(self): + (D, P) = nx.single_source_dijkstra(self.XG, "s") + validate_path(self.XG, "s", "v", 9, P["v"]) + assert D["v"] == 9 + + validate_path( + self.XG, "s", "v", 9, nx.single_source_dijkstra_path(self.XG, "s")["v"] + ) + assert nx.single_source_dijkstra_path_length(self.XG, "s")["v"] == 9 + + validate_path( + self.XG, "s", "v", 9, nx.single_source_dijkstra(self.XG, "s")[1]["v"] + ) + validate_path( + self.MXG, "s", "v", 9, nx.single_source_dijkstra_path(self.MXG, "s")["v"] + ) + + GG = self.XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + (D, P) = nx.single_source_dijkstra(GG, "s") + validate_path(GG, "s", "v", 8, P["v"]) + assert D["v"] == 8 # uses lower weight of 2 on u<->x edge + validate_path(GG, "s", "v", 8, nx.dijkstra_path(GG, "s", "v")) + assert nx.dijkstra_path_length(GG, "s", "v") == 8 + + validate_path(self.XG2, 1, 3, 4, nx.dijkstra_path(self.XG2, 1, 3)) + validate_path(self.XG3, 0, 3, 15, nx.dijkstra_path(self.XG3, 0, 3)) + assert nx.dijkstra_path_length(self.XG3, 0, 3) == 15 + validate_path(self.XG4, 0, 2, 4, nx.dijkstra_path(self.XG4, 0, 2)) + assert nx.dijkstra_path_length(self.XG4, 0, 2) == 4 + validate_path(self.MXG4, 0, 2, 4, nx.dijkstra_path(self.MXG4, 0, 2)) + validate_path( + self.G, "s", "v", 2, nx.single_source_dijkstra(self.G, "s", "v")[1] + ) + validate_path( + self.G, "s", "v", 
2, nx.single_source_dijkstra(self.G, "s")[1]["v"] + ) + + validate_path(self.G, "s", "v", 2, nx.dijkstra_path(self.G, "s", "v")) + assert nx.dijkstra_path_length(self.G, "s", "v") == 2 + + # NetworkXError: node s not reachable from moon + pytest.raises(nx.NetworkXNoPath, nx.dijkstra_path, self.G, "s", "moon") + pytest.raises(nx.NetworkXNoPath, nx.dijkstra_path_length, self.G, "s", "moon") + + validate_path(self.cycle, 0, 3, 3, nx.dijkstra_path(self.cycle, 0, 3)) + validate_path(self.cycle, 0, 4, 3, nx.dijkstra_path(self.cycle, 0, 4)) + + assert nx.single_source_dijkstra(self.cycle, 0, 0) == (0, [0]) + + def test_bidirectional_dijkstra(self): + validate_length_path( + self.XG, "s", "v", 9, *nx.bidirectional_dijkstra(self.XG, "s", "v") + ) + validate_length_path( + self.G, "s", "v", 2, *nx.bidirectional_dijkstra(self.G, "s", "v") + ) + validate_length_path( + self.cycle, 0, 3, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 3) + ) + validate_length_path( + self.cycle, 0, 4, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 4) + ) + validate_length_path( + self.XG3, 0, 3, 15, *nx.bidirectional_dijkstra(self.XG3, 0, 3) + ) + validate_length_path( + self.XG4, 0, 2, 4, *nx.bidirectional_dijkstra(self.XG4, 0, 2) + ) + + # need more tests here + P = nx.single_source_dijkstra_path(self.XG, "s")["v"] + validate_path( + self.XG, + "s", + "v", + sum(self.XG[u][v]["weight"] for u, v in zip(P[:-1], P[1:])), + nx.dijkstra_path(self.XG, "s", "v"), + ) + + # check absent source + G = nx.path_graph(2) + pytest.raises(nx.NodeNotFound, nx.bidirectional_dijkstra, G, 3, 0) + + def test_weight_functions(self): + def heuristic(*z): + return sum(val**2 for val in z) + + def getpath(pred, v, s): + return [v] if v == s else getpath(pred, pred[v], s) + [v] + + def goldberg_radzik(g, s, t, weight="weight"): + pred, dist = nx.goldberg_radzik(g, s, weight=weight) + dist = dist[t] + return dist, getpath(pred, t, s) + + def astar(g, s, t, weight="weight"): + path = nx.astar_path(g, s, t, heuristic, weight=weight) + dist = nx.astar_path_length(g, s, t, heuristic, weight=weight) + return dist, path + + def vlp(G, s, t, l, F, w): + res = F(G, s, t, weight=w) + validate_length_path(G, s, t, l, *res, weight=w) + + G = self.cycle + s = 6 + t = 4 + path = [6] + list(range(t + 1)) + + def weight(u, v, _): + return 1 + v**2 + + length = sum(weight(u, v, None) for u, v in pairwise(path)) + vlp(G, s, t, length, nx.bidirectional_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_bellman_ford, weight) + vlp(G, s, t, length, goldberg_radzik, weight) + vlp(G, s, t, length, astar, weight) + + def weight(u, v, _): + return 2 ** (u * v) + + length = sum(weight(u, v, None) for u, v in pairwise(path)) + vlp(G, s, t, length, nx.bidirectional_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_bellman_ford, weight) + vlp(G, s, t, length, goldberg_radzik, weight) + vlp(G, s, t, length, astar, weight) + + def test_bidirectional_dijkstra_no_path(self): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5, 6]) + path = nx.bidirectional_dijkstra(G, 1, 6) + + @pytest.mark.parametrize( + "fn", + ( + nx.dijkstra_path, + nx.dijkstra_path_length, + nx.single_source_dijkstra_path, + nx.single_source_dijkstra_path_length, + nx.single_source_dijkstra, + nx.dijkstra_predecessor_and_distance, + ), + ) + def test_absent_source(self, fn): + G = nx.path_graph(2) + with 
pytest.raises(nx.NodeNotFound): + fn(G, 3, 0) + # Test when source == target, which is handled specially by some functions + with pytest.raises(nx.NodeNotFound): + fn(G, 3, 3) + + def test_dijkstra_predecessor1(self): + G = nx.path_graph(4) + assert nx.dijkstra_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2]}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + + def test_dijkstra_predecessor2(self): + # 4-cycle + G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)]) + pred, dist = nx.dijkstra_predecessor_and_distance(G, (0)) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == [0] + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + def test_dijkstra_predecessor3(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + (P, D) = nx.dijkstra_predecessor_and_distance(XG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + (P, D) = nx.dijkstra_predecessor_and_distance(XG, "s", cutoff=8) + assert "v" not in D + + def test_single_source_dijkstra_path_length(self): + pl = nx.single_source_dijkstra_path_length + assert dict(pl(self.MXG4, 0))[2] == 4 + spl = pl(self.MXG4, 0, cutoff=2) + assert 2 not in spl + + def test_bidirectional_dijkstra_multigraph(self): + G = nx.MultiGraph() + G.add_edge("a", "b", weight=10) + G.add_edge("a", "b", weight=100) + dp = nx.bidirectional_dijkstra(G, "a", "b") + assert dp == (10, ["a", "b"]) + + def test_dijkstra_pred_distance_multigraph(self): + G = nx.MultiGraph() + G.add_edge("a", "b", key="short", foo=5, weight=100) + G.add_edge("a", "b", key="long", bar=1, weight=110) + p, d = nx.dijkstra_predecessor_and_distance(G, "a") + assert p == {"a": [], "b": ["a"]} + assert d == {"a": 0, "b": 100} + + def test_negative_edge_cycle(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + assert not nx.negative_edge_cycle(G) + G.add_edge(8, 9, weight=-7) + G.add_edge(9, 8, weight=3) + graph_size = len(G) + assert nx.negative_edge_cycle(G) + assert graph_size == len(G) + pytest.raises(ValueError, nx.single_source_dijkstra_path_length, G, 8) + pytest.raises(ValueError, nx.single_source_dijkstra, G, 8) + pytest.raises(ValueError, nx.dijkstra_predecessor_and_distance, G, 8) + G.add_edge(9, 10) + pytest.raises(ValueError, nx.bidirectional_dijkstra, G, 8, 10) + G = nx.MultiDiGraph() + G.add_edge(2, 2, weight=-1) + assert nx.negative_edge_cycle(G) + + def test_negative_edge_cycle_empty(self): + G = nx.DiGraph() + assert not nx.negative_edge_cycle(G) + + def test_negative_edge_cycle_custom_weight_key(self): + d = nx.DiGraph() + d.add_edge("a", "b", w=-2) + d.add_edge("b", "a", w=-1) + assert nx.negative_edge_cycle(d, weight="w") + + def test_weight_function(self): + """Tests that a callable weight is interpreted as a weight + function instead of an edge attribute. + + """ + # Create a triangle in which the edge from node 0 to node 2 has + # a large weight and the other two edges have a small weight. + G = nx.complete_graph(3) + G.adj[0][2]["weight"] = 10 + G.adj[0][1]["weight"] = 1 + G.adj[1][2]["weight"] = 1 + + # The weight function will take the multiplicative inverse of + # the weights on the edges. This way, weights that were large + # before now become small and vice versa. + + def weight(u, v, d): + return 1 / d["weight"] + + # The shortest path from 0 to 2 using the actual weights on the + # edges should be [0, 1, 2]. 
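+        # (With weight(u, v, d) = 1 / d["weight"], the direct 0-2 edge costs
+        # 1 / 10 while 0-1-2 costs 1 / 1 + 1 / 1 = 2, so the preference flips.)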
+ distance, path = nx.single_source_dijkstra(G, 0, 2) + assert distance == 2 + assert path == [0, 1, 2] + # However, with the above weight function, the shortest path + # should be [0, 2], since that has a very small weight. + distance, path = nx.single_source_dijkstra(G, 0, 2, weight=weight) + assert distance == 1 / 10 + assert path == [0, 2] + + def test_all_pairs_dijkstra_path(self): + cycle = nx.cycle_graph(7) + p = dict(nx.all_pairs_dijkstra_path(cycle)) + assert p[0][3] == [0, 1, 2, 3] + + cycle[1][2]["weight"] = 10 + p = dict(nx.all_pairs_dijkstra_path(cycle)) + assert p[0][3] == [0, 6, 5, 4, 3] + + def test_all_pairs_dijkstra_path_length(self): + cycle = nx.cycle_graph(7) + pl = dict(nx.all_pairs_dijkstra_path_length(cycle)) + assert pl[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + + cycle[1][2]["weight"] = 10 + pl = dict(nx.all_pairs_dijkstra_path_length(cycle)) + assert pl[0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + + def test_all_pairs_dijkstra(self): + cycle = nx.cycle_graph(7) + out = dict(nx.all_pairs_dijkstra(cycle)) + assert out[0][0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert out[0][1][3] == [0, 1, 2, 3] + + cycle[1][2]["weight"] = 10 + out = dict(nx.all_pairs_dijkstra(cycle)) + assert out[0][0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + assert out[0][1][3] == [0, 6, 5, 4, 3] + + +class TestDijkstraPathLength: + """Unit tests for the :func:`networkx.dijkstra_path_length` + function. + + """ + + def test_weight_function(self): + """Tests for computing the length of the shortest path using + Dijkstra's algorithm with a user-defined weight function. + + """ + # Create a triangle in which the edge from node 0 to node 2 has + # a large weight and the other two edges have a small weight. + G = nx.complete_graph(3) + G.adj[0][2]["weight"] = 10 + G.adj[0][1]["weight"] = 1 + G.adj[1][2]["weight"] = 1 + + # The weight function will take the multiplicative inverse of + # the weights on the edges. This way, weights that were large + # before now become small and vice versa. + + def weight(u, v, d): + return 1 / d["weight"] + + # The shortest path from 0 to 2 using the actual weights on the + # edges should be [0, 1, 2]. However, with the above weight + # function, the shortest path should be [0, 2], since that has a + # very small weight. + length = nx.dijkstra_path_length(G, 0, 2, weight=weight) + assert length == 1 / 10 + + +class TestMultiSourceDijkstra: + """Unit tests for the multi-source dialect of Dijkstra's shortest + path algorithms. 
+ + """ + + def test_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra(nx.Graph(), {}) + + def test_path_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra_path(nx.Graph(), {}) + + def test_path_length_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra_path_length(nx.Graph(), {}) + + @pytest.mark.parametrize( + "fn", + ( + nx.multi_source_dijkstra_path, + nx.multi_source_dijkstra_path_length, + nx.multi_source_dijkstra, + ), + ) + def test_absent_source(self, fn): + G = nx.path_graph(2) + with pytest.raises(nx.NodeNotFound): + fn(G, [3], 0) + with pytest.raises(nx.NodeNotFound): + fn(G, [3], 3) + + def test_two_sources(self): + edges = [(0, 1, 1), (1, 2, 1), (2, 3, 10), (3, 4, 1)] + G = nx.Graph() + G.add_weighted_edges_from(edges) + sources = {0, 4} + distances, paths = nx.multi_source_dijkstra(G, sources) + expected_distances = {0: 0, 1: 1, 2: 2, 3: 1, 4: 0} + expected_paths = {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [4, 3], 4: [4]} + assert distances == expected_distances + assert paths == expected_paths + + def test_simple_paths(self): + G = nx.path_graph(4) + lengths = nx.multi_source_dijkstra_path_length(G, [0]) + assert lengths == {n: n for n in G} + paths = nx.multi_source_dijkstra_path(G, [0]) + assert paths == {n: list(range(n + 1)) for n in G} + + +class TestBellmanFordAndGoldbergRadzik(WeightedTestBase): + def test_single_node_graph(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.single_source_bellman_ford_path(G, 0) == {0: [0]} + assert nx.single_source_bellman_ford_path_length(G, 0) == {0: 0} + assert nx.single_source_bellman_ford(G, 0) == ({0: 0}, {0: [0]}) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ({0: []}, {0: 0}) + assert nx.goldberg_radzik(G, 0) == ({0: None}, {0: 0}) + + def test_absent_source_bellman_ford(self): + # the check is in _bellman_ford; this provides regression testing + # against later changes to "client" Bellman-Ford functions + G = nx.path_graph(2) + for fn in ( + nx.bellman_ford_predecessor_and_distance, + nx.bellman_ford_path, + nx.bellman_ford_path_length, + nx.single_source_bellman_ford_path, + nx.single_source_bellman_ford_path_length, + nx.single_source_bellman_ford, + ): + pytest.raises(nx.NodeNotFound, fn, G, 3, 0) + pytest.raises(nx.NodeNotFound, fn, G, 3, 3) + + def test_absent_source_goldberg_radzik(self): + with pytest.raises(nx.NodeNotFound): + G = nx.path_graph(2) + nx.goldberg_radzik(G, 3, 0) + + def test_negative_cycle_heuristic(self): + G = nx.DiGraph() + G.add_edge(0, 1, weight=-1) + G.add_edge(1, 2, weight=-1) + G.add_edge(2, 3, weight=-1) + G.add_edge(3, 0, weight=3) + assert not nx.negative_edge_cycle(G, heuristic=True) + G.add_edge(2, 0, weight=1.999) + assert nx.negative_edge_cycle(G, heuristic=True) + G.edges[2, 0]["weight"] = 2 + assert not nx.negative_edge_cycle(G, heuristic=True) + + def test_negative_cycle_consistency(self): + import random + + unif = random.uniform + for random_seed in range(2): # range(20): + random.seed(random_seed) + for density in [0.1, 0.9]: # .3, .7, .9]: + for N in [1, 10, 20]: # range(1, 60 - int(30 * density)): + for max_cost in [1, 90]: # [1, 10, 40, 90]: + G = nx.binomial_graph(N, density, seed=4, directed=True) + edges = ((u, v, unif(-1, max_cost)) for u, v in G.edges) + G.add_weighted_edges_from(edges) + + no_heuristic = nx.negative_edge_cycle(G, heuristic=False) + with_heuristic = nx.negative_edge_cycle(G, heuristic=True) + assert no_heuristic == with_heuristic + + def test_negative_cycle(self): 
+ G = nx.cycle_graph(5, create_using=nx.DiGraph()) + G.add_edge(1, 2, weight=-7) + for i in range(5): + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i + ) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i) + G = nx.cycle_graph(5) # undirected Graph + G.add_edge(1, 2, weight=-3) + for i in range(5): + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i + ) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i) + G = nx.DiGraph([(1, 1, {"weight": -1})]) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1) + G = nx.MultiDiGraph([(1, 1, {"weight": -1})]) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1) + + def test_zero_cycle(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + G.add_edge(2, 3, weight=-4) + # check that zero cycle doesn't raise + nx.goldberg_radzik(G, 1) + nx.bellman_ford_predecessor_and_distance(G, 1) + + G.add_edge(2, 3, weight=-4.0001) + # check that negative cycle does raise + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1) + + def test_find_negative_cycle_longer_cycle(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + nx.add_cycle(G, [3, 5, 6, 7, 8, 9]) + G.add_edge(1, 2, weight=-30) + assert nx.find_negative_cycle(G, 1) == [0, 1, 2, 3, 4, 0] + assert nx.find_negative_cycle(G, 7) == [2, 3, 4, 0, 1, 2] + + def test_find_negative_cycle_no_cycle(self): + G = nx.path_graph(5, create_using=nx.DiGraph()) + pytest.raises(nx.NetworkXError, nx.find_negative_cycle, G, 3) + + def test_find_negative_cycle_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1, weight=-1) + assert nx.find_negative_cycle(G, 1) == [1, 0, 1] + + def test_negative_weight(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + G.add_edge(1, 2, weight=-3) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 1, 2], + 3: [0, 1, 2, 3], + 4: [0, 1, 2, 3, 4], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: -2, + 3: -1, + 4: 0, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]}, + ) + assert 
nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2], 4: [3]}, + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + ) + assert nx.goldberg_radzik(G, 0) == ( + {0: None, 1: 0, 2: 1, 3: 2, 4: 3}, + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + ) + + def test_not_connected(self): + G = nx.complete_graph(6) + G.add_edge(10, 11) + G.add_edge(10, 12) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 2], + 3: [0, 3], + 4: [0, 4], + 5: [0, 5], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: 1, + 3: 1, + 4: 1, + 5: 1, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + {0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + assert nx.goldberg_radzik(G, 0) == ( + {0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + + # not connected, with a component not containing the source that + # contains a negative cycle. + G = nx.complete_graph(6) + G.add_edges_from( + [ + ("A", "B", {"load": 3}), + ("B", "C", {"load": -10}), + ("C", "A", {"load": 2}), + ] + ) + assert nx.single_source_bellman_ford_path(G, 0, weight="load") == { + 0: [0], + 1: [0, 1], + 2: [0, 2], + 3: [0, 3], + 4: [0, 4], + 5: [0, 5], + } + assert nx.single_source_bellman_ford_path_length(G, 0, weight="load") == { + 0: 0, + 1: 1, + 2: 1, + 3: 1, + 4: 1, + 5: 1, + } + assert nx.single_source_bellman_ford(G, 0, weight="load") == ( + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + {0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0, weight="load") == ( + {0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + assert nx.goldberg_radzik(G, 0, weight="load") == ( + {0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + + def test_multigraph(self): + assert nx.bellman_ford_path(self.MXG, "s", "v") == ["s", "x", "u", "v"] + assert nx.bellman_ford_path_length(self.MXG, "s", "v") == 9 + assert nx.single_source_bellman_ford_path(self.MXG, "s")["v"] == [ + "s", + "x", + "u", + "v", + ] + assert nx.single_source_bellman_ford_path_length(self.MXG, "s")["v"] == 9 + D, P = nx.single_source_bellman_ford(self.MXG, "s", target="v") + assert D == 9 + assert P == ["s", "x", "u", "v"] + P, D = nx.bellman_ford_predecessor_and_distance(self.MXG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + P, D = nx.goldberg_radzik(self.MXG, "s") + assert P["v"] == "u" + assert D["v"] == 9 + assert nx.bellman_ford_path(self.MXG4, 0, 2) == [0, 1, 2] + assert nx.bellman_ford_path_length(self.MXG4, 0, 2) == 4 + assert nx.single_source_bellman_ford_path(self.MXG4, 0)[2] == [0, 1, 2] + assert nx.single_source_bellman_ford_path_length(self.MXG4, 0)[2] == 4 + D, P = nx.single_source_bellman_ford(self.MXG4, 0, target=2) + assert D == 4 + assert P == [0, 1, 2] + P, D = nx.bellman_ford_predecessor_and_distance(self.MXG4, 0) + assert P[2] == [1] + assert D[2] == 4 + P, D = nx.goldberg_radzik(self.MXG4, 0) + assert P[2] == 1 + assert D[2] == 4 + + def test_others(self): + assert nx.bellman_ford_path(self.XG, "s", "v") == ["s", "x", "u", "v"] + assert nx.bellman_ford_path_length(self.XG, "s", "v") == 9 + assert nx.single_source_bellman_ford_path(self.XG, "s")["v"] == [ + "s", + "x", + "u", + "v", + ] + assert 
nx.single_source_bellman_ford_path_length(self.XG, "s")["v"] == 9 + D, P = nx.single_source_bellman_ford(self.XG, "s", target="v") + assert D == 9 + assert P == ["s", "x", "u", "v"] + (P, D) = nx.bellman_ford_predecessor_and_distance(self.XG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + (P, D) = nx.goldberg_radzik(self.XG, "s") + assert P["v"] == "u" + assert D["v"] == 9 + + def test_path_graph(self): + G = nx.path_graph(4) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 1, 2], + 3: [0, 1, 2, 3], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: 2, 3: 3}, + {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2]}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + assert nx.goldberg_radzik(G, 0) == ( + {0: None, 1: 0, 2: 1, 3: 2}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + assert nx.single_source_bellman_ford_path(G, 3) == { + 0: [3, 2, 1, 0], + 1: [3, 2, 1], + 2: [3, 2], + 3: [3], + } + assert nx.single_source_bellman_ford_path_length(G, 3) == { + 0: 3, + 1: 2, + 2: 1, + 3: 0, + } + assert nx.single_source_bellman_ford(G, 3) == ( + {0: 3, 1: 2, 2: 1, 3: 0}, + {0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 3) == ( + {0: [1], 1: [2], 2: [3], 3: []}, + {0: 3, 1: 2, 2: 1, 3: 0}, + ) + assert nx.goldberg_radzik(G, 3) == ( + {0: 1, 1: 2, 2: 3, 3: None}, + {0: 3, 1: 2, 2: 1, 3: 0}, + ) + + def test_4_cycle(self): + # 4-cycle + G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)]) + dist, path = nx.single_source_bellman_ford(G, 0) + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + assert path[0] == [0] + assert path[1] == [0, 1] + assert path[2] in [[0, 1, 2], [0, 3, 2]] + assert path[3] == [0, 3] + + pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == [0] + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + pred, dist = nx.goldberg_radzik(G, 0) + assert pred[0] is None + assert pred[1] == 0 + assert pred[2] in [1, 3] + assert pred[3] == 0 + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + def test_negative_weight_bf_path(self): + G = nx.DiGraph() + G.add_nodes_from("abcd") + G.add_edge("a", "d", weight=0) + G.add_edge("a", "b", weight=1) + G.add_edge("b", "c", weight=-3) + G.add_edge("c", "d", weight=1) + + assert nx.bellman_ford_path(G, "a", "d") == ["a", "b", "c", "d"] + assert nx.bellman_ford_path_length(G, "a", "d") == -1 + + def test_zero_cycle_smoke(self): + D = nx.DiGraph() + D.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1), (3, 1, -2)]) + + nx.bellman_ford_path(D, 1, 3) + nx.dijkstra_path(D, 1, 3) + nx.bidirectional_dijkstra(D, 1, 3) + # FIXME nx.goldberg_radzik(D, 1) + + +class TestJohnsonAlgorithm(WeightedTestBase): + def test_single_node_graph(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.johnson(G) == {0: {0: [0]}} + + def test_negative_cycle(self): + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -5), + ("0", "2", 2), + ("1", "2", 4), + ("2", "3", 1), + ] + ) + pytest.raises(nx.NetworkXUnbounded, nx.johnson, G) + G = nx.Graph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -5), + ("0", "2", 2), + ("1", "2", 4), + ("2", "3", 1), + ] + ) + pytest.raises(nx.NetworkXUnbounded, nx.johnson, G) + + def 
test_negative_weights(self): + G = nx.DiGraph() + G.add_weighted_edges_from( + [("0", "3", 3), ("0", "1", -5), ("0", "2", 2), ("1", "2", 4), ("2", "3", 1)] + ) + paths = nx.johnson(G) + assert paths == { + "1": {"1": ["1"], "3": ["1", "2", "3"], "2": ["1", "2"]}, + "0": { + "1": ["0", "1"], + "0": ["0"], + "3": ["0", "1", "2", "3"], + "2": ["0", "1", "2"], + }, + "3": {"3": ["3"]}, + "2": {"3": ["2", "3"], "2": ["2"]}, + } + + def test_unweighted_graph(self): + G = nx.Graph() + G.add_edges_from([(1, 0), (2, 1)]) + H = G.copy() + nx.set_edge_attributes(H, values=1, name="weight") + assert nx.johnson(G) == nx.johnson(H) + + def test_partially_weighted_graph_with_negative_edges(self): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (1, 0)]) + G[1][0]["weight"] = -2 + G[0][1]["weight"] = 3 + G[1][2]["weight"] = -4 + + H = G.copy() + H[2][0]["weight"] = 1 + + I = G.copy() + I[2][0]["weight"] = 8 + + assert nx.johnson(G) == nx.johnson(H) + assert nx.johnson(G) != nx.johnson(I) + + def test_graphs(self): + validate_path(self.XG, "s", "v", 9, nx.johnson(self.XG)["s"]["v"]) + validate_path(self.MXG, "s", "v", 9, nx.johnson(self.MXG)["s"]["v"]) + validate_path(self.XG2, 1, 3, 4, nx.johnson(self.XG2)[1][3]) + validate_path(self.XG3, 0, 3, 15, nx.johnson(self.XG3)[0][3]) + validate_path(self.XG4, 0, 2, 4, nx.johnson(self.XG4)[0][2]) + validate_path(self.MXG4, 0, 2, 4, nx.johnson(self.MXG4)[0][2]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/unweighted.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/unweighted.py new file mode 100644 index 0000000..2a53031 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/unweighted.py @@ -0,0 +1,562 @@ +""" +Shortest path algorithms for unweighted graphs. +""" + +import networkx as nx + +__all__ = [ + "bidirectional_shortest_path", + "single_source_shortest_path", + "single_source_shortest_path_length", + "single_target_shortest_path", + "single_target_shortest_path_length", + "all_pairs_shortest_path", + "all_pairs_shortest_path_length", + "predecessor", +] + + +@nx._dispatchable +def single_source_shortest_path_length(G, source, cutoff=None): + """Compute the shortest path lengths from `source` to all reachable nodes in `G`. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= `cutoff` are returned. + + Returns + ------- + lengths : dict + Dict keyed by node to shortest path length to `source`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.single_source_shortest_path_length(G, 0) + {0: 0, 1: 1, 2: 2, 3: 3, 4: 4} + + See Also + -------- + :any:`shortest_path_length` : + Shortest path length with specifiable source, target, and weight. + :any:`single_source_dijkstra_path_length` : + Shortest weighted path length from source with Dijkstra algorithm. + :any:`single_source_bellman_ford_path_length` : + Shortest weighted path length from source with Bellman-Ford algorithm. 
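+
+    A ``cutoff`` bounds the search depth; nodes more than ``cutoff`` hops
+    away are omitted from the result (an illustrative example):
+
+    >>> nx.single_source_shortest_path_length(nx.path_graph(5), 0, cutoff=2)
+    {0: 0, 1: 1, 2: 2}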
+ """ + if source not in G: + raise nx.NodeNotFound(f"Source {source} is not in G") + if cutoff is None: + cutoff = float("inf") + nextlevel = [source] + return dict(_single_shortest_path_length(G._adj, nextlevel, cutoff)) + + +def _single_shortest_path_length(adj, firstlevel, cutoff): + """Yields (node, level) in a breadth first search + + Shortest Path Length helper function + Parameters + ---------- + adj : dict + Adjacency dict or view + firstlevel : list + starting nodes, e.g. [source] or [target] + cutoff : int or float + level at which we stop the process + """ + seen = set(firstlevel) + nextlevel = firstlevel + level = 0 + n = len(adj) + for v in nextlevel: + yield (v, level) + while nextlevel and cutoff > level: + level += 1 + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in adj[v]: + if w not in seen: + seen.add(w) + nextlevel.append(w) + yield (w, level) + if len(seen) == n: + return + + +@nx._dispatchable +def single_target_shortest_path_length(G, target, cutoff=None): + """Compute the shortest path lengths to target from all reachable nodes. + + Parameters + ---------- + G : NetworkX graph + + target : node + Target node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + lengths : dictionary + Dictionary, keyed by source, of shortest path lengths. + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> length = nx.single_target_shortest_path_length(G, 4) + >>> length[0] + 4 + >>> for node in range(5): + ... print(f"{node}: {length[node]}") + 0: 4 + 1: 3 + 2: 2 + 3: 1 + 4: 0 + + See Also + -------- + single_source_shortest_path_length, shortest_path_length + """ + if target not in G: + raise nx.NodeNotFound(f"Target {target} is not in G") + if cutoff is None: + cutoff = float("inf") + # handle either directed or undirected + adj = G._pred if G.is_directed() else G._adj + nextlevel = [target] + return dict(_single_shortest_path_length(adj, nextlevel, cutoff)) + + +@nx._dispatchable +def all_pairs_shortest_path_length(G, cutoff=None): + """Computes the shortest path lengths between all nodes in `G`. + + Parameters + ---------- + G : NetworkX graph + + cutoff : integer, optional + Depth at which to stop the search. Only paths of length at most + `cutoff` are returned. + + Returns + ------- + lengths : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path length as the key value. + + Notes + ----- + The iterator returned only has reachable node pairs. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.all_pairs_shortest_path_length(G)) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"1 - {node}: {length[1][node]}") + 1 - 0: 1 + 1 - 1: 0 + 1 - 2: 1 + 1 - 3: 2 + 1 - 4: 3 + >>> length[3][2] + 1 + >>> length[2][2] + 0 + + """ + length = single_source_shortest_path_length + # TODO This can be trivially parallelized. + for n in G: + yield (n, length(G, n, cutoff=cutoff)) + + +@nx._dispatchable +def bidirectional_shortest_path(G, source, target): + """Returns a list of nodes in a shortest path between source and target. + + Parameters + ---------- + G : NetworkX graph + + source : node label + starting node for path + + target : node label + ending node for path + + Returns + ------- + path: list + List of nodes in a path from source to target. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3, 0, 4, 5, 6, 7, 4]) + >>> nx.bidirectional_shortest_path(G, 2, 6) + [2, 1, 0, 4, 5, 6] + + See Also + -------- + shortest_path + + Notes + ----- + This algorithm is used by shortest_path(G, source, target). + """ + + if source not in G: + raise nx.NodeNotFound(f"Source {source} is not in G") + + if target not in G: + raise nx.NodeNotFound(f"Target {target} is not in G") + + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from source to w + while w is not None: + path.append(w) + w = pred[w] + path.reverse() + # from w to target + w = succ[path[-1]] + while w is not None: + path.append(w) + w = succ[w] + + return path + + +def _bidirectional_pred_succ(G, source, target): + """Bidirectional shortest path helper. + + Returns (pred, succ, w) where + pred is a dictionary of predecessors from w to the source, and + succ is a dictionary of successors from w to the target. + """ + # does BFS from both source and target and meets in the middle + if target == source: + return ({target: None}, {source: None}, source) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.pred + Gsucc = G.succ + else: + Gpred = G.adj + Gsucc = G.adj + + # predecessor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + while forward_fringe and reverse_fringe: + if len(forward_fringe) <= len(reverse_fringe): + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc[v]: + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: # path found + return pred, succ, w + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred[v]: + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: # found path + return pred, succ, w + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +@nx._dispatchable +def single_source_shortest_path(G, source, cutoff=None): + """Compute shortest path between source + and all other nodes reachable from source. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + paths : dictionary + Dictionary, keyed by target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = nx.single_source_shortest_path(G, 0) + >>> path[4] + [0, 1, 2, 3, 4] + + Notes + ----- + The shortest path is not necessarily unique. So there can be multiple + paths between the source and each target node, all of which have the + same 'shortest' length. For each target node, this function returns + only one of those paths. 
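+
+    For instance, in a 4-cycle both ``[0, 1, 2]`` and ``[0, 3, 2]`` are
+    shortest paths from 0 to 2; exactly one of them is returned
+    (an illustrative example):
+
+    >>> nx.single_source_shortest_path(nx.cycle_graph(4), 0)[2] in ([0, 1, 2], [0, 3, 2])
+    True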
+
+    See Also
+    --------
+    shortest_path
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"Source {source} not in G")
+
+    def join(p1, p2):
+        return p1 + p2
+
+    if cutoff is None:
+        cutoff = float("inf")
+    nextlevel = {source: 1}  # dict of nodes to check at next level
+    paths = {source: [source]}  # dict of paths, keyed by node, from source
+    return dict(_single_shortest_path(G.adj, nextlevel, paths, cutoff, join))
+
+
+def _single_shortest_path(adj, firstlevel, paths, cutoff, join):
+    """Returns shortest paths
+
+    Shortest Path helper function
+    Parameters
+    ----------
+    adj : dict
+        Adjacency dict or view
+    firstlevel : dict
+        starting nodes, e.g. {source: 1} or {target: 1}
+    paths : dict
+        paths for starting nodes, e.g. {source: [source]}
+    cutoff : int or float
+        level at which we stop the process
+    join : function
+        function to construct a path from two partial paths. Requires two
+        list inputs `p1` and `p2`, and returns a list. Usually returns
+        `p1 + p2` (forward from source) or `p2 + p1` (backward from target)
+    """
+    level = 0  # the current level
+    nextlevel = firstlevel
+    while nextlevel and cutoff > level:
+        thislevel = nextlevel
+        nextlevel = {}
+        for v in thislevel:
+            for w in adj[v]:
+                if w not in paths:
+                    paths[w] = join(paths[v], [w])
+                    nextlevel[w] = 1
+        level += 1
+    return paths
+
+
+@nx._dispatchable
+def single_target_shortest_path(G, target, cutoff=None):
+    """Compute shortest path to target from all nodes that reach target.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    target : node label
+        Target node for path
+
+    cutoff : integer, optional
+        Depth to stop the search. Only paths of length <= cutoff are returned.
+
+    Returns
+    -------
+    paths : dictionary
+        Dictionary, keyed by source, of shortest paths to `target`.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
+    >>> path = nx.single_target_shortest_path(G, 4)
+    >>> path[0]
+    [0, 1, 2, 3, 4]
+
+    Notes
+    -----
+    The shortest path is not necessarily unique. So there can be multiple
+    paths between each source node and the target, all of which have the
+    same 'shortest' length. For each source node, this function returns
+    only one of those paths.
+
+    See Also
+    --------
+    shortest_path, single_source_shortest_path
+    """
+    if target not in G:
+        raise nx.NodeNotFound(f"Target {target} not in G")
+
+    def join(p1, p2):
+        return p2 + p1
+
+    # handle either directed or undirected
+    adj = G.pred if G.is_directed() else G.adj
+    if cutoff is None:
+        cutoff = float("inf")
+    nextlevel = {target: 1}  # dict of nodes to check at next level
+    paths = {target: [target]}  # dict of paths, keyed by node, to target
+    return dict(_single_shortest_path(adj, nextlevel, paths, cutoff, join))
+
+
+@nx._dispatchable
+def all_pairs_shortest_path(G, cutoff=None):
+    """Compute shortest paths between all nodes.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    cutoff : integer, optional
+        Depth at which to stop the search. Only paths of length at most
+        `cutoff` are returned.
+
+    Returns
+    -------
+    paths : iterator
+        (source, dictionary) iterator with dictionary keyed by target and
+        a shortest path as the value.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> path = dict(nx.all_pairs_shortest_path(G))
+    >>> print(path[0][4])
+    [0, 1, 2, 3, 4]
+
+    Notes
+    -----
+    There may be multiple shortest paths with the same length between
+    two nodes. For each pair, this function returns only one of those paths.
+
+    See Also
+    --------
+    floyd_warshall
+    all_pairs_all_shortest_paths
+
+    """
+    # TODO This can be trivially parallelized.
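+    # Each source node is handled by an independent BFS with no shared
+    # state, so sources could be processed in any order (or concurrently,
+    # as the TODO above suggests).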
+ for n in G: + yield (n, single_source_shortest_path(G, n, cutoff=cutoff)) + + +@nx._dispatchable +def predecessor(G, source, target=None, cutoff=None, return_seen=None): + """Returns dict of predecessors for the path from source to all nodes in G. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + target : node label, optional + Ending node for path. If provided only predecessors between + source and target are returned + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + return_seen : bool, optional (default=None) + Whether to return a dictionary, keyed by node, of the level (number of + hops) to reach the node (as seen during breadth-first-search). + + Returns + ------- + pred : dictionary + Dictionary, keyed by node, of predecessors in the shortest path. + + + (pred, seen): tuple of dictionaries + If `return_seen` argument is set to `True`, then a tuple of dictionaries + is returned. The first element is the dictionary, keyed by node, of + predecessors in the shortest path. The second element is the dictionary, + keyed by node, of the level (number of hops) to reach the node (as seen + during breadth-first-search). + + Examples + -------- + >>> G = nx.path_graph(4) + >>> list(G) + [0, 1, 2, 3] + >>> nx.predecessor(G, 0) + {0: [], 1: [0], 2: [1], 3: [2]} + >>> nx.predecessor(G, 0, return_seen=True) + ({0: [], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}) + + + """ + if source not in G: + raise nx.NodeNotFound(f"Source {source} not in G") + + level = 0 # the current level + nextlevel = [source] # list of nodes to check at next level + seen = {source: level} # level (number of hops) when seen in BFS + pred = {source: []} # predecessor dictionary + while nextlevel: + level = level + 1 + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in G[v]: + if w not in seen: + pred[w] = [v] + seen[w] = level + nextlevel.append(w) + elif seen[w] == level: # add v to predecessor list if it + pred[w].append(v) # is at the correct level + if cutoff and cutoff <= level: + break + + if target is not None: + if return_seen: + if target not in pred: + return ([], -1) # No predecessor + return (pred[target], seen[target]) + else: + if target not in pred: + return [] # No predecessor + return pred[target] + else: + if return_seen: + return (pred, seen) + else: + return pred diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/weighted.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/weighted.py new file mode 100644 index 0000000..0c69d4b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/shortest_paths/weighted.py @@ -0,0 +1,2516 @@ +""" +Shortest path algorithms for weighted graphs. 
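+
+A minimal usage sketch (illustrative only; weights are read from the
+``"weight"`` edge attribute by default):
+
+>>> import networkx as nx
+>>> G = nx.Graph()
+>>> G.add_weighted_edges_from([("a", "b", 2), ("b", "c", 1), ("a", "c", 5)])
+>>> nx.dijkstra_path(G, "a", "c")
+['a', 'b', 'c']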
+""" + +from collections import deque +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.generic import _build_paths_from_predecessors + +__all__ = [ + "dijkstra_path", + "dijkstra_path_length", + "bidirectional_dijkstra", + "single_source_dijkstra", + "single_source_dijkstra_path", + "single_source_dijkstra_path_length", + "multi_source_dijkstra", + "multi_source_dijkstra_path", + "multi_source_dijkstra_path_length", + "all_pairs_dijkstra", + "all_pairs_dijkstra_path", + "all_pairs_dijkstra_path_length", + "dijkstra_predecessor_and_distance", + "bellman_ford_path", + "bellman_ford_path_length", + "single_source_bellman_ford", + "single_source_bellman_ford_path", + "single_source_bellman_ford_path_length", + "all_pairs_bellman_ford_path", + "all_pairs_bellman_ford_path_length", + "bellman_ford_predecessor_and_distance", + "negative_edge_cycle", + "find_negative_cycle", + "goldberg_radzik", + "johnson", +] + + +def _weight_function(G, weight): + """Returns a function that returns the weight of an edge. + + The returned function is specifically suitable for input to + functions :func:`_dijkstra` and :func:`_bellman_ford_relaxation`. + + Parameters + ---------- + G : NetworkX graph. + + weight : string or function + If it is callable, `weight` itself is returned. If it is a string, + it is assumed to be the name of the edge attribute that represents + the weight of an edge. In that case, a function is returned that + gets the edge weight according to the specified edge attribute. + + Returns + ------- + function + This function returns a callable that accepts exactly three inputs: + a node, an node adjacent to the first one, and the edge attribute + dictionary for the eedge joining those nodes. That function returns + a number representing the weight of an edge. + + If `G` is a multigraph, and `weight` is not callable, the + minimum edge weight over all parallel edges is returned. If any edge + does not have an attribute with key `weight`, it is assumed to + have weight one. + + """ + if callable(weight): + return weight + # If the weight keyword argument is not callable, we assume it is a + # string representing the edge attribute containing the weight of + # the edge. + if G.is_multigraph(): + return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values()) + return lambda u, v, data: data.get(weight, 1) + + +@nx._dispatchable(edge_attrs="weight") +def dijkstra_path(G, source, target, weight="weight"): + """Returns the shortest weighted path from source to target in G. + + Uses Dijkstra's Method to compute the shortest weighted path + between two nodes in a graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node + + target : node + Ending node + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + path : list + List of nodes in a shortest path. 
+
+    Raises
+    ------
+    NodeNotFound
+        If `source` is not in `G`.
+
+    NetworkXNoPath
+        If no path exists between source and target.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> print(nx.dijkstra_path(G, 0, 4))
+    [0, 1, 2, 3, 4]
+
+    Find the edges of a shortest path in a multigraph:
+
+    >>> G = nx.MultiDiGraph()
+    >>> G.add_weighted_edges_from([(1, 2, 0.75), (1, 2, 0.5), (2, 3, 0.5), (1, 3, 1.5)])
+    >>> nodes = nx.dijkstra_path(G, 1, 3)
+    >>> edges = nx.utils.pairwise(nodes)
+    >>> list(
+    ...     (u, v, min(G[u][v], key=lambda k: G[u][v][k].get("weight", 1)))
+    ...     for u, v in edges
+    ... )
+    [(1, 2, 1), (2, 3, 0)]
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    The weight function can be used to hide edges by returning None.
+    So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
+    will find the shortest red path.
+
+    The weight function can be used to include node weights.
+
+    >>> def func(u, v, d):
+    ...     node_u_wt = G.nodes[u].get("node_weight", 1)
+    ...     node_v_wt = G.nodes[v].get("node_weight", 1)
+    ...     edge_wt = d.get("weight", 1)
+    ...     return node_u_wt / 2 + node_v_wt / 2 + edge_wt
+
+    In this example we take the average of start and end node
+    weights of an edge and add it to the weight of the edge.
+
+    The function :func:`single_source_dijkstra` computes both
+    path and length-of-path; if you need both, use that function.
+
+    See Also
+    --------
+    bidirectional_dijkstra
+    bellman_ford_path
+    single_source_dijkstra
+    """
+    (length, path) = single_source_dijkstra(G, source, target=target, weight=weight)
+    return path
+
+
+@nx._dispatchable(edge_attrs="weight")
+def dijkstra_path_length(G, source, target, weight="weight"):
+    """Returns the shortest weighted path length in G from source to target.
+
+    Uses Dijkstra's Method to compute the shortest weighted path length
+    between two nodes in a graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node label
+        starting node for path
+
+    target : node label
+        ending node for path
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number or None to indicate a hidden edge.
+
+    Returns
+    -------
+    length : number
+        Shortest path length.
+
+    Raises
+    ------
+    NodeNotFound
+        If `source` is not in `G`.
+
+    NetworkXNoPath
+        If no path exists between source and target.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> nx.dijkstra_path_length(G, 0, 4)
+    4
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    The weight function can be used to hide edges by returning None.
+    So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
+    will find the shortest red path.
+
+    The function :func:`single_source_dijkstra` computes both
+    path and length-of-path; if you need both, use that function.
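+
+    For example, a callable weight that charges 2 per edge doubles the
+    hop count (an illustrative example):
+
+    >>> nx.dijkstra_path_length(nx.path_graph(4), 0, 3, weight=lambda u, v, d: 2)
+    6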
+
+    See Also
+    --------
+    bidirectional_dijkstra
+    bellman_ford_path_length
+    single_source_dijkstra
+
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"Node {source} not found in graph")
+    if source == target:
+        return 0
+    weight = _weight_function(G, weight)
+    length = _dijkstra(G, source, weight, target=target)
+    try:
+        return length[target]
+    except KeyError as err:
+        raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") from err
+
+
+@nx._dispatchable(edge_attrs="weight")
+def single_source_dijkstra_path(G, source, cutoff=None, weight="weight"):
+    """Find shortest weighted paths in G from a source node.
+
+    Compute shortest path between source and all other reachable
+    nodes for a weighted graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Starting node for path.
+
+    cutoff : integer or float, optional
+        Length (sum of edge weights) at which the search is stopped.
+        If cutoff is provided, only return paths with summed weight <= cutoff.
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number or None to indicate a hidden edge.
+
+    Returns
+    -------
+    paths : dictionary
+        Dictionary of shortest paths keyed by target.
+
+    Raises
+    ------
+    NodeNotFound
+        If `source` is not in `G`.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> path = nx.single_source_dijkstra_path(G, 0)
+    >>> path[4]
+    [0, 1, 2, 3, 4]
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    The weight function can be used to hide edges by returning None.
+    So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
+    will find the shortest red path.
+
+    See Also
+    --------
+    single_source_dijkstra, single_source_bellman_ford
+
+    """
+    return multi_source_dijkstra_path(G, {source}, cutoff=cutoff, weight=weight)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"):
+    """Find shortest weighted path lengths in G from a source node.
+
+    Compute the shortest path length between source and all other
+    reachable nodes for a weighted graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node label
+        Starting node for path
+
+    cutoff : integer or float, optional
+        Length (sum of edge weights) at which the search is stopped.
+        If cutoff is provided, only return paths with summed weight <= cutoff.
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number or None to indicate a hidden edge.
+ + Returns + ------- + length : dict + Dict keyed by node to shortest path length from source. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = nx.single_source_dijkstra_path_length(G, 0) + >>> length[4] + 4 + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + See Also + -------- + single_source_dijkstra, single_source_bellman_ford_path_length + + """ + return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight) + + +@nx._dispatchable(edge_attrs="weight") +def single_source_dijkstra(G, source, target=None, cutoff=None, weight="weight"): + """Find shortest weighted paths and lengths from a source node. + + Compute the shortest path length between source and all other + reachable nodes for a weighted graph. + + Uses Dijkstra's algorithm to compute shortest paths and lengths + between a source and all other reachable nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + target : node label, optional + Ending node for path + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + distance, path : pair of dictionaries, or numeric and list. + If target is None, paths and lengths to all nodes are computed. + The return value is a tuple of two dictionaries keyed by target nodes. + The first dictionary stores distance to each target node. + The second stores the path to each target node. + If target is not None, returns a tuple (distance, path), where + distance is the distance from source to target and path is a list + representing the path from source to target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.single_source_dijkstra(G, 0) + >>> length[4] + 4 + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + >>> path[4] + [0, 1, 2, 3, 4] + >>> length, path = nx.single_source_dijkstra(G, 0, 1) + >>> length + 1 + >>> path + [0, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. 
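+
+    For instance, hiding the direct edge of a triangle forces the search
+    through the two remaining edges (an illustrative example):
+
+    >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)])
+    >>> def hide_direct(u, v, d):
+    ...     return None if {u, v} == {0, 2} else 1
+    >>> nx.single_source_dijkstra(G, 0, 2, weight=hide_direct)
+    (2, [0, 1, 2])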
+ + Based on the Python cookbook recipe (119466) at + https://code.activestate.com/recipes/119466/ + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). + + See Also + -------- + single_source_dijkstra_path + single_source_dijkstra_path_length + single_source_bellman_ford + """ + return multi_source_dijkstra( + G, {source}, cutoff=cutoff, target=target, weight=weight + ) + + +@nx._dispatchable(edge_attrs="weight") +def multi_source_dijkstra_path(G, sources, cutoff=None, weight="weight"): + """Find shortest weighted paths in G from a given set of source + nodes. + + Compute shortest path between any of the source nodes and all other + reachable nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + paths : dictionary + Dictionary of shortest paths keyed by target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = nx.multi_source_dijkstra_path(G, {0, 4}) + >>> path[1] + [0, 1] + >>> path[3] + [4, 3] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. + + See Also + -------- + multi_source_dijkstra, multi_source_bellman_ford + + """ + length, path = multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight) + return path + + +@nx._dispatchable(edge_attrs="weight") +def multi_source_dijkstra_path_length(G, sources, cutoff=None, weight="weight"): + """Find shortest weighted path lengths in G from a given set of + source nodes. + + Compute the shortest path length between any of the source nodes and + all other reachable nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. 
+ If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + length : dict + Dict keyed by node to shortest path length to nearest source. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = nx.multi_source_dijkstra_path_length(G, {0, 4}) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 1 + 4: 0 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. + + See Also + -------- + multi_source_dijkstra + + """ + if not sources: + raise ValueError("sources must not be empty") + for s in sources: + if s not in G: + raise nx.NodeNotFound(f"Node {s} not found in graph") + weight = _weight_function(G, weight) + return _dijkstra_multisource(G, sources, weight, cutoff=cutoff) + + +@nx._dispatchable(edge_attrs="weight") +def multi_source_dijkstra(G, sources, target=None, cutoff=None, weight="weight"): + """Find shortest weighted paths and lengths from a given set of + source nodes. + + Uses Dijkstra's algorithm to compute the shortest paths and lengths + between one of the source nodes and the given `target`, or all other + reachable nodes if not specified, for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + target : node label, optional + Ending node for path + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + distance, path : pair of dictionaries, or numeric and list + If target is None, returns a tuple of two dictionaries keyed by node. + The first dictionary stores distance from one of the source nodes. 
+ The second stores the path from one of the sources to that node. + If target is not None, returns a tuple of (distance, path) where + distance is the distance from source to target and path is a list + representing the path from source to target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.multi_source_dijkstra(G, {0, 4}) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 1 + 4: 0 + >>> path[1] + [0, 1] + >>> path[3] + [4, 3] + + >>> length, path = nx.multi_source_dijkstra(G, {0, 4}, 1) + >>> length + 1 + >>> path + [0, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Based on the Python cookbook recipe (119466) at + https://code.activestate.com/recipes/119466/ + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. + + See Also + -------- + multi_source_dijkstra_path + multi_source_dijkstra_path_length + + """ + if not sources: + raise ValueError("sources must not be empty") + for s in sources: + if s not in G: + raise nx.NodeNotFound(f"Node {s} not found in graph") + if target in sources: + return (0, [target]) + weight = _weight_function(G, weight) + paths = {source: [source] for source in sources} # dictionary of paths + dist = _dijkstra_multisource( + G, sources, weight, paths=paths, cutoff=cutoff, target=target + ) + if target is None: + return (dist, paths) + try: + return (dist[target], paths[target]) + except KeyError as err: + raise nx.NetworkXNoPath(f"No path to {target}.") from err + + +def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, target=None): + """Uses Dijkstra's algorithm to find shortest weighted paths from a + single source. + + This is a convenience function for :func:`_dijkstra_multisource` + with all the arguments the same, except the keyword argument + `sources` set to ``[source]``. + + """ + return _dijkstra_multisource( + G, [source], weight, pred=pred, paths=paths, cutoff=cutoff, target=target + ) + + +def _dijkstra_multisource( + G, sources, weight, pred=None, paths=None, cutoff=None, target=None +): + """Uses Dijkstra's algorithm to find shortest weighted paths + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty iterable of nodes + Starting nodes for paths. If this is just an iterable containing + a single node, then all paths computed by this function will + start from that node. If there are two or more nodes in this + iterable, the computed paths may begin from any one of the start + nodes. + + weight: function + Function with (u, v, data) input that returns that edge's weight + or None to indicate a hidden edge + + pred: dict of lists, optional(default=None) + dict to store a list of predecessors keyed by that node + If None, predecessors are not stored. + + paths: dict, optional (default=None) + dict to store the path list from source to each node, keyed by node. + If None, paths are not stored. + + target : node label, optional + Ending node for path. Search is halted when target is found. 
+
+    cutoff : integer or float, optional
+        Length (sum of edge weights) at which the search is stopped.
+        If cutoff is provided, only return paths with summed weight <= cutoff.
+
+    Returns
+    -------
+    distance : dictionary
+        A mapping from node to shortest distance to that node from one
+        of the source nodes.
+
+    Raises
+    ------
+    NodeNotFound
+        If any of `sources` is not in `G`.
+
+    Notes
+    -----
+    The optional predecessor and path dictionaries can be accessed by
+    the caller through the original pred and paths objects passed
+    as arguments. No need to explicitly return pred or paths.
+
+    """
+    G_succ = G._adj  # For speed-up (and works for both directed and undirected graphs)
+
+    dist = {}  # dictionary of final distances
+    seen = {}
+    # fringe is heapq with 3-tuples (distance, c, node);
+    # use the count c to avoid comparing nodes (may not be able to)
+    c = count()
+    fringe = []
+    for source in sources:
+        seen[source] = 0
+        heappush(fringe, (0, next(c), source))
+    while fringe:
+        (d, _, v) = heappop(fringe)
+        if v in dist:
+            continue  # already searched this node.
+        dist[v] = d
+        if v == target:
+            break
+        for u, e in G_succ[v].items():
+            cost = weight(v, u, e)
+            if cost is None:
+                continue
+            vu_dist = dist[v] + cost
+            if cutoff is not None:
+                if vu_dist > cutoff:
+                    continue
+            if u in dist:
+                u_dist = dist[u]
+                if vu_dist < u_dist:
+                    raise ValueError("Contradictory paths found: negative weights?")
+                elif pred is not None and vu_dist == u_dist:
+                    pred[u].append(v)
+            elif u not in seen or vu_dist < seen[u]:
+                seen[u] = vu_dist
+                heappush(fringe, (vu_dist, next(c), u))
+                if paths is not None:
+                    paths[u] = paths[v] + [u]
+                if pred is not None:
+                    pred[u] = [v]
+            elif vu_dist == seen[u]:
+                if pred is not None:
+                    pred[u].append(v)
+
+    # The optional predecessor and path dictionaries can be accessed
+    # by the caller via the pred and paths objects passed as arguments.
+    return dist
+
+
+@nx._dispatchable(edge_attrs="weight")
+def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight="weight"):
+    """Compute weighted shortest path length and predecessors.
+
+    Uses Dijkstra's Method to obtain the shortest weighted paths
+    and return dictionaries of predecessors for each node and
+    distance for each node from the `source`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node label
+        Starting node for path
+
+    cutoff : integer or float, optional
+        Length (sum of edge weights) at which the search is stopped.
+        If cutoff is provided, only return paths with summed weight <= cutoff.
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number or None to indicate a hidden edge.
+
+    Returns
+    -------
+    pred, distance : dictionaries
+        Returns two dictionaries representing a list of predecessors
+        of a node and the distance to each node.
+
+    Raises
+    ------
+    NodeNotFound
+        If `source` is not in `G`.
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    The list of predecessors contains more than one element only when
+    there are more than one shortest paths to the key node.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
+    >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0)
+    >>> sorted(pred.items())
+    [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
+    >>> sorted(dist.items())
+    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
+
+    >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0, 1)
+    >>> sorted(pred.items())
+    [(0, []), (1, [0])]
+    >>> sorted(dist.items())
+    [(0, 0), (1, 1)]
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"Node {source} is not found in the graph")
+    weight = _weight_function(G, weight)
+    pred = {source: []}  # dictionary of predecessors
+    return (pred, _dijkstra(G, source, weight, pred=pred, cutoff=cutoff))
+
+
+@nx._dispatchable(edge_attrs="weight")
+def all_pairs_dijkstra(G, cutoff=None, weight="weight"):
+    """Find shortest weighted paths and lengths between all nodes.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    cutoff : integer or float, optional
+        Length (sum of edge weights) at which the search is stopped.
+        If cutoff is provided, only return paths with summed weight <= cutoff.
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number or None to indicate a hidden edge.
+
+    Yields
+    ------
+    (node, (distance, path)) : (node obj, (dict, dict))
+        Each source node has two associated dicts. The first holds distance
+        keyed by target and the second holds paths keyed by target.
+        (See single_source_dijkstra for the source/target node terminology.)
+        If desired you can apply `dict()` to this function to create a dict
+        keyed by source node to the two dicts.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> len_path = dict(nx.all_pairs_dijkstra(G))
+    >>> len_path[3][0][1]
+    2
+    >>> for node in [0, 1, 2, 3, 4]:
+    ...     print(f"3 - {node}: {len_path[3][0][node]}")
+    3 - 0: 3
+    3 - 1: 2
+    3 - 2: 1
+    3 - 3: 0
+    3 - 4: 1
+    >>> len_path[3][1][1]
+    [3, 2, 1]
+    >>> for n, (dist, path) in nx.all_pairs_dijkstra(G):
+    ...     print(path[1])
+    [0, 1]
+    [1]
+    [2, 1]
+    [3, 2, 1]
+    [4, 3, 2, 1]
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    The yielded dicts only have keys for reachable nodes.
+    """
+    for n in G:
+        dist, path = single_source_dijkstra(G, n, cutoff=cutoff, weight=weight)
+        yield (n, (dist, path))
+
+
+@nx._dispatchable(edge_attrs="weight")
+def all_pairs_dijkstra_path_length(G, cutoff=None, weight="weight"):
+    """Compute shortest path lengths between all nodes in a weighted graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    cutoff : integer or float, optional
+        Length (sum of edge weights) at which the search is stopped.
+        If cutoff is provided, only return paths with summed weight <= cutoff.
+ + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + distance : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path length as the key value. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.all_pairs_dijkstra_path_length(G)) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"1 - {node}: {length[1][node]}") + 1 - 0: 1 + 1 - 1: 0 + 1 - 2: 1 + 1 - 3: 2 + 1 - 4: 3 + >>> length[3][2] + 1 + >>> length[2][2] + 0 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionary returned only has keys for reachable node pairs. + """ + length = single_source_dijkstra_path_length + for n in G: + yield (n, length(G, n, cutoff=cutoff, weight=weight)) + + +@nx._dispatchable(edge_attrs="weight") +def all_pairs_dijkstra_path(G, cutoff=None, weight="weight"): + """Compute shortest paths between all nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + paths : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path as the key value. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = dict(nx.all_pairs_dijkstra_path(G)) + >>> path[0][4] + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + floyd_warshall, all_pairs_bellman_ford_path + + """ + path = single_source_dijkstra_path + # TODO This can be trivially parallelized. + for n in G: + yield (n, path(G, n, cutoff=cutoff, weight=weight)) + + +@nx._dispatchable(edge_attrs="weight") +def bellman_ford_predecessor_and_distance( + G, source, target=None, weight="weight", heuristic=False +): + """Compute shortest path lengths and predecessors on shortest paths + in weighted graphs. + + The algorithm has a running time of $O(mn)$ where $n$ is the number of + nodes and $m$ is the number of edges. It is slower than Dijkstra but + can handle negative edge weights. + + If a negative cycle is detected, you can use :func:`find_negative_cycle` + to return the cycle and examine it. 
Shortest paths are not defined when + a negative cycle exists because once reached, the path can cycle forever + to build up arbitrarily low weights. + + Parameters + ---------- + G : NetworkX graph + The algorithm works for all types of graphs, including directed + graphs and multigraphs. + + source: node label + Starting node for path + + target : node label, optional + Ending node for path + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + heuristic : bool + Determines whether to use a heuristic to early detect negative + cycles at a hopefully negligible cost. + + Returns + ------- + pred, dist : dictionaries + Returns two dictionaries keyed by node to predecessor in the + path and to the distance from the source respectively. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXUnbounded + If the (di)graph contains a negative (di)cycle, the + algorithm raises an exception to indicate the presence of the + negative (di)cycle. Note: any negative weight edge in an + undirected graph is a negative cycle. + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0) + >>> sorted(pred.items()) + [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])] + >>> sorted(dist.items()) + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + + >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0, 1) + >>> sorted(pred.items()) + [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])] + >>> sorted(dist.items()) + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + + >>> G = nx.cycle_graph(5, create_using=nx.DiGraph()) + >>> G[1][2]["weight"] = -7 + >>> nx.bellman_ford_predecessor_and_distance(G, 0) + Traceback (most recent call last): + ... + networkx.exception.NetworkXUnbounded: Negative cycle detected. + + See Also + -------- + find_negative_cycle + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionaries returned only have keys for nodes reachable from + the source. + + In the case where the (di)graph is not connected, if a component + not containing the source contains a negative (di)cycle, it + will not be detected. + + In NetworkX v2.1 and prior, the source node had predecessor `[None]`. 
+    In NetworkX v2.2 this changed to the source node having predecessor `[]`.
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"Node {source} is not found in the graph")
+    weight = _weight_function(G, weight)
+    if G.is_multigraph():
+        if any(
+            weight(u, v, {k: d}) < 0
+            for u, v, k, d in nx.selfloop_edges(G, keys=True, data=True)
+        ):
+            raise nx.NetworkXUnbounded("Negative cycle detected.")
+    else:
+        if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
+            raise nx.NetworkXUnbounded("Negative cycle detected.")
+
+    dist = {source: 0}
+    pred = {source: []}
+
+    if len(G) == 1:
+        return pred, dist
+
+    dist = _bellman_ford(
+        G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic
+    )
+    return (pred, dist)
+
+
+def _bellman_ford(
+    G,
+    source,
+    weight,
+    pred=None,
+    paths=None,
+    dist=None,
+    target=None,
+    heuristic=True,
+):
+    """Calls relaxation loop for Bellman–Ford algorithm and builds paths
+
+    This is an implementation of the SPFA variant.
+    See https://en.wikipedia.org/wiki/Shortest_Path_Faster_Algorithm
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source: list
+        List of source nodes. The shortest path from any of the source
+        nodes will be found if multiple sources are provided.
+
+    weight : function
+        The weight of an edge is the value returned by the function. The
+        function must accept exactly three positional arguments: the two
+        endpoints of an edge and the dictionary of edge attributes for
+        that edge. The function must return a number.
+
+    pred: dict of lists, optional (default=None)
+        dict to store a list of predecessors keyed by that node
+        If None, predecessors are not stored
+
+    paths: dict, optional (default=None)
+        dict to store the path list from source to each node, keyed by node
+        If None, paths are not stored
+
+    dist: dict, optional (default=None)
+        dict to store distance from source to the keyed node
+        If None, returned dist dict contents default to 0 for every node in the
+        source list
+
+    target: node label, optional
+        Ending node for path. Path lengths to other destinations may (and
+        probably will) be incorrect.
+
+    heuristic : bool
+        Determines whether to use a heuristic to early detect negative
+        cycles at a hopefully negligible cost.
+
+    Returns
+    -------
+    dist : dict
+        Returns a dict keyed by node to the distance from the source.
+        Dicts for paths and pred are in the mutated input dicts by those names.
+
+    Raises
+    ------
+    NodeNotFound
+        If any of `source` is not in `G`.
+
+    NetworkXUnbounded
+        If the (di)graph contains a negative (di)cycle, the
+        algorithm raises an exception to indicate the presence of the
+        negative (di)cycle. Note: any negative weight edge in an
+        undirected graph is a negative cycle.
+    """
+    if pred is None:
+        pred = {v: [] for v in source}
+
+    if dist is None:
+        dist = dict.fromkeys(source, 0)
+
+    negative_cycle_found = _inner_bellman_ford(
+        G,
+        source,
+        weight,
+        pred,
+        dist,
+        heuristic,
+    )
+    if negative_cycle_found is not None:
+        raise nx.NetworkXUnbounded("Negative cycle detected.")
+
+    if paths is not None:
+        sources = set(source)
+        dsts = [target] if target is not None else pred
+        for dst in dsts:
+            gen = _build_paths_from_predecessors(sources, dst, pred)
+            paths[dst] = next(gen)
+
+    return dist
+
+
+def _inner_bellman_ford(
+    G,
+    sources,
+    weight,
+    pred,
+    dist=None,
+    heuristic=True,
+):
+    """Inner Relaxation loop for Bellman–Ford algorithm.
+
+    This is an implementation of the SPFA variant.
+    See https://en.wikipedia.org/wiki/Shortest_Path_Faster_Algorithm
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    sources: list
+        List of source nodes. The shortest path from any of the source
+        nodes will be found if multiple sources are provided.
+
+    weight : function
+        The weight of an edge is the value returned by the function. The
+        function must accept exactly three positional arguments: the two
+        endpoints of an edge and the dictionary of edge attributes for
+        that edge. The function must return a number.
+
+    pred: dict of lists
+        dict to store a list of predecessors keyed by that node
+
+    dist: dict, optional (default=None)
+        dict to store distance from source to the keyed node
+        If None, returned dist dict contents default to 0 for every node in the
+        source list
+
+    heuristic : bool
+        Determines whether to use a heuristic to early detect negative
+        cycles at a hopefully negligible cost.
+
+    Returns
+    -------
+    node or None
+        Return a node `v` where processing discovered a negative cycle.
+        If no negative cycle found, return None.
+
+    Raises
+    ------
+    NodeNotFound
+        If any of `sources` is not in `G`.
+    """
+    for s in sources:
+        if s not in G:
+            raise nx.NodeNotFound(f"Source {s} not in G")
+
+    if pred is None:
+        pred = {v: [] for v in sources}
+
+    if dist is None:
+        dist = dict.fromkeys(sources, 0)
+
+    # Heuristic Storage setup. Note: use None because nodes cannot be None
+    nonexistent_edge = (None, None)
+    pred_edge = dict.fromkeys(sources)
+    recent_update = dict.fromkeys(sources, nonexistent_edge)
+
+    G_succ = G._adj  # For speed-up (and works for both directed and undirected graphs)
+    inf = float("inf")
+    n = len(G)
+
+    count = {}
+    q = deque(sources)
+    in_q = set(sources)
+    while q:
+        u = q.popleft()
+        in_q.remove(u)
+
+        # Skip relaxations if any of the predecessors of u is in the queue.
+        if all(pred_u not in in_q for pred_u in pred[u]):
+            dist_u = dist[u]
+            for v, e in G_succ[u].items():
+                dist_v = dist_u + weight(u, v, e)
+
+                if dist_v < dist.get(v, inf):
+                    # In this conditional branch we are updating the path with v.
+                    # If it happens that some earlier update also added node v
+                    # that implies the existence of a negative cycle since
+                    # after the update node v would lie on the update path twice.
+                    # The update path is stored up to one of the source nodes,
+                    # therefore u is always in the dict recent_update
+                    if heuristic:
+                        if v in recent_update[u]:
+                            # Negative cycle found!
+                            pred[v].append(u)
+                            return v
+
+                        # Transfer the recent update info from u to v if the
+                        # same source node is the head of the update path.
+                        # If the source node is responsible for the cost update,
+                        # then clear the history and use it instead.
+                        if v in pred_edge and pred_edge[v] == u:
+                            recent_update[v] = recent_update[u]
+                        else:
+                            recent_update[v] = (u, v)
+
+                    if v not in in_q:
+                        q.append(v)
+                        in_q.add(v)
+                        count_v = count.get(v, 0) + 1
+                        if count_v == n:
+                            # Negative cycle found!
+                            return v
+
+                        count[v] = count_v
+                    dist[v] = dist_v
+                    pred[v] = [u]
+                    pred_edge[v] = u
+
+                elif dist.get(v) is not None and dist_v == dist.get(v):
+                    pred[v].append(u)
+
+    # Successfully found shortest paths. No negative cycles found.
+    return None
+
+
+@nx._dispatchable(edge_attrs="weight")
+def bellman_ford_path(G, source, target, weight="weight"):
+    """Returns the shortest path from source to target in a weighted graph G.
+ + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node + + target : node + Ending node + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + path : list + List of nodes in a shortest path. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.bellman_ford_path(G, 0, 4) + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + dijkstra_path, bellman_ford_path_length + """ + length, path = single_source_bellman_ford(G, source, target=target, weight=weight) + return path + + +@nx._dispatchable(edge_attrs="weight") +def bellman_ford_path_length(G, source, target, weight="weight"): + """Returns the shortest path length from source to target + in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node label + starting node for path + + target : node label + ending node for path + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + length : number + Shortest path length. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.bellman_ford_path_length(G, 0, 4) + 4 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + dijkstra_path_length, bellman_ford_path + """ + if source == target: + if source not in G: + raise nx.NodeNotFound(f"Node {source} not found in graph") + return 0 + + weight = _weight_function(G, weight) + + length = _bellman_ford(G, [source], weight, target=target) + + try: + return length[target] + except KeyError as err: + raise nx.NetworkXNoPath(f"node {target} not reachable from {source}") from err + + +@nx._dispatchable(edge_attrs="weight") +def single_source_bellman_ford_path(G, source, weight="weight"): + """Compute shortest path between source and all other reachable + nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. 
+
+    weight : string or function (default="weight")
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number.
+
+    Returns
+    -------
+    paths : dictionary
+        Dictionary of shortest paths keyed by target.
+
+    Raises
+    ------
+    NodeNotFound
+        If `source` is not in `G`.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> path = nx.single_source_bellman_ford_path(G, 0)
+    >>> path[4]
+    [0, 1, 2, 3, 4]
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    See Also
+    --------
+    single_source_dijkstra, single_source_bellman_ford
+
+    """
+    (length, path) = single_source_bellman_ford(G, source, weight=weight)
+    return path
+
+
+@nx._dispatchable(edge_attrs="weight")
+def single_source_bellman_ford_path_length(G, source, weight="weight"):
+    """Compute the shortest path length between source and all other
+    reachable nodes for a weighted graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node label
+        Starting node for path
+
+    weight : string or function (default="weight")
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number.
+
+    Returns
+    -------
+    length : dictionary
+        Dictionary of shortest path lengths keyed by target
+
+    Raises
+    ------
+    NodeNotFound
+        If `source` is not in `G`.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> length = nx.single_source_bellman_ford_path_length(G, 0)
+    >>> length[4]
+    4
+    >>> for node in [0, 1, 2, 3, 4]:
+    ...     print(f"{node}: {length[node]}")
+    0: 0
+    1: 1
+    2: 2
+    3: 3
+    4: 4
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    See Also
+    --------
+    single_source_dijkstra, single_source_bellman_ford
+
+    """
+    weight = _weight_function(G, weight)
+    return _bellman_ford(G, [source], weight)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def single_source_bellman_ford(G, source, target=None, weight="weight"):
+    """Compute shortest paths and lengths in a weighted graph G.
+
+    Uses Bellman-Ford algorithm for shortest paths.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node label
+        Starting node for path
+
+    target : node label, optional
+        Ending node for path
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+ + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance, path : pair of dictionaries, or numeric and list + If target is None, returns a tuple of two dictionaries keyed by node. + The first dictionary stores distance from one of the source nodes. + The second stores the path from one of the sources to that node. + If target is not None, returns a tuple of (distance, path) where + distance is the distance from source to target and path is a list + representing the path from source to target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.single_source_bellman_ford(G, 0) + >>> length[4] + 4 + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + >>> path[4] + [0, 1, 2, 3, 4] + >>> length, path = nx.single_source_bellman_ford(G, 0, 1) + >>> length + 1 + >>> path + [0, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + single_source_dijkstra + single_source_bellman_ford_path + single_source_bellman_ford_path_length + """ + if source == target: + if source not in G: + raise nx.NodeNotFound(f"Node {source} is not found in the graph") + return (0, [source]) + + weight = _weight_function(G, weight) + + paths = {source: [source]} # dictionary of paths + dist = _bellman_ford(G, [source], weight, paths=paths, target=target) + if target is None: + return (dist, paths) + try: + return (dist[target], paths[target]) + except KeyError as err: + msg = f"Node {target} not reachable from {source}" + raise nx.NetworkXNoPath(msg) from err + + +@nx._dispatchable(edge_attrs="weight") +def all_pairs_bellman_ford_path_length(G, weight="weight"): + """Compute shortest path lengths between all nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path length as the key value. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.all_pairs_bellman_ford_path_length(G)) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"1 - {node}: {length[1][node]}") + 1 - 0: 1 + 1 - 1: 0 + 1 - 2: 1 + 1 - 3: 2 + 1 - 4: 3 + >>> length[3][2] + 1 + >>> length[2][2] + 0 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionary returned only has keys for reachable node pairs. 
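+
+    Because this function returns an iterator, a single source row can be
+    computed lazily without materializing all pairs:
+
+    >>> n, row = next(nx.all_pairs_bellman_ford_path_length(nx.path_graph(5)))
+    >>> n, row[4]
+    (0, 4)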
+ """ + length = single_source_bellman_ford_path_length + for n in G: + yield (n, dict(length(G, n, weight=weight))) + + +@nx._dispatchable(edge_attrs="weight") +def all_pairs_bellman_ford_path(G, weight="weight"): + """Compute shortest paths between all nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + paths : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path as the key value. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = dict(nx.all_pairs_bellman_ford_path(G)) + >>> path[0][4] + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + floyd_warshall, all_pairs_dijkstra_path + + """ + path = single_source_bellman_ford_path + for n in G: + yield (n, path(G, n, weight=weight)) + + +@nx._dispatchable(edge_attrs="weight") +def goldberg_radzik(G, source, weight="weight"): + """Compute shortest path lengths and predecessors on shortest paths + in weighted graphs. + + The algorithm has a running time of $O(mn)$ where $n$ is the number of + nodes and $m$ is the number of edges. It is slower than Dijkstra but + can handle negative edge weights. + + Parameters + ---------- + G : NetworkX graph + The algorithm works for all types of graphs, including directed + graphs and multigraphs. + + source: node label + Starting node for path + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + pred, dist : dictionaries + Returns two dictionaries keyed by node to predecessor in the + path and to the distance from the source respectively. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXUnbounded + If the (di)graph contains a negative (di)cycle, the + algorithm raises an exception to indicate the presence of the + negative (di)cycle. Note: any negative weight edge in an + undirected graph is a negative cycle. + + As of NetworkX v3.2, a zero weight cycle is no longer + incorrectly reported as a negative weight cycle. 
+
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
+    >>> pred, dist = nx.goldberg_radzik(G, 0)
+    >>> sorted(pred.items())
+    [(0, None), (1, 0), (2, 1), (3, 2), (4, 3)]
+    >>> sorted(dist.items())
+    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
+
+    >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
+    >>> G[1][2]["weight"] = -7
+    >>> nx.goldberg_radzik(G, 0)
+    Traceback (most recent call last):
+    ...
+    networkx.exception.NetworkXUnbounded: Negative cycle detected.
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    The dictionaries returned only have keys for nodes reachable from
+    the source.
+
+    In the case where the (di)graph is not connected, if a component
+    not containing the source contains a negative (di)cycle, it
+    will not be detected.
+
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"Node {source} is not found in the graph")
+    weight = _weight_function(G, weight)
+    if G.is_multigraph():
+        if any(
+            weight(u, v, {k: d}) < 0
+            for u, v, k, d in nx.selfloop_edges(G, keys=True, data=True)
+        ):
+            raise nx.NetworkXUnbounded("Negative cycle detected.")
+    else:
+        if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
+            raise nx.NetworkXUnbounded("Negative cycle detected.")
+
+    if len(G) == 1:
+        return {source: None}, {source: 0}
+
+    G_succ = G._adj  # For speed-up (and works for both directed and undirected graphs)
+
+    inf = float("inf")
+    d = dict.fromkeys(G, inf)
+    d[source] = 0
+    pred = {source: None}
+
+    def topo_sort(relabeled):
+        """Topologically sort nodes relabeled in the previous round and detect
+        negative cycles.
+        """
+        # List of nodes to scan in this round. Denoted by A in Goldberg and
+        # Radzik's paper.
+        to_scan = []
+        # In the DFS in the loop below, neg_count records for each node the
+        # number of edges of negative reduced costs on the path from a DFS root
+        # to the node in the DFS forest. The reduced cost of an edge (u, v) is
+        # defined as d[u] + weight(u, v) - d[v].
+        #
+        # neg_count also doubles as the DFS visit marker array.
+        neg_count = {}
+        for u in relabeled:
+            # Skip visited nodes.
+            if u in neg_count:
+                continue
+            d_u = d[u]
+            # Skip nodes without out-edges of negative reduced costs.
+            if all(d_u + weight(u, v, e) >= d[v] for v, e in G_succ[u].items()):
+                continue
+            # Nonrecursive DFS that inserts nodes reachable from u via edges of
+            # nonpositive reduced costs into to_scan in (reverse) topological
+            # order.
+            stack = [(u, iter(G_succ[u].items()))]
+            in_stack = {u}
+            neg_count[u] = 0
+            while stack:
+                u, it = stack[-1]
+                try:
+                    v, e = next(it)
+                except StopIteration:
+                    to_scan.append(u)
+                    stack.pop()
+                    in_stack.remove(u)
+                    continue
+                t = d[u] + weight(u, v, e)
+                d_v = d[v]
+                if t < d_v:
+                    is_neg = t < d_v
+                    d[v] = t
+                    pred[v] = u
+                    if v not in neg_count:
+                        neg_count[v] = neg_count[u] + int(is_neg)
+                        stack.append((v, iter(G_succ[v].items())))
+                        in_stack.add(v)
+                    elif v in in_stack and neg_count[u] + int(is_neg) > neg_count[v]:
+                        # (u, v) is a back edge, and the cycle formed by the
+                        # path v to u and (u, v) contains at least one edge of
+                        # negative reduced cost. The cycle must be of negative
+                        # cost.
+                        raise nx.NetworkXUnbounded("Negative cycle detected.")
+        to_scan.reverse()
+        return to_scan
+
+    def relax(to_scan):
+        """Relax out-edges of relabeled nodes."""
+        relabeled = set()
+        # Scan nodes in to_scan in topological order and relax incident
+        # out-edges. Add the relabeled nodes to the set `relabeled`.
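+        # One pass over to_scan suffices here: to_scan is topologically
+        # ordered with respect to edges of nonpositive reduced cost, so each
+        # node is scanned only after the nodes that can improve its distance.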
+        for u in to_scan:
+            d_u = d[u]
+            for v, e in G_succ[u].items():
+                w_e = weight(u, v, e)
+                if d_u + w_e < d[v]:
+                    d[v] = d_u + w_e
+                    pred[v] = u
+                    relabeled.add(v)
+        return relabeled
+
+    # Set of nodes relabeled in the last round of scan operations. Denoted by B
+    # in Goldberg and Radzik's paper.
+    relabeled = {source}
+
+    while relabeled:
+        to_scan = topo_sort(relabeled)
+        relabeled = relax(to_scan)
+
+    d = {u: d[u] for u in pred}
+    return pred, d
+
+
+@nx._dispatchable(edge_attrs="weight")
+def negative_edge_cycle(G, weight="weight", heuristic=True):
+    """Returns True if there exists a negative edge cycle anywhere in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number.
+
+    heuristic : bool
+        Determines whether to use a heuristic to early detect negative
+        cycles at a negligible cost. In case of graphs with a negative cycle,
+        the performance of detection increases by at least an order of magnitude.
+
+    Returns
+    -------
+    negative_cycle : bool
+        True if a negative edge cycle exists, otherwise False.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
+    >>> print(nx.negative_edge_cycle(G))
+    False
+    >>> G[1][2]["weight"] = -7
+    >>> print(nx.negative_edge_cycle(G))
+    True
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    This algorithm uses bellman_ford_predecessor_and_distance() but finds
+    negative cycles on any component by first adding a new node connected to
+    every node, and starting bellman_ford_predecessor_and_distance on that
+    node. It then removes that extra node.
+    """
+    if G.size() == 0:
+        return False
+
+    # find unused node to use temporarily
+    newnode = -1
+    while newnode in G:
+        newnode -= 1
+    # connect it to all nodes
+    G.add_edges_from([(newnode, n) for n in G])
+
+    try:
+        bellman_ford_predecessor_and_distance(
+            G, newnode, weight=weight, heuristic=heuristic
+        )
+    except nx.NetworkXUnbounded:
+        return True
+    finally:
+        G.remove_node(newnode)
+    return False
+
+
+@nx._dispatchable(edge_attrs="weight")
+def find_negative_cycle(G, source, weight="weight"):
+    """Returns a cycle with negative total weight if it exists.
+
+    Bellman-Ford is used to find shortest paths. That algorithm
+    stops if there exists a negative cycle. This algorithm
+    picks up from there and returns the found negative cycle.
+
+    The cycle consists of a list of nodes in the cycle order. The last
+    node equals the first to make it a cycle.
+    You can look up the edge weights in the original graph. In the case
+    of multigraphs the relevant edge is the minimal weight edge between
+    the nodes in the 2-tuple.
+
+    If the graph has no negative cycle, a NetworkXError is raised.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source: node label
+        The search for the negative cycle will start from this node.
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number.
+
+    Returns
+    -------
+    cycle : list
+        A list of nodes in the order of the cycle found. The last node
+        equals the first to indicate a cycle.
+
+    Raises
+    ------
+    NetworkXError
+        If no negative cycle is found.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from(
+    ...     [(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)]
+    ... )
+    >>> nx.find_negative_cycle(G, 0)
+    [4, 0, 1, 4]
+    """
+    weight = _weight_function(G, weight)
+    pred = {source: []}
+
+    v = _inner_bellman_ford(G, [source], weight, pred=pred)
+    if v is None:
+        raise nx.NetworkXError("No negative cycles detected.")
+
+    # negative cycle detected... find it
+    neg_cycle = []
+    stack = [(v, list(pred[v]))]
+    seen = {v}
+    while stack:
+        node, preds = stack[-1]
+        if v in preds:
+            # found the cycle
+            neg_cycle.extend([node, v])
+            neg_cycle = list(reversed(neg_cycle))
+            return neg_cycle
+
+        if preds:
+            nbr = preds.pop()
+            if nbr not in seen:
+                stack.append((nbr, list(pred[nbr])))
+                neg_cycle.append(node)
+                seen.add(nbr)
+        else:
+            stack.pop()
+            if neg_cycle:
+                neg_cycle.pop()
+            else:
+                # Self-loop case. The weight function takes the three
+                # arguments (u, v, data), so pass the self-loop's edge
+                # data rather than the graph itself.
+                if v in G[v] and weight(v, v, G[v][v]) < 0:
+                    return [v, v]
+                # should not reach here
+                raise nx.NetworkXError("Negative cycle is detected but not found")
+    # should not get here...
+    msg = "negative cycle detected but not identified"
+    raise nx.NetworkXUnbounded(msg)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def bidirectional_dijkstra(G, source, target, weight="weight"):
+    r"""Dijkstra's algorithm for shortest paths using bidirectional search.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Starting node.
+
+    target : node
+        Ending node.
+
+    weight : string or function
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number or None to indicate a hidden edge.
+
+    Returns
+    -------
+    length, path : number and list
+        length is the distance from source to target.
+        path is a list of nodes on a path from source to target.
+
+    Raises
+    ------
+    NodeNotFound
+        If `source` or `target` is not in `G`.
+
+    NetworkXNoPath
+        If no path exists between source and target.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> length, path = nx.bidirectional_dijkstra(G, 0, 4)
+    >>> print(length)
+    4
+    >>> print(path)
+    [0, 1, 2, 3, 4]
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    The weight function can be used to hide edges by returning None.
+    So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
+    will find the shortest red path.
+
+    In practice bidirectional Dijkstra is much more than twice as fast as
+    ordinary Dijkstra.
+
+    Ordinary Dijkstra expands nodes in a sphere-like manner from the
+    source. The radius of this sphere will eventually be the length
+    of the shortest path. Bidirectional Dijkstra will expand nodes
+    from both the source and the target, making two spheres of half
+    this radius. The volume of the first sphere scales as `\pi*r*r`,
+    while the two smaller spheres together occupy `2*\pi*(r/2)*(r/2)`,
+    half of that, so roughly half as many nodes are explored.
+
+    This algorithm is not guaranteed to work if edge weights
+    are negative or are floating point numbers
+    (overflows and roundoff errors can cause problems).
+
+    See Also
+    --------
+    shortest_path
+    shortest_path_length
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"Source {source} is not in G")
+
+    if target not in G:
+        raise nx.NodeNotFound(f"Target {target} is not in G")
+
+    if source == target:
+        return (0, [source])
+
+    weight = _weight_function(G, weight)
+    # Init:  [Forward, Backward]
+    dists = [{}, {}]  # dictionary of final distances
+    paths = [{source: [source]}, {target: [target]}]  # dictionary of paths
+    fringe = [[], []]  # heap of (distance, node) for choosing node to expand
+    seen = [{source: 0}, {target: 0}]  # dict of distances to seen nodes
+    c = count()
+    # initialize fringe heap
+    heappush(fringe[0], (0, next(c), source))
+    heappush(fringe[1], (0, next(c), target))
+    # neighs for extracting correct neighbor information
+    if G.is_directed():
+        neighs = [G._succ, G._pred]
+    else:
+        neighs = [G._adj, G._adj]
+    # variables to hold shortest discovered path
+    # finaldist = 1e30000
+    finalpath = []
+    dir = 1
+    while fringe[0] and fringe[1]:
+        # choose direction
+        # dir == 0 is forward direction and dir == 1 is back
+        dir = 1 - dir
+        # extract closest to expand
+        (dist, _, v) = heappop(fringe[dir])
+        if v in dists[dir]:
+            # Shortest path to v has already been found
+            continue
+        # update distance
+        dists[dir][v] = dist  # equal to seen[dir][v]
+        if v in dists[1 - dir]:
+            # if we have scanned v in both directions we are done
+            # we have now discovered the shortest path
+            return (finaldist, finalpath)
+
+        for w, d in neighs[dir][v].items():
+            # weight(v, w, d) for forward and weight(w, v, d) for back direction
+            cost = weight(v, w, d) if dir == 0 else weight(w, v, d)
+            if cost is None:
+                continue
+            vwLength = dists[dir][v] + cost
+            if w in dists[dir]:
+                if vwLength < dists[dir][w]:
+                    raise ValueError("Contradictory paths found: negative weights?")
+            elif w not in seen[dir] or vwLength < seen[dir][w]:
+                # relaxing
+                seen[dir][w] = vwLength
+                heappush(fringe[dir], (vwLength, next(c), w))
+                paths[dir][w] = paths[dir][v] + [w]
+                if w in seen[0] and w in seen[1]:
+                    # see if this path is better than the already
+                    # discovered shortest path
+                    totaldist = seen[0][w] + seen[1][w]
+                    if finalpath == [] or finaldist > totaldist:
+                        finaldist = totaldist
+                        revpath = paths[1][w][:]
+                        revpath.reverse()
+                        finalpath = paths[0][w] + revpath[1:]
+    raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
+
+
+@nx._dispatchable(edge_attrs="weight")
+def johnson(G, weight="weight"):
+    r"""Uses Johnson's Algorithm to compute shortest paths.
+
+    Johnson's Algorithm finds a shortest path between each pair of
+    nodes in a weighted graph even if negative weights are present.
+ + Parameters + ---------- + G : NetworkX graph + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : dictionary + Dictionary, keyed by source and target, of shortest paths. + + Examples + -------- + >>> graph = nx.DiGraph() + >>> graph.add_weighted_edges_from( + ... [("0", "3", 3), ("0", "1", -5), ("0", "2", 2), ("1", "2", 4), ("2", "3", 1)] + ... ) + >>> paths = nx.johnson(graph, weight="weight") + >>> paths["0"]["2"] + ['0', '1', '2'] + + Notes + ----- + Johnson's algorithm is suitable even for graphs with negative weights. It + works by using the Bellman–Ford algorithm to compute a transformation of + the input graph that removes all negative weights, allowing Dijkstra's + algorithm to be used on the transformed graph. + + The time complexity of this algorithm is $O(n^2 \log n + n m)$, + where $n$ is the number of nodes and $m$ the number of edges in the + graph. For dense graphs, this may be faster than the Floyd–Warshall + algorithm. + + See Also + -------- + floyd_warshall_predecessor_and_distance + floyd_warshall_numpy + all_pairs_shortest_path + all_pairs_shortest_path_length + all_pairs_dijkstra_path + bellman_ford_predecessor_and_distance + all_pairs_bellman_ford_path + all_pairs_bellman_ford_path_length + + """ + dist = dict.fromkeys(G, 0) + pred = {v: [] for v in G} + weight = _weight_function(G, weight) + + # Calculate distance of shortest paths + dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist) + + # Update the weight function to take into account the Bellman--Ford + # relaxation distances. + def new_weight(u, v, d): + return weight(u, v, d) + dist_bellman[u] - dist_bellman[v] + + def dist_path(v): + paths = {v: [v]} + _dijkstra(G, v, new_weight, paths=paths) + return paths + + return {v: dist_path(v) for v in G} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/similarity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/similarity.py new file mode 100644 index 0000000..245bc89 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/similarity.py @@ -0,0 +1,1783 @@ +"""Functions measuring similarity using graph edit distance. + +The graph edit distance is the number of edge/node changes needed +to make two graphs isomorphic. + +The default algorithm/implementation is sub-optimal for some graphs. +The problem of finding the exact Graph Edit Distance (GED) is NP-hard +so it is often slow. If the simple interface `graph_edit_distance` +takes too long for your graph, try `optimize_graph_edit_distance` +and/or `optimize_edit_paths`. + +At the same time, I encourage capable people to investigate +alternative GED algorithms, in order to improve the choices available. 
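+
+Both `optimize_graph_edit_distance` and `optimize_edit_paths` are
+generators that yield successively better solutions, so a quick (possibly
+sub-optimal) answer is available from the first yielded value, e.g.
+``next(nx.optimize_graph_edit_distance(G1, G2))``.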
+""" + +import math +import time +import warnings +from dataclasses import dataclass +from itertools import product + +import networkx as nx +from networkx.utils import np_random_state + +__all__ = [ + "graph_edit_distance", + "optimal_edit_paths", + "optimize_graph_edit_distance", + "optimize_edit_paths", + "simrank_similarity", + "panther_similarity", + "generate_random_paths", +] + + +@nx._dispatchable( + graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True +) +def graph_edit_distance( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + roots=None, + upper_bound=None, + timeout=None, +): + """Returns GED (graph edit distance) between graphs G1 and G2. + + Graph edit distance is a graph similarity measure analogous to + Levenshtein distance for strings. It is defined as minimum cost + of edit path (sequence of node and edge edit operations) + transforming graph G1 to graph isomorphic to G2. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. 
+ If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + roots : 2-tuple + Tuple where first element is a node in G1 and the second + is a node in G2. + These nodes are forced to be matched in the comparison to + allow comparison between rooted graphs. + + upper_bound : numeric + Maximum edit distance to consider. Return None if no edit + distance under or equal to upper_bound exists. + + timeout : numeric + Maximum number of seconds to execute. + After timeout is met, the current best GED is returned. + + Examples + -------- + >>> G1 = nx.cycle_graph(6) + >>> G2 = nx.wheel_graph(7) + >>> nx.graph_edit_distance(G1, G2) + 7.0 + + >>> G1 = nx.star_graph(5) + >>> G2 = nx.star_graph(5) + >>> nx.graph_edit_distance(G1, G2, roots=(0, 0)) + 0.0 + >>> nx.graph_edit_distance(G1, G2, roots=(1, 0)) + 8.0 + + See Also + -------- + optimal_edit_paths, optimize_graph_edit_distance, + + is_isomorphic: test for graph edit distance of 0 + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + bestcost = None + for _, _, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + True, + roots, + timeout, + ): + # assert bestcost is None or cost < bestcost + bestcost = cost + return bestcost + + +@nx._dispatchable(graphs={"G1": 0, "G2": 1}) +def optimal_edit_paths( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, +): + """Returns all minimum-cost edit paths transforming G1 to G2. + + Graph edit path is a sequence of node and edge edit operations + transforming graph G1 to graph isomorphic to G2. Edit operations + include substitutions, deletions, and insertions. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. 
If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + Returns + ------- + edit_paths : list of tuples (node_edit_path, edge_edit_path) + - node_edit_path : list of tuples ``(u, v)`` indicating node transformations + between `G1` and `G2`. ``u`` is `None` for insertion, ``v`` is `None` + for deletion. + - edge_edit_path : list of tuples ``((u1, v1), (u2, v2))`` indicating edge + transformations between `G1` and `G2`. ``(None, (u2,v2))`` for insertion + and ``((u1,v1), None)`` for deletion. + + cost : numeric + Optimal edit path cost (graph edit distance). When the cost + is zero, it indicates that `G1` and `G2` are isomorphic. + + Examples + -------- + >>> G1 = nx.cycle_graph(4) + >>> G2 = nx.wheel_graph(5) + >>> paths, cost = nx.optimal_edit_paths(G1, G2) + >>> len(paths) + 40 + >>> cost + 5.0 + + Notes + ----- + To transform `G1` into a graph isomorphic to `G2`, apply the node + and edge edits in the returned ``edit_paths``. + In the case of isomorphic graphs, the cost is zero, and the paths + represent different isomorphic mappings (isomorphisms). That is, the + edits involve renaming nodes and edges to match the structure of `G2`. + + See Also + -------- + graph_edit_distance, optimize_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. 
+ https://hal.archives-ouvertes.fr/hal-01168816 + + """ + paths = [] + bestcost = None + for vertex_path, edge_path, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + False, + ): + # assert bestcost is None or cost <= bestcost + if bestcost is not None and cost < bestcost: + paths = [] + paths.append((vertex_path, edge_path)) + bestcost = cost + return paths, bestcost + + +@nx._dispatchable(graphs={"G1": 0, "G2": 1}) +def optimize_graph_edit_distance( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, +): + """Returns consecutive approximations of GED (graph edit distance) + between graphs G1 and G2. + + Graph edit distance is a graph similarity measure analogous to + Levenshtein distance for strings. It is defined as minimum cost + of edit path (sequence of node and edge edit operations) + transforming graph G1 to graph isomorphic to G2. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. 
+ + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + Returns + ------- + Generator of consecutive approximations of graph edit distance. + + Examples + -------- + >>> G1 = nx.cycle_graph(6) + >>> G2 = nx.wheel_graph(7) + >>> for v in nx.optimize_graph_edit_distance(G1, G2): + ... minv = v + >>> minv + 7.0 + + See Also + -------- + graph_edit_distance, optimize_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + """ + for _, _, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + True, + ): + yield cost + + +@nx._dispatchable( + graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True +) +def optimize_edit_paths( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, + strictly_decreasing=True, + roots=None, + timeout=None, +): + """GED (graph edit distance) calculation: advanced interface. + + Graph edit path is a sequence of node and edge edit operations + transforming graph G1 to graph isomorphic to G2. Edit operations + include substitutions, deletions, and insertions. + + Graph edit distance is defined as minimum cost of edit path. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). 
+ + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + strictly_decreasing : bool + If True, return consecutive approximations of strictly + decreasing cost. Otherwise, return all edit paths of cost + less than or equal to the previous minimum cost. + + roots : 2-tuple + Tuple where first element is a node in G1 and the second + is a node in G2. + These nodes are forced to be matched in the comparison to + allow comparison between rooted graphs. + + timeout : numeric + Maximum number of seconds to execute. + After timeout is met, the current best GED is returned. + + Returns + ------- + Generator of tuples (node_edit_path, edge_edit_path, cost) + node_edit_path : list of tuples (u, v) + edge_edit_path : list of tuples ((u1, v1), (u2, v2)) + cost : numeric + + See Also + -------- + graph_edit_distance, optimize_graph_edit_distance, optimal_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + # TODO: support DiGraph + + import numpy as np + import scipy as sp + + @dataclass + class CostMatrix: + C: ... + lsa_row_ind: ... + lsa_col_ind: ... + ls: ... 
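+
+    # Convention used for the cost matrices built below (as can be read off
+    # the construction of ``Cv`` and ``Ce`` later in this function): C is
+    # (m + n) x (m + n) for m pending G1 items and n pending G2 items. The
+    # upper-left m x n block holds substitution costs, the upper-right m x m
+    # block holds deletion costs on its diagonal (inf off-diagonal), the
+    # lower-left n x n block holds insertion costs on its diagonal, and the
+    # lower-right n x m block is all zeros (dummy assignments). ``ls`` caches
+    # the optimal linear-sum-assignment cost of C, which serves as a lower
+    # bound when pruning partial edit paths.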
+ + def make_CostMatrix(C, m, n): + # assert(C.shape == (m + n, m + n)) + lsa_row_ind, lsa_col_ind = sp.optimize.linear_sum_assignment(C) + + # Fixup dummy assignments: + # each substitution i<->j should have dummy assignment m+j<->n+i + # NOTE: fast reduce of Cv relies on it + # Create masks for substitution and dummy indices + is_subst = (lsa_row_ind < m) & (lsa_col_ind < n) + is_dummy = (lsa_row_ind >= m) & (lsa_col_ind >= n) + + # Map dummy assignments to the correct indices + lsa_row_ind[is_dummy] = lsa_col_ind[is_subst] + m + lsa_col_ind[is_dummy] = lsa_row_ind[is_subst] + n + + return CostMatrix( + C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum() + ) + + def extract_C(C, i, j, m, n): + # assert(C.shape == (m + n, m + n)) + row_ind = [k in i or k - m in j for k in range(m + n)] + col_ind = [k in j or k - n in i for k in range(m + n)] + return C[row_ind, :][:, col_ind] + + def reduce_C(C, i, j, m, n): + # assert(C.shape == (m + n, m + n)) + row_ind = [k not in i and k - m not in j for k in range(m + n)] + col_ind = [k not in j and k - n not in i for k in range(m + n)] + return C[row_ind, :][:, col_ind] + + def reduce_ind(ind, i): + # assert set(ind) == set(range(len(ind))) + rind = ind[[k not in i for k in ind]] + for k in set(i): + rind[rind >= k] -= 1 + return rind + + def match_edges(u, v, pending_g, pending_h, Ce, matched_uv=None): + """ + Parameters: + u, v: matched vertices, u=None or v=None for + deletion/insertion + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_uv: partial vertex edit path + list of tuples (u, v) of previously matched vertex + mappings u<->v, u=None or v=None for + deletion/insertion + + Returns: + list of (i, j): indices of edge mappings g<->h + localCe: local CostMatrix of edge mappings + (basically submatrix of Ce at cross of rows i, cols j) + """ + M = len(pending_g) + N = len(pending_h) + # assert Ce.C.shape == (M + N, M + N) + + # only attempt to match edges after one node match has been made + # this will stop self-edges on the first node being automatically deleted + # even when a substitution is the better option + if matched_uv is None or len(matched_uv) == 0: + g_ind = [] + h_ind = [] + else: + g_ind = [ + i + for i in range(M) + if pending_g[i][:2] == (u, u) + or any( + pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv + ) + ] + h_ind = [ + j + for j in range(N) + if pending_h[j][:2] == (v, v) + or any( + pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv + ) + ] + + m = len(g_ind) + n = len(h_ind) + + if m or n: + C = extract_C(Ce.C, g_ind, h_ind, M, N) + # assert C.shape == (m + n, m + n) + + # Forbid structurally invalid matches + # NOTE: inf remembered from Ce construction + for k, i in enumerate(g_ind): + g = pending_g[i][:2] + for l, j in enumerate(h_ind): + h = pending_h[j][:2] + if nx.is_directed(G1) or nx.is_directed(G2): + if any( + g == (p, u) and h == (q, v) or g == (u, p) and h == (v, q) + for p, q in matched_uv + ): + continue + else: + if any( + g in ((p, u), (u, p)) and h in ((q, v), (v, q)) + for p, q in matched_uv + ): + continue + if g == (u, u) or any(g == (p, p) for p, q in matched_uv): + continue + if h == (v, v) or any(h == (q, q) for p, q in matched_uv): + continue + C[k, l] = inf + + localCe = make_CostMatrix(C, m, n) + ij = [ + ( + g_ind[k] if k < m else M + h_ind[l], + h_ind[l] if l < n else N + g_ind[k], + ) + for k, l in zip(localCe.lsa_row_ind, localCe.lsa_col_ind) + if k < m or l < n + ] + + else: + ij = [] + 
localCe = CostMatrix(np.empty((0, 0)), [], [], 0) + + return ij, localCe + + def reduce_Ce(Ce, ij, m, n): + if len(ij): + i, j = zip(*ij) + m_i = m - sum(1 for t in i if t < m) + n_j = n - sum(1 for t in j if t < n) + return make_CostMatrix(reduce_C(Ce.C, i, j, m, n), m_i, n_j) + return Ce + + def get_edit_ops( + matched_uv, pending_u, pending_v, Cv, pending_g, pending_h, Ce, matched_cost + ): + """ + Parameters: + matched_uv: partial vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + pending_u, pending_v: lists of vertices not yet mapped + Cv: CostMatrix of pending vertex mappings + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_cost: cost of partial edit path + + Returns: + sequence of + (i, j): indices of vertex mapping u<->v + Cv_ij: reduced CostMatrix of pending vertex mappings + (basically Cv with row i, col j removed) + list of (x, y): indices of edge mappings g<->h + Ce_xy: reduced CostMatrix of pending edge mappings + (basically Ce with rows x, cols y removed) + cost: total cost of edit operation + NOTE: most promising ops first + """ + m = len(pending_u) + n = len(pending_v) + # assert Cv.C.shape == (m + n, m + n) + + # 1) a vertex mapping from optimal linear sum assignment + i, j = min( + (k, l) for k, l in zip(Cv.lsa_row_ind, Cv.lsa_col_ind) if k < m or l < n + ) + xy, localCe = match_edges( + pending_u[i] if i < m else None, + pending_v[j] if j < n else None, + pending_g, + pending_h, + Ce, + matched_uv, + ) + Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h)) + # assert Ce.ls <= localCe.ls + Ce_xy.ls + if prune(matched_cost + Cv.ls + localCe.ls + Ce_xy.ls): + pass + else: + # get reduced Cv efficiently + Cv_ij = CostMatrix( + reduce_C(Cv.C, (i,), (j,), m, n), + reduce_ind(Cv.lsa_row_ind, (i, m + j)), + reduce_ind(Cv.lsa_col_ind, (j, n + i)), + Cv.ls - Cv.C[i, j], + ) + yield (i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls + + # 2) other candidates, sorted by lower-bound cost estimate + other = [] + fixed_i, fixed_j = i, j + if m <= n: + candidates = ( + (t, fixed_j) + for t in range(m + n) + if t != fixed_i and (t < m or t == m + fixed_j) + ) + else: + candidates = ( + (fixed_i, t) + for t in range(m + n) + if t != fixed_j and (t < n or t == n + fixed_i) + ) + for i, j in candidates: + if prune(matched_cost + Cv.C[i, j] + Ce.ls): + continue + Cv_ij = make_CostMatrix( + reduce_C(Cv.C, (i,), (j,), m, n), + m - 1 if i < m else m, + n - 1 if j < n else n, + ) + # assert Cv.ls <= Cv.C[i, j] + Cv_ij.ls + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + Ce.ls): + continue + xy, localCe = match_edges( + pending_u[i] if i < m else None, + pending_v[j] if j < n else None, + pending_g, + pending_h, + Ce, + matched_uv, + ) + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls): + continue + Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h)) + # assert Ce.ls <= localCe.ls + Ce_xy.ls + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls + Ce_xy.ls): + continue + other.append(((i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls)) + + yield from sorted(other, key=lambda t: t[4] + t[1].ls + t[3].ls) + + def get_edit_paths( + matched_uv, + pending_u, + pending_v, + Cv, + matched_gh, + pending_g, + pending_h, + Ce, + matched_cost, + ): + """ + Parameters: + matched_uv: partial vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + pending_u, pending_v: lists of vertices not yet mapped + Cv: 
CostMatrix of pending vertex mappings + matched_gh: partial edge edit path + list of tuples (g, h) of edge mappings g<->h, + g=None or h=None for deletion/insertion + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_cost: cost of partial edit path + + Returns: + sequence of (vertex_path, edge_path, cost) + vertex_path: complete vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + edge_path: complete edge edit path + list of tuples (g, h) of edge mappings g<->h, + g=None or h=None for deletion/insertion + cost: total cost of edit path + NOTE: path costs are non-increasing + """ + if prune(matched_cost + Cv.ls + Ce.ls): + return + + if not max(len(pending_u), len(pending_v)): + # assert not len(pending_g) + # assert not len(pending_h) + # path completed! + # assert matched_cost <= maxcost_value + nonlocal maxcost_value + maxcost_value = min(maxcost_value, matched_cost) + yield matched_uv, matched_gh, matched_cost + + else: + edit_ops = get_edit_ops( + matched_uv, + pending_u, + pending_v, + Cv, + pending_g, + pending_h, + Ce, + matched_cost, + ) + for ij, Cv_ij, xy, Ce_xy, edit_cost in edit_ops: + i, j = ij + # assert Cv.C[i, j] + sum(Ce.C[t] for t in xy) == edit_cost + if prune(matched_cost + edit_cost + Cv_ij.ls + Ce_xy.ls): + continue + + # dive deeper + u = pending_u.pop(i) if i < len(pending_u) else None + v = pending_v.pop(j) if j < len(pending_v) else None + matched_uv.append((u, v)) + for x, y in xy: + len_g = len(pending_g) + len_h = len(pending_h) + matched_gh.append( + ( + pending_g[x] if x < len_g else None, + pending_h[y] if y < len_h else None, + ) + ) + sortedx = sorted(x for x, y in xy) + sortedy = sorted(y for x, y in xy) + G = [ + (pending_g.pop(x) if x < len(pending_g) else None) + for x in reversed(sortedx) + ] + H = [ + (pending_h.pop(y) if y < len(pending_h) else None) + for y in reversed(sortedy) + ] + + yield from get_edit_paths( + matched_uv, + pending_u, + pending_v, + Cv_ij, + matched_gh, + pending_g, + pending_h, + Ce_xy, + matched_cost + edit_cost, + ) + + # backtrack + if u is not None: + pending_u.insert(i, u) + if v is not None: + pending_v.insert(j, v) + matched_uv.pop() + for x, g in zip(sortedx, reversed(G)): + if g is not None: + pending_g.insert(x, g) + for y, h in zip(sortedy, reversed(H)): + if h is not None: + pending_h.insert(y, h) + for _ in xy: + matched_gh.pop() + + # Initialization + + pending_u = list(G1.nodes) + pending_v = list(G2.nodes) + + initial_cost = 0 + if roots: + root_u, root_v = roots + if root_u not in pending_u or root_v not in pending_v: + raise nx.NodeNotFound("Root node not in graph.") + + # remove roots from pending + pending_u.remove(root_u) + pending_v.remove(root_v) + + # cost matrix of vertex mappings + m = len(pending_u) + n = len(pending_v) + C = np.zeros((m + n, m + n)) + if node_subst_cost: + C[0:m, 0:n] = np.array( + [ + node_subst_cost(G1.nodes[u], G2.nodes[v]) + for u in pending_u + for v in pending_v + ] + ).reshape(m, n) + if roots: + initial_cost = node_subst_cost(G1.nodes[root_u], G2.nodes[root_v]) + elif node_match: + C[0:m, 0:n] = np.array( + [ + 1 - int(node_match(G1.nodes[u], G2.nodes[v])) + for u in pending_u + for v in pending_v + ] + ).reshape(m, n) + if roots: + initial_cost = 1 - node_match(G1.nodes[root_u], G2.nodes[root_v]) + else: + # all zeroes + pass + # assert not min(m, n) or C[0:m, 0:n].min() >= 0 + if node_del_cost: + del_costs = [node_del_cost(G1.nodes[u]) for u in pending_u] + else: 
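+        # default: deleting any node costs 1 (the documented fallback)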
+ del_costs = [1] * len(pending_u) + # assert not m or min(del_costs) >= 0 + if node_ins_cost: + ins_costs = [node_ins_cost(G2.nodes[v]) for v in pending_v] + else: + ins_costs = [1] * len(pending_v) + # assert not n or min(ins_costs) >= 0 + inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1 + C[0:m, n : n + m] = np.array( + [del_costs[i] if i == j else inf for i in range(m) for j in range(m)] + ).reshape(m, m) + C[m : m + n, 0:n] = np.array( + [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)] + ).reshape(n, n) + Cv = make_CostMatrix(C, m, n) + + pending_g = list(G1.edges) + pending_h = list(G2.edges) + + # cost matrix of edge mappings + m = len(pending_g) + n = len(pending_h) + C = np.zeros((m + n, m + n)) + if edge_subst_cost: + C[0:m, 0:n] = np.array( + [ + edge_subst_cost(G1.edges[g], G2.edges[h]) + for g in pending_g + for h in pending_h + ] + ).reshape(m, n) + elif edge_match: + C[0:m, 0:n] = np.array( + [ + 1 - int(edge_match(G1.edges[g], G2.edges[h])) + for g in pending_g + for h in pending_h + ] + ).reshape(m, n) + else: + # all zeroes + pass + # assert not min(m, n) or C[0:m, 0:n].min() >= 0 + if edge_del_cost: + del_costs = [edge_del_cost(G1.edges[g]) for g in pending_g] + else: + del_costs = [1] * len(pending_g) + # assert not m or min(del_costs) >= 0 + if edge_ins_cost: + ins_costs = [edge_ins_cost(G2.edges[h]) for h in pending_h] + else: + ins_costs = [1] * len(pending_h) + # assert not n or min(ins_costs) >= 0 + inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1 + C[0:m, n : n + m] = np.array( + [del_costs[i] if i == j else inf for i in range(m) for j in range(m)] + ).reshape(m, m) + C[m : m + n, 0:n] = np.array( + [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)] + ).reshape(n, n) + Ce = make_CostMatrix(C, m, n) + + maxcost_value = Cv.C.sum() + Ce.C.sum() + 1 + + if timeout is not None: + if timeout <= 0: + raise nx.NetworkXError("Timeout value must be greater than 0") + start = time.perf_counter() + + def prune(cost): + if timeout is not None: + if time.perf_counter() - start > timeout: + return True + if upper_bound is not None: + if cost > upper_bound: + return True + if cost > maxcost_value: + return True + if strictly_decreasing and cost >= maxcost_value: + return True + return False + + # Now go! + + done_uv = [] if roots is None else [roots] + + for vertex_path, edge_path, cost in get_edit_paths( + done_uv, pending_u, pending_v, Cv, [], pending_g, pending_h, Ce, initial_cost + ): + # assert sorted(G1.nodes) == sorted(u for u, v in vertex_path if u is not None) + # assert sorted(G2.nodes) == sorted(v for u, v in vertex_path if v is not None) + # assert sorted(G1.edges) == sorted(g for g, h in edge_path if g is not None) + # assert sorted(G2.edges) == sorted(h for g, h in edge_path if h is not None) + # print(vertex_path, edge_path, cost, file = sys.stderr) + # assert cost == maxcost_value + yield list(vertex_path), list(edge_path), float(cost) + + +@nx._dispatchable +def simrank_similarity( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Returns the SimRank similarity of nodes in the graph ``G``. + + SimRank is a similarity metric that says "two objects are considered + to be similar if they are referenced by similar objects." [1]_. 
+
+    The pseudo-code definition from the paper is::
+
+        def simrank(G, u, v):
+            in_neighbors_u = G.predecessors(u)
+            in_neighbors_v = G.predecessors(v)
+            scale = C / (len(in_neighbors_u) * len(in_neighbors_v))
+            return scale * sum(
+                simrank(G, w, x) for w, x in product(in_neighbors_u, in_neighbors_v)
+            )
+
+    where ``G`` is the graph, ``u`` is the source, ``v`` is the target,
+    and ``C`` is a float decay or importance factor between 0 and 1.
+
+    The SimRank algorithm for determining node similarity is defined in
+    [2]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A NetworkX graph
+
+    source : node
+        If this is specified, the returned dictionary maps each node
+        ``v`` in the graph to the similarity between ``source`` and
+        ``v``.
+
+    target : node
+        If both ``source`` and ``target`` are specified, the similarity
+        value between ``source`` and ``target`` is returned. If
+        ``target`` is specified but ``source`` is not, this argument is
+        ignored.
+
+    importance_factor : float
+        The relative importance of indirect neighbors with respect to
+        direct neighbors.
+
+    max_iterations : integer
+        Maximum number of iterations.
+
+    tolerance : float
+        Error tolerance used to check convergence. When an iteration of
+        the algorithm finds that no similarity value changes more than
+        this amount, the algorithm halts.
+
+    Returns
+    -------
+    similarity : dictionary or float
+        If ``source`` and ``target`` are both ``None``, this returns a
+        dictionary of dictionaries, where the keys are node pairs and the
+        values are the similarity of the corresponding pair of nodes.
+
+        If ``source`` is not ``None`` but ``target`` is, this returns a
+        dictionary mapping each node to the similarity between ``source``
+        and that node.
+
+        If neither ``source`` nor ``target`` is ``None``, this returns
+        the similarity value for the given pair of nodes.
+
+    Raises
+    ------
+    ExceededMaxIterations
+        If the algorithm does not converge within ``max_iterations``.
+
+    NodeNotFound
+        If either ``source`` or ``target`` is not in `G`.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(2)
+    >>> nx.simrank_similarity(G)
+    {0: {0: 1.0, 1: 0.0}, 1: {0: 0.0, 1: 1.0}}
+    >>> nx.simrank_similarity(G, source=0)
+    {0: 1.0, 1: 0.0}
+    >>> nx.simrank_similarity(G, source=0, target=0)
+    1.0
+
+    The result of this function can be converted to a numpy array
+    representing the SimRank matrix by using the node order of the
+    graph to determine which row and column represent each node.
+    Other orderings of nodes are also possible.
+
+    >>> import numpy as np
+    >>> sim = nx.simrank_similarity(G)
+    >>> np.array([[sim[u][v] for v in G] for u in G])
+    array([[1., 0.],
+           [0., 1.]])
+    >>> sim_1d = nx.simrank_similarity(G, source=0)
+    >>> np.array([sim_1d[v] for v in G])
+    array([1., 0.])
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/SimRank
+    .. [2] G. Jeh and J. Widom.
+           "SimRank: a measure of structural-context similarity",
+           In KDD'02: Proceedings of the Eighth ACM SIGKDD
+           International Conference on Knowledge Discovery and Data Mining,
+           pp. 538--543. ACM Press, 2002.
+ """ + import numpy as np + + nodelist = list(G) + if source is not None: + if source not in nodelist: + raise nx.NodeNotFound(f"Source node {source} not in G") + else: + s_indx = nodelist.index(source) + else: + s_indx = None + + if target is not None: + if target not in nodelist: + raise nx.NodeNotFound(f"Target node {target} not in G") + else: + t_indx = nodelist.index(target) + else: + t_indx = None + + x = _simrank_similarity_numpy( + G, s_indx, t_indx, importance_factor, max_iterations, tolerance + ) + + if isinstance(x, np.ndarray): + if x.ndim == 1: + return dict(zip(G, x.tolist())) + # else x.ndim == 2 + return {u: dict(zip(G, row)) for u, row in zip(G, x.tolist())} + return float(x) + + +def _simrank_similarity_python( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Returns the SimRank similarity of nodes in the graph ``G``. + + This pure Python version is provided for pedagogical purposes. + + Examples + -------- + >>> G = nx.cycle_graph(2) + >>> nx.similarity._simrank_similarity_python(G) + {0: {0: 1, 1: 0.0}, 1: {0: 0.0, 1: 1}} + >>> nx.similarity._simrank_similarity_python(G, source=0) + {0: 1, 1: 0.0} + >>> nx.similarity._simrank_similarity_python(G, source=0, target=0) + 1 + """ + # build up our similarity adjacency dictionary output + newsim = {u: {v: 1 if u == v else 0 for v in G} for u in G} + + # These functions compute the update to the similarity value of the nodes + # `u` and `v` with respect to the previous similarity values. + def avg_sim(s): + return sum(newsim[w][x] for (w, x) in s) / len(s) if s else 0.0 + + Gadj = G.pred if G.is_directed() else G.adj + + def sim(u, v): + return importance_factor * avg_sim(list(product(Gadj[u], Gadj[v]))) + + for its in range(max_iterations): + oldsim = newsim + newsim = {u: {v: sim(u, v) if u != v else 1 for v in G} for u in G} + is_close = all( + all( + abs(newsim[u][v] - old) <= tolerance * (1 + abs(old)) + for v, old in nbrs.items() + ) + for u, nbrs in oldsim.items() + ) + if is_close: + break + + if its + 1 == max_iterations: + raise nx.ExceededMaxIterations( + f"simrank did not converge after {max_iterations} iterations." + ) + + if source is not None and target is not None: + return newsim[source][target] + if source is not None: + return newsim[source] + return newsim + + +def _simrank_similarity_numpy( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``. + + The SimRank algorithm for determining node similarity is defined in + [1]_. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + + source : node + If this is specified, the returned dictionary maps each node + ``v`` in the graph to the similarity between ``source`` and + ``v``. + + target : node + If both ``source`` and ``target`` are specified, the similarity + value between ``source`` and ``target`` is returned. If + ``target`` is specified but ``source`` is not, this argument is + ignored. + + importance_factor : float + The relative importance of indirect neighbors with respect to + direct neighbors. + + max_iterations : integer + Maximum number of iterations. + + tolerance : float + Error tolerance used to check convergence. When an iteration of + the algorithm finds that no similarity value changes more than + this amount, the algorithm halts. 
+
+    Returns
+    -------
+    similarity : numpy array or float
+        If ``source`` and ``target`` are both ``None``, this returns a
+        2D array containing SimRank scores of the nodes.
+
+        If ``source`` is not ``None`` but ``target`` is, this returns a
+        1D array containing the SimRank scores between ``source`` and
+        each node of ``G``.
+
+        If neither ``source`` nor ``target`` is ``None``, this returns
+        the similarity value for the given pair of nodes.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(2)
+    >>> nx.similarity._simrank_similarity_numpy(G)
+    array([[1., 0.],
+           [0., 1.]])
+    >>> nx.similarity._simrank_similarity_numpy(G, source=0)
+    array([1., 0.])
+    >>> nx.similarity._simrank_similarity_numpy(G, source=0, target=0)
+    1.0
+
+    References
+    ----------
+    .. [1] G. Jeh and J. Widom.
+           "SimRank: a measure of structural-context similarity",
+           In KDD'02: Proceedings of the Eighth ACM SIGKDD
+           International Conference on Knowledge Discovery and Data Mining,
+           pp. 538--543. ACM Press, 2002.
+    """
+    # This algorithm follows roughly
+    #
+    #     S = max{C * (A.T * S * A), I}
+    #
+    # where C is the importance factor, A is the column normalized
+    # adjacency matrix, and I is the identity matrix.
+    import numpy as np
+
+    adjacency_matrix = nx.to_numpy_array(G)
+
+    # column-normalize the ``adjacency_matrix``
+    s = np.array(adjacency_matrix.sum(axis=0))
+    s[s == 0] = 1
+    adjacency_matrix /= s  # adjacency_matrix.sum(axis=0)
+
+    newsim = np.eye(len(G), dtype=np.float64)
+    for its in range(max_iterations):
+        prevsim = newsim.copy()
+        newsim = importance_factor * ((adjacency_matrix.T @ prevsim) @ adjacency_matrix)
+        np.fill_diagonal(newsim, 1.0)
+
+        if np.allclose(prevsim, newsim, atol=tolerance):
+            break
+
+    if its + 1 == max_iterations:
+        raise nx.ExceededMaxIterations(
+            f"simrank did not converge after {max_iterations} iterations."
+        )
+
+    if source is not None and target is not None:
+        return float(newsim[source, target])
+    if source is not None:
+        return newsim[source]
+    return newsim
+
+
+@nx._dispatchable(edge_attrs="weight")
+def panther_similarity(
+    G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None, weight="weight"
+):
+    r"""Returns the Panther similarity of nodes in the graph `G` to node ``source``.
+
+    Panther is a similarity metric that says "two objects are considered
+    to be similar if they frequently appear on the same paths." [1]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A NetworkX graph
+    source : node
+        Source node for which to find the top `k` similar other nodes
+    k : int (default = 5)
+        The number of most similar nodes to return.
+    path_length : int (default = 5)
+        How long the randomly generated paths should be (``T`` in [1]_)
+    c : float (default = 0.5)
+        A universal positive constant used to scale the number
+        of sample random paths to generate.
+    delta : float (default = 0.1)
+        The failure probability of the sampling bound: with probability
+        at least $1 - \delta$, the estimated similarities are an
+        ``eps``-approximation of the true path-based similarities [1]_.
+    eps : float or None (default = None)
+        The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore,
+        if no value is provided, the recommended computed value will be used.
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value
+        used as a weight. If None then each edge has weight 1.
+
+    Returns
+    -------
+    similarity : dictionary
+        Dictionary of nodes to similarity scores (as floats).
+        Note: the self-similarity (i.e., ``source``) will not be included in
+        the returned dictionary. So, for ``k = 5``, a dictionary of the
+        top 4 nodes and their similarity scores will be returned.
+
+    Raises
+    ------
+    NetworkXUnfeasible
+        If `source` is an isolated node.
+
+    NodeNotFound
+        If `source` is not in `G`.
+
+    Notes
+    -----
+    The isolated nodes in `G` are ignored.
+
+    Examples
+    --------
+    >>> G = nx.star_graph(10)
+    >>> sim = nx.panther_similarity(G, 0)
+
+    References
+    ----------
+    .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.
+           Panther: Fast top-k similarity search on large networks.
+           In Proceedings of the ACM SIGKDD International Conference
+           on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
+           Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
+    """
+    import numpy as np
+
+    if source not in G:
+        raise nx.NodeNotFound(f"Source node {source} not in G")
+
+    isolates = set(nx.isolates(G))
+
+    if source in isolates:
+        raise nx.NetworkXUnfeasible(
+            f"Panther similarity is not defined for the isolated source node {source}."
+        )
+
+    G = G.subgraph([node for node in G.nodes if node not in isolates]).copy()
+
+    num_nodes = G.number_of_nodes()
+    if num_nodes < k:
+        warnings.warn(
+            f"Number of nodes is {num_nodes}, but requested k is {k}. "
+            "Setting k to number of nodes."
+        )
+        k = num_nodes
+    # According to [1], a good value for ``eps`` was determined
+    # empirically to be sqrt( 1 / |E| )
+    if eps is None:
+        eps = np.sqrt(1.0 / G.number_of_edges())
+
+    inv_node_map = {name: index for index, name in enumerate(G.nodes)}
+    node_map = np.array(G)
+
+    # Calculate the sample size ``R`` for how many paths
+    # to randomly generate
+    t_choose_2 = math.comb(path_length, 2)
+    sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))
+    index_map = {}
+    _ = list(
+        generate_random_paths(
+            G, sample_size, path_length=path_length, index_map=index_map, weight=weight
+        )
+    )
+    S = np.zeros(num_nodes)
+
+    inv_sample_size = 1 / sample_size
+
+    source_paths = set(index_map[source])
+
+    # Calculate the path similarities
+    # between ``source`` (v) and ``node`` (v_j)
+    # using our inverted index mapping of
+    # vertices to paths
+    for node, paths in index_map.items():
+        # Only consider paths where both
+        # ``node`` and ``source`` are present
+        common_paths = source_paths.intersection(paths)
+        S[inv_node_map[node]] = len(common_paths) * inv_sample_size
+
+    # Retrieve top ``k`` similar
+    # Note: the below performed anywhere from 4-10x faster
+    # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``
+    top_k_unsorted = np.argpartition(S, -k)[-k:]
+    top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1]
+
+    # Add back the similarity scores
+    top_k_with_val = dict(
+        zip(node_map[top_k_sorted].tolist(), S[top_k_sorted].tolist())
+    )
+
+    # Remove the self-similarity
+    top_k_with_val.pop(source, None)
+    return top_k_with_val
+
+
+@np_random_state(5)
+@nx._dispatchable(edge_attrs="weight")
+def generate_random_paths(
+    G,
+    sample_size,
+    path_length=5,
+    index_map=None,
+    weight="weight",
+    seed=None,
+    *,
+    source=None,
+):
+    """Randomly generate `sample_size` paths of length `path_length`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A NetworkX graph
+    sample_size : integer
+        The number of paths to generate. This is ``R`` in [1]_.
+    path_length : integer (default = 5)
+        The maximum size of the path to randomly generate.
+        This is ``T`` in [1]_.
According to the paper, ``T >= 5`` is + recommended. + index_map : dictionary, optional + If provided, this will be populated with the inverted + index of nodes mapped to the set of generated random path + indices within ``paths``. + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + source : node, optional + Node to use as the starting point for all generated paths. + If None then starting nodes are selected at random with uniform probability. + + Returns + ------- + paths : generator of lists + Generator of `sample_size` paths each with length `path_length`. + + Examples + -------- + The generator yields `sample_size` number of paths of length `path_length` + drawn from `G`: + + >>> G = nx.complete_graph(5) + >>> next(nx.generate_random_paths(G, sample_size=1, path_length=3, seed=42)) + [3, 4, 2, 3] + >>> list(nx.generate_random_paths(G, sample_size=3, path_length=4, seed=42)) + [[3, 4, 2, 3, 0], [2, 0, 2, 1, 0], [2, 0, 4, 3, 0]] + + By passing a dictionary into `index_map`, it will build an + inverted index mapping of nodes to the paths in which that node is present: + + >>> G = nx.wheel_graph(10) + >>> index_map = {} + >>> random_paths = list( + ... nx.generate_random_paths(G, sample_size=3, index_map=index_map, seed=2771) + ... ) + >>> random_paths + [[3, 2, 1, 9, 8, 7], [4, 0, 5, 6, 7, 8], [3, 0, 5, 0, 9, 8]] + >>> paths_containing_node_0 = [ + ... random_paths[path_idx] for path_idx in index_map.get(0, []) + ... ] + >>> paths_containing_node_0 + [[4, 0, 5, 6, 7, 8], [3, 0, 5, 0, 9, 8]] + + References + ---------- + .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J. + Panther: Fast top-k similarity search on large networks. + In Proceedings of the ACM SIGKDD International Conference + on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454). + Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267. + """ + import numpy as np + + randint_fn = ( + seed.integers if isinstance(seed, np.random.Generator) else seed.randint + ) + + # Calculate transition probabilities between + # every pair of vertices according to Eq. 
(3) + adj_mat = nx.to_numpy_array(G, weight=weight) + inv_row_sums = np.reciprocal(adj_mat.sum(axis=1)).reshape(-1, 1) + transition_probabilities = adj_mat * inv_row_sums + + node_map = list(G) + num_nodes = G.number_of_nodes() + + for path_index in range(sample_size): + if source is None: + # Sample current vertex v = v_i uniformly at random + node_index = randint_fn(num_nodes) + node = node_map[node_index] + else: + if source not in node_map: + raise nx.NodeNotFound(f"Initial node {source} not in G") + + node = source + node_index = node_map.index(node) + + # Add v into p_r and add p_r into the path set + # of v, i.e., P_v + path = [node] + + # Build the inverted index (P_v) of vertices to paths + if index_map is not None: + if node in index_map: + index_map[node].add(path_index) + else: + index_map[node] = {path_index} + + starting_index = node_index + for _ in range(path_length): + # Randomly sample a neighbor (v_j) according + # to transition probabilities from ``node`` (v) to its neighbors + nbr_index = seed.choice( + num_nodes, p=transition_probabilities[starting_index] + ) + + # Set current vertex (v = v_j) + starting_index = nbr_index + + # Add v into p_r + nbr_node = node_map[nbr_index] + path.append(nbr_node) + + # Add p_r into P_v + if index_map is not None: + if nbr_node in index_map: + index_map[nbr_node].add(path_index) + else: + index_map[nbr_node] = {path_index} + + yield path diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/simple_paths.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/simple_paths.py new file mode 100644 index 0000000..e8cf1d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/simple_paths.py @@ -0,0 +1,948 @@ +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "all_simple_paths", + "is_simple_path", + "shortest_simple_paths", + "all_simple_edge_paths", +] + + +@nx._dispatchable +def is_simple_path(G, nodes): + """Returns True if and only if `nodes` form a simple path in `G`. + + A *simple path* in a graph is a nonempty sequence of nodes in which + no node appears more than once in the sequence, and each adjacent + pair of nodes in the sequence is adjacent in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph. + nodes : list + A list of one or more nodes in the graph `G`. + + Returns + ------- + bool + Whether the given list of nodes represents a simple path in `G`. + + Notes + ----- + An empty list of nodes is not a path but a list of one node is a + path. Here's an explanation why. + + This function operates on *node paths*. One could also consider + *edge paths*. There is a bijection between node paths and edge + paths. + + The *length of a path* is the number of edges in the path, so a list + of nodes of length *n* corresponds to a path of length *n* - 1. + Thus the smallest edge path would be a list of zero edges, the empty + path. This corresponds to a list of one node. 
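+
+    For example (a small sanity check of the convention above)::
+
+        >>> G = nx.path_graph(3)
+        >>> nx.is_simple_path(G, [0])
+        True
+        >>> nx.is_simple_path(G, [])
+        False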
+ + To convert between a node path and an edge path, you can use code + like the following:: + + >>> from networkx.utils import pairwise + >>> nodes = [0, 1, 2, 3] + >>> edges = list(pairwise(nodes)) + >>> edges + [(0, 1), (1, 2), (2, 3)] + >>> nodes = [edges[0][0]] + [v for u, v in edges] + >>> nodes + [0, 1, 2, 3] + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> nx.is_simple_path(G, [2, 3, 0]) + True + >>> nx.is_simple_path(G, [0, 2]) + False + + """ + # The empty list is not a valid path. Could also return + # NetworkXPointlessConcept here. + if len(nodes) == 0: + return False + + # If the list is a single node, just check that the node is actually + # in the graph. + if len(nodes) == 1: + return nodes[0] in G + + # check that all nodes in the list are in the graph, if at least one + # is not in the graph, then this is not a simple path + if not all(n in G for n in nodes): + return False + + # If the list contains repeated nodes, then it's not a simple path + if len(set(nodes)) != len(nodes): + return False + + # Test that each adjacent pair of nodes is adjacent. + return all(v in G[u] for u, v in pairwise(nodes)) + + +@nx._dispatchable +def all_simple_paths(G, source, target, cutoff=None): + """Generate all simple paths in the graph G from source to target. + + A simple path is a path with no repeated nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : nodes + Single node or iterable of nodes at which to end path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths. If there are no paths + between the source and target within the given cutoff the generator + produces no output. If it is possible to traverse the same sequence of + nodes in multiple ways, namely through parallel edges, then it will be + returned multiple times (once for each viable edge combination). + + Examples + -------- + This iterator generates lists of nodes:: + + >>> G = nx.complete_graph(4) + >>> for path in nx.all_simple_paths(G, source=0, target=3): + ... print(path) + ... + [0, 1, 2, 3] + [0, 1, 3] + [0, 2, 1, 3] + [0, 2, 3] + [0, 3] + + You can generate only those paths that are shorter than a certain + length by using the `cutoff` keyword argument:: + + >>> paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2) + >>> print(list(paths)) + [[0, 1, 3], [0, 2, 3], [0, 3]] + + To get each path as the corresponding list of edges, you can use the + :func:`networkx.utils.pairwise` helper function:: + + >>> paths = nx.all_simple_paths(G, source=0, target=3) + >>> for path in map(nx.utils.pairwise, paths): + ... print(list(path)) + [(0, 1), (1, 2), (2, 3)] + [(0, 1), (1, 3)] + [(0, 2), (2, 1), (1, 3)] + [(0, 2), (2, 3)] + [(0, 3)] + + Pass an iterable of nodes as target to generate all paths ending in any of several nodes:: + + >>> G = nx.complete_graph(4) + >>> for path in nx.all_simple_paths(G, source=0, target=[3, 2]): + ... print(path) + ... 
+ [0, 1, 2] + [0, 1, 2, 3] + [0, 1, 3] + [0, 1, 3, 2] + [0, 2] + [0, 2, 1, 3] + [0, 2, 3] + [0, 3] + [0, 3, 1, 2] + [0, 3, 2] + + The singleton path from ``source`` to itself is considered a simple path and is + included in the results: + + >>> G = nx.empty_graph(5) + >>> list(nx.all_simple_paths(G, source=0, target=0)) + [[0]] + + >>> G = nx.path_graph(3) + >>> list(nx.all_simple_paths(G, source=0, target={0, 1, 2})) + [[0], [0, 1], [0, 1, 2]] + + Iterate over each path from the root nodes to the leaf nodes in a + directed acyclic graph using a functional programming approach:: + + >>> from itertools import chain + >>> from itertools import product + >>> from itertools import starmap + >>> from functools import partial + >>> + >>> chaini = chain.from_iterable + >>> + >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = (v for v, d in G.out_degree() if d == 0) + >>> all_paths = partial(nx.all_simple_paths, G) + >>> list(chaini(starmap(all_paths, product(roots, leaves)))) + [[0, 1, 2], [0, 3, 2]] + + The same list computed using an iterative approach:: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = (v for v, d in G.out_degree() if d == 0) + >>> all_paths = [] + >>> for root in roots: + ... for leaf in leaves: + ... paths = nx.all_simple_paths(G, root, leaf) + ... all_paths.extend(paths) + >>> all_paths + [[0, 1, 2], [0, 3, 2]] + + Iterate over each path from the root nodes to the leaf nodes in a + directed acyclic graph passing all leaves together to avoid unnecessary + compute:: + + >>> G = nx.DiGraph([(0, 1), (2, 1), (1, 3), (1, 4)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = [v for v, d in G.out_degree() if d == 0] + >>> all_paths = [] + >>> for root in roots: + ... paths = nx.all_simple_paths(G, root, leaves) + ... all_paths.extend(paths) + >>> all_paths + [[0, 1, 3], [0, 1, 4], [2, 1, 3], [2, 1, 4]] + + If parallel edges offer multiple ways to traverse a given sequence of + nodes, this sequence of nodes will be returned multiple times: + + >>> G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 2)]) + >>> list(nx.all_simple_paths(G, 0, 2)) + [[0, 1, 2], [0, 1, 2]] + + Notes + ----- + This algorithm uses a modified depth-first search to generate the + paths [1]_. A single path can be found in $O(V+E)$ time but the + number of simple paths in a graph can be very large, e.g. $O(n!)$ in + the complete graph of order $n$. + + This function does not check that a path exists between `source` and + `target`. For large graphs, this may result in very long runtimes. + Consider using `has_path` to check that a path exists between `source` and + `target` before calling this function on large graphs. + + References + ---------- + .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms", + Addison Wesley Professional, 3rd ed., 2001. + + See Also + -------- + all_shortest_paths, shortest_path, has_path + + """ + for edge_path in all_simple_edge_paths(G, source, target, cutoff): + yield [source] + [edge[1] for edge in edge_path] + + +@nx._dispatchable +def all_simple_edge_paths(G, source, target, cutoff=None): + """Generate lists of edges for all simple paths in G from source to target. + + A simple path is a path with no repeated nodes. 
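+
+    This is the edge-path counterpart of :func:`all_simple_paths`: each
+    yielded path is a list of edges rather than a list of nodes.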
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Starting node for path
+
+    target : nodes
+        Single node or iterable of nodes at which to end path
+
+    cutoff : integer, optional
+        Depth to stop the search. Only paths of length <= cutoff are returned.
+
+    Returns
+    -------
+    path_generator: generator
+        A generator that produces lists of simple paths. If there are no paths
+        between the source and target within the given cutoff the generator
+        produces no output.
+        For multigraphs, each list of edges has elements of the form
+        ``(u, v, k)``, where ``k`` corresponds to the edge key.
+
+    Examples
+    --------
+
+    Print the simple path edges of a Graph::
+
+        >>> g = nx.Graph([(1, 2), (2, 4), (1, 3), (3, 4)])
+        >>> for path in sorted(nx.all_simple_edge_paths(g, 1, 4)):
+        ...     print(path)
+        [(1, 2), (2, 4)]
+        [(1, 3), (3, 4)]
+
+    Print the simple path edges of a MultiGraph. Returned edges come with
+    their associated keys::
+
+        >>> mg = nx.MultiGraph()
+        >>> mg.add_edge(1, 2, key="k0")
+        'k0'
+        >>> mg.add_edge(1, 2, key="k1")
+        'k1'
+        >>> mg.add_edge(2, 3, key="k0")
+        'k0'
+        >>> for path in sorted(nx.all_simple_edge_paths(mg, 1, 3)):
+        ...     print(path)
+        [(1, 2, 'k0'), (2, 3, 'k0')]
+        [(1, 2, 'k1'), (2, 3, 'k0')]
+
+    When ``source`` is one of the targets, the empty path starting and ending at
+    ``source`` without traversing any edge is considered a valid simple edge path
+    and is included in the results:
+
+    >>> G = nx.Graph()
+    >>> G.add_node(0)
+    >>> paths = list(nx.all_simple_edge_paths(G, 0, 0))
+    >>> for path in paths:
+    ...     print(path)
+    []
+    >>> len(paths)
+    1
+
+    Notes
+    -----
+    This algorithm uses a modified depth-first search to generate the
+    paths [1]_. A single path can be found in $O(V+E)$ time but the
+    number of simple paths in a graph can be very large, e.g. $O(n!)$ in
+    the complete graph of order $n$.
+
+    References
+    ----------
+    .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms",
+       Addison Wesley Professional, 3rd ed., 2001.
+
+    See Also
+    --------
+    all_shortest_paths, shortest_path, all_simple_paths
+
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"source node {source} not in graph")
+
+    if target in G:
+        targets = {target}
+    else:
+        try:
+            targets = set(target)
+        except TypeError as err:
+            raise nx.NodeNotFound(f"target node {target} not in graph") from err
+
+    cutoff = cutoff if cutoff is not None else len(G) - 1
+
+    if cutoff >= 0 and targets:
+        yield from _all_simple_edge_paths(G, source, targets, cutoff)
+
+
+def _all_simple_edge_paths(G, source, targets, cutoff):
+    # We simulate recursion with a stack, keeping the current path being explored
+    # and the outgoing edge iterators at each point in the stack.
+    # To avoid unnecessary checks, the loop is structured in a way such that a path
+    # is considered for yielding only after a new node/edge is added.
+    # We bootstrap the search by adding a dummy iterator to the stack that only yields
+    # a dummy edge to source (so that the trivial path has a chance of being included).
+
+    get_edges = (
+        (lambda node: G.edges(node, keys=True))
+        if G.is_multigraph()
+        else (lambda node: G.edges(node))
+    )
+
+    # The current_path is a dictionary that maps nodes in the path to the edge that was
+    # used to enter that node (instead of a list of edges) because we want both a fast
+    # membership test for nodes in the path and the preservation of insertion order.
+    current_path = {None: None}
+    stack = [iter([(None, source)])]
+
+    while stack:
+        # 1. Try to extend the current path.
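+        #    (Only edges whose head is not already on the path are considered,
+        #    which is what keeps every explored path simple.)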
+ next_edge = next((e for e in stack[-1] if e[1] not in current_path), None) + if next_edge is None: + # All edges of the last node in the current path have been explored. + stack.pop() + current_path.popitem() + continue + previous_node, next_node, *_ = next_edge + + # 2. Check if we've reached a target. + if next_node in targets: + yield (list(current_path.values()) + [next_edge])[2:] # remove dummy edge + + # 3. Only expand the search through the next node if it makes sense. + if len(current_path) - 1 < cutoff and ( + targets - current_path.keys() - {next_node} + ): + current_path[next_node] = next_edge + stack.append(iter(get_edges(next_node))) + + +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def shortest_simple_paths(G, source, target, weight=None): + """Generate all simple paths in the graph G from source to target, + starting from shortest ones. + + A simple path is a path with no repeated nodes. + + If a weighted shortest path search is to be used, no negative weights + are allowed. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + weight : string or function + If it is a string, it is the name of the edge attribute to be + used as a weight. + + If it is a function, the weight of an edge is the value returned + by the function. The function must accept exactly three positional + arguments: the two endpoints of an edge and the dictionary of edge + attributes for that edge. The function must return a number or None. + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + If None all edges are considered to have unit weight. Default + value None. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths, in order from + shortest to longest. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + NetworkXError + If source or target nodes are not in the input graph. + + NetworkXNotImplemented + If the input graph is a Multi[Di]Graph. + + Examples + -------- + + >>> G = nx.cycle_graph(7) + >>> paths = list(nx.shortest_simple_paths(G, 0, 3)) + >>> print(paths) + [[0, 1, 2, 3], [0, 6, 5, 4, 3]] + + You can use this function to efficiently compute the k shortest/best + paths between two nodes. + + >>> from itertools import islice + >>> def k_shortest_paths(G, source, target, k, weight=None): + ... return list( + ... islice(nx.shortest_simple_paths(G, source, target, weight=weight), k) + ... ) + >>> for path in k_shortest_paths(G, 0, 3, 2): + ... print(path) + [0, 1, 2, 3] + [0, 6, 5, 4, 3] + + Notes + ----- + This procedure is based on algorithm by Jin Y. Yen [1]_. Finding + the first $K$ paths requires $O(KN^3)$ operations. + + See Also + -------- + all_shortest_paths + shortest_path + all_simple_paths + + References + ---------- + .. [1] Jin Y. Yen, "Finding the K Shortest Loopless Paths in a + Network", Management Science, Vol. 17, No. 11, Theory Series + (Jul., 1971), pp. 712-716. 
+ + """ + if source not in G: + raise nx.NodeNotFound(f"source node {source} not in graph") + + if target not in G: + raise nx.NodeNotFound(f"target node {target} not in graph") + + if weight is None: + length_func = len + shortest_path_func = _bidirectional_shortest_path + else: + wt = _weight_function(G, weight) + + def length_func(path): + return sum( + wt(u, v, G.get_edge_data(u, v)) for (u, v) in zip(path, path[1:]) + ) + + shortest_path_func = _bidirectional_dijkstra + + listA = [] + listB = PathBuffer() + prev_path = None + while True: + if not prev_path: + length, path = shortest_path_func(G, source, target, weight=weight) + listB.push(length, path) + else: + ignore_nodes = set() + ignore_edges = set() + for i in range(1, len(prev_path)): + root = prev_path[:i] + root_length = length_func(root) + for path in listA: + if path[:i] == root: + ignore_edges.add((path[i - 1], path[i])) + try: + length, spur = shortest_path_func( + G, + root[-1], + target, + ignore_nodes=ignore_nodes, + ignore_edges=ignore_edges, + weight=weight, + ) + path = root[:-1] + spur + listB.push(root_length + length, path) + except nx.NetworkXNoPath: + pass + ignore_nodes.add(root[-1]) + + if listB: + path = listB.pop() + yield path + listA.append(path) + prev_path = path + else: + break + + +class PathBuffer: + def __init__(self): + self.paths = set() + self.sortedpaths = [] + self.counter = count() + + def __len__(self): + return len(self.sortedpaths) + + def push(self, cost, path): + hashable_path = tuple(path) + if hashable_path not in self.paths: + heappush(self.sortedpaths, (cost, next(self.counter), path)) + self.paths.add(hashable_path) + + def pop(self): + (cost, num, path) = heappop(self.sortedpaths) + hashable_path = tuple(path) + self.paths.remove(hashable_path) + return path + + +def _bidirectional_shortest_path( + G, source, target, ignore_nodes=None, ignore_edges=None, weight=None +): + """Returns the shortest path between source and target ignoring + nodes and edges in the containers ignore_nodes and ignore_edges. + + This is a custom modification of the standard bidirectional shortest + path implementation at networkx.algorithms.unweighted + + Parameters + ---------- + G : NetworkX graph + + source : node + starting node for path + + target : node + ending node for path + + ignore_nodes : container of nodes + nodes to ignore, optional + + ignore_edges : container of edges + edges to ignore, optional + + weight : None + This function accepts a weight argument for convenience of + shortest_simple_paths function. It will be ignored. + + Returns + ------- + path: list + List of nodes in a path from source to target. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + See Also + -------- + shortest_path + + """ + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target, ignore_nodes, ignore_edges) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from w to target + while w is not None: + path.append(w) + w = succ[w] + # from source to w + w = pred[path[0]] + while w is not None: + path.insert(0, w) + w = pred[w] + + return len(path), path + + +def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=None): + """Bidirectional shortest path helper. + Returns (pred,succ,w) where + pred is a dictionary of predecessors from w to the source, and + succ is a dictionary of successors from w to the target. 
+ """ + # does BFS from both source and target and meets in the middle + if ignore_nodes and (source in ignore_nodes or target in ignore_nodes): + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + if target == source: + return ({target: None}, {source: None}, source) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # support optional nodes filter + if ignore_nodes: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if w not in ignore_nodes: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # support optional edges filter + if ignore_edges: + if G.is_directed(): + + def filter_pred_iter(pred_iter): + def iterate(v): + for w in pred_iter(v): + if (w, v) not in ignore_edges: + yield w + + return iterate + + def filter_succ_iter(succ_iter): + def iterate(v): + for w in succ_iter(v): + if (v, w) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_pred_iter(Gpred) + Gsucc = filter_succ_iter(Gsucc) + + else: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if (v, w) not in ignore_edges and (w, v) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # predecessor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + while forward_fringe and reverse_fringe: + if len(forward_fringe) <= len(reverse_fringe): + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc(v): + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: + # found path + return pred, succ, w + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred(v): + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: + # found path + return pred, succ, w + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +def _bidirectional_dijkstra( + G, source, target, weight="weight", ignore_nodes=None, ignore_edges=None +): + """Dijkstra's algorithm for shortest paths using bidirectional search. + + This function returns the shortest path between source and target + ignoring nodes and edges in the containers ignore_nodes and + ignore_edges. + + This is a custom modification of the standard Dijkstra bidirectional + shortest path implementation at networkx.algorithms.weighted + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node. + + target : node + Ending node. + + weight: string, function, optional (default='weight') + Edge data key or weight function corresponding to the edge weight + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + ignore_nodes : container of nodes + nodes to ignore, optional + + ignore_edges : container of edges + edges to ignore, optional + + Returns + ------- + length : number + Shortest path length. + + Returns a tuple of two dictionaries keyed by node. + The first dictionary stores distance from the source. + The second stores the path from the source to that node. 
+ + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + In practice bidirectional Dijkstra is much more than twice as fast as + ordinary Dijkstra. + + Ordinary Dijkstra expands nodes in a sphere-like manner from the + source. The radius of this sphere will eventually be the length + of the shortest path. Bidirectional Dijkstra will expand nodes + from both the source and the target, making two spheres of half + this radius. Volume of the first sphere is pi*r*r while the + others are 2*pi*r/2*r/2, making up half the volume. + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). + + See Also + -------- + shortest_path + shortest_path_length + """ + if ignore_nodes and (source in ignore_nodes or target in ignore_nodes): + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + if source == target: + if source not in G: + raise nx.NodeNotFound(f"Node {source} not in graph") + return (0, [source]) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # support optional nodes filter + if ignore_nodes: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if w not in ignore_nodes: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # support optional edges filter + if ignore_edges: + if G.is_directed(): + + def filter_pred_iter(pred_iter): + def iterate(v): + for w in pred_iter(v): + if (w, v) not in ignore_edges: + yield w + + return iterate + + def filter_succ_iter(succ_iter): + def iterate(v): + for w in succ_iter(v): + if (v, w) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_pred_iter(Gpred) + Gsucc = filter_succ_iter(Gsucc) + + else: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if (v, w) not in ignore_edges and (w, v) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + wt = _weight_function(G, weight) + # Init: Forward Backward + dists = [{}, {}] # dictionary of final distances + paths = [{source: [source]}, {target: [target]}] # dictionary of paths + fringe = [[], []] # heap of (distance, node) tuples for + # extracting next node to expand + seen = [{source: 0}, {target: 0}] # dictionary of distances to + # nodes seen + c = count() + # initialize fringe heap + heappush(fringe[0], (0, next(c), source)) + heappush(fringe[1], (0, next(c), target)) + # neighs for extracting correct neighbor information + neighs = [Gsucc, Gpred] + # variables to hold shortest discovered path + # finaldist = 1e30000 + finalpath = [] + dir = 1 + while fringe[0] and fringe[1]: + # choose direction + # dir == 0 is forward direction and dir == 1 is back + dir = 1 - dir + # extract closest to expand + (dist, _, v) = heappop(fringe[dir]) + if v in dists[dir]: + # Shortest path to v has already been found + continue + # update distance + dists[dir][v] = dist # equal to seen[dir][v] + if v in dists[1 - dir]: + # if we have scanned v in both directions we are done + # we have now discovered the shortest path + 
return (finaldist, finalpath) + + for w in neighs[dir](v): + if dir == 0: # forward + minweight = wt(v, w, G.get_edge_data(v, w)) + else: # back, must remember to change v,w->w,v + minweight = wt(w, v, G.get_edge_data(w, v)) + if minweight is None: + continue + vwLength = dists[dir][v] + minweight + + if w in dists[dir]: + if vwLength < dists[dir][w]: + raise ValueError("Contradictory paths found: negative weights?") + elif w not in seen[dir] or vwLength < seen[dir][w]: + # relaxing + seen[dir][w] = vwLength + heappush(fringe[dir], (vwLength, next(c), w)) + paths[dir][w] = paths[dir][v] + [w] + if w in seen[0] and w in seen[1]: + # see if this path is better than the already + # discovered shortest path + totaldist = seen[0][w] + seen[1][w] + if finalpath == [] or finaldist > totaldist: + finaldist = totaldist + revpath = paths[1][w][:] + revpath.reverse() + finalpath = paths[0][w] + revpath[1:] + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/smallworld.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/smallworld.py new file mode 100644 index 0000000..456a4ca --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/smallworld.py @@ -0,0 +1,404 @@ +"""Functions for estimating the small-world-ness of graphs. + +A small world network is characterized by a small average shortest path length, +and a large clustering coefficient. + +Small-worldness is commonly measured with the coefficient sigma or omega. + +Both coefficients compare the average clustering coefficient and shortest path +length of a given graph against the same quantities for an equivalent random +or lattice graph. + +For more information, see the Wikipedia article on small-world network [1]_. + +.. [1] Small-world network:: https://en.wikipedia.org/wiki/Small-world_network + +""" + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["random_reference", "lattice_reference", "sigma", "omega"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatchable(returns_graph=True) +def random_reference(G, niter=1, connectivity=True, seed=None): + """Compute a random graph by swapping edges of a given graph. + + Parameters + ---------- + G : graph + An undirected graph with 4 or more nodes. + + niter : integer (optional, default=1) + An edge is rewired approximately `niter` times. + + connectivity : boolean (optional, default=True) + When True, ensure connectivity for the randomized graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The randomized graph. + + Raises + ------ + NetworkXError + If there are fewer than 4 nodes or 2 edges in `G` + + Notes + ----- + The implementation is adapted from the algorithm by Maslov and Sneppen + (2002) [1]_. + + References + ---------- + .. [1] Maslov, Sergei, and Kim Sneppen. + "Specificity and stability in topology of protein networks." + Science 296.5569 (2002): 910-913. 
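+
+    A small usage sketch (``watts_strogatz_graph`` is just an arbitrary
+    connected test graph here)::
+
+        G = nx.watts_strogatz_graph(50, 4, 0.1, seed=42)
+        Gr = nx.random_reference(G, niter=1, seed=42)
+        # the double-edge swaps preserve the degree sequence
+        assert sorted(d for _, d in G.degree()) == sorted(d for _, d in Gr.degree())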
+ """ + if len(G) < 4: + raise nx.NetworkXError("Graph has fewer than four nodes.") + if len(G.edges) < 2: + raise nx.NetworkXError("Graph has fewer that 2 edges") + + from networkx.utils import cumulative_distribution, discrete_sequence + + local_conn = nx.connectivity.local_edge_connectivity + + G = G.copy() + keys, degrees = zip(*G.degree()) # keys, degree + cdf = cumulative_distribution(degrees) # cdf of degree + nnodes = len(G) + nedges = nx.number_of_edges(G) + niter = niter * nedges + ntries = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2)) + swapcount = 0 + + for i in range(niter): + n = 0 + while n < ntries: + # pick two random edges without creating edge list + # choose source node indices from discrete distribution + (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed) + if ai == ci: + continue # same source, skip + a = keys[ai] # convert index to label + c = keys[ci] + # choose target uniformly from neighbors + b = seed.choice(list(G.neighbors(a))) + d = seed.choice(list(G.neighbors(c))) + if b in [a, c, d] or d in [a, b, c]: + continue # all vertices should be different + + # don't create parallel edges + if (d not in G[a]) and (b not in G[c]): + G.add_edge(a, d) + G.add_edge(c, b) + G.remove_edge(a, b) + G.remove_edge(c, d) + + # Check if the graph is still connected + if connectivity and local_conn(G, a, b) == 0: + # Not connected, revert the swap + G.remove_edge(a, d) + G.remove_edge(c, b) + G.add_edge(a, b) + G.add_edge(c, d) + else: + swapcount += 1 + break + n += 1 + return G + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(4) +@nx._dispatchable(returns_graph=True) +def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None): + """Latticize the given graph by swapping edges. + + Parameters + ---------- + G : graph + An undirected graph. + + niter : integer (optional, default=1) + An edge is rewired approximately niter times. + + D : numpy.array (optional, default=None) + Distance to the diagonal matrix. + + connectivity : boolean (optional, default=True) + Ensure connectivity for the latticized graph when set to True. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The latticized graph. + + Raises + ------ + NetworkXError + If there are fewer than 4 nodes or 2 edges in `G` + + Notes + ----- + The implementation is adapted from the algorithm by Sporns et al. [1]_. + which is inspired from the original work by Maslov and Sneppen(2002) [2]_. + + References + ---------- + .. [1] Sporns, Olaf, and Jonathan D. Zwi. + "The small world of the cerebral cortex." + Neuroinformatics 2.2 (2004): 145-162. + .. [2] Maslov, Sergei, and Kim Sneppen. + "Specificity and stability in topology of protein networks." + Science 296.5569 (2002): 910-913. + """ + import numpy as np + + from networkx.utils import cumulative_distribution, discrete_sequence + + local_conn = nx.connectivity.local_edge_connectivity + + if len(G) < 4: + raise nx.NetworkXError("Graph has fewer than four nodes.") + if len(G.edges) < 2: + raise nx.NetworkXError("Graph has fewer that 2 edges") + # Instead of choosing uniformly at random from a generated edge list, + # this algorithm chooses nonuniformly from the set of nodes with + # probability weighted by degree. 
+ G = G.copy() + keys, degrees = zip(*G.degree()) # keys, degree + cdf = cumulative_distribution(degrees) # cdf of degree + + nnodes = len(G) + nedges = nx.number_of_edges(G) + if D is None: + D = np.zeros((nnodes, nnodes)) + un = np.arange(1, nnodes) + um = np.arange(nnodes - 1, 0, -1) + u = np.append((0,), np.where(un < um, un, um)) + + for v in range(int(np.ceil(nnodes / 2))): + D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1]) + D[v, :] = D[nnodes - v - 1, :][::-1] + + niter = niter * nedges + # maximal number of rewiring attempts per 'niter' + max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2)) + + for _ in range(niter): + n = 0 + while n < max_attempts: + # pick two random edges without creating edge list + # choose source node indices from discrete distribution + (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed) + if ai == ci: + continue # same source, skip + a = keys[ai] # convert index to label + c = keys[ci] + # choose target uniformly from neighbors + b = seed.choice(list(G.neighbors(a))) + d = seed.choice(list(G.neighbors(c))) + bi = keys.index(b) + di = keys.index(d) + + if b in [a, c, d] or d in [a, b, c]: + continue # all vertices should be different + + # don't create parallel edges + if (d not in G[a]) and (b not in G[c]): + if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]: + # only swap if we get closer to the diagonal + G.add_edge(a, d) + G.add_edge(c, b) + G.remove_edge(a, b) + G.remove_edge(c, d) + + # Check if the graph is still connected + if connectivity and local_conn(G, a, b) == 0: + # Not connected, revert the swap + G.remove_edge(a, d) + G.remove_edge(c, b) + G.add_edge(a, b) + G.add_edge(c, d) + else: + break + n += 1 + + return G + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatchable +def sigma(G, niter=100, nrand=10, seed=None): + """Returns the small-world coefficient (sigma) of the given graph. + + The small-world coefficient is defined as: + sigma = C/Cr / L/Lr + where C and L are respectively the average clustering coefficient and + average shortest path length of G. Cr and Lr are respectively the average + clustering coefficient and average shortest path length of an equivalent + random graph. + + A graph is commonly classified as small-world if sigma>1. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + niter : integer (optional, default=100) + Approximate number of rewiring per edge to compute the equivalent + random graph. + nrand : integer (optional, default=10) + Number of random graphs generated to compute the average clustering + coefficient (Cr) and average shortest path length (Lr). + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + sigma : float + The small-world coefficient of G. + + Notes + ----- + The implementation is adapted from Humphries et al. [1]_ [2]_. + + References + ---------- + .. [1] The brainstem reticular formation is a small-world, not scale-free, + network M. D. Humphries, K. Gurney and T. J. Prescott, + Proc. Roy. Soc. B 2006 273, 503-511, doi:10.1098/rspb.2005.3354. + .. [2] Humphries and Gurney (2008). + "Network 'Small-World-Ness': A Quantitative Method for Determining + Canonical Network Equivalence". + PLoS One. 3 (4). PMID 18446219. doi:10.1371/journal.pone.0002051. 
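+
+    A usage sketch (tiny ``niter``/``nrand`` keep the example cheap; real
+    analyses should prefer the defaults or larger values)::
+
+        G = nx.connected_watts_strogatz_graph(30, 4, 0.1, seed=1)
+        s = nx.sigma(G, niter=2, nrand=2, seed=1)
+        small_world = s > 1  # sigma > 1 suggests small-world structure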
+ """ + import numpy as np + + # Compute the mean clustering coefficient and average shortest path length + # for an equivalent random graph + randMetrics = {"C": [], "L": []} + for i in range(nrand): + Gr = random_reference(G, niter=niter, seed=seed) + randMetrics["C"].append(nx.transitivity(Gr)) + randMetrics["L"].append(nx.average_shortest_path_length(Gr)) + + C = nx.transitivity(G) + L = nx.average_shortest_path_length(G) + Cr = np.mean(randMetrics["C"]) + Lr = np.mean(randMetrics["L"]) + + sigma = (C / Cr) / (L / Lr) + + return float(sigma) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatchable +def omega(G, niter=5, nrand=10, seed=None): + """Returns the small-world coefficient (omega) of a graph + + The small-world coefficient of a graph G is: + + omega = Lr/L - C/Cl + + where C and L are respectively the average clustering coefficient and + average shortest path length of G. Lr is the average shortest path length + of an equivalent random graph and Cl is the average clustering coefficient + of an equivalent lattice graph. + + The small-world coefficient (omega) measures how much G is like a lattice + or a random graph. Negative values mean G is similar to a lattice whereas + positive values mean G is a random graph. + Values close to 0 mean that G has small-world characteristics. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + niter: integer (optional, default=5) + Approximate number of rewiring per edge to compute the equivalent + random graph. + + nrand: integer (optional, default=10) + Number of random graphs generated to compute the maximal clustering + coefficient (Cr) and average shortest path length (Lr). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + + Returns + ------- + omega : float + The small-world coefficient (omega) + + Notes + ----- + The implementation is adapted from the algorithm by Telesford et al. [1]_. + + References + ---------- + .. [1] Telesford, Joyce, Hayasaka, Burdette, and Laurienti (2011). + "The Ubiquity of Small-World Networks". + Brain Connectivity. 1 (0038): 367-75. PMC 3604768. PMID 22432451. + doi:10.1089/brain.2011.0038. 
+ """ + import numpy as np + + # Compute the mean clustering coefficient and average shortest path length + # for an equivalent random graph + randMetrics = {"C": [], "L": []} + + # Calculate initial average clustering coefficient which potentially will + # get replaced by higher clustering coefficients from generated lattice + # reference graphs + Cl = nx.average_clustering(G) + + niter_lattice_reference = niter + niter_random_reference = niter * 2 + + for _ in range(nrand): + # Generate random graph + Gr = random_reference(G, niter=niter_random_reference, seed=seed) + randMetrics["L"].append(nx.average_shortest_path_length(Gr)) + + # Generate lattice graph + Gl = lattice_reference(G, niter=niter_lattice_reference, seed=seed) + + # Replace old clustering coefficient, if clustering is higher in + # generated lattice reference + Cl_temp = nx.average_clustering(Gl) + if Cl_temp > Cl: + Cl = Cl_temp + + C = nx.average_clustering(G) + L = nx.average_shortest_path_length(G) + Lr = np.mean(randMetrics["L"]) + + omega = (Lr / L) - (C / Cl) + + return float(omega) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/smetric.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/smetric.py new file mode 100644 index 0000000..d985aa8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/smetric.py @@ -0,0 +1,30 @@ +import networkx as nx + +__all__ = ["s_metric"] + + +@nx._dispatchable +def s_metric(G): + """Returns the s-metric [1]_ of graph. + + The s-metric is defined as the sum of the products ``deg(u) * deg(v)`` + for every edge ``(u, v)`` in `G`. + + Parameters + ---------- + G : graph + The graph used to compute the s-metric. + + Returns + ------- + s : float + The s-metric of the graph. + + References + ---------- + .. [1] Lun Li, David Alderson, John C. Doyle, and Walter Willinger, + Towards a Theory of Scale-Free Graphs: + Definition, Properties, and Implications (Extended Version), 2005. + https://arxiv.org/abs/cond-mat/0501169 + """ + return float(sum(G.degree(u) * G.degree(v) for (u, v) in G.edges())) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/sparsifiers.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/sparsifiers.py new file mode 100644 index 0000000..5932237 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/sparsifiers.py @@ -0,0 +1,296 @@ +"""Functions for computing sparsifiers of graphs.""" + +import math + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["spanner"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatchable(edge_attrs="weight", returns_graph=True) +def spanner(G, stretch, weight=None, seed=None): + """Returns a spanner of the given graph with the given stretch. + + A spanner of a graph G = (V, E) with stretch t is a subgraph + H = (V, E_S) such that E_S is a subset of E and the distance between + any pair of nodes in H is at most t times the distance between the + nodes in G. + + Parameters + ---------- + G : NetworkX graph + An undirected simple graph. + + stretch : float + The stretch of the spanner. + + weight : object + The edge attribute to use as distance. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + NetworkX graph + A spanner of the given graph with the given stretch. + + Raises + ------ + ValueError + If a stretch less than 1 is given. 
+
+    Notes
+    -----
+    This function implements the spanner algorithm by Baswana and Sen,
+    see [1]_.
+
+    This algorithm is a randomized Las Vegas algorithm: the expected
+    running time is O(km) where k = (stretch + 1) // 2 and m is the
+    number of edges in G. The returned graph is always a spanner of the
+    given graph with the specified stretch. For weighted graphs the
+    number of edges in the spanner is O(k * n^(1 + 1 / k)) where k is
+    defined as above and n is the number of nodes in G. For unweighted
+    graphs the number of edges is O(n^(1 + 1 / k) + kn).
+
+    References
+    ----------
+    .. [1] S. Baswana, S. Sen. A Simple and Linear Time Randomized
+       Algorithm for Computing Sparse Spanners in Weighted Graphs.
+       Random Struct. Algorithms 30(4): 532-563 (2007).
+    """
+    if stretch < 1:
+        raise ValueError("stretch must be at least 1")
+
+    k = (stretch + 1) // 2
+
+    # initialize spanner H with empty edge set
+    H = nx.empty_graph()
+    H.add_nodes_from(G.nodes)
+
+    # phase 1: forming the clusters
+    # the residual graph has V' from the paper as its node set
+    # and E' from the paper as its edge set
+    residual_graph = _setup_residual_graph(G, weight)
+    # clustering is a dictionary that maps nodes in a cluster to the
+    # cluster center
+    clustering = {v: v for v in G.nodes}
+    sample_prob = math.pow(G.number_of_nodes(), -1 / k)
+    size_limit = 2 * math.pow(G.number_of_nodes(), 1 + 1 / k)
+
+    i = 0
+    while i < k - 1:
+        # step 1: sample centers
+        sampled_centers = set()
+        for center in set(clustering.values()):
+            if seed.random() < sample_prob:
+                sampled_centers.add(center)
+
+        # combined loop for steps 2 and 3
+        edges_to_add = set()
+        edges_to_remove = set()
+        new_clustering = {}
+        for v in residual_graph.nodes:
+            if clustering[v] in sampled_centers:
+                continue
+
+            # step 2: find neighboring (sampled) clusters and
+            # lightest edges to them
+            lightest_edge_neighbor, lightest_edge_weight = _lightest_edge_dicts(
+                residual_graph, clustering, v
+            )
+            neighboring_sampled_centers = (
+                set(lightest_edge_weight.keys()) & sampled_centers
+            )
+
+            # step 3: add edges to spanner
+            if not neighboring_sampled_centers:
+                # connect to each neighboring center via lightest edge
+                for neighbor in lightest_edge_neighbor.values():
+                    edges_to_add.add((v, neighbor))
+                # remove all incident edges
+                for neighbor in residual_graph.adj[v]:
+                    edges_to_remove.add((v, neighbor))
+
+            else:  # there is a neighboring sampled center
+                closest_center = min(
+                    neighboring_sampled_centers, key=lightest_edge_weight.get
+                )
+                closest_center_weight = lightest_edge_weight[closest_center]
+                closest_center_neighbor = lightest_edge_neighbor[closest_center]
+
+                edges_to_add.add((v, closest_center_neighbor))
+                new_clustering[v] = closest_center
+
+                # connect to centers with edge weight less than
+                # closest_center_weight
+                for center, edge_weight in lightest_edge_weight.items():
+                    if edge_weight < closest_center_weight:
+                        neighbor = lightest_edge_neighbor[center]
+                        edges_to_add.add((v, neighbor))
+
+                # remove edges to centers with edge weight less than
+                # closest_center_weight
+                for neighbor in residual_graph.adj[v]:
+                    nbr_cluster = clustering[neighbor]
+                    nbr_weight = lightest_edge_weight[nbr_cluster]
+                    if (
+                        nbr_cluster == closest_center
+                        or nbr_weight < closest_center_weight
+                    ):
+                        edges_to_remove.add((v, neighbor))
+
+        # check whether iteration added too many edges to spanner,
+        # if so repeat
+        if len(edges_to_add) > size_limit:
+            # an iteration is repeated O(1) times on expectation
+            continue
+
+        # iteration succeeded
+        i = i + 1
+
+        # actually
add edges to spanner + for u, v in edges_to_add: + _add_edge_to_spanner(H, residual_graph, u, v, weight) + + # actually delete edges from residual graph + residual_graph.remove_edges_from(edges_to_remove) + + # copy old clustering data to new_clustering + for node, center in clustering.items(): + if center in sampled_centers: + new_clustering[node] = center + clustering = new_clustering + + # step 4: remove intra-cluster edges + for u in residual_graph.nodes: + for v in list(residual_graph.adj[u]): + if clustering[u] == clustering[v]: + residual_graph.remove_edge(u, v) + + # update residual graph node set + for v in list(residual_graph.nodes): + if v not in clustering: + residual_graph.remove_node(v) + + # phase 2: vertex-cluster joining + for v in residual_graph.nodes: + lightest_edge_neighbor, _ = _lightest_edge_dicts(residual_graph, clustering, v) + for neighbor in lightest_edge_neighbor.values(): + _add_edge_to_spanner(H, residual_graph, v, neighbor, weight) + + return H + + +def _setup_residual_graph(G, weight): + """Setup residual graph as a copy of G with unique edges weights. + + The node set of the residual graph corresponds to the set V' from + the Baswana-Sen paper and the edge set corresponds to the set E' + from the paper. + + This function associates distinct weights to the edges of the + residual graph (even for unweighted input graphs), as required by + the algorithm. + + Parameters + ---------- + G : NetworkX graph + An undirected simple graph. + + weight : object + The edge attribute to use as distance. + + Returns + ------- + NetworkX graph + The residual graph used for the Baswana-Sen algorithm. + """ + residual_graph = G.copy() + + # establish unique edge weights, even for unweighted graphs + for u, v in G.edges(): + if not weight: + residual_graph[u][v]["weight"] = (id(u), id(v)) + else: + residual_graph[u][v]["weight"] = (G[u][v][weight], id(u), id(v)) + + return residual_graph + + +def _lightest_edge_dicts(residual_graph, clustering, node): + """Find the lightest edge to each cluster. + + Searches for the minimum-weight edge to each cluster adjacent to + the given node. + + Parameters + ---------- + residual_graph : NetworkX graph + The residual graph used by the Baswana-Sen algorithm. + + clustering : dictionary + The current clustering of the nodes. + + node : node + The node from which the search originates. + + Returns + ------- + lightest_edge_neighbor, lightest_edge_weight : dictionary, dictionary + lightest_edge_neighbor is a dictionary that maps a center C to + a node v in the corresponding cluster such that the edge from + the given node to v is the lightest edge from the given node to + any node in cluster. lightest_edge_weight maps a center C to the + weight of the aforementioned edge. + + Notes + ----- + If a cluster has no node that is adjacent to the given node in the + residual graph then the center of the cluster is not a key in the + returned dictionaries. + """ + lightest_edge_neighbor = {} + lightest_edge_weight = {} + for neighbor in residual_graph.adj[node]: + nbr_center = clustering[neighbor] + weight = residual_graph[node][neighbor]["weight"] + if ( + nbr_center not in lightest_edge_weight + or weight < lightest_edge_weight[nbr_center] + ): + lightest_edge_neighbor[nbr_center] = neighbor + lightest_edge_weight[nbr_center] = weight + return lightest_edge_neighbor, lightest_edge_weight + + +def _add_edge_to_spanner(H, residual_graph, u, v, weight): + """Add the edge {u, v} to the spanner H and take weight from + the residual graph. 
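+
+    The residual graph stores tuple-valued weights of the form
+    ``(original_weight, id(u), id(v))`` (see ``_setup_residual_graph``), so
+    only the first component is copied into the spanner for weighted
+    graphs; unweighted spanner edges carry no weight attribute.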
+
+    Parameters
+    ----------
+    H : NetworkX graph
+        The spanner under construction.
+
+    residual_graph : NetworkX graph
+        The residual graph used by the Baswana-Sen algorithm. The weight
+        for the edge is taken from this graph.
+
+    u : node
+        One endpoint of the edge.
+
+    v : node
+        The other endpoint of the edge.
+
+    weight : object
+        The edge attribute to use as distance.
+    """
+    H.add_edge(u, v)
+    if weight:
+        H[u][v][weight] = residual_graph[u][v]["weight"][0]
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/structuralholes.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/structuralholes.py
new file mode 100644
index 0000000..f43c58c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/structuralholes.py
@@ -0,0 +1,374 @@
+"""Functions for computing measures of structural holes."""
+
+import networkx as nx
+
+__all__ = ["constraint", "local_constraint", "effective_size"]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def mutual_weight(G, u, v, weight=None):
+    """Returns the sum of the weights of the edge from `u` to `v` and
+    the edge from `v` to `u` in `G`.
+
+    `weight` is the edge data key that represents the edge weight. If
+    the specified key is `None` or is not in the edge data for an edge,
+    that edge is assumed to have weight 1.
+
+    Pre-conditions: `u` and `v` must both be in `G`.
+
+    """
+    try:
+        a_uv = G[u][v].get(weight, 1)
+    except KeyError:
+        a_uv = 0
+    try:
+        a_vu = G[v][u].get(weight, 1)
+    except KeyError:
+        a_vu = 0
+    return a_uv + a_vu
+
+
+@nx._dispatchable(edge_attrs="weight")
+def normalized_mutual_weight(G, u, v, norm=sum, weight=None):
+    """Returns normalized mutual weight of the edges from `u` to `v`
+    with respect to the mutual weights of the neighbors of `u` in `G`.
+
+    `norm` specifies how the normalization factor is computed. It must
+    be a function that takes a single argument and returns a number.
+    The argument will be an iterable of mutual weights
+    of pairs ``(u, w)``, where ``w`` ranges over each (in- and
+    out-)neighbor of ``u``. Common values for `norm` are
+    ``sum`` and ``max``.
+
+    `weight` can be ``None`` or a string; if None, all edge weights
+    are considered equal. Otherwise holds the name of the edge
+    attribute used as weight.
+
+    """
+    scale = norm(mutual_weight(G, u, w, weight) for w in set(nx.all_neighbors(G, u)))
+    return 0 if scale == 0 else mutual_weight(G, u, v, weight) / scale
+
+
+@nx._dispatchable(edge_attrs="weight")
+def effective_size(G, nodes=None, weight=None):
+    r"""Returns the effective size of all nodes in the graph ``G``.
+
+    The *effective size* of a node's ego network is based on the concept
+    of redundancy. A person's ego network has redundancy to the extent
+    that her contacts are connected to each other as well. The
+    nonredundant part of a person's relationships is the effective
+    size of her ego network [1]_. Formally, the effective size of a
+    node $u$, denoted $e(u)$, is defined by
+
+    .. math::
+
+        e(u) = \sum_{v \in N(u) \setminus \{u\}}
+        \left(1 - \sum_{w \in N(v)} p_{uw} m_{vw}\right)
+
+    where $N(u)$ is the set of neighbors of $u$ and $p_{uw}$ is the
+    normalized mutual weight of the (directed or undirected) edges
+    joining $u$ and $w$, for each vertex $u$ and $w$ [1]_, and $m_{vw}$
+    is the mutual weight of $v$ and $w$ divided by $v$'s highest mutual
+    weight with any of its neighbors. The *mutual weight* of $u$ and $v$
+    is the sum of the weights of edges joining them (edge weights are
+    assumed to be one if the graph is unweighted).
+ + For the case of unweighted and undirected graphs, Borgatti proposed + a simplified formula to compute effective size [2]_ + + .. math:: + + e(u) = n - \frac{2t}{n} + + where `t` is the number of ties in the ego network (not including + ties to ego) and `n` is the number of nodes (excluding ego). + + Parameters + ---------- + G : NetworkX graph + The graph containing ``v``. Directed graphs are treated like + undirected graphs when computing neighbors of ``v``. + + nodes : container, optional + Container of nodes in the graph ``G`` to compute the effective size. + If None, the effective size of every node is computed. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + dict + Dictionary with nodes as keys and the effective size of the node as values. + + Notes + ----- + Isolated nodes, including nodes which only have self-loop edges, do not + have a well-defined effective size:: + + >>> G = nx.path_graph(3) + >>> G.add_edge(4, 4) + >>> nx.effective_size(G) + {0: 1.0, 1: 2.0, 2: 1.0, 4: nan} + + Burt also defined the related concept of *efficiency* of a node's ego + network, which is its effective size divided by the degree of that + node [1]_. So you can easily compute efficiency: + + >>> G = nx.DiGraph() + >>> G.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) + >>> esize = nx.effective_size(G) + >>> efficiency = {n: v / G.degree(n) for n, v in esize.items()} + + See also + -------- + constraint + + References + ---------- + .. [1] Burt, Ronald S. + *Structural Holes: The Social Structure of Competition.* + Cambridge: Harvard University Press, 1995. + + .. [2] Borgatti, S. + "Structural Holes: Unpacking Burt's Redundancy Measures" + CONNECTIONS 20(1):35-38. 
+ http://www.analytictech.com/connections/v20(1)/holes.htm + + """ + + def redundancy(G, u, v, weight=None): + nmw = normalized_mutual_weight + r = sum( + nmw(G, u, w, weight=weight) * nmw(G, v, w, norm=max, weight=weight) + for w in set(nx.all_neighbors(G, u)) + ) + return 1 - r + + # Check if scipy is available + try: + # Needed for errstate + import numpy as np + + # make sure nx.adjacency_matrix will not raise + import scipy as sp + + has_scipy = True + except: + has_scipy = False + + if nodes is None and has_scipy: + # In order to compute constraint of all nodes, + # algorithms based on sparse matrices can be much faster + + # Obtain the adjacency matrix + P = nx.adjacency_matrix(G, weight=weight) + + # Calculate mutual weights + mutual_weights1 = P + P.T + mutual_weights2 = mutual_weights1.copy() + + with np.errstate(divide="ignore"): + # Mutual_weights1 = Normalize mutual weights by row sums + mutual_weights1 /= mutual_weights1.sum(axis=1)[:, np.newaxis] + + # Mutual_weights2 = Normalize mutual weights by row max + mutual_weights2 /= mutual_weights2.max(axis=1).toarray() + + # Calculate effective sizes + r = 1 - (mutual_weights1 @ mutual_weights2.T).toarray() + effective_size = ((mutual_weights1 > 0) * r).sum(axis=1) + + # Special treatment: isolated nodes (ignoring selfloops) marked with "nan" + sum_mutual_weights = mutual_weights1.sum(axis=1) - mutual_weights1.diagonal() + isolated_nodes = sum_mutual_weights == 0 + effective_size[isolated_nodes] = float("nan") + # Use tolist() to automatically convert numpy scalars -> Python scalars + return dict(zip(G, effective_size.tolist())) + + # Results for only requested nodes + effective_size = {} + if nodes is None: + nodes = G + # Use Borgatti's simplified formula for unweighted and undirected graphs + if not G.is_directed() and weight is None: + for v in nodes: + # Effective size is not defined for isolated nodes, including nodes + # with only self-edges + if all(u == v for u in G[v]): + effective_size[v] = float("nan") + continue + E = nx.ego_graph(G, v, center=False, undirected=True) + effective_size[v] = len(E) - (2 * E.size()) / len(E) + else: + for v in nodes: + # Effective size is not defined for isolated nodes, including nodes + # with only self-edges + if all(u == v for u in G[v]): + effective_size[v] = float("nan") + continue + effective_size[v] = sum( + redundancy(G, v, u, weight) for u in set(nx.all_neighbors(G, v)) + ) + return effective_size + + +@nx._dispatchable(edge_attrs="weight") +def constraint(G, nodes=None, weight=None): + r"""Returns the constraint on all nodes in the graph ``G``. + + The *constraint* is a measure of the extent to which a node *v* is + invested in those nodes that are themselves invested in the + neighbors of *v*. Formally, the *constraint on v*, denoted `c(v)`, + is defined by + + .. math:: + + c(v) = \sum_{w \in N(v) \setminus \{v\}} \ell(v, w) + + where $N(v)$ is the subset of the neighbors of `v` that are either + predecessors or successors of `v` and $\ell(v, w)$ is the local + constraint on `v` with respect to `w` [1]_. For the definition of local + constraint, see :func:`local_constraint`. + + Parameters + ---------- + G : NetworkX graph + The graph containing ``v``. This can be either directed or undirected. + + nodes : container, optional + Container of nodes in the graph ``G`` to compute the constraint. If + None, the constraint of every node is computed. + + weight : None or string, optional + If None, all edge weights are considered equal. 
+ Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + dict + Dictionary with nodes as keys and the constraint on the node as values. + + See also + -------- + local_constraint + + References + ---------- + .. [1] Burt, Ronald S. + "Structural holes and good ideas". + American Journal of Sociology (110): 349–399. + + """ + + # Check if scipy is available + try: + # Needed for errstate + import numpy as np + + # make sure nx.adjacency_matrix will not raise + import scipy as sp + + has_scipy = True + except: + has_scipy = False + + if nodes is None and has_scipy: + # In order to compute constraint of all nodes, + # algorithms based on sparse matrices can be much faster + + # Obtain the adjacency matrix + P = nx.adjacency_matrix(G, weight=weight) + + # Calculate mutual weights + mutual_weights = P + P.T + + # Normalize mutual weights by row sums + sum_mutual_weights = mutual_weights.sum(axis=1) + with np.errstate(divide="ignore"): + mutual_weights /= sum_mutual_weights[:, np.newaxis] + + # Calculate local constraints and constraints + local_constraints = (mutual_weights + mutual_weights @ mutual_weights) ** 2 + constraints = ((mutual_weights > 0) * local_constraints).sum(axis=1) + + # Special treatment: isolated nodes marked with "nan" + isolated_nodes = sum_mutual_weights - 2 * mutual_weights.diagonal() == 0 + constraints[isolated_nodes] = float("nan") + # Use tolist() to automatically convert numpy scalars -> Python scalars + return dict(zip(G, constraints.tolist())) + + # Result for only requested nodes + constraint = {} + if nodes is None: + nodes = G + for v in nodes: + # Constraint is not defined for isolated nodes + if len(G[v]) == 0: + constraint[v] = float("nan") + continue + constraint[v] = sum( + local_constraint(G, v, n, weight) for n in set(nx.all_neighbors(G, v)) + ) + return constraint + + +@nx._dispatchable(edge_attrs="weight") +def local_constraint(G, u, v, weight=None): + r"""Returns the local constraint on the node ``u`` with respect to + the node ``v`` in the graph ``G``. + + Formally, the *local constraint on u with respect to v*, denoted + $\ell(u, v)$, is defined by + + .. math:: + + \ell(u, v) = \left(p_{uv} + \sum_{w \in N(v)} p_{uw} p_{wv}\right)^2, + + where $N(v)$ is the set of neighbors of $v$ and $p_{uv}$ is the + normalized mutual weight of the (directed or undirected) edges + joining $u$ and $v$, for each vertex $u$ and $v$ [1]_. The *mutual + weight* of $u$ and $v$ is the sum of the weights of edges joining + them (edge weights are assumed to be one if the graph is + unweighted). + + Parameters + ---------- + G : NetworkX graph + The graph containing ``u`` and ``v``. This can be either + directed or undirected. + + u : node + A node in the graph ``G``. + + v : node + A node in the graph ``G``. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + float + The constraint of the node ``v`` in the graph ``G``. + + See also + -------- + constraint + + References + ---------- + .. [1] Burt, Ronald S. + "Structural holes and good ideas". + American Journal of Sociology (110): 349–399. 
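+
+    As a quick illustration, in a star graph a leaf's only contact is the
+    hub, so $p_{uv} = 1$ and the leaf's local constraint with respect to
+    the hub is 1::
+
+        G = nx.star_graph(4)  # node 0 is the hub
+        nx.local_constraint(G, 1, 0)  # 1.0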
+ + """ + nmw = normalized_mutual_weight + direct = nmw(G, u, v, weight=weight) + indirect = sum( + nmw(G, u, w, weight=weight) * nmw(G, w, v, weight=weight) + for w in set(nx.all_neighbors(G, u)) + ) + return (direct + indirect) ** 2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/summarization.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/summarization.py new file mode 100644 index 0000000..23db8da --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/summarization.py @@ -0,0 +1,564 @@ +""" +Graph summarization finds smaller representations of graphs resulting in faster +runtime of algorithms, reduced storage needs, and noise reduction. +Summarization has applications in areas such as visualization, pattern mining, +clustering and community detection, and more. Core graph summarization +techniques are grouping/aggregation, bit-compression, +simplification/sparsification, and influence based. Graph summarization +algorithms often produce either summary graphs in the form of supergraphs or +sparsified graphs, or a list of independent structures. Supergraphs are the +most common product, which consist of supernodes and original nodes and are +connected by edges and superedges, which represent aggregate edges between +nodes and supernodes. + +Grouping/aggregation based techniques compress graphs by representing +close/connected nodes and edges in a graph by a single node/edge in a +supergraph. Nodes can be grouped together into supernodes based on their +structural similarities or proximity within a graph to reduce the total number +of nodes in a graph. Edge-grouping techniques group edges into lossy/lossless +nodes called compressor or virtual nodes to reduce the total number of edges in +a graph. Edge-grouping techniques can be lossless, meaning that they can be +used to re-create the original graph, or techniques can be lossy, requiring +less space to store the summary graph, but at the expense of lower +reconstruction accuracy of the original graph. + +Bit-compression techniques minimize the amount of information needed to +describe the original graph, while revealing structural patterns in the +original graph. The two-part minimum description length (MDL) is often used to +represent the model and the original graph in terms of the model. A key +difference between graph compression and graph summarization is that graph +summarization focuses on finding structural patterns within the original graph, +whereas graph compression focuses on compressions the original graph to be as +small as possible. **NOTE**: Some bit-compression methods exist solely to +compress a graph without creating a summary graph or finding comprehensible +structural patterns. + +Simplification/Sparsification techniques attempt to create a sparse +representation of a graph by removing unimportant nodes and edges from the +graph. Sparsified graphs differ from supergraphs created by +grouping/aggregation by only containing a subset of the original nodes and +edges of the original graph. + +Influence based techniques aim to find a high-level description of influence +propagation in a large graph. These methods are scarce and have been mostly +applied to social graphs. + +*dedensification* is a grouping/aggregation based technique to compress the +neighborhoods around high-degree nodes in unweighted graphs by adding +compressor nodes that summarize multiple edges of the same type to +high-degree nodes (nodes with a degree greater than a given threshold). 
+Dedensification was developed for the purpose of increasing performance of +query processing around high-degree nodes in graph databases and enables direct +operations on the compressed graph. The structural patterns surrounding +high-degree nodes in the original is preserved while using fewer edges and +adding a small number of compressor nodes. The degree of nodes present in the +original graph is also preserved. The current implementation of dedensification +supports graphs with one edge type. + +For more information on graph summarization, see `Graph Summarization Methods +and Applications: A Survey `_ +""" + +from collections import Counter, defaultdict + +import networkx as nx + +__all__ = ["dedensify", "snap_aggregation"] + + +@nx._dispatchable(mutates_input={"not copy": 3}, returns_graph=True) +def dedensify(G, threshold, prefix=None, copy=True): + """Compresses neighborhoods around high-degree nodes + + Reduces the number of edges to high-degree nodes by adding compressor nodes + that summarize multiple edges of the same type to high-degree nodes (nodes + with a degree greater than a given threshold). Dedensification also has + the added benefit of reducing the number of edges around high-degree nodes. + The implementation currently supports graphs with a single edge type. + + Parameters + ---------- + G: graph + A networkx graph + threshold: int + Minimum degree threshold of a node to be considered a high degree node. + The threshold must be greater than or equal to 2. + prefix: str or None, optional (default: None) + An optional prefix for denoting compressor nodes + copy: bool, optional (default: True) + Indicates if dedensification should be done inplace + + Returns + ------- + dedensified networkx graph : (graph, set) + 2-tuple of the dedensified graph and set of compressor nodes + + Notes + ----- + According to the algorithm in [1]_, removes edges in a graph by + compressing/decompressing the neighborhoods around high degree nodes by + adding compressor nodes that summarize multiple edges of the same type + to high-degree nodes. Dedensification will only add a compressor node when + doing so will reduce the total number of edges in the given graph. This + implementation currently supports graphs with a single edge type. + + Examples + -------- + Dedensification will only add compressor nodes when doing so would result + in fewer edges:: + + >>> original_graph = nx.DiGraph() + >>> original_graph.add_nodes_from( + ... ["1", "2", "3", "4", "5", "6", "A", "B", "C"] + ... ) + >>> original_graph.add_edges_from( + ... [ + ... ("1", "C"), ("1", "B"), + ... ("2", "C"), ("2", "B"), ("2", "A"), + ... ("3", "B"), ("3", "A"), ("3", "6"), + ... ("4", "C"), ("4", "B"), ("4", "A"), + ... ("5", "B"), ("5", "A"), + ... ("6", "5"), + ... ("A", "6") + ... ] + ... ) + >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2) + >>> original_graph.number_of_edges() + 15 + >>> c_graph.number_of_edges() + 14 + + A dedensified, directed graph can be "densified" to reconstruct the + original graph:: + + >>> original_graph = nx.DiGraph() + >>> original_graph.add_nodes_from( + ... ["1", "2", "3", "4", "5", "6", "A", "B", "C"] + ... ) + >>> original_graph.add_edges_from( + ... [ + ... ("1", "C"), ("1", "B"), + ... ("2", "C"), ("2", "B"), ("2", "A"), + ... ("3", "B"), ("3", "A"), ("3", "6"), + ... ("4", "C"), ("4", "B"), ("4", "A"), + ... ("5", "B"), ("5", "A"), + ... ("6", "5"), + ... ("A", "6") + ... ] + ... 
) + >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2) + >>> # re-densifies the compressed graph into the original graph + >>> for c_node in c_nodes: + ... all_neighbors = set(nx.all_neighbors(c_graph, c_node)) + ... out_neighbors = set(c_graph.neighbors(c_node)) + ... for out_neighbor in out_neighbors: + ... c_graph.remove_edge(c_node, out_neighbor) + ... in_neighbors = all_neighbors - out_neighbors + ... for in_neighbor in in_neighbors: + ... c_graph.remove_edge(in_neighbor, c_node) + ... for out_neighbor in out_neighbors: + ... c_graph.add_edge(in_neighbor, out_neighbor) + ... c_graph.remove_node(c_node) + ... + >>> nx.is_isomorphic(original_graph, c_graph) + True + + References + ---------- + .. [1] Maccioni, A., & Abadi, D. J. (2016, August). + Scalable pattern matching over compressed graphs via dedensification. + In Proceedings of the 22nd ACM SIGKDD International Conference on + Knowledge Discovery and Data Mining (pp. 1755-1764). + http://www.cs.umd.edu/~abadi/papers/graph-dedense.pdf + """ + if threshold < 2: + raise nx.NetworkXError("The degree threshold must be >= 2") + + degrees = G.in_degree if G.is_directed() else G.degree + # Group nodes based on degree threshold + high_degree_nodes = {n for n, d in degrees if d > threshold} + low_degree_nodes = G.nodes() - high_degree_nodes + + auxiliary = {} + for node in G: + high_degree_nbrs = frozenset(high_degree_nodes & set(G[node])) + if high_degree_nbrs: + if high_degree_nbrs in auxiliary: + auxiliary[high_degree_nbrs].add(node) + else: + auxiliary[high_degree_nbrs] = {node} + + if copy: + G = G.copy() + + compressor_nodes = set() + for index, (high_degree_nodes, low_degree_nodes) in enumerate(auxiliary.items()): + low_degree_node_count = len(low_degree_nodes) + high_degree_node_count = len(high_degree_nodes) + old_edges = high_degree_node_count * low_degree_node_count + new_edges = high_degree_node_count + low_degree_node_count + if old_edges <= new_edges: + continue + compression_node = "".join(str(node) for node in high_degree_nodes) + if prefix: + compression_node = str(prefix) + compression_node + for node in low_degree_nodes: + for high_node in high_degree_nodes: + if G.has_edge(node, high_node): + G.remove_edge(node, high_node) + + G.add_edge(node, compression_node) + for node in high_degree_nodes: + G.add_edge(compression_node, node) + compressor_nodes.add(compression_node) + return G, compressor_nodes + + +def _snap_build_graph( + G, + groups, + node_attributes, + edge_attributes, + neighbor_info, + edge_types, + prefix, + supernode_attribute, + superedge_attribute, +): + """ + Build the summary graph from the data structures produced in the SNAP aggregation algorithm + + Used in the SNAP aggregation algorithm to build the output summary graph and supernode + lookup dictionary. 
This process uses the original graph and the data structures to + create the supernodes with the correct node attributes, and the superedges with the correct + edge attributes + + Parameters + ---------- + G: networkx.Graph + the original graph to be summarized + groups: dict + A dictionary of unique group IDs and their corresponding node groups + node_attributes: iterable + An iterable of the node attributes considered in the summarization process + edge_attributes: iterable + An iterable of the edge attributes considered in the summarization process + neighbor_info: dict + A data structure indicating the number of edges a node has with the + groups in the current summarization of each edge type + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + prefix: string + The prefix to be added to all supernodes + supernode_attribute: str + The node attribute for recording the supernode groupings of nodes + superedge_attribute: str + The edge attribute for recording the edge types represented by superedges + + Returns + ------- + summary graph: Networkx graph + """ + output = G.__class__() + node_label_lookup = {} + for index, group_id in enumerate(groups): + group_set = groups[group_id] + supernode = f"{prefix}{index}" + node_label_lookup[group_id] = supernode + supernode_attributes = { + attr: G.nodes[next(iter(group_set))][attr] for attr in node_attributes + } + supernode_attributes[supernode_attribute] = group_set + output.add_node(supernode, **supernode_attributes) + + for group_id in groups: + group_set = groups[group_id] + source_supernode = node_label_lookup[group_id] + for other_group, group_edge_types in neighbor_info[ + next(iter(group_set)) + ].items(): + if group_edge_types: + target_supernode = node_label_lookup[other_group] + summary_graph_edge = (source_supernode, target_supernode) + + edge_types = [ + dict(zip(edge_attributes, edge_type)) + for edge_type in group_edge_types + ] + + has_edge = output.has_edge(*summary_graph_edge) + if output.is_multigraph(): + if not has_edge: + for edge_type in edge_types: + output.add_edge(*summary_graph_edge, **edge_type) + elif not output.is_directed(): + existing_edge_data = output.get_edge_data(*summary_graph_edge) + for edge_type in edge_types: + if edge_type not in existing_edge_data.values(): + output.add_edge(*summary_graph_edge, **edge_type) + else: + superedge_attributes = {superedge_attribute: edge_types} + output.add_edge(*summary_graph_edge, **superedge_attributes) + + return output + + +def _snap_eligible_group(G, groups, group_lookup, edge_types): + """ + Determines if a group is eligible to be split. + + A group is eligible to be split if all nodes in the group have edges of the same type(s) + with the same other groups. 
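+
+    For example, if every node in a group has a "friend" edge to nodes in
+    group A, but only some of them also have a "colleague" edge to nodes in
+    group C, the group is not homogeneous and is reported as eligible to be
+    split.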
+ + Parameters + ---------- + G: graph + graph to be summarized + groups: dict + A dictionary of unique group IDs and their corresponding node groups + group_lookup: dict + dictionary of nodes and their current corresponding group ID + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + + Returns + ------- + tuple: group ID to split, and neighbor-groups participation_counts data structure + """ + nbr_info = {node: {gid: Counter() for gid in groups} for node in group_lookup} + for group_id in groups: + current_group = groups[group_id] + + # build nbr_info for nodes in group + for node in current_group: + nbr_info[node] = {group_id: Counter() for group_id in groups} + edges = G.edges(node, keys=True) if G.is_multigraph() else G.edges(node) + for edge in edges: + neighbor = edge[1] + edge_type = edge_types[edge] + neighbor_group_id = group_lookup[neighbor] + nbr_info[node][neighbor_group_id][edge_type] += 1 + + # check if group_id is eligible to be split + group_size = len(current_group) + for other_group_id in groups: + edge_counts = Counter() + for node in current_group: + edge_counts.update(nbr_info[node][other_group_id].keys()) + + if not all(count == group_size for count in edge_counts.values()): + # only the nbr_info of the returned group_id is required for handling group splits + return group_id, nbr_info + + # if no eligible groups, complete nbr_info is calculated + return None, nbr_info + + +def _snap_split(groups, neighbor_info, group_lookup, group_id): + """ + Splits a group based on edge types and updates the groups accordingly + + Splits the group with the given group_id based on the edge types + of the nodes so that each new grouping will all have the same + edges with other nodes. + + Parameters + ---------- + groups: dict + A dictionary of unique group IDs and their corresponding node groups + neighbor_info: dict + A data structure indicating the number of edges a node has with the + groups in the current summarization of each edge type + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + group_lookup: dict + dictionary of nodes and their current corresponding group ID + group_id: object + ID of group to be split + + Returns + ------- + dict + The updated groups based on the split + """ + new_group_mappings = defaultdict(set) + for node in groups[group_id]: + signature = tuple( + frozenset(edge_types) for edge_types in neighbor_info[node].values() + ) + new_group_mappings[signature].add(node) + + # leave the biggest new_group as the original group + new_groups = sorted(new_group_mappings.values(), key=len) + for new_group in new_groups[:-1]: + # Assign unused integer as the new_group_id + # ids are tuples, so will not interact with the original group_ids + new_group_id = len(groups) + groups[new_group_id] = new_group + groups[group_id] -= new_group + for node in new_group: + group_lookup[node] = new_group_id + + return groups + + +@nx._dispatchable( + node_attrs="[node_attributes]", edge_attrs="[edge_attributes]", returns_graph=True +) +def snap_aggregation( + G, + node_attributes, + edge_attributes=(), + prefix="Supernode-", + supernode_attribute="group", + superedge_attribute="types", +): + """Creates a summary graph based on attributes and connectivity. 
+
+    This function uses the Summarization by Grouping Nodes on Attributes
+    and Pairwise edges (SNAP) algorithm for summarizing a given
+    graph by grouping nodes by node attributes and their edge attributes
+    into supernodes in a summary graph. The name SNAP should not be
+    confused with the Stanford Network Analysis Project (SNAP).
+
+    Here is a high-level view of how this algorithm works:
+
+    1) Group nodes by node attribute values.
+
+    2) Iteratively split groups until all nodes in each group have edges
+    to nodes in the same groups. That is, until all the groups are homogeneous
+    in their member nodes' edges to other groups. For example,
+    if all the nodes in group A only have edges to nodes in group B, then the
+    group is homogeneous and does not need to be split. If all nodes in group B
+    have edges with nodes in groups {A, C}, but some also have edges with other
+    nodes in B, then group B is not homogeneous and needs to be split into
+    a group of nodes having edges with {A, C} and a group of nodes having
+    edges with {A, B, C}. This way, viewers of the summary graph can
+    assume that all nodes in the group have the exact same node attributes and
+    the exact same edges.
+
+    3) Build the output summary graph, where the groups are represented by
+    super-nodes. Edges represent the edges shared between all the nodes in each
+    respective group.
+
+    A SNAP summary graph can be used to visualize graphs that are too large to display
+    or visually analyze, or to efficiently identify sets of similar nodes with similar connectivity
+    patterns to other sets of similar nodes based on specified node and/or edge attributes in a graph.
+
+    Parameters
+    ----------
+    G: graph
+        Networkx Graph to be summarized
+    node_attributes: iterable, required
+        An iterable of the node attributes used to group nodes in the summarization process. Nodes
+        with the same values for these attributes will be grouped together in the summary graph.
+    edge_attributes: iterable, optional
+        An iterable of the edge attributes considered in the summarization process. If provided, unique
+        combinations of the attribute values found in the graph are used to
+        determine the edge types in the graph. If not provided, all edges
+        are considered to be of the same type.
+    prefix: str
+        The prefix used to denote supernodes in the summary graph. Defaults to 'Supernode-'.
+    supernode_attribute: str
+        The node attribute for recording the supernode groupings of nodes. Defaults to 'group'.
+    superedge_attribute: str
+        The edge attribute for recording the edge types of multiple edges. Defaults to 'types'.
+
+    Returns
+    -------
+    networkx.Graph: summary graph
+
+    Examples
+    --------
+    SNAP aggregation takes a graph and summarizes it in the context of user-provided
+    node and edge attributes such that a viewer can more easily extract and
+    analyze the information represented by the graph.
+
+    >>> nodes = {
+    ...     "A": dict(color="Red"),
+    ...     "B": dict(color="Red"),
+    ...     "C": dict(color="Red"),
+    ...     "D": dict(color="Red"),
+    ...     "E": dict(color="Blue"),
+    ...     "F": dict(color="Blue"),
+    ... }
+    >>> edges = [
+    ...     ("A", "E", "Strong"),
+    ...     ("B", "F", "Strong"),
+    ...     ("C", "E", "Weak"),
+    ...     ("D", "F", "Weak"),
+    ... ]
+    >>> G = nx.Graph()
+    >>> for node in nodes:
+    ...     attributes = nodes[node]
+    ...     G.add_node(node, **attributes)
+    >>> for source, target, type in edges:
+    ...     G.add_edge(source, target, type=type)
+    >>> node_attributes = ("color",)
+    >>> edge_attributes = ("type",)
+    >>> summary_graph = nx.snap_aggregation(
+    ...
G, node_attributes=node_attributes, edge_attributes=edge_attributes + ... ) + + Notes + ----- + The summary graph produced is called a maximum Attribute-edge + compatible (AR-compatible) grouping. According to [1]_, an + AR-compatible grouping means that all nodes in each group have the same + exact node attribute values and the same exact edges and + edge types to one or more nodes in the same groups. The maximal + AR-compatible grouping is the grouping with the minimal cardinality. + + The AR-compatible grouping is the most detailed grouping provided by + any of the SNAP algorithms. + + References + ---------- + .. [1] Y. Tian, R. A. Hankins, and J. M. Patel. Efficient aggregation + for graph summarization. In Proc. 2008 ACM-SIGMOD Int. Conf. + Management of Data (SIGMOD’08), pages 567–580, Vancouver, Canada, + June 2008. + """ + edge_types = { + edge: tuple(attrs.get(attr) for attr in edge_attributes) + for edge, attrs in G.edges.items() + } + if not G.is_directed(): + if G.is_multigraph(): + # list is needed to avoid mutating while iterating + edges = [((v, u, k), etype) for (u, v, k), etype in edge_types.items()] + else: + # list is needed to avoid mutating while iterating + edges = [((v, u), etype) for (u, v), etype in edge_types.items()] + edge_types.update(edges) + + group_lookup = { + node: tuple(attrs[attr] for attr in node_attributes) + for node, attrs in G.nodes.items() + } + groups = defaultdict(set) + for node, node_type in group_lookup.items(): + groups[node_type].add(node) + + eligible_group_id, nbr_info = _snap_eligible_group( + G, groups, group_lookup, edge_types + ) + while eligible_group_id: + groups = _snap_split(groups, nbr_info, group_lookup, eligible_group_id) + eligible_group_id, nbr_info = _snap_eligible_group( + G, groups, group_lookup, edge_types + ) + return _snap_build_graph( + G, + groups, + node_attributes, + edge_attributes, + nbr_info, + edge_types, + prefix, + supernode_attribute, + superedge_attribute, + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/swap.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/swap.py new file mode 100644 index 0000000..cb3cc1c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/swap.py @@ -0,0 +1,406 @@ +"""Swap edges in a graph.""" + +import math + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["double_edge_swap", "connected_double_edge_swap", "directed_edge_swap"] + + +@nx.utils.not_implemented_for("undirected") +@py_random_state(3) +@nx._dispatchable(mutates_input=True, returns_graph=True) +def directed_edge_swap(G, *, nswap=1, max_tries=100, seed=None): + """Swap three edges in a directed graph while keeping the node degrees fixed. + + A directed edge swap swaps three edges such that a -> b -> c -> d becomes + a -> c -> b -> d. This pattern of swapping allows all possible states with the + same in- and out-degree distribution in a directed graph to be reached. + + If the swap would create parallel edges (e.g. if a -> c already existed in the + previous example), another attempt is made to find a suitable trio of edges. + + Parameters + ---------- + G : DiGraph + A directed graph + + nswap : integer (optional, default=1) + Number of three-edge (directed) swaps to perform + + max_tries : integer (optional, default=100) + Maximum number of attempts to swap edges + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
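+
+    A minimal usage sketch (illustrative only; the directed 4-cycle below is
+    an arbitrary example, not taken from the library's tests). Any successful
+    run preserves the in- and out-degree sequences::
+
+        >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0)])
+        >>> in_before = sorted(d for _, d in G.in_degree())
+        >>> G = nx.directed_edge_swap(G, nswap=1, max_tries=100, seed=42)
+        >>> sorted(d for _, d in G.in_degree()) == in_before
+        True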
+ + Returns + ------- + G : DiGraph + The graph after the edges are swapped. + + Raises + ------ + NetworkXError + If `G` is not directed, or + If nswap > max_tries, or + If there are fewer than 4 nodes or 3 edges in `G`. + NetworkXAlgorithmError + If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made + + Notes + ----- + Does not enforce any connectivity constraints. + + The graph G is modified in place. + + A later swap is allowed to undo a previous swap. + + References + ---------- + .. [1] Erdős, Péter L., et al. “A Simple Havel-Hakimi Type Algorithm to Realize + Graphical Degree Sequences of Directed Graphs.” ArXiv:0905.4913 [Math], + Jan. 2010. https://doi.org/10.48550/arXiv.0905.4913. + Published 2010 in Elec. J. Combinatorics (17(1)). R66. + http://www.combinatorics.org/Volume_17/PDF/v17i1r66.pdf + .. [2] “Combinatorics - Reaching All Possible Simple Directed Graphs with a given + Degree Sequence with 2-Edge Swaps.” Mathematics Stack Exchange, + https://math.stackexchange.com/questions/22272/. Accessed 30 May 2022. + """ + if nswap > max_tries: + raise nx.NetworkXError("Number of swaps > number of tries allowed.") + if len(G) < 4: + raise nx.NetworkXError("DiGraph has fewer than four nodes.") + if len(G.edges) < 3: + raise nx.NetworkXError("DiGraph has fewer than 3 edges") + + # Instead of choosing uniformly at random from a generated edge list, + # this algorithm chooses nonuniformly from the set of nodes with + # probability weighted by degree. + tries = 0 + swapcount = 0 + keys, degrees = zip(*G.degree()) # keys, degree + cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree + discrete_sequence = nx.utils.discrete_sequence + + while swapcount < nswap: + # choose source node index from discrete distribution + start_index = discrete_sequence(1, cdistribution=cdf, seed=seed)[0] + start = keys[start_index] + tries += 1 + + if tries > max_tries: + msg = f"Maximum number of swap attempts ({tries}) exceeded before desired swaps achieved ({nswap})." + raise nx.NetworkXAlgorithmError(msg) + + # If the given node doesn't have any out edges, then there isn't anything to swap + if G.out_degree(start) == 0: + continue + second = seed.choice(list(G.succ[start])) + if start == second: + continue + + if G.out_degree(second) == 0: + continue + third = seed.choice(list(G.succ[second])) + if second == third: + continue + + if G.out_degree(third) == 0: + continue + fourth = seed.choice(list(G.succ[third])) + if third == fourth: + continue + + if ( + third not in G.succ[start] + and fourth not in G.succ[second] + and second not in G.succ[third] + ): + # Swap nodes + G.add_edge(start, third) + G.add_edge(third, second) + G.add_edge(second, fourth) + G.remove_edge(start, second) + G.remove_edge(second, third) + G.remove_edge(third, fourth) + swapcount += 1 + + return G + + +@py_random_state(3) +@nx._dispatchable(mutates_input=True, returns_graph=True) +def double_edge_swap(G, nswap=1, max_tries=100, seed=None): + """Swap two edges in the graph while keeping the node degrees fixed. + + A double-edge swap removes two randomly chosen edges u-v and x-y + and creates the new edges u-x and v-y:: + + u--v u v + becomes | | + x--y x y + + If either the edge u-x or v-y already exist no swap is performed + and another attempt is made to find a suitable edge pair. 
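+
+    A minimal usage sketch (illustrative only; the 6-cycle is an arbitrary
+    example graph). Whatever edges get rewired, the degree sequence is
+    unchanged::
+
+        >>> G = nx.cycle_graph(6)
+        >>> degrees_before = sorted(d for _, d in G.degree())
+        >>> G = nx.double_edge_swap(G, nswap=2, max_tries=100, seed=1)
+        >>> sorted(d for _, d in G.degree()) == degrees_before
+        True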
+ + Parameters + ---------- + G : graph + An undirected graph + + nswap : integer (optional, default=1) + Number of double-edge swaps to perform + + max_tries : integer (optional) + Maximum number of attempts to swap edges + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The graph after double edge swaps. + + Raises + ------ + NetworkXError + If `G` is directed, or + If `nswap` > `max_tries`, or + If there are fewer than 4 nodes or 2 edges in `G`. + NetworkXAlgorithmError + If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made + + Notes + ----- + Does not enforce any connectivity constraints. + + The graph G is modified in place. + """ + if G.is_directed(): + raise nx.NetworkXError( + "double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead." + ) + if nswap > max_tries: + raise nx.NetworkXError("Number of swaps > number of tries allowed.") + if len(G) < 4: + raise nx.NetworkXError("Graph has fewer than four nodes.") + if len(G.edges) < 2: + raise nx.NetworkXError("Graph has fewer than 2 edges") + # Instead of choosing uniformly at random from a generated edge list, + # this algorithm chooses nonuniformly from the set of nodes with + # probability weighted by degree. + n = 0 + swapcount = 0 + keys, degrees = zip(*G.degree()) # keys, degree + cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree + discrete_sequence = nx.utils.discrete_sequence + while swapcount < nswap: + # if random.random() < 0.5: continue # trick to avoid periodicities? + # pick two random edges without creating edge list + # choose source node indices from discrete distribution + (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) + if ui == xi: + continue # same source, skip + u = keys[ui] # convert index to label + x = keys[xi] + # choose target uniformly from neighbors + v = seed.choice(list(G[u])) + y = seed.choice(list(G[x])) + if v == y: + continue # same target, skip + if (x not in G[u]) and (y not in G[v]): # don't create parallel edges + G.add_edge(u, x) + G.add_edge(v, y) + G.remove_edge(u, v) + G.remove_edge(x, y) + swapcount += 1 + if n >= max_tries: + e = ( + f"Maximum number of swap attempts ({n}) exceeded " + f"before desired swaps achieved ({nswap})." + ) + raise nx.NetworkXAlgorithmError(e) + n += 1 + return G + + +@py_random_state(3) +@nx._dispatchable(mutates_input=True) +def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None): + """Attempts the specified number of double-edge swaps in the graph `G`. + + A double-edge swap removes two randomly chosen edges `(u, v)` and `(x, + y)` and creates the new edges `(u, x)` and `(v, y)`:: + + u--v u v + becomes | | + x--y x y + + If either `(u, x)` or `(v, y)` already exist, then no swap is performed + so the actual number of swapped edges is always *at most* `nswap`. + + Parameters + ---------- + G : graph + An undirected graph + + nswap : integer (optional, default=1) + Number of double-edge swaps to perform + + _window_threshold : integer + + The window size below which connectedness of the graph will be checked + after each swap. + + The "window" in this function is a dynamically updated integer that + represents the number of swap attempts to make before checking if the + graph remains connected. It is an optimization used to decrease the + running time of the algorithm in exchange for increased complexity of + implementation. 
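+
+        A rough sketch of the adaptive policy (pseudocode only, not the exact
+        implementation below; ``still_connected`` is a hypothetical flag)::
+
+            window = 1
+            # after each window of swap attempts:
+            if still_connected:
+                window += 1
+            else:
+                # undo the offending swaps, then shrink the window
+                window = math.ceil(window / 2)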
+
+        If the window size is below this threshold, then the algorithm checks
+        after each swap if the graph remains connected by checking if there is a
+        path joining the two nodes whose edge was just removed. If the window
+        size is above this threshold, then the algorithm performs all the
+        swaps in the window and only then checks if the graph is still connected.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    int
+        The number of successful swaps
+
+    Raises
+    ------
+
+    NetworkXError
+
+        If the input graph is not connected, or if the graph has fewer than four
+        nodes.
+
+    Notes
+    -----
+
+    The initial graph `G` must be connected, and the resulting graph is
+    connected. The graph `G` is modified in place.
+
+    References
+    ----------
+    .. [1] C. Gkantsidis and M. Mihail and E. Zegura,
+           The Markov chain simulation method for generating connected
+           power law random graphs, 2003.
+           http://citeseer.ist.psu.edu/gkantsidis03markov.html
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected")
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    n = 0
+    swapcount = 0
+    deg = G.degree()
+    # Label key for nodes
+    dk = [n for n, d in G.degree()]
+    cdf = nx.utils.cumulative_distribution([d for n, d in G.degree()])
+    discrete_sequence = nx.utils.discrete_sequence
+    window = 1
+    while n < nswap:
+        wcount = 0
+        swapped = []
+        # If the window is small, we just check each time whether the graph is
+        # connected by checking if the nodes that were just separated are still
+        # connected.
+        if window < _window_threshold:
+            # This Boolean keeps track of whether there was a failure or not.
+            fail = False
+            while wcount < window and n < nswap:
+                # Pick two random edges without creating the edge list. Choose
+                # source nodes from the discrete degree distribution.
+                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+                # If the source nodes are the same, skip this pair.
+                if ui == xi:
+                    continue
+                # Convert an index to a node label.
+                u = dk[ui]
+                x = dk[xi]
+                # Choose targets uniformly from neighbors.
+                v = seed.choice(list(G.neighbors(u)))
+                y = seed.choice(list(G.neighbors(x)))
+                # If the target nodes are the same, skip this pair.
+                if v == y:
+                    continue
+                if x not in G[u] and y not in G[v]:
+                    G.remove_edge(u, v)
+                    G.remove_edge(x, y)
+                    G.add_edge(u, x)
+                    G.add_edge(v, y)
+                    swapped.append((u, v, x, y))
+                    swapcount += 1
+                n += 1
+                # If G remains connected...
+                if nx.has_path(G, u, v):
+                    wcount += 1
+                # Otherwise, undo the changes.
+                else:
+                    G.add_edge(u, v)
+                    G.add_edge(x, y)
+                    G.remove_edge(u, x)
+                    G.remove_edge(v, y)
+                    swapcount -= 1
+                    fail = True
+            # If one of the swaps failed, reduce the window size.
+            if fail:
+                window = math.ceil(window / 2)
+            else:
+                window += 1
+        # If the window is large, then there is a good chance that a bunch of
+        # swaps will work. It's quicker to do all those swaps first and then
+        # check if the graph remains connected.
+        else:
+            while wcount < window and n < nswap:
+                # Pick two random edges without creating the edge list. Choose
+                # source nodes from the discrete degree distribution.
+                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+                # If the source nodes are the same, skip this pair.
+                if ui == xi:
+                    continue
+                # Convert an index to a node label.
+                u = dk[ui]
+                x = dk[xi]
+                # Choose targets uniformly from neighbors.
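+                # (Drawing ``u`` and ``x`` with probability proportional to
+                # degree, then ``v`` and ``y`` uniformly from their neighbors,
+                # makes every edge equally likely to be chosen without ever
+                # materializing an explicit edge list.)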
+ v = seed.choice(list(G.neighbors(u))) + y = seed.choice(list(G.neighbors(x))) + # If the target nodes are the same, skip this pair. + if v == y: + continue + if x not in G[u] and y not in G[v]: + G.remove_edge(u, v) + G.remove_edge(x, y) + G.add_edge(u, x) + G.add_edge(v, y) + swapped.append((u, v, x, y)) + swapcount += 1 + n += 1 + wcount += 1 + # If the graph remains connected, increase the window size. + if nx.is_connected(G): + window += 1 + # Otherwise, undo the changes from the previous window and decrease + # the window size. + else: + while swapped: + (u, v, x, y) = swapped.pop() + G.add_edge(u, v) + G.add_edge(x, y) + G.remove_edge(u, x) + G.remove_edge(v, y) + swapcount -= 1 + window = math.ceil(window / 2) + return swapcount diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..25ea490 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-312.pyc new file mode 100644 index 0000000..e3df714 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_boundary.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_boundary.cpython-312.pyc new file mode 100644 index 0000000..c16a133 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_boundary.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_bridges.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_bridges.cpython-312.pyc new file mode 100644 index 0000000..5c2625c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_bridges.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_broadcasting.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_broadcasting.cpython-312.pyc new file mode 100644 index 0000000..2336ed1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_broadcasting.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-312.pyc new file mode 100644 index 0000000..b52e032 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-312.pyc new file mode 100644 index 0000000..48dc525 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_clique.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_clique.cpython-312.pyc new file mode 100644 index 0000000..6f3f59e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_clique.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-312.pyc new file mode 100644 index 0000000..cbc50ad Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_communicability.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_communicability.cpython-312.pyc new file mode 100644 index 0000000..df2b831 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_communicability.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_core.cpython-312.pyc new file mode 100644 index 0000000..d4a19af Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_core.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_covering.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_covering.cpython-312.pyc new file mode 100644 index 0000000..f28bda5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_covering.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-312.pyc new file mode 100644 index 0000000..11a3b05 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-312.pyc new file mode 100644 index 0000000..a824759 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_d_separation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_d_separation.cpython-312.pyc new file mode 100644 index 0000000..5ccece3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_d_separation.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-312.pyc new file mode 100644 index 0000000..50a61b9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-312.pyc new file mode 100644 index 0000000..1998a7d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-312.pyc new file mode 100644 index 0000000..8e2cae4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dominance.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dominance.cpython-312.pyc new file mode 100644 index 0000000..5078529 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dominance.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dominating.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dominating.cpython-312.pyc new file mode 100644 index 0000000..ee9904b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_dominating.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_efficiency.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_efficiency.cpython-312.pyc new file mode 100644 index 0000000..0267338 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_efficiency.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_euler.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_euler.cpython-312.pyc new file mode 100644 index 0000000..8dfca69 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_euler.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-312.pyc new file mode 100644 index 0000000..27d6791 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-312.pyc new file mode 100644 index 0000000..82e90b1 Binary 
files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-312.pyc new file mode 100644 index 0000000..2ef0b98 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-312.pyc new file mode 100644 index 0000000..34b0609 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_isolate.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_isolate.cpython-312.pyc new file mode 100644 index 0000000..f6dbe70 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_isolate.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-312.pyc new file mode 100644 index 0000000..036ca25 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-312.pyc new file mode 100644 index 0000000..b53f0f0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-312.pyc new file mode 100644 index 0000000..ed289e2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-312.pyc new file mode 100644 index 0000000..0160b87 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_mis.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_mis.cpython-312.pyc new file mode 100644 index 0000000..3fd4a5d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_mis.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_moral.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_moral.cpython-312.pyc new file mode 100644 index 0000000..ead4274 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_moral.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_node_classification.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_node_classification.cpython-312.pyc new file mode 100644 index 0000000..5cd51d1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_node_classification.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-312.pyc new file mode 100644 index 0000000..33b4a24 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-312.pyc new file mode 100644 index 0000000..915a7cc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_planarity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_planarity.cpython-312.pyc new file mode 100644 index 0000000..23ccff9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_planarity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-312.pyc new file mode 100644 index 0000000..c02bb79 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-312.pyc new file mode 100644 index 0000000..a823642 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_regular.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_regular.cpython-312.pyc new file mode 100644 index 0000000..c074ee0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_regular.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_richclub.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_richclub.cpython-312.pyc new file mode 100644 index 0000000..43abf94 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_richclub.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_similarity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_similarity.cpython-312.pyc new file mode 100644 index 0000000..91db20d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_similarity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-312.pyc new file mode 100644 index 0000000..854c485 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_smallworld.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_smallworld.cpython-312.pyc new file mode 100644 index 0000000..399fde5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_smallworld.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-312.pyc new file mode 100644 index 0000000..d492824 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-312.pyc new file mode 100644 index 0000000..a0a990e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-312.pyc new file mode 100644 index 0000000..efa1cdd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_summarization.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_summarization.cpython-312.pyc new file mode 100644 index 0000000..f13d9f9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_summarization.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-312.pyc new file mode 100644 index 0000000..4df47a2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_threshold.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_threshold.cpython-312.pyc new file mode 100644 index 0000000..b8af104 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_threshold.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-312.pyc new file mode 100644 index 0000000..ef87e96 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-312.pyc new file mode 100644 index 0000000..ee21ad6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-312.pyc new file mode 100644 index 0000000..bec45fb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_vitality.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_vitality.cpython-312.pyc new file mode 100644 index 0000000..15177ac Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_vitality.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-312.pyc new file mode 100644 index 0000000..c814fbb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-312.pyc new file mode 100644 index 0000000..ee471b6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-312.pyc new file mode 100644 index 0000000..47042af Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_asteroidal.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_asteroidal.py new file mode 100644 index 0000000..67131b2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_asteroidal.py @@ -0,0 +1,23 @@ +import networkx as nx + + +def test_is_at_free(): + is_at_free = nx.asteroidal.is_at_free + + cycle 
= nx.cycle_graph(6)
+    assert not is_at_free(cycle)
+
+    path = nx.path_graph(6)
+    assert is_at_free(path)
+
+    small_graph = nx.complete_graph(2)
+    assert is_at_free(small_graph)
+
+    petersen = nx.petersen_graph()
+    assert not is_at_free(petersen)
+
+    clique = nx.complete_graph(6)
+    assert is_at_free(clique)
+
+    line_clique = nx.line_graph(clique)
+    assert not is_at_free(line_clique)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_boundary.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_boundary.py
new file mode 100644
index 0000000..856be46
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_boundary.py
@@ -0,0 +1,154 @@
+"""Unit tests for the :mod:`networkx.algorithms.boundary` module."""
+
+from itertools import combinations
+
+import pytest
+
+import networkx as nx
+from networkx import convert_node_labels_to_integers as cnlti
+from networkx.utils import edges_equal
+
+
+class TestNodeBoundary:
+    """Unit tests for the :func:`~networkx.node_boundary` function."""
+
+    def test_null_graph(self):
+        """Tests that the null graph has empty node boundaries."""
+        null = nx.null_graph()
+        assert nx.node_boundary(null, []) == set()
+        assert nx.node_boundary(null, [], []) == set()
+        assert nx.node_boundary(null, [1, 2, 3]) == set()
+        assert nx.node_boundary(null, [1, 2, 3], [4, 5, 6]) == set()
+        assert nx.node_boundary(null, [1, 2, 3], [3, 4, 5]) == set()
+
+    def test_path_graph(self):
+        P10 = cnlti(nx.path_graph(10), first_label=1)
+        assert nx.node_boundary(P10, []) == set()
+        assert nx.node_boundary(P10, [], []) == set()
+        assert nx.node_boundary(P10, [1, 2, 3]) == {4}
+        assert nx.node_boundary(P10, [4, 5, 6]) == {3, 7}
+        assert nx.node_boundary(P10, [3, 4, 5, 6, 7]) == {2, 8}
+        assert nx.node_boundary(P10, [8, 9, 10]) == {7}
+        assert nx.node_boundary(P10, [4, 5, 6], [9, 10]) == set()
+
+    def test_complete_graph(self):
+        K10 = cnlti(nx.complete_graph(10), first_label=1)
+        assert nx.node_boundary(K10, []) == set()
+        assert nx.node_boundary(K10, [], []) == set()
+        assert nx.node_boundary(K10, [1, 2, 3]) == {4, 5, 6, 7, 8, 9, 10}
+        assert nx.node_boundary(K10, [4, 5, 6]) == {1, 2, 3, 7, 8, 9, 10}
+        assert nx.node_boundary(K10, [3, 4, 5, 6, 7]) == {1, 2, 8, 9, 10}
+        assert nx.node_boundary(K10, [4, 5, 6], []) == set()
+        assert nx.node_boundary(K10, K10) == set()
+        assert nx.node_boundary(K10, [1, 2, 3], [3, 4, 5]) == {4, 5}
+
+    def test_petersen(self):
+        """Check boundaries in the petersen graph
+
+        cheeger(G,k)=min(|bdy(S)|/|S| for |S|=k, 0<k<=|V(G)|/2)
+        """
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_chains.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_chains.py
new file mode 100644
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_chains.py
+"""Unit tests for the chain decomposition functions."""
+
+from itertools import cycle, islice
+
+import pytest
+
+import networkx as nx
+
+
+def cycles(seq):
+    """Yields cyclic permutations of the given sequence.
+
+    For example::
+
+        >>> list(cycles("abc"))
+        [('a', 'b', 'c'), ('b', 'c', 'a'), ('c', 'a', 'b')]
+
+    """
+    n = len(seq)
+    cycled_seq = cycle(seq)
+    for x in seq:
+        yield tuple(islice(cycled_seq, n))
+        next(cycled_seq)
+
+
+def cyclic_equals(seq1, seq2):
+    """Decide whether two sequences are equal up to cyclic permutations.
+
+    For example::
+
+        >>> cyclic_equals("xyz", "zxy")
+        True
+        >>> cyclic_equals("xyz", "zyx")
+        False
+
+    """
+    # Cast seq2 to a tuple since `cycles()` yields tuples.
+    seq2 = tuple(seq2)
+    return any(x == tuple(seq2) for x in cycles(seq1))
+
+
+class TestChainDecomposition:
+    """Unit tests for the chain decomposition function."""
+
+    def assertContainsChain(self, chain, expected):
+        # A cycle could be expressed in two different orientations, one
+        # forward and one backward, so we need to check for cyclic
+        # equality in both orientations.
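+        # For example (hypothetical chain, for illustration only):
+        # [(1, 2), (2, 3), (3, 1)] reverses to [(1, 3), (3, 2), (2, 1)];
+        # both orientations describe the same cycle.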
+ reversed_chain = list(reversed([tuple(reversed(e)) for e in chain])) + for candidate in expected: + if cyclic_equals(chain, candidate): + break + if cyclic_equals(reversed_chain, candidate): + break + else: + self.fail("chain not found") + + def test_decomposition(self): + edges = [ + # DFS tree edges. + (1, 2), + (2, 3), + (3, 4), + (3, 5), + (5, 6), + (6, 7), + (7, 8), + (5, 9), + (9, 10), + # Nontree edges. + (1, 3), + (1, 4), + (2, 5), + (5, 10), + (6, 8), + ] + G = nx.Graph(edges) + expected = [ + [(1, 3), (3, 2), (2, 1)], + [(1, 4), (4, 3)], + [(2, 5), (5, 3)], + [(5, 10), (10, 9), (9, 5)], + [(6, 8), (8, 7), (7, 6)], + ] + chains = list(nx.chain_decomposition(G, root=1)) + assert len(chains) == len(expected) + + def test_barbell_graph(self): + # The (3, 0) barbell graph has two triangles joined by a single edge. + G = nx.barbell_graph(3, 0) + chains = list(nx.chain_decomposition(G, root=0)) + expected = [[(0, 1), (1, 2), (2, 0)], [(3, 4), (4, 5), (5, 3)]] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) + + def test_disconnected_graph(self): + """Test for a graph with multiple connected components.""" + G = nx.barbell_graph(3, 0) + H = nx.barbell_graph(3, 0) + mapping = dict(zip(range(6), "abcdef")) + nx.relabel_nodes(H, mapping, copy=False) + G = nx.union(G, H) + chains = list(nx.chain_decomposition(G)) + expected = [ + [(0, 1), (1, 2), (2, 0)], + [(3, 4), (4, 5), (5, 3)], + [("a", "b"), ("b", "c"), ("c", "a")], + [("d", "e"), ("e", "f"), ("f", "d")], + ] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) + + def test_disconnected_graph_root_node(self): + """Test for a single component of a disconnected graph.""" + G = nx.barbell_graph(3, 0) + H = nx.barbell_graph(3, 0) + mapping = dict(zip(range(6), "abcdef")) + nx.relabel_nodes(H, mapping, copy=False) + G = nx.union(G, H) + chains = list(nx.chain_decomposition(G, root="a")) + expected = [ + [("a", "b"), ("b", "c"), ("c", "a")], + [("d", "e"), ("e", "f"), ("f", "d")], + ] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) + + def test_chain_decomposition_root_not_in_G(self): + """Test chain decomposition when root is not in graph""" + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + with pytest.raises(nx.NodeNotFound): + nx.has_bridges(G, root=6) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_chordal.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_chordal.py new file mode 100644 index 0000000..148b22f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_chordal.py @@ -0,0 +1,129 @@ +import pytest + +import networkx as nx + + +class TestMCS: + @classmethod + def setup_class(cls): + # simple graph + connected_chordal_G = nx.Graph() + connected_chordal_G.add_edges_from( + [ + (1, 2), + (1, 3), + (2, 3), + (2, 4), + (3, 4), + (3, 5), + (3, 6), + (4, 5), + (4, 6), + (5, 6), + ] + ) + cls.connected_chordal_G = connected_chordal_G + + chordal_G = nx.Graph() + chordal_G.add_edges_from( + [ + (1, 2), + (1, 3), + (2, 3), + (2, 4), + (3, 4), + (3, 5), + (3, 6), + (4, 5), + (4, 6), + (5, 6), + (7, 8), + ] + ) + chordal_G.add_node(9) + cls.chordal_G = chordal_G + + non_chordal_G = nx.Graph() + non_chordal_G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5), (3, 4), (3, 5)]) + cls.non_chordal_G = non_chordal_G + + self_loop_G = nx.Graph() + self_loop_G.add_edges_from([(1, 1)]) + cls.self_loop_G = 
self_loop_G + + @pytest.mark.parametrize("G", (nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph())) + def test_is_chordal_not_implemented(self, G): + with pytest.raises(nx.NetworkXNotImplemented): + nx.is_chordal(G) + + def test_is_chordal(self): + assert not nx.is_chordal(self.non_chordal_G) + assert nx.is_chordal(self.chordal_G) + assert nx.is_chordal(self.connected_chordal_G) + assert nx.is_chordal(nx.Graph()) + assert nx.is_chordal(nx.complete_graph(3)) + assert nx.is_chordal(nx.cycle_graph(3)) + assert not nx.is_chordal(nx.cycle_graph(5)) + assert nx.is_chordal(self.self_loop_G) + + def test_induced_nodes(self): + G = nx.generators.classic.path_graph(10) + Induced_nodes = nx.find_induced_nodes(G, 1, 9, 2) + assert Induced_nodes == {1, 2, 3, 4, 5, 6, 7, 8, 9} + pytest.raises( + nx.NetworkXTreewidthBoundExceeded, nx.find_induced_nodes, G, 1, 9, 1 + ) + Induced_nodes = nx.find_induced_nodes(self.chordal_G, 1, 6) + assert Induced_nodes == {1, 2, 4, 6} + pytest.raises(nx.NetworkXError, nx.find_induced_nodes, self.non_chordal_G, 1, 5) + + def test_graph_treewidth(self): + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + nx.chordal_graph_treewidth(self.non_chordal_G) + + def test_chordal_find_cliques(self): + cliques = { + frozenset([9]), + frozenset([7, 8]), + frozenset([1, 2, 3]), + frozenset([2, 3, 4]), + frozenset([3, 4, 5, 6]), + } + assert set(nx.chordal_graph_cliques(self.chordal_G)) == cliques + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + set(nx.chordal_graph_cliques(self.non_chordal_G)) + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + set(nx.chordal_graph_cliques(self.self_loop_G)) + + def test_chordal_find_cliques_path(self): + G = nx.path_graph(10) + cliqueset = nx.chordal_graph_cliques(G) + for u, v in G.edges(): + assert frozenset([u, v]) in cliqueset or frozenset([v, u]) in cliqueset + + def test_chordal_find_cliquesCC(self): + cliques = {frozenset([1, 2, 3]), frozenset([2, 3, 4]), frozenset([3, 4, 5, 6])} + cgc = nx.chordal_graph_cliques + assert set(cgc(self.connected_chordal_G)) == cliques + + def test_complete_to_chordal_graph(self): + fgrg = nx.fast_gnp_random_graph + test_graphs = [ + nx.barbell_graph(6, 2), + nx.cycle_graph(15), + nx.wheel_graph(20), + nx.grid_graph([10, 4]), + nx.ladder_graph(15), + nx.star_graph(5), + nx.bull_graph(), + fgrg(20, 0.3, seed=1), + ] + for G in test_graphs: + H, a = nx.complete_to_chordal_graph(G) + assert nx.is_chordal(H) + assert len(a) == H.number_of_nodes() + if nx.is_chordal(G): + assert G.number_of_edges() == H.number_of_edges() + assert set(a.values()) == {0} + else: + assert len(set(a.values())) == H.number_of_nodes() diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_clique.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_clique.py new file mode 100644 index 0000000..3bee210 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_clique.py @@ -0,0 +1,291 @@ +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti + + +class TestCliques: + def setup_method(self): + z = [3, 4, 3, 4, 2, 4, 2, 1, 1, 1, 1] + self.G = cnlti(nx.generators.havel_hakimi_graph(z), first_label=1) + self.cl = list(nx.find_cliques(self.G)) + H = nx.complete_graph(6) + H = nx.relabel_nodes(H, {i: i + 1 for i in range(6)}) + H.remove_edges_from([(2, 6), (2, 5), (2, 4), (1, 3), (5, 3)]) + self.H = H + + def test_find_cliques1(self): + cl = 
list(nx.find_cliques(self.G)) + rcl = nx.find_cliques_recursive(self.G) + expected = [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] + assert sorted(map(sorted, cl)) == sorted(map(sorted, rcl)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + def test_selfloops(self): + self.G.add_edge(1, 1) + cl = list(nx.find_cliques(self.G)) + rcl = list(nx.find_cliques_recursive(self.G)) + assert set(map(frozenset, cl)) == set(map(frozenset, rcl)) + answer = [{2, 6, 1, 3}, {2, 6, 4}, {5, 4, 7}, {8, 9}, {10, 11}] + assert len(answer) == len(cl) + assert all(set(c) in answer for c in cl) + + def test_find_cliques2(self): + hcl = list(nx.find_cliques(self.H)) + assert sorted(map(sorted, hcl)) == [[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]] + + def test_find_cliques3(self): + # all cliques are [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] + + cl = list(nx.find_cliques(self.G, [2])) + rcl = nx.find_cliques_recursive(self.G, [2]) + expected = [[2, 6, 1, 3], [2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 3])) + rcl = nx.find_cliques_recursive(self.G, [2, 3]) + expected = [[2, 6, 1, 3]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 6, 4])) + rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) + expected = [[2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 6, 4])) + rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) + expected = [[2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + with pytest.raises(ValueError): + list(nx.find_cliques(self.G, [2, 6, 4, 1])) + + with pytest.raises(ValueError): + list(nx.find_cliques_recursive(self.G, [2, 6, 4, 1])) + + def test_number_of_cliques(self): + G = self.G + assert nx.number_of_cliques(G, 1) == 1 + assert list(nx.number_of_cliques(G, [1]).values()) == [1] + assert list(nx.number_of_cliques(G, [1, 2]).values()) == [1, 2] + assert nx.number_of_cliques(G, [1, 2]) == {1: 1, 2: 2} + assert nx.number_of_cliques(G, 2) == 2 + assert nx.number_of_cliques(G) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, nodes=list(G)) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, nodes=[2, 3, 4]) == {2: 2, 3: 1, 4: 2} + assert nx.number_of_cliques(G, cliques=self.cl) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, list(G), cliques=self.cl) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + + def test_node_clique_number(self): + G = self.G + assert nx.node_clique_number(G, 1) == 4 + assert list(nx.node_clique_number(G, [1]).values()) == [4] + assert list(nx.node_clique_number(G, [1, 2]).values()) == [4, 4] + assert nx.node_clique_number(G, [1, 2]) == {1: 4, 2: 4} + assert nx.node_clique_number(G, 1) == 4 + assert nx.node_clique_number(G) == { + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 4, + 7: 3, + 8: 2, + 9: 2, + 10: 2, + 11: 2, + } + assert nx.node_clique_number(G, 
cliques=self.cl) == { + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 4, + 7: 3, + 8: 2, + 9: 2, + 10: 2, + 11: 2, + } + assert nx.node_clique_number(G, [1, 2], cliques=self.cl) == {1: 4, 2: 4} + assert nx.node_clique_number(G, 1, cliques=self.cl) == 4 + + def test_make_clique_bipartite(self): + G = self.G + B = nx.make_clique_bipartite(G) + assert sorted(B) == [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + # Project onto the nodes of the original graph. + H = nx.projected_graph(B, range(1, 12)) + assert H.adj == G.adj + # Project onto the nodes representing the cliques. + H1 = nx.projected_graph(B, range(-5, 0)) + # Relabel the negative numbers as positive ones. + H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)}) + assert sorted(H1) == [1, 2, 3, 4, 5] + + def test_make_max_clique_graph(self): + """Tests that the maximal clique graph is the same as the bipartite + clique graph after being projected onto the nodes representing the + cliques. + + """ + G = self.G + B = nx.make_clique_bipartite(G) + # Project onto the nodes representing the cliques. + H1 = nx.projected_graph(B, range(-5, 0)) + # Relabel the negative numbers as nonnegative ones, starting at + # 0. + H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)}) + H2 = nx.make_max_clique_graph(G) + assert H1.adj == H2.adj + + def test_directed(self): + with pytest.raises(nx.NetworkXNotImplemented): + next(nx.find_cliques(nx.DiGraph())) + + def test_find_cliques_trivial(self): + G = nx.Graph() + assert sorted(nx.find_cliques(G)) == [] + assert sorted(nx.find_cliques_recursive(G)) == [] + + def test_make_max_clique_graph_create_using(self): + G = nx.Graph([(1, 2), (3, 1), (4, 1), (5, 6)]) + E = nx.Graph([(0, 1), (0, 2), (1, 2)]) + E.add_node(3) + assert nx.is_isomorphic(nx.make_max_clique_graph(G, create_using=nx.Graph), E) + + +class TestEnumerateAllCliques: + def test_paper_figure_4(self): + # Same graph as given in Fig. 4 of paper enumerate_all_cliques is + # based on. 
+ # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129 + G = nx.Graph() + edges_fig_4 = [ + ("a", "b"), + ("a", "c"), + ("a", "d"), + ("a", "e"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("c", "d"), + ("c", "e"), + ("d", "e"), + ("f", "b"), + ("f", "c"), + ("f", "g"), + ("g", "f"), + ("g", "c"), + ("g", "d"), + ("g", "e"), + ] + G.add_edges_from(edges_fig_4) + + cliques = list(nx.enumerate_all_cliques(G)) + clique_sizes = list(map(len, cliques)) + assert sorted(clique_sizes) == clique_sizes + + expected_cliques = [ + ["a"], + ["b"], + ["c"], + ["d"], + ["e"], + ["f"], + ["g"], + ["a", "b"], + ["a", "b", "d"], + ["a", "b", "d", "e"], + ["a", "b", "e"], + ["a", "c"], + ["a", "c", "d"], + ["a", "c", "d", "e"], + ["a", "c", "e"], + ["a", "d"], + ["a", "d", "e"], + ["a", "e"], + ["b", "c"], + ["b", "c", "d"], + ["b", "c", "d", "e"], + ["b", "c", "e"], + ["b", "c", "f"], + ["b", "d"], + ["b", "d", "e"], + ["b", "e"], + ["b", "f"], + ["c", "d"], + ["c", "d", "e"], + ["c", "d", "e", "g"], + ["c", "d", "g"], + ["c", "e"], + ["c", "e", "g"], + ["c", "f"], + ["c", "f", "g"], + ["c", "g"], + ["d", "e"], + ["d", "e", "g"], + ["d", "g"], + ["e", "g"], + ["f", "g"], + ["a", "b", "c"], + ["a", "b", "c", "d"], + ["a", "b", "c", "d", "e"], + ["a", "b", "c", "e"], + ] + + assert sorted(map(sorted, cliques)) == sorted(map(sorted, expected_cliques)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cluster.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cluster.py new file mode 100644 index 0000000..6d2a410 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cluster.py @@ -0,0 +1,584 @@ +import pytest + +import networkx as nx + + +def test_square_clustering_adjacent_squares(): + G = nx.Graph([(1, 2), (1, 3), (2, 4), (3, 4), (3, 5), (4, 6), (5, 6)]) + # Corner nodes: C_4 == 0.5, central face nodes: C_4 = 1 / 3 + expected = {1: 0.5, 2: 0.5, 3: 1 / 3, 4: 1 / 3, 5: 0.5, 6: 0.5} + assert nx.square_clustering(G) == expected + + +def test_square_clustering_2d_grid(): + G = nx.grid_2d_graph(3, 3) + # Central node: 4 squares out of 20 potential + expected = { + (0, 0): 1 / 3, + (0, 1): 0.25, + (0, 2): 1 / 3, + (1, 0): 0.25, + (1, 1): 0.2, + (1, 2): 0.25, + (2, 0): 1 / 3, + (2, 1): 0.25, + (2, 2): 1 / 3, + } + assert nx.square_clustering(G) == expected + + +def test_square_clustering_multiple_squares_non_complete(): + """An example where all nodes are part of all squares, but not every node + is connected to every other.""" + G = nx.Graph([(0, 1), (0, 2), (1, 3), (2, 3), (1, 4), (2, 4), (1, 5), (2, 5)]) + expected = dict.fromkeys(G, 1) + assert nx.square_clustering(G) == expected + + +class TestTriangles: + def test_empty(self): + G = nx.Graph() + assert list(nx.triangles(G).values()) == [] + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.triangles(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + assert nx.triangles(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.triangles(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0] + assert nx.triangles(G, 1) == 0 + assert list(nx.triangles(G, [1, 2]).values()) == [0, 0] + assert nx.triangles(G, 1) == 0 + assert nx.triangles(G, [1, 2]) == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.triangles(G).values()) == [6, 6, 6, 6, 6] + assert sum(nx.triangles(G).values()) / 3 == 10 + assert nx.triangles(G, 1) == 6 + 
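+ # K5 contains C(5, 3) = 10 triangles and each node lies in C(4, 2) = 6 of them, + # which is what the assertions above check. Removing edge (1, 2) below kills the + # 3 triangles through that edge: nodes 1 and 2 drop to 3 each, the rest to 5.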
G.remove_edge(1, 2) + assert list(nx.triangles(G).values()) == [5, 3, 3, 5, 5] + assert nx.triangles(G, 1) == 3 + G.add_edge(3, 3) # ignore self-edges + assert list(nx.triangles(G).values()) == [5, 3, 3, 5, 5] + assert nx.triangles(G, 3) == 5 + + +class TestDirectedClustering: + def test_clustering(self): + G = nx.DiGraph() + assert list(nx.clustering(G).values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10, create_using=nx.DiGraph()) + assert list(nx.clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + assert nx.clustering(G, 0) == 0 + + def test_k5(self): + G = nx.complete_graph(5, create_using=nx.DiGraph()) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G).values()) == [ + 11 / 12, + 1, + 1, + 11 / 12, + 11 / 12, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 11 / 12} + G.remove_edge(2, 1) + assert list(nx.clustering(G).values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337} + assert nx.clustering(G, 4) == 5 / 6 + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(0, 4) + assert nx.clustering(G)[0] == 1 / 6 + + +class TestDirectedWeightedClustering: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def test_clustering(self): + G = nx.DiGraph() + assert list(nx.clustering(G, weight="weight").values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10, create_using=nx.DiGraph()) + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, weight="weight") == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_k5(self): + G = nx.complete_graph(5, create_using=nx.DiGraph()) + assert list(nx.clustering(G, weight="weight").values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G, weight="weight") == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G, weight="weight").values()) == [ + 11 / 12, + 1, + 1, + 11 / 12, + 11 / 12, + ] + assert nx.clustering(G, [1, 4], weight="weight") == {1: 1, 4: 11 / 12} + G.remove_edge(2, 1) + assert list(nx.clustering(G, weight="weight").values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4], weight="weight") == { + 1: 1, + 4: 0.83333333333333337, + } + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(0, 4, weight=2) + assert nx.clustering(G)[0] == 1 / 6 + # Relaxed comparisons to allow graphblas-algorithms to pass tests + np.testing.assert_allclose(nx.clustering(G, weight="weight")[0], 1 / 12) + np.testing.assert_allclose(nx.clustering(G, 0, weight="weight"), 1 / 12) + + +class TestWeightedClustering: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def test_clustering(self): + G = nx.Graph() + assert list(nx.clustering(G, weight="weight").values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, weight="weight") == { + 0: 0, + 1: 0, + 2: 0, + 
3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, 1) == 0 + assert list(nx.clustering(G, [1, 2], weight="weight").values()) == [0, 0] + assert nx.clustering(G, 1, weight="weight") == 0 + assert nx.clustering(G, [1, 2], weight="weight") == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G, weight="weight").values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G, weight="weight") == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G, weight="weight").values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4], weight="weight") == { + 1: 1, + 4: 0.83333333333333337, + } + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3) + G.add_edge(0, 4, weight=2) + assert nx.clustering(G)[0] == 1 / 3 + np.testing.assert_allclose(nx.clustering(G, weight="weight")[0], 1 / 6) + np.testing.assert_allclose(nx.clustering(G, 0, weight="weight"), 1 / 6) + + def test_triangle_and_signed_edge(self): + G = nx.cycle_graph(3) + G.add_edge(0, 1, weight=-1) + G.add_edge(3, 0, weight=0) + assert nx.clustering(G)[0] == 1 / 3 + assert nx.clustering(G, weight="weight")[0] == -1 / 3 + + +class TestClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_clustering(self): + G = nx.Graph() + assert list(nx.clustering(G).values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.clustering(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0] + assert nx.clustering(G, 1) == 0 + assert list(nx.clustering(G, [1, 2]).values()) == [0, 0] + assert nx.clustering(G, 1) == 0 + assert nx.clustering(G, [1, 2]) == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G).values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337} + + def test_k5_signed(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + G.add_edge(0, 1, weight=-1) + assert list(nx.clustering(G, weight="weight").values()) == [ + 1 / 6, + -1 / 3, + 1, + 3 / 6, + 3 / 6, + ] + + +class TestTransitivity: + def test_transitivity(self): + G = nx.Graph() + assert nx.transitivity(G) == 0 + + def test_path(self): + G = nx.path_graph(10) + assert nx.transitivity(G) == 0 + + def test_cubical(self): + G = nx.cubical_graph() + assert nx.transitivity(G) == 0 + + def test_k5(self): + G = nx.complete_graph(5) + assert nx.transitivity(G) == 1 + G.remove_edge(1, 2) + assert nx.transitivity(G) == 0.875 + + +class TestSquareClustering: + def test_clustering(self): + G = nx.Graph() + assert list(nx.square_clustering(G).values()) == [] + assert nx.square_clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.square_clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert 
nx.square_clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.square_clustering(G).values()) == [ + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + ] + assert list(nx.square_clustering(G, [1, 2]).values()) == [1 / 3, 1 / 3] + assert nx.square_clustering(G, [1])[1] == 1 / 3 + assert nx.square_clustering(G, 1) == 1 / 3 + assert nx.square_clustering(G, [1, 2]) == {1: 1 / 3, 2: 1 / 3} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.square_clustering(G).values()) == [1, 1, 1, 1, 1] + + def test_bipartite_k5(self): + G = nx.complete_bipartite_graph(5, 5) + assert list(nx.square_clustering(G).values()) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + + def test_lind_square_clustering(self): + """Test C4 for figure 1 Lind et al (2005)""" + G = nx.Graph( + [ + (1, 2), + (1, 3), + (1, 6), + (1, 7), + (2, 4), + (2, 5), + (3, 4), + (3, 5), + (6, 7), + (7, 8), + (6, 8), + (7, 9), + (7, 10), + (6, 11), + (6, 12), + (2, 13), + (2, 14), + (3, 15), + (3, 16), + ] + ) + G1 = G.subgraph([1, 2, 3, 4, 5, 13, 14, 15, 16]) + G2 = G.subgraph([1, 6, 7, 8, 9, 10, 11, 12]) + assert nx.square_clustering(G, [1])[1] == 3 / 43 + assert nx.square_clustering(G1, [1])[1] == 2 / 6 + assert nx.square_clustering(G2, [1])[1] == 1 / 5 + + def test_peng_square_clustering(self): + """Test eq2 for figure 1 Peng et al (2008)""" + # Example graph from figure 1b + G = nx.Graph([(1, 2), (1, 3), (2, 4), (3, 4), (3, 5), (3, 6)]) + # From table 1, row 2 + expected = {1: 1 / 3, 2: 1, 3: 0.2, 4: 1 / 3, 5: 0, 6: 0} + assert nx.square_clustering(G) == expected + + def test_self_loops_square_clustering(self): + G = nx.path_graph(5) + assert nx.square_clustering(G) == {0: 0, 1: 0, 2: 0, 3: 0, 4: 0} + G.add_edges_from([(0, 0), (1, 1), (2, 2)]) + assert nx.square_clustering(G) == {0: 0, 1: 0, 2: 0, 3: 0, 4: 0} + + +class TestAverageClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_empty(self): + G = nx.Graph() + with pytest.raises(ZeroDivisionError): + nx.average_clustering(G) + + def test_average_clustering(self): + G = nx.cycle_graph(3) + G.add_edge(2, 3) + assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 4 + assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 4 + assert nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 2 + + def test_average_clustering_signed(self): + G = nx.cycle_graph(3) + G.add_edge(2, 3) + G.add_edge(0, 1, weight=-1) + assert nx.average_clustering(G, weight="weight") == (-1 - 1 - 1 / 3) / 4 + assert ( + nx.average_clustering(G, weight="weight", count_zeros=True) + == (-1 - 1 - 1 / 3) / 4 + ) + assert ( + nx.average_clustering(G, weight="weight", count_zeros=False) + == (-1 - 1 - 1 / 3) / 3 + ) + + +class TestDirectedAverageClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_empty(self): + G = nx.DiGraph() + with pytest.raises(ZeroDivisionError): + nx.average_clustering(G) + + def test_average_clustering(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(2, 3) + assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 8 + assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 8 + assert 
nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 4 + + +class TestGeneralizedDegree: + def test_generalized_degree(self): + G = nx.Graph() + assert nx.generalized_degree(G) == {} + + def test_path(self): + G = nx.path_graph(5) + assert nx.generalized_degree(G, 0) == {0: 1} + assert nx.generalized_degree(G, 1) == {0: 2} + + def test_cubical(self): + G = nx.cubical_graph() + assert nx.generalized_degree(G, 0) == {0: 3} + + def test_k5(self): + G = nx.complete_graph(5) + assert nx.generalized_degree(G, 0) == {3: 4} + G.remove_edge(0, 1) + assert nx.generalized_degree(G, 0) == {2: 3} + assert nx.generalized_degree(G, [1, 2]) == {1: {2: 3}, 2: {2: 2, 3: 2}} + assert nx.generalized_degree(G) == { + 0: {2: 3}, + 1: {2: 3}, + 2: {2: 2, 3: 2}, + 3: {2: 2, 3: 2}, + 4: {2: 2, 3: 2}, + } diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_communicability.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_communicability.py new file mode 100644 index 0000000..0f44709 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_communicability.py @@ -0,0 +1,80 @@ +from collections import defaultdict + +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.communicability_alg import communicability, communicability_exp + + +class TestCommunicability: + def test_communicability(self): + answer = { + 0: {0: 1.5430806348152435, 1: 1.1752011936438012}, + 1: {0: 1.1752011936438012, 1: 1.5430806348152435}, + } + # answer={(0, 0): 1.5430806348152435, + # (0, 1): 1.1752011936438012, + # (1, 0): 1.1752011936438012, + # (1, 1): 1.5430806348152435} + + result = communicability(nx.path_graph(2)) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == pytest.approx(result[k1][k2], abs=1e-7) + + def test_communicability2(self): + answer_orig = { + ("1", "1"): 1.6445956054135658, + ("1", "Albert"): 0.7430186221096251, + ("1", "Aric"): 0.7430186221096251, + ("1", "Dan"): 1.6208126320442937, + ("1", "Franck"): 0.42639707170035257, + ("Albert", "1"): 0.7430186221096251, + ("Albert", "Albert"): 2.4368257358712189, + ("Albert", "Aric"): 1.4368257358712191, + ("Albert", "Dan"): 2.0472097037446453, + ("Albert", "Franck"): 1.8340111678944691, + ("Aric", "1"): 0.7430186221096251, + ("Aric", "Albert"): 1.4368257358712191, + ("Aric", "Aric"): 2.4368257358712193, + ("Aric", "Dan"): 2.0472097037446457, + ("Aric", "Franck"): 1.8340111678944691, + ("Dan", "1"): 1.6208126320442937, + ("Dan", "Albert"): 2.0472097037446453, + ("Dan", "Aric"): 2.0472097037446457, + ("Dan", "Dan"): 3.1306328496328168, + ("Dan", "Franck"): 1.4860372442192515, + ("Franck", "1"): 0.42639707170035257, + ("Franck", "Albert"): 1.8340111678944691, + ("Franck", "Aric"): 1.8340111678944691, + ("Franck", "Dan"): 1.4860372442192515, + ("Franck", "Franck"): 2.3876142275231915, + } + + answer = defaultdict(dict) + for (k1, k2), v in answer_orig.items(): + answer[k1][k2] = v + + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + + result = communicability(G1) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == pytest.approx(result[k1][k2], 
abs=1e-7) + + result = communicability_exp(G1) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == pytest.approx(result[k1][k2], abs=1e-7) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_core.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_core.py new file mode 100644 index 0000000..7cbaf75 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_core.py @@ -0,0 +1,266 @@ +import pytest + +import networkx as nx +from networkx.utils import nodes_equal + + +class TestCore: + @classmethod + def setup_class(cls): + # G is the example graph in Figure 1 from Batagelj and + # Zaversnik's paper titled An O(m) Algorithm for Cores + # Decomposition of Networks, 2003, + # http://arXiv.org/abs/cs/0310049. With nodes labeled as + # shown, the 3-core is given by nodes 1-8, the 2-core by nodes + # 9-16, the 1-core by nodes 17-20 and node 21 is in the + # 0-core. + t1 = nx.convert_node_labels_to_integers(nx.tetrahedral_graph(), 1) + t2 = nx.convert_node_labels_to_integers(t1, 5) + G = nx.union(t1, t2) + G.add_edges_from( + [ + (3, 7), + (2, 11), + (11, 5), + (11, 12), + (5, 12), + (12, 19), + (12, 18), + (3, 9), + (7, 9), + (7, 10), + (9, 10), + (9, 20), + (17, 13), + (13, 14), + (14, 15), + (15, 16), + (16, 13), + ] + ) + G.add_node(21) + cls.G = G + + # Create the graph H resulting from the degree sequence + # [0, 1, 2, 2, 2, 2, 3] when using the Havel-Hakimi algorithm. + + degseq = [0, 1, 2, 2, 2, 2, 3] + H = nx.havel_hakimi_graph(degseq) + mapping = {6: 0, 0: 1, 4: 3, 5: 6, 3: 4, 1: 2, 2: 5} + cls.H = nx.relabel_nodes(H, mapping) + + def test_trivial(self): + """Empty graph""" + G = nx.Graph() + assert nx.core_number(G) == {} + + def test_core_number(self): + core = nx.core_number(self.G) + nodes_by_core = [sorted(n for n in core if core[n] == val) for val in range(4)] + assert nodes_equal(nodes_by_core[0], [21]) + assert nodes_equal(nodes_by_core[1], [17, 18, 19, 20]) + assert nodes_equal(nodes_by_core[2], [9, 10, 11, 12, 13, 14, 15, 16]) + assert nodes_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8]) + + def test_core_number2(self): + core = nx.core_number(self.H) + nodes_by_core = [sorted(n for n in core if core[n] == val) for val in range(3)] + assert nodes_equal(nodes_by_core[0], [0]) + assert nodes_equal(nodes_by_core[1], [1, 3]) + assert nodes_equal(nodes_by_core[2], [2, 4, 5, 6]) + + def test_core_number_multigraph(self): + G = nx.complete_graph(3) + G = nx.MultiGraph(G) + G.add_edge(1, 2) + with pytest.raises( + nx.NetworkXNotImplemented, match="not implemented for multigraph type" + ): + nx.core_number(G) + + def test_core_number_self_loop(self): + G = nx.cycle_graph(3) + G.add_edge(0, 0) + with pytest.raises( + nx.NetworkXNotImplemented, match="Input graph has self loops" + ): + nx.core_number(G) + + def test_directed_core_number(self): + """core number had a bug for directed graphs found in issue #1959""" + # small example where too timid edge removal can make cn[2] = 3 + G = nx.DiGraph() + edges = [(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)] + G.add_edges_from(edges) + assert nx.core_number(G) == {1: 2, 2: 2, 3: 2, 4: 2} + # small example where too aggressive edge removal can make cn[2] = 2 + more_edges = [(1, 5), (3, 5), (4, 5), (3, 6), (4, 6), (5, 6)] + G.add_edges_from(more_edges) + assert nx.core_number(G) == {1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3} + + def test_main_core(self): + main_core_subgraph = nx.k_core(self.H) + assert sorted(main_core_subgraph.nodes()) == [2, 4, 5, 6] 
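+ # A quick reminder of the definition these subgraph tests exercise (an + # illustrative sketch, not part of the upstream suite): the k-core is the + # maximal subgraph in which every node has degree >= k, e.g. + # G = nx.path_graph(3) # degrees 1, 2, 1 + # sorted(nx.k_core(G, k=1)) == [0, 1, 2] # every node has degree >= 1 + # sorted(nx.k_core(G, k=2)) == [] # iterated removal empties the graph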
+ + def test_k_core(self): + # k=0 + k_core_subgraph = nx.k_core(self.H, k=0) + assert sorted(k_core_subgraph.nodes()) == sorted(self.H.nodes()) + # k=1 + k_core_subgraph = nx.k_core(self.H, k=1) + assert sorted(k_core_subgraph.nodes()) == [1, 2, 3, 4, 5, 6] + # k=2 + k_core_subgraph = nx.k_core(self.H, k=2) + assert sorted(k_core_subgraph.nodes()) == [2, 4, 5, 6] + + def test_k_core_multigraph(self): + core_number = nx.core_number(self.H) + H = nx.MultiGraph(self.H) + with pytest.raises(nx.NetworkXNotImplemented): + nx.k_core(H, k=0, core_number=core_number) + + def test_main_crust(self): + main_crust_subgraph = nx.k_crust(self.H) + assert sorted(main_crust_subgraph.nodes()) == [0, 1, 3] + + def test_k_crust(self): + # k=2 + k_crust_subgraph = nx.k_crust(self.H, k=2) + assert sorted(k_crust_subgraph.nodes()) == sorted(self.H.nodes()) + # k=1 + k_crust_subgraph = nx.k_crust(self.H, k=1) + assert sorted(k_crust_subgraph.nodes()) == [0, 1, 3] + # k=0 + k_crust_subgraph = nx.k_crust(self.H, k=0) + assert sorted(k_crust_subgraph.nodes()) == [0] + + def test_k_crust_multigraph(self): + core_number = nx.core_number(self.H) + H = nx.MultiGraph(self.H) + with pytest.raises(nx.NetworkXNotImplemented): + nx.k_crust(H, k=0, core_number=core_number) + + def test_main_shell(self): + main_shell_subgraph = nx.k_shell(self.H) + assert sorted(main_shell_subgraph.nodes()) == [2, 4, 5, 6] + + def test_k_shell(self): + # k=2 + k_shell_subgraph = nx.k_shell(self.H, k=2) + assert sorted(k_shell_subgraph.nodes()) == [2, 4, 5, 6] + # k=1 + k_shell_subgraph = nx.k_shell(self.H, k=1) + assert sorted(k_shell_subgraph.nodes()) == [1, 3] + # k=0 + k_shell_subgraph = nx.k_shell(self.H, k=0) + assert sorted(k_shell_subgraph.nodes()) == [0] + + def test_k_shell_multigraph(self): + core_number = nx.core_number(self.H) + H = nx.MultiGraph(self.H) + with pytest.raises(nx.NetworkXNotImplemented): + nx.k_shell(H, k=0, core_number=core_number) + + def test_k_corona(self): + # k=2 + k_corona_subgraph = nx.k_corona(self.H, k=2) + assert sorted(k_corona_subgraph.nodes()) == [2, 4, 5, 6] + # k=1 + k_corona_subgraph = nx.k_corona(self.H, k=1) + assert sorted(k_corona_subgraph.nodes()) == [1] + # k=0 + k_corona_subgraph = nx.k_corona(self.H, k=0) + assert sorted(k_corona_subgraph.nodes()) == [0] + + def test_k_corona_multigraph(self): + core_number = nx.core_number(self.H) + H = nx.MultiGraph(self.H) + with pytest.raises(nx.NetworkXNotImplemented): + nx.k_corona(H, k=0, core_number=core_number) + + def test_k_truss(self): + # k=-1 + k_truss_subgraph = nx.k_truss(self.G, -1) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=0 + k_truss_subgraph = nx.k_truss(self.G, 0) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=1 + k_truss_subgraph = nx.k_truss(self.G, 1) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=2 + k_truss_subgraph = nx.k_truss(self.G, 2) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=3 + k_truss_subgraph = nx.k_truss(self.G, 3) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 13)) + + k_truss_subgraph = nx.k_truss(self.G, 4) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 9)) + + k_truss_subgraph = nx.k_truss(self.G, 5) + assert sorted(k_truss_subgraph.nodes()) == [] + + def test_k_truss_digraph(self): + G = nx.complete_graph(3) + G = nx.DiGraph(G) + G.add_edge(2, 1) + with pytest.raises( + nx.NetworkXNotImplemented, match="not implemented for directed type" + ): + nx.k_truss(G, k=1) + + def 
test_k_truss_multigraph(self): + G = nx.complete_graph(3) + G = nx.MultiGraph(G) + G.add_edge(1, 2) + with pytest.raises( + nx.NetworkXNotImplemented, match="not implemented for multigraph type" + ): + nx.k_truss(G, k=1) + + def test_k_truss_self_loop(self): + G = nx.cycle_graph(3) + G.add_edge(0, 0) + with pytest.raises( + nx.NetworkXNotImplemented, match="Input graph has self loops" + ): + nx.k_truss(G, k=1) + + def test_onion_layers(self): + layers = nx.onion_layers(self.G) + nodes_by_layer = [ + sorted(n for n in layers if layers[n] == val) for val in range(1, 7) + ] + assert nodes_equal(nodes_by_layer[0], [21]) + assert nodes_equal(nodes_by_layer[1], [17, 18, 19, 20]) + assert nodes_equal(nodes_by_layer[2], [10, 12, 13, 14, 15, 16]) + assert nodes_equal(nodes_by_layer[3], [9, 11]) + assert nodes_equal(nodes_by_layer[4], [1, 2, 4, 5, 6, 8]) + assert nodes_equal(nodes_by_layer[5], [3, 7]) + + def test_onion_digraph(self): + G = nx.complete_graph(3) + G = nx.DiGraph(G) + G.add_edge(2, 1) + with pytest.raises( + nx.NetworkXNotImplemented, match="not implemented for directed type" + ): + nx.onion_layers(G) + + def test_onion_multigraph(self): + G = nx.complete_graph(3) + G = nx.MultiGraph(G) + G.add_edge(1, 2) + with pytest.raises( + nx.NetworkXNotImplemented, match="not implemented for multigraph type" + ): + nx.onion_layers(G) + + def test_onion_self_loop(self): + G = nx.cycle_graph(3) + G.add_edge(0, 0) + with pytest.raises( + nx.NetworkXNotImplemented, match="Input graph contains self loops" + ): + nx.onion_layers(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_covering.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_covering.py new file mode 100644 index 0000000..b2f97a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_covering.py @@ -0,0 +1,85 @@ +import pytest + +import networkx as nx + + +class TestMinEdgeCover: + """Tests for :func:`networkx.algorithms.min_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert nx.min_edge_cover(G) == set() + + def test_graph_with_loop(self): + G = nx.Graph() + G.add_edge(0, 0) + assert nx.min_edge_cover(G) == {(0, 0)} + + def test_graph_with_isolated_v(self): + G = nx.Graph() + G.add_node(1) + with pytest.raises( + nx.NetworkXException, + match="Graph has a node with no edge incident on it, so no edge cover exists.", + ): + nx.min_edge_cover(G) + + def test_graph_single_edge(self): + G = nx.Graph([(0, 1)]) + assert nx.min_edge_cover(G) in ({(0, 1)}, {(1, 0)}) + + def test_graph_two_edge_path(self): + G = nx.path_graph(3) + min_cover = nx.min_edge_cover(G) + assert len(min_cover) == 2 + for u, v in G.edges: + assert (u, v) in min_cover or (v, u) in min_cover + + def test_bipartite_explicit(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + # Use bipartite method by prescribing the algorithm + min_cover = nx.min_edge_cover( + G, nx.algorithms.bipartite.matching.eppstein_matching + ) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 + # Use the default method which is not specialized for bipartite + min_cover2 = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover2) + assert len(min_cover2) == 4 + + def test_complete_graph_even(self): + G = nx.complete_graph(10) + min_cover = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 5 + 
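+ # K_10 has even order, so a perfect matching of 10 / 2 = 5 edges covers it; for + # an odd order one node must be covered by a second edge, hence the + # ceil(11 / 2) = 6 expected in the next test.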
+ def test_complete_graph_odd(self): + G = nx.complete_graph(11) + min_cover = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 6 + + +class TestIsEdgeCover: + """Tests for :func:`networkx.algorithms.is_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert nx.is_edge_cover(G, set()) + + def test_graph_with_loop(self): + G = nx.Graph() + G.add_edge(1, 1) + assert nx.is_edge_cover(G, {(1, 1)}) + + def test_graph_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert nx.is_edge_cover(G, {(0, 0), (1, 1)}) + assert nx.is_edge_cover(G, {(0, 1), (1, 0)}) + assert nx.is_edge_cover(G, {(0, 1)}) + assert not nx.is_edge_cover(G, {(0, 0)}) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cuts.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cuts.py new file mode 100644 index 0000000..923efa5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cuts.py @@ -0,0 +1,171 @@ +"""Unit tests for the :mod:`networkx.algorithms.cuts` module.""" + +import networkx as nx + + +class TestCutSize: + """Unit tests for the :func:`~networkx.cut_size` function.""" + + def test_symmetric(self): + """Tests that the cut size is symmetric.""" + G = nx.barbell_graph(3, 0) + S = {0, 1, 4} + T = {2, 3, 5} + assert nx.cut_size(G, S, T) == 4 + assert nx.cut_size(G, T, S) == 4 + + def test_single_edge(self): + """Tests for a cut of a single edge.""" + G = nx.barbell_graph(3, 0) + S = {0, 1, 2} + T = {3, 4, 5} + assert nx.cut_size(G, S, T) == 1 + assert nx.cut_size(G, T, S) == 1 + + def test_directed(self): + """Tests that each directed edge is counted once in the cut.""" + G = nx.barbell_graph(3, 0).to_directed() + S = {0, 1, 2} + T = {3, 4, 5} + assert nx.cut_size(G, S, T) == 2 + assert nx.cut_size(G, T, S) == 2 + + def test_directed_symmetric(self): + """Tests that a cut in a directed graph is symmetric.""" + G = nx.barbell_graph(3, 0).to_directed() + S = {0, 1, 4} + T = {2, 3, 5} + assert nx.cut_size(G, S, T) == 8 + assert nx.cut_size(G, T, S) == 8 + + def test_multigraph(self): + """Tests that parallel edges are each counted for a cut.""" + G = nx.MultiGraph(["ab", "ab"]) + assert nx.cut_size(G, {"a"}, {"b"}) == 2 + + +class TestVolume: + """Unit tests for the :func:`~networkx.volume` function.""" + + def test_graph(self): + G = nx.cycle_graph(4) + assert nx.volume(G, {0, 1}) == 4 + + def test_digraph(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0)]) + assert nx.volume(G, {0, 1}) == 2 + + def test_multigraph(self): + edges = list(nx.cycle_graph(4).edges()) + G = nx.MultiGraph(edges * 2) + assert nx.volume(G, {0, 1}) == 8 + + def test_multidigraph(self): + edges = [(0, 1), (1, 2), (2, 3), (3, 0)] + G = nx.MultiDiGraph(edges * 2) + assert nx.volume(G, {0, 1}) == 4 + + def test_barbell(self): + G = nx.barbell_graph(3, 0) + assert nx.volume(G, {0, 1, 2}) == 7 + assert nx.volume(G, {3, 4, 5}) == 7 + + +class TestNormalizedCutSize: + """Unit tests for the :func:`~networkx.normalized_cut_size` function.""" + + def test_graph(self): + G = nx.path_graph(4) + S = {1, 2} + T = set(G) - S + size = nx.normalized_cut_size(G, S, T) + # The cut looks like this: o-{-o--o-}-o + expected = 2 * ((1 / 4) + (1 / 2)) + assert expected == size + # Test with no input T + assert expected == nx.normalized_cut_size(G, S) + + def test_directed(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3)]) + S = {1, 2} + T = set(G) - S + size = nx.normalized_cut_size(G, S, T) + # The cut looks like this: 
o-{->o-->o-}->o + expected = 2 * ((1 / 2) + (1 / 1)) + assert expected == size + # Test with no input T + assert expected == nx.normalized_cut_size(G, S) + + +class TestConductance: + """Unit tests for the :func:`~networkx.conductance` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + # Consider the singleton sets containing the "bridge" nodes. + # There is only one cut edge, and each set has volume five. + S = {4} + T = {5} + conductance = nx.conductance(G, S, T) + expected = 1 / 5 + assert expected == conductance + # Test with no input T + G2 = nx.barbell_graph(3, 0) + # There is only one cut edge, and each set has volume seven. + S2 = {0, 1, 2} + assert nx.conductance(G2, S2) == 1 / 7 + + +class TestEdgeExpansion: + """Unit tests for the :func:`~networkx.edge_expansion` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + S = set(range(5)) + T = set(G) - S + expansion = nx.edge_expansion(G, S, T) + expected = 1 / 5 + assert expected == expansion + # Test with no input T + assert expected == nx.edge_expansion(G, S) + + +class TestNodeExpansion: + """Unit tests for the :func:`~networkx.node_expansion` function.""" + + def test_graph(self): + G = nx.path_graph(8) + S = {3, 4, 5} + expansion = nx.node_expansion(G, S) + # The neighborhood of S has cardinality five, and S has + # cardinality three. + expected = 5 / 3 + assert expected == expansion + + +class TestBoundaryExpansion: + """Unit tests for the :func:`~networkx.boundary_expansion` function.""" + + def test_graph(self): + G = nx.complete_graph(10) + S = set(range(4)) + expansion = nx.boundary_expansion(G, S) + # The node boundary of S has cardinality six, and S has + # cardinality four. + expected = 6 / 4 + assert expected == expansion + + +class TestMixingExpansion: + """Unit tests for the :func:`~networkx.mixing_expansion` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + S = set(range(5)) + T = set(G) - S + expansion = nx.mixing_expansion(G, S, T) + # There is one cut edge, and the total number of edges in the + # graph is twice the total number of edges in a clique of size + # five, plus one more for the bridge. 
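+ # That is, m = 2 * C(5, 2) + 1 = 5 * 4 + 1 = 21 edges in total, and + # mixing_expansion = cut_size / (2 * m) = 1 / 42: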
+ expected = 1 / (2 * (5 * 4 + 1)) + assert expected == expansion diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cycles.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cycles.py new file mode 100644 index 0000000..1b43929 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_cycles.py @@ -0,0 +1,984 @@ +import random +from itertools import chain, islice, tee +from math import inf + +import pytest + +import networkx as nx +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + + +def check_independent(basis): + if len(basis) == 0: + return + + np = pytest.importorskip("numpy") + sp = pytest.importorskip("scipy") # Required by incidence_matrix + + H = nx.Graph() + for b in basis: + nx.add_cycle(H, b) + inc = nx.incidence_matrix(H, oriented=True) + rank = np.linalg.matrix_rank(inc.toarray(), tol=None, hermitian=False) + assert inc.shape[1] - rank == len(basis) + + +class TestCycles: + @classmethod + def setup_class(cls): + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2, 3]) + nx.add_cycle(G, [0, 3, 4, 5]) + nx.add_cycle(G, [0, 1, 6, 7, 8]) + G.add_edge(8, 9) + cls.G = G + + def is_cyclic_permutation(self, a, b): + n = len(a) + if len(b) != n: + return False + l = a + a + return any(l[i : i + n] == b for i in range(n)) + + def test_cycle_basis(self): + G = self.G + cy = nx.cycle_basis(G, 0) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + cy = nx.cycle_basis(G, 1) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + cy = nx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + # test disconnected graphs + nx.add_cycle(G, "ABC") + cy = nx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy[:-1]) + [sorted(cy[-1])] + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5], ["A", "B", "C"]] + + def test_cycle_basis2(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + cy = nx.cycle_basis(G, 0) + + def test_cycle_basis3(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + cy = nx.cycle_basis(G, 0) + + def test_cycle_basis_ordered(self): + # see gh-6654 replace sets with (ordered) dicts + G = nx.cycle_graph(5) + G.update(nx.cycle_graph(range(3, 8))) + cbG = nx.cycle_basis(G) + + perm = {1: 0, 0: 1} # switch 0 and 1 + H = nx.relabel_nodes(G, perm) + cbH = [[perm.get(n, n) for n in cyc] for cyc in nx.cycle_basis(H)] + assert cbG == cbH + + def test_cycle_basis_self_loop(self): + """Tests the function for graphs with self loops""" + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2, 3]) + nx.add_cycle(G, [0, 0, 6, 2]) + cy = nx.cycle_basis(G) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0], [0, 1, 2], [0, 2, 3], [0, 2, 6]] + + def test_simple_cycles(self): + edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + G = nx.DiGraph(edges) + cc = sorted(nx.simple_cycles(G)) + ca = [[0], [0, 1, 2], [0, 2], [1, 2], [2]] + assert len(cc) == len(ca) + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in ca) + + def test_simple_cycles_singleton(self): + G = nx.Graph([(0, 0)]) # self-loop + assert list(nx.simple_cycles(G)) == [[0]] + + def test_unsortable(self): + # this test ensures that graphs whose nodes without an intrinsic + # ordering do not cause issues + G = nx.DiGraph() + nx.add_cycle(G, ["a", 1]) + c = 
list(nx.simple_cycles(G)) + assert len(c) == 1 + + def test_simple_cycles_small(self): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3]) + c = sorted(nx.simple_cycles(G)) + assert len(c) == 1 + assert self.is_cyclic_permutation(c[0], [1, 2, 3]) + nx.add_cycle(G, [10, 20, 30]) + cc = sorted(nx.simple_cycles(G)) + assert len(cc) == 2 + ca = [[1, 2, 3], [10, 20, 30]] + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in ca) + + def test_simple_cycles_empty(self): + G = nx.DiGraph() + assert list(nx.simple_cycles(G)) == [] + + def worst_case_graph(self, k): + # see figure 1 in Johnson's paper + # this graph has exactly 3k simple cycles + G = nx.DiGraph() + for n in range(2, k + 2): + G.add_edge(1, n) + G.add_edge(n, k + 2) + G.add_edge(2 * k + 1, 1) + for n in range(k + 2, 2 * k + 2): + G.add_edge(n, 2 * k + 2) + G.add_edge(n, n + 1) + G.add_edge(2 * k + 3, k + 2) + for n in range(2 * k + 3, 3 * k + 3): + G.add_edge(2 * k + 2, n) + G.add_edge(n, 3 * k + 3) + G.add_edge(3 * k + 3, 2 * k + 2) + return G + + def test_worst_case_graph(self): + # see figure 1 in Johnson's paper + for k in range(3, 10): + G = self.worst_case_graph(k) + l = len(list(nx.simple_cycles(G))) + assert l == 3 * k + + def test_recursive_simple_and_not(self): + for k in range(2, 10): + G = self.worst_case_graph(k) + cc = sorted(nx.simple_cycles(G)) + rcc = sorted(nx.recursive_simple_cycles(G)) + assert len(cc) == len(rcc) + for c in cc: + assert any(self.is_cyclic_permutation(c, r) for r in rcc) + for rc in rcc: + assert any(self.is_cyclic_permutation(rc, c) for c in cc) + + def test_simple_graph_with_reported_bug(self): + G = nx.DiGraph() + edges = [ + (0, 2), + (0, 3), + (1, 0), + (1, 3), + (2, 1), + (2, 4), + (3, 2), + (3, 4), + (4, 0), + (4, 1), + (4, 5), + (5, 0), + (5, 1), + (5, 2), + (5, 3), + ] + G.add_edges_from(edges) + cc = sorted(nx.simple_cycles(G)) + assert len(cc) == 26 + rcc = sorted(nx.recursive_simple_cycles(G)) + assert len(cc) == len(rcc) + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in rcc) + for rc in rcc: + assert any(self.is_cyclic_permutation(rc, c) for c in cc) + + +def pairwise(iterable): + a, b = tee(iterable) + next(b, None) + return zip(a, b) + + +def cycle_edges(c): + return pairwise(chain(c, islice(c, 1))) + + +def directed_cycle_edgeset(c): + return frozenset(cycle_edges(c)) + + +def undirected_cycle_edgeset(c): + if len(c) == 1: + return frozenset(cycle_edges(c)) + return frozenset(map(frozenset, cycle_edges(c))) + + +def multigraph_cycle_edgeset(c): + if len(c) <= 2: + return frozenset(cycle_edges(c)) + else: + return frozenset(map(frozenset, cycle_edges(c))) + + +class TestCycleEnumeration: + @staticmethod + def K(n): + return nx.complete_graph(n) + + @staticmethod + def D(n): + return nx.complete_graph(n).to_directed() + + @staticmethod + def edgeset_function(g): + if g.is_directed(): + return directed_cycle_edgeset + elif g.is_multigraph(): + return multigraph_cycle_edgeset + else: + return undirected_cycle_edgeset + + def check_cycle(self, g, c, es, cache, source, original_c, length_bound, chordless): + if length_bound is not None and len(c) > length_bound: + raise RuntimeError( + f"computed cycle {original_c} exceeds length bound {length_bound}" + ) + if source == "computed": + if es in cache: + raise RuntimeError( + f"computed cycle {original_c} has already been found!" 
+ ) + else: + cache[es] = tuple(original_c) + else: + if es in cache: + cache.pop(es) + else: + raise RuntimeError(f"expected cycle {original_c} was not computed") + + if not all(g.has_edge(*e) for e in es): + raise RuntimeError( + f"{source} claimed cycle {original_c} is not a cycle of g" + ) + if chordless and len(g.subgraph(c).edges) > len(c): + raise RuntimeError(f"{source} cycle {original_c} is not chordless") + + def check_cycle_algorithm( + self, + g, + expected_cycles, + length_bound=None, + chordless=False, + algorithm=None, + ): + if algorithm is None: + algorithm = nx.chordless_cycles if chordless else nx.simple_cycles + + # note: we shuffle the labels of g to rule out accidentally-correct + # behavior which occurred during the development of chordless cycle + # enumeration algorithms + + relabel = list(range(len(g))) + rng = random.Random(42) + rng.shuffle(relabel) + label = dict(zip(g, relabel)) + unlabel = dict(zip(relabel, g)) + h = nx.relabel_nodes(g, label, copy=True) + + edgeset = self.edgeset_function(h) + + params = {} + if length_bound is not None: + params["length_bound"] = length_bound + + cycle_cache = {} + for c in algorithm(h, **params): + original_c = [unlabel[x] for x in c] + es = edgeset(c) + self.check_cycle( + h, c, es, cycle_cache, "computed", original_c, length_bound, chordless + ) + + if isinstance(expected_cycles, int): + if len(cycle_cache) != expected_cycles: + raise RuntimeError( + f"expected {expected_cycles} cycles, got {len(cycle_cache)}" + ) + return + for original_c in expected_cycles: + c = [label[x] for x in original_c] + es = edgeset(c) + self.check_cycle( + h, c, es, cycle_cache, "expected", original_c, length_bound, chordless + ) + + if len(cycle_cache): + for c in cycle_cache.values(): + raise RuntimeError( + f"computed cycle {c} is valid but not in the expected cycle set!" 
+ ) + + def check_cycle_enumeration_integer_sequence( + self, + g_family, + cycle_counts, + length_bound=None, + chordless=False, + algorithm=None, + ): + for g, num_cycles in zip(g_family, cycle_counts): + self.check_cycle_algorithm( + g, + num_cycles, + length_bound=length_bound, + chordless=chordless, + algorithm=algorithm, + ) + + def test_directed_chordless_cycle_digons(self): + g = nx.DiGraph() + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(5)[::-1]) + g.add_edge(0, 0) + expected_cycles = [(0,), (1, 2), (2, 3), (3, 4)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=2) + + expected_cycles = [c for c in expected_cycles if len(c) < 2] + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=1) + + def test_chordless_cycles_multigraph_self_loops(self): + G = nx.MultiGraph([(1, 1), (2, 2), (1, 2), (1, 2)]) + expected_cycles = [[1], [2]] + self.check_cycle_algorithm(G, expected_cycles, chordless=True) + + G.add_edges_from([(2, 3), (3, 4), (3, 4), (1, 3)]) + expected_cycles = [[1], [2], [3, 4]] + self.check_cycle_algorithm(G, expected_cycles, chordless=True) + + def test_directed_chordless_cycle_undirected(self): + g = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 5), (5, 0), (5, 1), (0, 2)]) + expected_cycles = [(0, 2, 3, 4, 5), (1, 2, 3, 4, 5)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g = nx.DiGraph() + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(4, 9)) + g.add_edge(7, 3) + expected_cycles = [(0, 1, 2, 3, 4), (3, 4, 5, 6, 7), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g.add_edge(3, 7) + expected_cycles = [(0, 1, 2, 3, 4), (3, 7), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + expected_cycles = [(3, 7)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=4) + + g.remove_edge(7, 3) + expected_cycles = [(0, 1, 2, 3, 4), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g = nx.DiGraph((i, j) for i in range(10) for j in range(i)) + expected_cycles = [] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + def test_chordless_cycles_directed(self): + G = nx.DiGraph() + nx.add_cycle(G, range(5)) + nx.add_cycle(G, range(4, 12)) + expected = [[*range(5)], [*range(4, 12)]] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(7, 3) + expected.append([*range(3, 8)]) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(3, 7) + expected[-1] = [7, 3] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + expected.pop() + G.remove_edge(7, 3) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + def test_directed_chordless_cycle_diclique(self): + g_family = [self.D(n) for n in range(10)] + expected_cycles = [(n * n - n) // 2 for n in range(10)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected_cycles, chordless=True + ) + + expected_cycles = [(n * n - n) // 2 for n in range(10)] + 
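+ # With length_bound=2 the generic enumerator can only return digons, so the + # counts agree with the chordless run above: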
self.check_cycle_enumeration_integer_sequence( + g_family, expected_cycles, length_bound=2 + ) + + def test_directed_chordless_loop_blockade(self): + g = nx.DiGraph((i, i) for i in range(10)) + nx.add_cycle(g, range(10)) + expected_cycles = [(i,) for i in range(10)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + self.check_cycle_algorithm(g, expected_cycles, length_bound=1) + + g = nx.MultiDiGraph(g) + g.add_edges_from((i, i) for i in range(0, 10, 2)) + expected_cycles = [(i,) for i in range(1, 10, 2)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + def test_simple_cycles_notable_clique_sequences(self): + # A000292: Number of labeled graphs on n+3 nodes that are triangles. + g_family = [self.K(n) for n in range(2, 12)] + expected = [0, 1, 4, 10, 20, 35, 56, 84, 120, 165, 220] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, length_bound=3 + ) + + def triangles(g, **kwargs): + yield from (c for c in nx.simple_cycles(g, **kwargs) if len(c) == 3) + + # directed complete graphs have twice as many triangles thanks to reversal + g_family = [self.D(n) for n in range(2, 12)] + expected = [2 * e for e in expected] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, length_bound=3, algorithm=triangles + ) + + def four_cycles(g, **kwargs): + yield from (c for c in nx.simple_cycles(g, **kwargs) if len(c) == 4) + + # A050534: the number of 4-cycles in the complete graph K_{n+1} + expected = [0, 0, 0, 3, 15, 45, 105, 210, 378, 630, 990] + g_family = [self.K(n) for n in range(1, 12)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, length_bound=4, algorithm=four_cycles + ) + + # directed complete graphs have twice as many 4-cycles thanks to reversal + expected = [2 * e for e in expected] + g_family = [self.D(n) for n in range(1, 15)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, length_bound=4, algorithm=four_cycles + ) + + # A006231: the number of elementary circuits in a complete directed graph with n nodes + expected = [0, 1, 5, 20, 84, 409, 2365] + g_family = [self.D(n) for n in range(1, 8)] + self.check_cycle_enumeration_integer_sequence(g_family, expected) + + # A002807: Number of cycles in the complete graph on n nodes K_{n}. 
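+ # (a(n) = sum_{k=3..n} C(n, k) * (k - 1)! / 2; e.g. a(5) = 10 + 15 + 12 = 37)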
+ expected = [0, 0, 0, 1, 7, 37, 197, 1172] + g_family = [self.K(n) for n in range(8)] + self.check_cycle_enumeration_integer_sequence(g_family, expected) + + def test_directed_chordless_cycle_parallel_multiedges(self): + g = nx.MultiGraph() + + nx.add_cycle(g, range(5)) + expected = [[*range(5)]] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + expected = [*cycle_edges(range(5))] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + expected = [] + self.check_cycle_algorithm(g, expected, chordless=True) + + g = nx.MultiDiGraph() + + nx.add_cycle(g, range(5)) + expected = [[*range(5)]] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + g = nx.MultiDiGraph() + + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(5)[::-1]) + expected = [*cycle_edges(range(5))] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + def test_chordless_cycles_graph(self): + G = nx.Graph() + nx.add_cycle(G, range(5)) + nx.add_cycle(G, range(4, 12)) + expected = [[*range(5)], [*range(4, 12)]] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(7, 3) + expected.append([*range(3, 8)]) + expected.append([4, 3, 7, 8, 9, 10, 11]) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + def test_chordless_cycles_giant_hamiltonian(self): + # ... o - e - o - e - o ... # o = odd, e = even + # ... ---/ \-----/ \--- ... # <-- "long" edges + # + # each long edge belongs to exactly one triangle, and one giant cycle + # of length n/2. The remaining edges each belong to a triangle + + n = 1000 + assert n % 2 == 0 + G = nx.Graph() + for v in range(n): + if not v % 2: + G.add_edge(v, (v + 2) % n) + G.add_edge(v, (v + 1) % n) + + expected = [[*range(0, n, 2)]] + [ + [x % n for x in range(i, i + 3)] for i in range(0, n, 2) + ] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 3], length_bound=3, chordless=True + ) + + # ... o -> e -> o -> e -> o ... # o = odd, e = even + # ... <---/ \---<---/ \---< ... # <-- "long" edges + # + # this time, we orient the short and long edges in opposition + # the cycle structure of this graph is the same, but we need to reverse + # the long one in our representation. 
Also, we need to drop the size + # because our partitioning algorithm uses strongly connected components + # instead of separating graphs by their strong articulation points + + n = 100 + assert n % 2 == 0 + G = nx.DiGraph() + for v in range(n): + G.add_edge(v, (v + 1) % n) + if not v % 2: + G.add_edge((v + 2) % n, v) + + expected = [[*range(n - 2, -2, -2)]] + [ + [x % n for x in range(i, i + 3)] for i in range(0, n, 2) + ] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 3], length_bound=3, chordless=True + ) + + def test_simple_cycles_acyclic_tournament(self): + n = 10 + G = nx.DiGraph((x, y) for x in range(n) for y in range(x)) + self.check_cycle_algorithm(G, []) + self.check_cycle_algorithm(G, [], chordless=True) + + for k in range(n + 1): + self.check_cycle_algorithm(G, [], length_bound=k) + self.check_cycle_algorithm(G, [], length_bound=k, chordless=True) + + def test_simple_cycles_graph(self): + testG = nx.cycle_graph(8) + cyc1 = tuple(range(8)) + self.check_cycle_algorithm(testG, [cyc1]) + + testG.add_edge(4, -1) + nx.add_path(testG, [3, -2, -3, -4]) + self.check_cycle_algorithm(testG, [cyc1]) + + testG.update(nx.cycle_graph(range(8, 16))) + cyc2 = tuple(range(8, 16)) + self.check_cycle_algorithm(testG, [cyc1, cyc2]) + + testG.update(nx.cycle_graph(range(4, 12))) + cyc3 = tuple(range(4, 12)) + expected = { + (0, 1, 2, 3, 4, 5, 6, 7), # cyc1 + (8, 9, 10, 11, 12, 13, 14, 15), # cyc2 + (4, 5, 6, 7, 8, 9, 10, 11), # cyc3 + (4, 5, 6, 7, 8, 15, 14, 13, 12, 11), # cyc2 + cyc3 + (0, 1, 2, 3, 4, 11, 10, 9, 8, 7), # cyc1 + cyc3 + (0, 1, 2, 3, 4, 11, 12, 13, 14, 15, 8, 7), # cyc1 + cyc2 + cyc3 + } + self.check_cycle_algorithm(testG, expected) + assert len(expected) == (2**3 - 1) - 1 # 1 disjoint comb: cyc1 + cyc2 + + # Basis size = 5 (2 loops overlapping gives 5 small loops + # E + # / \ Note: A-F = 10-15 + # 1-2-3-4-5 + # / | | \ cyc1=012DAB -- left + # 0 D F 6 cyc2=234E -- top + # \ | | / cyc3=45678F -- right + # B-A-9-8-7 cyc4=89AC -- bottom + # \ / cyc5=234F89AD -- middle + # C + # + # combinations of 5 basis elements: 2^5 - 1 (one includes no cycles) + # + # disjoint combs: (11 total) not simple cycles + # Any pair not including cyc5 => choose(4, 2) = 6 + # Any triple not including cyc5 => choose(4, 3) = 4 + # Any quad not including cyc5 => choose(4, 4) = 1 + # + # we expect 31 - 11 = 20 simple cycles + # + testG = nx.cycle_graph(12) + testG.update(nx.cycle_graph([12, 10, 13, 2, 14, 4, 15, 8]).edges) + expected = (2**5 - 1) - 11 # 11 disjoint combinations + self.check_cycle_algorithm(testG, expected) + + def test_simple_cycles_bounded(self): + # iteratively construct a cluster of nested cycles running in the same direction + # there should be one cycle of every length + d = nx.DiGraph() + expected = [] + for n in range(10): + nx.add_cycle(d, range(n)) + expected.append(n) + for k, e in enumerate(expected): + self.check_cycle_algorithm(d, e, length_bound=k) + + # iteratively construct a path of undirected cycles, connected at articulation + # points. 
there should be one cycle of every length except 2: no digons + g = nx.Graph() + top = 0 + expected = [] + for n in range(10): + expected.append(n if n < 2 else n - 1) + if n == 2: + # no digons in undirected graphs + continue + nx.add_cycle(g, range(top, top + n)) + top += n + for k, e in enumerate(expected): + self.check_cycle_algorithm(g, e, length_bound=k) + + def test_simple_cycles_bound_corner_cases(self): + G = nx.cycle_graph(4) + DG = nx.cycle_graph(4, create_using=nx.DiGraph) + assert list(nx.simple_cycles(G, length_bound=0)) == [] + assert list(nx.simple_cycles(DG, length_bound=0)) == [] + assert list(nx.chordless_cycles(G, length_bound=0)) == [] + assert list(nx.chordless_cycles(DG, length_bound=0)) == [] + + def test_simple_cycles_bound_error(self): + with pytest.raises(ValueError): + G = nx.DiGraph() + for c in nx.simple_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.Graph() + for c in nx.simple_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.Graph() + for c in nx.chordless_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.DiGraph() + for c in nx.chordless_cycles(G, -1): + assert False + + def test_chordless_cycles_clique(self): + g_family = [self.K(n) for n in range(2, 15)] + expected = [0, 1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, chordless=True + ) + + # directed cliques have as many digons as undirected graphs have edges + expected = [(n * n - n) // 2 for n in range(15)] + g_family = [self.D(n) for n in range(15)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, chordless=True + ) + + +# These tests might fail with hash randomization since they depend on +# edge_dfs. For more information, see the comments in: +# networkx/algorithms/traversal/tests/test_edgedfs.py + + +class TestFindCycle: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(-1, 0), (0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + def test_graph_nocycle(self): + G = nx.Graph(self.edges) + pytest.raises(nx.exception.NetworkXNoCycle, nx.find_cycle, G, self.nodes) + + def test_graph_cycle(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1), (1, 2), (2, 0)] + assert x == x_ + + def test_graph_orientation_none(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 2), (2, 0)] + assert x == x_ + + def test_graph_orientation_original(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 0, FORWARD)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1), (1, 0)] + assert x == x_ + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 1)] # or (1, 0, 2) + # Hash randomization...could be any edge. 
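+ # Three parallel edges join nodes 0 and 1, and the cycle may return along + # either edge not taken on the way out, so only the endpoints are compared: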
+        assert x[0] == x_[0]
+        assert x[1][:2] == x_[1][:2]
+
+    def test_multidigraph(self):
+        G = nx.MultiDiGraph(self.edges)
+        x = list(nx.find_cycle(G, self.nodes))
+        x_ = [(0, 1, 0), (1, 0, 0)]  # or (1, 0, 1)
+        assert x[0] == x_[0]
+        assert x[1][:2] == x_[1][:2]
+
+    def test_digraph_ignore(self):
+        G = nx.DiGraph(self.edges)
+        x = list(nx.find_cycle(G, self.nodes, orientation="ignore"))
+        x_ = [(0, 1, FORWARD), (1, 0, FORWARD)]
+        assert x == x_
+
+    def test_digraph_reverse(self):
+        G = nx.DiGraph(self.edges)
+        x = list(nx.find_cycle(G, self.nodes, orientation="reverse"))
+        x_ = [(1, 0, REVERSE), (0, 1, REVERSE)]
+        assert x == x_
+
+    def test_multidigraph_ignore(self):
+        G = nx.MultiDiGraph(self.edges)
+        x = list(nx.find_cycle(G, self.nodes, orientation="ignore"))
+        x_ = [(0, 1, 0, FORWARD), (1, 0, 0, FORWARD)]  # or (1, 0, 1, 1)
+        assert x[0] == x_[0]
+        assert x[1][:2] == x_[1][:2]
+        assert x[1][3] == x_[1][3]
+
+    def test_multidigraph_ignore2(self):
+        # Loop traversed an edge while ignoring its orientation.
+        G = nx.MultiDiGraph([(0, 1), (1, 2), (1, 2)])
+        x = list(nx.find_cycle(G, [0, 1, 2], orientation="ignore"))
+        x_ = [(1, 2, 0, FORWARD), (1, 2, 1, REVERSE)]
+        assert x == x_
+
+    def test_multidigraph_original(self):
+        # Node 2 does not need to be searched again once it is visited from 4.
+        # The goal is to cover the case where 2 would be re-searched from 4
+        # when 4 is visited for the first time (so we must make sure that 4
+        # is not visited from 2, and hence that the edge orientation is
+        # respected).
+        G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 3), (4, 2)])
+        pytest.raises(
+            nx.exception.NetworkXNoCycle,
+            nx.find_cycle,
+            G,
+            [0, 1, 2, 3, 4],
+            orientation="original",
+        )
+
+    def test_dag(self):
+        G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
+        pytest.raises(
+            nx.exception.NetworkXNoCycle, nx.find_cycle, G, orientation="original"
+        )
+        x = list(nx.find_cycle(G, orientation="ignore"))
+        assert x == [(0, 1, FORWARD), (1, 2, FORWARD), (0, 2, REVERSE)]
+
+    def test_prev_explored(self):
+        # https://github.com/networkx/networkx/issues/2323
+
+        G = nx.DiGraph()
+        G.add_edges_from([(1, 0), (2, 0), (1, 2), (2, 1)])
+        pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G, source=0)
+        x = list(nx.find_cycle(G, 1))
+        x_ = [(1, 2), (2, 1)]
+        assert x == x_
+
+        x = list(nx.find_cycle(G, 2))
+        x_ = [(2, 1), (1, 2)]
+        assert x == x_
+
+        x = list(nx.find_cycle(G))
+        x_ = [(1, 2), (2, 1)]
+        assert x == x_
+
+    def test_no_cycle(self):
+        # https://github.com/networkx/networkx/issues/2439
+
+        G = nx.DiGraph()
+        G.add_edges_from([(1, 2), (2, 0), (3, 1), (3, 2)])
+        pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G, source=0)
+        pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G)
+
+
+def assert_basis_equal(a, b):
+    assert sorted(a) == sorted(b)
+
+
+class TestMinimumCycleBasis:
+    @classmethod
+    def setup_class(cls):
+        T = nx.Graph()
+        nx.add_cycle(T, [1, 2, 3, 4], weight=1)
+        T.add_edge(2, 4, weight=5)
+        cls.diamond_graph = T
+
+    def test_unweighted_diamond(self):
+        mcb = nx.minimum_cycle_basis(self.diamond_graph)
+        assert_basis_equal(mcb, [[2, 4, 1], [3, 4, 2]])
+
+    def test_weighted_diamond(self):
+        mcb = nx.minimum_cycle_basis(self.diamond_graph, weight="weight")
+        assert_basis_equal(mcb, [[2, 4, 1], [4, 3, 2, 1]])
+
+    def test_dimensionality(self):
+        # checks |MCB| = |E| - |V| + |NC|
+        ntrial = 10
+        for seed in range(1234, 1234 + ntrial):
+            rg = nx.erdos_renyi_graph(10, 0.3, seed=seed)
+            nnodes = rg.number_of_nodes()
+            nedges = rg.number_of_edges()
+            ncomp = nx.number_connected_components(rg)
+
+            mcb = nx.minimum_cycle_basis(rg)
+            assert len(mcb) == nedges - nnodes + ncomp
+            check_independent(mcb)
+
+    def test_complete_graph(self):
+        cg = nx.complete_graph(5)
+        mcb = nx.minimum_cycle_basis(cg)
+        assert all(len(cycle) == 3 for cycle in mcb)
+        check_independent(mcb)
+
+    def test_tree_graph(self):
+        tg = nx.balanced_tree(3, 3)
+        assert not nx.minimum_cycle_basis(tg)
+
+    def test_petersen_graph(self):
+        G = nx.petersen_graph()
+        mcb = list(nx.minimum_cycle_basis(G))
+        expected = [
+            [4, 9, 7, 5, 0],
+            [1, 2, 3, 4, 0],
+            [1, 6, 8, 5, 0],
+            [4, 3, 8, 5, 0],
+            [1, 6, 9, 4, 0],
+            [1, 2, 7, 5, 0],
+        ]
+        assert len(mcb) == len(expected)
+        assert all(c in expected for c in mcb)
+
+        # check that the node order within each cycle traces a closed path
+        for c in mcb:
+            assert all(G.has_edge(u, v) for u, v in nx.utils.pairwise(c, cyclic=True))
+        # check independence of the basis
+        check_independent(mcb)
+
+    def test_gh6787_variable_weighted_complete_graph(self):
+        N = 8
+        cg = nx.complete_graph(N)
+        cg.add_weighted_edges_from([(u, v, 9) for u, v in cg.edges])
+        cg.add_weighted_edges_from([(u, v, 1) for u, v in nx.cycle_graph(N).edges])
+        mcb = nx.minimum_cycle_basis(cg, weight="weight")
+        check_independent(mcb)
+
+    def test_gh6787_and_edge_attribute_names(self):
+        G = nx.cycle_graph(4)
+        G.add_weighted_edges_from([(0, 2, 10), (1, 3, 10)], weight="dist")
+        expected = [[1, 3, 0], [3, 2, 1, 0], [1, 2, 0]]
+        mcb = list(nx.minimum_cycle_basis(G, weight="dist"))
+        assert len(mcb) == len(expected)
+        assert all(c in expected for c in mcb)
+
+        # check the default weight: the "dist" attributes should be ignored
+        expected = [[1, 3, 0], [1, 2, 0], [3, 2, 0]]
+        mcb = list(nx.minimum_cycle_basis(G))
+        assert len(mcb) == len(expected)
+        assert all(c in expected for c in mcb)
+
+
+class TestGirth:
+    @pytest.mark.parametrize(
+        ("G", "expected"),
+        (
+            (nx.chvatal_graph(), 4),
+            (nx.tutte_graph(), 4),
+            (nx.petersen_graph(), 5),
+            (nx.heawood_graph(), 6),
+            (nx.pappus_graph(), 6),
+            (nx.random_labeled_tree(10, seed=42), inf),
+            (nx.empty_graph(10), inf),
+            (nx.Graph(chain(cycle_edges(range(5)), cycle_edges(range(6, 10)))), 4),
+            (
+                nx.Graph(
+                    [
+                        (0, 6),
+                        (0, 8),
+                        (0, 9),
+                        (1, 8),
+                        (2, 8),
+                        (2, 9),
+                        (4, 9),
+                        (5, 9),
+                        (6, 8),
+                        (6, 9),
+                        (7, 8),
+                    ]
+                ),
+                3,
+            ),
+        ),
+    )
+    def test_girth(self, G, expected):
+        assert nx.girth(G) == expected
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_d_separation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_d_separation.py
new file mode 100644
index 0000000..f760829
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_d_separation.py
@@ -0,0 +1,340 @@
+from itertools import combinations
+
+import pytest
+
+import networkx as nx
+
+
+def path_graph():
+    """Return a path graph on three nodes."""
+    G = nx.path_graph(3, create_using=nx.DiGraph)
+    G.graph["name"] = "path"
+    nx.freeze(G)
+    return G
+
+
+def fork_graph():
+    """Return a three node fork graph."""
+    G = nx.DiGraph(name="fork")
+    G.add_edges_from([(0, 1), (0, 2)])
+    nx.freeze(G)
+    return G
+
+
+def collider_graph():
+    """Return a collider/v-structure graph with three nodes."""
+    G = nx.DiGraph(name="collider")
+    G.add_edges_from([(0, 2), (1, 2)])
+    nx.freeze(G)
+    return G
+
+
+def naive_bayes_graph():
+    """Return a simple Naive Bayes PGM graph."""
+    G = nx.DiGraph(name="naive_bayes")
+    G.add_edges_from([(0, 1), (0, 2), (0, 3), (0, 4)])
+    nx.freeze(G)
+    return G
+
+
+def asia_graph():
+    """Return the 'Asia' PGM graph."""
+    G = nx.DiGraph(name="asia")
+    G.add_edges_from(
+        [
+            ("asia",
"tuberculosis"), + ("smoking", "cancer"), + ("smoking", "bronchitis"), + ("tuberculosis", "either"), + ("cancer", "either"), + ("either", "xray"), + ("either", "dyspnea"), + ("bronchitis", "dyspnea"), + ] + ) + nx.freeze(G) + return G + + +@pytest.fixture(name="path_graph") +def path_graph_fixture(): + return path_graph() + + +@pytest.fixture(name="fork_graph") +def fork_graph_fixture(): + return fork_graph() + + +@pytest.fixture(name="collider_graph") +def collider_graph_fixture(): + return collider_graph() + + +@pytest.fixture(name="naive_bayes_graph") +def naive_bayes_graph_fixture(): + return naive_bayes_graph() + + +@pytest.fixture(name="asia_graph") +def asia_graph_fixture(): + return asia_graph() + + +@pytest.fixture() +def large_collider_graph(): + edge_list = [("A", "B"), ("C", "B"), ("B", "D"), ("D", "E"), ("B", "F"), ("G", "E")] + G = nx.DiGraph(edge_list) + return G + + +@pytest.fixture() +def chain_and_fork_graph(): + edge_list = [("A", "B"), ("B", "C"), ("B", "D"), ("D", "C")] + G = nx.DiGraph(edge_list) + return G + + +@pytest.fixture() +def no_separating_set_graph(): + edge_list = [("A", "B")] + G = nx.DiGraph(edge_list) + return G + + +@pytest.fixture() +def large_no_separating_set_graph(): + edge_list = [("A", "B"), ("C", "A"), ("C", "B")] + G = nx.DiGraph(edge_list) + return G + + +@pytest.fixture() +def collider_trek_graph(): + edge_list = [("A", "B"), ("C", "B"), ("C", "D")] + G = nx.DiGraph(edge_list) + return G + + +@pytest.mark.parametrize( + "graph", + [path_graph(), fork_graph(), collider_graph(), naive_bayes_graph(), asia_graph()], +) +def test_markov_condition(graph): + """Test that the Markov condition holds for each PGM graph.""" + for node in graph.nodes: + parents = set(graph.predecessors(node)) + non_descendants = graph.nodes - nx.descendants(graph, node) - {node} - parents + assert nx.is_d_separator(graph, {node}, non_descendants, parents) + + +def test_path_graph_dsep(path_graph): + """Example-based test of d-separation for path_graph.""" + assert nx.is_d_separator(path_graph, {0}, {2}, {1}) + assert not nx.is_d_separator(path_graph, {0}, {2}, set()) + + +def test_fork_graph_dsep(fork_graph): + """Example-based test of d-separation for fork_graph.""" + assert nx.is_d_separator(fork_graph, {1}, {2}, {0}) + assert not nx.is_d_separator(fork_graph, {1}, {2}, set()) + + +def test_collider_graph_dsep(collider_graph): + """Example-based test of d-separation for collider_graph.""" + assert nx.is_d_separator(collider_graph, {0}, {1}, set()) + assert not nx.is_d_separator(collider_graph, {0}, {1}, {2}) + + +def test_naive_bayes_dsep(naive_bayes_graph): + """Example-based test of d-separation for naive_bayes_graph.""" + for u, v in combinations(range(1, 5), 2): + assert nx.is_d_separator(naive_bayes_graph, {u}, {v}, {0}) + assert not nx.is_d_separator(naive_bayes_graph, {u}, {v}, set()) + + +def test_asia_graph_dsep(asia_graph): + """Example-based test of d-separation for asia_graph.""" + assert nx.is_d_separator( + asia_graph, {"asia", "smoking"}, {"dyspnea", "xray"}, {"bronchitis", "either"} + ) + assert nx.is_d_separator( + asia_graph, {"tuberculosis", "cancer"}, {"bronchitis"}, {"smoking", "xray"} + ) + + +def test_undirected_graphs_are_not_supported(): + """ + Test that undirected graphs are not supported. + + d-separation and its related algorithms do not apply in + the case of undirected graphs. 
+ """ + g = nx.path_graph(3, nx.Graph) + with pytest.raises(nx.NetworkXNotImplemented): + nx.is_d_separator(g, {0}, {1}, {2}) + with pytest.raises(nx.NetworkXNotImplemented): + nx.is_minimal_d_separator(g, {0}, {1}, {2}) + with pytest.raises(nx.NetworkXNotImplemented): + nx.find_minimal_d_separator(g, {0}, {1}) + + +def test_cyclic_graphs_raise_error(): + """ + Test that cycle graphs should cause erroring. + + This is because PGMs assume a directed acyclic graph. + """ + g = nx.cycle_graph(3, nx.DiGraph) + with pytest.raises(nx.NetworkXError): + nx.is_d_separator(g, {0}, {1}, {2}) + with pytest.raises(nx.NetworkXError): + nx.find_minimal_d_separator(g, {0}, {1}) + with pytest.raises(nx.NetworkXError): + nx.is_minimal_d_separator(g, {0}, {1}, {2}) + + +def test_invalid_nodes_raise_error(asia_graph): + """ + Test that graphs that have invalid nodes passed in raise errors. + """ + # Check both set and node arguments + with pytest.raises(nx.NodeNotFound): + nx.is_d_separator(asia_graph, {0}, {1}, {2}) + with pytest.raises(nx.NodeNotFound): + nx.is_d_separator(asia_graph, 0, 1, 2) + with pytest.raises(nx.NodeNotFound): + nx.is_minimal_d_separator(asia_graph, {0}, {1}, {2}) + with pytest.raises(nx.NodeNotFound): + nx.is_minimal_d_separator(asia_graph, 0, 1, 2) + with pytest.raises(nx.NodeNotFound): + nx.find_minimal_d_separator(asia_graph, {0}, {1}) + with pytest.raises(nx.NodeNotFound): + nx.find_minimal_d_separator(asia_graph, 0, 1) + + +def test_nondisjoint_node_sets_raise_error(collider_graph): + """ + Test that error is raised when node sets aren't disjoint. + """ + with pytest.raises(nx.NetworkXError): + nx.is_d_separator(collider_graph, 0, 1, 0) + with pytest.raises(nx.NetworkXError): + nx.is_d_separator(collider_graph, 0, 2, 0) + with pytest.raises(nx.NetworkXError): + nx.is_d_separator(collider_graph, 0, 0, 1) + with pytest.raises(nx.NetworkXError): + nx.is_d_separator(collider_graph, 1, 0, 0) + with pytest.raises(nx.NetworkXError): + nx.find_minimal_d_separator(collider_graph, 0, 0) + with pytest.raises(nx.NetworkXError): + nx.find_minimal_d_separator(collider_graph, 0, 1, included=0) + with pytest.raises(nx.NetworkXError): + nx.find_minimal_d_separator(collider_graph, 1, 0, included=0) + with pytest.raises(nx.NetworkXError): + nx.is_minimal_d_separator(collider_graph, 0, 0, set()) + with pytest.raises(nx.NetworkXError): + nx.is_minimal_d_separator(collider_graph, 0, 1, set(), included=0) + with pytest.raises(nx.NetworkXError): + nx.is_minimal_d_separator(collider_graph, 1, 0, set(), included=0) + + +def test_is_minimal_d_separator( + large_collider_graph, + chain_and_fork_graph, + no_separating_set_graph, + large_no_separating_set_graph, + collider_trek_graph, +): + # Case 1: + # create a graph A -> B <- C + # B -> D -> E; + # B -> F; + # G -> E; + assert not nx.is_d_separator(large_collider_graph, {"B"}, {"E"}, set()) + + # minimal set of the corresponding graph + # for B and E should be (D,) + Zmin = nx.find_minimal_d_separator(large_collider_graph, "B", "E") + # check that the minimal d-separator is a d-separating set + assert nx.is_d_separator(large_collider_graph, "B", "E", Zmin) + # the minimal separating set should also pass the test for minimality + assert nx.is_minimal_d_separator(large_collider_graph, "B", "E", Zmin) + # function should also work with set arguments + assert nx.is_minimal_d_separator(large_collider_graph, {"A", "B"}, {"G", "E"}, Zmin) + assert Zmin == {"D"} + + # Case 2: + # create a graph A -> B -> C + # B -> D -> C; + assert not 
nx.is_d_separator(chain_and_fork_graph, {"A"}, {"C"}, set()) + Zmin = nx.find_minimal_d_separator(chain_and_fork_graph, "A", "C") + + # the minimal separating set should pass the test for minimality + assert nx.is_minimal_d_separator(chain_and_fork_graph, "A", "C", Zmin) + assert Zmin == {"B"} + Znotmin = Zmin.union({"D"}) + assert not nx.is_minimal_d_separator(chain_and_fork_graph, "A", "C", Znotmin) + + # Case 3: + # create a graph A -> B + + # there is no m-separating set between A and B at all, so + # no minimal m-separating set can exist + assert not nx.is_d_separator(no_separating_set_graph, {"A"}, {"B"}, set()) + assert nx.find_minimal_d_separator(no_separating_set_graph, "A", "B") is None + + # Case 4: + # create a graph A -> B with A <- C -> B + + # there is no m-separating set between A and B at all, so + # no minimal m-separating set can exist + # however, the algorithm will initially propose C as a + # minimal (but invalid) separating set + assert not nx.is_d_separator(large_no_separating_set_graph, {"A"}, {"B"}, {"C"}) + assert nx.find_minimal_d_separator(large_no_separating_set_graph, "A", "B") is None + + # Test `included` and `excluded` args + # create graph A -> B <- C -> D + assert nx.find_minimal_d_separator(collider_trek_graph, "A", "D", included="B") == { + "B", + "C", + } + assert ( + nx.find_minimal_d_separator( + collider_trek_graph, "A", "D", included="B", restricted="B" + ) + is None + ) + + +def test_is_minimal_d_separator_checks_dsep(): + """Test that is_minimal_d_separator checks for d-separation as well.""" + g = nx.DiGraph() + g.add_edges_from( + [ + ("A", "B"), + ("A", "E"), + ("B", "C"), + ("B", "D"), + ("D", "C"), + ("D", "F"), + ("E", "D"), + ("E", "F"), + ] + ) + + assert not nx.is_d_separator(g, {"C"}, {"F"}, {"D"}) + + # since {'D'} and {} are not d-separators, we return false + assert not nx.is_minimal_d_separator(g, "C", "F", {"D"}) + assert not nx.is_minimal_d_separator(g, "C", "F", set()) + + +def test__reachable(large_collider_graph): + reachable = nx.algorithms.d_separation._reachable + g = large_collider_graph + x = {"F", "D"} + ancestors = {"A", "B", "C", "D", "F"} + assert reachable(g, x, ancestors, {"B"}) == {"B", "F", "D"} + assert reachable(g, x, ancestors, set()) == ancestors diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dag.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dag.py new file mode 100644 index 0000000..4312ce3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dag.py @@ -0,0 +1,837 @@ +from collections import deque +from itertools import combinations, permutations + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, pairwise + + +# Recipe from the itertools documentation. +def _consume(iterator): + "Consume the iterator entirely." + # Feed the entire iterator into a zero-length deque. 
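+    # A deque bounded at maxlen=0 discards everything it is fed, so this
+    # exhausts the iterator without storing any of its output.
+    # Equivalent, but slower for long iterators:  for _ in iterator: pass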
+ deque(iterator, maxlen=0) + + +class TestDagLongestPath: + """Unit tests computing the longest path in a directed acyclic graph.""" + + def test_empty(self): + G = nx.DiGraph() + assert nx.dag_longest_path(G) == [] + + def test_unweighted1(self): + edges = [(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (3, 7)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path(G) == [1, 2, 3, 5, 6] + + def test_unweighted2(self): + edges = [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path(G) == [1, 2, 3, 4, 5] + + def test_weighted(self): + G = nx.DiGraph() + edges = [(1, 2, -5), (2, 3, 1), (3, 4, 1), (4, 5, 0), (3, 5, 4), (1, 6, 2)] + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path(G) == [2, 3, 5] + + def test_undirected_not_implemented(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.dag_longest_path, G) + + def test_unorderable_nodes(self): + """Tests that computing the longest path does not depend on + nodes being orderable. + + For more information, see issue #1989. + + """ + # Create the directed path graph on four nodes in a diamond shape, + # with nodes represented as (unorderable) Python objects. + nodes = [object() for n in range(4)] + G = nx.DiGraph() + G.add_edge(nodes[0], nodes[1]) + G.add_edge(nodes[0], nodes[2]) + G.add_edge(nodes[2], nodes[3]) + G.add_edge(nodes[1], nodes[3]) + + # this will raise NotImplementedError when nodes need to be ordered + nx.dag_longest_path(G) + + def test_multigraph_unweighted(self): + edges = [(1, 2), (2, 3), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.MultiDiGraph(edges) + assert nx.dag_longest_path(G) == [1, 2, 3, 4, 5] + + def test_multigraph_weighted(self): + G = nx.MultiDiGraph() + edges = [ + (1, 2, 2), + (2, 3, 2), + (1, 3, 1), + (1, 3, 5), + (1, 3, 2), + ] + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path(G) == [1, 3] + + def test_multigraph_weighted_default_weight(self): + G = nx.MultiDiGraph([(1, 2), (2, 3)]) # Unweighted edges + G.add_weighted_edges_from([(1, 3, 1), (1, 3, 5), (1, 3, 2)]) + + # Default value for default weight is 1 + assert nx.dag_longest_path(G) == [1, 3] + assert nx.dag_longest_path(G, default_weight=3) == [1, 2, 3] + + +class TestDagLongestPathLength: + """Unit tests for computing the length of a longest path in a + directed acyclic graph. 
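+    The fixtures largely parallel those in ``TestDagLongestPath``; only the
+    length of the longest path is asserted here, not the path itself.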
+ + """ + + def test_unweighted(self): + edges = [(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path_length(G) == 4 + + edges = [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path_length(G) == 4 + + # test degenerate graphs + G = nx.DiGraph() + G.add_node(1) + assert nx.dag_longest_path_length(G) == 0 + + def test_undirected_not_implemented(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.dag_longest_path_length, G) + + def test_weighted(self): + edges = [(1, 2, -5), (2, 3, 1), (3, 4, 1), (4, 5, 0), (3, 5, 4), (1, 6, 2)] + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path_length(G) == 5 + + def test_multigraph_unweighted(self): + edges = [(1, 2), (2, 3), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.MultiDiGraph(edges) + assert nx.dag_longest_path_length(G) == 4 + + def test_multigraph_weighted(self): + G = nx.MultiDiGraph() + edges = [ + (1, 2, 2), + (2, 3, 2), + (1, 3, 1), + (1, 3, 5), + (1, 3, 2), + ] + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path_length(G) == 5 + + +class TestDAG: + @classmethod + def setup_class(cls): + pass + + def test_topological_sort1(self): + DG = nx.DiGraph([(1, 2), (1, 3), (2, 3)]) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + assert tuple(algorithm(DG)) == (1, 2, 3) + + DG.add_edge(3, 2) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + pytest.raises(nx.NetworkXUnfeasible, _consume, algorithm(DG)) + + DG.remove_edge(2, 3) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + assert tuple(algorithm(DG)) == (1, 3, 2) + + DG.remove_edge(3, 2) + + assert tuple(nx.topological_sort(DG)) in {(1, 2, 3), (1, 3, 2)} + assert tuple(nx.lexicographical_topological_sort(DG)) == (1, 2, 3) + + def test_is_directed_acyclic_graph(self): + G = nx.generators.complete_graph(2) + assert not nx.is_directed_acyclic_graph(G) + assert not nx.is_directed_acyclic_graph(G.to_directed()) + assert not nx.is_directed_acyclic_graph(nx.Graph([(3, 4), (4, 5)])) + assert nx.is_directed_acyclic_graph(nx.DiGraph([(3, 4), (4, 5)])) + + def test_topological_sort2(self): + DG = nx.DiGraph( + { + 1: [2], + 2: [3], + 3: [4], + 4: [5], + 5: [1], + 11: [12], + 12: [13], + 13: [14], + 14: [15], + } + ) + pytest.raises(nx.NetworkXUnfeasible, _consume, nx.topological_sort(DG)) + + assert not nx.is_directed_acyclic_graph(DG) + + DG.remove_edge(1, 2) + _consume(nx.topological_sort(DG)) + assert nx.is_directed_acyclic_graph(DG) + + def test_topological_sort3(self): + DG = nx.DiGraph() + DG.add_edges_from([(1, i) for i in range(2, 5)]) + DG.add_edges_from([(2, i) for i in range(5, 9)]) + DG.add_edges_from([(6, i) for i in range(9, 12)]) + DG.add_edges_from([(4, i) for i in range(12, 15)]) + + def validate(order): + assert isinstance(order, list) + assert set(order) == set(DG) + for u, v in combinations(order, 2): + assert not nx.has_path(DG, v, u) + + validate(list(nx.topological_sort(DG))) + + DG.add_edge(14, 1) + pytest.raises(nx.NetworkXUnfeasible, _consume, nx.topological_sort(DG)) + + def test_topological_sort4(self): + G = nx.Graph() + G.add_edge(1, 2) + # Only directed graphs can be topologically sorted. 
+ pytest.raises(nx.NetworkXError, _consume, nx.topological_sort(G)) + + def test_topological_sort5(self): + G = nx.DiGraph() + G.add_edge(0, 1) + assert list(nx.topological_sort(G)) == [0, 1] + + def test_topological_sort6(self): + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + + def runtime_error(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.add_edge(5 - x, 5) + + def unfeasible_error(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.remove_node(4) + + def runtime_error2(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.remove_node(2) + + pytest.raises(RuntimeError, runtime_error) + pytest.raises(RuntimeError, runtime_error2) + pytest.raises(nx.NetworkXUnfeasible, unfeasible_error) + + def test_all_topological_sorts_1(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 5)]) + assert list(nx.all_topological_sorts(DG)) == [[1, 2, 3, 4, 5]] + + def test_all_topological_sorts_2(self): + DG = nx.DiGraph([(1, 3), (2, 1), (2, 4), (4, 3), (4, 5)]) + assert sorted(nx.all_topological_sorts(DG)) == [ + [2, 1, 4, 3, 5], + [2, 1, 4, 5, 3], + [2, 4, 1, 3, 5], + [2, 4, 1, 5, 3], + [2, 4, 5, 1, 3], + ] + + def test_all_topological_sorts_3(self): + def unfeasible(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 2), (4, 5)]) + # convert to list to execute generator + list(nx.all_topological_sorts(DG)) + + def not_implemented(): + G = nx.Graph([(1, 2), (2, 3)]) + # convert to list to execute generator + list(nx.all_topological_sorts(G)) + + def not_implemented_2(): + G = nx.MultiGraph([(1, 2), (1, 2), (2, 3)]) + list(nx.all_topological_sorts(G)) + + pytest.raises(nx.NetworkXUnfeasible, unfeasible) + pytest.raises(nx.NetworkXNotImplemented, not_implemented) + pytest.raises(nx.NetworkXNotImplemented, not_implemented_2) + + def test_all_topological_sorts_4(self): + DG = nx.DiGraph() + for i in range(7): + DG.add_node(i) + assert sorted(map(list, permutations(DG.nodes))) == sorted( + nx.all_topological_sorts(DG) + ) + + def test_all_topological_sorts_multigraph_1(self): + DG = nx.MultiDiGraph([(1, 2), (1, 2), (2, 3), (3, 4), (3, 5), (3, 5), (3, 5)]) + assert sorted(nx.all_topological_sorts(DG)) == sorted( + [[1, 2, 3, 4, 5], [1, 2, 3, 5, 4]] + ) + + def test_all_topological_sorts_multigraph_2(self): + N = 9 + edges = [] + for i in range(1, N): + edges.extend([(i, i + 1)] * i) + DG = nx.MultiDiGraph(edges) + assert list(nx.all_topological_sorts(DG)) == [list(range(1, N + 1))] + + def test_ancestors(self): + G = nx.DiGraph() + ancestors = nx.algorithms.dag.ancestors + G.add_edges_from([(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)]) + assert ancestors(G, 6) == {1, 2, 4, 5} + assert ancestors(G, 3) == {1, 4} + assert ancestors(G, 1) == set() + pytest.raises(nx.NetworkXError, ancestors, G, 8) + + def test_descendants(self): + G = nx.DiGraph() + descendants = nx.algorithms.dag.descendants + G.add_edges_from([(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)]) + assert descendants(G, 1) == {2, 3, 6} + assert descendants(G, 4) == {2, 3, 5, 6} + assert descendants(G, 3) == set() + pytest.raises(nx.NetworkXError, descendants, G, 8) + + def test_transitive_closure(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(nx.transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 
2), (2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + assert edges_equal(nx.transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + solution = [(1, 2), (2, 1), (2, 3), (3, 2), (1, 3), (3, 1)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(sorted(nx.transitive_closure(G).edges()), soln) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + G = nx.MultiDiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + # test if edge data is copied + G = nx.DiGraph([(1, 2, {"a": 3}), (2, 3, {"b": 0}), (3, 4)]) + H = nx.transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + k = 10 + G = nx.DiGraph((i, i + 1, {"f": "b", "weight": i}) for i in range(k)) + H = nx.transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.transitive_closure(G, reflexive="wrong input") + + def test_reflexive_transitive_closure(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + solution = sorted([(1, 2), (2, 1), (2, 3), (3, 2), (1, 3), (3, 1)]) + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(sorted(nx.transitive_closure(G).edges()), soln) + assert edges_equal(sorted(nx.transitive_closure(G, False).edges()), soln) + assert edges_equal(sorted(nx.transitive_closure(G, None).edges()), solution) + assert edges_equal(sorted(nx.transitive_closure(G, True).edges()), soln) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, 
True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.MultiDiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + def test_transitive_closure_dag(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + transitive_closure = nx.algorithms.dag.transitive_closure_dag + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + assert edges_equal(transitive_closure(G).edges(), solution) + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, transitive_closure, G) + + # test if edge data is copied + G = nx.DiGraph([(1, 2, {"a": 3}), (2, 3, {"b": 0}), (3, 4)]) + H = transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + k = 10 + G = nx.DiGraph((i, i + 1, {"foo": "bar", "weight": i}) for i in range(k)) + H = transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + def test_transitive_reduction(self): + G = nx.DiGraph([(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]) + transitive_reduction = nx.algorithms.dag.transitive_reduction + solution = [(1, 2), (2, 3), (3, 4)] + assert edges_equal(transitive_reduction(G).edges(), solution) + G = nx.DiGraph([(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)]) + transitive_reduction = nx.algorithms.dag.transitive_reduction + solution = [(1, 2), (2, 3), (2, 4)] + assert edges_equal(transitive_reduction(G).edges(), solution) + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, transitive_reduction, G) + + def _check_antichains(self, solution, result): + sol = [frozenset(a) for a in solution] + res = [frozenset(a) for a in result] + assert set(sol) == set(res) + + def test_antichains(self): + antichains = nx.algorithms.dag.antichains + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [[], [4], [3], [2], [1]] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)]) + solution = [ + [], + [4], + [7], + [7, 4], + [6], + [6, 4], + [6, 7], + [6, 7, 4], + [5], + [5, 4], + [3], + [3, 4], + [2], + [1], + ] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph([(1, 2), (1, 3), (3, 4), (3, 5), (5, 6)]) + solution = [ + [], + [6], + [5], + [4], + [4, 6], + [4, 5], + [3], + [2], + [2, 6], + [2, 5], + [2, 4], + [2, 4, 6], + [2, 4, 5], + [2, 3], + [1], + ] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph({0: [1, 2], 1: [4], 2: [3], 3: [4]}) + solution = [[], [4], [3], [2], [1], [1, 3], [1, 2], [0]] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph() + self._check_antichains(list(antichains(G)), [[]]) + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2]) + solution = [[], [0], [1], [1, 0], [2], [2, 0], [2, 1], [2, 1, 0]] + self._check_antichains(list(antichains(G)), solution) + + def f(x): + return list(antichains(x)) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, f, G) + G = nx.DiGraph([(1, 2), 
(2, 3), (3, 1)]) + pytest.raises(nx.NetworkXUnfeasible, f, G) + + def test_lexicographical_topological_sort(self): + G = nx.DiGraph([(1, 2), (2, 3), (1, 4), (1, 5), (2, 6)]) + assert list(nx.lexicographical_topological_sort(G)) == [1, 2, 3, 4, 5, 6] + assert list(nx.lexicographical_topological_sort(G, key=lambda x: x)) == [ + 1, + 2, + 3, + 4, + 5, + 6, + ] + assert list(nx.lexicographical_topological_sort(G, key=lambda x: -x)) == [ + 1, + 5, + 4, + 2, + 6, + 3, + ] + + def test_lexicographical_topological_sort2(self): + """ + Check the case of two or more nodes with same key value. + Want to avoid exception raised due to comparing nodes directly. + See Issue #3493 + """ + + class Test_Node: + def __init__(self, n): + self.label = n + self.priority = 1 + + def __repr__(self): + return f"Node({self.label})" + + def sorting_key(node): + return node.priority + + test_nodes = [Test_Node(n) for n in range(4)] + G = nx.DiGraph() + edges = [(0, 1), (0, 2), (0, 3), (2, 3)] + G.add_edges_from((test_nodes[a], test_nodes[b]) for a, b in edges) + + sorting = list(nx.lexicographical_topological_sort(G, key=sorting_key)) + assert sorting == test_nodes + + +def test_topological_generations(): + G = nx.DiGraph( + {1: [2, 3], 2: [4, 5], 3: [7], 4: [], 5: [6, 7], 6: [], 7: []} + ).reverse() + # order within each generation is inconsequential + generations = [sorted(gen) for gen in nx.topological_generations(G)] + expected = [[4, 6, 7], [3, 5], [2], [1]] + assert generations == expected + + MG = nx.MultiDiGraph(G.edges) + MG.add_edge(2, 1) + generations = [sorted(gen) for gen in nx.topological_generations(MG)] + assert generations == expected + + +def test_topological_generations_empty(): + G = nx.DiGraph() + assert list(nx.topological_generations(G)) == [] + + +def test_topological_generations_cycle(): + G = nx.DiGraph([[2, 1], [3, 1], [1, 2]]) + with pytest.raises(nx.NetworkXUnfeasible): + list(nx.topological_generations(G)) + + +def test_is_aperiodic_cycle(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle2(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + nx.add_cycle(G, [3, 4, 5, 6, 7]) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle3(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + nx.add_cycle(G, [3, 4, 5, 6]) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle4(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + G.add_edge(1, 3) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_selfloop(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + G.add_edge(1, 1) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_null_graph_raises(): + G = nx.DiGraph() + pytest.raises(nx.NetworkXPointlessConcept, nx.is_aperiodic, G) + + +def test_is_aperiodic_undirected_raises(): + G = nx.Graph([(1, 2), (2, 3), (3, 1)]) + pytest.raises(nx.NetworkXError, nx.is_aperiodic, G) + + +def test_is_aperiodic_disconnected_raises(): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2]) + G.add_edge(3, 3) + pytest.raises(nx.NetworkXError, nx.is_aperiodic, G) + + +def test_is_aperiodic_weakly_connected_raises(): + G = nx.DiGraph([(1, 2), (2, 3)]) + pytest.raises(nx.NetworkXError, nx.is_aperiodic, G) + + +def test_is_aperiodic_empty_graph(): + G = nx.empty_graph(create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXPointlessConcept, match="Graph has no nodes."): + nx.is_aperiodic(G) + + +def test_is_aperiodic_bipartite(): + # Bipartite graph + G = nx.DiGraph(nx.davis_southern_women_graph()) + assert not 
nx.is_aperiodic(G) + + +def test_is_aperiodic_single_node(): + G = nx.DiGraph() + G.add_node(0) + assert not nx.is_aperiodic(G) + G.add_edge(0, 0) + assert nx.is_aperiodic(G) + + +class TestDagToBranching: + """Unit tests for the :func:`networkx.dag_to_branching` function.""" + + def test_single_root(self): + """Tests that a directed acyclic graph with a single degree + zero node produces an arborescence. + + """ + G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)]) + B = nx.dag_to_branching(G) + expected = nx.DiGraph([(0, 1), (1, 3), (0, 2), (2, 4)]) + assert nx.is_arborescence(B) + assert nx.is_isomorphic(B, expected) + + def test_multiple_roots(self): + """Tests that a directed acyclic graph with multiple degree zero + nodes creates an arborescence with multiple (weakly) connected + components. + + """ + G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3), (5, 2)]) + B = nx.dag_to_branching(G) + expected = nx.DiGraph([(0, 1), (1, 3), (0, 2), (2, 4), (5, 6), (6, 7)]) + assert nx.is_branching(B) + assert not nx.is_arborescence(B) + assert nx.is_isomorphic(B, expected) + + # # Attributes are not copied by this function. If they were, this would + # # be a good test to uncomment. + # def test_copy_attributes(self): + # """Tests that node attributes are copied in the branching.""" + # G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)]) + # for v in G: + # G.node[v]['label'] = str(v) + # B = nx.dag_to_branching(G) + # # Determine the root node of the branching. + # root = next(v for v, d in B.in_degree() if d == 0) + # assert_equal(B.node[root]['label'], '0') + # children = B[root] + # # Get the left and right children, nodes 1 and 2, respectively. + # left, right = sorted(children, key=lambda v: B.node[v]['label']) + # assert_equal(B.node[left]['label'], '1') + # assert_equal(B.node[right]['label'], '2') + # # Get the left grandchild. + # children = B[left] + # assert_equal(len(children), 1) + # left_grandchild = arbitrary_element(children) + # assert_equal(B.node[left_grandchild]['label'], '3') + # # Get the right grandchild. + # children = B[right] + # assert_equal(len(children), 1) + # right_grandchild = arbitrary_element(children) + # assert_equal(B.node[right_grandchild]['label'], '3') + + def test_already_arborescence(self): + """Tests that a directed acyclic graph that is already an + arborescence produces an isomorphic arborescence as output. + + """ + A = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + B = nx.dag_to_branching(A) + assert nx.is_isomorphic(A, B) + + def test_already_branching(self): + """Tests that a directed acyclic graph that is already a + branching produces an isomorphic branching as output. 
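+
+        Unlike an arborescence, a branching may have several roots, so the
+        disjoint union of two trees used below is a valid input.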
+ + """ + T1 = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + T2 = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + G = nx.disjoint_union(T1, T2) + B = nx.dag_to_branching(G) + assert nx.is_isomorphic(G, B) + + def test_not_acyclic(self): + """Tests that a non-acyclic graph causes an exception.""" + with pytest.raises(nx.HasACycle): + G = nx.DiGraph(pairwise("abc", cyclic=True)) + nx.dag_to_branching(G) + + def test_undirected(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.Graph()) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.MultiGraph()) + + def test_multidigraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.MultiDiGraph()) + + +def test_ancestors_descendants_undirected(): + """Regression test to ensure ancestors and descendants work as expected on + undirected graphs.""" + G = nx.path_graph(5) + nx.ancestors(G, 2) == nx.descendants(G, 2) == {0, 1, 3, 4} + + +def test_compute_v_structures_raise(): + G = nx.Graph() + with pytest.raises(nx.NetworkXNotImplemented, match="for undirected type"): + nx.compute_v_structures(G) + + +def test_compute_v_structures(): + edges = [(0, 1), (0, 2), (3, 2)] + G = nx.DiGraph(edges) + + v_structs = set(nx.compute_v_structures(G)) + assert len(v_structs) == 1 + assert (0, 2, 3) in v_structs + + edges = [("A", "B"), ("C", "B"), ("B", "D"), ("D", "E"), ("G", "E")] + G = nx.DiGraph(edges) + v_structs = set(nx.compute_v_structures(G)) + assert len(v_structs) == 2 + + +def test_compute_v_structures_deprecated(): + G = nx.DiGraph() + with pytest.deprecated_call(): + nx.compute_v_structures(G) + + +def test_v_structures_raise(): + G = nx.Graph() + with pytest.raises(nx.NetworkXNotImplemented, match="for undirected type"): + nx.dag.v_structures(G) + + +@pytest.mark.parametrize( + ("edgelist", "expected"), + ( + ( + [(0, 1), (0, 2), (3, 2)], + {(0, 2, 3)}, + ), + ( + [("A", "B"), ("C", "B"), ("D", "G"), ("D", "E"), ("G", "E")], + {("A", "B", "C")}, + ), + ([(0, 1), (2, 1), (0, 2)], set()), # adjacent parents case: see gh-7385 + ), +) +def test_v_structures(edgelist, expected): + G = nx.DiGraph(edgelist) + v_structs = set(nx.dag.v_structures(G)) + assert v_structs == expected + + +def test_colliders_raise(): + G = nx.Graph() + with pytest.raises(nx.NetworkXNotImplemented, match="for undirected type"): + nx.dag.colliders(G) + + +@pytest.mark.parametrize( + ("edgelist", "expected"), + ( + ( + [(0, 1), (0, 2), (3, 2)], + {(0, 2, 3)}, + ), + ( + [("A", "B"), ("C", "B"), ("D", "G"), ("D", "E"), ("G", "E")], + {("A", "B", "C"), ("D", "E", "G")}, + ), + ), +) +def test_colliders(edgelist, expected): + G = nx.DiGraph(edgelist) + colliders = set(nx.dag.colliders(G)) + assert colliders == expected diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_distance_measures.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_distance_measures.py new file mode 100644 index 0000000..1668fef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_distance_measures.py @@ -0,0 +1,831 @@ +import itertools +import math +from random import Random + +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.algorithms.distance_measures import _extrema_bounding + + +def test__extrema_bounding_invalid_compute_kwarg(): + G = nx.path_graph(3) + with pytest.raises(ValueError, match="compute must be one of"): + _extrema_bounding(G, 
compute="spam") + + +class TestDistance: + def setup_method(self): + self.G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + + @pytest.mark.parametrize("seed", list(range(10))) + @pytest.mark.parametrize("n", list(range(10, 20))) + @pytest.mark.parametrize("prob", [x / 10 for x in range(0, 10, 2)]) + def test_use_bounds_on_off_consistency(self, seed, n, prob): + """Test for consistency of distance metrics when using usebounds=True. + + We validate consistency for `networkx.diameter`, `networkx.radius`, `networkx.periphery` + and `networkx.center` when passing `usebounds=True`. Expectation is that method + returns the same result whether we pass usebounds=True or not. + + For this we generate random connected graphs and validate method returns the same. + """ + metrics = [nx.diameter, nx.radius, nx.periphery, nx.center] + max_weight = [5, 10, 1000] + rng = Random(seed) + # we compose it with a random tree to ensure graph is connected + G = nx.compose( + nx.random_labeled_tree(n, seed=rng), + nx.erdos_renyi_graph(n, prob, seed=rng), + ) + for metric in metrics: + # checking unweighted case + assert metric(G) == metric(G, usebounds=True) + for w in max_weight: + for u, v in G.edges(): + G[u][v]["w"] = rng.randint(0, w) + # checking weighted case + assert metric(G, weight="w") == metric(G, weight="w", usebounds=True) + + def test_eccentricity(self): + assert nx.eccentricity(self.G, 1) == 6 + e = nx.eccentricity(self.G) + assert e[1] == 6 + + sp = dict(nx.shortest_path_length(self.G)) + e = nx.eccentricity(self.G, sp=sp) + assert e[1] == 6 + + e = nx.eccentricity(self.G, v=1) + assert e == 6 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1]) + assert e[1] == 6 + e = nx.eccentricity(self.G, v=[1, 2]) + assert e[1] == 6 + + # test against graph with one node + G = nx.path_graph(1) + e = nx.eccentricity(G) + assert e[0] == 0 + e = nx.eccentricity(G, v=0) + assert e == 0 + pytest.raises(nx.NetworkXError, nx.eccentricity, G, 1) + + # test against empty graph + G = nx.empty_graph() + e = nx.eccentricity(G) + assert e == {} + + def test_diameter(self): + assert nx.diameter(self.G) == 6 + + def test_harmonic_diameter(self): + assert nx.harmonic_diameter(self.G) == pytest.approx(2.0477815699658715) + assert nx.harmonic_diameter(nx.star_graph(3)) == pytest.approx(1.333333) + + def test_harmonic_diameter_empty(self): + assert math.isnan(nx.harmonic_diameter(nx.empty_graph())) + + def test_harmonic_diameter_single_node(self): + assert math.isnan(nx.harmonic_diameter(nx.empty_graph(1))) + + def test_harmonic_diameter_discrete(self): + assert math.isinf(nx.harmonic_diameter(nx.empty_graph(3))) + + def test_harmonic_diameter_not_strongly_connected(self): + DG = nx.DiGraph() + DG.add_edge(0, 1) + assert nx.harmonic_diameter(DG) == 2 + + def test_harmonic_diameter_weighted_paths(self): + G = nx.star_graph(3) + # check defaults + G.add_weighted_edges_from([(*e, 1) for i, e in enumerate(G.edges)], "weight") + assert nx.harmonic_diameter(G) == pytest.approx(1.333333) + assert nx.harmonic_diameter(G, weight="weight") == pytest.approx(1.333333) + + # check impact of weights and alternate weight name + G.add_weighted_edges_from([(*e, i) for i, e in enumerate(G.edges)], "dist") + assert nx.harmonic_diameter(G, weight="dist") == pytest.approx(1.8) + + def test_radius(self): + assert nx.radius(self.G) == 4 + + def test_periphery(self): + assert set(nx.periphery(self.G)) == {1, 4, 13, 16} + + def test_center_simple_tree(self): + G = nx.Graph([(1, 2), (1, 
3), (2, 4), (2, 5)]) + assert nx.center(G) == [1, 2] + + @pytest.mark.parametrize("r", range(2, 5)) + @pytest.mark.parametrize("h", range(1, 5)) + def test_center_balanced_tree(self, r, h): + G = nx.balanced_tree(r, h) + assert nx.center(G) == [0] + + def test_center(self): + assert set(nx.center(self.G)) == {6, 7, 10, 11} + + @pytest.mark.parametrize("n", [1, 2, 99, 100]) + def test_center_path_graphs(self, n): + G = nx.path_graph(n) + expected = {(n - 1) // 2, math.ceil((n - 1) / 2)} + assert set(nx.center(G)) == expected + + def test_bound_diameter(self): + assert nx.diameter(self.G, usebounds=True) == 6 + + def test_bound_radius(self): + assert nx.radius(self.G, usebounds=True) == 4 + + def test_bound_periphery(self): + result = {1, 4, 13, 16} + assert set(nx.periphery(self.G, usebounds=True)) == result + + def test_bound_center(self): + result = {6, 7, 10, 11} + assert set(nx.center(self.G, usebounds=True)) == result + + def test_radius_exception(self): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(3, 4) + pytest.raises(nx.NetworkXError, nx.diameter, G) + + def test_eccentricity_infinite(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph([(1, 2), (3, 4)]) + e = nx.eccentricity(G) + + def test_eccentricity_undirected_not_connected(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph([(1, 2), (3, 4)]) + e = nx.eccentricity(G, sp=1) + + def test_eccentricity_directed_weakly_connected(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph([(1, 2), (1, 3)]) + nx.eccentricity(DG) + + +class TestWeightedDistance: + def setup_method(self): + G = nx.Graph() + G.add_edge(0, 1, weight=0.6, cost=0.6, high_cost=6) + G.add_edge(0, 2, weight=0.2, cost=0.2, high_cost=2) + G.add_edge(2, 3, weight=0.1, cost=0.1, high_cost=1) + G.add_edge(2, 4, weight=0.7, cost=0.7, high_cost=7) + G.add_edge(2, 5, weight=0.9, cost=0.9, high_cost=9) + G.add_edge(1, 5, weight=0.3, cost=0.3, high_cost=3) + self.G = G + self.weight_fn = lambda v, u, e: 2 + + def test_eccentricity_weight_None(self): + assert nx.eccentricity(self.G, 1, weight=None) == 3 + e = nx.eccentricity(self.G, weight=None) + assert e[1] == 3 + + e = nx.eccentricity(self.G, v=1, weight=None) + assert e == 3 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1], weight=None) + assert e[1] == 3 + e = nx.eccentricity(self.G, v=[1, 2], weight=None) + assert e[1] == 3 + + def test_eccentricity_weight_attr(self): + assert nx.eccentricity(self.G, 1, weight="weight") == 1.5 + e = nx.eccentricity(self.G, weight="weight") + assert ( + e + == nx.eccentricity(self.G, weight="cost") + != nx.eccentricity(self.G, weight="high_cost") + ) + assert e[1] == 1.5 + + e = nx.eccentricity(self.G, v=1, weight="weight") + assert e == 1.5 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1], weight="weight") + assert e[1] == 1.5 + e = nx.eccentricity(self.G, v=[1, 2], weight="weight") + assert e[1] == 1.5 + + def test_eccentricity_weight_fn(self): + assert nx.eccentricity(self.G, 1, weight=self.weight_fn) == 6 + e = nx.eccentricity(self.G, weight=self.weight_fn) + assert e[1] == 6 + + e = nx.eccentricity(self.G, v=1, weight=self.weight_fn) + assert e == 6 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1], weight=self.weight_fn) + assert e[1] == 6 + e = nx.eccentricity(self.G, v=[1, 2], weight=self.weight_fn) + assert e[1] == 6 + + def test_diameter_weight_None(self): + assert nx.diameter(self.G, weight=None) == 3 + + 
def test_diameter_weight_attr(self): + assert ( + nx.diameter(self.G, weight="weight") + == nx.diameter(self.G, weight="cost") + == 1.6 + != nx.diameter(self.G, weight="high_cost") + ) + + def test_diameter_weight_fn(self): + assert nx.diameter(self.G, weight=self.weight_fn) == 6 + + def test_radius_weight_None(self): + assert pytest.approx(nx.radius(self.G, weight=None)) == 2 + + def test_radius_weight_attr(self): + assert ( + pytest.approx(nx.radius(self.G, weight="weight")) + == pytest.approx(nx.radius(self.G, weight="cost")) + == 0.9 + != nx.radius(self.G, weight="high_cost") + ) + + def test_radius_weight_fn(self): + assert nx.radius(self.G, weight=self.weight_fn) == 4 + + def test_periphery_weight_None(self): + for v in set(nx.periphery(self.G, weight=None)): + assert nx.eccentricity(self.G, v, weight=None) == nx.diameter( + self.G, weight=None + ) + + def test_periphery_weight_attr(self): + periphery = set(nx.periphery(self.G, weight="weight")) + assert ( + periphery + == set(nx.periphery(self.G, weight="cost")) + == set(nx.periphery(self.G, weight="high_cost")) + ) + for v in periphery: + assert ( + nx.eccentricity(self.G, v, weight="high_cost") + != nx.eccentricity(self.G, v, weight="weight") + == nx.eccentricity(self.G, v, weight="cost") + == nx.diameter(self.G, weight="weight") + == nx.diameter(self.G, weight="cost") + != nx.diameter(self.G, weight="high_cost") + ) + assert nx.eccentricity(self.G, v, weight="high_cost") == nx.diameter( + self.G, weight="high_cost" + ) + + def test_periphery_weight_fn(self): + for v in set(nx.periphery(self.G, weight=self.weight_fn)): + assert nx.eccentricity(self.G, v, weight=self.weight_fn) == nx.diameter( + self.G, weight=self.weight_fn + ) + + def test_center_weight_None(self): + for v in set(nx.center(self.G, weight=None)): + assert pytest.approx(nx.eccentricity(self.G, v, weight=None)) == nx.radius( + self.G, weight=None + ) + + def test_center_weight_attr(self): + center = set(nx.center(self.G, weight="weight")) + assert ( + center + == set(nx.center(self.G, weight="cost")) + != set(nx.center(self.G, weight="high_cost")) + ) + for v in center: + assert ( + nx.eccentricity(self.G, v, weight="high_cost") + != pytest.approx(nx.eccentricity(self.G, v, weight="weight")) + == pytest.approx(nx.eccentricity(self.G, v, weight="cost")) + == nx.radius(self.G, weight="weight") + == nx.radius(self.G, weight="cost") + != nx.radius(self.G, weight="high_cost") + ) + assert nx.eccentricity(self.G, v, weight="high_cost") == nx.radius( + self.G, weight="high_cost" + ) + + def test_center_weight_fn(self): + for v in set(nx.center(self.G, weight=self.weight_fn)): + assert nx.eccentricity(self.G, v, weight=self.weight_fn) == nx.radius( + self.G, weight=self.weight_fn + ) + + def test_bound_diameter_weight_None(self): + assert nx.diameter(self.G, usebounds=True, weight=None) == 3 + + def test_bound_diameter_weight_attr(self): + assert ( + nx.diameter(self.G, usebounds=True, weight="high_cost") + != nx.diameter(self.G, usebounds=True, weight="weight") + == nx.diameter(self.G, usebounds=True, weight="cost") + == 1.6 + != nx.diameter(self.G, usebounds=True, weight="high_cost") + ) + assert nx.diameter(self.G, usebounds=True, weight="high_cost") == nx.diameter( + self.G, usebounds=True, weight="high_cost" + ) + + def test_bound_diameter_weight_fn(self): + assert nx.diameter(self.G, usebounds=True, weight=self.weight_fn) == 6 + + def test_bound_radius_weight_None(self): + assert pytest.approx(nx.radius(self.G, usebounds=True, weight=None)) == 2 + + def 
test_bound_radius_weight_attr(self): + assert ( + nx.radius(self.G, usebounds=True, weight="high_cost") + != pytest.approx(nx.radius(self.G, usebounds=True, weight="weight")) + == pytest.approx(nx.radius(self.G, usebounds=True, weight="cost")) + == 0.9 + != nx.radius(self.G, usebounds=True, weight="high_cost") + ) + assert nx.radius(self.G, usebounds=True, weight="high_cost") == nx.radius( + self.G, usebounds=True, weight="high_cost" + ) + + def test_bound_radius_weight_fn(self): + assert nx.radius(self.G, usebounds=True, weight=self.weight_fn) == 4 + + def test_bound_periphery_weight_None(self): + result = {1, 3, 4} + assert set(nx.periphery(self.G, usebounds=True, weight=None)) == result + + def test_bound_periphery_weight_attr(self): + result = {4, 5} + assert ( + set(nx.periphery(self.G, usebounds=True, weight="weight")) + == set(nx.periphery(self.G, usebounds=True, weight="cost")) + == result + ) + + def test_bound_periphery_weight_fn(self): + result = {1, 3, 4} + assert ( + set(nx.periphery(self.G, usebounds=True, weight=self.weight_fn)) == result + ) + + def test_bound_center_weight_None(self): + result = {0, 2, 5} + assert set(nx.center(self.G, usebounds=True, weight=None)) == result + + def test_bound_center_weight_attr(self): + result = {0} + assert ( + set(nx.center(self.G, usebounds=True, weight="weight")) + == set(nx.center(self.G, usebounds=True, weight="cost")) + == result + ) + + def test_bound_center_weight_fn(self): + result = {0, 2, 5} + assert set(nx.center(self.G, usebounds=True, weight=self.weight_fn)) == result + + +class TestResistanceDistance: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + sp = pytest.importorskip("scipy") + + def setup_method(self): + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(2, 3, weight=4) + G.add_edge(3, 4, weight=1) + G.add_edge(1, 4, weight=3) + self.G = G + + def test_resistance_distance_directed_graph(self): + G = nx.DiGraph() + with pytest.raises(nx.NetworkXNotImplemented): + nx.resistance_distance(G) + + def test_resistance_distance_empty(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(G) + + def test_resistance_distance_not_connected(self): + with pytest.raises(nx.NetworkXError): + self.G.add_node(5) + nx.resistance_distance(self.G, 1, 5) + + def test_resistance_distance_nodeA_not_in_graph(self): + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(self.G, 9, 1) + + def test_resistance_distance_nodeB_not_in_graph(self): + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(self.G, 1, 9) + + def test_resistance_distance(self): + rd = nx.resistance_distance(self.G, 1, 3, "weight", True) + test_data = 1 / (1 / (2 + 4) + 1 / (1 + 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_resistance_distance_noinv(self): + rd = nx.resistance_distance(self.G, 1, 3, "weight", False) + test_data = 1 / (1 / (1 / 2 + 1 / 4) + 1 / (1 / 1 + 1 / 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_resistance_distance_no_weight(self): + rd = nx.resistance_distance(self.G, 1, 3) + assert round(rd, 5) == 1 + + def test_resistance_distance_neg_weight(self): + self.G[2][3]["weight"] = -4 + rd = nx.resistance_distance(self.G, 1, 3, "weight", True) + test_data = 1 / (1 / (2 + -4) + 1 / (1 + 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, weight=2) + G.add_edge(2, 3, weight=4) + G.add_edge(3, 4, weight=1) + G.add_edge(1, 4, weight=3) + rd = 
nx.resistance_distance(G, 1, 3, "weight", True) + assert np.isclose(rd, 1 / (1 / (2 + 4) + 1 / (1 + 3))) + + def test_resistance_distance_div0(self): + with pytest.raises(ZeroDivisionError): + self.G[1][2]["weight"] = 0 + nx.resistance_distance(self.G, 1, 3, "weight") + + def test_resistance_distance_same_node(self): + assert nx.resistance_distance(self.G, 1, 1) == 0 + + def test_resistance_distance_only_nodeA(self): + rd = nx.resistance_distance(self.G, nodeA=1) + test_data = {} + test_data[1] = 0 + test_data[2] = 0.75 + test_data[3] = 1 + test_data[4] = 0.75 + assert isinstance(rd, dict) + assert sorted(rd.keys()) == sorted(test_data.keys()) + for key in rd: + assert np.isclose(rd[key], test_data[key]) + + def test_resistance_distance_only_nodeB(self): + rd = nx.resistance_distance(self.G, nodeB=1) + test_data = {} + test_data[1] = 0 + test_data[2] = 0.75 + test_data[3] = 1 + test_data[4] = 0.75 + assert isinstance(rd, dict) + assert sorted(rd.keys()) == sorted(test_data.keys()) + for key in rd: + assert np.isclose(rd[key], test_data[key]) + + def test_resistance_distance_all(self): + rd = nx.resistance_distance(self.G) + assert isinstance(rd, dict) + assert round(rd[1][3], 5) == 1 + + +class TestEffectiveGraphResistance: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + sp = pytest.importorskip("scipy") + + def setup_method(self): + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 3, weight=1) + G.add_edge(2, 3, weight=4) + self.G = G + + def test_effective_graph_resistance_directed_graph(self): + G = nx.DiGraph() + with pytest.raises(nx.NetworkXNotImplemented): + nx.effective_graph_resistance(G) + + def test_effective_graph_resistance_empty(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.effective_graph_resistance(G) + + def test_effective_graph_resistance_not_connected(self): + G = nx.Graph([(1, 2), (3, 4)]) + RG = nx.effective_graph_resistance(G) + assert np.isinf(RG) + + def test_effective_graph_resistance(self): + RG = nx.effective_graph_resistance(self.G, "weight", True) + rd12 = 1 / (1 / (1 + 4) + 1 / 2) + rd13 = 1 / (1 / (1 + 2) + 1 / 4) + rd23 = 1 / (1 / (2 + 4) + 1 / 1) + assert np.isclose(RG, rd12 + rd13 + rd23) + + def test_effective_graph_resistance_noinv(self): + RG = nx.effective_graph_resistance(self.G, "weight", False) + rd12 = 1 / (1 / (1 / 1 + 1 / 4) + 1 / (1 / 2)) + rd13 = 1 / (1 / (1 / 1 + 1 / 2) + 1 / (1 / 4)) + rd23 = 1 / (1 / (1 / 2 + 1 / 4) + 1 / (1 / 1)) + assert np.isclose(RG, rd12 + rd13 + rd23) + + def test_effective_graph_resistance_no_weight(self): + RG = nx.effective_graph_resistance(self.G) + assert np.isclose(RG, 2) + + def test_effective_graph_resistance_neg_weight(self): + self.G[2][3]["weight"] = -4 + RG = nx.effective_graph_resistance(self.G, "weight", True) + rd12 = 1 / (1 / (1 + -4) + 1 / 2) + rd13 = 1 / (1 / (1 + 2) + 1 / (-4)) + rd23 = 1 / (1 / (2 + -4) + 1 / 1) + assert np.isclose(RG, rd12 + rd13 + rd23) + + def test_effective_graph_resistance_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 3, weight=1) + G.add_edge(2, 3, weight=1) + G.add_edge(2, 3, weight=3) + RG = nx.effective_graph_resistance(G, "weight", True) + edge23 = 1 / (1 / 1 + 1 / 3) + rd12 = 1 / (1 / (1 + edge23) + 1 / 2) + rd13 = 1 / (1 / (1 + 2) + 1 / edge23) + rd23 = 1 / (1 / (2 + edge23) + 1 / 1) + assert np.isclose(RG, rd12 + rd13 + rd23) + + def test_effective_graph_resistance_div0(self): + with pytest.raises(ZeroDivisionError): + self.G[1][2]["weight"] = 0 + 
nx.effective_graph_resistance(self.G, "weight") + + def test_effective_graph_resistance_complete_graph(self): + N = 10 + G = nx.complete_graph(N) + RG = nx.effective_graph_resistance(G) + assert np.isclose(RG, N - 1) + + def test_effective_graph_resistance_path_graph(self): + N = 10 + G = nx.path_graph(N) + RG = nx.effective_graph_resistance(G) + assert np.isclose(RG, (N - 1) * N * (N + 1) // 6) + + +class TestBarycenter: + """Test :func:`networkx.algorithms.distance_measures.barycenter`.""" + + def barycenter_as_subgraph(self, g, **kwargs): + """Return the subgraph induced on the barycenter of g""" + b = nx.barycenter(g, **kwargs) + assert isinstance(b, list) + assert set(b) <= set(g) + return g.subgraph(b) + + def test_must_be_connected(self): + pytest.raises(nx.NetworkXNoPath, nx.barycenter, nx.empty_graph(5)) + + def test_sp_kwarg(self): + # Complete graph K_5. Normally it works... + K_5 = nx.complete_graph(5) + sp = dict(nx.shortest_path_length(K_5)) + assert nx.barycenter(K_5, sp=sp) == list(K_5) + + # ...but not with the weight argument + for u, v, data in K_5.edges.data(): + data["weight"] = 1 + pytest.raises(ValueError, nx.barycenter, K_5, sp=sp, weight="weight") + + # ...and a corrupted sp can make it seem like K_5 is disconnected + del sp[0][1] + pytest.raises(nx.NetworkXNoPath, nx.barycenter, K_5, sp=sp) + + def test_trees(self): + """The barycenter of a tree is a single vertex or an edge. + + See [West01]_, p. 78. + """ + prng = Random(0xDEADBEEF) + for i in range(50): + RT = nx.random_labeled_tree(prng.randint(1, 75), seed=prng) + b = self.barycenter_as_subgraph(RT) + if len(b) == 2: + assert b.size() == 1 + else: + assert len(b) == 1 + assert b.size() == 0 + + def test_this_one_specific_tree(self): + """Test the tree pictured at the bottom of [West01]_, p. 
78.""" + g = nx.Graph( + { + "a": ["b"], + "b": ["a", "x"], + "x": ["b", "y"], + "y": ["x", "z"], + "z": ["y", 0, 1, 2, 3, 4], + 0: ["z"], + 1: ["z"], + 2: ["z"], + 3: ["z"], + 4: ["z"], + } + ) + b = self.barycenter_as_subgraph(g, attr="barycentricity") + assert list(b) == ["z"] + assert not b.edges + expected_barycentricity = { + 0: 23, + 1: 23, + 2: 23, + 3: 23, + 4: 23, + "a": 35, + "b": 27, + "x": 21, + "y": 17, + "z": 15, + } + for node, barycentricity in expected_barycentricity.items(): + assert g.nodes[node]["barycentricity"] == barycentricity + + # Doubling weights should do nothing but double the barycentricities + for edge in g.edges: + g.edges[edge]["weight"] = 2 + b = self.barycenter_as_subgraph(g, weight="weight", attr="barycentricity2") + assert list(b) == ["z"] + assert not b.edges + for node, barycentricity in expected_barycentricity.items(): + assert g.nodes[node]["barycentricity2"] == barycentricity * 2 + + +class TestKemenyConstant: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + sp = pytest.importorskip("scipy") + + def setup_method(self): + G = nx.Graph() + w12 = 2 + w13 = 3 + w23 = 4 + G.add_edge(1, 2, weight=w12) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + self.G = G + + def test_kemeny_constant_directed(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(2, 3) + with pytest.raises(nx.NetworkXNotImplemented): + nx.kemeny_constant(G) + + def test_kemeny_constant_not_connected(self): + self.G.add_node(5) + with pytest.raises(nx.NetworkXError): + nx.kemeny_constant(self.G) + + def test_kemeny_constant_no_nodes(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.kemeny_constant(G) + + def test_kemeny_constant_negative_weight(self): + G = nx.Graph() + w12 = 2 + w13 = 3 + w23 = -10 + G.add_edge(1, 2, weight=w12) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + with pytest.raises(nx.NetworkXError): + nx.kemeny_constant(G, weight="weight") + + def test_kemeny_constant(self): + K = nx.kemeny_constant(self.G, weight="weight") + w12 = 2 + w13 = 3 + w23 = 4 + test_data = ( + 3 + / 2 + * (w12 + w13) + * (w12 + w23) + * (w13 + w23) + / ( + w12**2 * (w13 + w23) + + w13**2 * (w12 + w23) + + w23**2 * (w12 + w13) + + 3 * w12 * w13 * w23 + ) + ) + assert np.isclose(K, test_data) + + def test_kemeny_constant_no_weight(self): + K = nx.kemeny_constant(self.G) + assert np.isclose(K, 4 / 3) + + def test_kemeny_constant_multigraph(self): + G = nx.MultiGraph() + w12_1 = 2 + w12_2 = 1 + w13 = 3 + w23 = 4 + G.add_edge(1, 2, weight=w12_1) + G.add_edge(1, 2, weight=w12_2) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + K = nx.kemeny_constant(G, weight="weight") + w12 = w12_1 + w12_2 + test_data = ( + 3 + / 2 + * (w12 + w13) + * (w12 + w23) + * (w13 + w23) + / ( + w12**2 * (w13 + w23) + + w13**2 * (w12 + w23) + + w23**2 * (w12 + w13) + + 3 * w12 * w13 * w23 + ) + ) + assert np.isclose(K, test_data) + + def test_kemeny_constant_weight0(self): + G = nx.Graph() + w12 = 0 + w13 = 3 + w23 = 4 + G.add_edge(1, 2, weight=w12) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + K = nx.kemeny_constant(G, weight="weight") + test_data = ( + 3 + / 2 + * (w12 + w13) + * (w12 + w23) + * (w13 + w23) + / ( + w12**2 * (w13 + w23) + + w13**2 * (w12 + w23) + + w23**2 * (w12 + w13) + + 3 * w12 * w13 * w23 + ) + ) + assert np.isclose(K, test_data) + + def test_kemeny_constant_selfloop(self): + G = nx.Graph() + w11 = 1 + w12 = 2 + w13 = 3 + w23 = 4 + G.add_edge(1, 1, 
weight=w11) + G.add_edge(1, 2, weight=w12) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + K = nx.kemeny_constant(G, weight="weight") + test_data = ( + (2 * w11 + 3 * w12 + 3 * w13) + * (w12 + w23) + * (w13 + w23) + / ( + (w12 * w13 + w12 * w23 + w13 * w23) + * (w11 + 2 * w12 + 2 * w13 + 2 * w23) + ) + ) + assert np.isclose(K, test_data) + + def test_kemeny_constant_complete_bipartite_graph(self): + # Theorem 1 in https://www.sciencedirect.com/science/article/pii/S0166218X20302912 + n1 = 5 + n2 = 4 + G = nx.complete_bipartite_graph(n1, n2) + K = nx.kemeny_constant(G) + assert np.isclose(K, n1 + n2 - 3 / 2) + + def test_kemeny_constant_path_graph(self): + # Theorem 2 in https://www.sciencedirect.com/science/article/pii/S0166218X20302912 + n = 10 + G = nx.path_graph(n) + K = nx.kemeny_constant(G) + assert np.isclose(K, n**2 / 3 - 2 * n / 3 + 1 / 2) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_distance_regular.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_distance_regular.py new file mode 100644 index 0000000..545fb6d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_distance_regular.py @@ -0,0 +1,85 @@ +import pytest + +import networkx as nx +from networkx import is_strongly_regular + + +@pytest.mark.parametrize( + "f", (nx.is_distance_regular, nx.intersection_array, nx.is_strongly_regular) +) +@pytest.mark.parametrize("graph_constructor", (nx.DiGraph, nx.MultiGraph)) +def test_raises_on_directed_and_multigraphs(f, graph_constructor): + G = graph_constructor([(0, 1), (1, 2)]) + with pytest.raises(nx.NetworkXNotImplemented): + f(G) + + +class TestDistanceRegular: + def test_is_distance_regular(self): + assert nx.is_distance_regular(nx.icosahedral_graph()) + assert nx.is_distance_regular(nx.petersen_graph()) + assert nx.is_distance_regular(nx.cubical_graph()) + assert nx.is_distance_regular(nx.complete_bipartite_graph(3, 3)) + assert nx.is_distance_regular(nx.tetrahedral_graph()) + assert nx.is_distance_regular(nx.dodecahedral_graph()) + assert nx.is_distance_regular(nx.pappus_graph()) + assert nx.is_distance_regular(nx.heawood_graph()) + assert nx.is_distance_regular(nx.cycle_graph(3)) + # no distance regular + assert not nx.is_distance_regular(nx.path_graph(4)) + + def test_not_connected(self): + G = nx.cycle_graph(4) + nx.add_cycle(G, [5, 6, 7]) + assert not nx.is_distance_regular(G) + + def test_global_parameters(self): + b, c = nx.intersection_array(nx.cycle_graph(5)) + g = nx.global_parameters(b, c) + assert list(g) == [(0, 0, 2), (1, 0, 1), (1, 1, 0)] + b, c = nx.intersection_array(nx.cycle_graph(3)) + g = nx.global_parameters(b, c) + assert list(g) == [(0, 0, 2), (1, 1, 0)] + + def test_intersection_array(self): + b, c = nx.intersection_array(nx.cycle_graph(5)) + assert b == [2, 1] + assert c == [1, 1] + b, c = nx.intersection_array(nx.dodecahedral_graph()) + assert b == [3, 2, 1, 1, 1] + assert c == [1, 1, 1, 2, 3] + b, c = nx.intersection_array(nx.icosahedral_graph()) + assert b == [5, 2, 1] + assert c == [1, 2, 5] + + +@pytest.mark.parametrize("f", (nx.is_distance_regular, nx.is_strongly_regular)) +def test_empty_graph_raises(f): + G = nx.Graph() + with pytest.raises(nx.NetworkXPointlessConcept, match="Graph has no nodes"): + f(G) + + +class TestStronglyRegular: + """Unit tests for the :func:`~networkx.is_strongly_regular` + function. + + """ + + def test_cycle_graph(self): + """Tests that the cycle graph on five vertices is strongly + regular. 
+ + """ + G = nx.cycle_graph(5) + assert is_strongly_regular(G) + + def test_petersen_graph(self): + """Tests that the Petersen graph is strongly regular.""" + G = nx.petersen_graph() + assert is_strongly_regular(G) + + def test_path_graph(self): + """Tests that the path graph is not strongly regular.""" + G = nx.path_graph(4) + assert not is_strongly_regular(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dominance.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dominance.py new file mode 100644 index 0000000..e79807f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dominance.py @@ -0,0 +1,286 @@ +import pytest + +import networkx as nx + + +class TestImmediateDominators: + def test_exceptions(self): + G = nx.Graph() + G.add_node(0) + pytest.raises(nx.NetworkXNotImplemented, nx.immediate_dominators, G, 0) + G = nx.MultiGraph(G) + pytest.raises(nx.NetworkXNotImplemented, nx.immediate_dominators, G, 0) + G = nx.DiGraph([[0, 0]]) + pytest.raises(nx.NetworkXError, nx.immediate_dominators, G, 1) + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.immediate_dominators(G, 0) == {0: 0} + G.add_edge(0, 0) + assert nx.immediate_dominators(G, 0) == {0: 0} + + def test_path(self): + n = 5 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, 0) == {i: max(i - 1, 0) for i in range(n)} + + def test_cycle(self): + n = 5 + G = nx.cycle_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, 0) == {i: max(i - 1, 0) for i in range(n)} + + def test_unreachable(self): + n = 5 + assert n > 1 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, n // 2) == { + i: max(i - 1, n // 2) for i in range(n // 2, n) + } + + def test_irreducible1(self): + """ + Graph taken from figure 2 of "A simple, fast dominance algorithm." (2006). + https://hdl.handle.net/1911/96345 + """ + edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)] + G = nx.DiGraph(edges) + assert nx.immediate_dominators(G, 5) == dict.fromkeys(range(1, 6), 5) + + def test_irreducible2(self): + """ + Graph taken from figure 4 of "A simple, fast dominance algorithm." (2006). + https://hdl.handle.net/1911/96345 + """ + + edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1), (6, 4), (6, 5)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 6) + assert result == dict.fromkeys(range(1, 7), 6) + + def test_domrel_png(self): + # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png + edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 1) + assert result == {1: 1, 2: 1, 3: 2, 4: 2, 5: 2, 6: 2} + # Test postdominance. + result = nx.immediate_dominators(G.reverse(copy=False), 6) + assert result == {1: 2, 2: 6, 3: 5, 4: 5, 5: 2, 6: 6} + + def test_boost_example(self): + # Graph taken from Figure 1 of + # http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm + edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6), (5, 7), (6, 4)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 0) + assert result == {0: 0, 1: 0, 2: 1, 3: 1, 4: 3, 5: 4, 6: 4, 7: 1} + # Test postdominance. 
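+        # Editorial sketch, not upstream code: postdominance is dominance in
+        # the reversed graph -- d postdominates n when every path from n to
+        # the exit passes through d.  A minimal illustration on a 3-node path
+        # (graph H is introduced here for illustration only):
+        H = nx.path_graph(3, create_using=nx.DiGraph)  # 0 -> 1 -> 2, exit is 2
+        assert nx.immediate_dominators(H.reverse(copy=False), 2) == {0: 1, 1: 2, 2: 2}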
+ result = nx.immediate_dominators(G.reverse(copy=False), 7) + assert result == {0: 1, 1: 7, 2: 7, 3: 4, 4: 5, 5: 7, 6: 4, 7: 7} + + +class TestDominanceFrontiers: + def test_exceptions(self): + G = nx.Graph() + G.add_node(0) + pytest.raises(nx.NetworkXNotImplemented, nx.dominance_frontiers, G, 0) + G = nx.MultiGraph(G) + pytest.raises(nx.NetworkXNotImplemented, nx.dominance_frontiers, G, 0) + G = nx.DiGraph([[0, 0]]) + pytest.raises(nx.NetworkXError, nx.dominance_frontiers, G, 1) + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.dominance_frontiers(G, 0) == {0: set()} + G.add_edge(0, 0) + assert nx.dominance_frontiers(G, 0) == {0: set()} + + def test_path(self): + n = 5 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, 0) == {i: set() for i in range(n)} + + def test_cycle(self): + n = 5 + G = nx.cycle_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, 0) == {i: set() for i in range(n)} + + def test_unreachable(self): + n = 5 + assert n > 1 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, n // 2) == {i: set() for i in range(n // 2, n)} + + def test_irreducible1(self): + """ + Graph taken from figure 2 of "A simple, fast dominance algorithm." (2006). + https://hdl.handle.net/1911/96345 + """ + edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 5) == { + 1: {2}, + 2: {1}, + 3: {2}, + 4: {1}, + 5: set(), + } + + def test_irreducible2(self): + """ + Graph taken from figure 4 of "A simple, fast dominance algorithm." (2006). + https://hdl.handle.net/1911/96345 + """ + edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1), (6, 4), (6, 5)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 6) == { + 1: {2}, + 2: {1, 3}, + 3: {2}, + 4: {2, 3}, + 5: {1}, + 6: set(), + } + + def test_domrel_png(self): + # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png + edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 1) == { + 1: set(), + 2: {2}, + 3: {5}, + 4: {5}, + 5: {2}, + 6: set(), + } + # Test postdominance. + result = nx.dominance_frontiers(G.reverse(copy=False), 6) + assert result == {1: set(), 2: {2}, 3: {2}, 4: {2}, 5: {2}, 6: set()} + + def test_boost_example(self): + # Graph taken from Figure 1 of + # http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm + edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6), (5, 7), (6, 4)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 0) == { + 0: set(), + 1: set(), + 2: {7}, + 3: {7}, + 4: {4, 7}, + 5: {7}, + 6: {4}, + 7: set(), + } + # Test postdominance. 
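+        # Editorial sketch, not upstream code: the dominance frontier of n is
+        # every node w such that n dominates a predecessor of w but does not
+        # strictly dominate w itself (the classic SSA phi-placement points).
+        # On a minimal diamond, both branch nodes have the join node in their
+        # frontier (graph D is introduced here for illustration only):
+        D = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)])
+        assert nx.dominance_frontiers(D, 0) == {0: set(), 1: {3}, 2: {3}, 3: set()}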
+ result = nx.dominance_frontiers(G.reverse(copy=False), 7) + expected = { + 0: set(), + 1: set(), + 2: {1}, + 3: {1}, + 4: {1, 4}, + 5: {1}, + 6: {4}, + 7: set(), + } + assert result == expected + + def test_discard_issue(self): + # https://github.com/networkx/networkx/issues/2071 + g = nx.DiGraph() + g.add_edges_from( + [ + ("b0", "b1"), + ("b1", "b2"), + ("b2", "b3"), + ("b3", "b1"), + ("b1", "b5"), + ("b5", "b6"), + ("b5", "b8"), + ("b6", "b7"), + ("b8", "b7"), + ("b7", "b3"), + ("b3", "b4"), + ] + ) + df = nx.dominance_frontiers(g, "b0") + assert df == { + "b4": set(), + "b5": {"b3"}, + "b6": {"b7"}, + "b7": {"b3"}, + "b0": set(), + "b1": {"b1"}, + "b2": {"b3"}, + "b3": {"b1"}, + "b8": {"b7"}, + } + + def test_loop(self): + g = nx.DiGraph() + g.add_edges_from([("a", "b"), ("b", "c"), ("b", "a")]) + df = nx.dominance_frontiers(g, "a") + assert df == {"a": set(), "b": set(), "c": set()} + + def test_missing_immediate_doms(self): + # see https://github.com/networkx/networkx/issues/2070 + g = nx.DiGraph() + edges = [ + ("entry_1", "b1"), + ("b1", "b2"), + ("b2", "b3"), + ("b3", "exit"), + ("entry_2", "b3"), + ] + + # entry_1 + # | + # b1 + # | + # b2 entry_2 + # | / + # b3 + # | + # exit + + g.add_edges_from(edges) + # formerly raised KeyError on entry_2 when parsing b3 + # because entry_2 does not have immediate doms (no path) + nx.dominance_frontiers(g, "entry_1") + + def test_loops_larger(self): + # from + # http://ecee.colorado.edu/~waite/Darmstadt/motion.html + g = nx.DiGraph() + edges = [ + ("entry", "exit"), + ("entry", "1"), + ("1", "2"), + ("2", "3"), + ("3", "4"), + ("4", "5"), + ("5", "6"), + ("6", "exit"), + ("6", "2"), + ("5", "3"), + ("4", "4"), + ] + + g.add_edges_from(edges) + df = nx.dominance_frontiers(g, "entry") + answer = { + "entry": set(), + "1": {"exit"}, + "2": {"exit", "2"}, + "3": {"exit", "3", "2"}, + "4": {"exit", "4", "3", "2"}, + "5": {"exit", "3", "2"}, + "6": {"exit", "2"}, + "exit": set(), + } + for n in df: + assert set(df[n]) == set(answer[n]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dominating.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dominating.py new file mode 100644 index 0000000..5f51777 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_dominating.py @@ -0,0 +1,115 @@ +import pytest + +import networkx as nx + + +def test_dominating_set(): + G = nx.gnp_random_graph(100, 0.1) + D = nx.dominating_set(G) + assert nx.is_dominating_set(G, D) + D = nx.dominating_set(G, start_with=0) + assert nx.is_dominating_set(G, D) + + +def test_complete(): + """In complete graphs each node is a dominating set. + Thus the dominating set has to be of cardinality 1. 
+ """ + K4 = nx.complete_graph(4) + assert len(nx.dominating_set(K4)) == 1 + K5 = nx.complete_graph(5) + assert len(nx.dominating_set(K5)) == 1 + + +def test_raise_dominating_set(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + D = nx.dominating_set(G, start_with=10) + + +def test_is_dominating_set(): + G = nx.path_graph(4) + d = {1, 3} + assert nx.is_dominating_set(G, d) + d = {0, 2} + assert nx.is_dominating_set(G, d) + d = {1} + assert not nx.is_dominating_set(G, d) + + +def test_wikipedia_is_dominating_set(): + """Example from https://en.wikipedia.org/wiki/Dominating_set""" + G = nx.cycle_graph(4) + G.add_edges_from([(0, 4), (1, 4), (2, 5)]) + assert nx.is_dominating_set(G, {4, 3, 5}) + assert nx.is_dominating_set(G, {0, 2}) + assert nx.is_dominating_set(G, {1, 2}) + + +def test_is_connected_dominating_set(): + G = nx.path_graph(4) + D = {1, 2} + assert nx.is_connected_dominating_set(G, D) + D = {1, 3} + assert not nx.is_connected_dominating_set(G, D) + D = {2, 3} + assert nx.is_connected(nx.subgraph(G, D)) + assert not nx.is_connected_dominating_set(G, D) + + +def test_null_graph_connected_dominating_set(): + G = nx.Graph() + assert 0 == len(nx.connected_dominating_set(G)) + + +def test_single_node_graph_connected_dominating_set(): + G = nx.Graph() + G.add_node(1) + CD = nx.connected_dominating_set(G) + assert nx.is_connected_dominating_set(G, CD) + + +def test_raise_disconnected_graph_connected_dominating_set(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + nx.connected_dominating_set(G) + + +def test_complete_graph_connected_dominating_set(): + K5 = nx.complete_graph(5) + assert 1 == len(nx.connected_dominating_set(K5)) + K7 = nx.complete_graph(7) + assert 1 == len(nx.connected_dominating_set(K7)) + + +def test_docstring_example_connected_dominating_set(): + G = nx.Graph( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), + (5, 10), + (6, 11), + (7, 12), + (8, 12), + (9, 12), + (10, 12), + (11, 12), + ] + ) + assert {1, 2, 3, 4, 5, 6, 7} == nx.connected_dominating_set(G) + + +@pytest.mark.parametrize("seed", [1, 13, 29]) +@pytest.mark.parametrize("n,k,p", [(10, 3, 0.2), (100, 10, 0.7), (1000, 50, 0.5)]) +def test_connected_watts_strogatz_graph_connected_dominating_set(n, k, p, seed): + G = nx.connected_watts_strogatz_graph(n, k, p, seed=seed) + D = nx.connected_dominating_set(G) + assert nx.is_connected_dominating_set(G, D) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_efficiency.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_efficiency.py new file mode 100644 index 0000000..9a2e7d0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_efficiency.py @@ -0,0 +1,58 @@ +"""Unit tests for the :mod:`networkx.algorithms.efficiency` module.""" + +import networkx as nx + + +class TestEfficiency: + def setup_method(self): + # G1 is a disconnected graph + self.G1 = nx.Graph() + self.G1.add_nodes_from([1, 2, 3]) + # G2 is a cycle graph + self.G2 = nx.cycle_graph(4) + # G3 is the triangle graph with one additional edge + self.G3 = nx.lollipop_graph(3, 1) + + def test_efficiency_disconnected_nodes(self): + """ + When nodes are disconnected, efficiency is 0 + """ + assert nx.efficiency(self.G1, 1, 2) == 0 + + def test_local_efficiency_disconnected_graph(self): + """ + In a disconnected graph the efficiency is 0 + """ + assert nx.local_efficiency(self.G1) == 0 + + def test_efficiency(self): + assert 
nx.efficiency(self.G2, 0, 1) == 1 + assert nx.efficiency(self.G2, 0, 2) == 1 / 2 + + def test_global_efficiency(self): + assert nx.global_efficiency(self.G2) == 5 / 6 + + def test_global_efficiency_complete_graph(self): + """ + Tests that the average global efficiency of the complete graph is one. + """ + for n in range(2, 10): + G = nx.complete_graph(n) + assert nx.global_efficiency(G) == 1 + + def test_local_efficiency_complete_graph(self): + """ + Test that the local efficiency for a complete graph with at least 3 + nodes should be one. For a graph with only 2 nodes, the induced + subgraph has no edges. + """ + for n in range(3, 10): + G = nx.complete_graph(n) + assert nx.local_efficiency(G) == 1 + + def test_using_ego_graph(self): + """ + Test that the ego graph is used when computing local efficiency. + For more information, see GitHub issue #2710. + """ + assert nx.local_efficiency(self.G3) == 7 / 12 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_euler.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_euler.py new file mode 100644 index 0000000..b5871f0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_euler.py @@ -0,0 +1,314 @@ +import collections + +import pytest + +import networkx as nx + + +@pytest.mark.parametrize("f", (nx.is_eulerian, nx.is_semieulerian)) +def test_empty_graph_raises(f): + G = nx.Graph() + with pytest.raises(nx.NetworkXPointlessConcept, match="Connectivity is undefined"): + f(G) + + +class TestIsEulerian: + def test_is_eulerian(self): + assert nx.is_eulerian(nx.complete_graph(5)) + assert nx.is_eulerian(nx.complete_graph(7)) + assert nx.is_eulerian(nx.hypercube_graph(4)) + assert nx.is_eulerian(nx.hypercube_graph(6)) + + assert not nx.is_eulerian(nx.complete_graph(4)) + assert not nx.is_eulerian(nx.complete_graph(6)) + assert not nx.is_eulerian(nx.hypercube_graph(3)) + assert not nx.is_eulerian(nx.hypercube_graph(5)) + + assert not nx.is_eulerian(nx.petersen_graph()) + assert not nx.is_eulerian(nx.path_graph(4)) + + def test_is_eulerian2(self): + # not connected + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + assert not nx.is_eulerian(G) + # not strongly connected + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3]) + assert not nx.is_eulerian(G) + G = nx.MultiDiGraph() + G.add_edge(1, 2) + G.add_edge(2, 3) + G.add_edge(2, 3) + G.add_edge(3, 1) + assert not nx.is_eulerian(G) + + +class TestEulerianCircuit: + def test_eulerian_circuit_cycle(self): + G = nx.cycle_graph(4) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 3, 2, 1] + assert edges == [(0, 3), (3, 2), (2, 1), (1, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes == [1, 2, 3, 0] + assert edges == [(1, 2), (2, 3), (3, 0), (0, 1)] + + G = nx.complete_graph(3) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 2, 1] + assert edges == [(0, 2), (2, 1), (1, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes == [1, 2, 0] + assert edges == [(1, 2), (2, 0), (0, 1)] + + def test_eulerian_circuit_digraph(self): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 1, 2, 3] + assert edges == [(0, 1), (1, 2), (2, 3), (3, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes 
== [1, 2, 3, 0]
+        assert edges == [(1, 2), (2, 3), (3, 0), (0, 1)]
+
+    def test_multigraph(self):
+        G = nx.MultiGraph()
+        nx.add_cycle(G, [0, 1, 2, 3])
+        G.add_edge(1, 2)
+        G.add_edge(1, 2)
+        edges = list(nx.eulerian_circuit(G, source=0))
+        nodes = [u for u, v in edges]
+        assert nodes == [0, 3, 2, 1, 2, 1]
+        assert edges == [(0, 3), (3, 2), (2, 1), (1, 2), (2, 1), (1, 0)]
+
+    def test_multigraph_with_keys(self):
+        G = nx.MultiGraph()
+        nx.add_cycle(G, [0, 1, 2, 3])
+        G.add_edge(1, 2)
+        G.add_edge(1, 2)
+        edges = list(nx.eulerian_circuit(G, source=0, keys=True))
+        nodes = [u for u, v, k in edges]
+        assert nodes == [0, 3, 2, 1, 2, 1]
+        assert edges[:2] == [(0, 3, 0), (3, 2, 0)]
+        assert collections.Counter(edges[2:5]) == collections.Counter(
+            [(2, 1, 0), (1, 2, 1), (2, 1, 2)]
+        )
+        assert edges[5:] == [(1, 0, 0)]
+
+    def test_not_eulerian(self):
+        with pytest.raises(nx.NetworkXError):
+            f = list(nx.eulerian_circuit(nx.complete_graph(4)))
+
+
+class TestIsSemiEulerian:
+    def test_is_semieulerian(self):
+        # Test that graphs with Eulerian paths but no Eulerian cycles return True.
+        assert nx.is_semieulerian(nx.path_graph(4))
+        G = nx.path_graph(6, create_using=nx.DiGraph)
+        assert nx.is_semieulerian(G)
+
+        # Test that graphs with Eulerian cycles return False.
+        assert not nx.is_semieulerian(nx.complete_graph(5))
+        assert not nx.is_semieulerian(nx.complete_graph(7))
+        assert not nx.is_semieulerian(nx.hypercube_graph(4))
+        assert not nx.is_semieulerian(nx.hypercube_graph(6))
+
+
+class TestHasEulerianPath:
+    def test_has_eulerian_path_cyclic(self):
+        # Test that graphs with Eulerian cycles return True.
+        assert nx.has_eulerian_path(nx.complete_graph(5))
+        assert nx.has_eulerian_path(nx.complete_graph(7))
+        assert nx.has_eulerian_path(nx.hypercube_graph(4))
+        assert nx.has_eulerian_path(nx.hypercube_graph(6))
+
+    def test_has_eulerian_path_non_cyclic(self):
+        # Test that graphs with Eulerian paths but no Eulerian cycles return True.
+        assert nx.has_eulerian_path(nx.path_graph(4))
+        G = nx.path_graph(6, create_using=nx.DiGraph)
+        assert nx.has_eulerian_path(G)
+
+    def test_has_eulerian_path_directed_graph(self):
+        # Test that a directed graph with unbalanced degrees returns False.
+        G = nx.DiGraph()
+        G.add_edges_from([(0, 1), (1, 2), (0, 2)])
+        assert not nx.has_eulerian_path(G)
+
+        # Test that a directed graph without an isolated node returns True.
+        G = nx.DiGraph()
+        G.add_edges_from([(0, 1), (1, 2), (2, 0)])
+        assert nx.has_eulerian_path(G)
+
+        # Test that a directed graph with an isolated node returns False.
+        G.add_node(3)
+        assert not nx.has_eulerian_path(G)
+
+    @pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph()))
+    def test_has_eulerian_path_not_weakly_connected(self, G):
+        G.add_edges_from([(0, 1), (2, 3), (3, 2)])
+        assert not nx.has_eulerian_path(G)
+
+    @pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph()))
+    def test_has_eulerian_path_unbalancedins_more_than_one(self, G):
+        G.add_edges_from([(0, 1), (2, 3)])
+        assert not nx.has_eulerian_path(G)
+
+
+class TestFindPathStart:
+    def testfind_path_start(self):
+        find_path_start = nx.algorithms.euler._find_path_start
+        # Test that digraphs return the correct starting node.
+        G = nx.path_graph(6, create_using=nx.DiGraph)
+        assert find_path_start(G) == 0
+        edges = [(0, 1), (1, 2), (2, 0), (4, 0)]
+        assert find_path_start(nx.DiGraph(edges)) == 4
+
+        # Test that a graph with no Eulerian path returns None.
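+        # Editorial sketch, not upstream code: a digraph has an Eulerian path
+        # only if at most one node has out_degree - in_degree == 1; the graph
+        # below has two such nodes, so there is no valid starting node.
+        H = nx.DiGraph([(0, 1), (1, 2), (2, 3), (2, 4)])
+        surplus = [n for n in H if H.out_degree(n) - H.in_degree(n) == 1]
+        assert len(surplus) > 1  # both 0 and 2 would have to be the start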
+ edges = [(0, 1), (1, 2), (2, 3), (2, 4)] + assert find_path_start(nx.DiGraph(edges)) is None + + +class TestEulerianPath: + def test_eulerian_path(self): + x = [(4, 0), (0, 1), (1, 2), (2, 0)] + for e1, e2 in zip(x, nx.eulerian_path(nx.DiGraph(x))): + assert e1 == e2 + + def test_eulerian_path_straight_link(self): + G = nx.DiGraph() + result = [(1, 2), (2, 3), (3, 4), (4, 5)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=1)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=4)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=5)) + + def test_eulerian_path_multigraph(self): + G = nx.MultiDiGraph() + result = [(2, 1), (1, 2), (2, 1), (1, 2), (2, 3), (3, 4), (4, 3)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=2)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=4)) + + def test_eulerian_path_eulerian_circuit(self): + G = nx.DiGraph() + result = [(1, 2), (2, 3), (3, 4), (4, 1)] + result2 = [(2, 3), (3, 4), (4, 1), (1, 2)] + result3 = [(3, 4), (4, 1), (1, 2), (2, 3)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=1)) + assert result2 == list(nx.eulerian_path(G, source=2)) + assert result3 == list(nx.eulerian_path(G, source=3)) + + def test_eulerian_path_undirected(self): + G = nx.Graph() + result = [(1, 2), (2, 3), (3, 4), (4, 5)] + result2 = [(5, 4), (4, 3), (3, 2), (2, 1)] + G.add_edges_from(result) + assert list(nx.eulerian_path(G)) in (result, result2) + assert result == list(nx.eulerian_path(G, source=1)) + assert result2 == list(nx.eulerian_path(G, source=5)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=2)) + + def test_eulerian_path_multigraph_undirected(self): + G = nx.MultiGraph() + result = [(2, 1), (1, 2), (2, 1), (1, 2), (2, 3), (3, 4)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=2)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=1)) + + @pytest.mark.parametrize( + ("graph_type", "result"), + ( + (nx.MultiGraph, [(0, 1, 0), (1, 0, 1)]), + (nx.MultiDiGraph, [(0, 1, 0), (1, 0, 0)]), + ), + ) + def test_eulerian_with_keys(self, graph_type, result): + G = graph_type([(0, 1), (1, 0)]) + answer = nx.eulerian_path(G, keys=True) + assert list(answer) == result + + +class TestEulerize: + def test_disconnected(self): + with pytest.raises(nx.NetworkXError): + G = nx.from_edgelist([(0, 1), (2, 3)]) + nx.eulerize(G) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.eulerize(nx.Graph()) + + def test_null_multigraph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.eulerize(nx.MultiGraph()) + + def test_on_empty_graph(self): + with pytest.raises(nx.NetworkXError): + nx.eulerize(nx.empty_graph(3)) + + def test_on_eulerian(self): + G = nx.cycle_graph(3) + H = nx.eulerize(G) + assert nx.is_isomorphic(G, H) + + def test_on_eulerian_multigraph(self): + G = nx.MultiGraph(nx.cycle_graph(3)) + G.add_edge(0, 1) + H 
= nx.eulerize(G)
+        assert nx.is_eulerian(H)
+
+    def test_on_complete_graph(self):
+        G = nx.complete_graph(4)
+        assert nx.is_eulerian(nx.eulerize(G))
+        assert nx.is_eulerian(nx.eulerize(nx.MultiGraph(G)))
+
+    def test_on_non_eulerian_graph(self):
+        G = nx.cycle_graph(18)
+        G.add_edge(0, 18)
+        G.add_edge(18, 19)
+        G.add_edge(17, 19)
+        G.add_edge(4, 20)
+        G.add_edge(20, 21)
+        G.add_edge(21, 22)
+        G.add_edge(22, 23)
+        G.add_edge(23, 24)
+        G.add_edge(24, 25)
+        G.add_edge(25, 26)
+        G.add_edge(26, 27)
+        G.add_edge(27, 28)
+        G.add_edge(28, 13)
+        assert not nx.is_eulerian(G)
+        G = nx.eulerize(G)
+        assert nx.is_eulerian(G)
+        assert nx.number_of_edges(G) == 39
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_graph_hashing.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_graph_hashing.py
new file mode 100644
index 0000000..6c90c8f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_graph_hashing.py
@@ -0,0 +1,872 @@
+import copy
+
+import pytest
+
+import networkx as nx
+
+# Unit tests relevant for both functions in this module.
+
+
+def test_positive_iters():
+    G1 = nx.empty_graph()
+    with pytest.raises(
+        ValueError,
+        match="The WL algorithm requires that `iterations` be positive",
+    ):
+        nx.weisfeiler_lehman_graph_hash(G1, iterations=-3)
+    with pytest.raises(
+        ValueError,
+        match="The WL algorithm requires that `iterations` be positive",
+    ):
+        nx.weisfeiler_lehman_subgraph_hashes(G1, iterations=-3)
+    with pytest.raises(
+        ValueError,
+        match="The WL algorithm requires that `iterations` be positive",
+    ):
+        nx.weisfeiler_lehman_graph_hash(G1, iterations=0)
+    with pytest.raises(
+        ValueError,
+        match="The WL algorithm requires that `iterations` be positive",
+    ):
+        nx.weisfeiler_lehman_subgraph_hashes(G1, iterations=0)
+
+
+# Unit tests for the :func:`~networkx.weisfeiler_lehman_graph_hash` function
+
+
+def test_empty_graph_hash():
+    """
+    Empty graphs should give identical hashes regardless of other params.
+    """
+    G1 = nx.empty_graph()
+    G2 = nx.empty_graph()
+
+    h1 = nx.weisfeiler_lehman_graph_hash(G1)
+    h2 = nx.weisfeiler_lehman_graph_hash(G2)
+    h3 = nx.weisfeiler_lehman_graph_hash(G2, edge_attr="edge_attr1")
+    h4 = nx.weisfeiler_lehman_graph_hash(G2, node_attr="node_attr1")
+    h5 = nx.weisfeiler_lehman_graph_hash(
+        G2, edge_attr="edge_attr1", node_attr="node_attr1"
+    )
+    h6 = nx.weisfeiler_lehman_graph_hash(G2, iterations=10)
+
+    assert h1 == h2
+    assert h1 == h3
+    assert h1 == h4
+    assert h1 == h5
+    assert h1 == h6
+
+
+def test_directed():
+    """
+    A directed graph with no bi-directional edges should yield a different
+    graph hash to the same graph taken as undirected if there are no hash
+    collisions.
+    """
+    r = 10
+    for i in range(r):
+        G_directed = nx.gn_graph(10 + r, seed=100 + i)
+        G_undirected = nx.to_undirected(G_directed)
+
+        h_directed = nx.weisfeiler_lehman_graph_hash(G_directed)
+        h_undirected = nx.weisfeiler_lehman_graph_hash(G_undirected)
+
+        assert h_directed != h_undirected
+
+
+def test_reversed():
+    """
+    A directed graph with no bi-directional edges should yield a different
+    graph hash to the same graph taken with edge directions reversed if
+    there are no hash
+    collisions.
Here we test a cycle graph which is the minimal counterexample + """ + G = nx.cycle_graph(5, create_using=nx.DiGraph) + nx.set_node_attributes(G, {n: str(n) for n in G.nodes()}, name="label") + + G_reversed = G.reverse() + + h = nx.weisfeiler_lehman_graph_hash(G, node_attr="label") + h_reversed = nx.weisfeiler_lehman_graph_hash(G_reversed, node_attr="label") + + assert h != h_reversed + + +def test_isomorphic(): + """ + graph hashes should be invariant to node-relabeling (when the output is reindexed + by the same mapping) + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=200 + i) + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g1_hash = nx.weisfeiler_lehman_graph_hash(G1) + g2_hash = nx.weisfeiler_lehman_graph_hash(G2) + + assert g1_hash == g2_hash + + +def test_isomorphic_edge_attr(): + """ + Isomorphic graphs with differing edge attributes should yield different graph + hashes if the 'edge_attr' argument is supplied and populated in the graph, + and there are no hash collisions. + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=300 + i) + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_with_edge_attr1 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1" + ) + g1_hash_with_edge_attr2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr2" + ) + g1_hash_no_edge_attr = nx.weisfeiler_lehman_graph_hash(G1, edge_attr=None) + + assert g1_hash_with_edge_attr1 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr2 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr1 != g1_hash_with_edge_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_edge_attr1 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr1" + ) + g2_hash_with_edge_attr2 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr2" + ) + + assert g1_hash_with_edge_attr1 == g2_hash_with_edge_attr1 + assert g1_hash_with_edge_attr2 == g2_hash_with_edge_attr2 + + +def test_missing_edge_attr(): + """ + If the 'edge_attr' argument is supplied but is missing from an edge in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_edges_from([(1, 2, {"edge_attr1": "a"}), (1, 3, {})]) + pytest.raises(KeyError, nx.weisfeiler_lehman_graph_hash, G, edge_attr="edge_attr1") + + +def test_isomorphic_node_attr(): + """ + Isomorphic graphs with differing node attributes should yield different graph + hashes if the 'node_attr' argument is supplied and populated in the graph, and + there are no hash collisions. 
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=400 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + g1_hash_with_node_attr1 = nx.weisfeiler_lehman_graph_hash( + G1, node_attr="node_attr1" + ) + g1_hash_with_node_attr2 = nx.weisfeiler_lehman_graph_hash( + G1, node_attr="node_attr2" + ) + g1_hash_no_node_attr = nx.weisfeiler_lehman_graph_hash(G1, node_attr=None) + + assert g1_hash_with_node_attr1 != g1_hash_no_node_attr + assert g1_hash_with_node_attr2 != g1_hash_no_node_attr + assert g1_hash_with_node_attr1 != g1_hash_with_node_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_node_attr1 = nx.weisfeiler_lehman_graph_hash( + G2, node_attr="node_attr1" + ) + g2_hash_with_node_attr2 = nx.weisfeiler_lehman_graph_hash( + G2, node_attr="node_attr2" + ) + + assert g1_hash_with_node_attr1 == g2_hash_with_node_attr1 + assert g1_hash_with_node_attr2 == g2_hash_with_node_attr2 + + +def test_missing_node_attr(): + """ + If the 'node_attr' argument is supplied but is missing from a node in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_nodes_from([(1, {"node_attr1": "a"}), (2, {})]) + G.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 4)]) + pytest.raises(KeyError, nx.weisfeiler_lehman_graph_hash, G, node_attr="node_attr1") + + +def test_isomorphic_edge_attr_and_node_attr(): + """ + Isomorphic graphs with differing node attributes should yield different graph + hashes if the 'node_attr' and 'edge_attr' argument is supplied and populated in + the graph, and there are no hash collisions. + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=500 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_edge1_node1 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g1_hash_edge2_node2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr2", node_attr="node_attr2" + ) + g1_hash_edge1_node2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1", node_attr="node_attr2" + ) + g1_hash_no_attr = nx.weisfeiler_lehman_graph_hash(G1) + + assert g1_hash_edge1_node1 != g1_hash_no_attr + assert g1_hash_edge2_node2 != g1_hash_no_attr + assert g1_hash_edge1_node1 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge1_node1 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_edge1_node1 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g2_hash_edge2_node2 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr2", node_attr="node_attr2" + ) + + assert g1_hash_edge1_node1 == g2_hash_edge1_node1 + assert g1_hash_edge2_node2 == g2_hash_edge2_node2 + + +def test_digest_size(): + """ + The hash string lengths should be as expected for a variety of graphs and + digest sizes + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=1000 + i) + + h16 = nx.weisfeiler_lehman_graph_hash(G) + h32 = nx.weisfeiler_lehman_graph_hash(G, digest_size=32) + + assert h16 != h32 + assert 
len(h16) == 16 * 2 + assert len(h32) == 32 * 2 + + +def test_directed_bugs(): + """ + These were bugs for directed graphs as discussed in issue #7806 + """ + Ga = nx.DiGraph() + Gb = nx.DiGraph() + Ga.add_nodes_from([1, 2, 3, 4]) + Gb.add_nodes_from([1, 2, 3, 4]) + Ga.add_edges_from([(1, 2), (3, 2)]) + Gb.add_edges_from([(1, 2), (3, 4)]) + Ga_hash = nx.weisfeiler_lehman_graph_hash(Ga) + Gb_hash = nx.weisfeiler_lehman_graph_hash(Gb) + assert Ga_hash != Gb_hash + + Tree1 = nx.DiGraph() + Tree1.add_edges_from([(0, 4), (1, 5), (2, 6), (3, 7)]) + Tree1.add_edges_from([(4, 8), (5, 8), (6, 9), (7, 9)]) + Tree1.add_edges_from([(8, 10), (9, 10)]) + nx.set_node_attributes( + Tree1, {10: "s", 8: "a", 9: "a", 4: "b", 5: "b", 6: "b", 7: "b"}, "weight" + ) + Tree2 = copy.deepcopy(Tree1) + nx.set_node_attributes(Tree1, {0: "d", 1: "c", 2: "d", 3: "c"}, "weight") + nx.set_node_attributes(Tree2, {0: "d", 1: "d", 2: "c", 3: "c"}, "weight") + Tree1_hash_short = nx.weisfeiler_lehman_graph_hash( + Tree1, iterations=1, node_attr="weight" + ) + Tree2_hash_short = nx.weisfeiler_lehman_graph_hash( + Tree2, iterations=1, node_attr="weight" + ) + assert Tree1_hash_short == Tree2_hash_short + Tree1_hash = nx.weisfeiler_lehman_graph_hash( + Tree1, node_attr="weight" + ) # Default is 3 iterations + Tree2_hash = nx.weisfeiler_lehman_graph_hash(Tree2, node_attr="weight") + assert Tree1_hash != Tree2_hash + + +def test_trivial_labels_isomorphism(): + """ + Trivial labelling of the graph should not change isomorphism verdicts. + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=500 + i) + G2 = nx.erdos_renyi_graph(n, p * i, seed=42 + i) + G1_hash = nx.weisfeiler_lehman_graph_hash(G1) + G2_hash = nx.weisfeiler_lehman_graph_hash(G2) + equal = G1_hash == G2_hash + + nx.set_node_attributes(G1, values=1, name="weight") + nx.set_node_attributes(G2, values=1, name="weight") + G1_hash_node = nx.weisfeiler_lehman_graph_hash(G1, node_attr="weight") + G2_hash_node = nx.weisfeiler_lehman_graph_hash(G2, node_attr="weight") + equal_node = G1_hash_node == G2_hash_node + + nx.set_edge_attributes(G1, values="a", name="e_weight") + nx.set_edge_attributes(G2, values="a", name="e_weight") + G1_hash_edge = nx.weisfeiler_lehman_graph_hash(G1, edge_attr="e_weight") + G2_hash_edge = nx.weisfeiler_lehman_graph_hash(G2, edge_attr="e_weight") + equal_edge = G1_hash_edge == G2_hash_edge + + G1_hash_both = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="e_weight", node_attr="weight" + ) + G2_hash_both = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="e_weight", node_attr="weight" + ) + equal_both = G1_hash_both == G2_hash_both + + assert equal == equal_node + assert equal_node == equal_edge + assert equal_edge == equal_both + + +def test_trivial_labels_isomorphism_directed(): + """ + Trivial labelling of the graph should not change isomorphism verdicts on digraphs. 
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G1 = nx.erdos_renyi_graph(n, p * i, directed=True, seed=500 + i)
+        G2 = nx.erdos_renyi_graph(n, p * i, directed=True, seed=42 + i)
+        G1_hash = nx.weisfeiler_lehman_graph_hash(G1)
+        G2_hash = nx.weisfeiler_lehman_graph_hash(G2)
+        equal = G1_hash == G2_hash
+
+        nx.set_node_attributes(G1, values=1, name="weight")
+        nx.set_node_attributes(G2, values=1, name="weight")
+        G1_hash_node = nx.weisfeiler_lehman_graph_hash(G1, node_attr="weight")
+        G2_hash_node = nx.weisfeiler_lehman_graph_hash(G2, node_attr="weight")
+        equal_node = G1_hash_node == G2_hash_node
+
+        nx.set_edge_attributes(G1, values="a", name="e_weight")
+        nx.set_edge_attributes(G2, values="a", name="e_weight")
+        G1_hash_edge = nx.weisfeiler_lehman_graph_hash(G1, edge_attr="e_weight")
+        G2_hash_edge = nx.weisfeiler_lehman_graph_hash(G2, edge_attr="e_weight")
+        equal_edge = G1_hash_edge == G2_hash_edge
+
+        G1_hash_both = nx.weisfeiler_lehman_graph_hash(
+            G1, edge_attr="e_weight", node_attr="weight"
+        )
+        G2_hash_both = nx.weisfeiler_lehman_graph_hash(
+            G2, edge_attr="e_weight", node_attr="weight"
+        )
+        equal_both = G1_hash_both == G2_hash_both
+
+        assert equal == equal_node
+        assert equal_node == equal_edge
+        assert equal_edge == equal_both
+
+    # Specific case that was found to be a bug in issue #7806
+    # Without weights it worked
+    Ga = nx.DiGraph()
+    Ga.add_nodes_from([1, 2, 3, 4])
+    Gb = copy.deepcopy(Ga)
+    Ga.add_edges_from([(1, 2), (3, 2)])
+    Gb.add_edges_from([(1, 2), (3, 4)])
+    Ga_hash = nx.weisfeiler_lehman_graph_hash(Ga)
+    Gb_hash = nx.weisfeiler_lehman_graph_hash(Gb)
+    assert Ga_hash != Gb_hash
+
+    # Now with trivial weights
+    nx.set_node_attributes(Ga, values=1, name="weight")
+    nx.set_node_attributes(Gb, values=1, name="weight")
+    Ga_hash = nx.weisfeiler_lehman_graph_hash(Ga, node_attr="weight")
+    Gb_hash = nx.weisfeiler_lehman_graph_hash(Gb, node_attr="weight")
+    assert Ga_hash != Gb_hash
+
+
+def test_trivial_labels_hashes():
+    """
+    Test that 'empty' labelling of nodes or edges shouldn't have a different
+    impact on the calculated hash. Note that we cannot assume that trivial
+    weights have no impact at all. Without (trivial) weights, a node will
+    start with hashing its degree. This step is omitted when there are weights.
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G1 = nx.erdos_renyi_graph(n, p * i, seed=500 + i)
+        nx.set_node_attributes(G1, values="", name="weight")
+        first = nx.weisfeiler_lehman_graph_hash(G1, node_attr="weight")
+        nx.set_edge_attributes(G1, values="", name="e_weight")
+        second = nx.weisfeiler_lehman_graph_hash(G1, edge_attr="e_weight")
+        assert first == second
+        third = nx.weisfeiler_lehman_graph_hash(
+            G1, edge_attr="e_weight", node_attr="weight"
+        )
+        assert second == third
+
+
+# Unit tests for the :func:`~networkx.weisfeiler_lehman_subgraph_hashes` function
+
+
+def is_subiteration(a, b):
+    """
+    Returns True if each hash sequence in 'a' is a prefix of
+    the corresponding sequence indexed by the same node in 'b'.
+    """
+    return all(b[node][: len(hashes)] == hashes for node, hashes in a.items())
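+
+
+# Editorial sketch, not upstream code: is_subiteration works because
+# weisfeiler_lehman_subgraph_hashes emits one hash per node per iteration,
+# so a deeper run only appends to each node's hash list (absent collisions).
+_sketch_G = nx.path_graph(4)
+_sketch_a = nx.weisfeiler_lehman_subgraph_hashes(_sketch_G, iterations=2)
+_sketch_b = nx.weisfeiler_lehman_subgraph_hashes(_sketch_G, iterations=3)
+assert all(_sketch_b[n][:2] == _sketch_a[n] for n in _sketch_G)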
+
+
+def hexdigest_sizes_correct(a, digest_size):
+    """
+    Returns True if all hex digest sizes are the expected length in a
+    node:subgraph-hashes dictionary. Hex digest string length == 2 * bytes
+    digest length, since each pair of hex digits encodes 1 byte
+    (https://docs.python.org/3/library/hashlib.html).
+    """
+    hexdigest_size = digest_size * 2
+
+    def list_digest_sizes_correct(l):
+        return all(len(x) == hexdigest_size for x in l)
+
+    return all(list_digest_sizes_correct(hashes) for hashes in a.values())
+
+
+def test_empty_graph_subgraph_hash():
+    """
+    Empty graphs should give empty dict subgraph hashes regardless of other params.
+    """
+    G = nx.empty_graph()
+
+    subgraph_hashes1 = nx.weisfeiler_lehman_subgraph_hashes(G)
+    subgraph_hashes2 = nx.weisfeiler_lehman_subgraph_hashes(G, edge_attr="edge_attr")
+    subgraph_hashes3 = nx.weisfeiler_lehman_subgraph_hashes(G, node_attr="edge_attr")
+    subgraph_hashes4 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=2)
+    subgraph_hashes5 = nx.weisfeiler_lehman_subgraph_hashes(G, digest_size=64)
+
+    assert subgraph_hashes1 == {}
+    assert subgraph_hashes2 == {}
+    assert subgraph_hashes3 == {}
+    assert subgraph_hashes4 == {}
+    assert subgraph_hashes5 == {}
+
+
+def test_directed_subgraph_hash():
+    """
+    A directed graph with no bi-directional edges should yield different subgraph
+    hashes to the same graph taken as undirected, if all hashes don't collide.
+    """
+    r = 10
+    for i in range(r):
+        G_directed = nx.gn_graph(10 + r, seed=100 + i)
+        G_undirected = nx.to_undirected(G_directed)
+
+        directed_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G_directed)
+        undirected_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G_undirected)
+
+        assert directed_subgraph_hashes != undirected_subgraph_hashes
+
+
+def test_reversed_subgraph_hash():
+    """
+    A directed graph with no bi-directional edges should yield different subgraph
+    hashes to the same graph taken with edge directions reversed if there are no
+    hash collisions. Here we test a cycle graph which is the minimal counterexample.
+    """
+    G = nx.cycle_graph(5, create_using=nx.DiGraph)
+    nx.set_node_attributes(G, {n: str(n) for n in G.nodes()}, name="label")
+
+    G_reversed = G.reverse()
+
+    h = nx.weisfeiler_lehman_subgraph_hashes(G, node_attr="label")
+    h_reversed = nx.weisfeiler_lehman_subgraph_hashes(G_reversed, node_attr="label")
+
+    assert h != h_reversed
+
+
+def test_isomorphic_subgraph_hash():
+    """
+    The subgraph hashes should be invariant to node-relabeling when the output is
+    reindexed by the same mapping and all hashes don't collide.
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G1 = nx.erdos_renyi_graph(n, p * i, seed=200 + i)
+        G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()})
+
+        g1_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G1)
+        g2_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G2)
+
+        assert g1_subgraph_hashes == {-1 * k: v for k, v in g2_subgraph_hashes.items()}
+
+
+def test_isomorphic_edge_attr_subgraph_hash():
+    """
+    Isomorphic graphs with differing edge attributes should yield different subgraph
+    hashes if the 'edge_attr' argument is supplied and populated in the graph, and
+    all hashes don't collide.
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=300 + i) + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_with_edge_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1" + ) + g1_hash_with_edge_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr2" + ) + g1_hash_no_edge_attr = nx.weisfeiler_lehman_subgraph_hashes(G1, edge_attr=None) + + assert g1_hash_with_edge_attr1 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr2 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr1 != g1_hash_with_edge_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_edge_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr1" + ) + g2_hash_with_edge_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr2" + ) + + assert g1_hash_with_edge_attr1 == { + -1 * k: v for k, v in g2_hash_with_edge_attr1.items() + } + assert g1_hash_with_edge_attr2 == { + -1 * k: v for k, v in g2_hash_with_edge_attr2.items() + } + + +def test_missing_edge_attr_subgraph_hash(): + """ + If the 'edge_attr' argument is supplied but is missing from an edge in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_edges_from([(1, 2, {"edge_attr1": "a"}), (1, 3, {})]) + pytest.raises( + KeyError, nx.weisfeiler_lehman_subgraph_hashes, G, edge_attr="edge_attr1" + ) + + +def test_isomorphic_node_attr_subgraph_hash(): + """ + Isomorphic graphs with differing node attributes should yield different subgraph + hashes if the 'node_attr' argument is supplied and populated in the graph, and + all hashes don't collide. 
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=400 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + g1_hash_with_node_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, node_attr="node_attr1" + ) + g1_hash_with_node_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, node_attr="node_attr2" + ) + g1_hash_no_node_attr = nx.weisfeiler_lehman_subgraph_hashes(G1, node_attr=None) + + assert g1_hash_with_node_attr1 != g1_hash_no_node_attr + assert g1_hash_with_node_attr2 != g1_hash_no_node_attr + assert g1_hash_with_node_attr1 != g1_hash_with_node_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_node_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, node_attr="node_attr1" + ) + g2_hash_with_node_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, node_attr="node_attr2" + ) + + assert g1_hash_with_node_attr1 == { + -1 * k: v for k, v in g2_hash_with_node_attr1.items() + } + assert g1_hash_with_node_attr2 == { + -1 * k: v for k, v in g2_hash_with_node_attr2.items() + } + + +def test_missing_node_attr_subgraph_hash(): + """ + If the 'node_attr' argument is supplied but is missing from a node in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_nodes_from([(1, {"node_attr1": "a"}), (2, {})]) + G.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 4)]) + pytest.raises( + KeyError, nx.weisfeiler_lehman_subgraph_hashes, G, node_attr="node_attr1" + ) + + +def test_isomorphic_edge_attr_and_node_attr_subgraph_hash(): + """ + Isomorphic graphs with differing node attributes should yield different subgraph + hashes if the 'node_attr' and 'edge_attr' argument is supplied and populated in + the graph, and all hashes don't collide + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=500 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_edge1_node1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g1_hash_edge2_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr2", node_attr="node_attr2" + ) + g1_hash_edge1_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1", node_attr="node_attr2" + ) + g1_hash_no_attr = nx.weisfeiler_lehman_subgraph_hashes(G1) + + assert g1_hash_edge1_node1 != g1_hash_no_attr + assert g1_hash_edge2_node2 != g1_hash_no_attr + assert g1_hash_edge1_node1 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge1_node1 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_edge1_node1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g2_hash_edge2_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr2", node_attr="node_attr2" + ) + + assert g1_hash_edge1_node1 == { + -1 * k: v for k, v in g2_hash_edge1_node1.items() + } + assert g1_hash_edge2_node2 == { + -1 * k: v for k, v in g2_hash_edge2_node2.items() + } + + +def test_iteration_depth(): + """ + All nodes should have the correct number of subgraph hashes in the output when + using degree as 
initial node labels. + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=600 + i) + + depth3 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=3) + depth4 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=4) + depth5 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=5) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_edge_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels empty and using an edge attribute when aggregating + neighborhoods. + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=700 + i) + + for a, b in G.edges: + G[a][b]["edge_attr1"] = f"{a}-{b}-1" + + depth3 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=3 + ) + depth4 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=4 + ) + depth5 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=5 + ) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_node_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels to an attribute. + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=800 + i) + + for u in G.nodes(): + G.nodes[u]["node_attr1"] = f"{u}-1" + + depth3 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=3 + ) + depth4 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=4 + ) + depth5 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=5 + ) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_node_edge_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels to an attribute and also using an edge attribute when + aggregating neighborhoods. 
+    Subsequent iteration depths for the same graph should be additive for each node
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G = nx.erdos_renyi_graph(n, p * i, seed=900 + i)
+
+        for u in G.nodes():
+            G.nodes[u]["node_attr1"] = f"{u}-1"
+
+        for a, b in G.edges:
+            G[a][b]["edge_attr1"] = f"{a}-{b}-1"
+
+        depth3 = nx.weisfeiler_lehman_subgraph_hashes(
+            G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=3
+        )
+        depth4 = nx.weisfeiler_lehman_subgraph_hashes(
+            G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=4
+        )
+        depth5 = nx.weisfeiler_lehman_subgraph_hashes(
+            G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=5
+        )
+
+        assert all(len(hashes) == 3 for hashes in depth3.values())
+        assert all(len(hashes) == 4 for hashes in depth4.values())
+        assert all(len(hashes) == 5 for hashes in depth5.values())
+
+        assert is_subiteration(depth3, depth4)
+        assert is_subiteration(depth4, depth5)
+        assert is_subiteration(depth3, depth5)
+
+
+def test_digest_size_subgraph_hash():
+    """
+    The hash string lengths should be as expected for a variety of graphs and
+    digest sizes
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G = nx.erdos_renyi_graph(n, p * i, seed=1000 + i)
+
+        digest_size16_hashes = nx.weisfeiler_lehman_subgraph_hashes(G)
+        digest_size32_hashes = nx.weisfeiler_lehman_subgraph_hashes(G, digest_size=32)
+
+        assert digest_size16_hashes != digest_size32_hashes
+
+        assert hexdigest_sizes_correct(digest_size16_hashes, 16)
+        assert hexdigest_sizes_correct(digest_size32_hashes, 32)
+
+
+def test_initial_node_labels_subgraph_hash():
+    """
+    Including the hashed initial label prepends an extra hash to the lists
+    """
+    G = nx.path_graph(5)
+    nx.set_node_attributes(G, {i: int(0 < i < 4) for i in G}, "label")
+    # initial node labels:
+    # 0--1--1--1--0
+
+    without_initial_label = nx.weisfeiler_lehman_subgraph_hashes(G, node_attr="label")
+    assert all(len(v) == 3 for v in without_initial_label.values())
+    # 3 different 1-hop neighborhoods
+    assert len({v[0] for v in without_initial_label.values()}) == 3
+
+    with_initial_label = nx.weisfeiler_lehman_subgraph_hashes(
+        G, node_attr="label", include_initial_labels=True
+    )
+    assert all(len(v) == 4 for v in with_initial_label.values())
+    # 2 different initial labels
+    assert len({v[0] for v in with_initial_label.values()}) == 2
+
+    # check hashes match otherwise
+    for u in G:
+        for a, b in zip(
+            with_initial_label[u][1:], without_initial_label[u], strict=True
+        ):
+            assert a == b
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_graphical.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_graphical.py
new file mode 100644
index 0000000..99f766f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_graphical.py
@@ -0,0 +1,163 @@
+import pytest
+
+import networkx as nx
+
+
+def test_valid_degree_sequence1():
+    n = 100
+    p = 0.3
+    for i in range(10):
+        G = nx.erdos_renyi_graph(n, p)
+        # use a list; a generator would be exhausted by the first
+        # is_graphical call, leaving the second to check an empty sequence
+        deg = [d for n, d in G.degree()]
+        assert nx.is_graphical(deg, method="eg")
+        assert nx.is_graphical(deg, method="hh")
+
+
+def test_valid_degree_sequence2():
+    n = 100
+    for i in range(10):
+        G = nx.barabasi_albert_graph(n, 1)
+        deg = [d for n, d in G.degree()]
+        assert nx.is_graphical(deg, method="eg")
+        assert nx.is_graphical(deg, method="hh")
+
+
+def test_string_input():
+    pytest.raises(nx.NetworkXException, nx.is_graphical, [], "foo")
+    pytest.raises(nx.NetworkXException, nx.is_graphical, ["red"], "hh")
+    pytest.raises(nx.NetworkXException, nx.is_graphical, ["red"], "eg")
+
+
+def test_non_integer_input():
+    pytest.raises(nx.NetworkXException, nx.is_graphical, [72.5], "eg")
+    pytest.raises(nx.NetworkXException, nx.is_graphical, [72.5], "hh")
+
+
+def test_negative_input():
+    assert not nx.is_graphical([-1], "hh")
+    assert not nx.is_graphical([-1], "eg")
+
+
+class TestAtlas:
+    @classmethod
+    def setup_class(cls):
+        global atlas
+        from networkx.generators import atlas
+
+        cls.GAG = atlas.graph_atlas_g()
+
+    def test_atlas(self):
+        for graph in self.GAG:
+            deg = [d for n, d in graph.degree()]
+            assert nx.is_graphical(deg, method="eg")
+            assert nx.is_graphical(deg, method="hh")
+
+
+def test_small_graph_true():
+    z = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
+    assert nx.is_graphical(z, method="hh")
+    assert nx.is_graphical(z, method="eg")
+    z = [10, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2]
+    assert nx.is_graphical(z, method="hh")
+    assert nx.is_graphical(z, method="eg")
+    z = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+    assert nx.is_graphical(z, method="hh")
+    assert nx.is_graphical(z, method="eg")
+
+
+def test_small_graph_false():
+    z = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
+    assert not nx.is_graphical(z, method="hh")
+    assert not nx.is_graphical(z, method="eg")
+    z = [6, 5, 4, 4, 2, 1, 1, 1]
+    assert not nx.is_graphical(z, method="hh")
+    assert not nx.is_graphical(z, method="eg")
+    z = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+    assert not nx.is_graphical(z, method="hh")
+    assert not nx.is_graphical(z, method="eg")
+
+
+def test_directed_degree_sequence():
+    # Test a range of valid directed degree sequences
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(r):
+        G = nx.erdos_renyi_graph(n, p * (i + 1), None, True)
+        din = (d for n, d in G.in_degree())
+        dout = (d for n, d in G.out_degree())
+        assert nx.is_digraphical(din, dout)
+
+
+def test_small_directed_sequences():
+    dout = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
+    din = [3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1]
+    assert nx.is_digraphical(din, dout)
+    # Test nongraphical directed sequence
+    dout = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
+    din = [103, 102, 102, 102, 102, 102, 102, 102, 102, 102]
+    assert not nx.is_digraphical(din, dout)
+    # Test digraphical small sequence
+    dout = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+    din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1]
+    assert nx.is_digraphical(din, dout)
+    # Test nonmatching sum
+    din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1]
+    assert not nx.is_digraphical(din, dout)
+    # Test for negative integer in sequence
+    din = [2, 2, 2, -2, 2, 2, 2, 2, 1, 1, 4]
+    assert not nx.is_digraphical(din, dout)
+    # Test for noninteger
+    din = dout = [1, 1, 1.1, 1]
+    assert not nx.is_digraphical(din, dout)
+    din = dout = [1, 1, "rer", 1]
+    assert not nx.is_digraphical(din, dout)
+
+
+def test_multi_sequence():
+    # Test nongraphical multi sequence
+    seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1]
+    assert not nx.is_multigraphical(seq)
+    # Test small graphical multi sequence
+    seq = [6, 5, 4, 4, 2, 1, 1, 1]
+    assert nx.is_multigraphical(seq)
+    # Test for negative integer in sequence
+    seq = [6, 5, 4, -4, 2, 1, 1, 1]
+    assert not nx.is_multigraphical(seq)
+    # Test for sequence with odd sum
+    seq = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+    assert not nx.is_multigraphical(seq)
+    # Test for noninteger
+    seq = [1, 1, 1.1, 1]
+    assert not nx.is_multigraphical(seq)
+    seq = [1, 1, "rer", 1]
+    assert not nx.is_multigraphical(seq)
+
+
+def test_pseudo_sequence():
+    # Test small valid pseudo sequence
+    seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1]
+    assert nx.is_pseudographical(seq)
+    # Test for sequence with odd sum
+
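# (every edge of a pseudograph, including a self-loop, contributes 2,
+    # so the degree sum must be even; an odd total is never realizable)
+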
seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + assert not nx.is_pseudographical(seq) + # Test for negative integer in sequence + seq = [1000, 3, 3, 3, 3, 2, 2, -2, 1, 1] + assert not nx.is_pseudographical(seq) + # Test for noninteger + seq = [1, 1, 1.1, 1] + assert not nx.is_pseudographical(seq) + seq = [1, 1, "rer", 1] + assert not nx.is_pseudographical(seq) + + +def test_numpy_degree_sequence(): + np = pytest.importorskip("numpy") + ds = np.array([1, 2, 2, 2, 1], dtype=np.int64) + assert nx.is_graphical(ds, "eg") + assert nx.is_graphical(ds, "hh") + ds = np.array([1, 2, 2, 2, 1], dtype=np.float64) + assert nx.is_graphical(ds, "eg") + assert nx.is_graphical(ds, "hh") + ds = np.array([1.1, 2, 2, 2, 1], dtype=np.float64) + pytest.raises(nx.NetworkXException, nx.is_graphical, ds, "eg") + pytest.raises(nx.NetworkXException, nx.is_graphical, ds, "hh") diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_hierarchy.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_hierarchy.py new file mode 100644 index 0000000..eaa6a67 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_hierarchy.py @@ -0,0 +1,46 @@ +import pytest + +import networkx as nx + + +def test_hierarchy_undirected(): + G = nx.cycle_graph(5) + pytest.raises(nx.NetworkXError, nx.flow_hierarchy, G) + + +def test_hierarchy_cycle(): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + assert nx.flow_hierarchy(G) == 0.0 + + +def test_hierarchy_tree(): + G = nx.full_rary_tree(2, 16, create_using=nx.DiGraph()) + assert nx.flow_hierarchy(G) == 1.0 + + +def test_hierarchy_1(): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 1), (3, 4), (0, 4)]) + assert nx.flow_hierarchy(G) == 0.5 + + +def test_hierarchy_weight(): + G = nx.DiGraph() + G.add_edges_from( + [ + (0, 1, {"weight": 0.3}), + (1, 2, {"weight": 0.1}), + (2, 3, {"weight": 0.1}), + (3, 1, {"weight": 0.1}), + (3, 4, {"weight": 0.3}), + (0, 4, {"weight": 0.3}), + ] + ) + assert nx.flow_hierarchy(G, weight="weight") == 0.75 + + +@pytest.mark.parametrize("n", (0, 1, 3)) +def test_hierarchy_empty_graph(n): + G = nx.empty_graph(n, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError, match=".*not applicable to empty graphs"): + nx.flow_hierarchy(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_hybrid.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_hybrid.py new file mode 100644 index 0000000..6af0016 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_hybrid.py @@ -0,0 +1,24 @@ +import networkx as nx + + +def test_2d_grid_graph(): + # FC article claims 2d grid graph of size n is (3,3)-connected + # and (5,9)-connected, but I don't think it is (5,9)-connected + G = nx.grid_2d_graph(8, 8, periodic=True) + assert nx.is_kl_connected(G, 3, 3) + assert not nx.is_kl_connected(G, 5, 9) + (H, graphOK) = nx.kl_connected_subgraph(G, 5, 9, same_as_graph=True) + assert not graphOK + + +def test_small_graph(): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(2, 3) + assert nx.is_kl_connected(G, 2, 2) + H = nx.kl_connected_subgraph(G, 2, 2) + (H, graphOK) = nx.kl_connected_subgraph( + G, 2, 2, low_memory=True, same_as_graph=True + ) + assert graphOK diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_isolate.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_isolate.py new file mode 100644 index 0000000..d29b306 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_isolate.py @@ -0,0 +1,26 @@ +"""Unit tests for the :mod:`networkx.algorithms.isolates` module.""" + +import networkx as nx + + +def test_is_isolate(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_node(2) + assert not nx.is_isolate(G, 0) + assert not nx.is_isolate(G, 1) + assert nx.is_isolate(G, 2) + + +def test_isolates(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_nodes_from([2, 3]) + assert sorted(nx.isolates(G)) == [2, 3] + + +def test_number_of_isolates(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_nodes_from([2, 3]) + assert nx.number_of_isolates(G) == 2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_link_prediction.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_link_prediction.py new file mode 100644 index 0000000..1b8ccf2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_link_prediction.py @@ -0,0 +1,593 @@ +import math +from functools import partial + +import pytest + +import networkx as nx + + +def _test_func(G, ebunch, expected, predict_func, **kwargs): + result = predict_func(G, ebunch, **kwargs) + exp_dict = {tuple(sorted([u, v])): score for u, v, score in expected} + res_dict = {tuple(sorted([u, v])): score for u, v, score in result} + + assert len(exp_dict) == len(res_dict) + for p in exp_dict: + assert exp_dict[p] == pytest.approx(res_dict[p], abs=1e-7) + + +class TestResourceAllocationIndex: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.resource_allocation_index) + cls.test = staticmethod(partial(_test_func, predict_func=cls.func)) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 0.75)]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 0.5)]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 0.25)]) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + pytest.raises( + nx.NetworkXNotImplemented, self.func, graph_type([(0, 1), (1, 2)]), [(0, 2)] + ) + + def test_node_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 4)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + self.test(G, [(0, 0)], [(0, 0, 1)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 0.5), (1, 2, 0.5), (1, 3, 0)]) + + +class TestJaccardCoefficient: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.jaccard_coefficient) + cls.test = staticmethod(partial(_test_func, predict_func=cls.func)) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 0.6)]) + + def test_P4(self): + G = nx.path_graph(4) + self.test(G, [(0, 2)], [(0, 2, 0.5)]) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + pytest.raises( + nx.NetworkXNotImplemented, self.func, graph_type([(0, 1), (1, 2)]), [(0, 2)] + ) + + def test_node_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 4)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (2, 3)]) + self.test(G, 
[(0, 2)], [(0, 2, 0)]) + + def test_isolated_nodes(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 0.5), (1, 2, 0.5), (1, 3, 0)]) + + +class TestAdamicAdarIndex: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.adamic_adar_index) + cls.test = staticmethod(partial(_test_func, predict_func=cls.func)) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 3 / math.log(4))]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 1 / math.log(2))]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 1 / math.log(4))]) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + pytest.raises( + nx.NetworkXNotImplemented, self.func, graph_type([(0, 1), (1, 2)]), [(0, 2)] + ) + + def test_node_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 4)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + self.test(G, [(0, 0)], [(0, 0, 3 / math.log(3))]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test( + G, None, [(0, 3, 1 / math.log(2)), (1, 2, 1 / math.log(2)), (1, 3, 0)] + ) + + +class TestCommonNeighborCentrality: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.common_neighbor_centrality) + cls.test = staticmethod(partial(_test_func, predict_func=cls.func)) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 3.0)], alpha=1) + self.test(G, [(0, 1)], [(0, 1, 5.0)], alpha=0) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 1.25)], alpha=0.5) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 1.75)], alpha=0.5) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + pytest.raises( + nx.NetworkXNotImplemented, self.func, graph_type([(0, 1), (1, 2)]), [(0, 2)] + ) + + def test_node_u_not_found(self): + G = nx.Graph() + G.add_edges_from([(1, 3), (2, 3)]) + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 1)]) + + def test_node_v_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 4)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + pytest.raises(nx.NetworkXAlgorithmError, self.test, G, [(0, 0)], []) + + def test_equal_nodes_with_alpha_one_raises_error(self): + G = nx.complete_graph(4) + pytest.raises(nx.NetworkXAlgorithmError, self.test, G, [(0, 0)], [], alpha=1.0) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 1.5), (1, 2, 1.5), (1, 3, 2 / 3)], alpha=0.5) + + +class TestPreferentialAttachment: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.preferential_attachment) + cls.test = staticmethod(partial(_test_func, predict_func=cls.func)) + + def test_K5(self): + G = nx.complete_graph(5) + 
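# in K5 every node has degree 4, so the preferential-attachment
+        # score for (0, 1) is deg(0) * deg(1) = 4 * 4 = 16
+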
self.test(G, [(0, 1)], [(0, 1, 16)]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 1)], [(0, 1, 2)]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(0, 2)], [(0, 2, 4)]) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + pytest.raises( + nx.NetworkXNotImplemented, self.func, graph_type([(0, 1), (1, 2)]), [(0, 2)] + ) + + def test_node_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 4)]) + + def test_zero_degrees(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 2), (1, 2, 2), (1, 3, 1)]) + + +class TestCNSoundarajanHopcroft: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.cn_soundarajan_hopcroft) + cls.test = staticmethod( + partial(_test_func, predict_func=cls.func, community="community") + ) + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 5)]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 1)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 2)]) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + G = graph_type([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_node_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 4)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 4)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 2)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + 
G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 4)]) + + def test_custom_community_attribute_name(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 1 + self.test(G, [(0, 3)], [(0, 3, 2)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 2), (1, 2, 1), (1, 3, 0)]) + + +class TestRAIndexSoundarajanHopcroft: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.ra_index_soundarajan_hopcroft) + cls.test = staticmethod( + partial(_test_func, predict_func=cls.func, community="community") + ) + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 0.5)]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 0)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 0.25)]) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + G = graph_type([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_node_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 4)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 1)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 1)]) + + def test_custom_community_attribute_name(self): + G = 
nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 0.5), (1, 2, 0), (1, 3, 0)]) + + +class TestWithinInterCluster: + @classmethod + def setup_class(cls): + cls.delta = 0.001 + cls.func = staticmethod(nx.within_inter_cluster) + cls.test = staticmethod( + partial( + _test_func, + predict_func=cls.func, + delta=cls.delta, + community="community", + ) + ) + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 2 / (1 + self.delta))]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 0)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 1 / self.delta)]) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + G = graph_type([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_node_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + pytest.raises(nx.NodeNotFound, self.func, G, [(0, 4)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 2 / self.delta)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)]) + + def test_no_inter_cluster_common_neighbor(self): + G = nx.complete_graph(4) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, [(0, 3)], [(0, 3, 2 / self.delta)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + 
G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 2 / self.delta)]) + + def test_invalid_delta(self): + G = nx.complete_graph(3) + G.add_nodes_from([0, 1, 2], community=0) + pytest.raises(nx.NetworkXAlgorithmError, self.func, G, [(0, 1)], 0) + pytest.raises(nx.NetworkXAlgorithmError, self.func, G, [(0, 1)], -0.5) + + def test_custom_community_attribute_name(self): + G = nx.complete_graph(4) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 0 + self.test(G, [(0, 3)], [(0, 3, 2 / self.delta)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 1 / self.delta), (1, 2, 0), (1, 3, 0)]) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py new file mode 100644 index 0000000..639a31f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py @@ -0,0 +1,459 @@ +from itertools import chain, combinations, product + +import pytest + +import networkx as nx + +tree_all_pairs_lca = nx.tree_all_pairs_lowest_common_ancestor +all_pairs_lca = nx.all_pairs_lowest_common_ancestor + + +def get_pair(dictionary, n1, n2): + if (n1, n2) in dictionary: + return dictionary[n1, n2] + else: + return dictionary[n2, n1] + + +class TestTreeLCA: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + cls.DG.add_edges_from(edges) + cls.ans = dict(tree_all_pairs_lca(cls.DG, 0)) + gold = {(n, n): n for n in cls.DG} + gold.update({(0, i): 0 for i in range(1, 7)}) + gold.update( + { + (1, 2): 0, + (1, 3): 1, + (1, 4): 1, + (1, 5): 0, + (1, 6): 0, + (2, 3): 0, + (2, 4): 0, + (2, 5): 2, + (2, 6): 2, + (3, 4): 1, + (3, 5): 0, + (3, 6): 0, + (4, 5): 0, + (4, 6): 0, + (5, 6): 2, + } + ) + + cls.gold = gold + + @staticmethod + def assert_has_same_pairs(d1, d2): + for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)): + assert get_pair(d1, a, b) == get_pair(d2, a, b) + + def test_tree_all_pairs_lca_default_root(self): + assert dict(tree_all_pairs_lca(self.DG)) == self.ans + + def test_tree_all_pairs_lca_return_subset(self): + test_pairs = [(0, 1), (0, 1), (1, 0)] + ans = dict(tree_all_pairs_lca(self.DG, 0, test_pairs)) + assert (0, 1) in ans and (1, 0) in ans + assert len(ans) == 2 + + def test_tree_all_pairs_lca(self): + all_pairs = chain(combinations(self.DG, 2), ((node, node) for node in self.DG)) + + ans = dict(tree_all_pairs_lca(self.DG, 0, all_pairs)) + self.assert_has_same_pairs(ans, self.ans) + + def test_tree_all_pairs_gold_example(self): + ans = dict(tree_all_pairs_lca(self.DG)) + self.assert_has_same_pairs(self.gold, ans) + + def test_tree_all_pairs_lca_invalid_input(self): + empty_digraph = tree_all_pairs_lca(nx.DiGraph()) + pytest.raises(nx.NetworkXPointlessConcept, list, empty_digraph) + + bad_pairs_digraph = tree_all_pairs_lca(self.DG, pairs=[(-1, -2)]) + pytest.raises(nx.NodeNotFound, list, bad_pairs_digraph) + + def test_tree_all_pairs_lca_subtrees(self): + ans = dict(tree_all_pairs_lca(self.DG, 1)) + gold = { + pair: lca + for (pair, lca) in self.gold.items() + if all(n in (1, 3, 4) for n in pair) + } + self.assert_has_same_pairs(gold, ans) 
+ + def test_tree_all_pairs_lca_disconnected_nodes(self): + G = nx.DiGraph() + G.add_node(1) + assert {(1, 1): 1} == dict(tree_all_pairs_lca(G)) + + G.add_node(0) + assert {(1, 1): 1} == dict(tree_all_pairs_lca(G, 1)) + assert {(0, 0): 0} == dict(tree_all_pairs_lca(G, 0)) + + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_error_if_input_not_tree(self): + # Cycle + G = nx.DiGraph([(1, 2), (2, 1)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + # DAG + G = nx.DiGraph([(0, 2), (1, 2)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_generator(self): + pairs = iter([(0, 1), (0, 1), (1, 0)]) + some_pairs = dict(tree_all_pairs_lca(self.DG, 0, pairs)) + assert (0, 1) in some_pairs and (1, 0) in some_pairs + assert len(some_pairs) == 2 + + def test_tree_all_pairs_lca_nonexisting_pairs_exception(self): + lca = tree_all_pairs_lca(self.DG, 0, [(-1, -1)]) + pytest.raises(nx.NodeNotFound, list, lca) + # check if node is None + lca = tree_all_pairs_lca(self.DG, None, [(-1, -1)]) + pytest.raises(nx.NodeNotFound, list, lca) + + def test_tree_all_pairs_lca_routine_bails_on_DAGs(self): + G = nx.DiGraph([(3, 4), (5, 4)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_not_implemented(self): + NNI = nx.NetworkXNotImplemented + G = nx.Graph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + + def test_tree_all_pairs_lca_trees_without_LCAs(self): + G = nx.DiGraph() + G.add_node(3) + ans = list(tree_all_pairs_lca(G)) + assert ans == [((3, 3), 3)] + + +class TestMultiTreeLCA(TestTreeLCA): + @classmethod + def setup_class(cls): + cls.DG = nx.MultiDiGraph() + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + cls.DG.add_edges_from(edges) + cls.ans = dict(tree_all_pairs_lca(cls.DG, 0)) + # add multiedges + cls.DG.add_edges_from(edges) + + gold = {(n, n): n for n in cls.DG} + gold.update({(0, i): 0 for i in range(1, 7)}) + gold.update( + { + (1, 2): 0, + (1, 3): 1, + (1, 4): 1, + (1, 5): 0, + (1, 6): 0, + (2, 3): 0, + (2, 4): 0, + (2, 5): 2, + (2, 6): 2, + (3, 4): 1, + (3, 5): 0, + (3, 6): 0, + (4, 5): 0, + (4, 6): 0, + (5, 6): 2, + } + ) + + cls.gold = gold + + +class TestDAGLCA: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + nx.add_path(cls.DG, (0, 1, 2, 3)) + nx.add_path(cls.DG, (0, 4, 3)) + nx.add_path(cls.DG, (0, 5, 6, 8, 3)) + nx.add_path(cls.DG, (5, 7, 8)) + cls.DG.add_edge(6, 2) + cls.DG.add_edge(7, 2) + + cls.root_distance = nx.shortest_path_length(cls.DG, source=0) + + cls.gold = { + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 0, + (1, 5): 0, + (1, 6): 0, + (1, 7): 0, + (1, 8): 0, + (2, 2): 2, + (2, 3): 2, + (2, 4): 0, + (2, 5): 5, + (2, 6): 6, + (2, 7): 7, + (2, 8): 7, + (3, 3): 3, + (3, 4): 4, + (3, 5): 5, + (3, 6): 6, + (3, 7): 7, + (3, 8): 8, + (4, 4): 4, + (4, 5): 0, + (4, 6): 0, + (4, 7): 0, + (4, 8): 0, + (5, 5): 5, + (5, 6): 5, + (5, 7): 5, + (5, 8): 5, + (6, 6): 6, + (6, 7): 5, + (6, 8): 6, + (7, 7): 7, + (7, 8): 7, + (8, 8): 8, + } + cls.gold.update(((0, n), 0) for n in cls.DG) + + def assert_lca_dicts_same(self, d1, d2, G=None): + """Checks if d1 and d2 contain the same pairs and + have a node at 
the same distance from root for each. + If G is None use self.DG.""" + if G is None: + G = self.DG + root_distance = self.root_distance + else: + roots = [n for n, deg in G.in_degree if deg == 0] + assert len(roots) == 1 + root_distance = nx.shortest_path_length(G, source=roots[0]) + + for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)): + assert ( + root_distance[get_pair(d1, a, b)] == root_distance[get_pair(d2, a, b)] + ) + + def test_all_pairs_lca_gold_example(self): + self.assert_lca_dicts_same(dict(all_pairs_lca(self.DG)), self.gold) + + def test_all_pairs_lca_all_pairs_given(self): + all_pairs = list(product(self.DG.nodes(), self.DG.nodes())) + ans = all_pairs_lca(self.DG, pairs=all_pairs) + self.assert_lca_dicts_same(dict(ans), self.gold) + + def test_all_pairs_lca_generator(self): + all_pairs = product(self.DG.nodes(), self.DG.nodes()) + ans = all_pairs_lca(self.DG, pairs=all_pairs) + self.assert_lca_dicts_same(dict(ans), self.gold) + + def test_all_pairs_lca_input_graph_with_two_roots(self): + G = self.DG.copy() + G.add_edge(9, 10) + G.add_edge(9, 4) + gold = self.gold.copy() + gold[9, 9] = 9 + gold[9, 10] = 9 + gold[9, 4] = 9 + gold[9, 3] = 9 + gold[10, 4] = 9 + gold[10, 3] = 9 + gold[10, 10] = 10 + + testing = dict(all_pairs_lca(G)) + + G.add_edge(-1, 9) + G.add_edge(-1, 0) + self.assert_lca_dicts_same(testing, gold, G) + + def test_all_pairs_lca_nonexisting_pairs_exception(self): + pytest.raises(nx.NodeNotFound, all_pairs_lca, self.DG, [(-1, -1)]) + + def test_all_pairs_lca_pairs_without_lca(self): + G = self.DG.copy() + G.add_node(-1) + gen = all_pairs_lca(G, [(-1, -1), (-1, 0)]) + assert dict(gen) == {(-1, -1): -1} + + def test_all_pairs_lca_null_graph(self): + pytest.raises(nx.NetworkXPointlessConcept, all_pairs_lca, nx.DiGraph()) + + def test_all_pairs_lca_non_dags(self): + pytest.raises(nx.NetworkXError, all_pairs_lca, nx.DiGraph([(3, 4), (4, 3)])) + + def test_all_pairs_lca_nonempty_graph_without_lca(self): + G = nx.DiGraph() + G.add_node(3) + ans = list(all_pairs_lca(G)) + assert ans == [((3, 3), 3)] + + def test_all_pairs_lca_bug_gh4942(self): + G = nx.DiGraph([(0, 2), (1, 2), (2, 3)]) + ans = list(all_pairs_lca(G)) + assert len(ans) == 9 + + def test_all_pairs_lca_default_kwarg(self): + G = nx.DiGraph([(0, 1), (2, 1)]) + sentinel = object() + assert nx.lowest_common_ancestor(G, 0, 2, default=sentinel) is sentinel + + def test_all_pairs_lca_identity(self): + G = nx.DiGraph() + G.add_node(3) + assert nx.lowest_common_ancestor(G, 3, 3) == 3 + + def test_all_pairs_lca_issue_4574(self): + G = nx.DiGraph() + G.add_nodes_from(range(17)) + G.add_edges_from( + [ + (2, 0), + (1, 2), + (3, 2), + (5, 2), + (8, 2), + (11, 2), + (4, 5), + (6, 5), + (7, 8), + (10, 8), + (13, 11), + (14, 11), + (15, 11), + (9, 10), + (12, 13), + (16, 15), + ] + ) + + assert nx.lowest_common_ancestor(G, 7, 9) is None + + def test_all_pairs_lca_one_pair_gh4942(self): + G = nx.DiGraph() + # Note: order edge addition is critical to the test + G.add_edge(0, 1) + G.add_edge(2, 0) + G.add_edge(2, 3) + G.add_edge(4, 0) + G.add_edge(5, 2) + + assert nx.lowest_common_ancestor(G, 1, 3) == 2 + + +class TestMultiDiGraph_DAGLCA(TestDAGLCA): + @classmethod + def setup_class(cls): + cls.DG = nx.MultiDiGraph() + nx.add_path(cls.DG, (0, 1, 2, 3)) + # add multiedges + nx.add_path(cls.DG, (0, 1, 2, 3)) + nx.add_path(cls.DG, (0, 4, 3)) + nx.add_path(cls.DG, (0, 5, 6, 8, 3)) + nx.add_path(cls.DG, (5, 7, 8)) + cls.DG.add_edge(6, 2) + cls.DG.add_edge(7, 2) + + cls.root_distance = nx.shortest_path_length(cls.DG, 
source=0) + + cls.gold = { + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 0, + (1, 5): 0, + (1, 6): 0, + (1, 7): 0, + (1, 8): 0, + (2, 2): 2, + (2, 3): 2, + (2, 4): 0, + (2, 5): 5, + (2, 6): 6, + (2, 7): 7, + (2, 8): 7, + (3, 3): 3, + (3, 4): 4, + (3, 5): 5, + (3, 6): 6, + (3, 7): 7, + (3, 8): 8, + (4, 4): 4, + (4, 5): 0, + (4, 6): 0, + (4, 7): 0, + (4, 8): 0, + (5, 5): 5, + (5, 6): 5, + (5, 7): 5, + (5, 8): 5, + (6, 6): 6, + (6, 7): 5, + (6, 8): 6, + (7, 7): 7, + (7, 8): 7, + (8, 8): 8, + } + cls.gold.update(((0, n), 0) for n in cls.DG) + + +def test_all_pairs_lca_self_ancestors(): + """Self-ancestors should always be the node itself, i.e. lca of (0, 0) is 0. + See gh-4458.""" + # DAG for test - note order of node/edge addition is relevant + G = nx.DiGraph() + G.add_nodes_from(range(5)) + G.add_edges_from([(1, 0), (2, 0), (3, 2), (4, 1), (4, 3)]) + + ap_lca = nx.all_pairs_lowest_common_ancestor + assert all(u == v == a for (u, v), a in ap_lca(G) if u == v) + MG = nx.MultiDiGraph(G) + assert all(u == v == a for (u, v), a in ap_lca(MG) if u == v) + MG.add_edges_from([(1, 0), (2, 0)]) + assert all(u == v == a for (u, v), a in ap_lca(MG) if u == v) + + +def test_lca_on_null_graph(): + G = nx.null_graph(create_using=nx.DiGraph) + with pytest.raises( + nx.NetworkXPointlessConcept, match="LCA meaningless on null graphs" + ): + nx.lowest_common_ancestor(G, 0, 0) + + +def test_lca_on_cycle_graph(): + G = nx.cycle_graph(6, create_using=nx.DiGraph) + with pytest.raises( + nx.NetworkXError, match="LCA only defined on directed acyclic graphs" + ): + nx.lowest_common_ancestor(G, 0, 3) + + +def test_lca_multiple_valid_solutions(): + G = nx.DiGraph() + G.add_nodes_from(range(4)) + G.add_edges_from([(2, 0), (3, 0), (2, 1), (3, 1)]) + assert nx.lowest_common_ancestor(G, 0, 1) in {2, 3} + + +def test_lca_dont_rely_on_single_successor(): + # Nodes 0 and 1 have nodes 2 and 3 as immediate ancestors, + # and node 2 also has node 3 as an immediate ancestor. 
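+    # Since 3 is itself an ancestor of 2, node 2 is the lower of the two
+    # common ancestors of 0 and 1, so 2 is the expected LCA.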
+ G = nx.DiGraph() + G.add_nodes_from(range(4)) + G.add_edges_from([(2, 0), (2, 1), (3, 1), (3, 0), (3, 2)]) + assert nx.lowest_common_ancestor(G, 0, 1) == 2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_matching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_matching.py new file mode 100644 index 0000000..703416d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_matching.py @@ -0,0 +1,548 @@ +import math +from itertools import permutations + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +@pytest.mark.parametrize( + "fn", (nx.is_matching, nx.is_maximal_matching, nx.is_perfect_matching) +) +@pytest.mark.parametrize( + "edgeset", + ( + {(0, 5)}, # Single edge, node not in G + {(5, 0)}, # for both edge orders + {(0, 5), (2, 3)}, # node not in G, but other edge is valid matching + {(5, 5), (2, 3)}, # Self-loop hits node not in G validation first + ), +) +def test_is_matching_node_not_in_G(fn, edgeset): + """All is_*matching functions have consistent exception message for node + not in G.""" + G = nx.path_graph(4) + with pytest.raises(nx.NetworkXError, match="matching.*with node not in G"): + fn(G, edgeset) + + +@pytest.mark.parametrize( + "fn", (nx.is_matching, nx.is_maximal_matching, nx.is_perfect_matching) +) +@pytest.mark.parametrize( + "edgeset", + ( + {(0, 1, 2), (2, 3)}, # 3-tuple + {(0,), (2, 3)}, # 1-tuple + ), +) +def test_is_matching_invalid_edge(fn, edgeset): + """All is_*matching functions have consistent exception message for invalid + edges in matching.""" + G = nx.path_graph(4) + with pytest.raises(nx.NetworkXError, match=".*non-2-tuple edge.*"): + fn(G, edgeset) + + +@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.DiGraph, nx.MultiDiGraph)) +@pytest.mark.parametrize( + "fn", (nx.max_weight_matching, nx.min_weight_matching, nx.maximal_matching) +) +def test_wrong_graph_type(fn, graph_type): + G = graph_type() + with pytest.raises(nx.NetworkXNotImplemented): + fn(G) + + +class TestMaxWeightMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.max_weight_matching` function. 
+ + """ + + def test_trivial1(self): + """Empty graph""" + G = nx.Graph() + assert nx.max_weight_matching(G) == set() + assert nx.min_weight_matching(G) == set() + + def test_selfloop(self): + G = nx.Graph() + G.add_edge(0, 0, weight=100) + assert nx.max_weight_matching(G) == set() + assert nx.min_weight_matching(G) == set() + + def test_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert edges_equal(nx.max_weight_matching(G), {(0, 1)}) + assert edges_equal(nx.min_weight_matching(G), {(0, 1)}) + + def test_two_path(self): + G = nx.Graph() + G.add_edge("one", "two", weight=10) + G.add_edge("two", "three", weight=11) + assert edges_equal(nx.max_weight_matching(G), {("two", "three")}) + assert edges_equal(nx.min_weight_matching(G), {("one", "two")}) + + def test_path(self): + G = nx.Graph() + G.add_edge(1, 2, weight=5) + G.add_edge(2, 3, weight=11) + G.add_edge(3, 4, weight=5) + assert edges_equal(nx.max_weight_matching(G), {(2, 3)}) + assert edges_equal(nx.max_weight_matching(G, 1), {(1, 2), (3, 4)}) + assert edges_equal(nx.min_weight_matching(G), {(1, 2), (3, 4)}) + assert edges_equal(nx.min_weight_matching(G, 1), {(1, 2), (3, 4)}) + + def test_square(self): + G = nx.Graph() + G.add_edge(1, 4, weight=2) + G.add_edge(2, 3, weight=2) + G.add_edge(1, 2, weight=1) + G.add_edge(3, 4, weight=4) + assert edges_equal(nx.max_weight_matching(G), {(1, 2), (3, 4)}) + assert edges_equal(nx.min_weight_matching(G), {(1, 4), (2, 3)}) + + def test_edge_attribute_name(self): + G = nx.Graph() + G.add_edge("one", "two", weight=10, abcd=11) + G.add_edge("two", "three", weight=11, abcd=10) + assert edges_equal(nx.max_weight_matching(G, weight="abcd"), {("one", "two")}) + assert edges_equal(nx.min_weight_matching(G, weight="abcd"), {("two", "three")}) + + def test_floating_point_weights(self): + G = nx.Graph() + G.add_edge(1, 2, weight=math.pi) + G.add_edge(2, 3, weight=math.exp(1)) + G.add_edge(1, 3, weight=3.0) + G.add_edge(1, 4, weight=math.sqrt(2.0)) + assert edges_equal(nx.max_weight_matching(G), {(1, 4), (2, 3)}) + assert edges_equal(nx.min_weight_matching(G), {(1, 4), (2, 3)}) + + def test_negative_weights(self): + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 3, weight=-2) + G.add_edge(2, 3, weight=1) + G.add_edge(2, 4, weight=-1) + G.add_edge(3, 4, weight=-6) + assert edges_equal(nx.max_weight_matching(G), {(1, 2)}) + assert edges_equal( + nx.max_weight_matching(G, maxcardinality=True), {(1, 3), (2, 4)} + ) + assert edges_equal(nx.min_weight_matching(G), {(1, 2), (3, 4)}) + + def test_s_blossom(self): + """Create S-blossom and use it for augmentation:""" + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 8), (1, 3, 9), (2, 3, 10), (3, 4, 7)]) + answer = {(1, 2), (3, 4)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + G.add_weighted_edges_from([(1, 6, 5), (4, 5, 6)]) + answer = {(1, 6), (2, 3), (4, 5)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_s_t_blossom(self): + """Create S-blossom, relabel as T-blossom, use for augmentation:""" + G = nx.Graph() + G.add_weighted_edges_from( + [(1, 2, 9), (1, 3, 8), (2, 3, 10), (1, 4, 5), (4, 5, 4), (1, 6, 3)] + ) + answer = {(1, 6), (2, 3), (4, 5)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + G.add_edge(4, 5, weight=3) + G.add_edge(1, 6, weight=4) + assert edges_equal(nx.max_weight_matching(G), answer) + assert 
edges_equal(nx.min_weight_matching(G), answer) + + G.remove_edge(1, 6) + G.add_edge(3, 6, weight=4) + answer = {(1, 2), (3, 6), (4, 5)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom(self): + """Create nested S-blossom, use for augmentation:""" + + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 9), + (1, 3, 9), + (2, 3, 10), + (2, 4, 8), + (3, 5, 8), + (4, 5, 10), + (5, 6, 6), + ] + ) + expected_edgeset = {(1, 3), (2, 4), (5, 6)} + expected = {frozenset(e) for e in expected_edgeset} + answer = {frozenset(e) for e in nx.max_weight_matching(G)} + assert answer == expected + answer = {frozenset(e) for e in nx.min_weight_matching(G)} + assert answer == expected + + def test_nested_s_blossom_relabel(self): + """Create S-blossom, relabel as S, include in nested S-blossom:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 10), + (1, 7, 10), + (2, 3, 12), + (3, 4, 20), + (3, 5, 20), + (4, 5, 25), + (5, 6, 10), + (6, 7, 10), + (7, 8, 8), + ] + ) + answer = {(1, 2), (3, 4), (5, 6), (7, 8)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom_expand(self): + """Create nested S-blossom, augment, expand recursively:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 8), + (1, 3, 8), + (2, 3, 10), + (2, 4, 12), + (3, 5, 12), + (4, 5, 14), + (4, 6, 12), + (5, 7, 12), + (6, 7, 14), + (7, 8, 12), + ] + ) + answer = {(1, 2), (3, 5), (4, 6), (7, 8)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_s_blossom_relabel_expand(self): + """Create S-blossom, relabel as T, expand:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 23), + (1, 5, 22), + (1, 6, 15), + (2, 3, 25), + (3, 4, 22), + (4, 5, 25), + (4, 8, 14), + (5, 7, 13), + ] + ) + answer = {(1, 6), (2, 3), (4, 8), (5, 7)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom_relabel_expand(self): + """Create nested S-blossom, relabel as T, expand:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 19), + (1, 3, 20), + (1, 8, 8), + (2, 3, 25), + (2, 4, 18), + (3, 5, 18), + (4, 5, 13), + (4, 7, 7), + (5, 6, 7), + ] + ) + answer = {(1, 8), (2, 3), (4, 7), (5, 6)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom1(self): + """Create blossom, relabel as T in more than one way, expand, + augment: + """ + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 45), + (1, 5, 45), + (2, 3, 50), + (3, 4, 45), + (4, 5, 50), + (1, 6, 30), + (3, 9, 35), + (4, 8, 35), + (5, 7, 26), + (9, 10, 5), + ] + ) + answer = {(1, 6), (2, 3), (4, 8), (5, 7), (9, 10)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom2(self): + """Again but slightly different:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 45), + (1, 5, 45), + (2, 3, 50), + (3, 4, 45), + (4, 5, 50), + (1, 6, 30), + (3, 9, 35), + (4, 8, 26), + (5, 7, 40), + (9, 10, 5), + ] + ) + answer = {(1, 6), (2, 3), (4, 8), (5, 7), (9, 10)} + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom_least_slack(self): + """Create blossom, relabel as T, expand such 
that a new
+        least-slack S-to-free edge is produced, augment:
+        """
+        G = nx.Graph()
+        G.add_weighted_edges_from(
+            [
+                (1, 2, 45),
+                (1, 5, 45),
+                (2, 3, 50),
+                (3, 4, 45),
+                (4, 5, 50),
+                (1, 6, 30),
+                (3, 9, 35),
+                (4, 8, 28),
+                (5, 7, 26),
+                (9, 10, 5),
+            ]
+        )
+        answer = {(1, 6), (2, 3), (4, 8), (5, 7), (9, 10)}
+        assert edges_equal(nx.max_weight_matching(G), answer)
+        assert edges_equal(nx.min_weight_matching(G), answer)
+
+    def test_nasty_blossom_augmenting(self):
+        """Create nested blossom, relabel as T in more than one way"""
+        # expand outer blossom such that inner blossom ends up on an
+        # augmenting path:
+        G = nx.Graph()
+        G.add_weighted_edges_from(
+            [
+                (1, 2, 45),
+                (1, 7, 45),
+                (2, 3, 50),
+                (3, 4, 45),
+                (4, 5, 95),
+                (4, 6, 94),
+                (5, 6, 94),
+                (6, 7, 50),
+                (1, 8, 30),
+                (3, 11, 35),
+                (5, 9, 36),
+                (7, 10, 26),
+                (11, 12, 5),
+            ]
+        )
+        answer = {(1, 8), (2, 3), (4, 6), (5, 9), (7, 10), (11, 12)}
+        assert edges_equal(nx.max_weight_matching(G), answer)
+        assert edges_equal(nx.min_weight_matching(G), answer)
+
+    def test_nasty_blossom_expand_recursively(self):
+        """Create nested S-blossom, relabel as S, expand recursively:"""
+        G = nx.Graph()
+        G.add_weighted_edges_from(
+            [
+                (1, 2, 40),
+                (1, 3, 40),
+                (2, 3, 60),
+                (2, 4, 55),
+                (3, 5, 55),
+                (4, 5, 50),
+                (1, 8, 15),
+                (5, 7, 30),
+                (7, 6, 10),
+                (8, 10, 10),
+                (4, 9, 30),
+            ]
+        )
+        answer = {(1, 2), (3, 5), (4, 9), (6, 7), (8, 10)}
+        assert edges_equal(nx.max_weight_matching(G), answer)
+        assert edges_equal(nx.min_weight_matching(G), answer)
+
+
+class TestIsMatching:
+    """Unit tests for the
+    :func:`~networkx.algorithms.matching.is_matching` function.
+
+    """
+
+    def test_dict(self):
+        G = nx.path_graph(4)
+        assert nx.is_matching(G, {0: 1, 1: 0, 2: 3, 3: 2})
+
+    def test_empty_matching(self):
+        G = nx.path_graph(4)
+        assert nx.is_matching(G, set())
+
+    def test_single_edge(self):
+        G = nx.path_graph(4)
+        assert nx.is_matching(G, {(1, 2)})
+
+    def test_edge_order(self):
+        G = nx.path_graph(4)
+        assert nx.is_matching(G, {(0, 1), (2, 3)})
+        assert nx.is_matching(G, {(1, 0), (2, 3)})
+        assert nx.is_matching(G, {(0, 1), (3, 2)})
+        assert nx.is_matching(G, {(1, 0), (3, 2)})
+
+    def test_valid_matching(self):
+        G = nx.path_graph(4)
+        assert nx.is_matching(G, {(0, 1), (2, 3)})
+
+    def test_selfloops(self):
+        G = nx.path_graph(4)
+        # selfloop edge not in G
+        assert not nx.is_matching(G, {(0, 0), (1, 2), (2, 3)})
+        # selfloop edge in G
+        G.add_edge(0, 0)
+        assert not nx.is_matching(G, {(0, 0), (1, 2)})
+
+    def test_invalid_matching(self):
+        G = nx.path_graph(4)
+        assert not nx.is_matching(G, {(0, 1), (1, 2), (2, 3)})
+
+    def test_invalid_edge(self):
+        G = nx.path_graph(4)
+        assert not nx.is_matching(G, {(0, 3), (1, 2)})
+
+        G = nx.DiGraph(G.edges)
+        assert nx.is_matching(G, {(0, 1)})
+        assert not nx.is_matching(G, {(1, 0)})
+
+
+class TestIsMaximalMatching:
+    """Unit tests for the
+    :func:`~networkx.algorithms.matching.is_maximal_matching` function.
+ + """ + + def test_dict(self): + G = nx.path_graph(4) + assert nx.is_maximal_matching(G, {0: 1, 1: 0, 2: 3, 3: 2}) + + def test_valid(self): + G = nx.path_graph(4) + assert nx.is_maximal_matching(G, {(0, 1), (2, 3)}) + + def test_not_matching(self): + G = nx.path_graph(4) + assert not nx.is_maximal_matching(G, {(0, 1), (1, 2), (2, 3)}) + assert not nx.is_maximal_matching(G, {(0, 3)}) + G.add_edge(0, 0) + assert not nx.is_maximal_matching(G, {(0, 0)}) + + def test_not_maximal(self): + G = nx.path_graph(4) + assert not nx.is_maximal_matching(G, {(0, 1)}) + + +class TestIsPerfectMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.is_perfect_matching` function. + + """ + + def test_dict(self): + G = nx.path_graph(4) + assert nx.is_perfect_matching(G, {0: 1, 1: 0, 2: 3, 3: 2}) + + def test_valid(self): + G = nx.path_graph(4) + assert nx.is_perfect_matching(G, {(0, 1), (2, 3)}) + + def test_valid_not_path(self): + G = nx.cycle_graph(4) + G.add_edge(0, 4) + G.add_edge(1, 4) + G.add_edge(5, 2) + + assert nx.is_perfect_matching(G, {(1, 4), (0, 3), (5, 2)}) + + def test_selfloops(self): + G = nx.path_graph(4) + # selfloop edge not in G + assert not nx.is_perfect_matching(G, {(0, 0), (1, 2), (2, 3)}) + # selfloop edge in G + G.add_edge(0, 0) + assert not nx.is_perfect_matching(G, {(0, 0), (1, 2)}) + + def test_not_matching(self): + G = nx.path_graph(4) + assert not nx.is_perfect_matching(G, {(0, 3)}) + assert not nx.is_perfect_matching(G, {(0, 1), (1, 2), (2, 3)}) + + def test_maximal_but_not_perfect(self): + G = nx.cycle_graph(4) + G.add_edge(0, 4) + G.add_edge(1, 4) + + assert not nx.is_perfect_matching(G, {(1, 4), (0, 3)}) + + +class TestMaximalMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.maximal_matching`. + + """ + + def test_valid_matching(self): + edges = [(1, 2), (1, 5), (2, 3), (2, 5), (3, 4), (3, 6), (5, 6)] + G = nx.Graph(edges) + matching = nx.maximal_matching(G) + assert nx.is_maximal_matching(G, matching) + + def test_single_edge_matching(self): + # In the star graph, any maximal matching has just one edge. + G = nx.star_graph(5) + matching = nx.maximal_matching(G) + assert 1 == len(matching) + assert nx.is_maximal_matching(G, matching) + + def test_self_loops(self): + # Create the path graph with two self-loops. + G = nx.path_graph(3) + G.add_edges_from([(0, 0), (1, 1)]) + matching = nx.maximal_matching(G) + assert len(matching) == 1 + # The matching should never include self-loops. + assert not any(u == v for u, v in matching) + assert nx.is_maximal_matching(G, matching) + + def test_ordering(self): + """Tests that a maximal matching is computed correctly + regardless of the order in which nodes are added to the graph. 
+ + """ + for nodes in permutations(range(3)): + G = nx.Graph() + G.add_nodes_from(nodes) + G.add_edges_from([(0, 1), (0, 2)]) + matching = nx.maximal_matching(G) + assert len(matching) == 1 + assert nx.is_maximal_matching(G, matching) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_max_weight_clique.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_max_weight_clique.py new file mode 100644 index 0000000..6cd8584 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_max_weight_clique.py @@ -0,0 +1,179 @@ +"""Maximum weight clique test suite.""" + +import pytest + +import networkx as nx + + +class TestMaximumWeightClique: + def test_basic_cases(self): + def check_basic_case(graph_func, expected_weight, weight_accessor): + graph = graph_func() + clique, weight = nx.algorithms.max_weight_clique(graph, weight_accessor) + assert verify_clique( + graph, clique, weight, expected_weight, weight_accessor + ) + + for graph_func, (expected_weight, expected_size) in TEST_CASES.items(): + check_basic_case(graph_func, expected_weight, "weight") + check_basic_case(graph_func, expected_size, None) + + def test_key_error(self): + graph = two_node_graph() + with pytest.raises(KeyError): + nx.algorithms.max_weight_clique(graph, "nonexistent-key") + + def test_error_on_non_integer_weight(self): + graph = two_node_graph() + graph.nodes[2]["weight"] = 1.5 + with pytest.raises(ValueError): + nx.algorithms.max_weight_clique(graph) + + def test_unaffected_by_self_loops(self): + graph = two_node_graph() + graph.add_edge(1, 1) + graph.add_edge(2, 2) + clique, weight = nx.algorithms.max_weight_clique(graph, "weight") + assert verify_clique(graph, clique, weight, 30, "weight") + graph = three_node_independent_set() + graph.add_edge(1, 1) + clique, weight = nx.algorithms.max_weight_clique(graph, "weight") + assert verify_clique(graph, clique, weight, 20, "weight") + + def test_30_node_prob(self): + G = nx.Graph() + G.add_nodes_from(range(1, 31)) + for i in range(1, 31): + G.nodes[i]["weight"] = i + 1 + # fmt: off + G.add_edges_from( + [ + (1, 12), (1, 13), (1, 15), (1, 16), (1, 18), (1, 19), (1, 20), + (1, 23), (1, 26), (1, 28), (1, 29), (1, 30), (2, 3), (2, 4), + (2, 5), (2, 8), (2, 9), (2, 10), (2, 14), (2, 17), (2, 18), + (2, 21), (2, 22), (2, 23), (2, 27), (3, 9), (3, 15), (3, 21), + (3, 22), (3, 23), (3, 24), (3, 27), (3, 28), (3, 29), (4, 5), + (4, 6), (4, 8), (4, 21), (4, 22), (4, 23), (4, 26), (4, 28), + (4, 30), (5, 6), (5, 8), (5, 9), (5, 13), (5, 14), (5, 15), + (5, 16), (5, 20), (5, 21), (5, 22), (5, 25), (5, 28), (5, 29), + (6, 7), (6, 8), (6, 13), (6, 17), (6, 18), (6, 19), (6, 24), + (6, 26), (6, 27), (6, 28), (6, 29), (7, 12), (7, 14), (7, 15), + (7, 16), (7, 17), (7, 20), (7, 25), (7, 27), (7, 29), (7, 30), + (8, 10), (8, 15), (8, 16), (8, 18), (8, 20), (8, 22), (8, 24), + (8, 26), (8, 27), (8, 28), (8, 30), (9, 11), (9, 12), (9, 13), + (9, 14), (9, 15), (9, 16), (9, 19), (9, 20), (9, 21), (9, 24), + (9, 30), (10, 12), (10, 15), (10, 18), (10, 19), (10, 20), + (10, 22), (10, 23), (10, 24), (10, 26), (10, 27), (10, 29), + (10, 30), (11, 13), (11, 15), (11, 16), (11, 17), (11, 18), + (11, 19), (11, 20), (11, 22), (11, 29), (11, 30), (12, 14), + (12, 17), (12, 18), (12, 19), (12, 20), (12, 21), (12, 23), + (12, 25), (12, 26), (12, 30), (13, 20), (13, 22), (13, 23), + (13, 24), (13, 30), (14, 16), (14, 20), (14, 21), (14, 22), + (14, 23), (14, 25), (14, 26), (14, 27), (14, 29), (14, 30), + (15, 17), (15, 18), (15, 20), (15, 
21), (15, 26), (15, 27), + (15, 28), (16, 17), (16, 18), (16, 19), (16, 20), (16, 21), + (16, 29), (16, 30), (17, 18), (17, 21), (17, 22), (17, 25), + (17, 27), (17, 28), (17, 30), (18, 19), (18, 20), (18, 21), + (18, 22), (18, 23), (18, 24), (19, 20), (19, 22), (19, 23), + (19, 24), (19, 25), (19, 27), (19, 30), (20, 21), (20, 23), + (20, 24), (20, 26), (20, 28), (20, 29), (21, 23), (21, 26), + (21, 27), (21, 29), (22, 24), (22, 25), (22, 26), (22, 29), + (23, 25), (23, 30), (24, 25), (24, 26), (25, 27), (25, 29), + (26, 27), (26, 28), (26, 30), (28, 29), (29, 30), + ] + ) + # fmt: on + clique, weight = nx.algorithms.max_weight_clique(G) + assert verify_clique(G, clique, weight, 111, "weight") + + +# ############################ Utility functions ############################ +def verify_clique( + graph, clique, reported_clique_weight, expected_clique_weight, weight_accessor +): + for node1 in clique: + for node2 in clique: + if node1 == node2: + continue + if not graph.has_edge(node1, node2): + return False + + if weight_accessor is None: + clique_weight = len(clique) + else: + clique_weight = sum(graph.nodes[v]["weight"] for v in clique) + + if clique_weight != expected_clique_weight: + return False + if clique_weight != reported_clique_weight: + return False + + return True + + +# ############################ Graph Generation ############################ + + +def empty_graph(): + return nx.Graph() + + +def one_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1]) + graph.nodes[1]["weight"] = 10 + return graph + + +def two_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1, 2]) + graph.add_edges_from([(1, 2)]) + graph.nodes[1]["weight"] = 10 + graph.nodes[2]["weight"] = 20 + return graph + + +def three_node_clique(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (1, 3), (2, 3)]) + graph.nodes[1]["weight"] = 10 + graph.nodes[2]["weight"] = 20 + graph.nodes[3]["weight"] = 5 + return graph + + +def three_node_independent_set(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.nodes[1]["weight"] = 10 + graph.nodes[2]["weight"] = 20 + graph.nodes[3]["weight"] = 5 + return graph + + +def disconnected(): + graph = nx.Graph() + graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)]) + graph.nodes[1]["weight"] = 10 + graph.nodes[2]["weight"] = 20 + graph.nodes[3]["weight"] = 5 + graph.nodes[4]["weight"] = 100 + graph.nodes[5]["weight"] = 200 + graph.nodes[6]["weight"] = 50 + return graph + + +# -------------------------------------------------------------------------- +# Basic tests for all strategies +# For each basic graph function, specify expected weight of max weight clique +# and expected size of maximum clique +TEST_CASES = { + empty_graph: (0, 0), + one_node_graph: (10, 1), + two_node_graph: (30, 2), + three_node_clique: (35, 3), + three_node_independent_set: (20, 1), + disconnected: (300, 2), +} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_mis.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_mis.py new file mode 100644 index 0000000..02be02d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_mis.py @@ -0,0 +1,62 @@ +""" +Tests for maximal (not maximum) independent sets. 
+ +""" + +import random + +import pytest + +import networkx as nx + + +def test_random_seed(): + G = nx.empty_graph(5) + assert nx.maximal_independent_set(G, seed=1) == [1, 0, 3, 2, 4] + + +@pytest.mark.parametrize("graph", [nx.complete_graph(5), nx.complete_graph(55)]) +def test_K5(graph): + """Maximal independent set for complete graphs""" + assert all(nx.maximal_independent_set(graph, [n]) == [n] for n in graph) + + +def test_exceptions(): + """Bad input should raise exception.""" + G = nx.florentine_families_graph() + pytest.raises(nx.NetworkXUnfeasible, nx.maximal_independent_set, G, ["Smith"]) + pytest.raises( + nx.NetworkXUnfeasible, nx.maximal_independent_set, G, ["Salviati", "Pazzi"] + ) + # MaximalIndependentSet is not implemented for directed graphs + pytest.raises(nx.NetworkXNotImplemented, nx.maximal_independent_set, nx.DiGraph(G)) + + +def test_florentine_family(): + G = nx.florentine_families_graph() + indep = nx.maximal_independent_set(G, ["Medici", "Bischeri"]) + assert set(indep) == { + "Medici", + "Bischeri", + "Castellani", + "Pazzi", + "Ginori", + "Lamberteschi", + } + + +def test_bipartite(): + G = nx.complete_bipartite_graph(12, 34) + indep = nx.maximal_independent_set(G, [4, 5, 9, 10]) + assert sorted(indep) == list(range(12)) + + +def test_random_graphs(): + """Generate 5 random graphs of different types and sizes and + make sure that all sets are independent and maximal.""" + for i in range(0, 50, 10): + G = nx.erdos_renyi_graph(i * 10 + 1, random.random()) + IS = nx.maximal_independent_set(G) + assert G.subgraph(IS).number_of_edges() == 0 + nbrs_of_MIS = set.union(*(set(G.neighbors(v)) for v in IS)) + assert all(v in nbrs_of_MIS for v in set(G.nodes()).difference(IS)) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_moral.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_moral.py new file mode 100644 index 0000000..fc98c97 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_moral.py @@ -0,0 +1,15 @@ +import networkx as nx +from networkx.algorithms.moral import moral_graph + + +def test_get_moral_graph(): + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from([(1, 2), (3, 2), (4, 1), (4, 5), (6, 5), (7, 5)]) + H = moral_graph(graph) + assert not H.is_directed() + assert H.has_edge(1, 3) + assert H.has_edge(4, 6) + assert H.has_edge(6, 7) + assert H.has_edge(4, 7) + assert not H.has_edge(1, 5) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_node_classification.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_node_classification.py new file mode 100644 index 0000000..2e1fc79 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_node_classification.py @@ -0,0 +1,140 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms import node_classification + + +class TestHarmonicFunction: + def test_path_graph(self): + G = nx.path_graph(4) + label_name = "label" + G.nodes[0][label_name] = "A" + G.nodes[3][label_name] = "B" + predicted = node_classification.harmonic_function(G, label_name=label_name) + assert predicted[0] == "A" + assert predicted[1] == "A" + assert predicted[2] == "B" + assert predicted[3] == "B" + + def test_no_labels(self): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + node_classification.harmonic_function(G) + + def test_no_nodes(self): + with 
pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            node_classification.harmonic_function(G)
+
+    def test_no_edges(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            G.add_node(1)
+            G.add_node(2)
+            node_classification.harmonic_function(G)
+
+    def test_digraph(self):
+        with pytest.raises(nx.NetworkXNotImplemented):
+            G = nx.DiGraph()
+            G.add_edge(0, 1)
+            G.add_edge(1, 2)
+            G.add_edge(2, 3)
+            label_name = "label"
+            G.nodes[0][label_name] = "A"
+            G.nodes[3][label_name] = "B"
+            node_classification.harmonic_function(G)
+
+    def test_one_labeled_node(self):
+        G = nx.path_graph(4)
+        label_name = "label"
+        G.nodes[0][label_name] = "A"
+        predicted = node_classification.harmonic_function(G, label_name=label_name)
+        assert predicted[0] == "A"
+        assert predicted[1] == "A"
+        assert predicted[2] == "A"
+        assert predicted[3] == "A"
+
+    def test_nodes_all_labeled(self):
+        G = nx.karate_club_graph()
+        label_name = "club"
+        predicted = node_classification.harmonic_function(G, label_name=label_name)
+        for i in range(len(G)):
+            assert predicted[i] == G.nodes[i][label_name]
+
+    def test_labeled_nodes_are_not_changed(self):
+        G = nx.karate_club_graph()
+        label_name = "club"
+        label_removed = {0, 1, 2, 3, 4, 5, 6, 7}
+        for i in label_removed:
+            del G.nodes[i][label_name]
+        predicted = node_classification.harmonic_function(G, label_name=label_name)
+        label_not_removed = set(range(len(G))) - label_removed
+        for i in label_not_removed:
+            assert predicted[i] == G.nodes[i][label_name]
+
+
+class TestLocalAndGlobalConsistency:
+    def test_path_graph(self):
+        G = nx.path_graph(4)
+        label_name = "label"
+        G.nodes[0][label_name] = "A"
+        G.nodes[3][label_name] = "B"
+        predicted = node_classification.local_and_global_consistency(
+            G, label_name=label_name
+        )
+        assert predicted[0] == "A"
+        assert predicted[1] == "A"
+        assert predicted[2] == "B"
+        assert predicted[3] == "B"
+
+    def test_no_labels(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.path_graph(4)
+            node_classification.local_and_global_consistency(G)
+
+    def test_no_nodes(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            node_classification.local_and_global_consistency(G)
+
+    def test_no_edges(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            G.add_node(1)
+            G.add_node(2)
+            node_classification.local_and_global_consistency(G)
+
+    def test_digraph(self):
+        with pytest.raises(nx.NetworkXNotImplemented):
+            G = nx.DiGraph()
+            G.add_edge(0, 1)
+            G.add_edge(1, 2)
+            G.add_edge(2, 3)
+            label_name = "label"
+            G.nodes[0][label_name] = "A"
+            G.nodes[3][label_name] = "B"
+            node_classification.local_and_global_consistency(G)
+
+    def test_one_labeled_node(self):
+        G = nx.path_graph(4)
+        label_name = "label"
+        G.nodes[0][label_name] = "A"
+        predicted = node_classification.local_and_global_consistency(
+            G, label_name=label_name
+        )
+        assert predicted[0] == "A"
+        assert predicted[1] == "A"
+        assert predicted[2] == "A"
+        assert predicted[3] == "A"
+
+    def test_nodes_all_labeled(self):
+        G = nx.karate_club_graph()
+        label_name = "club"
+        predicted = node_classification.local_and_global_consistency(
+            G, alpha=0, label_name=label_name
+        )
+        for i in range(len(G)):
+            assert predicted[i] == G.nodes[i][label_name]
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_non_randomness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_non_randomness.py
new file mode 100644
index 0000000..2f495be
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_non_randomness.py
@@ 
-0,0 +1,42 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") + + +@pytest.mark.parametrize( + "k, weight, expected", + [ + (None, None, 7.21), # infers 3 communities + (2, None, 11.7), + (None, "weight", 25.45), + (2, "weight", 38.8), + ], +) +def test_non_randomness(k, weight, expected): + G = nx.karate_club_graph() + np.testing.assert_almost_equal( + nx.non_randomness(G, k, weight)[0], expected, decimal=2 + ) + + +def test_non_connected(): + G = nx.Graph([(1, 2)]) + G.add_node(3) + with pytest.raises(nx.NetworkXException, match="Non connected"): + nx.non_randomness(G) + + +def test_self_loops(): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(1, 1) + with pytest.raises(nx.NetworkXError, match="Graph must not contain self-loops"): + nx.non_randomness(G) + + +def test_empty_graph(): + G = nx.empty_graph(1) + with pytest.raises(nx.NetworkXError, match=".*not applicable to empty graphs"): + nx.non_randomness(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_planar_drawing.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_planar_drawing.py new file mode 100644 index 0000000..b59d5c1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_planar_drawing.py @@ -0,0 +1,274 @@ +import math + +import pytest + +import networkx as nx +from networkx.algorithms.planar_drawing import triangulate_embedding + + +def test_graph1(): + embedding_data = {0: [1, 2, 3], 1: [2, 0], 2: [3, 0, 1], 3: [2, 0]} + check_embedding_data(embedding_data) + + +def test_graph2(): + embedding_data = { + 0: [8, 6], + 1: [2, 6, 9], + 2: [8, 1, 7, 9, 6, 4], + 3: [9], + 4: [2], + 5: [6, 8], + 6: [9, 1, 0, 5, 2], + 7: [9, 2], + 8: [0, 2, 5], + 9: [1, 6, 2, 7, 3], + } + check_embedding_data(embedding_data) + + +def test_circle_graph(): + embedding_data = { + 0: [1, 9], + 1: [0, 2], + 2: [1, 3], + 3: [2, 4], + 4: [3, 5], + 5: [4, 6], + 6: [5, 7], + 7: [6, 8], + 8: [7, 9], + 9: [8, 0], + } + check_embedding_data(embedding_data) + + +def test_grid_graph(): + embedding_data = { + (0, 1): [(0, 0), (1, 1), (0, 2)], + (1, 2): [(1, 1), (2, 2), (0, 2)], + (0, 0): [(0, 1), (1, 0)], + (2, 1): [(2, 0), (2, 2), (1, 1)], + (1, 1): [(2, 1), (1, 2), (0, 1), (1, 0)], + (2, 0): [(1, 0), (2, 1)], + (2, 2): [(1, 2), (2, 1)], + (1, 0): [(0, 0), (2, 0), (1, 1)], + (0, 2): [(1, 2), (0, 1)], + } + check_embedding_data(embedding_data) + + +def test_one_node_graph(): + embedding_data = {0: []} + check_embedding_data(embedding_data) + + +def test_two_node_graph(): + embedding_data = {0: [1], 1: [0]} + check_embedding_data(embedding_data) + + +def test_three_node_graph(): + embedding_data = {0: [1, 2], 1: [0, 2], 2: [0, 1]} + check_embedding_data(embedding_data) + + +def test_multiple_component_graph1(): + embedding_data = {0: [], 1: []} + check_embedding_data(embedding_data) + + +def test_multiple_component_graph2(): + embedding_data = {0: [1, 2], 1: [0, 2], 2: [0, 1], 3: [4, 5], 4: [3, 5], 5: [3, 4]} + check_embedding_data(embedding_data) + + +def test_invalid_half_edge(): + with pytest.raises(nx.NetworkXException): + embedding_data = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2, 4], 4: [1, 2, 3]} + embedding = nx.PlanarEmbedding() + embedding.set_data(embedding_data) + nx.combinatorial_embedding_to_pos(embedding) + + +def test_triangulate_embedding1(): + embedding = nx.PlanarEmbedding() + embedding.add_node(1) + expected_embedding = {1: []} + check_triangulation(embedding, expected_embedding) + + +def test_triangulate_embedding2(): + embedding = 
nx.PlanarEmbedding()
+    embedding.connect_components(1, 2)
+    expected_embedding = {1: [2], 2: [1]}
+    check_triangulation(embedding, expected_embedding)
+
+
+def check_triangulation(embedding, expected_embedding):
+    res_embedding, _ = triangulate_embedding(embedding, True)
+    assert res_embedding.get_data() == expected_embedding, (
+        "Expected embedding incorrect"
+    )
+    res_embedding, _ = triangulate_embedding(embedding, False)
+    assert res_embedding.get_data() == expected_embedding, (
+        "Expected embedding incorrect"
+    )
+
+
+def check_embedding_data(embedding_data):
+    """Checks that the planar embedding of the input is correct"""
+    embedding = nx.PlanarEmbedding()
+    embedding.set_data(embedding_data)
+    pos_fully = nx.combinatorial_embedding_to_pos(embedding, False)
+    msg = "Planar drawing does not conform to the embedding (full triangulation)"
+    assert planar_drawing_conforms_to_embedding(embedding, pos_fully), msg
+    check_edge_intersections(embedding, pos_fully)
+    pos_internally = nx.combinatorial_embedding_to_pos(embedding, True)
+    msg = "Planar drawing does not conform to the embedding (internal triangulation)"
+    assert planar_drawing_conforms_to_embedding(embedding, pos_internally), msg
+    check_edge_intersections(embedding, pos_internally)
+
+
+def is_close(a, b, rel_tol=1e-09, abs_tol=0.0):
+    # Check whether two floats are essentially equal; for Python >= 3.5,
+    # math.isclose in the standard library provides the same check.
+    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+
+
+def point_in_between(a, b, p):
+    # checks if p is on the line segment between a and b
+    x1, y1 = a
+    x2, y2 = b
+    px, py = p
+    dist_1_2 = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
+    dist_1_p = math.sqrt((x1 - px) ** 2 + (y1 - py) ** 2)
+    dist_2_p = math.sqrt((x2 - px) ** 2 + (y2 - py) ** 2)
+    return is_close(dist_1_p + dist_2_p, dist_1_2)
+
+
+def check_edge_intersections(G, pos):
+    """Check all edges in G for intersections.
+
+    Raises an exception if an intersection is found.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    pos : dict
+        Maps every node to a tuple (x, y) representing its position
+
+    """
+    for a, b in G.edges():
+        for c, d in G.edges():
+            # Check if end points are different
+            if a != c and b != d and b != c and a != d:
+                x1, y1 = pos[a]
+                x2, y2 = pos[b]
+                x3, y3 = pos[c]
+                x4, y4 = pos[d]
+                determinant = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
+                if determinant != 0:  # the lines are not parallel
+                    # calculate intersection point, see:
+                    # https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
+                    px = (
+                        (x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)
+                    ) / determinant
+                    py = (
+                        (x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)
+                    ) / determinant
+
+                    # Check if intersection lies between the points
+                    if point_in_between(pos[a], pos[b], (px, py)) and point_in_between(
+                        pos[c], pos[d], (px, py)
+                    ):
+                        msg = f"There is an intersection at {px},{py}"
+                        raise nx.NetworkXException(msg)
+
+                # Check overlap
+                msg = "A node lies on an edge connecting two other nodes"
+                if (
+                    point_in_between(pos[a], pos[b], pos[c])
+                    or point_in_between(pos[a], pos[b], pos[d])
+                    or point_in_between(pos[c], pos[d], pos[a])
+                    or point_in_between(pos[c], pos[d], pos[b])
+                ):
+                    raise nx.NetworkXException(msg)
+    # No edge intersection found
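+
+
+# Quick sanity check of the intersection formula above (an illustration,
+# not part of the upstream test file): for the segments (0, 0)-(2, 2)
+# and (0, 2)-(2, 0),
+#   determinant = (0 - 2) * (2 - 0) - (0 - 2) * (0 - 2)       = -8
+#   px = ((0*2 - 0*2) * (0 - 2) - (0 - 2) * (0*0 - 2*2)) / -8 = 1.0
+#   py = ((0*2 - 0*2) * (2 - 0) - (0 - 2) * (0*0 - 2*2)) / -8 = 1.0
+# i.e. the expected crossing point (1, 1). The outer parentheses matter:
+# dividing only the last product by the determinant gives a wrong point.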
+ """ + + __slots__ = ["x", "y", "node", "quadrant"] + + def __init__(self, x, y, node): + self.x = x + self.y = y + self.node = node + if self.x >= 0 and self.y > 0: + self.quadrant = 1 + elif self.x > 0 and self.y <= 0: + self.quadrant = 2 + elif self.x <= 0 and self.y < 0: + self.quadrant = 3 + else: + self.quadrant = 4 + + def __eq__(self, other): + return self.quadrant == other.quadrant and self.x * other.y == self.y * other.x + + def __lt__(self, other): + if self.quadrant < other.quadrant: + return True + elif self.quadrant > other.quadrant: + return False + else: + return self.x * other.y < self.y * other.x + + def __ne__(self, other): + return self != other + + def __le__(self, other): + return not other < self + + def __gt__(self, other): + return other < self + + def __ge__(self, other): + return not self < other + + +def planar_drawing_conforms_to_embedding(embedding, pos): + """Checks if pos conforms to the planar embedding + + Returns true iff the neighbors are actually oriented in the orientation + specified of the embedding + """ + for v in embedding: + nbr_vectors = [] + v_pos = pos[v] + for nbr in embedding[v]: + new_vector = Vector(pos[nbr][0] - v_pos[0], pos[nbr][1] - v_pos[1], nbr) + nbr_vectors.append(new_vector) + # Sort neighbors according to their phi angle + nbr_vectors.sort() + for idx, nbr_vector in enumerate(nbr_vectors): + cw_vector = nbr_vectors[(idx + 1) % len(nbr_vectors)] + ccw_vector = nbr_vectors[idx - 1] + if ( + embedding[v][nbr_vector.node]["cw"] != cw_vector.node + or embedding[v][nbr_vector.node]["ccw"] != ccw_vector.node + ): + return False + if cw_vector.node != nbr_vector.node and cw_vector == nbr_vector: + # Lines overlap + return False + if ccw_vector.node != nbr_vector.node and ccw_vector == nbr_vector: + # Lines overlap + return False + return True diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_planarity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_planarity.py new file mode 100644 index 0000000..af2d9a9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_planarity.py @@ -0,0 +1,556 @@ +import pytest + +import networkx as nx +from networkx.algorithms.planarity import ( + check_planarity_recursive, + get_counterexample, + get_counterexample_recursive, +) + + +class TestLRPlanarity: + """Nose Unit tests for the :mod:`networkx.algorithms.planarity` module. + + Tests three things: + 1. Check that the result is correct + (returns planar if and only if the graph is actually planar) + 2. In case a counter example is returned: Check if it is correct + 3. In case an embedding is returned: Check if its actually an embedding + """ + + @staticmethod + def check_graph(G, is_planar=None): + """Raises an exception if the lr_planarity check returns a wrong result + + Parameters + ---------- + G : NetworkX graph + is_planar : bool + The expected result of the planarity check. + If set to None only counter example or embedding are verified. + + """ + + # obtain results of planarity check + is_planar_lr, result = nx.check_planarity(G, True) + is_planar_lr_rec, result_rec = check_planarity_recursive(G, True) + + if is_planar is not None: + # set a message for the assert + if is_planar: + msg = "Wrong planarity check result. Should be planar." + else: + msg = "Wrong planarity check result. Should be non-planar." 
+
+            # check if the result is as expected
+            assert is_planar == is_planar_lr, msg
+            assert is_planar == is_planar_lr_rec, msg
+
+        if is_planar_lr:
+            # check embedding
+            check_embedding(G, result)
+            check_embedding(G, result_rec)
+        else:
+            # check counterexample
+            check_counterexample(G, result)
+            check_counterexample(G, result_rec)
+
+    def test_simple_planar_graph(self):
+        e = [
+            (1, 2),
+            (2, 3),
+            (3, 4),
+            (4, 6),
+            (6, 7),
+            (7, 1),
+            (1, 5),
+            (5, 2),
+            (2, 4),
+            (4, 5),
+            (5, 7),
+        ]
+        self.check_graph(nx.Graph(e), is_planar=True)
+
+    def test_planar_with_selfloop(self):
+        e = [
+            (1, 1),
+            (2, 2),
+            (3, 3),
+            (4, 4),
+            (5, 5),
+            (1, 2),
+            (1, 3),
+            (1, 5),
+            (2, 5),
+            (2, 4),
+            (3, 4),
+            (3, 5),
+            (4, 5),
+        ]
+        self.check_graph(nx.Graph(e), is_planar=True)
+
+    def test_k3_3(self):
+        self.check_graph(nx.complete_bipartite_graph(3, 3), is_planar=False)
+
+    def test_k5(self):
+        self.check_graph(nx.complete_graph(5), is_planar=False)
+
+    def test_multiple_components_planar(self):
+        e = [(1, 2), (2, 3), (3, 1), (4, 5), (5, 6), (6, 4)]
+        self.check_graph(nx.Graph(e), is_planar=True)
+
+    def test_multiple_components_non_planar(self):
+        G = nx.complete_graph(5)
+        # add another planar component to the non-planar component;
+        # G stays non-planar
+        G.add_edges_from([(6, 7), (7, 8), (8, 6)])
+        self.check_graph(G, is_planar=False)
+
+    def test_non_planar_with_selfloop(self):
+        G = nx.complete_graph(5)
+        # add self loops
+        for i in range(5):
+            G.add_edge(i, i)
+        self.check_graph(G, is_planar=False)
+
+    def test_non_planar1(self):
+        # tests a graph that has no subgraph directly isomorphic to K5 or K3_3
+        e = [
+            (1, 5),
+            (1, 6),
+            (1, 7),
+            (2, 6),
+            (2, 3),
+            (3, 5),
+            (3, 7),
+            (4, 5),
+            (4, 6),
+            (4, 7),
+        ]
+        self.check_graph(nx.Graph(e), is_planar=False)
+
+    def test_loop(self):
+        # test a graph with a selfloop
+        e = [(1, 2), (2, 2)]
+        G = nx.Graph(e)
+        self.check_graph(G, is_planar=True)
+
+    def test_comp(self):
+        # test multiple component graph
+        e = [(1, 2), (3, 4)]
+        G = nx.Graph(e)
+        G.remove_edge(1, 2)
+        self.check_graph(G, is_planar=True)
+
+    def test_goldner_harary(self):
+        # test goldner-harary graph (a maximal planar graph)
+        e = [
+            (1, 2),
+            (1, 3),
+            (1, 4),
+            (1, 5),
+            (1, 7),
+            (1, 8),
+            (1, 10),
+            (1, 11),
+            (2, 3),
+            (2, 4),
+            (2, 6),
+            (2, 7),
+            (2, 9),
+            (2, 10),
+            (2, 11),
+            (3, 4),
+            (4, 5),
+            (4, 6),
+            (4, 7),
+            (5, 7),
+            (6, 7),
+            (7, 8),
+            (7, 9),
+            (7, 10),
+            (8, 10),
+            (9, 10),
+            (10, 11),
+        ]
+        G = nx.Graph(e)
+        self.check_graph(G, is_planar=True)
+
+    def test_planar_multigraph(self):
+        G = nx.MultiGraph([(1, 2), (1, 2), (1, 2), (1, 2), (2, 3), (3, 1)])
+        self.check_graph(G, is_planar=True)
+
+    def test_non_planar_multigraph(self):
+        G = nx.MultiGraph(nx.complete_graph(5))
+        G.add_edges_from([(1, 2)] * 5)
+        self.check_graph(G, is_planar=False)
+
+    def test_planar_digraph(self):
+        G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (4, 1), (4, 2), (1, 4), (3, 2)])
+        self.check_graph(G, is_planar=True)
+
+    def test_non_planar_digraph(self):
+        G = nx.DiGraph(nx.complete_graph(5))
+        G.remove_edge(1, 2)
+        G.remove_edge(4, 1)
+        self.check_graph(G, is_planar=False)
+
+    def test_single_component(self):
+        # Test a graph with only a single node
+        G = nx.Graph()
+        G.add_node(1)
+        self.check_graph(G, is_planar=True)
+
+    def test_graph1(self):
+        G = nx.Graph(
+            [
+                (3, 10),
+                (2, 13),
+                (1, 13),
+                (7, 11),
+                (0, 8),
+                (8, 13),
+                (0, 2),
+                (0, 7),
+                (0, 10),
+                (1, 7),
+            ]
+        )
+        self.check_graph(G, is_planar=True)
+
+    def test_graph2(self):
+        G = nx.Graph(
+            [
+                (1, 2),
+                (4, 13),
+                (0, 13),
+ (4, 5), + (7, 10), + (1, 7), + (0, 3), + (2, 6), + (5, 6), + (7, 13), + (4, 8), + (0, 8), + (0, 9), + (2, 13), + (6, 7), + (3, 6), + (2, 8), + ] + ) + self.check_graph(G, is_planar=False) + + def test_graph3(self): + G = nx.Graph( + [ + (0, 7), + (3, 11), + (3, 4), + (8, 9), + (4, 11), + (1, 7), + (1, 13), + (1, 11), + (3, 5), + (5, 7), + (1, 3), + (0, 4), + (5, 11), + (5, 13), + ] + ) + self.check_graph(G, is_planar=False) + + def test_counterexample_planar(self): + with pytest.raises(nx.NetworkXException): + # Try to get a counterexample of a planar graph + G = nx.Graph() + G.add_node(1) + get_counterexample(G) + + def test_counterexample_planar_recursive(self): + with pytest.raises(nx.NetworkXException): + # Try to get a counterexample of a planar graph + G = nx.Graph() + G.add_node(1) + get_counterexample_recursive(G) + + def test_edge_removal_from_planar_embedding(self): + # PlanarEmbedding.check_structure() must succeed after edge removal + edges = ((0, 1), (1, 2), (2, 3), (3, 4), (4, 0), (0, 2), (0, 3)) + G = nx.Graph(edges) + cert, P = nx.check_planarity(G) + assert cert is True + P.remove_edge(0, 2) + self.check_graph(P, is_planar=True) + P.add_half_edge_ccw(1, 3, 2) + P.add_half_edge_cw(3, 1, 2) + self.check_graph(P, is_planar=True) + P.remove_edges_from(((0, 3), (1, 3))) + self.check_graph(P, is_planar=True) + + @pytest.mark.parametrize("graph_type", (nx.Graph, nx.MultiGraph)) + def test_graph_planar_embedding_to_undirected(self, graph_type): + G = graph_type([(0, 1), (0, 1), (1, 2), (2, 3), (3, 0), (0, 2)]) + is_planar, P = nx.check_planarity(G) + assert is_planar + U = P.to_undirected() + assert isinstance(U, nx.Graph) + assert all((d == {} for _, _, d in U.edges(data=True))) + + @pytest.mark.parametrize( + "reciprocal, as_view", [(True, True), (True, False), (False, True)] + ) + def test_planar_embedding_to_undirected_invalid_parameters( + self, reciprocal, as_view + ): + G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0), (0, 2)]) + is_planar, P = nx.check_planarity(G) + assert is_planar + with pytest.raises(ValueError, match="is not supported for PlanarEmbedding."): + P.to_undirected(reciprocal=reciprocal, as_view=as_view) + + +def check_embedding(G, embedding): + """Raises an exception if the combinatorial embedding is not correct + + Parameters + ---------- + G : NetworkX graph + embedding : a dict mapping nodes to a list of edges + This specifies the ordering of the outgoing edges from a node for + a combinatorial embedding + + Notes + ----- + Checks the following things: + - The type of the embedding is correct + - The nodes and edges match the original graph + - Every half edge has its matching opposite half edge + - No intersections of edges (checked by Euler's formula) + """ + + if not isinstance(embedding, nx.PlanarEmbedding): + raise nx.NetworkXException("Bad embedding. Not of type nx.PlanarEmbedding") + + # Check structure + embedding.check_structure() + + # Check that graphs are equivalent + + assert set(G.nodes) == set(embedding.nodes), ( + "Bad embedding. Nodes don't match the original graph." + ) + + # Check that the edges are equal + g_edges = set() + for edge in G.edges: + if edge[0] != edge[1]: + g_edges.add((edge[0], edge[1])) + g_edges.add((edge[1], edge[0])) + assert g_edges == set(embedding.edges), ( + "Bad embedding. Edges don't match the original graph." + ) + + +def check_counterexample(G, sub_graph): + """Raises an exception if the counterexample is wrong. 
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    sub_graph : NetworkX graph
+        The subgraph of G claimed to be a Kuratowski counterexample
+    """
+    # 1. Create the subgraph
+    sub_graph = nx.Graph(sub_graph)
+
+    # 2. Remove self loops
+    for u in sub_graph:
+        if sub_graph.has_edge(u, u):
+            sub_graph.remove_edge(u, u)
+
+    # keep track of nodes we might need to contract
+    contract = list(sub_graph)
+
+    # 3. Contract Edges
+    while len(contract) > 0:
+        contract_node = contract.pop()
+        if contract_node not in sub_graph:
+            # Node was already contracted
+            continue
+        degree = sub_graph.degree[contract_node]
+        # Check if we can remove the node
+        if degree == 2:
+            # Get the two neighbors
+            neighbors = iter(sub_graph[contract_node])
+            u = next(neighbors)
+            v = next(neighbors)
+            # Save nodes for later
+            contract.append(u)
+            contract.append(v)
+            # Contract edge
+            sub_graph.remove_node(contract_node)
+            sub_graph.add_edge(u, v)
+
+    # 4. Check for isomorphism with K5 or K3_3 graphs
+    if len(sub_graph) == 5:
+        if not nx.is_isomorphic(nx.complete_graph(5), sub_graph):
+            raise nx.NetworkXException("Bad counterexample.")
+    elif len(sub_graph) == 6:
+        if not nx.is_isomorphic(nx.complete_bipartite_graph(3, 3), sub_graph):
+            raise nx.NetworkXException("Bad counterexample.")
+    else:
+        raise nx.NetworkXException("Bad counterexample.")
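+
+
+# Sanity check for the contraction loop above (an illustration, not part of
+# the upstream test file): subdividing every edge of K5 once yields a
+# 15-node graph whose 10 extra nodes all have degree 2; the while-loop
+# contracts them away and recovers K5, which step 4 then accepts:
+#   K5 = nx.complete_graph(5)
+#   S = nx.Graph()
+#   for w, (u, v) in enumerate(K5.edges(), start=5):
+#       S.add_edge(u, w)
+#       S.add_edge(w, v)
+#   check_counterexample(K5, S)  # raises nothing: S contracts back to K5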
+
+
+class TestPlanarEmbeddingClass:
+    def test_add_half_edge(self):
+        embedding = nx.PlanarEmbedding()
+        embedding.add_half_edge(0, 1)
+        with pytest.raises(
+            nx.NetworkXException, match="Invalid clockwise reference node."
+        ):
+            embedding.add_half_edge(0, 2, cw=3)
+        with pytest.raises(
+            nx.NetworkXException, match="Invalid counterclockwise reference node."
+        ):
+            embedding.add_half_edge(0, 2, ccw=3)
+        with pytest.raises(
+            nx.NetworkXException, match="Only one of cw/ccw can be specified."
+        ):
+            embedding.add_half_edge(0, 2, cw=1, ccw=1)
+        with pytest.raises(
+            nx.NetworkXException,
+            match=(
+                r"Node already has out-half-edge\(s\), either"
+                " cw or ccw reference node required."
+            ),
+        ):
+            embedding.add_half_edge(0, 2)
+        # these should work
+        embedding.add_half_edge(0, 2, cw=1)
+        embedding.add_half_edge(0, 3, ccw=1)
+        assert sorted(embedding.edges(data=True)) == [
+            (0, 1, {"ccw": 2, "cw": 3}),
+            (0, 2, {"cw": 1, "ccw": 3}),
+            (0, 3, {"cw": 2, "ccw": 1}),
+        ]
+
+    def test_get_data(self):
+        embedding = self.get_star_embedding(4)
+        data = embedding.get_data()
+        data_cmp = {0: [3, 2, 1], 1: [0], 2: [0], 3: [0]}
+        assert data == data_cmp
+
+    def test_edge_removal(self):
+        embedding = nx.PlanarEmbedding()
+        embedding.set_data(
+            {
+                1: [2, 5, 7],
+                2: [1, 3, 4, 5],
+                3: [2, 4],
+                4: [3, 6, 5, 2],
+                5: [7, 1, 2, 4],
+                6: [4, 7],
+                7: [6, 1, 5],
+            }
+        )
+        # remove_edges_from() calls remove_edge(), so both are tested here
+        embedding.remove_edges_from(((5, 4), (1, 5)))
+        embedding.check_structure()
+        embedding_expected = nx.PlanarEmbedding()
+        embedding_expected.set_data(
+            {
+                1: [2, 7],
+                2: [1, 3, 4, 5],
+                3: [2, 4],
+                4: [3, 6, 2],
+                5: [7, 2],
+                6: [4, 7],
+                7: [6, 1, 5],
+            }
+        )
+        assert nx.utils.graphs_equal(embedding, embedding_expected)
+
+    def test_missing_edge_orientation(self):
+        embedding = nx.PlanarEmbedding({1: {2: {}}, 2: {1: {}}})
+        with pytest.raises(nx.NetworkXException):
+            # Invalid structure because the orientation of the edge was not set
+            embedding.check_structure()
+
+    def test_invalid_edge_orientation(self):
+        embedding = nx.PlanarEmbedding(
+            {
+                # the half-edge (1, 3) has no cw/ccw orientation set
+                1: {2: {"cw": 2, "ccw": 2}, 3: {}},
+                2: {1: {"cw": 1, "ccw": 1}},
+                3: {1: {}},
+            }
+        )
+        with pytest.raises(nx.NetworkXException):
+            embedding.check_structure()
+
+    def test_missing_half_edge(self):
+        embedding = nx.PlanarEmbedding()
+        embedding.add_half_edge(1, 2)
+        with pytest.raises(nx.NetworkXException):
+            # Invalid structure because other half edge is missing
+            embedding.check_structure()
+
+    def test_not_fulfilling_euler_formula(self):
+        embedding = nx.PlanarEmbedding()
+        for i in range(5):
+            ref = None
+            for j in range(5):
+                if i != j:
+                    embedding.add_half_edge(i, j, cw=ref)
+                    ref = j
+        with pytest.raises(nx.NetworkXException):
+            embedding.check_structure()
+
+    def test_missing_reference(self):
+        embedding = nx.PlanarEmbedding()
+        with pytest.raises(nx.NetworkXException, match="Invalid reference node."):
+            embedding.add_half_edge(1, 2, ccw=3)
+
+    def test_connect_components(self):
+        embedding = nx.PlanarEmbedding()
+        embedding.connect_components(1, 2)
+
+    def test_successful_face_traversal(self):
+        embedding = nx.PlanarEmbedding()
+        embedding.add_half_edge(1, 2)
+        embedding.add_half_edge(2, 1)
+        face = embedding.traverse_face(1, 2)
+        assert face == [1, 2]
+
+    def test_unsuccessful_face_traversal(self):
+        embedding = nx.PlanarEmbedding(
+            {1: {2: {"cw": 3, "ccw": 2}}, 2: {1: {"cw": 3, "ccw": 1}}}
+        )
+        with pytest.raises(nx.NetworkXException):
+            embedding.traverse_face(1, 2)
+
+    def test_forbidden_methods(self):
+        embedding = nx.PlanarEmbedding()
+        embedding.add_node(42)  # no exception
+        embedding.add_nodes_from([(23, 24)])  # no exception
+        with pytest.raises(NotImplementedError):
+            embedding.add_edge(1, 3)
+        with pytest.raises(NotImplementedError):
+            embedding.add_edges_from([(0, 2), (1, 4)])
+        with pytest.raises(NotImplementedError):
+            embedding.add_weighted_edges_from([(0, 2, 350), (1, 4, 125)])
+
+    @staticmethod
+    def get_star_embedding(n):
+        embedding = nx.PlanarEmbedding()
+        ref = None
+        for i in range(1, n):
+            embedding.add_half_edge(0, i, cw=ref)
+            ref = i
+            embedding.add_half_edge(i, 0)
+        return embedding
diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_polynomials.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_polynomials.py new file mode 100644 index 0000000..a81d6a6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_polynomials.py @@ -0,0 +1,57 @@ +"""Unit tests for the :mod:`networkx.algorithms.polynomials` module.""" + +import pytest + +import networkx as nx + +sympy = pytest.importorskip("sympy") + + +# Mapping of input graphs to a string representation of their tutte polynomials +_test_tutte_graphs = { + nx.complete_graph(1): "1", + nx.complete_graph(4): "x**3 + 3*x**2 + 4*x*y + 2*x + y**3 + 3*y**2 + 2*y", + nx.cycle_graph(5): "x**4 + x**3 + x**2 + x + y", + nx.diamond_graph(): "x**3 + 2*x**2 + 2*x*y + x + y**2 + y", +} + +_test_chromatic_graphs = { + nx.complete_graph(1): "x", + nx.complete_graph(4): "x**4 - 6*x**3 + 11*x**2 - 6*x", + nx.cycle_graph(5): "x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x", + nx.diamond_graph(): "x**4 - 5*x**3 + 8*x**2 - 4*x", + nx.path_graph(5): "x**5 - 4*x**4 + 6*x**3 - 4*x**2 + x", +} + + +@pytest.mark.parametrize(("G", "expected"), _test_tutte_graphs.items()) +def test_tutte_polynomial(G, expected): + assert nx.tutte_polynomial(G).equals(expected) + + +@pytest.mark.parametrize("G", _test_tutte_graphs.keys()) +def test_tutte_polynomial_disjoint(G): + """Tutte polynomial factors into the Tutte polynomials of its components. + Verify this property with the disjoint union of two copies of the input graph. + """ + t_g = nx.tutte_polynomial(G) + H = nx.disjoint_union(G, G) + t_h = nx.tutte_polynomial(H) + assert sympy.simplify(t_g * t_g).equals(t_h) + + +@pytest.mark.parametrize(("G", "expected"), _test_chromatic_graphs.items()) +def test_chromatic_polynomial(G, expected): + assert nx.chromatic_polynomial(G).equals(expected) + + +@pytest.mark.parametrize("G", _test_chromatic_graphs.keys()) +def test_chromatic_polynomial_disjoint(G): + """Chromatic polynomial factors into the Chromatic polynomials of its + components. Verify this property with the disjoint union of two copies of + the input graph. 
+ """ + x_g = nx.chromatic_polynomial(G) + H = nx.disjoint_union(G, G) + x_h = nx.chromatic_polynomial(H) + assert sympy.simplify(x_g * x_g).equals(x_h) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_reciprocity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_reciprocity.py new file mode 100644 index 0000000..e713bc4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_reciprocity.py @@ -0,0 +1,37 @@ +import pytest + +import networkx as nx + + +class TestReciprocity: + # test overall reciprocity by passing whole graph + def test_reciprocity_digraph(self): + DG = nx.DiGraph([(1, 2), (2, 1)]) + reciprocity = nx.reciprocity(DG) + assert reciprocity == 1.0 + + # test empty graph's overall reciprocity which will throw an error + def test_overall_reciprocity_empty_graph(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph() + nx.overall_reciprocity(DG) + + # test for reciprocity for a list of nodes + def test_reciprocity_graph_nodes(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 2)]) + reciprocity = nx.reciprocity(DG, [1, 2]) + expected_reciprocity = {1: 0.0, 2: 0.6666666666666666} + assert reciprocity == expected_reciprocity + + # test for reciprocity for a single node + def test_reciprocity_graph_node(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 2)]) + reciprocity = nx.reciprocity(DG, 2) + assert reciprocity == 0.6666666666666666 + + # test for reciprocity for an isolated node + def test_reciprocity_graph_isolated_nodes(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph([(1, 2)]) + DG.add_node(4) + nx.reciprocity(DG, 4) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_regular.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_regular.py new file mode 100644 index 0000000..021ad32 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_regular.py @@ -0,0 +1,91 @@ +import pytest + +import networkx as nx +import networkx.algorithms.regular as reg +import networkx.generators as gen + + +class TestKFactor: + def test_k_factor_trivial(self): + g = gen.cycle_graph(4) + f = reg.k_factor(g, 2) + assert g.edges == f.edges + + def test_k_factor1(self): + g = gen.grid_2d_graph(4, 4) + g_kf = reg.k_factor(g, 2) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) + for _, degree in g_kf.degree(): + assert degree == 2 + + def test_k_factor2(self): + g = gen.complete_graph(6) + g_kf = reg.k_factor(g, 3) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) + for _, degree in g_kf.degree(): + assert degree == 3 + + def test_k_factor3(self): + g = gen.grid_2d_graph(4, 4) + with pytest.raises(nx.NetworkXUnfeasible): + reg.k_factor(g, 3) + + def test_k_factor4(self): + g = gen.lattice.hexagonal_lattice_graph(4, 4) + # Perfect matching doesn't exist for 4,4 hexagonal lattice graph + with pytest.raises(nx.NetworkXUnfeasible): + reg.k_factor(g, 2) + + def test_k_factor5(self): + g = gen.complete_graph(6) + # small k to exercise SmallKGadget + g_kf = reg.k_factor(g, 2) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) + for _, degree in g_kf.degree(): + assert degree == 2 + + +class TestIsRegular: + def test_is_regular1(self): + g = gen.cycle_graph(4) + assert reg.is_regular(g) + + def test_is_regular2(self): + g = gen.complete_graph(5) + assert reg.is_regular(g) + + def test_is_regular3(self): + g = gen.lollipop_graph(5, 5) + assert not reg.is_regular(g) + + def 
test_is_regular4(self):
+        g = nx.DiGraph()
+        g.add_edges_from([(0, 1), (1, 2), (2, 0)])
+        assert reg.is_regular(g)
+
+
+def test_is_regular_empty_graph_raises():
+    G = nx.Graph()
+    with pytest.raises(nx.NetworkXPointlessConcept, match="Graph has no nodes"):
+        nx.is_regular(G)
+
+
+class TestIsKRegular:
+    def test_is_k_regular1(self):
+        g = gen.cycle_graph(4)
+        assert reg.is_k_regular(g, 2)
+        assert not reg.is_k_regular(g, 3)
+
+    def test_is_k_regular2(self):
+        g = gen.complete_graph(5)
+        assert reg.is_k_regular(g, 4)
+        assert not reg.is_k_regular(g, 3)
+        assert not reg.is_k_regular(g, 6)
+
+    def test_is_k_regular3(self):
+        g = gen.lollipop_graph(5, 5)
+        assert not reg.is_k_regular(g, 5)
+        assert not reg.is_k_regular(g, 6)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_richclub.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_richclub.py
new file mode 100644
index 0000000..2172157
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_richclub.py
@@ -0,0 +1,149 @@
+import pytest
+
+import networkx as nx
+
+
+def test_richclub():
+    G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+    rc = nx.richclub.rich_club_coefficient(G, normalized=False)
+    assert rc == {0: 12.0 / 30, 1: 8.0 / 12}
+
+    # test single value
+    rc0 = nx.richclub.rich_club_coefficient(G, normalized=False)[0]
+    assert rc0 == 12.0 / 30.0
+
+
+def test_richclub_seed():
+    G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+    rcNorm = nx.richclub.rich_club_coefficient(G, Q=2, seed=1)
+    assert rcNorm == {0: 1.0, 1: 1.0}
+
+
+def test_richclub_normalized():
+    G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+    rcNorm = nx.richclub.rich_club_coefficient(G, Q=2, seed=42)
+    assert rcNorm == {0: 1.0, 1: 1.0}
+
+
+def test_richclub2():
+    T = nx.balanced_tree(2, 10)
+    rc = nx.richclub.rich_club_coefficient(T, normalized=False)
+    assert rc == {
+        0: 4092 / (2047 * 2046.0),
+        1: (2044.0 / (1023 * 1022)),
+        2: (2040.0 / (1022 * 1021)),
+    }
+
+
+def test_richclub3():
+    # tests edge case
+    G = nx.karate_club_graph()
+    rc = nx.rich_club_coefficient(G, normalized=False)
+    assert rc == {
+        0: 156.0 / 1122,
+        1: 154.0 / 1056,
+        2: 110.0 / 462,
+        3: 78.0 / 240,
+        4: 44.0 / 90,
+        5: 22.0 / 42,
+        6: 10.0 / 20,
+        7: 10.0 / 20,
+        8: 10.0 / 20,
+        9: 6.0 / 12,
+        10: 2.0 / 6,
+        11: 2.0 / 6,
+        12: 0.0,
+        13: 0.0,
+        14: 0.0,
+        15: 0.0,
+    }
+
+
+def test_richclub4():
+    G = nx.Graph()
+    G.add_edges_from(
+        [(0, 1), (0, 2), (0, 3), (0, 4), (4, 5), (5, 9), (6, 9), (7, 9), (8, 9)]
+    )
+    rc = nx.rich_club_coefficient(G, normalized=False)
+    assert rc == {0: 18 / 90.0, 1: 6 / 12.0, 2: 0.0, 3: 0.0}
+
+
+def test_richclub_exception():
+    with pytest.raises(nx.NetworkXNotImplemented):
+        G = nx.DiGraph()
+        nx.rich_club_coefficient(G)
+
+
+def test_rich_club_exception2():
+    with pytest.raises(nx.NetworkXNotImplemented):
+        G = nx.MultiGraph()
+        nx.rich_club_coefficient(G)
+
+
+def test_rich_club_selfloop():
+    G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+    G.add_edge(1, 1)  # self loop
+    G.add_edge(1, 2)
+    with pytest.raises(
+        Exception,
+        match="rich_club_coefficient is not implemented for graphs with self loops.",
+    ):
+        nx.rich_club_coefficient(G)
+
+
+def test_rich_club_leq_3_nodes_unnormalized():
+    # edgeless graphs up to 3 nodes
+    G = nx.Graph()
+    rc = nx.rich_club_coefficient(G, normalized=False)
+    assert rc == {}
+
+    for i in range(3):
+        G.add_node(i)
+        rc = nx.rich_club_coefficient(G, normalized=False)
+        assert rc == {}
+
+    # 2 nodes, 
single edge + G = nx.Graph() + G.add_edge(0, 1) + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == {0: 1} + + # 3 nodes, single edge + G = nx.Graph() + G.add_nodes_from([0, 1, 2]) + G.add_edge(0, 1) + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == {0: 1} + + # 3 nodes, 2 edges + G.add_edge(1, 2) + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == {0: 2 / 3} + + # 3 nodes, 3 edges + G.add_edge(0, 2) + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == {0: 1, 1: 1} + + +def test_rich_club_leq_3_nodes_normalized(): + G = nx.Graph() + with pytest.raises( + nx.exception.NetworkXError, + match="Graph has fewer than four nodes", + ): + rc = nx.rich_club_coefficient(G, normalized=True) + + for i in range(3): + G.add_node(i) + with pytest.raises( + nx.exception.NetworkXError, + match="Graph has fewer than four nodes", + ): + rc = nx.rich_club_coefficient(G, normalized=True) + + +# def test_richclub2_normalized(): +# T = nx.balanced_tree(2,10) +# rcNorm = nx.richclub.rich_club_coefficient(T,Q=2) +# assert_true(rcNorm[0] ==1.0 and rcNorm[1] < 0.9 and rcNorm[2] < 0.9) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_similarity.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_similarity.py new file mode 100644 index 0000000..81d870a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_similarity.py @@ -0,0 +1,984 @@ +import pytest + +import networkx as nx +from networkx.algorithms.similarity import ( + graph_edit_distance, + optimal_edit_paths, + optimize_graph_edit_distance, +) +from networkx.generators.classic import ( + circular_ladder_graph, + cycle_graph, + path_graph, + wheel_graph, +) + + +@pytest.mark.parametrize("source", (10, "foo")) +def test_generate_random_paths_source_not_in_G(source): + pytest.importorskip("numpy") + G = nx.complete_graph(5) + # No exception at generator construction time + path_gen = nx.generate_random_paths(G, sample_size=3, source=source) + with pytest.raises(nx.NodeNotFound, match="Initial node.*not in G"): + next(path_gen) + + +def nmatch(n1, n2): + return n1 == n2 + + +def ematch(e1, e2): + return e1 == e2 + + +def getCanonical(): + G = nx.Graph() + G.add_node("A", label="A") + G.add_node("B", label="B") + G.add_node("C", label="C") + G.add_node("D", label="D") + G.add_edge("A", "B", label="a-b") + G.add_edge("B", "C", label="b-c") + G.add_edge("B", "D", label="b-d") + return G + + +class TestSimilarity: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_graph_edit_distance_roots_and_timeout(self): + G0 = nx.star_graph(5) + G1 = G0.copy() + pytest.raises(ValueError, graph_edit_distance, G0, G1, roots=[2]) + pytest.raises(ValueError, graph_edit_distance, G0, G1, roots=[2, 3, 4]) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(9, 3)) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(3, 9)) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(9, 9)) + assert graph_edit_distance(G0, G1, roots=(1, 2)) == 0 + assert graph_edit_distance(G0, G1, roots=(0, 1)) == 8 + assert graph_edit_distance(G0, G1, roots=(1, 2), timeout=5) == 0 + assert graph_edit_distance(G0, G1, roots=(0, 1), timeout=5) == 8 + assert graph_edit_distance(G0, G1, roots=(0, 1), timeout=0.0001) is None + # test raise on 0 timeout + pytest.raises(nx.NetworkXError, graph_edit_distance, G0, G1, timeout=0) + + def 
test_graph_edit_distance(self): + G0 = nx.Graph() + G1 = path_graph(6) + G2 = cycle_graph(6) + G3 = wheel_graph(7) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 11 + assert graph_edit_distance(G1, G0) == 11 + assert graph_edit_distance(G0, G2) == 12 + assert graph_edit_distance(G2, G0) == 12 + assert graph_edit_distance(G0, G3) == 19 + assert graph_edit_distance(G3, G0) == 19 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 1 + assert graph_edit_distance(G2, G1) == 1 + assert graph_edit_distance(G1, G3) == 8 + assert graph_edit_distance(G3, G1) == 8 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 7 + assert graph_edit_distance(G3, G2) == 7 + + assert graph_edit_distance(G3, G3) == 0 + + def test_graph_edit_distance_node_match(self): + G1 = cycle_graph(5) + G2 = cycle_graph(5) + for n, attr in G1.nodes.items(): + attr["color"] = "red" if n % 2 == 0 else "blue" + for n, attr in G2.nodes.items(): + attr["color"] = "red" if n % 2 == 1 else "blue" + assert graph_edit_distance(G1, G2) == 0 + assert ( + graph_edit_distance( + G1, G2, node_match=lambda n1, n2: n1["color"] == n2["color"] + ) + == 1 + ) + + def test_graph_edit_distance_edge_match(self): + G1 = path_graph(6) + G2 = path_graph(6) + for e, attr in G1.edges.items(): + attr["color"] = "red" if min(e) % 2 == 0 else "blue" + for e, attr in G2.edges.items(): + attr["color"] = "red" if min(e) // 3 == 0 else "blue" + assert graph_edit_distance(G1, G2) == 0 + assert ( + graph_edit_distance( + G1, G2, edge_match=lambda e1, e2: e1["color"] == e2["color"] + ) + == 2 + ) + + def test_graph_edit_distance_node_cost(self): + G1 = path_graph(6) + G2 = path_graph(6) + for n, attr in G1.nodes.items(): + attr["color"] = "red" if n % 2 == 0 else "blue" + for n, attr in G2.nodes.items(): + attr["color"] = "red" if n % 2 == 1 else "blue" + + def node_subst_cost(uattr, vattr): + if uattr["color"] == vattr["color"]: + return 1 + else: + return 10 + + def node_del_cost(attr): + if attr["color"] == "blue": + return 20 + else: + return 50 + + def node_ins_cost(attr): + if attr["color"] == "blue": + return 40 + else: + return 100 + + assert ( + graph_edit_distance( + G1, + G2, + node_subst_cost=node_subst_cost, + node_del_cost=node_del_cost, + node_ins_cost=node_ins_cost, + ) + == 6 + ) + + def test_graph_edit_distance_edge_cost(self): + G1 = path_graph(6) + G2 = path_graph(6) + for e, attr in G1.edges.items(): + attr["color"] = "red" if min(e) % 2 == 0 else "blue" + for e, attr in G2.edges.items(): + attr["color"] = "red" if min(e) // 3 == 0 else "blue" + + def edge_subst_cost(gattr, hattr): + if gattr["color"] == hattr["color"]: + return 0.01 + else: + return 0.1 + + def edge_del_cost(attr): + if attr["color"] == "blue": + return 0.2 + else: + return 0.5 + + def edge_ins_cost(attr): + if attr["color"] == "blue": + return 0.4 + else: + return 1.0 + + assert ( + graph_edit_distance( + G1, + G2, + edge_subst_cost=edge_subst_cost, + edge_del_cost=edge_del_cost, + edge_ins_cost=edge_ins_cost, + ) + == 0.23 + ) + + def test_graph_edit_distance_upper_bound(self): + G1 = circular_ladder_graph(2) + G2 = circular_ladder_graph(6) + assert graph_edit_distance(G1, G2, upper_bound=5) is None + assert graph_edit_distance(G1, G2, upper_bound=24) == 22 + assert graph_edit_distance(G1, G2) == 22 + + def test_optimal_edit_paths(self): + G1 = path_graph(3) + G2 = cycle_graph(3) + paths, cost = optimal_edit_paths(G1, G2) + assert cost == 1 + assert len(paths) == 6 + + def 
canonical(vertex_path, edge_path): + return ( + tuple(sorted(vertex_path)), + tuple(sorted(edge_path, key=lambda x: (None in x, x))), + ) + + expected_paths = [ + ( + [(0, 0), (1, 1), (2, 2)], + [((0, 1), (0, 1)), ((1, 2), (1, 2)), (None, (0, 2))], + ), + ( + [(0, 0), (1, 2), (2, 1)], + [((0, 1), (0, 2)), ((1, 2), (1, 2)), (None, (0, 1))], + ), + ( + [(0, 1), (1, 0), (2, 2)], + [((0, 1), (0, 1)), ((1, 2), (0, 2)), (None, (1, 2))], + ), + ( + [(0, 1), (1, 2), (2, 0)], + [((0, 1), (1, 2)), ((1, 2), (0, 2)), (None, (0, 1))], + ), + ( + [(0, 2), (1, 0), (2, 1)], + [((0, 1), (0, 2)), ((1, 2), (0, 1)), (None, (1, 2))], + ), + ( + [(0, 2), (1, 1), (2, 0)], + [((0, 1), (1, 2)), ((1, 2), (0, 1)), (None, (0, 2))], + ), + ] + assert {canonical(*p) for p in paths} == {canonical(*p) for p in expected_paths} + + def test_optimize_graph_edit_distance(self): + G1 = circular_ladder_graph(2) + G2 = circular_ladder_graph(6) + bestcost = 1000 + for cost in optimize_graph_edit_distance(G1, G2): + assert cost < bestcost + bestcost = cost + assert bestcost == 22 + + # def test_graph_edit_distance_bigger(self): + # G1 = circular_ladder_graph(12) + # G2 = circular_ladder_graph(16) + # assert_equal(graph_edit_distance(G1, G2), 22) + + def test_selfloops(self): + G0 = nx.Graph() + G1 = nx.Graph() + G1.add_edges_from((("A", "A"), ("A", "B"))) + G2 = nx.Graph() + G2.add_edges_from((("A", "B"), ("B", "B"))) + G3 = nx.Graph() + G3.add_edges_from((("A", "A"), ("A", "B"), ("B", "B"))) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 4 + assert graph_edit_distance(G1, G0) == 4 + assert graph_edit_distance(G0, G2) == 4 + assert graph_edit_distance(G2, G0) == 4 + assert graph_edit_distance(G0, G3) == 5 + assert graph_edit_distance(G3, G0) == 5 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 0 + assert graph_edit_distance(G2, G1) == 0 + assert graph_edit_distance(G1, G3) == 1 + assert graph_edit_distance(G3, G1) == 1 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 1 + assert graph_edit_distance(G3, G2) == 1 + + assert graph_edit_distance(G3, G3) == 0 + + def test_digraph(self): + G0 = nx.DiGraph() + G1 = nx.DiGraph() + G1.add_edges_from((("A", "B"), ("B", "C"), ("C", "D"), ("D", "A"))) + G2 = nx.DiGraph() + G2.add_edges_from((("A", "B"), ("B", "C"), ("C", "D"), ("A", "D"))) + G3 = nx.DiGraph() + G3.add_edges_from((("A", "B"), ("A", "C"), ("B", "D"), ("C", "D"))) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 8 + assert graph_edit_distance(G1, G0) == 8 + assert graph_edit_distance(G0, G2) == 8 + assert graph_edit_distance(G2, G0) == 8 + assert graph_edit_distance(G0, G3) == 8 + assert graph_edit_distance(G3, G0) == 8 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 2 + assert graph_edit_distance(G2, G1) == 2 + assert graph_edit_distance(G1, G3) == 4 + assert graph_edit_distance(G3, G1) == 4 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 2 + assert graph_edit_distance(G3, G2) == 2 + + assert graph_edit_distance(G3, G3) == 0 + + def test_multigraph(self): + G0 = nx.MultiGraph() + G1 = nx.MultiGraph() + G1.add_edges_from((("A", "B"), ("B", "C"), ("A", "C"))) + G2 = nx.MultiGraph() + G2.add_edges_from((("A", "B"), ("B", "C"), ("B", "C"), ("A", "C"))) + G3 = nx.MultiGraph() + G3.add_edges_from((("A", "B"), ("B", "C"), ("A", "C"), ("A", "C"), ("A", "C"))) + + assert graph_edit_distance(G0, G0) == 0 + assert 
graph_edit_distance(G0, G1) == 6
+        assert graph_edit_distance(G1, G0) == 6
+        assert graph_edit_distance(G0, G2) == 7
+        assert graph_edit_distance(G2, G0) == 7
+        assert graph_edit_distance(G0, G3) == 8
+        assert graph_edit_distance(G3, G0) == 8
+
+        assert graph_edit_distance(G1, G1) == 0
+        assert graph_edit_distance(G1, G2) == 1
+        assert graph_edit_distance(G2, G1) == 1
+        assert graph_edit_distance(G1, G3) == 2
+        assert graph_edit_distance(G3, G1) == 2
+
+        assert graph_edit_distance(G2, G2) == 0
+        assert graph_edit_distance(G2, G3) == 1
+        assert graph_edit_distance(G3, G2) == 1
+
+        assert graph_edit_distance(G3, G3) == 0
+
+    def test_multidigraph(self):
+        G1 = nx.MultiDiGraph()
+        G1.add_edges_from(
+            (
+                ("hardware", "kernel"),
+                ("kernel", "hardware"),
+                ("kernel", "userspace"),
+                ("userspace", "kernel"),
+            )
+        )
+        G2 = nx.MultiDiGraph()
+        G2.add_edges_from(
+            (
+                ("winter", "spring"),
+                ("spring", "summer"),
+                ("summer", "autumn"),
+                ("autumn", "winter"),
+            )
+        )
+
+        assert graph_edit_distance(G1, G2) == 5
+        assert graph_edit_distance(G2, G1) == 5
+
+    # by https://github.com/jfbeaumont
+    def testCopy(self):
+        G = nx.Graph()
+        G.add_node("A", label="A")
+        G.add_node("B", label="B")
+        G.add_edge("A", "B", label="a-b")
+        assert (
+            graph_edit_distance(G, G.copy(), node_match=nmatch, edge_match=ematch) == 0
+        )
+
+    def testSame(self):
+        G1 = nx.Graph()
+        G1.add_node("A", label="A")
+        G1.add_node("B", label="B")
+        G1.add_edge("A", "B", label="a-b")
+        G2 = nx.Graph()
+        G2.add_node("A", label="A")
+        G2.add_node("B", label="B")
+        G2.add_edge("A", "B", label="a-b")
+        assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 0
+
+    def testOneEdgeLabelDiff(self):
+        G1 = nx.Graph()
+        G1.add_node("A", label="A")
+        G1.add_node("B", label="B")
+        G1.add_edge("A", "B", label="a-b")
+        G2 = nx.Graph()
+        G2.add_node("A", label="A")
+        G2.add_node("B", label="B")
+        G2.add_edge("A", "B", label="bad")
+        assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1
+
+    def testOneNodeLabelDiff(self):
+        G1 = nx.Graph()
+        G1.add_node("A", label="A")
+        G1.add_node("B", label="B")
+        G1.add_edge("A", "B", label="a-b")
+        G2 = nx.Graph()
+        G2.add_node("A", label="Z")
+        G2.add_node("B", label="B")
+        G2.add_edge("A", "B", label="a-b")
+        assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1
+
+    def testOneExtraNode(self):
+        G1 = nx.Graph()
+        G1.add_node("A", label="A")
+        G1.add_node("B", label="B")
+        G1.add_edge("A", "B", label="a-b")
+        G2 = nx.Graph()
+        G2.add_node("A", label="A")
+        G2.add_node("B", label="B")
+        G2.add_edge("A", "B", label="a-b")
+        G2.add_node("C", label="C")
+        assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1
+
+    def testOneExtraEdge(self):
+        G1 = nx.Graph()
+        G1.add_node("A", label="A")
+        G1.add_node("B", label="B")
+        G1.add_node("C", label="C")
+        G1.add_edge("A", "B", label="a-b")
+        G2 = nx.Graph()
+        G2.add_node("A", label="A")
+        G2.add_node("B", label="B")
+        G2.add_node("C", label="C")
+        G2.add_edge("A", "B", label="a-b")
+        G2.add_edge("A", "C", label="a-c")
+        assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1
+
+    def testOneExtraNodeAndEdge(self):
+        G1 = nx.Graph()
+        G1.add_node("A", label="A")
+        G1.add_node("B", label="B")
+        G1.add_edge("A", "B", label="a-b")
+        G2 = nx.Graph()
+        G2.add_node("A", label="A")
+        G2.add_node("B", label="B")
+        G2.add_node("C", label="C")
+        G2.add_edge("A", "B", label="a-b")
+        G2.add_edge("A", "C", label="a-c")
+        assert 
graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph1(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "D", label="b-d") + G2.add_edge("D", "E", label="d-e") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 3 + + def testGraph2(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("C", "D", label="c-d") + G2.add_edge("C", "E", label="c-e") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 4 + + def testGraph3(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_node("F", label="F") + G2.add_node("G", label="G") + G2.add_edge("A", "C", label="a-c") + G2.add_edge("A", "D", label="a-d") + G2.add_edge("D", "E", label="d-e") + G2.add_edge("D", "F", label="d-f") + G2.add_edge("D", "G", label="d-g") + G2.add_edge("E", "B", label="e-b") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 12 + + def testGraph4(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("C", "D", label="c-d") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph4_a(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("A", "D", label="a-d") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph4_b(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("B", "D", label="bad") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + # note: nx.simrank_similarity_numpy not included because returns np.array + simrank_algs = [ + nx.simrank_similarity, + nx.algorithms.similarity._simrank_similarity_python, + ] + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_no_source_no_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = { + 0: { + 0: 1, + 1: 0.3951219505902448, + 2: 0.5707317069281646, + 3: 0.5707317069281646, + 4: 0.3951219505902449, + }, + 1: { + 0: 0.3951219505902448, + 1: 1, + 2: 0.3951219505902449, + 3: 0.5707317069281646, + 4: 0.5707317069281646, + }, + 2: { + 0: 0.5707317069281646, + 1: 0.3951219505902449, + 2: 1, + 3: 0.3951219505902449, + 4: 0.5707317069281646, + }, + 3: { + 0: 0.5707317069281646, + 1: 0.5707317069281646, + 2: 0.3951219505902449, + 3: 1, + 4: 0.3951219505902449, + }, + 4: { + 0: 0.3951219505902449, + 1: 0.5707317069281646, + 2: 0.5707317069281646, + 3: 0.3951219505902449, + 4: 1, + 
}, + } + actual = simrank_similarity(G) + for k, v in expected.items(): + assert v == pytest.approx(actual[k], abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = { + 0: {0: 1, 1: 0.0, 2: 0.1323363991265798, 3: 0.0, 4: 0.03387811817640443}, + 1: {0: 0.0, 1: 1, 2: 0.4135512472705618, 3: 0.0, 4: 0.10586911930126384}, + 2: { + 0: 0.1323363991265798, + 1: 0.4135512472705618, + 2: 1, + 3: 0.04234764772050554, + 4: 0.08822426608438655, + }, + 3: {0: 0.0, 1: 0.0, 2: 0.04234764772050554, 3: 1, 4: 0.3308409978164495}, + 4: { + 0: 0.03387811817640443, + 1: 0.10586911930126384, + 2: 0.08822426608438655, + 3: 0.3308409978164495, + 4: 1, + }, + } + # Use the importance_factor from the paper to get the same numbers. + actual = simrank_similarity(G, importance_factor=0.8) + for k, v in expected.items(): + assert v == pytest.approx(actual[k], abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_source_no_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = { + 0: 1, + 1: 0.3951219505902448, + 2: 0.5707317069281646, + 3: 0.5707317069281646, + 4: 0.3951219505902449, + } + actual = simrank_similarity(G, source=0) + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = {0: 1, 1: 0.0, 2: 0.1323363991265798, 3: 0.0, 4: 0.03387811817640443} + # Use the importance_factor from the paper to get the same numbers. + actual = simrank_similarity(G, importance_factor=0.8, source=0) + assert expected == pytest.approx(actual, abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_noninteger_nodes(self, simrank_similarity): + G = nx.cycle_graph(5) + G = nx.relabel_nodes(G, dict(enumerate("abcde"))) + expected = { + "a": 1, + "b": 0.3951219505902448, + "c": 0.5707317069281646, + "d": 0.5707317069281646, + "e": 0.3951219505902449, + } + actual = simrank_similarity(G, source="a") + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + node_labels = dict(enumerate(nx.get_node_attributes(G, "label").values())) + G = nx.relabel_nodes(G, node_labels) + + expected = { + "Univ": 1, + "ProfA": 0.0, + "ProfB": 0.1323363991265798, + "StudentA": 0.0, + "StudentB": 0.03387811817640443, + } + # Use the importance_factor from the paper to get the same numbers. 
+ actual = simrank_similarity(G, importance_factor=0.8, source="Univ") + assert expected == pytest.approx(actual, abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_source_and_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = 1 + actual = simrank_similarity(G, source=0, target=0) + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = 0.1323363991265798 + # Use the importance_factor from the paper to get the same numbers. + # Use the pair (0,2) because (0,0) and (0,1) have trivial results. + actual = simrank_similarity(G, importance_factor=0.8, source=0, target=2) + assert expected == pytest.approx(actual, abs=1e-5) + + @pytest.mark.parametrize("alg", simrank_algs) + def test_simrank_max_iterations(self, alg): + G = nx.cycle_graph(5) + pytest.raises(nx.ExceededMaxIterations, alg, G, max_iterations=10) + + def test_simrank_source_not_found(self): + G = nx.cycle_graph(5) + with pytest.raises(nx.NodeNotFound, match="Source node 10 not in G"): + nx.simrank_similarity(G, source=10) + + def test_simrank_target_not_found(self): + G = nx.cycle_graph(5) + with pytest.raises(nx.NodeNotFound, match="Target node 10 not in G"): + nx.simrank_similarity(G, target=10) + + def test_simrank_between_versions(self): + G = nx.cycle_graph(5) + # _python tolerance 1e-4 + expected_python_tol4 = { + 0: 1, + 1: 0.394512499239852, + 2: 0.5703550452791322, + 3: 0.5703550452791323, + 4: 0.394512499239852, + } + # _numpy tolerance 1e-4 + expected_numpy_tol4 = { + 0: 1.0, + 1: 0.3947180735764555, + 2: 0.570482097206368, + 3: 0.570482097206368, + 4: 0.3947180735764555, + } + actual = nx.simrank_similarity(G, source=0) + assert expected_numpy_tol4 == pytest.approx(actual, abs=1e-7) + # versions differ at 1e-4 level but equal at 1e-3 + assert expected_python_tol4 != pytest.approx(actual, abs=1e-4) + assert expected_python_tol4 == pytest.approx(actual, abs=1e-3) + + actual = nx.similarity._simrank_similarity_python(G, source=0) + assert expected_python_tol4 == pytest.approx(actual, abs=1e-7) + # versions differ at 1e-4 level but equal at 1e-3 + assert expected_numpy_tol4 != pytest.approx(actual, abs=1e-4) + assert expected_numpy_tol4 == pytest.approx(actual, abs=1e-3) + + def test_simrank_numpy_no_source_no_target(self): + G = nx.cycle_graph(5) + expected = np.array( + [ + [ + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + ], + [ + 0.3947180735764555, + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + ], + [ + 0.570482097206368, + 0.3947180735764555, + 1.0, + 0.3947180735764555, + 0.570482097206368, + ], + [ + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + 1.0, + 0.3947180735764555, + ], + [ + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + 1.0, + ], + ] + ) + actual = nx.similarity._simrank_similarity_numpy(G) + np.testing.assert_allclose(expected, actual, atol=1e-7) + + def test_simrank_numpy_source_no_target(self): + G = nx.cycle_graph(5) + expected = np.array( + [ + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 
0.3947180735764555, + ] + ) + actual = nx.similarity._simrank_similarity_numpy(G, source=0) + np.testing.assert_allclose(expected, actual, atol=1e-7) + + def test_simrank_numpy_source_and_target(self): + G = nx.cycle_graph(5) + expected = 1.0 + actual = nx.similarity._simrank_similarity_numpy(G, source=0, target=0) + np.testing.assert_allclose(expected, actual, atol=1e-7) + + def test_panther_similarity_unweighted(self): + np.random.seed(42) + + G = nx.Graph() + G.add_edge(0, 1) + G.add_edge(0, 2) + G.add_edge(0, 3) + G.add_edge(1, 2) + G.add_edge(2, 4) + expected = {3: 0.5, 2: 0.5, 1: 0.5, 4: 0.125} + sim = nx.panther_similarity(G, 0, path_length=2) + assert sim == expected + + def test_panther_similarity_weighted(self): + np.random.seed(42) + + G = nx.Graph() + G.add_edge("v1", "v2", w=5) + G.add_edge("v1", "v3", w=1) + G.add_edge("v1", "v4", w=2) + G.add_edge("v2", "v3", w=0.1) + G.add_edge("v3", "v5", w=1) + expected = {"v3": 0.75, "v4": 0.5, "v2": 0.5, "v5": 0.25} + sim = nx.panther_similarity(G, "v1", path_length=2, weight="w") + assert sim == expected + + def test_panther_similarity_source_not_found(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 2), (2, 4)]) + with pytest.raises(nx.NodeNotFound, match="Source node 10 not in G"): + nx.panther_similarity(G, source=10) + + def test_panther_similarity_isolated(self): + G = nx.Graph() + G.add_nodes_from(range(5)) + with pytest.raises( + nx.NetworkXUnfeasible, + match="Panther similarity is not defined for the isolated source node 1.", + ): + nx.panther_similarity(G, source=1) + + @pytest.mark.parametrize("num_paths", (1, 3, 10)) + @pytest.mark.parametrize("source", (0, 1)) + def test_generate_random_paths_with_start(self, num_paths, source): + G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (2, 4)]) + index_map = {} + + path_gen = nx.generate_random_paths( + G, + num_paths, + path_length=2, + index_map=index_map, + source=source, + ) + paths = list(path_gen) + + # There should be num_paths paths + assert len(paths) == num_paths + # And they should all start with `source` + assert all(p[0] == source for p in paths) + # The index_map for the `source` node should contain the indices for + # all of the generated paths. 
+ assert sorted(index_map[source]) == list(range(num_paths)) + + def test_generate_random_paths_unweighted(self): + index_map = {} + num_paths = 10 + path_length = 2 + G = nx.Graph() + G.add_edge(0, 1) + G.add_edge(0, 2) + G.add_edge(0, 3) + G.add_edge(1, 2) + G.add_edge(2, 4) + paths = nx.generate_random_paths( + G, num_paths, path_length=path_length, index_map=index_map, seed=42 + ) + expected_paths = [ + [3, 0, 3], + [4, 2, 1], + [2, 1, 0], + [2, 0, 3], + [3, 0, 1], + [3, 0, 1], + [4, 2, 0], + [2, 1, 0], + [3, 0, 2], + [2, 1, 2], + ] + expected_map = { + 0: {0, 2, 3, 4, 5, 6, 7, 8}, + 1: {1, 2, 4, 5, 7, 9}, + 2: {1, 2, 3, 6, 7, 8, 9}, + 3: {0, 3, 4, 5, 8}, + 4: {1, 6}, + } + + assert expected_paths == list(paths) + assert expected_map == index_map + + def test_generate_random_paths_weighted(self): + np.random.seed(42) + + index_map = {} + num_paths = 10 + path_length = 6 + G = nx.Graph() + G.add_edge("a", "b", weight=0.6) + G.add_edge("a", "c", weight=0.2) + G.add_edge("c", "d", weight=0.1) + G.add_edge("c", "e", weight=0.7) + G.add_edge("c", "f", weight=0.9) + G.add_edge("a", "d", weight=0.3) + paths = nx.generate_random_paths( + G, num_paths, path_length=path_length, index_map=index_map + ) + + expected_paths = [ + ["d", "c", "f", "c", "d", "a", "b"], + ["e", "c", "f", "c", "f", "c", "e"], + ["d", "a", "b", "a", "b", "a", "c"], + ["b", "a", "d", "a", "b", "a", "b"], + ["d", "a", "b", "a", "b", "a", "d"], + ["d", "a", "b", "a", "b", "a", "c"], + ["d", "a", "b", "a", "b", "a", "b"], + ["f", "c", "f", "c", "f", "c", "e"], + ["d", "a", "d", "a", "b", "a", "b"], + ["e", "c", "f", "c", "e", "c", "d"], + ] + expected_map = { + "d": {0, 2, 3, 4, 5, 6, 8, 9}, + "c": {0, 1, 2, 5, 7, 9}, + "f": {0, 1, 9, 7}, + "a": {0, 2, 3, 4, 5, 6, 8}, + "b": {0, 2, 3, 4, 5, 6, 8}, + "e": {1, 9, 7}, + } + + assert expected_paths == list(paths) + assert expected_map == index_map + + def test_symmetry_with_custom_matching(self): + """G2 has edge (a,b) and G3 has edge (a,a) but node order for G2 is (a,b) + while for G3 it is (b,a)""" + + a, b = "A", "B" + G2 = nx.Graph() + G2.add_nodes_from((a, b)) + G2.add_edges_from([(a, b)]) + G3 = nx.Graph() + G3.add_nodes_from((b, a)) + G3.add_edges_from([(a, a)]) + for G in (G2, G3): + for n in G: + G.nodes[n]["attr"] = n + for e in G.edges: + G.edges[e]["attr"] = e + + def user_match(x, y): + return x == y + + assert ( + nx.graph_edit_distance(G2, G3, node_match=user_match, edge_match=user_match) + == 1 + ) + assert ( + nx.graph_edit_distance(G3, G2, node_match=user_match, edge_match=user_match) + == 1 + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_simple_paths.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_simple_paths.py new file mode 100644 index 0000000..7855bba --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_simple_paths.py @@ -0,0 +1,803 @@ +import random + +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.algorithms.simple_paths import ( + _bidirectional_dijkstra, + _bidirectional_shortest_path, +) +from networkx.utils import arbitrary_element, pairwise + + +class TestIsSimplePath: + """Unit tests for the + :func:`networkx.algorithms.simple_paths.is_simple_path` function. + + """ + + def test_empty_list(self): + """Tests that the empty list is not a valid path, since there + should be a one-to-one correspondence between paths as lists of + nodes and paths as lists of edges. 
+ + """ + G = nx.trivial_graph() + assert not nx.is_simple_path(G, []) + + def test_trivial_path(self): + """Tests that the trivial path, a path of length one, is + considered a simple path in a graph. + + """ + G = nx.trivial_graph() + assert nx.is_simple_path(G, [0]) + + def test_trivial_nonpath(self): + """Tests that a list whose sole element is an object not in the + graph is not considered a simple path. + + """ + G = nx.trivial_graph() + assert not nx.is_simple_path(G, ["not a node"]) + + def test_simple_path(self): + G = nx.path_graph(2) + assert nx.is_simple_path(G, [0, 1]) + + def test_non_simple_path(self): + G = nx.path_graph(2) + assert not nx.is_simple_path(G, [0, 1, 0]) + + def test_cycle(self): + G = nx.cycle_graph(3) + assert not nx.is_simple_path(G, [0, 1, 2, 0]) + + def test_missing_node(self): + G = nx.path_graph(2) + assert not nx.is_simple_path(G, [0, 2]) + + def test_missing_starting_node(self): + G = nx.path_graph(2) + assert not nx.is_simple_path(G, [2, 0]) + + def test_directed_path(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + assert nx.is_simple_path(G, [0, 1, 2]) + + def test_directed_non_path(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + assert not nx.is_simple_path(G, [2, 1, 0]) + + def test_directed_cycle(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + assert not nx.is_simple_path(G, [0, 1, 2, 0]) + + def test_multigraph(self): + G = nx.MultiGraph([(0, 1), (0, 1)]) + assert nx.is_simple_path(G, [0, 1]) + + def test_multidigraph(self): + G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 0), (1, 0)]) + assert nx.is_simple_path(G, [0, 1]) + + +# Tests for all_simple_paths +def test_all_simple_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3)} + + +def test_all_simple_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_digraph_all_simple_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_all_simple_paths_with_two_targets_cutoff(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_digraph_all_simple_paths_with_two_targets_cutoff(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_all_simple_paths_with_two_targets_in_line_emits_two_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {(0, 1, 2), (0, 1, 2, 3)} + + +def test_all_simple_paths_ignores_cycle(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {(0, 1, 3)} + + +def test_all_simple_paths_with_two_targets_inside_cycle_emits_two_paths(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {(0, 1, 2), (0, 1, 3)} + + +def test_all_simple_paths_source_target(): + G = nx.path_graph(4) + assert list(nx.all_simple_paths(G, 1, 1)) == [[1]] + + +def test_all_simple_paths_cutoff(): + G = 
nx.complete_graph(4) + paths = nx.all_simple_paths(G, 0, 1, cutoff=1) + assert {tuple(p) for p in paths} == {(0, 1)} + paths = nx.all_simple_paths(G, 0, 1, cutoff=2) + assert {tuple(p) for p in paths} == {(0, 1), (0, 2, 1), (0, 3, 1)} + + +def test_all_simple_paths_on_non_trivial_graph(): + """you may need to draw this graph to make sure it is reasonable""" + G = nx.path_graph(5, create_using=nx.DiGraph()) + G.add_edges_from([(0, 5), (1, 5), (1, 3), (5, 4), (4, 2), (4, 3)]) + paths = nx.all_simple_paths(G, 1, [2, 3]) + assert {tuple(p) for p in paths} == { + (1, 2), + (1, 3, 4, 2), + (1, 5, 4, 2), + (1, 3), + (1, 2, 3), + (1, 5, 4, 3), + (1, 5, 4, 2, 3), + } + paths = nx.all_simple_paths(G, 1, [2, 3], cutoff=3) + assert {tuple(p) for p in paths} == { + (1, 2), + (1, 3, 4, 2), + (1, 5, 4, 2), + (1, 3), + (1, 2, 3), + (1, 5, 4, 3), + } + paths = nx.all_simple_paths(G, 1, [2, 3], cutoff=2) + assert {tuple(p) for p in paths} == {(1, 2), (1, 3), (1, 2, 3)} + + +def test_all_simple_paths_multigraph(): + G = nx.MultiGraph([(1, 2), (1, 2)]) + assert list(nx.all_simple_paths(G, 1, 1)) == [[1]] + nx.add_path(G, [3, 1, 10, 2]) + paths = list(nx.all_simple_paths(G, 1, 2)) + assert len(paths) == 3 + assert {tuple(p) for p in paths} == {(1, 2), (1, 2), (1, 10, 2)} + + +def test_all_simple_paths_multigraph_with_cutoff(): + G = nx.MultiGraph([(1, 2), (1, 2), (1, 10), (10, 2)]) + paths = list(nx.all_simple_paths(G, 1, 2, cutoff=1)) + assert len(paths) == 2 + assert {tuple(p) for p in paths} == {(1, 2), (1, 2)} + + # See GitHub issue #6732. + G = nx.MultiGraph([(0, 1), (0, 2)]) + assert list(nx.all_simple_paths(G, 0, {1, 2}, cutoff=1)) == [[0, 1], [0, 2]] + + +def test_all_simple_paths_directed(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [3, 2, 1]) + paths = nx.all_simple_paths(G, 1, 3) + assert {tuple(p) for p in paths} == {(1, 2, 3)} + + +def test_all_simple_paths_empty(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 0, 3, cutoff=2) + assert list(paths) == [] + + +def test_all_simple_paths_corner_cases(): + assert list(nx.all_simple_paths(nx.empty_graph(2), 0, 0)) == [[0]] + assert list(nx.all_simple_paths(nx.empty_graph(2), 0, 1)) == [] + assert list(nx.all_simple_paths(nx.path_graph(9), 0, 8, 0)) == [] + + +def test_all_simple_paths_source_in_targets(): + # See GitHub issue #6690. 
+ G = nx.path_graph(3) + assert list(nx.all_simple_paths(G, 0, {0, 1, 2})) == [[0], [0, 1], [0, 1, 2]] + + +def hamiltonian_path(G, source): + source = arbitrary_element(G) + neighbors = set(G[source]) - {source} + n = len(G) + for target in neighbors: + for path in nx.all_simple_paths(G, source, target): + if len(path) == n: + yield path + + +def test_hamiltonian_path(): + from itertools import permutations + + G = nx.complete_graph(4) + paths = [list(p) for p in hamiltonian_path(G, 0)] + exact = [[0] + list(p) for p in permutations([1, 2, 3], 3)] + assert sorted(paths) == sorted(exact) + + +def test_cutoff_zero(): + G = nx.complete_graph(4) + paths = nx.all_simple_paths(G, 0, 3, cutoff=0) + assert [list(p) for p in paths] == [] + paths = nx.all_simple_paths(nx.MultiGraph(G), 0, 3, cutoff=0) + assert [list(p) for p in paths] == [] + + +def test_source_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_paths(nx.MultiGraph(G), 0, 3)) + + +def test_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_paths(nx.MultiGraph(G), 1, 4)) + + +# Tests for all_simple_edge_paths +def test_all_simple_edge_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2), (2, 3))} + + +def test_all_simple_edge_paths_empty_path(): + G = nx.empty_graph(1) + assert list(nx.all_simple_edge_paths(G, 0, 0)) == [[]] + + +def test_all_simple_edge_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_digraph_all_simple_edge_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_all_simple_edge_paths_with_two_targets_cutoff(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_digraph_all_simple_edge_paths_with_two_targets_cutoff(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_all_simple_edge_paths_with_two_targets_in_line_emits_two_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2)), ((0, 1), (1, 2), (2, 3))} + + +def test_all_simple_edge_paths_ignores_cycle(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_edge_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {((0, 1), (1, 3))} + + +def test_all_simple_edge_paths_with_two_targets_inside_cycle_emits_two_paths(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_edge_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2)), ((0, 1), (1, 3))} + + +def test_all_simple_edge_paths_source_target(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 1, 1) + assert list(paths) == [[]] + + +def 
test_all_simple_edge_paths_cutoff(): + G = nx.complete_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 1, cutoff=1) + assert {tuple(p) for p in paths} == {((0, 1),)} + paths = nx.all_simple_edge_paths(G, 0, 1, cutoff=2) + assert {tuple(p) for p in paths} == {((0, 1),), ((0, 2), (2, 1)), ((0, 3), (3, 1))} + + +def test_all_simple_edge_paths_on_non_trivial_graph(): + """you may need to draw this graph to make sure it is reasonable""" + G = nx.path_graph(5, create_using=nx.DiGraph()) + G.add_edges_from([(0, 5), (1, 5), (1, 3), (5, 4), (4, 2), (4, 3)]) + paths = nx.all_simple_edge_paths(G, 1, [2, 3]) + assert {tuple(p) for p in paths} == { + ((1, 2),), + ((1, 3), (3, 4), (4, 2)), + ((1, 5), (5, 4), (4, 2)), + ((1, 3),), + ((1, 2), (2, 3)), + ((1, 5), (5, 4), (4, 3)), + ((1, 5), (5, 4), (4, 2), (2, 3)), + } + paths = nx.all_simple_edge_paths(G, 1, [2, 3], cutoff=3) + assert {tuple(p) for p in paths} == { + ((1, 2),), + ((1, 3), (3, 4), (4, 2)), + ((1, 5), (5, 4), (4, 2)), + ((1, 3),), + ((1, 2), (2, 3)), + ((1, 5), (5, 4), (4, 3)), + } + paths = nx.all_simple_edge_paths(G, 1, [2, 3], cutoff=2) + assert {tuple(p) for p in paths} == {((1, 2),), ((1, 3),), ((1, 2), (2, 3))} + + +def test_all_simple_edge_paths_multigraph(): + G = nx.MultiGraph([(1, 2), (1, 2)]) + paths = nx.all_simple_edge_paths(G, 1, 1) + assert list(paths) == [[]] + nx.add_path(G, [3, 1, 10, 2]) + paths = list(nx.all_simple_edge_paths(G, 1, 2)) + assert len(paths) == 3 + assert {tuple(p) for p in paths} == { + ((1, 2, 0),), + ((1, 2, 1),), + ((1, 10, 0), (10, 2, 0)), + } + + +def test_all_simple_edge_paths_multigraph_with_cutoff(): + G = nx.MultiGraph([(1, 2), (1, 2), (1, 10), (10, 2)]) + paths = list(nx.all_simple_edge_paths(G, 1, 2, cutoff=1)) + assert len(paths) == 2 + assert {tuple(p) for p in paths} == {((1, 2, 0),), ((1, 2, 1),)} + + +def test_all_simple_edge_paths_directed(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [3, 2, 1]) + paths = nx.all_simple_edge_paths(G, 1, 3) + assert {tuple(p) for p in paths} == {((1, 2), (2, 3))} + + +def test_all_simple_edge_paths_empty(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3, cutoff=2) + assert list(paths) == [] + + +def test_all_simple_edge_paths_corner_cases(): + assert list(nx.all_simple_edge_paths(nx.empty_graph(2), 0, 0)) == [[]] + assert list(nx.all_simple_edge_paths(nx.empty_graph(2), 0, 1)) == [] + assert list(nx.all_simple_edge_paths(nx.path_graph(9), 0, 8, 0)) == [] + + +def test_all_simple_edge_paths_ignores_self_loop(): + G = nx.Graph([(0, 0), (0, 1), (1, 1), (1, 2)]) + assert list(nx.all_simple_edge_paths(G, 0, 2)) == [[(0, 1), (1, 2)]] + + +def hamiltonian_edge_path(G, source): + source = arbitrary_element(G) + neighbors = set(G[source]) - {source} + n = len(G) + for target in neighbors: + for path in nx.all_simple_edge_paths(G, source, target): + if len(path) == n - 1: + yield path + + +def test_hamiltonian__edge_path(): + from itertools import permutations + + G = nx.complete_graph(4) + paths = hamiltonian_edge_path(G, 0) + exact = [list(pairwise([0] + list(p))) for p in permutations([1, 2, 3], 3)] + assert sorted(exact) == sorted(paths) + + +def test_edge_cutoff_zero(): + G = nx.complete_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3, cutoff=0) + assert [list(p) for p in paths] == [] + paths = nx.all_simple_edge_paths(nx.MultiGraph(G), 0, 3, cutoff=0) + assert [list(p) for p in paths] == [] + + +def test_edge_source_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + 
list(nx.all_simple_edge_paths(nx.MultiGraph(G), 0, 3)) + + +def test_edge_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_edge_paths(nx.MultiGraph(G), 1, 4)) + + +# Tests for shortest_simple_paths +def test_shortest_simple_paths(): + G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + paths = nx.shortest_simple_paths(G, 1, 12) + assert next(paths) == [1, 2, 3, 4, 8, 12] + assert next(paths) == [1, 5, 6, 7, 8, 12] + assert [len(path) for path in nx.shortest_simple_paths(G, 1, 12)] == sorted( + len(path) for path in nx.all_simple_paths(G, 1, 12) + ) + + +def test_shortest_simple_paths_singleton_path(): + G = nx.empty_graph(3) + assert list(nx.shortest_simple_paths(G, 0, 0)) == [[0]] + + +def test_shortest_simple_paths_directed(): + G = nx.cycle_graph(7, create_using=nx.DiGraph()) + paths = nx.shortest_simple_paths(G, 0, 3) + assert list(paths) == [[0, 1, 2, 3]] + + +def test_shortest_simple_paths_directed_with_weight_function(): + def cost(u, v, x): + return 1 + + G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + paths = nx.shortest_simple_paths(G, 1, 12) + assert next(paths) == [1, 2, 3, 4, 8, 12] + assert next(paths) == [1, 5, 6, 7, 8, 12] + assert [ + len(path) for path in nx.shortest_simple_paths(G, 1, 12, weight=cost) + ] == sorted(len(path) for path in nx.all_simple_paths(G, 1, 12)) + + +def test_shortest_simple_paths_with_weight_function(): + def cost(u, v, x): + return 1 + + G = nx.cycle_graph(7, create_using=nx.DiGraph()) + paths = nx.shortest_simple_paths(G, 0, 3, weight=cost) + assert list(paths) == [[0, 1, 2, 3]] + + +def test_shortest_simple_paths_with_none_weight_function(): + def cost(u, v, x): + delta = abs(u - v) + # ignore interior edges + return 1 if (delta == 1 or delta == 4) else None + + G = nx.complete_graph(5) + paths = nx.shortest_simple_paths(G, 0, 2, weight=cost) + assert list(paths) == [[0, 1, 2], [0, 4, 3, 2]] + + +def test_Greg_Bernstein(): + g1 = nx.Graph() + g1.add_nodes_from(["N0", "N1", "N2", "N3", "N4"]) + g1.add_edge("N4", "N1", weight=10.0, capacity=50, name="L5") + g1.add_edge("N4", "N0", weight=7.0, capacity=40, name="L4") + g1.add_edge("N0", "N1", weight=10.0, capacity=45, name="L1") + g1.add_edge("N3", "N0", weight=10.0, capacity=50, name="L0") + g1.add_edge("N2", "N3", weight=12.0, capacity=30, name="L2") + g1.add_edge("N1", "N2", weight=15.0, capacity=42, name="L3") + solution = [["N1", "N0", "N3"], ["N1", "N2", "N3"], ["N1", "N4", "N0", "N3"]] + result = list(nx.shortest_simple_paths(g1, "N1", "N3", weight="weight")) + assert result == solution + + +def test_weighted_shortest_simple_path(): + def cost_func(path): + return sum(G.adj[u][v]["weight"] for (u, v) in zip(path, path[1:])) + + G = nx.complete_graph(5) + weight = {(u, v): random.randint(1, 100) for (u, v) in G.edges()} + nx.set_edge_attributes(G, weight, "weight") + cost = 0 + for path in nx.shortest_simple_paths(G, 0, 3, weight="weight"): + this_cost = cost_func(path) + assert cost <= this_cost + cost = this_cost + + +def test_directed_weighted_shortest_simple_path(): + def cost_func(path): + return sum(G.adj[u][v]["weight"] for (u, v) in zip(path, path[1:])) + + G = nx.complete_graph(5) + G = G.to_directed() + weight = {(u, v): random.randint(1, 100) for (u, v) in G.edges()} + nx.set_edge_attributes(G, weight, "weight") + cost = 0 + for path in nx.shortest_simple_paths(G, 0, 3, weight="weight"): + this_cost = cost_func(path) + assert cost <= this_cost + cost = this_cost + + 
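[Editorial aside, not part of the vendored patch.] The two weighted tests above rely on nx.shortest_simple_paths yielding simple paths in nondecreasing total weight (the weighted case is based on Yen's algorithm). A minimal usage sketch under that assumption, using only the public networkx API on a small illustrative graph of my own (not from the test suite):

import networkx as nx

# Triangle with a shortcut: s-a-t costs 2, while the direct edge s-t costs 3.
G = nx.Graph()
G.add_weighted_edges_from([("s", "a", 1), ("a", "t", 1), ("s", "t", 3)])

# The generator yields the cheapest simple path first, so collecting the
# first k items answers a k-shortest-simple-paths query.
k = 2
cheapest = []
for path in nx.shortest_simple_paths(G, "s", "t", weight="weight"):
    cheapest.append(path)
    if len(cheapest) == k:
        break

assert cheapest == [["s", "a", "t"], ["s", "t"]]  # weights 2, then 3

This cheapest-first ordering is exactly what the tests below exploit: they iterate the generator and assert that each successive path's cost is no smaller than the previous one.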
+def test_weighted_shortest_simple_path_issue2427(): + G = nx.Graph() + G.add_edge("IN", "OUT", weight=2) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=2) + G.add_edge("B", "OUT", weight=2) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "OUT"], + ["IN", "B", "OUT"], + ] + G = nx.Graph() + G.add_edge("IN", "OUT", weight=10) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=1) + G.add_edge("B", "OUT", weight=1) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "B", "OUT"], + ["IN", "OUT"], + ] + + +def test_directed_weighted_shortest_simple_path_issue2427(): + G = nx.DiGraph() + G.add_edge("IN", "OUT", weight=2) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=2) + G.add_edge("B", "OUT", weight=2) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "OUT"], + ["IN", "B", "OUT"], + ] + G = nx.DiGraph() + G.add_edge("IN", "OUT", weight=10) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=1) + G.add_edge("B", "OUT", weight=1) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "B", "OUT"], + ["IN", "OUT"], + ] + + +def test_weight_name(): + G = nx.cycle_graph(7) + nx.set_edge_attributes(G, 1, "weight") + nx.set_edge_attributes(G, 1, "foo") + G.adj[1][2]["foo"] = 7 + paths = list(nx.shortest_simple_paths(G, 0, 3, weight="foo")) + solution = [[0, 6, 5, 4, 3], [0, 1, 2, 3]] + assert paths == solution + + +def test_ssp_source_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 0, 3)) + + +def test_ssp_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 1, 4)) + + +def test_ssp_multigraph(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 1, 4)) + + +def test_ssp_source_missing2(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5]) + list(nx.shortest_simple_paths(G, 0, 3)) + + +def test_bidirectional_shortest_path_restricted_cycle(): + cycle = nx.cycle_graph(7) + length, path = _bidirectional_shortest_path(cycle, 0, 3) + assert path == [0, 1, 2, 3] + length, path = _bidirectional_shortest_path(cycle, 0, 3, ignore_nodes=[1]) + assert path == [0, 6, 5, 4, 3] + + +def test_bidirectional_shortest_path_restricted_wheel(): + wheel = nx.wheel_graph(6) + length, path = _bidirectional_shortest_path(wheel, 1, 3) + assert path in [[1, 0, 3], [1, 2, 3]] + length, path = _bidirectional_shortest_path(wheel, 1, 3, ignore_nodes=[0]) + assert path == [1, 2, 3] + length, path = _bidirectional_shortest_path(wheel, 1, 3, ignore_nodes=[0, 2]) + assert path == [1, 5, 4, 3] + length, path = _bidirectional_shortest_path( + wheel, 1, 3, ignore_edges=[(1, 0), (5, 0), (2, 3)] + ) + assert path in [[1, 2, 0, 3], [1, 5, 4, 3]] + + +def test_bidirectional_shortest_path_restricted_directed_cycle(): + directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + length, path = _bidirectional_shortest_path(directed_cycle, 0, 3) + assert path == [0, 1, 2, 3] + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + directed_cycle, + 0, + 3, + ignore_nodes=[1], + ) + length, path = _bidirectional_shortest_path( + directed_cycle, 0, 3, ignore_edges=[(2, 1)] + ) + assert path == [0, 1, 2, 3] + 
pytest.raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + directed_cycle, + 0, + 3, + ignore_edges=[(1, 2)], + ) + + +def test_bidirectional_shortest_path_ignore(): + G = nx.Graph() + nx.add_path(G, [1, 2]) + nx.add_path(G, [1, 3]) + nx.add_path(G, [1, 4]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[1] + ) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[2] + ) + G = nx.Graph() + nx.add_path(G, [1, 3]) + nx.add_path(G, [1, 4]) + nx.add_path(G, [3, 2]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[1, 2] + ) + + +def validate_path(G, s, t, soln_len, path): + assert path[0] == s + assert path[-1] == t + assert soln_len == sum( + G[u][v].get("weight", 1) for u, v in zip(path[:-1], path[1:]) + ) + + +def validate_length_path(G, s, t, soln_len, length, path): + assert soln_len == length + validate_path(G, s, t, length, path) + + +def test_bidirectional_dijkstra_restricted(): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + + XG3 = nx.Graph() + XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + validate_length_path(XG, "s", "v", 9, *_bidirectional_dijkstra(XG, "s", "v")) + validate_length_path( + XG, "s", "v", 10, *_bidirectional_dijkstra(XG, "s", "v", ignore_nodes=["u"]) + ) + validate_length_path( + XG, + "s", + "v", + 11, + *_bidirectional_dijkstra(XG, "s", "v", ignore_edges=[("s", "x")]), + ) + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + XG, + "s", + "v", + ignore_nodes=["u"], + ignore_edges=[("s", "x")], + ) + validate_length_path(XG3, 0, 3, 15, *_bidirectional_dijkstra(XG3, 0, 3)) + validate_length_path( + XG3, 0, 3, 16, *_bidirectional_dijkstra(XG3, 0, 3, ignore_nodes=[1]) + ) + validate_length_path( + XG3, 0, 3, 16, *_bidirectional_dijkstra(XG3, 0, 3, ignore_edges=[(2, 3)]) + ) + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + XG3, + 0, + 3, + ignore_nodes=[1], + ignore_edges=[(5, 4)], + ) + + +def test_bidirectional_dijkstra_no_path(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5, 6]) + _bidirectional_dijkstra(G, 1, 6) + + +def test_bidirectional_dijkstra_ignore(): + G = nx.Graph() + nx.add_path(G, [1, 2, 10]) + nx.add_path(G, [1, 3, 10]) + pytest.raises(nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[1]) + pytest.raises(nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[2]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[1, 2] + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_smallworld.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_smallworld.py new file mode 100644 index 0000000..c8e4542 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_smallworld.py @@ -0,0 +1,76 @@ +import pytest + +pytest.importorskip("numpy") + +import networkx as nx +from networkx import lattice_reference, omega, random_reference, sigma + +rng = 42 + + +def test_random_reference(): + G = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + Gr = random_reference(G, niter=1, seed=rng) + C = nx.average_clustering(G) + Cr = nx.average_clustering(Gr) + assert C > Cr + + 
with pytest.raises(nx.NetworkXError): + next(random_reference(nx.Graph())) + with pytest.raises(nx.NetworkXNotImplemented): + next(random_reference(nx.DiGraph())) + + H = nx.Graph(((0, 1), (2, 3))) + Hl = random_reference(H, niter=1, seed=rng) + + +def test_lattice_reference(): + G = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + Gl = lattice_reference(G, niter=1, seed=rng) + L = nx.average_shortest_path_length(G) + Ll = nx.average_shortest_path_length(Gl) + assert Ll > L + + pytest.raises(nx.NetworkXError, lattice_reference, nx.Graph()) + pytest.raises(nx.NetworkXNotImplemented, lattice_reference, nx.DiGraph()) + + H = nx.Graph(((0, 1), (2, 3))) + Hl = lattice_reference(H, niter=1) + + +def test_sigma(): + Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + sigmas = sigma(Gs, niter=1, nrand=2, seed=rng) + sigmar = sigma(Gr, niter=1, nrand=2, seed=rng) + assert sigmar < sigmas + + +def test_omega(): + Gl = nx.connected_watts_strogatz_graph(50, 6, 0, seed=rng) + Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + omegal = omega(Gl, niter=1, nrand=1, seed=rng) + omegar = omega(Gr, niter=1, nrand=1, seed=rng) + omegas = omega(Gs, niter=1, nrand=1, seed=rng) + assert omegal < omegas and omegas < omegar + + # Test that omega lies within the [-1, 1] bounds + G_barbell = nx.barbell_graph(5, 1) + G_karate = nx.karate_club_graph() + + omega_barbell = nx.omega(G_barbell) + omega_karate = nx.omega(G_karate, nrand=2) + + omegas = (omegal, omegar, omegas, omega_barbell, omega_karate) + + for o in omegas: + assert -1 <= o <= 1 + + +@pytest.mark.parametrize("f", (nx.random_reference, nx.lattice_reference)) +def test_graph_no_edges(f): + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3]) + with pytest.raises(nx.NetworkXError, match="Graph has fewer that 2 edges"): + f(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_smetric.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_smetric.py new file mode 100644 index 0000000..528dbc8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_smetric.py @@ -0,0 +1,8 @@ +import pytest + +import networkx as nx + + +def test_smetric(): + G = nx.Graph([(1, 2), (2, 3), (2, 4), (1, 4)]) + assert nx.s_metric(G) == 19.0 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_sparsifiers.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_sparsifiers.py new file mode 100644 index 0000000..e8604e6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_sparsifiers.py @@ -0,0 +1,138 @@ +"""Unit tests for the sparsifier computation functions.""" + +import pytest + +import networkx as nx +from networkx.utils import py_random_state + +_seed = 2 + + +def _test_spanner(G, spanner, stretch, weight=None): + """Test whether a spanner is valid. + + This function tests whether the given spanner is a subgraph of the + given graph G with the same node set. It also tests for all shortest + paths whether they adhere to the given stretch. + + Parameters + ---------- + G : NetworkX graph + The original graph for which the spanner was constructed. + + spanner : NetworkX graph + The spanner to be tested. + + stretch : float + The proclaimed stretch of the spanner. + + weight : object + The edge attribute to use as distance. 
+ """ + # check node set + assert set(G.nodes()) == set(spanner.nodes()) + + # check edge set and weights + for u, v in spanner.edges(): + assert G.has_edge(u, v) + if weight: + assert spanner[u][v][weight] == G[u][v][weight] + + # check connectivity and stretch + original_length = dict(nx.shortest_path_length(G, weight=weight)) + spanner_length = dict(nx.shortest_path_length(spanner, weight=weight)) + for u in G.nodes(): + for v in G.nodes(): + if u in original_length and v in original_length[u]: + assert spanner_length[u][v] <= stretch * original_length[u][v] + + +@py_random_state(1) +def _assign_random_weights(G, seed=None): + """Assigns random weights to the edges of a graph. + + Parameters + ---------- + + G : NetworkX graph + The original graph for which the spanner was constructed. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + """ + for u, v in G.edges(): + G[u][v]["weight"] = seed.random() + + +def test_spanner_trivial(): + """Test a trivial spanner with stretch 1.""" + G = nx.complete_graph(20) + spanner = nx.spanner(G, 1, seed=_seed) + + for u, v in G.edges: + assert spanner.has_edge(u, v) + + +def test_spanner_unweighted_complete_graph(): + """Test spanner construction on a complete unweighted graph.""" + G = nx.complete_graph(20) + + spanner = nx.spanner(G, 4, seed=_seed) + _test_spanner(G, spanner, 4) + + spanner = nx.spanner(G, 10, seed=_seed) + _test_spanner(G, spanner, 10) + + +def test_spanner_weighted_complete_graph(): + """Test spanner construction on a complete weighted graph.""" + G = nx.complete_graph(20) + _assign_random_weights(G, seed=_seed) + + spanner = nx.spanner(G, 4, weight="weight", seed=_seed) + _test_spanner(G, spanner, 4, weight="weight") + + spanner = nx.spanner(G, 10, weight="weight", seed=_seed) + _test_spanner(G, spanner, 10, weight="weight") + + +def test_spanner_unweighted_gnp_graph(): + """Test spanner construction on an unweighted gnp graph.""" + G = nx.gnp_random_graph(20, 0.4, seed=_seed) + + spanner = nx.spanner(G, 4, seed=_seed) + _test_spanner(G, spanner, 4) + + spanner = nx.spanner(G, 10, seed=_seed) + _test_spanner(G, spanner, 10) + + +def test_spanner_weighted_gnp_graph(): + """Test spanner construction on an weighted gnp graph.""" + G = nx.gnp_random_graph(20, 0.4, seed=_seed) + _assign_random_weights(G, seed=_seed) + + spanner = nx.spanner(G, 4, weight="weight", seed=_seed) + _test_spanner(G, spanner, 4, weight="weight") + + spanner = nx.spanner(G, 10, weight="weight", seed=_seed) + _test_spanner(G, spanner, 10, weight="weight") + + +def test_spanner_unweighted_disconnected_graph(): + """Test spanner construction on a disconnected graph.""" + G = nx.disjoint_union(nx.complete_graph(10), nx.complete_graph(10)) + + spanner = nx.spanner(G, 4, seed=_seed) + _test_spanner(G, spanner, 4) + + spanner = nx.spanner(G, 10, seed=_seed) + _test_spanner(G, spanner, 10) + + +def test_spanner_invalid_stretch(): + """Check whether an invalid stretch is caught.""" + with pytest.raises(ValueError): + G = nx.empty_graph() + nx.spanner(G, 0) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_structuralholes.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_structuralholes.py new file mode 100644 index 0000000..d0f53cd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_structuralholes.py @@ -0,0 +1,191 @@ +"""Unit tests for the :mod:`networkx.algorithms.structuralholes` module.""" + +import math + 
+import pytest + +import networkx as nx +from networkx.classes.tests import dispatch_interface + + +class TestStructuralHolesNoScipy: + """Unit tests for computing measures of structural holes. + + The expected values for these functions were originally computed using the + proprietary software `UCINET`_ and the free software `IGraph`_ , and then + computed by hand to make sure that the results are correct. + + .. _UCINET: https://sites.google.com/site/ucinetsoftware/home + .. _IGraph: http://igraph.org/ + + """ + + def setup_method(self): + self.D = nx.DiGraph() + self.D.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) + self.D_weights = {(0, 1): 2, (0, 2): 2, (1, 0): 1, (2, 1): 1} + # Example from http://www.analytictech.com/connections/v20(1)/holes.htm + self.G = nx.Graph() + self.G.add_edges_from( + [ + ("A", "B"), + ("A", "F"), + ("A", "G"), + ("A", "E"), + ("E", "G"), + ("F", "G"), + ("B", "G"), + ("B", "D"), + ("D", "G"), + ("G", "C"), + ] + ) + self.G_weights = { + ("A", "B"): 2, + ("A", "F"): 3, + ("A", "G"): 5, + ("A", "E"): 2, + ("E", "G"): 8, + ("F", "G"): 3, + ("B", "G"): 4, + ("B", "D"): 1, + ("D", "G"): 3, + ("G", "C"): 10, + } + self.Dnodes = list(self.D) + self.Gnodes = list(self.G) + + def test_constraint_directed(self): + constraint = nx.constraint(self.D, nodes=self.Dnodes) + assert constraint[0] == pytest.approx(1.003, abs=1e-3) + assert constraint[1] == pytest.approx(1.003, abs=1e-3) + assert constraint[2] == pytest.approx(1.389, abs=1e-3) + + def test_effective_size_directed(self): + effective_size = nx.effective_size(self.D, nodes=self.Dnodes) + assert effective_size[0] == pytest.approx(1.167, abs=1e-3) + assert effective_size[1] == pytest.approx(1.167, abs=1e-3) + assert effective_size[2] == pytest.approx(1, abs=1e-3) + + def test_constraint_weighted_directed(self): + D = self.D.copy() + nx.set_edge_attributes(D, self.D_weights, "weight") + constraint = nx.constraint(D, weight="weight", nodes=self.Dnodes) + assert constraint[0] == pytest.approx(0.840, abs=1e-3) + assert constraint[1] == pytest.approx(1.143, abs=1e-3) + assert constraint[2] == pytest.approx(1.378, abs=1e-3) + + def test_effective_size_weighted_directed(self): + D = self.D.copy() + nx.set_edge_attributes(D, self.D_weights, "weight") + effective_size = nx.effective_size(D, weight="weight", nodes=self.Dnodes) + assert effective_size[0] == pytest.approx(1.567, abs=1e-3) + assert effective_size[1] == pytest.approx(1.083, abs=1e-3) + assert effective_size[2] == pytest.approx(1, abs=1e-3) + + def test_constraint_undirected(self): + constraint = nx.constraint(self.G, nodes=self.Gnodes) + assert constraint["G"] == pytest.approx(0.400, abs=1e-3) + assert constraint["A"] == pytest.approx(0.595, abs=1e-3) + assert constraint["C"] == pytest.approx(1, abs=1e-3) + + def test_effective_size_undirected_borgatti(self): + effective_size = nx.effective_size(self.G, nodes=self.Gnodes) + assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_effective_size_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, 1, "weight") + effective_size = nx.effective_size(G, weight="weight", nodes=self.Gnodes) + assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_constraint_weighted_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, 
self.G_weights, "weight") + constraint = nx.constraint(G, weight="weight", nodes=self.Gnodes) + assert constraint["G"] == pytest.approx(0.299, abs=1e-3) + assert constraint["A"] == pytest.approx(0.795, abs=1e-3) + assert constraint["C"] == pytest.approx(1, abs=1e-3) + + def test_effective_size_weighted_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, self.G_weights, "weight") + effective_size = nx.effective_size(G, weight="weight", nodes=self.Gnodes) + assert effective_size["G"] == pytest.approx(5.47, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.47, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_constraint_isolated(self): + G = self.G.copy() + G.add_node(1) + constraint = nx.constraint(G, nodes=self.Gnodes + [1]) + assert math.isnan(constraint[1]) + + def test_effective_size_isolated(self): + G = self.G.copy() + G.add_node(1) + nx.set_edge_attributes(G, self.G_weights, "weight") + effective_size = nx.effective_size(G, weight="weight", nodes=self.Gnodes + [1]) + assert math.isnan(effective_size[1]) + + def test_effective_size_borgatti_isolated(self): + G = self.G.copy() + G.add_node(1) + effective_size = nx.effective_size(G, nodes=self.Gnodes + [1]) + assert math.isnan(effective_size[1]) + + +class TestStructuralHoles(TestStructuralHolesNoScipy): + pytest.importorskip("scipy") + Dnodes = None + Gnodes = None + + +@pytest.mark.parametrize("graph", (nx.Graph, nx.DiGraph)) +@pytest.mark.parametrize("nodes", (None, [0])) +def test_effective_size_isolated_node_with_selfloop(graph, nodes): + """Behavior consistent with isolated node without self-loop. See gh-6916""" + G = graph([(0, 0)]) # Single node with one self-edge + assert math.isnan(nx.effective_size(G, nodes=nodes)[0]) + + +@pytest.mark.parametrize("graph", (nx.Graph, nx.DiGraph)) +@pytest.mark.parametrize("nodes", (None, [0])) +def test_effective_size_isolated_node_with_selfloop_weighted(graph, nodes): + """Weighted self-loop. See gh-6916""" + G = graph() + G.add_weighted_edges_from([(0, 0, 10)]) + assert math.isnan(nx.effective_size(G, nodes=nodes)[0]) + + +@pytest.mark.parametrize("graph", (nx.Graph, nx.DiGraph)) +def test_constraint_isolated_node_with_selfloop(graph): + """Behavior consistent with isolated node without self-loop. See gh-6916""" + G = graph([(0, 0)]) # Single node with one self-edge + assert math.isnan(nx.constraint(G)[0]) + + +@pytest.mark.parametrize("graph", (nx.Graph, nx.DiGraph)) +def test_constraint_isolated_node_with_selfloop_using_nodes_kwarg(graph): + """Behavior consistent with isolated node without self-loop. See gh-6916""" + G = graph([(0, 0)]) # Single node with one self-edge + assert nx.constraint(G, nodes=[0])[0] == 4 + + +@pytest.mark.parametrize("graph", (nx.Graph, nx.DiGraph)) +def test_constraint_isolated_node_with_selfloop_weighted(graph): + """Weighted self-loop. 
See gh-6916""" + G = graph() + G.add_weighted_edges_from([(0, 0, 10)]) + assert math.isnan(nx.constraint(G)[0]) + + +@pytest.mark.parametrize("graph", (nx.Graph, nx.DiGraph)) +def test_constraint_isolated_node_with_selfloop_weighted_using_nodes_kwarg(graph): + G = graph() + G.add_weighted_edges_from([(0, 0, 10)]) + assert nx.constraint(G, nodes=[0])[0] == 4 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_summarization.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_summarization.py new file mode 100644 index 0000000..c3bf82f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_summarization.py @@ -0,0 +1,642 @@ +""" +Unit tests for dedensification and graph summarization +""" + +import pytest + +import networkx as nx + + +class TestDirectedDedensification: + def build_original_graph(self): + original_matrix = [ + ("1", "BC"), + ("2", "ABC"), + ("3", ["A", "B", "6"]), + ("4", "ABC"), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ] + graph = nx.DiGraph() + for source, targets in original_matrix: + for target in targets: + graph.add_edge(source, target) + return graph + + def build_compressed_graph(self): + compressed_matrix = [ + ("1", "BC"), + ("2", ["ABC"]), + ("3", ["A", "B", "6"]), + ("4", ["ABC"]), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ("ABC", "ABC"), + ] + compressed_graph = nx.DiGraph() + for source, targets in compressed_matrix: + for target in targets: + compressed_graph.add_edge(source, target) + return compressed_graph + + def test_empty(self): + """ + Verify that an empty directed graph results in no compressor nodes + """ + G = nx.DiGraph() + compressed_graph, c_nodes = nx.dedensify(G, threshold=2) + assert c_nodes == set() + + @staticmethod + def densify(G, compressor_nodes, copy=True): + """ + Reconstructs the original graph from a dedensified, directed graph + + Parameters + ---------- + G: dedensified graph + A networkx graph + compressor_nodes: iterable + Iterable of compressor nodes in the dedensified graph + inplace: bool, optional (default: False) + Indicates if densification should be done inplace + + Returns + ------- + G: graph + A densified networkx graph + """ + if copy: + G = G.copy() + for compressor_node in compressor_nodes: + all_neighbors = set(nx.all_neighbors(G, compressor_node)) + out_neighbors = set(G.neighbors(compressor_node)) + for out_neighbor in out_neighbors: + G.remove_edge(compressor_node, out_neighbor) + in_neighbors = all_neighbors - out_neighbors + for in_neighbor in in_neighbors: + G.remove_edge(in_neighbor, compressor_node) + for out_neighbor in out_neighbors: + G.add_edge(in_neighbor, out_neighbor) + G.remove_node(compressor_node) + return G + + def setup_method(self): + self.c_nodes = ("ABC",) + + def test_dedensify_edges(self): + """ + Verifies that dedensify produced the correct edges to/from compressor + nodes in a directed graph + """ + G = self.build_original_graph() + compressed_G = self.build_compressed_graph() + compressed_graph, c_nodes = nx.dedensify(G, threshold=2) + for s, t in compressed_graph.edges(): + o_s = "".join(sorted(s)) + o_t = "".join(sorted(t)) + compressed_graph_exists = compressed_graph.has_edge(s, t) + verified_compressed_exists = compressed_G.has_edge(o_s, o_t) + assert compressed_graph_exists == verified_compressed_exists + assert len(c_nodes) == len(self.c_nodes) + + def test_dedensify_edge_count(self): + """ + Verifies that dedensify produced the correct number of compressor nodes + in a directed graph + """ + G 
= self.build_original_graph() + original_edge_count = len(G.edges()) + c_G, c_nodes = nx.dedensify(G, threshold=2) + compressed_edge_count = len(c_G.edges()) + assert compressed_edge_count <= original_edge_count + compressed_G = self.build_compressed_graph() + assert compressed_edge_count == len(compressed_G.edges()) + + def test_densify_edges(self): + """ + Verifies that densification produces the correct edges from the + original directed graph + """ + compressed_G = self.build_compressed_graph() + original_graph = self.densify(compressed_G, self.c_nodes, copy=True) + G = self.build_original_graph() + for s, t in G.edges(): + assert G.has_edge(s, t) == original_graph.has_edge(s, t) + + def test_densify_edge_count(self): + """ + Verifies that densification produces the correct number of edges in the + original directed graph + """ + compressed_G = self.build_compressed_graph() + compressed_edge_count = len(compressed_G.edges()) + original_graph = self.densify(compressed_G, self.c_nodes) + original_edge_count = len(original_graph.edges()) + assert compressed_edge_count <= original_edge_count + G = self.build_original_graph() + assert original_edge_count == len(G.edges()) + + +class TestUnDirectedDedensification: + def build_original_graph(self): + """ + Builds graph shown in the original research paper + """ + original_matrix = [ + ("1", "CB"), + ("2", "ABC"), + ("3", ["A", "B", "6"]), + ("4", "ABC"), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ] + graph = nx.Graph() + for source, targets in original_matrix: + for target in targets: + graph.add_edge(source, target) + return graph + + def test_empty(self): + """ + Verify that an empty undirected graph results in no compressor nodes + """ + G = nx.Graph() + compressed_G, c_nodes = nx.dedensify(G, threshold=2) + assert c_nodes == set() + + def setup_method(self): + self.c_nodes = ("6AB", "ABC") + + def build_compressed_graph(self): + compressed_matrix = [ + ("1", ["B", "C"]), + ("2", ["ABC"]), + ("3", ["6AB"]), + ("4", ["ABC"]), + ("5", ["6AB"]), + ("6", ["6AB", "A"]), + ("A", ["6AB", "ABC"]), + ("B", ["ABC", "6AB"]), + ("C", ["ABC"]), + ] + compressed_graph = nx.Graph() + for source, targets in compressed_matrix: + for target in targets: + compressed_graph.add_edge(source, target) + return compressed_graph + + def test_dedensify_edges(self): + """ + Verifies that dedensify produced correct compressor nodes and the + correct edges to/from the compressor nodes in an undirected graph + """ + G = self.build_original_graph() + c_G, c_nodes = nx.dedensify(G, threshold=2) + v_compressed_G = self.build_compressed_graph() + for s, t in c_G.edges(): + o_s = "".join(sorted(s)) + o_t = "".join(sorted(t)) + has_compressed_edge = c_G.has_edge(s, t) + verified_has_compressed_edge = v_compressed_G.has_edge(o_s, o_t) + assert has_compressed_edge == verified_has_compressed_edge + assert len(c_nodes) == len(self.c_nodes) + + def test_dedensify_edge_count(self): + """ + Verifies that dedensify produced the correct number of edges in an + undirected graph + """ + G = self.build_original_graph() + c_G, c_nodes = nx.dedensify(G, threshold=2, copy=True) + compressed_edge_count = len(c_G.edges()) + verified_original_edge_count = len(G.edges()) + assert compressed_edge_count <= verified_original_edge_count + verified_compressed_G = self.build_compressed_graph() + verified_compressed_edge_count = len(verified_compressed_G.edges()) + assert compressed_edge_count == verified_compressed_edge_count + + +@pytest.mark.parametrize( + "graph_type", [nx.Graph, nx.DiGraph, 
nx.MultiGraph, nx.MultiDiGraph] +) +def test_summarization_empty(graph_type): + G = graph_type() + summary_graph = nx.snap_aggregation(G, node_attributes=("color",)) + assert nx.is_isomorphic(summary_graph, G) + + +class AbstractSNAP: + node_attributes = ("color",) + + def build_original_graph(self): + pass + + def build_summary_graph(self): + pass + + def test_summary_graph(self): + original_graph = self.build_original_graph() + summary_graph = self.build_summary_graph() + + relationship_attributes = ("type",) + generated_summary_graph = nx.snap_aggregation( + original_graph, self.node_attributes, relationship_attributes + ) + relabeled_summary_graph = self.deterministic_labels(generated_summary_graph) + assert nx.is_isomorphic(summary_graph, relabeled_summary_graph) + + def deterministic_labels(self, G): + node_labels = list(G.nodes) + node_labels = sorted(node_labels, key=lambda n: sorted(G.nodes[n]["group"])[0]) + node_labels.sort() + + label_mapping = {} + for index, node in enumerate(node_labels): + label = f"Supernode-{index}" + label_mapping[node] = label + + return nx.relabel_nodes(G, label_mapping) + + +class TestSNAPNoEdgeTypes(AbstractSNAP): + relationship_attributes = () + + def test_summary_graph(self): + original_graph = self.build_original_graph() + summary_graph = self.build_summary_graph() + + relationship_attributes = ("type",) + generated_summary_graph = nx.snap_aggregation( + original_graph, self.node_attributes + ) + relabeled_summary_graph = self.deterministic_labels(generated_summary_graph) + assert nx.is_isomorphic(summary_graph, relabeled_summary_graph) + + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Red"}, + "D": {"color": "Red"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Blue"}, + "H": {"color": "Blue"}, + "I": {"color": "Yellow"}, + "J": {"color": "Yellow"}, + "K": {"color": "Yellow"}, + "L": {"color": "Yellow"}, + } + edges = [ + ("A", "B"), + ("A", "C"), + ("A", "E"), + ("A", "I"), + ("B", "D"), + ("B", "J"), + ("B", "F"), + ("C", "G"), + ("D", "H"), + ("I", "J"), + ("J", "K"), + ("I", "L"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target in edges: + G.add_edge(source, target) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Red"}, + "Supernode-2": {"color": "Blue"}, + "Supernode-3": {"color": "Blue"}, + "Supernode-4": {"color": "Yellow"}, + "Supernode-5": {"color": "Yellow"}, + } + edges = [ + ("Supernode-0", "Supernode-0"), + ("Supernode-0", "Supernode-1"), + ("Supernode-0", "Supernode-2"), + ("Supernode-0", "Supernode-4"), + ("Supernode-1", "Supernode-3"), + ("Supernode-4", "Supernode-4"), + ("Supernode-4", "Supernode-5"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target in edges: + G.add_edge(source, target) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPUndirected(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Red"}, + "D": {"color": "Red"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Blue"}, + "H": 
{"color": "Blue"}, + "I": {"color": "Yellow"}, + "J": {"color": "Yellow"}, + "K": {"color": "Yellow"}, + "L": {"color": "Yellow"}, + } + edges = [ + ("A", "B", "Strong"), + ("A", "C", "Weak"), + ("A", "E", "Strong"), + ("A", "I", "Weak"), + ("B", "D", "Weak"), + ("B", "J", "Weak"), + ("B", "F", "Strong"), + ("C", "G", "Weak"), + ("D", "H", "Weak"), + ("I", "J", "Strong"), + ("J", "K", "Strong"), + ("I", "L", "Strong"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Red"}, + "Supernode-2": {"color": "Blue"}, + "Supernode-3": {"color": "Blue"}, + "Supernode-4": {"color": "Yellow"}, + "Supernode-5": {"color": "Yellow"}, + } + edges = [ + ("Supernode-0", "Supernode-0", "Strong"), + ("Supernode-0", "Supernode-1", "Weak"), + ("Supernode-0", "Supernode-2", "Strong"), + ("Supernode-0", "Supernode-4", "Weak"), + ("Supernode-1", "Supernode-3", "Weak"), + ("Supernode-4", "Supernode-4", "Strong"), + ("Supernode-4", "Supernode-5", "Strong"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, types=[{"type": type}]) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPDirected(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Green"}, + "D": {"color": "Green"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Yellow"}, + "H": {"color": "Yellow"}, + } + edges = [ + ("A", "C", "Strong"), + ("A", "E", "Strong"), + ("A", "F", "Weak"), + ("B", "D", "Strong"), + ("B", "E", "Weak"), + ("B", "F", "Strong"), + ("C", "G", "Strong"), + ("C", "F", "Strong"), + ("D", "E", "Strong"), + ("D", "H", "Strong"), + ("G", "E", "Strong"), + ("H", "F", "Strong"), + ] + G = nx.DiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Green"}, + "Supernode-2": {"color": "Blue"}, + "Supernode-3": {"color": "Yellow"}, + } + edges = [ + ("Supernode-0", "Supernode-1", [{"type": "Strong"}]), + ("Supernode-0", "Supernode-2", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-1", "Supernode-2", [{"type": "Strong"}]), + ("Supernode-1", "Supernode-3", [{"type": "Strong"}]), + ("Supernode-3", "Supernode-2", [{"type": "Strong"}]), + ] + G = nx.DiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + G.add_edge(source, target, types=types) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPUndirectedMulti(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": 
{"color": "Red"}, + "D": {"color": "Blue"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Yellow"}, + "H": {"color": "Yellow"}, + "I": {"color": "Yellow"}, + } + edges = [ + ("A", "D", ["Weak", "Strong"]), + ("B", "E", ["Weak", "Strong"]), + ("D", "I", ["Strong"]), + ("E", "H", ["Strong"]), + ("F", "G", ["Weak"]), + ("I", "G", ["Weak", "Strong"]), + ("I", "H", ["Weak", "Strong"]), + ("G", "H", ["Weak", "Strong"]), + ] + G = nx.MultiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Blue"}, + "Supernode-2": {"color": "Yellow"}, + "Supernode-3": {"color": "Blue"}, + "Supernode-4": {"color": "Yellow"}, + "Supernode-5": {"color": "Red"}, + } + edges = [ + ("Supernode-1", "Supernode-2", [{"type": "Weak"}]), + ("Supernode-2", "Supernode-4", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-3", "Supernode-4", [{"type": "Strong"}]), + ("Supernode-3", "Supernode-5", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-4", "Supernode-4", [{"type": "Weak"}, {"type": "Strong"}]), + ] + G = nx.MultiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPDirectedMulti(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Green"}, + "D": {"color": "Green"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Yellow"}, + "H": {"color": "Yellow"}, + } + edges = [ + ("A", "C", ["Weak", "Strong"]), + ("A", "E", ["Strong"]), + ("A", "F", ["Weak"]), + ("B", "D", ["Weak", "Strong"]), + ("B", "E", ["Weak"]), + ("B", "F", ["Strong"]), + ("C", "G", ["Weak", "Strong"]), + ("C", "F", ["Strong"]), + ("D", "E", ["Strong"]), + ("D", "H", ["Weak", "Strong"]), + ("G", "E", ["Strong"]), + ("H", "F", ["Strong"]), + ] + G = nx.MultiDiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Blue"}, + "Supernode-2": {"color": "Yellow"}, + "Supernode-3": {"color": "Blue"}, + } + edges = [ + ("Supernode-0", "Supernode-1", ["Weak", "Strong"]), + ("Supernode-0", "Supernode-2", ["Weak", "Strong"]), + ("Supernode-1", "Supernode-2", ["Strong"]), + ("Supernode-1", "Supernode-3", ["Weak", "Strong"]), + ("Supernode-3", "Supernode-2", ["Strong"]), + ] + G = nx.MultiDiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G diff --git 
a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_swap.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_swap.py new file mode 100644 index 0000000..e765bd5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_swap.py @@ -0,0 +1,179 @@ +import pytest + +import networkx as nx + +cycle = nx.cycle_graph(5, create_using=nx.DiGraph) +tree = nx.DiGraph() +tree.add_edges_from(nx.random_labeled_tree(10, seed=42).edges) +path = nx.path_graph(5, create_using=nx.DiGraph) +binomial = nx.binomial_tree(3, create_using=nx.DiGraph) +HH = nx.directed_havel_hakimi_graph([1, 2, 1, 2, 2, 2], [3, 1, 0, 1, 2, 3]) +balanced_tree = nx.balanced_tree(2, 3, create_using=nx.DiGraph) + + +@pytest.mark.parametrize("G", [path, binomial, HH, cycle, tree, balanced_tree]) +def test_directed_edge_swap(G): + in_degree = set(G.in_degree) + out_degree = set(G.out_degree) + edges = set(G.edges) + nx.directed_edge_swap(G, nswap=1, max_tries=100, seed=1) + assert in_degree == set(G.in_degree) + assert out_degree == set(G.out_degree) + assert edges != set(G.edges) + assert 3 == sum(e not in edges for e in G.edges) + + +def test_directed_edge_swap_undo_previous_swap(): + G = nx.DiGraph(nx.path_graph(4).edges) # only 1 swap possible + edges = set(G.edges) + nx.directed_edge_swap(G, nswap=2, max_tries=100) + assert edges == set(G.edges) + + nx.directed_edge_swap(G, nswap=1, max_tries=100, seed=1) + assert {(0, 2), (1, 3), (2, 1)} == set(G.edges) + nx.directed_edge_swap(G, nswap=1, max_tries=100, seed=1) + assert edges == set(G.edges) + + +def test_edge_cases_directed_edge_swap(): + # Tests cases where swaps are impossible: either too few edges exist, or self-loops/cycles are unavoidable + # TODO: Rewrite function to explicitly check for impossible swaps and raise error + e = ( + "Maximum number of swap attempts \\(11\\) exceeded " + "before desired swaps achieved \\(\\d\\)." 
+ ) + graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]) + with pytest.raises(nx.NetworkXAlgorithmError, match=e): + nx.directed_edge_swap(graph, nswap=1, max_tries=10, seed=1) + + +def test_double_edge_swap(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.double_edge_swap(graph, 40) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_double_edge_swap_seed(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.double_edge_swap(graph, 40, seed=1) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 40, seed=1) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_low_window_threshold(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 40, _window_threshold=0, seed=1) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_star(): + # Testing ui==xi in connected_double_edge_swap + graph = nx.star_graph(40) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 1, seed=4) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_star_low_window_threshold(): + # Testing ui==xi in connected_double_edge_swap with low window threshold + graph = nx.star_graph(40) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 1, _window_threshold=0, seed=4) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_directed_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.directed_edge_swap(nx.path_graph(3, create_using=nx.DiGraph)) + + +def test_directed_edge_swap_tries(): + with pytest.raises(nx.NetworkXError): + G = nx.directed_edge_swap( + nx.path_graph(3, create_using=nx.DiGraph), nswap=1, max_tries=0 + ) + + +def test_directed_exception_undirected(): + graph = nx.Graph([(0, 1), (2, 3)]) + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.directed_edge_swap(graph) + + +def test_directed_edge_max_tries(): + with pytest.raises(nx.NetworkXAlgorithmError): + G = nx.directed_edge_swap( + nx.complete_graph(4, nx.DiGraph()), nswap=1, max_tries=5 + ) + + +def test_double_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.double_edge_swap(nx.path_graph(3)) + + +def test_double_edge_swap_tries(): + with pytest.raises(nx.NetworkXError): + G = nx.double_edge_swap(nx.path_graph(10), nswap=1, max_tries=0) + + +def test_double_edge_directed(): + graph = nx.DiGraph([(0, 1), (2, 3)]) + with pytest.raises(nx.NetworkXError, match="not defined for directed graphs."): + G = nx.double_edge_swap(graph) + + +def test_double_edge_max_tries(): + with pytest.raises(nx.NetworkXAlgorithmError): + G = nx.double_edge_swap(nx.complete_graph(4), nswap=1, max_tries=5) + + +def test_connected_double_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.connected_double_edge_swap(nx.path_graph(3)) + + +def test_connected_double_edge_swap_not_connected(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(3) + nx.add_path(G, [10, 11, 12]) + G = 
nx.connected_double_edge_swap(G) + + +def test_degree_seq_c4(): + G = nx.cycle_graph(4) + degrees = sorted(d for n, d in G.degree()) + G = nx.double_edge_swap(G, 1, 100) + assert degrees == sorted(d for n, d in G.degree()) + + +def test_fewer_than_4_nodes(): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2]) + with pytest.raises(nx.NetworkXError, match=".*fewer than four nodes."): + nx.directed_edge_swap(G) + + +def test_less_than_3_edges(): + G = nx.DiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([3, 4]) + with pytest.raises(nx.NetworkXError, match=".*fewer than 3 edges"): + nx.directed_edge_swap(G) + + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3]) + with pytest.raises(nx.NetworkXError, match=".*fewer than 2 edges"): + nx.double_edge_swap(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_threshold.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_threshold.py new file mode 100644 index 0000000..19fc7f2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_threshold.py @@ -0,0 +1,266 @@ +""" +Threshold Graphs +================ +""" + +import pytest + +import networkx as nx +import networkx.algorithms.threshold as nxt + +cnlti = nx.convert_node_labels_to_integers + + +class TestGeneratorThreshold: + def test_threshold_sequence_graph_test(self): + G = nx.star_graph(10) + assert nxt.is_threshold_graph(G) + assert nxt.is_threshold_sequence([d for n, d in G.degree()]) + + G = nx.complete_graph(10) + assert nxt.is_threshold_graph(G) + assert nxt.is_threshold_sequence([d for n, d in G.degree()]) + + deg = [3, 2, 2, 1, 1, 1] + assert not nxt.is_threshold_sequence(deg) + + deg = [3, 2, 2, 1] + assert nxt.is_threshold_sequence(deg) + + G = nx.generators.havel_hakimi_graph(deg) + assert nxt.is_threshold_graph(G) + + def test_creation_sequences(self): + deg = [3, 2, 2, 1] + G = nx.generators.havel_hakimi_graph(deg) + + with pytest.raises(ValueError): + nxt.creation_sequence(deg, with_labels=True, compact=True) + + cs0 = nxt.creation_sequence(deg) + H0 = nxt.threshold_graph(cs0) + assert "".join(cs0) == "ddid" + + cs1 = nxt.creation_sequence(deg, with_labels=True) + H1 = nxt.threshold_graph(cs1) + assert cs1 == [(1, "d"), (2, "d"), (3, "i"), (0, "d")] + + cs2 = nxt.creation_sequence(deg, compact=True) + H2 = nxt.threshold_graph(cs2) + assert cs2 == [2, 1, 1] + assert "".join(nxt.uncompact(cs2)) == "ddid" + assert nx.could_be_isomorphic(H0, G) + assert nx.could_be_isomorphic(H0, H1) + assert nx.could_be_isomorphic(H0, H2) + + def test_make_compact(self): + assert nxt.make_compact(["d", "d", "d", "i", "d", "d"]) == [3, 1, 2] + assert nxt.make_compact([3, 1, 2]) == [3, 1, 2] + pytest.raises(TypeError, nxt.make_compact, [3.0, 1.0, 2.0]) + + def test_uncompact(self): + assert nxt.uncompact([3, 1, 2]) == ["d", "d", "d", "i", "d", "d"] + assert nxt.uncompact(["d", "d", "i", "d"]) == ["d", "d", "i", "d"] + assert nxt.uncompact( + nxt.uncompact([(1, "d"), (2, "d"), (3, "i"), (0, "d")]) + ) == nxt.uncompact([(1, "d"), (2, "d"), (3, "i"), (0, "d")]) + pytest.raises(TypeError, nxt.uncompact, [3.0, 1.0, 2.0]) + + def test_creation_sequence_to_weights(self): + assert nxt.creation_sequence_to_weights([3, 1, 2]) == [ + 0.5, + 0.5, + 0.5, + 0.25, + 0.75, + 0.75, + ] + pytest.raises(TypeError, nxt.creation_sequence_to_weights, [3.0, 1.0, 2.0]) + + def test_weights_to_creation_sequence(self): + deg = [3, 2, 2, 1] + with pytest.raises(ValueError): + nxt.weights_to_creation_sequence(deg, with_labels=True, compact=True) + assert 
nxt.weights_to_creation_sequence(deg, with_labels=True) == [ + (3, "d"), + (1, "d"), + (2, "d"), + (0, "d"), + ] + assert nxt.weights_to_creation_sequence(deg, compact=True) == [4] + + def test_find_alternating_4_cycle(self): + G = nx.Graph() + G.add_edge(1, 2) + assert not nxt.find_alternating_4_cycle(G) + + def test_shortest_path(self): + deg = [3, 2, 2, 1] + G = nx.generators.havel_hakimi_graph(deg) + cs1 = nxt.creation_sequence(deg, with_labels=True) + for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3), (3, 1), (1, 2), (2, 3)]: + assert nxt.shortest_path(cs1, n, m) == nx.shortest_path(G, n, m) + + spl = nxt.shortest_path_length(cs1, 3) + spl2 = nxt.shortest_path_length([t for v, t in cs1], 2) + assert spl == spl2 + + spld = {} + for j, pl in enumerate(spl): + n = cs1[j][0] + spld[n] = pl + assert spld == nx.single_source_shortest_path_length(G, 3) + + assert nxt.shortest_path(["d", "d", "d", "i", "d", "d"], 1, 2) == [1, 2] + assert nxt.shortest_path([3, 1, 2], 1, 2) == [1, 2] + pytest.raises(TypeError, nxt.shortest_path, [3.0, 1.0, 2.0], 1, 2) + pytest.raises(ValueError, nxt.shortest_path, [3, 1, 2], "a", 2) + pytest.raises(ValueError, nxt.shortest_path, [3, 1, 2], 1, "b") + assert nxt.shortest_path([3, 1, 2], 1, 1) == [1] + + def test_shortest_path_length(self): + assert nxt.shortest_path_length([3, 1, 2], 1) == [1, 0, 1, 2, 1, 1] + assert nxt.shortest_path_length(["d", "d", "d", "i", "d", "d"], 1) == [ + 1, + 0, + 1, + 2, + 1, + 1, + ] + assert nxt.shortest_path_length(("d", "d", "d", "i", "d", "d"), 1) == [ + 1, + 0, + 1, + 2, + 1, + 1, + ] + pytest.raises(TypeError, nxt.shortest_path, [3.0, 1.0, 2.0], 1) + + def test_random_threshold_sequence(self): + assert len(nxt.random_threshold_sequence(10, 0.5)) == 10 + assert nxt.random_threshold_sequence(10, 0.5, seed=42) == [ + "d", + "i", + "d", + "d", + "d", + "i", + "i", + "i", + "d", + "d", + ] + pytest.raises(ValueError, nxt.random_threshold_sequence, 10, 1.5) + + def test_right_d_threshold_sequence(self): + assert nxt.right_d_threshold_sequence(3, 2) == ["d", "i", "d"] + pytest.raises(ValueError, nxt.right_d_threshold_sequence, 2, 3) + + def test_left_d_threshold_sequence(self): + assert nxt.left_d_threshold_sequence(3, 2) == ["d", "i", "d"] + pytest.raises(ValueError, nxt.left_d_threshold_sequence, 2, 3) + + def test_weights_thresholds(self): + wseq = [3, 4, 3, 3, 5, 6, 5, 4, 5, 6] + cs = nxt.weights_to_creation_sequence(wseq, threshold=10) + wseq = nxt.creation_sequence_to_weights(cs) + cs2 = nxt.weights_to_creation_sequence(wseq) + assert cs == cs2 + + wseq = nxt.creation_sequence_to_weights(nxt.uncompact([3, 1, 2, 3, 3, 2, 3])) + assert wseq == [ + s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7] + ] + + wseq = nxt.creation_sequence_to_weights([3, 1, 2, 3, 3, 2, 3]) + assert wseq == [ + s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7] + ] + + wseq = nxt.creation_sequence_to_weights(list(enumerate("ddidiiidididi"))) + assert wseq == [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]] + + wseq = nxt.creation_sequence_to_weights("ddidiiidididi") + assert wseq == [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]] + + wseq = nxt.creation_sequence_to_weights("ddidiiidididid") + ws = [s / 12 for s in [6, 6, 5, 7, 4, 4, 4, 8, 3, 9, 2, 10, 1, 11]] + assert sum(abs(c - d) for c, d in zip(wseq, ws)) < 1e-14 + + def test_finding_routines(self): + G = nx.Graph({1: [2], 2: [3], 3: [4], 4: [5], 5: [6]}) + G.add_edge(2, 4) + G.add_edge(2, 5) + G.add_edge(2, 7) + G.add_edge(3, 6) + 
G.add_edge(4, 6) + + # Alternating 4 cycle + assert nxt.find_alternating_4_cycle(G) == [1, 2, 3, 6] + + # Threshold graph + TG = nxt.find_threshold_graph(G) + assert nxt.is_threshold_graph(TG) + assert sorted(TG.nodes()) == [1, 2, 3, 4, 5, 7] + + cs = nxt.creation_sequence(dict(TG.degree()), with_labels=True) + assert nxt.find_creation_sequence(G) == cs + + def test_fast_versions_properties_threshold_graphs(self): + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + assert nxt.density("ddiiddid") == nx.density(G) + assert sorted(nxt.degree_sequence(cs)) == sorted(d for n, d in G.degree()) + + ts = nxt.triangle_sequence(cs) + assert ts == list(nx.triangles(G).values()) + assert sum(ts) // 3 == nxt.triangles(cs) + + c1 = nxt.cluster_sequence(cs) + c2 = list(nx.clustering(G).values()) + assert sum(abs(c - d) for c, d in zip(c1, c2)) == pytest.approx(0, abs=1e-7) + + b1 = nx.betweenness_centrality(G).values() + b2 = nxt.betweenness_sequence(cs) + assert sum(abs(c - d) for c, d in zip(b1, b2)) < 1e-7 + + assert nxt.eigenvalues(cs) == [0, 1, 3, 3, 5, 7, 7, 8] + + # Degree Correlation + assert abs(nxt.degree_correlation(cs) + 0.593038821954) < 1e-12 + assert nxt.degree_correlation("diiiddi") == -0.8 + assert nxt.degree_correlation("did") == -1.0 + assert nxt.degree_correlation("ddd") == 1.0 + assert nxt.eigenvalues("dddiii") == [0, 0, 0, 0, 3, 3] + assert nxt.eigenvalues("dddiiid") == [0, 1, 1, 1, 4, 4, 7] + + def test_tg_creation_routines(self): + s = nxt.left_d_threshold_sequence(5, 7) + s = nxt.right_d_threshold_sequence(5, 7) + s1 = nxt.swap_d(s, 1.0, 1.0) + s1 = nxt.swap_d(s, 1.0, 1.0, seed=1) + + def test_eigenvectors(self): + np = pytest.importorskip("numpy") + eigenval = np.linalg.eigvals + pytest.importorskip("scipy") + + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + (tgeval, tgevec) = nxt.eigenvectors(cs) + np.testing.assert_allclose([np.dot(lv, lv) for lv in tgevec], 1.0, rtol=1e-9) + lapl = nx.laplacian_matrix(G) + + def test_create_using(self): + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + pytest.raises( + nx.exception.NetworkXError, + nxt.threshold_graph, + cs, + create_using=nx.DiGraph(), + ) + MG = nxt.threshold_graph(cs, create_using=nx.MultiGraph()) + assert sorted(MG.edges()) == sorted(G.edges()) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_time_dependent.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_time_dependent.py new file mode 100644 index 0000000..1e256f4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_time_dependent.py @@ -0,0 +1,431 @@ +"""Unit testing for time dependent algorithms.""" + +from datetime import datetime, timedelta + +import pytest + +import networkx as nx + +_delta = timedelta(days=5 * 365) + + +class TestCdIndex: + """Unit testing for the cd index function.""" + + def test_common_graph(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 
9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=_delta) == 0.17 + + def test_common_graph_with_given_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"date": datetime(1992, 1, 1)}, + 1: {"date": datetime(1992, 1, 1)}, + 2: {"date": datetime(1993, 1, 1)}, + 3: {"date": datetime(1993, 1, 1)}, + 4: {"date": datetime(1995, 1, 1)}, + 5: {"date": datetime(1997, 1, 1)}, + 6: {"date": datetime(1998, 1, 1)}, + 7: {"date": datetime(1999, 1, 1)}, + 8: {"date": datetime(1999, 1, 1)}, + 9: {"date": datetime(1998, 1, 1)}, + 10: {"date": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=_delta, time="date") == 0.17 + + def test_common_graph_with_int_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20}, + 1: {"time": 20}, + 2: {"time": 30}, + 3: {"time": 30}, + 4: {"time": 50}, + 5: {"time": 70}, + 6: {"time": 80}, + 7: {"time": 90}, + 8: {"time": 90}, + 9: {"time": 80}, + 10: {"time": 74}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=50) == 0.17 + + def test_common_graph_with_float_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20.2}, + 1: {"time": 20.2}, + 2: {"time": 30.7}, + 3: {"time": 30.7}, + 4: {"time": 50.9}, + 5: {"time": 70.1}, + 6: {"time": 80.6}, + 7: {"time": 90.7}, + 8: {"time": 90.7}, + 9: {"time": 80.6}, + 10: {"time": 74.2}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=50) == 0.17 + + def test_common_graph_with_weights(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1), "weight": 5}, + 7: {"time": datetime(1999, 1, 1), "weight": 2}, + 8: {"time": datetime(1999, 1, 1), "weight": 6}, + 9: {"time": datetime(1998, 1, 1), "weight": 3}, + 10: {"time": datetime(1997, 4, 1), "weight": 10}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta, weight="weight") == 0.04 + + def test_node_with_no_predecessors(self): + G = 
nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(2005, 1, 1)}, + 6: {"time": datetime(2010, 1, 1)}, + 7: {"time": datetime(2001, 1, 1)}, + 8: {"time": datetime(2020, 1, 1)}, + 9: {"time": datetime(2017, 1, 1)}, + 10: {"time": datetime(2004, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta) == 0.0 + + def test_node_with_no_successors(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(8, 2) + G.add_edge(6, 0) + G.add_edge(6, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta) == 1.0 + + def test_n_equals_zero(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(2005, 1, 1)}, + 6: {"time": datetime(2010, 1, 1)}, + 7: {"time": datetime(2001, 1, 1)}, + 8: {"time": datetime(2020, 1, 1)}, + 9: {"time": datetime(2017, 1, 1)}, + 10: {"time": datetime(2004, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, match="The cd index cannot be defined." 
+ ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_time_timedelta_compatibility(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20.2}, + 1: {"time": 20.2}, + 2: {"time": 30.7}, + 3: {"time": 30.7}, + 4: {"time": 50.9}, + 5: {"time": 70.1}, + 6: {"time": 80.6}, + 7: {"time": 90.7}, + 8: {"time": 90.7}, + 9: {"time": 80.6}, + 10: {"time": 74.2}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, + match="Addition and comparison are not supported between", + ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_node_with_no_time(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(8, 2) + G.add_edge(6, 0) + G.add_edge(6, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, match="Not all nodes have a 'time' attribute." + ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_maximally_consolidating(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G.add_edge(5, 1) + G.add_edge(5, 2) + G.add_edge(5, 3) + G.add_edge(5, 4) + G.add_edge(6, 1) + G.add_edge(6, 5) + G.add_edge(7, 1) + G.add_edge(7, 5) + G.add_edge(8, 2) + G.add_edge(8, 5) + G.add_edge(9, 5) + G.add_edge(9, 3) + G.add_edge(10, 5) + G.add_edge(10, 3) + G.add_edge(10, 4) + G.add_edge(11, 5) + G.add_edge(11, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + 11: {"time": datetime(1998, 5, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 5, time_delta=_delta) == -1 + + def test_maximally_destabilizing(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G.add_edge(5, 1) + G.add_edge(5, 2) + G.add_edge(5, 3) + G.add_edge(5, 4) + G.add_edge(6, 5) + G.add_edge(7, 5) + G.add_edge(8, 5) + G.add_edge(9, 5) + G.add_edge(10, 5) + G.add_edge(11, 5) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + 11: {"time": datetime(1998, 5, 1)}, + } + + nx.set_node_attributes(G, node_attrs) 
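+        # Clarifying note (added; not part of the original test data): every
+        # citing node (6 through 11) cites *only* node 5 and none of node 5's
+        # own references (nodes 1-4). That is the maximally destabilizing
+        # citation pattern, so the CD index below reaches its upper bound of 1,
+        # mirroring the -1 lower bound in test_maximally_consolidating above.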
+ + assert nx.cd_index(G, 5, time_delta=_delta) == 1 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_tournament.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_tournament.py new file mode 100644 index 0000000..34d9b22 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_tournament.py @@ -0,0 +1,161 @@ +"""Unit tests for the :mod:`networkx.algorithms.tournament` module.""" + +from itertools import combinations + +import pytest + +from networkx import DiGraph +from networkx.algorithms.tournament import ( + hamiltonian_path, + index_satisfying, + is_reachable, + is_strongly_connected, + is_tournament, + random_tournament, + score_sequence, + tournament_matrix, +) + + +def test_condition_not_satisfied(): + iter_in = [0] + assert index_satisfying(iter_in, lambda x: x > 0) == 1 + + +def test_empty_iterable(): + with pytest.raises(ValueError): + index_satisfying([], lambda x: x > 0) + + +def test_is_tournament(): + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + assert is_tournament(G) + + +def test_self_loops(): + """A tournament must have no self-loops.""" + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + G.add_edge(0, 0) + assert not is_tournament(G) + + +def test_missing_edges(): + """A tournament must not have any pair of nodes without at least + one edge joining the pair. + + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3)]) + assert not is_tournament(G) + + +def test_bidirectional_edges(): + """A tournament must not have any pair of nodes with greater + than one edge joining the pair. + + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + G.add_edge(1, 0) + assert not is_tournament(G) + + +def test_graph_is_tournament(): + for _ in range(10): + G = random_tournament(5) + assert is_tournament(G) + + +def test_graph_is_tournament_seed(): + for _ in range(10): + G = random_tournament(5, seed=1) + assert is_tournament(G) + + +def test_graph_is_tournament_one_node(): + G = random_tournament(1) + assert is_tournament(G) + + +def test_graph_is_tournament_zero_node(): + G = random_tournament(0) + assert is_tournament(G) + + +def test_hamiltonian_empty_graph(): + path = hamiltonian_path(DiGraph()) + assert len(path) == 0 + + +def test_path_is_hamiltonian(): + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + path = hamiltonian_path(G) + assert len(path) == 4 + assert all(v in G[u] for u, v in zip(path, path[1:])) + + +def test_hamiltonian_cycle(): + """Tests that :func:`networkx.tournament.hamiltonian_path` + returns a Hamiltonian cycle when provided a strongly connected + tournament. 
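+    A condensed sketch of the property (added for illustration; it mirrors
+    the assertions of this test rather than the library's documented
+    examples):
+
+    >>> G = DiGraph([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)])
+    >>> path = hamiltonian_path(G)
+    >>> len(path) == 4 and path[0] in G[path[-1]]
+    True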
+ + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + path = hamiltonian_path(G) + assert len(path) == 4 + assert all(v in G[u] for u, v in zip(path, path[1:])) + assert path[0] in G[path[-1]] + + +def test_score_sequence_edge(): + G = DiGraph([(0, 1)]) + assert score_sequence(G) == [0, 1] + + +def test_score_sequence_triangle(): + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert score_sequence(G) == [1, 1, 1] + + +def test_tournament_matrix(): + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + npt = np.testing + G = DiGraph([(0, 1)]) + m = tournament_matrix(G) + npt.assert_array_equal(m.todense(), np.array([[0, 1], [-1, 0]])) + + +def test_reachable_pair(): + """Tests for a reachable pair of nodes.""" + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert is_reachable(G, 0, 2) + + +def test_same_node_is_reachable(): + """Tests that a node is always reachable from it.""" + # G is an arbitrary tournament on ten nodes. + G = DiGraph(sorted(p) for p in combinations(range(10), 2)) + assert all(is_reachable(G, v, v) for v in G) + + +def test_unreachable_pair(): + """Tests for an unreachable pair of nodes.""" + G = DiGraph([(0, 1), (0, 2), (1, 2)]) + assert not is_reachable(G, 1, 0) + + +def test_is_strongly_connected(): + """Tests for a strongly connected tournament.""" + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert is_strongly_connected(G) + + +def test_not_strongly_connected(): + """Tests for a tournament that is not strongly connected.""" + G = DiGraph([(0, 1), (0, 2), (1, 2)]) + assert not is_strongly_connected(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_triads.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_triads.py new file mode 100644 index 0000000..7f5bca2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_triads.py @@ -0,0 +1,248 @@ +"""Tests for the :mod:`networkx.algorithms.triads` module.""" + +import itertools +from collections import defaultdict +from random import sample + +import pytest + +import networkx as nx + + +def test_triadic_census(): + """Tests the triadic_census function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = { + "030T": 2, + "120C": 1, + "210": 0, + "120U": 0, + "012": 9, + "102": 3, + "021U": 0, + "111U": 0, + "003": 8, + "030C": 0, + "021D": 9, + "201": 0, + "111D": 1, + "300": 0, + "120D": 0, + "021C": 2, + } + actual = nx.triadic_census(G) + assert expected == actual + + +def test_is_triad(): + """Tests the is_triad function""" + G = nx.karate_club_graph() + G = G.to_directed() + for i in range(100): + nodes = sample(sorted(G.nodes()), 3) + G2 = G.subgraph(nodes) + assert nx.is_triad(G2) + + +def test_all_triads(): + """Tests the all_triads function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = [ + f"{i},{j},{k}" + for i in range(7) + for j in range(i + 1, 7) + for k in range(j + 1, 7) + ] + expected = [G.subgraph(x.split(",")) for x in expected] + actual = list(nx.all_triads(G)) + assert all(any(nx.is_isomorphic(G1, G2) for G1 in expected) for G2 in actual) + + +def test_triad_type(): + """Tests the triad_type function.""" + # 0 edges (1 type) + G = nx.DiGraph({0: [], 1: [], 2: []}) + assert nx.triad_type(G) == "003" + # 1 edge (1 type) + G = nx.DiGraph({0: [1], 1: [], 2: []}) + assert nx.triad_type(G) == "012" + # 2 edges (4 types) + G = nx.DiGraph([(0, 1), (0, 2)]) + 
assert nx.triad_type(G) == "021D" + G = nx.DiGraph({0: [1], 1: [0], 2: []}) + assert nx.triad_type(G) == "102" + G = nx.DiGraph([(0, 1), (2, 1)]) + assert nx.triad_type(G) == "021U" + G = nx.DiGraph([(0, 1), (1, 2)]) + assert nx.triad_type(G) == "021C" + # 3 edges (4 types) + G = nx.DiGraph([(0, 1), (1, 0), (2, 1)]) + assert nx.triad_type(G) == "111D" + G = nx.DiGraph([(0, 1), (1, 0), (1, 2)]) + assert nx.triad_type(G) == "111U" + G = nx.DiGraph([(0, 1), (1, 2), (0, 2)]) + assert nx.triad_type(G) == "030T" + G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + assert nx.triad_type(G) == "030C" + # 4 edges (4 types) + G = nx.DiGraph([(0, 1), (1, 0), (2, 0), (0, 2)]) + assert nx.triad_type(G) == "201" + G = nx.DiGraph([(0, 1), (1, 0), (2, 0), (2, 1)]) + assert nx.triad_type(G) == "120D" + G = nx.DiGraph([(0, 1), (1, 0), (0, 2), (1, 2)]) + assert nx.triad_type(G) == "120U" + G = nx.DiGraph([(0, 1), (1, 0), (0, 2), (2, 1)]) + assert nx.triad_type(G) == "120C" + # 5 edges (1 type) + G = nx.DiGraph([(0, 1), (1, 0), (2, 1), (1, 2), (0, 2)]) + assert nx.triad_type(G) == "210" + # 6 edges (1 type) + G = nx.DiGraph([(0, 1), (1, 0), (1, 2), (2, 1), (0, 2), (2, 0)]) + assert nx.triad_type(G) == "300" + + +def test_triads_by_type(): + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + all_triads = nx.all_triads(G) + expected = defaultdict(list) + for triad in all_triads: + name = nx.triad_type(triad) + expected[name].append(triad) + actual = nx.triads_by_type(G) + assert set(actual.keys()) == set(expected.keys()) + for tri_type, actual_Gs in actual.items(): + expected_Gs = expected[tri_type] + for a in actual_Gs: + assert any(nx.is_isomorphic(a, e) for e in expected_Gs) + + +def test_triadic_census_short_path_nodelist(): + G = nx.path_graph("abc", create_using=nx.DiGraph) + expected = {"021C": 1} + for nl in ["a", "b", "c", "ab", "ac", "bc", "abc"]: + triad_census = nx.triadic_census(G, nodelist=nl) + assert expected == {typ: cnt for typ, cnt in triad_census.items() if cnt > 0} + + +def test_triadic_census_correct_nodelist_values(): + G = nx.path_graph(5, create_using=nx.DiGraph) + msg = r"nodelist includes duplicate nodes or nodes not in G" + with pytest.raises(ValueError, match=msg): + nx.triadic_census(G, [1, 2, 2, 3]) + with pytest.raises(ValueError, match=msg): + nx.triadic_census(G, [1, 2, "a", 3]) + + +def test_triadic_census_tiny_graphs(): + tc = nx.triadic_census(nx.empty_graph(0, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.empty_graph(1, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.empty_graph(2, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.DiGraph([(1, 2)])) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + + +def test_triadic_census_selfloops(): + GG = nx.path_graph("abc", create_using=nx.DiGraph) + expected = {"021C": 1} + for n in GG: + G = GG.copy() + G.add_edge(n, n) + tc = nx.triadic_census(G) + assert expected == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + + GG = nx.path_graph("abcde", create_using=nx.DiGraph) + tbt = nx.triads_by_type(GG) + for n in GG: + GG.add_edge(n, n) + tc = nx.triadic_census(GG) + assert tc == {tt: len(tbt[tt]) for tt in tc} + + +def test_triadic_census_four_path(): + G = nx.path_graph("abcd", create_using=nx.DiGraph) + expected = {"012": 2, "021C": 2} + triad_census = 
nx.triadic_census(G) + assert expected == {typ: cnt for typ, cnt in triad_census.items() if cnt > 0} + + +def test_triadic_census_four_path_nodelist(): + G = nx.path_graph("abcd", create_using=nx.DiGraph) + expected_end = {"012": 2, "021C": 1} + expected_mid = {"012": 1, "021C": 2} + a_triad_census = nx.triadic_census(G, nodelist=["a"]) + assert expected_end == {typ: cnt for typ, cnt in a_triad_census.items() if cnt > 0} + b_triad_census = nx.triadic_census(G, nodelist=["b"]) + assert expected_mid == {typ: cnt for typ, cnt in b_triad_census.items() if cnt > 0} + c_triad_census = nx.triadic_census(G, nodelist=["c"]) + assert expected_mid == {typ: cnt for typ, cnt in c_triad_census.items() if cnt > 0} + d_triad_census = nx.triadic_census(G, nodelist=["d"]) + assert expected_end == {typ: cnt for typ, cnt in d_triad_census.items() if cnt > 0} + + +def test_triadic_census_nodelist(): + """Tests the triadic_census function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = { + "030T": 2, + "120C": 1, + "210": 0, + "120U": 0, + "012": 9, + "102": 3, + "021U": 0, + "111U": 0, + "003": 8, + "030C": 0, + "021D": 9, + "201": 0, + "111D": 1, + "300": 0, + "120D": 0, + "021C": 2, + } + actual = dict.fromkeys(expected, 0) + for node in G.nodes(): + node_triad_census = nx.triadic_census(G, nodelist=[node]) + for triad_key in expected: + actual[triad_key] += node_triad_census[triad_key] + # Divide all counts by 3 + for k, v in actual.items(): + actual[k] //= 3 + assert expected == actual + + +@pytest.mark.parametrize("N", [5, 10]) +def test_triadic_census_on_random_graph(N): + G = nx.binomial_graph(N, 0.3, directed=True, seed=42) + tc1 = nx.triadic_census(G) + tbt = nx.triads_by_type(G) + tc2 = {tt: len(tbt[tt]) for tt in tc1} + assert tc1 == tc2 + + for n in G: + tc1 = nx.triadic_census(G, nodelist={n}) + tc2 = {tt: sum(1 for t in tbt.get(tt, []) if n in t) for tt in tc1} + assert tc1 == tc2 + + for ns in itertools.combinations(G, 2): + ns = set(ns) + tc1 = nx.triadic_census(G, nodelist=ns) + tc2 = { + tt: sum(1 for t in tbt.get(tt, []) if any(n in ns for n in t)) for tt in tc1 + } + assert tc1 == tc2 + + for ns in itertools.combinations(G, 3): + ns = set(ns) + tc1 = nx.triadic_census(G, nodelist=ns) + tc2 = { + tt: sum(1 for t in tbt.get(tt, []) if any(n in ns for n in t)) for tt in tc1 + } + assert tc1 == tc2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_vitality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_vitality.py new file mode 100644 index 0000000..248206e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_vitality.py @@ -0,0 +1,41 @@ +import networkx as nx + + +class TestClosenessVitality: + def test_unweighted(self): + G = nx.cycle_graph(3) + vitality = nx.closeness_vitality(G) + assert vitality == {0: 2, 1: 2, 2: 2} + + def test_weighted(self): + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + assert vitality == {0: 4, 1: 4, 2: 4} + + def test_unweighted_digraph(self): + G = nx.DiGraph(nx.cycle_graph(3)) + vitality = nx.closeness_vitality(G) + assert vitality == {0: 4, 1: 4, 2: 4} + + def test_weighted_digraph(self): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2], weight=2) + nx.add_cycle(G, [2, 1, 0], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + assert vitality == {0: 8, 1: 8, 2: 8} + + def test_weighted_multidigraph(self): + G = nx.MultiDiGraph() + 
nx.add_cycle(G, [0, 1, 2], weight=2) + nx.add_cycle(G, [2, 1, 0], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + assert vitality == {0: 8, 1: 8, 2: 8} + + def test_disconnecting_graph(self): + """Tests that the closeness vitality of a node whose removal + disconnects the graph is negative infinity. + + """ + G = nx.path_graph(3) + assert nx.closeness_vitality(G, node=1) == -float("inf") diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_voronoi.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_voronoi.py new file mode 100644 index 0000000..3269ae6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_voronoi.py @@ -0,0 +1,103 @@ +import networkx as nx +from networkx.utils import pairwise + + +class TestVoronoiCells: + """Unit tests for the Voronoi cells function.""" + + def test_isolates(self): + """Tests that a graph with isolated nodes has all isolates in + one block of the partition. + + """ + G = nx.empty_graph(5) + cells = nx.voronoi_cells(G, {0, 2, 4}) + expected = {0: {0}, 2: {2}, 4: {4}, "unreachable": {1, 3}} + assert expected == cells + + def test_undirected_unweighted(self): + G = nx.cycle_graph(6) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 1, 5}, 3: {2, 3, 4}} + assert expected == cells + + def test_directed_unweighted(self): + # This is the singly-linked directed cycle graph on six nodes. + G = nx.DiGraph(pairwise(range(6), cyclic=True)) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 1, 2}, 3: {3, 4, 5}} + assert expected == cells + + def test_directed_inward(self): + """Tests that reversing the graph gives the "inward" Voronoi + partition. + + """ + # This is the singly-linked reverse directed cycle graph on six nodes. + G = nx.DiGraph(pairwise(range(6), cyclic=True)) + G = G.reverse(copy=False) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 4, 5}, 3: {1, 2, 3}} + assert expected == cells + + def test_undirected_weighted(self): + edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1)] + G = nx.Graph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_directed_weighted(self): + edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1), (3, 2, 1), (2, 1, 1)] + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_multigraph_unweighted(self): + """Tests that the Voronoi cells for a multigraph are the same as + for a simple graph. + + """ + edges = [(0, 1), (1, 2), (2, 3)] + G = nx.MultiGraph(2 * edges) + H = nx.Graph(G) + G_cells = nx.voronoi_cells(G, {0, 3}) + H_cells = nx.voronoi_cells(H, {0, 3}) + assert G_cells == H_cells + + def test_multidigraph_unweighted(self): + # This is the twice-singly-linked directed cycle graph on six nodes. 
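+        # (Added clarification, not in the original: duplicating every edge
+        # leaves shortest-path distances unchanged, so the Voronoi cells of
+        # the multidigraph must equal those of the simple DiGraph built from
+        # it, which is exactly what the assertion below checks.)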
+ edges = list(pairwise(range(6), cyclic=True)) + G = nx.MultiDiGraph(2 * edges) + H = nx.DiGraph(G) + G_cells = nx.voronoi_cells(G, {0, 3}) + H_cells = nx.voronoi_cells(H, {0, 3}) + assert G_cells == H_cells + + def test_multigraph_weighted(self): + edges = [(0, 1, 10), (0, 1, 10), (1, 2, 1), (1, 2, 100), (2, 3, 1), (2, 3, 100)] + G = nx.MultiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_multidigraph_weighted(self): + edges = [ + (0, 1, 10), + (0, 1, 10), + (1, 2, 1), + (2, 3, 1), + (3, 2, 10), + (3, 2, 1), + (2, 1, 10), + (2, 1, 1), + ] + G = nx.MultiDiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_walks.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_walks.py new file mode 100644 index 0000000..7a6b323 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_walks.py @@ -0,0 +1,54 @@ +"""Unit tests for the :mod:`networkx.algorithms.walks` module.""" + +import pytest + +import networkx as nx + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +def test_directed(): + G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + num_walks = nx.number_of_walks(G, 3) + expected = {0: {0: 1, 1: 0, 2: 0}, 1: {0: 0, 1: 1, 2: 0}, 2: {0: 0, 1: 0, 2: 1}} + assert num_walks == expected + + +def test_undirected(): + G = nx.cycle_graph(3) + num_walks = nx.number_of_walks(G, 3) + expected = {0: {0: 2, 1: 3, 2: 3}, 1: {0: 3, 1: 2, 2: 3}, 2: {0: 3, 1: 3, 2: 2}} + assert num_walks == expected + + +def test_non_integer_nodes(): + G = nx.DiGraph([("A", "B"), ("B", "C"), ("C", "A")]) + num_walks = nx.number_of_walks(G, 2) + expected = { + "A": {"A": 0, "B": 0, "C": 1}, + "B": {"A": 1, "B": 0, "C": 0}, + "C": {"A": 0, "B": 1, "C": 0}, + } + assert num_walks == expected + + +def test_zero_length(): + G = nx.cycle_graph(3) + num_walks = nx.number_of_walks(G, 0) + expected = {0: {0: 1, 1: 0, 2: 0}, 1: {0: 0, 1: 1, 2: 0}, 2: {0: 0, 1: 0, 2: 1}} + assert num_walks == expected + + +def test_negative_length_exception(): + G = nx.cycle_graph(3) + with pytest.raises(ValueError): + nx.number_of_walks(G, -1) + + +def test_hidden_weight_attr(): + G = nx.cycle_graph(3) + G.add_edge(1, 2, weight=5) + num_walks = nx.number_of_walks(G, 3) + expected = {0: {0: 2, 1: 3, 2: 3}, 1: {0: 3, 1: 2, 2: 3}, 2: {0: 3, 1: 3, 2: 2}} + assert num_walks == expected diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_wiener.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_wiener.py new file mode 100644 index 0000000..aded951 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tests/test_wiener.py @@ -0,0 +1,123 @@ +import networkx as nx + + +def test_wiener_index_of_disconnected_graph(): + assert nx.wiener_index(nx.empty_graph(2)) == float("inf") + + +def test_wiener_index_of_directed_graph(): + G = nx.complete_graph(3) + H = nx.DiGraph(G) + assert (2 * nx.wiener_index(G)) == nx.wiener_index(H) + + +def test_wiener_index_of_complete_graph(): + n = 10 + G = nx.complete_graph(n) + assert nx.wiener_index(G) == (n * (n - 1) / 2) + + +def test_wiener_index_of_path_graph(): + # In P_n, there are n - 1 pairs of vertices at distance one, n - + # 2 pairs at distance two, n - 3 at distance three, ..., 1 at + # distance n - 1, so the Wiener index 
should be + # + # 1 * (n - 1) + 2 * (n - 2) + ... + (n - 2) * 2 + (n - 1) * 1 + # + # For example, in P_5, + # + # 1 * 4 + 2 * 3 + 3 * 2 + 4 * 1 = 2 (1 * 4 + 2 * 3) + # + # and in P_6, + # + # 1 * 5 + 2 * 4 + 3 * 3 + 4 * 2 + 5 * 1 = 2 (1 * 5 + 2 * 4) + 3 * 3 + # + # assuming n is *odd*, this gives the formula + # + # 2 \sum_{i = 1}^{(n - 1) / 2} [i * (n - i)] + # + # assuming n is *even*, this gives the formula + # + # 2 \sum_{i = 1}^{n / 2} [i * (n - i)] - (n / 2) ** 2 + # + n = 9 + G = nx.path_graph(n) + expected = 2 * sum(i * (n - i) for i in range(1, (n // 2) + 1)) + actual = nx.wiener_index(G) + assert expected == actual + + +def test_schultz_and_gutman_index_of_disconnected_graph(): + n = 4 + G = nx.Graph() + G.add_nodes_from(list(range(1, n + 1))) + expected = float("inf") + + G.add_edge(1, 2) + G.add_edge(3, 4) + + actual_1 = nx.schultz_index(G) + actual_2 = nx.gutman_index(G) + + assert expected == actual_1 + assert expected == actual_2 + + +def test_schultz_and_gutman_index_of_complete_bipartite_graph_1(): + n = 3 + m = 3 + cbg = nx.complete_bipartite_graph(n, m) + + expected_1 = n * m * (n + m) + 2 * n * (n - 1) * m + 2 * m * (m - 1) * n + actual_1 = nx.schultz_index(cbg) + + expected_2 = n * m * (n * m) + n * (n - 1) * m * m + m * (m - 1) * n * n + actual_2 = nx.gutman_index(cbg) + + assert expected_1 == actual_1 + assert expected_2 == actual_2 + + +def test_schultz_and_gutman_index_of_complete_bipartite_graph_2(): + n = 2 + m = 5 + cbg = nx.complete_bipartite_graph(n, m) + + expected_1 = n * m * (n + m) + 2 * n * (n - 1) * m + 2 * m * (m - 1) * n + actual_1 = nx.schultz_index(cbg) + + expected_2 = n * m * (n * m) + n * (n - 1) * m * m + m * (m - 1) * n * n + actual_2 = nx.gutman_index(cbg) + + assert expected_1 == actual_1 + assert expected_2 == actual_2 + + +def test_schultz_and_gutman_index_of_complete_graph(): + n = 5 + cg = nx.complete_graph(n) + + expected_1 = n * (n - 1) * (n - 1) + actual_1 = nx.schultz_index(cg) + + assert expected_1 == actual_1 + + expected_2 = n * (n - 1) * (n - 1) * (n - 1) / 2 + actual_2 = nx.gutman_index(cg) + + assert expected_2 == actual_2 + + +def test_schultz_and_gutman_index_of_odd_cycle_graph(): + k = 5 + n = 2 * k + 1 + ocg = nx.cycle_graph(n) + + expected_1 = 2 * n * k * (k + 1) + actual_1 = nx.schultz_index(ocg) + + expected_2 = 2 * n * k * (k + 1) + actual_2 = nx.gutman_index(ocg) + + assert expected_1 == actual_1 + assert expected_2 == actual_2 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/threshold.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/threshold.py new file mode 100644 index 0000000..21705cc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/threshold.py @@ -0,0 +1,1035 @@ +""" +Threshold Graphs - Creation, manipulation and identification. +""" + +from math import sqrt + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["is_threshold_graph", "find_threshold_graph"] + + +@nx._dispatchable +def is_threshold_graph(G): + """ + Returns `True` if `G` is a threshold graph. + + Parameters + ---------- + G : NetworkX graph instance + An instance of `Graph`, `DiGraph`, `MultiGraph` or `MultiDiGraph` + + Returns + ------- + bool + `True` if `G` is a threshold graph, `False` otherwise. + + Examples + -------- + >>> from networkx.algorithms.threshold import is_threshold_graph + >>> G = nx.path_graph(3) + >>> is_threshold_graph(G) + True + >>> G = nx.barbell_graph(3, 3) + >>> is_threshold_graph(G) + False + + References + ---------- + .. 
[1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph + """ + return is_threshold_sequence([d for n, d in G.degree()]) + + +def is_threshold_sequence(degree_sequence): + """ + Returns True if the sequence is a threshold degree sequence. + + Uses the property that a threshold graph must be constructed by + adding either dominating or isolated nodes. Thus, it can be + deconstructed iteratively by removing a node of degree zero or a + node that connects to the remaining nodes. If this deconstruction + fails then the sequence is not a threshold sequence. + """ + ds = degree_sequence[:] # get a copy so we don't destroy original + ds.sort() + while ds: + if ds[0] == 0: # if isolated node + ds.pop(0) # remove it + continue + if ds[-1] != len(ds) - 1: # is the largest degree node dominating? + return False # no, not a threshold degree sequence + ds.pop() # yes, largest is the dominating node + ds = [d - 1 for d in ds] # remove it and decrement all degrees + return True + + +def creation_sequence(degree_sequence, with_labels=False, compact=False): + """ + Determines the creation sequence for the given threshold degree sequence. + + The creation sequence is a list of single characters 'd' + or 'i': 'd' for dominating or 'i' for isolated vertices. + Dominating vertices are connected to all vertices present when it + is added. The first node added is by convention 'd'. + This list can be converted to a string if desired using "".join(cs) + + If with_labels==True: + Returns a list of 2-tuples containing the vertex number + and a character 'd' or 'i' which describes the type of vertex. + + If compact==True: + Returns the creation sequence in a compact form that is the number + of 'i's and 'd's alternating. + Examples: + [1,2,2,3] represents d,i,i,d,d,i,i,i + [3,1,2] represents d,d,d,i,d,d + + Notice that the first number is the first vertex to be used for + construction and so is always 'd'. + + with_labels and compact cannot both be True. + + Returns None if the sequence is not a threshold sequence + """ + if with_labels and compact: + raise ValueError("compact sequences cannot be labeled") + + # make an indexed copy + if isinstance(degree_sequence, dict): # labeled degree sequence + ds = [[degree, label] for (label, degree) in degree_sequence.items()] + else: + ds = [[d, i] for i, d in enumerate(degree_sequence)] + ds.sort() + cs = [] # creation sequence + while ds: + if ds[0][0] == 0: # isolated node + (d, v) = ds.pop(0) + if len(ds) > 0: # make sure we start with a d + cs.insert(0, (v, "i")) + else: + cs.insert(0, (v, "d")) + continue + if ds[-1][0] != len(ds) - 1: # Not dominating node + return None # not a threshold degree sequence + (d, v) = ds.pop() + cs.insert(0, (v, "d")) + ds = [[d[0] - 1, d[1]] for d in ds] # decrement due to removing node + + if with_labels: + return cs + if compact: + return make_compact(cs) + return [v[1] for v in cs] # not labeled + + +def make_compact(creation_sequence): + """ + Returns the creation sequence in a compact form + that is the number of 'i's and 'd's alternating. + + Examples + -------- + >>> from networkx.algorithms.threshold import make_compact + >>> make_compact(["d", "i", "i", "d", "d", "i", "i", "i"]) + [1, 2, 2, 3] + >>> make_compact(["d", "d", "d", "i", "d", "d"]) + [3, 1, 2] + + Notice that the first number is the first vertex + to be used for construction and so is always 'd'. + + Labeled creation sequences lose their labels in the + compact representation. 
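+
+    For instance:
+
+    >>> make_compact([(0, "d"), (1, "i"), (2, "d"), (3, "d")])
+    [1, 1, 2]
+
+    Already-compact sequences pass through unchanged: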
+ + >>> make_compact([3, 1, 2]) + [3, 1, 2] + """ + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + cs = creation_sequence[:] + elif isinstance(first, tuple): # labeled creation sequence + cs = [s[1] for s in creation_sequence] + elif isinstance(first, int): # compact creation sequence + return creation_sequence + else: + raise TypeError("Not a valid creation sequence type") + + ccs = [] + count = 1 # count the run lengths of d's or i's. + for i in range(1, len(cs)): + if cs[i] == cs[i - 1]: + count += 1 + else: + ccs.append(count) + count = 1 + ccs.append(count) # don't forget the last one + return ccs + + +def uncompact(creation_sequence): + """ + Converts a compact creation sequence for a threshold + graph to a standard creation sequence (unlabeled). + If the creation_sequence is already standard, return it. + See creation_sequence. + """ + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + return creation_sequence + elif isinstance(first, tuple): # labeled creation sequence + return creation_sequence + elif isinstance(first, int): # compact creation sequence + ccscopy = creation_sequence[:] + else: + raise TypeError("Not a valid creation sequence type") + cs = [] + while ccscopy: + cs.extend(ccscopy.pop(0) * ["d"]) + if ccscopy: + cs.extend(ccscopy.pop(0) * ["i"]) + return cs + + +def creation_sequence_to_weights(creation_sequence): + """ + Returns a list of node weights which create the threshold + graph designated by the creation sequence. The weights + are scaled so that the threshold is 1.0. The order of the + nodes is the same as that in the creation sequence. + """ + # Turn input sequence into a labeled creation sequence + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + if isinstance(creation_sequence, list): + wseq = creation_sequence[:] + else: + wseq = list(creation_sequence) # string like 'ddidid' + elif isinstance(first, tuple): # labeled creation sequence + wseq = [v[1] for v in creation_sequence] + elif isinstance(first, int): # compact creation sequence + wseq = uncompact(creation_sequence) + else: + raise TypeError("Not a valid creation sequence type") + # pass through twice--first backwards + wseq.reverse() + w = 0 + prev = "i" + for j, s in enumerate(wseq): + if s == "i": + wseq[j] = w + prev = s + elif prev == "i": + prev = s + w += 1 + wseq.reverse() # now pass through forwards + for j, s in enumerate(wseq): + if s == "d": + wseq[j] = w + prev = s + elif prev == "d": + prev = s + w += 1 + # Now scale weights + if prev == "d": + w += 1 + wscale = 1 / w + return [ww * wscale for ww in wseq] + # return wseq + + +def weights_to_creation_sequence( + weights, threshold=1, with_labels=False, compact=False +): + """ + Returns a creation sequence for a threshold graph + determined by the weights and threshold given as input. + If the sum of two node weights is greater than the + threshold value, an edge is created between these nodes. + + The creation sequence is a list of single characters 'd' + or 'i': 'd' for dominating or 'i' for isolated vertices. + Dominating vertices are connected to all vertices present + when it is added. The first node added is by convention 'd'. + + If with_labels==True: + Returns a list of 2-tuples containing the vertex number + and a character 'd' or 'i' which describes the type of vertex. + + If compact==True: + Returns the creation sequence in a compact form that is the number + of 'i's and 'd's alternating. 
+    Examples:
+    [1,2,2,3] represents d,i,i,d,d,i,i,i
+    [3,1,2] represents d,d,d,i,d,d
+
+    Notice that the first number is the first vertex to be used for
+    construction and so is always 'd'.
+
+    with_labels and compact cannot both be True.
+    """
+    if with_labels and compact:
+        raise ValueError("compact sequences cannot be labeled")
+
+    # make an indexed copy
+    if isinstance(weights, dict):  # labeled weights
+        wseq = [[w, label] for (label, w) in weights.items()]
+    else:
+        wseq = [[w, i] for i, w in enumerate(weights)]
+    wseq.sort()
+    cs = []  # creation sequence
+    cutoff = threshold - wseq[-1][0]
+    while wseq:
+        if wseq[0][0] < cutoff:  # isolated node
+            (w, label) = wseq.pop(0)
+            cs.append((label, "i"))
+        else:
+            (w, label) = wseq.pop()
+            cs.append((label, "d"))
+            cutoff = threshold - wseq[-1][0]
+        if len(wseq) == 1:  # make sure we start with a d
+            (w, label) = wseq.pop()
+            cs.append((label, "d"))
+    # put in correct order
+    cs.reverse()
+
+    if with_labels:
+        return cs
+    if compact:
+        return make_compact(cs)
+    return [v[1] for v in cs]  # not labeled
+
+
+# Manipulating NetworkX.Graphs in context of threshold graphs
+@nx._dispatchable(graphs=None, returns_graph=True)
+def threshold_graph(creation_sequence, create_using=None):
+    """
+    Create a threshold graph from the creation sequence or compact
+    creation_sequence.
+
+    The input sequence can be a
+
+    creation sequence (e.g. ['d','i','d','d','d','i'])
+    labeled creation sequence (e.g. [(0,'d'),(2,'d'),(1,'i')])
+    compact creation sequence (e.g. [2,1,1,2,0])
+
+    Use cs=creation_sequence(degree_sequence,with_labels=True)
+    to convert a degree sequence to a creation sequence.
+
+    Returns None if the sequence is not valid
+    """
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        ci = list(enumerate(creation_sequence))
+    elif isinstance(first, tuple):  # labeled creation sequence
+        ci = creation_sequence[:]
+    elif isinstance(first, int):  # compact creation sequence
+        cs = uncompact(creation_sequence)
+        ci = list(enumerate(cs))
+    else:
+        print("not a valid creation sequence type")
+        return None
+
+    G = nx.empty_graph(0, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    G.name = "Threshold Graph"
+
+    # add nodes and edges
+    # if type is 'i' just add the node
+    # if type is 'd' connect the node to everything previous
+    while ci:
+        (v, node_type) = ci.pop(0)
+        if node_type == "d":  # dominating type, connect to all existing nodes
+            # We use `for u in list(G):` instead of
+            # `for u in G:` because we edit the graph `G` in
+            # the loop. Hence using an iterator will result in
+            # `RuntimeError: dictionary changed size during iteration`
+            for u in list(G):
+                G.add_edge(v, u)
+        G.add_node(v)
+    return G
+
+
+@nx._dispatchable
+def find_alternating_4_cycle(G):
+    """
+    Returns False if there aren't any alternating 4 cycles.
+    Otherwise returns the cycle as [a,b,c,d] where (a,b)
+    and (c,d) are edges and (a,c) and (b,d) are not.
+    """
+    for u, v in G.edges():
+        for w in G.nodes():
+            if not G.has_edge(u, w) and u != w:
+                for x in G.neighbors(w):
+                    if not G.has_edge(v, x) and v != x:
+                        return [u, v, w, x]
+    return False
+
+
+@nx._dispatchable(returns_graph=True)
+def find_threshold_graph(G, create_using=None):
+    """
+    Returns a threshold subgraph that is close to largest in `G`.
+
+    The threshold graph will contain the largest degree node in G.
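+
+    For example (a small sanity check; the result is a threshold graph
+    by construction):
+
+    >>> from networkx.algorithms.threshold import find_threshold_graph, is_threshold_graph
+    >>> T = find_threshold_graph(nx.star_graph(3))
+    >>> is_threshold_graph(T)
+    True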
+ + Parameters + ---------- + G : NetworkX graph instance + An instance of `Graph`, or `MultiDiGraph` + create_using : NetworkX graph class or `None` (default), optional + Type of graph to use when constructing the threshold graph. + If `None`, infer the appropriate graph type from the input. + + Returns + ------- + graph : + A graph instance representing the threshold graph + + Examples + -------- + >>> from networkx.algorithms.threshold import find_threshold_graph + >>> G = nx.barbell_graph(3, 3) + >>> T = find_threshold_graph(G) + >>> T.nodes # may vary + NodeView((7, 8, 5, 6)) + + References + ---------- + .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph + """ + return threshold_graph(find_creation_sequence(G), create_using) + + +@nx._dispatchable +def find_creation_sequence(G): + """ + Find a threshold subgraph that is close to largest in G. + Returns the labeled creation sequence of that threshold graph. + """ + cs = [] + # get a local pointer to the working part of the graph + H = G + while H.order() > 0: + # get new degree sequence on subgraph + dsdict = dict(H.degree()) + ds = [(d, v) for v, d in dsdict.items()] + ds.sort() + # Update threshold graph nodes + if ds[-1][0] == 0: # all are isolated + cs.extend(zip(dsdict, ["i"] * (len(ds) - 1) + ["d"])) + break # Done! + # pull off isolated nodes + while ds[0][0] == 0: + (d, iso) = ds.pop(0) + cs.append((iso, "i")) + # find new biggest node + (d, bigv) = ds.pop() + # add edges of star to t_g + cs.append((bigv, "d")) + # form subgraph of neighbors of big node + H = H.subgraph(H.neighbors(bigv)) + cs.reverse() + return cs + + +# Properties of Threshold Graphs +def triangles(creation_sequence): + """ + Compute number of triangles in the threshold graph with the + given creation sequence. + """ + # shortcut algorithm that doesn't require computing number + # of triangles at each node. + cs = creation_sequence # alias + dr = cs.count("d") # number of d's in sequence + ntri = dr * (dr - 1) * (dr - 2) / 6 # number of triangles in clique of nd d's + # now add dr choose 2 triangles for every 'i' in sequence where + # dr is the number of d's to the right of the current i + for i, typ in enumerate(cs): + if typ == "i": + ntri += dr * (dr - 1) / 2 + else: + dr -= 1 + return ntri + + +def triangle_sequence(creation_sequence): + """ + Return triangle sequence for the given threshold graph creation sequence. + + """ + cs = creation_sequence + seq = [] + dr = cs.count("d") # number of d's to the right of the current pos + dcur = (dr - 1) * (dr - 2) // 2 # number of triangles through a node of clique dr + irun = 0 # number of i's in the last run + drun = 0 # number of d's in the last run + for i, sym in enumerate(cs): + if sym == "d": + drun += 1 + tri = dcur + (dr - 1) * irun # new triangles at this d + else: # cs[i]="i": + if prevsym == "d": # new string of i's + dcur += (dr - 1) * irun # accumulate shared shortest paths + irun = 0 # reset i run counter + dr -= drun # reduce number of d's to right + drun = 0 # reset d run counter + irun += 1 + tri = dr * (dr - 1) // 2 # new triangles at this i + seq.append(tri) + prevsym = sym + return seq + + +def cluster_sequence(creation_sequence): + """ + Return cluster sequence for the given threshold graph creation sequence. 
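+
+    For example (a small sanity check: ``["d", "d", "d"]`` builds a
+    triangle, where every vertex has clustering coefficient 1):
+
+    >>> from networkx.algorithms.threshold import cluster_sequence
+    >>> cluster_sequence(["d", "d", "d"])
+    [1.0, 1.0, 1.0]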
+ """ + triseq = triangle_sequence(creation_sequence) + degseq = degree_sequence(creation_sequence) + cseq = [] + for i, deg in enumerate(degseq): + tri = triseq[i] + if deg <= 1: # isolated vertex or single pair gets cc 0 + cseq.append(0) + continue + max_size = (deg * (deg - 1)) // 2 + cseq.append(tri / max_size) + return cseq + + +def degree_sequence(creation_sequence): + """ + Return degree sequence for the threshold graph with the given + creation sequence + """ + cs = creation_sequence # alias + seq = [] + rd = cs.count("d") # number of d to the right + for i, sym in enumerate(cs): + if sym == "d": + rd -= 1 + seq.append(rd + i) + else: + seq.append(rd) + return seq + + +def density(creation_sequence): + """ + Return the density of the graph with this creation_sequence. + The density is the fraction of possible edges present. + """ + N = len(creation_sequence) + two_size = sum(degree_sequence(creation_sequence)) + two_possible = N * (N - 1) + den = two_size / two_possible + return den + + +def degree_correlation(creation_sequence): + """ + Return the degree-degree correlation over all edges. + """ + cs = creation_sequence + s1 = 0 # deg_i*deg_j + s2 = 0 # deg_i^2+deg_j^2 + s3 = 0 # deg_i+deg_j + m = 0 # number of edges + rd = cs.count("d") # number of d nodes to the right + rdi = [i for i, sym in enumerate(cs) if sym == "d"] # index of "d"s + ds = degree_sequence(cs) + for i, sym in enumerate(cs): + if sym == "d": + if i != rdi[0]: + print("Logic error in degree_correlation", i, rdi) + raise ValueError + rdi.pop(0) + degi = ds[i] + for dj in rdi: + degj = ds[dj] + s1 += degj * degi + s2 += degi**2 + degj**2 + s3 += degi + degj + m += 1 + denom = 2 * m * s2 - s3 * s3 + numer = 4 * m * s1 - s3 * s3 + if denom == 0: + if numer == 0: + return 1 + raise ValueError(f"Zero Denominator but Numerator is {numer}") + return numer / denom + + +def shortest_path(creation_sequence, u, v): + """ + Find the shortest path between u and v in a + threshold graph G with the given creation_sequence. + + For an unlabeled creation_sequence, the vertices + u and v must be integers in (0,len(sequence)) referring + to the position of the desired vertices in the sequence. + + For a labeled creation_sequence, u and v are labels of vertices. + + Use cs=creation_sequence(degree_sequence,with_labels=True) + to convert a degree sequence to a creation sequence. + + Returns a list of vertices from u to v. 
+    Example: if they are neighbors, it returns [u,v]
+    """
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        cs = [(i, creation_sequence[i]) for i in range(len(creation_sequence))]
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = creation_sequence[:]
+    elif isinstance(first, int):  # compact creation sequence
+        ci = uncompact(creation_sequence)
+        cs = [(i, ci[i]) for i in range(len(ci))]
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    verts = [s[0] for s in cs]
+    if v not in verts:
+        raise ValueError(f"Vertex {v} not in graph from creation_sequence")
+    if u not in verts:
+        raise ValueError(f"Vertex {u} not in graph from creation_sequence")
+    # Done checking
+    if u == v:
+        return [u]
+
+    uindex = verts.index(u)
+    vindex = verts.index(v)
+    bigind = max(uindex, vindex)
+    if cs[bigind][1] == "d":
+        return [u, v]
+    # must be that cs[bigind][1]=='i'
+    cs = cs[bigind:]
+    while cs:
+        vert = cs.pop()
+        if vert[1] == "d":
+            return [u, vert[0], v]
+    # All after u are type 'i' so no connection
+    return -1
+
+
+def shortest_path_length(creation_sequence, i):
+    """
+    Return the shortest path length from indicated node to
+    every other node for the threshold graph with the given
+    creation sequence.
+    Node is indicated by index i in creation_sequence unless
+    creation_sequence is labeled in which case, i is taken to
+    be the label of the node.
+
+    Path lengths in threshold graphs are at most 2.
+    Length to unreachable nodes is set to -1.
+    """
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        if isinstance(creation_sequence, list):
+            cs = creation_sequence[:]
+        else:
+            cs = list(creation_sequence)
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = [v[1] for v in creation_sequence]
+        i = [v[0] for v in creation_sequence].index(i)
+    elif isinstance(first, int):  # compact creation sequence
+        cs = uncompact(creation_sequence)
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    # Compute
+    N = len(cs)
+    spl = [2] * N  # length 2 to every node
+    spl[i] = 0  # except self which is 0
+    # 1 for all d's to the right
+    for j in range(i + 1, N):
+        if cs[j] == "d":
+            spl[j] = 1
+    if cs[i] == "d":  # 1 for all nodes to the left
+        for j in range(i):
+            spl[j] = 1
+    # and -1 for any trailing i to indicate unreachable
+    for j in range(N - 1, 0, -1):
+        if cs[j] == "d":
+            break
+        spl[j] = -1
+    return spl
+
+
+def betweenness_sequence(creation_sequence, normalized=True):
+    """
+    Return betweenness for the threshold graph with the given creation
+    sequence. If `normalized` is True (the default), the values are
+    scaled to the interval [0,1] by dividing by (n-1)*(n-2); otherwise
+    the raw (unscaled) values are returned.
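+
+    For example, for the creation sequence ``"did"`` (a path on three
+    nodes whose last, dominating node is the center), only that center
+    node has nonzero betweenness:
+
+    >>> from networkx.algorithms.threshold import betweenness_sequence
+    >>> betweenness_sequence("did")
+    [0.0, 0.0, 1.0]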
+ """ + cs = creation_sequence + seq = [] # betweenness + lastchar = "d" # first node is always a 'd' + dr = float(cs.count("d")) # number of d's to the right of current pos + irun = 0 # number of i's in the last run + drun = 0 # number of d's in the last run + dlast = 0.0 # betweenness of last d + for i, c in enumerate(cs): + if c == "d": # cs[i]=="d": + # betweenness = amt shared with earlier d's and i's + # + new isolated nodes covered + # + new paths to all previous nodes + b = dlast + (irun - 1) * irun / dr + 2 * irun * (i - drun - irun) / dr + drun += 1 # update counter + else: # cs[i]="i": + if lastchar == "d": # if this is a new run of i's + dlast = b # accumulate betweenness + dr -= drun # update number of d's to the right + drun = 0 # reset d counter + irun = 0 # reset i counter + b = 0 # isolated nodes have zero betweenness + irun += 1 # add another i to the run + seq.append(float(b)) + lastchar = c + + # normalize by the number of possible shortest paths + if normalized: + order = len(cs) + scale = 1.0 / ((order - 1) * (order - 2)) + seq = [s * scale for s in seq] + + return seq + + +def eigenvectors(creation_sequence): + """ + Return a 2-tuple of Laplacian eigenvalues and eigenvectors + for the threshold network with creation_sequence. + The first value is a list of eigenvalues. + The second value is a list of eigenvectors. + The lists are in the same order so corresponding eigenvectors + and eigenvalues are in the same position in the two lists. + + Notice that the order of the eigenvalues returned by eigenvalues(cs) + may not correspond to the order of these eigenvectors. + """ + ccs = make_compact(creation_sequence) + N = sum(ccs) + vec = [0] * N + val = vec[:] + # get number of type d nodes to the right (all for first node) + dr = sum(ccs[::2]) + + nn = ccs[0] + vec[0] = [1.0 / sqrt(N)] * N + val[0] = 0 + e = dr + dr -= nn + type_d = True + i = 1 + dd = 1 + while dd < nn: + scale = 1.0 / sqrt(dd * dd + i) + vec[i] = i * [-scale] + [dd * scale] + [0] * (N - i - 1) + val[i] = e + i += 1 + dd += 1 + if len(ccs) == 1: + return (val, vec) + for nn in ccs[1:]: + scale = 1.0 / sqrt(nn * i * (i + nn)) + vec[i] = i * [-nn * scale] + nn * [i * scale] + [0] * (N - i - nn) + # find eigenvalue + type_d = not type_d + if type_d: + e = i + dr + dr -= nn + else: + e = dr + val[i] = e + st = i + i += 1 + dd = 1 + while dd < nn: + scale = 1.0 / sqrt(i - st + dd * dd) + vec[i] = [0] * st + (i - st) * [-scale] + [dd * scale] + [0] * (N - i - 1) + val[i] = e + i += 1 + dd += 1 + return (val, vec) + + +def spectral_projection(u, eigenpairs): + """ + Returns the coefficients of each eigenvector + in a projection of the vector u onto the normalized + eigenvectors which are contained in eigenpairs. + + eigenpairs should be a list of two objects. The + first is a list of eigenvalues and the second a list + of eigenvectors. The eigenvectors should be lists. + + There's not a lot of error checking on lengths of + arrays, etc. so be careful. + """ + coeff = [] + evect = eigenpairs[1] + for ev in evect: + c = sum(evv * uv for (evv, uv) in zip(ev, u)) + coeff.append(c) + return coeff + + +def eigenvalues(creation_sequence): + """ + Return sequence of eigenvalues of the Laplacian of the threshold + graph for the given creation_sequence. + + Based on the Ferrer's diagram method. The spectrum is integral + and is the conjugate of the degree sequence. 
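+
+    For example, the creation sequence ``"ddd"`` builds the triangle,
+    whose Laplacian spectrum is ``0, 3, 3``:
+
+    >>> from networkx.algorithms.threshold import eigenvalues
+    >>> eigenvalues(["d", "d", "d"])
+    [0, 3, 3]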
+
+    See::
+
+        @Article{degree-merris-1994,
+          author = {Russel Merris},
+          title = {Degree maximal graphs are Laplacian integral},
+          journal = {Linear Algebra Appl.},
+          year = {1994},
+          volume = {199},
+          pages = {381--389},
+        }
+
+    """
+    degseq = degree_sequence(creation_sequence)
+    degseq.sort()
+    eiglist = []  # zero is always one eigenvalue
+    eig = 0
+    row = len(degseq)
+    bigdeg = degseq.pop()
+    while row:
+        if bigdeg < row:
+            eiglist.append(eig)
+            row -= 1
+        else:
+            eig += 1
+            if degseq:
+                bigdeg = degseq.pop()
+            else:
+                bigdeg = 0
+    return eiglist
+
+
+# Threshold graph creation routines
+
+
+@py_random_state(2)
+def random_threshold_sequence(n, p, seed=None):
+    """
+    Create a random threshold sequence of size n.
+    A creation sequence is built by randomly choosing d's with
+    probability p and i's with probability 1-p.
+
+    s=nx.random_threshold_sequence(10,0.5)
+
+    returns a threshold sequence of length 10 with equal
+    probability of an i or a d at each position.
+
+    A "random" threshold graph can be built with
+
+    G=nx.threshold_graph(s)
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    """
+    if not (0 <= p <= 1):
+        raise ValueError("p must be in [0,1]")
+
+    cs = ["d"]  # threshold sequences always start with a d
+    for i in range(1, n):
+        if seed.random() < p:
+            cs.append("d")
+        else:
+            cs.append("i")
+    return cs
+
+
+# maybe *_d_threshold_sequence routines should
+# be (or be called from) a single routine with a more descriptive name
+# and a keyword parameter?
+def right_d_threshold_sequence(n, m):
+    """
+    Returns a "right-dominated" threshold sequence with `n` vertices and `m` edges.
+
+    Each vertex in the sequence is either dominant or isolated.
+    In the "right-dominated" version, once the basic sequence is formed,
+    isolated vertices may be flipped to dominant from the right in order
+    to reach the target number of edges.
+
+    Parameters
+    ----------
+    n : int
+        Number of vertices.
+    m : int
+        Number of edges.
+
+    Returns
+    -------
+    A list of 'd' (dominant) and 'i' (isolated) forming a right-dominated threshold sequence.
+
+    Raises
+    ------
+    ValueError
+        If `m` exceeds the maximum number of edges.
+
+    Examples
+    --------
+    >>> from networkx.algorithms.threshold import right_d_threshold_sequence
+    >>> right_d_threshold_sequence(5, 3)
+    ['d', 'i', 'i', 'd', 'i']
+    """
+
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n isolated nodes
+
+    #  m < n : not enough edges for a connected graph, make it disconnected
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # connected case  m > n-1
+    ind = n - 1
+    sum = n - 1
+    while sum < m:
+        cs[ind] = "d"
+        ind -= 1
+        sum += ind
+    ind = m - (sum - ind)
+    cs[ind] = "d"
+    return cs
+
+
+def left_d_threshold_sequence(n, m):
+    """
+    Returns a "left-dominated" threshold sequence with `n` vertices and `m` edges.
+
+    Each vertex in the sequence is either dominant or isolated.
+    In the "left-dominated" version, once the basic sequence is formed,
+    isolated vertices may be flipped to dominant from the left in order
+    to reach the target number of edges.
+
+    Parameters
+    ----------
+    n : int
+        Number of vertices.
+    m : int
+        Number of edges.
+
+    Returns
+    -------
+    A list of 'd' (dominant) and 'i' (isolated) forming a left-dominated threshold sequence.
+
+    Raises
+    ------
+    ValueError
+        If `m` exceeds the maximum number of edges.
+
+    Examples
+    --------
+    For certain small cases, both left and right dominated versions produce
+    the same sequence. However, for larger values of `m`, the difference in
+    flipping order becomes evident. For instance, compare the sequences for
+    ``n=6, m=8``:
+
+    >>> from networkx.algorithms.threshold import left_d_threshold_sequence
+    >>> seq = left_d_threshold_sequence(6, 8)
+    >>> seq
+    ['d', 'd', 'd', 'i', 'i', 'd']
+
+    In contrast, the right-dominated version yields:
+
+    >>> from networkx.algorithms.threshold import right_d_threshold_sequence
+    >>> right_seq = right_d_threshold_sequence(6, 8)
+    >>> right_seq
+    ['d', 'i', 'i', 'd', 'i', 'd']
+    """
+
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n isolated nodes
+
+    #  m < n : not enough edges for a connected graph, make it disconnected
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # Connected case when M>N-1
+    cs[n - 1] = "d"
+    sum = n - 1
+    ind = 1
+    while sum < m:
+        cs[ind] = "d"
+        sum += ind
+        ind += 1
+    if sum > m:  # be sure not to change the first vertex
+        cs[sum - m] = "i"
+    return cs
+
+
+@py_random_state(3)
+def swap_d(cs, p_split=1.0, p_combine=1.0, seed=None):
+    """
+    Perform a "swap" operation on a threshold sequence.
+
+    The swap preserves the number of nodes and edges
+    in the graph for the given sequence.
+    The resulting sequence is still a threshold sequence.
+
+    Perform one split and one combine operation on the
+    'd's of a creation sequence for a threshold graph.
+    This operation maintains the number of nodes and edges
+    in the graph, but shifts the edges from node to node
+    maintaining the threshold quality of the graph.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    """
+    # preprocess the creation sequence
+    dlist = [i for (i, node_type) in enumerate(cs[1:-1]) if node_type == "d"]
+    # split
+    if seed.random() < p_split:
+        choice = seed.choice(dlist)
+        split_to = seed.choice(range(choice))
+        flip_side = choice - split_to
+        if split_to != flip_side and cs[split_to] == "i" and cs[flip_side] == "i":
+            cs[choice] = "i"
+            cs[split_to] = "d"
+            cs[flip_side] = "d"
+            dlist.remove(choice)
+            # don't add or combine may reverse this action
+            # dlist.extend([split_to,flip_side])
+            # print >>sys.stderr,"split at %s to %s and %s"%(choice,split_to,flip_side)
+    # combine
+    if seed.random() < p_combine and dlist:
+        first_choice = seed.choice(dlist)
+        second_choice = seed.choice(dlist)
+        target = first_choice + second_choice
+        if target >= len(cs) or cs[target] == "d" or first_choice == second_choice:
+            return cs
+        # OK to combine
+        cs[first_choice] = "i"
+        cs[second_choice] = "i"
+        cs[target] = "d"
+        # print >>sys.stderr,"combine %s and %s to make %s."%(first_choice,second_choice,target)
+
+    return cs
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/time_dependent.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/time_dependent.py
new file mode 100644
index 0000000..d67cdcf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/time_dependent.py
@@ -0,0 +1,142 @@
+"""Time dependent algorithms."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["cd_index"]
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(node_attrs={"time": None, "weight": 1})
+def cd_index(G, node, time_delta, *, time="time", weight=None):
+    r"""Compute the CD index for `node` within the graph `G`.
+
+    Calculates the CD index for the given node of the graph,
+    considering only its predecessors who have the `time` attribute
+    smaller than or equal to the `time` attribute of the `node`
+    plus `time_delta`.
+
+    Parameters
+    ----------
+    G : graph
+        A directed networkx graph whose nodes have `time` attributes and optionally
+        `weight` attributes (if a weight is not given, it is considered 1).
+    node : node
+        The node for which the CD index is calculated.
+    time_delta : numeric or timedelta
+        Amount of time after the `time` attribute of the `node`. The value of
+        `time_delta` must support comparison with the `time` node attribute. For
+        example, if the `time` attribute of the nodes are `datetime.datetime`
+        objects, then `time_delta` should be a `datetime.timedelta` object.
+    time : string (Optional, default is "time")
+        The name of the node attribute that will be used for the calculations.
+    weight : string (Optional, default is None)
+        The name of the node attribute used as weight.
+
+    Returns
+    -------
+    float
+        The CD index calculated for the node `node` within the graph `G`.
+
+    Raises
+    ------
+    NetworkXError
+        If not all nodes have a `time` attribute or
+        `time_delta` and `time` attribute types are not compatible or
+        `n` equals 0.
+
+    NetworkXNotImplemented
+        If `G` is a non-directed graph or a multigraph.
+
+    Examples
+    --------
+    >>> from datetime import datetime, timedelta
+    >>> G = nx.DiGraph()
+    >>> nodes = {
+    ...     1: {"time": datetime(2015, 1, 1)},
+    ...     2: {"time": datetime(2012, 1, 1), "weight": 4},
+    ...     3: {"time": datetime(2010, 1, 1)},
+    ...     4: {"time": datetime(2008, 1, 1)},
+    ...     5: {"time": datetime(2014, 1, 1)},
+    ... }
+    >>> G.add_nodes_from([(n, nodes[n]) for n in nodes])
+    >>> edges = [(1, 3), (1, 4), (2, 3), (3, 4), (3, 5)]
+    >>> G.add_edges_from(edges)
+    >>> delta = timedelta(days=5 * 365)
+    >>> nx.cd_index(G, 3, time_delta=delta, time="time")
+    0.5
+    >>> nx.cd_index(G, 3, time_delta=delta, time="time", weight="weight")
+    0.12
+
+    Integers can also be used for the time values:
+
+    >>> node_times = {1: 2015, 2: 2012, 3: 2010, 4: 2008, 5: 2014}
+    >>> nx.set_node_attributes(G, node_times, "new_time")
+    >>> nx.cd_index(G, 3, time_delta=4, time="new_time")
+    0.5
+    >>> nx.cd_index(G, 3, time_delta=4, time="new_time", weight="weight")
+    0.12
+
+    Notes
+    -----
+    This method implements the algorithm for calculating the CD index,
+    as described in the paper by Funk and Owen-Smith [1]_. The CD index
+    is used in order to check how consolidating or destabilizing a patent
+    is, hence the nodes of the graph represent patents and the edges show
+    the citations between these patents. The mathematical model is given
+    below:
+
+    .. math::
+        CD_{t}=\frac{1}{n_{t}}\sum_{i=1}^{n}\frac{-2f_{it}b_{it}+f_{it}}{w_{it}},
+
+    where `f_{it}` equals 1 if `i` cites the focal patent else 0, `b_{it}` equals
+    1 if `i` cites any of the focal patent's successors else 0, `n_{t}` is the
+    number of forward citations in `i` and `w_{it}` is the weight for patent `i`
+    at time `t`.
+
+    The `datetime.timedelta` package can lead to off-by-one issues when converting
+    from years to days. In the example above `timedelta(days=5 * 365)` looks like
+    5 years, but it isn't because of leap year days. So it gives the same result
+    as `timedelta(days=4 * 365)`. But using `timedelta(days=5 * 365 + 1)` gives
+    a 5 year delta **for this choice of years** but may not if the 5 year gap has
+    more than 1 leap year.
To avoid these issues, use integers to represent years, + or be very careful when you convert units of time. + + References + ---------- + .. [1] Funk, Russell J., and Jason Owen-Smith. + "A dynamic network measure of technological change." + Management science 63, no. 3 (2017): 791-817. + http://russellfunk.org/cdindex/static/papers/funk_ms_2017.pdf + + """ + if not all(time in G.nodes[n] for n in G): + raise nx.NetworkXError("Not all nodes have a 'time' attribute.") + + try: + # get target_date + target_date = G.nodes[node][time] + time_delta + # keep the predecessors that existed before the target date + pred = {i for i in G.pred[node] if G.nodes[i][time] <= target_date} + except: + raise nx.NetworkXError( + "Addition and comparison are not supported between 'time_delta' " + "and 'time' types." + ) + + # -1 if any edge between node's predecessors and node's successors, else 1 + b = [-1 if any(j in G[i] for j in G[node]) else 1 for i in pred] + + # n is size of the union of the focal node's predecessors and its successors' predecessors + n = len(pred.union(*(G.pred[s].keys() - {node} for s in G[node]))) + if n == 0: + raise nx.NetworkXError("The cd index cannot be defined.") + + # calculate cd index + if weight is None: + return round(sum(bi for bi in b) / n, 2) + else: + # If a node has the specified weight attribute, its weight is used in the calculation + # otherwise, a weight of 1 is assumed for that node + weights = [G.nodes[i].get(weight, 1) for i in pred] + return round(sum(bi / wt for bi, wt in zip(b, weights)) / n, 2) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tournament.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tournament.py new file mode 100644 index 0000000..ccb19a4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tournament.py @@ -0,0 +1,404 @@ +"""Functions concerning tournament graphs. + +A `tournament graph`_ is a complete oriented graph. In other words, it +is a directed graph in which there is exactly one directed edge joining +each pair of distinct nodes. For each function in this module that +accepts a graph as input, you must provide a tournament graph. The +responsibility is on the caller to ensure that the graph is a tournament +graph: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + >>> nx.is_tournament(G) + True + +To access the functions in this module, you must access them through the +:mod:`networkx.tournament` module:: + + >>> nx.tournament.is_reachable(G, 0, 1) + True + +.. _tournament graph: https://en.wikipedia.org/wiki/Tournament_%28graph_theory%29 + +""" + +from itertools import combinations + +import networkx as nx +from networkx.algorithms.simple_paths import is_simple_path as is_path +from networkx.utils import arbitrary_element, not_implemented_for, py_random_state + +__all__ = [ + "hamiltonian_path", + "is_reachable", + "is_strongly_connected", + "is_tournament", + "random_tournament", + "score_sequence", + "tournament_matrix", +] + + +def index_satisfying(iterable, condition): + """Returns the index of the first element in `iterable` that + satisfies the given condition. + + If no such element is found (that is, when the iterable is + exhausted), this returns the length of the iterable (that is, one + greater than the last index of the iterable). + + `iterable` must not be empty. If `iterable` is empty, this + function raises :exc:`ValueError`. + + """ + # Pre-condition: iterable must not be empty. 
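+    # For example, index_satisfying([2, 4, 5], lambda x: x % 2 == 1)
+    # returns 2 (the index of 5), while index_satisfying([2, 4],
+    # lambda x: x % 2 == 1) returns 2, the length of the iterable.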
+ for i, x in enumerate(iterable): + if condition(x): + return i + # If we reach the end of the iterable without finding an element + # that satisfies the condition, return the length of the iterable, + # which is one greater than the index of its last element. If the + # iterable was empty, `i` will not be defined, so we raise an + # exception. + try: + return i + 1 + except NameError as err: + raise ValueError("iterable must be non-empty") from err + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatchable +def is_tournament(G): + """Returns True if and only if `G` is a tournament. + + A tournament is a directed graph, with neither self-loops nor + multi-edges, in which there is exactly one directed edge joining + each pair of distinct nodes. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + bool + Whether the given graph is a tournament graph. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + >>> nx.is_tournament(G) + True + + Notes + ----- + Some definitions require a self-loop on each node, but that is not + the convention used here. + + """ + # In a tournament, there is exactly one directed edge joining each pair. + return ( + all((v in G[u]) ^ (u in G[v]) for u, v in combinations(G, 2)) + and nx.number_of_selfloops(G) == 0 + ) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatchable +def hamiltonian_path(G): + """Returns a Hamiltonian path in the given tournament graph. + + Each tournament has a Hamiltonian path. If furthermore, the + tournament is strongly connected, then the returned Hamiltonian path + is a Hamiltonian cycle (by joining the endpoints of the path). + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + path : list + A list of nodes which form a Hamiltonian path in `G`. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]) + >>> nx.is_tournament(G) + True + >>> nx.tournament.hamiltonian_path(G) + [0, 1, 2, 3] + + Notes + ----- + This is a recursive implementation with an asymptotic running time + of $O(n^2)$, ignoring multiplicative polylogarithmic factors, where + $n$ is the number of nodes in the graph. + + """ + if len(G) == 0: + return [] + if len(G) == 1: + return [arbitrary_element(G)] + v = arbitrary_element(G) + hampath = hamiltonian_path(G.subgraph(set(G) - {v})) + # Get the index of the first node in the path that does *not* have + # an edge to `v`, then insert `v` before that node. + index = index_satisfying(hampath, lambda u: v not in G[u]) + hampath.insert(index, v) + return hampath + + +@py_random_state(1) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_tournament(n, seed=None): + r"""Returns a random tournament graph on `n` nodes. + + Parameters + ---------- + n : int + The number of nodes in the returned graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : DiGraph + A tournament on `n` nodes, with exactly one directed edge joining + each pair of distinct nodes. + + Notes + ----- + This algorithm adds, for each pair of distinct nodes, an edge with + uniformly random orientation. In other words, `\binom{n}{2}` flips + of an unbiased coin decide the orientations of the edges in the + graph. + + """ + # Flip an unbiased coin for each pair of distinct nodes. 
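+    # Each of the (n * (n - 1)) // 2 random draws below is one flip:
+    # r < 0.5 keeps the pair as (u, v), otherwise it becomes (v, u).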
+    coins = (seed.random() for i in range((n * (n - 1)) // 2))
+    pairs = combinations(range(n), 2)
+    edges = ((u, v) if r < 0.5 else (v, u) for (u, v), r in zip(pairs, coins))
+    return nx.DiGraph(edges)
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def score_sequence(G):
+    """Returns the score sequence for the given tournament graph.
+
+    The score sequence is the sorted list of the out-degrees of the
+    nodes of the graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    Returns
+    -------
+    list
+        A sorted list of the out-degrees of the nodes of `G`.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 0), (1, 3), (0, 2), (0, 3), (2, 1), (3, 2)])
+    >>> nx.is_tournament(G)
+    True
+    >>> nx.tournament.score_sequence(G)
+    [1, 1, 2, 2]
+
+    """
+    return sorted(d for v, d in G.out_degree())
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(preserve_edge_attrs={"G": {"weight": 1}})
+def tournament_matrix(G):
+    r"""Returns the tournament matrix for the given tournament graph.
+
+    This function requires SciPy.
+
+    The *tournament matrix* of a tournament graph with edge set *E* is
+    the matrix *T* defined by
+
+    .. math::
+
+       T_{i j} =
+       \begin{cases}
+       +1 & \text{if } (i, j) \in E \\
+       -1 & \text{if } (j, i) \in E \\
+       0 & \text{if } i = j.
+       \end{cases}
+
+    An equivalent definition is `T = A - A^T`, where *A* is the
+    adjacency matrix of the graph `G`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    Returns
+    -------
+    SciPy sparse array
+        The tournament matrix of the tournament graph `G`.
+
+    Raises
+    ------
+    ImportError
+        If SciPy is not available.
+
+    """
+    A = nx.adjacency_matrix(G)
+    return A - A.T
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def is_reachable(G, s, t):
+    """Decides whether there is a path from `s` to `t` in the
+    tournament.
+
+    This function is more theoretically efficient than the reachability
+    checks provided by the shortest path algorithms in
+    :mod:`networkx.algorithms.shortest_paths`.
+
+    The given graph **must** be a tournament, otherwise this function's
+    behavior is undefined.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    s : node
+        A node in the graph.
+
+    t : node
+        A node in the graph.
+
+    Returns
+    -------
+    bool
+        Whether there is a path from `s` to `t` in `G`.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 0), (1, 3), (1, 2), (2, 3), (2, 0), (3, 0)])
+    >>> nx.is_tournament(G)
+    True
+    >>> nx.tournament.is_reachable(G, 1, 3)
+    True
+    >>> nx.tournament.is_reachable(G, 3, 2)
+    False
+
+    Notes
+    -----
+    Although this function is more theoretically efficient than the
+    generic shortest path functions, a speedup requires the use of
+    parallelism. Though it may use parallelism in the future, the current
+    implementation does not, so you may not see much of a speedup.
+
+    This algorithm comes from [1]_.
+
+    References
+    ----------
+    .. [1] Tantau, Till.
+           "A note on the complexity of the reachability problem for
+           tournaments."
+           *Electronic Colloquium on Computational Complexity*. 2001.
+
+    """
+
+    def two_neighborhood(G, v):
+        """Returns the set of nodes at distance at most two from `v`.
+
+        `G` must be a graph and `v` a node in that graph.
+ + The returned set includes the nodes at distance zero (that is, + the node `v` itself), the nodes at distance one (that is, the + out-neighbors of `v`), and the nodes at distance two. + + """ + return { + x for x in G if x == v or x in G[v] or any(is_path(G, [v, z, x]) for z in G) + } + + def is_closed(G, nodes): + """Decides whether the given set of nodes is closed. + + A set *S* of nodes is *closed* if for each node *u* in the graph + not in *S* and for each node *v* in *S*, there is an edge from + *u* to *v*. + + """ + return all(v in G[u] for u in set(G) - nodes for v in nodes) + + neighborhoods = [two_neighborhood(G, v) for v in G] + return all(not (is_closed(G, S) and s in S and t not in S) for S in neighborhoods) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatchable(name="tournament_is_strongly_connected") +def is_strongly_connected(G): + """Decides whether the given tournament is strongly connected. + + This function is more theoretically efficient than the + :func:`~networkx.algorithms.components.is_strongly_connected` + function. + + The given graph **must** be a tournament, otherwise this function's + behavior is undefined. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + bool + Whether the tournament is strongly connected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 0)]) + >>> nx.is_tournament(G) + True + >>> nx.tournament.is_strongly_connected(G) + True + >>> G.remove_edge(3, 0) + >>> G.add_edge(0, 3) + >>> nx.is_tournament(G) + True + >>> nx.tournament.is_strongly_connected(G) + False + + Notes + ----- + Although this function is more theoretically efficient than the + generic strong connectivity function, a speedup requires the use of + parallelism. Though it may in the future, the current implementation + does not use parallelism, thus you may not see much of a speedup. + + This algorithm comes from [1]. + + References + ---------- + .. [1] Tantau, Till. + "A note on the complexity of the reachability problem for + tournaments." + *Electronic Colloquium on Computational Complexity*. 2001. 
+ + + """ + return all(is_reachable(G, u, v) for u in G for v in G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__init__.py new file mode 100644 index 0000000..93e6cdd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__init__.py @@ -0,0 +1,5 @@ +from .beamsearch import * +from .breadth_first_search import * +from .depth_first_search import * +from .edgedfs import * +from .edgebfs import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..7959741 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-312.pyc new file mode 100644 index 0000000..1c0ff61 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-312.pyc new file mode 100644 index 0000000..e5a495c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-312.pyc new file mode 100644 index 0000000..b0529e4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-312.pyc new file mode 100644 index 0000000..d07c0a1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-312.pyc new file mode 100644 index 0000000..f74ab37 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/beamsearch.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/beamsearch.py new file mode 100644 index 0000000..23fbe7b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/beamsearch.py @@ -0,0 +1,90 @@ +"""Basic algorithms for breadth-first searching the nodes of a graph.""" + +import networkx as nx + +__all__ = ["bfs_beam_edges"] + + +@nx._dispatchable +def bfs_beam_edges(G, source, value, width=None): + 
"""Iterates over edges in a beam search. + + The beam search is a generalized breadth-first search in which only + the "best" *w* neighbors of the current node are enqueued, where *w* + is the beam width and "best" is an application-specific + heuristic. In general, a beam search with a small beam width might + not visit each node in the graph. + + .. note:: + + With the default value of ``width=None`` or `width` greater than the + maximum degree of the graph, this function equates to a slower + version of `~networkx.algorithms.traversal.breadth_first_search.bfs_edges`. + All nodes will be visited, though the order of the reported edges may + vary. In such cases, `value` has no effect - consider using `bfs_edges` + directly instead. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for the breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + value : function + A function that takes a node of the graph as input and returns a + real number indicating how "good" it is. A higher value means it + is more likely to be visited sooner during the search. When + visiting a new node, only the `width` neighbors with the highest + `value` are enqueued (in decreasing order of `value`). + + width : int (default = None) + The beam width for the search. This is the number of neighbors + (ordered by `value`) to enqueue when visiting each new node. + + Yields + ------ + edge + Edges in the beam search starting from `source`, given as a pair + of nodes. + + Examples + -------- + To give nodes with, for example, a higher centrality precedence + during the search, set the `value` function to return the centrality + value of the node: + + >>> G = nx.karate_club_graph() + >>> centrality = nx.eigenvector_centrality(G) + >>> list(nx.bfs_beam_edges(G, source=0, value=centrality.get, width=3)) + [(0, 2), (0, 1), (0, 8), (2, 32), (1, 13), (8, 33)] + """ + + if width is None: + width = len(G) + + def successors(v): + """Returns a list of the best neighbors of a node. + + `v` is a node in the graph `G`. + + The "best" neighbors are chosen according to the `value` + function (higher is better). Only the `width` best neighbors of + `v` are returned. + """ + # TODO The Python documentation states that for small values, it + # is better to use `heapq.nlargest`. We should determine the + # threshold at which its better to use `heapq.nlargest()` + # instead of `sorted()[:]` and apply that optimization here. + # + # If `width` is greater than the number of neighbors of `v`, all + # neighbors are returned by the semantics of slicing in + # Python. This occurs in the special case that the user did not + # specify a `width`: in this case all neighbors are always + # returned, so this is just a (slower) implementation of + # `bfs_edges(G, source)` but with a sorted enqueue step. 
+ return iter(sorted(G.neighbors(v), key=value, reverse=True)[:width]) + + yield from nx.generic_bfs_edges(G, source, successors) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/breadth_first_search.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/breadth_first_search.py new file mode 100644 index 0000000..706a6d9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/breadth_first_search.py @@ -0,0 +1,576 @@ +"""Basic algorithms for breadth-first searching the nodes of a graph.""" + +from collections import deque + +import networkx as nx + +__all__ = [ + "bfs_edges", + "bfs_tree", + "bfs_predecessors", + "bfs_successors", + "descendants_at_distance", + "bfs_layers", + "bfs_labeled_edges", + "generic_bfs_edges", +] + + +@nx._dispatchable +def generic_bfs_edges(G, source, neighbors=None, depth_limit=None): + """Iterate over edges in a breadth-first search. + + The breadth-first search begins at `source` and enqueues the + neighbors of newly visited nodes specified by the `neighbors` + function. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for the breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + neighbors : function + A function that takes a newly visited node of the graph as input + and returns an *iterator* (not just a list) of nodes that are + neighbors of that node with custom ordering. If not specified, this is + just the ``G.neighbors`` method, but in general it can be any function + that returns an iterator over some or all of the neighbors of a + given node, in any order. + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth. + + Yields + ------ + edge + Edges in the breadth-first search starting from `source`. + + Examples + -------- + >>> G = nx.path_graph(7) + >>> list(nx.generic_bfs_edges(G, source=0)) + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)] + >>> list(nx.generic_bfs_edges(G, source=2)) + [(2, 1), (2, 3), (1, 0), (3, 4), (4, 5), (5, 6)] + >>> list(nx.generic_bfs_edges(G, source=2, depth_limit=2)) + [(2, 1), (2, 3), (1, 0), (3, 4)] + + The `neighbors` param can be used to specify the visitation order of each + node's neighbors generically. In the following example, we modify the default + neighbor to return *odd* nodes first: + + >>> def odd_first(n): + ... return sorted(G.neighbors(n), key=lambda x: x % 2, reverse=True) + + >>> G = nx.star_graph(5) + >>> list(nx.generic_bfs_edges(G, source=0)) # Default neighbor ordering + [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] + >>> list(nx.generic_bfs_edges(G, source=0, neighbors=odd_first)) + [(0, 1), (0, 3), (0, 5), (0, 2), (0, 4)] + + Notes + ----- + This implementation is from `PADS`_, which was in the public domain + when it was first accessed in July, 2004. The modifications + to allow depth limits are based on the Wikipedia article + "`Depth-limited-search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS/BFS.py + .. 
_Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + """ + if neighbors is None: + neighbors = G.neighbors + if depth_limit is None: + depth_limit = len(G) + + seen = {source} + n = len(G) + depth = 0 + next_parents_children = [(source, neighbors(source))] + while next_parents_children and depth < depth_limit: + this_parents_children = next_parents_children + next_parents_children = [] + for parent, children in this_parents_children: + for child in children: + if child not in seen: + seen.add(child) + next_parents_children.append((child, neighbors(child))) + yield parent, child + if len(seen) == n: + return + depth += 1 + + +@nx._dispatchable +def bfs_edges(G, source, reverse=False, depth_limit=None, sort_neighbors=None): + """Iterate over edges in a breadth-first-search starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + reverse : bool, optional + If True traverse a directed graph in the reverse direction + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Yields + ------ + edge: 2-tuple of nodes + Yields edges resulting from the breadth-first search. + + Examples + -------- + To get the edges in a breadth-first search:: + + >>> G = nx.path_graph(3) + >>> list(nx.bfs_edges(G, 0)) + [(0, 1), (1, 2)] + >>> list(nx.bfs_edges(G, source=0, depth_limit=1)) + [(0, 1)] + + To get the nodes in a breadth-first search order:: + + >>> G = nx.path_graph(3) + >>> root = 2 + >>> edges = nx.bfs_edges(G, root) + >>> nodes = [root] + [v for u, v in edges] + >>> nodes + [2, 1, 0] + + Notes + ----- + The naming of this function is very similar to + :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs`. The difference + is that ``edge_bfs`` yields edges even if they extend back to an already + explored node while this generator yields the edges of the tree that results + from a breadth-first-search (BFS) so no edges are reported if they extend + to already explored nodes. That means ``edge_bfs`` reports all edges while + ``bfs_edges`` only reports those traversed by a node-based BFS. Yet another + description is that ``bfs_edges`` reports the edges traversed during BFS + while ``edge_bfs`` reports all edges in the order they are explored. + + Based on the breadth-first search implementation in PADS [1]_ + by D. Eppstein, July 2004; with modifications to allow depth limits + as described in [2]_. + + References + ---------- + .. [1] http://www.ics.uci.edu/~eppstein/PADS/BFS.py. + .. 
[2] https://en.wikipedia.org/wiki/Depth-limited_search
+
+    See Also
+    --------
+    bfs_tree
+    :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`
+    :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs`
+
+    """
+    if reverse and G.is_directed():
+        successors = G.predecessors
+    else:
+        successors = G.neighbors
+
+    if sort_neighbors is not None:
+        yield from generic_bfs_edges(
+            G, source, lambda node: iter(sort_neighbors(successors(node))), depth_limit
+        )
+    else:
+        yield from generic_bfs_edges(G, source, successors, depth_limit)
+
+
+@nx._dispatchable(returns_graph=True)
+def bfs_tree(G, source, reverse=False, depth_limit=None, sort_neighbors=None):
+    """Returns an oriented tree constructed from a breadth-first-search
+    starting at source.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Specify starting node for breadth-first search
+
+    reverse : bool, optional
+        If True traverse a directed graph in the reverse direction
+
+    depth_limit : int, optional(default=len(G))
+        Specify the maximum search depth
+
+    sort_neighbors : function (default=None)
+        A function that takes an iterator over nodes as the input, and
+        returns an iterable of the same nodes with a custom ordering.
+        For example, `sorted` will sort the nodes in increasing order.
+
+    Returns
+    -------
+    T: NetworkX DiGraph
+        An oriented tree
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> list(nx.bfs_tree(G, 1).edges())
+    [(1, 0), (1, 2)]
+    >>> H = nx.Graph()
+    >>> nx.add_path(H, [0, 1, 2, 3, 4, 5, 6])
+    >>> nx.add_path(H, [2, 7, 8, 9, 10])
+    >>> sorted(list(nx.bfs_tree(H, source=3, depth_limit=3).edges()))
+    [(1, 0), (2, 1), (2, 7), (3, 2), (3, 4), (4, 5), (5, 6), (7, 8)]
+
+
+    Notes
+    -----
+    Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
+    by D. Eppstein, July 2004. The modifications
+    to allow depth limits based on the Wikipedia article
+    "`Depth-limited-search`_".
+
+    .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
+
+    See Also
+    --------
+    dfs_tree
+    bfs_edges
+    edge_bfs
+    """
+    T = nx.DiGraph()
+    T.add_node(source)
+    edges_gen = bfs_edges(
+        G,
+        source,
+        reverse=reverse,
+        depth_limit=depth_limit,
+        sort_neighbors=sort_neighbors,
+    )
+    T.add_edges_from(edges_gen)
+    return T
+
+
+@nx._dispatchable
+def bfs_predecessors(G, source, depth_limit=None, sort_neighbors=None):
+    """Returns an iterator of predecessors in breadth-first-search from source.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Specify starting node for breadth-first search
+
+    depth_limit : int, optional(default=len(G))
+        Specify the maximum search depth
+
+    sort_neighbors : function (default=None)
+        A function that takes an iterator over nodes as the input, and
+        returns an iterable of the same nodes with a custom ordering.
+        For example, `sorted` will sort the nodes in increasing order.
+
+    Returns
+    -------
+    pred: iterator
+        (node, predecessor) iterator where `predecessor` is the predecessor of
+        `node` in a breadth first search starting from `source`.
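+
+        For example, repeatedly following predecessors walks any reached
+        node back to `source`; a short usage sketch:
+
+        >>> G = nx.path_graph(4)
+        >>> pred = dict(nx.bfs_predecessors(G, 0))
+        >>> node, path = 3, [3]
+        >>> while node != 0:
+        ...     node = pred[node]
+        ...     path.append(node)
+        >>> path[::-1]
+        [0, 1, 2, 3]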
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> dict(nx.bfs_predecessors(G, 0))
+    {1: 0, 2: 1}
+    >>> H = nx.Graph()
+    >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])
+    >>> dict(nx.bfs_predecessors(H, 0))
+    {1: 0, 2: 0, 3: 1, 4: 1, 5: 2, 6: 2}
+    >>> M = nx.Graph()
+    >>> nx.add_path(M, [0, 1, 2, 3, 4, 5, 6])
+    >>> nx.add_path(M, [2, 7, 8, 9, 10])
+    >>> sorted(nx.bfs_predecessors(M, source=1, depth_limit=3))
+    [(0, 1), (2, 1), (3, 2), (4, 3), (7, 2), (8, 7)]
+    >>> N = nx.DiGraph()
+    >>> nx.add_path(N, [0, 1, 2, 3, 4, 7])
+    >>> nx.add_path(N, [3, 5, 6, 7])
+    >>> sorted(nx.bfs_predecessors(N, source=2))
+    [(3, 2), (4, 3), (5, 3), (6, 5), (7, 4)]
+
+    Notes
+    -----
+    Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
+    by D. Eppstein, July 2004. The modifications
+    to allow depth limits based on the Wikipedia article
+    "`Depth-limited-search`_".
+
+    .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
+
+    See Also
+    --------
+    bfs_tree
+    bfs_edges
+    edge_bfs
+    """
+    for s, t in bfs_edges(
+        G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors
+    ):
+        yield (t, s)
+
+
+@nx._dispatchable
+def bfs_successors(G, source, depth_limit=None, sort_neighbors=None):
+    """Returns an iterator of successors in breadth-first-search from source.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Specify starting node for breadth-first search
+
+    depth_limit : int, optional(default=len(G))
+        Specify the maximum search depth
+
+    sort_neighbors : function (default=None)
+        A function that takes an iterator over nodes as the input, and
+        returns an iterable of the same nodes with a custom ordering.
+        For example, `sorted` will sort the nodes in increasing order.
+
+    Returns
+    -------
+    succ: iterator
+        (node, successors) iterator where `successors` is the non-empty list of
+        successors of `node` in a breadth first search from `source`.
+        To appear in the iterator, `node` must have successors.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> dict(nx.bfs_successors(G, 0))
+    {0: [1], 1: [2]}
+    >>> H = nx.Graph()
+    >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])
+    >>> dict(nx.bfs_successors(H, 0))
+    {0: [1, 2], 1: [3, 4], 2: [5, 6]}
+    >>> G = nx.Graph()
+    >>> nx.add_path(G, [0, 1, 2, 3, 4, 5, 6])
+    >>> nx.add_path(G, [2, 7, 8, 9, 10])
+    >>> dict(nx.bfs_successors(G, source=1, depth_limit=3))
+    {1: [0, 2], 2: [3, 7], 3: [4], 7: [8]}
+    >>> G = nx.DiGraph()
+    >>> nx.add_path(G, [0, 1, 2, 3, 4, 5])
+    >>> dict(nx.bfs_successors(G, source=3))
+    {3: [4], 4: [5]}
+
+    Notes
+    -----
+    Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
+    by D. Eppstein, July 2004. The modifications
+    to allow depth limits based on the Wikipedia article
+    "`Depth-limited-search`_".
+
+    .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
+
+    See Also
+    --------
+    bfs_tree
+    bfs_edges
+    edge_bfs
+    """
+    parent = source
+    children = []
+    for p, c in bfs_edges(
+        G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors
+    ):
+        if p == parent:
+            children.append(c)
+            continue
+        yield (parent, children)
+        children = [c]
+        parent = p
+    yield (parent, children)
+
+
+@nx._dispatchable
+def bfs_layers(G, sources):
+    """Returns an iterator of all the layers in breadth-first search traversal.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A graph over which to find the layers using breadth-first search.
+
+    sources : node in `G` or iterable of nodes in `G`
+        Specify starting nodes for single source or multiple sources
+        breadth-first search.
+
+    Yields
+    ------
+    layer : list of nodes
+        Yields list of nodes at the same distance from `sources`.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> dict(enumerate(nx.bfs_layers(G, [0, 4])))
+    {0: [0, 4], 1: [1, 3], 2: [2]}
+    >>> H = nx.Graph()
+    >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])
+    >>> dict(enumerate(nx.bfs_layers(H, [1])))
+    {0: [1], 1: [0, 3, 4], 2: [2], 3: [5, 6]}
+    >>> dict(enumerate(nx.bfs_layers(H, [1, 6])))
+    {0: [1, 6], 1: [0, 3, 4, 2], 2: [5]}
+    """
+    if sources in G:
+        sources = [sources]
+
+    visited = set(sources)
+    current_layer = list(visited)
+
+    for source in current_layer:
+        if source not in G:
+            raise nx.NetworkXError(f"The node {source} is not in the graph.")
+
+    # this is basically BFS, except that the current layer only stores the nodes at
+    # same distance from sources at each iteration
+    while current_layer:
+        yield current_layer
+        next_layer = []
+        for node in current_layer:
+            for child in G[node]:
+                if child not in visited:
+                    visited.add(child)
+                    next_layer.append(child)
+        current_layer = next_layer
+
+
+REVERSE_EDGE = "reverse"
+TREE_EDGE = "tree"
+FORWARD_EDGE = "forward"
+LEVEL_EDGE = "level"
+
+
+@nx._dispatchable
+def bfs_labeled_edges(G, sources):
+    """Iterate over edges in a breadth-first search (BFS) labeled by type.
+
+    We generate triples of the form (*u*, *v*, *d*), where (*u*, *v*) is the
+    edge being explored in the breadth-first search and *d* is one of the
+    strings 'tree', 'forward', 'level', or 'reverse'. A 'tree' edge is one in
+    which *v* is first discovered and placed into the layer below *u*. A
+    'forward' edge is one in which *u* is on the layer above *v* and *v* has
+    already been discovered. A 'level' edge is one in which both *u* and *v*
+    occur on the same layer. A 'reverse' edge is one in which *u* is on a layer
+    below *v*.
+
+    We emit each edge exactly once. In an undirected graph, 'reverse' edges do
+    not occur, because each is discovered either as a 'tree' or 'forward' edge.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A graph over which to find the layers using breadth-first search.
+
+    sources : node in `G` or list of nodes in `G`
+        Starting nodes for single source or multiple sources breadth-first search
+
+    Yields
+    ------
+    edges: generator
+        A generator of triples (*u*, *v*, *d*) where (*u*, *v*) is the edge being
+        explored and *d* is described above.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(4, create_using=nx.DiGraph)
+    >>> list(nx.bfs_labeled_edges(G, 0))
+    [(0, 1, 'tree'), (1, 2, 'tree'), (2, 3, 'tree'), (3, 0, 'reverse')]
+    >>> G = nx.complete_graph(3)
+    >>> list(nx.bfs_labeled_edges(G, 0))
+    [(0, 1, 'tree'), (0, 2, 'tree'), (1, 2, 'level')]
+    >>> list(nx.bfs_labeled_edges(G, [0, 1]))
+    [(0, 1, 'level'), (0, 2, 'tree'), (1, 2, 'forward')]
+    """
+    if sources in G:
+        sources = [sources]
+
+    neighbors = G._adj
+    directed = G.is_directed()
+    visited = set()
+    visit = visited.discard if directed else visited.add
+    # We use visited in a negative sense, so the visited set stays empty for the
+    # directed case and level edges are reported on their first occurrence in
+    # the undirected case.
Note our use of visited.discard -- this is built-in + # thus somewhat faster than a python-defined def nop(x): pass + depth = dict.fromkeys(sources, 0) + queue = deque(depth.items()) + push = queue.append + pop = queue.popleft + while queue: + u, du = pop() + for v in neighbors[u]: + if v not in depth: + depth[v] = dv = du + 1 + push((v, dv)) + yield u, v, TREE_EDGE + else: + dv = depth[v] + if du == dv: + if v not in visited: + yield u, v, LEVEL_EDGE + elif du < dv: + yield u, v, FORWARD_EDGE + elif directed: + yield u, v, REVERSE_EDGE + visit(u) + + +@nx._dispatchable +def descendants_at_distance(G, source, distance): + """Returns all nodes at a fixed `distance` from `source` in `G`. + + Parameters + ---------- + G : NetworkX graph + A graph + source : node in `G` + distance : the distance of the wanted nodes from `source` + + Returns + ------- + set() + The descendants of `source` in `G` at the given `distance` from `source` + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.descendants_at_distance(G, 2, 2) + {0, 4} + >>> H = nx.DiGraph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> nx.descendants_at_distance(H, 0, 2) + {3, 4, 5, 6} + >>> nx.descendants_at_distance(H, 5, 0) + {5} + >>> nx.descendants_at_distance(H, 5, 1) + set() + """ + if source not in G: + raise nx.NetworkXError(f"The node {source} is not in the graph.") + + bfs_generator = nx.bfs_layers(G, source) + for i, layer in enumerate(bfs_generator): + if i == distance: + return set(layer) + return set() diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/depth_first_search.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/depth_first_search.py new file mode 100644 index 0000000..5bac5ec --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/depth_first_search.py @@ -0,0 +1,529 @@ +"""Basic algorithms for depth-first searching the nodes of a graph.""" + +from collections import defaultdict + +import networkx as nx + +__all__ = [ + "dfs_edges", + "dfs_tree", + "dfs_predecessors", + "dfs_successors", + "dfs_preorder_nodes", + "dfs_postorder_nodes", + "dfs_labeled_edges", +] + + +@nx._dispatchable +def dfs_edges(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Iterate over edges in a depth-first-search (DFS). + + Perform a depth-first-search over the nodes of `G` and yield + the edges in order. This may not generate all edges in `G` + (see `~networkx.algorithms.traversal.edgedfs.edge_dfs`). + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and yield edges in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Yields + ------ + edge: 2-tuple of nodes + Yields edges resulting from the depth-first-search. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_edges(G, source=0)) + [(0, 1), (1, 2), (2, 3), (3, 4)] + >>> list(nx.dfs_edges(G, source=0, depth_limit=2)) + [(0, 1), (1, 2)] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. 
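+
+    For instance, with no `source` the search restarts until every component
+    has been covered; a small illustrative check on a two-component graph:
+
+    >>> D = nx.Graph([(0, 1), (2, 3)])
+    >>> list(nx.dfs_edges(D))
+    [(0, 1), (2, 3)]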
+ + The implementation of this function is adapted from David Eppstein's + depth-first search function in PADS [1]_, with modifications + to allow depth limits based on the Wikipedia article + "Depth-limited search" [2]_. + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges` + + References + ---------- + .. [1] http://www.ics.uci.edu/~eppstein/PADS + .. [2] https://en.wikipedia.org/wiki/Depth-limited_search + """ + if source is None: + # edges for all components + nodes = G + else: + # edges for components with source + nodes = [source] + if depth_limit is None: + depth_limit = len(G) + + get_children = ( + G.neighbors + if sort_neighbors is None + else lambda n: iter(sort_neighbors(G.neighbors(n))) + ) + + visited = set() + for start in nodes: + if start in visited: + continue + visited.add(start) + stack = [(start, get_children(start))] + depth_now = 1 + while stack: + parent, children = stack[-1] + for child in children: + if child not in visited: + yield parent, child + visited.add(child) + if depth_now < depth_limit: + stack.append((child, get_children(child))) + depth_now += 1 + break + else: + stack.pop() + depth_now -= 1 + + +@nx._dispatchable(returns_graph=True) +def dfs_tree(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Returns oriented tree constructed from a depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + T : NetworkX DiGraph + An oriented tree + + Examples + -------- + >>> G = nx.path_graph(5) + >>> T = nx.dfs_tree(G, source=0, depth_limit=2) + >>> list(T.edges()) + [(0, 1), (1, 2)] + >>> T = nx.dfs_tree(G, source=0) + >>> list(T.edges()) + [(0, 1), (1, 2), (2, 3), (3, 4)] + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` + """ + T = nx.DiGraph() + if source is None: + T.add_nodes_from(G) + else: + T.add_node(source) + T.add_edges_from(dfs_edges(G, source, depth_limit, sort_neighbors=sort_neighbors)) + return T + + +@nx._dispatchable +def dfs_predecessors(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Returns dictionary of predecessors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + Note that you will get predecessors for all nodes in the + component containing `source`. This input only specifies + where the DFS starts. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + pred: dict + A dictionary with nodes as keys and predecessor nodes as values. 
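+
+        Unlike :func:`bfs_predecessors`, which yields ``(node, predecessor)``
+        pairs lazily, the whole mapping is built before it is returned, so it
+        can be indexed directly:
+
+        >>> nx.dfs_predecessors(nx.path_graph(3), source=0)[2]
+        1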
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.dfs_predecessors(G, source=0) + {1: 0, 2: 1, 3: 2} + >>> nx.dfs_predecessors(G, source=0, depth_limit=2) + {1: 0, 2: 1} + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` + """ + return { + t: s + for s, t in dfs_edges(G, source, depth_limit, sort_neighbors=sort_neighbors) + } + + +@nx._dispatchable +def dfs_successors(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Returns dictionary of successors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + Note that you will get successors for all nodes in the + component containing `source`. This input only specifies + where the DFS starts. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + succ: dict + A dictionary with nodes as keys and list of successor nodes as values. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.dfs_successors(G, source=0) + {0: [1], 1: [2], 2: [3], 3: [4]} + >>> nx.dfs_successors(G, source=0, depth_limit=2) + {0: [1], 1: [2]} + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` + """ + d = defaultdict(list) + for s, t in dfs_edges( + G, + source=source, + depth_limit=depth_limit, + sort_neighbors=sort_neighbors, + ): + d[s].append(t) + return dict(d) + + +@nx._dispatchable +def dfs_postorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Generate nodes in a depth-first-search post-ordering starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. 
+ For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search post-ordering. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_postorder_nodes(G, source=0)) + [4, 3, 2, 1, 0] + >>> list(nx.dfs_postorder_nodes(G, source=0, depth_limit=2)) + [1, 0] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` + """ + edges = nx.dfs_labeled_edges( + G, source=source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ) + return (v for u, v, d in edges if d == "reverse") + + +@nx._dispatchable +def dfs_preorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Generate nodes in a depth-first-search pre-ordering starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return nodes in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search pre-ordering. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_preorder_nodes(G, source=0)) + [0, 1, 2, 3, 4] + >>> list(nx.dfs_preorder_nodes(G, source=0, depth_limit=2)) + [0, 1, 2] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges` + """ + edges = nx.dfs_labeled_edges( + G, source=source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ) + return (v for u, v, d in edges if d == "forward") + + +@nx._dispatchable +def dfs_labeled_edges(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Iterate over edges in a depth-first-search (DFS) labeled by type. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. 
+ + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + edges: generator + A generator of triples of the form (*u*, *v*, *d*), where (*u*, + *v*) is the edge being explored in the depth-first search and *d* + is one of the strings 'forward', 'nontree', 'reverse', or 'reverse-depth_limit'. + A 'forward' edge is one in which *u* has been visited but *v* has + not. A 'nontree' edge is one in which both *u* and *v* have been + visited but the edge is not in the DFS tree. A 'reverse' edge is + one in which both *u* and *v* have been visited and the edge is in + the DFS tree. When the `depth_limit` is reached via a 'forward' edge, + a 'reverse' edge is immediately generated rather than the subtree + being explored. To indicate this flavor of 'reverse' edge, the string + yielded is 'reverse-depth_limit'. + + Examples + -------- + + The labels reveal the complete transcript of the depth-first search + algorithm in more detail than, for example, :func:`dfs_edges`:: + + >>> from pprint import pprint + >>> + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 1)]) + >>> pprint(list(nx.dfs_labeled_edges(G, source=0))) + [(0, 0, 'forward'), + (0, 1, 'forward'), + (1, 2, 'forward'), + (2, 1, 'nontree'), + (1, 2, 'reverse'), + (0, 1, 'reverse'), + (0, 0, 'reverse')] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_postorder_nodes + """ + # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + # by D. Eppstein, July 2004. 
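+    # Overview of the loop below: an explicit stack of (node, children
+    # iterator) pairs drives the search; 'forward' is yielded when a child
+    # is first discovered, 'nontree' when the child was already visited,
+    # and 'reverse' when a node's iterator is exhausted and it is popped.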
+ if source is None: + # edges for all components + nodes = G + else: + # edges for components with source + nodes = [source] + if depth_limit is None: + depth_limit = len(G) + + get_children = ( + G.neighbors + if sort_neighbors is None + else lambda n: iter(sort_neighbors(G.neighbors(n))) + ) + + visited = set() + for start in nodes: + if start in visited: + continue + yield start, start, "forward" + visited.add(start) + stack = [(start, get_children(start))] + depth_now = 1 + while stack: + parent, children = stack[-1] + for child in children: + if child in visited: + yield parent, child, "nontree" + else: + yield parent, child, "forward" + visited.add(child) + if depth_now < depth_limit: + stack.append((child, iter(get_children(child)))) + depth_now += 1 + break + else: + yield parent, child, "reverse-depth_limit" + else: + stack.pop() + depth_now -= 1 + if stack: + yield stack[-1][0], parent, "reverse" + yield start, start, "reverse" diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/edgebfs.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/edgebfs.py new file mode 100644 index 0000000..2f468e1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/edgebfs.py @@ -0,0 +1,185 @@ +""" +============================= +Breadth First Search on Edges +============================= + +Algorithms for a breadth-first traversal of edges in a graph. + +""" + +from collections import deque + +import networkx as nx + +FORWARD = "forward" +REVERSE = "reverse" + +__all__ = ["edge_bfs"] + + +@nx._dispatchable +def edge_bfs(G, source=None, orientation=None): + """A directed, breadth-first-search of edges in `G`, beginning at `source`. + + Yield the edges of G in a breadth-first-search order continuing until + all edges are generated. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Yields + ------ + edge : directed edge + A directed edge indicating the path taken by the breadth-first-search. + For graphs, `edge` is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. 
+
+    Examples
+    --------
+    >>> from pprint import pprint
+    >>> nodes = [0, 1, 2, 3]
+    >>> edges = [(0, 1), (1, 0), (1, 0), (2, 0), (2, 1), (3, 1)]
+
+    >>> list(nx.edge_bfs(nx.Graph(edges), nodes))
+    [(0, 1), (0, 2), (1, 2), (1, 3)]
+
+    >>> list(nx.edge_bfs(nx.DiGraph(edges), nodes))
+    [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)]
+
+    >>> list(nx.edge_bfs(nx.MultiGraph(edges), nodes))
+    [(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (1, 2, 0), (1, 3, 0)]
+
+    >>> list(nx.edge_bfs(nx.MultiDiGraph(edges), nodes))
+    [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 0, 0), (2, 1, 0), (3, 1, 0)]
+
+    >>> list(nx.edge_bfs(nx.DiGraph(edges), nodes, orientation="ignore"))
+    [(0, 1, 'forward'), (1, 0, 'reverse'), (2, 0, 'reverse'), (2, 1, 'reverse'), (3, 1, 'reverse')]
+
+    >>> elist = list(nx.edge_bfs(nx.MultiDiGraph(edges), nodes, orientation="ignore"))
+    >>> pprint(elist)
+    [(0, 1, 0, 'forward'),
+     (1, 0, 0, 'reverse'),
+     (1, 0, 1, 'reverse'),
+     (2, 0, 0, 'reverse'),
+     (2, 1, 0, 'reverse'),
+     (3, 1, 0, 'reverse')]
+
+    Notes
+    -----
+    The goal of this function is to visit edges. It differs from the more
+    familiar breadth-first-search of nodes, as provided by
+    :func:`networkx.algorithms.traversal.breadth_first_search.bfs_edges`, in
+    that it does not stop once every node has been visited. In a directed graph
+    with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited
+    if not for the functionality provided by this function.
+
+    The naming of this function is very similar to bfs_edges. The difference
+    is that 'edge_bfs' yields edges even if they extend back to an already
+    explored node while 'bfs_edges' yields the edges of the tree that results
+    from a breadth-first-search (BFS) so no edges are reported if they extend
+    to already explored nodes. That means 'edge_bfs' reports all edges while
+    'bfs_edges' only reports those traversed by a node-based BFS. Yet another
+    description is that 'bfs_edges' reports the edges traversed during BFS
+    while 'edge_bfs' reports all edges in the order they are explored.
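+
+    A minimal contrast on the three-edge digraph mentioned above:
+
+    >>> D = nx.DiGraph([(0, 1), (1, 2), (2, 1)])
+    >>> list(nx.edge_bfs(D, 0))
+    [(0, 1), (1, 2), (2, 1)]
+    >>> list(nx.bfs_edges(D, 0))
+    [(0, 1), (1, 2)]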
+ + See Also + -------- + bfs_edges + bfs_tree + edge_dfs + + """ + nodes = list(G.nbunch_iter(source)) + if not nodes: + return + + directed = G.is_directed() + kwds = {"data": False} + if G.is_multigraph() is True: + kwds["keys"] = True + + # set up edge lookup + if orientation is None: + + def edges_from(node): + return iter(G.edges(node, **kwds)) + + elif not directed or orientation == "original": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + + elif orientation == "reverse": + + def edges_from(node): + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + elif orientation == "ignore": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + else: + raise nx.NetworkXError("invalid orientation argument.") + + if directed: + neighbors = G.successors + + def edge_id(edge): + # remove direction indicator + return edge[:-1] if orientation is not None else edge + + else: + neighbors = G.neighbors + + def edge_id(edge): + return (frozenset(edge[:2]),) + edge[2:] + + check_reverse = directed and orientation in ("reverse", "ignore") + + # start BFS + visited_nodes = set(nodes) + visited_edges = set() + queue = deque([(n, edges_from(n)) for n in nodes]) + while queue: + parent, children_edges = queue.popleft() + for edge in children_edges: + if check_reverse and edge[-1] == REVERSE: + child = edge[0] + else: + child = edge[1] + if child not in visited_nodes: + visited_nodes.add(child) + queue.append((child, edges_from(child))) + edgeid = edge_id(edge) + if edgeid not in visited_edges: + visited_edges.add(edgeid) + yield edge diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/edgedfs.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/edgedfs.py new file mode 100644 index 0000000..f817310 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/edgedfs.py @@ -0,0 +1,182 @@ +""" +=========================== +Depth First Search on Edges +=========================== + +Algorithms for a depth-first traversal of edges in a graph. + +""" + +import networkx as nx + +FORWARD = "forward" +REVERSE = "reverse" + +__all__ = ["edge_dfs"] + + +@nx._dispatchable +def edge_dfs(G, source=None, orientation=None): + """A directed, depth-first-search of edges in `G`, beginning at `source`. + + Yield the edges of G in a depth-first-search order continuing until + all edges are generated. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Yields + ------ + edge : directed edge + A directed edge indicating the path taken by the depth-first traversal. 
+ For graphs, `edge` is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. + + Examples + -------- + >>> from pprint import pprint + >>> nodes = [0, 1, 2, 3] + >>> edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + >>> list(nx.edge_dfs(nx.Graph(edges), nodes)) + [(0, 1), (1, 2), (1, 3)] + + >>> list(nx.edge_dfs(nx.DiGraph(edges), nodes)) + [(0, 1), (1, 0), (2, 1), (3, 1)] + + >>> list(nx.edge_dfs(nx.MultiGraph(edges), nodes)) + [(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)] + + >>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes)) + [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)] + + >>> list(nx.edge_dfs(nx.DiGraph(edges), nodes, orientation="ignore")) + [(0, 1, 'forward'), (1, 0, 'forward'), (2, 1, 'reverse'), (3, 1, 'reverse')] + + >>> elist = list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes, orientation="ignore")) + >>> pprint(elist) + [(0, 1, 0, 'forward'), + (1, 0, 0, 'forward'), + (1, 0, 1, 'reverse'), + (2, 1, 0, 'reverse'), + (3, 1, 0, 'reverse')] + + Notes + ----- + The goal of this function is to visit edges. It differs from the more + familiar depth-first traversal of nodes, as provided by + :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`, in + that it does not stop once every node has been visited. In a directed graph + with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited + if not for the functionality provided by this function. + + See Also + -------- + :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges` + + """ + nodes = list(G.nbunch_iter(source)) + if not nodes: + return + + directed = G.is_directed() + kwds = {"data": False} + if G.is_multigraph() is True: + kwds["keys"] = True + + # set up edge lookup + if orientation is None: + + def edges_from(node): + return iter(G.edges(node, **kwds)) + + elif not directed or orientation == "original": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + + elif orientation == "reverse": + + def edges_from(node): + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + elif orientation == "ignore": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + else: + raise nx.NetworkXError("invalid orientation argument.") + + # set up formation of edge_id to easily look up if edge already returned + if directed: + + def edge_id(edge): + # remove direction indicator + return edge[:-1] if orientation is not None else edge + + else: + + def edge_id(edge): + # single id for undirected requires frozenset on nodes + return (frozenset(edge[:2]),) + edge[2:] + + # Basic setup + check_reverse = directed and orientation in ("reverse", "ignore") + + visited_edges = set() + visited_nodes = set() + edges = {} + + # start DFS + for start_node in nodes: + stack = [start_node] + while stack: + current_node = stack[-1] + if current_node not in visited_nodes: + edges[current_node] = edges_from(current_node) + visited_nodes.add(current_node) + + try: + edge = next(edges[current_node]) + except StopIteration: + # No more edges from the current node. 
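+                # Backtrack: drop this node; its parent's edge iterator
+                # resumes on the next pass through the loop.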
+ stack.pop() + else: + edgeid = edge_id(edge) + if edgeid not in visited_edges: + visited_edges.add(edgeid) + # Mark the traversed "to" node as to-be-explored. + if check_reverse and edge[-1] == REVERSE: + stack.append(edge[0]) + else: + stack.append(edge[1]) + yield edge diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..ee29522 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-312.pyc new file mode 100644 index 0000000..de7f873 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-312.pyc new file mode 100644 index 0000000..68526a5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-312.pyc new file mode 100644 index 0000000..5d07410 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-312.pyc new file mode 100644 index 0000000..17edd15 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-312.pyc new file mode 100644 index 0000000..db28cc5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py new file mode 100644 index 0000000..049f116 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py @@ -0,0 +1,25 @@ +"""Unit tests for the beam search functions.""" + +import pytest + +import networkx as nx + + +def test_narrow(): + """Tests that 
a narrow beam width may cause an incomplete search.""" + # In this search, we enqueue only the neighbor 3 at the first + # step, then only the neighbor 2 at the second step. Once at + # node 2, the search chooses node 3, since it has a higher value + # than node 1, but node 3 has already been visited, so the + # search terminates. + G = nx.cycle_graph(4) + edges = nx.bfs_beam_edges(G, source=0, value=lambda n: n, width=1) + assert list(edges) == [(0, 3), (3, 2)] + + +@pytest.mark.parametrize("width", (2, None)) +def test_wide(width): + """All nodes are searched when `width` is None or >= max degree""" + G = nx.cycle_graph(4) + edges = nx.bfs_beam_edges(G, source=0, value=lambda n: n, width=width) + assert list(edges) == [(0, 3), (0, 1), (3, 2)] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_bfs.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_bfs.py new file mode 100644 index 0000000..7f4f37a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_bfs.py @@ -0,0 +1,203 @@ +from functools import partial + +import pytest + +import networkx as nx + + +class TestBFS: + @classmethod + def setup_class(cls): + # simple graph + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)]) + cls.G = G + + def test_successor(self): + assert dict(nx.bfs_successors(self.G, source=0)) == {0: [1], 1: [2, 3], 2: [4]} + + def test_predecessor(self): + assert dict(nx.bfs_predecessors(self.G, source=0)) == {1: 0, 2: 1, 3: 1, 4: 2} + + def test_bfs_tree(self): + T = nx.bfs_tree(self.G, source=0) + assert sorted(T.nodes()) == sorted(self.G.nodes()) + assert sorted(T.edges()) == [(0, 1), (1, 2), (1, 3), (2, 4)] + + def test_bfs_edges(self): + edges = nx.bfs_edges(self.G, source=0) + assert list(edges) == [(0, 1), (1, 2), (1, 3), (2, 4)] + + def test_bfs_edges_reverse(self): + D = nx.DiGraph() + D.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)]) + edges = nx.bfs_edges(D, source=4, reverse=True) + assert list(edges) == [(4, 2), (4, 3), (2, 1), (1, 0)] + + def test_bfs_edges_sorting(self): + D = nx.DiGraph() + D.add_edges_from([(0, 1), (0, 2), (1, 4), (1, 3), (2, 5)]) + sort_desc = partial(sorted, reverse=True) + edges_asc = nx.bfs_edges(D, source=0, sort_neighbors=sorted) + edges_desc = nx.bfs_edges(D, source=0, sort_neighbors=sort_desc) + assert list(edges_asc) == [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5)] + assert list(edges_desc) == [(0, 2), (0, 1), (2, 5), (1, 4), (1, 3)] + + def test_bfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T = nx.bfs_tree(G, source=1) + assert sorted(T.nodes()) == [1] + assert sorted(T.edges()) == [] + + def test_bfs_layers(self): + expected = { + 0: [0], + 1: [1], + 2: [2, 3], + 3: [4], + } + for sources in [0, [0], (i for i in [0]), [0, 0]]: + assert dict(enumerate(nx.bfs_layers(self.G, sources))) == expected + + def test_bfs_layers_missing_source(self): + with pytest.raises(nx.NetworkXError): + next(nx.bfs_layers(self.G, sources="abc")) + with pytest.raises(nx.NetworkXError): + next(nx.bfs_layers(self.G, sources=["abc"])) + + def test_descendants_at_distance(self): + for distance, descendants in enumerate([{0}, {1}, {2, 3}, {4}]): + assert nx.descendants_at_distance(self.G, 0, distance) == descendants + + def test_descendants_at_distance_missing_source(self): + with pytest.raises(nx.NetworkXError): + nx.descendants_at_distance(self.G, "abc", 0) + + def test_bfs_labeled_edges_directed(self): + D = nx.cycle_graph(5, 
create_using=nx.DiGraph) + expected = [ + (0, 1, "tree"), + (1, 2, "tree"), + (2, 3, "tree"), + (3, 4, "tree"), + (4, 0, "reverse"), + ] + answer = list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + D.add_edge(4, 4) + expected.append((4, 4, "level")) + answer = list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + D.add_edge(0, 2) + D.add_edge(1, 5) + D.add_edge(2, 5) + D.remove_edge(4, 4) + expected = [ + (0, 1, "tree"), + (0, 2, "tree"), + (1, 2, "level"), + (1, 5, "tree"), + (2, 3, "tree"), + (2, 5, "forward"), + (3, 4, "tree"), + (4, 0, "reverse"), + ] + answer = list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + G = D.to_undirected() + G.add_edge(4, 4) + expected = [ + (0, 1, "tree"), + (0, 2, "tree"), + (0, 4, "tree"), + (1, 2, "level"), + (1, 5, "tree"), + (2, 3, "tree"), + (2, 5, "forward"), + (4, 3, "forward"), + (4, 4, "level"), + ] + answer = list(nx.bfs_labeled_edges(G, 0)) + assert expected == answer + + +class TestBreadthLimitedSearch: + @classmethod + def setup_class(cls): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + cls.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + nx.add_path(D, [2, 7, 8, 9, 10]) + cls.D = D + + def test_limited_bfs_successor(self): + assert dict(nx.bfs_successors(self.G, source=1, depth_limit=3)) == { + 1: [0, 2], + 2: [3, 7], + 3: [4], + 7: [8], + } + result = { + n: sorted(s) for n, s in nx.bfs_successors(self.D, source=7, depth_limit=2) + } + assert result == {8: [9], 2: [3], 7: [2, 8]} + + def test_limited_bfs_predecessor(self): + assert dict(nx.bfs_predecessors(self.G, source=1, depth_limit=3)) == { + 0: 1, + 2: 1, + 3: 2, + 4: 3, + 7: 2, + 8: 7, + } + assert dict(nx.bfs_predecessors(self.D, source=7, depth_limit=2)) == { + 2: 7, + 3: 2, + 8: 7, + 9: 8, + } + + def test_limited_bfs_tree(self): + T = nx.bfs_tree(self.G, source=3, depth_limit=1) + assert sorted(T.edges()) == [(3, 2), (3, 4)] + + def test_limited_bfs_edges(self): + edges = nx.bfs_edges(self.G, source=9, depth_limit=4) + assert list(edges) == [(9, 8), (9, 10), (8, 7), (7, 2), (2, 1), (2, 3)] + + def test_limited_bfs_layers(self): + assert dict(enumerate(nx.bfs_layers(self.G, sources=[0]))) == { + 0: [0], + 1: [1], + 2: [2], + 3: [3, 7], + 4: [4, 8], + 5: [5, 9], + 6: [6, 10], + } + assert dict(enumerate(nx.bfs_layers(self.D, sources=2))) == { + 0: [2], + 1: [3, 7], + 2: [8], + 3: [9], + 4: [10], + } + + def test_limited_descendants_at_distance(self): + for distance, descendants in enumerate( + [{0}, {1}, {2}, {3, 7}, {4, 8}, {5, 9}, {6, 10}] + ): + assert nx.descendants_at_distance(self.G, 0, distance) == descendants + for distance, descendants in enumerate([{2}, {3, 7}, {8}, {9}, {10}]): + assert nx.descendants_at_distance(self.D, 2, distance) == descendants diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_dfs.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_dfs.py new file mode 100644 index 0000000..292de05 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_dfs.py @@ -0,0 +1,307 @@ +import networkx as nx + + +class TestDFS: + @classmethod + def setup_class(cls): + # simple graph + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 0), (0, 4)]) + cls.G = G + # simple graph, disconnected + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + cls.D = D + + def test_preorder_nodes(self): + assert 
list(nx.dfs_preorder_nodes(self.G, source=0)) == [0, 1, 2, 4, 3] + assert list(nx.dfs_preorder_nodes(self.D)) == [0, 1, 2, 3] + assert list(nx.dfs_preorder_nodes(self.D, source=2)) == [2, 3] + + def test_postorder_nodes(self): + assert list(nx.dfs_postorder_nodes(self.G, source=0)) == [4, 2, 3, 1, 0] + assert list(nx.dfs_postorder_nodes(self.D)) == [1, 0, 3, 2] + assert list(nx.dfs_postorder_nodes(self.D, source=0)) == [1, 0] + + def test_successor(self): + assert nx.dfs_successors(self.G, source=0) == {0: [1], 1: [2, 3], 2: [4]} + assert nx.dfs_successors(self.G, source=1) == {0: [3, 4], 1: [0], 4: [2]} + assert nx.dfs_successors(self.D) == {0: [1], 2: [3]} + assert nx.dfs_successors(self.D, source=1) == {1: [0]} + + def test_predecessor(self): + assert nx.dfs_predecessors(self.G, source=0) == {1: 0, 2: 1, 3: 1, 4: 2} + assert nx.dfs_predecessors(self.D) == {1: 0, 3: 2} + + def test_dfs_tree(self): + exp_nodes = sorted(self.G.nodes()) + exp_edges = [(0, 1), (1, 2), (1, 3), (2, 4)] + # Search from first node + T = nx.dfs_tree(self.G, source=0) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + # Check source=None + T = nx.dfs_tree(self.G, source=None) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + # Check source=None is the default + T = nx.dfs_tree(self.G) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + + def test_dfs_edges(self): + edges = nx.dfs_edges(self.G, source=0) + assert list(edges) == [(0, 1), (1, 2), (2, 4), (1, 3)] + edges = nx.dfs_edges(self.D) + assert list(edges) == [(0, 1), (2, 3)] + + def test_dfs_edges_sorting(self): + G = nx.Graph([(0, 1), (1, 2), (1, 3), (2, 4), (3, 0), (0, 4)]) + edges_asc = nx.dfs_edges(G, source=0, sort_neighbors=sorted) + edges_desc = nx.dfs_edges( + G, source=0, sort_neighbors=lambda x: sorted(x, reverse=True) + ) + assert list(edges_asc) == [(0, 1), (1, 2), (2, 4), (1, 3)] + assert list(edges_desc) == [(0, 4), (4, 2), (2, 1), (1, 3)] + + def test_dfs_labeled_edges(self): + edges = list(nx.dfs_labeled_edges(self.G, source=0)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(0, 0), (0, 1), (1, 2), (2, 4), (1, 3)] + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (1, 2, "forward"), + (2, 1, "nontree"), + (2, 4, "forward"), + (4, 2, "nontree"), + (4, 0, "nontree"), + (2, 4, "reverse"), + (1, 2, "reverse"), + (1, 3, "forward"), + (3, 1, "nontree"), + (3, 0, "nontree"), + (1, 3, "reverse"), + (0, 1, "reverse"), + (0, 3, "nontree"), + (0, 4, "nontree"), + (0, 0, "reverse"), + ] + + def test_dfs_labeled_edges_sorting(self): + G = nx.Graph([(0, 1), (1, 2), (1, 3), (2, 4), (3, 0), (0, 4)]) + edges_asc = nx.dfs_labeled_edges(G, source=0, sort_neighbors=sorted) + edges_desc = nx.dfs_labeled_edges( + G, source=0, sort_neighbors=lambda x: sorted(x, reverse=True) + ) + assert list(edges_asc) == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (1, 2, "forward"), + (2, 1, "nontree"), + (2, 4, "forward"), + (4, 0, "nontree"), + (4, 2, "nontree"), + (2, 4, "reverse"), + (1, 2, "reverse"), + (1, 3, "forward"), + (3, 0, "nontree"), + (3, 1, "nontree"), + (1, 3, "reverse"), + (0, 1, "reverse"), + (0, 3, "nontree"), + (0, 4, "nontree"), + (0, 0, "reverse"), + ] + assert list(edges_desc) == [ + (0, 0, "forward"), + (0, 4, "forward"), + (4, 2, "forward"), + (2, 4, "nontree"), + (2, 1, "forward"), + (1, 3, "forward"), + (3, 1, "nontree"), + (3, 0, "nontree"), + (1, 3, "reverse"), + (1, 
2, "nontree"), + (1, 0, "nontree"), + (2, 1, "reverse"), + (4, 2, "reverse"), + (4, 0, "nontree"), + (0, 4, "reverse"), + (0, 3, "nontree"), + (0, 1, "nontree"), + (0, 0, "reverse"), + ] + + def test_dfs_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.D)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(0, 0), (0, 1), (2, 2), (2, 3)] + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (0, 1, "reverse"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (3, 2, "nontree"), + (2, 3, "reverse"), + (2, 2, "reverse"), + ] + + def test_dfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T = nx.dfs_tree(G, source=1) + assert sorted(T.nodes()) == [1] + assert sorted(T.edges()) == [] + T = nx.dfs_tree(G, source=None) + assert sorted(T.nodes()) == [1, 2] + assert sorted(T.edges()) == [] + + +class TestDepthLimitedSearch: + @classmethod + def setup_class(cls): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + cls.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + nx.add_path(D, [2, 7, 8, 9, 10]) + cls.D = D + + def test_dls_preorder_nodes(self): + assert list(nx.dfs_preorder_nodes(self.G, source=0, depth_limit=2)) == [0, 1, 2] + assert list(nx.dfs_preorder_nodes(self.D, source=1, depth_limit=2)) == ([1, 0]) + + def test_dls_postorder_nodes(self): + assert list(nx.dfs_postorder_nodes(self.G, source=3, depth_limit=3)) == [ + 1, + 7, + 2, + 5, + 4, + 3, + ] + assert list(nx.dfs_postorder_nodes(self.D, source=2, depth_limit=2)) == ( + [3, 7, 2] + ) + + def test_dls_successor(self): + result = nx.dfs_successors(self.G, source=4, depth_limit=3) + assert {n: set(v) for n, v in result.items()} == { + 2: {1, 7}, + 3: {2}, + 4: {3, 5}, + 5: {6}, + } + result = nx.dfs_successors(self.D, source=7, depth_limit=2) + assert {n: set(v) for n, v in result.items()} == {8: {9}, 2: {3}, 7: {8, 2}} + + def test_dls_predecessor(self): + assert nx.dfs_predecessors(self.G, source=0, depth_limit=3) == { + 1: 0, + 2: 1, + 3: 2, + 7: 2, + } + assert nx.dfs_predecessors(self.D, source=2, depth_limit=3) == { + 8: 7, + 9: 8, + 3: 2, + 7: 2, + } + + def test_dls_tree(self): + T = nx.dfs_tree(self.G, source=3, depth_limit=1) + assert sorted(T.edges()) == [(3, 2), (3, 4)] + + def test_dls_edges(self): + edges = nx.dfs_edges(self.G, source=9, depth_limit=4) + assert list(edges) == [(9, 8), (8, 7), (7, 2), (2, 1), (2, 3), (9, 10)] + + def test_dls_labeled_edges_depth_1(self): + edges = list(nx.dfs_labeled_edges(self.G, source=5, depth_limit=1)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(5, 5), (5, 4), (5, 6)] + # Note: reverse-depth_limit edge types were not reported before gh-6240 + assert edges == [ + (5, 5, "forward"), + (5, 4, "forward"), + (5, 4, "reverse-depth_limit"), + (5, 6, "forward"), + (5, 6, "reverse-depth_limit"), + (5, 5, "reverse"), + ] + + def test_dls_labeled_edges_depth_2(self): + edges = list(nx.dfs_labeled_edges(self.G, source=6, depth_limit=2)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(6, 6), (6, 5), (5, 4)] + assert edges == [ + (6, 6, "forward"), + (6, 5, "forward"), + (5, 4, "forward"), + (5, 4, "reverse-depth_limit"), + (5, 6, "nontree"), + (6, 5, "reverse"), + (6, 6, "reverse"), + ] + + def test_dls_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.D, depth_limit=1)) + assert edges 
== [ + (0, 0, "forward"), + (0, 1, "forward"), + (0, 1, "reverse-depth_limit"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (2, 3, "reverse-depth_limit"), + (2, 7, "forward"), + (2, 7, "reverse-depth_limit"), + (2, 2, "reverse"), + (8, 8, "forward"), + (8, 7, "nontree"), + (8, 9, "forward"), + (8, 9, "reverse-depth_limit"), + (8, 8, "reverse"), + (10, 10, "forward"), + (10, 9, "nontree"), + (10, 10, "reverse"), + ] + # large depth_limit has no impact + edges = list(nx.dfs_labeled_edges(self.D, depth_limit=19)) + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (0, 1, "reverse"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (3, 2, "nontree"), + (2, 3, "reverse"), + (2, 7, "forward"), + (7, 2, "nontree"), + (7, 8, "forward"), + (8, 7, "nontree"), + (8, 9, "forward"), + (9, 8, "nontree"), + (9, 10, "forward"), + (10, 9, "nontree"), + (9, 10, "reverse"), + (8, 9, "reverse"), + (7, 8, "reverse"), + (2, 7, "reverse"), + (2, 2, "reverse"), + ] diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py new file mode 100644 index 0000000..1bf3fae --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py @@ -0,0 +1,147 @@ +import pytest + +import networkx as nx +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + + +class TestEdgeBFS: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(0, 1), (1, 0), (1, 0), (2, 0), (2, 1), (3, 1)] + + def test_empty(self): + G = nx.Graph() + edges = list(nx.edge_bfs(G)) + assert edges == [] + + def test_graph_single_source(self): + G = nx.Graph(self.edges) + G.add_edge(4, 5) + x = list(nx.edge_bfs(G, [0])) + x_ = [(0, 1), (0, 2), (1, 2), (1, 3)] + assert x == x_ + + def test_graph(self): + G = nx.Graph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1), (0, 2), (1, 2), (1, 3)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_invalid(self): + G = nx.DiGraph(self.edges) + edge_iterator = nx.edge_bfs(G, self.nodes, orientation="hello") + pytest.raises(nx.NetworkXError, list, edge_iterator) + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="original")) + x_ = [ + (0, 1, FORWARD), + (1, 0, FORWARD), + (2, 0, FORWARD), + (2, 1, FORWARD), + (3, 1, FORWARD), + ] + assert x == x_ + + def test_digraph2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [0])) + x_ = [(0, 1), (1, 2), (2, 3)] + assert x == x_ + + def test_digraph_rev(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, REVERSE), + (2, 0, REVERSE), + (0, 1, REVERSE), + (2, 1, REVERSE), + (3, 1, REVERSE), + ] + assert x == x_ + + def test_digraph_rev2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [3], orientation="reverse")) + x_ = [(2, 3, REVERSE), (1, 2, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multigraph(self): + G = 
nx.MultiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (1, 2, 0), (1, 3, 0)] + # This is an example of where hash randomization can break. + # There are 3! * 2 alternative outputs, such as: + # [(0, 1, 1), (1, 0, 0), (0, 1, 2), (1, 3, 0), (1, 2, 0)] + # But note, the edges (1,2,0) and (1,3,0) always follow the (0,1,k) + # edges. So the algorithm only guarantees a partial order. A total + # order is guaranteed only if the graph data structures are ordered. + assert x == x_ + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 0, 0), (2, 1, 0), (3, 1, 0)] + assert x == x_ + + def test_multidigraph_rev(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 0, 0, REVERSE), + (0, 1, 0, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, FORWARD), + (1, 0, REVERSE), + (2, 0, REVERSE), + (2, 1, REVERSE), + (3, 1, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [0], orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 3, FORWARD)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, 0, FORWARD), + (1, 0, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 0, 0, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py new file mode 100644 index 0000000..7c1967c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py @@ -0,0 +1,131 @@ +import pytest + +import networkx as nx +from networkx.algorithms import edge_dfs +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + +# These tests can fail with hash randomization. The easiest and clearest way +# to write these unit tests is for the edges to be output in an expected total +# order, but we cannot guarantee the order amongst outgoing edges from a node, +# unless each class uses an ordered data structure for neighbors. This is +# painful to do with the current API. The alternative is that the tests are +# written (IMO confusingly) so that there is not a total order over the edges, +# but only a partial order. Due to the small size of the graphs, hopefully +# failures due to hash randomization will not occur. For an example of how +# this can fail, see TestEdgeDFS.test_multigraph. 
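# As an illustrative aside (not part of the upstream suite): with
# orientation="ignore", edge_dfs may traverse a directed edge against its
# direction, and it labels each step with the direction actually taken.
# A minimal sketch, assuming only the imports above:
#
# >>> G = nx.DiGraph([(0, 1), (2, 1)])
# >>> list(nx.edge_dfs(G, [0], orientation="ignore"))
# [(0, 1, 'forward'), (2, 1, 'reverse')]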
+ + +class TestEdgeDFS: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + def test_empty(self): + G = nx.Graph() + edges = list(edge_dfs(G)) + assert edges == [] + + def test_graph(self): + G = nx.Graph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1), (1, 2), (1, 3)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1), (1, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_invalid(self): + G = nx.DiGraph(self.edges) + edge_iterator = edge_dfs(G, self.nodes, orientation="hello") + pytest.raises(nx.NetworkXError, list, edge_iterator) + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD), (2, 1, FORWARD), (3, 1, FORWARD)] + assert x == x_ + + def test_digraph2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [0])) + x_ = [(0, 1), (1, 2), (2, 3)] + assert x == x_ + + def test_digraph_rev(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="reverse")) + x_ = [(1, 0, REVERSE), (0, 1, REVERSE), (2, 1, REVERSE), (3, 1, REVERSE)] + assert x == x_ + + def test_digraph_rev2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [3], orientation="reverse")) + x_ = [(2, 3, REVERSE), (1, 2, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)] + # This is an example of where hash randomization can break. + # There are 3! * 2 alternative outputs, such as: + # [(0, 1, 1), (1, 0, 0), (0, 1, 2), (1, 3, 0), (1, 2, 0)] + # But note, the edges (1,2,0) and (1,3,0) always follow the (0,1,k) + # edges. So the algorithm only guarantees a partial order. A total + # order is guaranteed only if the graph data structures are ordered. 
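# (An added aside, a sketch rather than upstream code: an order-insensitive
# check such as `assert sorted(x) == sorted(x_)` would tolerate hash
# randomization here, at the cost of no longer exercising the partial
# order that the traversal does guarantee.)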
+ assert x == x_ + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)] + assert x == x_ + + def test_multidigraph_rev(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, 0, REVERSE), + (0, 1, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD), (2, 1, REVERSE), (3, 1, REVERSE)] + assert x == x_ + + def test_digraph_ignore2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [0], orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 3, FORWARD)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, 0, FORWARD), + (1, 0, 0, FORWARD), + (1, 0, 1, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__init__.py new file mode 100644 index 0000000..7120d4b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__init__.py @@ -0,0 +1,6 @@ +from .branchings import * +from .coding import * +from .mst import * +from .recognition import * +from .operations import * +from .decomposition import * diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..6957f27 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-312.pyc new file mode 100644 index 0000000..b4ed4f9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-312.pyc new file mode 100644 index 0000000..c76ddc7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-312.pyc new file mode 100644 index 0000000..182778d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-312.pyc new file mode 100644 index 0000000..c78601c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-312.pyc 
differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-312.pyc new file mode 100644 index 0000000..2768e39 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-312.pyc new file mode 100644 index 0000000..a552a46 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/branchings.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/branchings.py new file mode 100644 index 0000000..cc9c7cf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/branchings.py @@ -0,0 +1,1042 @@ +""" +Algorithms for finding optimum branchings and spanning arborescences. + +This implementation is based on: + + J. Edmonds, Optimum branchings, J. Res. Natl. Bur. Standards 71B (1967), + 233–240. URL: http://archive.org/details/jresv71Bn4p233 + +""" + +# TODO: Implement method from Gabow, Galil, Spence and Tarjan: +# +# @article{ +# year={1986}, +# issn={0209-9683}, +# journal={Combinatorica}, +# volume={6}, +# number={2}, +# doi={10.1007/BF02579168}, +# title={Efficient algorithms for finding minimum spanning trees in +# undirected and directed graphs}, +# url={https://doi.org/10.1007/BF02579168}, +# publisher={Springer-Verlag}, +# keywords={68 B 15; 68 C 05}, +# author={Gabow, Harold N. and Galil, Zvi and Spencer, Thomas and Tarjan, +# Robert E.}, +# pages={109-122}, +# language={English} +# } +import string +from dataclasses import dataclass, field +from operator import itemgetter +from queue import PriorityQueue + +import networkx as nx +from networkx.utils import py_random_state + +from .recognition import is_arborescence, is_branching + +__all__ = [ + "branching_weight", + "greedy_branching", + "maximum_branching", + "minimum_branching", + "minimal_branching", + "maximum_spanning_arborescence", + "minimum_spanning_arborescence", + "ArborescenceIterator", +] + +KINDS = {"max", "min"} + +STYLES = { + "branching": "branching", + "arborescence": "arborescence", + "spanning arborescence": "arborescence", +} + +INF = float("inf") + + +@py_random_state(1) +def random_string(L=15, seed=None): + return "".join([seed.choice(string.ascii_letters) for n in range(L)]) + + +def _min_weight(weight): + return -weight + + +def _max_weight(weight): + return weight + + +@nx._dispatchable(edge_attrs={"attr": "default"}) +def branching_weight(G, attr="weight", default=1): + """ + Returns the total weight of a branching. + + You must access this function through the networkx.algorithms.tree module. + + Parameters + ---------- + G : DiGraph + The directed graph. + attr : str + The attribute to use as weights. If None, then each edge will be + treated equally with a weight of 1. + default : float + When `attr` is not None, then if an edge does not have that attribute, + `default` specifies what value it should take. + + Returns + ------- + weight: int or float + The total weight of the branching. 
+ + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 4), (2, 3, 3), (3, 4, 2)]) + >>> nx.tree.branching_weight(G) + 11 + + """ + return sum(edge[2].get(attr, default) for edge in G.edges(data=True)) + + +@py_random_state(4) +@nx._dispatchable(edge_attrs={"attr": "default"}, returns_graph=True) +def greedy_branching(G, attr="weight", default=1, kind="max", seed=None): + """ + Returns a branching obtained through a greedy algorithm. + + This algorithm is wrong, and cannot give a proper optimal branching. + However, we include it for pedagogical reasons, as it can be helpful to + see what its outputs are. + + The output is a branching, and possibly, a spanning arborescence. However, + it is not guaranteed to be optimal in either case. + + Parameters + ---------- + G : DiGraph + The directed graph to scan. + attr : str + The attribute to use as weights. If None, then each edge will be + treated equally with a weight of 1. + default : float + When `attr` is not None, then if an edge does not have that attribute, + `default` specifies what value it should take. + kind : str + The type of optimum to search for: 'min' or 'max' greedy branching. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + B : directed graph + The greedily obtained branching. + + """ + if kind not in KINDS: + raise nx.NetworkXException("Unknown value for `kind`.") + + if kind == "min": + reverse = False + else: + reverse = True + + if attr is None: + # Generate a random string the graph probably won't have. + attr = random_string(seed=seed) + + edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)] + + # We sort by weight, but also by nodes to normalize behavior across runs. + try: + edges.sort(key=itemgetter(2, 0, 1), reverse=reverse) + except TypeError: + # This will fail in Python 3.x if the nodes are of varying types. + # In that case, we use the arbitrary order. + edges.sort(key=itemgetter(2), reverse=reverse) + + # The branching begins with a forest of no edges. + B = nx.DiGraph() + B.add_nodes_from(G) + + # Now we add edges greedily so long we maintain the branching. + uf = nx.utils.UnionFind() + for i, (u, v, w) in enumerate(edges): + if uf[u] == uf[v]: + # Adding this edge would form a directed cycle. + continue + elif B.in_degree(v) == 1: + # The edge would increase the degree to be greater than one. + continue + else: + # If attr was None, then don't insert weights... + data = {} + if attr is not None: + data[attr] = w + B.add_edge(u, v, **data) + uf.union(u, v) + + return B + + +@nx._dispatchable(preserve_edge_attrs=True, returns_graph=True) +def maximum_branching( + G, + attr="weight", + default=1, + preserve_attrs=False, + partition=None, +): + ####################################### + ### Data Structure Helper Functions ### + ####################################### + + def edmonds_add_edge(G, edge_index, u, v, key, **d): + """ + Adds an edge to `G` while also updating the edge index. + + This algorithm requires the use of an external dictionary to track + the edge keys since it is possible that the source or destination + node of an edge will be changed and the default key-handling + capabilities of the MultiDiGraph class do not account for this. + + Parameters + ---------- + G : MultiDiGraph + The graph to insert an edge into. + edge_index : dict + A mapping from integers to the edges of the graph. + u : node + The source node of the new edge. 
v : node + The destination node of the new edge. + key : int + The key to use from `edge_index`. + d : keyword arguments, optional + Other attributes to store on the new edge. + """ + + if key in edge_index: + uu, vv, _ = edge_index[key] + if (u != uu) or (v != vv): + raise Exception(f"Key {key!r} is already in use.") + + G.add_edge(u, v, key, **d) + edge_index[key] = (u, v, G.succ[u][v][key]) + + def edmonds_remove_node(G, edge_index, n): + """ + Remove a node from the graph, updating the edge index to match. + + Parameters + ---------- + G : MultiDiGraph + The graph to remove an edge from. + edge_index : dict + A mapping from integers to the edges of the graph. + n : node + The node to remove from `G`. + """ + keys = set() + for keydict in G.pred[n].values(): + keys.update(keydict) + for keydict in G.succ[n].values(): + keys.update(keydict) + + for key in keys: + del edge_index[key] + + G.remove_node(n) + + ####################### + ### Algorithm Setup ### + ####################### + + # Pick an attribute name that the original graph is unlikely to have + candidate_attr = "edmonds' secret candidate attribute" + new_node_base_name = "edmonds new node base name " + + G_original = G + G = nx.MultiDiGraph() + G.__networkx_cache__ = None # Disable caching + + # A dict to reliably track mutations to the edges using the key of the edge. + G_edge_index = {} + # Each edge is given an arbitrary numerical key + for key, (u, v, data) in enumerate(G_original.edges(data=True)): + d = {attr: data.get(attr, default)} + + if data.get(partition) is not None: + d[partition] = data.get(partition) + + if preserve_attrs: + for d_k, d_v in data.items(): + if d_k != attr: + d[d_k] = d_v + + edmonds_add_edge(G, G_edge_index, u, v, key, **d) + + level = 0 # Stores the number of contracted nodes + + # These are the buckets from the paper. + # + # In the paper, G^i are modified versions of the original graph. + # D^i and E^i are the nodes and edges of the maximal edges that are + # consistent with G^i. In this implementation, D^i and E^i are stored + # together as the graph B^i. We will have strictly more B^i than the + # paper will have. + # + # Note that the data in graphs and branchings are tuples with the graph as + # the first element and the edge index as the second. + B = nx.MultiDiGraph() + B_edge_index = {} + graphs = [] # G^i list + branchings = [] # B^i list + selected_nodes = set() # D^i bucket + uf = nx.utils.UnionFind() + + # A list of lists of edge indices. Each list is a circuit for graph G^i. + # Note the edge list is not required to be a circuit in G^0. + circuits = [] + + # Stores the index of the minimum edge in the circuit found in G^i and B^i. + # The ordering of the edges seems to preserve the weight ordering from + # G^0. So even if the circuit does not form a circuit in G^0, the edge + # that is minimum in the circuit of G^i still corresponds to the minimum + # of those edges in G^0 (despite their weights being different). + minedge_circuit = [] + + ########################### + ### Algorithm Structure ### + ########################### + + # Each step listed in the algorithm is an inner function. Thus, the overall + # loop structure is: + # + # while True: + # step_I1() + # if cycle detected: + # step_I2() + # elif every node of G is in D and E is a branching: + # break + + ################################## + ### Algorithm Helper Functions ### + ################################## + + def edmonds_find_desired_edge(v): + """ + Find the edge directed towards v with maximal weight.
+ + If an edge partition exists in this graph, return the included + edge if it exists and never return any excluded edge. + + Note: There can only be one included edge for each vertex otherwise + the edge partition is empty. + + Parameters + ---------- + v : node + The node to search for the maximal weight incoming edge. + """ + edge = None + max_weight = -INF + for u, _, key, data in G.in_edges(v, data=True, keys=True): + # Skip excluded edges + if data.get(partition) == nx.EdgePartition.EXCLUDED: + continue + + new_weight = data[attr] + + # Return the included edge + if data.get(partition) == nx.EdgePartition.INCLUDED: + max_weight = new_weight + edge = (u, v, key, new_weight, data) + break + + # Find the best open edge + if new_weight > max_weight: + max_weight = new_weight + edge = (u, v, key, new_weight, data) + + return edge, max_weight + + def edmonds_step_I2(v, desired_edge, level): + """ + Perform step I2 from Edmonds' paper + + First, check if the last step I1 created a cycle. If it did not, do nothing. + If it did, store the cycle for later reference and contract it. + + Parameters + ---------- + v : node + The current node to consider + desired_edge : edge + The minimum desired edge to remove from the cycle. + level : int + The current level, i.e. the number of cycles that have already been removed. + """ + u = desired_edge[0] + + Q_nodes = nx.shortest_path(B, v, u) + Q_edges = [ + list(B[Q_nodes[i]][vv].keys())[0] for i, vv in enumerate(Q_nodes[1:]) + ] + Q_edges.append(desired_edge[2]) # Add the new edge key to complete the circuit + + # Get the edge in the circuit with the minimum weight. + # Also, save the incoming weights for each node. + minweight = INF + minedge = None + Q_incoming_weight = {} + for edge_key in Q_edges: + u, v, data = B_edge_index[edge_key] + w = data[attr] + # We cannot remove an included edge, even if it is the + # minimum edge in the circuit + Q_incoming_weight[v] = w + if data.get(partition) == nx.EdgePartition.INCLUDED: + continue + if w < minweight: + minweight = w + minedge = edge_key + + circuits.append(Q_edges) + minedge_circuit.append(minedge) + graphs.append((G.copy(), G_edge_index.copy())) + branchings.append((B.copy(), B_edge_index.copy())) + + # Mutate the graph to contract the circuit + new_node = new_node_base_name + str(level) + G.add_node(new_node) + new_edges = [] + for u, v, key, data in G.edges(data=True, keys=True): + if u in Q_incoming_weight: + if v in Q_incoming_weight: + # Circuit edge. For the moment do nothing, + # eventually it will be removed. + continue + else: + # Outgoing edge from a node in the circuit. + # Make it come from the new node instead + dd = data.copy() + new_edges.append((new_node, v, key, dd)) + else: + if v in Q_incoming_weight: + # Incoming edge to the circuit. + # Update it's weight + w = data[attr] + w += minweight - Q_incoming_weight[v] + dd = data.copy() + dd[attr] = w + new_edges.append((u, new_node, key, dd)) + else: + # Outside edge. No modification needed + continue + + for node in Q_nodes: + edmonds_remove_node(G, G_edge_index, node) + edmonds_remove_node(B, B_edge_index, node) + + selected_nodes.difference_update(set(Q_nodes)) + + for u, v, key, data in new_edges: + edmonds_add_edge(G, G_edge_index, u, v, key, **data) + if candidate_attr in data: + del data[candidate_attr] + edmonds_add_edge(B, B_edge_index, u, v, key, **data) + uf.union(u, v) + + def is_root(G, u, edgekeys): + """ + Returns True if `u` is a root node in G. 
+ + Node `u` is a root node if its in-degree over the specified edges is zero. + + Parameters + ---------- + G : Graph + The current graph. + u : node + The node in `G` to check if it is a root. + edgekeys : iterable of edges + The edges for which to check if `u` is a root of. + """ + if u not in G: + raise Exception(f"{u!r} not in G") + + for v in G.pred[u]: + for edgekey in G.pred[u][v]: + if edgekey in edgekeys: + return False, edgekey + else: + return True, None + + nodes = iter(list(G.nodes)) + while True: + try: + v = next(nodes) + except StopIteration: + # If there are no more new nodes to consider, then we should + # meet stopping condition (b) from the paper: + # (b) every node of G^i is in D^i and E^i is a branching + assert len(G) == len(B) + if len(B): + assert is_branching(B) + + graphs.append((G.copy(), G_edge_index.copy())) + branchings.append((B.copy(), B_edge_index.copy())) + circuits.append([]) + minedge_circuit.append(None) + + break + else: + ##################### + ### BEGIN STEP I1 ### + ##################### + + # This is a very simple step, so I don't think it needs a method of it's own + if v in selected_nodes: + continue + + selected_nodes.add(v) + B.add_node(v) + desired_edge, desired_edge_weight = edmonds_find_desired_edge(v) + + # There might be no desired edge if all edges are excluded or + # v is the last node to be added to B, the ultimate root of the branching + if desired_edge is not None and desired_edge_weight > 0: + u = desired_edge[0] + # Flag adding the edge will create a circuit before merging the two + # connected components of u and v in B + circuit = uf[u] == uf[v] + dd = {attr: desired_edge_weight} + if desired_edge[4].get(partition) is not None: + dd[partition] = desired_edge[4].get(partition) + + edmonds_add_edge(B, B_edge_index, u, v, desired_edge[2], **dd) + G[u][v][desired_edge[2]][candidate_attr] = True + uf.union(u, v) + + ################### + ### END STEP I1 ### + ################### + + ##################### + ### BEGIN STEP I2 ### + ##################### + + if circuit: + edmonds_step_I2(v, desired_edge, level) + nodes = iter(list(G.nodes())) + level += 1 + + ################### + ### END STEP I2 ### + ################### + + ##################### + ### BEGIN STEP I3 ### + ##################### + + # Create a new graph of the same class as the input graph + H = G_original.__class__() + + # Start with the branching edges in the last level. + edges = set(branchings[level][1]) + while level > 0: + level -= 1 + + # The current level is i, and we start counting from 0. + # + # We need the node at level i+1 that results from merging a circuit + # at level i. basename_0 is the first merged node and this happens + # at level 1. That is basename_0 is a node at level 1 that results + # from merging a circuit at level 0. + + merged_node = new_node_base_name + str(level) + circuit = circuits[level] + isroot, edgekey = is_root(graphs[level + 1][0], merged_node, edges) + edges.update(circuit) + + if isroot: + minedge = minedge_circuit[level] + if minedge is None: + raise Exception + + # Remove the edge in the cycle with minimum weight + edges.remove(minedge) + else: + # We have identified an edge at the next higher level that + # transitions into the merged node at this level. That edge + # transitions to some corresponding node at the current level. + # + # We want to remove an edge from the cycle that transitions + # into the corresponding node, otherwise the result would not + # be a branching. 
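# (Illustrative aside, not upstream text: suppose nodes a and b were
# contracted into merged_node, and the branching one level up enters
# merged_node via an edge whose original head is b. Then the circuit
# edge that also points at b is the one to drop, so that b keeps
# in-degree one and the expanded edge set remains a branching.)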
+ + G, G_edge_index = graphs[level] + target = G_edge_index[edgekey][1] + for edgekey in circuit: + u, v, data = G_edge_index[edgekey] + if v == target: + break + else: + raise Exception("Couldn't find edge incoming to merged node.") + + edges.remove(edgekey) + + H.add_nodes_from(G_original) + for edgekey in edges: + u, v, d = graphs[0][1][edgekey] + dd = {attr: d[attr]} + + if preserve_attrs: + for key, value in d.items(): + if key not in [attr, candidate_attr]: + dd[key] = value + + H.add_edge(u, v, **dd) + + ################### + ### END STEP I3 ### + ################### + + return H + + +@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True) +def minimum_branching( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + for _, _, d in G.edges(data=True): + d[attr] = -d.get(attr, default) + nx._clear_cache(G) + + B = maximum_branching(G, attr, default, preserve_attrs, partition) + + for _, _, d in G.edges(data=True): + d[attr] = -d.get(attr, default) + nx._clear_cache(G) + + for _, _, d in B.edges(data=True): + d[attr] = -d.get(attr, default) + nx._clear_cache(B) + + return B + + +@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True) +def minimal_branching( + G, /, *, attr="weight", default=1, preserve_attrs=False, partition=None +): + """ + Returns a minimal branching from `G`. + + A minimal branching is a branching similar to a minimal arborescence but + without the requirement that the result is actually a spanning arborescence. + This allows minimal branchings to be computed over graphs which may not + have an arborescence (such as graphs with multiple components). + + Parameters + ---------- + G : (multi)digraph-like + The graph to be searched. + attr : str + The edge attribute used in determining optimality. + default : float + The value of the edge attribute used if an edge does not have + the attribute `attr`. + preserve_attrs : bool + If True, preserve the other attributes of the original graph (that are not + passed to `attr`) + partition : str + The key for the edge attribute containing the partition + data on the graph. Edges can be included, excluded or open using the + `EdgePartition` enum. + + Returns + ------- + B : (multi)digraph-like + A minimal branching. + """ + max_weight = -INF + min_weight = INF + for _, _, w in G.edges(data=attr, default=default): + if w > max_weight: + max_weight = w + if w < min_weight: + min_weight = w + + for _, _, d in G.edges(data=True): + # Transform the weights so that the minimum weight is larger than + # the difference between the max and min weights. This is important + # in order to prevent the edge weights from becoming negative during + # computation + d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default) + nx._clear_cache(G) + + B = maximum_branching(G, attr, default, preserve_attrs, partition) + + # Reverse the weight transformations + for _, _, d in G.edges(data=True): + d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default) + nx._clear_cache(G) + + for _, _, d in B.edges(data=True): + d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default) + nx._clear_cache(B) + + return B + + +@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True) +def maximum_spanning_arborescence( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + # In order to use the same algorithm as the maximum branching, we need to adjust + # the weights of the graph.
The branching algorithm can choose to not include an + # edge if it doesn't help find a branching, mainly triggered by edges with negative + # weights. + # + # To prevent this from happening while trying to find a spanning arborescence, we + # just have to tweak the edge weights so that they are all positive and cannot + # become negative during the branching algorithm, find the maximum branching and + # then return them to their original values. + + min_weight = INF + max_weight = -INF + for _, _, w in G.edges(data=attr, default=default): + if w < min_weight: + min_weight = w + if w > max_weight: + max_weight = w + + for _, _, d in G.edges(data=True): + d[attr] = d.get(attr, default) - min_weight + 1 - (min_weight - max_weight) + nx._clear_cache(G) + + B = maximum_branching(G, attr, default, preserve_attrs, partition) + + for _, _, d in G.edges(data=True): + d[attr] = d.get(attr, default) + min_weight - 1 + (min_weight - max_weight) + nx._clear_cache(G) + + for _, _, d in B.edges(data=True): + d[attr] = d.get(attr, default) + min_weight - 1 + (min_weight - max_weight) + nx._clear_cache(B) + + if not is_arborescence(B): + raise nx.exception.NetworkXException("No maximum spanning arborescence in G.") + + return B + + +@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True) +def minimum_spanning_arborescence( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + B = minimal_branching( + G, + attr=attr, + default=default, + preserve_attrs=preserve_attrs, + partition=partition, + ) + + if not is_arborescence(B): + raise nx.exception.NetworkXException("No minimum spanning arborescence in G.") + + return B + + +docstring_branching = """ +Returns a {kind} {style} from G. + +Parameters +---------- +G : (multi)digraph-like + The graph to be searched. +attr : str + The edge attribute used to in determining optimality. +default : float + The value of the edge attribute used if an edge does not have + the attribute `attr`. +preserve_attrs : bool + If True, preserve the other attributes of the original graph (that are not + passed to `attr`) +partition : str + The key for the edge attribute containing the partition + data on the graph. Edges can be included, excluded or open using the + `EdgePartition` enum. + +Returns +------- +B : (multi)digraph-like + A {kind} {style}. +""" + +docstring_arborescence = ( + docstring_branching + + """ +Raises +------ +NetworkXException + If the graph does not contain a {kind} {style}. + +""" +) + +maximum_branching.__doc__ = docstring_branching.format( + kind="maximum", style="branching" +) + +minimum_branching.__doc__ = ( + docstring_branching.format(kind="minimum", style="branching") + + """ +See Also +-------- + minimal_branching +""" +) + +maximum_spanning_arborescence.__doc__ = docstring_arborescence.format( + kind="maximum", style="spanning arborescence" +) + +minimum_spanning_arborescence.__doc__ = docstring_arborescence.format( + kind="minimum", style="spanning arborescence" +) + + +class ArborescenceIterator: + """ + Iterate over all spanning arborescences of a graph in either increasing or + decreasing cost. + + Notes + ----- + This iterator uses the partition scheme from [1]_ (included edges, + excluded edges and open edges). It generates minimum spanning + arborescences using a modified Edmonds' Algorithm which respects the + partition of edges. For arborescences with the same weight, ties are + broken arbitrarily. + + References + ---------- + .. [1] G.K. Janssens, K. 
Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + + @dataclass(order=True) + class Partition: + """ + This dataclass represents a partition and stores a dict with the edge + data and the weight of the minimum spanning arborescence of the + partition dict. + """ + + mst_weight: float + partition_dict: dict = field(compare=False) + + def __copy__(self): + return ArborescenceIterator.Partition( + self.mst_weight, self.partition_dict.copy() + ) + + def __init__(self, G, weight="weight", minimum=True, init_partition=None): + """ + Initialize the iterator + + Parameters + ---------- + G : nx.DiGraph + The directed graph which we need to iterate trees over + + weight : String, default = "weight" + The edge attribute used to store the weight of the edge + + minimum : bool, default = True + Return the trees in increasing order if True and in decreasing + order if False. + + init_partition : tuple, default = None + In the case that certain edges have to be included or excluded from + the arborescences, `init_partition` should be in the form + `(included_edges, excluded_edges)` where each edge is a + `(u, v)`-tuple inside an iterable such as a list or set. + + """ + self.G = G.copy() + self.weight = weight + self.minimum = minimum + self.method = ( + minimum_spanning_arborescence if minimum else maximum_spanning_arborescence + ) + # Randomly create a key for an edge attribute to hold the partition data + self.partition_key = ( + "ArborescenceIterators super secret partition attribute name" + ) + if init_partition is not None: + partition_dict = {} + for e in init_partition[0]: + partition_dict[e] = nx.EdgePartition.INCLUDED + for e in init_partition[1]: + partition_dict[e] = nx.EdgePartition.EXCLUDED + self.init_partition = ArborescenceIterator.Partition(0, partition_dict) + else: + self.init_partition = None + + def __iter__(self): + """ + Returns + ------- + ArborescenceIterator + The iterator object for this graph + """ + self.partition_queue = PriorityQueue() + self._clear_partition(self.G) + + # Write the initial partition if it exists. + if self.init_partition is not None: + self._write_partition(self.init_partition) + + mst_weight = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ).size(weight=self.weight) + + self.partition_queue.put( + self.Partition( + mst_weight if self.minimum else -mst_weight, + ( + {} + if self.init_partition is None + else self.init_partition.partition_dict + ), + ) + ) + + return self + + def __next__(self): + """ + Returns + ------- + (multi)Graph + The spanning tree of next greatest weight, with ties broken + arbitrarily. + """ + if self.partition_queue.empty(): + del self.G, self.partition_queue + raise StopIteration + + partition = self.partition_queue.get() + self._write_partition(partition) + next_arborescence = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ) + self._partition(partition, next_arborescence) + + self._clear_partition(next_arborescence) + return next_arborescence + + def _partition(self, partition, partition_arborescence): + """ + Create new partitions based on the minimum spanning tree of the + current minimum partition. + + Parameters + ---------- + partition : Partition + The Partition instance used to generate the current minimum spanning + tree.
+ partition_arborescence : nx.Graph + The minimum spanning arborescence of the input partition. + """ + # create two new partitions with the data from the input partition dict + p1 = self.Partition(0, partition.partition_dict.copy()) + p2 = self.Partition(0, partition.partition_dict.copy()) + for e in partition_arborescence.edges: + # determine if the edge was open or included + if e not in partition.partition_dict: + # This is an open edge + p1.partition_dict[e] = nx.EdgePartition.EXCLUDED + p2.partition_dict[e] = nx.EdgePartition.INCLUDED + + self._write_partition(p1) + try: + p1_mst = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ) + + p1_mst_weight = p1_mst.size(weight=self.weight) + p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight + self.partition_queue.put(p1.__copy__()) + except nx.NetworkXException: + pass + + p1.partition_dict = p2.partition_dict.copy() + + def _write_partition(self, partition): + """ + Writes the desired partition into the graph to calculate the minimum + spanning tree. Also, if one incoming edge is included, mark all others + as excluded so that if that vertex is merged during Edmonds' algorithm + we cannot still pick another of that vertex's included edges. + + Parameters + ---------- + partition : Partition + A Partition dataclass describing a partition on the edges of the + graph. + """ + for u, v, d in self.G.edges(data=True): + if (u, v) in partition.partition_dict: + d[self.partition_key] = partition.partition_dict[(u, v)] + else: + d[self.partition_key] = nx.EdgePartition.OPEN + nx._clear_cache(self.G) + + for n in self.G: + included_count = 0 + excluded_count = 0 + for u, v, d in self.G.in_edges(nbunch=n, data=True): + if d.get(self.partition_key) == nx.EdgePartition.INCLUDED: + included_count += 1 + elif d.get(self.partition_key) == nx.EdgePartition.EXCLUDED: + excluded_count += 1 + # Check that if there is an included edge, all other incoming ones + # are excluded. If not, fix it! + if included_count == 1 and excluded_count != self.G.in_degree(n) - 1: + for u, v, d in self.G.in_edges(nbunch=n, data=True): + if d.get(self.partition_key) != nx.EdgePartition.INCLUDED: + d[self.partition_key] = nx.EdgePartition.EXCLUDED + + def _clear_partition(self, G): + """ + Removes partition data from the graph + """ + for u, v, d in G.edges(data=True): + if self.partition_key in d: + del d[self.partition_key] + nx._clear_cache(self.G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/coding.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/coding.py new file mode 100644 index 0000000..276c87d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/coding.py @@ -0,0 +1,413 @@ +"""Functions for encoding and decoding trees. + +Since a tree is a highly restricted form of graph, it can be represented +concisely in several ways. This module includes functions for encoding +and decoding trees in the form of nested tuples and Prüfer +sequences. The former requires a rooted tree, whereas the latter can be +applied to unrooted trees. Furthermore, there is a bijection from Prüfer +sequences to labeled trees.
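For instance (an illustrative round trip using the functions defined
below):

>>> star = nx.Graph([(0, 3), (1, 3), (2, 3)])
>>> nx.to_prufer_sequence(star)
[3, 3]
>>> sorted(nx.from_prufer_sequence([3, 3]).edges())
[(0, 3), (1, 3), (2, 3)]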
+ +""" + +from collections import Counter +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "from_nested_tuple", + "from_prufer_sequence", + "NotATree", + "to_nested_tuple", + "to_prufer_sequence", +] + + +class NotATree(nx.NetworkXException): + """Raised when a function expects a tree (that is, a connected + undirected graph with no cycles) but gets a non-tree graph as input + instead. + + """ + + +@not_implemented_for("directed") +@nx._dispatchable(graphs="T") +def to_nested_tuple(T, root, canonical_form=False): + """Returns a nested tuple representation of the given tree. + + The nested tuple representation of a tree is defined + recursively. The tree with one node and no edges is represented by + the empty tuple, ``()``. A tree with ``k`` subtrees is represented + by a tuple of length ``k`` in which each element is the nested tuple + representation of a subtree. + + Parameters + ---------- + T : NetworkX graph + An undirected graph object representing a tree. + + root : node + The node in ``T`` to interpret as the root of the tree. + + canonical_form : bool + If ``True``, each tuple is sorted so that the function returns + a canonical form for rooted trees. This means "lighter" subtrees + will appear as nested tuples before "heavier" subtrees. In this + way, each isomorphic rooted tree has the same nested tuple + representation. + + Returns + ------- + tuple + A nested tuple representation of the tree. + + Notes + ----- + This function is *not* the inverse of :func:`from_nested_tuple`; the + only guarantee is that the rooted trees are isomorphic. + + See also + -------- + from_nested_tuple + to_prufer_sequence + + Examples + -------- + The tree need not be a balanced binary tree:: + + >>> T = nx.Graph() + >>> T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + >>> T.add_edges_from([(1, 4), (1, 5)]) + >>> T.add_edges_from([(3, 6), (3, 7)]) + >>> root = 0 + >>> nx.to_nested_tuple(T, root) + (((), ()), (), ((), ())) + + Continuing the above example, if ``canonical_form`` is ``True``, the + nested tuples will be sorted:: + + >>> nx.to_nested_tuple(T, root, canonical_form=True) + ((), ((), ()), ((), ())) + + Even the path graph can be interpreted as a tree:: + + >>> T = nx.path_graph(4) + >>> root = 0 + >>> nx.to_nested_tuple(T, root) + ((((),),),) + + """ + + def _make_tuple(T, root, _parent): + """Recursively compute the nested tuple representation of the + given rooted tree. + + ``_parent`` is the parent node of ``root`` in the supertree in + which ``T`` is a subtree, or ``None`` if ``root`` is the root of + the supertree. This argument is used to determine which + neighbors of ``root`` are children and which is the parent. + + """ + # Get the neighbors of `root` that are not the parent node. We + # are guaranteed that `root` is always in `T` by construction. + children = set(T[root]) - {_parent} + if len(children) == 0: + return () + nested = (_make_tuple(T, v, root) for v in children) + if canonical_form: + nested = sorted(nested) + return tuple(nested) + + # Do some sanity checks on the input. + if not nx.is_tree(T): + raise nx.NotATree("provided graph is not a tree") + if root not in T: + raise nx.NodeNotFound(f"Graph {T} contains no node {root}") + + return _make_tuple(T, root, None) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_nested_tuple(sequence, sensible_relabeling=False): + """Returns the rooted tree corresponding to the given nested tuple. 
+ + The nested tuple representation of a tree is defined + recursively. The tree with one node and no edges is represented by + the empty tuple, ``()``. A tree with ``k`` subtrees is represented + by a tuple of length ``k`` in which each element is the nested tuple + representation of a subtree. + + Parameters + ---------- + sequence : tuple + A nested tuple representing a rooted tree. + + sensible_relabeling : bool + Whether to relabel the nodes of the tree so that nodes are + labeled in increasing order according to their breadth-first + search order from the root node. + + Returns + ------- + NetworkX graph + The tree corresponding to the given nested tuple, whose root + node is node 0. If ``sensible_relabeling`` is ``True``, nodes will + be labeled in breadth-first search order starting from the root + node. + + Notes + ----- + This function is *not* the inverse of :func:`to_nested_tuple`; the + only guarantee is that the rooted trees are isomorphic. + + See also + -------- + to_nested_tuple + from_prufer_sequence + + Examples + -------- + Sensible relabeling ensures that the nodes are labeled from the root + starting at 0:: + + >>> balanced = (((), ()), ((), ())) + >>> T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + >>> edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + >>> all((u, v) in T.edges() or (v, u) in T.edges() for (u, v) in edges) + True + + """ + + def _make_tree(sequence): + """Recursively creates a tree from the given sequence of nested + tuples. + + This function employs the :func:`~networkx.tree.join` function + to recursively join subtrees into a larger tree. + + """ + # The empty sequence represents the empty tree, which is the + # (unique) graph with a single node. We mark the single node + # with an attribute that indicates that it is the root of the + # graph. + if len(sequence) == 0: + return nx.empty_graph(1) + # For a nonempty sequence, get the subtrees for each child + # sequence and join all the subtrees at their roots. After + # joining the subtrees, the root is node 0. + return nx.tree.join_trees([(_make_tree(child), 0) for child in sequence]) + + # Make the tree and remove the `is_root` node attribute added by the + # helper function. + T = _make_tree(sequence) + if sensible_relabeling: + # Relabel the nodes according to their breadth-first search + # order, starting from the root node (that is, the node 0). + bfs_nodes = chain([0], (v for u, v in nx.bfs_edges(T, 0))) + labels = {v: i for i, v in enumerate(bfs_nodes)} + # We would like to use `copy=False`, but `relabel_nodes` doesn't + # allow a relabel mapping that can't be topologically sorted. + T = nx.relabel_nodes(T, labels) + return T + + +@not_implemented_for("directed") +@nx._dispatchable(graphs="T") +def to_prufer_sequence(T): + r"""Returns the Prüfer sequence of the given tree. + + A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and + *n* - 1, inclusive. The tree corresponding to a given Prüfer + sequence can be recovered by repeatedly joining a node in the + sequence with a node with the smallest potential degree according to + the sequence. + + Parameters + ---------- + T : NetworkX graph + An undirected graph object representing a tree. + + Returns + ------- + list + The Prüfer sequence of the given tree. + + Raises + ------ + NetworkXPointlessConcept + If the number of nodes in `T` is less than two. + + NotATree + If `T` is not a tree. + + KeyError + If the set of nodes in `T` is not {0, …, *n* - 1}.
+ + Notes + ----- + There is a bijection from labeled trees to Prüfer sequences. This + function is the inverse of the :func:`from_prufer_sequence` + function. + + Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead + of from 0 to *n* - 1. This function requires nodes to be labeled in + the latter form. You can use :func:`~networkx.relabel_nodes` to + relabel the nodes of your tree to the appropriate format. + + This implementation is from [1]_ and has a running time of + $O(n)$. + + See also + -------- + to_nested_tuple + from_prufer_sequence + + References + ---------- + .. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu. + "An optimal algorithm for Prufer codes." + *Journal of Software Engineering and Applications* 2.02 (2009): 111. + + + Examples + -------- + There is a bijection between Prüfer sequences and labeled trees, so + this function is the inverse of the :func:`from_prufer_sequence` + function: + + >>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + >>> tree = nx.Graph(edges) + >>> sequence = nx.to_prufer_sequence(tree) + >>> sequence + [3, 3, 3, 4] + >>> tree2 = nx.from_prufer_sequence(sequence) + >>> list(tree2.edges()) == edges + True + + """ + # Perform some sanity checks on the input. + n = len(T) + if n < 2: + msg = "Prüfer sequence undefined for trees with fewer than two nodes" + raise nx.NetworkXPointlessConcept(msg) + if not nx.is_tree(T): + raise nx.NotATree("provided graph is not a tree") + if set(T) != set(range(n)): + raise KeyError("tree must have node labels {0, ..., n - 1}") + + degree = dict(T.degree()) + + def parents(u): + return next(v for v in T[u] if degree[v] > 1) + + index = u = next(k for k in range(n) if degree[k] == 1) + result = [] + for i in range(n - 2): + v = parents(u) + result.append(v) + degree[v] -= 1 + if v < index and degree[v] == 1: + u = v + else: + index = u = next(k for k in range(index + 1, n) if degree[k] == 1) + return result + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_prufer_sequence(sequence): + r"""Returns the tree corresponding to the given Prüfer sequence. + + A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and + *n* - 1, inclusive. The tree corresponding to a given Prüfer + sequence can be recovered by repeatedly joining a node in the + sequence with a node with the smallest potential degree according to + the sequence. + + Parameters + ---------- + sequence : list + A Prüfer sequence, which is a list of *n* - 2 integers between + zero and *n* - 1, inclusive. + + Returns + ------- + NetworkX graph + The tree corresponding to the given Prüfer sequence. + + Raises + ------ + NetworkXError + If the Prüfer sequence is not valid. + + Notes + ----- + There is a bijection from labeled trees to Prüfer sequences. This + function is the inverse of the :func:`to_prufer_sequence` function. + + Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead + of from 0 to *n* - 1. This function requires nodes to be labeled in + the latter form. You can use :func:`networkx.relabel_nodes` to + relabel the nodes of your tree to the appropriate format. + + This implementation is from [1]_ and has a running time of + $O(n)$. + + References + ---------- + .. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu. + "An optimal algorithm for Prufer codes." + *Journal of Software Engineering and Applications* 2.02 (2009): 111.
+ + + See also + -------- + from_nested_tuple + to_prufer_sequence + + Examples + -------- + There is a bijection between Prüfer sequences and labeled trees, so + this function is the inverse of the :func:`to_prufer_sequence` + function: + + >>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + >>> tree = nx.Graph(edges) + >>> sequence = nx.to_prufer_sequence(tree) + >>> sequence + [3, 3, 3, 4] + >>> tree2 = nx.from_prufer_sequence(sequence) + >>> list(tree2.edges()) == edges + True + + """ + n = len(sequence) + 2 + # `degree` stores the remaining degree (plus one) for each node. The + # degree of a node in the decoded tree is one more than the number + # of times it appears in the code. + degree = Counter(chain(sequence, range(n))) + T = nx.empty_graph(n) + # `not_orphaned` is the set of nodes that have a parent in the + # tree. After the loop, there should be exactly two nodes that are + # not in this set. + not_orphaned = set() + index = u = next(k for k in range(n) if degree[k] == 1) + for v in sequence: + # check the validity of the prufer sequence + if v < 0 or v > n - 1: + raise nx.NetworkXError( + f"Invalid Prufer sequence: Values must be between 0 and {n - 1}, got {v}" + ) + T.add_edge(u, v) + not_orphaned.add(u) + degree[v] -= 1 + if v < index and degree[v] == 1: + u = v + else: + index = u = next(k for k in range(index + 1, n) if degree[k] == 1) + # At this point, there must be exactly two orphaned nodes; join them. + orphans = set(T) - not_orphaned + u, v = orphans + T.add_edge(u, v) + return T diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/decomposition.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/decomposition.py new file mode 100644 index 0000000..c8b8f24 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/decomposition.py @@ -0,0 +1,88 @@ +r"""Function for computing a junction tree of a graph.""" + +from itertools import combinations + +import networkx as nx +from networkx.algorithms import chordal_graph_cliques, complete_to_chordal_graph, moral +from networkx.utils import not_implemented_for + +__all__ = ["junction_tree"] + + +@not_implemented_for("multigraph") +@nx._dispatchable(returns_graph=True) +def junction_tree(G): + r"""Returns a junction tree of a given graph. + + A junction tree (or clique tree) is constructed from a (un)directed graph G. + The tree is constructed based on a moralized and triangulated version of G. + The tree's nodes consist of maximal cliques and sepsets of the revised graph. + The sepset of two cliques is the intersection of the nodes of these cliques, + e.g. the sepset of (A,B,C) and (A,C,E,F) is (A,C). These nodes are often called + "variables" in this literature. The tree is bipartite with each sepset + connected to its two cliques. + + Junction Trees are not unique as the order of clique consideration determines + which sepsets are included. + + The junction tree algorithm consists of five steps [1]_: + + 1. Moralize the graph + 2. Triangulate the graph + 3. Find maximal cliques + 4. Build the tree from cliques, connecting cliques with shared + nodes, set edge-weight to number of shared variables + 5. Find maximum spanning tree + + + Parameters + ---------- + G : networkx.Graph + Directed or undirected graph. + + Returns + ------- + junction_tree : networkx.Graph + The corresponding junction tree of `G`. + + Raises + ------ + NetworkXNotImplemented + Raised if `G` is an instance of `MultiGraph` or `MultiDiGraph`. + + References + ---------- + .. 
[1] Junction tree algorithm: + https://en.wikipedia.org/wiki/Junction_tree_algorithm + + .. [2] Finn V. Jensen and Frank Jensen. 1994. Optimal + junction trees. In Proceedings of the Tenth international + conference on Uncertainty in artificial intelligence (UAI’94). + Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 360–366. + """ + + clique_graph = nx.Graph() + + if G.is_directed(): + G = moral.moral_graph(G) + chordal_graph, _ = complete_to_chordal_graph(G) + + cliques = [tuple(sorted(i)) for i in chordal_graph_cliques(chordal_graph)] + clique_graph.add_nodes_from(cliques, type="clique") + + for edge in combinations(cliques, 2): + set_edge_0 = set(edge[0]) + set_edge_1 = set(edge[1]) + if not set_edge_0.isdisjoint(set_edge_1): + sepset = tuple(sorted(set_edge_0.intersection(set_edge_1))) + clique_graph.add_edge(edge[0], edge[1], weight=len(sepset), sepset=sepset) + + junction_tree = nx.maximum_spanning_tree(clique_graph) + + for edge in list(junction_tree.edges(data=True)): + junction_tree.add_node(edge[2]["sepset"], type="sepset") + junction_tree.add_edge(edge[0], edge[2]["sepset"]) + junction_tree.add_edge(edge[1], edge[2]["sepset"]) + junction_tree.remove_edge(edge[0], edge[1]) + + return junction_tree diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/mst.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/mst.py new file mode 100644 index 0000000..83402ec --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/mst.py @@ -0,0 +1,1283 @@ +""" +Algorithms for calculating min/max spanning trees/forests. + +""" + +from dataclasses import dataclass, field +from enum import Enum +from heapq import heappop, heappush +from itertools import count +from math import isnan +from operator import itemgetter +from queue import PriorityQueue + +import networkx as nx +from networkx.utils import UnionFind, not_implemented_for, py_random_state + +__all__ = [ + "minimum_spanning_edges", + "maximum_spanning_edges", + "minimum_spanning_tree", + "maximum_spanning_tree", + "number_of_spanning_trees", + "random_spanning_tree", + "partition_spanning_tree", + "EdgePartition", + "SpanningTreeIterator", +] + + +class EdgePartition(Enum): + """ + An enum to store the state of an edge partition. The enum is written to the + edges of a graph before being passed to `kruskal_mst_edges`. Options are: + + - EdgePartition.OPEN + - EdgePartition.INCLUDED + - EdgePartition.EXCLUDED + """ + + OPEN = 0 + INCLUDED = 1 + EXCLUDED = 2 + + +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data") +def boruvka_mst_edges( + G, minimum=True, weight="weight", keys=False, data=True, ignore_nan=False +): + """Iterate over edges of Borůvka's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The edges of `G` must have distinct weights, + otherwise the edges may not form a tree. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + This argument is ignored since this function is not + implemented for multigraphs; it exists only for consistency + with the other minimum spanning tree functions. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`.
+
+    ignore_nan : bool (default: False)
+        If a NaN is found as an edge weight normally an exception is raised.
+        If `ignore_nan is True` then that edge is ignored instead.
+
+    """
+    # Initialize a forest, assuming initially that it is the discrete
+    # partition of the nodes of the graph.
+    forest = UnionFind(G)
+
+    def best_edge(component):
+        """Returns the optimum (minimum or maximum) edge on the edge
+        boundary of the given set of nodes.
+
+        A return value of ``None`` indicates an empty boundary.
+
+        """
+        sign = 1 if minimum else -1
+        minwt = float("inf")
+        boundary = None
+        for e in nx.edge_boundary(G, component, data=True):
+            wt = e[-1].get(weight, 1) * sign
+            if isnan(wt):
+                if ignore_nan:
+                    continue
+                msg = f"NaN found as an edge weight. Edge {e}"
+                raise ValueError(msg)
+            if wt < minwt:
+                minwt = wt
+                boundary = e
+        return boundary
+
+    # Determine the optimum edge in the edge boundary of each component
+    # in the forest.
+    best_edges = (best_edge(component) for component in forest.to_sets())
+    best_edges = [edge for edge in best_edges if edge is not None]
+    # If each entry was ``None``, that means the graph was disconnected,
+    # so we are done generating the forest.
+    while best_edges:
+        # Determine the optimum edge in the edge boundary of each
+        # component in the forest.
+        #
+        # This must be a sequence, not an iterator. In this list, the
+        # same edge may appear twice, in different orientations (but
+        # that's okay, since a union operation will be called on the
+        # endpoints the first time it is seen, but not the second time).
+        #
+        # Any ``None`` indicates that the edge boundary for that
+        # component was empty, so that part of the forest has been
+        # completed.
+        #
+        # TODO This can be parallelized, both in the outer loop over
+        # each component in the forest and in the computation of the
+        # minimum. (Same goes for the identical lines outside the loop.)
+        best_edges = (best_edge(component) for component in forest.to_sets())
+        best_edges = [edge for edge in best_edges if edge is not None]
+        # Join trees in the forest using the best edges, and yield that
+        # edge, since it is part of the spanning tree.
+        #
+        # TODO This loop can be parallelized, to an extent (the union
+        # operation must be atomic).
+        for u, v, d in best_edges:
+            if forest[u] != forest[v]:
+                if data:
+                    yield u, v, d
+                else:
+                    yield u, v
+                forest.union(u, v)
+
+
+@nx._dispatchable(
+    edge_attrs={"weight": None, "partition": None}, preserve_edge_attrs="data"
+)
+def kruskal_mst_edges(
+    G, minimum, weight="weight", keys=True, data=True, ignore_nan=False, partition=None
+):
+    """
+    Iterate over edges of a Kruskal's algorithm min/max spanning tree.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        The graph holding the tree of interest.
+
+    minimum : bool (default: True)
+        Find the minimum (True) or maximum (False) spanning tree.
+
+    weight : string (default: 'weight')
+        The name of the edge attribute holding the edge weights.
+
+    keys : bool (default: True)
+        If `G` is a multigraph, `keys` controls whether edge keys are yielded.
+        Otherwise `keys` is ignored.
+
+    data : bool (default: True)
+        Flag for whether to yield edge attribute dicts.
+        If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
+        If False, yield edges `(u, v)`.
+
+    ignore_nan : bool (default: False)
+        If a NaN is found as an edge weight normally an exception is raised.
+        If `ignore_nan is True` then that edge is ignored instead.
+
+    partition : string (default: None)
+        The name of the edge attribute holding the partition data, if it
+        exists. Partition data is written to the edges using the
+        `EdgePartition` enum. If a partition exists, all included edges and
+        none of the excluded edges will appear in the final tree. Open edges
+        may or may not be used.
+
+    Yields
+    ------
+    edge tuple
+        The edges as discovered by Kruskal's method. Each edge can
+        take the following forms: `(u, v)`, `(u, v, d)` or `(u, v, k, d)`
+        depending on the `keys` and `data` parameters.
+    """
+    subtrees = UnionFind()
+    if G.is_multigraph():
+        edges = G.edges(keys=True, data=True)
+    else:
+        edges = G.edges(data=True)
+
+    """
+    Sort the edges of the graph with respect to the partition data.
+    Edges are returned in the following order:
+
+    * Included edges
+    * Open edges from smallest to largest weight
+    * Excluded edges
+    """
+    included_edges = []
+    open_edges = []
+    for e in edges:
+        d = e[-1]
+        wt = d.get(weight, 1)
+        if isnan(wt):
+            if ignore_nan:
+                continue
+            raise ValueError(f"NaN found as an edge weight. Edge {e}")
+
+        edge = (wt,) + e
+        if d.get(partition) == EdgePartition.INCLUDED:
+            included_edges.append(edge)
+        elif d.get(partition) == EdgePartition.EXCLUDED:
+            continue
+        else:
+            open_edges.append(edge)
+
+    if minimum:
+        sorted_open_edges = sorted(open_edges, key=itemgetter(0))
+    else:
+        sorted_open_edges = sorted(open_edges, key=itemgetter(0), reverse=True)
+
+    # Condense the lists into one
+    included_edges.extend(sorted_open_edges)
+    sorted_edges = included_edges
+    del open_edges, sorted_open_edges, included_edges
+
+    # Multigraphs need to handle edge keys in addition to edge data.
+    if G.is_multigraph():
+        for wt, u, v, k, d in sorted_edges:
+            if subtrees[u] != subtrees[v]:
+                if keys:
+                    if data:
+                        yield u, v, k, d
+                    else:
+                        yield u, v, k
+                else:
+                    if data:
+                        yield u, v, d
+                    else:
+                        yield u, v
+                subtrees.union(u, v)
+    else:
+        for wt, u, v, d in sorted_edges:
+            if subtrees[u] != subtrees[v]:
+                if data:
+                    yield u, v, d
+                else:
+                    yield u, v
+                subtrees.union(u, v)
+
+
+@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
+def prim_mst_edges(G, minimum, weight="weight", keys=True, data=True, ignore_nan=False):
+    """Iterate over edges of Prim's algorithm min/max spanning tree.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        The graph holding the tree of interest.
+
+    minimum : bool (default: True)
+        Find the minimum (True) or maximum (False) spanning tree.
+
+    weight : string (default: 'weight')
+        The name of the edge attribute holding the edge weights.
+
+    keys : bool (default: True)
+        If `G` is a multigraph, `keys` controls whether edge keys are yielded.
+        Otherwise `keys` is ignored.
+
+    data : bool (default: True)
+        Flag for whether to yield edge attribute dicts.
+        If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
+        If False, yield edges `(u, v)`.
+
+    ignore_nan : bool (default: False)
+        If a NaN is found as an edge weight normally an exception is raised.
+        If `ignore_nan is True` then that edge is ignored instead.
+
+    """
+    is_multigraph = G.is_multigraph()
+
+    nodes = set(G)
+    c = count()
+
+    sign = 1 if minimum else -1
+
+    while nodes:
+        u = nodes.pop()
+        frontier = []
+        visited = {u}
+        if is_multigraph:
+            for v, keydict in G.adj[u].items():
+                for k, d in keydict.items():
+                    wt = d.get(weight, 1) * sign
+                    if isnan(wt):
+                        if ignore_nan:
+                            continue
+                        msg = f"NaN found as an edge weight. Edge {(u, v, k, d)}"
+                        raise ValueError(msg)
+                    heappush(frontier, (wt, next(c), u, v, k, d))
+        else:
+            for v, d in G.adj[u].items():
+                wt = d.get(weight, 1) * sign
+                if isnan(wt):
+                    if ignore_nan:
+                        continue
+                    msg = f"NaN found as an edge weight. Edge {(u, v, d)}"
+                    raise ValueError(msg)
+                heappush(frontier, (wt, next(c), u, v, d))
+        while nodes and frontier:
+            if is_multigraph:
+                W, _, u, v, k, d = heappop(frontier)
+            else:
+                W, _, u, v, d = heappop(frontier)
+            if v in visited or v not in nodes:
+                continue
+            # Multigraphs need to handle edge keys in addition to edge data.
+            if is_multigraph and keys:
+                if data:
+                    yield u, v, k, d
+                else:
+                    yield u, v, k
+            else:
+                if data:
+                    yield u, v, d
+                else:
+                    yield u, v
+            # update frontier
+            visited.add(v)
+            nodes.discard(v)
+            if is_multigraph:
+                for w, keydict in G.adj[v].items():
+                    if w in visited:
+                        continue
+                    for k2, d2 in keydict.items():
+                        new_weight = d2.get(weight, 1) * sign
+                        if isnan(new_weight):
+                            if ignore_nan:
+                                continue
+                            msg = f"NaN found as an edge weight. Edge {(v, w, k2, d2)}"
+                            raise ValueError(msg)
+                        heappush(frontier, (new_weight, next(c), v, w, k2, d2))
+            else:
+                for w, d2 in G.adj[v].items():
+                    if w in visited:
+                        continue
+                    new_weight = d2.get(weight, 1) * sign
+                    if isnan(new_weight):
+                        if ignore_nan:
+                            continue
+                        msg = f"NaN found as an edge weight. Edge {(v, w, d2)}"
+                        raise ValueError(msg)
+                    heappush(frontier, (new_weight, next(c), v, w, d2))
+
+
+ALGORITHMS = {
+    "boruvka": boruvka_mst_edges,
+    "borůvka": boruvka_mst_edges,
+    "kruskal": kruskal_mst_edges,
+    "prim": prim_mst_edges,
+}
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
+def minimum_spanning_edges(
+    G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False
+):
+    """Generate edges in a minimum spanning forest of an undirected
+    weighted graph.
+
+    A minimum spanning tree is a subgraph of the graph (a tree)
+    with the minimum sum of edge weights. A spanning forest is a
+    union of the spanning trees for each connected component of the graph.
+
+    Parameters
+    ----------
+    G : undirected Graph
+        An undirected graph. If `G` is connected, then the algorithm finds a
+        spanning tree. Otherwise, a spanning forest is found.
+
+    algorithm : string
+        The algorithm to use when finding a minimum spanning tree. Valid
+        choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.
+
+    weight : string
+        Edge data key to use for weight (default 'weight').
+
+    keys : bool
+        Whether to yield edge keys in multigraphs in addition to the edge.
+        If `G` is not a multigraph, this is ignored.
+
+    data : bool, optional
+        If True yield the edge data along with the edge.
+
+    ignore_nan : bool (default: False)
+        If a NaN is found as an edge weight normally an exception is raised.
+        If `ignore_nan is True` then that edge is ignored instead.
+
+    Returns
+    -------
+    edges : iterator
+       An iterator over edges in a minimum spanning tree of `G`.
+       Edges connecting nodes `u` and `v` are represented as tuples:
+       `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`
+
+       If `G` is a multigraph, `keys` indicates whether the edge key `k` will
+       be reported in the third position in the edge tuple. `data` indicates
+       whether the edge datadict `d` will appear at the end of the edge tuple.
+
+       If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
+       or `(u, v)` if `data` is False.
+ + Examples + -------- + >>> from networkx.algorithms import tree + + Find minimum spanning edges by Kruskal's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.minimum_spanning_edges(G, algorithm="kruskal", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [1, 2], [2, 3]] + + Find minimum spanning edges by Prim's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.minimum_spanning_edges(G, algorithm="prim", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [1, 2], [2, 3]] + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + Modified code from David Eppstein, April 2006 + http://www.ics.uci.edu/~eppstein/PADS/ + + """ + try: + algo = ALGORITHMS[algorithm] + except KeyError as err: + msg = f"{algorithm} is not a valid choice for an algorithm." + raise ValueError(msg) from err + + return algo( + G, minimum=True, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan + ) + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data") +def maximum_spanning_edges( + G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False +): + """Generate edges in a maximum spanning forest of an undirected + weighted graph. + + A maximum spanning tree is a subgraph of the graph (a tree) + with the maximum possible sum of edge weights. A spanning forest is a + union of the spanning trees for each connected component of the graph. + + Parameters + ---------- + G : undirected Graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + algorithm : string + The algorithm to use when finding a maximum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'. + + weight : string + Edge data key to use for weight (default 'weight'). + + keys : bool + Whether to yield edge key in multigraphs in addition to the edge. + If `G` is not a multigraph, this is ignored. + + data : bool, optional + If True yield the edge data along with the edge. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + edges : iterator + An iterator over edges in a maximum spanning tree of `G`. + Edges connecting nodes `u` and `v` are represented as tuples: + `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)` + + If `G` is a multigraph, `keys` indicates whether the edge key `k` will + be reported in the third position in the edge tuple. `data` indicates + whether the edge datadict `d` will appear at the end of the edge tuple. + + If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True + or `(u, v)` if `data` is False. 
+ + Examples + -------- + >>> from networkx.algorithms import tree + + Find maximum spanning edges by Kruskal's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.maximum_spanning_edges(G, algorithm="kruskal", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [0, 3], [1, 2]] + + Find maximum spanning edges by Prim's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) # assign weight 2 to edge 0-3 + >>> mst = tree.maximum_spanning_edges(G, algorithm="prim", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [0, 3], [2, 3]] + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + Modified code from David Eppstein, April 2006 + http://www.ics.uci.edu/~eppstein/PADS/ + """ + try: + algo = ALGORITHMS[algorithm] + except KeyError as err: + msg = f"{algorithm} is not a valid choice for an algorithm." + raise ValueError(msg) from err + + return algo( + G, minimum=False, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan + ) + + +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def minimum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False): + """Returns a minimum spanning tree or forest on an undirected graph `G`. + + Parameters + ---------- + G : undirected graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + weight : str + Data key to use for edge weights. + + algorithm : string + The algorithm to use when finding a minimum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is + 'kruskal'. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + G : NetworkX Graph + A minimum spanning tree or forest. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> T = nx.minimum_spanning_tree(G) + >>> sorted(T.edges(data=True)) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + There may be more than one tree with the same minimum or maximum weight. + See :mod:`networkx.tree.recognition` for more detailed definitions. + + Isolated nodes with self-loops are in the tree as edgeless isolated nodes. + + """ + edges = minimum_spanning_edges( + G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan + ) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def partition_spanning_tree( + G, minimum=True, weight="weight", partition="partition", ignore_nan=False +): + """ + Find a spanning tree while respecting a partition of edges. + + Edges can be flagged as either `INCLUDED` which are required to be in the + returned tree, `EXCLUDED`, which cannot be in the returned tree and `OPEN`. 
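+
+    A minimal sketch of pinning an edge into the returned tree, on a
+    unit-weight triangle (the attribute name ``"partition"`` below is an
+    arbitrary choice, not a required name):
+
+    >>> G = nx.cycle_graph(3)
+    >>> G.edges[0, 2]["partition"] = EdgePartition.INCLUDED
+    >>> sorted(partition_spanning_tree(G, partition="partition").edges)
+    [(0, 1), (0, 2)]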
+
+    This is used in the SpanningTreeIterator to create new partitions
+    following the algorithm of Sörensen and Janssens [1]_.
+
+    Parameters
+    ----------
+    G : undirected graph
+        An undirected graph.
+
+    minimum : bool (default: True)
+        Determines whether the returned tree is the minimum spanning tree of
+        the partition or the maximum one.
+
+    weight : str
+        Data key to use for edge weights.
+
+    partition : str
+        The key for the edge attribute containing the partition
+        data on the graph. Edges can be included, excluded or open using the
+        `EdgePartition` enum.
+
+    ignore_nan : bool (default: False)
+        If a NaN is found as an edge weight normally an exception is raised.
+        If `ignore_nan is True` then that edge is ignored instead.
+
+
+    Returns
+    -------
+    G : NetworkX Graph
+        A minimum spanning tree using all of the included edges in the graph
+        and none of the excluded edges.
+
+    References
+    ----------
+    .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning
+           trees in order of increasing cost, Pesquisa Operacional, 2005-08,
+           Vol. 25 (2), p. 219-229,
+           https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
+    """
+    edges = kruskal_mst_edges(
+        G,
+        minimum,
+        weight,
+        keys=True,
+        data=True,
+        ignore_nan=ignore_nan,
+        partition=partition,
+    )
+    T = G.__class__()  # Same graph class as G
+    T.graph.update(G.graph)
+    T.add_nodes_from(G.nodes.items())
+    T.add_edges_from(edges)
+    return T
+
+
+@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
+def maximum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False):
+    """Returns a maximum spanning tree or forest on an undirected graph `G`.
+
+    Parameters
+    ----------
+    G : undirected graph
+        An undirected graph. If `G` is connected, then the algorithm finds a
+        spanning tree. Otherwise, a spanning forest is found.
+
+    weight : str
+        Data key to use for edge weights.
+
+    algorithm : string
+        The algorithm to use when finding a maximum spanning tree. Valid
+        choices are 'kruskal', 'prim', or 'boruvka'. The default is
+        'kruskal'.
+
+    ignore_nan : bool (default: False)
+        If a NaN is found as an edge weight normally an exception is raised.
+        If `ignore_nan is True` then that edge is ignored instead.
+
+
+    Returns
+    -------
+    G : NetworkX Graph
+       A maximum spanning tree or forest.
+
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(4)
+    >>> G.add_edge(0, 3, weight=2)
+    >>> T = nx.maximum_spanning_tree(G)
+    >>> sorted(T.edges(data=True))
+    [(0, 1, {}), (0, 3, {'weight': 2}), (1, 2, {})]
+
+
+    Notes
+    -----
+    For Borůvka's algorithm, each edge must have a weight attribute, and
+    each edge weight must be distinct.
+
+    For the other algorithms, if the graph edges do not have a weight
+    attribute a default weight of 1 will be used.
+
+    There may be more than one tree with the same minimum or maximum weight.
+    See :mod:`networkx.tree.recognition` for more detailed definitions.
+
+    Isolated nodes with self-loops are in the tree as edgeless isolated nodes.
+
+    """
+    edges = maximum_spanning_edges(
+        G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan
+    )
+    edges = list(edges)
+    T = G.__class__()  # Same graph class as G
+    T.graph.update(G.graph)
+    T.add_nodes_from(G.nodes.items())
+    T.add_edges_from(edges)
+    return T
+
+
+@py_random_state(3)
+@nx._dispatchable(preserve_edge_attrs=True, returns_graph=True)
+def random_spanning_tree(G, weight=None, *, multiplicative=True, seed=None):
+    """
+    Sample a random spanning tree using the edge weights of `G`.
+ + This function supports two different methods for determining the + probability of the graph. If ``multiplicative=True``, the probability + is based on the product of edge weights, and if ``multiplicative=False`` + it is based on the sum of the edge weight. However, since it is + easier to determine the total weight of all spanning trees for the + multiplicative version, that is significantly faster and should be used if + possible. Additionally, setting `weight` to `None` will cause a spanning tree + to be selected with uniform probability. + + The function uses algorithm A8 in [1]_ . + + Parameters + ---------- + G : nx.Graph + An undirected version of the original graph. + + weight : string + The edge key for the edge attribute holding edge weight. + + multiplicative : bool, default=True + If `True`, the probability of each tree is the product of its edge weight + over the sum of the product of all the spanning trees in the graph. If + `False`, the probability is the sum of its edge weight over the sum of + the sum of weights for all spanning trees in the graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nx.Graph + A spanning tree using the distribution defined by the weight of the tree. + + References + ---------- + .. [1] V. Kulkarni, Generating random combinatorial objects, Journal of + Algorithms, 11 (1990), pp. 185–207 + """ + + def find_node(merged_nodes, node): + """ + We can think of clusters of contracted nodes as having one + representative in the graph. Each node which is not in merged_nodes + is still its own representative. Since a representative can be later + contracted, we need to recursively search though the dict to find + the final representative, but once we know it we can use path + compression to speed up the access of the representative for next time. + + This cannot be replaced by the standard NetworkX union_find since that + data structure will merge nodes with less representing nodes into the + one with more representing nodes but this function requires we merge + them using the order that contract_edges contracts using. + + Parameters + ---------- + merged_nodes : dict + The dict storing the mapping from node to representative + node + The node whose representative we seek + + Returns + ------- + The representative of the `node` + """ + if node not in merged_nodes: + return node + else: + rep = find_node(merged_nodes, merged_nodes[node]) + merged_nodes[node] = rep + return rep + + def prepare_graph(): + """ + For the graph `G`, remove all edges not in the set `V` and then + contract all edges in the set `U`. + + Returns + ------- + A copy of `G` which has had all edges not in `V` removed and all edges + in `U` contracted. + """ + + # The result is a MultiGraph version of G so that parallel edges are + # allowed during edge contraction + result = nx.MultiGraph(incoming_graph_data=G) + + # Remove all edges not in V + edges_to_remove = set(result.edges()).difference(V) + result.remove_edges_from(edges_to_remove) + + # Contract all edges in U + # + # Imagine that you have two edges to contract and they share an + # endpoint like this: + # [0] ----- [1] ----- [2] + # If we contract (0, 1) first, the contraction function will always + # delete the second node it is passed so the resulting graph would be + # [0] ----- [2] + # and edge (1, 2) no longer exists but (0, 2) would need to be contracted + # in its place now. 
That is why I use the below dict as a merge-find + # data structure with path compression to track how the nodes are merged. + merged_nodes = {} + + for u, v in U: + u_rep = find_node(merged_nodes, u) + v_rep = find_node(merged_nodes, v) + # We cannot contract a node with itself + if u_rep == v_rep: + continue + nx.contracted_nodes(result, u_rep, v_rep, self_loops=False, copy=False) + merged_nodes[v_rep] = u_rep + + return merged_nodes, result + + def spanning_tree_total_weight(G, weight): + """ + Find the sum of weights of the spanning trees of `G` using the + appropriate `method`. + + This is easy if the chosen method is 'multiplicative', since we can + use Kirchhoff's Tree Matrix Theorem directly. However, with the + 'additive' method, this process is slightly more complex and less + computationally efficient as we have to find the number of spanning + trees which contain each possible edge in the graph. + + Parameters + ---------- + G : NetworkX Graph + The graph to find the total weight of all spanning trees on. + + weight : string + The key for the weight edge attribute of the graph. + + Returns + ------- + float + The sum of either the multiplicative or additive weight for all + spanning trees in the graph. + """ + if multiplicative: + return number_of_spanning_trees(G, weight=weight) + else: + # There are two cases for the total spanning tree additive weight. + # 1. There is one edge in the graph. Then the only spanning tree is + # that edge itself, which will have a total weight of that edge + # itself. + if G.number_of_edges() == 1: + return G.edges(data=weight).__iter__().__next__()[2] + # 2. There are no edges or two or more edges in the graph. Then, we find the + # total weight of the spanning trees using the formula in the + # reference paper: take the weight of each edge and multiply it by + # the number of spanning trees which include that edge. This + # can be accomplished by contracting the edge and finding the + # multiplicative total spanning tree weight if the weight of each edge + # is assumed to be 1, which is conveniently built into networkx already, + # by calling number_of_spanning_trees with weight=None. + # Note that with no edges the returned value is just zero. + else: + total = 0 + for u, v, w in G.edges(data=weight): + total += w * nx.number_of_spanning_trees( + nx.contracted_edge(G, edge=(u, v), self_loops=False), + weight=None, + ) + return total + + if G.number_of_nodes() < 2: + # no edges in the spanning tree + return nx.empty_graph(G.nodes) + + U = set() + st_cached_value = 0 + V = set(G.edges()) + shuffled_edges = list(G.edges()) + seed.shuffle(shuffled_edges) + + for u, v in shuffled_edges: + e_weight = G[u][v][weight] if weight is not None else 1 + node_map, prepared_G = prepare_graph() + G_total_tree_weight = spanning_tree_total_weight(prepared_G, weight) + # Add the edge to U so that we can compute the total tree weight + # assuming we include that edge + # Now, if (u, v) cannot exist in G because it is fully contracted out + # of existence, then it by definition cannot influence G_e's Kirchhoff + # value. But, we also cannot pick it. + rep_edge = (find_node(node_map, u), find_node(node_map, v)) + # Check to see if the 'representative edge' for the current edge is + # in prepared_G. If so, then we can pick it. 
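+        #
+        # In the multiplicative case, the threshold computed below equals
+        # w(e) * N(G/e) / N(G), where N(.) is the weighted spanning-tree
+        # count and G/e is the graph with edge e contracted. That ratio is
+        # the marginal probability that e appears in a tree drawn from the
+        # target distribution, conditioned on the choices made so far.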
+ if rep_edge in prepared_G.edges: + prepared_G_e = nx.contracted_edge( + prepared_G, edge=rep_edge, self_loops=False + ) + G_e_total_tree_weight = spanning_tree_total_weight(prepared_G_e, weight) + if multiplicative: + threshold = e_weight * G_e_total_tree_weight / G_total_tree_weight + else: + numerator = (st_cached_value + e_weight) * nx.number_of_spanning_trees( + prepared_G_e + ) + G_e_total_tree_weight + denominator = ( + st_cached_value * nx.number_of_spanning_trees(prepared_G) + + G_total_tree_weight + ) + threshold = numerator / denominator + else: + threshold = 0.0 + z = seed.uniform(0.0, 1.0) + if z > threshold: + # Remove the edge from V since we did not pick it. + V.remove((u, v)) + else: + # Add the edge to U since we picked it. + st_cached_value += e_weight + U.add((u, v)) + # If we decide to keep an edge, it may complete the spanning tree. + if len(U) == G.number_of_nodes() - 1: + spanning_tree = nx.Graph() + spanning_tree.add_edges_from(U) + return spanning_tree + raise Exception(f"Something went wrong! Only {len(U)} edges in the spanning tree!") + + +class SpanningTreeIterator: + """ + Iterate over all spanning trees of a graph in either increasing or + decreasing cost. + + Notes + ----- + This iterator uses the partition scheme from [1]_ (included edges, + excluded edges and open edges) as well as a modified Kruskal's Algorithm + to generate minimum spanning trees which respect the partition of edges. + For spanning trees with the same weight, ties are broken arbitrarily. + + References + ---------- + .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + + @dataclass(order=True) + class Partition: + """ + This dataclass represents a partition and stores a dict with the edge + data and the weight of the minimum spanning tree of the partition dict. + """ + + mst_weight: float + partition_dict: dict = field(compare=False) + + def __copy__(self): + return SpanningTreeIterator.Partition( + self.mst_weight, self.partition_dict.copy() + ) + + def __init__(self, G, weight="weight", minimum=True, ignore_nan=False): + """ + Initialize the iterator + + Parameters + ---------- + G : nx.Graph + The directed graph which we need to iterate trees over + + weight : String, default = "weight" + The edge attribute used to store the weight of the edge + + minimum : bool, default = True + Return the trees in increasing order while true and decreasing order + while false. + + ignore_nan : bool, default = False + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. 
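+
+        Examples
+        --------
+        A minimal usage sketch: iterate the spanning trees of a weighted
+        triangle in order of increasing total weight.
+
+        >>> G = nx.Graph()
+        >>> G.add_weighted_edges_from([(0, 1, 1), (1, 2, 2), (0, 2, 3)])
+        >>> [sorted(t.edges) for t in SpanningTreeIterator(G)]
+        [[(0, 1), (1, 2)], [(0, 1), (0, 2)], [(0, 2), (1, 2)]]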
+ """ + self.G = G.copy() + self.G.__networkx_cache__ = None # Disable caching + self.weight = weight + self.minimum = minimum + self.ignore_nan = ignore_nan + # Randomly create a key for an edge attribute to hold the partition data + self.partition_key = ( + "SpanningTreeIterators super secret partition attribute name" + ) + + def __iter__(self): + """ + Returns + ------- + SpanningTreeIterator + The iterator object for this graph + """ + self.partition_queue = PriorityQueue() + self._clear_partition(self.G) + mst_weight = partition_spanning_tree( + self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan + ).size(weight=self.weight) + + self.partition_queue.put( + self.Partition(mst_weight if self.minimum else -mst_weight, {}) + ) + + return self + + def __next__(self): + """ + Returns + ------- + (multi)Graph + The spanning tree of next greatest weight, which ties broken + arbitrarily. + """ + if self.partition_queue.empty(): + del self.G, self.partition_queue + raise StopIteration + + partition = self.partition_queue.get() + self._write_partition(partition) + next_tree = partition_spanning_tree( + self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan + ) + self._partition(partition, next_tree) + + self._clear_partition(next_tree) + return next_tree + + def _partition(self, partition, partition_tree): + """ + Create new partitions based of the minimum spanning tree of the + current minimum partition. + + Parameters + ---------- + partition : Partition + The Partition instance used to generate the current minimum spanning + tree. + partition_tree : nx.Graph + The minimum spanning tree of the input partition. + """ + # create two new partitions with the data from the input partition dict + p1 = self.Partition(0, partition.partition_dict.copy()) + p2 = self.Partition(0, partition.partition_dict.copy()) + for e in partition_tree.edges: + # determine if the edge was open or included + if e not in partition.partition_dict: + # This is an open edge + p1.partition_dict[e] = EdgePartition.EXCLUDED + p2.partition_dict[e] = EdgePartition.INCLUDED + + self._write_partition(p1) + p1_mst = partition_spanning_tree( + self.G, + self.minimum, + self.weight, + self.partition_key, + self.ignore_nan, + ) + p1_mst_weight = p1_mst.size(weight=self.weight) + if nx.is_connected(p1_mst): + p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight + self.partition_queue.put(p1.__copy__()) + p1.partition_dict = p2.partition_dict.copy() + + def _write_partition(self, partition): + """ + Writes the desired partition into the graph to calculate the minimum + spanning tree. + + Parameters + ---------- + partition : Partition + A Partition dataclass describing a partition on the edges of the + graph. + """ + + partition_dict = partition.partition_dict + partition_key = self.partition_key + G = self.G + + edges = ( + G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True) + ) + for *e, d in edges: + d[partition_key] = partition_dict.get(tuple(e), EdgePartition.OPEN) + + def _clear_partition(self, G): + """ + Removes partition data from the graph + """ + partition_key = self.partition_key + edges = ( + G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True) + ) + for *e, d in edges: + if partition_key in d: + del d[partition_key] + + +@nx._dispatchable(edge_attrs="weight") +def number_of_spanning_trees(G, *, root=None, weight=None): + """Returns the number of spanning trees in `G`. 
+ + A spanning tree for an undirected graph is a tree that connects + all nodes in the graph. For a directed graph, the analog of a + spanning tree is called a (spanning) arborescence. The arborescence + includes a unique directed path from the `root` node to each other node. + The graph must be weakly connected, and the root must be a node + that includes all nodes as successors [3]_. Note that to avoid + discussing sink-roots and reverse-arborescences, we have reversed + the edge orientation from [3]_ and use the in-degree laplacian. + + This function (when `weight` is `None`) returns the number of + spanning trees for an undirected graph and the number of + arborescences from a single root node for a directed graph. + When `weight` is the name of an edge attribute which holds the + weight value of each edge, the function returns the sum over + all trees of the multiplicative weight of each tree. That is, + the weight of the tree is the product of its edge weights. + + Kirchoff's Tree Matrix Theorem states that any cofactor of the + Laplacian matrix of a graph is the number of spanning trees in the + graph. (Here we use cofactors for a diagonal entry so that the + cofactor becomes the determinant of the matrix with one row + and its matching column removed.) For a weighted Laplacian matrix, + the cofactor is the sum across all spanning trees of the + multiplicative weight of each tree. That is, the weight of each + tree is the product of its edge weights. The theorem is also + known as Kirchhoff's theorem [1]_ and the Matrix-Tree theorem [2]_. + + For directed graphs, a similar theorem (Tutte's Theorem) holds with + the cofactor chosen to be the one with row and column removed that + correspond to the root. The cofactor is the number of arborescences + with the specified node as root. And the weighted version gives the + sum of the arborescence weights with root `root`. The arborescence + weight is the product of its edge weights. + + Parameters + ---------- + G : NetworkX graph + + root : node + A node in the directed graph `G` that has all nodes as descendants. + (This is ignored for undirected graphs.) + + weight : string or None, optional (default=None) + The name of the edge attribute holding the edge weight. + If `None`, then each edge is assumed to have a weight of 1. + + Returns + ------- + Number + Undirected graphs: + The number of spanning trees of the graph `G`. + Or the sum of all spanning tree weights of the graph `G` + where the weight of a tree is the product of its edge weights. + Directed graphs: + The number of arborescences of `G` rooted at node `root`. + Or the sum of all arborescence weights of the graph `G` with + specified root where the weight of an arborescence is the product + of its edge weights. + + Raises + ------ + NetworkXPointlessConcept + If `G` does not contain any nodes. + + NetworkXError + If the graph `G` is directed and the root node + is not specified or is not in G. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> round(nx.number_of_spanning_trees(G)) + 125 + + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=2) + >>> G.add_edge(1, 3, weight=1) + >>> G.add_edge(2, 3, weight=1) + >>> round(nx.number_of_spanning_trees(G, weight="weight")) + 5 + + Notes + ----- + Self-loops are excluded. Multi-edges are contracted in one edge + equal to the sum of the weights. + + References + ---------- + .. [1] Wikipedia + "Kirchhoff's theorem." + https://en.wikipedia.org/wiki/Kirchhoff%27s_theorem + .. [2] Kirchhoff, G. R. 
+ Über die Auflösung der Gleichungen, auf welche man + bei der Untersuchung der linearen Vertheilung + Galvanischer Ströme geführt wird + Annalen der Physik und Chemie, vol. 72, pp. 497-508, 1847. + .. [3] Margoliash, J. + "Matrix-Tree Theorem for Directed Graphs" + https://www.math.uchicago.edu/~may/VIGRE/VIGRE2010/REUPapers/Margoliash.pdf + """ + import numpy as np + + if len(G) == 0: + raise nx.NetworkXPointlessConcept("Graph G must contain at least one node.") + + # undirected G + if not nx.is_directed(G): + if not nx.is_connected(G): + return 0 + G_laplacian = nx.laplacian_matrix(G, weight=weight).toarray() + return float(np.linalg.det(G_laplacian[1:, 1:])) + + # directed G + if root is None: + raise nx.NetworkXError("Input `root` must be provided when G is directed") + if root not in G: + raise nx.NetworkXError("The node root is not in the graph G.") + if not nx.is_weakly_connected(G): + return 0 + + # Compute directed Laplacian matrix + nodelist = [root] + [n for n in G if n != root] + A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight) + D = np.diag(A.sum(axis=0)) + G_laplacian = D - A + + # Compute number of spanning trees + return float(np.linalg.det(G_laplacian[1:, 1:])) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/operations.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/operations.py new file mode 100644 index 0000000..a75770a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/operations.py @@ -0,0 +1,106 @@ +"""Operations on trees.""" + +from functools import partial +from itertools import accumulate, chain + +import networkx as nx + +__all__ = ["join_trees"] + + +# Argument types don't match dispatching, but allow manual selection of backend +@nx._dispatchable(graphs=None, returns_graph=True) +def join_trees(rooted_trees, *, label_attribute=None, first_label=0): + """Returns a new rooted tree made by joining `rooted_trees` + + Constructs a new tree by joining each tree in `rooted_trees`. + A new root node is added and connected to each of the roots + of the input trees. While copying the nodes from the trees, + relabeling to integers occurs. If the `label_attribute` is provided, + the old node labels will be stored in the new tree under this attribute. + + Parameters + ---------- + rooted_trees : list + A list of pairs in which each left element is a NetworkX graph + object representing a tree and each right element is the root + node of that tree. The nodes of these trees will be relabeled to + integers. + + label_attribute : str + If provided, the old node labels will be stored in the new tree + under this node attribute. If not provided, the original labels + of the nodes in the input trees are not stored. + + first_label : int, optional (default=0) + Specifies the label for the new root node. If provided, the root node of the joined tree + will have this label. If not provided, the root node will default to a label of 0. + + Returns + ------- + NetworkX graph + The rooted tree resulting from joining the provided `rooted_trees`. The new tree has a root node + labeled as specified by `first_label` (defaulting to 0 if not provided). Subtrees from the input + `rooted_trees` are attached to this new root node. Each non-root node, if the `label_attribute` + is provided, has an attribute that indicates the original label of the node in the input tree. + + Notes + ----- + Trees are stored in NetworkX as NetworkX Graphs. There is no specific + enforcement of the fact that these are trees. 
Testing for each tree + can be done using :func:`networkx.is_tree`. + + Graph, edge, and node attributes are propagated from the given + rooted trees to the created tree. If there are any overlapping graph + attributes, those from later trees will overwrite those from earlier + trees in the tuple of positional arguments. + + Examples + -------- + Join two full balanced binary trees of height *h* to get a full + balanced binary tree of depth *h* + 1:: + + >>> h = 4 + >>> left = nx.balanced_tree(2, h) + >>> right = nx.balanced_tree(2, h) + >>> joined_tree = nx.join_trees([(left, 0), (right, 0)]) + >>> nx.is_isomorphic(joined_tree, nx.balanced_tree(2, h + 1)) + True + + """ + if not rooted_trees: + return nx.empty_graph(1) + + # Unzip the zipped list of (tree, root) pairs. + trees, roots = zip(*rooted_trees) + + # The join of the trees has the same type as the type of the first tree. + R = type(trees[0])() + + lengths = (len(tree) for tree in trees[:-1]) + first_labels = list(accumulate(lengths, initial=first_label + 1)) + + new_roots = [] + for tree, root, first_node in zip(trees, roots, first_labels): + new_root = first_node + list(tree.nodes()).index(root) + new_roots.append(new_root) + + # Relabel the nodes so that their union is the integers starting at first_label. + relabel = partial( + nx.convert_node_labels_to_integers, label_attribute=label_attribute + ) + new_trees = [ + relabel(tree, first_label=first_label) + for tree, first_label in zip(trees, first_labels) + ] + + # Add all sets of nodes and edges, attributes + for tree in new_trees: + R.update(tree) + + # Finally, join the subtrees at the root. We know first_label is unused by + # the way we relabeled the subtrees. + R.add_node(first_label) + R.add_edges_from((first_label, root) for root in new_roots) + + return R diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/recognition.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/recognition.py new file mode 100644 index 0000000..a9eae98 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/recognition.py @@ -0,0 +1,273 @@ +""" +Recognition Tests +================= + +A *forest* is an acyclic, undirected graph, and a *tree* is a connected forest. +Depending on the subfield, there are various conventions for generalizing these +definitions to directed graphs. + +In one convention, directed variants of forest and tree are defined in an +identical manner, except that the direction of the edges is ignored. In effect, +each directed edge is treated as a single undirected edge. Then, additional +restrictions are imposed to define *branchings* and *arborescences*. + +In another convention, directed variants of forest and tree correspond to +the previous convention's branchings and arborescences, respectively. Then two +new terms, *polyforest* and *polytree*, are defined to correspond to the other +convention's forest and tree. + +Summarizing:: + + +-----------------------------+ + | Convention A | Convention B | + +=============================+ + | forest | polyforest | + | tree | polytree | + | branching | forest | + | arborescence | tree | + +-----------------------------+ + +Each convention has its reasons. The first convention emphasizes definitional +similarity in that directed forests and trees are only concerned with +acyclicity and do not have an in-degree constraint, just as their undirected +counterparts do not. 
The second convention emphasizes functional similarity +in the sense that the directed analog of a spanning tree is a spanning +arborescence. That is, take any spanning tree and choose one node as the root. +Then every edge is assigned a direction such there is a directed path from the +root to every other node. The result is a spanning arborescence. + +NetworkX follows convention "A". Explicitly, these are: + +undirected forest + An undirected graph with no undirected cycles. + +undirected tree + A connected, undirected forest. + +directed forest + A directed graph with no undirected cycles. Equivalently, the underlying + graph structure (which ignores edge orientations) is an undirected forest. + In convention B, this is known as a polyforest. + +directed tree + A weakly connected, directed forest. Equivalently, the underlying graph + structure (which ignores edge orientations) is an undirected tree. In + convention B, this is known as a polytree. + +branching + A directed forest with each node having, at most, one parent. So the maximum + in-degree is equal to 1. In convention B, this is known as a forest. + +arborescence + A directed tree with each node having, at most, one parent. So the maximum + in-degree is equal to 1. In convention B, this is known as a tree. + +For trees and arborescences, the adjective "spanning" may be added to designate +that the graph, when considered as a forest/branching, consists of a single +tree/arborescence that includes all nodes in the graph. It is true, by +definition, that every tree/arborescence is spanning with respect to the nodes +that define the tree/arborescence and so, it might seem redundant to introduce +the notion of "spanning". However, the nodes may represent a subset of +nodes from a larger graph, and it is in this context that the term "spanning" +becomes a useful notion. + +""" + +import networkx as nx + +__all__ = ["is_arborescence", "is_branching", "is_forest", "is_tree"] + + +@nx.utils.not_implemented_for("undirected") +@nx._dispatchable +def is_arborescence(G): + """ + Returns True if `G` is an arborescence. + + An arborescence is a directed tree with maximum in-degree equal to 1. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is an arborescence. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (3, 4)]) + >>> nx.is_arborescence(G) + True + >>> G.remove_edge(0, 1) + >>> G.add_edge(1, 2) # maximum in-degree is 2 + >>> nx.is_arborescence(G) + False + + Notes + ----- + In another convention, an arborescence is known as a *tree*. + + See Also + -------- + is_tree + + """ + return is_tree(G) and max(d for n, d in G.in_degree()) <= 1 + + +@nx.utils.not_implemented_for("undirected") +@nx._dispatchable +def is_branching(G): + """ + Returns True if `G` is a branching. + + A branching is a directed forest with maximum in-degree equal to 1. + + Parameters + ---------- + G : directed graph + The directed graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a branching. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + >>> nx.is_branching(G) + True + >>> G.remove_edge(2, 3) + >>> G.add_edge(3, 1) # maximum in-degree is 2 + >>> nx.is_branching(G) + False + + Notes + ----- + In another convention, a branching is also known as a *forest*. 
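+
+    A weakly connected branching is an arborescence, for example:
+
+    >>> G = nx.DiGraph([(0, 1), (0, 2)])
+    >>> nx.is_branching(G) and nx.is_arborescence(G)
+    True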
+ + See Also + -------- + is_forest + + """ + return is_forest(G) and max(d for n, d in G.in_degree()) <= 1 + + +@nx._dispatchable +def is_forest(G): + """ + Returns True if `G` is a forest. + + A forest is a graph with no undirected cycles. + + For directed graphs, `G` is a forest if the underlying graph is a forest. + The underlying graph is obtained by treating each directed edge as a single + undirected edge in a multigraph. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a forest. + + Raises + ------ + NetworkXPointlessConcept + If `G` is empty. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)]) + >>> nx.is_forest(G) + True + >>> G.add_edge(4, 1) + >>> nx.is_forest(G) + False + + Notes + ----- + In another convention, a directed forest is known as a *polyforest* and + then *forest* corresponds to a *branching*. + + See Also + -------- + is_branching + + """ + if len(G) == 0: + raise nx.exception.NetworkXPointlessConcept("G has no nodes.") + + if G.is_directed(): + components = (G.subgraph(c) for c in nx.weakly_connected_components(G)) + else: + components = (G.subgraph(c) for c in nx.connected_components(G)) + + return all(len(c) - 1 == c.number_of_edges() for c in components) + + +@nx._dispatchable +def is_tree(G): + """ + Returns True if `G` is a tree. + + A tree is a connected graph with no undirected cycles. + + For directed graphs, `G` is a tree if the underlying graph is a tree. The + underlying graph is obtained by treating each directed edge as a single + undirected edge in a multigraph. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a tree. + + Raises + ------ + NetworkXPointlessConcept + If `G` is empty. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)]) + >>> nx.is_tree(G) # n-1 edges + True + >>> G.add_edge(3, 4) + >>> nx.is_tree(G) # n edges + False + + Notes + ----- + In another convention, a directed tree is known as a *polytree* and then + *tree* corresponds to an *arborescence*. + + See Also + -------- + is_arborescence + + """ + if len(G) == 0: + raise nx.exception.NetworkXPointlessConcept("G has no nodes.") + + if G.is_directed(): + is_connected = nx.is_weakly_connected + else: + is_connected = nx.is_connected + + # A connected graph with no cycles has n-1 edges. 
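+    # Conversely, a connected graph on n nodes with exactly n - 1 edges
+    # cannot contain a cycle, so the edge count plus the connectivity test
+    # below fully characterizes trees with no explicit cycle search needed.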
+ return len(G) - 1 == G.number_of_edges() and is_connected(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..a8a1996 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-312.pyc new file mode 100644 index 0000000..0c9ecdd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-312.pyc new file mode 100644 index 0000000..ca7c9bd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-312.pyc new file mode 100644 index 0000000..fa02870 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-312.pyc new file mode 100644 index 0000000..93c1fa7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-312.pyc new file mode 100644 index 0000000..2923ac5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-312.pyc new file mode 100644 index 0000000..dd9c80d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_branchings.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_branchings.py new file mode 100644 index 0000000..9be976a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_branchings.py @@ -0,0 
+1,624 @@ +import math +from operator import itemgetter + +import pytest + +import networkx as nx +from networkx.algorithms.tree import branchings, recognition + +np = pytest.importorskip("numpy") + +# +# Explicitly discussed examples from Edmonds paper. +# + +# Used in Figures A-F. +# +# fmt: off +G_array = np.array([ + # 0 1 2 3 4 5 6 7 8 + [0, 0, 12, 0, 12, 0, 0, 0, 0], # 0 + [4, 0, 0, 0, 0, 13, 0, 0, 0], # 1 + [0, 17, 0, 21, 0, 12, 0, 0, 0], # 2 + [5, 0, 0, 0, 17, 0, 18, 0, 0], # 3 + [0, 0, 0, 0, 0, 0, 0, 12, 0], # 4 + [0, 0, 0, 0, 0, 0, 14, 0, 12], # 5 + [0, 0, 21, 0, 0, 0, 0, 0, 15], # 6 + [0, 0, 0, 19, 0, 0, 15, 0, 0], # 7 + [0, 0, 0, 0, 0, 0, 0, 18, 0], # 8 +], dtype=int) + +# Two copies of the graph from the original paper as disconnected components +G_big_array = np.zeros(np.array(G_array.shape) * 2, dtype=int) +G_big_array[:G_array.shape[0], :G_array.shape[1]] = G_array +G_big_array[G_array.shape[0]:, G_array.shape[1]:] = G_array + +# fmt: on + + +def G1(): + G = nx.from_numpy_array(G_array, create_using=nx.MultiDiGraph) + return G + + +def G2(): + # Now we shift all the weights by -10. + # Should not affect optimal arborescence, but does affect optimal branching. + Garr = G_array.copy() + Garr[np.nonzero(Garr)] -= 10 + G = nx.from_numpy_array(Garr, create_using=nx.MultiDiGraph) + return G + + +# An optimal branching for G1 that is also a spanning arborescence. So it is +# also an optimal spanning arborescence. +# +optimal_arborescence_1 = [ + (0, 2, 12), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 4, 17), + (3, 6, 18), + (6, 8, 15), + (8, 7, 18), +] + +# For G2, the optimal branching of G1 (with shifted weights) is no longer +# an optimal branching, but it is still an optimal spanning arborescence +# (just with shifted weights). An optimal branching for G2 is similar to what +# appears in figure G (this is greedy_subopt_branching_1a below), but with the +# edge (3, 0, 5), which is now (3, 0, -5), removed. Thus, the optimal branching +# is not a spanning arborescence. The code finds optimal_branching_2a. +# An alternative and equivalent branching is optimal_branching_2b. We would +# need to modify the code to iterate through all equivalent optimal branchings. +# +# These are maximal branchings or arborescences. +optimal_branching_2a = [ + (5, 6, 4), + (6, 2, 11), + (6, 8, 5), + (8, 7, 8), + (2, 1, 7), + (2, 3, 11), + (3, 4, 7), +] +optimal_branching_2b = [ + (8, 7, 8), + (7, 3, 9), + (3, 4, 7), + (3, 6, 8), + (6, 2, 11), + (2, 1, 7), + (1, 5, 3), +] +optimal_arborescence_2 = [ + (0, 2, 2), + (2, 1, 7), + (2, 3, 11), + (1, 5, 3), + (3, 4, 7), + (3, 6, 8), + (6, 8, 5), + (8, 7, 8), +] + +# Two suboptimal maximal branchings on G1 obtained from a greedy algorithm. +# 1a matches what is shown in Figure G in Edmonds's paper. 
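+# Their total weights are 128 (1a) and 127 (1b), both below the optimal
+# branching weight of 131; the tests below check these values.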
+greedy_subopt_branching_1a = [
+    (5, 6, 14),
+    (6, 2, 21),
+    (6, 8, 15),
+    (8, 7, 18),
+    (2, 1, 17),
+    (2, 3, 21),
+    (3, 0, 5),
+    (3, 4, 17),
+]
+greedy_subopt_branching_1b = [
+    (8, 7, 18),
+    (7, 6, 15),
+    (6, 2, 21),
+    (2, 1, 17),
+    (2, 3, 21),
+    (1, 5, 13),
+    (3, 0, 5),
+    (3, 4, 17),
+]
+
+
+def build_branching(edges, double=False):
+    G = nx.DiGraph()
+    for u, v, weight in edges:
+        G.add_edge(u, v, weight=weight)
+        if double:
+            G.add_edge(u + 9, v + 9, weight=weight)
+    return G
+
+
+def sorted_edges(G, attr="weight", default=1):
+    edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)]
+    edges = sorted(edges, key=lambda x: (x[2], x[1], x[0]))
+    return edges
+
+
+def assert_equal_branchings(G1, G2, attr="weight", default=1):
+    edges1 = list(G1.edges(data=True))
+    edges2 = list(G2.edges(data=True))
+    assert len(edges1) == len(edges2)
+
+    # Grab the weights only.
+    e1 = sorted_edges(G1, attr, default)
+    e2 = sorted_edges(G2, attr, default)
+
+    for a, b in zip(e1, e2):
+        assert a[:2] == b[:2]
+        np.testing.assert_almost_equal(a[2], b[2])
+
+
+################
+
+
+def test_optimal_branching1():
+    G = build_branching(optimal_arborescence_1)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 131
+
+
+def test_optimal_branching2a():
+    G = build_branching(optimal_branching_2a)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 53
+
+
+def test_optimal_branching2b():
+    G = build_branching(optimal_branching_2b)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 53
+
+
+def test_optimal_arborescence2():
+    G = build_branching(optimal_arborescence_2)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 51
+
+
+def test_greedy_suboptimal_branching1a():
+    G = build_branching(greedy_subopt_branching_1a)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 128
+
+
+def test_greedy_suboptimal_branching1b():
+    G = build_branching(greedy_subopt_branching_1b)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 127
+
+
+def test_greedy_max1():
+    # Standard test.
+    #
+    G = G1()
+    B = branchings.greedy_branching(G)
+    # There are only two possible greedy branchings. The sorting is such
+    # that it should equal the second suboptimal branching: 1b.
+    B_ = build_branching(greedy_subopt_branching_1b)
+    assert_equal_branchings(B, B_)
+
+
+def test_greedy_branching_kwarg_kind():
+    G = G1()
+    with pytest.raises(nx.NetworkXException, match="Unknown value for `kind`."):
+        B = branchings.greedy_branching(G, kind="lol")
+
+
+def test_greedy_branching_for_unsortable_nodes():
+    G = nx.DiGraph()
+    G.add_weighted_edges_from([((2, 3), 5, 1), (3, "a", 1), (2, 4, 5)])
+    edges = [(u, v, data.get("weight", 1)) for (u, v, data) in G.edges(data=True)]
+    with pytest.raises(TypeError):
+        edges.sort(key=itemgetter(2, 0, 1), reverse=True)
+    B = branchings.greedy_branching(G, kind="max").edges(data=True)
+    assert list(B) == [
+        ((2, 3), 5, {"weight": 1}),
+        (3, "a", {"weight": 1}),
+        (2, 4, {"weight": 5}),
+    ]
+
+
+def test_greedy_max2():
+    # Different default weight.
+    #
+    G = G1()
+    del G[1][0][0]["weight"]
+    B = branchings.greedy_branching(G, default=6)
+    # Chosen so that edge (3, 0, 5) is not selected and (1, 0, 6) is instead.
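+    # With the weight on (1, 0) deleted, the default of 6 now beats the
+    # weight-5 edge (3, 0) as the best edge into node 0.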
+ + edges = [ + (1, 0, 6), + (1, 5, 13), + (7, 6, 15), + (2, 1, 17), + (3, 4, 17), + (8, 7, 18), + (2, 3, 21), + (6, 2, 21), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_greedy_max3(): + # All equal weights. + # + G = G1() + B = branchings.greedy_branching(G, attr=None) + + # This is mostly arbitrary...the output was generated by running the algo. + edges = [ + (2, 1, 1), + (3, 0, 1), + (3, 4, 1), + (5, 8, 1), + (6, 2, 1), + (7, 3, 1), + (7, 6, 1), + (8, 7, 1), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_, default=1) + + +def test_greedy_min(): + G = G1() + B = branchings.greedy_branching(G, kind="min") + + edges = [ + (1, 0, 4), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (7, 3, 19), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_edmonds1_maxbranch(): + G = G1() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_maxarbor(): + G = G1() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_minimal_branching(): + # graph will have something like a minimum arborescence but no spanning one + G = nx.from_numpy_array(G_big_array, create_using=nx.DiGraph) + B = branchings.minimal_branching(G) + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + B_ = build_branching(edges, double=True) + assert_equal_branchings(B, B_) + + +def test_edmonds2_maxbranch(): + G = G2() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_branching_2a) + assert_equal_branchings(x, x_) + + +def test_edmonds2_maxarbor(): + G = G2() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_2) + assert_equal_branchings(x, x_) + + +def test_edmonds2_minarbor(): + G = G1() + x = branchings.minimum_spanning_arborescence(G) + # This was obtained from algorithm. Need to verify it independently. + # Branch weight is: 96 + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch1(): + G = G1() + x = branchings.minimum_branching(G) + edges = [] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch2(): + G = G1() + G.add_edge(8, 9, weight=-10) + x = branchings.minimum_branching(G) + edges = [(8, 9, -10)] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +# Need more tests + + +def test_mst(): + # Make sure we get the same results for undirected graphs. 
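+    # The directed copy has two arcs per undirected edge, so the minimum
+    # spanning arborescence should use the same edges as the undirected MST.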
+ # Example from: https://en.wikipedia.org/wiki/Kruskal's_algorithm + G = nx.Graph() + edgelist = [ + (0, 3, [("weight", 5)]), + (0, 1, [("weight", 7)]), + (1, 3, [("weight", 9)]), + (1, 2, [("weight", 8)]), + (1, 4, [("weight", 7)]), + (3, 4, [("weight", 15)]), + (3, 5, [("weight", 6)]), + (2, 4, [("weight", 5)]), + (4, 5, [("weight", 8)]), + (4, 6, [("weight", 9)]), + (5, 6, [("weight", 11)]), + ] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + edges = [ + ({0, 1}, 7), + ({0, 3}, 5), + ({3, 5}, 6), + ({1, 4}, 7), + ({4, 2}, 5), + ({4, 6}, 9), + ] + + assert x.number_of_edges() == len(edges) + for u, v, d in x.edges(data=True): + assert ({u, v}, d["weight"]) in edges + + +def test_mixed_nodetypes(): + # Smoke test to make sure no TypeError is raised for mixed node types. + G = nx.Graph() + edgelist = [(0, 3, [("weight", 5)]), (0, "1", [("weight", 5)])] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + +def test_edmonds1_minbranch(): + # Using -G_array and min should give the same as optimal_arborescence_1, + # but with all edges negative. + edges = [(u, v, -w) for (u, v, w) in optimal_arborescence_1] + + G = nx.from_numpy_array(-G_array, create_using=nx.DiGraph) + + # Quickly make sure max branching is empty. + x = branchings.maximum_branching(G) + x_ = build_branching([]) + assert_equal_branchings(x, x_) + + # Now test the min branching. + x = branchings.minimum_branching(G) + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edge_attribute_preservation_normal_graph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for normal graphs. + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + B = branchings.maximum_branching(G, preserve_attrs=True) + + assert B[0][1]["otherattr"] == 1 + assert B[0][1]["otherattr2"] == 3 + + +def test_edge_attribute_preservation_multigraph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for multigraphs. + G = nx.MultiGraph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist * 2) # Make sure we have duplicate edge paths + + B = branchings.maximum_branching(G, preserve_attrs=True) + + assert B[0][1][0]["otherattr"] == 1 + assert B[0][1][0]["otherattr2"] == 3 + + +def test_edge_attribute_discard(): + # Test that edge attributes are discarded if we do not specify to keep them + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + B = branchings.maximum_branching(G, preserve_attrs=False) + + edge_dict = B[0][1] + with pytest.raises(KeyError): + _ = edge_dict["otherattr"] + + +def test_partition_spanning_arborescence(): + """ + Test that we can generate minimum spanning arborescences which respect the + given partition. 
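+
+    Edges marked ``EXCLUDED`` must not appear in the returned arborescence,
+    and edges marked ``INCLUDED`` must always appear.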
+ """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + G[3][0]["partition"] = nx.EdgePartition.EXCLUDED + G[2][3]["partition"] = nx.EdgePartition.INCLUDED + G[7][3]["partition"] = nx.EdgePartition.EXCLUDED + G[0][2]["partition"] = nx.EdgePartition.EXCLUDED + G[6][2]["partition"] = nx.EdgePartition.INCLUDED + + actual_edges = [ + (0, 4, 12), + (1, 0, 4), + (1, 5, 13), + (2, 3, 21), + (4, 7, 12), + (5, 6, 14), + (5, 8, 12), + (6, 2, 21), + ] + + B = branchings.minimum_spanning_arborescence(G, partition="partition") + assert_equal_branchings(build_branching(actual_edges), B) + + +def test_arborescence_iterator_min(): + """ + Tests the arborescence iterator. + + A brute force method found 680 arborescences in this graph. + This test will not verify all of them individually, but will check two + things + + * The iterator returns 680 arborescences + * The weight of the arborescences is non-strictly increasing + + for more information please visit + https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + arborescence_count = 0 + arborescence_weight = -math.inf + for B in branchings.ArborescenceIterator(G): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight >= arborescence_weight + arborescence_weight = new_arborescence_weight + + assert arborescence_count == 680 + + +def test_arborescence_iterator_max(): + """ + Tests the arborescence iterator. + + A brute force method found 680 arborescences in this graph. + This test will not verify all of them individually, but will check two + things + + * The iterator returns 680 arborescences + * The weight of the arborescences is non-strictly decreasing + + for more information please visit + https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + arborescence_count = 0 + arborescence_weight = math.inf + for B in branchings.ArborescenceIterator(G, minimum=False): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight <= arborescence_weight + arborescence_weight = new_arborescence_weight + + assert arborescence_count == 680 + + +def test_arborescence_iterator_initial_partition(): + """ + Tests the arborescence iterator with three included edges and three excluded + in the initial partition. + + A brute force method similar to the one used in the above tests found that + there are 16 arborescences which contain the included edges and not the + excluded edges. + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + included_edges = [(1, 0), (5, 6), (8, 7)] + excluded_edges = [(0, 2), (3, 6), (1, 5)] + + arborescence_count = 0 + arborescence_weight = -math.inf + for B in branchings.ArborescenceIterator( + G, init_partition=(included_edges, excluded_edges) + ): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight >= arborescence_weight + arborescence_weight = new_arborescence_weight + for e in included_edges: + assert e in B.edges + for e in excluded_edges: + assert e not in B.edges + assert arborescence_count == 16 + + +def test_branchings_with_default_weights(): + """ + Tests that various branching algorithms work on graphs without weights. + For more information, see issue #7279. 
+ """ + graph = nx.erdos_renyi_graph(10, p=0.2, directed=True, seed=123) + + assert all("weight" not in d for (u, v, d) in graph.edges(data=True)), ( + "test is for graphs without a weight attribute" + ) + + # Calling these functions will modify graph inplace to add weights + # copy the graph to avoid this. + nx.minimum_spanning_arborescence(graph.copy()) + nx.maximum_spanning_arborescence(graph.copy()) + nx.minimum_branching(graph.copy()) + nx.maximum_branching(graph.copy()) + nx.algorithms.tree.minimal_branching(graph.copy()) + nx.algorithms.tree.branching_weight(graph.copy()) + nx.algorithms.tree.greedy_branching(graph.copy()) + + assert all("weight" not in d for (u, v, d) in graph.edges(data=True)), ( + "The above calls should not modify the initial graph in-place" + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_coding.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_coding.py new file mode 100644 index 0000000..26bd408 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_coding.py @@ -0,0 +1,114 @@ +"""Unit tests for the :mod:`~networkx.algorithms.tree.coding` module.""" + +from itertools import product + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestPruferSequence: + """Unit tests for the Prüfer sequence encoding and decoding + functions. + + """ + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_prufer_sequence(G) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.null_graph()) + + def test_trivial_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.trivial_graph()) + + def test_bad_integer_labels(self): + with pytest.raises(KeyError): + T = nx.Graph(nx.utils.pairwise("abc")) + nx.to_prufer_sequence(T) + + def test_encoding(self): + """Tests for encoding a tree as a Prüfer sequence using the + iterative strategy. + + """ + # Example from Wikipedia. + tree = nx.Graph([(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]) + sequence = nx.to_prufer_sequence(tree) + assert sequence == [3, 3, 3, 4] + + def test_decoding(self): + """Tests for decoding a tree from a Prüfer sequence.""" + # Example from Wikipedia. + sequence = [3, 3, 3, 4] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(6))) + edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + assert edges_equal(list(tree.edges()), edges) + + def test_decoding2(self): + # Example from "An Optimal Algorithm for Prufer Codes". 
+ sequence = [2, 4, 0, 1, 3, 3] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(8))) + edges = [(0, 1), (0, 4), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)] + assert edges_equal(list(tree.edges()), edges) + + def test_inverse(self): + """Tests that the encoding and decoding functions are inverses.""" + for T in nx.nonisomorphic_trees(4): + T2 = nx.from_prufer_sequence(nx.to_prufer_sequence(T)) + assert nodes_equal(list(T), list(T2)) + assert edges_equal(list(T.edges()), list(T2.edges())) + + for seq in product(range(4), repeat=2): + seq2 = nx.to_prufer_sequence(nx.from_prufer_sequence(seq)) + assert list(seq) == seq2 + + +class TestNestedTuple: + """Unit tests for the nested tuple encoding and decoding functions.""" + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_nested_tuple(G, 0) + + def test_unknown_root(self): + with pytest.raises(nx.NodeNotFound): + G = nx.path_graph(2) + nx.to_nested_tuple(G, "bogus") + + def test_encoding(self): + T = nx.full_rary_tree(2, 2**3 - 1) + expected = (((), ()), ((), ())) + actual = nx.to_nested_tuple(T, 0) + assert nodes_equal(expected, actual) + + def test_canonical_form(self): + T = nx.Graph() + T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + T.add_edges_from([(1, 4), (1, 5)]) + T.add_edges_from([(3, 6), (3, 7)]) + root = 0 + actual = nx.to_nested_tuple(T, root, canonical_form=True) + expected = ((), ((), ()), ((), ())) + assert actual == expected + + def test_decoding(self): + balanced = (((), ()), ((), ())) + expected = nx.full_rary_tree(2, 2**3 - 1) + actual = nx.from_nested_tuple(balanced) + assert nx.is_isomorphic(expected, actual) + + def test_sensible_relabeling(self): + balanced = (((), ()), ((), ())) + T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + assert nodes_equal(list(T), list(range(2**3 - 1))) + assert edges_equal(list(T.edges()), edges) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_decomposition.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_decomposition.py new file mode 100644 index 0000000..8c37605 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_decomposition.py @@ -0,0 +1,79 @@ +import networkx as nx +from networkx.algorithms.tree.decomposition import junction_tree + + +def test_junction_tree_directed_confounders(): + B = nx.DiGraph() + B.add_edges_from([("A", "C"), ("B", "C"), ("C", "D"), ("C", "E")]) + + G = junction_tree(B) + J = nx.Graph() + J.add_edges_from( + [ + (("C", "E"), ("C",)), + (("C",), ("A", "B", "C")), + (("A", "B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_nodes(): + B = nx.DiGraph() + B.add_nodes_from([("A", "B", "C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B", "C", "D")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_cascade(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("B", "C"), ("C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "B"), ("B",)), + (("B",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_edges(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("C", "D"), ("E", "F")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B"), ("C", 
"D"), ("E", "F")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_undirected(): + B = nx.Graph() + B.add_edges_from([("A", "C"), ("A", "D"), ("B", "C"), ("C", "E")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "D"), ("A",)), + (("A",), ("A", "C")), + (("A", "C"), ("C",)), + (("C",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "E")), + ] + ) + + assert nx.is_isomorphic(G, J) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_mst.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_mst.py new file mode 100644 index 0000000..f8945a7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_mst.py @@ -0,0 +1,918 @@ +"""Unit tests for the :mod:`networkx.algorithms.tree.mst` module.""" + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def test_unknown_algorithm(): + with pytest.raises(ValueError): + nx.minimum_spanning_tree(nx.Graph(), algorithm="random") + with pytest.raises( + ValueError, match="random is not a valid choice for an algorithm." + ): + nx.maximum_spanning_edges(nx.Graph(), algorithm="random") + + +class MinimumSpanningTreeTestBase: + """Base class for test classes for minimum spanning tree algorithms. + This class contains some common tests that will be inherited by + subclasses. Each subclass must have a class attribute + :data:`algorithm` that is a string representing the algorithm to + run, as described under the ``algorithm`` keyword argument for the + :func:`networkx.minimum_spanning_edges` function. Subclasses can + then implement any algorithm-specific tests. + """ + + def setup_method(self, method): + """Creates an example graph and stores the expected minimum and + maximum spanning tree edges. + """ + # This stores the class attribute `algorithm` in an instance attribute. + self.algo = self.algorithm + # This example graph comes from Wikipedia: + # https://en.wikipedia.org/wiki/Kruskal's_algorithm + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + self.minimum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (0, 3, {"weight": 5}), + (1, 4, {"weight": 7}), + (2, 4, {"weight": 5}), + (3, 5, {"weight": 6}), + (4, 6, {"weight": 9}), + ] + self.maximum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (1, 2, {"weight": 8}), + (1, 3, {"weight": 9}), + (3, 4, {"weight": 15}), + (4, 6, {"weight": 9}), + (5, 6, {"weight": 11}), + ] + + def test_minimum_edges(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_edges(self): + edges = nx.maximum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. 
+ actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_without_data(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo, data=False) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + + def test_nan_weights(self): + # Edge weights NaN never appear in the spanning tree. see #2164 + G = self.G + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + # Now test for raising exception + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test default for ignore_nan as False + edges = nx.minimum_spanning_edges(G, algorithm=self.algo, data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_MultiGraph(self): + G = nx.MultiGraph() + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm="prim", data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test default for ignore_nan as False + edges = nx.minimum_spanning_edges(G, algorithm="prim", data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_order(self): + # now try again with a nan edge at the beginning of G.nodes + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_edge(0, 7, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_isolated_node(self): + # now try again with an isolated node + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_node(0) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_minimum_tree(self): + T = nx.minimum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_tree(self): + T = nx.maximum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_disconnected(self): + G = nx.Graph([(0, 1, {"weight": 1}), (2, 3, {"weight": 2})]) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(list(T), list(range(4))) + assert 
edges_equal(list(T.edges()), [(0, 1), (2, 3)]) + + def test_empty_graph(self): + G = nx.empty_graph(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(sorted(T), list(range(3))) + assert T.number_of_edges() == 0 + + def test_attributes(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1, color="red", distance=7) + G.add_edge(2, 3, weight=1, color="green", distance=2) + G.add_edge(1, 3, weight=10, color="blue", distance=1) + G.graph["foo"] = "bar" + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert T.graph == G.graph + assert nodes_equal(T, G) + for u, v in T.edges(): + assert T.adj[u][v] == G.adj[u][v] + + def test_weight_attribute(self): + G = nx.Graph() + G.add_edge(0, 1, weight=1, distance=7) + G.add_edge(0, 2, weight=30, distance=1) + G.add_edge(1, 2, weight=1, distance=1) + G.add_node(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 2), (1, 2)]) + T = nx.maximum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 1), (0, 2)]) + + +class TestBoruvka(MinimumSpanningTreeTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Borůvka's algorithm. + """ + + algorithm = "boruvka" + + def test_unicode_name(self): + """Tests that using a Unicode string can correctly indicate + Borůvka's algorithm. + """ + edges = nx.minimum_spanning_edges(self.G, algorithm="borůvka") + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + +class MultigraphMSTTestBase(MinimumSpanningTreeTestBase): + # Abstract class + + def test_multigraph_keys_min(self): + """Tests that the minimum spanning edges of a multigraph + preserves edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + min_edges = nx.minimum_spanning_edges + mst_edges = min_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "b")], list(mst_edges)) + + def test_multigraph_keys_max(self): + """Tests that the maximum spanning edges of a multigraph + preserves edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + max_edges = nx.maximum_spanning_edges + mst_edges = max_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "a")], list(mst_edges)) + + +class TestKruskal(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Kruskal's algorithm. 
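+
+    Kruskal's algorithm scans the edges in order of weight and keeps each
+    edge that joins two previously disconnected components.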
+ """ + + algorithm = "kruskal" + + def test_key_data_bool(self): + """Tests that the keys and data values are included in + MST edges based on whether keys and data parameters are + true or false""" + G = nx.MultiGraph() + G.add_edge(1, 2, key=1, weight=2) + G.add_edge(1, 2, key=2, weight=3) + G.add_edge(3, 2, key=1, weight=2) + G.add_edge(3, 1, key=1, weight=4) + + # keys are included and data is not included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=True, data=False + ) + assert edges_equal([(1, 2, 1), (2, 3, 1)], list(mst_edges)) + + # keys are not included and data is included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=False, data=True + ) + assert edges_equal( + [(1, 2, {"weight": 2}), (2, 3, {"weight": 2})], list(mst_edges) + ) + + # both keys and data are not included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=False, data=False + ) + assert edges_equal([(1, 2), (2, 3)], list(mst_edges)) + + # both keys and data are included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=True, data=True + ) + assert edges_equal( + [(1, 2, 1, {"weight": 2}), (2, 3, 1, {"weight": 2})], list(mst_edges) + ) + + +class TestPrim(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Prim's algorithm. + """ + + algorithm = "prim" + + def test_prim_mst_edges_simple_graph(self): + H = nx.Graph() + H.add_edge(1, 2, key=2, weight=3) + H.add_edge(3, 2, key=1, weight=2) + H.add_edge(3, 1, key=1, weight=4) + + mst_edges = nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=True) + assert edges_equal( + [(1, 2, {"key": 2, "weight": 3}), (2, 3, {"key": 1, "weight": 2})], + list(mst_edges), + ) + + def test_ignore_nan(self): + """Tests that the edges with NaN weights are ignored or + raise an Error based on ignore_nan is true or false""" + H = nx.MultiGraph() + H.add_edge(1, 2, key=1, weight=float("nan")) + H.add_edge(1, 2, key=2, weight=3) + H.add_edge(3, 2, key=1, weight=2) + H.add_edge(3, 1, key=1, weight=4) + + # NaN weight edges are ignored when ignore_nan=True + mst_edges = nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=True) + assert edges_equal( + [(1, 2, 2, {"weight": 3}), (2, 3, 1, {"weight": 2})], list(mst_edges) + ) + + # NaN weight edges raise Error when ignore_nan=False + with pytest.raises(ValueError): + list(nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=False)) + + def test_multigraph_keys_tree(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 1)], list(T.edges(data="weight"))) + + def test_multigraph_keys_tree_max(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.maximum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 2)], list(T.edges(data="weight"))) + + +class TestSpanningTreeIterator: + """ + Tests the spanning tree iterator on the example graph in the 2005 Sörensen + and Janssens paper An Algorithm to Generate all Spanning Trees of a Graph in + Order of Increasing Cost + """ + + def setup_method(self): + # Original Graph + edges = [(0, 1, 5), (1, 2, 4), (1, 4, 6), (2, 3, 5), (2, 4, 7), (3, 4, 3)] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + # List of lists of spanning trees in increasing order + self.spanning_trees = [ + # 1, MST, cost = 17 + [ + (0, 1, {"weight": 
5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 2, cost = 18 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (3, 4, {"weight": 3}), + ], + # 3, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 4, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 5, cost = 20 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + ], + # 6, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 7, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + # 8, cost = 23 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + ] + + def test_minimum_spanning_tree_iterator(self): + """ + Tests that the spanning trees are correctly returned in increasing order + """ + tree_index = 0 + for tree in nx.SpanningTreeIterator(self.G): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index += 1 + + def test_maximum_spanning_tree_iterator(self): + """ + Tests that the spanning trees are correctly returned in decreasing order + """ + tree_index = 7 + for tree in nx.SpanningTreeIterator(self.G, minimum=False): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index -= 1 + + +class TestSpanningTreeMultiGraphIterator: + """ + Uses the same graph as the above class but with an added edge of twice the weight. + """ + + def setup_method(self): + # New graph + edges = [ + (0, 1, 5), + (0, 1, 10), + (1, 2, 4), + (1, 2, 8), + (1, 4, 6), + (1, 4, 12), + (2, 3, 5), + (2, 3, 10), + (2, 4, 7), + (2, 4, 14), + (3, 4, 3), + (3, 4, 6), + ] + self.G = nx.MultiGraph() + self.G.add_weighted_edges_from(edges) + + # There are 128 trees. I'd rather not list all 128 here, and computing them + # on such a small graph actually doesn't take that long. + from itertools import combinations + + self.spanning_trees = [] + for e in combinations(self.G.edges, 4): + tree = self.G.edge_subgraph(e) + if nx.is_tree(tree): + self.spanning_trees.append(sorted(tree.edges(keys=True, data=True))) + + def test_minimum_spanning_tree_iterator_multigraph(self): + """ + Tests that the spanning trees are correctly returned in increasing order + """ + tree_index = 0 + last_weight = 0 + for tree in nx.SpanningTreeIterator(self.G): + actual = sorted(tree.edges(keys=True, data=True)) + weight = sum([e[3]["weight"] for e in actual]) + assert actual in self.spanning_trees + assert weight >= last_weight + tree_index += 1 + + def test_maximum_spanning_tree_iterator_multigraph(self): + """ + Tests that the spanning trees are correctly returned in decreasing order + """ + tree_index = 127 + # Maximum weight tree is 46 + last_weight = 50 + for tree in nx.SpanningTreeIterator(self.G, minimum=False): + actual = sorted(tree.edges(keys=True, data=True)) + weight = sum([e[3]["weight"] for e in actual]) + assert actual in self.spanning_trees + assert weight <= last_weight + tree_index -= 1 + + +def test_random_spanning_tree_multiplicative_small(): + """ + Using a fixed seed, sample one tree for repeatability. 
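+
+    The edge weights are exp(gamma), so under the multiplicative model the
+    probability of a tree is proportional to the product of its edge weights.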
+ """ + from math import exp + + pytest.importorskip("scipy") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + solution_edges = [(2, 3), (3, 4), (0, 5), (5, 4), (4, 1)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=42) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_multiplicative_large(): + """ + Sample many trees from the distribution created in the last test + """ + from math import exp + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + # Find the multiplicative weight for each tree. + total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the multiplicative weight of the spanning tree + weight = 1 + for u, v, d in t.edges(data="lambda_key"): + weight *= d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.15. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # python 3.7 dict order is guaranteed so the expected and actual data will + # have the same order. + sample_size = 1200 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to sample_spanning_tree we will get the same tree each time. + # Instead, we can create our own random number generator with a fixed seed + # and pass those into sample_spanning_tree. + rng = Random(37) + for _ in range(sample_size): + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=rng) + assert nx.is_tree(sampled_tree) + + for t in tree_expected: + if nx.utils.edges_equal(t.edges, sampled_tree.edges): + tree_actual[t] += 1 + break + + # Conduct a Chi squared test to see if the actual distribution matches the + # expected one at an alpha = 0.05 significance level. + # + # H_0: The distribution of trees in tree_actual matches the normalized product + # of the edge weights in the tree. + # + # H_a: The distribution of trees in tree_actual follows some other + # distribution of spanning trees. 
+ _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values())) + + # Assert that p is greater than the significance level so that we do not + # reject the null hypothesis + assert not p < 0.05 + + +def test_random_spanning_tree_additive_small(): + """ + Sample a single spanning tree from the additive method. + """ + pytest.importorskip("scipy") + + edges = { + (0, 1): 1, + (0, 2): 1, + (0, 5): 3, + (1, 2): 2, + (1, 4): 3, + (2, 3): 3, + (5, 3): 4, + (5, 4): 5, + (4, 3): 4, + } + + # Build the graph + G = nx.Graph() + for u, v in edges: + G.add_edge(u, v, weight=edges[(u, v)]) + + solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree( + G, weight="weight", multiplicative=False, seed=37 + ) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_additive_large(): + """ + Sample many spanning trees from the additive method. + """ + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + edges = { + (0, 1): 1, + (0, 2): 1, + (0, 5): 3, + (1, 2): 2, + (1, 4): 3, + (2, 3): 3, + (5, 3): 4, + (5, 4): 5, + (4, 3): 4, + } + + # Build the graph + G = nx.Graph() + for u, v in edges: + G.add_edge(u, v, weight=edges[(u, v)]) + + # Find the additive weight for each tree. + total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the multiplicative weight of the spanning tree + weight = 0 + for u, v, d in t.edges(data="weight"): + weight += d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.07. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # python 3.7 dict order is guaranteed so the expected and actual data will + # have the same order. + sample_size = 500 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to sample_spanning_tree we will get the same tree each time. + # Instead, we can create our own random number generator with a fixed seed + # and pass those into sample_spanning_tree. + rng = Random(37) + for _ in range(sample_size): + sampled_tree = nx.random_spanning_tree( + G, "weight", multiplicative=False, seed=rng + ) + assert nx.is_tree(sampled_tree) + + for t in tree_expected: + if nx.utils.edges_equal(t.edges, sampled_tree.edges): + tree_actual[t] += 1 + break + + # Conduct a Chi squared test to see if the actual distribution matches the + # expected one at an alpha = 0.05 significance level. + # + # H_0: The distribution of trees in tree_actual matches the normalized product + # of the edge weights in the tree. 
+ # + # H_a: The distribution of trees in tree_actual follows some other + # distribution of spanning trees. + _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values())) + + # Assert that p is greater than the significance level so that we do not + # reject the null hypothesis + assert not p < 0.05 + + +def test_random_spanning_tree_empty_graph(): + G = nx.Graph() + rst = nx.tree.random_spanning_tree(G) + assert len(rst.nodes) == 0 + assert len(rst.edges) == 0 + + +def test_random_spanning_tree_single_node_graph(): + G = nx.Graph() + G.add_node(0) + rst = nx.tree.random_spanning_tree(G) + assert len(rst.nodes) == 1 + assert len(rst.edges) == 0 + + +def test_random_spanning_tree_single_node_loop(): + G = nx.Graph() + G.add_node(0) + G.add_edge(0, 0) + rst = nx.tree.random_spanning_tree(G) + assert len(rst.nodes) == 1 + assert len(rst.edges) == 0 + + +class TestNumberSpanningTrees: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + sp = pytest.importorskip("scipy") + + def test_nst_disconnected(self): + G = nx.empty_graph(2) + assert np.isclose(nx.number_of_spanning_trees(G), 0) + + def test_nst_no_nodes(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXPointlessConcept): + nx.number_of_spanning_trees(G) + + def test_nst_weight(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=1) + G.add_edge(2, 3, weight=2) + # weights are ignored + assert np.isclose(nx.number_of_spanning_trees(G), 3) + # including weight + assert np.isclose(nx.number_of_spanning_trees(G, weight="weight"), 5) + + def test_nst_negative_weight(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=-1) + G.add_edge(2, 3, weight=-2) + # weights are ignored + assert np.isclose(nx.number_of_spanning_trees(G), 3) + # including weight + assert np.isclose(nx.number_of_spanning_trees(G, weight="weight"), -1) + + def test_nst_selfloop(self): + # self-loops are ignored + G = nx.complete_graph(3) + G.add_edge(1, 1) + assert np.isclose(nx.number_of_spanning_trees(G), 3) + + def test_nst_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2) + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(2, 3) + assert np.isclose(nx.number_of_spanning_trees(G), 5) + + def test_nst_complete_graph(self): + # this is known as Cayley's formula + N = 5 + G = nx.complete_graph(N) + assert np.isclose(nx.number_of_spanning_trees(G), N ** (N - 2)) + + def test_nst_path_graph(self): + G = nx.path_graph(5) + assert np.isclose(nx.number_of_spanning_trees(G), 1) + + def test_nst_cycle_graph(self): + G = nx.cycle_graph(5) + assert np.isclose(nx.number_of_spanning_trees(G), 5) + + def test_nst_directed_noroot(self): + G = nx.empty_graph(3, create_using=nx.MultiDiGraph) + with pytest.raises(nx.NetworkXError): + nx.number_of_spanning_trees(G) + + def test_nst_directed_root_not_exist(self): + G = nx.empty_graph(3, create_using=nx.MultiDiGraph) + with pytest.raises(nx.NetworkXError): + nx.number_of_spanning_trees(G, root=42) + + def test_nst_directed_not_weak_connected(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(3, 4) + assert np.isclose(nx.number_of_spanning_trees(G, root=1), 0) + + def test_nst_directed_cycle_graph(self): + G = nx.DiGraph() + G = nx.cycle_graph(7, G) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 1) + + def test_nst_directed_complete_graph(self): + G = nx.DiGraph() + G = nx.complete_graph(7, G) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 7**5) + + def test_nst_directed_multi(self): + G = 
nx.MultiDiGraph() + G = nx.cycle_graph(3, G) + G.add_edge(1, 2) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 2) + + def test_nst_directed_selfloop(self): + G = nx.MultiDiGraph() + G = nx.cycle_graph(3, G) + G.add_edge(1, 1) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 1) + + def test_nst_directed_weak_connected(self): + G = nx.MultiDiGraph() + G = nx.cycle_graph(3, G) + G.remove_edge(1, 2) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 0) + + def test_nst_directed_weighted(self): + # from root=1: + # arborescence 1: 1->2, 1->3, weight=2*1 + # arborescence 2: 1->2, 2->3, weight=2*3 + G = nx.DiGraph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 3, weight=1) + G.add_edge(2, 3, weight=3) + Nst = nx.number_of_spanning_trees(G, root=1, weight="weight") + assert np.isclose(Nst, 8) + Nst = nx.number_of_spanning_trees(G, root=2, weight="weight") + assert np.isclose(Nst, 0) + Nst = nx.number_of_spanning_trees(G, root=3, weight="weight") + assert np.isclose(Nst, 0) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_operations.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_operations.py new file mode 100644 index 0000000..284d94e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_operations.py @@ -0,0 +1,53 @@ +from itertools import chain + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def _check_custom_label_attribute(input_trees, res_tree, label_attribute): + res_attr_dict = nx.get_node_attributes(res_tree, label_attribute) + res_attr_set = set(res_attr_dict.values()) + input_label = (tree for tree, root in input_trees) + input_label_set = set(chain.from_iterable(input_label)) + return res_attr_set == input_label_set + + +def test_empty_sequence(): + """Joining the empty sequence results in the tree with one node.""" + T = nx.join_trees([]) + assert len(T) == 1 + assert T.number_of_edges() == 0 + + +def test_single(): + """Joining just one tree yields a tree with one more node.""" + T = nx.empty_graph(1) + trees = [(T, 0)] + actual_with_label = nx.join_trees(trees, label_attribute="custom_label") + expected = nx.path_graph(2) + assert nodes_equal(list(expected), list(actual_with_label)) + assert edges_equal(list(expected.edges()), list(actual_with_label.edges())) + + +def test_basic(): + """Joining multiple subtrees at a root node.""" + trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] + expected = nx.full_rary_tree(2, 2**3 - 1) + actual = nx.join_trees(trees, label_attribute="old_labels") + assert nx.is_isomorphic(actual, expected) + assert _check_custom_label_attribute(trees, actual, "old_labels") + + actual_without_label = nx.join_trees(trees) + assert nx.is_isomorphic(actual_without_label, expected) + # check that no labels were stored + assert all(not data for _, data in actual_without_label.nodes(data=True)) + + +def test_first_label(): + """Test the functionality of the first_label argument.""" + T1 = nx.path_graph(3) + T2 = nx.path_graph(2) + actual = nx.join_trees([(T1, 0), (T2, 0)], first_label=10) + expected_nodes = set(range(10, 16)) + assert set(actual.nodes()) == expected_nodes + assert set(actual.neighbors(10)) == {11, 14} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_recognition.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_recognition.py new file mode 100644 index 0000000..105f5a8 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/networkx/algorithms/tree/tests/test_recognition.py @@ -0,0 +1,174 @@ +import pytest + +import networkx as nx + + +class TestTreeRecognition: + graph = nx.Graph + multigraph = nx.MultiGraph + + @classmethod + def setup_class(cls): + cls.T1 = cls.graph() + + cls.T2 = cls.graph() + cls.T2.add_node(1) + + cls.T3 = cls.graph() + cls.T3.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T3.add_edges_from(edges) + + cls.T5 = cls.multigraph() + cls.T5.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T5.add_edges_from(edges) + + cls.T6 = cls.graph() + cls.T6.add_nodes_from([6, 7]) + cls.T6.add_edge(6, 7) + + cls.F1 = nx.compose(cls.T6, cls.T3) + + cls.N4 = cls.graph() + cls.N4.add_node(1) + cls.N4.add_edge(1, 1) + + cls.N5 = cls.graph() + cls.N5.add_nodes_from(range(5)) + + cls.N6 = cls.graph() + cls.N6.add_nodes_from(range(3)) + cls.N6.add_edges_from([(0, 1), (1, 2), (2, 0)]) + + cls.NF1 = nx.compose(cls.T6, cls.N6) + + def test_null_tree(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.graph()) + + def test_null_tree2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.multigraph()) + + def test_null_forest(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.graph()) + + def test_null_forest2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.multigraph()) + + def test_is_tree(self): + assert nx.is_tree(self.T2) + assert nx.is_tree(self.T3) + assert nx.is_tree(self.T5) + + def test_is_not_tree(self): + assert not nx.is_tree(self.N4) + assert not nx.is_tree(self.N5) + assert not nx.is_tree(self.N6) + + def test_is_forest(self): + assert nx.is_forest(self.T2) + assert nx.is_forest(self.T3) + assert nx.is_forest(self.T5) + assert nx.is_forest(self.F1) + assert nx.is_forest(self.N5) + + def test_is_not_forest(self): + assert not nx.is_forest(self.N4) + assert not nx.is_forest(self.N6) + assert not nx.is_forest(self.NF1) + + +class TestDirectedTreeRecognition(TestTreeRecognition): + graph = nx.DiGraph + multigraph = nx.MultiDiGraph + + +def test_disconnected_graph(): + # https://github.com/networkx/networkx/issues/1144 + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + +def test_dag_nontree(): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (0, 2), (1, 2)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_multicycle(): + G = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (0, 1)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_emptybranch(): + G = nx.DiGraph() + G.add_nodes_from(range(10)) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_is_branching_empty_graph_raises(): + G = nx.DiGraph() + with pytest.raises(nx.NetworkXPointlessConcept, match="G has no nodes."): + nx.is_branching(G) + + +def test_path(): + G = nx.DiGraph() + nx.add_path(G, range(5)) + assert nx.is_branching(G) + assert nx.is_arborescence(G) + + +def test_notbranching1(): + # Acyclic violation. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (1, 0)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notbranching2(): + # In-degree violation. 
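+    # Node 2 has in-degree 2 from the edges (0, 2) and (3, 2), so G cannot
+    # be a branching.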
+ G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (3, 2)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence1(): + # Not an arborescence due to not spanning. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (5, 6)]) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence2(): + # Not an arborescence due to in-degree violation. + G = nx.MultiDiGraph() + nx.add_path(G, range(5)) + G.add_edge(6, 4) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_is_arborescense_empty_graph_raises(): + G = nx.DiGraph() + with pytest.raises(nx.NetworkXPointlessConcept, match="G has no nodes."): + nx.is_arborescence(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/triads.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/triads.py new file mode 100644 index 0000000..2442d6f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/triads.py @@ -0,0 +1,500 @@ +# See https://github.com/networkx/networkx/pull/1474 +# Copyright 2011 Reya Group +# Copyright 2011 Alex Levenson +# Copyright 2011 Diederik van Liere +"""Functions for analyzing triads of a graph.""" + +from collections import defaultdict +from itertools import combinations, permutations + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = [ + "triadic_census", + "is_triad", + "all_triads", + "triads_by_type", + "triad_type", +] + +#: The integer codes representing each type of triad. +#: +#: Triads that are the same up to symmetry have the same code. +TRICODES = ( + 1, + 2, + 2, + 3, + 2, + 4, + 6, + 8, + 2, + 6, + 5, + 7, + 3, + 8, + 7, + 11, + 2, + 6, + 4, + 8, + 5, + 9, + 9, + 13, + 6, + 10, + 9, + 14, + 7, + 14, + 12, + 15, + 2, + 5, + 6, + 7, + 6, + 9, + 10, + 14, + 4, + 9, + 9, + 12, + 8, + 13, + 14, + 15, + 3, + 7, + 8, + 11, + 7, + 12, + 14, + 15, + 8, + 14, + 13, + 15, + 11, + 15, + 15, + 16, +) + +#: The names of each type of triad. The order of the elements is +#: important: it corresponds to the tricodes given in :data:`TRICODES`. +TRIAD_NAMES = ( + "003", + "012", + "102", + "021D", + "021U", + "021C", + "111D", + "111U", + "030T", + "030C", + "201", + "120D", + "120U", + "120C", + "210", + "300", +) + + +#: A dictionary mapping triad code to triad name. +TRICODE_TO_NAME = {i: TRIAD_NAMES[code - 1] for i, code in enumerate(TRICODES)} + + +def _tricode(G, v, u, w): + """Returns the integer code of the given triad. + + This is some fancy magic that comes from Batagelj and Mrvar's paper. It + treats each edge joining a pair of `v`, `u`, and `w` as a bit in + the binary representation of an integer. + + """ + combos = ((v, u, 1), (u, v, 2), (v, w, 4), (w, v, 8), (u, w, 16), (w, u, 32)) + return sum(x for u, v, x in combos if v in G[u]) + + +@not_implemented_for("undirected") +@nx._dispatchable +def triadic_census(G, nodelist=None): + """Determines the triadic census of a directed graph. + + The triadic census is a count of how many of the 16 possible types of + triads are present in a directed graph. If a list of nodes is passed, then + only those triads are taken into account which have elements of nodelist in them. 
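+    A triad is counted whenever at least one of its three nodes is in
+    `nodelist`.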
+ + Parameters + ---------- + G : digraph + A NetworkX DiGraph + nodelist : list + List of nodes for which you want to calculate triadic census + + Returns + ------- + census : dict + Dictionary with triad type as keys and number of occurrences as values. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1), (4, 2)]) + >>> triadic_census = nx.triadic_census(G) + >>> for key, value in triadic_census.items(): + ... print(f"{key}: {value}") + 003: 0 + 012: 0 + 102: 0 + 021D: 0 + 021U: 0 + 021C: 0 + 111D: 0 + 111U: 0 + 030T: 2 + 030C: 2 + 201: 0 + 120D: 0 + 120U: 0 + 120C: 0 + 210: 0 + 300: 0 + + Notes + ----- + This algorithm has complexity $O(m)$ where $m$ is the number of edges in + the graph. + + For undirected graphs, the triadic census can be computed by first converting + the graph into a directed graph using the ``G.to_directed()`` method. + After this conversion, only the triad types 003, 102, 201 and 300 will be + present in the undirected scenario. + + Raises + ------ + ValueError + If `nodelist` contains duplicate nodes or nodes not in `G`. + If you want to ignore this you can preprocess with `set(nodelist) & G.nodes` + + See also + -------- + triad_graph + + References + ---------- + .. [1] Vladimir Batagelj and Andrej Mrvar, A subquadratic triad census + algorithm for large sparse networks with small maximum degree, + University of Ljubljana, + http://vlado.fmf.uni-lj.si/pub/networks/doc/triads/triads.pdf + + """ + nodeset = set(G.nbunch_iter(nodelist)) + if nodelist is not None and len(nodelist) != len(nodeset): + raise ValueError("nodelist includes duplicate nodes or nodes not in G") + + N = len(G) + Nnot = N - len(nodeset) # can signal special counting for subset of nodes + + # create an ordering of nodes with nodeset nodes first + m = {n: i for i, n in enumerate(nodeset)} + if Nnot: + # add non-nodeset nodes later in the ordering + not_nodeset = G.nodes - nodeset + m.update((n, i + N) for i, n in enumerate(not_nodeset)) + + # build all_neighbor dicts for easy counting + # After Python 3.8 can leave off these keys(). Speedup also using G._pred + # nbrs = {n: G._pred[n].keys() | G._succ[n].keys() for n in G} + nbrs = {n: G.pred[n].keys() | G.succ[n].keys() for n in G} + dbl_nbrs = {n: G.pred[n].keys() & G.succ[n].keys() for n in G} + + if Nnot: + sgl_nbrs = {n: G.pred[n].keys() ^ G.succ[n].keys() for n in not_nodeset} + # find number of edges not incident to nodes in nodeset + sgl = sum(1 for n in not_nodeset for nbr in sgl_nbrs[n] if nbr not in nodeset) + sgl_edges_outside = sgl // 2 + dbl = sum(1 for n in not_nodeset for nbr in dbl_nbrs[n] if nbr not in nodeset) + dbl_edges_outside = dbl // 2 + + # Initialize the count for each triad to be zero. + census = dict.fromkeys(TRIAD_NAMES, 0) + # Main loop over nodes + for v in nodeset: + vnbrs = nbrs[v] + dbl_vnbrs = dbl_nbrs[v] + if Nnot: + # set up counts of edges attached to v. + sgl_unbrs_bdy = sgl_unbrs_out = dbl_unbrs_bdy = dbl_unbrs_out = 0 + for u in vnbrs: + if m[u] <= m[v]: + continue + unbrs = nbrs[u] + neighbors = (vnbrs | unbrs) - {u, v} + # Count connected triads. + for w in neighbors: + if m[u] < m[w] or (m[v] < m[w] < m[u] and v not in nbrs[w]): + code = _tricode(G, v, u, w) + census[TRICODE_TO_NAME[code]] += 1 + + # Use a formula for dyadic triads with edge incident to v + if u in dbl_vnbrs: + census["102"] += N - len(neighbors) - 2 + else: + census["012"] += N - len(neighbors) - 2 + + # Count edges attached to v. 
Subtract later to get triads with v isolated + # _out are (u,unbr) for unbrs outside boundary of nodeset + # _bdy are (u,unbr) for unbrs on boundary of nodeset (get double counted) + if Nnot and u not in nodeset: + sgl_unbrs = sgl_nbrs[u] + sgl_unbrs_bdy += len(sgl_unbrs & vnbrs - nodeset) + sgl_unbrs_out += len(sgl_unbrs - vnbrs - nodeset) + dbl_unbrs = dbl_nbrs[u] + dbl_unbrs_bdy += len(dbl_unbrs & vnbrs - nodeset) + dbl_unbrs_out += len(dbl_unbrs - vnbrs - nodeset) + # if nodeset == G.nodes, skip this b/c we will find the edge later. + if Nnot: + # Count edges outside nodeset not connected with v (v isolated triads) + census["012"] += sgl_edges_outside - (sgl_unbrs_out + sgl_unbrs_bdy // 2) + census["102"] += dbl_edges_outside - (dbl_unbrs_out + dbl_unbrs_bdy // 2) + + # calculate null triads: "003" + # null triads = total number of possible triads - all found triads + total_triangles = (N * (N - 1) * (N - 2)) // 6 + triangles_without_nodeset = (Nnot * (Nnot - 1) * (Nnot - 2)) // 6 + total_census = total_triangles - triangles_without_nodeset + census["003"] = total_census - sum(census.values()) + + return census + + +@nx._dispatchable +def is_triad(G): + """Returns True if the graph G is a triad, else False. + + Parameters + ---------- + G : graph + A NetworkX Graph + + Returns + ------- + istriad : boolean + Whether G is a valid triad + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> nx.is_triad(G) + True + >>> G.add_edge(0, 1) + >>> nx.is_triad(G) + False + """ + if isinstance(G, nx.Graph): + if G.order() == 3 and nx.is_directed(G): + if not any((n, n) in G.edges() for n in G.nodes()): + return True + return False + + +@not_implemented_for("undirected") +@nx._dispatchable(returns_graph=True) +def all_triads(G): + """A generator of all possible triads in G. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + all_triads : generator of DiGraphs + Generator of triads (order-3 DiGraphs) + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1), (4, 2)]) + >>> for triad in nx.all_triads(G): + ... print(triad.edges) + [(1, 2), (2, 3), (3, 1)] + [(1, 2), (4, 1), (4, 2)] + [(3, 1), (3, 4), (4, 1)] + [(2, 3), (3, 4), (4, 2)] + + """ + triplets = combinations(G.nodes(), 3) + for triplet in triplets: + yield G.subgraph(triplet).copy() + + +@not_implemented_for("undirected") +@nx._dispatchable +def triads_by_type(G): + """Returns a list of all triads for each triad type in a directed graph. + There are exactly 16 different types of triads possible. Suppose 1, 2, 3 are three + nodes, they will be classified as a particular triad type if their connections + are as follows: + + - 003: 1, 2, 3 + - 012: 1 -> 2, 3 + - 102: 1 <-> 2, 3 + - 021D: 1 <- 2 -> 3 + - 021U: 1 -> 2 <- 3 + - 021C: 1 -> 2 -> 3 + - 111D: 1 <-> 2 <- 3 + - 111U: 1 <-> 2 -> 3 + - 030T: 1 -> 2 -> 3, 1 -> 3 + - 030C: 1 <- 2 <- 3, 1 -> 3 + - 201: 1 <-> 2 <-> 3 + - 120D: 1 <- 2 -> 3, 1 <-> 3 + - 120U: 1 -> 2 <- 3, 1 <-> 3 + - 120C: 1 -> 2 -> 3, 1 <-> 3 + - 210: 1 -> 2 <-> 3, 1 <-> 3 + - 300: 1 <-> 2 <-> 3, 1 <-> 3 + + Refer to the :doc:`example gallery ` + for visual examples of the triad types. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + tri_by_type : dict + Dictionary with triad types as keys and lists of triads as values. 
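+        Only triad types that occur in ``G`` appear as keys; types with no
+        occurrences are absent.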
+
+ Examples
+ --------
+ >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 1), (5, 6), (5, 4), (6, 7)])
+ >>> triads = nx.triads_by_type(G)
+ >>> triads["120C"][0].edges()
+ OutEdgeView([(1, 2), (1, 3), (2, 3), (3, 1)])
+ >>> triads["012"][0].edges()
+ OutEdgeView([(1, 2)])
+
+ References
+ ----------
+ .. [1] Snijders, T. (2012). "Transitivity and triads." University of
+ Oxford.
+ https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
+ """
+ # num_triads = o * (o - 1) * (o - 2) // 6
+ # if num_triads > TRIAD_LIMIT: print(WARNING)
+ all_tri = all_triads(G)
+ tri_by_type = defaultdict(list)
+ for triad in all_tri:
+ name = triad_type(triad)
+ tri_by_type[name].append(triad)
+ return tri_by_type
+
+
+ @not_implemented_for("undirected")
+ @nx._dispatchable
+ def triad_type(G):
+ """Returns the sociological triad type for a triad.
+
+ Parameters
+ ----------
+ G : digraph
+ A NetworkX DiGraph with 3 nodes
+
+ Returns
+ -------
+ triad_type : str
+ A string identifying the triad type
+
+ Examples
+ --------
+ >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
+ >>> nx.triad_type(G)
+ '030C'
+ >>> G.add_edge(1, 3)
+ >>> nx.triad_type(G)
+ '120C'
+
+ Notes
+ -----
+ There can be 6 unique edges in a triad (order-3 DiGraph), so there are
+ 2^6 = 64 unique triads on 3 labeled nodes. These 64 triads each display
+ exactly 1 of 16 topologies (up to permutation of the three nodes). These
+ topologies are identified by the following notation:
+
+ {m}{a}{n}{type} (for example: 111D, 210, 102)
+
+ Here:
+
+ {m} = number of mutual ties (takes 0, 1, 2, 3); a mutual tie is (0,1)
+ AND (1,0)
+ {a} = number of asymmetric ties (takes 0, 1, 2, 3); an asymmetric tie
+ is (0,1) BUT NOT (1,0) or vice versa
+ {n} = number of null ties (takes 0, 1, 2, 3); a null tie is NEITHER
+ (0,1) NOR (1,0)
+ {type} = a letter (takes U, D, C, T) corresponding to up, down, cyclical
+ and transitive. This is only used for topologies that can have
+ more than one form (e.g., 021D and 021U).
+
+ References
+ ----------
+ .. [1] Snijders, T. (2012). "Transitivity and triads." University of
+ Oxford.
+ https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
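+
+ As a quick check of the notation above: "021D" has 0 mutual, 2
+ asymmetric, and 1 null tie, in the "down" orientation where a single
+ node points at the other two:
+
+ >>> nx.triad_type(nx.DiGraph([(1, 2), (1, 3)]))
+ '021D'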
+ """
+ if not is_triad(G):
+ raise nx.NetworkXAlgorithmError("G is not a triad (order-3 DiGraph)")
+ num_edges = len(G.edges())
+ if num_edges == 0:
+ return "003"
+ elif num_edges == 1:
+ return "012"
+ elif num_edges == 2:
+ e1, e2 = G.edges()
+ if set(e1) == set(e2):
+ return "102"
+ elif e1[0] == e2[0]:
+ return "021D"
+ elif e1[1] == e2[1]:
+ return "021U"
+ elif e1[1] == e2[0] or e2[1] == e1[0]:
+ return "021C"
+ elif num_edges == 3:
+ for e1, e2, e3 in permutations(G.edges(), 3):
+ if set(e1) == set(e2):
+ if e3[0] in e1:
+ return "111U"
+ # e3[1] in e1:
+ return "111D"
+ elif set(e1).symmetric_difference(set(e2)) == set(e3):
+ # a directed 3-cycle uses every node as a source exactly once
+ if {e1[0], e2[0], e3[0]} == set(G.nodes()):
+ return "030C"
+ # e3 == (e1[0], e2[1]) and e2 == (e1[1], e3[1]):
+ return "030T"
+ elif num_edges == 4:
+ for e1, e2, e3, e4 in permutations(G.edges(), 4):
+ if set(e1) == set(e2):
+ # identify pair of symmetric edges (which necessarily exists)
+ if set(e3) == set(e4):
+ return "201"
+ if {e3[0]} == {e4[0]} == set(e3).intersection(set(e4)):
+ return "120D"
+ if {e3[1]} == {e4[1]} == set(e3).intersection(set(e4)):
+ return "120U"
+ if e3[1] == e4[0]:
+ return "120C"
+ elif num_edges == 5:
+ return "210"
+ elif num_edges == 6:
+ return "300"
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/vitality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/vitality.py
new file mode 100644
index 0000000..bf4b016
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/vitality.py
@@ -0,0 +1,76 @@
+"""
+Vitality measures.
+"""
+
+from functools import partial
+
+import networkx as nx
+
+__all__ = ["closeness_vitality"]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def closeness_vitality(G, node=None, weight=None, wiener_index=None):
+ """Returns the closeness vitality for nodes in the graph.
+
+ The *closeness vitality* of a node, defined in Section 3.6.2 of [1]_,
+ is the change in the sum of distances between all node pairs when
+ excluding that node.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A strongly-connected graph.
+
+ weight : string
+ The name of the edge attribute used as weight. This is passed
+ directly to the :func:`~networkx.wiener_index` function.
+
+ node : object
+ If specified, only the closeness vitality for this node will be
+ returned. Otherwise, a dictionary mapping each node to its
+ closeness vitality will be returned.
+
+ Other Parameters
+ ----------------
+ wiener_index : number
+ If you have already computed the Wiener index of the graph
+ `G`, you can provide that value here. Otherwise, it will be
+ computed for you.
+
+ Returns
+ -------
+ dictionary or float
+ If `node` is None, this function returns a dictionary
+ with nodes as keys and closeness vitality as the
+ value. Otherwise, it returns only the closeness vitality for the
+ specified `node`.
+
+ The closeness vitality of a node may be negative infinity if
+ removing that node would disconnect the graph.
+
+ Examples
+ --------
+ >>> G = nx.cycle_graph(3)
+ >>> nx.closeness_vitality(G)
+ {0: 2.0, 1: 2.0, 2: 2.0}
+
+ See Also
+ --------
+ closeness_centrality
+
+ References
+ ----------
+ .. [1] Ulrik Brandes, Thomas Erlebach (eds.).
+ *Network Analysis: Methodological Foundations*.
+ Springer, 2005.
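+
+ A usage sketch: when querying several individual nodes, the Wiener
+ index can be computed once and reused via the ``wiener_index``
+ keyword argument.
+
+ >>> wi = nx.wiener_index(G)
+ >>> nx.closeness_vitality(G, node=0, wiener_index=wi)
+ 2.0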
+ + + """ + if wiener_index is None: + wiener_index = nx.wiener_index(G, weight=weight) + if node is not None: + after = nx.wiener_index(G.subgraph(set(G) - {node}), weight=weight) + return wiener_index - after + vitality = partial(closeness_vitality, G, weight=weight, wiener_index=wiener_index) + return {v: vitality(node=v) for v in G} diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/voronoi.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/voronoi.py new file mode 100644 index 0000000..609a68d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/voronoi.py @@ -0,0 +1,86 @@ +"""Functions for computing the Voronoi cells of a graph.""" + +import networkx as nx +from networkx.utils import groups + +__all__ = ["voronoi_cells"] + + +@nx._dispatchable(edge_attrs="weight") +def voronoi_cells(G, center_nodes, weight="weight"): + """Returns the Voronoi cells centered at `center_nodes` with respect + to the shortest-path distance metric. + + If $C$ is a set of nodes in the graph and $c$ is an element of $C$, + the *Voronoi cell* centered at a node $c$ is the set of all nodes + $v$ that are closer to $c$ than to any other center node in $C$ with + respect to the shortest-path distance metric. [1]_ + + For directed graphs, this will compute the "outward" Voronoi cells, + as defined in [1]_, in which distance is measured from the center + nodes to the target node. For the "inward" Voronoi cells, use the + :meth:`DiGraph.reverse` method to reverse the orientation of the + edges before invoking this function on the directed graph. + + Parameters + ---------- + G : NetworkX graph + + center_nodes : set + A nonempty set of nodes in the graph `G` that represent the + center of the Voronoi cells. + + weight : string or function + The edge attribute (or an arbitrary function) representing the + weight of an edge. This keyword argument is as described in the + documentation for :func:`~networkx.multi_source_dijkstra_path`, + for example. + + Returns + ------- + dictionary + A mapping from center node to set of all nodes in the graph + closer to that center node than to any other center node. The + keys of the dictionary are the element of `center_nodes`, and + the values of the dictionary form a partition of the nodes of + `G`. + + Examples + -------- + To get only the partition of the graph induced by the Voronoi cells, + take the collection of all values in the returned dictionary:: + + >>> G = nx.path_graph(6) + >>> center_nodes = {0, 3} + >>> cells = nx.voronoi_cells(G, center_nodes) + >>> partition = set(map(frozenset, cells.values())) + >>> sorted(map(sorted, partition)) + [[0, 1], [2, 3, 4, 5]] + + Raises + ------ + ValueError + If `center_nodes` is empty. + + References + ---------- + .. [1] Erwig, Martin. (2000),"The graph Voronoi diagram with applications." + *Networks*, 36: 156--163. + https://doi.org/10.1002/1097-0037(200010)36:3<156::AID-NET2>3.0.CO;2-L + + """ + # Determine the shortest paths from any one of the center nodes to + # every node in the graph. + # + # This raises `ValueError` if `center_nodes` is an empty set. + paths = nx.multi_source_dijkstra_path(G, center_nodes, weight=weight) + # Determine the center node from which the shortest path originates. + nearest = {v: p[0] for v, p in paths.items()} + # Get the mapping from center node to all nodes closer to it than to + # any other center node. + cells = groups(nearest) + # We collect all unreachable nodes under a special key, if there are any. 
+ unreachable = set(G) - set(nearest)
+ if unreachable:
+ cells["unreachable"] = unreachable
+ return cells
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/walks.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/walks.py
new file mode 100644
index 0000000..0ef9dac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/walks.py
@@ -0,0 +1,79 @@
+"""Function for computing walks in a graph."""
+
+import networkx as nx
+
+__all__ = ["number_of_walks"]
+
+
+@nx._dispatchable
+def number_of_walks(G, walk_length):
+ """Returns the number of walks connecting each pair of nodes in `G`.
+
+ A *walk* is a sequence of nodes in which each adjacent pair of nodes
+ in the sequence is adjacent in the graph. A walk can repeat the same
+ edge and go in the opposite direction just as people can walk on a
+ set of paths, but standing still is not counted as part of the walk.
+
+ This function only counts the walks with `walk_length` edges. Note that
+ the number of nodes in the walk sequence is one more than `walk_length`.
+ The number of walks can grow very quickly on a larger graph
+ and with a larger walk length.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ walk_length : int
+ A nonnegative integer representing the length of a walk.
+
+ Returns
+ -------
+ dict
+ A dictionary of dictionaries in which outer keys are source
+ nodes, inner keys are target nodes, and inner values are the
+ number of walks of length `walk_length` connecting those nodes.
+
+ Raises
+ ------
+ ValueError
+ If `walk_length` is negative.
+
+ Examples
+ --------
+
+ >>> G = nx.Graph([(0, 1), (1, 2)])
+ >>> walks = nx.number_of_walks(G, 2)
+ >>> walks
+ {0: {0: 1, 1: 0, 2: 1}, 1: {0: 0, 1: 2, 2: 0}, 2: {0: 1, 1: 0, 2: 1}}
+ >>> total_walks = sum(sum(tgts.values()) for _, tgts in walks.items())
+
+ You can also get the number of walks from a specific source node using the
+ returned dictionary. For example, the number of walks of length 1 from node 0
+ can be found as follows:
+
+ >>> walks = nx.number_of_walks(G, 1)
+ >>> walks[0]
+ {0: 0, 1: 1, 2: 0}
+ >>> sum(walks[0].values()) # walks from 0 of length 1
+ 1
+
+ Similarly, a target node can also be specified:
+
+ >>> walks[0][1]
+ 1
+
+ """
+ import numpy as np
+
+ if walk_length < 0:
+ raise ValueError(f"`walk_length` cannot be negative: {walk_length}")
+
+ A = nx.adjacency_matrix(G, weight=None)
+ # TODO: Use matrix_power from scipy.sparse when available
+ # power = sp.sparse.linalg.matrix_power(A, walk_length)
+ power = np.linalg.matrix_power(A.toarray(), walk_length)
+ result = {
+ u: {v: power.item(u_idx, v_idx) for v_idx, v in enumerate(G)}
+ for u_idx, u in enumerate(G)
+ }
+ return result
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/wiener.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/wiener.py
new file mode 100644
index 0000000..ac3abe4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/wiener.py
@@ -0,0 +1,226 @@
+"""Functions related to the Wiener Index of a graph.
+
+The Wiener Index is a topological measure of a graph
+related to the distance between nodes and their degree.
+The Schultz Index and Gutman Index are similar measures.
+They are used to categorize molecules via the network of
+atoms connected by chemical bonds. The indices are
+correlated with functional aspects of the molecules.
+
+References
+----------
+.. [1] `Wikipedia: Wiener Index <https://en.wikipedia.org/wiki/Wiener_index>`_
+.. [2] M.V. Diudea and I. Gutman, Wiener-Type Topological Indices,
+ Croatica Chemica Acta, 71 (1998), 21-51.
+ https://hrcak.srce.hr/132323
+"""
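+
+# A quick orientation for the three indices on the path graph 0 - 1 - 2
+# (degrees 1, 2, 1; pair distances d(0,1) = d(1,2) = 1 and d(0,2) = 2):
+#   Wiener  = 1 + 1 + 2                   = 4
+#   Schultz = 1*(1+2) + 1*(2+1) + 2*(1+1) = 10
+#   Gutman  = 1*(1*2) + 1*(2*1) + 2*(1*1) = 6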
+
+import itertools as it
+
+import networkx as nx
+
+__all__ = ["wiener_index", "schultz_index", "gutman_index"]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def wiener_index(G, weight=None):
+ """Returns the Wiener index of the given graph.
+
+ The *Wiener index* of a graph is the sum of the shortest-path
+ (weighted) distances between each pair of reachable nodes.
+ For pairs of nodes in undirected graphs, only one orientation
+ of the pair is counted.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ weight : string or None, optional (default: None)
+ If None, every edge has weight 1.
+ If a string, use this edge attribute as the edge weight.
+ Any edge attribute not present defaults to 1.
+ The edge weights are used to compute shortest-path distances.
+
+ Returns
+ -------
+ number
+ The Wiener index of the graph `G`.
+
+ Raises
+ ------
+ NetworkXError
+ If the graph `G` is not connected.
+
+ Notes
+ -----
+ If a pair of nodes is not reachable, the distance is assumed to be
+ infinity. This means that for graphs that are not
+ strongly-connected, this function returns ``inf``.
+
+ The Wiener index is not usually defined for directed graphs; however,
+ this function uses the natural generalization of the Wiener index to
+ directed graphs.
+
+ Examples
+ --------
+ The Wiener index of the (unweighted) complete graph on *n* nodes
+ equals the number of pairs of the *n* nodes, since each pair of
+ nodes is at distance one::
+
+ >>> n = 10
+ >>> G = nx.complete_graph(n)
+ >>> nx.wiener_index(G) == n * (n - 1) / 2
+ True
+
+ Graphs that are not strongly-connected have infinite Wiener index::
+
+ >>> G = nx.empty_graph(2)
+ >>> nx.wiener_index(G)
+ inf
+
+ References
+ ----------
+ .. [1] `Wikipedia: Wiener Index <https://en.wikipedia.org/wiki/Wiener_index>`_
+ """
+ connected = nx.is_strongly_connected(G) if G.is_directed() else nx.is_connected(G)
+ if not connected:
+ return float("inf")
+
+ spl = nx.shortest_path_length(G, weight=weight)
+ total = sum(it.chain.from_iterable(nbrs.values() for node, nbrs in spl))
+ # Need to account for double counting pairs of nodes in undirected graphs.
+ return total if G.is_directed() else total / 2
+
+
+@nx.utils.not_implemented_for("directed")
+@nx.utils.not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def schultz_index(G, weight=None):
+ r"""Returns the Schultz Index (of the first kind) of `G`.
+
+ The *Schultz Index* [3]_ of a graph is the sum over all node pairs of
+ distances times the sum of degrees. Consider an undirected graph `G`.
+ For each node pair ``(u, v)`` compute ``dist(u, v) * (deg(u) + deg(v))``,
+ where ``dist`` is the shortest path length between two nodes and ``deg``
+ is the degree of a node.
+
+ The Schultz Index is the sum of these quantities over all (unordered)
+ pairs of nodes.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ The undirected graph of interest.
+ weight : string or None, optional (default: None)
+ If None, every edge has weight 1.
+ If a string, use this edge attribute as the edge weight.
+ Any edge attribute not present defaults to 1.
+ The edge weights are used to compute shortest-path distances.
+
+ Returns
+ -------
+ number
+ The first kind of Schultz Index of the graph `G`.
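+
+ For a small concrete check: in the star graph on four nodes, the three
+ center-leaf pairs contribute ``1 * (3 + 1)`` each and the three
+ leaf-leaf pairs contribute ``2 * (1 + 1)`` each, for a total of 24:
+
+ >>> nx.schultz_index(nx.star_graph(3))
+ 24.0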
+
+ Examples
+ --------
+ The Schultz Index of the (unweighted) complete graph on *n* nodes
+ equals the number of pairs of the *n* nodes times ``2 * (n - 1)``,
+ since each pair of nodes is at distance one and the sum of degrees
+ of any two nodes is ``2 * (n - 1)``.
+
+ >>> n = 10
+ >>> G = nx.complete_graph(n)
+ >>> nx.schultz_index(G) == (n * (n - 1) / 2) * (2 * (n - 1))
+ True
+
+ Graphs that are disconnected have an infinite Schultz Index:
+
+ >>> nx.schultz_index(nx.empty_graph(2))
+ inf
+
+ References
+ ----------
+ .. [1] I. Gutman, Selected properties of the Schultz molecular topological index,
+ J. Chem. Inf. Comput. Sci. 34 (1994), 1087–1089.
+ https://doi.org/10.1021/ci00021a009
+ .. [2] M.V. Diudea and I. Gutman, Wiener-Type Topological Indices,
+ Croatica Chemica Acta, 71 (1998), 21-51.
+ https://hrcak.srce.hr/132323
+ .. [3] H. P. Schultz, Topological organic chemistry. 1.
+ Graph theory and topological indices of alkanes,
+ J. Chem. Inf. Comput. Sci. 29 (1989), 239–257.
+
+ """
+ if not nx.is_connected(G):
+ return float("inf")
+
+ spl = nx.shortest_path_length(G, weight=weight)
+ # Weighted node degrees (plain degrees when ``weight`` is None).
+ d = dict(G.degree(weight=weight))
+ return sum(dist * (d[u] + d[v]) for u, info in spl for v, dist in info.items()) / 2
+
+
+@nx.utils.not_implemented_for("directed")
+@nx.utils.not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def gutman_index(G, weight=None):
+ r"""Returns the Gutman Index for the graph `G`.
+
+ The *Gutman Index* measures the topology of networks, especially for molecule
+ networks of atoms connected by bonds [1]_. It is also called the Schultz Index
+ of the second kind [2]_.
+
+ Consider an undirected graph `G` with node set ``V``.
+ The Gutman Index of a graph is the sum over all (unordered) pairs
+ of nodes ``(u, v)``, with distance ``dist(u, v)`` and degrees ``deg(u)``
+ and ``deg(v)``, of ``dist(u, v) * deg(u) * deg(v)``.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ weight : string or None, optional (default: None)
+ If None, every edge has weight 1.
+ If a string, use this edge attribute as the edge weight.
+ Any edge attribute not present defaults to 1.
+ The edge weights are used to compute shortest-path distances.
+
+ Returns
+ -------
+ number
+ The Gutman Index of the graph `G`.
+
+ Examples
+ --------
+ The Gutman Index of the (unweighted) complete graph on *n* nodes
+ equals the number of pairs of the *n* nodes times ``(n - 1) * (n - 1)``,
+ since each pair of nodes is at distance one and the product of degrees
+ of any two nodes is ``(n - 1) * (n - 1)``.
+
+ >>> n = 10
+ >>> G = nx.complete_graph(n)
+ >>> nx.gutman_index(G) == (n * (n - 1) / 2) * ((n - 1) * (n - 1))
+ True
+
+ Graphs that are disconnected have an infinite Gutman Index:
+
+ >>> G = nx.empty_graph(2)
+ >>> nx.gutman_index(G)
+ inf
+
+ References
+ ----------
+ .. [1] M.V. Diudea and I. Gutman, Wiener-Type Topological Indices,
+ Croatica Chemica Acta, 71 (1998), 21-51.
+ https://hrcak.srce.hr/132323
+ .. [2] I. Gutman, Selected properties of the Schultz molecular topological index,
+ J. Chem. Inf. Comput. Sci. 34 (1994), 1087–1089.
+ https://doi.org/10.1021/ci00021a009
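+
+ A small worked check: in the star graph on four nodes, the three
+ center-leaf pairs contribute ``1 * (3 * 1)`` each and the three
+ leaf-leaf pairs contribute ``2 * (1 * 1)`` each, for a total of 15:
+
+ >>> nx.gutman_index(nx.star_graph(3))
+ 15.0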
+
+ """
+ if not nx.is_connected(G):
+ return float("inf")
+
+ spl = nx.shortest_path_length(G, weight=weight)
+ # Weighted node degrees (plain degrees when ``weight`` is None).
+ d = dict(G.degree(weight=weight))
+ return sum(dist * d[u] * d[v] for u, vinfo in spl for v, dist in vinfo.items()) / 2
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__init__.py b/.venv/lib/python3.12/site-packages/networkx/classes/__init__.py
new file mode 100644
index 0000000..721fa8b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/classes/__init__.py
@@ -0,0 +1,13 @@
+from .graph import Graph
+from .digraph import DiGraph
+from .multigraph import MultiGraph
+from .multidigraph import MultiDiGraph
+
+from .function import *
+from .graphviews import subgraph_view, reverse_view
+
+from networkx.classes import filters
+
+from networkx.classes import coreviews
+from networkx.classes import graphviews
+from networkx.classes import reportviews
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..386449c
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/coreviews.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/coreviews.cpython-312.pyc
new file mode 100644
index 0000000..395c77a
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/coreviews.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/digraph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/digraph.cpython-312.pyc
new file mode 100644
index 0000000..cd6e8e2
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/digraph.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/filters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/filters.cpython-312.pyc
new file mode 100644
index 0000000..b25a33b
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/filters.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/function.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/function.cpython-312.pyc
new file mode 100644
index 0000000..2b11ee3
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/function.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/graph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/graph.cpython-312.pyc
new file mode 100644
index 0000000..41963c1
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/graph.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/graphviews.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/graphviews.cpython-312.pyc
new file mode 100644
index 0000000..b337cda
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/graphviews.cpython-312.pyc differ
diff --git
a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/multidigraph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/multidigraph.cpython-312.pyc new file mode 100644 index 0000000..42a936f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/multidigraph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/multigraph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/multigraph.cpython-312.pyc new file mode 100644 index 0000000..72e9aff Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/multigraph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/reportviews.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/reportviews.cpython-312.pyc new file mode 100644 index 0000000..cd19cb3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/__pycache__/reportviews.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/coreviews.py b/.venv/lib/python3.12/site-packages/networkx/classes/coreviews.py new file mode 100644 index 0000000..4769ffa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/coreviews.py @@ -0,0 +1,435 @@ +"""Views of core data structures such as nested Mappings (e.g. dict-of-dicts). +These ``Views`` often restrict element access, with either the entire view or +layers of nested mappings being read-only. +""" + +from collections.abc import Mapping + +__all__ = [ + "AtlasView", + "AdjacencyView", + "MultiAdjacencyView", + "UnionAtlas", + "UnionAdjacency", + "UnionMultiInner", + "UnionMultiAdjacency", + "FilterAtlas", + "FilterAdjacency", + "FilterMultiInner", + "FilterMultiAdjacency", +] + + +class AtlasView(Mapping): + """An AtlasView is a Read-only Mapping of Mappings. + + It is a View into a dict-of-dict data structure. + The inner level of dict is read-write. But the + outer level is read-only. + + See Also + ======== + AdjacencyView: View into dict-of-dict-of-dict + MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_atlas",) + + def __getstate__(self): + return {"_atlas": self._atlas} + + def __setstate__(self, state): + self._atlas = state["_atlas"] + + def __init__(self, d): + self._atlas = d + + def __len__(self): + return len(self._atlas) + + def __iter__(self): + return iter(self._atlas) + + def __getitem__(self, key): + return self._atlas[key] + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + def __str__(self): + return str(self._atlas) # {nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._atlas!r})" + + +class AdjacencyView(AtlasView): + """An AdjacencyView is a Read-only Map of Maps of Maps. + + It is a View into a dict-of-dict-of-dict data structure. + The inner level of dict is read-write. But the + outer levels are read-only. + + See Also + ======== + AtlasView: View into dict-of-dict + MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = () # Still uses AtlasView slots names _atlas + + def __getitem__(self, name): + return AtlasView(self._atlas[name]) + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + +class MultiAdjacencyView(AdjacencyView): + """An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps. 
+ + It is a View into a dict-of-dict-of-dict-of-dict data structure. + The inner level of dict is read-write. But the + outer levels are read-only. + + See Also + ======== + AtlasView: View into dict-of-dict + AdjacencyView: View into dict-of-dict-of-dict + """ + + __slots__ = () # Still uses AtlasView slots names _atlas + + def __getitem__(self, name): + return AdjacencyView(self._atlas[name]) + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + +class UnionAtlas(Mapping): + """A read-only union of two atlases (dict-of-dict). + + The two dict-of-dicts represent the inner dict of + an Adjacency: `G.succ[node]` and `G.pred[node]`. + The inner level of dict of both hold attribute key:value + pairs and is read-write. But the outer level is read-only. + + See Also + ======== + UnionAdjacency: View into dict-of-dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_succ", "_pred") + + def __getstate__(self): + return {"_succ": self._succ, "_pred": self._pred} + + def __setstate__(self, state): + self._succ = state["_succ"] + self._pred = state["_pred"] + + def __init__(self, succ, pred): + self._succ = succ + self._pred = pred + + def __len__(self): + return len(self._succ.keys() | self._pred.keys()) + + def __iter__(self): + return iter(set(self._succ.keys()) | set(self._pred.keys())) + + def __getitem__(self, key): + try: + return self._succ[key] + except KeyError: + return self._pred[key] + + def copy(self): + result = {nbr: dd.copy() for nbr, dd in self._succ.items()} + for nbr, dd in self._pred.items(): + if nbr in result: + result[nbr].update(dd) + else: + result[nbr] = dd.copy() + return result + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})" + + +class UnionAdjacency(Mapping): + """A read-only union of dict Adjacencies as a Map of Maps of Maps. + + The two input dict-of-dict-of-dicts represent the union of + `G.succ` and `G.pred`. Return values are UnionAtlas + The inner level of dict is read-write. But the + middle and outer levels are read-only. + + succ : a dict-of-dict-of-dict {node: nbrdict} + pred : a dict-of-dict-of-dict {node: nbrdict} + The keys for the two dicts should be the same + + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_succ", "_pred") + + def __getstate__(self): + return {"_succ": self._succ, "_pred": self._pred} + + def __setstate__(self, state): + self._succ = state["_succ"] + self._pred = state["_pred"] + + def __init__(self, succ, pred): + # keys must be the same for two input dicts + assert len(set(succ.keys()) ^ set(pred.keys())) == 0 + self._succ = succ + self._pred = pred + + def __len__(self): + return len(self._succ) # length of each dict should be the same + + def __iter__(self): + return iter(self._succ) + + def __getitem__(self, nbr): + return UnionAtlas(self._succ[nbr], self._pred[nbr]) + + def copy(self): + return {n: self[n].copy() for n in self._succ} + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})" + + +class UnionMultiInner(UnionAtlas): + """A read-only union of two inner dicts of MultiAdjacencies. + + The two input dict-of-dict-of-dicts represent the union of + `G.succ[node]` and `G.pred[node]` for MultiDiGraphs. + Return values are UnionAtlas. 
+ The inner level of dict is read-write. But the outer levels are read-only. + + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionAdjacency: View into dict-of-dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = () # Still uses UnionAtlas slots names _succ, _pred + + def __getitem__(self, node): + in_succ = node in self._succ + in_pred = node in self._pred + if in_succ: + if in_pred: + return UnionAtlas(self._succ[node], self._pred[node]) + return UnionAtlas(self._succ[node], {}) + return UnionAtlas({}, self._pred[node]) + + def copy(self): + nodes = set(self._succ.keys()) | set(self._pred.keys()) + return {n: self[n].copy() for n in nodes} + + +class UnionMultiAdjacency(UnionAdjacency): + """A read-only union of two dict MultiAdjacencies. + + The two input dict-of-dict-of-dict-of-dicts represent the union of + `G.succ` and `G.pred` for MultiDiGraphs. Return values are UnionAdjacency. + The inner level of dict is read-write. But the outer levels are read-only. + + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionMultiInner: View into dict-of-dict-of-dict + """ + + __slots__ = () # Still uses UnionAdjacency slots names _succ, _pred + + def __getitem__(self, node): + return UnionMultiInner(self._succ[node], self._pred[node]) + + +class FilterAtlas(Mapping): # nodedict, nbrdict, keydict + """A read-only Mapping of Mappings with filtering criteria for nodes. + + It is a view into a dict-of-dict data structure, and it selects only + nodes that meet the criteria defined by ``NODE_OK``. + + See Also + ======== + FilterAdjacency + FilterMultiInner + FilterMultiAdjacency + """ + + def __init__(self, d, NODE_OK): + self._atlas = d + self.NODE_OK = NODE_OK + + def __len__(self): + # check whether NODE_OK stores the number of nodes as `length` + # or the nodes themselves as a set `nodes`. If not, count the nodes. + if hasattr(self.NODE_OK, "length"): + return self.NODE_OK.length + if hasattr(self.NODE_OK, "nodes"): + return len(self.NODE_OK.nodes & self._atlas.keys()) + return sum(1 for n in self._atlas if self.NODE_OK(n)) + + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return (n for n in self.NODE_OK.nodes if n in self._atlas) + return (n for n in self._atlas if self.NODE_OK(n)) + + def __getitem__(self, key): + if key in self._atlas and self.NODE_OK(key): + return self._atlas[key] + raise KeyError(f"Key {key} not found") + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._atlas!r}, {self.NODE_OK!r})" + + +class FilterAdjacency(Mapping): # edgedict + """A read-only Mapping of Mappings with filtering criteria for nodes and edges. + + It is a view into a dict-of-dict-of-dict data structure, and it selects nodes + and edges that satisfy specific criteria defined by ``NODE_OK`` and ``EDGE_OK``, + respectively. + + See Also + ======== + FilterAtlas + FilterMultiInner + FilterMultiAdjacency + """ + + def __init__(self, d, NODE_OK, EDGE_OK): + self._atlas = d + self.NODE_OK = NODE_OK + self.EDGE_OK = EDGE_OK + + def __len__(self): + # check whether NODE_OK stores the number of nodes as `length` + # or the nodes themselves as a set `nodes`. If not, count the nodes. 
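+ # (For instance, `networkx.classes.filters.show_nodes` stores its
+ # node set on a `nodes` attribute, which lets these views avoid a
+ # full scan.)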
+ if hasattr(self.NODE_OK, "length"): + return self.NODE_OK.length + if hasattr(self.NODE_OK, "nodes"): + return len(self.NODE_OK.nodes & self._atlas.keys()) + return sum(1 for n in self._atlas if self.NODE_OK(n)) + + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return (n for n in self.NODE_OK.nodes if n in self._atlas) + return (n for n in self._atlas if self.NODE_OK(n)) + + def __getitem__(self, node): + if node in self._atlas and self.NODE_OK(node): + + def new_node_ok(nbr): + return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr) + + return FilterAtlas(self._atlas[node], new_node_ok) + raise KeyError(f"Key {node} not found") + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + name = self.__class__.__name__ + return f"{name}({self._atlas!r}, {self.NODE_OK!r}, {self.EDGE_OK!r})" + + +class FilterMultiInner(FilterAdjacency): # muliedge_seconddict + """A read-only Mapping of Mappings with filtering criteria for nodes and edges. + + It is a view into a dict-of-dict-of-dict-of-dict data structure, and it selects nodes + and edges that meet specific criteria defined by ``NODE_OK`` and ``EDGE_OK``. + + See Also + ======== + FilterAtlas + FilterAdjacency + FilterMultiAdjacency + """ + + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + my_nodes = (n for n in self.NODE_OK.nodes if n in self._atlas) + else: + my_nodes = (n for n in self._atlas if self.NODE_OK(n)) + for n in my_nodes: + some_keys_ok = False + for key in self._atlas[n]: + if self.EDGE_OK(n, key): + some_keys_ok = True + break + if some_keys_ok is True: + yield n + + def __getitem__(self, nbr): + if ( + nbr in self._atlas + and self.NODE_OK(nbr) + and any(self.EDGE_OK(nbr, key) for key in self._atlas[nbr]) + ): + + def new_node_ok(key): + return self.EDGE_OK(nbr, key) + + return FilterAtlas(self._atlas[nbr], new_node_ok) + raise KeyError(f"Key {nbr} not found") + + +class FilterMultiAdjacency(FilterAdjacency): # multiedgedict + """A read-only Mapping of Mappings with filtering criteria + for nodes and edges. + + It is a view into a dict-of-dict-of-dict-of-dict data structure, + and it selects nodes and edges that satisfy specific criteria + defined by ``NODE_OK`` and ``EDGE_OK``, respectively. 
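+
+ Note that here ``EDGE_OK`` also receives the multiedge key: it is
+ called as ``EDGE_OK(node, nbr, key)``, whereas the non-multi
+ :class:`FilterAdjacency` calls ``EDGE_OK(node, nbr)``.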
+ + See Also + ======== + FilterAtlas + FilterAdjacency + FilterMultiInner + """ + + def __getitem__(self, node): + if node in self._atlas and self.NODE_OK(node): + + def edge_ok(nbr, key): + return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key) + + return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok) + raise KeyError(f"Key {node} not found") diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/digraph.py b/.venv/lib/python3.12/site-packages/networkx/classes/digraph.py new file mode 100644 index 0000000..2ba56de --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/digraph.py @@ -0,0 +1,1352 @@ +"""Base class for directed graphs.""" + +from copy import deepcopy +from functools import cached_property + +import networkx as nx +from networkx import convert +from networkx.classes.coreviews import AdjacencyView +from networkx.classes.graph import Graph +from networkx.classes.reportviews import ( + DiDegreeView, + InDegreeView, + InEdgeView, + OutDegreeView, + OutEdgeView, +) +from networkx.exception import NetworkXError + +__all__ = ["DiGraph"] + + +class _CachedPropertyResetterAdjAndSucc: + """Data Descriptor class that syncs and resets cached properties adj and succ + + The cached properties `adj` and `succ` are reset whenever `_adj` or `_succ` + are set to new objects. In addition, the attributes `_succ` and `_adj` + are synced so these two names point to the same object. + + Warning: most of the time, when ``G._adj`` is set, ``G._pred`` should also + be set to maintain a valid data structure. They share datadicts. + + This object sits on a class and ensures that any instance of that + class clears its cached properties "succ" and "adj" whenever the + underlying instance attributes "_succ" or "_adj" are set to a new object. + It only affects the set process of the obj._adj and obj._succ attribute. + All get/del operations act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_adj"] = value + od["_succ"] = value + # reset cached properties + props = [ + "adj", + "succ", + "edges", + "out_edges", + "degree", + "out_degree", + "in_degree", + ] + for prop in props: + if prop in od: + del od[prop] + + +class _CachedPropertyResetterPred: + """Data Descriptor class for _pred that resets ``pred`` cached_property when needed + + This assumes that the ``cached_property`` ``G.pred`` should be reset whenever + ``G._pred`` is set to a new value. + + Warning: most of the time, when ``G._pred`` is set, ``G._adj`` should also + be set to maintain a valid data structure. They share datadicts. + + This object sits on a class and ensures that any instance of that + class clears its cached property "pred" whenever the underlying + instance attribute "_pred" is set to a new object. It only affects + the set process of the obj._pred attribute. All get/del operations + act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_pred"] = value + # reset cached properties + props = ["pred", "in_edges", "degree", "out_degree", "in_degree"] + for prop in props: + if prop in od: + del od[prop] + + +class DiGraph(Graph): + """ + Base class for directed graphs. + + A DiGraph stores nodes and edges with optional data, or attributes. + + DiGraphs hold directed edges. Self loops are allowed but multiple + (parallel) edges are not. 
+ + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + MultiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.DiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.DiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> G.add_edge(1, 2, weight=4.7) + >>> G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2]["weight"] = 4.7 + >>> G.edges[1, 2]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, 2]` a + read-only dict-like structure. However, you can assign to attributes + in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change + data attributes: `G.edges[1, 2]['weight'] = 4` + (For multigraphs: `MG.edges[u, v, key][name] = value`). + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + + Often the best way to traverse all edges of a graph is via the neighbors. 
+ The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()` + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, eattr in nbrsdict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges reporting object is often more convenient: + + >>> for u, v, weight in G.edges(data="weight"): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using object-attributes and methods. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. + + **Subclasses (Advanced):** + + The Graph class uses a dict-of-dict-of-dict data structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information and holds + edge data keyed by neighbor. The inner dict (edge_attr_dict) represents + the edge data and holds edge attribute values keyed by attribute names. + + Each of these three dicts can be replaced in a subclass by a user defined + dict-like object. In general, the dict-like features should be + maintained but extra features can be added. To replace one of the + dicts create a new graph class by changing the class(!) variable + holding the factory for that dict-like structure. The variable names are + node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory, + adjlist_outer_dict_factory, edge_attr_dict_factory and graph_attr_dict_factory. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. + It should require no arguments and return a dict-like object + + node_attr_dict_factory: function, (default: dict) + Factory function to be used to create the node attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object + + adjlist_outer_dict_factory : function, (default: dict) + Factory function to be used to create the outer-most dict + in the data structure that holds adjacency info keyed by node. + It should require no arguments and return a dict-like object. + + adjlist_inner_dict_factory : function, optional (default: dict) + Factory function to be used to create the adjacency list + dict which holds edge data keyed by neighbor. + It should require no arguments and return a dict-like object + + edge_attr_dict_factory : function, optional (default: dict) + Factory function to be used to create the edge attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. + + graph_attr_dict_factory : function, (default: dict) + Factory function to be used to create the graph attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. 
+ + Typically, if your extension doesn't impact the data structure all + methods will inherited without issue except: `to_directed/to_undirected`. + By default these methods create a DiGraph/Graph class and you probably + want them to create your extension of a DiGraph/Graph. To facilitate + this we define two class variables that you can set in your subclass. + + to_directed_class : callable, (default: DiGraph or MultiDiGraph) + Class to create a new graph structure in the `to_directed` method. + If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used. + + to_undirected_class : callable, (default: Graph or MultiGraph) + Class to create a new graph structure in the `to_undirected` method. + If `None`, a NetworkX class (Graph or MultiGraph) is used. + + **Subclassing Example** + + Create a low memory graph class that effectively disallows edge + attributes by using a single attribute dict for all edges. + This reduces the memory used, but you lose edge attributes. + + >>> class ThinGraph(nx.Graph): + ... all_edge_dict = {"weight": 1} + ... + ... def single_edge_dict(self): + ... return self.all_edge_dict + ... + ... edge_attr_dict_factory = single_edge_dict + >>> G = ThinGraph() + >>> G.add_edge(2, 1) + >>> G[2][1] + {'weight': 1} + >>> G.add_edge(2, 2) + >>> G[2][1] is G[2][2] + True + """ + + _adj = _CachedPropertyResetterAdjAndSucc() # type: ignore[assignment] + _succ = _adj # type: ignore[has-type] + _pred = _CachedPropertyResetterPred() + + def __init__(self, incoming_graph_data=None, **attr): + """Initialize a graph with edges, name, or graph attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a 2D NumPy array, a + SciPy sparse array, or a PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name="my graph") + >>> e = [(1, 2), (2, 3), (3, 4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G = nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + self.graph = self.graph_attr_dict_factory() # dictionary for graph attributes + self._node = self.node_dict_factory() # dictionary for node attr + # We store two adjacency lists: + # the predecessors of node n are stored in the dict self._pred + # the successors of node n are stored in the dict self._succ=self._adj + self._adj = self.adjlist_outer_dict_factory() # empty adjacency dict successor + self._pred = self.adjlist_outer_dict_factory() # predecessor + # Note: self._succ = self._adj # successor + + self.__networkx_cache__ = {} + # attempt to load graph with data + if incoming_graph_data is not None: + convert.to_networkx_graph(incoming_graph_data, create_using=self) + # load graph attributes (must be after convert) + self.graph.update(attr) + + @cached_property + def adj(self): + """Graph adjacency object holding the neighbors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. 
So `G.adj[3][2]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return AdjacencyView(self._succ) + + @cached_property + def succ(self): + """Graph adjacency object holding the successors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.succ[3][2]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.succ behaves like a dict. Useful idioms include + `for nbr, datadict in G.succ[n].items():`. A data-view not provided + by dicts also exists: `for nbr, foovalue in G.succ[node].data('foo'):` + and a default can be set via a `default` argument to the `data` method. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` is identical to `G.succ`. + """ + return AdjacencyView(self._succ) + + @cached_property + def pred(self): + """Graph adjacency object holding the predecessors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.pred[2][3]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.pred behaves like a dict. Useful idioms include + `for nbr, datadict in G.pred[n].items():`. A data-view not provided + by dicts also exists: `for nbr, foovalue in G.pred[node].data('foo'):` + A default can be set via a `default` argument to the `data` method. + """ + return AdjacencyView(self._pred) + + def add_node(self, node_for_adding, **attr): + """Add a single node `node_for_adding` and update node attributes. + + Parameters + ---------- + node_for_adding : node + A node can be any hashable Python object except None. + attr : keyword arguments, optional + Set or change node attributes using key=value. + + See Also + -------- + add_nodes_from + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_node(1) + >>> G.add_node("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_node(K3) + >>> G.number_of_nodes() + 3 + + Use keywords set/change node attributes: + + >>> G.add_node(1, size=10) + >>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649)) + + Notes + ----- + A hashable object is one that can be used as a key in a Python + dictionary. This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. 
+ """ + if node_for_adding not in self._succ: + if node_for_adding is None: + raise ValueError("None cannot be a node") + self._succ[node_for_adding] = self.adjlist_inner_dict_factory() + self._pred[node_for_adding] = self.adjlist_inner_dict_factory() + attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory() + attr_dict.update(attr) + else: # update attr even if node already exists + self._node[node_for_adding].update(attr) + nx._clear_cache(self) + + def add_nodes_from(self, nodes_for_adding, **attr): + """Add multiple nodes. + + Parameters + ---------- + nodes_for_adding : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple take + precedence over attributes specified via keyword arguments. + + See Also + -------- + add_node + + Notes + ----- + When adding nodes from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.add_nodes_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(), key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1, 2], size=10) + >>> G.add_nodes_from([3, 4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific nodes. + + >>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})]) + >>> G.nodes[1]["size"] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.nodes[1]["size"] + 11 + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)]) + >>> # wrong way - will raise RuntimeError + >>> # G.add_nodes_from(n + 1 for n in G.nodes) + >>> # correct way + >>> G.add_nodes_from(list(n + 1 for n in G.nodes)) + """ + for n in nodes_for_adding: + try: + newnode = n not in self._node + newdict = attr + except TypeError: + n, ndict = n + newnode = n not in self._node + newdict = attr.copy() + newdict.update(ndict) + if newnode: + if n is None: + raise ValueError("None cannot be a node") + self._succ[n] = self.adjlist_inner_dict_factory() + self._pred[n] = self.adjlist_inner_dict_factory() + self._node[n] = self.node_attr_dict_factory() + self._node[n].update(newdict) + nx._clear_cache(self) + + def remove_node(self, n): + """Remove node n. + + Removes the node n and all adjacent edges. + Attempting to remove a nonexistent node will raise an exception. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. 
+ + See Also + -------- + remove_nodes_from + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> list(G.edges) + [(0, 1), (1, 2)] + >>> G.remove_node(1) + >>> list(G.edges) + [] + + """ + try: + nbrs = self._succ[n] + del self._node[n] + except KeyError as err: # NetworkXError if n not in self + raise NetworkXError(f"The node {n} is not in the digraph.") from err + for u in nbrs: + del self._pred[u][n] # remove all edges n-u in digraph + del self._succ[n] # remove node from succ + for u in self._pred[n]: + del self._succ[u][n] # remove all edges n-u in digraph + del self._pred[n] # remove node from pred + nx._clear_cache(self) + + def remove_nodes_from(self, nodes): + """Remove multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). If a node + in the container is not in the graph it is silently ignored. + + See Also + -------- + remove_node + + Notes + ----- + When removing nodes from an iterator over the graph you are changing, + a `RuntimeError` will be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.remove_nodes_from`. + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = list(G.nodes) + >>> e + [0, 1, 2] + >>> G.remove_nodes_from(e) + >>> list(G.nodes) + [] + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)]) + >>> # this command will fail, as the graph's dict is modified during iteration + >>> # G.remove_nodes_from(n for n in G.nodes if n < 2) + >>> # this command will work, since the dictionary underlying graph is not modified + >>> G.remove_nodes_from(list(n for n in G.nodes if n < 2)) + """ + for n in nodes: + try: + succs = self._succ[n] + del self._node[n] + for u in succs: + del self._pred[u][n] # remove all edges n-u in digraph + del self._succ[n] # now remove node + for u in self._pred[n]: + del self._succ[u][n] # remove all edges n-u in digraph + del self._pred[n] # now remove node + except KeyError: + pass # silent failure on remove + nx._clear_cache(self) + + def add_edge(self, u_of_edge, v_of_edge, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_of_edge, v_of_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + Many NetworkX algorithms designed for weighted graphs use + an edge attribute (by default `weight`) to hold a numerical value. 
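+
+ DiGraph stores a single attribute dict per edge, referenced from both
+ ``G.succ[u][v]`` and ``G.pred[v][u]``, so an update made through one
+ view is visible through the other:
+
+ >>> G = nx.DiGraph([(1, 2)])
+ >>> G.succ[1][2]["w"] = 3
+ >>> G.pred[2][1]["w"]
+ 3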
+ + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1, 2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. + + >>> G.add_edge(1, 2) + >>> G[1][2].update({0: 5}) + >>> G.edges[1, 2].update({0: 5}) + """ + u, v = u_of_edge, v_of_edge + # add nodes + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + # add the edge + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + self._succ[u][v] = datadict + self._pred[v][u] = datadict + nx._clear_cache(self) + + def add_edges_from(self, ebunch_to_add, **attr): + """Add all the edges in ebunch_to_add. + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the container will be added to the + graph. The edges must be given as 2-tuples (u, v) or + 3-tuples (u, v, d) where d is a dictionary containing edge data. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Edge attributes specified in an ebunch take precedence over + attributes specified via keyword arguments. + + When adding edges from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_edges)`, and pass this + object to `G.add_edges_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples + >>> e = zip(range(0, 3), range(1, 4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) + >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898") + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + >>> # Grow graph by one new node, adding edges to all existing nodes. 
+ >>> # wrong way - will raise RuntimeError + >>> # G.add_edges_from(((5, n) for n in G.nodes)) + >>> # right way - note that there will be no self-edge for node 5 + >>> G.add_edges_from(list((5, n) for n in G.nodes)) + """ + for e in ebunch_to_add: + ne = len(e) + if ne == 3: + u, v, dd = e + elif ne == 2: + u, v = e + dd = {} + else: + raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.") + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + datadict.update(dd) + self._succ[u][v] = datadict + self._pred[v][u] = datadict + nx._clear_cache(self) + + def remove_edge(self, u, v): + """Remove the edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove the edge between nodes u and v. + + Raises + ------ + NetworkXError + If there is not an edge between u and v. + + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, etc + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + >>> e = (2, 3, {"weight": 7}) # an edge with attribute data + >>> G.remove_edge(*e[:2]) # select first part of edge tuple + """ + try: + del self._succ[u][v] + del self._pred[v][u] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} not in graph.") from err + nx._clear_cache(self) + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u, v) edge between u and v. + - 3-tuples (u, v, k) where k is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> ebunch = [(1, 2), (2, 3)] + >>> G.remove_edges_from(ebunch) + """ + for e in ebunch: + u, v = e[:2] # ignore edge data + if u in self._succ and v in self._succ[u]: + del self._succ[u][v] + del self._pred[v][u] + nx._clear_cache(self) + + def has_successor(self, u, v): + """Returns True if node u has successor v. + + This is true if graph has the edge u->v. + """ + return u in self._succ and v in self._succ[u] + + def has_predecessor(self, u, v): + """Returns True if node u has predecessor v. + + This is true if graph has the edge u<-v. + """ + return u in self._pred and v in self._pred[u] + + def successors(self, n): + """Returns an iterator over successor nodes of n. + + A successor of n is a node m such that there exists a directed + edge from n to m. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + predecessors + + Notes + ----- + neighbors() and successors() are the same. 
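+
+        Examples
+        --------
+        >>> G = nx.DiGraph([(0, 1), (0, 2)])
+        >>> sorted(G.successors(0))
+        [1, 2]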
+ """ + try: + return iter(self._succ[n]) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the digraph.") from err + + # digraph definitions + neighbors = successors + + def predecessors(self, n): + """Returns an iterator over predecessor nodes of n. + + A predecessor of n is a node m such that there exists a directed + edge from m to n. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + successors + """ + try: + return iter(self._pred[n]) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the digraph.") from err + + @cached_property + def edges(self): + """An OutEdgeView of the DiGraph as G.edges or G.edges(). + + edges(self, nbunch=None, data=False, default=None) + + The OutEdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, `G.edges[u, v]['color']` provides the value of the color + attribute for edge `(u, v)` while + `for (u, v, c) in G.edges.data('color', default='red'):` + iterates through all the edges yielding the color attribute + with default `'red'` if no color attribute exists. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : OutEdgeView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, d) tuples of edges, but can also be used for + attribute lookup as `edges[u, v]['foo']`. + + See Also + -------- + in_edges, out_edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> nx.add_path(G, [0, 1, 2]) + >>> G.add_edge(2, 3, weight=5) + >>> [e for e in G.edges] + [(0, 1), (1, 2), (2, 3)] + >>> G.edges.data() # default data is {} (empty dict) + OutEdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]) + >>> G.edges.data("weight", default=1) + OutEdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)]) + >>> G.edges([0, 2]) # only edges originating from these nodes + OutEdgeDataView([(0, 1), (2, 3)]) + >>> G.edges(0) # only edges from node 0 + OutEdgeDataView([(0, 1)]) + + """ + return OutEdgeView(self) + + # alias out_edges to edges + @cached_property + def out_edges(self): + return OutEdgeView(self) + + out_edges.__doc__ = edges.__doc__ + + @cached_property + def in_edges(self): + """A view of the in edges of the graph as G.in_edges or G.in_edges(). + + in_edges(self, nbunch=None, data=False, default=None): + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). 
+ default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + in_edges : InEdgeView or InEdgeDataView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, d) tuples of edges, but can also be used for + attribute lookup as `edges[u, v]['foo']`. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2, color="blue") + >>> G.in_edges() + InEdgeView([(1, 2)]) + >>> G.in_edges(nbunch=2) + InEdgeDataView([(1, 2)]) + + See Also + -------- + edges + """ + return InEdgeView(self) + + @cached_property + def degree(self): + """A DegreeView for the Graph as G.degree or G.degree(). + + The node degree is the number of edges adjacent to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + DiDegreeView or int + If multiple nodes are requested (the default), returns a `DiDegreeView` + mapping nodes to their degree. + If a single node is requested, returns the degree of the node as an integer. + + See Also + -------- + in_degree, out_degree + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1, 2])) + [(0, 1), (1, 2), (2, 2)] + + """ + return DiDegreeView(self) + + @cached_property + def in_degree(self): + """An InDegreeView for (node, in_degree) or in_degree for single node. + + The node in_degree is the number of edges pointing to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iteration over (node, in_degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + In-degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, in-degree). + + See Also + -------- + degree, out_degree + + Examples + -------- + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.in_degree(0) # node 0 with degree 0 + 0 + >>> list(G.in_degree([0, 1, 2])) + [(0, 0), (1, 1), (2, 1)] + + """ + return InDegreeView(self) + + @cached_property + def out_degree(self): + """An OutDegreeView for (node, out_degree) + + The node out_degree is the number of edges pointing out of the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator over (node, out_degree) as well as + lookup for the degree for a single node. 
+ + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + Out-degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, out-degree). + + See Also + -------- + degree, in_degree + + Examples + -------- + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.out_degree(0) # node 0 with degree 1 + 1 + >>> list(G.out_degree([0, 1, 2])) + [(0, 1), (1, 1), (2, 1)] + + """ + return OutDegreeView(self) + + def clear(self): + """Remove all nodes and edges from the graph. + + This also removes the name, and all graph, node, and edge attributes. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.clear() + >>> list(G.nodes) + [] + >>> list(G.edges) + [] + + """ + self._succ.clear() + self._pred.clear() + self._node.clear() + self.graph.clear() + nx._clear_cache(self) + + def clear_edges(self): + """Remove all edges from the graph without altering nodes. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.clear_edges() + >>> list(G.nodes) + [0, 1, 2, 3] + >>> list(G.edges) + [] + + """ + for predecessor_dict in self._pred.values(): + predecessor_dict.clear() + for successor_dict in self._succ.values(): + successor_dict.clear() + nx._clear_cache(self) + + def is_multigraph(self): + """Returns True if graph is a multigraph, False otherwise.""" + return False + + def is_directed(self): + """Returns True if graph is directed, False otherwise.""" + return True + + def to_undirected(self, reciprocal=False, as_view=False): + """Returns an undirected representation of the digraph. + + Parameters + ---------- + reciprocal : bool (optional) + If True only keep edges that appear in both directions + in the original digraph. + as_view : bool (optional, default=False) + If True return an undirected view of the original directed graph. + + Returns + ------- + G : Graph + An undirected graph with the same name and nodes and + with edge (u, v, data) if either (u, v, data) or (v, u, data) + is in the digraph. If both edges exist in digraph and + their edge data is different, only one edge is created + with an arbitrary choice of which edge data to use. + You must check and correct for this manually if desired. + + See Also + -------- + Graph, copy, add_edge, add_edges_from + + Notes + ----- + If edges in both directions (u, v) and (v, u) exist in the + graph, attributes for the new undirected edge will be a combination of + the attributes of the directed edges. The edge data is updated + in the (arbitrary) order that the edges are encountered. For + more customized control of the edge attributes use add_edge(). + + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar G=DiGraph(D) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. 
+ + Warning: If you have subclassed DiGraph to use dict-like objects + in the data structure, those changes do not transfer to the + Graph created by this method. + + Examples + -------- + >>> G = nx.path_graph(2) # or MultiGraph, etc + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + if reciprocal is True: + G.add_edges_from( + (u, v, deepcopy(d)) + for u, nbrs in self._adj.items() + for v, d in nbrs.items() + if v in self._pred[u] + ) + else: + G.add_edges_from( + (u, v, deepcopy(d)) + for u, nbrs in self._adj.items() + for v, d in nbrs.items() + ) + return G + + def reverse(self, copy=True): + """Returns the reverse of the graph. + + The reverse is a graph with the same nodes and edges + but with the directions of the edges reversed. + + Parameters + ---------- + copy : bool optional (default=True) + If True, return a new DiGraph holding the reversed edges. + If False, the reverse graph is created using a view of + the original graph. + """ + if copy: + H = self.__class__() + H.graph.update(deepcopy(self.graph)) + H.add_nodes_from((n, deepcopy(d)) for n, d in self.nodes.items()) + H.add_edges_from((v, u, deepcopy(d)) for u, v, d in self.edges(data=True)) + return H + return nx.reverse_view(self) diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/filters.py b/.venv/lib/python3.12/site-packages/networkx/classes/filters.py new file mode 100644 index 0000000..e989e22 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/filters.py @@ -0,0 +1,95 @@ +"""Filter factories to hide or show sets of nodes and edges. + +These filters return the function used when creating `SubGraph`. +""" + +__all__ = [ + "no_filter", + "hide_nodes", + "hide_edges", + "hide_multiedges", + "hide_diedges", + "hide_multidiedges", + "show_nodes", + "show_edges", + "show_multiedges", + "show_diedges", + "show_multidiedges", +] + + +def no_filter(*items): + """Returns a filter function that always evaluates to True.""" + return True + + +def hide_nodes(nodes): + """Returns a filter function that hides specific nodes.""" + nodes = set(nodes) + return lambda node: node not in nodes + + +def hide_diedges(edges): + """Returns a filter function that hides specific directed edges.""" + edges = {(u, v) for u, v in edges} + return lambda u, v: (u, v) not in edges + + +def hide_edges(edges): + """Returns a filter function that hides specific undirected edges.""" + alledges = set(edges) | {(v, u) for (u, v) in edges} + return lambda u, v: (u, v) not in alledges + + +def hide_multidiedges(edges): + """Returns a filter function that hides specific multi-directed edges.""" + edges = {(u, v, k) for u, v, k in edges} + return lambda u, v, k: (u, v, k) not in edges + + +def hide_multiedges(edges): + """Returns a filter function that hides specific multi-undirected edges.""" + alledges = set(edges) | {(v, u, k) for (u, v, k) in edges} + return lambda u, v, k: (u, v, k) not in alledges + + +# write show_nodes as a class to make SubGraph pickleable +class show_nodes: + """Filter class to show specific nodes. 
+
+    Attach the set of nodes as an attribute to speed up this commonly used filter
+
+    Note that another allowed attribute for filters is to store the number of nodes
+    on the filter as attribute `length` (used in `__len__`). It is a user
+    responsibility to ensure this attribute is accurate if present.
+    """
+
+    def __init__(self, nodes):
+        self.nodes = set(nodes)
+
+    def __call__(self, node):
+        return node in self.nodes
+
+
+def show_diedges(edges):
+    """Returns a filter function that shows specific directed edges."""
+    edges = {(u, v) for u, v in edges}
+    return lambda u, v: (u, v) in edges
+
+
+def show_edges(edges):
+    """Returns a filter function that shows specific undirected edges."""
+    alledges = set(edges) | {(v, u) for (u, v) in edges}
+    return lambda u, v: (u, v) in alledges
+
+
+def show_multidiedges(edges):
+    """Returns a filter function that shows specific multi-directed edges."""
+    edges = {(u, v, k) for u, v, k in edges}
+    return lambda u, v, k: (u, v, k) in edges
+
+
+def show_multiedges(edges):
+    """Returns a filter function that shows specific multi-undirected edges."""
+    alledges = set(edges) | {(v, u, k) for (u, v, k) in edges}
+    return lambda u, v, k: (u, v, k) in alledges
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/function.py b/.venv/lib/python3.12/site-packages/networkx/classes/function.py
new file mode 100644
index 0000000..6cf7278
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/classes/function.py
@@ -0,0 +1,1416 @@
+"""Functional interface to graph methods and assorted utilities."""
+
+from collections import Counter
+from itertools import chain
+
+import networkx as nx
+from networkx.utils import not_implemented_for, pairwise
+
+__all__ = [
+    "nodes",
+    "edges",
+    "degree",
+    "degree_histogram",
+    "neighbors",
+    "number_of_nodes",
+    "number_of_edges",
+    "density",
+    "is_directed",
+    "freeze",
+    "is_frozen",
+    "subgraph",
+    "induced_subgraph",
+    "edge_subgraph",
+    "restricted_view",
+    "to_directed",
+    "to_undirected",
+    "add_star",
+    "add_path",
+    "add_cycle",
+    "create_empty_copy",
+    "set_node_attributes",
+    "get_node_attributes",
+    "remove_node_attributes",
+    "set_edge_attributes",
+    "get_edge_attributes",
+    "remove_edge_attributes",
+    "all_neighbors",
+    "non_neighbors",
+    "non_edges",
+    "common_neighbors",
+    "is_weighted",
+    "is_negatively_weighted",
+    "is_empty",
+    "selfloop_edges",
+    "nodes_with_selfloops",
+    "number_of_selfloops",
+    "path_weight",
+    "is_path",
+]
+
+
+def nodes(G):
+    """Returns a NodeView over the graph nodes.
+
+    This function wraps the :func:`G.nodes <networkx.Graph.nodes>` property.
+    """
+    return G.nodes()
+
+
+def edges(G, nbunch=None):
+    """Returns an edge view of edges incident to nodes in nbunch.
+
+    Return all edges if nbunch is unspecified or nbunch=None.
+
+    For digraphs, edges=out_edges
+
+    This function wraps the :func:`G.edges <networkx.Graph.edges>` property.
+    """
+    return G.edges(nbunch)
+
+
+def degree(G, nbunch=None, weight=None):
+    """Returns a degree view of single node or of nbunch of nodes.
+    If nbunch is omitted, then return degrees of *all* nodes.
+
+    This function wraps the :func:`G.degree <networkx.Graph.degree>` property.
+    """
+    return G.degree(nbunch, weight)
+
+
+def neighbors(G, n):
+    """Returns an iterator over all neighbors of node n.
+
+    This function wraps the :func:`G.neighbors <networkx.Graph.neighbors>` function.
+    """
+    return G.neighbors(n)
+
+
+def number_of_nodes(G):
+    """Returns the number of nodes in the graph.
+
+    This function wraps the :func:`G.number_of_nodes <networkx.Graph.number_of_nodes>` function.
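+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> nx.number_of_nodes(G)
+    3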
+    """
+    return G.number_of_nodes()
+
+
+def number_of_edges(G):
+    """Returns the number of edges in the graph.
+
+    This function wraps the :func:`G.number_of_edges <networkx.Graph.number_of_edges>` function.
+    """
+    return G.number_of_edges()
+
+
+def density(G):
+    r"""Returns the density of a graph.
+
+    The density for undirected graphs is
+
+    .. math::
+
+       d = \frac{2m}{n(n-1)},
+
+    and for directed graphs is
+
+    .. math::
+
+       d = \frac{m}{n(n-1)},
+
+    where `n` is the number of nodes and `m` is the number of edges in `G`.
+
+    Notes
+    -----
+    The density is 0 for a graph without edges and 1 for a complete graph.
+    The density of multigraphs can be higher than 1.
+
+    Self loops are counted in the total number of edges so graphs with self
+    loops can have density higher than 1.
+    """
+    n = number_of_nodes(G)
+    m = number_of_edges(G)
+    if m == 0 or n <= 1:
+        return 0
+    d = m / (n * (n - 1))
+    if not G.is_directed():
+        d *= 2
+    return d
+
+
+def degree_histogram(G):
+    """Returns a list of the frequency of each degree value.
+
+    Parameters
+    ----------
+    G : Networkx graph
+        A graph
+
+    Returns
+    -------
+    hist : list
+        A list of frequencies of degrees.
+        The degree values are the index in the list.
+
+    Notes
+    -----
+    Note: the bins are width one, hence len(list) can be large
+    (Order(number_of_edges))
+    """
+    counts = Counter(d for n, d in G.degree())
+    return [counts.get(i, 0) for i in range(max(counts) + 1 if counts else 0)]
+
+
+def is_directed(G):
+    """Return True if graph is directed."""
+    return G.is_directed()
+
+
+def frozen(*args, **kwargs):
+    """Dummy method for raising errors when trying to modify frozen graphs"""
+    raise nx.NetworkXError("Frozen graph can't be modified")
+
+
+def freeze(G):
+    """Modify graph to prevent further change by adding or removing
+    nodes or edges.
+
+    Node and edge data can still be modified.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> G = nx.freeze(G)
+    >>> try:
+    ...     G.add_edge(4, 5)
+    ... except nx.NetworkXError as err:
+    ...     print(str(err))
+    Frozen graph can't be modified
+
+    Notes
+    -----
+    To "unfreeze" a graph you must make a copy by creating a new graph object:
+
+    >>> graph = nx.path_graph(4)
+    >>> frozen_graph = nx.freeze(graph)
+    >>> unfrozen_graph = nx.Graph(frozen_graph)
+    >>> nx.is_frozen(unfrozen_graph)
+    False
+
+    See Also
+    --------
+    is_frozen
+    """
+    G.add_node = frozen
+    G.add_nodes_from = frozen
+    G.remove_node = frozen
+    G.remove_nodes_from = frozen
+    G.add_edge = frozen
+    G.add_edges_from = frozen
+    G.add_weighted_edges_from = frozen
+    G.remove_edge = frozen
+    G.remove_edges_from = frozen
+    G.clear = frozen
+    G.clear_edges = frozen
+    G.frozen = True
+    return G
+
+
+def is_frozen(G):
+    """Returns True if graph is frozen.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    See Also
+    --------
+    freeze
+    """
+    try:
+        return G.frozen
+    except AttributeError:
+        return False
+
+
+def add_star(G_to_add_to, nodes_for_star, **attr):
+    """Add a star to Graph G_to_add_to.
+
+    The first node in `nodes_for_star` is the middle of the star.
+    It is connected to all other nodes.
+
+    Parameters
+    ----------
+    G_to_add_to : graph
+        A NetworkX graph
+    nodes_for_star : iterable container
+        A container of nodes.
+    attr : keyword arguments, optional (default= no attributes)
+        Attributes to add to every edge in star.
+ + See Also + -------- + add_path, add_cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_star(G, [0, 1, 2, 3]) + >>> nx.add_star(G, [10, 11, 12], weight=2) + """ + nlist = iter(nodes_for_star) + try: + v = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(v) + edges = ((v, n) for n in nlist) + G_to_add_to.add_edges_from(edges, **attr) + + +def add_path(G_to_add_to, nodes_for_path, **attr): + """Add a path to the Graph G_to_add_to. + + Parameters + ---------- + G_to_add_to : graph + A NetworkX graph + nodes_for_path : iterable container + A container of nodes. A path will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in path. + + See Also + -------- + add_star, add_cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.add_path(G, [10, 11, 12], weight=7) + """ + nlist = iter(nodes_for_path) + try: + first_node = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(first_node) + G_to_add_to.add_edges_from(pairwise(chain((first_node,), nlist)), **attr) + + +def add_cycle(G_to_add_to, nodes_for_cycle, **attr): + """Add a cycle to the Graph G_to_add_to. + + Parameters + ---------- + G_to_add_to : graph + A NetworkX graph + nodes_for_cycle: iterable container + A container of nodes. A cycle will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in cycle. + + See Also + -------- + add_path, add_star + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [10, 11, 12], weight=7) + """ + nlist = iter(nodes_for_cycle) + try: + first_node = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(first_node) + G_to_add_to.add_edges_from( + pairwise(chain((first_node,), nlist), cyclic=True), **attr + ) + + +def subgraph(G, nbunch): + """Returns the subgraph induced on nodes in nbunch. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : list, iterable + A container of nodes that will be iterated through once (thus + it should be an iterator or be iterable). Each element of the + container should be a valid node type: any hashable type except + None. If nbunch is None, return all edges data in the graph. + Nodes in nbunch that are not in the graph will be (quietly) + ignored. + + Notes + ----- + subgraph(G) calls G.subgraph() + """ + return G.subgraph(nbunch) + + +def induced_subgraph(G, nbunch): + """Returns a SubGraph view of `G` showing only nodes in nbunch. + + The induced subgraph of a graph on a set of nodes N is the + graph with nodes N and edges from G which have both ends in N. + + Parameters + ---------- + G : NetworkX Graph + nbunch : node, container of nodes or None (for all nodes) + + Returns + ------- + subgraph : SubGraph View + A read-only view of the subgraph in `G` induced by the nodes. + Changes to the graph `G` will be reflected in the view. 
+ + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + For an inplace reduction of a graph to a subgraph you can remove nodes: + `G.remove_nodes_from(n in G if n not in set(nbunch))` + + If you are going to compute subgraphs of your subgraphs you could + end up with a chain of views that can be very slow once the chain + has about 15 views in it. If they are all induced subgraphs, you + can short-cut the chain by making them all subgraphs of the original + graph. The graph class method `G.subgraph` does this when `G` is + a subgraph. In contrast, this function allows you to choose to build + chains or not, as you wish. The returned subgraph is a view on `G`. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = nx.induced_subgraph(G, [0, 1, 3]) + >>> list(H.edges) + [(0, 1)] + >>> list(H.nodes) + [0, 1, 3] + """ + induced_nodes = nx.filters.show_nodes(G.nbunch_iter(nbunch)) + return nx.subgraph_view(G, filter_node=induced_nodes) + + +def edge_subgraph(G, edges): + """Returns a view of the subgraph induced by the specified edges. + + The induced subgraph contains each edge in `edges` and each + node incident to any of those edges. + + Parameters + ---------- + G : NetworkX Graph + edges : iterable + An iterable of edges. Edges not present in `G` are ignored. + + Returns + ------- + subgraph : SubGraph View + A read-only edge-induced subgraph of `G`. + Changes to `G` are reflected in the view. + + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + If you create a subgraph of a subgraph recursively you can end up + with a chain of subgraphs that becomes very slow with about 15 + nested subgraph views. Luckily the edge_subgraph filter nests + nicely so you can use the original graph as G in this function + to avoid chains. We do not rule out chains programmatically so + that odd cases like an `edge_subgraph` of a `restricted_view` + can be created. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = G.edge_subgraph([(0, 1), (3, 4)]) + >>> list(H.nodes) + [0, 1, 3, 4] + >>> list(H.edges) + [(0, 1), (3, 4)] + """ + nxf = nx.filters + edges = set(edges) + nodes = set() + for e in edges: + nodes.update(e[:2]) + induced_nodes = nxf.show_nodes(nodes) + if G.is_multigraph(): + if G.is_directed(): + induced_edges = nxf.show_multidiedges(edges) + else: + induced_edges = nxf.show_multiedges(edges) + else: + if G.is_directed(): + induced_edges = nxf.show_diedges(edges) + else: + induced_edges = nxf.show_edges(edges) + return nx.subgraph_view(G, filter_node=induced_nodes, filter_edge=induced_edges) + + +def restricted_view(G, nodes, edges): + """Returns a view of `G` with hidden nodes and edges. + + The resulting subgraph filters out node `nodes` and edges `edges`. + Filtered out nodes also filter out any of their edges. + + Parameters + ---------- + G : NetworkX Graph + nodes : iterable + An iterable of nodes. Nodes not present in `G` are ignored. + edges : iterable + An iterable of edges. Edges not present in `G` are ignored. + + Returns + ------- + subgraph : SubGraph View + A read-only restricted view of `G` filtering out nodes and edges. + Changes to `G` are reflected in the view. 
+ + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + If you create a subgraph of a subgraph recursively you may end up + with a chain of subgraph views. Such chains can get quite slow + for lengths near 15. To avoid long chains, try to make your subgraph + based on the original graph. We do not rule out chains programmatically + so that odd cases like an `edge_subgraph` of a `restricted_view` + can be created. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = nx.restricted_view(G, [0], [(1, 2), (3, 4)]) + >>> list(H.nodes) + [1, 2, 3, 4] + >>> list(H.edges) + [(2, 3)] + """ + nxf = nx.filters + hide_nodes = nxf.hide_nodes(nodes) + if G.is_multigraph(): + if G.is_directed(): + hide_edges = nxf.hide_multidiedges(edges) + else: + hide_edges = nxf.hide_multiedges(edges) + else: + if G.is_directed(): + hide_edges = nxf.hide_diedges(edges) + else: + hide_edges = nxf.hide_edges(edges) + return nx.subgraph_view(G, filter_node=hide_nodes, filter_edge=hide_edges) + + +def to_directed(graph): + """Returns a directed view of the graph `graph`. + + Identical to graph.to_directed(as_view=True) + Note that graph.to_directed defaults to `as_view=False` + while this function always provides a view. + """ + return graph.to_directed(as_view=True) + + +def to_undirected(graph): + """Returns an undirected view of the graph `graph`. + + Identical to graph.to_undirected(as_view=True) + Note that graph.to_undirected defaults to `as_view=False` + while this function always provides a view. + """ + return graph.to_undirected(as_view=True) + + +def create_empty_copy(G, with_data=True): + """Returns a copy of the graph G with all of the edges removed. + + Parameters + ---------- + G : graph + A NetworkX graph + + with_data : bool (default=True) + Propagate Graph and Nodes data to the new graph. + + See Also + -------- + empty_graph + + """ + H = G.__class__() + H.add_nodes_from(G.nodes(data=with_data)) + if with_data: + H.graph.update(G.graph) + return H + + +@nx._dispatchable(preserve_node_attrs=True, mutates_input=True) +def set_node_attributes(G, values, name=None): + """Sets node attributes from a given value or dictionary of values. + + .. Warning:: The call order of arguments `values` and `name` + switched between v1.x & v2.x. + + Parameters + ---------- + G : NetworkX Graph + + values : scalar value, dict-like + What the node attribute should be set to. If `values` is + not a dictionary, then it is treated as a single attribute value + that is then applied to every node in `G`. This means that if + you provide a mutable object, like a list, updates to that object + will be reflected in the node attribute for every node. + The attribute name will be `name`. + + If `values` is a dict or a dict of dict, it should be keyed + by node to either an attribute value or a dict of attribute key/value + pairs used to update the node's attributes. + + name : string (optional, default=None) + Name of the node attribute to set if values is a scalar. 
+ + Examples + -------- + After computing some property of the nodes of a graph, you may want + to assign a node attribute to store the value of that property for + each node:: + + >>> G = nx.path_graph(3) + >>> bb = nx.betweenness_centrality(G) + >>> isinstance(bb, dict) + True + >>> nx.set_node_attributes(G, bb, "betweenness") + >>> G.nodes[1]["betweenness"] + 1.0 + + If you provide a list as the second argument, updates to the list + will be reflected in the node attribute for each node:: + + >>> G = nx.path_graph(3) + >>> labels = [] + >>> nx.set_node_attributes(G, labels, "labels") + >>> labels.append("foo") + >>> G.nodes[0]["labels"] + ['foo'] + >>> G.nodes[1]["labels"] + ['foo'] + >>> G.nodes[2]["labels"] + ['foo'] + + If you provide a dictionary of dictionaries as the second argument, + the outer dictionary is assumed to be keyed by node to an inner + dictionary of node attributes for that node:: + + >>> G = nx.path_graph(3) + >>> attrs = {0: {"attr1": 20, "attr2": "nothing"}, 1: {"attr2": 3}} + >>> nx.set_node_attributes(G, attrs) + >>> G.nodes[0]["attr1"] + 20 + >>> G.nodes[0]["attr2"] + 'nothing' + >>> G.nodes[1]["attr2"] + 3 + >>> G.nodes[2] + {} + + Note that if the dictionary contains nodes that are not in `G`, the + values are silently ignored:: + + >>> G = nx.Graph() + >>> G.add_node(0) + >>> nx.set_node_attributes(G, {0: "red", 1: "blue"}, name="color") + >>> G.nodes[0]["color"] + 'red' + >>> 1 in G.nodes + False + + """ + # Set node attributes based on type of `values` + if name is not None: # `values` must not be a dict of dict + try: # `values` is a dict + for n, v in values.items(): + try: + G.nodes[n][name] = values[n] + except KeyError: + pass + except AttributeError: # `values` is a constant + for n in G: + G.nodes[n][name] = values + else: # `values` must be dict of dict + for n, d in values.items(): + try: + G.nodes[n].update(d) + except KeyError: + pass + nx._clear_cache(G) + + +@nx._dispatchable(node_attrs={"name": "default"}) +def get_node_attributes(G, name, default=None): + """Get node attributes from graph + + Parameters + ---------- + G : NetworkX Graph + + name : string + Attribute name + + default: object (default=None) + Default value of the node attribute if there is no value set for that + node in graph. If `None` then nodes without this attribute are not + included in the returned dict. + + Returns + ------- + Dictionary of attributes keyed by node. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([1, 2, 3], color="red") + >>> color = nx.get_node_attributes(G, "color") + >>> color[1] + 'red' + >>> G.add_node(4) + >>> color = nx.get_node_attributes(G, "color", default="yellow") + >>> color[4] + 'yellow' + """ + if default is not None: + return {n: d.get(name, default) for n, d in G.nodes.items()} + return {n: d[name] for n, d in G.nodes.items() if name in d} + + +@nx._dispatchable(preserve_node_attrs=True, mutates_input=True) +def remove_node_attributes(G, *attr_names, nbunch=None): + """Remove node attributes from all nodes in the graph. + + Parameters + ---------- + G : NetworkX Graph + + *attr_names : List of Strings + The attribute names to remove from the graph. + + nbunch : List of Nodes + Remove the node attributes only from the nodes in this list. 
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G.add_nodes_from([1, 2, 3], color="blue")
+    >>> nx.get_node_attributes(G, "color")
+    {1: 'blue', 2: 'blue', 3: 'blue'}
+    >>> nx.remove_node_attributes(G, "color")
+    >>> nx.get_node_attributes(G, "color")
+    {}
+    """
+
+    if nbunch is None:
+        nbunch = G.nodes()
+
+    for attr in attr_names:
+        for n, d in G.nodes(data=True):
+            if n in nbunch:
+                try:
+                    del d[attr]
+                except KeyError:
+                    pass
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True)
+def set_edge_attributes(G, values, name=None):
+    """Sets edge attributes from a given value or dictionary of values.
+
+    .. Warning:: The call order of arguments `values` and `name`
+        switched between v1.x & v2.x.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    values : scalar value, dict-like
+        What the edge attribute should be set to. If `values` is
+        not a dictionary, then it is treated as a single attribute value
+        that is then applied to every edge in `G`. This means that if
+        you provide a mutable object, like a list, updates to that object
+        will be reflected in the edge attribute for each edge. The attribute
+        name will be `name`.
+
+        If `values` is a dict or a dict of dict, it should be keyed
+        by edge tuple to either an attribute value or a dict of attribute
+        key/value pairs used to update the edge's attributes.
+        For multigraphs, the edge tuples must be of the form ``(u, v, key)``,
+        where `u` and `v` are nodes and `key` is the edge key.
+        For non-multigraphs, the keys must be tuples of the form ``(u, v)``.
+
+    name : string (optional, default=None)
+        Name of the edge attribute to set if values is a scalar.
+
+    Examples
+    --------
+    After computing some property of the edges of a graph, you may want
+    to assign an edge attribute to store the value of that property for
+    each edge::
+
+        >>> G = nx.path_graph(3)
+        >>> bb = nx.edge_betweenness_centrality(G, normalized=False)
+        >>> nx.set_edge_attributes(G, bb, "betweenness")
+        >>> G.edges[1, 2]["betweenness"]
+        2.0
+
+    If you provide a list as the second argument, updates to the list
+    will be reflected in the edge attribute for each edge::
+
+        >>> labels = []
+        >>> nx.set_edge_attributes(G, labels, "labels")
+        >>> labels.append("foo")
+        >>> G.edges[0, 1]["labels"]
+        ['foo']
+        >>> G.edges[1, 2]["labels"]
+        ['foo']
+
+    If you provide a dictionary of dictionaries as the second argument,
+    the entire dictionary will be used to update edge attributes::
+
+        >>> G = nx.path_graph(3)
+        >>> attrs = {(0, 1): {"attr1": 20, "attr2": "nothing"}, (1, 2): {"attr2": 3}}
+        >>> nx.set_edge_attributes(G, attrs)
+        >>> G[0][1]["attr1"]
+        20
+        >>> G[0][1]["attr2"]
+        'nothing'
+        >>> G[1][2]["attr2"]
+        3
+
+    The attributes of one Graph can be used to set those of another.
+
+    >>> H = nx.path_graph(3)
+    >>> nx.set_edge_attributes(H, G.edges)
+
+    Note that if the dict contains edges that are not in `G`, they are
+    silently ignored::
+
+        >>> G = nx.Graph([(0, 1)])
+        >>> nx.set_edge_attributes(G, {(1, 2): {"weight": 2.0}})
+        >>> (1, 2) in G.edges()
+        False
+
+    For multigraphs, the `values` dict is expected to be keyed by 3-tuples
+    including the edge key::
+
+        >>> MG = nx.MultiGraph()
+        >>> edges = [(0, 1), (0, 1)]
+        >>> MG.add_edges_from(edges)  # Returns list of edge keys
+        [0, 1]
+        >>> attributes = {(0, 1, 0): {"cost": 21}, (0, 1, 1): {"cost": 7}}
+        >>> nx.set_edge_attributes(MG, attributes)
+        >>> MG[0][1][0]["cost"]
+        21
+        >>> MG[0][1][1]["cost"]
+        7
+
+    If MultiGraph attributes are desired for a Graph, you must convert the 3-tuple
+    multiedge to a 2-tuple edge and the last multiedge's attribute value will
+    overwrite the previous values. Continuing from the previous case we get::
+
+        >>> H = nx.path_graph([0, 1, 2])
+        >>> nx.set_edge_attributes(H, {(u, v): ed for u, v, ed in MG.edges.data()})
+        >>> nx.get_edge_attributes(H, "cost")
+        {(0, 1): 7}
+
+    """
+    if name is not None:
+        # `values` does not contain attribute names
+        try:
+            # if `values` is a dict using `.items()` => {edge: value}
+            if G.is_multigraph():
+                for (u, v, key), value in values.items():
+                    try:
+                        G._adj[u][v][key][name] = value
+                    except KeyError:
+                        pass
+            else:
+                for (u, v), value in values.items():
+                    try:
+                        G._adj[u][v][name] = value
+                    except KeyError:
+                        pass
+        except AttributeError:
+            # treat `values` as a constant
+            for u, v, data in G.edges(data=True):
+                data[name] = values
+    else:
+        # `values` consists of dict-of-dict {edge: {attr: value}} shape
+        if G.is_multigraph():
+            for (u, v, key), d in values.items():
+                try:
+                    G._adj[u][v][key].update(d)
+                except KeyError:
+                    pass
+        else:
+            for (u, v), d in values.items():
+                try:
+                    G._adj[u][v].update(d)
+                except KeyError:
+                    pass
+    nx._clear_cache(G)
+
+
+@nx._dispatchable(edge_attrs={"name": "default"})
+def get_edge_attributes(G, name, default=None):
+    """Get edge attributes from graph
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    name : string
+        Attribute name
+
+    default: object (default=None)
+        Default value of the edge attribute if there is no value set for that
+        edge in graph. If `None` then edges without this attribute are not
+        included in the returned dict.
+
+    Returns
+    -------
+    Dictionary of attributes keyed by edge. For (di)graphs, the keys are
+    2-tuples of the form: (u, v). For multi(di)graphs, the keys are 3-tuples of
+    the form: (u, v, key).
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> nx.add_path(G, [1, 2, 3], color="red")
+    >>> color = nx.get_edge_attributes(G, "color")
+    >>> color[(1, 2)]
+    'red'
+    >>> G.add_edge(3, 4)
+    >>> color = nx.get_edge_attributes(G, "color", default="yellow")
+    >>> color[(3, 4)]
+    'yellow'
+    """
+    if G.is_multigraph():
+        edges = G.edges(keys=True, data=True)
+    else:
+        edges = G.edges(data=True)
+    if default is not None:
+        return {x[:-1]: x[-1].get(name, default) for x in edges}
+    return {x[:-1]: x[-1][name] for x in edges if name in x[-1]}
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True)
+def remove_edge_attributes(G, *attr_names, ebunch=None):
+    """Remove edge attributes from all edges in the graph.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+
+    *attr_names : List of Strings
+        The attribute names to remove from the graph.
+
+    ebunch : List of edge tuples, optional (default=None)
+        Remove the edge attributes only from the edges in this list.
+        If None, the attributes are removed from all edges.
+ + Examples + -------- + >>> G = nx.path_graph(3) + >>> nx.set_edge_attributes(G, {(u, v): u + v for u, v in G.edges()}, name="weight") + >>> nx.get_edge_attributes(G, "weight") + {(0, 1): 1, (1, 2): 3} + >>> remove_edge_attributes(G, "weight") + >>> nx.get_edge_attributes(G, "weight") + {} + """ + if ebunch is None: + ebunch = G.edges(keys=True) if G.is_multigraph() else G.edges() + + for attr in attr_names: + edges = ( + G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True) + ) + for *e, d in edges: + if tuple(e) in ebunch: + try: + del d[attr] + except KeyError: + pass + + +def all_neighbors(graph, node): + """Returns all of the neighbors of a node in the graph. + + If the graph is directed returns predecessors as well as successors. + + Parameters + ---------- + graph : NetworkX graph + Graph to find neighbors. + + node : node + The node whose neighbors will be returned. + + Returns + ------- + neighbors : iterator + Iterator of neighbors + """ + if graph.is_directed(): + values = chain(graph.predecessors(node), graph.successors(node)) + else: + values = graph.neighbors(node) + return values + + +def non_neighbors(graph, node): + """Returns the non-neighbors of the node in the graph. + + Parameters + ---------- + graph : NetworkX graph + Graph to find neighbors. + + node : node + The node whose neighbors will be returned. + + Returns + ------- + non_neighbors : set + Set of nodes in the graph that are not neighbors of the node. + """ + return graph._adj.keys() - graph._adj[node].keys() - {node} + + +def non_edges(graph): + """Returns the nonexistent edges in the graph. + + Parameters + ---------- + graph : NetworkX graph. + Graph to find nonexistent edges. + + Returns + ------- + non_edges : iterator + Iterator of edges that are not in the graph. + """ + if graph.is_directed(): + for u in graph: + for v in non_neighbors(graph, u): + yield (u, v) + else: + nodes = set(graph) + while nodes: + u = nodes.pop() + for v in nodes - set(graph[u]): + yield (u, v) + + +@not_implemented_for("directed") +def common_neighbors(G, u, v): + """Returns the common neighbors of two nodes in a graph. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + u, v : nodes + Nodes in the graph. + + Returns + ------- + cnbors : set + Set of common neighbors of u and v in the graph. + + Raises + ------ + NetworkXError + If u or v is not a node in the graph. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> sorted(nx.common_neighbors(G, 0, 1)) + [2, 3, 4] + """ + if u not in G: + raise nx.NetworkXError("u is not in the graph.") + if v not in G: + raise nx.NetworkXError("v is not in the graph.") + + return G._adj[u].keys() & G._adj[v].keys() - {u, v} + + +@nx._dispatchable(preserve_edge_attrs=True) +def is_weighted(G, edge=None, weight="weight"): + """Returns True if `G` has weighted edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + edge : tuple, optional + A 2-tuple specifying the only edge in `G` that will be tested. If + None, then every edge in `G` is tested. + + weight: string, optional + The attribute name used to query for edge weights. + + Returns + ------- + bool + A boolean signifying if `G`, or the specified edge, is weighted. + + Raises + ------ + NetworkXError + If the specified edge does not exist. 
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.is_weighted(G) + False + >>> nx.is_weighted(G, (2, 3)) + False + + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2, weight=1) + >>> nx.is_weighted(G) + True + + """ + if edge is not None: + data = G.get_edge_data(*edge) + if data is None: + msg = f"Edge {edge!r} does not exist." + raise nx.NetworkXError(msg) + return weight in data + + if is_empty(G): + # Special handling required since: all([]) == True + return False + + return all(weight in data for u, v, data in G.edges(data=True)) + + +@nx._dispatchable(edge_attrs="weight") +def is_negatively_weighted(G, edge=None, weight="weight"): + """Returns True if `G` has negatively weighted edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + edge : tuple, optional + A 2-tuple specifying the only edge in `G` that will be tested. If + None, then every edge in `G` is tested. + + weight: string, optional + The attribute name used to query for edge weights. + + Returns + ------- + bool + A boolean signifying if `G`, or the specified edge, is negatively + weighted. + + Raises + ------ + NetworkXError + If the specified edge does not exist. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 3), (2, 4), (2, 6)]) + >>> G.add_edge(1, 2, weight=4) + >>> nx.is_negatively_weighted(G, (1, 2)) + False + >>> G[2][4]["weight"] = -2 + >>> nx.is_negatively_weighted(G) + True + >>> G = nx.DiGraph() + >>> edges = [("0", "3", 3), ("0", "1", -5), ("1", "0", -2)] + >>> G.add_weighted_edges_from(edges) + >>> nx.is_negatively_weighted(G) + True + + """ + if edge is not None: + data = G.get_edge_data(*edge) + if data is None: + msg = f"Edge {edge!r} does not exist." + raise nx.NetworkXError(msg) + return weight in data and data[weight] < 0 + + return any(weight in data and data[weight] < 0 for u, v, data in G.edges(data=True)) + + +@nx._dispatchable +def is_empty(G): + """Returns True if `G` has no edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + Returns + ------- + bool + True if `G` has no edges, and False otherwise. + + Notes + ----- + An empty graph can have nodes but not edges. The empty graph with zero + nodes is known as the null graph. This is an $O(n)$ operation where n + is the number of nodes in the graph. + + """ + return not any(G._adj.values()) + + +def nodes_with_selfloops(G): + """Returns an iterator over nodes with self loops. + + A node with a self loop has an edge with both ends adjacent + to that node. + + Returns + ------- + nodelist : iterator + A iterator over nodes with self loops. + + See Also + -------- + selfloop_edges, number_of_selfloops + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge(1, 1) + >>> G.add_edge(1, 2) + >>> list(nx.nodes_with_selfloops(G)) + [1] + + """ + return (n for n, nbrs in G._adj.items() if n in nbrs) + + +def selfloop_edges(G, data=False, keys=False, default=None): + """Returns an iterator over selfloop edges. + + A selfloop edge has the same node at both ends. + + Parameters + ---------- + G : graph + A NetworkX graph. + data : string or bool, optional (default=False) + Return selfloop edges as two tuples (u, v) (data=False) + or three-tuples (u, v, datadict) (data=True) + or three-tuples (u, v, datavalue) (data='attrname') + keys : bool, optional (default=False) + If True, return edge keys with each edge. + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. 
+ Only relevant if data is not True or False. + + Returns + ------- + edgeiter : iterator over edge tuples + An iterator over all selfloop edges. + + See Also + -------- + nodes_with_selfloops, number_of_selfloops + + Examples + -------- + >>> G = nx.MultiGraph() # or Graph, DiGraph, MultiDiGraph, etc + >>> ekey = G.add_edge(1, 1) + >>> ekey = G.add_edge(1, 2) + >>> list(nx.selfloop_edges(G)) + [(1, 1)] + >>> list(nx.selfloop_edges(G, data=True)) + [(1, 1, {})] + >>> list(nx.selfloop_edges(G, keys=True)) + [(1, 1, 0)] + >>> list(nx.selfloop_edges(G, keys=True, data=True)) + [(1, 1, 0, {})] + """ + if data is True: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k, d) + for n, nbrs in G._adj.items() + if n in nbrs + for k, d in nbrs[n].items() + ) + else: + return ( + (n, n, d) + for n, nbrs in G._adj.items() + if n in nbrs + for d in nbrs[n].values() + ) + else: + return ((n, n, nbrs[n]) for n, nbrs in G._adj.items() if n in nbrs) + elif data is not False: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k, d.get(data, default)) + for n, nbrs in G._adj.items() + if n in nbrs + for k, d in nbrs[n].items() + ) + else: + return ( + (n, n, d.get(data, default)) + for n, nbrs in G._adj.items() + if n in nbrs + for d in nbrs[n].values() + ) + else: + return ( + (n, n, nbrs[n].get(data, default)) + for n, nbrs in G._adj.items() + if n in nbrs + ) + else: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k) + for n, nbrs in G._adj.items() + if n in nbrs + for k in nbrs[n] + ) + else: + return ( + (n, n) + for n, nbrs in G._adj.items() + if n in nbrs + for i in range(len(nbrs[n])) # for easy edge removal (#4068) + ) + else: + return ((n, n) for n, nbrs in G._adj.items() if n in nbrs) + + +@nx._dispatchable +def number_of_selfloops(G): + """Returns the number of selfloop edges. + + A selfloop edge has the same node at both ends. + + Returns + ------- + nloops : int + The number of selfloops. + + See Also + -------- + nodes_with_selfloops, selfloop_edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge(1, 1) + >>> G.add_edge(1, 2) + >>> nx.number_of_selfloops(G) + 1 + """ + return sum(1 for _ in nx.selfloop_edges(G)) + + +def is_path(G, path): + """Returns whether or not the specified path exists. + + For it to return True, every node on the path must exist and + each consecutive pair must be connected via one or more edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + path : list + A list of nodes which defines the path to traverse + + Returns + ------- + bool + True if `path` is a valid path in `G` + + """ + try: + return all(nbr in G._adj[node] for node, nbr in nx.utils.pairwise(path)) + except (KeyError, TypeError): + return False + + +def path_weight(G, path, weight): + """Returns total cost associated with specified path and weight + + Parameters + ---------- + G : graph + A NetworkX graph. + + path: list + A list of node labels which defines the path to traverse + + weight: string + A string indicating which edge attribute to use for path cost + + Returns + ------- + cost: int or float + An integer or a float representing the total cost with respect to the + specified weight of the specified path + + Raises + ------ + NetworkXNoPath + If the specified edge does not exist. 
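+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G.add_weighted_edges_from([(0, 1, 10), (1, 2, 10)])
+    >>> nx.path_weight(G, [0, 1, 2], weight="weight")
+    20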
+ """ + multigraph = G.is_multigraph() + cost = 0 + + if not nx.is_path(G, path): + raise nx.NetworkXNoPath("path does not exist") + for node, nbr in nx.utils.pairwise(path): + if multigraph: + cost += min(v[weight] for v in G._adj[node][nbr].values()) + else: + cost += G._adj[node][nbr][weight] + return cost diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/graph.py b/.venv/lib/python3.12/site-packages/networkx/classes/graph.py new file mode 100644 index 0000000..e6adcf0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/graph.py @@ -0,0 +1,2071 @@ +"""Base class for undirected graphs. + +The Graph class allows any hashable object as a node +and can associate key/value attribute pairs with each undirected edge. + +Self-loops are allowed but multiple edges are not (see MultiGraph). + +For directed graphs see DiGraph and MultiDiGraph. +""" + +from copy import deepcopy +from functools import cached_property + +import networkx as nx +from networkx import convert +from networkx.classes.coreviews import AdjacencyView +from networkx.classes.reportviews import DegreeView, EdgeView, NodeView +from networkx.exception import NetworkXError + +__all__ = ["Graph"] + + +class _CachedPropertyResetterAdj: + """Data Descriptor class for _adj that resets ``adj`` cached_property when needed + + This assumes that the ``cached_property`` ``G.adj`` should be reset whenever + ``G._adj`` is set to a new value. + + This object sits on a class and ensures that any instance of that + class clears its cached property "adj" whenever the underlying + instance attribute "_adj" is set to a new object. It only affects + the set process of the obj._adj attribute. All get/del operations + act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_adj"] = value + # reset cached properties + props = ["adj", "edges", "degree"] + for prop in props: + if prop in od: + del od[prop] + + +class _CachedPropertyResetterNode: + """Data Descriptor class for _node that resets ``nodes`` cached_property when needed + + This assumes that the ``cached_property`` ``G.node`` should be reset whenever + ``G._node`` is set to a new value. + + This object sits on a class and ensures that any instance of that + class clears its cached property "nodes" whenever the underlying + instance attribute "_node" is set to a new object. It only affects + the set process of the obj._adj attribute. All get/del operations + act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_node"] = value + # reset cached properties + if "nodes" in od: + del od["nodes"] + + +class Graph: + """ + Base class for undirected graphs. + + A Graph stores nodes and edges with optional data, or attributes. + + Graphs hold undirected edges. Self loops are allowed but multiple + (parallel) edges are not. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes, except that `None` is not allowed as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. 
The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + DiGraph + MultiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.Graph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.Graph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 # node must exist already to use G.nodes + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> G.add_edge(1, 2, weight=4.7) + >>> G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2]["weight"] = 4.7 + >>> G.edges[1, 2]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges` a + read-only dict-like structure. However, you can assign to attributes + in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change + data attributes: `G.edges[1, 2]['weight'] = 4` + (For multigraphs: `MG.edges[u, v, key][name] = value`). + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()` + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, eattr in nbrsdict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, weight in G.edges.data("weight"): + ... 
if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using object-attributes and methods. + Reporting typically provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. + + **Subclasses (Advanced):** + + The Graph class uses a dict-of-dict-of-dict data structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information and holds + edge data keyed by neighbor. The inner dict (edge_attr_dict) represents + the edge data and holds edge attribute values keyed by attribute names. + + Each of these three dicts can be replaced in a subclass by a user defined + dict-like object. In general, the dict-like features should be + maintained but extra features can be added. To replace one of the + dicts create a new graph class by changing the class(!) variable + holding the factory for that dict-like structure. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. + It should require no arguments and return a dict-like object + + node_attr_dict_factory: function, (default: dict) + Factory function to be used to create the node attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object + + adjlist_outer_dict_factory : function, (default: dict) + Factory function to be used to create the outer-most dict + in the data structure that holds adjacency info keyed by node. + It should require no arguments and return a dict-like object. + + adjlist_inner_dict_factory : function, (default: dict) + Factory function to be used to create the adjacency list + dict which holds edge data keyed by neighbor. + It should require no arguments and return a dict-like object + + edge_attr_dict_factory : function, (default: dict) + Factory function to be used to create the edge attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. + + graph_attr_dict_factory : function, (default: dict) + Factory function to be used to create the graph attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. + + Typically, if your extension doesn't impact the data structure all + methods will inherit without issue except: `to_directed/to_undirected`. + By default these methods create a DiGraph/Graph class and you probably + want them to create your extension of a DiGraph/Graph. To facilitate + this we define two class variables that you can set in your subclass. + + to_directed_class : callable, (default: DiGraph or MultiDiGraph) + Class to create a new graph structure in the `to_directed` method. + If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used. 
+
+    to_undirected_class : callable, (default: Graph or MultiGraph)
+        Class to create a new graph structure in the `to_undirected` method.
+        If `None`, a NetworkX class (Graph or MultiGraph) is used.
+
+    **Subclassing Example**
+
+    Create a low memory graph class that effectively disallows edge
+    attributes by using a single attribute dict for all edges.
+    This reduces the memory used, but you lose edge attributes.
+
+    >>> class ThinGraph(nx.Graph):
+    ...     all_edge_dict = {"weight": 1}
+    ...
+    ...     def single_edge_dict(self):
+    ...         return self.all_edge_dict
+    ...
+    ...     edge_attr_dict_factory = single_edge_dict
+    >>> G = ThinGraph()
+    >>> G.add_edge(2, 1)
+    >>> G[2][1]
+    {'weight': 1}
+    >>> G.add_edge(2, 2)
+    >>> G[2][1] is G[2][2]
+    True
+    """
+
+    __networkx_backend__ = "networkx"
+
+    _adj = _CachedPropertyResetterAdj()
+    _node = _CachedPropertyResetterNode()
+
+    node_dict_factory = dict
+    node_attr_dict_factory = dict
+    adjlist_outer_dict_factory = dict
+    adjlist_inner_dict_factory = dict
+    edge_attr_dict_factory = dict
+    graph_attr_dict_factory = dict
+
+    def to_directed_class(self):
+        """Returns the class to use for empty directed copies.
+
+        If you subclass the base classes, use this to designate
+        what directed class to use for `to_directed()` copies.
+        """
+        return nx.DiGraph
+
+    def to_undirected_class(self):
+        """Returns the class to use for empty undirected copies.
+
+        If you subclass the base classes, use this to designate
+        what undirected class to use for `to_undirected()` copies.
+        """
+        return Graph
+
+    def __init__(self, incoming_graph_data=None, **attr):
+        """Initialize a graph with edges, name, or graph attributes.
+
+        Parameters
+        ----------
+        incoming_graph_data : input graph (optional, default: None)
+            Data to initialize graph. If None (default) an empty
+            graph is created. The data can be an edge list, or any
+            NetworkX graph object. If the corresponding optional Python
+            packages are installed the data can also be a 2D NumPy array, a
+            SciPy sparse array, or a PyGraphviz graph.
+
+        attr : keyword arguments, optional (default= no attributes)
+            Attributes to add to graph as key=value pairs.
+
+        See Also
+        --------
+        convert
+
+        Examples
+        --------
+        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G = nx.Graph(name="my graph")
+        >>> e = [(1, 2), (2, 3), (3, 4)]  # list of edges
+        >>> G = nx.Graph(e)
+
+        Arbitrary graph attribute pairs (key=value) may be assigned
+
+        >>> G = nx.Graph(e, day="Friday")
+        >>> G.graph
+        {'day': 'Friday'}
+
+        """
+        self.graph = self.graph_attr_dict_factory()  # dictionary for graph attributes
+        self._node = self.node_dict_factory()  # empty node attribute dict
+        self._adj = self.adjlist_outer_dict_factory()  # empty adjacency dict
+        self.__networkx_cache__ = {}
+        # attempt to load graph with data
+        if incoming_graph_data is not None:
+            convert.to_networkx_graph(incoming_graph_data, create_using=self)
+        # load graph attributes (must be after convert)
+        self.graph.update(attr)
+
+    @cached_property
+    def adj(self):
+        """Graph adjacency object holding the neighbors of each node.
+
+        This object is a read-only dict-like structure with node keys
+        and neighbor-dict values.  The neighbor-dict is keyed by neighbor
+        to the edge-data-dict.  So `G.adj[3][2]['color'] = 'blue'` sets
+        the color of the edge `(3, 2)` to `"blue"`.
+
+        Iterating over G.adj behaves like a dict. Useful idioms include
+        `for nbr, datadict in G.adj[n].items():`.
+
+        The neighbor information is also provided by subscripting the graph.
+        So `for nbr, foovalue in G[node].data('foo', default=1):` works.
+
+        For directed graphs, `G.adj` holds outgoing (successor) info.
+        """
+        return AdjacencyView(self._adj)
+
+    @property
+    def name(self):
+        """String identifier of the graph.
+
+        This graph attribute appears in the attribute dict G.graph
+        keyed by the string `"name"`, as well as an attribute (technically
+        a property) `G.name`. This is entirely user controlled.
+        """
+        return self.graph.get("name", "")
+
+    @name.setter
+    def name(self, s):
+        self.graph["name"] = s
+        nx._clear_cache(self)
+
+    def __str__(self):
+        """Returns a short summary of the graph.
+
+        Returns
+        -------
+        info : string
+            Graph information including the graph name (if any), graph type, and the
+            number of nodes and edges.
+
+        Examples
+        --------
+        >>> G = nx.Graph(name="foo")
+        >>> str(G)
+        "Graph named 'foo' with 0 nodes and 0 edges"
+
+        >>> G = nx.path_graph(3)
+        >>> str(G)
+        'Graph with 3 nodes and 2 edges'
+
+        """
+        return "".join(
+            [
+                type(self).__name__,
+                f" named {self.name!r}" if self.name else "",
+                f" with {self.number_of_nodes()} nodes and {self.number_of_edges()} edges",
+            ]
+        )
+
+    def __iter__(self):
+        """Iterate over the nodes. Use: 'for n in G'.
+
+        Returns
+        -------
+        niter : iterator
+            An iterator over all nodes in the graph.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> [n for n in G]
+        [0, 1, 2, 3]
+        >>> list(G)
+        [0, 1, 2, 3]
+        """
+        return iter(self._node)
+
+    def __contains__(self, n):
+        """Returns True if n is a node, False otherwise. Use: 'n in G'.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> 1 in G
+        True
+        """
+        try:
+            return n in self._node
+        except TypeError:
+            return False
+
+    def __len__(self):
+        """Returns the number of nodes in the graph. Use: 'len(G)'.
+
+        Returns
+        -------
+        nnodes : int
+            The number of nodes in the graph.
+
+        See Also
+        --------
+        number_of_nodes: identical method
+        order: identical method
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> len(G)
+        4
+
+        """
+        return len(self._node)
+
+    def __getitem__(self, n):
+        """Returns a dict of neighbors of node n.  Use: 'G[n]'.
+
+        Parameters
+        ----------
+        n : node
+           A node in the graph.
+
+        Returns
+        -------
+        adj_dict : dictionary
+           The adjacency dictionary for nodes connected to n.
+
+        Notes
+        -----
+        G[n] is the same as G.adj[n] and similar to G.neighbors(n)
+        (which is an iterator over G.adj[n])
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G[0]
+        AtlasView({1: {}})
+        """
+        return self.adj[n]
+
+    def add_node(self, node_for_adding, **attr):
+        """Add a single node `node_for_adding` and update node attributes.
+
+        Parameters
+        ----------
+        node_for_adding : node
+            A node can be any hashable Python object except None.
+        attr : keyword arguments, optional
+            Set or change node attributes using key=value.
+
+        See Also
+        --------
+        add_nodes_from
+
+        Examples
+        --------
+        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.add_node(1)
+        >>> G.add_node("Hello")
+        >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
+        >>> G.add_node(K3)
+        >>> G.number_of_nodes()
+        3
+
+        Use keywords to set/change node attributes:
+
+        >>> G.add_node(1, size=10)
+        >>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649))
+
+        Notes
+        -----
+        A hashable object is one that can be used as a key in a Python
+        dictionary.
This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. + """ + if node_for_adding not in self._node: + if node_for_adding is None: + raise ValueError("None cannot be a node") + self._adj[node_for_adding] = self.adjlist_inner_dict_factory() + attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory() + attr_dict.update(attr) + else: # update attr even if node already exists + self._node[node_for_adding].update(attr) + nx._clear_cache(self) + + def add_nodes_from(self, nodes_for_adding, **attr): + """Add multiple nodes. + + Parameters + ---------- + nodes_for_adding : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple take + precedence over attributes specified via keyword arguments. + + See Also + -------- + add_node + + Notes + ----- + When adding nodes from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.add_nodes_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(), key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1, 2], size=10) + >>> G.add_nodes_from([3, 4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific nodes. + + >>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})]) + >>> G.nodes[1]["size"] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.nodes[1]["size"] + 11 + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.Graph([(0, 1), (1, 2), (3, 4)]) + >>> # wrong way - will raise RuntimeError + >>> # G.add_nodes_from(n + 1 for n in G.nodes) + >>> # correct way + >>> G.add_nodes_from(list(n + 1 for n in G.nodes)) + """ + for n in nodes_for_adding: + try: + newnode = n not in self._node + newdict = attr + except TypeError: + n, ndict = n + newnode = n not in self._node + newdict = attr.copy() + newdict.update(ndict) + if newnode: + if n is None: + raise ValueError("None cannot be a node") + self._adj[n] = self.adjlist_inner_dict_factory() + self._node[n] = self.node_attr_dict_factory() + self._node[n].update(newdict) + nx._clear_cache(self) + + def remove_node(self, n): + """Remove node n. + + Removes the node n and all adjacent edges. + Attempting to remove a nonexistent node will raise an exception. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. 
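+
+        For example, removing a node that is absent raises the error
+        described above (a small illustrative doctest):
+
+        >>> G = nx.Graph()
+        >>> G.remove_node(0)
+        Traceback (most recent call last):
+            ...
+        networkx.exception.NetworkXError: The node 0 is not in the graph.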
+ + See Also + -------- + remove_nodes_from + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> list(G.edges) + [(0, 1), (1, 2)] + >>> G.remove_node(1) + >>> list(G.edges) + [] + + """ + adj = self._adj + try: + nbrs = list(adj[n]) # list handles self-loops (allows mutation) + del self._node[n] + except KeyError as err: # NetworkXError if n not in self + raise NetworkXError(f"The node {n} is not in the graph.") from err + for u in nbrs: + del adj[u][n] # remove all edges n-u in graph + del adj[n] # now remove node + nx._clear_cache(self) + + def remove_nodes_from(self, nodes): + """Remove multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). If a node + in the container is not in the graph it is silently + ignored. + + See Also + -------- + remove_node + + Notes + ----- + When removing nodes from an iterator over the graph you are changing, + a `RuntimeError` will be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.remove_nodes_from`. + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = list(G.nodes) + >>> e + [0, 1, 2] + >>> G.remove_nodes_from(e) + >>> list(G.nodes) + [] + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.Graph([(0, 1), (1, 2), (3, 4)]) + >>> # this command will fail, as the graph's dict is modified during iteration + >>> # G.remove_nodes_from(n for n in G.nodes if n < 2) + >>> # this command will work, since the dictionary underlying graph is not modified + >>> G.remove_nodes_from(list(n for n in G.nodes if n < 2)) + """ + adj = self._adj + for n in nodes: + try: + del self._node[n] + for u in list(adj[n]): # list handles self-loops + del adj[u][n] # (allows mutation of dict in loop) + del adj[n] + except KeyError: + pass + nx._clear_cache(self) + + @cached_property + def nodes(self): + """A NodeView of the Graph as G.nodes or G.nodes(). + + Can be used as `G.nodes` for data lookup and for set-like operations. + Can also be used as `G.nodes(data='color', default=None)` to return a + NodeDataView which reports specific node data but no set operations. + It presents a dict-like interface as well with `G.nodes.items()` + iterating over `(node, nodedata)` 2-tuples and `G.nodes[3]['foo']` + providing the value of the `foo` attribute for node `3`. In addition, + a view `G.nodes.data('foo')` provides a dict-like interface to the + `foo` attribute of each node. `G.nodes.data('foo', default=1)` + provides a default for nodes that do not have attribute `foo`. + + Parameters + ---------- + data : string or bool, optional (default=False) + The node attribute returned in 2-tuple (n, ddict[data]). + If True, return entire node attribute dict as (n, ddict). + If False, return just the nodes n. + + default : value, optional (default=None) + Value used for nodes that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + NodeView + Allows set-like operations over the nodes as well as node + attribute dict lookup and calling to get a NodeDataView. + A NodeDataView iterates over `(n, data)` and has no set operations. + A NodeView iterates over `n` and includes set operations. 
+ + When called, if data is False, an iterator over nodes. + Otherwise an iterator of 2-tuples (node, attribute value) + where the attribute is specified in `data`. + If data is True then the attribute becomes the + entire data dictionary. + + Notes + ----- + If your node data is not needed, it is simpler and equivalent + to use the expression ``for n in G``, or ``list(G)``. + + Examples + -------- + There are two simple ways of getting a list of all nodes in the graph: + + >>> G = nx.path_graph(3) + >>> list(G.nodes) + [0, 1, 2] + >>> list(G) + [0, 1, 2] + + To get the node data along with the nodes: + + >>> G.add_node(1, time="5pm") + >>> G.nodes[0]["foo"] = "bar" + >>> list(G.nodes(data=True)) + [(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})] + >>> list(G.nodes.data()) + [(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})] + + >>> list(G.nodes(data="foo")) + [(0, 'bar'), (1, None), (2, None)] + >>> list(G.nodes.data("foo")) + [(0, 'bar'), (1, None), (2, None)] + + >>> list(G.nodes(data="time")) + [(0, None), (1, '5pm'), (2, None)] + >>> list(G.nodes.data("time")) + [(0, None), (1, '5pm'), (2, None)] + + >>> list(G.nodes(data="time", default="Not Available")) + [(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')] + >>> list(G.nodes.data("time", default="Not Available")) + [(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')] + + If some of your nodes have an attribute and the rest are assumed + to have a default attribute value you can create a dictionary + from node/attribute pairs using the `default` keyword argument + to guarantee the value is never None:: + + >>> G = nx.Graph() + >>> G.add_node(0) + >>> G.add_node(1, weight=2) + >>> G.add_node(2, weight=3) + >>> dict(G.nodes(data="weight", default=1)) + {0: 1, 1: 2, 2: 3} + + """ + return NodeView(self) + + def number_of_nodes(self): + """Returns the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + order: identical method + __len__: identical method + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.number_of_nodes() + 3 + """ + return len(self._node) + + def order(self): + """Returns the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + number_of_nodes: identical method + __len__: identical method + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.order() + 3 + """ + return len(self._node) + + def has_node(self, n): + """Returns True if the graph contains the node n. + + Identical to `n in G` + + Parameters + ---------- + n : node + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.has_node(0) + True + + It is more readable and simpler to use + + >>> 0 in G + True + + """ + try: + return n in self._node + except TypeError: + return False + + def add_edge(self, u_of_edge, v_of_edge, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_of_edge, v_of_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. 
+ attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + Many NetworkX algorithms designed for weighted graphs use + an edge attribute (by default `weight`) to hold a numerical value. + + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1, 2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. + + >>> G.add_edge(1, 2) + >>> G[1][2].update({0: 5}) + >>> G.edges[1, 2].update({0: 5}) + """ + u, v = u_of_edge, v_of_edge + # add nodes + if u not in self._node: + if u is None: + raise ValueError("None cannot be a node") + self._adj[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._node: + if v is None: + raise ValueError("None cannot be a node") + self._adj[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + # add the edge + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + self._adj[u][v] = datadict + self._adj[v][u] = datadict + nx._clear_cache(self) + + def add_edges_from(self, ebunch_to_add, **attr): + """Add all the edges in ebunch_to_add. + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the container will be added to the + graph. The edges must be given as 2-tuples (u, v) or + 3-tuples (u, v, d) where d is a dictionary containing edge data. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Edge attributes specified in an ebunch take precedence over + attributes specified via keyword arguments. + + When adding edges from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_edges)`, and pass this + object to `G.add_edges_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples + >>> e = zip(range(0, 3), range(1, 4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) + >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898") + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + >>> # Grow graph by one new node, adding edges to all existing nodes. 
+ >>> # wrong way - will raise RuntimeError + >>> # G.add_edges_from(((5, n) for n in G.nodes)) + >>> # correct way - note that there will be no self-edge for node 5 + >>> G.add_edges_from(list((5, n) for n in G.nodes)) + """ + for e in ebunch_to_add: + ne = len(e) + if ne == 3: + u, v, dd = e + elif ne == 2: + u, v = e + dd = {} # doesn't need edge_attr_dict_factory + else: + raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.") + if u not in self._node: + if u is None: + raise ValueError("None cannot be a node") + self._adj[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._node: + if v is None: + raise ValueError("None cannot be a node") + self._adj[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + datadict.update(dd) + self._adj[u][v] = datadict + self._adj[v][u] = datadict + nx._clear_cache(self) + + def add_weighted_edges_from(self, ebunch_to_add, weight="weight", **attr): + """Add weighted edges in `ebunch_to_add` with specified weight attr + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the list or container will be added + to the graph. The edges must be given as 3-tuples (u, v, w) + where w is a number. + weight : string, optional (default= 'weight') + The attribute name for the edge weights to be added. + attr : keyword arguments, optional (default= no attributes) + Edge attributes to add/update for all edges. + + See Also + -------- + add_edge : add a single edge + add_edges_from : add multiple edges + + Notes + ----- + Adding the same edge twice for Graph/DiGraph simply updates + the edge data. For MultiGraph/MultiDiGraph, duplicate edges + are stored. + + When adding edges from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_edges)`, and pass this + object to `G.add_weighted_edges_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_weighted_edges_from([(0, 1, 3.0), (1, 2, 7.5)]) + + Evaluate an iterator over edges before passing it + + >>> G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + >>> weight = 0.1 + >>> # Grow graph by one new node, adding edges to all existing nodes. + >>> # wrong way - will raise RuntimeError + >>> # G.add_weighted_edges_from(((5, n, weight) for n in G.nodes)) + >>> # correct way - note that there will be no self-edge for node 5 + >>> G.add_weighted_edges_from(list((5, n, weight) for n in G.nodes)) + """ + self.add_edges_from(((u, v, {weight: d}) for u, v, d in ebunch_to_add), **attr) + nx._clear_cache(self) + + def remove_edge(self, u, v): + """Remove the edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove the edge between nodes u and v. + + Raises + ------ + NetworkXError + If there is not an edge between u and v. 
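+
+        A self-loop is removed the same way; only a single adjacency entry
+        is stored for it (a small illustrative doctest):
+
+        >>> G = nx.Graph()
+        >>> G.add_edge(0, 0)
+        >>> G.remove_edge(0, 0)
+        >>> list(G.edges)
+        []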
+
+        See Also
+        --------
+        remove_edges_from : remove a collection of edges
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, etc
+        >>> G.remove_edge(0, 1)
+        >>> e = (1, 2)
+        >>> G.remove_edge(*e)  # unpacks e from an edge tuple
+        >>> e = (2, 3, {"weight": 7})  # an edge with attribute data
+        >>> G.remove_edge(*e[:2])  # select first part of edge tuple
+        """
+        try:
+            del self._adj[u][v]
+            if u != v:  # self-loop needs only one entry removed
+                del self._adj[v][u]
+        except KeyError as err:
+            raise NetworkXError(f"The edge {u}-{v} is not in the graph") from err
+        nx._clear_cache(self)
+
+    def remove_edges_from(self, ebunch):
+        """Remove all edges specified in ebunch.
+
+        Parameters
+        ----------
+        ebunch: list or container of edge tuples
+            Each edge given in the list or container will be removed
+            from the graph. The edges can be:
+
+                - 2-tuples (u, v) edge between u and v.
+                - 3-tuples (u, v, k) where k is ignored.
+
+        See Also
+        --------
+        remove_edge : remove a single edge
+
+        Notes
+        -----
+        Will fail silently if an edge in ebunch is not in the graph.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> ebunch = [(1, 2), (2, 3)]
+        >>> G.remove_edges_from(ebunch)
+        """
+        adj = self._adj
+        for e in ebunch:
+            u, v = e[:2]  # ignore edge data if present
+            if u in adj and v in adj[u]:
+                del adj[u][v]
+                if u != v:  # self loop needs only one entry removed
+                    del adj[v][u]
+        nx._clear_cache(self)
+
+    def update(self, edges=None, nodes=None):
+        """Update the graph using nodes/edges/graphs as input.
+
+        Like dict.update, this method takes a graph as input, adding the
+        graph's nodes and edges to this graph. It can also take two inputs:
+        edges and nodes. Finally it can take either edges or nodes.
+        To specify only nodes the keyword `nodes` must be used.
+
+        The collections of edges and nodes are treated similarly to
+        the add_edges_from/add_nodes_from methods. When iterated, they
+        should yield 2-tuples (u, v) or 3-tuples (u, v, datadict).
+
+        Parameters
+        ----------
+        edges : Graph object, collection of edges, or None
+            The first parameter can be a graph or some edges. If it has
+            attributes `nodes` and `edges`, then it is taken to be a
+            Graph-like object and those attributes are used as collections
+            of nodes and edges to be added to the graph.
+            If the first parameter does not have those attributes, it is
+            treated as a collection of edges and added to the graph.
+            If the first argument is None, no edges are added.
+        nodes : collection of nodes, or None
+            The second parameter is treated as a collection of nodes
+            to be added to the graph unless it is None.
+            If `edges is None` and `nodes is None` an exception is raised.
+            If the first parameter is a Graph, then `nodes` is ignored.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(5)
+        >>> G.update(nx.complete_graph(range(4, 10)))
+        >>> from itertools import combinations
+        >>> edges = (
+        ...     (u, v, {"power": u * v})
+        ...     for u, v in combinations(range(10, 20), 2)
+        ...     if u * v < 225
+        ... )
+        >>> nodes = [1000]  # for singleton, use a container
+        >>> G.update(edges, nodes)
+
+        Notes
+        -----
+        If you want to update the graph using an adjacency structure
+        it is straightforward to obtain the edges/nodes from adjacency.
+        The following examples provide common cases, your adjacency may
+        be slightly different and require tweaks of these examples::
+
+        >>> # dict-of-set/list/tuple
+        >>> adj = {1: {2, 3}, 2: {1, 3}, 3: {1, 2}}
+        >>> e = [(u, v) for u, nbrs in adj.items() for v in nbrs]
+        >>> G.update(edges=e, nodes=adj)
+
+        >>> DG = nx.DiGraph()
+        >>> # dict-of-dict-of-attribute
+        >>> adj = {1: {2: 1.3, 3: 0.7}, 2: {1: 1.4}, 3: {1: 0.7}}
+        >>> e = [
+        ...     (u, v, {"weight": d})
+        ...     for u, nbrs in adj.items()
+        ...     for v, d in nbrs.items()
+        ... ]
+        >>> DG.update(edges=e, nodes=adj)
+
+        >>> # dict-of-dict-of-dict (d is already the edge attribute dict)
+        >>> adj = {1: {2: {"weight": 1.3}, 3: {"color": 0.7, "weight": 1.2}}}
+        >>> e = [
+        ...     (u, v, d)
+        ...     for u, nbrs in adj.items()
+        ...     for v, d in nbrs.items()
+        ... ]
+        >>> DG.update(edges=e, nodes=adj)
+
+        >>> # predecessor adjacency (dict-of-set)
+        >>> pred = {1: {2, 3}, 2: {3}, 3: {3}}
+        >>> e = [(v, u) for u, nbrs in pred.items() for v in nbrs]
+
+        >>> # MultiGraph dict-of-dict-of-dict-of-attribute
+        >>> MDG = nx.MultiDiGraph()
+        >>> adj = {
+        ...     1: {2: {0: {"weight": 1.3}, 1: {"weight": 1.2}}},
+        ...     3: {2: {0: {"weight": 0.7}}},
+        ... }
+        >>> e = [
+        ...     (u, v, ekey, d)
+        ...     for u, nbrs in adj.items()
+        ...     for v, keydict in nbrs.items()
+        ...     for ekey, d in keydict.items()
+        ... ]
+        >>> MDG.update(edges=e)
+
+        See Also
+        --------
+        add_edges_from: add multiple edges to a graph
+        add_nodes_from: add multiple nodes to a graph
+        """
+        if edges is not None:
+            if nodes is not None:
+                self.add_nodes_from(nodes)
+                self.add_edges_from(edges)
+            else:
+                # check if edges is a Graph object
+                try:
+                    graph_nodes = edges.nodes
+                    graph_edges = edges.edges
+                except AttributeError:
+                    # edge not Graph-like
+                    self.add_edges_from(edges)
+                else:  # edges is Graph-like
+                    self.add_nodes_from(graph_nodes.data())
+                    self.add_edges_from(graph_edges.data())
+                    self.graph.update(edges.graph)
+        elif nodes is not None:
+            self.add_nodes_from(nodes)
+        else:
+            raise NetworkXError("update needs nodes or edges input")
+
+    def has_edge(self, u, v):
+        """Returns True if the edge (u, v) is in the graph.
+
+        This is the same as `v in G[u]` without KeyError exceptions.
+
+        Parameters
+        ----------
+        u, v : nodes
+            Nodes can be, for example, strings or numbers.
+            Nodes must be hashable (and not None) Python objects.
+
+        Returns
+        -------
+        edge_ind : bool
+            True if edge is in the graph, False otherwise.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.has_edge(0, 1)  # using two nodes
+        True
+        >>> e = (0, 1)
+        >>> G.has_edge(*e)  # e is a 2-tuple (u, v)
+        True
+        >>> e = (0, 1, {"weight": 7})
+        >>> G.has_edge(*e[:2])  # e is a 3-tuple (u, v, data_dictionary)
+        True
+
+        The following are equivalent:
+
+        >>> G.has_edge(0, 1)
+        True
+        >>> 1 in G[0]  # though this gives KeyError if 0 not in G
+        True
+
+        """
+        try:
+            return v in self._adj[u]
+        except KeyError:
+            return False
+
+    def neighbors(self, n):
+        """Returns an iterator over all neighbors of node n.
+
+        This is identical to `iter(G[n])`
+
+        Parameters
+        ----------
+        n : node
+           A node in the graph
+
+        Returns
+        -------
+        neighbors : iterator
+            An iterator over all neighbors of node n
+
+        Raises
+        ------
+        NetworkXError
+            If the node n is not in the graph.
+ + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> [n for n in G.neighbors(0)] + [1] + + Notes + ----- + Alternate ways to access the neighbors are ``G.adj[n]`` or ``G[n]``: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge("a", "b", weight=7) + >>> G["a"] + AtlasView({'b': {'weight': 7}}) + >>> G = nx.path_graph(4) + >>> [n for n in G[0]] + [1] + """ + try: + return iter(self._adj[n]) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the graph.") from err + + @cached_property + def edges(self): + """An EdgeView of the Graph as G.edges or G.edges(). + + edges(self, nbunch=None, data=False, default=None) + + The EdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, `G.edges[u, v]['color']` provides the value of the color + attribute for edge `(u, v)` while + `for (u, v, c) in G.edges.data('color', default='red'):` + iterates through all the edges yielding the color attribute + with default `'red'` if no color attribute exists. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : EdgeView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, d) tuples of edges, but can also be used for + attribute lookup as `edges[u, v]['foo']`. + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. + + Examples + -------- + >>> G = nx.path_graph(3) # or MultiGraph, etc + >>> G.add_edge(2, 3, weight=5) + >>> [e for e in G.edges] + [(0, 1), (1, 2), (2, 3)] + >>> G.edges.data() # default data is {} (empty dict) + EdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]) + >>> G.edges.data("weight", default=1) + EdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)]) + >>> G.edges([0, 3]) # only edges from these nodes + EdgeDataView([(0, 1), (3, 2)]) + >>> G.edges(0) # only edges from node 0 + EdgeDataView([(0, 1)]) + """ + return EdgeView(self) + + def get_edge_data(self, u, v, default=None): + """Returns the attribute dictionary associated with edge (u, v). + + This is identical to `G[u][v]` except the default is returned + instead of an exception if the edge doesn't exist. + + Parameters + ---------- + u, v : nodes + default: any Python object (default=None) + Value to return if the edge (u, v) is not found. + + Returns + ------- + edge_dict : dictionary + The edge attribute dictionary. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G[0][1] + {} + + Warning: Assigning to `G[u][v]` is not permitted. 
+        But it is safe to assign attributes `G[u][v]['foo']`
+
+        >>> G[0][1]["weight"] = 7
+        >>> G[0][1]["weight"]
+        7
+        >>> G[1][0]["weight"]
+        7
+
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.get_edge_data(0, 1)  # default edge data is {}
+        {}
+        >>> e = (0, 1)
+        >>> G.get_edge_data(*e)  # tuple form
+        {}
+        >>> G.get_edge_data("a", "b", default=0)  # edge not in graph, return 0
+        0
+        """
+        try:
+            return self._adj[u][v]
+        except KeyError:
+            return default
+
+    def adjacency(self):
+        """Returns an iterator over (node, adjacency dict) tuples for all nodes.
+
+        For directed graphs, only outgoing neighbors/adjacencies are included.
+
+        Returns
+        -------
+        adj_iter : iterator
+            An iterator over (node, adjacency dictionary) for all nodes in
+            the graph.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> [(n, nbrdict) for n, nbrdict in G.adjacency()]
+        [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})]
+
+        """
+        return iter(self._adj.items())
+
+    @cached_property
+    def degree(self):
+        """A DegreeView for the Graph as G.degree or G.degree().
+
+        The node degree is the number of edges adjacent to the node.
+        The weighted node degree is the sum of the edge weights for
+        edges incident to that node.
+
+        This object provides an iterator for (node, degree) as well as
+        lookup for the degree for a single node.
+
+        Parameters
+        ----------
+        nbunch : single node, container, or all nodes (default= all nodes)
+            The view will only report edges incident to these nodes.
+
+        weight : string or None, optional (default=None)
+            The name of an edge attribute that holds the numerical value used
+            as a weight.  If None, then each edge has weight 1.
+            The degree is the sum of the edge weights adjacent to the node.
+
+        Returns
+        -------
+        DegreeView or int
+            If multiple nodes are requested (the default), returns a `DegreeView`
+            mapping nodes to their degree.
+            If a single node is requested, returns the degree of the node as an integer.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.degree[0]  # node 0 has degree 1
+        1
+        >>> list(G.degree([0, 1, 2]))
+        [(0, 1), (1, 2), (2, 2)]
+        """
+        return DegreeView(self)
+
+    def clear(self):
+        """Remove all nodes and edges from the graph.
+
+        This also removes the name, and all graph, node, and edge attributes.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.clear()
+        >>> list(G.nodes)
+        []
+        >>> list(G.edges)
+        []
+
+        """
+        self._adj.clear()
+        self._node.clear()
+        self.graph.clear()
+        nx._clear_cache(self)
+
+    def clear_edges(self):
+        """Remove all edges from the graph without altering nodes.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> G.clear_edges()
+        >>> list(G.nodes)
+        [0, 1, 2, 3]
+        >>> list(G.edges)
+        []
+        """
+        for nbr_dict in self._adj.values():
+            nbr_dict.clear()
+        nx._clear_cache(self)
+
+    def is_multigraph(self):
+        """Returns True if graph is a multigraph, False otherwise."""
+        return False
+
+    def is_directed(self):
+        """Returns True if graph is directed, False otherwise."""
+        return False
+
+    def copy(self, as_view=False):
+        """Returns a copy of the graph.
+
+        The copy method by default returns an independent shallow copy
+        of the graph and attributes. That is, if an attribute is a
+        container, that container is shared by the original and the copy.
+        Use Python's `copy.deepcopy` for new containers.
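+
+        A quick illustrative sketch of that sharing (the node's attribute
+        dict is new, but the list object inside it is shared):
+
+        >>> G = nx.Graph()
+        >>> G.add_node(0, colors=["red"])
+        >>> H = G.copy()
+        >>> H.nodes[0]["colors"].append("blue")
+        >>> G.nodes[0]["colors"]
+        ['red', 'blue']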
+ + If `as_view` is True then a view is returned instead of a copy. + + Notes + ----- + All copies reproduce the graph structure, but data attributes + may be handled in different ways. There are four types of copies + of a graph that people might want. + + Deepcopy -- A "deepcopy" copies the graph structure as well as + all data attributes and any objects they might contain. + The entire graph object is new so that changes in the copy + do not affect the original object. (see Python's copy.deepcopy) + + Data Reference (Shallow) -- For a shallow copy the graph structure + is copied but the edge, node and graph attribute dicts are + references to those in the original graph. This saves + time and memory but could cause confusion if you change an attribute + in one graph and it changes the attribute in the other. + NetworkX does not provide this level of shallow copy. + + Independent Shallow -- This copy creates new independent attribute + dicts and then does a shallow copy of the attributes. That is, any + attributes that are containers are shared between the new graph + and the original. This is exactly what `dict.copy()` provides. + You can obtain this style copy using: + + >>> G = nx.path_graph(5) + >>> H = G.copy() + >>> H = G.copy(as_view=False) + >>> H = nx.Graph(G) + >>> H = G.__class__(G) + + Fresh Data -- For fresh data, the graph structure is copied while + new empty data attribute dicts are created. The resulting graph + is independent of the original and it has no edge, node or graph + attributes. Fresh copies are not enabled. Instead use: + + >>> H = G.__class__() + >>> H.add_nodes_from(G) + >>> H.add_edges_from(G.edges) + + View -- Inspired by dict-views, graph-views act like read-only + versions of the original graph, providing a copy of the original + structure without requiring any memory for copying the information. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Parameters + ---------- + as_view : bool, optional (default=False) + If True, the returned graph-view provides a read-only view + of the original graph without actually copying any data. + + Returns + ------- + G : Graph + A copy of the graph. + + See Also + -------- + to_directed: return a directed copy of the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = G.copy() + + """ + if as_view is True: + return nx.graphviews.generic_graph_view(self) + G = self.__class__() + G.graph.update(self.graph) + G.add_nodes_from((n, d.copy()) for n, d in self._node.items()) + G.add_edges_from( + (u, v, datadict.copy()) + for u, nbrs in self._adj.items() + for v, datadict in nbrs.items() + ) + return G + + def to_directed(self, as_view=False): + """Returns a directed representation of the graph. + + Returns + ------- + G : DiGraph + A directed graph with the same name, same nodes, and with + each edge (u, v, data) replaced by two directed edges + (u, v, data) and (v, u, data). + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=DiGraph(G) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. 
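+
+        A quick illustrative sketch of the deepcopy behaviour: edge data on
+        the directed copy is independent of the original.
+
+        >>> G = nx.Graph()
+        >>> G.add_edge(0, 1, weight=1)
+        >>> D = G.to_directed()
+        >>> D[0][1]["weight"] = 2
+        >>> G[0][1]["weight"]
+        1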
+
+        Warning: If you have subclassed Graph to use dict-like objects
+        in the data structure, those changes do not transfer to the
+        DiGraph created by this method.
+
+        Examples
+        --------
+        >>> G = nx.Graph()  # or MultiGraph, etc
+        >>> G.add_edge(0, 1)
+        >>> H = G.to_directed()
+        >>> list(H.edges)
+        [(0, 1), (1, 0)]
+
+        If already directed, return a (deep) copy
+
+        >>> G = nx.DiGraph()  # or MultiDiGraph, etc
+        >>> G.add_edge(0, 1)
+        >>> H = G.to_directed()
+        >>> list(H.edges)
+        [(0, 1)]
+        """
+        graph_class = self.to_directed_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, deepcopy(data))
+            for u, nbrs in self._adj.items()
+            for v, data in nbrs.items()
+        )
+        return G
+
+    def to_undirected(self, as_view=False):
+        """Returns an undirected copy of the graph.
+
+        Parameters
+        ----------
+        as_view : bool (optional, default=False)
+          If True return a view of the original undirected graph.
+
+        Returns
+        -------
+        G : Graph/MultiGraph
+            A deepcopy of the graph.
+
+        See Also
+        --------
+        Graph, copy, add_edge, add_edges_from
+
+        Notes
+        -----
+        This returns a "deepcopy" of the edge, node, and
+        graph attributes which attempts to completely copy
+        all of the data and references.
+
+        This is in contrast to the similar `G = nx.DiGraph(D)` which returns a
+        shallow copy of the data.
+
+        See the Python copy module for more information on shallow
+        and deep copies, https://docs.python.org/3/library/copy.html.
+
+        Warning: If you have subclassed DiGraph to use dict-like objects
+        in the data structure, those changes do not transfer to the
+        Graph created by this method.
+
+        Examples
+        --------
+        >>> G = nx.path_graph(2)  # or MultiGraph, etc
+        >>> H = G.to_directed()
+        >>> list(H.edges)
+        [(0, 1), (1, 0)]
+        >>> G2 = H.to_undirected()
+        >>> list(G2.edges)
+        [(0, 1)]
+        """
+        graph_class = self.to_undirected_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, deepcopy(d))
+            for u, nbrs in self._adj.items()
+            for v, d in nbrs.items()
+        )
+        return G
+
+    def subgraph(self, nodes):
+        """Returns a SubGraph view of the subgraph induced on `nodes`.
+
+        The induced subgraph of the graph contains the nodes in `nodes`
+        and the edges between those nodes.
+
+        Parameters
+        ----------
+        nodes : list, iterable
+            A container of nodes which will be iterated through once.
+
+        Returns
+        -------
+        G : SubGraph View
+            A subgraph view of the graph. The graph structure cannot be
+            changed but node/edge attributes can and are shared with the
+            original graph.
+
+        Notes
+        -----
+        The graph, edge and node attributes are shared with the original graph.
+        Changes to the graph structure are ruled out by the view, but changes
+        to attributes are reflected in the original graph.
+
+        To create a subgraph with its own copy of the edge/node attributes use:
+        G.subgraph(nodes).copy()
+
+        For an in-place reduction of a graph to a subgraph you can remove nodes:
+        G.remove_nodes_from([n for n in G if n not in set(nodes)])
+
+        Subgraph views are sometimes NOT what you want.
In most cases where + you want to do more than simply look at the induced edges, it makes + more sense to just create the subgraph as its own graph with code like: + + :: + + # Create a subgraph SG based on a (possibly multigraph) G + SG = G.__class__() + SG.add_nodes_from((n, G.nodes[n]) for n in largest_wcc) + if SG.is_multigraph(): + SG.add_edges_from( + (n, nbr, key, d) + for n, nbrs in G.adj.items() + if n in largest_wcc + for nbr, keydict in nbrs.items() + if nbr in largest_wcc + for key, d in keydict.items() + ) + else: + SG.add_edges_from( + (n, nbr, d) + for n, nbrs in G.adj.items() + if n in largest_wcc + for nbr, d in nbrs.items() + if nbr in largest_wcc + ) + SG.graph.update(G.graph) + + Subgraphs are not guaranteed to preserve the order of nodes or edges + as they appear in the original graph. For example: + + >>> G = nx.Graph() + >>> G.add_nodes_from(reversed(range(10))) + >>> list(G) + [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + >>> list(G.subgraph([1, 3, 2])) + [1, 2, 3] + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = G.subgraph([0, 1, 2]) + >>> list(H.edges) + [(0, 1), (1, 2)] + """ + induced_nodes = nx.filters.show_nodes(self.nbunch_iter(nodes)) + # if already a subgraph, don't make a chain + subgraph = nx.subgraph_view + if hasattr(self, "_NODE_OK"): + return subgraph( + self._graph, filter_node=induced_nodes, filter_edge=self._EDGE_OK + ) + return subgraph(self, filter_node=induced_nodes) + + def edge_subgraph(self, edges): + """Returns the subgraph induced by the specified edges. + + The induced subgraph contains each edge in `edges` and each + node incident to any one of those edges. + + Parameters + ---------- + edges : iterable + An iterable of edges in this graph. + + Returns + ------- + G : Graph + An edge-induced subgraph of this graph with the same edge + attributes. + + Notes + ----- + The graph, edge, and node attributes in the returned subgraph + view are references to the corresponding attributes in the original + graph. The view is read-only. + + To create a full graph version of the subgraph with its own copy + of the edge or node attributes, use:: + + G.edge_subgraph(edges).copy() + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = G.edge_subgraph([(0, 1), (3, 4)]) + >>> list(H.nodes) + [0, 1, 3, 4] + >>> list(H.edges) + [(0, 1), (3, 4)] + + """ + return nx.edge_subgraph(self, edges) + + def size(self, weight=None): + """Returns the number of edges or total of all edge weights. + + Parameters + ---------- + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + + Returns + ------- + size : numeric + The number of edges or + (if weight keyword is provided) the total weight sum. + + If weight is None, returns an int. Otherwise a float + (or more general numeric if the weights are more general). + + See Also + -------- + number_of_edges + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.size() + 3 + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge("a", "b", weight=2) + >>> G.add_edge("b", "c", weight=4) + >>> G.size() + 2 + >>> G.size(weight="weight") + 6.0 + """ + s = sum(d for v, d in self.degree(weight=weight)) + # If `weight` is None, the sum of the degrees is guaranteed to be + # even, so we can perform integer division and hence return an + # integer. 
Otherwise, the sum of the weighted degrees is not + # guaranteed to be an integer, so we perform "real" division. + return s // 2 if weight is None else s / 2 + + def number_of_edges(self, u=None, v=None): + """Returns the number of edges between two nodes. + + Parameters + ---------- + u, v : nodes, optional (default=all edges) + If u and v are specified, return the number of edges between + u and v. Otherwise return the total number of all edges. + + Returns + ------- + nedges : int + The number of edges in the graph. If nodes `u` and `v` are + specified return the number of edges between those nodes. If + the graph is directed, this only returns the number of edges + from `u` to `v`. + + See Also + -------- + size + + Examples + -------- + For undirected graphs, this method counts the total number of + edges in the graph: + + >>> G = nx.path_graph(4) + >>> G.number_of_edges() + 3 + + If you specify two nodes, this counts the total number of edges + joining the two nodes: + + >>> G.number_of_edges(0, 1) + 1 + + For directed graphs, this method can count the total number of + directed edges from `u` to `v`: + + >>> G = nx.DiGraph() + >>> G.add_edge(0, 1) + >>> G.add_edge(1, 0) + >>> G.number_of_edges(0, 1) + 1 + + """ + if u is None: + return int(self.size()) + if v in self._adj[u]: + return 1 + return 0 + + def nbunch_iter(self, nbunch=None): + """Returns an iterator over nodes contained in nbunch that are + also in the graph. + + The nodes in an iterable nbunch are checked for membership in the graph + and if not are silently ignored. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + Returns + ------- + niter : iterator + An iterator over nodes in nbunch that are also in the graph. + If nbunch is None, iterate over all nodes in the graph. + + Raises + ------ + NetworkXError + If nbunch is not a node or sequence of nodes. + If a node in nbunch is not hashable. + + See Also + -------- + Graph.__iter__ + + Notes + ----- + When nbunch is an iterator, the returned iterator yields values + directly from nbunch, becoming exhausted when nbunch is exhausted. + + To test whether nbunch is a single node, one can use + "if nbunch in self:", even after processing with this routine. + + If nbunch is not a node or a (possibly empty) sequence/iterator + or None, a :exc:`NetworkXError` is raised. Also, if any object in + nbunch is not hashable, a :exc:`NetworkXError` is raised. + """ + if nbunch is None: # include all nodes via iterator + bunch = iter(self._adj) + elif nbunch in self: # if nbunch is a single node + bunch = iter([nbunch]) + else: # if nbunch is a sequence of nodes + + def bunch_iter(nlist, adj): + try: + for n in nlist: + if n in adj: + yield n + except TypeError as err: + exc, message = err, err.args[0] + # capture error for non-sequence/iterator nbunch. + if "iter" in message: + exc = NetworkXError( + "nbunch is not a node or a sequence of nodes." + ) + # capture single nodes that are not in the graph. + if "object is not iterable" in message: + exc = NetworkXError(f"Node {nbunch} is not in the graph.") + # capture error for unhashable node. + if "hashable" in message: + exc = NetworkXError( + f"Node {n} in sequence nbunch is not a valid node." 
+                        )
+                    raise exc
+
+            bunch = bunch_iter(nbunch, self._adj)
+        return bunch
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/graphviews.py b/.venv/lib/python3.12/site-packages/networkx/classes/graphviews.py
new file mode 100644
index 0000000..0b09df6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/classes/graphviews.py
@@ -0,0 +1,269 @@
+"""View of Graphs as SubGraph, Reverse, Directed, Undirected.
+
+In some algorithms it is convenient to temporarily morph
+a graph to exclude some nodes or edges. It is usually better
+to do that with a view than to remove the elements and re-add them.
+In other algorithms it is convenient to temporarily morph
+a graph to reverse directed edges, or treat a directed graph
+as undirected, etc. This module provides those graph views.
+
+The resulting views are essentially read-only graphs that
+report data from the original graph object. We provide an
+attribute G._graph which points to the underlying graph object.
+
+Note: Since graphviews look like graphs, one can end up with
+view-of-view-of-view chains. Be careful with chains because
+they become very slow with about 15 nested views.
+For the common simple case of node induced subgraphs created
+from the graph class, we short-cut the chain by returning a
+subgraph of the original graph directly rather than a subgraph
+of a subgraph. We are careful not to disrupt any edge filter in
+the middle subgraph. In general, determining how to short-cut
+the chain is tricky and much harder with restricted_views than
+with induced subgraphs.
+Often it is easiest to use .copy() to avoid chains.
+"""
+
+import networkx as nx
+from networkx.classes.coreviews import (
+    FilterAdjacency,
+    FilterAtlas,
+    FilterMultiAdjacency,
+    UnionAdjacency,
+    UnionMultiAdjacency,
+)
+from networkx.classes.filters import no_filter
+from networkx.exception import NetworkXError
+from networkx.utils import not_implemented_for
+
+__all__ = ["generic_graph_view", "subgraph_view", "reverse_view"]
+
+
+def generic_graph_view(G, create_using=None):
+    """Returns a read-only view of `G`.
+
+    The graph `G` and its attributes are not copied but viewed through the new graph object
+    of the same class as `G` (or of the class specified in `create_using`).
+
+    Parameters
+    ----------
+    G : graph
+        A directed/undirected graph/multigraph.
+
+    create_using : NetworkX graph constructor, optional (default=None)
+        Graph type to create. If graph instance, then cleared before populated.
+        If `None`, then the appropriate Graph type is inferred from `G`.
+
+    Returns
+    -------
+    newG : graph
+        A view of the input graph `G` and its attributes as viewed through
+        the `create_using` class.
+
+    Raises
+    ------
+    NetworkXError
+        If `G` is a multigraph (or multidigraph) but `create_using` is not, or vice versa.
+
+    Notes
+    -----
+    The returned graph view is read-only (cannot modify the graph).
+    Yet the view reflects any changes in `G`. The intent is to mimic dict views.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G.add_edge(1, 2, weight=0.3)
+    >>> G.add_edge(2, 3, weight=0.5)
+    >>> G.edges(data=True)
+    EdgeDataView([(1, 2, {'weight': 0.3}), (2, 3, {'weight': 0.5})])
+
+    The view exposes the attributes from the original graph.
+
+    >>> viewG = nx.graphviews.generic_graph_view(G)
+    >>> viewG.edges(data=True)
+    EdgeDataView([(1, 2, {'weight': 0.3}), (2, 3, {'weight': 0.5})])
+
+    Changes to `G` are reflected in `viewG`.
+
+    >>> G.remove_edge(2, 3)
+    >>> G.edges(data=True)
+    EdgeDataView([(1, 2, {'weight': 0.3})])
+
+    >>> viewG.edges(data=True)
+    EdgeDataView([(1, 2, {'weight': 0.3})])
+
+    We can change the graph type with the `create_using` parameter.
+
+    >>> type(G)
+    <class 'networkx.classes.graph.Graph'>
+    >>> viewDG = nx.graphviews.generic_graph_view(G, create_using=nx.DiGraph)
+    >>> type(viewDG)
+    <class 'networkx.classes.digraph.DiGraph'>
+    """
+    if create_using is None:
+        newG = G.__class__()
+    else:
+        newG = nx.empty_graph(0, create_using)
+    if G.is_multigraph() != newG.is_multigraph():
+        raise NetworkXError("Multigraph for G must agree with create_using")
+    newG = nx.freeze(newG)
+
+    # create view by assigning attributes from G
+    newG._graph = G
+    newG.graph = G.graph
+
+    newG._node = G._node
+    if newG.is_directed():
+        if G.is_directed():
+            newG._succ = G._succ
+            newG._pred = G._pred
+            # newG._adj is synced with _succ
+        else:
+            newG._succ = G._adj
+            newG._pred = G._adj
+            # newG._adj is synced with _succ
+    elif G.is_directed():
+        if G.is_multigraph():
+            newG._adj = UnionMultiAdjacency(G._succ, G._pred)
+        else:
+            newG._adj = UnionAdjacency(G._succ, G._pred)
+    else:
+        newG._adj = G._adj
+    return newG
+
+
+def subgraph_view(G, *, filter_node=no_filter, filter_edge=no_filter):
+    """View of `G` applying a filter on nodes and edges.
+
+    `subgraph_view` provides a read-only view of the input graph that excludes
+    nodes and edges based on the outcome of two filter functions `filter_node`
+    and `filter_edge`.
+
+    The `filter_node` function takes one argument --- the node --- and returns
+    `True` if the node should be included in the subgraph, and `False` if it
+    should not be included.
+
+    The `filter_edge` function takes two (or three arguments if `G` is a
+    multi-graph) --- the nodes describing an edge, plus the edge-key if
+    parallel edges are possible --- and returns `True` if the edge should be
+    included in the subgraph, and `False` if it should not be included.
+
+    Both node and edge filter functions are called on graph elements as they
+    are queried, meaning there is no up-front cost to creating the view.
+
+    Parameters
+    ----------
+    G : networkx.Graph
+        A directed/undirected graph/multigraph
+
+    filter_node : callable, optional
+        A function taking a node as input, which returns `True` if the node
+        should appear in the view.
+
+    filter_edge : callable, optional
+        A function taking as input the two nodes describing an edge (plus the
+        edge-key if `G` is a multi-graph), which returns `True` if the edge
+        should appear in the view.
+
+    Returns
+    -------
+    graph : networkx.Graph
+        A read-only graph view of the input graph.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(6)
+
+    Filter functions operate on the node, and return `True` if the node should
+    appear in the view:
+
+    >>> def filter_node(n1):
+    ...     return n1 != 5
+    >>> view = nx.subgraph_view(G, filter_node=filter_node)
+    >>> view.nodes()
+    NodeView((0, 1, 2, 3, 4))
+
+    We can use a closure pattern to filter graph elements based on additional
+    data --- for example, filtering on edge data attached to the graph:
+
+    >>> G[3][4]["cross_me"] = False
+    >>> def filter_edge(n1, n2):
+    ...     return G[n1][n2].get("cross_me", True)
+    >>> view = nx.subgraph_view(G, filter_edge=filter_edge)
+    >>> view.edges()
+    EdgeView([(0, 1), (1, 2), (2, 3), (4, 5)])
+
+    >>> view = nx.subgraph_view(
+    ...     G,
+    ...     filter_node=filter_node,
+    ...     filter_edge=filter_edge,
+    ...
) + >>> view.nodes() + NodeView((0, 1, 2, 3, 4)) + >>> view.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + """ + newG = nx.freeze(G.__class__()) + newG._NODE_OK = filter_node + newG._EDGE_OK = filter_edge + + # create view by assigning attributes from G + newG._graph = G + newG.graph = G.graph + + newG._node = FilterAtlas(G._node, filter_node) + if G.is_multigraph(): + Adj = FilterMultiAdjacency + + def reverse_edge(u, v, k=None): + return filter_edge(v, u, k) + + else: + Adj = FilterAdjacency + + def reverse_edge(u, v, k=None): + return filter_edge(v, u) + + if G.is_directed(): + newG._succ = Adj(G._succ, filter_node, filter_edge) + newG._pred = Adj(G._pred, filter_node, reverse_edge) + # newG._adj is synced with _succ + else: + newG._adj = Adj(G._adj, filter_node, filter_edge) + return newG + + +@not_implemented_for("undirected") +def reverse_view(G): + """View of `G` with edge directions reversed + + `reverse_view` returns a read-only view of the input graph where + edge directions are reversed. + + Identical to digraph.reverse(copy=False) + + Parameters + ---------- + G : networkx.DiGraph + + Returns + ------- + graph : networkx.DiGraph + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> G.add_edge(2, 3) + >>> G.edges() + OutEdgeView([(1, 2), (2, 3)]) + + >>> view = nx.reverse_view(G) + >>> view.edges() + OutEdgeView([(2, 1), (3, 2)]) + """ + newG = generic_graph_view(G) + newG._succ, newG._pred = G._pred, G._succ + # newG._adj is synced with _succ + return newG diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/multidigraph.py b/.venv/lib/python3.12/site-packages/networkx/classes/multidigraph.py new file mode 100644 index 0000000..597af79 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/multidigraph.py @@ -0,0 +1,966 @@ +"""Base class for MultiDiGraph.""" + +from copy import deepcopy +from functools import cached_property + +import networkx as nx +from networkx import convert +from networkx.classes.coreviews import MultiAdjacencyView +from networkx.classes.digraph import DiGraph +from networkx.classes.multigraph import MultiGraph +from networkx.classes.reportviews import ( + DiMultiDegreeView, + InMultiDegreeView, + InMultiEdgeView, + OutMultiDegreeView, + OutMultiEdgeView, +) +from networkx.exception import NetworkXError + +__all__ = ["MultiDiGraph"] + + +class MultiDiGraph(MultiGraph, DiGraph): + """A directed graph class that can store multiedges. + + Multiedges are multiple edges between two nodes. Each edge + can hold optional data or attributes. + + A MultiDiGraph holds directed edges. Self loops are allowed. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. + + multigraph_input : bool or None (default None) + Note: Only used when `incoming_graph_data` is a dict. + If True, `incoming_graph_data` is assumed to be a + dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. 
+ A NetworkXError is raised if this is not the case. + If False, :func:`to_networkx_graph` is used to try to determine + the dict's graph data structure as either a dict-of-dict-of-dict + keyed by node to neighbor to edge data, or a dict-of-iterable + keyed by node to neighbors. + If None, the treatment for True is tried, but if it fails, + the treatment for False is tried. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + DiGraph + MultiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.MultiDiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> key = G.add_edge(1, 2) + + a list of edges, + + >>> keys = G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> keys = G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. If an edge already exists, an additional + edge is created and stored using a key to identify the edge. + By default the key is the lowest unused integer. + + >>> keys = G.add_edges_from([(4, 5, dict(route=282)), (4, 5, dict(route=37))]) + >>> G[4] + AdjacencyView({5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}) + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.MultiDiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> key = G.add_edge(1, 2, weight=4.7) + >>> keys = G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> keys = G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2][0]["weight"] = 4.7 + >>> G.edges[1, 2, 0]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, + 2, 0]` a read-only dict-like structure. However, you can assign to + attributes in e.g. `G.edges[1, 2, 0]`. Thus, use 2 sets of brackets + to add/change data attributes: `G.edges[1, 2, 0]['weight'] = 4` + (for multigraphs the edge key is required: `MG.edges[u, v, + key][name] = value`). + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. 
+ + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict-like view mapping neighbor -> edge key -> edge attributes + AdjacencyView({2: {0: {'weight': 4}, 1: {'color': 'blue'}}}) + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are available as an adjacency-view `G.adj` object or via + the method `G.adjacency()`. + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, keydict in nbrsdict.items(): + ... for key, eattr in keydict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, keys, weight in G.edges(data="weight", keys=True): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using methods and object-attributes. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v, k]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. + + **Subclasses (Advanced):** + + The MultiDiGraph class uses a dict-of-dict-of-dict-of-dict structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information + and holds edge_key dicts keyed by neighbor. The edge_key dict holds + each edge_attr dict keyed by edge key. The inner dict + (edge_attr_dict) represents the edge data and holds edge attribute + values keyed by attribute names. + + Each of these four dicts in the dict-of-dict-of-dict-of-dict + structure can be replaced by a user defined dict-like object. + In general, the dict-like features should be maintained but + extra features can be added. To replace one of the dicts create + a new graph class by changing the class(!) variable holding the + factory for that dict-like structure. The variable names are + node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory, + adjlist_outer_dict_factory, edge_key_dict_factory, edge_attr_dict_factory + and graph_attr_dict_factory. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. + It should require no arguments and return a dict-like object + + node_attr_dict_factory: function, (default: dict) + Factory function to be used to create the node attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object + + adjlist_outer_dict_factory : function, (default: dict) + Factory function to be used to create the outer-most dict + in the data structure that holds adjacency info keyed by node. + It should require no arguments and return a dict-like object. + + adjlist_inner_dict_factory : function, (default: dict) + Factory function to be used to create the adjacency list + dict which holds multiedge key dicts keyed by neighbor. + It should require no arguments and return a dict-like object. 
+
+    edge_key_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge key dict
+        which holds edge data keyed by edge key.
+        It should require no arguments and return a dict-like object.
+
+    edge_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    graph_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the graph attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    Typically, if your extension doesn't impact the data structure all
+    methods will be inherited without issue except: `to_directed/to_undirected`.
+    By default these methods create a DiGraph/Graph class and you probably
+    want them to create your extension of a DiGraph/Graph. To facilitate
+    this we define two class variables that you can set in your subclass.
+
+    to_directed_class : callable, (default: DiGraph or MultiDiGraph)
+        Class to create a new graph structure in the `to_directed` method.
+        If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
+
+    to_undirected_class : callable, (default: Graph or MultiGraph)
+        Class to create a new graph structure in the `to_undirected` method.
+        If `None`, a NetworkX class (Graph or MultiGraph) is used.
+
+    **Subclassing Example**
+
+    Create a low memory graph class that effectively disallows edge
+    attributes by using a single attribute dict for all edges.
+    This reduces the memory used, but you lose edge attributes.
+
+    >>> class ThinGraph(nx.Graph):
+    ...     all_edge_dict = {"weight": 1}
+    ...
+    ...     def single_edge_dict(self):
+    ...         return self.all_edge_dict
+    ...
+    ...     edge_attr_dict_factory = single_edge_dict
+    >>> G = ThinGraph()
+    >>> G.add_edge(2, 1)
+    >>> G[2][1]
+    {'weight': 1}
+    >>> G.add_edge(2, 2)
+    >>> G[2][1] is G[2][2]
+    True
+    """
+
+    # node_dict_factory = dict  # already assigned in Graph
+    # adjlist_outer_dict_factory = dict
+    # adjlist_inner_dict_factory = dict
+    edge_key_dict_factory = dict
+    # edge_attr_dict_factory = dict
+
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
+        """Initialize a graph with edges, name, or graph attributes.
+
+        Parameters
+        ----------
+        incoming_graph_data : input graph
+            Data to initialize graph. If incoming_graph_data=None (default)
+            an empty graph is created. The data can be an edge list, or any
+            NetworkX graph object. If the corresponding optional Python
+            packages are installed the data can also be a 2D NumPy array, a
+            SciPy sparse array, or a PyGraphviz graph.
+
+        multigraph_input : bool or None (default None)
+            Note: Only used when `incoming_graph_data` is a dict.
+            If True, `incoming_graph_data` is assumed to be a
+            dict-of-dict-of-dict-of-dict structure keyed by
+            node to neighbor to edge keys to edge data for multi-edges.
+            A NetworkXError is raised if this is not the case.
+            If False, :func:`to_networkx_graph` is used to try to determine
+            the dict's graph data structure as either a dict-of-dict-of-dict
+            keyed by node to neighbor to edge data, or a dict-of-iterable
+            keyed by node to neighbors.
+            If None, the treatment for True is tried, but if it fails,
+            the treatment for False is tried.
+
+        attr : keyword arguments, optional (default= no attributes)
+            Attributes to add to graph as key=value pairs.
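+
+        For illustration, a dict-of-dict-of-dict-of-dict input creates
+        parallel edges when `multigraph_input=True` (a minimal sketch;
+        the attribute name ``w`` is arbitrary):
+
+        >>> d = {0: {1: {0: {"w": 1}, 1: {"w": 2}}}}
+        >>> G = nx.MultiDiGraph(d, multigraph_input=True)
+        >>> list(G.edges(keys=True, data=True))
+        [(0, 1, 0, {'w': 1}), (0, 1, 1, {'w': 2})]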
+ + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name="my graph") + >>> e = [(1, 2), (2, 3), (3, 4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G = nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + # multigraph_input can be None/True/False. So check "is not False" + if isinstance(incoming_graph_data, dict) and multigraph_input is not False: + DiGraph.__init__(self) + try: + convert.from_dict_of_dicts( + incoming_graph_data, create_using=self, multigraph_input=True + ) + self.graph.update(attr) + except Exception as err: + if multigraph_input is True: + raise nx.NetworkXError( + f"converting multigraph_input raised:\n{type(err)}: {err}" + ) + DiGraph.__init__(self, incoming_graph_data, **attr) + else: + DiGraph.__init__(self, incoming_graph_data, **attr) + + @cached_property + def adj(self): + """Graph adjacency object holding the neighbors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return MultiAdjacencyView(self._succ) + + @cached_property + def succ(self): + """Graph adjacency object holding the successors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.succ` is identical to `G.adj`. + """ + return MultiAdjacencyView(self._succ) + + @cached_property + def pred(self): + """Graph adjacency object holding the predecessors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + """ + return MultiAdjacencyView(self._pred) + + def add_edge(self, u_for_edge, v_for_edge, key=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_for_edge, v_for_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + key : hashable identifier, optional (default=lowest unused integer) + Used to distinguish multiedges between a pair of nodes. 
+ attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + Returns + ------- + The edge key assigned to the edge. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + To replace/update edge data, use the optional key argument + to identify a unique edge. Otherwise a new edge will be created. + + NetworkX algorithms designed for weighted graphs cannot use + multigraphs directly because it is not clear how to handle + multiedge weights. Convert to Graph using edge attribute + 'weight' to enable weighted graph algorithms. + + Default keys are generated using the method `new_edge_key()`. + This method can be overridden by subclassing the base class and + providing a custom `new_edge_key()` method. + + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.MultiDiGraph() + >>> e = (1, 2) + >>> key = G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + 1 + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + [2] + + Associate data to edges using keywords: + + >>> key = G.add_edge(1, 2, weight=3) + >>> key = G.add_edge(1, 2, key=0, weight=4) # update data for key=0 + >>> key = G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. + + >>> ekey = G.add_edge(1, 2) + >>> G[1][2][0].update({0: 5}) + >>> G.edges[1, 2, 0].update({0: 5}) + """ + u, v = u_for_edge, v_for_edge + # add nodes + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + if key is None: + key = self.new_edge_key(u, v) + if v in self._succ[u]: + keydict = self._adj[u][v] + datadict = keydict.get(key, self.edge_attr_dict_factory()) + datadict.update(attr) + keydict[key] = datadict + else: + # selfloops work this way without special treatment + datadict = self.edge_attr_dict_factory() + datadict.update(attr) + keydict = self.edge_key_dict_factory() + keydict[key] = datadict + self._succ[u][v] = keydict + self._pred[v][u] = keydict + nx._clear_cache(self) + return key + + def remove_edge(self, u, v, key=None): + """Remove an edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove an edge between nodes u and v. + key : hashable identifier, optional (default=None) + Used to distinguish multiple edges between a pair of nodes. + If None, remove a single edge between u and v. If there are + multiple edges, removes the last edge added in terms of + insertion order. + + Raises + ------ + NetworkXError + If there is not an edge between u and v, or + if there is no edge with the specified key. 
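+
+        A small illustration of the error (the node labels are arbitrary);
+        removing an absent edge raises:
+
+        >>> G = nx.MultiDiGraph()
+        >>> G.remove_edge(5, 6)
+        Traceback (most recent call last):
+        ...
+        networkx.exception.NetworkXError: The edge 5-6 is not in the graph.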
+ + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + + For multiple edges + + >>> G = nx.MultiDiGraph() + >>> G.add_edges_from([(1, 2), (1, 2), (1, 2)]) # key_list returned + [0, 1, 2] + + When ``key=None`` (the default), edges are removed in the opposite + order that they were added: + + >>> G.remove_edge(1, 2) + >>> G.edges(keys=True) + OutMultiEdgeView([(1, 2, 0), (1, 2, 1)]) + + For edges with keys + + >>> G = nx.MultiDiGraph() + >>> G.add_edge(1, 2, key="first") + 'first' + >>> G.add_edge(1, 2, key="second") + 'second' + >>> G.remove_edge(1, 2, key="first") + >>> G.edges(keys=True) + OutMultiEdgeView([(1, 2, 'second')]) + + """ + try: + d = self._adj[u][v] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err + # remove the edge with specified data + if key is None: + d.popitem() + else: + try: + del d[key] + except KeyError as err: + msg = f"The edge {u}-{v} with key {key} is not in the graph." + raise NetworkXError(msg) from err + if len(d) == 0: + # remove the key entries if last edge + del self._succ[u][v] + del self._pred[v][u] + nx._clear_cache(self) + + @cached_property + def edges(self): + """An OutMultiEdgeView of the Graph as G.edges or G.edges(). + + edges(self, nbunch=None, data=False, keys=False, default=None) + + The OutMultiEdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, ``G.edges[u, v, k]['color']`` provides the value of the color + attribute for the edge from ``u`` to ``v`` with key ``k`` while + ``for (u, v, k, c) in G.edges(data='color', default='red', keys=True):`` + iterates through all the edges yielding the color attribute with + default `'red'` if no color attribute exists. + + Edges are returned as tuples with optional data and keys + in the order (node, neighbor, key, data). If ``keys=True`` is not + provided, the tuples will just be (node, neighbor, data), but + multiple tuples with the same node and neighbor will be + generated when multiple edges between two nodes exist. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + keys : bool, optional (default=False) + If True, return edge keys with each edge, creating (u, v, k, + d) tuples when data is also requested (the default) and (u, + v, k) tuples when data is not requested. + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : OutMultiEdgeView + A view of edge attributes, usually it iterates over (u, v) + (u, v, k) or (u, v, k, d) tuples of edges, but can also be + used for attribute lookup as ``edges[u, v, k]['foo']``. + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. 
+ + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2]) + >>> key = G.add_edge(2, 3, weight=5) + >>> key2 = G.add_edge(1, 2) # second edge between these nodes + >>> [e for e in G.edges()] + [(0, 1), (1, 2), (1, 2), (2, 3)] + >>> list(G.edges(data=True)) # default data is {} (empty dict) + [(0, 1, {}), (1, 2, {}), (1, 2, {}), (2, 3, {'weight': 5})] + >>> list(G.edges(data="weight", default=1)) + [(0, 1, 1), (1, 2, 1), (1, 2, 1), (2, 3, 5)] + >>> list(G.edges(keys=True)) # default keys are integers + [(0, 1, 0), (1, 2, 0), (1, 2, 1), (2, 3, 0)] + >>> list(G.edges(data=True, keys=True)) + [(0, 1, 0, {}), (1, 2, 0, {}), (1, 2, 1, {}), (2, 3, 0, {'weight': 5})] + >>> list(G.edges(data="weight", default=1, keys=True)) + [(0, 1, 0, 1), (1, 2, 0, 1), (1, 2, 1, 1), (2, 3, 0, 5)] + >>> list(G.edges([0, 2])) + [(0, 1), (2, 3)] + >>> list(G.edges(0)) + [(0, 1)] + >>> list(G.edges(1)) + [(1, 2), (1, 2)] + + See Also + -------- + in_edges, out_edges + """ + return OutMultiEdgeView(self) + + # alias out_edges to edges + @cached_property + def out_edges(self): + return OutMultiEdgeView(self) + + out_edges.__doc__ = edges.__doc__ + + @cached_property + def in_edges(self): + """A view of the in edges of the graph as G.in_edges or G.in_edges(). + + in_edges(self, nbunch=None, data=False, keys=False, default=None) + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + keys : bool, optional (default=False) + If True, return edge keys with each edge, creating 3-tuples + (u, v, k) or with data, 4-tuples (u, v, k, d). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + in_edges : InMultiEdgeView or InMultiEdgeDataView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, k) or (u, v, k, d) tuples of edges, but can also be + used for attribute lookup as `edges[u, v, k]['foo']`. + + See Also + -------- + edges + """ + return InMultiEdgeView(self) + + @cached_property + def degree(self): + """A DegreeView for the Graph as G.degree or G.degree(). + + The node degree is the number of edges adjacent to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + DiMultiDegreeView or int + If multiple nodes are requested (the default), returns a `DiMultiDegreeView` + mapping nodes to their degree. + If a single node is requested, returns the degree of the node as an integer. 
+ + See Also + -------- + out_degree, in_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1, 2])) + [(0, 1), (1, 2), (2, 2)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.degree([0, 1, 2])) # parallel edges are counted + [(0, 2), (1, 3), (2, 2)] + + """ + return DiMultiDegreeView(self) + + @cached_property + def in_degree(self): + """A DegreeView for (node, in_degree) or in_degree for single node. + + The node in-degree is the number of edges pointing into the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + Degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, in-degree). + + See Also + -------- + degree, out_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.in_degree(0) # node 0 with degree 0 + 0 + >>> list(G.in_degree([0, 1, 2])) + [(0, 0), (1, 1), (2, 1)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.in_degree([0, 1, 2])) # parallel edges counted + [(0, 0), (1, 2), (2, 1)] + + """ + return InMultiDegreeView(self) + + @cached_property + def out_degree(self): + """Returns an iterator for (node, out-degree) or out-degree for single node. + + out_degree(self, nbunch=None, weight=None) + + The node out-degree is the number of edges pointing out of the node. + This function returns the out-degree for a single node or an iterator + for a bunch of nodes or if nothing is passed as argument. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights. + + Returns + ------- + If a single node is requested + deg : int + Degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, out-degree). + + See Also + -------- + degree, in_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.out_degree(0) # node 0 with degree 1 + 1 + >>> list(G.out_degree([0, 1, 2])) + [(0, 1), (1, 1), (2, 1)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.out_degree([0, 1, 2])) # counts parallel edges + [(0, 2), (1, 1), (2, 1)] + + """ + return OutMultiDegreeView(self) + + def is_multigraph(self): + """Returns True if graph is a multigraph, False otherwise.""" + return True + + def is_directed(self): + """Returns True if graph is directed, False otherwise.""" + return True + + def to_undirected(self, reciprocal=False, as_view=False): + """Returns an undirected representation of the digraph. 
+ + Parameters + ---------- + reciprocal : bool (optional) + If True only keep edges that appear in both directions + in the original digraph. + as_view : bool (optional, default=False) + If True return an undirected view of the original directed graph. + + Returns + ------- + G : MultiGraph + An undirected graph with the same name and nodes and + with edge (u, v, data) if either (u, v, data) or (v, u, data) + is in the digraph. If both edges exist in digraph and + their edge data is different, only one edge is created + with an arbitrary choice of which edge data to use. + You must check and correct for this manually if desired. + + See Also + -------- + MultiGraph, copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=MultiDiGraph(G) which + returns a shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed MultiDiGraph to use dict-like + objects in the data structure, those changes do not transfer + to the MultiGraph created by this method. + + Examples + -------- + >>> G = nx.path_graph(2) # or MultiGraph, etc + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + if reciprocal is True: + G.add_edges_from( + (u, v, key, deepcopy(data)) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, data in keydict.items() + if v in self._pred[u] and key in self._pred[u][v] + ) + else: + G.add_edges_from( + (u, v, key, deepcopy(data)) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, data in keydict.items() + ) + return G + + def reverse(self, copy=True): + """Returns the reverse of the graph. + + The reverse is a graph with the same nodes and edges + but with the directions of the edges reversed. + + Parameters + ---------- + copy : bool optional (default=True) + If True, return a new DiGraph holding the reversed edges. + If False, the reverse graph is created using a view of + the original graph. 
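+
+        Examples
+        --------
+        A brief sketch; edge keys are preserved when the reversed edges
+        are copied:
+
+        >>> G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 2)])
+        >>> H = G.reverse()
+        >>> list(H.edges(keys=True))
+        [(1, 0, 0), (1, 0, 1), (2, 1, 0)]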
+ """ + if copy: + H = self.__class__() + H.graph.update(deepcopy(self.graph)) + H.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + H.add_edges_from( + (v, u, k, deepcopy(d)) + for u, v, k, d in self.edges(keys=True, data=True) + ) + return H + return nx.reverse_view(self) diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/multigraph.py b/.venv/lib/python3.12/site-packages/networkx/classes/multigraph.py new file mode 100644 index 0000000..0e3f1ae --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/multigraph.py @@ -0,0 +1,1283 @@ +"""Base class for MultiGraph.""" + +from copy import deepcopy +from functools import cached_property + +import networkx as nx +from networkx import NetworkXError, convert +from networkx.classes.coreviews import MultiAdjacencyView +from networkx.classes.graph import Graph +from networkx.classes.reportviews import MultiDegreeView, MultiEdgeView + +__all__ = ["MultiGraph"] + + +class MultiGraph(Graph): + """ + An undirected graph class that can store multiedges. + + Multiedges are multiple edges between two nodes. Each edge + can hold optional data or attributes. + + A MultiGraph holds undirected edges. Self loops are allowed. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes, in a MultiGraph each edge has a key to + distinguish between multiple edges that have the same source and + destination nodes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, + SciPy sparse array, or PyGraphviz graph. + + multigraph_input : bool or None (default None) + Note: Only used when `incoming_graph_data` is a dict. + If True, `incoming_graph_data` is assumed to be a + dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + A NetworkXError is raised if this is not the case. + If False, :func:`to_networkx_graph` is used to try to determine + the dict's graph data structure as either a dict-of-dict-of-dict + keyed by node to neighbor to edge data, or a dict-of-iterable + keyed by node to neighbors. + If None, the treatment for True is tried, but if it fails, + the treatment for False is tried. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + DiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.MultiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. 
+ + Add one edge, + + >>> key = G.add_edge(1, 2) + + a list of edges, + + >>> keys = G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> keys = G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. If an edge already exists, an additional + edge is created and stored using a key to identify the edge. + By default the key is the lowest unused integer. + + >>> keys = G.add_edges_from([(4, 5, {"route": 28}), (4, 5, {"route": 37})]) + >>> G[4] + AdjacencyView({3: {0: {}}, 5: {0: {}, 1: {'route': 28}, 2: {'route': 37}}}) + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.MultiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> key = G.add_edge(1, 2, weight=4.7) + >>> keys = G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> keys = G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2][0]["weight"] = 4.7 + >>> G.edges[1, 2, 0]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, + 2, 0]` a read-only dict-like structure. However, you can assign to + attributes in e.g. `G.edges[1, 2, 0]`. Thus, use 2 sets of brackets + to add/change data attributes: `G.edges[1, 2, 0]['weight'] = 4`. + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict-like view mapping neighbor -> edge key -> edge attributes + AdjacencyView({2: {0: {'weight': 4}, 1: {'color': 'blue'}}}) + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`. + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, keydict in nbrsdict.items(): + ... for key, eattr in keydict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, keys, weight in G.edges(data="weight", keys=True): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using methods and object-attributes. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v, k]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. 
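+
+    For example, continuing the graph `G` built above, attribute lookup
+    with a default value:
+
+    >>> list(G.nodes.data("time", default="n/a"))
+    [(1, '5pm'), (3, '2pm'), (2, 'n/a'), (4, 'n/a'), (5, 'n/a')]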
+
+    For details on these and other miscellaneous methods, see below.
+
+    **Subclasses (Advanced):**
+
+    The MultiGraph class uses a dict-of-dict-of-dict-of-dict data structure.
+    The outer dict (node_dict) holds adjacency information keyed by node.
+    The next dict (adjlist_dict) represents the adjacency information
+    and holds edge_key dicts keyed by neighbor. The edge_key dict holds
+    each edge_attr dict keyed by edge key. The inner dict
+    (edge_attr_dict) represents the edge data and holds edge attribute
+    values keyed by attribute names.
+
+    Each of these four dicts in the dict-of-dict-of-dict-of-dict
+    structure can be replaced by a user defined dict-like object.
+    In general, the dict-like features should be maintained but
+    extra features can be added. To replace one of the dicts create
+    a new graph class by changing the class(!) variable holding the
+    factory for that dict-like structure. The variable names are
+    node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory,
+    adjlist_outer_dict_factory, edge_key_dict_factory, edge_attr_dict_factory
+    and graph_attr_dict_factory.
+
+    node_dict_factory : function, (default: dict)
+        Factory function to be used to create the dict containing node
+        attributes, keyed by node id.
+        It should require no arguments and return a dict-like object
+
+    node_attr_dict_factory: function, (default: dict)
+        Factory function to be used to create the node attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object
+
+    adjlist_outer_dict_factory : function, (default: dict)
+        Factory function to be used to create the outer-most dict
+        in the data structure that holds adjacency info keyed by node.
+        It should require no arguments and return a dict-like object.
+
+    adjlist_inner_dict_factory : function, (default: dict)
+        Factory function to be used to create the adjacency list
+        dict which holds multiedge key dicts keyed by neighbor.
+        It should require no arguments and return a dict-like object.
+
+    edge_key_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge key dict
+        which holds edge data keyed by edge key.
+        It should require no arguments and return a dict-like object.
+
+    edge_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    graph_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the graph attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    Typically, if your extension doesn't impact the data structure all
+    methods will be inherited without issue except: `to_directed/to_undirected`.
+    By default these methods create a DiGraph/Graph class and you probably
+    want them to create your extension of a DiGraph/Graph. To facilitate
+    this we define two class variables that you can set in your subclass.
+
+    to_directed_class : callable, (default: DiGraph or MultiDiGraph)
+        Class to create a new graph structure in the `to_directed` method.
+        If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
+
+    to_undirected_class : callable, (default: Graph or MultiGraph)
+        Class to create a new graph structure in the `to_undirected` method.
+        If `None`, a NetworkX class (Graph or MultiGraph) is used.
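+
+    For example (a minimal sketch; the subclass name is arbitrary), a
+    subclass can route `to_undirected` back to itself:
+
+    >>> class MyMultiGraph(nx.MultiGraph):
+    ...     def to_undirected_class(self):
+    ...         return MyMultiGraph
+    >>> type(MyMultiGraph().to_undirected()).__name__
+    'MyMultiGraph'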
+
+    **Subclassing Example**
+
+    Create a low memory graph class that effectively disallows edge
+    attributes by using a single attribute dict for all edges.
+    This reduces the memory used, but you lose edge attributes.
+
+    >>> class ThinGraph(nx.Graph):
+    ...     all_edge_dict = {"weight": 1}
+    ...
+    ...     def single_edge_dict(self):
+    ...         return self.all_edge_dict
+    ...
+    ...     edge_attr_dict_factory = single_edge_dict
+    >>> G = ThinGraph()
+    >>> G.add_edge(2, 1)
+    >>> G[2][1]
+    {'weight': 1}
+    >>> G.add_edge(2, 2)
+    >>> G[2][1] is G[2][2]
+    True
+    """
+
+    # node_dict_factory = dict  # already assigned in Graph
+    # adjlist_outer_dict_factory = dict
+    # adjlist_inner_dict_factory = dict
+    edge_key_dict_factory = dict
+    # edge_attr_dict_factory = dict
+
+    def to_directed_class(self):
+        """Returns the class to use for empty directed copies.
+
+        If you subclass the base classes, use this to designate
+        what directed class to use for `to_directed()` copies.
+        """
+        return nx.MultiDiGraph
+
+    def to_undirected_class(self):
+        """Returns the class to use for empty undirected copies.
+
+        If you subclass the base classes, use this to designate
+        what undirected class to use for `to_undirected()` copies.
+        """
+        return MultiGraph
+
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
+        """Initialize a graph with edges, name, or graph attributes.
+
+        Parameters
+        ----------
+        incoming_graph_data : input graph
+            Data to initialize graph. If incoming_graph_data=None (default)
+            an empty graph is created. The data can be an edge list, or any
+            NetworkX graph object. If the corresponding optional Python
+            packages are installed the data can also be a 2D NumPy array, a
+            SciPy sparse array, or a PyGraphviz graph.
+
+        multigraph_input : bool or None (default None)
+            Note: Only used when `incoming_graph_data` is a dict.
+            If True, `incoming_graph_data` is assumed to be a
+            dict-of-dict-of-dict-of-dict structure keyed by
+            node to neighbor to edge keys to edge data for multi-edges.
+            A NetworkXError is raised if this is not the case.
+            If False, :func:`to_networkx_graph` is used to try to determine
+            the dict's graph data structure as either a dict-of-dict-of-dict
+            keyed by node to neighbor to edge data, or a dict-of-iterable
+            keyed by node to neighbors.
+            If None, the treatment for True is tried, but if it fails,
+            the treatment for False is tried.
+
+        attr : keyword arguments, optional (default= no attributes)
+            Attributes to add to graph as key=value pairs.
+
+        See Also
+        --------
+        convert
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph()
+        >>> G = nx.MultiGraph(name="my graph")
+        >>> e = [(1, 2), (1, 2), (2, 3), (3, 4)]  # list of edges
+        >>> G = nx.MultiGraph(e)
+
+        Arbitrary graph attribute pairs (key=value) may be assigned
+
+        >>> G = nx.MultiGraph(e, day="Friday")
+        >>> G.graph
+        {'day': 'Friday'}
+
+        """
+        # multigraph_input can be None/True/False. So check "is not False"
+        if isinstance(incoming_graph_data, dict) and multigraph_input is not False:
+            Graph.__init__(self)
+            try:
+                convert.from_dict_of_dicts(
+                    incoming_graph_data, create_using=self, multigraph_input=True
+                )
+                self.graph.update(attr)
+            except Exception as err:
+                if multigraph_input is True:
+                    raise nx.NetworkXError(
+                        f"converting multigraph_input raised:\n{type(err)}: {err}"
+                    )
+                Graph.__init__(self, incoming_graph_data, **attr)
+        else:
+            Graph.__init__(self, incoming_graph_data, **attr)
+
+    @cached_property
+    def adj(self):
+        """Graph adjacency object holding the neighbors of each node.
+ + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-data-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, edgesdict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + + Examples + -------- + >>> e = [(1, 2), (1, 2), (1, 3), (3, 4)] # list of edges + >>> G = nx.MultiGraph(e) + >>> G.edges[1, 2, 0]["weight"] = 3 + >>> result = set() + >>> for edgekey, data in G[1][2].items(): + ... result.add(data.get("weight", 1)) + >>> result + {1, 3} + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return MultiAdjacencyView(self._adj) + + def new_edge_key(self, u, v): + """Returns an unused key for edges between nodes `u` and `v`. + + The nodes `u` and `v` do not need to be already in the graph. + + Notes + ----- + In the standard MultiGraph class the new key is the number of existing + edges between `u` and `v` (increased if necessary to ensure unused). + The first edge will have key 0, then 1, etc. If an edge is removed + further new_edge_keys may not be in this order. + + Parameters + ---------- + u, v : nodes + + Returns + ------- + key : int + """ + try: + keydict = self._adj[u][v] + except KeyError: + return 0 + key = len(keydict) + while key in keydict: + key += 1 + return key + + def add_edge(self, u_for_edge, v_for_edge, key=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_for_edge, v_for_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + key : hashable identifier, optional (default=lowest unused integer) + Used to distinguish multiedges between a pair of nodes. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + Returns + ------- + The edge key assigned to the edge. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + To replace/update edge data, use the optional key argument + to identify a unique edge. Otherwise a new edge will be created. + + NetworkX algorithms designed for weighted graphs cannot use + multigraphs directly because it is not clear how to handle + multiedge weights. Convert to Graph using edge attribute + 'weight' to enable weighted graph algorithms. + + Default keys are generated using the method `new_edge_key()`. + This method can be overridden by subclassing the base class and + providing a custom `new_edge_key()` method. + + Examples + -------- + The following each add an additional edge e=(1, 2) to graph G: + + >>> G = nx.MultiGraph() + >>> e = (1, 2) + >>> ekey = G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + 1 + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + [2] + + Associate data to edges using keywords: + + >>> ekey = G.add_edge(1, 2, weight=3) + >>> ekey = G.add_edge(1, 2, key=0, weight=4) # update data for key=0 + >>> ekey = G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. 
+ + >>> ekey = G.add_edge(1, 2) + >>> G[1][2][0].update({0: 5}) + >>> G.edges[1, 2, 0].update({0: 5}) + """ + u, v = u_for_edge, v_for_edge + # add nodes + if u not in self._adj: + if u is None: + raise ValueError("None cannot be a node") + self._adj[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._adj: + if v is None: + raise ValueError("None cannot be a node") + self._adj[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + if key is None: + key = self.new_edge_key(u, v) + if v in self._adj[u]: + keydict = self._adj[u][v] + datadict = keydict.get(key, self.edge_attr_dict_factory()) + datadict.update(attr) + keydict[key] = datadict + else: + # selfloops work this way without special treatment + datadict = self.edge_attr_dict_factory() + datadict.update(attr) + keydict = self.edge_key_dict_factory() + keydict[key] = datadict + self._adj[u][v] = keydict + self._adj[v][u] = keydict + nx._clear_cache(self) + return key + + def add_edges_from(self, ebunch_to_add, **attr): + """Add all the edges in ebunch_to_add. + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the container will be added to the + graph. The edges can be: + + - 2-tuples (u, v) or + - 3-tuples (u, v, d) for an edge data dict d, or + - 3-tuples (u, v, k) for not iterable key k, or + - 4-tuples (u, v, k, d) for an edge with data and key k + + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + Returns + ------- + A list of edge keys assigned to the edges in `ebunch`. + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Edge attributes specified in an ebunch take precedence over + attributes specified via keyword arguments. + + Default keys are generated using the method ``new_edge_key()``. + This method can be overridden by subclassing the base class and + providing a custom ``new_edge_key()`` method. + + When adding edges from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_edges)`, and pass this + object to `G.add_edges_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples + >>> e = zip(range(0, 3), range(1, 4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) + >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898") + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)]) + >>> # Grow graph by one new node, adding edges to all existing nodes. 
+ >>> # wrong way - will raise RuntimeError + >>> # G.add_edges_from(((5, n) for n in G.nodes)) + >>> # right way - note that there will be no self-edge for node 5 + >>> assigned_keys = G.add_edges_from(list((5, n) for n in G.nodes)) + """ + keylist = [] + for e in ebunch_to_add: + ne = len(e) + if ne == 4: + u, v, key, dd = e + elif ne == 3: + u, v, dd = e + key = None + elif ne == 2: + u, v = e + dd = {} + key = None + else: + msg = f"Edge tuple {e} must be a 2-tuple, 3-tuple or 4-tuple." + raise NetworkXError(msg) + ddd = {} + ddd.update(attr) + try: + ddd.update(dd) + except (TypeError, ValueError): + if ne != 3: + raise + key = dd # ne == 3 with 3rd value not dict, must be a key + key = self.add_edge(u, v, key) + self[u][v][key].update(ddd) + keylist.append(key) + nx._clear_cache(self) + return keylist + + def remove_edge(self, u, v, key=None): + """Remove an edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove an edge between nodes u and v. + key : hashable identifier, optional (default=None) + Used to distinguish multiple edges between a pair of nodes. + If None, remove a single edge between u and v. If there are + multiple edges, removes the last edge added in terms of + insertion order. + + Raises + ------ + NetworkXError + If there is not an edge between u and v, or + if there is no edge with the specified key. + + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.MultiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + + For multiple edges + + >>> G = nx.MultiGraph() # or MultiDiGraph, etc + >>> G.add_edges_from([(1, 2), (1, 2), (1, 2)]) # key_list returned + [0, 1, 2] + + When ``key=None`` (the default), edges are removed in the opposite + order that they were added: + + >>> G.remove_edge(1, 2) + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 0), (1, 2, 1)]) + >>> G.remove_edge(2, 1) # edges are not directed + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 0)]) + + For edges with keys + + >>> G = nx.MultiGraph() + >>> G.add_edge(1, 2, key="first") + 'first' + >>> G.add_edge(1, 2, key="second") + 'second' + >>> G.remove_edge(1, 2, key="first") + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 'second')]) + + """ + try: + d = self._adj[u][v] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err + # remove the edge with specified data + if key is None: + d.popitem() + else: + try: + del d[key] + except KeyError as err: + msg = f"The edge {u}-{v} with key {key} is not in the graph." + raise NetworkXError(msg) from err + if len(d) == 0: + # remove the key entries if last edge + del self._adj[u][v] + if u != v: # check for selfloop + del self._adj[v][u] + nx._clear_cache(self) + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u, v) A single edge between u and v is removed. + - 3-tuples (u, v, key) The edge identified by key is removed. + - 4-tuples (u, v, key, data) where data is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. 
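+
+        For example, removing the same edge twice is a silent no-op the
+        second time (a small illustrative sketch; the key name is arbitrary):
+
+        >>> G = nx.MultiGraph()
+        >>> G.add_edge("a", "b", key="k1")
+        'k1'
+        >>> G.remove_edges_from([("a", "b", "k1"), ("a", "b", "k1")])
+        >>> list(G.edges)
+        []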
+
+        Examples
+        --------
+        >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> ebunch = [(1, 2), (2, 3)]
+        >>> G.remove_edges_from(ebunch)
+
+        Removing multiple copies of edges
+
+        >>> G = nx.MultiGraph()
+        >>> keys = G.add_edges_from([(1, 2), (1, 2), (1, 2)])
+        >>> G.remove_edges_from([(1, 2), (2, 1)])  # edges aren't directed
+        >>> list(G.edges())
+        [(1, 2)]
+        >>> G.remove_edges_from([(1, 2), (1, 2)])  # silently ignore extra copy
+        >>> list(G.edges)  # now empty graph
+        []
+
+        When the edge is a 2-tuple ``(u, v)`` but there are multiple edges between
+        u and v in the graph, the most recent edge (in terms of insertion
+        order) is removed.
+
+        >>> G = nx.MultiGraph()
+        >>> for key in ("x", "y", "a"):
+        ...     k = G.add_edge(0, 1, key=key)
+        >>> G.edges(keys=True)
+        MultiEdgeView([(0, 1, 'x'), (0, 1, 'y'), (0, 1, 'a')])
+        >>> G.remove_edges_from([(0, 1)])
+        >>> G.edges(keys=True)
+        MultiEdgeView([(0, 1, 'x'), (0, 1, 'y')])
+
+        """
+        for e in ebunch:
+            try:
+                self.remove_edge(*e[:3])
+            except NetworkXError:
+                pass
+        nx._clear_cache(self)
+
+    def has_edge(self, u, v, key=None):
+        """Returns True if the graph has an edge between nodes u and v.
+
+        This is the same as `v in G[u] or key in G[u][v]`
+        without KeyError exceptions.
+
+        Parameters
+        ----------
+        u, v : nodes
+            Nodes can be, for example, strings or numbers.
+
+        key : hashable identifier, optional (default=None)
+            If specified return True only if the edge with
+            key is found.
+
+        Returns
+        -------
+        edge_ind : bool
+            True if edge is in the graph, False otherwise.
+
+        Examples
+        --------
+        Can be called either using two nodes u, v, an edge tuple (u, v),
+        or an edge tuple (u, v, key).
+
+        >>> G = nx.MultiGraph()  # or MultiDiGraph
+        >>> nx.add_path(G, [0, 1, 2, 3])
+        >>> G.has_edge(0, 1)  # using two nodes
+        True
+        >>> e = (0, 1)
+        >>> G.has_edge(*e)  # e is a 2-tuple (u, v)
+        True
+        >>> G.add_edge(0, 1, key="a")
+        'a'
+        >>> G.has_edge(0, 1, key="a")  # specify key
+        True
+        >>> G.has_edge(1, 0, key="a")  # edges aren't directed
+        True
+        >>> e = (0, 1, "a")
+        >>> G.has_edge(*e)  # e is a 3-tuple (u, v, 'a')
+        True
+
+        The following usages are equivalent:
+
+        >>> G.has_edge(0, 1)
+        True
+        >>> 1 in G[0]  # though this gives :exc:`KeyError` if 0 not in G
+        True
+        >>> 0 in G[1]  # other order; also gives :exc:`KeyError` if 0 not in G
+        True
+
+        """
+        try:
+            if key is None:
+                return v in self._adj[u]
+            else:
+                return key in self._adj[u][v]
+        except KeyError:
+            return False
+
+    @cached_property
+    def edges(self):
+        """Returns an iterator over the edges.
+
+        edges(self, nbunch=None, data=False, keys=False, default=None)
+
+        The MultiEdgeView provides set-like operations on the edge-tuples
+        as well as edge attribute lookup. When called, it also provides
+        an EdgeDataView object which allows control of access to edge
+        attributes (but does not provide set-like operations).
+        Hence, ``G.edges[u, v, k]['color']`` provides the value of the color
+        attribute for the edge from ``u`` to ``v`` with key ``k`` while
+        ``for (u, v, k, c) in G.edges(data='color', keys=True, default="red"):``
+        iterates through all the edges yielding the color attribute with
+        default `'red'` if no color attribute exists.
+
+        Edges are returned as tuples with optional data and keys
+        in the order (node, neighbor, key, data). If ``keys=True`` is not
+        provided, the tuples will just be (node, neighbor, data), but
+        multiple tuples with the same node and neighbor will be generated
+        when multiple edges exist between two nodes.
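+
+        For instance (a small sketch; the attribute name ``color`` is just
+        an illustration):
+
+        >>> G = nx.MultiGraph([(0, 1), (0, 1)])
+        >>> G.edges[0, 1, 0]["color"] = "blue"
+        >>> sorted(c for u, v, k, c in G.edges(data="color", keys=True, default="red"))
+        ['blue', 'red']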
+
+        Parameters
+        ----------
+        nbunch : single node, container, or all nodes (default= all nodes)
+            The view will only report edges from these nodes.
+        data : string or bool, optional (default=False)
+            The edge attribute returned in 3-tuple (u, v, ddict[data]).
+            If True, return edge attribute dict in 3-tuple (u, v, ddict).
+            If False, return 2-tuple (u, v).
+        keys : bool, optional (default=False)
+            If True, return edge keys with each edge, creating (u, v, k)
+            tuples or (u, v, k, d) tuples if data is also requested.
+        default : value, optional (default=None)
+            Value used for edges that don't have the requested attribute.
+            Only relevant if data is not True or False.
+
+        Returns
+        -------
+        edges : MultiEdgeView
+            A view of edge attributes, usually it iterates over (u, v)
+            (u, v, k) or (u, v, k, d) tuples of edges, but can also be
+            used for attribute lookup as ``edges[u, v, k]['foo']``.
+
+        Notes
+        -----
+        Nodes in nbunch that are not in the graph will be (quietly) ignored.
+        For directed graphs this returns the out-edges.
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph()
+        >>> nx.add_path(G, [0, 1, 2])
+        >>> key = G.add_edge(2, 3, weight=5)
+        >>> key2 = G.add_edge(2, 1, weight=2)  # multi-edge
+        >>> [e for e in G.edges()]
+        [(0, 1), (1, 2), (1, 2), (2, 3)]
+        >>> G.edges.data()  # default data is {} (empty dict)
+        MultiEdgeDataView([(0, 1, {}), (1, 2, {}), (1, 2, {'weight': 2}), (2, 3, {'weight': 5})])
+        >>> G.edges.data("weight", default=1)
+        MultiEdgeDataView([(0, 1, 1), (1, 2, 1), (1, 2, 2), (2, 3, 5)])
+        >>> G.edges(keys=True)  # default keys are integers
+        MultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 1), (2, 3, 0)])
+        >>> G.edges.data(keys=True)
+        MultiEdgeDataView([(0, 1, 0, {}), (1, 2, 0, {}), (1, 2, 1, {'weight': 2}), (2, 3, 0, {'weight': 5})])
+        >>> G.edges.data("weight", default=1, keys=True)
+        MultiEdgeDataView([(0, 1, 0, 1), (1, 2, 0, 1), (1, 2, 1, 2), (2, 3, 0, 5)])
+        >>> G.edges([0, 3])  # Note ordering of tuples from listed sources
+        MultiEdgeDataView([(0, 1), (3, 2)])
+        >>> G.edges([0, 3, 2, 1])  # Note ordering of tuples
+        MultiEdgeDataView([(0, 1), (3, 2), (2, 1), (2, 1)])
+        >>> G.edges(0)
+        MultiEdgeDataView([(0, 1)])
+        """
+        return MultiEdgeView(self)
+
+    def get_edge_data(self, u, v, key=None, default=None):
+        """Returns the attribute dictionary associated with edge (u, v,
+        key).
+
+        If a key is not provided, returns a dictionary mapping edge keys
+        to attribute dictionaries for each edge between u and v.
+
+        This is identical to `G[u][v][key]` except the default is returned
+        instead of an exception if the edge doesn't exist.
+
+        Parameters
+        ----------
+        u, v : nodes
+
+        default : any Python object (default=None)
+            Value to return if the specific edge (u, v, key) is not
+            found, OR if there are no edges between u and v and no key
+            is specified.
+
+        key : hashable identifier, optional (default=None)
+            Return data only for the edge with specified key, as an
+            attribute dictionary (rather than a dictionary mapping keys
+            to attribute dictionaries).
+
+        Returns
+        -------
+        edge_dict : dictionary
+            The edge attribute dictionary, OR a dictionary mapping edge
+            keys to attribute dictionaries for each of those edges if no
+            specific key is provided (even if there's only one edge
+            between u and v).
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph()  # or MultiDiGraph
+        >>> key = G.add_edge(0, 1, key="a", weight=7)
+        >>> G[0][1]["a"]  # key='a'
+        {'weight': 7}
+        >>> G.edges[0, 1, "a"]  # key='a'
+        {'weight': 7}
+
+        Warning: we protect the graph data structure by making
+        `G.edges` and `G[1][2]` read-only dict-like structures.
+        However, you can assign values to attributes in e.g.
+        `G.edges[1, 2, 'a']` or `G[1][2]['a']` using an additional
+        bracket as shown next. You need to specify all edge info
+        to assign to the edge data associated with an edge.
+
+        >>> G[0][1]["a"]["weight"] = 10
+        >>> G.edges[0, 1, "a"]["weight"] = 10
+        >>> G[0][1]["a"]["weight"]
+        10
+        >>> G.edges[1, 0, "a"]["weight"]
+        10
+
+        >>> G = nx.MultiGraph()  # or MultiDiGraph
+        >>> nx.add_path(G, [0, 1, 2, 3])
+        >>> G.edges[0, 1, 0]["weight"] = 5
+        >>> G.get_edge_data(0, 1)
+        {0: {'weight': 5}}
+        >>> e = (0, 1)
+        >>> G.get_edge_data(*e)  # tuple form
+        {0: {'weight': 5}}
+        >>> G.get_edge_data(3, 0)  # edge not in graph, returns None
+        >>> G.get_edge_data(3, 0, default=0)  # edge not in graph, return default
+        0
+        >>> G.get_edge_data(1, 0, 0)  # specific key gives back
+        {'weight': 5}
+        """
+        try:
+            if key is None:
+                return self._adj[u][v]
+            else:
+                return self._adj[u][v][key]
+        except KeyError:
+            return default
+
+    @cached_property
+    def degree(self):
+        """A DegreeView for the Graph as G.degree or G.degree().
+
+        The node degree is the number of edges adjacent to the node.
+        The weighted node degree is the sum of the edge weights for
+        edges incident to that node.
+
+        This object provides an iterator for (node, degree) as well as
+        lookup for the degree for a single node.
+
+        Parameters
+        ----------
+        nbunch : single node, container, or all nodes (default= all nodes)
+            The view will only report edges incident to these nodes.
+
+        weight : string or None, optional (default=None)
+            The name of an edge attribute that holds the numerical value used
+            as a weight. If None, then each edge has weight 1.
+            The degree is the sum of the edge weights adjacent to the node.
+
+        Returns
+        -------
+        MultiDegreeView or int
+            If multiple nodes are requested (the default), returns a `MultiDegreeView`
+            mapping nodes to their degree.
+            If a single node is requested, returns the degree of the node as an integer.
+
+        Examples
+        --------
+        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> nx.add_path(G, [0, 1, 2, 3])
+        >>> G.degree(0)  # node 0 with degree 1
+        1
+        >>> list(G.degree([0, 1]))
+        [(0, 1), (1, 2)]
+
+        """
+        return MultiDegreeView(self)
+
+    def is_multigraph(self):
+        """Returns True if graph is a multigraph, False otherwise."""
+        return True
+
+    def is_directed(self):
+        """Returns True if graph is directed, False otherwise."""
+        return False
+
+    def copy(self, as_view=False):
+        """Returns a copy of the graph.
+
+        The copy method by default returns an independent shallow copy
+        of the graph and attributes. That is, if an attribute is a
+        container, that container is shared by the original and the copy.
+        Use Python's `copy.deepcopy` for new containers.
+
+        If `as_view` is True then a view is returned instead of a copy.
+
+        Notes
+        -----
+        All copies reproduce the graph structure, but data attributes
+        may be handled in different ways. There are four types of copies
+        of a graph that people might want.
+
+        Deepcopy -- A "deepcopy" copies the graph structure as well as
+        all data attributes and any objects they might contain.
+        The entire graph object is new so that changes in the copy
+        do not affect the original object.
(see Python's copy.deepcopy) + + Data Reference (Shallow) -- For a shallow copy the graph structure + is copied but the edge, node and graph attribute dicts are + references to those in the original graph. This saves + time and memory but could cause confusion if you change an attribute + in one graph and it changes the attribute in the other. + NetworkX does not provide this level of shallow copy. + + Independent Shallow -- This copy creates new independent attribute + dicts and then does a shallow copy of the attributes. That is, any + attributes that are containers are shared between the new graph + and the original. This is exactly what `dict.copy()` provides. + You can obtain this style copy using: + + >>> G = nx.path_graph(5) + >>> H = G.copy() + >>> H = G.copy(as_view=False) + >>> H = nx.Graph(G) + >>> H = G.__class__(G) + + Fresh Data -- For fresh data, the graph structure is copied while + new empty data attribute dicts are created. The resulting graph + is independent of the original and it has no edge, node or graph + attributes. Fresh copies are not enabled. Instead use: + + >>> H = G.__class__() + >>> H.add_nodes_from(G) + >>> H.add_edges_from(G.edges) + + View -- Inspired by dict-views, graph-views act like read-only + versions of the original graph, providing a copy of the original + structure without requiring any memory for copying the information. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Parameters + ---------- + as_view : bool, optional (default=False) + If True, the returned graph-view provides a read-only view + of the original graph without actually copying any data. + + Returns + ------- + G : Graph + A copy of the graph. + + See Also + -------- + to_directed: return a directed copy of the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = G.copy() + + """ + if as_view is True: + return nx.graphviews.generic_graph_view(self) + G = self.__class__() + G.graph.update(self.graph) + G.add_nodes_from((n, d.copy()) for n, d in self._node.items()) + G.add_edges_from( + (u, v, key, datadict.copy()) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + return G + + def to_directed(self, as_view=False): + """Returns a directed representation of the graph. + + Returns + ------- + G : MultiDiGraph + A directed graph with the same name, same nodes, and with + each edge (u, v, k, data) replaced by two directed edges + (u, v, k, data) and (v, u, k, data). + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=MultiDiGraph(G) which + returns a shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed MultiGraph to use dict-like objects + in the data structure, those changes do not transfer to the + MultiDiGraph created by this method. 
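+
+        In that case, override `to_directed_class` in your subclass so that
+        this method builds the copy from your own directed class. A minimal
+        sketch (the subclass names here are hypothetical):
+
+        >>> class MyMultiDiGraph(nx.MultiDiGraph):
+        ...     pass
+        >>> class MyMultiGraph(nx.MultiGraph):
+        ...     def to_directed_class(self):
+        ...         return MyMultiDiGraph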
+ + Examples + -------- + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1) + 0 + >>> G.add_edge(0, 1) + 1 + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1)] + + If already directed, return a (deep) copy + + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1) + 0 + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1, 0)] + """ + graph_class = self.to_directed_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, key, deepcopy(datadict)) + for u, nbrs in self.adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + return G + + def to_undirected(self, as_view=False): + """Returns an undirected copy of the graph. + + Returns + ------- + G : Graph/MultiGraph + A deepcopy of the graph. + + See Also + -------- + copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar `G = nx.MultiGraph(D)` + which returns a shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed MultiGraph to use dict-like + objects in the data structure, those changes do not transfer + to the MultiGraph created by this method. + + Examples + -------- + >>> G = nx.MultiGraph([(0, 1), (0, 1), (1, 2)]) + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 2, 0), (2, 1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1, 0), (0, 1, 1), (1, 2, 0)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, key, deepcopy(datadict)) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + return G + + def number_of_edges(self, u=None, v=None): + """Returns the number of edges between two nodes. + + Parameters + ---------- + u, v : nodes, optional (Default=all edges) + If u and v are specified, return the number of edges between + u and v. Otherwise return the total number of all edges. + + Returns + ------- + nedges : int + The number of edges in the graph. If nodes `u` and `v` are + specified return the number of edges between those nodes. If + the graph is directed, this only returns the number of edges + from `u` to `v`. 
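+
+        A self-loop is counted once per parallel copy (an illustrative
+        sketch):
+
+        >>> G = nx.MultiGraph([(0, 0), (0, 0)])
+        >>> G.number_of_edges(0, 0)
+        2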
+
+        See Also
+        --------
+        size
+
+        Examples
+        --------
+        For undirected multigraphs, this method counts the total number
+        of edges in the graph::
+
+            >>> G = nx.MultiGraph()
+            >>> G.add_edges_from([(0, 1), (0, 1), (1, 2)])
+            [0, 1, 0]
+            >>> G.number_of_edges()
+            3
+
+        If you specify two nodes, this counts the total number of edges
+        joining the two nodes::
+
+            >>> G.number_of_edges(0, 1)
+            2
+
+        For directed multigraphs, this method can count the total number
+        of directed edges from `u` to `v`::
+
+            >>> G = nx.MultiDiGraph()
+            >>> G.add_edges_from([(0, 1), (0, 1), (1, 0)])
+            [0, 1, 0]
+            >>> G.number_of_edges(0, 1)
+            2
+            >>> G.number_of_edges(1, 0)
+            1
+
+        """
+        if u is None:
+            return self.size()
+        try:
+            edgedata = self._adj[u][v]
+        except KeyError:
+            return 0  # no such edge
+        return len(edgedata)
diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/reportviews.py b/.venv/lib/python3.12/site-packages/networkx/classes/reportviews.py
new file mode 100644
index 0000000..789662d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/classes/reportviews.py
@@ -0,0 +1,1447 @@
+"""
+View Classes provide node, edge and degree "views" of a graph.
+
+Views for nodes, edges and degree are provided for all base graph classes.
+A view means a read-only object that is quick to create, automatically
+updated when the graph changes, and provides basic access like `n in V`,
+`for n in V`, `V[n]` and sometimes set operations.
+
+The views are read-only iterable containers that are updated as the
+graph is updated. As with dicts, the graph should not be updated
+while iterating through the view. Views can be iterated multiple times.
+
+Edge and Node views also allow data attribute lookup.
+The resulting attribute dict is writable as `G.edges[3, 4]['color']='red'`.
+Degree views allow lookup of degree values for single nodes.
+Weighted degree is supported with the `weight` argument.
+
+NodeView
+========
+
+    `V = G.nodes` (or `V = G.nodes()`) allows `len(V)`, `n in V`, set
+    operations e.g. "G.nodes & H.nodes", and `dd = G.nodes[n]`, where
+    `dd` is the node data dict. Iteration is over the nodes by default.
+
+NodeDataView
+============
+
+    To iterate over (node, data) pairs, use arguments to `G.nodes()`
+    to create a DataView e.g. `DV = G.nodes(data='color', default='red')`.
+    The DataView iterates as `for n, color in DV` and allows
+    `(n, 'red') in DV`. Using `DV = G.nodes(data=True)`, the DataViews
+    use the full datadict in writable form also allowing containment testing
+    as `(n, {'color': 'red'}) in DV`. DataViews allow set operations when
+    data attributes are hashable.
+
+DegreeView
+==========
+
+    `V = G.degree` allows iteration over (node, degree) pairs as well
+    as lookup: `deg=V[n]`. There are many flavors of DegreeView
+    for In/Out/Directed/Multi. For Directed Graphs, `G.degree`
+    counts both incoming and outgoing edges. `G.out_degree` and
+    `G.in_degree` count only specific directions.
+    Weighted degree using edge data attributes is provided via
+    `V = G.degree(weight='attr_name')` where any string with the
+    attribute name can be used. `weight=None` is the default.
+    No set operations are implemented for degrees, use NodeView.
+
+    The argument `nbunch` restricts iteration to nodes in nbunch.
+    The DegreeView can still lookup any node even if nbunch is specified.
+
+EdgeView
+========
+
+    `V = G.edges` or `V = G.edges()` allows iteration over edges as well as
+    `e in V`, set operations and edge data lookup `dd = G.edges[2, 3]`.
+ Iteration is over 2-tuples `(u, v)` for Graph/DiGraph. For multigraphs + edges 3-tuples `(u, v, key)` are the default but 2-tuples can be obtained + via `V = G.edges(keys=False)`. + + Set operations for directed graphs treat the edges as a set of 2-tuples. + For undirected graphs, 2-tuples are not a unique representation of edges. + So long as the set being compared to contains unique representations + of its edges, the set operations will act as expected. If the other + set contains both `(0, 1)` and `(1, 0)` however, the result of set + operations may contain both representations of the same edge. + +EdgeDataView +============ + + Edge data can be reported using an EdgeDataView typically created + by calling an EdgeView: `DV = G.edges(data='weight', default=1)`. + The EdgeDataView allows iteration over edge tuples, membership checking + but no set operations. + + Iteration depends on `data` and `default` and for multigraph `keys` + If `data is False` (the default) then iterate over 2-tuples `(u, v)`. + If `data is True` iterate over 3-tuples `(u, v, datadict)`. + Otherwise iterate over `(u, v, datadict.get(data, default))`. + For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key` + to create 3-tuples and 4-tuples. + + The argument `nbunch` restricts edges to those incident to nodes in nbunch. +""" + +from abc import ABC +from collections.abc import Mapping, Set + +import networkx as nx + +__all__ = [ + "NodeView", + "NodeDataView", + "EdgeView", + "OutEdgeView", + "InEdgeView", + "EdgeDataView", + "OutEdgeDataView", + "InEdgeDataView", + "MultiEdgeView", + "OutMultiEdgeView", + "InMultiEdgeView", + "MultiEdgeDataView", + "OutMultiEdgeDataView", + "InMultiEdgeDataView", + "DegreeView", + "DiDegreeView", + "InDegreeView", + "OutDegreeView", + "MultiDegreeView", + "DiMultiDegreeView", + "InMultiDegreeView", + "OutMultiDegreeView", +] + + +# NodeViews +class NodeView(Mapping, Set): + """A NodeView class to act as G.nodes for a NetworkX Graph + + Set operations act on the nodes without considering data. + Iteration is over nodes. Node data can be looked up like a dict. + Use NodeDataView to iterate over node data or to specify a data + attribute for lookup. NodeDataView is created by calling the NodeView. + + Parameters + ---------- + graph : NetworkX graph-like class + + Examples + -------- + >>> G = nx.path_graph(3) + >>> NV = G.nodes() + >>> 2 in NV + True + >>> for n in NV: + ... print(n) + 0 + 1 + 2 + >>> assert NV & {1, 2, 3} == {1, 2} + + >>> G.add_node(2, color="blue") + >>> NV[2] + {'color': 'blue'} + >>> G.add_node(8, color="red") + >>> NDV = G.nodes(data=True) + >>> (2, NV[2]) in NDV + True + >>> for n, dd in NDV: + ... print((n, dd.get("color", "aqua"))) + (0, 'aqua') + (1, 'aqua') + (2, 'blue') + (8, 'red') + >>> NDV[2] == NV[2] + True + + >>> NVdata = G.nodes(data="color", default="aqua") + >>> (2, NVdata[2]) in NVdata + True + >>> for n, dd in NVdata: + ... 
print((n, dd)) + (0, 'aqua') + (1, 'aqua') + (2, 'blue') + (8, 'red') + >>> NVdata[2] == NV[2] # NVdata gets 'color', NV gets datadict + False + """ + + __slots__ = ("_nodes",) + + def __getstate__(self): + return {"_nodes": self._nodes} + + def __setstate__(self, state): + self._nodes = state["_nodes"] + + def __init__(self, graph): + self._nodes = graph._node + + # Mapping methods + def __len__(self): + return len(self._nodes) + + def __iter__(self): + return iter(self._nodes) + + def __getitem__(self, n): + if isinstance(n, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.nodes)[{n.start}:{n.stop}:{n.step}]" + ) + return self._nodes[n] + + # Set methods + def __contains__(self, n): + return n in self._nodes + + @classmethod + def _from_iterable(cls, it): + return set(it) + + # DataView method + def __call__(self, data=False, default=None): + if data is False: + return self + return NodeDataView(self._nodes, data, default) + + def data(self, data=True, default=None): + """ + Return a read-only view of node data. + + Parameters + ---------- + data : bool or node data key, default=True + If ``data=True`` (the default), return a `NodeDataView` object that + maps each node to *all* of its attributes. `data` may also be an + arbitrary key, in which case the `NodeDataView` maps each node to + the value for the keyed attribute. In this case, if a node does + not have the `data` attribute, the `default` value is used. + default : object, default=None + The value used when a node does not have a specific attribute. + + Returns + ------- + NodeDataView + The layout of the returned NodeDataView depends on the value of the + `data` parameter. + + Notes + ----- + If ``data=False``, returns a `NodeView` object without data. + + See Also + -------- + NodeDataView + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from( + ... [ + ... (0, {"color": "red", "weight": 10}), + ... (1, {"color": "blue"}), + ... (2, {"color": "yellow", "weight": 2}), + ... ] + ... ) + + Accessing node data with ``data=True`` (the default) returns a + NodeDataView mapping each node to all of its attributes: + + >>> G.nodes.data() + NodeDataView({0: {'color': 'red', 'weight': 10}, 1: {'color': 'blue'}, 2: {'color': 'yellow', 'weight': 2}}) + + If `data` represents a key in the node attribute dict, a NodeDataView mapping + the nodes to the value for that specific key is returned: + + >>> G.nodes.data("color") + NodeDataView({0: 'red', 1: 'blue', 2: 'yellow'}, data='color') + + If a specific key is not found in an attribute dict, the value specified + by `default` is returned: + + >>> G.nodes.data("weight", default=-999) + NodeDataView({0: 10, 1: -999, 2: 2}, data='weight') + + Note that there is no check that the `data` key is in any of the + node attribute dictionaries: + + >>> G.nodes.data("height") + NodeDataView({0: None, 1: None, 2: None}, data='height') + """ + if data is False: + return self + return NodeDataView(self._nodes, data, default) + + def __str__(self): + return str(list(self)) + + def __repr__(self): + return f"{self.__class__.__name__}({tuple(self)})" + + +class NodeDataView(Set): + """A DataView class for nodes of a NetworkX Graph + + The main use for this class is to iterate through node-data pairs. + The data can be the entire data-dictionary for each node, or it + can be a specific attribute (with default) for each node. + Set operations are enabled with NodeDataView, but don't work in + cases where the data is not hashable. Use with caution. 
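+
+    For example, set operations work when the selected data values are
+    hashable (a small sketch; the attribute name `color` is illustrative):
+
+    >>> G = nx.Graph([(0, 1)])
+    >>> G.add_node(0, color="red")
+    >>> G.nodes(data="color") & {(0, "red")}
+    {(0, 'red')}
+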
+ Typically, set operations on nodes use NodeView, not NodeDataView. + That is, they use `G.nodes` instead of `G.nodes(data='foo')`. + + Parameters + ========== + graph : NetworkX graph-like class + data : bool or string (default=False) + default : object (default=None) + """ + + __slots__ = ("_nodes", "_data", "_default") + + def __getstate__(self): + return {"_nodes": self._nodes, "_data": self._data, "_default": self._default} + + def __setstate__(self, state): + self._nodes = state["_nodes"] + self._data = state["_data"] + self._default = state["_default"] + + def __init__(self, nodedict, data=False, default=None): + self._nodes = nodedict + self._data = data + self._default = default + + @classmethod + def _from_iterable(cls, it): + try: + return set(it) + except TypeError as err: + if "unhashable" in str(err): + msg = " : Could be b/c data=True or your values are unhashable" + raise TypeError(str(err) + msg) from err + raise + + def __len__(self): + return len(self._nodes) + + def __iter__(self): + data = self._data + if data is False: + return iter(self._nodes) + if data is True: + return iter(self._nodes.items()) + return ( + (n, dd[data] if data in dd else self._default) + for n, dd in self._nodes.items() + ) + + def __contains__(self, n): + try: + node_in = n in self._nodes + except TypeError: + n, d = n + return n in self._nodes and self[n] == d + if node_in is True: + return node_in + try: + n, d = n + except (TypeError, ValueError): + return False + return n in self._nodes and self[n] == d + + def __getitem__(self, n): + if isinstance(n, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.nodes.data())[{n.start}:{n.stop}:{n.step}]" + ) + ddict = self._nodes[n] + data = self._data + if data is False or data is True: + return ddict + return ddict[data] if data in ddict else self._default + + def __str__(self): + return str(list(self)) + + def __repr__(self): + name = self.__class__.__name__ + if self._data is False: + return f"{name}({tuple(self)})" + if self._data is True: + return f"{name}({dict(self)})" + return f"{name}({dict(self)}, data={self._data!r})" + + +# DegreeViews +class DiDegreeView: + """A View class for degree of nodes in a NetworkX Graph + + The functionality is like dict.items() with (node, degree) pairs. + Additional functionality includes read-only lookup of node degree, + and calling with optional features nbunch (for only a subset of nodes) + and weight (use edge weights to compute degree). + + Parameters + ========== + graph : NetworkX graph-like class + nbunch : node, container of nodes, or None meaning all nodes (default=None) + weight : bool or string (default=None) + + Notes + ----- + DegreeView can still lookup any node even if nbunch is specified. 
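+
+    For example (an illustrative sketch):
+
+    >>> G = nx.path_graph(4)
+    >>> DV = G.degree([0, 1])
+    >>> DV[3]  # lookup outside nbunch still works
+    1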
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> DV = G.degree()
+    >>> assert DV[2] == 1
+    >>> assert sum(deg for n, deg in DV) == 4
+
+    >>> DVweight = G.degree(weight="span")
+    >>> G.add_edge(1, 2, span=34)
+    >>> DVweight[2]
+    34
+    >>> DVweight[0]  # default edge weight is 1
+    1
+    >>> sum(span for n, span in DVweight)  # sum weighted degrees
+    70
+
+    >>> DVnbunch = G.degree(nbunch=(1, 2))
+    >>> assert len(list(DVnbunch)) == 2  # iteration over nbunch only
+    """
+
+    def __init__(self, G, nbunch=None, weight=None):
+        self._graph = G
+        self._succ = G._succ if hasattr(G, "_succ") else G._adj
+        self._pred = G._pred if hasattr(G, "_pred") else G._adj
+        self._nodes = self._succ if nbunch is None else list(G.nbunch_iter(nbunch))
+        self._weight = weight
+
+    def __call__(self, nbunch=None, weight=None):
+        if nbunch is None:
+            if weight == self._weight:
+                return self
+            return self.__class__(self._graph, None, weight)
+        try:
+            if nbunch in self._nodes:
+                if weight == self._weight:
+                    return self[nbunch]
+                return self.__class__(self._graph, None, weight)[nbunch]
+        except TypeError:
+            pass
+        return self.__class__(self._graph, nbunch, weight)
+
+    def __getitem__(self, n):
+        weight = self._weight
+        succs = self._succ[n]
+        preds = self._pred[n]
+        if weight is None:
+            return len(succs) + len(preds)
+        return sum(dd.get(weight, 1) for dd in succs.values()) + sum(
+            dd.get(weight, 1) for dd in preds.values()
+        )
+
+    def __iter__(self):
+        weight = self._weight
+        if weight is None:
+            for n in self._nodes:
+                succs = self._succ[n]
+                preds = self._pred[n]
+                yield (n, len(succs) + len(preds))
+        else:
+            for n in self._nodes:
+                succs = self._succ[n]
+                preds = self._pred[n]
+                deg = sum(dd.get(weight, 1) for dd in succs.values()) + sum(
+                    dd.get(weight, 1) for dd in preds.values()
+                )
+                yield (n, deg)
+
+    def __len__(self):
+        return len(self._nodes)
+
+    def __str__(self):
+        return str(list(self))
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({dict(self)})"
+
+
+class DegreeView(DiDegreeView):
+    """A DegreeView class to act as G.degree for a NetworkX Graph
+
+    Typical usage focuses on iteration over `(node, degree)` pairs.
+    The degree is by default the number of edges incident to the node.
+    Optional argument `weight` enables weighted degree using the edge
+    attribute named in the `weight` argument. Reporting and iteration
+    can also be restricted to a subset of nodes using `nbunch`.
+
+    Additional functionality includes node lookup so that `G.degree[n]`
+    reports the (possibly weighted) degree of node `n`. Calling the
+    view creates a view with different arguments `nbunch` or `weight`.
+
+    Parameters
+    ==========
+    graph : NetworkX graph-like class
+    nbunch : node, container of nodes, or None meaning all nodes (default=None)
+    weight : string or None (default=None)
+
+    Notes
+    -----
+    DegreeView can still lookup any node even if nbunch is specified.
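+
+    A self-loop contributes 2 to the degree (a small sketch):
+
+    >>> G = nx.Graph([(0, 0), (0, 1)])
+    >>> G.degree[0]
+    3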
+ + Examples + -------- + >>> G = nx.path_graph(3) + >>> DV = G.degree() + >>> assert DV[2] == 1 + >>> assert G.degree[2] == 1 + >>> assert sum(deg for n, deg in DV) == 4 + + >>> DVweight = G.degree(weight="span") + >>> G.add_edge(1, 2, span=34) + >>> DVweight[2] + 34 + >>> DVweight[0] # default edge weight is 1 + 1 + >>> sum(span for n, span in DVweight) # sum weighted degrees + 70 + + >>> DVnbunch = G.degree(nbunch=(1, 2)) + >>> assert len(list(DVnbunch)) == 2 # iteration over nbunch only + """ + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return len(nbrs) + (n in nbrs) + return sum(dd.get(weight, 1) for dd in nbrs.values()) + ( + n in nbrs and nbrs[n].get(weight, 1) + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + yield (n, len(nbrs) + (n in nbrs)) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(dd.get(weight, 1) for dd in nbrs.values()) + ( + n in nbrs and nbrs[n].get(weight, 1) + ) + yield (n, deg) + + +class OutDegreeView(DiDegreeView): + """A DegreeView class to report out_degree for a DiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if self._weight is None: + return len(nbrs) + return sum(dd.get(self._weight, 1) for dd in nbrs.values()) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + succs = self._succ[n] + yield (n, len(succs)) + else: + for n in self._nodes: + succs = self._succ[n] + deg = sum(dd.get(weight, 1) for dd in succs.values()) + yield (n, deg) + + +class InDegreeView(DiDegreeView): + """A DegreeView class to report in_degree for a DiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._pred[n] + if weight is None: + return len(nbrs) + return sum(dd.get(weight, 1) for dd in nbrs.values()) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + preds = self._pred[n] + yield (n, len(preds)) + else: + for n in self._nodes: + preds = self._pred[n] + deg = sum(dd.get(weight, 1) for dd in preds.values()) + yield (n, deg) + + +class MultiDegreeView(DiDegreeView): + """A DegreeView class for undirected multigraphs; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return sum(len(keys) for keys in nbrs.values()) + ( + n in nbrs and len(nbrs[n]) + ) + # edge weighted graph - degree is sum of nbr edge weights + deg = sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + if n in nbrs: + deg += sum(d.get(weight, 1) for d in nbrs[n].values()) + return deg + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(len(keys) for keys in nbrs.values()) + ( + n in nbrs and len(nbrs[n]) + ) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + if n in nbrs: + deg += sum(d.get(weight, 1) for d in nbrs[n].values()) + yield (n, deg) + + +class DiMultiDegreeView(DiDegreeView): + """A DegreeView class for MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + succs = self._succ[n] + preds = self._pred[n] + if weight is None: + return sum(len(keys) for keys in succs.values()) + sum( + len(keys) for keys in preds.values() + ) + # edge weighted graph - degree 
is sum of nbr edge weights + deg = sum( + d.get(weight, 1) for key_dict in succs.values() for d in key_dict.values() + ) + sum( + d.get(weight, 1) for key_dict in preds.values() for d in key_dict.values() + ) + return deg + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + succs = self._succ[n] + preds = self._pred[n] + deg = sum(len(keys) for keys in succs.values()) + sum( + len(keys) for keys in preds.values() + ) + yield (n, deg) + else: + for n in self._nodes: + succs = self._succ[n] + preds = self._pred[n] + deg = sum( + d.get(weight, 1) + for key_dict in succs.values() + for d in key_dict.values() + ) + sum( + d.get(weight, 1) + for key_dict in preds.values() + for d in key_dict.values() + ) + yield (n, deg) + + +class InMultiDegreeView(DiDegreeView): + """A DegreeView class for inward degree of MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._pred[n] + if weight is None: + return sum(len(data) for data in nbrs.values()) + # edge weighted graph - degree is sum of nbr edge weights + return sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._pred[n] + deg = sum(len(data) for data in nbrs.values()) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._pred[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + yield (n, deg) + + +class OutMultiDegreeView(DiDegreeView): + """A DegreeView class for outward degree of MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return sum(len(data) for data in nbrs.values()) + # edge weighted graph - degree is sum of nbr edge weights + return sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(len(data) for data in nbrs.values()) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + yield (n, deg) + + +# A base class for all edge views. 
Ensures all edge view and edge data view +# objects/classes are captured by `isinstance(obj, EdgeViewABC)` and +# `issubclass(cls, EdgeViewABC)` respectively +class EdgeViewABC(ABC): + pass + + +# EdgeDataViews +class OutEdgeDataView(EdgeViewABC): + """EdgeDataView for outward edges of DiGraph; See EdgeDataView""" + + __slots__ = ( + "_viewer", + "_nbunch", + "_data", + "_default", + "_adjdict", + "_nodes_nbrs", + "_report", + ) + + def __getstate__(self): + return { + "viewer": self._viewer, + "nbunch": self._nbunch, + "data": self._data, + "default": self._default, + } + + def __setstate__(self, state): + self.__init__(**state) + + def __init__(self, viewer, nbunch=None, data=False, *, default=None): + self._viewer = viewer + adjdict = self._adjdict = viewer._adjdict + if nbunch is None: + self._nodes_nbrs = adjdict.items + else: + # dict retains order of nodes but acts like a set + nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch)) + self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch] + self._nbunch = nbunch + self._data = data + self._default = default + # Set _report based on data and default + if data is True: + self._report = lambda n, nbr, dd: (n, nbr, dd) + elif data is False: + self._report = lambda n, nbr, dd: (n, nbr) + else: # data is attribute name + self._report = ( + lambda n, nbr, dd: (n, nbr, dd[data]) + if data in dd + else (n, nbr, default) + ) + + def __len__(self): + return sum(len(nbrs) for n, nbrs in self._nodes_nbrs()) + + def __iter__(self): + return ( + self._report(n, nbr, dd) + for n, nbrs in self._nodes_nbrs() + for nbr, dd in nbrs.items() + ) + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and u not in self._nbunch: + return False # this edge doesn't start in nbunch + try: + ddict = self._adjdict[u][v] + except KeyError: + return False + return e == self._report(u, v, ddict) + + def __str__(self): + return str(list(self)) + + def __repr__(self): + return f"{self.__class__.__name__}({list(self)})" + + +class EdgeDataView(OutEdgeDataView): + """A EdgeDataView class for edges of Graph + + This view is primarily used to iterate over the edges reporting + edges as node-tuples with edge data optionally reported. The + argument `nbunch` allows restriction to edges incident to nodes + in that container/singleton. The default (nbunch=None) + reports all edges. The arguments `data` and `default` control + what edge data is reported. The default `data is False` reports + only node-tuples for each edge. If `data is True` the entire edge + data dict is returned. Otherwise `data` is assumed to hold the name + of the edge attribute to report with default `default` if that + edge attribute is not present. 
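+
+    For instance, `nbunch` restricts reporting to edges incident to the
+    given nodes (an illustrative sketch):
+
+    >>> G = nx.path_graph(4)
+    >>> list(G.edges(nbunch=[0]))
+    [(0, 1)]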
+
+    Parameters
+    ----------
+    nbunch : container of nodes, node or None (default None)
+    data : False, True or string (default False)
+    default : default value (default None)
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> G.add_edge(1, 2, foo="bar")
+    >>> list(G.edges(data="foo", default="biz"))
+    [(0, 1, 'biz'), (1, 2, 'bar')]
+    >>> assert (0, 1, "biz") in G.edges(data="foo", default="biz")
+    """
+
+    __slots__ = ()
+
+    def __len__(self):
+        return sum(1 for e in self)
+
+    def __iter__(self):
+        seen = {}
+        for n, nbrs in self._nodes_nbrs():
+            for nbr, dd in nbrs.items():
+                if nbr not in seen:
+                    yield self._report(n, nbr, dd)
+            seen[n] = 1
+        del seen
+
+    def __contains__(self, e):
+        u, v = e[:2]
+        if self._nbunch is not None and u not in self._nbunch and v not in self._nbunch:
+            return False  # this edge doesn't start and it doesn't end in nbunch
+        try:
+            ddict = self._adjdict[u][v]
+        except KeyError:
+            return False
+        return e == self._report(u, v, ddict)
+
+
+class InEdgeDataView(OutEdgeDataView):
+    """An EdgeDataView class for inward edges of DiGraph; See EdgeDataView"""
+
+    __slots__ = ()
+
+    def __iter__(self):
+        return (
+            self._report(nbr, n, dd)
+            for n, nbrs in self._nodes_nbrs()
+            for nbr, dd in nbrs.items()
+        )
+
+    def __contains__(self, e):
+        u, v = e[:2]
+        if self._nbunch is not None and v not in self._nbunch:
+            return False  # this edge doesn't end in nbunch
+        try:
+            ddict = self._adjdict[v][u]
+        except KeyError:
+            return False
+        return e == self._report(u, v, ddict)
+
+
+class OutMultiEdgeDataView(OutEdgeDataView):
+    """An EdgeDataView for outward edges of MultiDiGraph; See EdgeDataView"""
+
+    __slots__ = ("keys",)
+
+    def __getstate__(self):
+        return {
+            "viewer": self._viewer,
+            "nbunch": self._nbunch,
+            "keys": self.keys,
+            "data": self._data,
+            "default": self._default,
+        }
+
+    def __setstate__(self, state):
+        self.__init__(**state)
+
+    def __init__(self, viewer, nbunch=None, data=False, *, default=None, keys=False):
+        self._viewer = viewer
+        adjdict = self._adjdict = viewer._adjdict
+        self.keys = keys
+        if nbunch is None:
+            self._nodes_nbrs = adjdict.items
+        else:
+            # dict retains order of nodes but acts like a set
+            nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch))
+            self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch]
+        self._nbunch = nbunch
+        self._data = data
+        self._default = default
+        # Set _report based on data and default
+        if data is True:
+            if keys is True:
+                self._report = lambda n, nbr, k, dd: (n, nbr, k, dd)
+            else:
+                self._report = lambda n, nbr, k, dd: (n, nbr, dd)
+        elif data is False:
+            if keys is True:
+                self._report = lambda n, nbr, k, dd: (n, nbr, k)
+            else:
+                self._report = lambda n, nbr, k, dd: (n, nbr)
+        else:  # data is attribute name
+            if keys is True:
+                self._report = (
+                    lambda n, nbr, k, dd: (n, nbr, k, dd[data])
+                    if data in dd
+                    else (n, nbr, k, default)
+                )
+            else:
+                self._report = (
+                    lambda n, nbr, k, dd: (n, nbr, dd[data])
+                    if data in dd
+                    else (n, nbr, default)
+                )
+
+    def __len__(self):
+        return sum(1 for e in self)
+
+    def __iter__(self):
+        return (
+            self._report(n, nbr, k, dd)
+            for n, nbrs in self._nodes_nbrs()
+            for nbr, kd in nbrs.items()
+            for k, dd in kd.items()
+        )
+
+    def __contains__(self, e):
+        u, v = e[:2]
+        if self._nbunch is not None and u not in self._nbunch:
+            return False  # this edge doesn't start in nbunch
+        try:
+            kdict = self._adjdict[u][v]
+        except KeyError:
+            return False
+        if self.keys is True:
+            k = e[2]
+            try:
+                dd = kdict[k]
+            except KeyError:
+                return False
+            return e ==
self._report(u, v, k, dd) + return any(e == self._report(u, v, k, dd) for k, dd in kdict.items()) + + +class MultiEdgeDataView(OutMultiEdgeDataView): + """An EdgeDataView class for edges of MultiGraph; See EdgeDataView""" + + __slots__ = () + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr, kd in nbrs.items(): + if nbr not in seen: + for k, dd in kd.items(): + yield self._report(n, nbr, k, dd) + seen[n] = 1 + del seen + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and u not in self._nbunch and v not in self._nbunch: + return False # this edge doesn't start and doesn't end in nbunch + try: + kdict = self._adjdict[u][v] + except KeyError: + try: + kdict = self._adjdict[v][u] + except KeyError: + return False + if self.keys is True: + k = e[2] + try: + dd = kdict[k] + except KeyError: + return False + return e == self._report(u, v, k, dd) + return any(e == self._report(u, v, k, dd) for k, dd in kdict.items()) + + +class InMultiEdgeDataView(OutMultiEdgeDataView): + """An EdgeDataView for inward edges of MultiDiGraph; See EdgeDataView""" + + __slots__ = () + + def __iter__(self): + return ( + self._report(nbr, n, k, dd) + for n, nbrs in self._nodes_nbrs() + for nbr, kd in nbrs.items() + for k, dd in kd.items() + ) + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and v not in self._nbunch: + return False # this edge doesn't end in nbunch + try: + kdict = self._adjdict[v][u] + except KeyError: + return False + if self.keys is True: + k = e[2] + dd = kdict[k] + return e == self._report(u, v, k, dd) + return any(e == self._report(u, v, k, dd) for k, dd in kdict.items()) + + +# EdgeViews have set operations and no data reported +class OutEdgeView(Set, Mapping, EdgeViewABC): + """A EdgeView class for outward edges of a DiGraph""" + + __slots__ = ("_adjdict", "_graph", "_nodes_nbrs") + + def __getstate__(self): + return {"_graph": self._graph, "_adjdict": self._adjdict} + + def __setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + @classmethod + def _from_iterable(cls, it): + return set(it) + + dataview = OutEdgeDataView + + def __init__(self, G): + self._graph = G + self._adjdict = G._succ if hasattr(G, "succ") else G._adj + self._nodes_nbrs = self._adjdict.items + + # Set methods + def __len__(self): + return sum(len(nbrs) for n, nbrs in self._nodes_nbrs()) + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr in nbrs: + yield (n, nbr) + + def __contains__(self, e): + try: + u, v = e + return v in self._adjdict[u] + except KeyError: + return False + + # Mapping Methods + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v = e + try: + return self._adjdict[u][v] + except KeyError as ex: # Customize msg to indicate exception origin + raise KeyError(f"The edge {e} is not in the graph.") + + # EdgeDataView methods + def __call__(self, nbunch=None, data=False, *, default=None): + if nbunch is None and data is False: + return self + return self.dataview(self, nbunch, data, default=default) + + def data(self, data=True, default=None, nbunch=None): + """ + Return a read-only view of edge data. + + Parameters + ---------- + data : bool or edge attribute key + If ``data=True``, then the data view maps each edge to a dictionary + containing all of its attributes. 
If `data` is a key in the edge
+            dictionary, then the data view maps each edge to its value for
+            the keyed attribute. In this case, if the edge doesn't have the
+            attribute, the `default` value is returned.
+        default : object, default=None
+            The value used when an edge does not have a specific attribute
+        nbunch : container of nodes, optional (default=None)
+            Allows restriction to edges only involving certain nodes. All edges
+            are considered by default.
+
+        Returns
+        -------
+        dataview
+            Returns an `EdgeDataView` for undirected Graphs, `OutEdgeDataView`
+            for DiGraphs, `MultiEdgeDataView` for MultiGraphs and
+            `OutMultiEdgeDataView` for MultiDiGraphs.
+
+        Notes
+        -----
+        If ``data=False``, returns an `EdgeView` without any edge data.
+
+        See Also
+        --------
+        EdgeDataView
+        OutEdgeDataView
+        MultiEdgeDataView
+        OutMultiEdgeDataView
+
+        Examples
+        --------
+        >>> G = nx.Graph()
+        >>> G.add_edges_from(
+        ...     [
+        ...         (0, 1, {"dist": 3, "capacity": 20}),
+        ...         (1, 2, {"dist": 4}),
+        ...         (2, 0, {"dist": 5}),
+        ...     ]
+        ... )
+
+        Accessing edge data with ``data=True`` (the default) returns an
+        edge data view object listing each edge with all of its attributes:
+
+        >>> G.edges.data()
+        EdgeDataView([(0, 1, {'dist': 3, 'capacity': 20}), (0, 2, {'dist': 5}), (1, 2, {'dist': 4})])
+
+        If `data` represents a key in the edge attribute dict, a dataview listing
+        each edge with its value for that specific key is returned:
+
+        >>> G.edges.data("dist")
+        EdgeDataView([(0, 1, 3), (0, 2, 5), (1, 2, 4)])
+
+        `nbunch` can be used to limit the edges:
+
+        >>> G.edges.data("dist", nbunch=[0])
+        EdgeDataView([(0, 1, 3), (0, 2, 5)])
+
+        If a specific key is not found in an edge attribute dict, the value
+        specified by `default` is used:
+
+        >>> G.edges.data("capacity")
+        EdgeDataView([(0, 1, 20), (0, 2, None), (1, 2, None)])
+
+        Note that there is no check that the `data` key is present in any of
+        the edge attribute dictionaries:
+
+        >>> G.edges.data("speed")
+        EdgeDataView([(0, 1, None), (0, 2, None), (1, 2, None)])
+        """
+        if nbunch is None and data is False:
+            return self
+        return self.dataview(self, nbunch, data, default=default)
+
+    # String Methods
+    def __str__(self):
+        return str(list(self))
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({list(self)})"
+
+
+class EdgeView(OutEdgeView):
+    """A EdgeView class for edges of a Graph
+
+    This densely packed View allows iteration over edges, data lookup
+    like a dict and set operations on edges represented by node-tuples.
+    In addition, edge data can be controlled by calling this object
+    possibly creating an EdgeDataView. Typically edges are iterated over
+    and reported as `(u, v)` node tuples or `(u, v, key)` node/key tuples
+    for multigraphs. Those edge representations can also be used to
+    look up the data dict for any edge. Set operations also are available
+    where those tuples are the elements of the set.
+    Calling this object with optional arguments `data`, `default` and `keys`
+    controls the form of the tuple (see EdgeDataView). Optional argument
+    `nbunch` allows restriction to edges only involving certain nodes.
+
+    If `data is False` (the default) then iterate over 2-tuples `(u, v)`.
+    If `data is True` iterate over 3-tuples `(u, v, datadict)`.
+    Otherwise iterate over `(u, v, datadict.get(data, default))`.
+    For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key` above.
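+
+    For undirected graphs, membership tests accept either node order (a
+    small sketch):
+
+    >>> G = nx.Graph([(0, 1)])
+    >>> (1, 0) in G.edges
+    True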
+ + Parameters + ---------- + graph : NetworkX graph-like class + nbunch : (default=all nodes in graph) only report edges with these nodes + keys : (only for MultiGraph; default=False) report edge key in tuple + data : bool or string (default=False) see above + default : object (default=None) + + Examples + -------- + >>> G = nx.path_graph(4) + >>> EV = G.edges() + >>> (2, 3) in EV + True + >>> for u, v in EV: + ... print((u, v)) + (0, 1) + (1, 2) + (2, 3) + >>> assert EV & {(1, 2), (3, 4)} == {(1, 2)} + + >>> EVdata = G.edges(data="color", default="aqua") + >>> G.add_edge(2, 3, color="blue") + >>> assert (2, 3, "blue") in EVdata + >>> for u, v, c in EVdata: + ... print(f"({u}, {v}) has color: {c}") + (0, 1) has color: aqua + (1, 2) has color: aqua + (2, 3) has color: blue + + >>> EVnbunch = G.edges(nbunch=2) + >>> assert (2, 3) in EVnbunch + >>> assert (0, 1) not in EVnbunch + >>> for u, v in EVnbunch: + ... assert u == 2 or v == 2 + + >>> MG = nx.path_graph(4, create_using=nx.MultiGraph) + >>> EVmulti = MG.edges(keys=True) + >>> (2, 3, 0) in EVmulti + True + >>> (2, 3) in EVmulti # 2-tuples work even when keys is True + True + >>> key = MG.add_edge(2, 3) + >>> for u, v, k in EVmulti: + ... print((u, v, k)) + (0, 1, 0) + (1, 2, 0) + (2, 3, 0) + (2, 3, 1) + """ + + __slots__ = () + + dataview = EdgeDataView + + def __len__(self): + num_nbrs = (len(nbrs) + (n in nbrs) for n, nbrs in self._nodes_nbrs()) + return sum(num_nbrs) // 2 + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr in list(nbrs): + if nbr not in seen: + yield (n, nbr) + seen[n] = 1 + del seen + + def __contains__(self, e): + try: + u, v = e[:2] + return v in self._adjdict[u] or u in self._adjdict[v] + except (KeyError, ValueError): + return False + + +class InEdgeView(OutEdgeView): + """An EdgeView class for inward edges of a DiGraph""" + + __slots__ = () + + def __setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + dataview = InEdgeDataView + + def __init__(self, G): + self._graph = G + self._adjdict = G._pred if hasattr(G, "pred") else G._adj + self._nodes_nbrs = self._adjdict.items + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr in nbrs: + yield (nbr, n) + + def __contains__(self, e): + try: + u, v = e + return u in self._adjdict[v] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v = e + return self._adjdict[v][u] + + +class OutMultiEdgeView(OutEdgeView): + """An EdgeView class for outward edges of a MultiDiGraph""" + + __slots__ = () + + dataview = OutMultiEdgeDataView + + def __len__(self): + return sum( + len(kdict) for n, nbrs in self._nodes_nbrs() for nbr, kdict in nbrs.items() + ) + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr, kdict in nbrs.items(): + for key in kdict: + yield (n, nbr, key) + + def __contains__(self, e): + N = len(e) + if N == 3: + u, v, k = e + elif N == 2: + u, v = e + k = 0 + else: + raise ValueError("MultiEdge must have length 2 or 3") + try: + return k in self._adjdict[u][v] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v, k = e + return self._adjdict[u][v][k] + + def
__call__(self, nbunch=None, data=False, *, default=None, keys=False): + if nbunch is None and data is False and keys is True: + return self + return self.dataview(self, nbunch, data, default=default, keys=keys) + + def data(self, data=True, default=None, nbunch=None, keys=False): + if nbunch is None and data is False and keys is True: + return self + return self.dataview(self, nbunch, data, default=default, keys=keys) + + +class MultiEdgeView(OutMultiEdgeView): + """A EdgeView class for edges of a MultiGraph""" + + __slots__ = () + + dataview = MultiEdgeDataView + + def __len__(self): + return sum(1 for e in self) + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr, kd in nbrs.items(): + if nbr not in seen: + for k, dd in kd.items(): + yield (n, nbr, k) + seen[n] = 1 + del seen + + +class InMultiEdgeView(OutMultiEdgeView): + """A EdgeView class for inward edges of a MultiDiGraph""" + + __slots__ = () + + def __setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + dataview = InMultiEdgeDataView + + def __init__(self, G): + self._graph = G + self._adjdict = G._pred if hasattr(G, "pred") else G._adj + self._nodes_nbrs = self._adjdict.items + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr, kdict in nbrs.items(): + for key in kdict: + yield (nbr, n, key) + + def __contains__(self, e): + N = len(e) + if N == 3: + u, v, k = e + elif N == 2: + u, v = e + k = 0 + else: + raise ValueError("MultiEdge must have length 2 or 3") + try: + return k in self._adjdict[v][u] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v, k = e + return self._adjdict[v][u][k] diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..3b9aa5e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/dispatch_interface.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/dispatch_interface.cpython-312.pyc new file mode 100644 index 0000000..9e4ca72 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/dispatch_interface.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/historical_tests.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/historical_tests.cpython-312.pyc new file mode 100644 index 0000000..58fd56f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/historical_tests.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-312.pyc new file mode 100644 index 0000000..2821f0c Binary 
files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-312.pyc new file mode 100644 index 0000000..42f5ab4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-312.pyc new file mode 100644 index 0000000..58f2811 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_filters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_filters.cpython-312.pyc new file mode 100644 index 0000000..2315a71 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_filters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-312.pyc new file mode 100644 index 0000000..4b0e358 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graph.cpython-312.pyc new file mode 100644 index 0000000..c4ae68f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-312.pyc new file mode 100644 index 0000000..e7c8b28 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-312.pyc new file mode 100644 index 0000000..dc14a49 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-312.pyc new file mode 100644 index 0000000..761e2c6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-312.pyc new file mode 100644 index 0000000..59df1a8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_reportviews.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_reportviews.cpython-312.pyc new file mode 100644 index 0000000..18f6794 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_reportviews.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_special.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_special.cpython-312.pyc new file mode 100644 index 0000000..a434a2e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_special.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_subgraphviews.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_subgraphviews.cpython-312.pyc new file mode 100644 index 0000000..edee9c8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/classes/tests/__pycache__/test_subgraphviews.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/dispatch_interface.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/dispatch_interface.py new file mode 100644 index 0000000..5cc908d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/dispatch_interface.py @@ -0,0 +1,185 @@ +# This file contains utilities for testing the dispatching feature + +# A full test of all dispatchable algorithms is performed by +# modifying the pytest invocation and setting an environment variable +# NETWORKX_TEST_BACKEND=nx_loopback pytest +# This is comprehensive, but only tests the `test_override_dispatch` +# function in networkx.classes.backends. + +# To test the `_dispatchable` function directly, several tests scattered throughout +# NetworkX have been augmented to test normal and dispatch mode. +# Searching for `dispatch_interface` should locate the specific tests. 
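+ +# For illustration (class and backend names here are hypothetical): a graph +# class opts into a backend by setting the ``__networkx_backend__`` attribute, +# +#     class MyBackendGraph(nx.Graph): +#         __networkx_backend__ = "my_backend" +# +# after which an invocation like NETWORKX_TEST_BACKEND=my_backend pytest +# routes dispatchable calls through that backend for the test run.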
+ +import networkx as nx +from networkx import DiGraph, Graph, MultiDiGraph, MultiGraph, PlanarEmbedding +from networkx.classes.reportviews import NodeView + + +class LoopbackGraph(Graph): + __networkx_backend__ = "nx_loopback" + + +class LoopbackDiGraph(DiGraph): + __networkx_backend__ = "nx_loopback" + + +class LoopbackMultiGraph(MultiGraph): + __networkx_backend__ = "nx_loopback" + + +class LoopbackMultiDiGraph(MultiDiGraph): + __networkx_backend__ = "nx_loopback" + + +class LoopbackPlanarEmbedding(PlanarEmbedding): + __networkx_backend__ = "nx_loopback" + + +def convert(graph): + if isinstance(graph, PlanarEmbedding): + return LoopbackPlanarEmbedding(graph) + if isinstance(graph, MultiDiGraph): + return LoopbackMultiDiGraph(graph) + if isinstance(graph, MultiGraph): + return LoopbackMultiGraph(graph) + if isinstance(graph, DiGraph): + return LoopbackDiGraph(graph) + if isinstance(graph, Graph): + return LoopbackGraph(graph) + raise TypeError(f"Unsupported type of graph: {type(graph)}") + + +class LoopbackBackendInterface: + def __getattr__(self, item): + try: + return nx.utils.backends._registered_algorithms[item].orig_func + except KeyError: + raise AttributeError(item) from None + + @staticmethod + def convert_from_nx( + graph, + *, + edge_attrs=None, + node_attrs=None, + preserve_edge_attrs=None, + preserve_node_attrs=None, + preserve_graph_attrs=None, + name=None, + graph_name=None, + ): + if name in { + # Raise if input graph changes. See test_dag.py::test_topological_sort6 + "lexicographical_topological_sort", + "topological_generations", + "topological_sort", + # Would be nice to some day avoid these cutoffs of full testing + }: + return graph + if isinstance(graph, NodeView): + # Convert to a Graph with only nodes (no edges) + new_graph = Graph() + new_graph.add_nodes_from(graph.items()) + graph = new_graph + G = LoopbackGraph() + elif not isinstance(graph, Graph): + raise TypeError( + f"Bad type for graph argument {graph_name} in {name}: {type(graph)}" + ) + elif graph.__class__ in {Graph, LoopbackGraph}: + G = LoopbackGraph() + elif graph.__class__ in {DiGraph, LoopbackDiGraph}: + G = LoopbackDiGraph() + elif graph.__class__ in {MultiGraph, LoopbackMultiGraph}: + G = LoopbackMultiGraph() + elif graph.__class__ in {MultiDiGraph, LoopbackMultiDiGraph}: + G = LoopbackMultiDiGraph() + elif graph.__class__ in {PlanarEmbedding, LoopbackPlanarEmbedding}: + G = LoopbackDiGraph() # or LoopbackPlanarEmbedding + else: + # Would be nice to handle these better some day + # nx.algorithms.approximation.kcomponents._AntiGraph + # nx.classes.tests.test_multidigraph.MultiDiGraphSubClass + # nx.classes.tests.test_multigraph.MultiGraphSubClass + G = graph.__class__() + + if preserve_graph_attrs: + G.graph.update(graph.graph) + + # add nodes + G.add_nodes_from(graph) + if preserve_node_attrs: + for n, dd in G._node.items(): + dd.update(graph.nodes[n]) + elif node_attrs: + for n, dd in G._node.items(): + dd.update( + (attr, graph._node[n].get(attr, default)) + for attr, default in node_attrs.items() + if default is not None or attr in graph._node[n] + ) + + # tools to build datadict and keydict + if preserve_edge_attrs: + + def G_new_datadict(old_dd): + return G.edge_attr_dict_factory(old_dd) + elif edge_attrs: + + def G_new_datadict(old_dd): + return G.edge_attr_dict_factory( + (attr, old_dd.get(attr, default)) + for attr, default in edge_attrs.items() + if default is not None or attr in old_dd + ) + else: + + def G_new_datadict(old_dd): + return G.edge_attr_dict_factory() + + if 
G.is_multigraph(): + + def G_new_inner(keydict): + kd = G.adjlist_inner_dict_factory( + (k, G_new_datadict(dd)) for k, dd in keydict.items() + ) + return kd + else: + G_new_inner = G_new_datadict + + # add edges keeping the same order in _adj and _pred + G_adj = G._adj + if G.is_directed(): + for n, nbrs in graph._adj.items(): + G_adj[n].update((nbr, G_new_inner(dd)) for nbr, dd in nbrs.items()) + # ensure same datadict for pred and adj; and pred order of graph._pred + G_pred = G._pred + for n, nbrs in graph._pred.items(): + G_pred[n].update((nbr, G_adj[nbr][n]) for nbr in nbrs) + else: # undirected + for n, nbrs in graph._adj.items(): + # ensure same datadict for both ways; and adj order of graph._adj + G_adj[n].update( + (nbr, G_adj[nbr][n] if n in G_adj[nbr] else G_new_inner(dd)) + for nbr, dd in nbrs.items() + ) + + return G + + @staticmethod + def convert_to_nx(obj, *, name=None): + return obj + + @staticmethod + def on_start_tests(items): + # Verify that items can be xfailed + for item in items: + assert hasattr(item, "add_marker") + + def can_run(self, name, args, kwargs): + # It is unnecessary to define this function if algorithms are fully supported. + # We include it for illustration purposes. + return hasattr(self, name) + + +backend_interface = LoopbackBackendInterface() diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/historical_tests.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/historical_tests.py new file mode 100644 index 0000000..9dad24e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/historical_tests.py @@ -0,0 +1,475 @@ +"""Original NetworkX graph tests""" + +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.utils import edges_equal, nodes_equal + + +class HistoricalTests: + @classmethod + def setup_class(cls): + cls.null = nx.null_graph() + cls.P1 = cnlti(nx.path_graph(1), first_label=1) + cls.P3 = cnlti(nx.path_graph(3), first_label=1) + cls.P10 = cnlti(nx.path_graph(10), first_label=1) + cls.K1 = cnlti(nx.complete_graph(1), first_label=1) + cls.K3 = cnlti(nx.complete_graph(3), first_label=1) + cls.K4 = cnlti(nx.complete_graph(4), first_label=1) + cls.K5 = cnlti(nx.complete_graph(5), first_label=1) + cls.K10 = cnlti(nx.complete_graph(10), first_label=1) + cls.G = nx.Graph + + def test_name(self): + G = self.G(name="test") + assert G.name == "test" + H = self.G() + assert H.name == "" + + # Nodes + + def test_add_remove_node(self): + G = self.G() + G.add_node("A") + assert G.has_node("A") + G.remove_node("A") + assert not G.has_node("A") + + def test_nonhashable_node(self): + # Test if a non-hashable object is in the Graph. A python dict will + # raise a TypeError, but for a Graph class a simple False should be + # returned (see Graph __contains__). If it cannot be a node then it is + # not a node. 
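+ # For contrast (illustrative): a dict lookup such as {}[["A"]] raises + # TypeError because lists are unhashable, while ["A"] in G simply + # evaluates to False.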
+ G = self.G() + assert not G.has_node(["A"]) + assert not G.has_node({"A": 1}) + + def test_add_nodes_from(self): + G = self.G() + G.add_nodes_from(list("ABCDEFGHIJKL")) + assert G.has_node("L") + G.remove_nodes_from(["H", "I", "J", "K", "L"]) + G.add_nodes_from([1, 2, 3, 4]) + assert sorted(G.nodes(), key=str) == [ + 1, + 2, + 3, + 4, + "A", + "B", + "C", + "D", + "E", + "F", + "G", + ] + # test __iter__ + assert sorted(G, key=str) == [1, 2, 3, 4, "A", "B", "C", "D", "E", "F", "G"] + + def test_contains(self): + G = self.G() + G.add_node("A") + assert "A" in G + assert [] not in G # never raise a Key or TypeError in this test + assert {1: 1} not in G + + def test_add_remove(self): + # Test add_node and remove_node acting for various nbunch + G = self.G() + G.add_node("m") + assert G.has_node("m") + G.add_node("m") # no complaints + pytest.raises(nx.NetworkXError, G.remove_node, "j") + G.remove_node("m") + assert list(G) == [] + + def test_nbunch_is_list(self): + G = self.G() + G.add_nodes_from(list("ABCD")) + G.add_nodes_from(self.P3) # add nbunch of nodes (nbunch=Graph) + assert sorted(G.nodes(), key=str) == [1, 2, 3, "A", "B", "C", "D"] + G.remove_nodes_from(self.P3) # remove nbunch of nodes (nbunch=Graph) + assert sorted(G.nodes(), key=str) == ["A", "B", "C", "D"] + + def test_nbunch_is_set(self): + G = self.G() + nbunch = set("ABCDEFGHIJKL") + G.add_nodes_from(nbunch) + assert G.has_node("L") + + def test_nbunch_dict(self): + # nbunch is a dict with nodes as keys + G = self.G() + nbunch = set("ABCDEFGHIJKL") + G.add_nodes_from(nbunch) + nbunch = {"I": "foo", "J": 2, "K": True, "L": "spam"} + G.remove_nodes_from(nbunch) + assert sorted(G.nodes(), key=str) == ["A", "B", "C", "D", "E", "F", "G", "H"] + + def test_nbunch_iterator(self): + G = self.G() + G.add_nodes_from(["A", "B", "C", "D", "E", "F", "G", "H"]) + n_iter = self.P3.nodes() + G.add_nodes_from(n_iter) + assert sorted(G.nodes(), key=str) == [ + 1, + 2, + 3, + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + ] + n_iter = self.P3.nodes() # rebuild same iterator + G.remove_nodes_from(n_iter) # remove nbunch of nodes (nbunch=iterator) + assert sorted(G.nodes(), key=str) == ["A", "B", "C", "D", "E", "F", "G", "H"] + + def test_nbunch_graph(self): + G = self.G() + G.add_nodes_from(["A", "B", "C", "D", "E", "F", "G", "H"]) + nbunch = self.K3 + G.add_nodes_from(nbunch) + assert sorted(G.nodes(), key=str) == [ + 1, + 2, + 3, + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + ] + + # Edges + + def test_add_edge(self): + G = self.G() + pytest.raises(TypeError, G.add_edge, "A") + + G.add_edge("A", "B") # testing add_edge() + G.add_edge("A", "B") # adding a duplicate edge is silently ignored + assert G.has_edge("A", "B") + assert not G.has_edge("A", "C") + assert G.has_edge(*("A", "B")) + if G.is_directed(): + assert not G.has_edge("B", "A") + else: + # G is undirected, so B->A is an edge + assert G.has_edge("B", "A") + + G.add_edge("A", "C") # test directedness + G.add_edge("C", "A") + G.remove_edge("C", "A") + if G.is_directed(): + assert G.has_edge("A", "C") + else: + assert not G.has_edge("A", "C") + assert not G.has_edge("C", "A") + + def test_self_loop(self): + G = self.G() + G.add_edge("A", "A") # test self loops + assert G.has_edge("A", "A") + G.remove_edge("A", "A") + G.add_edge("X", "X") + assert G.has_node("X") + G.remove_node("X") + G.add_edge("A", "Z") # should add the node silently + assert G.has_node("Z") + + def test_add_edges_from(self): + G = self.G() + G.add_edges_from([("B", "C")]) # test add_edges_from() + assert G.has_edge("B",
"C") + if G.is_directed(): + assert not G.has_edge("C", "B") + else: + assert G.has_edge("C", "B") # undirected + + G.add_edges_from([("D", "F"), ("B", "D")]) + assert G.has_edge("D", "F") + assert G.has_edge("B", "D") + + if G.is_directed(): + assert not G.has_edge("D", "B") + else: + assert G.has_edge("D", "B") # undirected + + def test_add_edges_from2(self): + G = self.G() + # after failing silently, should add 2nd edge + G.add_edges_from([tuple("IJ"), list("KK"), tuple("JK")]) + assert G.has_edge(*("I", "J")) + assert G.has_edge(*("K", "K")) + assert G.has_edge(*("J", "K")) + if G.is_directed(): + assert not G.has_edge(*("K", "J")) + else: + assert G.has_edge(*("K", "J")) + + def test_add_edges_from3(self): + G = self.G() + G.add_edges_from(zip(list("ACD"), list("CDE"))) + assert G.has_edge("D", "E") + assert not G.has_edge("E", "C") + + def test_remove_edge(self): + G = self.G() + G.add_nodes_from([1, 2, 3, "A", "B", "C", "D", "E", "F", "G", "H"]) + + G.add_edges_from(zip(list("MNOP"), list("NOPM"))) + assert G.has_edge("O", "P") + assert G.has_edge("P", "M") + G.remove_node("P") # tests remove_node()'s handling of edges. + assert not G.has_edge("P", "M") + pytest.raises(TypeError, G.remove_edge, "M") + + G.add_edge("N", "M") + assert G.has_edge("M", "N") + G.remove_edge("M", "N") + assert not G.has_edge("M", "N") + + # self loop fails silently + G.remove_edges_from([list("HI"), list("DF"), tuple("KK"), tuple("JK")]) + assert not G.has_edge("H", "I") + assert not G.has_edge("J", "K") + G.remove_edges_from([list("IJ"), list("KK"), list("JK")]) + assert not G.has_edge("I", "J") + G.remove_nodes_from(set("ZEFHIMNO")) + G.add_edge("J", "K") + + def test_edges_nbunch(self): + # Test G.edges(nbunch) with various forms of nbunch + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + # node not in nbunch should be quietly ignored + pytest.raises(nx.NetworkXError, G.edges, 6) + assert list(G.edges("Z")) == [] # iterable non-node + # nbunch can be an empty list + assert list(G.edges([])) == [] + if G.is_directed(): + elist = [("A", "B"), ("A", "C"), ("B", "D")] + else: + elist = [("A", "B"), ("A", "C"), ("B", "C"), ("B", "D")] + # nbunch can be a list + assert edges_equal(list(G.edges(["A", "B"])), elist) + # nbunch can be a set + assert edges_equal(G.edges({"A", "B"}), elist) + # nbunch can be a graph + G1 = self.G() + G1.add_nodes_from("AB") + assert edges_equal(G.edges(G1), elist) + # nbunch can be a dict with nodes as keys + ndict = {"A": "thing1", "B": "thing2"} + assert edges_equal(G.edges(ndict), elist) + # nbunch can be a single node + assert edges_equal(list(G.edges("A")), [("A", "B"), ("A", "C")]) + assert nodes_equal(sorted(G), ["A", "B", "C", "D"]) + + # nbunch can be nothing (whole graph) + assert edges_equal( + list(G.edges()), + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")], + ) + + def test_degree(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + assert G.degree("A") == 2 + + # degree of single node in iterable container must return dict + assert list(G.degree(["A"])) == [("A", 2)] + assert sorted(d for n, d in G.degree(["A", "B"])) == [2, 3] + assert sorted(d for n, d in G.degree()) == [2, 2, 3, 3] + + def test_degree2(self): + H = self.G() + H.add_edges_from([(1, 24), (1, 2)]) + assert sorted(d for n, d in H.degree([1, 24])) == [1, 2] + + def test_degree_graph(self): + P3 = nx.path_graph(3) + P5 = nx.path_graph(5) + # silently ignore nodes not in P3 + assert dict(d 
for n, d in P3.degree(["A", "B"])) == {} + # nbunch can be a graph + assert sorted(d for n, d in P5.degree(P3)) == [1, 2, 2] + # nbunch can be a graph that's way too big + assert sorted(d for n, d in P3.degree(P5)) == [1, 1, 2] + assert list(P5.degree([])) == [] + assert dict(P5.degree([])) == {} + + def test_null(self): + null = nx.null_graph() + assert list(null.degree()) == [] + assert dict(null.degree()) == {} + + def test_order_size(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + assert G.order() == 4 + assert G.size() == 5 + assert G.number_of_edges() == 5 + assert G.number_of_edges("A", "B") == 1 + assert G.number_of_edges("A", "D") == 0 + + def test_copy(self): + G = self.G() + H = G.copy() # copy + assert H.adj == G.adj + assert H.name == G.name + assert H is not G + + def test_subgraph(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + SG = G.subgraph(["A", "B", "D"]) + assert nodes_equal(list(SG), ["A", "B", "D"]) + assert edges_equal(list(SG.edges()), [("A", "B"), ("B", "D")]) + + def test_to_directed(self): + G = self.G() + if not G.is_directed(): + G.add_edges_from( + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + + DG = G.to_directed() + assert DG is not G # directed copy or copy + + assert DG.is_directed() + assert DG.name == G.name + assert DG.adj == G.adj + assert sorted(DG.out_edges(list("AB"))) == [ + ("A", "B"), + ("A", "C"), + ("B", "A"), + ("B", "C"), + ("B", "D"), + ] + DG.remove_edge("A", "B") + assert DG.has_edge("B", "A") # this removes B-A but not A-B + assert not DG.has_edge("A", "B") + + def test_to_undirected(self): + G = self.G() + if G.is_directed(): + G.add_edges_from( + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + UG = G.to_undirected() # to_undirected + assert UG is not G + assert not UG.is_directed() + assert G.is_directed() + assert UG.name == G.name + assert UG.adj != G.adj + assert sorted(UG.edges(list("AB"))) == [ + ("A", "B"), + ("A", "C"), + ("B", "C"), + ("B", "D"), + ] + assert sorted(UG.edges(["A", "B"])) == [ + ("A", "B"), + ("A", "C"), + ("B", "C"), + ("B", "D"), + ] + UG.remove_edge("A", "B") + assert not UG.has_edge("B", "A") + assert not UG.has_edge("A", "B") + + def test_neighbors(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + G.add_nodes_from("GJK") + assert sorted(G["A"]) == ["B", "C"] + assert sorted(G.neighbors("A")) == ["B", "C"] + assert sorted(G.neighbors("A")) == ["B", "C"] + assert sorted(G.neighbors("G")) == [] + pytest.raises(nx.NetworkXError, G.neighbors, "j") + + def test_iterators(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + G.add_nodes_from("GJK") + assert sorted(G.nodes()) == ["A", "B", "C", "D", "G", "J", "K"] + assert edges_equal( + G.edges(), [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + + assert sorted(v for k, v in G.degree()) == [0, 0, 0, 2, 2, 3, 3] + assert sorted(G.degree(), key=str) == [ + ("A", 2), + ("B", 3), + ("C", 3), + ("D", 2), + ("G", 0), + ("J", 0), + ("K", 0), + ] + assert sorted(G.neighbors("A")) == ["B", "C"] + pytest.raises(nx.NetworkXError, G.neighbors, "X") + G.clear() + assert nx.number_of_nodes(G) == 0 + assert nx.number_of_edges(G) == 0 + + def test_null_subgraph(self): + # Subgraph of a null graph is a null graph + nullgraph = nx.null_graph() + G = nx.null_graph() + H = G.subgraph([]) + assert 
nx.is_isomorphic(H, nullgraph) + + def test_empty_subgraph(self): + # Subgraph of an empty graph is an empty graph. test 1 + nullgraph = nx.null_graph() + E5 = nx.empty_graph(5) + E10 = nx.empty_graph(10) + H = E10.subgraph([]) + assert nx.is_isomorphic(H, nullgraph) + H = E10.subgraph([1, 2, 3, 4, 5]) + assert nx.is_isomorphic(H, E5) + + def test_complete_subgraph(self): + # Subgraph of a complete graph is a complete graph + K1 = nx.complete_graph(1) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + H = K5.subgraph([1, 2, 3]) + assert nx.is_isomorphic(H, K3) + + def test_subgraph_nbunch(self): + nullgraph = nx.null_graph() + K1 = nx.complete_graph(1) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + # Test G.subgraph(nbunch), where nbunch is a single node + H = K5.subgraph(1) + assert nx.is_isomorphic(H, K1) + # Test G.subgraph(nbunch), where nbunch is a set + H = K5.subgraph({1}) + assert nx.is_isomorphic(H, K1) + # Test G.subgraph(nbunch), where nbunch is an iterator + H = K5.subgraph(iter(K3)) + assert nx.is_isomorphic(H, K3) + # Test G.subgraph(nbunch), where nbunch is another graph + H = K5.subgraph(K3) + assert nx.is_isomorphic(H, K3) + H = K5.subgraph([9]) + assert nx.is_isomorphic(H, nullgraph) + + def test_node_tuple_issue(self): + H = self.G() + # Test error handling of tuple as a node + pytest.raises(nx.NetworkXError, H.remove_node, (1, 2)) + H.remove_nodes_from([(1, 2)]) # no error + pytest.raises(nx.NetworkXError, H.neighbors, (1, 2)) diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_coreviews.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_coreviews.py new file mode 100644 index 0000000..24de7f2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_coreviews.py @@ -0,0 +1,362 @@ +import pickle + +import pytest + +import networkx as nx + + +class TestAtlasView: + # node->data + def setup_method(self): + self.d = {0: {"color": "blue", "weight": 1.2}, 1: {}, 2: {"color": 1}} + self.av = nx.classes.coreviews.AtlasView(self.d) + + def test_pickle(self): + view = self.av + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + pview = pickle.loads(pickle.dumps(view)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.av) == len(self.d) + + def test_iter(self): + assert list(self.av) == list(self.d) + + def test_getitem(self): + assert self.av[1] is self.d[1] + assert self.av[2]["color"] == 1 + pytest.raises(KeyError, self.av.__getitem__, 3) + + def test_copy(self): + avcopy = self.av.copy() + assert avcopy[0] == self.av[0] + assert avcopy == self.av + assert avcopy[0] is not self.av[0] + assert avcopy is not self.av + avcopy[5] = {} + assert avcopy != self.av + + avcopy[0]["ht"] = 4 + assert avcopy[0] != self.av[0] + self.av[0]["ht"] = 4 + assert avcopy[0] == self.av[0] + del self.av[0]["ht"] + + assert not hasattr(self.av, "__setitem__") + + def test_items(self): + assert sorted(self.av.items()) == sorted(self.d.items()) + + def test_str(self): + out = str(self.d) + assert str(self.av) == out + + def test_repr(self): + out = "AtlasView(" + str(self.d) + ")" + assert repr(self.av) == out + + +class TestAdjacencyView: + # node->nbr->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.nd = {0: dd, 1: {}, 2: {"color": 1}} + self.adj = {3: self.nd, 0: {3: dd}, 1: {}, 2: {3: {"color": 1}}} + self.adjview = nx.classes.coreviews.AdjacencyView(self.adj) + + def 
test_pickle(self): + view = self.adjview + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.adjview) == len(self.adj) + + def test_iter(self): + assert list(self.adjview) == list(self.adj) + + def test_getitem(self): + assert self.adjview[1] is not self.adj[1] + assert self.adjview[3][0] is self.adjview[0][3] + assert self.adjview[2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + def test_items(self): + view_items = sorted((n, dict(d)) for n, d in self.adjview.items()) + assert view_items == sorted(self.adj.items()) + + def test_str(self): + out = str(dict(self.adj)) + assert str(self.adjview) == out + + def test_repr(self): + out = self.adjview.__class__.__name__ + "(" + str(self.adj) + ")" + assert repr(self.adjview) == out + + +class TestMultiAdjacencyView(TestAdjacencyView): + # node->nbr->key->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {0: dd, 1: {}, 2: {"color": 1}} + self.nd = {3: self.kd, 0: {3: dd}, 1: {0: {}}, 2: {3: {"color": 1}}} + self.adj = {3: self.nd, 0: {3: {3: dd}}, 1: {}, 2: {3: {8: {}}}} + self.adjview = nx.classes.coreviews.MultiAdjacencyView(self.adj) + + def test_getitem(self): + assert self.adjview[1] is not self.adj[1] + assert self.adjview[3][0][3] is self.adjview[0][3][3] + assert self.adjview[3][2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3][8]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3][8]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3][8]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + +class TestUnionAtlas: + # node->data + def setup_method(self): + self.s = {0: {"color": "blue", "weight": 1.2}, 1: {}, 2: {"color": 1}} + self.p = {3: {"color": "blue", "weight": 1.2}, 4: {}, 2: {"watch": 2}} + self.av = nx.classes.coreviews.UnionAtlas(self.s, self.p) + + def test_pickle(self): + view = self.av + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.av) == len(self.s.keys() | self.p.keys()) == 5 + + def test_iter(self): + assert set(self.av) == set(self.s) | set(self.p) + + def test_getitem(self): + assert self.av[0] is self.s[0] + assert self.av[4] is self.p[4] + assert self.av[2]["color"] == 1 + pytest.raises(KeyError, self.av[2].__getitem__, "watch") + pytest.raises(KeyError, self.av.__getitem__, 8) + + def test_copy(self): + avcopy = self.av.copy() + assert avcopy[0] == self.av[0] + assert avcopy[0] is not self.av[0] + assert avcopy is not self.av + avcopy[5] = {} + assert avcopy != self.av + + avcopy[0]["ht"] = 4 + assert avcopy[0] != self.av[0] + self.av[0]["ht"] = 4 + assert avcopy[0] == self.av[0] + del self.av[0]["ht"] + + assert not hasattr(self.av, "__setitem__") + + def test_items(self): + expected = dict(self.p.items()) + expected.update(self.s) + assert sorted(self.av.items()) == 
sorted(expected.items()) + + def test_str(self): + out = str(dict(self.av)) + assert str(self.av) == out + + def test_repr(self): + out = f"{self.av.__class__.__name__}({self.s}, {self.p})" + assert repr(self.av) == out + + +class TestUnionAdjacency: + # node->nbr->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.nd = {0: dd, 1: {}, 2: {"color": 1}} + self.s = {3: self.nd, 0: {}, 1: {}, 2: {3: {"color": 1}}} + self.p = {3: {}, 0: {3: dd}, 1: {0: {}}, 2: {1: {"color": 1}}} + self.adjview = nx.classes.coreviews.UnionAdjacency(self.s, self.p) + + def test_pickle(self): + view = self.adjview + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.adjview) == len(self.s) + + def test_iter(self): + assert sorted(self.adjview) == sorted(self.s) + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[3][0] is self.adjview[0][3] + assert self.adjview[2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + def test_str(self): + out = str(dict(self.adjview)) + assert str(self.adjview) == out + + def test_repr(self): + clsname = self.adjview.__class__.__name__ + out = f"{clsname}({self.s}, {self.p})" + assert repr(self.adjview) == out + + +class TestUnionMultiInner(TestUnionAdjacency): + # nbr->key->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {7: {}, "ekey": {}, 9: {"color": 1}} + self.s = {3: self.kd, 0: {7: dd}, 1: {}, 2: {"key": {"color": 1}}} + self.p = {3: {}, 0: {3: dd}, 1: {}, 2: {1: {"span": 2}}} + self.adjview = nx.classes.coreviews.UnionMultiInner(self.s, self.p) + + def test_len(self): + assert len(self.adjview) == len(self.s.keys() | self.p.keys()) == 4 + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[0][7] is self.adjview[0][3] + assert self.adjview[2]["key"]["color"] == 1 + assert self.adjview[2][1]["span"] == 2 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + pytest.raises(KeyError, self.adjview[1].__getitem__, "key") + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][1]["width"] = 8 + assert avcopy[2] != self.adjview[2] + self.adjview[2][1]["width"] = 8 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][1]["width"] + + assert not hasattr(self.adjview, "__setitem__") + assert hasattr(avcopy, "__setitem__") + + +class TestUnionMultiAdjacency(TestUnionAdjacency): + # node->nbr->key->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {7: {}, 8: {}, 9: {"color": 1}} + self.nd = {3: self.kd, 0: {9: dd}, 1: {8: {}}, 2: {9: {"color": 1}}} + self.s = {3: self.nd, 0: {3: {7: dd}}, 1: {}, 2: {3: {8: {}}}} + self.p = {3: {}, 0: {3: {9: dd}}, 1: {}, 2: {1: {8: {}}}} + self.adjview = nx.classes.coreviews.UnionMultiAdjacency(self.s, self.p) + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[3][0][9] is self.adjview[0][3][9] + assert self.adjview[3][2][9]["color"] == 1 + pytest.raises(KeyError, 
self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3][8]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3][8]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3][8]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + assert hasattr(avcopy, "__setitem__") + + +class TestFilteredGraphs: + def setup_method(self): + self.Graphs = [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] + + def test_hide_show_nodes(self): + SubGraph = nx.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, filter_node=nx.filters.hide_nodes([0, 1])) + assert SG.nodes == RG.nodes + assert SG.edges == RG.edges + SGC = SG.copy() + RGC = RG.copy() + assert SGC.nodes == RGC.nodes + assert SGC.edges == RGC.edges + + def test_str_repr(self): + SubGraph = nx.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, filter_node=nx.filters.hide_nodes([0, 1])) + str(SG.adj) + str(RG.adj) + repr(SG.adj) + repr(RG.adj) + str(SG.adj[2]) + str(RG.adj[2]) + repr(SG.adj[2]) + repr(RG.adj[2]) + + def test_copy(self): + SubGraph = nx.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, filter_node=nx.filters.hide_nodes([0, 1])) + RsG = SubGraph(G, filter_node=nx.filters.show_nodes([2, 3])) + assert G.adj.copy() == G.adj + assert G.adj[2].copy() == G.adj[2] + assert SG.adj.copy() == SG.adj + assert SG.adj[2].copy() == SG.adj[2] + assert RG.adj.copy() == RG.adj + assert RG.adj[2].copy() == RG.adj[2] + assert RsG.adj.copy() == RsG.adj + assert RsG.adj[2].copy() == RsG.adj[2] diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_digraph.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_digraph.py new file mode 100644 index 0000000..b9972f9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_digraph.py @@ -0,0 +1,331 @@ +import pytest + +import networkx as nx +from networkx.utils import nodes_equal + +from .test_graph import BaseAttrGraphTester, BaseGraphTester +from .test_graph import TestEdgeSubgraph as _TestGraphEdgeSubgraph +from .test_graph import TestGraph as _TestGraph + + +class BaseDiGraphTester(BaseGraphTester): + def test_has_successor(self): + G = self.K3 + assert G.has_successor(0, 1) + assert not G.has_successor(0, -1) + + def test_successors(self): + G = self.K3 + assert sorted(G.successors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.successors(-1) + + def test_has_predecessor(self): + G = self.K3 + assert G.has_predecessor(0, 1) + assert not G.has_predecessor(0, -1) + + def test_predecessors(self): + G = self.K3 + assert sorted(G.predecessors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.predecessors(-1) + + def test_edges(self): + G = self.K3 + assert sorted(G.edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + assert sorted(G.edges([0, 1])) == [(0, 1), (0, 2), (1, 0), (1, 2)] + with pytest.raises(nx.NetworkXError): + G.edges(-1) + + def test_out_edges(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + with pytest.raises(nx.NetworkXError): + G.out_edges(-1) + + def test_out_edges_dir(self): + G = self.P3 + 
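+ # Here P3 is the directed path 0 -> 1 -> 2, so node 2 has no out-edges.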
assert sorted(G.out_edges()) == [(0, 1), (1, 2)] + assert sorted(G.out_edges(0)) == [(0, 1)] + assert sorted(G.out_edges(2)) == [] + + def test_out_edges_data(self): + G = nx.DiGraph([(0, 1, {"data": 0}), (1, 0, {})]) + assert sorted(G.out_edges(data=True)) == [(0, 1, {"data": 0}), (1, 0, {})] + assert sorted(G.out_edges(0, data=True)) == [(0, 1, {"data": 0})] + assert sorted(G.out_edges(data="data")) == [(0, 1, 0), (1, 0, None)] + assert sorted(G.out_edges(0, data="data")) == [(0, 1, 0)] + + def test_in_edges_dir(self): + G = self.P3 + assert sorted(G.in_edges()) == [(0, 1), (1, 2)] + assert sorted(G.in_edges(0)) == [] + assert sorted(G.in_edges(2)) == [(1, 2)] + + def test_in_edges_data(self): + G = nx.DiGraph([(0, 1, {"data": 0}), (1, 0, {})]) + assert sorted(G.in_edges(data=True)) == [(0, 1, {"data": 0}), (1, 0, {})] + assert sorted(G.in_edges(1, data=True)) == [(0, 1, {"data": 0})] + assert sorted(G.in_edges(data="data")) == [(0, 1, 0), (1, 0, None)] + assert sorted(G.in_edges(1, data="data")) == [(0, 1, 0)] + + def test_degree(self): + G = self.K3 + assert sorted(G.degree()) == [(0, 4), (1, 4), (2, 4)] + assert dict(G.degree()) == {0: 4, 1: 4, 2: 4} + assert G.degree(0) == 4 + assert list(G.degree(iter([0]))) == [(0, 4)] # run through iterator + + def test_in_degree(self): + G = self.K3 + assert sorted(G.in_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.in_degree()) == {0: 2, 1: 2, 2: 2} + assert G.in_degree(0) == 2 + assert list(G.in_degree(iter([0]))) == [(0, 2)] # run through iterator + + def test_out_degree(self): + G = self.K3 + assert sorted(G.out_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.out_degree()) == {0: 2, 1: 2, 2: 2} + assert G.out_degree(0) == 2 + assert list(G.out_degree(iter([0]))) == [(0, 2)] + + def test_size(self): + G = self.K3 + assert G.size() == 6 + assert G.number_of_edges() == 6 + + def test_to_undirected_reciprocal(self): + G = self.Graph() + G.add_edge(1, 2) + assert G.to_undirected().has_edge(1, 2) + assert not G.to_undirected(reciprocal=True).has_edge(1, 2) + G.add_edge(2, 1) + assert G.to_undirected(reciprocal=True).has_edge(1, 2) + + def test_reverse_copy(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + R = G.reverse() + assert sorted(R.edges()) == [(1, 0), (2, 1)] + R.remove_edge(1, 0) + assert sorted(R.edges()) == [(2, 1)] + assert sorted(G.edges()) == [(0, 1), (1, 2)] + + def test_reverse_nocopy(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + R = G.reverse(copy=False) + assert sorted(R.edges()) == [(1, 0), (2, 1)] + with pytest.raises(nx.NetworkXError): + R.remove_edge(1, 0) + + def test_reverse_hashable(self): + class Foo: + pass + + x = Foo() + y = Foo() + G = nx.DiGraph() + G.add_edge(x, y) + assert nodes_equal(G.nodes(), G.reverse().nodes()) + assert [(y, x)] == list(G.reverse().edges()) + + def test_di_cache_reset(self): + G = self.K3.copy() + old_succ = G.succ + assert id(G.succ) == id(old_succ) + old_adj = G.adj + assert id(G.adj) == id(old_adj) + + G._succ = {} + assert id(G.succ) != id(old_succ) + assert id(G.adj) != id(old_adj) + + old_pred = G.pred + assert id(G.pred) == id(old_pred) + G._pred = {} + assert id(G.pred) != id(old_pred) + + def test_di_attributes_cached(self): + G = self.K3.copy() + assert id(G.in_edges) == id(G.in_edges) + assert id(G.out_edges) == id(G.out_edges) + assert id(G.in_degree) == id(G.in_degree) + assert id(G.out_degree) == id(G.out_degree) + assert id(G.succ) == id(G.succ) + assert id(G.pred) == id(G.pred) + + +class BaseAttrDiGraphTester(BaseDiGraphTester, BaseAttrGraphTester): + def 
test_edges_data(self): + G = self.K3 + all_edges = [ + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, {}), + (2, 1, {}), + ] + assert sorted(G.edges(data=True)) == all_edges + assert sorted(G.edges(0, data=True)) == all_edges[:2] + assert sorted(G.edges([0, 1], data=True)) == all_edges[:4] + with pytest.raises(nx.NetworkXError): + G.edges(-1, True) + + def test_in_degree_weighted(self): + G = self.K3.copy() + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.in_degree(weight="weight")) == [(0, 2), (1, 1.3), (2, 2)] + assert dict(G.in_degree(weight="weight")) == {0: 2, 1: 1.3, 2: 2} + assert G.in_degree(1, weight="weight") == 1.3 + assert sorted(G.in_degree(weight="other")) == [(0, 2), (1, 2.2), (2, 2)] + assert dict(G.in_degree(weight="other")) == {0: 2, 1: 2.2, 2: 2} + assert G.in_degree(1, weight="other") == 2.2 + assert list(G.in_degree(iter([1]), weight="other")) == [(1, 2.2)] + + def test_out_degree_weighted(self): + G = self.K3.copy() + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.out_degree(weight="weight")) == [(0, 1.3), (1, 2), (2, 2)] + assert dict(G.out_degree(weight="weight")) == {0: 1.3, 1: 2, 2: 2} + assert G.out_degree(0, weight="weight") == 1.3 + assert sorted(G.out_degree(weight="other")) == [(0, 2.2), (1, 2), (2, 2)] + assert dict(G.out_degree(weight="other")) == {0: 2.2, 1: 2, 2: 2} + assert G.out_degree(0, weight="other") == 2.2 + assert list(G.out_degree(iter([0]), weight="other")) == [(0, 2.2)] + + +class TestDiGraph(BaseAttrDiGraphTester, _TestGraph): + """Tests specific to dict-of-dict-of-dict digraph data structure""" + + def setup_method(self): + self.Graph = nx.DiGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3, ed4, ed5, ed6 = ({}, {}, {}, {}, {}, {}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.k3adj # K3._adj is synced with K3._succ + self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}} + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + ed1, ed2 = ({}, {}) + self.P3 = self.Graph() + self.P3._succ = {0: {1: ed1}, 1: {2: ed2}, 2: {}} + self.P3._pred = {0: {}, 1: {0: ed1}, 2: {1: ed2}} + # P3._adj is synced with P3._succ + self.P3._node = {} + self.P3._node[0] = {} + self.P3._node[1] = {} + self.P3._node[2] = {} + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + assert sorted(G.adj.items()) == [(1, {2: {}}), (2, {1: {}})] + assert sorted(G.succ.items()) == [(1, {2: {}}), (2, {1: {}})] + assert sorted(G.pred.items()) == [(1, {2: {}}), (2, {1: {}})] + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {}}, 1: {}} + assert G.succ == {0: {1: {}}, 1: {}} + assert G.pred == {0: {}, 1: {0: {}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {}}, 1: {}} + assert G.succ == {0: {1: {}}, 1: {}} + assert G.pred == {0: {}, 1: {0: {}}} + with pytest.raises(ValueError, match="None cannot be a node"): + G.add_edge(None, 3) + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"data": 3})], data=2) + assert G.adj == {0: {1: {"data": 2}, 2: {"data": 3}}, 1: {}, 2: {}} + assert G.succ == {0: {1: {"data": 2}, 2: {"data": 3}}, 1: {}, 2: {}} + assert G.pred == {0: {}, 1: {0: {"data": 2}}, 2: {0: {"data": 3}}} + + with pytest.raises(nx.NetworkXError): + 
G.add_edges_from([(0,)]) # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3)]) # too many in tuple + with pytest.raises(TypeError): + G.add_edges_from([0]) # not a tuple + with pytest.raises(ValueError, match="None cannot be a node"): + G.add_edges_from([(None, 3), (3, 2)]) + + def test_remove_edge(self): + G = self.K3.copy() + G.remove_edge(0, 1) + assert G.succ == {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}} + assert G.pred == {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + assert G.succ == {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}} + assert G.pred == {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + G.remove_edges_from([(0, 0)]) # silent fail + + def test_clear(self): + G = self.K3 + G.graph["name"] = "K3" + G.clear() + assert list(G.nodes) == [] + assert G.succ == {} + assert G.pred == {} + assert G.graph == {} + + def test_clear_edges(self): + G = self.K3 + G.graph["name"] = "K3" + nodes = list(G.nodes) + G.clear_edges() + assert list(G.nodes) == nodes + expected = {0: {}, 1: {}, 2: {}} + assert G.succ == expected + assert G.pred == expected + assert list(G.edges) == [] + assert G.graph["name"] == "K3" + + +class TestEdgeSubgraph(_TestGraphEdgeSubgraph): + """Unit tests for the :meth:`DiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a doubly-linked path graph on five nodes. + G = nx.DiGraph(nx.path_graph(5)) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1), (3, 4)]) + + def test_pred_succ(self): + """Test that nodes are added to predecessors and successors. + + For more information, see GitHub issue #2370. 
+ + """ + G = nx.DiGraph() + G.add_edge(0, 1) + H = G.edge_subgraph([(0, 1)]) + assert list(H.predecessors(0)) == [] + assert list(H.successors(0)) == [1] + assert list(H.predecessors(1)) == [0] + assert list(H.successors(1)) == [] diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_digraph_historical.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_digraph_historical.py new file mode 100644 index 0000000..ff9f9e9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_digraph_historical.py @@ -0,0 +1,110 @@ +"""Original NetworkX graph tests""" + +import pytest + +import networkx as nx + +from .historical_tests import HistoricalTests + + +class TestDiGraphHistorical(HistoricalTests): + @classmethod + def setup_class(cls): + HistoricalTests.setup_class() + cls.G = nx.DiGraph + + def test_in_degree(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + + assert sorted(d for n, d in G.in_degree()) == [0, 0, 0, 0, 1, 2, 2] + assert dict(G.in_degree()) == { + "A": 0, + "C": 2, + "B": 1, + "D": 2, + "G": 0, + "K": 0, + "J": 0, + } + + def test_out_degree(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(v for k, v in G.in_degree()) == [0, 0, 0, 0, 1, 2, 2] + assert dict(G.out_degree()) == { + "A": 2, + "C": 1, + "B": 2, + "D": 0, + "G": 0, + "K": 0, + "J": 0, + } + + def test_degree_digraph(self): + H = nx.DiGraph() + H.add_edges_from([(1, 24), (1, 2)]) + assert sorted(d for n, d in H.in_degree([1, 24])) == [0, 1] + assert sorted(d for n, d in H.out_degree([1, 24])) == [0, 2] + assert sorted(d for n, d in H.degree([1, 24])) == [1, 2] + + def test_neighbors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + + assert sorted(G.neighbors("C")) == ["D"] + assert sorted(G["C"]) == ["D"] + assert sorted(G.neighbors("A")) == ["B", "C"] + pytest.raises(nx.NetworkXError, G.neighbors, "j") + pytest.raises(nx.NetworkXError, G.neighbors, "j") + + def test_successors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(G.successors("A")) == ["B", "C"] + assert sorted(G.successors("A")) == ["B", "C"] + assert sorted(G.successors("G")) == [] + assert sorted(G.successors("D")) == [] + assert sorted(G.successors("G")) == [] + pytest.raises(nx.NetworkXError, G.successors, "j") + pytest.raises(nx.NetworkXError, G.successors, "j") + + def test_predecessors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(G.predecessors("C")) == ["A", "B"] + assert sorted(G.predecessors("C")) == ["A", "B"] + assert sorted(G.predecessors("G")) == [] + assert sorted(G.predecessors("A")) == [] + assert sorted(G.predecessors("G")) == [] + assert sorted(G.predecessors("A")) == [] + assert sorted(G.successors("D")) == [] + + pytest.raises(nx.NetworkXError, G.predecessors, "j") + pytest.raises(nx.NetworkXError, G.predecessors, "j") + + def test_reverse(self): + G = nx.complete_graph(10) + H = G.to_directed() + HR = H.reverse() + assert nx.is_isomorphic(H, HR) + assert sorted(H.edges()) == sorted(HR.edges()) + + def test_reverse2(self): + H = nx.DiGraph() + foo = [H.add_edge(u, u + 1) for u in range(5)] + HR = H.reverse() + for u in range(5): 
+ assert HR.has_edge(u + 1, u) + + def test_reverse3(self): + H = nx.DiGraph() + H.add_nodes_from([1, 2, 3, 4]) + HR = H.reverse() + assert sorted(HR.nodes()) == [1, 2, 3, 4] diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_filters.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_filters.py new file mode 100644 index 0000000..2da5911 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_filters.py @@ -0,0 +1,177 @@ +import pytest + +import networkx as nx + + +class TestFilterFactory: + def test_no_filter(self): + nf = nx.filters.no_filter + assert nf() + assert nf(1) + assert nf(2, 1) + + def test_hide_nodes(self): + f = nx.classes.filters.hide_nodes([1, 2, 3]) + assert not f(1) + assert not f(2) + assert not f(3) + assert f(4) + assert f(0) + assert f("a") + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f) + + def test_show_nodes(self): + f = nx.classes.filters.show_nodes([1, 2, 3]) + assert f(1) + assert f(2) + assert f(3) + assert not f(4) + assert not f(0) + assert not f("a") + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f) + + def test_hide_edges(self): + factory = nx.classes.filters.hide_edges + f = factory([(1, 2), (3, 4)]) + assert not f(1, 2) + assert not f(3, 4) + assert not f(4, 3) + assert f(2, 3) + assert f(0, -1) + assert f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_show_edges(self): + factory = nx.classes.filters.show_edges + f = factory([(1, 2), (3, 4)]) + assert f(1, 2) + assert f(3, 4) + assert f(4, 3) + assert not f(2, 3) + assert not f(0, -1) + assert not f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_hide_diedges(self): + factory = nx.classes.filters.hide_diedges + f = factory([(1, 2), (3, 4)]) + assert not f(1, 2) + assert not f(3, 4) + assert f(4, 3) + assert f(2, 3) + assert f(0, -1) + assert f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_show_diedges(self): + factory = nx.classes.filters.show_diedges + f = factory([(1, 2), (3, 4)]) + assert f(1, 2) + assert f(3, 4) + assert not f(4, 3) + assert not f(2, 3) + assert not f(0, -1) + assert not f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_hide_multiedges(self): + factory = nx.classes.filters.hide_multiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert not f(1, 2, 0) + assert not f(1, 2, 1) + assert f(1, 2, 2) + assert f(3, 4, 0) + assert not f(3, 4, 1) + assert not f(4, 3, 1) + assert f(4, 3, 0) + assert f(2, 3, 0) + assert f(0, -1, 0) + assert f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) + + def test_show_multiedges(self): + factory = 
nx.classes.filters.show_multiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert f(1, 2, 0) + assert f(1, 2, 1) + assert not f(1, 2, 2) + assert not f(3, 4, 0) + assert f(3, 4, 1) + assert f(4, 3, 1) + assert not f(4, 3, 0) + assert not f(2, 3, 0) + assert not f(0, -1, 0) + assert not f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) + + def test_hide_multidiedges(self): + factory = nx.classes.filters.hide_multidiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert not f(1, 2, 0) + assert not f(1, 2, 1) + assert f(1, 2, 2) + assert f(3, 4, 0) + assert not f(3, 4, 1) + assert f(4, 3, 1) + assert f(4, 3, 0) + assert f(2, 3, 0) + assert f(0, -1, 0) + assert f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) + + def test_show_multidiedges(self): + factory = nx.classes.filters.show_multidiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert f(1, 2, 0) + assert f(1, 2, 1) + assert not f(1, 2, 2) + assert not f(3, 4, 0) + assert f(3, 4, 1) + assert not f(4, 3, 1) + assert not f(4, 3, 0) + assert not f(2, 3, 0) + assert not f(0, -1, 0) + assert not f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_function.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_function.py new file mode 100644 index 0000000..9ace420 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_function.py @@ -0,0 +1,1035 @@ +import random + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def test_degree_histogram_empty(): + G = nx.Graph() + assert nx.degree_histogram(G) == [] + + +class TestFunction: + def setup_method(self): + self.G = nx.Graph({0: [1, 2, 3], 1: [1, 2, 0], 4: []}, name="Test") + self.Gdegree = {0: 3, 1: 2, 2: 2, 3: 1, 4: 0} + self.Gnodes = list(range(5)) + self.Gedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)] + self.DG = nx.DiGraph({0: [1, 2, 3], 1: [1, 2, 0], 4: []}) + self.DGin_degree = {0: 1, 1: 2, 2: 2, 3: 1, 4: 0} + self.DGout_degree = {0: 3, 1: 3, 2: 0, 3: 0, 4: 0} + self.DGnodes = list(range(5)) + self.DGedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)] + + def test_nodes(self): + assert nodes_equal(self.G.nodes(), list(nx.nodes(self.G))) + assert nodes_equal(self.DG.nodes(), list(nx.nodes(self.DG))) + + def test_edges(self): + assert edges_equal(self.G.edges(), list(nx.edges(self.G))) + assert sorted(self.DG.edges()) == sorted(nx.edges(self.DG)) + assert edges_equal( + self.G.edges(nbunch=[0, 1, 3]), list(nx.edges(self.G, nbunch=[0, 1, 3])) + ) + assert sorted(self.DG.edges(nbunch=[0, 1, 3])) == sorted( + nx.edges(self.DG, nbunch=[0, 1, 3]) + ) + + def test_degree(self): + assert edges_equal(self.G.degree(), list(nx.degree(self.G))) + 
assert sorted(self.DG.degree()) == sorted(nx.degree(self.DG)) + assert edges_equal( + self.G.degree(nbunch=[0, 1]), list(nx.degree(self.G, nbunch=[0, 1])) + ) + assert sorted(self.DG.degree(nbunch=[0, 1])) == sorted( + nx.degree(self.DG, nbunch=[0, 1]) + ) + assert edges_equal( + self.G.degree(weight="weight"), list(nx.degree(self.G, weight="weight")) + ) + assert sorted(self.DG.degree(weight="weight")) == sorted( + nx.degree(self.DG, weight="weight") + ) + + def test_neighbors(self): + assert list(self.G.neighbors(1)) == list(nx.neighbors(self.G, 1)) + assert list(self.DG.neighbors(1)) == list(nx.neighbors(self.DG, 1)) + + def test_number_of_nodes(self): + assert self.G.number_of_nodes() == nx.number_of_nodes(self.G) + assert self.DG.number_of_nodes() == nx.number_of_nodes(self.DG) + + def test_number_of_edges(self): + assert self.G.number_of_edges() == nx.number_of_edges(self.G) + assert self.DG.number_of_edges() == nx.number_of_edges(self.DG) + + def test_is_directed(self): + assert self.G.is_directed() == nx.is_directed(self.G) + assert self.DG.is_directed() == nx.is_directed(self.DG) + + def test_add_star(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + nx.add_star(G, nlist) + assert edges_equal(G.edges(nlist), [(12, 13), (12, 14), (12, 15)]) + + G = self.G.copy() + nx.add_star(G, nlist, weight=2.0) + assert edges_equal( + G.edges(nlist, data=True), + [ + (12, 13, {"weight": 2.0}), + (12, 14, {"weight": 2.0}), + (12, 15, {"weight": 2.0}), + ], + ) + + G = self.G.copy() + nlist = [12] + nx.add_star(G, nlist) + assert nodes_equal(G, list(self.G) + nlist) + + G = self.G.copy() + nlist = [] + nx.add_star(G, nlist) + assert nodes_equal(G.nodes, self.Gnodes) + assert edges_equal(G.edges, self.G.edges) + + def test_add_path(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), [(12, 13), (13, 14), (14, 15)]) + G = self.G.copy() + nx.add_path(G, nlist, weight=2.0) + assert edges_equal( + G.edges(nlist, data=True), + [ + (12, 13, {"weight": 2.0}), + (13, 14, {"weight": 2.0}), + (14, 15, {"weight": 2.0}), + ], + ) + + G = self.G.copy() + nlist = ["node"] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), []) + assert nodes_equal(G, list(self.G) + ["node"]) + + G = self.G.copy() + nlist = iter(["node"]) + nx.add_path(G, nlist) + assert edges_equal(G.edges(["node"]), []) + assert nodes_equal(G, list(self.G) + ["node"]) + + G = self.G.copy() + nlist = [12] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), []) + assert nodes_equal(G, list(self.G) + [12]) + + G = self.G.copy() + nlist = iter([12]) + nx.add_path(G, nlist) + assert edges_equal(G.edges([12]), []) + assert nodes_equal(G, list(self.G) + [12]) + + G = self.G.copy() + nlist = [] + nx.add_path(G, nlist) + assert edges_equal(G.edges, self.G.edges) + assert nodes_equal(G, list(self.G)) + + G = self.G.copy() + nlist = iter([]) + nx.add_path(G, nlist) + assert edges_equal(G.edges, self.G.edges) + assert nodes_equal(G, list(self.G)) + + def test_add_cycle(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + oklists = [ + [(12, 13), (12, 15), (13, 14), (14, 15)], + [(12, 13), (13, 14), (14, 15), (15, 12)], + ] + nx.add_cycle(G, nlist) + assert sorted(G.edges(nlist)) in oklists + G = self.G.copy() + oklists = [ + [ + (12, 13, {"weight": 1.0}), + (12, 15, {"weight": 1.0}), + (13, 14, {"weight": 1.0}), + (14, 15, {"weight": 1.0}), + ], + [ + (12, 13, {"weight": 1.0}), + (13, 14, {"weight": 1.0}), + (14, 15, {"weight": 1.0}), + (15, 12, {"weight": 1.0}), + ], 
+ ] + nx.add_cycle(G, nlist, weight=1.0) + assert sorted(G.edges(nlist, data=True)) in oklists + + G = self.G.copy() + nlist = [12] + nx.add_cycle(G, nlist) + assert nodes_equal(G, list(self.G) + nlist) + + G = self.G.copy() + nlist = [] + nx.add_cycle(G, nlist) + assert nodes_equal(G.nodes, self.Gnodes) + assert edges_equal(G.edges, self.G.edges) + + def test_subgraph(self): + assert ( + self.G.subgraph([0, 1, 2, 4]).adj == nx.subgraph(self.G, [0, 1, 2, 4]).adj + ) + assert ( + self.DG.subgraph([0, 1, 2, 4]).adj == nx.subgraph(self.DG, [0, 1, 2, 4]).adj + ) + assert ( + self.G.subgraph([0, 1, 2, 4]).adj + == nx.induced_subgraph(self.G, [0, 1, 2, 4]).adj + ) + assert ( + self.DG.subgraph([0, 1, 2, 4]).adj + == nx.induced_subgraph(self.DG, [0, 1, 2, 4]).adj + ) + # subgraph-subgraph chain is allowed in function interface + H = nx.induced_subgraph(self.G.subgraph([0, 1, 2, 4]), [0, 1, 4]) + assert H._graph is not self.G + assert H.adj == self.G.subgraph([0, 1, 4]).adj + + def test_edge_subgraph(self): + assert ( + self.G.edge_subgraph([(1, 2), (0, 3)]).adj + == nx.edge_subgraph(self.G, [(1, 2), (0, 3)]).adj + ) + assert ( + self.DG.edge_subgraph([(1, 2), (0, 3)]).adj + == nx.edge_subgraph(self.DG, [(1, 2), (0, 3)]).adj + ) + + def test_create_empty_copy(self): + G = nx.create_empty_copy(self.G, with_data=False) + assert nodes_equal(G, list(self.G)) + assert G.graph == {} + assert G._node == {}.fromkeys(self.G.nodes(), {}) + assert G._adj == {}.fromkeys(self.G.nodes(), {}) + G = nx.create_empty_copy(self.G) + assert nodes_equal(G, list(self.G)) + assert G.graph == self.G.graph + assert G._node == self.G._node + assert G._adj == {}.fromkeys(self.G.nodes(), {}) + + def test_degree_histogram(self): + assert nx.degree_histogram(self.G) == [1, 1, 1, 1, 1] + + def test_density(self): + assert nx.density(self.G) == 0.5 + assert nx.density(self.DG) == 0.3 + G = nx.Graph() + G.add_node(1) + assert nx.density(G) == 0.0 + + def test_density_selfloop(self): + G = nx.Graph() + G.add_edge(1, 1) + assert nx.density(G) == 0.0 + G.add_edge(1, 2) + assert nx.density(G) == 2.0 + + def test_freeze(self): + G = nx.freeze(self.G) + assert G.frozen + pytest.raises(nx.NetworkXError, G.add_node, 1) + pytest.raises(nx.NetworkXError, G.add_nodes_from, [1]) + pytest.raises(nx.NetworkXError, G.remove_node, 1) + pytest.raises(nx.NetworkXError, G.remove_nodes_from, [1]) + pytest.raises(nx.NetworkXError, G.add_edge, 1, 2) + pytest.raises(nx.NetworkXError, G.add_edges_from, [(1, 2)]) + pytest.raises(nx.NetworkXError, G.remove_edge, 1, 2) + pytest.raises(nx.NetworkXError, G.remove_edges_from, [(1, 2)]) + pytest.raises(nx.NetworkXError, G.clear_edges) + pytest.raises(nx.NetworkXError, G.clear) + + def test_is_frozen(self): + assert not nx.is_frozen(self.G) + G = nx.freeze(self.G) + assert G.frozen == nx.is_frozen(self.G) + assert G.frozen + + def test_node_attributes_are_still_mutable_on_frozen_graph(self): + G = nx.freeze(nx.path_graph(3)) + node = G.nodes[0] + node["node_attribute"] = True + assert node["node_attribute"] is True + + def test_edge_attributes_are_still_mutable_on_frozen_graph(self): + G = nx.freeze(nx.path_graph(3)) + edge = G.edges[(0, 1)] + edge["edge_attribute"] = True + assert edge["edge_attribute"] is True + + def test_neighbors_complete_graph(self): + graph = nx.complete_graph(100) + pop = random.sample(list(graph), 1) + nbors = list(nx.neighbors(graph, pop[0])) + # should be all the other vertices in the graph + assert len(nbors) == len(graph) - 1 + + graph = nx.path_graph(100) + node = 
random.sample(list(graph), 1)[0]
+        nbors = list(nx.neighbors(graph, node))
+        # interior path nodes have two neighbors; the endpoints have one
+        if node != 0 and node != 99:
+            assert len(nbors) == 2
+        else:
+            assert len(nbors) == 1
+
+        # create a star graph with 99 outer nodes
+        graph = nx.star_graph(99)
+        nbors = list(nx.neighbors(graph, 0))
+        assert len(nbors) == 99
+
+    def test_non_neighbors(self):
+        graph = nx.complete_graph(100)
+        pop = random.sample(list(graph), 1)
+        nbors = nx.non_neighbors(graph, pop[0])
+        # a complete graph has no non-neighbors
+        assert len(nbors) == 0
+
+        graph = nx.path_graph(100)
+        node = random.sample(list(graph), 1)[0]
+        nbors = nx.non_neighbors(graph, node)
+        # all the other vertices except the node and its neighbors
+        if node != 0 and node != 99:
+            assert len(nbors) == 97
+        else:
+            assert len(nbors) == 98
+
+        # create a star graph with 99 outer nodes
+        graph = nx.star_graph(99)
+        nbors = nx.non_neighbors(graph, 0)
+        assert len(nbors) == 0
+
+        # disconnected graph
+        graph = nx.Graph()
+        graph.add_nodes_from(range(10))
+        nbors = nx.non_neighbors(graph, 0)
+        assert len(nbors) == 9
+
+    def test_non_edges(self):
+        # All possible edges exist
+        graph = nx.complete_graph(5)
+        nedges = list(nx.non_edges(graph))
+        assert len(nedges) == 0
+
+        graph = nx.path_graph(4)
+        expected = [(0, 2), (0, 3), (1, 3)]
+        nedges = list(nx.non_edges(graph))
+        for u, v in expected:
+            assert (u, v) in nedges or (v, u) in nedges
+
+        graph = nx.star_graph(4)
+        expected = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
+        nedges = list(nx.non_edges(graph))
+        for u, v in expected:
+            assert (u, v) in nedges or (v, u) in nedges
+
+        # Directed graphs
+        graph = nx.DiGraph()
+        graph.add_edges_from([(0, 2), (2, 0), (2, 1)])
+        expected = [(0, 1), (1, 0), (1, 2)]
+        nedges = list(nx.non_edges(graph))
+        for e in expected:
+            assert e in nedges
+
+    def test_is_weighted(self):
+        G = nx.Graph()
+        assert not nx.is_weighted(G)
+
+        G = nx.path_graph(4)
+        assert not nx.is_weighted(G)
+        assert not nx.is_weighted(G, (2, 3))
+
+        G.add_node(4)
+        G.add_edge(3, 4, weight=4)
+        assert not nx.is_weighted(G)
+        assert nx.is_weighted(G, (3, 4))
+
+        G = nx.DiGraph()
+        G.add_weighted_edges_from(
+            [
+                ("0", "3", 3),
+                ("0", "1", -5),
+                ("1", "0", -5),
+                ("0", "2", 2),
+                ("1", "2", 4),
+                ("2", "3", 1),
+            ]
+        )
+        assert nx.is_weighted(G)
+        assert nx.is_weighted(G, ("1", "0"))
+
+        G = G.to_undirected()
+        assert nx.is_weighted(G)
+        assert nx.is_weighted(G, ("1", "0"))
+
+        pytest.raises(nx.NetworkXError, nx.is_weighted, G, (1, 2))
+
+    def test_is_negatively_weighted(self):
+        G = nx.Graph()
+        assert not nx.is_negatively_weighted(G)
+
+        G.add_node(1)
+        G.add_nodes_from([2, 3, 4, 5])
+        assert not nx.is_negatively_weighted(G)
+
+        G.add_edge(1, 2, weight=4)
+        assert not nx.is_negatively_weighted(G, (1, 2))
+
+        G.add_edges_from([(1, 3), (2, 4), (2, 6)])
+        G[1][3]["color"] = "blue"
+        assert not nx.is_negatively_weighted(G)
+        assert not nx.is_negatively_weighted(G, (1, 3))
+
+        G[2][4]["weight"] = -2
+        assert nx.is_negatively_weighted(G, (2, 4))
+        assert nx.is_negatively_weighted(G)
+
+        G = nx.DiGraph()
+        G.add_weighted_edges_from(
+            [
+                ("0", "3", 3),
+                ("0", "1", -5),
+                ("1", "0", -2),
+                ("0", "2", 2),
+                ("1", "2", -3),
+                ("2", "3", 1),
+            ]
+        )
+        assert nx.is_negatively_weighted(G)
+        assert not nx.is_negatively_weighted(G, ("0", "3"))
+        assert nx.is_negatively_weighted(G, ("1", "0"))
+
+        pytest.raises(nx.NetworkXError, nx.is_negatively_weighted, G, (1, 4))
+
+
+class TestCommonNeighbors:
+    @classmethod
+    def setup_class(cls):
+        cls.func = 
staticmethod(nx.common_neighbors) + + def test_func(G, u, v, expected): + result = sorted(cls.func(G, u, v)) + assert result == expected + + cls.test = staticmethod(test_func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, 0, 1, [2, 3, 4]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, 0, 2, [1]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, 1, 2, [0]) + + def test_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2)]) + self.func(G, 0, 2) + + def test_nonexistent_nodes(self): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 4) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 4, 5) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 6) + + def test_custom1(self): + """Case of no common neighbors.""" + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, 0, 1, []) + + def test_custom2(self): + """Case of equal nodes.""" + G = nx.complete_graph(4) + self.test(G, 0, 0, [1, 2, 3]) + + +@pytest.mark.parametrize( + "graph_type", (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph) +) +def test_set_node_attributes(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + vals = 100 + attr = "hello" + nx.set_node_attributes(G, vals, attr) + assert G.nodes[0][attr] == vals + assert G.nodes[1][attr] == vals + assert G.nodes[2][attr] == vals + + # Test dictionary + G = nx.path_graph(3, create_using=graph_type) + vals = dict(zip(sorted(G.nodes()), range(len(G)))) + attr = "hi" + nx.set_node_attributes(G, vals, attr) + assert G.nodes[0][attr] == 0 + assert G.nodes[1][attr] == 1 + assert G.nodes[2][attr] == 2 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + vals = dict.fromkeys(G.nodes(), d) + vals.pop(0) + nx.set_node_attributes(G, vals) + assert G.nodes[0] == {} + assert G.nodes[1]["hi"] == 0 + assert G.nodes[2]["hello"] == 200 + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({0: "red", 1: "blue"}, "color"), # values dictionary + ({0: {"color": "red"}, 1: {"color": "blue"}}, None), # dict-of-dict + ), +) +def test_set_node_attributes_ignores_extra_nodes(values, name): + """ + When `values` is a dict or dict-of-dict keyed by nodes, ensure that keys + that correspond to nodes not in G are ignored. 
+ """ + G = nx.Graph() + G.add_node(0) + nx.set_node_attributes(G, values, name) + assert G.nodes[0]["color"] == "red" + assert 1 not in G.nodes + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_set_edge_attributes(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + attr = "hello" + vals = 3 + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][attr] == vals + assert G[1][2][attr] == vals + + # Test multiple values + G = nx.path_graph(3, create_using=graph_type) + attr = "hi" + edges = [(0, 1), (1, 2)] + vals = dict(zip(edges, range(len(edges)))) + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][attr] == 0 + assert G[1][2][attr] == 1 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + edges = [(0, 1)] + vals = dict.fromkeys(edges, d) + nx.set_edge_attributes(G, vals) + assert G[0][1]["hi"] == 0 + assert G[0][1]["hello"] == 200 + assert G[1][2] == {} + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({(0, 1): 1.0, (0, 2): 2.0}, "weight"), # values dict + ({(0, 1): {"weight": 1.0}, (0, 2): {"weight": 2.0}}, None), # values dod + ), +) +def test_set_edge_attributes_ignores_extra_edges(values, name): + """If `values` is a dict or dict-of-dicts containing edges that are not in + G, data associate with these edges should be ignored. + """ + G = nx.Graph([(0, 1)]) + nx.set_edge_attributes(G, values, name) + assert G[0][1]["weight"] == 1.0 + assert (0, 2) not in G.edges + + +@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph)) +def test_set_edge_attributes_multi(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + attr = "hello" + vals = 3 + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][0][attr] == vals + assert G[1][2][0][attr] == vals + + # Test multiple values + G = nx.path_graph(3, create_using=graph_type) + attr = "hi" + edges = [(0, 1, 0), (1, 2, 0)] + vals = dict(zip(edges, range(len(edges)))) + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][0][attr] == 0 + assert G[1][2][0][attr] == 1 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + edges = [(0, 1, 0)] + vals = dict.fromkeys(edges, d) + nx.set_edge_attributes(G, vals) + assert G[0][1][0]["hi"] == 0 + assert G[0][1][0]["hello"] == 200 + assert G[1][2][0] == {} + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({(0, 1, 0): 1.0, (0, 2, 0): 2.0}, "weight"), # values dict + ({(0, 1, 0): {"weight": 1.0}, (0, 2, 0): {"weight": 2.0}}, None), # values dod + ), +) +def test_set_edge_attributes_multi_ignores_extra_edges(values, name): + """If `values` is a dict or dict-of-dicts containing edges that are not in + G, data associate with these edges should be ignored. 
+ """ + G = nx.MultiGraph([(0, 1, 0), (0, 1, 1)]) + nx.set_edge_attributes(G, values, name) + assert G[0][1][0]["weight"] == 1.0 + assert G[0][1][1] == {} + assert (0, 2) not in G.edges() + + +def test_get_node_attributes(): + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + for G in graphs: + G = nx.path_graph(3, create_using=G) + attr = "hello" + vals = 100 + nx.set_node_attributes(G, vals, attr) + attrs = nx.get_node_attributes(G, attr) + assert attrs[0] == vals + assert attrs[1] == vals + assert attrs[2] == vals + default_val = 1 + G.add_node(4) + attrs = nx.get_node_attributes(G, attr, default=default_val) + assert attrs[4] == default_val + + +def test_get_edge_attributes(): + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + for G in graphs: + G = nx.path_graph(3, create_using=G) + attr = "hello" + vals = 100 + nx.set_edge_attributes(G, vals, attr) + attrs = nx.get_edge_attributes(G, attr) + assert len(attrs) == 2 + + for edge in G.edges: + assert attrs[edge] == vals + + default_val = vals + G.add_edge(4, 5) + deafult_attrs = nx.get_edge_attributes(G, attr, default=default_val) + assert len(deafult_attrs) == 3 + + for edge in G.edges: + assert deafult_attrs[edge] == vals + + +@pytest.mark.parametrize( + "graph_type", (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph) +) +def test_remove_node_attributes(graph_type): + # Test removing single attribute + G = nx.path_graph(3, create_using=graph_type) + vals = 100 + attr = "hello" + nx.set_node_attributes(G, vals, attr) + nx.remove_node_attributes(G, attr) + assert attr not in G.nodes[0] + assert attr not in G.nodes[1] + assert attr not in G.nodes[2] + + # Test removing single attribute when multiple present + G = nx.path_graph(3, create_using=graph_type) + other_vals = 200 + other_attr = "other" + nx.set_node_attributes(G, vals, attr) + nx.set_node_attributes(G, other_vals, other_attr) + nx.remove_node_attributes(G, attr) + assert attr not in G.nodes[0] + assert G.nodes[0][other_attr] == other_vals + assert attr not in G.nodes[1] + assert G.nodes[1][other_attr] == other_vals + assert attr not in G.nodes[2] + assert G.nodes[2][other_attr] == other_vals + + # Test removing multiple attributes + G = nx.path_graph(3, create_using=graph_type) + nx.set_node_attributes(G, vals, attr) + nx.set_node_attributes(G, other_vals, other_attr) + nx.remove_node_attributes(G, attr, other_attr) + assert attr not in G.nodes[0] and other_attr not in G.nodes[0] + assert attr not in G.nodes[1] and other_attr not in G.nodes[1] + assert attr not in G.nodes[2] and other_attr not in G.nodes[2] + + # Test removing multiple (but not all) attributes + G = nx.path_graph(3, create_using=graph_type) + third_vals = 300 + third_attr = "three" + nx.set_node_attributes( + G, + { + n: {attr: vals, other_attr: other_vals, third_attr: third_vals} + for n in G.nodes() + }, + ) + nx.remove_node_attributes(G, other_attr, third_attr) + assert other_attr not in G.nodes[0] and third_attr not in G.nodes[0] + assert other_attr not in G.nodes[1] and third_attr not in G.nodes[1] + assert other_attr not in G.nodes[2] and third_attr not in G.nodes[2] + assert G.nodes[0][attr] == vals + assert G.nodes[1][attr] == vals + assert G.nodes[2][attr] == vals + + # Test incomplete node attributes + G = nx.path_graph(3, create_using=graph_type) + nx.set_node_attributes( + G, + { + 1: {attr: vals, other_attr: other_vals}, + 2: {attr: vals, other_attr: other_vals}, + }, + ) + nx.remove_node_attributes(G, attr) + assert attr not in G.nodes[0] + 
assert attr not in G.nodes[1] + assert attr not in G.nodes[2] + assert G.nodes[1][other_attr] == other_vals + assert G.nodes[2][other_attr] == other_vals + + # Test removing on a subset of nodes + G = nx.path_graph(3, create_using=graph_type) + nx.set_node_attributes( + G, + { + n: {attr: vals, other_attr: other_vals, third_attr: third_vals} + for n in G.nodes() + }, + ) + nx.remove_node_attributes(G, attr, other_attr, nbunch=[0, 1]) + assert attr not in G.nodes[0] and other_attr not in G.nodes[0] + assert attr not in G.nodes[1] and other_attr not in G.nodes[1] + assert attr in G.nodes[2] and other_attr in G.nodes[2] + assert third_attr in G.nodes[0] and G.nodes[0][third_attr] == third_vals + assert third_attr in G.nodes[1] and G.nodes[1][third_attr] == third_vals + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_remove_edge_attributes(graph_type): + # Test removing single attribute + G = nx.path_graph(3, create_using=graph_type) + attr = "hello" + vals = 100 + nx.set_edge_attributes(G, vals, attr) + nx.remove_edge_attributes(G, attr) + assert len(nx.get_edge_attributes(G, attr)) == 0 + + # Test removing only some attributes + G = nx.path_graph(3, create_using=graph_type) + other_attr = "other" + other_vals = 200 + nx.set_edge_attributes(G, vals, attr) + nx.set_edge_attributes(G, other_vals, other_attr) + nx.remove_edge_attributes(G, attr) + + assert attr not in G[0][1] + assert attr not in G[1][2] + assert G[0][1][other_attr] == 200 + assert G[1][2][other_attr] == 200 + + # Test removing multiple attributes + G = nx.path_graph(3, create_using=graph_type) + nx.set_edge_attributes(G, vals, attr) + nx.set_edge_attributes(G, other_vals, other_attr) + nx.remove_edge_attributes(G, attr, other_attr) + assert attr not in G[0][1] and other_attr not in G[0][1] + assert attr not in G[1][2] and other_attr not in G[1][2] + + # Test removing multiple (not all) attributes + G = nx.path_graph(3, create_using=graph_type) + third_attr = "third" + third_vals = 300 + nx.set_edge_attributes( + G, + { + (u, v): {attr: vals, other_attr: other_vals, third_attr: third_vals} + for u, v in G.edges() + }, + ) + nx.remove_edge_attributes(G, other_attr, third_attr) + assert other_attr not in G[0][1] and third_attr not in G[0][1] + assert other_attr not in G[1][2] and third_attr not in G[1][2] + assert G[0][1][attr] == vals + assert G[1][2][attr] == vals + + # Test removing incomplete edge attributes + G = nx.path_graph(3, create_using=graph_type) + nx.set_edge_attributes(G, {(0, 1): {attr: vals, other_attr: other_vals}}) + nx.remove_edge_attributes(G, other_attr) + assert other_attr not in G[0][1] and G[0][1][attr] == vals + assert other_attr not in G[1][2] + + # Test removing subset of edge attributes + G = nx.path_graph(3, create_using=graph_type) + nx.set_edge_attributes( + G, + { + (u, v): {attr: vals, other_attr: other_vals, third_attr: third_vals} + for u, v in G.edges() + }, + ) + nx.remove_edge_attributes(G, other_attr, third_attr, ebunch=[(0, 1)]) + assert other_attr not in G[0][1] and third_attr not in G[0][1] + assert other_attr in G[1][2] and third_attr in G[1][2] + + +@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph)) +def test_remove_multi_edge_attributes(graph_type): + # Test removing single attribute + G = nx.path_graph(3, create_using=graph_type) + G.add_edge(1, 2) + attr = "hello" + vals = 100 + nx.set_edge_attributes(G, vals, attr) + nx.remove_edge_attributes(G, attr) + assert attr not in G[0][1][0] + assert attr not in G[1][2][0] + assert attr not 
in G[1][2][1]
+
+    # Test removing only some attributes
+    G = nx.path_graph(3, create_using=graph_type)
+    G.add_edge(1, 2)
+    other_attr = "other"
+    other_vals = 200
+    nx.set_edge_attributes(G, vals, attr)
+    nx.set_edge_attributes(G, other_vals, other_attr)
+    nx.remove_edge_attributes(G, attr)
+    assert attr not in G[0][1][0]
+    assert attr not in G[1][2][0]
+    assert attr not in G[1][2][1]
+    assert G[0][1][0][other_attr] == other_vals
+    assert G[1][2][0][other_attr] == other_vals
+    assert G[1][2][1][other_attr] == other_vals
+
+    # Test removing multiple attributes
+    G = nx.path_graph(3, create_using=graph_type)
+    G.add_edge(1, 2)
+    nx.set_edge_attributes(G, vals, attr)
+    nx.set_edge_attributes(G, other_vals, other_attr)
+    nx.remove_edge_attributes(G, attr, other_attr)
+    assert attr not in G[0][1][0] and other_attr not in G[0][1][0]
+    assert attr not in G[1][2][0] and other_attr not in G[1][2][0]
+    assert attr not in G[1][2][1] and other_attr not in G[1][2][1]
+
+    # Test removing multiple (not all) attributes
+    G = nx.path_graph(3, create_using=graph_type)
+    G.add_edge(1, 2)
+    third_attr = "third"
+    third_vals = 300
+    nx.set_edge_attributes(
+        G,
+        {
+            (u, v, k): {attr: vals, other_attr: other_vals, third_attr: third_vals}
+            for u, v, k in G.edges(keys=True)
+        },
+    )
+    nx.remove_edge_attributes(G, other_attr, third_attr)
+    assert other_attr not in G[0][1][0] and third_attr not in G[0][1][0]
+    assert other_attr not in G[1][2][0] and third_attr not in G[1][2][0]
+    assert other_attr not in G[1][2][1] and third_attr not in G[1][2][1]
+    assert G[0][1][0][attr] == vals
+    assert G[1][2][0][attr] == vals
+    assert G[1][2][1][attr] == vals
+
+    # Test removing incomplete edge attributes
+    G = nx.path_graph(3, create_using=graph_type)
+    G.add_edge(1, 2)
+    nx.set_edge_attributes(
+        G,
+        {
+            (0, 1, 0): {attr: vals, other_attr: other_vals},
+            (1, 2, 1): {attr: vals, other_attr: other_vals},
+        },
+    )
+    nx.remove_edge_attributes(G, other_attr)
+    assert other_attr not in G[0][1][0] and G[0][1][0][attr] == vals
+    assert other_attr not in G[1][2][0]
+    assert other_attr not in G[1][2][1]
+
+    # Test removing subset of edge attributes
+    G = nx.path_graph(3, create_using=graph_type)
+    G.add_edge(1, 2)
+    nx.set_edge_attributes(
+        G,
+        {
+            (0, 1, 0): {attr: vals, other_attr: other_vals},
+            (1, 2, 0): {attr: vals, other_attr: other_vals},
+            (1, 2, 1): {attr: vals, other_attr: other_vals},
+        },
+    )
+    nx.remove_edge_attributes(G, attr, ebunch=[(0, 1, 0), (1, 2, 0)])
+    assert attr not in G[0][1][0] and other_attr in G[0][1][0]
+    assert attr not in G[1][2][0] and other_attr in G[1][2][0]
+    assert attr in G[1][2][1] and other_attr in G[1][2][1]
+
+
+def test_is_empty():
+    graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
+    for G in graphs:
+        assert nx.is_empty(G)
+        G.add_nodes_from(range(5))
+        assert nx.is_empty(G)
+        G.add_edges_from([(1, 2), (3, 4)])
+        assert not nx.is_empty(G)
+
+
+@pytest.mark.parametrize(
+    "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph]
+)
+def test_selfloops(graph_type):
+    G = nx.complete_graph(3, create_using=graph_type)
+    G.add_edge(0, 0)
+    assert nodes_equal(nx.nodes_with_selfloops(G), [0])
+    assert edges_equal(nx.selfloop_edges(G), [(0, 0)])
+    assert edges_equal(nx.selfloop_edges(G, data=True), [(0, 0, {})])
+    assert nx.number_of_selfloops(G) == 1
+
+
+@pytest.mark.parametrize(
+    "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph]
+)
+def test_selfloop_edges_attr(graph_type):
+    G = nx.complete_graph(3, create_using=graph_type)
+
G.add_edge(0, 0) + G.add_edge(1, 1, weight=2) + assert edges_equal( + nx.selfloop_edges(G, data=True), [(0, 0, {}), (1, 1, {"weight": 2})] + ) + assert edges_equal(nx.selfloop_edges(G, data="weight"), [(0, 0, None), (1, 1, 2)]) + + +def test_selfloop_edges_multi_with_data_and_keys(): + G = nx.complete_graph(3, create_using=nx.MultiGraph) + G.add_edge(0, 0, weight=10) + G.add_edge(0, 0, weight=100) + assert edges_equal( + nx.selfloop_edges(G, data="weight", keys=True), [(0, 0, 0, 10), (0, 0, 1, 100)] + ) + + +@pytest.mark.parametrize("graph_type", [nx.Graph, nx.DiGraph]) +def test_selfloops_removal(graph_type): + G = nx.complete_graph(3, create_using=graph_type) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, keys=True)) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, data=True)) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, keys=True, data=True)) + + +@pytest.mark.parametrize("graph_type", [nx.MultiGraph, nx.MultiDiGraph]) +def test_selfloops_removal_multi(graph_type): + """test removing selfloops behavior vis-a-vis altering a dict while iterating. + cf. gh-4068""" + G = nx.complete_graph(3, create_using=graph_type) + # Defaults - see gh-4080 + G.add_edge(0, 0) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G)) + assert (0, 0) not in G.edges() + # With keys + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(RuntimeError): + G.remove_edges_from(nx.selfloop_edges(G, keys=True)) + # With data + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(TypeError): + G.remove_edges_from(nx.selfloop_edges(G, data=True)) + # With keys and data + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(RuntimeError): + G.remove_edges_from(nx.selfloop_edges(G, data=True, keys=True)) + + +def test_pathweight(): + valid_path = [1, 2, 3] + invalid_path = [1, 3, 2] + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + edges = [ + (1, 2, {"cost": 5, "dist": 6}), + (2, 3, {"cost": 3, "dist": 4}), + (1, 2, {"cost": 1, "dist": 2}), + ] + for graph in graphs: + graph.add_edges_from(edges) + assert nx.path_weight(graph, valid_path, "cost") == 4 + assert nx.path_weight(graph, valid_path, "dist") == 6 + pytest.raises(nx.NetworkXNoPath, nx.path_weight, graph, invalid_path, "cost") + + +@pytest.mark.parametrize( + "G", (nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()) +) +def test_ispath(G): + G.add_edges_from([(1, 2), (2, 3), (1, 2), (3, 4)]) + valid_path = [1, 2, 3, 4] + invalid_path = [1, 2, 4, 3] # wrong node order + another_invalid_path = [1, 2, 3, 4, 5] # contains node not in G + assert nx.is_path(G, valid_path) + assert not nx.is_path(G, invalid_path) + assert not nx.is_path(G, another_invalid_path) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_restricted_view(G): + G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)]) + G.add_node(4) + H = nx.restricted_view(G, [0, 2, 5], [(1, 2), (3, 4)]) + assert set(H.nodes()) == {1, 3, 4} + assert set(H.edges()) == {(1, 1)} + + +@pytest.mark.parametrize("G", (nx.MultiGraph(), nx.MultiDiGraph())) +def test_restricted_view_multi(G): + G.add_edges_from( + [(0, 1, 0), (0, 2, 0), (0, 3, 0), (0, 1, 1), (1, 0, 0), (1, 1, 0), (1, 2, 0)] + ) + G.add_node(4) + H = nx.restricted_view(G, [0, 2, 5], [(1, 2, 0), (3, 4, 0)]) + assert set(H.nodes()) == {1, 3, 4} + assert set(H.edges()) == {(1, 1)} diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graph.py 
b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graph.py new file mode 100644 index 0000000..2b4ffe5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graph.py @@ -0,0 +1,927 @@ +import gc +import pickle +import platform +import weakref + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +def test_degree_node_not_found_exception_message(): + """See gh-7740""" + G = nx.path_graph(5) + with pytest.raises(nx.NetworkXError, match="Node.*is not in the graph"): + G.degree(100) + + +class BaseGraphTester: + """Tests for data-structure independent graph class features.""" + + def test_contains(self): + G = self.K3 + assert 1 in G + assert 4 not in G + assert "b" not in G + assert [] not in G # no exception for nonhashable + assert {1: 1} not in G # no exception for nonhashable + + def test_order(self): + G = self.K3 + assert len(G) == 3 + assert G.order() == 3 + assert G.number_of_nodes() == 3 + + def test_nodes(self): + G = self.K3 + assert isinstance(G._node, G.node_dict_factory) + assert isinstance(G._adj, G.adjlist_outer_dict_factory) + assert all( + isinstance(adj, G.adjlist_inner_dict_factory) for adj in G._adj.values() + ) + assert sorted(G.nodes()) == self.k3nodes + assert sorted(G.nodes(data=True)) == [(0, {}), (1, {}), (2, {})] + + def test_none_node(self): + G = self.Graph() + with pytest.raises(ValueError): + G.add_node(None) + with pytest.raises(ValueError): + G.add_nodes_from([None]) + with pytest.raises(ValueError): + G.add_edge(0, None) + with pytest.raises(ValueError): + G.add_edges_from([(0, None)]) + + def test_has_node(self): + G = self.K3 + assert G.has_node(1) + assert not G.has_node(4) + assert not G.has_node([]) # no exception for nonhashable + assert not G.has_node({1: 1}) # no exception for nonhashable + + def test_has_edge(self): + G = self.K3 + assert G.has_edge(0, 1) + assert not G.has_edge(0, -1) + + def test_neighbors(self): + G = self.K3 + assert sorted(G.neighbors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.neighbors(-1) + + @pytest.mark.skipif( + platform.python_implementation() == "PyPy", reason="PyPy gc is different" + ) + def test_memory_leak(self): + G = self.Graph() + + def count_objects_of_type(_type): + # Iterating over all objects tracked by gc can include weak references + # whose weakly-referenced objects may no longer exist. Calling `isinstance` + # on such a weak reference will raise ReferenceError. There are at least + # three workarounds for this: one is to compare type names instead of using + # `isinstance` such as `type(obj).__name__ == typename`, another is to use + # `type(obj) == _type`, and the last is to ignore ProxyTypes as we do below. + # NOTE: even if this safeguard is deemed unnecessary to pass NetworkX tests, + # we should still keep it for maximum safety for other NetworkX backends. 
+            return sum(
+                1
+                for obj in gc.get_objects()
+                if not isinstance(obj, weakref.ProxyTypes) and isinstance(obj, _type)
+            )
+
+        gc.collect()
+        before = count_objects_of_type(self.Graph)
+        G.copy()
+        gc.collect()
+        after = count_objects_of_type(self.Graph)
+        assert before == after
+
+        # test a subclass of the base class
+        class MyGraph(self.Graph):
+            pass
+
+        gc.collect()
+        G = MyGraph()
+        before = count_objects_of_type(MyGraph)
+        G.copy()
+        gc.collect()
+        after = count_objects_of_type(MyGraph)
+        assert before == after
+
+    def test_edges(self):
+        G = self.K3
+        assert isinstance(G._adj, G.adjlist_outer_dict_factory)
+        assert edges_equal(G.edges(), [(0, 1), (0, 2), (1, 2)])
+        assert edges_equal(G.edges(0), [(0, 1), (0, 2)])
+        assert edges_equal(G.edges([0, 1]), [(0, 1), (0, 2), (1, 2)])
+        with pytest.raises(nx.NetworkXError):
+            G.edges(-1)
+
+    def test_degree(self):
+        G = self.K3
+        assert sorted(G.degree()) == [(0, 2), (1, 2), (2, 2)]
+        assert dict(G.degree()) == {0: 2, 1: 2, 2: 2}
+        assert G.degree(0) == 2
+        with pytest.raises(nx.NetworkXError):
+            G.degree(-1)  # node not in graph
+
+    def test_size(self):
+        G = self.K3
+        assert G.size() == 3
+        assert G.number_of_edges() == 3
+
+    def test_nbunch_iter(self):
+        G = self.K3
+        assert nodes_equal(G.nbunch_iter(), self.k3nodes)  # all nodes
+        assert nodes_equal(G.nbunch_iter(0), [0])  # single node
+        assert nodes_equal(G.nbunch_iter([0, 1]), [0, 1])  # sequence
+        # sequence with none in graph
+        assert nodes_equal(G.nbunch_iter([-1]), [])
+        # string sequence with none in graph
+        assert nodes_equal(G.nbunch_iter("foo"), [])
+        # node not in graph doesn't get caught upon creation of iterator
+        bunch = G.nbunch_iter(-1)
+        # but gets caught when iterator used
+        with pytest.raises(nx.NetworkXError, match="is not in the graph"):
+            list(bunch)
+        # unhashable doesn't get caught upon creation of iterator
+        bunch = G.nbunch_iter([0, 1, 2, {}])
+        # but gets caught when iterator hits the unhashable
+        with pytest.raises(
+            nx.NetworkXError, match="in sequence nbunch is not a valid node"
+        ):
+            list(bunch)
+
+    def test_nbunch_iter_node_format_raise(self):
+        # Tests that a node that would have failed string formatting
+        # doesn't cause an error when attempting to raise a
+        # :exc:`nx.NetworkXError`.
+
+        # For more information, see pull request #1813.
+ G = self.Graph() + nbunch = [("x", set())] + with pytest.raises(nx.NetworkXError): + list(G.nbunch_iter(nbunch)) + + def test_selfloop_degree(self): + G = self.Graph() + G.add_edge(1, 1) + assert sorted(G.degree()) == [(1, 2)] + assert dict(G.degree()) == {1: 2} + assert G.degree(1) == 2 + assert sorted(G.degree([1])) == [(1, 2)] + assert G.degree(1, weight="weight") == 2 + + def test_selfloops(self): + G = self.K3.copy() + G.add_edge(0, 0) + assert nodes_equal(nx.nodes_with_selfloops(G), [0]) + assert edges_equal(nx.selfloop_edges(G), [(0, 0)]) + assert nx.number_of_selfloops(G) == 1 + G.remove_edge(0, 0) + G.add_edge(0, 0) + G.remove_edges_from([(0, 0)]) + G.add_edge(1, 1) + G.remove_node(1) + G.add_edge(0, 0) + G.add_edge(1, 1) + G.remove_nodes_from([0, 1]) + + def test_cache_reset(self): + G = self.K3.copy() + old_adj = G.adj + assert id(G.adj) == id(old_adj) + G._adj = {} + assert id(G.adj) != id(old_adj) + + old_nodes = G.nodes + assert id(G.nodes) == id(old_nodes) + G._node = {} + assert id(G.nodes) != id(old_nodes) + + def test_attributes_cached(self): + G = self.K3.copy() + assert id(G.nodes) == id(G.nodes) + assert id(G.edges) == id(G.edges) + assert id(G.degree) == id(G.degree) + assert id(G.adj) == id(G.adj) + + +class BaseAttrGraphTester(BaseGraphTester): + """Tests of graph class attribute features.""" + + def test_weighted_degree(self): + G = self.Graph() + G.add_edge(1, 2, weight=2, other=3) + G.add_edge(2, 3, weight=3, other=4) + assert sorted(d for n, d in G.degree(weight="weight")) == [2, 3, 5] + assert dict(G.degree(weight="weight")) == {1: 2, 2: 5, 3: 3} + assert G.degree(1, weight="weight") == 2 + assert nodes_equal((G.degree([1], weight="weight")), [(1, 2)]) + + assert nodes_equal((d for n, d in G.degree(weight="other")), [3, 7, 4]) + assert dict(G.degree(weight="other")) == {1: 3, 2: 7, 3: 4} + assert G.degree(1, weight="other") == 3 + assert edges_equal((G.degree([1], weight="other")), [(1, 3)]) + + def add_attributes(self, G): + G.graph["foo"] = [] + G.nodes[0]["foo"] = [] + G.remove_edge(1, 2) + ll = [] + G.add_edge(1, 2, foo=ll) + G.add_edge(2, 1, foo=ll) + + def test_name(self): + G = self.Graph(name="") + assert G.name == "" + G = self.Graph(name="test") + assert G.name == "test" + + def test_str_unnamed(self): + G = self.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + assert str(G) == f"{type(G).__name__} with 3 nodes and 2 edges" + + def test_str_named(self): + G = self.Graph(name="foo") + G.add_edges_from([(1, 2), (2, 3)]) + assert str(G) == f"{type(G).__name__} named 'foo' with 3 nodes and 2 edges" + + def test_graph_chain(self): + G = self.Graph([(0, 1), (1, 2)]) + DG = G.to_directed(as_view=True) + SDG = DG.subgraph([0, 1]) + RSDG = SDG.reverse(copy=False) + assert G is DG._graph + assert DG is SDG._graph + assert SDG is RSDG._graph + + def test_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy edge datadict but any container attr are same + H = G.copy() + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + def test_class_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy edge datadict but any container attr are same + H = G.__class__(G) + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + def test_fresh_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy graph structure but use fresh datadict + H = G.__class__() + 
H.add_nodes_from(G) + H.add_edges_from(G.edges()) + assert len(G.nodes[0]) == 1 + ddict = G.adj[1][2][0] if G.is_multigraph() else G.adj[1][2] + assert len(ddict) == 1 + assert len(H.nodes[0]) == 0 + ddict = H.adj[1][2][0] if H.is_multigraph() else H.adj[1][2] + assert len(ddict) == 0 + + def is_deepcopy(self, H, G): + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.deep_copy_attrdict(H, G) + + def deep_copy_attrdict(self, H, G): + self.deepcopy_graph_attr(H, G) + self.deepcopy_node_attr(H, G) + self.deepcopy_edge_attr(H, G) + + def deepcopy_graph_attr(self, H, G): + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] != H.graph["foo"] + + def deepcopy_node_attr(self, H, G): + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] != H.nodes[0]["foo"] + + def deepcopy_edge_attr(self, H, G): + assert G[1][2]["foo"] == H[1][2]["foo"] + G[1][2]["foo"].append(1) + assert G[1][2]["foo"] != H[1][2]["foo"] + + def is_shallow_copy(self, H, G): + self.graphs_equal(H, G) + self.shallow_copy_attrdict(H, G) + + def shallow_copy_attrdict(self, H, G): + self.shallow_copy_graph_attr(H, G) + self.shallow_copy_node_attr(H, G) + self.shallow_copy_edge_attr(H, G) + + def shallow_copy_graph_attr(self, H, G): + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] == H.graph["foo"] + + def shallow_copy_node_attr(self, H, G): + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + + def shallow_copy_edge_attr(self, H, G): + assert G[1][2]["foo"] == H[1][2]["foo"] + G[1][2]["foo"].append(1) + assert G[1][2]["foo"] == H[1][2]["foo"] + + def same_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.adj[1][2]["foo"] = "baz" + assert G.edges == H.edges + H.adj[1][2]["foo"] = old_foo + assert G.edges == H.edges + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G.nodes == H.nodes + H.nodes[0]["foo"] = old_foo + assert G.nodes == H.nodes + + def different_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.adj[1][2]["foo"] = "baz" + assert G._adj != H._adj + H.adj[1][2]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node != H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def graphs_equal(self, H, G): + assert G._adj == H._adj + assert G._node == H._node + assert G.graph == H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2] is H._adj[2][1] + assert G._adj[1][2] is G._adj[2][1] + else: # at least one is directed + if not G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2] is H._pred[2][1] + assert G._succ[1][2] is G._pred[2][1] + + def test_graph_attr(self): + G = self.K3.copy() + G.graph["foo"] = "bar" + assert isinstance(G.graph, G.graph_attr_dict_factory) + assert G.graph["foo"] == "bar" + del G.graph["foo"] + assert G.graph == {} + H = self.Graph(foo="bar") + assert H.graph["foo"] == "bar" + + def test_node_attr(self): + G = self.K3.copy() + G.add_node(1, foo="bar") + assert all( + isinstance(d, G.node_attr_dict_factory) for u, d in G.nodes(data=True) + ) + assert nodes_equal(G.nodes(), [0, 1, 2]) + assert nodes_equal(G.nodes(data=True), [(0, {}), (1, {"foo": "bar"}), (2, {})]) + G.nodes[1]["foo"] = "baz" 
+ assert nodes_equal(G.nodes(data=True), [(0, {}), (1, {"foo": "baz"}), (2, {})]) + assert nodes_equal(G.nodes(data="foo"), [(0, None), (1, "baz"), (2, None)]) + assert nodes_equal( + G.nodes(data="foo", default="bar"), [(0, "bar"), (1, "baz"), (2, "bar")] + ) + + def test_node_attr2(self): + G = self.K3.copy() + a = {"foo": "bar"} + G.add_node(3, **a) + assert nodes_equal(G.nodes(), [0, 1, 2, 3]) + assert nodes_equal( + G.nodes(data=True), [(0, {}), (1, {}), (2, {}), (3, {"foo": "bar"})] + ) + + def test_edge_lookup(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + assert edges_equal(G.edges[1, 2], {"foo": "bar"}) + + def test_edge_attr(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + assert all( + isinstance(d, G.edge_attr_dict_factory) for u, v, d in G.edges(data=True) + ) + assert edges_equal(G.edges(data=True), [(1, 2, {"foo": "bar"})]) + assert edges_equal(G.edges(data="foo"), [(1, 2, "bar")]) + + def test_edge_attr2(self): + G = self.Graph() + G.add_edges_from([(1, 2), (3, 4)], foo="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"foo": "foo"}), (3, 4, {"foo": "foo"})] + ) + assert edges_equal(G.edges(data="foo"), [(1, 2, "foo"), (3, 4, "foo")]) + + def test_edge_attr3(self): + G = self.Graph() + G.add_edges_from([(1, 2, {"weight": 32}), (3, 4, {"weight": 64})], foo="foo") + assert edges_equal( + G.edges(data=True), + [ + (1, 2, {"foo": "foo", "weight": 32}), + (3, 4, {"foo": "foo", "weight": 64}), + ], + ) + + G.remove_edges_from([(1, 2), (3, 4)]) + G.add_edge(1, 2, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + + def test_edge_attr4(self): + G = self.Graph() + G.add_edge(1, 2, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + G[1][2]["data"] = 10 # OK to set data like this + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 10, "spam": "bar", "bar": "foo"})] + ) + + G.adj[1][2]["data"] = 20 + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 20, "spam": "bar", "bar": "foo"})] + ) + G.edges[1, 2]["data"] = 21 # another spelling, "edge" + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 21, "spam": "bar", "bar": "foo"})] + ) + G.adj[1][2]["listdata"] = [20, 200] + G.adj[1][2]["weight"] = 20 + dd = { + "data": 21, + "spam": "bar", + "bar": "foo", + "listdata": [20, 200], + "weight": 20, + } + assert edges_equal(G.edges(data=True), [(1, 2, dd)]) + + def test_to_undirected(self): + G = self.K3 + self.add_attributes(G) + H = nx.Graph(G) + self.is_shallow_copy(H, G) + self.different_attrdict(H, G) + H = G.to_undirected() + self.is_deepcopy(H, G) + + def test_to_directed_as_view(self): + H = nx.path_graph(2, create_using=self.Graph) + H2 = H.to_directed(as_view=True) + assert H is H2._graph + assert H2.has_edge(0, 1) + assert H2.has_edge(1, 0) or H.is_directed() + pytest.raises(nx.NetworkXError, H2.add_node, -1) + pytest.raises(nx.NetworkXError, H2.add_edge, 1, 2) + H.add_edge(1, 2) + assert H2.has_edge(1, 2) + assert H2.has_edge(2, 1) or H.is_directed() + + def test_to_undirected_as_view(self): + H = nx.path_graph(2, create_using=self.Graph) + H2 = H.to_undirected(as_view=True) + assert H is H2._graph + assert H2.has_edge(0, 1) + assert H2.has_edge(1, 0) + pytest.raises(nx.NetworkXError, H2.add_node, -1) + pytest.raises(nx.NetworkXError, H2.add_edge, 1, 2) + H.add_edge(1, 2) + assert H2.has_edge(1, 2) + assert H2.has_edge(2, 1) + + def test_directed_class(self): + G = 
self.Graph() + + class newGraph(G.to_undirected_class()): + def to_directed_class(self): + return newDiGraph + + def to_undirected_class(self): + return newGraph + + class newDiGraph(G.to_directed_class()): + def to_directed_class(self): + return newDiGraph + + def to_undirected_class(self): + return newGraph + + G = newDiGraph() if G.is_directed() else newGraph() + H = G.to_directed() + assert isinstance(H, newDiGraph) + H = G.to_undirected() + assert isinstance(H, newGraph) + + def test_to_directed(self): + G = self.K3 + self.add_attributes(G) + H = nx.DiGraph(G) + self.is_shallow_copy(H, G) + self.different_attrdict(H, G) + H = G.to_directed() + self.is_deepcopy(H, G) + + def test_subgraph(self): + G = self.K3 + self.add_attributes(G) + H = G.subgraph([0, 1, 2, 5]) + self.graphs_equal(H, G) + self.same_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + H = G.subgraph(0) + assert H.adj == {0: {}} + H = G.subgraph([]) + assert H.adj == {} + assert G.adj != {} + + def test_selfloops_attr(self): + G = self.K3.copy() + G.add_edge(0, 0) + G.add_edge(1, 1, weight=2) + assert edges_equal( + nx.selfloop_edges(G, data=True), [(0, 0, {}), (1, 1, {"weight": 2})] + ) + assert edges_equal( + nx.selfloop_edges(G, data="weight"), [(0, 0, None), (1, 1, 2)] + ) + + +class TestGraph(BaseAttrGraphTester): + """Tests specific to dict-of-dict-of-dict graph data structure""" + + def setup_method(self): + self.Graph = nx.Graph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = ({}, {}, {}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + def test_pickle(self): + G = self.K3 + pg = pickle.loads(pickle.dumps(G, -1)) + self.graphs_equal(pg, G) + pg = pickle.loads(pickle.dumps(G)) + self.graphs_equal(pg, G) + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + assert sorted(G.adj.items()) == [(1, {2: {}}), (2, {1: {}})] + + def test_adjacency(self): + G = self.K3 + assert dict(G.adjacency()) == { + 0: {1: {}, 2: {}}, + 1: {0: {}, 2: {}}, + 2: {0: {}, 1: {}}, + } + + def test_getitem(self): + G = self.K3 + assert G.adj[0] == {1: {}, 2: {}} + assert G[0] == {1: {}, 2: {}} + with pytest.raises(KeyError): + G.__getitem__("j") + with pytest.raises(TypeError): + G.__getitem__(["A"]) + + def test_add_node(self): + G = self.Graph() + G.add_node(0) + assert G.adj == {0: {}} + # test add attributes + G.add_node(1, c="red") + G.add_node(2, c="blue") + G.add_node(3, c="red") + assert G.nodes[1]["c"] == "red" + assert G.nodes[2]["c"] == "blue" + assert G.nodes[3]["c"] == "red" + # test updating attributes + G.add_node(1, c="blue") + G.add_node(2, c="red") + G.add_node(3, c="blue") + assert G.nodes[1]["c"] == "blue" + assert G.nodes[2]["c"] == "red" + assert G.nodes[3]["c"] == "blue" + + def test_add_nodes_from(self): + G = self.Graph() + G.add_nodes_from([0, 1, 2]) + assert G.adj == {0: {}, 1: {}, 2: {}} + # test add attributes + G.add_nodes_from([0, 1, 2], c="red") + assert G.nodes[0]["c"] == "red" + assert G.nodes[2]["c"] == "red" + # test that attribute dicts are not the same + assert G.nodes[0] is not G.nodes[1] + # test updating attributes + G.add_nodes_from([0, 1, 2], c="blue") + assert G.nodes[0]["c"] == "blue" + assert G.nodes[2]["c"] == "blue" + assert G.nodes[0] is not G.nodes[1] + # test tuple input + H = 
self.Graph() + H.add_nodes_from(G.nodes(data=True)) + assert H.nodes[0]["c"] == "blue" + assert H.nodes[2]["c"] == "blue" + assert H.nodes[0] is not H.nodes[1] + # specific overrides general + H.add_nodes_from([0, (1, {"c": "green"}), (3, {"c": "cyan"})], c="red") + assert H.nodes[0]["c"] == "red" + assert H.nodes[1]["c"] == "green" + assert H.nodes[2]["c"] == "blue" + assert H.nodes[3]["c"] == "cyan" + + def test_remove_node(self): + G = self.K3.copy() + G.remove_node(0) + assert G.adj == {1: {2: {}}, 2: {1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_node(-1) + + # generator here to implement list,set,string... + + def test_remove_nodes_from(self): + G = self.K3.copy() + G.remove_nodes_from([0, 1]) + assert G.adj == {2: {}} + G.remove_nodes_from([-1]) # silent fail + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {}}, 1: {0: {}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {}}, 1: {0: {}}} + G = self.Graph() + with pytest.raises(ValueError): + G.add_edge(None, "anything") + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"weight": 3})]) + assert G.adj == { + 0: {1: {}, 2: {"weight": 3}}, + 1: {0: {}}, + 2: {0: {"weight": 3}}, + } + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"weight": 3}), (1, 2, {"data": 4})], data=2) + assert G.adj == { + 0: {1: {"data": 2}, 2: {"weight": 3, "data": 2}}, + 1: {0: {"data": 2}, 2: {"data": 4}}, + 2: {0: {"weight": 3, "data": 2}, 1: {"data": 4}}, + } + + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0,)]) # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3)]) # too many in tuple + with pytest.raises(TypeError): + G.add_edges_from([0]) # not a tuple + with pytest.raises(ValueError): + G.add_edges_from([(None, 3), (3, 2)]) # None cannot be a node + + def test_remove_edge(self): + G = self.K3.copy() + G.remove_edge(0, 1) + assert G.adj == {0: {2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + assert G.adj == {0: {2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + G.remove_edges_from([(0, 0)]) # silent fail + + def test_clear(self): + G = self.K3.copy() + G.graph["name"] = "K3" + G.clear() + assert list(G.nodes) == [] + assert G.adj == {} + assert G.graph == {} + + def test_clear_edges(self): + G = self.K3.copy() + G.graph["name"] = "K3" + nodes = list(G.nodes) + G.clear_edges() + assert list(G.nodes) == nodes + assert G.adj == {0: {}, 1: {}, 2: {}} + assert list(G.edges) == [] + assert G.graph["name"] == "K3" + + def test_edges_data(self): + G = self.K3 + all_edges = [(0, 1, {}), (0, 2, {}), (1, 2, {})] + assert edges_equal(G.edges(data=True), all_edges) + assert edges_equal(G.edges(0, data=True), [(0, 1, {}), (0, 2, {})]) + assert edges_equal(G.edges([0, 1], data=True), all_edges) + with pytest.raises(nx.NetworkXError): + G.edges(-1, True) + + def test_get_edge_data(self): + G = self.K3.copy() + assert G.get_edge_data(0, 1) == {} + assert G[0][1] == {} + assert G.get_edge_data(10, 20) is None + assert G.get_edge_data(-1, 0) is None + assert G.get_edge_data(-1, 0, default=1) == 1 + + def test_update(self): + # specify both edges and nodes + G = self.K3.copy() + G.update(nodes=[3, (4, {"size": 2})], edges=[(4, 5), (6, 7, {"weight": 2})]) + nlist = [ + (0, {}), + (1, {}), + (2, {}), + (3, {}), + (4, {"size": 2}), + (5, {}), + (6, {}), + (7, {}), + ] + 
assert sorted(G.nodes.data()) == nlist + if G.is_directed(): + elist = [ + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, {}), + (2, 1, {}), + (4, 5, {}), + (6, 7, {"weight": 2}), + ] + else: + elist = [ + (0, 1, {}), + (0, 2, {}), + (1, 2, {}), + (4, 5, {}), + (6, 7, {"weight": 2}), + ] + assert sorted(G.edges.data()) == elist + assert G.graph == {} + + # no keywords -- order is edges, nodes + G = self.K3.copy() + G.update([(4, 5), (6, 7, {"weight": 2})], [3, (4, {"size": 2})]) + assert sorted(G.nodes.data()) == nlist + assert sorted(G.edges.data()) == elist + assert G.graph == {} + + # update using only a graph + G = self.Graph() + G.graph["foo"] = "bar" + G.add_node(2, data=4) + G.add_edge(0, 1, weight=0.5) + GG = G.copy() + H = self.Graph() + GG.update(H) + assert graphs_equal(G, GG) + H.update(G) + assert graphs_equal(H, G) + + # update nodes only + H = self.Graph() + H.update(nodes=[3, 4]) + assert H.nodes ^ {3, 4} == set() + assert H.size() == 0 + + # update edges only + H = self.Graph() + H.update(edges=[(3, 4)]) + assert sorted(H.edges.data()) == [(3, 4, {})] + assert H.size() == 1 + + # No inputs -> exception + with pytest.raises(nx.NetworkXError): + nx.Graph().update() + + +class TestEdgeSubgraph: + """Unit tests for the :meth:`Graph.edge_subgraph` method.""" + + def setup_method(self): + # Create a path graph on five nodes. + G = nx.path_graph(5) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1), (3, 4)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert [(0, 1, "edge01"), (3, 4, "edge34")] == sorted(self.H.edges(data="name")) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_remove_node(self): + """Tests that removing a node in the original graph does + affect the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes()) + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. + self.G.nodes[0]["name"] = "foo" + assert self.G.nodes[0] == self.H.nodes[0] + self.H.nodes[1]["name"] = "bar" + assert self.G.nodes[1] == self.H.nodes[1] + + def test_edge_attr_dict(self): + """Tests that the edge attribute dictionary of the two graphs is + the same object. + + """ + for u, v in self.H.edges(): + assert self.G.edges[u, v] == self.H.edges[u, v] + # Making a change to G should make a change in H and vice versa. + self.G.edges[0, 1]["name"] = "foo" + assert self.G.edges[0, 1]["name"] == self.H.edges[0, 1]["name"] + self.H.edges[3, 4]["name"] = "bar" + assert self.G.edges[3, 4]["name"] == self.H.edges[3, 4]["name"] + + def test_graph_attr_dict(self): + """Tests that the graph attribute dictionary of the two graphs + is the same object. 
+ + """ + assert self.G.graph is self.H.graph diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graph_historical.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graph_historical.py new file mode 100644 index 0000000..6e80878 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graph_historical.py @@ -0,0 +1,12 @@ +"""Original NetworkX graph tests""" + +import networkx as nx + +from .historical_tests import HistoricalTests + + +class TestGraphHistorical(HistoricalTests): + @classmethod + def setup_class(cls): + HistoricalTests.setup_class() + cls.G = nx.Graph diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graphviews.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graphviews.py new file mode 100644 index 0000000..c8f0e63 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_graphviews.py @@ -0,0 +1,349 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + +# Note: SubGraph views are not tested here. They have their own testing file + + +class TestReverseView: + def setup_method(self): + self.G = nx.path_graph(9, create_using=nx.DiGraph()) + self.rv = nx.reverse_view(self.G) + + def test_pickle(self): + import pickle + + rv = self.rv + prv = pickle.loads(pickle.dumps(rv, -1)) + assert rv._node == prv._node + assert rv._adj == prv._adj + assert rv.graph == prv.graph + + def test_contains(self): + assert (2, 3) in self.G.edges + assert (3, 2) not in self.G.edges + assert (2, 3) not in self.rv.edges + assert (3, 2) in self.rv.edges + + def test_iter(self): + expected = sorted(tuple(reversed(e)) for e in self.G.edges) + assert sorted(self.rv.edges) == expected + + def test_exceptions(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.reverse_view, G) + + def test_subclass(self): + class MyGraph(nx.DiGraph): + def my_method(self): + return "me" + + def to_directed_class(self): + return MyGraph() + + M = MyGraph() + M.add_edge(1, 2) + RM = nx.reverse_view(M) + assert RM.__class__ == MyGraph + RMC = RM.copy() + assert RMC.__class__ == MyGraph + assert RMC.has_edge(2, 1) + assert RMC.my_method() == "me" + + +class TestMultiReverseView: + def setup_method(self): + self.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + self.G.add_edge(4, 5) + self.rv = nx.reverse_view(self.G) + + def test_pickle(self): + import pickle + + rv = self.rv + prv = pickle.loads(pickle.dumps(rv, -1)) + assert rv._node == prv._node + assert rv._adj == prv._adj + assert rv.graph == prv.graph + + def test_contains(self): + assert (2, 3, 0) in self.G.edges + assert (3, 2, 0) not in self.G.edges + assert (2, 3, 0) not in self.rv.edges + assert (3, 2, 0) in self.rv.edges + assert (5, 4, 1) in self.rv.edges + assert (4, 5, 1) not in self.rv.edges + + def test_iter(self): + expected = sorted((v, u, k) for u, v, k in self.G.edges) + assert sorted(self.rv.edges) == expected + + def test_exceptions(self): + MG = nx.MultiGraph(self.G) + pytest.raises(nx.NetworkXNotImplemented, nx.reverse_view, MG) + + +def test_generic_multitype(): + nxg = nx.graphviews + G = nx.DiGraph([(1, 2)]) + with pytest.raises(nx.NetworkXError): + nxg.generic_graph_view(G, create_using=nx.MultiGraph) + G = nx.MultiDiGraph([(1, 2)]) + with pytest.raises(nx.NetworkXError): + nxg.generic_graph_view(G, create_using=nx.DiGraph) + + +class TestToDirected: + def setup_method(self): + self.G = nx.path_graph(9) + self.dv = nx.to_directed(self.G) + self.MG 
= nx.path_graph(9, create_using=nx.MultiGraph()) + self.Mdv = nx.to_directed(self.MG) + + def test_directed(self): + assert not self.G.is_directed() + assert self.dv.is_directed() + + def test_already_directed(self): + dd = nx.to_directed(self.dv) + Mdd = nx.to_directed(self.Mdv) + assert edges_equal(dd.edges, self.dv.edges) + assert edges_equal(Mdd.edges, self.Mdv.edges) + + def test_pickle(self): + import pickle + + dv = self.dv + pdv = pickle.loads(pickle.dumps(dv, -1)) + assert dv._node == pdv._node + assert dv._succ == pdv._succ + assert dv._pred == pdv._pred + assert dv.graph == pdv.graph + + def test_contains(self): + assert (2, 3) in self.G.edges + assert (3, 2) in self.G.edges + assert (2, 3) in self.dv.edges + assert (3, 2) in self.dv.edges + + def test_iter(self): + revd = [tuple(reversed(e)) for e in self.G.edges] + expected = sorted(list(self.G.edges) + revd) + assert sorted(self.dv.edges) == expected + + +class TestToUndirected: + def setup_method(self): + self.DG = nx.path_graph(9, create_using=nx.DiGraph()) + self.uv = nx.to_undirected(self.DG) + self.MDG = nx.path_graph(9, create_using=nx.MultiDiGraph()) + self.Muv = nx.to_undirected(self.MDG) + + def test_directed(self): + assert self.DG.is_directed() + assert not self.uv.is_directed() + + def test_already_directed(self): + uu = nx.to_undirected(self.uv) + Muu = nx.to_undirected(self.Muv) + assert edges_equal(uu.edges, self.uv.edges) + assert edges_equal(Muu.edges, self.Muv.edges) + + def test_pickle(self): + import pickle + + uv = self.uv + puv = pickle.loads(pickle.dumps(uv, -1)) + assert uv._node == puv._node + assert uv._adj == puv._adj + assert uv.graph == puv.graph + assert hasattr(uv, "_graph") + + def test_contains(self): + assert (2, 3) in self.DG.edges + assert (3, 2) not in self.DG.edges + assert (2, 3) in self.uv.edges + assert (3, 2) in self.uv.edges + + def test_iter(self): + expected = sorted(self.DG.edges) + assert sorted(self.uv.edges) == expected + + +class TestChainsOfViews: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.DG = nx.path_graph(9, create_using=nx.DiGraph()) + cls.MG = nx.path_graph(9, create_using=nx.MultiGraph()) + cls.MDG = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.Gv = nx.to_undirected(cls.DG) + cls.DGv = nx.to_directed(cls.G) + cls.MGv = nx.to_undirected(cls.MDG) + cls.MDGv = nx.to_directed(cls.MG) + cls.Rv = cls.DG.reverse() + cls.MRv = cls.MDG.reverse() + cls.graphs = [ + cls.G, + cls.DG, + cls.MG, + cls.MDG, + cls.Gv, + cls.DGv, + cls.MGv, + cls.MDGv, + cls.Rv, + cls.MRv, + ] + for G in cls.graphs: + G.edges, G.nodes, G.degree + + def test_pickle(self): + import pickle + + for G in self.graphs: + H = pickle.loads(pickle.dumps(G, -1)) + assert edges_equal(H.edges, G.edges) + assert nodes_equal(H.nodes, G.nodes) + + def test_subgraph_of_subgraph(self): + SGv = nx.subgraph(self.G, range(3, 7)) + SDGv = nx.subgraph(self.DG, range(3, 7)) + SMGv = nx.subgraph(self.MG, range(3, 7)) + SMDGv = nx.subgraph(self.MDG, range(3, 7)) + for G in self.graphs + [SGv, SDGv, SMGv, SMDGv]: + SG = nx.induced_subgraph(G, [4, 5, 6]) + assert list(SG) == [4, 5, 6] + SSG = SG.subgraph([6, 7]) + assert list(SSG) == [6] + # subgraph-subgraph chain is short-cut in base class method + assert SSG._graph is G + + def test_restricted_induced_subgraph_chains(self): + """Test subgraph chains that both restrict and show nodes/edges. 
+ + Applying G.subgraph to a restricted_view should automatically produce + a subgraph view of the original graph rather than a chained + subgraph-of-subgraph view. + """ + hide_nodes = [3, 4, 5] + hide_edges = [(6, 7)] + RG = nx.restricted_view(self.G, hide_nodes, hide_edges) + nodes = [4, 5, 6, 7, 8] + SG = nx.induced_subgraph(RG, nodes) + SSG = RG.subgraph(nodes) + assert RG._graph is self.G + assert SSG._graph is self.G + assert SG._graph is RG + assert edges_equal(SG.edges, SSG.edges) + # should be same as morphing the graph + CG = self.G.copy() + CG.remove_nodes_from(hide_nodes) + CG.remove_edges_from(hide_edges) + assert edges_equal(CG.edges(nodes), SSG.edges) + CG.remove_nodes_from([0, 1, 2, 3]) + assert edges_equal(CG.edges, SSG.edges) + # switch order: subgraph first, then restricted view + SSSG = self.G.subgraph(nodes) + RSG = nx.restricted_view(SSSG, hide_nodes, hide_edges) + assert RSG._graph is not self.G + assert edges_equal(RSG.edges, CG.edges) + + def test_subgraph_copy(self): + for origG in self.graphs: + G = nx.Graph(origG) + SG = G.subgraph([4, 5, 6]) + H = SG.copy() + assert type(G) is type(H) + + def test_subgraph_todirected(self): + SG = nx.induced_subgraph(self.G, [4, 5, 6]) + SSG = SG.to_directed() + assert sorted(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 4), (5, 6), (6, 5)] + + def test_subgraph_toundirected(self): + SG = nx.induced_subgraph(self.G, [4, 5, 6]) + SSG = SG.to_undirected() + assert list(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 6)] + + def test_reverse_subgraph_toundirected(self): + G = self.DG.reverse(copy=False) + SG = G.subgraph([4, 5, 6]) + SSG = SG.to_undirected() + assert list(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 6)] + + def test_reverse_reverse_copy(self): + G = self.DG.reverse(copy=False) + H = G.reverse(copy=True) + assert H.nodes == self.DG.nodes + assert H.edges == self.DG.edges + G = self.MDG.reverse(copy=False) + H = G.reverse(copy=True) + assert H.nodes == self.MDG.nodes + assert H.edges == self.MDG.edges + + def test_subgraph_edgesubgraph_toundirected(self): + G = self.G.copy() + SG = G.subgraph([4, 5, 6]) + SSG = SG.edge_subgraph([(4, 5), (5, 4)]) + USSG = SSG.to_undirected() + assert list(USSG) == [4, 5] + assert sorted(USSG.edges) == [(4, 5)] + + def test_copy_subgraph(self): + G = self.G.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_disubgraph(self): + G = self.DG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_multidisubgraph(self): + G = self.MDG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_multisubgraph(self): + G = self.MG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_of_view(self): + G = nx.MultiGraph(self.MGv) + assert G.__class__.__name__ == "MultiGraph" + G = G.copy(as_view=True) + assert G.__class__.__name__ == "MultiGraph" + + def test_subclass(self): + class
MyGraph(nx.DiGraph): + def my_method(self): + return "me" + + def to_directed_class(self): + return MyGraph() + + for origG in self.graphs: + G = MyGraph(origG) + SG = G.subgraph([4, 5, 6]) + H = SG.copy() + assert SG.my_method() == "me" + assert H.my_method() == "me" + assert 3 not in H or 3 in SG diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_multidigraph.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_multidigraph.py new file mode 100644 index 0000000..fc0bd54 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_multidigraph.py @@ -0,0 +1,459 @@ +from collections import UserDict + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + +from .test_multigraph import BaseMultiGraphTester +from .test_multigraph import TestEdgeSubgraph as _TestMultiGraphEdgeSubgraph +from .test_multigraph import TestMultiGraph as _TestMultiGraph + + +class BaseMultiDiGraphTester(BaseMultiGraphTester): + def test_edges(self): + G = self.K3 + edges = [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges()) == edges + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + pytest.raises((KeyError, nx.NetworkXError), G.edges, -1) + + def test_edges_data(self): + G = self.K3 + edges = [(0, 1, {}), (0, 2, {}), (1, 0, {}), (1, 2, {}), (2, 0, {}), (2, 1, {})] + assert sorted(G.edges(data=True)) == edges + assert sorted(G.edges(0, data=True)) == [(0, 1, {}), (0, 2, {})] + pytest.raises((KeyError, nx.NetworkXError), G.neighbors, -1) + + def test_edges_multi(self): + G = self.K3 + assert sorted(G.edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + G.add_edge(0, 1) + assert sorted(G.edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + def test_out_edges(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + pytest.raises((KeyError, nx.NetworkXError), G.out_edges, -1) + assert sorted(G.out_edges(0, keys=True)) == [(0, 1, 0), (0, 2, 0)] + + def test_out_edges_multi(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + G.add_edge(0, 1, 2) + assert sorted(G.out_edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + def test_out_edges_data(self): + G = self.K3 + assert sorted(G.edges(0, data=True)) == [(0, 1, {}), (0, 2, {})] + G.remove_edge(0, 1) + G.add_edge(0, 1, data=1) + assert sorted(G.edges(0, data=True)) == [(0, 1, {"data": 1}), (0, 2, {})] + assert sorted(G.edges(0, data="data")) == [(0, 1, 1), (0, 2, None)] + assert sorted(G.edges(0, data="data", default=-1)) == [(0, 1, 1), (0, 2, -1)] + + def test_in_edges(self): + G = self.K3 + assert sorted(G.in_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.in_edges(0)) == [(1, 0), (2, 0)] + pytest.raises((KeyError, nx.NetworkXError), G.in_edges, -1) + G.add_edge(0, 1, 2) + assert sorted(G.in_edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + assert sorted(G.in_edges(0, keys=True)) == [(1, 0, 0), (2, 0, 0)] + + def test_in_edges_no_keys(self): + G = self.K3 + assert sorted(G.in_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.in_edges(0)) == [(1, 0), (2, 0)] + G.add_edge(0, 1, 2) + assert sorted(G.in_edges()) == [ + (0, 1), + 
(0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + assert sorted(G.in_edges(data=True, keys=False)) == [ + (0, 1, {}), + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, {}), + (2, 1, {}), + ] + + def test_in_edges_data(self): + G = self.K3 + assert sorted(G.in_edges(0, data=True)) == [(1, 0, {}), (2, 0, {})] + G.remove_edge(1, 0) + G.add_edge(1, 0, data=1) + assert sorted(G.in_edges(0, data=True)) == [(1, 0, {"data": 1}), (2, 0, {})] + assert sorted(G.in_edges(0, data="data")) == [(1, 0, 1), (2, 0, None)] + assert sorted(G.in_edges(0, data="data", default=-1)) == [(1, 0, 1), (2, 0, -1)] + + def is_shallow(self, H, G): + # graph + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] == H.graph["foo"] + # node + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + # edge + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + + def is_deep(self, H, G): + # graph + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] != H.graph["foo"] + # node + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] != H.nodes[0]["foo"] + # edge + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] != H[1][2][0]["foo"] + + def test_to_undirected(self): + # MultiDiGraph -> MultiGraph changes number of edges so it is + # not a copy operation... use is_shallow, not is_shallow_copy + G = self.K3 + self.add_attributes(G) + H = nx.MultiGraph(G) + # self.is_shallow(H,G) + # the result is traversal order dependent so we + # can't use the is_shallow() test here.
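+ # accept either of the two edge lists that traversal order can produce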
+ try: + assert edges_equal(H.edges(), [(0, 1), (1, 2), (2, 0)]) + except AssertionError: + assert edges_equal(H.edges(), [(0, 1), (1, 2), (1, 2), (2, 0)]) + H = G.to_undirected() + self.is_deep(H, G) + + def test_has_successor(self): + G = self.K3 + assert G.has_successor(0, 1) + assert not G.has_successor(0, -1) + + def test_successors(self): + G = self.K3 + assert sorted(G.successors(0)) == [1, 2] + pytest.raises((KeyError, nx.NetworkXError), G.successors, -1) + + def test_has_predecessor(self): + G = self.K3 + assert G.has_predecessor(0, 1) + assert not G.has_predecessor(0, -1) + + def test_predecessors(self): + G = self.K3 + assert sorted(G.predecessors(0)) == [1, 2] + pytest.raises((KeyError, nx.NetworkXError), G.predecessors, -1) + + def test_degree(self): + G = self.K3 + assert sorted(G.degree()) == [(0, 4), (1, 4), (2, 4)] + assert dict(G.degree()) == {0: 4, 1: 4, 2: 4} + assert G.degree(0) == 4 + assert list(G.degree(iter([0]))) == [(0, 4)] + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.degree(weight="weight")) == [(0, 4.3), (1, 4.3), (2, 4)] + assert sorted(G.degree(weight="other")) == [(0, 5.2), (1, 5.2), (2, 4)] + + def test_in_degree(self): + G = self.K3 + assert sorted(G.in_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.in_degree()) == {0: 2, 1: 2, 2: 2} + assert G.in_degree(0) == 2 + assert list(G.in_degree(iter([0]))) == [(0, 2)] + assert G.in_degree(0, weight="weight") == 2 + + def test_out_degree(self): + G = self.K3 + assert sorted(G.out_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.out_degree()) == {0: 2, 1: 2, 2: 2} + assert G.out_degree(0) == 2 + assert list(G.out_degree(iter([0]))) == [(0, 2)] + assert G.out_degree(0, weight="weight") == 2 + + def test_size(self): + G = self.K3 + assert G.size() == 6 + assert G.number_of_edges() == 6 + G.add_edge(0, 1, weight=0.3, other=1.2) + assert round(G.size(weight="weight"), 2) == 6.3 + assert round(G.size(weight="other"), 2) == 7.2 + + def test_to_undirected_reciprocal(self): + G = self.Graph() + G.add_edge(1, 2) + assert G.to_undirected().has_edge(1, 2) + assert not G.to_undirected(reciprocal=True).has_edge(1, 2) + G.add_edge(2, 1) + assert G.to_undirected(reciprocal=True).has_edge(1, 2) + + def test_reverse_copy(self): + G = nx.MultiDiGraph([(0, 1), (0, 1)]) + R = G.reverse() + assert sorted(R.edges()) == [(1, 0), (1, 0)] + R.remove_edge(1, 0) + assert sorted(R.edges()) == [(1, 0)] + assert sorted(G.edges()) == [(0, 1), (0, 1)] + + def test_reverse_nocopy(self): + G = nx.MultiDiGraph([(0, 1), (0, 1)]) + R = G.reverse(copy=False) + assert sorted(R.edges()) == [(1, 0), (1, 0)] + pytest.raises(nx.NetworkXError, R.remove_edge, 1, 0) + + def test_di_attributes_cached(self): + G = self.K3.copy() + assert id(G.in_edges) == id(G.in_edges) + assert id(G.out_edges) == id(G.out_edges) + assert id(G.in_degree) == id(G.in_degree) + assert id(G.out_degree) == id(G.out_degree) + assert id(G.succ) == id(G.succ) + assert id(G.pred) == id(G.pred) + + +class TestMultiDiGraph(BaseMultiDiGraphTester, _TestMultiGraph): + def setup_method(self): + self.Graph = nx.MultiDiGraph + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = {0: {}, 1: {}, 2: {}} + # K3._adj is synced with K3._succ + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u == v: + continue + d = {0: {}} + self.K3._succ[u][v] = d + self.K3._pred[v][u] = d + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] 
= {} + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G._adj == {0: {1: {0: {}}}, 1: {}} + assert G._succ == {0: {1: {0: {}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G._adj == {0: {1: {0: {}}}, 1: {}} + assert G._succ == {0: {1: {0: {}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}}}} + with pytest.raises(ValueError, match="None cannot be a node"): + G.add_edge(None, 3) + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})]) + assert G._adj == {0: {1: {0: {}, 1: {"weight": 3}}}, 1: {}} + assert G._succ == {0: {1: {0: {}, 1: {"weight": 3}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}, 1: {"weight": 3}}}} + + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})], weight=2) + assert G._succ == { + 0: {1: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + 1: {}, + } + assert G._pred == { + 0: {}, + 1: {0: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + } + + G = self.Graph() + edges = [ + (0, 1, {"weight": 3}), + (0, 1, (("weight", 2),)), + (0, 1, 5), + (0, 1, "s"), + ] + G.add_edges_from(edges) + keydict = {0: {"weight": 3}, 1: {"weight": 2}, 5: {}, "s": {}} + assert G._succ == {0: {1: keydict}, 1: {}} + assert G._pred == {1: {0: keydict}, 0: {}} + + # too few in tuple + pytest.raises(nx.NetworkXError, G.add_edges_from, [(0,)]) + # too many in tuple + pytest.raises(nx.NetworkXError, G.add_edges_from, [(0, 1, 2, 3, 4)]) + # not a tuple + pytest.raises(TypeError, G.add_edges_from, [0]) + with pytest.raises(ValueError, match="None cannot be a node"): + G.add_edges_from([(None, 3), (3, 2)]) + + def test_remove_edge(self): + G = self.K3 + G.remove_edge(0, 1) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0) + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, 0, 2, key=1) + + def test_remove_multiedge(self): + G = self.K3 + G.add_edge(0, 1, key="parallel edge") + G.remove_edge(0, 1, key="parallel edge") + assert G._adj == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + assert G._succ == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edge(0, 1) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0) + + def test_remove_edges_from(self): + G = self.K3 + G.remove_edges_from([(0, 1)]) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edges_from([(0, 0)]) # silent fail + + +class TestEdgeSubgraph(_TestMultiGraphEdgeSubgraph): + """Unit tests for the :meth:`MultiDiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a quadruply-linked path graph on five nodes. 
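+ # (two parallel copies of the path in each direction)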
+ G = nx.MultiDiGraph() + nx.add_path(G, range(5)) + nx.add_path(G, range(5)) + nx.add_path(G, reversed(range(5))) + nx.add_path(G, reversed(range(5))) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.adj[0][1][0]["name"] = "edge010" + G.adj[0][1][1]["name"] = "edge011" + G.adj[3][4][0]["name"] = "edge340" + G.adj[3][4][1]["name"] = "edge341" + G.graph["name"] = "graph" + # Get the subgraph induced by one of the first edges and one of + # the last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1, 0), (3, 4, 1)]) + + +class CustomDictClass(UserDict): + pass + + +class MultiDiGraphSubClass(nx.MultiDiGraph): + node_dict_factory = CustomDictClass # type: ignore[assignment] + node_attr_dict_factory = CustomDictClass # type: ignore[assignment] + adjlist_outer_dict_factory = CustomDictClass # type: ignore[assignment] + adjlist_inner_dict_factory = CustomDictClass # type: ignore[assignment] + edge_key_dict_factory = CustomDictClass # type: ignore[assignment] + edge_attr_dict_factory = CustomDictClass # type: ignore[assignment] + graph_attr_dict_factory = CustomDictClass # type: ignore[assignment] + + +class TestMultiDiGraphSubclass(TestMultiDiGraph): + def setup_method(self): + self.Graph = MultiDiGraphSubClass + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.K3.adjlist_outer_dict_factory( + { + 0: self.K3.adjlist_inner_dict_factory(), + 1: self.K3.adjlist_inner_dict_factory(), + 2: self.K3.adjlist_inner_dict_factory(), + } + ) + # K3._adj is synced with K3._succ + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u == v: + continue + d = {0: {}} + self.K3._succ[u][v] = d + self.K3._pred[v][u] = d + self.K3._node = self.K3.node_dict_factory() + self.K3._node[0] = self.K3.node_attr_dict_factory() + self.K3._node[1] = self.K3.node_attr_dict_factory() + self.K3._node[2] = self.K3.node_attr_dict_factory() diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_multigraph.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_multigraph.py new file mode 100644 index 0000000..cd912d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_multigraph.py @@ -0,0 +1,528 @@ +from collections import UserDict + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + +from .test_graph import BaseAttrGraphTester +from .test_graph import TestGraph as _TestGraph + + +class BaseMultiGraphTester(BaseAttrGraphTester): + def test_has_edge(self): + G = self.K3 + assert G.has_edge(0, 1) + assert not G.has_edge(0, -1) + assert G.has_edge(0, 1, 0) + assert not G.has_edge(0, 1, 1) + + def test_get_edge_data(self): + G = self.K3 + assert G.get_edge_data(0, 1) == {0: {}} + assert G[0][1] == {0: {}} + assert G[0][1][0] == {} + assert G.get_edge_data(10, 20) is None + assert G.get_edge_data(0, 1, 0) == {} + + def test_adjacency(self): + G = self.K3 + assert dict(G.adjacency()) == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + def deepcopy_edge_attr(self, H, G): + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] != H[1][2][0]["foo"] + + def shallow_copy_edge_attr(self, H, G): + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + + def graphs_equal(self, H, G): + assert G._adj ==
H._adj + assert G._node == H._node + assert G.graph == H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2][0] is H._adj[2][1][0] + assert G._adj[1][2][0] is G._adj[2][1][0] + else: # at least one is directed + if not G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2][0] is H._pred[2][1][0] + assert G._succ[1][2][0] is G._pred[2][1][0] + + def same_attrdict(self, H, G): + # same attrdict in the edgedata + old_foo = H[1][2][0]["foo"] + H.adj[1][2][0]["foo"] = "baz" + assert G._adj == H._adj + H.adj[1][2][0]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node == H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def different_attrdict(self, H, G): + # used by graph_equal_but_different + old_foo = H[1][2][0]["foo"] + H.adj[1][2][0]["foo"] = "baz" + assert G._adj != H._adj + H.adj[1][2][0]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node != H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def test_to_undirected(self): + G = self.K3 + self.add_attributes(G) + H = nx.MultiGraph(G) + self.is_shallow_copy(H, G) + H = G.to_undirected() + self.is_deepcopy(H, G) + + def test_to_directed(self): + G = self.K3 + self.add_attributes(G) + H = nx.MultiDiGraph(G) + self.is_shallow_copy(H, G) + H = G.to_directed() + self.is_deepcopy(H, G) + + def test_number_of_edges_selfloops(self): + G = self.K3 + G.add_edge(0, 0) + G.add_edge(0, 0) + G.add_edge(0, 0, key="parallel edge") + G.remove_edge(0, 0, key="parallel edge") + assert G.number_of_edges(0, 0) == 2 + G.remove_edge(0, 0) + assert G.number_of_edges(0, 0) == 1 + + def test_edge_lookup(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + G.add_edge(1, 2, "key", foo="biz") + assert edges_equal(G.edges[1, 2, 0], {"foo": "bar"}) + assert edges_equal(G.edges[1, 2, "key"], {"foo": "biz"}) + + def test_edge_attr(self): + G = self.Graph() + G.add_edge(1, 2, key="k1", foo="bar") + G.add_edge(1, 2, key="k2", foo="baz") + assert isinstance(G.get_edge_data(1, 2), G.edge_key_dict_factory) + assert all( + isinstance(d, G.edge_attr_dict_factory) for u, v, d in G.edges(data=True) + ) + assert edges_equal( + G.edges(keys=True, data=True), + [(1, 2, "k1", {"foo": "bar"}), (1, 2, "k2", {"foo": "baz"})], + ) + assert edges_equal( + G.edges(keys=True, data="foo"), [(1, 2, "k1", "bar"), (1, 2, "k2", "baz")] + ) + + def test_edge_attr4(self): + G = self.Graph() + G.add_edge(1, 2, key=0, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + G[1][2][0]["data"] = 10 # OK to set data like this + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 10, "spam": "bar", "bar": "foo"})] + ) + + G.adj[1][2][0]["data"] = 20 + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 20, "spam": "bar", "bar": "foo"})] + ) + G.edges[1, 2, 0]["data"] = 21 # another spelling, "edge" + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 21, "spam": "bar", "bar": "foo"})] + ) + G.adj[1][2][0]["listdata"] = [20, 200] + G.adj[1][2][0]["weight"] = 20 + assert edges_equal( + G.edges(data=True), + [ + ( + 1, + 2, + { + "data": 21, + "spam": "bar", + "bar": "foo", + "listdata": [20, 200], + "weight": 20, + }, + ) + ], + ) + + +class 
TestMultiGraph(BaseMultiGraphTester, _TestGraph): + def setup_method(self): + self.Graph = nx.MultiGraph + # build K3 + ed1, ed2, ed3 = ({0: {}}, {0: {}}, {0: {}}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + expected = [(1, {2: {0: {}}}), (2, {1: {0: {}}})] + assert sorted(G.adj.items()) == expected + + def test_data_multigraph_input(self): + # standard case with edge keys and edge data + edata0 = {"w": 200, "s": "foo"} + edata1 = {"w": 201, "s": "bar"} + keydict = {0: edata0, 1: edata1} + dododod = {"a": {"b": keydict}} + + multiple_edge = [("a", "b", 0, edata0), ("a", "b", 1, edata1)] + single_edge = [("a", "b", 0, keydict)] + + G = self.Graph(dododod, multigraph_input=True) + assert list(G.edges(keys=True, data=True)) == multiple_edge + G = self.Graph(dododod, multigraph_input=None) + assert list(G.edges(keys=True, data=True)) == multiple_edge + G = self.Graph(dododod, multigraph_input=False) + assert list(G.edges(keys=True, data=True)) == single_edge + + # test round-trip to_dict_of_dict and MultiGraph constructor + G = self.Graph(dododod, multigraph_input=True) + H = self.Graph(nx.to_dict_of_dicts(G)) + assert nx.is_isomorphic(G, H) is True # test that default is True + for mgi in [True, False]: + H = self.Graph(nx.to_dict_of_dicts(G), multigraph_input=mgi) + assert nx.is_isomorphic(G, H) == mgi + + # Set up cases for when incoming_graph_data is not multigraph_input + etraits = {"w": 200, "s": "foo"} + egraphics = {"color": "blue", "shape": "box"} + edata = {"traits": etraits, "graphics": egraphics} + dodod1 = {"a": {"b": edata}} + dodod2 = {"a": {"b": etraits}} + dodod3 = {"a": {"b": {"traits": etraits, "s": "foo"}}} + dol = {"a": ["b"]} + + multiple_edge = [("a", "b", "traits", etraits), ("a", "b", "graphics", egraphics)] + single_edge = [("a", "b", 0, {})] # type: ignore[var-annotated] + single_edge1 = [("a", "b", 0, edata)] + single_edge2 = [("a", "b", 0, etraits)] + single_edge3 = [("a", "b", 0, {"traits": etraits, "s": "foo"})] + + cases = [ # (dod, mgi, edges) + (dodod1, True, multiple_edge), + (dodod1, False, single_edge1), + (dodod2, False, single_edge2), + (dodod3, False, single_edge3), + (dol, False, single_edge), + ] + + @pytest.mark.parametrize("dod, mgi, edges", cases) + def test_non_multigraph_input(self, dod, mgi, edges): + G = self.Graph(dod, multigraph_input=mgi) + assert list(G.edges(keys=True, data=True)) == edges + G = nx.to_networkx_graph(dod, create_using=self.Graph, multigraph_input=mgi) + assert list(G.edges(keys=True, data=True)) == edges + + mgi_none_cases = [ + (dodod1, multiple_edge), + (dodod2, single_edge2), + (dodod3, single_edge3), + ] + + @pytest.mark.parametrize("dod, edges", mgi_none_cases) + def test_non_multigraph_input_mgi_none(self, dod, edges): + # test constructor without to_networkx_graph for mgi=None + G = self.Graph(dod) + assert list(G.edges(keys=True, data=True)) == edges + + raise_cases = [dodod2, dodod3, dol] + + @pytest.mark.parametrize("dod", raise_cases) + def test_non_multigraph_input_raise(self, dod): + # cases where NetworkXError is raised + pytest.raises(nx.NetworkXError, self.Graph, dod, multigraph_input=True) + pytest.raises( + nx.NetworkXError, + nx.to_networkx_graph, + dod, + 
create_using=self.Graph, + multigraph_input=True, + ) + + def test_getitem(self): + G = self.K3 + assert G[0] == {1: {0: {}}, 2: {0: {}}} + with pytest.raises(KeyError): + G.__getitem__("j") + with pytest.raises(TypeError): + G.__getitem__(["A"]) + + def test_remove_node(self): + G = self.K3 + G.remove_node(0) + assert G.adj == {1: {2: {0: {}}}, 2: {1: {0: {}}}} + with pytest.raises(nx.NetworkXError): + G.remove_node(-1) + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}} + G = self.Graph() + with pytest.raises(ValueError): + G.add_edge(None, "anything") + + def test_add_edge_conflicting_key(self): + G = self.Graph() + G.add_edge(0, 1, key=1) + G.add_edge(0, 1) + assert G.number_of_edges() == 2 + G = self.Graph() + G.add_edges_from([(0, 1, 1, {})]) + G.add_edges_from([(0, 1)]) + assert G.number_of_edges() == 2 + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})]) + assert G.adj == { + 0: {1: {0: {}, 1: {"weight": 3}}}, + 1: {0: {0: {}, 1: {"weight": 3}}}, + } + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})], weight=2) + assert G.adj == { + 0: {1: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + 1: {0: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + } + G = self.Graph() + edges = [ + (0, 1, {"weight": 3}), + (0, 1, (("weight", 2),)), + (0, 1, 5), + (0, 1, "s"), + ] + G.add_edges_from(edges) + keydict = {0: {"weight": 3}, 1: {"weight": 2}, 5: {}, "s": {}} + assert G._adj == {0: {1: keydict}, 1: {0: keydict}} + + # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0,)]) + # too many in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3, 4)]) + # not a tuple + with pytest.raises(TypeError): + G.add_edges_from([0]) + + def test_multigraph_add_edges_from_four_tuple_misordered(self): + """add_edges_from expects 4-tuples of the format (u, v, key, data_dict). + + Ensure 4-tuples of form (u, v, data_dict, key) raise exception. 
+ """ + G = nx.MultiGraph() + with pytest.raises(TypeError): + # key/data values flipped in 4-tuple + G.add_edges_from([(0, 1, {"color": "red"}, 0)]) + + def test_remove_edge(self): + G = self.K3 + G.remove_edge(0, 1) + assert G.adj == {0: {2: {0: {}}}, 1: {2: {0: {}}}, 2: {0: {0: {}}, 1: {0: {}}}} + + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + with pytest.raises(nx.NetworkXError): + G.remove_edge(0, 2, key=1) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + kd = {0: {}} + assert G.adj == {0: {2: kd}, 1: {2: kd}, 2: {0: kd, 1: kd}} + G.remove_edges_from([(0, 0)]) # silent fail + self.K3.add_edge(0, 1) + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=True, keys=True))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=False, keys=True))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=False, keys=False))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from([(0, 1, 0), (0, 2, 0, {}), (1, 2)]) + assert G.adj == {0: {1: {1: {}}}, 1: {0: {1: {}}}, 2: {}} + + def test_remove_multiedge(self): + G = self.K3 + G.add_edge(0, 1, key="parallel edge") + G.remove_edge(0, 1, key="parallel edge") + assert G.adj == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edge(0, 1) + kd = {0: {}} + assert G.adj == {0: {2: kd}, 1: {2: kd}, 2: {0: kd, 1: kd}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + +class TestEdgeSubgraph: + """Unit tests for the :meth:`MultiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a doubly-linked path graph on five nodes. + G = nx.MultiGraph() + nx.add_path(G, range(5)) + nx.add_path(G, range(5)) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.adj[0][1][0]["name"] = "edge010" + G.adj[0][1][1]["name"] = "edge011" + G.adj[3][4][0]["name"] = "edge340" + G.adj[3][4][1]["name"] = "edge341" + G.graph["name"] = "graph" + # Get the subgraph induced by one of the first edges and one of + # the last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1, 0), (3, 4, 1)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert [(0, 1, 0, "edge010"), (3, 4, 1, "edge341")] == sorted( + self.H.edges(keys=True, data="name") + ) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_remove_node(self): + """Tests that removing a node in the original graph does + affect the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes()) + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. + self.G.nodes[0]["name"] = "foo" + assert self.G.nodes[0] == self.H.nodes[0] + self.H.nodes[1]["name"] = "bar" + assert self.G.nodes[1] == self.H.nodes[1] + + def test_edge_attr_dict(self): + """Tests that the edge attribute dictionary of the two graphs is + the same object. 
+ + """ + for u, v, k in self.H.edges(keys=True): + assert self.G._adj[u][v][k] == self.H._adj[u][v][k] + # Making a change to G should make a change in H and vice versa. + self.G._adj[0][1][0]["name"] = "foo" + assert self.G._adj[0][1][0]["name"] == self.H._adj[0][1][0]["name"] + self.H._adj[3][4][1]["name"] = "bar" + assert self.G._adj[3][4][1]["name"] == self.H._adj[3][4][1]["name"] + + def test_graph_attr_dict(self): + """Tests that the graph attribute dictionary of the two graphs + is the same object. + + """ + assert self.G.graph is self.H.graph + + +class CustomDictClass(UserDict): + pass + + +class MultiGraphSubClass(nx.MultiGraph): + node_dict_factory = CustomDictClass # type: ignore[assignment] + node_attr_dict_factory = CustomDictClass # type: ignore[assignment] + adjlist_outer_dict_factory = CustomDictClass # type: ignore[assignment] + adjlist_inner_dict_factory = CustomDictClass # type: ignore[assignment] + edge_key_dict_factory = CustomDictClass # type: ignore[assignment] + edge_attr_dict_factory = CustomDictClass # type: ignore[assignment] + graph_attr_dict_factory = CustomDictClass # type: ignore[assignment] + + +class TestMultiGraphSubclass(TestMultiGraph): + def setup_method(self): + self.Graph = MultiGraphSubClass + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.K3.adjlist_outer_dict_factory( + { + 0: self.K3.adjlist_inner_dict_factory(), + 1: self.K3.adjlist_inner_dict_factory(), + 2: self.K3.adjlist_inner_dict_factory(), + } + ) + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u != v: + d = {0: {}} + self.K3._adj[u][v] = d + self.K3._adj[v][u] = d + self.K3._node = self.K3.node_dict_factory() + self.K3._node[0] = self.K3.node_attr_dict_factory() + self.K3._node[1] = self.K3.node_attr_dict_factory() + self.K3._node[2] = self.K3.node_attr_dict_factory() diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_reportviews.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_reportviews.py new file mode 100644 index 0000000..8461be2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_reportviews.py @@ -0,0 +1,1421 @@ +import pickle +from copy import deepcopy + +import pytest + +import networkx as nx +from networkx.classes import reportviews as rv +from networkx.classes.reportviews import NodeDataView + + +# Nodes +class TestNodeView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.nv = cls.G.nodes # NodeView(G) + + def test_pickle(self): + import pickle + + nv = self.nv + pnv = pickle.loads(pickle.dumps(nv, -1)) + assert nv == pnv + assert nv.__slots__ == pnv.__slots__ + + def test_str(self): + assert str(self.nv) == "[0, 1, 2, 3, 4, 5, 6, 7, 8]" + + def test_repr(self): + assert repr(self.nv) == "NodeView((0, 1, 2, 3, 4, 5, 6, 7, 8))" + + def test_contains(self): + G = self.G.copy() + nv = G.nodes + assert 7 in nv + assert 9 not in nv + G.remove_node(7) + G.add_node(9) + assert 7 not in nv + assert 9 in nv + + def test_getitem(self): + G = self.G.copy() + nv = G.nodes + G.nodes[3]["foo"] = "bar" + assert nv[7] == {} + assert nv[3] == {"foo": "bar"} + # slicing + with pytest.raises(nx.NetworkXError): + G.nodes[0:5] + + def test_iter(self): + nv = self.nv + for i, n in enumerate(nv): + assert i == n + inv = iter(nv) + assert next(inv) == 0 + assert iter(nv) != nv + assert iter(inv) == inv + inv2 = iter(nv) + next(inv2) + assert list(inv) == list(inv2) + # odd 
case where NodeView calls NodeDataView with data=False + nnv = nv(data=False) + for i, n in enumerate(nnv): + assert i == n + + def test_call(self): + nodes = self.nv + assert nodes is nodes() + assert nodes is not nodes(data=True) + assert nodes is not nodes(data="weight") + + +class TestNodeDataView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.nv = NodeDataView(cls.G) + cls.ndv = cls.G.nodes.data(True) + cls.nwv = cls.G.nodes.data("foo") + + def test_viewtype(self): + nv = self.G.nodes + ndvfalse = nv.data(False) + assert nv is ndvfalse + assert nv is not self.ndv + + def test_pickle(self): + import pickle + + nv = self.nv + pnv = pickle.loads(pickle.dumps(nv, -1)) + assert nv == pnv + assert nv.__slots__ == pnv.__slots__ + + def test_str(self): + msg = str([(n, {}) for n in range(9)]) + assert str(self.ndv) == msg + + def test_repr(self): + expected = "NodeDataView((0, 1, 2, 3, 4, 5, 6, 7, 8))" + assert repr(self.nv) == expected + expected = ( + "NodeDataView({0: {}, 1: {}, 2: {}, 3: {}, " + + "4: {}, 5: {}, 6: {}, 7: {}, 8: {}})" + ) + assert repr(self.ndv) == expected + expected = ( + "NodeDataView({0: None, 1: None, 2: None, 3: None, 4: None, " + + "5: None, 6: None, 7: None, 8: None}, data='foo')" + ) + assert repr(self.nwv) == expected + + def test_contains(self): + G = self.G.copy() + nv = G.nodes.data() + nwv = G.nodes.data("foo") + G.nodes[3]["foo"] = "bar" + assert (7, {}) in nv + assert (3, {"foo": "bar"}) in nv + assert (3, "bar") in nwv + assert (7, None) in nwv + # default + nwv_def = G.nodes(data="foo", default="biz") + assert (7, "biz") in nwv_def + assert (3, "bar") in nwv_def + + def test_getitem(self): + G = self.G.copy() + nv = G.nodes + G.nodes[3]["foo"] = "bar" + assert nv[3] == {"foo": "bar"} + # default + nwv_def = G.nodes(data="foo", default="biz") + assert nwv_def[7], "biz" + assert nwv_def[3] == "bar" + # slicing + with pytest.raises(nx.NetworkXError): + G.nodes.data()[0:5] + + def test_iter(self): + G = self.G.copy() + nv = G.nodes.data() + ndv = G.nodes.data(True) + nwv = G.nodes.data("foo") + for i, (n, d) in enumerate(nv): + assert i == n + assert d == {} + inv = iter(nv) + assert next(inv) == (0, {}) + G.nodes[3]["foo"] = "bar" + # default + for n, d in nv: + if n == 3: + assert d == {"foo": "bar"} + else: + assert d == {} + # data=True + for n, d in ndv: + if n == 3: + assert d == {"foo": "bar"} + else: + assert d == {} + # data='foo' + for n, d in nwv: + if n == 3: + assert d == "bar" + else: + assert d is None + # data='foo', default=1 + for n, d in G.nodes.data("foo", default=1): + if n == 3: + assert d == "bar" + else: + assert d == 1 + + +def test_nodedataview_unhashable(): + G = nx.path_graph(9) + G.nodes[3]["foo"] = "bar" + nvs = [G.nodes.data()] + nvs.append(G.nodes.data(True)) + H = G.copy() + H.nodes[4]["foo"] = {1, 2, 3} + nvs.append(H.nodes.data(True)) + # raise unhashable + for nv in nvs: + pytest.raises(TypeError, set, nv) + pytest.raises(TypeError, eval, "nv | nv", locals()) + # no raise... 
hashable + Gn = G.nodes.data(False) + set(Gn) + Gn | Gn + Gn = G.nodes.data("foo") + set(Gn) + Gn | Gn + + +class TestNodeViewSetOps: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes + + def n_its(self, nodes): + return set(nodes) + + def test_len(self): + G = self.G.copy() + nv = G.nodes + assert len(nv) == 9 + G.remove_node(7) + assert len(nv) == 8 + G.add_node(9) + assert len(nv) == 9 + + def test_and(self): + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv & some_nodes == self.n_its(range(5, 9)) + assert some_nodes & nv == self.n_its(range(5, 9)) + + def test_or(self): + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv | some_nodes == self.n_its(range(12)) + assert some_nodes | nv == self.n_its(range(12)) + + def test_xor(self): + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + nodes = {0, 1, 2, 3, 4, 9, 10, 11} + assert nv ^ some_nodes == self.n_its(nodes) + assert some_nodes ^ nv == self.n_its(nodes) + + def test_sub(self): + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv - some_nodes == self.n_its(range(5)) + assert some_nodes - nv == self.n_its(range(9, 12)) + + +class TestNodeDataViewSetOps(TestNodeViewSetOps): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes.data("foo") + + def n_its(self, nodes): + return {(node, "bar" if node == 3 else None) for node in nodes} + + +class TestNodeDataViewDefaultSetOps(TestNodeDataViewSetOps): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes.data("foo", default=1) + + def n_its(self, nodes): + return {(node, "bar" if node == 3 else 1) for node in nodes} + + +# Edges Data View +class TestEdgeDataView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.eview = nx.reportviews.EdgeView + + def test_pickle(self): + import pickle + + ev = self.eview(self.G)(data=True) + pev = pickle.loads(pickle.dumps(ev, -1)) + assert list(ev) == list(pev) + assert ev.__slots__ == pev.__slots__ + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]].update(kwds) + + def test_str(self): + ev = self.eview(self.G)(data=True) + rep = str([(n, n + 1, {}) for n in range(8)]) + assert str(ev) == rep + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "EdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_iterdata(self): + G = self.G.copy() + evr = self.eview(G) + ev = evr(data=True) + ev_def = evr(data="foo", default=1) + + for u, v, d in ev: + pass + assert d == {} + + for u, v, wt in ev_def: + pass + assert wt == 1 + + self.modify_edge(G, (2, 3), foo="bar") + for e in ev: + assert len(e) == 3 + if set(e[:2]) == {2, 3}: + assert e[2] == {"foo": "bar"} + checked = True + else: + assert e[2] == {} + assert checked + + for e in ev_def: + assert len(e) == 3 + if set(e[:2]) == {2, 3}: + assert e[2] == "bar" + checked_wt = True + else: + assert e[2] == 1 + assert checked_wt + + def test_iter(self): + evr = self.eview(self.G) + ev = evr() + for u, v in ev: + pass + iev = iter(ev) + assert next(iev) == (0, 1) + assert iter(ev) != ev + assert iter(iev) == iev + + def test_contains(self): + evr = self.eview(self.G) + ev = evr() + if self.G.is_directed(): + assert (1, 2) in ev and (2, 1) not in ev + else: + assert (1, 2) in ev and (2, 1) in ev + 
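# nodes 1 and 4 both exist, but the path graph has no (1, 4) edge +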
assert (1, 4) not in ev + assert (1, 90) not in ev + assert (90, 1) not in ev + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + if self.G.is_directed(): + assert (0, 1) in ev + assert (1, 2) not in ev + assert (2, 3) in ev + else: + assert (0, 1) in ev + assert (1, 2) in ev + assert (2, 3) in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + def test_len(self): + evr = self.eview(self.G) + ev = evr(data="foo") + assert len(ev) == 8 + assert len(evr(1)) == 2 + assert len(evr([1, 2, 3])) == 4 + + assert len(self.G.edges(1)) == 2 + assert len(self.G.edges()) == 8 + assert len(self.G.edges) == 8 + + H = self.G.copy() + H.add_edge(1, 1) + assert len(H.edges(1)) == 3 + assert len(H.edges()) == 9 + assert len(H.edges) == 9 + + +class TestOutEdgeDataView(TestEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.DiGraph()) + cls.eview = nx.reportviews.OutEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "OutEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_len(self): + evr = self.eview(self.G) + ev = evr(data="foo") + assert len(ev) == 8 + assert len(evr(1)) == 1 + assert len(evr([1, 2, 3])) == 3 + + assert len(self.G.edges(1)) == 1 + assert len(self.G.edges()) == 8 + assert len(self.G.edges) == 8 + + H = self.G.copy() + H.add_edge(1, 1) + assert len(H.edges(1)) == 2 + assert len(H.edges()) == 9 + assert len(H.edges) == 9 + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert (1, 2) not in ev + assert (2, 3) in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +class TestInEdgeDataView(TestOutEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.DiGraph()) + cls.eview = nx.reportviews.InEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "InEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) not in ev + assert (1, 2) in ev + assert (2, 3) not in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +class TestMultiEdgeDataView(TestEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiGraph()) + cls.eview = nx.reportviews.MultiEdgeView + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]][0].update(kwds) + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "MultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert (1, 2) in ev + assert (2, 3) in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +class TestOutMultiEdgeDataView(TestOutEdgeDataView): + 
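# rerun the OutEdgeDataView checks against a MultiDiGraph path +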
@classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.eview = nx.reportviews.OutMultiEdgeView + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]][0].update(kwds) + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "OutMultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert (1, 2) not in ev + assert (2, 3) in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +class TestInMultiEdgeDataView(TestOutMultiEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.eview = nx.reportviews.InMultiEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "InMultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) not in ev + assert (1, 2) in ev + assert (2, 3) not in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +# Edge Views +class TestEdgeView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.eview = nx.reportviews.EdgeView + + def test_pickle(self): + import pickle + + ev = self.eview(self.G) + pev = pickle.loads(pickle.dumps(ev, -1)) + assert ev == pev + assert ev.__slots__ == pev.__slots__ + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]].update(kwds) + + def test_str(self): + ev = self.eview(self.G) + rep = str([(n, n + 1) for n in range(8)]) + assert str(ev) == rep + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "EdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + + "(4, 5), (5, 6), (6, 7), (7, 8)])" + ) + assert repr(ev) == rep + + def test_getitem(self): + G = self.G.copy() + ev = G.edges + G.edges[0, 1]["foo"] = "bar" + assert ev[0, 1] == {"foo": "bar"} + + # slicing + with pytest.raises(nx.NetworkXError, match=".*does not support slicing"): + G.edges[0:5] + + # Invalid edge + with pytest.raises(KeyError, match=r".*edge.*is not in the graph."): + G.edges[0, 9] + + def test_call(self): + ev = self.eview(self.G) + assert id(ev) == id(ev()) + assert id(ev) == id(ev(data=False)) + assert id(ev) != id(ev(data=True)) + assert id(ev) != id(ev(nbunch=1)) + + def test_data(self): + ev = self.eview(self.G) + assert id(ev) != id(ev.data()) + assert id(ev) == id(ev.data(data=False)) + assert id(ev) != id(ev.data(data=True)) + assert id(ev) != id(ev.data(nbunch=1)) + + def test_iter(self): + ev = self.eview(self.G) + for u, v in ev: + pass + iev = iter(ev) + assert next(iev) == (0, 1) + assert iter(ev) != ev + assert iter(iev) == iev + + def test_contains(self): + ev = self.eview(self.G) + edv = ev() + if self.G.is_directed(): + assert (1, 2) in ev and (2, 1) not in ev + assert (1, 2) in edv and (2, 1) not in edv + else: + assert (1, 2) in ev and (2, 1) in ev + assert (1, 2) in edv and (2, 1) in edv + assert (1, 4) not in ev + assert (1, 4) not in edv + # edge not in graph + assert (1, 90) not in ev + assert (90, 1) not in ev + assert (1, 90) not in edv + assert (90, 1) not in 
edv
+
+    def test_contains_with_nbunch(self):
+        ev = self.eview(self.G)
+        evn = ev(nbunch=[0, 2])
+        assert (0, 1) in evn
+        assert (1, 2) in evn
+        assert (2, 3) in evn
+        assert (3, 4) not in evn
+        assert (4, 5) not in evn
+        assert (5, 6) not in evn
+        assert (7, 8) not in evn
+        assert (8, 9) not in evn
+
+    def test_len(self):
+        ev = self.eview(self.G)
+        num_ed = 9 if self.G.is_multigraph() else 8
+        assert len(ev) == num_ed
+
+        H = self.G.copy()
+        H.add_edge(1, 1)
+        assert len(H.edges(1)) == 3 + H.is_multigraph() - H.is_directed()
+        assert len(H.edges()) == num_ed + 1
+        assert len(H.edges) == num_ed + 1
+
+    def test_and(self):
+        ev = self.eview(self.G)
+        some_edges = {(0, 1), (1, 0), (0, 2)}
+        if self.G.is_directed():
+            # compare with "==" so a wrong intersection actually fails;
+            # "assert x, y" only checks truthiness with y as the message
+            assert some_edges & ev == {(0, 1)}
+            assert ev & some_edges == {(0, 1)}
+        else:
+            assert ev & some_edges == {(0, 1), (1, 0)}
+            assert some_edges & ev == {(0, 1), (1, 0)}
+
+    def test_or(self):
+        ev = self.eview(self.G)
+        some_edges = {(0, 1), (1, 0), (0, 2)}
+        result1 = {(n, n + 1) for n in range(8)}
+        result1.update(some_edges)
+        result2 = {(n + 1, n) for n in range(8)}
+        result2.update(some_edges)
+        assert (ev | some_edges) in (result1, result2)
+        assert (some_edges | ev) in (result1, result2)
+
+    def test_xor(self):
+        ev = self.eview(self.G)
+        some_edges = {(0, 1), (1, 0), (0, 2)}
+        if self.G.is_directed():
+            result = {(n, n + 1) for n in range(1, 8)}
+            result.update({(1, 0), (0, 2)})
+            assert ev ^ some_edges == result
+        else:
+            result = {(n, n + 1) for n in range(1, 8)}
+            result.update({(0, 2)})
+            assert ev ^ some_edges == result
+
+    def test_sub(self):
+        ev = self.eview(self.G)
+        some_edges = {(0, 1), (1, 0), (0, 2)}
+        result = {(n, n + 1) for n in range(8)}
+        result.remove((0, 1))
+        assert ev - some_edges == result
+
+
+class TestOutEdgeView(TestEdgeView):
+    @classmethod
+    def setup_class(cls):
+        cls.G = nx.path_graph(9, nx.DiGraph())
+        cls.eview = nx.reportviews.OutEdgeView
+
+    def test_repr(self):
+        ev = self.eview(self.G)
+        rep = (
+            "OutEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
+            + "(4, 5), (5, 6), (6, 7), (7, 8)])"
+        )
+        assert repr(ev) == rep
+
+    def test_contains_with_nbunch(self):
+        ev = self.eview(self.G)
+        evn = ev(nbunch=[0, 2])
+        assert (0, 1) in evn
+        assert (1, 2) not in evn
+        assert (2, 3) in evn
+        assert (3, 4) not in evn
+        assert (4, 5) not in evn
+        assert (5, 6) not in evn
+        assert (7, 8) not in evn
+        assert (8, 9) not in evn
+
+
+class TestInEdgeView(TestEdgeView):
+    @classmethod
+    def setup_class(cls):
+        cls.G = nx.path_graph(9, nx.DiGraph())
+        cls.eview = nx.reportviews.InEdgeView
+
+    def test_repr(self):
+        ev = self.eview(self.G)
+        rep = (
+            "InEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
+            + "(4, 5), (5, 6), (6, 7), (7, 8)])"
+        )
+        assert repr(ev) == rep
+
+    def test_contains_with_nbunch(self):
+        ev = self.eview(self.G)
+        evn = ev(nbunch=[0, 2])
+        assert (0, 1) not in evn
+        assert (1, 2) in evn
+        assert (2, 3) not in evn
+        assert (3, 4) not in evn
+        assert (4, 5) not in evn
+        assert (5, 6) not in evn
+        assert (7, 8) not in evn
+        assert (8, 9) not in evn
+
+
+class TestMultiEdgeView(TestEdgeView):
+    @classmethod
+    def setup_class(cls):
+        cls.G = nx.path_graph(9, nx.MultiGraph())
+        cls.G.add_edge(1, 2, key=3, foo="bar")
+        cls.eview = nx.reportviews.MultiEdgeView
+
+    def modify_edge(self, G, e, **kwds):
+        if len(e) == 2:
+            e = e + (0,)
+        G._adj[e[0]][e[1]][e[2]].update(kwds)
+
+    def test_str(self):
+        ev = self.eview(self.G)
+        replist = [(n, n + 1, 0) for n in range(8)]
+        replist.insert(2, (1, 2, 3))
+        rep = str(replist)
+        assert str(ev) == rep
+
+    def test_getitem(self):
+        G = self.G.copy()
+        ev = G.edges
+        G.edges[0, 1, 0]["foo"] = "bar"
+        assert ev[0, 1, 0] == {"foo": "bar"}
+
+        # slicing
+        with pytest.raises(nx.NetworkXError):
+            G.edges[0:5]
+
+    def test_repr(self):
+        ev = self.eview(self.G)
+        rep = (
+            "MultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), "
+            + "(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])"
+        )
+        assert repr(ev) == rep
+
+    def test_call(self):
+        ev = self.eview(self.G)
+        assert id(ev) == id(ev(keys=True))
+        assert id(ev) == id(ev(data=False, keys=True))
+        assert id(ev) != id(ev(keys=False))
+        assert id(ev) != id(ev(data=True))
+        assert id(ev) != id(ev(nbunch=1))
+
+    def test_data(self):
+        ev = self.eview(self.G)
+        assert id(ev) != id(ev.data())
+        assert id(ev) == id(ev.data(data=False, keys=True))
+        assert id(ev) != id(ev.data(keys=False))
+        assert id(ev) != id(ev.data(data=True))
+        assert id(ev) != id(ev.data(nbunch=1))
+
+    def test_iter(self):
+        ev = self.eview(self.G)
+        for u, v, k in ev:
+            pass
+        iev = iter(ev)
+        assert next(iev) == (0, 1, 0)
+        assert iter(ev) != ev
+        assert iter(iev) == iev
+
+    def test_iterkeys(self):
+        G = self.G
+        evr = self.eview(G)
+        ev = evr(keys=True)
+        for u, v, k in ev:
+            pass
+        assert k == 0
+        ev = evr(keys=True, data="foo", default=1)
+        for u, v, k, wt in ev:
+            pass
+        assert wt == 1
+
+        self.modify_edge(G, (2, 3, 0), foo="bar")
+        ev = evr(keys=True, data=True)
+        checked = checked_multi = False  # initialize so a miss fails cleanly
+        for e in ev:
+            assert len(e) == 4
+            if set(e[:2]) == {2, 3}:
+                assert e[2] == 0
+                assert e[3] == {"foo": "bar"}
+                checked = True
+            elif set(e[:3]) == {1, 2, 3}:
+                assert e[2] == 3
+                assert e[3] == {"foo": "bar"}
+                checked_multi = True
+            else:
+                assert e[2] == 0
+                assert e[3] == {}
+        assert checked
+        assert checked_multi
+        ev = evr(keys=True, data="foo", default=1)
+        checked_wt = False
+        for e in ev:
+            if set(e[:2]) == {1, 2} and e[2] == 3:
+                assert e[3] == "bar"
+            if set(e[:2]) == {1, 2} and e[2] == 0:
+                assert e[3] == 1
+            if set(e[:2]) == {2, 3}:
+                assert e[2] == 0
+                assert e[3] == "bar"
+                assert len(e) == 4
+                checked_wt = True
+        assert checked_wt
+        ev = evr(keys=True)
+        for e in ev:
+            assert len(e) == 3
+        elist = sorted([(i, i + 1, 0) for i in range(8)] + [(1, 2, 3)])
+        assert sorted(ev) == elist
+        # test that the keyword arguments are passed correctly
+        ev = evr((1, 2), "foo", keys=True, default=1)
+        with pytest.raises(TypeError):
+            evr((1, 2), "foo", True, 1)
+        with pytest.raises(TypeError):
+            evr((1, 2), "foo", True, default=1)
+        for e in ev:
+            if set(e[:2]) == {1, 2}:
+                assert e[2] in {0, 3}
+                if e[2] == 3:
+                    assert e[3] == "bar"
+                else:  # e[2] == 0
+                    assert e[3] == 1
+        if G.is_directed():
+            assert len(list(ev)) == 3
+        else:
+            assert len(list(ev)) == 4
+
+    def test_or(self):
+        ev = self.eview(self.G)
+        some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
+        result = {(n, n + 1, 0) for n in range(8)}
+        result.update(some_edges)
+        result.update({(1, 2, 3)})
+        assert ev | some_edges == result
+        assert some_edges | ev == result
+
+    def test_sub(self):
+        ev = self.eview(self.G)
+        some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
+        result = {(n, n + 1, 0) for n in range(8)}
+        result.remove((0, 1, 0))
+        result.update({(1, 2, 3)})
+        assert ev - some_edges == result
+        # membership in an undirected view also matches reversed tuples,
+        # so the reverse difference depends on directedness
+        if self.G.is_directed():
+            assert some_edges - ev == {(1, 0, 0), (0, 2, 0)}
+        else:
+            assert some_edges - ev == {(0, 2, 0)}
+
+    def test_xor(self):
+        ev = self.eview(self.G)
+        some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
+        if self.G.is_directed():
+            result = {(n, n + 1, 0) for n in range(1, 8)}
+            result.update({(1, 0, 0), (0, 2, 0), (1, 2, 3)})
+            assert ev ^ some_edges == result
+            assert some_edges ^ ev == result
+        else:
+            result = {(n, n + 1, 0) for n in 
range(1, 8)} + result.update({(0, 2, 0), (1, 2, 3)}) + assert ev ^ some_edges == result + assert some_edges ^ ev == result + + def test_and(self): + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + if self.G.is_directed(): + assert ev & some_edges == {(0, 1, 0)} + assert some_edges & ev == {(0, 1, 0)} + else: + assert ev & some_edges == {(0, 1, 0), (1, 0, 0)} + assert some_edges & ev == {(0, 1, 0), (1, 0, 0)} + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert (1, 2) in evn + assert (2, 3) in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + +class TestOutMultiEdgeView(TestMultiEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.MultiDiGraph()) + cls.G.add_edge(1, 2, key=3, foo="bar") + cls.eview = nx.reportviews.OutMultiEdgeView + + def modify_edge(self, G, e, **kwds): + if len(e) == 2: + e = e + (0,) + G._adj[e[0]][e[1]][e[2]].update(kwds) + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "OutMultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0)," + + " (3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert (1, 2) not in evn + assert (2, 3) in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + +class TestInMultiEdgeView(TestMultiEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.MultiDiGraph()) + cls.G.add_edge(1, 2, key=3, foo="bar") + cls.eview = nx.reportviews.InMultiEdgeView + + def modify_edge(self, G, e, **kwds): + if len(e) == 2: + e = e + (0,) + G._adj[e[0]][e[1]][e[2]].update(kwds) + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "InMultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), " + + "(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) not in evn + assert (1, 2) in evn + assert (2, 3) not in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + +# Degrees +class TestDegreeView: + GRAPH = nx.Graph + dview = nx.reportviews.DegreeView + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(6, cls.GRAPH()) + cls.G.add_edge(1, 3, foo=2) + cls.G.add_edge(1, 3, foo=3) + + def test_pickle(self): + import pickle + + deg = self.G.degree + pdeg = pickle.loads(pickle.dumps(deg, -1)) + assert dict(deg) == dict(pdeg) + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 3), (2, 2), (3, 3), (4, 2), (5, 1)]) + assert str(dv) == rep + dv = self.G.degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.dview(self.G) + rep = "DegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})" + assert repr(dv) == rep + + def test_iter(self): + dv = self.dview(self.G) + for n, d in dv: + pass + idv = iter(dv) + assert iter(dv) != dv + assert iter(idv) == idv + assert next(idv) == (0, dv[0]) + assert next(idv) == (1, dv[1]) + # weighted + dv = self.dview(self.G, weight="foo") + for n, d in dv: + pass + idv = iter(dv) + assert iter(dv) != dv + assert iter(idv) == idv + assert next(idv) == (0, dv[0]) + assert 
next(idv) == (1, dv[1]) + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 2), (3, 3)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 3 + assert dv[2] == 2 + assert dv[3] == 3 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 5 + assert dv[2] == 2 + assert dv[3] == 5 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 5 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 2), (3, 5)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 5 + assert dvd[2] == 2 + assert dvd[3] == 5 + + def test_len(self): + dv = self.dview(self.G) + assert len(dv) == 6 + + +class TestDiDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.DiDegreeView + + def test_repr(self): + dv = self.G.degree() + rep = "DiDegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})" + assert repr(dv) == rep + + +class TestOutDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.OutDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 2), (2, 1), (3, 1), (4, 1), (5, 0)]) + assert str(dv) == rep + dv = self.G.out_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.out_degree() + rep = "OutDegreeView({0: 1, 1: 2, 2: 1, 3: 1, 4: 1, 5: 0})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 1)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 2 + assert dv[2] == 1 + assert dv[3] == 1 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 4 + assert dv[2] == 1 + assert dv[3] == 1 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 4 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 1)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 4 + assert dvd[2] == 1 + assert dvd[3] == 1 + + +class TestInDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.InDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 0), (1, 1), (2, 1), (3, 2), (4, 1), (5, 1)]) + assert str(dv) == rep + dv = self.G.in_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.in_degree() + rep = "InDegreeView({0: 0, 1: 1, 2: 1, 3: 2, 4: 1, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 0 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 2)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 2 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 4 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 0 + dvw = dv(1, weight="foo") + assert dvw == 1 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 4)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 0 + assert dvd[1] == 1 + assert dvd[2] == 1 + assert dvd[3] == 4 + + +class TestMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiGraph + dview = nx.reportviews.MultiDegreeView + + def test_str(self): + dv = self.dview(self.G) 
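+        # Degrees for path_graph(6) plus the two parallel (1, 3) edges added
+        # in setup_class: nodes 1 and 3 each gain 2, giving [1, 4, 2, 4, 2, 1].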
+ rep = str([(0, 1), (1, 4), (2, 2), (3, 4), (4, 2), (5, 1)]) + assert str(dv) == rep + dv = self.G.degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.degree() + rep = "MultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 2), (3, 4)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 4 + assert dv[2] == 2 + assert dv[3] == 4 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 7 + assert dv[2] == 2 + assert dv[3] == 7 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 7 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 2), (3, 7)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 7 + assert dvd[2] == 2 + assert dvd[3] == 7 + + +class TestDiMultiDegreeView(TestMultiDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.DiMultiDegreeView + + def test_repr(self): + dv = self.G.degree() + rep = "DiMultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})" + assert repr(dv) == rep + + +class TestOutMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.OutMultiDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 3), (2, 1), (3, 1), (4, 1), (5, 0)]) + assert str(dv) == rep + dv = self.G.out_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.out_degree() + rep = "OutMultiDegreeView({0: 1, 1: 3, 2: 1, 3: 1, 4: 1, 5: 0})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 1)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 3 + assert dv[2] == 1 + assert dv[3] == 1 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 6 + assert dv[2] == 1 + assert dv[3] == 1 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 6 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 1)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 6 + assert dvd[2] == 1 + assert dvd[3] == 1 + + +class TestInMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.InMultiDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 0), (1, 1), (2, 1), (3, 3), (4, 1), (5, 1)]) + assert str(dv) == rep + dv = self.G.in_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.in_degree() + rep = "InMultiDegreeView({0: 0, 1: 1, 2: 1, 3: 3, 4: 1, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 0 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 3)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 3 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 6 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 0 + dvw = dv(1, weight="foo") + assert dvw == 1 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 6)] + dvd = dict(dv(weight="foo")) + 
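+        # Weighted in-degrees under "foo": node 3 collects the plain (2, 3)
+        # edge (no "foo" attribute, so the default 1) plus the two parallel
+        # (1, 3) edges with foo=2 and foo=3, i.e. 1 + 2 + 3 = 6.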
assert dvd[0] == 0 + assert dvd[1] == 1 + assert dvd[2] == 1 + assert dvd[3] == 6 + + +@pytest.mark.parametrize( + ("reportview", "err_msg_terms"), + ( + (rv.NodeView, "list(G.nodes"), + (rv.NodeDataView, "list(G.nodes.data"), + (rv.EdgeView, "list(G.edges"), + # Directed EdgeViews + (rv.InEdgeView, "list(G.in_edges"), + (rv.OutEdgeView, "list(G.edges"), + # Multi EdgeViews + (rv.MultiEdgeView, "list(G.edges"), + (rv.InMultiEdgeView, "list(G.in_edges"), + (rv.OutMultiEdgeView, "list(G.edges"), + ), +) +def test_slicing_reportviews(reportview, err_msg_terms): + G = nx.complete_graph(3) + view = reportview(G) + with pytest.raises(nx.NetworkXError) as exc: + view[0:2] + errmsg = str(exc.value) + assert type(view).__name__ in errmsg + assert err_msg_terms in errmsg + + +@pytest.mark.parametrize( + "graph", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_cache_dict_get_set_state(graph): + G = nx.path_graph(5, graph()) + G.nodes, G.edges, G.adj, G.degree + if G.is_directed(): + G.pred, G.succ, G.in_edges, G.out_edges, G.in_degree, G.out_degree + cached_dict = G.__dict__ + assert "nodes" in cached_dict + assert "edges" in cached_dict + assert "adj" in cached_dict + assert "degree" in cached_dict + if G.is_directed(): + assert "pred" in cached_dict + assert "succ" in cached_dict + assert "in_edges" in cached_dict + assert "out_edges" in cached_dict + assert "in_degree" in cached_dict + assert "out_degree" in cached_dict + + # Raises error if the cached properties and views do not work + pickle.loads(pickle.dumps(G, -1)) + deepcopy(G) + + +def test_edge_views_inherit_from_EdgeViewABC(): + all_edge_view_classes = (v for v in dir(nx.reportviews) if "Edge" in v) + for eview_class in all_edge_view_classes: + assert issubclass( + getattr(nx.reportviews, eview_class), nx.reportviews.EdgeViewABC + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_special.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_special.py new file mode 100644 index 0000000..33b61a4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_special.py @@ -0,0 +1,131 @@ +import networkx as nx + +from .test_digraph import BaseDiGraphTester +from .test_digraph import TestDiGraph as _TestDiGraph +from .test_graph import BaseGraphTester +from .test_graph import TestGraph as _TestGraph +from .test_multidigraph import TestMultiDiGraph as _TestMultiDiGraph +from .test_multigraph import TestMultiGraph as _TestMultiGraph + + +def test_factories(): + class mydict1(dict): + pass + + class mydict2(dict): + pass + + class mydict3(dict): + pass + + class mydict4(dict): + pass + + class mydict5(dict): + pass + + for Graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph): + + class MyGraph(Graph): + node_dict_factory = mydict1 + adjlist_outer_dict_factory = mydict2 + adjlist_inner_dict_factory = mydict3 + edge_key_dict_factory = mydict4 + edge_attr_dict_factory = mydict5 + + G = MyGraph() + assert isinstance(G._node, mydict1) + assert isinstance(G._adj, mydict2) + G.add_node(1) + assert isinstance(G._adj[1], mydict3) + if G.is_directed(): + assert isinstance(G._pred, mydict2) + assert isinstance(G._succ, mydict2) + assert isinstance(G._pred[1], mydict3) + G.add_edge(1, 2) + if G.is_multigraph(): + assert isinstance(G._adj[1][2], mydict4) + assert isinstance(G._adj[1][2][0], mydict5) + else: + assert isinstance(G._adj[1][2], mydict5) + + +class TestSpecialGraph(_TestGraph): + def setup_method(self): + _TestGraph.setup_method(self) + self.Graph = 
nx.Graph + + +class TestThinGraph(BaseGraphTester): + def setup_method(self): + all_edge_dict = {"weight": 1} + + class MyGraph(nx.Graph): + def edge_attr_dict_factory(self): + return all_edge_dict + + self.Graph = MyGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + +class TestSpecialDiGraph(_TestDiGraph): + def setup_method(self): + _TestDiGraph.setup_method(self) + self.Graph = nx.DiGraph + + +class TestThinDiGraph(BaseDiGraphTester): + def setup_method(self): + all_edge_dict = {"weight": 1} + + class MyGraph(nx.DiGraph): + def edge_attr_dict_factory(self): + return all_edge_dict + + self.Graph = MyGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict) + ed4, ed5, ed6 = (all_edge_dict, all_edge_dict, all_edge_dict) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.k3adj + # K3._adj is synced with K3._succ + self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}} + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + ed1, ed2 = (all_edge_dict, all_edge_dict) + self.P3 = self.Graph() + self.P3._succ = {0: {1: ed1}, 1: {2: ed2}, 2: {}} + # P3._adj is synced with P3._succ + self.P3._pred = {0: {}, 1: {0: ed1}, 2: {1: ed2}} + self.P3._node = {} + self.P3._node[0] = {} + self.P3._node[1] = {} + self.P3._node[2] = {} + + +class TestSpecialMultiGraph(_TestMultiGraph): + def setup_method(self): + _TestMultiGraph.setup_method(self) + self.Graph = nx.MultiGraph + + +class TestSpecialMultiDiGraph(_TestMultiDiGraph): + def setup_method(self): + _TestMultiDiGraph.setup_method(self) + self.Graph = nx.MultiDiGraph diff --git a/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_subgraphviews.py b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_subgraphviews.py new file mode 100644 index 0000000..66d570c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/classes/tests/test_subgraphviews.py @@ -0,0 +1,371 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +class TestSubGraphView: + gview = staticmethod(nx.subgraph_view) + graph = nx.Graph + hide_edges_filter = staticmethod(nx.filters.hide_edges) + show_edges_filter = staticmethod(nx.filters.show_edges) + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=cls.graph()) + cls.hide_edges_w_hide_nodes = {(3, 4), (4, 5), (5, 6)} + + def test_hidden_nodes(self): + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + gview = self.gview + G = gview(self.G, filter_node=nodes_gone) + assert self.G.nodes - G.nodes == {4, 5} + assert self.G.edges - G.edges == self.hide_edges_w_hide_nodes + if G.is_directed(): + assert list(G[3]) == [] + assert list(G[2]) == [3] + else: + assert list(G[3]) == [2] + assert set(G[2]) == {1, 3} + pytest.raises(KeyError, G.__getitem__, 4) + pytest.raises(KeyError, G.__getitem__, 112) + pytest.raises(KeyError, G.__getitem__, 111) + assert G.degree(3) == (3 if G.is_multigraph() else 1) + assert G.size() == (7 if 
G.is_multigraph() else 5) + + def test_hidden_edges(self): + hide_edges = [(2, 3), (8, 7), (222, 223)] + edges_gone = self.hide_edges_filter(hide_edges) + gview = self.gview + G = gview(self.G, filter_edge=edges_gone) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert self.G.edges - G.edges == {(2, 3)} + assert list(G[2]) == [] + assert list(G.pred[3]) == [] + assert list(G.pred[2]) == [1] + assert G.size() == 7 + else: + assert self.G.edges - G.edges == {(2, 3), (7, 8)} + assert list(G[2]) == [1] + assert G.size() == 6 + assert list(G[3]) == [4] + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + assert G.degree(3) == 1 + + def test_shown_node(self): + induced_subgraph = nx.filters.show_nodes([2, 3, 111]) + gview = self.gview + G = gview(self.G, filter_node=induced_subgraph) + assert set(G.nodes) == {2, 3} + if G.is_directed(): + assert list(G[3]) == [] + else: + assert list(G[3]) == [2] + assert list(G[2]) == [3] + pytest.raises(KeyError, G.__getitem__, 4) + pytest.raises(KeyError, G.__getitem__, 112) + pytest.raises(KeyError, G.__getitem__, 111) + assert G.degree(3) == (3 if G.is_multigraph() else 1) + assert G.size() == (3 if G.is_multigraph() else 1) + + def test_shown_edges(self): + show_edges = [(2, 3), (8, 7), (222, 223)] + edge_subgraph = self.show_edges_filter(show_edges) + G = self.gview(self.G, filter_edge=edge_subgraph) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert G.edges == {(2, 3)} + assert list(G[3]) == [] + assert list(G[2]) == [3] + assert list(G.pred[3]) == [2] + assert list(G.pred[2]) == [] + assert G.size() == 1 + else: + assert G.edges == {(2, 3), (7, 8)} + assert list(G[3]) == [2] + assert list(G[2]) == [3] + assert G.size() == 2 + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + assert G.degree(3) == 1 + + +class TestSubDiGraphView(TestSubGraphView): + gview = staticmethod(nx.subgraph_view) + graph = nx.DiGraph + hide_edges_filter = staticmethod(nx.filters.hide_diedges) + show_edges_filter = staticmethod(nx.filters.show_diedges) + hide_edges = [(2, 3), (8, 7), (222, 223)] + excluded = {(2, 3), (3, 4), (4, 5), (5, 6)} + + def test_inoutedges(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone) + + assert self.G.in_edges - G.in_edges == self.excluded + assert self.G.out_edges - G.out_edges == self.excluded + + def test_pred(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone) + + assert list(G.pred[2]) == [1] + assert list(G.pred[6]) == [] + + def test_inout_degree(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone) + + assert G.degree(2) == 1 + assert G.out_degree(2) == 0 + assert G.in_degree(2) == 1 + assert G.size() == 4 + + +# multigraph +class TestMultiGraphView(TestSubGraphView): + gview = staticmethod(nx.subgraph_view) + graph = nx.MultiGraph + hide_edges_filter = staticmethod(nx.filters.hide_multiedges) + show_edges_filter = staticmethod(nx.filters.show_multiedges) + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=cls.graph()) + multiedges = {(2, 3, 
4), (2, 3, 5)} + cls.G.add_edges_from(multiedges) + cls.hide_edges_w_hide_nodes = {(3, 4, 0), (4, 5, 0), (5, 6, 0)} + + def test_hidden_edges(self): + hide_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)] + edges_gone = self.hide_edges_filter(hide_edges) + G = self.gview(self.G, filter_edge=edges_gone) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert self.G.edges - G.edges == {(2, 3, 4)} + assert list(G[3]) == [4] + assert list(G[2]) == [3] + assert list(G.pred[3]) == [2] # only one 2 but two edges + assert list(G.pred[2]) == [1] + assert G.size() == 9 + else: + assert self.G.edges - G.edges == {(2, 3, 4), (7, 8, 0)} + assert list(G[3]) == [2, 4] + assert list(G[2]) == [1, 3] + assert G.size() == 8 + assert G.degree(3) == 3 + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + + def test_shown_edges(self): + show_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)] + edge_subgraph = self.show_edges_filter(show_edges) + G = self.gview(self.G, filter_edge=edge_subgraph) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert G.edges == {(2, 3, 4)} + assert list(G[3]) == [] + assert list(G.pred[3]) == [2] + assert list(G.pred[2]) == [] + assert G.size() == 1 + else: + assert G.edges == {(2, 3, 4), (7, 8, 0)} + assert G.size() == 2 + assert list(G[3]) == [2] + assert G.degree(3) == 1 + assert list(G[2]) == [3] + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + + +# multidigraph +class TestMultiDiGraphView(TestMultiGraphView, TestSubDiGraphView): + gview = staticmethod(nx.subgraph_view) + graph = nx.MultiDiGraph + hide_edges_filter = staticmethod(nx.filters.hide_multidiedges) + show_edges_filter = staticmethod(nx.filters.show_multidiedges) + hide_edges = [(2, 3, 0), (8, 7, 0), (222, 223, 0)] + excluded = {(2, 3, 0), (3, 4, 0), (4, 5, 0), (5, 6, 0)} + + def test_inout_degree(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone) + + assert G.degree(2) == 3 + assert G.out_degree(2) == 2 + assert G.in_degree(2) == 1 + assert G.size() == 6 + + +# induced_subgraph +class TestInducedSubGraph: + @classmethod + def setup_class(cls): + cls.K3 = G = nx.complete_graph(3) + G.graph["foo"] = [] + G.nodes[0]["foo"] = [] + G.remove_edge(1, 2) + ll = [] + G.add_edge(1, 2, foo=ll) + G.add_edge(2, 1, foo=ll) + + def test_full_graph(self): + G = self.K3 + H = nx.induced_subgraph(G, [0, 1, 2, 5]) + assert H.name == G.name + self.graphs_equal(H, G) + self.same_attrdict(H, G) + + def test_partial_subgraph(self): + G = self.K3 + H = nx.induced_subgraph(G, 0) + assert dict(H.adj) == {0: {}} + assert dict(G.adj) != {0: {}} + + H = nx.induced_subgraph(G, [0, 1]) + assert dict(H.adj) == {0: {1: {}}, 1: {0: {}}} + + def same_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.edges[1, 2]["foo"] = "baz" + assert G.edges == H.edges + H.edges[1, 2]["foo"] = old_foo + assert G.edges == H.edges + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G.nodes == H.nodes + H.nodes[0]["foo"] = old_foo + assert G.nodes == H.nodes + + def graphs_equal(self, H, G): + assert G._adj == H._adj + assert G._node == H._node + assert G.graph == H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2] is H._adj[2][1] + assert G._adj[1][2] is G._adj[2][1] + else: # at least one is directed + if not 
G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2] is H._pred[2][1] + assert G._succ[1][2] is G._pred[2][1] + + +# edge_subgraph +class TestEdgeSubGraph: + @classmethod + def setup_class(cls): + # Create a path graph on five nodes. + cls.G = G = nx.path_graph(5) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + cls.H = nx.edge_subgraph(G, [(0, 1), (3, 4)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [(0, "node0"), (1, "node1"), (3, "node3"), (4, "node4")] == sorted( + self.H.nodes.data("name") + ) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert edges_equal( + [(0, 1, "edge01"), (3, 4, "edge34")], self.H.edges.data("name") + ) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes) + self.G.remove_node(5) + + def test_remove_node(self): + """Tests that removing a node in the original graph + removes the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes) + self.G.add_node(0, name="node0") + self.G.add_edge(0, 1, name="edge01") + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. + self.G.nodes[0]["name"] = "foo" + assert self.G.nodes[0] == self.H.nodes[0] + self.H.nodes[1]["name"] = "bar" + assert self.G.nodes[1] == self.H.nodes[1] + # Revert the change, so tests pass with pytest-randomly + self.G.nodes[0]["name"] = "node0" + self.H.nodes[1]["name"] = "node1" + + def test_edge_attr_dict(self): + """Tests that the edge attribute dictionary of the two graphs is + the same object. + + """ + for u, v in self.H.edges(): + assert self.G.edges[u, v] == self.H.edges[u, v] + # Making a change to G should make a change in H and vice versa. + self.G.edges[0, 1]["name"] = "foo" + assert self.G.edges[0, 1]["name"] == self.H.edges[0, 1]["name"] + self.H.edges[3, 4]["name"] = "bar" + assert self.G.edges[3, 4]["name"] == self.H.edges[3, 4]["name"] + # Revert the change, so tests pass with pytest-randomly + self.G.edges[0, 1]["name"] = "edge01" + self.H.edges[3, 4]["name"] = "edge34" + + def test_graph_attr_dict(self): + """Tests that the graph attribute dictionary of the two graphs + is the same object. + + """ + assert self.G.graph is self.H.graph + + def test_readonly(self): + """Tests that the subgraph cannot change the graph structure""" + pytest.raises(nx.NetworkXError, self.H.add_node, 5) + pytest.raises(nx.NetworkXError, self.H.remove_node, 0) + pytest.raises(nx.NetworkXError, self.H.add_edge, 5, 6) + pytest.raises(nx.NetworkXError, self.H.remove_edge, 0, 1) + + @pytest.mark.parametrize("multigraph", (nx.MultiGraph, nx.MultiDiGraph)) + def test_multigraph_filtered_edges(self, multigraph): + """Check edge visibility in FilterMultiInner on edge_subgraph's of + multigraphs. 
See gh-7724.""" + G = multigraph([("a", "b"), ("a", "c"), ("c", "b")]) + H = nx.edge_subgraph(G, [("a", "b", 0), ("c", "b", 0)]) + assert "c" not in H["a"] + assert not H.has_edge("a", "c") diff --git a/.venv/lib/python3.12/site-packages/networkx/conftest.py b/.venv/lib/python3.12/site-packages/networkx/conftest.py new file mode 100644 index 0000000..9f23922 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/conftest.py @@ -0,0 +1,257 @@ +""" +Testing +======= + +General guidelines for writing good tests: + +- doctests always assume ``import networkx as nx`` so don't add that +- prefer pytest fixtures over classes with setup methods. +- use the ``@pytest.mark.parametrize`` decorator +- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy. + and add the module to the relevant entries below. + +""" + +import os +import warnings +from importlib.metadata import entry_points + +import pytest + +import networkx as nx + + +def pytest_addoption(parser): + parser.addoption( + "--runslow", action="store_true", default=False, help="run slow tests" + ) + parser.addoption( + "--backend", + action="store", + default=None, + help="Run tests with a backend by auto-converting nx graphs to backend graphs", + ) + parser.addoption( + "--fallback-to-nx", + action="store_true", + default=False, + help="Run nx function if a backend doesn't implement a dispatchable function" + " (use with --backend)", + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "slow: mark test as slow to run") + backend = config.getoption("--backend") + if backend is None: + backend = os.environ.get("NETWORKX_TEST_BACKEND") + # nx_loopback backend is only available when testing with a backend + loopback_ep = entry_points(name="nx_loopback", group="networkx.backends") + if not loopback_ep: + warnings.warn( + "\n\n WARNING: Mixed NetworkX configuration! \n\n" + " This environment has mixed configuration for networkx.\n" + " The test object nx_loopback is not configured correctly.\n" + " You should not be seeing this message.\n" + " Try `pip install -e .`, or change your PYTHONPATH\n" + " Make sure python finds the networkx repo you are testing\n\n" + ) + config.backend = backend + if backend: + # We will update `networkx.config.backend_priority` below in `*_modify_items` + # to allow tests to get set up with normal networkx graphs. + nx.utils.backends.backends["nx_loopback"] = loopback_ep["nx_loopback"] + nx.utils.backends.backend_info["nx_loopback"] = {} + nx.config.backends = nx.utils.Config( + nx_loopback=nx.utils.Config(), + **nx.config.backends, + ) + fallback_to_nx = config.getoption("--fallback-to-nx") + if not fallback_to_nx: + fallback_to_nx = os.environ.get("NETWORKX_FALLBACK_TO_NX") + nx.config.fallback_to_nx = bool(fallback_to_nx) + nx.utils.backends._dispatchable.__call__ = ( + nx.utils.backends._dispatchable._call_if_any_backends_installed + ) + + +def pytest_collection_modifyitems(config, items): + # Setting this to True here allows tests to be set up before dispatching + # any function call to a backend. 
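+    # When a backend is selected (--backend or NETWORKX_TEST_BACKEND), the
+    # suite runs in auto-conversion mode: nx graphs are converted to the
+    # backend's graph type before each dispatchable call.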
+ if config.backend: + # Allow pluggable backends to add markers to tests (such as skip or xfail) + # when running in auto-conversion test mode + backend_name = config.backend + if backend_name != "networkx": + nx.utils.backends._dispatchable._is_testing = True + nx.config.backend_priority.algos = [backend_name] + nx.config.backend_priority.generators = [backend_name] + backend = nx.utils.backends.backends[backend_name].load() + if hasattr(backend, "on_start_tests"): + getattr(backend, "on_start_tests")(items) + + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) + + +# TODO: The warnings below need to be dealt with, but for now we silence them. +@pytest.fixture(autouse=True) +def set_warnings(): + warnings.filterwarnings( + "ignore", + category=UserWarning, + message=r"Exited (at iteration \d+|postprocessing) with accuracies.*", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\n\nThe `normalized`" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\n\n`compute_v_structures" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="Keyword argument 'link'" + ) + + +@pytest.fixture(autouse=True) +def add_nx(doctest_namespace): + doctest_namespace["nx"] = nx + + +# What dependencies are installed? + +try: + import numpy as np + + has_numpy = True +except ImportError: + has_numpy = False + +try: + import scipy as sp + + has_scipy = True +except ImportError: + has_scipy = False + +try: + import matplotlib as mpl + + has_matplotlib = True +except ImportError: + has_matplotlib = False + +try: + import pandas as pd + + has_pandas = True +except ImportError: + has_pandas = False + +try: + import pygraphviz + + has_pygraphviz = True +except ImportError: + has_pygraphviz = False + +try: + import pydot + + has_pydot = True +except ImportError: + has_pydot = False + +try: + import sympy + + has_sympy = True +except ImportError: + has_sympy = False + + +# List of files that pytest should ignore + +collect_ignore = [] + +needs_numpy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/centrality/current_flow_closeness.py", + "algorithms/centrality/laplacian.py", + "algorithms/node_classification.py", + "algorithms/non_randomness.py", + "algorithms/polynomials.py", + "algorithms/shortest_paths/dense.py", + "algorithms/structuralholes.py", + "algorithms/tree/mst.py", + "drawing/nx_latex.py", + "generators/expanders.py", + "linalg/bethehessianmatrix.py", + "linalg/laplacianmatrix.py", + "utils/misc.py", +] +needs_scipy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/assortativity/correlation.py", + "algorithms/assortativity/mixing.py", + "algorithms/assortativity/pairs.py", + "algorithms/bipartite/matrix.py", + "algorithms/bipartite/spectral.py", + "algorithms/bipartite/link_analysis.py", + "algorithms/centrality/current_flow_betweenness.py", + "algorithms/centrality/current_flow_betweenness_subset.py", + "algorithms/centrality/eigenvector.py", + "algorithms/centrality/katz.py", + "algorithms/centrality/laplacian.py", + "algorithms/centrality/second_order.py", + "algorithms/centrality/subgraph_alg.py", + "algorithms/communicability_alg.py", + "algorithms/community/divisive.py", + "algorithms/distance_measures.py", + "algorithms/link_analysis/hits_alg.py", + 
"algorithms/link_analysis/pagerank_alg.py", + "algorithms/node_classification.py", + "algorithms/similarity.py", + "algorithms/structuralholes.py", + "algorithms/tree/mst.py", + "algorithms/walks.py", + "convert_matrix.py", + "drawing/layout.py", + "drawing/nx_pylab.py", + "generators/spectral_graph_forge.py", + "generators/geometric.py", + "generators/expanders.py", + "linalg/algebraicconnectivity.py", + "linalg/attrmatrix.py", + "linalg/bethehessianmatrix.py", + "linalg/graphmatrix.py", + "linalg/laplacianmatrix.py", + "linalg/modularitymatrix.py", + "linalg/spectrum.py", + "utils/rcm.py", +] +needs_matplotlib = ["drawing/nx_pylab.py", "generators/classic.py"] +needs_pandas = ["convert_matrix.py"] +needs_pygraphviz = ["drawing/nx_agraph.py"] +needs_pydot = ["drawing/nx_pydot.py"] +needs_sympy = ["algorithms/polynomials.py"] + +if not has_numpy: + collect_ignore += needs_numpy +if not has_scipy: + collect_ignore += needs_scipy +if not has_matplotlib: + collect_ignore += needs_matplotlib +if not has_pandas: + collect_ignore += needs_pandas +if not has_pygraphviz: + collect_ignore += needs_pygraphviz +if not has_pydot: + collect_ignore += needs_pydot +if not has_sympy: + collect_ignore += needs_sympy diff --git a/.venv/lib/python3.12/site-packages/networkx/convert.py b/.venv/lib/python3.12/site-packages/networkx/convert.py new file mode 100644 index 0000000..bf0a502 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/convert.py @@ -0,0 +1,502 @@ +"""Functions to convert NetworkX graphs to and from other formats. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the to_networkx_graph() function +which attempts to guess the input type and convert it automatically. + +Examples +-------- +Create a graph with a single edge from a dictionary of dictionaries + +>>> d = {0: {1: 1}} # dict-of-dicts single edge (0,1) +>>> G = nx.Graph(d) + +See Also +-------- +nx_agraph, nx_pydot +""" + +from collections.abc import Collection, Generator, Iterator + +import networkx as nx + +__all__ = [ + "to_networkx_graph", + "from_dict_of_dicts", + "to_dict_of_dicts", + "from_dict_of_lists", + "to_dict_of_lists", + "from_edgelist", + "to_edgelist", +] + + +def to_networkx_graph(data, create_using=None, multigraph_input=False): + """Make a NetworkX graph from a known data structure. + + The preferred way to call this is automatically + from the class constructor + + >>> d = {0: {1: {"weight": 1}}} # dict-of-dicts single edge (0,1) + >>> G = nx.Graph(d) + + instead of the equivalent + + >>> G = nx.from_dict_of_dicts(d) + + Parameters + ---------- + data : object to be converted + + Current known types are: + any NetworkX graph + dict-of-dicts + dict-of-lists + container (e.g. set, list, tuple) of edges + iterator (e.g. itertools.chain) that produces edges + generator of edges + Pandas DataFrame (row per edge) + 2D numpy array + scipy sparse array + pygraphviz agraph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + If True and data is a dict_of_dicts, + try to create a multigraph assuming dict_of_dict_of_lists. + If data and create_using are both multigraphs then create + a multigraph from a multigraph. 
+ + """ + # NX graph + if hasattr(data, "adj"): + try: + result = from_dict_of_dicts( + data.adj, + create_using=create_using, + multigraph_input=data.is_multigraph(), + ) + # data.graph should be dict-like + result.graph.update(data.graph) + # data.nodes should be dict-like + # result.add_node_from(data.nodes.items()) possible but + # for custom node_attr_dict_factory which may be hashable + # will be unexpected behavior + for n, dd in data.nodes.items(): + result._node[n].update(dd) + return result + except Exception as err: + raise nx.NetworkXError("Input is not a correct NetworkX graph.") from err + + # dict of dicts/lists + if isinstance(data, dict): + try: + return from_dict_of_dicts( + data, create_using=create_using, multigraph_input=multigraph_input + ) + except Exception as err1: + if multigraph_input is True: + raise nx.NetworkXError( + f"converting multigraph_input raised:\n{type(err1)}: {err1}" + ) + try: + return from_dict_of_lists(data, create_using=create_using) + except Exception as err2: + raise TypeError("Input is not known type.") from err2 + + # edgelists + if isinstance(data, list | tuple | nx.reportviews.EdgeViewABC | Iterator): + try: + return from_edgelist(data, create_using=create_using) + except: + pass + + # pygraphviz agraph + if hasattr(data, "is_strict"): + try: + return nx.nx_agraph.from_agraph(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a correct pygraphviz graph.") from err + + # Pandas DataFrame + try: + import pandas as pd + + if isinstance(data, pd.DataFrame): + if data.shape[0] == data.shape[1]: + try: + return nx.from_pandas_adjacency(data, create_using=create_using) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame adjacency matrix." + raise nx.NetworkXError(msg) from err + else: + try: + return nx.from_pandas_edgelist( + data, edge_attr=True, create_using=create_using + ) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame edge-list." + raise nx.NetworkXError(msg) from err + except ImportError: + pass + + # numpy array + try: + import numpy as np + + if isinstance(data, np.ndarray): + try: + return nx.from_numpy_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + f"Failed to interpret array as an adjacency matrix." + ) from err + except ImportError: + pass + + # scipy sparse array - any format + try: + import scipy as sp + + if hasattr(data, "format"): + try: + return nx.from_scipy_sparse_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + "Input is not a correct scipy sparse array type." + ) from err + except ImportError: + pass + + # Note: most general check - should remain last in order of execution + # Includes containers (e.g. list, set, dict, etc.), generators, and + # iterators (e.g. itertools.chain) of edges + + if isinstance(data, Collection | Generator | Iterator): + try: + return from_edgelist(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a valid edge list") from err + + raise nx.NetworkXError("Input is not a known data type for conversion.") + + +@nx._dispatchable +def to_dict_of_lists(G, nodelist=None): + """Returns adjacency representation of graph as a dictionary of lists. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + Notes + ----- + Completely ignores edge data for MultiGraph and MultiDiGraph. 
+ + """ + if nodelist is None: + nodelist = G + + d = {} + for n in nodelist: + d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist] + return d + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_dict_of_lists(d, create_using=None): + """Returns a graph from a dictionary of lists. + + Parameters + ---------- + d : dictionary of lists + A dictionary of lists adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> dol = {0: [1]} # single edge (0,1) + >>> G = nx.from_dict_of_lists(dol) + + or + + >>> G = nx.Graph(dol) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + if G.is_multigraph() and not G.is_directed(): + # a dict_of_lists can't show multiedges. BUT for undirected graphs, + # each edge shows up twice in the dict_of_lists. + # So we need to treat this case separately. + seen = {} + for node, nbrlist in d.items(): + for nbr in nbrlist: + if nbr not in seen: + G.add_edge(node, nbr) + seen[node] = 1 # don't allow reverse edge to show up + else: + G.add_edges_from( + ((node, nbr) for node, nbrlist in d.items() for nbr in nbrlist) + ) + return G + + +def to_dict_of_dicts(G, nodelist=None, edge_data=None): + """Returns adjacency representation of graph as a dictionary of dictionaries. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist. If None, all nodes in G. + + edge_data : scalar, optional (default: the G edgedatadict for each edge) + If provided, the value of the dictionary will be set to `edge_data` for + all edges. Usual values could be `1` or `True`. If `edge_data` is + `None` (the default), the edgedata in `G` is used, resulting in a + dict-of-dict-of-dicts. If `G` is a MultiGraph, the result will be a + dict-of-dict-of-dict-of-dicts. See Notes for an approach to customize + handling edge data. `edge_data` should *not* be a container as it will + be the same container for all the edges. + + Returns + ------- + dod : dict + A nested dictionary representation of `G`. Note that the level of + nesting depends on the type of `G` and the value of `edge_data` + (see Examples). + + See Also + -------- + from_dict_of_dicts, to_dict_of_lists + + Notes + ----- + For a more custom approach to handling edge data, try:: + + dod = { + n: {nbr: custom(n, nbr, dd) for nbr, dd in nbrdict.items()} + for n, nbrdict in G.adj.items() + } + + where `custom` returns the desired edge data for each edge between `n` and + `nbr`, given existing edge data `dd`. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> nx.to_dict_of_dicts(G) + {0: {1: {}}, 1: {0: {}, 2: {}}, 2: {1: {}}} + + Edge data is preserved by default (``edge_data=None``), resulting + in dict-of-dict-of-dicts where the innermost dictionary contains the + edge data: + + >>> G = nx.Graph() + >>> G.add_edges_from( + ... [ + ... (0, 1, {"weight": 1.0}), + ... (1, 2, {"weight": 2.0}), + ... (2, 0, {"weight": 1.0}), + ... ] + ... 
) + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'weight': 1.0}, 2: {'weight': 1.0}}, + 1: {0: {'weight': 1.0}, 2: {'weight': 2.0}}, + 2: {1: {'weight': 2.0}, 0: {'weight': 1.0}}} + >>> d[1][2]["weight"] + 2.0 + + If `edge_data` is not `None`, edge data in the original graph (if any) is + replaced: + + >>> d = nx.to_dict_of_dicts(G, edge_data=1) + >>> d + {0: {1: 1, 2: 1}, 1: {0: 1, 2: 1}, 2: {1: 1, 0: 1}} + >>> d[1][2] + 1 + + This also applies to MultiGraphs: edge data is preserved by default: + + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1, key="a", weight=1.0) + 'a' + >>> G.add_edge(0, 1, key="b", weight=5.0) + 'b' + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}, + 1: {0: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}} + >>> d[0][1]["b"]["weight"] + 5.0 + + But multi edge data is lost if `edge_data` is not `None`: + + >>> d = nx.to_dict_of_dicts(G, edge_data=10) + >>> d + {0: {1: 10}, 1: {0: 10}} + """ + dod = {} + if nodelist is None: + if edge_data is None: + for u, nbrdict in G.adjacency(): + dod[u] = nbrdict.copy() + else: # edge_data is not None + for u, nbrdict in G.adjacency(): + dod[u] = dod.fromkeys(nbrdict, edge_data) + else: # nodelist is not None + if edge_data is None: + for u in nodelist: + dod[u] = {} + for v, data in ((v, data) for v, data in G[u].items() if v in nodelist): + dod[u][v] = data + else: # nodelist and edge_data are not None + for u in nodelist: + dod[u] = {} + for v in (v for v in G[u] if v in nodelist): + dod[u][v] = edge_data + return dod + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_dict_of_dicts(d, create_using=None, multigraph_input=False): + """Returns a graph from a dictionary of dictionaries. + + Parameters + ---------- + d : dictionary of dictionaries + A dictionary of dictionaries adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + When True, the dict `d` is assumed + to be a dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + Otherwise this routine assumes dict-of-dict-of-dict keyed by + node to neighbor to edge data. + + Examples + -------- + >>> dod = {0: {1: {"weight": 1}}} # single edge (0,1) + >>> G = nx.from_dict_of_dicts(dod) + + or + + >>> G = nx.Graph(dod) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + # does dict d represent a MultiGraph or MultiDiGraph? 
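+    # Expected input shapes (see the docstring above):
+    #   multigraph_input=True:  d[u][v] = {key: {attr: value}}
+    #   multigraph_input=False: d[u][v] = {attr: value}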
+ if multigraph_input: + if G.is_directed(): + if G.is_multigraph(): + G.add_edges_from( + (u, v, key, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: + G.add_edges_from( + (u, v, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: # Undirected + if G.is_multigraph(): + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, key, data) for key, data in datadict.items() + ) + seen.add((v, u)) + else: + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, data) for key, data in datadict.items() + ) + seen.add((v, u)) + + else: # not a multigraph to multigraph transfer + if G.is_multigraph() and not G.is_directed(): + # d can have both representations u-v, v-u in dict. Only add one. + # We don't need this check for digraphs since we add both directions, + # or for Graph() since it is done implicitly (parallel edges not allowed) + seen = set() + for u, nbrs in d.items(): + for v, data in nbrs.items(): + if (u, v) not in seen: + G.add_edge(u, v, key=0) + G[u][v][0].update(data) + seen.add((v, u)) + else: + G.add_edges_from( + ((u, v, data) for u, nbrs in d.items() for v, data in nbrs.items()) + ) + return G + + +@nx._dispatchable(preserve_edge_attrs=True) +def to_edgelist(G, nodelist=None): + """Returns a list of edges in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + """ + if nodelist is None: + return G.edges(data=True) + return G.edges(nodelist, data=True) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_edgelist(edgelist, create_using=None): + """Returns a graph from a list of edges. + + Parameters + ---------- + edgelist : list or iterator + Edge tuples + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> edgelist = [(0, 1)] # single edge (0,1) + >>> G = nx.from_edgelist(edgelist) + + or + + >>> G = nx.Graph(edgelist) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_edges_from(edgelist) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/convert_matrix.py b/.venv/lib/python3.12/site-packages/networkx/convert_matrix.py new file mode 100644 index 0000000..72a5517 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/convert_matrix.py @@ -0,0 +1,1314 @@ +"""Functions to convert NetworkX graphs to and from common data containers +like numpy arrays, scipy sparse arrays, and pandas DataFrames. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the `~networkx.convert.to_networkx_graph` +function which attempts to guess the input type and convert it automatically. + +Examples +-------- +Create a 10 node random graph from a numpy array + +>>> import numpy as np +>>> rng = np.random.default_rng() +>>> a = rng.integers(low=0, high=2, size=(10, 10)) +>>> DG = nx.from_numpy_array(a, create_using=nx.DiGraph) + +or equivalently: + +>>> DG = nx.DiGraph(a) + +which calls `from_numpy_array` internally based on the type of ``a``. 
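+
+The reverse conversion is available as well; a minimal sketch:
+
+>>> A = nx.to_numpy_array(DG)
+>>> A.shape
+(10, 10)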
+ +See Also +-------- +nx_agraph, nx_pydot +""" + +import itertools +from collections import defaultdict + +import networkx as nx + +__all__ = [ + "from_pandas_adjacency", + "to_pandas_adjacency", + "from_pandas_edgelist", + "to_pandas_edgelist", + "from_scipy_sparse_array", + "to_scipy_sparse_array", + "from_numpy_array", + "to_numpy_array", +] + + +@nx._dispatchable(edge_attrs="weight") +def to_pandas_adjacency( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight : string or None, optional + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. + + nonedge : float, optional + The matrix values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are matrix values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as nan. + + Returns + ------- + df : Pandas DataFrame + Graph adjacency matrix + + Notes + ----- + For directed graphs, entry i,j corresponds to an edge from i to j. + + The DataFrame entries are assigned to the weight edge attribute. When + an edge does not have a weight attribute, the value of the entry is set to + the number 1. For multiple (parallel) edges, the values of the entries + are determined by the 'multigraph_weight' parameter. The default is to + sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting Pandas DataFrame can be modified as follows:: + + >>> import pandas as pd + >>> G = nx.Graph([(1, 1), (2, 2)]) + >>> df = nx.to_pandas_adjacency(G) + >>> df + 1 2 + 1 1.0 0.0 + 2 0.0 1.0 + >>> diag_idx = list(range(len(df))) + >>> df.iloc[diag_idx, diag_idx] *= 2 + >>> df + 1 2 + 1 2.0 0.0 + 2 0.0 2.0 + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_pandas_adjacency(G, nodelist=[0, 1, 2], dtype=int) + 0 1 2 + 0 0 2 0 + 1 1 0 0 + 2 0 0 4 + + """ + import pandas as pd + + M = to_numpy_array( + G, + nodelist=nodelist, + dtype=dtype, + order=order, + multigraph_weight=multigraph_weight, + weight=weight, + nonedge=nonedge, + ) + if nodelist is None: + nodelist = list(G) + return pd.DataFrame(data=M, index=nodelist, columns=nodelist) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_pandas_adjacency(df, create_using=None): + r"""Returns a graph from Pandas DataFrame. 
+ + The Pandas DataFrame is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + df : Pandas DataFrame + An adjacency matrix representation of a graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of df corresponds to an edge from i to j. + + If `df` has a single data type for each entry it will be converted to an + appropriate Python data type. + + If you have node attributes stored in a separate dataframe `df_nodes`, + you can load those attributes to the graph `G` using the following code:: + + df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]}) + G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows()) + + If `df` has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_pandas_adjacency + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> df = pd.DataFrame([[1, 1], [2, 1]]) + >>> df + 0 1 + 0 1 1 + 1 2 1 + >>> G = nx.from_pandas_adjacency(df) + >>> G.name = "Graph from pandas adjacency matrix" + >>> print(G) + Graph named 'Graph from pandas adjacency matrix' with 2 nodes and 3 edges + """ + + try: + df = df[df.index] + except Exception as err: + missing = list(set(df.index).difference(set(df.columns))) + msg = f"{missing} not in columns" + raise nx.NetworkXError("Columns must match Indices.", msg) from err + + A = df.values + G = from_numpy_array(A, create_using=create_using, nodelist=df.columns) + + return G + + +@nx._dispatchable(preserve_edge_attrs=True) +def to_pandas_edgelist( + G, + source="source", + target="target", + nodelist=None, + dtype=None, + edge_key=None, +): + """Returns the graph edge list as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + source : str or int, optional + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int, optional + A valid column name (string or integer) for the target nodes (for the + directed case). + + nodelist : list, optional + Use only nodes specified in nodelist + + dtype : dtype, default None + Use to create the DataFrame. Data type to force. + Only a single dtype is allowed. If None, infer. + + edge_key : str or int or None, optional (default=None) + A valid column name (string or integer) for the edge keys (for the + multigraph case). If None, edge keys are not stored in the DataFrame. + + Returns + ------- + df : Pandas DataFrame + Graph edge list + + Examples + -------- + >>> G = nx.Graph( + ... [ + ... ("A", "B", {"cost": 1, "weight": 7}), + ... ("C", "E", {"cost": 9, "weight": 10}), + ... ] + ... 
) + >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"]) + >>> df[["source", "target", "cost", "weight"]] + source target cost weight + 0 A B 1 7 + 1 C E 9 10 + + >>> G = nx.MultiGraph([("A", "B", {"cost": 1}), ("A", "B", {"cost": 9})]) + >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"], edge_key="ekey") + >>> df[["source", "target", "cost", "ekey"]] + source target cost ekey + 0 A B 1 0 + 1 A B 9 1 + + """ + import pandas as pd + + if nodelist is None: + edgelist = G.edges(data=True) + else: + edgelist = G.edges(nodelist, data=True) + source_nodes = [s for s, _, _ in edgelist] + target_nodes = [t for _, t, _ in edgelist] + + all_attrs = set().union(*(d.keys() for _, _, d in edgelist)) + if source in all_attrs: + raise nx.NetworkXError(f"Source name {source!r} is an edge attr name") + if target in all_attrs: + raise nx.NetworkXError(f"Target name {target!r} is an edge attr name") + + nan = float("nan") + edge_attr = {k: [d.get(k, nan) for _, _, d in edgelist] for k in all_attrs} + + if G.is_multigraph() and edge_key is not None: + if edge_key in all_attrs: + raise nx.NetworkXError(f"Edge key name {edge_key!r} is an edge attr name") + edge_keys = [k for _, _, k in G.edges(keys=True)] + edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys} + else: + edgelistdict = {source: source_nodes, target: target_nodes} + + edgelistdict.update(edge_attr) + return pd.DataFrame(edgelistdict, dtype=dtype) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_pandas_edgelist( + df, + source="source", + target="target", + edge_attr=None, + create_using=None, + edge_key=None, +): + """Returns a graph from Pandas DataFrame containing an edge list. + + The Pandas DataFrame should contain at least two columns of node names and + zero or more columns of edge attributes. Each row will be processed as one + edge instance. + + Note: This function iterates over DataFrame.values, which is not + guaranteed to retain the data type across columns in the row. This is only + a problem if your row is entirely numeric and a mix of ints and floats. In + that case, all values will be returned as floats. See the + DataFrame.iterrows documentation for an example. + + Parameters + ---------- + df : Pandas DataFrame + An edge list representation of a graph + + source : str or int + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int + A valid column name (string or integer) for the target nodes (for the + directed case). + + edge_attr : str or int, iterable, True, or None + A valid column name (str or int) or iterable of column names that are + used to retrieve items and add them to the graph as edge attributes. + If `True`, all columns will be added except `source`, `target` and `edge_key`. + If `None`, no edge attributes are added to the graph. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_key : str or None, optional (default=None) + A valid column name for the edge keys (for a MultiGraph). The values in + this column are used for the edge keys when adding edges if create_using + is a multigraph. 
+ + Notes + ----- + If you have node attributes stored in a separate dataframe `df_nodes`, + you can load those attributes to the graph `G` using the following code:: + + df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]}) + G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows()) + + See Also + -------- + to_pandas_edgelist + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> import numpy as np + >>> rng = np.random.RandomState(seed=5) + >>> ints = rng.randint(1, 11, size=(3, 2)) + >>> a = ["A", "B", "C"] + >>> b = ["D", "A", "E"] + >>> df = pd.DataFrame(ints, columns=["weight", "cost"]) + >>> df[0] = a + >>> df["b"] = b + >>> df[["weight", "cost", 0, "b"]] + weight cost 0 b + 0 4 7 A D + 1 7 1 B A + 2 10 9 C E + >>> G = nx.from_pandas_edgelist(df, 0, "b", ["weight", "cost"]) + >>> G["E"]["C"]["weight"] + 10 + >>> G["E"]["C"]["cost"] + 9 + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2], + ... "target": [2, 2, 3], + ... "weight": [3, 4, 5], + ... "color": ["red", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist(edges, edge_attr=True) + >>> G[0][2]["color"] + 'red' + + Build multigraph with custom keys: + + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2, 0], + ... "target": [2, 2, 3, 2], + ... "my_edge_key": ["A", "B", "C", "D"], + ... "weight": [3, 4, 5, 6], + ... "color": ["red", "blue", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist( + ... edges, + ... edge_key="my_edge_key", + ... edge_attr=["weight", "color"], + ... create_using=nx.MultiGraph(), + ... ) + >>> G[0][2] + AtlasView({'A': {'weight': 3, 'color': 'red'}, 'D': {'weight': 6, 'color': 'blue'}}) + + + """ + g = nx.empty_graph(0, create_using) + + if edge_attr is None: + if g.is_multigraph() and edge_key is not None: + for u, v, k in zip(df[source], df[target], df[edge_key]): + g.add_edge(u, v, k) + else: + g.add_edges_from(zip(df[source], df[target])) + return g + + reserved_columns = [source, target] + if g.is_multigraph() and edge_key is not None: + reserved_columns.append(edge_key) + + # Additional columns requested + attr_col_headings = [] + attribute_data = [] + if edge_attr is True: + attr_col_headings = [c for c in df.columns if c not in reserved_columns] + elif isinstance(edge_attr, list | tuple): + attr_col_headings = edge_attr + else: + attr_col_headings = [edge_attr] + if len(attr_col_headings) == 0: + raise nx.NetworkXError( + f"Invalid edge_attr argument: No columns found with name: {attr_col_headings}" + ) + + try: + attribute_data = zip(*[df[col] for col in attr_col_headings]) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_attr argument: {edge_attr}" + raise nx.NetworkXError(msg) from err + + if g.is_multigraph(): + # => append the edge keys from the df to the bundled data + if edge_key is not None: + try: + multigraph_edge_keys = df[edge_key] + attribute_data = zip(attribute_data, multigraph_edge_keys) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_key argument: {edge_key}" + raise nx.NetworkXError(msg) from err + + for s, t, attrs in zip(df[source], df[target], attribute_data): + if edge_key is not None: + attrs, multigraph_edge_key = attrs + key = g.add_edge(s, t, key=multigraph_edge_key) + else: + key = g.add_edge(s, t) + + g[s][t][key].update(zip(attr_col_headings, attrs)) + else: + for s, t, attrs in zip(df[source], df[target], attribute_data): + g.add_edge(s, t) + g[s][t].update(zip(attr_col_headings, 
attrs)) + + return g + + +@nx._dispatchable(edge_attrs="weight") +def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight="weight", format="csr"): + """Returns the graph adjacency matrix as a SciPy sparse array. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the sparse array. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by ``G.nodes()``. + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None, optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The format of the sparse array to be returned (default 'csr'). For + some algorithms different implementations of sparse arrays + can perform better. See [1]_ for details. + + Returns + ------- + A : SciPy sparse array + Graph adjacency matrix. + + Notes + ----- + For directed graphs, matrix entry ``i, j`` corresponds to an edge from + ``i`` to ``j``. + + The values of the adjacency matrix are populated using the edge attribute held in + parameter `weight`. When an edge does not have that attribute, the + value of the entry is 1. + + For multiple edges the matrix values are the sums of the edge weights. + + When `nodelist` does not contain every node in `G`, the adjacency matrix + is built from the subgraph of `G` that is induced by the nodes in + `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting array can be modified as follows:: + + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_scipy_sparse_array(G) + >>> A.toarray() + array([[1]]) + >>> A.setdiag(A.diagonal() * 2) + >>> A.toarray() + array([[2]]) + + Examples + -------- + + Basic usage: + + >>> G = nx.path_graph(4) + >>> A = nx.to_scipy_sparse_array(G) + >>> A # doctest: +SKIP + <Compressed Sparse Row sparse array of dtype 'int64' + with 6 stored elements and shape (4, 4)> + + >>> A.toarray() + array([[0, 1, 0, 0], + [1, 0, 1, 0], + [0, 1, 0, 1], + [0, 0, 1, 0]]) + + .. note:: The `toarray` method is used in these examples to better visualize + the adjacency matrix. For a dense representation of the adjacency matrix, + use `to_numpy_array` instead.
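+ + The `format` parameter selects any of the supported sparse formats; a + small sketch: + + >>> nx.to_scipy_sparse_array(G, format="coo").format + 'coo'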
+ + Directed graphs: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3)]) + >>> nx.to_scipy_sparse_array(G).toarray() + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]]) + + >>> H = G.reverse() + >>> H.edges + OutEdgeView([(1, 0), (2, 1), (3, 2)]) + >>> nx.to_scipy_sparse_array(H).toarray() + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]]) + + By default, the order of the rows/columns of the adjacency matrix is determined + by the ordering of the nodes in `G`: + + >>> G = nx.Graph() + >>> G.add_nodes_from([3, 5, 0, 1]) + >>> G.add_edges_from([(1, 3), (1, 5)]) + >>> nx.to_scipy_sparse_array(G).toarray() + array([[0, 0, 0, 1], + [0, 0, 0, 1], + [0, 0, 0, 0], + [1, 1, 0, 0]]) + + The ordering of the rows can be changed with `nodelist`: + + >>> ordered = [0, 1, 3, 5] + >>> nx.to_scipy_sparse_array(G, nodelist=ordered).toarray() + array([[0, 0, 0, 0], + [0, 0, 1, 1], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + + If `nodelist` contains a subset of the nodes in `G`, the adjacency matrix + for the node-induced subgraph is produced: + + >>> nx.to_scipy_sparse_array(G, nodelist=[1, 3, 5]).toarray() + array([[0, 1, 1], + [1, 0, 0], + [1, 0, 0]]) + + The values of the adjacency matrix are drawn from the edge attribute + specified by the `weight` parameter: + + >>> G = nx.path_graph(4) + >>> nx.set_edge_attributes( + ... G, values={(0, 1): 1, (1, 2): 10, (2, 3): 2}, name="weight" + ... ) + >>> nx.set_edge_attributes( + ... G, values={(0, 1): 50, (1, 2): 35, (2, 3): 10}, name="capacity" + ... ) + >>> nx.to_scipy_sparse_array(G).toarray() # Default weight="weight" + array([[ 0, 1, 0, 0], + [ 1, 0, 10, 0], + [ 0, 10, 0, 2], + [ 0, 0, 2, 0]]) + >>> nx.to_scipy_sparse_array(G, weight="capacity").toarray() + array([[ 0, 50, 0, 0], + [50, 0, 35, 0], + [ 0, 35, 0, 10], + [ 0, 0, 10, 0]]) + + Any edges that don't have a `weight` attribute default to 1: + + >>> G[1][2].pop("capacity") + 35 + >>> nx.to_scipy_sparse_array(G, weight="capacity").toarray() + array([[ 0, 50, 0, 0], + [50, 0, 1, 0], + [ 0, 1, 0, 10], + [ 0, 0, 10, 0]]) + + When `G` is a multigraph, the values in the adjacency matrix are given by + the sum of the `weight` edge attribute over each edge key: + + >>> G = nx.MultiDiGraph([(0, 1), (0, 1), (0, 1), (2, 0)]) + >>> nx.to_scipy_sparse_array(G).toarray() + array([[0, 3, 0], + [0, 0, 0], + [1, 0, 0]]) + + References + ---------- + .. [1] Scipy Dev. 
References, "Sparse Arrays", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXError("Graph has no nodes or edges") + + if nodelist is None: + nodelist = list(G) + nlen = len(G) + else: + nlen = len(nodelist) + if nlen == 0: + raise nx.NetworkXError("nodelist has no nodes") + nodeset = set(G.nbunch_iter(nodelist)) + if nlen != len(nodeset): + for n in nodelist: + if n not in G: + raise nx.NetworkXError(f"Node {n} in nodelist is not in G") + raise nx.NetworkXError("nodelist contains duplicates.") + if nlen < len(G): + G = G.subgraph(nodelist) + + index = dict(zip(nodelist, range(nlen))) + coefficients = zip( + *((index[u], index[v], wt) for u, v, wt in G.edges(data=weight, default=1)) + ) + try: + row, col, data = coefficients + except ValueError: + # there is no edge in the subgraph + row, col, data = [], [], [] + + if G.is_directed(): + A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, nlen), dtype=dtype) + else: + # symmetrize matrix + d = data + data + r = row + col + c = col + row + # selfloop entries get double counted when symmetrizing + # so we subtract the data on the diagonal + selfloops = list(nx.selfloop_edges(G, data=weight, default=1)) + if selfloops: + diag_index, diag_data = zip(*((index[u], -wt) for u, v, wt in selfloops)) + d += diag_data + r += diag_index + c += diag_index + A = sp.sparse.coo_array((d, (r, c)), shape=(nlen, nlen), dtype=dtype) + try: + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse matrix format: {format}") from err + + +def _csr_gen_triples(A): + """Converts a SciPy sparse array in **Compressed Sparse Row** format to + an iterable of weighted edge triples. + + """ + nrows = A.shape[0] + indptr, dst_indices, data = A.indptr, A.indices, A.data + import numpy as np + + src_indices = np.repeat(np.arange(nrows), np.diff(indptr)) + return zip(src_indices.tolist(), dst_indices.tolist(), A.data.tolist()) + + +def _csc_gen_triples(A): + """Converts a SciPy sparse array in **Compressed Sparse Column** format to + an iterable of weighted edge triples. + + """ + ncols = A.shape[1] + indptr, src_indices, data = A.indptr, A.indices, A.data + import numpy as np + + dst_indices = np.repeat(np.arange(ncols), np.diff(indptr)) + return zip(src_indices.tolist(), dst_indices.tolist(), A.data.tolist()) + + +def _coo_gen_triples(A): + """Converts a SciPy sparse array in **Coordinate** format to an iterable + of weighted edge triples. + + """ + return zip(A.row.tolist(), A.col.tolist(), A.data.tolist()) + + +def _dok_gen_triples(A): + """Converts a SciPy sparse array in **Dictionary of Keys** format to an + iterable of weighted edge triples. + + """ + for (r, c), v in A.items(): + # Use `v.item()` to convert a NumPy scalar to the appropriate Python scalar + yield int(r), int(c), v.item() + + +def _generate_weighted_edges(A): + """Returns an iterable over (u, v, w) triples, where u and v are adjacent + vertices and w is the weight of the edge joining u and v. + + `A` is a SciPy sparse array (in any format). + + """ + if A.format == "csr": + return _csr_gen_triples(A) + if A.format == "csc": + return _csc_gen_triples(A) + if A.format == "dok": + return _dok_gen_triples(A) + # If A is in any other format (including COO), convert it to COO format. 
+ return _coo_gen_triples(A.tocoo()) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_scipy_sparse_array( + A, parallel_edges=False, create_using=None, edge_attribute="weight" +): + """Creates a new graph from an adjacency matrix given as a SciPy sparse + array. + + Parameters + ---------- + A: scipy.sparse array + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer matrix, then entry *(i, j)* in the matrix is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the matrix are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (constructed from `create_using`) with parallel edges. + In this case, `edge_attribute` will be ignored. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the matrix `A` will be added to the + graph. + + Examples + -------- + >>> import scipy as sp + >>> A = sp.sparse.eye(2, 2, 1) + >>> G = nx.from_scipy_sparse_array(A) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array( + ... A, parallel_edges=True, create_using=nx.MultiGraph + ... ) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n)) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = _generate_weighted_edges(A) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. 
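+    # A small illustration: for an integer A with A[0, 1] == 2, a MultiGraph +    # target with parallel_edges=True gets two (0, 1) edges of weight 1, while +    # parallel_edges=False gets a single (0, 1) edge of weight 2.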
+ if A.dtype.kind in ("i", "u") and G.is_multigraph() and parallel_edges: + chain = itertools.chain.from_iterable + # The following line is equivalent to: + # + # for (u, v) in edges: + # for d in range(A[u, v]): + # G.add_edge(u, v, weight=1) + # + triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples) + # If we are creating an undirected multigraph, only add the edges from the + # upper triangle of the matrix. Otherwise, add all the edges. This relies + # on the fact that the vertices created in the + # `_generate_weighted_edges()` function are actually the row/column + # indices for the matrix `A`. + # + # Without this check, we run into a problem where each edge is added twice + # when `G.add_weighted_edges_from()` is invoked below. + if G.is_multigraph() and not G.is_directed(): + triples = ((u, v, d) for u, v, d in triples if u <= v) + G.add_weighted_edges_from(triples, weight=edge_attribute) + return G + + +@nx._dispatchable(edge_attrs="weight") # edge attrs may also be obtained from `dtype` +def to_numpy_array( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a NumPy array. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy array. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is ``None``, then the ordering is produced by ``G.nodes()``. + + dtype : NumPy data type, optional + A NumPy data type used to initialize the array. If None, then the NumPy + default is used. The dtype can be structured if `weight=None`, in which + case the dtype field names are used to look up edge attributes. The + result is a structured array where each named field in the dtype + corresponds to the adjacency for that edge attribute. See examples for + details. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + multigraph_weight : callable, optional + A function that determines how weights in multigraphs are handled. + The function should accept a sequence of weights and return a single + value. The default is to sum the weights of the multiple edges. + + weight : string or None, optional (default = 'weight') + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. `weight` must be ``None`` if a structured + dtype is used. + + nonedge : array_like (default = 0.0) + The value used to represent non-edges in the adjacency matrix. + The array values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are array values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as ``nan``. + + Returns + ------- + A : NumPy ndarray + Graph adjacency matrix + + Raises + ------ + NetworkXError + If `dtype` is a structured dtype and `G` is a multigraph + ValueError + If `dtype` is a structured dtype and `weight` is not `None` + + See Also + -------- + from_numpy_array + + Notes + ----- + For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``. + + Entries in the adjacency matrix are given by the `weight` edge attribute. + When an edge does not have a weight attribute, the value of the entry is + set to the number 1.
For multiple (parallel) edges, the values of the + entries are determined by the `multigraph_weight` parameter. The default is + to sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the adjacency matrix is + built from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal array entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting NumPy array can be modified as follows: + + >>> import numpy as np + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_numpy_array(G) + >>> A + array([[1.]]) + >>> A[np.diag_indices_from(A)] *= 2 + >>> A + array([[2.]]) + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_numpy_array(G, nodelist=[0, 1, 2]) + array([[0., 2., 0.], + [1., 0., 0.], + [0., 0., 4.]]) + + When the `nodelist` argument is used, nodes of `G` that do not appear in `nodelist` + (and their incident edges) are not included in the adjacency matrix. Here is an example: + + >>> G = nx.Graph() + >>> G.add_edge(3, 1) + >>> G.add_edge(2, 0) + >>> G.add_edge(2, 1) + >>> G.add_edge(3, 0) + >>> nx.to_numpy_array(G, nodelist=[1, 2, 3]) + array([[0., 1., 1.], + [1., 0., 0.], + [1., 0., 0.]]) + + This function can also be used to create adjacency matrices for multiple + edge attributes with structured dtypes: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, weight=10) + >>> G.add_edge(1, 2, cost=5) + >>> G.add_edge(2, 3, weight=3, cost=-4.0) + >>> dtype = np.dtype([("weight", int), ("cost", float)]) + >>> A = nx.to_numpy_array(G, dtype=dtype, weight=None) + >>> A["weight"] + array([[ 0, 10, 0, 0], + [10, 0, 1, 0], + [ 0, 1, 0, 3], + [ 0, 0, 3, 0]]) + >>> A["cost"] + array([[ 0., 1., 0., 0.], + [ 1., 0., 5., 0.], + [ 0., 5., 0., -4.], + [ 0., 0., -4., 0.]]) + + As stated above, the `nonedge` argument is especially useful when the graph + actually contains edges with weight 0. Setting a nonedge value different from 0 + makes it much clearer to distinguish such 0-weighted edges from actual nonedges. + + >>> G = nx.Graph() + >>> G.add_edge(3, 1, weight=2) + >>> G.add_edge(2, 0, weight=0) + >>> G.add_edge(2, 1, weight=0) + >>> G.add_edge(3, 0, weight=1) + >>> nx.to_numpy_array(G, nonedge=-1.0) + array([[-1., 2., -1., 1.], + [ 2., -1., 0., -1.], + [-1., 0., -1., 0.], + [ 1., -1., 0., -1.]]) + """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + nlen = len(nodelist) + + # Input validation + nodeset = set(nodelist) + if nodeset - set(G): + raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist are not in G") + if len(nodeset) < nlen: + raise nx.NetworkXError("nodelist contains duplicates.") + + A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order) + + # Corner cases: empty nodelist or graph without any edges + if nlen == 0 or G.number_of_edges() == 0: + return A + + # If dtype is structured and weight is None, use dtype field names as + # edge attributes + edge_attrs = None # Only single edge attribute by default + if A.dtype.names: + if weight is None: + edge_attrs = dtype.names + else: + raise ValueError( + "Specifying `weight` not supported for structured dtypes.\n" + "To create adjacency matrices from structured dtypes, use `weight=None`."
+ ) + + # Map nodes to row/col in matrix + idx = dict(zip(nodelist, range(nlen))) + if len(nodelist) < len(G): + G = G.subgraph(nodelist).copy() + + # Collect all edge weights and reduce with `multigraph_weights` + if G.is_multigraph(): + if edge_attrs: + raise nx.NetworkXError( + "Structured arrays are not supported for MultiGraphs" + ) + d = defaultdict(list) + for u, v, wt in G.edges(data=weight, default=1.0): + d[(idx[u], idx[v])].append(wt) + i, j = np.array(list(d.keys())).T # indices + wts = [multigraph_weight(ws) for ws in d.values()] # reduced weights + else: + i, j, wts = [], [], [] + + # Special branch: multi-attr adjacency from structured dtypes + if edge_attrs: + # Extract edges with all data + for u, v, data in G.edges(data=True): + i.append(idx[u]) + j.append(idx[v]) + wts.append(data) + # Map each attribute to the appropriate named field in the + # structured dtype + for attr in edge_attrs: + attr_data = [wt.get(attr, 1.0) for wt in wts] + A[attr][i, j] = attr_data + if not G.is_directed(): + A[attr][j, i] = attr_data + return A + + for u, v, wt in G.edges(data=weight, default=1.0): + i.append(idx[u]) + j.append(idx[v]) + wts.append(wt) + + # Set array values with advanced indexing + A[i, j] = wts + if not G.is_directed(): + A[j, i] = wts + + return A + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_numpy_array( + A, parallel_edges=False, create_using=None, edge_attr="weight", *, nodelist=None +): + """Returns a graph from a 2D NumPy array. + + The 2D NumPy array is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + A : a 2D numpy.ndarray + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer array, then entry *(i, j)* in the array is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the array are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_attr : String, optional (default="weight") + The attribute to which the array values are assigned on each edge. If + it is None, edge attributes will not be assigned. + + nodelist : sequence of nodes, optional + A sequence of objects to use as the nodes in the graph. If provided, the + list of nodes must be the same length as the dimensions of `A`. The + default is `None`, in which case the nodes are drawn from ``range(n)``. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (of the same type as `create_using`) with parallel edges. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the array `A` will be added to the + graph. + + If `edge_attr` is Falsy (False or None), edge attributes will not be + assigned, and the array data will be treated like a binary mask of + edge presence or absence. Otherwise, the attributes will be assigned + as follows: + + If the NumPy array has a single data type for each array entry it + will be converted to an appropriate Python data type. 
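+ + A brief sketch of the Falsy case described above: + + >>> import numpy as np + >>> nx.from_numpy_array(np.array([[0, 5], [5, 0]]), edge_attr=None).edges(data=True) + EdgeDataView([(0, 1, {})])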
+ + If the NumPy array has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_numpy_array + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy as np + >>> A = np.array([[1, 1], [2, 1]]) + >>> G = nx.from_numpy_array(A) + >>> G.edges(data=True) + EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})]) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = np.array([[1, 1], [1, 2]]) + >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = np.array([[1, 1], [1, 2]]) + >>> temp = nx.MultiGraph() + >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + User defined compound data type on edges: + + >>> dt = [("weight", float), ("cost", int)] + >>> A = np.array([[(1.0, 2)]], dtype=dt) + >>> G = nx.from_numpy_array(A) + >>> G.edges() + EdgeView([(0, 0)]) + >>> G[0][0]["cost"] + 2 + >>> G[0][0]["weight"] + 1.0 + + """ + kind_to_python_type = { + "f": float, + "i": int, + "u": int, + "b": bool, + "c": complex, + "S": str, + "U": str, + "V": "void", + } + G = nx.empty_graph(0, create_using) + if A.ndim != 2: + raise nx.NetworkXError(f"Input array must be 2D, not {A.ndim}") + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + dt = A.dtype + try: + python_type = kind_to_python_type[dt.kind] + except Exception as err: + raise TypeError(f"Unknown numpy data type: {dt}") from err + if _default_nodes := (nodelist is None): + nodelist = range(n) + else: + if len(nodelist) != n: + raise ValueError("nodelist must have the same length as A.shape[0]") + + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(nodelist) + # Get a list of all the entries in the array with nonzero entries. These + # coordinates become edges in the graph. (convert to int from np.int64) + edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero())) + # handle numpy constructed data type + if python_type == "void": + # Sort the fields by their offset, then by dtype, then by name. + fields = sorted( + (offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items() + ) + triples = ( + ( + u, + v, + {} + if edge_attr in [False, None] + else { + name: kind_to_python_type[dtype.kind](val) + for (_, dtype, name), val in zip(fields, A[u, v]) + }, + ) + for u, v in edges + ) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. 
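+    # (This mirrors the integer/parallel_edges branch in from_scipy_sparse_array +    # above; e.g. an entry A[u, v] == 3 expands to three (u, v, {edge_attr: 1}) +    # triples when parallel_edges is True.)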
+ elif python_type is int and G.is_multigraph() and parallel_edges: + chain = itertools.chain.from_iterable + # The following line is equivalent to: + # + # for (u, v) in edges: + # for d in range(A[u, v]): + # G.add_edge(u, v, weight=1) + # + if edge_attr in [False, None]: + triples = chain(((u, v, {}) for d in range(A[u, v])) for (u, v) in edges) + else: + triples = chain( + ((u, v, {edge_attr: 1}) for d in range(A[u, v])) for (u, v) in edges + ) + else: # basic data type + if edge_attr in [False, None]: + triples = ((u, v, {}) for u, v in edges) + else: + triples = ((u, v, {edge_attr: python_type(A[u, v])}) for u, v in edges) + # If we are creating an undirected multigraph, only add the edges from the + # upper triangle of the matrix. Otherwise, add all the edges. This relies + # on the fact that the vertices created in the + # `_generated_weighted_edges()` function are actually the row/column + # indices for the matrix `A`. + # + # Without this check, we run into a problem where each edge is added twice + # when `G.add_edges_from()` is invoked below. + if G.is_multigraph() and not G.is_directed(): + triples = ((u, v, d) for u, v, d in triples if u <= v) + # Remap nodes if user provided custom `nodelist` + if not _default_nodes: + idx_to_node = dict(enumerate(nodelist)) + triples = ((idx_to_node[u], idx_to_node[v], d) for u, v, d in triples) + G.add_edges_from(triples) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/__init__.py b/.venv/lib/python3.12/site-packages/networkx/drawing/__init__.py new file mode 100644 index 0000000..0f53309 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/__init__.py @@ -0,0 +1,7 @@ +# graph drawing and interface to graphviz + +from .layout import * +from .nx_latex import * +from .nx_pylab import * +from . import nx_agraph +from . 
import nx_pydot diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..ecde5e3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/layout.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/layout.cpython-312.pyc new file mode 100644 index 0000000..9591991 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/layout.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_agraph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_agraph.cpython-312.pyc new file mode 100644 index 0000000..a024613 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_agraph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-312.pyc new file mode 100644 index 0000000..ab51682 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_pydot.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_pydot.cpython-312.pyc new file mode 100644 index 0000000..8267865 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_pydot.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_pylab.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_pylab.cpython-312.pyc new file mode 100644 index 0000000..d3a9a8b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/__pycache__/nx_pylab.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/layout.py b/.venv/lib/python3.12/site-packages/networkx/drawing/layout.py new file mode 100644 index 0000000..5a447d5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/layout.py @@ -0,0 +1,2028 @@ +""" +****** +Layout +****** + +Node positioning algorithms for graph drawing. + +For `random_layout()` the possible resulting shape +is a square of side [0, scale] (default: [0, 1]) +Changing `center` shifts the layout by that amount. + +For the other layout routines, the extent is +[center - scale, center + scale] (default: [-1, 1]). + +Warning: Most layout routines have only been tested in 2-dimensions. + +""" + +import networkx as nx +from networkx.utils import np_random_state + +__all__ = [ + "bipartite_layout", + "circular_layout", + "forceatlas2_layout", + "kamada_kawai_layout", + "random_layout", + "rescale_layout", + "rescale_layout_dict", + "shell_layout", + "spring_layout", + "spectral_layout", + "planar_layout", + "fruchterman_reingold_layout", + "spiral_layout", + "multipartite_layout", + "bfs_layout", + "arf_layout", +] + + +def _process_params(G, center, dim): + # Some boilerplate code. 
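+    # (Shared input handling for the layout functions below: coerce a plain +    # node container to a Graph and check that `center` has one coordinate +    # per layout dimension.)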
+ import numpy as np + + if not isinstance(G, nx.Graph): + empty_graph = nx.Graph() + empty_graph.add_nodes_from(G) + G = empty_graph + + if center is None: + center = np.zeros(dim) + else: + center = np.asarray(center) + + if len(center) != dim: + msg = "length of center coordinates must match dimension of layout" + raise ValueError(msg) + + return G, center + + +@np_random_state(3) +def random_layout(G, center=None, dim=2, seed=None, store_pos_as=None): + """Position nodes uniformly at random in the unit square. + + For every node, a position is generated by choosing each of dim + coordinates uniformly at random on the interval [0.0, 1.0). + + NumPy (http://scipy.org) is required for this function. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + seed : int, RandomState instance or None optional (default=None) + Set the random state for deterministic node layouts. + If int, `seed` is the seed used by the random number generator, + if numpy.random.RandomState instance, `seed` is the random + number generator, + if None, the random number generator is the RandomState instance used + by numpy.random. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.lollipop_graph(4, 3) + >>> pos = nx.random_layout(G) + >>> # suppress the returned dict and store on the graph directly + >>> _ = nx.random_layout(G, seed=42, store_pos_as="pos") + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([0.37454012, 0.9507143 ], dtype=float32), + 1: array([0.7319939, 0.5986585], dtype=float32), + 2: array([0.15601864, 0.15599452], dtype=float32), + 3: array([0.05808361, 0.8661761 ], dtype=float32), + 4: array([0.601115 , 0.7080726], dtype=float32), + 5: array([0.02058449, 0.96990985], dtype=float32), + 6: array([0.83244264, 0.21233912], dtype=float32)} + """ + import numpy as np + + G, center = _process_params(G, center, dim) + pos = seed.rand(len(G), dim) + center + pos = pos.astype(np.float32) + pos = dict(zip(G, pos)) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + return pos + + +def circular_layout(G, scale=1, center=None, dim=2, store_pos_as=None): + # dim=2 only + """Position nodes on a circle. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + If dim>2, the remaining dimensions are set to zero + in the returned positions. + If dim<2, a ValueError is raised. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. 
+ + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim < 2 + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.path_graph(4) + >>> pos = nx.circular_layout(G) + >>> # suppress the returned dict and store on the graph directly + >>> _ = nx.circular_layout(G, store_pos_as="pos") + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([9.99999986e-01, 2.18556937e-08]), + 1: array([-3.57647606e-08, 1.00000000e+00]), + 2: array([-9.9999997e-01, -6.5567081e-08]), + 3: array([ 1.98715071e-08, -9.99999956e-01])} + + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + """ + import numpy as np + + if dim < 2: + raise ValueError("cannot handle dimensions < 2") + + G, center = _process_params(G, center, dim) + + paddims = max(0, (dim - 2)) + + if len(G) == 0: + pos = {} + elif len(G) == 1: + pos = {nx.utils.arbitrary_element(G): center} + else: + # Discard the extra angle since it matches 0 radians. + theta = np.linspace(0, 1, len(G) + 1)[:-1] * 2 * np.pi + theta = theta.astype(np.float32) + pos = np.column_stack( + [np.cos(theta), np.sin(theta), np.zeros((len(G), paddims))] + ) + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + + return pos + + +def shell_layout( + G, nlist=None, rotate=None, scale=1, center=None, dim=2, store_pos_as=None +): + """Position nodes in concentric circles. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + nlist : list of lists + List of node lists for each shell. + + rotate : angle in radians (default=pi/len(nlist)) + Angle by which to rotate the starting position of each shell + relative to the starting position of the previous shell. + To recreate behavior before v2.5 use rotate=0. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout, currently only dim=2 is supported. + Other dimension values result in a ValueError. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim != 2 + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.path_graph(4) + >>> shells = [[0], [1, 2, 3]] + >>> pos = nx.shell_layout(G, shells) + >>> # suppress the returned dict and store on the graph directly + >>> _ = nx.shell_layout(G, shells, store_pos_as="pos") + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([0., 0.]), + 1: array([-5.00000000e-01, -4.37113883e-08]), + 2: array([ 0.24999996, -0.43301272]), + 3: array([0.24999981, 0.43301281])} + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. 
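+ + With the default ``nlist=None``, every node is placed on a single shell, + similar to `circular_layout` (a sketch): + + >>> pos = nx.shell_layout(G) # one shell containing all of G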
+ + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + if len(G) == 1: + return {nx.utils.arbitrary_element(G): center} + + if nlist is None: + # draw the whole graph in one shell + nlist = [list(G)] + + radius_bump = scale / len(nlist) + + if len(nlist[0]) == 1: + # single node at center + radius = 0.0 + else: + # else start at r=1 + radius = radius_bump + + if rotate is None: + rotate = np.pi / len(nlist) + first_theta = rotate + npos = {} + for nodes in nlist: + # Discard the last angle (endpoint=False) since 2*pi matches 0 radians + theta = ( + np.linspace(0, 2 * np.pi, len(nodes), endpoint=False, dtype=np.float32) + + first_theta + ) + pos = radius * np.column_stack([np.cos(theta), np.sin(theta)]) + center + npos.update(zip(nodes, pos)) + radius += radius_bump + first_theta += rotate + + if store_pos_as is not None: + nx.set_node_attributes(G, npos, store_pos_as) + return npos + + +def bipartite_layout( + G, + nodes=None, + align="vertical", + scale=1, + center=None, + aspect_ratio=4 / 3, + store_pos_as=None, +): + """Position nodes in two straight lines. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + nodes : collection of nodes + Nodes in one node set of the graph. This set will be placed on + left or top. If `None` (the default), a node set is chosen arbitrarily + if the graph if bipartite. + + align : string (default='vertical') + The alignment of nodes. Vertical or horizontal. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + aspect_ratio : number (default=4/3): + The ratio of the width to the height of the layout. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node. + + Raises + ------ + NetworkXError + If ``nodes=None`` and `G` is not bipartite. + + Examples + -------- + >>> G = nx.complete_bipartite_graph(3, 3) + >>> pos = nx.bipartite_layout(G) + + The ordering of the layout (i.e. which nodes appear on the left/top) can + be specified with the `nodes` parameter: + + >>> top, bottom = nx.bipartite.sets(G) + >>> pos = nx.bipartite_layout(G, nodes=bottom) # "bottom" set appears on the left + + `store_pos_as` can be used to store the node positions for the computed layout + directly on the nodes: + + >>> _ = nx.bipartite_layout(G, nodes=bottom, store_pos_as="pos") + >>> from pprint import pprint + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([ 1. , -0.75]), + 1: array([1., 0.]), + 2: array([1. , 0.75]), + 3: array([-1. , -0.75]), + 4: array([-1., 0.]), + 5: array([-1. , 0.75])} + + + The ``bipartite_layout`` function can be used with non-bipartite graphs + by explicitly specifying how the layout should be partitioned with `nodes`: + + >>> G = nx.complete_graph(5) # Non-bipartite + >>> pos = nx.bipartite_layout(G, nodes={0, 1, 2}) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + """ + + import numpy as np + + if align not in ("vertical", "horizontal"): + msg = "align must be either vertical or horizontal." 
+ raise ValueError(msg) + + G, center = _process_params(G, center=center, dim=2) + if len(G) == 0: + return {} + + height = 1 + width = aspect_ratio * height + offset = (width / 2, height / 2) + + if nodes is None: + top, bottom = nx.bipartite.sets(G) + nodes = list(G) + else: + top = set(nodes) + bottom = set(G) - top + # Preserves backward-compatible node ordering in returned pos dict + nodes = list(top) + list(bottom) + + left_xs = np.repeat(0, len(top)) + right_xs = np.repeat(width, len(bottom)) + left_ys = np.linspace(0, height, len(top)) + right_ys = np.linspace(0, height, len(bottom)) + + top_pos = np.column_stack([left_xs, left_ys]) - offset + bottom_pos = np.column_stack([right_xs, right_ys]) - offset + + pos = np.concatenate([top_pos, bottom_pos]) + pos = rescale_layout(pos, scale=scale) + center + if align == "horizontal": + pos = pos[:, ::-1] # swap x and y coords + pos = dict(zip(nodes, pos)) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + + return pos + + +@np_random_state(10) +def spring_layout( + G, + k=None, + pos=None, + fixed=None, + iterations=50, + threshold=1e-4, + weight="weight", + scale=1, + center=None, + dim=2, + seed=None, + store_pos_as=None, + *, + method="auto", + gravity=1.0, +): + """Position nodes using Fruchterman-Reingold force-directed algorithm. + + The algorithm simulates a force-directed representation of the network + treating edges as springs holding nodes close, while treating nodes + as repelling objects, sometimes called an anti-gravity force. + Simulation continues until the positions are close to an equilibrium. + + There are some hard-coded values: minimal distance between + nodes (0.01) and "temperature" of 0.1 to ensure nodes don't fly away. + During the simulation, `k` helps determine the distance between nodes, + though `scale` and `center` determine the size and place after + rescaling occurs at the end of the simulation. + + Fixing some nodes doesn't allow them to move in the simulation. + It also turns off the rescaling feature at the simulation's end. + In addition, setting `scale` to `None` turns off rescaling. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + k : float (default=None) + Optimal distance between nodes. If None the distance is set to + 1/sqrt(n) where n is the number of nodes. Increase this value + to move nodes farther apart. + + pos : dict or None optional (default=None) + Initial positions for nodes as a dictionary with node as keys + and values as a coordinate list or tuple. If None, then use + random initial positions. + + fixed : list or None optional (default=None) + Nodes to keep fixed at initial position. + Nodes not in ``G.nodes`` are ignored. + ValueError raised if `fixed` specified and `pos` not. + + iterations : int optional (default=50) + Maximum number of iterations taken + + threshold: float optional (default = 1e-4) + Threshold for relative error in node position changes. + The iteration stops if the error is below this threshold. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. Larger means a stronger attractive force. + If None, then all edge weights are 1. + + scale : number or None (default: 1) + Scale factor for positions. Not used unless `fixed is None`. + If scale is None, no rescaling is performed. + + center : array-like or None + Coordinate pair around which to center the layout. 
+ Not used unless `fixed is None`. + + dim : int + Dimension of layout. + + seed : int, RandomState instance or None optional (default=None) + Used only for the initial positions in the algorithm. + Set the random state for deterministic node layouts. + If int, `seed` is the seed used by the random number generator, + if numpy.random.RandomState instance, `seed` is the random + number generator, + if None, the random number generator is the RandomState instance used + by numpy.random. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + method : str optional (default='auto') + The method to compute the layout. + If 'force', the force-directed Fruchterman-Reingold algorithm [1]_ is used. + If 'energy', the energy-based optimization algorithm [2]_ is used with absolute + values of edge weights and gravitational forces acting on each connected component. + If 'auto', we use 'force' if ``len(G) < 500`` and 'energy' otherwise. + + gravity: float optional (default=1.0) + Used only for the method='energy'. + The positive coefficient of gravitational forces per connected component. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.path_graph(4) + >>> pos = nx.spring_layout(G) + >>> # suppress the returned dict and store on the graph directly + >>> _ = nx.spring_layout(G, seed=123, store_pos_as="pos") + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([-0.61520994, -1. ]), + 1: array([-0.21840965, -0.35501755]), + 2: array([0.21841264, 0.35502078]), + 3: array([0.61520696, 0.99999677])} + + # The same using longer but equivalent function name + >>> pos = nx.fruchterman_reingold_layout(G) + + References + ---------- + .. [1] Fruchterman, Thomas MJ, and Edward M. Reingold. + "Graph drawing by force-directed placement." + Software: Practice and experience 21, no. 11 (1991): 1129-1164. + http://dx.doi.org/10.1002/spe.4380211102 + .. [2] Hamaguchi, Hiroki, Naoki Marumo, and Akiko Takeda. + "Initial Placement for Fruchterman--Reingold Force Model With Coordinate Newton Direction." + arXiv preprint arXiv:2412.20317 (2024). 
+ https://arxiv.org/abs/2412.20317 + """ + import numpy as np + + if method not in ("auto", "force", "energy"): + raise ValueError("the method must be either auto, force, or energy.") + if method == "auto": + method = "force" if len(G) < 500 else "energy" + + G, center = _process_params(G, center, dim) + + if fixed is not None: + if pos is None: + raise ValueError("nodes are fixed without positions given") + for node in fixed: + if node not in pos: + raise ValueError("nodes are fixed without positions given") + nfixed = {node: i for i, node in enumerate(G)} + fixed = np.asarray([nfixed[node] for node in fixed if node in nfixed]) + + if pos is not None: + # Determine size of existing domain to adjust initial positions + dom_size = max(coord for pos_tup in pos.values() for coord in pos_tup) + if dom_size == 0: + dom_size = 1 + pos_arr = seed.rand(len(G), dim) * dom_size + center + + for i, n in enumerate(G): + if n in pos: + pos_arr[i] = np.asarray(pos[n]) + else: + pos_arr = None + dom_size = 1 + + if len(G) == 0: + return {} + if len(G) == 1: + pos = {nx.utils.arbitrary_element(G.nodes()): center} + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + return pos + + # Sparse matrix + if len(G) >= 500 or method == "energy": + A = nx.to_scipy_sparse_array(G, weight=weight, dtype="f") + if k is None and fixed is not None: + # We must adjust k by domain size for layouts not near 1x1 + nnodes, _ = A.shape + k = dom_size / np.sqrt(nnodes) + pos = _sparse_fruchterman_reingold( + A, k, pos_arr, fixed, iterations, threshold, dim, seed, method, gravity + ) + else: + A = nx.to_numpy_array(G, weight=weight) + if k is None and fixed is not None: + # We must adjust k by domain size for layouts not near 1x1 + nnodes, _ = A.shape + k = dom_size / np.sqrt(nnodes) + pos = _fruchterman_reingold( + A, k, pos_arr, fixed, iterations, threshold, dim, seed + ) + if fixed is None and scale is not None: + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + + return pos + + +fruchterman_reingold_layout = spring_layout + + +@np_random_state(7) +def _fruchterman_reingold( + A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None +): + # Position nodes in adjacency matrix A using Fruchterman-Reingold + # Entry point for NetworkX graph is fruchterman_reingold_layout() + import numpy as np + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + if pos is None: + # random initial positions + pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype) + else: + # make sure positions are of same type as matrix + pos = pos.astype(A.dtype) + + # optimal distance between nodes + if k is None: + k = np.sqrt(1.0 / nnodes) + # the initial "temperature" is about .1 of domain area (=1x1) + # this is the largest step allowed in the dynamics. + # We need to calculate this in case our fixed positions force our domain + # to be much bigger than 1x1 + t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1 + # simple cooling scheme. + # linearly step down by dt on each iteration so last iteration is size dt. 
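+    # e.g. with iterations=50 and an initial t of 0.1, the step cap shrinks
+    # 0.1, 0.098, ... down to roughly 0.002 on the final pass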
+ dt = t / (iterations + 1) + delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype) + # the inscrutable (but fast) version + # this is still O(V^2) + # could use multilevel methods to speed this up significantly + for iteration in range(iterations): + # matrix of difference between points + delta = pos[:, np.newaxis, :] - pos[np.newaxis, :, :] + # distance between points + distance = np.linalg.norm(delta, axis=-1) + # enforce minimum distance of 0.01 + np.clip(distance, 0.01, None, out=distance) + # displacement "force" + displacement = np.einsum( + "ijk,ij->ik", delta, (k * k / distance**2 - A * distance / k) + ) + # update positions + length = np.linalg.norm(displacement, axis=-1) + length = np.where(length < 0.01, 0.1, length) + delta_pos = np.einsum("ij,i->ij", displacement, t / length) + if fixed is not None: + # don't change positions of fixed nodes + delta_pos[fixed] = 0.0 + pos += delta_pos + # cool temperature + t -= dt + if (np.linalg.norm(delta_pos) / nnodes) < threshold: + break + return pos + + +@np_random_state(7) +def _sparse_fruchterman_reingold( + A, + k=None, + pos=None, + fixed=None, + iterations=50, + threshold=1e-4, + dim=2, + seed=None, + method="energy", + gravity=1.0, +): + # Position nodes in adjacency matrix A using Fruchterman-Reingold + # Entry point for NetworkX graph is fruchterman_reingold_layout() + # Sparse version + import numpy as np + import scipy as sp + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + if pos is None: + # random initial positions + pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype) + else: + # make sure positions are of same type as matrix + pos = pos.astype(A.dtype) + + # no fixed nodes + if fixed is None: + fixed = [] + + # optimal distance between nodes + if k is None: + k = np.sqrt(1.0 / nnodes) + + if method == "energy": + return _energy_fruchterman_reingold( + A, nnodes, k, pos, fixed, iterations, threshold, dim, gravity + ) + + # make sure we have a LIst of Lists representation + try: + A = A.tolil() + except AttributeError: + A = (sp.sparse.coo_array(A)).tolil() + + # the initial "temperature" is about .1 of domain area (=1x1) + # this is the largest step allowed in the dynamics. + t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1 + # simple cooling scheme. + # linearly step down by dt on each iteration so last iteration is size dt. 
+    dt = t / (iterations + 1)
+
+    displacement = np.zeros((dim, nnodes))
+    for iteration in range(iterations):
+        displacement *= 0
+        # loop over rows
+        for i in range(A.shape[0]):
+            if i in fixed:
+                continue
+            # difference between this row's node position and all others
+            delta = (pos[i] - pos).T
+            # distance between points
+            distance = np.sqrt((delta**2).sum(axis=0))
+            # enforce minimum distance of 0.01
+            distance = np.where(distance < 0.01, 0.01, distance)
+            # the adjacency matrix row
+            Ai = A.getrowview(i).toarray()  # TODO: revisit w/ sparse 1D container
+            # displacement "force"
+            displacement[:, i] += (
+                delta * (k * k / distance**2 - Ai * distance / k)
+            ).sum(axis=1)
+        # update positions
+        length = np.sqrt((displacement**2).sum(axis=0))
+        length = np.where(length < 0.01, 0.1, length)
+        delta_pos = (displacement * t / length).T
+        pos += delta_pos
+        # cool temperature
+        t -= dt
+        if (np.linalg.norm(delta_pos) / nnodes) < threshold:
+            break
+    return pos
+
+
+def _energy_fruchterman_reingold(
+    A, nnodes, k, pos, fixed, iterations, threshold, dim, gravity
+):
+    # Entry point for NetworkX graph is fruchterman_reingold_layout()
+    # energy-based version
+    import numpy as np
+    import scipy as sp
+
+    if gravity <= 0:
+        raise ValueError("gravity must be positive.")
+
+    # make sure we have a Compressed Sparse Row format
+    try:
+        A = A.tocsr()
+    except AttributeError:
+        A = sp.sparse.csr_array(A)
+
+    # Take absolute values of edge weights and symmetrize them
+    A = np.abs(A)
+    A = (A + A.T) / 2
+
+    n_components, labels = sp.sparse.csgraph.connected_components(A, directed=False)
+    bincount = np.bincount(labels)
+    batchsize = 500
+
+    def _cost_FR(x):
+        pos = x.reshape((nnodes, dim))
+        grad = np.zeros((nnodes, dim))
+        cost = 0.0
+        for l in range(0, nnodes, batchsize):
+            r = min(l + batchsize, nnodes)
+            # difference between selected node positions and all others
+            delta = pos[l:r, np.newaxis, :] - pos[np.newaxis, :, :]
+            # distance between points with a minimum distance of 1e-5
+            distance2 = np.sum(delta * delta, axis=2)
+            distance2 = np.maximum(distance2, 1e-10)
+            distance = np.sqrt(distance2)
+            # temporary variable for calculation
+            Ad = A[l:r] * distance
+            # attractive forces and repulsive forces
+            grad[l:r] = 2 * np.einsum("ij,ijk->ik", Ad / k - k**2 / distance2, delta)
+            # integrated attractive forces
+            cost += np.sum(Ad * distance2) / (3 * k)
+            # integrated repulsive forces
+            cost -= k**2 * np.sum(np.log(distance))
+        # gravitational force from the centroids of connected components to (0.5, ..., 0.5)^T
+        centers = np.zeros((n_components, dim))
+        np.add.at(centers, labels, pos)
+        delta0 = centers / bincount[:, np.newaxis] - 0.5
+        grad += gravity * delta0[labels]
+        cost += gravity * 0.5 * np.sum(bincount * np.linalg.norm(delta0, axis=1) ** 2)
+        # fix positions of fixed nodes
+        grad[fixed] = 0.0
+        return cost, grad.ravel()
+
+    # Optimization of the energy function by L-BFGS algorithm
+    options = {"maxiter": iterations, "gtol": threshold}
+    return sp.optimize.minimize(
+        _cost_FR, pos.ravel(), method="L-BFGS-B", jac=True, options=options
+    ).x.reshape((nnodes, dim))
+
+
+def kamada_kawai_layout(
+    G,
+    dist=None,
+    pos=None,
+    weight="weight",
+    scale=1,
+    center=None,
+    dim=2,
+    store_pos_as=None,
+):
+    """Position nodes using Kamada-Kawai path-length cost-function.
+
+    Parameters
+    ----------
+    G : NetworkX graph or list of nodes
+        A position will be assigned to every node in G.
+
+    dist : dict (default=None)
+        A two-level dictionary of optimal distances between nodes,
+        indexed by source and destination node.
+        If None, the distance is computed using shortest_path_length().
+
+    pos : dict or None optional (default=None)
+        Initial positions for nodes as a dictionary with node as keys
+        and values as a coordinate list or tuple. If None, then use
+        random_layout() for dim >= 3, circular_layout() for dim == 2,
+        and a linear layout for dim == 1.
+
+    weight : string or None optional (default='weight')
+        The edge attribute that holds the numerical value used for
+        the edge weight. If None, then all edge weights are 1.
+
+    scale : number (default: 1)
+        Scale factor for positions.
+
+    center : array-like or None
+        Coordinate pair around which to center the layout.
+
+    dim : int
+        Dimension of layout.
+
+    store_pos_as : str, default None
+        If non-None, the position of each node will be stored on the graph as
+        an attribute with this string as its name, which can be accessed with
+        ``G.nodes[...][store_pos_as]``. The function still returns the dictionary.
+
+    Returns
+    -------
+    pos : dict
+        A dictionary of positions keyed by node
+
+    Examples
+    --------
+    >>> from pprint import pprint
+    >>> G = nx.path_graph(4)
+    >>> pos = nx.kamada_kawai_layout(G)
+    >>> # suppress the returned dict and store on the graph directly
+    >>> _ = nx.kamada_kawai_layout(G, store_pos_as="pos")
+    >>> pprint(nx.get_node_attributes(G, "pos"))
+    {0: array([0.99996577, 0.99366857]),
+     1: array([0.32913544, 0.33543827]),
+     2: array([-0.33544334, -0.32910684]),
+     3: array([-0.99365787, -1.        ])}
+    """
+    import numpy as np
+
+    G, center = _process_params(G, center, dim)
+    nNodes = len(G)
+    if nNodes == 0:
+        return {}
+
+    if dist is None:
+        dist = dict(nx.shortest_path_length(G, weight=weight))
+    dist_mtx = 1e6 * np.ones((nNodes, nNodes))
+    for row, nr in enumerate(G):
+        if nr not in dist:
+            continue
+        rdist = dist[nr]
+        for col, nc in enumerate(G):
+            if nc not in rdist:
+                continue
+            dist_mtx[row][col] = rdist[nc]
+
+    if pos is None:
+        if dim >= 3:
+            pos = random_layout(G, dim=dim)
+        elif dim == 2:
+            pos = circular_layout(G, dim=dim)
+        else:
+            pos = dict(zip(G, np.linspace(0, 1, len(G))))
+    pos_arr = np.array([pos[n] for n in G])
+
+    pos = _kamada_kawai_solve(dist_mtx, pos_arr, dim)
+
+    pos = rescale_layout(pos, scale=scale) + center
+    pos = dict(zip(G, pos))
+
+    if store_pos_as is not None:
+        nx.set_node_attributes(G, pos, store_pos_as)
+
+    return pos
+
+
+def _kamada_kawai_solve(dist_mtx, pos_arr, dim):
+    # Anneal node locations based on the Kamada-Kawai cost-function,
+    # using the supplied matrix of preferred inter-node distances,
+    # and starting locations.
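+    #
+    # Roughly, the cost minimized here (see _kamada_kawai_costfn) is
+    #     0.5 * sum_ij (||p_i - p_j|| / d_ij - 1)**2
+    # plus a small meanwt * ||sum_i p_i||**2 / 2 term that keeps the
+    # layout centered near the origin.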
+ + import numpy as np + import scipy as sp + + meanwt = 1e-3 + costargs = (np, 1 / (dist_mtx + np.eye(dist_mtx.shape[0]) * 1e-3), meanwt, dim) + + optresult = sp.optimize.minimize( + _kamada_kawai_costfn, + pos_arr.ravel(), + method="L-BFGS-B", + args=costargs, + jac=True, + ) + + return optresult.x.reshape((-1, dim)) + + +def _kamada_kawai_costfn(pos_vec, np, invdist, meanweight, dim): + # Cost-function and gradient for Kamada-Kawai layout algorithm + nNodes = invdist.shape[0] + pos_arr = pos_vec.reshape((nNodes, dim)) + + delta = pos_arr[:, np.newaxis, :] - pos_arr[np.newaxis, :, :] + nodesep = np.linalg.norm(delta, axis=-1) + direction = np.einsum("ijk,ij->ijk", delta, 1 / (nodesep + np.eye(nNodes) * 1e-3)) + + offset = nodesep * invdist - 1.0 + offset[np.diag_indices(nNodes)] = 0 + + cost = 0.5 * np.sum(offset**2) + grad = np.einsum("ij,ij,ijk->ik", invdist, offset, direction) - np.einsum( + "ij,ij,ijk->jk", invdist, offset, direction + ) + + # Additional parabolic term to encourage mean position to be near origin: + sumpos = np.sum(pos_arr, axis=0) + cost += 0.5 * meanweight * np.sum(sumpos**2) + grad += meanweight * sumpos + + return (cost, grad.ravel()) + + +def spectral_layout(G, weight="weight", scale=1, center=None, dim=2, store_pos_as=None): + """Position nodes using the eigenvectors of the graph Laplacian. + + Using the unnormalized Laplacian, the layout shows possible clusters of + nodes which are an approximation of the ratio cut. If dim is the number of + dimensions then the positions are the entries of the dim eigenvectors + corresponding to the ascending eigenvalues starting from the second one. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None, then all edge weights are 1. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.path_graph(4) + >>> pos = nx.spectral_layout(G) + >>> # suppress the returned dict and store on the graph directly + >>> _ = nx.spectral_layout(G, store_pos_as="pos") + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([-1. , 0.76536686]), + 1: array([-0.41421356, -0.76536686]), + 2: array([ 0.41421356, -0.76536686]), + 3: array([1. , 0.76536686])} + + + Notes + ----- + Directed graphs will be considered as undirected graphs when + positioning the nodes. + + For larger graphs (>500 nodes) this will use the SciPy sparse + eigenvalue solver (ARPACK). 
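+
+    As a rough sketch of the computation (the private solvers differ in
+    detail), for ``dim=2`` the positions are the eigenvectors of the
+    unnormalized Laplacian ``L = D - A`` belonging to the second- and
+    third-smallest eigenvalues:
+
+    >>> import numpy as np
+    >>> A = nx.to_numpy_array(nx.path_graph(4))
+    >>> L = np.diag(A.sum(axis=1)) - A
+    >>> _, vecs = np.linalg.eigh(L)
+    >>> sketch = vecs[:, 1:3]  # skip the constant eigenvector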
+ """ + # handle some special cases that break the eigensolvers + import numpy as np + + G, center = _process_params(G, center, dim) + + if len(G) <= 2: + if len(G) == 0: + pos = np.array([]) + elif len(G) == 1: + pos = np.array([center]) + else: + pos = np.array([np.zeros(dim), np.array(center) * 2.0]) + return dict(zip(G, pos)) + try: + # Sparse matrix + if len(G) < 500: # dense solver is faster for small graphs + raise ValueError + A = nx.to_scipy_sparse_array(G, weight=weight, dtype="d") + # Symmetrize directed graphs + if G.is_directed(): + A = A + np.transpose(A) + pos = _sparse_spectral(A, dim) + except (ImportError, ValueError): + # Dense matrix + A = nx.to_numpy_array(G, weight=weight) + # Symmetrize directed graphs + if G.is_directed(): + A += A.T + pos = _spectral(A, dim) + + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + + return pos + + +def _spectral(A, dim=2): + # Input adjacency matrix A + # Uses dense eigenvalue solver from numpy + import numpy as np + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "spectral() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + # form Laplacian matrix where D is diagonal of degrees + D = np.identity(nnodes, dtype=A.dtype) * np.sum(A, axis=1) + L = D - A + + eigenvalues, eigenvectors = np.linalg.eig(L) + # sort and keep smallest nonzero + index = np.argsort(eigenvalues)[1 : dim + 1] # 0 index is zero eigenvalue + return np.real(eigenvectors[:, index]) + + +def _sparse_spectral(A, dim=2): + # Input adjacency matrix A + # Uses sparse eigenvalue solver from scipy + # Could use multilevel methods here, see Koren "On spectral graph drawing" + import numpy as np + import scipy as sp + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "sparse_spectral() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + # form Laplacian matrix + # TODO: Rm csr_array wrapper in favor of spdiags array constructor when available + D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, nnodes, nnodes)) + L = D - A + + k = dim + 1 + # number of Lanczos vectors for ARPACK solver.What is the right scaling? + ncv = max(2 * k + 1, int(np.sqrt(nnodes))) + # return smallest k eigenvalues and eigenvectors + eigenvalues, eigenvectors = sp.sparse.linalg.eigsh(L, k, which="SM", ncv=ncv) + index = np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue + return np.real(eigenvectors[:, index]) + + +def planar_layout(G, scale=1, center=None, dim=2, store_pos_as=None): + """Position nodes without edge intersections. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. If G is of type + nx.PlanarEmbedding, the positions are selected accordingly. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. 
+ + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + NetworkXException + If G is not planar + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.path_graph(4) + >>> pos = nx.planar_layout(G) + >>> # suppress the returned dict and store on the graph directly + >>> _ = nx.planar_layout(G, store_pos_as="pos") + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([-0.77777778, -0.33333333]), + 1: array([ 1. , -0.33333333]), + 2: array([0.11111111, 0.55555556]), + 3: array([-0.33333333, 0.11111111])} + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + + if isinstance(G, nx.PlanarEmbedding): + embedding = G + else: + is_planar, embedding = nx.check_planarity(G) + if not is_planar: + raise nx.NetworkXException("G is not planar.") + pos = nx.combinatorial_embedding_to_pos(embedding) + node_list = list(embedding) + pos = np.vstack([pos[x] for x in node_list]) + pos = pos.astype(np.float64) + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(node_list, pos)) + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + return pos + + +def spiral_layout( + G, + scale=1, + center=None, + dim=2, + resolution=0.35, + equidistant=False, + store_pos_as=None, +): + """Position nodes in a spiral layout. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int, default=2 + Dimension of layout, currently only dim=2 is supported. + Other dimension values result in a ValueError. + + resolution : float, default=0.35 + The compactness of the spiral layout returned. + Lower values result in more compressed spiral layouts. + + equidistant : bool, default=False + If True, nodes will be positioned equidistant from each other + by decreasing angle further from center. + If False, nodes will be positioned at equal angles + from each other by increasing separation further from center. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim != 2 + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.path_graph(4) + >>> pos = nx.spiral_layout(G) + >>> nx.draw(G, pos=pos) + >>> # suppress the returned dict and store on the graph directly + >>> _ = nx.spiral_layout(G, store_pos_as="pos") + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([-0.64153279, -0.68555087]), + 1: array([-0.03307913, -0.46344795]), + 2: array([0.34927952, 0.14899882]), + 3: array([0.32533239, 1. ])} + + Notes + ----- + This algorithm currently only works in two dimensions. 
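+
+    As a sketch of the default (``equidistant=False``) placement, node ``i``
+    is put (before rescaling) at radius ``i`` and angle ``resolution * i``
+    on an Archimedean spiral:
+
+    >>> import numpy as np
+    >>> i = np.arange(4, dtype=float)
+    >>> theta = 0.35 * i  # default resolution
+    >>> sketch = i[:, None] * np.column_stack([np.cos(theta), np.sin(theta)])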
+ + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + if len(G) == 1: + pos = {nx.utils.arbitrary_element(G): center} + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + return pos + + pos = [] + if equidistant: + chord = 1 + step = 0.5 + theta = resolution + theta += chord / (step * theta) + for _ in range(len(G)): + r = step * theta + theta += chord / r + pos.append([np.cos(theta) * r, np.sin(theta) * r]) + + else: + dist = np.arange(len(G), dtype=float) + angle = resolution * dist + pos = np.transpose(dist * np.array([np.cos(angle), np.sin(angle)])) + + pos = rescale_layout(np.array(pos), scale=scale) + center + + pos = dict(zip(G, pos)) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + + return pos + + +def multipartite_layout( + G, subset_key="subset", align="vertical", scale=1, center=None, store_pos_as=None +): + """Position nodes in layers of straight lines. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + subset_key : string or dict (default='subset') + If a string, the key of node data in G that holds the node subset. + If a dict, keyed by layer number to the nodes in that layer/subset. + + align : string (default='vertical') + The alignment of nodes. Vertical or horizontal. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node. + + Examples + -------- + >>> G = nx.complete_multipartite_graph(28, 16, 10) + >>> pos = nx.multipartite_layout(G) + >>> # suppress the returned dict and store on the graph directly + >>> G = nx.complete_multipartite_graph(28, 16, 10) + >>> _ = nx.multipartite_layout(G, store_pos_as="pos") + + or use a dict to provide the layers of the layout + + >>> G = nx.Graph([(0, 1), (1, 2), (1, 3), (3, 4)]) + >>> layers = {"a": [0], "b": [1], "c": [2, 3], "d": [4]} + >>> pos = nx.multipartite_layout(G, subset_key=layers) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + Network does not need to be a complete multipartite graph. As long as nodes + have subset_key data, they will be placed in the corresponding layers. + + """ + import numpy as np + + if align not in ("vertical", "horizontal"): + msg = "align must be either vertical or horizontal." 
+ raise ValueError(msg) + + G, center = _process_params(G, center=center, dim=2) + if len(G) == 0: + return {} + + try: + # check if subset_key is dict-like + if len(G) != sum(len(nodes) for nodes in subset_key.values()): + raise nx.NetworkXError( + "all nodes must be in one subset of `subset_key` dict" + ) + except AttributeError: + # subset_key is not a dict, hence a string + node_to_subset = nx.get_node_attributes(G, subset_key) + if len(node_to_subset) != len(G): + raise nx.NetworkXError( + f"all nodes need a subset_key attribute: {subset_key}" + ) + subset_key = nx.utils.groups(node_to_subset) + + # Sort by layer, if possible + try: + layers = dict(sorted(subset_key.items())) + except TypeError: + layers = subset_key + + pos = None + nodes = [] + width = len(layers) + for i, layer in enumerate(layers.values()): + height = len(layer) + xs = np.repeat(i, height) + ys = np.arange(0, height, dtype=float) + offset = ((width - 1) / 2, (height - 1) / 2) + layer_pos = np.column_stack([xs, ys]) - offset + if pos is None: + pos = layer_pos + else: + pos = np.concatenate([pos, layer_pos]) + nodes.extend(layer) + pos = rescale_layout(pos, scale=scale) + center + if align == "horizontal": + pos = pos[:, ::-1] # swap x and y coords + pos = dict(zip(nodes, pos)) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + + return pos + + +@np_random_state("seed") +def arf_layout( + G, + pos=None, + scaling=1, + a=1.1, + etol=1e-6, + dt=1e-3, + max_iter=1000, + *, + seed=None, + store_pos_as=None, +): + """Arf layout for networkx + + The attractive and repulsive forces (arf) layout [1] improves the spring + layout in three ways. First, it prevents congestion of highly connected nodes + due to strong forcing between nodes. Second, it utilizes the layout space + more effectively by preventing large gaps that spring layout tends to create. + Lastly, the arf layout represents symmetries in the layout better than the + default spring layout. + + Parameters + ---------- + G : nx.Graph or nx.DiGraph + Networkx graph. + pos : dict + Initial position of the nodes. If set to None a + random layout will be used. + scaling : float + Scales the radius of the circular layout space. + a : float + Strength of springs between connected nodes. Should be larger than 1. + The greater a, the clearer the separation of unconnected sub clusters. + etol : float + Gradient sum of spring forces must be larger than `etol` before successful + termination. + dt : float + Time step for force differential equation simulations. + max_iter : int + Max iterations before termination of the algorithm. + seed : int, RandomState instance or None optional (default=None) + Set the random state for deterministic node layouts. + If int, `seed` is the seed used by the random number generator, + if numpy.random.RandomState instance, `seed` is the random + number generator, + if None, the random number generator is the RandomState instance used + by numpy.random. + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node. 
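+
+    Notes
+    -----
+    As a sketch of the update rule implemented in this function, each node
+    ``x`` moves by ``dt`` times the net force
+    ``sum_j [ K_xj * (p_j - p_x) - rho * (p_j - p_x) / ||p_j - p_x|| ]``,
+    where ``rho = scaling * sqrt(N)`` and ``K_xj = a`` for connected pairs
+    and 1 otherwise.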
+ + Examples + -------- + >>> G = nx.grid_graph((5, 5)) + >>> pos = nx.arf_layout(G) + >>> # suppress the returned dict and store on the graph directly + >>> G = nx.grid_graph((5, 5)) + >>> _ = nx.arf_layout(G, store_pos_as="pos") + + References + ---------- + .. [1] "Self-Organization Applied to Dynamic Network Layout", M. Geipel, + International Journal of Modern Physics C, 2007, Vol 18, No 10, + pp. 1537-1549. + https://doi.org/10.1142/S0129183107011558 https://arxiv.org/abs/0704.1748 + """ + import warnings + + import numpy as np + + if a <= 1: + msg = "The parameter a should be larger than 1" + raise ValueError(msg) + + pos_tmp = nx.random_layout(G, seed=seed) + if pos is None: + pos = pos_tmp + else: + for node in G.nodes(): + if node not in pos: + pos[node] = pos_tmp[node].copy() + + # Initialize spring constant matrix + N = len(G) + # No nodes no computation + if N == 0: + return pos + + # init force of springs + K = np.ones((N, N)) - np.eye(N) + node_order = {node: i for i, node in enumerate(G)} + for x, y in G.edges(): + if x != y: + idx, jdx = (node_order[i] for i in (x, y)) + K[idx, jdx] = a + + # vectorize values + p = np.asarray(list(pos.values())) + + # equation 10 in [1] + rho = scaling * np.sqrt(N) + + # looping variables + error = etol + 1 + n_iter = 0 + while error > etol: + diff = p[:, np.newaxis] - p[np.newaxis] + A = np.linalg.norm(diff, axis=-1)[..., np.newaxis] + # attraction_force - repulsions force + # suppress nans due to division; caused by diagonal set to zero. + # Does not affect the computation due to nansum + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + change = K[..., np.newaxis] * diff - rho / A * diff + change = np.nansum(change, axis=0) + p += change * dt + + error = np.linalg.norm(change, axis=-1).sum() + if n_iter > max_iter: + break + n_iter += 1 + + pos = dict(zip(G.nodes(), p)) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + + return pos + + +@np_random_state("seed") +@nx._dispatchable(edge_attrs="weight", mutates_input={"store_pos_as": 15}) +def forceatlas2_layout( + G, + pos=None, + *, + max_iter=100, + jitter_tolerance=1.0, + scaling_ratio=2.0, + gravity=1.0, + distributed_action=False, + strong_gravity=False, + node_mass=None, + node_size=None, + weight=None, + dissuade_hubs=False, + linlog=False, + seed=None, + dim=2, + store_pos_as=None, +): + """Position nodes using the ForceAtlas2 force-directed layout algorithm. + + This function applies the ForceAtlas2 layout algorithm [1]_ to a NetworkX graph, + positioning the nodes in a way that visually represents the structure of the graph. + The algorithm uses physical simulation to minimize the energy of the system, + resulting in a more readable layout. + + Parameters + ---------- + G : nx.Graph + A NetworkX graph to be laid out. + pos : dict or None, optional + Initial positions of the nodes. If None, random initial positions are used. + max_iter : int (default: 100) + Number of iterations for the layout optimization. + jitter_tolerance : float (default: 1.0) + Controls the tolerance for adjusting the speed of layout generation. + scaling_ratio : float (default: 2.0) + Determines the scaling of attraction and repulsion forces. + gravity : float (default: 1.0) + Determines the amount of attraction on nodes to the center. Prevents islands + (i.e. weakly connected or disconnected parts of the graph) + from drifting away. + distributed_action : bool (default: False) + Distributes the attraction force evenly among nodes. 
+    strong_gravity : bool (default: False)
+        Applies a strong gravitational pull towards the center.
+    node_mass : dict or None, optional
+        Maps nodes to their masses, influencing the attraction to other nodes.
+    node_size : dict or None, optional
+        Maps nodes to their sizes, preventing crowding by creating a halo effect.
+    weight : string or None, optional (default: None)
+        The edge attribute that holds the numerical value used for
+        the edge weight. If None, then all edge weights are 1.
+    dissuade_hubs : bool (default: False)
+        Prevents the clustering of hub nodes.
+    linlog : bool (default: False)
+        Uses logarithmic attraction instead of linear.
+    seed : int, RandomState instance or None optional (default=None)
+        Used only for the initial positions in the algorithm.
+        Set the random state for deterministic node layouts.
+        If int, `seed` is the seed used by the random number generator,
+        if numpy.random.RandomState instance, `seed` is the random
+        number generator,
+        if None, the random number generator is the RandomState instance used
+        by numpy.random.
+    dim : int (default: 2)
+        Sets the dimensions for the layout. Ignored if `pos` is provided.
+    store_pos_as : str, default None
+        If non-None, the position of each node will be stored on the graph as
+        an attribute with this string as its name, which can be accessed with
+        ``G.nodes[...][store_pos_as]``. The function still returns the dictionary.
+
+    Examples
+    --------
+    >>> import networkx as nx
+    >>> G = nx.florentine_families_graph()
+    >>> pos = nx.forceatlas2_layout(G)
+    >>> nx.draw(G, pos=pos)
+    >>> # suppress the returned dict and store on the graph directly
+    >>> _ = nx.forceatlas2_layout(G, store_pos_as="pos")
+
+    References
+    ----------
+    .. [1] Jacomy, M., Venturini, T., Heymann, S., & Bastian, M. (2014).
+           ForceAtlas2, a continuous graph layout algorithm for handy network
+           visualization designed for the Gephi software. PloS one, 9(6), e98679.
+           https://doi.org/10.1371/journal.pone.0098679
+    """
+    import numpy as np
+
+    if len(G) == 0:
+        return {}
+    # parse optional pos positions
+    if pos is None:
+        pos = nx.random_layout(G, dim=dim, seed=seed)
+        pos_arr = np.array(list(pos.values()))
+    elif len(pos) == len(G):
+        pos_arr = np.array([pos[node].copy() for node in G])
+    else:
+        # set random node pos within the initial pos values
+        pos_init = np.array(list(pos.values()))
+        max_pos = pos_init.max(axis=0)
+        min_pos = pos_init.min(axis=0)
+        dim = max_pos.size
+        pos_arr = min_pos + seed.rand(len(G), dim) * (max_pos - min_pos)
+        for idx, node in enumerate(G):
+            if node in pos:
+                pos_arr[idx] = pos[node].copy()
+
+    mass = np.zeros(len(G))
+    size = np.zeros(len(G))
+
+    # Only adjust for size when the user specifies a size other than the default (1)
+    adjust_sizes = False
+    if node_size is None:
+        node_size = {}
+    else:
+        adjust_sizes = True
+
+    if node_mass is None:
+        node_mass = {}
+
+    for idx, node in enumerate(G):
+        mass[idx] = node_mass.get(node, G.degree(node) + 1)
+        size[idx] = node_size.get(node, 1)
+
+    n = len(G)
+    gravities = np.zeros((n, dim))
+    attraction = np.zeros((n, dim))
+    repulsion = np.zeros((n, dim))
+    A = nx.to_numpy_array(G, weight=weight)
+
+    def estimate_factor(n, swing, traction, speed, speed_efficiency, jitter_tolerance):
+        """Computes the scaling factor for the force in the ForceAtlas2 layout algorithm.
+ + This helper function adjusts the speed and + efficiency of the layout generation based on the + current state of the system, such as the number of + nodes, current swing, and traction forces. + + Parameters + ---------- + n : int + Number of nodes in the graph. + swing : float + The current swing, representing the oscillation of the nodes. + traction : float + The current traction force, representing the attraction between nodes. + speed : float + The current speed of the layout generation. + speed_efficiency : float + The efficiency of the current speed, influencing how fast the layout converges. + jitter_tolerance : float + The tolerance for jitter, affecting how much speed adjustment is allowed. + + Returns + ------- + tuple + A tuple containing the updated speed and speed efficiency. + + Notes + ----- + This function is a part of the ForceAtlas2 layout algorithm and is used to dynamically adjust the + layout parameters to achieve an optimal and stable visualization. + + """ + import numpy as np + + # estimate jitter + opt_jitter = 0.05 * np.sqrt(n) + min_jitter = np.sqrt(opt_jitter) + max_jitter = 10 + min_speed_efficiency = 0.05 + + other = min(max_jitter, opt_jitter * traction / n**2) + jitter = jitter_tolerance * max(min_jitter, other) + + if swing / traction > 2.0: + if speed_efficiency > min_speed_efficiency: + speed_efficiency *= 0.5 + jitter = max(jitter, jitter_tolerance) + if swing == 0: + target_speed = np.inf + else: + target_speed = jitter * speed_efficiency * traction / swing + + if swing > jitter * traction: + if speed_efficiency > min_speed_efficiency: + speed_efficiency *= 0.7 + elif speed < 1000: + speed_efficiency *= 1.3 + + max_rise = 0.5 + speed = speed + min(target_speed - speed, max_rise * speed) + return speed, speed_efficiency + + speed = 1 + speed_efficiency = 1 + swing = 1 + traction = 1 + for _ in range(max_iter): + # compute pairwise difference + diff = pos_arr[:, None] - pos_arr[None] + # compute pairwise distance + distance = np.linalg.norm(diff, axis=-1) + + # linear attraction + if linlog: + attraction = -np.log(1 + distance) / distance + np.fill_diagonal(attraction, 0) + attraction = np.einsum("ij, ij -> ij", attraction, A) + attraction = np.einsum("ijk, ij -> ik", diff, attraction) + + else: + attraction = -np.einsum("ijk, ij -> ik", diff, A) + + if distributed_action: + attraction /= mass[:, None] + + # repulsion + tmp = mass[:, None] @ mass[None] + if adjust_sizes: + distance += -size[:, None] - size[None] + + d2 = distance**2 + # remove self-interaction + np.fill_diagonal(tmp, 0) + np.fill_diagonal(d2, 1) + factor = (tmp / d2) * scaling_ratio + repulsion = np.einsum("ijk, ij -> ik", diff, factor) + + # gravity + pos_centered = pos_arr - np.mean(pos_arr, axis=0) + if strong_gravity: + gravities = -gravity * mass[:, None] * pos_centered + else: + # hide warnings for divide by zero. 
Then change nan to 0
+            with np.errstate(divide="ignore", invalid="ignore"):
+                unit_vec = pos_centered / np.linalg.norm(pos_centered, axis=-1)[:, None]
+                unit_vec = np.nan_to_num(unit_vec, nan=0)
+                gravities = -gravity * mass[:, None] * unit_vec
+
+        # total forces
+        update = attraction + repulsion + gravities
+
+        # compute total swing and traction
+        swing += (mass * np.linalg.norm(pos_arr - update, axis=-1)).sum()
+        traction += (0.5 * mass * np.linalg.norm(pos_arr + update, axis=-1)).sum()
+
+        speed, speed_efficiency = estimate_factor(
+            n,
+            swing,
+            traction,
+            speed,
+            speed_efficiency,
+            jitter_tolerance,
+        )
+
+        # update pos
+        if adjust_sizes:
+            df = np.linalg.norm(update, axis=-1)
+            swinging = mass * df
+            factor = 0.1 * speed / (1 + np.sqrt(speed * swinging))
+            factor = np.minimum(factor * df, 10.0 * np.ones(df.shape)) / df
+        else:
+            swinging = mass * np.linalg.norm(update, axis=-1)
+            factor = speed / (1 + np.sqrt(speed * swinging))
+
+        factored_update = update * factor[:, None]
+        pos_arr += factored_update
+        if abs(factored_update).sum() < 1e-10:
+            break
+
+    pos = dict(zip(G, pos_arr))
+    if store_pos_as is not None:
+        nx.set_node_attributes(G, pos, store_pos_as)
+
+    return pos
+
+
+def rescale_layout(pos, scale=1):
+    """Returns scaled position array to (-scale, scale) in all axes.
+
+    The function acts on NumPy arrays which hold position information.
+    Each position is one row of the array. The dimension of the space
+    equals the number of columns. Each coordinate is in one column.
+
+    To rescale, the mean (center) is subtracted from each axis separately.
+    Then all values are scaled so that the largest magnitude value
+    from all axes equals `scale` (thus, the aspect ratio is preserved).
+    The resulting NumPy Array is returned (order of rows unchanged).
+
+    Parameters
+    ----------
+    pos : numpy array
+        positions to be scaled. Each row is a position.
+
+    scale : number (default: 1)
+        The size of the resulting extent in all directions.
+
+    Returns
+    -------
+    pos : numpy array
+        scaled positions. Each row is a position.
+
+    See Also
+    --------
+    rescale_layout_dict
+    """
+    import numpy as np
+
+    # Find max length over all dimensions
+    pos -= pos.mean(axis=0)
+    lim = np.abs(pos).max()  # max coordinate for all axes
+    # rescale to (-scale, scale) in all directions, preserves aspect
+    if lim > 0:
+        pos *= scale / lim
+    return pos
+
+
+def rescale_layout_dict(pos, scale=1):
+    """Return a dictionary of scaled positions keyed by node
+
+    Parameters
+    ----------
+    pos : A dictionary of positions keyed by node
+
+    scale : number (default: 1)
+        The size of the resulting extent in all directions.
+ + Returns + ------- + pos : A dictionary of positions keyed by node + + Examples + -------- + >>> import numpy as np + >>> pos = {0: np.array((0, 0)), 1: np.array((1, 1)), 2: np.array((0.5, 0.5))} + >>> nx.rescale_layout_dict(pos) + {0: array([-1., -1.]), 1: array([1., 1.]), 2: array([0., 0.])} + + >>> pos = {0: np.array((0, 0)), 1: np.array((-1, 1)), 2: np.array((-0.5, 0.5))} + >>> nx.rescale_layout_dict(pos, scale=2) + {0: array([ 2., -2.]), 1: array([-2., 2.]), 2: array([0., 0.])} + + See Also + -------- + rescale_layout + """ + import numpy as np + + if not pos: # empty_graph + return {} + pos_v = np.array(list(pos.values())) + pos_v = rescale_layout(pos_v, scale=scale) + return dict(zip(pos, pos_v)) + + +def bfs_layout(G, start, *, align="vertical", scale=1, center=None, store_pos_as=None): + """Position nodes according to breadth-first search algorithm. + + Parameters + ---------- + G : NetworkX graph + A position will be assigned to every node in G. + + start : node in `G` + Starting node for bfs + + center : array-like or None + Coordinate pair around which to center the layout. + + store_pos_as : str, default None + If non-None, the position of each node will be stored on the graph as + an attribute with this string as its name, which can be accessed with + ``G.nodes[...][store_pos_as]``. The function still returns the dictionary. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node. + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.path_graph(4) + >>> pos = nx.bfs_layout(G, 0) + >>> # suppress the returned dict and store on the graph directly + >>> _ = nx.bfs_layout(G, 0, store_pos_as="pos") + >>> pprint(nx.get_node_attributes(G, "pos")) + {0: array([-1., 0.]), + 1: array([-0.33333333, 0. ]), + 2: array([0.33333333, 0. ]), + 3: array([1., 0.])} + + + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + """ + G, center = _process_params(G, center, 2) + + # Compute layers with BFS + layers = dict(enumerate(nx.bfs_layers(G, start))) + + if len(G) != sum(len(nodes) for nodes in layers.values()): + raise nx.NetworkXError( + "bfs_layout didn't include all nodes. Perhaps use input graph:\n" + " G.subgraph(nx.node_connected_component(G, start))" + ) + + # Compute node positions with multipartite_layout + pos = multipartite_layout( + G, subset_key=layers, align=align, scale=scale, center=center + ) + + if store_pos_as is not None: + nx.set_node_attributes(G, pos, store_pos_as) + + return pos diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/nx_agraph.py b/.venv/lib/python3.12/site-packages/networkx/drawing/nx_agraph.py new file mode 100644 index 0000000..8f1a7be --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/nx_agraph.py @@ -0,0 +1,464 @@ +""" +*************** +Graphviz AGraph +*************** + +Interface to pygraphviz AGraph class. 
+ +Examples +-------- +>>> G = nx.complete_graph(5) +>>> A = nx.nx_agraph.to_agraph(G) +>>> H = nx.nx_agraph.from_agraph(A) + +See Also +-------- + - Pygraphviz: http://pygraphviz.github.io/ + - Graphviz: https://www.graphviz.org + - DOT Language: http://www.graphviz.org/doc/info/lang.html +""" + +import tempfile + +import networkx as nx + +__all__ = [ + "from_agraph", + "to_agraph", + "write_dot", + "read_dot", + "graphviz_layout", + "pygraphviz_layout", + "view_pygraphviz", +] + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_agraph(A, create_using=None): + """Returns a NetworkX Graph or DiGraph from a PyGraphviz graph. + + Parameters + ---------- + A : PyGraphviz AGraph + A graph created with PyGraphviz + + create_using : NetworkX graph constructor, optional (default=None) + Graph type to create. If graph instance, then cleared before populated. + If `None`, then the appropriate Graph type is inferred from `A`. + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_agraph.to_agraph(K5) + >>> G = nx.nx_agraph.from_agraph(A) + + Notes + ----- + The Graph G will have a dictionary G.graph_attr containing + the default graphviz attributes for graphs, nodes and edges. + + Default node attributes will be in the dictionary G.node_attr + which is keyed by node. + + Edge attributes will be returned as edge data in G. With + edge_attr=False the edge data will be the Graphviz edge weight + attribute or the value 1 if no edge weight attribute is found. + + """ + if create_using is None: + if A.is_directed(): + if A.is_strict(): + create_using = nx.DiGraph + else: + create_using = nx.MultiDiGraph + else: + if A.is_strict(): + create_using = nx.Graph + else: + create_using = nx.MultiGraph + + # assign defaults + N = nx.empty_graph(0, create_using) + if A.name is not None: + N.name = A.name + + # add graph attributes + N.graph.update(A.graph_attr) + + # add nodes, attributes to N.node_attr + for n in A.nodes(): + str_attr = {str(k): v for k, v in n.attr.items()} + N.add_node(str(n), **str_attr) + + # add edges, assign edge data as dictionary of attributes + for e in A.edges(): + u, v = str(e[0]), str(e[1]) + attr = dict(e.attr) + str_attr = {str(k): v for k, v in attr.items()} + if not N.is_multigraph(): + if e.name is not None: + str_attr["key"] = e.name + N.add_edge(u, v, **str_attr) + else: + N.add_edge(u, v, key=e.name, **str_attr) + + # add default attributes for graph, nodes, and edges + # hang them on N.graph_attr + N.graph["graph"] = dict(A.graph_attr) + N.graph["node"] = dict(A.node_attr) + N.graph["edge"] = dict(A.edge_attr) + return N + + +def to_agraph(N): + """Returns a pygraphviz graph from a NetworkX graph N. + + Parameters + ---------- + N : NetworkX graph + A graph created with NetworkX + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_agraph.to_agraph(K5) + + Notes + ----- + If N has an dict N.graph_attr an attempt will be made first + to copy properties attached to the graph (see from_agraph) + and then updated with the calling arguments if any. 
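+
+    For example (a small sketch; requires pygraphviz), Graphviz defaults can
+    be attached under the special "graph", "node" and "edge" keys before
+    converting:
+
+    >>> G = nx.path_graph(2)
+    >>> G.graph["node"] = {"shape": "circle"}
+    >>> A = nx.nx_agraph.to_agraph(G)  # doctest: +SKIP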
+ + """ + try: + import pygraphviz + except ImportError as err: + raise ImportError("requires pygraphviz http://pygraphviz.github.io/") from err + directed = N.is_directed() + strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph() + + A = pygraphviz.AGraph(name=N.name, strict=strict, directed=directed) + + # default graph attributes + A.graph_attr.update(N.graph.get("graph", {})) + A.node_attr.update(N.graph.get("node", {})) + A.edge_attr.update(N.graph.get("edge", {})) + + A.graph_attr.update( + (k, v) for k, v in N.graph.items() if k not in ("graph", "node", "edge") + ) + + # add nodes + for n, nodedata in N.nodes(data=True): + A.add_node(n) + # Add node data + a = A.get_node(n) + for key, val in nodedata.items(): + if key == "pos": + a.attr["pos"] = f"{val[0]},{val[1]}!" + else: + a.attr[key] = str(val) + + # loop over edges + if N.is_multigraph(): + for u, v, key, edgedata in N.edges(data=True, keys=True): + str_edgedata = {k: str(v) for k, v in edgedata.items() if k != "key"} + A.add_edge(u, v, key=str(key)) + # Add edge data + a = A.get_edge(u, v) + a.attr.update(str_edgedata) + + else: + for u, v, edgedata in N.edges(data=True): + str_edgedata = {k: str(v) for k, v in edgedata.items()} + A.add_edge(u, v) + # Add edge data + a = A.get_edge(u, v) + a.attr.update(str_edgedata) + + return A + + +def write_dot(G, path): + """Write NetworkX graph G to Graphviz dot format on path. + + Parameters + ---------- + G : graph + A networkx graph + path : filename + Filename or file handle to write + + Notes + ----- + To use a specific graph layout, call ``A.layout`` prior to `write_dot`. + Note that some graphviz layouts are not guaranteed to be deterministic, + see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info. + """ + A = to_agraph(G) + A.write(path) + A.clear() + return + + +@nx._dispatchable(name="agraph_read_dot", graphs=None, returns_graph=True) +def read_dot(path): + """Returns a NetworkX graph from a dot file on path. + + Parameters + ---------- + path : file or string + File name or file handle to read. + """ + try: + import pygraphviz + except ImportError as err: + raise ImportError( + "read_dot() requires pygraphviz http://pygraphviz.github.io/" + ) from err + A = pygraphviz.AGraph(file=path) + gr = from_agraph(A) + A.clear() + return gr + + +def graphviz_layout(G, prog="neato", root=None, args=""): + """Create node positions for G using Graphviz. + + Parameters + ---------- + G : NetworkX graph + A graph created with NetworkX + prog : string + Name of Graphviz layout program + root : string, optional + Root node for twopi layout + args : string, optional + Extra arguments to Graphviz layout program + + Returns + ------- + Dictionary of x, y, positions keyed by node. + + Examples + -------- + >>> G = nx.petersen_graph() + >>> pos = nx.nx_agraph.graphviz_layout(G) + >>> pos = nx.nx_agraph.graphviz_layout(G, prog="dot") + + Notes + ----- + This is a wrapper for pygraphviz_layout. + + Note that some graphviz layouts are not guaranteed to be deterministic, + see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info. + """ + return pygraphviz_layout(G, prog=prog, root=root, args=args) + + +def pygraphviz_layout(G, prog="neato", root=None, args=""): + """Create node positions for G using Graphviz. 
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A graph created with NetworkX
+    prog : string
+        Name of Graphviz layout program
+    root : string, optional
+        Root node for twopi layout
+    args : string, optional
+        Extra arguments to Graphviz layout program
+
+    Returns
+    -------
+    node_pos : dict
+        Dictionary of x, y, positions keyed by node.
+
+    Examples
+    --------
+    >>> G = nx.petersen_graph()
+    >>> pos = nx.nx_agraph.graphviz_layout(G)
+    >>> pos = nx.nx_agraph.graphviz_layout(G, prog="dot")
+
+    Notes
+    -----
+    If you use complex node objects, they may have the same string
+    representation and GraphViz could treat them as the same node.
+    The layout may assign both nodes a single location. See Issue #1568
+    If this occurs in your case, consider relabeling the nodes just
+    for the layout computation using something similar to::
+
+        >>> H = nx.convert_node_labels_to_integers(G, label_attribute="node_label")
+        >>> H_layout = nx.nx_agraph.pygraphviz_layout(H, prog="dot")
+        >>> G_layout = {H.nodes[n]["node_label"]: p for n, p in H_layout.items()}
+
+    Note that some graphviz layouts are not guaranteed to be deterministic,
+    see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.
+    """
+    try:
+        import pygraphviz
+    except ImportError as err:
+        raise ImportError("requires pygraphviz http://pygraphviz.github.io/") from err
+    if root is not None:
+        args += f" -Groot={root}"
+    A = to_agraph(G)
+    A.layout(prog=prog, args=args)
+    node_pos = {}
+    for n in G:
+        node = pygraphviz.Node(A, n)
+        try:
+            xs = node.attr["pos"].split(",")
+            node_pos[n] = tuple(float(x) for x in xs)
+        except Exception:  # missing or malformed "pos" attribute
+            print("no position for node", n)
+            node_pos[n] = (0.0, 0.0)
+    return node_pos
+
+
+@nx.utils.open_file(5, "w+b")
+def view_pygraphviz(
+    G, edgelabel=None, prog="dot", args="", suffix="", path=None, show=True
+):
+    """Views the graph G using the specified layout algorithm.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph to draw.
+    edgelabel : str, callable, None
+        If a string, then it specifies the edge attribute to be displayed
+        on the edge labels. If a callable, then it is called for each
+        edge and it should return the string to be displayed on the edges.
+        The function signature of `edgelabel` should be edgelabel(data),
+        where `data` is the edge attribute dictionary.
+    prog : string
+        Name of Graphviz layout program.
+    args : str
+        Additional arguments to pass to the Graphviz layout program.
+    suffix : str
+        If `path` is None, we save to a temporary file. The value of
+        `suffix` will appear at the tail end of the temporary filename.
+    path : str, None
+        The filename used to save the image. If None, save to a temporary
+        file. File formats are the same as those from pygraphviz.agraph.draw.
+        Filenames ending in .gz or .bz2 will be compressed.
+    show : bool, default = True
+        Whether to display the graph with :mod:`PIL.Image.show`,
+        default is `True`. If `False`, the rendered graph is still available
+        at `path`.
+
+    Returns
+    -------
+    path : str
+        The filename of the generated image.
+    A : PyGraphviz graph
+        The PyGraphviz graph instance used to generate the image.
+
+    Notes
+    -----
+    If this function is called in succession too quickly, sometimes the
+    image is not displayed. So you might consider time.sleep(.5) between
+    calls if you experience problems.
+
+    Note that some graphviz layouts are not guaranteed to be deterministic,
+    see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.
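+
+    A minimal usage sketch (assumes pygraphviz and Pillow are available;
+    ``show=False`` keeps the image viewer from opening):
+
+    >>> G = nx.wheel_graph(6)
+    >>> path, A = nx.nx_agraph.view_pygraphviz(G, show=False)  # doctest: +SKIP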
+ + """ + if not len(G): + raise nx.NetworkXException("An empty graph cannot be drawn.") + + # If we are providing default values for graphviz, these must be set + # before any nodes or edges are added to the PyGraphviz graph object. + # The reason for this is that default values only affect incoming objects. + # If you change the default values after the objects have been added, + # then they inherit no value and are set only if explicitly set. + + # to_agraph() uses these values. + attrs = ["edge", "node", "graph"] + for attr in attrs: + if attr not in G.graph: + G.graph[attr] = {} + + # These are the default values. + edge_attrs = {"fontsize": "10"} + node_attrs = { + "style": "filled", + "fillcolor": "#0000FF40", + "height": "0.75", + "width": "0.75", + "shape": "circle", + } + graph_attrs = {} + + def update_attrs(which, attrs): + # Update graph attributes. Return list of those which were added. + added = [] + for k, v in attrs.items(): + if k not in G.graph[which]: + G.graph[which][k] = v + added.append(k) + + def clean_attrs(which, added): + # Remove added attributes + for attr in added: + del G.graph[which][attr] + if not G.graph[which]: + del G.graph[which] + + # Update all default values + update_attrs("edge", edge_attrs) + update_attrs("node", node_attrs) + update_attrs("graph", graph_attrs) + + # Convert to agraph, so we inherit default values + A = to_agraph(G) + + # Remove the default values we added to the original graph. + clean_attrs("edge", edge_attrs) + clean_attrs("node", node_attrs) + clean_attrs("graph", graph_attrs) + + # If the user passed in an edgelabel, we update the labels for all edges. + if edgelabel is not None: + if not callable(edgelabel): + + def func(data): + return "".join([" ", str(data[edgelabel]), " "]) + + else: + func = edgelabel + + # update all the edge labels + if G.is_multigraph(): + for u, v, key, data in G.edges(keys=True, data=True): + # PyGraphviz doesn't convert the key to a string. See #339 + edge = A.get_edge(u, v, str(key)) + edge.attr["label"] = str(func(data)) + else: + for u, v, data in G.edges(data=True): + edge = A.get_edge(u, v) + edge.attr["label"] = str(func(data)) + + if path is None: + ext = "png" + if suffix: + suffix = f"_{suffix}.{ext}" + else: + suffix = f".{ext}" + path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False) + else: + # Assume the decorator worked and it is a file-object. + pass + + # Write graph to file + A.draw(path=path, format=None, prog=prog, args=args) + path.close() + + # Show graph in a new window (depends on platform configuration) + if show: + from PIL import Image + + Image.open(path.name).show() + + return path.name, A diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/nx_latex.py b/.venv/lib/python3.12/site-packages/networkx/drawing/nx_latex.py new file mode 100644 index 0000000..677def8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/nx_latex.py @@ -0,0 +1,570 @@ +r""" +***** +LaTeX +***** + +Export NetworkX graphs in LaTeX format using the TikZ library within TeX/LaTeX. +Usually, you will want the drawing to appear in a figure environment so +you use ``to_latex(G, caption="A caption")``. If you want the raw +drawing commands without a figure environment use :func:`to_latex_raw`. +And if you want to write to a file instead of just returning the latex +code as a string, use ``write_latex(G, "filename.tex", caption="A caption")``. 
+ +To construct a figure with subfigures for each graph to be shown, provide +``to_latex`` or ``write_latex`` a list of graphs, a list of subcaptions, +and a number of rows of subfigures inside the figure. + +To be able to refer to the figures or subfigures in latex using ``\\ref``, +the keyword ``latex_label`` is available for figures and `sub_labels` for +a list of labels, one for each subfigure. + +We intend to eventually provide an interface to the TikZ Graph +features which include e.g. layout algorithms. + +Let us know via github what you'd like to see available, or better yet +give us some code to do it, or even better make a github pull request +to add the feature. + +The TikZ approach +================= +Drawing options can be stored on the graph as node/edge attributes, or +can be provided as dicts keyed by node/edge to a string of the options +for that node/edge. Similarly a label can be shown for each node/edge +by specifying the labels as graph node/edge attributes or by providing +a dict keyed by node/edge to the text to be written for that node/edge. + +Options for the tikzpicture environment (e.g. "[scale=2]") can be provided +via a keyword argument. Similarly default node and edge options can be +provided through keywords arguments. The default node options are applied +to the single TikZ "path" that draws all nodes (and no edges). The default edge +options are applied to a TikZ "scope" which contains a path for each edge. + +Examples +======== +>>> G = nx.path_graph(3) +>>> nx.write_latex(G, "just_my_figure.tex", as_document=True) +>>> nx.write_latex(G, "my_figure.tex", caption="A path graph", latex_label="fig1") +>>> latex_code = nx.to_latex(G) # a string rather than a file + +You can change many features of the nodes and edges. + +>>> G = nx.path_graph(4, create_using=nx.DiGraph) +>>> pos = {n: (n, n) for n in G} # nodes set on a line + +>>> G.nodes[0]["style"] = "blue" +>>> G.nodes[2]["style"] = "line width=3,draw" +>>> G.nodes[3]["label"] = "Stop" +>>> G.edges[(0, 1)]["label"] = "1st Step" +>>> G.edges[(0, 1)]["label_opts"] = "near start" +>>> G.edges[(1, 2)]["style"] = "line width=3" +>>> G.edges[(1, 2)]["label"] = "2nd Step" +>>> G.edges[(2, 3)]["style"] = "green" +>>> G.edges[(2, 3)]["label"] = "3rd Step" +>>> G.edges[(2, 3)]["label_opts"] = "near end" + +>>> nx.write_latex(G, "latex_graph.tex", pos=pos, as_document=True) + +Then compile the LaTeX using something like ``pdflatex latex_graph.tex`` +and view the pdf file created: ``latex_graph.pdf``. + +If you want **subfigures** each containing one graph, you can input a list of graphs. 
+ +>>> H1 = nx.path_graph(4) +>>> H2 = nx.complete_graph(4) +>>> H3 = nx.path_graph(8) +>>> H4 = nx.complete_graph(8) +>>> graphs = [H1, H2, H3, H4] +>>> caps = ["Path 4", "Complete graph 4", "Path 8", "Complete graph 8"] +>>> lbls = ["fig2a", "fig2b", "fig2c", "fig2d"] +>>> nx.write_latex(graphs, "subfigs.tex", n_rows=2, sub_captions=caps, sub_labels=lbls) +>>> latex_code = nx.to_latex(graphs, n_rows=2, sub_captions=caps, sub_labels=lbls) + +>>> node_color = {0: "red", 1: "orange", 2: "blue", 3: "gray!90"} +>>> edge_width = {e: "line width=1.5" for e in H3.edges} +>>> pos = nx.circular_layout(H3) +>>> latex_code = nx.to_latex(H3, pos, node_options=node_color, edge_options=edge_width) +>>> print(latex_code) +\documentclass{report} +\usepackage{tikz} +\usepackage{subcaption} + +\begin{document} +\begin{figure} + \begin{tikzpicture} + \draw + (1.0, 0.0) node[red] (0){0} + (0.707, 0.707) node[orange] (1){1} + (-0.0, 1.0) node[blue] (2){2} + (-0.707, 0.707) node[gray!90] (3){3} + (-1.0, -0.0) node (4){4} + (-0.707, -0.707) node (5){5} + (0.0, -1.0) node (6){6} + (0.707, -0.707) node (7){7}; + \begin{scope}[-] + \draw[line width=1.5] (0) to (1); + \draw[line width=1.5] (1) to (2); + \draw[line width=1.5] (2) to (3); + \draw[line width=1.5] (3) to (4); + \draw[line width=1.5] (4) to (5); + \draw[line width=1.5] (5) to (6); + \draw[line width=1.5] (6) to (7); + \end{scope} + \end{tikzpicture} +\end{figure} +\end{document} + +Notes +----- +If you want to change the preamble/postamble of the figure/document/subfigure +environment, use the keyword arguments: `figure_wrapper`, `document_wrapper`, +`subfigure_wrapper`. The default values are stored in private variables +e.g. ``nx.nx_layout._DOCUMENT_WRAPPER`` + +References +---------- +TikZ: https://tikz.dev/ + +TikZ options details: https://tikz.dev/tikz-actions +""" + +import networkx as nx + +__all__ = [ + "to_latex_raw", + "to_latex", + "write_latex", +] + + +@nx.utils.not_implemented_for("multigraph") +def to_latex_raw( + G, + pos="pos", + tikz_options="", + default_node_options="", + node_options="node_options", + node_label="label", + default_edge_options="", + edge_options="edge_options", + edge_label="label", + edge_label_options="edge_label_options", +): + """Return a string of the LaTeX/TikZ code to draw `G` + + This function produces just the code for the tikzpicture + without any enclosing environment. + + Parameters + ========== + G : NetworkX graph + The NetworkX graph to be drawn + pos : string or dict (default "pos") + The name of the node attribute on `G` that holds the position of each node. + Positions can be sequences of length 2 with numbers for (x,y) coordinates. + They can also be strings to denote positions in TikZ style, such as (x, y) + or (angle:radius). + If a dict, it should be keyed by node to a position. + If an empty dict, a circular layout is computed by TikZ. + tikz_options : string + The tikzpicture options description defining the options for the picture. + Often large scale options like `[scale=2]`. + default_node_options : string + The draw options for a path of nodes. Individual node options override these. + node_options : string or dict + The name of the node attribute on `G` that holds the options for each node. + Or a dict keyed by node to a string holding the options for that node. + node_label : string or dict + The name of the node attribute on `G` that holds the node label (text) + displayed for each node. If the attribute is "" or not present, the node + itself is drawn as a string. 
LaTeX processing such as ``"$A_1$"`` is allowed. + Or a dict keyed by node to a string holding the label for that node. + default_edge_options : string + The options for the scope drawing all edges. The default is "[-]" for + undirected graphs and "[->]" for directed graphs. + edge_options : string or dict + The name of the edge attribute on `G` that holds the options for each edge. + If the edge is a self-loop and ``"loop" not in edge_options`` the option + "loop," is added to the options for the self-loop edge. Hence you can + use "[loop above]" explicitly, but the default is "[loop]". + Or a dict keyed by edge to a string holding the options for that edge. + edge_label : string or dict + The name of the edge attribute on `G` that holds the edge label (text) + displayed for each edge. If the attribute is "" or not present, no edge + label is drawn. + Or a dict keyed by edge to a string holding the label for that edge. + edge_label_options : string or dict + The name of the edge attribute on `G` that holds the label options for + each edge. For example, "[sloped,above,blue]". The default is no options. + Or a dict keyed by edge to a string holding the label options for that edge. + + Returns + ======= + latex_code : string + The text string which draws the desired graph(s) when compiled by LaTeX. + + See Also + ======== + to_latex + write_latex + """ + i4 = "\n " + i8 = "\n " + + # set up position dict + # TODO allow pos to be None and use a nice TikZ default + if not isinstance(pos, dict): + pos = nx.get_node_attributes(G, pos) + if not pos: + # circular layout with radius 2 + pos = {n: f"({round(360.0 * i / len(G), 3)}:2)" for i, n in enumerate(G)} + for node in G: + if node not in pos: + raise nx.NetworkXError(f"node {node} has no specified pos {pos}") + posnode = pos[node] + if not isinstance(posnode, str): + try: + posx, posy = posnode + pos[node] = f"({round(posx, 3)}, {round(posy, 3)})" + except (TypeError, ValueError): + msg = f"position pos[{node}] is not 2-tuple or a string: {posnode}" + raise nx.NetworkXError(msg) + + # set up all the dicts + if not isinstance(node_options, dict): + node_options = nx.get_node_attributes(G, node_options) + if not isinstance(node_label, dict): + node_label = nx.get_node_attributes(G, node_label) + if not isinstance(edge_options, dict): + edge_options = nx.get_edge_attributes(G, edge_options) + if not isinstance(edge_label, dict): + edge_label = nx.get_edge_attributes(G, edge_label) + if not isinstance(edge_label_options, dict): + edge_label_options = nx.get_edge_attributes(G, edge_label_options) + + # process default options (add brackets or not) + topts = "" if tikz_options == "" else f"[{tikz_options.strip('[]')}]" + defn = "" if default_node_options == "" else f"[{default_node_options.strip('[]')}]" + linestyle = f"{'->' if G.is_directed() else '-'}" + if default_edge_options == "": + defe = "[" + linestyle + "]" + elif "-" in default_edge_options: + defe = default_edge_options + else: + defe = f"[{linestyle},{default_edge_options.strip('[]')}]" + + # Construct the string line by line + result = " \\begin{tikzpicture}" + topts + result += i4 + " \\draw" + defn + # load the nodes + for n in G: + # node options goes inside square brackets + nopts = f"[{node_options[n].strip('[]')}]" if n in node_options else "" + # node text goes inside curly brackets {} + ntext = f"{{{node_label[n]}}}" if n in node_label else f"{{{n}}}" + + result += i8 + f"{pos[n]} node{nopts} ({n}){ntext}" + result += ";\n" + + # load the edges + result += " \\begin{scope}" + 
defe + for edge in G.edges: + u, v = edge[:2] + e_opts = f"{edge_options[edge]}".strip("[]") if edge in edge_options else "" + # add loop options for selfloops if not present + if u == v and "loop" not in e_opts: + e_opts = "loop," + e_opts + e_opts = f"[{e_opts}]" if e_opts != "" else "" + # TODO -- handle bending of multiedges + + els = edge_label_options[edge] if edge in edge_label_options else "" + # edge label options goes inside square brackets [] + els = f"[{els.strip('[]')}]" + # edge text is drawn using the TikZ node command inside curly brackets {} + e_label = f" node{els} {{{edge_label[edge]}}}" if edge in edge_label else "" + + result += i8 + f"\\draw{e_opts} ({u}) to{e_label} ({v});" + + result += "\n \\end{scope}\n \\end{tikzpicture}\n" + return result + + +_DOC_WRAPPER_TIKZ = r"""\documentclass{{report}} +\usepackage{{tikz}} +\usepackage{{subcaption}} + +\begin{{document}} +{content} +\end{{document}}""" + + +_FIG_WRAPPER = r"""\begin{{figure}} +{content}{caption}{label} +\end{{figure}}""" + + +_SUBFIG_WRAPPER = r""" \begin{{subfigure}}{{{size}\textwidth}} +{content}{caption}{label} + \end{{subfigure}}""" + + +def to_latex( + Gbunch, + pos="pos", + tikz_options="", + default_node_options="", + node_options="node_options", + node_label="node_label", + default_edge_options="", + edge_options="edge_options", + edge_label="edge_label", + edge_label_options="edge_label_options", + caption="", + latex_label="", + sub_captions=None, + sub_labels=None, + n_rows=1, + as_document=True, + document_wrapper=_DOC_WRAPPER_TIKZ, + figure_wrapper=_FIG_WRAPPER, + subfigure_wrapper=_SUBFIG_WRAPPER, +): + """Return latex code to draw the graph(s) in `Gbunch` + + The TikZ drawing utility in LaTeX is used to draw the graph(s). + If `Gbunch` is a graph, it is drawn in a figure environment. + If `Gbunch` is an iterable of graphs, each is drawn in a subfigure environment + within a single figure environment. + + If `as_document` is True, the figure is wrapped inside a document environment + so that the resulting string is ready to be compiled by LaTeX. Otherwise, + the string is ready for inclusion in a larger tex document using ``\\include`` + or ``\\input`` statements. + + Parameters + ========== + Gbunch : NetworkX graph or iterable of NetworkX graphs + The NetworkX graph to be drawn or an iterable of graphs + to be drawn inside subfigures of a single figure. + pos : string or list of strings + The name of the node attribute on `G` that holds the position of each node. + Positions can be sequences of length 2 with numbers for (x,y) coordinates. + They can also be strings to denote positions in TikZ style, such as (x, y) + or (angle:radius). + If a dict, it should be keyed by node to a position. + If an empty dict, a circular layout is computed by TikZ. + If you are drawing many graphs in subfigures, use a list of position dicts. + tikz_options : string + The tikzpicture options description defining the options for the picture. + Often large scale options like `[scale=2]`. + default_node_options : string + The draw options for a path of nodes. Individual node options override these. + node_options : string or dict + The name of the node attribute on `G` that holds the options for each node. + Or a dict keyed by node to a string holding the options for that node. + node_label : string or dict + The name of the node attribute on `G` that holds the node label (text) + displayed for each node. If the attribute is "" or not present, the node + itself is drawn as a string. 
LaTeX processing such as ``"$A_1$"`` is allowed.
+        Or a dict keyed by node to a string holding the label for that node.
+    default_edge_options : string
+        The options for the scope drawing all edges. The default is "[-]" for
+        undirected graphs and "[->]" for directed graphs.
+    edge_options : string or dict
+        The name of the edge attribute on `G` that holds the options for each edge.
+        If the edge is a self-loop and ``"loop" not in edge_options`` the option
+        "loop," is added to the options for the self-loop edge. Hence you can
+        use "[loop above]" explicitly, but the default is "[loop]".
+        Or a dict keyed by edge to a string holding the options for that edge.
+    edge_label : string or dict
+        The name of the edge attribute on `G` that holds the edge label (text)
+        displayed for each edge. If the attribute is "" or not present, no edge
+        label is drawn.
+        Or a dict keyed by edge to a string holding the label for that edge.
+    edge_label_options : string or dict
+        The name of the edge attribute on `G` that holds the label options for
+        each edge. For example, "[sloped,above,blue]". The default is no options.
+        Or a dict keyed by edge to a string holding the label options for that edge.
+    caption : string
+        The caption string for the figure environment
+    latex_label : string
+        The latex label used for the figure for easy referral from the main text
+    sub_captions : list of strings
+        The sub_caption string for each subfigure in the figure
+    sub_labels : list of strings
+        The latex label for each subfigure in the figure
+    n_rows : int
+        The number of rows of subfigures to arrange for multiple graphs
+    as_document : bool
+        Whether to wrap the latex code in a document environment for compiling
+    document_wrapper : formatted text string with variable ``content``.
+        This text is called to evaluate the content embedded in a document
+        environment with a preamble setting up TikZ.
+    figure_wrapper : formatted text string
+        This text is evaluated with variables ``content``, ``caption`` and ``label``.
+        It wraps the content and if a caption is provided, adds the latex code for
+        that caption, and if a label is provided, adds the latex code for a label.
+    subfigure_wrapper : formatted text string
+        This text evaluates variables ``size``, ``content``, ``caption`` and ``label``.
+        It wraps the content and if a caption is provided, adds the latex code for
+        that caption, and if a label is provided, adds the latex code for a label.
+        The size is the vertical size of each row of subfigures as a fraction.
+
+    Returns
+    =======
+    latex_code : string
+        The text string which draws the desired graph(s) when compiled by LaTeX.
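+
+    Examples
+    ========
+    A minimal sketch; the subcaptions are illustrative:
+
+    >>> G1, G2 = nx.path_graph(3), nx.cycle_graph(4)
+    >>> code = nx.to_latex([G1, G2], n_rows=1, sub_captions=["a path", "a cycle"])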
+ + See Also + ======== + write_latex + to_latex_raw + """ + if hasattr(Gbunch, "adj"): + raw = to_latex_raw( + Gbunch, + pos, + tikz_options, + default_node_options, + node_options, + node_label, + default_edge_options, + edge_options, + edge_label, + edge_label_options, + ) + else: # iterator of graphs + sbf = subfigure_wrapper + size = 1 / n_rows + + N = len(Gbunch) + if isinstance(pos, str | dict): + pos = [pos] * N + if sub_captions is None: + sub_captions = [""] * N + if sub_labels is None: + sub_labels = [""] * N + if not (len(Gbunch) == len(pos) == len(sub_captions) == len(sub_labels)): + raise nx.NetworkXError( + "length of Gbunch, sub_captions and sub_figures must agree" + ) + + raw = "" + for G, pos, subcap, sublbl in zip(Gbunch, pos, sub_captions, sub_labels): + subraw = to_latex_raw( + G, + pos, + tikz_options, + default_node_options, + node_options, + node_label, + default_edge_options, + edge_options, + edge_label, + edge_label_options, + ) + cap = f" \\caption{{{subcap}}}" if subcap else "" + lbl = f"\\label{{{sublbl}}}" if sublbl else "" + raw += sbf.format(size=size, content=subraw, caption=cap, label=lbl) + raw += "\n" + + # put raw latex code into a figure environment and optionally into a document + raw = raw[:-1] + cap = f"\n \\caption{{{caption}}}" if caption else "" + lbl = f"\\label{{{latex_label}}}" if latex_label else "" + fig = figure_wrapper.format(content=raw, caption=cap, label=lbl) + if as_document: + return document_wrapper.format(content=fig) + return fig + + +@nx.utils.open_file(1, mode="w") +def write_latex(Gbunch, path, **options): + """Write the latex code to draw the graph(s) onto `path`. + + This convenience function creates the latex drawing code as a string + and writes that to a file ready to be compiled when `as_document` is True + or ready to be ``import`` ed or ``include`` ed into your main LaTeX document. + + The `path` argument can be a string filename or a file handle to write to. + + Parameters + ---------- + Gbunch : NetworkX graph or iterable of NetworkX graphs + If Gbunch is a graph, it is drawn in a figure environment. + If Gbunch is an iterable of graphs, each is drawn in a subfigure + environment within a single figure environment. + path : string or file + Filename or file handle to write to. + Filenames ending in .gz or .bz2 will be compressed. + options : dict + By default, TikZ is used with options: (others are ignored):: + + pos : string or dict or list + The name of the node attribute on `G` that holds the position of each node. + Positions can be sequences of length 2 with numbers for (x,y) coordinates. + They can also be strings to denote positions in TikZ style, such as (x, y) + or (angle:radius). + If a dict, it should be keyed by node to a position. + If an empty dict, a circular layout is computed by TikZ. + If you are drawing many graphs in subfigures, use a list of position dicts. + tikz_options : string + The tikzpicture options description defining the options for the picture. + Often large scale options like `[scale=2]`. + default_node_options : string + The draw options for a path of nodes. Individual node options override these. + node_options : string or dict + The name of the node attribute on `G` that holds the options for each node. + Or a dict keyed by node to a string holding the options for that node. + node_label : string or dict + The name of the node attribute on `G` that holds the node label (text) + displayed for each node. If the attribute is "" or not present, the node + itself is drawn as a string. 
LaTeX processing such as ``"$A_1$"`` is allowed.
+            Or a dict keyed by node to a string holding the label for that node.
+        default_edge_options : string
+            The options for the scope drawing all edges. The default is "[-]" for
+            undirected graphs and "[->]" for directed graphs.
+        edge_options : string or dict
+            The name of the edge attribute on `G` that holds the options for each edge.
+            If the edge is a self-loop and ``"loop" not in edge_options`` the option
+            "loop," is added to the options for the self-loop edge. Hence you can
+            use "[loop above]" explicitly, but the default is "[loop]".
+            Or a dict keyed by edge to a string holding the options for that edge.
+        edge_label : string or dict
+            The name of the edge attribute on `G` that holds the edge label (text)
+            displayed for each edge. If the attribute is "" or not present, no edge
+            label is drawn.
+            Or a dict keyed by edge to a string holding the label for that edge.
+        edge_label_options : string or dict
+            The name of the edge attribute on `G` that holds the label options for
+            each edge. For example, "[sloped,above,blue]". The default is no options.
+            Or a dict keyed by edge to a string holding the label options for that edge.
+        caption : string
+            The caption string for the figure environment
+        latex_label : string
+            The latex label used for the figure for easy referral from the main text
+        sub_captions : list of strings
+            The sub_caption string for each subfigure in the figure
+        sub_labels : list of strings
+            The latex label for each subfigure in the figure
+        n_rows : int
+            The number of rows of subfigures to arrange for multiple graphs
+        as_document : bool
+            Whether to wrap the latex code in a document environment for compiling
+        document_wrapper : formatted text string with variable ``content``.
+            This text is called to evaluate the content embedded in a document
+            environment with a preamble setting up the TikZ syntax.
+        figure_wrapper : formatted text string
+            This text is evaluated with variables ``content``, ``caption`` and ``label``.
+            It wraps the content and if a caption is provided, adds the latex code for
+            that caption, and if a label is provided, adds the latex code for a label.
+        subfigure_wrapper : formatted text string
+            This text evaluates variables ``size``, ``content``, ``caption`` and ``label``.
+            It wraps the content and if a caption is provided, adds the latex code for
+            that caption, and if a label is provided, adds the latex code for a label.
+            The size is the vertical size of each row of subfigures as a fraction.
+
+    See Also
+    ========
+    to_latex
+    """
+    path.write(to_latex(Gbunch, **options))
diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/nx_pydot.py b/.venv/lib/python3.12/site-packages/networkx/drawing/nx_pydot.py
new file mode 100644
index 0000000..0fe5cee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/drawing/nx_pydot.py
@@ -0,0 +1,361 @@
+"""
+*****
+Pydot
+*****
+
+Import and export NetworkX graphs in Graphviz dot format using pydot.
+
+Either this module or nx_agraph can be used to interface with Graphviz.
+ +Examples +-------- +>>> G = nx.complete_graph(5) +>>> PG = nx.nx_pydot.to_pydot(G) +>>> H = nx.nx_pydot.from_pydot(PG) + +See Also +-------- + - pydot: https://github.com/erocarrera/pydot + - Graphviz: https://www.graphviz.org + - DOT Language: http://www.graphviz.org/doc/info/lang.html +""" + +from locale import getpreferredencoding + +import networkx as nx +from networkx.utils import open_file + +__all__ = [ + "write_dot", + "read_dot", + "graphviz_layout", + "pydot_layout", + "to_pydot", + "from_pydot", +] + + +@open_file(1, mode="w") +def write_dot(G, path): + """Write NetworkX graph G to Graphviz dot format on path. + + Parameters + ---------- + G : NetworkX graph + + path : string or file + Filename or file handle for data output. + Filenames ending in .gz or .bz2 will be compressed. + """ + P = to_pydot(G) + path.write(P.to_string()) + return + + +@open_file(0, mode="r") +@nx._dispatchable(name="pydot_read_dot", graphs=None, returns_graph=True) +def read_dot(path): + """Returns a NetworkX :class:`MultiGraph` or :class:`MultiDiGraph` from the + dot file with the passed path. + + If this file contains multiple graphs, only the first such graph is + returned. All graphs _except_ the first are silently ignored. + + Parameters + ---------- + path : str or file + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be decompressed. + + Returns + ------- + G : MultiGraph or MultiDiGraph + A :class:`MultiGraph` or :class:`MultiDiGraph`. + + Notes + ----- + Use `G = nx.Graph(nx.nx_pydot.read_dot(path))` to return a :class:`Graph` instead of a + :class:`MultiGraph`. + """ + import pydot + + data = path.read() + + # List of one or more "pydot.Dot" instances deserialized from this file. + P_list = pydot.graph_from_dot_data(data) + + # Convert only the first such instance into a NetworkX graph. + return from_pydot(P_list[0]) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_pydot(P): + """Returns a NetworkX graph from a Pydot graph. + + Parameters + ---------- + P : Pydot graph + A graph created with Pydot + + Returns + ------- + G : NetworkX multigraph + A MultiGraph or MultiDiGraph. 
+ + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_pydot.to_pydot(K5) + >>> G = nx.nx_pydot.from_pydot(A) # return MultiGraph + + # make a Graph instead of MultiGraph + >>> G = nx.Graph(nx.nx_pydot.from_pydot(A)) + + """ + # NOTE: Pydot v3 expects a dummy argument whereas Pydot v4 doesn't + # Remove the try-except when Pydot v4 becomes the minimum supported version + try: + strict = P.get_strict() + except TypeError: + strict = P.get_strict(None) # pydot bug: get_strict() shouldn't take argument + multiedges = not strict + + if P.get_type() == "graph": # undirected + if multiedges: + N = nx.MultiGraph() + else: + N = nx.Graph() + else: + if multiedges: + N = nx.MultiDiGraph() + else: + N = nx.DiGraph() + + # assign defaults + name = P.get_name().strip('"') + if name != "": + N.name = name + + # add nodes, attributes to N.node_attr + for p in P.get_node_list(): + n = p.get_name().strip('"') + if n in ("node", "graph", "edge"): + continue + N.add_node(n, **p.get_attributes()) + + # add edges + for e in P.get_edge_list(): + u = e.get_source() + v = e.get_destination() + attr = e.get_attributes() + s = [] + d = [] + + if isinstance(u, str): + s.append(u.strip('"')) + else: + for unodes in u["nodes"]: + s.append(unodes.strip('"')) + + if isinstance(v, str): + d.append(v.strip('"')) + else: + for vnodes in v["nodes"]: + d.append(vnodes.strip('"')) + + for source_node in s: + for destination_node in d: + N.add_edge(source_node, destination_node, **attr) + + # add default attributes for graph, nodes, edges + pattr = P.get_attributes() + if pattr: + N.graph["graph"] = pattr + try: + N.graph["node"] = P.get_node_defaults()[0] + except (IndexError, TypeError): + pass # N.graph['node']={} + try: + N.graph["edge"] = P.get_edge_defaults()[0] + except (IndexError, TypeError): + pass # N.graph['edge']={} + return N + + +def to_pydot(N): + """Returns a pydot graph from a NetworkX graph N. + + Parameters + ---------- + N : NetworkX graph + A graph created with NetworkX + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> P = nx.nx_pydot.to_pydot(K5) + + Notes + ----- + + """ + import pydot + + # set Graphviz graph type + if N.is_directed(): + graph_type = "digraph" + else: + graph_type = "graph" + strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph() + + name = N.name + graph_defaults = N.graph.get("graph", {}) + if name == "": + P = pydot.Dot("", graph_type=graph_type, strict=strict, **graph_defaults) + else: + P = pydot.Dot( + f'"{name}"', graph_type=graph_type, strict=strict, **graph_defaults + ) + try: + P.set_node_defaults(**N.graph["node"]) + except KeyError: + pass + try: + P.set_edge_defaults(**N.graph["edge"]) + except KeyError: + pass + + for n, nodedata in N.nodes(data=True): + str_nodedata = {str(k): str(v) for k, v in nodedata.items()} + n = str(n) + p = pydot.Node(n, **str_nodedata) + P.add_node(p) + + if N.is_multigraph(): + for u, v, key, edgedata in N.edges(data=True, keys=True): + str_edgedata = {str(k): str(v) for k, v in edgedata.items() if k != "key"} + u, v = str(u), str(v) + edge = pydot.Edge(u, v, key=str(key), **str_edgedata) + P.add_edge(edge) + + else: + for u, v, edgedata in N.edges(data=True): + str_edgedata = {str(k): str(v) for k, v in edgedata.items()} + u, v = str(u), str(v) + edge = pydot.Edge(u, v, **str_edgedata) + P.add_edge(edge) + return P + + +def graphviz_layout(G, prog="neato", root=None): + """Create node positions using Pydot and Graphviz. + + Returns a dictionary of positions keyed by node. 
+ + Parameters + ---------- + G : NetworkX Graph + The graph for which the layout is computed. + prog : string (default: 'neato') + The name of the GraphViz program to use for layout. + Options depend on GraphViz version but may include: + 'dot', 'twopi', 'fdp', 'sfdp', 'circo' + root : Node from G or None (default: None) + The node of G from which to start some layout algorithms. + + Returns + ------- + Dictionary of (x, y) positions keyed by node. + + Examples + -------- + >>> G = nx.complete_graph(4) + >>> pos = nx.nx_pydot.graphviz_layout(G) + >>> pos = nx.nx_pydot.graphviz_layout(G, prog="dot") + + Notes + ----- + This is a wrapper for pydot_layout. + """ + return pydot_layout(G=G, prog=prog, root=root) + + +def pydot_layout(G, prog="neato", root=None): + """Create node positions using :mod:`pydot` and Graphviz. + + Parameters + ---------- + G : Graph + NetworkX graph to be laid out. + prog : string (default: 'neato') + Name of the GraphViz command to use for layout. + Options depend on GraphViz version but may include: + 'dot', 'twopi', 'fdp', 'sfdp', 'circo' + root : Node from G or None (default: None) + The node of G from which to start some layout algorithms. + + Returns + ------- + dict + Dictionary of positions keyed by node. + + Examples + -------- + >>> G = nx.complete_graph(4) + >>> pos = nx.nx_pydot.pydot_layout(G) + >>> pos = nx.nx_pydot.pydot_layout(G, prog="dot") + + Notes + ----- + If you use complex node objects, they may have the same string + representation and GraphViz could treat them as the same node. + The layout may assign both nodes a single location. See Issue #1568 + If this occurs in your case, consider relabeling the nodes just + for the layout computation using something similar to:: + + H = nx.convert_node_labels_to_integers(G, label_attribute="node_label") + H_layout = nx.nx_pydot.pydot_layout(H, prog="dot") + G_layout = {H.nodes[n]["node_label"]: p for n, p in H_layout.items()} + + """ + import pydot + + P = to_pydot(G) + if root is not None: + P.set("root", str(root)) + + # List of low-level bytes comprising a string in the dot language converted + # from the passed graph with the passed external GraphViz command. + D_bytes = P.create_dot(prog=prog) + + # Unique string decoded from these bytes with the preferred locale encoding + D = str(D_bytes, encoding=getpreferredencoding()) + + if D == "": # no data returned + print(f"Graphviz layout with {prog} failed") + print() + print("To debug what happened try:") + print("P = nx.nx_pydot.to_pydot(G)") + print('P.write_dot("file.dot")') + print(f"And then run {prog} on file.dot") + return + + # List of one or more "pydot.Dot" instances deserialized from this string. + Q_list = pydot.graph_from_dot_data(D) + assert len(Q_list) == 1 + + # The first and only such instance, as guaranteed by the above assertion. 
+    Q = Q_list[0]
+
+    node_pos = {}
+    for n in G.nodes():
+        str_n = str(n)
+        node = Q.get_node(pydot.quote_id_if_necessary(str_n))
+
+        if isinstance(node, list):
+            node = node[0]
+        # get_pos() may return None for nodes Graphviz did not place, so
+        # check before stripping the surrounding double quotes.
+        pos = node.get_pos()
+        if pos is not None:
+            pos = pos[1:-1]  # strip leading and trailing double quotes
+            xx, yy = pos.split(",")
+            node_pos[n] = (float(xx), float(yy))
+    return node_pos
diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/nx_pylab.py b/.venv/lib/python3.12/site-packages/networkx/drawing/nx_pylab.py
new file mode 100644
index 0000000..778d6e8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/drawing/nx_pylab.py
@@ -0,0 +1,3021 @@
+"""
+**********
+Matplotlib
+**********
+
+Draw networks with matplotlib.
+
+Examples
+--------
+>>> G = nx.complete_graph(5)
+>>> nx.draw(G)
+
+See Also
+--------
+ - :doc:`matplotlib <matplotlib:index>`
+ - :func:`matplotlib.pyplot.scatter`
+ - :obj:`matplotlib.patches.FancyArrowPatch`
+"""
+
+import collections
+import itertools
+from numbers import Number
+
+import networkx as nx
+
+__all__ = [
+    "display",
+    "apply_matplotlib_colors",
+    "draw",
+    "draw_networkx",
+    "draw_networkx_nodes",
+    "draw_networkx_edges",
+    "draw_networkx_labels",
+    "draw_networkx_edge_labels",
+    "draw_bipartite",
+    "draw_circular",
+    "draw_kamada_kawai",
+    "draw_random",
+    "draw_spectral",
+    "draw_spring",
+    "draw_planar",
+    "draw_shell",
+    "draw_forceatlas2",
+]
+
+
+def apply_matplotlib_colors(
+    G, src_attr, dest_attr, map, vmin=None, vmax=None, nodes=True
+):
+    """
+    Apply colors from a matplotlib colormap to a graph.
+
+    Reads values from `src_attr` and uses a matplotlib colormap
+    to produce a color. Writes the color to `dest_attr`.
+
+    Parameters
+    ----------
+    G : nx.Graph
+        The graph to read and compute colors for.
+
+    src_attr : str or other attribute name
+        The name of the attribute to read from the graph.
+
+    dest_attr : str or other attribute name
+        The name of the attribute to write to on the graph.
+
+    map : matplotlib.colormap
+        The matplotlib colormap to use.
+
+    vmin : float, default None
+        The minimum value for scaling the colormap. If `None`, find the
+        minimum value of `src_attr`.
+
+    vmax : float, default None
+        The maximum value for scaling the colormap. If `None`, find the
+        maximum value of `src_attr`.
+
+    nodes : bool, default True
+        Whether the attribute names refer to node attributes (`True`) or
+        edge attributes (`False`).
+    """
+    import matplotlib as mpl
+
+    if nodes:
+        type_iter = G.nodes()
+    elif G.is_multigraph():
+        type_iter = G.edges(keys=True)
+    else:
+        type_iter = G.edges()
+
+    if vmin is None or vmax is None:
+        vals = [type_iter[a][src_attr] for a in type_iter]
+        if vmin is None:
+            vmin = min(vals)
+        if vmax is None:
+            vmax = max(vals)
+
+    mapper = mpl.cm.ScalarMappable(cmap=map)
+    mapper.set_clim(vmin, vmax)
+
+    def do_map(x):
+        # Cast numpy scalars to float
+        return tuple(float(x) for x in mapper.to_rgba(x))
+
+    if nodes:
+        nx.set_node_attributes(
+            G, {n: do_map(G.nodes[n][src_attr]) for n in G.nodes()}, dest_attr
+        )
+    else:
+        nx.set_edge_attributes(
+            G, {e: do_map(G.edges[e][src_attr]) for e in type_iter}, dest_attr
+        )
+
+
+def display(
+    G,
+    canvas=None,
+    **kwargs,
+):
+    """Draw the graph G.
+
+    Draw the graph as a collection of nodes connected by edges.
+    The exact details of what the graph looks like are controlled by the
+    attributes below. All nodes and nodes at the end of visible edges must
+    have a position set, but nearly all other node and edge attributes are
+    optional; nodes or edges missing the attribute will use the default
+    listed below.
A more complete description of each parameter is given below this summary.
+
+    .. list-table:: Default Visualization Attributes
+       :widths: 25 25 50
+       :header-rows: 1
+
+       * - Parameter
+         - Default Attribute
+         - Default Value
+       * - pos
+         - `"pos"`
+         - If there is no position, a layout will be calculated with
+           `nx.spring_layout`.
+       * - node_visible
+         - `"visible"`
+         - True
+       * - node_color
+         - `"color"`
+         - #1f78b4
+       * - node_size
+         - `"size"`
+         - 300
+       * - node_label
+         - `"label"`
+         - Dict describing the node label. Defaults create a black text with
+           the node name as the label. The dict respects these keys and defaults:
+
+           * size : 12
+           * color : black
+           * family : sans serif
+           * weight : normal
+           * alpha : 1.0
+           * h_align : center
+           * v_align : center
+           * bbox : Dict describing a `matplotlib.patches.FancyBboxPatch`.
+             Default is None.
+
+       * - node_shape
+         - `"shape"`
+         - "o"
+       * - node_alpha
+         - `"alpha"`
+         - 1.0
+       * - node_border_width
+         - `"border_width"`
+         - 1.0
+       * - node_border_color
+         - `"border_color"`
+         - Matching node_color
+       * - edge_visible
+         - `"visible"`
+         - True
+       * - edge_width
+         - `"width"`
+         - 1.0
+       * - edge_color
+         - `"color"`
+         - Black (#000000)
+       * - edge_label
+         - `"label"`
+         - Dict describing the edge label. Defaults create black text with a
+           white bounding box. The dictionary respects these keys and defaults:
+
+           * size : 12
+           * color : black
+           * family : sans serif
+           * weight : normal
+           * alpha : 1.0
+           * bbox : Dict describing a `matplotlib.patches.FancyBboxPatch`.
+             Default {"boxstyle": "round", "ec": (1.0, 1.0, 1.0), "fc": (1.0, 1.0, 1.0)}
+           * h_align : "center"
+           * v_align : "center"
+           * pos : 0.5
+           * rotate : True
+
+       * - edge_style
+         - `"style"`
+         - "-"
+       * - edge_alpha
+         - `"alpha"`
+         - 1.0
+       * - arrowstyle
+         - `"arrowstyle"`
+         - ``"-|>"`` if `G` is directed else ``"-"``
+       * - arrowsize
+         - `"arrowsize"`
+         - 10 if `G` is directed else 0
+       * - edge_curvature
+         - `"curvature"`
+         - arc3
+       * - edge_source_margin
+         - `"source_margin"`
+         - 0
+       * - edge_target_margin
+         - `"target_margin"`
+         - 0
+
+    Parameters
+    ----------
+    G : graph
+        A networkx graph
+
+    canvas : Matplotlib Axes object, optional
+        Draw the graph in specified Matplotlib axes
+
+    pos : string or function, default "pos"
+        A string naming the node attribute storing the position of nodes as a tuple.
+        Or a function to be called with input `G` which returns the layout as a dict
+        keyed by node to position tuple like the NetworkX layout functions.
+        If no node in the graph has the attribute, a spring layout is calculated.
+
+    node_visible : string or bool, default "visible"
+        A string naming the node attribute which stores if a node should be drawn.
+        If `True`, all nodes will be visible while if `False` no nodes will be visible.
+        If incomplete, nodes missing this attribute will be shown by default.
+
+    node_color : string, default "color"
+        A string naming the node attribute which stores the color of each node.
+        Visible nodes without this attribute will use '#1f78b4' as a default.
+
+    node_size : string or number, default "size"
+        A string naming the node attribute which stores the size of each node.
+        Visible nodes without this attribute will use a default size of 300.
+
+    node_label : string or bool, default "label"
+        A string naming the node attribute which stores the label of each node.
+        The attribute value can be a string, False (no label for that node),
+        True (the node is the label) or a dict keyed by node to the label.
+
+        If a dict is specified, these keys are read to further control the label:
+
+        * label : The text of the label; default: name of the node
+        * size : Font size of the label; default: 12
+        * color : Font color of the label; default: black
+        * family : Font family of the label; default: "sans-serif"
+        * weight : Font weight of the label; default: "normal"
+        * alpha : Alpha value of the label; default: 1.0
+        * h_align : The horizontal alignment of the label,
+          one of "left", "center", "right"; default: "center"
+        * v_align : The vertical alignment of the label,
+          one of "top", "center", "bottom"; default: "center"
+        * bbox : A dict of parameters for `matplotlib.patches.FancyBboxPatch`.
+
+        Visible nodes without this attribute will be treated as if the value was True.
+
+    node_shape : string, default "shape"
+        A string naming the node attribute which stores the shape of each node.
+        The values of this attribute are expected to be one of the matplotlib
+        shapes, one of 'so^>v<dph8'. Visible nodes without this attribute will
+        use "o" as a default shape.
+
+    node_alpha : string or float, default "alpha"
+        A string naming the node attribute which stores the alpha (transparency)
+        value of each node. Visible nodes without this attribute will use a
+        default value of 1.0.
+
+    node_border_width : string or float, default "border_width"
+        A string naming the node attribute which stores the width of the border
+        of each node. Visible nodes without this attribute will use a default
+        value of 1.0.
+
+    node_border_color : string, default "border_color"
+        A string naming the node attribute which stores the border color of each
+        node. Visible nodes without this attribute will match the border color
+        to the node color.
+
+    edge_visible : string or bool, default "visible"
+        A string naming the edge attribute which stores if an edge should be
+        drawn. If `True`, all edges will be visible while if `False` no edges
+        will be visible. If incomplete, edges missing this attribute will be
+        shown by default.
+
+    edge_width : string or float, default "width"
+        A string naming the edge attribute which stores the width of each edge.
+        Visible edges without this attribute will use a default value of 1.0.
+
+    edge_color : string, default "color"
+        A string naming the edge attribute which stores the color of each edge.
+        Visible edges without this attribute will use "#000000" as a default.
+
+    edge_label : string or dict, default "label"
+        A string naming the edge attribute which stores the label of each edge,
+        or a dict of label options used as defaults for every edge label.
+        The attribute value for an edge can be a string, False or None (no
+        label for that edge), or a dict with the same keys as ``node_label``
+        plus ``pos`` (position of the label along the edge, default 0.5) and
+        ``rotate`` (whether to rotate the label to align with the edge,
+        default True). The default bbox draws a white box behind the label.
+
+    edge_style : string, default "style"
+        A string naming the edge attribute which stores the line style of each
+        edge. Visible edges without this attribute will use "-" as a default.
+
+    edge_alpha : string or float, default "alpha"
+        A string naming the edge attribute which stores the alpha value of each
+        edge. Visible edges without this attribute will use a default value of 1.0.
+
+    arrowstyle : string, default "arrowstyle"
+        A string naming the edge attribute which stores the arrow style of each
+        edge. Visible edges without this attribute will use ``"-"`` as a default
+        value for undirected graphs and ``"-|>"`` for directed graphs.
+
+        See `matplotlib.patches.ArrowStyle` for more options
+
+    arrowsize : string or int, default "arrowsize"
+        A string naming the edge attribute which stores the size of the arrowhead for each
+        edge. Visible edges without this attribute will use a default value of 10.
+
+    edge_curvature : string, default "curvature"
+        A string naming the edge attribute storing the curvature and connection style
+        of each edge. Visible edges without this attribute will use "arc3" as a default
+        value, resulting in a straight line between the two nodes. Curvature can be given
+        as 'arc3,rad=0.2' to specify both the style and radius of curvature.
+
+        Please see `matplotlib.patches.ConnectionStyle` and
+        `matplotlib.patches.FancyArrowPatch` for more information.
+
+    edge_source_margin : string or int, default "source_margin"
+        A string naming the edge attribute which stores the minimum margin (gap) between
+        the source node and the start of the edge. Visible edges without this attribute
+        will use a default value of 0.
+
+    edge_target_margin : string or int, default "target_margin"
+        A string naming the edge attribute which stores the minimum margin (gap) between
+        the target node and the end of the edge. Visible edges without this attribute
+        will use a default value of 0.
+
+    hide_ticks : bool, default True
+        Whether to remove the ticks from the axes of the matplotlib object.
+
+    Raises
+    ------
+    NetworkXError
+        If a node or edge is missing a required attribute such as `pos` or
+        if `display` receives an argument not listed above.
+
+    ValueError
+        If a node or edge has an invalid color format, i.e. not a color string,
+        rgb tuple or rgba tuple.
+
+    Returns
+    -------
+    The input graph. This is potentially useful for dispatching visualization
+    functions.
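+
+    Examples
+    --------
+    A minimal sketch; the attribute names follow the defaults listed above
+    (requires matplotlib):
+
+    >>> G = nx.path_graph(3)
+    >>> nx.set_node_attributes(G, {0: "red", 1: "green", 2: "blue"}, "color")
+    >>> G.edges[0, 1]["width"] = 3.0
+    >>> nx.display(G)  # doctest: +SKIP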
+ """ + from collections import Counter + + import matplotlib as mpl + import matplotlib.pyplot as plt + import numpy as np + + defaults = { + "node_pos": None, + "node_visible": True, + "node_color": "#1f78b4", + "node_size": 300, + "node_label": { + "size": 12, + "color": "#000000", + "family": "sans-serif", + "weight": "normal", + "alpha": 1.0, + "h_align": "center", + "v_align": "center", + "bbox": None, + }, + "node_shape": "o", + "node_alpha": 1.0, + "node_border_width": 1.0, + "node_border_color": "face", + "edge_visible": True, + "edge_width": 1.0, + "edge_color": "#000000", + "edge_label": { + "size": 12, + "color": "#000000", + "family": "sans-serif", + "weight": "normal", + "alpha": 1.0, + "bbox": {"boxstyle": "round", "ec": (1.0, 1.0, 1.0), "fc": (1.0, 1.0, 1.0)}, + "h_align": "center", + "v_align": "center", + "pos": 0.5, + "rotate": True, + }, + "edge_style": "-", + "edge_alpha": 1.0, + "edge_arrowstyle": "-|>" if G.is_directed() else "-", + "edge_arrowsize": 10 if G.is_directed() else 0, + "edge_curvature": "arc3", + "edge_source_margin": 0, + "edge_target_margin": 0, + "hide_ticks": True, + } + + # Check arguments + for kwarg in kwargs: + if kwarg not in defaults: + raise nx.NetworkXError( + f"Unrecongized visualization keyword argument: {kwarg}" + ) + + if canvas is None: + canvas = plt.gca() + + if kwargs.get("hide_ticks", defaults["hide_ticks"]): + canvas.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + labelbottom=False, + labelleft=False, + ) + + ### Helper methods and classes + + def node_property_sequence(seq, attr): + """Return a list of attribute values for `seq`, using a default if needed""" + + # All node attribute parameters start with "node_" + param_name = f"node_{attr}" + default = defaults[param_name] + attr = kwargs.get(param_name, attr) + + if default is None: + # raise instead of using non-existant default value + for n in seq: + if attr not in node_subgraph.nodes[n]: + raise nx.NetworkXError(f"Attribute '{attr}' missing for node {n}") + + # If `attr` is not a graph attr and was explicitly passed as an argument + # it must be a user-default value. 
Allow attr=None to tell draw to skip + # attributes which are on the graph + if ( + attr is not None + and nx.get_node_attributes(node_subgraph, attr) == {} + and any(attr == v for k, v in kwargs.items() if "node" in k) + ): + return [attr for _ in seq] + + return [node_subgraph.nodes[n].get(attr, default) for n in seq] + + def compute_colors(color, alpha): + if isinstance(color, str): + rgba = mpl.colors.colorConverter.to_rgba(color) + # Using a non-default alpha value overrides any alpha value in the color + if alpha != defaults["node_alpha"]: + return (rgba[0], rgba[1], rgba[2], alpha) + return rgba + + if isinstance(color, tuple) and len(color) == 3: + return (color[0], color[1], color[2], alpha) + + if isinstance(color, tuple) and len(color) == 4: + return color + + raise ValueError(f"Invalid format for color: {color}") + + # Find which edges can be plotted as a line collection + # + # Non-default values for these attributes require fancy arrow patches: + # - any arrow style (including the default -|> for directed graphs) + # - arrow size (by extension of style) + # - connection style + # - min_source_margin + # - min_target_margin + + def collection_compatible(e): + return ( + get_edge_attr(e, "arrowstyle") == "-" + and get_edge_attr(e, "curvature") == "arc3" + and get_edge_attr(e, "source_margin") == 0 + and get_edge_attr(e, "target_margin") == 0 + # Self-loops will use fancy arrow patches + and e[0] != e[1] + ) + + def edge_property_sequence(seq, attr): + """Return a list of attribute values for `seq`, using a default if needed""" + + param_name = f"edge_{attr}" + default = defaults[param_name] + attr = kwargs.get(param_name, attr) + + if default is None: + # raise instead of using non-existant default value + for e in seq: + if attr not in edge_subgraph.edges[e]: + raise nx.NetworkXError(f"Attribute '{attr}' missing for edge {e}") + + if ( + attr is not None + and nx.get_edge_attributes(edge_subgraph, attr) == {} + and any(attr == v for k, v in kwargs.items() if "edge" in k) + ): + return [attr for _ in seq] + + return [edge_subgraph.edges[e].get(attr, default) for e in seq] + + def get_edge_attr(e, attr): + """Return the final edge attribute value, using default if not None""" + + param_name = f"edge_{attr}" + default = defaults[param_name] + attr = kwargs.get(param_name, attr) + + if default is None and attr not in edge_subgraph.edges[e]: + raise nx.NetworkXError(f"Attribute '{attr}' missing from edge {e}") + + if ( + attr is not None + and nx.get_edge_attributes(edge_subgraph, attr) == {} + and attr in kwargs.values() + ): + return attr + + return edge_subgraph.edges[e].get(attr, default) + + def get_node_attr(n, attr, use_edge_subgraph=True): + """Return the final node attribute value, using default if not None""" + subgraph = edge_subgraph if use_edge_subgraph else node_subgraph + + param_name = f"node_{attr}" + default = defaults[param_name] + attr = kwargs.get(param_name, attr) + + if default is None and attr not in subgraph.nodes[n]: + raise nx.NetworkXError(f"Attribute '{attr}' missing from node {n}") + + if ( + attr is not None + and nx.get_node_attributes(subgraph, attr) == {} + and attr in kwargs.values() + ): + return attr + + return subgraph.nodes[n].get(attr, default) + + # Taken from ConnectionStyleFactory + def self_loop(edge_index, node_size): + def self_loop_connection(posA, posB, *args, **kwargs): + if not np.all(posA == posB): + raise nx.NetworkXError( + "`self_loop` connection style method" + "is only to be used for self-loops" + ) + # this is called with 
_screen space_ values + # so convert back to data space + data_loc = canvas.transData.inverted().transform(posA) + # Scale self loop based on the size of the base node + # Size of nodes are given in points ** 2 and each point is 1/72 of an inch + v_shift = np.sqrt(node_size) / 72 + h_shift = v_shift * 0.5 + # put the top of the loop first so arrow is not hidden by node + path = np.asarray( + [ + # 1 + [0, v_shift], + # 4 4 4 + [h_shift, v_shift], + [h_shift, 0], + [0, 0], + # 4 4 4 + [-h_shift, 0], + [-h_shift, v_shift], + [0, v_shift], + ] + ) + # Rotate self loop 90 deg. if more than 1 + # This will allow for maximum of 4 visible self loops + if edge_index % 4: + x, y = path.T + for _ in range(edge_index % 4): + x, y = y, -x + path = np.array([x, y]).T + return mpl.path.Path( + canvas.transData.transform(data_loc + path), [1, 4, 4, 4, 4, 4, 4] + ) + + return self_loop_connection + + def to_marker_edge(size, marker): + if marker in "s^>v 90: + angle -= 180 + if angle < -90: + angle += 180 + (x, y) = self.ax.transData.inverted().transform((x, y)) + return x, y, angle + + def draw(self, renderer): + # recalculate the text position and angle + self.x, self.y, self.angle = self._update_text_pos_angle(self.arrow) + self.set_position((self.x, self.y)) + self.set_rotation(self.angle) + # redraw text + super().draw(renderer) + + ### Draw the nodes first + node_visible = kwargs.get("node_visible", "visible") + if isinstance(node_visible, bool): + if node_visible: + visible_nodes = G.nodes() + else: + visible_nodes = [] + else: + visible_nodes = [ + n for n, v in nx.get_node_attributes(G, node_visible, True).items() if v + ] + + node_subgraph = G.subgraph(visible_nodes) + + # Ignore the default dict value since that's for default values to use, not + # default attribute name + pos = kwargs.get("node_pos", "pos") + + default_display_pos_attr = "display's position attribute name" + if callable(pos): + nx.set_node_attributes( + node_subgraph, pos(node_subgraph), default_display_pos_attr + ) + pos = default_display_pos_attr + kwargs["node_pos"] = default_display_pos_attr + elif nx.get_node_attributes(G, pos) == {}: + nx.set_node_attributes( + node_subgraph, nx.spring_layout(node_subgraph), default_display_pos_attr + ) + pos = default_display_pos_attr + kwargs["node_pos"] = default_display_pos_attr + + # Each shape requires a new scatter object since they can't have different + # shapes. + if len(visible_nodes) > 0: + node_shape = kwargs.get("node_shape", "shape") + for shape in Counter( + nx.get_node_attributes( + node_subgraph, node_shape, defaults["node_shape"] + ).values() + ): + # Filter position just on this shape. + nodes_with_shape = [ + n + for n, s in node_subgraph.nodes(data=node_shape) + if s == shape or (s is None and shape == defaults["node_shape"]) + ] + # There are two property sequences to create before hand. + # 1. position, since it is used for x and y parameters to scatter + # 2. edgecolor, since the spaeical 'face' parameter value can only be + # be passed in as the sole string, not part of a list of strings. 
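+            # ("face" is matplotlib's sentinel meaning "match the fill color";
+            # scatter() accepts it only as a single string, so it is resolved
+            # to a concrete color per node below.)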
+ position = np.asarray(node_property_sequence(nodes_with_shape, "pos")) + color = np.asarray( + [ + compute_colors(c, a) + for c, a in zip( + node_property_sequence(nodes_with_shape, "color"), + node_property_sequence(nodes_with_shape, "alpha"), + ) + ] + ) + border_color = np.asarray( + [ + ( + c + if ( + c := get_node_attr( + n, + "border_color", + False, + ) + ) + != "face" + else color[i] + ) + for i, n in enumerate(nodes_with_shape) + ] + ) + canvas.scatter( + position[:, 0], + position[:, 1], + s=node_property_sequence(nodes_with_shape, "size"), + c=color, + marker=shape, + linewidths=node_property_sequence(nodes_with_shape, "border_width"), + edgecolors=border_color, + zorder=2, + ) + + ### Draw node labels + node_label = kwargs.get("node_label", "label") + # Plot labels if node_label is not None and not False + if node_label is not None and node_label is not False: + default_dict = {} + if isinstance(node_label, dict): + default_dict = node_label + node_label = None + + for n, lbl in node_subgraph.nodes(data=node_label): + if lbl is False: + continue + + # We work with label dicts down here... + if not isinstance(lbl, dict): + lbl = {"label": lbl if lbl is not None else n} + + lbl_text = lbl.get("label", n) + if not isinstance(lbl_text, str): + lbl_text = str(lbl_text) + + lbl.update(default_dict) + x, y = node_subgraph.nodes[n][pos] + canvas.text( + x, + y, + lbl_text, + size=lbl.get("size", defaults["node_label"]["size"]), + color=lbl.get("color", defaults["node_label"]["color"]), + family=lbl.get("family", defaults["node_label"]["family"]), + weight=lbl.get("weight", defaults["node_label"]["weight"]), + horizontalalignment=lbl.get( + "h_align", defaults["node_label"]["h_align"] + ), + verticalalignment=lbl.get("v_align", defaults["node_label"]["v_align"]), + transform=canvas.transData, + bbox=lbl.get("bbox", defaults["node_label"]["bbox"]), + ) + + ### Draw edges + + edge_visible = kwargs.get("edge_visible", "visible") + if isinstance(edge_visible, bool): + if edge_visible: + visible_edges = G.edges() + else: + visible_edges = [] + else: + visible_edges = [ + e for e, v in nx.get_edge_attributes(G, edge_visible, True).items() if v + ] + + edge_subgraph = G.edge_subgraph(visible_edges) + print(nx.get_node_attributes(node_subgraph, pos)) + nx.set_node_attributes( + edge_subgraph, nx.get_node_attributes(node_subgraph, pos), name=pos + ) + + collection_edges = ( + [e for e in edge_subgraph.edges(keys=True) if collection_compatible(e)] + if edge_subgraph.is_multigraph() + else [e for e in edge_subgraph.edges() if collection_compatible(e)] + ) + non_collection_edges = ( + [e for e in edge_subgraph.edges(keys=True) if not collection_compatible(e)] + if edge_subgraph.is_multigraph() + else [e for e in edge_subgraph.edges() if not collection_compatible(e)] + ) + edge_position = np.asarray( + [ + ( + get_node_attr(u, "pos", use_edge_subgraph=True), + get_node_attr(v, "pos", use_edge_subgraph=True), + ) + for u, v, *_ in collection_edges + ] + ) + + # Only plot a line collection if needed + if len(collection_edges) > 0: + edge_collection = mpl.collections.LineCollection( + edge_position, + colors=edge_property_sequence(collection_edges, "color"), + linewidths=edge_property_sequence(collection_edges, "width"), + linestyle=edge_property_sequence(collection_edges, "style"), + alpha=edge_property_sequence(collection_edges, "alpha"), + antialiaseds=(1,), + zorder=1, + ) + canvas.add_collection(edge_collection) + + fancy_arrows = {} + if len(non_collection_edges) > 0: + for e in 
non_collection_edges: + # Cache results for use in edge labels + fancy_arrows[e] = build_fancy_arrow(e) + canvas.add_patch(fancy_arrows[e]) + + ### Draw edge labels + edge_label = kwargs.get("edge_label", "label") + default_dict = {} + if isinstance(edge_label, dict): + default_dict = edge_label + # Restore the default label attribute key of 'label' + edge_label = "label" + + # Handle multigraphs + edge_label_data = ( + edge_subgraph.edges(data=edge_label, keys=True) + if edge_subgraph.is_multigraph() + else edge_subgraph.edges(data=edge_label) + ) + if edge_label is not None and edge_label is not False: + for *e, lbl in edge_label_data: + e = tuple(e) + # I'm not sure how I want to handle None here... For now it means no label + if lbl is False or lbl is None: + continue + + if not isinstance(lbl, dict): + lbl = {"label": lbl} + + lbl.update(default_dict) + lbl_text = lbl.get("label") + if not isinstance(lbl_text, str): + lbl_text = str(lbl_text) + + # In the old code, every non-self-loop is placed via a fancy arrow patch + # Only compute a new fancy arrow if needed by caching the results from + # edge placement. + try: + arrow = fancy_arrows[e] + except KeyError: + arrow = build_fancy_arrow(e) + + if e[0] == e[1]: + # Taken directly from draw_networkx_edge_labels + connectionstyle_obj = arrow.get_connectionstyle() + posA = canvas.transData.transform(edge_subgraph.nodes[e[0]][pos]) + path_disp = connectionstyle_obj(posA, posA) + path_data = canvas.transData.inverted().transform_path(path_disp) + x, y = path_data.vertices[0] + canvas.text( + x, + y, + lbl_text, + size=lbl.get("size", defaults["edge_label"]["size"]), + color=lbl.get("color", defaults["edge_label"]["color"]), + family=lbl.get("family", defaults["edge_label"]["family"]), + weight=lbl.get("weight", defaults["edge_label"]["weight"]), + alpha=lbl.get("alpha", defaults["edge_label"]["alpha"]), + horizontalalignment=lbl.get( + "h_align", defaults["edge_label"]["h_align"] + ), + verticalalignment=lbl.get( + "v_align", defaults["edge_label"]["v_align"] + ), + rotation=0, + transform=canvas.transData, + bbox=lbl.get("bbox", defaults["edge_label"]["bbox"]), + zorder=1, + ) + continue + + CurvedArrowText( + arrow, + lbl_text, + size=lbl.get("size", defaults["edge_label"]["size"]), + color=lbl.get("color", defaults["edge_label"]["color"]), + family=lbl.get("family", defaults["edge_label"]["family"]), + weight=lbl.get("weight", defaults["edge_label"]["weight"]), + alpha=lbl.get("alpha", defaults["edge_label"]["alpha"]), + bbox=lbl.get("bbox", defaults["edge_label"]["bbox"]), + horizontalalignment=lbl.get( + "h_align", defaults["edge_label"]["h_align"] + ), + verticalalignment=lbl.get("v_align", defaults["edge_label"]["v_align"]), + label_pos=lbl.get("pos", defaults["edge_label"]["pos"]), + labels_horizontal=lbl.get("rotate", defaults["edge_label"]["rotate"]), + transform=canvas.transData, + zorder=1, + ax=canvas, + ) + + # If we had to add an attribute, remove it here + if pos == default_display_pos_attr: + nx.remove_node_attributes(G, default_display_pos_attr) + + return G + + +def draw(G, pos=None, ax=None, **kwds): + """Draw the graph G with Matplotlib. + + Draw the graph as a simple representation with no node + labels or edge labels and using the full Matplotlib figure area + and no axis labels by default. See draw_networkx() for more + full-featured drawing that allows title, axis labels etc. 
+ + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary, optional + A dictionary with nodes as keys and positions as values. + If not specified a spring layout positioning will be computed. + See :py:mod:`networkx.drawing.layout` for functions that + compute node positions. + + ax : Matplotlib Axes object, optional + Draw the graph in specified Matplotlib axes. + + kwds : optional keywords + See networkx.draw_networkx() for a description of optional keywords. + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> nx.draw(G) + >>> nx.draw(G, pos=nx.spring_layout(G)) # use spring layout + + See Also + -------- + draw_networkx + draw_networkx_nodes + draw_networkx_edges + draw_networkx_labels + draw_networkx_edge_labels + + Notes + ----- + This function has the same name as pylab.draw and pyplot.draw + so beware when using `from networkx import *` + + since you might overwrite the pylab.draw function. + + With pyplot use + + >>> import matplotlib.pyplot as plt + >>> G = nx.dodecahedral_graph() + >>> nx.draw(G) # networkx draw() + >>> plt.draw() # pyplot draw() + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + """ + + import matplotlib.pyplot as plt + + if ax is None: + cf = plt.gcf() + else: + cf = ax.get_figure() + cf.set_facecolor("w") + if ax is None: + if cf.axes: + ax = cf.gca() + else: + ax = cf.add_axes((0, 0, 1, 1)) + + if "with_labels" not in kwds: + kwds["with_labels"] = "labels" in kwds + + draw_networkx(G, pos=pos, ax=ax, **kwds) + ax.set_axis_off() + plt.draw_if_interactive() + return + + +def draw_networkx(G, pos=None, arrows=None, with_labels=True, **kwds): + r"""Draw the graph G using Matplotlib. + + Draw the graph with Matplotlib with options for node positions, + labeling, titles, and many other drawing features. + See draw() for simple drawing without labels or axes. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary, optional + A dictionary with nodes as keys and positions as values. + If not specified a spring layout positioning will be computed. + See :py:mod:`networkx.drawing.layout` for functions that + compute node positions. + + arrows : bool or None, optional (default=None) + If `None`, directed graphs draw arrowheads with + `~matplotlib.patches.FancyArrowPatch`, while undirected graphs draw edges + via `~matplotlib.collections.LineCollection` for speed. + If `True`, draw arrowheads with FancyArrowPatches (bendable and stylish). + If `False`, draw edges using LineCollection (linear and fast). + For directed graphs, if True draw arrowheads. + Note: Arrows will be the same color as edges. + + arrowstyle : str (default='-\|>' for directed graphs) + For directed graphs, choose the style of the arrowsheads. + For undirected graphs default to '-' + + See `matplotlib.patches.ArrowStyle` for more options. + + arrowsize : int or list (default=10) + For directed graphs, choose the size of the arrow head's length and + width. A list of values can be passed in to assign a different size for arrow head's length and width. + See `matplotlib.patches.FancyArrowPatch` for attribute `mutation_scale` + for more info. + + with_labels : bool (default=True) + Set to True to draw labels on the nodes. + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. 
+ + nodelist : list (default=list(G)) + Draw only specified nodes + + edgelist : list (default=list(G.edges())) + Draw only specified edges + + node_size : scalar or array (default=300) + Size of nodes. If an array is specified it must be the + same length as nodelist. + + node_color : color or array of colors (default='#1f78b4') + Node color. Can be a single color or a sequence of colors with the same + length as nodelist. Color can be string or rgb (or rgba) tuple of + floats from 0-1. If numeric values are specified they will be + mapped to colors using the cmap and vmin,vmax parameters. See + matplotlib.scatter for more details. + + node_shape : string (default='o') + The shape of the node. Specification is as matplotlib.scatter + marker, one of 'so^>v<dph8'. + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> nx.draw(G) + >>> nx.draw(G, pos=nx.spring_layout(G))  # use spring layout + + >>> import matplotlib.pyplot as plt + >>> limits = plt.axis("off")  # turn off axis + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + + See Also + -------- + draw + draw_networkx_nodes + draw_networkx_edges + draw_networkx_labels + draw_networkx_edge_labels + """ + from inspect import signature + + import matplotlib.pyplot as plt + + # Get all valid keywords by inspecting the signatures of draw_networkx_nodes, + # draw_networkx_edges, draw_networkx_labels + + valid_node_kwds = signature(draw_networkx_nodes).parameters.keys() + valid_edge_kwds = signature(draw_networkx_edges).parameters.keys() + valid_label_kwds = signature(draw_networkx_labels).parameters.keys() + + # Create a set with all valid keywords across the three functions and + # remove the arguments of this function (draw_networkx) + valid_kwds = (valid_node_kwds | valid_edge_kwds | valid_label_kwds) - { + "G", + "pos", + "arrows", + "with_labels", + } + + if any(k not in valid_kwds for k in kwds): + invalid_args = ", ".join([k for k in kwds if k not in valid_kwds]) + raise ValueError(f"Received invalid argument(s): {invalid_args}") + + node_kwds = {k: v for k, v in kwds.items() if k in valid_node_kwds} + edge_kwds = {k: v for k, v in kwds.items() if k in valid_edge_kwds} + label_kwds = {k: v for k, v in kwds.items() if k in valid_label_kwds} + + if pos is None: + pos = nx.drawing.spring_layout(G)  # default to spring layout + + draw_networkx_nodes(G, pos, **node_kwds) + draw_networkx_edges(G, pos, arrows=arrows, **edge_kwds) + if with_labels: + draw_networkx_labels(G, pos, **label_kwds) + plt.draw_if_interactive() + + +def draw_networkx_nodes( + G, + pos, + nodelist=None, + node_size=300, + node_color="#1f78b4", + node_shape="o", + alpha=None, + cmap=None, + vmin=None, + vmax=None, + ax=None, + linewidths=None, + edgecolors=None, + label=None, + margins=None, + hide_ticks=True, +): + """Draw the nodes of the graph G. + + This draws only the nodes of the graph G. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary + A dictionary with nodes as keys and positions as values. + Positions should be sequences of length 2. + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. + + nodelist : list (default list(G)) + Draw only specified nodes + + node_size : scalar or array (default=300) + Size of nodes. If an array it must be the same length as nodelist. + + node_color : color or array of colors (default='#1f78b4') + Node color. Can be a single color or a sequence of colors with the same + length as nodelist.
Color can be string or rgb (or rgba) tuple of + floats from 0-1. If numeric values are specified they will be + mapped to colors using the cmap and vmin,vmax parameters. See + matplotlib.scatter for more details. + + node_shape : string (default='o') + The shape of the node. Specification is as matplotlib.scatter + marker, one of 'so^>v<dph8'. + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G)) + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + + See Also + -------- + draw + draw_networkx + draw_networkx_edges + draw_networkx_labels + draw_networkx_edge_labels + """ + from collections.abc import Iterable + + import matplotlib as mpl + import matplotlib.collections  # call as mpl.collections + import matplotlib.pyplot as plt + import numpy as np + + if ax is None: + ax = plt.gca() + + if nodelist is None: + nodelist = list(G) + + if len(nodelist) == 0:  # empty nodelist, no drawing + return mpl.collections.PathCollection(None) + + try: + xy = np.asarray([pos[v] for v in nodelist]) + except KeyError as err: + raise nx.NetworkXError(f"Node {err} has no position.") from err + + if isinstance(alpha, Iterable): + node_color = apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax) + alpha = None + + if not isinstance(node_shape, np.ndarray) and not isinstance(node_shape, list): + node_shape = np.array([node_shape for _ in range(len(nodelist))]) + + for shape in np.unique(node_shape): + node_collection = ax.scatter( + xy[node_shape == shape, 0], + xy[node_shape == shape, 1], + s=node_size, + c=node_color, + marker=shape, + cmap=cmap, + vmin=vmin, + vmax=vmax, + alpha=alpha, + linewidths=linewidths, + edgecolors=edgecolors, + label=label, + ) + if hide_ticks: + ax.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + labelbottom=False, + labelleft=False, + ) + + if margins is not None: + if isinstance(margins, Iterable): + ax.margins(*margins) + else: + ax.margins(margins) + + node_collection.set_zorder(2) + return node_collection + + +class FancyArrowFactory: + """Draw arrows with `matplotlib.patches.FancyArrowPatch`""" + + class ConnectionStyleFactory: + def __init__(self, connectionstyles, selfloop_height, ax=None): + import matplotlib as mpl + import matplotlib.path  # call as mpl.path + import numpy as np + + self.ax = ax + self.mpl = mpl + self.np = np + self.base_connection_styles = [ + mpl.patches.ConnectionStyle(cs) for cs in connectionstyles + ] + self.n = len(self.base_connection_styles) + self.selfloop_height = selfloop_height + + def curved(self, edge_index): + return self.base_connection_styles[edge_index % self.n] + + def self_loop(self, edge_index): + def self_loop_connection(posA, posB, *args, **kwargs): + if not self.np.all(posA == posB): + raise nx.NetworkXError( + "`self_loop` connection style method " + "is only to be used for self-loops" + ) + # this is called with _screen space_ values + # so convert back to data space + data_loc = self.ax.transData.inverted().transform(posA) + v_shift = 0.1 * self.selfloop_height + h_shift = v_shift * 0.5 + # put the top of the loop first so arrow is not hidden by node + path = self.np.asarray( + [ + # 1 + [0, v_shift], + # 4 4 4 + [h_shift, v_shift], + [h_shift, 0], + [0, 0], + # 4 4 4 + [-h_shift, 0], + [-h_shift, v_shift], + [0, v_shift], + ] + ) + # Rotate self loop 90 deg.
if more than 1 + # This will allow for maximum of 4 visible self loops + if edge_index % 4: + x, y = path.T + for _ in range(edge_index % 4): + x, y = y, -x + path = self.np.array([x, y]).T + return self.mpl.path.Path( + self.ax.transData.transform(data_loc + path), [1, 4, 4, 4, 4, 4, 4] + ) + + return self_loop_connection + + def __init__( + self, + edge_pos, + edgelist, + nodelist, + edge_indices, + node_size, + selfloop_height, + connectionstyle="arc3", + node_shape="o", + arrowstyle="-", + arrowsize=10, + edge_color="k", + alpha=None, + linewidth=1.0, + style="solid", + min_source_margin=0, + min_target_margin=0, + ax=None, + ): + import matplotlib as mpl + import matplotlib.patches # call as mpl.patches + import matplotlib.pyplot as plt + import numpy as np + + if isinstance(connectionstyle, str): + connectionstyle = [connectionstyle] + elif np.iterable(connectionstyle): + connectionstyle = list(connectionstyle) + else: + msg = "ConnectionStyleFactory arg `connectionstyle` must be str or iterable" + raise nx.NetworkXError(msg) + self.ax = ax + self.mpl = mpl + self.np = np + self.edge_pos = edge_pos + self.edgelist = edgelist + self.nodelist = nodelist + self.node_shape = node_shape + self.min_source_margin = min_source_margin + self.min_target_margin = min_target_margin + self.edge_indices = edge_indices + self.node_size = node_size + self.connectionstyle_factory = self.ConnectionStyleFactory( + connectionstyle, selfloop_height, ax + ) + self.arrowstyle = arrowstyle + self.arrowsize = arrowsize + self.arrow_colors = mpl.colors.colorConverter.to_rgba_array(edge_color, alpha) + self.linewidth = linewidth + self.style = style + if isinstance(arrowsize, list) and len(arrowsize) != len(edge_pos): + raise ValueError("arrowsize should have the same length as edgelist") + + def __call__(self, i): + (x1, y1), (x2, y2) = self.edge_pos[i] + shrink_source = 0 # space from source to tail + shrink_target = 0 # space from head to target + if ( + self.np.iterable(self.min_source_margin) + and not isinstance(self.min_source_margin, str) + and not isinstance(self.min_source_margin, tuple) + ): + min_source_margin = self.min_source_margin[i] + else: + min_source_margin = self.min_source_margin + + if ( + self.np.iterable(self.min_target_margin) + and not isinstance(self.min_target_margin, str) + and not isinstance(self.min_target_margin, tuple) + ): + min_target_margin = self.min_target_margin[i] + else: + min_target_margin = self.min_target_margin + + if self.np.iterable(self.node_size): # many node sizes + source, target = self.edgelist[i][:2] + source_node_size = self.node_size[self.nodelist.index(source)] + target_node_size = self.node_size[self.nodelist.index(target)] + shrink_source = self.to_marker_edge(source_node_size, self.node_shape) + shrink_target = self.to_marker_edge(target_node_size, self.node_shape) + else: + shrink_source = self.to_marker_edge(self.node_size, self.node_shape) + shrink_target = shrink_source + shrink_source = max(shrink_source, min_source_margin) + shrink_target = max(shrink_target, min_target_margin) + + # scale factor of arrow head + if isinstance(self.arrowsize, list): + mutation_scale = self.arrowsize[i] + else: + mutation_scale = self.arrowsize + + if len(self.arrow_colors) > i: + arrow_color = self.arrow_colors[i] + elif len(self.arrow_colors) == 1: + arrow_color = self.arrow_colors[0] + else: # Cycle through colors + arrow_color = self.arrow_colors[i % len(self.arrow_colors)] + + if self.np.iterable(self.linewidth): + if len(self.linewidth) > i: + linewidth = 
self.linewidth[i] + else: + linewidth = self.linewidth[i % len(self.linewidth)] + else: + linewidth = self.linewidth + + if ( + self.np.iterable(self.style) + and not isinstance(self.style, str) + and not isinstance(self.style, tuple) + ): + if len(self.style) > i: + linestyle = self.style[i] + else:  # Cycle through styles + linestyle = self.style[i % len(self.style)] + else: + linestyle = self.style + + if x1 == x2 and y1 == y2: + connectionstyle = self.connectionstyle_factory.self_loop( + self.edge_indices[i] + ) + else: + connectionstyle = self.connectionstyle_factory.curved(self.edge_indices[i]) + + if ( + self.np.iterable(self.arrowstyle) + and not isinstance(self.arrowstyle, str) + and not isinstance(self.arrowstyle, tuple) + ): + arrowstyle = self.arrowstyle[i] + else: + arrowstyle = self.arrowstyle + + return self.mpl.patches.FancyArrowPatch( + (x1, y1), + (x2, y2), + arrowstyle=arrowstyle, + shrinkA=shrink_source, + shrinkB=shrink_target, + mutation_scale=mutation_scale, + color=arrow_color, + linewidth=linewidth, + connectionstyle=connectionstyle, + linestyle=linestyle, + zorder=1,  # arrows go behind nodes + ) + + def to_marker_edge(self, marker_size, marker): + if marker in "s^>v<d":  # `large` markers need extra space + return self.np.sqrt(2 * marker_size) / 2 + else: + return self.np.sqrt(marker_size) / 2 + + +def draw_networkx_edges( + G, + pos, + edgelist=None, + width=1.0, + edge_color="k", + style="solid", + alpha=None, + arrowstyle=None, + arrowsize=10, + edge_cmap=None, + edge_vmin=None, + edge_vmax=None, + ax=None, + arrows=None, + label=None, + node_size=300, + nodelist=None, + node_shape="o", + connectionstyle="arc3", + min_source_margin=0, + min_target_margin=0, + hide_ticks=True, +): + """Draw the edges of the graph G. + + This draws only the edges of the graph G. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary + A dictionary with nodes as keys and positions as values. + Positions should be sequences of length 2. + + edgelist : collection of edge tuples (default=G.edges()) + Draw only specified edges + + width : float or array of floats (default=1.0) + Line width of edges + + edge_color : color or array of colors (default='k') + Edge color. Can be a single color or a sequence of colors with the same + length as edgelist. Color can be string or rgb (or rgba) tuple of + floats from 0-1. If numeric values are specified they will be + mapped to colors using the edge_cmap and edge_vmin,edge_vmax parameters. + + style : string or array of strings (default='solid') + Edge line style e.g.: '-', '--', '-.', ':' + or words like 'solid' or 'dashed'. + + alpha : float or array of floats (default=None) + The edge transparency. + + edge_cmap : Matplotlib colormap, optional + Colormap for mapping intensities of edges + + edge_vmin, edge_vmax : floats, optional + Minimum and maximum for edge colormap scaling + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. + + arrowstyle : str (default='-\|>' for directed graphs) + For directed graphs and `arrows==True` defaults to '-|>', + For undirected graphs default to '-'. + + See `matplotlib.patches.ArrowStyle` for more options. + + arrowsize : int or list of ints (default=10) + For directed graphs, choose the size of the arrow head's length and + width. See `matplotlib.patches.FancyArrowPatch` for attribute + `mutation_scale` for more info. + + connectionstyle : string or iterable of strings (default="arc3") + Pass the connectionstyle parameter to create curved arc of rounding + radius rad. For example, connectionstyle='arc3,rad=0.2'. + See `matplotlib.patches.ConnectionStyle` and + `matplotlib.patches.FancyArrowPatch` for more info. + If Iterable, index indicates i'th edge key of MultiGraph + + node_size : scalar or array (default=300) + Size of nodes. Though the nodes are not drawn with this function, the + node size is used in determining edge positioning. + + nodelist : list, optional (default=G.nodes()) + This provides the node order for the `node_size` array (if it is an array). + + node_shape : string (default='o') + The marker used for nodes, used in determining edge positioning. + Specification is as a `matplotlib.markers` marker, e.g. one of 'so^>v<dph8'. + + arrows : bool or None, optional (default=None) + If `None`, directed graphs draw arrowheads with + `~matplotlib.patches.FancyArrowPatch`, while undirected graphs draw edges + via `~matplotlib.collections.LineCollection` for speed. + If `True`, draw arrowheads with FancyArrowPatches (bendable and stylish). + If `False`, draw edges using LineCollection (linear and fast). + Note: Arrows will be the same color as edges. + + label : [None | string] + Label for legend + + min_source_margin : int (default=0) + The minimum margin (gap) at the source of the edge. + + min_target_margin : int (default=0) + The minimum margin (gap) at the destination of the edge. + + hide_ticks : bool, optional + Hide ticks of axes. When `True` (the default), ticks and ticklabels + are removed from the axes. To set ticks and tick labels to the pyplot default, + use ``hide_ticks=False``. + + Returns + ------- + matplotlib.collections.LineCollection or a list of matplotlib.patches.FancyArrowPatch + If ``arrows=True``, a list of FancyArrowPatches is returned. + If ``arrows=False``, a LineCollection is returned. + If ``arrows=None`` (the default), then a LineCollection is returned if + `G` is undirected, otherwise a list of FancyArrowPatches. + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G)) + + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 3)]) + >>> arcs = nx.draw_networkx_edges(G, pos=nx.spring_layout(G)) + >>> alphas = [0.3, 0.4, 0.5] + >>> for i, arc in enumerate(arcs):  # change alpha values of arcs + ...     arc.set_alpha(alphas[i]) + + The FancyArrowPatches corresponding to self-loops are not always + returned, but can always be accessed via the ``patches`` attribute of the + `matplotlib.Axes` object.
+ + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> G = nx.Graph([(0, 1), (0, 0)]) # Self-loop at node 0 + >>> edge_collection = nx.draw_networkx_edges(G, pos=nx.circular_layout(G), ax=ax) + >>> self_loop_fap = ax.patches[0] + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + + See Also + -------- + draw + draw_networkx + draw_networkx_nodes + draw_networkx_labels + draw_networkx_edge_labels + + """ + import warnings + + import matplotlib as mpl + import matplotlib.collections # call as mpl.collections + import matplotlib.colors # call as mpl.colors + import matplotlib.pyplot as plt + import numpy as np + + # The default behavior is to use LineCollection to draw edges for + # undirected graphs (for performance reasons) and use FancyArrowPatches + # for directed graphs. + # The `arrows` keyword can be used to override the default behavior + if arrows is None: + use_linecollection = not (G.is_directed() or G.is_multigraph()) + else: + if not isinstance(arrows, bool): + raise TypeError("Argument `arrows` must be of type bool or None") + use_linecollection = not arrows + + if isinstance(connectionstyle, str): + connectionstyle = [connectionstyle] + elif np.iterable(connectionstyle): + connectionstyle = list(connectionstyle) + else: + msg = "draw_networkx_edges arg `connectionstyle` must be str or iterable" + raise nx.NetworkXError(msg) + + # Some kwargs only apply to FancyArrowPatches. Warn users when they use + # non-default values for these kwargs when LineCollection is being used + # instead of silently ignoring the specified option + if use_linecollection: + msg = ( + "\n\nThe {0} keyword argument is not applicable when drawing edges\n" + "with LineCollection.\n\n" + "To make this warning go away, either specify `arrows=True` to\n" + "force FancyArrowPatches or use the default values.\n" + "Note that using FancyArrowPatches may be slow for large graphs.\n" + ) + if arrowstyle is not None: + warnings.warn(msg.format("arrowstyle"), category=UserWarning, stacklevel=2) + if arrowsize != 10: + warnings.warn(msg.format("arrowsize"), category=UserWarning, stacklevel=2) + if min_source_margin != 0: + warnings.warn( + msg.format("min_source_margin"), category=UserWarning, stacklevel=2 + ) + if min_target_margin != 0: + warnings.warn( + msg.format("min_target_margin"), category=UserWarning, stacklevel=2 + ) + if any(cs != "arc3" for cs in connectionstyle): + warnings.warn( + msg.format("connectionstyle"), category=UserWarning, stacklevel=2 + ) + + # NOTE: Arrowstyle modification must occur after the warnings section + if arrowstyle is None: + arrowstyle = "-|>" if G.is_directed() else "-" + + if ax is None: + ax = plt.gca() + + if edgelist is None: + edgelist = list(G.edges) # (u, v, k) for multigraph (u, v) otherwise + + if len(edgelist): + if G.is_multigraph(): + key_count = collections.defaultdict(lambda: itertools.count(0)) + edge_indices = [next(key_count[tuple(e[:2])]) for e in edgelist] + else: + edge_indices = [0] * len(edgelist) + else: # no edges! + return [] + + if nodelist is None: + nodelist = list(G.nodes()) + + # FancyArrowPatch handles color=None different from LineCollection + if edge_color is None: + edge_color = "k" + + # set edge positions + edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist]) + + # Check if edge_color is an array of floats and map to edge_cmap. 
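+ # (e.g. edge_color=[0.2, 0.5, 0.9] with edge_cmap=plt.cm.viridis colors three + # edges by passing each float through the colormap; the values are illustrative)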
+ # This is the only case handled differently from matplotlib + if ( + np.iterable(edge_color) + and (len(edge_color) == len(edge_pos)) + and np.all([isinstance(c, Number) for c in edge_color]) + ): + if edge_cmap is not None: + assert isinstance(edge_cmap, mpl.colors.Colormap) + else: + edge_cmap = plt.get_cmap() + if edge_vmin is None: + edge_vmin = min(edge_color) + if edge_vmax is None: + edge_vmax = max(edge_color) + color_normal = mpl.colors.Normalize(vmin=edge_vmin, vmax=edge_vmax) + edge_color = [edge_cmap(color_normal(e)) for e in edge_color] + + # compute initial view + minx = np.amin(np.ravel(edge_pos[:, :, 0])) + maxx = np.amax(np.ravel(edge_pos[:, :, 0])) + miny = np.amin(np.ravel(edge_pos[:, :, 1])) + maxy = np.amax(np.ravel(edge_pos[:, :, 1])) + w = maxx - minx + h = maxy - miny + + # Self-loops are scaled by view extent, except in cases the extent + # is 0, e.g. for a single node. In this case, fall back to scaling + # by the maximum node size + selfloop_height = h if h != 0 else 0.005 * np.array(node_size).max() + fancy_arrow_factory = FancyArrowFactory( + edge_pos, + edgelist, + nodelist, + edge_indices, + node_size, + selfloop_height, + connectionstyle, + node_shape, + arrowstyle, + arrowsize, + edge_color, + alpha, + width, + style, + min_source_margin, + min_target_margin, + ax=ax, + ) + + # Draw the edges + if use_linecollection: + edge_collection = mpl.collections.LineCollection( + edge_pos, + colors=edge_color, + linewidths=width, + antialiaseds=(1,), + linestyle=style, + alpha=alpha, + ) + edge_collection.set_cmap(edge_cmap) + edge_collection.set_clim(edge_vmin, edge_vmax) + edge_collection.set_zorder(1) # edges go behind nodes + edge_collection.set_label(label) + ax.add_collection(edge_collection) + edge_viz_obj = edge_collection + + # Make sure selfloop edges are also drawn + # --------------------------------------- + selfloops_to_draw = [loop for loop in nx.selfloop_edges(G) if loop in edgelist] + if selfloops_to_draw: + edgelist_tuple = list(map(tuple, edgelist)) + arrow_collection = [] + for loop in selfloops_to_draw: + i = edgelist_tuple.index(loop) + arrow = fancy_arrow_factory(i) + arrow_collection.append(arrow) + ax.add_patch(arrow) + else: + edge_viz_obj = [] + for i in range(len(edgelist)): + arrow = fancy_arrow_factory(i) + ax.add_patch(arrow) + edge_viz_obj.append(arrow) + + # update view after drawing + padx, pady = 0.05 * w, 0.05 * h + corners = (minx - padx, miny - pady), (maxx + padx, maxy + pady) + ax.update_datalim(corners) + ax.autoscale_view() + + if hide_ticks: + ax.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + labelbottom=False, + labelleft=False, + ) + + return edge_viz_obj + + +def draw_networkx_labels( + G, + pos, + labels=None, + font_size=12, + font_color="k", + font_family="sans-serif", + font_weight="normal", + alpha=None, + bbox=None, + horizontalalignment="center", + verticalalignment="center", + ax=None, + clip_on=True, + hide_ticks=True, +): + """Draw node labels on the graph G. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary + A dictionary with nodes as keys and positions as values. + Positions should be sequences of length 2. + + labels : dictionary (default={n: n for n in G}) + Node labels in a dictionary of text labels keyed by node. + Node-keys in labels should appear as keys in `pos`. + If needed use: `{n:lab for n,lab in labels.items() if n in pos}` + + font_size : int or dictionary of nodes to ints (default=12) + Font size for text labels. 
+ + font_color : color or dictionary of nodes to colors (default='k' black) + Font color string. Color can be string or rgb (or rgba) tuple of + floats from 0-1. + + font_weight : string or dictionary of nodes to strings (default='normal') + Font weight. + + font_family : string or dictionary of nodes to strings (default='sans-serif') + Font family. + + alpha : float or None or dictionary of nodes to floats (default=None) + The text transparency. + + bbox : Matplotlib bbox, (default is Matplotlib's ax.text default) + Specify text box properties (e.g. shape, color etc.) for node labels. + + horizontalalignment : string or array of strings (default='center') + Horizontal alignment {'center', 'right', 'left'}. If an array is + specified it must be the same length as `nodelist`. + + verticalalignment : string (default='center') + Vertical alignment {'center', 'top', 'bottom', 'baseline', 'center_baseline'}. + If an array is specified it must be the same length as `nodelist`. + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. + + clip_on : bool (default=True) + Turn on clipping of node labels at axis boundaries + + hide_ticks : bool, optional + Hide ticks of axes. When `True` (the default), ticks and ticklabels + are removed from the axes. To set ticks and tick labels to the pyplot default, + use ``hide_ticks=False``. + + Returns + ------- + dict + `dict` of labels keyed on the nodes + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> labels = nx.draw_networkx_labels(G, pos=nx.spring_layout(G)) + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + + See Also + -------- + draw + draw_networkx + draw_networkx_nodes + draw_networkx_edges + draw_networkx_edge_labels + """ + import matplotlib.pyplot as plt + + if ax is None: + ax = plt.gca() + + if labels is None: + labels = {n: n for n in G.nodes()} + + individual_params = set() + + def check_individual_params(p_value, p_name): + if isinstance(p_value, dict): + if len(p_value) != len(labels): + raise ValueError(f"{p_name} must have the same length as labels.") + individual_params.add(p_name) + + def get_param_value(node, p_value, p_name): + if p_name in individual_params: + return p_value[node] + return p_value + + check_individual_params(font_size, "font_size") + check_individual_params(font_color, "font_color") + check_individual_params(font_weight, "font_weight") + check_individual_params(font_family, "font_family") + check_individual_params(alpha, "alpha") + + text_items = {} # there is no text collection so we'll fake one + for n, label in labels.items(): + (x, y) = pos[n] + if not isinstance(label, str): + label = str(label) # this makes "1" and 1 labeled the same + t = ax.text( + x, + y, + label, + size=get_param_value(n, font_size, "font_size"), + color=get_param_value(n, font_color, "font_color"), + family=get_param_value(n, font_family, "font_family"), + weight=get_param_value(n, font_weight, "font_weight"), + alpha=get_param_value(n, alpha, "alpha"), + horizontalalignment=horizontalalignment, + verticalalignment=verticalalignment, + transform=ax.transData, + bbox=bbox, + clip_on=clip_on, + ) + text_items[n] = t + + if hide_ticks: + ax.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + labelbottom=False, + labelleft=False, + ) + + return text_items + + +def draw_networkx_edge_labels( + G, + pos, + edge_labels=None, + label_pos=0.5, + font_size=10, + font_color="k", + font_family="sans-serif", + 
font_weight="normal", + alpha=None, + bbox=None, + horizontalalignment="center", + verticalalignment="center", + ax=None, + rotate=True, + clip_on=True, + node_size=300, + nodelist=None, + connectionstyle="arc3", + hide_ticks=True, +): + """Draw edge labels. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary + A dictionary with nodes as keys and positions as values. + Positions should be sequences of length 2. + + edge_labels : dictionary (default=None) + Edge labels in a dictionary of labels keyed by edge two-tuple. + Only labels for the keys in the dictionary are drawn. + + label_pos : float (default=0.5) + Position of edge label along edge (0=head, 0.5=center, 1=tail) + + font_size : int (default=10) + Font size for text labels + + font_color : color (default='k' black) + Font color string. Color can be string or rgb (or rgba) tuple of + floats from 0-1. + + font_weight : string (default='normal') + Font weight + + font_family : string (default='sans-serif') + Font family + + alpha : float or None (default=None) + The text transparency + + bbox : Matplotlib bbox, optional + Specify text box properties (e.g. shape, color etc.) for edge labels. + Default is {boxstyle='round', ec=(1.0, 1.0, 1.0), fc=(1.0, 1.0, 1.0)}. + + horizontalalignment : string (default='center') + Horizontal alignment {'center', 'right', 'left'} + + verticalalignment : string (default='center') + Vertical alignment {'center', 'top', 'bottom', 'baseline', 'center_baseline'} + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. + + rotate : bool (default=True) + Rotate edge labels to lie parallel to edges + + clip_on : bool (default=True) + Turn on clipping of edge labels at axis boundaries + + node_size : scalar or array (default=300) + Size of nodes. If an array it must be the same length as nodelist. + + nodelist : list, optional (default=G.nodes()) + This provides the node order for the `node_size` array (if it is an array). + + connectionstyle : string or iterable of strings (default="arc3") + Pass the connectionstyle parameter to create curved arc of rounding + radius rad. For example, connectionstyle='arc3,rad=0.2'. + See `matplotlib.patches.ConnectionStyle` and + `matplotlib.patches.FancyArrowPatch` for more info. + If Iterable, index indicates i'th edge key of MultiGraph + + hide_ticks : bool, optional + Hide ticks of axes. When `True` (the default), ticks and ticklabels + are removed from the axes. To set ticks and tick labels to the pyplot default, + use ``hide_ticks=False``. + + Returns + ------- + dict + `dict` of labels keyed by edge + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> edge_labels = nx.draw_networkx_edge_labels(G, pos=nx.spring_layout(G)) + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + + See Also + -------- + draw + draw_networkx + draw_networkx_nodes + draw_networkx_edges + draw_networkx_labels + """ + import matplotlib as mpl + import matplotlib.pyplot as plt + import numpy as np + + class CurvedArrowText(mpl.text.Text): + def __init__( + self, + arrow, + *args, + label_pos=0.5, + labels_horizontal=False, + ax=None, + **kwargs, + ): + # Bind to FancyArrowPatch + self.arrow = arrow + # how far along the text should be on the curve, + # 0 is at start, 1 is at end etc. 
+ self.label_pos = label_pos + self.labels_horizontal = labels_horizontal + if ax is None: + ax = plt.gca() + self.ax = ax + self.x, self.y, self.angle = self._update_text_pos_angle(arrow) + + # Create text object + super().__init__(self.x, self.y, *args, rotation=self.angle, **kwargs) + # Bind to axis + self.ax.add_artist(self) + + def _get_arrow_path_disp(self, arrow): + """ + This is part of FancyArrowPatch._get_path_in_displaycoord + It omits the second part of the method where path is converted + to polygon based on width + The transform is taken from ax, not the object, as the object + has not been added yet, and doesn't have transform + """ + dpi_cor = arrow._dpi_cor + trans_data = self.ax.transData + if arrow._posA_posB is None: + raise ValueError( + "Can only draw labels for fancy arrows with " + "posA and posB inputs, not custom path" + ) + posA = arrow._convert_xy_units(arrow._posA_posB[0]) + posB = arrow._convert_xy_units(arrow._posA_posB[1]) + (posA, posB) = trans_data.transform((posA, posB)) + _path = arrow.get_connectionstyle()( + posA, + posB, + patchA=arrow.patchA, + patchB=arrow.patchB, + shrinkA=arrow.shrinkA * dpi_cor, + shrinkB=arrow.shrinkB * dpi_cor, + ) + # Return is in display coordinates + return _path + + def _update_text_pos_angle(self, arrow): + # Fractional label position + # Text position at a proportion t along the line in display coords + # default is 0.5 so text appears at the halfway point + t = self.label_pos + tt = 1 - t + path_disp = self._get_arrow_path_disp(arrow) + is_bar_style = isinstance( + arrow.get_connectionstyle(), mpl.patches.ConnectionStyle.Bar + ) + # 1. Calculate x and y + if is_bar_style: + # Bar Connection Style - straight line + _, (cx1, cy1), (cx2, cy2), _ = path_disp.vertices + x = cx1 * tt + cx2 * t + y = cy1 * tt + cy2 * t + else: + # Arc or Angle type Connection Styles - Bezier curve + (x1, y1), (cx, cy), (x2, y2) = path_disp.vertices + x = tt**2 * x1 + 2 * t * tt * cx + t**2 * x2 + y = tt**2 * y1 + 2 * t * tt * cy + t**2 * y2 + # 2. 
Calculate Angle + if self.labels_horizontal: + # Horizontal text labels + angle = 0 + else: + # Labels parallel to curve + if is_bar_style: + change_x = (cx2 - cx1) / 2 + change_y = (cy2 - cy1) / 2 + else: + change_x = 2 * tt * (cx - x1) + 2 * t * (x2 - cx) + change_y = 2 * tt * (cy - y1) + 2 * t * (y2 - cy) + angle = np.arctan2(change_y, change_x) / (2 * np.pi) * 360 + # Text is "right way up" + if angle > 90: + angle -= 180 + elif angle < -90: + angle += 180 + (x, y) = self.ax.transData.inverted().transform((x, y)) + return x, y, angle + + def draw(self, renderer): + # recalculate the text position and angle + self.x, self.y, self.angle = self._update_text_pos_angle(self.arrow) + self.set_position((self.x, self.y)) + self.set_rotation(self.angle) + # redraw text + super().draw(renderer) + + # use default box of white with white border + if bbox is None: + bbox = {"boxstyle": "round", "ec": (1.0, 1.0, 1.0), "fc": (1.0, 1.0, 1.0)} + + if isinstance(connectionstyle, str): + connectionstyle = [connectionstyle] + elif np.iterable(connectionstyle): + connectionstyle = list(connectionstyle) + else: + raise nx.NetworkXError( + "draw_networkx_edges arg `connectionstyle` must be" + "string or iterable of strings" + ) + + if ax is None: + ax = plt.gca() + + if edge_labels is None: + kwds = {"keys": True} if G.is_multigraph() else {} + edge_labels = {tuple(edge): d for *edge, d in G.edges(data=True, **kwds)} + # NOTHING TO PLOT + if not edge_labels: + return {} + edgelist, labels = zip(*edge_labels.items()) + + if nodelist is None: + nodelist = list(G.nodes()) + + # set edge positions + edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist]) + + if G.is_multigraph(): + key_count = collections.defaultdict(lambda: itertools.count(0)) + edge_indices = [next(key_count[tuple(e[:2])]) for e in edgelist] + else: + edge_indices = [0] * len(edgelist) + + # Used to determine self loop mid-point + # Note, that this will not be accurate, + # if not drawing edge_labels for all edges drawn + h = 0 + if edge_labels: + miny = np.amin(np.ravel(edge_pos[:, :, 1])) + maxy = np.amax(np.ravel(edge_pos[:, :, 1])) + h = maxy - miny + selfloop_height = h if h != 0 else 0.005 * np.array(node_size).max() + fancy_arrow_factory = FancyArrowFactory( + edge_pos, + edgelist, + nodelist, + edge_indices, + node_size, + selfloop_height, + connectionstyle, + ax=ax, + ) + + individual_params = {} + + def check_individual_params(p_value, p_name): + # TODO should this be list or array (as in a numpy array)? 
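+ # A list-valued parameter becomes an iterator that yields one value per + # edge label, in edgelist order; scalar parameters fall through + # get_param_value unchanged. e.g. font_size=[8, 10, 12] sizes three + # edge labels individually (an illustrative value, not a default).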
+ if isinstance(p_value, list): + if len(p_value) != len(edgelist): + raise ValueError(f"{p_name} must have the same length as edgelist.") + individual_params[p_name] = iter(p_value) + + # Don't need to pass in an edge because these are lists, not dicts + def get_param_value(p_value, p_name): + if p_name in individual_params: + return next(individual_params[p_name]) + return p_value + + check_individual_params(font_size, "font_size") + check_individual_params(font_color, "font_color") + check_individual_params(font_weight, "font_weight") + check_individual_params(alpha, "alpha") + check_individual_params(horizontalalignment, "horizontalalignment") + check_individual_params(verticalalignment, "verticalalignment") + check_individual_params(rotate, "rotate") + check_individual_params(label_pos, "label_pos") + + text_items = {} + for i, (edge, label) in enumerate(zip(edgelist, labels)): + if not isinstance(label, str): + label = str(label)  # this makes "1" and 1 labeled the same + + n1, n2 = edge[:2] + arrow = fancy_arrow_factory(i) + if n1 == n2: + connectionstyle_obj = arrow.get_connectionstyle() + posA = ax.transData.transform(pos[n1]) + path_disp = connectionstyle_obj(posA, posA) + path_data = ax.transData.inverted().transform_path(path_disp) + x, y = path_data.vertices[0] + text_items[edge] = ax.text( + x, + y, + label, + size=get_param_value(font_size, "font_size"), + color=get_param_value(font_color, "font_color"), + family=get_param_value(font_family, "font_family"), + weight=get_param_value(font_weight, "font_weight"), + alpha=get_param_value(alpha, "alpha"), + horizontalalignment=get_param_value( + horizontalalignment, "horizontalalignment" + ), + verticalalignment=get_param_value( + verticalalignment, "verticalalignment" + ), + rotation=0, + transform=ax.transData, + bbox=bbox, + zorder=1, + clip_on=clip_on, + ) + else: + text_items[edge] = CurvedArrowText( + arrow, + label, + size=get_param_value(font_size, "font_size"), + color=get_param_value(font_color, "font_color"), + family=get_param_value(font_family, "font_family"), + weight=get_param_value(font_weight, "font_weight"), + alpha=get_param_value(alpha, "alpha"), + horizontalalignment=get_param_value( + horizontalalignment, "horizontalalignment" + ), + verticalalignment=get_param_value( + verticalalignment, "verticalalignment" + ), + transform=ax.transData, + bbox=bbox, + zorder=1, + clip_on=clip_on, + label_pos=get_param_value(label_pos, "label_pos"), + labels_horizontal=not get_param_value(rotate, "rotate"), + ax=ax, + ) + + if hide_ticks: + ax.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + labelbottom=False, + labelleft=False, + ) + + return text_items + + +def draw_bipartite(G, **kwargs): + """Draw the graph `G` with a bipartite layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.bipartite_layout(G), **kwargs) + + Parameters + ---------- + G : graph + A networkx graph + + kwargs : optional keywords + See `draw_networkx` for a description of optional keywords. + + Raises + ------ + NetworkXError + If `G` is not bipartite. + + Notes + ----- + The layout is computed each time this function is called.
For + repeated drawing it is much more efficient to call + `~networkx.drawing.layout.bipartite_layout` directly and reuse the result:: + + >>> G = nx.complete_bipartite_graph(3, 3) + >>> pos = nx.bipartite_layout(G) + >>> nx.draw(G, pos=pos) # Draw the original graph + >>> # Draw a subgraph, reusing the same node positions + >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red") + + Examples + -------- + >>> G = nx.complete_bipartite_graph(2, 5) + >>> nx.draw_bipartite(G) + + See Also + -------- + :func:`~networkx.drawing.layout.bipartite_layout` + """ + draw(G, pos=nx.bipartite_layout(G), **kwargs) + + +def draw_circular(G, **kwargs): + """Draw the graph `G` with a circular layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.circular_layout(G), **kwargs) + + Parameters + ---------- + G : graph + A networkx graph + + kwargs : optional keywords + See `draw_networkx` for a description of optional keywords. + + Notes + ----- + The layout is computed each time this function is called. For + repeated drawing it is much more efficient to call + `~networkx.drawing.layout.circular_layout` directly and reuse the result:: + + >>> G = nx.complete_graph(5) + >>> pos = nx.circular_layout(G) + >>> nx.draw(G, pos=pos) # Draw the original graph + >>> # Draw a subgraph, reusing the same node positions + >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red") + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.draw_circular(G) + + See Also + -------- + :func:`~networkx.drawing.layout.circular_layout` + """ + draw(G, pos=nx.circular_layout(G), **kwargs) + + +def draw_kamada_kawai(G, **kwargs): + """Draw the graph `G` with a Kamada-Kawai force-directed layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.kamada_kawai_layout(G), **kwargs) + + Parameters + ---------- + G : graph + A networkx graph + + kwargs : optional keywords + See `draw_networkx` for a description of optional keywords. + + Notes + ----- + The layout is computed each time this function is called. + For repeated drawing it is much more efficient to call + `~networkx.drawing.layout.kamada_kawai_layout` directly and reuse the + result:: + + >>> G = nx.complete_graph(5) + >>> pos = nx.kamada_kawai_layout(G) + >>> nx.draw(G, pos=pos) # Draw the original graph + >>> # Draw a subgraph, reusing the same node positions + >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red") + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.draw_kamada_kawai(G) + + See Also + -------- + :func:`~networkx.drawing.layout.kamada_kawai_layout` + """ + draw(G, pos=nx.kamada_kawai_layout(G), **kwargs) + + +def draw_random(G, **kwargs): + """Draw the graph `G` with a random layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.random_layout(G), **kwargs) + + Parameters + ---------- + G : graph + A networkx graph + + kwargs : optional keywords + See `draw_networkx` for a description of optional keywords. + + Notes + ----- + The layout is computed each time this function is called. 
+ For repeated drawing it is much more efficient to call + `~networkx.drawing.layout.random_layout` directly and reuse the result:: + + >>> G = nx.complete_graph(5) + >>> pos = nx.random_layout(G) + >>> nx.draw(G, pos=pos) # Draw the original graph + >>> # Draw a subgraph, reusing the same node positions + >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red") + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> nx.draw_random(G) + + See Also + -------- + :func:`~networkx.drawing.layout.random_layout` + """ + draw(G, pos=nx.random_layout(G), **kwargs) + + +def draw_spectral(G, **kwargs): + """Draw the graph `G` with a spectral 2D layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.spectral_layout(G), **kwargs) + + For more information about how node positions are determined, see + `~networkx.drawing.layout.spectral_layout`. + + Parameters + ---------- + G : graph + A networkx graph + + kwargs : optional keywords + See `draw_networkx` for a description of optional keywords. + + Notes + ----- + The layout is computed each time this function is called. + For repeated drawing it is much more efficient to call + `~networkx.drawing.layout.spectral_layout` directly and reuse the result:: + + >>> G = nx.complete_graph(5) + >>> pos = nx.spectral_layout(G) + >>> nx.draw(G, pos=pos) # Draw the original graph + >>> # Draw a subgraph, reusing the same node positions + >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red") + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.draw_spectral(G) + + See Also + -------- + :func:`~networkx.drawing.layout.spectral_layout` + """ + draw(G, pos=nx.spectral_layout(G), **kwargs) + + +def draw_spring(G, **kwargs): + """Draw the graph `G` with a spring layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.spring_layout(G), **kwargs) + + Parameters + ---------- + G : graph + A networkx graph + + kwargs : optional keywords + See `draw_networkx` for a description of optional keywords. + + Notes + ----- + `~networkx.drawing.layout.spring_layout` is also the default layout for + `draw`, so this function is equivalent to `draw`. + + The layout is computed each time this function is called. + For repeated drawing it is much more efficient to call + `~networkx.drawing.layout.spring_layout` directly and reuse the result:: + + >>> G = nx.complete_graph(5) + >>> pos = nx.spring_layout(G) + >>> nx.draw(G, pos=pos) # Draw the original graph + >>> # Draw a subgraph, reusing the same node positions + >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red") + + Examples + -------- + >>> G = nx.path_graph(20) + >>> nx.draw_spring(G) + + See Also + -------- + draw + :func:`~networkx.drawing.layout.spring_layout` + """ + draw(G, pos=nx.spring_layout(G), **kwargs) + + +def draw_shell(G, nlist=None, **kwargs): + """Draw networkx graph `G` with shell layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.shell_layout(G, nlist=nlist), **kwargs) + + Parameters + ---------- + G : graph + A networkx graph + + nlist : list of list of nodes, optional + A list containing lists of nodes representing the shells. + Default is `None`, meaning all nodes are in a single shell. + See `~networkx.drawing.layout.shell_layout` for details. + + kwargs : optional keywords + See `draw_networkx` for a description of optional keywords. + + Notes + ----- + The layout is computed each time this function is called. 
+ For repeated drawing it is much more efficient to call + `~networkx.drawing.layout.shell_layout` directly and reuse the result:: + + >>> G = nx.complete_graph(5) + >>> pos = nx.shell_layout(G) + >>> nx.draw(G, pos=pos) # Draw the original graph + >>> # Draw a subgraph, reusing the same node positions + >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red") + + Examples + -------- + >>> G = nx.path_graph(4) + >>> shells = [[0], [1, 2, 3]] + >>> nx.draw_shell(G, nlist=shells) + + See Also + -------- + :func:`~networkx.drawing.layout.shell_layout` + """ + draw(G, pos=nx.shell_layout(G, nlist=nlist), **kwargs) + + +def draw_planar(G, **kwargs): + """Draw a planar networkx graph `G` with planar layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.planar_layout(G), **kwargs) + + Parameters + ---------- + G : graph + A planar networkx graph + + kwargs : optional keywords + See `draw_networkx` for a description of optional keywords. + + Raises + ------ + NetworkXException + When `G` is not planar + + Notes + ----- + The layout is computed each time this function is called. + For repeated drawing it is much more efficient to call + `~networkx.drawing.layout.planar_layout` directly and reuse the result:: + + >>> G = nx.path_graph(5) + >>> pos = nx.planar_layout(G) + >>> nx.draw(G, pos=pos) # Draw the original graph + >>> # Draw a subgraph, reusing the same node positions + >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red") + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.draw_planar(G) + + See Also + -------- + :func:`~networkx.drawing.layout.planar_layout` + """ + draw(G, pos=nx.planar_layout(G), **kwargs) + + +def draw_forceatlas2(G, **kwargs): + """Draw a networkx graph with forceatlas2 layout. + + This is a convenience function equivalent to:: + + nx.draw(G, pos=nx.forceatlas2_layout(G), **kwargs) + + Parameters + ---------- + G : graph + A networkx graph + + kwargs : optional keywords + See networkx.draw_networkx() for a description of optional keywords, + with the exception of the pos parameter which is not used by this + function. + """ + draw(G, pos=nx.forceatlas2_layout(G), **kwargs) + + +def apply_alpha(colors, alpha, elem_list, cmap=None, vmin=None, vmax=None): + """Apply an alpha (or list of alphas) to the colors provided. + + Parameters + ---------- + + colors : color string or array of floats (default='r') + Color of element. Can be a single color format string, + or a sequence of colors with the same length as nodelist. + If numeric values are specified they will be mapped to + colors using the cmap and vmin,vmax parameters. See + matplotlib.scatter for more details. + + alpha : float or array of floats + Alpha values for elements. This can be a single alpha value, in + which case it will be applied to all the elements of color. Otherwise, + if it is an array, the elements of alpha will be applied to the colors + in order (cycling through alpha multiple times if necessary). + + elem_list : array of networkx objects + The list of elements which are being colored. These could be nodes, + edges or labels. + + cmap : matplotlib colormap + Color map for use if colors is a list of floats corresponding to points + on a color mapping. + + vmin, vmax : float + Minimum and maximum values for normalizing colors if a colormap is used + + Returns + ------- + + rgba_colors : numpy ndarray + Array containing RGBA format values for each of the node colours. 
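+ + Examples + -------- + A minimal sketch; the color and alpha values are illustrative: + + >>> apply_alpha("red", [0.2, 0.8], [0, 1]).shape + (2, 4)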
+ + """ + from itertools import cycle, islice + + import matplotlib as mpl + import matplotlib.cm # call as mpl.cm + import matplotlib.colors # call as mpl.colors + import numpy as np + + # If we have been provided with a list of numbers as long as elem_list, + # apply the color mapping. + if len(colors) == len(elem_list) and isinstance(colors[0], Number): + mapper = mpl.cm.ScalarMappable(cmap=cmap) + mapper.set_clim(vmin, vmax) + rgba_colors = mapper.to_rgba(colors) + # Otherwise, convert colors to matplotlib's RGB using the colorConverter + # object. These are converted to numpy ndarrays to be consistent with the + # to_rgba method of ScalarMappable. + else: + try: + rgba_colors = np.array([mpl.colors.colorConverter.to_rgba(colors)]) + except ValueError: + rgba_colors = np.array( + [mpl.colors.colorConverter.to_rgba(color) for color in colors] + ) + # Set the final column of the rgba_colors to have the relevant alpha values + try: + # If alpha is longer than the number of colors, resize to the number of + # elements. Also, if rgba_colors.size (the number of elements of + # rgba_colors) is the same as the number of elements, resize the array, + # to avoid it being interpreted as a colormap by scatter() + if len(alpha) > len(rgba_colors) or rgba_colors.size == len(elem_list): + rgba_colors = np.resize(rgba_colors, (len(elem_list), 4)) + rgba_colors[1:, 0] = rgba_colors[0, 0] + rgba_colors[1:, 1] = rgba_colors[0, 1] + rgba_colors[1:, 2] = rgba_colors[0, 2] + rgba_colors[:, 3] = list(islice(cycle(alpha), len(rgba_colors))) + except TypeError: + rgba_colors[:, -1] = alpha + return rgba_colors diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..2f6da87 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_agraph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_agraph.cpython-312.pyc new file mode 100644 index 0000000..67dfbd2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_agraph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_latex.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_latex.cpython-312.pyc new file mode 100644 index 0000000..2145144 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_latex.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_layout.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_layout.cpython-312.pyc new file mode 100644 index 0000000..fc6b408 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_layout.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_pydot.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_pydot.cpython-312.pyc new file mode 100644 index 0000000..79a2816 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_pydot.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_pylab.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_pylab.cpython-312.pyc new file mode 100644 index 0000000..515a191 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/__pycache__/test_pylab.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_complex.png b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_complex.png new file mode 100644 index 0000000..e4b34e8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_complex.png differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_empty_graph.png b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_empty_graph.png new file mode 100644 index 0000000..5d2f4e7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_empty_graph.png differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_house_with_colors.png b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_house_with_colors.png new file mode 100644 index 0000000..08862d7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_house_with_colors.png differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_labels_and_colors.png b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_labels_and_colors.png new file mode 100644 index 0000000..f82737d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_labels_and_colors.png differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_shortest_path.png b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_shortest_path.png new file mode 100644 index 0000000..87394a1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_display_shortest_path.png differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png new file mode 100644 index 0000000..31f4962 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/baseline/test_house_with_colors.png differ diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_agraph.py b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_agraph.py new file mode 100644 index 0000000..f580ad9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_agraph.py @@ -0,0 +1,240 @@ +"""Unit tests for PyGraphviz interface.""" + +import warnings + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + +pygraphviz = pytest.importorskip("pygraphviz") + + +class TestAGraph: + def 
build_graph(self, G): + edges = [("A", "B"), ("A", "C"), ("A", "C"), ("B", "C"), ("A", "D")] + G.add_edges_from(edges) + G.add_node("E") + G.graph["metal"] = "bronze" + return G + + def assert_equal(self, G1, G2): + assert nodes_equal(G1.nodes(), G2.nodes()) + assert edges_equal(G1.edges(), G2.edges()) + assert G1.graph["metal"] == G2.graph["metal"] + + @pytest.mark.parametrize( + "G", (nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()) + ) + def test_agraph_roundtripping(self, G, tmp_path): + G = self.build_graph(G) + A = nx.nx_agraph.to_agraph(G) + H = nx.nx_agraph.from_agraph(A) + self.assert_equal(G, H) + + fname = tmp_path / "test.dot" + nx.drawing.nx_agraph.write_dot(H, fname) + Hin = nx.nx_agraph.read_dot(fname) + self.assert_equal(H, Hin) + + fname = tmp_path / "fh_test.dot" + with open(fname, "w") as fh: + nx.drawing.nx_agraph.write_dot(H, fh) + + with open(fname) as fh: + Hin = nx.nx_agraph.read_dot(fh) + self.assert_equal(H, Hin) + + def test_from_agraph_name(self): + G = nx.Graph(name="test") + A = nx.nx_agraph.to_agraph(G) + H = nx.nx_agraph.from_agraph(A) + assert G.name == "test" + + @pytest.mark.parametrize( + "graph_class", (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph) + ) + def test_from_agraph_create_using(self, graph_class): + G = nx.path_graph(3) + A = nx.nx_agraph.to_agraph(G) + H = nx.nx_agraph.from_agraph(A, create_using=graph_class) + assert isinstance(H, graph_class) + + def test_from_agraph_named_edges(self): + # Create an AGraph from an existing (non-multi) Graph + G = nx.Graph() + G.add_nodes_from([0, 1]) + A = nx.nx_agraph.to_agraph(G) + # Add edge (+ name, given by key) to the AGraph + A.add_edge(0, 1, key="foo") + # Verify a.name roundtrips out to 'key' in from_agraph + H = nx.nx_agraph.from_agraph(A) + assert isinstance(H, nx.Graph) + assert ("0", "1", {"key": "foo"}) in H.edges(data=True) + + def test_to_agraph_with_nodedata(self): + G = nx.Graph() + G.add_node(1, color="red") + A = nx.nx_agraph.to_agraph(G) + assert dict(A.nodes()[0].attr) == {"color": "red"} + + @pytest.mark.parametrize("graph_class", (nx.Graph, nx.MultiGraph)) + def test_to_agraph_with_edgedata(self, graph_class): + G = graph_class() + G.add_nodes_from([0, 1]) + G.add_edge(0, 1, color="yellow") + A = nx.nx_agraph.to_agraph(G) + assert dict(A.edges()[0].attr) == {"color": "yellow"} + + def test_view_pygraphviz_path(self, tmp_path): + G = nx.complete_graph(3) + input_path = str(tmp_path / "graph.png") + out_path, A = nx.nx_agraph.view_pygraphviz(G, path=input_path, show=False) + assert out_path == input_path + # Ensure file is not empty + with open(input_path, "rb") as fh: + data = fh.read() + assert len(data) > 0 + + def test_view_pygraphviz_file_suffix(self, tmp_path): + G = nx.complete_graph(3) + path, A = nx.nx_agraph.view_pygraphviz(G, suffix=1, show=False) + assert path[-6:] == "_1.png" + + def test_view_pygraphviz(self): + G = nx.Graph() # "An empty graph cannot be drawn." 
+ pytest.raises(nx.NetworkXException, nx.nx_agraph.view_pygraphviz, G) + G = nx.barbell_graph(4, 6) + nx.nx_agraph.view_pygraphviz(G, show=False) + + def test_view_pygraphviz_edgelabel(self): + G = nx.Graph() + G.add_edge(1, 2, weight=7) + G.add_edge(2, 3, weight=8) + path, A = nx.nx_agraph.view_pygraphviz(G, edgelabel="weight", show=False) + for edge in A.edges(): + assert edge.attr["weight"] in ("7", "8") + + def test_view_pygraphviz_callable_edgelabel(self): + G = nx.complete_graph(3) + + def foo_label(data): + return "foo" + + path, A = nx.nx_agraph.view_pygraphviz(G, edgelabel=foo_label, show=False) + for edge in A.edges(): + assert edge.attr["label"] == "foo" + + def test_view_pygraphviz_multigraph_edgelabels(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key=0, name="left_fork") + G.add_edge(0, 1, key=1, name="right_fork") + path, A = nx.nx_agraph.view_pygraphviz(G, edgelabel="name", show=False) + edges = A.edges() + assert len(edges) == 2 + for edge in edges: + assert edge.attr["label"].strip() in ("left_fork", "right_fork") + + def test_graph_with_reserved_keywords(self): + # test attribute/keyword clash case for #1582 + # node: n + # edges: u,v + G = nx.Graph() + G = self.build_graph(G) + G.nodes["E"]["n"] = "keyword" + G.edges[("A", "B")]["u"] = "keyword" + G.edges[("A", "B")]["v"] = "keyword" + A = nx.nx_agraph.to_agraph(G) + + def test_view_pygraphviz_no_added_attrs_to_input(self): + G = nx.complete_graph(2) + path, A = nx.nx_agraph.view_pygraphviz(G, show=False) + assert G.graph == {} + + @pytest.mark.xfail(reason="known bug in clean_attrs") + def test_view_pygraphviz_leaves_input_graph_unmodified(self): + G = nx.complete_graph(2) + # Add entries to graph dict that to_agraph handles specially + G.graph["node"] = {"width": "0.80"} + G.graph["edge"] = {"fontsize": "14"} + path, A = nx.nx_agraph.view_pygraphviz(G, show=False) + assert G.graph == {"node": {"width": "0.80"}, "edge": {"fontsize": "14"}} + + def test_graph_with_AGraph_attrs(self): + G = nx.complete_graph(2) + # Add entries to graph dict that to_agraph handles specially + G.graph["node"] = {"width": "0.80"} + G.graph["edge"] = {"fontsize": "14"} + path, A = nx.nx_agraph.view_pygraphviz(G, show=False) + # Ensure user-specified values are not lost + assert dict(A.node_attr)["width"] == "0.80" + assert dict(A.edge_attr)["fontsize"] == "14" + + def test_round_trip_empty_graph(self): + G = nx.Graph() + A = nx.nx_agraph.to_agraph(G) + H = nx.nx_agraph.from_agraph(A) + # assert graphs_equal(G, H) + AA = nx.nx_agraph.to_agraph(H) + HH = nx.nx_agraph.from_agraph(AA) + assert graphs_equal(H, HH) + G.graph["graph"] = {} + G.graph["node"] = {} + G.graph["edge"] = {} + assert graphs_equal(G, HH) + + @pytest.mark.xfail(reason="integer->string node conversion in round trip") + def test_round_trip_integer_nodes(self): + G = nx.complete_graph(3) + A = nx.nx_agraph.to_agraph(G) + H = nx.nx_agraph.from_agraph(A) + assert graphs_equal(G, H) + + def test_graphviz_alias(self): + G = self.build_graph(nx.Graph()) + pos_graphviz = nx.nx_agraph.graphviz_layout(G) + pos_pygraphviz = nx.nx_agraph.pygraphviz_layout(G) + assert pos_graphviz == pos_pygraphviz + + @pytest.mark.parametrize("root", range(5)) + def test_pygraphviz_layout_root(self, root): + # NOTE: test depends on layout prog being deterministic + G = nx.complete_graph(5) + A = nx.nx_agraph.to_agraph(G) + # Get layout with root arg is not None + pygv_layout = nx.nx_agraph.pygraphviz_layout(G, prog="circo", root=root) + # Equivalent layout directly on AGraph + 
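# (prog and root must match the pygraphviz_layout call above; circo lays out + # deterministically here, so the parsed coordinates should agree exactly) +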
A.layout(args=f"-Groot={root}", prog="circo") + # Parse AGraph layout + a1_pos = tuple(float(v) for v in dict(A.get_node("1").attr)["pos"].split(",")) + assert pygv_layout[1] == a1_pos + + def test_2d_layout(self): + G = nx.Graph() + G = self.build_graph(G) + G.graph["dimen"] = 2 + pos = nx.nx_agraph.pygraphviz_layout(G, prog="neato") + pos = list(pos.values()) + assert len(pos) == 5 + assert len(pos[0]) == 2 + + def test_3d_layout(self): + G = nx.Graph() + G = self.build_graph(G) + G.graph["dimen"] = 3 + pos = nx.nx_agraph.pygraphviz_layout(G, prog="neato") + pos = list(pos.values()) + assert len(pos) == 5 + assert len(pos[0]) == 3 + + def test_no_warnings_raised(self): + # Test that no warnings are raised when Networkx graph + # is converted to Pygraphviz graph and 'pos' + # attribute is given + G = nx.Graph() + G.add_node(0, pos=(0, 0)) + G.add_node(1, pos=(1, 1)) + A = nx.nx_agraph.to_agraph(G) + with warnings.catch_warnings(record=True) as record: + A.layout() + assert len(record) == 0 diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_latex.py b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_latex.py new file mode 100644 index 0000000..6ec9b07 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_latex.py @@ -0,0 +1,285 @@ +import pytest + +import networkx as nx + + +def test_tikz_attributes(): + G = nx.path_graph(4, create_using=nx.DiGraph) + pos = {n: (n, n) for n in G} + + G.add_edge(0, 0) + G.edges[(0, 0)]["label"] = "Loop" + G.edges[(0, 0)]["label_options"] = "midway" + + G.nodes[0]["style"] = "blue" + G.nodes[1]["style"] = "line width=3,draw" + G.nodes[2]["style"] = "circle,draw,blue!50" + G.nodes[3]["label"] = "Stop" + G.edges[(0, 1)]["label"] = "1st Step" + G.edges[(0, 1)]["label_options"] = "near end" + G.edges[(2, 3)]["label"] = "3rd Step" + G.edges[(2, 3)]["label_options"] = "near start" + G.edges[(2, 3)]["style"] = "bend left,green" + G.edges[(1, 2)]["label"] = "2nd" + G.edges[(1, 2)]["label_options"] = "pos=0.5" + G.edges[(1, 2)]["style"] = ">->,bend right,line width=3,green!90" + + output_tex = nx.to_latex( + G, + pos=pos, + as_document=False, + tikz_options="[scale=3]", + node_options="style", + edge_options="style", + node_label="label", + edge_label="label", + edge_label_options="label_options", + ) + expected_tex = r"""\begin{figure} + \begin{tikzpicture}[scale=3] + \draw + (0, 0) node[blue] (0){0} + (1, 1) node[line width=3,draw] (1){1} + (2, 2) node[circle,draw,blue!50] (2){2} + (3, 3) node (3){Stop}; + \begin{scope}[->] + \draw (0) to node[near end] {1st Step} (1); + \draw[loop,] (0) to node[midway] {Loop} (0); + \draw[>->,bend right,line width=3,green!90] (1) to node[pos=0.5] {2nd} (2); + \draw[bend left,green] (2) to node[near start] {3rd Step} (3); + \end{scope} + \end{tikzpicture} +\end{figure}""" + + # First, check for consistency line-by-line - if this fails, the mismatched + # line will be shown explicitly in the failure summary + for expected, actual in zip(expected_tex.split("\n"), output_tex.split("\n")): + assert expected == actual + + assert output_tex == expected_tex + + +def test_basic_multiple_graphs(): + H1 = nx.path_graph(4) + H2 = nx.complete_graph(4) + H3 = nx.path_graph(8) + H4 = nx.complete_graph(8) + captions = [ + "Path on 4 nodes", + "Complete graph on 4 nodes", + "Path on 8 nodes", + "Complete graph on 8 nodes", + ] + labels = ["fig2a", "fig2b", "fig2c", "fig2d"] + latex_code = nx.to_latex( + [H1, H2, H3, H4], + n_rows=2, + sub_captions=captions, + sub_labels=labels, + 
) + assert "begin{document}" in latex_code + assert "begin{figure}" in latex_code + assert latex_code.count("begin{subfigure}") == 4 + assert latex_code.count("tikzpicture") == 8 + assert latex_code.count("[-]") == 4 + + +def test_basic_tikz(): + expected_tex = r"""\documentclass{report} +\usepackage{tikz} +\usepackage{subcaption} + +\begin{document} +\begin{figure} + \begin{subfigure}{0.5\textwidth} + \begin{tikzpicture}[scale=2] + \draw[gray!90] + (0.749, 0.702) node[red!90] (0){0} + (1.0, -0.014) node[red!90] (1){1} + (-0.777, -0.705) node (2){2} + (-0.984, 0.042) node (3){3} + (-0.028, 0.375) node[cyan!90] (4){4} + (-0.412, 0.888) node (5){5} + (0.448, -0.856) node (6){6} + (0.003, -0.431) node[cyan!90] (7){7}; + \begin{scope}[->,gray!90] + \draw (0) to (4); + \draw (0) to (5); + \draw (0) to (6); + \draw (0) to (7); + \draw (1) to (4); + \draw (1) to (5); + \draw (1) to (6); + \draw (1) to (7); + \draw (2) to (4); + \draw (2) to (5); + \draw (2) to (6); + \draw (2) to (7); + \draw (3) to (4); + \draw (3) to (5); + \draw (3) to (6); + \draw (3) to (7); + \end{scope} + \end{tikzpicture} + \caption{My tikz number 1 of 2}\label{tikz_1_2} + \end{subfigure} + \begin{subfigure}{0.5\textwidth} + \begin{tikzpicture}[scale=2] + \draw[gray!90] + (0.749, 0.702) node[green!90] (0){0} + (1.0, -0.014) node[green!90] (1){1} + (-0.777, -0.705) node (2){2} + (-0.984, 0.042) node (3){3} + (-0.028, 0.375) node[purple!90] (4){4} + (-0.412, 0.888) node (5){5} + (0.448, -0.856) node (6){6} + (0.003, -0.431) node[purple!90] (7){7}; + \begin{scope}[->,gray!90] + \draw (0) to (4); + \draw (0) to (5); + \draw (0) to (6); + \draw (0) to (7); + \draw (1) to (4); + \draw (1) to (5); + \draw (1) to (6); + \draw (1) to (7); + \draw (2) to (4); + \draw (2) to (5); + \draw (2) to (6); + \draw (2) to (7); + \draw (3) to (4); + \draw (3) to (5); + \draw (3) to (6); + \draw (3) to (7); + \end{scope} + \end{tikzpicture} + \caption{My tikz number 2 of 2}\label{tikz_2_2} + \end{subfigure} + \caption{A graph generated with python and latex.} +\end{figure} +\end{document}""" + + edges = [ + (0, 4), + (0, 5), + (0, 6), + (0, 7), + (1, 4), + (1, 5), + (1, 6), + (1, 7), + (2, 4), + (2, 5), + (2, 6), + (2, 7), + (3, 4), + (3, 5), + (3, 6), + (3, 7), + ] + G = nx.DiGraph() + G.add_nodes_from(range(8)) + G.add_edges_from(edges) + pos = { + 0: (0.7490296171687696, 0.702353520257394), + 1: (1.0, -0.014221357723796535), + 2: (-0.7765783344161441, -0.7054170966808919), + 3: (-0.9842690223417624, 0.04177547602465483), + 4: (-0.02768523817180917, 0.3745724439551441), + 5: (-0.41154855146767433, 0.8880106515525136), + 6: (0.44780153389148264, -0.8561492709269164), + 7: (0.0032499953371383505, -0.43092436645809945), + } + + rc_node_color = {0: "red!90", 1: "red!90", 4: "cyan!90", 7: "cyan!90"} + gp_node_color = {0: "green!90", 1: "green!90", 4: "purple!90", 7: "purple!90"} + + H = G.copy() + nx.set_node_attributes(G, rc_node_color, "color") + nx.set_node_attributes(H, gp_node_color, "color") + + sub_captions = ["My tikz number 1 of 2", "My tikz number 2 of 2"] + sub_labels = ["tikz_1_2", "tikz_2_2"] + + output_tex = nx.to_latex( + [G, H], + [pos, pos], + tikz_options="[scale=2]", + default_node_options="gray!90", + default_edge_options="gray!90", + node_options="color", + sub_captions=sub_captions, + sub_labels=sub_labels, + caption="A graph generated with python and latex.", + n_rows=2, + as_document=True, + ) + + # First, check for consistency line-by-line - if this fails, the mismatched + # line will be shown explicitly in the failure 
summary + for expected, actual in zip(expected_tex.split("\n"), output_tex.split("\n")): + assert expected == actual + # Double-check for document-level consistency + assert output_tex == expected_tex + + +def test_exception_pos_single_graph(to_latex=nx.to_latex): + # smoke test that pos can be a string + G = nx.path_graph(4) + to_latex(G, pos="pos") + + # must include all nodes + pos = {0: (1, 2), 1: (0, 1), 2: (2, 1)} + with pytest.raises(nx.NetworkXError): + to_latex(G, pos) + + # must have 2 values + pos[3] = (1, 2, 3) + with pytest.raises(nx.NetworkXError): + to_latex(G, pos) + pos[3] = 2 + with pytest.raises(nx.NetworkXError): + to_latex(G, pos) + + # check that passes with 2 values + pos[3] = (3, 2) + to_latex(G, pos) + + +def test_exception_multiple_graphs(to_latex=nx.to_latex): + G = nx.path_graph(3) + pos_bad = {0: (1, 2), 1: (0, 1)} + pos_OK = {0: (1, 2), 1: (0, 1), 2: (2, 1)} + fourG = [G, G, G, G] + fourpos = [pos_OK, pos_OK, pos_OK, pos_OK] + + # input single dict to use for all graphs + to_latex(fourG, pos_OK) + with pytest.raises(nx.NetworkXError): + to_latex(fourG, pos_bad) + + # input list of dicts to use for all graphs + to_latex(fourG, fourpos) + with pytest.raises(nx.NetworkXError): + to_latex(fourG, [pos_bad, pos_bad, pos_bad, pos_bad]) + + # every pos dict must include all nodes + with pytest.raises(nx.NetworkXError): + to_latex(fourG, [pos_OK, pos_OK, pos_bad, pos_OK]) + + # test sub_captions and sub_labels (len must match Gbunch) + with pytest.raises(nx.NetworkXError): + to_latex(fourG, fourpos, sub_captions=["hi", "hi"]) + + with pytest.raises(nx.NetworkXError): + to_latex(fourG, fourpos, sub_labels=["hi", "hi"]) + + # all pass + to_latex(fourG, fourpos, sub_captions=["hi"] * 4, sub_labels=["lbl"] * 4) + + +def test_exception_multigraph(): + G = nx.path_graph(4, create_using=nx.MultiGraph) + G.add_edge(1, 2) + with pytest.raises(nx.NetworkXNotImplemented): + nx.to_latex(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_layout.py b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_layout.py new file mode 100644 index 0000000..34d71ef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_layout.py @@ -0,0 +1,631 @@ +"""Unit tests for layout functions.""" + +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestLayout: + @classmethod + def setup_class(cls): + cls.Gi = nx.grid_2d_graph(5, 5) + cls.Gs = nx.Graph() + nx.add_path(cls.Gs, "abcdef") + cls.bigG = nx.grid_2d_graph(25, 25) # > 500 nodes for sparse + + def test_spring_fixed_without_pos(self): + G = nx.path_graph(4) + # No pos dict at all + with pytest.raises(ValueError, match="nodes are fixed without positions"): + nx.spring_layout(G, fixed=[0]) + + pos = {0: (1, 1), 2: (0, 0)} + # Node 1 not in pos dict + with pytest.raises(ValueError, match="nodes are fixed without positions"): + nx.spring_layout(G, fixed=[0, 1], pos=pos) + + # All fixed nodes in pos dict + out = nx.spring_layout(G, fixed=[0, 2], pos=pos) # No ValueError + assert all(np.array_equal(out[n], pos[n]) for n in (0, 2)) + + def test_spring_init_pos(self): + # Tests GH #2448 + import math + + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (2, 3)]) + + init_pos = {0: (0.0, 0.0)} + fixed_pos = [0] + pos = nx.fruchterman_reingold_layout(G, pos=init_pos, fixed=fixed_pos) + has_nan = any(math.isnan(c) for coords in pos.values() for c in coords) + assert not has_nan, "values should not be nan" + + def 
test_smoke_empty_graph(self): + G = [] + nx.random_layout(G) + nx.circular_layout(G) + nx.planar_layout(G) + nx.spring_layout(G) + nx.fruchterman_reingold_layout(G) + nx.spectral_layout(G) + nx.shell_layout(G) + nx.bipartite_layout(G, G) + nx.spiral_layout(G) + nx.multipartite_layout(G) + nx.kamada_kawai_layout(G) + + def test_smoke_int(self): + G = self.Gi + nx.random_layout(G) + nx.circular_layout(G) + nx.planar_layout(G) + nx.spring_layout(G) + nx.forceatlas2_layout(G) + nx.fruchterman_reingold_layout(G) + nx.fruchterman_reingold_layout(self.bigG) + nx.spectral_layout(G) + nx.spectral_layout(G.to_directed()) + nx.spectral_layout(self.bigG) + nx.spectral_layout(self.bigG.to_directed()) + nx.shell_layout(G) + nx.spiral_layout(G) + nx.kamada_kawai_layout(G) + nx.kamada_kawai_layout(G, dim=1) + nx.kamada_kawai_layout(G, dim=3) + nx.arf_layout(G) + + def test_smoke_string(self): + G = self.Gs + nx.random_layout(G) + nx.circular_layout(G) + nx.planar_layout(G) + nx.spring_layout(G) + nx.forceatlas2_layout(G) + nx.fruchterman_reingold_layout(G) + nx.spectral_layout(G) + nx.shell_layout(G) + nx.spiral_layout(G) + nx.kamada_kawai_layout(G) + nx.kamada_kawai_layout(G, dim=1) + nx.kamada_kawai_layout(G, dim=3) + nx.arf_layout(G) + + def check_scale_and_center(self, pos, scale, center): + center = np.array(center) + low = center - scale + hi = center + scale + vpos = np.array(list(pos.values())) + length = vpos.max(0) - vpos.min(0) + assert (length <= 2 * scale).all() + assert (vpos >= low).all() + assert (vpos <= hi).all() + + def test_scale_and_center_arg(self): + sc = self.check_scale_and_center + c = (4, 5) + G = nx.complete_graph(9) + G.add_node(9) + sc(nx.random_layout(G, center=c), scale=0.5, center=(4.5, 5.5)) + # rest can have 2*scale length: [-scale, scale] + sc(nx.spring_layout(G, scale=2, center=c), scale=2, center=c) + sc(nx.spectral_layout(G, scale=2, center=c), scale=2, center=c) + sc(nx.circular_layout(G, scale=2, center=c), scale=2, center=c) + sc(nx.shell_layout(G, scale=2, center=c), scale=2, center=c) + sc(nx.spiral_layout(G, scale=2, center=c), scale=2, center=c) + sc(nx.kamada_kawai_layout(G, scale=2, center=c), scale=2, center=c) + + c = (2, 3, 5) + sc(nx.kamada_kawai_layout(G, dim=3, scale=2, center=c), scale=2, center=c) + + def test_planar_layout_non_planar_input(self): + G = nx.complete_graph(9) + pytest.raises(nx.NetworkXException, nx.planar_layout, G) + + def test_smoke_planar_layout_embedding_input(self): + embedding = nx.PlanarEmbedding() + embedding.set_data({0: [1, 2], 1: [0, 2], 2: [0, 1]}) + nx.planar_layout(embedding) + + def test_default_scale_and_center(self): + sc = self.check_scale_and_center + c = (0, 0) + G = nx.complete_graph(9) + G.add_node(9) + sc(nx.random_layout(G), scale=0.5, center=(0.5, 0.5)) + sc(nx.spring_layout(G), scale=1, center=c) + sc(nx.spectral_layout(G), scale=1, center=c) + sc(nx.circular_layout(G), scale=1, center=c) + sc(nx.shell_layout(G), scale=1, center=c) + sc(nx.spiral_layout(G), scale=1, center=c) + sc(nx.kamada_kawai_layout(G), scale=1, center=c) + + c = (0, 0, 0) + sc(nx.kamada_kawai_layout(G, dim=3), scale=1, center=c) + + def test_circular_planar_and_shell_dim_error(self): + G = nx.path_graph(4) + pytest.raises(ValueError, nx.circular_layout, G, dim=1) + pytest.raises(ValueError, nx.shell_layout, G, dim=1) + pytest.raises(ValueError, nx.shell_layout, G, dim=3) + pytest.raises(ValueError, nx.planar_layout, G, dim=1) + pytest.raises(ValueError, nx.planar_layout, G, dim=3) + + def test_adjacency_interface_numpy(self): + A = 
nx.to_numpy_array(self.Gs) + pos = nx.drawing.layout._fruchterman_reingold(A) + assert pos.shape == (6, 2) + pos = nx.drawing.layout._fruchterman_reingold(A, dim=3) + assert pos.shape == (6, 3) + pos = nx.drawing.layout._sparse_fruchterman_reingold(A) + assert pos.shape == (6, 2) + + def test_adjacency_interface_scipy(self): + A = nx.to_scipy_sparse_array(self.Gs, dtype="d") + pos = nx.drawing.layout._sparse_fruchterman_reingold(A) + assert pos.shape == (6, 2) + pos = nx.drawing.layout._sparse_spectral(A) + assert pos.shape == (6, 2) + pos = nx.drawing.layout._sparse_fruchterman_reingold(A, dim=3) + assert pos.shape == (6, 3) + + def test_single_nodes(self): + G = nx.path_graph(1) + vpos = nx.shell_layout(G) + assert not vpos[0].any() + G = nx.path_graph(4) + vpos = nx.shell_layout(G, [[0], [1, 2], [3]]) + assert not vpos[0].any() + assert vpos[3].any() # ensure node 3 not at origin (#3188) + assert np.linalg.norm(vpos[3]) <= 1 # ensure node 3 fits (#3753) + vpos = nx.shell_layout(G, [[0], [1, 2], [3]], rotate=0) + assert np.linalg.norm(vpos[3]) <= 1 # ensure node 3 fits (#3753) + + def test_smoke_initial_pos_forceatlas2(self): + pos = nx.circular_layout(self.Gi) + npos = nx.forceatlas2_layout(self.Gi, pos=pos) + + def test_smoke_initial_pos_fruchterman_reingold(self): + pos = nx.circular_layout(self.Gi) + npos = nx.fruchterman_reingold_layout(self.Gi, pos=pos) + + def test_smoke_initial_pos_arf(self): + pos = nx.circular_layout(self.Gi) + npos = nx.arf_layout(self.Gi, pos=pos) + + def test_fixed_node_fruchterman_reingold(self): + # Dense version (numpy based) + pos = nx.circular_layout(self.Gi) + npos = nx.spring_layout(self.Gi, pos=pos, fixed=[(0, 0)]) + assert tuple(pos[(0, 0)]) == tuple(npos[(0, 0)]) + # Sparse version (scipy based) + pos = nx.circular_layout(self.bigG) + npos = nx.spring_layout(self.bigG, pos=pos, fixed=[(0, 0)]) + for axis in range(2): + assert pos[(0, 0)][axis] == pytest.approx(npos[(0, 0)][axis], abs=1e-7) + + def test_center_parameter(self): + G = nx.path_graph(1) + nx.random_layout(G, center=(1, 1)) + vpos = nx.circular_layout(G, center=(1, 1)) + assert tuple(vpos[0]) == (1, 1) + vpos = nx.planar_layout(G, center=(1, 1)) + assert tuple(vpos[0]) == (1, 1) + vpos = nx.spring_layout(G, center=(1, 1)) + assert tuple(vpos[0]) == (1, 1) + vpos = nx.fruchterman_reingold_layout(G, center=(1, 1)) + assert tuple(vpos[0]) == (1, 1) + vpos = nx.spectral_layout(G, center=(1, 1)) + assert tuple(vpos[0]) == (1, 1) + vpos = nx.shell_layout(G, center=(1, 1)) + assert tuple(vpos[0]) == (1, 1) + vpos = nx.spiral_layout(G, center=(1, 1)) + assert tuple(vpos[0]) == (1, 1) + + def test_center_wrong_dimensions(self): + G = nx.path_graph(1) + assert id(nx.spring_layout) == id(nx.fruchterman_reingold_layout) + pytest.raises(ValueError, nx.random_layout, G, center=(1, 1, 1)) + pytest.raises(ValueError, nx.circular_layout, G, center=(1, 1, 1)) + pytest.raises(ValueError, nx.planar_layout, G, center=(1, 1, 1)) + pytest.raises(ValueError, nx.spring_layout, G, center=(1, 1, 1)) + pytest.raises(ValueError, nx.spring_layout, G, dim=3, center=(1, 1)) + pytest.raises(ValueError, nx.spectral_layout, G, center=(1, 1, 1)) + pytest.raises(ValueError, nx.spectral_layout, G, dim=3, center=(1, 1)) + pytest.raises(ValueError, nx.shell_layout, G, center=(1, 1, 1)) + pytest.raises(ValueError, nx.spiral_layout, G, center=(1, 1, 1)) + pytest.raises(ValueError, nx.kamada_kawai_layout, G, center=(1, 1, 1)) + + def test_empty_graph(self): + G = nx.empty_graph() + vpos = nx.random_layout(G, center=(1, 1)) + 
assert vpos == {} + vpos = nx.circular_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.planar_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.bipartite_layout(G, G) + assert vpos == {} + vpos = nx.spring_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.fruchterman_reingold_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.spectral_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.shell_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.spiral_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.multipartite_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.kamada_kawai_layout(G, center=(1, 1)) + assert vpos == {} + vpos = nx.forceatlas2_layout(G) + assert vpos == {} + vpos = nx.arf_layout(G) + assert vpos == {} + + def test_bipartite_layout(self): + G = nx.complete_bipartite_graph(3, 5) + top, bottom = nx.bipartite.sets(G) + + vpos = nx.bipartite_layout(G, top) + assert len(vpos) == len(G) + + top_x = vpos[list(top)[0]][0] + bottom_x = vpos[list(bottom)[0]][0] + for node in top: + assert vpos[node][0] == top_x + for node in bottom: + assert vpos[node][0] == bottom_x + + vpos = nx.bipartite_layout( + G, top, align="horizontal", center=(2, 2), scale=2, aspect_ratio=1 + ) + assert len(vpos) == len(G) + + top_y = vpos[list(top)[0]][1] + bottom_y = vpos[list(bottom)[0]][1] + for node in top: + assert vpos[node][1] == top_y + for node in bottom: + assert vpos[node][1] == bottom_y + + pytest.raises(ValueError, nx.bipartite_layout, G, top, align="foo") + + def test_multipartite_layout(self): + sizes = (0, 5, 7, 2, 8) + G = nx.complete_multipartite_graph(*sizes) + + vpos = nx.multipartite_layout(G) + assert len(vpos) == len(G) + + start = 0 + for n in sizes: + end = start + n + assert all(vpos[start][0] == vpos[i][0] for i in range(start + 1, end)) + start += n + + vpos = nx.multipartite_layout(G, align="horizontal", scale=2, center=(2, 2)) + assert len(vpos) == len(G) + + start = 0 + for n in sizes: + end = start + n + assert all(vpos[start][1] == vpos[i][1] for i in range(start + 1, end)) + start += n + + pytest.raises(ValueError, nx.multipartite_layout, G, align="foo") + + def test_kamada_kawai_costfn_1d(self): + costfn = nx.drawing.layout._kamada_kawai_costfn + + pos = np.array([4.0, 7.0]) + invdist = 1 / np.array([[0.1, 2.0], [2.0, 0.3]]) + + cost, grad = costfn(pos, np, invdist, meanweight=0, dim=1) + + assert cost == pytest.approx(((3 / 2.0 - 1) ** 2), abs=1e-7) + assert grad[0] == pytest.approx((-0.5), abs=1e-7) + assert grad[1] == pytest.approx(0.5, abs=1e-7) + + def check_kamada_kawai_costfn(self, pos, invdist, meanwt, dim): + costfn = nx.drawing.layout._kamada_kawai_costfn + + cost, grad = costfn(pos.ravel(), np, invdist, meanweight=meanwt, dim=dim) + + expected_cost = 0.5 * meanwt * np.sum(np.sum(pos, axis=0) ** 2) + for i in range(pos.shape[0]): + for j in range(i + 1, pos.shape[0]): + diff = np.linalg.norm(pos[i] - pos[j]) + expected_cost += (diff * invdist[i][j] - 1.0) ** 2 + + assert cost == pytest.approx(expected_cost, abs=1e-7) + + dx = 1e-4 + for nd in range(pos.shape[0]): + for dm in range(pos.shape[1]): + idx = nd * pos.shape[1] + dm + ps = pos.flatten() + + ps[idx] += dx + cplus = costfn(ps, np, invdist, meanweight=meanwt, dim=pos.shape[1])[0] + + ps[idx] -= 2 * dx + cminus = costfn(ps, np, invdist, meanweight=meanwt, dim=pos.shape[1])[0] + + assert grad[idx] == pytest.approx((cplus - cminus) / (2 * dx), abs=1e-5) + + def test_kamada_kawai_costfn(self): + invdist = 1 / np.array([[0.1, 2.1, 1.7], [2.1, 0.2, 0.6], [1.7, 
0.6, 0.3]])
+        meanwt = 0.3
+
+        # 2d
+        pos = np.array([[1.3, -3.2], [2.7, -0.3], [5.1, 2.5]])
+
+        self.check_kamada_kawai_costfn(pos, invdist, meanwt, 2)
+
+        # 3d
+        pos = np.array([[0.9, 8.6, -8.7], [-10, -0.5, -7.1], [9.1, -8.1, 1.6]])
+
+        self.check_kamada_kawai_costfn(pos, invdist, meanwt, 3)
+
+    def test_spiral_layout(self):
+        G = self.Gs
+
+        # A lower value of resolution should result in a more compact layout:
+        # intuitively, the total distance travelled from the start node to the
+        # end node, transiting through every node in between, will be smaller,
+        # assuming rescaling does not occur on the computed node positions
+        pos_standard = np.array(list(nx.spiral_layout(G, resolution=0.35).values()))
+        pos_tighter = np.array(list(nx.spiral_layout(G, resolution=0.34).values()))
+        distances = np.linalg.norm(pos_standard[:-1] - pos_standard[1:], axis=1)
+        distances_tighter = np.linalg.norm(pos_tighter[:-1] - pos_tighter[1:], axis=1)
+        assert sum(distances) > sum(distances_tighter)
+
+        # equidistant=True should return near-equidistant points after the first value
+        pos_equidistant = np.array(list(nx.spiral_layout(G, equidistant=True).values()))
+        distances_equidistant = np.linalg.norm(
+            pos_equidistant[:-1] - pos_equidistant[1:], axis=1
+        )
+        assert np.allclose(
+            distances_equidistant[1:], distances_equidistant[-1], atol=0.01
+        )
+
+    def test_spiral_layout_equidistant(self):
+        G = nx.path_graph(10)
+        nx.spiral_layout(G, equidistant=True, store_pos_as="pos")
+        pos = nx.get_node_attributes(G, "pos")
+        # Extract individual node positions as an array
+        p = np.array(list(pos.values()))
+        # Elementwise-distance between node positions
+        dist = np.linalg.norm(p[1:] - p[:-1], axis=1)
+        assert np.allclose(np.diff(dist), 0, atol=1e-3)
+
+    def test_forceatlas2_layout_partial_input_test(self):
+        # check whether partial pos input still returns a full proper position
+        G = self.Gs
+        node = nx.utils.arbitrary_element(G)
+        pos = nx.circular_layout(G)
+        del pos[node]
+        pos = nx.forceatlas2_layout(G, pos=pos)
+        assert len(pos) == len(G)
+
+    def test_rescale_layout_dict(self):
+        G = nx.empty_graph()
+        vpos = nx.random_layout(G, center=(1, 1))
+        assert nx.rescale_layout_dict(vpos) == {}
+
+        G = nx.empty_graph(2)
+        vpos = {0: (0.0, 0.0), 1: (1.0, 1.0)}
+        s_vpos = nx.rescale_layout_dict(vpos)
+        assert np.linalg.norm([sum(x) for x in zip(*s_vpos.values())]) < 1e-6
+
+        G = nx.empty_graph(3)
+        vpos = {0: (0, 0), 1: (1, 1), 2: (0.5, 0.5)}
+        s_vpos = nx.rescale_layout_dict(vpos)
+
+        expectation = {
+            0: np.array((-1, -1)),
+            1: np.array((1, 1)),
+            2: np.array((0, 0)),
+        }
+        for k, v in expectation.items():
+            assert (s_vpos[k] == v).all()
+        s_vpos = nx.rescale_layout_dict(vpos, scale=2)
+        expectation = {
+            0: np.array((-2, -2)),
+            1: np.array((2, 2)),
+            2: np.array((0, 0)),
+        }
+        for k, v in expectation.items():
+            assert (s_vpos[k] == v).all()
+
+    def test_arf_layout_partial_input_test(self):
+        # Checks whether partial pos input still returns a proper position.
+        G = self.Gs
+        node = nx.utils.arbitrary_element(G)
+        pos = nx.circular_layout(G)
+        del pos[node]
+        pos = nx.arf_layout(G, pos=pos)
+        assert len(pos) == len(G)
+
+    def test_arf_layout_negative_a_check(self):
+        """
+        Checks that invalid input parameters raise errors.
For example, `a` should be larger than 1 + """ + G = self.Gs + pytest.raises(ValueError, nx.arf_layout, G=G, a=-1) + + def test_smoke_seed_input(self): + G = self.Gs + nx.random_layout(G, seed=42) + nx.spring_layout(G, seed=42) + nx.arf_layout(G, seed=42) + nx.forceatlas2_layout(G, seed=42) + + def test_node_at_center(self): + # see gh-7791 avoid divide by zero + G = nx.path_graph(3) + orig_pos = {i: [i - 1, 0.0] for i in range(3)} + new_pos = nx.forceatlas2_layout(G, pos=orig_pos) + + def test_initial_only_some_pos(self): + G = nx.path_graph(3) + orig_pos = {i: [i - 1, 0.0] for i in range(2)} + new_pos = nx.forceatlas2_layout(G, pos=orig_pos, seed=42) + + +def test_multipartite_layout_nonnumeric_partition_labels(): + """See gh-5123.""" + G = nx.Graph() + G.add_node(0, subset="s0") + G.add_node(1, subset="s0") + G.add_node(2, subset="s1") + G.add_node(3, subset="s1") + G.add_edges_from([(0, 2), (0, 3), (1, 2)]) + pos = nx.multipartite_layout(G) + assert len(pos) == len(G) + + +def test_multipartite_layout_layer_order(): + """Return the layers in sorted order if the layers of the multipartite + graph are sortable. See gh-5691""" + G = nx.Graph() + node_group = dict(zip(("a", "b", "c", "d", "e"), (2, 3, 1, 2, 4))) + for node, layer in node_group.items(): + G.add_node(node, subset=layer) + + # Horizontal alignment, therefore y-coord determines layers + pos = nx.multipartite_layout(G, align="horizontal") + + layers = nx.utils.groups(node_group) + pos_from_layers = nx.multipartite_layout(G, align="horizontal", subset_key=layers) + for (n1, p1), (n2, p2) in zip(pos.items(), pos_from_layers.items()): + assert n1 == n2 and (p1 == p2).all() + + # Nodes "a" and "d" are in the same layer + assert pos["a"][-1] == pos["d"][-1] + # positions should be sorted according to layer + assert pos["c"][-1] < pos["a"][-1] < pos["b"][-1] < pos["e"][-1] + + # Make sure that multipartite_layout still works when layers are not sortable + G.nodes["a"]["subset"] = "layer_0" # Can't sort mixed strs/ints + pos_nosort = nx.multipartite_layout(G) # smoke test: this should not raise + assert pos_nosort.keys() == pos.keys() + + +def _num_nodes_per_bfs_layer(pos): + """Helper function to extract the number of nodes in each layer of bfs_layout""" + x = np.array(list(pos.values()))[:, 0] # node positions in layered dimension + _, layer_count = np.unique(x, return_counts=True) + return layer_count + + +@pytest.mark.parametrize("n", range(2, 7)) +def test_bfs_layout_complete_graph(n): + """The complete graph should result in two layers: the starting node and + a second layer containing all neighbors.""" + G = nx.complete_graph(n) + nx.bfs_layout(G, start=0, store_pos_as="pos") + pos = nx.get_node_attributes(G, "pos") + assert np.array_equal(_num_nodes_per_bfs_layer(pos), [1, n - 1]) + + +def test_bfs_layout_barbell(): + G = nx.barbell_graph(5, 3) + # Start in one of the "bells" + pos = nx.bfs_layout(G, start=0) + # start, bell-1, [1] * len(bar)+1, bell-1 + expected_nodes_per_layer = [1, 4, 1, 1, 1, 1, 4] + assert np.array_equal(_num_nodes_per_bfs_layer(pos), expected_nodes_per_layer) + # Start in the other "bell" - expect same layer pattern + pos = nx.bfs_layout(G, start=12) + assert np.array_equal(_num_nodes_per_bfs_layer(pos), expected_nodes_per_layer) + # Starting in the center of the bar, expect layers to be symmetric + pos = nx.bfs_layout(G, start=6) + # Expected layers: {6 (start)}, {5, 7}, {4, 8}, {8 nodes from remainder of bells} + expected_nodes_per_layer = [1, 2, 2, 8] + assert 
np.array_equal(_num_nodes_per_bfs_layer(pos), expected_nodes_per_layer) + + +def test_bfs_layout_disconnected(): + G = nx.complete_graph(5) + G.add_edges_from([(10, 11), (11, 12)]) + with pytest.raises(nx.NetworkXError, match="bfs_layout didn't include all nodes"): + nx.bfs_layout(G, start=0) + + +def test_bipartite_layout_default_nodes_raises_non_bipartite_input(): + G = nx.complete_graph(5) + with pytest.raises(nx.NetworkXError, match="Graph is not bipartite"): + nx.bipartite_layout(G) + # No exception if nodes are explicitly specified + pos = nx.bipartite_layout(G, nodes=[2, 3]) + + +def test_bipartite_layout_default_nodes(): + G = nx.complete_bipartite_graph(3, 3) + pos = nx.bipartite_layout(G) # no nodes specified + # X coords of nodes should be the same within the bipartite sets + for nodeset in nx.bipartite.sets(G): + xs = [pos[k][0] for k in nodeset] + assert all(x == pytest.approx(xs[0]) for x in xs) + + +@pytest.mark.parametrize( + "layout", + [ + nx.random_layout, + nx.circular_layout, + nx.shell_layout, + nx.spring_layout, + nx.kamada_kawai_layout, + nx.spectral_layout, + nx.planar_layout, + nx.spiral_layout, + nx.forceatlas2_layout, + ], +) +def test_layouts_negative_dim(layout): + """Test all layouts that support dim kwarg handle invalid inputs.""" + G = nx.path_graph(4) + valid_err_msgs = "|".join( + [ + "negative dimensions.*not allowed", + "can only handle 2", + "cannot handle.*2", + ] + ) + with pytest.raises(ValueError, match=valid_err_msgs): + layout(G, dim=-1) + + +@pytest.mark.parametrize( + ("num_nodes", "expected_method"), [(100, "force"), (501, "energy")] +) +@pytest.mark.parametrize( + "extra_layout_kwargs", + [ + {}, # No extra kwargs + {"pos": {0: (0, 0)}, "fixed": [0]}, # Fixed node position + {"dim": 3}, # 3D layout + ], +) +def test_spring_layout_graph_size_heuristic( + num_nodes, expected_method, extra_layout_kwargs +): + """Expect 'force' layout for n < 500 and 'energy' for n >= 500""" + G = nx.cycle_graph(num_nodes) + # Seeded layout to compare explicit method to one determined by "auto" + seed = 163674319 + + # Compare explicit method to auto method + expected = nx.spring_layout( + G, method=expected_method, seed=seed, **extra_layout_kwargs + ) + actual = nx.spring_layout(G, method="auto", seed=seed, **extra_layout_kwargs) + assert np.allclose(list(expected.values()), list(actual.values()), atol=1e-5) diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_pydot.py b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_pydot.py new file mode 100644 index 0000000..acf93d7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_pydot.py @@ -0,0 +1,146 @@ +"""Unit tests for pydot drawing functions.""" + +from io import StringIO + +import pytest + +import networkx as nx +from networkx.utils import graphs_equal + +pydot = pytest.importorskip("pydot") + + +class TestPydot: + @pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) + @pytest.mark.parametrize("prog", ("neato", "dot")) + def test_pydot(self, G, prog, tmp_path): + """ + Validate :mod:`pydot`-based usage of the passed NetworkX graph with the + passed basename of an external GraphViz command (e.g., `dot`, `neato`). + """ + + # Set the name of this graph to... "G". Failing to do so will + # subsequently trip an assertion expecting this name. + G.graph["name"] = "G" + + # Add arbitrary nodes and edges to the passed empty graph. 
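+        # Node "E" is left isolated so the round trip also covers nodes with
+        # no incident edges.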
+        G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("A", "D")])
+        G.add_node("E")
+
+        # Validate layout of this graph with the passed GraphViz command.
+        graph_layout = nx.nx_pydot.pydot_layout(G, prog=prog)
+        assert isinstance(graph_layout, dict)
+
+        # Convert this graph into a "pydot.Dot" instance.
+        P = nx.nx_pydot.to_pydot(G)
+
+        # Convert this "pydot.Dot" instance back into a graph of the same type.
+        G2 = G.__class__(nx.nx_pydot.from_pydot(P))
+
+        # Validate the original and resulting graphs to be the same.
+        assert graphs_equal(G, G2)
+
+        fname = tmp_path / "out.dot"
+
+        # Serialize this "pydot.Dot" instance to a temporary file in dot format.
+        P.write_raw(fname)
+
+        # Deserialize a list of new "pydot.Dot" instances back from this file.
+        Pin_list = pydot.graph_from_dot_file(path=fname, encoding="utf-8")
+
+        # Validate this file to contain only one graph.
+        assert len(Pin_list) == 1
+
+        # The single "pydot.Dot" instance deserialized from this file.
+        Pin = Pin_list[0]
+
+        # Sorted list of all nodes in the original "pydot.Dot" instance.
+        n1 = sorted(p.get_name() for p in P.get_node_list())
+
+        # Sorted list of all nodes in the deserialized "pydot.Dot" instance.
+        n2 = sorted(p.get_name() for p in Pin.get_node_list())
+
+        # Validate these instances to contain the same nodes.
+        assert n1 == n2
+
+        # Sorted list of all edges in the original "pydot.Dot" instance.
+        e1 = sorted((e.get_source(), e.get_destination()) for e in P.get_edge_list())
+
+        # Sorted list of all edges in the deserialized "pydot.Dot" instance.
+        e2 = sorted((e.get_source(), e.get_destination()) for e in Pin.get_edge_list())
+
+        # Validate these instances to contain the same edges.
+        assert e1 == e2
+
+        # Deserialize a new graph of the same type back from this file.
+        Hin = nx.nx_pydot.read_dot(fname)
+        Hin = G.__class__(Hin)
+
+        # Validate the original and resulting graphs to be the same.
+        assert graphs_equal(G, Hin)
+
+    def test_read_write(self):
+        G = nx.MultiGraph()
+        G.graph["name"] = "G"
+        G.add_edge("1", "2", key="0")  # read assumes strings
+        fh = StringIO()
+        nx.nx_pydot.write_dot(G, fh)
+        fh.seek(0)
+        H = nx.nx_pydot.read_dot(fh)
+        assert graphs_equal(G, H)
+
+
+def test_pydot_issue_7581(tmp_path):
+    """Validate that `nx_pydot.pydot_layout` handles nodes
+    with characters like "\n", " ".
+
+    Those characters cause `pydot` to escape and quote them on output,
+    which caused #7581.
+    """
+    G = nx.Graph()
+    G.add_edges_from([("A\nbig test", "B"), ("A\nbig test", "C"), ("B", "C")])
+
+    graph_layout = nx.nx_pydot.pydot_layout(G, prog="dot")
+    assert isinstance(graph_layout, dict)
+
+    # Convert the graph to pydot and back into a graph. There should be no difference.
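+    # (from_pydot must strip the quoting pydot adds around such names; otherwise
+    # graphs_equal below would see different node labels.)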
+ P = nx.nx_pydot.to_pydot(G) + G2 = nx.Graph(nx.nx_pydot.from_pydot(P)) + assert graphs_equal(G, G2) + + +@pytest.mark.parametrize( + "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_hashable_pydot(graph_type): + # gh-5790 + G = graph_type() + G.add_edge("5", frozenset([1]), t='"Example:A"', l=False) + G.add_edge("1", 2, w=True, t=("node1",), l=frozenset(["node1"])) + G.add_edge("node", (3, 3), w="string") + + assert [ + {"t": '"Example:A"', "l": "False"}, + {"w": "True", "t": "('node1',)", "l": "frozenset({'node1'})"}, + {"w": "string"}, + ] == [ + attr + for _, _, attr in nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G)).edges.data() + ] + + assert {str(i) for i in G.nodes()} == set( + nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G)).nodes + ) + + +def test_pydot_numerical_name(): + G = nx.Graph() + G.add_edges_from([("A", "B"), (0, 1)]) + graph_layout = nx.nx_pydot.pydot_layout(G, prog="dot") + assert isinstance(graph_layout, dict) + assert "0" not in graph_layout + assert 0 in graph_layout + assert "1" not in graph_layout + assert 1 in graph_layout + assert "A" in graph_layout + assert "B" in graph_layout diff --git a/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_pylab.py b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_pylab.py new file mode 100644 index 0000000..5335dd4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/drawing/tests/test_pylab.py @@ -0,0 +1,1756 @@ +"""Unit tests for matplotlib drawing functions.""" + +import itertools +import os +import warnings + +import pytest + +import networkx as nx + +mpl = pytest.importorskip("matplotlib") +np = pytest.importorskip("numpy") +mpl.use("PS") +plt = pytest.importorskip("matplotlib.pyplot") +plt.rcParams["text.usetex"] = False + + +barbell = nx.barbell_graph(4, 6) + +defaults = { + "node_pos": None, + "node_visible": True, + "node_color": "#1f78b4", + "node_size": 300, + "node_label": { + "size": 12, + "color": "#000000", + "family": "sans-serif", + "weight": "normal", + "alpha": 1.0, + "background_color": None, + "background_alpha": None, + "h_align": "center", + "v_align": "center", + "bbox": None, + }, + "node_shape": "o", + "node_alpha": 1.0, + "node_border_width": 1.0, + "node_border_color": "face", + "edge_visible": True, + "edge_width": 1.0, + "edge_color": "#000000", + "edge_label": { + "size": 12, + "color": "#000000", + "family": "sans-serif", + "weight": "normal", + "alpha": 1.0, + "bbox": {"boxstyle": "round", "ec": (1.0, 1.0, 1.0), "fc": (1.0, 1.0, 1.0)}, + "h_align": "center", + "v_align": "center", + "pos": 0.5, + "rotate": True, + }, + "edge_style": "-", + "edge_alpha": 1.0, + # These are for undirected-graphs. 
Directed graphs should use "-|>" and 10, respectively.
+    "edge_arrowstyle": "-",
+    "edge_arrowsize": 0,
+    "edge_curvature": "arc3",
+    "edge_source_margin": 0,
+    "edge_target_margin": 0,
+}
+
+
+@pytest.mark.parametrize(
+    ("param_name", "param_value", "expected"),
+    (
+        ("node_color", None, defaults["node_color"]),
+        ("node_color", "#FF0000", "red"),
+        ("node_color", "color", "lime"),
+    ),
+)
+def test_display_arg_handling_node_color(param_name, param_value, expected):
+    G = nx.path_graph(4)
+    nx.set_node_attributes(G, "#00FF00", "color")
+    canvas = plt.figure().add_subplot(111)
+    nx.display(G, canvas=canvas, **{param_name: param_value})
+    assert mpl.colors.same_color(canvas.get_children()[0].get_edgecolors()[0], expected)
+    plt.close()
+
+
+@pytest.mark.parametrize(
+    ("param_value", "expected"),
+    (
+        (None, (1, 1, 1, 1)),  # default value
+        (0.5, (0.5, 0.5, 0.5, 0.5)),
+        ("n_alpha", (1.0, 1 / 2, 1 / 3, 0.25)),
+    ),
+)
+def test_display_arg_handling_node_alpha(param_value, expected):
+    G = nx.path_graph(4)
+    nx.set_node_attributes(G, {n: 1 / (n + 1) for n in G.nodes()}, "n_alpha")
+    canvas = plt.figure().add_subplot(111)
+    nx.display(G, canvas=canvas, node_alpha=param_value)
+    assert all(
+        canvas.get_children()[0].get_fc()[:, 3] == expected
+    )  # Extract just the alpha from the node colors
+    plt.close()
+
+
+def test_display_node_position():
+    G = nx.path_graph(4)
+    nx.set_node_attributes(G, {n: (n, n) for n in G.nodes()}, "pos")
+    canvas = plt.figure().add_subplot(111)
+    nx.display(G, canvas=canvas, node_pos="pos")
+    assert np.all(
+        canvas.get_children()[0].get_offsets().data == [[0, 0], [1, 1], [2, 2], [3, 3]]
+    )
+    plt.close()
+
+
+@pytest.mark.mpl_image_compare
+def test_display_house_with_colors():
+    """
+    Originally, I wanted to use the exact same image as test_house_with_colors.
+    But I can't seem to find the correct value for the margins to get the figures
+    to line up perfectly. To the human eye, these visualizations are basically the
+    same.
+ """ + G = nx.house_graph() + fig, ax = plt.subplots() + nx.set_node_attributes( + G, {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1), 4: (0.5, 2.0)}, "pos" + ) + nx.set_node_attributes( + G, + { + n: { + "size": 3000 if n != 4 else 2000, + "color": "tab:blue" if n != 4 else "tab:orange", + } + for n in G.nodes() + }, + ) + nx.display( + G, + node_pos="pos", + edge_alpha=0.5, + edge_width=6, + node_label=None, + node_border_color="k", + ) + ax.margins(0.17) + plt.tight_layout() + plt.axis("off") + return fig + + +def test_display_line_collection(): + G = nx.karate_club_graph() + nx.set_edge_attributes( + G, {(u, v): "-|>" if (u + v) % 2 else "-" for u, v in G.edges()}, "arrowstyle" + ) + canvas = plt.figure().add_subplot(111) + nx.display(G, canvas=canvas, edge_arrowsize=10) + # There should only be one line collection in any given visualization + lc = [ + l + for l in canvas.get_children() + if isinstance(l, mpl.collections.LineCollection) + ][0] + assert len(lc.get_paths()) == sum([1 for u, v in G.edges() if (u + v) % 2]) + plt.close() + + +@pytest.mark.mpl_image_compare +def test_display_labels_and_colors(): + """See 'Labels and Colors' gallery example""" + fig, ax = plt.subplots() + G = nx.cubical_graph() + pos = nx.spring_layout(G, seed=3113794652) # positions for all nodes + nx.set_node_attributes(G, pos, "pos") # Will not be needed after PR 7571 + labels = iter( + [ + r"$a$", + r"$b$", + r"$c$", + r"$d$", + r"$\alpha$", + r"$\beta$", + r"$\gamma$", + r"$\delta$", + ] + ) + nx.set_node_attributes( + G, + { + n: { + "size": 800, + "alpha": 0.9, + "color": "tab:red" if n < 4 else "tab:blue", + "label": {"label": next(labels), "size": 22, "color": "whitesmoke"}, + } + for n in G.nodes() + }, + ) + + nx.display(G, node_pos="pos", edge_color="tab:grey") + + # The tricky bit is the highlighted colors for the edges + edgelist = [(0, 1), (1, 2), (2, 3), (0, 3)] + nx.set_edge_attributes( + G, + { + (u, v): { + "width": 8, + "alpha": 0.5, + "color": "tab:red", + "visible": (u, v) in edgelist, + } + for u, v in G.edges() + }, + ) + nx.display(G, node_pos="pos", node_visible=False) + edgelist = [(4, 5), (5, 6), (6, 7), (4, 7)] + nx.set_edge_attributes( + G, + { + (u, v): { + "color": "tab:blue", + "visible": (u, v) in edgelist, + } + for u, v in G.edges() + }, + ) + nx.display(G, node_pos="pos", node_visible=False) + + plt.tight_layout() + plt.axis("off") + return fig + + +@pytest.mark.mpl_image_compare +def test_display_complex(): + import itertools as it + + fig, ax = plt.subplots() + G = nx.MultiDiGraph() + nodes = "ABC" + prod = list(it.product(nodes, repeat=2)) * 4 + G = nx.MultiDiGraph() + for i, (u, v) in enumerate(prod): + G.add_edge(u, v, w=round(i / 3, 2)) + nx.set_node_attributes(G, nx.spring_layout(G, seed=3113794652), "pos") + csi = it.cycle([f"arc3,rad={r}" for r in it.accumulate([0.15] * 4)]) + nx.set_edge_attributes(G, {e: next(csi) for e in G.edges(keys=True)}, "curvature") + nx.set_edge_attributes( + G, + { + tuple(e): {"label": w, "bbox": {"alpha": 0}} + for *e, w in G.edges(keys=True, data="w") + }, + "label", + ) + nx.apply_matplotlib_colors(G, "w", "color", mpl.colormaps["inferno"], nodes=False) + nx.display(G, canvas=ax, node_pos="pos") + + plt.tight_layout() + plt.axis("off") + return fig + + +@pytest.mark.mpl_image_compare +def test_display_shortest_path(): + fig, ax = plt.subplots() + G = nx.Graph() + G.add_nodes_from(["A", "B", "C", "D", "E", "F", "G", "H"]) + G.add_edge("A", "B", weight=4) + G.add_edge("A", "H", weight=8) + G.add_edge("B", "C", weight=8) + 
G.add_edge("B", "H", weight=11) + G.add_edge("C", "D", weight=7) + G.add_edge("C", "F", weight=4) + G.add_edge("C", "I", weight=2) + G.add_edge("D", "E", weight=9) + G.add_edge("D", "F", weight=14) + G.add_edge("E", "F", weight=10) + G.add_edge("F", "G", weight=2) + G.add_edge("G", "H", weight=1) + G.add_edge("G", "I", weight=6) + G.add_edge("H", "I", weight=7) + + # Find the shortest path from node A to node E + path = nx.shortest_path(G, "A", "E", weight="weight") + + # Create a list of edges in the shortest path + path_edges = list(zip(path, path[1:])) + nx.set_node_attributes(G, nx.spring_layout(G, seed=37), "pos") + nx.set_edge_attributes( + G, + { + (u, v): { + "color": ( + "red" + if (u, v) in path_edges or tuple(reversed((u, v))) in path_edges + else "black" + ), + "label": {"label": d["weight"], "rotate": False}, + } + for u, v, d in G.edges(data=True) + }, + ) + nx.display(G, canvas=ax) + plt.tight_layout() + plt.axis("off") + return fig + + +@pytest.mark.parametrize( + ("edge_color", "expected"), + ( + (None, "black"), + ("r", "red"), + ((1.0, 1.0, 0.0), "yellow"), + ((0, 1, 0, 1), "lime"), + ("color", "blue"), + ("#0000FF", "blue"), + ), +) +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_display_edge_single_color(edge_color, expected, graph_type): + G = nx.path_graph(3, create_using=graph_type) + nx.set_edge_attributes(G, "#0000FF", "color") + canvas = plt.figure().add_subplot(111) + nx.display(G, edge_color=edge_color, canvas=canvas) + if G.is_directed(): + colors = [ + f.get_fc() + for f in canvas.get_children() + if isinstance(f, mpl.patches.FancyArrowPatch) + ] + else: + colors = [ + l + for l in canvas.collections + if isinstance(l, mpl.collections.LineCollection) + ][0].get_colors() + assert all(mpl.colors.same_color(c, expected) for c in colors) + plt.close() + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_display_edge_multiple_colors(graph_type): + G = nx.path_graph(3, create_using=graph_type) + nx.set_edge_attributes(G, {(0, 1): "#FF0000", (1, 2): (0, 0, 1)}, "color") + ax = plt.figure().add_subplot(111) + nx.display(G, canvas=ax) + expected = ["red", "blue"] + if G.is_directed(): + colors = [ + f.get_fc() + for f in ax.get_children() + if isinstance(f, mpl.patches.FancyArrowPatch) + ] + else: + colors = [ + l for l in ax.collections if isinstance(l, mpl.collections.LineCollection) + ][0].get_colors() + assert mpl.colors.same_color(colors, expected) + plt.close() + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_display_edge_position(graph_type): + G = nx.path_graph(3, create_using=graph_type) + nx.set_node_attributes(G, {n: (n, n) for n in G.nodes()}, "pos") + ax = plt.figure().add_subplot(111) + nx.display(G, canvas=ax) + if G.is_directed(): + end_points = [ + (f.get_path().vertices[0, :], f.get_path().vertices[-2, :]) + for f in ax.get_children() + if isinstance(f, mpl.patches.FancyArrowPatch) + ] + else: + line_collection = [ + l for l in ax.collections if isinstance(l, mpl.collections.LineCollection) + ][0] + end_points = [ + (p.vertices[0, :], p.vertices[-1, :]) for p in line_collection.get_paths() + ] + expected = [((0, 0), (1, 1)), ((1, 1), (2, 2))] + # Use the threshold to account for slight shifts in FancyArrowPatch margins to + # avoid covering the arrow head with the node. 
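+    # (The exact value is not critical; it just needs to exceed that shift.)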
+ threshold = 0.05 + for a, e in zip(end_points, expected): + act_start, act_end = a + exp_start, exp_end = e + assert all(abs(act_start - exp_start) < (threshold, threshold)) and all( + abs(act_end - exp_end) < (threshold, threshold) + ) + plt.close() + + +def test_display_position_function(): + G = nx.karate_club_graph() + + def fixed_layout(G): + return nx.spring_layout(G, seed=314159) + + pos = fixed_layout(G) + ax = plt.figure().add_subplot(111) + nx.display(G, node_pos=fixed_layout, canvas=ax) + # rebuild the position dictionary from the canvas + act_pos = { + n: tuple(p) for n, p in zip(G.nodes(), ax.get_children()[0].get_offsets().data) + } + for n in G.nodes(): + assert all(pos[n] == act_pos[n]) + plt.close() + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_display_edge_colormaps(graph_type): + G = nx.path_graph(3, create_using=graph_type) + nx.set_edge_attributes(G, {(0, 1): 0, (1, 2): 1}, "weight") + cmap = mpl.colormaps["plasma"] + nx.apply_matplotlib_colors(G, "weight", "color", cmap, nodes=False) + canvas = plt.figure().add_subplot(111) + nx.display(G, canvas=canvas) + mapper = mpl.cm.ScalarMappable(cmap=cmap) + mapper.set_clim(0, 1) + expected = [mapper.to_rgba(0), mapper.to_rgba(1)] + if G.is_directed(): + colors = [ + f.get_facecolor() + for f in canvas.get_children() + if isinstance(f, mpl.patches.FancyArrowPatch) + ] + else: + colors = [ + l + for l in canvas.collections + if isinstance(l, mpl.collections.LineCollection) + ][0].get_colors() + assert mpl.colors.same_color(expected[0], G.edges[0, 1]["color"]) + assert mpl.colors.same_color(expected[1], G.edges[1, 2]["color"]) + assert mpl.colors.same_color(expected, colors) + plt.close() + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_display_node_colormaps(graph_type): + G = nx.path_graph(3, create_using=graph_type) + nx.set_node_attributes(G, {0: 0, 1: 0.5, 2: 1}, "weight") + cmap = mpl.colormaps["plasma"] + nx.apply_matplotlib_colors(G, "weight", "color", cmap) + canvas = plt.figure().add_subplot(111) + nx.display(G, canvas=canvas) + mapper = mpl.cm.ScalarMappable(cmap=cmap) + mapper.set_clim(0, 1) + expected = [mapper.to_rgba(0), mapper.to_rgba(0.5), mapper.to_rgba(1)] + colors = [ + s for s in canvas.collections if isinstance(s, mpl.collections.PathCollection) + ][0].get_edgecolors() + assert mpl.colors.same_color(expected[0], G.nodes[0]["color"]) + assert mpl.colors.same_color(expected[1], G.nodes[1]["color"]) + assert mpl.colors.same_color(expected[2], G.nodes[2]["color"]) + assert mpl.colors.same_color(expected, colors) + plt.close() + + +@pytest.mark.parametrize( + ("param_value", "expected"), + ( + (None, [defaults["edge_width"], defaults["edge_width"]]), + (5, [5, 5]), + ("width", [5, 10]), + ), +) +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_display_edge_width(param_value, expected, graph_type): + G = nx.path_graph(3, create_using=graph_type) + nx.set_edge_attributes(G, {(0, 1): 5, (1, 2): 10}, "width") + canvas = plt.figure().add_subplot(111) + nx.display(G, edge_width=param_value, canvas=canvas) + if G.is_directed(): + widths = [ + f.get_linewidth() + for f in canvas.get_children() + if isinstance(f, mpl.patches.FancyArrowPatch) + ] + else: + widths = list( + [ + l + for l in canvas.collections + if isinstance(l, mpl.collections.LineCollection) + ][0].get_linewidths() + ) + assert widths == expected + + +@pytest.mark.parametrize( + ("param_value", "expected"), + ( + (None, [defaults["edge_style"], defaults["edge_style"]]), 
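+        # a literal style string is broadcast to every edge, while a string
+        # naming an edge attribute ("style") resolves per edge: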
+        (":", [":", ":"]),
+        ("style", ["-", ":"]),
+    ),
+)
+@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph))
+def test_display_edge_style(param_value, expected, graph_type):
+    G = nx.path_graph(3, create_using=graph_type)
+    nx.set_edge_attributes(G, {(0, 1): "-", (1, 2): ":"}, "style")
+    canvas = plt.figure().add_subplot(111)
+    nx.display(G, edge_style=param_value, canvas=canvas)
+    if G.is_directed():
+        styles = [
+            f.get_linestyle()
+            for f in canvas.get_children()
+            if isinstance(f, mpl.patches.FancyArrowPatch)
+        ]
+    else:
+        # Convert back from tuple description to character form
+        linestyles = {(0, None): "-", (0, (1, 1.65)): ":"}
+        styles = [
+            linestyles[(s[0], tuple(s[1]) if s[1] is not None else None)]
+            for s in [
+                l
+                for l in canvas.collections
+                if isinstance(l, mpl.collections.LineCollection)
+            ][0].get_linestyles()
+        ]
+    assert styles == expected
+    plt.close()
+
+
+def test_display_node_labels():
+    G = nx.path_graph(4)
+    canvas = plt.figure().add_subplot(111)
+    nx.display(G, canvas=canvas, node_label={"size": 20})
+    labels = [t for t in canvas.get_children() if isinstance(t, mpl.text.Text)]
+    for n, l in zip(G.nodes(), labels):
+        assert l.get_text() == str(n)
+        assert l.get_size() == 20.0
+    plt.close()
+
+
+def test_display_edge_labels():
+    G = nx.path_graph(4)
+    canvas = plt.figure().add_subplot(111)
+    # While we can pass in dicts for edge label defaults without errors,
+    # this isn't helpful unless we want one label for all edges.
+    nx.set_edge_attributes(G, {(u, v): {"label": u + v} for u, v in G.edges()})
+    nx.display(G, canvas=canvas, edge_label={"color": "r"}, node_label=None)
+    labels = [t for t in canvas.get_children() if isinstance(t, mpl.text.Text)]
+    for e, l in zip(G.edges(), labels):
+        assert l.get_text() == str(e[0] + e[1])
+        assert l.get_color() == "r"
+    plt.close()
+
+
+@pytest.mark.mpl_image_compare
+def test_display_empty_graph():
+    G = nx.empty_graph()
+    fig, ax = plt.subplots()
+    nx.display(G, canvas=ax)
+    plt.tight_layout()
+    plt.axis("off")
+    return fig
+
+
+def test_display_multigraph_non_integer_keys():
+    G = nx.MultiGraph()
+    G.add_nodes_from(["A", "B", "C", "D"])
+    G.add_edges_from(
+        [
+            ("A", "B", "0"),
+            ("A", "B", "1"),
+            ("B", "C", "-1"),
+            ("B", "C", "1"),
+            ("C", "D", "-1"),
+            ("C", "D", "0"),
+        ]
+    )
+    nx.set_edge_attributes(
+        G, {e: f"arc3,rad={0.2 * int(e[2])}" for e in G.edges(keys=True)}, "curvature"
+    )
+    canvas = plt.figure().add_subplot(111)
+    nx.display(G, canvas=canvas)
+    rads = [
+        f.get_connectionstyle().rad
+        for f in canvas.get_children()
+        if isinstance(f, mpl.patches.FancyArrowPatch)
+    ]
+    assert rads == [0.0, 0.2, -0.2, 0.2, -0.2, 0.0]
+    plt.close()
+
+
+def test_display_raises_for_bad_arg():
+    G = nx.karate_club_graph()
+    with pytest.raises(nx.NetworkXError):
+        nx.display(G, bad_arg="bad_arg")
+    plt.close()
+
+
+def test_display_arrow_size():
+    G = nx.path_graph(4, create_using=nx.DiGraph)
+    nx.set_edge_attributes(
+        G, {(u, v): (u + v + 2) ** 2 for u, v in G.edges()}, "arrowsize"
+    )
+    ax = plt.axes()
+    nx.display(G, canvas=ax)
+    assert [9, 25, 49] == [
+        f.get_mutation_scale()
+        for f in ax.get_children()
+        if isinstance(f, mpl.patches.FancyArrowPatch)
+    ]
+    plt.close()
+
+
+def test_display_mismatched_edge_position():
+    """
+    This test ensures that an error is raised for incomplete position data.
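+
+    Node 4 is hidden, but the edge (3, 4) is still drawn and therefore still
+    needs a position for node 4.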
+    """
+    G = nx.path_graph(5)
+    # Notice that there is no position for node 4
+    nx.set_node_attributes(G, {0: (0, 0), 1: (1, 1), 2: (2, 2), 3: (3, 3)}, "pos")
+    # But that's not a problem since we don't want to show node 4, right?
+    nx.set_node_attributes(G, {n: n < 4 for n in G.nodes()}, "visible")
+    # However, if we try to visualize every edge (including 3 -> 4)...
+    # That's a problem since node 4 doesn't have a position
+    with pytest.raises(nx.NetworkXError):
+        nx.display(G)
+
+
+# NOTE: parametrizing on marker to test both branches of internal
+# to_marker_edge function
+@pytest.mark.parametrize("node_shape", ("o", "s"))
+def test_display_edge_margins(node_shape):
+    """
+    Test that there is a wider gap between the node and the start of an
+    incident edge when min_source_margin is specified.
+
+    This test checks that using the min_{source/target}_margin edge
+    attributes results in a shorter drawn edge (i.e. more padding) between
+    the source and target nodes.
+
+    As a crude visual example, let 's' and 't' represent source and target
+    nodes, respectively:
+
+       Default:
+       s-----------------------------t
+
+       With margins:
+       s   -----------------------   t
+
+    """
+    ax = plt.figure().add_subplot(111)
+    G = nx.DiGraph([(0, 1)])
+    nx.set_node_attributes(G, {0: (0, 0), 1: (1, 1)}, "pos")
+    # Get the default patches from the regular visualization
+    nx.display(G, canvas=ax, node_shape=node_shape)
+    default_arrow = [
+        f for f in ax.get_children() if isinstance(f, mpl.patches.FancyArrowPatch)
+    ][0]
+    default_extent = default_arrow.get_extents().corners()[::2, 0]
+    # Now plot again with margins
+    ax = plt.figure().add_subplot(111)
+    nx.display(
+        G,
+        canvas=ax,
+        edge_source_margin=100,
+        edge_target_margin=100,
+        node_shape=node_shape,
+    )
+    padded_arrow = [
+        f for f in ax.get_children() if isinstance(f, mpl.patches.FancyArrowPatch)
+    ][0]
+    padded_extent = padded_arrow.get_extents().corners()[::2, 0]
+
+    # With padding, the left-most extent of the edge should be further to the right
+    assert padded_extent[0] > default_extent[0]
+    # And the rightmost extent of the edge, further to the left
+    assert padded_extent[1] < default_extent[1]
+    plt.close()
+
+
+@pytest.mark.parametrize("ticks", [False, True])
+def test_display_hide_ticks(ticks):
+    G = nx.path_graph(3)
+    nx.set_node_attributes(G, {n: (n, n) for n in G.nodes()}, "pos")
+    ax = plt.axes()
+    nx.display(G, hide_ticks=ticks)
+    for axis in [ax.xaxis, ax.yaxis]:
+        assert bool(axis.get_ticklabels()) != ticks
+
+    plt.close()
+
+
+def test_display_self_loop():
+    ax = plt.axes()
+    G = nx.DiGraph()
+    G.add_node(0)
+    G.add_edge(0, 0)
+    nx.set_node_attributes(G, {0: (0, 0)}, "pos")
+    nx.display(G, canvas=ax)
+    arrow = [
+        f for f in ax.get_children() if isinstance(f, mpl.patches.FancyArrowPatch)
+    ][0]
+    bbox = arrow.get_extents()
+    assert bbox.width > 0 and bbox.height > 0
+
+    plt.delaxes(ax)
+    plt.close()
+
+
+def test_display_remove_pos_attr():
+    """
+    If the pos attribute isn't provided or is a function, display computes the layout
+    and adds it to the graph. We need to ensure that this new attribute is removed from
+    the returned graph.
+ """ + G = nx.karate_club_graph() + nx.display(G) + assert nx.get_node_attributes(G, "display's position attribute name") == {} + + +@pytest.fixture +def subplots(): + fig, ax = plt.subplots() + yield fig, ax + plt.delaxes(ax) + plt.close() + + +def test_draw(): + try: + functions = [ + nx.draw_circular, + nx.draw_kamada_kawai, + nx.draw_planar, + nx.draw_random, + nx.draw_spectral, + nx.draw_spring, + nx.draw_shell, + ] + options = [{"node_color": "black", "node_size": 100, "width": 3}] + for function, option in itertools.product(functions, options): + function(barbell, **option) + plt.savefig("test.ps") + except ModuleNotFoundError: # draw_kamada_kawai requires scipy + pass + finally: + try: + os.unlink("test.ps") + except OSError: + pass + + +def test_draw_shell_nlist(): + try: + nlist = [list(range(4)), list(range(4, 10)), list(range(10, 14))] + nx.draw_shell(barbell, nlist=nlist) + plt.savefig("test.ps") + finally: + try: + os.unlink("test.ps") + except OSError: + pass + + +def test_draw_bipartite(): + try: + G = nx.complete_bipartite_graph(2, 5) + nx.draw_bipartite(G) + plt.savefig("test.ps") + finally: + try: + os.unlink("test.ps") + except OSError: + pass + + +def test_edge_colormap(): + colors = range(barbell.number_of_edges()) + nx.draw_spring( + barbell, edge_color=colors, width=4, edge_cmap=plt.cm.Blues, with_labels=True + ) + # plt.show() + + +def test_arrows(): + nx.draw_spring(barbell.to_directed()) + # plt.show() + + +@pytest.mark.parametrize( + ("edge_color", "expected"), + ( + (None, "black"), # Default + ("r", "red"), # Non-default color string + (["r"], "red"), # Single non-default color in a list + ((1.0, 1.0, 0.0), "yellow"), # single color as rgb tuple + ([(1.0, 1.0, 0.0)], "yellow"), # single color as rgb tuple in list + ((0, 1, 0, 1), "lime"), # single color as rgba tuple + ([(0, 1, 0, 1)], "lime"), # single color as rgba tuple in list + ("#0000ff", "blue"), # single color hex code + (["#0000ff"], "blue"), # hex code in list + ), +) +@pytest.mark.parametrize("edgelist", (None, [(0, 1)])) +def test_single_edge_color_undirected(edge_color, expected, edgelist): + """Tests ways of specifying all edges have a single color for edges + drawn with a LineCollection""" + + G = nx.path_graph(3) + drawn_edges = nx.draw_networkx_edges( + G, pos=nx.random_layout(G), edgelist=edgelist, edge_color=edge_color + ) + assert mpl.colors.same_color(drawn_edges.get_color(), expected) + + +@pytest.mark.parametrize( + ("edge_color", "expected"), + ( + (None, "black"), # Default + ("r", "red"), # Non-default color string + (["r"], "red"), # Single non-default color in a list + ((1.0, 1.0, 0.0), "yellow"), # single color as rgb tuple + ([(1.0, 1.0, 0.0)], "yellow"), # single color as rgb tuple in list + ((0, 1, 0, 1), "lime"), # single color as rgba tuple + ([(0, 1, 0, 1)], "lime"), # single color as rgba tuple in list + ("#0000ff", "blue"), # single color hex code + (["#0000ff"], "blue"), # hex code in list + ), +) +@pytest.mark.parametrize("edgelist", (None, [(0, 1)])) +def test_single_edge_color_directed(edge_color, expected, edgelist): + """Tests ways of specifying all edges have a single color for edges drawn + with FancyArrowPatches""" + + G = nx.path_graph(3, create_using=nx.DiGraph) + drawn_edges = nx.draw_networkx_edges( + G, pos=nx.random_layout(G), edgelist=edgelist, edge_color=edge_color + ) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), expected) + + +def test_edge_color_tuple_interpretation(): + """If edge_color is a sequence with the same length 
as edgelist, then each + value in edge_color is mapped onto each edge via colormap.""" + G = nx.path_graph(6, create_using=nx.DiGraph) + pos = {n: (n, n) for n in range(len(G))} + + # num edges != 3 or 4 --> edge_color interpreted as rgb(a) + for ec in ((0, 0, 1), (0, 0, 1, 1)): + # More than 4 edges + drawn_edges = nx.draw_networkx_edges(G, pos, edge_color=ec) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), ec) + # Fewer than 3 edges + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2)], edge_color=ec + ) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), ec) + + # num edges == 3, len(edge_color) == 4: interpreted as rgba + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2), (2, 3)], edge_color=(0, 0, 1, 1) + ) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), "blue") + + # num edges == 4, len(edge_color) == 3: interpreted as rgb + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2), (2, 3), (3, 4)], edge_color=(0, 0, 1) + ) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), "blue") + + # num edges == len(edge_color) == 3: interpreted with cmap, *not* as rgb + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2), (2, 3)], edge_color=(0, 0, 1) + ) + assert mpl.colors.same_color( + drawn_edges[0].get_edgecolor(), drawn_edges[1].get_edgecolor() + ) + for fap in drawn_edges: + assert not mpl.colors.same_color(fap.get_edgecolor(), "blue") + + # num edges == len(edge_color) == 4: interpreted with cmap, *not* as rgba + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2), (2, 3), (3, 4)], edge_color=(0, 0, 1, 1) + ) + assert mpl.colors.same_color( + drawn_edges[0].get_edgecolor(), drawn_edges[1].get_edgecolor() + ) + assert mpl.colors.same_color( + drawn_edges[2].get_edgecolor(), drawn_edges[3].get_edgecolor() + ) + for fap in drawn_edges: + assert not mpl.colors.same_color(fap.get_edgecolor(), "blue") + + +def test_fewer_edge_colors_than_num_edges_directed(): + """Test that the edge colors are cycled when there are fewer specified + colors than edges.""" + G = barbell.to_directed() + pos = nx.random_layout(barbell) + edgecolors = ("r", "g", "b") + drawn_edges = nx.draw_networkx_edges(G, pos, edge_color=edgecolors) + for fap, expected in zip(drawn_edges, itertools.cycle(edgecolors)): + assert mpl.colors.same_color(fap.get_edgecolor(), expected) + + +def test_more_edge_colors_than_num_edges_directed(): + """Test that extra edge colors are ignored when there are more specified + colors than edges.""" + G = nx.path_graph(4, create_using=nx.DiGraph) # 3 edges + pos = nx.random_layout(barbell) + edgecolors = ("r", "g", "b", "c") # 4 edge colors + drawn_edges = nx.draw_networkx_edges(G, pos, edge_color=edgecolors) + for fap, expected in zip(drawn_edges, edgecolors[:-1]): + assert mpl.colors.same_color(fap.get_edgecolor(), expected) + + +def test_edge_color_string_with_global_alpha_undirected(): + edge_collection = nx.draw_networkx_edges( + barbell, + pos=nx.random_layout(barbell), + edgelist=[(0, 1), (1, 2)], + edge_color="purple", + alpha=0.2, + ) + ec = edge_collection.get_color().squeeze() # as rgba tuple + assert len(edge_collection.get_paths()) == 2 + assert mpl.colors.same_color(ec[:-1], "purple") + assert ec[-1] == 0.2 + + +def test_edge_color_string_with_global_alpha_directed(): + drawn_edges = nx.draw_networkx_edges( + barbell.to_directed(), + pos=nx.random_layout(barbell), + 
edgelist=[(0, 1), (1, 2)], + edge_color="purple", + alpha=0.2, + ) + assert len(drawn_edges) == 2 + for fap in drawn_edges: + ec = fap.get_edgecolor() # As rgba tuple + assert mpl.colors.same_color(ec[:-1], "purple") + assert ec[-1] == 0.2 + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_edge_width_default_value(graph_type): + """Test the default linewidth for edges drawn either via LineCollection or + FancyArrowPatches.""" + G = nx.path_graph(2, create_using=graph_type) + pos = {n: (n, n) for n in range(len(G))} + drawn_edges = nx.draw_networkx_edges(G, pos) + if isinstance(drawn_edges, list): # directed case: list of FancyArrowPatch + drawn_edges = drawn_edges[0] + assert drawn_edges.get_linewidth() == 1 + + +@pytest.mark.parametrize( + ("edgewidth", "expected"), + ( + (3, 3), # single-value, non-default + ([3], 3), # Single value as a list + ), +) +def test_edge_width_single_value_undirected(edgewidth, expected): + G = nx.path_graph(4) + pos = {n: (n, n) for n in range(len(G))} + drawn_edges = nx.draw_networkx_edges(G, pos, width=edgewidth) + assert len(drawn_edges.get_paths()) == 3 + assert drawn_edges.get_linewidth() == expected + + +@pytest.mark.parametrize( + ("edgewidth", "expected"), + ( + (3, 3), # single-value, non-default + ([3], 3), # Single value as a list + ), +) +def test_edge_width_single_value_directed(edgewidth, expected): + G = nx.path_graph(4, create_using=nx.DiGraph) + pos = {n: (n, n) for n in range(len(G))} + drawn_edges = nx.draw_networkx_edges(G, pos, width=edgewidth) + assert len(drawn_edges) == 3 + for fap in drawn_edges: + assert fap.get_linewidth() == expected + + +@pytest.mark.parametrize( + "edgelist", + ( + [(0, 1), (1, 2), (2, 3)], # one width specification per edge + None, # fewer widths than edges - widths cycle + [(0, 1), (1, 2)], # More widths than edges - unused widths ignored + ), +) +def test_edge_width_sequence(edgelist): + G = barbell.to_directed() + pos = nx.random_layout(G) + widths = (0.5, 2.0, 12.0) + drawn_edges = nx.draw_networkx_edges(G, pos, edgelist=edgelist, width=widths) + for fap, expected_width in zip(drawn_edges, itertools.cycle(widths)): + assert fap.get_linewidth() == expected_width + + +def test_edge_color_with_edge_vmin_vmax(): + """Test that edge_vmin and edge_vmax properly set the dynamic range of the + color map when num edges == len(edge_colors).""" + G = nx.path_graph(3, create_using=nx.DiGraph) + pos = nx.random_layout(G) + # Extract colors from the original (unscaled) colormap + drawn_edges = nx.draw_networkx_edges(G, pos, edge_color=[0, 1.0]) + orig_colors = [e.get_edgecolor() for e in drawn_edges] + # Colors from scaled colormap + drawn_edges = nx.draw_networkx_edges( + G, pos, edge_color=[0.2, 0.8], edge_vmin=0.2, edge_vmax=0.8 + ) + scaled_colors = [e.get_edgecolor() for e in drawn_edges] + assert mpl.colors.same_color(orig_colors, scaled_colors) + + +def test_directed_edges_linestyle_default(): + """Test default linestyle for edges drawn with FancyArrowPatches.""" + G = nx.path_graph(4, create_using=nx.DiGraph) # Graph with 3 edges + pos = {n: (n, n) for n in range(len(G))} + + # edge with default style + drawn_edges = nx.draw_networkx_edges(G, pos) + assert len(drawn_edges) == 3 + for fap in drawn_edges: + assert fap.get_linestyle() == "solid" + + +@pytest.mark.parametrize( + "style", + ( + "dashed", # edge with string style + "--", # edge with simplified string style + (1, (1, 1)), # edge with (offset, onoffseq) style + ), +) +def test_directed_edges_linestyle_single_value(style): + 
"""Tests support for specifying linestyles with a single value to be applied to + all edges in ``draw_networkx_edges`` for FancyArrowPatch outputs + (e.g. directed edges).""" + + G = nx.path_graph(4, create_using=nx.DiGraph) # Graph with 3 edges + pos = {n: (n, n) for n in range(len(G))} + + drawn_edges = nx.draw_networkx_edges(G, pos, style=style) + assert len(drawn_edges) == 3 + for fap in drawn_edges: + assert fap.get_linestyle() == style + + +@pytest.mark.parametrize( + "style_seq", + ( + ["dashed"], # edge with string style in list + ["--"], # edge with simplified string style in list + [(1, (1, 1))], # edge with (offset, onoffseq) style in list + ["--", "-", ":"], # edges with styles for each edge + ["--", "-"], # edges with fewer styles than edges (styles cycle) + ["--", "-", ":", "-."], # edges with more styles than edges (extra unused) + ), +) +def test_directed_edges_linestyle_sequence(style_seq): + """Tests support for specifying linestyles with sequences in + ``draw_networkx_edges`` for FancyArrowPatch outputs (e.g. directed edges).""" + + G = nx.path_graph(4, create_using=nx.DiGraph) # Graph with 3 edges + pos = {n: (n, n) for n in range(len(G))} + + drawn_edges = nx.draw_networkx_edges(G, pos, style=style_seq) + assert len(drawn_edges) == 3 + for fap, style in zip(drawn_edges, itertools.cycle(style_seq)): + assert fap.get_linestyle() == style + + +def test_return_types(): + from matplotlib.collections import LineCollection, PathCollection + from matplotlib.patches import FancyArrowPatch + + G = nx.frucht_graph(create_using=nx.Graph) + dG = nx.frucht_graph(create_using=nx.DiGraph) + pos = nx.spring_layout(G) + dpos = nx.spring_layout(dG) + # nodes + nodes = nx.draw_networkx_nodes(G, pos) + assert isinstance(nodes, PathCollection) + # edges + edges = nx.draw_networkx_edges(dG, dpos, arrows=True) + assert isinstance(edges, list) + if len(edges) > 0: + assert isinstance(edges[0], FancyArrowPatch) + edges = nx.draw_networkx_edges(dG, dpos, arrows=False) + assert isinstance(edges, LineCollection) + edges = nx.draw_networkx_edges(G, dpos, arrows=None) + assert isinstance(edges, LineCollection) + edges = nx.draw_networkx_edges(dG, pos, arrows=None) + assert isinstance(edges, list) + if len(edges) > 0: + assert isinstance(edges[0], FancyArrowPatch) + + +def test_labels_and_colors(): + G = nx.cubical_graph() + pos = nx.spring_layout(G) # positions for all nodes + # nodes + nx.draw_networkx_nodes( + G, pos, nodelist=[0, 1, 2, 3], node_color="r", node_size=500, alpha=0.75 + ) + nx.draw_networkx_nodes( + G, + pos, + nodelist=[4, 5, 6, 7], + node_color="b", + node_size=500, + alpha=[0.25, 0.5, 0.75, 1.0], + ) + # edges + nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5) + nx.draw_networkx_edges( + G, + pos, + edgelist=[(0, 1), (1, 2), (2, 3), (3, 0)], + width=8, + alpha=0.5, + edge_color="r", + ) + nx.draw_networkx_edges( + G, + pos, + edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)], + width=8, + alpha=0.5, + edge_color="b", + ) + nx.draw_networkx_edges( + G, + pos, + edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)], + arrows=True, + min_source_margin=0.5, + min_target_margin=0.75, + width=8, + edge_color="b", + ) + # some math labels + labels = {} + labels[0] = r"$a$" + labels[1] = r"$b$" + labels[2] = r"$c$" + labels[3] = r"$d$" + labels[4] = r"$\alpha$" + labels[5] = r"$\beta$" + labels[6] = r"$\gamma$" + labels[7] = r"$\delta$" + colors = {n: "k" if n % 2 == 0 else "r" for n in range(8)} + nx.draw_networkx_labels(G, pos, labels, font_size=16) + nx.draw_networkx_labels(G, pos, labels, 
font_size=16, font_color=colors) + nx.draw_networkx_edge_labels(G, pos, edge_labels=None, rotate=False) + nx.draw_networkx_edge_labels(G, pos, edge_labels={(4, 5): "4-5"}) + # plt.show() + + +@pytest.mark.mpl_image_compare +def test_house_with_colors(): + G = nx.house_graph() + # explicitly set positions + fig, ax = plt.subplots() + pos = {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1), 4: (0.5, 2.0)} + + # Plot nodes with different properties for the "wall" and "roof" nodes + nx.draw_networkx_nodes( + G, + pos, + node_size=3000, + nodelist=[0, 1, 2, 3], + node_color="tab:blue", + ) + nx.draw_networkx_nodes( + G, pos, node_size=2000, nodelist=[4], node_color="tab:orange" + ) + nx.draw_networkx_edges(G, pos, alpha=0.5, width=6) + # Customize axes + ax.margins(0.11) + plt.tight_layout() + plt.axis("off") + return fig + + +def test_axes(subplots): + fig, ax = subplots + nx.draw(barbell, ax=ax) + nx.draw_networkx_edge_labels(barbell, nx.circular_layout(barbell), ax=ax) + + +def test_empty_graph(): + G = nx.Graph() + nx.draw(G) + + +def test_draw_empty_nodes_return_values(): + # See Issue #3833 + import matplotlib.collections # call as mpl.collections + + G = nx.Graph([(1, 2), (2, 3)]) + DG = nx.DiGraph([(1, 2), (2, 3)]) + pos = nx.circular_layout(G) + assert isinstance( + nx.draw_networkx_nodes(G, pos, nodelist=[]), mpl.collections.PathCollection + ) + assert isinstance( + nx.draw_networkx_nodes(DG, pos, nodelist=[]), mpl.collections.PathCollection + ) + + # drawing empty edges used to return an empty LineCollection or empty list. + # Now it is always an empty list (because edges are now lists of FancyArrows) + assert nx.draw_networkx_edges(G, pos, edgelist=[], arrows=True) == [] + assert nx.draw_networkx_edges(G, pos, edgelist=[], arrows=False) == [] + assert nx.draw_networkx_edges(DG, pos, edgelist=[], arrows=False) == [] + assert nx.draw_networkx_edges(DG, pos, edgelist=[], arrows=True) == [] + + +def test_multigraph_edgelist_tuples(): + # See Issue #3295 + G = nx.path_graph(3, create_using=nx.MultiDiGraph) + nx.draw_networkx(G, edgelist=[(0, 1, 0)]) + nx.draw_networkx(G, edgelist=[(0, 1, 0)], node_size=[10, 20, 0]) + + +def test_alpha_iter(): + pos = nx.random_layout(barbell) + fig = plt.figure() + # with fewer alpha elements than nodes + fig.add_subplot(131) # Each test in a new axis object + nx.draw_networkx_nodes(barbell, pos, alpha=[0.1, 0.2]) + # with equal alpha elements and nodes + num_nodes = len(barbell.nodes) + alpha = [x / num_nodes for x in range(num_nodes)] + colors = range(num_nodes) + fig.add_subplot(132) + nx.draw_networkx_nodes(barbell, pos, node_color=colors, alpha=alpha) + # with more alpha elements than nodes + alpha.append(1) + fig.add_subplot(133) + nx.draw_networkx_nodes(barbell, pos, alpha=alpha) + + +def test_multiple_node_shapes(subplots): + fig, ax = subplots + G = nx.path_graph(4) + nx.draw(G, node_shape=["o", "h", "s", "^"], ax=ax) + scatters = [ + s for s in ax.get_children() if isinstance(s, mpl.collections.PathCollection) + ] + assert len(scatters) == 4 + + +def test_individualized_font_attributes(subplots): + G = nx.karate_club_graph() + fig, ax = subplots + nx.draw( + G, + ax=ax, + font_color={n: "k" if n % 2 else "r" for n in G.nodes()}, + font_size={n: int(n / (34 / 15) + 5) for n in G.nodes()}, + ) + for n, t in zip( + G.nodes(), + [ + t + for t in ax.get_children() + if isinstance(t, mpl.text.Text) and len(t.get_text()) > 0 + ], + ): + expected = "black" if n % 2 else "red" + + assert mpl.colors.same_color(t.get_color(), expected) + assert int(n / (34 / 
15) + 5) == t.get_size()
+
+
+def test_individualized_edge_attributes(subplots):
+    G = nx.karate_club_graph()
+    fig, ax = subplots
+    arrowstyles = ["-|>" if (u + v) % 2 == 0 else "-[" for u, v in G.edges()]
+    arrowsizes = [10 * (u % 2 + v % 2) + 10 for u, v in G.edges()]
+    nx.draw(G, ax=ax, arrows=True, arrowstyle=arrowstyles, arrowsize=arrowsizes)
+    arrows = [
+        f for f in ax.get_children() if isinstance(f, mpl.patches.FancyArrowPatch)
+    ]
+    for e, a in zip(G.edges(), arrows):
+        assert a.get_mutation_scale() == 10 * (e[0] % 2 + e[1] % 2) + 10
+        expected = (
+            mpl.patches.ArrowStyle.BracketB
+            if sum(e) % 2
+            else mpl.patches.ArrowStyle.CurveFilledB
+        )
+        assert isinstance(a.get_arrowstyle(), expected)
+
+
+def test_error_invalid_kwds():
+    with pytest.raises(ValueError, match="Received invalid argument"):
+        nx.draw(barbell, foo="bar")
+
+
+def test_draw_networkx_arrowsize_incorrect_size():
+    G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 3)])
+    arrowsize = [1, 2, 3]
+    with pytest.raises(
+        ValueError, match="arrowsize should have the same length as edgelist"
+    ):
+        nx.draw(G, arrowsize=arrowsize)
+
+
+@pytest.mark.parametrize("arrowsize", (30, [10, 20, 30]))
+def test_draw_edges_arrowsize(arrowsize):
+    G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
+    pos = {0: (0, 0), 1: (0, 1), 2: (1, 0)}
+    edges = nx.draw_networkx_edges(G, pos=pos, arrowsize=arrowsize)
+
+    arrowsize = itertools.repeat(arrowsize) if isinstance(arrowsize, int) else arrowsize
+
+    for fap, expected in zip(edges, arrowsize):
+        assert isinstance(fap, mpl.patches.FancyArrowPatch)
+        assert fap.get_mutation_scale() == expected
+
+
+@pytest.mark.parametrize("arrowstyle", ("-|>", ["-|>", "-[", "<|-|>"]))
+def test_draw_edges_arrowstyle(arrowstyle):
+    G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
+    pos = {0: (0, 0), 1: (0, 1), 2: (1, 0)}
+    edges = nx.draw_networkx_edges(G, pos=pos, arrowstyle=arrowstyle)
+
+    arrowstyle = (
+        itertools.repeat(arrowstyle) if isinstance(arrowstyle, str) else arrowstyle
+    )
+
+    arrow_objects = {
+        "-|>": mpl.patches.ArrowStyle.CurveFilledB,
+        "-[": mpl.patches.ArrowStyle.BracketB,
+        "<|-|>": mpl.patches.ArrowStyle.CurveFilledAB,
+    }
+
+    for fap, expected in zip(edges, arrowstyle):
+        assert isinstance(fap, mpl.patches.FancyArrowPatch)
+        assert isinstance(fap.get_arrowstyle(), arrow_objects[expected])
+
+
+def test_np_edgelist():
+    # see issue #4129
+    nx.draw_networkx(barbell, edgelist=np.array([(0, 2), (0, 3)]))
+
+
+def test_draw_nodes_missing_node_from_position():
+    G = nx.path_graph(3)
+    pos = {0: (0, 0), 1: (1, 1)}  # No position for node 2
+    with pytest.raises(nx.NetworkXError, match="has no position"):
+        nx.draw_networkx_nodes(G, pos)
+
+
+# NOTE: parametrizing on marker to test both branches of internal
+# nx.draw_networkx_edges.to_marker_edge function
+@pytest.mark.parametrize("node_shape", ("o", "s"))
+def test_draw_edges_min_source_target_margins(node_shape, subplots):
+    """Test that there is a wider gap between the node and the start of an
+    incident edge when min_source_margin is specified.
+
+    This test checks that using the min_{source/target}_margin kwargs
+    results in more padding between the edge and its source and target nodes.
+    As a crude visual example, let 's' and 't' represent source and target
+    nodes, respectively:
+
+        Default:
+            s-----------------------------t
+
+        With margins:
+            s   -----------------------   t
+
+    """
+    # Create a single axis object to get consistent pixel coords across
+    # multiple draws
+    fig, ax = subplots
+    G = nx.DiGraph([(0, 1)])
+    pos = {0: (0, 0), 1: (1, 0)}  # horizontal layout
+    # Get leftmost and rightmost points of the FancyArrowPatch object
+    # representing the edge between nodes 0 and 1 (in pixel coordinates)
+    default_patch = nx.draw_networkx_edges(G, pos, ax=ax, node_shape=node_shape)[0]
+    default_extent = default_patch.get_extents().corners()[::2, 0]
+    # Now, do the same but with "padding" for the source and target via the
+    # min_{source/target}_margin kwargs
+    padded_patch = nx.draw_networkx_edges(
+        G,
+        pos,
+        ax=ax,
+        node_shape=node_shape,
+        min_source_margin=100,
+        min_target_margin=100,
+    )[0]
+    padded_extent = padded_patch.get_extents().corners()[::2, 0]
+
+    # With padding, the left-most extent of the edge should be further to the
+    # right
+    assert padded_extent[0] > default_extent[0]
+    # And the rightmost extent of the edge, further to the left
+    assert padded_extent[1] < default_extent[1]
+
+
+# NOTE: parametrizing on marker to test both branches of internal
+# nx.draw_networkx_edges.to_marker_edge function
+@pytest.mark.parametrize("node_shape", ("o", "s"))
+def test_draw_edges_min_source_target_margins_individual(node_shape, subplots):
+    """Test that there is a wider gap between the node and the start of an
+    incident edge when per-edge values of min_source_margin are specified.
+
+    This test checks that using the min_{source/target}_margin kwargs
+    results in more padding between the edge and its source and target nodes.
+    As a crude visual example, let 's' and 't' represent source and target
+    nodes, respectively:
+
+        Default:
+            s-----------------------------t
+
+        With margins:
+            s   -----------------------   t
+
+    """
+    # Create a single axis object to get consistent pixel coords across
+    # multiple draws
+    fig, ax = subplots
+    G = nx.DiGraph([(0, 1), (1, 2)])
+    pos = {0: (0, 0), 1: (1, 0), 2: (2, 0)}  # horizontal layout
+    # Get leftmost and rightmost points of the FancyArrowPatch object
+    # representing the edge between nodes 0 and 1 (in pixel coordinates)
+    default_patch = nx.draw_networkx_edges(G, pos, ax=ax, node_shape=node_shape)
+    default_extent = [d.get_extents().corners()[::2, 0] for d in default_patch]
+    # Now, do the same but with "padding" for the source and target via the
+    # min_{source/target}_margin kwargs
+    padded_patch = nx.draw_networkx_edges(
+        G,
+        pos,
+        ax=ax,
+        node_shape=node_shape,
+        min_source_margin=[98, 102],
+        min_target_margin=[98, 102],
+    )
+    padded_extent = [p.get_extents().corners()[::2, 0] for p in padded_patch]
+    for d, p in zip(default_extent, padded_extent):
+        # With padding, the left-most extent of the edge should be further to the
+        # right
+        assert p[0] > d[0]
+        # And the rightmost extent of the edge, further to the left
+        assert p[1] < d[1]
+
+
+def test_nonzero_selfloop_with_single_node(subplots):
+    """Ensure that selfloop extent is non-zero when there is only one node."""
+    # Create explicit axis object for test
+    fig, ax = subplots
+    # Graph with single node + self loop
+    G = nx.DiGraph()
+    G.add_node(0)
+    G.add_edge(0, 0)
+    # Draw
+    patch = nx.draw_networkx_edges(G, {0: (0, 0)})[0]
+    # The resulting patch must have non-zero extent
+    bbox = patch.get_extents()
+    assert bbox.width > 0 and bbox.height > 0
+
+
+def test_nonzero_selfloop_with_single_edge_in_edgelist(subplots):
+    """Ensure that selfloop extent is non-zero when only a single edge is
+    specified in the edgelist.
+    """
+    # Create explicit axis object for test
+    fig, ax = subplots
+    # Graph with selfloop
+    G = nx.path_graph(2, create_using=nx.DiGraph)
+    G.add_edge(1, 1)
+    pos = {n: (n, n) for n in G.nodes}
+    # Draw only the selfloop edge via the `edgelist` kwarg
+    patch = nx.draw_networkx_edges(G, pos, edgelist=[(1, 1)])[0]
+    # The resulting patch must have non-zero extent
+    bbox = patch.get_extents()
+    assert bbox.width > 0 and bbox.height > 0
+
+
+def test_apply_alpha():
+    """Test apply_alpha when there is a mismatch between the number of
+    supplied colors and elements.
+    """
+    nodelist = [0, 1, 2]
+    colorlist = ["r", "g", "b"]
+    alpha = 0.5
+    rgba_colors = nx.drawing.nx_pylab.apply_alpha(colorlist, alpha, nodelist)
+    assert all(rgba_colors[:, -1] == alpha)
+
+
+def test_draw_edges_toggling_with_arrows_kwarg():
+    """
+    The `arrows` keyword argument is used as a 3-way switch to select which
+    type of object to use for drawing edges:
+      - ``arrows=None`` -> default (FancyArrowPatches for directed, else LineCollection)
+      - ``arrows=True`` -> FancyArrowPatches
+      - ``arrows=False`` -> LineCollection
+    """
+    import matplotlib.collections
+    import matplotlib.patches
+
+    UG = nx.path_graph(3)
+    DG = nx.path_graph(3, create_using=nx.DiGraph)
+    pos = {n: (n, n) for n in UG}
+
+    # Use FancyArrowPatches when arrows=True, regardless of graph type
+    for G in (UG, DG):
+        edges = nx.draw_networkx_edges(G, pos, arrows=True)
+        assert len(edges) == len(G.edges)
+        assert isinstance(edges[0], mpl.patches.FancyArrowPatch)
+
+    # Use LineCollection when arrows=False, regardless of graph type
+    for G in (UG, DG):
+        edges = nx.draw_networkx_edges(G, pos, arrows=False)
+        assert isinstance(edges, mpl.collections.LineCollection)
+
+    # Default behavior when arrows=None: FAPs for directed, LCs for undirected
+    edges = nx.draw_networkx_edges(UG, pos)
+    assert isinstance(edges, mpl.collections.LineCollection)
+    edges = nx.draw_networkx_edges(DG, pos)
+    assert len(edges) == len(DG.edges)
+    assert isinstance(edges[0], mpl.patches.FancyArrowPatch)
+
+
+@pytest.mark.parametrize("drawing_func", (nx.draw, nx.draw_networkx))
+def test_draw_networkx_arrows_default_undirected(drawing_func, subplots):
+    import matplotlib.collections
+
+    G = nx.path_graph(3)
+    fig, ax = subplots
+    drawing_func(G, ax=ax)
+    assert any(isinstance(c, mpl.collections.LineCollection) for c in ax.collections)
+    assert not ax.patches
+
+
+@pytest.mark.parametrize("drawing_func", (nx.draw, nx.draw_networkx))
+def test_draw_networkx_arrows_default_directed(drawing_func, subplots):
+    import matplotlib.collections
+
+    G = nx.path_graph(3, create_using=nx.DiGraph)
+    fig, ax = subplots
+    drawing_func(G, ax=ax)
+    assert not any(
+        isinstance(c, mpl.collections.LineCollection) for c in ax.collections
+    )
+    assert ax.patches
+
+
+def test_edgelist_kwarg_not_ignored(subplots):
+    # See gh-4994
+    G = nx.path_graph(3)
+    G.add_edge(0, 0)
+    fig, ax = subplots
+    nx.draw(G, edgelist=[(0, 1), (1, 2)], ax=ax)  # Exclude self-loop from edgelist
+    assert not ax.patches
+
+
+@pytest.mark.parametrize(
+    ("G", "expected_n_edges"),
+    ([nx.DiGraph(), 2], [nx.MultiGraph(), 4], [nx.MultiDiGraph(), 4]),
+)
+def test_draw_networkx_edges_multiedge_connectionstyle(G, expected_n_edges):
+    """Draws edges correctly for 3 types of graphs and checks for valid length"""
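+    # For the multigraph types this builds three parallel (0, 1) edges plus a
+    # single (0, 2) edge (4 total); for nx.DiGraph the repeated (0, 1) entries
+    # collapse into one edge (2 total), matching expected_n_edges above. The
+    # weight attribute merely distinguishes the parallel edges.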
+    for i, (u, v) in enumerate([(0, 1), (0, 1), (0, 1), (0, 2)]):
+        G.add_edge(u, v, weight=round(i / 3, 2))
+    pos = {n: (n, n) for n in G}
+    # Each of these connectionstyle specifications (a single style, or a
+    # sequence possibly shorter than the number of edges) should be accepted
+    for conn_style in [
+        "arc3,rad=0.1",
+        ["arc3,rad=0.1", "arc3,rad=0.1"],
+        ["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.2"],
+    ]:
+        nx.draw_networkx_edges(G, pos, connectionstyle=conn_style)
+        arrows = nx.draw_networkx_edges(G, pos, connectionstyle=conn_style)
+        assert len(arrows) == expected_n_edges
+
+
+@pytest.mark.parametrize(
+    ("G", "expected_n_edges"),
+    ([nx.DiGraph(), 2], [nx.MultiGraph(), 4], [nx.MultiDiGraph(), 4]),
+)
+def test_draw_networkx_edge_labels_multiedge_connectionstyle(G, expected_n_edges):
+    """Draws labels correctly for 3 types of graphs and checks for valid length and class names"""
+    for i, (u, v) in enumerate([(0, 1), (0, 1), (0, 1), (0, 2)]):
+        G.add_edge(u, v, weight=round(i / 3, 2))
+    pos = {n: (n, n) for n in G}
+    # Draw the edges first; the labels drawn below must handle the same
+    # connectionstyle specifications
+    arrows = nx.draw_networkx_edges(
+        G, pos, connectionstyle=["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.1"]
+    )
+    for conn_style in [
+        "arc3,rad=0.1",
+        ["arc3,rad=0.1", "arc3,rad=0.2"],
+        ["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.1"],
+    ]:
+        text_items = nx.draw_networkx_edge_labels(G, pos, connectionstyle=conn_style)
+        assert len(text_items) == expected_n_edges
+        for ti in text_items.values():
+            assert ti.__class__.__name__ == "CurvedArrowText"
+
+
+def test_draw_networkx_edge_label_multiedge():
+    G = nx.MultiGraph()
+    G.add_edge(0, 1, weight=10)
+    G.add_edge(0, 1, weight=20)
+    edge_labels = nx.get_edge_attributes(G, "weight")  # Includes edge keys
+    pos = {n: (n, n) for n in G}
+    text_items = nx.draw_networkx_edge_labels(
+        G,
+        pos,
+        edge_labels=edge_labels,
+        connectionstyle=["arc3,rad=0.1", "arc3,rad=0.2"],
+    )
+    assert len(text_items) == 2
+
+
+def test_draw_networkx_edge_label_empty_dict():
+    """Regression test for draw_networkx_edge_labels with empty dict. See
+    gh-5372."""
+    G = nx.path_graph(3)
+    pos = {n: (n, n) for n in G.nodes}
+    assert nx.draw_networkx_edge_labels(G, pos, edge_labels={}) == {}
+
+
+def test_draw_networkx_edges_undirected_selfloop_colors(subplots):
+    """When an edgelist is supplied along with a sequence of colors, check that
+    the self-loops have the correct colors."""
+    fig, ax = subplots
+    # Edge list and corresponding colors
+    edgelist = [(1, 3), (1, 2), (2, 3), (1, 1), (3, 3), (2, 2)]
+    edge_colors = ["pink", "cyan", "black", "red", "blue", "green"]
+
+    G = nx.Graph(edgelist)
+    pos = {n: (n, n) for n in G.nodes}
+    nx.draw_networkx_edges(G, pos, ax=ax, edgelist=edgelist, edge_color=edge_colors)
+
+    # Verify that there are three fancy arrow patches (1 per self loop)
+    assert len(ax.patches) == 3
+
+    # These are points that should be contained in the self loops.
For example, + # sl_points[0] will be (1, 1.1), which is inside the "path" of the first + # self-loop but outside the others + sl_points = np.array(edgelist[-3:]) + np.array([0, 0.1]) + + # Check that the mapping between self-loop locations and their colors is + # correct + for fap, clr, slp in zip(ax.patches, edge_colors[-3:], sl_points): + assert fap.get_path().contains_point(slp) + assert mpl.colors.same_color(fap.get_edgecolor(), clr) + + +@pytest.mark.parametrize( + "fap_only_kwarg", # Non-default values for kwargs that only apply to FAPs + ( + {"arrowstyle": "-"}, + {"arrowsize": 20}, + {"connectionstyle": "arc3,rad=0.2"}, + {"min_source_margin": 10}, + {"min_target_margin": 10}, + ), +) +def test_user_warnings_for_unused_edge_drawing_kwargs(fap_only_kwarg, subplots): + """Users should get a warning when they specify a non-default value for + one of the kwargs that applies only to edges drawn with FancyArrowPatches, + but FancyArrowPatches aren't being used under the hood.""" + G = nx.path_graph(3) + pos = {n: (n, n) for n in G} + fig, ax = subplots + # By default, an undirected graph will use LineCollection to represent + # the edges + kwarg_name = list(fap_only_kwarg.keys())[0] + with pytest.warns( + UserWarning, match=f"\n\nThe {kwarg_name} keyword argument is not applicable" + ): + nx.draw_networkx_edges(G, pos, ax=ax, **fap_only_kwarg) + # FancyArrowPatches are always used when `arrows=True` is specified. + # Check that warnings are *not* raised in this case + with warnings.catch_warnings(): + # Escalate warnings -> errors so tests fail if warnings are raised + warnings.simplefilter("error") + warnings.filterwarnings("ignore", category=DeprecationWarning) + nx.draw_networkx_edges(G, pos, ax=ax, arrows=True, **fap_only_kwarg) + + +@pytest.mark.parametrize("draw_fn", (nx.draw, nx.draw_circular)) +def test_no_warning_on_default_draw_arrowstyle(draw_fn, subplots): + # See gh-7284 + fig, ax = subplots + G = nx.cycle_graph(5) + with warnings.catch_warnings(record=True) as w: + draw_fn(G, ax=ax) + assert len(w) == 0 + + +@pytest.mark.parametrize("hide_ticks", [False, True]) +@pytest.mark.parametrize( + "method", + [ + nx.draw_networkx, + nx.draw_networkx_edge_labels, + nx.draw_networkx_edges, + nx.draw_networkx_labels, + nx.draw_networkx_nodes, + ], +) +def test_hide_ticks(method, hide_ticks, subplots): + G = nx.path_graph(3) + pos = {n: (n, n) for n in G.nodes} + _, ax = subplots + method(G, pos=pos, ax=ax, hide_ticks=hide_ticks) + for axis in [ax.xaxis, ax.yaxis]: + assert bool(axis.get_ticklabels()) != hide_ticks + + +def test_edge_label_bar_connectionstyle(subplots): + """Check that FancyArrowPatches with `bar` connectionstyle are also supported + in edge label rendering. 
See gh-7735."""
+    fig, ax = subplots
+    edge = (0, 1)
+    G = nx.DiGraph([edge])
+    pos = {n: (n, 0) for n in G}  # Edge is horizontal line between (0, 0) and (1, 0)
+
+    style_arc = "arc3,rad=0.0"
+    style_bar = "bar,fraction=0.1"
+
+    arc_lbl = nx.draw_networkx_edge_labels(
+        G, pos, edge_labels={edge: "edge"}, connectionstyle=style_arc
+    )
+    # This would fail prior to gh-7739
+    bar_lbl = nx.draw_networkx_edge_labels(
+        G, pos, edge_labels={edge: "edge"}, connectionstyle=style_bar
+    )
+
+    # For the "arc" style, the label should be at roughly the midpoint.
+    # NOTE: the coordinate pair must be parenthesized; without the parentheses
+    # the statement parses as ``assert x, <message>`` and never fails.
+    assert (arc_lbl[edge].x, arc_lbl[edge].y) == pytest.approx((0.5, 0))
+    # The label should be below the x-axis for the "bar" style
+    assert bar_lbl[edge].y < arc_lbl[edge].y
diff --git a/.venv/lib/python3.12/site-packages/networkx/exception.py b/.venv/lib/python3.12/site-packages/networkx/exception.py
new file mode 100644
index 0000000..c960cf1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/exception.py
@@ -0,0 +1,131 @@
+"""
+**********
+Exceptions
+**********
+
+Base exceptions and errors for NetworkX.
+"""
+
+__all__ = [
+    "HasACycle",
+    "NodeNotFound",
+    "PowerIterationFailedConvergence",
+    "ExceededMaxIterations",
+    "AmbiguousSolution",
+    "NetworkXAlgorithmError",
+    "NetworkXException",
+    "NetworkXError",
+    "NetworkXNoCycle",
+    "NetworkXNoPath",
+    "NetworkXNotImplemented",
+    "NetworkXPointlessConcept",
+    "NetworkXUnbounded",
+    "NetworkXUnfeasible",
+]
+
+
+class NetworkXException(Exception):
+    """Base class for exceptions in NetworkX."""
+
+
+class NetworkXError(NetworkXException):
+    """Exception for a serious error in NetworkX."""
+
+
+class NetworkXPointlessConcept(NetworkXException):
+    """Raised when a null graph is provided as input to an algorithm
+    that cannot use it.
+
+    The null graph is sometimes considered a pointless concept [1]_,
+    thus the name of the exception.
+
+    Notes
+    -----
+    Null graphs and empty graphs are often used interchangeably but they
+    are well defined in NetworkX. An ``empty_graph`` is a graph with ``n`` nodes
+    and 0 edges, and a ``null_graph`` is a graph with 0 nodes and 0 edges.
+
+    References
+    ----------
+    .. [1] Harary, F. and Read, R. "Is the Null Graph a Pointless
+       Concept?" In Graphs and Combinatorics Conference, George
+       Washington University. New York: Springer-Verlag, 1973.
+
+    """
+
+
+class NetworkXAlgorithmError(NetworkXException):
+    """Exception for unexpected termination of algorithms."""
+
+
+class NetworkXUnfeasible(NetworkXAlgorithmError):
+    """Exception raised by algorithms trying to solve a problem
+    instance that has no feasible solution."""
+
+
+class NetworkXNoPath(NetworkXUnfeasible):
+    """Exception for algorithms that should return a path when running
+    on graphs where such a path does not exist."""
+
+
+class NetworkXNoCycle(NetworkXUnfeasible):
+    """Exception for algorithms that should return a cycle when running
+    on graphs where such a cycle does not exist."""
+
+
+class HasACycle(NetworkXException):
+    """Raised if a graph has a cycle when an algorithm expects that it
+    will have no cycles.
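+
+    A minimal handling sketch (``some_dag_only_routine`` is a hypothetical
+    placeholder, not an actual NetworkX function)::
+
+        try:
+            result = some_dag_only_routine(G)
+        except nx.HasACycle:
+            ...  # fall back to a cycle-tolerant algorithm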
+
+    """
+
+
+class NetworkXUnbounded(NetworkXAlgorithmError):
+    """Exception raised by algorithms trying to solve a maximization
+    or a minimization problem instance that is unbounded."""
+
+
+class NetworkXNotImplemented(NetworkXException):
+    """Exception raised by algorithms not implemented for a type of graph."""
+
+
+class NodeNotFound(NetworkXException):
+    """Exception raised if a requested node is not present in the graph."""
+
+
+class AmbiguousSolution(NetworkXException):
+    """Raised if more than one valid solution exists for an intermediary step
+    of an algorithm.
+
+    In the face of ambiguity, refuse the temptation to guess.
+    This may occur, for example, when trying to determine the
+    bipartite node sets in a disconnected bipartite graph when
+    computing bipartite matchings.
+
+    """
+
+
+class ExceededMaxIterations(NetworkXException):
+    """Raised if a loop iterates too many times without breaking.
+
+    This may occur, for example, in an algorithm that computes
+    progressively better approximations to a value but exceeds an
+    iteration bound specified by the user.
+
+    """
+
+
+class PowerIterationFailedConvergence(ExceededMaxIterations):
+    """Raised when the power iteration method fails to converge within a
+    specified iteration limit.
+
+    `num_iterations` is the number of iterations that have been
+    completed when this exception was raised.
+
+    """
+
+    def __init__(self, num_iterations, *args, **kw):
+        msg = f"power iteration failed to converge within {num_iterations} iterations"
+        # Call the (already bound) parent initializer directly; passing
+        # ``self`` again would smuggle the exception instance in as an
+        # extra positional argument.
+        super().__init__(msg, *args, **kw)
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/__init__.py b/.venv/lib/python3.12/site-packages/networkx/generators/__init__.py
new file mode 100644
index 0000000..6ec027c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/__init__.py
@@ -0,0 +1,34 @@
+"""
+A package for generating various graphs in networkx.
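+
+All of the public generator functions below are re-exported at the top
+level of :mod:`networkx`, so, for example (``complete_graph`` is defined
+in :mod:`networkx.generators.classic`):
+
+>>> import networkx as nx
+>>> nx.complete_graph(5).number_of_edges()
+10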
+
+"""
+
+from networkx.generators.atlas import *
+from networkx.generators.classic import *
+from networkx.generators.cographs import *
+from networkx.generators.community import *
+from networkx.generators.degree_seq import *
+from networkx.generators.directed import *
+from networkx.generators.duplication import *
+from networkx.generators.ego import *
+from networkx.generators.expanders import *
+from networkx.generators.geometric import *
+from networkx.generators.harary_graph import *
+from networkx.generators.internet_as_graphs import *
+from networkx.generators.intersection import *
+from networkx.generators.interval_graph import *
+from networkx.generators.joint_degree_seq import *
+from networkx.generators.lattice import *
+from networkx.generators.line import *
+from networkx.generators.mycielski import *
+from networkx.generators.nonisomorphic_trees import *
+from networkx.generators.random_clustered import *
+from networkx.generators.random_graphs import *
+from networkx.generators.small import *
+from networkx.generators.social import *
+from networkx.generators.spectral_graph_forge import *
+from networkx.generators.stochastic import *
+from networkx.generators.sudoku import *
+from networkx.generators.time_series import *
+from networkx.generators.trees import *
+from networkx.generators.triads import *
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/atlas.dat.gz b/.venv/lib/python3.12/site-packages/networkx/generators/atlas.dat.gz
new file mode 100644
index 0000000..b0a9870
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/atlas.dat.gz differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/atlas.py b/.venv/lib/python3.12/site-packages/networkx/generators/atlas.py
new file mode 100644
index 0000000..000d478
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/atlas.py
@@ -0,0 +1,227 @@
+"""
+Generators for the small graph atlas.
+"""
+
+import gzip
+import importlib.resources
+from itertools import islice
+
+import networkx as nx
+
+__all__ = ["graph_atlas", "graph_atlas_g"]
+
+#: The total number of graphs in the atlas.
+#:
+#: The graphs are labeled starting from 0 and extending to (but not
+#: including) this number.
+NUM_GRAPHS = 1253
+
+#: The path to the data file containing the graph edge lists.
+#:
+#: This is the absolute path of the gzipped text file containing the
+#: edge list for each graph in the atlas. The file contains one entry
+#: per graph in the atlas, in sequential order, starting from graph
+#: number 0 and extending through graph number 1252 (see
+#: :data:`NUM_GRAPHS`). Each entry looks like
+#:
+#: .. sourcecode:: text
+#:
+#:     GRAPH 6
+#:     NODES 3
+#:     0 1
+#:     0 2
+#:
+#: where the first two lines are the graph's index in the atlas and the
+#: number of nodes in the graph, and the remaining lines are the edge
+#: list.
+#:
+#: This file was generated from a Python list of graphs via code like
+#: the following::
+#:
+#:     import gzip
+#:     from networkx.generators.atlas import graph_atlas_g
+#:     from networkx.readwrite.edgelist import write_edgelist
+#:
+#:     with gzip.open('atlas.dat.gz', 'wb') as f:
+#:         for i, G in enumerate(graph_atlas_g()):
+#:             f.write(bytes(f'GRAPH {i}\n', encoding='utf-8'))
+#:             f.write(bytes(f'NODES {len(G)}\n', encoding='utf-8'))
+#:             write_edgelist(G, f, data=False)
+#:
+
+# Path to the atlas file
+ATLAS_FILE = importlib.resources.files("networkx.generators") / "atlas.dat.gz"
+
+
+def _generate_graphs():
+    """Sequentially read the file containing the edge list data for the
+    graphs in the atlas and generate the graphs one at a time.
+
+    This function reads the file given in :data:`.ATLAS_FILE`.
+
+    """
+    with gzip.open(ATLAS_FILE, "rb") as f:
+        line = f.readline()
+        while line and line.startswith(b"GRAPH"):
+            # The first two lines of each entry tell us the index of the
+            # graph in the list and the number of nodes in the graph.
+            # They look like this:
+            #
+            #     GRAPH 3
+            #     NODES 2
+            #
+            graph_index = int(line[6:].rstrip())
+            line = f.readline()
+            num_nodes = int(line[6:].rstrip())
+            # The remaining lines contain the edge list, until the next
+            # GRAPH line (or until the end of the file).
+            edgelist = []
+            line = f.readline()
+            while line and not line.startswith(b"GRAPH"):
+                edgelist.append(line.rstrip())
+                line = f.readline()
+            G = nx.Graph()
+            G.name = f"G{graph_index}"
+            G.add_nodes_from(range(num_nodes))
+            G.add_edges_from(tuple(map(int, e.split())) for e in edgelist)
+            yield G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def graph_atlas(i):
+    """Returns graph number `i` from the Graph Atlas.
+
+    For more information, see :func:`.graph_atlas_g`.
+
+    Parameters
+    ----------
+    i : int
+        The index of the graph from the atlas to get. The graph at index
+        0 is assumed to be the null graph.
+
+    Returns
+    -------
+    Graph
+        The :class:`~networkx.Graph` object corresponding to graph number
+        *i* in the Graph Atlas.
+
+    See also
+    --------
+    graph_atlas_g
+
+    Notes
+    -----
+    The time required by this function increases linearly with the
+    argument `i`, since it reads a large file sequentially in order to
+    generate the graph [1]_.
+
+    References
+    ----------
+    .. [1] Ronald C. Read and Robin J. Wilson, *An Atlas of Graphs*.
+       Oxford University Press, 1998.
+
+    """
+    if not (0 <= i < NUM_GRAPHS):
+        raise ValueError(f"index must be between 0 and {NUM_GRAPHS}")
+    return next(islice(_generate_graphs(), i, None))
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def graph_atlas_g():
+    """Returns the list of all graphs with up to seven nodes named in the
+    Graph Atlas.
+
+    The graphs are listed in increasing order by
+
+    1. number of nodes,
+    2. number of edges,
+    3. degree sequence (for example 111223 < 112222),
+    4. number of automorphisms,
+
+    in that order, with three exceptions as described in the *Notes*
+    section below. This causes the list to correspond with the index of
+    the graphs in the Graph Atlas [atlas]_, with the first graph,
+    ``G[0]``, being the null graph.
+
+    Returns
+    -------
+    list
+        A list of :class:`~networkx.Graph` objects, the one at index *i*
+        corresponding to the graph *i* in the Graph Atlas.
+
+    Examples
+    --------
+    >>> from pprint import pprint
+    >>> atlas = nx.graph_atlas_g()
+
+    There are 1253 graphs in the atlas:
+
+    >>> len(atlas)
+    1253
+
+    The number of graphs with *n* nodes, where *n* ranges from 0 to 7:
+
+    >>> from collections import Counter
+    >>> num_nodes_per_graph = [len(G) for G in atlas]
+    >>> Counter(num_nodes_per_graph)
+    Counter({7: 1044, 6: 156, 5: 34, 4: 11, 3: 4, 2: 2, 0: 1, 1: 1})
+
+    Since the atlas is ordered by the number of nodes in the graph, all graphs
+    with *n* nodes can be obtained by slicing the atlas. For example, all
+    graphs with 5 nodes:
+
+    >>> G5_list = atlas[19:53]
+    >>> all(len(G) == 5 for G in G5_list)
+    True
+
+    Or all graphs with at least 3 nodes but fewer than 7 nodes:
+
+    >>> G3_6_list = atlas[4:209]
+
+    More generally, the indices that partition the atlas by the number of nodes
+    per graph:
+
+    >>> import itertools
+    >>> partition_indices = [0] + list(
+    ...     itertools.accumulate(Counter(num_nodes_per_graph).values())  # cumsum
+    ... )
+    >>> partition_indices
+    [0, 1, 2, 4, 8, 19, 53, 209, 1253]
+    >>> partition_mapping = dict(enumerate(itertools.pairwise(partition_indices)))
+    >>> pprint(partition_mapping)
+    {0: (0, 1),
+     1: (1, 2),
+     2: (2, 4),
+     3: (4, 8),
+     4: (8, 19),
+     5: (19, 53),
+     6: (53, 209),
+     7: (209, 1253)}
+
+    See also
+    --------
+    graph_atlas
+
+    Notes
+    -----
+    This function may be expensive in both time and space, since it
+    reads a large file sequentially in order to populate the list.
+
+    Although the NetworkX atlas functions match the order of graphs
+    given in the "Atlas of Graphs" book, there are (at least) three
+    errors in the ordering described in the book. The following three
+    pairs of graphs violate the lexicographically nondecreasing sorted
+    degree sequence rule:
+
+    - graphs 55 and 56 with degree sequences 001111 and 000112,
+    - graphs 1007 and 1008 with degree sequences 3333444 and 3333336,
+    - graphs 1012 and 1013 with degree sequences 1244555 and 1244456.
+
+    References
+    ----------
+    .. [atlas] Ronald C. Read and Robin J. Wilson,
+       *An Atlas of Graphs*.
+       Oxford University Press, 1998.
+
+    """
+    return list(_generate_graphs())
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/classic.py b/.venv/lib/python3.12/site-packages/networkx/generators/classic.py
new file mode 100644
index 0000000..a461e7b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/classic.py
@@ -0,0 +1,1068 @@
+"""Generators for some classic graphs.
+
+The typical graph builder function is called as follows:
+
+>>> G = nx.complete_graph(100)
+
+returning the complete graph on n nodes labeled 0, ..., 99
+as a simple graph. Except for `empty_graph`, all the functions
+in this module return a Graph class (i.e. a simple, undirected graph).
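+
+Most of the generators accept a ``create_using`` argument to request a
+different graph type; for instance (mirroring the ``complete_graph``
+example further down in this module):
+
+>>> D = nx.complete_graph(4, nx.DiGraph())
+>>> D.is_directed()
+True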
+
+"""
+
+import itertools
+import numbers
+
+import networkx as nx
+from networkx.classes import Graph
+from networkx.exception import NetworkXError
+from networkx.utils import nodes_or_number, pairwise
+
+__all__ = [
+    "balanced_tree",
+    "barbell_graph",
+    "binomial_tree",
+    "complete_graph",
+    "complete_multipartite_graph",
+    "circular_ladder_graph",
+    "circulant_graph",
+    "cycle_graph",
+    "dorogovtsev_goltsev_mendes_graph",
+    "empty_graph",
+    "full_rary_tree",
+    "kneser_graph",
+    "ladder_graph",
+    "lollipop_graph",
+    "null_graph",
+    "path_graph",
+    "star_graph",
+    "tadpole_graph",
+    "trivial_graph",
+    "turan_graph",
+    "wheel_graph",
+]
+
+
+# -------------------------------------------------------------------
+# Some Classic Graphs
+# -------------------------------------------------------------------
+
+
+def _tree_edges(n, r):
+    # helper function for trees
+    # yields edges in rooted tree at 0 with n nodes and branching ratio r
+    if n == 0:
+        return
+    nodes = iter(range(n))
+    parents = [next(nodes)]  # stack of max length r
+    while parents:
+        source = parents.pop(0)
+        for i in range(r):
+            try:
+                target = next(nodes)
+                parents.append(target)
+                yield source, target
+            except StopIteration:
+                break
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def full_rary_tree(r, n, create_using=None):
+    """Creates a full r-ary tree of `n` nodes.
+
+    Sometimes called a k-ary, n-ary, or m-ary tree.
+    "... all non-leaf nodes have exactly r children and all levels
+    are full except for some rightmost position of the bottom level
+    (if a leaf at the bottom level is missing, then so are all of the
+    leaves to its right.)" [1]_
+
+    .. plot::
+
+        >>> nx.draw(nx.full_rary_tree(2, 10))
+
+    Parameters
+    ----------
+    r : int
+        branching factor of the tree
+    n : int
+        Number of nodes in the tree
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    Returns
+    -------
+    G : networkx Graph
+        An r-ary tree with n nodes
+
+    References
+    ----------
+    .. [1] An introduction to data structures and algorithms,
+       James Andrew Storer, Birkhauser Boston 2001, (page 225).
+    """
+    G = empty_graph(n, create_using)
+    G.add_edges_from(_tree_edges(n, r))
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def kneser_graph(n, k):
+    """Returns the Kneser Graph with parameters `n` and `k`.
+
+    The nodes of the Kneser Graph are the k-element subsets of the integers
+    between 0 and ``n-1``. Two nodes are adjacent if their corresponding
+    sets are disjoint.
+
+    Parameters
+    ----------
+    n : int
+        Number of integers from which to make node subsets.
+        Subsets are drawn from ``set(range(n))``.
+    k : int
+        Size of the subsets.
+ + Returns + ------- + G : NetworkX Graph + + Examples + -------- + >>> G = nx.kneser_graph(5, 2) + >>> G.number_of_nodes() + 10 + >>> G.number_of_edges() + 15 + >>> nx.is_isomorphic(G, nx.petersen_graph()) + True + """ + if n <= 0: + raise NetworkXError("n should be greater than zero") + if k <= 0 or k > n: + raise NetworkXError("k should be greater than zero and smaller than n") + + G = nx.Graph() + # Create all k-subsets of [0, 1, ..., n-1] + subsets = list(itertools.combinations(range(n), k)) + + if 2 * k > n: + G.add_nodes_from(subsets) + + universe = set(range(n)) + comb = itertools.combinations # only to make it all fit on one line + G.add_edges_from((s, t) for s in subsets for t in comb(universe - set(s), k)) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def balanced_tree(r, h, create_using=None): + """Returns the perfectly balanced `r`-ary tree of height `h`. + + .. plot:: + + >>> nx.draw(nx.balanced_tree(2, 3)) + + Parameters + ---------- + r : int + Branching factor of the tree; each node will have `r` + children. + + h : int + Height of the tree. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : NetworkX graph + A balanced `r`-ary tree of height `h`. + + Notes + ----- + This is the rooted tree where all leaves are at distance `h` from + the root. The root has degree `r` and all other internal nodes + have degree `r + 1`. + + Node labels are integers, starting from zero. + + A balanced tree is also known as a *complete r-ary tree*. + + """ + # The number of nodes in the balanced tree is `1 + r + ... + r^h`, + # which is computed by using the closed-form formula for a geometric + # sum with ratio `r`. In the special case that `r` is 1, the number + # of nodes is simply `h + 1` (since the tree is actually a path + # graph). + if r == 1: + n = h + 1 + else: + # This must be an integer if both `r` and `h` are integers. If + # they are not, we force integer division anyway. + n = (1 - r ** (h + 1)) // (1 - r) + return full_rary_tree(r, n, create_using=create_using) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def barbell_graph(m1, m2, create_using=None): + """Returns the Barbell Graph: two complete graphs connected by a path. + + .. plot:: + + >>> nx.draw(nx.barbell_graph(4, 2)) + + Parameters + ---------- + m1 : int + Size of the left and right barbells, must be greater than 2. + + m2 : int + Length of the path connecting the barbells. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Only undirected Graphs are supported. + + Returns + ------- + G : NetworkX graph + A barbell graph. + + Notes + ----- + + + Two identical complete graphs $K_{m1}$ form the left and right bells, + and are connected by a path $P_{m2}$. + + The `2*m1+m2` nodes are numbered + `0, ..., m1-1` for the left barbell, + `m1, ..., m1+m2-1` for the path, + and `m1+m2, ..., 2*m1+m2-1` for the right barbell. + + The 3 subgraphs are joined via the edges `(m1-1, m1)` and + `(m1+m2-1, m1+m2)`. If `m2=0`, this is merely two complete + graphs joined together. + + This graph is an extremal example in David Aldous + and Jim Fill's e-text on Random Walks on Graphs. 
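+
+    Examples
+    --------
+    An illustrative doctest (the counts and the two joining edges follow
+    from the numbering described above):
+
+    >>> G = nx.barbell_graph(3, 2)
+    >>> G.number_of_nodes(), G.number_of_edges()
+    (8, 9)
+    >>> G.has_edge(2, 3) and G.has_edge(4, 5)
+    True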
+ + """ + if m1 < 2: + raise NetworkXError("Invalid graph description, m1 should be >=2") + if m2 < 0: + raise NetworkXError("Invalid graph description, m2 should be >=0") + + # left barbell + G = complete_graph(m1, create_using) + if G.is_directed(): + raise NetworkXError("Directed Graph not supported") + + # connecting path + G.add_nodes_from(range(m1, m1 + m2 - 1)) + if m2 > 1: + G.add_edges_from(pairwise(range(m1, m1 + m2))) + + # right barbell + G.add_edges_from( + (u, v) for u in range(m1 + m2, 2 * m1 + m2) for v in range(u + 1, 2 * m1 + m2) + ) + + # connect it up + G.add_edge(m1 - 1, m1) + if m2 > 0: + G.add_edge(m1 + m2 - 1, m1 + m2) + + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def binomial_tree(n, create_using=None): + """Returns the Binomial Tree of order n. + + The binomial tree of order 0 consists of a single node. A binomial tree of order k + is defined recursively by linking two binomial trees of order k-1: the root of one is + the leftmost child of the root of the other. + + .. plot:: + + >>> nx.draw(nx.binomial_tree(3)) + + Parameters + ---------- + n : int + Order of the binomial tree. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : NetworkX graph + A binomial tree of $2^n$ nodes and $2^n - 1$ edges. + + """ + G = nx.empty_graph(1, create_using) + + N = 1 + for i in range(n): + # Use G.edges() to ensure 2-tuples. G.edges is 3-tuple for MultiGraph + edges = [(u + N, v + N) for (u, v) in G.edges()] + G.add_edges_from(edges) + G.add_edge(0, N) + N *= 2 + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number(0) +def complete_graph(n, create_using=None): + """Return the complete graph `K_n` with n nodes. + + A complete graph on `n` nodes means that all pairs + of distinct nodes have an edge connecting them. + + .. plot:: + + >>> nx.draw(nx.complete_graph(5)) + + Parameters + ---------- + n : int or iterable container of nodes + If n is an integer, nodes are from range(n). + If n is a container of nodes, those nodes appear in the graph. + Warning: n is not checked for duplicates and if present the + resulting graph may not be as desired. Make sure you have no duplicates. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> G = nx.complete_graph(9) + >>> len(G) + 9 + >>> G.size() + 36 + >>> G = nx.complete_graph(range(11, 14)) + >>> list(G.nodes()) + [11, 12, 13] + >>> G = nx.complete_graph(4, nx.DiGraph()) + >>> G.is_directed() + True + + """ + _, nodes = n + G = empty_graph(nodes, create_using) + if len(nodes) > 1: + if G.is_directed(): + edges = itertools.permutations(nodes, 2) + else: + edges = itertools.combinations(nodes, 2) + G.add_edges_from(edges) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def circular_ladder_graph(n, create_using=None): + """Returns the circular ladder graph $CL_n$ of length n. + + $CL_n$ consists of two concentric n-cycles in which + each of the n pairs of concentric nodes are joined by an edge. + + Node labels are the integers 0 to n-1 + + .. 
plot:: + + >>> nx.draw(nx.circular_ladder_graph(5)) + + """ + G = ladder_graph(n, create_using) + G.add_edge(0, n - 1) + G.add_edge(n, 2 * n - 1) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def circulant_graph(n, offsets, create_using=None): + r"""Returns the circulant graph $Ci_n(x_1, x_2, ..., x_m)$ with $n$ nodes. + + The circulant graph $Ci_n(x_1, ..., x_m)$ consists of $n$ nodes $0, ..., n-1$ + such that node $i$ is connected to nodes $(i + x) \mod n$ and $(i - x) \mod n$ + for all $x$ in $x_1, ..., x_m$. Thus $Ci_n(1)$ is a cycle graph. + + .. plot:: + + >>> nx.draw(nx.circulant_graph(10, [1])) + + Parameters + ---------- + n : integer + The number of nodes in the graph. + offsets : list of integers + A list of node offsets, $x_1$ up to $x_m$, as described above. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX Graph of type create_using + + Examples + -------- + Many well-known graph families are subfamilies of the circulant graphs; + for example, to create the cycle graph on n points, we connect every + node to nodes on either side (with offset plus or minus one). For n = 10, + + >>> G = nx.circulant_graph(10, [1]) + >>> edges = [ + ... (0, 9), + ... (0, 1), + ... (1, 2), + ... (2, 3), + ... (3, 4), + ... (4, 5), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... (8, 9), + ... ] + >>> sorted(edges) == sorted(G.edges()) + True + + Similarly, we can create the complete graph + on 5 points with the set of offsets [1, 2]: + + >>> G = nx.circulant_graph(5, [1, 2]) + >>> edges = [ + ... (0, 1), + ... (0, 2), + ... (0, 3), + ... (0, 4), + ... (1, 2), + ... (1, 3), + ... (1, 4), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... ] + >>> sorted(edges) == sorted(G.edges()) + True + + """ + G = empty_graph(n, create_using) + for i in range(n): + for j in offsets: + G.add_edge(i, (i - j) % n) + G.add_edge(i, (i + j) % n) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number(0) +def cycle_graph(n, create_using=None): + """Returns the cycle graph $C_n$ of cyclically connected nodes. + + $C_n$ is a path with its two end-nodes connected. + + .. plot:: + + >>> nx.draw(nx.cycle_graph(5)) + + Parameters + ---------- + n : int or iterable container of nodes + If n is an integer, nodes are from `range(n)`. + If n is a container of nodes, those nodes appear in the graph. + Warning: n is not checked for duplicates and if present the + resulting graph may not be as desired. Make sure you have no duplicates. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + If create_using is directed, the direction is in increasing order. + + """ + _, nodes = n + G = empty_graph(nodes, create_using) + G.add_edges_from(pairwise(nodes, cyclic=True)) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def dorogovtsev_goltsev_mendes_graph(n, create_using=None): + """Returns the hierarchically constructed Dorogovtsev--Goltsev--Mendes graph. + + The Dorogovtsev--Goltsev--Mendes [1]_ procedure deterministically produces a + scale-free graph with ``3/2 * (3**(n-1) + 1)`` nodes + and ``3**n`` edges for a given `n`. + + Note that `n` denotes the number of times the state transition is applied, + starting from the base graph with ``n = 0`` (no transitions), as in [2]_. 
+ This is different from the parameter ``t = n - 1`` in [1]_. + + .. plot:: + + >>> nx.draw(nx.dorogovtsev_goltsev_mendes_graph(3)) + + Parameters + ---------- + n : integer + The generation number. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. Directed graphs and multigraphs are not supported. + + Returns + ------- + G : NetworkX `Graph` + + Raises + ------ + NetworkXError + If `n` is less than zero. + + If `create_using` is a directed graph or multigraph. + + Examples + -------- + >>> G = nx.dorogovtsev_goltsev_mendes_graph(3) + >>> G.number_of_nodes() + 15 + >>> G.number_of_edges() + 27 + >>> nx.is_planar(G) + True + + References + ---------- + .. [1] S. N. Dorogovtsev, A. V. Goltsev and J. F. F. Mendes, + "Pseudofractal scale-free web", Physical Review E 65, 066122, 2002. + https://arxiv.org/pdf/cond-mat/0112143.pdf + .. [2] Weisstein, Eric W. "Dorogovtsev--Goltsev--Mendes Graph". + From MathWorld--A Wolfram Web Resource. + https://mathworld.wolfram.com/Dorogovtsev-Goltsev-MendesGraph.html + """ + if n < 0: + raise NetworkXError("n must be greater than or equal to 0") + + G = empty_graph(0, create_using) + if G.is_directed(): + raise NetworkXError("directed graph not supported") + if G.is_multigraph(): + raise NetworkXError("multigraph not supported") + + G.add_edge(0, 1) + new_node = 2 # next node to be added + for _ in range(n): # iterate over number of generations. + new_edges = [] + for u, v in G.edges(): + new_edges.append((u, new_node)) + new_edges.append((v, new_node)) + new_node += 1 + + G.add_edges_from(new_edges) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number(0) +def empty_graph(n=0, create_using=None, default=Graph): + """Returns the empty graph with n nodes and zero edges. + + .. plot:: + + >>> nx.draw(nx.empty_graph(5)) + + Parameters + ---------- + n : int or iterable container of nodes (default = 0) + If n is an integer, nodes are from `range(n)`. + If n is a container of nodes, those nodes appear in the graph. + create_using : Graph Instance, Constructor or None + Indicator of type of graph to return. + If a Graph-type instance, then clear and use it. + If None, use the `default` constructor. + If a constructor, call it to create an empty graph. + default : Graph constructor (optional, default = nx.Graph) + The constructor to use if create_using is None. + If None, then nx.Graph is used. + This is used when passing an unknown `create_using` value + through your home-grown function to `empty_graph` and + you want a default constructor other than nx.Graph. + + Examples + -------- + >>> G = nx.empty_graph(10) + >>> G.number_of_nodes() + 10 + >>> G.number_of_edges() + 0 + >>> G = nx.empty_graph("ABC") + >>> G.number_of_nodes() + 3 + >>> sorted(G) + ['A', 'B', 'C'] + + Notes + ----- + The variable create_using should be a Graph Constructor or a + "graph"-like object. Constructors, e.g. `nx.Graph` or `nx.MultiGraph` + will be used to create the returned graph. "graph"-like objects + will be cleared (nodes and edges will be removed) and refitted as + an empty "graph" with nodes specified in n. This capability + is useful for specifying the class-nature of the resulting empty + "graph" (i.e. Graph, DiGraph, MyWeirdGraphClass, etc.). + + The variable create_using has three main uses: + Firstly, the variable create_using can be used to create an + empty digraph, multigraph, etc. 
For example, + + >>> n = 10 + >>> G = nx.empty_graph(n, create_using=nx.DiGraph) + + will create an empty digraph on n nodes. + + Secondly, one can pass an existing graph (digraph, multigraph, + etc.) via create_using. For example, if G is an existing graph + (resp. digraph, multigraph, etc.), then empty_graph(n, create_using=G) + will empty G (i.e. delete all nodes and edges using G.clear()) + and then add n nodes and zero edges, and return the modified graph. + + Thirdly, when constructing your home-grown graph creation function + you can use empty_graph to construct the graph by passing a user + defined create_using to empty_graph. In this case, if you want the + default constructor to be other than nx.Graph, specify `default`. + + >>> def mygraph(n, create_using=None): + ... G = nx.empty_graph(n, create_using, nx.MultiGraph) + ... G.add_edges_from([(0, 1), (0, 1)]) + ... return G + >>> G = mygraph(3) + >>> G.is_multigraph() + True + >>> G = mygraph(3, nx.Graph) + >>> G.is_multigraph() + False + + See also create_empty_copy(G). + + """ + if create_using is None: + G = default() + elif isinstance(create_using, type): + G = create_using() + elif not hasattr(create_using, "adj"): + raise TypeError("create_using is not a valid NetworkX graph type or instance") + else: + # create_using is a NetworkX style Graph + create_using.clear() + G = create_using + + _, nodes = n + G.add_nodes_from(nodes) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def ladder_graph(n, create_using=None): + """Returns the Ladder graph of length n. + + This is two paths of n nodes, with + each pair connected by a single edge. + + Node labels are the integers 0 to 2*n - 1. + + .. plot:: + + >>> nx.draw(nx.ladder_graph(5)) + + """ + G = empty_graph(2 * n, create_using) + if G.is_directed(): + raise NetworkXError("Directed Graph not supported") + G.add_edges_from(pairwise(range(n))) + G.add_edges_from(pairwise(range(n, 2 * n))) + G.add_edges_from((v, v + n) for v in range(n)) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number([0, 1]) +def lollipop_graph(m, n, create_using=None): + """Returns the Lollipop Graph; ``K_m`` connected to ``P_n``. + + This is the Barbell Graph without the right barbell. + + .. plot:: + + >>> nx.draw(nx.lollipop_graph(3, 4)) + + Parameters + ---------- + m, n : int or iterable container of nodes + If an integer, nodes are from ``range(m)`` and ``range(m, m+n)``. + If a container of nodes, those nodes appear in the graph. + Warning: `m` and `n` are not checked for duplicates and if present the + resulting graph may not be as desired. Make sure you have no duplicates. + + The nodes for `m` appear in the complete graph $K_m$ and the nodes + for `n` appear in the path $P_n$ + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + Networkx graph + A complete graph with `m` nodes connected to a path of length `n`. + + Notes + ----- + The 2 subgraphs are joined via an edge ``(m-1, m)``. + If ``n=0``, this is merely a complete graph. + + (This graph is an extremal example in David Aldous and Jim + Fill's etext on Random Walks on Graphs.) 
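+
+    Examples
+    --------
+    An illustrative doctest (node and edge counts follow from the
+    construction described above; ``(2, 3)`` is the joining edge):
+
+    >>> G = nx.lollipop_graph(3, 4)
+    >>> G.number_of_nodes(), G.number_of_edges()
+    (7, 7)
+    >>> G.has_edge(2, 3)
+    True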
+ + """ + m, m_nodes = m + M = len(m_nodes) + if M < 2: + raise NetworkXError("Invalid description: m should indicate at least 2 nodes") + + n, n_nodes = n + if isinstance(m, numbers.Integral) and isinstance(n, numbers.Integral): + n_nodes = list(range(M, M + n)) + N = len(n_nodes) + + # the ball + G = complete_graph(m_nodes, create_using) + if G.is_directed(): + raise NetworkXError("Directed Graph not supported") + + # the stick + G.add_nodes_from(n_nodes) + if N > 1: + G.add_edges_from(pairwise(n_nodes)) + + if len(G) != M + N: + raise NetworkXError("Nodes must be distinct in containers m and n") + + # connect ball to stick + if M > 0 and N > 0: + G.add_edge(m_nodes[-1], n_nodes[0]) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def null_graph(create_using=None): + """Returns the Null graph with no nodes or edges. + + See empty_graph for the use of create_using. + + """ + G = empty_graph(0, create_using) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number(0) +def path_graph(n, create_using=None): + """Returns the Path graph `P_n` of linearly connected nodes. + + .. plot:: + + >>> nx.draw(nx.path_graph(5)) + + Parameters + ---------- + n : int or iterable + If an integer, nodes are 0 to n - 1. + If an iterable of nodes, in the order they appear in the path. + Warning: n is not checked for duplicates and if present the + resulting graph may not be as desired. Make sure you have no duplicates. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + """ + _, nodes = n + G = empty_graph(nodes, create_using) + G.add_edges_from(pairwise(nodes)) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number(0) +def star_graph(n, create_using=None): + """Return the star graph + + The star graph consists of one center node connected to n outer nodes. + + .. plot:: + + >>> nx.draw(nx.star_graph(6)) + + Parameters + ---------- + n : int or iterable + If an integer, node labels are 0 to n with center 0. + If an iterable of nodes, the center is the first. + Warning: n is not checked for duplicates and if present the + resulting graph may not be as desired. Make sure you have no duplicates. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + The graph has n+1 nodes for integer n. + So star_graph(3) is the same as star_graph(range(4)). + """ + n, nodes = n + if isinstance(n, numbers.Integral): + nodes.append(int(n)) # there should be n+1 nodes + G = empty_graph(nodes, create_using) + if G.is_directed(): + raise NetworkXError("Directed Graph not supported") + + if len(nodes) > 1: + hub, *spokes = nodes + G.add_edges_from((hub, node) for node in spokes) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number([0, 1]) +def tadpole_graph(m, n, create_using=None): + """Returns the (m,n)-tadpole graph; ``C_m`` connected to ``P_n``. + + This graph on m+n nodes connects a cycle of size `m` to a path of length `n`. + It looks like a tadpole. It is also called a kite graph or a dragon graph. + + .. plot:: + + >>> nx.draw(nx.tadpole_graph(3, 5)) + + Parameters + ---------- + m, n : int or iterable container of nodes + If an integer, nodes are from ``range(m)`` and ``range(m,m+n)``. + If a container of nodes, those nodes appear in the graph. 
+ Warning: `m` and `n` are not checked for duplicates and if present the + resulting graph may not be as desired. + + The nodes for `m` appear in the cycle graph $C_m$ and the nodes + for `n` appear in the path $P_n$. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + Networkx graph + A cycle of size `m` connected to a path of length `n`. + + Raises + ------ + NetworkXError + If ``m < 2``. The tadpole graph is undefined for ``m<2``. + + Notes + ----- + The 2 subgraphs are joined via an edge ``(m-1, m)``. + If ``n=0``, this is a cycle graph. + `m` and/or `n` can be a container of nodes instead of an integer. + + """ + m, m_nodes = m + M = len(m_nodes) + if M < 2: + raise NetworkXError("Invalid description: m should indicate at least 2 nodes") + + n, n_nodes = n + if isinstance(m, numbers.Integral) and isinstance(n, numbers.Integral): + n_nodes = list(range(M, M + n)) + + # the circle + G = cycle_graph(m_nodes, create_using) + if G.is_directed(): + raise NetworkXError("Directed Graph not supported") + + # the stick + nx.add_path(G, [m_nodes[-1]] + list(n_nodes)) + + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def trivial_graph(create_using=None): + """Return the Trivial graph with one node (with label 0) and no edges. + + .. plot:: + + >>> nx.draw(nx.trivial_graph(), with_labels=True) + + """ + G = empty_graph(1, create_using) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def turan_graph(n, r): + r"""Return the Turan Graph + + The Turan Graph is a complete multipartite graph on $n$ nodes + with $r$ disjoint subsets. That is, edges connect each node to + every node not in its subset. + + Given $n$ and $r$, we create a complete multipartite graph with + $r-(n \mod r)$ partitions of size $n/r$, rounded down, and + $n \mod r$ partitions of size $n/r+1$, rounded down. + + .. plot:: + + >>> nx.draw(nx.turan_graph(6, 2)) + + Parameters + ---------- + n : int + The number of nodes. + r : int + The number of partitions. + Must be less than or equal to n. + + Notes + ----- + Must satisfy $1 <= r <= n$. + The graph has $(r-1)(n^2)/(2r)$ edges, rounded down. + """ + + if not 1 <= r <= n: + raise NetworkXError("Must satisfy 1 <= r <= n") + + partitions = [n // r] * (r - (n % r)) + [n // r + 1] * (n % r) + G = complete_multipartite_graph(*partitions) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number(0) +def wheel_graph(n, create_using=None): + """Return the wheel graph + + The wheel graph consists of a hub node connected to a cycle of (n-1) nodes. + + .. plot:: + + >>> nx.draw(nx.wheel_graph(5)) + + Parameters + ---------- + n : int or iterable + If an integer, node labels are 0 to n with center 0. + If an iterable of nodes, the center is the first. + Warning: n is not checked for duplicates and if present the + resulting graph may not be as desired. Make sure you have no duplicates. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Node labels are the integers 0 to n - 1. 
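+
+    Examples
+    --------
+    An illustrative doctest (the hub is node 0 and the rim is a cycle on
+    the remaining nodes, so the hub has degree n - 1):
+
+    >>> G = nx.wheel_graph(5)
+    >>> G.number_of_nodes(), G.number_of_edges()
+    (5, 8)
+    >>> G.degree(0)
+    4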
+ """ + _, nodes = n + G = empty_graph(nodes, create_using) + if G.is_directed(): + raise NetworkXError("Directed Graph not supported") + + if len(nodes) > 1: + hub, *rim = nodes + G.add_edges_from((hub, node) for node in rim) + if len(rim) > 1: + G.add_edges_from(pairwise(rim, cyclic=True)) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def complete_multipartite_graph(*subset_sizes): + """Returns the complete multipartite graph with the specified subset sizes. + + .. plot:: + + >>> nx.draw(nx.complete_multipartite_graph(1, 2, 3)) + + Parameters + ---------- + subset_sizes : tuple of integers or tuple of node iterables + The arguments can either all be integer number of nodes or they + can all be iterables of nodes. If integers, they represent the + number of nodes in each subset of the multipartite graph. + If iterables, each is used to create the nodes for that subset. + The length of subset_sizes is the number of subsets. + + Returns + ------- + G : NetworkX Graph + Returns the complete multipartite graph with the specified subsets. + + For each node, the node attribute 'subset' is an integer + indicating which subset contains the node. + + Examples + -------- + Creating a complete tripartite graph, with subsets of one, two, and three + nodes, respectively. + + >>> G = nx.complete_multipartite_graph(1, 2, 3) + >>> [G.nodes[u]["subset"] for u in G] + [0, 1, 1, 2, 2, 2] + >>> list(G.edges(0)) + [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] + >>> list(G.edges(2)) + [(2, 0), (2, 3), (2, 4), (2, 5)] + >>> list(G.edges(4)) + [(4, 0), (4, 1), (4, 2)] + + >>> G = nx.complete_multipartite_graph("a", "bc", "def") + >>> [G.nodes[u]["subset"] for u in sorted(G)] + [0, 1, 1, 2, 2, 2] + + Notes + ----- + This function generalizes several other graph builder functions. + + - If no subset sizes are given, this returns the null graph. + - If a single subset size `n` is given, this returns the empty graph on + `n` nodes. + - If two subset sizes `m` and `n` are given, this returns the complete + bipartite graph on `m + n` nodes. + - If subset sizes `1` and `n` are given, this returns the star graph on + `n + 1` nodes. + + See also + -------- + complete_bipartite_graph + """ + # The complete multipartite graph is an undirected simple graph. + G = Graph() + + if len(subset_sizes) == 0: + return G + + # set up subsets of nodes + try: + extents = pairwise(itertools.accumulate((0,) + subset_sizes)) + subsets = [range(start, end) for start, end in extents] + except TypeError: + subsets = subset_sizes + else: + if any(size < 0 for size in subset_sizes): + raise NetworkXError(f"Negative number of nodes not valid: {subset_sizes}") + + # add nodes with subset attribute + # while checking that ints are not mixed with iterables + try: + for i, subset in enumerate(subsets): + G.add_nodes_from(subset, subset=i) + except TypeError as err: + raise NetworkXError("Arguments must be all ints or all iterables") from err + + # Across subsets, all nodes should be adjacent. + # We can use itertools.combinations() because undirected. 
+ for subset1, subset2 in itertools.combinations(subsets, 2): + G.add_edges_from(itertools.product(subset1, subset2)) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/cographs.py b/.venv/lib/python3.12/site-packages/networkx/generators/cographs.py new file mode 100644 index 0000000..6635b32 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/cographs.py @@ -0,0 +1,68 @@ +r"""Generators for cographs + +A cograph is a graph containing no path on four vertices. +Cographs or $P_4$-free graphs can be obtained from a single vertex +by disjoint union and complementation operations. + +References +---------- +.. [0] D.G. Corneil, H. Lerchs, L.Stewart Burlingham, + "Complement reducible graphs", + Discrete Applied Mathematics, Volume 3, Issue 3, 1981, Pages 163-174, + ISSN 0166-218X. +""" + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["random_cograph"] + + +@py_random_state(1) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_cograph(n, seed=None): + r"""Returns a random cograph with $2 ^ n$ nodes. + + A cograph is a graph containing no path on four vertices. + Cographs or $P_4$-free graphs can be obtained from a single vertex + by disjoint union and complementation operations. + + This generator starts off from a single vertex and performs disjoint + union and full join operations on itself. + The decision on which operation will take place is random. + + Parameters + ---------- + n : int + The order of the cograph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : A random graph containing no path on four vertices. + + See Also + -------- + full_join + union + + References + ---------- + .. [1] D.G. Corneil, H. Lerchs, L.Stewart Burlingham, + "Complement reducible graphs", + Discrete Applied Mathematics, Volume 3, Issue 3, 1981, Pages 163-174, + ISSN 0166-218X. + """ + R = nx.empty_graph(1) + + for i in range(n): + RR = nx.relabel_nodes(R.copy(), lambda x: x + len(R)) + + if seed.randint(0, 1) == 0: + R = nx.full_join(R, RR) + else: + R = nx.disjoint_union(R, RR) + + return R diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/community.py b/.venv/lib/python3.12/site-packages/networkx/generators/community.py new file mode 100644 index 0000000..a7f2294 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/community.py @@ -0,0 +1,1070 @@ +"""Generators for classes of graphs used in studying social networks.""" + +import itertools +import math + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = [ + "caveman_graph", + "connected_caveman_graph", + "relaxed_caveman_graph", + "random_partition_graph", + "planted_partition_graph", + "gaussian_random_partition_graph", + "ring_of_cliques", + "windmill_graph", + "stochastic_block_model", + "LFR_benchmark_graph", +] + + +@nx._dispatchable(graphs=None, returns_graph=True) +def caveman_graph(l, k): + """Returns a caveman graph of `l` cliques of size `k`. + + Parameters + ---------- + l : int + Number of cliques + k : int + Size of cliques + + Returns + ------- + G : NetworkX Graph + caveman graph + + Notes + ----- + This returns an undirected graph, it can be converted to a directed + graph using :func:`nx.to_directed`, or a multigraph using + ``nx.MultiGraph(nx.caveman_graph(l, k))``. 
Only the undirected version is + described in [1]_ and it is unclear which of the directed + generalizations is most useful. + + Examples + -------- + >>> G = nx.caveman_graph(3, 3) + + See also + -------- + + connected_caveman_graph + + References + ---------- + .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.' + Amer. J. Soc. 105, 493-527, 1999. + """ + # l disjoint cliques of size k + G = nx.empty_graph(l * k) + if k > 1: + for start in range(0, l * k, k): + edges = itertools.combinations(range(start, start + k), 2) + G.add_edges_from(edges) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def connected_caveman_graph(l, k): + """Returns a connected caveman graph of `l` cliques of size `k`. + + The connected caveman graph is formed by creating `n` cliques of size + `k`, then a single edge in each clique is rewired to a node in an + adjacent clique. + + Parameters + ---------- + l : int + number of cliques + k : int + size of cliques (k at least 2 or NetworkXError is raised) + + Returns + ------- + G : NetworkX Graph + connected caveman graph + + Raises + ------ + NetworkXError + If the size of cliques `k` is smaller than 2. + + Notes + ----- + This returns an undirected graph, it can be converted to a directed + graph using :func:`nx.to_directed`, or a multigraph using + ``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is + described in [1]_ and it is unclear which of the directed + generalizations is most useful. + + Examples + -------- + >>> G = nx.connected_caveman_graph(3, 3) + + References + ---------- + .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.' + Amer. J. Soc. 105, 493-527, 1999. + """ + if k < 2: + raise nx.NetworkXError( + "The size of cliques in a connected caveman graph must be at least 2." + ) + + G = nx.caveman_graph(l, k) + for start in range(0, l * k, k): + G.remove_edge(start, start + 1) + G.add_edge(start, (start - 1) % (l * k)) + return G + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def relaxed_caveman_graph(l, k, p, seed=None): + """Returns a relaxed caveman graph. + + A relaxed caveman graph starts with `l` cliques of size `k`. Edges are + then randomly rewired with probability `p` to link different cliques. + + Parameters + ---------- + l : int + Number of groups + k : int + Size of cliques + p : float + Probability of rewiring each edge. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : NetworkX Graph + Relaxed Caveman Graph + + Raises + ------ + NetworkXError + If p is not in [0,1] + + Examples + -------- + >>> G = nx.relaxed_caveman_graph(2, 3, 0.1, seed=42) + + References + ---------- + .. [1] Santo Fortunato, Community Detection in Graphs, + Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174. + https://arxiv.org/abs/0906.0612 + """ + G = nx.caveman_graph(l, k) + nodes = list(G) + for u, v in G.edges(): + if seed.random() < p: # rewire the edge + x = seed.choice(nodes) + if G.has_edge(u, x): + continue + G.remove_edge(u, v) + G.add_edge(u, x) + return G + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False): + """Returns the random partition graph with a partition of sizes. + + A partition graph is a graph of communities with sizes defined by + s in sizes. 
Nodes in the same group are connected with probability
+    p_in and nodes of different groups are connected with probability
+    p_out.
+
+    Parameters
+    ----------
+    sizes : list of ints
+        Sizes of groups
+    p_in : float
+        Probability of edges within groups
+    p_out : float
+        Probability of edges between groups
+    directed : boolean, optional (default=False)
+        Whether to create a directed graph
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    G : NetworkX Graph or DiGraph
+        random partition graph of size sum(sizes)
+
+    Raises
+    ------
+    NetworkXError
+        If p_in or p_out is not in [0, 1]
+
+    Examples
+    --------
+    >>> G = nx.random_partition_graph([10, 10, 10], 0.25, 0.01)
+    >>> len(G)
+    30
+    >>> partition = G.graph["partition"]
+    >>> len(partition)
+    3
+
+    Notes
+    -----
+    This is a generalization of the planted-l-partition described in
+    [1]_. It allows for the creation of groups of any size.
+
+    The partition is stored as a graph attribute 'partition'.
+
+    References
+    ----------
+    .. [1] Santo Fortunato 'Community Detection in Graphs' Physics Reports
+       Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
+    """
+    # Use geometric method for O(n+m) complexity algorithm
+    # partition = nx.community_sets(nx.get_node_attributes(G, 'affiliation'))
+    if not 0.0 <= p_in <= 1.0:
+        raise nx.NetworkXError("p_in must be in [0,1]")
+    if not 0.0 <= p_out <= 1.0:
+        raise nx.NetworkXError("p_out must be in [0,1]")
+
+    # create connection matrix
+    num_blocks = len(sizes)
+    p = [[p_out for s in range(num_blocks)] for r in range(num_blocks)]
+    for r in range(num_blocks):
+        p[r][r] = p_in
+
+    return stochastic_block_model(
+        sizes,
+        p,
+        nodelist=None,
+        seed=seed,
+        directed=directed,
+        selfloops=False,
+        sparse=True,
+    )
+
+
+@py_random_state(4)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
+    """Returns the planted l-partition graph.
+
+    This model partitions a graph with n=l*k vertices into
+    l groups with k vertices each. Vertices of the same
+    group are linked with a probability p_in, and vertices
+    of different groups are linked with probability p_out.
+
+    Parameters
+    ----------
+    l : int
+        Number of groups
+    k : int
+        Number of vertices in each group
+    p_in : float
+        Probability of connecting vertices within a group
+    p_out : float
+        Probability of connecting vertices between groups
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    directed : bool, optional (default=False)
+        If True return a directed graph
+
+    Returns
+    -------
+    G : NetworkX Graph or DiGraph
+        planted l-partition graph
+
+    Raises
+    ------
+    NetworkXError
+        If `p_in`, `p_out` are not in `[0, 1]`
+
+    Examples
+    --------
+    >>> G = nx.planted_partition_graph(4, 3, 0.5, 0.1, seed=42)
+
+    See Also
+    --------
+    random_partition_graph
+
+    References
+    ----------
+    .. [1] A. Condon, R.M. Karp, Algorithms for graph partitioning
+       on the planted partition model,
+       Random Struct. Algor. 18 (2001) 116-140.
+
+    .. [2] Santo Fortunato 'Community Detection in Graphs' Physics Reports
+       Volume 486, Issue 3-5 p. 75-174.
https://arxiv.org/abs/0906.0612
+    """
+    return random_partition_graph([k] * l, p_in, p_out, seed=seed, directed=directed)
+
+
+@py_random_state(6)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False, seed=None):
+    """Generate a Gaussian random partition graph.
+
+    A Gaussian random partition graph is created by generating k partitions,
+    each with a size drawn from a normal distribution with mean s and
+    variance s/v. Nodes are connected within clusters with probability p_in
+    and between clusters with probability p_out [1]_.
+
+    Parameters
+    ----------
+    n : int
+        Number of nodes in the graph
+    s : float
+        Mean cluster size
+    v : float
+        Shape parameter. The variance of cluster size distribution is s/v.
+    p_in : float
+        Probability of intra cluster connection.
+    p_out : float
+        Probability of inter cluster connection.
+    directed : boolean, optional (default=False)
+        Whether to create a directed graph or not
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    G : NetworkX Graph or DiGraph
+        gaussian random partition graph
+
+    Raises
+    ------
+    NetworkXError
+        If s > n.
+        If p_in or p_out is not in [0, 1].
+
+    Notes
+    -----
+    Note the number of partitions is dependent on s, v and n, and that the
+    last partition may be considerably smaller, as it is sized to simply
+    fill out the nodes [1]_.
+
+    See Also
+    --------
+    random_partition_graph
+
+    Examples
+    --------
+    >>> G = nx.gaussian_random_partition_graph(100, 10, 10, 0.25, 0.1)
+    >>> len(G)
+    100
+
+    References
+    ----------
+    .. [1] Ulrik Brandes, Marco Gaertler, Dorothea Wagner,
+       Experiments on Graph Clustering Algorithms,
+       In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
+    """
+    if s > n:
+        raise nx.NetworkXError("s must be <= n")
+    assigned = 0
+    sizes = []
+    while True:
+        size = int(seed.gauss(s, s / v + 0.5))
+        if size < 1:  # how to handle 0 or negative sizes?
+            continue
+        if assigned + size >= n:
+            sizes.append(n - assigned)
+            break
+        assigned += size
+        sizes.append(size)
+    return random_partition_graph(sizes, p_in, p_out, seed=seed, directed=directed)
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def ring_of_cliques(num_cliques, clique_size):
+    """Defines a "ring of cliques" graph.
+
+    A ring of cliques graph consists of cliques connected through single
+    links. Each clique is a complete graph.
+
+    Parameters
+    ----------
+    num_cliques : int
+        Number of cliques
+    clique_size : int
+        Size of cliques
+
+    Returns
+    -------
+    G : NetworkX Graph
+        ring of cliques graph
+
+    Raises
+    ------
+    NetworkXError
+        If the number of cliques is lower than 2 or
+        if the size of cliques is smaller than 2.
+
+    Examples
+    --------
+    >>> G = nx.ring_of_cliques(8, 4)
+
+    See Also
+    --------
+    connected_caveman_graph
+
+    Notes
+    -----
+    The `connected_caveman_graph` graph removes a link from each clique to
+    connect it with the next clique. Instead, the `ring_of_cliques` graph
+    simply adds the link without removing any link from the cliques.
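+
+    For example (an illustrative check: ``num_cliques * clique_size`` nodes,
+    plus one ring edge per clique on top of the clique edges):
+
+    >>> G = nx.ring_of_cliques(4, 3)
+    >>> G.number_of_nodes(), G.number_of_edges()
+    (12, 16)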
+ """ + if num_cliques < 2: + raise nx.NetworkXError("A ring of cliques must have at least two cliques") + if clique_size < 2: + raise nx.NetworkXError("The cliques must have at least two nodes") + + G = nx.Graph() + for i in range(num_cliques): + edges = itertools.combinations( + range(i * clique_size, i * clique_size + clique_size), 2 + ) + G.add_edges_from(edges) + G.add_edge( + i * clique_size + 1, (i + 1) * clique_size % (num_cliques * clique_size) + ) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def windmill_graph(n, k): + """Generate a windmill graph. + A windmill graph is a graph of `n` cliques each of size `k` that are all + joined at one node. + It can be thought of as taking a disjoint union of `n` cliques of size `k`, + selecting one point from each, and contracting all of the selected points. + Alternatively, one could generate `n` cliques of size `k-1` and one node + that is connected to all other nodes in the graph. + + Parameters + ---------- + n : int + Number of cliques + k : int + Size of cliques + + Returns + ------- + G : NetworkX Graph + windmill graph with n cliques of size k + + Raises + ------ + NetworkXError + If the number of cliques is less than two + If the size of the cliques are less than two + + Examples + -------- + >>> G = nx.windmill_graph(4, 5) + + Notes + ----- + The node labeled `0` will be the node connected to all other nodes. + Note that windmill graphs are usually denoted `Wd(k,n)`, so the parameters + are in the opposite order as the parameters of this method. + """ + if n < 2: + msg = "A windmill graph must have at least two cliques" + raise nx.NetworkXError(msg) + if k < 2: + raise nx.NetworkXError("The cliques must have at least two nodes") + + G = nx.disjoint_union_all( + itertools.chain( + [nx.complete_graph(k)], (nx.complete_graph(k - 1) for _ in range(n - 1)) + ) + ) + G.add_edges_from((0, i) for i in range(k, G.number_of_nodes())) + return G + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def stochastic_block_model( + sizes, p, nodelist=None, seed=None, directed=False, selfloops=False, sparse=True +): + """Returns a stochastic block model graph. + + This model partitions the nodes in blocks of arbitrary sizes, and places + edges between pairs of nodes independently, with a probability that depends + on the blocks. + + Parameters + ---------- + sizes : list of ints + Sizes of blocks + p : list of list of floats + Element (r,s) gives the density of edges going from the nodes + of group r to nodes of group s. + p must match the number of groups (len(sizes) == len(p)), + and it must be symmetric if the graph is undirected. + nodelist : list, optional + The block tags are assigned according to the node identifiers + in nodelist. If nodelist is None, then the ordering is the + range [0,sum(sizes)-1]. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : boolean optional, default=False + Whether to create a directed graph or not. + selfloops : boolean optional, default=False + Whether to include self-loops or not. + sparse: boolean optional, default=True + Use the sparse heuristic to speed up the generator. + + Returns + ------- + g : NetworkX Graph or DiGraph + Stochastic block model graph of size sum(sizes) + + Raises + ------ + NetworkXError + If probabilities are not in [0,1]. + If the probability matrix is not square (directed case). + If the probability matrix is not symmetric (undirected case). 
+ If the sizes list does not match nodelist or the probability matrix. + If nodelist contains duplicate. + + Examples + -------- + >>> sizes = [75, 75, 300] + >>> probs = [[0.25, 0.05, 0.02], [0.05, 0.35, 0.07], [0.02, 0.07, 0.40]] + >>> g = nx.stochastic_block_model(sizes, probs, seed=0) + >>> len(g) + 450 + >>> H = nx.quotient_graph(g, g.graph["partition"], relabel=True) + >>> for v in H.nodes(data=True): + ... print(round(v[1]["density"], 3)) + 0.245 + 0.348 + 0.405 + >>> for v in H.edges(data=True): + ... print(round(1.0 * v[2]["weight"] / (sizes[v[0]] * sizes[v[1]]), 3)) + 0.051 + 0.022 + 0.07 + + See Also + -------- + random_partition_graph + planted_partition_graph + gaussian_random_partition_graph + gnp_random_graph + + References + ---------- + .. [1] Holland, P. W., Laskey, K. B., & Leinhardt, S., + "Stochastic blockmodels: First steps", + Social networks, 5(2), 109-137, 1983. + """ + # Check if dimensions match + if len(sizes) != len(p): + raise nx.NetworkXException("'sizes' and 'p' do not match.") + # Check for probability symmetry (undirected) and shape (directed) + for row in p: + if len(p) != len(row): + raise nx.NetworkXException("'p' must be a square matrix.") + if not directed: + p_transpose = [list(i) for i in zip(*p)] + for i in zip(p, p_transpose): + for j in zip(i[0], i[1]): + if abs(j[0] - j[1]) > 1e-08: + raise nx.NetworkXException("'p' must be symmetric.") + # Check for probability range + for row in p: + for prob in row: + if prob < 0 or prob > 1: + raise nx.NetworkXException("Entries of 'p' not in [0,1].") + # Check for nodelist consistency + if nodelist is not None: + if len(nodelist) != sum(sizes): + raise nx.NetworkXException("'nodelist' and 'sizes' do not match.") + if len(nodelist) != len(set(nodelist)): + raise nx.NetworkXException("nodelist contains duplicate.") + else: + nodelist = range(sum(sizes)) + + # Setup the graph conditionally to the directed switch. + block_range = range(len(sizes)) + if directed: + g = nx.DiGraph() + block_iter = itertools.product(block_range, block_range) + else: + g = nx.Graph() + block_iter = itertools.combinations_with_replacement(block_range, 2) + # Split nodelist in a partition (list of sets). 
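+    # For example, sizes=[2, 3] gives size_cumsum=[0, 2, 5], so the default
+    # nodelist range(5) is split into the partition [{0, 1}, {2, 3, 4}].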
+ size_cumsum = [sum(sizes[0:x]) for x in range(len(sizes) + 1)] + g.graph["partition"] = [ + set(nodelist[size_cumsum[x] : size_cumsum[x + 1]]) + for x in range(len(size_cumsum) - 1) + ] + # Setup nodes and graph name + for block_id, nodes in enumerate(g.graph["partition"]): + for node in nodes: + g.add_node(node, block=block_id) + + g.name = "stochastic_block_model" + + # Test for edge existence + parts = g.graph["partition"] + for i, j in block_iter: + if i == j: + if directed: + if selfloops: + edges = itertools.product(parts[i], parts[i]) + else: + edges = itertools.permutations(parts[i], 2) + else: + edges = itertools.combinations(parts[i], 2) + if selfloops: + edges = itertools.chain(edges, zip(parts[i], parts[i])) + for e in edges: + if seed.random() < p[i][j]: + g.add_edge(*e) + else: + edges = itertools.product(parts[i], parts[j]) + if sparse: + if p[i][j] == 1: # Test edges cases p_ij = 0 or 1 + for e in edges: + g.add_edge(*e) + elif p[i][j] > 0: + while True: + try: + logrand = math.log(seed.random()) + skip = math.floor(logrand / math.log(1 - p[i][j])) + # consume "skip" edges + next(itertools.islice(edges, skip, skip), None) + e = next(edges) + g.add_edge(*e) # __safe + except StopIteration: + break + else: + for e in edges: + if seed.random() < p[i][j]: + g.add_edge(*e) # __safe + return g + + +def _zipf_rv_below(gamma, xmin, threshold, seed): + """Returns a random value chosen from the bounded Zipf distribution. + + Repeatedly draws values from the Zipf distribution until the + threshold is met, then returns that value. + """ + result = nx.utils.zipf_rv(gamma, xmin, seed) + while result > threshold: + result = nx.utils.zipf_rv(gamma, xmin, seed) + return result + + +def _powerlaw_sequence(gamma, low, high, condition, length, max_iters, seed): + """Returns a list of numbers obeying a constrained power law distribution. + + ``gamma`` and ``low`` are the parameters for the Zipf distribution. + + ``high`` is the maximum allowed value for values draw from the Zipf + distribution. For more information, see :func:`_zipf_rv_below`. + + ``condition`` and ``length`` are Boolean-valued functions on + lists. While generating the list, random values are drawn and + appended to the list until ``length`` is satisfied by the created + list. Once ``condition`` is satisfied, the sequence generated in + this way is returned. + + ``max_iters`` indicates the number of times to generate a list + satisfying ``length``. If the number of iterations exceeds this + value, :exc:`~networkx.exception.ExceededMaxIterations` is raised. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + """ + for i in range(max_iters): + seq = [] + while not length(seq): + seq.append(_zipf_rv_below(gamma, low, high, seed)) + if condition(seq): + return seq + raise nx.ExceededMaxIterations("Could not create power law sequence") + + +def _hurwitz_zeta(x, q, tolerance): + """The Hurwitz zeta function, or the Riemann zeta function of two arguments. + + ``x`` must be greater than one and ``q`` must be positive. + + This function repeatedly computes subsequent partial sums until + convergence, as decided by ``tolerance``. 
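+
+    Concretely, this evaluates the series ``sum((k + q) ** -x for k = 0, 1,
+    2, ...)``, stopping once two successive partial sums differ by at most
+    ``tolerance``.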
+ """ + z = 0 + z_prev = -float("inf") + k = 0 + while abs(z - z_prev) > tolerance: + z_prev = z + z += 1 / ((k + q) ** x) + k += 1 + return z + + +def _generate_min_degree(gamma, average_degree, max_degree, tolerance, max_iters): + """Returns a minimum degree from the given average degree.""" + # Defines zeta function whether or not Scipy is available + try: + from scipy.special import zeta + except ImportError: + + def zeta(x, q): + return _hurwitz_zeta(x, q, tolerance) + + min_deg_top = max_degree + min_deg_bot = 1 + min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot + itrs = 0 + mid_avg_deg = 0 + while abs(mid_avg_deg - average_degree) > tolerance: + if itrs > max_iters: + raise nx.ExceededMaxIterations("Could not match average_degree") + mid_avg_deg = 0 + for x in range(int(min_deg_mid), max_degree + 1): + mid_avg_deg += (x ** (-gamma + 1)) / zeta(gamma, min_deg_mid) + if mid_avg_deg > average_degree: + min_deg_top = min_deg_mid + min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot + else: + min_deg_bot = min_deg_mid + min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot + itrs += 1 + # return int(min_deg_mid + 0.5) + return round(min_deg_mid) + + +def _generate_communities(degree_seq, community_sizes, mu, max_iters, seed): + """Returns a list of sets, each of which represents a community. + + ``degree_seq`` is the degree sequence that must be met by the + graph. + + ``community_sizes`` is the community size distribution that must be + met by the generated list of sets. + + ``mu`` is a float in the interval [0, 1] indicating the fraction of + intra-community edges incident to each node. + + ``max_iters`` is the number of times to try to add a node to a + community. This must be greater than the length of + ``degree_seq``, otherwise this function will always fail. If + the number of iterations exceeds this value, + :exc:`~networkx.exception.ExceededMaxIterations` is raised. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + The communities returned by this are sets of integers in the set {0, + ..., *n* - 1}, where *n* is the length of ``degree_seq``. + + """ + # This assumes the nodes in the graph will be natural numbers. + result = [set() for _ in community_sizes] + n = len(degree_seq) + free = list(range(n)) + for i in range(max_iters): + v = free.pop() + c = seed.choice(range(len(community_sizes))) + # s = int(degree_seq[v] * (1 - mu) + 0.5) + s = round(degree_seq[v] * (1 - mu)) + # If the community is large enough, add the node to the chosen + # community. Otherwise, return it to the list of unaffiliated + # nodes. + if s < community_sizes[c]: + result[c].add(v) + else: + free.append(v) + # If the community is too big, remove a node from it. + if len(result[c]) > community_sizes[c]: + free.append(result[c].pop()) + if not free: + return result + msg = "Could not assign communities; try increasing min_community" + raise nx.ExceededMaxIterations(msg) + + +@py_random_state(11) +@nx._dispatchable(graphs=None, returns_graph=True) +def LFR_benchmark_graph( + n, + tau1, + tau2, + mu, + average_degree=None, + min_degree=None, + max_degree=None, + min_community=None, + max_community=None, + tol=1.0e-7, + max_iters=500, + seed=None, +): + r"""Returns the LFR benchmark graph. + + This algorithm proceeds as follows: + + 1) Find a degree sequence with a power law distribution, and minimum + value ``min_degree``, which has approximate average degree + ``average_degree``. 
This is accomplished by either + + a) specifying ``min_degree`` and not ``average_degree``, + b) specifying ``average_degree`` and not ``min_degree``, in which + case a suitable minimum degree will be found. + + ``max_degree`` can also be specified, otherwise it will be set to + ``n``. Each node *u* will have $\mu \mathrm{deg}(u)$ edges + joining it to nodes in communities other than its own and $(1 - + \mu) \mathrm{deg}(u)$ edges joining it to nodes in its own + community. + 2) Generate community sizes according to a power law distribution + with exponent ``tau2``. If ``min_community`` and + ``max_community`` are not specified they will be selected to be + ``min_degree`` and ``max_degree``, respectively. Community sizes + are generated until the sum of their sizes equals ``n``. + 3) Each node will be randomly assigned a community with the + condition that the community is large enough for the node's + intra-community degree, $(1 - \mu) \mathrm{deg}(u)$ as + described in step 2. If a community grows too large, a random node + will be selected for reassignment to a new community, until all + nodes have been assigned a community. + 4) Each node *u* then adds $(1 - \mu) \mathrm{deg}(u)$ + intra-community edges and $\mu \mathrm{deg}(u)$ inter-community + edges. + + Parameters + ---------- + n : int + Number of nodes in the created graph. + + tau1 : float + Power law exponent for the degree distribution of the created + graph. This value must be strictly greater than one. + + tau2 : float + Power law exponent for the community size distribution in the + created graph. This value must be strictly greater than one. + + mu : float + Fraction of inter-community edges incident to each node. This + value must be in the interval [0, 1]. + + average_degree : float + Desired average degree of nodes in the created graph. This value + must be in the interval [0, *n*]. Exactly one of this and + ``min_degree`` must be specified, otherwise a + :exc:`NetworkXError` is raised. + + min_degree : int + Minimum degree of nodes in the created graph. This value must be + in the interval [0, *n*]. Exactly one of this and + ``average_degree`` must be specified, otherwise a + :exc:`NetworkXError` is raised. + + max_degree : int + Maximum degree of nodes in the created graph. If not specified, + this is set to ``n``, the total number of nodes in the graph. + + min_community : int + Minimum size of communities in the graph. If not specified, this + is set to ``min_degree``. + + max_community : int + Maximum size of communities in the graph. If not specified, this + is set to ``n``, the total number of nodes in the graph. + + tol : float + Tolerance when comparing floats, specifically when comparing + average degree values. + + max_iters : int + Maximum number of iterations to try to create the community sizes, + degree distribution, and community affiliations. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : NetworkX graph + The LFR benchmark graph generated according to the specified + parameters. + + Each node in the graph has a node attribute ``'community'`` that + stores the community (that is, the set of nodes) that includes + it. + + Raises + ------ + NetworkXError + If any of the parameters do not meet their upper and lower bounds: + + - ``tau1`` and ``tau2`` must be strictly greater than 1. + - ``mu`` must be in [0, 1]. + - ``max_degree`` must be in {1, ..., *n*}. 
+ - ``min_community`` and ``max_community`` must be in {0, ..., + *n*}. + + If not exactly one of ``average_degree`` and ``min_degree`` is + specified. + + If ``min_degree`` is not specified and a suitable ``min_degree`` + cannot be found. + + ExceededMaxIterations + If a valid degree sequence cannot be created within + ``max_iters`` number of iterations. + + If a valid set of community sizes cannot be created within + ``max_iters`` number of iterations. + + If a valid community assignment cannot be created within ``10 * + n * max_iters`` number of iterations. + + Examples + -------- + Basic usage:: + + >>> from networkx.generators.community import LFR_benchmark_graph + >>> n = 250 + >>> tau1 = 3 + >>> tau2 = 1.5 + >>> mu = 0.1 + >>> G = LFR_benchmark_graph( + ... n, tau1, tau2, mu, average_degree=5, min_community=20, seed=10 + ... ) + + Continuing the example above, you can get the communities from the + node attributes of the graph:: + + >>> communities = {frozenset(G.nodes[v]["community"]) for v in G} + + Notes + ----- + This algorithm differs slightly from the original way it was + presented in [1]. + + 1) Rather than connecting the graph via a configuration model then + rewiring to match the intra-community and inter-community + degrees, we do this wiring explicitly at the end, which should be + equivalent. + 2) The code posted on the author's website [2] calculates the random + power law distributed variables and their average using + continuous approximations, whereas we use the discrete + distributions here as both degree and community size are + discrete. + + Though the authors describe the algorithm as quite robust, testing + during development indicates that a somewhat narrower parameter set + is likely to successfully produce a graph. Some suggestions have + been provided in the event of exceptions. + + References + ---------- + .. [1] "Benchmark graphs for testing community detection algorithms", + Andrea Lancichinetti, Santo Fortunato, and Filippo Radicchi, + Phys. Rev. E 78, 046110 2008 + .. [2] https://www.santofortunato.net/resources + + """ + # Perform some basic parameter validation. + if not tau1 > 1: + raise nx.NetworkXError("tau1 must be greater than one") + if not tau2 > 1: + raise nx.NetworkXError("tau2 must be greater than one") + if not 0 <= mu <= 1: + raise nx.NetworkXError("mu must be in the interval [0, 1]") + + # Validate parameters for generating the degree sequence. + if max_degree is None: + max_degree = n + elif not 0 < max_degree <= n: + raise nx.NetworkXError("max_degree must be in the interval (0, n]") + if not ((min_degree is None) ^ (average_degree is None)): + raise nx.NetworkXError( + "Must assign exactly one of min_degree and average_degree" + ) + if min_degree is None: + min_degree = _generate_min_degree( + tau1, average_degree, max_degree, tol, max_iters + ) + + # Generate a degree sequence with a power law distribution. + low, high = min_degree, max_degree + + def condition(seq): + return sum(seq) % 2 == 0 + + def length(seq): + return len(seq) >= n + + deg_seq = _powerlaw_sequence(tau1, low, high, condition, length, max_iters, seed) + + # Validate parameters for generating the community size sequence. + if min_community is None: + min_community = min(deg_seq) + if max_community is None: + max_community = max(deg_seq) + + # Generate a community size sequence with a power law distribution. + # + # TODO The original code incremented the number of iterations each + # time a new Zipf random value was drawn from the distribution. 
+    # differed from the way the number of iterations was incremented in
+    # `_powerlaw_degree_sequence`, so this code was changed to match
+    # that one. As a result, this code is allowed many more chances to
+    # generate a valid community size sequence.
+    low, high = min_community, max_community
+
+    def condition(seq):
+        return sum(seq) == n
+
+    def length(seq):
+        return sum(seq) >= n
+
+    comms = _powerlaw_sequence(tau2, low, high, condition, length, max_iters, seed)
+
+    # Generate the communities based on the given degree sequence and
+    # community sizes.
+    max_iters *= 10 * n
+    communities = _generate_communities(deg_seq, comms, mu, max_iters, seed)
+
+    # Finally, generate the benchmark graph based on the given
+    # communities, joining nodes according to the intra- and
+    # inter-community degrees.
+    G = nx.Graph()
+    G.add_nodes_from(range(n))
+    for c in communities:
+        for u in c:
+            while G.degree(u) < round(deg_seq[u] * (1 - mu)):
+                v = seed.choice(list(c))
+                G.add_edge(u, v)
+            while G.degree(u) < deg_seq[u]:
+                v = seed.choice(range(n))
+                if v not in c:
+                    G.add_edge(u, v)
+            G.nodes[u]["community"] = c
+    return G
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/degree_seq.py b/.venv/lib/python3.12/site-packages/networkx/generators/degree_seq.py
new file mode 100644
index 0000000..d920f85
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/degree_seq.py
@@ -0,0 +1,867 @@
+"""Generate graphs with a given degree sequence or expected degree sequence."""
+
+import heapq
+import math
+from itertools import chain, combinations, zip_longest
+from operator import itemgetter
+
+import networkx as nx
+from networkx.utils import py_random_state, random_weighted_sample
+
+__all__ = [
+    "configuration_model",
+    "directed_configuration_model",
+    "expected_degree_graph",
+    "havel_hakimi_graph",
+    "directed_havel_hakimi_graph",
+    "degree_sequence_tree",
+    "random_degree_sequence_graph",
+]
+
+chaini = chain.from_iterable
+
+
+def _to_stublist(degree_sequence):
+    """Returns a list of degree-repeated node numbers.
+
+    ``degree_sequence`` is a list of nonnegative integers representing
+    the degrees of nodes in a graph.
+
+    This function returns a list of node numbers with multiplicities
+    according to the given degree sequence. For example, if the first
+    element of ``degree_sequence`` is ``3``, then the first node number,
+    ``0``, will appear at the head of the returned list three times. The
+    node numbers are assumed to be the numbers zero through
+    ``len(degree_sequence) - 1``.
+
+    Examples
+    --------
+
+    >>> degree_sequence = [1, 2, 3]
+    >>> _to_stublist(degree_sequence)
+    [0, 1, 1, 2, 2, 2]
+
+    If a zero appears in the sequence, that means the node exists but
+    has degree zero, so that number will be skipped in the returned
+    list::
+
+    >>> degree_sequence = [2, 0, 1]
+    >>> _to_stublist(degree_sequence)
+    [0, 0, 2]
+
+    """
+    return list(chaini([n] * d for n, d in enumerate(degree_sequence)))
+
+
+def _configuration_model(
+    deg_sequence, create_using, directed=False, in_deg_sequence=None, seed=None
+):
+    """Helper function for generating either undirected or directed
+    configuration model graphs.
+
+    ``deg_sequence`` is a list of nonnegative integers representing the
+    degree of the node whose label is the index of the list element.
+
+    ``create_using`` see :func:`~networkx.empty_graph`.
+
+    ``directed`` and ``in_deg_sequence`` are required if you want the
+    returned graph to be generated using the directed configuration
+    model algorithm.
If ``directed`` is ``False``, then ``deg_sequence`` + is interpreted as the degree sequence of an undirected graph and + ``in_deg_sequence`` is ignored. Otherwise, if ``directed`` is + ``True``, then ``deg_sequence`` is interpreted as the out-degree + sequence and ``in_deg_sequence`` as the in-degree sequence of a + directed graph. + + .. note:: + + ``deg_sequence`` and ``in_deg_sequence`` need not be the same + length. + + ``seed`` is a random.Random or numpy.random.RandomState instance + + This function returns a graph, directed if and only if ``directed`` + is ``True``, generated according to the configuration model + algorithm. For more information on the algorithm, see the + :func:`configuration_model` or :func:`directed_configuration_model` + functions. + + """ + n = len(deg_sequence) + G = nx.empty_graph(n, create_using) + # If empty, return the null graph immediately. + if n == 0: + return G + # Build a list of available degree-repeated nodes. For example, + # for degree sequence [3, 2, 1, 1, 1], the "stub list" is + # initially [0, 0, 0, 1, 1, 2, 3, 4], that is, node 0 has degree + # 3 and thus is repeated 3 times, etc. + # + # Also, shuffle the stub list in order to get a random sequence of + # node pairs. + if directed: + pairs = zip_longest(deg_sequence, in_deg_sequence, fillvalue=0) + # Unzip the list of pairs into a pair of lists. + out_deg, in_deg = zip(*pairs) + + out_stublist = _to_stublist(out_deg) + in_stublist = _to_stublist(in_deg) + + seed.shuffle(out_stublist) + seed.shuffle(in_stublist) + else: + stublist = _to_stublist(deg_sequence) + # Choose a random balanced bipartition of the stublist, which + # gives a random pairing of nodes. In this implementation, we + # shuffle the list and then split it in half. + n = len(stublist) + half = n // 2 + seed.shuffle(stublist) + out_stublist, in_stublist = stublist[:half], stublist[half:] + G.add_edges_from(zip(out_stublist, in_stublist)) + return G + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def configuration_model(deg_sequence, create_using=None, seed=None): + """Returns a random graph with the given degree sequence. + + The configuration model generates a random pseudograph (graph with + parallel edges and self loops) by randomly assigning edges to + match the given degree sequence. + + Parameters + ---------- + deg_sequence : list of nonnegative integers + Each list entry corresponds to the degree of a node. + create_using : NetworkX graph constructor, optional (default MultiGraph) + Graph type to create. If graph instance, then cleared before populated. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : MultiGraph + A graph with the specified degree sequence. + Nodes are labeled starting at 0 with an index + corresponding to the position in deg_sequence. + + Raises + ------ + NetworkXError + If the degree sequence does not have an even sum. + + See Also + -------- + is_graphical + + Notes + ----- + As described by Newman [1]_. + + A non-graphical degree sequence (not realizable by some simple + graph) is allowed since this function returns graphs with self + loops and parallel edges. An exception is raised if the degree + sequence does not have an even sum. + + This configuration model construction process can lead to + duplicate edges and loops. 
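+
+    For example, the number of self-loops in a single sample can be
+    counted directly; the exact count varies from sample to sample::
+
+        >>> G = nx.configuration_model([3, 3, 2, 2, 2])
+        >>> nx.number_of_selfloops(G)  # doctest: +SKIP
+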
+    You can remove the self-loops and
+    parallel edges (see below) which will likely result in a graph
+    that doesn't have the exact degree sequence specified.
+
+    The density of self-loops and parallel edges tends to decrease as
+    the number of nodes increases. However, typically the number of
+    self-loops will approach a Poisson distribution with a nonzero mean,
+    and similarly for the number of parallel edges. Consider a node
+    with *k* stubs. The probability of being joined to another stub of
+    the same node is basically (*k* - *1*) / *N*, where *k* is the
+    degree and *N* is the number of nodes. So the probability of a
+    self-loop scales like *c* / *N* for some constant *c*. As *N* grows,
+    this means we expect *c* self-loops. Similarly for parallel edges.
+
+    References
+    ----------
+    .. [1] M.E.J. Newman, "The structure and function of complex networks",
+       SIAM REVIEW 45-2, pp 167-256, 2003.
+
+    Examples
+    --------
+    You can create a degree sequence following a particular distribution
+    by using one of the distribution functions in
+    :mod:`~networkx.utils.random_sequence` (or one of your own). For
+    example, to create an undirected multigraph on one hundred nodes
+    with degree sequence chosen from the power law distribution:
+
+    >>> sequence = nx.random_powerlaw_tree_sequence(100, tries=5000)
+    >>> G = nx.configuration_model(sequence)
+    >>> len(G)
+    100
+    >>> actual_degrees = [d for v, d in G.degree()]
+    >>> actual_degrees == sequence
+    True
+
+    The returned graph is a multigraph, which may have parallel
+    edges. To remove any parallel edges from the returned graph:
+
+    >>> G = nx.Graph(G)
+
+    Similarly, to remove self-loops:
+
+    >>> G.remove_edges_from(nx.selfloop_edges(G))
+
+    """
+    if sum(deg_sequence) % 2 != 0:
+        msg = "Invalid degree sequence: sum of degrees must be even, not odd"
+        raise nx.NetworkXError(msg)
+
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXNotImplemented("not implemented for directed graphs")
+
+    G = _configuration_model(deg_sequence, G, seed=seed)
+
+    return G
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def directed_configuration_model(
+    in_degree_sequence, out_degree_sequence, create_using=None, seed=None
+):
+    """Returns a directed random graph with the given degree sequences.
+
+    The configuration model generates a random directed pseudograph
+    (graph with parallel edges and self loops) by randomly assigning
+    edges to match the given degree sequences.
+
+    Parameters
+    ----------
+    in_degree_sequence : list of nonnegative integers
+        Each list entry corresponds to the in-degree of a node.
+    out_degree_sequence : list of nonnegative integers
+        Each list entry corresponds to the out-degree of a node.
+    create_using : NetworkX graph constructor, optional (default MultiDiGraph)
+        Graph type to create. If graph instance, then cleared before populated.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    G : MultiDiGraph
+        A graph with the specified degree sequences.
+        Nodes are labeled starting at 0 with an index
+        corresponding to the position in deg_sequence.
+
+    Raises
+    ------
+    NetworkXError
+        If the degree sequences do not have the same sum.
+
+    See Also
+    --------
+    configuration_model
+
+    Notes
+    -----
+    Algorithm as described by Newman [1]_.
+
+    A non-graphical degree sequence (not realizable by some simple
+    graph) is allowed since this function returns graphs with self
+    loops and parallel edges. An exception is raised if the degree
+    sequences do not have the same sum.
+
+    This configuration model construction process can lead to
+    duplicate edges and loops. You can remove the self-loops and
+    parallel edges (see below) which will likely result in a graph
+    that doesn't have the exact degree sequence specified. This
+    "finite-size effect" decreases as the size of the graph increases.
+
+    References
+    ----------
+    .. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.
+       Random graphs with arbitrary degree distributions and their applications
+       Phys. Rev. E, 64, 026118 (2001)
+
+    Examples
+    --------
+    One can modify the in- and out-degree sequences from an existing
+    directed graph in order to create a new directed graph. For example,
+    here we modify the directed path graph:
+
+    >>> D = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
+    >>> din = list(d for n, d in D.in_degree())
+    >>> dout = list(d for n, d in D.out_degree())
+    >>> din.append(1)
+    >>> dout[0] = 2
+    >>> # We now expect an edge from node 0 to a new node, node 4.
+    ... D = nx.directed_configuration_model(din, dout)
+
+    The returned graph is a directed multigraph, which may have parallel
+    edges. To remove any parallel edges from the returned graph:
+
+    >>> D = nx.DiGraph(D)
+
+    Similarly, to remove self-loops:
+
+    >>> D.remove_edges_from(nx.selfloop_edges(D))
+
+    """
+    if sum(in_degree_sequence) != sum(out_degree_sequence):
+        msg = "Invalid degree sequences: sequences must have equal sums"
+        raise nx.NetworkXError(msg)
+
+    if create_using is None:
+        create_using = nx.MultiDiGraph
+
+    G = _configuration_model(
+        out_degree_sequence,
+        create_using,
+        directed=True,
+        in_deg_sequence=in_degree_sequence,
+        seed=seed,
+    )
+
+    return G
+
+
+@py_random_state(1)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def expected_degree_graph(w, seed=None, selfloops=True):
+    r"""Returns a random graph with given expected degrees.
+
+    Given a sequence of expected degrees $W=(w_0,w_1,\ldots,w_{n-1})$
+    of length $n$ this algorithm assigns an edge between node $u$ and
+    node $v$ with probability
+
+    .. math::
+
+       p_{uv} = \frac{w_u w_v}{\sum_k w_k} .
+
+    Parameters
+    ----------
+    w : list
+        The list of expected degrees.
+    selfloops: bool (default=True)
+        Set to False to remove the possibility of self-loop edges.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    Graph
+
+    Examples
+    --------
+    >>> z = [10 for i in range(100)]
+    >>> G = nx.expected_degree_graph(z)
+
+    Notes
+    -----
+    The nodes have integer labels corresponding to index of expected degrees
+    input sequence.
+
+    The complexity of this algorithm is $\mathcal{O}(n+m)$ where $n$ is the
+    number of nodes and $m$ is the expected number of edges.
+
+    The model in [1]_ includes the possibility of self-loop edges.
+    Set selfloops=False to produce a graph without self loops.
+
+    For finite graphs this model doesn't produce exactly the given
+    expected degree sequence. Instead the expected degrees are as
+    follows.
+
+    For the case without self loops (selfloops=False),
+
+    .. math::
+
+       E[deg(u)] = \sum_{v \ne u} p_{uv}
+                 = w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) .
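+
+    For example, with ``w = [10] * 100`` each node has expected degree
+    $10 \left(1 - \frac{10}{1000}\right) = 9.9$, which can be checked
+    empirically (the sample mean fluctuates around this value)::
+
+        >>> G = nx.expected_degree_graph([10] * 100, selfloops=False, seed=1)
+        >>> sum(d for _, d in G.degree()) / len(G)  # doctest: +SKIP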
+
+
+    NetworkX uses the standard convention that a self-loop edge counts 2
+    in the degree of a node, so with self loops (selfloops=True),
+
+    .. math::
+
+       E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu}
+                 = w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) .
+
+    References
+    ----------
+    .. [1] Fan Chung and L. Lu, Connected components in random graphs with
+       given expected degree sequences, Ann. Combinatorics, 6,
+       pp. 125-145, 2002.
+    .. [2] Joel Miller and Aric Hagberg,
+       Efficient generation of networks with given expected degrees,
+       in Algorithms and Models for the Web-Graph (WAW 2011),
+       Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,
+       pp. 115-126, 2011.
+    """
+    n = len(w)
+    G = nx.empty_graph(n)
+
+    # If there are no nodes or no edges in the graph, return the empty graph.
+    if n == 0 or max(w) == 0:
+        return G
+
+    rho = 1 / sum(w)
+    # Sort the weights in decreasing order. The original order of the
+    # weights dictates the order of the (integer) node labels, so we
+    # need to remember the permutation applied in the sorting.
+    order = sorted(enumerate(w), key=itemgetter(1), reverse=True)
+    mapping = {c: u for c, (u, v) in enumerate(order)}
+    seq = [v for u, v in order]
+    last = n
+    if not selfloops:
+        last -= 1
+    for u in range(last):
+        v = u
+        if not selfloops:
+            v += 1
+        factor = seq[u] * rho
+        p = min(seq[v] * factor, 1)
+        while v < n and p > 0:
+            if p != 1:
+                r = seed.random()
+                v += math.floor(math.log(r, 1 - p))
+            if v < n:
+                q = min(seq[v] * factor, 1)
+                if seed.random() < q / p:
+                    G.add_edge(mapping[u], mapping[v])
+                v += 1
+                p = q
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def havel_hakimi_graph(deg_sequence, create_using=None):
+    """Returns a simple graph with given degree sequence constructed
+    using the Havel-Hakimi algorithm.
+
+    Parameters
+    ----------
+    deg_sequence: list of integers
+        Each integer corresponds to the degree of a node (need not be sorted).
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+        Directed graphs are not allowed.
+
+    Raises
+    ------
+    NetworkXException
+        For a non-graphical degree sequence (i.e. one
+        not realizable by some simple graph).
+
+    Notes
+    -----
+    The Havel-Hakimi algorithm constructs a simple graph by
+    successively connecting the node of highest degree to other nodes
+    of highest degree, resorting remaining nodes by degree, and
+    repeating the process. The resulting graph has a high
+    degree-assortativity. Nodes are labeled 0, ..., len(deg_sequence) - 1,
+    corresponding to their position in deg_sequence.
+
+    The basic algorithm is from Hakimi [1]_ and was generalized by
+    Kleitman and Wang [2]_.
+
+    References
+    ----------
+    .. [1] Hakimi S., On Realizability of a Set of Integers as
+       Degrees of the Vertices of a Linear Graph. I,
+       Journal of SIAM, 10(3), pp. 496-506 (1962)
+    .. [2] Kleitman D.J. and Wang D.L.
+       Algorithms for Constructing Graphs and Digraphs with Given Valences
+       and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
+    """
+    if not nx.is_graphical(deg_sequence):
+        raise nx.NetworkXError("Invalid degree sequence")
+
+    p = len(deg_sequence)
+    G = nx.empty_graph(p, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed graphs are not supported")
+    num_degs = [[] for i in range(p)]
+    dmax, dsum, n = 0, 0, 0
+    for d in deg_sequence:
+        # Process only the non-zero integers
+        if d > 0:
+            num_degs[d].append(n)
+        dmax, dsum, n = max(dmax, d), dsum + d, n + 1
+    # Return graph if no edges
+    if n == 0:
+        return G
+
+    modstubs = [(0, 0)] * (dmax + 1)
+    # Successively reduce degree sequence by removing the maximum degree
+    while n > 0:
+        # Retrieve the maximum degree in the sequence
+        while len(num_degs[dmax]) == 0:
+            dmax -= 1
+        # If there are not enough stubs to connect to, then the sequence is
+        # not graphical
+        if dmax > n - 1:
+            raise nx.NetworkXError("Non-graphical integer sequence")
+
+        # Remove largest stub in list
+        source = num_degs[dmax].pop()
+        n -= 1
+        # Reduce the next dmax largest stubs
+        mslen = 0
+        k = dmax
+        for i in range(dmax):
+            while len(num_degs[k]) == 0:
+                k -= 1
+            target = num_degs[k].pop()
+            G.add_edge(source, target)
+            n -= 1
+            if k > 1:
+                modstubs[mslen] = (k - 1, target)
+                mslen += 1
+        # Add back to the list any nonzero stubs that were removed
+        for i in range(mslen):
+            (stubval, stubtarget) = modstubs[i]
+            num_degs[stubval].append(stubtarget)
+            n += 1
+
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def directed_havel_hakimi_graph(in_deg_sequence, out_deg_sequence, create_using=None):
+    """Returns a directed graph with the given degree sequences.
+
+    Parameters
+    ----------
+    in_deg_sequence : list of integers
+        Each list entry corresponds to the in-degree of a node.
+    out_deg_sequence : list of integers
+        Each list entry corresponds to the out-degree of a node.
+    create_using : NetworkX graph constructor, optional (default DiGraph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    Returns
+    -------
+    G : DiGraph
+        A graph with the specified degree sequences.
+        Nodes are labeled starting at 0 with an index
+        corresponding to the position in deg_sequence
+
+    Raises
+    ------
+    NetworkXError
+        If the degree sequences are not digraphical.
+
+    See Also
+    --------
+    configuration_model
+
+    Notes
+    -----
+    Algorithm as described by Kleitman and Wang [1]_.
+
+    References
+    ----------
+    .. [1] D.J. Kleitman and D.L. Wang
+       Algorithms for Constructing Graphs and Digraphs with Given Valences
+       and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
+    """
+    in_deg_sequence = nx.utils.make_list_of_ints(in_deg_sequence)
+    out_deg_sequence = nx.utils.make_list_of_ints(out_deg_sequence)
+
+    # Process the sequences and form two heaps to store degree pairs with
+    # either zero or nonzero out degrees
+    sumin, sumout = 0, 0
+    nin, nout = len(in_deg_sequence), len(out_deg_sequence)
+    maxn = max(nin, nout)
+    G = nx.empty_graph(maxn, create_using, default=nx.DiGraph)
+    if maxn == 0:
+        return G
+    maxin = 0
+    stubheap, zeroheap = [], []
+    for n in range(maxn):
+        in_deg, out_deg = 0, 0
+        if n < nout:
+            out_deg = out_deg_sequence[n]
+        if n < nin:
+            in_deg = in_deg_sequence[n]
+        if in_deg < 0 or out_deg < 0:
+            raise nx.NetworkXError(
+                "Invalid degree sequences. Sequence values must be nonnegative."
+            )
+        sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
+        if in_deg > 0:
+            stubheap.append((-1 * out_deg, -1 * in_deg, n))
+        elif out_deg > 0:
+            zeroheap.append((-1 * out_deg, n))
+    if sumin != sumout:
+        raise nx.NetworkXError(
+            "Invalid degree sequences. Sequences must have equal sums."
+        )
+    heapq.heapify(stubheap)
+    heapq.heapify(zeroheap)
+
+    modstubs = [(0, 0, 0)] * (maxin + 1)
+    # Successively reduce degree sequence by removing the maximum
+    while stubheap:
+        # Remove first value in the sequence with a non-zero in degree
+        (freeout, freein, target) = heapq.heappop(stubheap)
+        freein *= -1
+        if freein > len(stubheap) + len(zeroheap):
+            raise nx.NetworkXError("Non-digraphical integer sequence")
+
+        # Attach arcs from the nodes with the most stubs
+        mslen = 0
+        for i in range(freein):
+            if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0][0]):
+                (stubout, stubsource) = heapq.heappop(zeroheap)
+                stubin = 0
+            else:
+                (stubout, stubin, stubsource) = heapq.heappop(stubheap)
+            if stubout == 0:
+                raise nx.NetworkXError("Non-digraphical integer sequence")
+            G.add_edge(stubsource, target)
+            # Check if source is now totally connected
+            if stubout + 1 < 0 or stubin < 0:
+                modstubs[mslen] = (stubout + 1, stubin, stubsource)
+                mslen += 1
+
+        # Add the nodes back to the heaps that still have available stubs
+        for i in range(mslen):
+            stub = modstubs[i]
+            if stub[1] < 0:
+                heapq.heappush(stubheap, stub)
+            else:
+                heapq.heappush(zeroheap, (stub[0], stub[2]))
+        if freeout < 0:
+            heapq.heappush(zeroheap, (freeout, target))
+
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def degree_sequence_tree(deg_sequence, create_using=None):
+    """Make a tree for the given degree sequence.
+
+    A tree has #nodes-#edges=1 so
+    the degree sequence must have
+    len(deg_sequence)-sum(deg_sequence)/2=1
+    """
+    # The sum of the degree sequence must be even (for any undirected graph).
+    degree_sum = sum(deg_sequence)
+    if degree_sum % 2 != 0:
+        msg = "Invalid degree sequence: sum of degrees must be even, not odd"
+        raise nx.NetworkXError(msg)
+    if len(deg_sequence) - degree_sum // 2 != 1:
+        msg = (
+            "Invalid degree sequence: tree must have number of nodes equal"
+            " to one more than the number of edges"
+        )
+        raise nx.NetworkXError(msg)
+    G = nx.empty_graph(0, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # Sort all degrees greater than 1 in decreasing order.
+    #
+    # TODO Does this need to be sorted in reverse order?
+    deg = sorted((s for s in deg_sequence if s > 1), reverse=True)
+
+    # make path graph as backbone
+    n = len(deg) + 2
+    nx.add_path(G, range(n))
+    last = n
+
+    # add the leaves
+    for source in range(1, n - 1):
+        nedges = deg.pop() - 2
+        for target in range(last, last + nedges):
+            G.add_edge(source, target)
+        last += nedges
+
+    # in case we added one too many
+    if len(G) > len(deg_sequence):
+        G.remove_node(0)
+    return G
+
+
+@py_random_state(1)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def random_degree_sequence_graph(sequence, seed=None, tries=10):
+    r"""Returns a simple random graph with the given degree sequence.
+
+    If the maximum degree $d_m$ in the sequence is $O(m^{1/4})$ then the
+    algorithm produces almost uniform random graphs in $O(m d_m)$ time
+    where $m$ is the number of edges.
+
+    Parameters
+    ----------
+    sequence : list of integers
+        Sequence of degrees
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    tries : int, optional
+        Maximum number of tries to create a graph
+
+    Returns
+    -------
+    G : Graph
+        A graph with the specified degree sequence.
+        Nodes are labeled starting at 0 with an index
+        corresponding to the position in the sequence.
+
+    Raises
+    ------
+    NetworkXUnfeasible
+        If the degree sequence is not graphical.
+    NetworkXError
+        If a graph is not produced in specified number of tries
+
+    See Also
+    --------
+    is_graphical, configuration_model
+
+    Notes
+    -----
+    The generator algorithm [1]_ is not guaranteed to produce a graph.
+
+    References
+    ----------
+    .. [1] Mohsen Bayati, Jeong Han Kim, and Amin Saberi,
+       A sequential algorithm for generating random graphs.
+       Algorithmica, Volume 58, Number 4, 860-910,
+       DOI: 10.1007/s00453-009-9340-1
+
+    Examples
+    --------
+    >>> sequence = [1, 2, 2, 3]
+    >>> G = nx.random_degree_sequence_graph(sequence, seed=42)
+    >>> sorted(d for n, d in G.degree())
+    [1, 2, 2, 3]
+    """
+    DSRG = DegreeSequenceRandomGraph(sequence, seed)
+    for try_n in range(tries):
+        try:
+            return DSRG.generate()
+        except nx.NetworkXUnfeasible:
+            pass
+    raise nx.NetworkXError(f"failed to generate graph in {tries} tries")
+
+
+class DegreeSequenceRandomGraph:
+    # class to generate random graphs with a given degree sequence
+    # use random_degree_sequence_graph()
+    def __init__(self, degree, rng):
+        self.rng = rng
+        self.degree = list(degree)
+        if not nx.is_graphical(self.degree):
+            raise nx.NetworkXUnfeasible("degree sequence is not graphical")
+        # node labels are integers 0,...,n-1
+        self.m = sum(self.degree) / 2.0  # number of edges
+        try:
+            self.dmax = max(self.degree)  # maximum degree
+        except ValueError:
+            self.dmax = 0
+
+    def generate(self):
+        # remaining_degree is mapping from int->remaining degree
+        self.remaining_degree = dict(enumerate(self.degree))
+        # add all nodes to make sure we get isolated nodes
+        self.graph = nx.Graph()
+        self.graph.add_nodes_from(self.remaining_degree)
+        # remove zero degree nodes
+        for n, d in list(self.remaining_degree.items()):
+            if d == 0:
+                del self.remaining_degree[n]
+        if len(self.remaining_degree) > 0:
+            # build graph in three phases according to how many unmatched edges
+            self.phase1()
+            self.phase2()
+            self.phase3()
+        return self.graph
+
+    def update_remaining(self, u, v, aux_graph=None):
+        # decrement remaining nodes, modify auxiliary graph if in phase3
+        if aux_graph is not None:
+            # remove edges from auxiliary graph
+            aux_graph.remove_edge(u, v)
+        if self.remaining_degree[u] == 1:
+            del self.remaining_degree[u]
+            if aux_graph is not None:
+                aux_graph.remove_node(u)
+        else:
+            self.remaining_degree[u] -= 1
+        if self.remaining_degree[v] == 1:
+            del self.remaining_degree[v]
+            if aux_graph is not None:
+                aux_graph.remove_node(v)
+        else:
+            self.remaining_degree[v] -= 1
+
+    def p(self, u, v):
+        # degree probability
+        return 1 - self.degree[u] * self.degree[v] / (4.0 * self.m)
+
+    def q(self, u, v):
+        # remaining degree probability
+        norm = max(self.remaining_degree.values()) ** 2
+        return self.remaining_degree[u] * self.remaining_degree[v] / norm
+
+    def suitable_edge(self):
+        """Returns True if and only if an arbitrary remaining node can
+        potentially be joined with some other remaining node.
+ + """ + nodes = iter(self.remaining_degree) + u = next(nodes) + return any(v not in self.graph[u] for v in nodes) + + def phase1(self): + # choose node pairs from (degree) weighted distribution + rem_deg = self.remaining_degree + while sum(rem_deg.values()) >= 2 * self.dmax**2: + u, v = sorted(random_weighted_sample(rem_deg, 2, self.rng)) + if self.graph.has_edge(u, v): + continue + if self.rng.random() < self.p(u, v): # accept edge + self.graph.add_edge(u, v) + self.update_remaining(u, v) + + def phase2(self): + # choose remaining nodes uniformly at random and use rejection sampling + remaining_deg = self.remaining_degree + rng = self.rng + while len(remaining_deg) >= 2 * self.dmax: + while True: + u, v = sorted(rng.sample(list(remaining_deg.keys()), 2)) + if self.graph.has_edge(u, v): + continue + if rng.random() < self.q(u, v): + break + if rng.random() < self.p(u, v): # accept edge + self.graph.add_edge(u, v) + self.update_remaining(u, v) + + def phase3(self): + # build potential remaining edges and choose with rejection sampling + potential_edges = combinations(self.remaining_degree, 2) + # build auxiliary graph of potential edges not already in graph + H = nx.Graph( + [(u, v) for (u, v) in potential_edges if not self.graph.has_edge(u, v)] + ) + rng = self.rng + while self.remaining_degree: + if not self.suitable_edge(): + raise nx.NetworkXUnfeasible("no suitable edges left") + while True: + u, v = sorted(rng.choice(list(H.edges()))) + if rng.random() < self.q(u, v): + break + if rng.random() < self.p(u, v): # accept edge + self.graph.add_edge(u, v) + self.update_remaining(u, v, aux_graph=H) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/directed.py b/.venv/lib/python3.12/site-packages/networkx/generators/directed.py new file mode 100644 index 0000000..0b2b5c3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/directed.py @@ -0,0 +1,572 @@ +""" +Generators for some directed graphs, including growing network (GN) graphs and +scale-free graphs. + +""" + +import numbers +from collections import Counter + +import networkx as nx +from networkx.generators.classic import empty_graph +from networkx.utils import ( + discrete_sequence, + np_random_state, + py_random_state, + weighted_choice, +) + +__all__ = [ + "gn_graph", + "gnc_graph", + "gnr_graph", + "random_k_out_graph", + "scale_free_graph", +] + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def gn_graph(n, kernel=None, create_using=None, seed=None): + """Returns the growing network (GN) digraph with `n` nodes. + + The GN graph is built by adding nodes one at a time with a link to one + previously added node. The target node for the link is chosen with + probability based on degree. The default attachment kernel is a linear + function of the degree of a node. + + The graph is always a (directed) tree. + + Parameters + ---------- + n : int + The number of nodes for the generated graph. + kernel : function + The attachment kernel. + create_using : NetworkX graph constructor, optional (default DiGraph) + Graph type to create. If graph instance, then cleared before populated. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
+
+    Examples
+    --------
+    To create the undirected GN graph, use the :meth:`~DiGraph.to_undirected`
+    method::
+
+    >>> D = nx.gn_graph(10)  # the GN graph
+    >>> G = D.to_undirected()  # the undirected version
+
+    To specify an attachment kernel, use the `kernel` keyword argument::
+
+    >>> D = nx.gn_graph(10, kernel=lambda x: x**1.5)  # A_k = k^1.5
+
+    References
+    ----------
+    .. [1] P. L. Krapivsky and S. Redner,
+       Organization of Growing Random Networks,
+       Phys. Rev. E, 63, 066123, 2001.
+    """
+    G = empty_graph(1, create_using, default=nx.DiGraph)
+    if not G.is_directed():
+        raise nx.NetworkXError("create_using must indicate a Directed Graph")
+
+    if kernel is None:
+
+        def kernel(x):
+            return x
+
+    if n == 1:
+        return G
+
+    G.add_edge(1, 0)  # get started
+    ds = [1, 1]  # degree sequence
+
+    for source in range(2, n):
+        # compute distribution from kernel and degree
+        dist = [kernel(d) for d in ds]
+        # choose target from discrete distribution
+        target = discrete_sequence(1, distribution=dist, seed=seed)[0]
+        G.add_edge(source, target)
+        ds.append(1)  # the source has only one link (degree one)
+        ds[target] += 1  # add one to the target link degree
+    return G
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def gnr_graph(n, p, create_using=None, seed=None):
+    """Returns the growing network with redirection (GNR) digraph with `n`
+    nodes and redirection probability `p`.
+
+    The GNR graph is built by adding nodes one at a time with a link to one
+    previously added node. The previous target node is chosen uniformly at
+    random. With probability `p` the link is instead "redirected" to the
+    successor node of the target.
+
+    The graph is always a (directed) tree.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes for the generated graph.
+    p : float
+        The redirection probability.
+    create_using : NetworkX graph constructor, optional (default DiGraph)
+        Graph type to create. If graph instance, then cleared before populated.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Examples
+    --------
+    To create the undirected GNR graph, use the :meth:`~DiGraph.to_undirected`
+    method::
+
+    >>> D = nx.gnr_graph(10, 0.5)  # the GNR graph
+    >>> G = D.to_undirected()  # the undirected version
+
+    References
+    ----------
+    .. [1] P. L. Krapivsky and S. Redner,
+       Organization of Growing Random Networks,
+       Phys. Rev. E, 63, 066123, 2001.
+    """
+    G = empty_graph(1, create_using, default=nx.DiGraph)
+    if not G.is_directed():
+        raise nx.NetworkXError("create_using must indicate a Directed Graph")
+
+    if n == 1:
+        return G
+
+    for source in range(1, n):
+        target = seed.randrange(0, source)
+        if seed.random() < p and target != 0:
+            target = next(G.successors(target))
+        G.add_edge(source, target)
+    return G
+
+
+@py_random_state(2)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def gnc_graph(n, create_using=None, seed=None):
+    """Returns the growing network with copying (GNC) digraph with `n` nodes.
+
+    The GNC graph is built by adding nodes one at a time with a link to one
+    previously added node (chosen uniformly at random) and to all of that
+    node's successors.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes for the generated graph.
+    create_using : NetworkX graph constructor, optional (default DiGraph)
+        Graph type to create. If graph instance, then cleared before populated.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    References
+    ----------
+    .. [1] P. L. Krapivsky and S. Redner,
+       Network Growth by Copying,
+       Phys. Rev. E, 71, 036118, 2005.
+    """
+    G = empty_graph(1, create_using, default=nx.DiGraph)
+    if not G.is_directed():
+        raise nx.NetworkXError("create_using must indicate a Directed Graph")
+
+    if n == 1:
+        return G
+
+    for source in range(1, n):
+        target = seed.randrange(0, source)
+        for succ in G.successors(target):
+            G.add_edge(source, succ)
+        G.add_edge(source, target)
+    return G
+
+
+@py_random_state(6)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def scale_free_graph(
+    n,
+    alpha=0.41,
+    beta=0.54,
+    gamma=0.05,
+    delta_in=0.2,
+    delta_out=0,
+    seed=None,
+    initial_graph=None,
+):
+    """Returns a scale-free directed graph.
+
+    Parameters
+    ----------
+    n : integer
+        Number of nodes in graph
+    alpha : float
+        Probability for adding a new node connected to an existing node
+        chosen randomly according to the in-degree distribution.
+    beta : float
+        Probability for adding an edge between two existing nodes.
+        One existing node is chosen randomly according to the in-degree
+        distribution and the other chosen randomly according to the out-degree
+        distribution.
+    gamma : float
+        Probability for adding a new node connected to an existing node
+        chosen randomly according to the out-degree distribution.
+    delta_in : float
+        Bias for choosing nodes from in-degree distribution.
+    delta_out : float
+        Bias for choosing nodes from out-degree distribution.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    initial_graph : MultiDiGraph instance, optional
+        Build the scale-free graph starting from this initial MultiDiGraph,
+        if provided.
+
+    Returns
+    -------
+    MultiDiGraph
+
+    Examples
+    --------
+    Create a scale-free graph on one hundred nodes::
+
+    >>> G = nx.scale_free_graph(100)
+
+    Notes
+    -----
+    The sum of `alpha`, `beta`, and `gamma` must be 1.
+
+    References
+    ----------
+    .. [1] B. Bollobás, C. Borgs, J. Chayes, and O. Riordan,
+       Directed scale-free graphs,
+       Proceedings of the fourteenth annual ACM-SIAM Symposium on
+       Discrete Algorithms, 132--139, 2003.
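+
+    The result is a :class:`~networkx.MultiDiGraph`; when a simple directed
+    graph is wanted, parallel edges can be collapsed after generation
+    (any self-loops survive the conversion)::
+
+    >>> G = nx.scale_free_graph(100, seed=42)
+    >>> D = nx.DiGraph(G)  # collapse parallel edges, keep self-loops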
+ """ + + def _choose_node(candidates, node_list, delta): + if delta > 0: + bias_sum = len(node_list) * delta + p_delta = bias_sum / (bias_sum + len(candidates)) + if seed.random() < p_delta: + return seed.choice(node_list) + return seed.choice(candidates) + + if initial_graph is not None and hasattr(initial_graph, "_adj"): + if not isinstance(initial_graph, nx.MultiDiGraph): + raise nx.NetworkXError("initial_graph must be a MultiDiGraph.") + G = initial_graph + else: + # Start with 3-cycle + G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 0)]) + + if alpha <= 0: + raise ValueError("alpha must be > 0.") + if beta <= 0: + raise ValueError("beta must be > 0.") + if gamma <= 0: + raise ValueError("gamma must be > 0.") + + if abs(alpha + beta + gamma - 1.0) >= 1e-9: + raise ValueError("alpha+beta+gamma must equal 1.") + + if delta_in < 0: + raise ValueError("delta_in must be >= 0.") + + if delta_out < 0: + raise ValueError("delta_out must be >= 0.") + + # pre-populate degree states + vs = sum((count * [idx] for idx, count in G.out_degree()), []) + ws = sum((count * [idx] for idx, count in G.in_degree()), []) + + # pre-populate node state + node_list = list(G.nodes()) + + # see if there already are number-based nodes + numeric_nodes = [n for n in node_list if isinstance(n, numbers.Number)] + if len(numeric_nodes) > 0: + # set cursor for new nodes appropriately + cursor = max(int(n.real) for n in numeric_nodes) + 1 + else: + # or start at zero + cursor = 0 + + while len(G) < n: + r = seed.random() + + # random choice in alpha,beta,gamma ranges + if r < alpha: + # alpha + # add new node v + v = cursor + cursor += 1 + # also add to node state + node_list.append(v) + # choose w according to in-degree and delta_in + w = _choose_node(ws, node_list, delta_in) + + elif r < alpha + beta: + # beta + # choose v according to out-degree and delta_out + v = _choose_node(vs, node_list, delta_out) + # choose w according to in-degree and delta_in + w = _choose_node(ws, node_list, delta_in) + + else: + # gamma + # choose v according to out-degree and delta_out + v = _choose_node(vs, node_list, delta_out) + # add new node w + w = cursor + cursor += 1 + # also add to node state + node_list.append(w) + + # add edge to graph + G.add_edge(v, w) + + # update degree states + vs.append(v) + ws.append(w) + + return G + + +@py_random_state(4) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_uniform_k_out_graph(n, k, self_loops=True, with_replacement=True, seed=None): + """Returns a random `k`-out graph with uniform attachment. + + A random `k`-out graph with uniform attachment is a multidigraph + generated by the following algorithm. For each node *u*, choose + `k` nodes *v* uniformly at random (with replacement). Add a + directed edge joining *u* to *v*. + + Parameters + ---------- + n : int + The number of nodes in the returned graph. + + k : int + The out-degree of each node in the returned graph. + + self_loops : bool + If True, self-loops are allowed when generating the graph. + + with_replacement : bool + If True, neighbors are chosen with replacement and the + returned graph will be a directed multigraph. Otherwise, + neighbors are chosen without replacement and the returned graph + will be a directed graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + NetworkX graph + A `k`-out-regular directed graph generated according to the + above algorithm. 
+        It will be a multigraph if and only if
+        `with_replacement` is True.
+
+    Raises
+    ------
+    ValueError
+        If `with_replacement` is False and `k` is greater than
+        `n`.
+
+    See also
+    --------
+    random_k_out_graph
+
+    Notes
+    -----
+    The returned digraph or multidigraph may not be strongly connected, or
+    even weakly connected.
+
+    If `with_replacement` is True, this function is similar to
+    :func:`random_k_out_graph`, if that function had parameter `alpha`
+    set to positive infinity.
+
+    """
+    if with_replacement:
+        create_using = nx.MultiDiGraph()
+
+        def sample(v, nodes):
+            if not self_loops:
+                nodes = nodes - {v}
+            return (seed.choice(list(nodes)) for i in range(k))
+
+    else:
+        create_using = nx.DiGraph()
+
+        def sample(v, nodes):
+            if not self_loops:
+                nodes = nodes - {v}
+            return seed.sample(list(nodes), k)
+
+    G = nx.empty_graph(n, create_using)
+    nodes = set(G)
+    for u in G:
+        G.add_edges_from((u, v) for v in sample(u, nodes))
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def random_k_out_graph(n, k, alpha, self_loops=True, seed=None):
+    """Returns a random `k`-out graph with preferential attachment.
+
+    .. versionchanged:: 3.5
+       Different implementations will be used based on whether NumPy is
+       available. See Notes for details.
+
+    A random `k`-out graph with preferential attachment is a
+    multidigraph generated by the following algorithm.
+
+    1. Begin with an empty digraph, and initially set each node to have
+       weight `alpha`.
+    2. Choose a node `u` with out-degree less than `k` uniformly at
+       random.
+    3. Choose a node `v` with probability proportional to its
+       weight.
+    4. Add a directed edge from `u` to `v`, and increase the weight
+       of `v` by one.
+    5. If each node has out-degree `k`, halt, otherwise repeat from
+       step 2.
+
+    For more information on this model of random graph, see [1]_.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes in the returned graph.
+
+    k : int
+        The out-degree of each node in the returned graph.
+
+    alpha : float
+        A positive :class:`float` representing the initial weight of
+        each vertex. A higher number means that in step 3 above, nodes
+        will be chosen more like a true uniformly random sample, and a
+        lower number means that nodes are more likely to be chosen as
+        their in-degree increases. If this parameter is not positive, a
+        :exc:`ValueError` is raised.
+
+    self_loops : bool
+        If True, self-loops are allowed when generating the graph.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    :class:`~networkx.classes.MultiDiGraph`
+        A `k`-out-regular multidigraph generated according to the above
+        algorithm.
+
+    Raises
+    ------
+    ValueError
+        If `alpha` is not positive.
+
+    Notes
+    -----
+    The returned multidigraph may not be strongly connected, or even
+    weakly connected.
+
+    `random_k_out_graph` has two implementations: an array-based formulation that
+    uses `numpy` (``_random_k_out_graph_numpy``), and a pure-Python
+    implementation (``_random_k_out_graph_python``).
+    The NumPy implementation is more performant, especially for large `n`, and is
+    therefore used by default. If NumPy is not installed in the environment,
+    then the pure Python implementation is executed.
+    However, you can explicitly control which implementation is executed by directly
+    calling the corresponding function::
+
+        # Use numpy if available, else Python
+        nx.random_k_out_graph(1000, 5, alpha=1)
+
+        # Use the numpy-based implementation (raises ImportError if numpy not installed)
+        nx.generators.directed._random_k_out_graph_numpy(1000, 5, alpha=1)
+
+        # Use the Python-based implementation
+        nx.generators.directed._random_k_out_graph_python(1000, 5, alpha=1)
+
+    References
+    ----------
+    .. [1] Peterson, Nicholas R., and Boris Pittel.
+       "Distance between two random `k`-out digraphs, with and without preferential attachment."
+       arXiv preprint arXiv:1311.5961 (2013).
+
+    """
+    if alpha <= 0:
+        raise ValueError("alpha must be positive")
+    try:  # Use numpy if available, otherwise fall back to pure Python implementation
+        return _random_k_out_graph_numpy(n, k, alpha, self_loops, seed)
+    except ImportError:
+        return _random_k_out_graph_python(n, k, alpha, self_loops, seed)
+
+
+@np_random_state(4)
+def _random_k_out_graph_numpy(n, k, alpha, self_loops=True, seed=None):
+    import numpy as np
+
+    G = nx.empty_graph(n, create_using=nx.MultiDiGraph)
+    nodes = np.arange(n)
+    remaining_mask = np.full(n, True)
+    weights = np.full(n, alpha)
+    total_weight = n * alpha
+    out_strengths = np.zeros(n)
+
+    for i in range(k * n):
+        u = seed.choice(nodes[remaining_mask])
+
+        if self_loops:
+            v = seed.choice(nodes, p=weights / total_weight)
+        else:  # Ignore weight of u when selecting v
+            u_weight = weights[u]
+            weights[u] = 0
+            v = seed.choice(nodes, p=weights / (total_weight - u_weight))
+            weights[u] = u_weight
+
+        G.add_edge(u, v)
+        weights[v] += 1
+        total_weight += 1
+        out_strengths[u] += 1
+        if out_strengths[u] == k:
+            remaining_mask[u] = False
+    return G
+
+
+@py_random_state(4)
+def _random_k_out_graph_python(n, k, alpha, self_loops=True, seed=None):
+    G = nx.empty_graph(n, create_using=nx.MultiDiGraph)
+    weights = Counter(dict.fromkeys(G, alpha))
+    out_strengths = Counter(dict.fromkeys(G, 0))
+
+    for i in range(k * n):
+        u = seed.choice(list(out_strengths.keys()))
+        # If self-loops are not allowed, make the source node `u` have
+        # weight zero.
+        if not self_loops:
+            uweight = weights.pop(u)
+
+        v = weighted_choice(weights, seed=seed)
+
+        if not self_loops:
+            weights[u] = uweight
+
+        G.add_edge(u, v)
+        weights[v] += 1
+        out_strengths[u] += 1
+        if out_strengths[u] == k:
+            out_strengths.pop(u)
+    return G
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/duplication.py b/.venv/lib/python3.12/site-packages/networkx/generators/duplication.py
new file mode 100644
index 0000000..3c3ade6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/duplication.py
@@ -0,0 +1,174 @@
+"""Functions for generating graphs based on the "duplication" method.
+
+These graph generators start with a small initial graph then duplicate
+nodes and (partially) duplicate their edges. These functions are
+generally inspired by biological networks.
+
+"""
+
+import networkx as nx
+from networkx.exception import NetworkXError
+from networkx.utils import py_random_state
+from networkx.utils.misc import check_create_using
+
+__all__ = ["partial_duplication_graph", "duplication_divergence_graph"]
+
+
+@py_random_state(4)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def partial_duplication_graph(N, n, p, q, seed=None, *, create_using=None):
+    """Returns a random graph using the partial duplication model.
+
+    Parameters
+    ----------
+    N : int
+        The total number of nodes in the final graph.
+
+    n : int
+        The number of nodes in the initial clique.
+
+    p : float
+        The probability of joining each neighbor of a node to the
+        duplicate node. Must be a number between zero and one,
+        inclusive.
+
+    q : float
+        The probability of joining the source node to the duplicate
+        node. Must be a number between zero and one, inclusive.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    create_using : Graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+        Multigraph and directed types are not supported and raise a ``NetworkXError``.
+
+    Notes
+    -----
+    A graph of `N` nodes is grown by creating a fully connected graph
+    of size `n`. The following procedure is then repeated until
+    a total of `N` nodes have been reached.
+
+    1. A random node, *u*, is picked and a new node, *v*, is created.
+    2. For each neighbor of *u* an edge from the neighbor to *v* is created
+       with probability `p`.
+    3. An edge from *u* to *v* is created with probability `q`.
+
+    This algorithm appears in [1]_.
+
+    This implementation allows the possibility of generating
+    disconnected graphs.
+
+    References
+    ----------
+    .. [1] Knudsen Michael, and Carsten Wiuf. "A Markov chain approach to
+       randomly grown graphs." Journal of Applied Mathematics 2008.
+
+    """
+    create_using = check_create_using(create_using, directed=False, multigraph=False)
+    if p < 0 or p > 1 or q < 0 or q > 1:
+        msg = "partial duplication graph must have 0 <= p, q <= 1."
+        raise NetworkXError(msg)
+    if n > N:
+        raise NetworkXError("partial duplication graph must have n <= N.")
+
+    G = nx.complete_graph(n, create_using)
+    for new_node in range(n, N):
+        # Pick a random vertex, u, already in the graph.
+        src_node = seed.randint(0, new_node - 1)
+
+        # Add a new vertex, v, to the graph.
+        G.add_node(new_node)
+
+        # For each neighbor of u...
+        for nbr_node in list(nx.all_neighbors(G, src_node)):
+            # Add the neighbor to v with probability p.
+            if seed.random() < p:
+                G.add_edge(new_node, nbr_node)
+
+        # Join v and u with probability q.
+        if seed.random() < q:
+            G.add_edge(new_node, src_node)
+    return G
+
+
+@py_random_state(2)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def duplication_divergence_graph(n, p, seed=None, *, create_using=None):
+    """Returns an undirected graph using the duplication-divergence model.
+
+    A graph of `n` nodes is created by duplicating the initial nodes
+    and retaining edges incident to the original nodes with a retention
+    probability `p`.
+
+    Parameters
+    ----------
+    n : int
+        The desired number of nodes in the graph.
+    p : float
+        The probability for retaining the edge of the replicated node.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    create_using : Graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+        Multigraph and directed types are not supported and raise a ``NetworkXError``.
+
+    Returns
+    -------
+    G : Graph
+
+    Raises
+    ------
+    NetworkXError
+        If `p` is not a valid probability.
+        If `n` is less than 2.
+
+    Notes
+    -----
+    This algorithm appears in [1]_.
+
+    This implementation disallows the possibility of generating
+    disconnected graphs.
+
+    References
+    ----------
+    .. [1] I. Ispolatov, P. L. Krapivsky, A. Yuryev,
+       "Duplication-divergence model of protein interaction network",
+       Phys. Rev. E, 71, 061911, 2005.
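+
+    Examples
+    --------
+    Grow a small network, retaining each duplicated edge with
+    probability ``p = 0.3`` (which edges survive depends on the seed):
+
+    >>> G = nx.duplication_divergence_graph(20, 0.3, seed=1)
+    >>> len(G)
+    20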
+ + """ + if p > 1 or p < 0: + msg = f"NetworkXError p={p} is not in [0,1]." + raise nx.NetworkXError(msg) + if n < 2: + msg = "n must be greater than or equal to 2" + raise nx.NetworkXError(msg) + + create_using = check_create_using(create_using, directed=False, multigraph=False) + G = nx.empty_graph(create_using=create_using) + + # Initialize the graph with two connected nodes. + G.add_edge(0, 1) + i = 2 + while i < n: + # Choose a random node from current graph to duplicate. + random_node = seed.choice(list(G)) + # Make the replica. + G.add_node(i) + # flag indicates whether at least one edge is connected on the replica. + flag = False + for nbr in G.neighbors(random_node): + if seed.random() < p: + # Link retention step. + G.add_edge(i, nbr) + flag = True + if not flag: + # Delete replica if no edges retained. + G.remove_node(i) + else: + # Successful duplication. + i += 1 + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/ego.py b/.venv/lib/python3.12/site-packages/networkx/generators/ego.py new file mode 100644 index 0000000..91ff3e3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/ego.py @@ -0,0 +1,66 @@ +""" +Ego graph. +""" + +__all__ = ["ego_graph"] + +import networkx as nx + + +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def ego_graph(G, n, radius=1, center=True, undirected=False, distance=None): + """Returns induced subgraph of neighbors centered at node n within + a given radius. + + Parameters + ---------- + G : graph + A NetworkX Graph or DiGraph + + n : node + A single node + + radius : number, optional + Include all neighbors of distance<=radius from n. + + center : bool, optional + If False, do not include center node in graph + + undirected : bool, optional + If True use both in- and out-neighbors of directed graphs. + + distance : key, optional + Use specified edge data key as distance. For example, setting + distance='weight' will use the edge weight to measure the + distance from the node n. + + Notes + ----- + For directed graphs D this produces the "out" neighborhood + or successors. If you want the neighborhood of predecessors + first reverse the graph with D.reverse(). If you want both + directions use the keyword argument undirected=True. + + Node, edge, and graph attributes are copied to the returned subgraph. + """ + if undirected: + if distance is not None: + sp, _ = nx.single_source_dijkstra( + G.to_undirected(), n, cutoff=radius, weight=distance + ) + else: + sp = dict( + nx.single_source_shortest_path_length( + G.to_undirected(), n, cutoff=radius + ) + ) + else: + if distance is not None: + sp, _ = nx.single_source_dijkstra(G, n, cutoff=radius, weight=distance) + else: + sp = nx.single_source_shortest_path_length(G, n, cutoff=radius) + + H = G.subgraph(sp).copy() + if not center: + H.remove_node(n) + return H diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/expanders.py b/.venv/lib/python3.12/site-packages/networkx/generators/expanders.py new file mode 100644 index 0000000..65db764 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/expanders.py @@ -0,0 +1,476 @@ +"""Provides explicit constructions of expander graphs.""" + +import itertools + +import networkx as nx + +__all__ = [ + "margulis_gabber_galil_graph", + "chordal_cycle_graph", + "paley_graph", + "maybe_regular_expander", + "is_regular_expander", + "random_regular_expander_graph", +] + + +# Other discrete torus expanders can be constructed by using the following edge +# sets. 
For more information, see Chapter 4, "Expander Graphs", in +# "Pseudorandomness", by Salil Vadhan. +# +# For a directed expander, add edges from (x, y) to: +# +# (x, y), +# ((x + 1) % n, y), +# (x, (y + 1) % n), +# (x, (x + y) % n), +# (-y % n, x) +# +# For an undirected expander, add the reverse edges. +# +# Also appearing in the paper of Gabber and Galil: +# +# (x, y), +# (x, (x + y) % n), +# (x, (x + y + 1) % n), +# ((x + y) % n, y), +# ((x + y + 1) % n, y) +# +# and: +# +# (x, y), +# ((x + 2*y) % n, y), +# ((x + (2*y + 1)) % n, y), +# ((x + (2*y + 2)) % n, y), +# (x, (y + 2*x) % n), +# (x, (y + (2*x + 1)) % n), +# (x, (y + (2*x + 2)) % n), +# +@nx._dispatchable(graphs=None, returns_graph=True) +def margulis_gabber_galil_graph(n, create_using=None): + r"""Returns the Margulis-Gabber-Galil undirected MultiGraph on `n^2` nodes. + + The undirected MultiGraph is regular with degree `8`. Nodes are integer + pairs. The second-largest eigenvalue of the adjacency matrix of the graph + is at most `5 \sqrt{2}`, regardless of `n`. + + Parameters + ---------- + n : int + Determines the number of nodes in the graph: `n^2`. + create_using : NetworkX graph constructor, optional (default MultiGraph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : graph + The constructed undirected multigraph. + + Raises + ------ + NetworkXError + If the graph is directed or not a multigraph. + + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed() or not G.is_multigraph(): + msg = "`create_using` must be an undirected multigraph." + raise nx.NetworkXError(msg) + + for x, y in itertools.product(range(n), repeat=2): + for u, v in ( + ((x + 2 * y) % n, y), + ((x + (2 * y + 1)) % n, y), + (x, (y + 2 * x) % n), + (x, (y + (2 * x + 1)) % n), + ): + G.add_edge((x, y), (u, v)) + G.graph["name"] = f"margulis_gabber_galil_graph({n})" + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def chordal_cycle_graph(p, create_using=None): + """Returns the chordal cycle graph on `p` nodes. + + The returned graph is a cycle graph on `p` nodes with chords joining each + vertex `x` to its inverse modulo `p`. This graph is a (mildly explicit) + 3-regular expander [1]_. + + `p` *must* be a prime number. + + Parameters + ---------- + p : a prime number + + The number of vertices in the graph. This also indicates where the + chordal edges in the cycle will be created. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : graph + The constructed undirected multigraph. + + Raises + ------ + NetworkXError + + If `create_using` indicates directed or not a multigraph. + + References + ---------- + + .. [1] Theorem 4.4.2 in A. Lubotzky. "Discrete groups, expanding graphs and + invariant measures", volume 125 of Progress in Mathematics. + Birkhäuser Verlag, Basel, 1994. + + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed() or not G.is_multigraph(): + msg = "`create_using` must be an undirected multigraph." + raise nx.NetworkXError(msg) + + for x in range(p): + left = (x - 1) % p + right = (x + 1) % p + # Here we apply Fermat's Little Theorem to compute the multiplicative + # inverse of x in Z/pZ. By Fermat's Little Theorem, + # + # x^p = x (mod p) + # + # Therefore, + # + # x * x^(p - 2) = 1 (mod p) + # + # The number 0 is a special case: we just let its inverse be itself. 
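+        # For example, with p = 7 the chord partner of x = 3 is
+        # pow(3, 5, 7) == 5, and indeed 3 * 5 == 15 == 1 (mod 7).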
+ chord = pow(x, p - 2, p) if x > 0 else 0 + for y in (left, right, chord): + G.add_edge(x, y) + G.graph["name"] = f"chordal_cycle_graph({p})" + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def paley_graph(p, create_using=None): + r"""Returns the Paley $\frac{(p-1)}{2}$ -regular graph on $p$ nodes. + + The returned graph is a graph on $\mathbb{Z}/p\mathbb{Z}$ with edges between $x$ and $y$ + if and only if $x-y$ is a nonzero square in $\mathbb{Z}/p\mathbb{Z}$. + + If $p \equiv 1 \pmod 4$, $-1$ is a square in + $\mathbb{Z}/p\mathbb{Z}$ and therefore $x-y$ is a square if and + only if $y-x$ is also a square, i.e the edges in the Paley graph are symmetric. + + If $p \equiv 3 \pmod 4$, $-1$ is not a square in $\mathbb{Z}/p\mathbb{Z}$ + and therefore either $x-y$ or $y-x$ is a square in $\mathbb{Z}/p\mathbb{Z}$ but not both. + + Note that a more general definition of Paley graphs extends this construction + to graphs over $q=p^n$ vertices, by using the finite field $F_q$ instead of + $\mathbb{Z}/p\mathbb{Z}$. + This construction requires to compute squares in general finite fields and is + not what is implemented here (i.e `paley_graph(25)` does not return the true + Paley graph associated with $5^2$). + + Parameters + ---------- + p : int, an odd prime number. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : graph + The constructed directed graph. + + Raises + ------ + NetworkXError + If the graph is a multigraph. + + References + ---------- + Chapter 13 in B. Bollobas, Random Graphs. Second edition. + Cambridge Studies in Advanced Mathematics, 73. + Cambridge University Press, Cambridge (2001). + """ + G = nx.empty_graph(0, create_using, default=nx.DiGraph) + if G.is_multigraph(): + msg = "`create_using` cannot be a multigraph." + raise nx.NetworkXError(msg) + + # Compute the squares in Z/pZ. + # Make it a set to uniquify (there are exactly (p-1)/2 squares in Z/pZ + # when is prime). + square_set = {(x**2) % p for x in range(1, p) if (x**2) % p != 0} + + for x in range(p): + for x2 in square_set: + G.add_edge(x, (x + x2) % p) + G.graph["name"] = f"paley({p})" + return G + + +@nx.utils.decorators.np_random_state("seed") +@nx._dispatchable(graphs=None, returns_graph=True) +def maybe_regular_expander(n, d, *, create_using=None, max_tries=100, seed=None): + r"""Utility for creating a random regular expander. + + Returns a random $d$-regular graph on $n$ nodes which is an expander + graph with very good probability. + + Parameters + ---------- + n : int + The number of nodes. + d : int + The degree of each node. + create_using : Graph Instance or Constructor + Indicator of type of graph to return. + If a Graph-type instance, then clear and use it. + If a constructor, call it to create an empty graph. + Use the Graph constructor by default. + max_tries : int. (default: 100) + The number of allowed loops when generating each independent cycle + seed : (default: None) + Seed used to set random number generation state. See :ref`Randomness`. + + Notes + ----- + The nodes are numbered from $0$ to $n - 1$. + + The graph is generated by taking $d / 2$ random independent cycles. + + Joel Friedman proved that in this model the resulting + graph is an expander with probability + $1 - O(n^{-\tau})$ where $\tau = \lceil (\sqrt{d - 1}) / 2 \rceil - 1$. 
[1]_ + + Examples + -------- + >>> G = nx.maybe_regular_expander(n=200, d=6, seed=8020) + + Returns + ------- + G : graph + The constructed undirected graph. + + Raises + ------ + NetworkXError + If $d % 2 != 0$ as the degree must be even. + If $n - 1$ is less than $ 2d $ as the graph is complete at most. + If max_tries is reached + + See Also + -------- + is_regular_expander + random_regular_expander_graph + + References + ---------- + .. [1] Joel Friedman, + A Proof of Alon's Second Eigenvalue Conjecture and Related Problems, 2004 + https://arxiv.org/abs/cs/0405020 + + """ + + import numpy as np + + if n < 1: + raise nx.NetworkXError("n must be a positive integer") + + if not (d >= 2): + raise nx.NetworkXError("d must be greater than or equal to 2") + + if not (d % 2 == 0): + raise nx.NetworkXError("d must be even") + + if not (n - 1 >= d): + raise nx.NetworkXError( + f"Need n-1>= d to have room for {d // 2} independent cycles with {n} nodes" + ) + + G = nx.empty_graph(n, create_using) + + if n < 2: + return G + + cycles = [] + edges = set() + + # Create d / 2 cycles + for i in range(d // 2): + iterations = max_tries + # Make sure the cycles are independent to have a regular graph + while len(edges) != (i + 1) * n: + iterations -= 1 + # Faster than random.permutation(n) since there are only + # (n-1)! distinct cycles against n! permutations of size n + cycle = seed.permutation(n - 1).tolist() + cycle.append(n - 1) + + new_edges = { + (u, v) + for u, v in nx.utils.pairwise(cycle, cyclic=True) + if (u, v) not in edges and (v, u) not in edges + } + # If the new cycle has no edges in common with previous cycles + # then add it to the list otherwise try again + if len(new_edges) == n: + cycles.append(cycle) + edges.update(new_edges) + + if iterations == 0: + raise nx.NetworkXError("Too many iterations in maybe_regular_expander") + + G.add_edges_from(edges) + + return G + + +@nx.utils.not_implemented_for("directed") +@nx.utils.not_implemented_for("multigraph") +@nx._dispatchable(preserve_edge_attrs={"G": {"weight": 1}}) +def is_regular_expander(G, *, epsilon=0): + r"""Determines whether the graph G is a regular expander. [1]_ + + An expander graph is a sparse graph with strong connectivity properties. + + More precisely, this helper checks whether the graph is a + regular $(n, d, \lambda)$-expander with $\lambda$ close to + the Alon-Boppana bound and given by + $\lambda = 2 \sqrt{d - 1} + \epsilon$. [2]_ + + In the case where $\epsilon = 0$ then if the graph successfully passes the test + it is a Ramanujan graph. [3]_ + + A Ramanujan graph has spectral gap almost as large as possible, which makes them + excellent expanders. + + Parameters + ---------- + G : NetworkX graph + epsilon : int, float, default=0 + + Returns + ------- + bool + Whether the given graph is a regular $(n, d, \lambda)$-expander + where $\lambda = 2 \sqrt{d - 1} + \epsilon$. + + Examples + -------- + >>> G = nx.random_regular_expander_graph(20, 4) + >>> nx.is_regular_expander(G) + True + + See Also + -------- + maybe_regular_expander + random_regular_expander_graph + + References + ---------- + .. [1] Expander graph, https://en.wikipedia.org/wiki/Expander_graph + .. [2] Alon-Boppana bound, https://en.wikipedia.org/wiki/Alon%E2%80%93Boppana_bound + .. 
[3] Ramanujan graphs, https://en.wikipedia.org/wiki/Ramanujan_graph + + """ + + import numpy as np + import scipy as sp + + if epsilon < 0: + raise nx.NetworkXError("epsilon must be non negative") + + if not nx.is_regular(G): + return False + + _, d = nx.utils.arbitrary_element(G.degree) + + A = nx.adjacency_matrix(G, dtype=float) + lams = sp.sparse.linalg.eigsh(A, which="LM", k=2, return_eigenvectors=False) + + # lambda2 is the second biggest eigenvalue + lambda2 = min(lams) + + # Use bool() to convert numpy scalar to Python Boolean + return bool(abs(lambda2) < 2 * np.sqrt(d - 1) + epsilon) + + +@nx.utils.decorators.np_random_state("seed") +@nx._dispatchable(graphs=None, returns_graph=True) +def random_regular_expander_graph( + n, d, *, epsilon=0, create_using=None, max_tries=100, seed=None +): + r"""Returns a random regular expander graph on $n$ nodes with degree $d$. + + An expander graph is a sparse graph with strong connectivity properties. [1]_ + + More precisely the returned graph is a $(n, d, \lambda)$-expander with + $\lambda = 2 \sqrt{d - 1} + \epsilon$, close to the Alon-Boppana bound. [2]_ + + In the case where $\epsilon = 0$ it returns a Ramanujan graph. + A Ramanujan graph has spectral gap almost as large as possible, + which makes them excellent expanders. [3]_ + + Parameters + ---------- + n : int + The number of nodes. + d : int + The degree of each node. + epsilon : int, float, default=0 + max_tries : int, (default: 100) + The number of allowed loops, also used in the maybe_regular_expander utility + seed : (default: None) + Seed used to set random number generation state. See :ref`Randomness`. + + Raises + ------ + NetworkXError + If max_tries is reached + + Examples + -------- + >>> G = nx.random_regular_expander_graph(20, 4) + >>> nx.is_regular_expander(G) + True + + Notes + ----- + This loops over `maybe_regular_expander` and can be slow when + $n$ is too big or $\epsilon$ too small. + + See Also + -------- + maybe_regular_expander + is_regular_expander + + References + ---------- + .. [1] Expander graph, https://en.wikipedia.org/wiki/Expander_graph + .. [2] Alon-Boppana bound, https://en.wikipedia.org/wiki/Alon%E2%80%93Boppana_bound + .. 
[3] Ramanujan graphs, https://en.wikipedia.org/wiki/Ramanujan_graph + + """ + G = maybe_regular_expander( + n, d, create_using=create_using, max_tries=max_tries, seed=seed + ) + iterations = max_tries + + while not is_regular_expander(G, epsilon=epsilon): + iterations -= 1 + G = maybe_regular_expander( + n=n, d=d, create_using=create_using, max_tries=max_tries, seed=seed + ) + + if iterations == 0: + raise nx.NetworkXError( + "Too many iterations in random_regular_expander_graph" + ) + + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/geometric.py b/.venv/lib/python3.12/site-packages/networkx/generators/geometric.py new file mode 100644 index 0000000..fdd4f62 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/geometric.py @@ -0,0 +1,1037 @@ +"""Generators for geometric graphs.""" + +import math +from bisect import bisect_left +from itertools import accumulate, combinations, product + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = [ + "geometric_edges", + "geographical_threshold_graph", + "navigable_small_world_graph", + "random_geometric_graph", + "soft_random_geometric_graph", + "thresholded_random_geometric_graph", + "waxman_graph", + "geometric_soft_configuration_graph", +] + + +@nx._dispatchable(node_attrs="pos_name") +def geometric_edges(G, radius, p=2, *, pos_name="pos"): + """Returns edge list of node pairs within `radius` of each other. + + Parameters + ---------- + G : networkx graph + The graph from which to generate the edge list. The nodes in `G` should + have an attribute ``pos`` corresponding to the node position, which is + used to compute the distance to other nodes. + radius : scalar + The distance threshold. Edges are included in the edge list if the + distance between the two nodes is less than `radius`. + pos_name : string, default="pos" + The name of the node attribute which represents the position of each + node in 2D coordinates. Every node in the Graph must have this attribute. + p : scalar, default=2 + The `Minkowski distance metric + `_ used to compute + distances. The default value is 2, i.e. Euclidean distance. + + Returns + ------- + edges : list + List of edges whose distances are less than `radius` + + Notes + ----- + Radius uses Minkowski distance metric `p`. + If scipy is available, `scipy.spatial.cKDTree` is used to speed computation. + + Examples + -------- + Create a graph with nodes that have a "pos" attribute representing 2D + coordinates. + + >>> G = nx.Graph() + >>> G.add_nodes_from( + ... [ + ... (0, {"pos": (0, 0)}), + ... (1, {"pos": (3, 0)}), + ... (2, {"pos": (8, 0)}), + ... ] + ... ) + >>> nx.geometric_edges(G, radius=1) + [] + >>> nx.geometric_edges(G, radius=4) + [(0, 1)] + >>> nx.geometric_edges(G, radius=6) + [(0, 1), (1, 2)] + >>> nx.geometric_edges(G, radius=9) + [(0, 1), (0, 2), (1, 2)] + """ + # Input validation - every node must have a "pos" attribute + for n, pos in G.nodes(data=pos_name): + if pos is None: + raise nx.NetworkXError( + f"Node {n} (and all nodes) must have a '{pos_name}' attribute." + ) + + # NOTE: See _geometric_edges for the actual implementation. The reason this + # is split into two functions is to avoid the overhead of input validation + # every time the function is called internally in one of the other + # geometric generators + return _geometric_edges(G, radius, p, pos_name) + + +def _geometric_edges(G, radius, p, pos_name): + """ + Implements `geometric_edges` without input validation. 
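+    Callers are expected to have already verified that every node carries
+    the `pos_name` position attribute.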
See `geometric_edges` + for complete docstring. + """ + nodes_pos = G.nodes(data=pos_name) + try: + import scipy as sp + except ImportError: + # no scipy KDTree so compute by for-loop + radius_p = radius**p + edges = [ + (u, v) + for (u, pu), (v, pv) in combinations(nodes_pos, 2) + if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius_p + ] + return edges + # scipy KDTree is available + nodes, coords = list(zip(*nodes_pos)) + kdtree = sp.spatial.cKDTree(coords) # Cannot provide generator. + edge_indexes = kdtree.query_pairs(radius, p) + edges = [(nodes[u], nodes[v]) for u, v in sorted(edge_indexes)] + return edges + + +@py_random_state(5) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_geometric_graph( + n, radius, dim=2, pos=None, p=2, seed=None, *, pos_name="pos" +): + """Returns a random geometric graph in the unit cube of dimensions `dim`. + + The random geometric graph model places `n` nodes uniformly at + random in the unit cube. Two nodes are joined by an edge if the + distance between the nodes is at most `radius`. + + Edges are determined using a KDTree when SciPy is available. + This reduces the time complexity from $O(n^2)$ to $O(n)$. + + Parameters + ---------- + n : int or iterable + Number of nodes or iterable of nodes + radius: float + Distance threshold value + dim : int, optional + Dimension of graph + pos : dict, optional + A dictionary keyed by node with node positions as values. + p : float, optional + Which Minkowski distance metric to use. `p` has to meet the condition + ``1 <= p <= infinity``. + + If this argument is not specified, the :math:`L^2` metric + (the Euclidean distance metric), p = 2 is used. + This should not be confused with the `p` of an Erdős-Rényi random + graph, which represents probability. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + pos_name : string, default="pos" + The name of the node attribute which represents the position + in 2D coordinates of the node in the returned graph. + + Returns + ------- + Graph + A random geometric graph, undirected and without self-loops. + Each node has a node attribute ``'pos'`` that stores the + position of that node in Euclidean space as provided by the + ``pos`` keyword argument or, if ``pos`` was not provided, as + generated by this function. + + Examples + -------- + Create a random geometric graph on twenty nodes where nodes are joined by + an edge if their distance is at most 0.1:: + + >>> G = nx.random_geometric_graph(20, 0.1) + + Notes + ----- + This uses a *k*-d tree to build the graph. + + The `pos` keyword argument can be used to specify node positions so you + can create an arbitrary distribution and domain for positions. + + For example, to use a 2D Gaussian distribution of node positions with mean + (0, 0) and standard deviation 2:: + + >>> import random + >>> n = 20 + >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)} + >>> G = nx.random_geometric_graph(n, 0.2, pos=pos) + + References + ---------- + .. [1] Penrose, Mathew, *Random Geometric Graphs*, + Oxford Studies in Probability, 5, 2003. + + """ + # TODO Is this function just a special case of the geographical + # threshold graph? + # + # half_radius = {v: radius / 2 for v in n} + # return geographical_threshold_graph(nodes, theta=1, alpha=1, + # weight=half_radius) + # + G = nx.empty_graph(n) + # If no positions are provided, choose uniformly random vectors in + # Euclidean space of the specified dimension. 
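+    # The generated dict has the same shape a caller would pass in, e.g.
+    # {0: [0.27, 0.91], 1: [0.58, 0.04], ...} for dim=2.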
+ if pos is None: + pos = {v: [seed.random() for i in range(dim)] for v in G} + nx.set_node_attributes(G, pos, pos_name) + + G.add_edges_from(_geometric_edges(G, radius, p, pos_name)) + return G + + +@py_random_state(6) +@nx._dispatchable(graphs=None, returns_graph=True) +def soft_random_geometric_graph( + n, radius, dim=2, pos=None, p=2, p_dist=None, seed=None, *, pos_name="pos" +): + r"""Returns a soft random geometric graph in the unit cube. + + The soft random geometric graph [1] model places `n` nodes uniformly at + random in the unit cube in dimension `dim`. Two nodes of distance, `dist`, + computed by the `p`-Minkowski distance metric are joined by an edge with + probability `p_dist` if the computed distance metric value of the nodes + is at most `radius`, otherwise they are not joined. + + Edges within `radius` of each other are determined using a KDTree when + SciPy is available. This reduces the time complexity from :math:`O(n^2)` + to :math:`O(n)`. + + Parameters + ---------- + n : int or iterable + Number of nodes or iterable of nodes + radius: float + Distance threshold value + dim : int, optional + Dimension of graph + pos : dict, optional + A dictionary keyed by node with node positions as values. + p : float, optional + Which Minkowski distance metric to use. + `p` has to meet the condition ``1 <= p <= infinity``. + + If this argument is not specified, the :math:`L^2` metric + (the Euclidean distance metric), p = 2 is used. + + This should not be confused with the `p` of an Erdős-Rényi random + graph, which represents probability. + p_dist : function, optional + A probability density function computing the probability of + connecting two nodes that are of distance, dist, computed by the + Minkowski distance metric. The probability density function, `p_dist`, + must be any function that takes the metric value as input + and outputs a single probability value between 0-1. The `scipy.stats` + package has many probability distribution functions implemented and + tools for custom probability distribution definitions [2], and passing + the .pdf method of `scipy.stats` distributions can be used here. If the + probability function, `p_dist`, is not supplied, the default function + is an exponential distribution with rate parameter :math:`\lambda=1`. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + pos_name : string, default="pos" + The name of the node attribute which represents the position + in 2D coordinates of the node in the returned graph. + + Returns + ------- + Graph + A soft random geometric graph, undirected and without self-loops. + Each node has a node attribute ``'pos'`` that stores the + position of that node in Euclidean space as provided by the + ``pos`` keyword argument or, if ``pos`` was not provided, as + generated by this function. + + Notes + ----- + This uses a *k*-d tree to build the graph. + + References + ---------- + .. [1] Penrose, Mathew D. "Connectivity of soft random geometric graphs." + The Annals of Applied Probability 26.2 (2016): 986-1028. + + Examples + -------- + Default Graph: + + >>> G = nx.soft_random_geometric_graph(50, 0.2) + + Custom Graph: + + The `pos` keyword argument can be used to specify node positions so you + can create an arbitrary distribution and domain for positions. + + The `scipy.stats` package can be used to define the probability distribution + with the ``.pdf`` method used as `p_dist`. 
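+
+    Any plain Python callable works as well; for instance, a `p_dist` that
+    always returns 1 keeps every candidate edge within `radius`:
+
+    >>> G = nx.soft_random_geometric_graph(50, 0.2, p_dist=lambda dist: 1)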
+ + For example, create a soft random geometric graph on 100 nodes using a 2D + Gaussian distribution of node positions with mean (0, 0) and standard deviation 2, + where nodes are joined by an edge with probability computed from an + exponential distribution with rate parameter :math:`\lambda=1` if their + Euclidean distance is at most 0.2. + + >>> import random + >>> from scipy.stats import expon + >>> n = 100 + >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)} + >>> p_dist = lambda x: expon.pdf(x, scale=1) + >>> G = nx.soft_random_geometric_graph(n, 0.2, pos=pos, p_dist=p_dist) + + """ + G = nx.empty_graph(n) + G.name = f"soft_random_geometric_graph({n}, {radius}, {dim})" + # If no positions are provided, choose uniformly random vectors in + # Euclidean space of the specified dimension. + if pos is None: + pos = {v: [seed.random() for i in range(dim)] for v in G} + nx.set_node_attributes(G, pos, pos_name) + + # if p_dist function not supplied the default function is an exponential + # distribution with rate parameter :math:`\lambda=1`. + if p_dist is None: + + def p_dist(dist): + return math.exp(-dist) + + def should_join(edge): + u, v = edge + dist = (sum(abs(a - b) ** p for a, b in zip(pos[u], pos[v]))) ** (1 / p) + return seed.random() < p_dist(dist) + + G.add_edges_from(filter(should_join, _geometric_edges(G, radius, p, pos_name))) + return G + + +@py_random_state(7) +@nx._dispatchable(graphs=None, returns_graph=True) +def geographical_threshold_graph( + n, + theta, + dim=2, + pos=None, + weight=None, + metric=None, + p_dist=None, + seed=None, + *, + pos_name="pos", + weight_name="weight", +): + r"""Returns a geographical threshold graph. + + The geographical threshold graph model places $n$ nodes uniformly at + random in a rectangular domain. Each node $u$ is assigned a weight + $w_u$. Two nodes $u$ and $v$ are joined by an edge if + + .. math:: + + (w_u + w_v)p_{dist}(r) \ge \theta + + where `r` is the distance between `u` and `v`, `p_dist` is any function of + `r`, and :math:`\theta` as the threshold parameter. `p_dist` is used to + give weight to the distance between nodes when deciding whether or not + they should be connected. The larger `p_dist` is, the more prone nodes + separated by `r` are to be connected, and vice versa. + + Parameters + ---------- + n : int or iterable + Number of nodes or iterable of nodes + theta: float + Threshold value + dim : int, optional + Dimension of graph + pos : dict + Node positions as a dictionary of tuples keyed by node. + weight : dict + Node weights as a dictionary of numbers keyed by node. + metric : function + A metric on vectors of numbers (represented as lists or + tuples). This must be a function that accepts two lists (or + tuples) as input and yields a number as output. The function + must also satisfy the four requirements of a `metric`_. + Specifically, if $d$ is the function and $x$, $y$, + and $z$ are vectors in the graph, then $d$ must satisfy + + 1. $d(x, y) \ge 0$, + 2. $d(x, y) = 0$ if and only if $x = y$, + 3. $d(x, y) = d(y, x)$, + 4. $d(x, z) \le d(x, y) + d(y, z)$. + + If this argument is not specified, the Euclidean distance metric is + used. + + .. _metric: https://en.wikipedia.org/wiki/Metric_%28mathematics%29 + p_dist : function, optional + Any function used to give weight to the distance between nodes when + deciding whether or not they should be connected. 
`p_dist` was + originally conceived as a probability density function giving the + probability of connecting two nodes that are of metric distance `r` + apart. The implementation here allows for more arbitrary definitions + of `p_dist` that do not need to correspond to valid probability + density functions. The :mod:`scipy.stats` package has many + probability density functions implemented and tools for custom + probability density definitions, and passing the ``.pdf`` method of + `scipy.stats` distributions can be used here. If ``p_dist=None`` + (the default), the exponential function :math:`r^{-2}` is used. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + pos_name : string, default="pos" + The name of the node attribute which represents the position + in 2D coordinates of the node in the returned graph. + weight_name : string, default="weight" + The name of the node attribute which represents the weight + of the node in the returned graph. + + Returns + ------- + Graph + A random geographic threshold graph, undirected and without + self-loops. + + Each node has a node attribute ``pos`` that stores the + position of that node in Euclidean space as provided by the + ``pos`` keyword argument or, if ``pos`` was not provided, as + generated by this function. Similarly, each node has a node + attribute ``weight`` that stores the weight of that node as + provided or as generated. + + Examples + -------- + Specify an alternate distance metric using the ``metric`` keyword + argument. For example, to use the `taxicab metric`_ instead of the + default `Euclidean metric`_:: + + >>> dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y)) + >>> G = nx.geographical_threshold_graph(10, 0.1, metric=dist) + + .. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry + .. _Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance + + Notes + ----- + If weights are not specified they are assigned to nodes by drawing randomly + from the exponential distribution with rate parameter $\lambda=1$. + To specify weights from a different distribution, use the `weight` keyword + argument:: + + >>> import random + >>> n = 20 + >>> w = {i: random.expovariate(5.0) for i in range(n)} + >>> G = nx.geographical_threshold_graph(20, 50, weight=w) + + If node positions are not specified they are randomly assigned from the + uniform distribution. + + References + ---------- + .. [1] Masuda, N., Miwa, H., Konno, N.: + Geographical threshold graphs with small-world and scale-free + properties. + Physical Review E 71, 036108 (2005) + .. [2] Milan Bradonjić, Aric Hagberg and Allon G. Percus, + Giant component and connectivity in geographical threshold graphs, + in Algorithms and Models for the Web-Graph (WAW 2007), + Antony Bonato and Fan Chung (Eds), pp. 209--216, 2007 + """ + G = nx.empty_graph(n) + # If no weights are provided, choose them from an exponential + # distribution. + if weight is None: + weight = {v: seed.expovariate(1) for v in G} + # If no positions are provided, choose uniformly random vectors in + # Euclidean space of the specified dimension. + if pos is None: + pos = {v: [seed.random() for i in range(dim)] for v in G} + # If no distance metric is provided, use Euclidean distance. 
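+    # math.dist is the Euclidean metric, e.g. math.dist((0, 0), (3, 4)) == 5.0.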
+ if metric is None: + metric = math.dist + nx.set_node_attributes(G, weight, weight_name) + nx.set_node_attributes(G, pos, pos_name) + + # if p_dist is not supplied, use default r^-2 + if p_dist is None: + + def p_dist(r): + return r**-2 + + # Returns ``True`` if and only if the nodes whose attributes are + # ``du`` and ``dv`` should be joined, according to the threshold + # condition. + def should_join(pair): + u, v = pair + u_pos, v_pos = pos[u], pos[v] + u_weight, v_weight = weight[u], weight[v] + return (u_weight + v_weight) * p_dist(metric(u_pos, v_pos)) >= theta + + G.add_edges_from(filter(should_join, combinations(G, 2))) + return G + + +@py_random_state(6) +@nx._dispatchable(graphs=None, returns_graph=True) +def waxman_graph( + n, + beta=0.4, + alpha=0.1, + L=None, + domain=(0, 0, 1, 1), + metric=None, + seed=None, + *, + pos_name="pos", +): + r"""Returns a Waxman random graph. + + The Waxman random graph model places `n` nodes uniformly at random + in a rectangular domain. Each pair of nodes at distance `d` is + joined by an edge with probability + + .. math:: + p = \beta \exp(-d / \alpha L). + + This function implements both Waxman models, using the `L` keyword + argument. + + * Waxman-1: if `L` is not specified, it is set to be the maximum distance + between any pair of nodes. + * Waxman-2: if `L` is specified, the distance between a pair of nodes is + chosen uniformly at random from the interval `[0, L]`. + + Parameters + ---------- + n : int or iterable + Number of nodes or iterable of nodes + beta: float + Model parameter + alpha: float + Model parameter + L : float, optional + Maximum distance between nodes. If not specified, the actual distance + is calculated. + domain : four-tuple of numbers, optional + Domain size, given as a tuple of the form `(x_min, y_min, x_max, + y_max)`. + metric : function + A metric on vectors of numbers (represented as lists or + tuples). This must be a function that accepts two lists (or + tuples) as input and yields a number as output. The function + must also satisfy the four requirements of a `metric`_. + Specifically, if $d$ is the function and $x$, $y$, + and $z$ are vectors in the graph, then $d$ must satisfy + + 1. $d(x, y) \ge 0$, + 2. $d(x, y) = 0$ if and only if $x = y$, + 3. $d(x, y) = d(y, x)$, + 4. $d(x, z) \le d(x, y) + d(y, z)$. + + If this argument is not specified, the Euclidean distance metric is + used. + + .. _metric: https://en.wikipedia.org/wiki/Metric_%28mathematics%29 + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + pos_name : string, default="pos" + The name of the node attribute which represents the position + in 2D coordinates of the node in the returned graph. + + Returns + ------- + Graph + A random Waxman graph, undirected and without self-loops. Each + node has a node attribute ``'pos'`` that stores the position of + that node in Euclidean space as generated by this function. + + Examples + -------- + Specify an alternate distance metric using the ``metric`` keyword + argument. For example, to use the "`taxicab metric`_" instead of the + default `Euclidean metric`_:: + + >>> dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y)) + >>> G = nx.waxman_graph(10, 0.5, 0.1, metric=dist) + + .. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry + .. 
_Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance + + Notes + ----- + Starting in NetworkX 2.0 the parameters alpha and beta align with their + usual roles in the probability distribution. In earlier versions their + positions in the expression were reversed. Their position in the calling + sequence reversed as well to minimize backward incompatibility. + + References + ---------- + .. [1] B. M. Waxman, *Routing of multipoint connections*. + IEEE J. Select. Areas Commun. 6(9),(1988) 1617--1622. + """ + G = nx.empty_graph(n) + (xmin, ymin, xmax, ymax) = domain + # Each node gets a uniformly random position in the given rectangle. + pos = {v: (seed.uniform(xmin, xmax), seed.uniform(ymin, ymax)) for v in G} + nx.set_node_attributes(G, pos, pos_name) + # If no distance metric is provided, use Euclidean distance. + if metric is None: + metric = math.dist + # If the maximum distance L is not specified (that is, we are in the + # Waxman-1 model), then find the maximum distance between any pair + # of nodes. + # + # In the Waxman-1 model, join nodes randomly based on distance. In + # the Waxman-2 model, join randomly based on random l. + if L is None: + L = max(metric(x, y) for x, y in combinations(pos.values(), 2)) + + def dist(u, v): + return metric(pos[u], pos[v]) + + else: + + def dist(u, v): + return seed.random() * L + + # `pair` is the pair of nodes to decide whether to join. + def should_join(pair): + return seed.random() < beta * math.exp(-dist(*pair) / (alpha * L)) + + G.add_edges_from(filter(should_join, combinations(G, 2))) + return G + + +@py_random_state(5) +@nx._dispatchable(graphs=None, returns_graph=True) +def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None): + r"""Returns a navigable small-world graph. + + A navigable small-world graph is a directed grid with additional long-range + connections that are chosen randomly. + + [...] we begin with a set of nodes [...] that are identified with the set + of lattice points in an $n \times n$ square, + $\{(i, j): i \in \{1, 2, \ldots, n\}, j \in \{1, 2, \ldots, n\}\}$, + and we define the *lattice distance* between two nodes $(i, j)$ and + $(k, l)$ to be the number of "lattice steps" separating them: + $d((i, j), (k, l)) = |k - i| + |l - j|$. + + For a universal constant $p >= 1$, the node $u$ has a directed edge to + every other node within lattice distance $p$---these are its *local + contacts*. For universal constants $q >= 0$ and $r >= 0$ we also + construct directed edges from $u$ to $q$ other nodes (the *long-range + contacts*) using independent random trials; the $i$th directed edge from + $u$ has endpoint $v$ with probability proportional to $[d(u,v)]^{-r}$. + + -- [1]_ + + Parameters + ---------- + n : int + The length of one side of the lattice; the number of nodes in + the graph is therefore $n^2$. + p : int + The diameter of short range connections. Each node is joined with every + other node within this lattice distance. + q : int + The number of long-range connections for each node. + r : float + Exponent for decaying probability of connections. The probability of + connecting to a node at lattice distance $d$ is $1/d^r$. + dim : int + Dimension of grid + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + References + ---------- + .. [1] J. Kleinberg. The small-world phenomenon: An algorithmic + perspective. Proc. 32nd ACM Symposium on Theory of Computing, 2000. 
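+
+    Examples
+    --------
+    A small instance; the nodes are `dim`-tuples of lattice coordinates:
+
+    >>> G = nx.navigable_small_world_graph(3, seed=42)
+    >>> sorted(G)[:3]
+    [(0, 0), (0, 1), (0, 2)]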
+ """ + if p < 1: + raise nx.NetworkXException("p must be >= 1") + if q < 0: + raise nx.NetworkXException("q must be >= 0") + if r < 0: + raise nx.NetworkXException("r must be >= 0") + + G = nx.DiGraph() + nodes = list(product(range(n), repeat=dim)) + for p1 in nodes: + probs = [0] + for p2 in nodes: + if p1 == p2: + continue + d = sum((abs(b - a) for a, b in zip(p1, p2))) + if d <= p: + G.add_edge(p1, p2) + probs.append(d**-r) + cdf = list(accumulate(probs)) + for _ in range(q): + target = nodes[bisect_left(cdf, seed.uniform(0, cdf[-1]))] + G.add_edge(p1, target) + return G + + +@py_random_state(7) +@nx._dispatchable(graphs=None, returns_graph=True) +def thresholded_random_geometric_graph( + n, + radius, + theta, + dim=2, + pos=None, + weight=None, + p=2, + seed=None, + *, + pos_name="pos", + weight_name="weight", +): + r"""Returns a thresholded random geometric graph in the unit cube. + + The thresholded random geometric graph [1] model places `n` nodes + uniformly at random in the unit cube of dimensions `dim`. Each node + `u` is assigned a weight :math:`w_u`. Two nodes `u` and `v` are + joined by an edge if they are within the maximum connection distance, + `radius` computed by the `p`-Minkowski distance and the summation of + weights :math:`w_u` + :math:`w_v` is greater than or equal + to the threshold parameter `theta`. + + Edges within `radius` of each other are determined using a KDTree when + SciPy is available. This reduces the time complexity from :math:`O(n^2)` + to :math:`O(n)`. + + Parameters + ---------- + n : int or iterable + Number of nodes or iterable of nodes + radius: float + Distance threshold value + theta: float + Threshold value + dim : int, optional + Dimension of graph + pos : dict, optional + A dictionary keyed by node with node positions as values. + weight : dict, optional + Node weights as a dictionary of numbers keyed by node. + p : float, optional (default 2) + Which Minkowski distance metric to use. `p` has to meet the condition + ``1 <= p <= infinity``. + + If this argument is not specified, the :math:`L^2` metric + (the Euclidean distance metric), p = 2 is used. + + This should not be confused with the `p` of an Erdős-Rényi random + graph, which represents probability. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + pos_name : string, default="pos" + The name of the node attribute which represents the position + in 2D coordinates of the node in the returned graph. + weight_name : string, default="weight" + The name of the node attribute which represents the weight + of the node in the returned graph. + + Returns + ------- + Graph + A thresholded random geographic graph, undirected and without + self-loops. + + Each node has a node attribute ``'pos'`` that stores the + position of that node in Euclidean space as provided by the + ``pos`` keyword argument or, if ``pos`` was not provided, as + generated by this function. Similarly, each node has a nodethre + attribute ``'weight'`` that stores the weight of that node as + provided or as generated. + + Notes + ----- + This uses a *k*-d tree to build the graph. + + References + ---------- + .. [1] http://cole-maclean.github.io/blog/files/thesis.pdf + + Examples + -------- + Default Graph: + + >>> G = nx.thresholded_random_geometric_graph(50, 0.2, 0.1) + + Custom Graph: + + The `pos` keyword argument can be used to specify node positions so you + can create an arbitrary distribution and domain for positions. 
+ + If weights are not specified they are assigned to nodes by drawing randomly + from the exponential distribution with rate parameter :math:`\lambda=1`. + To specify weights from a different distribution, use the `weight` keyword + argument. + + For example, create a thresholded random geometric graph on 50 nodes using a 2D + Gaussian distribution of node positions with mean (0, 0) and standard deviation 2, + where nodes are joined by an edge if their sum weights drawn from + a exponential distribution with rate = 5 are >= theta = 0.1 and their + Euclidean distance is at most 0.2. + + >>> import random + >>> n = 50 + >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)} + >>> w = {i: random.expovariate(5.0) for i in range(n)} + >>> G = nx.thresholded_random_geometric_graph(n, 0.2, 0.1, 2, pos, w) + + """ + G = nx.empty_graph(n) + G.name = f"thresholded_random_geometric_graph({n}, {radius}, {theta}, {dim})" + # If no weights are provided, choose them from an exponential + # distribution. + if weight is None: + weight = {v: seed.expovariate(1) for v in G} + # If no positions are provided, choose uniformly random vectors in + # Euclidean space of the specified dimension. + if pos is None: + pos = {v: [seed.random() for i in range(dim)] for v in G} + # If no distance metric is provided, use Euclidean distance. + nx.set_node_attributes(G, weight, weight_name) + nx.set_node_attributes(G, pos, pos_name) + + edges = ( + (u, v) + for u, v in _geometric_edges(G, radius, p, pos_name) + if weight[u] + weight[v] >= theta + ) + G.add_edges_from(edges) + return G + + +@py_random_state(5) +@nx._dispatchable(graphs=None, returns_graph=True) +def geometric_soft_configuration_graph( + *, beta, n=None, gamma=None, mean_degree=None, kappas=None, seed=None +): + r"""Returns a random graph from the geometric soft configuration model. + + The $\mathbb{S}^1$ model [1]_ is the geometric soft configuration model + which is able to explain many fundamental features of real networks such as + small-world property, heteregenous degree distributions, high level of + clustering, and self-similarity. + + In the geometric soft configuration model, a node $i$ is assigned two hidden + variables: a hidden degree $\kappa_i$, quantifying its popularity, influence, + or importance, and an angular position $\theta_i$ in a circle abstracting the + similarity space, where angular distances between nodes are a proxy for their + similarity. Focusing on the angular position, this model is often called + the $\mathbb{S}^1$ model (a one-dimensional sphere). The circle's radius is + adjusted to $R = N/2\pi$, where $N$ is the number of nodes, so that the density + is set to 1 without loss of generality. + + The connection probability between any pair of nodes increases with + the product of their hidden degrees (i.e., their combined popularities), + and decreases with the angular distance between the two nodes. + Specifically, nodes $i$ and $j$ are connected with the probability + + $p_{ij} = \frac{1}{1 + \frac{d_{ij}^\beta}{\left(\mu \kappa_i \kappa_j\right)^{\max(1, \beta)}}}$ + + where $d_{ij} = R\Delta\theta_{ij}$ is the arc length of the circle between + nodes $i$ and $j$ separated by an angular distance $\Delta\theta_{ij}$. + Parameters $\mu$ and $\beta$ (also called inverse temperature) control the + average degree and the clustering coefficient, respectively. 
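+
+    As a quick sanity check of this formula: a pair with
+    $\mu \kappa_i \kappa_j = 1$ at arc length $d_{ij} = 1$ connects with
+    probability $p_{ij} = 1/(1 + 1) = 1/2$, for any value of $\beta$.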
+ + It can be shown [2]_ that the model undergoes a structural phase transition + at $\beta=1$ so that for $\beta<1$ networks are unclustered in the thermodynamic + limit (when $N\to \infty$) whereas for $\beta>1$ the ensemble generates + networks with finite clustering coefficient. + + The $\mathbb{S}^1$ model can be expressed as a purely geometric model + $\mathbb{H}^2$ in the hyperbolic plane [3]_ by mapping the hidden degree of + each node into a radial coordinate as + + $r_i = \hat{R} - \frac{2 \max(1, \beta)}{\beta \zeta} \ln \left(\frac{\kappa_i}{\kappa_0}\right)$ + + where $\hat{R}$ is the radius of the hyperbolic disk and $\zeta$ is the curvature, + + $\hat{R} = \frac{2}{\zeta} \ln \left(\frac{N}{\pi}\right) + - \frac{2\max(1, \beta)}{\beta \zeta} \ln (\mu \kappa_0^2)$ + + The connection probability then reads + + $p_{ij} = \frac{1}{1 + \exp\left({\frac{\beta\zeta}{2} (x_{ij} - \hat{R})}\right)}$ + + where + + $x_{ij} = r_i + r_j + \frac{2}{\zeta} \ln \frac{\Delta\theta_{ij}}{2}$ + + is a good approximation of the hyperbolic distance between two nodes separated + by an angular distance $\Delta\theta_{ij}$ with radial coordinates $r_i$ and $r_j$. + For $\beta > 1$, the curvature $\zeta = 1$, for $\beta < 1$, $\zeta = \beta^{-1}$. + + + Parameters + ---------- + Either `n`, `gamma`, `mean_degree` are provided or `kappas`. The values of + `n`, `gamma`, `mean_degree` (if provided) are used to construct a random + kappa-dict keyed by node with values sampled from a power-law distribution. + + beta : positive number + Inverse temperature, controlling the clustering coefficient. + n : int (default: None) + Size of the network (number of nodes). + If not provided, `kappas` must be provided and holds the nodes. + gamma : float (default: None) + Exponent of the power-law distribution for hidden degrees `kappas`. + If not provided, `kappas` must be provided directly. + mean_degree : float (default: None) + The mean degree in the network. + If not provided, `kappas` must be provided directly. + kappas : dict (default: None) + A dict keyed by node to its hidden degree value. + If not provided, random values are computed based on a power-law + distribution using `n`, `gamma` and `mean_degree`. + seed : int, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + Graph + A random geometric soft configuration graph (undirected with no self-loops). + Each node has three node-attributes: + + - ``kappa`` that represents the hidden degree. + + - ``theta`` the position in the similarity space ($\mathbb{S}^1$) which is + also the angular position in the hyperbolic plane. + + - ``radius`` the radial position in the hyperbolic plane + (based on the hidden degree). + + + Examples + -------- + Generate a network with specified parameters: + + >>> G = nx.geometric_soft_configuration_graph( + ... beta=1.5, n=100, gamma=2.7, mean_degree=5 + ... ) + + Create a geometric soft configuration graph with 100 nodes. The $\beta$ parameter + is set to 1.5 and the exponent of the powerlaw distribution of the hidden + degrees is 2.7 with mean value of 5. + + Generate a network with predefined hidden degrees: + + >>> kappas = {i: 10 for i in range(100)} + >>> G = nx.geometric_soft_configuration_graph(beta=2.5, kappas=kappas) + + Create a geometric soft configuration graph with 100 nodes. The $\beta$ parameter + is set to 2.5 and all nodes with hidden degree $\kappa=10$. + + + References + ---------- + .. [1] Serrano, M. Á., Krioukov, D., & Boguñá, M. 
(2008). Self-similarity + of complex networks and hidden metric spaces. Physical review letters, 100(7), 078701. + + .. [2] van der Kolk, J., Serrano, M. Á., & Boguñá, M. (2022). An anomalous + topological phase transition in spatial random graphs. Communications Physics, 5(1), 245. + + .. [3] Krioukov, D., Papadopoulos, F., Kitsak, M., Vahdat, A., & Boguná, M. (2010). + Hyperbolic geometry of complex networks. Physical Review E, 82(3), 036106. + + """ + if beta <= 0: + raise nx.NetworkXError("The parameter beta cannot be smaller or equal to 0.") + + if kappas is not None: + if not all((n is None, gamma is None, mean_degree is None)): + raise nx.NetworkXError( + "When kappas is input, n, gamma and mean_degree must not be." + ) + + n = len(kappas) + mean_degree = sum(kappas) / len(kappas) + else: + if any((n is None, gamma is None, mean_degree is None)): + raise nx.NetworkXError( + "Please provide either kappas, or all 3 of: n, gamma and mean_degree." + ) + + # Generate `n` hidden degrees from a powerlaw distribution + # with given exponent `gamma` and mean value `mean_degree` + gam_ratio = (gamma - 2) / (gamma - 1) + kappa_0 = mean_degree * gam_ratio * (1 - 1 / n) / (1 - 1 / n**gam_ratio) + base = 1 - 1 / n + power = 1 / (1 - gamma) + kappas = {i: kappa_0 * (1 - seed.random() * base) ** power for i in range(n)} + + G = nx.Graph() + R = n / (2 * math.pi) + + # Approximate values for mu in the thermodynamic limit (when n -> infinity) + if beta > 1: + mu = beta * math.sin(math.pi / beta) / (2 * math.pi * mean_degree) + elif beta == 1: + mu = 1 / (2 * mean_degree * math.log(n)) + else: + mu = (1 - beta) / (2**beta * mean_degree * n ** (1 - beta)) + + # Generate random positions on a circle + thetas = {k: seed.uniform(0, 2 * math.pi) for k in kappas} + + for u in kappas: + for v in list(G): + angle = math.pi - math.fabs(math.pi - math.fabs(thetas[u] - thetas[v])) + dij = math.pow(R * angle, beta) + mu_kappas = math.pow(mu * kappas[u] * kappas[v], max(1, beta)) + p_ij = 1 / (1 + dij / mu_kappas) + + # Create an edge with a certain connection probability + if seed.random() < p_ij: + G.add_edge(u, v) + G.add_node(u) + + nx.set_node_attributes(G, thetas, "theta") + nx.set_node_attributes(G, kappas, "kappa") + + # Map hidden degrees into the radial coordinates + zeta = 1 if beta > 1 else 1 / beta + kappa_min = min(kappas.values()) + R_c = 2 * max(1, beta) / (beta * zeta) + R_hat = (2 / zeta) * math.log(n / math.pi) - R_c * math.log(mu * kappa_min) + radii = {node: R_hat - R_c * math.log(kappa) for node, kappa in kappas.items()} + nx.set_node_attributes(G, radii, "radius") + + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/harary_graph.py b/.venv/lib/python3.12/site-packages/networkx/generators/harary_graph.py new file mode 100644 index 0000000..591587d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/harary_graph.py @@ -0,0 +1,199 @@ +"""Generators for Harary graphs + +This module gives two generators for the Harary graph, which was +introduced by the famous mathematician Frank Harary in his 1962 work [H]_. +The first generator gives the Harary graph that maximizes the node +connectivity with given number of nodes and given number of edges. +The second generator gives the Harary graph that minimizes +the number of edges in the graph with given node connectivity and +number of nodes. + +References +---------- +.. [H] Harary, F. "The Maximum Connectivity of a Graph." + Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962. 
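+
+Examples
+--------
+A brief sketch of the two generators (assuming the usual top-level
+`networkx` re-exports):
+
+>>> G = nx.hnm_harary_graph(10, 12)  # maximum connectivity: 10 nodes, 12 edges
+>>> H = nx.hkn_harary_graph(3, 10)  # fewest edges: connectivity 3, 10 nodes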
+ +""" + +import networkx as nx +from networkx.exception import NetworkXError + +__all__ = ["hnm_harary_graph", "hkn_harary_graph"] + + +@nx._dispatchable(graphs=None, returns_graph=True) +def hnm_harary_graph(n, m, create_using=None): + """Returns the Harary graph with given numbers of nodes and edges. + + The Harary graph $H_{n,m}$ is the graph that maximizes node connectivity + with $n$ nodes and $m$ edges. + + This maximum node connectivity is known to be floor($2m/n$). [1]_ + + Parameters + ---------- + n: integer + The number of nodes the generated graph is to contain + + m: integer + The number of edges the generated graph is to contain + + create_using : NetworkX graph constructor, optional Graph type + to create (default=nx.Graph). If graph instance, then cleared + before populated. + + Returns + ------- + NetworkX graph + The Harary graph $H_{n,m}$. + + See Also + -------- + hkn_harary_graph + + Notes + ----- + This algorithm runs in $O(m)$ time. + It is implemented by following the Reference [2]_. + + References + ---------- + .. [1] F. T. Boesch, A. Satyanarayana, and C. L. Suffel, + "A Survey of Some Network Reliability Analysis and Synthesis Results," + Networks, pp. 99-107, 2009. + + .. [2] Harary, F. "The Maximum Connectivity of a Graph." + Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962. + """ + + if n < 1: + raise NetworkXError("The number of nodes must be >= 1!") + if m < n - 1: + raise NetworkXError("The number of edges must be >= n - 1 !") + if m > n * (n - 1) // 2: + raise NetworkXError("The number of edges must be <= n(n-1)/2") + + # Construct an empty graph with n nodes first + H = nx.empty_graph(n, create_using) + # Get the floor of average node degree + d = 2 * m // n + + # Test the parity of n and d + if (n % 2 == 0) or (d % 2 == 0): + # Start with a regular graph of d degrees + offset = d // 2 + for i in range(n): + for j in range(1, offset + 1): + H.add_edge(i, (i - j) % n) + H.add_edge(i, (i + j) % n) + if d & 1: + # in case d is odd; n must be even in this case + half = n // 2 + for i in range(half): + # add edges diagonally + H.add_edge(i, i + half) + # Get the remainder of 2*m modulo n + r = 2 * m % n + if r > 0: + # add remaining edges at offset+1 + for i in range(r // 2): + H.add_edge(i, i + offset + 1) + else: + # Start with a regular graph of (d - 1) degrees + offset = (d - 1) // 2 + for i in range(n): + for j in range(1, offset + 1): + H.add_edge(i, (i - j) % n) + H.add_edge(i, (i + j) % n) + half = n // 2 + for i in range(m - n * offset): + # add the remaining m - n*offset edges between i and i+half + H.add_edge(i, (i + half) % n) + + return H + + +@nx._dispatchable(graphs=None, returns_graph=True) +def hkn_harary_graph(k, n, create_using=None): + """Returns the Harary graph with given node connectivity and node number. + + The Harary graph $H_{k,n}$ is the graph that minimizes the number of + edges needed with given node connectivity $k$ and node number $n$. + + This smallest number of edges is known to be ceil($kn/2$) [1]_. + + Parameters + ---------- + k: integer + The node connectivity of the generated graph + + n: integer + The number of nodes the generated graph is to contain + + create_using : NetworkX graph constructor, optional Graph type + to create (default=nx.Graph). If graph instance, then cleared + before populated. + + Returns + ------- + NetworkX graph + The Harary graph $H_{k,n}$. + + See Also + -------- + hnm_harary_graph + + Notes + ----- + This algorithm runs in $O(kn)$ time. + It is implemented by following the Reference [2]_. 
+ + References + ---------- + .. [1] Weisstein, Eric W. "Harary Graph." From MathWorld--A Wolfram Web + Resource. http://mathworld.wolfram.com/HararyGraph.html. + + .. [2] Harary, F. "The Maximum Connectivity of a Graph." + Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962. + """ + + if k < 1: + raise NetworkXError("The node connectivity must be >= 1!") + if n < k + 1: + raise NetworkXError("The number of nodes must be >= k+1 !") + + # in case of connectivity 1, simply return the path graph + if k == 1: + H = nx.path_graph(n, create_using) + return H + + # Construct an empty graph with n nodes first + H = nx.empty_graph(n, create_using) + + # Test the parity of k and n + if (k % 2 == 0) or (n % 2 == 0): + # Construct a regular graph with k degrees + offset = k // 2 + for i in range(n): + for j in range(1, offset + 1): + H.add_edge(i, (i - j) % n) + H.add_edge(i, (i + j) % n) + if k & 1: + # odd degree; n must be even in this case + half = n // 2 + for i in range(half): + # add edges diagonally + H.add_edge(i, i + half) + else: + # Construct a regular graph with (k - 1) degrees + offset = (k - 1) // 2 + for i in range(n): + for j in range(1, offset + 1): + H.add_edge(i, (i - j) % n) + H.add_edge(i, (i + j) % n) + half = n // 2 + for i in range(half + 1): + # add half+1 edges between i and i+half + H.add_edge(i, (i + half) % n) + + return H diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/internet_as_graphs.py b/.venv/lib/python3.12/site-packages/networkx/generators/internet_as_graphs.py new file mode 100644 index 0000000..449d543 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/internet_as_graphs.py @@ -0,0 +1,441 @@ +"""Generates graphs resembling the Internet Autonomous System network""" + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["random_internet_as_graph"] + + +def uniform_int_from_avg(a, m, seed): + """Pick a random integer with uniform probability. + + Returns a random integer uniformly taken from a distribution with + minimum value 'a' and average value 'm', X~U(a,b), E[X]=m, X in N where + b = 2*m - a. + + Notes + ----- + p = (b-floor(b))/2 + X = X1 + X2; X1~U(a,floor(b)), X2~B(p) + E[X] = E[X1] + E[X2] = (floor(b)+a)/2 + (b-floor(b))/2 = (b+a)/2 = m + """ + + from math import floor + + assert m >= a + b = 2 * m - a + p = (b - floor(b)) / 2 + X1 = round(seed.random() * (floor(b) - a) + a) + if seed.random() < p: + X2 = 1 + else: + X2 = 0 + return X1 + X2 + + +def choose_pref_attach(degs, seed): + """Pick a random value, with a probability given by its weight. + + Returns a random choice among degs keys, each of which has a + probability proportional to the corresponding dictionary value. + + Parameters + ---------- + degs: dictionary + It contains the possible values (keys) and the corresponding + probabilities (values) + seed: random state + + Returns + ------- + v: object + A key of degs or None if degs is empty + """ + + if len(degs) == 0: + return None + s = sum(degs.values()) + if s == 0: + return seed.choice(list(degs.keys())) + v = seed.random() * s + + nodes = list(degs.keys()) + i = 0 + acc = degs[nodes[i]] + while v > acc: + i += 1 + acc += degs[nodes[i]] + return nodes[i] + + +class AS_graph_generator: + """Generates random internet AS graphs.""" + + def __init__(self, n, seed): + """Initializes variables. Immediate numbers are taken from [1]. + + Parameters + ---------- + n: integer + Number of graph nodes + seed: random state + Indicator of random number generation state. 
+ See :ref:`Randomness`. + + Returns + ------- + GG: AS_graph_generator object + + References + ---------- + [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of + BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas + in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010. + """ + + self.seed = seed + self.n_t = min(n, round(self.seed.random() * 2 + 4)) # num of T nodes + self.n_m = round(0.15 * n) # number of M nodes + self.n_cp = round(0.05 * n) # number of CP nodes + self.n_c = max(0, n - self.n_t - self.n_m - self.n_cp) # number of C nodes + + self.d_m = 2 + (2.5 * n) / 10000 # average multihoming degree for M nodes + self.d_cp = 2 + (1.5 * n) / 10000 # avg multihoming degree for CP nodes + self.d_c = 1 + (5 * n) / 100000 # average multihoming degree for C nodes + + self.p_m_m = 1 + (2 * n) / 10000 # avg num of peer edges between M and M + self.p_cp_m = 0.2 + (2 * n) / 10000 # avg num of peer edges between CP, M + self.p_cp_cp = 0.05 + (2 * n) / 100000 # avg num of peer edges btwn CP, CP + + self.t_m = 0.375 # probability M's provider is T + self.t_cp = 0.375 # probability CP's provider is T + self.t_c = 0.125 # probability C's provider is T + + def t_graph(self): + """Generates the core mesh network of tier one nodes of a AS graph. + + Returns + ------- + G: Networkx Graph + Core network + """ + + self.G = nx.Graph() + for i in range(self.n_t): + self.G.add_node(i, type="T") + for r in self.regions: + self.regions[r].add(i) + for j in self.G.nodes(): + if i != j: + self.add_edge(i, j, "peer") + self.customers[i] = set() + self.providers[i] = set() + return self.G + + def add_edge(self, i, j, kind): + if kind == "transit": + customer = str(i) + else: + customer = "none" + self.G.add_edge(i, j, type=kind, customer=customer) + + def choose_peer_pref_attach(self, node_list): + """Pick a node with a probability weighted by its peer degree. + + Pick a node from node_list with preferential attachment + computed only on their peer degree + """ + + d = {} + for n in node_list: + d[n] = self.G.nodes[n]["peers"] + return choose_pref_attach(d, self.seed) + + def choose_node_pref_attach(self, node_list): + """Pick a node with a probability weighted by its degree. + + Pick a node from node_list with preferential attachment + computed on their degree + """ + + degs = dict(self.G.degree(node_list)) + return choose_pref_attach(degs, self.seed) + + def add_customer(self, i, j): + """Keep the dictionaries 'customers' and 'providers' consistent.""" + + self.customers[j].add(i) + self.providers[i].add(j) + for z in self.providers[j]: + self.customers[z].add(i) + self.providers[i].add(z) + + def add_node(self, i, kind, reg2prob, avg_deg, t_edge_prob): + """Add a node and its customer transit edges to the graph. + + Parameters + ---------- + i: object + Identifier of the new node + kind: string + Type of the new node. Options are: 'M' for middle node, 'CP' for + content provider and 'C' for customer. + reg2prob: float + Probability the new node can be in two different regions. + avg_deg: float + Average number of transit nodes of which node i is customer. 
+ t_edge_prob: float + Probability node i establish a customer transit edge with a tier + one (T) node + + Returns + ------- + i: object + Identifier of the new node + """ + + regs = 1 # regions in which node resides + if self.seed.random() < reg2prob: # node is in two regions + regs = 2 + node_options = set() + + self.G.add_node(i, type=kind, peers=0) + self.customers[i] = set() + self.providers[i] = set() + self.nodes[kind].add(i) + for r in self.seed.sample(list(self.regions), regs): + node_options = node_options.union(self.regions[r]) + self.regions[r].add(i) + + edge_num = uniform_int_from_avg(1, avg_deg, self.seed) + + t_options = node_options.intersection(self.nodes["T"]) + m_options = node_options.intersection(self.nodes["M"]) + if i in m_options: + m_options.remove(i) + d = 0 + while d < edge_num and (len(t_options) > 0 or len(m_options) > 0): + if len(m_options) == 0 or ( + len(t_options) > 0 and self.seed.random() < t_edge_prob + ): # add edge to a T node + j = self.choose_node_pref_attach(t_options) + t_options.remove(j) + else: + j = self.choose_node_pref_attach(m_options) + m_options.remove(j) + self.add_edge(i, j, "transit") + self.add_customer(i, j) + d += 1 + + return i + + def add_m_peering_link(self, m, to_kind): + """Add a peering link between two middle tier (M) nodes. + + Target node j is drawn considering a preferential attachment based on + other M node peering degree. + + Parameters + ---------- + m: object + Node identifier + to_kind: string + type for target node j (must be always M) + + Returns + ------- + success: boolean + """ + + # candidates are of type 'M' and are not customers of m + node_options = self.nodes["M"].difference(self.customers[m]) + # candidates are not providers of m + node_options = node_options.difference(self.providers[m]) + # remove self + if m in node_options: + node_options.remove(m) + + # remove candidates we are already connected to + for j in self.G.neighbors(m): + if j in node_options: + node_options.remove(j) + + if len(node_options) > 0: + j = self.choose_peer_pref_attach(node_options) + self.add_edge(m, j, "peer") + self.G.nodes[m]["peers"] += 1 + self.G.nodes[j]["peers"] += 1 + return True + else: + return False + + def add_cp_peering_link(self, cp, to_kind): + """Add a peering link to a content provider (CP) node. + + Target node j can be CP or M and it is drawn uniformly among the nodes + belonging to the same region as cp. + + Parameters + ---------- + cp: object + Node identifier + to_kind: string + type for target node j (must be M or CP) + + Returns + ------- + success: boolean + """ + + node_options = set() + for r in self.regions: # options include nodes in the same region(s) + if cp in self.regions[r]: + node_options = node_options.union(self.regions[r]) + + # options are restricted to the indicated kind ('M' or 'CP') + node_options = self.nodes[to_kind].intersection(node_options) + + # remove self + if cp in node_options: + node_options.remove(cp) + + # remove nodes that are cp's providers + node_options = node_options.difference(self.providers[cp]) + + # remove nodes we are already connected to + for j in self.G.neighbors(cp): + if j in node_options: + node_options.remove(j) + + if len(node_options) > 0: + j = self.seed.sample(list(node_options), 1)[0] + self.add_edge(cp, j, "peer") + self.G.nodes[cp]["peers"] += 1 + self.G.nodes[j]["peers"] += 1 + return True + else: + return False + + def graph_regions(self, rn): + """Initializes AS network regions. 
+ + Parameters + ---------- + rn: integer + Number of regions + """ + + self.regions = {} + for i in range(rn): + self.regions["REG" + str(i)] = set() + + def add_peering_links(self, from_kind, to_kind): + """Utility function to add peering links among node groups.""" + peer_link_method = None + if from_kind == "M": + peer_link_method = self.add_m_peering_link + m = self.p_m_m + if from_kind == "CP": + peer_link_method = self.add_cp_peering_link + if to_kind == "M": + m = self.p_cp_m + else: + m = self.p_cp_cp + + for i in self.nodes[from_kind]: + num = uniform_int_from_avg(0, m, self.seed) + for _ in range(num): + peer_link_method(i, to_kind) + + def generate(self): + """Generates a random AS network graph as described in [1]. + + Returns + ------- + G: Graph object + + Notes + ----- + The process steps are the following: first we create the core network + of tier one nodes, then we add the middle tier (M), the content + provider (CP) and the customer (C) nodes along with their transit edges + (link i,j means i is customer of j). Finally we add peering links + between M nodes, between M and CP nodes and between CP node couples. + For a detailed description of the algorithm, please refer to [1]. + + References + ---------- + [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of + BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas + in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010. + """ + + self.graph_regions(5) + self.customers = {} + self.providers = {} + self.nodes = {"T": set(), "M": set(), "CP": set(), "C": set()} + + self.t_graph() + self.nodes["T"] = set(self.G.nodes()) + + i = len(self.nodes["T"]) + for _ in range(self.n_m): + self.nodes["M"].add(self.add_node(i, "M", 0.2, self.d_m, self.t_m)) + i += 1 + for _ in range(self.n_cp): + self.nodes["CP"].add(self.add_node(i, "CP", 0.05, self.d_cp, self.t_cp)) + i += 1 + for _ in range(self.n_c): + self.nodes["C"].add(self.add_node(i, "C", 0, self.d_c, self.t_c)) + i += 1 + + self.add_peering_links("M", "M") + self.add_peering_links("CP", "M") + self.add_peering_links("CP", "CP") + + return self.G + + +@py_random_state(1) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_internet_as_graph(n, seed=None): + """Generates a random undirected graph resembling the Internet AS network + + Parameters + ---------- + n: integer in [1000, 10000] + Number of graph nodes + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G: Networkx Graph object + A randomly generated undirected graph + + Notes + ----- + This algorithm returns an undirected graph resembling the Internet + Autonomous System (AS) network, it uses the approach by Elmokashfi et al. + [1]_ and it grants the properties described in the related paper [1]_. + + Each node models an autonomous system, with an attribute 'type' specifying + its kind; tier-1 (T), mid-level (M), customer (C) or content-provider (CP). + Each edge models an ADV communication link (hence, bidirectional) with + attributes: + + - type: transit|peer, the kind of commercial agreement between nodes; + - customer: , the identifier of the node acting as customer + ('none' if type is peer). + + References + ---------- + .. [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of + BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas + in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010. 
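+
+    Examples
+    --------
+    A small sketch with a fixed seed for reproducibility; the node count
+    and the set of node types are deterministic for ``n=1000``:
+
+    >>> G = nx.random_internet_as_graph(1000, seed=42)
+    >>> G.number_of_nodes()
+    1000
+    >>> sorted(set(nx.get_node_attributes(G, "type").values()))
+    ['C', 'CP', 'M', 'T']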
+    """
+
+    GG = AS_graph_generator(n, seed)
+    G = GG.generate()
+    return G
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/intersection.py b/.venv/lib/python3.12/site-packages/networkx/generators/intersection.py
new file mode 100644
index 0000000..e63af5b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/intersection.py
@@ -0,0 +1,125 @@
+"""
+Generators for random intersection graphs.
+"""
+
+import networkx as nx
+from networkx.utils import py_random_state
+
+__all__ = [
+    "uniform_random_intersection_graph",
+    "k_random_intersection_graph",
+    "general_random_intersection_graph",
+]
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def uniform_random_intersection_graph(n, m, p, seed=None):
+    """Returns a uniform random intersection graph.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes in the first bipartite set (nodes)
+    m : int
+        The number of nodes in the second bipartite set (attributes)
+    p : float
+        Probability of connecting nodes between bipartite sets
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    See Also
+    --------
+    gnp_random_graph
+
+    References
+    ----------
+    .. [1] K.B. Singer-Cohen, Random Intersection Graphs, 1995,
+       PhD thesis, Johns Hopkins University
+    .. [2] Fill, J. A., Scheinerman, E. R., and Singer-Cohen, K. B.,
+       Random intersection graphs when m = ω(n):
+       An equivalence theorem relating the evolution of the G(n, m, p)
+       and G(n, p) models. Random Struct. Algorithms 16, 2 (2000), 156–176.
+    """
+    from networkx.algorithms import bipartite
+
+    G = bipartite.random_graph(n, m, p, seed)
+    return nx.projected_graph(G, range(n))
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def k_random_intersection_graph(n, m, k, seed=None):
+    """Returns an intersection graph with randomly chosen attribute sets for
+    each node that are of equal size (k).
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes in the first bipartite set (nodes)
+    m : int
+        The number of nodes in the second bipartite set (attributes)
+    k : int
+        Size of attribute set to assign to each node.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    See Also
+    --------
+    gnp_random_graph, uniform_random_intersection_graph
+
+    References
+    ----------
+    .. [1] Godehardt, E., and Jaworski, J.
+       Two models of random intersection graphs and their applications.
+       Electronic Notes in Discrete Mathematics 10 (2001), 129--132.
+    """
+    G = nx.empty_graph(n + m)
+    mset = range(n, n + m)
+    for v in range(n):
+        targets = seed.sample(mset, k)
+        G.add_edges_from(zip([v] * len(targets), targets))
+    return nx.projected_graph(G, range(n))
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def general_random_intersection_graph(n, m, p, seed=None):
+    """Returns a random intersection graph with independent probabilities
+    for connections between node and attribute sets.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes in the first bipartite set (nodes)
+    m : int
+        The number of nodes in the second bipartite set (attributes)
+    p : list of floats of length m
+        Probabilities for connecting nodes to each attribute
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
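+
+    Returns
+    -------
+    G : Graph
+        The random intersection graph: the projection of the underlying
+        random bipartite graph onto its first `n` nodes.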
+
+    See Also
+    --------
+    gnp_random_graph, uniform_random_intersection_graph
+
+    References
+    ----------
+    .. [1] Nikoletseas, S. E., Raptopoulos, C., and Spirakis, P. G.
+       The existence and efficient construction of large independent sets
+       in general random intersection graphs. In ICALP (2004), J. Díaz,
+       J. Karhumäki, A. Lepistö, and D. Sannella, Eds., vol. 3142
+       of Lecture Notes in Computer Science, Springer, pp. 1029–1040.
+    """
+    if len(p) != m:
+        raise ValueError("Probability list p must have m elements.")
+    G = nx.empty_graph(n + m)
+    mset = range(n, n + m)
+    for u in range(n):
+        for v, q in zip(mset, p):
+            if seed.random() < q:
+                G.add_edge(u, v)
+    return nx.projected_graph(G, range(n))
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/interval_graph.py b/.venv/lib/python3.12/site-packages/networkx/generators/interval_graph.py
new file mode 100644
index 0000000..6a3fda4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/interval_graph.py
@@ -0,0 +1,70 @@
+"""
+Generators for interval graph.
+"""
+
+from collections.abc import Sequence
+
+import networkx as nx
+
+__all__ = ["interval_graph"]
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def interval_graph(intervals):
+    """Generates an interval graph for a list of intervals given.
+
+    In graph theory, an interval graph is an undirected graph formed from a set
+    of closed intervals on the real line, with a vertex for each interval
+    and an edge between vertices whose intervals intersect.
+    It is the intersection graph of the intervals.
+
+    More information can be found at:
+    https://en.wikipedia.org/wiki/Interval_graph
+
+    Parameters
+    ----------
+    intervals : a sequence of intervals, say (l, r) where l is the left end,
+    and r is the right end of the closed interval.
+
+    Returns
+    -------
+    G : networkx graph
+
+    Examples
+    --------
+    >>> intervals = [(-2, 3), [1, 4], (2, 3), (4, 6)]
+    >>> G = nx.interval_graph(intervals)
+    >>> sorted(G.edges)
+    [((-2, 3), (1, 4)), ((-2, 3), (2, 3)), ((1, 4), (2, 3)), ((1, 4), (4, 6))]
+
+    Raises
+    ------
+    :exc:`TypeError`
+        if `intervals` contains None or an element which is not
+        collections.abc.Sequence or not a length of 2.
+    :exc:`ValueError`
+        if `intervals` contains an interval such that min1 > max1
+        where min1,max1 = interval
+    """
+    intervals = list(intervals)
+    for interval in intervals:
+        if not (isinstance(interval, Sequence) and len(interval) == 2):
+            raise TypeError(
+                "Each interval must have length 2, and be a "
+                "collections.abc.Sequence such as tuple or list."
+            )
+        if interval[0] > interval[1]:
+            raise ValueError(f"Interval must have lower value first. Got {interval}")
+
+    graph = nx.Graph()
+
+    tupled_intervals = [tuple(interval) for interval in intervals]
+    graph.add_nodes_from(tupled_intervals)
+
+    while tupled_intervals:
+        min1, max1 = interval1 = tupled_intervals.pop()
+        for interval2 in tupled_intervals:
+            min2, max2 = interval2
+            if max1 >= min2 and max2 >= min1:
+                graph.add_edge(interval1, interval2)
+    return graph
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/joint_degree_seq.py b/.venv/lib/python3.12/site-packages/networkx/generators/joint_degree_seq.py
new file mode 100644
index 0000000..c426df9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/joint_degree_seq.py
@@ -0,0 +1,664 @@
+"""Generate graphs with a given joint degree and directed joint degree"""
+
+import networkx as nx
+from networkx.utils import py_random_state
+
+__all__ = [
+    "is_valid_joint_degree",
+    "is_valid_directed_joint_degree",
+    "joint_degree_graph",
+    "directed_joint_degree_graph",
+]
+
+
+@nx._dispatchable(graphs=None)
+def is_valid_joint_degree(joint_degrees):
+    """Checks whether the given joint degree dictionary is realizable.
+
+    A *joint degree dictionary* is a dictionary of dictionaries, in
+    which entry ``joint_degrees[k][l]`` is an integer representing the
+    number of edges joining nodes of degree *k* with nodes of degree
+    *l*. Such a dictionary is realizable as a simple graph if and only
+    if the following conditions are satisfied.
+
+    - each entry must be an integer,
+    - the total number of nodes of degree *k*, computed by
+      ``sum(joint_degrees[k].values()) / k``, must be an integer,
+    - the total number of edges joining nodes of degree *k* with
+      nodes of degree *l* cannot exceed the total number of possible edges,
+    - each diagonal entry ``joint_degrees[k][k]`` must be even (this is
+      a convention assumed by the :func:`joint_degree_graph` function).
+
+
+    Parameters
+    ----------
+    joint_degrees : dictionary of dictionary of integers
+        A joint degree dictionary in which entry ``joint_degrees[k][l]``
+        is the number of edges joining nodes of degree *k* with nodes of
+        degree *l*.
+
+    Returns
+    -------
+    bool
+        Whether the given joint degree dictionary is realizable as a
+        simple graph.
+
+    References
+    ----------
+    .. [1] M. Gjoka, M. Kurant, A. Markopoulou, "2.5K Graphs: from Sampling
+       to Generation", IEEE Infocom, 2013.
+    .. [2] I. Stanton, A. Pinar, "Constructing and sampling graphs with a
+       prescribed joint degree distribution", Journal of Experimental
+       Algorithmics, 2012.
+    """
+
+    degree_count = {}
+    for k in joint_degrees:
+        if k > 0:
+            k_size = sum(joint_degrees[k].values()) / k
+            if not k_size.is_integer():
+                return False
+            degree_count[k] = k_size
+
+    for k in joint_degrees:
+        for l in joint_degrees[k]:
+            if not float(joint_degrees[k][l]).is_integer():
+                return False
+
+            if (k != l) and (joint_degrees[k][l] > degree_count[k] * degree_count[l]):
+                return False
+            elif k == l:
+                if joint_degrees[k][k] > degree_count[k] * (degree_count[k] - 1):
+                    return False
+                if joint_degrees[k][k] % 2 != 0:
+                    return False
+
+    # if all above conditions have been satisfied then the input
+    # joint degree is realizable as a simple graph.
+    return True
+
+
+def _neighbor_switch(G, w, unsat, h_node_residual, avoid_node_id=None):
+    """Releases one free stub for ``w``, while preserving joint degree in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Graph in which the neighbor switch will take place.
+    w : integer
+        Node id for which we will execute this neighbor switch.
+ unsat : set of integers + Set of unsaturated node ids that have the same degree as w. + h_node_residual: dictionary of integers + Keeps track of the remaining stubs for a given node. + avoid_node_id: integer + Node id to avoid when selecting w_prime. + + Notes + ----- + First, it selects *w_prime*, an unsaturated node that has the same degree + as ``w``. Second, it selects *switch_node*, a neighbor node of ``w`` that + is not connected to *w_prime*. Then it executes an edge swap i.e. removes + (``w``,*switch_node*) and adds (*w_prime*,*switch_node*). Gjoka et. al. [1] + prove that such an edge swap is always possible. + + References + ---------- + .. [1] M. Gjoka, B. Tillman, A. Markopoulou, "Construction of Simple + Graphs with a Target Joint Degree Matrix and Beyond", IEEE Infocom, '15 + """ + + if (avoid_node_id is None) or (h_node_residual[avoid_node_id] > 1): + # select unsaturated node w_prime that has the same degree as w + w_prime = next(iter(unsat)) + else: + # assume that the node pair (v,w) has been selected for connection. if + # - neighbor_switch is called for node w, + # - nodes v and w have the same degree, + # - node v=avoid_node_id has only one stub left, + # then prevent v=avoid_node_id from being selected as w_prime. + + iter_var = iter(unsat) + while True: + w_prime = next(iter_var) + if w_prime != avoid_node_id: + break + + # select switch_node, a neighbor of w, that is not connected to w_prime + w_prime_neighbs = G[w_prime] # slightly faster declaring this variable + for v in G[w]: + if (v not in w_prime_neighbs) and (v != w_prime): + switch_node = v + break + + # remove edge (w,switch_node), add edge (w_prime,switch_node) and update + # data structures + G.remove_edge(w, switch_node) + G.add_edge(w_prime, switch_node) + h_node_residual[w] += 1 + h_node_residual[w_prime] -= 1 + if h_node_residual[w_prime] == 0: + unsat.remove(w_prime) + + +@py_random_state(1) +@nx._dispatchable(graphs=None, returns_graph=True) +def joint_degree_graph(joint_degrees, seed=None): + """Generates a random simple graph with the given joint degree dictionary. + + Parameters + ---------- + joint_degrees : dictionary of dictionary of integers + A joint degree dictionary in which entry ``joint_degrees[k][l]`` is the + number of edges joining nodes of degree *k* with nodes of degree *l*. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : Graph + A graph with the specified joint degree dictionary. + + Raises + ------ + NetworkXError + If *joint_degrees* dictionary is not realizable. + + Notes + ----- + In each iteration of the "while loop" the algorithm picks two disconnected + nodes *v* and *w*, of degree *k* and *l* correspondingly, for which + ``joint_degrees[k][l]`` has not reached its target yet. It then adds + edge (*v*, *w*) and increases the number of edges in graph G by one. + + The intelligence of the algorithm lies in the fact that it is always + possible to add an edge between such disconnected nodes *v* and *w*, + even if one or both nodes do not have free stubs. That is made possible by + executing a "neighbor switch", an edge rewiring move that releases + a free stub while keeping the joint degree of G the same. + + The algorithm continues for E (number of edges) iterations of + the "while loop", at the which point all entries of the given + ``joint_degrees[k][l]`` have reached their target values and the + construction is complete. + + References + ---------- + .. [1] M. Gjoka, B. 
Tillman, A. Markopoulou, "Construction of Simple + Graphs with a Target Joint Degree Matrix and Beyond", IEEE Infocom, '15 + + Examples + -------- + >>> joint_degrees = { + ... 1: {4: 1}, + ... 2: {2: 2, 3: 2, 4: 2}, + ... 3: {2: 2, 4: 1}, + ... 4: {1: 1, 2: 2, 3: 1}, + ... } + >>> G = nx.joint_degree_graph(joint_degrees) + >>> + """ + + if not is_valid_joint_degree(joint_degrees): + msg = "Input joint degree dict not realizable as a simple graph" + raise nx.NetworkXError(msg) + + # compute degree count from joint_degrees + degree_count = {k: sum(l.values()) // k for k, l in joint_degrees.items() if k > 0} + + # start with empty N-node graph + N = sum(degree_count.values()) + G = nx.empty_graph(N) + + # for a given degree group, keep the list of all node ids + h_degree_nodelist = {} + + # for a given node, keep track of the remaining stubs + h_node_residual = {} + + # populate h_degree_nodelist and h_node_residual + nodeid = 0 + for degree, num_nodes in degree_count.items(): + h_degree_nodelist[degree] = range(nodeid, nodeid + num_nodes) + for v in h_degree_nodelist[degree]: + h_node_residual[v] = degree + nodeid += int(num_nodes) + + # iterate over every degree pair (k,l) and add the number of edges given + # for each pair + for k in joint_degrees: + for l in joint_degrees[k]: + # n_edges_add is the number of edges to add for the + # degree pair (k,l) + n_edges_add = joint_degrees[k][l] + + if (n_edges_add > 0) and (k >= l): + # number of nodes with degree k and l + k_size = degree_count[k] + l_size = degree_count[l] + + # k_nodes and l_nodes consist of all nodes of degree k and l + k_nodes = h_degree_nodelist[k] + l_nodes = h_degree_nodelist[l] + + # k_unsat and l_unsat consist of nodes of degree k and l that + # are unsaturated (nodes that have at least 1 available stub) + k_unsat = {v for v in k_nodes if h_node_residual[v] > 0} + + if k != l: + l_unsat = {w for w in l_nodes if h_node_residual[w] > 0} + else: + l_unsat = k_unsat + n_edges_add = joint_degrees[k][l] // 2 + + while n_edges_add > 0: + # randomly pick nodes v and w that have degrees k and l + v = k_nodes[seed.randrange(k_size)] + w = l_nodes[seed.randrange(l_size)] + + # if nodes v and w are disconnected then attempt to connect + if not G.has_edge(v, w) and (v != w): + # if node v has no free stubs then do neighbor switch + if h_node_residual[v] == 0: + _neighbor_switch(G, v, k_unsat, h_node_residual) + + # if node w has no free stubs then do neighbor switch + if h_node_residual[w] == 0: + if k != l: + _neighbor_switch(G, w, l_unsat, h_node_residual) + else: + _neighbor_switch( + G, w, l_unsat, h_node_residual, avoid_node_id=v + ) + + # add edge (v, w) and update data structures + G.add_edge(v, w) + h_node_residual[v] -= 1 + h_node_residual[w] -= 1 + n_edges_add -= 1 + + if h_node_residual[v] == 0: + k_unsat.discard(v) + if h_node_residual[w] == 0: + l_unsat.discard(w) + return G + + +@nx._dispatchable(graphs=None) +def is_valid_directed_joint_degree(in_degrees, out_degrees, nkk): + """Checks whether the given directed joint degree input is realizable + + Parameters + ---------- + in_degrees : list of integers + in degree sequence contains the in degrees of nodes. + out_degrees : list of integers + out degree sequence contains the out degrees of nodes. + nkk : dictionary of dictionary of integers + directed joint degree dictionary. for nodes of out degree k (first + level of dict) and nodes of in degree l (second level of dict) + describes the number of edges. 
+ + Returns + ------- + boolean + returns true if given input is realizable, else returns false. + + Notes + ----- + Here is the list of conditions that the inputs (in/out degree sequences, + nkk) need to satisfy for simple directed graph realizability: + + - Condition 0: in_degrees and out_degrees have the same length + - Condition 1: nkk[k][l] is integer for all k,l + - Condition 2: sum(nkk[k])/k = number of nodes with partition id k, is an + integer and matching degree sequence + - Condition 3: number of edges and non-chords between k and l cannot exceed + maximum possible number of edges + + + References + ---------- + [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka, + "Construction of Directed 2K Graphs". In Proc. of KDD 2017. + """ + V = {} # number of nodes with in/out degree. + forbidden = {} + if len(in_degrees) != len(out_degrees): + return False + + for idx in range(len(in_degrees)): + i = in_degrees[idx] + o = out_degrees[idx] + V[(i, 0)] = V.get((i, 0), 0) + 1 + V[(o, 1)] = V.get((o, 1), 0) + 1 + + forbidden[(o, i)] = forbidden.get((o, i), 0) + 1 + + S = {} # number of edges going from in/out degree nodes. + for k in nkk: + for l in nkk[k]: + val = nkk[k][l] + if not float(val).is_integer(): # condition 1 + return False + + if val > 0: + S[(k, 1)] = S.get((k, 1), 0) + val + S[(l, 0)] = S.get((l, 0), 0) + val + # condition 3 + if val + forbidden.get((k, l), 0) > V[(k, 1)] * V[(l, 0)]: + return False + + return all(S[s] / s[0] == V[s] for s in S) + + +def _directed_neighbor_switch( + G, w, unsat, h_node_residual_out, chords, h_partition_in, partition +): + """Releases one free stub for node w, while preserving joint degree in G. + + Parameters + ---------- + G : networkx directed graph + graph within which the edge swap will take place. + w : integer + node id for which we need to perform a neighbor switch. + unsat: set of integers + set of node ids that have the same degree as w and are unsaturated. + h_node_residual_out: dict of integers + for a given node, keeps track of the remaining stubs to be added. + chords: set of tuples + keeps track of available positions to add edges. + h_partition_in: dict of integers + for a given node, keeps track of its partition id (in degree). + partition: integer + partition id to check if chords have to be updated. + + Notes + ----- + First, it selects node w_prime that (1) has the same degree as w and + (2) is unsaturated. Then, it selects node v, a neighbor of w, that is + not connected to w_prime and does an edge swap i.e. removes (w,v) and + adds (w_prime,v). If neighbor switch is not possible for w using + w_prime and v, then return w_prime; in [1] it's proven that + such unsaturated nodes can be used. + + References + ---------- + [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka, + "Construction of Directed 2K Graphs". In Proc. of KDD 2017. 
+    """
+    w_prime = unsat.pop()
+    unsat.add(w_prime)
+    # select node t, a neighbor of w, that is not connected to w_prime
+    w_neighbs = list(G.successors(w))
+    # slightly faster declaring this variable
+    w_prime_neighbs = list(G.successors(w_prime))
+
+    for v in w_neighbs:
+        if (v not in w_prime_neighbs) and w_prime != v:
+            # removes (w,v), add (w_prime,v) and update data structures
+            G.remove_edge(w, v)
+            G.add_edge(w_prime, v)
+
+            if h_partition_in[v] == partition:
+                chords.add((w, v))
+                chords.discard((w_prime, v))
+
+            h_node_residual_out[w] += 1
+            h_node_residual_out[w_prime] -= 1
+            if h_node_residual_out[w_prime] == 0:
+                unsat.remove(w_prime)
+            return None
+
+    # If neighbor switch didn't work, use unsaturated node
+    return w_prime
+
+
+def _directed_neighbor_switch_rev(
+    G, w, unsat, h_node_residual_in, chords, h_partition_out, partition
+):
+    """The reverse of directed_neighbor_switch.
+
+    Parameters
+    ----------
+    G : networkx directed graph
+        graph within which the edge swap will take place.
+    w : integer
+        node id for which we need to perform a neighbor switch.
+    unsat: set of integers
+        set of node ids that have the same degree as w and are unsaturated.
+    h_node_residual_in: dict of integers
+        for a given node, keeps track of the remaining stubs to be added.
+    chords: set of tuples
+        keeps track of available positions to add edges.
+    h_partition_out: dict of integers
+        for a given node, keeps track of its partition id (out degree).
+    partition: integer
+        partition id to check if chords have to be updated.
+
+    Notes
+    -----
+    Same operation as directed_neighbor_switch except it handles this operation
+    for incoming edges instead of outgoing.
+    """
+    w_prime = unsat.pop()
+    unsat.add(w_prime)
+    # slightly faster declaring these as variables.
+    w_neighbs = list(G.predecessors(w))
+    w_prime_neighbs = list(G.predecessors(w_prime))
+    # select node v, a neighbor of w, that is not connected to w_prime.
+    for v in w_neighbs:
+        if (v not in w_prime_neighbs) and w_prime != v:
+            # removes (v,w), add (v,w_prime) and update data structures.
+            G.remove_edge(v, w)
+            G.add_edge(v, w_prime)
+            if h_partition_out[v] == partition:
+                chords.add((v, w))
+                chords.discard((v, w_prime))
+
+            h_node_residual_in[w] += 1
+            h_node_residual_in[w_prime] -= 1
+            if h_node_residual_in[w_prime] == 0:
+                unsat.remove(w_prime)
+            return None
+
+    # If neighbor switch didn't work, use the unsaturated node.
+    return w_prime
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def directed_joint_degree_graph(in_degrees, out_degrees, nkk, seed=None):
+    """Generates a random simple directed graph with the joint degree.
+
+    Parameters
+    ----------
+    in_degrees : list of integers
+        in degree sequence contains the in degrees of nodes.
+    out_degrees : list of integers
+        out degree sequence contains the out degrees of nodes.
+    nkk : dictionary of dictionary of integers
+        directed joint degree dictionary, for nodes of out degree k (first
+        level of dict) and nodes of in degree l (second level of dict)
+        describes the number of edges.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    G : Graph
+        A directed graph with the specified inputs.
+
+    Raises
+    ------
+    NetworkXError
+        If in_degrees, out_degrees and nkk are not realizable as a simple
+        directed graph.
+
+
+    Notes
+    -----
+    Similarly to the undirected version:
+    In each iteration of the "while loop" the algorithm picks two disconnected
+    nodes v and w, of degree k and l correspondingly, for which nkk[k][l] has
+    not reached its target yet i.e.
(for given k,l): n_edges_add < nkk[k][l]. + It then adds edge (v,w) and always increases the number of edges in graph G + by one. + + The intelligence of the algorithm lies in the fact that it is always + possible to add an edge between disconnected nodes v and w, for which + nkk[degree(v)][degree(w)] has not reached its target, even if one or both + nodes do not have free stubs. If either node v or w does not have a free + stub, we perform a "neighbor switch", an edge rewiring move that releases a + free stub while keeping nkk the same. + + The difference for the directed version lies in the fact that neighbor + switches might not be able to rewire, but in these cases unsaturated nodes + can be reassigned to use instead, see [1] for detailed description and + proofs. + + The algorithm continues for E (number of edges in the graph) iterations of + the "while loop", at which point all entries of the given nkk[k][l] have + reached their target values and the construction is complete. + + References + ---------- + [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka, + "Construction of Directed 2K Graphs". In Proc. of KDD 2017. + + Examples + -------- + >>> in_degrees = [0, 1, 1, 2] + >>> out_degrees = [1, 1, 1, 1] + >>> nkk = {1: {1: 2, 2: 2}} + >>> G = nx.directed_joint_degree_graph(in_degrees, out_degrees, nkk) + >>> + """ + if not is_valid_directed_joint_degree(in_degrees, out_degrees, nkk): + msg = "Input is not realizable as a simple graph" + raise nx.NetworkXError(msg) + + # start with an empty directed graph. + G = nx.DiGraph() + + # for a given group, keep the list of all node ids. + h_degree_nodelist_in = {} + h_degree_nodelist_out = {} + # for a given group, keep the list of all unsaturated node ids. + h_degree_nodelist_in_unsat = {} + h_degree_nodelist_out_unsat = {} + # for a given node, keep track of the remaining stubs to be added. + h_node_residual_out = {} + h_node_residual_in = {} + # for a given node, keep track of the partition id. + h_partition_out = {} + h_partition_in = {} + # keep track of non-chords between pairs of partition ids. + non_chords = {} + + # populate data structures + for idx, i in enumerate(in_degrees): + idx = int(idx) + if i > 0: + h_degree_nodelist_in.setdefault(i, []) + h_degree_nodelist_in_unsat.setdefault(i, set()) + h_degree_nodelist_in[i].append(idx) + h_degree_nodelist_in_unsat[i].add(idx) + h_node_residual_in[idx] = i + h_partition_in[idx] = i + + for idx, o in enumerate(out_degrees): + o = out_degrees[idx] + non_chords[(o, in_degrees[idx])] = non_chords.get((o, in_degrees[idx]), 0) + 1 + idx = int(idx) + if o > 0: + h_degree_nodelist_out.setdefault(o, []) + h_degree_nodelist_out_unsat.setdefault(o, set()) + h_degree_nodelist_out[o].append(idx) + h_degree_nodelist_out_unsat[o].add(idx) + h_node_residual_out[idx] = o + h_partition_out[idx] = o + + G.add_node(idx) + + nk_in = {} + nk_out = {} + for p in h_degree_nodelist_in: + nk_in[p] = len(h_degree_nodelist_in[p]) + for p in h_degree_nodelist_out: + nk_out[p] = len(h_degree_nodelist_out[p]) + + # iterate over every degree pair (k,l) and add the number of edges given + # for each pair. + for k in nkk: + for l in nkk[k]: + n_edges_add = nkk[k][l] + + if n_edges_add > 0: + # chords contains a random set of potential edges. 
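+                # Each sampled integer below encodes one candidate pair:
+                # (index % k_len) picks the out-degree-k endpoint and
+                # (index // k_len) picks the in-degree-l endpoint, so
+                # distinct integers map to distinct (i, j) positions.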
+ chords = set() + + k_len = nk_out[k] + l_len = nk_in[l] + chords_sample = seed.sample( + range(k_len * l_len), n_edges_add + non_chords.get((k, l), 0) + ) + + num = 0 + while len(chords) < n_edges_add: + i = h_degree_nodelist_out[k][chords_sample[num] % k_len] + j = h_degree_nodelist_in[l][chords_sample[num] // k_len] + num += 1 + if i != j: + chords.add((i, j)) + + # k_unsat and l_unsat consist of nodes of in/out degree k and l + # that are unsaturated i.e. those nodes that have at least one + # available stub + k_unsat = h_degree_nodelist_out_unsat[k] + l_unsat = h_degree_nodelist_in_unsat[l] + + while n_edges_add > 0: + v, w = chords.pop() + chords.add((v, w)) + + # if node v has no free stubs then do neighbor switch. + if h_node_residual_out[v] == 0: + _v = _directed_neighbor_switch( + G, + v, + k_unsat, + h_node_residual_out, + chords, + h_partition_in, + l, + ) + if _v is not None: + v = _v + + # if node w has no free stubs then do neighbor switch. + if h_node_residual_in[w] == 0: + _w = _directed_neighbor_switch_rev( + G, + w, + l_unsat, + h_node_residual_in, + chords, + h_partition_out, + k, + ) + if _w is not None: + w = _w + + # add edge (v,w) and update data structures. + G.add_edge(v, w) + h_node_residual_out[v] -= 1 + h_node_residual_in[w] -= 1 + n_edges_add -= 1 + chords.discard((v, w)) + + if h_node_residual_out[v] == 0: + k_unsat.discard(v) + if h_node_residual_in[w] == 0: + l_unsat.discard(w) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/lattice.py b/.venv/lib/python3.12/site-packages/networkx/generators/lattice.py new file mode 100644 index 0000000..95e520d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/lattice.py @@ -0,0 +1,367 @@ +"""Functions for generating grid graphs and lattices + +The :func:`grid_2d_graph`, :func:`triangular_lattice_graph`, and +:func:`hexagonal_lattice_graph` functions correspond to the three +`regular tilings of the plane`_, the square, triangular, and hexagonal +tilings, respectively. :func:`grid_graph` and :func:`hypercube_graph` +are similar for arbitrary dimensions. Useful relevant discussion can +be found about `Triangular Tiling`_, and `Square, Hex and Triangle Grids`_ + +.. _regular tilings of the plane: https://en.wikipedia.org/wiki/List_of_regular_polytopes_and_compounds#Euclidean_tilings +.. _Square, Hex and Triangle Grids: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/ +.. _Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling + +""" + +from itertools import repeat +from math import sqrt + +import networkx as nx +from networkx.classes import set_node_attributes +from networkx.exception import NetworkXError +from networkx.generators.classic import cycle_graph, empty_graph, path_graph +from networkx.relabel import relabel_nodes +from networkx.utils import flatten, nodes_or_number, pairwise + +__all__ = [ + "grid_2d_graph", + "grid_graph", + "hypercube_graph", + "triangular_lattice_graph", + "hexagonal_lattice_graph", +] + + +@nx._dispatchable(graphs=None, returns_graph=True) +@nodes_or_number([0, 1]) +def grid_2d_graph(m, n, periodic=False, create_using=None): + """Returns the two-dimensional grid graph. + + The grid graph has each node connected to its four nearest neighbors. + + Parameters + ---------- + m, n : int or iterable container of nodes + If an integer, nodes are from `range(n)`. + If a container, elements become the coordinate of the nodes. + + periodic : bool or iterable + If `periodic` is True, both dimensions are periodic. 
If False, none + are periodic. If `periodic` is iterable, it should yield 2 bool + values indicating whether the 1st and 2nd axes, respectively, are + periodic. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + The (possibly periodic) grid graph of the specified dimensions. + + """ + G = empty_graph(0, create_using) + row_name, rows = m + col_name, cols = n + G.add_nodes_from((i, j) for i in rows for j in cols) + G.add_edges_from(((i, j), (pi, j)) for pi, i in pairwise(rows) for j in cols) + G.add_edges_from(((i, j), (i, pj)) for i in rows for pj, j in pairwise(cols)) + + try: + periodic_r, periodic_c = periodic + except TypeError: + periodic_r = periodic_c = periodic + + if periodic_r and len(rows) > 2: + first = rows[0] + last = rows[-1] + G.add_edges_from(((first, j), (last, j)) for j in cols) + if periodic_c and len(cols) > 2: + first = cols[0] + last = cols[-1] + G.add_edges_from(((i, first), (i, last)) for i in rows) + # both directions for directed + if G.is_directed(): + G.add_edges_from((v, u) for u, v in G.edges()) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def grid_graph(dim, periodic=False): + """Returns the *n*-dimensional grid graph. + + The dimension *n* is the length of the list `dim` and the size in + each dimension is the value of the corresponding list element. + + Parameters + ---------- + dim : list or tuple of numbers or iterables of nodes + 'dim' is a tuple or list with, for each dimension, either a number + that is the size of that dimension or an iterable of nodes for + that dimension. The dimension of the grid_graph is the length + of `dim`. + + periodic : bool or iterable + If `periodic` is True, all dimensions are periodic. If False all + dimensions are not periodic. If `periodic` is iterable, it should + yield `dim` bool values each of which indicates whether the + corresponding axis is periodic. + + Returns + ------- + NetworkX graph + The (possibly periodic) grid graph of the specified dimensions. + + Examples + -------- + To produce a 2 by 3 by 4 grid graph, a graph on 24 nodes: + + >>> from networkx import grid_graph + >>> G = grid_graph(dim=(2, 3, 4)) + >>> len(G) + 24 + >>> G = grid_graph(dim=(range(7, 9), range(3, 6))) + >>> len(G) + 6 + """ + from networkx.algorithms.operators.product import cartesian_product + + if not dim: + return empty_graph(0) + + try: + func = (cycle_graph if p else path_graph for p in periodic) + except TypeError: + func = repeat(cycle_graph if periodic else path_graph) + + G = next(func)(dim[0]) + for current_dim in dim[1:]: + Gnew = next(func)(current_dim) + G = cartesian_product(Gnew, G) + # graph G is done but has labels of the form (1, (2, (3, 1))) so relabel + H = relabel_nodes(G, flatten) + return H + + +@nx._dispatchable(graphs=None, returns_graph=True) +def hypercube_graph(n): + """Returns the *n*-dimensional hypercube graph. + + The nodes are the integers between 0 and ``2 ** n - 1``, inclusive. + + For more information on the hypercube graph, see the Wikipedia + article `Hypercube graph`_. + + .. _Hypercube graph: https://en.wikipedia.org/wiki/Hypercube_graph + + Parameters + ---------- + n : int + The dimension of the hypercube. + The number of nodes in the graph will be ``2 ** n``. + + Returns + ------- + NetworkX graph + The hypercube graph of dimension *n*. 
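+
+    Examples
+    --------
+    The *n*-dimensional hypercube has ``2 ** n`` nodes, each of degree *n*,
+    and therefore ``n * 2 ** (n - 1)`` edges; for ``n = 3``:
+
+    >>> G = nx.hypercube_graph(3)
+    >>> len(G)
+    8
+    >>> G.number_of_edges()
+    12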
+
+    """
+    dim = n * [2]
+    G = grid_graph(dim)
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def triangular_lattice_graph(
+    m, n, periodic=False, with_positions=True, create_using=None
+):
+    r"""Returns the $m$ by $n$ triangular lattice graph.
+
+    The `triangular lattice graph`_ is a two-dimensional `grid graph`_ in
+    which each square unit has a diagonal edge (each grid unit has a chord).
+
+    The returned graph has $m$ rows and $n$ columns of triangles. Rows and
+    columns include both triangles pointing up and down. Rows form a strip
+    of constant height. Columns form a series of diamond shapes, staggered
+    with the columns on either side. Another way to state the size is that
+    the nodes form a grid of `m+1` rows and `(n + 1) // 2` columns.
+    The odd row nodes are shifted horizontally relative to the even rows.
+
+    Directed graph types have edges pointed up or right.
+
+    Positions of nodes are computed by default or if `with_positions` is True.
+    The position of each node (embedded in a Euclidean plane) is stored in
+    the graph using equilateral triangles with sidelength 1.
+    The height between rows of nodes is thus $\sqrt{3}/2$.
+    Nodes lie in the first quadrant with the node $(0, 0)$ at the origin.
+
+    .. _triangular lattice graph: http://mathworld.wolfram.com/TriangularGrid.html
+    .. _grid graph: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/
+    .. _Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling
+
+    Parameters
+    ----------
+    m : int
+        The number of rows in the lattice.
+
+    n : int
+        The number of columns in the lattice.
+
+    periodic : bool (default: False)
+        If True, join the boundary vertices of the grid using periodic
+        boundary conditions. The join between boundaries is the final row
+        and column of triangles. This means there is one row and one column
+        fewer nodes for the periodic lattice. Periodic lattices require
+        `m >= 3`, `n >= 5` and are allowed but misaligned if `m` or `n` are odd
+
+    with_positions : bool (default: True)
+        Store the coordinates of each node in the graph node attribute 'pos'.
+        The coordinates provide a lattice with equilateral triangles.
+        Periodic positions shift the nodes vertically in a nonlinear way so
+        the edges don't overlap so much.
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    Returns
+    -------
+    NetworkX graph
+        The *m* by *n* triangular lattice graph.
+    """
+    H = empty_graph(0, create_using)
+    if n == 0 or m == 0:
+        return H
+    if periodic:
+        if n < 5 or m < 3:
+            msg = f"m > 2 and n > 4 required for periodic. m={m}, n={n}"
+            raise NetworkXError(msg)
+
+    N = (n + 1) // 2  # number of nodes in row
+    rows = range(m + 1)
+    cols = range(N + 1)
+    # Make grid
+    H.add_edges_from(((i, j), (i + 1, j)) for j in rows for i in cols[:N])
+    H.add_edges_from(((i, j), (i, j + 1)) for j in rows[:m] for i in cols)
+    # add diagonals
+    H.add_edges_from(((i, j), (i + 1, j + 1)) for j in rows[1:m:2] for i in cols[:N])
+    H.add_edges_from(((i + 1, j), (i, j + 1)) for j in rows[:m:2] for i in cols[:N])
+    # identify boundary nodes if periodic
+    from networkx.algorithms.minors import contracted_nodes
+
+    if periodic is True:
+        for i in cols:
+            H = contracted_nodes(H, (i, 0), (i, m))
+        for j in rows[:m]:
+            H = contracted_nodes(H, (0, j), (N, j))
+    elif n % 2:
+        # remove extra nodes
+        H.remove_nodes_from((N, j) for j in rows[1::2])
+
+    # Add position node attributes
+    if with_positions:
+        ii = (i for i in cols for j in rows)
+        jj = (j for i in cols for j in rows)
+        xx = (0.5 * (j % 2) + i for i in cols for j in rows)
+        h = sqrt(3) / 2
+        if periodic:
+            yy = (h * j + 0.01 * i * i for i in cols for j in rows)
+        else:
+            yy = (h * j for i in cols for j in rows)
+        pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) if (i, j) in H}
+        set_node_attributes(H, pos, "pos")
+    return H
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def hexagonal_lattice_graph(
+    m, n, periodic=False, with_positions=True, create_using=None
+):
+    """Returns an `m` by `n` hexagonal lattice graph.
+
+    The *hexagonal lattice graph* is a graph whose nodes and edges are
+    the `hexagonal tiling`_ of the plane.
+
+    The returned graph will have `m` rows and `n` columns of hexagons.
+    `Odd numbered columns`_ are shifted up relative to even numbered columns.
+
+    Positions of nodes are computed by default or if `with_positions` is True.
+    Node positions create the standard embedding in the plane
+    with sidelength 1 and are stored in the node attribute 'pos'.
+    `pos = nx.get_node_attributes(G, 'pos')` creates a dict ready for drawing.
+
+    .. _hexagonal tiling: https://en.wikipedia.org/wiki/Hexagonal_tiling
+    .. _Odd numbered columns: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/
+
+    Parameters
+    ----------
+    m : int
+        The number of rows of hexagons in the lattice.
+
+    n : int
+        The number of columns of hexagons in the lattice.
+
+    periodic : bool
+        Whether to make a periodic grid by joining the boundary vertices.
+        For this to work `n` must be even and both `n > 1` and `m > 1`.
+        The periodic connections create another row and column of hexagons
+        so these graphs have fewer nodes as boundary nodes are identified.
+
+    with_positions : bool (default: True)
+        Store the coordinates of each node in the graph node attribute 'pos'.
+        The coordinates provide a lattice with vertical columns of hexagons
+        offset to interleave and cover the plane.
+        Periodic positions shift the nodes vertically in a nonlinear way so
+        the edges don't overlap so much.
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+        If graph is directed, edges will point up or right.
+
+    Returns
+    -------
+    NetworkX graph
+        The *m* by *n* hexagonal lattice graph.
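+
+    Examples
+    --------
+    A small sketch; every node of the non-periodic lattice has degree
+    at most 3:
+
+    >>> G = nx.hexagonal_lattice_graph(2, 2)
+    >>> G.number_of_nodes()
+    16
+    >>> all(deg <= 3 for _, deg in G.degree())
+    True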
+ """ + G = empty_graph(0, create_using) + if m == 0 or n == 0: + return G + if periodic and (n % 2 == 1 or m < 2 or n < 2): + msg = "periodic hexagonal lattice needs m > 1, n > 1 and even n" + raise NetworkXError(msg) + + M = 2 * m # twice as many nodes as hexagons vertically + rows = range(M + 2) + cols = range(n + 1) + # make lattice + col_edges = (((i, j), (i, j + 1)) for i in cols for j in rows[: M + 1]) + row_edges = (((i, j), (i + 1, j)) for i in cols[:n] for j in rows if i % 2 == j % 2) + G.add_edges_from(col_edges) + G.add_edges_from(row_edges) + # Remove corner nodes with one edge + G.remove_node((0, M + 1)) + G.remove_node((n, (M + 1) * (n % 2))) + + # identify boundary nodes if periodic + from networkx.algorithms.minors import contracted_nodes + + if periodic: + for i in cols[:n]: + G = contracted_nodes(G, (i, 0), (i, M)) + for i in cols[1:]: + G = contracted_nodes(G, (i, 1), (i, M + 1)) + for j in rows[1:M]: + G = contracted_nodes(G, (0, j), (n, j)) + G.remove_node((n, M)) + + # calc position in embedded space + ii = (i for i in cols for j in rows) + jj = (j for i in cols for j in rows) + xx = (0.5 + i + i // 2 + (j % 2) * ((i % 2) - 0.5) for i in cols for j in rows) + h = sqrt(3) / 2 + if periodic: + yy = (h * j + 0.01 * i * i for i in cols for j in rows) + else: + yy = (h * j for i in cols for j in rows) + # exclude nodes not in G + pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) if (i, j) in G} + set_node_attributes(G, pos, "pos") + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/line.py b/.venv/lib/python3.12/site-packages/networkx/generators/line.py new file mode 100644 index 0000000..2c6274a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/line.py @@ -0,0 +1,501 @@ +"""Functions for generating line graphs.""" + +from collections import defaultdict +from functools import partial +from itertools import combinations + +import networkx as nx +from networkx.utils import arbitrary_element +from networkx.utils.decorators import not_implemented_for + +__all__ = ["line_graph", "inverse_line_graph"] + + +@nx._dispatchable(returns_graph=True) +def line_graph(G, create_using=None): + r"""Returns the line graph of the graph or digraph `G`. + + The line graph of a graph `G` has a node for each edge in `G` and an + edge joining those nodes if the two edges in `G` share a common node. For + directed graphs, nodes are adjacent exactly when the edges they represent + form a directed path of length two. + + The nodes of the line graph are 2-tuples of nodes in the original graph (or + 3-tuples for multigraphs, with the key of the edge as the third element). + + For information about self-loops and more discussion, see the **Notes** + section below. + + Parameters + ---------- + G : graph + A NetworkX Graph, DiGraph, MultiGraph, or MultiDigraph. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + L : graph + The line graph of G. 
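+
+    See Also
+    --------
+    inverse_line_graph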
+ + Examples + -------- + >>> G = nx.star_graph(3) + >>> L = nx.line_graph(G) + >>> print(sorted(map(sorted, L.edges()))) # makes a 3-clique, K3 + [[(0, 1), (0, 2)], [(0, 1), (0, 3)], [(0, 2), (0, 3)]] + + Edge attributes from `G` are not copied over as node attributes in `L`, but + attributes can be copied manually: + + >>> G = nx.path_graph(4) + >>> G.add_edges_from((u, v, {"tot": u + v}) for u, v in G.edges) + >>> G.edges(data=True) + EdgeDataView([(0, 1, {'tot': 1}), (1, 2, {'tot': 3}), (2, 3, {'tot': 5})]) + >>> H = nx.line_graph(G) + >>> H.add_nodes_from((node, G.edges[node]) for node in H) + >>> H.nodes(data=True) + NodeDataView({(0, 1): {'tot': 1}, (2, 3): {'tot': 5}, (1, 2): {'tot': 3}}) + + Notes + ----- + Graph, node, and edge data are not propagated to the new graph. For + undirected graphs, the nodes in G must be sortable, otherwise the + constructed line graph may not be correct. + + *Self-loops in undirected graphs* + + For an undirected graph `G` without multiple edges, each edge can be + written as a set `\{u, v\}`. Its line graph `L` has the edges of `G` as + its nodes. If `x` and `y` are two nodes in `L`, then `\{x, y\}` is an edge + in `L` if and only if the intersection of `x` and `y` is nonempty. Thus, + the set of all edges is determined by the set of all pairwise intersections + of edges in `G`. + + Trivially, every edge in G would have a nonzero intersection with itself, + and so every node in `L` should have a self-loop. This is not so + interesting, and the original context of line graphs was with simple + graphs, which had no self-loops or multiple edges. The line graph was also + meant to be a simple graph and thus, self-loops in `L` are not part of the + standard definition of a line graph. In a pairwise intersection matrix, + this is analogous to excluding the diagonal entries from the line graph + definition. + + Self-loops and multiple edges in `G` add nodes to `L` in a natural way, and + do not require any fundamental changes to the definition. It might be + argued that the self-loops we excluded before should now be included. + However, the self-loops are still "trivial" in some sense and thus, are + usually excluded. + + *Self-loops in directed graphs* + + For a directed graph `G` without multiple edges, each edge can be written + as a tuple `(u, v)`. Its line graph `L` has the edges of `G` as its + nodes. If `x` and `y` are two nodes in `L`, then `(x, y)` is an edge in `L` + if and only if the tail of `x` matches the head of `y`, for example, if `x + = (a, b)` and `y = (b, c)` for some vertices `a`, `b`, and `c` in `G`. + + Due to the directed nature of the edges, it is no longer the case that + every edge in `G` should have a self-loop in `L`. Now, the only time + self-loops arise is if a node in `G` itself has a self-loop. So such + self-loops are no longer "trivial" but instead, represent essential + features of the topology of `G`. For this reason, the historical + development of line digraphs is such that self-loops are included. When the + graph `G` has multiple edges, once again only superficial changes are + required to the definition. + + References + ---------- + * Harary, Frank, and Norman, Robert Z., "Some properties of line digraphs", + Rend. Circ. Mat. Palermo, II. Ser. 9 (1960), 161--168. + * Hemminger, R. L.; Beineke, L. W. (1978), "Line graphs and line digraphs", + in Beineke, L. W.; Wilson, R. J., Selected Topics in Graph Theory, + Academic Press Inc., pp. 271--305. 
+ + """ + if G.is_directed(): + L = _lg_directed(G, create_using=create_using) + else: + L = _lg_undirected(G, selfloops=False, create_using=create_using) + return L + + +def _lg_directed(G, create_using=None): + """Returns the line graph L of the (multi)digraph G. + + Edges in G appear as nodes in L, represented as tuples of the form (u,v) + or (u,v,key) if G is a multidigraph. A node in L corresponding to the edge + (u,v) is connected to every node corresponding to an edge (v,w). + + Parameters + ---------- + G : digraph + A directed graph or directed multigraph. + create_using : NetworkX graph constructor, optional + Graph type to create. If graph instance, then cleared before populated. + Default is to use the same graph class as `G`. + + """ + L = nx.empty_graph(0, create_using, default=G.__class__) + + # Create a graph specific edge function. + get_edges = partial(G.edges, keys=True) if G.is_multigraph() else G.edges + + for from_node in get_edges(): + # from_node is: (u,v) or (u,v,key) + L.add_node(from_node) + for to_node in get_edges(from_node[1]): + L.add_edge(from_node, to_node) + + return L + + +def _lg_undirected(G, selfloops=False, create_using=None): + """Returns the line graph L of the (multi)graph G. + + Edges in G appear as nodes in L, represented as sorted tuples of the form + (u,v), or (u,v,key) if G is a multigraph. A node in L corresponding to + the edge {u,v} is connected to every node corresponding to an edge that + involves u or v. + + Parameters + ---------- + G : graph + An undirected graph or multigraph. + selfloops : bool + If `True`, then self-loops are included in the line graph. If `False`, + they are excluded. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + The standard algorithm for line graphs of undirected graphs does not + produce self-loops. + + """ + L = nx.empty_graph(0, create_using, default=G.__class__) + + # Graph specific functions for edges. + get_edges = partial(G.edges, keys=True) if G.is_multigraph() else G.edges + + # Determine if we include self-loops or not. + shift = 0 if selfloops else 1 + + # Introduce numbering of nodes + node_index = {n: i for i, n in enumerate(G)} + + # Lift canonical representation of nodes to edges in line graph + def edge_key_function(edge): + return node_index[edge[0]], node_index[edge[1]] + + edges = set() + for u in G: + # Label nodes as a sorted tuple of nodes in original graph. + # Decide on representation of {u, v} as (u, v) or (v, u) depending on node_index. + # -> This ensures a canonical representation and avoids comparing values of different types. + nodes = [tuple(sorted(x[:2], key=node_index.get)) + x[2:] for x in get_edges(u)] + + if len(nodes) == 1: + # Then the edge will be an isolated node in L. + L.add_node(nodes[0]) + + # Add a clique of `nodes` to graph. To prevent double adding edges, + # especially important for multigraphs, we store the edges in + # canonical form in a set. + for i, a in enumerate(nodes): + edges.update( + [ + tuple(sorted((a, b), key=edge_key_function)) + for b in nodes[i + shift :] + ] + ) + + L.add_edges_from(edges) + return L + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(returns_graph=True) +def inverse_line_graph(G): + """Returns the inverse line graph of graph G. + + If H is a graph, and G is the line graph of H, such that G = L(H). + Then H is the inverse line graph of G. 
+ + Not all graphs are line graphs and these do not have an inverse line graph. + In these cases this function raises a NetworkXError. + + Parameters + ---------- + G : graph + A NetworkX Graph + + Returns + ------- + H : graph + The inverse line graph of G. + + Raises + ------ + NetworkXNotImplemented + If G is directed or a multigraph + + NetworkXError + If G is not a line graph + + Notes + ----- + This is an implementation of the Roussopoulos algorithm[1]_. + + If G consists of multiple components, then the algorithm doesn't work. + You should invert every component separately: + + >>> K5 = nx.complete_graph(5) + >>> P4 = nx.Graph([("a", "b"), ("b", "c"), ("c", "d")]) + >>> G = nx.union(K5, P4) + >>> root_graphs = [] + >>> for comp in nx.connected_components(G): + ... root_graphs.append(nx.inverse_line_graph(G.subgraph(comp))) + >>> len(root_graphs) + 2 + + References + ---------- + .. [1] Roussopoulos, N.D. , "A max {m, n} algorithm for determining the graph H from + its line graph G", Information Processing Letters 2, (1973), 108--112, ISSN 0020-0190, + `DOI link `_ + + """ + if G.number_of_nodes() == 0: + return nx.empty_graph(1) + elif G.number_of_nodes() == 1: + v = arbitrary_element(G) + a = (v, 0) + b = (v, 1) + H = nx.Graph([(a, b)]) + return H + elif G.number_of_nodes() > 1 and G.number_of_edges() == 0: + msg = ( + "inverse_line_graph() doesn't work on an edgeless graph. " + "Please use this function on each component separately." + ) + raise nx.NetworkXError(msg) + + if nx.number_of_selfloops(G) != 0: + msg = ( + "A line graph as generated by NetworkX has no selfloops, so G has no " + "inverse line graph. Please remove the selfloops from G and try again." + ) + raise nx.NetworkXError(msg) + + starting_cell = _select_starting_cell(G) + P = _find_partition(G, starting_cell) + # count how many times each vertex appears in the partition set + P_count = dict.fromkeys(G.nodes, 0) + for p in P: + for u in p: + P_count[u] += 1 + + if max(P_count.values()) > 2: + msg = "G is not a line graph (vertex found in more than two partition cells)" + raise nx.NetworkXError(msg) + W = tuple((u,) for u in P_count if P_count[u] == 1) + H = nx.Graph() + H.add_nodes_from(P) + H.add_nodes_from(W) + for a, b in combinations(H.nodes, 2): + if any(a_bit in b for a_bit in a): + H.add_edge(a, b) + return H + + +def _triangles(G, e): + """Return list of all triangles containing edge e""" + u, v = e + if u not in G: + raise nx.NetworkXError(f"Vertex {u} not in graph") + if v not in G[u]: + raise nx.NetworkXError(f"Edge ({u}, {v}) not in graph") + triangle_list = [] + for x in G[u]: + if x in G[v]: + triangle_list.append((u, v, x)) + return triangle_list + + +def _odd_triangle(G, T): + """Test whether T is an odd triangle in G + + Parameters + ---------- + G : NetworkX Graph + T : 3-tuple of vertices forming triangle in G + + Returns + ------- + True is T is an odd triangle + False otherwise + + Raises + ------ + NetworkXError + T is not a triangle in G + + Notes + ----- + An odd triangle is one in which there exists another vertex in G which is + adjacent to either exactly one or exactly all three of the vertices in the + triangle. 
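+
+    Examples
+    --------
+    A small sketch: in ``nx.complete_graph(4)`` every triangle is odd,
+    because the remaining vertex is adjacent to all three triangle
+    vertices:
+
+    >>> G = nx.complete_graph(4)
+    >>> _odd_triangle(G, (0, 1, 2))
+    True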
+ + """ + for u in T: + if u not in G.nodes(): + raise nx.NetworkXError(f"Vertex {u} not in graph") + for e in list(combinations(T, 2)): + if e[0] not in G[e[1]]: + raise nx.NetworkXError(f"Edge ({e[0]}, {e[1]}) not in graph") + + T_nbrs = defaultdict(int) + for t in T: + for v in G[t]: + if v not in T: + T_nbrs[v] += 1 + return any(T_nbrs[v] in [1, 3] for v in T_nbrs) + + +def _find_partition(G, starting_cell): + """Find a partition of the vertices of G into cells of complete graphs + + Parameters + ---------- + G : NetworkX Graph + starting_cell : tuple of vertices in G which form a cell + + Returns + ------- + List of tuples of vertices of G + + Raises + ------ + NetworkXError + If a cell is not a complete subgraph then G is not a line graph + """ + G_partition = G.copy() + P = [starting_cell] # partition set + G_partition.remove_edges_from(list(combinations(starting_cell, 2))) + # keep list of partitioned nodes which might have an edge in G_partition + partitioned_vertices = list(starting_cell) + while G_partition.number_of_edges() > 0: + # there are still edges left and so more cells to be made + u = partitioned_vertices.pop() + deg_u = len(G_partition[u]) + if deg_u != 0: + # if u still has edges then we need to find its other cell + # this other cell must be a complete subgraph or else G is + # not a line graph + new_cell = [u] + list(G_partition[u]) + for u in new_cell: + for v in new_cell: + if (u != v) and (v not in G_partition[u]): + msg = ( + "G is not a line graph " + "(partition cell not a complete subgraph)" + ) + raise nx.NetworkXError(msg) + P.append(tuple(new_cell)) + G_partition.remove_edges_from(list(combinations(new_cell, 2))) + partitioned_vertices += new_cell + return P + + +def _select_starting_cell(G, starting_edge=None): + """Select a cell to initiate _find_partition + + Parameters + ---------- + G : NetworkX Graph + starting_edge: an edge to build the starting cell from + + Returns + ------- + Tuple of vertices in G + + Raises + ------ + NetworkXError + If it is determined that G is not a line graph + + Notes + ----- + If starting edge not specified then pick an arbitrary edge - doesn't + matter which. However, this function may call itself requiring a + specific starting edge. Note that the r, s notation for counting + triangles is the same as in the Roussopoulos paper cited above. + """ + if starting_edge is None: + e = arbitrary_element(G.edges()) + else: + e = starting_edge + if e[0] not in G.nodes(): + raise nx.NetworkXError(f"Vertex {e[0]} not in graph") + if e[1] not in G[e[0]]: + msg = f"starting_edge ({e[0]}, {e[1]}) is not in the Graph" + raise nx.NetworkXError(msg) + e_triangles = _triangles(G, e) + r = len(e_triangles) + if r == 0: + # there are no triangles containing e, so the starting cell is just e + starting_cell = e + elif r == 1: + # there is exactly one triangle, T, containing e. 
If other 2 edges + # of T belong only to this triangle then T is starting cell + T = e_triangles[0] + a, b, c = T + # ab was original edge so check the other 2 edges + ac_edges = len(_triangles(G, (a, c))) + bc_edges = len(_triangles(G, (b, c))) + if ac_edges == 1: + if bc_edges == 1: + starting_cell = T + else: + return _select_starting_cell(G, starting_edge=(b, c)) + else: + return _select_starting_cell(G, starting_edge=(a, c)) + else: + # r >= 2 so we need to count the number of odd triangles, s + s = 0 + odd_triangles = [] + for T in e_triangles: + if _odd_triangle(G, T): + s += 1 + odd_triangles.append(T) + if r == 2 and s == 0: + # in this case either triangle works, so just use T + starting_cell = T + elif r - 1 <= s <= r: + # check if odd triangles containing e form complete subgraph + triangle_nodes = set() + for T in odd_triangles: + for x in T: + triangle_nodes.add(x) + + for u in triangle_nodes: + for v in triangle_nodes: + if u != v and (v not in G[u]): + msg = ( + "G is not a line graph (odd triangles " + "do not form complete subgraph)" + ) + raise nx.NetworkXError(msg) + # otherwise then we can use this as the starting cell + starting_cell = tuple(triangle_nodes) + + else: + msg = ( + "G is not a line graph (incorrect number of " + "odd triangles around starting edge)" + ) + raise nx.NetworkXError(msg) + return starting_cell diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/mycielski.py b/.venv/lib/python3.12/site-packages/networkx/generators/mycielski.py new file mode 100644 index 0000000..804b903 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/mycielski.py @@ -0,0 +1,110 @@ +"""Functions related to the Mycielski Operation and the Mycielskian family +of graphs. + +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["mycielskian", "mycielski_graph"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(returns_graph=True) +def mycielskian(G, iterations=1): + r"""Returns the Mycielskian of a simple, undirected graph G + + The Mycielskian of graph preserves a graph's triangle free + property while increasing the chromatic number by 1. + + The Mycielski Operation on a graph, :math:`G=(V, E)`, constructs a new + graph with :math:`2|V| + 1` nodes and :math:`3|E| + |V|` edges. + + The construction is as follows: + + Let :math:`V = {0, ..., n-1}`. Construct another vertex set + :math:`U = {n, ..., 2n}` and a vertex, `w`. + Construct a new graph, `M`, with vertices :math:`U \bigcup V \bigcup w`. + For edges, :math:`(u, v) \in E` add edges :math:`(u, v), (u, v + n)`, and + :math:`(u + n, v)` to M. Finally, for all vertices :math:`u \in U`, add + edge :math:`(u, w)` to M. + + The Mycielski Operation can be done multiple times by repeating the above + process iteratively. + + More information can be found at https://en.wikipedia.org/wiki/Mycielskian + + Parameters + ---------- + G : graph + A simple, undirected NetworkX graph + iterations : int + The number of iterations of the Mycielski operation to + perform on G. Defaults to 1. Must be a non-negative integer. + + Returns + ------- + M : graph + The Mycielskian of G after the specified number of iterations. + + Notes + ----- + Graph, node, and edge data are not necessarily propagated to the new graph. 
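+
+    For example, one iteration applied to the triangle-free 5-cycle yields the
+    Grötzsch graph, with :math:`2 \cdot 5 + 1 = 11` nodes and
+    :math:`3 \cdot 5 + 5 = 20` edges (illustrative doctest):
+
+    >>> G = nx.cycle_graph(5)
+    >>> M = nx.mycielskian(G)
+    >>> M.number_of_nodes(), M.number_of_edges()
+    (11, 20)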
+ + """ + + M = nx.convert_node_labels_to_integers(G) + + for i in range(iterations): + n = M.number_of_nodes() + M.add_nodes_from(range(n, 2 * n)) + old_edges = list(M.edges()) + M.add_edges_from((u, v + n) for u, v in old_edges) + M.add_edges_from((u + n, v) for u, v in old_edges) + M.add_node(2 * n) + M.add_edges_from((u + n, 2 * n) for u in range(n)) + + return M + + +@nx._dispatchable(graphs=None, returns_graph=True) +def mycielski_graph(n): + """Generator for the n_th Mycielski Graph. + + The Mycielski family of graphs is an infinite set of graphs. + :math:`M_1` is the singleton graph, :math:`M_2` is two vertices with an + edge, and, for :math:`i > 2`, :math:`M_i` is the Mycielskian of + :math:`M_{i-1}`. + + More information can be found at + http://mathworld.wolfram.com/MycielskiGraph.html + + Parameters + ---------- + n : int + The desired Mycielski Graph. + + Returns + ------- + M : graph + The n_th Mycielski Graph + + Notes + ----- + The first graph in the Mycielski sequence is the singleton graph. + The Mycielskian of this graph is not the :math:`P_2` graph, but rather the + :math:`P_2` graph with an extra, isolated vertex. The second Mycielski + graph is the :math:`P_2` graph, so the first two are hard coded. + The remaining graphs are generated using the Mycielski operation. + + """ + + if n < 1: + raise nx.NetworkXError("must satisfy n >= 1") + + if n == 1: + return nx.empty_graph(1) + + else: + return mycielskian(nx.path_graph(2), n - 2) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/nonisomorphic_trees.py b/.venv/lib/python3.12/site-packages/networkx/generators/nonisomorphic_trees.py new file mode 100644 index 0000000..2f19f93 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/nonisomorphic_trees.py @@ -0,0 +1,209 @@ +""" +Implementation of the Wright, Richmond, Odlyzko and McKay (WROM) +algorithm for the enumeration of all non-isomorphic free trees of a +given order. Rooted trees are represented by level sequences, i.e., +lists in which the i-th element specifies the distance of vertex i to +the root. + +""" + +__all__ = ["nonisomorphic_trees", "number_of_nonisomorphic_trees"] + +from functools import lru_cache + +import networkx as nx + + +@nx._dispatchable(graphs=None, returns_graph=True) +def nonisomorphic_trees(order): + """Generates lists of nonisomorphic trees + + Parameters + ---------- + order : int + order of the desired tree(s) + + Yields + ------ + list of `networkx.Graph` instances + A list of nonisomorphic trees + """ + + if order < 2: + raise ValueError + # start at the path graph rooted at its center + layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2)) + + while layout is not None: + layout = _next_tree(layout) + if layout is not None: + yield _layout_to_graph(layout) + layout = _next_rooted_tree(layout) + + +@nx._dispatchable(graphs=None) +def number_of_nonisomorphic_trees(order): + """Returns the number of nonisomorphic trees + + Based on an algorithm by Alois P. Heinz in + `OEIS entry A000055 `_. 
Complexity is ``O(n ** 3)`` + + Parameters + ---------- + order : int + order of the desired tree(s) + + Returns + ------- + int + Number of nonisomorphic graphs for the given order + """ + if order < 2: + raise ValueError + return _unlabeled_trees(order) + + +@lru_cache(None) +def _unlabeled_trees(n): + """Implements OEIS A000055 (number of unlabeled trees).""" + + value = 0 + for k in range(n + 1): + value += _rooted_trees(k) * _rooted_trees(n - k) + if n % 2 == 0: + value -= _rooted_trees(n // 2) + return _rooted_trees(n) - value // 2 + + +@lru_cache(None) +def _rooted_trees(n): + """Implements OEIS A000081 (number of unlabeled rooted trees).""" + + if n < 2: + return n + value = 0 + for j in range(1, n): + for d in range(1, n): + if j % d == 0: + value += d * _rooted_trees(d) * _rooted_trees(n - j) + return value // (n - 1) + + +def _next_rooted_tree(predecessor, p=None): + """One iteration of the Beyer-Hedetniemi algorithm.""" + + if p is None: + p = len(predecessor) - 1 + while predecessor[p] == 1: + p -= 1 + if p == 0: + return None + + q = p - 1 + while predecessor[q] != predecessor[p] - 1: + q -= 1 + result = list(predecessor) + for i in range(p, len(result)): + result[i] = result[i - p + q] + return result + + +def _next_tree(candidate): + """One iteration of the Wright, Richmond, Odlyzko and McKay + algorithm.""" + + # valid representation of a free tree if: + # there are at least two vertices at layer 1 + # (this is always the case because we start at the path graph) + left, rest = _split_tree(candidate) + + # and the left subtree of the root + # is less high than the tree with the left subtree removed + left_height = max(left) + rest_height = max(rest) + valid = rest_height >= left_height + + if valid and rest_height == left_height: + # and, if left and rest are of the same height, + # if left does not encompass more vertices + if len(left) > len(rest): + valid = False + # and, if they have the same number or vertices, + # if left does not come after rest lexicographically + elif len(left) == len(rest) and left > rest: + valid = False + + if valid: + return candidate + else: + # jump to the next valid free tree + p = len(left) + new_candidate = _next_rooted_tree(candidate, p) + if candidate[p] > 2: + new_left, new_rest = _split_tree(new_candidate) + new_left_height = max(new_left) + suffix = range(1, new_left_height + 2) + new_candidate[-len(suffix) :] = suffix + return new_candidate + + +def _split_tree(layout): + """Returns a tuple of two layouts, one containing the left + subtree of the root vertex, and one containing the original tree + with the left subtree removed.""" + + one_found = False + m = None + for i in range(len(layout)): + if layout[i] == 1: + if one_found: + m = i + break + else: + one_found = True + + if m is None: + m = len(layout) + + left = [layout[i] - 1 for i in range(1, m)] + rest = [0] + [layout[i] for i in range(m, len(layout))] + return (left, rest) + + +def _layout_to_matrix(layout): + """Create the adjacency matrix for the tree specified by the + given layout (level sequence).""" + + result = [[0] * len(layout) for i in range(len(layout))] + stack = [] + for i in range(len(layout)): + i_level = layout[i] + if stack: + j = stack[-1] + j_level = layout[j] + while j_level >= i_level: + stack.pop() + j = stack[-1] + j_level = layout[j] + result[i][j] = result[j][i] = 1 + stack.append(i) + return result + + +def _layout_to_graph(layout): + """Create a NetworkX Graph for the tree specified by the + given layout(level sequence)""" + G = nx.Graph() + stack 
= [] + for i in range(len(layout)): + i_level = layout[i] + if stack: + j = stack[-1] + j_level = layout[j] + while j_level >= i_level: + stack.pop() + j = stack[-1] + j_level = layout[j] + G.add_edge(i, j) + stack.append(i) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/random_clustered.py b/.venv/lib/python3.12/site-packages/networkx/generators/random_clustered.py new file mode 100644 index 0000000..8fbf855 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/random_clustered.py @@ -0,0 +1,117 @@ +"""Generate graphs with given degree and triangle sequence.""" + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["random_clustered_graph"] + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_clustered_graph(joint_degree_sequence, create_using=None, seed=None): + r"""Generate a random graph with the given joint independent edge degree and + triangle degree sequence. + + This uses a configuration model-like approach to generate a random graph + (with parallel edges and self-loops) by randomly assigning edges to match + the given joint degree sequence. + + The joint degree sequence is a list of pairs of integers of the form + $[(d_{1,i}, d_{1,t}), \dotsc, (d_{n,i}, d_{n,t})]$. According to this list, + vertex $u$ is a member of $d_{u,t}$ triangles and has $d_{u, i}$ other + edges. The number $d_{u,t}$ is the *triangle degree* of $u$ and the number + $d_{u,i}$ is the *independent edge degree*. + + Parameters + ---------- + joint_degree_sequence : list of integer pairs + Each list entry corresponds to the independent edge degree and + triangle degree of a node. + create_using : NetworkX graph constructor, optional (default MultiGraph) + Graph type to create. If graph instance, then cleared before populated. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : MultiGraph + A graph with the specified degree sequence. Nodes are labeled + starting at 0 with an index corresponding to the position in + deg_sequence. + + Raises + ------ + NetworkXError + If the independent edge degree sequence sum is not even + or the triangle degree sequence sum is not divisible by 3. + + Notes + ----- + As described by Miller [1]_ (see also Newman [2]_ for an equivalent + description). + + A non-graphical degree sequence (not realizable by some simple + graph) is allowed since this function returns graphs with self + loops and parallel edges. An exception is raised if the + independent degree sequence does not have an even sum or the + triangle degree sequence sum is not divisible by 3. + + This configuration model-like construction process can lead to + duplicate edges and loops. You can remove the self-loops and + parallel edges (see below) which will likely result in a graph + that doesn't have the exact degree sequence specified. This + "finite-size effect" decreases as the size of the graph increases. + + References + ---------- + .. [1] Joel C. Miller. "Percolation and epidemics in random clustered + networks". In: Physical review. E, Statistical, nonlinear, and soft + matter physics 80 (2 Part 1 August 2009). + .. [2] M. E. J. Newman. "Random Graphs with Clustering". 
+ In: Physical Review Letters 103 (5 July 2009) + + Examples + -------- + >>> deg = [(1, 0), (1, 0), (1, 0), (2, 0), (1, 0), (2, 1), (0, 1), (0, 1)] + >>> G = nx.random_clustered_graph(deg) + + To remove parallel edges: + + >>> G = nx.Graph(G) + + To remove self loops: + + >>> G.remove_edges_from(nx.selfloop_edges(G)) + + """ + # In Python 3, zip() returns an iterator. Make this into a list. + joint_degree_sequence = list(joint_degree_sequence) + + N = len(joint_degree_sequence) + G = nx.empty_graph(N, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + ilist = [] + tlist = [] + for n in G: + degrees = joint_degree_sequence[n] + for icount in range(degrees[0]): + ilist.append(n) + for tcount in range(degrees[1]): + tlist.append(n) + + if len(ilist) % 2 != 0 or len(tlist) % 3 != 0: + raise nx.NetworkXError("Invalid degree sequence") + + seed.shuffle(ilist) + seed.shuffle(tlist) + while ilist: + G.add_edge(ilist.pop(), ilist.pop()) + while tlist: + n1 = tlist.pop() + n2 = tlist.pop() + n3 = tlist.pop() + G.add_edges_from([(n1, n2), (n1, n3), (n2, n3)]) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/random_graphs.py b/.venv/lib/python3.12/site-packages/networkx/generators/random_graphs.py new file mode 100644 index 0000000..90ae0d9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/random_graphs.py @@ -0,0 +1,1400 @@ +""" +Generators for random graphs. + +""" + +import itertools +import math +from collections import defaultdict + +import networkx as nx +from networkx.utils import py_random_state + +from ..utils.misc import check_create_using +from .classic import complete_graph, empty_graph, path_graph, star_graph +from .degree_seq import degree_sequence_tree + +__all__ = [ + "fast_gnp_random_graph", + "gnp_random_graph", + "dense_gnm_random_graph", + "gnm_random_graph", + "erdos_renyi_graph", + "binomial_graph", + "newman_watts_strogatz_graph", + "watts_strogatz_graph", + "connected_watts_strogatz_graph", + "random_regular_graph", + "barabasi_albert_graph", + "dual_barabasi_albert_graph", + "extended_barabasi_albert_graph", + "powerlaw_cluster_graph", + "random_lobster", + "random_shell_graph", + "random_powerlaw_tree", + "random_powerlaw_tree_sequence", + "random_kernel_graph", +] + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def fast_gnp_random_graph(n, p, seed=None, directed=False, *, create_using=None): + """Returns a $G_{n,p}$ random graph, also known as an Erdős-Rényi graph or + a binomial graph. + + Parameters + ---------- + n : int + The number of nodes. + p : float + Probability for edge creation. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True, this function returns a directed graph. + create_using : Graph constructor, optional (default=nx.Graph or nx.DiGraph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph types are not supported and raise a ``NetworkXError``. + By default NetworkX Graph or DiGraph are used depending on `directed`. + + Notes + ----- + The $G_{n,p}$ graph algorithm chooses each of the $[n (n - 1)] / 2$ + (undirected) or $n (n - 1)$ (directed) possible edges with probability $p$. + + This algorithm [1]_ runs in $O(n + m)$ time, where `m` is the expected number of + edges, which equals $p n (n - 1) / 2$. 
This should be faster than + :func:`gnp_random_graph` when $p$ is small and the expected number of edges + is small (that is, the graph is sparse). + + See Also + -------- + gnp_random_graph + + References + ---------- + .. [1] Vladimir Batagelj and Ulrik Brandes, + "Efficient generation of large random networks", + Phys. Rev. E, 71, 036113, 2005. + """ + default = nx.DiGraph if directed else nx.Graph + create_using = check_create_using( + create_using, directed=directed, multigraph=False, default=default + ) + if p <= 0 or p >= 1: + return nx.gnp_random_graph( + n, p, seed=seed, directed=directed, create_using=create_using + ) + + G = empty_graph(n, create_using=create_using) + + lp = math.log(1.0 - p) + + if directed: + v = 1 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= v and v < n: + w = w - v + v = v + 1 + if v < n: + G.add_edge(w, v) + + # Nodes in graph are from 0,n-1 (start with v as the second node index). + v = 1 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= v and v < n: + w = w - v + v = v + 1 + if v < n: + G.add_edge(v, w) + return G + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def gnp_random_graph(n, p, seed=None, directed=False, *, create_using=None): + """Returns a $G_{n,p}$ random graph, also known as an Erdős-Rényi graph + or a binomial graph. + + The $G_{n,p}$ model chooses each of the possible edges with probability $p$. + + Parameters + ---------- + n : int + The number of nodes. + p : float + Probability for edge creation. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True, this function returns a directed graph. + create_using : Graph constructor, optional (default=nx.Graph or nx.DiGraph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph types are not supported and raise a ``NetworkXError``. + By default NetworkX Graph or DiGraph are used depending on `directed`. + + See Also + -------- + fast_gnp_random_graph + + Notes + ----- + This algorithm [2]_ runs in $O(n^2)$ time. For sparse graphs (that is, for + small values of $p$), :func:`fast_gnp_random_graph` is a faster algorithm. + + :func:`binomial_graph` and :func:`erdos_renyi_graph` are + aliases for :func:`gnp_random_graph`. + + >>> nx.binomial_graph is nx.gnp_random_graph + True + >>> nx.erdos_renyi_graph is nx.gnp_random_graph + True + + References + ---------- + .. [1] P. Erdős and A. Rényi, On Random Graphs, Publ. Math. 6, 290 (1959). + .. [2] E. N. Gilbert, Random Graphs, Ann. Math. Stat., 30, 1141 (1959). + """ + default = nx.DiGraph if directed else nx.Graph + create_using = check_create_using( + create_using, directed=directed, multigraph=False, default=default + ) + if p >= 1: + return complete_graph(n, create_using=create_using) + + G = nx.empty_graph(n, create_using=create_using) + if p <= 0: + return G + + edgetool = itertools.permutations if directed else itertools.combinations + for e in edgetool(range(n), 2): + if seed.random() < p: + G.add_edge(*e) + return G + + +# add some aliases to common names +binomial_graph = gnp_random_graph +erdos_renyi_graph = gnp_random_graph + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def dense_gnm_random_graph(n, m, seed=None, *, create_using=None): + """Returns a $G_{n,m}$ random graph. 
+ + In the $G_{n,m}$ model, a graph is chosen uniformly at random from the set + of all graphs with $n$ nodes and $m$ edges. + + This algorithm should be faster than :func:`gnm_random_graph` for dense + graphs. + + Parameters + ---------- + n : int + The number of nodes. + m : int + The number of edges. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + create_using : Graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph and directed types are not supported and raise a ``NetworkXError``. + + See Also + -------- + gnm_random_graph + + Notes + ----- + Algorithm by Keith M. Briggs Mar 31, 2006. + Inspired by Knuth's Algorithm S (Selection sampling technique), + in section 3.4.2 of [1]_. + + References + ---------- + .. [1] Donald E. Knuth, The Art of Computer Programming, + Volume 2/Seminumerical algorithms, Third Edition, Addison-Wesley, 1997. + """ + create_using = check_create_using(create_using, directed=False, multigraph=False) + mmax = n * (n - 1) // 2 + if m >= mmax: + return complete_graph(n, create_using) + G = empty_graph(n, create_using) + + if n == 1: + return G + + u = 0 + v = 1 + t = 0 + k = 0 + while True: + if seed.randrange(mmax - t) < m - k: + G.add_edge(u, v) + k += 1 + if k == m: + return G + t += 1 + v += 1 + if v == n: # go to next row of adjacency matrix + u += 1 + v = u + 1 + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def gnm_random_graph(n, m, seed=None, directed=False, *, create_using=None): + """Returns a $G_{n,m}$ random graph. + + In the $G_{n,m}$ model, a graph is chosen uniformly at random from the set + of all graphs with $n$ nodes and $m$ edges. + + This algorithm should be faster than :func:`dense_gnm_random_graph` for + sparse graphs. + + Parameters + ---------- + n : int + The number of nodes. + m : int + The number of edges. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + create_using : Graph constructor, optional (default=nx.Graph or nx.DiGraph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph types are not supported and raise a ``NetworkXError``. + By default NetworkX Graph or DiGraph are used depending on `directed`. + + See also + -------- + dense_gnm_random_graph + + """ + default = nx.DiGraph if directed else nx.Graph + create_using = check_create_using( + create_using, directed=directed, multigraph=False, default=default + ) + if n == 1: + return nx.empty_graph(n, create_using=create_using) + max_edges = n * (n - 1) if directed else n * (n - 1) / 2.0 + if m >= max_edges: + return complete_graph(n, create_using=create_using) + + G = nx.empty_graph(n, create_using=create_using) + nlist = list(G) + edge_count = 0 + while edge_count < m: + # generate random edge,u,v + u = seed.choice(nlist) + v = seed.choice(nlist) + if u == v or G.has_edge(u, v): + continue + else: + G.add_edge(u, v) + edge_count = edge_count + 1 + return G + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def newman_watts_strogatz_graph(n, k, p, seed=None, *, create_using=None): + """Returns a Newman–Watts–Strogatz small-world graph. + + Parameters + ---------- + n : int + The number of nodes. + k : int + Each node is joined with its `k` nearest neighbors in a ring + topology. 
+ p : float + The probability of adding a new edge for each edge. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + create_using : Graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph and directed types are not supported and raise a ``NetworkXError``. + + Notes + ----- + First create a ring over $n$ nodes [1]_. Then each node in the ring is + connected with its $k$ nearest neighbors (or $k - 1$ neighbors if $k$ + is odd). Then shortcuts are created by adding new edges as follows: for + each edge $(u, v)$ in the underlying "$n$-ring with $k$ nearest + neighbors" with probability $p$ add a new edge $(u, w)$ with + randomly-chosen existing node $w$. In contrast with + :func:`watts_strogatz_graph`, no edges are removed. + + See Also + -------- + watts_strogatz_graph + + References + ---------- + .. [1] M. E. J. Newman and D. J. Watts, + Renormalization group analysis of the small-world network model, + Physics Letters A, 263, 341, 1999. + https://doi.org/10.1016/S0375-9601(99)00757-4 + """ + create_using = check_create_using(create_using, directed=False, multigraph=False) + if k > n: + raise nx.NetworkXError("k>=n, choose smaller k or larger n") + + # If k == n the graph return is a complete graph + if k == n: + return nx.complete_graph(n, create_using) + + G = empty_graph(n, create_using) + nlist = list(G.nodes()) + fromv = nlist + # connect the k/2 neighbors + for j in range(1, k // 2 + 1): + tov = fromv[j:] + fromv[0:j] # the first j are now last + for i in range(len(fromv)): + G.add_edge(fromv[i], tov[i]) + # for each edge u-v, with probability p, randomly select existing + # node w and add new edge u-w + e = list(G.edges()) + for u, v in e: + if seed.random() < p: + w = seed.choice(nlist) + # no self-loops and reject if edge u-w exists + # is that the correct NWS model? + while w == u or G.has_edge(u, w): + w = seed.choice(nlist) + if G.degree(u) >= n - 1: + break # skip this rewiring + else: + G.add_edge(u, w) + return G + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def watts_strogatz_graph(n, k, p, seed=None, *, create_using=None): + """Returns a Watts–Strogatz small-world graph. + + Parameters + ---------- + n : int + The number of nodes + k : int + Each node is joined with its `k` nearest neighbors in a ring + topology. + p : float + The probability of rewiring each edge + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + create_using : Graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph and directed types are not supported and raise a ``NetworkXError``. + + See Also + -------- + newman_watts_strogatz_graph + connected_watts_strogatz_graph + + Notes + ----- + First create a ring over $n$ nodes [1]_. Then each node in the ring is joined + to its $k$ nearest neighbors (or $k - 1$ neighbors if $k$ is odd). + Then shortcuts are created by replacing some edges as follows: for each + edge $(u, v)$ in the underlying "$n$-ring with $k$ nearest neighbors" + with probability $p$ replace it with a new edge $(u, w)$ with uniformly + random choice of existing node $w$. + + In contrast with :func:`newman_watts_strogatz_graph`, the random rewiring + does not increase the number of edges. 
The rewired graph is not guaranteed + to be connected as in :func:`connected_watts_strogatz_graph`. + + References + ---------- + .. [1] Duncan J. Watts and Steven H. Strogatz, + Collective dynamics of small-world networks, + Nature, 393, pp. 440--442, 1998. + """ + create_using = check_create_using(create_using, directed=False, multigraph=False) + if k > n: + raise nx.NetworkXError("k>n, choose smaller k or larger n") + + # If k == n, the graph is complete not Watts-Strogatz + if k == n: + G = nx.complete_graph(n, create_using) + return G + + G = nx.empty_graph(n, create_using=create_using) + nodes = list(range(n)) # nodes are labeled 0 to n-1 + # connect each node to k/2 neighbors + for j in range(1, k // 2 + 1): + targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list + G.add_edges_from(zip(nodes, targets)) + # rewire edges from each node + # loop over all nodes in order (label) and neighbors in order (distance) + # no self loops or multiple edges allowed + for j in range(1, k // 2 + 1): # outer loop is neighbors + targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list + # inner loop in node order + for u, v in zip(nodes, targets): + if seed.random() < p: + w = seed.choice(nodes) + # Enforce no self-loops or multiple edges + while w == u or G.has_edge(u, w): + w = seed.choice(nodes) + if G.degree(u) >= n - 1: + break # skip this rewiring + else: + G.remove_edge(u, v) + G.add_edge(u, w) + return G + + +@py_random_state(4) +@nx._dispatchable(graphs=None, returns_graph=True) +def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None, *, create_using=None): + """Returns a connected Watts–Strogatz small-world graph. + + Attempts to generate a connected graph by repeated generation of + Watts–Strogatz small-world graphs. An exception is raised if the maximum + number of tries is exceeded. + + Parameters + ---------- + n : int + The number of nodes + k : int + Each node is joined with its `k` nearest neighbors in a ring + topology. + p : float + The probability of rewiring each edge + tries : int + Number of attempts to generate a connected graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + create_using : Graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph and directed types are not supported and raise a ``NetworkXError``. + + Notes + ----- + First create a ring over $n$ nodes [1]_. Then each node in the ring is joined + to its $k$ nearest neighbors (or $k - 1$ neighbors if $k$ is odd). + Then shortcuts are created by replacing some edges as follows: for each + edge $(u, v)$ in the underlying "$n$-ring with $k$ nearest neighbors" + with probability $p$ replace it with a new edge $(u, w)$ with uniformly + random choice of existing node $w$. + The entire process is repeated until a connected graph results. + + See Also + -------- + newman_watts_strogatz_graph + watts_strogatz_graph + + References + ---------- + .. [1] Duncan J. Watts and Steven H. Strogatz, + Collective dynamics of small-world networks, + Nature, 393, pp. 440--442, 1998. 
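+
+    Examples
+    --------
+    An illustrative small instance; whenever the call returns instead of
+    raising, the result is connected by construction:
+
+    >>> G = nx.connected_watts_strogatz_graph(20, 4, 0.1, seed=42)
+    >>> nx.is_connected(G)
+    True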
+ """ + for i in range(tries): + # seed is an RNG so should change sequence each call + G = watts_strogatz_graph(n, k, p, seed, create_using=create_using) + if nx.is_connected(G): + return G + raise nx.NetworkXError("Maximum number of tries exceeded") + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_regular_graph(d, n, seed=None, *, create_using=None): + r"""Returns a random $d$-regular graph on $n$ nodes. + + A regular graph is a graph where each node has the same number of neighbors. + + The resulting graph has no self-loops or parallel edges. + + Parameters + ---------- + d : int + The degree of each node. + n : integer + The number of nodes. The value of $n \times d$ must be even. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + create_using : Graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph and directed types are not supported and raise a ``NetworkXError``. + + Notes + ----- + The nodes are numbered from $0$ to $n - 1$. + + Kim and Vu's paper [2]_ shows that this algorithm samples in an + asymptotically uniform way from the space of random graphs when + $d = O(n^{1 / 3 - \epsilon})$. + + Raises + ------ + + NetworkXError + If $n \times d$ is odd or $d$ is greater than or equal to $n$. + + References + ---------- + .. [1] A. Steger and N. Wormald, + Generating random regular graphs quickly, + Probability and Computing 8 (1999), 377-396, 1999. + https://doi.org/10.1017/S0963548399003867 + + .. [2] Jeong Han Kim and Van H. Vu, + Generating random regular graphs, + Proceedings of the thirty-fifth ACM symposium on Theory of computing, + San Diego, CA, USA, pp 213--222, 2003. + http://portal.acm.org/citation.cfm?id=780542.780576 + """ + create_using = check_create_using(create_using, directed=False, multigraph=False) + if (n * d) % 2 != 0: + raise nx.NetworkXError("n * d must be even") + + if not 0 <= d < n: + raise nx.NetworkXError("the 0 <= d < n inequality must be satisfied") + + G = nx.empty_graph(n, create_using=create_using) + + if d == 0: + return G + + def _suitable(edges, potential_edges): + # Helper subroutine to check if there are suitable edges remaining + # If False, the generation of the graph has failed + if not potential_edges: + return True + for s1 in potential_edges: + for s2 in potential_edges: + # Two iterators on the same dictionary are guaranteed + # to visit it in the same order if there are no + # intervening modifications. + if s1 == s2: + # Only need to consider s1-s2 pair one time + break + if s1 > s2: + s1, s2 = s2, s1 + if (s1, s2) not in edges: + return True + return False + + def _try_creation(): + # Attempt to create an edge set + + edges = set() + stubs = list(range(n)) * d + + while stubs: + potential_edges = defaultdict(lambda: 0) + seed.shuffle(stubs) + stubiter = iter(stubs) + for s1, s2 in zip(stubiter, stubiter): + if s1 > s2: + s1, s2 = s2, s1 + if s1 != s2 and ((s1, s2) not in edges): + edges.add((s1, s2)) + else: + potential_edges[s1] += 1 + potential_edges[s2] += 1 + + if not _suitable(edges, potential_edges): + return None # failed to find suitable edge set + + stubs = [ + node + for node, potential in potential_edges.items() + for _ in range(potential) + ] + return edges + + # Even though a suitable edge set exists, + # the generation of such a set is not guaranteed. + # Try repeatedly to find one. 
+ edges = _try_creation() + while edges is None: + edges = _try_creation() + G.add_edges_from(edges) + + return G + + +def _random_subset(seq, m, rng): + """Return m unique elements from seq. + + This differs from random.sample which can return repeated + elements if seq holds repeated elements. + + Note: rng is a random.Random or numpy.random.RandomState instance. + """ + targets = set() + while len(targets) < m: + x = rng.choice(seq) + targets.add(x) + return targets + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def barabasi_albert_graph(n, m, seed=None, initial_graph=None, *, create_using=None): + """Returns a random graph using Barabási–Albert preferential attachment + + A graph of $n$ nodes is grown by attaching new nodes each with $m$ + edges that are preferentially attached to existing nodes with high degree. + + Parameters + ---------- + n : int + Number of nodes + m : int + Number of edges to attach from a new node to existing nodes + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + initial_graph : Graph or None (default) + Initial network for Barabási–Albert algorithm. + It should be a connected graph for most use cases. + A copy of `initial_graph` is used. + If None, starts from a star graph on (m+1) nodes. + create_using : Graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph and directed types are not supported and raise a ``NetworkXError``. + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If `m` does not satisfy ``1 <= m < n``, or + the initial graph number of nodes m0 does not satisfy ``m <= m0 <= n``. + + References + ---------- + .. [1] A. L. Barabási and R. Albert "Emergence of scaling in + random networks", Science 286, pp 509-512, 1999. + """ + create_using = check_create_using(create_using, directed=False, multigraph=False) + if m < 1 or m >= n: + raise nx.NetworkXError( + f"Barabási–Albert network must have m >= 1 and m < n, m = {m}, n = {n}" + ) + + if initial_graph is None: + # Default initial graph : star graph on (m + 1) nodes + G = star_graph(m, create_using) + else: + if len(initial_graph) < m or len(initial_graph) > n: + raise nx.NetworkXError( + f"Barabási–Albert initial graph needs between m={m} and n={n} nodes" + ) + G = initial_graph.copy() + + # List of existing nodes, with nodes repeated once for each adjacent edge + repeated_nodes = [n for n, d in G.degree() for _ in range(d)] + # Start adding the other n - m0 nodes. + source = len(G) + while source < n: + # Now choose m unique nodes from the existing nodes + # Pick uniformly from repeated_nodes (preferential attachment) + targets = _random_subset(repeated_nodes, m, seed) + # Add edges to m nodes from the source. + G.add_edges_from(zip([source] * m, targets)) + # Add one node to the list for each new edge just created. + repeated_nodes.extend(targets) + # And the new node "source" has m edges to add to the list. 
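+        # (Each node appears in repeated_nodes once per incident edge, so a
+        # uniform draw selects nodes with probability proportional to degree,
+        # which is exactly the preferential attachment rule.)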
+ repeated_nodes.extend([source] * m) + + source += 1 + return G + + +@py_random_state(4) +@nx._dispatchable(graphs=None, returns_graph=True) +def dual_barabasi_albert_graph( + n, m1, m2, p, seed=None, initial_graph=None, *, create_using=None +): + """Returns a random graph using dual Barabási–Albert preferential attachment + + A graph of $n$ nodes is grown by attaching new nodes each with either $m_1$ + edges (with probability $p$) or $m_2$ edges (with probability $1-p$) that + are preferentially attached to existing nodes with high degree. + + Parameters + ---------- + n : int + Number of nodes + m1 : int + Number of edges to link each new node to existing nodes with probability $p$ + m2 : int + Number of edges to link each new node to existing nodes with probability $1-p$ + p : float + The probability of attaching $m_1$ edges (as opposed to $m_2$ edges) + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + initial_graph : Graph or None (default) + Initial network for Barabási–Albert algorithm. + A copy of `initial_graph` is used. + It should be connected for most use cases. + If None, starts from an star graph on max(m1, m2) + 1 nodes. + create_using : Graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph and directed types are not supported and raise a ``NetworkXError``. + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If `m1` and `m2` do not satisfy ``1 <= m1,m2 < n``, or + `p` does not satisfy ``0 <= p <= 1``, or + the initial graph number of nodes m0 does not satisfy m1, m2 <= m0 <= n. + + References + ---------- + .. [1] N. Moshiri "The dual-Barabasi-Albert model", arXiv:1810.10538. + """ + create_using = check_create_using(create_using, directed=False, multigraph=False) + if m1 < 1 or m1 >= n: + raise nx.NetworkXError( + f"Dual Barabási–Albert must have m1 >= 1 and m1 < n, m1 = {m1}, n = {n}" + ) + if m2 < 1 or m2 >= n: + raise nx.NetworkXError( + f"Dual Barabási–Albert must have m2 >= 1 and m2 < n, m2 = {m2}, n = {n}" + ) + if p < 0 or p > 1: + raise nx.NetworkXError( + f"Dual Barabási–Albert network must have 0 <= p <= 1, p = {p}" + ) + + # For simplicity, if p == 0 or 1, just return BA + if p == 1: + return barabasi_albert_graph(n, m1, seed, create_using=create_using) + elif p == 0: + return barabasi_albert_graph(n, m2, seed, create_using=create_using) + + if initial_graph is None: + # Default initial graph : star graph on max(m1, m2) nodes + G = star_graph(max(m1, m2), create_using) + else: + if len(initial_graph) < max(m1, m2) or len(initial_graph) > n: + raise nx.NetworkXError( + f"Barabási–Albert initial graph must have between " + f"max(m1, m2) = {max(m1, m2)} and n = {n} nodes" + ) + G = initial_graph.copy() + + # Target nodes for new edges + targets = list(G) + # List of existing nodes, with nodes repeated once for each adjacent edge + repeated_nodes = [n for n, d in G.degree() for _ in range(d)] + # Start adding the remaining nodes. + source = len(G) + while source < n: + # Pick which m to use (m1 or m2) + if seed.random() < p: + m = m1 + else: + m = m2 + # Now choose m unique nodes from the existing nodes + # Pick uniformly from repeated_nodes (preferential attachment) + targets = _random_subset(repeated_nodes, m, seed) + # Add edges to m nodes from the source. + G.add_edges_from(zip([source] * m, targets)) + # Add one node to the list for each new edge just created. 
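+        # (As in barabasi_albert_graph, the multiplicity of each node in
+        # repeated_nodes equals its degree, so both the m1 and m2 steps
+        # attach preferentially.)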
+        repeated_nodes.extend(targets)
+        # And the new node "source" has m edges to add to the list.
+        repeated_nodes.extend([source] * m)
+
+        source += 1
+    return G
+
+
+@py_random_state(4)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def extended_barabasi_albert_graph(n, m, p, q, seed=None, *, create_using=None):
+    """Returns an extended Barabási–Albert model graph.
+
+    An extended Barabási–Albert model graph is a random graph constructed
+    using preferential attachment. The extended model allows new edges,
+    rewired edges or new nodes. Based on the probabilities $p$ and $q$
+    with $p + q < 1$, the growing behavior of the graph is determined as:
+
+    1) With $p$ probability, $m$ new edges are added to the graph,
+       starting from randomly chosen existing nodes and attached preferentially at the
+       other end.
+
+    2) With $q$ probability, $m$ existing edges are rewired
+       by randomly choosing an edge and rewiring one end to a preferentially chosen node.
+
+    3) With $(1 - p - q)$ probability, $m$ new nodes are added to the graph
+       with edges attached preferentially.
+
+    When $p = q = 0$, the model behaves just like the Barabási–Albert model.
+
+    Parameters
+    ----------
+    n : int
+        Number of nodes
+    m : int
+        Number of edges with which a new node attaches to existing nodes
+    p : float
+        Probability value for adding an edge between existing nodes. p + q < 1
+    q : float
+        Probability value of rewiring of existing edges. p + q < 1
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    create_using : Graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+        Multigraph and directed types are not supported and raise a ``NetworkXError``.
+
+    Returns
+    -------
+    G : Graph
+
+    Raises
+    ------
+    NetworkXError
+        If `m` does not satisfy ``1 <= m < n`` or ``1 >= p + q``
+
+    References
+    ----------
+    .. [1] Albert, R., & Barabási, A. L. (2000)
+       Topology of evolving networks: local events and universality
+       Physical review letters, 85(24), 5234.
+    """
+    create_using = check_create_using(create_using, directed=False, multigraph=False)
+    if m < 1 or m >= n:
+        msg = f"Extended Barabasi-Albert network needs m>=1 and m<n, m={m}, n={n}"
+        raise nx.NetworkXError(msg)
+    if p + q >= 1:
+        msg = f"Extended Barabasi-Albert network needs p + q <= 1, p={p}, q={q}"
+        raise nx.NetworkXError(msg)
+
+    # Add m initial nodes (m0 in barabasi-speak)
+    G = empty_graph(m, create_using)
+
+    # List of nodes to represent the preferential attachment random selection.
+    # At the creation of the graph, all nodes are added to the list
+    # so that even nodes that are not connected have a chance to get selected,
+    # for rewiring and adding of edges.
+    # With each new edge, nodes at the ends of the edge are added to the list.
+    attachment_preference = []
+    attachment_preference.extend(range(m))
+
+    # Start adding the other n-m nodes. The first node is m.
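+    # Each pass of the loop below draws a_probability once and performs one
+    # action: add m new edges (probability p), rewire m edges (probability q),
+    # or grow the graph by one new node; if a drawn action is infeasible
+    # (capacity/edge checks), the node-addition branch runs instead.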
+ new_node = m + while new_node < n: + a_probability = seed.random() + + # Total number of edges of a Clique of all the nodes + clique_degree = len(G) - 1 + clique_size = (len(G) * clique_degree) / 2 + + # Adding m new edges, if there is room to add them + if a_probability < p and G.size() <= clique_size - m: + # Select the nodes where an edge can be added + eligible_nodes = [nd for nd, deg in G.degree() if deg < clique_degree] + for i in range(m): + # Choosing a random source node from eligible_nodes + src_node = seed.choice(eligible_nodes) + + # Picking a possible node that is not 'src_node' or + # neighbor with 'src_node', with preferential attachment + prohibited_nodes = list(G[src_node]) + prohibited_nodes.append(src_node) + # This will raise an exception if the sequence is empty + dest_node = seed.choice( + [nd for nd in attachment_preference if nd not in prohibited_nodes] + ) + # Adding the new edge + G.add_edge(src_node, dest_node) + + # Appending both nodes to add to their preferential attachment + attachment_preference.append(src_node) + attachment_preference.append(dest_node) + + # Adjusting the eligible nodes. Degree may be saturated. + if G.degree(src_node) == clique_degree: + eligible_nodes.remove(src_node) + if G.degree(dest_node) == clique_degree and dest_node in eligible_nodes: + eligible_nodes.remove(dest_node) + + # Rewiring m edges, if there are enough edges + elif p <= a_probability < (p + q) and m <= G.size() < clique_size: + # Selecting nodes that have at least 1 edge but that are not + # fully connected to ALL other nodes (center of star). + # These nodes are the pivot nodes of the edges to rewire + eligible_nodes = [nd for nd, deg in G.degree() if 0 < deg < clique_degree] + for i in range(m): + # Choosing a random source node + node = seed.choice(eligible_nodes) + + # The available nodes do have a neighbor at least. + nbr_nodes = list(G[node]) + + # Choosing the other end that will get detached + src_node = seed.choice(nbr_nodes) + + # Picking a target node that is not 'node' or + # neighbor with 'node', with preferential attachment + nbr_nodes.append(node) + dest_node = seed.choice( + [nd for nd in attachment_preference if nd not in nbr_nodes] + ) + # Rewire + G.remove_edge(node, src_node) + G.add_edge(node, dest_node) + + # Adjusting the preferential attachment list + attachment_preference.remove(src_node) + attachment_preference.append(dest_node) + + # Adjusting the eligible nodes. + # nodes may be saturated or isolated. + if G.degree(src_node) == 0 and src_node in eligible_nodes: + eligible_nodes.remove(src_node) + if dest_node in eligible_nodes: + if G.degree(dest_node) == clique_degree: + eligible_nodes.remove(dest_node) + else: + if G.degree(dest_node) == 1: + eligible_nodes.append(dest_node) + + # Adding new node with m edges + else: + # Select the edges' nodes by preferential attachment + targets = _random_subset(attachment_preference, m, seed) + G.add_edges_from(zip([new_node] * m, targets)) + + # Add one node to the list for each new edge just created. + attachment_preference.extend(targets) + # The new node has m edges to it, plus itself: m + 1 + attachment_preference.extend([new_node] * (m + 1)) + new_node += 1 + return G + + +@py_random_state(3) +@nx._dispatchable(graphs=None, returns_graph=True) +def powerlaw_cluster_graph(n, m, p, seed=None, *, create_using=None): + """Holme and Kim algorithm for growing graphs with powerlaw + degree distribution and approximate average clustering. 
+
+    Parameters
+    ----------
+    n : int
+        the number of nodes
+    m : int
+        the number of random edges to add for each new node
+    p : float,
+        Probability of adding a triangle after adding a random edge
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    create_using : Graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+        Multigraph and directed types are not supported and raise a ``NetworkXError``.
+
+    Notes
+    -----
+    The average clustering has a hard time getting above a certain
+    cutoff that depends on `m`. This cutoff is often quite low. The
+    transitivity (fraction of triangles to possible triangles) seems to
+    decrease with network size.
+
+    It is essentially the Barabási–Albert (BA) growth model with an
+    extra step that each random edge is followed by a chance of
+    making an edge to one of its neighbors too (and thus a triangle).
+
+    This algorithm improves on BA in the sense that it enables a
+    higher average clustering to be attained if desired.
+
+    It seems possible to have a disconnected graph with this algorithm
+    since the initial `m` nodes may not be all linked to a new node
+    on the first iteration like the BA model.
+
+    Raises
+    ------
+    NetworkXError
+        If `m` does not satisfy ``1 <= m <= n`` or `p` does not
+        satisfy ``0 <= p <= 1``.
+
+    References
+    ----------
+    .. [1] P. Holme and B. J. Kim,
+       "Growing scale-free networks with tunable clustering",
+       Phys. Rev. E, 65, 026107, 2002.
+    """
+    create_using = check_create_using(create_using, directed=False, multigraph=False)
+    if m < 1 or n < m:
+        raise nx.NetworkXError(f"NetworkXError must have m>1 and m<n, m={m}, n={n}")
+    if p > 1 or p < 0:
+        raise nx.NetworkXError(f"NetworkXError p must be in [0,1], p={p}")
+
+    G = empty_graph(m, create_using)  # add m initial nodes (m0 in barabasi-speak)
+    repeated_nodes = list(G)  # list of existing nodes to sample from
+    # with nodes repeated once for each adjacent edge
+    source = m  # next node is m
+    while source < n:  # Now add the other n-1 nodes
+        possible_targets = _random_subset(repeated_nodes, m, seed)
+        # do one preferential attachment for new node
+        target = possible_targets.pop()
+        G.add_edge(source, target)
+        repeated_nodes.append(target)  # add one node to list for each new link
+        count = 1
+        while count < m:  # add m-1 more new links
+            if seed.random() < p:  # clustering step: add triangle
+                neighborhood = [
+                    nbr
+                    for nbr in G.neighbors(target)
+                    if not G.has_edge(source, nbr) and nbr != source
+                ]
+                if neighborhood:  # if there is a neighbor without a link
+                    nbr = seed.choice(neighborhood)
+                    G.add_edge(source, nbr)  # add triangle
+                    repeated_nodes.append(nbr)
+                    count = count + 1
+                    continue  # go to top of while loop
+            # else do preferential attachment step if above fails
+            target = possible_targets.pop()
+            G.add_edge(source, target)
+            repeated_nodes.append(target)
+            count = count + 1
+
+        repeated_nodes.extend([source] * m)  # add source node to list m times
+        source += 1
+    return G
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def random_lobster(n, p1, p2, seed=None, *, create_using=None):
+    """Returns a random lobster graph.
+
+    A lobster is a tree that reduces to a caterpillar when pruning all
+    leaf nodes. A caterpillar is a tree that reduces to a path graph
+    when pruning all leaf nodes; setting `p2` to zero produces a caterpillar.
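+
+    Since the construction only attaches new pendant nodes to an initial
+    backbone path, any nonempty result is a tree (illustrative doctest; the
+    backbone length is random, so sizes vary with the seed):
+
+    >>> G = nx.random_lobster(20, 0.6, 0.4, seed=7)
+    >>> len(G) == 0 or nx.is_tree(G)
+    True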
+
+    This implementation iterates on the probabilities `p1` and `p2` to add
+    edges at levels 1 and 2, respectively. Graphs are therefore constructed
+    iteratively with uniform randomness at each level rather than being selected
+    uniformly at random from the set of all possible lobsters.
+
+    Parameters
+    ----------
+    n : int
+        The expected number of nodes in the backbone
+    p1 : float
+        Probability of adding an edge to the backbone
+    p2 : float
+        Probability of adding an edge one level beyond backbone
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    create_using : Graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+        Multigraph and directed types are not supported and raise a ``NetworkXError``.
+
+    Raises
+    ------
+    NetworkXError
+        If `p1` or `p2` parameters are >= 1 because the while loops would never finish.
+    """
+    create_using = check_create_using(create_using, directed=False, multigraph=False)
+    p1, p2 = abs(p1), abs(p2)
+    if any(p >= 1 for p in [p1, p2]):
+        raise nx.NetworkXError("Probability values for `p1` and `p2` must both be < 1.")
+
+    # a necessary ingredient in any self-respecting graph library
+    llen = int(2 * seed.random() * n + 0.5)
+    L = path_graph(llen, create_using)
+    # build caterpillar: add edges to path graph with probability p1
+    current_node = llen - 1
+    for n in range(llen):
+        while seed.random() < p1:  # add fuzzy caterpillar parts
+            current_node += 1
+            L.add_edge(n, current_node)
+            cat_node = current_node
+            while seed.random() < p2:  # add crunchy lobster bits
+                current_node += 1
+                L.add_edge(cat_node, current_node)
+    return L  # voila, un lobster!
+
+
+@py_random_state(1)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def random_shell_graph(constructor, seed=None, *, create_using=None):
+    """Returns a random shell graph for the constructor given.
+
+    Parameters
+    ----------
+    constructor : list of three-tuples
+        Represents the parameters for a shell, starting at the center
+        shell. Each element of the list must be of the form `(n, m,
+        d)`, where `n` is the number of nodes in the shell, `m` is
+        the number of edges in the shell, and `d` is the ratio of
+        inter-shell (next) edges to intra-shell edges. If `d` is zero,
+        there will be no intra-shell edges, and if `d` is one there
+        will be all possible intra-shell edges.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    create_using : Graph constructor, optional (default=nx.Graph)
+        Graph type to create. Graph instances are not supported.
+        Multigraph and directed types are not supported and raise a ``NetworkXError``.
+ + Examples + -------- + >>> constructor = [(10, 20, 0.8), (20, 40, 0.8)] + >>> G = nx.random_shell_graph(constructor) + + """ + create_using = check_create_using(create_using, directed=False, multigraph=False) + G = empty_graph(0, create_using) + + glist = [] + intra_edges = [] + nnodes = 0 + # create gnm graphs for each shell + for n, m, d in constructor: + inter_edges = int(m * d) + intra_edges.append(m - inter_edges) + g = nx.convert_node_labels_to_integers( + gnm_random_graph(n, inter_edges, seed=seed, create_using=G.__class__), + first_label=nnodes, + ) + glist.append(g) + nnodes += n + G = nx.operators.union(G, g) + + # connect the shells randomly + for gi in range(len(glist) - 1): + nlist1 = list(glist[gi]) + nlist2 = list(glist[gi + 1]) + total_edges = intra_edges[gi] + edge_count = 0 + while edge_count < total_edges: + u = seed.choice(nlist1) + v = seed.choice(nlist2) + if u == v or G.has_edge(u, v): + continue + else: + G.add_edge(u, v) + edge_count = edge_count + 1 + return G + + +@py_random_state(2) +@nx._dispatchable(graphs=None, returns_graph=True) +def random_powerlaw_tree(n, gamma=3, seed=None, tries=100, *, create_using=None): + """Returns a tree with a power law degree distribution. + + Parameters + ---------- + n : int + The number of nodes. + gamma : float + Exponent of the power law. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + tries : int + Number of attempts to adjust the sequence to make it a tree. + create_using : Graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + Multigraph and directed types are not supported and raise a ``NetworkXError``. + + Raises + ------ + NetworkXError + If no valid sequence is found within the maximum number of + attempts. + + Notes + ----- + A trial power law degree sequence is chosen and then elements are + swapped with new elements from a powerlaw distribution until the + sequence makes a tree (by checking, for example, that the number of + edges is one smaller than the number of nodes). + + """ + create_using = check_create_using(create_using, directed=False, multigraph=False) + # This call may raise a NetworkXError if the number of tries is succeeded. + seq = random_powerlaw_tree_sequence(n, gamma=gamma, seed=seed, tries=tries) + G = degree_sequence_tree(seq, create_using) + return G + + +@py_random_state(2) +@nx._dispatchable(graphs=None) +def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100): + """Returns a degree sequence for a tree with a power law distribution. + + Parameters + ---------- + n : int, + The number of nodes. + gamma : float + Exponent of the power law. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + tries : int + Number of attempts to adjust the sequence to make it a tree. + + Raises + ------ + NetworkXError + If no valid sequence is found within the maximum number of + attempts. + + Notes + ----- + A trial power law degree sequence is chosen and then elements are + swapped with new elements from a power law distribution until + the sequence makes a tree (by checking, for example, that the number of + edges is one smaller than the number of nodes). 
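+
+    Any sequence this function returns sums to ``2 * (n - 1)``, the degree sum
+    of a tree on `n` nodes (illustrative doctest; it assumes the default
+    `tries` suffices for this seed, otherwise a NetworkXError is raised):
+
+    >>> seq = nx.random_powerlaw_tree_sequence(10, seed=42)
+    >>> sum(seq) == 2 * (10 - 1)
+    True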
+
+ """
+ # get trial sequence
+ z = nx.utils.powerlaw_sequence(n, exponent=gamma, seed=seed)
+ # round to integer values in the range [0,n]
+ zseq = [min(n, max(round(s), 0)) for s in z]
+
+ # another sequence to swap values from
+ z = nx.utils.powerlaw_sequence(tries, exponent=gamma, seed=seed)
+ # round to integer values in the range [0,n]
+ swap = [min(n, max(round(s), 0)) for s in z]
+
+ for deg in swap:
+ # If this degree sequence can be the degree sequence of a tree, return
+ # it. It can be a tree if the number of edges is one fewer than the
+ # number of nodes, or in other words, `n - sum(zseq) / 2 == 1`. We
+ # use an equivalent condition below that avoids floating point
+ # operations.
+ if 2 * n - sum(zseq) == 2:
+ return zseq
+ index = seed.randint(0, n - 1)
+ zseq[index] = swap.pop()
+
+ raise nx.NetworkXError(
+ f"Exceeded max ({tries}) attempts for a valid tree sequence."
+ )
+
+
+@py_random_state(3)
+@nx._dispatchable(graphs=None, returns_graph=True)
+def random_kernel_graph(
+ n, kernel_integral, kernel_root=None, seed=None, *, create_using=None
+):
+ r"""Returns a random graph based on the specified kernel.
+
+ The algorithm chooses each of the $[n(n-1)]/2$ possible edges with
+ probability specified by a kernel $\kappa(x,y)$ [1]_. The kernel
+ $\kappa(x,y)$ must be a symmetric (in $x,y$), non-negative,
+ bounded function.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes
+ kernel_integral : function
+ Function that returns the definite integral of the kernel $\kappa(x,y)$,
+ $F(y,a,b) := \int_a^b \kappa(x,y)dx$
+ kernel_root : function (optional)
+ Function that returns the root $b$ of the equation $F(y,a,b) = r$.
+ If None, the root is found using :func:`scipy.optimize.brentq`
+ (this requires SciPy).
+ seed : integer, random_state, or None (default)
+ Indicator of random number generation state.
+ See :ref:`Randomness`.
+ create_using : Graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+ Multigraph and directed types are not supported and raise a ``NetworkXError``.
+
+ Notes
+ -----
+ The kernel is specified through its definite integral, which must be
+ provided as one of the arguments. If the integral and root of the
+ kernel integral can be found in $O(1)$ time then this algorithm runs in
+ time $O(n+m)$ where $m$ is the expected number of edges [2]_.
+
+ The nodes are set to integers from $0$ to $n-1$.
+
+ Examples
+ --------
+ Generate an Erdős–Rényi random graph $G(n,c/n)$, with kernel
+ $\kappa(x,y)=c$ where $c$ is the mean expected degree.
+
+ >>> def integral(u, w, z):
+ ... return c * (z - w)
+ >>> def root(u, w, r):
+ ... return r / c + w
+ >>> c = 1
+ >>> graph = nx.random_kernel_graph(1000, integral, root)
+
+ See Also
+ --------
+ gnp_random_graph
+ expected_degree_graph
+
+ References
+ ----------
+ .. [1] Bollobás, Béla, Janson, S. and Riordan, O.
+ "The phase transition in inhomogeneous random graphs",
+ *Random Structures Algorithms*, 31, 3--122, 2007.
+
+ .. [2] Hagberg A, Lemons N (2015),
+ "Fast Generation of Sparse Random Kernel Graphs".
+ PLoS ONE 10(9): e0135177, 2015.
+ doi:10.1371/journal.pone.0135177
+ """
+ create_using = check_create_using(create_using, directed=False, multigraph=False)
+ if kernel_root is None:
+ import scipy as sp
+
+ def kernel_root(y, a, r):
+ def my_function(b):
+ return kernel_integral(y, a, b) - r
+
+ return sp.optimize.brentq(my_function, a, 1)
+
+ graph = nx.empty_graph(create_using=create_using)
+ graph.add_nodes_from(range(n))
+ (i, j) = (1, 1)
+ while i < n:
+ r = -math.log(1 - seed.random()) # (1-seed.random()) in (0, 1]
+ if kernel_integral(i / n, j / n, 1) <= r:
+ i, j = i + 1, i + 1
+ else:
+ j = math.ceil(n * kernel_root(i / n, j / n, r))
+ graph.add_edge(i - 1, j - 1)
+ return graph
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/small.py b/.venv/lib/python3.12/site-packages/networkx/generators/small.py
new file mode 100644
index 0000000..719cf49
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/small.py
@@ -0,0 +1,996 @@
+"""
+Various small and named graphs, together with some compact generators.
+
+"""
+
+__all__ = [
+ "LCF_graph",
+ "bull_graph",
+ "chvatal_graph",
+ "cubical_graph",
+ "desargues_graph",
+ "diamond_graph",
+ "dodecahedral_graph",
+ "frucht_graph",
+ "heawood_graph",
+ "hoffman_singleton_graph",
+ "house_graph",
+ "house_x_graph",
+ "icosahedral_graph",
+ "krackhardt_kite_graph",
+ "moebius_kantor_graph",
+ "octahedral_graph",
+ "pappus_graph",
+ "petersen_graph",
+ "sedgewick_maze_graph",
+ "tetrahedral_graph",
+ "truncated_cube_graph",
+ "truncated_tetrahedron_graph",
+ "tutte_graph",
+]
+
+from functools import wraps
+
+import networkx as nx
+from networkx.exception import NetworkXError
+from networkx.generators.classic import (
+ complete_graph,
+ cycle_graph,
+ empty_graph,
+ path_graph,
+)
+
+
+def _raise_on_directed(func):
+ """
+ A decorator which inspects the `create_using` argument and raises a
+ NetworkX exception when `create_using` is a DiGraph (class or instance) for
+ graph generators that do not support directed outputs.
+
+ `create_using` may be a keyword argument or the first positional argument.
+ """
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ create_using = args[0] if args else kwargs.get("create_using")
+ if create_using is not None:
+ G = nx.empty_graph(create_using=create_using)
+ if G.is_directed():
+ raise NetworkXError("Directed Graph not supported")
+ return func(*args, **kwargs)
+
+ return wrapper
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def LCF_graph(n, shift_list, repeats, create_using=None):
+ """
+ Return the cubic graph specified in LCF notation.
+
+ LCF (Lederberg–Coxeter–Frucht) notation [1]_ is a compressed
+ notation used in the generation of various cubic Hamiltonian
+ graphs of high symmetry. See, for example, `dodecahedral_graph`,
+ `desargues_graph`, `heawood_graph` and `pappus_graph`.
+
+ Nodes are drawn from ``range(n)``. Each node ``n_i`` is connected with
+ node ``(n_i + shift) % n``, where ``shift`` is given by cycling through
+ the input `shift_list` `repeats` times.
+
+ Parameters
+ ----------
+ n : int
+ The starting graph is the `n`-cycle with nodes ``0, ..., n-1``.
+ The null graph is returned if `n` < 1.
+
+ shift_list : list
+ A list of integer shifts mod `n`, ``[s1, s2, .., sk]``
+
+ repeats : int
+ Integer specifying the number of times that shifts in `shift_list`
+ are successively applied to each current node in the n-cycle
+ to generate an edge between ``n_current`` and ``(n_current + shift) mod n``.
+
+ Returns
+ -------
+ G : Graph
+ A graph instance created from the specified LCF notation.
+
+ Examples
+ --------
+ The utility graph $K_{3,3}$:
+
+ >>> G = nx.LCF_graph(6, [3, -3], 3)
+ >>> G.edges()
+ EdgeView([(0, 1), (0, 5), (0, 3), (1, 2), (1, 4), (2, 3), (2, 5), (3, 4), (4, 5)])
+
+ The Heawood graph:
+
+ >>> G = nx.LCF_graph(14, [5, -5], 7)
+ >>> nx.is_isomorphic(G, nx.heawood_graph())
+ True
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/LCF_notation
+
+ """
+ if n <= 0:
+ return empty_graph(0, create_using)
+
+ # start with the n-cycle
+ G = cycle_graph(n, create_using)
+ if G.is_directed():
+ raise NetworkXError("Directed Graph not supported")
+ G.name = "LCF_graph"
+ nodes = sorted(G)
+
+ n_extra_edges = repeats * len(shift_list)
+ # edges are added n_extra_edges times
+ # (not all of these need be new)
+ if n_extra_edges < 1:
+ return G
+
+ for i in range(n_extra_edges):
+ shift = shift_list[i % len(shift_list)] # cycle through shift_list
+ v1 = nodes[i % n] # cycle repeatedly through nodes
+ v2 = nodes[(i + shift) % n]
+ G.add_edge(v1, v2)
+ return G
+
+
+# -------------------------------------------------------------------------------
+# Various small and named graphs
+# -------------------------------------------------------------------------------
+
+
+@_raise_on_directed
+@nx._dispatchable(graphs=None, returns_graph=True)
+def bull_graph(create_using=None):
+ """
+ Returns the Bull Graph
+
+ The Bull Graph has 5 nodes and 5 edges. It is a planar undirected
+ graph in the form of a triangle with two disjoint pendant edges [1]_.
+ The name comes from the triangle and pendant edges representing
+ respectively the body and legs of a bull.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ A bull graph with 5 nodes
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Bull_graph
+
+ """
+ G = nx.from_dict_of_lists(
+ {0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 4], 3: [1], 4: [2]},
+ create_using=create_using,
+ )
+ G.name = "Bull Graph"
+ return G
+
+
+@_raise_on_directed
+@nx._dispatchable(graphs=None, returns_graph=True)
+def chvatal_graph(create_using=None):
+ """
+ Returns the Chvátal Graph
+
+ The Chvátal Graph is an undirected graph with 12 nodes and 24 edges [1]_.
+ It has 370 distinct (directed) Hamiltonian cycles, giving a unique generalized
+ LCF notation of order 4, two of order 6, and 43 of order 1 [2]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ The Chvátal graph with 12 nodes and 24 edges
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Chv%C3%A1tal_graph
+ .. [2] https://mathworld.wolfram.com/ChvatalGraph.html
+
+ """
+ G = nx.from_dict_of_lists(
+ {
+ 0: [1, 4, 6, 9],
+ 1: [2, 5, 7],
+ 2: [3, 6, 8],
+ 3: [4, 7, 9],
+ 4: [5, 8],
+ 5: [10, 11],
+ 6: [10, 11],
+ 7: [8, 11],
+ 8: [10],
+ 9: [10, 11],
+ },
+ create_using=create_using,
+ )
+ G.name = "Chvatal Graph"
+ return G
+
+
+@_raise_on_directed
+@nx._dispatchable(graphs=None, returns_graph=True)
+def cubical_graph(create_using=None):
+ """
+ Returns the 3-regular Platonic Cubical Graph
+
+ The skeleton of the cube (the nodes and edges) forms a graph, with 8
+ nodes and 12 edges. It is a special case of the hypercube graph.
+ It is one of 5 Platonic graphs, each a skeleton of its
+ Platonic solid [1]_.
+ Such graphs arise in parallel processing in computers.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ A cubical graph with 8 nodes and 12 edges
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Cube#Cubical_graph
+
+ """
+ G = nx.from_dict_of_lists(
+ {
+ 0: [1, 3, 4],
+ 1: [0, 2, 7],
+ 2: [1, 3, 6],
+ 3: [0, 2, 5],
+ 4: [0, 5, 7],
+ 5: [3, 4, 6],
+ 6: [2, 5, 7],
+ 7: [1, 4, 6],
+ },
+ create_using=create_using,
+ )
+ G.name = "Platonic Cubical Graph"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def desargues_graph(create_using=None):
+ """
+ Returns the Desargues Graph
+
+ The Desargues Graph is a non-planar, distance-transitive cubic graph
+ with 20 nodes and 30 edges [1]_.
+ It is a symmetric graph. It can be represented in LCF notation
+ as [5,-5,9,-9]^5 [2]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Desargues Graph with 20 nodes and 30 edges
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Desargues_graph
+ .. [2] https://mathworld.wolfram.com/DesarguesGraph.html
+ """
+ G = LCF_graph(20, [5, -5, 9, -9], 5, create_using)
+ G.name = "Desargues Graph"
+ return G
+
+
+@_raise_on_directed
+@nx._dispatchable(graphs=None, returns_graph=True)
+def diamond_graph(create_using=None):
+ """
+ Returns the Diamond graph
+
+ The Diamond Graph is a planar undirected graph with 4 nodes and 5 edges.
+ It is also sometimes known as the double triangle graph or kite graph [1]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Diamond Graph with 4 nodes and 5 edges
+
+ References
+ ----------
+ .. [1] https://mathworld.wolfram.com/DiamondGraph.html
+ """
+ G = nx.from_dict_of_lists(
+ {0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 3], 3: [1, 2]}, create_using=create_using
+ )
+ G.name = "Diamond Graph"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def dodecahedral_graph(create_using=None):
+ """
+ Returns the Platonic Dodecahedral graph.
+
+ The dodecahedral graph has 20 nodes and 30 edges. The skeleton of the
+ dodecahedron forms a graph. It is one of 5 Platonic graphs [1]_.
+ It can be described in LCF notation as:
+ ``[10, 7, 4, -4, -7, 10, -4, 7, -7, 4]^2`` [2]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Dodecahedral Graph with 20 nodes and 30 edges
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Regular_dodecahedron#Dodecahedral_graph
+ .. [2] https://mathworld.wolfram.com/DodecahedralGraph.html
+
+ """
+ G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using)
+ G.name = "Dodecahedral Graph"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def frucht_graph(create_using=None):
+ """
+ Returns the Frucht Graph.
+
+ The Frucht Graph is the smallest cubic graph whose
+ automorphism group consists only of the identity element [1]_.
+ It has 12 nodes and 18 edges and no nontrivial symmetries.
+ It is planar and Hamiltonian [2]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Frucht Graph with 12 nodes and 18 edges
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Frucht_graph
+ .. [2] https://mathworld.wolfram.com/FruchtGraph.html
+
+ """
+ G = cycle_graph(7, create_using)
+ G.add_edges_from(
+ [
+ [0, 7],
+ [1, 7],
+ [2, 8],
+ [3, 9],
+ [4, 9],
+ [5, 10],
+ [6, 10],
+ [7, 11],
+ [8, 11],
+ [8, 9],
+ [10, 11],
+ ]
+ )
+
+ G.name = "Frucht Graph"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def heawood_graph(create_using=None):
+ """
+ Returns the Heawood Graph, a (3,6) cage.
+
+ The Heawood Graph is an undirected graph with 14 nodes and 21 edges,
+ named after Percy John Heawood [1]_.
+ It is cubic symmetric, nonplanar, Hamiltonian, and can be represented
+ in LCF notation as ``[5,-5]^7`` [2]_.
+ It is the unique (3,6)-cage: the cubic graph of girth 6 with the
+ minimal number of vertices [3]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Heawood Graph with 14 nodes and 21 edges
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Heawood_graph
+ .. [2] https://mathworld.wolfram.com/HeawoodGraph.html
+ .. [3] https://www.win.tue.nl/~aeb/graphs/Heawood.html
+
+ """
+ G = LCF_graph(14, [5, -5], 7, create_using)
+ G.name = "Heawood Graph"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def hoffman_singleton_graph():
+ """
+ Returns the Hoffman-Singleton Graph.
+
+ The Hoffman–Singleton graph is a symmetrical undirected graph
+ with 50 nodes and 175 edges.
+ All indices lie in ``Z % 5``: that is, the integers mod 5 [1]_.
+ It is the only regular graph of vertex degree 7, diameter 2, and girth 5.
+ It is the unique (7,5)-cage graph and Moore graph, and contains many
+ copies of the Petersen graph [2]_.
+
+ Returns
+ -------
+ G : networkx Graph
+ Hoffman–Singleton Graph with 50 nodes and 175 edges
+
+ Notes
+ -----
+ Constructed from pentagon and pentagram as follows: Take five pentagons $P_h$
+ and five pentagrams $Q_i$. Join vertex $j$ of $P_h$ to vertex $h·i+j$ of $Q_i$ [3]_.
+
+ References
+ ----------
+ .. [1] https://blogs.ams.org/visualinsight/2016/02/01/hoffman-singleton-graph/
+ .. [2] https://mathworld.wolfram.com/Hoffman-SingletonGraph.html
+ ..
[3] https://en.wikipedia.org/wiki/Hoffman%E2%80%93Singleton_graph + + """ + G = nx.Graph() + for i in range(5): + for j in range(5): + G.add_edge(("pentagon", i, j), ("pentagon", i, (j - 1) % 5)) + G.add_edge(("pentagon", i, j), ("pentagon", i, (j + 1) % 5)) + G.add_edge(("pentagram", i, j), ("pentagram", i, (j - 2) % 5)) + G.add_edge(("pentagram", i, j), ("pentagram", i, (j + 2) % 5)) + for k in range(5): + G.add_edge(("pentagon", i, j), ("pentagram", k, (i * k + j) % 5)) + G = nx.convert_node_labels_to_integers(G) + G.name = "Hoffman-Singleton Graph" + return G + + +@_raise_on_directed +@nx._dispatchable(graphs=None, returns_graph=True) +def house_graph(create_using=None): + """ + Returns the House graph (square with triangle on top) + + The house graph is a simple undirected graph with + 5 nodes and 6 edges [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + House graph in the form of a square with a triangle on top + + References + ---------- + .. [1] https://mathworld.wolfram.com/HouseGraph.html + """ + G = nx.from_dict_of_lists( + {0: [1, 2], 1: [0, 3], 2: [0, 3, 4], 3: [1, 2, 4], 4: [2, 3]}, + create_using=create_using, + ) + G.name = "House Graph" + return G + + +@_raise_on_directed +@nx._dispatchable(graphs=None, returns_graph=True) +def house_x_graph(create_using=None): + """ + Returns the House graph with a cross inside the house square. + + The House X-graph is the House graph plus the two edges connecting diagonally + opposite vertices of the square base. It is also one of the two graphs + obtained by removing two edges from the pentatope graph [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + House graph with diagonal vertices connected + + References + ---------- + .. [1] https://mathworld.wolfram.com/HouseGraph.html + """ + G = house_graph(create_using) + G.add_edges_from([(0, 3), (1, 2)]) + G.name = "House-with-X-inside Graph" + return G + + +@_raise_on_directed +@nx._dispatchable(graphs=None, returns_graph=True) +def icosahedral_graph(create_using=None): + """ + Returns the Platonic Icosahedral graph. + + The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph + whose nodes have the connectivity of the icosahedron. It is undirected, + regular and Hamiltonian [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Icosahedral graph with 12 nodes and 30 edges. + + References + ---------- + .. [1] https://mathworld.wolfram.com/IcosahedralGraph.html + """ + G = nx.from_dict_of_lists( + { + 0: [1, 5, 7, 8, 11], + 1: [2, 5, 6, 8], + 2: [3, 6, 8, 9], + 3: [4, 6, 9, 10], + 4: [5, 6, 10, 11], + 5: [6, 11], + 7: [8, 9, 10, 11], + 8: [9], + 9: [10], + 10: [11], + }, + create_using=create_using, + ) + G.name = "Platonic Icosahedral Graph" + return G + + +@_raise_on_directed +@nx._dispatchable(graphs=None, returns_graph=True) +def krackhardt_kite_graph(create_using=None): + """ + Returns the Krackhardt Kite Social Network. + + A 10 actor social network introduced by David Krackhardt + to illustrate different centrality measures [1]_. 
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Krackhardt Kite graph with 10 nodes and 18 edges
+
+ Notes
+ -----
+ The traditional labeling is:
+ Andre=1, Beverley=2, Carol=3, Diane=4,
+ Ed=5, Fernando=6, Garth=7, Heather=8, Ike=9, Jane=10.
+
+ References
+ ----------
+ .. [1] Krackhardt, David. "Assessing the Political Landscape: Structure,
+ Cognition, and Power in Organizations". Administrative Science Quarterly.
+ 35 (2): 342–369. doi:10.2307/2393394. JSTOR 2393394. June 1990.
+
+ """
+ G = nx.from_dict_of_lists(
+ {
+ 0: [1, 2, 3, 5],
+ 1: [0, 3, 4, 6],
+ 2: [0, 3, 5],
+ 3: [0, 1, 2, 4, 5, 6],
+ 4: [1, 3, 6],
+ 5: [0, 2, 3, 6, 7],
+ 6: [1, 3, 4, 5, 7],
+ 7: [5, 6, 8],
+ 8: [7, 9],
+ 9: [8],
+ },
+ create_using=create_using,
+ )
+ G.name = "Krackhardt Kite Social Network"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def moebius_kantor_graph(create_using=None):
+ """
+ Returns the Moebius-Kantor graph.
+
+ The Möbius-Kantor graph is the cubic symmetric graph on 16 nodes.
+ Its LCF notation is [5,-5]^8, and it is isomorphic to the generalized
+ Petersen graph $GP(8, 3)$ [1]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Moebius-Kantor graph
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/M%C3%B6bius%E2%80%93Kantor_graph
+
+ """
+ G = LCF_graph(16, [5, -5], 8, create_using)
+ G.name = "Moebius-Kantor Graph"
+ return G
+
+
+@_raise_on_directed
+@nx._dispatchable(graphs=None, returns_graph=True)
+def octahedral_graph(create_using=None):
+ """
+ Returns the Platonic Octahedral graph.
+
+ The octahedral graph is the 6-node 12-edge Platonic graph having the
+ connectivity of the octahedron [1]_. If three couples go to a party,
+ and each person shakes hands with every person except his or her partner,
+ then this graph describes the set of handshakes that take place;
+ for this reason it is also called the cocktail party graph [2]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Octahedral graph
+
+ References
+ ----------
+ .. [1] https://mathworld.wolfram.com/OctahedralGraph.html
+ .. [2] https://en.wikipedia.org/wiki/Tur%C3%A1n_graph#Special_cases
+
+ """
+ G = nx.from_dict_of_lists(
+ {0: [1, 2, 3, 4], 1: [2, 3, 5], 2: [4, 5], 3: [4, 5], 4: [5]},
+ create_using=create_using,
+ )
+ G.name = "Platonic Octahedral Graph"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def pappus_graph():
+ """
+ Returns the Pappus graph.
+
+ The Pappus graph is a cubic symmetric distance-regular graph with 18 nodes
+ and 27 edges. It is Hamiltonian and can be represented in LCF notation as
+ [5,7,-7,7,-7,-5]^3 [1]_.
+
+ Returns
+ -------
+ G : networkx Graph
+ Pappus graph
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Pappus_graph
+ """
+ G = LCF_graph(18, [5, 7, -7, 7, -7, -5], 3)
+ G.name = "Pappus Graph"
+ return G
+
+
+@_raise_on_directed
+@nx._dispatchable(graphs=None, returns_graph=True)
+def petersen_graph(create_using=None):
+ """
+ Returns the Petersen graph.
+
+ The Petersen graph is a cubic, undirected graph with 10 nodes and 15 edges [1]_.
+ Julius Petersen constructed the graph as the smallest counterexample
+ to the claim that a connected bridgeless cubic graph
+ has an edge colouring with three colours [2]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Petersen graph
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Petersen_graph
+ .. [2] https://www.win.tue.nl/~aeb/drg/graphs/Petersen.html
+ """
+ G = nx.from_dict_of_lists(
+ {
+ 0: [1, 4, 5],
+ 1: [0, 2, 6],
+ 2: [1, 3, 7],
+ 3: [2, 4, 8],
+ 4: [3, 0, 9],
+ 5: [0, 7, 8],
+ 6: [1, 8, 9],
+ 7: [2, 5, 9],
+ 8: [3, 5, 6],
+ 9: [4, 6, 7],
+ },
+ create_using=create_using,
+ )
+ G.name = "Petersen Graph"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def sedgewick_maze_graph(create_using=None):
+ """
+ Return a small maze with a cycle.
+
+ This is the maze used in Sedgewick, 3rd Edition, Part 5, Graph
+ Algorithms, Chapter 18, e.g. Figure 18.2 and following [1]_.
+ Nodes are numbered 0, ..., 7.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Small maze with a cycle
+
+ References
+ ----------
+ .. [1] Figure 18.2, Chapter 18, Graph Algorithms (3rd Ed), Sedgewick
+ """
+ G = empty_graph(0, create_using)
+ G.add_nodes_from(range(8))
+ G.add_edges_from([[0, 2], [0, 7], [0, 5]])
+ G.add_edges_from([[1, 7], [2, 6]])
+ G.add_edges_from([[3, 4], [3, 5]])
+ G.add_edges_from([[4, 5], [4, 7], [4, 6]])
+ G.name = "Sedgewick Maze"
+ return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def tetrahedral_graph(create_using=None):
+ """
+ Returns the 3-regular Platonic Tetrahedral graph.
+
+ The tetrahedral graph has 4 nodes and 6 edges. It is a
+ special case of the complete graph, K4, and the wheel graph, W4.
+ It is one of the 5 Platonic graphs [1]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Tetrahedral Graph
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Tetrahedron#Tetrahedral_graph
+
+ """
+ G = complete_graph(4, create_using)
+ G.name = "Platonic Tetrahedral Graph"
+ return G
+
+
+@_raise_on_directed
+@nx._dispatchable(graphs=None, returns_graph=True)
+def truncated_cube_graph(create_using=None):
+ """
+ Returns the skeleton of the truncated cube.
+
+ The truncated cube is an Archimedean solid with 14 regular
+ faces (6 octagonal and 8 triangular), 36 edges and 24 nodes [1]_.
+ The truncated cube is created by truncating (cutting off) the tips
+ of the cube one third of the way into each edge [2]_.
+
+ Parameters
+ ----------
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
+ Graph type to create. If graph instance, then cleared before populated.
+
+ Returns
+ -------
+ G : networkx Graph
+ Skeleton of the truncated cube
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Truncated_cube
+ ..
[2] https://www.coolmath.com/reference/polyhedra-truncated-cube + + """ + G = nx.from_dict_of_lists( + { + 0: [1, 2, 4], + 1: [11, 14], + 2: [3, 4], + 3: [6, 8], + 4: [5], + 5: [16, 18], + 6: [7, 8], + 7: [10, 12], + 8: [9], + 9: [17, 20], + 10: [11, 12], + 11: [14], + 12: [13], + 13: [21, 22], + 14: [15], + 15: [19, 23], + 16: [17, 18], + 17: [20], + 18: [19], + 19: [23], + 20: [21], + 21: [22], + 22: [23], + }, + create_using=create_using, + ) + G.name = "Truncated Cube Graph" + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def truncated_tetrahedron_graph(create_using=None): + """ + Returns the skeleton of the truncated Platonic tetrahedron. + + The truncated tetrahedron is an Archimedean solid with 4 regular hexagonal faces, + 4 equilateral triangle faces, 12 nodes and 18 edges. It can be constructed by truncating + all 4 vertices of a regular tetrahedron at one third of the original edge length [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Skeleton of the truncated tetrahedron + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Truncated_tetrahedron + + """ + G = path_graph(12, create_using) + G.add_edges_from([(0, 2), (0, 9), (1, 6), (3, 11), (4, 11), (5, 7), (8, 10)]) + G.name = "Truncated Tetrahedron Graph" + return G + + +@_raise_on_directed +@nx._dispatchable(graphs=None, returns_graph=True) +def tutte_graph(create_using=None): + """ + Returns the Tutte graph. + + The Tutte graph is a cubic polyhedral, non-Hamiltonian graph. It has + 46 nodes and 69 edges. + It is a counterexample to Tait's conjecture that every 3-regular polyhedron + has a Hamiltonian cycle. + It can be realized geometrically from a tetrahedron by multiply truncating + three of its vertices [1]_. + + Parameters + ---------- + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + G : networkx Graph + Tutte graph + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Tutte_graph + """ + G = nx.from_dict_of_lists( + { + 0: [1, 2, 3], + 1: [4, 26], + 2: [10, 11], + 3: [18, 19], + 4: [5, 33], + 5: [6, 29], + 6: [7, 27], + 7: [8, 14], + 8: [9, 38], + 9: [10, 37], + 10: [39], + 11: [12, 39], + 12: [13, 35], + 13: [14, 15], + 14: [34], + 15: [16, 22], + 16: [17, 44], + 17: [18, 43], + 18: [45], + 19: [20, 45], + 20: [21, 41], + 21: [22, 23], + 22: [40], + 23: [24, 27], + 24: [25, 32], + 25: [26, 31], + 26: [33], + 27: [28], + 28: [29, 32], + 29: [30], + 30: [31, 33], + 31: [32], + 34: [35, 38], + 35: [36], + 36: [37, 39], + 37: [38], + 40: [41, 44], + 41: [42], + 42: [43, 45], + 43: [44], + }, + create_using=create_using, + ) + G.name = "Tutte's Graph" + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/social.py b/.venv/lib/python3.12/site-packages/networkx/generators/social.py new file mode 100644 index 0000000..f41b2d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/social.py @@ -0,0 +1,554 @@ +""" +Famous social networks. +""" + +import networkx as nx + +__all__ = [ + "karate_club_graph", + "davis_southern_women_graph", + "florentine_families_graph", + "les_miserables_graph", +] + + +@nx._dispatchable(graphs=None, returns_graph=True) +def karate_club_graph(): + """Returns Zachary's Karate Club graph. 
+ + Each node in the returned graph has a node attribute 'club' that + indicates the name of the club to which the member represented by that node + belongs, either 'Mr. Hi' or 'Officer'. Each edge has a weight based on the + number of contexts in which that edge's incident node members interacted. + + The dataset is derived from the 'Club After Split From Data' column of Table 3 in [1]_. + This was in turn derived from the 'Club After Fission' column of Table 1 in the + same paper. Note that the nodes are 0-indexed in NetworkX, but 1-indexed in the + paper (the 'Individual Number in Matrix C' column of Table 3 starts at 1). This + means, for example, that ``G.nodes[9]["club"]`` returns 'Officer', which + corresponds to row 10 of Table 3 in the paper. + + Examples + -------- + To get the name of the club to which a node belongs:: + + >>> G = nx.karate_club_graph() + >>> G.nodes[5]["club"] + 'Mr. Hi' + >>> G.nodes[9]["club"] + 'Officer' + + References + ---------- + .. [1] Zachary, Wayne W. + "An Information Flow Model for Conflict and Fission in Small Groups." + *Journal of Anthropological Research*, 33, 452--473, (1977). + """ + # Create the set of all members, and the members of each club. + all_members = set(range(34)) + club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21} + # club2 = all_members - club1 + + G = nx.Graph() + G.add_nodes_from(all_members) + G.name = "Zachary's Karate Club" + + zacharydat = """\ +0 4 5 3 3 3 3 2 2 0 2 3 2 3 0 0 0 2 0 2 0 2 0 0 0 0 0 0 0 0 0 2 0 0 +4 0 6 3 0 0 0 4 0 0 0 0 0 5 0 0 0 1 0 2 0 2 0 0 0 0 0 0 0 0 2 0 0 0 +5 6 0 3 0 0 0 4 5 1 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 0 0 0 3 0 +3 3 3 0 0 0 0 3 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 0 0 0 0 0 2 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 0 0 0 0 0 5 0 0 0 3 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 0 0 0 2 5 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +2 4 4 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +2 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 4 3 +0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 +2 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +3 5 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 2 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 4 +0 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2 +2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1 +2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 0 4 0 2 0 0 5 4 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 3 0 0 0 2 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 2 0 0 0 0 0 0 7 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 0 0 0 2 +0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 3 0 0 0 0 0 0 0 0 4 +0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 2 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 0 4 0 0 0 0 0 3 2 +0 2 0 0 0 0 0 0 3 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3 +2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 7 0 0 2 0 0 0 4 4 +0 0 2 0 0 0 0 0 3 0 0 0 0 0 3 3 0 0 1 0 3 0 2 5 0 0 0 0 0 4 3 4 0 5 +0 0 0 0 0 0 0 0 4 2 0 0 0 3 2 4 0 0 2 1 1 0 3 4 0 0 2 4 2 2 3 4 5 0""" + + for row, line in enumerate(zacharydat.split("\n")): + thisrow = [int(b) for b in line.split()] + for col, entry in enumerate(thisrow): + if entry >= 1: + G.add_edge(row, col, weight=entry) + + # Add the name of each member's club as a node attribute. + for v in G: + G.nodes[v]["club"] = "Mr. Hi" if v in club1 else "Officer" + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def davis_southern_women_graph(): + """Returns Davis Southern women social network. + + This is a bipartite graph. + + References + ---------- + .. [1] A. Davis, Gardner, B. B., Gardner, M. R., 1941. Deep South. + University of Chicago Press, Chicago, IL. + """ + G = nx.Graph() + # Top nodes + women = [ + "Evelyn Jefferson", + "Laura Mandeville", + "Theresa Anderson", + "Brenda Rogers", + "Charlotte McDowd", + "Frances Anderson", + "Eleanor Nye", + "Pearl Oglethorpe", + "Ruth DeSand", + "Verne Sanderson", + "Myra Liddel", + "Katherina Rogers", + "Sylvia Avondale", + "Nora Fayette", + "Helen Lloyd", + "Dorothy Murchison", + "Olivia Carleton", + "Flora Price", + ] + G.add_nodes_from(women, bipartite=0) + # Bottom nodes + events = [ + "E1", + "E2", + "E3", + "E4", + "E5", + "E6", + "E7", + "E8", + "E9", + "E10", + "E11", + "E12", + "E13", + "E14", + ] + G.add_nodes_from(events, bipartite=1) + + G.add_edges_from( + [ + ("Evelyn Jefferson", "E1"), + ("Evelyn Jefferson", "E2"), + ("Evelyn Jefferson", "E3"), + ("Evelyn Jefferson", "E4"), + ("Evelyn Jefferson", "E5"), + ("Evelyn Jefferson", "E6"), + ("Evelyn Jefferson", "E8"), + ("Evelyn Jefferson", "E9"), + ("Laura Mandeville", "E1"), + ("Laura Mandeville", "E2"), + ("Laura Mandeville", "E3"), + ("Laura Mandeville", "E5"), + ("Laura Mandeville", "E6"), + ("Laura Mandeville", "E7"), + ("Laura Mandeville", "E8"), + ("Theresa Anderson", "E2"), + ("Theresa Anderson", "E3"), + ("Theresa Anderson", "E4"), + ("Theresa Anderson", "E5"), + ("Theresa Anderson", "E6"), + ("Theresa Anderson", "E7"), + ("Theresa Anderson", "E8"), + ("Theresa Anderson", "E9"), + ("Brenda Rogers", "E1"), + ("Brenda Rogers", "E3"), + ("Brenda Rogers", "E4"), + ("Brenda Rogers", "E5"), + ("Brenda Rogers", "E6"), + ("Brenda Rogers", "E7"), + ("Brenda Rogers", "E8"), + ("Charlotte McDowd", "E3"), + ("Charlotte McDowd", "E4"), + ("Charlotte McDowd", "E5"), + ("Charlotte McDowd", "E7"), + ("Frances Anderson", "E3"), + ("Frances Anderson", "E5"), + ("Frances Anderson", "E6"), + ("Frances Anderson", "E8"), + ("Eleanor Nye", "E5"), + ("Eleanor Nye", "E6"), + ("Eleanor Nye", "E7"), + ("Eleanor Nye", "E8"), + ("Pearl Oglethorpe", "E6"), + ("Pearl Oglethorpe", "E8"), + ("Pearl Oglethorpe", "E9"), + ("Ruth DeSand", "E5"), + ("Ruth DeSand", "E7"), + ("Ruth DeSand", "E8"), + ("Ruth DeSand", "E9"), + ("Verne Sanderson", "E7"), + ("Verne Sanderson", "E8"), + ("Verne Sanderson", "E9"), + ("Verne Sanderson", "E12"), + ("Myra Liddel", "E8"), + ("Myra Liddel", "E9"), + ("Myra Liddel", "E10"), + ("Myra Liddel", "E12"), + ("Katherina Rogers", "E8"), + ("Katherina Rogers", "E9"), + ("Katherina Rogers", "E10"), + ("Katherina Rogers", "E12"), + ("Katherina Rogers", "E13"), + ("Katherina Rogers", "E14"), + ("Sylvia Avondale", "E7"), + ("Sylvia Avondale", "E8"), + ("Sylvia Avondale", "E9"), + ("Sylvia Avondale", "E10"), + ("Sylvia Avondale", "E12"), + ("Sylvia Avondale", 
"E13"), + ("Sylvia Avondale", "E14"), + ("Nora Fayette", "E6"), + ("Nora Fayette", "E7"), + ("Nora Fayette", "E9"), + ("Nora Fayette", "E10"), + ("Nora Fayette", "E11"), + ("Nora Fayette", "E12"), + ("Nora Fayette", "E13"), + ("Nora Fayette", "E14"), + ("Helen Lloyd", "E7"), + ("Helen Lloyd", "E8"), + ("Helen Lloyd", "E10"), + ("Helen Lloyd", "E11"), + ("Helen Lloyd", "E12"), + ("Dorothy Murchison", "E8"), + ("Dorothy Murchison", "E9"), + ("Olivia Carleton", "E9"), + ("Olivia Carleton", "E11"), + ("Flora Price", "E9"), + ("Flora Price", "E11"), + ] + ) + G.graph["top"] = women + G.graph["bottom"] = events + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def florentine_families_graph(): + """Returns Florentine families graph. + + References + ---------- + .. [1] Ronald L. Breiger and Philippa E. Pattison + Cumulated social roles: The duality of persons and their algebras,1 + Social Networks, Volume 8, Issue 3, September 1986, Pages 215-256 + """ + G = nx.Graph() + G.add_edge("Acciaiuoli", "Medici") + G.add_edge("Castellani", "Peruzzi") + G.add_edge("Castellani", "Strozzi") + G.add_edge("Castellani", "Barbadori") + G.add_edge("Medici", "Barbadori") + G.add_edge("Medici", "Ridolfi") + G.add_edge("Medici", "Tornabuoni") + G.add_edge("Medici", "Albizzi") + G.add_edge("Medici", "Salviati") + G.add_edge("Salviati", "Pazzi") + G.add_edge("Peruzzi", "Strozzi") + G.add_edge("Peruzzi", "Bischeri") + G.add_edge("Strozzi", "Ridolfi") + G.add_edge("Strozzi", "Bischeri") + G.add_edge("Ridolfi", "Tornabuoni") + G.add_edge("Tornabuoni", "Guadagni") + G.add_edge("Albizzi", "Ginori") + G.add_edge("Albizzi", "Guadagni") + G.add_edge("Bischeri", "Guadagni") + G.add_edge("Guadagni", "Lamberteschi") + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def les_miserables_graph(): + """Returns coappearance network of characters in the novel Les Miserables. + + References + ---------- + .. [1] D. E. Knuth, 1993. + The Stanford GraphBase: a platform for combinatorial computing, + pp. 74-87. New York: AcM Press. 
+ """ + G = nx.Graph() + G.add_edge("Napoleon", "Myriel", weight=1) + G.add_edge("MlleBaptistine", "Myriel", weight=8) + G.add_edge("MmeMagloire", "Myriel", weight=10) + G.add_edge("MmeMagloire", "MlleBaptistine", weight=6) + G.add_edge("CountessDeLo", "Myriel", weight=1) + G.add_edge("Geborand", "Myriel", weight=1) + G.add_edge("Champtercier", "Myriel", weight=1) + G.add_edge("Cravatte", "Myriel", weight=1) + G.add_edge("Count", "Myriel", weight=2) + G.add_edge("OldMan", "Myriel", weight=1) + G.add_edge("Valjean", "Labarre", weight=1) + G.add_edge("Valjean", "MmeMagloire", weight=3) + G.add_edge("Valjean", "MlleBaptistine", weight=3) + G.add_edge("Valjean", "Myriel", weight=5) + G.add_edge("Marguerite", "Valjean", weight=1) + G.add_edge("MmeDeR", "Valjean", weight=1) + G.add_edge("Isabeau", "Valjean", weight=1) + G.add_edge("Gervais", "Valjean", weight=1) + G.add_edge("Listolier", "Tholomyes", weight=4) + G.add_edge("Fameuil", "Tholomyes", weight=4) + G.add_edge("Fameuil", "Listolier", weight=4) + G.add_edge("Blacheville", "Tholomyes", weight=4) + G.add_edge("Blacheville", "Listolier", weight=4) + G.add_edge("Blacheville", "Fameuil", weight=4) + G.add_edge("Favourite", "Tholomyes", weight=3) + G.add_edge("Favourite", "Listolier", weight=3) + G.add_edge("Favourite", "Fameuil", weight=3) + G.add_edge("Favourite", "Blacheville", weight=4) + G.add_edge("Dahlia", "Tholomyes", weight=3) + G.add_edge("Dahlia", "Listolier", weight=3) + G.add_edge("Dahlia", "Fameuil", weight=3) + G.add_edge("Dahlia", "Blacheville", weight=3) + G.add_edge("Dahlia", "Favourite", weight=5) + G.add_edge("Zephine", "Tholomyes", weight=3) + G.add_edge("Zephine", "Listolier", weight=3) + G.add_edge("Zephine", "Fameuil", weight=3) + G.add_edge("Zephine", "Blacheville", weight=3) + G.add_edge("Zephine", "Favourite", weight=4) + G.add_edge("Zephine", "Dahlia", weight=4) + G.add_edge("Fantine", "Tholomyes", weight=3) + G.add_edge("Fantine", "Listolier", weight=3) + G.add_edge("Fantine", "Fameuil", weight=3) + G.add_edge("Fantine", "Blacheville", weight=3) + G.add_edge("Fantine", "Favourite", weight=4) + G.add_edge("Fantine", "Dahlia", weight=4) + G.add_edge("Fantine", "Zephine", weight=4) + G.add_edge("Fantine", "Marguerite", weight=2) + G.add_edge("Fantine", "Valjean", weight=9) + G.add_edge("MmeThenardier", "Fantine", weight=2) + G.add_edge("MmeThenardier", "Valjean", weight=7) + G.add_edge("Thenardier", "MmeThenardier", weight=13) + G.add_edge("Thenardier", "Fantine", weight=1) + G.add_edge("Thenardier", "Valjean", weight=12) + G.add_edge("Cosette", "MmeThenardier", weight=4) + G.add_edge("Cosette", "Valjean", weight=31) + G.add_edge("Cosette", "Tholomyes", weight=1) + G.add_edge("Cosette", "Thenardier", weight=1) + G.add_edge("Javert", "Valjean", weight=17) + G.add_edge("Javert", "Fantine", weight=5) + G.add_edge("Javert", "Thenardier", weight=5) + G.add_edge("Javert", "MmeThenardier", weight=1) + G.add_edge("Javert", "Cosette", weight=1) + G.add_edge("Fauchelevent", "Valjean", weight=8) + G.add_edge("Fauchelevent", "Javert", weight=1) + G.add_edge("Bamatabois", "Fantine", weight=1) + G.add_edge("Bamatabois", "Javert", weight=1) + G.add_edge("Bamatabois", "Valjean", weight=2) + G.add_edge("Perpetue", "Fantine", weight=1) + G.add_edge("Simplice", "Perpetue", weight=2) + G.add_edge("Simplice", "Valjean", weight=3) + G.add_edge("Simplice", "Fantine", weight=2) + G.add_edge("Simplice", "Javert", weight=1) + G.add_edge("Scaufflaire", "Valjean", weight=1) + G.add_edge("Woman1", "Valjean", weight=2) + G.add_edge("Woman1", 
"Javert", weight=1) + G.add_edge("Judge", "Valjean", weight=3) + G.add_edge("Judge", "Bamatabois", weight=2) + G.add_edge("Champmathieu", "Valjean", weight=3) + G.add_edge("Champmathieu", "Judge", weight=3) + G.add_edge("Champmathieu", "Bamatabois", weight=2) + G.add_edge("Brevet", "Judge", weight=2) + G.add_edge("Brevet", "Champmathieu", weight=2) + G.add_edge("Brevet", "Valjean", weight=2) + G.add_edge("Brevet", "Bamatabois", weight=1) + G.add_edge("Chenildieu", "Judge", weight=2) + G.add_edge("Chenildieu", "Champmathieu", weight=2) + G.add_edge("Chenildieu", "Brevet", weight=2) + G.add_edge("Chenildieu", "Valjean", weight=2) + G.add_edge("Chenildieu", "Bamatabois", weight=1) + G.add_edge("Cochepaille", "Judge", weight=2) + G.add_edge("Cochepaille", "Champmathieu", weight=2) + G.add_edge("Cochepaille", "Brevet", weight=2) + G.add_edge("Cochepaille", "Chenildieu", weight=2) + G.add_edge("Cochepaille", "Valjean", weight=2) + G.add_edge("Cochepaille", "Bamatabois", weight=1) + G.add_edge("Pontmercy", "Thenardier", weight=1) + G.add_edge("Boulatruelle", "Thenardier", weight=1) + G.add_edge("Eponine", "MmeThenardier", weight=2) + G.add_edge("Eponine", "Thenardier", weight=3) + G.add_edge("Anzelma", "Eponine", weight=2) + G.add_edge("Anzelma", "Thenardier", weight=2) + G.add_edge("Anzelma", "MmeThenardier", weight=1) + G.add_edge("Woman2", "Valjean", weight=3) + G.add_edge("Woman2", "Cosette", weight=1) + G.add_edge("Woman2", "Javert", weight=1) + G.add_edge("MotherInnocent", "Fauchelevent", weight=3) + G.add_edge("MotherInnocent", "Valjean", weight=1) + G.add_edge("Gribier", "Fauchelevent", weight=2) + G.add_edge("MmeBurgon", "Jondrette", weight=1) + G.add_edge("Gavroche", "MmeBurgon", weight=2) + G.add_edge("Gavroche", "Thenardier", weight=1) + G.add_edge("Gavroche", "Javert", weight=1) + G.add_edge("Gavroche", "Valjean", weight=1) + G.add_edge("Gillenormand", "Cosette", weight=3) + G.add_edge("Gillenormand", "Valjean", weight=2) + G.add_edge("Magnon", "Gillenormand", weight=1) + G.add_edge("Magnon", "MmeThenardier", weight=1) + G.add_edge("MlleGillenormand", "Gillenormand", weight=9) + G.add_edge("MlleGillenormand", "Cosette", weight=2) + G.add_edge("MlleGillenormand", "Valjean", weight=2) + G.add_edge("MmePontmercy", "MlleGillenormand", weight=1) + G.add_edge("MmePontmercy", "Pontmercy", weight=1) + G.add_edge("MlleVaubois", "MlleGillenormand", weight=1) + G.add_edge("LtGillenormand", "MlleGillenormand", weight=2) + G.add_edge("LtGillenormand", "Gillenormand", weight=1) + G.add_edge("LtGillenormand", "Cosette", weight=1) + G.add_edge("Marius", "MlleGillenormand", weight=6) + G.add_edge("Marius", "Gillenormand", weight=12) + G.add_edge("Marius", "Pontmercy", weight=1) + G.add_edge("Marius", "LtGillenormand", weight=1) + G.add_edge("Marius", "Cosette", weight=21) + G.add_edge("Marius", "Valjean", weight=19) + G.add_edge("Marius", "Tholomyes", weight=1) + G.add_edge("Marius", "Thenardier", weight=2) + G.add_edge("Marius", "Eponine", weight=5) + G.add_edge("Marius", "Gavroche", weight=4) + G.add_edge("BaronessT", "Gillenormand", weight=1) + G.add_edge("BaronessT", "Marius", weight=1) + G.add_edge("Mabeuf", "Marius", weight=1) + G.add_edge("Mabeuf", "Eponine", weight=1) + G.add_edge("Mabeuf", "Gavroche", weight=1) + G.add_edge("Enjolras", "Marius", weight=7) + G.add_edge("Enjolras", "Gavroche", weight=7) + G.add_edge("Enjolras", "Javert", weight=6) + G.add_edge("Enjolras", "Mabeuf", weight=1) + G.add_edge("Enjolras", "Valjean", weight=4) + G.add_edge("Combeferre", "Enjolras", weight=15) + 
G.add_edge("Combeferre", "Marius", weight=5) + G.add_edge("Combeferre", "Gavroche", weight=6) + G.add_edge("Combeferre", "Mabeuf", weight=2) + G.add_edge("Prouvaire", "Gavroche", weight=1) + G.add_edge("Prouvaire", "Enjolras", weight=4) + G.add_edge("Prouvaire", "Combeferre", weight=2) + G.add_edge("Feuilly", "Gavroche", weight=2) + G.add_edge("Feuilly", "Enjolras", weight=6) + G.add_edge("Feuilly", "Prouvaire", weight=2) + G.add_edge("Feuilly", "Combeferre", weight=5) + G.add_edge("Feuilly", "Mabeuf", weight=1) + G.add_edge("Feuilly", "Marius", weight=1) + G.add_edge("Courfeyrac", "Marius", weight=9) + G.add_edge("Courfeyrac", "Enjolras", weight=17) + G.add_edge("Courfeyrac", "Combeferre", weight=13) + G.add_edge("Courfeyrac", "Gavroche", weight=7) + G.add_edge("Courfeyrac", "Mabeuf", weight=2) + G.add_edge("Courfeyrac", "Eponine", weight=1) + G.add_edge("Courfeyrac", "Feuilly", weight=6) + G.add_edge("Courfeyrac", "Prouvaire", weight=3) + G.add_edge("Bahorel", "Combeferre", weight=5) + G.add_edge("Bahorel", "Gavroche", weight=5) + G.add_edge("Bahorel", "Courfeyrac", weight=6) + G.add_edge("Bahorel", "Mabeuf", weight=2) + G.add_edge("Bahorel", "Enjolras", weight=4) + G.add_edge("Bahorel", "Feuilly", weight=3) + G.add_edge("Bahorel", "Prouvaire", weight=2) + G.add_edge("Bahorel", "Marius", weight=1) + G.add_edge("Bossuet", "Marius", weight=5) + G.add_edge("Bossuet", "Courfeyrac", weight=12) + G.add_edge("Bossuet", "Gavroche", weight=5) + G.add_edge("Bossuet", "Bahorel", weight=4) + G.add_edge("Bossuet", "Enjolras", weight=10) + G.add_edge("Bossuet", "Feuilly", weight=6) + G.add_edge("Bossuet", "Prouvaire", weight=2) + G.add_edge("Bossuet", "Combeferre", weight=9) + G.add_edge("Bossuet", "Mabeuf", weight=1) + G.add_edge("Bossuet", "Valjean", weight=1) + G.add_edge("Joly", "Bahorel", weight=5) + G.add_edge("Joly", "Bossuet", weight=7) + G.add_edge("Joly", "Gavroche", weight=3) + G.add_edge("Joly", "Courfeyrac", weight=5) + G.add_edge("Joly", "Enjolras", weight=5) + G.add_edge("Joly", "Feuilly", weight=5) + G.add_edge("Joly", "Prouvaire", weight=2) + G.add_edge("Joly", "Combeferre", weight=5) + G.add_edge("Joly", "Mabeuf", weight=1) + G.add_edge("Joly", "Marius", weight=2) + G.add_edge("Grantaire", "Bossuet", weight=3) + G.add_edge("Grantaire", "Enjolras", weight=3) + G.add_edge("Grantaire", "Combeferre", weight=1) + G.add_edge("Grantaire", "Courfeyrac", weight=2) + G.add_edge("Grantaire", "Joly", weight=2) + G.add_edge("Grantaire", "Gavroche", weight=1) + G.add_edge("Grantaire", "Bahorel", weight=1) + G.add_edge("Grantaire", "Feuilly", weight=1) + G.add_edge("Grantaire", "Prouvaire", weight=1) + G.add_edge("MotherPlutarch", "Mabeuf", weight=3) + G.add_edge("Gueulemer", "Thenardier", weight=5) + G.add_edge("Gueulemer", "Valjean", weight=1) + G.add_edge("Gueulemer", "MmeThenardier", weight=1) + G.add_edge("Gueulemer", "Javert", weight=1) + G.add_edge("Gueulemer", "Gavroche", weight=1) + G.add_edge("Gueulemer", "Eponine", weight=1) + G.add_edge("Babet", "Thenardier", weight=6) + G.add_edge("Babet", "Gueulemer", weight=6) + G.add_edge("Babet", "Valjean", weight=1) + G.add_edge("Babet", "MmeThenardier", weight=1) + G.add_edge("Babet", "Javert", weight=2) + G.add_edge("Babet", "Gavroche", weight=1) + G.add_edge("Babet", "Eponine", weight=1) + G.add_edge("Claquesous", "Thenardier", weight=4) + G.add_edge("Claquesous", "Babet", weight=4) + G.add_edge("Claquesous", "Gueulemer", weight=4) + G.add_edge("Claquesous", "Valjean", weight=1) + G.add_edge("Claquesous", "MmeThenardier", weight=1) + 
G.add_edge("Claquesous", "Javert", weight=1) + G.add_edge("Claquesous", "Eponine", weight=1) + G.add_edge("Claquesous", "Enjolras", weight=1) + G.add_edge("Montparnasse", "Javert", weight=1) + G.add_edge("Montparnasse", "Babet", weight=2) + G.add_edge("Montparnasse", "Gueulemer", weight=2) + G.add_edge("Montparnasse", "Claquesous", weight=2) + G.add_edge("Montparnasse", "Valjean", weight=1) + G.add_edge("Montparnasse", "Gavroche", weight=1) + G.add_edge("Montparnasse", "Eponine", weight=1) + G.add_edge("Montparnasse", "Thenardier", weight=1) + G.add_edge("Toussaint", "Cosette", weight=2) + G.add_edge("Toussaint", "Javert", weight=1) + G.add_edge("Toussaint", "Valjean", weight=1) + G.add_edge("Child1", "Gavroche", weight=2) + G.add_edge("Child2", "Gavroche", weight=2) + G.add_edge("Child2", "Child1", weight=3) + G.add_edge("Brujon", "Babet", weight=3) + G.add_edge("Brujon", "Gueulemer", weight=3) + G.add_edge("Brujon", "Thenardier", weight=3) + G.add_edge("Brujon", "Gavroche", weight=1) + G.add_edge("Brujon", "Eponine", weight=1) + G.add_edge("Brujon", "Claquesous", weight=1) + G.add_edge("Brujon", "Montparnasse", weight=1) + G.add_edge("MmeHucheloup", "Bossuet", weight=1) + G.add_edge("MmeHucheloup", "Joly", weight=1) + G.add_edge("MmeHucheloup", "Grantaire", weight=1) + G.add_edge("MmeHucheloup", "Bahorel", weight=1) + G.add_edge("MmeHucheloup", "Courfeyrac", weight=1) + G.add_edge("MmeHucheloup", "Gavroche", weight=1) + G.add_edge("MmeHucheloup", "Enjolras", weight=1) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/spectral_graph_forge.py b/.venv/lib/python3.12/site-packages/networkx/generators/spectral_graph_forge.py new file mode 100644 index 0000000..aa8c919 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/spectral_graph_forge.py @@ -0,0 +1,120 @@ +"""Generates graphs with a given eigenvector structure""" + +import networkx as nx +from networkx.utils import np_random_state + +__all__ = ["spectral_graph_forge"] + + +@np_random_state(3) +@nx._dispatchable(preserve_edge_attrs={"G": {"weight": 1}}, returns_graph=True) +def spectral_graph_forge(G, alpha, transformation="identity", seed=None): + """Returns a random simple graph with spectrum resembling that of `G` + + This algorithm, called Spectral Graph Forge (SGF), computes the + eigenvectors of a given graph adjacency matrix, filters them and + builds a random graph with a similar eigenstructure. + SGF has been proved to be particularly useful for synthesizing + realistic social networks and it can also be used to anonymize + graph sensitive data. + + Parameters + ---------- + G : Graph + alpha : float + Ratio representing the percentage of eigenvectors of G to consider, + values in [0,1]. + transformation : string, optional + Represents the intended matrix linear transformation, possible values + are 'identity' and 'modularity' + seed : integer, random_state, or None (default) + Indicator of numpy random number generation state. + See :ref:`Randomness`. + + Returns + ------- + H : Graph + A graph with a similar eigenvector structure of the input one. + + Raises + ------ + NetworkXError + If transformation has a value different from 'identity' or 'modularity' + + Notes + ----- + Spectral Graph Forge (SGF) generates a random simple graph resembling the + global properties of the given one. + It leverages the low-rank approximation of the associated adjacency matrix + driven by the *alpha* precision parameter. 
+ SGF preserves the number of nodes of the input graph and their ordering.
+ This way, nodes of the output graph resemble the properties of the input
+ ones and attributes can be directly mapped.
+
+ It considers the graph adjacency matrix, which can optionally be
+ transformed to another symmetric real matrix (currently the transformation
+ options are *identity* and *modularity*).
+ The *modularity* transformation, in the sense of Newman's modularity matrix,
+ allows focusing on community-structure-related properties of the graph.
+
+ SGF applies a low-rank approximation whose fixed rank is computed from the
+ ratio *alpha* of the input graph adjacency matrix dimension.
+ This step performs a filtering on the input eigenvectors similar to the
+ low-pass filtering common in telecommunications.
+
+ The filtered values (after truncation) are used as input to a Bernoulli
+ sampling for constructing a random adjacency matrix.
+
+ References
+ ----------
+ .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, "Spectral Graph Forge:
+ Graph Generation Targeting Modularity", IEEE Infocom, '18.
+ https://arxiv.org/abs/1801.01715
+ .. [2] M. Newman, "Networks: an introduction", Oxford university press,
+ 2010
+
+ Examples
+ --------
+ >>> G = nx.karate_club_graph()
+ >>> H = nx.spectral_graph_forge(G, 0.3)
+ """
+ import numpy as np
+ import scipy as sp
+
+ available_transformations = ["identity", "modularity"]
+ alpha = np.clip(alpha, 0, 1)
+ A = nx.to_numpy_array(G)
+ n = A.shape[1]
+ level = round(n * alpha)
+
+ if transformation not in available_transformations:
+ msg = f"{transformation!r} is not a valid transformation. "
+ msg += f"Transformations: {available_transformations}"
+ raise nx.NetworkXError(msg)
+
+ K = np.ones((1, n)) @ A
+
+ B = A
+ if transformation == "modularity":
+ B -= K.T @ K / K.sum()
+
+ # Compute low-rank approximation of B
+ evals, evecs = np.linalg.eigh(B)
+ k = np.argsort(np.abs(evals))[::-1] # indices of evals in descending order
+ evecs[:, k[np.arange(level, n)]] = 0 # set smallest eigenvectors to 0
+ B = evecs @ np.diag(evals) @ evecs.T
+
+ if transformation == "modularity":
+ B += K.T @ K / K.sum()
+
+ B = np.clip(B, 0, 1)
+ np.fill_diagonal(B, 0)
+
+ for i in range(n - 1):
+ B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed)
+ B[i + 1 :, i] = np.transpose(B[i, i + 1 :])
+
+ H = nx.from_numpy_array(B)
+
+ return H
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/stochastic.py b/.venv/lib/python3.12/site-packages/networkx/generators/stochastic.py
new file mode 100644
index 0000000..f53e231
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/stochastic.py
@@ -0,0 +1,54 @@
+"""Functions for generating stochastic graphs from a given weighted directed
+graph.
+
+"""
+
+import networkx as nx
+from networkx.classes import DiGraph, MultiDiGraph
+from networkx.utils import not_implemented_for
+
+__all__ = ["stochastic_graph"]
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(
+ edge_attrs="weight", mutates_input={"not copy": 1}, returns_graph=True
+)
+def stochastic_graph(G, copy=True, weight="weight"):
+ """Returns a right-stochastic representation of directed graph `G`.
+
+ A right-stochastic graph is a weighted digraph in which for each
+ node, the sum of the weights of all the out-edges of that node is
+ 1. If the graph is already weighted (for example, via a 'weight'
+ edge attribute), the reweighting takes that into account.
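+
+ For example, a node whose out-edges have weights 1 and 3 is reweighted
+ to 0.25 and 0.75:
+
+ >>> G = nx.DiGraph()
+ >>> G.add_weighted_edges_from([(0, 1, 1.0), (0, 2, 3.0)])
+ >>> sorted(nx.stochastic_graph(G).edges(data="weight"))
+ [(0, 1, 0.25), (0, 2, 0.75)]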
+ + Parameters + ---------- + G : directed graph + A :class:`~networkx.DiGraph` or :class:`~networkx.MultiDiGraph`. + + copy : boolean, optional + If this is True, then this function returns a new graph with + the stochastic reweighting. Otherwise, the original graph is + modified in-place (and also returned, for convenience). + + weight : edge attribute key (optional, default='weight') + Edge attribute key used for reading the existing weight and + setting the new weight. If no attribute with this key is found + for an edge, then the edge weight is assumed to be 1. If an edge + has a weight, it must be a positive number. + + """ + if copy: + G = MultiDiGraph(G) if G.is_multigraph() else DiGraph(G) + # There is a tradeoff here: the dictionary of node degrees may + # require a lot of memory, whereas making a call to `G.out_degree` + # inside the loop may be costly in computation time. + degree = dict(G.out_degree(weight=weight)) + for u, v, d in G.edges(data=True): + if degree[u] == 0: + d[weight] = 0 + else: + d[weight] = d.get(weight, 1) / degree[u] + nx._clear_cache(G) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/sudoku.py b/.venv/lib/python3.12/site-packages/networkx/generators/sudoku.py new file mode 100644 index 0000000..f288ed2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/sudoku.py @@ -0,0 +1,131 @@ +"""Generator for Sudoku graphs + +This module gives a generator for n-Sudoku graphs. It can be used to develop +algorithms for solving or generating Sudoku puzzles. + +A completed Sudoku grid is a 9x9 array of integers between 1 and 9, with no +number appearing twice in the same row, column, or 3x3 box. + ++---------+---------+---------+ +| | 8 6 4 | | 3 7 1 | | 2 5 9 | +| | 3 2 5 | | 8 4 9 | | 7 6 1 | +| | 9 7 1 | | 2 6 5 | | 8 4 3 | ++---------+---------+---------+ +| | 4 3 6 | | 1 9 2 | | 5 8 7 | +| | 1 9 8 | | 6 5 7 | | 4 3 2 | +| | 2 5 7 | | 4 8 3 | | 9 1 6 | ++---------+---------+---------+ +| | 6 8 9 | | 7 3 4 | | 1 2 5 | +| | 7 1 3 | | 5 2 8 | | 6 9 4 | +| | 5 4 2 | | 9 1 6 | | 3 7 8 | ++---------+---------+---------+ + + +The Sudoku graph is an undirected graph with 81 vertices, corresponding to +the cells of a Sudoku grid. It is a regular graph of degree 20. Two distinct +vertices are adjacent if and only if the corresponding cells belong to the +same row, column, or box. A completed Sudoku grid corresponds to a vertex +coloring of the Sudoku graph with nine colors. + +More generally, the n-Sudoku graph is a graph with n^4 vertices, corresponding +to the cells of an n^2 by n^2 grid. Two distinct vertices are adjacent if and +only if they belong to the same row, column, or n by n box. + +References +---------- +.. [1] Herzberg, A. M., & Murty, M. R. (2007). Sudoku squares and chromatic + polynomials. Notices of the AMS, 54(6), 708-717. +.. [2] Sander, Torsten (2009), "Sudoku graphs are integral", + Electronic Journal of Combinatorics, 16 (1): Note 25, 7pp, MR 2529816 +.. [3] Wikipedia contributors. "Glossary of Sudoku." Wikipedia, The Free + Encyclopedia, 3 Dec. 2019. Web. 22 Dec. 2019. +""" + +import networkx as nx +from networkx.exception import NetworkXError + +__all__ = ["sudoku_graph"] + + +@nx._dispatchable(graphs=None, returns_graph=True) +def sudoku_graph(n=3): + """Returns the n-Sudoku graph. The default value of n is 3. + + The n-Sudoku graph is a graph with n^4 vertices, corresponding to the + cells of an n^2 by n^2 grid. 
Two distinct vertices are adjacent if and + only if they belong to the same row, column, or n-by-n box. + + Parameters + ---------- + n: integer + The order of the Sudoku graph, equal to the square root of the + number of rows. The default is 3. + + Returns + ------- + NetworkX graph + The n-Sudoku graph Sud(n). + + Examples + -------- + >>> G = nx.sudoku_graph() + >>> G.number_of_nodes() + 81 + >>> G.number_of_edges() + 810 + >>> sorted(G.neighbors(42)) + [6, 15, 24, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 44, 51, 52, 53, 60, 69, 78] + >>> G = nx.sudoku_graph(2) + >>> G.number_of_nodes() + 16 + >>> G.number_of_edges() + 56 + + References + ---------- + .. [1] Herzberg, A. M., & Murty, M. R. (2007). Sudoku squares and chromatic + polynomials. Notices of the AMS, 54(6), 708-717. + .. [2] Sander, Torsten (2009), "Sudoku graphs are integral", + Electronic Journal of Combinatorics, 16 (1): Note 25, 7pp, MR 2529816 + .. [3] Wikipedia contributors. "Glossary of Sudoku." Wikipedia, The Free + Encyclopedia, 3 Dec. 2019. Web. 22 Dec. 2019. + """ + + if n < 0: + raise NetworkXError("The order must be greater than or equal to zero.") + + n2 = n * n + n3 = n2 * n + n4 = n3 * n + + # Construct an empty graph with n^4 nodes + G = nx.empty_graph(n4) + + # A Sudoku graph of order 0 or 1 has no edges + if n < 2: + return G + + # Add edges for cells in the same row + for row_no in range(n2): + row_start = row_no * n2 + for j in range(1, n2): + for i in range(j): + G.add_edge(row_start + i, row_start + j) + + # Add edges for cells in the same column + for col_no in range(n2): + for j in range(col_no, n4, n2): + for i in range(col_no, j, n2): + G.add_edge(i, j) + + # Add edges for cells in the same box + for band_no in range(n): + for stack_no in range(n): + box_start = n3 * band_no + n * stack_no + for j in range(1, n2): + for i in range(j): + u = box_start + (i % n) + n2 * (i // n) + v = box_start + (j % n) + n2 * (j // n) + G.add_edge(u, v) + + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..05e7924 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_atlas.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_atlas.cpython-312.pyc new file mode 100644 index 0000000..2c94470 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_atlas.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-312.pyc new file mode 100644 index 0000000..cee6aa4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-312.pyc new file mode 100644 index 0000000..4e9cdca Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_community.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_community.cpython-312.pyc new file mode 100644 index 0000000..ceb985b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_community.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-312.pyc new file mode 100644 index 0000000..1db36fb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_directed.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_directed.cpython-312.pyc new file mode 100644 index 0000000..d45c9f7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_directed.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_duplication.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_duplication.cpython-312.pyc new file mode 100644 index 0000000..82da3b7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_duplication.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_ego.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_ego.cpython-312.pyc new file mode 100644 index 0000000..b09e6bf Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_ego.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_expanders.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_expanders.cpython-312.pyc new file mode 100644 index 0000000..c263394 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_expanders.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-312.pyc new file mode 100644 index 0000000..775fa71 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-312.pyc new file mode 100644 index 0000000..2502509 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-312.pyc new file mode 100644 index 0000000..a44a968 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_intersection.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_intersection.cpython-312.pyc new file mode 100644 index 0000000..baaaaef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_intersection.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-312.pyc new file mode 100644 index 0000000..b97c9a9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-312.pyc new file mode 100644 index 0000000..077b167 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-312.pyc new file mode 100644 index 0000000..08b51f1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_line.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_line.cpython-312.pyc new file mode 100644 index 0000000..691869b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_line.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_mycielski.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_mycielski.cpython-312.pyc new file mode 100644 index 0000000..841f6f8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_mycielski.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-312.pyc new file mode 100644 index 0000000..5c5773b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_random_clustered.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_random_clustered.cpython-312.pyc new file mode 100644 index 0000000..7222381 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_random_clustered.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-312.pyc new file mode 100644 index 0000000..fd9cc34 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-312.pyc new file mode 100644 index 0000000..6ae6d9a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-312.pyc new file mode 100644 index 0000000..477d0ca Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_stochastic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_stochastic.cpython-312.pyc new file mode 100644 index 0000000..9f5ef34 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_stochastic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_sudoku.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_sudoku.cpython-312.pyc new file mode 100644 index 0000000..d768835 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_sudoku.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_time_series.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_time_series.cpython-312.pyc new file mode 100644 index 0000000..84cec17 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_time_series.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-312.pyc new file mode 100644 index 0000000..4531b28 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-312.pyc new file mode 100644 index 0000000..edef5f6 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_atlas.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_atlas.py new file mode 100644 index 0000000..add4741 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_atlas.py @@ -0,0 +1,75 @@ +from itertools import groupby + +import pytest + +import networkx as nx +from networkx import graph_atlas, graph_atlas_g +from networkx.generators.atlas import NUM_GRAPHS +from networkx.utils import edges_equal, nodes_equal, pairwise + + +class TestAtlasGraph: + """Unit tests for the :func:`~networkx.graph_atlas` function.""" + + def test_index_too_small(self): + with pytest.raises(ValueError): + graph_atlas(-1) + + def test_index_too_large(self): + with pytest.raises(ValueError): + graph_atlas(NUM_GRAPHS) + + def test_graph(self): + G = graph_atlas(6) + assert nodes_equal(G.nodes(), range(3)) + assert edges_equal(G.edges(), [(0, 1), (0, 2)]) + + +class TestAtlasGraphG: + """Unit tests for the :func:`~networkx.graph_atlas_g` function.""" + + @classmethod + def setup_class(cls): + cls.GAG = graph_atlas_g() + + def test_sizes(self): + G = self.GAG[0] + assert G.number_of_nodes() == 0 + assert G.number_of_edges() == 0 + + G = self.GAG[7] + assert G.number_of_nodes() == 3 + assert G.number_of_edges() == 3 + + def test_names(self): + for i, G in enumerate(self.GAG): + assert int(G.name[1:]) == i + + def test_nondecreasing_nodes(self): + # check for nondecreasing number of nodes + for n1, n2 in pairwise(map(len, self.GAG)): + assert n2 <= n1 + 1 + + def test_nondecreasing_edges(self): + # check for nondecreasing number of edges (for fixed number of + # nodes) + for n, group in groupby(self.GAG, key=nx.number_of_nodes): + for m1, m2 in pairwise(map(nx.number_of_edges, group)): + assert m2 <= m1 + 1 + + def test_nondecreasing_degree_sequence(self): + # Check for lexicographically nondecreasing degree sequences + # (for fixed number of nodes and edges). + # + # There are three exceptions to this rule in the order given in + # the "Atlas of Graphs" book, so we need to manually exclude + # those. 
+ exceptions = [("G55", "G56"), ("G1007", "G1008"), ("G1012", "G1013")] + for n, group in groupby(self.GAG, key=nx.number_of_nodes): + for m, group in groupby(group, key=nx.number_of_edges): + for G1, G2 in pairwise(group): + if (G1.name, G2.name) in exceptions: + continue + d1 = sorted(d for v, d in G1.degree()) + d2 = sorted(d for v, d in G2.degree()) + assert d1 <= d2 diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_classic.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_classic.py new file mode 100644 index 0000000..9ca4a86 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_classic.py @@ -0,0 +1,639 @@ +""" +==================== +Generators - Classic +==================== + +Unit tests for various classic graph generators in generators/classic.py +""" + +import itertools +import typing + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestGeneratorClassic: + def test_balanced_tree(self): + # balanced_tree(r,h) is a tree with (r**(h+1)-1)/(r-1) edges + for r, h in [(2, 2), (3, 3), (6, 2)]: + t = nx.balanced_tree(r, h) + order = t.order() + assert order == (r ** (h + 1) - 1) / (r - 1) + assert nx.is_connected(t) + assert t.size() == order - 1 + dh = nx.degree_histogram(t) + assert dh[0] == 0 # no nodes of 0 + assert dh[1] == r**h # nodes of degree 1 are leaves + assert dh[r] == 1 # root is degree r + assert dh[r + 1] == order - r**h - 1 # everyone else is degree r+1 + assert len(dh) == r + 2 + + def test_balanced_tree_star(self): + # balanced_tree(r,1) is the r-star + t = nx.balanced_tree(r=2, h=1) + assert nx.could_be_isomorphic(t, nx.star_graph(2)) + t = nx.balanced_tree(r=5, h=1) + assert nx.could_be_isomorphic(t, nx.star_graph(5)) + t = nx.balanced_tree(r=10, h=1) + assert nx.could_be_isomorphic(t, nx.star_graph(10)) + + def test_balanced_tree_path(self): + """Tests that the balanced tree with branching factor one is the + path graph. + + """ + # A tree of height four has five levels. 
+ T = nx.balanced_tree(1, 4)
+ P = nx.path_graph(5)
+ assert nx.could_be_isomorphic(T, P)
+
+ def test_full_rary_tree(self):
+ r = 2
+ n = 9
+ t = nx.full_rary_tree(r, n)
+ assert t.order() == n
+ assert nx.is_connected(t)
+ dh = nx.degree_histogram(t)
+ assert dh[0] == 0 # no nodes of degree 0
+ assert dh[1] == 5 # nodes of degree 1 are leaves
+ assert dh[r] == 1 # root is degree r
+ assert dh[r + 1] == 9 - 5 - 1 # everyone else is degree r+1
+ assert len(dh) == r + 2
+
+ def test_full_rary_tree_balanced(self):
+ t = nx.full_rary_tree(2, 15)
+ th = nx.balanced_tree(2, 3)
+ assert nx.could_be_isomorphic(t, th)
+
+ def test_full_rary_tree_path(self):
+ t = nx.full_rary_tree(1, 10)
+ assert nx.could_be_isomorphic(t, nx.path_graph(10))
+
+ def test_full_rary_tree_empty(self):
+ t = nx.full_rary_tree(0, 10)
+ assert nx.could_be_isomorphic(t, nx.empty_graph(10))
+ t = nx.full_rary_tree(3, 0)
+ assert nx.could_be_isomorphic(t, nx.empty_graph(0))
+
+ def test_full_rary_tree_3_20(self):
+ t = nx.full_rary_tree(3, 20)
+ assert t.order() == 20
+
+ def test_barbell_graph(self):
+ # number of nodes = 2*m1 + m2 (2 m1-complete graphs + m2-path + 2 edges)
+ # number of edges = 2*nx.number_of_edges(m1-complete graph) + m2 + 1
+ m1 = 3
+ m2 = 5
+ b = nx.barbell_graph(m1, m2)
+ assert nx.number_of_nodes(b) == 2 * m1 + m2
+ assert nx.number_of_edges(b) == m1 * (m1 - 1) + m2 + 1
+
+ m1 = 4
+ m2 = 10
+ b = nx.barbell_graph(m1, m2)
+ assert nx.number_of_nodes(b) == 2 * m1 + m2
+ assert nx.number_of_edges(b) == m1 * (m1 - 1) + m2 + 1
+
+ m1 = 3
+ m2 = 20
+ b = nx.barbell_graph(m1, m2)
+ assert nx.number_of_nodes(b) == 2 * m1 + m2
+ assert nx.number_of_edges(b) == m1 * (m1 - 1) + m2 + 1
+
+ # Raise NetworkXError if m1<2
+ m1 = 1
+ m2 = 20
+ pytest.raises(nx.NetworkXError, nx.barbell_graph, m1, m2)
+
+ # Raise NetworkXError if m2<0
+ m1 = 5
+ m2 = -2
+ pytest.raises(nx.NetworkXError, nx.barbell_graph, m1, m2)
+
+ # nx.barbell_graph(2,m) = nx.path_graph(m+4)
+ m1 = 2
+ m2 = 5
+ b = nx.barbell_graph(m1, m2)
+ assert nx.could_be_isomorphic(b, nx.path_graph(m2 + 4))
+
+ m1 = 2
+ m2 = 10
+ b = nx.barbell_graph(m1, m2)
+ assert nx.could_be_isomorphic(b, nx.path_graph(m2 + 4))
+
+ m1 = 2
+ m2 = 20
+ b = nx.barbell_graph(m1, m2)
+ assert nx.could_be_isomorphic(b, nx.path_graph(m2 + 4))
+
+ pytest.raises(
+ nx.NetworkXError, nx.barbell_graph, m1, m2, create_using=nx.DiGraph()
+ )
+
+ mb = nx.barbell_graph(m1, m2, create_using=nx.MultiGraph())
+ assert edges_equal(mb.edges(), b.edges())
+
+ def test_binomial_tree(self):
+ graphs = (None, nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)
+ for create_using in graphs:
+ for n in range(4):
+ b = nx.binomial_tree(n, create_using)
+ assert nx.number_of_nodes(b) == 2**n
+ assert nx.number_of_edges(b) == (2**n - 1)
+
+ def test_complete_graph(self):
+ # complete_graph(m) is a connected graph with
+ # m nodes and m*(m-1)/2 edges
+ for m in [0, 1, 3, 5]:
+ g = nx.complete_graph(m)
+ assert nx.number_of_nodes(g) == m
+ assert nx.number_of_edges(g) == m * (m - 1) // 2
+
+ mg = nx.complete_graph(m, create_using=nx.MultiGraph)
+ assert edges_equal(mg.edges(), g.edges())
+
+ g = nx.complete_graph("abc")
+ assert nodes_equal(g.nodes(), ["a", "b", "c"])
+ assert g.size() == 3
+
+ # creates a self-loop... should it?
+ g = nx.complete_graph("abcb") + assert nodes_equal(g.nodes(), ["a", "b", "c"]) + assert g.size() == 4 + + g = nx.complete_graph("abcb", create_using=nx.MultiGraph) + assert nodes_equal(g.nodes(), ["a", "b", "c"]) + assert g.size() == 6 + + def test_complete_digraph(self): + # complete_graph(m) is a connected graph with + # m nodes and m*(m+1)/2 edges + for m in [0, 1, 3, 5]: + g = nx.complete_graph(m, create_using=nx.DiGraph) + assert nx.number_of_nodes(g) == m + assert nx.number_of_edges(g) == m * (m - 1) + + g = nx.complete_graph("abc", create_using=nx.DiGraph) + assert len(g) == 3 + assert g.size() == 6 + assert g.is_directed() + + def test_circular_ladder_graph(self): + G = nx.circular_ladder_graph(5) + pytest.raises( + nx.NetworkXError, nx.circular_ladder_graph, 5, create_using=nx.DiGraph + ) + mG = nx.circular_ladder_graph(5, create_using=nx.MultiGraph) + assert edges_equal(mG.edges(), G.edges()) + + def test_circulant_graph(self): + # Ci_n(1) is the cycle graph for all n + Ci6_1 = nx.circulant_graph(6, [1]) + C6 = nx.cycle_graph(6) + assert edges_equal(Ci6_1.edges(), C6.edges()) + + # Ci_n(1, 2, ..., n div 2) is the complete graph for all n + Ci7 = nx.circulant_graph(7, [1, 2, 3]) + K7 = nx.complete_graph(7) + assert edges_equal(Ci7.edges(), K7.edges()) + + # Ci_6(1, 3) is K_3,3 i.e. the utility graph + Ci6_1_3 = nx.circulant_graph(6, [1, 3]) + K3_3 = nx.complete_bipartite_graph(3, 3) + assert nx.could_be_isomorphic(Ci6_1_3, K3_3) + + def test_cycle_graph(self): + G = nx.cycle_graph(4) + assert edges_equal(G.edges(), [(0, 1), (0, 3), (1, 2), (2, 3)]) + mG = nx.cycle_graph(4, create_using=nx.MultiGraph) + assert edges_equal(mG.edges(), [(0, 1), (0, 3), (1, 2), (2, 3)]) + G = nx.cycle_graph(4, create_using=nx.DiGraph) + assert not G.has_edge(2, 1) + assert G.has_edge(1, 2) + assert G.is_directed() + + G = nx.cycle_graph("abc") + assert len(G) == 3 + assert G.size() == 3 + G = nx.cycle_graph("abcb") + assert len(G) == 3 + assert G.size() == 2 + g = nx.cycle_graph("abc", nx.DiGraph) + assert len(g) == 3 + assert g.size() == 3 + assert g.is_directed() + g = nx.cycle_graph("abcb", nx.DiGraph) + assert len(g) == 3 + assert g.size() == 4 + + def test_dorogovtsev_goltsev_mendes_graph(self): + G = nx.dorogovtsev_goltsev_mendes_graph(0) + assert edges_equal(G.edges(), [(0, 1)]) + assert nodes_equal(list(G), [0, 1]) + G = nx.dorogovtsev_goltsev_mendes_graph(1) + assert edges_equal(G.edges(), [(0, 1), (0, 2), (1, 2)]) + assert nx.average_clustering(G) == 1.0 + assert nx.average_shortest_path_length(G) == 1.0 + assert sorted(nx.triangles(G).values()) == [1, 1, 1] + assert nx.is_planar(G) + G = nx.dorogovtsev_goltsev_mendes_graph(2) + assert nx.number_of_nodes(G) == 6 + assert nx.number_of_edges(G) == 9 + assert nx.average_clustering(G) == 0.75 + assert nx.average_shortest_path_length(G) == 1.4 + assert nx.is_planar(G) + G = nx.dorogovtsev_goltsev_mendes_graph(10) + assert nx.number_of_nodes(G) == 29526 + assert nx.number_of_edges(G) == 59049 + assert G.degree(0) == 1024 + assert G.degree(1) == 1024 + assert G.degree(2) == 1024 + + with pytest.raises(nx.NetworkXError, match=r"n must be greater than"): + nx.dorogovtsev_goltsev_mendes_graph(-1) + with pytest.raises(nx.NetworkXError, match=r"directed graph not supported"): + nx.dorogovtsev_goltsev_mendes_graph(7, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError, match=r"multigraph not supported"): + nx.dorogovtsev_goltsev_mendes_graph(7, create_using=nx.MultiGraph) + with pytest.raises(nx.NetworkXError): + 
nx.dorogovtsev_goltsev_mendes_graph(7, create_using=nx.MultiDiGraph) + + def test_create_using(self): + G = nx.empty_graph() + assert isinstance(G, nx.Graph) + pytest.raises(TypeError, nx.empty_graph, create_using=0.0) + pytest.raises(TypeError, nx.empty_graph, create_using="Graph") + + G = nx.empty_graph(create_using=nx.MultiGraph) + assert isinstance(G, nx.MultiGraph) + G = nx.empty_graph(create_using=nx.DiGraph) + assert isinstance(G, nx.DiGraph) + + G = nx.empty_graph(create_using=nx.DiGraph, default=nx.MultiGraph) + assert isinstance(G, nx.DiGraph) + G = nx.empty_graph(create_using=None, default=nx.MultiGraph) + assert isinstance(G, nx.MultiGraph) + G = nx.empty_graph(default=nx.MultiGraph) + assert isinstance(G, nx.MultiGraph) + + G = nx.path_graph(5) + H = nx.empty_graph(create_using=G) + assert not H.is_multigraph() + assert not H.is_directed() + assert len(H) == 0 + assert G is H + + H = nx.empty_graph(create_using=nx.MultiGraph()) + assert H.is_multigraph() + assert not H.is_directed() + assert G is not H + + # test for subclasses that also use typing.Protocol. See gh-6243 + class Mixin(typing.Protocol): + pass + + class MyGraph(Mixin, nx.DiGraph): + pass + + G = nx.empty_graph(create_using=MyGraph) + + def test_empty_graph(self): + G = nx.empty_graph() + assert nx.number_of_nodes(G) == 0 + G = nx.empty_graph(42) + assert nx.number_of_nodes(G) == 42 + assert nx.number_of_edges(G) == 0 + + G = nx.empty_graph("abc") + assert len(G) == 3 + assert G.size() == 0 + + # create empty digraph + G = nx.empty_graph(42, create_using=nx.DiGraph(name="duh")) + assert nx.number_of_nodes(G) == 42 + assert nx.number_of_edges(G) == 0 + assert isinstance(G, nx.DiGraph) + + # create empty multigraph + G = nx.empty_graph(42, create_using=nx.MultiGraph(name="duh")) + assert nx.number_of_nodes(G) == 42 + assert nx.number_of_edges(G) == 0 + assert isinstance(G, nx.MultiGraph) + + # create empty graph from another + pete = nx.petersen_graph() + G = nx.empty_graph(42, create_using=pete) + assert nx.number_of_nodes(G) == 42 + assert nx.number_of_edges(G) == 0 + assert isinstance(G, nx.Graph) + + def test_ladder_graph(self): + for i, G in [ + (0, nx.empty_graph(0)), + (1, nx.path_graph(2)), + (2, nx.hypercube_graph(2)), + (10, nx.grid_graph([2, 10])), + ]: + assert nx.could_be_isomorphic(nx.ladder_graph(i), G) + + pytest.raises(nx.NetworkXError, nx.ladder_graph, 2, create_using=nx.DiGraph) + + g = nx.ladder_graph(2) + mg = nx.ladder_graph(2, create_using=nx.MultiGraph) + assert edges_equal(mg.edges(), g.edges()) + + @pytest.mark.parametrize(("m", "n"), [(3, 5), (4, 10), (3, 20)]) + def test_lollipop_graph_right_sizes(self, m, n): + G = nx.lollipop_graph(m, n) + assert nx.number_of_nodes(G) == m + n + assert nx.number_of_edges(G) == m * (m - 1) / 2 + n + + @pytest.mark.parametrize(("m", "n"), [("ab", ""), ("abc", "defg")]) + def test_lollipop_graph_size_node_sequence(self, m, n): + G = nx.lollipop_graph(m, n) + assert nx.number_of_nodes(G) == len(m) + len(n) + assert nx.number_of_edges(G) == len(m) * (len(m) - 1) / 2 + len(n) + + def test_lollipop_graph_exceptions(self): + # Raise NetworkXError if m<2 + pytest.raises(nx.NetworkXError, nx.lollipop_graph, -1, 2) + pytest.raises(nx.NetworkXError, nx.lollipop_graph, 1, 20) + pytest.raises(nx.NetworkXError, nx.lollipop_graph, "", 20) + pytest.raises(nx.NetworkXError, nx.lollipop_graph, "a", 20) + + # Raise NetworkXError if n<0 + pytest.raises(nx.NetworkXError, nx.lollipop_graph, 5, -2) + + # raise NetworkXError if create_using is directed + with 
pytest.raises(nx.NetworkXError): + nx.lollipop_graph(2, 20, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError): + nx.lollipop_graph(2, 20, create_using=nx.MultiDiGraph) + + @pytest.mark.parametrize(("m", "n"), [(2, 0), (2, 5), (2, 10), ("ab", 20)]) + def test_lollipop_graph_same_as_path_when_m1_is_2(self, m, n): + G = nx.lollipop_graph(m, n) + assert nx.could_be_isomorphic(G, nx.path_graph(n + 2)) + + def test_lollipop_graph_for_multigraph(self): + G = nx.lollipop_graph(5, 20) + MG = nx.lollipop_graph(5, 20, create_using=nx.MultiGraph) + assert edges_equal(MG.edges(), G.edges()) + + @pytest.mark.parametrize( + ("m", "n"), + [(4, "abc"), ("abcd", 3), ([1, 2, 3, 4], "abc"), ("abcd", [1, 2, 3])], + ) + def test_lollipop_graph_mixing_input_types(self, m, n): + expected = nx.compose(nx.complete_graph(4), nx.path_graph(range(100, 103))) + expected.add_edge(0, 100) # Connect complete graph and path graph + assert nx.could_be_isomorphic(nx.lollipop_graph(m, n), expected) + + def test_lollipop_graph_non_builtin_ints(self): + np = pytest.importorskip("numpy") + G = nx.lollipop_graph(np.int32(4), np.int64(3)) + expected = nx.compose(nx.complete_graph(4), nx.path_graph(range(100, 103))) + expected.add_edge(0, 100) # Connect complete graph and path graph + assert nx.could_be_isomorphic(G, expected) + + def test_null_graph(self): + assert nx.number_of_nodes(nx.null_graph()) == 0 + + def test_path_graph(self): + p = nx.path_graph(0) + assert nx.could_be_isomorphic(p, nx.null_graph()) + + p = nx.path_graph(1) + assert nx.could_be_isomorphic(p, nx.empty_graph(1)) + + p = nx.path_graph(10) + assert nx.is_connected(p) + assert sorted(d for n, d in p.degree()) == [1, 1, 2, 2, 2, 2, 2, 2, 2, 2] + assert p.order() - 1 == p.size() + + dp = nx.path_graph(3, create_using=nx.DiGraph) + assert dp.has_edge(0, 1) + assert not dp.has_edge(1, 0) + + mp = nx.path_graph(10, create_using=nx.MultiGraph) + assert edges_equal(mp.edges(), p.edges()) + + G = nx.path_graph("abc") + assert len(G) == 3 + assert G.size() == 2 + G = nx.path_graph("abcb") + assert len(G) == 3 + assert G.size() == 2 + g = nx.path_graph("abc", nx.DiGraph) + assert len(g) == 3 + assert g.size() == 2 + assert g.is_directed() + g = nx.path_graph("abcb", nx.DiGraph) + assert len(g) == 3 + assert g.size() == 3 + + G = nx.path_graph((1, 2, 3, 2, 4)) + assert G.has_edge(2, 4) + + def test_star_graph(self): + assert nx.could_be_isomorphic(nx.star_graph(""), nx.empty_graph(0)) + assert nx.could_be_isomorphic(nx.star_graph([]), nx.empty_graph(0)) + assert nx.could_be_isomorphic(nx.star_graph(0), nx.empty_graph(1)) + assert nx.could_be_isomorphic(nx.star_graph(1), nx.path_graph(2)) + assert nx.could_be_isomorphic(nx.star_graph(2), nx.path_graph(3)) + assert nx.could_be_isomorphic( + nx.star_graph(5), nx.complete_bipartite_graph(1, 5) + ) + + s = nx.star_graph(10) + assert sorted(d for n, d in s.degree()) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10] + + pytest.raises(nx.NetworkXError, nx.star_graph, 10, create_using=nx.DiGraph) + + ms = nx.star_graph(10, create_using=nx.MultiGraph) + assert edges_equal(ms.edges(), s.edges()) + + G = nx.star_graph("abc") + assert len(G) == 3 + assert G.size() == 2 + + G = nx.star_graph("abcb") + assert len(G) == 3 + assert G.size() == 2 + G = nx.star_graph("abcb", create_using=nx.MultiGraph) + assert len(G) == 3 + assert G.size() == 3 + + G = nx.star_graph("abcdefg") + assert len(G) == 7 + assert G.size() == 6 + + def test_non_int_integers_for_star_graph(self): + np = pytest.importorskip("numpy") + G = 
nx.star_graph(np.int32(3)) + assert len(G) == 4 + assert G.size() == 3 + + @pytest.mark.parametrize(("m", "n"), [(3, 0), (3, 5), (4, 10), (3, 20)]) + def test_tadpole_graph_right_sizes(self, m, n): + G = nx.tadpole_graph(m, n) + assert nx.number_of_nodes(G) == m + n + assert nx.number_of_edges(G) == m + n - (m == 2) + + @pytest.mark.parametrize(("m", "n"), [("ab", ""), ("ab", "c"), ("abc", "defg")]) + def test_tadpole_graph_size_node_sequences(self, m, n): + G = nx.tadpole_graph(m, n) + assert nx.number_of_nodes(G) == len(m) + len(n) + assert nx.number_of_edges(G) == len(m) + len(n) - (len(m) == 2) + + def test_tadpole_graph_exceptions(self): + # Raise NetworkXError if m<2 + pytest.raises(nx.NetworkXError, nx.tadpole_graph, -1, 3) + pytest.raises(nx.NetworkXError, nx.tadpole_graph, 0, 3) + pytest.raises(nx.NetworkXError, nx.tadpole_graph, 1, 3) + + # Raise NetworkXError if n<0 + pytest.raises(nx.NetworkXError, nx.tadpole_graph, 5, -2) + + # Raise NetworkXError for digraphs + with pytest.raises(nx.NetworkXError): + nx.tadpole_graph(2, 20, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError): + nx.tadpole_graph(2, 20, create_using=nx.MultiDiGraph) + + @pytest.mark.parametrize(("m", "n"), [(2, 0), (2, 5), (2, 10), ("ab", 20)]) + def test_tadpole_graph_same_as_path_when_m_is_2(self, m, n): + G = nx.tadpole_graph(m, n) + assert nx.could_be_isomorphic(G, nx.path_graph(n + 2)) + + @pytest.mark.parametrize("m", [4, 7]) + def test_tadpole_graph_same_as_cycle_when_m2_is_0(self, m): + G = nx.tadpole_graph(m, 0) + assert nx.could_be_isomorphic(G, nx.cycle_graph(m)) + + def test_tadpole_graph_for_multigraph(self): + G = nx.tadpole_graph(5, 20) + MG = nx.tadpole_graph(5, 20, create_using=nx.MultiGraph) + assert edges_equal(MG.edges(), G.edges()) + + @pytest.mark.parametrize( + ("m", "n"), + [(4, "abc"), ("abcd", 3), ([1, 2, 3, 4], "abc"), ("abcd", [1, 2, 3])], + ) + def test_tadpole_graph_mixing_input_types(self, m, n): + expected = nx.compose(nx.cycle_graph(4), nx.path_graph(range(100, 103))) + expected.add_edge(0, 100) # Connect cycle and path + assert nx.could_be_isomorphic(nx.tadpole_graph(m, n), expected) + + def test_tadpole_graph_non_builtin_integers(self): + np = pytest.importorskip("numpy") + G = nx.tadpole_graph(np.int32(4), np.int64(3)) + expected = nx.compose(nx.cycle_graph(4), nx.path_graph(range(100, 103))) + expected.add_edge(0, 100) # Connect cycle and path + assert nx.could_be_isomorphic(G, expected) + + def test_trivial_graph(self): + assert nx.number_of_nodes(nx.trivial_graph()) == 1 + + def test_turan_graph(self): + assert nx.number_of_edges(nx.turan_graph(13, 4)) == 63 + assert nx.could_be_isomorphic( + nx.turan_graph(13, 4), nx.complete_multipartite_graph(3, 4, 3, 3) + ) + + def test_wheel_graph(self): + for n, G in [ + ("", nx.null_graph()), + (0, nx.null_graph()), + (1, nx.empty_graph(1)), + (2, nx.path_graph(2)), + (3, nx.complete_graph(3)), + (4, nx.complete_graph(4)), + ]: + g = nx.wheel_graph(n) + assert nx.could_be_isomorphic(g, G) + + g = nx.wheel_graph(10) + assert sorted(d for n, d in g.degree()) == [3, 3, 3, 3, 3, 3, 3, 3, 3, 9] + + pytest.raises(nx.NetworkXError, nx.wheel_graph, 10, create_using=nx.DiGraph) + + mg = nx.wheel_graph(10, create_using=nx.MultiGraph()) + assert edges_equal(mg.edges(), g.edges()) + + G = nx.wheel_graph("abc") + assert len(G) == 3 + assert G.size() == 3 + + G = nx.wheel_graph("abcb") + assert len(G) == 3 + assert G.size() == 4 + G = nx.wheel_graph("abcb", nx.MultiGraph) + assert len(G) == 3 + assert G.size() == 6 + + def 
test_non_int_integers_for_wheel_graph(self): + np = pytest.importorskip("numpy") + G = nx.wheel_graph(np.int32(3)) + assert len(G) == 3 + assert G.size() == 3 + + def test_complete_0_partite_graph(self): + """Tests that the complete 0-partite graph is the null graph.""" + G = nx.complete_multipartite_graph() + H = nx.null_graph() + assert nodes_equal(G, H) + assert edges_equal(G.edges(), H.edges()) + + def test_complete_1_partite_graph(self): + """Tests that the complete 1-partite graph is the empty graph.""" + G = nx.complete_multipartite_graph(3) + H = nx.empty_graph(3) + assert nodes_equal(G, H) + assert edges_equal(G.edges(), H.edges()) + + def test_complete_2_partite_graph(self): + """Tests that the complete 2-partite graph is the complete bipartite + graph. + + """ + G = nx.complete_multipartite_graph(2, 3) + H = nx.complete_bipartite_graph(2, 3) + assert nodes_equal(G, H) + assert edges_equal(G.edges(), H.edges()) + + def test_complete_multipartite_graph(self): + """Tests for generating the complete multipartite graph.""" + G = nx.complete_multipartite_graph(2, 3, 4) + blocks = [(0, 1), (2, 3, 4), (5, 6, 7, 8)] + # Within each block, no two vertices should be adjacent. + for block in blocks: + for u, v in itertools.combinations_with_replacement(block, 2): + assert v not in G[u] + assert G.nodes[u] == G.nodes[v] + # Across blocks, all vertices should be adjacent. + for block1, block2 in itertools.combinations(blocks, 2): + for u, v in itertools.product(block1, block2): + assert v in G[u] + assert G.nodes[u] != G.nodes[v] + with pytest.raises(nx.NetworkXError, match="Negative number of nodes"): + nx.complete_multipartite_graph(2, -3, 4) + + def test_kneser_graph(self): + # the petersen graph is a special case of the kneser graph when n=5 and k=2 + assert nx.could_be_isomorphic(nx.kneser_graph(5, 2), nx.petersen_graph()) + + # when k is 1, the kneser graph returns a complete graph with n vertices + for i in range(1, 7): + assert nx.could_be_isomorphic(nx.kneser_graph(i, 1), nx.complete_graph(i)) + + # the kneser graph of n and n-1 is the empty graph with n vertices + for j in range(3, 7): + assert nx.could_be_isomorphic(nx.kneser_graph(j, j - 1), nx.empty_graph(j)) + + # in general the number of edges of the kneser graph is equal to + # (n choose k) times (n-k choose k) divided by 2 + assert nx.number_of_edges(nx.kneser_graph(8, 3)) == 280 diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_cographs.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_cographs.py new file mode 100644 index 0000000..a71849b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_cographs.py @@ -0,0 +1,18 @@ +"""Unit tests for the :mod:`networkx.generators.cographs` module.""" + +import networkx as nx + + +def test_random_cograph(): + n = 3 + G = nx.random_cograph(n) + + assert len(G) == 2**n + + # Every connected subgraph of G has diameter <= 2 + if nx.is_connected(G): + assert nx.diameter(G) <= 2 + else: + components = nx.connected_components(G) + for component in components: + assert nx.diameter(G.subgraph(component)) <= 2 diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_community.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_community.py new file mode 100644 index 0000000..2fa107f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_community.py @@ -0,0 +1,362 @@ +import pytest + +import networkx as nx + + +def 
test_random_partition_graph(): + G = nx.random_partition_graph([3, 3, 3], 1, 0, seed=42) + C = G.graph["partition"] + assert C == [{0, 1, 2}, {3, 4, 5}, {6, 7, 8}] + assert len(G) == 9 + assert len(list(G.edges())) == 9 + + G = nx.random_partition_graph([3, 3, 3], 0, 1) + C = G.graph["partition"] + assert C == [{0, 1, 2}, {3, 4, 5}, {6, 7, 8}] + assert len(G) == 9 + assert len(list(G.edges())) == 27 + + G = nx.random_partition_graph([3, 3, 3], 1, 0, directed=True) + C = G.graph["partition"] + assert C == [{0, 1, 2}, {3, 4, 5}, {6, 7, 8}] + assert len(G) == 9 + assert len(list(G.edges())) == 18 + + G = nx.random_partition_graph([3, 3, 3], 0, 1, directed=True) + C = G.graph["partition"] + assert C == [{0, 1, 2}, {3, 4, 5}, {6, 7, 8}] + assert len(G) == 9 + assert len(list(G.edges())) == 54 + + G = nx.random_partition_graph([1, 2, 3, 4, 5], 0.5, 0.1) + C = G.graph["partition"] + assert C == [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}, {10, 11, 12, 13, 14}] + assert len(G) == 15 + + rpg = nx.random_partition_graph + pytest.raises(nx.NetworkXError, rpg, [1, 2, 3], 1.1, 0.1) + pytest.raises(nx.NetworkXError, rpg, [1, 2, 3], -0.1, 0.1) + pytest.raises(nx.NetworkXError, rpg, [1, 2, 3], 0.1, 1.1) + pytest.raises(nx.NetworkXError, rpg, [1, 2, 3], 0.1, -0.1) + + +def test_planted_partition_graph(): + G = nx.planted_partition_graph(4, 3, 1, 0, seed=42) + C = G.graph["partition"] + assert len(C) == 4 + assert len(G) == 12 + assert len(list(G.edges())) == 12 + + G = nx.planted_partition_graph(4, 3, 0, 1) + C = G.graph["partition"] + assert len(C) == 4 + assert len(G) == 12 + assert len(list(G.edges())) == 54 + + G = nx.planted_partition_graph(10, 4, 0.5, 0.1, seed=42) + C = G.graph["partition"] + assert len(C) == 10 + assert len(G) == 40 + + G = nx.planted_partition_graph(4, 3, 1, 0, directed=True) + C = G.graph["partition"] + assert len(C) == 4 + assert len(G) == 12 + assert len(list(G.edges())) == 24 + + G = nx.planted_partition_graph(4, 3, 0, 1, directed=True) + C = G.graph["partition"] + assert len(C) == 4 + assert len(G) == 12 + assert len(list(G.edges())) == 108 + + G = nx.planted_partition_graph(10, 4, 0.5, 0.1, seed=42, directed=True) + C = G.graph["partition"] + assert len(C) == 10 + assert len(G) == 40 + + ppg = nx.planted_partition_graph + pytest.raises(nx.NetworkXError, ppg, 3, 3, 1.1, 0.1) + pytest.raises(nx.NetworkXError, ppg, 3, 3, -0.1, 0.1) + pytest.raises(nx.NetworkXError, ppg, 3, 3, 0.1, 1.1) + pytest.raises(nx.NetworkXError, ppg, 3, 3, 0.1, -0.1) + + +def test_relaxed_caveman_graph(): + G = nx.relaxed_caveman_graph(4, 3, 0) + assert len(G) == 12 + G = nx.relaxed_caveman_graph(4, 3, 1) + assert len(G) == 12 + G = nx.relaxed_caveman_graph(4, 3, 0.5) + assert len(G) == 12 + G = nx.relaxed_caveman_graph(4, 3, 0.5, seed=42) + assert len(G) == 12 + + +def test_connected_caveman_graph(): + G = nx.connected_caveman_graph(4, 3) + assert len(G) == 12 + + G = nx.connected_caveman_graph(1, 5) + K5 = nx.complete_graph(5) + K5.remove_edge(3, 4) + assert nx.is_isomorphic(G, K5) + + # need at least 2 nodes in each clique + pytest.raises(nx.NetworkXError, nx.connected_caveman_graph, 4, 1) + + +def test_caveman_graph(): + G = nx.caveman_graph(4, 3) + assert len(G) == 12 + + G = nx.caveman_graph(5, 1) + E5 = nx.empty_graph(5) + assert nx.is_isomorphic(G, E5) + + G = nx.caveman_graph(1, 5) + K5 = nx.complete_graph(5) + assert nx.is_isomorphic(G, K5) + + +def test_gaussian_random_partition_graph(): + G = nx.gaussian_random_partition_graph(100, 10, 10, 0.3, 0.01) + assert len(G) == 100 + G = 
nx.gaussian_random_partition_graph(100, 10, 10, 0.3, 0.01, directed=True) + assert len(G) == 100 + G = nx.gaussian_random_partition_graph( + 100, 10, 10, 0.3, 0.01, directed=False, seed=42 + ) + assert len(G) == 100 + assert not isinstance(G, nx.DiGraph) + G = nx.gaussian_random_partition_graph( + 100, 10, 10, 0.3, 0.01, directed=True, seed=42 + ) + assert len(G) == 100 + assert isinstance(G, nx.DiGraph) + pytest.raises( + nx.NetworkXError, nx.gaussian_random_partition_graph, 100, 101, 10, 1, 0 + ) + # Test when clusters are likely less than 1 + G = nx.gaussian_random_partition_graph(10, 0.5, 0.5, 0.5, 0.5, seed=1) + assert len(G) == 10 + + +def test_ring_of_cliques(): + for i in range(2, 20, 3): + for j in range(2, 20, 3): + G = nx.ring_of_cliques(i, j) + assert G.number_of_nodes() == i * j + if i != 2 or j != 1: + expected_num_edges = i * (((j * (j - 1)) // 2) + 1) + else: + # the edge that already exists cannot be duplicated + expected_num_edges = i * (((j * (j - 1)) // 2) + 1) - 1 + assert G.number_of_edges() == expected_num_edges + with pytest.raises( + nx.NetworkXError, match="A ring of cliques must have at least two cliques" + ): + nx.ring_of_cliques(1, 5) + with pytest.raises( + nx.NetworkXError, match="The cliques must have at least two nodes" + ): + nx.ring_of_cliques(3, 0) + + +def test_windmill_graph(): + for n in range(2, 20, 3): + for k in range(2, 20, 3): + G = nx.windmill_graph(n, k) + assert G.number_of_nodes() == (k - 1) * n + 1 + assert G.number_of_edges() == n * k * (k - 1) / 2 + assert G.degree(0) == G.number_of_nodes() - 1 + for i in range(1, G.number_of_nodes()): + assert G.degree(i) == k - 1 + with pytest.raises( + nx.NetworkXError, match="A windmill graph must have at least two cliques" + ): + nx.windmill_graph(1, 3) + with pytest.raises( + nx.NetworkXError, match="The cliques must have at least two nodes" + ): + nx.windmill_graph(3, 0) + + +def test_stochastic_block_model(): + sizes = [75, 75, 300] + probs = [[0.25, 0.05, 0.02], [0.05, 0.35, 0.07], [0.02, 0.07, 0.40]] + G = nx.stochastic_block_model(sizes, probs, seed=0) + C = G.graph["partition"] + assert len(C) == 3 + assert len(G) == 450 + assert G.size() == 22160 + + GG = nx.stochastic_block_model(sizes, probs, range(450), seed=0) + assert G.nodes == GG.nodes + + # Test Exceptions + sbm = nx.stochastic_block_model + badnodelist = list(range(400)) # not enough nodes to match sizes + badprobs1 = [[0.25, 0.05, 1.02], [0.05, 0.35, 0.07], [0.02, 0.07, 0.40]] + badprobs2 = [[0.25, 0.05, 0.02], [0.05, -0.35, 0.07], [0.02, 0.07, 0.40]] + probs_rect1 = [[0.25, 0.05, 0.02], [0.05, -0.35, 0.07]] + probs_rect2 = [[0.25, 0.05], [0.05, -0.35], [0.02, 0.07]] + asymprobs = [[0.25, 0.05, 0.01], [0.05, -0.35, 0.07], [0.02, 0.07, 0.40]] + pytest.raises(nx.NetworkXException, sbm, sizes, badprobs1) + pytest.raises(nx.NetworkXException, sbm, sizes, badprobs2) + pytest.raises(nx.NetworkXException, sbm, sizes, probs_rect1, directed=True) + pytest.raises(nx.NetworkXException, sbm, sizes, probs_rect2, directed=True) + pytest.raises(nx.NetworkXException, sbm, sizes, asymprobs, directed=False) + pytest.raises(nx.NetworkXException, sbm, sizes, probs, badnodelist) + nodelist = [0] + list(range(449)) # repeated node name in nodelist + pytest.raises(nx.NetworkXException, sbm, sizes, probs, nodelist) + + # Extra keyword arguments test + GG = nx.stochastic_block_model(sizes, probs, seed=0, selfloops=True) + assert G.nodes == GG.nodes + GG = nx.stochastic_block_model(sizes, probs, selfloops=True, directed=True) + assert G.nodes == GG.nodes + GG 
= nx.stochastic_block_model(sizes, probs, seed=0, sparse=False) + assert G.nodes == GG.nodes + + +def test_generator(): + n = 250 + tau1 = 3 + tau2 = 1.5 + mu = 0.1 + G = nx.LFR_benchmark_graph( + n, tau1, tau2, mu, average_degree=5, min_community=20, seed=10 + ) + assert len(G) == 250 + C = {frozenset(G.nodes[v]["community"]) for v in G} + assert nx.community.is_partition(G.nodes(), C) + + +def test_invalid_tau1(): + with pytest.raises(nx.NetworkXError, match="tau2 must be greater than one"): + n = 100 + tau1 = 2 + tau2 = 1 + mu = 0.1 + nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=2) + + +def test_invalid_tau2(): + with pytest.raises(nx.NetworkXError, match="tau1 must be greater than one"): + n = 100 + tau1 = 1 + tau2 = 2 + mu = 0.1 + nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=2) + + +def test_mu_too_large(): + with pytest.raises(nx.NetworkXError, match="mu must be in the interval \\[0, 1\\]"): + n = 100 + tau1 = 2 + tau2 = 2 + mu = 1.1 + nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=2) + + +def test_mu_too_small(): + with pytest.raises(nx.NetworkXError, match="mu must be in the interval \\[0, 1\\]"): + n = 100 + tau1 = 2 + tau2 = 2 + mu = -1 + nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=2) + + +def test_both_degrees_none(): + with pytest.raises( + nx.NetworkXError, + match="Must assign exactly one of min_degree and average_degree", + ): + n = 100 + tau1 = 2 + tau2 = 2 + mu = 1 + nx.LFR_benchmark_graph(n, tau1, tau2, mu) + + +def test_neither_degrees_none(): + with pytest.raises( + nx.NetworkXError, + match="Must assign exactly one of min_degree and average_degree", + ): + n = 100 + tau1 = 2 + tau2 = 2 + mu = 1 + nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=2, average_degree=5) + + +def test_max_iters_exceeded(): + with pytest.raises( + nx.ExceededMaxIterations, + match="Could not assign communities; try increasing min_community", + ): + n = 10 + tau1 = 2 + tau2 = 2 + mu = 0.1 + nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=2, max_iters=10, seed=1) + + +def test_max_deg_out_of_range(): + with pytest.raises( + nx.NetworkXError, match="max_degree must be in the interval \\(0, n\\]" + ): + n = 10 + tau1 = 2 + tau2 = 2 + mu = 0.1 + nx.LFR_benchmark_graph( + n, tau1, tau2, mu, max_degree=n + 1, max_iters=10, seed=1 + ) + + +def test_max_community(): + n = 250 + tau1 = 3 + tau2 = 1.5 + mu = 0.1 + G = nx.LFR_benchmark_graph( + n, + tau1, + tau2, + mu, + average_degree=5, + max_degree=100, + min_community=50, + max_community=200, + seed=10, + ) + assert len(G) == 250 + C = {frozenset(G.nodes[v]["community"]) for v in G} + assert nx.community.is_partition(G.nodes(), C) + + +def test_powerlaw_iterations_exceeded(): + with pytest.raises( + nx.ExceededMaxIterations, match="Could not create power law sequence" + ): + n = 100 + tau1 = 2 + tau2 = 2 + mu = 1 + nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=2, max_iters=0) + + +def test_no_scipy_zeta(): + zeta2 = 1.6449340668482264 + assert abs(zeta2 - nx.generators.community._hurwitz_zeta(2, 1, 0.0001)) < 0.01 + + +def test_generate_min_degree_itr(): + with pytest.raises( + nx.ExceededMaxIterations, match="Could not match average_degree" + ): + nx.generators.community._generate_min_degree(2, 2, 1, 0.01, 0) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_degree_seq.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_degree_seq.py new file mode 100644 index 0000000..c7a622f --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_degree_seq.py @@ -0,0 +1,237 @@ +import pytest + +import networkx as nx + + +class TestConfigurationModel: + """Unit tests for the :func:`~networkx.configuration_model` + function. + + """ + + def test_empty_degree_sequence(self): + """Tests that an empty degree sequence yields the null graph.""" + G = nx.configuration_model([]) + assert len(G) == 0 + + def test_degree_zero(self): + """Tests that a degree sequence of all zeros yields the empty + graph. + + """ + G = nx.configuration_model([0, 0, 0]) + assert len(G) == 3 + assert G.number_of_edges() == 0 + + def test_degree_sequence(self): + """Tests that the degree sequence of the generated graph matches + the input degree sequence. + + """ + deg_seq = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + G = nx.configuration_model(deg_seq, seed=12345678) + assert sorted((d for n, d in G.degree()), reverse=True) == [ + 5, + 3, + 3, + 3, + 3, + 2, + 2, + 2, + 1, + 1, + 1, + ] + assert sorted((d for n, d in G.degree(range(len(deg_seq)))), reverse=True) == [ + 5, + 3, + 3, + 3, + 3, + 2, + 2, + 2, + 1, + 1, + 1, + ] + + def test_random_seed(self): + """Tests that each call with the same random seed generates the + same graph. + + """ + deg_seq = [3] * 12 + G1 = nx.configuration_model(deg_seq, seed=1000) + G2 = nx.configuration_model(deg_seq, seed=1000) + assert nx.is_isomorphic(G1, G2) + G1 = nx.configuration_model(deg_seq, seed=10) + G2 = nx.configuration_model(deg_seq, seed=10) + assert nx.is_isomorphic(G1, G2) + + def test_directed_disallowed(self): + """Tests that attempting to create a configuration model graph + using a directed graph yields an exception. + + """ + with pytest.raises(nx.NetworkXNotImplemented): + nx.configuration_model([], create_using=nx.DiGraph()) + + def test_odd_degree_sum(self): + """Tests that a degree sequence whose sum is odd yields an + exception. 
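+ (By the handshake lemma, the degree sum equals twice the number of
+ edges, so no graph can realize an odd degree sum.)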
+ + """ + with pytest.raises(nx.NetworkXError): + nx.configuration_model([1, 2]) + + +def test_directed_configuration_raise_unequal(): + with pytest.raises(nx.NetworkXError): + zin = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1] + zout = [5, 3, 3, 3, 3, 2, 2, 2, 1, 2] + nx.directed_configuration_model(zin, zout) + + +def test_directed_configuration_model(): + G = nx.directed_configuration_model([], [], seed=0) + assert len(G) == 0 + + +def test_simple_directed_configuration_model(): + G = nx.directed_configuration_model([1, 1], [1, 1], seed=0) + assert len(G) == 2 + + +def test_expected_degree_graph_empty(): + # empty graph has empty degree sequence + deg_seq = [] + G = nx.expected_degree_graph(deg_seq) + assert dict(G.degree()) == {} + + +def test_expected_degree_graph(): + # test that fixed seed delivers the same graph + deg_seq = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] + G1 = nx.expected_degree_graph(deg_seq, seed=1000) + assert len(G1) == 12 + + G2 = nx.expected_degree_graph(deg_seq, seed=1000) + assert nx.is_isomorphic(G1, G2) + + G1 = nx.expected_degree_graph(deg_seq, seed=10) + G2 = nx.expected_degree_graph(deg_seq, seed=10) + assert nx.is_isomorphic(G1, G2) + + +def test_expected_degree_graph_selfloops(): + deg_seq = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] + G1 = nx.expected_degree_graph(deg_seq, seed=1000, selfloops=False) + G2 = nx.expected_degree_graph(deg_seq, seed=1000, selfloops=False) + assert nx.is_isomorphic(G1, G2) + assert len(G1) == 12 + + +def test_expected_degree_graph_skew(): + deg_seq = [10, 2, 2, 2, 2] + G1 = nx.expected_degree_graph(deg_seq, seed=1000) + G2 = nx.expected_degree_graph(deg_seq, seed=1000) + assert nx.is_isomorphic(G1, G2) + assert len(G1) == 5 + + +def test_havel_hakimi_construction(): + G = nx.havel_hakimi_graph([]) + assert len(G) == 0 + + z = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + pytest.raises(nx.NetworkXError, nx.havel_hakimi_graph, z) + z = ["A", 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + pytest.raises(nx.NetworkXError, nx.havel_hakimi_graph, z) + + z = [5, 4, 3, 3, 3, 2, 2, 2] + G = nx.havel_hakimi_graph(z) + G = nx.configuration_model(z) + z = [6, 5, 4, 4, 2, 1, 1, 1] + pytest.raises(nx.NetworkXError, nx.havel_hakimi_graph, z) + + z = [10, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2] + + G = nx.havel_hakimi_graph(z) + + pytest.raises(nx.NetworkXError, nx.havel_hakimi_graph, z, create_using=nx.DiGraph()) + + +def test_directed_havel_hakimi(): + # Test range of valid directed degree sequences + n, r = 100, 10 + p = 1.0 / r + for i in range(r): + G1 = nx.erdos_renyi_graph(n, p * (i + 1), None, True) + din1 = [d for n, d in G1.in_degree()] + dout1 = [d for n, d in G1.out_degree()] + G2 = nx.directed_havel_hakimi_graph(din1, dout1) + din2 = [d for n, d in G2.in_degree()] + dout2 = [d for n, d in G2.out_degree()] + assert sorted(din1) == sorted(din2) + assert sorted(dout1) == sorted(dout2) + + # Test non-graphical sequence + dout = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + din = [103, 102, 102, 102, 102, 102, 102, 102, 102, 102] + pytest.raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout) + # Test valid sequences + dout = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + din = [2, 2, 2, 2, 2, 2, 2, 2, 0, 2] + G2 = nx.directed_havel_hakimi_graph(din, dout) + dout2 = (d for n, d in G2.out_degree()) + din2 = (d for n, d in G2.in_degree()) + assert sorted(dout) == sorted(dout2) + assert sorted(din) == sorted(din2) + # Test unequal sums + din = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + pytest.raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout) + # Test for negative values + 
din = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2] + pytest.raises(nx.exception.NetworkXError, nx.directed_havel_hakimi_graph, din, dout) + + +def test_degree_sequence_tree(): + z = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + G = nx.degree_sequence_tree(z) + assert len(G) == len(z) + assert len(list(G.edges())) == sum(z) / 2 + + pytest.raises( + nx.NetworkXError, nx.degree_sequence_tree, z, create_using=nx.DiGraph() + ) + + z = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + pytest.raises(nx.NetworkXError, nx.degree_sequence_tree, z) + + +def test_random_degree_sequence_graph(): + d = [1, 2, 2, 3] + G = nx.random_degree_sequence_graph(d, seed=42) + assert d == sorted(d for n, d in G.degree()) + + +def test_random_degree_sequence_graph_raise(): + z = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + pytest.raises(nx.NetworkXUnfeasible, nx.random_degree_sequence_graph, z) + + +def test_random_degree_sequence_large(): + G1 = nx.fast_gnp_random_graph(100, 0.1, seed=42) + d1 = [d for n, d in G1.degree()] + G2 = nx.random_degree_sequence_graph(d1, seed=42) + d2 = [d for n, d in G2.degree()] + assert sorted(d1) == sorted(d2) + + +def test_random_degree_sequence_iterator(): + G1 = nx.fast_gnp_random_graph(100, 0.1, seed=42) + d1 = (d for n, d in G1.degree()) + G2 = nx.random_degree_sequence_graph(d1, seed=42) + assert len(G2) > 0 diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_directed.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_directed.py new file mode 100644 index 0000000..489ca3c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_directed.py @@ -0,0 +1,182 @@ +"""Generators - Directed Graphs +---------------------------- +""" + +import pytest + +import networkx as nx +from networkx.classes import Graph, MultiDiGraph +from networkx.generators.directed import ( + _random_k_out_graph_numpy, + _random_k_out_graph_python, + gn_graph, + gnc_graph, + gnr_graph, + random_k_out_graph, + random_uniform_k_out_graph, + scale_free_graph, +) + +try: + import numpy as np + + has_numpy = True +except ImportError: + has_numpy = False + + +class TestGeneratorsDirected: + def test_smoke_test_random_graphs(self): + gn_graph(100) + gnr_graph(100, 0.5) + gnc_graph(100) + scale_free_graph(100) + + gn_graph(100, seed=42) + gnr_graph(100, 0.5, seed=42) + gnc_graph(100, seed=42) + scale_free_graph(100, seed=42) + + def test_create_using_keyword_arguments(self): + pytest.raises(nx.NetworkXError, gn_graph, 100, create_using=Graph()) + pytest.raises(nx.NetworkXError, gnr_graph, 100, 0.5, create_using=Graph()) + pytest.raises(nx.NetworkXError, gnc_graph, 100, create_using=Graph()) + G = gn_graph(100, seed=1) + MG = gn_graph(100, create_using=MultiDiGraph(), seed=1) + assert sorted(G.edges()) == sorted(MG.edges()) + G = gnr_graph(100, 0.5, seed=1) + MG = gnr_graph(100, 0.5, create_using=MultiDiGraph(), seed=1) + assert sorted(G.edges()) == sorted(MG.edges()) + G = gnc_graph(100, seed=1) + MG = gnc_graph(100, create_using=MultiDiGraph(), seed=1) + assert sorted(G.edges()) == sorted(MG.edges()) + + G = scale_free_graph( + 100, + alpha=0.3, + beta=0.4, + gamma=0.3, + delta_in=0.3, + delta_out=0.1, + initial_graph=nx.cycle_graph(4, create_using=MultiDiGraph), + seed=1, + ) + pytest.raises(ValueError, scale_free_graph, 100, 0.5, 0.4, 0.3) + pytest.raises(ValueError, scale_free_graph, 100, alpha=-0.3) + pytest.raises(ValueError, scale_free_graph, 100, beta=-0.3) + pytest.raises(ValueError, scale_free_graph, 100, gamma=-0.3) + + def test_parameters(self): + G = nx.DiGraph() + 
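+        # Build the expected result by hand: a lone node 0, which each of
+        # the n=1 generators below should reproduce.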
G.add_node(0) + + def kernel(x): + return x + + assert nx.is_isomorphic(gn_graph(1), G) + assert nx.is_isomorphic(gn_graph(1, kernel=kernel), G) + assert nx.is_isomorphic(gnc_graph(1), G) + assert nx.is_isomorphic(gnr_graph(1, 0.5), G) + + +def test_scale_free_graph_negative_delta(): + with pytest.raises(ValueError, match="delta_in must be >= 0."): + scale_free_graph(10, delta_in=-1) + with pytest.raises(ValueError, match="delta_out must be >= 0."): + scale_free_graph(10, delta_out=-1) + + +def test_non_numeric_ordering(): + G = MultiDiGraph([("a", "b"), ("b", "c"), ("c", "a")]) + s = scale_free_graph(3, initial_graph=G) + assert len(s) == 3 + assert len(s.edges) == 3 + + +@pytest.mark.parametrize("ig", (nx.Graph(), nx.DiGraph([(0, 1)]))) +def test_scale_free_graph_initial_graph_kwarg(ig): + with pytest.raises(nx.NetworkXError): + scale_free_graph(100, initial_graph=ig) + + +class TestRandomKOutGraph: + """Unit tests for the + :func:`~networkx.generators.directed.random_k_out_graph` function. + + """ + + @pytest.mark.parametrize( + "f", (_random_k_out_graph_numpy, _random_k_out_graph_python) + ) + def test_regularity(self, f): + """Tests that the generated graph is `k`-out-regular.""" + if (f == _random_k_out_graph_numpy) and not has_numpy: + pytest.skip() + n = 10 + k = 3 + alpha = 1 + G = f(n, k, alpha) + assert all(d == k for v, d in G.out_degree()) + G = f(n, k, alpha, seed=42) + assert all(d == k for v, d in G.out_degree()) + + @pytest.mark.parametrize( + "f", (_random_k_out_graph_numpy, _random_k_out_graph_python) + ) + def test_no_self_loops(self, f): + """Tests for forbidding self-loops.""" + if (f == _random_k_out_graph_numpy) and not has_numpy: + pytest.skip() + n = 10 + k = 3 + alpha = 1 + G = f(n, k, alpha, self_loops=False) + assert nx.number_of_selfloops(G) == 0 + + def test_negative_alpha(self): + with pytest.raises(ValueError, match="alpha must be positive"): + random_k_out_graph(10, 3, -1) + + +class TestUniformRandomKOutGraph: + """Unit tests for the + :func:`~networkx.generators.directed.random_uniform_k_out_graph` + function. 
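+    Every node receives exactly `k` out-edges, with the targets sampled
+    uniformly at random (with or without replacement).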
+ + """ + + def test_regularity(self): + """Tests that the generated graph is `k`-out-regular.""" + n = 10 + k = 3 + G = random_uniform_k_out_graph(n, k) + assert all(d == k for v, d in G.out_degree()) + G = random_uniform_k_out_graph(n, k, seed=42) + assert all(d == k for v, d in G.out_degree()) + + def test_no_self_loops(self): + """Tests for forbidding self-loops.""" + n = 10 + k = 3 + G = random_uniform_k_out_graph(n, k, self_loops=False) + assert nx.number_of_selfloops(G) == 0 + assert all(d == k for v, d in G.out_degree()) + + def test_with_replacement(self): + n = 10 + k = 3 + G = random_uniform_k_out_graph(n, k, with_replacement=True) + assert G.is_multigraph() + assert all(d == k for v, d in G.out_degree()) + n = 10 + k = 9 + G = random_uniform_k_out_graph(n, k, with_replacement=False, self_loops=False) + assert nx.number_of_selfloops(G) == 0 + assert all(d == k for v, d in G.out_degree()) + + def test_without_replacement(self): + n = 10 + k = 3 + G = random_uniform_k_out_graph(n, k, with_replacement=False) + assert not G.is_multigraph() + assert all(d == k for v, d in G.out_degree()) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_duplication.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_duplication.py new file mode 100644 index 0000000..9b6100b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_duplication.py @@ -0,0 +1,103 @@ +"""Unit tests for the :mod:`networkx.generators.duplication` module.""" + +import pytest + +import networkx as nx + + +class TestDuplicationDivergenceGraph: + """Unit tests for the + :func:`networkx.generators.duplication.duplication_divergence_graph` + function. + + """ + + def test_final_size(self): + G = nx.duplication_divergence_graph(3, p=1) + assert len(G) == 3 + G = nx.duplication_divergence_graph(3, p=1, seed=42) + assert len(G) == 3 + + def test_probability_too_large(self): + with pytest.raises(nx.NetworkXError): + nx.duplication_divergence_graph(3, p=2) + + def test_probability_too_small(self): + with pytest.raises(nx.NetworkXError): + nx.duplication_divergence_graph(3, p=-1) + + def test_non_extreme_probability_value(self): + G = nx.duplication_divergence_graph(6, p=0.3, seed=42) + assert len(G) == 6 + assert list(G.degree()) == [(0, 2), (1, 3), (2, 2), (3, 3), (4, 1), (5, 1)] + + def test_minimum_desired_nodes(self): + with pytest.raises( + nx.NetworkXError, match=".*n must be greater than or equal to 2" + ): + nx.duplication_divergence_graph(1, p=1) + + def test_create_using(self): + class DummyGraph(nx.Graph): + pass + + class DummyDiGraph(nx.DiGraph): + pass + + G = nx.duplication_divergence_graph(6, 0.3, seed=42, create_using=DummyGraph) + assert isinstance(G, DummyGraph) + with pytest.raises(nx.NetworkXError, match="create_using must not be directed"): + nx.duplication_divergence_graph(6, 0.3, seed=42, create_using=DummyDiGraph) + + +class TestPartialDuplicationGraph: + """Unit tests for the + :func:`networkx.generators.duplication.partial_duplication_graph` + function. 
+ + """ + + def test_final_size(self): + N = 10 + n = 5 + p = 0.5 + q = 0.5 + G = nx.partial_duplication_graph(N, n, p, q) + assert len(G) == N + G = nx.partial_duplication_graph(N, n, p, q, seed=42) + assert len(G) == N + + def test_initial_clique_size(self): + N = 10 + n = 10 + p = 0.5 + q = 0.5 + G = nx.partial_duplication_graph(N, n, p, q) + assert len(G) == n + + def test_invalid_initial_size(self): + with pytest.raises(nx.NetworkXError): + N = 5 + n = 10 + p = 0.5 + q = 0.5 + G = nx.partial_duplication_graph(N, n, p, q) + + def test_invalid_probabilities(self): + N = 1 + n = 1 + for p, q in [(0.5, 2), (0.5, -1), (2, 0.5), (-1, 0.5)]: + args = (N, n, p, q) + pytest.raises(nx.NetworkXError, nx.partial_duplication_graph, *args) + + def test_create_using(self): + class DummyGraph(nx.Graph): + pass + + class DummyDiGraph(nx.DiGraph): + pass + + G = nx.partial_duplication_graph(10, 5, 0.5, 0.5, create_using=DummyGraph) + assert isinstance(G, DummyGraph) + with pytest.raises(nx.NetworkXError, match="create_using must not be directed"): + nx.partial_duplication_graph(10, 5, 0.5, 0.5, create_using=DummyDiGraph) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_ego.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_ego.py new file mode 100644 index 0000000..f6fc779 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_ego.py @@ -0,0 +1,39 @@ +""" +ego graph +--------- +""" + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestGeneratorEgo: + def test_ego(self): + G = nx.star_graph(3) + H = nx.ego_graph(G, 0) + assert nx.is_isomorphic(G, H) + G.add_edge(1, 11) + G.add_edge(2, 22) + G.add_edge(3, 33) + H = nx.ego_graph(G, 0) + assert nx.is_isomorphic(nx.star_graph(3), H) + G = nx.path_graph(3) + H = nx.ego_graph(G, 0) + assert edges_equal(H.edges(), [(0, 1)]) + H = nx.ego_graph(G, 0, undirected=True) + assert edges_equal(H.edges(), [(0, 1)]) + H = nx.ego_graph(G, 0, center=False) + assert edges_equal(H.edges(), []) + + def test_ego_distance(self): + G = nx.Graph() + G.add_edge(0, 1, weight=2, distance=1) + G.add_edge(1, 2, weight=2, distance=2) + G.add_edge(2, 3, weight=2, distance=1) + assert nodes_equal(nx.ego_graph(G, 0, radius=3).nodes(), [0, 1, 2, 3]) + eg = nx.ego_graph(G, 0, radius=3, distance="weight") + assert nodes_equal(eg.nodes(), [0, 1]) + eg = nx.ego_graph(G, 0, radius=3, distance="weight", undirected=True) + assert nodes_equal(eg.nodes(), [0, 1]) + eg = nx.ego_graph(G, 0, radius=3, distance="distance") + assert nodes_equal(eg.nodes(), [0, 1, 2]) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_expanders.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_expanders.py new file mode 100644 index 0000000..7584c28 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_expanders.py @@ -0,0 +1,171 @@ +"""Unit tests for the :mod:`networkx.generators.expanders` module.""" + +import pytest + +import networkx as nx + + +@pytest.mark.parametrize("n", (2, 3, 5, 6, 10)) +def test_margulis_gabber_galil_graph_properties(n): + g = nx.margulis_gabber_galil_graph(n) + assert g.number_of_nodes() == n * n + for node in g: + assert g.degree(node) == 8 + assert len(node) == 2 + for i in node: + assert int(i) == i + assert 0 <= i < n + + +@pytest.mark.parametrize("n", (2, 3, 5, 6, 10)) +def test_margulis_gabber_galil_graph_eigvals(n): + np = pytest.importorskip("numpy") + sp = pytest.importorskip("scipy") + 
+    g = nx.margulis_gabber_galil_graph(n)
+    # Eigenvalues are already sorted when using scipy's eigvalsh, but the
+    # numpy implementation does not guarantee order, so sort anyway.
+    w = sorted(sp.linalg.eigvalsh(nx.adjacency_matrix(g).toarray()))
+    assert w[-2] < 5 * np.sqrt(2)
+
+
+@pytest.mark.parametrize("p", (3, 5, 7, 11))  # Primes
+def test_chordal_cycle_graph(p):
+    """Test for the :func:`networkx.chordal_cycle_graph` function."""
+    G = nx.chordal_cycle_graph(p)
+    assert len(G) == p
+    # TODO The second largest eigenvalue should be smaller than a constant,
+    # independent of the number of nodes in the graph:
+    #
+    #     eigs = sorted(sp.linalg.eigvalsh(nx.adjacency_matrix(G).toarray()))
+    #     assert_less(eigs[-2], ...)
+    #
+
+
+@pytest.mark.parametrize("p", (3, 5, 7, 11, 13))  # Primes
+def test_paley_graph(p):
+    """Test for the :func:`networkx.paley_graph` function."""
+    G = nx.paley_graph(p)
+    # G has p nodes
+    assert len(G) == p
+    # G is (p-1)/2-regular
+    in_degrees = {G.in_degree(node) for node in G.nodes}
+    out_degrees = {G.out_degree(node) for node in G.nodes}
+    assert len(in_degrees) == 1 and in_degrees.pop() == (p - 1) // 2
+    assert len(out_degrees) == 1 and out_degrees.pop() == (p - 1) // 2
+
+    # If p = 1 mod 4, -1 is a square mod p and therefore the
+    # edges in the Paley graph are symmetric.
+    if p % 4 == 1:
+        for u, v in G.edges:
+            assert (v, u) in G.edges
+
+
+@pytest.mark.parametrize("d, n", [(2, 7), (4, 10), (4, 16)])
+def test_maybe_regular_expander(d, n):
+    pytest.importorskip("numpy")
+    G = nx.maybe_regular_expander(n, d, seed=1729)
+
+    assert len(G) == n, "Should have n nodes"
+    assert len(G.edges) == n * d / 2, "Should have n*d/2 edges"
+    assert nx.is_k_regular(G, d), "Should be d-regular"
+
+
+def test_maybe_regular_expander_max_tries():
+    pytest.importorskip("numpy")
+    d, n = 4, 10
+    msg = "Too many iterations in maybe_regular_expander"
+    with pytest.raises(nx.NetworkXError, match=msg):
+        nx.maybe_regular_expander(n, d, max_tries=100, seed=6818)  # See gh-8048
+
+    nx.maybe_regular_expander(n, d, max_tries=130, seed=6818)
+
+
+@pytest.mark.parametrize("n", (3, 5, 6, 10))
+def test_is_regular_expander(n):
+    pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+    G = nx.complete_graph(n)
+
+    assert nx.is_regular_expander(G), "Should be a regular expander"
+
+
+@pytest.mark.parametrize("d, n", [(2, 7), (4, 10), (4, 16), (4, 2000)])
+def test_random_regular_expander(d, n):
+    pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+    G = nx.random_regular_expander_graph(n, d, seed=1729)
+
+    assert len(G) == n, "Should have n nodes"
+    assert len(G.edges) == n * d / 2, "Should have n*d/2 edges"
+    assert nx.is_k_regular(G, d), "Should be d-regular"
+    assert nx.is_regular_expander(G), "Should be a regular expander"
+
+
+def test_random_regular_expander_explicit_construction():
+    pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+    G = nx.random_regular_expander_graph(d=4, n=5, seed=1729)
+
+    assert len(G) == 5 and len(G.edges) == 10, "Should be a complete graph"
+
+
+@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph, nx.MultiDiGraph))
+def test_margulis_gabber_galil_graph_badinput(graph_type):
+    with pytest.raises(
+        nx.NetworkXError, match="`create_using` must be an undirected multigraph"
+    ):
+        nx.margulis_gabber_galil_graph(3, create_using=graph_type)
+
+
+@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph, nx.MultiDiGraph))
+def test_chordal_cycle_graph_badinput(graph_type):
+    with pytest.raises(
+        nx.NetworkXError, match="`create_using` must be an undirected multigraph"
+    ):
+        nx.chordal_cycle_graph(3, create_using=graph_type)
+
+
+def test_paley_graph_badinput():
+    with pytest.raises(
+        nx.NetworkXError, match="`create_using` cannot be a multigraph."
+    ):
+        nx.paley_graph(3, create_using=nx.MultiGraph)
+
+
+def test_maybe_regular_expander_badinput():
+    pytest.importorskip("numpy")
+
+    with pytest.raises(nx.NetworkXError, match="n must be a positive integer"):
+        nx.maybe_regular_expander(n=-1, d=2)
+
+    with pytest.raises(nx.NetworkXError, match="d must be greater than or equal to 2"):
+        nx.maybe_regular_expander(n=10, d=0)
+
+    with pytest.raises(nx.NetworkXError, match="Need n-1>= d to have room"):
+        nx.maybe_regular_expander(n=5, d=6)
+
+
+def test_is_regular_expander_badinput():
+    pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    with pytest.raises(nx.NetworkXError, match="epsilon must be non negative"):
+        nx.is_regular_expander(nx.Graph(), epsilon=-1)
+
+
+def test_random_regular_expander_badinput():
+    pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    with pytest.raises(nx.NetworkXError, match="n must be a positive integer"):
+        nx.random_regular_expander_graph(n=-1, d=2)
+
+    with pytest.raises(nx.NetworkXError, match="d must be greater than or equal to 2"):
+        nx.random_regular_expander_graph(n=10, d=0)
+
+    with pytest.raises(nx.NetworkXError, match="Need n-1>= d to have room"):
+        nx.random_regular_expander_graph(n=5, d=6)
+
+    with pytest.raises(nx.NetworkXError, match="epsilon must be non negative"):
+        nx.random_regular_expander_graph(n=4, d=2, epsilon=-1)
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_geometric.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_geometric.py
new file mode 100644
index 0000000..fc4ce66
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_geometric.py
@@ -0,0 +1,488 @@
+import math
+import random
+from itertools import combinations
+
+import pytest
+
+import networkx as nx
+
+
+def l1dist(x, y):
+    return sum(abs(a - b) for a, b in zip(x, y))
+
+
+class TestRandomGeometricGraph:
+    """Unit tests for :func:`~networkx.random_geometric_graph`"""
+
+    def test_number_of_nodes(self):
+        G = nx.random_geometric_graph(50, 0.25, seed=42)
+        assert len(G) == 50
+        G = nx.random_geometric_graph(range(50), 0.25, seed=42)
+        assert len(G) == 50
+
+    def test_distances(self):
+        """Tests that pairs of vertices are adjacent if and only if they
+        are within the prescribed radius.
+        """
+        # Use the Euclidean metric, the default according to the
+        # documentation.
+        G = nx.random_geometric_graph(50, 0.25)
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+            # Nonadjacent vertices must be at greater distance.
+            else:
+                assert not math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_p(self):
+        """Tests for providing an alternate distance metric to the generator."""
+        # Use the L1 metric.
+        G = nx.random_geometric_graph(50, 0.25, p=1)
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert l1dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+            # Nonadjacent vertices must be at greater distance.
+            else:
+                assert not l1dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_node_names(self):
+        """Tests using values other than sequential numbers as node IDs."""
+        import string
+
+        nodes = list(string.ascii_lowercase)
+        G = nx.random_geometric_graph(nodes, 0.25)
+        assert len(G) == len(nodes)
+
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+            # Nonadjacent vertices must be at greater distance.
+            else:
+                assert not math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_pos_name(self):
+        G = nx.random_geometric_graph(50, 0.25, seed=42, pos_name="coords")
+        assert all(len(d["coords"]) == 2 for n, d in G.nodes.items())
+
+
+class TestSoftRandomGeometricGraph:
+    """Unit tests for :func:`~networkx.soft_random_geometric_graph`"""
+
+    def test_number_of_nodes(self):
+        G = nx.soft_random_geometric_graph(50, 0.25, seed=42)
+        assert len(G) == 50
+        G = nx.soft_random_geometric_graph(range(50), 0.25, seed=42)
+        assert len(G) == 50
+
+    def test_distances(self):
+        """Tests that pairs of vertices are adjacent only if they are
+        within the prescribed radius.
+        """
+        # Use the Euclidean metric, the default according to the
+        # documentation.
+        G = nx.soft_random_geometric_graph(50, 0.25)
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_p(self):
+        """Tests for providing an alternate distance metric to the generator."""
+
+        # Use the L1 metric.
+        def dist(x, y):
+            return sum(abs(a - b) for a, b in zip(x, y))
+
+        G = nx.soft_random_geometric_graph(50, 0.25, p=1)
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_node_names(self):
+        """Tests using values other than sequential numbers as node IDs."""
+        import string
+
+        nodes = list(string.ascii_lowercase)
+        G = nx.soft_random_geometric_graph(nodes, 0.25)
+        assert len(G) == len(nodes)
+
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_p_dist_default(self):
+        """Tests that the default p_dist = 0.5 returns a graph with an edge
+        count <= that of the RGG with the same n, radius, dim and positions.
+        """
+        nodes = 50
+        dim = 2
+        pos = {v: [random.random() for i in range(dim)] for v in range(nodes)}
+        RGG = nx.random_geometric_graph(50, 0.25, pos=pos)
+        SRGG = nx.soft_random_geometric_graph(50, 0.25, pos=pos)
+        assert len(SRGG.edges()) <= len(RGG.edges())
+
+    def test_p_dist_zero(self):
+        """Tests that p_dist = 0 returns a disconnected graph with 0 edges."""
+
+        def p_dist(dist):
+            return 0
+
+        G = nx.soft_random_geometric_graph(50, 0.25, p_dist=p_dist)
+        assert len(G.edges) == 0
+
+    def test_pos_name(self):
+        G = nx.soft_random_geometric_graph(50, 0.25, seed=42, pos_name="coords")
+        assert all(len(d["coords"]) == 2 for n, d in G.nodes.items())
+
+
+def join(G, u, v, theta, alpha, metric):
+    """Returns ``True`` if and only if nodes ``u`` and ``v`` should be
+    joined, according to the threshold condition for geographical
+    threshold graphs.
+
+    ``G`` is an undirected NetworkX graph, and ``u`` and ``v`` are nodes
+    in that graph. The nodes must have node attributes ``'pos'`` and
+    ``'weight'``.
+
+    ``metric`` is a distance metric.
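+
+    For example, with ``theta=10`` and ``alpha=-2`` (the values used in
+    the tests below), nodes of weights 1.0 and 1.5 at distance 0.5 are
+    joined, since ``(1.0 + 1.5) * 0.5 ** -2 == 10.0 >= 10``.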
+ """ + du, dv = G.nodes[u], G.nodes[v] + u_pos, v_pos = du["pos"], dv["pos"] + u_weight, v_weight = du["weight"], dv["weight"] + return (u_weight + v_weight) * metric(u_pos, v_pos) ** alpha >= theta + + +class TestGeographicalThresholdGraph: + """Unit tests for :func:`~networkx.geographical_threshold_graph`""" + + def test_number_of_nodes(self): + G = nx.geographical_threshold_graph(50, 100, seed=42) + assert len(G) == 50 + G = nx.geographical_threshold_graph(range(50), 100, seed=42) + assert len(G) == 50 + + def test_distances(self): + """Tests that pairs of vertices adjacent if and only if their + distances meet the given threshold. + """ + # Use the Euclidean metric and alpha = -2 + # the default according to the documentation. + G = nx.geographical_threshold_graph(50, 10) + for u, v in combinations(G, 2): + # Adjacent vertices must exceed the threshold. + if v in G[u]: + assert join(G, u, v, 10, -2, math.dist) + # Nonadjacent vertices must not exceed the threshold. + else: + assert not join(G, u, v, 10, -2, math.dist) + + def test_metric(self): + """Tests for providing an alternate distance metric to the generator.""" + # Use the L1 metric. + G = nx.geographical_threshold_graph(50, 10, metric=l1dist) + for u, v in combinations(G, 2): + # Adjacent vertices must exceed the threshold. + if v in G[u]: + assert join(G, u, v, 10, -2, l1dist) + # Nonadjacent vertices must not exceed the threshold. + else: + assert not join(G, u, v, 10, -2, l1dist) + + def test_p_dist_zero(self): + """Tests if p_dict = 0 returns disconnected graph with 0 edges""" + + def p_dist(dist): + return 0 + + G = nx.geographical_threshold_graph(50, 1, p_dist=p_dist) + assert len(G.edges) == 0 + + def test_pos_weight_name(self): + gtg = nx.geographical_threshold_graph + G = gtg(50, 100, seed=42, pos_name="coords", weight_name="wt") + assert all(len(d["coords"]) == 2 for n, d in G.nodes.items()) + assert all(d["wt"] > 0 for n, d in G.nodes.items()) + + +class TestWaxmanGraph: + """Unit tests for the :func:`~networkx.waxman_graph` function.""" + + def test_number_of_nodes_1(self): + G = nx.waxman_graph(50, 0.5, 0.1, seed=42) + assert len(G) == 50 + G = nx.waxman_graph(range(50), 0.5, 0.1, seed=42) + assert len(G) == 50 + + def test_number_of_nodes_2(self): + G = nx.waxman_graph(50, 0.5, 0.1, L=1) + assert len(G) == 50 + G = nx.waxman_graph(range(50), 0.5, 0.1, L=1) + assert len(G) == 50 + + def test_metric(self): + """Tests for providing an alternate distance metric to the generator.""" + # Use the L1 metric. 
+        G = nx.waxman_graph(50, 0.5, 0.1, metric=l1dist)
+        assert len(G) == 50
+
+    def test_pos_name(self):
+        G = nx.waxman_graph(50, 0.5, 0.1, seed=42, pos_name="coords")
+        assert all(len(d["coords"]) == 2 for n, d in G.nodes.items())
+
+
+class TestNavigableSmallWorldGraph:
+    def test_navigable_small_world(self):
+        G = nx.navigable_small_world_graph(5, p=1, q=0, seed=42)
+        gg = nx.grid_2d_graph(5, 5).to_directed()
+        assert nx.is_isomorphic(G, gg)
+
+        G = nx.navigable_small_world_graph(5, p=1, q=0, dim=3)
+        gg = nx.grid_graph([5, 5, 5]).to_directed()
+        assert nx.is_isomorphic(G, gg)
+
+        G = nx.navigable_small_world_graph(5, p=1, q=0, dim=1)
+        gg = nx.grid_graph([5]).to_directed()
+        assert nx.is_isomorphic(G, gg)
+
+    def test_invalid_diameter_value(self):
+        with pytest.raises(nx.NetworkXException, match=".*p must be >= 1"):
+            nx.navigable_small_world_graph(5, p=0, q=0, dim=1)
+
+    def test_invalid_long_range_connections_value(self):
+        with pytest.raises(nx.NetworkXException, match=".*q must be >= 0"):
+            nx.navigable_small_world_graph(5, p=1, q=-1, dim=1)
+
+    def test_invalid_exponent_for_decaying_probability_value(self):
+        with pytest.raises(nx.NetworkXException, match=".*r must be >= 0"):
+            nx.navigable_small_world_graph(5, p=1, q=0, r=-1, dim=1)
+
+    def test_r_between_0_and_1(self):
+        """Smoke test for radius in range [0, 1]"""
+        # q=0 means no long-range connections
+        G = nx.navigable_small_world_graph(3, p=1, q=0, r=0.5, dim=2, seed=42)
+        expected = nx.grid_2d_graph(3, 3, create_using=nx.DiGraph)
+        assert nx.utils.graphs_equal(G, expected)
+
+    @pytest.mark.parametrize("seed", range(2478, 2578, 10))
+    def test_r_general_scaling(self, seed):
+        """The probability of adding a long-range edge scales with `1 / dist**r`,
+        so a navigable_small_world graph created with r < 1 should generally
+        result in more edges than a navigable_small_world graph with r >= 1
+        (for 0 < q << n).
+
+        N.B. this is probabilistic, so this test may not hold for all seeds."""
+        G1 = nx.navigable_small_world_graph(7, q=3, r=0.5, seed=seed)
+        G2 = nx.navigable_small_world_graph(7, q=3, r=1, seed=seed)
+        G3 = nx.navigable_small_world_graph(7, q=3, r=2, seed=seed)
+        assert G1.number_of_edges() > G2.number_of_edges()
+        assert G2.number_of_edges() > G3.number_of_edges()
+
+
+class TestThresholdedRandomGeometricGraph:
+    """Unit tests for :func:`~networkx.thresholded_random_geometric_graph`"""
+
+    def test_number_of_nodes(self):
+        G = nx.thresholded_random_geometric_graph(50, 0.2, 0.1, seed=42)
+        assert len(G) == 50
+        G = nx.thresholded_random_geometric_graph(range(50), 0.2, 0.1, seed=42)
+        assert len(G) == 50
+
+    def test_distances(self):
+        """Tests that pairs of vertices are adjacent only if they are
+        within the prescribed radius.
+        """
+        # Use the Euclidean metric, the default according to the
+        # documentation.
+        G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1, seed=42)
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_p(self):
+        """Tests for providing an alternate distance metric to the generator."""
+
+        # Use the L1 metric.
+        def dist(x, y):
+            return sum(abs(a - b) for a, b in zip(x, y))
+
+        G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1, p=1, seed=42)
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_node_names(self):
+        """Tests using values other than sequential numbers as node IDs."""
+        import string
+
+        nodes = list(string.ascii_lowercase)
+        G = nx.thresholded_random_geometric_graph(nodes, 0.25, 0.1, seed=42)
+        assert len(G) == len(nodes)
+
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must be within the given distance.
+            if v in G[u]:
+                assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
+
+    def test_theta(self):
+        """Tests that pairs of vertices are adjacent only if the sum of
+        their weights exceeds the threshold parameter theta.
+        """
+        G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1, seed=42)
+
+        for u, v in combinations(G, 2):
+            # Adjacent vertices must have a sufficiently large sum of weights.
+            if v in G[u]:
+                assert (G.nodes[u]["weight"] + G.nodes[v]["weight"]) >= 0.1
+
+    def test_pos_name(self):
+        trgg = nx.thresholded_random_geometric_graph
+        G = trgg(50, 0.25, 0.1, seed=42, pos_name="p", weight_name="wt")
+        assert all(len(d["p"]) == 2 for n, d in G.nodes.items())
+        assert all(d["wt"] > 0 for n, d in G.nodes.items())
+
+
+def test_geometric_edges_pos_attribute():
+    G = nx.Graph()
+    G.add_nodes_from(
+        [
+            (0, {"position": (0, 0)}),
+            (1, {"position": (0, 1)}),
+            (2, {"position": (1, 0)}),
+        ]
+    )
+    expected_edges = [(0, 1), (0, 2)]
+    assert expected_edges == nx.geometric_edges(G, radius=1, pos_name="position")
+
+
+def test_geometric_edges_raises_no_pos():
+    G = nx.path_graph(3)
+    msg = "all nodes. must have a '"
+    with pytest.raises(nx.NetworkXError, match=msg):
+        nx.geometric_edges(G, radius=1)
+
+
+def test_number_of_nodes_S1():
+    G = nx.geometric_soft_configuration_graph(
+        beta=1.5, n=100, gamma=2.7, mean_degree=10, seed=42
+    )
+    assert len(G) == 100
+
+
+def test_set_attributes_S1():
+    G = nx.geometric_soft_configuration_graph(
+        beta=1.5, n=100, gamma=2.7, mean_degree=10, seed=42
+    )
+    kappas = nx.get_node_attributes(G, "kappa")
+    assert len(kappas) == 100
+    thetas = nx.get_node_attributes(G, "theta")
+    assert len(thetas) == 100
+    radii = nx.get_node_attributes(G, "radius")
+    assert len(radii) == 100
+
+
+def test_mean_kappas_mean_degree_S1():
+    G = nx.geometric_soft_configuration_graph(
+        beta=2.5, n=50, gamma=2.7, mean_degree=10, seed=8023
+    )
+
+    kappas = nx.get_node_attributes(G, "kappa")
+    mean_kappas = sum(kappas.values()) / len(kappas)
+    assert math.fabs(mean_kappas - 10) < 0.5
+
+    degrees = dict(G.degree())
+    mean_degree = sum(degrees.values()) / len(degrees)
+    assert math.fabs(mean_degree - 10) < 1
+
+
+def test_dict_kappas_S1():
+    kappas = dict.fromkeys(range(1000), 10)
+    G = nx.geometric_soft_configuration_graph(beta=1, kappas=kappas)
+    assert len(G) == 1000
+    kappas = nx.get_node_attributes(G, "kappa")
+    assert all(kappa == 10 for kappa in kappas.values())
+
+
+def test_beta_clustering_S1():
+    G1 = nx.geometric_soft_configuration_graph(
+        beta=1.5, n=100, gamma=3.5, mean_degree=10, seed=42
+    )
+    G2 = nx.geometric_soft_configuration_graph(
+        beta=3.0, n=100, gamma=3.5, mean_degree=10, seed=42
+    )
+    assert nx.average_clustering(G1) < nx.average_clustering(G2)
+
+
+def test_wrong_parameters_S1():
+    with pytest.raises(
+        nx.NetworkXError,
+        match="Please provide either kappas, or all 3 of: n, gamma and mean_degree.",
+    ):
+        G = nx.geometric_soft_configuration_graph(
+            beta=1.5, gamma=3.5, mean_degree=10, seed=42
+        )
+
+    with pytest.raises(
+        nx.NetworkXError,
+        match="When kappas is input, n, gamma and mean_degree must not be.",
+    ):
+        kappas = 
dict.fromkeys(range(1000), 10) + G = nx.geometric_soft_configuration_graph( + beta=1.5, kappas=kappas, gamma=2.3, seed=42 + ) + + with pytest.raises( + nx.NetworkXError, + match="Please provide either kappas, or all 3 of: n, gamma and mean_degree.", + ): + G = nx.geometric_soft_configuration_graph(beta=1.5, seed=42) + + +def test_negative_beta_S1(): + with pytest.raises( + nx.NetworkXError, match="The parameter beta cannot be smaller or equal to 0." + ): + G = nx.geometric_soft_configuration_graph( + beta=-1, n=100, gamma=2.3, mean_degree=10, seed=42 + ) + + +def test_non_zero_clustering_beta_lower_one_S1(): + G = nx.geometric_soft_configuration_graph( + beta=0.5, n=100, gamma=3.5, mean_degree=10, seed=42 + ) + assert nx.average_clustering(G) > 0 + + +def test_mean_degree_influence_on_connectivity_S1(): + low_mean_degree = 2 + high_mean_degree = 20 + G_low = nx.geometric_soft_configuration_graph( + beta=1.2, n=100, gamma=2.7, mean_degree=low_mean_degree, seed=42 + ) + G_high = nx.geometric_soft_configuration_graph( + beta=1.2, n=100, gamma=2.7, mean_degree=high_mean_degree, seed=42 + ) + assert nx.number_connected_components(G_low) > nx.number_connected_components( + G_high + ) + + +def test_compare_mean_kappas_different_gammas_S1(): + G1 = nx.geometric_soft_configuration_graph( + beta=1.5, n=20, gamma=2.7, mean_degree=5, seed=42 + ) + G2 = nx.geometric_soft_configuration_graph( + beta=1.5, n=20, gamma=3.5, mean_degree=5, seed=42 + ) + kappas1 = nx.get_node_attributes(G1, "kappa") + mean_kappas1 = sum(kappas1.values()) / len(kappas1) + kappas2 = nx.get_node_attributes(G2, "kappa") + mean_kappas2 = sum(kappas2.values()) / len(kappas2) + assert math.fabs(mean_kappas1 - mean_kappas2) < 1 diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_harary_graph.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_harary_graph.py new file mode 100644 index 0000000..8a0142d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_harary_graph.py @@ -0,0 +1,133 @@ +"""Unit tests for the :mod:`networkx.generators.harary_graph` module.""" + +import pytest + +import networkx as nx +from networkx.algorithms.isomorphism.isomorph import is_isomorphic +from networkx.generators.harary_graph import hkn_harary_graph, hnm_harary_graph + + +class TestHararyGraph: + """ + Suppose n nodes, m >= n-1 edges, d = 2m // n, r = 2m % n + """ + + def test_hnm_harary_graph(self): + # When d is even and r = 0, the hnm_harary_graph(n,m) is + # the circulant_graph(n, list(range(1,d/2+1))) + for n, m in [(5, 5), (6, 12), (7, 14)]: + G1 = hnm_harary_graph(n, m) + d = 2 * m // n + G2 = nx.circulant_graph(n, list(range(1, d // 2 + 1))) + assert is_isomorphic(G1, G2) + + # When d is even and r > 0, the hnm_harary_graph(n,m) is + # the circulant_graph(n, list(range(1,d/2+1))) + # with r edges added arbitrarily + for n, m in [(5, 7), (6, 13), (7, 16)]: + G1 = hnm_harary_graph(n, m) + d = 2 * m // n + G2 = nx.circulant_graph(n, list(range(1, d // 2 + 1))) + assert set(G2.edges) < set(G1.edges) + assert G1.number_of_edges() == m + + # When d is odd and n is even and r = 0, the hnm_harary_graph(n,m) + # is the circulant_graph(n, list(range(1,(d+1)/2) plus [n//2]) + for n, m in [(6, 9), (8, 12), (10, 15)]: + G1 = hnm_harary_graph(n, m) + d = 2 * m // n + L = list(range(1, (d + 1) // 2)) + L.append(n // 2) + G2 = nx.circulant_graph(n, L) + assert is_isomorphic(G1, G2) + + # When d is odd and n is even and r > 0, the hnm_harary_graph(n,m) + # is the circulant_graph(n, 
list(range(1,(d+1)/2) plus [n//2])
+        # with r edges added arbitrarily
+        for n, m in [(6, 10), (8, 13), (10, 17)]:
+            G1 = hnm_harary_graph(n, m)
+            d = 2 * m // n
+            L = list(range(1, (d + 1) // 2))
+            L.append(n // 2)
+            G2 = nx.circulant_graph(n, L)
+            assert set(G2.edges) < set(G1.edges)
+            assert G1.number_of_edges() == m
+
+        # When d is odd and n is odd, the hnm_harary_graph(n,m) is
+        # the circulant_graph(n, list(range(1,(d+1)/2))
+        # with m - n*(d-1)/2 edges added arbitrarily
+        for n, m in [(5, 4), (7, 12), (9, 14)]:
+            G1 = hnm_harary_graph(n, m)
+            d = 2 * m // n
+            L = list(range(1, (d + 1) // 2))
+            G2 = nx.circulant_graph(n, L)
+            assert set(G2.edges) < set(G1.edges)
+            assert G1.number_of_edges() == m
+
+        # Raise NetworkXError if n<1
+        n = 0
+        m = 0
+        pytest.raises(nx.NetworkXError, hnm_harary_graph, n, m)
+
+        # Raise NetworkXError if m < n-1
+        n = 6
+        m = 4
+        pytest.raises(nx.NetworkXError, hnm_harary_graph, n, m)
+
+        # Raise NetworkXError if m > n(n-1)/2
+        n = 6
+        m = 16
+        pytest.raises(nx.NetworkXError, hnm_harary_graph, n, m)
+
+    """
+    Suppose connectivity k, number of nodes n
+    """
+
+    def test_hkn_harary_graph(self):
+        # When k == 1, the hkn_harary_graph(k,n) is
+        # the path_graph(n)
+        for k, n in [(1, 6), (1, 7)]:
+            G1 = hkn_harary_graph(k, n)
+            G2 = nx.path_graph(n)
+            assert is_isomorphic(G1, G2)
+
+        # When k is even, the hkn_harary_graph(k,n) is
+        # the circulant_graph(n, list(range(1,k/2+1)))
+        for k, n in [(2, 6), (2, 7), (4, 6), (4, 7)]:
+            G1 = hkn_harary_graph(k, n)
+            G2 = nx.circulant_graph(n, list(range(1, k // 2 + 1)))
+            assert is_isomorphic(G1, G2)
+
+        # When k is odd and n is even, the hkn_harary_graph(k,n) is
+        # the circulant_graph(n, list(range(1,(k+1)/2)) plus [n/2])
+        for k, n in [(3, 6), (5, 8), (7, 10)]:
+            G1 = hkn_harary_graph(k, n)
+            L = list(range(1, (k + 1) // 2))
+            L.append(n // 2)
+            G2 = nx.circulant_graph(n, L)
+            assert is_isomorphic(G1, G2)
+
+        # When k is odd and n is odd, the hkn_harary_graph(k,n) is
+        # the circulant_graph(n, list(range(1,(k+1)/2))) with
+        # n//2+1 edges added between node i and node i+n//2+1
+        for k, n in [(3, 5), (5, 9), (7, 11)]:
+            G1 = hkn_harary_graph(k, n)
+            G2 = nx.circulant_graph(n, list(range(1, (k + 1) // 2)))
+            eSet1 = set(G1.edges)
+            eSet2 = set(G2.edges)
+            eSet3 = set()
+            half = n // 2
+            for i in range(half + 1):
+                # add half+1 edges between i and i+half
+                eSet3.add((i, (i + half) % n))
+            assert eSet1 == eSet2 | eSet3
+
+        # Raise NetworkXError if k<1
+        k = 0
+        n = 0
+        pytest.raises(nx.NetworkXError, hkn_harary_graph, k, n)
+
+        # Raise NetworkXError if n<k+1
+        k = 6
+        n = 6
+        pytest.raises(nx.NetworkXError, hkn_harary_graph, k, n)
diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_joint_degree_seq.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_joint_degree_seq.py
new file mode 100644
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_joint_degree_seq.py
+"""Unit tests for the :mod:`networkx.generators.joint_degree_seq` module."""
+
+import time
+
+from networkx.algorithms.assortativity import degree_mixing_dict
+from networkx.generators import gnm_random_graph, powerlaw_cluster_graph
+from networkx.generators.joint_degree_seq import (
+    directed_joint_degree_graph,
+    is_valid_directed_joint_degree,
+    is_valid_joint_degree,
+    joint_degree_graph,
+)
+
+
+def test_is_valid_joint_degree():
+    """Tests for conditions that invalidate a joint degree dict"""
+    # test condition 3
+    # joint_degrees_3[1][4] > degree_count[1]*degree_count[4]
+    joint_degrees_3 = {
+        1: {4: 2},
+        2: {2: 2, 3: 2, 4: 2},
+        3: {2: 2, 4: 1},
+        4: {1: 2, 2: 2, 3: 1},
+    }
+    assert not is_valid_joint_degree(joint_degrees_3)
+
+    # test condition 5
+    # joint_degrees_5[1][1] not even
+    joint_degrees_5 = {1: {1: 9}}
+    assert not is_valid_joint_degree(joint_degrees_5)
+
+
+def test_joint_degree_graph(ntimes=10):
+    for _ in range(ntimes):
+        seed = int(time.time())
+
+        n, m, p = 20, 10, 1
+        # generate random graph with model powerlaw_cluster and calculate
+        # its joint degree
+        g = powerlaw_cluster_graph(n, m, p, seed=seed)
+        joint_degrees_g = degree_mixing_dict(g, normalized=False)
+
+        # generate simple undirected graph with given joint degree
+        # joint_degrees_g
+        G = joint_degree_graph(joint_degrees_g)
+        joint_degrees_G = degree_mixing_dict(G, normalized=False)
+
+        # assert that the given joint degree is equal to the generated
+        # graph's joint degree
+        assert joint_degrees_g == joint_degrees_G
+
+
+def 
test_is_valid_directed_joint_degree(): + in_degrees = [0, 1, 1, 2] + out_degrees = [1, 1, 1, 1] + nkk = {1: {1: 2, 2: 2}} + assert is_valid_directed_joint_degree(in_degrees, out_degrees, nkk) + + # not realizable, values are not integers. + nkk = {1: {1: 1.5, 2: 2.5}} + assert not is_valid_directed_joint_degree(in_degrees, out_degrees, nkk) + + # not realizable, number of edges between 1-2 are insufficient. + nkk = {1: {1: 2, 2: 1}} + assert not is_valid_directed_joint_degree(in_degrees, out_degrees, nkk) + + # not realizable, in/out degree sequences have different number of nodes. + out_degrees = [1, 1, 1] + nkk = {1: {1: 2, 2: 2}} + assert not is_valid_directed_joint_degree(in_degrees, out_degrees, nkk) + + # not realizable, degree sequences have fewer than required nodes. + in_degrees = [0, 1, 2] + assert not is_valid_directed_joint_degree(in_degrees, out_degrees, nkk) + + +def test_directed_joint_degree_graph(n=15, m=100, ntimes=1000): + for _ in range(ntimes): + # generate gnm random graph and calculate its joint degree. + g = gnm_random_graph(n, m, None, directed=True) + + # in-degree sequence of g as a list of integers. + in_degrees = list(dict(g.in_degree()).values()) + # out-degree sequence of g as a list of integers. + out_degrees = list(dict(g.out_degree()).values()) + nkk = degree_mixing_dict(g) + + # generate simple directed graph with given degree sequence and joint + # degree matrix. + G = directed_joint_degree_graph(in_degrees, out_degrees, nkk) + + # assert degree sequence correctness. + assert in_degrees == list(dict(G.in_degree()).values()) + assert out_degrees == list(dict(G.out_degree()).values()) + # assert joint degree matrix correctness. + assert nkk == degree_mixing_dict(G) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_lattice.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_lattice.py new file mode 100644 index 0000000..5012324 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_lattice.py @@ -0,0 +1,246 @@ +"""Unit tests for the :mod:`networkx.generators.lattice` module.""" + +from itertools import product + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +class TestGrid2DGraph: + """Unit tests for :func:`networkx.generators.lattice.grid_2d_graph`""" + + def test_number_of_vertices(self): + m, n = 5, 6 + G = nx.grid_2d_graph(m, n) + assert len(G) == m * n + + def test_degree_distribution(self): + m, n = 5, 6 + G = nx.grid_2d_graph(m, n) + expected_histogram = [0, 0, 4, 2 * (m + n) - 8, (m - 2) * (n - 2)] + assert nx.degree_histogram(G) == expected_histogram + + def test_directed(self): + m, n = 5, 6 + G = nx.grid_2d_graph(m, n) + H = nx.grid_2d_graph(m, n, create_using=nx.DiGraph()) + assert H.succ == G.adj + assert H.pred == G.adj + + def test_multigraph(self): + m, n = 5, 6 + G = nx.grid_2d_graph(m, n) + H = nx.grid_2d_graph(m, n, create_using=nx.MultiGraph()) + assert list(H.edges()) == list(G.edges()) + + def test_periodic(self): + G = nx.grid_2d_graph(0, 0, periodic=True) + assert dict(G.degree()) == {} + + for m, n, H in [ + (2, 2, nx.cycle_graph(4)), + (1, 7, nx.cycle_graph(7)), + (7, 1, nx.cycle_graph(7)), + (2, 5, nx.circular_ladder_graph(5)), + (5, 2, nx.circular_ladder_graph(5)), + (2, 4, nx.cubical_graph()), + (4, 2, nx.cubical_graph()), + ]: + G = nx.grid_2d_graph(m, n, periodic=True) + assert nx.could_be_isomorphic(G, H) + + def test_periodic_iterable(self): + m, n = 3, 7 + for a, b in product([0, 1], [0, 1]): + G = 
nx.grid_2d_graph(m, n, periodic=(a, b)) + assert G.number_of_nodes() == m * n + assert G.number_of_edges() == (m + a - 1) * n + (n + b - 1) * m + + def test_periodic_directed(self): + G = nx.grid_2d_graph(4, 2, periodic=True) + H = nx.grid_2d_graph(4, 2, periodic=True, create_using=nx.DiGraph()) + assert H.succ == G.adj + assert H.pred == G.adj + + def test_periodic_multigraph(self): + G = nx.grid_2d_graph(4, 2, periodic=True) + H = nx.grid_2d_graph(4, 2, periodic=True, create_using=nx.MultiGraph()) + assert list(G.edges()) == list(H.edges()) + + def test_exceptions(self): + pytest.raises(nx.NetworkXError, nx.grid_2d_graph, -3, 2) + pytest.raises(nx.NetworkXError, nx.grid_2d_graph, 3, -2) + pytest.raises(TypeError, nx.grid_2d_graph, 3.3, 2) + pytest.raises(TypeError, nx.grid_2d_graph, 3, 2.2) + + def test_node_input(self): + G = nx.grid_2d_graph(4, 2, periodic=True) + H = nx.grid_2d_graph(range(4), range(2), periodic=True) + assert nx.is_isomorphic(H, G) + H = nx.grid_2d_graph("abcd", "ef", periodic=True) + assert nx.is_isomorphic(H, G) + G = nx.grid_2d_graph(5, 6) + H = nx.grid_2d_graph(range(5), range(6)) + assert edges_equal(H, G) + + +class TestGridGraph: + """Unit tests for :func:`networkx.generators.lattice.grid_graph`""" + + def test_grid_graph(self): + """grid_graph([n,m]) is a connected simple graph with the + following properties: + number_of_nodes = n*m + degree_histogram = [0,0,4,2*(n+m)-8,(n-2)*(m-2)] + """ + for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]: + dim = [n, m] + g = nx.grid_graph(dim) + assert len(g) == n * m + assert nx.degree_histogram(g) == [ + 0, + 0, + 4, + 2 * (n + m) - 8, + (n - 2) * (m - 2), + ] + + for n, m in [(1, 5), (5, 1)]: + dim = [n, m] + g = nx.grid_graph(dim) + assert len(g) == n * m + assert nx.is_isomorphic(g, nx.path_graph(5)) + + # mg = nx.grid_graph([n,m], create_using=MultiGraph()) + # assert_equal(mg.edges(), g.edges()) + + def test_node_input(self): + G = nx.grid_graph([range(7, 9), range(3, 6)]) + assert len(G) == 2 * 3 + assert nx.is_isomorphic(G, nx.grid_graph([2, 3])) + + def test_periodic_iterable(self): + m, n, k = 3, 7, 5 + for a, b, c in product([0, 1], [0, 1], [0, 1]): + G = nx.grid_graph([m, n, k], periodic=(a, b, c)) + num_e = (m + a - 1) * n * k + (n + b - 1) * m * k + (k + c - 1) * m * n + assert G.number_of_nodes() == m * n * k + assert G.number_of_edges() == num_e + + +class TestHypercubeGraph: + """Unit tests for :func:`networkx.generators.lattice.hypercube_graph`""" + + def test_special_cases(self): + for n, H in [ + (0, nx.null_graph()), + (1, nx.path_graph(2)), + (2, nx.cycle_graph(4)), + (3, nx.cubical_graph()), + ]: + G = nx.hypercube_graph(n) + assert nx.could_be_isomorphic(G, H) + + def test_degree_distribution(self): + for n in range(1, 10): + G = nx.hypercube_graph(n) + expected_histogram = [0] * n + [2**n] + assert nx.degree_histogram(G) == expected_histogram + + +class TestTriangularLatticeGraph: + "Tests for :func:`networkx.generators.lattice.triangular_lattice_graph`" + + def test_lattice_points(self): + """Tests that the graph is really a triangular lattice.""" + for m, n in [(2, 3), (2, 2), (2, 1), (3, 3), (3, 2), (3, 4)]: + G = nx.triangular_lattice_graph(m, n) + N = (n + 1) // 2 + assert len(G) == (m + 1) * (1 + N) - (n % 2) * ((m + 1) // 2) + for i, j in G.nodes(): + nbrs = G[(i, j)] + if i < N: + assert (i + 1, j) in nbrs + if j < m: + assert (i, j + 1) in nbrs + if j < m and (i > 0 or j % 2) and (i < N or (j + 1) % 2): + assert (i + 1, j + 1) in nbrs or (i - 1, j + 1) in nbrs + + def test_directed(self): + 
"""Tests for creating a directed triangular lattice.""" + G = nx.triangular_lattice_graph(3, 4, create_using=nx.Graph()) + H = nx.triangular_lattice_graph(3, 4, create_using=nx.DiGraph()) + assert H.is_directed() + for u, v in H.edges(): + assert v[1] >= u[1] + if v[1] == u[1]: + assert v[0] > u[0] + + def test_multigraph(self): + """Tests for creating a triangular lattice multigraph.""" + G = nx.triangular_lattice_graph(3, 4, create_using=nx.Graph()) + H = nx.triangular_lattice_graph(3, 4, create_using=nx.MultiGraph()) + assert list(H.edges()) == list(G.edges()) + + def test_periodic(self): + G = nx.triangular_lattice_graph(4, 6, periodic=True) + assert len(G) == 12 + assert G.size() == 36 + # all degrees are 6 + assert len([n for n, d in G.degree() if d != 6]) == 0 + G = nx.triangular_lattice_graph(5, 7, periodic=True) + TLG = nx.triangular_lattice_graph + pytest.raises(nx.NetworkXError, TLG, 2, 4, periodic=True) + pytest.raises(nx.NetworkXError, TLG, 4, 4, periodic=True) + pytest.raises(nx.NetworkXError, TLG, 2, 6, periodic=True) + + +class TestHexagonalLatticeGraph: + "Tests for :func:`networkx.generators.lattice.hexagonal_lattice_graph`" + + def test_lattice_points(self): + """Tests that the graph is really a hexagonal lattice.""" + for m, n in [(4, 5), (4, 4), (4, 3), (3, 2), (3, 3), (3, 5)]: + G = nx.hexagonal_lattice_graph(m, n) + assert len(G) == 2 * (m + 1) * (n + 1) - 2 + C_6 = nx.cycle_graph(6) + hexagons = [ + [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)], + [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4)], + [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)], + [(2, 0), (2, 1), (2, 2), (3, 0), (3, 1), (3, 2)], + [(2, 2), (2, 3), (2, 4), (3, 2), (3, 3), (3, 4)], + ] + for hexagon in hexagons: + assert nx.is_isomorphic(G.subgraph(hexagon), C_6) + + def test_directed(self): + """Tests for creating a directed hexagonal lattice.""" + G = nx.hexagonal_lattice_graph(3, 5, create_using=nx.Graph()) + H = nx.hexagonal_lattice_graph(3, 5, create_using=nx.DiGraph()) + assert H.is_directed() + pos = nx.get_node_attributes(H, "pos") + for u, v in H.edges(): + assert pos[v][1] >= pos[u][1] + if pos[v][1] == pos[u][1]: + assert pos[v][0] > pos[u][0] + + def test_multigraph(self): + """Tests for creating a hexagonal lattice multigraph.""" + G = nx.hexagonal_lattice_graph(3, 5, create_using=nx.Graph()) + H = nx.hexagonal_lattice_graph(3, 5, create_using=nx.MultiGraph()) + assert list(H.edges()) == list(G.edges()) + + def test_periodic(self): + G = nx.hexagonal_lattice_graph(4, 6, periodic=True) + assert len(G) == 48 + assert G.size() == 72 + # all degrees are 3 + assert len([n for n, d in G.degree() if d != 3]) == 0 + G = nx.hexagonal_lattice_graph(5, 8, periodic=True) + HLG = nx.hexagonal_lattice_graph + pytest.raises(nx.NetworkXError, HLG, 2, 7, periodic=True) + pytest.raises(nx.NetworkXError, HLG, 1, 4, periodic=True) + pytest.raises(nx.NetworkXError, HLG, 2, 1, periodic=True) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_line.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_line.py new file mode 100644 index 0000000..7f5454e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_line.py @@ -0,0 +1,309 @@ +import pytest + +import networkx as nx +from networkx.generators import line +from networkx.utils import edges_equal + + +class TestGeneratorLine: + def test_star(self): + G = nx.star_graph(5) + L = nx.line_graph(G) + assert nx.is_isomorphic(L, nx.complete_graph(5)) + + def test_path(self): + G = 
nx.path_graph(5) + L = nx.line_graph(G) + assert nx.is_isomorphic(L, nx.path_graph(4)) + + def test_cycle(self): + G = nx.cycle_graph(5) + L = nx.line_graph(G) + assert nx.is_isomorphic(L, G) + + def test_digraph1(self): + G = nx.DiGraph([(0, 1), (0, 2), (0, 3)]) + L = nx.line_graph(G) + # no edge graph, but with nodes + assert L.adj == {(0, 1): {}, (0, 2): {}, (0, 3): {}} + + def test_multigraph1(self): + G = nx.MultiGraph([(0, 1), (0, 1), (1, 0), (0, 2), (2, 0), (0, 3)]) + L = nx.line_graph(G) + # no edge graph, but with nodes + assert edges_equal( + L.edges(), + [ + ((0, 3, 0), (0, 1, 0)), + ((0, 3, 0), (0, 2, 0)), + ((0, 3, 0), (0, 2, 1)), + ((0, 3, 0), (0, 1, 1)), + ((0, 3, 0), (0, 1, 2)), + ((0, 1, 0), (0, 1, 1)), + ((0, 1, 0), (0, 2, 0)), + ((0, 1, 0), (0, 1, 2)), + ((0, 1, 0), (0, 2, 1)), + ((0, 1, 1), (0, 1, 2)), + ((0, 1, 1), (0, 2, 0)), + ((0, 1, 1), (0, 2, 1)), + ((0, 1, 2), (0, 2, 0)), + ((0, 1, 2), (0, 2, 1)), + ((0, 2, 0), (0, 2, 1)), + ], + ) + + def test_multigraph2(self): + G = nx.MultiGraph([(1, 2), (2, 1)]) + L = nx.line_graph(G) + assert edges_equal(L.edges(), [((1, 2, 0), (1, 2, 1))]) + + def test_multidigraph1(self): + G = nx.MultiDiGraph([(1, 2), (2, 1)]) + L = nx.line_graph(G) + assert edges_equal(L.edges(), [((1, 2, 0), (2, 1, 0)), ((2, 1, 0), (1, 2, 0))]) + + def test_multidigraph2(self): + G = nx.MultiDiGraph([(0, 1), (0, 1), (0, 1), (1, 2)]) + L = nx.line_graph(G) + assert edges_equal( + L.edges(), + [((0, 1, 0), (1, 2, 0)), ((0, 1, 1), (1, 2, 0)), ((0, 1, 2), (1, 2, 0))], + ) + + def test_digraph2(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3)]) + L = nx.line_graph(G) + assert edges_equal(L.edges(), [((0, 1), (1, 2)), ((1, 2), (2, 3))]) + + def test_create1(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3)]) + L = nx.line_graph(G, create_using=nx.Graph()) + assert edges_equal(L.edges(), [((0, 1), (1, 2)), ((1, 2), (2, 3))]) + + def test_create2(self): + G = nx.Graph([(0, 1), (1, 2), (2, 3)]) + L = nx.line_graph(G, create_using=nx.DiGraph()) + assert edges_equal(L.edges(), [((0, 1), (1, 2)), ((1, 2), (2, 3))]) + + +class TestGeneratorInverseLine: + def test_example(self): + G = nx.Graph() + G_edges = [ + [1, 2], + [1, 3], + [1, 4], + [1, 5], + [2, 3], + [2, 5], + [2, 6], + [2, 7], + [3, 4], + [3, 5], + [6, 7], + [6, 8], + [7, 8], + ] + G.add_edges_from(G_edges) + H = nx.inverse_line_graph(G) + solution = nx.Graph() + solution_edges = [ + ("a", "b"), + ("a", "c"), + ("a", "d"), + ("a", "e"), + ("c", "d"), + ("e", "f"), + ("e", "g"), + ("f", "g"), + ] + solution.add_edges_from(solution_edges) + assert nx.is_isomorphic(H, solution) + + def test_example_2(self): + G = nx.Graph() + G_edges = [[1, 2], [1, 3], [2, 3], [3, 4], [3, 5], [4, 5]] + G.add_edges_from(G_edges) + H = nx.inverse_line_graph(G) + solution = nx.Graph() + solution_edges = [("a", "c"), ("b", "c"), ("c", "d"), ("d", "e"), ("d", "f")] + solution.add_edges_from(solution_edges) + assert nx.is_isomorphic(H, solution) + + def test_pair(self): + G = nx.path_graph(2) + H = nx.inverse_line_graph(G) + solution = nx.path_graph(3) + assert nx.is_isomorphic(H, solution) + + def test_line(self): + G = nx.path_graph(5) + solution = nx.path_graph(6) + H = nx.inverse_line_graph(G) + assert nx.is_isomorphic(H, solution) + + def test_triangle_graph(self): + G = nx.complete_graph(3) + H = nx.inverse_line_graph(G) + alternative_solution = nx.Graph() + alternative_solution.add_edges_from([[0, 1], [0, 2], [0, 3]]) + # there are two alternative inverse line graphs for this case + # so long as we get one of them the test 
should pass + assert nx.is_isomorphic(H, G) or nx.is_isomorphic(H, alternative_solution) + + def test_cycle(self): + G = nx.cycle_graph(5) + H = nx.inverse_line_graph(G) + assert nx.is_isomorphic(H, G) + + def test_empty(self): + G = nx.Graph() + H = nx.inverse_line_graph(G) + assert nx.is_isomorphic(H, nx.complete_graph(1)) + + def test_K1(self): + G = nx.complete_graph(1) + H = nx.inverse_line_graph(G) + solution = nx.path_graph(2) + assert nx.is_isomorphic(H, solution) + + def test_edgeless_graph(self): + G = nx.empty_graph(5) + with pytest.raises(nx.NetworkXError, match="edgeless graph"): + nx.inverse_line_graph(G) + + def test_selfloops_error(self): + G = nx.cycle_graph(4) + G.add_edge(0, 0) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G) + + def test_non_line_graphs(self): + # Tests several known non-line graphs for impossibility + # Adapted from L.W.Beineke, "Characterizations of derived graphs" + + # claw graph + claw = nx.star_graph(3) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, claw) + + # wheel graph with 6 nodes + wheel = nx.wheel_graph(6) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, wheel) + + # K5 with one edge remove + K5m = nx.complete_graph(5) + K5m.remove_edge(0, 1) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, K5m) + + # graph without any odd triangles (contains claw as induced subgraph) + G = nx.compose(nx.path_graph(2), nx.complete_bipartite_graph(2, 3)) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G) + + ## Variations on a diamond graph + + # Diamond + 2 edges (+ "roof") + G = nx.diamond_graph() + G.add_edges_from([(4, 0), (5, 3)]) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G) + G.add_edge(4, 5) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G) + + # Diamond + 2 connected edges + G = nx.diamond_graph() + G.add_edges_from([(4, 0), (4, 3)]) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G) + + # Diamond + K3 + one edge (+ 2*K3) + G = nx.diamond_graph() + G.add_edges_from([(4, 0), (4, 1), (4, 2), (5, 3)]) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G) + G.add_edges_from([(5, 1), (5, 2)]) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G) + + # 4 triangles + G = nx.diamond_graph() + G.add_edges_from([(4, 0), (4, 1), (5, 2), (5, 3)]) + pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G) + + def test_wrong_graph_type(self): + G = nx.DiGraph() + G_edges = [[0, 1], [0, 2], [0, 3]] + G.add_edges_from(G_edges) + pytest.raises(nx.NetworkXNotImplemented, nx.inverse_line_graph, G) + + G = nx.MultiGraph() + G_edges = [[0, 1], [0, 2], [0, 3]] + G.add_edges_from(G_edges) + pytest.raises(nx.NetworkXNotImplemented, nx.inverse_line_graph, G) + + def test_line_inverse_line_complete(self): + G = nx.complete_graph(10) + H = nx.line_graph(G) + J = nx.inverse_line_graph(H) + assert nx.is_isomorphic(G, J) + + def test_line_inverse_line_path(self): + G = nx.path_graph(10) + H = nx.line_graph(G) + J = nx.inverse_line_graph(H) + assert nx.is_isomorphic(G, J) + + def test_line_inverse_line_hypercube(self): + G = nx.hypercube_graph(5) + H = nx.line_graph(G) + J = nx.inverse_line_graph(H) + assert nx.is_isomorphic(G, J) + + def test_line_inverse_line_cycle(self): + G = nx.cycle_graph(10) + H = nx.line_graph(G) + J = nx.inverse_line_graph(H) + assert nx.is_isomorphic(G, J) + + def test_line_inverse_line_star(self): + G = nx.star_graph(20) + H = nx.line_graph(G) + J = nx.inverse_line_graph(H) + assert nx.is_isomorphic(G, J) + + def test_line_inverse_line_multipartite(self): + G = 
nx.complete_multipartite_graph(3, 4, 5) + H = nx.line_graph(G) + J = nx.inverse_line_graph(H) + assert nx.is_isomorphic(G, J) + + def test_line_inverse_line_dgm(self): + G = nx.dorogovtsev_goltsev_mendes_graph(4) + H = nx.line_graph(G) + J = nx.inverse_line_graph(H) + assert nx.is_isomorphic(G, J) + + def test_line_different_node_types(self): + G = nx.path_graph([1, 2, 3, "a", "b", "c"]) + H = nx.line_graph(G) + J = nx.inverse_line_graph(H) + assert nx.is_isomorphic(G, J) + + +class TestGeneratorPrivateFunctions: + def test_triangles_error(self): + G = nx.diamond_graph() + pytest.raises(nx.NetworkXError, line._triangles, G, (4, 0)) + pytest.raises(nx.NetworkXError, line._triangles, G, (0, 3)) + + def test_odd_triangles_error(self): + G = nx.diamond_graph() + pytest.raises(nx.NetworkXError, line._odd_triangle, G, (0, 1, 4)) + pytest.raises(nx.NetworkXError, line._odd_triangle, G, (0, 1, 3)) + + def test_select_starting_cell_error(self): + G = nx.diamond_graph() + pytest.raises(nx.NetworkXError, line._select_starting_cell, G, (4, 0)) + pytest.raises(nx.NetworkXError, line._select_starting_cell, G, (0, 3)) + + def test_diamond_graph(self): + G = nx.diamond_graph() + for edge in G.edges: + cell = line._select_starting_cell(G, starting_edge=edge) + # Starting cell should always be one of the two triangles + assert len(cell) == 3 + assert all(v in G[u] for u in cell for v in cell if u != v) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_mycielski.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_mycielski.py new file mode 100644 index 0000000..eb12b14 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_mycielski.py @@ -0,0 +1,30 @@ +"""Unit tests for the :mod:`networkx.generators.mycielski` module.""" + +import pytest + +import networkx as nx + + +class TestMycielski: + def test_construction(self): + G = nx.path_graph(2) + M = nx.mycielskian(G) + assert nx.is_isomorphic(M, nx.cycle_graph(5)) + + def test_size(self): + G = nx.path_graph(2) + M = nx.mycielskian(G, 2) + assert len(M) == 11 + assert M.size() == 20 + + def test_mycielski_graph_generator(self): + G = nx.mycielski_graph(1) + assert nx.is_isomorphic(G, nx.empty_graph(1)) + G = nx.mycielski_graph(2) + assert nx.is_isomorphic(G, nx.path_graph(2)) + G = nx.mycielski_graph(3) + assert nx.is_isomorphic(G, nx.cycle_graph(5)) + G = nx.mycielski_graph(4) + assert nx.is_isomorphic(G, nx.mycielskian(nx.cycle_graph(5))) + with pytest.raises(nx.NetworkXError, match="must satisfy n >= 1"): + nx.mycielski_graph(0) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_nonisomorphic_trees.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_nonisomorphic_trees.py new file mode 100644 index 0000000..8bfd12f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_nonisomorphic_trees.py @@ -0,0 +1,56 @@ +""" +Unit tests for WROM algorithm generator in generators/nonisomorphic_trees.py +""" + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +class TestGeneratorNonIsomorphicTrees: + def test_tree_structure(self): + # test for tree structure for nx.nonisomorphic_trees() + def f(x): + return list(nx.nonisomorphic_trees(x)) + + for i in f(6): + assert nx.is_tree(i) + for i in f(8): + assert nx.is_tree(i) + + def test_nonisomorphism(self): + # test for nonisomorphism of trees for nx.nonisomorphic_trees() + def f(x): + return list(nx.nonisomorphic_trees(x)) + + trees = 
f(6) + for i in range(len(trees)): + for j in range(i + 1, len(trees)): + assert not nx.is_isomorphic(trees[i], trees[j]) + trees = f(8) + for i in range(len(trees)): + for j in range(i + 1, len(trees)): + assert not nx.is_isomorphic(trees[i], trees[j]) + + def test_number_of_nonisomorphic_trees(self): + # http://oeis.org/A000055 + assert nx.number_of_nonisomorphic_trees(2) == 1 + assert nx.number_of_nonisomorphic_trees(3) == 1 + assert nx.number_of_nonisomorphic_trees(4) == 2 + assert nx.number_of_nonisomorphic_trees(5) == 3 + assert nx.number_of_nonisomorphic_trees(6) == 6 + assert nx.number_of_nonisomorphic_trees(7) == 11 + assert nx.number_of_nonisomorphic_trees(8) == 23 + assert nx.number_of_nonisomorphic_trees(9) == 47 + assert nx.number_of_nonisomorphic_trees(10) == 106 + assert nx.number_of_nonisomorphic_trees(20) == 823065 + assert nx.number_of_nonisomorphic_trees(30) == 14830871802 + + def test_nonisomorphic_trees(self): + def f(x): + return list(nx.nonisomorphic_trees(x)) + + assert edges_equal(f(3)[0].edges(), [(0, 1), (0, 2)]) + assert edges_equal(f(4)[0].edges(), [(0, 1), (0, 3), (1, 2)]) + assert edges_equal(f(4)[1].edges(), [(0, 1), (0, 2), (0, 3)]) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_random_clustered.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_random_clustered.py new file mode 100644 index 0000000..8506652 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_random_clustered.py @@ -0,0 +1,33 @@ +import pytest + +import networkx as nx + + +class TestRandomClusteredGraph: + def test_custom_joint_degree_sequence(self): + node = [1, 1, 1, 2, 1, 2, 0, 0] + tri = [0, 0, 0, 0, 0, 1, 1, 1] + joint_degree_sequence = zip(node, tri) + G = nx.random_clustered_graph(joint_degree_sequence) + assert G.number_of_nodes() == 8 + assert G.number_of_edges() == 7 + + def test_tuple_joint_degree_sequence(self): + G = nx.random_clustered_graph([(1, 2), (2, 1), (1, 1), (1, 1), (1, 1), (2, 0)]) + assert G.number_of_nodes() == 6 + assert G.number_of_edges() == 10 + + def test_invalid_joint_degree_sequence_type(self): + with pytest.raises(nx.NetworkXError, match="Invalid degree sequence"): + nx.random_clustered_graph([[1, 1], [2, 1], [0, 1]]) + + def test_invalid_joint_degree_sequence_value(self): + with pytest.raises(nx.NetworkXError, match="Invalid degree sequence"): + nx.random_clustered_graph([[1, 1], [1, 2], [0, 1]]) + + def test_directed_graph_raises_error(self): + with pytest.raises(nx.NetworkXError, match="Directed Graph not supported"): + nx.random_clustered_graph( + [(1, 2), (2, 1), (1, 1), (1, 1), (1, 1), (2, 0)], + create_using=nx.DiGraph, + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_random_graphs.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_random_graphs.py new file mode 100644 index 0000000..3262e54 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_random_graphs.py @@ -0,0 +1,478 @@ +"""Unit tests for the :mod:`networkx.generators.random_graphs` module.""" + +import pytest + +import networkx as nx + +_gnp_generators = [ + nx.gnp_random_graph, + nx.fast_gnp_random_graph, + nx.binomial_graph, + nx.erdos_renyi_graph, +] + + +@pytest.mark.parametrize("generator", _gnp_generators) +@pytest.mark.parametrize("directed", (True, False)) +def test_gnp_generators_negative_edge_probability(generator, directed): + """If the edge probability `p` is <=0, the resulting graph should have no edges.""" + 
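+ # p <= 0 means no edge can ever be created, so each generator should return an edgeless graph on 10 nodes.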
G = generator(10, -1.1, directed=directed) + assert len(G) == 10 + assert G.number_of_edges() == 0 + assert G.is_directed() == directed + + +@pytest.mark.parametrize("generator", _gnp_generators) +@pytest.mark.parametrize( + ("directed", "expected_num_edges"), + [(False, 45), (True, 90)], +) +def test_gnp_generators_greater_than_1_edge_probability( + generator, directed, expected_num_edges +): + """If the edge probability `p` is >=1, the resulting graph should be complete.""" + G = generator(10, 1.1, directed=directed) + assert len(G) == 10 + assert G.number_of_edges() == expected_num_edges + assert G.is_directed() == directed + + +@pytest.mark.parametrize("generator", _gnp_generators) +@pytest.mark.parametrize("directed", (True, False)) +def test_gnp_generators_basic(generator, directed): + """If the edge probability `p` is >0 and <1, test only the basic properties.""" + G = generator(10, 0.1, directed=directed) + assert len(G) == 10 + assert G.is_directed() == directed + + +@pytest.mark.parametrize("generator", _gnp_generators) +def test_gnp_generators_for_p_close_to_1(generator): + """If the edge probability `p` is close to 1, the resulting graph should have all edges.""" + runs = 100 + edges = sum( + generator(10, 0.99999, directed=True).number_of_edges() for _ in range(runs) + ) + assert abs(edges / float(runs) - 90) <= runs * 2.0 / 100 + + +@pytest.mark.parametrize("generator", _gnp_generators) +@pytest.mark.parametrize("p", (0.2, 0.8)) +@pytest.mark.parametrize("directed", (True, False)) +def test_gnp_generators_edge_probability(generator, p, directed): + """Test that gnp generators generate edges according to their probability `p`.""" + runs = 5000 + n = 5 + edge_counts = [[0] * n for _ in range(n)] + for i in range(runs): + G = generator(n, p, directed=directed) + for v, w in G.edges: + edge_counts[v][w] += 1 + if not directed: + edge_counts[w][v] += 1 + for v in range(n): + for w in range(n): + if v == w: + # There should be no loops + assert edge_counts[v][w] == 0 + else: + # Each edge should have been generated with probability close to p + assert abs(edge_counts[v][w] / float(runs) - p) <= 0.03 + + +@pytest.mark.parametrize( + "generator", [nx.gnp_random_graph, nx.binomial_graph, nx.erdos_renyi_graph] +) +@pytest.mark.parametrize( + ("seed", "directed", "expected_num_edges"), + [(42, False, 1219), (42, True, 2454), (314, False, 1247), (314, True, 2476)], +) +def test_gnp_random_graph_aliases(generator, seed, directed, expected_num_edges): + """Test that aliases give the same result with the same seed.""" + G = generator(100, 0.25, seed=seed, directed=directed) + assert len(G) == 100 + assert G.number_of_edges() == expected_num_edges + assert G.is_directed() == directed + + +class TestGeneratorsRandom: + def test_random_graph(self): + seed = 42 + G = nx.gnm_random_graph(100, 20, seed) + G = nx.gnm_random_graph(100, 20, seed, directed=True) + G = nx.dense_gnm_random_graph(100, 20, seed) + + G = nx.barabasi_albert_graph(100, 1, seed) + G = nx.barabasi_albert_graph(100, 3, seed) + assert G.number_of_edges() == (97 * 3) + + G = nx.barabasi_albert_graph(100, 3, seed, nx.complete_graph(5)) + assert G.number_of_edges() == (10 + 95 * 3) + + G = nx.extended_barabasi_albert_graph(100, 1, 0, 0, seed) + assert G.number_of_edges() == 99 + G = nx.extended_barabasi_albert_graph(100, 3, 0, 0, seed) + assert G.number_of_edges() == 97 * 3 + G = nx.extended_barabasi_albert_graph(100, 1, 0, 0.5, seed) + assert G.number_of_edges() == 99 + G = nx.extended_barabasi_albert_graph(100, 2, 0.5, 0,
seed) + assert G.number_of_edges() > 100 * 3 + assert G.number_of_edges() < 100 * 4 + + G = nx.extended_barabasi_albert_graph(100, 2, 0.3, 0.3, seed) + assert G.number_of_edges() > 100 * 2 + assert G.number_of_edges() < 100 * 4 + + G = nx.powerlaw_cluster_graph(100, 1, 1.0, seed) + G = nx.powerlaw_cluster_graph(100, 3, 0.0, seed) + assert G.number_of_edges() == (97 * 3) + + G = nx.random_regular_graph(10, 20, seed) + + pytest.raises(nx.NetworkXError, nx.random_regular_graph, 3, 21) + pytest.raises(nx.NetworkXError, nx.random_regular_graph, 33, 21) + + constructor = [(10, 20, 0.8), (20, 40, 0.8)] + G = nx.random_shell_graph(constructor, seed) + + def is_caterpillar(g): + """ + A tree is a caterpillar iff all nodes of degree >=3 are surrounded + by at most two nodes of degree two or greater. + ref: http://mathworld.wolfram.com/CaterpillarGraph.html + """ + deg_over_3 = [n for n in g if g.degree(n) >= 3] + for n in deg_over_3: + nbh_deg_over_2 = [nbh for nbh in g.neighbors(n) if g.degree(nbh) >= 2] + if not len(nbh_deg_over_2) <= 2: + return False + return True + + def is_lobster(g): + """ + A tree is a lobster if it has the property that the removal of leaf + nodes leaves a caterpillar graph (Gallian 2007) + ref: http://mathworld.wolfram.com/LobsterGraph.html + """ + non_leafs = [n for n in g if g.degree(n) > 1] + return is_caterpillar(g.subgraph(non_leafs)) + + G = nx.random_lobster(10, 0.1, 0.5, seed) + assert max(G.degree(n) for n in G.nodes()) > 3 + assert is_lobster(G) + pytest.raises(nx.NetworkXError, nx.random_lobster, 10, 0.1, 1, seed) + pytest.raises(nx.NetworkXError, nx.random_lobster, 10, 1, 1, seed) + pytest.raises(nx.NetworkXError, nx.random_lobster, 10, 1, 0.5, seed) + + # docstring says this should be a caterpillar + G = nx.random_lobster(10, 0.1, 0.0, seed) + assert is_caterpillar(G) + + # difficult to find seed that requires few tries + seq = nx.random_powerlaw_tree_sequence(10, 3, seed=14, tries=1) + G = nx.random_powerlaw_tree(10, 3, seed=14, tries=1) + + def test_dual_barabasi_albert(self, m1=1, m2=4, p=0.5): + """ + Tests that the dual BA random graph generated behaves consistently. + + Tests the exceptions are raised as expected. 
+ + The graph generation is repeated several times to prevent lucky shots + + """ + seeds = [42, 314, 2718] + initial_graph = nx.complete_graph(10) + + for seed in seeds: + # This should be BA with m = m1 + BA1 = nx.barabasi_albert_graph(100, m1, seed) + DBA1 = nx.dual_barabasi_albert_graph(100, m1, m2, 1, seed) + assert BA1.edges() == DBA1.edges() + + # This should be BA with m = m2 + BA2 = nx.barabasi_albert_graph(100, m2, seed) + DBA2 = nx.dual_barabasi_albert_graph(100, m1, m2, 0, seed) + assert BA2.edges() == DBA2.edges() + + BA3 = nx.barabasi_albert_graph(100, m1, seed) + DBA3 = nx.dual_barabasi_albert_graph(100, m1, m1, p, seed) + # We can't compare edges here since randomness is "consumed" when drawing + # between m1 and m2 + assert BA3.size() == DBA3.size() + + DBA = nx.dual_barabasi_albert_graph(100, m1, m2, p, seed, initial_graph) + BA1 = nx.barabasi_albert_graph(100, m1, seed, initial_graph) + BA2 = nx.barabasi_albert_graph(100, m2, seed, initial_graph) + assert ( + min(BA1.size(), BA2.size()) <= DBA.size() <= max(BA1.size(), BA2.size()) + ) + + # Testing exceptions + dbag = nx.dual_barabasi_albert_graph + pytest.raises(nx.NetworkXError, dbag, m1, m1, m2, 0) + pytest.raises(nx.NetworkXError, dbag, m2, m1, m2, 0) + pytest.raises(nx.NetworkXError, dbag, 100, m1, m2, -0.5) + pytest.raises(nx.NetworkXError, dbag, 100, m1, m2, 1.5) + initial = nx.complete_graph(max(m1, m2) - 1) + pytest.raises(nx.NetworkXError, dbag, 100, m1, m2, p, initial_graph=initial) + + def test_extended_barabasi_albert(self, m=2): + """ + Tests that the extended BA random graph generated behaves consistently. + + Tests the exceptions are raised as expected. + + The graph generation is repeated several times to prevent lucky shots + + """ + seeds = [42, 314, 2718] + + for seed in seeds: + BA_model = nx.barabasi_albert_graph(100, m, seed) + BA_model_edges = BA_model.number_of_edges() + + # This behaves just like BA; the number of edges must be the same + G1 = nx.extended_barabasi_albert_graph(100, m, 0, 0, seed) + assert G1.size() == BA_model_edges + + # More than twice as many edges should have been added + G1 = nx.extended_barabasi_albert_graph(100, m, 0.8, 0, seed) + assert G1.size() > BA_model_edges * 2 + + # Only edge rewiring, so the number of edges matches the original + G2 = nx.extended_barabasi_albert_graph(100, m, 0, 0.8, seed) + assert G2.size() == BA_model_edges + + # Mixed scenario: fewer edges than G1 and more edges than G2 + G3 = nx.extended_barabasi_albert_graph(100, m, 0.3, 0.3, seed) + assert G3.size() > G2.size() + assert G3.size() < G1.size() + + # Testing exceptions + ebag = nx.extended_barabasi_albert_graph + pytest.raises(nx.NetworkXError, ebag, m, m, 0, 0) + pytest.raises(nx.NetworkXError, ebag, 1, 0.5, 0, 0) + pytest.raises(nx.NetworkXError, ebag, 100, 2, 0.5, 0.5) + + def test_random_zero_regular_graph(self): + """Tests that a 0-regular graph has the correct number of nodes and + edges.
+ + """ + seed = 42 + G = nx.random_regular_graph(0, 10, seed) + assert len(G) == 10 + assert G.number_of_edges() == 0 + + def test_gnm(self): + G = nx.gnm_random_graph(10, 3) + assert len(G) == 10 + assert G.number_of_edges() == 3 + + G = nx.gnm_random_graph(10, 3, seed=42) + assert len(G) == 10 + assert G.number_of_edges() == 3 + + G = nx.gnm_random_graph(10, 100) + assert len(G) == 10 + assert G.number_of_edges() == 45 + + G = nx.gnm_random_graph(10, 100, directed=True) + assert len(G) == 10 + assert G.number_of_edges() == 90 + + G = nx.gnm_random_graph(10, -1.1) + assert len(G) == 10 + assert G.number_of_edges() == 0 + + def test_watts_strogatz_big_k(self): + # Test to make sure than n <= k + pytest.raises(nx.NetworkXError, nx.watts_strogatz_graph, 10, 11, 0.25) + pytest.raises(nx.NetworkXError, nx.newman_watts_strogatz_graph, 10, 11, 0.25) + + # could create an infinite loop, now doesn't + # infinite loop used to occur when a node has degree n-1 and needs to rewire + nx.watts_strogatz_graph(10, 9, 0.25, seed=0) + nx.newman_watts_strogatz_graph(10, 9, 0.5, seed=0) + + # Test k==n scenario + nx.watts_strogatz_graph(10, 10, 0.25, seed=0) + nx.newman_watts_strogatz_graph(10, 10, 0.25, seed=0) + + def test_random_kernel_graph(self): + def integral(u, w, z): + return c * (z - w) + + def root(u, w, r): + return r / c + w + + c = 1 + graph = nx.random_kernel_graph(1000, integral, root) + graph = nx.random_kernel_graph(1000, integral, root, seed=42) + assert len(graph) == 1000 + + +@pytest.mark.parametrize( + ("k", "expected_num_nodes", "expected_num_edges"), + [ + (2, 10, 10), + (4, 10, 20), + ], +) +def test_watts_strogatz(k, expected_num_nodes, expected_num_edges): + G = nx.watts_strogatz_graph(10, k, 0.25, seed=42) + assert len(G) == expected_num_nodes + assert G.number_of_edges() == expected_num_edges + + +def test_newman_watts_strogatz_zero_probability(): + G = nx.newman_watts_strogatz_graph(10, 2, 0.0, seed=42) + assert len(G) == 10 + assert G.number_of_edges() == 10 + + +def test_newman_watts_strogatz_nonzero_probability(): + G = nx.newman_watts_strogatz_graph(10, 4, 0.25, seed=42) + assert len(G) == 10 + assert G.number_of_edges() >= 20 + + +def test_connected_watts_strogatz(): + G = nx.connected_watts_strogatz_graph(10, 2, 0.1, tries=10, seed=42) + assert len(G) == 10 + assert G.number_of_edges() == 10 + + +def test_connected_watts_strogatz_zero_tries(): + with pytest.raises(nx.NetworkXError, match="Maximum number of tries exceeded"): + nx.connected_watts_strogatz_graph(10, 2, 0.1, tries=0) + + +@pytest.mark.parametrize( + "generator, kwargs", + [ + (nx.fast_gnp_random_graph, {"n": 20, "p": 0.2, "directed": False}), + (nx.fast_gnp_random_graph, {"n": 20, "p": 0.2, "directed": True}), + (nx.gnp_random_graph, {"n": 20, "p": 0.2, "directed": False}), + (nx.gnp_random_graph, {"n": 20, "p": 0.2, "directed": True}), + (nx.dense_gnm_random_graph, {"n": 30, "m": 4}), + (nx.gnm_random_graph, {"n": 30, "m": 4, "directed": False}), + (nx.gnm_random_graph, {"n": 30, "m": 4, "directed": True}), + (nx.newman_watts_strogatz_graph, {"n": 50, "k": 5, "p": 0.1}), + (nx.watts_strogatz_graph, {"n": 50, "k": 5, "p": 0.1}), + (nx.connected_watts_strogatz_graph, {"n": 50, "k": 5, "p": 0.1}), + (nx.random_regular_graph, {"d": 5, "n": 20}), + (nx.barabasi_albert_graph, {"n": 40, "m": 3}), + (nx.dual_barabasi_albert_graph, {"n": 40, "m1": 3, "m2": 2, "p": 0.1}), + (nx.extended_barabasi_albert_graph, {"n": 40, "m": 3, "p": 0.1, "q": 0.2}), + (nx.powerlaw_cluster_graph, {"n": 40, "m": 3, "p": 0.1}), + 
(nx.random_lobster, {"n": 40, "p1": 0.1, "p2": 0.2}), + (nx.random_shell_graph, {"constructor": [(10, 20, 0.8), (20, 40, 0.8)]}), + (nx.random_powerlaw_tree, {"n": 10, "seed": 14, "tries": 1}), + ( + nx.random_kernel_graph, + { + "n": 10, + "kernel_integral": lambda u, w, z: z - w, + "kernel_root": lambda u, w, r: r + w, + }, + ), + ], +) +@pytest.mark.parametrize("create_using_instance", [False, True]) +def test_create_using(generator, kwargs, create_using_instance): + class DummyGraph(nx.Graph): + pass + + class DummyDiGraph(nx.DiGraph): + pass + + create_using_type = DummyDiGraph if kwargs.get("directed") else DummyGraph + create_using = create_using_type() if create_using_instance else create_using_type + graph = generator(**kwargs, create_using=create_using) + assert isinstance(graph, create_using_type) + + +@pytest.mark.parametrize("directed", [True, False]) +@pytest.mark.parametrize("fn", (nx.fast_gnp_random_graph, nx.gnp_random_graph)) +def test_gnp_fns_disallow_multigraph(fn, directed): + with pytest.raises(nx.NetworkXError, match="must not be a multi-graph"): + fn(20, 0.2, create_using=nx.MultiGraph) + + +@pytest.mark.parametrize("fn", (nx.gnm_random_graph, nx.dense_gnm_random_graph)) +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_gnm_fns_disallow_directed_and_multigraph(fn, graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + fn(10, 20, create_using=graphtype) + + +@pytest.mark.parametrize( + "fn", + ( + nx.newman_watts_strogatz_graph, + nx.watts_strogatz_graph, + nx.connected_watts_strogatz_graph, + ), +) +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_watts_strogatz_disallow_directed_and_multigraph(fn, graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + fn(10, 2, 0.2, create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_random_regular_graph_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + nx.random_regular_graph(2, 10, create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_barabasi_albert_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + nx.barabasi_albert_graph(10, 3, create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_dual_barabasi_albert_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + nx.dual_barabasi_albert_graph(10, 2, 1, 0.4, create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_extended_barabasi_albert_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + nx.extended_barabasi_albert_graph(10, 2, 0.2, 0.3, create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_powerlaw_cluster_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + nx.powerlaw_cluster_graph(10, 5, 0.2, create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_random_lobster_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + 
nx.random_lobster(10, 0.1, 0.1, create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_random_shell_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + nx.random_shell_graph([(10, 20, 2), (10, 20, 5)], create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_random_powerlaw_tree_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + nx.random_powerlaw_tree(10, create_using=graphtype) + + +@pytest.mark.parametrize("graphtype", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) +def test_random_kernel_disallow_directed_and_multigraph(graphtype): + with pytest.raises(nx.NetworkXError, match="must not be"): + nx.random_kernel_graph( + 10, lambda y, a, b: a + b, lambda u, w, r: r + w, create_using=graphtype + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_small.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_small.py new file mode 100644 index 0000000..bd65be4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_small.py @@ -0,0 +1,202 @@ +import pytest + +import networkx as nx + +null = nx.null_graph() + + +class TestGeneratorsSmall: + def test__LCF_graph(self): + # If n<=0, then return the null_graph + G = nx.LCF_graph(-10, [1, 2], 100) + assert nx.could_be_isomorphic(G, null) + G = nx.LCF_graph(0, [1, 2], 3) + assert nx.could_be_isomorphic(G, null) + G = nx.LCF_graph(0, [1, 2], 10) + assert nx.could_be_isomorphic(G, null) + + # Test that LCF(n,[],0) == cycle_graph(n) + for a, b, c in [(5, [], 0), (10, [], 0), (5, [], 1), (10, [], 10)]: + G = nx.LCF_graph(a, b, c) + assert nx.could_be_isomorphic(G, nx.cycle_graph(a)) + + # Generate the utility graph K_{3,3} + G = nx.LCF_graph(6, [3, -3], 3) + utility_graph = nx.complete_bipartite_graph(3, 3) + assert nx.could_be_isomorphic(G, utility_graph) + + with pytest.raises(nx.NetworkXError, match="Directed Graph not supported"): + G = nx.LCF_graph(6, [3, -3], 3, create_using=nx.DiGraph) + + def test_properties_named_small_graphs(self): + G = nx.bull_graph() + assert sorted(G) == list(range(5)) + assert G.number_of_edges() == 5 + assert sorted(d for n, d in G.degree()) == [1, 1, 2, 3, 3] + assert nx.diameter(G) == 3 + assert nx.radius(G) == 2 + + G = nx.chvatal_graph() + assert sorted(G) == list(range(12)) + assert G.number_of_edges() == 24 + assert [d for n, d in G.degree()] == 12 * [4] + assert nx.diameter(G) == 2 + assert nx.radius(G) == 2 + + G = nx.cubical_graph() + assert sorted(G) == list(range(8)) + assert G.number_of_edges() == 12 + assert [d for n, d in G.degree()] == 8 * [3] + assert nx.diameter(G) == 3 + assert nx.radius(G) == 3 + + G = nx.desargues_graph() + assert sorted(G) == list(range(20)) + assert G.number_of_edges() == 30 + assert [d for n, d in G.degree()] == 20 * [3] + + G = nx.diamond_graph() + assert sorted(G) == list(range(4)) + assert sorted(d for n, d in G.degree()) == [2, 2, 3, 3] + assert nx.diameter(G) == 2 + assert nx.radius(G) == 1 + + G = nx.dodecahedral_graph() + assert sorted(G) == list(range(20)) + assert G.number_of_edges() == 30 + assert [d for n, d in G.degree()] == 20 * [3] + assert nx.diameter(G) == 5 + assert nx.radius(G) == 5 + + G = nx.frucht_graph() + assert sorted(G) == list(range(12)) + assert G.number_of_edges() == 18 + assert [d for n, d in G.degree()] == 12 * [3] + assert 
nx.diameter(G) == 4 + assert nx.radius(G) == 3 + + G = nx.heawood_graph() + assert sorted(G) == list(range(14)) + assert G.number_of_edges() == 21 + assert [d for n, d in G.degree()] == 14 * [3] + assert nx.diameter(G) == 3 + assert nx.radius(G) == 3 + + G = nx.hoffman_singleton_graph() + assert sorted(G) == list(range(50)) + assert G.number_of_edges() == 175 + assert [d for n, d in G.degree()] == 50 * [7] + assert nx.diameter(G) == 2 + assert nx.radius(G) == 2 + + G = nx.house_graph() + assert sorted(G) == list(range(5)) + assert G.number_of_edges() == 6 + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 3, 3] + assert nx.diameter(G) == 2 + assert nx.radius(G) == 2 + + G = nx.house_x_graph() + assert sorted(G) == list(range(5)) + assert G.number_of_edges() == 8 + assert sorted(d for n, d in G.degree()) == [2, 3, 3, 4, 4] + assert nx.diameter(G) == 2 + assert nx.radius(G) == 1 + + G = nx.icosahedral_graph() + assert sorted(G) == list(range(12)) + assert G.number_of_edges() == 30 + assert [d for n, d in G.degree()] == [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] + assert nx.diameter(G) == 3 + assert nx.radius(G) == 3 + + G = nx.krackhardt_kite_graph() + assert sorted(G) == list(range(10)) + assert G.number_of_edges() == 18 + assert sorted(d for n, d in G.degree()) == [1, 2, 3, 3, 3, 4, 4, 5, 5, 6] + + G = nx.moebius_kantor_graph() + assert sorted(G) == list(range(16)) + assert G.number_of_edges() == 24 + assert [d for n, d in G.degree()] == 16 * [3] + assert nx.diameter(G) == 4 + + G = nx.octahedral_graph() + assert sorted(G) == list(range(6)) + assert G.number_of_edges() == 12 + assert [d for n, d in G.degree()] == 6 * [4] + assert nx.diameter(G) == 2 + assert nx.radius(G) == 2 + + G = nx.pappus_graph() + assert sorted(G) == list(range(18)) + assert G.number_of_edges() == 27 + assert [d for n, d in G.degree()] == 18 * [3] + assert nx.diameter(G) == 4 + + G = nx.petersen_graph() + assert sorted(G) == list(range(10)) + assert G.number_of_edges() == 15 + assert [d for n, d in G.degree()] == 10 * [3] + assert nx.diameter(G) == 2 + assert nx.radius(G) == 2 + + G = nx.sedgewick_maze_graph() + assert sorted(G) == list(range(8)) + assert G.number_of_edges() == 10 + assert sorted(d for n, d in G.degree()) == [1, 2, 2, 2, 3, 3, 3, 4] + + G = nx.tetrahedral_graph() + assert sorted(G) == list(range(4)) + assert G.number_of_edges() == 6 + assert [d for n, d in G.degree()] == [3, 3, 3, 3] + assert nx.diameter(G) == 1 + assert nx.radius(G) == 1 + + G = nx.truncated_cube_graph() + assert sorted(G) == list(range(24)) + assert G.number_of_edges() == 36 + assert [d for n, d in G.degree()] == 24 * [3] + + G = nx.truncated_tetrahedron_graph() + assert sorted(G) == list(range(12)) + assert G.number_of_edges() == 18 + assert [d for n, d in G.degree()] == 12 * [3] + + G = nx.tutte_graph() + assert sorted(G) == list(range(46)) + assert G.number_of_edges() == 69 + assert [d for n, d in G.degree()] == 46 * [3] + + # Test create_using with directed or multigraphs on small graphs + pytest.raises(nx.NetworkXError, nx.tutte_graph, create_using=nx.DiGraph) + MG = nx.tutte_graph(create_using=nx.MultiGraph) + assert sorted(MG.edges()) == sorted(G.edges()) + + +@pytest.mark.parametrize( + "fn", + ( + nx.bull_graph, + nx.chvatal_graph, + nx.cubical_graph, + nx.diamond_graph, + nx.house_graph, + nx.house_x_graph, + nx.icosahedral_graph, + nx.krackhardt_kite_graph, + nx.octahedral_graph, + nx.petersen_graph, + nx.truncated_cube_graph, + nx.tutte_graph, + ), +) +@pytest.mark.parametrize( + "create_using", (nx.DiGraph, 
nx.MultiDiGraph, nx.DiGraph([(0, 1)])) +) +def tests_raises_with_directed_create_using(fn, create_using): + with pytest.raises(nx.NetworkXError, match="Directed Graph not supported"): + fn(create_using=create_using) + # All these functions have `create_using` as the first positional argument too + with pytest.raises(nx.NetworkXError, match="Directed Graph not supported"): + fn(create_using) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_spectral_graph_forge.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_spectral_graph_forge.py new file mode 100644 index 0000000..b554bfd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_spectral_graph_forge.py @@ -0,0 +1,49 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +from networkx import is_isomorphic +from networkx.exception import NetworkXError +from networkx.generators import karate_club_graph +from networkx.generators.spectral_graph_forge import spectral_graph_forge +from networkx.utils import nodes_equal + + +def test_spectral_graph_forge(): + G = karate_club_graph() + + seed = 54321 + + # common cases, just checking node number preservation and the difference + # between the identity and modularity cases + H = spectral_graph_forge(G, 0.1, transformation="identity", seed=seed) + assert nodes_equal(G, H) + + I = spectral_graph_forge(G, 0.1, transformation="identity", seed=seed) + assert nodes_equal(G, H) + assert is_isomorphic(I, H) + + I = spectral_graph_forge(G, 0.1, transformation="modularity", seed=seed) + assert nodes_equal(G, I) + + assert not is_isomorphic(I, H) + + # with all the eigenvectors, the output graph is identical to the input one + H = spectral_graph_forge(G, 1, transformation="modularity", seed=seed) + assert nodes_equal(G, H) + assert is_isomorphic(G, H) + + # invalid alpha input value; it is silently clipped to [0, 1] + H = spectral_graph_forge(G, -1, transformation="identity", seed=seed) + assert nodes_equal(G, H) + + H = spectral_graph_forge(G, 10, transformation="identity", seed=seed) + assert nodes_equal(G, H) + assert is_isomorphic(G, H) + + # invalid transformation mode; check that the error is raised + pytest.raises( + NetworkXError, spectral_graph_forge, G, 0.1, transformation="unknown", seed=seed + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_stochastic.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_stochastic.py new file mode 100644 index 0000000..0404d9d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_stochastic.py @@ -0,0 +1,72 @@ +"""Unit tests for the :mod:`networkx.generators.stochastic` module.""" + +import pytest + +import networkx as nx + + +class TestStochasticGraph: + """Unit tests for the :func:`~networkx.stochastic_graph` function.""" + + def test_default_weights(self): + G = nx.DiGraph() + G.add_edge(0, 1) + G.add_edge(0, 2) + S = nx.stochastic_graph(G) + assert nx.is_isomorphic(G, S) + assert sorted(S.edges(data=True)) == [ + (0, 1, {"weight": 0.5}), + (0, 2, {"weight": 0.5}), + ] + + def test_in_place(self): + """Tests for an in-place reweighting of the edges of the graph.""" + G = nx.DiGraph() + G.add_edge(0, 1, weight=1) + G.add_edge(0, 2, weight=1) + nx.stochastic_graph(G, copy=False) + assert sorted(G.edges(data=True)) == [ + (0, 1, {"weight": 0.5}), + (0, 2, {"weight": 0.5}), + ] + + def test_arbitrary_weights(self): + G = nx.DiGraph() + G.add_edge(0, 1, weight=1) + G.add_edge(0, 2, weight=1)
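+ # stochastic_graph normalizes each out-edge weight by the node's total out-weight, so both edges become 0.5.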
+ S = nx.stochastic_graph(G) + assert sorted(S.edges(data=True)) == [ + (0, 1, {"weight": 0.5}), + (0, 2, {"weight": 0.5}), + ] + + def test_multidigraph(self): + G = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (0, 1), (0, 2), (0, 2)]) + S = nx.stochastic_graph(G) + d = {"weight": 0.25} + assert sorted(S.edges(data=True)) == [ + (0, 1, d), + (0, 1, d), + (0, 2, d), + (0, 2, d), + ] + + def test_zero_weights(self): + """Smoke test: ensure ZeroDivisionError is not raised.""" + G = nx.DiGraph() + G.add_edge(0, 1, weight=0) + G.add_edge(0, 2, weight=0) + S = nx.stochastic_graph(G) + assert sorted(S.edges(data=True)) == [ + (0, 1, {"weight": 0}), + (0, 2, {"weight": 0}), + ] + + def test_graph_disallowed(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.stochastic_graph(nx.Graph()) + + def test_multigraph_disallowed(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.stochastic_graph(nx.MultiGraph()) diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_sudoku.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_sudoku.py new file mode 100644 index 0000000..7c3560a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_sudoku.py @@ -0,0 +1,92 @@ +"""Unit tests for the :mod:`networkx.generators.sudoku_graph` module.""" + +import pytest + +import networkx as nx + + +def test_sudoku_negative(): + """Raise an error when generating a Sudoku graph of order -1.""" + pytest.raises(nx.NetworkXError, nx.sudoku_graph, n=-1) + + +@pytest.mark.parametrize("n", [0, 1, 2, 3, 4]) +def test_sudoku_generator(n): + """Generate Sudoku graphs of various sizes and verify their properties.""" + G = nx.sudoku_graph(n) + expected_nodes = n**4 + expected_degree = (n - 1) * (3 * n + 1) + expected_edges = expected_nodes * expected_degree // 2 + assert not G.is_directed() + assert not G.is_multigraph() + assert G.number_of_nodes() == expected_nodes + assert G.number_of_edges() == expected_edges + assert all(d == expected_degree for _, d in G.degree) + + if n == 2: + assert sorted(G.neighbors(6)) == [2, 3, 4, 5, 7, 10, 14] + elif n == 3: + assert sorted(G.neighbors(42)) == [ + 6, + 15, + 24, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 43, + 44, + 51, + 52, + 53, + 60, + 69, + 78, + ] + elif n == 4: + assert sorted(G.neighbors(0)) == [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 32, + 33, + 34, + 35, + 48, + 49, + 50, + 51, + 64, + 80, + 96, + 112, + 128, + 144, + 160, + 176, + 192, + 208, + 224, + 240, + ] diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_time_series.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_time_series.py new file mode 100644 index 0000000..5d0cc90 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_time_series.py @@ -0,0 +1,64 @@ +"""Unit tests for the :mod:`networkx.generators.time_series` module.""" + +import itertools + +import networkx as nx + + +def test_visibility_graph__empty_series__empty_graph(): + null_graph = nx.visibility_graph([]) # move along nothing to see here + assert nx.is_empty(null_graph) + + +def test_visibility_graph__single_value_ts__single_node_graph(): + node_graph = nx.visibility_graph([10]) # So Lonely + assert node_graph.number_of_nodes() == 1 + assert node_graph.number_of_edges() == 0 + + +def test_visibility_graph__two_values_ts__single_edge_graph(): + edge_graph = nx.visibility_graph([10, 20]) # Two of Us + assert 
list(edge_graph.edges) == [(0, 1)] + + +def test_visibility_graph__convex_series__complete_graph(): + series = [i**2 for i in range(10)] # no obstructions + expected_series_length = len(series) + + actual_graph = nx.visibility_graph(series) + + assert actual_graph.number_of_nodes() == expected_series_length + assert actual_graph.number_of_edges() == 45 + assert nx.is_isomorphic(actual_graph, nx.complete_graph(expected_series_length)) + + +def test_visibility_graph__concave_series__path_graph(): + series = [-(i**2) for i in range(10)] # Slip Slidin' Away + expected_node_count = len(series) + + actual_graph = nx.visibility_graph(series) + + assert actual_graph.number_of_nodes() == expected_node_count + assert actual_graph.number_of_edges() == expected_node_count - 1 + assert nx.is_isomorphic(actual_graph, nx.path_graph(expected_node_count)) + + +def test_visibility_graph__flat_series__path_graph(): + series = [0] * 10 # living in 1D flatland + expected_node_count = len(series) + + actual_graph = nx.visibility_graph(series) + + assert actual_graph.number_of_nodes() == expected_node_count + assert actual_graph.number_of_edges() == expected_node_count - 1 + assert nx.is_isomorphic(actual_graph, nx.path_graph(expected_node_count)) + + +def test_visibility_graph_cyclic_series(): + series = list(itertools.islice(itertools.cycle((2, 1, 3)), 17)) # It's so bumpy! + expected_node_count = len(series) + + actual_graph = nx.visibility_graph(series) + + assert actual_graph.number_of_nodes() == expected_node_count + assert actual_graph.number_of_edges() == 25 diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_trees.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_trees.py new file mode 100644 index 0000000..7932436 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_trees.py @@ -0,0 +1,195 @@ +import random + +import pytest + +import networkx as nx +from networkx.utils import arbitrary_element, graphs_equal + + +@pytest.mark.parametrize("prefix_tree_fn", (nx.prefix_tree, nx.prefix_tree_recursive)) +def test_basic_prefix_tree(prefix_tree_fn): + # This example is from the Wikipedia article "Trie" + # . + strings = ["a", "to", "tea", "ted", "ten", "i", "in", "inn"] + T = prefix_tree_fn(strings) + root, NIL = 0, -1 + + def source_label(v): + return T.nodes[v]["source"] + + # First, we check that the tree has the expected + # structure. Recall that each node that corresponds to one of + # the input strings has an edge to the NIL node. + # + # Consider the three children at level 1 in the trie. + a, i, t = sorted(T[root], key=source_label) + # Check the 'a' branch. + assert len(T[a]) == 1 + nil = arbitrary_element(T[a]) + assert len(T[nil]) == 0 + # Check the 'i' branch. + assert len(T[i]) == 2 + nil, in_ = sorted(T[i], key=source_label) + assert len(T[nil]) == 0 + assert len(T[in_]) == 2 + nil, inn = sorted(T[in_], key=source_label) + assert len(T[nil]) == 0 + assert len(T[inn]) == 1 + nil = arbitrary_element(T[inn]) + assert len(T[nil]) == 0 + # Check the 't' branch. 
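+ # 't' has two children: 'te' (an internal prefix) and 'to' (a complete word whose only child is NIL).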
te, to = sorted(T[t], key=source_label) + assert len(T[to]) == 1 + nil = arbitrary_element(T[to]) + assert len(T[nil]) == 0 + tea, ted, ten = sorted(T[te], key=source_label) + assert len(T[tea]) == 1 + assert len(T[ted]) == 1 + assert len(T[ten]) == 1 + nil = arbitrary_element(T[tea]) + assert len(T[nil]) == 0 + nil = arbitrary_element(T[ted]) + assert len(T[nil]) == 0 + nil = arbitrary_element(T[ten]) + assert len(T[nil]) == 0 + + # Next, we check that the "source" of each of the nodes is the + # rightmost letter in the string corresponding to the path to + # that node. + assert source_label(root) is None + assert source_label(a) == "a" + assert source_label(i) == "i" + assert source_label(t) == "t" + assert source_label(in_) == "n" + assert source_label(inn) == "n" + assert source_label(to) == "o" + assert source_label(te) == "e" + assert source_label(tea) == "a" + assert source_label(ted) == "d" + assert source_label(ten) == "n" + assert source_label(NIL) == "NIL" + + +@pytest.mark.parametrize( + "strings", + ( + ["a", "to", "tea", "ted", "ten", "i", "in", "inn"], + ["ab", "abs", "ad"], + ["ab", "abs", "ad", ""], + ["distant", "disparaging", "distant", "diamond", "ruby"], + ), +) +def test_implementations_consistent(strings): + """Ensure results are consistent between prefix_tree implementations.""" + assert graphs_equal(nx.prefix_tree(strings), nx.prefix_tree_recursive(strings)) + + +def test_random_labeled_rooted_tree(): + for i in range(1, 10): + t1 = nx.random_labeled_rooted_tree(i, seed=42) + t2 = nx.random_labeled_rooted_tree(i, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + assert nx.is_tree(t1) + assert "root" in t1.graph + assert "roots" not in t1.graph + + +def test_random_labeled_tree_n_zero(): + """Tests that the NetworkXPointlessConcept exception is raised when n = 0.""" + with pytest.raises(nx.NetworkXPointlessConcept): + T = nx.random_labeled_tree(0, seed=1234) + with pytest.raises(nx.NetworkXPointlessConcept): + T = nx.random_labeled_rooted_tree(0, seed=1234) + + +def test_random_labeled_rooted_forest(): + for i in range(1, 10): + t1 = nx.random_labeled_rooted_forest(i, seed=42) + t2 = nx.random_labeled_rooted_forest(i, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + for c in nx.connected_components(t1): + assert nx.is_tree(t1.subgraph(c)) + assert "root" not in t1.graph + assert "roots" in t1.graph + + +def test_random_labeled_rooted_forest_n_zero(): + """Tests generation of empty labeled forests.""" + F = nx.random_labeled_rooted_forest(0, seed=1234) + assert len(F) == 0 + assert len(F.graph["roots"]) == 0 + + +def test_random_unlabeled_rooted_tree(): + for i in range(1, 10): + t1 = nx.random_unlabeled_rooted_tree(i, seed=42) + t2 = nx.random_unlabeled_rooted_tree(i, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + assert nx.is_tree(t1) + assert "root" in t1.graph + assert "roots" not in t1.graph + t = nx.random_unlabeled_rooted_tree(15, number_of_trees=10, seed=43) + random.seed(43) + s = nx.random_unlabeled_rooted_tree(15, number_of_trees=10, seed=random) + for i in range(10): + assert nx.utils.misc.graphs_equal(t[i], s[i]) + assert nx.is_tree(t[i]) + assert "root" in t[i].graph + assert "roots" not in t[i].graph + + +def test_random_unlabeled_tree_n_zero(): + """Tests that the NetworkXPointlessConcept exception is raised when n = 0.""" + with pytest.raises(nx.NetworkXPointlessConcept): + T = nx.random_unlabeled_tree(0, seed=1234) + with pytest.raises(nx.NetworkXPointlessConcept): + T = nx.random_unlabeled_rooted_tree(0, seed=1234) + + +def
test_random_unlabeled_rooted_forest(): + with pytest.raises(ValueError): + nx.random_unlabeled_rooted_forest(10, q=0, seed=42) + for i in range(1, 10): + for q in range(1, i + 1): + t1 = nx.random_unlabeled_rooted_forest(i, q=q, seed=42) + t2 = nx.random_unlabeled_rooted_forest(i, q=q, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + for c in nx.connected_components(t1): + assert nx.is_tree(t1.subgraph(c)) + assert len(c) <= q + assert "root" not in t1.graph + assert "roots" in t1.graph + t = nx.random_unlabeled_rooted_forest(15, number_of_forests=10, seed=43) + random.seed(43) + s = nx.random_unlabeled_rooted_forest(15, number_of_forests=10, seed=random) + for i in range(10): + assert nx.utils.misc.graphs_equal(t[i], s[i]) + for c in nx.connected_components(t[i]): + assert nx.is_tree(t[i].subgraph(c)) + assert "root" not in t[i].graph + assert "roots" in t[i].graph + + +def test_random_unlabeled_forest_n_zero(): + """Tests generation of empty unlabeled forests.""" + F = nx.random_unlabeled_rooted_forest(0, seed=1234) + assert len(F) == 0 + assert len(F.graph["roots"]) == 0 + + +def test_random_unlabeled_tree(): + for i in range(1, 10): + t1 = nx.random_unlabeled_tree(i, seed=42) + t2 = nx.random_unlabeled_tree(i, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + assert nx.is_tree(t1) + assert "root" not in t1.graph + assert "roots" not in t1.graph + t = nx.random_unlabeled_tree(10, number_of_trees=10, seed=43) + random.seed(43) + s = nx.random_unlabeled_tree(10, number_of_trees=10, seed=random) + for i in range(10): + assert nx.utils.misc.graphs_equal(t[i], s[i]) + assert nx.is_tree(t[i]) + assert "root" not in t[i].graph + assert "roots" not in t[i].graph diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_triads.py b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_triads.py new file mode 100644 index 0000000..463844b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/tests/test_triads.py @@ -0,0 +1,15 @@ +"""Unit tests for the :mod:`networkx.generators.triads` module.""" + +import pytest + +from networkx import triad_graph + + +def test_triad_graph(): + G = triad_graph("030T") + assert [tuple(e) for e in ("ab", "ac", "cb")] == sorted(G.edges()) + + +def test_invalid_name(): + with pytest.raises(ValueError): + triad_graph("bogus") diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/time_series.py b/.venv/lib/python3.12/site-packages/networkx/generators/time_series.py new file mode 100644 index 0000000..592d773 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/time_series.py @@ -0,0 +1,74 @@ +""" +Time Series Graphs +""" + +import itertools + +import networkx as nx + +__all__ = ["visibility_graph"] + + +@nx._dispatchable(graphs=None, returns_graph=True) +def visibility_graph(series): + """ + Return a Visibility Graph of an input Time Series. + + A visibility graph converts a time series into a graph. The constructed graph + uses integer nodes to indicate which event in the series the node represents. + Edges are formed as follows: consider a bar plot of the series and view that + as a side view of a landscape with a node at the top of each bar. An edge + means that the nodes can be connected by a straight "line-of-sight" without + being obscured by any bars between the nodes. + + The resulting graph inherits several properties of the series in its structure. 
Thus, periodic series convert into regular graphs, random series convert + into random graphs, and fractal series convert into scale-free networks [1]_. + + Parameters + ---------- + series : Sequence[Number] + A Time Series sequence (iterable and sliceable) of numeric values + representing times. + + Returns + ------- + NetworkX Graph + The Visibility Graph of the input series. + + Examples + -------- + >>> series_list = [range(10), [2, 1, 3, 2, 1, 3, 2, 1, 3, 2, 1, 3]] + >>> for s in series_list: + ... g = nx.visibility_graph(s) + ... print(g) + Graph with 10 nodes and 9 edges + Graph with 12 nodes and 18 edges + + References + ---------- + .. [1] Lacasa, Lucas, Bartolo Luque, Fernando Ballesteros, Jordi Luque, and Juan Carlos Nuno. + "From time series to complex networks: The visibility graph." Proceedings of the + National Academy of Sciences 105, no. 13 (2008): 4972-4975. + https://www.pnas.org/doi/10.1073/pnas.0709247105 + """ + + # Sequential values are always connected + G = nx.path_graph(len(series)) + nx.set_node_attributes(G, dict(enumerate(series)), "value") + + # Check all combinations of nodes in the series + for (n1, t1), (n2, t2) in itertools.combinations(enumerate(series), 2): + # check whether any value in between obstructs the line of sight + slope = (t2 - t1) / (n2 - n1) + offset = t2 - slope * n2 + + obstructed = any( + t >= slope * n + offset + for n, t in enumerate(series[n1 + 1 : n2], start=n1 + 1) + ) + + if not obstructed: + G.add_edge(n1, n2) + + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/trees.py b/.venv/lib/python3.12/site-packages/networkx/generators/trees.py new file mode 100644 index 0000000..a8b24fa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/trees.py @@ -0,0 +1,1070 @@ +"""Functions for generating trees. + +The functions sampling trees at random in this module come +in two variants: labeled and unlabeled. The labeled variants +sample from every possible tree with the given number of nodes +uniformly at random. The unlabeled variants sample from every +possible *isomorphism class* of trees with the given number +of nodes uniformly at random. + +To understand the difference, consider the following example. +There are two isomorphism classes of trees with four nodes. +One is that of the path graph, the other is that of the +star graph. The unlabeled variant will return a path graph or +a star graph with probability 1/2. + +The labeled variant will return the path graph +with probability 3/4 and the star graph with probability 1/4, +because there are more labeled variants of the path graph +than of the star graph. More precisely, the path graph has +an automorphism group of order 2, whereas the star graph has +an automorphism group of order 6, so the path graph has three +times as many labeled variants as the star graph, and thus +three times as many chances to be drawn. + +Additionally, some functions in this module can sample rooted +trees and forests uniformly at random. A rooted tree is a tree +with a designated root node. A rooted forest is a disjoint union +of rooted trees.
+""" + +from collections import Counter, defaultdict +from math import comb, factorial + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = [ + "prefix_tree", + "prefix_tree_recursive", + "random_labeled_tree", + "random_labeled_rooted_tree", + "random_labeled_rooted_forest", + "random_unlabeled_tree", + "random_unlabeled_rooted_tree", + "random_unlabeled_rooted_forest", +] + + +@nx._dispatchable(graphs=None, returns_graph=True) +def prefix_tree(paths): + """Creates a directed prefix tree from a list of paths. + + Usually the paths are described as strings or lists of integers. + + A "prefix tree" represents the prefix structure of the strings. + Each node represents a prefix of some string. The root represents + the empty prefix with children for the single letter prefixes which + in turn have children for each double letter prefix starting with + the single letter corresponding to the parent node, and so on. + + More generally the prefixes do not need to be strings. A prefix refers + to the start of a sequence. The root has children for each one element + prefix and they have children for each two element prefix that starts + with the one element sequence of the parent, and so on. + + Note that this implementation uses integer nodes with an attribute. + Each node has an attribute "source" whose value is the original element + of the path to which this node corresponds. For example, suppose `paths` + consists of one path: "can". Then the nodes `[1, 2, 3]` which represent + this path have "source" values "c", "a" and "n". + + All the descendants of a node have a common prefix in the sequence/path + associated with that node. From the returned tree, the prefix for each + node can be constructed by traversing the tree up to the root and + accumulating the "source" values along the way. + + The root node is always `0` and has "source" attribute `None`. + The root is the only node with in-degree zero. + The nil node is always `-1` and has "source" attribute `"NIL"`. + The nil node is the only node with out-degree zero. + + + Parameters + ---------- + paths: iterable of paths + An iterable of paths which are themselves sequences. + Matching prefixes among these sequences are identified with + nodes of the prefix tree. One leaf of the tree is associated + with each path. (Identical paths are associated with the same + leaf of the tree.) + + + Returns + ------- + tree: DiGraph + A directed graph representing an arborescence consisting of the + prefix tree generated by `paths`. Nodes are directed "downward", + from parent to child. A special "synthetic" root node is added + to be the parent of the first node in each path. A special + "synthetic" leaf node, the "nil" node `-1`, is added to be the child + of all nodes representing the last element in a path. (The + addition of this nil node technically makes this not an + arborescence but a directed acyclic graph; removing the nil node + makes it an arborescence.) + + + Notes + ----- + The prefix tree is also known as a *trie*. 
+ + + Examples + -------- + Create a prefix tree from a list of strings with common prefixes:: + + >>> paths = ["ab", "abs", "ad"] + >>> T = nx.prefix_tree(paths) + >>> list(T.edges) + [(0, 1), (1, 2), (1, 4), (2, -1), (2, 3), (3, -1), (4, -1)] + + The leaf nodes can be obtained as predecessors of the nil node:: + + >>> root, NIL = 0, -1 + >>> list(T.predecessors(NIL)) + [2, 3, 4] + + To recover the original paths that generated the prefix tree, + traverse up the tree from the node `-1` to the node `0`:: + + >>> recovered = [] + >>> for v in T.predecessors(NIL): + ... prefix = "" + ... while v != root: + ... prefix = str(T.nodes[v]["source"]) + prefix + ... v = next(T.predecessors(v)) # only one predecessor + ... recovered.append(prefix) + >>> sorted(recovered) + ['ab', 'abs', 'ad'] + """ + + def get_children(parent, paths): + children = defaultdict(list) + # Populate dictionary with key(s) as the child/children of the root and + # value(s) as the remaining paths of the corresponding child/children + for path in paths: + # If path is empty, we add an edge to the NIL node. + if not path: + tree.add_edge(parent, NIL) + continue + child, *rest = path + # `child` may exist as the head of more than one path in `paths`. + children[child].append(rest) + return children + + # Initialize the prefix tree with a root node and a nil node. + tree = nx.DiGraph() + root = 0 + tree.add_node(root, source=None) + NIL = -1 + tree.add_node(NIL, source="NIL") + children = get_children(root, paths) + stack = [(root, iter(children.items()))] + while stack: + parent, remaining_children = stack[-1] + try: + child, remaining_paths = next(remaining_children) + # Pop item off stack if there are no remaining children + except StopIteration: + stack.pop() + continue + # We relabel each child with an unused name. + new_name = len(tree) - 1 + # The "source" node attribute stores the original node name. + tree.add_node(new_name, source=child) + tree.add_edge(parent, new_name) + children = get_children(new_name, remaining_paths) + stack.append((new_name, iter(children.items()))) + + return tree + + +@nx._dispatchable(graphs=None, returns_graph=True) +def prefix_tree_recursive(paths): + """Recursively creates a directed prefix tree from a list of paths. + + The original recursive version of prefix_tree, kept for comparison. + It implements the same algorithm; prefix_tree unrolls this recursion + onto an explicit stack. + + Usually the paths are described as strings or lists of integers. + + A "prefix tree" represents the prefix structure of the strings. + Each node represents a prefix of some string. The root represents + the empty prefix with children for the single letter prefixes which + in turn have children for each double letter prefix starting with + the single letter corresponding to the parent node, and so on. + + More generally the prefixes do not need to be strings. A prefix refers + to the start of a sequence. The root has children for each one element + prefix and they have children for each two element prefix that starts + with the one element sequence of the parent, and so on. + + Note that this implementation uses integer nodes with an attribute. + Each node has an attribute "source" whose value is the original element + of the path to which this node corresponds. For example, suppose `paths` + consists of one path: "can". Then the nodes `[1, 2, 3]` which represent + this path have "source" values "c", "a" and "n". + + All the descendants of a node have a common prefix in the sequence/path + associated with that node.
From the returned tree, the prefix for each + node can be constructed by traversing the tree up to the root and + accumulating the "source" values along the way. + + The root node is always `0` and has "source" attribute `None`. + The root is the only node with in-degree zero. + The nil node is always `-1` and has "source" attribute `"NIL"`. + The nil node is the only node with out-degree zero. + + + Parameters + ---------- + paths: iterable of paths + An iterable of paths which are themselves sequences. + Matching prefixes among these sequences are identified with + nodes of the prefix tree. One leaf of the tree is associated + with each path. (Identical paths are associated with the same + leaf of the tree.) + + + Returns + ------- + tree: DiGraph + A directed graph representing an arborescence consisting of the + prefix tree generated by `paths`. Nodes are directed "downward", + from parent to child. A special "synthetic" root node is added + to be the parent of the first node in each path. A special + "synthetic" leaf node, the "nil" node `-1`, is added to be the child + of all nodes representing the last element in a path. (The + addition of this nil node technically makes this not an + arborescence but a directed acyclic graph; removing the nil node + makes it an arborescence.) + + + Notes + ----- + The prefix tree is also known as a *trie*. + + + Examples + -------- + Create a prefix tree from a list of strings with common prefixes:: + + >>> paths = ["ab", "abs", "ad"] + >>> T = nx.prefix_tree(paths) + >>> list(T.edges) + [(0, 1), (1, 2), (1, 4), (2, -1), (2, 3), (3, -1), (4, -1)] + + The leaf nodes can be obtained as predecessors of the nil node. + + >>> root, NIL = 0, -1 + >>> list(T.predecessors(NIL)) + [2, 3, 4] + + To recover the original paths that generated the prefix tree, + traverse up the tree from the node `-1` to the node `0`:: + + >>> recovered = [] + >>> for v in T.predecessors(NIL): + ... prefix = "" + ... while v != root: + ... prefix = str(T.nodes[v]["source"]) + prefix + ... v = next(T.predecessors(v)) # only one predecessor + ... recovered.append(prefix) + >>> sorted(recovered) + ['ab', 'abs', 'ad'] + """ + + def _helper(paths, root, tree): + """Recursively create a trie from the given list of paths. + + `paths` is a list of paths, each of which is itself a list of + nodes, relative to the given `root` (but not including it). This + list of paths will be interpreted as a tree-like structure, in + which two paths that share a prefix represent two branches of + the tree with the same initial segment. + + `root` is the parent of the node at index 0 in each path. + + `tree` is the "accumulator", the :class:`networkx.DiGraph` + representing the branching to which the new nodes and edges will + be added. + + """ + # For each path, remove the first node and make it a child of root. + # Any remaining paths then get processed recursively. + children = defaultdict(list) + for path in paths: + # If path is empty, we add an edge to the NIL node. + if not path: + tree.add_edge(root, NIL) + continue + child, *rest = path + # `child` may exist as the head of more than one path in `paths`. + children[child].append(rest) + # Add a node for each child, connect root, recurse to remaining paths + for child, remaining_paths in children.items(): + # We relabel each child with an unused name. + new_name = len(tree) - 1 + # The "source" node attribute stores the original node name.
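+ # Attach the new node under its parent and recurse into the remaining path suffixes.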
+ tree.add_node(new_name, source=child) + tree.add_edge(root, new_name) + _helper(remaining_paths, new_name, tree) + + # Initialize the prefix tree with a root node and a nil node. + tree = nx.DiGraph() + root = 0 + tree.add_node(root, source=None) + NIL = -1 + tree.add_node(NIL, source="NIL") + # Populate the tree. + _helper(paths, root, tree) + return tree + + +@py_random_state("seed") +@nx._dispatchable(graphs=None, returns_graph=True) +def random_labeled_tree(n, *, seed=None): + """Returns a labeled tree on `n` nodes chosen uniformly at random. + + Generating uniformly distributed random Prüfer sequences and + converting them into the corresponding trees is a straightforward + method of generating uniformly distributed random labeled trees. + This function implements this method. + + Parameters + ---------- + n : int + The number of nodes, greater than zero. + seed : random_state + Indicator of random number generation state. + See :ref:`Randomness` + + Returns + ------- + :class:`networkx.Graph` + A `networkx.Graph` with nodes in the set {0, …, *n* - 1}. + + Raises + ------ + NetworkXPointlessConcept + If `n` is zero (because the null graph is not a tree). + + Examples + -------- + >>> G = nx.random_labeled_tree(5, seed=42) + >>> nx.is_tree(G) + True + >>> G.edges + EdgeView([(0, 1), (0, 3), (0, 2), (2, 4)]) + + A tree with *arbitrarily directed* edges can be created by assigning + generated edges to a ``DiGraph``: + + >>> DG = nx.DiGraph() + >>> DG.add_edges_from(G.edges) + >>> nx.is_tree(DG) + True + >>> DG.edges + OutEdgeView([(0, 1), (0, 3), (0, 2), (2, 4)]) + """ + # Cannot create a Prüfer sequence unless `n` is at least two. + if n == 0: + raise nx.NetworkXPointlessConcept("the null graph is not a tree") + if n == 1: + return nx.empty_graph(1) + return nx.from_prufer_sequence([seed.choice(range(n)) for i in range(n - 2)]) + + +@py_random_state("seed") +@nx._dispatchable(graphs=None, returns_graph=True) +def random_labeled_rooted_tree(n, *, seed=None): + """Returns a labeled rooted tree with `n` nodes. + + The returned tree is chosen uniformly at random from all labeled rooted trees. + + Parameters + ---------- + n : int + The number of nodes + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + :class:`networkx.Graph` + A `networkx.Graph` with integer nodes 0 <= node <= `n` - 1. + The root of the tree is selected uniformly from the nodes. + The "root" graph attribute identifies the root of the tree. + + Notes + ----- + This function returns the result of :func:`random_labeled_tree` + with a randomly selected root. + + Raises + ------ + NetworkXPointlessConcept + If `n` is zero (because the null graph is not a tree). + """ + t = random_labeled_tree(n, seed=seed) + t.graph["root"] = seed.randint(0, n - 1) + return t + + +@py_random_state("seed") +@nx._dispatchable(graphs=None, returns_graph=True) +def random_labeled_rooted_forest(n, *, seed=None): + """Returns a labeled rooted forest with `n` nodes. + + The returned forest is chosen uniformly at random using a + generalization of Prüfer sequences [1]_ in the form described in [2]_. + + Parameters + ---------- + n : int + The number of nodes. + seed : random_state + See :ref:`Randomness`. + + Returns + ------- + :class:`networkx.Graph` + A `networkx.Graph` with integer nodes 0 <= node <= `n` - 1. + The "roots" graph attribute is a set of integers containing the roots. + + References + ---------- + .. [1] Knuth, Donald E. 
"Another Enumeration of Trees." + Canadian Journal of Mathematics, 20 (1968): 1077-1086. + https://doi.org/10.4153/CJM-1968-104-8 + .. [2] Rubey, Martin. "Counting Spanning Trees". Diplomarbeit + zur Erlangung des akademischen Grades Magister der + Naturwissenschaften an der Formal- und Naturwissenschaftlichen + Fakultät der Universität Wien. Wien, May 2000. + """ + + # Select the number of roots by iterating over the cumulative count of trees + # with at most k roots + def _select_k(n, seed): + r = seed.randint(0, (n + 1) ** (n - 1) - 1) + cum_sum = 0 + for k in range(1, n): + cum_sum += (factorial(n - 1) * n ** (n - k)) // ( + factorial(k - 1) * factorial(n - k) + ) + if r < cum_sum: + return k + + return n + + F = nx.empty_graph(n) + if n == 0: + F.graph["roots"] = {} + return F + # Select the number of roots k + k = _select_k(n, seed) + if k == n: + F.graph["roots"] = set(range(n)) + return F # Nothing to do + # Select the roots + roots = seed.sample(range(n), k) + # Nonroots + p = set(range(n)).difference(roots) + # Coding sequence + N = [seed.randint(0, n - 1) for i in range(n - k - 1)] + # Multiset of elements in N also in p + degree = Counter([x for x in N if x in p]) + # Iterator over the elements of p with degree zero + iterator = iter(x for x in p if degree[x] == 0) + u = last = next(iterator) + # This loop is identical to that for Prüfer sequences, + # except that we can draw nodes only from p + for v in N: + F.add_edge(u, v) + degree[v] -= 1 + if v < last and degree[v] == 0: + u = v + else: + last = u = next(iterator) + + F.add_edge(u, roots[0]) + F.graph["roots"] = set(roots) + return F + + +# The following functions support generation of unlabeled trees and forests. + + +def _to_nx(edges, n_nodes, root=None, roots=None): + """ + Converts the (edges, n_nodes) input to a :class:`networkx.Graph`. + The (edges, n_nodes) input is a list of even length, where each pair + of consecutive integers represents an edge, and an integer `n_nodes`. + Integers in the list are elements of `range(n_nodes)`. + + Parameters + ---------- + edges : list of ints + The flattened list of edges of the graph. + n_nodes : int + The number of nodes of the graph. + root: int (default=None) + If not None, the "root" attribute of the graph will be set to this value. + roots: collection of ints (default=None) + If not None, he "roots" attribute of the graph will be set to this value. + + Returns + ------- + :class:`networkx.Graph` + The graph with `n_nodes` nodes and edges given by `edges`. + """ + G = nx.empty_graph(n_nodes) + G.add_edges_from(edges) + if root is not None: + G.graph["root"] = root + if roots is not None: + G.graph["roots"] = roots + return G + + +def _num_rooted_trees(n, cache_trees): + """Returns the number of unlabeled rooted trees with `n` nodes. + + See also https://oeis.org/A000081. + + Parameters + ---------- + n : int + The number of nodes + cache_trees : list of ints + The $i$-th element is the number of unlabeled rooted trees with $i$ nodes, + which is used as a cache (and is extended to length $n+1$ if needed) + + Returns + ------- + int + The number of unlabeled rooted trees with `n` nodes. 
+ """ + for n_i in range(len(cache_trees), n + 1): + cache_trees.append( + sum( + [ + d * cache_trees[n_i - j * d] * cache_trees[d] + for d in range(1, n_i) + for j in range(1, (n_i - 1) // d + 1) + ] + ) + // (n_i - 1) + ) + return cache_trees[n] + + +def _select_jd_trees(n, cache_trees, seed): + """Returns a pair $(j,d)$ with a specific probability + + Given $n$, returns a pair of positive integers $(j,d)$ with the probability + specified in formula (5) of Chapter 29 of [1]_. + + Parameters + ---------- + n : int + The number of nodes + cache_trees : list of ints + Cache for :func:`_num_rooted_trees`. + seed : random_state + See :ref:`Randomness`. + + Returns + ------- + (int, int) + A pair of positive integers $(j,d)$ satisfying formula (5) of + Chapter 29 of [1]_. + + References + ---------- + .. [1] Nijenhuis, Albert, and Wilf, Herbert S. + "Combinatorial algorithms: for computers and calculators." + Academic Press, 1978. + https://doi.org/10.1016/C2013-0-11243-3 + """ + p = seed.randint(0, _num_rooted_trees(n, cache_trees) * (n - 1) - 1) + cumsum = 0 + for d in range(n - 1, 0, -1): + for j in range(1, (n - 1) // d + 1): + cumsum += ( + d + * _num_rooted_trees(n - j * d, cache_trees) + * _num_rooted_trees(d, cache_trees) + ) + if p < cumsum: + return (j, d) + + +def _random_unlabeled_rooted_tree(n, cache_trees, seed): + """Returns an unlabeled rooted tree with `n` nodes. + + Returns an unlabeled rooted tree with `n` nodes chosen uniformly + at random using the "RANRUT" algorithm from [1]_. + The tree is returned in the form: (list_of_edges, number_of_nodes) + + Parameters + ---------- + n : int + The number of nodes, greater than zero. + cache_trees : list ints + Cache for :func:`_num_rooted_trees`. + seed : random_state + See :ref:`Randomness`. + + Returns + ------- + (list_of_edges, number_of_nodes) : list, int + A random unlabeled rooted tree with `n` nodes as a 2-tuple + ``(list_of_edges, number_of_nodes)``. + The root is node 0. + + References + ---------- + .. [1] Nijenhuis, Albert, and Wilf, Herbert S. + "Combinatorial algorithms: for computers and calculators." + Academic Press, 1978. + https://doi.org/10.1016/C2013-0-11243-3 + """ + if n == 1: + edges, n_nodes = [], 1 + return edges, n_nodes + if n == 2: + edges, n_nodes = [(0, 1)], 2 + return edges, n_nodes + + j, d = _select_jd_trees(n, cache_trees, seed) + t1, t1_nodes = _random_unlabeled_rooted_tree(n - j * d, cache_trees, seed) + t2, t2_nodes = _random_unlabeled_rooted_tree(d, cache_trees, seed) + t12 = [(0, t2_nodes * i + t1_nodes) for i in range(j)] + t1.extend(t12) + for _ in range(j): + t1.extend((n1 + t1_nodes, n2 + t1_nodes) for n1, n2 in t2) + t1_nodes += t2_nodes + + return t1, t1_nodes + + +@py_random_state("seed") +@nx._dispatchable(graphs=None, returns_graph=True) +def random_unlabeled_rooted_tree(n, *, number_of_trees=None, seed=None): + """Returns a number of unlabeled rooted trees uniformly at random + + Returns one or more (depending on `number_of_trees`) + unlabeled rooted trees with `n` nodes drawn uniformly + at random. + + Parameters + ---------- + n : int + The number of nodes + number_of_trees : int or None (default) + If not None, this number of trees is generated and returned. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
+ + Returns + ------- + :class:`networkx.Graph` or list of :class:`networkx.Graph` + A single `networkx.Graph` (or a list thereof, if `number_of_trees` + is specified) with nodes in the set {0, …, *n* - 1}. + The "root" graph attribute identifies the root of the tree. + + Notes + ----- + The trees are generated using the "RANRUT" algorithm from [1]_. + The algorithm needs to compute some counting functions + that are relatively expensive: in case several trees are needed, + it is advisable to use the `number_of_trees` optional argument + to reuse the counting functions. + + Raises + ------ + NetworkXPointlessConcept + If `n` is zero (because the null graph is not a tree). + + References + ---------- + .. [1] Nijenhuis, Albert, and Wilf, Herbert S. + "Combinatorial algorithms: for computers and calculators." + Academic Press, 1978. + https://doi.org/10.1016/C2013-0-11243-3 + """ + if n == 0: + raise nx.NetworkXPointlessConcept("the null graph is not a tree") + cache_trees = [0, 1] # initial cache of number of rooted trees + if number_of_trees is None: + return _to_nx(*_random_unlabeled_rooted_tree(n, cache_trees, seed), root=0) + return [ + _to_nx(*_random_unlabeled_rooted_tree(n, cache_trees, seed), root=0) + for i in range(number_of_trees) + ] + + +def _num_rooted_forests(n, q, cache_forests): + """Returns the number of unlabeled rooted forests with `n` nodes, and with + no more than `q` nodes per tree. A recursive formula for this is (2) in + [1]_. This function is implemented using dynamic programming instead of + recursion. + + Parameters + ---------- + n : int + The number of nodes. + q : int + The maximum number of nodes for each tree of the forest. + cache_forests : list of ints + The $i$-th element is the number of unlabeled rooted forests with + $i$ nodes, and with no more than `q` nodes per tree; this is used + as a cache (and is extended to length `n` + 1 if needed). + + Returns + ------- + int + The number of unlabeled rooted forests with `n` nodes with no more than + `q` nodes per tree. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. + https://doi.org/10.1016/0196-6774(81)90021-3 + """ + for n_i in range(len(cache_forests), n + 1): + q_i = min(n_i, q) + cache_forests.append( + sum( + [ + d * cache_forests[n_i - j * d] * cache_forests[d - 1] + for d in range(1, q_i + 1) + for j in range(1, n_i // d + 1) + ] + ) + // n_i + ) + + return cache_forests[n] + + +def _select_jd_forests(n, q, cache_forests, seed): + """Given `n` and `q`, returns a pair of positive integers $(j,d)$ + such that $j\\leq d$, with probability satisfying (F1) of [1]_. + + Parameters + ---------- + n : int + The number of nodes. + q : int + The maximum number of nodes for each tree of the forest. + cache_forests : list of ints + Cache for :func:`_num_rooted_forests`. + seed : random_state + See :ref:`Randomness`. + + Returns + ------- + (int, int) + A pair of positive integers $(j,d)$ + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. 
+ https://doi.org/10.1016/0196-6774(81)90021-3 + """ + p = seed.randint(0, _num_rooted_forests(n, q, cache_forests) * n - 1) + cumsum = 0 + for d in range(q, 0, -1): + for j in range(1, n // d + 1): + cumsum += ( + d + * _num_rooted_forests(n - j * d, q, cache_forests) + * _num_rooted_forests(d - 1, q, cache_forests) + ) + if p < cumsum: + return (j, d) + + +def _random_unlabeled_rooted_forest(n, q, cache_trees, cache_forests, seed): + """Returns an unlabeled rooted forest with `n` nodes, and with no more + than `q` nodes per tree, drawn uniformly at random. It is an implementation + of the algorithm "Forest" of [1]_. + + Parameters + ---------- + n : int + The number of nodes. + q : int + The maximum number of nodes per tree. + cache_trees : + Cache for :func:`_num_rooted_trees`. + cache_forests : + Cache for :func:`_num_rooted_forests`. + seed : random_state + See :ref:`Randomness`. + + Returns + ------- + (edges, n, r) : (list, int, list) + The forest (edges, n) and a list r of root nodes. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. + https://doi.org/10.1016/0196-6774(81)90021-3 + """ + if n == 0: + return ([], 0, []) + + j, d = _select_jd_forests(n, q, cache_forests, seed) + t1, t1_nodes, r1 = _random_unlabeled_rooted_forest( + n - j * d, q, cache_trees, cache_forests, seed + ) + t2, t2_nodes = _random_unlabeled_rooted_tree(d, cache_trees, seed) + for _ in range(j): + r1.append(t1_nodes) + t1.extend((n1 + t1_nodes, n2 + t1_nodes) for n1, n2 in t2) + t1_nodes += t2_nodes + return t1, t1_nodes, r1 + + +@py_random_state("seed") +@nx._dispatchable(graphs=None, returns_graph=True) +def random_unlabeled_rooted_forest(n, *, q=None, number_of_forests=None, seed=None): + """Returns a forest or list of forests selected at random. + + Returns one or more (depending on `number_of_forests`) + unlabeled rooted forests with `n` nodes, and with no more than + `q` nodes per tree, drawn uniformly at random. + The "roots" graph attribute identifies the roots of the forest. + + Parameters + ---------- + n : int + The number of nodes + q : int or None (default) + The maximum number of nodes per tree. + number_of_forests : int or None (default) + If not None, this number of forests is generated and returned. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + :class:`networkx.Graph` or list of :class:`networkx.Graph` + A single `networkx.Graph` (or a list thereof, if `number_of_forests` + is specified) with nodes in the set {0, …, *n* - 1}. + The "roots" graph attribute is a set containing the roots + of the trees in the forest. + + Notes + ----- + This function implements the algorithm "Forest" of [1]_. + The algorithm needs to compute some counting functions + that are relatively expensive: in case several trees are needed, + it is advisable to use the `number_of_forests` optional argument + to reuse the counting functions. + + Raises + ------ + ValueError + If `n` is non-zero but `q` is zero. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. 
+       https://doi.org/10.1016/0196-6774(81)90021-3
+    """
+    if q is None:
+        q = n
+    if q == 0 and n != 0:
+        raise ValueError("q must be a positive integer if n is positive.")
+
+    cache_trees = [0, 1]  # initial cache of number of rooted trees
+    cache_forests = [1]  # initial cache of number of rooted forests
+
+    if number_of_forests is None:
+        g, nodes, rs = _random_unlabeled_rooted_forest(
+            n, q, cache_trees, cache_forests, seed
+        )
+        return _to_nx(g, nodes, roots=set(rs))
+
+    res = []
+    for i in range(number_of_forests):
+        g, nodes, rs = _random_unlabeled_rooted_forest(
+            n, q, cache_trees, cache_forests, seed
+        )
+        res.append(_to_nx(g, nodes, roots=set(rs)))
+    return res
+
+
+def _num_trees(n, cache_trees):
+    """Returns the number of unlabeled trees with `n` nodes.
+
+    See also https://oeis.org/A000055.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes.
+    cache_trees : list of ints
+        Cache for :func:`_num_rooted_trees`.
+
+    Returns
+    -------
+    int
+        The number of unlabeled trees with `n` nodes.
+    """
+    r = _num_rooted_trees(n, cache_trees) - sum(
+        [
+            _num_rooted_trees(j, cache_trees) * _num_rooted_trees(n - j, cache_trees)
+            for j in range(1, n // 2 + 1)
+        ]
+    )
+    if n % 2 == 0:
+        r += comb(_num_rooted_trees(n // 2, cache_trees) + 1, 2)
+    return r
+
+
+def _bicenter(n, cache, seed):
+    """Returns a bi-centroidal tree on `n` nodes drawn uniformly at random.
+
+    This function implements the algorithm Bicenter of [1]_.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes (must be even).
+    cache : list of ints
+        Cache for :func:`_num_rooted_trees`.
+    seed : random_state
+        See :ref:`Randomness`
+
+    Returns
+    -------
+    (edges, n)
+        The tree as a list of edges and number of nodes.
+
+    References
+    ----------
+    .. [1] Wilf, Herbert S. "The uniform selection of free trees."
+       Journal of Algorithms 2.2 (1981): 204-207.
+       https://doi.org/10.1016/0196-6774(81)90021-3
+    """
+    t, t_nodes = _random_unlabeled_rooted_tree(n // 2, cache, seed)
+    if seed.randint(0, _num_rooted_trees(n // 2, cache)) == 0:
+        t2, t2_nodes = t, t_nodes
+    else:
+        t2, t2_nodes = _random_unlabeled_rooted_tree(n // 2, cache, seed)
+    t.extend([(n1 + (n // 2), n2 + (n // 2)) for n1, n2 in t2])
+    t.append((0, n // 2))
+    return t, t_nodes + t2_nodes
+
+
+def _random_unlabeled_tree(n, cache_trees, cache_forests, seed):
+    """Returns a tree on `n` nodes drawn uniformly at random.
+    It implements Wilf's algorithm "Free" of [1]_.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes, greater than zero.
+    cache_trees : list of ints
+        Cache for :func:`_num_rooted_trees`.
+    cache_forests : list of ints
+        Cache for :func:`_num_rooted_forests`.
+    seed : random_state
+        Indicator of random number generation state.
+        See :ref:`Randomness`
+
+    Returns
+    -------
+    (edges, n)
+        The tree as a list of edges and number of nodes.
+
+    References
+    ----------
+    .. [1] Wilf, Herbert S. "The uniform selection of free trees."
+       Journal of Algorithms 2.2 (1981): 204-207.
+ https://doi.org/10.1016/0196-6774(81)90021-3 + """ + if n % 2 == 1: + p = 0 + else: + p = comb(_num_rooted_trees(n // 2, cache_trees) + 1, 2) + if seed.randint(0, _num_trees(n, cache_trees) - 1) < p: + return _bicenter(n, cache_trees, seed) + else: + f, n_f, r = _random_unlabeled_rooted_forest( + n - 1, (n - 1) // 2, cache_trees, cache_forests, seed + ) + for i in r: + f.append((i, n_f)) + return f, n_f + 1 + + +@py_random_state("seed") +@nx._dispatchable(graphs=None, returns_graph=True) +def random_unlabeled_tree(n, *, number_of_trees=None, seed=None): + """Returns a tree or list of trees chosen randomly. + + Returns one or more (depending on `number_of_trees`) + unlabeled trees with `n` nodes drawn uniformly at random. + + Parameters + ---------- + n : int + The number of nodes + number_of_trees : int or None (default) + If not None, this number of trees is generated and returned. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + :class:`networkx.Graph` or list of :class:`networkx.Graph` + A single `networkx.Graph` (or a list thereof, if + `number_of_trees` is specified) with nodes in the set {0, …, *n* - 1}. + + Raises + ------ + NetworkXPointlessConcept + If `n` is zero (because the null graph is not a tree). + + Notes + ----- + This function generates an unlabeled tree uniformly at random using + Wilf's algorithm "Free" of [1]_. The algorithm needs to + compute some counting functions that are relatively expensive: + in case several trees are needed, it is advisable to use the + `number_of_trees` optional argument to reuse the counting + functions. + + References + ---------- + .. [1] Wilf, Herbert S. "The uniform selection of free trees." + Journal of Algorithms 2.2 (1981): 204-207. + https://doi.org/10.1016/0196-6774(81)90021-3 + """ + if n == 0: + raise nx.NetworkXPointlessConcept("the null graph is not a tree") + + cache_trees = [0, 1] # initial cache of number of rooted trees + cache_forests = [1] # initial cache of number of rooted forests + if number_of_trees is None: + return _to_nx(*_random_unlabeled_tree(n, cache_trees, cache_forests, seed)) + else: + return [ + _to_nx(*_random_unlabeled_tree(n, cache_trees, cache_forests, seed)) + for i in range(number_of_trees) + ] diff --git a/.venv/lib/python3.12/site-packages/networkx/generators/triads.py b/.venv/lib/python3.12/site-packages/networkx/generators/triads.py new file mode 100644 index 0000000..09b722d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/generators/triads.py @@ -0,0 +1,94 @@ +# See https://github.com/networkx/networkx/pull/1474 +# Copyright 2011 Reya Group +# Copyright 2011 Alex Levenson +# Copyright 2011 Diederik van Liere +"""Functions that generate the triad graphs, that is, the possible +digraphs on three nodes. + +""" + +import networkx as nx +from networkx.classes import DiGraph + +__all__ = ["triad_graph"] + +#: Dictionary mapping triad name to list of directed edges in the +#: digraph representation of that triad (with nodes 'a', 'b', and 'c'). 
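+#: Triad names follow the M-A-N convention: the three digits count the
+#: mutual, asymmetric, and null dyads in the triad, and the optional
+#: trailing letter distinguishes orientations (D = down, U = up,
+#: C = cyclic, T = transitive).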
+TRIAD_EDGES = {
+    "003": [],
+    "012": ["ab"],
+    "102": ["ab", "ba"],
+    "021D": ["ba", "bc"],
+    "021U": ["ab", "cb"],
+    "021C": ["ab", "bc"],
+    "111D": ["ac", "ca", "bc"],
+    "111U": ["ac", "ca", "cb"],
+    "030T": ["ab", "cb", "ac"],
+    "030C": ["ba", "cb", "ac"],
+    "201": ["ab", "ba", "ac", "ca"],
+    "120D": ["bc", "ba", "ac", "ca"],
+    "120U": ["ab", "cb", "ac", "ca"],
+    "120C": ["ab", "bc", "ac", "ca"],
+    "210": ["ab", "bc", "cb", "ac", "ca"],
+    "300": ["ab", "ba", "bc", "cb", "ac", "ca"],
+}
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def triad_graph(triad_name):
+    """Returns the triad graph with the given name.
+
+    Each string in the following tuple is a valid triad name::
+
+        (
+            "003",
+            "012",
+            "102",
+            "021D",
+            "021U",
+            "021C",
+            "111D",
+            "111U",
+            "030T",
+            "030C",
+            "201",
+            "120D",
+            "120U",
+            "120C",
+            "210",
+            "300",
+        )
+
+    Each triad name corresponds to one of the possible valid digraphs on
+    three nodes.
+
+    Parameters
+    ----------
+    triad_name : string
+        The name of a triad, as described above.
+
+    Returns
+    -------
+    :class:`~networkx.DiGraph`
+        The digraph on three nodes with the given name. The nodes of the
+        graph are the single-character strings 'a', 'b', and 'c'.
+
+    Raises
+    ------
+    ValueError
+        If `triad_name` is not the name of a triad.
+
+    See also
+    --------
+    triadic_census
+
+    """
+    if triad_name not in TRIAD_EDGES:
+        raise ValueError(
+            f'unknown triad name "{triad_name}"; use one of the triad names'
+            " in the TRIAD_NAMES constant"
+        )
+    G = DiGraph()
+    G.add_nodes_from("abc")
+    G.add_edges_from(TRIAD_EDGES[triad_name])
+    return G
diff --git a/.venv/lib/python3.12/site-packages/networkx/lazy_imports.py b/.venv/lib/python3.12/site-packages/networkx/lazy_imports.py
new file mode 100644
index 0000000..c5c05ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/lazy_imports.py
@@ -0,0 +1,188 @@
+import importlib
+import importlib.util
+import inspect
+import os
+import sys
+import types
+
+__all__ = ["attach", "_lazy_import"]
+
+
+def attach(module_name, submodules=None, submod_attrs=None):
+    """Attach lazily loaded submodules, and functions or other attributes.
+
+    Typically, modules import submodules and attributes as follows::
+
+        import mysubmodule
+        import anothersubmodule
+
+        from .foo import someattr
+
+    The idea of this function is to replace the `__init__.py`
+    module's `__getattr__`, `__dir__`, and `__all__` attributes such that
+    all imports work exactly the way they normally would, except that the
+    actual import is delayed until the resulting module object is first used.
+
+    The typical way to call this function, replacing the above imports, is::
+
+        __getattr__, __lazy_dir__, __all__ = lazy.attach(
+            __name__, ["mysubmodule", "anothersubmodule"], {"foo": ["someattr"]}
+        )
+
+    This functionality requires Python 3.7 or higher.
+
+    Parameters
+    ----------
+    module_name : str
+        Typically use __name__.
+    submodules : set
+        List of submodules to lazily import.
+    submod_attrs : dict
+        Dictionary of submodule -> list of attributes / functions.
+        These attributes are imported as they are used.
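+        For example, ``submod_attrs={"foo": ["someattr"]}`` makes
+        ``someattr`` available at the package level on first access.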
+ + Returns + ------- + __getattr__, __dir__, __all__ + + """ + if submod_attrs is None: + submod_attrs = {} + + if submodules is None: + submodules = set() + else: + submodules = set(submodules) + + attr_to_modules = { + attr: mod for mod, attrs in submod_attrs.items() for attr in attrs + } + + __all__ = list(submodules | attr_to_modules.keys()) + + def __getattr__(name): + if name in submodules: + return importlib.import_module(f"{module_name}.{name}") + elif name in attr_to_modules: + submod = importlib.import_module(f"{module_name}.{attr_to_modules[name]}") + return getattr(submod, name) + else: + raise AttributeError(f"No {module_name} attribute {name}") + + def __dir__(): + return __all__ + + if os.environ.get("EAGER_IMPORT", ""): + for attr in set(attr_to_modules.keys()) | submodules: + __getattr__(attr) + + return __getattr__, __dir__, list(__all__) + + +class DelayedImportErrorModule(types.ModuleType): + def __init__(self, frame_data, *args, **kwargs): + self.__frame_data = frame_data + super().__init__(*args, **kwargs) + + def __getattr__(self, x): + if x in ("__class__", "__file__", "__frame_data"): + super().__getattr__(x) + else: + fd = self.__frame_data + raise ModuleNotFoundError( + f"No module named '{fd['spec']}'\n\n" + "This error is lazily reported, having originally occurred in\n" + f" File {fd['filename']}, line {fd['lineno']}, in {fd['function']}\n\n" + f"----> {''.join(fd['code_context'] or '').strip()}" + ) + + +def _lazy_import(fullname): + """Return a lazily imported proxy for a module or library. + + Warning + ------- + Importing using this function can currently cause trouble + when the user tries to import from a subpackage of a module before + the package is fully imported. In particular, this idiom may not work: + + np = lazy_import("numpy") + from numpy.lib import recfunctions + + This is due to a difference in the way Python's LazyLoader handles + subpackage imports compared to the normal import process. Hopefully + we will get Python's LazyLoader to fix this, or find a workaround. + In the meantime, this is a potential problem. + + The workaround is to import numpy before importing from the subpackage. + + Notes + ----- + We often see the following pattern:: + + def myfunc(): + import scipy as sp + sp.argmin(...) + .... + + This is to prevent a library, in this case `scipy`, from being + imported at function definition time, since that can be slow. + + This function provides a proxy module that, upon access, imports + the actual module. So the idiom equivalent to the above example is:: + + sp = lazy.load("scipy") + + def myfunc(): + sp.argmin(...) + .... + + The initial import time is fast because the actual import is delayed + until the first attribute is requested. The overall import time may + decrease as well for users that don't make use of large portions + of the library. + + Parameters + ---------- + fullname : str + The full name of the package or subpackage to import. For example:: + + sp = lazy.load("scipy") # import scipy as sp + spla = lazy.load("scipy.linalg") # import scipy.linalg as spla + + Returns + ------- + pm : importlib.util._LazyModule + Proxy module. Can be used like any regularly imported module. + Actual loading of the module occurs upon first attribute request. 
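+
+    Examples
+    --------
+    A minimal sketch, reusing the ``numpy`` example from the warning above
+    (and assuming ``numpy`` is installed)::
+
+        np = _lazy_import("numpy")  # cheap: returns a proxy, nothing executed yet
+        np.pi  # first attribute access triggers the actual import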
+ + """ + try: + return sys.modules[fullname] + except: + pass + + # Not previously loaded -- look it up + spec = importlib.util.find_spec(fullname) + + if spec is None: + try: + parent = inspect.stack()[1] + frame_data = { + "spec": fullname, + "filename": parent.filename, + "lineno": parent.lineno, + "function": parent.function, + "code_context": parent.code_context, + } + return DelayedImportErrorModule(frame_data, "DelayedImportErrorModule") + finally: + del parent + + module = importlib.util.module_from_spec(spec) + sys.modules[fullname] = module + + loader = importlib.util.LazyLoader(spec.loader) + loader.exec_module(module) + + return module diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__init__.py b/.venv/lib/python3.12/site-packages/networkx/linalg/__init__.py new file mode 100644 index 0000000..119db18 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/__init__.py @@ -0,0 +1,13 @@ +from networkx.linalg.attrmatrix import * +from networkx.linalg import attrmatrix +from networkx.linalg.spectrum import * +from networkx.linalg import spectrum +from networkx.linalg.graphmatrix import * +from networkx.linalg import graphmatrix +from networkx.linalg.laplacianmatrix import * +from networkx.linalg import laplacianmatrix +from networkx.linalg.algebraicconnectivity import * +from networkx.linalg.modularitymatrix import * +from networkx.linalg import modularitymatrix +from networkx.linalg.bethehessianmatrix import * +from networkx.linalg import bethehessianmatrix diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..d6b03e8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/algebraicconnectivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/algebraicconnectivity.cpython-312.pyc new file mode 100644 index 0000000..37cf926 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/algebraicconnectivity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/attrmatrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/attrmatrix.cpython-312.pyc new file mode 100644 index 0000000..f5bb4a7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/attrmatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/bethehessianmatrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/bethehessianmatrix.cpython-312.pyc new file mode 100644 index 0000000..b01abe0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/bethehessianmatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/graphmatrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/graphmatrix.cpython-312.pyc new file mode 100644 index 0000000..8560886 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/graphmatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/laplacianmatrix.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/laplacianmatrix.cpython-312.pyc new file mode 100644 index 0000000..f45ac52 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/laplacianmatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/modularitymatrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/modularitymatrix.cpython-312.pyc new file mode 100644 index 0000000..9111326 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/modularitymatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/spectrum.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/spectrum.cpython-312.pyc new file mode 100644 index 0000000..7e20ec7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/__pycache__/spectrum.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/algebraicconnectivity.py b/.venv/lib/python3.12/site-packages/networkx/linalg/algebraicconnectivity.py new file mode 100644 index 0000000..55073c2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/algebraicconnectivity.py @@ -0,0 +1,655 @@ +""" +Algebraic connectivity and Fiedler vectors of undirected graphs. +""" + +import networkx as nx +from networkx.utils import ( + not_implemented_for, + np_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "algebraic_connectivity", + "fiedler_vector", + "spectral_ordering", + "spectral_bisection", +] + + +class _PCGSolver: + """Preconditioned conjugate gradient method. + + To solve Ax = b: + M = A.diagonal() # or some other preconditioner + solver = _PCGSolver(lambda x: A * x, lambda x: M * x) + x = solver.solve(b) + + The inputs A and M are functions which compute + matrix multiplication on the argument. + A - multiply by the matrix A in Ax=b + M - multiply by M, the preconditioner surrogate for A + + Warning: There is no limit on number of iterations. + """ + + def __init__(self, A, M): + self._A = A + self._M = M + + def solve(self, B, tol): + import numpy as np + + # Densifying step - can this be kept sparse? + B = np.asarray(B) + X = np.ndarray(B.shape, order="F") + for j in range(B.shape[1]): + X[:, j] = self._solve(B[:, j], tol) + return X + + def _solve(self, b, tol): + import numpy as np + import scipy as sp + + A = self._A + M = self._M + tol *= sp.linalg.blas.dasum(b) + # Initialize. + x = np.zeros(b.shape) + r = b.copy() + z = M(r) + rz = sp.linalg.blas.ddot(r, z) + p = z.copy() + # Iterate. + while True: + Ap = A(p) + alpha = rz / sp.linalg.blas.ddot(p, Ap) + x = sp.linalg.blas.daxpy(p, x, a=alpha) + r = sp.linalg.blas.daxpy(Ap, r, a=-alpha) + if sp.linalg.blas.dasum(r) < tol: + return x + z = M(r) + beta = sp.linalg.blas.ddot(r, z) + beta, rz = beta / rz, beta + p = sp.linalg.blas.daxpy(p, z, a=beta) + + +class _LUSolver: + """LU factorization. + + To solve Ax = b: + solver = _LUSolver(A) + x = solver.solve(b) + + optional argument `tol` on solve method is ignored but included + to match _PCGsolver API. 
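+
+    A usage sketch, mirroring the PCG solver above (assuming A is a SciPy
+    sparse matrix)::
+
+        solver = _LUSolver(A)
+        x = solver.solve(b)  # a `tol` argument may be passed but is ignored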
+ """ + + def __init__(self, A): + import scipy as sp + + self._LU = sp.sparse.linalg.splu( + A, + permc_spec="MMD_AT_PLUS_A", + diag_pivot_thresh=0.0, + options={"Equil": True, "SymmetricMode": True}, + ) + + def solve(self, B, tol=None): + import numpy as np + + B = np.asarray(B) + X = np.ndarray(B.shape, order="F") + for j in range(B.shape[1]): + X[:, j] = self._LU.solve(B[:, j]) + return X + + +def _preprocess_graph(G, weight): + """Compute edge weights and eliminate zero-weight edges.""" + if G.is_directed(): + H = nx.MultiGraph() + H.add_nodes_from(G) + H.add_weighted_edges_from( + ((u, v, e.get(weight, 1.0)) for u, v, e in G.edges(data=True) if u != v), + weight=weight, + ) + G = H + if not G.is_multigraph(): + edges = ( + (u, v, abs(e.get(weight, 1.0))) for u, v, e in G.edges(data=True) if u != v + ) + else: + edges = ( + (u, v, sum(abs(e.get(weight, 1.0)) for e in G[u][v].values())) + for u, v in G.edges() + if u != v + ) + H = nx.Graph() + H.add_nodes_from(G) + H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0) + return H + + +def _rcm_estimate(G, nodelist): + """Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.""" + import numpy as np + + G = G.subgraph(nodelist) + order = reverse_cuthill_mckee_ordering(G) + n = len(nodelist) + index = dict(zip(nodelist, range(n))) + x = np.ndarray(n, dtype=float) + for i, u in enumerate(order): + x[index[u]] = i + x -= (n - 1) / 2.0 + return x + + +def _tracemin_fiedler(L, X, normalized, tol, method): + """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm. + + The Fiedler vector of a connected undirected graph is the eigenvector + corresponding to the second smallest eigenvalue of the Laplacian matrix + of the graph. This function starts with the Laplacian L, not the Graph. + + Parameters + ---------- + L : Laplacian of a possibly weighted or normalized, but undirected graph + + X : Initial guess for a solution. Usually a matrix of random numbers. + This function allows more than one column in X to identify more than + one eigenvector if desired. + + normalized : bool + Whether the normalized Laplacian matrix is used. + + tol : float + Tolerance of relative residual in eigenvalue computation. + Warning: There is no limit on number of iterations. + + method : string + Should be 'tracemin_pcg' or 'tracemin_lu'. + Otherwise exception is raised. + + Returns + ------- + sigma, X : Two NumPy arrays of floats. + The lowest eigenvalues and corresponding eigenvectors of L. + The size of input X determines the size of these outputs. + As this is for Fiedler vectors, the zero eigenvalue (and + constant eigenvector) are avoided. + """ + import numpy as np + import scipy as sp + + n = X.shape[0] + + if normalized: + # Form the normalized Laplacian matrix and determine the eigenvector of + # its nullspace. 
+        e = np.sqrt(L.diagonal())
+        # TODO: rm csr_array wrapper when spdiags array creation becomes available
+        D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format="csr"))
+        L = D @ L @ D
+        e *= 1.0 / np.linalg.norm(e, 2)
+
+    if normalized:
+
+        def project(X):
+            """Make X orthogonal to the nullspace of L."""
+            X = np.asarray(X)
+            for j in range(X.shape[1]):
+                X[:, j] -= (X[:, j] @ e) * e
+
+    else:
+
+        def project(X):
+            """Make X orthogonal to the nullspace of L."""
+            X = np.asarray(X)
+            for j in range(X.shape[1]):
+                X[:, j] -= X[:, j].sum() / n
+
+    if method == "tracemin_pcg":
+        D = L.diagonal().astype(float)
+        solver = _PCGSolver(lambda x: L @ x, lambda x: D * x)
+    elif method == "tracemin_lu":
+        # Convert A to CSC to suppress SparseEfficiencyWarning.
+        A = sp.sparse.csc_array(L, dtype=float, copy=True)
+        # Force A to be nonsingular. Since A is the Laplacian matrix of a
+        # connected graph, its rank deficiency is one, and thus one diagonal
+        # element needs to be modified. Changing to infinity forces a zero in
+        # the corresponding element in the solution.
+        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
+        A[i, i] = np.inf
+        solver = _LUSolver(A)
+    else:
+        raise nx.NetworkXError(f"Unknown linear system solver: {method}")
+
+    # Initialize.
+    Lnorm = abs(L).sum(axis=1).flatten().max()
+    project(X)
+    W = np.ndarray(X.shape, order="F")
+
+    while True:
+        # Orthonormalize X.
+        X = np.linalg.qr(X)[0]
+        # Compute iteration matrix H.
+        W[:, :] = L @ X
+        H = X.T @ W
+        sigma, Y = sp.linalg.eigh(H, overwrite_a=True)
+        # Compute the Ritz vectors.
+        X = X @ Y
+        # Test for convergence exploiting the fact that L * X == W * Y.
+        res = sp.linalg.blas.dasum(W @ Y[:, 0] - sigma[0] * X[:, 0]) / Lnorm
+        if res < tol:
+            break
+        # Compute X = L \ X / (X' * (L \ X)).
+        # L \ X can have an arbitrary projection on the nullspace of L,
+        # which will be eliminated.
+        W[:, :] = solver.solve(X, tol)
+        X = (sp.linalg.inv(W.T @ X) @ W.T).T  # Preserves Fortran storage order.
+        project(X)
+
+    return sigma, np.asarray(X)
+
+
+def _get_fiedler_func(method):
+    """Returns a function that solves the Fiedler eigenvalue problem."""
+    import numpy as np

+    if method == "tracemin":  # old style keyword <v2.1
+        method = "tracemin_pcg"
+    if method in ("tracemin_pcg", "tracemin_lu"):
+
+        def find_fiedler(L, x, normalized, tol, seed):
+            q = 1 if method == "tracemin_pcg" else min(4, L.shape[0] - 1)
+            X = np.asarray(seed.uniform(size=(q, L.shape[0]))).T
+            sigma, X = _tracemin_fiedler(L, X, normalized, tol, method)
+            return sigma[0], X[:, 0]
+
+    elif method == "lanczos" or method == "lobpcg":
+
+        def find_fiedler(L, x, normalized, tol, seed):
+            import scipy as sp
+
+            L = sp.sparse.csc_array(L, dtype=float)
+            n = L.shape[0]
+            if normalized:
+                # TODO: rm csr_array wrapping when spdiags array becomes available
+                D = sp.sparse.csr_array(
+                    sp.sparse.spdiags(
+                        1.0 / np.sqrt(L.diagonal()), [0], n, n, format="csr"
+                    )
+                )
+                L = D @ L @ D
+            if method == "lanczos" or n < 10:
+                # Avoid LOBPCG when n < 10 due to
+                # https://github.com/scipy/scipy/issues/3592
+                # https://github.com/scipy/scipy/pull/3594
+                sigma, X = sp.sparse.linalg.eigsh(
+                    L, 2, which="SM", tol=tol, return_eigenvectors=True
+                )
+                return sigma[1], X[:, 1]
+            else:
+                X = np.asarray(np.atleast_2d(x).T)
+                # TODO: rm csr_array wrapping when spdiags array becomes available
+                M = sp.sparse.csr_array(sp.sparse.spdiags(1.0 / L.diagonal(), 0, n, n))
+                Y = np.ones(n)
+                if normalized:
+                    Y /= D.diagonal()
+                sigma, X = sp.sparse.linalg.lobpcg(
+                    L, X, M=M, Y=np.atleast_2d(Y).T, tol=tol, maxiter=n, largest=False
+                )
+                return sigma[0], X[:, 0]
+
+    else:
+        raise nx.NetworkXError(f"unknown method {method!r}.")
+
+    return find_fiedler
+
+
+@not_implemented_for("directed")
+@np_random_state(5)
+@nx._dispatchable(edge_attrs="weight")
+def algebraic_connectivity(
+    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
+):
+    r"""Returns the algebraic connectivity of an undirected graph.
+
+    The algebraic connectivity of a connected undirected graph is the second
+    smallest eigenvalue of its Laplacian matrix.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    weight : object, optional (default: None)
+        The data key used to determine the weight of each edge. If None, then
+        each edge has unit weight.
+
+    normalized : bool, optional (default: False)
+        Whether the normalized Laplacian matrix is used.
+
+    tol : float, optional (default: 1e-8)
+        Tolerance of relative residual in eigenvalue computation.
+
+    method : string, optional (default: 'tracemin_pcg')
+        Method of eigenvalue computation. It must be one of the tracemin
+        options shown below (TraceMIN), 'lanczos' (Lanczos iteration)
+        or 'lobpcg' (LOBPCG).
+
+        The TraceMIN algorithm uses a linear system solver. The following
+        values allow specifying the solver to be used.
+
+        =============== ========================================
+        Value           Solver
+        =============== ========================================
+        'tracemin_pcg'  Preconditioned conjugate gradient method
+        'tracemin_lu'   LU factorization
+        =============== ========================================
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    algebraic_connectivity : float
+        Algebraic connectivity.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is directed.
+
+    NetworkXError
+        If G has less than two nodes.
+
+    Notes
+    -----
+    Edge weights are interpreted by their absolute values. For MultiGraph's,
+    weights of parallel edges are summed. Zero-weighted edges are ignored.
+
+    See Also
+    --------
+    laplacian_matrix
+
+    Examples
+    --------
+    For undirected graphs, algebraic connectivity can tell us if a graph is
+    connected or not: `G` is connected iff ``algebraic_connectivity(G) > 0``:
+
+    >>> G = nx.complete_graph(5)
+    >>> nx.algebraic_connectivity(G) > 0
+    True
+    >>> G.add_node(10)  # G is no longer connected
+    >>> nx.algebraic_connectivity(G) > 0
+    False
+
+    """
+    if len(G) < 2:
+        raise nx.NetworkXError("graph has less than two nodes.")
+    G = _preprocess_graph(G, weight)
+    if not nx.is_connected(G):
+        return 0.0
+
+    L = nx.laplacian_matrix(G)
+    if L.shape[0] == 2:
+        return 2.0 * float(L[0, 0]) if not normalized else 2.0
+
+    find_fiedler = _get_fiedler_func(method)
+    x = None if method != "lobpcg" else _rcm_estimate(G, G)
+    sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
+    return float(sigma)
+
+
+@not_implemented_for("directed")
+@np_random_state(5)
+@nx._dispatchable(edge_attrs="weight")
+def fiedler_vector(
+    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
+):
+    """Returns the Fiedler vector of a connected undirected graph.
+
+    The Fiedler vector of a connected undirected graph is the eigenvector
+    corresponding to the second smallest eigenvalue of the Laplacian matrix
+    of the graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    weight : object, optional (default: None)
+        The data key used to determine the weight of each edge. If None, then
+        each edge has unit weight.
+
+    normalized : bool, optional (default: False)
+        Whether the normalized Laplacian matrix is used.
+
+    tol : float, optional (default: 1e-8)
+        Tolerance of relative residual in eigenvalue computation.
+
+    method : string, optional (default: 'tracemin_pcg')
+        Method of eigenvalue computation. It must be one of the tracemin
+        options shown below (TraceMIN), 'lanczos' (Lanczos iteration)
+        or 'lobpcg' (LOBPCG).
+
+        The TraceMIN algorithm uses a linear system solver. The following
+        values allow specifying the solver to be used.
+
+        =============== ========================================
+        Value           Solver
+        =============== ========================================
+        'tracemin_pcg'  Preconditioned conjugate gradient method
+        'tracemin_lu'   LU factorization
+        =============== ========================================
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    fiedler_vector : NumPy array of floats.
+        Fiedler vector.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is directed.
+
+    NetworkXError
+        If G has less than two nodes or is not connected.
+
+    Notes
+    -----
+    Edge weights are interpreted by their absolute values. For MultiGraph's,
+    weights of parallel edges are summed. Zero-weighted edges are ignored.
+
+    See Also
+    --------
+    laplacian_matrix
+
+    Examples
+    --------
+    Given a connected graph, the signs of the values in the Fiedler vector can
+    be used to partition the graph into two components.
+
+    >>> G = nx.barbell_graph(5, 0)
+    >>> nx.fiedler_vector(G, normalized=True, seed=1)
+    array([-0.32864129, -0.32864129, -0.32864129, -0.32864129, -0.26072899,
+            0.26072899,  0.32864129,  0.32864129,  0.32864129,  0.32864129])
+
+    The two blocks of this partition are the two 5-node cliques of the
+    barbell graph.
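+
+    As a sketch of how the signs induce that partition (the entries of the
+    vector follow the node order of `G`):
+
+    >>> v = nx.fiedler_vector(G, normalized=True, seed=1)
+    >>> {n for n, val in zip(G, v) if val < 0}
+    {0, 1, 2, 3, 4}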
+ """ + import numpy as np + + if len(G) < 2: + raise nx.NetworkXError("graph has less than two nodes.") + G = _preprocess_graph(G, weight) + if not nx.is_connected(G): + raise nx.NetworkXError("graph is not connected.") + + if len(G) == 2: + return np.array([1.0, -1.0]) + + find_fiedler = _get_fiedler_func(method) + L = nx.laplacian_matrix(G) + x = None if method != "lobpcg" else _rcm_estimate(G, G) + sigma, fiedler = find_fiedler(L, x, normalized, tol, seed) + return fiedler + + +@np_random_state(5) +@nx._dispatchable(edge_attrs="weight") +def spectral_ordering( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Compute the spectral_ordering of a graph. + + The spectral ordering of a graph is an ordering of its nodes where nodes + in the same weakly connected components appear contiguous and ordered by + their corresponding elements in the Fiedler vector of the component. + + Parameters + ---------- + G : NetworkX graph + A graph. + + weight : object, optional (default: None) + The data key used to determine the weight of each edge. If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + spectral_ordering : NumPy array of floats. + Spectral ordering of nodes. + + Raises + ------ + NetworkXError + If G is empty. + + Notes + ----- + Edge weights are interpreted by their absolute values. For MultiGraph's, + weights of parallel edges are summed. Zero-weighted edges are ignored. + + See Also + -------- + laplacian_matrix + """ + if len(G) == 0: + raise nx.NetworkXError("graph is empty.") + G = _preprocess_graph(G, weight) + + find_fiedler = _get_fiedler_func(method) + order = [] + for component in nx.connected_components(G): + size = len(component) + if size > 2: + L = nx.laplacian_matrix(G, component) + x = None if method != "lobpcg" else _rcm_estimate(G, component) + sigma, fiedler = find_fiedler(L, x, normalized, tol, seed) + sort_info = zip(fiedler, range(size), component) + order.extend(u for x, c, u in sorted(sort_info)) + else: + order.extend(component) + + return order + + +@nx._dispatchable(edge_attrs="weight") +def spectral_bisection( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Bisect the graph using the Fiedler vector. + + This method uses the Fiedler vector to bisect a graph. + The partition is defined by the nodes which are associated with + either positive or negative values in the vector. + + Parameters + ---------- + G : NetworkX Graph + + weight : str, optional (default: weight) + The data key used to determine the weight of each edge. 
If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + bisection : tuple of sets + Sets with the bisection of nodes + + Examples + -------- + >>> G = nx.barbell_graph(3, 0) + >>> nx.spectral_bisection(G) + ({0, 1, 2}, {3, 4, 5}) + + References + ---------- + .. [1] M. E. J Newman 'Networks: An Introduction', pages 364-370 + Oxford University Press 2011. + """ + import numpy as np + + v = nx.fiedler_vector(G, weight, normalized, tol, method, seed) + nodes = np.array(list(G)) + pos_vals = v >= 0 + + return set(nodes[~pos_vals].tolist()), set(nodes[pos_vals].tolist()) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/attrmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/attrmatrix.py new file mode 100644 index 0000000..989e8ff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/attrmatrix.py @@ -0,0 +1,466 @@ +""" +Functions for constructing matrix-like objects from graph attributes. +""" + +import networkx as nx + +__all__ = ["attr_matrix", "attr_sparse_matrix"] + + +def _node_value(G, node_attr): + """Returns a function that returns a value from G.nodes[u]. + + We return a function expecting a node as its sole argument. Then, in the + simplest scenario, the returned function will return G.nodes[u][node_attr]. + However, we also handle the case when `node_attr` is None (returns the node) + or when `node_attr` is a function itself. + + Parameters + ---------- + G : graph + A NetworkX graph + + node_attr : {None, str, callable} + Specification of how the value of the node attribute should be obtained + from the node attribute dictionary. + + Returns + ------- + value : function + A function expecting a node as its sole argument. The function will + returns a value from G.nodes[u] that depends on `edge_attr`. + + """ + if node_attr is None: + + def value(u): + return u + + elif not callable(node_attr): + # assume it is a key for the node attribute dictionary + def value(u): + return G.nodes[u][node_attr] + + else: + # Advanced: Allow users to specify something else. + # + # For example, + # node_attr = lambda u: G.nodes[u].get('size', .5) * 3 + # + value = node_attr + + return value + + +def _edge_value(G, edge_attr): + """Returns a function that returns a value from G[u][v]. + + Suppose there exists an edge between u and v. Then we return a function + expecting u and v as arguments. For Graph and DiGraph, G[u][v] is + the edge attribute dictionary, and the function (essentially) returns + G[u][v][edge_attr]. However, we also handle cases when `edge_attr` is None + and when it is a function itself. 
For MultiGraph and MultiDiGraph, G[u][v] + is a dictionary of all edges between u and v. In this case, the returned + function sums the value of `edge_attr` for every edge between u and v. + + Parameters + ---------- + G : graph + A NetworkX graph + + edge_attr : {None, str, callable} + Specification of how the value of the edge attribute should be obtained + from the edge attribute dictionary, G[u][v]. For multigraphs, G[u][v] + is a dictionary of all the edges between u and v. This allows for + special treatment of multiedges. + + Returns + ------- + value : function + A function expecting two nodes as parameters. The nodes should + represent the from- and to- node of an edge. The function will + return a value from G[u][v] that depends on `edge_attr`. + + """ + + if edge_attr is None: + # topological count of edges + + if G.is_multigraph(): + + def value(u, v): + return len(G[u][v]) + + else: + + def value(u, v): + return 1 + + elif not callable(edge_attr): + # assume it is a key for the edge attribute dictionary + + if edge_attr == "weight": + # provide a default value + if G.is_multigraph(): + + def value(u, v): + return sum(d.get(edge_attr, 1) for d in G[u][v].values()) + + else: + + def value(u, v): + return G[u][v].get(edge_attr, 1) + + else: + # otherwise, the edge attribute MUST exist for each edge + if G.is_multigraph(): + + def value(u, v): + return sum(d[edge_attr] for d in G[u][v].values()) + + else: + + def value(u, v): + return G[u][v][edge_attr] + + else: + # Advanced: Allow users to specify something else. + # + # Alternative default value: + # edge_attr = lambda u,v: G[u][v].get('thickness', .5) + # + # Function on an attribute: + # edge_attr = lambda u,v: abs(G[u][v]['weight']) + # + # Handle Multi(Di)Graphs differently: + # edge_attr = lambda u,v: numpy.prod([d['size'] for d in G[u][v].values()]) + # + # Ignore multiple edges + # edge_attr = lambda u,v: 1 if len(G[u][v]) else 0 + # + value = edge_attr + + return value + + +@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def attr_matrix( + G, + edge_attr=None, + node_attr=None, + normalized=False, + rc_order=None, + dtype=None, + order=None, +): + """Returns the attribute matrix using attributes from `G` as a numpy array. + + If only `G` is passed in, then the adjacency matrix is constructed. + + Let A be a discrete set of values for the node attribute `node_attr`. Then + the elements of A represent the rows and columns of the constructed matrix. + Now, iterate through every edge e=(u,v) in `G` and consider the value + of the edge attribute `edge_attr`. If ua and va are the values of the + node attribute `node_attr` for u and v, respectively, then the value of + the edge attribute is added to the matrix element at (ua, va). + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the attribute matrix. + + edge_attr : str, optional (default: number of edges for each matrix element) + Each element of the matrix represents a running total of the + specified edge attribute for edges whose node attributes correspond + to the rows/cols of the matrix. The attribute must be present for + all edges in the graph. If no attribute is specified, then we + just count the number of edges whose node attributes correspond + to the matrix element. + + node_attr : str, optional (default: use nodes of the graph) + Each row and column in the matrix represents a particular value + of the node attribute. The attribute must be present for all nodes + in the graph. 
Note, the values of this attribute should be reliably + hashable. So, float values are not recommended. If no attribute is + specified, then the rows and columns will be the nodes of the graph. + + normalized : bool, optional (default: False) + If True, then each row is normalized by the summation of its values. + + rc_order : list, optional (default: order of nodes in G) + A list of the node attribute values. This list specifies the ordering + of rows and columns of the array. If no ordering is provided, then + the ordering will be the same as the node order in `G`. + When `rc_order` is `None`, the function returns a 2-tuple ``(matrix, ordering)`` + + Other Parameters + ---------------- + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. Keep in mind certain + dtypes can yield unexpected results if the array is to be normalized. + The parameter is passed to numpy.zeros(). If unspecified, the NumPy + default is used. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. This parameter is passed to + numpy.zeros(). If unspecified, the NumPy default is used. + + Returns + ------- + M : 2D NumPy ndarray + The attribute matrix. + + ordering : list + If `rc_order` was specified, then only the attribute matrix is returned. + However, if `rc_order` was None, then the ordering used to construct + the matrix is returned as well. + + Examples + -------- + Construct an adjacency matrix: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, thickness=1, weight=3) + >>> G.add_edge(0, 2, thickness=2) + >>> G.add_edge(1, 2, thickness=3) + >>> nx.attr_matrix(G, rc_order=[0, 1, 2]) + array([[0., 1., 1.], + [1., 0., 1.], + [1., 1., 0.]]) + + Alternatively, we can obtain the matrix describing edge thickness. + + >>> nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2]) + array([[0., 1., 2.], + [1., 0., 3.], + [2., 3., 0.]]) + + We can also color the nodes and ask for the probability distribution over + all edges (u,v) describing: + + Pr(v has color Y | u has color X) + + >>> G.nodes[0]["color"] = "red" + >>> G.nodes[1]["color"] = "red" + >>> G.nodes[2]["color"] = "blue" + >>> rc = ["red", "blue"] + >>> nx.attr_matrix(G, node_attr="color", normalized=True, rc_order=rc) + array([[0.33333333, 0.66666667], + [1. , 0. ]]) + + For example, the above tells us that for all edges (u,v): + + Pr( v is red | u is red) = 1/3 + Pr( v is blue | u is red) = 2/3 + + Pr( v is red | u is blue) = 1 + Pr( v is blue | u is blue) = 0 + + Finally, we can obtain the total weights listed by the node colors. + + >>> nx.attr_matrix(G, edge_attr="weight", node_attr="color", rc_order=rc) + array([[3., 2.], + [2., 0.]]) + + Thus, the total weight over all edges (u,v) with u and v having colors: + + (red, red) is 3 # the sole contribution is from edge (0,1) + (red, blue) is 2 # contributions from edges (0,2) and (1,2) + (blue, red) is 2 # same as (red, blue) since graph is undirected + (blue, blue) is 0 # there are no edges with blue endpoints + + """ + import numpy as np + + edge_value = _edge_value(G, edge_attr) + node_value = _node_value(G, node_attr) + + if rc_order is None: + ordering = list({node_value(n) for n in G}) + else: + ordering = rc_order + + N = len(ordering) + undirected = not G.is_directed() + index = dict(zip(ordering, range(N))) + M = np.zeros((N, N), dtype=dtype, order=order) + + seen = set() + for u, nbrdict in G.adjacency(): + for v in nbrdict: + # Obtain the node attribute values. 
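+            # In an undirected graph each edge appears twice in `adjacency()`
+            # (once from each endpoint); the `seen` set below ensures that each
+            # edge contributes to the matrix only once.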
+ i, j = index[node_value(u)], index[node_value(v)] + if v not in seen: + M[i, j] += edge_value(u, v) + if undirected: + M[j, i] = M[i, j] + + if undirected: + seen.add(u) + + if normalized: + M /= M.sum(axis=1).reshape((N, 1)) + + if rc_order is None: + return M, ordering + else: + return M + + +@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def attr_sparse_matrix( + G, edge_attr=None, node_attr=None, normalized=False, rc_order=None, dtype=None +): + """Returns a SciPy sparse array using attributes from G. + + If only `G` is passed in, then the adjacency matrix is constructed. + + Let A be a discrete set of values for the node attribute `node_attr`. Then + the elements of A represent the rows and columns of the constructed matrix. + Now, iterate through every edge e=(u,v) in `G` and consider the value + of the edge attribute `edge_attr`. If ua and va are the values of the + node attribute `node_attr` for u and v, respectively, then the value of + the edge attribute is added to the matrix element at (ua, va). + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy matrix. + + edge_attr : str, optional (default: number of edges for each matrix element) + Each element of the matrix represents a running total of the + specified edge attribute for edges whose node attributes correspond + to the rows/cols of the matrix. The attribute must be present for + all edges in the graph. If no attribute is specified, then we + just count the number of edges whose node attributes correspond + to the matrix element. + + node_attr : str, optional (default: use nodes of the graph) + Each row and column in the matrix represents a particular value + of the node attribute. The attribute must be present for all nodes + in the graph. Note, the values of this attribute should be reliably + hashable. So, float values are not recommended. If no attribute is + specified, then the rows and columns will be the nodes of the graph. + + normalized : bool, optional (default: False) + If True, then each row is normalized by the summation of its values. + + rc_order : list, optional (default: order of nodes in G) + A list of the node attribute values. This list specifies the ordering + of rows and columns of the array and the return value. If no ordering + is provided, then the ordering will be that of nodes in `G`. + + Other Parameters + ---------------- + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. Keep in mind certain + dtypes can yield unexpected results if the array is to be normalized. + The parameter is passed to numpy.zeros(). If unspecified, the NumPy + default is used. + + Returns + ------- + M : SciPy sparse array + The attribute matrix. + + ordering : list + If `rc_order` was specified, then only the matrix is returned. + However, if `rc_order` was None, then the ordering used to construct + the matrix is returned as well. + + Examples + -------- + Construct an adjacency matrix: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, thickness=1, weight=3) + >>> G.add_edge(0, 2, thickness=2) + >>> G.add_edge(1, 2, thickness=3) + >>> M = nx.attr_sparse_matrix(G, rc_order=[0, 1, 2]) + >>> M.toarray() + array([[0., 1., 1.], + [1., 0., 1.], + [1., 1., 0.]]) + + Alternatively, we can obtain the matrix describing edge thickness. 
+
+    >>> M = nx.attr_sparse_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2])
+    >>> M.toarray()
+    array([[0., 1., 2.],
+           [1., 0., 3.],
+           [2., 3., 0.]])
+
+    We can also color the nodes and ask for the probability distribution over
+    all edges (u,v) describing:
+
+        Pr(v has color Y | u has color X)
+
+    >>> G.nodes[0]["color"] = "red"
+    >>> G.nodes[1]["color"] = "red"
+    >>> G.nodes[2]["color"] = "blue"
+    >>> rc = ["red", "blue"]
+    >>> M = nx.attr_sparse_matrix(G, node_attr="color", normalized=True, rc_order=rc)
+    >>> M.toarray()
+    array([[0.33333333, 0.66666667],
+           [1.        , 0.        ]])
+
+    For example, the above tells us that for all edges (u,v):
+
+        Pr( v is red  | u is red)  = 1/3
+        Pr( v is blue | u is red)  = 2/3
+
+        Pr( v is red  | u is blue) = 1
+        Pr( v is blue | u is blue) = 0
+
+    Finally, we can obtain the total weights listed by the node colors.
+
+    >>> M = nx.attr_sparse_matrix(G, edge_attr="weight", node_attr="color", rc_order=rc)
+    >>> M.toarray()
+    array([[3., 2.],
+           [2., 0.]])
+
+    Thus, the total weight over all edges (u,v) with u and v having colors:
+
+        (red, red) is 3    # the sole contribution is from edge (0,1)
+        (red, blue) is 2   # contributions from edges (0,2) and (1,2)
+        (blue, red) is 2   # same as (red, blue) since graph is undirected
+        (blue, blue) is 0  # there are no edges with blue endpoints
+
+    """
+    import numpy as np
+    import scipy as sp
+
+    edge_value = _edge_value(G, edge_attr)
+    node_value = _node_value(G, node_attr)
+
+    if rc_order is None:
+        ordering = list({node_value(n) for n in G})
+    else:
+        ordering = rc_order
+
+    N = len(ordering)
+    undirected = not G.is_directed()
+    index = dict(zip(ordering, range(N)))
+    M = sp.sparse.lil_array((N, N), dtype=dtype)
+
+    seen = set()
+    for u, nbrdict in G.adjacency():
+        for v in nbrdict:
+            # Obtain the node attribute values.
+            i, j = index[node_value(u)], index[node_value(v)]
+            if v not in seen:
+                M[i, j] += edge_value(u, v)
+                if undirected:
+                    M[j, i] = M[i, j]
+
+        if undirected:
+            seen.add(u)
+
+    if normalized:
+        M *= 1 / M.sum(axis=1)[:, np.newaxis]  # in-place mult preserves sparse
+
+    if rc_order is None:
+        return M, ordering
+    else:
+        return M
diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/bethehessianmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/bethehessianmatrix.py
new file mode 100644
index 0000000..3d42fc6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/linalg/bethehessianmatrix.py
@@ -0,0 +1,79 @@
+"""Bethe Hessian or deformed Laplacian matrix of graphs."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["bethe_hessian_matrix"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def bethe_hessian_matrix(G, r=None, nodelist=None):
+    r"""Returns the Bethe Hessian matrix of G.
+
+    The Bethe Hessian is a family of matrices parametrized by r, defined as
+    H(r) = (r^2 - 1) I - r A + D where A is the adjacency matrix, D is the
+    diagonal matrix of node degrees, and I is the identity matrix. It is equal
+    to the graph Laplacian when the regularizer r = 1.
+
+    The default choice of regularizer should be the ratio [2]_
+
+    .. math::
+        r_m = \left(\sum k_i \right)^{-1}\left(\sum k_i^2 \right) - 1
+
+    Parameters
+    ----------
+    G : Graph
+       A NetworkX graph
+    r : float
+       Regularizer parameter
+    nodelist : list, optional
+       The rows and columns are ordered according to the nodes in nodelist.
+       If nodelist is None, then the ordering is produced by ``G.nodes()``.
+ + Returns + ------- + H : scipy.sparse.csr_array + The Bethe Hessian matrix of `G`, with parameter `r`. + + Examples + -------- + >>> k = [3, 2, 2, 1, 0] + >>> G = nx.havel_hakimi_graph(k) + >>> H = nx.bethe_hessian_matrix(G) + >>> H.toarray() + array([[ 3.5625, -1.25 , -1.25 , -1.25 , 0. ], + [-1.25 , 2.5625, -1.25 , 0. , 0. ], + [-1.25 , -1.25 , 2.5625, 0. , 0. ], + [-1.25 , 0. , 0. , 1.5625, 0. ], + [ 0. , 0. , 0. , 0. , 0.5625]]) + + See Also + -------- + bethe_hessian_spectrum + adjacency_matrix + laplacian_matrix + + References + ---------- + .. [1] A. Saade, F. Krzakala and L. Zdeborová + "Spectral Clustering of Graphs with the Bethe Hessian", + Advances in Neural Information Processing Systems, 2014. + .. [2] C. M. Le, E. Levina + "Estimating the number of communities in networks by spectral methods" + arXiv:1507.00827, 2015. + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + if r is None: + r = sum(d**2 for v, d in nx.degree(G)) / sum(d for v, d in nx.degree(G)) - 1 + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, format="csr") + n, m = A.shape + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr")) + # TODO: Rm csr_array wrapper when eye array creation becomes available + I = sp.sparse.csr_array(sp.sparse.eye(m, n, format="csr")) + return (r**2 - 1) * I - r * A + D diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/graphmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/graphmatrix.py new file mode 100644 index 0000000..9f477bc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/graphmatrix.py @@ -0,0 +1,168 @@ +""" +Adjacency matrix and incidence matrix of graphs. +""" + +import networkx as nx + +__all__ = ["incidence_matrix", "adjacency_matrix"] + + +@nx._dispatchable(edge_attrs="weight") +def incidence_matrix( + G, nodelist=None, edgelist=None, oriented=False, weight=None, *, dtype=None +): + """Returns incidence matrix of G. + + The incidence matrix assigns each row to a node and each column to an edge. + For a standard incidence matrix a 1 appears wherever a row's node is + incident on the column's edge. For an oriented incidence matrix each + edge is assigned an orientation (arbitrarily for undirected and aligning to + direction for directed). A -1 appears for the source (tail) of an edge and + 1 for the destination (head) of the edge. The elements are zero otherwise. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional (default= all nodes in G) + The rows are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + edgelist : list, optional (default= all edges in G) + The columns are ordered according to the edges in edgelist. + If edgelist is None, then the ordering is produced by G.edges(). + + oriented: bool, optional (default=False) + If True, matrix elements are +1 or -1 for the head or tail node + respectively of each edge. If False, +1 occurs at both nodes. + + weight : string or None, optional (default=None) + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. Edge weights, if used, + should be positive so that the orientation can provide the sign. + + dtype : a NumPy dtype or None (default=None) + The dtype of the output sparse array. This type should be a compatible + type of the weight argument, eg. 
if weight would return a float this + argument should also be a float. + If None, then the default for SciPy is used. + + Returns + ------- + A : SciPy sparse array + The incidence matrix of G. + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges in edgelist should be + (u,v,key) 3-tuples. + + "Networks are the best discrete model for so many problems in + applied mathematics" [1]_. + + References + ---------- + .. [1] Gil Strang, Network applications: A = incidence matrix, + http://videolectures.net/mit18085f07_strang_lec03/ + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + if edgelist is None: + if G.is_multigraph(): + edgelist = list(G.edges(keys=True)) + else: + edgelist = list(G.edges()) + A = sp.sparse.lil_array((len(nodelist), len(edgelist)), dtype=dtype) + node_index = {node: i for i, node in enumerate(nodelist)} + for ei, e in enumerate(edgelist): + (u, v) = e[:2] + if u == v: + continue # self loops give zero column + try: + ui = node_index[u] + vi = node_index[v] + except KeyError as err: + raise nx.NetworkXError( + f"node {u} or {v} in edgelist but not in nodelist" + ) from err + if weight is None: + wt = 1 + else: + if G.is_multigraph(): + ekey = e[2] + wt = G[u][v][ekey].get(weight, 1) + else: + wt = G[u][v].get(weight, 1) + if oriented: + A[ui, ei] = -wt + A[vi, ei] = wt + else: + A[ui, ei] = wt + A[vi, ei] = wt + return A.asformat("csc") + + +@nx._dispatchable(edge_attrs="weight") +def adjacency_matrix(G, nodelist=None, dtype=None, weight="weight"): + """Returns adjacency matrix of `G`. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If ``nodelist=None`` (the default), then the ordering is produced by + ``G.nodes()``. + + dtype : NumPy data-type, optional + The desired data-type for the array. + If `None`, then the NumPy default is used. + + weight : string or None, optional (default='weight') + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + A : SciPy sparse array + Adjacency matrix representation of G. + + Notes + ----- + For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``. + + If you want a pure Python adjacency matrix representation try + :func:`~networkx.convert.to_dict_of_dicts` which will return a + dictionary-of-dictionaries format that can be addressed as a + sparse matrix. + + For multigraphs with parallel edges the weights are summed. + See :func:`networkx.convert_matrix.to_numpy_array` for other options. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the edge weight attribute + (or the number 1 if the edge has no weight attribute). 
If the + alternate convention of doubling the edge weight is desired the + resulting SciPy sparse array can be modified as follows:: + + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.adjacency_matrix(G) + >>> A.toarray() + array([[1]]) + >>> A.setdiag(A.diagonal() * 2) + >>> A.toarray() + array([[2]]) + + See Also + -------- + to_numpy_array + to_scipy_sparse_array + to_dict_of_dicts + adjacency_spectrum + """ + return nx.to_scipy_sparse_array(G, nodelist=nodelist, dtype=dtype, weight=weight) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/laplacianmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/laplacianmatrix.py new file mode 100644 index 0000000..690924f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/laplacianmatrix.py @@ -0,0 +1,520 @@ +"""Laplacian matrix of graphs. + +All calculations here are done using the out-degree. For Laplacians using +in-degree, use `G.reverse(copy=False)` instead of `G` and take the transpose. + +The `laplacian_matrix` function provides an unnormalized matrix, +while `normalized_laplacian_matrix`, `directed_laplacian_matrix`, +and `directed_combinatorial_laplacian_matrix` are all normalized. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "laplacian_matrix", + "normalized_laplacian_matrix", + "directed_laplacian_matrix", + "directed_combinatorial_laplacian_matrix", +] + + +@nx._dispatchable(edge_attrs="weight") +def laplacian_matrix(G, nodelist=None, weight="weight"): + """Returns the Laplacian matrix of G. + + The graph Laplacian is the matrix L = D - A, where + A is the adjacency matrix and D is the diagonal matrix of node degrees. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + L : SciPy sparse array + The Laplacian matrix of G. + + Notes + ----- + For MultiGraph, the edges weights are summed. + + This returns an unnormalized matrix. For a normalized output, + use `normalized_laplacian_matrix`, `directed_laplacian_matrix`, + or `directed_combinatorial_laplacian_matrix`. + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + See Also + -------- + :func:`~networkx.convert_matrix.to_numpy_array` + normalized_laplacian_matrix + directed_laplacian_matrix + directed_combinatorial_laplacian_matrix + :func:`~networkx.linalg.spectrum.laplacian_spectrum` + + Examples + -------- + For graphs with multiple connected components, L is permutation-similar + to a block diagonal matrix where each block is the respective Laplacian + matrix for each component. + + >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)]) + >>> print(nx.laplacian_matrix(G).toarray()) + [[ 1 -1 0 0 0] + [-1 2 -1 0 0] + [ 0 -1 1 0 0] + [ 0 0 0 1 -1] + [ 0 0 0 -1 1]] + + >>> edges = [ + ... (1, 2), + ... (2, 1), + ... (2, 4), + ... (4, 3), + ... (3, 4), + ... ] + >>> DiG = nx.DiGraph(edges) + >>> print(nx.laplacian_matrix(DiG).toarray()) + [[ 1 -1 0 0] + [-1 2 -1 0] + [ 0 0 1 -1] + [ 0 0 -1 1]] + + Notice that node 4 is represented by the third column and row. This is because + by default the row/column order is the order of `G.nodes` (i.e. 
the node added + order -- in the edgelist, 4 first appears in (2, 4), before node 3 in edge (4, 3).) + To control the node order of the matrix, use the `nodelist` argument. + + >>> print(nx.laplacian_matrix(DiG, nodelist=[1, 2, 3, 4]).toarray()) + [[ 1 -1 0 0] + [-1 2 0 -1] + [ 0 0 1 -1] + [ 0 0 -1 1]] + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + >>> print(nx.laplacian_matrix(DiG.reverse(copy=False)).toarray().T) + [[ 1 -1 0 0] + [-1 1 -1 0] + [ 0 0 2 -1] + [ 0 0 -1 1]] + + References + ---------- + .. [1] Langville, Amy N., and Carl D. Meyer. Google’s PageRank and Beyond: + The Science of Search Engine Rankings. Princeton University Press, 2006. + + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + n, m = A.shape + # TODO: rm csr_array wrapper when spdiags can produce arrays + D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr")) + return D - A + + +@nx._dispatchable(edge_attrs="weight") +def normalized_laplacian_matrix(G, nodelist=None, weight="weight"): + r"""Returns the normalized Laplacian matrix of G. + + The normalized graph Laplacian is the matrix + + .. math:: + + N = D^{-1/2} L D^{-1/2} + + where `L` is the graph Laplacian and `D` is the diagonal matrix of + node degrees [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + N : SciPy sparse array + The normalized Laplacian matrix of G. + + Notes + ----- + For MultiGraph, the edges weights are summed. + See :func:`to_numpy_array` for other options. + + If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is + the adjacency matrix [2]_. + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + For an unnormalized output, use `laplacian_matrix`. + + Examples + -------- + + >>> import numpy as np + >>> edges = [ + ... (1, 2), + ... (2, 1), + ... (2, 4), + ... (4, 3), + ... (3, 4), + ... ] + >>> DiG = nx.DiGraph(edges) + >>> print(nx.normalized_laplacian_matrix(DiG).toarray()) + [[ 1. -0.70710678 0. 0. ] + [-0.70710678 1. -0.70710678 0. ] + [ 0. 0. 1. -1. ] + [ 0. 0. -1. 1. ]] + + Notice that node 4 is represented by the third column and row. This is because + by default the row/column order is the order of `G.nodes` (i.e. the node added + order -- in the edgelist, 4 first appears in (2, 4), before node 3 in edge (4, 3).) + To control the node order of the matrix, use the `nodelist` argument. + + >>> print(nx.normalized_laplacian_matrix(DiG, nodelist=[1, 2, 3, 4]).toarray()) + [[ 1. -0.70710678 0. 0. ] + [-0.70710678 1. 0. -0.70710678] + [ 0. 0. 1. -1. ] + [ 0. 0. -1. 1. ]] + >>> G = nx.Graph(edges) + >>> print(nx.normalized_laplacian_matrix(G).toarray()) + [[ 1. -0.70710678 0. 0. ] + [-0.70710678 1. -0.5 0. ] + [ 0. -0.5 1. -0.70710678] + [ 0. 0. -0.70710678 1. 
]] + + See Also + -------- + laplacian_matrix + normalized_laplacian_spectrum + directed_laplacian_matrix + directed_combinatorial_laplacian_matrix + + References + ---------- + .. [1] Fan Chung-Graham, Spectral Graph Theory, + CBMS Regional Conference Series in Mathematics, Number 92, 1997. + .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized + Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98, + March 2007. + .. [3] Langville, Amy N., and Carl D. Meyer. Google’s PageRank and Beyond: + The Science of Search Engine Rankings. Princeton University Press, 2006. + """ + import numpy as np + import scipy as sp + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + n, _ = A.shape + diags = A.sum(axis=1) + # TODO: rm csr_array wrapper when spdiags can produce arrays + D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, n, n, format="csr")) + L = D - A + with np.errstate(divide="ignore"): + diags_sqrt = 1.0 / np.sqrt(diags) + diags_sqrt[np.isinf(diags_sqrt)] = 0 + # TODO: rm csr_array wrapper when spdiags can produce arrays + DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, n, n, format="csr")) + return DH @ (L @ DH) + + +############################################################################### +# Code based on work from https://github.com/bjedwards + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def directed_laplacian_matrix( + G, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Returns the directed Laplacian matrix of G. + + The graph directed Laplacian is the matrix + + .. math:: + + L = I - \frac{1}{2} \left (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} \right ) + + where `I` is the identity matrix, `P` is the transition matrix of the + graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and + zeros elsewhere [1]_. + + Depending on the value of walk_type, `P` can be the transition matrix + induced by a random walk, a lazy random walk, or a random walk with + teleportation (PageRank). + + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None`` + (the default), then a value is selected according to the properties of `G`: + - ``walk_type="random"`` if `G` is strongly connected and aperiodic + - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic + - ``walk_type="pagerank"`` for all other cases. + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + L : NumPy matrix + Normalized Laplacian of G. + + Notes + ----- + Only implemented for DiGraphs + + The result is always a symmetric matrix. + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + See Also + -------- + laplacian_matrix + normalized_laplacian_matrix + directed_combinatorial_laplacian_matrix + + References + ---------- + .. [1] Fan Chung (2005). 
+ Laplacians and the Cheeger inequality for directed graphs. + Annals of Combinatorics, 9(1), 2005 + """ + import numpy as np + import scipy as sp + + # NOTE: P has type ndarray if walk_type=="pagerank", else csr_array + P = _transition_matrix( + G, nodelist=nodelist, weight=weight, walk_type=walk_type, alpha=alpha + ) + + n, m = P.shape + + evals, evecs = sp.sparse.linalg.eigs(P.T, k=1) + v = evecs.flatten().real + p = v / v.sum() + # p>=0 by Perron-Frobenius Thm. Use abs() to fix roundoff across zero gh-6865 + sqrtp = np.sqrt(np.abs(p)) + Q = ( + # TODO: rm csr_array wrapper when spdiags creates arrays + sp.sparse.csr_array(sp.sparse.spdiags(sqrtp, 0, n, n)) + @ P + # TODO: rm csr_array wrapper when spdiags creates arrays + @ sp.sparse.csr_array(sp.sparse.spdiags(1.0 / sqrtp, 0, n, n)) + ) + # NOTE: This could be sparsified for the non-pagerank cases + I = np.identity(len(G)) + + return I - (Q + Q.T) / 2.0 + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def directed_combinatorial_laplacian_matrix( + G, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Return the directed combinatorial Laplacian matrix of G. + + The graph directed combinatorial Laplacian is the matrix + + .. math:: + + L = \Phi - \frac{1}{2} \left (\Phi P + P^T \Phi \right) + + where `P` is the transition matrix of the graph and `\Phi` a matrix + with the Perron vector of `P` in the diagonal and zeros elsewhere [1]_. + + Depending on the value of walk_type, `P` can be the transition matrix + induced by a random walk, a lazy random walk, or a random walk with + teleportation (PageRank). + + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None`` + (the default), then a value is selected according to the properties of `G`: + - ``walk_type="random"`` if `G` is strongly connected and aperiodic + - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic + - ``walk_type="pagerank"`` for all other cases. + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + L : NumPy matrix + Combinatorial Laplacian of G. + + Notes + ----- + Only implemented for DiGraphs + + The result is always a symmetric matrix. + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + See Also + -------- + laplacian_matrix + normalized_laplacian_matrix + directed_laplacian_matrix + + References + ---------- + .. [1] Fan Chung (2005). + Laplacians and the Cheeger inequality for directed graphs. 
+ Annals of Combinatorics, 9(1), 2005 + """ + import scipy as sp + + P = _transition_matrix( + G, nodelist=nodelist, weight=weight, walk_type=walk_type, alpha=alpha + ) + + n, m = P.shape + + evals, evecs = sp.sparse.linalg.eigs(P.T, k=1) + v = evecs.flatten().real + p = v / v.sum() + # NOTE: could be improved by not densifying + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + Phi = sp.sparse.csr_array(sp.sparse.spdiags(p, 0, n, n)).toarray() + + return Phi - (Phi @ P + P.T @ Phi) / 2.0 + + +def _transition_matrix(G, nodelist=None, weight="weight", walk_type=None, alpha=0.95): + """Returns the transition matrix of G. + + This is a row stochastic giving the transition probabilities while + performing a random walk on the graph. Depending on the value of walk_type, + P can be the transition matrix induced by a random walk, a lazy random walk, + or a random walk with teleportation (PageRank). + + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None`` + (the default), then a value is selected according to the properties of `G`: + - ``walk_type="random"`` if `G` is strongly connected and aperiodic + - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic + - ``walk_type="pagerank"`` for all other cases. + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + P : numpy.ndarray + transition matrix of G. + + Raises + ------ + NetworkXError + If walk_type not specified or alpha not in valid range + """ + import numpy as np + import scipy as sp + + if walk_type is None: + if nx.is_strongly_connected(G): + if nx.is_aperiodic(G): + walk_type = "random" + else: + walk_type = "lazy" + else: + walk_type = "pagerank" + + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, dtype=float) + n, m = A.shape + if walk_type in ["random", "lazy"]: + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + DI = sp.sparse.csr_array(sp.sparse.spdiags(1.0 / A.sum(axis=1), 0, n, n)) + if walk_type == "random": + P = DI @ A + else: + # TODO: Rm csr_array wrapper when identity array creation becomes available + I = sp.sparse.csr_array(sp.sparse.identity(n)) + P = (I + DI @ A) / 2.0 + + elif walk_type == "pagerank": + if not (0 < alpha < 1): + raise nx.NetworkXError("alpha must be between 0 and 1") + # this is using a dense representation. NOTE: This should be sparsified! 
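+        # Added sketch of the steps below: rows of dangling nodes (zero
+        # out-degree) are replaced by the uniform distribution 1/n, every
+        # row is normalized to sum to 1, and the walk is then mixed with
+        # uniform teleportation,
+        #     P = alpha * W + (1 - alpha) / n,
+        # where W denotes the row-normalized matrix -- the standard
+        # PageRank ("Google matrix") construction.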
+        A = A.toarray()
+        # add constant to dangling nodes' row
+        A[A.sum(axis=1) == 0, :] = 1 / n
+        # normalize
+        A = A / A.sum(axis=1)[np.newaxis, :].T
+        P = alpha * A + (1 - alpha) / n
+    else:
+        raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")
+
+    return P
diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/modularitymatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/modularitymatrix.py
new file mode 100644
index 0000000..0287910
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/linalg/modularitymatrix.py
@@ -0,0 +1,166 @@
+"""Modularity matrix of graphs."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["modularity_matrix", "directed_modularity_matrix"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def modularity_matrix(G, nodelist=None, weight=None):
+    r"""Returns the modularity matrix of G.
+
+    The modularity matrix is the matrix B = A - <A>, where A is the adjacency
+    matrix and <A> is the average adjacency matrix, assuming that the graph
+    is described by the configuration model.
+
+    More specifically, the element B_ij of B is defined as
+
+    .. math::
+        A_{ij} - {k_i k_j \over 2 m}
+
+    where k_i is the degree of node i, and where m is the number of edges
+    in the graph. When weight is set to the name of an edge attribute, Aij,
+    k_i, k_j and m are computed using its value.
+
+    Parameters
+    ----------
+    G : Graph
+       A NetworkX graph
+
+    nodelist : list, optional
+       The rows and columns are ordered according to the nodes in nodelist.
+       If nodelist is None, then the ordering is produced by G.nodes().
+
+    weight : string or None, optional (default=None)
+       The edge attribute that holds the numerical value used for
+       the edge weight. If None then all edge weights are 1.
+
+    Returns
+    -------
+    B : Numpy array
+      The modularity matrix of G.
+
+    Examples
+    --------
+    >>> k = [3, 2, 2, 1, 0]
+    >>> G = nx.havel_hakimi_graph(k)
+    >>> B = nx.modularity_matrix(G)
+
+
+    See Also
+    --------
+    to_numpy_array
+    modularity_spectrum
+    adjacency_matrix
+    directed_modularity_matrix
+
+    References
+    ----------
+    .. [1] M. E. J. Newman, "Modularity and community structure in networks",
+           Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
+    """
+    import numpy as np
+
+    if nodelist is None:
+        nodelist = list(G)
+    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
+    k = A.sum(axis=1)
+    m = k.sum() * 0.5
+    # Expected adjacency matrix
+    X = np.outer(k, k) / (2 * m)
+
+    return A - X
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def directed_modularity_matrix(G, nodelist=None, weight=None):
+    """Returns the directed modularity matrix of G.
+
+    The modularity matrix is the matrix B = A - <A>, where A is the adjacency
+    matrix and <A> is the expected adjacency matrix, assuming that the graph
+    is described by the configuration model.
+
+    More specifically, the element B_ij of B is defined as
+
+    .. math::
+        B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m
+
+    where :math:`k_i^{out}` is the out degree of node i, and :math:`k_j^{in}`
+    is the in degree of node j, with m the number of edges in the graph.
+    When weight is set to the name of an edge attribute, Aij, k_i, k_j and m
+    are computed using its value.
+
+    Parameters
+    ----------
+    G : DiGraph
+       A NetworkX DiGraph
+
+    nodelist : list, optional
+       The rows and columns are ordered according to the nodes in nodelist.
+ If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + Returns + ------- + B : Numpy array + The modularity matrix of G. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from( + ... ( + ... (1, 2), + ... (1, 3), + ... (3, 1), + ... (3, 2), + ... (3, 5), + ... (4, 5), + ... (4, 6), + ... (5, 4), + ... (5, 6), + ... (6, 4), + ... ) + ... ) + >>> B = nx.directed_modularity_matrix(G) + + + Notes + ----- + NetworkX defines the element A_ij of the adjacency matrix as 1 if there + is a link going from node i to node j. Leicht and Newman use the opposite + definition. This explains the different expression for B_ij. + + See Also + -------- + to_numpy_array + modularity_spectrum + adjacency_matrix + modularity_matrix + + References + ---------- + .. [1] E. A. Leicht, M. E. J. Newman, + "Community structure in directed networks", + Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008. + """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + k_in = A.sum(axis=0) + k_out = A.sum(axis=1) + m = k_in.sum() + # Expected adjacency matrix + X = np.outer(k_out, k_in) / m + + return A - X diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/spectrum.py b/.venv/lib/python3.12/site-packages/networkx/linalg/spectrum.py new file mode 100644 index 0000000..079b185 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/spectrum.py @@ -0,0 +1,186 @@ +""" +Eigenvalue spectrum of graphs. +""" + +import networkx as nx + +__all__ = [ + "laplacian_spectrum", + "adjacency_spectrum", + "modularity_spectrum", + "normalized_laplacian_spectrum", + "bethe_hessian_spectrum", +] + + +@nx._dispatchable(edge_attrs="weight") +def laplacian_spectrum(G, weight="weight"): + """Returns eigenvalues of the Laplacian of G + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See :func:`~networkx.convert_matrix.to_numpy_array` for other options. + + See Also + -------- + laplacian_matrix + + Examples + -------- + The multiplicity of 0 as an eigenvalue of the laplacian matrix is equal + to the number of connected components of G. + + >>> G = nx.Graph() # Create a graph with 5 nodes and 3 connected components + >>> G.add_nodes_from(range(5)) + >>> G.add_edges_from([(0, 2), (3, 4)]) + >>> nx.laplacian_spectrum(G) + array([0., 0., 0., 2., 2.]) + + """ + import scipy as sp + + return sp.linalg.eigvalsh(nx.laplacian_matrix(G, weight=weight).todense()) + + +@nx._dispatchable(edge_attrs="weight") +def normalized_laplacian_spectrum(G, weight="weight"): + """Return eigenvalues of the normalized Laplacian of G + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See to_numpy_array for other options. 
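+
+    As a quick added illustration (the bound is a standard fact of spectral
+    graph theory rather than part of the original docstring): every
+    eigenvalue of the normalized Laplacian lies in the interval [0, 2].
+
+    >>> evals = nx.normalized_laplacian_spectrum(nx.path_graph(4))
+    >>> bool((evals >= -1e-12).all() and (evals <= 2 + 1e-12).all())
+    True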
+ + See Also + -------- + normalized_laplacian_matrix + """ + import scipy as sp + + return sp.linalg.eigvalsh( + nx.normalized_laplacian_matrix(G, weight=weight).todense() + ) + + +@nx._dispatchable(edge_attrs="weight") +def adjacency_spectrum(G, weight="weight"): + """Returns eigenvalues of the adjacency matrix of G. + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See to_numpy_array for other options. + + See Also + -------- + adjacency_matrix + """ + import scipy as sp + + return sp.linalg.eigvals(nx.adjacency_matrix(G, weight=weight).todense()) + + +@nx._dispatchable +def modularity_spectrum(G): + """Returns eigenvalues of the modularity matrix of G. + + Parameters + ---------- + G : Graph + A NetworkX Graph or DiGraph + + Returns + ------- + evals : NumPy array + Eigenvalues + + See Also + -------- + modularity_matrix + + References + ---------- + .. [1] M. E. J. Newman, "Modularity and community structure in networks", + Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006. + """ + import scipy as sp + + if G.is_directed(): + return sp.linalg.eigvals(nx.directed_modularity_matrix(G)) + else: + return sp.linalg.eigvals(nx.modularity_matrix(G)) + + +@nx._dispatchable +def bethe_hessian_spectrum(G, r=None): + """Returns eigenvalues of the Bethe Hessian matrix of G. + + Parameters + ---------- + G : Graph + A NetworkX Graph or DiGraph + + r : float + Regularizer parameter + + Returns + ------- + evals : NumPy array + Eigenvalues + + See Also + -------- + bethe_hessian_matrix + + References + ---------- + .. [1] A. Saade, F. Krzakala and L. Zdeborová + "Spectral clustering of graphs with the bethe hessian", + Advances in Neural Information Processing Systems. 2014. 
+ """ + import scipy as sp + + return sp.linalg.eigvalsh(nx.bethe_hessian_matrix(G, r).todense()) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..1b557c3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-312.pyc new file mode 100644 index 0000000..c5fcbe2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-312.pyc new file mode 100644 index 0000000..16a2025 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_bethehessian.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_bethehessian.cpython-312.pyc new file mode 100644 index 0000000..96a0466 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_bethehessian.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-312.pyc new file mode 100644 index 0000000..e9da338 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-312.pyc new file mode 100644 index 0000000..1fcbd18 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_modularity.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_modularity.cpython-312.pyc new file mode 100644 index 0000000..dee0698 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_modularity.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-312.pyc new file mode 100644 index 0000000..c7bcc54 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py new file mode 100644 index 0000000..31f911f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py @@ -0,0 +1,400 @@ +from math import sqrt + +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +methods = ("tracemin_pcg", "tracemin_lu", "lanczos", "lobpcg") + + +def test_algebraic_connectivity_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.algebraic_connectivity(G, method="tracemin_chol") + + +def test_fiedler_vector_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.fiedler_vector(G, method="tracemin_chol") + + +def test_spectral_ordering_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.spectral_ordering(G, method="tracemin_chol") + + +def test_fiedler_vector_tracemin_unknown(): + """Test that "tracemin_unknown" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + L = nx.laplacian_matrix(G) + X = np.asarray(np.random.normal(size=(1, L.shape[0]))).T + with pytest.raises(nx.NetworkXError, match="Unknown linear system solver"): + nx.linalg.algebraicconnectivity._tracemin_fiedler( + L, X, normalized=False, tol=1e-8, method="tracemin_unknown" + ) + + +def test_spectral_bisection(): + pytest.importorskip("scipy") + G = nx.barbell_graph(3, 0) + C = nx.spectral_bisection(G) + assert C == ({0, 1, 2}, {3, 4, 5}) + + mapping = dict(enumerate("badfec")) + G = nx.relabel_nodes(G, mapping) + C = nx.spectral_bisection(G) + assert C == ( + {mapping[0], mapping[1], mapping[2]}, + {mapping[3], mapping[4], mapping[5]}, + ) + + +def check_eigenvector(A, l, x): + nx = np.linalg.norm(x) + # Check zeroness. + assert nx != pytest.approx(0, abs=1e-07) + y = A @ x + ny = np.linalg.norm(y) + # Check collinearity. + assert x @ y == pytest.approx(nx * ny, abs=1e-7) + # Check eigenvalue. 
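+    # Added note: combined with the collinearity check above, verifying that
+    # ||A @ x|| equals l * ||x|| confirms that (l, x) is an eigenpair of A.
+    # Also note the local name `nx` is the norm of x here and shadows the
+    # `networkx` alias within this helper.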
+ assert ny == pytest.approx(l * nx, abs=1e-7) + + +class TestAlgebraicConnectivity: + @pytest.mark.parametrize("method", methods) + def test_directed(self, method): + G = nx.DiGraph() + pytest.raises( + nx.NetworkXNotImplemented, nx.algebraic_connectivity, G, method=method + ) + pytest.raises(nx.NetworkXNotImplemented, nx.fiedler_vector, G, method=method) + + @pytest.mark.parametrize("method", methods) + def test_null_and_singleton(self, method): + G = nx.Graph() + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method=method) + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + G.add_edge(0, 0) + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method=method) + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + + @pytest.mark.parametrize("method", methods) + def test_disconnected(self, method): + G = nx.Graph() + G.add_nodes_from(range(2)) + assert nx.algebraic_connectivity(G) == 0 + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + G.add_edge(0, 1, weight=0) + assert nx.algebraic_connectivity(G) == 0 + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + + def test_unrecognized_method(self): + pytest.importorskip("scipy") + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method="unknown") + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method="unknown") + + @pytest.mark.parametrize("method", methods) + def test_two_nodes(self, method): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_edge(0, 1, weight=1) + A = nx.laplacian_matrix(G) + assert nx.algebraic_connectivity(G, tol=1e-12, method=method) == pytest.approx( + 2, abs=1e-7 + ) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, 2, x) + + @pytest.mark.parametrize("method", methods) + def test_two_nodes_multigraph(self, method): + pytest.importorskip("scipy") + G = nx.MultiGraph() + G.add_edge(0, 0, spam=1e8) + G.add_edge(0, 1, spam=1) + G.add_edge(0, 1, spam=-2) + A = -3 * nx.laplacian_matrix(G, weight="spam") + assert nx.algebraic_connectivity( + G, weight="spam", tol=1e-12, method=method + ) == pytest.approx(6, abs=1e-7) + x = nx.fiedler_vector(G, weight="spam", tol=1e-12, method=method) + check_eigenvector(A, 6, x) + + def test_abbreviation_of_method(self): + pytest.importorskip("scipy") + G = nx.path_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2 + sqrt(2)) + ac = nx.algebraic_connectivity(G, tol=1e-12, method="tracemin") + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method="tracemin") + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_path(self, method): + pytest.importorskip("scipy") + G = nx.path_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2 + sqrt(2)) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_problematic_graph_issue_2381(self, method): + pytest.importorskip("scipy") + G = nx.path_graph(4) + G.add_edges_from([(4, 2), (5, 1)]) + A = nx.laplacian_matrix(G) + sigma = 0.438447187191 + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_cycle(self, 
method): + pytest.importorskip("scipy") + G = nx.cycle_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_seed_argument(self, method): + pytest.importorskip("scipy") + G = nx.cycle_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method, seed=1) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method, seed=1) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize( + ("normalized", "sigma", "laplacian_fn"), + ( + (False, 0.2434017461399311, nx.laplacian_matrix), + (True, 0.08113391537997749, nx.normalized_laplacian_matrix), + ), + ) + @pytest.mark.parametrize("method", methods) + def test_buckminsterfullerene(self, normalized, sigma, laplacian_fn, method): + pytest.importorskip("scipy") + G = nx.Graph( + [ + (1, 10), + (1, 41), + (1, 59), + (2, 12), + (2, 42), + (2, 60), + (3, 6), + (3, 43), + (3, 57), + (4, 8), + (4, 44), + (4, 58), + (5, 13), + (5, 56), + (5, 57), + (6, 10), + (6, 31), + (7, 14), + (7, 56), + (7, 58), + (8, 12), + (8, 32), + (9, 23), + (9, 53), + (9, 59), + (10, 15), + (11, 24), + (11, 53), + (11, 60), + (12, 16), + (13, 14), + (13, 25), + (14, 26), + (15, 27), + (15, 49), + (16, 28), + (16, 50), + (17, 18), + (17, 19), + (17, 54), + (18, 20), + (18, 55), + (19, 23), + (19, 41), + (20, 24), + (20, 42), + (21, 31), + (21, 33), + (21, 57), + (22, 32), + (22, 34), + (22, 58), + (23, 24), + (25, 35), + (25, 43), + (26, 36), + (26, 44), + (27, 51), + (27, 59), + (28, 52), + (28, 60), + (29, 33), + (29, 34), + (29, 56), + (30, 51), + (30, 52), + (30, 53), + (31, 47), + (32, 48), + (33, 45), + (34, 46), + (35, 36), + (35, 37), + (36, 38), + (37, 39), + (37, 49), + (38, 40), + (38, 50), + (39, 40), + (39, 51), + (40, 52), + (41, 47), + (42, 48), + (43, 49), + (44, 50), + (45, 46), + (45, 54), + (46, 55), + (47, 54), + (48, 55), + ] + ) + A = laplacian_fn(G) + try: + assert nx.algebraic_connectivity( + G, normalized=normalized, tol=1e-12, method=method + ) == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, normalized=normalized, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + except nx.NetworkXError as err: + if err.args not in ( + ("Cholesky solver unavailable.",), + ("LU solver unavailable.",), + ): + raise + + +class TestSpectralOrdering: + _graphs = (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph) + + @pytest.mark.parametrize("graph", _graphs) + def test_nullgraph(self, graph): + G = graph() + pytest.raises(nx.NetworkXError, nx.spectral_ordering, G) + + @pytest.mark.parametrize("graph", _graphs) + def test_singleton(self, graph): + G = graph() + G.add_node("x") + assert nx.spectral_ordering(G) == ["x"] + G.add_edge("x", "x", weight=33) + G.add_edge("x", "x", weight=33) + assert nx.spectral_ordering(G) == ["x"] + + def test_unrecognized_method(self): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, nx.spectral_ordering, G, method="unknown") + + @pytest.mark.parametrize("method", methods) + def test_three_nodes(self, method): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1)], weight="spam") + order = nx.spectral_ordering(G, weight="spam", method=method) + assert set(order) == set(G) + assert {1, 3} in 
(set(order[:-1]), set(order[1:])) + + @pytest.mark.parametrize("method", methods) + def test_three_nodes_multigraph(self, method): + pytest.importorskip("scipy") + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1), (2, 3, 2)]) + order = nx.spectral_ordering(G, method=method) + assert set(order) == set(G) + assert {2, 3} in (set(order[:-1]), set(order[1:])) + + @pytest.mark.parametrize("method", methods) + def test_path(self, method): + pytest.importorskip("scipy") + path = list(range(10)) + np.random.shuffle(path) + G = nx.Graph() + nx.add_path(G, path) + order = nx.spectral_ordering(G, method=method) + assert order in [path, list(reversed(path))] + + @pytest.mark.parametrize("method", methods) + def test_seed_argument(self, method): + pytest.importorskip("scipy") + path = list(range(10)) + np.random.shuffle(path) + G = nx.Graph() + nx.add_path(G, path) + order = nx.spectral_ordering(G, method=method, seed=1) + assert order in [path, list(reversed(path))] + + @pytest.mark.parametrize("method", methods) + def test_disconnected(self, method): + pytest.importorskip("scipy") + G = nx.Graph() + nx.add_path(G, range(0, 10, 2)) + nx.add_path(G, range(1, 10, 2)) + order = nx.spectral_ordering(G, method=method) + assert set(order) == set(G) + seqs = [ + list(range(0, 10, 2)), + list(range(8, -1, -2)), + list(range(1, 10, 2)), + list(range(9, -1, -2)), + ] + assert order[:5] in seqs + assert order[5:] in seqs + + @pytest.mark.parametrize( + ("normalized", "expected_order"), + ( + (False, [[1, 2, 0, 3, 4, 5, 6, 9, 7, 8], [8, 7, 9, 6, 5, 4, 3, 0, 2, 1]]), + (True, [[1, 2, 3, 0, 4, 5, 9, 6, 7, 8], [8, 7, 6, 9, 5, 4, 0, 3, 2, 1]]), + ), + ) + @pytest.mark.parametrize("method", methods) + def test_cycle(self, normalized, expected_order, method): + pytest.importorskip("scipy") + path = list(range(10)) + G = nx.Graph() + nx.add_path(G, path, weight=5) + G.add_edge(path[-1], path[0], weight=1) + A = nx.laplacian_matrix(G).todense() + order = nx.spectral_ordering(G, normalized=normalized, method=method) + assert order in expected_order diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_attrmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_attrmatrix.py new file mode 100644 index 0000000..05a3b94 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_attrmatrix.py @@ -0,0 +1,108 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") + + +def test_attr_matrix(): + G = nx.Graph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + + def node_attr(u): + return G.nodes[u].get("size", 0.5) * 3 + + def edge_attr(u, v): + return G[u][v].get("thickness", 0.5) + + M = nx.attr_matrix(G, edge_attr=edge_attr, node_attr=node_attr) + np.testing.assert_equal(M[0], np.array([[6.0]])) + assert M[1] == [1.5] + + +def test_attr_matrix_directed(): + G = nx.DiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 1., 1.], + [0., 0., 1.], + [0., 0., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + + +def test_attr_matrix_multigraph(): + G = nx.MultiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, 
thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 3., 1.], + [3., 0., 1.], + [1., 1., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + M = nx.attr_matrix(G, edge_attr="weight", rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 9., 1.], + [9., 0., 1.], + [1., 1., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + M = nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 3., 2.], + [3., 0., 3.], + [2., 3., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + + +def test_attr_sparse_matrix(): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_sparse_matrix(G) + mtx = M[0] + data = np.ones((3, 3), float) + np.fill_diagonal(data, 0) + np.testing.assert_equal(mtx.todense(), np.array(data)) + assert M[1] == [0, 1, 2] + + +def test_attr_sparse_matrix_directed(): + pytest.importorskip("scipy") + G = nx.DiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_sparse_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 1., 1.], + [0., 0., 1.], + [0., 0., 0.]] + ) + # fmt: on + np.testing.assert_equal(M.todense(), np.array(data)) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_bethehessian.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_bethehessian.py new file mode 100644 index 0000000..92b745b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_bethehessian.py @@ -0,0 +1,40 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestBetheHessian: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = nx.havel_hakimi_graph(deg) + cls.P = nx.path_graph(3) + + def test_bethe_hessian(self): + "Bethe Hessian matrix" + # fmt: off + H = np.array([[4, -2, 0], + [-2, 5, -2], + [0, -2, 4]]) + # fmt: on + permutation = [2, 0, 1] + # Bethe Hessian gives expected form + np.testing.assert_equal(nx.bethe_hessian_matrix(self.P, r=2).todense(), H) + # nodelist is correctly implemented + np.testing.assert_equal( + nx.bethe_hessian_matrix(self.P, r=2, nodelist=permutation).todense(), + H[np.ix_(permutation, permutation)], + ) + # Equal to Laplacian matrix when r=1 + np.testing.assert_equal( + nx.bethe_hessian_matrix(self.G, r=1).todense(), + nx.laplacian_matrix(self.G).todense(), + ) + # Correct default for the regularizer r + np.testing.assert_equal( + nx.bethe_hessian_matrix(self.G).todense(), + nx.bethe_hessian_matrix(self.G, r=1.25).todense(), + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_graphmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_graphmatrix.py new file mode 100644 index 0000000..d84a397 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_graphmatrix.py @@ -0,0 +1,275 @@ +import pytest + +import networkx as nx +from networkx.exception import NetworkXError + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +def test_incidence_matrix_simple(): + deg = [3, 2, 2, 1, 0] + G = nx.havel_hakimi_graph(deg) + deg = [(1, 0), (1, 0), (1, 0), (2, 0), (1, 0), (2, 1), (0, 1), (0, 1)] + MG = nx.random_clustered_graph(deg, 
seed=42) + + I = nx.incidence_matrix(G, dtype=int).todense() + # fmt: off + expected = np.array( + [[1, 1, 1, 0], + [0, 1, 0, 1], + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 0]] + ) + # fmt: on + np.testing.assert_equal(I, expected) + + I = nx.incidence_matrix(MG, dtype=int).todense() + # fmt: off + expected = np.array( + [[1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 0, 1]] + ) + # fmt: on + np.testing.assert_equal(I, expected) + + with pytest.raises(NetworkXError): + nx.incidence_matrix(G, nodelist=[0, 1]) + + +class TestGraphMatrix: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = nx.havel_hakimi_graph(deg) + # fmt: off + cls.OI = np.array( + [[-1, -1, -1, 0], + [1, 0, 0, -1], + [0, 1, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 0]] + ) + cls.A = np.array( + [[0, 1, 1, 1, 0], + [1, 0, 1, 0, 0], + [1, 1, 0, 0, 0], + [1, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.WG = nx.havel_hakimi_graph(deg) + cls.WG.add_edges_from( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) + # fmt: off + cls.WA = np.array( + [[0, 0.5, 0.5, 0.5, 0], + [0.5, 0, 0.5, 0, 0], + [0.5, 0.5, 0, 0, 0], + [0.5, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.MG = nx.MultiGraph(cls.G) + cls.MG2 = cls.MG.copy() + cls.MG2.add_edge(0, 1) + # fmt: off + cls.MG2A = np.array( + [[0, 2, 1, 1, 0], + [2, 0, 1, 0, 0], + [1, 1, 0, 0, 0], + [1, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + cls.MGOI = np.array( + [[-1, -1, -1, -1, 0], + [1, 1, 0, 0, -1], + [0, 0, 1, 0, 1], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.no_edges_G = nx.Graph([(1, 2), (3, 2, {"weight": 8})]) + cls.no_edges_A = np.array([[0, 0], [0, 0]]) + + def test_incidence_matrix(self): + "Conversion to incidence matrix" + I = nx.incidence_matrix( + self.G, + nodelist=sorted(self.G), + edgelist=sorted(self.G.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.G, + nodelist=sorted(self.G), + edgelist=sorted(self.G.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + I = nx.incidence_matrix( + self.MG, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.MG, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + I = nx.incidence_matrix( + self.MG2, + nodelist=sorted(self.MG2), + edgelist=sorted(self.MG2.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.MGOI) + + I = nx.incidence_matrix( + self.MG2, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG2.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.MGOI)) + + I = nx.incidence_matrix(self.G, dtype=np.uint8) + assert I.dtype == np.uint8 + + def test_weighted_incidence_matrix(self): + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + # 
np.testing.assert_equal(nx.incidence_matrix(self.WG,oriented=True, + # weight='weight').todense(),0.5*self.OI) + # np.testing.assert_equal(nx.incidence_matrix(self.WG,weight='weight').todense(), + # np.abs(0.5*self.OI)) + # np.testing.assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other').todense(), + # 0.3*self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + weight="weight", + ).todense() + np.testing.assert_equal(I, 0.5 * self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=False, + weight="weight", + ).todense() + np.testing.assert_equal(I, np.abs(0.5 * self.OI)) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + weight="other", + ).todense() + np.testing.assert_equal(I, 0.3 * self.OI) + + # WMG=nx.MultiGraph(self.WG) + # WMG.add_edge(0,1,weight=0.5,other=0.3) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='weight').todense(), + # np.abs(0.5*self.MGOI)) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True).todense(), + # 0.5*self.MGOI) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True).todense(), + # 0.3*self.MGOI) + + WMG = nx.MultiGraph(self.WG) + WMG.add_edge(0, 1, weight=0.5, other=0.3) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=True, + weight="weight", + ).todense() + np.testing.assert_equal(I, 0.5 * self.MGOI) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=False, + weight="weight", + ).todense() + np.testing.assert_equal(I, np.abs(0.5 * self.MGOI)) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=True, + weight="other", + ).todense() + np.testing.assert_equal(I, 0.3 * self.MGOI) + + def test_adjacency_matrix(self): + "Conversion to adjacency matrix" + np.testing.assert_equal(nx.adjacency_matrix(self.G).todense(), self.A) + np.testing.assert_equal(nx.adjacency_matrix(self.MG).todense(), self.A) + np.testing.assert_equal(nx.adjacency_matrix(self.MG2).todense(), self.MG2A) + np.testing.assert_equal( + nx.adjacency_matrix(self.G, nodelist=[0, 1]).todense(), self.A[:2, :2] + ) + np.testing.assert_equal(nx.adjacency_matrix(self.WG).todense(), self.WA) + np.testing.assert_equal( + nx.adjacency_matrix(self.WG, weight=None).todense(), self.A + ) + np.testing.assert_equal( + nx.adjacency_matrix(self.MG2, weight=None).todense(), self.MG2A + ) + np.testing.assert_equal( + nx.adjacency_matrix(self.WG, weight="other").todense(), 0.6 * self.WA + ) + np.testing.assert_equal( + nx.adjacency_matrix(self.no_edges_G, nodelist=[1, 3]).todense(), + self.no_edges_A, + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_laplacian.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_laplacian.py new file mode 100644 index 0000000..b1d5c13 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_laplacian.py @@ -0,0 +1,334 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestLaplacian: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = nx.havel_hakimi_graph(deg) + cls.WG = nx.Graph( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) 
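+        # Node 4 has degree 0 in G, so it is lost when WG is built from
+        # G's edges alone; the next line restores it explicitly.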
+ cls.WG.add_node(4) + cls.MG = nx.MultiGraph(cls.G) + + # Graph with clsloops + cls.Gsl = cls.G.copy() + for node in cls.Gsl.nodes(): + cls.Gsl.add_edge(node, node) + + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". + cls.DiG = nx.DiGraph() + cls.DiG.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + cls.DiMG = nx.MultiDiGraph(cls.DiG) + cls.DiWG = nx.DiGraph( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.DiG.edges() + ) + cls.DiGsl = cls.DiG.copy() + for node in cls.DiGsl.nodes(): + cls.DiGsl.add_edge(node, node) + + def test_laplacian(self): + "Graph Laplacian" + # fmt: off + NL = np.array([[ 3, -1, -1, -1, 0], + [-1, 2, -1, 0, 0], + [-1, -1, 2, 0, 0], + [-1, 0, 0, 1, 0], + [ 0, 0, 0, 0, 0]]) + # fmt: on + WL = 0.5 * NL + OL = 0.3 * NL + # fmt: off + DiNL = np.array([[ 2, -1, -1, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0], + [-1, -1, 3, -1, 0, 0], + [ 0, 0, 0, 2, -1, -1], + [ 0, 0, 0, -1, 2, -1], + [ 0, 0, 0, 0, -1, 1]]) + # fmt: on + DiWL = 0.5 * DiNL + DiOL = 0.3 * DiNL + np.testing.assert_equal(nx.laplacian_matrix(self.G).todense(), NL) + np.testing.assert_equal(nx.laplacian_matrix(self.MG).todense(), NL) + np.testing.assert_equal( + nx.laplacian_matrix(self.G, nodelist=[0, 1]).todense(), + np.array([[1, -1], [-1, 1]]), + ) + np.testing.assert_equal(nx.laplacian_matrix(self.WG).todense(), WL) + np.testing.assert_equal(nx.laplacian_matrix(self.WG, weight=None).todense(), NL) + np.testing.assert_equal( + nx.laplacian_matrix(self.WG, weight="other").todense(), OL + ) + + np.testing.assert_equal(nx.laplacian_matrix(self.DiG).todense(), DiNL) + np.testing.assert_equal(nx.laplacian_matrix(self.DiMG).todense(), DiNL) + np.testing.assert_equal( + nx.laplacian_matrix(self.DiG, nodelist=[1, 2]).todense(), + np.array([[1, -1], [0, 0]]), + ) + np.testing.assert_equal(nx.laplacian_matrix(self.DiWG).todense(), DiWL) + np.testing.assert_equal( + nx.laplacian_matrix(self.DiWG, weight=None).todense(), DiNL + ) + np.testing.assert_equal( + nx.laplacian_matrix(self.DiWG, weight="other").todense(), DiOL + ) + + def test_normalized_laplacian(self): + "Generalized Graph Laplacian" + # fmt: off + G = np.array([[ 1. , -0.408, -0.408, -0.577, 0.], + [-0.408, 1. , -0.5 , 0. , 0.], + [-0.408, -0.5 , 1. , 0. , 0.], + [-0.577, 0. , 0. , 1. , 0.], + [ 0. , 0. , 0. , 0. , 0.]]) + GL = np.array([[ 1. , -0.408, -0.408, -0.577, 0. ], + [-0.408, 1. , -0.5 , 0. , 0. ], + [-0.408, -0.5 , 1. , 0. , 0. ], + [-0.577, 0. , 0. , 1. , 0. ], + [ 0. , 0. , 0. , 0. , 0. ]]) + Lsl = np.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0. ], + [-0.2887, 0.6667, -0.3333, 0. , 0. ], + [-0.2887, -0.3333, 0.6667, 0. , 0. ], + [-0.3536, 0. , 0. , 0.5 , 0. ], + [ 0. , 0. , 0. , 0. , 0. ]]) + + DiG = np.array([[ 1. , 0. , -0.4082, 0. , 0. , 0. ], + [ 0. , 0. , 0. , 0. , 0. , 0. ], + [-0.4082, 0. , 1. , 0. , -0.4082, 0. ], + [ 0. , 0. , 0. , 1. , -0.5 , -0.7071], + [ 0. , 0. , 0. , -0.5 , 1. , -0.7071], + [ 0. , 0. , 0. , -0.7071, 0. , 1. ]]) + DiGL = np.array([[ 1. , 0. , -0.4082, 0. , 0. , 0. ], + [ 0. , 0. , 0. , 0. , 0. , 0. ], + [-0.4082, 0. , 1. , -0.4082, 0. , 0. ], + [ 0. , 0. , 0. , 1. , -0.5 , -0.7071], + [ 0. , 0. , 0. , -0.5 , 1. , -0.7071], + [ 0. , 0. , 0. , 0. , -0.7071, 1. ]]) + DiLsl = np.array([[ 0.6667, -0.5774, -0.2887, 0. , 0. , 0. ], + [ 0. , 0. , 0. , 0. , 0. , 0. ], + [-0.2887, -0.5 , 0.75 , -0.2887, 0. , 0. ], + [ 0. , 0. , 0. , 0.6667, -0.3333, -0.4082], + [ 0. , 0. , 0. 
, -0.3333, 0.6667, -0.4082], + [ 0. , 0. , 0. , 0. , -0.4082, 0.5 ]]) + # fmt: on + + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.G, nodelist=range(5)).todense(), + G, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.G).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.MG).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.WG).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.WG, weight="other").todense(), + GL, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.Gsl).todense(), Lsl, decimal=3 + ) + + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix( + self.DiG, + nodelist=range(1, 1 + 6), + ).todense(), + DiG, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiG).todense(), DiGL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiMG).todense(), DiGL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiWG).todense(), DiGL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiWG, weight="other").todense(), + DiGL, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiGsl).todense(), DiLsl, decimal=3 + ) + + +def test_directed_laplacian(): + "Directed Laplacian" + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". The graph contains dangling nodes, so + # the pagerank random walk is selected by directed_laplacian + G = nx.DiGraph() + G.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + # fmt: off + GL = np.array([[ 0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261], + [-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554], + [-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251], + [-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675], + [-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078], + [-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make the graph strongly connected, so we can use a random and lazy walk + G.add_edges_from(((2, 5), (6, 1))) + # fmt: off + GL = np.array([[ 1. , -0.3062, -0.4714, 0. , 0. , -0.3227], + [-0.3062, 1. , -0.1443, 0. , -0.3162, 0. ], + [-0.4714, -0.1443, 1. , 0. , -0.0913, 0. ], + [ 0. , 0. , 0. , 1. , -0.5 , -0.5 ], + [ 0. , -0.3162, -0.0913, -0.5 , 1. , -0.25 ], + [-0.3227, 0. , 0. , -0.5 , -0.25 , 1. ]]) + # fmt: on + L = nx.directed_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="random" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # fmt: off + GL = np.array([[ 0.5 , -0.1531, -0.2357, 0. , 0. , -0.1614], + [-0.1531, 0.5 , -0.0722, 0. , -0.1581, 0. ], + [-0.2357, -0.0722, 0.5 , 0. , -0.0456, 0. ], + [ 0. , 0. , 0. , 0.5 , -0.25 , -0.25 ], + [ 0. , -0.1581, -0.0456, -0.25 , 0.5 , -0.125 ], + [-0.1614, 0. , 0. 
, -0.25 , -0.125 , 0.5 ]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G), walk_type="lazy") + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make a strongly connected periodic graph + G = nx.DiGraph() + G.add_edges_from(((1, 2), (2, 4), (4, 1), (1, 3), (3, 4))) + # fmt: off + GL = np.array([[ 0.5 , -0.176, -0.176, -0.25 ], + [-0.176, 0.5 , 0. , -0.176], + [-0.176, 0. , 0.5 , -0.176], + [-0.25 , -0.176, -0.176, 0.5 ]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + +def test_directed_combinatorial_laplacian(): + "Directed combinatorial Laplacian" + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". The graph contains dangling nodes, so + # the pagerank random walk is selected by directed_laplacian + G = nx.DiGraph() + G.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + # fmt: off + GL = np.array([[ 0.0366, -0.0132, -0.0153, -0.0034, -0.0020, -0.0027], + [-0.0132, 0.0450, -0.0111, -0.0076, -0.0062, -0.0069], + [-0.0153, -0.0111, 0.0408, -0.0035, -0.0083, -0.0027], + [-0.0034, -0.0076, -0.0035, 0.3688, -0.1356, -0.2187], + [-0.0020, -0.0062, -0.0083, -0.1356, 0.2026, -0.0505], + [-0.0027, -0.0069, -0.0027, -0.2187, -0.0505, 0.2815]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make the graph strongly connected, so we can use a random and lazy walk + G.add_edges_from(((2, 5), (6, 1))) + + # fmt: off + GL = np.array([[ 0.1395, -0.0349, -0.0465, 0. , 0. , -0.0581], + [-0.0349, 0.093 , -0.0116, 0. , -0.0465, 0. ], + [-0.0465, -0.0116, 0.0698, 0. , -0.0116, 0. ], + [ 0. , 0. , 0. , 0.2326, -0.1163, -0.1163], + [ 0. , -0.0465, -0.0116, -0.1163, 0.2326, -0.0581], + [-0.0581, 0. , 0. , -0.1163, -0.0581, 0.2326]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="random" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # fmt: off + GL = np.array([[ 0.0698, -0.0174, -0.0233, 0. , 0. , -0.0291], + [-0.0174, 0.0465, -0.0058, 0. , -0.0233, 0. ], + [-0.0233, -0.0058, 0.0349, 0. , -0.0058, 0. ], + [ 0. , 0. , 0. , 0.1163, -0.0581, -0.0581], + [ 0. , -0.0233, -0.0058, -0.0581, 0.1163, -0.0291], + [-0.0291, 0. , 0. , -0.0581, -0.0291, 0.1163]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="lazy" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + E = nx.DiGraph(nx.margulis_gabber_galil_graph(2)) + L = nx.directed_combinatorial_laplacian_matrix(E) + # fmt: off + expected = np.array( + [[ 0.16666667, -0.08333333, -0.08333333, 0. ], + [-0.08333333, 0.16666667, 0. , -0.08333333], + [-0.08333333, 0. , 0.16666667, -0.08333333], + [ 0. 
, -0.08333333, -0.08333333, 0.16666667]] + ) + # fmt: on + np.testing.assert_almost_equal(L, expected, decimal=6) + + with pytest.raises(nx.NetworkXError): + nx.directed_combinatorial_laplacian_matrix(G, walk_type="pagerank", alpha=100) + with pytest.raises(nx.NetworkXError): + nx.directed_combinatorial_laplacian_matrix(G, walk_type="silly") diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_modularity.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_modularity.py new file mode 100644 index 0000000..48dda00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_modularity.py @@ -0,0 +1,86 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestModularity: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = nx.havel_hakimi_graph(deg) + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". (Used for test_directed_laplacian) + cls.DG = nx.DiGraph() + cls.DG.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + + def test_modularity(self): + "Modularity matrix" + # fmt: off + B = np.array([[-1.125, 0.25, 0.25, 0.625, 0.], + [0.25, -0.5, 0.5, -0.25, 0.], + [0.25, 0.5, -0.5, -0.25, 0.], + [0.625, -0.25, -0.25, -0.125, 0.], + [0., 0., 0., 0., 0.]]) + # fmt: on + + permutation = [4, 0, 1, 2, 3] + np.testing.assert_equal(nx.modularity_matrix(self.G), B) + np.testing.assert_equal( + nx.modularity_matrix(self.G, nodelist=permutation), + B[np.ix_(permutation, permutation)], + ) + + def test_modularity_weight(self): + "Modularity matrix with weights" + # fmt: off + B = np.array([[-1.125, 0.25, 0.25, 0.625, 0.], + [0.25, -0.5, 0.5, -0.25, 0.], + [0.25, 0.5, -0.5, -0.25, 0.], + [0.625, -0.25, -0.25, -0.125, 0.], + [0., 0., 0., 0., 0.]]) + # fmt: on + + G_weighted = self.G.copy() + for n1, n2 in G_weighted.edges(): + G_weighted.edges[n1, n2]["weight"] = 0.5 + # The following test would fail in networkx 1.1 + np.testing.assert_equal(nx.modularity_matrix(G_weighted), B) + # The following test that the modularity matrix get rescaled accordingly + np.testing.assert_equal( + nx.modularity_matrix(G_weighted, weight="weight"), 0.5 * B + ) + + def test_directed_modularity(self): + "Directed Modularity matrix" + # fmt: off + B = np.array([[-0.2, 0.6, 0.8, -0.4, -0.4, -0.4], + [0., 0., 0., 0., 0., 0.], + [0.7, 0.4, -0.3, -0.6, 0.4, -0.6], + [-0.2, -0.4, -0.2, -0.4, 0.6, 0.6], + [-0.2, -0.4, -0.2, 0.6, -0.4, 0.6], + [-0.1, -0.2, -0.1, 0.8, -0.2, -0.2]]) + # fmt: on + node_permutation = [5, 1, 2, 3, 4, 6] + idx_permutation = [4, 0, 1, 2, 3, 5] + mm = nx.directed_modularity_matrix(self.DG, nodelist=sorted(self.DG)) + np.testing.assert_equal(mm, B) + np.testing.assert_equal( + nx.directed_modularity_matrix(self.DG, nodelist=node_permutation), + B[np.ix_(idx_permutation, idx_permutation)], + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_spectrum.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_spectrum.py new file mode 100644 index 0000000..01009f0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_spectrum.py @@ -0,0 +1,70 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestSpectrum: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = nx.havel_hakimi_graph(deg) + cls.P = 
nx.path_graph(3) + cls.WG = nx.Graph( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) + cls.WG.add_node(4) + cls.DG = nx.DiGraph() + nx.add_path(cls.DG, [0, 1, 2]) + + def test_laplacian_spectrum(self): + "Laplacian eigenvalues" + evals = np.array([0, 0, 1, 3, 4]) + e = sorted(nx.laplacian_spectrum(self.G)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.laplacian_spectrum(self.WG, weight=None)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.laplacian_spectrum(self.WG)) + np.testing.assert_almost_equal(e, 0.5 * evals) + e = sorted(nx.laplacian_spectrum(self.WG, weight="other")) + np.testing.assert_almost_equal(e, 0.3 * evals) + + def test_normalized_laplacian_spectrum(self): + "Normalized Laplacian eigenvalues" + evals = np.array([0, 0, 0.7712864461218, 1.5, 1.7287135538781]) + e = sorted(nx.normalized_laplacian_spectrum(self.G)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG, weight=None)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG, weight="other")) + np.testing.assert_almost_equal(e, evals) + + def test_adjacency_spectrum(self): + "Adjacency eigenvalues" + evals = np.array([-np.sqrt(2), 0, np.sqrt(2)]) + e = sorted(nx.adjacency_spectrum(self.P)) + np.testing.assert_almost_equal(e, evals) + + def test_modularity_spectrum(self): + "Modularity eigenvalues" + evals = np.array([-1.5, 0.0, 0.0]) + e = sorted(nx.modularity_spectrum(self.P)) + np.testing.assert_almost_equal(e, evals) + # Directed modularity eigenvalues + evals = np.array([-0.5, 0.0, 0.0]) + e = sorted(nx.modularity_spectrum(self.DG)) + np.testing.assert_almost_equal(e, evals) + + def test_bethe_hessian_spectrum(self): + "Bethe Hessian eigenvalues" + evals = np.array([0.5 * (9 - np.sqrt(33)), 4, 0.5 * (9 + np.sqrt(33))]) + e = sorted(nx.bethe_hessian_spectrum(self.P, r=2)) + np.testing.assert_almost_equal(e, evals) + # Collapses back to Laplacian: + e1 = sorted(nx.bethe_hessian_spectrum(self.P, r=1)) + e2 = sorted(nx.laplacian_spectrum(self.P)) + np.testing.assert_almost_equal(e1, e2) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__init__.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/__init__.py new file mode 100644 index 0000000..a805c50 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/__init__.py @@ -0,0 +1,17 @@ +""" +A package for reading and writing graphs in various formats. 
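+
+A quick round-trip through one of the plain-text formats (a minimal sketch;
+the filename here is illustrative)::
+
+    >>> import networkx as nx
+    >>> nx.write_adjlist(nx.path_graph(3), "tiny.adjlist")
+    >>> sorted(nx.read_adjlist("tiny.adjlist"))
+    ['0', '1', '2']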
+ +""" + +from networkx.readwrite.adjlist import * +from networkx.readwrite.multiline_adjlist import * +from networkx.readwrite.edgelist import * +from networkx.readwrite.pajek import * +from networkx.readwrite.leda import * +from networkx.readwrite.sparse6 import * +from networkx.readwrite.graph6 import * +from networkx.readwrite.gml import * +from networkx.readwrite.graphml import * +from networkx.readwrite.gexf import * +from networkx.readwrite.json_graph import * +from networkx.readwrite.text import * diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..777c059 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/adjlist.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/adjlist.cpython-312.pyc new file mode 100644 index 0000000..304dd74 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/adjlist.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-312.pyc new file mode 100644 index 0000000..7071468 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/gexf.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/gexf.cpython-312.pyc new file mode 100644 index 0000000..2420818 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/gexf.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/gml.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/gml.cpython-312.pyc new file mode 100644 index 0000000..8276e9c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/gml.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/graph6.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/graph6.cpython-312.pyc new file mode 100644 index 0000000..7b7bb0e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/graph6.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/graphml.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/graphml.cpython-312.pyc new file mode 100644 index 0000000..3687715 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/graphml.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/leda.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/leda.cpython-312.pyc new file mode 100644 index 0000000..06dd7d3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/leda.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-312.pyc new file mode 100644 index 0000000..63f5679 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/p2g.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/p2g.cpython-312.pyc new file mode 100644 index 0000000..653ea13 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/p2g.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/pajek.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/pajek.cpython-312.pyc new file mode 100644 index 0000000..8d66d54 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/pajek.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-312.pyc new file mode 100644 index 0000000..1c2a810 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/text.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/text.cpython-312.pyc new file mode 100644 index 0000000..664baf3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/__pycache__/text.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/adjlist.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/adjlist.py new file mode 100644 index 0000000..d759939 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/adjlist.py @@ -0,0 +1,310 @@ +""" +************** +Adjacency List +************** +Read and write NetworkX graphs as adjacency lists. + +Adjacency list format is useful for graphs without data associated +with nodes or edges and for nodes that can be meaningfully represented +as strings. + +Format +------ +The adjacency list format consists of lines with node labels. The +first label in a line is the source node. Further labels in the line +are considered target nodes and are added to the graph along with an edge +between the source node and target node. + +The graph with edges a-b, a-c, d-e can be represented as the following +adjacency list (anything following the # in a line is a comment):: + + a b c # source target target + d e +""" + +__all__ = ["generate_adjlist", "write_adjlist", "parse_adjlist", "read_adjlist"] + +import networkx as nx +from networkx.utils import open_file + + +def generate_adjlist(G, delimiter=" "): + """Generate a single line of the graph G in adjacency list format. + + Parameters + ---------- + G : NetworkX graph + + delimiter : string, optional + Separator for node labels + + Returns + ------- + lines : string + Lines of data in adjlist format. + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> for line in nx.generate_adjlist(G): + ... print(line) + 0 1 2 3 + 1 2 3 + 2 3 + 3 4 + 4 5 + 5 6 + 6 + + See Also + -------- + write_adjlist, read_adjlist + + Notes + ----- + The default `delimiter=" "` will result in unexpected results if node names contain + whitespace characters. 
To avoid this problem, specify an alternate delimiter when spaces are + valid in node names. + + NB: This option is not available for data that isn't user-generated. + + """ + directed = G.is_directed() + seen = set() + for s, nbrs in G.adjacency(): + line = str(s) + delimiter + for t, data in nbrs.items(): + if not directed and t in seen: + continue + if G.is_multigraph(): + for d in data.values(): + line += str(t) + delimiter + else: + line += str(t) + delimiter + if not directed: + seen.add(s) + yield line[: -len(delimiter)] + + +@open_file(1, mode="wb") +def write_adjlist(G, path, comments="#", delimiter=" ", encoding="utf-8"): + """Write graph G in single-line adjacency-list format to path. + + + Parameters + ---------- + G : NetworkX graph + + path : string or file + Filename or file handle for data output. + Filenames ending in .gz or .bz2 will be compressed. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels + + encoding : string, optional + Text encoding. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_adjlist(G, "path4.adjlist") + + The path can be a filehandle or a string with the name of the file. If a + filehandle is provided, it has to be opened in 'wb' mode. + + >>> fh = open("path4.adjlist2", "wb") + >>> nx.write_adjlist(G, fh) + + Notes + ----- + The default `delimiter=" "` will result in unexpected results if node names contain + whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are + valid in node names. + NB: This option is not available for data that isn't user-generated. + + This format does not store graph, node, or edge data. + + See Also + -------- + read_adjlist, generate_adjlist + """ + import sys + import time + + pargs = comments + " ".join(sys.argv) + "\n" + header = ( + pargs + + comments + + f" GMT {time.asctime(time.gmtime())}\n" + + comments + + f" {G.name}\n" + ) + path.write(header.encode(encoding)) + + for line in generate_adjlist(G, delimiter): + line += "\n" + path.write(line.encode(encoding)) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def parse_adjlist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None +): + """Parse lines of a graph adjacency list representation. + + Parameters + ---------- + lines : list or iterator of strings + Input data in adjlist format + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + nodetype : Python type, optional + Convert nodes to this type. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels. The default is whitespace. + + Returns + ------- + G: NetworkX graph + The graph corresponding to the lines in adjacency list format. 
+ + Examples + -------- + >>> lines = ["1 2 5", "2 3 4", "3 5", "4", "5"] + >>> G = nx.parse_adjlist(lines, nodetype=int) + >>> nodes = [1, 2, 3, 4, 5] + >>> all(node in G for node in nodes) + True + >>> edges = [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)] + >>> all((u, v) in G.edges() or (v, u) in G.edges() for (u, v) in edges) + True + + See Also + -------- + read_adjlist + + """ + G = nx.empty_graph(0, create_using) + for line in lines: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not len(line): + continue + vlist = line.rstrip("\n").split(delimiter) + u = vlist.pop(0) + # convert types + if nodetype is not None: + try: + u = nodetype(u) + except BaseException as err: + raise TypeError( + f"Failed to convert node ({u}) to type {nodetype}" + ) from err + G.add_node(u) + if nodetype is not None: + try: + vlist = list(map(nodetype, vlist)) + except BaseException as err: + raise TypeError( + f"Failed to convert nodes ({','.join(vlist)}) to type {nodetype}" + ) from err + G.add_edges_from([(u, v) for v in vlist]) + return G + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_adjlist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + encoding="utf-8", +): + """Read graph in adjacency list format from path. + + Parameters + ---------- + path : string or file + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be decompressed. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + nodetype : Python type, optional + Convert nodes to this type. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels. The default is whitespace. + + Returns + ------- + G: NetworkX graph + The graph corresponding to the lines in adjacency list format. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_adjlist(G, "test.adjlist") + >>> G = nx.read_adjlist("test.adjlist") + + The path can be a filehandle or a string with the name of the file. If a + filehandle is provided, it has to be opened in 'rb' mode. + + >>> fh = open("test.adjlist", "rb") + >>> G = nx.read_adjlist(fh) + + Filenames ending in .gz or .bz2 will be compressed. + + >>> nx.write_adjlist(G, "test.adjlist.gz") + >>> G = nx.read_adjlist("test.adjlist.gz") + + The optional nodetype is a function to convert node strings to nodetype. + + For example + + >>> G = nx.read_adjlist("test.adjlist", nodetype=int) + + will attempt to convert all nodes to integer type. + + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) + + The optional create_using parameter indicates the type of NetworkX graph + created. The default is `nx.Graph`, an undirected graph. + To read the data as a directed graph use + + >>> G = nx.read_adjlist("test.adjlist", create_using=nx.DiGraph) + + Notes + ----- + This format does not store graph or node data. 
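+
+    Edge data, likewise, does not survive a round-trip (a minimal sketch;
+    the filename is illustrative)::
+
+        >>> G = nx.Graph()
+        >>> G.add_edge(1, 2, weight=7)
+        >>> nx.write_adjlist(G, "test_roundtrip.adjlist")
+        >>> H = nx.read_adjlist("test_roundtrip.adjlist", nodetype=int)
+        >>> H[1][2]
+        {}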
+
+    See Also
+    --------
+    write_adjlist
+    """
+    lines = (line.decode(encoding) for line in path)
+    return parse_adjlist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+    )
diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/edgelist.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/edgelist.py
new file mode 100644
index 0000000..afdb175
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/edgelist.py
@@ -0,0 +1,489 @@
+"""
+**********
+Edge Lists
+**********
+Read and write NetworkX graphs as edge lists.
+
+The edge list format is useful for graphs with nodes that can be
+meaningfully represented as strings. Simple edge data can be stored,
+but node or graph data cannot. There is no way of representing
+isolated nodes unless the node has a self-loop edge.
+
+Format
+------
+You can read or write three formats of edge lists with these functions.
+
+Node pairs with no data::
+
+ 1 2
+
+Python dictionary as data::
+
+ 1 2 {'weight':7, 'color':'green'}
+
+Arbitrary data::
+
+ 1 2 7 green
+"""
+
+__all__ = [
+    "generate_edgelist",
+    "write_edgelist",
+    "parse_edgelist",
+    "read_edgelist",
+    "read_weighted_edgelist",
+    "write_weighted_edgelist",
+]
+
+import networkx as nx
+from networkx.utils import open_file
+
+
+def generate_edgelist(G, delimiter=" ", data=True):
+    """Generate a single line of the graph G in edge list format.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    delimiter : string, optional
+        Separator for node labels
+
+    data : bool or list of keys
+        If False generate no edge data. If True use a dictionary
+        representation of edge data. If a list of keys use a list of data
+        values corresponding to the keys.
+
+    Returns
+    -------
+    lines : string
+        Lines of data in edge list format.
+
+    Examples
+    --------
+    >>> G = nx.lollipop_graph(4, 3)
+    >>> G[1][2]["weight"] = 3
+    >>> G[3][4]["capacity"] = 12
+    >>> for line in nx.generate_edgelist(G, data=False):
+    ...     print(line)
+    0 1
+    0 2
+    0 3
+    1 2
+    1 3
+    2 3
+    3 4
+    4 5
+    5 6
+
+    >>> for line in nx.generate_edgelist(G):
+    ...     print(line)
+    0 1 {}
+    0 2 {}
+    0 3 {}
+    1 2 {'weight': 3}
+    1 3 {}
+    2 3 {}
+    3 4 {'capacity': 12}
+    4 5 {}
+    5 6 {}
+
+    >>> for line in nx.generate_edgelist(G, data=["weight"]):
+    ...     print(line)
+    0 1
+    0 2
+    0 3
+    1 2 3
+    1 3
+    2 3
+    3 4
+    4 5
+    5 6
+
+    See Also
+    --------
+    write_edgelist, read_edgelist
+    """
+    if data is True:
+        for u, v, d in G.edges(data=True):
+            e = u, v, dict(d)
+            yield delimiter.join(map(str, e))
+    elif data is False:
+        for u, v in G.edges(data=False):
+            e = u, v
+            yield delimiter.join(map(str, e))
+    else:
+        for u, v, d in G.edges(data=True):
+            e = [u, v]
+            try:
+                e.extend(d[k] for k in data)
+            except KeyError:
+                pass  # missing data for this edge, should warn?
+            yield delimiter.join(map(str, e))
+
+
+@open_file(1, mode="wb")
+def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"):
+    """Write graph as a list of edges.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+    path : file or string
+        File or filename to write. If a file is provided, it must be
+        opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed.
+    comments : string, optional
+        The character used to indicate the start of a comment
+    delimiter : string, optional
+        The string used to separate values. The default is whitespace.
+    data : bool or list, optional
+        If False write no edge data.
+        If True write a string representation of the edge data dictionary.
+        If a list (or other iterable) is provided, write the keys specified
+        in the list.
+    encoding: string, optional
+        Specify which encoding to use when writing file.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> nx.write_edgelist(G, "test.edgelist")
+    >>> G = nx.path_graph(4)
+    >>> fh = open("test.edgelist", "wb")
+    >>> nx.write_edgelist(G, fh)
+    >>> nx.write_edgelist(G, "test.edgelist.gz")
+    >>> nx.write_edgelist(G, "test.edgelist_nodata.gz", data=False)
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(1, 2, weight=7, color="red")
+    >>> nx.write_edgelist(G, "test.edgelist_bigger_nodata", data=False)
+    >>> nx.write_edgelist(G, "test.edgelist_color", data=["color"])
+    >>> nx.write_edgelist(G, "test.edgelist_color_weight", data=["color", "weight"])
+
+    See Also
+    --------
+    read_edgelist
+    write_weighted_edgelist
+    """
+
+    for line in generate_edgelist(G, delimiter, data):
+        line += "\n"
+        path.write(line.encode(encoding))
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def parse_edgelist(
+    lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True
+):
+    """Parse lines of an edge list representation of a graph.
+
+    Parameters
+    ----------
+    lines : list or iterator of strings
+        Input data in edgelist format
+    comments : string, optional
+        Marker for comment lines. Default is `'#'`. To specify that no character
+        should be treated as a comment, use ``comments=None``.
+    delimiter : string, optional
+        Separator for node labels. Default is `None`, meaning any whitespace.
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+    nodetype : Python type, optional
+        Convert nodes to this type. Default is `None`, meaning no conversion is
+        performed.
+    data : bool or list of (label,type) tuples
+        If `False` generate no edge data; if `True` use a dictionary
+        representation of edge data; otherwise a list of (label, type) tuples
+        specifying dictionary key names and types for edge data.
+ + Returns + ------- + G: NetworkX Graph + The graph corresponding to lines + + Examples + -------- + Edgelist with no data: + + >>> lines = ["1 2", "2 3", "3 4"] + >>> G = nx.parse_edgelist(lines, nodetype=int) + >>> list(G) + [1, 2, 3, 4] + >>> list(G.edges()) + [(1, 2), (2, 3), (3, 4)] + + Edgelist with data in Python dictionary representation: + + >>> lines = ["1 2 {'weight': 3}", "2 3 {'weight': 27}", "3 4 {'weight': 3.0}"] + >>> G = nx.parse_edgelist(lines, nodetype=int) + >>> list(G) + [1, 2, 3, 4] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})] + + Edgelist with data in a list: + + >>> lines = ["1 2 3", "2 3 27", "3 4 3.0"] + >>> G = nx.parse_edgelist(lines, nodetype=int, data=(("weight", float),)) + >>> list(G) + [1, 2, 3, 4] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})] + + See Also + -------- + read_weighted_edgelist + """ + from ast import literal_eval + + G = nx.empty_graph(0, create_using) + for line in lines: + if comments is not None: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not line: + continue + # split line, should have 2 or more + s = line.rstrip("\n").split(delimiter) + if len(s) < 2: + continue + u = s.pop(0) + v = s.pop(0) + d = s + if nodetype is not None: + try: + u = nodetype(u) + v = nodetype(v) + except Exception as err: + raise TypeError( + f"Failed to convert nodes {u},{v} to type {nodetype}." + ) from err + + if len(d) == 0 or data is False: + # no data or data type specified + edgedata = {} + elif data is True: + # no edge types specified + try: # try to evaluate as dictionary + if delimiter == ",": + edgedata_str = ",".join(d) + else: + edgedata_str = " ".join(d) + edgedata = dict(literal_eval(edgedata_str.strip())) + except Exception as err: + raise TypeError( + f"Failed to convert edge data ({d}) to dictionary." + ) from err + else: + # convert edge data to dictionary with specified keys and type + if len(d) != len(data): + raise IndexError( + f"Edge data {d} and data_keys {data} are not the same length" + ) + edgedata = {} + for (edge_key, edge_type), edge_value in zip(data, d): + try: + edge_value = edge_type(edge_value) + except Exception as err: + raise TypeError( + f"Failed to convert {edge_key} data {edge_value} " + f"to type {edge_type}." + ) from err + edgedata.update({edge_key: edge_value}) + G.add_edge(u, v, **edgedata) + return G + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_edgelist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + data=True, + edgetype=None, + encoding="utf-8", +): + """Read a graph from a list of edges. + + Parameters + ---------- + path : file or string + File or filename to read. If a file is provided, it must be + opened in 'rb' mode. + Filenames ending in .gz or .bz2 will be decompressed. + comments : string, optional + The character used to indicate the start of a comment. To specify that + no character should be treated as a comment, use ``comments=None``. + delimiter : string, optional + The string used to separate values. The default is whitespace. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. 
+ nodetype : int, float, str, Python type, optional + Convert node data from strings to specified type + data : bool or list of (label,type) tuples + Tuples specifying dictionary key names and types for edge data + edgetype : int, float, str, Python type, optional OBSOLETE + Convert edge data from strings to specified type and use as 'weight' + encoding: string, optional + Specify which encoding to use when reading file. + + Returns + ------- + G : graph + A networkx Graph or other type specified with create_using + + Examples + -------- + >>> nx.write_edgelist(nx.path_graph(4), "test.edgelist_P4") + >>> G = nx.read_edgelist("test.edgelist_P4") + + >>> fh = open("test.edgelist_P4", "rb") + >>> G = nx.read_edgelist(fh) + >>> fh.close() + + >>> G = nx.read_edgelist("test.edgelist_P4", nodetype=int) + >>> G = nx.read_edgelist("test.edgelist_P4", create_using=nx.DiGraph) + + Edgelist with data in a list: + + >>> textline = "1 2 3" + >>> fh = open("test.textline", "w") + >>> d = fh.write(textline) + >>> fh.close() + >>> G = nx.read_edgelist("test.textline", nodetype=int, data=(("weight", float),)) + >>> list(G) + [1, 2] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3.0})] + + See parse_edgelist() for more examples of formatting. + + See Also + -------- + parse_edgelist + write_edgelist + + Notes + ----- + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) + """ + lines = (line if isinstance(line, str) else line.decode(encoding) for line in path) + return parse_edgelist( + lines, + comments=comments, + delimiter=delimiter, + create_using=create_using, + nodetype=nodetype, + data=data, + ) + + +def write_weighted_edgelist(G, path, comments="#", delimiter=" ", encoding="utf-8"): + """Write graph G as a list of edges with numeric weights. + + Parameters + ---------- + G : graph + A NetworkX graph + path : file or string + File or filename to write. If a file is provided, it must be + opened in 'wb' mode. + Filenames ending in .gz or .bz2 will be compressed. + comments : string, optional + The character used to indicate the start of a comment + delimiter : string, optional + The string used to separate values. The default is whitespace. + encoding: string, optional + Specify which encoding to use when writing file. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=7) + >>> nx.write_weighted_edgelist(G, "test.weighted.edgelist") + + See Also + -------- + read_edgelist + write_edgelist + read_weighted_edgelist + """ + write_edgelist( + G, + path, + comments=comments, + delimiter=delimiter, + data=("weight",), + encoding=encoding, + ) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def read_weighted_edgelist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + encoding="utf-8", +): + """Read a graph as list of edges with numeric weights. + + Parameters + ---------- + path : file or string + File or filename to read. If a file is provided, it must be + opened in 'rb' mode. + Filenames ending in .gz or .bz2 will be decompressed. + comments : string, optional + The character used to indicate the start of a comment. + delimiter : string, optional + The string used to separate values. The default is whitespace. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. 
+    nodetype : int, float, str, Python type, optional
+        Convert node data from strings to specified type
+    encoding: string, optional
+        Specify which encoding to use when reading file.
+
+    Returns
+    -------
+    G : graph
+        A networkx Graph or other type specified with create_using
+
+    Notes
+    -----
+    Since nodes must be hashable, the function nodetype must return hashable
+    types (e.g. int, float, str, frozenset - or tuples of those, etc.)
+
+    Example edgelist file format.
+
+    With numeric edge data::
+
+     # read with
+     # >>> G=nx.read_weighted_edgelist(fh)
+     # source target data
+     a b 1
+     a c 3.14159
+     d e 42
+
+    See Also
+    --------
+    write_weighted_edgelist
+    """
+    return read_edgelist(
+        path,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+        data=(("weight", float),),
+        encoding=encoding,
+    )
diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/gexf.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/gexf.py
new file mode 100644
index 0000000..da726e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/gexf.py
@@ -0,0 +1,1064 @@
+"""Read and write graphs in GEXF format.
+
+.. warning::
+    This parser uses the standard xml library present in Python, which is
+    insecure - see :external+python:mod:`xml` for additional information.
+    Only parse GEXF files you trust.
+
+GEXF (Graph Exchange XML Format) is a language for describing complex
+network structures, their associated data and dynamics.
+
+This implementation does not support mixed graphs (directed and
+undirected edges together).
+
+Format
+------
+GEXF is an XML format. See http://gexf.net/schema.html for the
+specification and http://gexf.net/basic.html for examples.
+"""
+
+import itertools
+import time
+from xml.etree.ElementTree import (
+    Element,
+    ElementTree,
+    SubElement,
+    register_namespace,
+    tostring,
+)
+
+import networkx as nx
+from networkx.utils import open_file
+
+__all__ = ["write_gexf", "read_gexf", "relabel_gexf_graph", "generate_gexf"]
+
+
+@open_file(1, mode="wb")
+def write_gexf(G, path, encoding="utf-8", prettyprint=True, version="1.2draft"):
+    """Write G in GEXF format to path.
+
+    "GEXF (Graph Exchange XML Format) is a language for describing
+    complex networks structures, their associated data and dynamics" [1]_.
+
+    Node attributes are checked according to the version of the GEXF
+    schemas used for parameters which are not user defined,
+    e.g. visualization 'viz' [2]_. See example for usage.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+    path : file or string
+        File or file name to write.
+        File names ending in .gz or .bz2 will be compressed.
+    encoding : string (optional, default: 'utf-8')
+        Encoding for text data.
+    prettyprint : bool (optional, default: True)
+        If True use line breaks and indenting in output XML.
+    version: string (optional, default: '1.2draft')
+        The version of GEXF to be used for checking node attributes
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> nx.write_gexf(G, "test.gexf")
+
+    # visualization data
+    >>> G.nodes[0]["viz"] = {"size": 54}
+    >>> G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1}
+    >>> G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256}
+
+
+    Notes
+    -----
+    This implementation does not support mixed graphs (directed and undirected
+    edges together).
+
+    The node id attribute is set to be the string of the node label.
+    If you want to specify an id, set it as node data, e.g.
+    node['a']['id']=1 to set the id of node 'a' to 1.
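+
+    For example, to store node 'a' under the explicit GEXF id 1 (a minimal
+    sketch; the filename is illustrative)::
+
+        >>> G = nx.Graph()
+        >>> G.add_node("a", id=1)
+        >>> nx.write_gexf(G, "test_node_id.gexf")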
+
+    References
+    ----------
+    .. [1] GEXF File Format, http://gexf.net/
+    .. [2] GEXF schema, http://gexf.net/schema.html
+    """
+    writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version)
+    writer.add_graph(G)
+    writer.write(path)
+
+
+def generate_gexf(G, encoding="utf-8", prettyprint=True, version="1.2draft"):
+    """Generate lines of GEXF format representation of G.
+
+    "GEXF (Graph Exchange XML Format) is a language for describing
+    complex networks structures, their associated data and dynamics" [1]_.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+    encoding : string (optional, default: 'utf-8')
+        Encoding for text data.
+    prettyprint : bool (optional, default: True)
+        If True use line breaks and indenting in output XML.
+    version : string (default: 1.2draft)
+        Version of GEXF File Format (see http://gexf.net/schema.html)
+        Supported values: "1.1draft", "1.2draft"
+
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> linefeed = chr(10)  # linefeed=\n
+    >>> s = linefeed.join(nx.generate_gexf(G))
+    >>> for line in nx.generate_gexf(G):  # doctest: +SKIP
+    ...     print(line)
+
+    Notes
+    -----
+    This implementation does not support mixed graphs (directed and undirected
+    edges together).
+
+    The node id attribute is set to be the string of the node label.
+    If you want to specify an id, set it as node data, e.g.
+    node['a']['id']=1 to set the id of node 'a' to 1.
+
+    References
+    ----------
+    .. [1] GEXF File Format, https://gephi.org/gexf/format/
+    """
+    writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version)
+    writer.add_graph(G)
+    yield from str(writer).splitlines()
+
+
+@open_file(0, mode="rb")
+@nx._dispatchable(graphs=None, returns_graph=True)
+def read_gexf(path, node_type=None, relabel=False, version="1.2draft"):
+    """Read graph in GEXF format from path.
+
+    "GEXF (Graph Exchange XML Format) is a language for describing
+    complex networks structures, their associated data and dynamics" [1]_.
+
+    Parameters
+    ----------
+    path : file or string
+        Filename or file handle to read.
+        Filenames ending in .gz or .bz2 will be decompressed.
+    node_type: Python type (default: None)
+        Convert node ids to this type if not None.
+    relabel : bool (default: False)
+        If True relabel the nodes to use the GEXF node "label" attribute
+        instead of the node "id" attribute as the NetworkX node label.
+    version : string (default: 1.2draft)
+        Version of GEXF File Format (see http://gexf.net/schema.html)
+        Supported values: "1.1draft", "1.2draft"
+
+    Returns
+    -------
+    graph: NetworkX graph
+        If no parallel edges are found a Graph or DiGraph is returned.
+        Otherwise a MultiGraph or MultiDiGraph is returned.
+
+    Notes
+    -----
+    This implementation does not support mixed graphs (directed and undirected
+    edges together).
+
+    References
+    ----------
+    ..
[1] GEXF File Format, http://gexf.net/ + """ + reader = GEXFReader(node_type=node_type, version=version) + if relabel: + G = relabel_gexf_graph(reader(path)) + else: + G = reader(path) + return G + + +class GEXF: + versions = { + "1.1draft": { + "NS_GEXF": "http://www.gexf.net/1.1draft", + "NS_VIZ": "http://www.gexf.net/1.1draft/viz", + "NS_XSI": "http://www.w3.org/2001/XMLSchema-instance", + "SCHEMALOCATION": " ".join( + [ + "http://www.gexf.net/1.1draft", + "http://www.gexf.net/1.1draft/gexf.xsd", + ] + ), + "VERSION": "1.1", + }, + "1.2draft": { + "NS_GEXF": "http://www.gexf.net/1.2draft", + "NS_VIZ": "http://www.gexf.net/1.2draft/viz", + "NS_XSI": "http://www.w3.org/2001/XMLSchema-instance", + "SCHEMALOCATION": " ".join( + [ + "http://www.gexf.net/1.2draft", + "http://www.gexf.net/1.2draft/gexf.xsd", + ] + ), + "VERSION": "1.2", + }, + } + + def construct_types(self): + types = [ + (int, "integer"), + (float, "float"), + (float, "double"), + (bool, "boolean"), + (list, "string"), + (dict, "string"), + (int, "long"), + (str, "liststring"), + (str, "anyURI"), + (str, "string"), + ] + + # These additions to types allow writing numpy types + try: + import numpy as np + except ImportError: + pass + else: + # prepend so that python types are created upon read (last entry wins) + types = [ + (np.float64, "float"), + (np.float32, "float"), + (np.float16, "float"), + (np.int_, "int"), + (np.int8, "int"), + (np.int16, "int"), + (np.int32, "int"), + (np.int64, "int"), + (np.uint8, "int"), + (np.uint16, "int"), + (np.uint32, "int"), + (np.uint64, "int"), + (np.int_, "int"), + (np.intc, "int"), + (np.intp, "int"), + ] + types + + self.xml_type = dict(types) + self.python_type = dict(reversed(a) for a in types) + + # http://www.w3.org/TR/xmlschema-2/#boolean + convert_bool = { + "true": True, + "false": False, + "True": True, + "False": False, + "0": False, + 0: False, + "1": True, + 1: True, + } + + def set_version(self, version): + d = self.versions.get(version) + if d is None: + raise nx.NetworkXError(f"Unknown GEXF version {version}.") + self.NS_GEXF = d["NS_GEXF"] + self.NS_VIZ = d["NS_VIZ"] + self.NS_XSI = d["NS_XSI"] + self.SCHEMALOCATION = d["SCHEMALOCATION"] + self.VERSION = d["VERSION"] + self.version = version + + +class GEXFWriter(GEXF): + # class for writing GEXF format files + # use write_gexf() function + def __init__( + self, graph=None, encoding="utf-8", prettyprint=True, version="1.2draft" + ): + self.construct_types() + self.prettyprint = prettyprint + self.encoding = encoding + self.set_version(version) + self.xml = Element( + "gexf", + { + "xmlns": self.NS_GEXF, + "xmlns:xsi": self.NS_XSI, + "xsi:schemaLocation": self.SCHEMALOCATION, + "version": self.VERSION, + }, + ) + + # Make meta element a non-graph element + # Also add lastmodifieddate as attribute, not tag + meta_element = Element("meta") + subelement_text = f"NetworkX {nx.__version__}" + SubElement(meta_element, "creator").text = subelement_text + meta_element.set("lastmodifieddate", time.strftime("%Y-%m-%d")) + self.xml.append(meta_element) + + register_namespace("viz", self.NS_VIZ) + + # counters for edge and attribute identifiers + self.edge_id = itertools.count() + self.attr_id = itertools.count() + self.all_edge_ids = set() + # default attributes are stored in dictionaries + self.attr = {} + self.attr["node"] = {} + self.attr["edge"] = {} + self.attr["node"]["dynamic"] = {} + self.attr["node"]["static"] = {} + self.attr["edge"]["dynamic"] = {} + self.attr["edge"]["static"] = {} + + if graph is not None: + 
self.add_graph(graph) + + def __str__(self): + if self.prettyprint: + self.indent(self.xml) + s = tostring(self.xml).decode(self.encoding) + return s + + def add_graph(self, G): + # first pass through G collecting edge ids + for u, v, dd in G.edges(data=True): + eid = dd.get("id") + if eid is not None: + self.all_edge_ids.add(str(eid)) + # set graph attributes + if G.graph.get("mode") == "dynamic": + mode = "dynamic" + else: + mode = "static" + # Add a graph element to the XML + if G.is_directed(): + default = "directed" + else: + default = "undirected" + name = G.graph.get("name", "") + graph_element = Element("graph", defaultedgetype=default, mode=mode, name=name) + self.graph_element = graph_element + self.add_nodes(G, graph_element) + self.add_edges(G, graph_element) + self.xml.append(graph_element) + + def add_nodes(self, G, graph_element): + nodes_element = Element("nodes") + for node, data in G.nodes(data=True): + node_data = data.copy() + node_id = str(node_data.pop("id", node)) + kw = {"id": node_id} + label = str(node_data.pop("label", node)) + kw["label"] = label + try: + pid = node_data.pop("pid") + kw["pid"] = str(pid) + except KeyError: + pass + try: + start = node_data.pop("start") + kw["start"] = str(start) + self.alter_graph_mode_timeformat(start) + except KeyError: + pass + try: + end = node_data.pop("end") + kw["end"] = str(end) + self.alter_graph_mode_timeformat(end) + except KeyError: + pass + # add node element with attributes + node_element = Element("node", **kw) + # add node element and attr subelements + default = G.graph.get("node_default", {}) + node_data = self.add_parents(node_element, node_data) + if self.VERSION == "1.1": + node_data = self.add_slices(node_element, node_data) + else: + node_data = self.add_spells(node_element, node_data) + node_data = self.add_viz(node_element, node_data) + node_data = self.add_attributes("node", node_element, node_data, default) + nodes_element.append(node_element) + graph_element.append(nodes_element) + + def add_edges(self, G, graph_element): + def edge_key_data(G): + # helper function to unify multigraph and graph edge iterator + if G.is_multigraph(): + for u, v, key, data in G.edges(data=True, keys=True): + edge_data = data.copy() + edge_data.update(key=key) + edge_id = edge_data.pop("id", None) + if edge_id is None: + edge_id = next(self.edge_id) + while str(edge_id) in self.all_edge_ids: + edge_id = next(self.edge_id) + self.all_edge_ids.add(str(edge_id)) + yield u, v, edge_id, edge_data + else: + for u, v, data in G.edges(data=True): + edge_data = data.copy() + edge_id = edge_data.pop("id", None) + if edge_id is None: + edge_id = next(self.edge_id) + while str(edge_id) in self.all_edge_ids: + edge_id = next(self.edge_id) + self.all_edge_ids.add(str(edge_id)) + yield u, v, edge_id, edge_data + + edges_element = Element("edges") + for u, v, key, edge_data in edge_key_data(G): + kw = {"id": str(key)} + try: + edge_label = edge_data.pop("label") + kw["label"] = str(edge_label) + except KeyError: + pass + try: + edge_weight = edge_data.pop("weight") + kw["weight"] = str(edge_weight) + except KeyError: + pass + try: + edge_type = edge_data.pop("type") + kw["type"] = str(edge_type) + except KeyError: + pass + try: + start = edge_data.pop("start") + kw["start"] = str(start) + self.alter_graph_mode_timeformat(start) + except KeyError: + pass + try: + end = edge_data.pop("end") + kw["end"] = str(end) + self.alter_graph_mode_timeformat(end) + except KeyError: + pass + source_id = str(G.nodes[u].get("id", u)) + target_id = 
str(G.nodes[v].get("id", v)) + edge_element = Element("edge", source=source_id, target=target_id, **kw) + default = G.graph.get("edge_default", {}) + if self.VERSION == "1.1": + edge_data = self.add_slices(edge_element, edge_data) + else: + edge_data = self.add_spells(edge_element, edge_data) + edge_data = self.add_viz(edge_element, edge_data) + edge_data = self.add_attributes("edge", edge_element, edge_data, default) + edges_element.append(edge_element) + graph_element.append(edges_element) + + def add_attributes(self, node_or_edge, xml_obj, data, default): + # Add attrvalues to node or edge + attvalues = Element("attvalues") + if len(data) == 0: + return data + mode = "static" + for k, v in data.items(): + # rename generic multigraph key to avoid any name conflict + if k == "key": + k = "networkx_key" + val_type = type(v) + if val_type not in self.xml_type: + raise TypeError(f"attribute value type is not allowed: {val_type}") + if isinstance(v, list): + # dynamic data + for val, start, end in v: + val_type = type(val) + if start is not None or end is not None: + mode = "dynamic" + self.alter_graph_mode_timeformat(start) + self.alter_graph_mode_timeformat(end) + break + attr_id = self.get_attr_id( + str(k), self.xml_type[val_type], node_or_edge, default, mode + ) + for val, start, end in v: + e = Element("attvalue") + e.attrib["for"] = attr_id + e.attrib["value"] = str(val) + # Handle nan, inf, -inf differently + if val_type is float: + if e.attrib["value"] == "inf": + e.attrib["value"] = "INF" + elif e.attrib["value"] == "nan": + e.attrib["value"] = "NaN" + elif e.attrib["value"] == "-inf": + e.attrib["value"] = "-INF" + if start is not None: + e.attrib["start"] = str(start) + if end is not None: + e.attrib["end"] = str(end) + attvalues.append(e) + else: + # static data + mode = "static" + attr_id = self.get_attr_id( + str(k), self.xml_type[val_type], node_or_edge, default, mode + ) + e = Element("attvalue") + e.attrib["for"] = attr_id + if isinstance(v, bool): + e.attrib["value"] = str(v).lower() + else: + e.attrib["value"] = str(v) + # Handle float nan, inf, -inf differently + if val_type is float: + if e.attrib["value"] == "inf": + e.attrib["value"] = "INF" + elif e.attrib["value"] == "nan": + e.attrib["value"] = "NaN" + elif e.attrib["value"] == "-inf": + e.attrib["value"] = "-INF" + attvalues.append(e) + xml_obj.append(attvalues) + return data + + def get_attr_id(self, title, attr_type, edge_or_node, default, mode): + # find the id of the attribute or generate a new id + try: + return self.attr[edge_or_node][mode][title] + except KeyError: + # generate new id + new_id = str(next(self.attr_id)) + self.attr[edge_or_node][mode][title] = new_id + attr_kwargs = {"id": new_id, "title": title, "type": attr_type} + attribute = Element("attribute", **attr_kwargs) + # add subelement for data default value if present + default_title = default.get(title) + if default_title is not None: + default_element = Element("default") + default_element.text = str(default_title) + attribute.append(default_element) + # new insert it into the XML + attributes_element = None + for a in self.graph_element.findall("attributes"): + # find existing attributes element by class and mode + a_class = a.get("class") + a_mode = a.get("mode", "static") + if a_class == edge_or_node and a_mode == mode: + attributes_element = a + if attributes_element is None: + # create new attributes element + attr_kwargs = {"mode": mode, "class": edge_or_node} + attributes_element = Element("attributes", **attr_kwargs) + 
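+                # Insert at index 0 so the <attributes> declaration ends up
+                # ahead of the <nodes>/<edges> elements in the output, which
+                # is the element order GEXF consumers expect.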
self.graph_element.insert(0, attributes_element) + attributes_element.append(attribute) + return new_id + + def add_viz(self, element, node_data): + viz = node_data.pop("viz", False) + if viz: + color = viz.get("color") + if color is not None: + if self.VERSION == "1.1": + e = Element( + f"{{{self.NS_VIZ}}}color", + r=str(color.get("r")), + g=str(color.get("g")), + b=str(color.get("b")), + ) + else: + e = Element( + f"{{{self.NS_VIZ}}}color", + r=str(color.get("r")), + g=str(color.get("g")), + b=str(color.get("b")), + a=str(color.get("a", 1.0)), + ) + element.append(e) + + size = viz.get("size") + if size is not None: + e = Element(f"{{{self.NS_VIZ}}}size", value=str(size)) + element.append(e) + + thickness = viz.get("thickness") + if thickness is not None: + e = Element(f"{{{self.NS_VIZ}}}thickness", value=str(thickness)) + element.append(e) + + shape = viz.get("shape") + if shape is not None: + if shape.startswith("http"): + e = Element( + f"{{{self.NS_VIZ}}}shape", value="image", uri=str(shape) + ) + else: + e = Element(f"{{{self.NS_VIZ}}}shape", value=str(shape)) + element.append(e) + + position = viz.get("position") + if position is not None: + e = Element( + f"{{{self.NS_VIZ}}}position", + x=str(position.get("x")), + y=str(position.get("y")), + z=str(position.get("z")), + ) + element.append(e) + return node_data + + def add_parents(self, node_element, node_data): + parents = node_data.pop("parents", False) + if parents: + parents_element = Element("parents") + for p in parents: + e = Element("parent") + e.attrib["for"] = str(p) + parents_element.append(e) + node_element.append(parents_element) + return node_data + + def add_slices(self, node_or_edge_element, node_or_edge_data): + slices = node_or_edge_data.pop("slices", False) + if slices: + slices_element = Element("slices") + for start, end in slices: + e = Element("slice", start=str(start), end=str(end)) + slices_element.append(e) + node_or_edge_element.append(slices_element) + return node_or_edge_data + + def add_spells(self, node_or_edge_element, node_or_edge_data): + spells = node_or_edge_data.pop("spells", False) + if spells: + spells_element = Element("spells") + for start, end in spells: + e = Element("spell") + if start is not None: + e.attrib["start"] = str(start) + self.alter_graph_mode_timeformat(start) + if end is not None: + e.attrib["end"] = str(end) + self.alter_graph_mode_timeformat(end) + spells_element.append(e) + node_or_edge_element.append(spells_element) + return node_or_edge_data + + def alter_graph_mode_timeformat(self, start_or_end): + # If 'start' or 'end' appears, set timeformat + if start_or_end is not None: + if isinstance(start_or_end, str): + timeformat = "date" + elif isinstance(start_or_end, float): + timeformat = "double" + elif isinstance(start_or_end, int): + timeformat = "long" + else: + raise nx.NetworkXError( + "timeformat should be of the type int, float or str" + ) + self.graph_element.set("timeformat", timeformat) + # If Graph mode is static, alter to dynamic + if self.graph_element.get("mode") == "static": + self.graph_element.set("mode", "dynamic") + + def write(self, fh): + # Serialize graph G in GEXF to the open fh + if self.prettyprint: + self.indent(self.xml) + document = ElementTree(self.xml) + document.write(fh, encoding=self.encoding, xml_declaration=True) + + def indent(self, elem, level=0): + # in-place prettyprint formatter + i = "\n" + " " * level + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + if not elem.tail or not elem.tail.strip(): + 
elem.tail = i + for elem in elem: + self.indent(elem, level + 1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + + +class GEXFReader(GEXF): + # Class to read GEXF format files + # use read_gexf() function + def __init__(self, node_type=None, version="1.2draft"): + self.construct_types() + self.node_type = node_type + # assume simple graph and test for multigraph on read + self.simple_graph = True + self.set_version(version) + + def __call__(self, stream): + self.xml = ElementTree(file=stream) + g = self.xml.find(f"{{{self.NS_GEXF}}}graph") + if g is not None: + return self.make_graph(g) + # try all the versions + for version in self.versions: + self.set_version(version) + g = self.xml.find(f"{{{self.NS_GEXF}}}graph") + if g is not None: + return self.make_graph(g) + raise nx.NetworkXError("No element in GEXF file.") + + def make_graph(self, graph_xml): + # start with empty DiGraph or MultiDiGraph + edgedefault = graph_xml.get("defaultedgetype", None) + if edgedefault == "directed": + G = nx.MultiDiGraph() + else: + G = nx.MultiGraph() + + # graph attributes + graph_name = graph_xml.get("name", "") + if graph_name != "": + G.graph["name"] = graph_name + graph_start = graph_xml.get("start") + if graph_start is not None: + G.graph["start"] = graph_start + graph_end = graph_xml.get("end") + if graph_end is not None: + G.graph["end"] = graph_end + graph_mode = graph_xml.get("mode", "") + if graph_mode == "dynamic": + G.graph["mode"] = "dynamic" + else: + G.graph["mode"] = "static" + + # timeformat + self.timeformat = graph_xml.get("timeformat") + if self.timeformat == "date": + self.timeformat = "string" + + # node and edge attributes + attributes_elements = graph_xml.findall(f"{{{self.NS_GEXF}}}attributes") + # dictionaries to hold attributes and attribute defaults + node_attr = {} + node_default = {} + edge_attr = {} + edge_default = {} + for a in attributes_elements: + attr_class = a.get("class") + if attr_class == "node": + na, nd = self.find_gexf_attributes(a) + node_attr.update(na) + node_default.update(nd) + G.graph["node_default"] = node_default + elif attr_class == "edge": + ea, ed = self.find_gexf_attributes(a) + edge_attr.update(ea) + edge_default.update(ed) + G.graph["edge_default"] = edge_default + else: + raise # unknown attribute class + + # Hack to handle Gephi0.7beta bug + # add weight attribute + ea = {"weight": {"type": "double", "mode": "static", "title": "weight"}} + ed = {} + edge_attr.update(ea) + edge_default.update(ed) + G.graph["edge_default"] = edge_default + + # add nodes + nodes_element = graph_xml.find(f"{{{self.NS_GEXF}}}nodes") + if nodes_element is not None: + for node_xml in nodes_element.findall(f"{{{self.NS_GEXF}}}node"): + self.add_node(G, node_xml, node_attr) + + # add edges + edges_element = graph_xml.find(f"{{{self.NS_GEXF}}}edges") + if edges_element is not None: + for edge_xml in edges_element.findall(f"{{{self.NS_GEXF}}}edge"): + self.add_edge(G, edge_xml, edge_attr) + + # switch to Graph or DiGraph if no parallel edges were found. 
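+        # The reader always parses into a MultiGraph/MultiDiGraph and only
+        # downgrades here, after every edge has been seen and no repeated
+        # (source, target) pair has set self.simple_graph to False.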
+ if self.simple_graph: + if G.is_directed(): + G = nx.DiGraph(G) + else: + G = nx.Graph(G) + return G + + def add_node(self, G, node_xml, node_attr, node_pid=None): + # add a single node with attributes to the graph + + # get attributes and subattributues for node + data = self.decode_attr_elements(node_attr, node_xml) + data = self.add_parents(data, node_xml) # add any parents + if self.VERSION == "1.1": + data = self.add_slices(data, node_xml) # add slices + else: + data = self.add_spells(data, node_xml) # add spells + data = self.add_viz(data, node_xml) # add viz + data = self.add_start_end(data, node_xml) # add start/end + + # find the node id and cast it to the appropriate type + node_id = node_xml.get("id") + if self.node_type is not None: + node_id = self.node_type(node_id) + + # every node should have a label + node_label = node_xml.get("label") + data["label"] = node_label + + # parent node id + node_pid = node_xml.get("pid", node_pid) + if node_pid is not None: + data["pid"] = node_pid + + # check for subnodes, recursive + subnodes = node_xml.find(f"{{{self.NS_GEXF}}}nodes") + if subnodes is not None: + for node_xml in subnodes.findall(f"{{{self.NS_GEXF}}}node"): + self.add_node(G, node_xml, node_attr, node_pid=node_id) + + G.add_node(node_id, **data) + + def add_start_end(self, data, xml): + # start and end times + ttype = self.timeformat + node_start = xml.get("start") + if node_start is not None: + data["start"] = self.python_type[ttype](node_start) + node_end = xml.get("end") + if node_end is not None: + data["end"] = self.python_type[ttype](node_end) + return data + + def add_viz(self, data, node_xml): + # add viz element for node + viz = {} + color = node_xml.find(f"{{{self.NS_VIZ}}}color") + if color is not None: + if self.VERSION == "1.1": + viz["color"] = { + "r": int(color.get("r")), + "g": int(color.get("g")), + "b": int(color.get("b")), + } + else: + viz["color"] = { + "r": int(color.get("r")), + "g": int(color.get("g")), + "b": int(color.get("b")), + "a": float(color.get("a", 1)), + } + + size = node_xml.find(f"{{{self.NS_VIZ}}}size") + if size is not None: + viz["size"] = float(size.get("value")) + + thickness = node_xml.find(f"{{{self.NS_VIZ}}}thickness") + if thickness is not None: + viz["thickness"] = float(thickness.get("value")) + + shape = node_xml.find(f"{{{self.NS_VIZ}}}shape") + if shape is not None: + viz["shape"] = shape.get("shape") + if viz["shape"] == "image": + viz["shape"] = shape.get("uri") + + position = node_xml.find(f"{{{self.NS_VIZ}}}position") + if position is not None: + viz["position"] = { + "x": float(position.get("x", 0)), + "y": float(position.get("y", 0)), + "z": float(position.get("z", 0)), + } + + if len(viz) > 0: + data["viz"] = viz + return data + + def add_parents(self, data, node_xml): + parents_element = node_xml.find(f"{{{self.NS_GEXF}}}parents") + if parents_element is not None: + data["parents"] = [] + for p in parents_element.findall(f"{{{self.NS_GEXF}}}parent"): + parent = p.get("for") + data["parents"].append(parent) + return data + + def add_slices(self, data, node_or_edge_xml): + slices_element = node_or_edge_xml.find(f"{{{self.NS_GEXF}}}slices") + if slices_element is not None: + data["slices"] = [] + for s in slices_element.findall(f"{{{self.NS_GEXF}}}slice"): + start = s.get("start") + end = s.get("end") + data["slices"].append((start, end)) + return data + + def add_spells(self, data, node_or_edge_xml): + spells_element = node_or_edge_xml.find(f"{{{self.NS_GEXF}}}spells") + if spells_element is not None: + 
data["spells"] = [] + ttype = self.timeformat + for s in spells_element.findall(f"{{{self.NS_GEXF}}}spell"): + start = self.python_type[ttype](s.get("start")) + end = self.python_type[ttype](s.get("end")) + data["spells"].append((start, end)) + return data + + def add_edge(self, G, edge_element, edge_attr): + # add an edge to the graph + + # raise error if we find mixed directed and undirected edges + edge_direction = edge_element.get("type") + if G.is_directed() and edge_direction == "undirected": + raise nx.NetworkXError("Undirected edge found in directed graph.") + if (not G.is_directed()) and edge_direction == "directed": + raise nx.NetworkXError("Directed edge found in undirected graph.") + + # Get source and target and recast type if required + source = edge_element.get("source") + target = edge_element.get("target") + if self.node_type is not None: + source = self.node_type(source) + target = self.node_type(target) + + data = self.decode_attr_elements(edge_attr, edge_element) + data = self.add_start_end(data, edge_element) + + if self.VERSION == "1.1": + data = self.add_slices(data, edge_element) # add slices + else: + data = self.add_spells(data, edge_element) # add spells + + # GEXF stores edge ids as an attribute + # NetworkX uses them as keys in multigraphs + # if networkx_key is not specified as an attribute + edge_id = edge_element.get("id") + if edge_id is not None: + data["id"] = edge_id + + # check if there is a 'multigraph_key' and use that as edge_id + multigraph_key = data.pop("networkx_key", None) + if multigraph_key is not None: + edge_id = multigraph_key + + weight = edge_element.get("weight") + if weight is not None: + data["weight"] = float(weight) + + edge_label = edge_element.get("label") + if edge_label is not None: + data["label"] = edge_label + + if G.has_edge(source, target): + # seen this edge before - this is a multigraph + self.simple_graph = False + G.add_edge(source, target, key=edge_id, **data) + if edge_direction == "mutual": + G.add_edge(target, source, key=edge_id, **data) + + def decode_attr_elements(self, gexf_keys, obj_xml): + # Use the key information to decode the attr XML + attr = {} + # look for outer '' element + attr_element = obj_xml.find(f"{{{self.NS_GEXF}}}attvalues") + if attr_element is not None: + # loop over elements + for a in attr_element.findall(f"{{{self.NS_GEXF}}}attvalue"): + key = a.get("for") # for is required + try: # should be in our gexf_keys dictionary + title = gexf_keys[key]["title"] + except KeyError as err: + raise nx.NetworkXError(f"No attribute defined for={key}.") from err + atype = gexf_keys[key]["type"] + value = a.get("value") + if atype == "boolean": + value = self.convert_bool[value] + else: + value = self.python_type[atype](value) + if gexf_keys[key]["mode"] == "dynamic": + # for dynamic graphs use list of three-tuples + # [(value1,start1,end1), (value2,start2,end2), etc] + ttype = self.timeformat + start = self.python_type[ttype](a.get("start")) + end = self.python_type[ttype](a.get("end")) + if title in attr: + attr[title].append((value, start, end)) + else: + attr[title] = [(value, start, end)] + else: + # for static graphs just assign the value + attr[title] = value + return attr + + def find_gexf_attributes(self, attributes_element): + # Extract all the attributes and defaults + attrs = {} + defaults = {} + mode = attributes_element.get("mode") + for k in attributes_element.findall(f"{{{self.NS_GEXF}}}attribute"): + attr_id = k.get("id") + title = k.get("title") + atype = k.get("type") + attrs[attr_id] = 
{"title": title, "type": atype, "mode": mode} + # check for the 'default' subelement of key element and add + default = k.find(f"{{{self.NS_GEXF}}}default") + if default is not None: + if atype == "boolean": + value = self.convert_bool[default.text] + else: + value = self.python_type[atype](default.text) + defaults[title] = value + return attrs, defaults + + +def relabel_gexf_graph(G): + """Relabel graph using "label" node keyword for node label. + + Parameters + ---------- + G : graph + A NetworkX graph read from GEXF data + + Returns + ------- + H : graph + A NetworkX graph with relabeled nodes + + Raises + ------ + NetworkXError + If node labels are missing or not unique while relabel=True. + + Notes + ----- + This function relabels the nodes in a NetworkX graph with the + "label" attribute. It also handles relabeling the specific GEXF + node attributes "parents", and "pid". + """ + # build mapping of node labels, do some error checking + try: + mapping = [(u, G.nodes[u]["label"]) for u in G] + except KeyError as err: + raise nx.NetworkXError( + "Failed to relabel nodes: missing node labels found. Use relabel=False." + ) from err + x, y = zip(*mapping) + if len(set(y)) != len(G): + raise nx.NetworkXError( + "Failed to relabel nodes: duplicate node labels found. Use relabel=False." + ) + mapping = dict(mapping) + H = nx.relabel_nodes(G, mapping) + # relabel attributes + for n in G: + m = mapping[n] + H.nodes[m]["id"] = n + H.nodes[m].pop("label") + if "pid" in H.nodes[m]: + H.nodes[m]["pid"] = mapping[G.nodes[n]["pid"]] + if "parents" in H.nodes[m]: + H.nodes[m]["parents"] = [mapping[p] for p in G.nodes[n]["parents"]] + return H diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/gml.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/gml.py new file mode 100644 index 0000000..c53496c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/gml.py @@ -0,0 +1,879 @@ +""" +Read graphs in GML format. + +"GML, the Graph Modelling Language, is our proposal for a portable +file format for graphs. GML's key features are portability, simple +syntax, extensibility and flexibility. A GML file consists of a +hierarchical key-value lists. Graphs can be annotated with arbitrary +data structures. The idea for a common file format was born at the +GD'95; this proposal is the outcome of many discussions. GML is the +standard file format in the Graphlet graph editor system. It has been +overtaken and adapted by several other systems for drawing graphs." + +GML files are stored using a 7-bit ASCII encoding with any extended +ASCII characters (iso8859-1) appearing as HTML character entities. +You will need to give some thought into how the exported data should +interact with different languages and even different Python versions. +Re-importing from gml is also a concern. + +Without specifying a `stringizer`/`destringizer`, the code is capable of +writing `int`/`float`/`str`/`dict`/`list` data as required by the GML +specification. For writing other data types, and for reading data other +than `str` you need to explicitly supply a `stringizer`/`destringizer`. + +For additional documentation on the GML file format, please see the +`GML website `_. + +Several example graphs in GML format may be found on Mark Newman's +`Network data page `_. 
+""" + +import html.entities as htmlentitydefs +import re +from ast import literal_eval +from collections import defaultdict +from enum import Enum +from io import StringIO +from typing import Any, NamedTuple + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import open_file + +__all__ = ["read_gml", "parse_gml", "generate_gml", "write_gml"] + + +def escape(text): + """Use XML character references to escape characters. + + Use XML character references for unprintable or non-ASCII + characters, double quotes and ampersands in a string + """ + + def fixup(m): + ch = m.group(0) + return "&#" + str(ord(ch)) + ";" + + text = re.sub('[^ -~]|[&"]', fixup, text) + return text if isinstance(text, str) else str(text) + + +def unescape(text): + """Replace XML character references with the referenced characters""" + + def fixup(m): + text = m.group(0) + if text[1] == "#": + # Character reference + if text[2] == "x": + code = int(text[3:-1], 16) + else: + code = int(text[2:-1]) + else: + # Named entity + try: + code = htmlentitydefs.name2codepoint[text[1:-1]] + except KeyError: + return text # leave unchanged + try: + return chr(code) + except (ValueError, OverflowError): + return text # leave unchanged + + return re.sub("&(?:[0-9A-Za-z]+|#(?:[0-9]+|x[0-9A-Fa-f]+));", fixup, text) + + +def literal_destringizer(rep): + """Convert a Python literal to the value it represents. + + Parameters + ---------- + rep : string + A Python literal. + + Returns + ------- + value : object + The value of the Python literal. + + Raises + ------ + ValueError + If `rep` is not a Python literal. + """ + if isinstance(rep, str): + orig_rep = rep + try: + return literal_eval(rep) + except SyntaxError as err: + raise ValueError(f"{orig_rep!r} is not a valid Python literal") from err + else: + raise ValueError(f"{rep!r} is not a string") + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_gml(path, label="label", destringizer=None): + """Read graph in GML format from `path`. + + Parameters + ---------- + path : file or string + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be decompressed. + + label : string, optional + If not None, the parsed nodes will be renamed according to node + attributes indicated by `label`. Default value: 'label'. + + destringizer : callable, optional + A `destringizer` that recovers values stored as strings in GML. If it + cannot convert a string to a value, a `ValueError` is raised. Default + value : None. + + Returns + ------- + G : NetworkX graph + The parsed graph. + + Raises + ------ + NetworkXError + If the input cannot be parsed. + + See Also + -------- + write_gml, parse_gml + literal_destringizer + + Notes + ----- + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + writing `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For writing other data types, and for reading data other + than `str` you need to explicitly supply a `stringizer`/`destringizer`. + + For additional documentation on the GML file format, please see the + `GML url `_. + + See the module docstring :mod:`networkx.readwrite.gml` for more details. 
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> nx.write_gml(G, "test_path4.gml")
+
+    GML values are interpreted as strings by default:
+
+    >>> H = nx.read_gml("test_path4.gml")
+    >>> H.nodes
+    NodeView(('0', '1', '2', '3'))
+
+    When a `destringizer` is provided, GML values are converted to the provided type.
+    For example, integer nodes can be recovered as shown below:
+
+    >>> J = nx.read_gml("test_path4.gml", destringizer=int)
+    >>> J.nodes
+    NodeView((0, 1, 2, 3))
+
+    """
+
+    def filter_lines(lines):
+        for line in lines:
+            try:
+                line = line.decode("ascii")
+            except UnicodeDecodeError as err:
+                raise NetworkXError("input is not ASCII-encoded") from err
+            if not isinstance(line, str):
+                line = str(line)
+            if line and line[-1] == "\n":
+                line = line[:-1]
+            yield line
+
+    G = parse_gml_lines(filter_lines(path), label, destringizer)
+    return G
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def parse_gml(lines, label="label", destringizer=None):
+    """Parse GML graph from a string or iterable.
+
+    Parameters
+    ----------
+    lines : string or iterable of strings
+        Data in GML format.
+
+    label : string, optional
+        If not None, the parsed nodes will be renamed according to node
+        attributes indicated by `label`. Default value: 'label'.
+
+    destringizer : callable, optional
+        A `destringizer` that recovers values stored as strings in GML. If it
+        cannot convert a string to a value, a `ValueError` is raised. Default
+        value : None.
+
+    Returns
+    -------
+    G : NetworkX graph
+        The parsed graph.
+
+    Raises
+    ------
+    NetworkXError
+        If the input cannot be parsed.
+
+    See Also
+    --------
+    write_gml, read_gml
+
+    Notes
+    -----
+    This stores nested GML attributes as dictionaries in the NetworkX graph,
+    node, and edge attribute structures.
+
+    GML files are stored using a 7-bit ASCII encoding with any extended
+    ASCII characters (iso8859-1) appearing as HTML character entities.
+    Without specifying a `stringizer`/`destringizer`, the code is capable of
+    writing `int`/`float`/`str`/`dict`/`list` data as required by the GML
+    specification. For writing other data types, and for reading data other
+    than `str` you need to explicitly supply a `stringizer`/`destringizer`.
+
+    For additional documentation on the GML file format, please see the
+    `GML url `_.
+
+    See the module docstring :mod:`networkx.readwrite.gml` for more details.
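+
+    Examples
+    --------
+    A minimal example, parsing GML from an inline string:
+
+    >>> s = 'graph [ node [ id 0 label "a" ] node [ id 1 label "b" ] edge [ source 0 target 1 ] ]'
+    >>> G = nx.parse_gml(s)
+    >>> sorted(G.nodes)
+    ['a', 'b']
+    >>> list(G.edges)
+    [('a', 'b')]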
+ """ + + def decode_line(line): + if isinstance(line, bytes): + try: + line.decode("ascii") + except UnicodeDecodeError as err: + raise NetworkXError("input is not ASCII-encoded") from err + if not isinstance(line, str): + line = str(line) + return line + + def filter_lines(lines): + if isinstance(lines, str): + lines = decode_line(lines) + lines = lines.splitlines() + yield from lines + else: + for line in lines: + line = decode_line(line) + if line and line[-1] == "\n": + line = line[:-1] + if line.find("\n") != -1: + raise NetworkXError("input line contains newline") + yield line + + G = parse_gml_lines(filter_lines(lines), label, destringizer) + return G + + +class Pattern(Enum): + """encodes the index of each token-matching pattern in `tokenize`.""" + + KEYS = 0 + REALS = 1 + INTS = 2 + STRINGS = 3 + DICT_START = 4 + DICT_END = 5 + COMMENT_WHITESPACE = 6 + + +class Token(NamedTuple): + category: Pattern + value: Any + line: int + position: int + + +LIST_START_VALUE = "_networkx_list_start" + + +def parse_gml_lines(lines, label, destringizer): + """Parse GML `lines` into a graph.""" + + def tokenize(): + patterns = [ + r"[A-Za-z][0-9A-Za-z_]*\b", # keys + # reals + r"[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*|INF)(?:[Ee][+-]?[0-9]+)?", + r"[+-]?[0-9]+", # ints + r'".*?"', # strings + r"\[", # dict start + r"\]", # dict end + r"#.*$|\s+", # comments and whitespaces + ] + tokens = re.compile("|".join(f"({pattern})" for pattern in patterns)) + lineno = 0 + multilines = [] # entries spread across multiple lines + for line in lines: + pos = 0 + + # deal with entries spread across multiple lines + # + # should we actually have to deal with escaped "s then do it here + if multilines: + multilines.append(line.strip()) + if line[-1] == '"': # closing multiline entry + # multiline entries will be joined by space. cannot + # reintroduce newlines as this will break the tokenizer + line = " ".join(multilines) + multilines = [] + else: # continued multiline entry + lineno += 1 + continue + else: + if line.count('"') == 1: # opening multiline entry + if line.strip()[0] != '"' and line.strip()[-1] != '"': + # since we expect something like key "value", the " should not be found at ends + # otherwise tokenizer will pick up the formatting mistake. 
+ multilines = [line.rstrip()] + lineno += 1 + continue + + length = len(line) + + while pos < length: + match = tokens.match(line, pos) + if match is None: + m = f"cannot tokenize {line[pos:]} at ({lineno + 1}, {pos + 1})" + raise NetworkXError(m) + for i in range(len(patterns)): + group = match.group(i + 1) + if group is not None: + if i == 0: # keys + value = group.rstrip() + elif i == 1: # reals + value = float(group) + elif i == 2: # ints + value = int(group) + else: + value = group + if i != 6: # comments and whitespaces + yield Token(Pattern(i), value, lineno + 1, pos + 1) + pos += len(group) + break + lineno += 1 + yield Token(None, None, lineno + 1, 1) # EOF + + def unexpected(curr_token, expected): + category, value, lineno, pos = curr_token + value = repr(value) if value is not None else "EOF" + raise NetworkXError(f"expected {expected}, found {value} at ({lineno}, {pos})") + + def consume(curr_token, category, expected): + if curr_token.category == category: + return next(tokens) + unexpected(curr_token, expected) + + def parse_kv(curr_token): + dct = defaultdict(list) + while curr_token.category == Pattern.KEYS: + key = curr_token.value + curr_token = next(tokens) + category = curr_token.category + if category == Pattern.REALS or category == Pattern.INTS: + value = curr_token.value + curr_token = next(tokens) + elif category == Pattern.STRINGS: + value = unescape(curr_token.value[1:-1]) + if destringizer: + try: + value = destringizer(value) + except ValueError: + pass + # Special handling for empty lists and tuples + if value == "()": + value = () + if value == "[]": + value = [] + curr_token = next(tokens) + elif category == Pattern.DICT_START: + curr_token, value = parse_dict(curr_token) + else: + # Allow for string convertible id and label values + if key in ("id", "label", "source", "target"): + try: + # String convert the token value + value = unescape(str(curr_token.value)) + if destringizer: + try: + value = destringizer(value) + except ValueError: + pass + curr_token = next(tokens) + except Exception: + msg = ( + "an int, float, string, '[' or string" + + " convertible ASCII value for node id or label" + ) + unexpected(curr_token, msg) + # Special handling for nan and infinity. Since the gml language + # defines unquoted strings as keys, the numeric and string branches + # are skipped and we end up in this special branch, so we need to + # convert the current token value to a float for NAN and plain INF. + # +/-INF are handled in the pattern for 'reals' in tokenize(). This + # allows labels and values to be nan or infinity, but not keys. 
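+            # For example, `weight NAN` or `size INF` lands here: the bare
+            # token is matched by the KEYS pattern in tokenize(), while the
+            # signed forms +INF/-INF are consumed by the reals pattern.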
+ elif curr_token.value in {"NAN", "INF"}: + value = float(curr_token.value) + curr_token = next(tokens) + else: # Otherwise error out + unexpected(curr_token, "an int, float, string or '['") + dct[key].append(value) + + def clean_dict_value(value): + if not isinstance(value, list): + return value + if len(value) == 1: + return value[0] + if value[0] == LIST_START_VALUE: + return value[1:] + return value + + dct = {key: clean_dict_value(value) for key, value in dct.items()} + return curr_token, dct + + def parse_dict(curr_token): + # dict start + curr_token = consume(curr_token, Pattern.DICT_START, "'['") + # dict contents + curr_token, dct = parse_kv(curr_token) + # dict end + curr_token = consume(curr_token, Pattern.DICT_END, "']'") + return curr_token, dct + + def parse_graph(): + curr_token, dct = parse_kv(next(tokens)) + if curr_token.category is not None: # EOF + unexpected(curr_token, "EOF") + if "graph" not in dct: + raise NetworkXError("input contains no graph") + graph = dct["graph"] + if isinstance(graph, list): + raise NetworkXError("input contains more than one graph") + return graph + + tokens = tokenize() + graph = parse_graph() + + directed = graph.pop("directed", False) + multigraph = graph.pop("multigraph", False) + if not multigraph: + G = nx.DiGraph() if directed else nx.Graph() + else: + G = nx.MultiDiGraph() if directed else nx.MultiGraph() + graph_attr = {k: v for k, v in graph.items() if k not in ("node", "edge")} + G.graph.update(graph_attr) + + def pop_attr(dct, category, attr, i): + try: + return dct.pop(attr) + except KeyError as err: + raise NetworkXError(f"{category} #{i} has no {attr!r} attribute") from err + + nodes = graph.get("node", []) + mapping = {} + node_labels = set() + for i, node in enumerate(nodes if isinstance(nodes, list) else [nodes]): + id = pop_attr(node, "node", "id", i) + if id in G: + raise NetworkXError(f"node id {id!r} is duplicated") + if label is not None and label != "id": + node_label = pop_attr(node, "node", label, i) + if node_label in node_labels: + raise NetworkXError(f"node label {node_label!r} is duplicated") + node_labels.add(node_label) + mapping[id] = node_label + G.add_node(id, **node) + + edges = graph.get("edge", []) + for i, edge in enumerate(edges if isinstance(edges, list) else [edges]): + source = pop_attr(edge, "edge", "source", i) + target = pop_attr(edge, "edge", "target", i) + if source not in G: + raise NetworkXError(f"edge #{i} has undefined source {source!r}") + if target not in G: + raise NetworkXError(f"edge #{i} has undefined target {target!r}") + if not multigraph: + if not G.has_edge(source, target): + G.add_edge(source, target, **edge) + else: + arrow = "->" if directed else "--" + msg = f"edge #{i} ({source!r}{arrow}{target!r}) is duplicated" + raise nx.NetworkXError(msg) + else: + key = edge.pop("key", None) + if key is not None and G.has_edge(source, target, key): + arrow = "->" if directed else "--" + msg = f"edge #{i} ({source!r}{arrow}{target!r}, {key!r})" + msg2 = 'Hint: If multigraph add "multigraph 1" to file header.' + raise nx.NetworkXError(msg + " is duplicated\n" + msg2) + G.add_edge(source, target, key, **edge) + + if label is not None and label != "id": + G = nx.relabel_nodes(G, mapping) + return G + + +def literal_stringizer(value): + """Convert a `value` to a Python literal in GML representation. + + Parameters + ---------- + value : object + The `value` to be converted to GML representation. + + Returns + ------- + rep : string + A double-quoted Python literal representing value. 
Unprintable + characters are replaced by XML character references. + + Raises + ------ + ValueError + If `value` cannot be converted to GML. + + Notes + ----- + The original value can be recovered using the + :func:`networkx.readwrite.gml.literal_destringizer` function. + """ + + def stringize(value): + if isinstance(value, int | bool) or value is None: + if value is True: # GML uses 1/0 for boolean values. + buf.write(str(1)) + elif value is False: + buf.write(str(0)) + else: + buf.write(str(value)) + elif isinstance(value, str): + text = repr(value) + if text[0] != "u": + try: + value.encode("latin1") + except UnicodeEncodeError: + text = "u" + text + buf.write(text) + elif isinstance(value, float | complex | str | bytes): + buf.write(repr(value)) + elif isinstance(value, list): + buf.write("[") + first = True + for item in value: + if not first: + buf.write(",") + else: + first = False + stringize(item) + buf.write("]") + elif isinstance(value, tuple): + if len(value) > 1: + buf.write("(") + first = True + for item in value: + if not first: + buf.write(",") + else: + first = False + stringize(item) + buf.write(")") + elif value: + buf.write("(") + stringize(value[0]) + buf.write(",)") + else: + buf.write("()") + elif isinstance(value, dict): + buf.write("{") + first = True + for key, value in value.items(): + if not first: + buf.write(",") + else: + first = False + stringize(key) + buf.write(":") + stringize(value) + buf.write("}") + elif isinstance(value, set): + buf.write("{") + first = True + for item in value: + if not first: + buf.write(",") + else: + first = False + stringize(item) + buf.write("}") + else: + msg = f"{value!r} cannot be converted into a Python literal" + raise ValueError(msg) + + buf = StringIO() + stringize(value) + return buf.getvalue() + + +def generate_gml(G, stringizer=None): + r"""Generate a single entry of the graph `G` in GML format. + + Parameters + ---------- + G : NetworkX graph + The graph to be converted to GML. + + stringizer : callable, optional + A `stringizer` which converts non-int/non-float/non-dict values into + strings. If it cannot convert a value into a string, it should raise a + `ValueError` to indicate that. Default value: None. + + Returns + ------- + lines: generator of strings + Lines of GML data. Newlines are not appended. + + Raises + ------ + NetworkXError + If `stringizer` cannot convert a value into a string, or the value to + convert is not a string while `stringizer` is None. + + See Also + -------- + literal_stringizer + + Notes + ----- + Graph attributes named 'directed', 'multigraph', 'node' or + 'edge', node attributes named 'id' or 'label', edge attributes + named 'source' or 'target' (or 'key' if `G` is a multigraph) + are ignored because these attribute names are used to encode the graph + structure. + + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + writing `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For writing other data types, and for reading data other + than `str` you need to explicitly supply a `stringizer`/`destringizer`. + + For additional documentation on the GML file format, please see the + `GML url `_. + + See the module docstring :mod:`networkx.readwrite.gml` for more details. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> G.add_node("1") + >>> print("\n".join(nx.generate_gml(G))) + graph [ + node [ + id 0 + label "1" + ] + ] + >>> G = nx.MultiGraph([("a", "b"), ("a", "b")]) + >>> print("\n".join(nx.generate_gml(G))) + graph [ + multigraph 1 + node [ + id 0 + label "a" + ] + node [ + id 1 + label "b" + ] + edge [ + source 0 + target 1 + key 0 + ] + edge [ + source 0 + target 1 + key 1 + ] + ] + """ + valid_keys = re.compile("^[A-Za-z][0-9A-Za-z_]*$") + + def stringize(key, value, ignored_keys, indent, in_list=False): + if not isinstance(key, str): + raise NetworkXError(f"{key!r} is not a string") + if not valid_keys.match(key): + raise NetworkXError(f"{key!r} is not a valid key") + if not isinstance(key, str): + key = str(key) + if key not in ignored_keys: + if isinstance(value, int | bool): + if key == "label": + yield indent + key + ' "' + str(value) + '"' + elif value is True: + # python bool is an instance of int + yield indent + key + " 1" + elif value is False: + yield indent + key + " 0" + # GML only supports signed 32-bit integers + elif value < -(2**31) or value >= 2**31: + yield indent + key + ' "' + str(value) + '"' + else: + yield indent + key + " " + str(value) + elif isinstance(value, float): + text = repr(value).upper() + # GML matches INF to keys, so prepend + to INF. Use repr(float(*)) + # instead of string literal to future proof against changes to repr. + if text == repr(float("inf")).upper(): + text = "+" + text + else: + # GML requires that a real literal contain a decimal point, but + # repr may not output a decimal point when the mantissa is + # integral and hence needs fixing. + epos = text.rfind("E") + if epos != -1 and text.find(".", 0, epos) == -1: + text = text[:epos] + "." + text[epos:] + if key == "label": + yield indent + key + ' "' + text + '"' + else: + yield indent + key + " " + text + elif isinstance(value, dict): + yield indent + key + " [" + next_indent = indent + " " + for key, value in value.items(): + yield from stringize(key, value, (), next_indent) + yield indent + "]" + elif isinstance(value, tuple) and key == "label": + yield indent + key + f' "({",".join(repr(v) for v in value)})"' + elif isinstance(value, list | tuple) and key != "label" and not in_list: + if len(value) == 0: + yield indent + key + " " + f'"{value!r}"' + if len(value) == 1: + yield indent + key + " " + f'"{LIST_START_VALUE}"' + for val in value: + yield from stringize(key, val, (), indent, True) + else: + if stringizer: + try: + value = stringizer(value) + except ValueError as err: + raise NetworkXError( + f"{value!r} cannot be converted into a string" + ) from err + if not isinstance(value, str): + raise NetworkXError(f"{value!r} is not a string") + yield indent + key + ' "' + escape(value) + '"' + + multigraph = G.is_multigraph() + yield "graph [" + + # Output graph attributes + if G.is_directed(): + yield " directed 1" + if multigraph: + yield " multigraph 1" + ignored_keys = {"directed", "multigraph", "node", "edge"} + for attr, value in G.graph.items(): + yield from stringize(attr, value, ignored_keys, " ") + + # Output node data + node_id = dict(zip(G, range(len(G)))) + ignored_keys = {"id", "label"} + for node, attrs in G.nodes.items(): + yield " node [" + yield " id " + str(node_id[node]) + yield from stringize("label", node, (), " ") + for attr, value in attrs.items(): + yield from stringize(attr, value, ignored_keys, " ") + yield " ]" + + # Output edge data + ignored_keys = {"source", "target"} + kwargs = {"data": True} + if multigraph: 
+ ignored_keys.add("key") + kwargs["keys"] = True + for e in G.edges(**kwargs): + yield " edge [" + yield " source " + str(node_id[e[0]]) + yield " target " + str(node_id[e[1]]) + if multigraph: + yield from stringize("key", e[2], (), " ") + for attr, value in e[-1].items(): + yield from stringize(attr, value, ignored_keys, " ") + yield " ]" + yield "]" + + +@open_file(1, mode="wb") +def write_gml(G, path, stringizer=None): + """Write a graph `G` in GML format to the file or file handle `path`. + + Parameters + ---------- + G : NetworkX graph + The graph to be converted to GML. + + path : string or file + Filename or file handle to write to. + Filenames ending in .gz or .bz2 will be compressed. + + stringizer : callable, optional + A `stringizer` which converts non-int/non-float/non-dict values into + strings. If it cannot convert a value into a string, it should raise a + `ValueError` to indicate that. Default value: None. + + Raises + ------ + NetworkXError + If `stringizer` cannot convert a value into a string, or the value to + convert is not a string while `stringizer` is None. + + See Also + -------- + read_gml, generate_gml + literal_stringizer + + Notes + ----- + Graph attributes named 'directed', 'multigraph', 'node' or + 'edge', node attributes named 'id' or 'label', edge attributes + named 'source' or 'target' (or 'key' if `G` is a multigraph) + are ignored because these attribute names are used to encode the graph + structure. + + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + writing `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For writing other data types, and for reading data other + than `str` you need to explicitly supply a `stringizer`/`destringizer`. + + Note that while we allow non-standard GML to be read from a file, we make + sure to write GML format. In particular, underscores are not allowed in + attribute names. + For additional documentation on the GML file format, please see the + `GML url `_. + + See the module docstring :mod:`networkx.readwrite.gml` for more details. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.write_gml(G, "test_path5.gml") + + Filenames ending in .gz or .bz2 will be compressed. + + >>> nx.write_gml(G, "test_path5.gml.gz") + """ + for line in generate_gml(G, stringizer): + path.write((line + "\n").encode("ascii")) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/graph6.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/graph6.py new file mode 100644 index 0000000..cdc2925 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/graph6.py @@ -0,0 +1,427 @@ +# Original author: D. Eppstein, UC Irvine, August 12, 2003. +# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain. +"""Functions for reading and writing graphs in the *graph6* format. + +The *graph6* file format is suitable for small graphs or large dense +graphs. For large sparse graphs, use the *sparse6* format. + +For more information, see the `graph6`_ homepage. + +.. 
_graph6: http://users.cecs.anu.edu.au/~bdm/data/formats.html + +""" + +from itertools import islice + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for, open_file + +__all__ = ["from_graph6_bytes", "read_graph6", "to_graph6_bytes", "write_graph6"] + + +def _generate_graph6_bytes(G, nodes, header): + """Yield bytes in the graph6 encoding of a graph. + + `G` is an undirected simple graph. `nodes` is the list of nodes for + which the node-induced subgraph will be encoded; if `nodes` is the + list of all nodes in the graph, the entire graph will be + encoded. `header` is a Boolean that specifies whether to generate + the header ``b'>>graph6<<'`` before the remaining data. + + This function generates `bytes` objects in the following order: + + 1. the header (if requested), + 2. the encoding of the number of nodes, + 3. each character, one-at-a-time, in the encoding of the requested + node-induced subgraph, + 4. a newline character. + + This function raises :exc:`ValueError` if the graph is too large for + the graph6 format (that is, greater than ``2 ** 36`` nodes). + + """ + n = len(G) + if n >= 2**36: + raise ValueError( + "graph6 is only defined if number of nodes is less than 2 ** 36" + ) + if header: + yield b">>graph6<<" + for d in n_to_data(n): + yield str.encode(chr(d + 63)) + # This generates the same as `(v in G[u] for u, v in combinations(G, 2))`, + # but in "column-major" order instead of "row-major" order. + bits = (nodes[j] in G[nodes[i]] for j in range(1, n) for i in range(j)) + chunk = list(islice(bits, 6)) + while chunk: + d = sum(b << 5 - i for i, b in enumerate(chunk)) + yield str.encode(chr(d + 63)) + chunk = list(islice(bits, 6)) + yield b"\n" + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_graph6_bytes(bytes_in): + """Read a simple undirected graph in graph6 format from bytes. + + Parameters + ---------- + bytes_in : bytes + Data in graph6 format + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If `bytes_in` is unable to be parsed in graph6 format + + ValueError + If any character ``c`` in bytes_in does not satisfy + ``63 <= ord(c) < 127``. + + Examples + -------- + >>> G = nx.from_graph6_bytes(b"A_") + >>> sorted(G.edges()) + [(0, 1)] + + Notes + ----- + Per the graph6 spec, the header (e.g. ``b'>>graph6<<'``) must not be + followed by a newline character. + + See Also + -------- + read_graph6, write_graph6 + + References + ---------- + .. [1] Graph6 specification + + + """ + + def bits(): + """Returns sequence of individual bits from 6-bit-per-value + list of data values.""" + for d in data: + for i in [5, 4, 3, 2, 1, 0]: + yield (d >> i) & 1 + + # Ignore trailing newline + bytes_in = bytes_in.rstrip(b"\n") + + if bytes_in.startswith(b">>graph6<<"): + bytes_in = bytes_in[10:] + + data = [c - 63 for c in bytes_in] + if any(c > 63 for c in data): + raise ValueError("each input character must be in range(63, 127)") + + n, data = data_to_n(data) + nd = (n * (n - 1) // 2 + 5) // 6 + if len(data) != nd: + raise NetworkXError( + f"Expected {n * (n - 1) // 2} bits but got {len(data) * 6} in graph6" + ) + + G = nx.Graph() + G.add_nodes_from(range(n)) + for (i, j), b in zip(((i, j) for j in range(1, n) for i in range(j)), bits()): + if b: + G.add_edge(i, j) + + return G + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def to_graph6_bytes(G, nodes=None, header=True): + """Convert a simple undirected graph to bytes in graph6 format. 
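+
+    The payload packs the upper triangle of the adjacency matrix six bits per
+    printable character, so a graph on ``n`` nodes takes roughly
+    ``(n*(n-1)/2 + 5) // 6`` payload bytes plus a short size prefix.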
+
+    Parameters
+    ----------
+    G : Graph (undirected)
+
+    nodes: list or iterable
+       Nodes are labeled 0...n-1 in the order provided.  If None the ordering
+       given by ``G.nodes()`` is used.
+
+    header: bool
+       If True add '>>graph6<<' bytes to head of data.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If the graph is directed or is a multigraph.
+
+    ValueError
+        If the graph has at least ``2 ** 36`` nodes; the graph6 format
+        is only defined for graphs of order less than ``2 ** 36``.
+
+    Examples
+    --------
+    >>> nx.to_graph6_bytes(nx.path_graph(2))
+    b'>>graph6<<A_\n'
+
+    See Also
+    --------
+    from_graph6_bytes, read_graph6, write_graph6
+
+    References
+    ----------
+    .. [1] Graph6 specification
+           <http://users.cecs.anu.edu.au/~bdm/data/formats.html>
+
+    """
+    if nodes is not None:
+        G = G.subgraph(nodes)
+    H = nx.convert_node_labels_to_integers(G)
+    nodes = sorted(H.nodes())
+    return b"".join(_generate_graph6_bytes(H, nodes, header))
+
+
+@open_file(0, mode="rb")
+@nx._dispatchable(graphs=None, returns_graph=True)
+def read_graph6(path):
+    """Read simple undirected graphs in graph6 format from path.
+
+    Parameters
+    ----------
+    path : file or string
+       Filename or file handle to read.
+       Filenames ending in .gz or .bz2 will be decompressed.
+
+    Returns
+    -------
+    G : Graph or list of Graphs
+       If the file contains multiple lines then a list of graphs is returned
+
+    Raises
+    ------
+    NetworkXError
+        If the string is unable to be parsed in graph6 format
+
+    Examples
+    --------
+    You can read a graph6 file by giving the path to the file::
+
+        >>> import tempfile
+        >>> with tempfile.NamedTemporaryFile(delete=False) as f:
+        ...     _ = f.write(b">>graph6<<A_\n")
+        ...
+        >>> G = nx.read_graph6(f.name)
+        >>> list(G.edges())
+        [(0, 1)]
+
+    You can also read a graph6 file by giving an open file-like object::
+
+        >>> import tempfile
+        >>> with tempfile.NamedTemporaryFile() as f:
+        ...     _ = f.write(b">>graph6<<A_\n")
+        ...     _ = f.seek(0)
+        ...     G = nx.read_graph6(f)
+        >>> list(G.edges())
+        [(0, 1)]
+
+    See Also
+    --------
+    from_graph6_bytes, write_graph6
+
+    References
+    ----------
+    .. [1] Graph6 specification
+           <http://users.cecs.anu.edu.au/~bdm/data/formats.html>
+
+    """
+    glist = []
+    for line in path:
+        line = line.strip()
+        if not len(line):
+            continue
+        glist.append(from_graph6_bytes(line))
+    if len(glist) == 1:
+        return glist[0]
+    else:
+        return glist
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@open_file(1, mode="wb")
+def write_graph6(G, path, nodes=None, header=True):
+    """Write a simple undirected graph to a path in graph6 format.
+
+    Parameters
+    ----------
+    G : Graph (undirected)
+
+    path : file or string
+       File or filename to write.
+       Filenames ending in .gz or .bz2 will be compressed.
+
+    nodes: list or iterable
+       Nodes are labeled 0...n-1 in the order provided.  If None the ordering
+       given by ``G.nodes()`` is used.
+
+    header: bool
+       If True add '>>graph6<<' string to head of data
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If the graph is directed or is a multigraph.
+
+    ValueError
+        If the graph has at least ``2 ** 36`` nodes; the graph6 format
+        is only defined for graphs of order less than ``2 ** 36``.
+
+    Examples
+    --------
+    You can write a graph6 file by giving the path to a file::
+
+        >>> import tempfile
+        >>> with tempfile.NamedTemporaryFile(delete=False) as f:
+        ...     nx.write_graph6(nx.path_graph(2), f.name)
+        ...     _ = f.seek(0)
+        ...     print(f.read())
+        b'>>graph6<<A_\n'
+
+    See Also
+    --------
+    from_graph6_bytes, read_graph6
+
+    References
+    ----------
+    .. [1] Graph6 specification
+           <http://users.cecs.anu.edu.au/~bdm/data/formats.html>
+
+    """
+    return write_graph6_file(G, path, nodes=nodes, header=header)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+def write_graph6_file(G, f, nodes=None, header=True):
+    """Write a simple undirected graph to a file-like object in graph6 format.
+
+    Parameters
+    ----------
+    G : Graph (undirected)
+
+    f : file-like object
+       The file to write.
+ + nodes: list or iterable + Nodes are labeled 0...n-1 in the order provided. If None the ordering + given by ``G.nodes()`` is used. + + header: bool + If True add '>>graph6<<' string to head of data + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + ValueError + If the graph has at least ``2 ** 36`` nodes; the graph6 format + is only defined for graphs of order less than ``2 ** 36``. + + Examples + -------- + You can write a graph6 file by giving an open file-like object:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile() as f: + ... nx.write_graph6(nx.path_graph(2), f) + ... _ = f.seek(0) + ... print(f.read()) + b'>>graph6< + + """ + if nodes is not None: + G = G.subgraph(nodes) + H = nx.convert_node_labels_to_integers(G) + nodes = sorted(H.nodes()) + for b in _generate_graph6_bytes(H, nodes, header): + f.write(b) + + +def data_to_n(data): + """Read initial one-, four- or eight-unit value from graph6 + integer sequence. + + Return (value, rest of seq.)""" + if data[0] <= 62: + return data[0], data[1:] + if data[1] <= 62: + return (data[1] << 12) + (data[2] << 6) + data[3], data[4:] + return ( + (data[2] << 30) + + (data[3] << 24) + + (data[4] << 18) + + (data[5] << 12) + + (data[6] << 6) + + data[7], + data[8:], + ) + + +def n_to_data(n): + """Convert an integer to one-, four- or eight-unit graph6 sequence. + + This function is undefined if `n` is not in ``range(2 ** 36)``. + + """ + if n <= 62: + return [n] + elif n <= 258047: + return [63, (n >> 12) & 0x3F, (n >> 6) & 0x3F, n & 0x3F] + else: # if n <= 68719476735: + return [ + 63, + 63, + (n >> 30) & 0x3F, + (n >> 24) & 0x3F, + (n >> 18) & 0x3F, + (n >> 12) & 0x3F, + (n >> 6) & 0x3F, + n & 0x3F, + ] diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/graphml.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/graphml.py new file mode 100644 index 0000000..6ca1741 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/graphml.py @@ -0,0 +1,1053 @@ +""" +******* +GraphML +******* +Read and write graphs in GraphML format. + +.. warning:: + + This parser uses the standard xml library present in Python, which is + insecure - see :external+python:mod:`xml` for additional information. + Only parse GraphML files you trust. + +This implementation does not support mixed graphs (directed and unidirected +edges together), hyperedges, nested graphs, or ports. + +"GraphML is a comprehensive and easy-to-use file format for graphs. It +consists of a language core to describe the structural properties of a +graph and a flexible extension mechanism to add application-specific +data. Its main features include support of + + * directed, undirected, and mixed graphs, + * hypergraphs, + * hierarchical graphs, + * graphical representations, + * references to external data, + * application-specific attribute data, and + * light-weight parsers. + +Unlike many other file formats for graphs, GraphML does not use a +custom syntax. Instead, it is based on XML and hence ideally suited as +a common denominator for all kinds of services generating, archiving, +or processing graphs." + +http://graphml.graphdrawing.org/ + +Format +------ +GraphML is an XML format. See +http://graphml.graphdrawing.org/specification.html for the specification and +http://graphml.graphdrawing.org/primer/graphml-primer.html +for examples. 
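+
+A minimal usage sketch (illustrative only)::
+
+    import networkx as nx
+
+    G = nx.path_graph(4)
+    nx.write_graphml(G, "path.graphml")
+    H = nx.read_graphml("path.graphml")  # node ids are read back as str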
+""" + +import warnings +from collections import defaultdict + +import networkx as nx +from networkx.utils import open_file + +__all__ = [ + "write_graphml", + "read_graphml", + "generate_graphml", + "write_graphml_xml", + "write_graphml_lxml", + "parse_graphml", + "GraphMLWriter", + "GraphMLReader", +] + + +@open_file(1, mode="wb") +def write_graphml_xml( + G, + path, + encoding="utf-8", + prettyprint=True, + infer_numeric_types=False, + named_key_ids=False, + edge_id_from_attribute=None, +): + """Write G in GraphML XML format to path + + Parameters + ---------- + G : graph + A networkx graph + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. + encoding : string (optional) + Encoding for text data. + prettyprint : bool (optional) + If True use line breaks and indenting in output XML. + infer_numeric_types : boolean + Determine if numeric types should be generalized. + For example, if edges have both int and float 'weight' attributes, + we infer in GraphML that both are floats. + named_key_ids : bool (optional) + If True use attr.name as value for key elements' id attribute. + edge_id_from_attribute : dict key (optional) + If provided, the graphml edge id is set by looking up the corresponding + edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data, + the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_graphml(G, "test.graphml") + + Notes + ----- + This implementation does not support mixed graphs (directed + and unidirected edges together) hyperedges, nested graphs, or ports. + """ + writer = GraphMLWriter( + encoding=encoding, + prettyprint=prettyprint, + infer_numeric_types=infer_numeric_types, + named_key_ids=named_key_ids, + edge_id_from_attribute=edge_id_from_attribute, + ) + writer.add_graph_element(G) + writer.dump(path) + + +@open_file(1, mode="wb") +def write_graphml_lxml( + G, + path, + encoding="utf-8", + prettyprint=True, + infer_numeric_types=False, + named_key_ids=False, + edge_id_from_attribute=None, +): + """Write G in GraphML XML format to path + + This function uses the LXML framework and should be faster than + the version using the xml library. + + Parameters + ---------- + G : graph + A networkx graph + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. + encoding : string (optional) + Encoding for text data. + prettyprint : bool (optional) + If True use line breaks and indenting in output XML. + infer_numeric_types : boolean + Determine if numeric types should be generalized. + For example, if edges have both int and float 'weight' attributes, + we infer in GraphML that both are floats. + named_key_ids : bool (optional) + If True use attr.name as value for key elements' id attribute. + edge_id_from_attribute : dict key (optional) + If provided, the graphml edge id is set by looking up the corresponding + edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data, + the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_graphml_lxml(G, "fourpath.graphml") + + Notes + ----- + This implementation does not support mixed graphs (directed + and unidirected edges together) hyperedges, nested graphs, or ports. 
+ """ + try: + import lxml.etree as lxmletree + except ImportError: + return write_graphml_xml( + G, + path, + encoding, + prettyprint, + infer_numeric_types, + named_key_ids, + edge_id_from_attribute, + ) + + writer = GraphMLWriterLxml( + path, + graph=G, + encoding=encoding, + prettyprint=prettyprint, + infer_numeric_types=infer_numeric_types, + named_key_ids=named_key_ids, + edge_id_from_attribute=edge_id_from_attribute, + ) + writer.dump() + + +def generate_graphml( + G, + encoding="utf-8", + prettyprint=True, + named_key_ids=False, + edge_id_from_attribute=None, +): + """Generate GraphML lines for G + + Parameters + ---------- + G : graph + A networkx graph + encoding : string (optional) + Encoding for text data. + prettyprint : bool (optional) + If True use line breaks and indenting in output XML. + named_key_ids : bool (optional) + If True use attr.name as value for key elements' id attribute. + edge_id_from_attribute : dict key (optional) + If provided, the graphml edge id is set by looking up the corresponding + edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data, + the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> linefeed = chr(10) # linefeed = \n + >>> s = linefeed.join(nx.generate_graphml(G)) + >>> for line in nx.generate_graphml(G): # doctest: +SKIP + ... print(line) + + Notes + ----- + This implementation does not support mixed graphs (directed and unidirected + edges together) hyperedges, nested graphs, or ports. + """ + writer = GraphMLWriter( + encoding=encoding, + prettyprint=prettyprint, + named_key_ids=named_key_ids, + edge_id_from_attribute=edge_id_from_attribute, + ) + writer.add_graph_element(G) + yield from str(writer).splitlines() + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False): + """Read graph in GraphML format from path. + + Parameters + ---------- + path : file or string + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be decompressed. + + node_type: Python type (default: str) + Convert node ids to this type + + edge_key_type: Python type (default: int) + Convert graphml edge ids to this type. Multigraphs use id as edge key. + Non-multigraphs add to edge attribute dict with name "id". + + force_multigraph : bool (default: False) + If True, return a multigraph with edge keys. If False (the default) + return a multigraph when multiedges are in the graph. + + Returns + ------- + graph: NetworkX graph + If parallel edges are present or `force_multigraph=True` then + a MultiGraph or MultiDiGraph is returned. Otherwise a Graph/DiGraph. + The returned graph is directed if the file indicates it should be. + + Notes + ----- + Default node and edge attributes are not propagated to each node and edge. + They can be obtained from `G.graph` and applied to node and edge attributes + if desired using something like this: + + >>> default_color = G.graph["node_default"]["color"] # doctest: +SKIP + >>> for node, data in G.nodes(data=True): # doctest: +SKIP + ... if "color" not in data: + ... data["color"] = default_color + >>> default_color = G.graph["edge_default"]["color"] # doctest: +SKIP + >>> for u, v, data in G.edges(data=True): # doctest: +SKIP + ... if "color" not in data: + ... 
data["color"] = default_color + + This implementation does not support mixed graphs (directed and unidirected + edges together), hypergraphs, nested graphs, or ports. + + For multigraphs the GraphML edge "id" will be used as the edge + key. If not specified then they "key" attribute will be used. If + there is no "key" attribute a default NetworkX multigraph edge key + will be provided. + + Files with the yEd "yfiles" extension can be read. The type of the node's + shape is preserved in the `shape_type` node attribute. + + yEd compressed files ("file.graphmlz" extension) can be read by renaming + the file to "file.graphml.gz". + + """ + reader = GraphMLReader(node_type, edge_key_type, force_multigraph) + # need to check for multiple graphs + glist = list(reader(path=path)) + if len(glist) == 0: + # If no graph comes back, try looking for an incomplete header + header = b'' + path.seek(0) + old_bytes = path.read() + new_bytes = old_bytes.replace(b"", header) + glist = list(reader(string=new_bytes)) + if len(glist) == 0: + raise nx.NetworkXError("file not successfully read as graphml") + return glist[0] + + +@nx._dispatchable(graphs=None, returns_graph=True) +def parse_graphml( + graphml_string, node_type=str, edge_key_type=int, force_multigraph=False +): + """Read graph in GraphML format from string. + + Parameters + ---------- + graphml_string : string + String containing graphml information + (e.g., contents of a graphml file). + + node_type: Python type (default: str) + Convert node ids to this type + + edge_key_type: Python type (default: int) + Convert graphml edge ids to this type. Multigraphs use id as edge key. + Non-multigraphs add to edge attribute dict with name "id". + + force_multigraph : bool (default: False) + If True, return a multigraph with edge keys. If False (the default) + return a multigraph when multiedges are in the graph. + + + Returns + ------- + graph: NetworkX graph + If no parallel edges are found a Graph or DiGraph is returned. + Otherwise a MultiGraph or MultiDiGraph is returned. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> linefeed = chr(10) # linefeed = \n + >>> s = linefeed.join(nx.generate_graphml(G)) + >>> H = nx.parse_graphml(s) + + Notes + ----- + Default node and edge attributes are not propagated to each node and edge. + They can be obtained from `G.graph` and applied to node and edge attributes + if desired using something like this: + + >>> default_color = G.graph["node_default"]["color"] # doctest: +SKIP + >>> for node, data in G.nodes(data=True): # doctest: +SKIP + ... if "color" not in data: + ... data["color"] = default_color + >>> default_color = G.graph["edge_default"]["color"] # doctest: +SKIP + >>> for u, v, data in G.edges(data=True): # doctest: +SKIP + ... if "color" not in data: + ... data["color"] = default_color + + This implementation does not support mixed graphs (directed and unidirected + edges together), hypergraphs, nested graphs, or ports. + + For multigraphs the GraphML edge "id" will be used as the edge + key. If not specified then they "key" attribute will be used. If + there is no "key" attribute a default NetworkX multigraph edge key + will be provided. 
+ + """ + reader = GraphMLReader(node_type, edge_key_type, force_multigraph) + # need to check for multiple graphs + glist = list(reader(string=graphml_string)) + if len(glist) == 0: + # If no graph comes back, try looking for an incomplete header + header = '' + new_string = graphml_string.replace("", header) + glist = list(reader(string=new_string)) + if len(glist) == 0: + raise nx.NetworkXError("file not successfully read as graphml") + return glist[0] + + +class GraphML: + NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns" + NS_XSI = "http://www.w3.org/2001/XMLSchema-instance" + # xmlns:y="http://www.yworks.com/xml/graphml" + NS_Y = "http://www.yworks.com/xml/graphml" + SCHEMALOCATION = " ".join( + [ + "http://graphml.graphdrawing.org/xmlns", + "http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd", + ] + ) + + def construct_types(self): + types = [ + (int, "integer"), # for Gephi GraphML bug + (str, "yfiles"), + (str, "string"), + (int, "int"), + (int, "long"), + (float, "float"), + (float, "double"), + (bool, "boolean"), + ] + + # These additions to types allow writing numpy types + try: + import numpy as np + except: + pass + else: + # prepend so that python types are created upon read (last entry wins) + types = [ + (np.float64, "float"), + (np.float32, "float"), + (np.float16, "float"), + (np.int_, "int"), + (np.int8, "int"), + (np.int16, "int"), + (np.int32, "int"), + (np.int64, "int"), + (np.uint8, "int"), + (np.uint16, "int"), + (np.uint32, "int"), + (np.uint64, "int"), + (np.int_, "int"), + (np.intc, "int"), + (np.intp, "int"), + ] + types + + self.xml_type = dict(types) + self.python_type = dict(reversed(a) for a in types) + + # This page says that data types in GraphML follow Java(TM). + # http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition + # true and false are the only boolean literals: + # http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals + convert_bool = { + # We use data.lower() in actual use. + "true": True, + "false": False, + # Include integer strings for convenience. + "0": False, + 0: False, + "1": True, + 1: True, + } + + def get_xml_type(self, key): + """Wrapper around the xml_type dict that raises a more informative + exception message when a user attempts to use data of a type not + supported by GraphML.""" + try: + return self.xml_type[key] + except KeyError as err: + raise TypeError( + f"GraphML does not support type {key} as data values." 
+ ) from err + + +class GraphMLWriter(GraphML): + def __init__( + self, + graph=None, + encoding="utf-8", + prettyprint=True, + infer_numeric_types=False, + named_key_ids=False, + edge_id_from_attribute=None, + ): + self.construct_types() + from xml.etree.ElementTree import Element + + self.myElement = Element + + self.infer_numeric_types = infer_numeric_types + self.prettyprint = prettyprint + self.named_key_ids = named_key_ids + self.edge_id_from_attribute = edge_id_from_attribute + self.encoding = encoding + self.xml = self.myElement( + "graphml", + { + "xmlns": self.NS_GRAPHML, + "xmlns:xsi": self.NS_XSI, + "xsi:schemaLocation": self.SCHEMALOCATION, + }, + ) + self.keys = {} + self.attributes = defaultdict(list) + self.attribute_types = defaultdict(set) + + if graph is not None: + self.add_graph_element(graph) + + def __str__(self): + from xml.etree.ElementTree import tostring + + if self.prettyprint: + self.indent(self.xml) + s = tostring(self.xml).decode(self.encoding) + return s + + def attr_type(self, name, scope, value): + """Infer the attribute type of data named name. Currently this only + supports inference of numeric types. + + If self.infer_numeric_types is false, type is used. Otherwise, pick the + most general of types found across all values with name and scope. This + means edges with data named 'weight' are treated separately from nodes + with data named 'weight'. + """ + if self.infer_numeric_types: + types = self.attribute_types[(name, scope)] + + if len(types) > 1: + types = {self.get_xml_type(t) for t in types} + if "string" in types: + return str + elif "float" in types or "double" in types: + return float + else: + return int + else: + return list(types)[0] + else: + return type(value) + + def get_key(self, name, attr_type, scope, default): + keys_key = (name, attr_type, scope) + try: + return self.keys[keys_key] + except KeyError: + if self.named_key_ids: + new_id = name + else: + new_id = f"d{len(list(self.keys))}" + + self.keys[keys_key] = new_id + key_kwargs = { + "id": new_id, + "for": scope, + "attr.name": name, + "attr.type": attr_type, + } + key_element = self.myElement("key", **key_kwargs) + # add subelement for data default value if present + if default is not None: + default_element = self.myElement("default") + default_element.text = str(default) + key_element.append(default_element) + self.xml.insert(0, key_element) + return new_id + + def add_data(self, name, element_type, value, scope="all", default=None): + """ + Make a data element for an edge or a node. Keep a log of the + type in the keys table. + """ + if element_type not in self.xml_type: + raise nx.NetworkXError( + f"GraphML writer does not support {element_type} as data values." + ) + keyid = self.get_key(name, self.get_xml_type(element_type), scope, default) + data_element = self.myElement("data", key=keyid) + data_element.text = str(value) + return data_element + + def add_attributes(self, scope, xml_obj, data, default): + """Appends attribute data to edges or nodes, and stores type information + to be added later. See add_graph_element. 
+ """ + for k, v in data.items(): + self.attribute_types[(str(k), scope)].add(type(v)) + self.attributes[xml_obj].append([k, v, scope, default.get(k)]) + + def add_nodes(self, G, graph_element): + default = G.graph.get("node_default", {}) + for node, data in G.nodes(data=True): + node_element = self.myElement("node", id=str(node)) + self.add_attributes("node", node_element, data, default) + graph_element.append(node_element) + + def add_edges(self, G, graph_element): + if G.is_multigraph(): + for u, v, key, data in G.edges(data=True, keys=True): + edge_element = self.myElement( + "edge", + source=str(u), + target=str(v), + id=str(data.get(self.edge_id_from_attribute)) + if self.edge_id_from_attribute + and self.edge_id_from_attribute in data + else str(key), + ) + default = G.graph.get("edge_default", {}) + self.add_attributes("edge", edge_element, data, default) + graph_element.append(edge_element) + else: + for u, v, data in G.edges(data=True): + if self.edge_id_from_attribute and self.edge_id_from_attribute in data: + # select attribute to be edge id + edge_element = self.myElement( + "edge", + source=str(u), + target=str(v), + id=str(data.get(self.edge_id_from_attribute)), + ) + else: + # default: no edge id + edge_element = self.myElement("edge", source=str(u), target=str(v)) + default = G.graph.get("edge_default", {}) + self.add_attributes("edge", edge_element, data, default) + graph_element.append(edge_element) + + def add_graph_element(self, G): + """ + Serialize graph G in GraphML to the stream. + """ + if G.is_directed(): + default_edge_type = "directed" + else: + default_edge_type = "undirected" + + graphid = G.graph.pop("id", None) + if graphid is None: + graph_element = self.myElement("graph", edgedefault=default_edge_type) + else: + graph_element = self.myElement( + "graph", edgedefault=default_edge_type, id=graphid + ) + default = {} + data = { + k: v + for (k, v) in G.graph.items() + if k not in ["node_default", "edge_default"] + } + self.add_attributes("graph", graph_element, data, default) + self.add_nodes(G, graph_element) + self.add_edges(G, graph_element) + + # self.attributes contains a mapping from XML Objects to a list of + # data that needs to be added to them. + # We postpone processing in order to do type inference/generalization. + # See self.attr_type + for xml_obj, data in self.attributes.items(): + for k, v, scope, default in data: + xml_obj.append( + self.add_data( + str(k), self.attr_type(k, scope, v), str(v), scope, default + ) + ) + self.xml.append(graph_element) + + def add_graphs(self, graph_list): + """Add many graphs to this GraphML document.""" + for G in graph_list: + self.add_graph_element(G) + + def dump(self, stream): + from xml.etree.ElementTree import ElementTree + + if self.prettyprint: + self.indent(self.xml) + document = ElementTree(self.xml) + document.write(stream, encoding=self.encoding, xml_declaration=True) + + def indent(self, elem, level=0): + # in-place prettyprint formatter + i = "\n" + level * " " + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + if not elem.tail or not elem.tail.strip(): + elem.tail = i + for elem in elem: + self.indent(elem, level + 1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + + +class IncrementalElement: + """Wrapper for _IncrementalWriter providing an Element like interface. 
+
+    This wrapper does not intend to be a complete implementation but rather to
+    deal with those calls used in GraphMLWriter.
+    """
+
+    def __init__(self, xml, prettyprint):
+        self.xml = xml
+        self.prettyprint = prettyprint
+
+    def append(self, element):
+        self.xml.write(element, pretty_print=self.prettyprint)
+
+
+class GraphMLWriterLxml(GraphMLWriter):
+    def __init__(
+        self,
+        path,
+        graph=None,
+        encoding="utf-8",
+        prettyprint=True,
+        infer_numeric_types=False,
+        named_key_ids=False,
+        edge_id_from_attribute=None,
+    ):
+        self.construct_types()
+        import lxml.etree as lxmletree
+
+        self.myElement = lxmletree.Element
+
+        self._encoding = encoding
+        self._prettyprint = prettyprint
+        self.named_key_ids = named_key_ids
+        self.edge_id_from_attribute = edge_id_from_attribute
+        self.infer_numeric_types = infer_numeric_types
+
+        self._xml_base = lxmletree.xmlfile(path, encoding=encoding)
+        self._xml = self._xml_base.__enter__()
+        self._xml.write_declaration()
+
+        # We need an xml variable that supports insertion; this call is
+        # used for adding the keys to the document.
+        # We will store those keys in a plain list, and then after the graph
+        # element is closed we will add them to the main graphml element.
+        self.xml = []
+        self._keys = self.xml
+        self._graphml = self._xml.element(
+            "graphml",
+            {
+                "xmlns": self.NS_GRAPHML,
+                "xmlns:xsi": self.NS_XSI,
+                "xsi:schemaLocation": self.SCHEMALOCATION,
+            },
+        )
+        self._graphml.__enter__()
+        self.keys = {}
+        self.attribute_types = defaultdict(set)
+
+        if graph is not None:
+            self.add_graph_element(graph)
+
+    def add_graph_element(self, G):
+        """
+        Serialize graph G in GraphML to the stream.
+        """
+        if G.is_directed():
+            default_edge_type = "directed"
+        else:
+            default_edge_type = "undirected"
+
+        graphid = G.graph.pop("id", None)
+        if graphid is None:
+            graph_element = self._xml.element("graph", edgedefault=default_edge_type)
+        else:
+            graph_element = self._xml.element(
+                "graph", edgedefault=default_edge_type, id=graphid
+            )
+
+        # Gather attribute types across the whole graph so the most general
+        # numeric format can be chosen, then pass through the attributes
+        # again to create a key_id for each.
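+        # For example, a "weight" attribute stored as 1 (int) on one edge
+        # and 1.5 (float) on another is written with a single key of
+        # attr.type="double" when infer_numeric_types=True.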
+        graphdata = {
+            k: v
+            for k, v in G.graph.items()
+            if k not in ("node_default", "edge_default")
+        }
+        node_default = G.graph.get("node_default", {})
+        edge_default = G.graph.get("edge_default", {})
+        # Graph attributes
+        for k, v in graphdata.items():
+            self.attribute_types[(str(k), "graph")].add(type(v))
+        for k, v in graphdata.items():
+            element_type = self.get_xml_type(self.attr_type(k, "graph", v))
+            self.get_key(str(k), element_type, "graph", None)
+        # Nodes and data
+        for node, d in G.nodes(data=True):
+            for k, v in d.items():
+                self.attribute_types[(str(k), "node")].add(type(v))
+        for node, d in G.nodes(data=True):
+            for k, v in d.items():
+                T = self.get_xml_type(self.attr_type(k, "node", v))
+                self.get_key(str(k), T, "node", node_default.get(k))
+        # Edges and data
+        if G.is_multigraph():
+            for u, v, ekey, d in G.edges(keys=True, data=True):
+                for k, v in d.items():
+                    self.attribute_types[(str(k), "edge")].add(type(v))
+            for u, v, ekey, d in G.edges(keys=True, data=True):
+                for k, v in d.items():
+                    T = self.get_xml_type(self.attr_type(k, "edge", v))
+                    self.get_key(str(k), T, "edge", edge_default.get(k))
+        else:
+            for u, v, d in G.edges(data=True):
+                for k, v in d.items():
+                    self.attribute_types[(str(k), "edge")].add(type(v))
+            for u, v, d in G.edges(data=True):
+                for k, v in d.items():
+                    T = self.get_xml_type(self.attr_type(k, "edge", v))
+                    self.get_key(str(k), T, "edge", edge_default.get(k))
+
+        # Now add attribute keys to the xml file
+        for key in self.xml:
+            self._xml.write(key, pretty_print=self._prettyprint)
+
+        # The incremental_writer writes each node/edge as it is created
+        incremental_writer = IncrementalElement(self._xml, self._prettyprint)
+        with graph_element:
+            self.add_attributes("graph", incremental_writer, graphdata, {})
+            self.add_nodes(G, incremental_writer)  # adds attributes too
+            self.add_edges(G, incremental_writer)  # adds attributes too
+
+    def add_attributes(self, scope, xml_obj, data, default):
+        """Appends attribute data."""
+        for k, v in data.items():
+            data_element = self.add_data(
+                str(k), self.attr_type(str(k), scope, v), str(v), scope, default.get(k)
+            )
+            xml_obj.append(data_element)
+
+    def __str__(self):
+        return object.__str__(self)
+
+    def dump(self, stream=None):
+        self._graphml.__exit__(None, None, None)
+        self._xml_base.__exit__(None, None, None)
+
+
+# default is lxml if present.
+write_graphml = write_graphml_lxml
+
+
+class GraphMLReader(GraphML):
+    """Read a GraphML document.
Produces NetworkX graph objects.""" + + def __init__(self, node_type=str, edge_key_type=int, force_multigraph=False): + self.construct_types() + self.node_type = node_type + self.edge_key_type = edge_key_type + self.multigraph = force_multigraph # If False, test for multiedges + self.edge_ids = {} # dict mapping (u,v) tuples to edge id attributes + + def __call__(self, path=None, string=None): + from xml.etree.ElementTree import ElementTree, fromstring + + if path is not None: + self.xml = ElementTree(file=path) + elif string is not None: + self.xml = fromstring(string) + else: + raise ValueError("Must specify either 'path' or 'string' as kwarg") + (keys, defaults) = self.find_graphml_keys(self.xml) + for g in self.xml.findall(f"{{{self.NS_GRAPHML}}}graph"): + yield self.make_graph(g, keys, defaults) + + def make_graph(self, graph_xml, graphml_keys, defaults, G=None): + # set default graph type + edgedefault = graph_xml.get("edgedefault", None) + if G is None: + if edgedefault == "directed": + G = nx.MultiDiGraph() + else: + G = nx.MultiGraph() + # set defaults for graph attributes + G.graph["node_default"] = {} + G.graph["edge_default"] = {} + for key_id, value in defaults.items(): + key_for = graphml_keys[key_id]["for"] + name = graphml_keys[key_id]["name"] + python_type = graphml_keys[key_id]["type"] + if key_for == "node": + G.graph["node_default"].update({name: python_type(value)}) + if key_for == "edge": + G.graph["edge_default"].update({name: python_type(value)}) + # hyperedges are not supported + hyperedge = graph_xml.find(f"{{{self.NS_GRAPHML}}}hyperedge") + if hyperedge is not None: + raise nx.NetworkXError("GraphML reader doesn't support hyperedges") + # add nodes + for node_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}node"): + self.add_node(G, node_xml, graphml_keys, defaults) + # add edges + for edge_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}edge"): + self.add_edge(G, edge_xml, graphml_keys) + # add graph data + data = self.decode_data_elements(graphml_keys, graph_xml) + G.graph.update(data) + + # switch to Graph or DiGraph if no parallel edges were found + if self.multigraph: + return G + + G = nx.DiGraph(G) if G.is_directed() else nx.Graph(G) + # add explicit edge "id" from file as attribute in NX graph. + nx.set_edge_attributes(G, values=self.edge_ids, name="id") + return G + + def add_node(self, G, node_xml, graphml_keys, defaults): + """Add a node to the graph.""" + # warn on finding unsupported ports tag + ports = node_xml.find(f"{{{self.NS_GRAPHML}}}port") + if ports is not None: + warnings.warn("GraphML port tag not supported.") + # find the node by id and cast it to the appropriate type + node_id = self.node_type(node_xml.get("id")) + # get data/attributes for node + data = self.decode_data_elements(graphml_keys, node_xml) + G.add_node(node_id, **data) + # get child nodes + if node_xml.attrib.get("yfiles.foldertype") == "group": + graph_xml = node_xml.find(f"{{{self.NS_GRAPHML}}}graph") + self.make_graph(graph_xml, graphml_keys, defaults, G) + + def add_edge(self, G, edge_element, graphml_keys): + """Add an edge to the graph.""" + # warn on finding unsupported ports tag + ports = edge_element.find(f"{{{self.NS_GRAPHML}}}port") + if ports is not None: + warnings.warn("GraphML port tag not supported.") + + # raise error if we find mixed directed and undirected edges + directed = edge_element.get("directed") + if G.is_directed() and directed == "false": + msg = "directed=false edge found in directed graph." 
+ raise nx.NetworkXError(msg) + if (not G.is_directed()) and directed == "true": + msg = "directed=true edge found in undirected graph." + raise nx.NetworkXError(msg) + + source = self.node_type(edge_element.get("source")) + target = self.node_type(edge_element.get("target")) + data = self.decode_data_elements(graphml_keys, edge_element) + # GraphML stores edge ids as an attribute + # NetworkX uses them as keys in multigraphs too if no key + # attribute is specified + edge_id = edge_element.get("id") + if edge_id: + # self.edge_ids is used by `make_graph` method for non-multigraphs + self.edge_ids[source, target] = edge_id + try: + edge_id = self.edge_key_type(edge_id) + except ValueError: # Could not convert. + pass + else: + edge_id = data.get("key") + + if G.has_edge(source, target): + # mark this as a multigraph + self.multigraph = True + + # Use add_edges_from to avoid error with add_edge when `'key' in data` + # Note there is only one edge here... + G.add_edges_from([(source, target, edge_id, data)]) + + def decode_data_elements(self, graphml_keys, obj_xml): + """Use the key information to decode the data XML if present.""" + data = {} + for data_element in obj_xml.findall(f"{{{self.NS_GRAPHML}}}data"): + key = data_element.get("key") + try: + data_name = graphml_keys[key]["name"] + data_type = graphml_keys[key]["type"] + except KeyError as err: + raise nx.NetworkXError(f"Bad GraphML data: no key {key}") from err + text = data_element.text + # assume anything with subelements is a yfiles extension + if text is not None and len(list(data_element)) == 0: + if data_type is bool: + # Ignore cases. + # http://docs.oracle.com/javase/6/docs/api/java/lang/ + # Boolean.html#parseBoolean%28java.lang.String%29 + data[data_name] = self.convert_bool[text.lower()] + else: + data[data_name] = data_type(text) + elif len(list(data_element)) > 0: + # Assume yfiles as subelements, try to extract node_label + node_label = None + # set GenericNode's configuration as shape type + gn = data_element.find(f"{{{self.NS_Y}}}GenericNode") + if gn is not None: + data["shape_type"] = gn.get("configuration") + for node_type in ["GenericNode", "ShapeNode", "SVGNode", "ImageNode"]: + pref = f"{{{self.NS_Y}}}{node_type}/{{{self.NS_Y}}}" + geometry = data_element.find(f"{pref}Geometry") + if geometry is not None: + data["x"] = geometry.get("x") + data["y"] = geometry.get("y") + if node_label is None: + node_label = data_element.find(f"{pref}NodeLabel") + shape = data_element.find(f"{pref}Shape") + if shape is not None: + data["shape_type"] = shape.get("type") + if node_label is not None: + data["label"] = node_label.text + + # check all the different types of edges available in yEd. 
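+                # Only the first label found among these edge elements is
+                # used; yEd stores an edge's visible text in an EdgeLabel
+                # child of the element named after its geometry type.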
+ for edge_type in [ + "PolyLineEdge", + "SplineEdge", + "QuadCurveEdge", + "BezierEdge", + "ArcEdge", + ]: + pref = f"{{{self.NS_Y}}}{edge_type}/{{{self.NS_Y}}}" + edge_label = data_element.find(f"{pref}EdgeLabel") + if edge_label is not None: + break + if edge_label is not None: + data["label"] = edge_label.text + elif text is None: + data[data_name] = "" + return data + + def find_graphml_keys(self, graph_element): + """Extracts all the keys and key defaults from the xml.""" + graphml_keys = {} + graphml_key_defaults = {} + for k in graph_element.findall(f"{{{self.NS_GRAPHML}}}key"): + attr_id = k.get("id") + attr_type = k.get("attr.type") + attr_name = k.get("attr.name") + yfiles_type = k.get("yfiles.type") + if yfiles_type is not None: + attr_name = yfiles_type + attr_type = "yfiles" + if attr_type is None: + attr_type = "string" + warnings.warn(f"No key type for id {attr_id}. Using string") + if attr_name is None: + raise nx.NetworkXError(f"Unknown key for id {attr_id}.") + graphml_keys[attr_id] = { + "name": attr_name, + "type": self.python_type[attr_type], + "for": k.get("for"), + } + # check for "default" sub-element of key element + default = k.find(f"{{{self.NS_GRAPHML}}}default") + if default is not None: + # Handle default values identically to data element values + python_type = graphml_keys[attr_id]["type"] + if python_type is bool: + graphml_key_defaults[attr_id] = self.convert_bool[ + default.text.lower() + ] + else: + graphml_key_defaults[attr_id] = python_type(default.text) + return graphml_keys, graphml_key_defaults diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__init__.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__init__.py new file mode 100644 index 0000000..532c71d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__init__.py @@ -0,0 +1,19 @@ +""" +********* +JSON data +********* +Generate and parse JSON serializable data for NetworkX graphs. 
+ +These formats are suitable for use with the d3.js examples https://d3js.org/ + +The three formats that you can generate with NetworkX are: + + - node-link like in the d3.js example https://bl.ocks.org/mbostock/4062045 + - tree like in the d3.js example https://bl.ocks.org/mbostock/4063550 + - adjacency like in the d3.js example https://bost.ocks.org/mike/miserables/ +""" + +from networkx.readwrite.json_graph.node_link import * +from networkx.readwrite.json_graph.adjacency import * +from networkx.readwrite.json_graph.tree import * +from networkx.readwrite.json_graph.cytoscape import * diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..01ce195 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-312.pyc new file mode 100644 index 0000000..8754f29 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-312.pyc new file mode 100644 index 0000000..775729b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-312.pyc new file mode 100644 index 0000000..ab5b58d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-312.pyc new file mode 100644 index 0000000..e51d702 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/adjacency.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/adjacency.py new file mode 100644 index 0000000..3b05747 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/adjacency.py @@ -0,0 +1,156 @@ +import networkx as nx + +__all__ = ["adjacency_data", "adjacency_graph"] + +_attrs = {"id": "id", "key": "key"} + + +def adjacency_data(G, attrs=_attrs): + """Returns data in adjacency format that is suitable for JSON serialization + and use in JavaScript documents. + + Parameters + ---------- + G : NetworkX graph + + attrs : dict + A dictionary that contains two keys 'id' and 'key'. The corresponding + values provide the attribute names for storing NetworkX-internal graph + data. The values should be unique. Default value: + :samp:`dict(id='id', key='key')`. 
+
+        If some user-defined graph data use these attribute names as data keys,
+        they may be silently dropped.
+
+    Returns
+    -------
+    data : dict
+       A dictionary with adjacency formatted data.
+
+    Raises
+    ------
+    NetworkXError
+        If values in attrs are not unique.
+
+    Examples
+    --------
+    >>> from networkx.readwrite import json_graph
+    >>> G = nx.Graph([(1, 2)])
+    >>> data = json_graph.adjacency_data(G)
+
+    To serialize with json
+
+    >>> import json
+    >>> s = json.dumps(data)
+
+    Notes
+    -----
+    Graph, node, and link attributes will be written when using this format
+    but attribute keys must be strings if you want to serialize the resulting
+    data with JSON.
+
+    The default value of attrs will be changed in a future release of NetworkX.
+
+    See Also
+    --------
+    adjacency_graph, node_link_data, tree_data
+    """
+    multigraph = G.is_multigraph()
+    id_ = attrs["id"]
+    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
+    key = None if not multigraph else attrs["key"]
+    if id_ == key:
+        raise nx.NetworkXError("Attribute names are not unique.")
+    data = {}
+    data["directed"] = G.is_directed()
+    data["multigraph"] = multigraph
+    data["graph"] = list(G.graph.items())
+    data["nodes"] = []
+    data["adjacency"] = []
+    for n, nbrdict in G.adjacency():
+        data["nodes"].append({**G.nodes[n], id_: n})
+        adj = []
+        if multigraph:
+            for nbr, keys in nbrdict.items():
+                for k, d in keys.items():
+                    adj.append({**d, id_: nbr, key: k})
+        else:
+            for nbr, d in nbrdict.items():
+                adj.append({**d, id_: nbr})
+        data["adjacency"].append(adj)
+    return data
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs):
+    """Returns graph from adjacency data format.
+
+    Parameters
+    ----------
+    data : dict
+        Adjacency list formatted graph data
+
+    directed : bool
+        If True, and direction not specified in data, return a directed graph.
+
+    multigraph : bool
+        If True, and multigraph not specified in data, return a multigraph.
+
+    attrs : dict
+        A dictionary that contains two keys 'id' and 'key'. The corresponding
+        values provide the attribute names for storing NetworkX-internal graph
+        data. The values should be unique. Default value:
+        :samp:`dict(id='id', key='key')`.
+
+    Returns
+    -------
+    G : NetworkX graph
+       A NetworkX graph object
+
+    Examples
+    --------
+    >>> from networkx.readwrite import json_graph
+    >>> G = nx.Graph([(1, 2)])
+    >>> data = json_graph.adjacency_data(G)
+    >>> H = json_graph.adjacency_graph(data)
+
+    Notes
+    -----
+    The default value of attrs will be changed in a future release of NetworkX.
+
+    See Also
+    --------
+    adjacency_data, node_link_data, tree_data
+    """
+    multigraph = data.get("multigraph", multigraph)
+    directed = data.get("directed", directed)
+    if multigraph:
+        graph = nx.MultiGraph()
+    else:
+        graph = nx.Graph()
+    if directed:
+        graph = graph.to_directed()
+    id_ = attrs["id"]
+    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
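+    # With key left as None, the multigraph-only key lookup in the loop
+    # below is never taken for plain graphs.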
+ key = None if not multigraph else attrs["key"] + graph.graph = dict(data.get("graph", [])) + mapping = [] + for d in data["nodes"]: + node_data = d.copy() + node = node_data.pop(id_) + mapping.append(node) + graph.add_node(node) + graph.nodes[node].update(node_data) + for i, d in enumerate(data["adjacency"]): + source = mapping[i] + for tdata in d: + target_data = tdata.copy() + target = target_data.pop(id_) + if not multigraph: + graph.add_edge(source, target) + graph[source][target].update(target_data) + else: + ky = target_data.pop(key, None) + graph.add_edge(source, target, key=ky) + graph[source][target][ky].update(target_data) + return graph diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/cytoscape.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/cytoscape.py new file mode 100644 index 0000000..417e36f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/cytoscape.py @@ -0,0 +1,190 @@ +import networkx as nx + +__all__ = ["cytoscape_data", "cytoscape_graph"] + + +def cytoscape_data(G, name="name", ident="id"): + """Returns data in Cytoscape JSON format (cyjs). + + Parameters + ---------- + G : NetworkX Graph + The graph to convert to cytoscape format + name : string + A string which is mapped to the 'name' node element in cyjs format. + Must not have the same value as `ident`. + ident : string + A string which is mapped to the 'id' node element in cyjs format. + Must not have the same value as `name`. + + Returns + ------- + data: dict + A dictionary with cyjs formatted data. + + Raises + ------ + NetworkXError + If the values for `name` and `ident` are identical. + + See Also + -------- + cytoscape_graph: convert a dictionary in cyjs format to a graph + + References + ---------- + .. [1] Cytoscape user's manual: + http://manual.cytoscape.org/en/stable/index.html + + Examples + -------- + >>> from pprint import pprint + >>> G = nx.path_graph(2) + >>> cyto_data = nx.cytoscape_data(G) + >>> pprint(cyto_data, sort_dicts=False) + {'data': [], + 'directed': False, + 'multigraph': False, + 'elements': {'nodes': [{'data': {'id': '0', 'value': 0, 'name': '0'}}, + {'data': {'id': '1', 'value': 1, 'name': '1'}}], + 'edges': [{'data': {'source': 0, 'target': 1}}]}} + + The :mod:`json` package can be used to serialize the resulting data + + >>> import io, json + >>> with io.StringIO() as fh: # replace io with `open(...)` to write to disk + ... json.dump(cyto_data, fh) + ... fh.seek(0) # doctest: +SKIP + ... 
print(fh.getvalue()[:64]) # View the first 64 characters + {"data": [], "directed": false, "multigraph": false, "elements": + + """ + if name == ident: + raise nx.NetworkXError("name and ident must be different.") + + jsondata = {"data": list(G.graph.items())} + jsondata["directed"] = G.is_directed() + jsondata["multigraph"] = G.is_multigraph() + jsondata["elements"] = {"nodes": [], "edges": []} + nodes = jsondata["elements"]["nodes"] + edges = jsondata["elements"]["edges"] + + for i, j in G.nodes.items(): + n = {"data": j.copy()} + n["data"]["id"] = j.get(ident) or str(i) + n["data"]["value"] = i + n["data"]["name"] = j.get(name) or str(i) + nodes.append(n) + + if G.is_multigraph(): + for e in G.edges(keys=True): + n = {"data": G.adj[e[0]][e[1]][e[2]].copy()} + n["data"]["source"] = e[0] + n["data"]["target"] = e[1] + n["data"]["key"] = e[2] + edges.append(n) + else: + for e in G.edges(): + n = {"data": G.adj[e[0]][e[1]].copy()} + n["data"]["source"] = e[0] + n["data"]["target"] = e[1] + edges.append(n) + return jsondata + + +@nx._dispatchable(graphs=None, returns_graph=True) +def cytoscape_graph(data, name="name", ident="id"): + """ + Create a NetworkX graph from a dictionary in cytoscape JSON format. + + Parameters + ---------- + data : dict + A dictionary of data conforming to cytoscape JSON format. + name : string + A string which is mapped to the 'name' node element in cyjs format. + Must not have the same value as `ident`. + ident : string + A string which is mapped to the 'id' node element in cyjs format. + Must not have the same value as `name`. + + Returns + ------- + graph : a NetworkX graph instance + The `graph` can be an instance of `Graph`, `DiGraph`, `MultiGraph`, or + `MultiDiGraph` depending on the input data. + + Raises + ------ + NetworkXError + If the `name` and `ident` attributes are identical. + + See Also + -------- + cytoscape_data: convert a NetworkX graph to a dict in cyjs format + + References + ---------- + .. [1] Cytoscape user's manual: + http://manual.cytoscape.org/en/stable/index.html + + Examples + -------- + >>> data_dict = { + ... "data": [], + ... "directed": False, + ... "multigraph": False, + ... "elements": { + ... "nodes": [ + ... {"data": {"id": "0", "value": 0, "name": "0"}}, + ... {"data": {"id": "1", "value": 1, "name": "1"}}, + ... ], + ... "edges": [{"data": {"source": 0, "target": 1}}], + ... }, + ... 
} + >>> G = nx.cytoscape_graph(data_dict) + >>> G.name + '' + >>> G.nodes() + NodeView((0, 1)) + >>> G.nodes(data=True)[0] + {'id': '0', 'value': 0, 'name': '0'} + >>> G.edges(data=True) + EdgeDataView([(0, 1, {'source': 0, 'target': 1})]) + """ + if name == ident: + raise nx.NetworkXError("name and ident must be different.") + + multigraph = data.get("multigraph") + directed = data.get("directed") + if multigraph: + graph = nx.MultiGraph() + else: + graph = nx.Graph() + if directed: + graph = graph.to_directed() + graph.graph = dict(data.get("data")) + for d in data["elements"]["nodes"]: + node_data = d["data"].copy() + node = d["data"]["value"] + + if d["data"].get(name): + node_data[name] = d["data"].get(name) + if d["data"].get(ident): + node_data[ident] = d["data"].get(ident) + + graph.add_node(node) + graph.nodes[node].update(node_data) + + for d in data["elements"]["edges"]: + edge_data = d["data"].copy() + sour = d["data"]["source"] + targ = d["data"]["target"] + if multigraph: + key = d["data"].get("key", 0) + graph.add_edge(sour, targ, key=key) + graph.edges[sour, targ, key].update(edge_data) + else: + graph.add_edge(sour, targ) + graph.edges[sour, targ].update(edge_data) + return graph diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/node_link.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/node_link.py new file mode 100644 index 0000000..88bfbcd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/node_link.py @@ -0,0 +1,333 @@ +import warnings +from itertools import count + +import networkx as nx + +__all__ = ["node_link_data", "node_link_graph"] + + +def _to_tuple(x): + """Converts lists to tuples, including nested lists. + + All other non-list inputs are passed through unmodified. This function is + intended to be used to convert potentially nested lists from json files + into valid nodes. + + Examples + -------- + >>> _to_tuple([1, 2, [3, 4]]) + (1, 2, (3, 4)) + """ + if not isinstance(x, tuple | list): + return x + return tuple(map(_to_tuple, x)) + + +def node_link_data( + G, + *, + source="source", + target="target", + name="id", + key="key", + edges=None, + nodes="nodes", + link=None, +): + """Returns data in node-link format that is suitable for JSON serialization + and use in JavaScript documents. + + Parameters + ---------- + G : NetworkX graph + source : string + A string that provides the 'source' attribute name for storing NetworkX-internal graph data. + target : string + A string that provides the 'target' attribute name for storing NetworkX-internal graph data. + name : string + A string that provides the 'name' attribute name for storing NetworkX-internal graph data. + key : string + A string that provides the 'key' attribute name for storing NetworkX-internal graph data. + edges : string + A string that provides the 'edges' attribute name for storing NetworkX-internal graph data. + nodes : string + A string that provides the 'nodes' attribute name for storing NetworkX-internal graph data. + link : string + .. deprecated:: 3.4 + + The `link` argument is deprecated and will be removed in version `3.6`. + Use the `edges` keyword instead. + + A string that provides the 'edges' attribute name for storing NetworkX-internal graph data. + + Returns + ------- + data : dict + A dictionary with node-link formatted data. + + Raises + ------ + NetworkXError + If the values of 'source', 'target' and 'key' are not unique. 
+
+    Examples
+    --------
+    >>> from pprint import pprint
+    >>> G = nx.Graph([("A", "B")])
+    >>> data1 = nx.node_link_data(G, edges="edges")
+    >>> pprint(data1)
+    {'directed': False,
+     'edges': [{'source': 'A', 'target': 'B'}],
+     'graph': {},
+     'multigraph': False,
+     'nodes': [{'id': 'A'}, {'id': 'B'}]}
+
+    To serialize with JSON
+
+    >>> import json
+    >>> s1 = json.dumps(data1)
+    >>> pprint(s1)
+    ('{"directed": false, "multigraph": false, "graph": {}, "nodes": [{"id": "A"}, '
+     '{"id": "B"}], "edges": [{"source": "A", "target": "B"}]}')
+
+    A graph can also be serialized by passing `node_link_data` as an encoder function.
+
+    >>> s1 = json.dumps(G, default=nx.node_link_data)
+    >>> pprint(s1)
+    ('{"directed": false, "multigraph": false, "graph": {}, "nodes": [{"id": "A"}, '
+     '{"id": "B"}], "links": [{"source": "A", "target": "B"}]}')
+
+    The attribute names for storing NetworkX-internal graph data can
+    be specified as keyword options.
+
+    >>> H = nx.gn_graph(2)
+    >>> data2 = nx.node_link_data(
+    ...     H, edges="links", source="from", target="to", nodes="vertices"
+    ... )
+    >>> pprint(data2)
+    {'directed': True,
+     'graph': {},
+     'links': [{'from': 1, 'to': 0}],
+     'multigraph': False,
+     'vertices': [{'id': 0}, {'id': 1}]}
+
+    Notes
+    -----
+    Graph, node, and link attributes are stored in this format. Note that
+    attribute keys will be converted to strings in order to comply with JSON.
+
+    Attribute 'key' is only used for multigraphs.
+
+    To use `node_link_data` in conjunction with `node_link_graph`,
+    the keyword names for the attributes must match.
+
+    See Also
+    --------
+    node_link_graph, adjacency_data, tree_data
+    """
+    # TODO: Remove between the lines when `link` deprecation expires
+    # -------------------------------------------------------------
+    if link is not None:
+        warnings.warn(
+            "Keyword argument 'link' is deprecated; use 'edges' instead",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        if edges is not None:
+            raise ValueError(
+                "Both 'edges' and 'link' are specified. Use 'edges'; 'link' will be removed in a future release"
+            )
+        else:
+            edges = link
+    else:
+        if edges is None:
+            warnings.warn(
+                (
+                    '\nThe default value will be `edges="edges"` in NetworkX 3.6.\n\n'
+                    "To make this warning go away, explicitly set the edges kwarg, e.g.:\n\n"
+                    '  nx.node_link_data(G, edges="links") to preserve current behavior, or\n'
+                    '  nx.node_link_data(G, edges="edges") for forward compatibility.'
+                ),
+                FutureWarning,
+            )
+            edges = "links"
+    # ------------------------------------------------------------
+
+    multigraph = G.is_multigraph()
+
+    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
+    key = None if not multigraph else key
+    if len({source, target, key}) < 3:
+        raise nx.NetworkXError("Attribute names are not unique.")
+    data = {
+        "directed": G.is_directed(),
+        "multigraph": multigraph,
+        "graph": G.graph,
+        nodes: [{**G.nodes[n], name: n} for n in G],
+    }
+    if multigraph:
+        data[edges] = [
+            {**d, source: u, target: v, key: k}
+            for u, v, k, d in G.edges(keys=True, data=True)
+        ]
+    else:
+        data[edges] = [{**d, source: u, target: v} for u, v, d in G.edges(data=True)]
+    return data
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def node_link_graph(
+    data,
+    directed=False,
+    multigraph=True,
+    *,
+    source="source",
+    target="target",
+    name="id",
+    key="key",
+    edges=None,
+    nodes="nodes",
+    link=None,
+):
+    """Returns graph from node-link data format.
+
+    Useful for de-serialization from JSON.
+
+    Parameters
+    ----------
+    data : dict
+        node-link formatted graph data
+
+    directed : bool
+        If True, and direction not specified in data, return a directed graph.
+
+    multigraph : bool
+        If True, and multigraph not specified in data, return a multigraph.
+
+    source : string
+        A string that provides the 'source' attribute name for storing NetworkX-internal graph data.
+    target : string
+        A string that provides the 'target' attribute name for storing NetworkX-internal graph data.
+    name : string
+        A string that provides the 'name' attribute name for storing NetworkX-internal graph data.
+    key : string
+        A string that provides the 'key' attribute name for storing NetworkX-internal graph data.
+    edges : string
+        A string that provides the 'edges' attribute name for storing NetworkX-internal graph data.
+    nodes : string
+        A string that provides the 'nodes' attribute name for storing NetworkX-internal graph data.
+    link : string
+        .. deprecated:: 3.4
+
+           The `link` argument is deprecated and will be removed in version `3.6`.
+           Use the `edges` keyword instead.
+
+        A string that provides the 'edges' attribute name for storing NetworkX-internal graph data.
+
+    Returns
+    -------
+    G : NetworkX graph
+       A NetworkX graph object
+
+    Examples
+    --------
+
+    Create data in node-link format by converting a graph.
+
+    >>> from pprint import pprint
+    >>> G = nx.Graph([("A", "B")])
+    >>> data = nx.node_link_data(G, edges="edges")
+    >>> pprint(data)
+    {'directed': False,
+     'edges': [{'source': 'A', 'target': 'B'}],
+     'graph': {},
+     'multigraph': False,
+     'nodes': [{'id': 'A'}, {'id': 'B'}]}
+
+    Revert data in node-link format to a graph.
+
+    >>> H = nx.node_link_graph(data, edges="edges")
+    >>> print(H.edges)
+    [('A', 'B')]
+
+    To serialize and deserialize a graph with JSON,
+
+    >>> import json
+    >>> d = json.dumps(nx.node_link_data(G, edges="edges"))
+    >>> H = nx.node_link_graph(json.loads(d), edges="edges")
+    >>> print(G.edges, H.edges)
+    [('A', 'B')] [('A', 'B')]
+
+    Notes
+    -----
+    Attribute 'key' is only used for multigraphs.
+
+    To use `node_link_data` in conjunction with `node_link_graph`,
+    the keyword names for the attributes must match.
+
+    See Also
+    --------
+    node_link_data, adjacency_data, tree_data
+    """
+    # TODO: Remove between the lines when `link` deprecation expires
+    # -------------------------------------------------------------
+    if link is not None:
+        warnings.warn(
+            "Keyword argument 'link' is deprecated; use 'edges' instead",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        if edges is not None:
+            raise ValueError(
+                "Both 'edges' and 'link' are specified. Use 'edges'; 'link' will be removed in a future release"
+            )
+        else:
+            edges = link
+    else:
+        if edges is None:
+            warnings.warn(
+                (
+                    '\nThe default value will be changed to `edges="edges"` in NetworkX 3.6.\n\n'
+                    "To make this warning go away, explicitly set the edges kwarg, e.g.:\n\n"
+                    '  nx.node_link_graph(data, edges="links") to preserve current behavior, or\n'
+                    '  nx.node_link_graph(data, edges="edges") for forward compatibility.'
+                ),
+                FutureWarning,
+            )
+            edges = "links"
+    # -------------------------------------------------------------
+
+    multigraph = data.get("multigraph", multigraph)
+    directed = data.get("directed", directed)
+    if multigraph:
+        graph = nx.MultiGraph()
+    else:
+        graph = nx.Graph()
+    if directed:
+        graph = graph.to_directed()
+
+    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
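+    # key stays None for plain graphs, so the per-edge key lookup in the
+    # loop below only happens for multigraphs.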
+ key = None if not multigraph else key + graph.graph = data.get("graph", {}) + c = count() + for d in data[nodes]: + node = _to_tuple(d.get(name, next(c))) + nodedata = {str(k): v for k, v in d.items() if k != name} + graph.add_node(node, **nodedata) + for d in data[edges]: + src = tuple(d[source]) if isinstance(d[source], list) else d[source] + tgt = tuple(d[target]) if isinstance(d[target], list) else d[target] + if not multigraph: + edgedata = {str(k): v for k, v in d.items() if k != source and k != target} + graph.add_edge(src, tgt, **edgedata) + else: + ky = d.get(key, None) + edgedata = { + str(k): v + for k, v in d.items() + if k != source and k != target and k != key + } + graph.add_edge(src, tgt, ky, **edgedata) + return graph diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..c8ff487 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-312.pyc new file mode 100644 index 0000000..bf6547b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-312.pyc new file mode 100644 index 0000000..22f8eed Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-312.pyc new file mode 100644 index 0000000..52fa37c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-312.pyc new file mode 100644 index 0000000..62ed601 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py new file mode 100644 index 0000000..3750638 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py @@ -0,0 +1,78 @@ +import copy +import json + +import pytest + +import networkx as nx 
+from networkx.readwrite.json_graph import adjacency_data, adjacency_graph +from networkx.utils import graphs_equal + + +class TestAdjacency: + def test_graph(self): + G = nx.path_graph(4) + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + + def test_graph_attributes(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph["foo"] = "bar" + G.graph[1] = "one" + + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + d = json.dumps(adjacency_data(G)) + H = adjacency_graph(json.loads(d)) + assert graphs_equal(G, H) + assert H.graph["foo"] == "bar" + assert H.graph[1] == "one" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_digraph(self): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + H = adjacency_graph(adjacency_data(G)) + assert H.is_directed() + assert graphs_equal(G, H) + + def test_multidigraph(self): + G = nx.MultiDiGraph() + nx.add_path(G, [1, 2, 3]) + H = adjacency_graph(adjacency_data(G)) + assert H.is_directed() + assert H.is_multigraph() + assert graphs_equal(G, H) + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + assert H[1][2]["second"]["color"] == "blue" + + def test_input_data_is_not_modified_when_building_graph(self): + G = nx.path_graph(4) + input_data = adjacency_data(G) + orig_data = copy.deepcopy(input_data) + # Ensure input is unmodified by deserialisation + assert graphs_equal(G, adjacency_graph(input_data)) + assert input_data == orig_data + + def test_adjacency_form_json_serialisable(self): + G = nx.path_graph(4) + H = adjacency_graph(json.loads(json.dumps(adjacency_data(G)))) + assert graphs_equal(G, H) + + def test_exception(self): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + attrs = {"id": "node", "key": "node"} + adjacency_data(G, attrs) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py new file mode 100644 index 0000000..5d47f21 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py @@ -0,0 +1,78 @@ +import copy +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import cytoscape_data, cytoscape_graph + + +def test_graph(): + G = nx.path_graph(4) + H = cytoscape_graph(cytoscape_data(G)) + assert nx.is_isomorphic(G, H) + + +def test_input_data_is_not_modified_when_building_graph(): + G = nx.path_graph(4) + input_data = cytoscape_data(G) + orig_data = copy.deepcopy(input_data) + # Ensure input is unmodified by cytoscape_graph (gh-4173) + cytoscape_graph(input_data) + assert input_data == orig_data + + +def test_graph_attributes(): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph["foo"] = "bar" + G.graph[1] = "one" + G.add_node(3, name="node", id="123") + + H = cytoscape_graph(cytoscape_data(G)) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + assert H.nodes[3]["name"] == "node" + assert H.nodes[3]["id"] == "123" + + d = json.dumps(cytoscape_data(G)) + H = cytoscape_graph(json.loads(d)) + assert H.graph["foo"] == "bar" + assert H.graph[1] == "one" + assert 
H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + assert H.nodes[3]["name"] == "node" + assert H.nodes[3]["id"] == "123" + + +def test_digraph(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + H = cytoscape_graph(cytoscape_data(G)) + assert H.is_directed() + assert nx.is_isomorphic(G, H) + + +def test_multidigraph(): + G = nx.MultiDiGraph() + nx.add_path(G, [1, 2, 3]) + H = cytoscape_graph(cytoscape_data(G)) + assert H.is_directed() + assert H.is_multigraph() + + +def test_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + H = cytoscape_graph(cytoscape_data(G)) + assert nx.is_isomorphic(G, H) + assert H[1][2]["second"]["color"] == "blue" + + +def test_exception(): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + cytoscape_data(G, name="foo", ident="foo") diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py new file mode 100644 index 0000000..f903f60 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py @@ -0,0 +1,175 @@ +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import node_link_data, node_link_graph + + +def test_node_link_edges_default_future_warning(): + "Test FutureWarning is raised when `edges=None` in node_link_data and node_link_graph" + G = nx.Graph([(1, 2)]) + with pytest.warns(FutureWarning, match="\nThe default value will be"): + data = nx.node_link_data(G) # edges=None, the default + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = nx.node_link_graph(data) # edges=None, the default + + +def test_node_link_deprecated_link_param(): + G = nx.Graph([(1, 2)]) + with pytest.warns(DeprecationWarning, match="Keyword argument 'link'"): + data = nx.node_link_data(G, link="links") + with pytest.warns(DeprecationWarning, match="Keyword argument 'link'"): + H = nx.node_link_graph(data, link="links") + + +class TestNodeLink: + # TODO: To be removed when signature change complete + def test_custom_attrs_dep(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + attrs = { + "source": "c_source", + "target": "c_target", + "name": "c_id", + "key": "c_key", + "link": "c_links", + } + + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + # provide only a partial dictionary of keywords. 
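+        # Unspecified keywords ("name", "key") fall back to their defaults.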
+ # This is similar to an example in the doc string + attrs = { + "link": "c_links", + "source": "c_source", + "target": "c_target", + } + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_exception_dep(self): + G = nx.MultiDiGraph() + with pytest.raises(nx.NetworkXError): + with pytest.warns(FutureWarning, match="\nThe default value will be"): + node_link_data(G, name="node", source="node", target="node", key="node") + + def test_graph(self): + G = nx.path_graph(4) + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = node_link_graph(node_link_data(G)) + assert nx.is_isomorphic(G, H) + + def test_graph_attributes(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = node_link_graph(node_link_data(G)) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + with pytest.warns(FutureWarning, match="\nThe default value will be"): + d = json.dumps(node_link_data(G)) + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = node_link_graph(json.loads(d)) + assert H.graph["foo"] == "bar" + assert H.graph["1"] == "one" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_digraph(self): + G = nx.DiGraph() + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = node_link_graph(node_link_data(G)) + assert H.is_directed() + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = node_link_graph(node_link_data(G)) + assert nx.is_isomorphic(G, H) + assert H[1][2]["second"]["color"] == "blue" + + def test_graph_with_tuple_nodes(self): + G = nx.Graph() + G.add_edge((0, 0), (1, 0), color=[255, 255, 0]) + with pytest.warns(FutureWarning, match="\nThe default value will be"): + d = node_link_data(G) + dumped_d = json.dumps(d) + dd = json.loads(dumped_d) + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = node_link_graph(dd) + assert H.nodes[(0, 0)] == G.nodes[(0, 0)] + assert H[(0, 0)][(1, 0)]["color"] == [255, 255, 0] + + def test_unicode_keys(self): + q = "qualité" + G = nx.Graph() + G.add_node(1, **{q: q}) + with pytest.warns(FutureWarning, match="\nThe default value will be"): + s = node_link_data(G) + output = json.dumps(s, ensure_ascii=False) + data = json.loads(output) + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = node_link_graph(data) + assert H.nodes[1][q] == q + + def test_exception(self): + G = nx.MultiDiGraph() + attrs = {"name": "node", "source": "node", "target": "node", "key": "node"} + with pytest.raises(nx.NetworkXError): + with pytest.warns(FutureWarning, match="\nThe default value will be"): + node_link_data(G, **attrs) + + def test_string_ids(self): + q = "qualité" + G = nx.DiGraph() + G.add_node("A") + G.add_node(q) + G.add_edge("A", q) + with pytest.warns(FutureWarning, match="\nThe default value will be"): + data = node_link_data(G) + assert data["links"][0]["source"] == "A" + assert data["links"][0]["target"] == q + with pytest.warns(FutureWarning, match="\nThe default value will be"): + H = 
node_link_graph(data) + assert nx.is_isomorphic(G, H) + + def test_custom_attrs(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + attrs = { + "source": "c_source", + "target": "c_target", + "name": "c_id", + "key": "c_key", + "link": "c_links", + } + + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_tree.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_tree.py new file mode 100644 index 0000000..643a14d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tests/test_tree.py @@ -0,0 +1,48 @@ +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import tree_data, tree_graph + + +def test_graph(): + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3], color="red") + G.add_edge(1, 2, foo=7) + G.add_edge(1, 3, foo=10) + G.add_edge(3, 4, foo=10) + H = tree_graph(tree_data(G, 1)) + assert nx.is_isomorphic(G, H) + + +def test_graph_attributes(): + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3], color="red") + G.add_edge(1, 2, foo=7) + G.add_edge(1, 3, foo=10) + G.add_edge(3, 4, foo=10) + H = tree_graph(tree_data(G, 1)) + assert H.nodes[1]["color"] == "red" + + d = json.dumps(tree_data(G, 1)) + H = tree_graph(json.loads(d)) + assert H.nodes[1]["color"] == "red" + + +def test_exceptions(): + with pytest.raises(TypeError, match="is not a tree."): + G = nx.complete_graph(3) + tree_data(G, 0) + with pytest.raises(TypeError, match="is not directed."): + G = nx.path_graph(3) + tree_data(G, 0) + with pytest.raises(TypeError, match="is not weakly connected."): + G = nx.path_graph(3, create_using=nx.DiGraph) + G.add_edge(2, 0) + G.add_node(3) + tree_data(G, 0) + with pytest.raises(nx.NetworkXError, match="must be different."): + G = nx.MultiDiGraph() + G.add_node(0) + tree_data(G, 0, ident="node", children="node") diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tree.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tree.py new file mode 100644 index 0000000..22b07b0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/json_graph/tree.py @@ -0,0 +1,137 @@ +from itertools import chain + +import networkx as nx + +__all__ = ["tree_data", "tree_graph"] + + +def tree_data(G, root, ident="id", children="children"): + """Returns data in tree format that is suitable for JSON serialization + and use in JavaScript documents. + + Parameters + ---------- + G : NetworkX graph + G must be an oriented tree + + root : node + The root of the tree + + ident : string + Attribute name for storing NetworkX-internal graph data. `ident` must + have a different value than `children`. The default is 'id'. + + children : string + Attribute name for storing NetworkX-internal graph data. `children` + must have a different value than `ident`. The default is 'children'. + + Returns + ------- + data : dict + A dictionary with node-link formatted data. + + Raises + ------ + NetworkXError + If `children` and `ident` attributes are identical. 
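+    TypeError
+        If `G` is not a tree, is not directed, or is not weakly connected.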
+ + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.DiGraph([(1, 2)]) + >>> data = json_graph.tree_data(G, root=1) + + To serialize with json + + >>> import json + >>> s = json.dumps(data) + + Notes + ----- + Node attributes are stored in this format but keys + for attributes must be strings if you want to serialize with JSON. + + Graph and edge attributes are not stored. + + See Also + -------- + tree_graph, node_link_data, adjacency_data + """ + if G.number_of_nodes() != G.number_of_edges() + 1: + raise TypeError("G is not a tree.") + if not G.is_directed(): + raise TypeError("G is not directed.") + if not nx.is_weakly_connected(G): + raise TypeError("G is not weakly connected.") + + if ident == children: + raise nx.NetworkXError("The values for `id` and `children` must be different.") + + def add_children(n, G): + nbrs = G[n] + if len(nbrs) == 0: + return [] + children_ = [] + for child in nbrs: + d = {**G.nodes[child], ident: child} + c = add_children(child, G) + if c: + d[children] = c + children_.append(d) + return children_ + + return {**G.nodes[root], ident: root, children: add_children(root, G)} + + +@nx._dispatchable(graphs=None, returns_graph=True) +def tree_graph(data, ident="id", children="children"): + """Returns graph from tree data format. + + Parameters + ---------- + data : dict + Tree formatted graph data + + ident : string + Attribute name for storing NetworkX-internal graph data. `ident` must + have a different value than `children`. The default is 'id'. + + children : string + Attribute name for storing NetworkX-internal graph data. `children` + must have a different value than `ident`. The default is 'children'. + + Returns + ------- + G : NetworkX DiGraph + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.DiGraph([(1, 2)]) + >>> data = json_graph.tree_data(G, root=1) + >>> H = json_graph.tree_graph(data) + + See Also + -------- + tree_data, node_link_data, adjacency_data + """ + graph = nx.DiGraph() + + def add_children(parent, children_): + for data in children_: + child = data[ident] + graph.add_edge(parent, child) + grandchildren = data.get(children, []) + if grandchildren: + add_children(child, grandchildren) + nodedata = { + str(k): v for k, v in data.items() if k != ident and k != children + } + graph.add_node(child, **nodedata) + + root = data[ident] + children_ = data.get(children, []) + nodedata = {str(k): v for k, v in data.items() if k != ident and k != children} + graph.add_node(root, **nodedata) + add_children(root, children_) + return graph diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/leda.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/leda.py new file mode 100644 index 0000000..8d88a67 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/leda.py @@ -0,0 +1,108 @@ +""" +Read graphs in LEDA format. + +LEDA is a C++ class library for efficient data types and algorithms. + +Format +------ +See http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html + +""" +# Original author: D. Eppstein, UC Irvine, August 12, 2003. +# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain. + +__all__ = ["read_leda", "parse_leda"] + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import open_file + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_leda(path, encoding="UTF-8"): + """Read graph in LEDA format from path. 
+ + Parameters + ---------- + path : file or string + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be decompressed. + + Returns + ------- + G : NetworkX graph + + Examples + -------- + >>> G = nx.read_leda("file.leda") # doctest: +SKIP + + References + ---------- + .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html + """ + lines = (line.decode(encoding) for line in path) + G = parse_leda(lines) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def parse_leda(lines): + """Read graph in LEDA format from string or iterable. + + Parameters + ---------- + lines : string or iterable + Data in LEDA format. + + Returns + ------- + G : NetworkX graph + + Examples + -------- + >>> G = nx.parse_leda(string) # doctest: +SKIP + + References + ---------- + .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html + """ + if isinstance(lines, str): + lines = iter(lines.split("\n")) + lines = iter( + [ + line.rstrip("\n") + for line in lines + if not (line.startswith(("#", "\n")) or line == "") + ] + ) + for i in range(3): + next(lines) + # Graph + du = int(next(lines)) # -1=directed, -2=undirected + if du == -1: + G = nx.DiGraph() + else: + G = nx.Graph() + + # Nodes + n = int(next(lines)) # number of nodes + node = {} + for i in range(1, n + 1): # LEDA counts from 1 to n + symbol = next(lines).rstrip().strip("|{}| ") + if symbol == "": + symbol = str(i) # use int if no label - could be trouble + node[i] = symbol + + G.add_nodes_from([s for i, s in node.items()]) + + # Edges + m = int(next(lines)) # number of edges + for i in range(m): + try: + s, t, reversal, label = next(lines).split() + except BaseException as err: + raise NetworkXError(f"Too few fields in LEDA.GRAPH edge {i + 1}") from err + # BEWARE: no handling of reversal edges + G.add_edge(node[int(s)], node[int(t)], label=label[2:-2]) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/multiline_adjlist.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/multiline_adjlist.py new file mode 100644 index 0000000..f5b0b1c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/multiline_adjlist.py @@ -0,0 +1,393 @@ +""" +************************* +Multi-line Adjacency List +************************* +Read and write NetworkX graphs as multi-line adjacency lists. + +The multi-line adjacency list format is useful for graphs with +nodes that can be meaningfully represented as strings. With this format +simple edge data can be stored but node or graph data is not. + +Format +------ +The first label in a line is the source node label followed by the node degree +d. The next d lines are target node labels and optional edge data. +That pattern repeats for all nodes in the graph. + +The graph with edges a-b, a-c, d-e can be represented as the following +adjacency list (anything following the # in a line is a comment):: + + # example.multiline-adjlist + a 2 + b + c + d 1 + e +""" + +__all__ = [ + "generate_multiline_adjlist", + "write_multiline_adjlist", + "parse_multiline_adjlist", + "read_multiline_adjlist", +] + +import networkx as nx +from networkx.utils import open_file + + +def generate_multiline_adjlist(G, delimiter=" "): + """Generate a single line of the graph G in multiline adjacency list format. 
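+
+    One line is yielded per node (its label and degree), followed by one
+    line per listed neighbor with optional edge data.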
+ + Parameters + ---------- + G : NetworkX graph + + delimiter : string, optional + Separator for node labels + + Returns + ------- + lines : string + Lines of data in multiline adjlist format. + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> for line in nx.generate_multiline_adjlist(G): + ... print(line) + 0 3 + 1 {} + 2 {} + 3 {} + 1 2 + 2 {} + 3 {} + 2 1 + 3 {} + 3 1 + 4 {} + 4 1 + 5 {} + 5 1 + 6 {} + 6 0 + + See Also + -------- + write_multiline_adjlist, read_multiline_adjlist + """ + if G.is_directed(): + if G.is_multigraph(): + for s, nbrs in G.adjacency(): + nbr_edges = [ + (u, data) + for u, datadict in nbrs.items() + for key, data in datadict.items() + ] + deg = len(nbr_edges) + yield str(s) + delimiter + str(deg) + for u, d in nbr_edges: + if d is None: + yield str(u) + else: + yield str(u) + delimiter + str(d) + else: # directed single edges + for s, nbrs in G.adjacency(): + deg = len(nbrs) + yield str(s) + delimiter + str(deg) + for u, d in nbrs.items(): + if d is None: + yield str(u) + else: + yield str(u) + delimiter + str(d) + else: # undirected + if G.is_multigraph(): + seen = set() # helper dict used to avoid duplicate edges + for s, nbrs in G.adjacency(): + nbr_edges = [ + (u, data) + for u, datadict in nbrs.items() + if u not in seen + for key, data in datadict.items() + ] + deg = len(nbr_edges) + yield str(s) + delimiter + str(deg) + for u, d in nbr_edges: + if d is None: + yield str(u) + else: + yield str(u) + delimiter + str(d) + seen.add(s) + else: # undirected single edges + seen = set() # helper dict used to avoid duplicate edges + for s, nbrs in G.adjacency(): + nbr_edges = [(u, d) for u, d in nbrs.items() if u not in seen] + deg = len(nbr_edges) + yield str(s) + delimiter + str(deg) + for u, d in nbr_edges: + if d is None: + yield str(u) + else: + yield str(u) + delimiter + str(d) + seen.add(s) + + +@open_file(1, mode="wb") +def write_multiline_adjlist(G, path, delimiter=" ", comments="#", encoding="utf-8"): + """Write the graph G in multiline adjacency list format to path + + Parameters + ---------- + G : NetworkX graph + + path : string or file + Filename or file handle to write to. + Filenames ending in .gz or .bz2 will be compressed. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels + + encoding : string, optional + Text encoding. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_multiline_adjlist(G, "test.multi_adjlist") + + The path can be a file handle or a string with the name of the file. If a + file handle is provided, it has to be opened in 'wb' mode. + + >>> fh = open("test.multi_adjlist2", "wb") + >>> nx.write_multiline_adjlist(G, fh) + + Filenames ending in .gz or .bz2 will be compressed. + + >>> nx.write_multiline_adjlist(G, "test.multi_adjlist.gz") + + See Also + -------- + read_multiline_adjlist + """ + import sys + import time + + pargs = comments + " ".join(sys.argv) + header = ( + f"{pargs}\n" + + comments + + f" GMT {time.asctime(time.gmtime())}\n" + + comments + + f" {G.name}\n" + ) + path.write(header.encode(encoding)) + + for multiline in generate_multiline_adjlist(G, delimiter): + multiline += "\n" + path.write(multiline.encode(encoding)) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def parse_multiline_adjlist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None, edgetype=None +): + """Parse lines of a multiline adjacency list representation of a graph. 
+ + Parameters + ---------- + lines : list or iterator of strings + Input data in multiline adjlist format + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + nodetype : Python type, optional + Convert nodes to this type. + + edgetype : Python type, optional + Convert edges to this type. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels. The default is whitespace. + + Returns + ------- + G: NetworkX graph + The graph corresponding to the lines in multiline adjacency list format. + + Examples + -------- + >>> lines = [ + ... "1 2", + ... "2 {'weight':3, 'name': 'Frodo'}", + ... "3 {}", + ... "2 1", + ... "5 {'weight':6, 'name': 'Saruman'}", + ... ] + >>> G = nx.parse_multiline_adjlist(iter(lines), nodetype=int) + >>> list(G) + [1, 2, 3, 5] + + """ + from ast import literal_eval + + G = nx.empty_graph(0, create_using) + for line in lines: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not line: + continue + try: + (u, deg) = line.rstrip("\n").split(delimiter) + deg = int(deg) + except BaseException as err: + raise TypeError(f"Failed to read node and degree on line ({line})") from err + if nodetype is not None: + try: + u = nodetype(u) + except BaseException as err: + raise TypeError( + f"Failed to convert node ({u}) to type {nodetype}" + ) from err + G.add_node(u) + for i in range(deg): + while True: + try: + line = next(lines) + except StopIteration as err: + msg = f"Failed to find neighbor for node ({u})" + raise TypeError(msg) from err + p = line.find(comments) + if p >= 0: + line = line[:p] + if line: + break + vlist = line.rstrip("\n").split(delimiter) + numb = len(vlist) + if numb < 1: + continue # isolated node + v = vlist.pop(0) + data = "".join(vlist) + if nodetype is not None: + try: + v = nodetype(v) + except BaseException as err: + raise TypeError( + f"Failed to convert node ({v}) to type {nodetype}" + ) from err + if edgetype is not None: + try: + edgedata = {"weight": edgetype(data)} + except BaseException as err: + raise TypeError( + f"Failed to convert edge data ({data}) to type {edgetype}" + ) from err + else: + try: # try to evaluate + edgedata = literal_eval(data) + except: + edgedata = {} + G.add_edge(u, v, **edgedata) + + return G + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_multiline_adjlist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + edgetype=None, + encoding="utf-8", +): + """Read graph in multi-line adjacency list format from path. + + Parameters + ---------- + path : string or file + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be decompressed. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + nodetype : Python type, optional + Convert nodes to this type. + + edgetype : Python type, optional + Convert edge data to this type. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels. The default is whitespace. + + Returns + ------- + G: NetworkX graph + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_multiline_adjlist(G, "test.multi_adjlistP4") + >>> G = nx.read_multiline_adjlist("test.multi_adjlistP4") + + The path can be a file or a string with the name of the file. 
If a
+    file is provided, it has to be opened in 'rb' mode.
+
+    >>> fh = open("test.multi_adjlistP4", "rb")
+    >>> G = nx.read_multiline_adjlist(fh)
+
+    Filenames ending in .gz or .bz2 will be compressed.
+
+    >>> nx.write_multiline_adjlist(G, "test.multi_adjlistP4.gz")
+    >>> G = nx.read_multiline_adjlist("test.multi_adjlistP4.gz")
+
+    The optional nodetype is a function to convert node strings to nodetype.
+
+    For example
+
+    >>> G = nx.read_multiline_adjlist("test.multi_adjlistP4", nodetype=int)
+
+    will attempt to convert all nodes to integer type.
+
+    The optional edgetype is a function to convert edge data strings to
+    edgetype.
+
+    >>> G = nx.read_multiline_adjlist("test.multi_adjlistP4")
+
+    The optional create_using parameter is a NetworkX graph container.
+    The default is Graph(), an undirected graph. To read the data as
+    a directed graph use
+
+    >>> G = nx.read_multiline_adjlist("test.multi_adjlistP4", create_using=nx.DiGraph)
+
+    Notes
+    -----
+    This format does not store graph or node data; simple edge data is
+    supported.
+
+    See Also
+    --------
+    write_multiline_adjlist
+    """
+    lines = (line.decode(encoding) for line in path)
+    return parse_multiline_adjlist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+        edgetype=edgetype,
+    )
diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/p2g.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/p2g.py
new file mode 100644
index 0000000..4bde362
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/p2g.py
@@ -0,0 +1,113 @@
+"""
+This module provides the following: read and write of p2g format
+used in metabolic pathway studies.
+
+See:
+
+for a description.
+
+The summary is included here:
+
+A file that describes a uniquely labeled graph (with extension ".gr")
+format looks like the following:
+
+
+name
+3 4
+a
+1 2
+b
+
+c
+0 2
+
+"name" is simply a description of what the graph corresponds to. The
+second line displays the number of nodes and number of edges,
+respectively. This sample graph contains three nodes labeled "a", "b",
+and "c". The rest of the graph contains two lines for each node. The
+first line for a node contains the node label. After the declaration
+of the node label, the out-edges of that node in the graph are
+provided. For instance, "a" is linked to nodes 1 and 2, which are
+labeled "b" and "c", while the node labeled "b" has no outgoing
+edges. Observe that node labeled "c" has an outgoing edge to
+itself. Indeed, self-loops are allowed. Node index starts from 0.
+
+"""
+
+import networkx as nx
+from networkx.utils import open_file
+
+
+@open_file(1, mode="w")
+def write_p2g(G, path, encoding="utf-8"):
+    """Write NetworkX graph in p2g format.
+
+    Notes
+    -----
+    This format is meant to be used with directed graphs with
+    possible self loops.
+    """
+    path.write((f"{G.name}\n").encode(encoding))
+    path.write((f"{G.order()} {G.size()}\n").encode(encoding))
+    nodes = list(G)
+    # make dictionary mapping nodes to integers
+    nodenumber = dict(zip(nodes, range(len(nodes))))
+    for n in nodes:
+        path.write((f"{n}\n").encode(encoding))
+        for nbr in G.neighbors(n):
+            path.write((f"{nodenumber[nbr]} ").encode(encoding))
+        path.write("\n".encode(encoding))
+
+
+@open_file(0, mode="r")
+@nx._dispatchable(graphs=None, returns_graph=True)
+def read_p2g(path, encoding="utf-8"):
+    """Read graph in p2g format from path.
+
+    Parameters
+    ----------
+    path : string or file
+        Filename or file handle to read.
+        Filenames ending in .gz or .bz2 will be decompressed.
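+
+    encoding : string, optional
+        Text encoding used to decode the file contents.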
+ + Returns + ------- + MultiDiGraph + + Notes + ----- + If you want a DiGraph (with no self loops allowed and no edge data) + use D=nx.DiGraph(read_p2g(path)) + """ + lines = (line.decode(encoding) for line in path) + G = parse_p2g(lines) + return G + + +@nx._dispatchable(graphs=None, returns_graph=True) +def parse_p2g(lines): + """Parse p2g format graph from string or iterable. + + Returns + ------- + MultiDiGraph + """ + description = next(lines).strip() + # are multiedges (parallel edges) allowed? + G = nx.MultiDiGraph(name=description, selfloops=True) + nnodes, nedges = map(int, next(lines).split()) + nodelabel = {} + nbrs = {} + # loop over the nodes keeping track of node labels and out neighbors + # defer adding edges until all node labels are known + for i in range(nnodes): + n = next(lines).strip() + nodelabel[i] = n + G.add_node(n) + nbrs[n] = map(int, next(lines).split()) + # now we know all of the node labels so we can add the edges + # with the correct labels + for n in G: + for nbr in nbrs[n]: + G.add_edge(n, nodelabel[nbr]) + return G diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/pajek.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/pajek.py new file mode 100644 index 0000000..2cab6b9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/pajek.py @@ -0,0 +1,286 @@ +""" +***** +Pajek +***** +Read graphs in Pajek format. + +This implementation handles directed and undirected graphs including +those with self loops and parallel edges. + +Format +------ +See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm +for format information. + +""" + +import warnings + +import networkx as nx +from networkx.utils import open_file + +__all__ = ["read_pajek", "parse_pajek", "generate_pajek", "write_pajek"] + + +def generate_pajek(G): + """Generate lines in Pajek graph format. + + Parameters + ---------- + G : graph + A Networkx graph + + References + ---------- + See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm + for format information. + """ + if G.name == "": + name = "NetworkX" + else: + name = G.name + # Apparently many Pajek format readers can't process this line + # So we'll leave it out for now. + # yield '*network %s'%name + + # write nodes with attributes + yield f"*vertices {G.order()}" + nodes = list(G) + # make dictionary mapping nodes to integers + nodenumber = dict(zip(nodes, range(1, len(nodes) + 1))) + for n in nodes: + # copy node attributes and pop mandatory attributes + # to avoid duplication. + na = G.nodes.get(n, {}).copy() + x = na.pop("x", 0.0) + y = na.pop("y", 0.0) + try: + id = int(na.pop("id", nodenumber[n])) + except ValueError as err: + err.args += ( + ( + "Pajek format requires 'id' to be an int()." + " Refer to the 'Relabeling nodes' section." + ), + ) + raise + nodenumber[n] = id + shape = na.pop("shape", "ellipse") + s = " ".join(map(make_qstr, (id, n, x, y, shape))) + # only optional attributes are left in na. + for k, v in na.items(): + if isinstance(v, str) and v.strip() != "": + s += f" {make_qstr(k)} {make_qstr(v)}" + else: + warnings.warn( + f"Node attribute {k} is not processed. {('Empty attribute' if isinstance(v, str) else 'Non-string attribute')}." 
+ ) + yield s + + # write edges with attributes + if G.is_directed(): + yield "*arcs" + else: + yield "*edges" + for u, v, edgedata in G.edges(data=True): + d = edgedata.copy() + value = d.pop("weight", 1.0) # use 1 as default edge value + s = " ".join(map(make_qstr, (nodenumber[u], nodenumber[v], value))) + for k, v in d.items(): + if isinstance(v, str) and v.strip() != "": + s += f" {make_qstr(k)} {make_qstr(v)}" + else: + warnings.warn( + f"Edge attribute {k} is not processed. {('Empty attribute' if isinstance(v, str) else 'Non-string attribute')}." + ) + yield s + + +@open_file(1, mode="wb") +def write_pajek(G, path, encoding="UTF-8"): + """Write graph in Pajek format to path. + + Parameters + ---------- + G : graph + A Networkx graph + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_pajek(G, "test.netP4") + + Warnings + -------- + Optional node attributes and edge attributes must be non-empty strings. + Otherwise it will not be written into the file. You will need to + convert those attributes to strings if you want to keep them. + + References + ---------- + See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm + for format information. + """ + for line in generate_pajek(G): + line += "\n" + path.write(line.encode(encoding)) + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_pajek(path, encoding="UTF-8"): + """Read graph in Pajek format from path. + + Parameters + ---------- + path : file or string + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be decompressed. + + Returns + ------- + G : NetworkX MultiGraph or MultiDiGraph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_pajek(G, "test.net") + >>> G = nx.read_pajek("test.net") + + To create a Graph instead of a MultiGraph use + + >>> G1 = nx.Graph(G) + + References + ---------- + See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm + for format information. + """ + lines = (line.decode(encoding) for line in path) + return parse_pajek(lines) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def parse_pajek(lines): + """Parse Pajek format graph from string or iterable. + + Parameters + ---------- + lines : string or iterable + Data in Pajek format. + + Returns + ------- + G : NetworkX graph + + See Also + -------- + read_pajek + + """ + import shlex + + # multigraph=False + if isinstance(lines, str): + lines = iter(lines.split("\n")) + lines = iter([line.rstrip("\n") for line in lines]) + G = nx.MultiDiGraph() # are multiedges allowed in Pajek? 
assume yes + labels = [] # in the order of the file, needed for matrix + while lines: + try: + l = next(lines) + except: # EOF + break + if l.lower().startswith("*network"): + try: + label, name = l.split(None, 1) + except ValueError: + # Line was not of the form: *network NAME + pass + else: + G.graph["name"] = name + elif l.lower().startswith("*vertices"): + nodelabels = {} + l, nnodes = l.split() + for i in range(int(nnodes)): + l = next(lines) + try: + splitline = [ + x.decode("utf-8") for x in shlex.split(str(l).encode("utf-8")) + ] + except AttributeError: + splitline = shlex.split(str(l)) + id, label = splitline[0:2] + labels.append(label) + G.add_node(label) + nodelabels[id] = label + G.nodes[label]["id"] = id + try: + x, y, shape = splitline[2:5] + G.nodes[label].update( + {"x": float(x), "y": float(y), "shape": shape} + ) + except: + pass + extra_attr = zip(splitline[5::2], splitline[6::2]) + G.nodes[label].update(extra_attr) + elif l.lower().startswith("*edges") or l.lower().startswith("*arcs"): + if l.lower().startswith("*edge"): + # switch from multidigraph to multigraph + G = nx.MultiGraph(G) + if l.lower().startswith("*arcs"): + # switch to directed with multiple arcs for each existing edge + G = G.to_directed() + for l in lines: + try: + splitline = [ + x.decode("utf-8") for x in shlex.split(str(l).encode("utf-8")) + ] + except AttributeError: + splitline = shlex.split(str(l)) + + if len(splitline) < 2: + continue + ui, vi = splitline[0:2] + u = nodelabels.get(ui, ui) + v = nodelabels.get(vi, vi) + # parse the data attached to this edge and put in a dictionary + edge_data = {} + try: + # there should always be a single value on the edge? + w = splitline[2:3] + edge_data.update({"weight": float(w[0])}) + except: + pass + # if there isn't, just assign a 1 + # edge_data.update({'value':1}) + extra_attr = zip(splitline[3::2], splitline[4::2]) + edge_data.update(extra_attr) + # if G.has_edge(u,v): + # multigraph=True + G.add_edge(u, v, **edge_data) + elif l.lower().startswith("*matrix"): + G = nx.DiGraph(G) + adj_list = ( + (labels[row], labels[col], {"weight": int(data)}) + for (row, line) in enumerate(lines) + for (col, data) in enumerate(line.split()) + if int(data) != 0 + ) + G.add_edges_from(adj_list) + + return G + + +def make_qstr(t): + """Returns the string representation of t. + Add outer double-quotes if the string has a space. + """ + if not isinstance(t, str): + t = str(t) + if " " in t: + t = f'"{t}"' + return t diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/sparse6.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/sparse6.py new file mode 100644 index 0000000..b82ae5c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/sparse6.py @@ -0,0 +1,379 @@ +# Original author: D. Eppstein, UC Irvine, August 12, 2003. +# The original code at https://www.ics.uci.edu/~eppstein/PADS/ is public domain. +"""Functions for reading and writing graphs in the *sparse6* format. + +The *sparse6* file format is a space-efficient format for large sparse +graphs. For small graphs or large dense graphs, use the *graph6* file +format. + +For more information, see the `sparse6`_ homepage. + +.. 
_sparse6: https://users.cecs.anu.edu.au/~bdm/data/formats.html + +""" + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.readwrite.graph6 import data_to_n, n_to_data +from networkx.utils import not_implemented_for, open_file + +__all__ = ["from_sparse6_bytes", "read_sparse6", "to_sparse6_bytes", "write_sparse6"] + + +def _generate_sparse6_bytes(G, nodes, header): + """Yield bytes in the sparse6 encoding of a graph. + + `G` is an undirected simple graph. `nodes` is the list of nodes for + which the node-induced subgraph will be encoded; if `nodes` is the + list of all nodes in the graph, the entire graph will be + encoded. `header` is a Boolean that specifies whether to generate + the header ``b'>>sparse6<<'`` before the remaining data. + + This function generates `bytes` objects in the following order: + + 1. the header (if requested), + 2. the encoding of the number of nodes, + 3. each character, one-at-a-time, in the encoding of the requested + node-induced subgraph, + 4. a newline character. + + This function raises :exc:`ValueError` if the graph is too large for + the graph6 format (that is, greater than ``2 ** 36`` nodes). + + """ + n = len(G) + if n >= 2**36: + raise ValueError( + "sparse6 is only defined if number of nodes is less than 2 ** 36" + ) + if header: + yield b">>sparse6<<" + yield b":" + for d in n_to_data(n): + yield str.encode(chr(d + 63)) + + k = 1 + while 1 << k < n: + k += 1 + + def enc(x): + """Big endian k-bit encoding of x""" + return [1 if (x & 1 << (k - 1 - i)) else 0 for i in range(k)] + + edges = sorted((max(u, v), min(u, v)) for u, v in G.edges()) + bits = [] + curv = 0 + for v, u in edges: + if v == curv: # current vertex edge + bits.append(0) + bits.extend(enc(u)) + elif v == curv + 1: # next vertex edge + curv += 1 + bits.append(1) + bits.extend(enc(u)) + else: # skip to vertex v and then add edge to u + curv = v + bits.append(1) + bits.extend(enc(v)) + bits.append(0) + bits.extend(enc(u)) + if k < 6 and n == (1 << k) and ((-len(bits)) % 6) >= k and curv < (n - 1): + # Padding special case: small k, n=2^k, + # more than k bits of padding needed, + # current vertex is not (n-1) -- + # appending 1111... would add a loop on (n-1) + bits.append(0) + bits.extend([1] * ((-len(bits)) % 6)) + else: + bits.extend([1] * ((-len(bits)) % 6)) + + data = [ + (bits[i + 0] << 5) + + (bits[i + 1] << 4) + + (bits[i + 2] << 3) + + (bits[i + 3] << 2) + + (bits[i + 4] << 1) + + (bits[i + 5] << 0) + for i in range(0, len(bits), 6) + ] + + for d in data: + yield str.encode(chr(d + 63)) + yield b"\n" + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_sparse6_bytes(string): + """Read an undirected graph in sparse6 format from string. + + Parameters + ---------- + string : string + Data in sparse6 format + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If the string is unable to be parsed in sparse6 format + + Examples + -------- + >>> G = nx.from_sparse6_bytes(b":A_") + >>> sorted(G.edges()) + [(0, 1), (0, 1), (0, 1)] + + See Also + -------- + read_sparse6, write_sparse6 + + References + ---------- + .. 
[1] Sparse6 specification + + + """ + if string.startswith(b">>sparse6<<"): + string = string[11:] + if not string.startswith(b":"): + raise NetworkXError("Expected leading colon in sparse6") + + chars = [c - 63 for c in string[1:]] + n, data = data_to_n(chars) + k = 1 + while 1 << k < n: + k += 1 + + def parseData(): + """Returns stream of pairs b[i], x[i] for sparse6 format.""" + chunks = iter(data) + d = None # partial data word + dLen = 0 # how many unparsed bits are left in d + + while 1: + if dLen < 1: + try: + d = next(chunks) + except StopIteration: + return + dLen = 6 + dLen -= 1 + b = (d >> dLen) & 1 # grab top remaining bit + + x = d & ((1 << dLen) - 1) # partially built up value of x + xLen = dLen # how many bits included so far in x + while xLen < k: # now grab full chunks until we have enough + try: + d = next(chunks) + except StopIteration: + return + dLen = 6 + x = (x << 6) + d + xLen += 6 + x = x >> (xLen - k) # shift back the extra bits + dLen = xLen - k + yield b, x + + v = 0 + + G = nx.MultiGraph() + G.add_nodes_from(range(n)) + + multigraph = False + for b, x in parseData(): + if b == 1: + v += 1 + # padding with ones can cause overlarge number here + if x >= n or v >= n: + break + elif x > v: + v = x + else: + if G.has_edge(x, v): + multigraph = True + G.add_edge(x, v) + if not multigraph: + G = nx.Graph(G) + return G + + +def to_sparse6_bytes(G, nodes=None, header=True): + """Convert an undirected graph to bytes in sparse6 format. + + Parameters + ---------- + G : Graph (undirected) + + nodes: list or iterable + Nodes are labeled 0...n-1 in the order provided. If None the ordering + given by ``G.nodes()`` is used. + + header: bool + If True add '>>sparse6<<' bytes to head of data. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed. + + ValueError + If the graph has at least ``2 ** 36`` nodes; the sparse6 format + is only defined for graphs of order less than ``2 ** 36``. + + Examples + -------- + >>> nx.to_sparse6_bytes(nx.path_graph(2)) + b'>>sparse6<<:An\\n' + + See Also + -------- + to_sparse6_bytes, read_sparse6, write_sparse6_bytes + + Notes + ----- + The returned bytes end with a newline character. + + The format does not support edge or node labels. + + References + ---------- + .. [1] Graph6 specification + + + """ + if nodes is not None: + G = G.subgraph(nodes) + G = nx.convert_node_labels_to_integers(G, ordering="sorted") + return b"".join(_generate_sparse6_bytes(G, nodes, header)) + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_sparse6(path): + """Read an undirected graph in sparse6 format from path. + + Parameters + ---------- + path : file or string + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be decompressed. + + Returns + ------- + G : Graph/Multigraph or list of Graphs/MultiGraphs + If the file contains multiple lines then a list of graphs is returned + + Raises + ------ + NetworkXError + If the string is unable to be parsed in sparse6 format + + Examples + -------- + You can read a sparse6 file by giving the path to the file:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile(delete=False) as f: + ... _ = f.write(b">>sparse6<<:An\\n") + ... _ = f.seek(0) + ... G = nx.read_sparse6(f.name) + >>> list(G.edges()) + [(0, 1)] + + You can also read a sparse6 file by giving an open file-like object:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile() as f: + ... _ = f.write(b">>sparse6<<:An\\n") + ... _ = f.seek(0) + ... 
G = nx.read_sparse6(f) + >>> list(G.edges()) + [(0, 1)] + + See Also + -------- + read_sparse6, from_sparse6_bytes + + References + ---------- + .. [1] Sparse6 specification + + + """ + glist = [] + for line in path: + line = line.strip() + if not len(line): + continue + glist.append(from_sparse6_bytes(line)) + if len(glist) == 1: + return glist[0] + else: + return glist + + +@not_implemented_for("directed") +@open_file(1, mode="wb") +def write_sparse6(G, path, nodes=None, header=True): + """Write graph G to given path in sparse6 format. + + Parameters + ---------- + G : Graph (undirected) + + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. + + nodes: list or iterable + Nodes are labeled 0...n-1 in the order provided. If None the ordering + given by G.nodes() is used. + + header: bool + If True add '>>sparse6<<' string to head of data + + Raises + ------ + NetworkXError + If the graph is directed + + Examples + -------- + You can write a sparse6 file by giving the path to the file:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile(delete=False) as f: + ... nx.write_sparse6(nx.path_graph(2), f.name) + ... print(f.read()) + b'>>sparse6<<:An\\n' + + You can also write a sparse6 file by giving an open file-like object:: + + >>> with tempfile.NamedTemporaryFile() as f: + ... nx.write_sparse6(nx.path_graph(2), f) + ... _ = f.seek(0) + ... print(f.read()) + b'>>sparse6<<:An\\n' + + See Also + -------- + read_sparse6, from_sparse6_bytes + + Notes + ----- + The format does not support edge or node labels. + + References + ---------- + .. [1] Sparse6 specification + + + """ + if nodes is not None: + G = G.subgraph(nodes) + G = nx.convert_node_labels_to_integers(G, ordering="sorted") + for b in _generate_sparse6_bytes(G, nodes, header): + path.write(b) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..f6c606a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-312.pyc new file mode 100644 index 0000000..6fd6c93 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-312.pyc new file mode 100644 index 0000000..0d72b0f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-312.pyc new file mode 100644 index 0000000..84c43c3 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-312.pyc new file mode 100644 index 0000000..1e2489c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-312.pyc new file mode 100644 index 0000000..a557663 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-312.pyc new file mode 100644 index 0000000..7f34fd8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-312.pyc new file mode 100644 index 0000000..3f7cae6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-312.pyc new file mode 100644 index 0000000..af4c2ab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-312.pyc new file mode 100644 index 0000000..af6a082 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-312.pyc new file mode 100644 index 0000000..e016a64 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-312.pyc new file mode 100644 index 0000000..45676f2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_adjlist.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_adjlist.py new file mode 100644 index 0000000..f2218eb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_adjlist.py @@ -0,0 
+1,262 @@ +""" +Unit tests for adjlist. +""" + +import io + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestAdjlist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_read_multiline_adjlist_1(self): + # Unit test for https://networkx.lanl.gov/trac/ticket/252 + s = b"""# comment line +1 2 +# comment line +2 +3 +""" + bytesIO = io.BytesIO(s) + G = nx.read_multiline_adjlist(bytesIO) + adj = {"1": {"3": {}, "2": {}}, "3": {"1": {}}, "2": {"1": {}}} + assert graphs_equal(G, nx.Graph(adj)) + + def test_unicode(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname) + assert graphs_equal(G, H) + + def test_latin1_err(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "adjlist.txt" + with pytest.raises(UnicodeEncodeError): + nx.write_multiline_adjlist(G, fname, encoding="latin-1") + + def test_latin1(self, tmp_path): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname, encoding="latin-1") + H = nx.read_multiline_adjlist(fname, encoding="latin-1") + assert graphs_equal(G, H) + + def test_parse_adjlist(self): + lines = ["1 2 5", "2 3 4", "3 5", "4", "5"] + nx.parse_adjlist(lines, nodetype=int) # smoke test + with pytest.raises(TypeError): + nx.parse_adjlist(lines, nodetype="int") + lines = ["1 2 5", "2 b", "c"] + with pytest.raises(TypeError): + nx.parse_adjlist(lines, nodetype=int) + + def test_adjlist_graph(self, tmp_path): + G = self.G + fname = tmp_path / "adjlist.txt" + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname) + H2 = nx.read_adjlist(fname) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_digraph(self, tmp_path): + G = self.DG + fname = tmp_path / "adjlist.txt" + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, create_using=nx.DiGraph()) + H2 = nx.read_adjlist(fname, create_using=nx.DiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_integers(self, tmp_path): + fname = tmp_path / "adjlist.txt" + G = nx.convert_node_labels_to_integers(self.G) + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int) + H2 = nx.read_adjlist(fname, nodetype=int) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_multigraph(self, tmp_path): + G = self.XG + fname = tmp_path / "adjlist.txt" + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_adjlist(fname, 
nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_multidigraph(self, tmp_path): + G = self.XDG + fname = tmp_path / "adjlist.txt" + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + H2 = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_delimiter(self): + fh = io.BytesIO() + G = nx.path_graph(3) + nx.write_adjlist(G, fh, delimiter=":") + fh.seek(0) + H = nx.read_adjlist(fh, nodetype=int, delimiter=":") + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + +class TestMultilineAdjlist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.DG.remove_edge("b", "a") + cls.DG.remove_edge("b", "c") + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_parse_multiline_adjlist(self): + lines = [ + "1 2", + "b {'weight':3, 'name': 'Frodo'}", + "c {}", + "d 1", + "e {'weight':6, 'name': 'Saruman'}", + ] + nx.parse_multiline_adjlist(iter(lines)) # smoke test + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + nx.parse_multiline_adjlist(iter(lines), edgetype=str) # smoke test + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + lines = ["1 a"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + lines = ["a 2"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + lines = ["1 2"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + lines = ["1 2", "2 {}"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + + def test_multiline_adjlist_graph(self, tmp_path): + G = self.G + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname) + H2 = nx.read_multiline_adjlist(fname) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_digraph(self, tmp_path): + G = self.DG + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, create_using=nx.DiGraph()) + H2 = nx.read_multiline_adjlist(fname, create_using=nx.DiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_integers(self, tmp_path): + fname = tmp_path / "adjlist.txt" + G = nx.convert_node_labels_to_integers(self.G) + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, nodetype=int) + H2 = nx.read_multiline_adjlist(fname, nodetype=int) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_multigraph(self, tmp_path): + G = self.XG + fname = tmp_path 
/ "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiGraph() + ) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_multidigraph(self, tmp_path): + G = self.XDG + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiDiGraph() + ) + H2 = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiDiGraph() + ) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_delimiter(self): + fh = io.BytesIO() + G = nx.path_graph(3) + nx.write_multiline_adjlist(G, fh, delimiter=":") + fh.seek(0) + H = nx.read_multiline_adjlist(fh, nodetype=int, delimiter=":") + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + +@pytest.mark.parametrize( + ("lines", "delim"), + ( + (["1 2 5", "2 3 4", "3 5", "4", "5"], None), # No extra whitespace + (["1\t2\t5", "2\t3\t4", "3\t5", "4", "5"], "\t"), # tab-delimited + ( + ["1\t2\t5", "2\t3\t4", "3\t5\t", "4\t", "5"], + "\t", + ), # tab-delimited, extra delims + ( + ["1\t2\t5", "2\t3\t4", "3\t5\t\t\n", "4\t", "5"], + "\t", + ), # extra delim+newlines + ), +) +def test_adjlist_rstrip_parsing(lines, delim): + """Regression test related to gh-7465""" + expected = nx.Graph([(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)]) + nx.utils.graphs_equal(nx.parse_adjlist(lines, delimiter=delim), expected) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_edgelist.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_edgelist.py new file mode 100644 index 0000000..fe58b3b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_edgelist.py @@ -0,0 +1,314 @@ +""" +Unit tests for edgelists. 
+""" + +import io +import textwrap + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + +edges_no_data = textwrap.dedent( + """ + # comment line + 1 2 + # comment line + 2 3 + """ +) + + +edges_with_values = textwrap.dedent( + """ + # comment line + 1 2 2.0 + # comment line + 2 3 3.0 + """ +) + + +edges_with_weight = textwrap.dedent( + """ + # comment line + 1 2 {'weight':2.0} + # comment line + 2 3 {'weight':3.0} + """ +) + + +edges_with_multiple_attrs = textwrap.dedent( + """ + # comment line + 1 2 {'weight':2.0, 'color':'green'} + # comment line + 2 3 {'weight':3.0, 'color':'red'} + """ +) + + +edges_with_multiple_attrs_csv = textwrap.dedent( + """ + # comment line + 1, 2, {'weight':2.0, 'color':'green'} + # comment line + 2, 3, {'weight':3.0, 'color':'red'} + """ +) + + +_expected_edges_weights = [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})] +_expected_edges_multiattr = [ + (1, 2, {"weight": 2.0, "color": "green"}), + (2, 3, {"weight": 3.0, "color": "red"}), +] + + +@pytest.mark.parametrize( + ("data", "extra_kwargs"), + ( + (edges_no_data, {}), + (edges_with_values, {}), + (edges_with_weight, {}), + (edges_with_multiple_attrs, {}), + (edges_with_multiple_attrs_csv, {"delimiter": ","}), + ), +) +def test_read_edgelist_no_data(data, extra_kwargs): + bytesIO = io.BytesIO(data.encode("utf-8")) + G = nx.read_edgelist(bytesIO, nodetype=int, data=False, **extra_kwargs) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + +def test_read_weighted_edgelist(): + bytesIO = io.BytesIO(edges_with_values.encode("utf-8")) + G = nx.read_weighted_edgelist(bytesIO, nodetype=int) + assert edges_equal(G.edges(data=True), _expected_edges_weights) + + +@pytest.mark.parametrize( + ("data", "extra_kwargs", "expected"), + ( + (edges_with_weight, {}, _expected_edges_weights), + (edges_with_multiple_attrs, {}, _expected_edges_multiattr), + (edges_with_multiple_attrs_csv, {"delimiter": ","}, _expected_edges_multiattr), + ), +) +def test_read_edgelist_with_data(data, extra_kwargs, expected): + bytesIO = io.BytesIO(data.encode("utf-8")) + G = nx.read_edgelist(bytesIO, nodetype=int, **extra_kwargs) + assert edges_equal(G.edges(data=True), expected) + + +@pytest.fixture +def example_graph(): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 3.0), (2, 3, 27.0), (3, 4, 3.0)]) + return G + + +def test_parse_edgelist_no_data(example_graph): + G = example_graph + H = nx.parse_edgelist(["1 2", "2 3", "3 4"], nodetype=int) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges, H.edges) + + +def test_parse_edgelist_with_data_dict(example_graph): + G = example_graph + H = nx.parse_edgelist( + ["1 2 {'weight': 3}", "2 3 {'weight': 27}", "3 4 {'weight': 3.0}"], nodetype=int + ) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + +def test_parse_edgelist_with_data_list(example_graph): + G = example_graph + H = nx.parse_edgelist( + ["1 2 3", "2 3 27", "3 4 3.0"], nodetype=int, data=(("weight", float),) + ) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + +def test_parse_edgelist(): + # ignore lines with less than 2 nodes + lines = ["1;2", "2 3", "3 4"] + G = nx.parse_edgelist(lines, nodetype=int) + assert list(G.edges()) == [(2, 3), (3, 4)] + # unknown nodetype + with pytest.raises(TypeError, match="Failed to convert nodes"): + lines = ["1 2", "2 3", "3 4"] + nx.parse_edgelist(lines, nodetype="nope") + # lines have invalid edge format + with 
pytest.raises(TypeError, match="Failed to convert edge data"): + lines = ["1 2 3", "2 3", "3 4"] + nx.parse_edgelist(lines, nodetype=int) + # edge data and data_keys not the same length + with pytest.raises(IndexError, match="not the same length"): + lines = ["1 2 3", "2 3 27", "3 4 3.0"] + nx.parse_edgelist( + lines, nodetype=int, data=(("weight", float), ("capacity", int)) + ) + # edge data can't be converted to edge type + with pytest.raises(TypeError, match="Failed to convert"): + lines = ["1 2 't1'", "2 3 't3'", "3 4 't3'"] + nx.parse_edgelist(lines, nodetype=int, data=(("weight", float),)) + + +def test_comments_None(): + edgelist = ["node#1 node#2", "node#2 node#3"] + # comments=None supported to ignore all comment characters + G = nx.parse_edgelist(edgelist, comments=None) + H = nx.Graph([e.split(" ") for e in edgelist]) + assert edges_equal(G.edges, H.edges) + + +class TestEdgelist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_write_edgelist_1(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + nx.write_edgelist(G, fh, data=False) + fh.seek(0) + assert fh.read() == b"1 2\n2 3\n" + + def test_write_edgelist_2(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + nx.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {}\n2 3 {}\n" + + def test_write_edgelist_3(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + nx.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {'weight': 2.0}\n2 3 {'weight': 3.0}\n" + + def test_write_edgelist_4(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + nx.write_edgelist(G, fh, data=[("weight")]) + fh.seek(0) + assert fh.read() == b"1 2 2.0\n2 3 3.0\n" + + def test_unicode(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname) + assert graphs_equal(G, H) + + def test_latin1_issue(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "el.txt" + with pytest.raises(UnicodeEncodeError): + nx.write_edgelist(G, fname, encoding="latin-1") + + def test_latin1(self, tmp_path): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "el.txt" + + nx.write_edgelist(G, fname, encoding="latin-1") + H = nx.read_edgelist(fname, encoding="latin-1") + assert graphs_equal(G, H) + + def test_edgelist_graph(self, tmp_path): + G = self.G + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname) + H2 = nx.read_edgelist(fname) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_digraph(self, tmp_path): 
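+        # Same round-trip as test_edgelist_graph, but parsed back with
+        # create_using=nx.DiGraph() so edge direction is preserved.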
+ G = self.DG + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, create_using=nx.DiGraph()) + H2 = nx.read_edgelist(fname, create_using=nx.DiGraph()) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_integers(self, tmp_path): + G = nx.convert_node_labels_to_integers(self.G) + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int) + # isolated nodes are not written in edgelist + G.remove_nodes_from(list(nx.isolates(G))) + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_multigraph(self, tmp_path): + G = self.XG + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_multidigraph(self, tmp_path): + G = self.XDG + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + H2 = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + +def test_edgelist_consistent_strip_handling(): + """See gh-7462 + + Input when printed looks like:: + + 1 2 3 + 2 3 + 3 4 3.0 + + Note the trailing \\t after the `3` in the second row, indicating an empty + data value. + """ + s = io.StringIO("1\t2\t3\n2\t3\t\n3\t4\t3.0") + G = nx.parse_edgelist(s, delimiter="\t", nodetype=int, data=[("value", str)]) + assert sorted(G.edges(data="value")) == [(1, 2, "3"), (2, 3, ""), (3, 4, "3.0")] diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_gexf.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_gexf.py new file mode 100644 index 0000000..d69a5ef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_gexf.py @@ -0,0 +1,579 @@ +import io +import time + +import pytest + +import networkx as nx + + +@pytest.mark.parametrize("time_attr", ("start", "end")) +@pytest.mark.parametrize("dyn_attr", ("static", "dynamic")) +def test_dynamic_graph_has_timeformat(time_attr, dyn_attr, tmp_path): + """Ensure that graphs which have a 'start' or 'stop' attribute get a + 'timeformat' attribute upon parsing. 
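+    The writer declares timeformat="long" so that consumers know how to
+    interpret the start/end values.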
See gh-7914.""" + G = nx.MultiGraph(mode=dyn_attr) + G.add_node(0) + G.nodes[0][time_attr] = 1 + # Write out + fname = tmp_path / "foo.gexf" + nx.write_gexf(G, fname) + # Check that timeformat is added to saved data + with open(fname) as fh: + assert 'timeformat="long"' in fh.read() + # Round-trip + H = nx.read_gexf(fname) + # If any node has a "start" or "end" attr, it is considered dynamic + # regardless of the graph "mode" attr + assert H.graph["mode"] == "dynamic" + assert nx.utils.nodes_equal(G.edges, H.edges) + + +class TestGEXF: + @classmethod + def setup_class(cls): + cls.simple_directed_data = """ + + + + + + + + + + + +""" + cls.simple_directed_graph = nx.DiGraph() + cls.simple_directed_graph.add_node("0", label="Hello") + cls.simple_directed_graph.add_node("1", label="World") + cls.simple_directed_graph.add_edge("0", "1", id="0") + + cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8")) + + cls.attribute_data = """\ + + + Gephi.org + A Web network + + + + + + + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + cls.attribute_graph = nx.DiGraph() + cls.attribute_graph.graph["node_default"] = {"frog": True} + cls.attribute_graph.add_node( + "0", label="Gephi", url="https://gephi.org", indegree=1, frog=False + ) + cls.attribute_graph.add_node( + "1", label="Webatlas", url="http://webatlas.fr", indegree=2, frog=False + ) + cls.attribute_graph.add_node( + "2", label="RTGI", url="http://rtgi.fr", indegree=1, frog=True + ) + cls.attribute_graph.add_node( + "3", + label="BarabasiLab", + url="http://barabasilab.com", + indegree=1, + frog=True, + ) + cls.attribute_graph.add_edge("0", "1", id="0", label="foo") + cls.attribute_graph.add_edge("0", "2", id="1") + cls.attribute_graph.add_edge("1", "0", id="2") + cls.attribute_graph.add_edge("2", "1", id="3") + cls.attribute_graph.add_edge("0", "3", id="4") + cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8")) + + cls.simple_undirected_data = """ + + + + + + + + + + + +""" + cls.simple_undirected_graph = nx.Graph() + cls.simple_undirected_graph.add_node("0", label="Hello") + cls.simple_undirected_graph.add_node("1", label="World") + cls.simple_undirected_graph.add_edge("0", "1", id="0") + + cls.simple_undirected_fh = io.BytesIO( + cls.simple_undirected_data.encode("UTF-8") + ) + + def test_read_simple_directed_graphml(self): + G = self.simple_directed_graph + H = nx.read_gexf(self.simple_directed_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_write_read_simple_directed_graphml(self): + G = self.simple_directed_graph + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_read_simple_undirected_graphml(self): + G = self.simple_undirected_graph + H = nx.read_gexf(self.simple_undirected_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + self.simple_undirected_fh.seek(0) + + def test_read_attribute_graphml(self): + G = self.attribute_graph + H = nx.read_gexf(self.attribute_fh) + assert sorted(G.nodes(True)) == sorted(H.nodes(data=True)) + ge = sorted(G.edges(data=True)) + he = 
sorted(H.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + self.attribute_fh.seek(0) + + def test_directed_edge_in_undirected(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_undirected_edge_in_directed(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_key_raises(self): + s = """ + + + + + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_relabel(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_gexf(fh, relabel=True) + assert sorted(G.nodes()) == ["Hello", "Word"] + + def test_default_attribute(self): + G = nx.Graph() + G.add_node(1, label="1", color="green") + nx.add_path(G, [0, 1, 2, 3]) + G.add_edge(1, 2, foo=3) + G.graph["node_default"] = {"color": "yellow"} + G.graph["edge_default"] = {"foo": 7} + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + # Reading a gexf graph always sets mode attribute to either + # 'static' or 'dynamic'. Remove the mode attribute from the + # read graph for the sake of comparing remaining attributes. + del H.graph["mode"] + assert G.graph == H.graph + + def test_serialize_ints_to_strings(self): + G = nx.Graph() + G.add_node(1, id=7, label=77) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert list(H) == [7] + assert H.nodes[7]["label"] == "77" + + def test_write_with_node_attributes(self): + # Addresses #673. 
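+        # Node attributes using the reserved GEXF names (id, label, pid,
+        # start, end) should be emitted as attributes of the <node> element
+        # itself rather than as <attvalue> data entries.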
+ G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3)]) + for i in range(4): + G.nodes[i]["id"] = i + G.nodes[i]["label"] = i + G.nodes[i]["pid"] = i + G.nodes[i]["start"] = i + G.nodes[i]["end"] = i + 1 + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + +""" + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_edge_id_construct(self): + G = nx.Graph() + G.add_edges_from([(0, 1, {"id": 0}), (1, 2, {"id": 2}), (2, 3)]) + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + +""" + + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_numpy_type(self): + np = pytest.importorskip("numpy") + G = nx.path_graph(4) + nx.set_node_attributes(G, {n: n for n in np.arange(4)}, "number") + G[0][1]["edge-number"] = np.float64(1.1) + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_bool(self): + G = nx.Graph() + G.add_node(1, testattr=True) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[1]["testattr"] + + # Test for NaN, INF and -INF + def test_specials(self): + from math import isnan + + inf, nan = float("inf"), float("nan") + G = nx.Graph() + G.add_node(1, testattr=inf, strdata="inf", key="a") + G.add_node(2, testattr=nan, strdata="nan", key="b") + G.add_node(3, testattr=-inf, strdata="-inf", key="c") + + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + filetext = fh.read() + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + + assert b"INF" in filetext + assert b"NaN" in filetext + assert b"-INF" in filetext + + assert H.nodes[1]["testattr"] == inf + assert isnan(H.nodes[2]["testattr"]) + assert H.nodes[3]["testattr"] == -inf + + assert H.nodes[1]["strdata"] == "inf" + assert H.nodes[2]["strdata"] == "nan" + assert H.nodes[3]["strdata"] == "-inf" + + assert H.nodes[1]["networkx_key"] == "a" + assert H.nodes[2]["networkx_key"] == "b" + assert H.nodes[3]["networkx_key"] == "c" + + def test_simple_list(self): + G = nx.Graph() + list_value = [(1, 2, 3), (9, 1, 2)] + G.add_node(1, key=list_value) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[1]["networkx_key"] == list_value + + def test_dynamic_mode(self): + G = nx.Graph() + G.add_node(1, label="1", color="green") + G.graph["mode"] = "dynamic" + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_multigraph_with_missing_attributes(self): + G = nx.MultiGraph() + G.add_node(0, label="1", color="green") + G.add_node(1, label="2", color="green") + G.add_edge(0, 1, id="0", weight=3, type="undirected", start=0, end=1) + G.add_edge(0, 1, id="1", label="foo", start=0, end=1) + G.add_edge(0, 1) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_missing_viz_attributes(self): + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["viz"] = {"size": 54} + G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0} + G.nodes[0]["viz"]["color"] = {"r": 0, 
"g": 0, "b": 256} + G.nodes[0]["viz"]["shape"] = "http://random.url" + G.nodes[0]["viz"]["thickness"] = 2 + fh = io.BytesIO() + nx.write_gexf(G, fh, version="1.1draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + # Test missing alpha value for version >draft1.1 - set default alpha value + # to 1.0 instead of `None` when writing for better general compatibility + fh = io.BytesIO() + # G.nodes[0]["viz"]["color"] does not have an alpha value explicitly defined + # so the default is used instead + nx.write_gexf(G, fh, version="1.2draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[0]["viz"]["color"]["a"] == 1.0 + + # Second graph for the other branch + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["viz"] = {"size": 54} + G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0} + G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256, "a": 0.5} + G.nodes[0]["viz"]["shape"] = "ftp://random.url" + G.nodes[0]["viz"]["thickness"] = 2 + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_slice_and_spell(self): + # Test spell first, so version = 1.2 + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["spells"] = [(1, 2)] + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["slices"] = [(1, 2)] + fh = io.BytesIO() + nx.write_gexf(G, fh, version="1.1draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_add_parent(self): + G = nx.Graph() + G.add_node(0, label="1", color="green", parents=[1, 2]) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_gml.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_gml.py new file mode 100644 index 0000000..df25097 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_gml.py @@ -0,0 +1,744 @@ +import codecs +import io +import math +from ast import literal_eval +from contextlib import contextmanager +from textwrap import dedent + +import pytest + +import networkx as nx +from networkx.readwrite.gml import literal_destringizer, literal_stringizer + + +class TestGraph: + @classmethod + def setup_class(cls): + cls.simple_data = """Creator "me" +Version "xx" +graph [ + comment "This is a sample graph" + directed 1 + IsPlanar 1 + pos [ x 0 y 1 ] + node [ + id 1 + label "Node 1" + pos [ x 1 y 1 ] + ] + node [ + id 2 + pos [ x 1 y 2 ] + label "Node 2" + ] + node [ + id 3 + label "Node 3" + pos [ x 1 y 3 ] + ] + edge [ + source 1 + target 2 + label "Edge from node 1 to node 2" + color [line "blue" thickness 3] + + ] + edge [ + source 2 + target 3 + label "Edge from 
node 2 to node 3" + ] + edge [ + source 3 + target 1 + label "Edge from node 3 to node 1" + ] +] +""" + + def test_parse_gml_cytoscape_bug(self): + # example from issue #321, originally #324 in trac + cytoscape_example = """ +Creator "Cytoscape" +Version 1.0 +graph [ + node [ + root_index -3 + id -3 + graphics [ + x -96.0 + y -67.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node2" + ] + node [ + root_index -2 + id -2 + graphics [ + x 63.0 + y 37.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node1" + ] + node [ + root_index -1 + id -1 + graphics [ + x -31.0 + y -17.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node0" + ] + edge [ + root_index -2 + target -2 + source -1 + graphics [ + width 1.5 + fill "#0000ff" + type "line" + Line [ + ] + source_arrow 0 + target_arrow 3 + ] + label "DirectedEdge" + ] + edge [ + root_index -1 + target -1 + source -3 + graphics [ + width 1.5 + fill "#0000ff" + type "line" + Line [ + ] + source_arrow 0 + target_arrow 3 + ] + label "DirectedEdge" + ] +] +""" + nx.parse_gml(cytoscape_example) + + def test_parse_gml(self): + G = nx.parse_gml(self.simple_data, label="label") + assert sorted(G.nodes()) == ["Node 1", "Node 2", "Node 3"] + assert sorted(G.edges()) == [ + ("Node 1", "Node 2"), + ("Node 2", "Node 3"), + ("Node 3", "Node 1"), + ] + + assert sorted(G.edges(data=True)) == [ + ( + "Node 1", + "Node 2", + { + "color": {"line": "blue", "thickness": 3}, + "label": "Edge from node 1 to node 2", + }, + ), + ("Node 2", "Node 3", {"label": "Edge from node 2 to node 3"}), + ("Node 3", "Node 1", {"label": "Edge from node 3 to node 1"}), + ] + + def test_read_gml(self, tmp_path): + fname = tmp_path / "test.gml" + with open(fname, "w") as fh: + fh.write(self.simple_data) + Gin = nx.read_gml(fname, label="label") + G = nx.parse_gml(self.simple_data, label="label") + assert sorted(G.nodes(data=True)) == sorted(Gin.nodes(data=True)) + assert sorted(G.edges(data=True)) == sorted(Gin.edges(data=True)) + + def test_labels_are_strings(self): + # GML requires labels to be strings (i.e., in quotes) + answer = """graph [ + node [ + id 0 + label "1203" + ] +]""" + G = nx.Graph() + G.add_node(1203) + data = "\n".join(nx.generate_gml(G, stringizer=literal_stringizer)) + assert data == answer + + def test_relabel_duplicate(self): + data = """ +graph +[ + label "" + directed 1 + node + [ + id 0 + label "same" + ] + node + [ + id 1 + label "same" + ] +] +""" + fh = io.BytesIO(data.encode("UTF-8")) + fh.seek(0) + pytest.raises(nx.NetworkXError, nx.read_gml, fh, label="label") + + @pytest.mark.parametrize("stringizer", (None, literal_stringizer)) + def test_tuplelabels(self, stringizer): + # https://github.com/networkx/networkx/pull/1048 + # Writing tuple labels to GML failed. + G = nx.Graph() + G.add_edge((0, 1), (1, 0)) + data = "\n".join(nx.generate_gml(G, stringizer=stringizer)) + answer = """graph [ + node [ + id 0 + label "(0,1)" + ] + node [ + id 1 + label "(1,0)" + ] + edge [ + source 0 + target 1 + ] +]""" + assert data == answer + + def test_quotes(self, tmp_path): + # https://github.com/networkx/networkx/issues/1061 + # Encoding quotes as HTML entities. 
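+        # write_gml emits ASCII-only output: '"' is escaped as &#34; and
+        # non-ASCII code points such as the copyright sign are written as
+        # numeric character references.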
+ G = nx.path_graph(1) + G.name = "path_graph(1)" + attr = 'This is "quoted" and this is a copyright: ' + chr(169) + G.nodes[0]["demo"] = attr + with open(tmp_path / "test.gml", "w+b") as fobj: + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + name "path_graph(1)" + node [ + id 0 + label "0" + demo "This is "quoted" and this is a copyright: ©" + ] +]""" + assert data == answer + + def test_unicode_node(self, tmp_path): + node = "node" + chr(169) + G = nx.Graph() + G.add_node(node) + with open(tmp_path / "test.gml", "w+b") as fobj: + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "node©" + ] +]""" + assert data == answer + + def test_float_label(self, tmp_path): + node = 1.0 + G = nx.Graph() + G.add_node(node) + with open(tmp_path / "test.gml", "w+b") as fobj: + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "1.0" + ] +]""" + assert data == answer + + def test_special_float_label(self, tmp_path): + special_floats = [float("nan"), float("+inf"), float("-inf")] + try: + import numpy as np + + special_floats += [np.nan, np.inf, np.inf * -1] + except ImportError: + special_floats += special_floats + + G = nx.cycle_graph(len(special_floats)) + attrs = dict(enumerate(special_floats)) + nx.set_node_attributes(G, attrs, "nodefloat") + edges = list(G.edges) + attrs = {edges[i]: value for i, value in enumerate(special_floats)} + nx.set_edge_attributes(G, attrs, "edgefloat") + + with open(tmp_path / "test.gml", "w+b") as fobj: + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "0" + nodefloat NAN + ] + node [ + id 1 + label "1" + nodefloat +INF + ] + node [ + id 2 + label "2" + nodefloat -INF + ] + node [ + id 3 + label "3" + nodefloat NAN + ] + node [ + id 4 + label "4" + nodefloat +INF + ] + node [ + id 5 + label "5" + nodefloat -INF + ] + edge [ + source 0 + target 1 + edgefloat NAN + ] + edge [ + source 0 + target 5 + edgefloat +INF + ] + edge [ + source 1 + target 2 + edgefloat -INF + ] + edge [ + source 2 + target 3 + edgefloat NAN + ] + edge [ + source 3 + target 4 + edgefloat +INF + ] + edge [ + source 4 + target 5 + edgefloat -INF + ] +]""" + assert data == answer + + fobj.seek(0) + graph = nx.read_gml(fobj) + for indx, value in enumerate(special_floats): + node_value = graph.nodes[str(indx)]["nodefloat"] + if math.isnan(value): + assert math.isnan(node_value) + else: + assert node_value == value + + edge = edges[indx] + string_edge = (str(edge[0]), str(edge[1])) + edge_value = graph.edges[string_edge]["edgefloat"] + if math.isnan(value): + assert math.isnan(edge_value) + else: + assert edge_value == value + + def test_name(self): + G = nx.parse_gml('graph [ name "x" node [ id 0 label "x" ] ]') + assert "x" == G.graph["name"] + G = nx.parse_gml('graph [ node [ id 0 label "x" ] ]') + assert "" == G.name + assert "name" not in G.graph + + def test_graph_types(self): + for directed in [None, False, True]: + for multigraph in [None, False, True]: + gml = "graph [" + if directed is not None: + gml += " directed " + str(int(directed)) + if multigraph is not None: + gml += " multigraph " + str(int(multigraph)) + gml += ' node [ id 0 label "0" ]' + gml += " edge [ 
source 0 target 0 ]" + gml += " ]" + G = nx.parse_gml(gml) + assert bool(directed) == G.is_directed() + assert bool(multigraph) == G.is_multigraph() + gml = "graph [\n" + if directed is True: + gml += " directed 1\n" + if multigraph is True: + gml += " multigraph 1\n" + gml += """ node [ + id 0 + label "0" + ] + edge [ + source 0 + target 0 +""" + if multigraph: + gml += " key 0\n" + gml += " ]\n]" + assert gml == "\n".join(nx.generate_gml(G)) + + def test_data_types(self): + data = [ + True, + False, + 10**20, + -2e33, + "'", + '"&&&""', + [{(b"\xfd",): "\x7f", chr(0x4444): (1, 2)}, (2, "3")], + ] + data.append(chr(0x14444)) + data.append(literal_eval("{2.3j, 1 - 2.3j, ()}")) + G = nx.Graph() + G.name = data + G.graph["data"] = data + G.add_node(0, int=-1, data={"data": data}) + G.add_edge(0, 0, float=-2.5, data=data) + gml = "\n".join(nx.generate_gml(G, stringizer=literal_stringizer)) + G = nx.parse_gml(gml, destringizer=literal_destringizer) + assert data == G.name + assert {"name": data, "data": data} == G.graph + assert list(G.nodes(data=True)) == [(0, {"int": -1, "data": {"data": data}})] + assert list(G.edges(data=True)) == [(0, 0, {"float": -2.5, "data": data})] + G = nx.Graph() + G.graph["data"] = "frozenset([1, 2, 3])" + G = nx.parse_gml(nx.generate_gml(G), destringizer=literal_eval) + assert G.graph["data"] == "frozenset([1, 2, 3])" + + def test_escape_unescape(self): + gml = """graph [ + name "&"䑄��&unknown;" +]""" + G = nx.parse_gml(gml) + assert ( + '&"\x0f' + chr(0x4444) + "��&unknown;" + == G.name + ) + gml = "\n".join(nx.generate_gml(G)) + alnu = "#1234567890;&#x1234567890abcdef" + answer = ( + """graph [ + name "&"䑄&""" + + alnu + + """;&unknown;" +]""" + ) + assert answer == gml + + def test_exceptions(self, tmp_path): + pytest.raises(ValueError, literal_destringizer, "(") + pytest.raises(ValueError, literal_destringizer, "frozenset([1, 2, 3])") + pytest.raises(ValueError, literal_destringizer, literal_destringizer) + pytest.raises(ValueError, literal_stringizer, frozenset([1, 2, 3])) + pytest.raises(ValueError, literal_stringizer, literal_stringizer) + with open(tmp_path / "test.gml", "w+b") as f: + f.write(codecs.BOM_UTF8 + b"graph[]") + f.seek(0) + pytest.raises(nx.NetworkXError, nx.read_gml, f) + + def assert_parse_error(gml): + pytest.raises(nx.NetworkXError, nx.parse_gml, gml) + + assert_parse_error(["graph [\n\n", "]"]) + assert_parse_error("") + assert_parse_error('Creator ""') + assert_parse_error("0") + assert_parse_error("graph ]") + assert_parse_error("graph [ 1 ]") + assert_parse_error("graph [ 1.E+2 ]") + assert_parse_error('graph [ "A" ]') + assert_parse_error("graph [ ] graph ]") + assert_parse_error("graph [ ] graph [ ]") + assert_parse_error("graph [ data [1, 2, 3] ]") + assert_parse_error("graph [ node [ ] ]") + assert_parse_error("graph [ node [ id 0 ] ]") + nx.parse_gml('graph [ node [ id "a" ] ]', label="id") + assert_parse_error("graph [ node [ id 0 label 0 ] node [ id 0 label 1 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] node [ id 1 label 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 0 ] ]") + nx.parse_gml("graph [edge [ source 0 target 0 ] node [ id 0 label 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 1 target 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 0 target 1 ] ]") + assert_parse_error( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ 
source 1 target 0 ] ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ source 1 target 0 ] " + "directed 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ source 0 target 1 ]" + "multigraph 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 ]" + "multigraph 1 ]" + ) + assert_parse_error( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 key 0 ]" + "multigraph 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 1 target 0 key 0 ]" + "directed 1 multigraph 1 ]" + ) + + # Tests for string convertible alphanumeric id and label values + nx.parse_gml("graph [edge [ source a target a ] node [ id a label b ] ]") + nx.parse_gml( + "graph [ node [ id n42 label 0 ] node [ id x43 label 1 ]" + "edge [ source n42 target x43 key 0 ]" + "edge [ source x43 target n42 key 0 ]" + "directed 1 multigraph 1 ]" + ) + assert_parse_error( + "graph [edge [ source '\u4200' target '\u4200' ] " + + "node [ id '\u4200' label b ] ]" + ) + + def assert_generate_error(*args, **kwargs): + pytest.raises( + nx.NetworkXError, lambda: list(nx.generate_gml(*args, **kwargs)) + ) + + G = nx.Graph() + G.graph[3] = 3 + assert_generate_error(G) + G = nx.Graph() + G.graph["3"] = 3 + assert_generate_error(G) + G = nx.Graph() + G.graph["data"] = frozenset([1, 2, 3]) + assert_generate_error(G, stringizer=literal_stringizer) + + def test_label_kwarg(self): + G = nx.parse_gml(self.simple_data, label="id") + assert sorted(G.nodes) == [1, 2, 3] + labels = [G.nodes[n]["label"] for n in sorted(G.nodes)] + assert labels == ["Node 1", "Node 2", "Node 3"] + + G = nx.parse_gml(self.simple_data, label=None) + assert sorted(G.nodes) == [1, 2, 3] + labels = [G.nodes[n]["label"] for n in sorted(G.nodes)] + assert labels == ["Node 1", "Node 2", "Node 3"] + + def test_outofrange_integers(self, tmp_path): + # GML restricts integers to 32 signed bits. 
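+        # Values outside [-2**31, 2**31 - 1] cannot be represented as GML
+        # integers, so the writer falls back to their string representation
+        # (asserted below for the 'toosmall' and 'toobig' attributes).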
+ # Check that we honor this restriction on export + G = nx.Graph() + # Test export for numbers that barely fit or don't fit into 32 bits, + # and 3 numbers in the middle + numbers = { + "toosmall": (-(2**31)) - 1, + "small": -(2**31), + "med1": -4, + "med2": 0, + "med3": 17, + "big": (2**31) - 1, + "toobig": 2**31, + } + G.add_node("Node", **numbers) + + fname = tmp_path / "test.gml" + nx.write_gml(G, fname) + # Check that the export wrote the nonfitting numbers as strings + G2 = nx.read_gml(fname) + for attr, value in G2.nodes["Node"].items(): + if attr == "toosmall" or attr == "toobig": + assert isinstance(value, str) + else: + assert isinstance(value, int) + + def test_multiline(self): + # example from issue #6836 + multiline_example = """ +graph +[ + node + [ + id 0 + label "multiline node" + label2 "multiline1 + multiline2 + multiline3" + alt_name "id 0" + ] +] +""" + G = nx.parse_gml(multiline_example) + assert G.nodes["multiline node"] == { + "label2": "multiline1 multiline2 multiline3", + "alt_name": "id 0", + } + + +@contextmanager +def byte_file(): + _file_handle = io.BytesIO() + yield _file_handle + _file_handle.seek(0) + + +class TestPropertyLists: + def test_writing_graph_with_multi_element_property_list(self): + g = nx.Graph() + g.add_node("n1", properties=["element", 0, 1, 2.5, True, False]) + with byte_file() as f: + nx.write_gml(g, f) + result = f.read().decode() + + assert result == dedent( + """\ + graph [ + node [ + id 0 + label "n1" + properties "element" + properties 0 + properties 1 + properties 2.5 + properties 1 + properties 0 + ] + ] + """ + ) + + def test_writing_graph_with_one_element_property_list(self): + g = nx.Graph() + g.add_node("n1", properties=["element"]) + with byte_file() as f: + nx.write_gml(g, f) + result = f.read().decode() + + assert result == dedent( + """\ + graph [ + node [ + id 0 + label "n1" + properties "_networkx_list_start" + properties "element" + ] + ] + """ + ) + + def test_reading_graph_with_list_property(self): + with byte_file() as f: + f.write( + dedent( + """ + graph [ + node [ + id 0 + label "n1" + properties "element" + properties 0 + properties 1 + properties 2.5 + ] + ] + """ + ).encode("ascii") + ) + f.seek(0) + graph = nx.read_gml(f) + assert graph.nodes(data=True)["n1"] == {"properties": ["element", 0, 1, 2.5]} + + def test_reading_graph_with_single_element_list_property(self): + with byte_file() as f: + f.write( + dedent( + """ + graph [ + node [ + id 0 + label "n1" + properties "_networkx_list_start" + properties "element" + ] + ] + """ + ).encode("ascii") + ) + f.seek(0) + graph = nx.read_gml(f) + assert graph.nodes(data=True)["n1"] == {"properties": ["element"]} + + +@pytest.mark.parametrize("coll", ([], ())) +def test_stringize_empty_list_tuple(coll): + G = nx.path_graph(2) + G.nodes[0]["test"] = coll # test serializing an empty collection + f = io.BytesIO() + nx.write_gml(G, f) # Smoke test - should not raise + f.seek(0) + H = nx.read_gml(f) + assert H.nodes["0"]["test"] == coll # Check empty list round-trips properly + # Check full round-tripping. Note that nodes are loaded as strings by + # default, so there needs to be some remapping prior to comparison + H = nx.relabel_nodes(H, {"0": 0, "1": 1}) + assert nx.utils.graphs_equal(G, H) + # Same as above, but use destringizer for node remapping. 
Should have no + # effect on node attr + f.seek(0) + H = nx.read_gml(f, destringizer=int) + assert nx.utils.graphs_equal(G, H) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_graph6.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_graph6.py new file mode 100644 index 0000000..d680c21 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_graph6.py @@ -0,0 +1,181 @@ +from io import BytesIO + +import pytest + +import networkx as nx +import networkx.readwrite.graph6 as g6 +from networkx.utils import edges_equal, nodes_equal + + +def test_from_graph6_invariant_to_trailing_newline(): + """See gh-7557""" + G = nx.from_graph6_bytes(b">>graph6<>graph6<>graph6<<\nP~~~~~~~~~~~~~~~~~~~~~~{") + + +class TestGraph6Utils: + def test_n_data_n_conversion(self): + for i in [0, 1, 42, 62, 63, 64, 258047, 258048, 7744773, 68719476735]: + assert g6.data_to_n(g6.n_to_data(i))[0] == i + assert g6.data_to_n(g6.n_to_data(i))[1] == [] + assert g6.data_to_n(g6.n_to_data(i) + [42, 43])[1] == [42, 43] + + +class TestFromGraph6Bytes: + def test_from_graph6_bytes(self): + data = b"DF{" + G = nx.from_graph6_bytes(data) + assert nodes_equal(G.nodes(), [0, 1, 2, 3, 4]) + assert edges_equal( + G.edges(), [(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + ) + + def test_read_equals_from_bytes(self): + data = b"DF{" + G = nx.from_graph6_bytes(data) + fh = BytesIO(data) + Gin = nx.read_graph6(fh) + assert nodes_equal(G.nodes(), Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + + +class TestReadGraph6: + def test_read_many_graph6(self): + """Test for reading many graphs from a file into a list.""" + data = b"DF{\nD`{\nDqK\nD~{\n" + fh = BytesIO(data) + glist = nx.read_graph6(fh) + assert len(glist) == 4 + for G in glist: + assert sorted(G) == list(range(5)) + + +class TestWriteGraph6: + """Unit tests for writing a graph to a file in graph6 format.""" + + def test_null_graph(self): + result = BytesIO() + nx.write_graph6(nx.null_graph(), result) + assert result.getvalue() == b">>graph6<>graph6<<@\n" + + def test_complete_graph(self): + result = BytesIO() + nx.write_graph6(nx.complete_graph(4), result) + assert result.getvalue() == b">>graph6<>graph6<>graph6<>graph6<>graph6<<@\n" + + def test_complete_graph(self): + assert g6.to_graph6_bytes(nx.complete_graph(4)) == b">>graph6<>graph6< + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + cls.simple_directed_graph = nx.DiGraph() + cls.simple_directed_graph.add_node("n10") + cls.simple_directed_graph.add_edge("n0", "n2", id="foo") + cls.simple_directed_graph.add_edge("n0", "n2") + cls.simple_directed_graph.add_edges_from( + [ + ("n1", "n2"), + ("n2", "n3"), + ("n3", "n5"), + ("n3", "n4"), + ("n4", "n6"), + ("n6", "n5"), + ("n5", "n7"), + ("n6", "n8"), + ("n8", "n7"), + ("n8", "n9"), + ] + ) + cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8")) + + cls.attribute_data = """ + + + yellow + + + + + green + + + + blue + + + red + + + + turquoise + + + 1.0 + + + 1.0 + + + 2.0 + + + + + + 1.1 + + + +""" + cls.attribute_graph = nx.DiGraph(id="G") + cls.attribute_graph.graph["node_default"] = {"color": "yellow"} + cls.attribute_graph.add_node("n0", color="green") + cls.attribute_graph.add_node("n2", color="blue") + cls.attribute_graph.add_node("n3", color="red") + cls.attribute_graph.add_node("n4") + cls.attribute_graph.add_node("n5", color="turquoise") + cls.attribute_graph.add_edge("n0", "n2", id="e0", weight=1.0) + cls.attribute_graph.add_edge("n0", "n1", 
id="e1", weight=1.0) + cls.attribute_graph.add_edge("n1", "n3", id="e2", weight=2.0) + cls.attribute_graph.add_edge("n3", "n2", id="e3") + cls.attribute_graph.add_edge("n2", "n4", id="e4") + cls.attribute_graph.add_edge("n3", "n5", id="e5") + cls.attribute_graph.add_edge("n5", "n4", id="e6", weight=1.1) + cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8")) + + cls.node_attribute_default_data = """ + + false + 0 + 0 + 0.0 + 0.0 + Foo + + + + + + + """ + cls.node_attribute_default_graph = nx.DiGraph(id="G") + cls.node_attribute_default_graph.graph["node_default"] = { + "boolean_attribute": False, + "int_attribute": 0, + "long_attribute": 0, + "float_attribute": 0.0, + "double_attribute": 0.0, + "string_attribute": "Foo", + } + cls.node_attribute_default_graph.add_node("n0") + cls.node_attribute_default_graph.add_node("n1") + cls.node_attribute_default_graph.add_edge("n0", "n1", id="e0") + cls.node_attribute_default_fh = io.BytesIO( + cls.node_attribute_default_data.encode("UTF-8") + ) + + cls.attribute_named_key_ids_data = """ + + + + + + + val1 + val2 + + + val_one + val2 + + + edge_value + + + +""" + cls.attribute_named_key_ids_graph = nx.DiGraph() + cls.attribute_named_key_ids_graph.add_node("0", prop1="val1", prop2="val2") + cls.attribute_named_key_ids_graph.add_node("1", prop1="val_one", prop2="val2") + cls.attribute_named_key_ids_graph.add_edge("0", "1", edge_prop="edge_value") + fh = io.BytesIO(cls.attribute_named_key_ids_data.encode("UTF-8")) + cls.attribute_named_key_ids_fh = fh + + cls.attribute_numeric_type_data = """ + + + + + + 1 + + + 2.0 + + + 1 + + + k + + + 1.0 + + + +""" + cls.attribute_numeric_type_graph = nx.DiGraph() + cls.attribute_numeric_type_graph.add_node("n0", weight=1) + cls.attribute_numeric_type_graph.add_node("n1", weight=2.0) + cls.attribute_numeric_type_graph.add_edge("n0", "n1", weight=1) + cls.attribute_numeric_type_graph.add_edge("n1", "n1", weight=1.0) + fh = io.BytesIO(cls.attribute_numeric_type_data.encode("UTF-8")) + cls.attribute_numeric_type_fh = fh + + cls.simple_undirected_data = """ + + + + + + + + + + +""" + # + cls.simple_undirected_graph = nx.Graph() + cls.simple_undirected_graph.add_node("n10") + cls.simple_undirected_graph.add_edge("n0", "n2", id="foo") + cls.simple_undirected_graph.add_edges_from([("n1", "n2"), ("n2", "n3")]) + fh = io.BytesIO(cls.simple_undirected_data.encode("UTF-8")) + cls.simple_undirected_fh = fh + + cls.undirected_multigraph_data = """ + + + + + + + + + + +""" + cls.undirected_multigraph = nx.MultiGraph() + cls.undirected_multigraph.add_node("n10") + cls.undirected_multigraph.add_edge("n0", "n2", id="e0") + cls.undirected_multigraph.add_edge("n1", "n2", id="e1") + cls.undirected_multigraph.add_edge("n2", "n1", id="e2") + fh = io.BytesIO(cls.undirected_multigraph_data.encode("UTF-8")) + cls.undirected_multigraph_fh = fh + + cls.undirected_multigraph_no_multiedge_data = """ + + + + + + + + + + +""" + cls.undirected_multigraph_no_multiedge = nx.MultiGraph() + cls.undirected_multigraph_no_multiedge.add_node("n10") + cls.undirected_multigraph_no_multiedge.add_edge("n0", "n2", id="e0") + cls.undirected_multigraph_no_multiedge.add_edge("n1", "n2", id="e1") + cls.undirected_multigraph_no_multiedge.add_edge("n2", "n3", id="e2") + fh = io.BytesIO(cls.undirected_multigraph_no_multiedge_data.encode("UTF-8")) + cls.undirected_multigraph_no_multiedge_fh = fh + + cls.multigraph_only_ids_for_multiedges_data = """ + + + + + + + + + + +""" + cls.multigraph_only_ids_for_multiedges = nx.MultiGraph() + 
cls.multigraph_only_ids_for_multiedges.add_node("n10") + cls.multigraph_only_ids_for_multiedges.add_edge("n0", "n2") + cls.multigraph_only_ids_for_multiedges.add_edge("n1", "n2", id="e1") + cls.multigraph_only_ids_for_multiedges.add_edge("n2", "n1", id="e2") + fh = io.BytesIO(cls.multigraph_only_ids_for_multiedges_data.encode("UTF-8")) + cls.multigraph_only_ids_for_multiedges_fh = fh + + +class TestReadGraphML(BaseGraphML): + def test_read_simple_directed_graphml(self): + G = self.simple_directed_graph + H = nx.read_graphml(self.simple_directed_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + PG = nx.parse_graphml(self.simple_directed_data) + assert sorted(G.nodes()) == sorted(PG.nodes()) + assert sorted(G.edges()) == sorted(PG.edges()) + assert sorted(G.edges(data=True)) == sorted(PG.edges(data=True)) + + def test_read_simple_undirected_graphml(self): + G = self.simple_undirected_graph + H = nx.read_graphml(self.simple_undirected_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.simple_undirected_fh.seek(0) + + PG = nx.parse_graphml(self.simple_undirected_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_graphml(self): + G = self.undirected_multigraph + H = nx.read_graphml(self.undirected_multigraph_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.undirected_multigraph_fh.seek(0) + + PG = nx.parse_graphml(self.undirected_multigraph_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_no_multiedge_graphml(self): + G = self.undirected_multigraph_no_multiedge + H = nx.read_graphml(self.undirected_multigraph_no_multiedge_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.undirected_multigraph_no_multiedge_fh.seek(0) + + PG = nx.parse_graphml(self.undirected_multigraph_no_multiedge_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_only_ids_for_multiedges_graphml(self): + G = self.multigraph_only_ids_for_multiedges + H = nx.read_graphml(self.multigraph_only_ids_for_multiedges_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.multigraph_only_ids_for_multiedges_fh.seek(0) + + PG = nx.parse_graphml(self.multigraph_only_ids_for_multiedges_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_attribute_graphml(self): + G = self.attribute_graph + H = nx.read_graphml(self.attribute_fh) + assert nodes_equal(G.nodes(True), sorted(H.nodes(data=True))) + ge = sorted(G.edges(data=True)) + he = sorted(H.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + self.attribute_fh.seek(0) + + PG = nx.parse_graphml(self.attribute_data) + assert sorted(G.nodes(True)) == sorted(PG.nodes(data=True)) + ge = sorted(G.edges(data=True)) + he = sorted(PG.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + + def test_node_default_attribute_graphml(self): + G = self.node_attribute_default_graph + H = nx.read_graphml(self.node_attribute_default_fh) + assert G.graph["node_default"] == H.graph["node_default"] + + def test_directed_edge_in_undirected(self): + s = """ + + 
+ + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_undirected_edge_in_directed(self): + s = """ + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_key_raise(self): + s = """ + + + yellow + + + + + green + + + + blue + + + 1.0 + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_hyperedge_raise(self): + s = """ + + + yellow + + + + + green + + + + blue + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_multigraph_keys(self): + # Test that reading multigraphs uses edge id attributes as keys + s = """ + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + expected = [("n0", "n1", "e0"), ("n0", "n1", "e1")] + assert sorted(G.edges(keys=True)) == expected + fh.seek(0) + H = nx.parse_graphml(s) + assert sorted(H.edges(keys=True)) == expected + + def test_preserve_multi_edge_data(self): + """ + Test that data and keys of edges are preserved on consequent + write and reads + """ + G = nx.MultiGraph() + G.add_node(1) + G.add_node(2) + G.add_edges_from( + [ + # edges with no data, no keys: + (1, 2), + # edges with only data: + (1, 2, {"key": "data_key1"}), + (1, 2, {"id": "data_id2"}), + (1, 2, {"key": "data_key3", "id": "data_id3"}), + # edges with both data and keys: + (1, 2, 103, {"key": "data_key4"}), + (1, 2, 104, {"id": "data_id5"}), + (1, 2, 105, {"key": "data_key6", "id": "data_id7"}), + ] + ) + fh = io.BytesIO() + nx.write_graphml(G, fh) + fh.seek(0) + H = nx.read_graphml(fh, node_type=int) + assert edges_equal(G.edges(data=True, keys=True), H.edges(data=True, keys=True)) + assert G._adj == H._adj + + Gadj = { + str(node): { + str(nbr): {str(ekey): dd for ekey, dd in key_dict.items()} + for nbr, key_dict in nbr_dict.items() + } + for node, nbr_dict in G._adj.items() + } + fh.seek(0) + HH = nx.read_graphml(fh, node_type=str, edge_key_type=str) + assert Gadj == HH._adj + + fh.seek(0) + string_fh = fh.read() + HH = nx.parse_graphml(string_fh, node_type=str, edge_key_type=str) + assert Gadj == HH._adj + + def test_yfiles_extension(self): + data = """ + + + + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + 2 + + + + + + + + + + + + 3 + + + + + + + + + + + + + + + + + + + + +""" + fh = io.BytesIO(data.encode("UTF-8")) + G = nx.read_graphml(fh, force_multigraph=True) + assert list(G.edges()) == [("n0", "n1")] + assert G.has_edge("n0", "n1", key="e0") + assert G.nodes["n0"]["label"] == "1" + assert G.nodes["n1"]["label"] == "2" + assert G.nodes["n2"]["label"] == "3" + assert G.nodes["n0"]["shape_type"] == "rectangle" + assert G.nodes["n1"]["shape_type"] == "rectangle" + assert G.nodes["n2"]["shape_type"] == "com.yworks.flowchart.terminator" + assert G.nodes["n2"]["description"] == "description\nline1\nline2" + fh.seek(0) + G = nx.read_graphml(fh) + assert list(G.edges()) == [("n0", "n1")] + assert G["n0"]["n1"]["id"] == "e0" + assert G.nodes["n0"]["label"] == "1" + assert G.nodes["n1"]["label"] == "2" + assert G.nodes["n2"]["label"] == "3" + assert G.nodes["n0"]["shape_type"] == "rectangle" + assert G.nodes["n1"]["shape_type"] == "rectangle" + assert 
G.nodes["n2"]["shape_type"] == "com.yworks.flowchart.terminator" + assert G.nodes["n2"]["description"] == "description\nline1\nline2" + + H = nx.parse_graphml(data, force_multigraph=True) + assert list(H.edges()) == [("n0", "n1")] + assert H.has_edge("n0", "n1", key="e0") + assert H.nodes["n0"]["label"] == "1" + assert H.nodes["n1"]["label"] == "2" + assert H.nodes["n2"]["label"] == "3" + + H = nx.parse_graphml(data) + assert list(H.edges()) == [("n0", "n1")] + assert H["n0"]["n1"]["id"] == "e0" + assert H.nodes["n0"]["label"] == "1" + assert H.nodes["n1"]["label"] == "2" + assert H.nodes["n2"]["label"] == "3" + + def test_bool(self): + s = """ + + + false + + + + true + + + + false + + + FaLsE + + + True + + + 0 + + + 1 + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + H = nx.parse_graphml(s) + for graph in [G, H]: + assert graph.nodes["n0"]["test"] + assert not graph.nodes["n2"]["test"] + assert not graph.nodes["n3"]["test"] + assert graph.nodes["n4"]["test"] + assert not graph.nodes["n5"]["test"] + assert graph.nodes["n6"]["test"] + + def test_graphml_header_line(self): + good = """ + + + false + + + + true + + + +""" + bad = """ + + + false + + + + true + + + +""" + ugly = """ + + + false + + + + true + + + +""" + for s in (good, bad): + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + H = nx.parse_graphml(s) + for graph in [G, H]: + assert graph.nodes["n0"]["test"] + + fh = io.BytesIO(ugly.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, ugly) + + def test_read_attributes_with_groups(self): + data = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2 + + + + + + + + + + + + + + + + + + + + + + Group 3 + + + + + + + + + + Folder 3 + + + + + + + + + + + + + + + + + + + + + Group 1 + + + + + + + + + + Folder 1 + + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + + + + + + + + + 3 + + + + + + + + + + + + + + + + + + + + + + + + Group 2 + + + + + + + + + + Folder 2 + + + + + + + + + + + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + 6 + + + + + + + + + + + + + + + + + + + + + + + 9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + # verify that nodes / attributes are correctly read when part of a group + fh = io.BytesIO(data.encode("UTF-8")) + G = nx.read_graphml(fh) + data = [x for _, x in G.nodes(data=True)] + assert len(data) == 9 + for node_data in data: + assert node_data["CustomProperty"] != "" + + def test_long_attribute_type(self): + # test that graphs with attr.type="long" (as produced by botch and + # dose3) can be parsed + s = """ + + + + + 4284 + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + expected = [("n1", {"cudfversion": 4284})] + assert sorted(G.nodes(data=True)) == expected + fh.seek(0) + H = nx.parse_graphml(s) + assert sorted(H.nodes(data=True)) == expected + + +class TestWriteGraphML(BaseGraphML): + writer = staticmethod(nx.write_graphml_lxml) + + @classmethod + def setup_class(cls): + BaseGraphML.setup_class() + _ = pytest.importorskip("lxml.etree") + + def test_write_interface(self): + try: + import lxml.etree + + assert nx.write_graphml == nx.write_graphml_lxml + except ImportError: + assert nx.write_graphml == nx.write_graphml_xml + + def test_write_read_simple_directed_graphml(self): + G = self.simple_directed_graph + G.graph["hi"] = "there" + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + 
assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_GraphMLWriter_add_graphs(self): + gmlw = GraphMLWriter() + G = self.simple_directed_graph + H = G.copy() + gmlw.add_graphs([G, H]) + + def test_write_read_simple_no_prettyprint(self): + G = self.simple_directed_graph + G.graph["hi"] = "there" + G.graph["id"] = "1" + fh = io.BytesIO() + self.writer(G, fh, prettyprint=False) + fh.seek(0) + H = nx.read_graphml(fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_write_read_attribute_named_key_ids_graphml(self): + from xml.etree.ElementTree import parse + + G = self.attribute_named_key_ids_graph + fh = io.BytesIO() + self.writer(G, fh, named_key_ids=True) + fh.seek(0) + H = nx.read_graphml(fh) + fh.seek(0) + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + self.attribute_named_key_ids_fh.seek(0) + + xml = parse(fh) + # Children are the key elements, and the graph element + children = list(xml.getroot()) + assert len(children) == 4 + + keys = [child.items() for child in children[:3]] + + assert len(keys) == 3 + assert ("id", "edge_prop") in keys[0] + assert ("attr.name", "edge_prop") in keys[0] + assert ("id", "prop2") in keys[1] + assert ("attr.name", "prop2") in keys[1] + assert ("id", "prop1") in keys[2] + assert ("attr.name", "prop1") in keys[2] + + # Confirm the read graph's nodes/edges are identical when compared to + # the default writing behavior. + default_behavior_fh = io.BytesIO() + nx.write_graphml(G, default_behavior_fh) + default_behavior_fh.seek(0) + H = nx.read_graphml(default_behavior_fh) + + named_key_ids_behavior_fh = io.BytesIO() + nx.write_graphml(G, named_key_ids_behavior_fh, named_key_ids=True) + named_key_ids_behavior_fh.seek(0) + J = nx.read_graphml(named_key_ids_behavior_fh) + + assert all(n1 == n2 for (n1, n2) in zip(H.nodes, J.nodes)) + assert all(e1 == e2 for (e1, e2) in zip(H.edges, J.edges)) + + def test_write_read_attribute_numeric_type_graphml(self): + from xml.etree.ElementTree import parse + + G = self.attribute_numeric_type_graph + fh = io.BytesIO() + self.writer(G, fh, infer_numeric_types=True) + fh.seek(0) + H = nx.read_graphml(fh) + fh.seek(0) + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + self.attribute_numeric_type_fh.seek(0) + + xml = parse(fh) + # Children are the key elements, and the graph element + children = list(xml.getroot()) + assert len(children) == 3 + + keys = [child.items() for child in children[:2]] + + assert len(keys) == 2 + assert ("attr.type", "double") in keys[0] + assert ("attr.type", "double") in keys[1] + + def test_more_multigraph_keys(self, tmp_path): + """Writing keys as edge id attributes means keys become strings. + The original keys are stored as data, and are read back in + if `str(key) == edge_id`. + This allows the adjacency to remain the same.
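+ For example, the integer keys 2 and 3 used below survive the round
+ trip because the written edge ids "2" and "3" match str(key).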
+ """ + G = nx.MultiGraph() + G.add_edges_from([("a", "b", 2), ("a", "b", 3)]) + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname) + assert H.is_multigraph() + assert edges_equal(G.edges(keys=True), H.edges(keys=True)) + assert G._adj == H._adj + + def test_default_attribute(self): + G = nx.Graph(name="Fred") + G.add_node(1, label=1, color="green") + nx.add_path(G, [0, 1, 2, 3]) + G.add_edge(1, 2, weight=3) + G.graph["node_default"] = {"color": "yellow"} + G.graph["edge_default"] = {"weight": 7} + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh, node_type=int) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert G.graph == H.graph + + def test_mixed_type_attributes(self): + G = nx.MultiGraph() + G.add_node("n0", special=False) + G.add_node("n1", special=0) + G.add_edge("n0", "n1", special=False) + G.add_edge("n0", "n1", special=0) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert not H.nodes["n0"]["special"] + assert H.nodes["n1"]["special"] == 0 + assert not H.edges["n0", "n1", 0]["special"] + assert H.edges["n0", "n1", 1]["special"] == 0 + + def test_str_number_mixed_type_attributes(self): + G = nx.MultiGraph() + G.add_node("n0", special="hello") + G.add_node("n1", special=0) + G.add_edge("n0", "n1", special="hello") + G.add_edge("n0", "n1", special=0) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert H.nodes["n0"]["special"] == "hello" + assert H.nodes["n1"]["special"] == 0 + assert H.edges["n0", "n1", 0]["special"] == "hello" + assert H.edges["n0", "n1", 1]["special"] == 0 + + def test_mixed_int_type_number_attributes(self): + np = pytest.importorskip("numpy") + G = nx.MultiGraph() + G.add_node("n0", special=np.int64(0)) + G.add_node("n1", special=1) + G.add_edge("n0", "n1", special=np.int64(2)) + G.add_edge("n0", "n1", special=3) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert H.nodes["n0"]["special"] == 0 + assert H.nodes["n1"]["special"] == 1 + assert H.edges["n0", "n1", 0]["special"] == 2 + assert H.edges["n0", "n1", 1]["special"] == 3 + + def test_multigraph_to_graph(self, tmp_path): + # test converting multigraph to graph if no parallel edges found + G = nx.MultiGraph() + G.add_edges_from([("a", "b", 2), ("b", "c", 3)]) # no multiedges + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname) + assert not H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + + # add a multiedge + G.add_edge("a", "b", "e-id") + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname) + assert H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + + def test_write_generate_edge_id_from_attribute(self, tmp_path): + from xml.etree.ElementTree import parse + + G = nx.Graph() + G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c")]) + edge_attributes = {e: str(e) for e in G.edges} + nx.set_edge_attributes(G, edge_attributes, "eid") + fname = tmp_path / "test.graphml" + # set edge_id_from_attribute e.g. "eid" for write_graphml() + self.writer(G, fname, edge_id_from_attribute="eid") + # set edge_id_from_attribute e.g. 
"eid" for generate_graphml() + generator = nx.generate_graphml(G, edge_id_from_attribute="eid") + + H = nx.read_graphml(fname) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + # NetworkX adds explicit edge "id" from file as attribute + nx.set_edge_attributes(G, edge_attributes, "id") + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + tree = parse(fname) + children = list(tree.getroot()) + assert len(children) == 2 + edge_ids = [ + edge.attrib["id"] + for edge in tree.getroot().findall( + ".//{http://graphml.graphdrawing.org/xmlns}edge" + ) + ] + # verify edge id value is equal to specified attribute value + assert sorted(edge_ids) == sorted(edge_attributes.values()) + + # check graphml generated from generate_graphml() + data = "".join(generator) + J = nx.parse_graphml(data) + assert sorted(G.nodes()) == sorted(J.nodes()) + assert sorted(G.edges()) == sorted(J.edges()) + # NetworkX adds explicit edge "id" from file as attribute + nx.set_edge_attributes(G, edge_attributes, "id") + assert edges_equal(G.edges(data=True), J.edges(data=True)) + + def test_multigraph_write_generate_edge_id_from_attribute(self, tmp_path): + from xml.etree.ElementTree import parse + + G = nx.MultiGraph() + G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c"), ("a", "b")]) + edge_attributes = {e: str(e) for e in G.edges} + nx.set_edge_attributes(G, edge_attributes, "eid") + fname = tmp_path / "test.graphml" + # set edge_id_from_attribute e.g. "eid" for write_graphml() + self.writer(G, fname, edge_id_from_attribute="eid") + # set edge_id_from_attribute e.g. "eid" for generate_graphml() + generator = nx.generate_graphml(G, edge_id_from_attribute="eid") + + H = nx.read_graphml(fname) + assert H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert sorted(data.get("eid") for u, v, data in H.edges(data=True)) == sorted( + edge_attributes.values() + ) + # NetworkX uses edge_ids as keys in multigraphs if no key + assert sorted(key for u, v, key in H.edges(keys=True)) == sorted( + edge_attributes.values() + ) + + tree = parse(fname) + children = list(tree.getroot()) + assert len(children) == 2 + edge_ids = [ + edge.attrib["id"] + for edge in tree.getroot().findall( + ".//{http://graphml.graphdrawing.org/xmlns}edge" + ) + ] + # verify edge id value is equal to specified attribute value + assert sorted(edge_ids) == sorted(edge_attributes.values()) + + # check graphml generated from generate_graphml() + graphml_data = "".join(generator) + J = nx.parse_graphml(graphml_data) + assert J.is_multigraph() + + assert nodes_equal(G.nodes(), J.nodes()) + assert edges_equal(G.edges(), J.edges()) + assert sorted(data.get("eid") for u, v, data in J.edges(data=True)) == sorted( + edge_attributes.values() + ) + # NetworkX uses edge_ids as keys in multigraphs if no key + assert sorted(key for u, v, key in J.edges(keys=True)) == sorted( + edge_attributes.values() + ) + + def test_numpy_float64(self, tmp_path): + np = pytest.importorskip("numpy") + wt = np.float64(3.4) + G = nx.Graph([(1, 2, {"weight": wt})]) + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=int) + assert G.edges == H.edges + wtG = G[1][2]["weight"] + wtH = H[1][2]["weight"] + assert wtG == pytest.approx(wtH, abs=1e-6) + assert type(wtG) is np.float64 + assert type(wtH) is float + + def test_numpy_float32(self, tmp_path): + np = 
pytest.importorskip("numpy") + wt = np.float32(3.4) + G = nx.Graph([(1, 2, {"weight": wt})]) + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=int) + assert G.edges == H.edges + wtG = G[1][2]["weight"] + wtH = H[1][2]["weight"] + assert wtG == pytest.approx(wtH, abs=1e-6) + assert type(wtG) is np.float32 + assert type(wtH) is float + + def test_numpy_float64_inference(self, tmp_path): + np = pytest.importorskip("numpy") + G = self.attribute_numeric_type_graph + G.edges[("n1", "n1")]["weight"] = np.float64(1.1) + fname = tmp_path / "test.graphml" + self.writer(G, fname, infer_numeric_types=True) + H = nx.read_graphml(fname) + assert G._adj == H._adj + + def test_unicode_attributes(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + node_type = str + G.add_edge(name1, "Radiohead", foo=name2) + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=node_type) + assert G._adj == H._adj + + def test_unicode_escape(self): + # test for handling json escaped strings in python 2 Issue #1880 + import json + + a = {"a": '{"a": "123"}'} # an object with many chars to escape + sa = json.dumps(a) + G = nx.Graph() + G.graph["test"] = sa + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert G.graph["test"] == H.graph["test"] + + +class TestXMLGraphML(TestWriteGraphML): + writer = staticmethod(nx.write_graphml_xml) + + @classmethod + def setup_class(cls): + TestWriteGraphML.setup_class() + + +def test_exception_for_unsupported_datatype_node_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. a list""" + pytest.importorskip("lxml.etree") + # node attribute + G = nx.Graph() + G.add_node(0, my_list_attribute=[0, 1, 2]) + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) + + +def test_exception_for_unsupported_datatype_edge_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. a list""" + pytest.importorskip("lxml.etree") + # edge attribute + G = nx.Graph() + G.add_edge(0, 1, my_list_attribute=[0, 1, 2]) + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) + + +def test_exception_for_unsupported_datatype_graph_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. 
a list""" + pytest.importorskip("lxml.etree") + # graph attribute + G = nx.Graph() + G.graph["my_list_attribute"] = [0, 1, 2] + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) + + +def test_empty_attribute(): + """Tests that a GraphML string with an empty attribute can be parsed + correctly.""" + s = """ + + + + + + aaa + bbb + + + ccc + + + + """ + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + assert G.nodes["0"] == {"foo": "aaa", "bar": "bbb"} + assert G.nodes["1"] == {"foo": "ccc", "bar": ""} diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_leda.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_leda.py new file mode 100644 index 0000000..8ac5ecc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_leda.py @@ -0,0 +1,30 @@ +import io + +import networkx as nx + + +class TestLEDA: + def test_parse_leda(self): + data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|""" + G = nx.parse_leda(data) + G = nx.parse_leda(data.split("\n")) + assert sorted(G.nodes()) == ["v1", "v2", "v3", "v4", "v5"] + assert sorted(G.edges(data=True)) == [ + ("v1", "v2", {"label": "4"}), + ("v1", "v3", {"label": "3"}), + ("v2", "v3", {"label": "2"}), + ("v3", "v4", {"label": "3"}), + ("v3", "v5", {"label": "7"}), + ("v4", "v5", {"label": "6"}), + ("v5", "v1", {"label": "foo"}), + ] + + def test_read_LEDA(self): + fh = io.BytesIO() + data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|""" + G = nx.parse_leda(data) + fh.write(data.encode("UTF-8")) + fh.seek(0) + Gin = nx.read_leda(fh) + assert sorted(G.nodes()) == sorted(Gin.nodes()) + assert sorted(G.edges()) == sorted(Gin.edges()) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_p2g.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_p2g.py new file mode 100644 index 0000000..e4c50de --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_p2g.py @@ -0,0 +1,62 @@ +import io + +import networkx as nx +from networkx.readwrite.p2g import read_p2g, write_p2g +from networkx.utils import edges_equal + + +class TestP2G: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + + def test_read_p2g(self): + s = b"""\ +name +3 4 +a +1 2 +b + +c +0 2 +""" + bytesIO = io.BytesIO(s) + G = read_p2g(bytesIO) + assert G.name == "name" + assert sorted(G) == ["a", "b", "c"] + edges = [(str(u), str(v)) for u, v in G.edges()] + assert edges_equal(G.edges(), [("a", "c"), ("a", "b"), ("c", "a"), ("c", "c")]) + + def test_write_p2g(self): + s = b"""foo +3 2 +1 +1 +2 +2 +3 + +""" + fh = io.BytesIO() + G = nx.DiGraph() + G.name = "foo" + G.add_edges_from([(1, 2), (2, 3)]) + write_p2g(G, fh) + fh.seek(0) + r = fh.read() + assert r == s + + def test_write_read_p2g(self): + fh = io.BytesIO() + G = nx.DiGraph() + G.name = "foo" + G.add_edges_from([("a", "b"), ("b", "c")]) + write_p2g(G, fh) + fh.seek(0) + H = read_p2g(fh) + 
assert edges_equal(G.edges(), H.edges()) diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_pajek.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_pajek.py new file mode 100644 index 0000000..317ebe8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_pajek.py @@ -0,0 +1,126 @@ +""" +Pajek tests +""" + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestPajek: + @classmethod + def setup_class(cls): + cls.data = """*network Tralala\n*vertices 4\n 1 "A1" 0.0938 0.0896 ellipse x_fact 1 y_fact 1\n 2 "Bb" 0.8188 0.2458 ellipse x_fact 1 y_fact 1\n 3 "C" 0.3688 0.7792 ellipse x_fact 1\n 4 "D2" 0.9583 0.8563 ellipse x_fact 1\n*arcs\n1 1 1 h2 0 w 3 c Blue s 3 a1 -130 k1 0.6 a2 -130 k2 0.6 ap 0.5 l "Bezier loop" lc BlueViolet fos 20 lr 58 lp 0.3 la 360\n2 1 1 h2 0 a1 120 k1 1.3 a2 -120 k2 0.3 ap 25 l "Bezier arc" lphi 270 la 180 lr 19 lp 0.5\n1 2 1 h2 0 a1 40 k1 2.8 a2 30 k2 0.8 ap 25 l "Bezier arc" lphi 90 la 0 lp 0.65\n4 2 -1 h2 0 w 1 k1 -2 k2 250 ap 25 l "Circular arc" c Red lc OrangeRed\n3 4 1 p Dashed h2 0 w 2 c OliveGreen ap 25 l "Straight arc" lc PineGreen\n1 3 1 p Dashed h2 0 w 5 k1 -1 k2 -20 ap 25 l "Oval arc" c Brown lc Black\n3 3 -1 h1 6 w 1 h2 12 k1 -2 k2 -15 ap 0.5 l "Circular loop" c Red lc OrangeRed lphi 270 la 180""" + cls.G = nx.MultiDiGraph() + cls.G.add_nodes_from(["A1", "Bb", "C", "D2"]) + cls.G.add_edges_from( + [ + ("A1", "A1"), + ("A1", "Bb"), + ("A1", "C"), + ("Bb", "A1"), + ("C", "C"), + ("C", "D2"), + ("D2", "Bb"), + ] + ) + + cls.G.graph["name"] = "Tralala" + + def test_parse_pajek_simple(self): + # Example without node positions or shape + data = """*Vertices 2\n1 "1"\n2 "2"\n*Edges\n1 2\n2 1""" + G = nx.parse_pajek(data) + assert sorted(G.nodes()) == ["1", "2"] + assert edges_equal(G.edges(), [("1", "2"), ("1", "2")]) + + def test_parse_pajek(self): + G = nx.parse_pajek(self.data) + assert sorted(G.nodes()) == ["A1", "Bb", "C", "D2"] + assert edges_equal( + G.edges(), + [ + ("A1", "A1"), + ("A1", "Bb"), + ("A1", "C"), + ("Bb", "A1"), + ("C", "C"), + ("C", "D2"), + ("D2", "Bb"), + ], + ) + + def test_parse_pajet_mat(self): + data = """*Vertices 3\n1 "one"\n2 "two"\n3 "three"\n*Matrix\n1 1 0\n0 1 0\n0 1 0\n""" + G = nx.parse_pajek(data) + assert set(G.nodes()) == {"one", "two", "three"} + assert G.nodes["two"] == {"id": "2"} + assert edges_equal( + set(G.edges()), + {("one", "one"), ("two", "one"), ("two", "two"), ("two", "three")}, + ) + + def test_read_pajek(self, tmp_path): + G = nx.parse_pajek(self.data) + # Read data from file + fname = tmp_path / "test.pjk" + with open(fname, "wb") as fh: + fh.write(self.data.encode("UTF-8")) + + Gin = nx.read_pajek(fname) + assert sorted(G.nodes()) == sorted(Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + assert self.G.graph == Gin.graph + for n in G: + assert G.nodes[n] == Gin.nodes[n] + + def test_write_pajek(self): + import io + + G = nx.parse_pajek(self.data) + fh = io.BytesIO() + nx.write_pajek(G, fh) + fh.seek(0) + H = nx.read_pajek(fh) + assert nodes_equal(list(G), list(H)) + assert edges_equal(list(G.edges()), list(H.edges())) + # Graph name is left out for now, therefore it is not tested. 
+ # assert_equal(G.graph, H.graph) + + def test_ignored_attribute(self): + import io + + G = nx.Graph() + fh = io.BytesIO() + G.add_node(1, int_attr=1) + G.add_node(2, empty_attr=" ") + G.add_edge(1, 2, int_attr=2) + G.add_edge(2, 3, empty_attr=" ") + + import warnings + + with warnings.catch_warnings(record=True) as w: + nx.write_pajek(G, fh) + assert len(w) == 4 + + def test_noname(self): + # Make sure we can parse a line such as: *network + # Issue #952 + line = "*network\n" + other_lines = self.data.split("\n")[1:] + data = line + "\n".join(other_lines) + G = nx.parse_pajek(data) + + def test_unicode(self): + import io + + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", foo=name2) + fh = io.BytesIO() + nx.write_pajek(G, fh) + fh.seek(0) + H = nx.read_pajek(fh) + assert nodes_equal(list(G), list(H)) + assert edges_equal(list(G.edges()), list(H.edges())) + assert G.graph == H.graph diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_sparse6.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_sparse6.py new file mode 100644 index 0000000..52cd271 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_sparse6.py @@ -0,0 +1,166 @@ +from io import BytesIO + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestSparseGraph6: + def test_from_sparse6_bytes(self): + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM" + G = nx.from_sparse6_bytes(data) + assert nodes_equal( + sorted(G.nodes()), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + ) + assert edges_equal( + G.edges(), + [ + (0, 1), + (0, 2), + (0, 3), + (1, 12), + (1, 14), + (2, 13), + (2, 15), + (3, 16), + (3, 17), + (4, 7), + (4, 9), + (4, 11), + (5, 6), + (5, 8), + (5, 9), + (6, 10), + (6, 11), + (7, 8), + (7, 10), + (8, 12), + (9, 15), + (10, 14), + (11, 13), + (12, 16), + (13, 17), + (14, 17), + (15, 16), + ], + ) + + def test_from_bytes_multigraph_graph(self): + graph_data = b":An" + G = nx.from_sparse6_bytes(graph_data) + assert isinstance(G, nx.Graph) + multigraph_data = b":Ab" + M = nx.from_sparse6_bytes(multigraph_data) + assert isinstance(M, nx.MultiGraph) + + def test_read_sparse6(self): + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM" + G = nx.from_sparse6_bytes(data) + fh = BytesIO(data) + Gin = nx.read_sparse6(fh) + assert nodes_equal(G.nodes(), Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + + def test_read_many_graph6(self): + # Read many graphs into list + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM\n:Q___dCfDEdcEgcbEGbFIaJ`JaHN`IM" + fh = BytesIO(data) + glist = nx.read_sparse6(fh) + assert len(glist) == 2 + for G in glist: + assert nodes_equal( + G.nodes(), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + ) + + +class TestWriteSparse6: + """Unit tests for writing graphs in the sparse6 format. + + Most of the test cases were checked against the sparse6 encoder in Sage. 
+ + """ + + def test_null_graph(self): + G = nx.null_graph() + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:?\n" + + def test_trivial_graph(self): + G = nx.trivial_graph() + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:@\n" + + def test_empty_graph(self): + G = nx.empty_graph(5) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:D\n" + + def test_large_empty_graph(self): + G = nx.empty_graph(68) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:~?@C\n" + + def test_very_large_empty_graph(self): + G = nx.empty_graph(258049) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:~~???~?@\n" + + def test_complete_graph(self): + G = nx.complete_graph(4) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:CcKI\n" + + def test_no_header(self): + G = nx.complete_graph(4) + result = BytesIO() + nx.write_sparse6(G, result, header=False) + assert result.getvalue() == b":CcKI\n" + + def test_padding(self): + codes = (b":Cdv", b":DaYn", b":EaYnN", b":FaYnL", b":GaYnLz") + for n, code in enumerate(codes, start=4): + G = nx.path_graph(n) + result = BytesIO() + nx.write_sparse6(G, result, header=False) + assert result.getvalue() == code + b"\n" + + def test_complete_bipartite(self): + G = nx.complete_bipartite_graph(6, 9) + result = BytesIO() + nx.write_sparse6(G, result) + # Compared with sage + expected = b">>sparse6<<:Nk" + b"?G`cJ" * 9 + b"\n" + assert result.getvalue() == expected + + def test_read_write_inverse(self): + for i in list(range(13)) + [31, 47, 62, 63, 64, 72]: + m = min(2 * i, i * i // 2) + g = nx.random_graphs.gnm_random_graph(i, m, seed=i) + gstr = BytesIO() + nx.write_sparse6(g, gstr, header=False) + # Strip the trailing newline. 
+ gstr = gstr.getvalue().rstrip() + g2 = nx.from_sparse6_bytes(gstr) + assert g2.order() == g.order() + assert edges_equal(g2.edges(), g.edges()) + + def test_no_directed_graphs(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.write_sparse6(nx.DiGraph(), BytesIO()) + + def test_write_path(self, tmp_path): + # Get a valid temporary file name + fullfilename = str(tmp_path / "test.s6") + # file should be closed now, so write_sparse6 can open it + nx.write_sparse6(nx.null_graph(), fullfilename) + with open(fullfilename, mode="rb") as fh: + assert fh.read() == b">>sparse6<<:?\n" diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_text.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_text.py new file mode 100644 index 0000000..b2b7448 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/tests/test_text.py @@ -0,0 +1,1742 @@ +import random +from itertools import product +from textwrap import dedent + +import pytest + +import networkx as nx + + +def test_generate_network_text_forest_directed(): + # Create a directed forest with labels + graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + for node in graph.nodes: + graph.nodes[node]["label"] = "node_" + chr(ord("a") + node) + + node_target = dedent( + """ + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ └─╼ 4 + └─╼ 2 + ├─╼ 5 + └─╼ 6 + """ + ).strip() + + label_target = dedent( + """ + ╙── node_a + ├─╼ node_b + │ ├─╼ node_d + │ └─╼ node_e + └─╼ node_c + ├─╼ node_f + └─╼ node_g + """ + ).strip() + + # Basic node case + ret = nx.generate_network_text(graph, with_labels=False) + assert "\n".join(ret) == node_target + + # Basic label case + ret = nx.generate_network_text(graph, with_labels=True) + assert "\n".join(ret) == label_target + + +def test_write_network_text_empty_graph(): + def _graph_str(g, **kw): + printbuf = [] + nx.write_network_text(g, printbuf.append, end="", **kw) + return "\n".join(printbuf) + + assert _graph_str(nx.DiGraph()) == "╙" + assert _graph_str(nx.Graph()) == "╙" + assert _graph_str(nx.DiGraph(), ascii_only=True) == "+" + assert _graph_str(nx.Graph(), ascii_only=True) == "+" + + +def test_write_network_text_within_forest_glyph(): + g = nx.DiGraph() + g.add_nodes_from([1, 2, 3, 4]) + g.add_edge(2, 4) + lines = [] + write = lines.append + nx.write_network_text(g, path=write, end="") + nx.write_network_text(g, path=write, ascii_only=True, end="") + text = "\n".join(lines) + target = dedent( + """ + ╟── 1 + ╟── 2 + ╎ └─╼ 4 + ╙── 3 + +-- 1 + +-- 2 + : L-> 4 + +-- 3 + """ + ).strip() + assert text == target + + +def test_generate_network_text_directed_multi_tree(): + tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + forest = nx.disjoint_union_all([tree1, tree2]) + ret = "\n".join(nx.generate_network_text(forest)) + + target = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ │ ├─╼ 3 + ╎ │ └─╼ 4 + ╎ └─╼ 2 + ╎ ├─╼ 5 + ╎ └─╼ 6 + ╙── 7 + ├─╼ 8 + │ ├─╼ 10 + │ └─╼ 11 + └─╼ 9 + ├─╼ 12 + └─╼ 13 + """ + ).strip() + assert ret == target + + tree3 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + forest = nx.disjoint_union_all([tree1, tree2, tree3]) + ret = "\n".join(nx.generate_network_text(forest, sources=[0, 14, 7])) + + target = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ │ ├─╼ 3 + ╎ │ └─╼ 4 + ╎ └─╼ 2 + ╎ ├─╼ 5 + ╎ └─╼ 6 + ╟── 14 + ╎ ├─╼ 15 + ╎ │ ├─╼ 17 + ╎ │ └─╼ 18 + ╎ └─╼ 16 + ╎ ├─╼ 19 + ╎ └─╼ 20 + ╙── 7 + ├─╼ 8 + │ ├─╼ 10 + │ └─╼ 11 + └─╼ 9 + ├─╼ 12 + └─╼ 13 + """ + ).strip() + assert ret == target + + ret = 
"\n".join( + nx.generate_network_text(forest, sources=[0, 14, 7], ascii_only=True) + ) + + target = dedent( + """ + +-- 0 + : |-> 1 + : | |-> 3 + : | L-> 4 + : L-> 2 + : |-> 5 + : L-> 6 + +-- 14 + : |-> 15 + : | |-> 17 + : | L-> 18 + : L-> 16 + : |-> 19 + : L-> 20 + +-- 7 + |-> 8 + | |-> 10 + | L-> 11 + L-> 9 + |-> 12 + L-> 13 + """ + ).strip() + assert ret == target + + +def test_generate_network_text_undirected_multi_tree(): + tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + tree2 = nx.relabel_nodes(tree2, {n: n + len(tree1) for n in tree2.nodes}) + forest = nx.union(tree1, tree2) + ret = "\n".join(nx.generate_network_text(forest, sources=[0, 7])) + + target = dedent( + """ + ╟── 0 + ╎ ├── 1 + ╎ │ ├── 3 + ╎ │ └── 4 + ╎ └── 2 + ╎ ├── 5 + ╎ └── 6 + ╙── 7 + ├── 8 + │ ├── 10 + │ └── 11 + └── 9 + ├── 12 + └── 13 + """ + ).strip() + assert ret == target + + ret = "\n".join(nx.generate_network_text(forest, sources=[0, 7], ascii_only=True)) + + target = dedent( + """ + +-- 0 + : |-- 1 + : | |-- 3 + : | L-- 4 + : L-- 2 + : |-- 5 + : L-- 6 + +-- 7 + |-- 8 + | |-- 10 + | L-- 11 + L-- 9 + |-- 12 + L-- 13 + """ + ).strip() + assert ret == target + + +def test_generate_network_text_forest_undirected(): + # Create a directed forest + graph = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + + node_target0 = dedent( + """ + ╙── 0 + ├── 1 + │ ├── 3 + │ └── 4 + └── 2 + ├── 5 + └── 6 + """ + ).strip() + + # defined starting point + ret = "\n".join(nx.generate_network_text(graph, sources=[0])) + assert ret == node_target0 + + # defined starting point + node_target2 = dedent( + """ + ╙── 2 + ├── 0 + │ └── 1 + │ ├── 3 + │ └── 4 + ├── 5 + └── 6 + """ + ).strip() + ret = "\n".join(nx.generate_network_text(graph, sources=[2])) + assert ret == node_target2 + + +def test_generate_network_text_overspecified_sources(): + """ + When sources are directly specified, we won't be able to determine when we + are in the last component, so there will always be a trailing, leftmost + pipe. + """ + graph = nx.disjoint_union_all( + [ + nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph), + nx.balanced_tree(r=1, h=2, create_using=nx.DiGraph), + nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph), + ] + ) + + # defined starting point + target1 = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ └─╼ 2 + ╟── 3 + ╎ └─╼ 4 + ╎ └─╼ 5 + ╟── 6 + ╎ ├─╼ 7 + ╎ └─╼ 8 + """ + ).strip() + + target2 = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ └─╼ 2 + ╟── 3 + ╎ └─╼ 4 + ╎ └─╼ 5 + ╙── 6 + ├─╼ 7 + └─╼ 8 + """ + ).strip() + + got1 = "\n".join(nx.generate_network_text(graph, sources=graph.nodes)) + got2 = "\n".join(nx.generate_network_text(graph)) + assert got1 == target1 + assert got2 == target2 + + +def test_write_network_text_iterative_add_directed_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in product(graph.nodes, graph.nodes): + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + # defined starting point + target = dedent( + """ + --- initial state --- + ╟── 1 + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 1) --- + ╟── 1 ╾ 1 + ╎ └─╼ ... + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 2) --- + ╟── 1 ╾ 1 + ╎ ├─╼ 2 + ╎ └─╼ ... 
+ ╟── 3 + ╙── 4 + --- add_edge(1, 3) --- + ╟── 1 ╾ 1 + ╎ ├─╼ 2 + ╎ ├─╼ 3 + ╎ └─╼ ... + ╙── 4 + --- add_edge(1, 4) --- + ╙── 1 ╾ 1 + ├─╼ 2 + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 1) --- + ╙── 2 ╾ 1 + └─╼ 1 ╾ 1 + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 2) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ └─╼ ... + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 3) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ ├─╼ 3 ╾ 1 + │ └─╼ ... + ├─╼ 4 + └─╼ ... + --- add_edge(2, 4) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ ├─╼ 3 ╾ 1 + │ ├─╼ 4 ╾ 1 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 1) --- + ╙── 2 ╾ 1, 2 + ├─╼ 1 ╾ 1, 3 + │ ├─╼ 3 ╾ 2 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 2 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 2) --- + ╙── 3 ╾ 1, 2 + ├─╼ 1 ╾ 1, 2 + │ ├─╼ 2 ╾ 2, 3 + │ │ ├─╼ 4 ╾ 1 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(3, 3) --- + ╙── 1 ╾ 1, 2, 3 + ├─╼ 2 ╾ 2, 3 + │ ├─╼ 3 ╾ 1, 3 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 1 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 4) --- + ╙── 1 ╾ 1, 2, 3 + ├─╼ 2 ╾ 2, 3 + │ ├─╼ 3 ╾ 1, 3 + │ │ ├─╼ 4 ╾ 1, 2 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 1) --- + ╙── 2 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 3, 4 + │ ├─╼ 3 ╾ 2, 3 + │ │ ├─╼ 4 ╾ 1, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 2) --- + ╙── 3 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 2, 4 + │ ├─╼ 2 ╾ 2, 3, 4 + │ │ ├─╼ 4 ╾ 1, 3 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 3) --- + ╙── 4 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 2, 3 + │ ├─╼ 2 ╾ 2, 3, 4 + │ │ ├─╼ 3 ╾ 1, 3, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 4) --- + ╙── 1 ╾ 1, 2, 3, 4 + ├─╼ 2 ╾ 2, 3, 4 + │ ├─╼ 3 ╾ 1, 3, 4 + │ │ ├─╼ 4 ╾ 1, 2, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_iterative_add_undirected_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in product(graph.nodes, graph.nodes): + if i == j: + continue + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + --- initial state --- + ╟── 1 + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 2) --- + ╟── 3 + ╟── 4 + ╙── 1 + └── 2 + --- add_edge(1, 3) --- + ╟── 4 + ╙── 2 + └── 1 + └── 3 + --- add_edge(1, 4) --- + ╙── 2 + └── 1 + ├── 3 + └── 4 + --- add_edge(2, 1) --- + ╙── 2 + └── 1 + ├── 3 + └── 4 + --- add_edge(2, 3) --- + ╙── 4 + └── 1 + ├── 2 + │ └── 3 ─ 1 + └── ... + --- add_edge(2, 4) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 1) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 2) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 4) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 1) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 2) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 3) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_iterative_add_random_directed_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + + rng = random.Random(724466096) + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4, 5]) + possible_edges = list(product(graph.nodes, graph.nodes)) + rng.shuffle(possible_edges) + graph.add_edges_from(possible_edges[0:8]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in possible_edges[8:12]: + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + --- initial state --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 1) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(2, 1) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(5, 2) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2, 5 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(1, 5) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2, 5 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 ╾ 1 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + + """ + ).strip() + assert target == text + + +def test_write_network_text_nearly_forest(): + g = nx.DiGraph() + g.add_edge(1, 2) + g.add_edge(1, 5) + g.add_edge(2, 3) + g.add_edge(3, 4) + g.add_edge(5, 6) + g.add_edge(6, 7) + g.add_edge(6, 8) + orig = g.copy() + g.add_edge(1, 8) # forward edge + g.add_edge(4, 2) # back edge + g.add_edge(6, 3) # cross edge + lines = [] + write = lines.append + write("--- directed case ---") + nx.write_network_text(orig, path=write, end="") + write("--- add (1, 8), (4, 2), (6, 3) ---") + nx.write_network_text(g, path=write, end="") + write("--- undirected case ---") + nx.write_network_text(orig.to_undirected(), path=write, sources=[1], end="") + write("--- add (1, 8), (4, 2), (6, 3) ---") + nx.write_network_text(g.to_undirected(), path=write, sources=[1], end="") + text = "\n".join(lines) + target = dedent( + """ + --- directed case --- + ╙── 1 + ├─╼ 2 + │ └─╼ 3 + │ └─╼ 4 + └─╼ 5 + └─╼ 6 + ├─╼ 7 + └─╼ 8 + --- add (1, 8), (4, 2), (6, 3) --- + ╙── 1 + ├─╼ 2 ╾ 4 + │ └─╼ 3 ╾ 6 + │ └─╼ 4 + │ └─╼ ... + ├─╼ 5 + │ └─╼ 6 + │ ├─╼ 7 + │ ├─╼ 8 ╾ 1 + │ └─╼ ... + └─╼ ... + --- undirected case --- + ╙── 1 + ├── 2 + │ └── 3 + │ └── 4 + └── 5 + └── 6 + ├── 7 + └── 8 + --- add (1, 8), (4, 2), (6, 3) --- + ╙── 1 + ├── 2 + │ ├── 3 + │ │ ├── 4 ─ 2 + │ │ └── 6 + │ │ ├── 5 ─ 1 + │ │ ├── 7 + │ │ └── 8 ─ 1 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_complete_graph_ascii_only(): + graph = nx.generators.complete_graph(5, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case ---") + nx.write_network_text(graph, path=write, ascii_only=True, end="") + write("--- undirected case ---") + nx.write_network_text(graph.to_undirected(), path=write, ascii_only=True, end="") + text = "\n".join(lines) + target = dedent( + """ + --- directed case --- + +-- 0 <- 1, 2, 3, 4 + |-> 1 <- 2, 3, 4 + | |-> 2 <- 0, 3, 4 + | | |-> 3 <- 0, 1, 4 + | | | |-> 4 <- 0, 1, 2 + | | | | L-> ... + | | | L-> ... + | | L-> ... + | L-> ... + L-> ... 
+ --- undirected case --- + +-- 0 + |-- 1 + | |-- 2 - 0 + | | |-- 3 - 0, 1 + | | | L-- 4 - 0, 1, 2 + | | L-- ... + | L-- ... + L-- ... + """ + ).strip() + assert target == text + + +def test_write_network_text_with_labels(): + graph = nx.generators.complete_graph(5, create_using=nx.DiGraph) + for n in graph.nodes: + graph.nodes[n]["label"] = f"Node(n={n})" + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, with_labels=True, ascii_only=False, end="") + text = "\n".join(lines) + # Non trees with labels can get somewhat out of hand with network text + # because we need to immediately show every non-tree edge to the right + target = dedent( + """ + ╙── Node(n=0) ╾ Node(n=1), Node(n=2), Node(n=3), Node(n=4) + ├─╼ Node(n=1) ╾ Node(n=2), Node(n=3), Node(n=4) + │ ├─╼ Node(n=2) ╾ Node(n=0), Node(n=3), Node(n=4) + │ │ ├─╼ Node(n=3) ╾ Node(n=0), Node(n=1), Node(n=4) + │ │ │ ├─╼ Node(n=4) ╾ Node(n=0), Node(n=1), Node(n=2) + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_complete_graphs(): + lines = [] + write = lines.append + for k in [0, 1, 2, 3, 4, 5]: + g = nx.generators.complete_graph(k) + write(f"--- undirected k={k} ---") + nx.write_network_text(g, path=write, end="") + + for k in [0, 1, 2, 3, 4, 5]: + g = nx.generators.complete_graph(k, nx.DiGraph) + write(f"--- directed k={k} ---") + nx.write_network_text(g, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + --- undirected k=0 --- + ╙ + --- undirected k=1 --- + ╙── 0 + --- undirected k=2 --- + ╙── 0 + └── 1 + --- undirected k=3 --- + ╙── 0 + ├── 1 + │ └── 2 ─ 0 + └── ... + --- undirected k=4 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ └── 3 ─ 0, 1 + │ └── ... + └── ... + --- undirected k=5 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ ├── 3 ─ 0, 1 + │ │ │ └── 4 ─ 0, 1, 2 + │ │ └── ... + │ └── ... + └── ... + --- directed k=0 --- + ╙ + --- directed k=1 --- + ╙── 0 + --- directed k=2 --- + ╙── 0 ╾ 1 + └─╼ 1 + └─╼ ... + --- directed k=3 --- + ╙── 0 ╾ 1, 2 + ├─╼ 1 ╾ 2 + │ ├─╼ 2 ╾ 0 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed k=4 --- + ╙── 0 ╾ 1, 2, 3 + ├─╼ 1 ╾ 2, 3 + │ ├─╼ 2 ╾ 0, 3 + │ │ ├─╼ 3 ╾ 0, 1 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed k=5 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ ├─╼ 3 ╾ 0, 1, 4 + │ │ │ ├─╼ 4 ╾ 0, 1, 2 + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_multiple_sources(): + g = nx.DiGraph() + g.add_edge(1, 2) + g.add_edge(1, 3) + g.add_edge(2, 4) + g.add_edge(3, 5) + g.add_edge(3, 6) + g.add_edge(5, 4) + g.add_edge(4, 1) + g.add_edge(1, 5) + lines = [] + write = lines.append + # Use each node as the starting point to demonstrate how the representation + # changes. + nodes = sorted(g.nodes()) + for n in nodes: + write(f"--- source node: {n} ---") + nx.write_network_text(g, path=write, sources=[n], end="") + text = "\n".join(lines) + target = dedent( + """ + --- source node: 1 --- + ╙── 1 ╾ 4 + ├─╼ 2 + │ └─╼ 4 ╾ 5 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... + --- source node: 2 --- + ╙── 2 ╾ 1 + └─╼ 4 ╾ 5 + └─╼ 1 + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... + --- source node: 3 --- + ╙── 3 ╾ 1 + ├─╼ 5 ╾ 1 + │ └─╼ 4 ╾ 2 + │ └─╼ 1 + │ ├─╼ 2 + │ │ └─╼ ... + │ └─╼ ... + └─╼ 6 + --- source node: 4 --- + ╙── 4 ╾ 2, 5 + └─╼ 1 + ├─╼ 2 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... 
+ --- source node: 5 --- + ╙── 5 ╾ 3, 1 + └─╼ 4 ╾ 2 + └─╼ 1 + ├─╼ 2 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 6 + │ └─╼ ... + └─╼ ... + --- source node: 6 --- + ╙── 6 ╾ 3 + """ + ).strip() + assert target == text + + +def test_write_network_text_star_graph(): + graph = nx.star_graph(5, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + ╙── 1 + └── 0 + ├── 2 + ├── 3 + ├── 4 + └── 5 + """ + ).strip() + assert target == text + + +def test_write_network_text_path_graph(): + graph = nx.path_graph(3, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + ╙── 0 + └── 1 + └── 2 + """ + ).strip() + assert target == text + + +def test_write_network_text_lollipop_graph(): + graph = nx.lollipop_graph(4, 2, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + ╙── 5 + └── 4 + └── 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ └── 2 ─ 0, 3 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_wheel_graph(): + graph = nx.wheel_graph(7, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + ╙── 1 + ├── 0 + │ ├── 2 ─ 1 + │ │ └── 3 ─ 0 + │ │ └── 4 ─ 0 + │ │ └── 5 ─ 0 + │ │ └── 6 ─ 0, 1 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_circular_ladder_graph(): + graph = nx.circular_ladder_graph(4, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + ╙── 0 + ├── 1 + │ ├── 2 + │ │ ├── 3 ─ 0 + │ │ │ └── 7 + │ │ │ ├── 6 ─ 2 + │ │ │ │ └── 5 ─ 1 + │ │ │ │ └── 4 ─ 0, 7 + │ │ │ └── ... + │ │ └── ... + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_dorogovtsev_goltsev_mendes_graph(): + graph = nx.dorogovtsev_goltsev_mendes_graph(4, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + ╙── 15 + ├── 0 + │ ├── 1 ─ 15 + │ │ ├── 2 ─ 0 + │ │ │ ├── 4 ─ 0 + │ │ │ │ ├── 9 ─ 0 + │ │ │ │ │ ├── 22 ─ 0 + │ │ │ │ │ └── 38 ─ 4 + │ │ │ │ ├── 13 ─ 2 + │ │ │ │ │ ├── 34 ─ 2 + │ │ │ │ │ └── 39 ─ 4 + │ │ │ │ ├── 18 ─ 0 + │ │ │ │ ├── 30 ─ 2 + │ │ │ │ └── ... + │ │ │ ├── 5 ─ 1 + │ │ │ │ ├── 12 ─ 1 + │ │ │ │ │ ├── 29 ─ 1 + │ │ │ │ │ └── 40 ─ 5 + │ │ │ │ ├── 14 ─ 2 + │ │ │ │ │ ├── 35 ─ 2 + │ │ │ │ │ └── 41 ─ 5 + │ │ │ │ ├── 25 ─ 1 + │ │ │ │ ├── 31 ─ 2 + │ │ │ │ └── ... + │ │ │ ├── 7 ─ 0 + │ │ │ │ ├── 20 ─ 0 + │ │ │ │ └── 32 ─ 2 + │ │ │ ├── 10 ─ 1 + │ │ │ │ ├── 27 ─ 1 + │ │ │ │ └── 33 ─ 2 + │ │ │ ├── 16 ─ 0 + │ │ │ ├── 23 ─ 1 + │ │ │ └── ... + │ │ ├── 3 ─ 0 + │ │ │ ├── 8 ─ 0 + │ │ │ │ ├── 21 ─ 0 + │ │ │ │ └── 36 ─ 3 + │ │ │ ├── 11 ─ 1 + │ │ │ │ ├── 28 ─ 1 + │ │ │ │ └── 37 ─ 3 + │ │ │ ├── 17 ─ 0 + │ │ │ ├── 24 ─ 1 + │ │ │ └── ... + │ │ ├── 6 ─ 0 + │ │ │ ├── 19 ─ 0 + │ │ │ └── 26 ─ 1 + │ │ └── ... + │ └── ... + └── ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_tree_max_depth(): + orig = nx.balanced_tree(r=1, h=3, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- directed case, max_depth=4 ---") + nx.write_network_text(orig, path=write, end="", max_depth=4) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + write("--- undirected case, max_depth=4 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=4) + text = "\n".join(lines) + target = dedent( + """ + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╙── 0 + └─╼ ... + --- directed case, max_depth=2 --- + ╙── 0 + └─╼ 1 + └─╼ ... + --- directed case, max_depth=3 --- + ╙── 0 + └─╼ 1 + └─╼ 2 + └─╼ ... + --- directed case, max_depth=4 --- + ╙── 0 + └─╼ 1 + └─╼ 2 + └─╼ 3 + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╙── 0 ─ 1 + └── ... + --- undirected case, max_depth=2 --- + ╙── 0 + └── 1 ─ 2 + └── ... + --- undirected case, max_depth=3 --- + ╙── 0 + └── 1 + └── 2 ─ 3 + └── ... 
+ --- undirected case, max_depth=4 --- + ╙── 0 + └── 1 + └── 2 + └── 3 + """ + ).strip() + assert target == text + + +def test_write_network_text_graph_max_depth(): + orig = nx.erdos_renyi_graph(10, 0.15, directed=True, seed=40392) + lines = [] + write = lines.append + write("--- directed case, max_depth=None ---") + nx.write_network_text(orig, path=write, end="", max_depth=None) + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- undirected case, max_depth=None ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=None) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + text = "\n".join(lines) + target = dedent( + """ + --- directed case, max_depth=None --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ 3 + ╎ │ ├─╼ 1 ╾ 9 + ╎ │ │ └─╼ 9 ╾ 6 + ╎ │ │ ├─╼ 6 + ╎ │ │ │ └─╼ ... + ╎ │ │ ├─╼ 7 ╾ 4 + ╎ │ │ │ ├─╼ 2 + ╎ │ │ │ └─╼ ... + ╎ │ │ └─╼ ... + ╎ │ └─╼ ... + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╟── 4 + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=2 --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ ... + ╎ └─╼ 7 ╾ 9 + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=3 --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ 3 + ╎ │ └─╼ ... + ╎ └─╼ 7 ╾ 9 + ╎ ├─╼ 2 + ╎ └─╼ ... + ╙── 8 + --- undirected case, max_depth=None --- + ╟── 8 + ╙── 2 + └── 7 + ├── 4 + │ ├── 0 + │ │ └── 3 + │ │ ├── 1 + │ │ │ └── 9 ─ 7 + │ │ │ └── 6 + │ │ └── 5 ─ 4, 7 + │ └── ... + └── ... + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╟── 8 + ╙── 2 ─ 7 + └── ... + --- undirected case, max_depth=2 --- + ╟── 8 + ╙── 2 + └── 7 ─ 4, 5, 9 + └── ... + --- undirected case, max_depth=3 --- + ╟── 8 + ╙── 2 + └── 7 + ├── 4 ─ 0, 5 + │ └── ... + ├── 5 ─ 4, 3 + │ └── ... + └── 9 ─ 1, 6 + └── ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_clique_max_depth(): + orig = nx.complete_graph(5, nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case, max_depth=None ---") + nx.write_network_text(orig, path=write, end="", max_depth=None) + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- undirected case, max_depth=None ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=None) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + text = "\n".join(lines) + target = dedent( + """ + --- directed case, max_depth=None --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ ├─╼ 3 ╾ 0, 1, 4 + │ │ │ ├─╼ 4 ╾ 0, 1, 2 + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╙── 0 ╾ 1, 2, 3, 4 + └─╼ ... + --- directed case, max_depth=2 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ └─╼ ... + ├─╼ 2 ╾ 1, 3, 4 + │ └─╼ ... + ├─╼ 3 ╾ 1, 2, 4 + │ └─╼ ... + └─╼ 4 ╾ 1, 2, 3 + └─╼ ... + --- directed case, max_depth=3 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ └─╼ ... + │ ├─╼ 3 ╾ 0, 2, 4 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 0, 2, 3 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- undirected case, max_depth=None --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ ├── 3 ─ 0, 1 + │ │ │ └── 4 ─ 0, 1, 2 + │ │ └── ... + │ └── ... + └── ... + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╙── 0 ─ 1, 2, 3, 4 + └── ... + --- undirected case, max_depth=2 --- + ╙── 0 + ├── 1 ─ 2, 3, 4 + │ └── ... + ├── 2 ─ 1, 3, 4 + │ └── ... + ├── 3 ─ 1, 2, 4 + │ └── ... + └── 4 ─ 1, 2, 3 + --- undirected case, max_depth=3 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0, 3, 4 + │ │ └── ... + │ ├── 3 ─ 0, 2, 4 + │ │ └── ... + │ └── 4 ─ 0, 2, 3 + └── ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_custom_label(): + # Create a directed forest with labels + graph = nx.erdos_renyi_graph(5, 0.4, directed=True, seed=359222358) + for node in graph.nodes: + graph.nodes[node]["label"] = f"Node({node})" + graph.nodes[node]["chr"] = chr(node + ord("a") - 1) + if node % 2 == 0: + graph.nodes[node]["part"] = chr(node + ord("a")) + + lines = [] + write = lines.append + write("--- when with_labels=True, uses the 'label' attr ---") + nx.write_network_text(graph, path=write, with_labels=True, end="", max_depth=None) + write("--- when with_labels=False, uses str(node) value ---") + nx.write_network_text(graph, path=write, with_labels=False, end="", max_depth=None) + write("--- when with_labels is a string, use that attr ---") + nx.write_network_text(graph, path=write, with_labels="chr", end="", max_depth=None) + write("--- fallback to str(node) when the attr does not exist ---") + nx.write_network_text(graph, path=write, with_labels="part", end="", max_depth=None) + + text = "\n".join(lines) + target = dedent( + """ + --- when with_labels=True, uses the 'label' attr --- + ╙── Node(1) + └─╼ Node(3) ╾ Node(2) + ├─╼ Node(0) + │ ├─╼ Node(2) ╾ Node(3), Node(4) + │ │ └─╼ ... + │ └─╼ Node(4) + │ └─╼ ... + └─╼ ... + --- when with_labels=False, uses str(node) value --- + ╙── 1 + └─╼ 3 ╾ 2 + ├─╼ 0 + │ ├─╼ 2 ╾ 3, 4 + │ │ └─╼ ... + │ └─╼ 4 + │ └─╼ ... + └─╼ ... + --- when with_labels is a string, use that attr --- + ╙── a + └─╼ c ╾ b + ├─╼ ` + │ ├─╼ b ╾ c, d + │ │ └─╼ ... + │ └─╼ d + │ └─╼ ... + └─╼ ... + --- fallback to str(node) when the attr does not exist --- + ╙── 1 + └─╼ 3 ╾ c + ├─╼ a + │ ├─╼ c ╾ 3, e + │ │ └─╼ ... + │ └─╼ e + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_vertical_chains(): + graph1 = nx.lollipop_graph(4, 2, create_using=nx.Graph) + graph1.add_edge(0, -1) + graph1.add_edge(-1, -2) + graph1.add_edge(-2, -3) + + graph2 = graph1.to_directed() + graph2.remove_edges_from([(u, v) for u, v in graph2.edges if v > u]) + + lines = [] + write = lines.append + write("--- Undirected UTF ---") + nx.write_network_text(graph1, path=write, end="", vertical_chains=True) + write("--- Undirected ASCI ---") + nx.write_network_text( + graph1, path=write, end="", vertical_chains=True, ascii_only=True + ) + write("--- Directed UTF ---") + nx.write_network_text(graph2, path=write, end="", vertical_chains=True) + write("--- Directed ASCI ---") + nx.write_network_text( + graph2, path=write, end="", vertical_chains=True, ascii_only=True + ) + + text = "\n".join(lines) + target = dedent( + """ + --- Undirected UTF --- + ╙── 5 + │ + 4 + │ + 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ │ + │ │ 2 ─ 0, 3 + │ ├── -1 + │ │ │ + │ │ -2 + │ │ │ + │ │ -3 + │ └── ... + └── ... + --- Undirected ASCI --- + +-- 5 + | + 4 + | + 3 + |-- 0 + | |-- 1 - 3 + | | | + | | 2 - 0, 3 + | |-- -1 + | | | + | | -2 + | | | + | | -3 + | L-- ... + L-- ... + --- Directed UTF --- + ╙── 5 + ╽ + 4 + ╽ + 3 + ├─╼ 0 ╾ 1, 2 + │ ╽ + │ -1 + │ ╽ + │ -2 + │ ╽ + │ -3 + ├─╼ 1 ╾ 2 + │ └─╼ ... + └─╼ 2 + └─╼ ... + --- Directed ASCI --- + +-- 5 + ! + 4 + ! + 3 + |-> 0 <- 1, 2 + | ! + | -1 + | ! + | -2 + | ! + | -3 + |-> 1 <- 2 + | L-> ... + L-> 2 + L-> ... 
+ """ + ).strip() + assert target == text + + +def test_collapse_directed(): + graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- Original ---") + nx.write_network_text(graph, path=write, end="") + graph.nodes[1]["collapse"] = True + write("--- Collapse Node 1 ---") + nx.write_network_text(graph, path=write, end="") + write("--- Add alternate path (5, 3) to collapsed zone") + graph.add_edge(5, 3) + nx.write_network_text(graph, path=write, end="") + write("--- Collapse Node 0 ---") + graph.nodes[0]["collapse"] = True + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + target = dedent( + """ + --- Original --- + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ │ ├─╼ 7 + │ │ └─╼ 8 + │ └─╼ 4 + │ ├─╼ 9 + │ └─╼ 10 + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ └─╼ 12 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Collapse Node 1 --- + ╙── 0 + ├─╼ 1 + │ └─╼ ... + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ └─╼ 12 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Add alternate path (5, 3) to collapsed zone + ╙── 0 + ├─╼ 1 + │ └─╼ ... + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ ├─╼ 12 + │ └─╼ 3 ╾ 1 + │ ├─╼ 7 + │ └─╼ 8 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Collapse Node 0 --- + ╙── 0 + └─╼ ... + """ + ).strip() + assert target == text + + +def test_collapse_undirected(): + graph = nx.balanced_tree(r=2, h=3, create_using=nx.Graph) + lines = [] + write = lines.append + write("--- Original ---") + nx.write_network_text(graph, path=write, end="", sources=[0]) + graph.nodes[1]["collapse"] = True + write("--- Collapse Node 1 ---") + nx.write_network_text(graph, path=write, end="", sources=[0]) + write("--- Add alternate path (5, 3) to collapsed zone") + graph.add_edge(5, 3) + nx.write_network_text(graph, path=write, end="", sources=[0]) + write("--- Collapse Node 0 ---") + graph.nodes[0]["collapse"] = True + nx.write_network_text(graph, path=write, end="", sources=[0]) + text = "\n".join(lines) + target = dedent( + """ + --- Original --- + ╙── 0 + ├── 1 + │ ├── 3 + │ │ ├── 7 + │ │ └── 8 + │ └── 4 + │ ├── 9 + │ └── 10 + └── 2 + ├── 5 + │ ├── 11 + │ └── 12 + └── 6 + ├── 13 + └── 14 + --- Collapse Node 1 --- + ╙── 0 + ├── 1 ─ 3, 4 + │ └── ... + └── 2 + ├── 5 + │ ├── 11 + │ └── 12 + └── 6 + ├── 13 + └── 14 + --- Add alternate path (5, 3) to collapsed zone + ╙── 0 + ├── 1 ─ 3, 4 + │ └── ... + └── 2 + ├── 5 + │ ├── 11 + │ ├── 12 + │ └── 3 ─ 1 + │ ├── 7 + │ └── 8 + └── 6 + ├── 13 + └── 14 + --- Collapse Node 0 --- + ╙── 0 ─ 1, 2 + └── ... 
+ """ + ).strip() + assert target == text + + +def generate_test_graphs(): + """ + Generate a gauntlet of different test graphs with different properties + """ + import random + + rng = random.Random(976689776) + num_randomized = 3 + + for directed in [0, 1]: + cls = nx.DiGraph if directed else nx.Graph + + for num_nodes in range(17): + # Disconnected graph + graph = cls() + graph.add_nodes_from(range(num_nodes)) + yield graph + + # Randomize graphs + if num_nodes > 0: + for p in [0.1, 0.3, 0.5, 0.7, 0.9]: + for seed in range(num_randomized): + graph = nx.erdos_renyi_graph( + num_nodes, p, directed=directed, seed=rng + ) + yield graph + + yield nx.complete_graph(num_nodes, cls) + + yield nx.path_graph(3, create_using=cls) + yield nx.balanced_tree(r=1, h=3, create_using=cls) + if not directed: + yield nx.circular_ladder_graph(4, create_using=cls) + yield nx.star_graph(5, create_using=cls) + yield nx.lollipop_graph(4, 2, create_using=cls) + yield nx.wheel_graph(7, create_using=cls) + yield nx.dorogovtsev_goltsev_mendes_graph(4, create_using=cls) + + +@pytest.mark.parametrize( + ("vertical_chains", "ascii_only"), + tuple( + [ + (vertical_chains, ascii_only) + for vertical_chains in [0, 1] + for ascii_only in [0, 1] + ] + ), +) +def test_network_text_round_trip(vertical_chains, ascii_only): + """ + Write the graph to network text format, then parse it back in, assert it is + the same as the original graph. Passing this test is strong validation of + both the format generator and parser. + """ + from networkx.readwrite.text import _parse_network_text + + for graph in generate_test_graphs(): + graph = nx.relabel_nodes(graph, {n: str(n) for n in graph.nodes}) + lines = list( + nx.generate_network_text( + graph, vertical_chains=vertical_chains, ascii_only=ascii_only + ) + ) + new = _parse_network_text(lines) + try: + assert new.nodes == graph.nodes + assert new.edges == graph.edges + except Exception: + nx.write_network_text(graph) + raise diff --git a/.venv/lib/python3.12/site-packages/networkx/readwrite/text.py b/.venv/lib/python3.12/site-packages/networkx/readwrite/text.py new file mode 100644 index 0000000..6fce220 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/readwrite/text.py @@ -0,0 +1,851 @@ +""" +Text-based visual representations of graphs +""" + +import sys +from collections import defaultdict + +import networkx as nx +from networkx.utils import open_file + +__all__ = ["generate_network_text", "write_network_text"] + + +class BaseGlyphs: + @classmethod + def as_dict(cls): + return { + a: getattr(cls, a) + for a in dir(cls) + if not a.startswith("_") and a != "as_dict" + } + + +class AsciiBaseGlyphs(BaseGlyphs): + empty: str = "+" + newtree_last: str = "+-- " + newtree_mid: str = "+-- " + endof_forest: str = " " + within_forest: str = ": " + within_tree: str = "| " + + +class AsciiDirectedGlyphs(AsciiBaseGlyphs): + last: str = "L-> " + mid: str = "|-> " + backedge: str = "<-" + vertical_edge: str = "!" 
+ + +class AsciiUndirectedGlyphs(AsciiBaseGlyphs): + last: str = "L-- " + mid: str = "|-- " + backedge: str = "-" + vertical_edge: str = "|" + + +class UtfBaseGlyphs(BaseGlyphs): + # Notes on available box and arrow characters + # https://en.wikipedia.org/wiki/Box-drawing_character + # https://stackoverflow.com/questions/2701192/triangle-arrow + empty: str = "╙" + newtree_last: str = "╙── " + newtree_mid: str = "╟── " + endof_forest: str = " " + within_forest: str = "╎ " + within_tree: str = "│ " + + +class UtfDirectedGlyphs(UtfBaseGlyphs): + last: str = "└─╼ " + mid: str = "├─╼ " + backedge: str = "╾" + vertical_edge: str = "╽" + + +class UtfUndirectedGlyphs(UtfBaseGlyphs): + last: str = "└── " + mid: str = "├── " + backedge: str = "─" + vertical_edge: str = "│" + + +def generate_network_text( + graph, + with_labels=True, + sources=None, + max_depth=None, + ascii_only=False, + vertical_chains=False, +): + """Generate lines in the "network text" format + + This works via a depth-first traversal of the graph and writing a line for + each unique node encountered. Non-tree edges are written to the right of + each node, and connection to a non-tree edge is indicated with an ellipsis. + This representation works best when the input graph is a forest, but any + graph can be represented. + + This notation is original to networkx, although it is simple enough that it + may be known in existing literature. See #5602 for details. The procedure + is summarized as follows: + + 1. Given a set of source nodes (which can be specified, or automatically + discovered via finding the (strongly) connected components and choosing one + node with minimum degree from each), we traverse the graph in depth first + order. + + 2. Each reachable node will be printed exactly once on it's own line. + + 3. Edges are indicated in one of four ways: + + a. a parent "L-style" connection on the upper left. This corresponds to + a traversal in the directed DFS tree. + + b. a backref "<-style" connection shown directly on the right. For + directed graphs, these are drawn for any incoming edges to a node that + is not a parent edge. For undirected graphs, these are drawn for only + the non-parent edges that have already been represented (The edges that + have not been represented will be handled in the recursive case). + + c. a child "L-style" connection on the lower right. Drawing of the + children are handled recursively. + + d. if ``vertical_chains`` is true, and a parent node only has one child + a "vertical-style" edge is drawn between them. + + 4. The children of each node (wrt the directed DFS tree) are drawn + underneath and to the right of it. In the case that a child node has already + been drawn the connection is replaced with an ellipsis ("...") to indicate + that there is one or more connections represented elsewhere. + + 5. If a maximum depth is specified, an edge to nodes past this maximum + depth will be represented by an ellipsis. + + 6. If a node has a truthy "collapse" value, then we do not traverse past + that node. + + Parameters + ---------- + graph : nx.DiGraph | nx.Graph + Graph to represent + + with_labels : bool | str + If True will use the "label" attribute of a node to display if it + exists otherwise it will use the node value itself. If given as a + string, then that attribute name will be used instead of "label". + Defaults to True. + + sources : List + Specifies which nodes to start traversal from. Note: nodes that are not + reachable from one of these sources may not be shown. 
If unspecified, + the minimal set of nodes needed to reach all others will be used. + + max_depth : int | None + The maximum depth to traverse before stopping. Defaults to None. + + ascii_only : Boolean + If True only ASCII characters are used to construct the visualization + + vertical_chains : Boolean + If True, chains of nodes will be drawn vertically when possible. + + Yields + ------ + str : a line of generated text + + Examples + -------- + >>> graph = nx.path_graph(10) + >>> graph.add_node("A") + >>> graph.add_node("B") + >>> graph.add_node("C") + >>> graph.add_node("D") + >>> graph.add_edge(9, "A") + >>> graph.add_edge(9, "B") + >>> graph.add_edge(9, "C") + >>> graph.add_edge("C", "D") + >>> graph.add_edge("C", "E") + >>> graph.add_edge("C", "F") + >>> nx.write_network_text(graph) + ╙── 0 + └── 1 + └── 2 + └── 3 + └── 4 + └── 5 + └── 6 + └── 7 + └── 8 + └── 9 + ├── A + ├── B + └── C + ├── D + ├── E + └── F + >>> nx.write_network_text(graph, vertical_chains=True) + ╙── 0 + │ + 1 + │ + 2 + │ + 3 + │ + 4 + │ + 5 + │ + 6 + │ + 7 + │ + 8 + │ + 9 + ├── A + ├── B + └── C + ├── D + ├── E + └── F + """ + from typing import Any, NamedTuple + + class StackFrame(NamedTuple): + parent: Any + node: Any + indents: list + this_islast: bool + this_vertical: bool + + collapse_attr = "collapse" + + is_directed = graph.is_directed() + + if is_directed: + glyphs = AsciiDirectedGlyphs if ascii_only else UtfDirectedGlyphs + succ = graph.succ + pred = graph.pred + else: + glyphs = AsciiUndirectedGlyphs if ascii_only else UtfUndirectedGlyphs + succ = graph.adj + pred = graph.adj + + if isinstance(with_labels, str): + label_attr = with_labels + elif with_labels: + label_attr = "label" + else: + label_attr = None + + if max_depth == 0: + yield glyphs.empty + " ..." + elif len(graph.nodes) == 0: + yield glyphs.empty + else: + # If the nodes to traverse are unspecified, find the minimal set of + # nodes that will reach the entire graph + if sources is None: + sources = _find_sources(graph) + + # Populate the stack with each: + # 1. parent node in the DFS tree (or None for root nodes), + # 2. the current node in the DFS tree + # 2. a list of indentations indicating depth + # 3. a flag indicating if the node is the final one to be written. + # Reverse the stack so sources are popped in the correct order. + last_idx = len(sources) - 1 + stack = [ + StackFrame(None, node, [], (idx == last_idx), False) + for idx, node in enumerate(sources) + ][::-1] + + num_skipped_children = defaultdict(lambda: 0) + seen_nodes = set() + while stack: + parent, node, indents, this_islast, this_vertical = stack.pop() + + if node is not Ellipsis: + skip = node in seen_nodes + if skip: + # Mark that we skipped a parent's child + num_skipped_children[parent] += 1 + + if this_islast: + # If we reached the last child of a parent, and we skipped + # any of that parents children, then we should emit an + # ellipsis at the end after this. + if num_skipped_children[parent] and parent is not None: + # Append the ellipsis to be emitted last + next_islast = True + try_frame = StackFrame( + node, Ellipsis, indents, next_islast, False + ) + stack.append(try_frame) + + # Redo this frame, but not as a last object + next_islast = False + try_frame = StackFrame( + parent, node, indents, next_islast, this_vertical + ) + stack.append(try_frame) + continue + + if skip: + continue + seen_nodes.add(node) + + if not indents: + # Top level items (i.e. 
trees in the forest) get different + # glyphs to indicate they are not actually connected + if this_islast: + this_vertical = False + this_prefix = indents + [glyphs.newtree_last] + next_prefix = indents + [glyphs.endof_forest] + else: + this_prefix = indents + [glyphs.newtree_mid] + next_prefix = indents + [glyphs.within_forest] + + else: + # Non-top-level items + if this_vertical: + this_prefix = indents + next_prefix = indents + else: + if this_islast: + this_prefix = indents + [glyphs.last] + next_prefix = indents + [glyphs.endof_forest] + else: + this_prefix = indents + [glyphs.mid] + next_prefix = indents + [glyphs.within_tree] + + if node is Ellipsis: + label = " ..." + suffix = "" + children = [] + else: + if label_attr is not None: + label = str(graph.nodes[node].get(label_attr, node)) + else: + label = str(node) + + # Determine if we want to show the children of this node. + if collapse_attr is not None: + collapse = graph.nodes[node].get(collapse_attr, False) + else: + collapse = False + + # Determine: + # (1) children to traverse into after showing this node. + # (2) parents to immediately show to the right of this node. + if is_directed: + # In the directed case we must show every successor node + # note: it may be skipped later, but we don't have that + # information here. + children = list(succ[node]) + # In the directed case we must show every predecessor + # except for parent we directly traversed from. + handled_parents = {parent} + else: + # Showing only the unseen children results in a more + # concise representation for the undirected case. + children = [ + child for child in succ[node] if child not in seen_nodes + ] + + # In the undirected case, parents are also children, so we + # only need to immediately show the ones we can no longer + # traverse + handled_parents = {*children, parent} + + if max_depth is not None and len(indents) == max_depth - 1: + # Use ellipsis to indicate we have reached maximum depth + if children: + children = [Ellipsis] + handled_parents = {parent} + + if collapse: + # Collapsing a node is the same as reaching maximum depth + if children: + children = [Ellipsis] + handled_parents = {parent} + + # The other parents are other predecessors of this node that + # are not handled elsewhere. + other_parents = [p for p in pred[node] if p not in handled_parents] + if other_parents: + if label_attr is not None: + other_parents_labels = ", ".join( + [ + str(graph.nodes[p].get(label_attr, p)) + for p in other_parents + ] + ) + else: + other_parents_labels = ", ".join( + [str(p) for p in other_parents] + ) + suffix = " ".join(["", glyphs.backedge, other_parents_labels]) + else: + suffix = "" + + # Emit the line for this node, this will be called for each node + # exactly once. + if this_vertical: + yield "".join(this_prefix + [glyphs.vertical_edge]) + + yield "".join(this_prefix + [label, suffix]) + + if vertical_chains: + if is_directed: + num_children = len(set(children)) + else: + num_children = len(set(children) - {parent}) + # The next node can be drawn vertically if it is the only + # remaining child of this node. + next_is_vertical = num_children == 1 + else: + next_is_vertical = False + + # Push children on the stack in reverse order so they are popped in + # the original order. 
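+ # For illustration: children [a, b, c] are pushed as c, b, a, so a is
+ # popped and drawn first; idx == 0 then corresponds to c, the final
+ # child, which is why only that frame gets next_islast=True.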
+ for idx, child in enumerate(children[::-1]): + next_islast = idx == 0 + try_frame = StackFrame( + node, child, next_prefix, next_islast, next_is_vertical + ) + stack.append(try_frame) + + +@open_file(1, "w") +def write_network_text( + graph, + path=None, + with_labels=True, + sources=None, + max_depth=None, + ascii_only=False, + end="\n", + vertical_chains=False, +): + """Creates a nice text representation of a graph + + This works via a depth-first traversal of the graph and writing a line for + each unique node encountered. Non-tree edges are written to the right of + each node, and connection to a non-tree edge is indicated with an ellipsis. + This representation works best when the input graph is a forest, but any + graph can be represented. + + Parameters + ---------- + graph : nx.DiGraph | nx.Graph + Graph to represent + + path : string or file or callable or None + Filename or file handle for data output. + if a function, then it will be called for each generated line. + if None, this will default to "sys.stdout.write" + + with_labels : bool | str + If True will use the "label" attribute of a node to display if it + exists otherwise it will use the node value itself. If given as a + string, then that attribute name will be used instead of "label". + Defaults to True. + + sources : List + Specifies which nodes to start traversal from. Note: nodes that are not + reachable from one of these sources may not be shown. If unspecified, + the minimal set of nodes needed to reach all others will be used. + + max_depth : int | None + The maximum depth to traverse before stopping. Defaults to None. + + ascii_only : Boolean + If True only ASCII characters are used to construct the visualization + + end : string + The line ending character + + vertical_chains : Boolean + If True, chains of nodes will be drawn vertically when possible. + + Examples + -------- + >>> graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + >>> nx.write_network_text(graph) + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ └─╼ 4 + └─╼ 2 + ├─╼ 5 + └─╼ 6 + + >>> # A near tree with one non-tree edge + >>> graph.add_edge(5, 1) + >>> nx.write_network_text(graph) + ╙── 0 + ├─╼ 1 ╾ 5 + │ ├─╼ 3 + │ └─╼ 4 + └─╼ 2 + ├─╼ 5 + │ └─╼ ... + └─╼ 6 + + >>> graph = nx.cycle_graph(5) + >>> nx.write_network_text(graph) + ╙── 0 + ├── 1 + │ └── 2 + │ └── 3 + │ └── 4 ─ 0 + └── ... + + >>> graph = nx.cycle_graph(5, nx.DiGraph) + >>> nx.write_network_text(graph, vertical_chains=True) + ╙── 0 ╾ 4 + ╽ + 1 + ╽ + 2 + ╽ + 3 + ╽ + 4 + └─╼ ... + + >>> nx.write_network_text(graph, vertical_chains=True, ascii_only=True) + +-- 0 <- 4 + ! + 1 + ! + 2 + ! + 3 + ! + 4 + L-> ... + + >>> graph = nx.generators.barbell_graph(4, 2) + >>> nx.write_network_text(graph, vertical_chains=False) + ╙── 4 + ├── 5 + │ └── 6 + │ ├── 7 + │ │ ├── 8 ─ 6 + │ │ │ └── 9 ─ 6, 7 + │ │ └── ... + │ └── ... + └── 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ └── 2 ─ 0, 3 + │ └── ... + └── ... + >>> nx.write_network_text(graph, vertical_chains=True) + ╙── 4 + ├── 5 + │ │ + │ 6 + │ ├── 7 + │ │ ├── 8 ─ 6 + │ │ │ │ + │ │ │ 9 ─ 6, 7 + │ │ └── ... + │ └── ... + └── 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ │ + │ │ 2 ─ 0, 3 + │ └── ... + └── ... + + >>> graph = nx.complete_graph(5, create_using=nx.Graph) + >>> nx.write_network_text(graph) + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ ├── 3 ─ 0, 1 + │ │ │ └── 4 ─ 0, 1, 2 + │ │ └── ... + │ └── ... + └── ... + + >>> graph = nx.complete_graph(3, create_using=nx.DiGraph) + >>> nx.write_network_text(graph) + ╙── 0 ╾ 1, 2 + ├─╼ 1 ╾ 2 + │ ├─╼ 2 ╾ 0 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... 
+ """ + if path is None: + # The path is unspecified, write to stdout + _write = sys.stdout.write + elif hasattr(path, "write"): + # The path is already an open file + _write = path.write + elif callable(path): + # The path is a custom callable + _write = path + else: + raise TypeError(type(path)) + + for line in generate_network_text( + graph, + with_labels=with_labels, + sources=sources, + max_depth=max_depth, + ascii_only=ascii_only, + vertical_chains=vertical_chains, + ): + _write(line + end) + + +def _find_sources(graph): + """ + Determine a minimal set of nodes such that the entire graph is reachable + """ + # For each connected part of the graph, choose at least + # one node as a starting point, preferably without a parent + if graph.is_directed(): + # Choose one node from each SCC with minimum in_degree + sccs = list(nx.strongly_connected_components(graph)) + # condensing the SCCs forms a dag, the nodes in this graph with + # 0 in-degree correspond to the SCCs from which the minimum set + # of nodes from which all other nodes can be reached. + scc_graph = nx.condensation(graph, sccs) + supernode_to_nodes = {sn: [] for sn in scc_graph.nodes()} + # Note: the order of mapping differs between pypy and cpython + # so we have to loop over graph nodes for consistency + mapping = scc_graph.graph["mapping"] + for n in graph.nodes: + sn = mapping[n] + supernode_to_nodes[sn].append(n) + sources = [] + for sn in scc_graph.nodes(): + if scc_graph.in_degree[sn] == 0: + scc = supernode_to_nodes[sn] + node = min(scc, key=lambda n: graph.in_degree[n]) + sources.append(node) + else: + # For undirected graph, the entire graph will be reachable as + # long as we consider one node from every connected component + sources = [ + min(cc, key=lambda n: graph.degree[n]) + for cc in nx.connected_components(graph) + ] + sources = sorted(sources, key=lambda n: graph.degree[n]) + return sources + + +def _parse_network_text(lines): + """Reconstructs a graph from a network text representation. + + This is mainly used for testing. Network text is for display, not + serialization, as such this cannot parse all network text representations + because node labels can be ambiguous with the glyphs and indentation used + to represent edge structure. Additionally, there is no way to determine if + disconnected graphs were originally directed or undirected. + + Parameters + ---------- + lines : list or iterator of strings + Input data in network text format + + Returns + ------- + G: NetworkX graph + The graph corresponding to the lines in network text format. + """ + from itertools import chain + from typing import Any, NamedTuple + + class ParseStackFrame(NamedTuple): + node: Any + indent: int + has_vertical_child: int | None + + initial_line_iter = iter(lines) + + is_ascii = None + is_directed = None + + ############## + # Initial Pass + ############## + + # Do an initial pass over the lines to determine what type of graph it is. + # Remember what these lines were, so we can reiterate over them in the + # parsing pass. + initial_lines = [] + try: + first_line = next(initial_line_iter) + except StopIteration: + ... 
+ else: + initial_lines.append(first_line) + # The first character indicates if it is an ASCII or UTF graph + first_char = first_line[0] + if first_char in { + UtfBaseGlyphs.empty, + UtfBaseGlyphs.newtree_mid[0], + UtfBaseGlyphs.newtree_last[0], + }: + is_ascii = False + elif first_char in { + AsciiBaseGlyphs.empty, + AsciiBaseGlyphs.newtree_mid[0], + AsciiBaseGlyphs.newtree_last[0], + }: + is_ascii = True + else: + raise AssertionError(f"Unexpected first character: {first_char}") + + if is_ascii: + directed_glyphs = AsciiDirectedGlyphs.as_dict() + undirected_glyphs = AsciiUndirectedGlyphs.as_dict() + else: + directed_glyphs = UtfDirectedGlyphs.as_dict() + undirected_glyphs = UtfUndirectedGlyphs.as_dict() + + # For both directed / undirected glyphs, determine which glyphs never + # appear as substrings in the other undirected / directed glyphs. Glyphs + # with this property unambiguously indicates if a graph is directed / + # undirected. + directed_items = set(directed_glyphs.values()) + undirected_items = set(undirected_glyphs.values()) + unambiguous_directed_items = [] + for item in directed_items: + other_items = undirected_items + other_supersets = [other for other in other_items if item in other] + if not other_supersets: + unambiguous_directed_items.append(item) + unambiguous_undirected_items = [] + for item in undirected_items: + other_items = directed_items + other_supersets = [other for other in other_items if item in other] + if not other_supersets: + unambiguous_undirected_items.append(item) + + for line in initial_line_iter: + initial_lines.append(line) + if any(item in line for item in unambiguous_undirected_items): + is_directed = False + break + elif any(item in line for item in unambiguous_directed_items): + is_directed = True + break + + if is_directed is None: + # Not enough information to determine, choose undirected by default + is_directed = False + + glyphs = directed_glyphs if is_directed else undirected_glyphs + + # the backedge symbol by itself can be ambiguous, but with spaces around it + # becomes unambiguous. + backedge_symbol = " " + glyphs["backedge"] + " " + + # Reconstruct an iterator over all of the lines. + parsing_line_iter = chain(initial_lines, initial_line_iter) + + ############## + # Parsing Pass + ############## + + edges = [] + nodes = [] + is_empty = None + + noparent = object() # sentinel value + + # keep a stack of previous nodes that could be parents of subsequent nodes + stack = [ParseStackFrame(noparent, -1, None)] + + for line in parsing_line_iter: + if line == glyphs["empty"]: + # If the line is the empty glyph, we are done. + # There shouldn't be anything else after this. + is_empty = True + continue + + if backedge_symbol in line: + # This line has one or more backedges, separate those out + node_part, backedge_part = line.split(backedge_symbol) + backedge_nodes = [u.strip() for u in backedge_part.split(", ")] + # Now the node can be parsed + node_part = node_part.rstrip() + prefix, node = node_part.rsplit(" ", 1) + node = node.strip() + # Add the backedges to the edge list + edges.extend([(u, node) for u in backedge_nodes]) + else: + # No backedge, the tail of this line is the node + prefix, node = line.rsplit(" ", 1) + node = node.strip() + + prev = stack.pop() + + if node in glyphs["vertical_edge"]: + # Previous node is still the previous node, but we know it will + # have exactly one child, which will need to have its nesting level + # adjusted. 
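+ # For illustration: a bar line such as "│" carries no node text of its
+ # own; flagging the previous frame lets the next real node attach to it
+ # even though the indentation comparison below would not apply.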
+ modified_prev = ParseStackFrame( + prev.node, + prev.indent, + True, + ) + stack.append(modified_prev) + continue + + # The length of the string before the node characters give us a hint + # about our nesting level. The only case where this doesn't work is + # when there are vertical chains, which is handled explicitly. + indent = len(prefix) + curr = ParseStackFrame(node, indent, None) + + if prev.has_vertical_child: + # In this case we know prev must be the parent of our current line, + # so we don't have to search the stack. (which is good because the + # indentation check wouldn't work in this case). + ... + else: + # If the previous node nesting-level is greater than the current + # nodes nesting-level than the previous node was the end of a path, + # and is not our parent. We can safely pop nodes off the stack + # until we find one with a comparable nesting-level, which is our + # parent. + while curr.indent <= prev.indent: + prev = stack.pop() + + if node == "...": + # The current previous node is no longer a valid parent, + # keep it popped from the stack. + stack.append(prev) + else: + # The previous and current nodes may still be parents, so add them + # back onto the stack. + stack.append(prev) + stack.append(curr) + + # Add the node and the edge to its parent to the node / edge lists. + nodes.append(curr.node) + if prev.node is not noparent: + edges.append((prev.node, curr.node)) + + if is_empty: + # Sanity check + assert len(nodes) == 0 + + # Reconstruct the graph + cls = nx.DiGraph if is_directed else nx.Graph + new = cls() + new.add_nodes_from(nodes) + new.add_edges_from(edges) + return new diff --git a/.venv/lib/python3.12/site-packages/networkx/relabel.py b/.venv/lib/python3.12/site-packages/networkx/relabel.py new file mode 100644 index 0000000..4b870f7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/relabel.py @@ -0,0 +1,285 @@ +import networkx as nx + +__all__ = ["convert_node_labels_to_integers", "relabel_nodes"] + + +@nx._dispatchable( + preserve_all_attrs=True, mutates_input={"not copy": 2}, returns_graph=True +) +def relabel_nodes(G, mapping, copy=True): + """Relabel the nodes of the graph G according to a given mapping. + + The original node ordering may not be preserved if `copy` is `False` and the + mapping includes overlap between old and new labels. + + Parameters + ---------- + G : graph + A NetworkX graph + + mapping : dictionary + A dictionary with the old labels as keys and new labels as values. + A partial mapping is allowed. Mapping 2 nodes to a single node is allowed. + Any non-node keys in the mapping are ignored. + + copy : bool (optional, default=True) + If True return a copy, or if False relabel the nodes in place. 
+ + Examples + -------- + To create a new graph with nodes relabeled according to a given + dictionary: + + >>> G = nx.path_graph(3) + >>> sorted(G) + [0, 1, 2] + >>> mapping = {0: "a", 1: "b", 2: "c"} + >>> H = nx.relabel_nodes(G, mapping) + >>> sorted(H) + ['a', 'b', 'c'] + + Nodes can be relabeled with any hashable object, including numbers + and strings: + + >>> import string + >>> G = nx.path_graph(26) # nodes are integers 0 through 25 + >>> sorted(G)[:3] + [0, 1, 2] + >>> mapping = dict(zip(G, string.ascii_lowercase)) + >>> G = nx.relabel_nodes(G, mapping) # nodes are characters a through z + >>> sorted(G)[:3] + ['a', 'b', 'c'] + >>> mapping = dict(zip(G, range(1, 27))) + >>> G = nx.relabel_nodes(G, mapping) # nodes are integers 1 through 26 + >>> sorted(G)[:3] + [1, 2, 3] + + To perform a partial in-place relabeling, provide a dictionary + mapping only a subset of the nodes, and set the `copy` keyword + argument to False: + + >>> G = nx.path_graph(3) # nodes 0-1-2 + >>> mapping = {0: "a", 1: "b"} # 0->'a' and 1->'b' + >>> G = nx.relabel_nodes(G, mapping, copy=False) + >>> sorted(G, key=str) + [2, 'a', 'b'] + + A mapping can also be given as a function: + + >>> G = nx.path_graph(3) + >>> H = nx.relabel_nodes(G, lambda x: x**2) + >>> list(H) + [0, 1, 4] + + In a multigraph, relabeling two or more nodes to the same new node + will retain all edges, but may change the edge keys in the process: + + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1, value="a") # returns the key for this edge + 0 + >>> G.add_edge(0, 2, value="b") + 0 + >>> G.add_edge(0, 3, value="c") + 0 + >>> mapping = {1: 4, 2: 4, 3: 4} + >>> H = nx.relabel_nodes(G, mapping, copy=True) + >>> print(H[0]) + {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}} + + This works for in-place relabeling too: + + >>> G = nx.relabel_nodes(G, mapping, copy=False) + >>> print(G[0]) + {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}} + + Notes + ----- + Only the nodes specified in the mapping will be relabeled. + Any non-node keys in the mapping are ignored. + + The keyword setting copy=False modifies the graph in place. + Relabel_nodes avoids naming collisions by building a + directed graph from ``mapping`` which specifies the order of + relabelings. Naming collisions, such as a->b, b->c, are ordered + such that "b" gets renamed to "c" before "a" gets renamed "b". + In cases of circular mappings (e.g. a->b, b->a), modifying the + graph is not possible in-place and an exception is raised. + In that case, use copy=True. + + If a relabel operation on a multigraph would cause two or more + edges to have the same source, target and key, the second edge must + be assigned a new key to retain all edges. The new key is set + to the lowest non-negative integer not already used as a key + for edges between these two nodes. Note that this means non-numeric + keys may be replaced by numeric keys. + + See Also + -------- + convert_node_labels_to_integers + """ + # you can pass any callable e.g. f(old_label) -> new_label or + # e.g. str(old_label) -> new_label, but we'll just make a dictionary here regardless + m = {n: mapping(n) for n in G} if callable(mapping) else mapping + + if copy: + return _relabel_copy(G, m) + else: + return _relabel_inplace(G, m) + + +def _relabel_inplace(G, mapping): + if len(mapping.keys() & mapping.values()) > 0: + # labels sets overlap + # can we topological sort and still do the relabeling? 
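+ # For illustration: mapping {"a": "b", "b": "c"} yields edges a->b and
+ # b->c; reversed topological order visits c, b, a, so b is renamed to c
+ # before a claims the freed label b (c itself is skipped below because
+ # it is not a key of the mapping).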
+ D = nx.DiGraph(list(mapping.items())) + D.remove_edges_from(nx.selfloop_edges(D)) + try: + nodes = reversed(list(nx.topological_sort(D))) + except nx.NetworkXUnfeasible as err: + raise nx.NetworkXUnfeasible( + "The node label sets are overlapping and no ordering can " + "resolve the mapping. Use copy=True." + ) from err + else: + # non-overlapping label sets, sort them in the order of G nodes + nodes = [n for n in G if n in mapping] + + multigraph = G.is_multigraph() + directed = G.is_directed() + + for old in nodes: + # Test that old is in both mapping and G, otherwise ignore. + try: + new = mapping[old] + G.add_node(new, **G.nodes[old]) + except KeyError: + continue + if new == old: + continue + if multigraph: + new_edges = [ + (new, new if old == target else target, key, data) + for (_, target, key, data) in G.edges(old, data=True, keys=True) + ] + if directed: + new_edges += [ + (new if old == source else source, new, key, data) + for (source, _, key, data) in G.in_edges(old, data=True, keys=True) + ] + # Ensure new edges won't overwrite existing ones + seen = set() + for i, (source, target, key, data) in enumerate(new_edges): + if target in G[source] and key in G[source][target]: + new_key = 0 if not isinstance(key, int | float) else key + while new_key in G[source][target] or (target, new_key) in seen: + new_key += 1 + new_edges[i] = (source, target, new_key, data) + seen.add((target, new_key)) + else: + new_edges = [ + (new, new if old == target else target, data) + for (_, target, data) in G.edges(old, data=True) + ] + if directed: + new_edges += [ + (new if old == source else source, new, data) + for (source, _, data) in G.in_edges(old, data=True) + ] + G.remove_node(old) + G.add_edges_from(new_edges) + return G + + +def _relabel_copy(G, mapping): + H = G.__class__() + H.add_nodes_from(mapping.get(n, n) for n in G) + H._node.update((mapping.get(n, n), d.copy()) for n, d in G.nodes.items()) + if G.is_multigraph(): + new_edges = [ + (mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy()) + for (n1, n2, k, d) in G.edges(keys=True, data=True) + ] + + # check for conflicting edge-keys + undirected = not G.is_directed() + seen_edges = set() + for i, (source, target, key, data) in enumerate(new_edges): + while (source, target, key) in seen_edges: + if not isinstance(key, int | float): + key = 0 + key += 1 + seen_edges.add((source, target, key)) + if undirected: + seen_edges.add((target, source, key)) + new_edges[i] = (source, target, key, data) + + H.add_edges_from(new_edges) + else: + H.add_edges_from( + (mapping.get(n1, n1), mapping.get(n2, n2), d.copy()) + for (n1, n2, d) in G.edges(data=True) + ) + H.graph.update(G.graph) + return H + + +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def convert_node_labels_to_integers( + G, first_label=0, ordering="default", label_attribute=None +): + """Returns a copy of the graph G with the nodes relabeled using + consecutive integers. + + Parameters + ---------- + G : graph + A NetworkX graph + + first_label : int, optional (default=0) + An integer specifying the starting offset in numbering nodes. + The new integer labels are numbered first_label, ..., n-1+first_label. + + ordering : string + "default" : inherit node ordering from G.nodes() + "sorted" : inherit node ordering from sorted(G.nodes()) + "increasing degree" : nodes are sorted by increasing degree + "decreasing degree" : nodes are sorted by decreasing degree + + label_attribute : string, optional (default=None) + Name of node attribute to store old label. 
If None no attribute + is created. + + Notes + ----- + Node and edge attribute data are copied to the new (relabeled) graph. + + There is no guarantee that the relabeling of nodes to integers will + give the same two integers for two (even identical graphs). + Use the `ordering` argument to try to preserve the order. + + See Also + -------- + relabel_nodes + """ + N = G.number_of_nodes() + first_label + if ordering == "default": + mapping = dict(zip(G.nodes(), range(first_label, N))) + elif ordering == "sorted": + nlist = sorted(G.nodes()) + mapping = dict(zip(nlist, range(first_label, N))) + elif ordering == "increasing degree": + dv_pairs = [(d, n) for (n, d) in G.degree()] + dv_pairs.sort() # in-place sort from lowest to highest degree + mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N))) + elif ordering == "decreasing degree": + dv_pairs = [(d, n) for (n, d) in G.degree()] + dv_pairs.sort() # in-place sort from lowest to highest degree + dv_pairs.reverse() + mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N))) + else: + raise nx.NetworkXError(f"Unknown node ordering: {ordering}") + H = relabel_nodes(G, mapping) + # create node attribute with the old label + if label_attribute is not None: + nx.set_node_attributes(H, {v: k for k, v in mapping.items()}, label_attribute) + return H diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..4f7280c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_all_random_functions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_all_random_functions.cpython-312.pyc new file mode 100644 index 0000000..ac22383 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_all_random_functions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert.cpython-312.pyc new file mode 100644 index 0000000..db866aa Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_numpy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_numpy.cpython-312.pyc new file mode 100644 index 0000000..3e8322b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_numpy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_pandas.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_pandas.cpython-312.pyc new file mode 100644 index 0000000..c1f3526 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_pandas.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_scipy.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_scipy.cpython-312.pyc new file mode 100644 index 0000000..12490b2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_convert_scipy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_exceptions.cpython-312.pyc new file mode 100644 index 0000000..ee9ffbd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_import.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_import.cpython-312.pyc new file mode 100644 index 0000000..0e145ac Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_import.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-312.pyc new file mode 100644 index 0000000..78aa9c6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_relabel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_relabel.cpython-312.pyc new file mode 100644 index 0000000..ff98f68 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/tests/__pycache__/test_relabel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_all_random_functions.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_all_random_functions.py new file mode 100644 index 0000000..fb59159 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_all_random_functions.py @@ -0,0 +1,250 @@ +import random + +import pytest + +import networkx as nx +from networkx.algorithms import approximation as approx +from networkx.algorithms import threshold + +np = pytest.importorskip("numpy") + +progress = 0 + +# store the random numbers after setting a global seed +np.random.seed(42) +np_rv = np.random.rand() +random.seed(42) +py_rv = random.random() + + +def t(f, *args, **kwds): + """call one function and check if global RNG changed""" + global progress + progress += 1 + print(progress, ",", end="") + + f(*args, **kwds) + + after_np_rv = np.random.rand() + # if np_rv != after_np_rv: + # print(np_rv, after_np_rv, "don't match np!") + assert np_rv == after_np_rv + np.random.seed(42) + + after_py_rv = random.random() + # if py_rv != after_py_rv: + # print(py_rv, after_py_rv, "don't match py!") + assert py_rv == after_py_rv + random.seed(42) + + +def run_all_random_functions(seed): + n = 20 + m = 10 + k = l = 2 + s = v = 10 + p = q = p1 = p2 = p_in = p_out = 0.4 + alpha = radius = theta = 0.75 + sizes = (20, 20, 10) + colors = [1, 2, 3] + G = nx.barbell_graph(12, 20) + H = nx.cycle_graph(3) + H.add_weighted_edges_from((u, v, 0.2) for u, v in H.edges) + deg_sequence = [3, 2, 1, 3, 2, 1, 3, 2, 1, 2, 1, 2, 1] + in_degree_sequence = w = sequence = aseq = bseq = deg_sequence + + # print("starting...") + t(nx.maximal_independent_set, G, seed=seed) + t(nx.rich_club_coefficient, G, seed=seed, normalized=False) + t(nx.random_reference, 
G, seed=seed) + t(nx.lattice_reference, G, seed=seed) + t(nx.sigma, G, 1, 2, seed=seed) + t(nx.omega, G, 1, 2, seed=seed) + # print("out of smallworld.py") + t(nx.double_edge_swap, G, seed=seed) + # print("starting connected_double_edge_swap") + t(nx.connected_double_edge_swap, nx.complete_graph(9), seed=seed) + # print("ending connected_double_edge_swap") + t(nx.random_layout, G, seed=seed) + t(nx.fruchterman_reingold_layout, G, seed=seed) + t(nx.algebraic_connectivity, G, seed=seed) + t(nx.fiedler_vector, G, seed=seed) + t(nx.spectral_ordering, G, seed=seed) + # print('starting average_clustering') + t(approx.average_clustering, G, seed=seed) + t(approx.simulated_annealing_tsp, H, "greedy", source=1, seed=seed) + t(approx.threshold_accepting_tsp, H, "greedy", source=1, seed=seed) + t( + approx.traveling_salesman_problem, + H, + method=lambda G, weight: approx.simulated_annealing_tsp( + G, "greedy", weight, seed=seed + ), + ) + t( + approx.traveling_salesman_problem, + H, + method=lambda G, weight: approx.threshold_accepting_tsp( + G, "greedy", weight, seed=seed + ), + ) + t(nx.betweenness_centrality, G, seed=seed) + t(nx.edge_betweenness_centrality, G, seed=seed) + t(nx.approximate_current_flow_betweenness_centrality, G, seed=seed) + # print("kernighan") + t(nx.algorithms.community.kernighan_lin_bisection, G, seed=seed) + # nx.algorithms.community.asyn_lpa_communities(G, seed=seed) + t(nx.algorithms.tree.greedy_branching, G, seed=seed) + # print('done with graph argument functions') + + t(nx.spectral_graph_forge, G, alpha, seed=seed) + t(nx.algorithms.community.asyn_fluidc, G, k, max_iter=1, seed=seed) + t( + nx.algorithms.connectivity.edge_augmentation.greedy_k_edge_augmentation, + G, + k, + seed=seed, + ) + t(nx.algorithms.coloring.strategy_random_sequential, G, colors, seed=seed) + + cs = ["d", "i", "i", "d", "d", "i"] + t(threshold.swap_d, cs, seed=seed) + t(nx.configuration_model, deg_sequence, seed=seed) + t( + nx.directed_configuration_model, + in_degree_sequence, + in_degree_sequence, + seed=seed, + ) + t(nx.expected_degree_graph, w, seed=seed) + t(nx.random_degree_sequence_graph, sequence, seed=seed) + joint_degrees = { + 1: {4: 1}, + 2: {2: 2, 3: 2, 4: 2}, + 3: {2: 2, 4: 1}, + 4: {1: 1, 2: 2, 3: 1}, + } + t(nx.joint_degree_graph, joint_degrees, seed=seed) + joint_degree_sequence = [ + (1, 0), + (1, 0), + (1, 0), + (2, 0), + (1, 0), + (2, 1), + (0, 1), + (0, 1), + ] + t(nx.random_clustered_graph, joint_degree_sequence, seed=seed) + constructor = [(3, 3, 0.5), (10, 10, 0.7)] + t(nx.random_shell_graph, constructor, seed=seed) + mapping = {1: 0.4, 2: 0.3, 3: 0.3} + t(nx.utils.random_weighted_sample, mapping, k, seed=seed) + t(nx.utils.weighted_choice, mapping, seed=seed) + t(nx.algorithms.bipartite.configuration_model, aseq, bseq, seed=seed) + t(nx.algorithms.bipartite.preferential_attachment_graph, aseq, p, seed=seed) + + def kernel_integral(u, w, z): + return z - w + + t(nx.random_kernel_graph, n, kernel_integral, seed=seed) + + sizes = [75, 75, 300] + probs = [[0.25, 0.05, 0.02], [0.05, 0.35, 0.07], [0.02, 0.07, 0.40]] + t(nx.stochastic_block_model, sizes, probs, seed=seed) + t(nx.random_partition_graph, sizes, p_in, p_out, seed=seed) + + # print("starting generator functions") + t(threshold.random_threshold_sequence, n, p, seed=seed) + t(nx.tournament.random_tournament, n, seed=seed) + t(nx.relaxed_caveman_graph, l, k, p, seed=seed) + t(nx.planted_partition_graph, l, k, p_in, p_out, seed=seed) + t(nx.gaussian_random_partition_graph, n, s, v, p_in, p_out, seed=seed) + 
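+ # Note: every call above goes through t(), which runs the seeded
+ # function and then asserts that the global numpy and random streams
+ # are untouched; the same holds for the generator functions below.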
t(nx.gn_graph, n, seed=seed) + t(nx.gnr_graph, n, p, seed=seed) + t(nx.gnc_graph, n, seed=seed) + t(nx.scale_free_graph, n, seed=seed) + t(nx.directed.random_uniform_k_out_graph, n, k, seed=seed) + t(nx.random_k_out_graph, n, k, alpha, seed=seed) + N = 1000 + t(nx.partial_duplication_graph, N, n, p, q, seed=seed) + t(nx.duplication_divergence_graph, n, p, seed=seed) + t(nx.random_geometric_graph, n, radius, seed=seed) + t(nx.soft_random_geometric_graph, n, radius, seed=seed) + t(nx.geographical_threshold_graph, n, theta, seed=seed) + t(nx.waxman_graph, n, seed=seed) + t(nx.navigable_small_world_graph, n, seed=seed) + t(nx.thresholded_random_geometric_graph, n, radius, theta, seed=seed) + t(nx.uniform_random_intersection_graph, n, m, p, seed=seed) + t(nx.k_random_intersection_graph, n, m, k, seed=seed) + + t(nx.general_random_intersection_graph, n, 2, [0.1, 0.5], seed=seed) + t(nx.fast_gnp_random_graph, n, p, seed=seed) + t(nx.gnp_random_graph, n, p, seed=seed) + t(nx.dense_gnm_random_graph, n, m, seed=seed) + t(nx.gnm_random_graph, n, m, seed=seed) + t(nx.newman_watts_strogatz_graph, n, k, p, seed=seed) + t(nx.watts_strogatz_graph, n, k, p, seed=seed) + t(nx.connected_watts_strogatz_graph, n, k, p, seed=seed) + t(nx.random_regular_graph, 3, n, seed=seed) + t(nx.barabasi_albert_graph, n, m, seed=seed) + t(nx.extended_barabasi_albert_graph, n, m, p, q, seed=seed) + t(nx.powerlaw_cluster_graph, n, m, p, seed=seed) + t(nx.random_lobster, n, p1, p2, seed=seed) + t(nx.random_powerlaw_tree, n, seed=seed, tries=5000) + t(nx.random_powerlaw_tree_sequence, 10, seed=seed, tries=5000) + t(nx.random_labeled_tree, n, seed=seed) + t(nx.utils.powerlaw_sequence, n, seed=seed) + t(nx.utils.zipf_rv, 2.3, seed=seed) + cdist = [0.2, 0.4, 0.5, 0.7, 0.9, 1.0] + t(nx.utils.discrete_sequence, n, cdistribution=cdist, seed=seed) + t(nx.algorithms.bipartite.random_graph, n, m, p, seed=seed) + t(nx.algorithms.bipartite.gnmk_random_graph, n, m, k, seed=seed) + LFR = nx.generators.LFR_benchmark_graph + t( + LFR, + 25, + 3, + 1.5, + 0.1, + average_degree=3, + min_community=10, + seed=seed, + max_community=20, + ) + t(nx.random_internet_as_graph, n, seed=seed) + # print("done") + + +# choose to test an integer seed, or whether a single RNG can be everywhere +# np_rng = np.random.RandomState(14) +# seed = np_rng +# seed = 14 + + +@pytest.mark.slow +# print("NetworkX Version:", nx.__version__) +def test_rng_interface(): + global progress + + # try different kinds of seeds + for seed in [14, np.random.RandomState(14)]: + np.random.seed(42) + random.seed(42) + run_all_random_functions(seed) + progress = 0 + + # check that both global RNGs are unaffected + after_np_rv = np.random.rand() + # if np_rv != after_np_rv: + # print(np_rv, after_np_rv, "don't match np!") + assert np_rv == after_np_rv + after_py_rv = random.random() + # if py_rv != after_py_rv: + # print(py_rv, after_py_rv, "don't match py!") + assert py_rv == after_py_rv + + +# print("\nDone testing seed:", seed) + +# test_rng_interface() diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_convert.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_convert.py new file mode 100644 index 0000000..44bed94 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_convert.py @@ -0,0 +1,321 @@ +import pytest + +import networkx as nx +from networkx.convert import ( + from_dict_of_dicts, + from_dict_of_lists, + to_dict_of_dicts, + to_dict_of_lists, + to_networkx_graph, +) +from networkx.generators.classic import barbell_graph, 
cycle_graph +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestConvert: + def edgelists_equal(self, e1, e2): + return sorted(sorted(e) for e in e1) == sorted(sorted(e) for e in e2) + + def test_simple_graphs(self): + for dest, source in [ + (to_dict_of_dicts, from_dict_of_dicts), + (to_dict_of_lists, from_dict_of_lists), + ]: + G = barbell_graph(10, 3) + G.graph = {} + dod = dest(G) + + # Dict of [dicts, lists] + GG = source(dod) + assert graphs_equal(G, GG) + GW = to_networkx_graph(dod) + assert graphs_equal(G, GW) + GI = nx.Graph(dod) + assert graphs_equal(G, GI) + + # With nodelist keyword + P4 = nx.path_graph(4) + P3 = nx.path_graph(3) + P4.graph = {} + P3.graph = {} + dod = dest(P4, nodelist=[0, 1, 2]) + Gdod = nx.Graph(dod) + assert graphs_equal(Gdod, P3) + + def test_exceptions(self): + # NX graph + class G: + adj = None + + pytest.raises(nx.NetworkXError, to_networkx_graph, G) + + # pygraphviz agraph + class G: + is_strict = None + + pytest.raises(nx.NetworkXError, to_networkx_graph, G) + + # Dict of [dicts, lists] + G = {"a": 0} + pytest.raises(TypeError, to_networkx_graph, G) + + # list or generator of edges + class G: + next = None + + pytest.raises(nx.NetworkXError, to_networkx_graph, G) + + # no match + pytest.raises(nx.NetworkXError, to_networkx_graph, "a") + + def test_digraphs(self): + for dest, source in [ + (to_dict_of_dicts, from_dict_of_dicts), + (to_dict_of_lists, from_dict_of_lists), + ]: + G = cycle_graph(10) + + # Dict of [dicts, lists] + dod = dest(G) + GG = source(dod) + assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GG.edges())) + GW = to_networkx_graph(dod) + assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GW.edges())) + GI = nx.Graph(dod) + assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GI.edges())) + + G = cycle_graph(10, create_using=nx.DiGraph) + dod = dest(G) + GG = source(dod, create_using=nx.DiGraph) + assert sorted(G.nodes()) == sorted(GG.nodes()) + assert sorted(G.edges()) == sorted(GG.edges()) + GW = to_networkx_graph(dod, create_using=nx.DiGraph) + assert sorted(G.nodes()) == sorted(GW.nodes()) + assert sorted(G.edges()) == sorted(GW.edges()) + GI = nx.DiGraph(dod) + assert sorted(G.nodes()) == sorted(GI.nodes()) + assert sorted(G.edges()) == sorted(GI.edges()) + + def test_graph(self): + g = nx.cycle_graph(10) + G = nx.Graph() + G.add_nodes_from(g) + G.add_weighted_edges_from((u, v, u) for u, v in g.edges()) + + # Dict of dicts + dod = to_dict_of_dicts(G) + GG = from_dict_of_dicts(dod, create_using=nx.Graph) + assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GG.edges())) + GW = to_networkx_graph(dod, create_using=nx.Graph) + assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GW.edges())) + GI = nx.Graph(dod) + assert sorted(G.nodes()) == sorted(GI.nodes()) + assert sorted(G.edges()) == sorted(GI.edges()) + + # Dict of lists + dol = to_dict_of_lists(G) + GG = from_dict_of_lists(dol, create_using=nx.Graph) + # dict of lists throws away edge data so set it to none + enone = [(u, v, {}) for (u, v, d) in G.edges(data=True)] + assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert edges_equal(enone, sorted(GG.edges(data=True))) + GW = to_networkx_graph(dol, create_using=nx.Graph) + assert nodes_equal(sorted(G.nodes()), 
sorted(GW.nodes())) + assert edges_equal(enone, sorted(GW.edges(data=True))) + GI = nx.Graph(dol) + assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) + assert edges_equal(enone, sorted(GI.edges(data=True))) + + def test_with_multiedges_self_loops(self): + G = cycle_graph(10) + XG = nx.Graph() + XG.add_nodes_from(G) + XG.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGM = nx.MultiGraph() + XGM.add_nodes_from(G) + XGM.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGM.add_edge(0, 1, weight=2) # multiedge + XGS = nx.Graph() + XGS.add_nodes_from(G) + XGS.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGS.add_edge(0, 0, weight=100) # self loop + + # Dict of dicts + # with self loops, OK + dod = to_dict_of_dicts(XGS) + GG = from_dict_of_dicts(dod, create_using=nx.Graph) + assert nodes_equal(XGS.nodes(), GG.nodes()) + assert edges_equal(XGS.edges(), GG.edges()) + GW = to_networkx_graph(dod, create_using=nx.Graph) + assert nodes_equal(XGS.nodes(), GW.nodes()) + assert edges_equal(XGS.edges(), GW.edges()) + GI = nx.Graph(dod) + assert nodes_equal(XGS.nodes(), GI.nodes()) + assert edges_equal(XGS.edges(), GI.edges()) + + # Dict of lists + # with self loops, OK + dol = to_dict_of_lists(XGS) + GG = from_dict_of_lists(dol, create_using=nx.Graph) + # dict of lists throws away edge data so set it to none + enone = [(u, v, {}) for (u, v, d) in XGS.edges(data=True)] + assert nodes_equal(sorted(XGS.nodes()), sorted(GG.nodes())) + assert edges_equal(enone, sorted(GG.edges(data=True))) + GW = to_networkx_graph(dol, create_using=nx.Graph) + assert nodes_equal(sorted(XGS.nodes()), sorted(GW.nodes())) + assert edges_equal(enone, sorted(GW.edges(data=True))) + GI = nx.Graph(dol) + assert nodes_equal(sorted(XGS.nodes()), sorted(GI.nodes())) + assert edges_equal(enone, sorted(GI.edges(data=True))) + + # Dict of dicts + # with multiedges, OK + dod = to_dict_of_dicts(XGM) + GG = from_dict_of_dicts(dod, create_using=nx.MultiGraph, multigraph_input=True) + assert nodes_equal(sorted(XGM.nodes()), sorted(GG.nodes())) + assert edges_equal(sorted(XGM.edges()), sorted(GG.edges())) + GW = to_networkx_graph(dod, create_using=nx.MultiGraph, multigraph_input=True) + assert nodes_equal(sorted(XGM.nodes()), sorted(GW.nodes())) + assert edges_equal(sorted(XGM.edges()), sorted(GW.edges())) + GI = nx.MultiGraph(dod) + assert nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes())) + assert sorted(XGM.edges()) == sorted(GI.edges()) + GE = from_dict_of_dicts(dod, create_using=nx.MultiGraph, multigraph_input=False) + assert nodes_equal(sorted(XGM.nodes()), sorted(GE.nodes())) + assert sorted(XGM.edges()) != sorted(GE.edges()) + GI = nx.MultiGraph(XGM) + assert nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes())) + assert edges_equal(sorted(XGM.edges()), sorted(GI.edges())) + GM = nx.MultiGraph(G) + assert nodes_equal(sorted(GM.nodes()), sorted(G.nodes())) + assert edges_equal(sorted(GM.edges()), sorted(G.edges())) + + # Dict of lists + # with multiedges, OK, but better write as DiGraph else you'll + # get double edges + dol = to_dict_of_lists(G) + GG = from_dict_of_lists(dol, create_using=nx.MultiGraph) + assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GG.edges())) + GW = to_networkx_graph(dol, create_using=nx.MultiGraph) + assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GW.edges())) + GI = nx.MultiGraph(dol) + assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) + assert 
edges_equal(sorted(G.edges()), sorted(GI.edges())) + + def test_edgelists(self): + P = nx.path_graph(4) + e = [(0, 1), (1, 2), (2, 3)] + G = nx.Graph(e) + assert nodes_equal(sorted(G.nodes()), sorted(P.nodes())) + assert edges_equal(sorted(G.edges()), sorted(P.edges())) + assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) + + e = [(0, 1, {}), (1, 2, {}), (2, 3, {})] + G = nx.Graph(e) + assert nodes_equal(sorted(G.nodes()), sorted(P.nodes())) + assert edges_equal(sorted(G.edges()), sorted(P.edges())) + assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) + + e = ((n, n + 1) for n in range(3)) + G = nx.Graph(e) + assert nodes_equal(sorted(G.nodes()), sorted(P.nodes())) + assert edges_equal(sorted(G.edges()), sorted(P.edges())) + assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) + + def test_directed_to_undirected(self): + edges1 = [(0, 1), (1, 2), (2, 0)] + edges2 = [(0, 1), (1, 2), (0, 2)] + assert self.edgelists_equal(nx.Graph(nx.DiGraph(edges1)).edges(), edges1) + assert self.edgelists_equal(nx.Graph(nx.DiGraph(edges2)).edges(), edges1) + assert self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges1)).edges(), edges1) + assert self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges2)).edges(), edges1) + + assert self.edgelists_equal( + nx.MultiGraph(nx.MultiDiGraph(edges1)).edges(), edges1 + ) + assert self.edgelists_equal( + nx.MultiGraph(nx.MultiDiGraph(edges2)).edges(), edges1 + ) + + assert self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges1)).edges(), edges1) + assert self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges2)).edges(), edges1) + + def test_attribute_dict_integrity(self): + # we must not replace dict-like graph data structures with dicts + G = nx.Graph() + G.add_nodes_from("abc") + H = to_networkx_graph(G, create_using=nx.Graph) + assert list(H.nodes) == list(G.nodes) + H = nx.DiGraph(G) + assert list(H.nodes) == list(G.nodes) + + def test_to_edgelist(self): + G = nx.Graph([(1, 1)]) + elist = nx.to_edgelist(G, nodelist=list(G)) + assert edges_equal(G.edges(data=True), elist) + + def test_custom_node_attr_dict_safekeeping(self): + class custom_dict(dict): + pass + + class Custom(nx.Graph): + node_attr_dict_factory = custom_dict + + g = nx.Graph() + g.add_node(1, weight=1) + + h = Custom(g) + assert isinstance(g._node[1], dict) + assert isinstance(h._node[1], custom_dict) + + # this raise exception + # h._node.update((n, dd.copy()) for n, dd in g.nodes.items()) + # assert isinstance(h._node[1], custom_dict) + + +@pytest.mark.parametrize( + "edgelist", + ( + # Graph with no edge data + [(0, 1), (1, 2)], + # Graph with edge data + [(0, 1, {"weight": 1.0}), (1, 2, {"weight": 2.0})], + ), +) +def test_to_dict_of_dicts_with_edgedata_param(edgelist): + G = nx.Graph() + G.add_edges_from(edgelist) + # Innermost dict value == edge_data when edge_data != None. 
+ # In the case when G has edge data, it is overwritten + expected = {0: {1: 10}, 1: {0: 10, 2: 10}, 2: {1: 10}} + assert nx.to_dict_of_dicts(G, edge_data=10) == expected + + +def test_to_dict_of_dicts_with_edgedata_and_nodelist(): + G = nx.path_graph(5) + nodelist = [2, 3, 4] + expected = {2: {3: 10}, 3: {2: 10, 4: 10}, 4: {3: 10}} + assert nx.to_dict_of_dicts(G, nodelist=nodelist, edge_data=10) == expected + + +def test_to_dict_of_dicts_with_edgedata_multigraph(): + """Multi edge data overwritten when edge_data != None""" + G = nx.MultiGraph() + G.add_edge(0, 1, key="a") + G.add_edge(0, 1, key="b") + # Multi edge data lost when edge_data is not None + expected = {0: {1: 10}, 1: {0: 10}} + assert nx.to_dict_of_dicts(G, edge_data=10) == expected + + +def test_to_networkx_graph_non_edgelist(): + invalid_edgelist = [1, 2, 3] + with pytest.raises(nx.NetworkXError, match="Input is not a valid edge list"): + nx.to_networkx_graph(invalid_edgelist) diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_numpy.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_numpy.py new file mode 100644 index 0000000..0a554fd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_numpy.py @@ -0,0 +1,531 @@ +import itertools + +import pytest + +import networkx as nx +from networkx.utils import graphs_equal + +np = pytest.importorskip("numpy") +npt = pytest.importorskip("numpy.testing") + + +class TestConvertNumpyArray: + def setup_method(self): + self.G1 = nx.barbell_graph(10, 3) + self.G2 = nx.cycle_graph(10, create_using=nx.DiGraph) + self.G3 = self.create_weighted(nx.Graph()) + self.G4 = self.create_weighted(nx.DiGraph()) + + def create_weighted(self, G): + g = nx.cycle_graph(4) + G.add_nodes_from(g) + G.add_weighted_edges_from((u, v, 10 + u) for u, v in g.edges()) + return G + + def assert_equal(self, G1, G2): + assert sorted(G1.nodes()) == sorted(G2.nodes()) + assert sorted(G1.edges()) == sorted(G2.edges()) + + def identity_conversion(self, G, A, create_using): + assert A.sum() > 0 + GG = nx.from_numpy_array(A, create_using=create_using) + self.assert_equal(G, GG) + GW = nx.to_networkx_graph(A, create_using=create_using) + self.assert_equal(G, GW) + GI = nx.empty_graph(0, create_using).__class__(A) + self.assert_equal(G, GI) + + def test_shape(self): + "Conversion from non-square array." + A = np.array([[1, 2, 3], [4, 5, 6]]) + pytest.raises(nx.NetworkXError, nx.from_numpy_array, A) + + def test_identity_graph_array(self): + "Conversion from graph to array to graph." + A = nx.to_numpy_array(self.G1) + self.identity_conversion(self.G1, A, nx.Graph()) + + def test_identity_digraph_array(self): + """Conversion from digraph to array to digraph.""" + A = nx.to_numpy_array(self.G2) + self.identity_conversion(self.G2, A, nx.DiGraph()) + + def test_identity_weighted_graph_array(self): + """Conversion from weighted graph to array to weighted graph.""" + A = nx.to_numpy_array(self.G3) + self.identity_conversion(self.G3, A, nx.Graph()) + + def test_identity_weighted_digraph_array(self): + """Conversion from weighted digraph to array to weighted digraph.""" + A = nx.to_numpy_array(self.G4) + self.identity_conversion(self.G4, A, nx.DiGraph()) + + def test_nodelist(self): + """Conversion from graph to array to graph with nodelist.""" + P4 = nx.path_graph(4) + P3 = nx.path_graph(3) + nodelist = list(P3) + A = nx.to_numpy_array(P4, nodelist=nodelist) + GA = nx.Graph(A) + self.assert_equal(GA, P3) + + # Make nodelist ambiguous by containing duplicates. 
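+ # A duplicated node would need to own two rows/columns of the array, + # so to_numpy_array must refuse such a nodelist with NetworkXError.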
+ nodelist += [nodelist[0]] + pytest.raises(nx.NetworkXError, nx.to_numpy_array, P3, nodelist=nodelist) + + # Make nodelist invalid by including nonexistent nodes + nodelist = [-1, 0, 1] + with pytest.raises( + nx.NetworkXError, + match=f"Nodes {nodelist - P3.nodes} in nodelist is not in G", + ): + nx.to_numpy_array(P3, nodelist=nodelist) + + def test_weight_keyword(self): + WP4 = nx.Graph() + WP4.add_edges_from((n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3)) + P4 = nx.path_graph(4) + A = nx.to_numpy_array(P4) + np.testing.assert_equal(A, nx.to_numpy_array(WP4, weight=None)) + np.testing.assert_equal(0.5 * A, nx.to_numpy_array(WP4)) + np.testing.assert_equal(0.3 * A, nx.to_numpy_array(WP4, weight="other")) + + def test_from_numpy_array_type(self): + A = np.array([[1]]) + G = nx.from_numpy_array(A) + assert isinstance(G[0][0]["weight"], int) + + A = np.array([[1]]).astype(float) + G = nx.from_numpy_array(A) + assert isinstance(G[0][0]["weight"], float) + + A = np.array([[1]]).astype(str) + G = nx.from_numpy_array(A) + assert isinstance(G[0][0]["weight"], str) + + A = np.array([[1]]).astype(bool) + G = nx.from_numpy_array(A) + assert isinstance(G[0][0]["weight"], bool) + + A = np.array([[1]]).astype(complex) + G = nx.from_numpy_array(A) + assert isinstance(G[0][0]["weight"], complex) + + A = np.array([[1]]).astype(object) + pytest.raises(TypeError, nx.from_numpy_array, A) + + A = np.array([[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]]) + with pytest.raises( + nx.NetworkXError, match=f"Input array must be 2D, not {A.ndim}" + ): + g = nx.from_numpy_array(A) + + def test_from_numpy_array_dtype(self): + dt = [("weight", float), ("cost", int)] + A = np.array([[(1.0, 2)]], dtype=dt) + G = nx.from_numpy_array(A) + assert isinstance(G[0][0]["weight"], float) + assert isinstance(G[0][0]["cost"], int) + assert G[0][0]["cost"] == 2 + assert G[0][0]["weight"] == 1.0 + + def test_from_numpy_array_parallel_edges(self): + """Tests that the :func:`networkx.from_numpy_array` function + interprets integer weights as the number of parallel edges when + creating a multigraph. + + """ + A = np.array([[1, 1], [1, 2]]) + # First, with a simple graph, each integer entry in the adjacency + # matrix is interpreted as the weight of a single edge in the graph. + expected = nx.DiGraph() + edges = [(0, 0), (0, 1), (1, 0)] + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + expected.add_edge(1, 1, weight=2) + actual = nx.from_numpy_array(A, parallel_edges=True, create_using=nx.DiGraph) + assert graphs_equal(actual, expected) + actual = nx.from_numpy_array(A, parallel_edges=False, create_using=nx.DiGraph) + assert graphs_equal(actual, expected) + # Now each integer entry in the adjacency matrix is interpreted as the + # number of parallel edges in the graph if the appropriate keyword + # argument is specified. + edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)] + expected = nx.MultiDiGraph() + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + actual = nx.from_numpy_array( + A, parallel_edges=True, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + expected = nx.MultiDiGraph() + expected.add_edges_from(set(edges), weight=1) + # The sole self-loop (edge 0) on vertex 1 should have weight 2. 
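+ # (with parallel_edges=False an integer entry is read as an edge weight, + # not as a count of parallel edges)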
+ expected[1][1][0]["weight"] = 2 + actual = nx.from_numpy_array( + A, parallel_edges=False, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + + @pytest.mark.parametrize( + "dt", + ( + None, # default + int, # integer dtype + np.dtype( + [("weight", "f8"), ("color", "i1")] + ), # Structured dtype with named fields + ), + ) + def test_from_numpy_array_no_edge_attr(self, dt): + A = np.array([[0, 1], [1, 0]], dtype=dt) + G = nx.from_numpy_array(A, edge_attr=None) + assert "weight" not in G.edges[0, 1] + assert len(G.edges[0, 1]) == 0 + + def test_from_numpy_array_multiedge_no_edge_attr(self): + A = np.array([[0, 2], [2, 0]]) + G = nx.from_numpy_array(A, create_using=nx.MultiDiGraph, edge_attr=None) + assert all("weight" not in e for _, e in G[0][1].items()) + assert len(G[0][1][0]) == 0 + + def test_from_numpy_array_custom_edge_attr(self): + A = np.array([[0, 2], [3, 0]]) + G = nx.from_numpy_array(A, edge_attr="cost") + assert "weight" not in G.edges[0, 1] + assert G.edges[0, 1]["cost"] == 3 + + def test_symmetric(self): + """Tests that a symmetric array has edges added only once to an + undirected multigraph when using :func:`networkx.from_numpy_array`. + + """ + A = np.array([[0, 1], [1, 0]]) + G = nx.from_numpy_array(A, create_using=nx.MultiGraph) + expected = nx.MultiGraph() + expected.add_edge(0, 1, weight=1) + assert graphs_equal(G, expected) + + def test_dtype_int_graph(self): + """Test that setting dtype int actually gives an integer array. + + For more information, see GitHub pull request #1363. + + """ + G = nx.complete_graph(3) + A = nx.to_numpy_array(G, dtype=int) + assert A.dtype == int + + def test_dtype_int_multigraph(self): + """Test that setting dtype int actually gives an integer array. + + For more information, see GitHub pull request #1363. 
+ + """ + G = nx.MultiGraph(nx.complete_graph(3)) + A = nx.to_numpy_array(G, dtype=int) + assert A.dtype == int + + +@pytest.fixture +def multigraph_test_graph(): + G = nx.MultiGraph() + G.add_edge(1, 2, weight=7) + G.add_edge(1, 2, weight=70) + return G + + +@pytest.mark.parametrize(("operator", "expected"), ((sum, 77), (min, 7), (max, 70))) +def test_numpy_multigraph(multigraph_test_graph, operator, expected): + A = nx.to_numpy_array(multigraph_test_graph, multigraph_weight=operator) + assert A[1, 0] == expected + + +def test_to_numpy_array_multigraph_nodelist(multigraph_test_graph): + G = multigraph_test_graph + G.add_edge(0, 1, weight=3) + A = nx.to_numpy_array(G, nodelist=[1, 2]) + assert A.shape == (2, 2) + assert A[1, 0] == 77 + + +@pytest.mark.parametrize( + "G, expected", + [ + (nx.Graph(), np.array([[0, 1 + 2j], [1 + 2j, 0]], dtype=complex)), + (nx.DiGraph(), np.array([[0, 1 + 2j], [0, 0]], dtype=complex)), + ], +) +def test_to_numpy_array_complex_weights(G, expected): + G.add_edge(0, 1, weight=1 + 2j) + A = nx.to_numpy_array(G, dtype=complex) + npt.assert_array_equal(A, expected) + + +def test_to_numpy_array_arbitrary_weights(): + G = nx.DiGraph() + w = 922337203685477580102 # Out of range for int64 + G.add_edge(0, 1, weight=922337203685477580102) # val not representable by int64 + A = nx.to_numpy_array(G, dtype=object) + expected = np.array([[0, w], [0, 0]], dtype=object) + npt.assert_array_equal(A, expected) + + # Undirected + A = nx.to_numpy_array(G.to_undirected(), dtype=object) + expected = np.array([[0, w], [w, 0]], dtype=object) + npt.assert_array_equal(A, expected) + + +@pytest.mark.parametrize( + "func, expected", + ((min, -1), (max, 10), (sum, 11), (np.mean, 11 / 3), (np.median, 2)), +) +def test_to_numpy_array_multiweight_reduction(func, expected): + """Test various functions for reducing multiedge weights.""" + G = nx.MultiDiGraph() + weights = [-1, 2, 10.0] + for w in weights: + G.add_edge(0, 1, weight=w) + A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float) + assert np.allclose(A, [[0, expected], [0, 0]]) + + # Undirected case + A = nx.to_numpy_array(G.to_undirected(), multigraph_weight=func, dtype=float) + assert np.allclose(A, [[0, expected], [expected, 0]]) + + +@pytest.mark.parametrize( + ("G, expected"), + [ + (nx.Graph(), [[(0, 0), (10, 5)], [(10, 5), (0, 0)]]), + (nx.DiGraph(), [[(0, 0), (10, 5)], [(0, 0), (0, 0)]]), + ], +) +def test_to_numpy_array_structured_dtype_attrs_from_fields(G, expected): + """When `dtype` is structured (i.e. 
has names) and `weight` is None, use + the named fields of the dtype to look up edge attributes.""" + G.add_edge(0, 1, weight=10, cost=5.0) + dtype = np.dtype([("weight", int), ("cost", int)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.asarray(expected, dtype=dtype) + npt.assert_array_equal(A, expected) + + +def test_to_numpy_array_structured_dtype_single_attr_default(): + G = nx.path_graph(3) + dtype = np.dtype([("weight", float)]) # A single named field + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float) + npt.assert_array_equal(A["weight"], expected) + + +@pytest.mark.parametrize( + ("field_name", "expected_attr_val"), + [ + ("weight", 1), + ("cost", 3), + ], +) +def test_to_numpy_array_structured_dtype_single_attr(field_name, expected_attr_val): + G = nx.Graph() + G.add_edge(0, 1, cost=3) + dtype = np.dtype([(field_name, float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.array([[0, expected_attr_val], [expected_attr_val, 0]], dtype=float) + npt.assert_array_equal(A[field_name], expected) + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +@pytest.mark.parametrize( + "edge", + [ + (0, 1), # No edge attributes + (0, 1, {"weight": 10}), # One edge attr + (0, 1, {"weight": 5, "flow": -4}), # Multiple but not all edge attrs + (0, 1, {"weight": 2.0, "cost": 10, "flow": -45}), # All attrs + ], +) +def test_to_numpy_array_structured_dtype_multiple_fields(graph_type, edge): + G = graph_type([edge]) + dtype = np.dtype([("weight", float), ("cost", float), ("flow", float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + for attr in dtype.names: + expected = nx.to_numpy_array(G, dtype=float, weight=attr) + npt.assert_array_equal(A[attr], expected) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_to_numpy_array_structured_dtype_scalar_nonedge(G): + G.add_edge(0, 1, weight=10) + dtype = np.dtype([("weight", float), ("cost", float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=np.nan) + for attr in dtype.names: + expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=np.nan) + npt.assert_array_equal(A[attr], expected) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_to_numpy_array_structured_dtype_nonedge_ary(G): + """Similar to the scalar case, except has a different non-edge value for + each named field.""" + G.add_edge(0, 1, weight=10) + dtype = np.dtype([("weight", float), ("cost", float)]) + nonedges = np.array([(0, np.inf)], dtype=dtype) + A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=nonedges) + for attr in dtype.names: + nonedge = nonedges[attr] + expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=nonedge) + npt.assert_array_equal(A[attr], expected) + + +def test_to_numpy_array_structured_dtype_with_weight_raises(): + """Using both a structured dtype (with named fields) and specifying a `weight` + parameter is ambiguous.""" + G = nx.path_graph(3) + dtype = np.dtype([("weight", int), ("cost", int)]) + exception_msg = "Specifying `weight` not supported for structured dtypes" + with pytest.raises(ValueError, match=exception_msg): + nx.to_numpy_array(G, dtype=dtype) # Default is weight="weight" + with pytest.raises(ValueError, match=exception_msg): + nx.to_numpy_array(G, dtype=dtype, weight="cost") + + +@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph)) +def test_to_numpy_array_structured_multigraph_raises(graph_type): + G = 
nx.path_graph(3, create_using=graph_type) + dtype = np.dtype([("weight", int), ("cost", int)]) + with pytest.raises(nx.NetworkXError, match="Structured arrays are not supported"): + nx.to_numpy_array(G, dtype=dtype, weight=None) + + +def test_from_numpy_array_nodelist_bad_size(): + """An exception is raised when `len(nodelist) != A.shape[0]`.""" + n = 5 # Number of nodes + A = np.diag(np.ones(n - 1), k=1) # Adj. matrix for P_n + expected = nx.path_graph(n) + + assert graphs_equal(nx.from_numpy_array(A, edge_attr=None), expected) + nodes = list(range(n)) + assert graphs_equal( + nx.from_numpy_array(A, edge_attr=None, nodelist=nodes), expected + ) + + # Too many node labels + nodes = list(range(n + 1)) + with pytest.raises(ValueError, match="nodelist must have the same length as A"): + nx.from_numpy_array(A, nodelist=nodes) + + # Too few node labels + nodes = list(range(n - 1)) + with pytest.raises(ValueError, match="nodelist must have the same length as A"): + nx.from_numpy_array(A, nodelist=nodes) + + +@pytest.mark.parametrize( + "nodes", + ( + [4, 3, 2, 1, 0], + [9, 7, 1, 2, 8], + ["a", "b", "c", "d", "e"], + [(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)], + ["A", 2, 7, "spam", (1, 3)], + ), +) +def test_from_numpy_array_nodelist(nodes): + A = np.diag(np.ones(4), k=1) + # Without edge attributes + expected = nx.relabel_nodes( + nx.path_graph(5), mapping=dict(enumerate(nodes)), copy=True + ) + G = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes) + assert graphs_equal(G, expected) + + # With edge attributes + nx.set_edge_attributes(expected, 1.0, name="weight") + G = nx.from_numpy_array(A, nodelist=nodes) + assert graphs_equal(G, expected) + + +@pytest.mark.parametrize( + "nodes", + ( + [4, 3, 2, 1, 0], + [9, 7, 1, 2, 8], + ["a", "b", "c", "d", "e"], + [(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)], + ["A", 2, 7, "spam", (1, 3)], + ), +) +def test_from_numpy_array_nodelist_directed(nodes): + A = np.diag(np.ones(4), k=1) + # Without edge attributes + H = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + expected = nx.relabel_nodes(H, mapping=dict(enumerate(nodes)), copy=True) + G = nx.from_numpy_array(A, create_using=nx.DiGraph, edge_attr=None, nodelist=nodes) + assert graphs_equal(G, expected) + + # With edge attributes + nx.set_edge_attributes(expected, 1.0, name="weight") + G = nx.from_numpy_array(A, create_using=nx.DiGraph, nodelist=nodes) + assert graphs_equal(G, expected) + + +@pytest.mark.parametrize( + "nodes", + ( + [4, 3, 2, 1, 0], + [9, 7, 1, 2, 8], + ["a", "b", "c", "d", "e"], + [(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)], + ["A", 2, 7, "spam", (1, 3)], + ), +) +def test_from_numpy_array_nodelist_multigraph(nodes): + A = np.array( + [ + [0, 1, 0, 0, 0], + [1, 0, 2, 0, 0], + [0, 2, 0, 3, 0], + [0, 0, 3, 0, 4], + [0, 0, 0, 4, 0], + ] + ) + + H = nx.MultiGraph() + for i, edge in enumerate(((0, 1), (1, 2), (2, 3), (3, 4))): + H.add_edges_from(itertools.repeat(edge, i + 1)) + expected = nx.relabel_nodes(H, mapping=dict(enumerate(nodes)), copy=True) + + G = nx.from_numpy_array( + A, + parallel_edges=True, + create_using=nx.MultiGraph, + edge_attr=None, + nodelist=nodes, + ) + assert graphs_equal(G, expected) + + +@pytest.mark.parametrize( + "nodes", + ( + [4, 3, 2, 1, 0], + [9, 7, 1, 2, 8], + ["a", "b", "c", "d", "e"], + [(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)], + ["A", 2, 7, "spam", (1, 3)], + ), +) +@pytest.mark.parametrize("graph", (nx.complete_graph, nx.cycle_graph, nx.wheel_graph)) +def test_from_numpy_array_nodelist_rountrip(graph, nodes): + G = graph(5) + A = nx.to_numpy_array(G) + expected 
= nx.relabel_nodes(G, mapping=dict(enumerate(nodes)), copy=True) + H = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes) + assert graphs_equal(H, expected) + + # With an isolated node + G = graph(4) + G.add_node("foo") + A = nx.to_numpy_array(G) + expected = nx.relabel_nodes(G, mapping=dict(zip(G.nodes, nodes)), copy=True) + H = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes) + assert graphs_equal(H, expected) diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_pandas.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_pandas.py new file mode 100644 index 0000000..8c3f02a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_pandas.py @@ -0,0 +1,349 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + +np = pytest.importorskip("numpy") +pd = pytest.importorskip("pandas") + + +class TestConvertPandas: + def setup_method(self): + self.rng = np.random.RandomState(seed=5) + ints = self.rng.randint(1, 11, size=(3, 2)) + a = ["A", "B", "C"] + b = ["D", "A", "E"] + df = pd.DataFrame(ints, columns=["weight", "cost"]) + df[0] = a # Column label 0 (int) + df["b"] = b # Column label 'b' (str) + self.df = df + + mdf = pd.DataFrame([[4, 16, "A", "D"]], columns=["weight", "cost", 0, "b"]) + self.mdf = pd.concat([df, mdf]) + + def test_exceptions(self): + G = pd.DataFrame(["a"]) # adj + pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G) + G = pd.DataFrame(["a", 0.0]) # elist + pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G) + df = pd.DataFrame([[1, 1], [1, 0]], dtype=int, index=[1, 2], columns=["a", "b"]) + pytest.raises(nx.NetworkXError, nx.from_pandas_adjacency, df) + + def test_from_edgelist_all_attr(self): + Gtrue = nx.Graph( + [ + ("E", "C", {"cost": 9, "weight": 10}), + ("B", "A", {"cost": 1, "weight": 7}), + ("A", "D", {"cost": 7, "weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", True) + assert graphs_equal(G, Gtrue) + # MultiGraph + MGtrue = nx.MultiGraph(Gtrue) + MGtrue.add_edge("A", "D", cost=16, weight=4) + MG = nx.from_pandas_edgelist(self.mdf, 0, "b", True, nx.MultiGraph()) + assert graphs_equal(MG, MGtrue) + + def test_from_edgelist_multi_attr(self): + Gtrue = nx.Graph( + [ + ("E", "C", {"cost": 9, "weight": 10}), + ("B", "A", {"cost": 1, "weight": 7}), + ("A", "D", {"cost": 7, "weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", ["weight", "cost"]) + assert graphs_equal(G, Gtrue) + + def test_from_edgelist_multi_attr_incl_target(self): + Gtrue = nx.Graph( + [ + ("E", "C", {0: "C", "b": "E", "weight": 10}), + ("B", "A", {0: "B", "b": "A", "weight": 7}), + ("A", "D", {0: "A", "b": "D", "weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", [0, "b", "weight"]) + assert graphs_equal(G, Gtrue) + + def test_from_edgelist_multidigraph_and_edge_attr(self): + # example from issue #2374 + edges = [ + ("X1", "X4", {"Co": "zA", "Mi": 0, "St": "X1"}), + ("X1", "X4", {"Co": "zB", "Mi": 54, "St": "X2"}), + ("X1", "X4", {"Co": "zB", "Mi": 49, "St": "X3"}), + ("X1", "X4", {"Co": "zB", "Mi": 44, "St": "X4"}), + ("Y1", "Y3", {"Co": "zC", "Mi": 0, "St": "Y1"}), + ("Y1", "Y3", {"Co": "zC", "Mi": 34, "St": "Y2"}), + ("Y1", "Y3", {"Co": "zC", "Mi": 29, "St": "X2"}), + ("Y1", "Y3", {"Co": "zC", "Mi": 24, "St": "Y3"}), + ("Z1", "Z3", {"Co": "zD", "Mi": 0, "St": "Z1"}), + ("Z1", "Z3", {"Co": "zD", "Mi": 14, "St": "X3"}), + ] + Gtrue = nx.MultiDiGraph(edges) + data = { + "O": ["X1", "X1", "X1", "X1", "Y1", "Y1", 
"Y1", "Y1", "Z1", "Z1"], + "D": ["X4", "X4", "X4", "X4", "Y3", "Y3", "Y3", "Y3", "Z3", "Z3"], + "St": ["X1", "X2", "X3", "X4", "Y1", "Y2", "X2", "Y3", "Z1", "X3"], + "Co": ["zA", "zB", "zB", "zB", "zC", "zC", "zC", "zC", "zD", "zD"], + "Mi": [0, 54, 49, 44, 0, 34, 29, 24, 0, 14], + } + df = pd.DataFrame.from_dict(data) + G1 = nx.from_pandas_edgelist( + df, source="O", target="D", edge_attr=True, create_using=nx.MultiDiGraph + ) + G2 = nx.from_pandas_edgelist( + df, + source="O", + target="D", + edge_attr=["St", "Co", "Mi"], + create_using=nx.MultiDiGraph, + ) + assert graphs_equal(G1, Gtrue) + assert graphs_equal(G2, Gtrue) + + def test_from_edgelist_one_attr(self): + Gtrue = nx.Graph( + [ + ("E", "C", {"weight": 10}), + ("B", "A", {"weight": 7}), + ("A", "D", {"weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", "weight") + assert graphs_equal(G, Gtrue) + + def test_from_edgelist_int_attr_name(self): + # note: this also tests that edge_attr can be `source` + Gtrue = nx.Graph( + [("E", "C", {0: "C"}), ("B", "A", {0: "B"}), ("A", "D", {0: "A"})] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", 0) + assert graphs_equal(G, Gtrue) + + def test_from_edgelist_invalid_attr(self): + pytest.raises( + nx.NetworkXError, nx.from_pandas_edgelist, self.df, 0, "b", "misspell" + ) + pytest.raises(nx.NetworkXError, nx.from_pandas_edgelist, self.df, 0, "b", 1) + # see Issue #3562 + edgeframe = pd.DataFrame([[0, 1], [1, 2], [2, 0]], columns=["s", "t"]) + pytest.raises( + nx.NetworkXError, nx.from_pandas_edgelist, edgeframe, "s", "t", True + ) + pytest.raises( + nx.NetworkXError, nx.from_pandas_edgelist, edgeframe, "s", "t", "weight" + ) + pytest.raises( + nx.NetworkXError, + nx.from_pandas_edgelist, + edgeframe, + "s", + "t", + ["weight", "size"], + ) + + def test_from_edgelist_no_attr(self): + Gtrue = nx.Graph([("E", "C", {}), ("B", "A", {}), ("A", "D", {})]) + G = nx.from_pandas_edgelist(self.df, 0, "b") + assert graphs_equal(G, Gtrue) + + def test_from_edgelist(self): + # Pandas DataFrame + G = nx.cycle_graph(10) + G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges)) + + edgelist = nx.to_edgelist(G) + source = [s for s, t, d in edgelist] + target = [t for s, t, d in edgelist] + weight = [d["weight"] for s, t, d in edgelist] + edges = pd.DataFrame({"source": source, "target": target, "weight": weight}) + + GG = nx.from_pandas_edgelist(edges, edge_attr="weight") + assert nodes_equal(G.nodes(), GG.nodes()) + assert edges_equal(G.edges(), GG.edges()) + GW = nx.to_networkx_graph(edges, create_using=nx.Graph) + assert nodes_equal(G.nodes(), GW.nodes()) + assert edges_equal(G.edges(), GW.edges()) + + def test_to_edgelist_default_source_or_target_col_exists(self): + G = nx.path_graph(10) + G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges)) + nx.set_edge_attributes(G, 0, name="source") + pytest.raises(nx.NetworkXError, nx.to_pandas_edgelist, G) + + # drop source column to test an exception raised for the target column + for u, v, d in G.edges(data=True): + d.pop("source", None) + + nx.set_edge_attributes(G, 0, name="target") + pytest.raises(nx.NetworkXError, nx.to_pandas_edgelist, G) + + def test_to_edgelist_custom_source_or_target_col_exists(self): + G = nx.path_graph(10) + G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges)) + nx.set_edge_attributes(G, 0, name="source_col_name") + pytest.raises( + nx.NetworkXError, nx.to_pandas_edgelist, G, source="source_col_name" + ) + + # drop source column to test an exception raised for the target column + for u, v, d in 
G.edges(data=True): + d.pop("source_col_name", None) + + nx.set_edge_attributes(G, 0, name="target_col_name") + pytest.raises( + nx.NetworkXError, nx.to_pandas_edgelist, G, target="target_col_name" + ) + + def test_to_edgelist_edge_key_col_exists(self): + G = nx.path_graph(10, create_using=nx.MultiGraph) + G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges())) + nx.set_edge_attributes(G, 0, name="edge_key_name") + pytest.raises( + nx.NetworkXError, nx.to_pandas_edgelist, G, edge_key="edge_key_name" + ) + + def test_from_adjacency(self): + nodelist = [1, 2] + dftrue = pd.DataFrame( + [[1, 1], [1, 0]], dtype=int, index=nodelist, columns=nodelist + ) + G = nx.Graph([(1, 1), (1, 2)]) + df = nx.to_pandas_adjacency(G, dtype=int) + pd.testing.assert_frame_equal(df, dftrue) + + @pytest.mark.parametrize("graph", [nx.Graph, nx.MultiGraph]) + def test_roundtrip(self, graph): + # edgelist + Gtrue = graph([(1, 1), (1, 2)]) + df = nx.to_pandas_edgelist(Gtrue) + G = nx.from_pandas_edgelist(df, create_using=graph) + assert graphs_equal(Gtrue, G) + # adjacency + adj = {1: {1: {"weight": 1}, 2: {"weight": 1}}, 2: {1: {"weight": 1}}} + Gtrue = graph(adj) + df = nx.to_pandas_adjacency(Gtrue, dtype=int) + G = nx.from_pandas_adjacency(df, create_using=graph) + assert graphs_equal(Gtrue, G) + + def test_from_adjacency_named(self): + # example from issue #3105 + data = { + "A": {"A": 0, "B": 0, "C": 0}, + "B": {"A": 1, "B": 0, "C": 0}, + "C": {"A": 0, "B": 1, "C": 0}, + } + dftrue = pd.DataFrame(data, dtype=np.intp) + df = dftrue[["A", "C", "B"]] + G = nx.from_pandas_adjacency(df, create_using=nx.DiGraph()) + df = nx.to_pandas_adjacency(G, dtype=np.intp) + pd.testing.assert_frame_equal(df, dftrue) + + @pytest.mark.parametrize("edge_attr", [["attr2", "attr3"], True]) + def test_edgekey_with_multigraph(self, edge_attr): + df = pd.DataFrame( + { + "source": {"A": "N1", "B": "N2", "C": "N1", "D": "N1"}, + "target": {"A": "N2", "B": "N3", "C": "N1", "D": "N2"}, + "attr1": {"A": "F1", "B": "F2", "C": "F3", "D": "F4"}, + "attr2": {"A": 1, "B": 0, "C": 0, "D": 0}, + "attr3": {"A": 0, "B": 1, "C": 0, "D": 1}, + } + ) + Gtrue = nx.MultiGraph( + [ + ("N1", "N2", "F1", {"attr2": 1, "attr3": 0}), + ("N2", "N3", "F2", {"attr2": 0, "attr3": 1}), + ("N1", "N1", "F3", {"attr2": 0, "attr3": 0}), + ("N1", "N2", "F4", {"attr2": 0, "attr3": 1}), + ] + ) + # example from issue #4065 + G = nx.from_pandas_edgelist( + df, + source="source", + target="target", + edge_attr=edge_attr, + edge_key="attr1", + create_using=nx.MultiGraph(), + ) + assert graphs_equal(G, Gtrue) + + df_roundtrip = nx.to_pandas_edgelist(G, edge_key="attr1") + df_roundtrip = df_roundtrip.sort_values("attr1") + df_roundtrip.index = ["A", "B", "C", "D"] + pd.testing.assert_frame_equal( + df, df_roundtrip[["source", "target", "attr1", "attr2", "attr3"]] + ) + + def test_edgekey_with_normal_graph_no_action(self): + Gtrue = nx.Graph( + [ + ("E", "C", {"cost": 9, "weight": 10}), + ("B", "A", {"cost": 1, "weight": 7}), + ("A", "D", {"cost": 7, "weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", True, edge_key="weight") + assert graphs_equal(G, Gtrue) + + def test_nonexisting_edgekey_raises(self): + with pytest.raises(nx.exception.NetworkXError): + nx.from_pandas_edgelist( + self.df, + source="source", + target="target", + edge_key="Not_real", + edge_attr=True, + create_using=nx.MultiGraph(), + ) + + def test_multigraph_with_edgekey_no_edgeattrs(self): + Gtrue = nx.MultiGraph() + Gtrue.add_edge(0, 1, key=0) + Gtrue.add_edge(0, 1, key=3) + df = 
nx.to_pandas_edgelist(Gtrue, edge_key="key") + expected = pd.DataFrame({"source": [0, 0], "target": [1, 1], "key": [0, 3]}) + pd.testing.assert_frame_equal(expected, df) + G = nx.from_pandas_edgelist(df, edge_key="key", create_using=nx.MultiGraph) + assert graphs_equal(Gtrue, G) + + +def test_to_pandas_adjacency_with_nodelist(): + G = nx.complete_graph(5) + nodelist = [1, 4] + expected = pd.DataFrame( + [[0, 1], [1, 0]], dtype=int, index=nodelist, columns=nodelist + ) + pd.testing.assert_frame_equal( + expected, nx.to_pandas_adjacency(G, nodelist, dtype=int) + ) + + +def test_to_pandas_edgelist_with_nodelist(): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3)], weight=2.0) + G.add_edge(0, 5, weight=100) + df = nx.to_pandas_edgelist(G, nodelist=[1, 2]) + assert 0 not in df["source"].to_numpy() + assert 100 not in df["weight"].to_numpy() + + +def test_from_pandas_adjacency_with_index_collisions(): + """See gh-7407""" + df = pd.DataFrame( + [ + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0], + ], + index=[1010001, 2, 1, 1010002], + columns=[1010001, 2, 1, 1010002], + ) + G = nx.from_pandas_adjacency(df, create_using=nx.DiGraph) + expected = nx.DiGraph([(1010001, 2), (2, 1), (1, 1010002)]) + assert nodes_equal(G.nodes, expected.nodes) + assert edges_equal(G.edges, expected.edges) diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_scipy.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_scipy.py new file mode 100644 index 0000000..2575027 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_convert_scipy.py @@ -0,0 +1,281 @@ +import pytest + +import networkx as nx +from networkx.utils import graphs_equal + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + + +class TestConvertScipy: + def setup_method(self): + self.G1 = nx.barbell_graph(10, 3) + self.G2 = nx.cycle_graph(10, create_using=nx.DiGraph) + + self.G3 = self.create_weighted(nx.Graph()) + self.G4 = self.create_weighted(nx.DiGraph()) + + def test_exceptions(self): + class G: + format = None + + pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G) + + def create_weighted(self, G): + g = nx.cycle_graph(4) + e = list(g.edges()) + source = [u for u, v in e] + dest = [v for u, v in e] + weight = [s + 10 for s in source] + ex = zip(source, dest, weight) + G.add_weighted_edges_from(ex) + return G + + def identity_conversion(self, G, A, create_using): + GG = nx.from_scipy_sparse_array(A, create_using=create_using) + assert nx.is_isomorphic(G, GG) + + GW = nx.to_networkx_graph(A, create_using=create_using) + assert nx.is_isomorphic(G, GW) + + GI = nx.empty_graph(0, create_using).__class__(A) + assert nx.is_isomorphic(G, GI) + + ACSR = A.tocsr() + GI = nx.empty_graph(0, create_using).__class__(ACSR) + assert nx.is_isomorphic(G, GI) + + ACOO = A.tocoo() + GI = nx.empty_graph(0, create_using).__class__(ACOO) + assert nx.is_isomorphic(G, GI) + + ACSC = A.tocsc() + GI = nx.empty_graph(0, create_using).__class__(ACSC) + assert nx.is_isomorphic(G, GI) + + AD = A.todense() + GI = nx.empty_graph(0, create_using).__class__(AD) + assert nx.is_isomorphic(G, GI) + + AA = A.toarray() + GI = nx.empty_graph(0, create_using).__class__(AA) + assert nx.is_isomorphic(G, GI) + + def test_shape(self): + "Conversion from non-square sparse array." + A = sp.sparse.lil_array([[1, 2, 3], [4, 5, 6]]) + pytest.raises(nx.NetworkXError, nx.from_scipy_sparse_array, A) + + def test_identity_graph_matrix(self): + "Conversion from graph to sparse matrix to graph." 
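+ # Round-trip: to_scipy_sparse_array then back should give an isomorphic graph + # (identity_conversion checks this for several sparse formats).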
+ A = nx.to_scipy_sparse_array(self.G1) + self.identity_conversion(self.G1, A, nx.Graph()) + + def test_identity_digraph_matrix(self): + "Conversion from digraph to sparse matrix to digraph." + A = nx.to_scipy_sparse_array(self.G2) + self.identity_conversion(self.G2, A, nx.DiGraph()) + + def test_identity_weighted_graph_matrix(self): + """Conversion from weighted graph to sparse matrix to weighted graph.""" + A = nx.to_scipy_sparse_array(self.G3) + self.identity_conversion(self.G3, A, nx.Graph()) + + def test_identity_weighted_digraph_matrix(self): + """Conversion from weighted digraph to sparse matrix to weighted digraph.""" + A = nx.to_scipy_sparse_array(self.G4) + self.identity_conversion(self.G4, A, nx.DiGraph()) + + def test_nodelist(self): + """Conversion from graph to sparse matrix to graph with nodelist.""" + P4 = nx.path_graph(4) + P3 = nx.path_graph(3) + nodelist = list(P3.nodes()) + A = nx.to_scipy_sparse_array(P4, nodelist=nodelist) + GA = nx.Graph(A) + assert nx.is_isomorphic(GA, P3) + + pytest.raises(nx.NetworkXError, nx.to_scipy_sparse_array, P3, nodelist=[]) + # Test nodelist duplicates. + long_nl = nodelist + [0] + pytest.raises(nx.NetworkXError, nx.to_scipy_sparse_array, P3, nodelist=long_nl) + + # Test nodelist contains non-nodes + non_nl = [-1, 0, 1, 2] + pytest.raises(nx.NetworkXError, nx.to_scipy_sparse_array, P3, nodelist=non_nl) + + def test_weight_keyword(self): + WP4 = nx.Graph() + WP4.add_edges_from((n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3)) + P4 = nx.path_graph(4) + A = nx.to_scipy_sparse_array(P4) + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + np.testing.assert_equal( + 0.5 * A.todense(), nx.to_scipy_sparse_array(WP4).todense() + ) + np.testing.assert_equal( + 0.3 * A.todense(), nx.to_scipy_sparse_array(WP4, weight="other").todense() + ) + + def test_format_keyword(self): + WP4 = nx.Graph() + WP4.add_edges_from((n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3)) + P4 = nx.path_graph(4) + A = nx.to_scipy_sparse_array(P4, format="csr") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="csc") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="coo") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="bsr") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="lil") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="dia") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="dok") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + def test_format_keyword_raise(self): + with pytest.raises(nx.NetworkXError): + WP4 = nx.Graph() + WP4.add_edges_from( + (n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3) + ) + P4 = nx.path_graph(4) + nx.to_scipy_sparse_array(P4, format="any_other") + + def test_null_raise(self): + with pytest.raises(nx.NetworkXError): + nx.to_scipy_sparse_array(nx.Graph()) + + def test_empty(self): + G = nx.Graph() + G.add_node(1) + M = nx.to_scipy_sparse_array(G) + 
np.testing.assert_equal(M.toarray(), np.array([[0]])) + + def test_ordering(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 3) + G.add_edge(3, 1) + M = nx.to_scipy_sparse_array(G, nodelist=[3, 2, 1]) + np.testing.assert_equal( + M.toarray(), np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) + ) + + def test_selfloop_graph(self): + G = nx.Graph([(1, 1)]) + M = nx.to_scipy_sparse_array(G) + np.testing.assert_equal(M.toarray(), np.array([[1]])) + + G.add_edges_from([(2, 3), (3, 4)]) + M = nx.to_scipy_sparse_array(G, nodelist=[2, 3, 4]) + np.testing.assert_equal( + M.toarray(), np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) + ) + + def test_selfloop_digraph(self): + G = nx.DiGraph([(1, 1)]) + M = nx.to_scipy_sparse_array(G) + np.testing.assert_equal(M.toarray(), np.array([[1]])) + + G.add_edges_from([(2, 3), (3, 4)]) + M = nx.to_scipy_sparse_array(G, nodelist=[2, 3, 4]) + np.testing.assert_equal( + M.toarray(), np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]]) + ) + + def test_from_scipy_sparse_array_parallel_edges(self): + """Tests that the :func:`networkx.from_scipy_sparse_array` function + interprets integer weights as the number of parallel edges when + creating a multigraph. + + """ + A = sp.sparse.csr_array([[1, 1], [1, 2]]) + # First, with a simple graph, each integer entry in the adjacency + # matrix is interpreted as the weight of a single edge in the graph. + expected = nx.DiGraph() + edges = [(0, 0), (0, 1), (1, 0)] + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + expected.add_edge(1, 1, weight=2) + actual = nx.from_scipy_sparse_array( + A, parallel_edges=True, create_using=nx.DiGraph + ) + assert graphs_equal(actual, expected) + actual = nx.from_scipy_sparse_array( + A, parallel_edges=False, create_using=nx.DiGraph + ) + assert graphs_equal(actual, expected) + # Now each integer entry in the adjacency matrix is interpreted as the + # number of parallel edges in the graph if the appropriate keyword + # argument is specified. + edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)] + expected = nx.MultiDiGraph() + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + actual = nx.from_scipy_sparse_array( + A, parallel_edges=True, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + expected = nx.MultiDiGraph() + expected.add_edges_from(set(edges), weight=1) + # The sole self-loop (edge 0) on vertex 1 should have weight 2. + expected[1][1][0]["weight"] = 2 + actual = nx.from_scipy_sparse_array( + A, parallel_edges=False, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + + def test_symmetric(self): + """Tests that a symmetric matrix has edges added only once to an + undirected multigraph when using + :func:`networkx.from_scipy_sparse_array`. 
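+ That is, the symmetric pair of entries (0, 1) and (1, 0) must produce a + single edge rather than two parallel edges.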
+ + """ + A = sp.sparse.csr_array([[0, 1], [1, 0]]) + G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph) + expected = nx.MultiGraph() + expected.add_edge(0, 1, weight=1) + assert graphs_equal(G, expected) + + +@pytest.mark.parametrize("sparse_format", ("csr", "csc", "dok")) +def test_from_scipy_sparse_array_formats(sparse_format): + """Test all formats supported by _generate_weighted_edges.""" + # trinode complete graph with non-uniform edge weights + expected = nx.Graph() + expected.add_edges_from( + [ + (0, 1, {"weight": 3}), + (0, 2, {"weight": 2}), + (1, 0, {"weight": 3}), + (1, 2, {"weight": 1}), + (2, 0, {"weight": 2}), + (2, 1, {"weight": 1}), + ] + ) + A = sp.sparse.coo_array([[0, 3, 2], [3, 0, 1], [2, 1, 0]]).asformat(sparse_format) + assert graphs_equal(expected, nx.from_scipy_sparse_array(A)) diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_exceptions.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_exceptions.py new file mode 100644 index 0000000..cf59983 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_exceptions.py @@ -0,0 +1,40 @@ +import pytest + +import networkx as nx + +# smoke tests for exceptions + + +def test_raises_networkxexception(): + with pytest.raises(nx.NetworkXException): + raise nx.NetworkXException + + +def test_raises_networkxerr(): + with pytest.raises(nx.NetworkXError): + raise nx.NetworkXError + + +def test_raises_networkx_pointless_concept(): + with pytest.raises(nx.NetworkXPointlessConcept): + raise nx.NetworkXPointlessConcept + + +def test_raises_networkxalgorithmerr(): + with pytest.raises(nx.NetworkXAlgorithmError): + raise nx.NetworkXAlgorithmError + + +def test_raises_networkx_unfeasible(): + with pytest.raises(nx.NetworkXUnfeasible): + raise nx.NetworkXUnfeasible + + +def test_raises_networkx_no_path(): + with pytest.raises(nx.NetworkXNoPath): + raise nx.NetworkXNoPath + + +def test_raises_networkx_unbounded(): + with pytest.raises(nx.NetworkXUnbounded): + raise nx.NetworkXUnbounded diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_import.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_import.py new file mode 100644 index 0000000..32aafdf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_import.py @@ -0,0 +1,11 @@ +import pytest + + +def test_namespace_alias(): + with pytest.raises(ImportError): + from networkx import nx + + +def test_namespace_nesting(): + with pytest.raises(ImportError): + from networkx import networkx diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_lazy_imports.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_lazy_imports.py new file mode 100644 index 0000000..ec09ac2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_lazy_imports.py @@ -0,0 +1,96 @@ +import sys +import types + +import pytest + +import networkx.lazy_imports as lazy + + +def test_lazy_import_basics(): + math = lazy._lazy_import("math") + anything_not_real = lazy._lazy_import("anything_not_real") + + # Now test that accessing attributes does what it should + assert math.sin(math.pi) == pytest.approx(0, 1e-6) + # poor-mans pytest.raises for testing errors on attribute access + try: + anything_not_real.pi + assert False # Should not get here + except ModuleNotFoundError: + pass + assert isinstance(anything_not_real, lazy.DelayedImportErrorModule) + # see if it changes for second access + try: + anything_not_real.pi + assert False # Should not get here + except ModuleNotFoundError: + pass + + 
+def test_lazy_import_impact_on_sys_modules(): + math = lazy._lazy_import("math") + anything_not_real = lazy._lazy_import("anything_not_real") + + assert isinstance(math, types.ModuleType) + assert "math" in sys.modules + assert type(anything_not_real) is lazy.DelayedImportErrorModule + assert "anything_not_real" not in sys.modules + + # only do this if numpy is installed + np_test = pytest.importorskip("numpy") + np = lazy._lazy_import("numpy") + assert isinstance(np, types.ModuleType) + assert "numpy" in sys.modules + + np.pi # trigger load of numpy + + assert isinstance(np, types.ModuleType) + assert "numpy" in sys.modules + + +def test_lazy_import_nonbuiltins(): + sp = lazy._lazy_import("scipy") + np = lazy._lazy_import("numpy") + if isinstance(sp, lazy.DelayedImportErrorModule): + try: + sp.special.erf + assert False + except ModuleNotFoundError: + pass + elif isinstance(np, lazy.DelayedImportErrorModule): + try: + np.sin(np.pi) + assert False + except ModuleNotFoundError: + pass + else: + assert sp.special.erf(np.pi) == pytest.approx(1, 1e-4) + + +def test_lazy_attach(): + name = "mymod" + submods = ["mysubmodule", "anothersubmodule"] + myall = {"not_real_submod": ["some_var_or_func"]} + + locls = { + "attach": lazy.attach, + "name": name, + "submods": submods, + "myall": myall, + } + s = "__getattr__, __lazy_dir__, __all__ = attach(name, submods, myall)" + + exec(s, {}, locls) + expected = { + "attach": lazy.attach, + "name": name, + "submods": submods, + "myall": myall, + "__getattr__": None, + "__lazy_dir__": None, + "__all__": None, + } + assert locls.keys() == expected.keys() + for k, v in expected.items(): + if v is not None: + assert locls[k] == v diff --git a/.venv/lib/python3.12/site-packages/networkx/tests/test_relabel.py b/.venv/lib/python3.12/site-packages/networkx/tests/test_relabel.py new file mode 100644 index 0000000..0ebf4d3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/tests/test_relabel.py @@ -0,0 +1,347 @@ +import pytest + +import networkx as nx +from networkx.generators.classic import empty_graph +from networkx.utils import edges_equal, nodes_equal + + +class TestRelabel: + def test_convert_node_labels_to_integers(self): + # test that empty graph converts fine for all options + G = empty_graph() + H = nx.convert_node_labels_to_integers(G, 100) + assert list(H.nodes()) == [] + assert list(H.edges()) == [] + + for opt in ["default", "sorted", "increasing degree", "decreasing degree"]: + G = empty_graph() + H = nx.convert_node_labels_to_integers(G, 100, ordering=opt) + assert list(H.nodes()) == [] + assert list(H.edges()) == [] + + G = empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + H = nx.convert_node_labels_to_integers(G) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + + H = nx.convert_node_labels_to_integers(G, 1000) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert nodes_equal(H.nodes(), [1000, 1001, 1002, 1003]) + + H = nx.convert_node_labels_to_integers(G, ordering="increasing degree") + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert H.degree(0) == 1 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 3 + + H = nx.convert_node_labels_to_integers(G, ordering="decreasing degree") + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == 
sorted(degG) + assert H.degree(0) == 3 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 1 + + H = nx.convert_node_labels_to_integers( + G, ordering="increasing degree", label_attribute="label" + ) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert H.degree(0) == 1 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 3 + + # check mapping + assert H.nodes[3]["label"] == "C" + assert H.nodes[0]["label"] == "D" + assert H.nodes[1]["label"] == "A" or H.nodes[2]["label"] == "A" + assert H.nodes[1]["label"] == "B" or H.nodes[2]["label"] == "B" + + def test_convert_to_integers2(self): + G = empty_graph() + G.add_edges_from([("C", "D"), ("A", "B"), ("A", "C"), ("B", "C")]) + H = nx.convert_node_labels_to_integers(G, ordering="sorted") + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + + H = nx.convert_node_labels_to_integers( + G, ordering="sorted", label_attribute="label" + ) + assert H.nodes[0]["label"] == "A" + assert H.nodes[1]["label"] == "B" + assert H.nodes[2]["label"] == "C" + assert H.nodes[3]["label"] == "D" + + def test_convert_to_integers_raise(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.convert_node_labels_to_integers(G, ordering="increasing age") + + def test_relabel_nodes_copy(self): + G = nx.empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_function(self): + G = nx.empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + # function mapping no longer encouraged but works + + def mapping(n): + return ord(n) + + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), [65, 66, 67, 68]) + + def test_relabel_nodes_callable_type(self): + G = nx.path_graph(4) + H = nx.relabel_nodes(G, str) + assert nodes_equal(H.nodes, ["0", "1", "2", "3"]) + + @pytest.mark.parametrize("non_mc", ("0123", ["0", "1", "2", "3"])) + def test_relabel_nodes_non_mapping_or_callable(self, non_mc): + """If `mapping` is neither a Callable or a Mapping, an exception + should be raised.""" + G = nx.path_graph(4) + with pytest.raises(AttributeError): + nx.relabel_nodes(G, non_mc) + + def test_relabel_nodes_graph(self): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_orderedgraph(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + G.add_edges_from([(1, 3), (2, 3)]) + mapping = {1: "a", 2: "b", 3: "c"} + H = nx.relabel_nodes(G, mapping) + assert list(H.nodes) == ["a", "b", "c"] + + def test_relabel_nodes_digraph(self): + G = nx.DiGraph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_multigraph(self): + G = nx.MultiGraph([("a", "b"), ("a", "b")]) + mapping = {"a": "aardvark", "b": "bear"} + G = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(G.nodes(), ["aardvark", "bear"]) + assert edges_equal(G.edges(), [("aardvark", 
"bear"), ("aardvark", "bear")]) + + def test_relabel_nodes_multidigraph(self): + G = nx.MultiDiGraph([("a", "b"), ("a", "b")]) + mapping = {"a": "aardvark", "b": "bear"} + G = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(G.nodes(), ["aardvark", "bear"]) + assert edges_equal(G.edges(), [("aardvark", "bear"), ("aardvark", "bear")]) + + def test_relabel_isolated_nodes_to_same(self): + G = nx.Graph() + G.add_nodes_from(range(4)) + mapping = {1: 1} + H = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(H.nodes(), list(range(4))) + + def test_relabel_nodes_missing(self): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {0: "aardvark"} + # copy=True + H = nx.relabel_nodes(G, mapping, copy=True) + assert nodes_equal(H.nodes, G.nodes) + # copy=False + GG = G.copy() + nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(G.nodes, GG.nodes) + + def test_relabel_copy_name(self): + G = nx.Graph() + H = nx.relabel_nodes(G, {}, copy=True) + assert H.graph == G.graph + H = nx.relabel_nodes(G, {}, copy=False) + assert H.graph == G.graph + G.name = "first" + H = nx.relabel_nodes(G, {}, copy=True) + assert H.graph == G.graph + H = nx.relabel_nodes(G, {}, copy=False) + assert H.graph == G.graph + + def test_relabel_toposort(self): + K5 = nx.complete_graph(4) + G = nx.complete_graph(4) + G = nx.relabel_nodes(G, {i: i + 1 for i in range(4)}, copy=False) + assert nx.is_isomorphic(K5, G) + G = nx.complete_graph(4) + G = nx.relabel_nodes(G, {i: i - 1 for i in range(4)}, copy=False) + assert nx.is_isomorphic(K5, G) + + def test_relabel_selfloop(self): + G = nx.DiGraph([(1, 1), (1, 2), (2, 3)]) + G = nx.relabel_nodes(G, {1: "One", 2: "Two", 3: "Three"}, copy=False) + assert nodes_equal(G.nodes(), ["One", "Three", "Two"]) + G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)]) + G = nx.relabel_nodes(G, {1: "One", 2: "Two", 3: "Three"}, copy=False) + assert nodes_equal(G.nodes(), ["One", "Three", "Two"]) + G = nx.MultiDiGraph([(1, 1)]) + G = nx.relabel_nodes(G, {1: 0}, copy=False) + assert nodes_equal(G.nodes(), [0]) + + def test_relabel_multidigraph_inout_merge_nodes(self): + for MG in (nx.MultiGraph, nx.MultiDiGraph): + for cc in (True, False): + G = MG([(0, 4), (1, 4), (4, 2), (4, 3)]) + G[0][4][0]["value"] = "a" + G[1][4][0]["value"] = "b" + G[4][2][0]["value"] = "c" + G[4][3][0]["value"] = "d" + G.add_edge(0, 4, key="x", value="e") + G.add_edge(4, 3, key="x", value="f") + mapping = {0: 9, 1: 9, 2: 9, 3: 9} + H = nx.relabel_nodes(G, mapping, copy=cc) + # No ordering on keys enforced + assert {"value": "a"} in H[9][4].values() + assert {"value": "b"} in H[9][4].values() + assert {"value": "c"} in H[4][9].values() + assert len(H[4][9]) == 3 if G.is_directed() else 6 + assert {"value": "d"} in H[4][9].values() + assert {"value": "e"} in H[9][4].values() + assert {"value": "f"} in H[4][9].values() + assert len(H[9][4]) == 3 if G.is_directed() else 6 + + def test_relabel_multigraph_merge_inplace(self): + G = nx.MultiGraph([(0, 1), (0, 2), (0, 3), (0, 1), (0, 2), (0, 3)]) + G[0][1][0]["value"] = "a" + G[0][2][0]["value"] = "b" + G[0][3][0]["value"] = "c" + mapping = {1: 4, 2: 4, 3: 4} + nx.relabel_nodes(G, mapping, copy=False) + # No ordering on keys enforced + assert {"value": "a"} in G[0][4].values() + assert {"value": "b"} in G[0][4].values() + assert {"value": "c"} in G[0][4].values() + + def test_relabel_multidigraph_merge_inplace(self): + G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)]) + G[0][1][0]["value"] = "a" + G[0][2][0]["value"] = "b" + 
G[0][3][0]["value"] = "c" + mapping = {1: 4, 2: 4, 3: 4} + nx.relabel_nodes(G, mapping, copy=False) + # No ordering on keys enforced + assert {"value": "a"} in G[0][4].values() + assert {"value": "b"} in G[0][4].values() + assert {"value": "c"} in G[0][4].values() + + def test_relabel_multidigraph_inout_copy(self): + G = nx.MultiDiGraph([(0, 4), (1, 4), (4, 2), (4, 3)]) + G[0][4][0]["value"] = "a" + G[1][4][0]["value"] = "b" + G[4][2][0]["value"] = "c" + G[4][3][0]["value"] = "d" + G.add_edge(0, 4, key="x", value="e") + G.add_edge(4, 3, key="x", value="f") + mapping = {0: 9, 1: 9, 2: 9, 3: 9} + H = nx.relabel_nodes(G, mapping, copy=True) + # No ordering on keys enforced + assert {"value": "a"} in H[9][4].values() + assert {"value": "b"} in H[9][4].values() + assert {"value": "c"} in H[4][9].values() + assert len(H[4][9]) == 3 + assert {"value": "d"} in H[4][9].values() + assert {"value": "e"} in H[9][4].values() + assert {"value": "f"} in H[4][9].values() + assert len(H[9][4]) == 3 + + def test_relabel_multigraph_merge_copy(self): + G = nx.MultiGraph([(0, 1), (0, 2), (0, 3)]) + G[0][1][0]["value"] = "a" + G[0][2][0]["value"] = "b" + G[0][3][0]["value"] = "c" + mapping = {1: 4, 2: 4, 3: 4} + H = nx.relabel_nodes(G, mapping, copy=True) + assert {"value": "a"} in H[0][4].values() + assert {"value": "b"} in H[0][4].values() + assert {"value": "c"} in H[0][4].values() + + def test_relabel_multidigraph_merge_copy(self): + G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)]) + G[0][1][0]["value"] = "a" + G[0][2][0]["value"] = "b" + G[0][3][0]["value"] = "c" + mapping = {1: 4, 2: 4, 3: 4} + H = nx.relabel_nodes(G, mapping, copy=True) + assert {"value": "a"} in H[0][4].values() + assert {"value": "b"} in H[0][4].values() + assert {"value": "c"} in H[0][4].values() + + def test_relabel_multigraph_nonnumeric_key(self): + for MG in (nx.MultiGraph, nx.MultiDiGraph): + for cc in (True, False): + G = nx.MultiGraph() + G.add_edge(0, 1, key="I", value="a") + G.add_edge(0, 2, key="II", value="b") + G.add_edge(0, 3, key="II", value="c") + mapping = {1: 4, 2: 4, 3: 4} + nx.relabel_nodes(G, mapping, copy=False) + assert {"value": "a"} in G[0][4].values() + assert {"value": "b"} in G[0][4].values() + assert {"value": "c"} in G[0][4].values() + assert 0 in G[0][4] + assert "I" in G[0][4] + assert "II" in G[0][4] + + def test_relabel_circular(self): + G = nx.path_graph(3) + mapping = {0: 1, 1: 0} + H = nx.relabel_nodes(G, mapping, copy=True) + with pytest.raises(nx.NetworkXUnfeasible): + H = nx.relabel_nodes(G, mapping, copy=False) + + def test_relabel_preserve_node_order_full_mapping_with_copy_true(self): + G = nx.path_graph(3) + original_order = list(G.nodes()) + mapping = {2: "a", 1: "b", 0: "c"} # dictionary keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=True) + new_order = list(H.nodes()) + assert [mapping.get(i, i) for i in original_order] == new_order + + def test_relabel_preserve_node_order_full_mapping_with_copy_false(self): + G = nx.path_graph(3) + original_order = list(G) + mapping = {2: "a", 1: "b", 0: "c"} # dictionary keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=False) + new_order = list(H) + assert [mapping.get(i, i) for i in original_order] == new_order + + def test_relabel_preserve_node_order_partial_mapping_with_copy_true(self): + G = nx.path_graph(3) + original_order = list(G) + mapping = {1: "a", 0: "b"} # partial mapping and keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=True) + new_order = list(H) + assert [mapping.get(i, i) for i 
in original_order] == new_order + + def test_relabel_preserve_node_order_partial_mapping_with_copy_false(self): + G = nx.path_graph(3) + original_order = list(G) + mapping = {1: "a", 0: "b"} # partial mapping and keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=False) + new_order = list(H) + assert [mapping.get(i, i) for i in original_order] != new_order diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__init__.py b/.venv/lib/python3.12/site-packages/networkx/utils/__init__.py new file mode 100644 index 0000000..d6abb17 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/__init__.py @@ -0,0 +1,8 @@ +from networkx.utils.misc import * +from networkx.utils.decorators import * +from networkx.utils.random_sequence import * +from networkx.utils.union_find import * +from networkx.utils.rcm import * +from networkx.utils.heaps import * +from networkx.utils.configs import * +from networkx.utils.backends import * diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..6e4127c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/backends.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/backends.cpython-312.pyc new file mode 100644 index 0000000..4b1f153 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/backends.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/configs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/configs.cpython-312.pyc new file mode 100644 index 0000000..a9d6158 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/configs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/decorators.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/decorators.cpython-312.pyc new file mode 100644 index 0000000..28c3242 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/decorators.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/heaps.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/heaps.cpython-312.pyc new file mode 100644 index 0000000..a3e9e47 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/heaps.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/mapped_queue.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/mapped_queue.cpython-312.pyc new file mode 100644 index 0000000..b887377 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/mapped_queue.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/misc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/misc.cpython-312.pyc new file mode 100644 index 0000000..e4a48dc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/misc.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/random_sequence.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/random_sequence.cpython-312.pyc
new file mode 100644
index 0000000..f2ea87b
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/random_sequence.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/rcm.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/rcm.cpython-312.pyc
new file mode 100644
index 0000000..512e518
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/rcm.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/union_find.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/union_find.cpython-312.pyc
new file mode 100644
index 0000000..a0b5653
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/__pycache__/union_find.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/backends.py b/.venv/lib/python3.12/site-packages/networkx/utils/backends.py
new file mode 100644
index 0000000..1a9250c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/backends.py
@@ -0,0 +1,2143 @@
+# Notes about NetworkX namespace objects set up here:
+#
+# nx.utils.backends.backends:
+#     dict keyed by backend name to the backend entry point object.
+#     Filled using ``_get_backends("networkx.backends")`` during import of this module.
+#
+# nx.utils.backends.backend_info:
+#     dict keyed by backend name to the metadata returned by the function indicated
+#     by the "networkx.backend_info" entry point.
+#     Created as an empty dict while importing this module, but later filled using
+#     ``_set_configs_from_environment()`` at end of importing ``networkx/__init__.py``.
+#
+# nx.config:
+#     Config object for NetworkX config setting. Created using
+#     ``_set_configs_from_environment()`` at end of importing ``networkx/__init__.py``.
+#
+# private dicts:
+#     nx.utils.backends._loaded_backends:
+#         dict used to memoize loaded backends. Keyed by backend name to loaded backends.
+#
+#     nx.utils.backends._registered_algorithms:
+#         dict of all the dispatchable functions in networkx, keyed by _dispatchable
+#         function name to the wrapped function object.
+
+import inspect
+import itertools
+import logging
+import os
+import typing
+import warnings
+from functools import partial
+from importlib.metadata import entry_points
+
+import networkx as nx
+
+from .configs import BackendPriorities, Config, NetworkXConfig
+from .decorators import argmap
+
+__all__ = ["_dispatchable"]
+
+_logger = logging.getLogger(__name__)
+FAILED_TO_CONVERT = "FAILED_TO_CONVERT"
+
+
+def _get_backends(group, *, load_and_call=False):
+    """
+    Retrieve NetworkX ``backends`` and ``backend_info`` from the entry points.
+
+    Parameters
+    ----------
+    group : str
+        The entry point group to be retrieved.
+    load_and_call : bool, optional
+        If True, load and call the backend. Defaults to False.
+
+    Returns
+    -------
+    dict
+        A dictionary mapping backend names to their respective backend objects.
+
+    Notes
+    -----
+    If a backend is defined more than once, a warning is issued.
+    The "nx_loopback" backend is removed if it exists, as it is only available during testing.
+    A warning is displayed if an error occurs while loading a backend.
+ """ + items = entry_points(group=group) + rv = {} + for ep in items: + if ep.name in rv: + warnings.warn( + f"networkx backend defined more than once: {ep.name}", + RuntimeWarning, + stacklevel=2, + ) + elif load_and_call: + try: + rv[ep.name] = ep.load()() + except Exception as exc: + warnings.warn( + f"Error encountered when loading info for backend {ep.name}: {exc}", + RuntimeWarning, + stacklevel=2, + ) + else: + rv[ep.name] = ep + rv.pop("nx_loopback", None) + return rv + + +# Note: "networkx" is in `backend_info` but ignored in `backends` and `config.backends`. +# It is valid to use "networkx" as a backend argument and in `config.backend_priority`. +# If we make "networkx" a "proper" backend, put it in `backends` and `config.backends`. +backends = _get_backends("networkx.backends") + +# Use _set_configs_from_environment() below to fill backend_info dict as +# the last step in importing networkx +backend_info = {} + +# Load and cache backends on-demand +_loaded_backends = {} # type: ignore[var-annotated] +_registered_algorithms = {} + + +# Get default configuration from environment variables at import time +def _comma_sep_to_list(string): + return [x_strip for x in string.strip().split(",") if (x_strip := x.strip())] + + +def _set_configs_from_environment(): + """Initialize ``config.backend_priority``, load backend_info and config. + + This gets default values from environment variables (see ``nx.config`` for details). + This function is run at the very end of importing networkx. It is run at this time + to avoid loading backend_info before the rest of networkx is imported in case a + backend uses networkx for its backend_info (e.g. subclassing the Config class.) + """ + # backend_info is defined above as empty dict. Fill it after import finishes. + backend_info.update(_get_backends("networkx.backend_info", load_and_call=True)) + backend_info.update( + (backend, {}) for backend in backends.keys() - backend_info.keys() + ) + + # set up config based on backend_info and environment + backend_config = {} + for backend, info in backend_info.items(): + if "default_config" not in info: + cfg = Config() + else: + cfg = info["default_config"] + if not isinstance(cfg, Config): + cfg = Config(**cfg) + backend_config[backend] = cfg + backend_config = Config(**backend_config) + # Setting doc of backends_config type is not setting doc of Config + # Config has __new__ method that returns instance with a unique type! + type(backend_config).__doc__ = "All installed NetworkX backends and their configs." 
+ + backend_priority = BackendPriorities(algos=[], generators=[]) + + config = NetworkXConfig( + backend_priority=backend_priority, + backends=backend_config, + cache_converted_graphs=bool( + os.environ.get("NETWORKX_CACHE_CONVERTED_GRAPHS", True) + ), + fallback_to_nx=bool(os.environ.get("NETWORKX_FALLBACK_TO_NX", False)), + warnings_to_ignore=set( + _comma_sep_to_list(os.environ.get("NETWORKX_WARNINGS_TO_IGNORE", "")) + ), + ) + + # Add "networkx" item to backend_info now b/c backend_config is set up + backend_info["networkx"] = {} + + # NETWORKX_BACKEND_PRIORITY is the same as NETWORKX_BACKEND_PRIORITY_ALGOS + priorities = { + key[26:].lower(): val + for key, val in os.environ.items() + if key.startswith("NETWORKX_BACKEND_PRIORITY_") + } + backend_priority = config.backend_priority + backend_priority.algos = ( + _comma_sep_to_list(priorities.pop("algos")) + if "algos" in priorities + else _comma_sep_to_list( + os.environ.get( + "NETWORKX_BACKEND_PRIORITY", + os.environ.get("NETWORKX_AUTOMATIC_BACKENDS", ""), + ) + ) + ) + backend_priority.generators = _comma_sep_to_list(priorities.pop("generators", "")) + for key in sorted(priorities): + backend_priority[key] = _comma_sep_to_list(priorities[key]) + + return config + + +def _do_nothing(): + """This does nothing at all, yet it helps turn ``_dispatchable`` into functions. + + Use this with the ``argmap`` decorator to turn ``self`` into a function. It results + in some small additional overhead compared to calling ``_dispatchable`` directly, + but ``argmap`` has the property that it can stack with other ``argmap`` + decorators "for free". Being a function is better for REPRs and type-checkers. + """ + + +def _always_run(name, args, kwargs): + return True + + +def _load_backend(backend_name): + if backend_name in _loaded_backends: + return _loaded_backends[backend_name] + if backend_name not in backends: + raise ImportError(f"'{backend_name}' backend is not installed") + rv = _loaded_backends[backend_name] = backends[backend_name].load() + if not hasattr(rv, "can_run"): + rv.can_run = _always_run + if not hasattr(rv, "should_run"): + rv.should_run = _always_run + return rv + + +class _dispatchable: + _is_testing = False + + def __new__( + cls, + func=None, + *, + name=None, + graphs="G", + edge_attrs=None, + node_attrs=None, + preserve_edge_attrs=False, + preserve_node_attrs=False, + preserve_graph_attrs=False, + preserve_all_attrs=False, + mutates_input=False, + returns_graph=False, + implemented_by_nx=True, + ): + """A decorator function that is used to redirect the execution of ``func`` + function to its backend implementation. + + This decorator allows the function to dispatch to different backend + implementations based on the input graph types, and also manages the + extra keywords ``backend`` and ``**backend_kwargs``. + Usage can be any of the following decorator forms: + + - ``@_dispatchable`` + - ``@_dispatchable()`` + - ``@_dispatchable(name="override_name")`` + - ``@_dispatchable(graphs="graph_var_name")`` + - ``@_dispatchable(edge_attrs="weight")`` + - ``@_dispatchable(graphs={"G": 0, "H": 1}, edge_attrs={"weight": "default"})`` + with 0 and 1 giving the position in the signature function for graph + objects. When ``edge_attrs`` is a dict, keys are keyword names and values + are defaults. + + Parameters + ---------- + func : callable, optional (default: None) + The function to be decorated. If None, ``_dispatchable`` returns a + partial object that can be used to decorate a function later. 
If ``func`` + is a callable, returns a new callable object that dispatches to a backend + function based on input graph types. + + name : str, optional (default: name of `func`) + The name for the function as used for dispatching. If not provided, + the name of ``func`` will be used. ``name`` is useful to avoid name + conflicts, as all dispatched functions live in a single namespace. + For example, ``nx.tournament.is_strongly_connected`` had a name + conflict with the standard ``nx.is_strongly_connected``, so we used + ``@_dispatchable(name="tournament_is_strongly_connected")``. + + graphs : str or dict or None, optional (default: "G") + If a string, the parameter name of the graph, which must be the first + argument of the wrapped function. If more than one graph is required + for the function (or if the graph is not the first argument), provide + a dict keyed by graph parameter name to the value parameter position. + A question mark in the name indicates an optional argument. + For example, ``@_dispatchable(graphs={"G": 0, "auxiliary?": 4})`` + indicates the 0th parameter ``G`` of the function is a required graph, + and the 4th parameter ``auxiliary?`` is an optional graph. + To indicate that an argument is a list of graphs, do ``"[graphs]"``. + Use ``graphs=None``, if *no* arguments are NetworkX graphs such as for + graph generators, readers, and conversion functions. + + edge_attrs : str or dict, optional (default: None) + ``edge_attrs`` holds information about edge attribute arguments + and default values for those edge attributes. + If a string, ``edge_attrs`` holds the function argument name that + indicates a single edge attribute to include in the converted graph. + The default value for this attribute is 1. To indicate that an argument + is a list of attributes (all with default value 1), use e.g. ``"[attrs]"``. + If a dict, ``edge_attrs`` holds a dict keyed by argument names, with + values that are either the default value or, if a string, the argument + name that indicates the default value. + If None, function does not use edge attributes. + + node_attrs : str or dict, optional + Like ``edge_attrs``, but for node attributes. + + preserve_edge_attrs : bool or str or dict, optional (default: False) + If bool, whether to preserve all edge attributes. + If a string, the parameter name that may indicate (with ``True`` or a + callable argument) whether all edge attributes should be preserved + when converting graphs to a backend graph type. + If a dict of form ``{graph_name: {attr: default}}``, indicate + pre-determined edge attributes (and defaults) to preserve for the + indicated input graph. + + preserve_node_attrs : bool or str or dict, optional (default: False) + Like ``preserve_edge_attrs``, but for node attributes. + + preserve_graph_attrs : bool or set, optional (default: False) + If bool, whether to preserve all graph attributes. + If set, which input graph arguments to preserve graph attributes. + + preserve_all_attrs : bool, optional (default: False) + Whether to preserve all edge, node and graph attributes. + If True, this overrides all the other preserve_*_attrs. + + mutates_input : bool or dict, optional (default: False) + If bool, whether the function mutates an input graph argument. 
+ If dict of ``{arg_name: arg_pos}``, name and position of bool arguments + that indicate whether an input graph will be mutated, and ``arg_name`` + may begin with ``"not "`` to negate the logic (for example, ``"not copy"`` + means we mutate the input graph when the ``copy`` argument is False). + By default, dispatching doesn't convert input graphs to a different + backend for functions that mutate input graphs. + + returns_graph : bool, optional (default: False) + Whether the function can return or yield a graph object. By default, + dispatching doesn't convert input graphs to a different backend for + functions that return graphs. + + implemented_by_nx : bool, optional (default: True) + Whether the function is implemented by NetworkX. If it is not, then the + function is included in NetworkX only as an API to dispatch to backends. + Default is True. + """ + if func is None: + return partial( + _dispatchable, + name=name, + graphs=graphs, + edge_attrs=edge_attrs, + node_attrs=node_attrs, + preserve_edge_attrs=preserve_edge_attrs, + preserve_node_attrs=preserve_node_attrs, + preserve_graph_attrs=preserve_graph_attrs, + preserve_all_attrs=preserve_all_attrs, + mutates_input=mutates_input, + returns_graph=returns_graph, + implemented_by_nx=implemented_by_nx, + ) + if isinstance(func, str): + raise TypeError("'name' and 'graphs' must be passed by keyword") from None + # If name not provided, use the name of the function + if name is None: + name = func.__name__ + + self = object.__new__(cls) + + # standard function-wrapping stuff + # __annotations__ not used + self.__name__ = func.__name__ + # self.__doc__ = func.__doc__ # __doc__ handled as cached property + self.__defaults__ = func.__defaults__ + # Add `backend=` keyword argument to allow backend choice at call-time + if func.__kwdefaults__: + self.__kwdefaults__ = {**func.__kwdefaults__, "backend": None} + else: + self.__kwdefaults__ = {"backend": None} + self.__module__ = func.__module__ + self.__qualname__ = func.__qualname__ + self.__dict__.update(func.__dict__) + self.__wrapped__ = func + + # Supplement docstring with backend info; compute and cache when needed + self._orig_doc = func.__doc__ + self._cached_doc = None + + self.orig_func = func + self.name = name + self.edge_attrs = edge_attrs + self.node_attrs = node_attrs + self.preserve_edge_attrs = preserve_edge_attrs or preserve_all_attrs + self.preserve_node_attrs = preserve_node_attrs or preserve_all_attrs + self.preserve_graph_attrs = preserve_graph_attrs or preserve_all_attrs + self.mutates_input = mutates_input + # Keep `returns_graph` private for now, b/c we may extend info on return types + self._returns_graph = returns_graph + + if edge_attrs is not None and not isinstance(edge_attrs, str | dict): + raise TypeError( + f"Bad type for edge_attrs: {type(edge_attrs)}. Expected str or dict." + ) from None + if node_attrs is not None and not isinstance(node_attrs, str | dict): + raise TypeError( + f"Bad type for node_attrs: {type(node_attrs)}. Expected str or dict." + ) from None + if not isinstance(self.preserve_edge_attrs, bool | str | dict): + raise TypeError( + f"Bad type for preserve_edge_attrs: {type(self.preserve_edge_attrs)}." + " Expected bool, str, or dict." + ) from None + if not isinstance(self.preserve_node_attrs, bool | str | dict): + raise TypeError( + f"Bad type for preserve_node_attrs: {type(self.preserve_node_attrs)}." + " Expected bool, str, or dict." 
+ ) from None + if not isinstance(self.preserve_graph_attrs, bool | set): + raise TypeError( + f"Bad type for preserve_graph_attrs: {type(self.preserve_graph_attrs)}." + " Expected bool or set." + ) from None + if not isinstance(self.mutates_input, bool | dict): + raise TypeError( + f"Bad type for mutates_input: {type(self.mutates_input)}." + " Expected bool or dict." + ) from None + if not isinstance(self._returns_graph, bool): + raise TypeError( + f"Bad type for returns_graph: {type(self._returns_graph)}." + " Expected bool." + ) from None + + if isinstance(graphs, str): + graphs = {graphs: 0} + elif graphs is None: + pass + elif not isinstance(graphs, dict): + raise TypeError( + f"Bad type for graphs: {type(graphs)}. Expected str or dict." + ) from None + elif len(graphs) == 0: + raise KeyError("'graphs' must contain at least one variable name") from None + + # This dict comprehension is complicated for better performance; equivalent shown below. + self.optional_graphs = set() + self.list_graphs = set() + if graphs is None: + self.graphs = {} + else: + self.graphs = { + self.optional_graphs.add(val := k[:-1]) or val + if (last := k[-1]) == "?" + else self.list_graphs.add(val := k[1:-1]) or val + if last == "]" + else k: v + for k, v in graphs.items() + } + # The above is equivalent to: + # self.optional_graphs = {k[:-1] for k in graphs if k[-1] == "?"} + # self.list_graphs = {k[1:-1] for k in graphs if k[-1] == "]"} + # self.graphs = {k[:-1] if k[-1] == "?" else k: v for k, v in graphs.items()} + + # Compute and cache the signature on-demand + self._sig = None + + # Which backends implement this function? + self.backends = { + backend + for backend, info in backend_info.items() + if "functions" in info and name in info["functions"] + } + if implemented_by_nx: + self.backends.add("networkx") + + if name in _registered_algorithms: + raise KeyError( + f"Algorithm already exists in dispatch registry: {name}" + ) from None + # Use the `argmap` decorator to turn `self` into a function. This does result + # in small additional overhead compared to calling `_dispatchable` directly, + # but `argmap` has the property that it can stack with other `argmap` + # decorators "for free". Being a function is better for REPRs and type-checkers. + self = argmap(_do_nothing)(self) + _registered_algorithms[name] = self + return self + + @property + def __doc__(self): + """If the cached documentation exists, it is returned. + Otherwise, the documentation is generated using _make_doc() method, + cached, and then returned.""" + + rv = self._cached_doc + if rv is None: + rv = self._cached_doc = self._make_doc() + return rv + + @__doc__.setter + def __doc__(self, val): + """Sets the original documentation to the given value and resets the + cached documentation.""" + + self._orig_doc = val + self._cached_doc = None + + @property + def __signature__(self): + """Return the signature of the original function, with the addition of + the `backend` and `backend_kwargs` parameters.""" + + if self._sig is None: + sig = inspect.signature(self.orig_func) + # `backend` is now a reserved argument used by dispatching. 
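+            # A sketch of the effect (assumed example, not upstream code): a wrapped
+            #     def pagerank(G, alpha=0.85): ...
+            # is exposed as `pagerank(G, alpha=0.85, *, backend=None, **backend_kwargs)`;
+            # if the wrapped function already takes a `**kwargs` parameter, only
+            # `backend=None` is inserted, just before that var-keyword parameter.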
+ # assert "backend" not in sig.parameters + if not any( + p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values() + ): + sig = sig.replace( + parameters=[ + *sig.parameters.values(), + inspect.Parameter( + "backend", inspect.Parameter.KEYWORD_ONLY, default=None + ), + inspect.Parameter( + "backend_kwargs", inspect.Parameter.VAR_KEYWORD + ), + ] + ) + else: + *parameters, var_keyword = sig.parameters.values() + sig = sig.replace( + parameters=[ + *parameters, + inspect.Parameter( + "backend", inspect.Parameter.KEYWORD_ONLY, default=None + ), + var_keyword, + ] + ) + self._sig = sig + return self._sig + + # Fast, simple path if no backends are installed + def _call_if_no_backends_installed(self, /, *args, backend=None, **kwargs): + """Returns the result of the original function (no backends installed).""" + if backend is not None and backend != "networkx": + raise ImportError(f"'{backend}' backend is not installed") + if "networkx" not in self.backends: + raise NotImplementedError( + f"'{self.name}' is not implemented by 'networkx' backend. " + "This function is included in NetworkX as an API to dispatch to " + "other backends." + ) + return self.orig_func(*args, **kwargs) + + # Dispatch to backends based on inputs, `backend=` arg, or configuration + def _call_if_any_backends_installed(self, /, *args, backend=None, **kwargs): + """Returns the result of the original function, or the backend function if + the backend is specified and that backend implements `func`.""" + # Use `backend_name` in this function instead of `backend`. + # This is purely for aesthetics and to make it easier to search for this + # variable since "backend" is used in many comments and log/error messages. + backend_name = backend + if backend_name is not None and backend_name not in backend_info: + raise ImportError(f"'{backend_name}' backend is not installed") + + graphs_resolved = {} + for gname, pos in self.graphs.items(): + if pos < len(args): + if gname in kwargs: + raise TypeError(f"{self.name}() got multiple values for {gname!r}") + graph = args[pos] + elif gname in kwargs: + graph = kwargs[gname] + elif gname not in self.optional_graphs: + raise TypeError( + f"{self.name}() missing required graph argument: {gname}" + ) + else: + continue + if graph is None: + if gname not in self.optional_graphs: + raise TypeError( + f"{self.name}() required graph argument {gname!r} is None; must be a graph" + ) + else: + graphs_resolved[gname] = graph + + # Alternative to the above that does not check duplicated args or missing required graphs. 
+ # graphs_resolved = { + # gname: graph + # for gname, pos in self.graphs.items() + # if (graph := args[pos] if pos < len(args) else kwargs.get(gname)) is not None + # } + + # Check if any graph comes from a backend + if self.list_graphs: + # Make sure we don't lose values by consuming an iterator + args = list(args) + for gname in self.list_graphs & graphs_resolved.keys(): + list_of_graphs = list(graphs_resolved[gname]) + graphs_resolved[gname] = list_of_graphs + if gname in kwargs: + kwargs[gname] = list_of_graphs + else: + args[self.graphs[gname]] = list_of_graphs + + graph_backend_names = { + getattr(g, "__networkx_backend__", None) + for gname, g in graphs_resolved.items() + if gname not in self.list_graphs + } + for gname in self.list_graphs & graphs_resolved.keys(): + graph_backend_names.update( + getattr(g, "__networkx_backend__", None) + for g in graphs_resolved[gname] + ) + else: + graph_backend_names = { + getattr(g, "__networkx_backend__", None) + for g in graphs_resolved.values() + } + + backend_priority = nx.config.backend_priority.get( + self.name, + nx.config.backend_priority.generators + if self._returns_graph + else nx.config.backend_priority.algos, + ) + fallback_to_nx = nx.config.fallback_to_nx and "networkx" in self.backends + if self._is_testing and backend_priority and backend_name is None: + # Special path if we are running networkx tests with a backend. + # This even runs for (and handles) functions that mutate input graphs. + return self._convert_and_call_for_tests( + backend_priority[0], + args, + kwargs, + fallback_to_nx=fallback_to_nx, + ) + + graph_backend_names.discard(None) + if backend_name is not None: + # Must run with the given backend. + # `can_run` only used for better log and error messages. + # Check `mutates_input` for logging, not behavior. + backend_kwarg_msg = ( + "No other backends will be attempted, because the backend was " + f"specified with the `backend='{backend_name}'` keyword argument." + ) + extra_message = ( + f"'{backend_name}' backend raised NotImplementedError when calling " + f"'{self.name}'. {backend_kwarg_msg}" + ) + if not graph_backend_names or graph_backend_names == {backend_name}: + # All graphs are backend graphs--no need to convert! + if self._can_backend_run(backend_name, args, kwargs): + return self._call_with_backend( + backend_name, args, kwargs, extra_message=extra_message + ) + if self._does_backend_have(backend_name): + extra = " for the given arguments" + else: + extra = "" + raise NotImplementedError( + f"'{self.name}' is not implemented by '{backend_name}' backend" + f"{extra}. {backend_kwarg_msg}" + ) + if self._can_convert(backend_name, graph_backend_names): + if self._can_backend_run(backend_name, args, kwargs): + if self._will_call_mutate_input(args, kwargs): + _logger.debug( + "'%s' will mutate an input graph. This prevents automatic conversion " + "to, and use of, backends listed in `nx.config.backend_priority`. " + "Using backend specified by the " + "`backend='%s'` keyword argument. This may change behavior by not " + "mutating inputs.", + self.name, + backend_name, + ) + mutations = [] + else: + mutations = None + rv = self._convert_and_call( + backend_name, + graph_backend_names, + args, + kwargs, + extra_message=extra_message, + mutations=mutations, + ) + if mutations: + for cache, key in mutations: + # If the call mutates inputs, then remove all inputs gotten + # from cache. We do this after all conversions (and call) so + # that a graph can be gotten from a cache multiple times. 
+ cache.pop(key, None) + return rv + if self._does_backend_have(backend_name): + extra = " for the given arguments" + else: + extra = "" + raise NotImplementedError( + f"'{self.name}' is not implemented by '{backend_name}' backend" + f"{extra}. {backend_kwarg_msg}" + ) + if len(graph_backend_names) == 1: + maybe_s = "" + graph_backend_names = f"'{next(iter(graph_backend_names))}'" + else: + maybe_s = "s" + raise TypeError( + f"'{self.name}' is unable to convert graph from backend{maybe_s} " + f"{graph_backend_names} to '{backend_name}' backend, which was " + f"specified with the `backend='{backend_name}'` keyword argument. " + f"{backend_kwarg_msg}" + ) + + if self._will_call_mutate_input(args, kwargs): + # The current behavior for functions that mutate input graphs: + # + # 1. If backend is specified by `backend=` keyword, use it (done above). + # 2. If inputs are from one backend, try to use it. + # 3. If all input graphs are instances of `nx.Graph`, then run with the + # default "networkx" implementation. + # + # Do not automatically convert if a call will mutate inputs, because doing + # so would change behavior. Hence, we should fail if there are multiple input + # backends or if the input backend does not implement the function. However, + # we offer a way for backends to circumvent this if they do not implement + # this function: we will fall back to the default "networkx" implementation + # without using conversions if all input graphs are subclasses of `nx.Graph`. + mutate_msg = ( + "conversions between backends (if configured) will not be attempted " + "because the original input graph would not be mutated. Using the " + "backend keyword e.g. `backend='some_backend'` will force conversions " + "and not mutate the original input graph." + ) + fallback_msg = ( + "This call will mutate inputs, so fall back to 'networkx' " + "backend (without converting) since all input graphs are " + "instances of nx.Graph and are hopefully compatible." + ) + if len(graph_backend_names) == 1: + [backend_name] = graph_backend_names + msg_template = ( + f"Backend '{backend_name}' does not implement '{self.name}'%s. " + f"This call will mutate an input, so automatic {mutate_msg}" + ) + # `can_run` is only used for better log and error messages + try: + if self._can_backend_run(backend_name, args, kwargs): + return self._call_with_backend( + backend_name, + args, + kwargs, + extra_message=msg_template % " with these arguments", + ) + except NotImplementedError as exc: + if all(isinstance(g, nx.Graph) for g in graphs_resolved.values()): + _logger.debug( + "Backend '%s' raised when calling '%s': %s. %s", + backend_name, + self.name, + exc, + fallback_msg, + ) + else: + raise + else: + if fallback_to_nx and all( + # Consider dropping the `isinstance` check here to allow + # duck-type graphs, but let's wait for a backend to ask us. + isinstance(g, nx.Graph) + for g in graphs_resolved.values() + ): + # Log that we are falling back to networkx + _logger.debug( + "Backend '%s' can't run '%s'. %s", + backend_name, + self.name, + fallback_msg, + ) + else: + if self._does_backend_have(backend_name): + extra = " with these arguments" + else: + extra = "" + raise NotImplementedError(msg_template % extra) + elif fallback_to_nx and all( + # Consider dropping the `isinstance` check here to allow + # duck-type graphs, but let's wait for a backend to ask us. 
+ isinstance(g, nx.Graph) + for g in graphs_resolved.values() + ): + # Log that we are falling back to networkx + _logger.debug( + "'%s' was called with inputs from multiple backends: %s. %s", + self.name, + graph_backend_names, + fallback_msg, + ) + else: + raise RuntimeError( + f"'{self.name}' will mutate an input, but it was called with " + f"inputs from multiple backends: {graph_backend_names}. " + f"Automatic {mutate_msg}" + ) + # At this point, no backends are available to handle the call with + # the input graph types, but if the input graphs are compatible + # nx.Graph instances, fall back to networkx without converting. + return self.orig_func(*args, **kwargs) + + # We may generalize fallback configuration as e.g. `nx.config.backend_fallback` + if fallback_to_nx or not graph_backend_names: + # Use "networkx" by default if there are no inputs from backends. + # For example, graph generators should probably return NetworkX graphs + # instead of raising NotImplementedError. + backend_fallback = ["networkx"] + else: + backend_fallback = [] + + # ########################## + # # How this behaves today # + # ########################## + # + # The prose below describes the implementation and a *possible* way to + # generalize "networkx" as "just another backend". The code is structured + # to perhaps someday support backend-to-backend conversions (including + # simply passing objects from one backend directly to another backend; + # the dispatch machinery does not necessarily need to perform conversions), + # but since backend-to-backend matching is not yet supported, the following + # code is merely a convenient way to implement dispatch behaviors that have + # been carefully developed since NetworkX 3.0 and to include falling back + # to the default NetworkX implementation. + # + # The current behavior for functions that don't mutate input graphs: + # + # 1. If backend is specified by `backend=` keyword, use it (done above). + # 2. If input is from a backend other than "networkx", try to use it. + # - Note: if present, "networkx" graphs will be converted to the backend. + # 3. If input is from "networkx" (or no backend), try to use backends from + # `backend_priority` before running with the default "networkx" implementation. + # 4. If configured, "fall back" and run with the default "networkx" implementation. + # + # ################################################ + # # How this is implemented and may work someday # + # ################################################ + # + # Let's determine the order of backends we should try according + # to `backend_priority`, `backend_fallback`, and input backends. + # There are two† dimensions of priorities to consider: + # backend_priority > unspecified > backend_fallback + # and + # backend of an input > not a backend of an input + # These are combined to form five groups of priorities as such: + # + # input ~input + # +-------+-------+ + # backend_priority | 1 | 2 | + # unspecified | 3 | N/A | (if only 1) + # backend_fallback | 4 | 5 | + # +-------+-------+ + # + # This matches the behaviors we developed in versions 3.0 to 3.2, it + # ought to cover virtually all use cases we expect, and I (@eriknw) don't + # think it can be done any simpler (although it can be generalized further + # and made to be more complicated to capture 100% of *possible* use cases). + # Some observations: + # + # 1. If an input is in `backend_priority`, it will be used before trying a + # backend that is higher priority in `backend_priority` and not an input. + # 2. 
To prioritize converting from one backend to another even if both implement + # a function, list one in `backend_priority` and one in `backend_fallback`. + # 3. To disable conversions, set `backend_priority` and `backend_fallback` to []. + # + # †: There is actually a third dimension of priorities: + # should_run == True > should_run == False + # Backends with `can_run == True` and `should_run == False` are tried last. + # + seen = set() + group1 = [] # In backend_priority, and an input + group2 = [] # In backend_priority, but not an input + for name in backend_priority: + if name in seen: + continue + seen.add(name) + if name in graph_backend_names: + group1.append(name) + else: + group2.append(name) + group4 = [] # In backend_fallback, and an input + group5 = [] # In backend_fallback, but not an input + for name in backend_fallback: + if name in seen: + continue + seen.add(name) + if name in graph_backend_names: + group4.append(name) + else: + group5.append(name) + # An input, but not in backend_priority or backend_fallback. + group3 = graph_backend_names - seen + if len(group3) > 1: + # `group3` backends are not configured for automatic conversion or fallback. + # There are at least two issues if this group contains multiple backends: + # + # 1. How should we prioritize them? We have no good way to break ties. + # Although we could arbitrarily choose alphabetical or left-most, + # let's follow the Zen of Python and refuse the temptation to guess. + # 2. We probably shouldn't automatically convert to these backends, + # because we are not configured to do so. + # + # (2) is important to allow disabling all conversions by setting both + # `nx.config.backend_priority` and `nx.config.backend_fallback` to []. + # + # If there is a single backend in `group3`, then giving it priority over + # the fallback backends is what is generally expected. For example, this + # allows input graphs of `backend_fallback` backends (such as "networkx") + # to be converted to, and run with, the unspecified backend. + _logger.debug( + "Call to '%s' has inputs from multiple backends, %s, that " + "have no priority set in `nx.config.backend_priority`, " + "so automatic conversions to " + "these backends will not be attempted.", + self.name, + group3, + ) + group3 = () + + try_order = list(itertools.chain(group1, group2, group3, group4, group5)) + if len(try_order) > 1: + # Should we consider adding an option for more verbose logging? + # For example, we could explain the order of `try_order` in detail. 
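+        # Worked example with hypothetical backend names: given
+        #     backend_priority = ["fast_gpu"], fallback_to_nx enabled
+        #     (so backend_fallback = ["networkx"]),
+        # and one input graph from "other_backend", the groups above are
+        #     group2 = ["fast_gpu"], group3 = {"other_backend"}, group5 = ["networkx"],
+        # so try_order = ["fast_gpu", "other_backend", "networkx"].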
+ _logger.debug( + "Call to '%s' has inputs from %s backends, and will try to use " + "backends in the following order: %s", + self.name, + graph_backend_names or "no", + try_order, + ) + backends_to_try_again = [] + for is_not_first, backend_name in enumerate(try_order): + if is_not_first: + _logger.debug("Trying next backend: '%s'", backend_name) + try: + if not graph_backend_names or graph_backend_names == {backend_name}: + if self._can_backend_run(backend_name, args, kwargs): + return self._call_with_backend(backend_name, args, kwargs) + elif self._can_convert( + backend_name, graph_backend_names + ) and self._can_backend_run(backend_name, args, kwargs): + if self._should_backend_run(backend_name, args, kwargs): + rv = self._convert_and_call( + backend_name, graph_backend_names, args, kwargs + ) + if ( + self._returns_graph + and graph_backend_names + and backend_name not in graph_backend_names + ): + # If the function has graph inputs and graph output, we try + # to make it so the backend of the return type will match the + # backend of the input types. In case this is not possible, + # let's tell the user that the backend of the return graph + # has changed. Perhaps we could try to convert back, but + # "fallback" backends for graph generators should typically + # be compatible with NetworkX graphs. + _logger.debug( + "Call to '%s' is returning a graph from a different " + "backend! It has inputs from %s backends, but ran with " + "'%s' backend and is returning graph from '%s' backend", + self.name, + graph_backend_names, + backend_name, + backend_name, + ) + return rv + # `should_run` is False, but `can_run` is True, so try again later + backends_to_try_again.append(backend_name) + except NotImplementedError as exc: + _logger.debug( + "Backend '%s' raised when calling '%s': %s", + backend_name, + self.name, + exc, + ) + + # We are about to fail. Let's try backends with can_run=True and should_run=False. + # This is unlikely to help today since we try to run with "networkx" before this. + for backend_name in backends_to_try_again: + _logger.debug( + "Trying backend: '%s' (ignoring `should_run=False`)", backend_name + ) + try: + rv = self._convert_and_call( + backend_name, graph_backend_names, args, kwargs + ) + if ( + self._returns_graph + and graph_backend_names + and backend_name not in graph_backend_names + ): + _logger.debug( + "Call to '%s' is returning a graph from a different " + "backend! It has inputs from %s backends, but ran with " + "'%s' backend and is returning graph from '%s' backend", + self.name, + graph_backend_names, + backend_name, + backend_name, + ) + return rv + except NotImplementedError as exc: + _logger.debug( + "Backend '%s' raised when calling '%s': %s", + backend_name, + self.name, + exc, + ) + # As a final effort, we could try to convert and run with `group3` backends + # that we discarded when `len(group3) > 1`, but let's not consider doing + # so until there is a reasonable request for it. + + if len(unspecified_backends := graph_backend_names - seen) > 1: + raise TypeError( + f"Unable to convert inputs from {graph_backend_names} backends and " + f"run '{self.name}'. NetworkX is configured to automatically convert " + f"to {try_order} backends. To remedy this, you may enable automatic " + f"conversion to {unspecified_backends} backends by adding them to " + "`nx.config.backend_priority`, or you " + "may specify a backend to use with the `backend=` keyword argument." 
+ ) + if "networkx" not in self.backends: + extra = ( + " This function is included in NetworkX as an API to dispatch to " + "other backends." + ) + else: + extra = "" + raise NotImplementedError( + f"'{self.name}' is not implemented by {try_order} backends. To remedy " + "this, you may enable automatic conversion to more backends (including " + "'networkx') by adding them to `nx.config.backend_priority`, " + "or you may specify a backend to use with " + f"the `backend=` keyword argument.{extra}" + ) + + # Dispatch only if there exist any installed backend(s) + __call__: typing.Callable = ( + _call_if_any_backends_installed if backends else _call_if_no_backends_installed + ) + + def _will_call_mutate_input(self, args, kwargs): + # Fairly few nx functions mutate the input graph. Most that do, always do. + # So a boolean input indicates "always" or "never". + if isinstance((mutates_input := self.mutates_input), bool): + return mutates_input + + # The ~10 other nx functions either use "copy=True" to control mutation or + # an arg naming an edge/node attribute to mutate (None means no mutation). + # Now `mutates_input` is a dict keyed by arg_name to its func-sig position. + # The `copy=` args are keyed as "not copy" to mean "negate the copy argument". + # Keys w/o "not " mean the call mutates only when the arg value `is not None`. + # + # This section might need different code if new functions mutate in new ways. + # + # NetworkX doesn't have any `mutates_input` dicts with more than 1 item. + # But we treat it like it might have more than 1 item for generality. + n = len(args) + return any( + (args[arg_pos] if n > arg_pos else kwargs.get(arg_name)) is not None + if not arg_name.startswith("not ") + # This assumes that e.g. `copy=True` is the default + else not (args[arg_pos] if n > arg_pos else kwargs.get(arg_name[4:], True)) + for arg_name, arg_pos in mutates_input.items() + ) + + def _can_convert(self, backend_name, graph_backend_names): + # Backend-to-backend conversion not supported yet. + # We can only convert to and from networkx. + rv = backend_name == "networkx" or graph_backend_names.issubset( + {"networkx", backend_name} + ) + if not rv: + _logger.debug( + "Unable to convert from %s backends to '%s' backend", + graph_backend_names, + backend_name, + ) + return rv + + def _does_backend_have(self, backend_name): + """Does the specified backend have this algorithm?""" + if backend_name == "networkx": + return "networkx" in self.backends + # Inspect the backend; don't trust metadata used to create `self.backends` + backend = _load_backend(backend_name) + return hasattr(backend, self.name) + + def _can_backend_run(self, backend_name, args, kwargs): + """Can the specified backend run this algorithm with these arguments?""" + if backend_name == "networkx": + return "networkx" in self.backends + backend = _load_backend(backend_name) + # `backend.can_run` and `backend.should_run` may return strings that describe + # why they can't or shouldn't be run. 
+ if not hasattr(backend, self.name): + _logger.debug( + "Backend '%s' does not implement '%s'", backend_name, self.name + ) + return False + can_run = backend.can_run(self.name, args, kwargs) + if isinstance(can_run, str) or not can_run: + reason = f", because: {can_run}" if isinstance(can_run, str) else "" + _logger.debug( + "Backend '%s' can't run `%s` with arguments: %s%s", + backend_name, + self.name, + _LazyArgsRepr(self, args, kwargs), + reason, + ) + return False + return True + + def _should_backend_run(self, backend_name, args, kwargs): + """Should the specified backend run this algorithm with these arguments? + + Note that this does not check ``backend.can_run``. + """ + # `backend.can_run` and `backend.should_run` may return strings that describe + # why they can't or shouldn't be run. + # `_should_backend_run` may assume that `_can_backend_run` returned True. + if backend_name == "networkx": + return True + backend = _load_backend(backend_name) + should_run = backend.should_run(self.name, args, kwargs) + if isinstance(should_run, str) or not should_run: + reason = f", because: {should_run}" if isinstance(should_run, str) else "" + _logger.debug( + "Backend '%s' shouldn't run `%s` with arguments: %s%s", + backend_name, + self.name, + _LazyArgsRepr(self, args, kwargs), + reason, + ) + return False + return True + + def _convert_arguments(self, backend_name, args, kwargs, *, use_cache, mutations): + """Convert graph arguments to the specified backend. + + Returns + ------- + args tuple and kwargs dict + """ + bound = self.__signature__.bind(*args, **kwargs) + bound.apply_defaults() + if not self.graphs: + bound_kwargs = bound.kwargs + del bound_kwargs["backend"] + return bound.args, bound_kwargs + if backend_name == "networkx": + # `backend_interface.convert_from_nx` preserves everything + preserve_edge_attrs = preserve_node_attrs = preserve_graph_attrs = True + else: + preserve_edge_attrs = self.preserve_edge_attrs + preserve_node_attrs = self.preserve_node_attrs + preserve_graph_attrs = self.preserve_graph_attrs + edge_attrs = self.edge_attrs + node_attrs = self.node_attrs + # Convert graphs into backend graph-like object + # Include the edge and/or node labels if provided to the algorithm + if preserve_edge_attrs is False: + # e.g. `preserve_edge_attrs=False` + pass + elif preserve_edge_attrs is True: + # e.g. `preserve_edge_attrs=True` + edge_attrs = None + elif isinstance(preserve_edge_attrs, str): + if bound.arguments[preserve_edge_attrs] is True or callable( + bound.arguments[preserve_edge_attrs] + ): + # e.g. `preserve_edge_attrs="attr"` and `func(attr=True)` + # e.g. `preserve_edge_attrs="attr"` and `func(attr=myfunc)` + preserve_edge_attrs = True + edge_attrs = None + elif bound.arguments[preserve_edge_attrs] is False and ( + isinstance(edge_attrs, str) + and edge_attrs == preserve_edge_attrs + or isinstance(edge_attrs, dict) + and preserve_edge_attrs in edge_attrs + ): + # e.g. `preserve_edge_attrs="attr"` and `func(attr=False)` + # Treat `False` argument as meaning "preserve_edge_data=False" + # and not `False` as the edge attribute to use. + preserve_edge_attrs = False + edge_attrs = None + else: + # e.g. `preserve_edge_attrs="attr"` and `func(attr="weight")` + preserve_edge_attrs = False + # Else: e.g. `preserve_edge_attrs={"G": {"weight": 1}}` + + if edge_attrs is None: + # May have been set to None above b/c all attributes are preserved + pass + elif isinstance(edge_attrs, str): + if edge_attrs[0] == "[": + # e.g. 
`edge_attrs="[edge_attributes]"` (argument of list of attributes) + # e.g. `func(edge_attributes=["foo", "bar"])` + edge_attrs = dict.fromkeys(bound.arguments[edge_attrs[1:-1]], 1) + elif callable(bound.arguments[edge_attrs]): + # e.g. `edge_attrs="weight"` and `func(weight=myfunc)` + preserve_edge_attrs = True + edge_attrs = None + elif bound.arguments[edge_attrs] is not None: + # e.g. `edge_attrs="weight"` and `func(weight="foo")` (default of 1) + edge_attrs = {bound.arguments[edge_attrs]: 1} + elif self.name == "to_numpy_array" and hasattr( + bound.arguments["dtype"], "names" + ): + # Custom handling: attributes may be obtained from `dtype` + edge_attrs = dict.fromkeys(bound.arguments["dtype"].names, 1) + else: + # e.g. `edge_attrs="weight"` and `func(weight=None)` + edge_attrs = None + else: + # e.g. `edge_attrs={"attr": "default"}` and `func(attr="foo", default=7)` + # e.g. `edge_attrs={"attr": 0}` and `func(attr="foo")` + edge_attrs = { + edge_attr: bound.arguments.get(val, 1) if isinstance(val, str) else val + for key, val in edge_attrs.items() + if (edge_attr := bound.arguments[key]) is not None + } + + if preserve_node_attrs is False: + # e.g. `preserve_node_attrs=False` + pass + elif preserve_node_attrs is True: + # e.g. `preserve_node_attrs=True` + node_attrs = None + elif isinstance(preserve_node_attrs, str): + if bound.arguments[preserve_node_attrs] is True or callable( + bound.arguments[preserve_node_attrs] + ): + # e.g. `preserve_node_attrs="attr"` and `func(attr=True)` + # e.g. `preserve_node_attrs="attr"` and `func(attr=myfunc)` + preserve_node_attrs = True + node_attrs = None + elif bound.arguments[preserve_node_attrs] is False and ( + isinstance(node_attrs, str) + and node_attrs == preserve_node_attrs + or isinstance(node_attrs, dict) + and preserve_node_attrs in node_attrs + ): + # e.g. `preserve_node_attrs="attr"` and `func(attr=False)` + # Treat `False` argument as meaning "preserve_node_data=False" + # and not `False` as the node attribute to use. Is this used? + preserve_node_attrs = False + node_attrs = None + else: + # e.g. `preserve_node_attrs="attr"` and `func(attr="weight")` + preserve_node_attrs = False + # Else: e.g. `preserve_node_attrs={"G": {"pos": None}}` + + if node_attrs is None: + # May have been set to None above b/c all attributes are preserved + pass + elif isinstance(node_attrs, str): + if node_attrs[0] == "[": + # e.g. `node_attrs="[node_attributes]"` (argument of list of attributes) + # e.g. `func(node_attributes=["foo", "bar"])` + node_attrs = dict.fromkeys(bound.arguments[node_attrs[1:-1]]) + elif callable(bound.arguments[node_attrs]): + # e.g. `node_attrs="weight"` and `func(weight=myfunc)` + preserve_node_attrs = True + node_attrs = None + elif bound.arguments[node_attrs] is not None: + # e.g. `node_attrs="weight"` and `func(weight="foo")` + node_attrs = {bound.arguments[node_attrs]: None} + else: + # e.g. `node_attrs="weight"` and `func(weight=None)` + node_attrs = None + else: + # e.g. `node_attrs={"attr": "default"}` and `func(attr="foo", default=7)` + # e.g. `node_attrs={"attr": 0}` and `func(attr="foo")` + node_attrs = { + node_attr: bound.arguments.get(val) if isinstance(val, str) else val + for key, val in node_attrs.items() + if (node_attr := bound.arguments[key]) is not None + } + + # It should be safe to assume that we either have networkx graphs or backend graphs. + # Future work: allow conversions between backends. 
+ for gname in self.graphs: + if gname in self.list_graphs: + bound.arguments[gname] = [ + self._convert_graph( + backend_name, + g, + edge_attrs=edge_attrs, + node_attrs=node_attrs, + preserve_edge_attrs=preserve_edge_attrs, + preserve_node_attrs=preserve_node_attrs, + preserve_graph_attrs=preserve_graph_attrs, + graph_name=gname, + use_cache=use_cache, + mutations=mutations, + ) + if getattr(g, "__networkx_backend__", "networkx") != backend_name + else g + for g in bound.arguments[gname] + ] + else: + graph = bound.arguments[gname] + if graph is None: + if gname in self.optional_graphs: + continue + raise TypeError( + f"Missing required graph argument `{gname}` in {self.name} function" + ) + if isinstance(preserve_edge_attrs, dict): + preserve_edges = False + edges = preserve_edge_attrs.get(gname, edge_attrs) + else: + preserve_edges = preserve_edge_attrs + edges = edge_attrs + if isinstance(preserve_node_attrs, dict): + preserve_nodes = False + nodes = preserve_node_attrs.get(gname, node_attrs) + else: + preserve_nodes = preserve_node_attrs + nodes = node_attrs + if isinstance(preserve_graph_attrs, set): + preserve_graph = gname in preserve_graph_attrs + else: + preserve_graph = preserve_graph_attrs + if getattr(graph, "__networkx_backend__", "networkx") != backend_name: + bound.arguments[gname] = self._convert_graph( + backend_name, + graph, + edge_attrs=edges, + node_attrs=nodes, + preserve_edge_attrs=preserve_edges, + preserve_node_attrs=preserve_nodes, + preserve_graph_attrs=preserve_graph, + graph_name=gname, + use_cache=use_cache, + mutations=mutations, + ) + bound_kwargs = bound.kwargs + del bound_kwargs["backend"] + return bound.args, bound_kwargs + + def _convert_graph( + self, + backend_name, + graph, + *, + edge_attrs, + node_attrs, + preserve_edge_attrs, + preserve_node_attrs, + preserve_graph_attrs, + graph_name, + use_cache, + mutations, + ): + nx_cache = getattr(graph, "__networkx_cache__", None) if use_cache else None + if nx_cache is not None: + cache = nx_cache.setdefault("backends", {}).setdefault(backend_name, {}) + key = _get_cache_key( + edge_attrs=edge_attrs, + node_attrs=node_attrs, + preserve_edge_attrs=preserve_edge_attrs, + preserve_node_attrs=preserve_node_attrs, + preserve_graph_attrs=preserve_graph_attrs, + ) + compat_key, rv = _get_from_cache(cache, key, mutations=mutations) + if rv is not None: + if "cache" not in nx.config.warnings_to_ignore: + warnings.warn( + "Note: conversions to backend graphs are saved to cache " + "(`G.__networkx_cache__` on the original graph) by default." + "\n\nThis warning means the cached graph is being used " + f"for the {backend_name!r} backend in the " + f"call to {self.name}.\n\nFor the cache to be consistent " + "(i.e., correct), the input graph must not have been " + "manually mutated since the cached graph was created. " + "Examples of manually mutating the graph data structures " + "resulting in an inconsistent cache include:\n\n" + " >>> G[u][v][key] = val\n\n" + "and\n\n" + " >>> for u, v, d in G.edges(data=True):\n" + " ... d[key] = val\n\n" + "Using methods such as `G.add_edge(u, v, weight=val)` " + "will correctly clear the cache to keep it consistent. " + "You may also use `G.__networkx_cache__.clear()` to " + "manually clear the cache, or set `G.__networkx_cache__` " + "to None to disable caching for G. 
Enable or disable caching "
+                        "globally via `nx.config.cache_converted_graphs` config.\n\n"
+                        "To disable this warning:\n\n"
+                        ' >>> nx.config.warnings_to_ignore.add("cache")\n'
+                    )
+                if rv == FAILED_TO_CONVERT:
+                    # NotImplementedError is reasonable to use since the backend doesn't
+                    # implement this conversion. However, this will be different than
+                    # the original exception that the backend raised when it failed.
+                    # Using NotImplementedError allows the next backend to be attempted.
+                    raise NotImplementedError(
+                        "Graph conversion aborted: unable to convert graph to "
+                        f"'{backend_name}' backend in call to '{self.name}', "
+                        "because this conversion has previously failed."
+                    )
+                _logger.debug(
+                    "Using cached converted graph (from '%s' to '%s' backend) "
+                    "in call to '%s' for '%s' argument",
+                    getattr(graph, "__networkx_backend__", None),
+                    backend_name,
+                    self.name,
+                    graph_name,
+                )
+                return rv
+
+        if backend_name == "networkx":
+            # Perhaps we should check that "__networkx_backend__" attribute exists
+            # and return the original object if not.
+            if not hasattr(graph, "__networkx_backend__"):
+                _logger.debug(
+                    "Unable to convert input to 'networkx' backend in call to '%s' for "
+                    "'%s' argument, because it is not from a backend (i.e., it does not "
+                    "have `G.__networkx_backend__` attribute). Using the original "
+                    "object: %s",
+                    self.name,
+                    graph_name,
+                    graph,
+                )
+                # This may fail, but let it fail in the networkx function
+                return graph
+            backend = _load_backend(graph.__networkx_backend__)
+            try:
+                rv = backend.convert_to_nx(graph)
+            except Exception:
+                if nx_cache is not None:
+                    _set_to_cache(cache, key, FAILED_TO_CONVERT)
+                raise
+        else:
+            backend = _load_backend(backend_name)
+            try:
+                rv = backend.convert_from_nx(
+                    graph,
+                    edge_attrs=edge_attrs,
+                    node_attrs=node_attrs,
+                    preserve_edge_attrs=preserve_edge_attrs,
+                    preserve_node_attrs=preserve_node_attrs,
+                    # Always preserve graph attrs when we are caching b/c this should be
+                    # cheap and may help prevent extra (unnecessary) conversions. Because
+                    # we do this, we don't need `preserve_graph_attrs` in the cache key.
+                    preserve_graph_attrs=preserve_graph_attrs or nx_cache is not None,
+                    name=self.name,
+                    graph_name=graph_name,
+                )
+            except Exception:
+                if nx_cache is not None:
+                    _set_to_cache(cache, key, FAILED_TO_CONVERT)
+                raise
+        if nx_cache is not None:
+            _set_to_cache(cache, key, rv)
+            _logger.debug(
+                "Caching converted graph (from '%s' to '%s' backend) "
+                "in call to '%s' for '%s' argument",
+                getattr(graph, "__networkx_backend__", None),
+                backend_name,
+                self.name,
+                graph_name,
+            )
+
+        return rv
+
+    def _call_with_backend(self, backend_name, args, kwargs, *, extra_message=None):
+        """Call this dispatchable function with a backend without converting inputs."""
+        if backend_name == "networkx":
+            return self.orig_func(*args, **kwargs)
+        backend = _load_backend(backend_name)
+        _logger.debug(
+            "Using backend '%s' for call to '%s' with arguments: %s",
+            backend_name,
+            self.name,
+            _LazyArgsRepr(self, args, kwargs),
+        )
+        try:
+            return getattr(backend, self.name)(*args, **kwargs)
+        except NotImplementedError as exc:
+            if extra_message is not None:
+                _logger.debug(
+                    "Backend '%s' raised when calling '%s': %s",
+                    backend_name,
+                    self.name,
+                    exc,
+                )
+                raise NotImplementedError(extra_message) from exc
+            raise
+
+    def _convert_and_call(
+        self,
+        backend_name,
+        input_backend_names,
+        args,
+        kwargs,
+        *,
+        extra_message=None,
+        mutations=None,
+    ):
+        """Call this dispatchable function with a backend after converting inputs.
+ + Parameters + ---------- + backend_name : str + input_backend_names : set[str] + args : arguments tuple + kwargs : keywords dict + extra_message : str, optional + Additional message to log if NotImplementedError is raised by backend. + mutations : list, optional + Used to clear objects gotten from cache if inputs will be mutated. + """ + if backend_name == "networkx": + func = self.orig_func + else: + backend = _load_backend(backend_name) + func = getattr(backend, self.name) + other_backend_names = input_backend_names - {backend_name} + _logger.debug( + "Converting input graphs from %s backend%s to '%s' backend for call to '%s'", + other_backend_names + if len(other_backend_names) > 1 + else f"'{next(iter(other_backend_names))}'", + "s" if len(other_backend_names) > 1 else "", + backend_name, + self.name, + ) + try: + converted_args, converted_kwargs = self._convert_arguments( + backend_name, + args, + kwargs, + use_cache=nx.config.cache_converted_graphs, + mutations=mutations, + ) + except NotImplementedError as exc: + # Only log the exception if we are adding an extra message + # because we don't want to lose any information. + _logger.debug( + "Failed to convert graphs from %s to '%s' backend for call to '%s'" + + ("" if extra_message is None else ": %s"), + input_backend_names, + backend_name, + self.name, + *(() if extra_message is None else (exc,)), + ) + if extra_message is not None: + raise NotImplementedError(extra_message) from exc + raise + if backend_name != "networkx": + _logger.debug( + "Using backend '%s' for call to '%s' with arguments: %s", + backend_name, + self.name, + _LazyArgsRepr(self, converted_args, converted_kwargs), + ) + try: + return func(*converted_args, **converted_kwargs) + except NotImplementedError as exc: + if extra_message is not None: + _logger.debug( + "Backend '%s' raised when calling '%s': %s", + backend_name, + self.name, + exc, + ) + raise NotImplementedError(extra_message) from exc + raise + + def _convert_and_call_for_tests( + self, backend_name, args, kwargs, *, fallback_to_nx=False + ): + """Call this dispatchable function with a backend; for use with testing.""" + backend = _load_backend(backend_name) + if not self._can_backend_run(backend_name, args, kwargs): + if fallback_to_nx or not self.graphs: + if fallback_to_nx: + _logger.debug( + "Falling back to use 'networkx' instead of '%s' backend " + "for call to '%s' with arguments: %s", + backend_name, + self.name, + _LazyArgsRepr(self, args, kwargs), + ) + return self.orig_func(*args, **kwargs) + + import pytest + + msg = f"'{self.name}' not implemented by {backend_name}" + if hasattr(backend, self.name): + msg += " with the given arguments" + pytest.xfail(msg) + + from collections.abc import Iterable, Iterator, Mapping + from copy import copy, deepcopy + from io import BufferedReader, BytesIO, StringIO, TextIOWrapper + from itertools import tee + from random import Random + + import numpy as np + from numpy.random import Generator, RandomState + from scipy.sparse import sparray + + # We sometimes compare the backend result (or input graphs) to the + # original result (or input graphs), so we need two sets of arguments. 
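+        # Sketch of the duplication strategy used below: RandomState objects are
+        # deep-copied, BytesIO/StringIO buffers and RNG objects are shallow-copied,
+        # iterators are duplicated with `itertools.tee`, and everything else is
+        # shared, so the backend call and the reference call see equivalent inputs.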
+ compare_result_to_nx = ( + self._returns_graph + and "networkx" in self.backends + and self.name + not in { + # Has graphs as node values (unable to compare) + "quotient_graph", + # We don't handle tempfile.NamedTemporaryFile arguments + "read_gml", + "read_graph6", + "read_sparse6", + # We don't handle io.BufferedReader or io.TextIOWrapper arguments + "bipartite_read_edgelist", + "read_adjlist", + "read_edgelist", + "read_graphml", + "read_multiline_adjlist", + "read_pajek", + "from_pydot", + "pydot_read_dot", + "agraph_read_dot", + # graph comparison fails b/c of nan values + "read_gexf", + } + ) + compare_inputs_to_nx = ( + "networkx" in self.backends and self._will_call_mutate_input(args, kwargs) + ) + + # Tee iterators and copy random state so that they may be used twice. + if not args or not compare_result_to_nx and not compare_inputs_to_nx: + args_to_convert = args_nx = args + else: + args_to_convert, args_nx = zip( + *( + (arg, deepcopy(arg)) + if isinstance(arg, RandomState) + else (arg, copy(arg)) + if isinstance(arg, BytesIO | StringIO | Random | Generator) + else tee(arg) + if isinstance(arg, Iterator) + and not isinstance(arg, BufferedReader | TextIOWrapper) + else (arg, arg) + for arg in args + ) + ) + if not kwargs or not compare_result_to_nx and not compare_inputs_to_nx: + kwargs_to_convert = kwargs_nx = kwargs + else: + kwargs_to_convert, kwargs_nx = zip( + *( + ((k, v), (k, deepcopy(v))) + if isinstance(v, RandomState) + else ((k, v), (k, copy(v))) + if isinstance(v, BytesIO | StringIO | Random | Generator) + else ((k, (teed := tee(v))[0]), (k, teed[1])) + if isinstance(v, Iterator) + and not isinstance(v, BufferedReader | TextIOWrapper) + else ((k, v), (k, v)) + for k, v in kwargs.items() + ) + ) + kwargs_to_convert = dict(kwargs_to_convert) + kwargs_nx = dict(kwargs_nx) + + try: + converted_args, converted_kwargs = self._convert_arguments( + backend_name, + args_to_convert, + kwargs_to_convert, + use_cache=False, + mutations=None, + ) + except NotImplementedError as exc: + if fallback_to_nx: + _logger.debug( + "Graph conversion failed; falling back to use 'networkx' instead " + "of '%s' backend for call to '%s'", + backend_name, + self.name, + ) + return self.orig_func(*args_nx, **kwargs_nx) + import pytest + + pytest.xfail( + exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}" + ) + + if compare_inputs_to_nx: + # Ensure input graphs are different if the function mutates an input graph. 
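+ # A minimal sketch of the binding below, assuming a hypothetical signature
+ # `def pagerank(G, alpha=0.85)`: `self.__signature__.bind(G, alpha=0.9)`
+ # returns a `BoundArguments` whose `.arguments["G"]` is the graph object, so
+ # each graph argument can be looked up by name and copied on the networkx
+ # side before the backend call has a chance to mutate the shared object.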
+ bound_backend = self.__signature__.bind(*converted_args, **converted_kwargs) + bound_backend.apply_defaults() + bound_nx = self.__signature__.bind(*args_nx, **kwargs_nx) + bound_nx.apply_defaults() + for gname in self.graphs: + graph_nx = bound_nx.arguments[gname] + if bound_backend.arguments[gname] is graph_nx is not None: + bound_nx.arguments[gname] = graph_nx.copy() + args_nx = bound_nx.args + kwargs_nx = bound_nx.kwargs + kwargs_nx.pop("backend", None) + + _logger.debug( + "Using backend '%s' for call to '%s' with arguments: %s", + backend_name, + self.name, + _LazyArgsRepr(self, converted_args, converted_kwargs), + ) + try: + result = getattr(backend, self.name)(*converted_args, **converted_kwargs) + except NotImplementedError as exc: + if fallback_to_nx: + _logger.debug( + "Backend '%s' raised when calling '%s': %s; " + "falling back to use 'networkx' instead.", + backend_name, + self.name, + exc, + ) + return self.orig_func(*args_nx, **kwargs_nx) + import pytest + + pytest.xfail( + exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}" + ) + + # Verify that `self._returns_graph` is correct. This compares the return type + # to the type expected from `self._returns_graph`. This handles tuple and list + # return types, but *does not* catch functions that yield graphs. + if ( + self._returns_graph + != ( + isinstance(result, nx.Graph) + or hasattr(result, "__networkx_backend__") + or isinstance(result, tuple | list) + and any( + isinstance(x, nx.Graph) or hasattr(x, "__networkx_backend__") + for x in result + ) + ) + and not ( + # May return Graph or None + self.name in {"check_planarity", "check_planarity_recursive"} + and any(x is None for x in result) + ) + and not ( + # May return Graph or dict + self.name in {"held_karp_ascent"} + and any(isinstance(x, dict) for x in result) + ) + and self.name + not in { + # yields graphs + "all_triads", + "general_k_edge_subgraphs", + # yields graphs or arrays + "nonisomorphic_trees", + } + ): + raise RuntimeError(f"`returns_graph` is incorrect for {self.name}") + + def check_result(val, depth=0): + if isinstance(val, np.number): + raise RuntimeError( + f"{self.name} returned a numpy scalar {val} ({type(val)}, depth={depth})" + ) + if isinstance(val, np.ndarray | sparray): + return + if isinstance(val, nx.Graph): + check_result(val._node, depth=depth + 1) + check_result(val._adj, depth=depth + 1) + return + if isinstance(val, Iterator): + raise NotImplementedError + if isinstance(val, Iterable) and not isinstance(val, str): + for x in val: + check_result(x, depth=depth + 1) + if isinstance(val, Mapping): + for x in val.values(): + check_result(x, depth=depth + 1) + + def check_iterator(it): + for val in it: + try: + check_result(val) + except RuntimeError as exc: + raise RuntimeError( + f"{self.name} returned a numpy scalar {val} ({type(val)})" + ) from exc + yield val + + if self.name in {"from_edgelist"}: + # numpy scalars are explicitly given as values in some tests + pass + elif isinstance(result, Iterator): + result = check_iterator(result) + else: + try: + check_result(result) + except RuntimeError as exc: + raise RuntimeError( + f"{self.name} returned a numpy scalar {result} ({type(result)})" + ) from exc + + def assert_graphs_equal(G1, G2, strict=True): + assert G1.number_of_nodes() == G2.number_of_nodes() + assert G1.number_of_edges() == G2.number_of_edges() + assert G1.is_directed() is G2.is_directed() + assert G1.is_multigraph() is G2.is_multigraph() + if strict: + assert G1.graph == G2.graph
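+ # Note (illustrative, not exhaustive): strict mode compares the full node and
+ # edge data dicts, so a backend round-trip that, say, drops an edge attribute
+ # would fail the `_adj` equality below even though the non-strict checks
+ # (same node set, same edge set) would still pass.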
+ assert G1._node == G2._node + assert G1._adj == G2._adj + else: + assert set(G1) == set(G2) + assert set(G1.edges) == set(G2.edges) + + if compare_inputs_to_nx: + # Special-case algorithms that mutate input graphs + result_nx = self.orig_func(*args_nx, **kwargs_nx) + for gname in self.graphs: + G0 = bound_backend.arguments[gname] + G1 = bound_nx.arguments[gname] + if G0 is not None or G1 is not None: + G1 = backend.convert_to_nx(G1) + assert_graphs_equal(G0, G1, strict=False) + + converted_result = backend.convert_to_nx(result) + if compare_result_to_nx and isinstance(converted_result, nx.Graph): + # For graph return types (e.g. generators), we compare that results are + # the same between the backend and networkx, then return the original + # networkx result so the iteration order will be consistent in tests. + if compare_inputs_to_nx: + G = result_nx + else: + G = self.orig_func(*args_nx, **kwargs_nx) + assert_graphs_equal(G, converted_result) + return G + + return converted_result + + def _make_doc(self): + """Generate the backends section at the end for functions having one or more + alternate backend implementations, using the `backend_info` entry-point.""" + + if self.backends == {"networkx"}: + return self._orig_doc + # Add "Backends" section to the bottom of the docstring (if there are backends) + lines = [ + "Backends", + "--------", + ] + for backend in sorted(self.backends - {"networkx"}): + info = backend_info[backend] + if "short_summary" in info: + lines.append(f"{backend} : {info['short_summary']}") + else: + lines.append(backend) + if "functions" not in info or self.name not in info["functions"]: + lines.append("") + continue + + func_info = info["functions"][self.name] + + # Renaming extra_docstring to additional_docs + if func_docs := ( + func_info.get("additional_docs") or func_info.get("extra_docstring") + ): + lines.extend( + f" {line}" if line else line for line in func_docs.split("\n") + ) + add_gap = True + else: + add_gap = False + + # Renaming extra_parameters to additional_parameters + if extra_parameters := ( + func_info.get("extra_parameters") + or func_info.get("additional_parameters") + ): + if add_gap: + lines.append("") + lines.append(" Additional parameters:") + for param in sorted(extra_parameters): + lines.append(f" {param}") + if desc := extra_parameters[param]: + lines.append(f" {desc}") + lines.append("") + else: + lines.append("") + + if func_url := func_info.get("url"): + lines.append(f"[`Source <{func_url}>`_]") + lines.append("") + + # We assume the docstrings are indented by four spaces (true for now) + new_doc = self._orig_doc or "" + if not new_doc.rstrip(): + new_doc = f"The original docstring for {self.name} was empty." + if self.backends: + lines.pop() # Remove last empty line + to_add = "\n ".join(lines) + new_doc = f"{new_doc.rstrip()}\n\n {to_add}" + + # For backend-only funcs, add an "Attention" admonition after the one-line summary + if "networkx" not in self.backends: + lines = new_doc.split("\n") + index = 0 + while not lines[index].strip(): + index += 1 + while index < len(lines) and lines[index].strip(): + index += 1 + backends = sorted(self.backends) + if len(backends) == 0: + example = "" + elif len(backends) == 1: + example = f' such as "{backends[0]}"' + elif len(backends) == 2: + example = f' such as "{backends[0]}" or "{backends[1]}"' + else: + example = ( + " such as " + + ", ".join(f'"{x}"' for x in backends[:-1]) + + f', or "{backends[-1]}"' # Oxford comma + ) + to_add = ( + "\n .. 
attention:: This function does not have a default NetworkX implementation.\n" + " It may only be run with an installable :doc:`backend ` that\n" + f" supports it{example}.\n\n" + " Hint: use ``backend=...`` keyword argument to specify a backend or add\n" + " backends to ``nx.config.backend_priority``." + ) + lines.insert(index, to_add) + new_doc = "\n".join(lines) + return new_doc + + def __reduce__(self): + """Allow this object to be serialized with pickle. + + This uses the global registry `_registered_algorithms` to deserialize. + """ + return _restore_dispatchable, (self.name,) + + +def _restore_dispatchable(name): + return _registered_algorithms[name].__wrapped__ + + +def _get_cache_key( + *, + edge_attrs, + node_attrs, + preserve_edge_attrs, + preserve_node_attrs, + preserve_graph_attrs, +): + """Return key used by networkx caching given arguments for ``convert_from_nx``.""" + # edge_attrs: dict | None + # node_attrs: dict | None + # preserve_edge_attrs: bool (False if edge_attrs is not None) + # preserve_node_attrs: bool (False if node_attrs is not None) + return ( + frozenset(edge_attrs.items()) + if edge_attrs is not None + else preserve_edge_attrs, + frozenset(node_attrs.items()) + if node_attrs is not None + else preserve_node_attrs, + ) + + +def _get_from_cache(cache, key, *, backend_name=None, mutations=None): + """Search the networkx cache for a graph that is compatible with ``key``. + + Parameters + ---------- + cache : dict + If ``backend_name`` is given, then this is treated as ``G.__networkx_cache__``, + but if ``backend_name`` is None, then this is treated as the resolved inner + cache such as ``G.__networkx_cache__["backends"][backend_name]``. + key : tuple + Cache key from ``_get_cache_key``. + backend_name : str, optional + Name of the backend to control how ``cache`` is interpreted. + mutations : list, optional + Used internally to clear objects gotten from cache if inputs will be mutated. + + Returns + ------- + tuple or None + The key of the compatible graph found in the cache. + graph or "FAILED_TO_CONVERT" or None + A compatible graph if possible. "FAILED_TO_CONVERT" indicates that a previous + conversion attempt failed for this cache key. + """ + if backend_name is not None: + cache = cache.get("backends", {}).get(backend_name, {}) + if not cache: + return None, None + + # Do a simple search for a cached graph with compatible data. + # For example, if we need a single attribute, then it's okay + # to use a cached graph that preserved all attributes. + # This looks for an exact match first. + edge_key, node_key = key + for compat_key in itertools.product( + (edge_key, True) if edge_key is not True else (True,), + (node_key, True) if node_key is not True else (True,), + ): + if (rv := cache.get(compat_key)) is not None and ( + rv != FAILED_TO_CONVERT or key == compat_key + ): + if mutations is not None: + # Remove this item from the cache (after all conversions) if + # the call to this dispatchable function will mutate an input. + mutations.append((cache, compat_key)) + return compat_key, rv + + # Iterate over the items in `cache` to see if any are compatible. + # For example, if no edge attributes are needed, then a graph + # with any edge attribute will suffice. We use the same logic + # below (but switched) to clear unnecessary items from the cache. + # Use `list(cache.items())` to be thread-safe. 
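+ # An illustrative example with hypothetical keys: an entry cached under
+ # (True, True) preserved *all* edge and node attributes, so it can serve a
+ # request keyed (frozenset({("weight", 1)}), True) that needs only the
+ # "weight" edge attribute; the reverse lookup would not be valid.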
+ for (ekey, nkey), graph in list(cache.items()): + if graph == FAILED_TO_CONVERT: + # Return FAILED_TO_CONVERT if any cache key that requires a subset + # of the edge/node attributes of the given cache key has previously + # failed to convert. This logic is similar to `_set_to_cache`. + if ekey is False or edge_key is True: + pass + elif ekey is True or edge_key is False or not ekey.issubset(edge_key): + continue + if nkey is False or node_key is True: # or nkey == node_key: + pass + elif nkey is True or node_key is False or not nkey.issubset(node_key): + continue + # Save to cache for faster subsequent lookups + cache[key] = FAILED_TO_CONVERT + elif edge_key is False or ekey is True: + pass # Cache works for edge data! + elif edge_key is True or ekey is False or not edge_key.issubset(ekey): + continue # Cache missing required edge data; does not work + if node_key is False or nkey is True: + pass # Cache works for node data! + elif node_key is True or nkey is False or not node_key.issubset(nkey): + continue # Cache missing required node data; does not work + if mutations is not None: + # Remove this item from the cache (after all conversions) if + # the call to this dispatchable function will mutate an input. + mutations.append((cache, (ekey, nkey))) + return (ekey, nkey), graph + + return None, None + + +def _set_to_cache(cache, key, graph, *, backend_name=None): + """Set a backend graph to the cache, and remove unnecessary cached items. + + Parameters + ---------- + cache : dict + If ``backend_name`` is given, then this is treated as ``G.__networkx_cache__``, + but if ``backend_name`` is None, then this is treated as the resolved inner + cache such as ``G.__networkx_cache__["backends"][backend_name]``. + key : tuple + Cache key from ``_get_cache_key``. + graph : graph or "FAILED_TO_CONVERT" + Setting value to "FAILED_TO_CONVERT" prevents this conversion from being + attempted in future calls. + backend_name : str, optional + Name of the backend to control how ``cache`` is interpreted. + + Returns + ------- + dict + The items that were removed from the cache. + """ + if backend_name is not None: + cache = cache.setdefault("backends", {}).setdefault(backend_name, {}) + # Remove old cached items that are no longer necessary since they + # are dominated/subsumed/outdated by what was just calculated. + # This uses the same logic as above, but with keys switched. + # Also, don't update the cache here if the call will mutate an input. 
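+ # An illustrative example with hypothetical keys: caching a conversion under
+ # (True, True) (all attributes preserved) makes an older entry such as
+ # (frozenset({("weight", 1)}), False) redundant, so the loop below pops it.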
+ removed = {} + edge_key, node_key = key + cache[key] = graph # Set at beginning to be thread-safe + if graph == FAILED_TO_CONVERT: + return removed + for cur_key in list(cache): + if cur_key == key: + continue + ekey, nkey = cur_key + if ekey is False or edge_key is True: + pass + elif ekey is True or edge_key is False or not ekey.issubset(edge_key): + continue + if nkey is False or node_key is True: + pass + elif nkey is True or node_key is False or not nkey.issubset(node_key): + continue + # Use pop instead of del to try to be thread-safe + if (graph := cache.pop(cur_key, None)) is not None: + removed[cur_key] = graph + return removed + + +class _LazyArgsRepr: + """Simple wrapper to display arguments of dispatchable functions in logging calls.""" + + def __init__(self, func, args, kwargs): + self.func = func + self.args = args + self.kwargs = kwargs + self.value = None + + def __repr__(self): + if self.value is None: + bound = self.func.__signature__.bind_partial(*self.args, **self.kwargs) + inner = ", ".join(f"{key}={val!r}" for key, val in bound.arguments.items()) + self.value = f"({inner})" + return self.value + + +if os.environ.get("_NETWORKX_BUILDING_DOCS_"): + # When building docs with Sphinx, use the original function with the + # dispatched __doc__, b/c Sphinx renders normal Python functions better. + # This doesn't show e.g. `*, backend=None, **backend_kwargs` in the + # signatures, which is probably okay. It does allow the docstring to be + # updated based on the installed backends. + _orig_dispatchable = _dispatchable + + def _dispatchable(func=None, **kwargs): # type: ignore[no-redef] + if func is None: + return partial(_dispatchable, **kwargs) + dispatched_func = _orig_dispatchable(func, **kwargs) + func.__doc__ = dispatched_func.__doc__ + return func + + _dispatchable.__doc__ = _orig_dispatchable.__new__.__doc__ # type: ignore[method-assign,assignment] + _sig = inspect.signature(_orig_dispatchable.__new__) + _dispatchable.__signature__ = _sig.replace( # type: ignore[method-assign,assignment] + parameters=[v for k, v in _sig.parameters.items() if k != "cls"] + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/configs.py b/.venv/lib/python3.12/site-packages/networkx/utils/configs.py new file mode 100644 index 0000000..bbd9792 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/configs.py @@ -0,0 +1,391 @@ +import collections +import typing +from dataclasses import dataclass + +__all__ = ["Config"] + + +@dataclass(init=False, eq=False, slots=True, kw_only=True, match_args=False) +class Config: + """The base class for NetworkX configuration. + + There are two ways to use this to create configurations. The recommended way + is to subclass ``Config`` with docs and annotations. + + >>> class MyConfig(Config): + ... '''Breakfast!''' + ... + ... eggs: int + ... spam: int + ... + ... def _on_setattr(self, key, value): + ... assert isinstance(value, int) and value >= 0 + ... return value + >>> cfg = MyConfig(eggs=1, spam=5) + + Another way is to simply pass the initial configuration as keyword arguments to + the ``Config`` instance: + + >>> cfg1 = Config(eggs=1, spam=5) + >>> cfg1 + Config(eggs=1, spam=5) + + Once defined, config items may be modified, but can't be added or deleted by default. 
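+ For example (an illustrative doctest; ``toast`` is a hypothetical key), a
+ strict config rejects items that were never declared:
+
+ >>> cfg.toast = 0
+ Traceback (most recent call last):
+ ...
+ AttributeError: Invalid config name: 'toast'
+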
+ ``Config`` is a ``Mapping``, and can get and set configs via attributes or brackets: + + >>> cfg.eggs = 2 + >>> cfg.eggs + 2 + >>> cfg["spam"] = 42 + >>> cfg["spam"] + 42 + + For convenience, it can also set configs within a context with the "with" statement: + + >>> with cfg(spam=3): + ... print("spam (in context):", cfg.spam) + spam (in context): 3 + >>> print("spam (after context):", cfg.spam) + spam (after context): 42 + + Subclasses may also define ``_on_setattr`` (as done in the example above) + to ensure the value being assigned is valid: + + >>> cfg.spam = -1 + Traceback (most recent call last): + ... + AssertionError + + If a more flexible configuration object is needed that allows adding and deleting + configurations, then pass ``strict=False`` when defining the subclass: + + >>> class FlexibleConfig(Config, strict=False): + ... default_greeting: str = "Hello" + >>> flexcfg = FlexibleConfig() + >>> flexcfg.name = "Mr. Anderson" + >>> flexcfg + FlexibleConfig(default_greeting='Hello', name='Mr. Anderson') + """ + + def __init_subclass__(cls, strict=True): + cls._strict = strict + + def __new__(cls, **kwargs): + orig_class = cls + if cls is Config: + # Enable the "simple" case of accepting config definition as keywords + cls = type( + cls.__name__, + (cls,), + {"__annotations__": dict.fromkeys(kwargs, typing.Any)}, + ) + cls = dataclass( + eq=False, + repr=cls._strict, + slots=cls._strict, + kw_only=True, + match_args=False, + )(cls) + if not cls._strict: + cls.__repr__ = _flexible_repr + cls._orig_class = orig_class # Save original class so we can pickle + cls._prev = None # Stage previous configs to enable use as context manager + cls._context_stack = [] # Stack of previous configs when used as context + instance = object.__new__(cls) + instance.__init__(**kwargs) + return instance + + def _on_setattr(self, key, value): + """Process config value and check whether it is valid. Useful for subclasses.""" + return value + + def _on_delattr(self, key): + """Callback for when a config item is being deleted. Useful for subclasses.""" + + # Control behavior of attributes + def __dir__(self): + return self.__dataclass_fields__.keys() + + def __setattr__(self, key, value): + if self._strict and key not in self.__dataclass_fields__: + raise AttributeError(f"Invalid config name: {key!r}") + value = self._on_setattr(key, value) + object.__setattr__(self, key, value) + self.__class__._prev = None + + def __delattr__(self, key): + if self._strict: + raise TypeError( + f"Configuration items can't be deleted (can't delete {key!r})." 
+ ) + self._on_delattr(key) + object.__delattr__(self, key) + self.__class__._prev = None + + # Be a `collection.abc.Collection` + def __contains__(self, key): + return ( + key in self.__dataclass_fields__ if self._strict else key in self.__dict__ + ) + + def __iter__(self): + return iter(self.__dataclass_fields__ if self._strict else self.__dict__) + + def __len__(self): + return len(self.__dataclass_fields__ if self._strict else self.__dict__) + + def __reversed__(self): + return reversed(self.__dataclass_fields__ if self._strict else self.__dict__) + + # Add dunder methods for `collections.abc.Mapping` + def __getitem__(self, key): + try: + return getattr(self, key) + except AttributeError as err: + raise KeyError(*err.args) from None + + def __setitem__(self, key, value): + try: + self.__setattr__(key, value) + except AttributeError as err: + raise KeyError(*err.args) from None + + def __delitem__(self, key): + try: + self.__delattr__(key) + except AttributeError as err: + raise KeyError(*err.args) from None + + _ipython_key_completions_ = __dir__ # config[" + + # Go ahead and make it a `collections.abc.Mapping` + def get(self, key, default=None): + return getattr(self, key, default) + + def items(self): + return collections.abc.ItemsView(self) + + def keys(self): + return collections.abc.KeysView(self) + + def values(self): + return collections.abc.ValuesView(self) + + # dataclass can define __eq__ for us, but do it here so it works after pickling + def __eq__(self, other): + if not isinstance(other, Config): + return NotImplemented + return self._orig_class == other._orig_class and self.items() == other.items() + + # Make pickle work + def __reduce__(self): + return self._deserialize, (self._orig_class, dict(self)) + + @staticmethod + def _deserialize(cls, kwargs): + return cls(**kwargs) + + # Allow to be used as context manager + def __call__(self, **kwargs): + kwargs = {key: self._on_setattr(key, val) for key, val in kwargs.items()} + prev = dict(self) + for key, val in kwargs.items(): + setattr(self, key, val) + self.__class__._prev = prev + return self + + def __enter__(self): + if self.__class__._prev is None: + raise RuntimeError( + "Config being used as a context manager without config items being set. " + "Set config items via keyword arguments when calling the config object. " + "For example, using config as a context manager should be like:\n\n" + ' >>> with cfg(breakfast="spam"):\n' + " ... ... # Do stuff\n" + ) + self.__class__._context_stack.append(self.__class__._prev) + self.__class__._prev = None + return self + + def __exit__(self, exc_type, exc_value, traceback): + prev = self.__class__._context_stack.pop() + for key, val in prev.items(): + setattr(self, key, val) + + +def _flexible_repr(self): + return ( + f"{self.__class__.__qualname__}(" + + ", ".join(f"{key}={val!r}" for key, val in self.__dict__.items()) + + ")" + ) + + +# Register, b/c `Mapping.__subclasshook__` returns `NotImplemented` +collections.abc.Mapping.register(Config) + + +class BackendPriorities(Config, strict=False): + """Configuration to control automatic conversion to and calling of backends. + + Priority is given to backends listed earlier. + + Parameters + ---------- + algos : list of backend names + This controls "algorithms" such as ``nx.pagerank`` that don't return a graph. + generators : list of backend names + This controls "generators" such as ``nx.from_pandas_edgelist`` that return a graph. 
+ kwargs : variadic keyword arguments of function name to list of backend names + This allows each function to be configured separately and will override the config + in ``algos`` or ``generators`` if present. The dispatchable function name may be + gotten from the ``.name`` attribute such as ``nx.pagerank.name`` (it's typically + the same as the name of the function). + """ + + algos: list[str] + generators: list[str] + + def _on_setattr(self, key, value): + from .backends import _registered_algorithms, backend_info + + if key in {"algos", "generators"}: + pass + elif key not in _registered_algorithms: + raise AttributeError( + f"Invalid config name: {key!r}. Expected 'algos', 'generators', or a name " + "of a dispatchable function (e.g. `.name` attribute of the function)." + ) + if not (isinstance(value, list) and all(isinstance(x, str) for x in value)): + raise TypeError( + f"{key!r} config must be a list of backend names; got {value!r}" + ) + if missing := {x for x in value if x not in backend_info}: + missing = ", ".join(map(repr, sorted(missing))) + raise ValueError(f"Unknown backend when setting {key!r}: {missing}") + return value + + def _on_delattr(self, key): + if key in {"algos", "generators"}: + raise TypeError(f"{key!r} configuration item can't be deleted.") + + +class NetworkXConfig(Config): + """Configuration for NetworkX that controls behaviors such as how to use backends. + + Attribute and bracket notation are supported for getting and setting configurations:: + + >>> nx.config.backend_priority == nx.config["backend_priority"] + True + + Parameters + ---------- + backend_priority : list of backend names or dict or BackendPriorities + Enable automatic conversion of graphs to backend graphs for functions + implemented by the backend. Priority is given to backends listed earlier. + This is a nested configuration with keys ``algos``, ``generators``, and, + optionally, function names. Setting this value to a list of backend names + will set ``nx.config.backend_priority.algos``. For more information, see + ``help(nx.config.backend_priority)``. Default is empty list. + + backends : Config mapping of backend names to backend Config + The keys of the Config mapping are names of all installed NetworkX backends, + and the values are their configurations as Config mappings. + + cache_converted_graphs : bool + If True, then save converted graphs to the cache of the input graph. Graph + conversion may occur when automatically using a backend from `backend_priority` + or when using the `backend=` keyword argument to a function call. Caching can + improve performance by avoiding repeated conversions, but it uses more memory. + Care should be taken to not manually mutate a graph that has cached graphs; for + example, ``G[u][v][k] = val`` changes the graph, but does not clear the cache. + Using methods such as ``G.add_edge(u, v, weight=val)`` will clear the cache to + keep it consistent. ``G.__networkx_cache__.clear()`` manually clears the cache. + Default is True. + + fallback_to_nx : bool + If True, then "fall back" and run with the default "networkx" implementation + for dispatchable functions not implemented by backends of input graphs. When a + backend graph is passed to a dispatchable function, the default behavior is to + use the implementation from that backend if possible and raise if not. Enabling + ``fallback_to_nx`` makes the networkx implementation the fallback to use instead + of raising, and will convert the backend graph to a networkx-compatible graph. + Default is False. 
+ + warnings_to_ignore : set of strings + Control which warnings from NetworkX are not emitted. Valid elements: + + - `"cache"`: when a cached value is used from ``G.__networkx_cache__``. + + Notes + ----- + Environment variables may be used to control some default configurations: + + - ``NETWORKX_BACKEND_PRIORITY``: set ``backend_priority.algos`` from comma-separated names. + - ``NETWORKX_CACHE_CONVERTED_GRAPHS``: set ``cache_converted_graphs`` to True if nonempty. + - ``NETWORKX_FALLBACK_TO_NX``: set ``fallback_to_nx`` to True if nonempty. + - ``NETWORKX_WARNINGS_TO_IGNORE``: set `warnings_to_ignore` from comma-separated names. + + and can be used for finer control of ``backend_priority`` such as: + + - ``NETWORKX_BACKEND_PRIORITY_ALGOS``: same as ``NETWORKX_BACKEND_PRIORITY`` + to set ``backend_priority.algos``. + + This is a global configuration. Use with caution when using from multiple threads. + """ + + backend_priority: BackendPriorities + backends: Config + cache_converted_graphs: bool + fallback_to_nx: bool + warnings_to_ignore: set[str] + + def _on_setattr(self, key, value): + from .backends import backend_info + + if key == "backend_priority": + if isinstance(value, list): + # `config.backend_priority = [backend]` sets `backend_priority.algos` + value = BackendPriorities( + **dict( + self.backend_priority, + algos=self.backend_priority._on_setattr("algos", value), + ) + ) + elif isinstance(value, dict): + kwargs = value + value = BackendPriorities(algos=[], generators=[]) + for key, val in kwargs.items(): + setattr(value, key, val) + elif not isinstance(value, BackendPriorities): + raise TypeError( + f"{key!r} config must be a dict of lists of backend names; got {value!r}" + ) + elif key == "backends": + if not ( + isinstance(value, Config) + and all(isinstance(key, str) for key in value) + and all(isinstance(val, Config) for val in value.values()) + ): + raise TypeError( + f"{key!r} config must be a Config of backend configs; got {value!r}" + ) + if missing := {x for x in value if x not in backend_info}: + missing = ", ".join(map(repr, sorted(missing))) + raise ValueError(f"Unknown backend when setting {key!r}: {missing}") + elif key in {"cache_converted_graphs", "fallback_to_nx"}: + if not isinstance(value, bool): + raise TypeError(f"{key!r} config must be True or False; got {value!r}") + elif key == "warnings_to_ignore": + if not (isinstance(value, set) and all(isinstance(x, str) for x in value)): + raise TypeError( + f"{key!r} config must be a set of warning names; got {value!r}" + ) + known_warnings = {"cache"} + if missing := {x for x in value if x not in known_warnings}: + missing = ", ".join(map(repr, sorted(missing))) + raise ValueError( + f"Unknown warning when setting {key!r}: {missing}. 
Valid entries: " + + ", ".join(sorted(known_warnings)) + ) + return value diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/decorators.py b/.venv/lib/python3.12/site-packages/networkx/utils/decorators.py new file mode 100644 index 0000000..f222744 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/decorators.py @@ -0,0 +1,1233 @@ +import bz2 +import collections +import gzip +import inspect +import itertools +import re +from collections import defaultdict +from os.path import splitext +from pathlib import Path + +import networkx as nx +from networkx.utils import create_py_random_state, create_random_state + +__all__ = [ + "not_implemented_for", + "open_file", + "nodes_or_number", + "np_random_state", + "py_random_state", + "argmap", +] + + +def not_implemented_for(*graph_types): + """Decorator to mark algorithms as not implemented + + Parameters + ---------- + graph_types : container of strings + Entries must be one of "directed", "undirected", "multigraph", or "graph". + + Returns + ------- + _require : function + The decorated function. + + Raises + ------ + NetworkXNotImplemented + If any of the packages cannot be imported + + Notes + ----- + Multiple types are joined logically with "and". + For "or" use multiple @not_implemented_for() lines. + + Examples + -------- + Decorate functions like this:: + + @not_implemented_for("directed") + def sp_function(G): + pass + + + # rule out MultiDiGraph + @not_implemented_for("directed", "multigraph") + def sp_np_function(G): + pass + + + # rule out all except DiGraph + @not_implemented_for("undirected") + @not_implemented_for("multigraph") + def sp_np_function(G): + pass + """ + if ("directed" in graph_types) and ("undirected" in graph_types): + raise ValueError("Function not implemented on directed AND undirected graphs?") + if ("multigraph" in graph_types) and ("graph" in graph_types): + raise ValueError("Function not implemented on graph AND multigraphs?") + if not set(graph_types) < {"directed", "undirected", "multigraph", "graph"}: + raise KeyError( + "use one or more of directed, undirected, multigraph, graph. " + f"You used {graph_types}" + ) + + # 3-way logic: True if "directed" input, False if "undirected" input, else None + dval = ("directed" in graph_types) or "undirected" not in graph_types and None + mval = ("multigraph" in graph_types) or "graph" not in graph_types and None + errmsg = f"not implemented for {' '.join(graph_types)} type" + + def _not_implemented_for(g): + if (mval is None or mval == g.is_multigraph()) and ( + dval is None or dval == g.is_directed() + ): + raise nx.NetworkXNotImplemented(errmsg) + + return g + + return argmap(_not_implemented_for, 0) + + +# To handle new extensions, define a function accepting a `path` and `mode`. +# Then add the extension to _dispatch_dict. +fopeners = { + ".gz": gzip.open, + ".gzip": gzip.open, + ".bz2": bz2.BZ2File, +} +_dispatch_dict = defaultdict(lambda: open, **fopeners) + + +def open_file(path_arg, mode="r"): + """Decorator to ensure clean opening and closing of files. + + Parameters + ---------- + path_arg : string or int + Name or index of the argument that is a path. + + mode : str + String for opening mode. + + Returns + ------- + _open_file : function + Function which cleanly executes the io. 
+ + Examples + -------- + Decorate functions like this:: + + @open_file(0, "r") + def read_function(pathname): + pass + + + @open_file(1, "w") + def write_function(G, pathname): + pass + + + @open_file(1, "w") + def write_function(G, pathname="graph.dot"): + pass + + + @open_file("pathname", "w") + def write_function(G, pathname="graph.dot"): + pass + + + @open_file("path", "w+") + def another_function(arg, **kwargs): + path = kwargs["path"] + pass + + Notes + ----- + Note that this decorator solves the problem when a path argument is + specified as a string, but it does not handle the situation when the + function wants to accept a default of None (and then handle it). + + Here is an example of how to handle this case:: + + @open_file("path") + def some_function(arg1, arg2, path=None): + if path is None: + fobj = tempfile.NamedTemporaryFile(delete=False) + else: + # `path` could have been a string or file object or something + # similar. In any event, the decorator has given us a file object + # and it will close it for us, if it should. + fobj = path + + try: + fobj.write("blah") + finally: + if path is None: + fobj.close() + + Normally, we'd want to use "with" to ensure that fobj gets closed. + However, the decorator will make `path` a file object for us, + and using "with" would undesirably close that file object. + Instead, we use a try block, as shown above. + When we exit the function, fobj will be closed, if it should be, by the decorator. + """ + + def _open_file(path): + # Now we have the path_arg. There are two types of input to consider: + # 1) string representing a path that should be opened + # 2) an already opened file object + if isinstance(path, str): + ext = splitext(path)[1] + elif isinstance(path, Path): + # path is a pathlib reference to a filename + ext = path.suffix + path = str(path) + else: + # could be None, or a file handle, in which case the algorithm will deal with it + return path, lambda: None + + fobj = _dispatch_dict[ext](path, mode=mode) + return fobj, lambda: fobj.close() + + return argmap(_open_file, path_arg, try_finally=True) + + +def nodes_or_number(which_args): + """Decorator to allow number of nodes or container of nodes. + + With this decorator, the specified argument can be either a number or a container + of nodes. If it is a number, the nodes used are `range(n)`. + This allows `nx.complete_graph(50)` in place of `nx.complete_graph(list(range(50)))`. + And it also allows `nx.complete_graph(any_list_of_nodes)`. + + Parameters + ---------- + which_args : string or int or sequence of strings or ints + If string, the name of the argument to be treated. + If int, the index of the argument to be treated. + If more than one node argument is allowed, can be a list of locations. + + Returns + ------- + _nodes_or_numbers : function + Function which replaces int args with ranges. + + Examples + -------- + Decorate functions like this:: + + @nodes_or_number("nodes") + def empty_graph(nodes): + # nodes is converted to a list of nodes + + @nodes_or_number(0) + def empty_graph(nodes): + # nodes is converted to a list of nodes + + @nodes_or_number(["m1", "m2"]) + def grid_2d_graph(m1, m2, periodic=False): + # m1 and m2 are each converted to a list of nodes + + @nodes_or_number([0, 1]) + def grid_2d_graph(m1, m2, periodic=False): + # m1 and m2 are each converted to a list of nodes + + @nodes_or_number(1) + def full_rary_tree(r, n) + # presumably r is a number. It is not handled by this decorator. 
+ # n is converted to a list of nodes + """ + + def _nodes_or_number(n): + try: + nodes = list(range(n)) + except TypeError: + nodes = tuple(n) + else: + if n < 0: + raise nx.NetworkXError(f"Negative number of nodes not valid: {n}") + return (n, nodes) + + try: + iter_wa = iter(which_args) + except TypeError: + iter_wa = (which_args,) + + return argmap(_nodes_or_number, *iter_wa) + + +def np_random_state(random_state_argument): + """Decorator to generate a numpy RandomState or Generator instance. + + The decorator processes the argument indicated by `random_state_argument` + using :func:`nx.utils.create_random_state`. + The argument value can be a seed (integer), or a `numpy.random.RandomState` + or `numpy.random.RandomState` instance or (`None` or `numpy.random`). + The latter two options use the global random number generator for `numpy.random`. + + The returned instance is a `numpy.random.RandomState` or `numpy.random.Generator`. + + Parameters + ---------- + random_state_argument : string or int + The name or index of the argument to be converted + to a `numpy.random.RandomState` instance. + + Returns + ------- + _random_state : function + Function whose random_state keyword argument is a RandomState instance. + + Examples + -------- + Decorate functions like this:: + + @np_random_state("seed") + def random_float(seed=None): + return seed.rand() + + + @np_random_state(0) + def random_float(rng=None): + return rng.rand() + + + @np_random_state(1) + def random_array(dims, random_state=1): + return random_state.rand(*dims) + + See Also + -------- + py_random_state + """ + return argmap(create_random_state, random_state_argument) + + +def py_random_state(random_state_argument): + """Decorator to generate a random.Random instance (or equiv). + + This decorator processes `random_state_argument` using + :func:`nx.utils.create_py_random_state`. + The input value can be a seed (integer), or a random number generator:: + + If int, return a random.Random instance set with seed=int. + If random.Random instance, return it. + If None or the `random` package, return the global random number + generator used by `random`. + If np.random package, or the default numpy RandomState instance, + return the default numpy random number generator wrapped in a + `PythonRandomViaNumpyBits` class. + If np.random.Generator instance, return it wrapped in a + `PythonRandomViaNumpyBits` class. + + # Legacy options + If np.random.RandomState instance, return it wrapped in a + `PythonRandomInterface` class. + If a `PythonRandomInterface` instance, return it + + Parameters + ---------- + random_state_argument : string or int + The name of the argument or the index of the argument in args that is + to be converted to the random.Random instance or numpy.random.RandomState + instance that mimics basic methods of random.Random. + + Returns + ------- + _random_state : function + Function whose random_state_argument is converted to a Random instance. 
+ + Examples + -------- + Decorate functions like this:: + + @py_random_state("random_state") + def random_float(random_state=None): + return random_state.rand() + + + @py_random_state(0) + def random_float(rng=None): + return rng.rand() + + + @py_random_state(1) + def random_array(dims, seed=12345): + return seed.rand(*dims) + + See Also + -------- + np_random_state + """ + + return argmap(create_py_random_state, random_state_argument) + + +class argmap: + """A decorator to apply a map to arguments before calling the function + + This class provides a decorator that maps (transforms) arguments of the function + before the function is called. Thus for example, we have similar code + in many functions to determine whether an argument is the number of nodes + to be created, or a list of nodes to be handled. The decorator provides + the code to accept either -- transforming the indicated argument into a + list of nodes before the actual function is called. + + This decorator class allows us to process single or multiple arguments. + The arguments to be processed can be specified by string, naming the argument, + or by index, specifying the item in the args list. + + Parameters + ---------- + func : callable + The function to apply to arguments + + *args : iterable of (int, str or tuple) + A list of parameters, specified either as strings (their names), ints + (numerical indices) or tuples, which may contain ints, strings, and + (recursively) tuples. Each indicates which parameters the decorator + should map. Tuples indicate that the map function takes (and returns) + multiple parameters in the same order and nested structure as indicated + here. + + try_finally : bool (default: False) + When True, wrap the function call in a try-finally block with code + for the finally block created by `func`. This is used when the map + function constructs an object (like a file handle) that requires + post-processing (like closing). + + Note: try_finally decorators cannot be used to decorate generator + functions. + + Examples + -------- + Most of these examples use `@argmap(...)` to apply the decorator to + the function defined on the next line. + In the NetworkX codebase however, `argmap` is used within a function to + construct a decorator. That is, the decorator defines a mapping function + and then uses `argmap` to build and return a decorated function. + A simple example is a decorator that specifies which currency to report money. + The decorator (named `convert_to`) would be used like:: + + @convert_to("US_Dollars", "income") + def show_me_the_money(name, income): + print(f"{name} : {income}") + + And the code to create the decorator might be:: + + def convert_to(currency, which_arg): + def _convert(amount): + if amount.currency != currency: + amount = amount.to_currency(currency) + return amount + + return argmap(_convert, which_arg) + + Despite this common idiom for argmap, most of the following examples + use the `@argmap(...)` idiom to save space. + + Here's an example use of argmap to sum the elements of two of the functions + arguments. 
The decorated function:: + + @argmap(sum, "xlist", "zlist") + def foo(xlist, y, zlist): + return xlist - y + zlist + + is syntactic sugar for:: + + def foo(xlist, y, zlist): + x = sum(xlist) + z = sum(zlist) + return x - y + z + + and is equivalent to (using argument indexes):: + + @argmap(sum, "xlist", 2) + def foo(xlist, y, zlist): + return xlist - y + zlist + + or:: + + @argmap(sum, "zlist", 0) + def foo(xlist, y, zlist): + return xlist - y + zlist + + Transforming functions can be applied to multiple arguments, such as:: + + def swap(x, y): + return y, x + + # the 2-tuple tells argmap that the map `swap` has 2 inputs/outputs. + @argmap(swap, ("a", "b")): + def foo(a, b, c): + return a / b * c + + is equivalent to:: + + def foo(a, b, c): + a, b = swap(a, b) + return a / b * c + + More generally, the applied arguments can be nested tuples of strings or ints. + The syntax `@argmap(some_func, ("a", ("b", "c")))` would expect `some_func` to + accept 2 inputs with the second expected to be a 2-tuple. It should then return + 2 outputs with the second a 2-tuple. The returns values would replace input "a" + "b" and "c" respectively. Similarly for `@argmap(some_func, (0, ("b", 2)))`. + + Also, note that an index larger than the number of named parameters is allowed + for variadic functions. For example:: + + def double(a): + return 2 * a + + + @argmap(double, 3) + def overflow(a, *args): + return a, args + + + print(overflow(1, 2, 3, 4, 5, 6)) # output is 1, (2, 3, 8, 5, 6) + + **Try Finally** + + Additionally, this `argmap` class can be used to create a decorator that + initiates a try...finally block. The decorator must be written to return + both the transformed argument and a closing function. + This feature was included to enable the `open_file` decorator which might + need to close the file or not depending on whether it had to open that file. + This feature uses the keyword-only `try_finally` argument to `@argmap`. 
+ + For example this map opens a file and then makes sure it is closed:: + + def open_file(fn): + f = open(fn) + return f, lambda: f.close() + + The decorator applies that to the function `foo`:: + + @argmap(open_file, "file", try_finally=True) + def foo(file): + print(file.read()) + + is syntactic sugar for:: + + def foo(file): + file, close_file = open_file(file) + try: + print(file.read()) + finally: + close_file() + + and is equivalent to (using indexes):: + + @argmap(open_file, 0, try_finally=True) + def foo(file): + print(file.read()) + + Here's an example of the try_finally feature used to create a decorator:: + + def my_closing_decorator(which_arg): + def _opener(path): + if path is None: + path = open(path) + fclose = path.close + else: + # assume `path` handles the closing + fclose = lambda: None + return path, fclose + + return argmap(_opener, which_arg, try_finally=True) + + which can then be used as:: + + @my_closing_decorator("file") + def fancy_reader(file=None): + # this code doesn't need to worry about closing the file + print(file.read()) + + Decorators with try_finally = True cannot be used with generator functions, + because the `finally` block is evaluated before the generator is exhausted:: + + @argmap(open_file, "file", try_finally=True) + def file_to_lines(file): + for line in file.readlines(): + yield line + + is equivalent to:: + + def file_to_lines_wrapped(file): + for line in file.readlines(): + yield line + + + def file_to_lines_wrapper(file): + try: + file = open_file(file) + return file_to_lines_wrapped(file) + finally: + file.close() + + which behaves similarly to:: + + def file_to_lines_whoops(file): + file = open_file(file) + file.close() + for line in file.readlines(): + yield line + + because the `finally` block of `file_to_lines_wrapper` is executed before + the caller has a chance to exhaust the iterator. + + Notes + ----- + An object of this class is callable and intended to be used when + defining a decorator. Generally, a decorator takes a function as input + and constructs a function as output. Specifically, an `argmap` object + returns the input function decorated/wrapped so that specified arguments + are mapped (transformed) to new values before the decorated function is called. + + As an overview, the argmap object returns a new function with all the + dunder values of the original function (like `__doc__`, `__name__`, etc). + Code for this decorated function is built based on the original function's + signature. It starts by mapping the input arguments to potentially new + values. Then it calls the decorated function with these new values in place + of the indicated arguments that have been mapped. The return value of the + original function is then returned. This new function is the function that + is actually called by the user. + + Three additional features are provided. + 1) The code is lazily compiled. That is, the new function is returned + as an object without the code compiled, but with all information + needed so it can be compiled upon it's first invocation. This saves + time on import at the cost of additional time on the first call of + the function. Subsequent calls are then just as fast as normal. + + 2) If the "try_finally" keyword-only argument is True, a try block + follows each mapped argument, matched on the other side of the wrapped + call, by a finally block closing that mapping. We expect func to return + a 2-tuple: the mapped value and a function to be called in the finally + clause. 
This feature was included so the `open_file` decorator could + provide a file handle to the decorated function and close the file handle + after the function call. It even keeps track of whether to close the file + handle or not based on whether it had to open the file or the input was + already open. So, the decorated function does not need to include any + code to open or close files. + + 3) The maps applied can process multiple arguments. For example, + you could swap two arguments using a mapping, or transform + them to their sum and their difference. This was included to allow + a decorator in the `quality.py` module that checks that an input + `partition` is a valid partition of the nodes of the input graph `G`. + In this example, the map has inputs `(G, partition)`. After checking + for a valid partition, the map either raises an exception or leaves + the inputs unchanged. Thus many functions that make this check can + use the decorator rather than copy the checking code into each function. + More complicated nested argument structures are described below. + + The remaining notes describe the code structure and methods for this + class in broad terms to aid in understanding how to use it. + + Instantiating an `argmap` object simply stores the mapping function and + the input identifiers of which arguments to map. The resulting decorator + is ready to use this map to decorate any function. Calling that object + (`argmap.__call__`, but usually done via `@my_decorator`) a lazily + compiled thin wrapper of the decorated function is constructed, + wrapped with the necessary function dunder attributes like `__doc__` + and `__name__`. That thinly wrapped function is returned as the + decorated function. When that decorated function is called, the thin + wrapper of code calls `argmap._lazy_compile` which compiles the decorated + function (using `argmap.compile`) and replaces the code of the thin + wrapper with the newly compiled code. This saves the compilation step + every import of networkx, at the cost of compiling upon the first call + to the decorated function. + + When the decorated function is compiled, the code is recursively assembled + using the `argmap.assemble` method. The recursive nature is needed in + case of nested decorators. The result of the assembly is a number of + useful objects. + + sig : the function signature of the original decorated function as + constructed by :func:`argmap.signature`. This is constructed + using `inspect.signature` but enhanced with attribute + strings `sig_def` and `sig_call`, and other information + specific to mapping arguments of this function. + This information is used to construct a string of code defining + the new decorated function. + + wrapped_name : a unique internally used name constructed by argmap + for the decorated function. + + functions : a dict of the functions used inside the code of this + decorated function, to be used as `globals` in `exec`. + This dict is recursively updated to allow for nested decorating. + + mapblock : code (as a list of strings) to map the incoming argument + values to their mapped values. + + finallys : code (as a list of strings) to provide the possibly nested + set of finally clauses if needed. + + mutable_args : a bool indicating whether the `sig.args` tuple should be + converted to a list so mutation can occur. + + After this recursive assembly process, the `argmap.compile` method + constructs code (as strings) to convert the tuple `sig.args` to a list + if needed. 
It joins the defining code with appropriate indents and + compiles the result. Finally, this code is evaluated and the original + wrapper's implementation is replaced with the compiled version (see + `argmap._lazy_compile` for more details). + + Other `argmap` methods include `_name` and `_count` which allow internally + generated names to be unique within a python session. + The methods `_flatten` and `_indent` process the nested lists of strings + into properly indented python code ready to be compiled. + + More complicated nested tuples of arguments also allowed though + usually not used. For the simple 2 argument case, the argmap + input ("a", "b") implies the mapping function will take 2 arguments + and return a 2-tuple of mapped values. A more complicated example + with argmap input `("a", ("b", "c"))` requires the mapping function + take 2 inputs, with the second being a 2-tuple. It then must output + the 3 mapped values in the same nested structure `(newa, (newb, newc))`. + This level of generality is not often needed, but was convenient + to implement when handling the multiple arguments. + + See Also + -------- + not_implemented_for + open_file + nodes_or_number + py_random_state + networkx.algorithms.community.quality.require_partition + + """ + + def __init__(self, func, *args, try_finally=False): + self._func = func + self._args = args + self._finally = try_finally + + @staticmethod + def _lazy_compile(func): + """Compile the source of a wrapped function + + Assemble and compile the decorated function, and intrusively replace its + code with the compiled version's. The thinly wrapped function becomes + the decorated function. + + Parameters + ---------- + func : callable + A function returned by argmap.__call__ which is in the process + of being called for the first time. + + Returns + ------- + func : callable + The same function, with a new __code__ object. + + Notes + ----- + It was observed in NetworkX issue #4732 [1] that the import time of + NetworkX was significantly bloated by the use of decorators: over half + of the import time was being spent decorating functions. This was + somewhat improved by a change made to the `decorator` library, at the + cost of a relatively heavy-weight call to `inspect.Signature.bind` + for each call to the decorated function. + + The workaround we arrived at is to do minimal work at the time of + decoration. When the decorated function is called for the first time, + we compile a function with the same function signature as the wrapped + function. The resulting decorated function is faster than one made by + the `decorator` library, so that the overhead of the first call is + 'paid off' after a small number of calls. + + References + ---------- + + [1] https://github.com/networkx/networkx/issues/4732 + + """ + real_func = func.__argmap__.compile(func.__wrapped__) + func.__code__ = real_func.__code__ + func.__globals__.update(real_func.__globals__) + func.__dict__.update(real_func.__dict__) + return func + + def __call__(self, f): + """Construct a lazily decorated wrapper of f. + + The decorated function will be compiled when it is called for the first time, + and it will replace its own __code__ object so subsequent calls are fast. + + Parameters + ---------- + f : callable + A function to be decorated. + + Returns + ------- + func : callable + The decorated function. 
+ + See Also + -------- + argmap._lazy_compile + """ + + def func(*args, __wrapper=None, **kwargs): + return argmap._lazy_compile(__wrapper)(*args, **kwargs) + + # standard function-wrapping stuff + func.__name__ = f.__name__ + func.__doc__ = f.__doc__ + func.__defaults__ = f.__defaults__ + func.__kwdefaults__.update(f.__kwdefaults__ or {}) + func.__module__ = f.__module__ + func.__qualname__ = f.__qualname__ + func.__dict__.update(f.__dict__) + func.__wrapped__ = f + + # now that we've wrapped f, we may have picked up some __dict__ or + # __kwdefaults__ items that were set by a previous argmap. Thus, we set + # these values after those update() calls. + + # If we attempt to access func from within itself, that happens through + # a closure -- which trips an error when we replace func.__code__. The + # standard workaround for functions which can't see themselves is to use + # a Y-combinator, as we do here. + func.__kwdefaults__["_argmap__wrapper"] = func + + # this self-reference is here because functools.wraps preserves + # everything in __dict__, and we don't want to mistake a non-argmap + # wrapper for an argmap wrapper + func.__self__ = func + + # this is used to variously call self.assemble and self.compile + func.__argmap__ = self + + if hasattr(f, "__argmap__"): + func.__is_generator = f.__is_generator + else: + func.__is_generator = inspect.isgeneratorfunction(f) + + if self._finally and func.__is_generator: + raise nx.NetworkXError("argmap cannot decorate generators with try_finally") + + return func + + __count = 0 + + @classmethod + def _count(cls): + """Maintain a globally-unique identifier for function names and "file" names + + Note that this counter is a class method reporting a class variable + so the count is unique within a Python session. It could differ from + session to session for a specific decorator depending on the order + that the decorators are created. But that doesn't disrupt `argmap`. + + This is used in two places: to construct unique variable names + in the `_name` method and to construct unique fictitious filenames + in the `_compile` method. + + Returns + ------- + count : int + An integer unique to this Python session (simply counts from zero) + """ + cls.__count += 1 + return cls.__count + + _bad_chars = re.compile("[^a-zA-Z0-9_]") + + @classmethod + def _name(cls, f): + """Mangle the name of a function to be unique but somewhat human-readable + + The names are unique within a Python session and set using `_count`. + + Parameters + ---------- + f : str or object + + Returns + ------- + name : str + The mangled version of `f.__name__` (if `f.__name__` exists) or `f` + + """ + f = f.__name__ if hasattr(f, "__name__") else f + fname = re.sub(cls._bad_chars, "_", f) + return f"argmap_{fname}_{cls._count()}" + + def compile(self, f): + """Compile the decorated function. + + Called once for a given decorated function -- collects the code from all + argmap decorators in the stack, and compiles the decorated function. + + Much of the work done here uses the `assemble` method to allow recursive + treatment of multiple argmap decorators on a single decorated function. + That flattens the argmap decorators, collects the source code to construct + a single decorated function, then compiles/executes/returns that function. + + The source code for the decorated function is stored as an attribute + `_code` on the function object itself. 
+
+        Note that Python's `compile` function requires a filename, but this
+        code is constructed without a file, so a fictitious filename is used
+        to describe where the function comes from. The name is something like:
+        "argmap compilation 4".
+
+        Parameters
+        ----------
+        f : callable
+            The function to be decorated
+
+        Returns
+        -------
+        func : callable
+            The decorated function
+
+        """
+        sig, wrapped_name, functions, mapblock, finallys, mutable_args = self.assemble(
+            f
+        )
+
+        call = f"{sig.call_sig.format(wrapped_name)}#"
+        mut_args = f"{sig.args} = list({sig.args})" if mutable_args else ""
+        body = argmap._indent(sig.def_sig, mut_args, mapblock, call, finallys)
+        code = "\n".join(body)
+
+        locl = {}
+        globl = dict(functions.values())
+        filename = f"{self.__class__} compilation {self._count()}"
+        compiled = compile(code, filename, "exec")
+        exec(compiled, globl, locl)
+        func = locl[sig.name]
+        func._code = code
+        return func
+
+    def assemble(self, f):
+        """Collects components of the source for the decorated function wrapping f.
+
+        If `f` has multiple argmap decorators, we recursively assemble the stack of
+        decorators into a single flattened function.
+
+        This method is part of the `compile` method's process yet separated
+        from that method to allow recursive processing. The outputs are
+        strings, dictionaries and lists that collect needed info to
+        flatten any nested argmap-decoration.
+
+        Parameters
+        ----------
+        f : callable
+            The function to be decorated. If f is argmapped, we assemble it.
+
+        Returns
+        -------
+        sig : argmap.Signature
+            The function signature as an `argmap.Signature` object.
+        wrapped_name : str
+            The mangled name used to represent the wrapped function in the code
+            being assembled.
+        functions : dict
+            A dictionary mapping id(g) -> (mangled_name(g), g) for functions g
+            referred to in the code being assembled. These need to be present
+            in the ``globals`` scope of ``exec`` when defining the decorated
+            function.
+        mapblock : list of lists and/or strings
+            Code that implements mapping of parameters including any try blocks
+            if needed. This code will precede the decorated function call.
+        finallys : list of lists and/or strings
+            Code that implements the finally blocks to post-process the
+            arguments (usually close any files if needed) after the
+            decorated function is called.
+        mutable_args : bool
+            True if the decorator needs to modify positional arguments
+            via their indices. The compile method then turns the argument
+            tuple into a list so that the arguments can be modified.
+        """
+
+        # first, we check if f is already argmapped -- if that's the case,
+        # build up the function recursively.
+        # > mapblock is generally a list of function calls of the sort
+        #     arg = func(arg)
+        # in addition to some try-blocks if needed.
+        # > finallys is a recursive list of finally blocks of the sort
+        #         finally:
+        #             close_func_1()
+        #     finally:
+        #         close_func_2()
+        # > functions is a dict of functions used in the scope of our decorated
+        # function. It will be used to construct globals used in compilation.
+        # We make functions[id(f)] = name_of_f, f to ensure that a given
+        # function is stored and named exactly once even if called by
+        # nested decorators.
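+        #
+        # As a rough illustration (names invented), two stacked try_finally
+        # argmaps over f(x) assemble into code shaped like:
+        #     x, final_1 = mapper_1(x)        # from the decorator nearest f
+        #     try:
+        #         x, final_2 = mapper_2(x)    # from the next decorator out
+        #         try:
+        #             return wrapped_f(x)
+        #         finally:
+        #             final_2()
+        #     finally:
+        #         final_1()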
+ if hasattr(f, "__argmap__") and f.__self__ is f: + ( + sig, + wrapped_name, + functions, + mapblock, + finallys, + mutable_args, + ) = f.__argmap__.assemble(f.__wrapped__) + functions = dict(functions) # shallow-copy just in case + else: + sig = self.signature(f) + wrapped_name = self._name(f) + mapblock, finallys = [], [] + functions = {id(f): (wrapped_name, f)} + mutable_args = False + + if id(self._func) in functions: + fname, _ = functions[id(self._func)] + else: + fname, _ = functions[id(self._func)] = self._name(self._func), self._func + + # this is a bit complicated -- we can call functions with a variety of + # nested arguments, so long as their input and output are tuples with + # the same nested structure. e.g. ("a", "b") maps arguments a and b. + # A more complicated nesting like (0, (3, 4)) maps arguments 0, 3, 4 + # expecting the mapping to output new values in the same nested shape. + # The ability to argmap multiple arguments was necessary for + # the decorator `nx.algorithms.community.quality.require_partition`, and + # while we're not taking full advantage of the ability to handle + # multiply-nested tuples, it was convenient to implement this in + # generality because the recursive call to `get_name` is necessary in + # any case. + applied = set() + + def get_name(arg, first=True): + nonlocal mutable_args + if isinstance(arg, tuple): + name = ", ".join(get_name(x, False) for x in arg) + return name if first else f"({name})" + if arg in applied: + raise nx.NetworkXError(f"argument {arg} is specified multiple times") + applied.add(arg) + if arg in sig.names: + return sig.names[arg] + elif isinstance(arg, str): + if sig.kwargs is None: + raise nx.NetworkXError( + f"name {arg} is not a named parameter and this function doesn't have kwargs" + ) + return f"{sig.kwargs}[{arg!r}]" + else: + if sig.args is None: + raise nx.NetworkXError( + f"index {arg} not a parameter index and this function doesn't have args" + ) + mutable_args = True + return f"{sig.args}[{arg - sig.n_positional}]" + + if self._finally: + # here's where we handle try_finally decorators. Such a decorator + # returns a mapped argument and a function to be called in a + # finally block. This feature was required by the open_file + # decorator. The below generates the code + # + # name, final = func(name) #<--append to mapblock + # try: #<--append to mapblock + # ... more argmapping and try blocks + # return WRAPPED_FUNCTION(...) + # ... more finally blocks + # finally: #<--prepend to finallys + # final() #<--prepend to finallys + # + for a in self._args: + name = get_name(a) + final = self._name(name) + mapblock.append(f"{name}, {final} = {fname}({name})") + mapblock.append("try:") + finallys = ["finally:", f"{final}()#", "#", finallys] + else: + mapblock.extend( + f"{name} = {fname}({name})" for name in map(get_name, self._args) + ) + + return sig, wrapped_name, functions, mapblock, finallys, mutable_args + + @classmethod + def signature(cls, f): + r"""Construct a Signature object describing `f` + + Compute a Signature so that we can write a function wrapping f with + the same signature and call-type. 
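+
+        For example (the mangled name is illustrative), given
+        ``def f(a, b, *args, c, **kw)`` the two generated code strings are::
+
+            def_sig:  "def argmap_f_4(a, b, *args, c, **kw):"
+            call_sig: "return {}(a, b, *args, c = c, **kw)"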
+
+        Parameters
+        ----------
+        f : callable
+            A function to be decorated
+
+        Returns
+        -------
+        sig : argmap.Signature
+            The Signature of f
+
+        Notes
+        -----
+        The Signature is a namedtuple with names:
+
+            name : a unique version of the name of the decorated function
+            signature : the inspect.signature of the decorated function
+            def_sig : a string used as code to define the new function
+            call_sig : a string used as code to call the decorated function
+            names : a dict keyed by argument name and index to the argument's name
+            n_positional : the number of positional arguments in the signature
+            args : the name of the VAR_POSITIONAL argument if any, i.e. \*theseargs
+            kwargs : the name of the VAR_KEYWORDS argument if any, i.e. \*\*kwargs
+
+        These named attributes of the signature are used in `assemble` and `compile`
+        to construct a string of source code for the decorated function.
+
+        """
+        sig = inspect.signature(f, follow_wrapped=False)
+        def_sig = []
+        call_sig = []
+        names = {}
+
+        kind = None
+        args = None
+        kwargs = None
+        npos = 0
+        for i, param in enumerate(sig.parameters.values()):
+            # parameters can be position-only, keyword-or-position, keyword-only
+            # in any combination, but only in the order as above. we do edge
+            # detection to add the appropriate punctuation
+            prev = kind
+            kind = param.kind
+            if prev == param.POSITIONAL_ONLY != kind:
+                # the last token was position-only, but this one isn't
+                def_sig.append("/")
+            if (
+                param.VAR_POSITIONAL
+                != prev
+                != param.KEYWORD_ONLY
+                == kind
+                != param.VAR_POSITIONAL
+            ):
+                # param is the first keyword-only arg and isn't starred
+                def_sig.append("*")
+
+            # star arguments as appropriate
+            if kind == param.VAR_POSITIONAL:
+                name = "*" + param.name
+                args = param.name
+                count = 0
+            elif kind == param.VAR_KEYWORD:
+                name = "**" + param.name
+                kwargs = param.name
+                count = 0
+            else:
+                names[i] = names[param.name] = param.name
+                name = param.name
+                count = 1
+
+            # assign to keyword-only args in the function call
+            if kind == param.KEYWORD_ONLY:
+                call_sig.append(f"{name} = {name}")
+            else:
+                npos += count
+                call_sig.append(name)
+
+            def_sig.append(name)
+
+        fname = cls._name(f)
+        def_sig = f"def {fname}({', '.join(def_sig)}):"
+
+        call_sig = f"return {{}}({', '.join(call_sig)})"
+
+        return cls.Signature(fname, sig, def_sig, call_sig, names, npos, args, kwargs)
+
+    Signature = collections.namedtuple(
+        "Signature",
+        [
+            "name",
+            "signature",
+            "def_sig",
+            "call_sig",
+            "names",
+            "n_positional",
+            "args",
+            "kwargs",
+        ],
+    )
+
+    @staticmethod
+    def _flatten(nestlist, visited):
+        """Flattens a recursive list of lists that doesn't have cyclic references
+
+        Parameters
+        ----------
+        nestlist : iterable
+            A recursive list of objects to be flattened into a single iterable
+
+        visited : set
+            A set of object ids which have been walked -- initialize with an
+            empty set
+
+        Yields
+        ------
+        Non-list objects contained in nestlist
+
+        """
+        for thing in nestlist:
+            if isinstance(thing, list):
+                if id(thing) in visited:
+                    raise ValueError("A cycle was found in nestlist. Be a tree.")
+                else:
+                    visited.add(id(thing))
+                yield from argmap._flatten(thing, visited)
+            else:
+                yield thing
+
+    _tabs = " " * 64
+
+    @staticmethod
+    def _indent(*lines):
+        """Indent list of code lines to make executable Python code
+
+        Indents a tree-recursive list of strings, following the rule that one
+        space is added to the tab after a line that ends in a colon, and one is
+        removed after a line that ends in a hashmark.
+
+        Parameters
+        ----------
+        *lines : lists and/or strings
+            A recursive list of strings to be assembled into properly indented
+            code.
+
+        Returns
+        -------
+        code : str
+
+        Examples
+        --------
+
+        argmap._indent(*["try:", "try:", "pass#", "finally:", "pass#", "#",
+                         "finally:", "pass#"])
+
+        renders to
+
+        '''try:
+         try:
+          pass#
+         finally:
+          pass#
+         #
+        finally:
+         pass#'''
+        """
+        depth = 0
+        for line in argmap._flatten(lines, set()):
+            yield f"{argmap._tabs[:depth]}{line}"
+            depth += (line[-1:] == ":") - (line[-1:] == "#")
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/heaps.py b/.venv/lib/python3.12/site-packages/networkx/utils/heaps.py
new file mode 100644
index 0000000..2d67dfd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/heaps.py
@@ -0,0 +1,338 @@
+"""
+Min-heaps.
+"""
+
+from heapq import heappop, heappush
+from itertools import count
+
+import networkx as nx
+
+__all__ = ["MinHeap", "PairingHeap", "BinaryHeap"]
+
+
+class MinHeap:
+    """Base class for min-heaps.
+
+    A MinHeap stores a collection of key-value pairs ordered by their values.
+    It supports querying the minimum pair, inserting a new pair, decreasing the
+    value in an existing pair and deleting the minimum pair.
+    """
+
+    class _Item:
+        """Used by subclasses to represent a key-value pair."""
+
+        __slots__ = ("key", "value")
+
+        def __init__(self, key, value):
+            self.key = key
+            self.value = value
+
+        def __repr__(self):
+            return repr((self.key, self.value))
+
+    def __init__(self):
+        """Initialize a new min-heap."""
+        self._dict = {}
+
+    def min(self):
+        """Query the minimum key-value pair.
+
+        Returns
+        -------
+        key, value : tuple
+            The key-value pair with the minimum value in the heap.
+
+        Raises
+        ------
+        NetworkXError
+            If the heap is empty.
+        """
+        raise NotImplementedError
+
+    def pop(self):
+        """Delete the minimum pair in the heap.
+
+        Returns
+        -------
+        key, value : tuple
+            The key-value pair with the minimum value in the heap.
+
+        Raises
+        ------
+        NetworkXError
+            If the heap is empty.
+        """
+        raise NotImplementedError
+
+    def get(self, key, default=None):
+        """Returns the value associated with a key.
+
+        Parameters
+        ----------
+        key : hashable object
+            The key to be looked up.
+
+        default : object
+            Default value to return if the key is not present in the heap.
+            Default value: None.
+
+        Returns
+        -------
+        value : object.
+            The value associated with the key.
+        """
+        raise NotImplementedError
+
+    def insert(self, key, value, allow_increase=False):
+        """Insert a new key-value pair or modify the value in an existing
+        pair.
+
+        Parameters
+        ----------
+        key : hashable object
+            The key.
+
+        value : object comparable with existing values.
+            The value.
+
+        allow_increase : bool
+            Whether the value is allowed to increase. If False, attempts to
+            increase an existing value have no effect. Default value: False.
+
+        Returns
+        -------
+        decreased : bool
+            True if a pair is inserted or the existing value is decreased.
+        """
+        raise NotImplementedError
+
+    def __nonzero__(self):
+        """Returns whether the heap is nonempty."""
+        return bool(self._dict)
+
+    def __bool__(self):
+        """Returns whether the heap is nonempty."""
+        return bool(self._dict)
+
+    def __len__(self):
+        """Returns the number of key-value pairs in the heap."""
+        return len(self._dict)
+
+    def __contains__(self, key):
+        """Returns whether a key exists in the heap.
+
+        Parameters
+        ----------
+        key : any hashable object.
+            The key to be looked up.
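+
+        Returns
+        -------
+        bool
+            True if the key is in the heap, False otherwise.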
+ """ + return key in self._dict + + +class PairingHeap(MinHeap): + """A pairing heap.""" + + class _Node(MinHeap._Item): + """A node in a pairing heap. + + A tree in a pairing heap is stored using the left-child, right-sibling + representation. + """ + + __slots__ = ("left", "next", "prev", "parent") + + def __init__(self, key, value): + super().__init__(key, value) + # The leftmost child. + self.left = None + # The next sibling. + self.next = None + # The previous sibling. + self.prev = None + # The parent. + self.parent = None + + def __init__(self): + """Initialize a pairing heap.""" + super().__init__() + self._root = None + + def min(self): + if self._root is None: + raise nx.NetworkXError("heap is empty.") + return (self._root.key, self._root.value) + + def pop(self): + if self._root is None: + raise nx.NetworkXError("heap is empty.") + min_node = self._root + self._root = self._merge_children(self._root) + del self._dict[min_node.key] + return (min_node.key, min_node.value) + + def get(self, key, default=None): + node = self._dict.get(key) + return node.value if node is not None else default + + def insert(self, key, value, allow_increase=False): + node = self._dict.get(key) + root = self._root + if node is not None: + if value < node.value: + node.value = value + if node is not root and value < node.parent.value: + self._cut(node) + self._root = self._link(root, node) + return True + elif allow_increase and value > node.value: + node.value = value + child = self._merge_children(node) + # Nonstandard step: Link the merged subtree with the root. See + # below for the standard step. + if child is not None: + self._root = self._link(self._root, child) + # Standard step: Perform a decrease followed by a pop as if the + # value were the smallest in the heap. Then insert the new + # value into the heap. + # if node is not root: + # self._cut(node) + # if child is not None: + # root = self._link(root, child) + # self._root = self._link(root, node) + # else: + # self._root = (self._link(node, child) + # if child is not None else node) + return False + else: + # Insert a new key. + node = self._Node(key, value) + self._dict[key] = node + self._root = self._link(root, node) if root is not None else node + return True + + def _link(self, root, other): + """Link two nodes, making the one with the smaller value the parent of + the other. + """ + if other.value < root.value: + root, other = other, root + next = root.left + other.next = next + if next is not None: + next.prev = other + other.prev = None + root.left = other + other.parent = root + return root + + def _merge_children(self, root): + """Merge the subtrees of the root using the standard two-pass method. + The resulting subtree is detached from the root. + """ + node = root.left + root.left = None + if node is not None: + link = self._link + # Pass 1: Merge pairs of consecutive subtrees from left to right. + # At the end of the pass, only the prev pointers of the resulting + # subtrees have meaningful values. The other pointers will be fixed + # in pass 2. + prev = None + while True: + next = node.next + if next is None: + node.prev = prev + break + next_next = next.next + node = link(node, next) + node.prev = prev + prev = node + if next_next is None: + break + node = next_next + # Pass 2: Successively merge the subtrees produced by pass 1 from + # right to left with the rightmost one. + prev = node.prev + while prev is not None: + prev_prev = prev.prev + node = link(prev, node) + prev = prev_prev + # Now node can become the new root. 
It has no parent or siblings.
+            node.prev = None
+            node.next = None
+            node.parent = None
+        return node
+
+    def _cut(self, node):
+        """Cut a node from its parent."""
+        prev = node.prev
+        next = node.next
+        if prev is not None:
+            prev.next = next
+        else:
+            node.parent.left = next
+        node.prev = None
+        if next is not None:
+            next.prev = prev
+        node.next = None
+        node.parent = None
+
+
+class BinaryHeap(MinHeap):
+    """A binary heap."""
+
+    def __init__(self):
+        """Initialize a binary heap."""
+        super().__init__()
+        self._heap = []
+        self._count = count()
+
+    def min(self):
+        dict = self._dict
+        if not dict:
+            raise nx.NetworkXError("heap is empty")
+        heap = self._heap
+        # Repeatedly remove stale key-value pairs until an up-to-date one is
+        # found.
+        while True:
+            value, _, key = heap[0]
+            if key in dict and value == dict[key]:
+                break
+            heappop(heap)
+        return (key, value)
+
+    def pop(self):
+        dict = self._dict
+        if not dict:
+            raise nx.NetworkXError("heap is empty")
+        heap = self._heap
+        # Repeatedly remove stale key-value pairs until an up-to-date one is
+        # found.
+        while True:
+            value, _, key = heap[0]
+            heappop(heap)
+            if key in dict and value == dict[key]:
+                break
+        del dict[key]
+        return (key, value)
+
+    def get(self, key, default=None):
+        return self._dict.get(key, default)
+
+    def insert(self, key, value, allow_increase=False):
+        dict = self._dict
+        if key in dict:
+            old_value = dict[key]
+            if value < old_value or (allow_increase and value > old_value):
+                # Since there is no way to efficiently obtain the location of a
+                # key-value pair in the heap, insert a new pair even if ones
+                # with the same key may already be present. Deem the old ones
+                # as stale and skip them when the minimum pair is queried.
+                dict[key] = value
+                heappush(self._heap, (value, next(self._count), key))
+                return value < old_value
+            return False
+        else:
+            dict[key] = value
+            heappush(self._heap, (value, next(self._count), key))
+            return True
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/mapped_queue.py b/.venv/lib/python3.12/site-packages/networkx/utils/mapped_queue.py
new file mode 100644
index 0000000..0dcea36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/mapped_queue.py
@@ -0,0 +1,297 @@
+"""Priority queue class with updatable priorities."""
+
+import heapq
+
+__all__ = ["MappedQueue"]
+
+
+class _HeapElement:
+    """This proxy class separates the heap element from its priority.
+
+    The idea is that using a 2-tuple (priority, element) works
+    for sorting, but not for dict lookup because priorities are
+    often floating point values so round-off can mess up equality.
+
+    So, we need inequalities to look at the priority (for sorting)
+    and equality (and hash) to look at the element to enable
+    updates to the priority.
+
+    Unfortunately, this class can be tricky to work with if you forget that
+    `__lt__` compares the priority while `__eq__` compares the element.
+    In `greedy_modularity_communities()` the following code is
+    used to check that two _HeapElements differ in either element or priority:
+
+        if d_oldmax != row_max or d_oldmax.priority != row_max.priority:
+
+    If the priorities are the same, this implementation uses the element
+    as a tiebreaker. This provides compatibility with older systems that
+    use tuples to combine priority and elements.
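+
+    A small sketch of that split (the values here are arbitrary):
+
+        >>> a = _HeapElement(1.0, "x")
+        >>> b = _HeapElement(2.0, "x")
+        >>> a == b  # equality looks only at the element
+        True
+        >>> a < b  # ordering looks at the priority
+        True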
+ """ + + __slots__ = ["priority", "element", "_hash"] + + def __init__(self, priority, element): + self.priority = priority + self.element = element + self._hash = hash(element) + + def __lt__(self, other): + try: + other_priority = other.priority + except AttributeError: + return self.priority < other + # assume comparing to another _HeapElement + if self.priority == other_priority: + try: + return self.element < other.element + except TypeError as err: + raise TypeError( + "Consider using a tuple, with a priority value that can be compared." + ) + return self.priority < other_priority + + def __gt__(self, other): + try: + other_priority = other.priority + except AttributeError: + return self.priority > other + # assume comparing to another _HeapElement + if self.priority == other_priority: + try: + return self.element > other.element + except TypeError as err: + raise TypeError( + "Consider using a tuple, with a priority value that can be compared." + ) + return self.priority > other_priority + + def __eq__(self, other): + try: + return self.element == other.element + except AttributeError: + return self.element == other + + def __hash__(self): + return self._hash + + def __getitem__(self, indx): + return self.priority if indx == 0 else self.element[indx - 1] + + def __iter__(self): + yield self.priority + try: + yield from self.element + except TypeError: + yield self.element + + def __repr__(self): + return f"_HeapElement({self.priority}, {self.element})" + + +class MappedQueue: + """The MappedQueue class implements a min-heap with removal and update-priority. + + The min heap uses heapq as well as custom written _siftup and _siftdown + methods to allow the heap positions to be tracked by an additional dict + keyed by element to position. The smallest element can be popped in O(1) time, + new elements can be pushed in O(log n) time, and any element can be removed + or updated in O(log n) time. The queue cannot contain duplicate elements + and an attempt to push an element already in the queue will have no effect. + + MappedQueue complements the heapq package from the python standard + library. While MappedQueue is designed for maximum compatibility with + heapq, it adds element removal, lookup, and priority update. + + Parameters + ---------- + data : dict or iterable + + Examples + -------- + + A `MappedQueue` can be created empty, or optionally, given a dictionary + of initial elements and priorities. The methods `push`, `pop`, + `remove`, and `update` operate on the queue. + + >>> colors_nm = {"red": 665, "blue": 470, "green": 550} + >>> q = MappedQueue(colors_nm) + >>> q.remove("red") + >>> q.update("green", "violet", 400) + >>> q.push("indigo", 425) + True + >>> [q.pop().element for i in range(len(q.heap))] + ['violet', 'indigo', 'blue'] + + A `MappedQueue` can also be initialized with a list or other iterable. The priority is assumed + to be the sort order of the items in the list. + + >>> q = MappedQueue([916, 50, 4609, 493, 237]) + >>> q.remove(493) + >>> q.update(237, 1117) + >>> [q.pop() for i in range(len(q.heap))] + [50, 916, 1117, 4609] + + An exception is raised if the elements are not comparable. + + >>> q = MappedQueue([100, "a"]) + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'int' and 'str' + + To avoid the exception, use a dictionary to assign priorities to the elements. + + >>> q = MappedQueue({100: 0, "a": 1}) + + References + ---------- + .. [1] Cormen, T. H., Leiserson, C. E., Rivest, R. L., & Stein, C. (2001). 
+ Introduction to algorithms second edition. + .. [2] Knuth, D. E. (1997). The art of computer programming (Vol. 3). + Pearson Education. + """ + + def __init__(self, data=None): + """Priority queue class with updatable priorities.""" + if data is None: + self.heap = [] + elif isinstance(data, dict): + self.heap = [_HeapElement(v, k) for k, v in data.items()] + else: + self.heap = list(data) + self.position = {} + self._heapify() + + def _heapify(self): + """Restore heap invariant and recalculate map.""" + heapq.heapify(self.heap) + self.position = {elt: pos for pos, elt in enumerate(self.heap)} + if len(self.heap) != len(self.position): + raise AssertionError("Heap contains duplicate elements") + + def __len__(self): + return len(self.heap) + + def push(self, elt, priority=None): + """Add an element to the queue.""" + if priority is not None: + elt = _HeapElement(priority, elt) + # If element is already in queue, do nothing + if elt in self.position: + return False + # Add element to heap and dict + pos = len(self.heap) + self.heap.append(elt) + self.position[elt] = pos + # Restore invariant by sifting down + self._siftdown(0, pos) + return True + + def pop(self): + """Remove and return the smallest element in the queue.""" + # Remove smallest element + elt = self.heap[0] + del self.position[elt] + # If elt is last item, remove and return + if len(self.heap) == 1: + self.heap.pop() + return elt + # Replace root with last element + last = self.heap.pop() + self.heap[0] = last + self.position[last] = 0 + # Restore invariant by sifting up + self._siftup(0) + # Return smallest element + return elt + + def update(self, elt, new, priority=None): + """Replace an element in the queue with a new one.""" + if priority is not None: + new = _HeapElement(priority, new) + # Replace + pos = self.position[elt] + self.heap[pos] = new + del self.position[elt] + self.position[new] = pos + # Restore invariant by sifting up + self._siftup(pos) + + def remove(self, elt): + """Remove an element from the queue.""" + # Find and remove element + try: + pos = self.position[elt] + del self.position[elt] + except KeyError: + # Not in queue + raise + # If elt is last item, remove and return + if pos == len(self.heap) - 1: + self.heap.pop() + return + # Replace elt with last element + last = self.heap.pop() + self.heap[pos] = last + self.position[last] = pos + # Restore invariant by sifting up + self._siftup(pos) + + def _siftup(self, pos): + """Move smaller child up until hitting a leaf. + + Built to mimic code for heapq._siftup + only updating position dict too. + """ + heap, position = self.heap, self.position + end_pos = len(heap) + startpos = pos + newitem = heap[pos] + # Shift up the smaller child until hitting a leaf + child_pos = (pos << 1) + 1 # start with leftmost child position + while child_pos < end_pos: + # Set child_pos to index of smaller child. + child = heap[child_pos] + right_pos = child_pos + 1 + if right_pos < end_pos: + right = heap[right_pos] + if not child < right: + child = right + child_pos = right_pos + # Move the smaller child up. + heap[pos] = child + position[child] = pos + pos = child_pos + child_pos = (pos << 1) + 1 + # pos is a leaf position. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). 
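+        # (Unlike heapq._siftup, every move below also refreshes the
+        # `position` dict so element lookup stays O(1).)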
+ while pos > 0: + parent_pos = (pos - 1) >> 1 + parent = heap[parent_pos] + if not newitem < parent: + break + heap[pos] = parent + position[parent] = pos + pos = parent_pos + heap[pos] = newitem + position[newitem] = pos + + def _siftdown(self, start_pos, pos): + """Restore invariant. keep swapping with parent until smaller. + + Built to mimic code for heapq._siftdown + only updating position dict too. + """ + heap, position = self.heap, self.position + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > start_pos: + parent_pos = (pos - 1) >> 1 + parent = heap[parent_pos] + if not newitem < parent: + break + heap[pos] = parent + position[parent] = pos + pos = parent_pos + heap[pos] = newitem + position[newitem] = pos diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/misc.py b/.venv/lib/python3.12/site-packages/networkx/utils/misc.py new file mode 100644 index 0000000..73b5065 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/misc.py @@ -0,0 +1,651 @@ +""" +Miscellaneous Helpers for NetworkX. + +These are not imported into the base networkx namespace but +can be accessed, for example, as + +>>> import networkx as nx +>>> nx.utils.make_list_of_ints({1, 2, 3}) +[1, 2, 3] +>>> nx.utils.arbitrary_element({5, 1, 7}) # doctest: +SKIP +1 +""" + +import random +import warnings +from collections import defaultdict +from collections.abc import Iterable, Iterator, Sized +from itertools import chain, tee + +import networkx as nx + +__all__ = [ + "flatten", + "make_list_of_ints", + "dict_to_numpy_array", + "arbitrary_element", + "pairwise", + "groups", + "create_random_state", + "create_py_random_state", + "PythonRandomInterface", + "PythonRandomViaNumpyBits", + "nodes_equal", + "edges_equal", + "graphs_equal", + "_clear_cache", +] + + +# some cookbook stuff +# used in deciding whether something is a bunch of nodes, edges, etc. +# see G.add_nodes and others in Graph Class in networkx/base.py + + +def flatten(obj, result=None): + """Return flattened version of (possibly nested) iterable object.""" + if not isinstance(obj, Iterable | Sized) or isinstance(obj, str): + return obj + if result is None: + result = [] + for item in obj: + if not isinstance(item, Iterable | Sized) or isinstance(item, str): + result.append(item) + else: + flatten(item, result) + return tuple(result) + + +def make_list_of_ints(sequence): + """Return list of ints from sequence of integral numbers. + + All elements of the sequence must satisfy int(element) == element + or a ValueError is raised. Sequence is iterated through once. + + If sequence is a list, the non-int values are replaced with ints. + So, no new list is created + """ + if not isinstance(sequence, list): + result = [] + for i in sequence: + errmsg = f"sequence is not all integers: {i}" + try: + ii = int(i) + except ValueError: + raise nx.NetworkXError(errmsg) from None + if ii != i: + raise nx.NetworkXError(errmsg) + result.append(ii) + return result + # original sequence is a list... 
in-place conversion to ints + for indx, i in enumerate(sequence): + errmsg = f"sequence is not all integers: {i}" + if isinstance(i, int): + continue + try: + ii = int(i) + except ValueError: + raise nx.NetworkXError(errmsg) from None + if ii != i: + raise nx.NetworkXError(errmsg) + sequence[indx] = ii + return sequence + + +def dict_to_numpy_array(d, mapping=None): + """Convert a dictionary of dictionaries to a numpy array + with optional mapping.""" + try: + return _dict_to_numpy_array2(d, mapping) + except (AttributeError, TypeError): + # AttributeError is when no mapping was provided and v.keys() fails. + # TypeError is when a mapping was provided and d[k1][k2] fails. + return _dict_to_numpy_array1(d, mapping) + + +def _dict_to_numpy_array2(d, mapping=None): + """Convert a dictionary of dictionaries to a 2d numpy array + with optional mapping. + + """ + import numpy as np + + if mapping is None: + s = set(d.keys()) + for k, v in d.items(): + s.update(v.keys()) + mapping = dict(zip(s, range(len(s)))) + n = len(mapping) + a = np.zeros((n, n)) + for k1, i in mapping.items(): + for k2, j in mapping.items(): + try: + a[i, j] = d[k1][k2] + except KeyError: + pass + return a + + +def _dict_to_numpy_array1(d, mapping=None): + """Convert a dictionary of numbers to a 1d numpy array with optional mapping.""" + import numpy as np + + if mapping is None: + s = set(d.keys()) + mapping = dict(zip(s, range(len(s)))) + n = len(mapping) + a = np.zeros(n) + for k1, i in mapping.items(): + i = mapping[k1] + a[i] = d[k1] + return a + + +def arbitrary_element(iterable): + """Returns an arbitrary element of `iterable` without removing it. + + This is most useful for "peeking" at an arbitrary element of a set, + but can be used for any list, dictionary, etc., as well. + + Parameters + ---------- + iterable : `abc.collections.Iterable` instance + Any object that implements ``__iter__``, e.g. set, dict, list, tuple, + etc. + + Returns + ------- + The object that results from ``next(iter(iterable))`` + + Raises + ------ + ValueError + If `iterable` is an iterator (because the current implementation of + this function would consume an element from the iterator). + + Examples + -------- + Arbitrary elements from common Iterable objects: + + >>> nx.utils.arbitrary_element([1, 2, 3]) # list + 1 + >>> nx.utils.arbitrary_element((1, 2, 3)) # tuple + 1 + >>> nx.utils.arbitrary_element({1, 2, 3}) # set + 1 + >>> d = {k: v for k, v in zip([1, 2, 3], [3, 2, 1])} + >>> nx.utils.arbitrary_element(d) # dict_keys + 1 + >>> nx.utils.arbitrary_element(d.values()) # dict values + 3 + + `str` is also an Iterable: + + >>> nx.utils.arbitrary_element("hello") + 'h' + + :exc:`ValueError` is raised if `iterable` is an iterator: + + >>> iterator = iter([1, 2, 3]) # Iterator, *not* Iterable + >>> nx.utils.arbitrary_element(iterator) + Traceback (most recent call last): + ... + ValueError: cannot return an arbitrary item from an iterator + + Notes + ----- + This function does not return a *random* element. If `iterable` is + ordered, sequential calls will return the same value:: + + >>> l = [1, 2, 3] + >>> nx.utils.arbitrary_element(l) + 1 + >>> nx.utils.arbitrary_element(l) + 1 + + """ + if isinstance(iterable, Iterator): + raise ValueError("cannot return an arbitrary item from an iterator") + # Another possible implementation is ``for x in iterable: return x``. + return next(iter(iterable)) + + +# Recipe from the itertools documentation. +def pairwise(iterable, cyclic=False): + "s -> (s0, s1), (s1, s2), (s2, s3), ..." 
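+    # e.g. pairwise("abc") -> ("a", "b"), ("b", "c"); with cyclic=True the
+    # wrap-around pair ("c", "a") is also produced.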
+ a, b = tee(iterable) + first = next(b, None) + if cyclic is True: + return zip(a, chain(b, (first,))) + return zip(a, b) + + +def groups(many_to_one): + """Converts a many-to-one mapping into a one-to-many mapping. + + `many_to_one` must be a dictionary whose keys and values are all + :term:`hashable`. + + The return value is a dictionary mapping values from `many_to_one` + to sets of keys from `many_to_one` that have that value. + + Examples + -------- + >>> from networkx.utils import groups + >>> many_to_one = {"a": 1, "b": 1, "c": 2, "d": 3, "e": 3} + >>> groups(many_to_one) # doctest: +SKIP + {1: {'a', 'b'}, 2: {'c'}, 3: {'e', 'd'}} + """ + one_to_many = defaultdict(set) + for v, k in many_to_one.items(): + one_to_many[k].add(v) + return dict(one_to_many) + + +def create_random_state(random_state=None): + """Returns a numpy.random.RandomState or numpy.random.Generator instance + depending on input. + + Parameters + ---------- + random_state : int or NumPy RandomState or Generator instance, optional (default=None) + If int, return a numpy.random.RandomState instance set with seed=int. + if `numpy.random.RandomState` instance, return it. + if `numpy.random.Generator` instance, return it. + if None or numpy.random, return the global random number generator used + by numpy.random. + """ + import numpy as np + + if random_state is None or random_state is np.random: + return np.random.mtrand._rand + if isinstance(random_state, np.random.RandomState): + return random_state + if isinstance(random_state, int): + return np.random.RandomState(random_state) + if isinstance(random_state, np.random.Generator): + return random_state + msg = ( + f"{random_state} cannot be used to create a numpy.random.RandomState or\n" + "numpy.random.Generator instance" + ) + raise ValueError(msg) + + +class PythonRandomViaNumpyBits(random.Random): + """Provide the random.random algorithms using a numpy.random bit generator + + The intent is to allow people to contribute code that uses Python's random + library, but still allow users to provide a single easily controlled random + bit-stream for all work with NetworkX. This implementation is based on helpful + comments and code from Robert Kern on NumPy's GitHub Issue #24458. + + This implementation supersedes that of `PythonRandomInterface` which rewrote + methods to account for subtle differences in API between `random` and + `numpy.random`. Instead this subclasses `random.Random` and overwrites + the methods `random`, `getrandbits`, `getstate`, `setstate` and `seed`. + It makes them use the rng values from an input numpy `RandomState` or `Generator`. + Those few methods allow the rest of the `random.Random` methods to provide + the API interface of `random.random` while using randomness generated by + a numpy generator. + """ + + def __init__(self, rng=None): + try: + import numpy as np + except ImportError: + msg = "numpy not found, only random.random available." + warnings.warn(msg, ImportWarning) + + if rng is None: + self._rng = np.random.mtrand._rand + else: + self._rng = rng + + # Not necessary, given our overriding of gauss() below, but it's + # in the superclass and nominally public, so initialize it here. + self.gauss_next = None + + def random(self): + """Get the next random number in the range 0.0 <= X < 1.0.""" + return self._rng.random() + + def getrandbits(self, k): + """getrandbits(k) -> x. 
Generates an int with k random bits.""" + if k < 0: + raise ValueError("number of bits must be non-negative") + numbytes = (k + 7) // 8 # bits / 8 and rounded up + x = int.from_bytes(self._rng.bytes(numbytes), "big") + return x >> (numbytes * 8 - k) # trim excess bits + + def getstate(self): + return self._rng.__getstate__() + + def setstate(self, state): + self._rng.__setstate__(state) + + def seed(self, *args, **kwds): + "Do nothing override method." + raise NotImplementedError("seed() not implemented in PythonRandomViaNumpyBits") + + +################################################################## +class PythonRandomInterface: + """PythonRandomInterface is included for backward compatibility + New code should use PythonRandomViaNumpyBits instead. + """ + + def __init__(self, rng=None): + try: + import numpy as np + except ImportError: + msg = "numpy not found, only random.random available." + warnings.warn(msg, ImportWarning) + + if rng is None: + self._rng = np.random.mtrand._rand + else: + self._rng = rng + + def random(self): + return self._rng.random() + + def uniform(self, a, b): + return a + (b - a) * self._rng.random() + + def randrange(self, a, b=None): + import numpy as np + + if b is None: + a, b = 0, a + if b > 9223372036854775807: # from np.iinfo(np.int64).max + tmp_rng = PythonRandomViaNumpyBits(self._rng) + return tmp_rng.randrange(a, b) + + if isinstance(self._rng, np.random.Generator): + return self._rng.integers(a, b) + return self._rng.randint(a, b) + + # NOTE: the numpy implementations of `choice` don't support strings, so + # this cannot be replaced with self._rng.choice + def choice(self, seq): + import numpy as np + + if isinstance(self._rng, np.random.Generator): + idx = self._rng.integers(0, len(seq)) + else: + idx = self._rng.randint(0, len(seq)) + return seq[idx] + + def gauss(self, mu, sigma): + return self._rng.normal(mu, sigma) + + def shuffle(self, seq): + return self._rng.shuffle(seq) + + # Some methods don't match API for numpy RandomState. + # Commented out versions are not used by NetworkX + + def sample(self, seq, k): + return self._rng.choice(list(seq), size=(k,), replace=False) + + def randint(self, a, b): + import numpy as np + + if b > 9223372036854775807: # from np.iinfo(np.int64).max + tmp_rng = PythonRandomViaNumpyBits(self._rng) + return tmp_rng.randint(a, b) + + if isinstance(self._rng, np.random.Generator): + return self._rng.integers(a, b + 1) + return self._rng.randint(a, b + 1) + + # exponential as expovariate with 1/argument, + def expovariate(self, scale): + return self._rng.exponential(1 / scale) + + # pareto as paretovariate with 1/argument, + def paretovariate(self, shape): + return self._rng.pareto(shape) + + +# weibull as weibullvariate multiplied by beta, +# def weibullvariate(self, alpha, beta): +# return self._rng.weibull(alpha) * beta +# +# def triangular(self, low, high, mode): +# return self._rng.triangular(low, mode, high) +# +# def choices(self, seq, weights=None, cum_weights=None, k=1): +# return self._rng.choice(seq + + +def create_py_random_state(random_state=None): + """Returns a random.Random instance depending on input. + + Parameters + ---------- + random_state : int or random number generator or None (default=None) + - If int, return a `random.Random` instance set with seed=int. + - If `random.Random` instance, return it. + - If None or the `np.random` package, return the global random number + generator used by `np.random`. 
+        - If an `np.random.Generator` instance, or the `np.random` package, or
+          the global numpy random number generator, then return it
+          wrapped in a `PythonRandomViaNumpyBits` class.
+        - If a `PythonRandomViaNumpyBits` instance, return it.
+        - If a `PythonRandomInterface` instance, return it.
+        - If a `np.random.RandomState` instance and not the global numpy default,
+          return it wrapped in `PythonRandomInterface` for backward bit-stream
+          matching with legacy code.
+
+    Notes
+    -----
+    - A diagram intending to illustrate the relationships behind our support
+      for numpy random numbers is called
+      `NetworkX Numpy Random Numbers `_.
+    - More discussion about this support also appears in
+      `gh-6869#comment `_.
+    - Wrappers of numpy.random number generators allow them to mimic the Python random
+      number generation algorithms. For example, Python can create arbitrarily large
+      random ints, and the wrappers use Numpy bit-streams with CPython's random module
+      to choose arbitrarily large random integers too.
+    - We provide two wrapper classes:
+      `PythonRandomViaNumpyBits` is usually what you want and is always used for
+      `np.random.Generator` instances. But for users who need to recreate random
+      numbers produced in NetworkX 3.2 or earlier, we maintain the
+      `PythonRandomInterface` wrapper as well. We use it only if passed a
+      (non-default) `np.random.RandomState` instance pre-initialized from a seed.
+      Otherwise the newer wrapper is used.
+    """
+    if random_state is None or random_state is random:
+        return random._inst
+    if isinstance(random_state, random.Random):
+        return random_state
+    if isinstance(random_state, int):
+        return random.Random(random_state)
+
+    try:
+        import numpy as np
+    except ImportError:
+        pass
+    else:
+        if isinstance(random_state, PythonRandomInterface | PythonRandomViaNumpyBits):
+            return random_state
+        if isinstance(random_state, np.random.Generator):
+            return PythonRandomViaNumpyBits(random_state)
+        if random_state is np.random:
+            return PythonRandomViaNumpyBits(np.random.mtrand._rand)
+
+        if isinstance(random_state, np.random.RandomState):
+            if random_state is np.random.mtrand._rand:
+                return PythonRandomViaNumpyBits(random_state)
+            # Only need older interface if specially constructed RandomState used
+            return PythonRandomInterface(random_state)
+
+    msg = f"{random_state} cannot be used to generate a random.Random instance"
+    raise ValueError(msg)
+
+
+def nodes_equal(nodes1, nodes2):
+    """Check if nodes are equal.
+
+    Equality here means equal as Python objects.
+    Node data must match if included.
+    The order of nodes is not relevant.
+
+    Parameters
+    ----------
+    nodes1, nodes2 : iterables of nodes, or (node, datadict) tuples
+
+    Returns
+    -------
+    bool
+        True if nodes are equal, False otherwise.
+    """
+    nlist1 = list(nodes1)
+    nlist2 = list(nodes2)
+    try:
+        d1 = dict(nlist1)
+        d2 = dict(nlist2)
+    except (ValueError, TypeError):
+        d1 = dict.fromkeys(nlist1)
+        d2 = dict.fromkeys(nlist2)
+    return d1 == d2
+
+
+def edges_equal(edges1, edges2):
+    """Check if edges are equal.
+
+    Equality here means equal as Python objects.
+    Edge data must match if included.
+    The order of the edges is not relevant.
+
+    Parameters
+    ----------
+    edges1, edges2 : iterables of edge tuples, where an edge tuple is
+        a 2-tuple (u, v), or
+        a 3-tuple with a data dict (u, v, d), or
+        a 4-tuple with a key and a data dict (u, v, k, d)
+
+    Returns
+    -------
+    bool
+        True if edges are equal, False otherwise.
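+
+    Examples
+    --------
+    >>> from networkx.utils import edges_equal
+    >>> edges_equal([(0, 1), (1, 2)], [(1, 2), (1, 0)])
+    True
+    >>> edges_equal([(0, 1, {"weight": 1})], [(0, 1, {"weight": 2})])
+    False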
+ """ + from collections import defaultdict + + d1 = defaultdict(dict) + d2 = defaultdict(dict) + c1 = 0 + for c1, e in enumerate(edges1): + u, v = e[0], e[1] + data = [e[2:]] + if v in d1[u]: + data = d1[u][v] + data + d1[u][v] = data + d1[v][u] = data + c2 = 0 + for c2, e in enumerate(edges2): + u, v = e[0], e[1] + data = [e[2:]] + if v in d2[u]: + data = d2[u][v] + data + d2[u][v] = data + d2[v][u] = data + if c1 != c2: + return False + # can check one direction because lengths are the same. + for n, nbrdict in d1.items(): + for nbr, datalist in nbrdict.items(): + if n not in d2: + return False + if nbr not in d2[n]: + return False + d2datalist = d2[n][nbr] + for data in datalist: + if datalist.count(data) != d2datalist.count(data): + return False + return True + + +def graphs_equal(graph1, graph2): + """Check if graphs are equal. + + Equality here means equal as Python objects (not isomorphism). + Node, edge and graph data must match. + + Parameters + ---------- + graph1, graph2 : graph + + Returns + ------- + bool + True if graphs are equal, False otherwise. + """ + return ( + graph1.adj == graph2.adj + and graph1.nodes == graph2.nodes + and graph1.graph == graph2.graph + ) + + +def _clear_cache(G): + """Clear the cache of a graph (currently stores converted graphs). + + Caching is controlled via ``nx.config.cache_converted_graphs`` configuration. + """ + if cache := getattr(G, "__networkx_cache__", None): + cache.clear() + + +def check_create_using(create_using, *, directed=None, multigraph=None, default=None): + """Assert that create_using has good properties + + This checks for desired directedness and multi-edge properties. + It returns `create_using` unless that is `None` when it returns + the optionally specified default value. + + Parameters + ---------- + create_using : None, graph class or instance + The input value of create_using for a function. + directed : None or bool + Whether to check `create_using.is_directed() == directed`. + If None, do not assert directedness. + multigraph : None or bool + Whether to check `create_using.is_multigraph() == multigraph`. + If None, do not assert multi-edge property. + default : None or graph class + The graph class to return if create_using is None. + + Returns + ------- + create_using : graph class or instance + The provided graph class or instance, or if None, the `default` value. + + Raises + ------ + NetworkXError + When `create_using` doesn't match the properties specified by `directed` + or `multigraph` parameters. 
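+
+    Examples
+    --------
+    >>> check_create_using(None, directed=True, default=nx.DiGraph)
+    <class 'networkx.classes.digraph.DiGraph'>
+    >>> check_create_using(nx.MultiGraph, multigraph=False)
+    Traceback (most recent call last):
+    ...
+    networkx.exception.NetworkXError: create_using must not be a multi-graph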
+    """
+    if default is None:
+        default = nx.Graph
+    G = create_using if create_using is not None else default
+
+    G_directed = G.is_directed(None) if isinstance(G, type) else G.is_directed()
+    G_multigraph = G.is_multigraph(None) if isinstance(G, type) else G.is_multigraph()
+
+    if directed is not None:
+        if directed and not G_directed:
+            raise nx.NetworkXError("create_using must be directed")
+        if not directed and G_directed:
+            raise nx.NetworkXError("create_using must not be directed")
+
+    if multigraph is not None:
+        if multigraph and not G_multigraph:
+            raise nx.NetworkXError("create_using must be a multi-graph")
+        if not multigraph and G_multigraph:
+            raise nx.NetworkXError("create_using must not be a multi-graph")
+    return G
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/random_sequence.py b/.venv/lib/python3.12/site-packages/networkx/utils/random_sequence.py
new file mode 100644
index 0000000..20a7b5e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/random_sequence.py
@@ -0,0 +1,164 @@
+"""
+Utilities for generating random numbers, random sequences, and
+random selections.
+"""
+
+import networkx as nx
+from networkx.utils import py_random_state
+
+__all__ = [
+    "powerlaw_sequence",
+    "zipf_rv",
+    "cumulative_distribution",
+    "discrete_sequence",
+    "random_weighted_sample",
+    "weighted_choice",
+]
+
+
+# Helpers for choosing random sequences from distributions,
+# using Python's random module
+# https://docs.python.org/3/library/random.html
+
+
+@py_random_state(2)
+def powerlaw_sequence(n, exponent=2.0, seed=None):
+    """
+    Return sample sequence of length n from a power law distribution.
+    """
+    return [seed.paretovariate(exponent - 1) for i in range(n)]
+
+
+@py_random_state(2)
+def zipf_rv(alpha, xmin=1, seed=None):
+    r"""Returns a random value chosen from the Zipf distribution.
+
+    The return value is an integer drawn from the probability distribution
+
+    .. math::
+
+        p(x)=\frac{x^{-\alpha}}{\zeta(\alpha, x_{\min})},
+
+    where $\zeta(\alpha, x_{\min})$ is the Hurwitz zeta function.
+
+    Parameters
+    ----------
+    alpha : float
+        Exponent value of the distribution
+    xmin : int
+        Minimum value
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    x : int
+        Random value from Zipf distribution
+
+    Raises
+    ------
+    ValueError:
+        If xmin < 1 or
+        If alpha <= 1
+
+    Notes
+    -----
+    The rejection algorithm generates random values for the power-law
+    distribution in uniformly bounded expected time dependent on
+    parameters. See [1]_ for details on its operation.
+
+    Examples
+    --------
+    >>> nx.utils.zipf_rv(alpha=2, xmin=3, seed=42)
+    8
+
+    References
+    ----------
+    .. [1] Luc Devroye, Non-Uniform Random Variate Generation,
+       Springer-Verlag, New York, 1986.
+ """ + if xmin < 1: + raise ValueError("xmin < 1") + if alpha <= 1: + raise ValueError("a <= 1.0") + a1 = alpha - 1.0 + b = 2**a1 + while True: + u = 1.0 - seed.random() # u in (0,1] + v = seed.random() # v in [0,1) + x = int(xmin * u ** -(1.0 / a1)) + t = (1.0 + (1.0 / x)) ** a1 + if v * x * (t - 1.0) / (b - 1.0) <= t / b: + break + return x + + +def cumulative_distribution(distribution): + """Returns normalized cumulative distribution from discrete distribution.""" + + cdf = [0.0] + psum = sum(distribution) + for i in range(len(distribution)): + cdf.append(cdf[i] + distribution[i] / psum) + return cdf + + +@py_random_state(3) +def discrete_sequence(n, distribution=None, cdistribution=None, seed=None): + """ + Return sample sequence of length n from a given discrete distribution + or discrete cumulative distribution. + + One of the following must be specified. + + distribution = histogram of values, will be normalized + + cdistribution = normalized discrete cumulative distribution + + """ + import bisect + + if cdistribution is not None: + cdf = cdistribution + elif distribution is not None: + cdf = cumulative_distribution(distribution) + else: + raise nx.NetworkXError( + "discrete_sequence: distribution or cdistribution missing" + ) + + # get a uniform random number + inputseq = [seed.random() for i in range(n)] + + # choose from CDF + seq = [bisect.bisect_left(cdf, s) - 1 for s in inputseq] + return seq + + +@py_random_state(2) +def random_weighted_sample(mapping, k, seed=None): + """Returns k items without replacement from a weighted sample. + + The input is a dictionary of items with weights as values. + """ + if k > len(mapping): + raise ValueError("sample larger than population") + sample = set() + while len(sample) < k: + sample.add(weighted_choice(mapping, seed)) + return list(sample) + + +@py_random_state(1) +def weighted_choice(mapping, seed=None): + """Returns a single element from a weighted sample. + + The input is a dictionary of items with weights as values. + """ + # use roulette method + rnd = seed.random() * sum(mapping.values()) + for k, w in mapping.items(): + rnd -= w + if rnd < 0: + return k diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/rcm.py b/.venv/lib/python3.12/site-packages/networkx/utils/rcm.py new file mode 100644 index 0000000..7465c50 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/rcm.py @@ -0,0 +1,159 @@ +""" +Cuthill-McKee ordering of graph nodes to produce sparse matrices +""" + +from collections import deque +from operator import itemgetter + +import networkx as nx + +from ..utils import arbitrary_element + +__all__ = ["cuthill_mckee_ordering", "reverse_cuthill_mckee_ordering"] + + +def cuthill_mckee_ordering(G, heuristic=None): + """Generate an ordering (permutation) of the graph nodes to make + a sparse matrix. + + Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + heuristic : function, optional + Function to choose starting node for RCM algorithm. If None + a node from a pseudo-peripheral pair is used. A user-defined function + can be supplied that takes a graph object and returns a single node. + + Returns + ------- + nodes : generator + Generator of nodes in Cuthill-McKee ordering. 
+
+    Examples
+    --------
+    >>> from networkx.utils import cuthill_mckee_ordering
+    >>> G = nx.path_graph(4)
+    >>> rcm = list(cuthill_mckee_ordering(G))
+    >>> A = nx.adjacency_matrix(G, nodelist=rcm)
+
+    Smallest degree node as heuristic function:
+
+    >>> def smallest_degree(G):
+    ...     return min(G, key=G.degree)
+    >>> rcm = list(cuthill_mckee_ordering(G, heuristic=smallest_degree))
+
+    See Also
+    --------
+    reverse_cuthill_mckee_ordering
+
+    Notes
+    -----
+    The optimal solution to the bandwidth reduction problem is NP-complete [2]_.
+
+    References
+    ----------
+    .. [1] E. Cuthill and J. McKee.
+       Reducing the bandwidth of sparse symmetric matrices,
+       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
+       http://doi.acm.org/10.1145/800195.805928
+    .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
+       Springer-Verlag New York, Inc., New York, NY, USA.
+    """
+    for c in nx.connected_components(G):
+        yield from connected_cuthill_mckee_ordering(G.subgraph(c), heuristic)
+
+
+def reverse_cuthill_mckee_ordering(G, heuristic=None):
+    """Generate an ordering (permutation) of the graph nodes to make
+    a sparse matrix.
+
+    Uses the reverse Cuthill-McKee heuristic (based on breadth-first search)
+    [1]_.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    heuristic : function, optional
+        Function to choose starting node for RCM algorithm. If None
+        a node from a pseudo-peripheral pair is used. A user-defined function
+        can be supplied that takes a graph object and returns a single node.
+
+    Returns
+    -------
+    nodes : generator
+        Generator of nodes in reverse Cuthill-McKee ordering.
+
+    Examples
+    --------
+    >>> from networkx.utils import reverse_cuthill_mckee_ordering
+    >>> G = nx.path_graph(4)
+    >>> rcm = list(reverse_cuthill_mckee_ordering(G))
+    >>> A = nx.adjacency_matrix(G, nodelist=rcm)
+
+    Smallest degree node as heuristic function:
+
+    >>> def smallest_degree(G):
+    ...     return min(G, key=G.degree)
+    >>> rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))
+
+    See Also
+    --------
+    cuthill_mckee_ordering
+
+    Notes
+    -----
+    The optimal solution to the bandwidth reduction problem is NP-complete [2]_.
+
+    References
+    ----------
+    .. [1] E. Cuthill and J. McKee.
+       Reducing the bandwidth of sparse symmetric matrices,
+       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
+       http://doi.acm.org/10.1145/800195.805928
+    .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
+       Springer-Verlag New York, Inc., New York, NY, USA.
+ """ + return reversed(list(cuthill_mckee_ordering(G, heuristic=heuristic))) + + +def connected_cuthill_mckee_ordering(G, heuristic=None): + # the cuthill mckee algorithm for connected graphs + if heuristic is None: + start = pseudo_peripheral_node(G) + else: + start = heuristic(G) + visited = {start} + queue = deque([start]) + while queue: + parent = queue.popleft() + yield parent + nd = sorted(G.degree(set(G[parent]) - visited), key=itemgetter(1)) + children = [n for n, d in nd] + visited.update(children) + queue.extend(children) + + +def pseudo_peripheral_node(G): + # helper for cuthill-mckee to find a node in a "pseudo peripheral pair" + # to use as good starting node + u = arbitrary_element(G) + lp = 0 + v = u + while True: + spl = nx.shortest_path_length(G, v) + l = max(spl.values()) + if l <= lp: + break + lp = l + farthest = (n for n, dist in spl.items() if dist == l) + v, deg = min(G.degree(farthest), key=itemgetter(1)) + return v diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..0f195dd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test__init.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test__init.cpython-312.pyc new file mode 100644 index 0000000..5840011 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test__init.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_backends.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_backends.cpython-312.pyc new file mode 100644 index 0000000..604e6e3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_backends.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_config.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_config.cpython-312.pyc new file mode 100644 index 0000000..cf7a7df Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_config.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_decorators.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_decorators.cpython-312.pyc new file mode 100644 index 0000000..d33aa84 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_decorators.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_heaps.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_heaps.cpython-312.pyc new file mode 100644 index 0000000..0e293a0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_heaps.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_mapped_queue.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_mapped_queue.cpython-312.pyc new file mode 100644 index 0000000..8a7d46e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_mapped_queue.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-312.pyc new file mode 100644 index 0000000..e4bccd0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_random_sequence.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_random_sequence.cpython-312.pyc new file mode 100644 index 0000000..d922019 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_random_sequence.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_rcm.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_rcm.cpython-312.pyc new file mode 100644 index 0000000..81bc370 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_rcm.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_unionfind.cpython-312.pyc b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_unionfind.cpython-312.pyc new file mode 100644 index 0000000..ca6111d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__pycache__/test_unionfind.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test__init.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test__init.py new file mode 100644 index 0000000..ecbcce3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test__init.py @@ -0,0 +1,11 @@ +import pytest + + +def test_utils_namespace(): + """Ensure objects are not unintentionally exposed in utils namespace.""" + with pytest.raises(ImportError): + from networkx.utils import nx + with pytest.raises(ImportError): + from networkx.utils import sys + with pytest.raises(ImportError): + from networkx.utils import defaultdict, deque diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_backends.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_backends.py new file mode 100644 index 0000000..fb0309c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_backends.py @@ -0,0 +1,187 @@ +import pickle + +import pytest + +import networkx as nx + +sp = pytest.importorskip("scipy") +pytest.importorskip("numpy") + + +@nx._dispatchable(implemented_by_nx=False) +def _stub_func(G): + raise NotImplementedError("_stub_func is a stub") + + +def test_dispatch_kwds_vs_args(): + G = nx.path_graph(4) + nx.pagerank(G) + nx.pagerank(G=G) + with pytest.raises(TypeError): + nx.pagerank() + + +def test_pickle(): + count = 0 + for name, func in nx.utils.backends._registered_algorithms.items(): + pickled = pickle.dumps(func.__wrapped__) + assert pickle.loads(pickled) is func.__wrapped__ + try: + # Some functions can't be pickled, but it's not 
b/c of _dispatchable + pickled = pickle.dumps(func) + except pickle.PicklingError: + continue + assert pickle.loads(pickled) is func + count += 1 + assert count > 0 + assert pickle.loads(pickle.dumps(nx.inverse_line_graph)) is nx.inverse_line_graph + + +@pytest.mark.skipif( + "not nx.config.backend_priority.algos " + "or nx.config.backend_priority.algos[0] != 'nx_loopback'" +) +def test_graph_converter_needs_backend(): + # When testing, `nx.from_scipy_sparse_array` will *always* call the backend + # implementation if it's implemented. If `backend=` isn't given, then the result + # will be converted back to NetworkX via `convert_to_nx`. + # If not testing, then calling `nx.from_scipy_sparse_array` w/o `backend=` will + # always call the original version. `backend=` is *required* to call the backend. + from networkx.classes.tests.dispatch_interface import ( + LoopbackBackendInterface, + LoopbackGraph, + ) + + A = sp.sparse.coo_array([[0, 3, 2], [3, 0, 1], [2, 1, 0]]) + + side_effects = [] + + def from_scipy_sparse_array(self, *args, **kwargs): + side_effects.append(1) # Just to prove this was called + return self.convert_from_nx( + self.__getattr__("from_scipy_sparse_array")(*args, **kwargs), + preserve_edge_attrs=True, + preserve_node_attrs=True, + preserve_graph_attrs=True, + ) + + @staticmethod + def convert_to_nx(obj, *, name=None): + if type(obj) is nx.Graph: + return obj + return nx.Graph(obj) + + # *This mutates LoopbackBackendInterface!* + orig_convert_to_nx = LoopbackBackendInterface.convert_to_nx + LoopbackBackendInterface.convert_to_nx = convert_to_nx + LoopbackBackendInterface.from_scipy_sparse_array = from_scipy_sparse_array + + try: + assert side_effects == [] + assert type(nx.from_scipy_sparse_array(A)) is nx.Graph + assert side_effects == [1] + assert ( + type(nx.from_scipy_sparse_array(A, backend="nx_loopback")) is LoopbackGraph + ) + assert side_effects == [1, 1] + # backend="networkx" is default implementation + assert type(nx.from_scipy_sparse_array(A, backend="networkx")) is nx.Graph + assert side_effects == [1, 1] + finally: + LoopbackBackendInterface.convert_to_nx = staticmethod(orig_convert_to_nx) + del LoopbackBackendInterface.from_scipy_sparse_array + with pytest.raises(ImportError, match="backend is not installed"): + nx.from_scipy_sparse_array(A, backend="bad-backend-name") + + +@pytest.mark.skipif( + "not nx.config.backend_priority.algos " + "or nx.config.backend_priority.algos[0] != 'nx_loopback'" +) +def test_networkx_backend(): + """Test using `backend="networkx"` in a dispatchable function.""" + # (Implementing this test is harder than it should be) + from networkx.classes.tests.dispatch_interface import ( + LoopbackBackendInterface, + LoopbackGraph, + ) + + G = LoopbackGraph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4)]) + + @staticmethod + def convert_to_nx(obj, *, name=None): + if isinstance(obj, LoopbackGraph): + new_graph = nx.Graph() + new_graph.__dict__.update(obj.__dict__) + return new_graph + return obj + + # *This mutates LoopbackBackendInterface!* + # This uses the same trick as in the previous test. 
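+    # Note: class-attribute access unwraps the staticmethod into a plain
+    # function, which is why the `finally` below re-wraps the saved original
+    # in `staticmethod()` when restoring it.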
+ orig_convert_to_nx = LoopbackBackendInterface.convert_to_nx + LoopbackBackendInterface.convert_to_nx = convert_to_nx + try: + G2 = nx.ego_graph(G, 0, backend="networkx") + assert type(G2) is nx.Graph + finally: + LoopbackBackendInterface.convert_to_nx = staticmethod(orig_convert_to_nx) + + +def test_dispatchable_are_functions(): + assert type(nx.pagerank) is type(nx.pagerank.orig_func) + + +@pytest.mark.skipif("not nx.utils.backends.backends") +def test_mixing_backend_graphs(): + from networkx.classes.tests import dispatch_interface + + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(2, 3) + H = nx.Graph() + H.add_edge(2, 3) + rv = nx.intersection(G, H) + assert set(nx.intersection(G, H)) == {2, 3} + G2 = dispatch_interface.convert(G) + H2 = dispatch_interface.convert(H) + if "nx_loopback" in nx.config.backend_priority: + # Auto-convert + assert set(nx.intersection(G2, H)) == {2, 3} + assert set(nx.intersection(G, H2)) == {2, 3} + elif not nx.config.backend_priority and "nx_loopback" not in nx.config.backends: + # G2 and H2 are backend objects for a backend that is not registered! + with pytest.raises(ImportError, match="backend is not installed"): + nx.intersection(G2, H) + with pytest.raises(ImportError, match="backend is not installed"): + nx.intersection(G, H2) + # It would be nice to test passing graphs from *different* backends, + # but we are not set up to do this yet. + + +def test_bad_backend_name(): + """Using `backend=` raises with unknown backend even if there are no backends.""" + with pytest.raises( + ImportError, match="'this_backend_does_not_exist' backend is not installed" + ): + nx.null_graph(backend="this_backend_does_not_exist") + + +def test_not_implemented_by_nx(): + assert "networkx" in nx.pagerank.backends + assert "networkx" not in _stub_func.backends + + if "nx_loopback" in nx.config.backends: + from networkx.classes.tests.dispatch_interface import LoopbackBackendInterface + + def stub_func_implementation(G): + return True + + LoopbackBackendInterface._stub_func = staticmethod(stub_func_implementation) + try: + assert _stub_func(nx.Graph()) is True + finally: + del LoopbackBackendInterface._stub_func + + with pytest.raises(NotImplementedError): + _stub_func(nx.Graph()) diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_config.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_config.py new file mode 100644 index 0000000..d4fe902 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_config.py @@ -0,0 +1,263 @@ +import collections +import pickle + +import pytest + +import networkx as nx +from networkx.utils.configs import BackendPriorities, Config + + +# Define this at module level so we can test pickling +class ExampleConfig(Config): + """Example configuration.""" + + x: int + y: str + + def _on_setattr(self, key, value): + if key == "x" and value <= 0: + raise ValueError("x must be positive") + if key == "y" and not isinstance(value, str): + raise TypeError("y must be a str") + return value + + +class EmptyConfig(Config): + pass + + +@pytest.mark.parametrize("cfg", [EmptyConfig(), Config()]) +def test_config_empty(cfg): + assert dir(cfg) == [] + with pytest.raises(AttributeError): + cfg.x = 1 + with pytest.raises(KeyError): + cfg["x"] = 1 + with pytest.raises(AttributeError): + cfg.x + with pytest.raises(KeyError): + cfg["x"] + assert len(cfg) == 0 + assert "x" not in cfg + assert cfg == cfg + assert cfg.get("x", 2) == 2 + assert set(cfg.keys()) == set() + assert set(cfg.values()) == set() + assert 
set(cfg.items()) == set() + cfg2 = pickle.loads(pickle.dumps(cfg)) + assert cfg == cfg2 + assert isinstance(cfg, collections.abc.Collection) + assert isinstance(cfg, collections.abc.Mapping) + + +def test_config_subclass(): + with pytest.raises(TypeError, match="missing 2 required keyword-only"): + ExampleConfig() + with pytest.raises(ValueError, match="x must be positive"): + ExampleConfig(x=0, y="foo") + with pytest.raises(TypeError, match="unexpected keyword"): + ExampleConfig(x=1, y="foo", z="bad config") + with pytest.raises(TypeError, match="unexpected keyword"): + EmptyConfig(z="bad config") + cfg = ExampleConfig(x=1, y="foo") + assert cfg.x == 1 + assert cfg["x"] == 1 + assert cfg["y"] == "foo" + assert cfg.y == "foo" + assert "x" in cfg + assert "y" in cfg + assert "z" not in cfg + assert len(cfg) == 2 + assert set(iter(cfg)) == {"x", "y"} + assert set(cfg.keys()) == {"x", "y"} + assert set(cfg.values()) == {1, "foo"} + assert set(cfg.items()) == {("x", 1), ("y", "foo")} + assert dir(cfg) == ["x", "y"] + cfg.x = 2 + cfg["y"] = "bar" + assert cfg["x"] == 2 + assert cfg.y == "bar" + with pytest.raises(TypeError, match="can't be deleted"): + del cfg.x + with pytest.raises(TypeError, match="can't be deleted"): + del cfg["y"] + assert cfg.x == 2 + assert cfg == cfg + assert cfg == ExampleConfig(x=2, y="bar") + assert cfg != ExampleConfig(x=3, y="baz") + assert cfg != Config(x=2, y="bar") + with pytest.raises(TypeError, match="y must be a str"): + cfg["y"] = 5 + with pytest.raises(ValueError, match="x must be positive"): + cfg.x = -5 + assert cfg.get("x", 10) == 2 + with pytest.raises(AttributeError): + cfg.z = 5 + with pytest.raises(KeyError): + cfg["z"] = 5 + with pytest.raises(AttributeError): + cfg.z + with pytest.raises(KeyError): + cfg["z"] + cfg2 = pickle.loads(pickle.dumps(cfg)) + assert cfg == cfg2 + assert cfg.__doc__ == "Example configuration." + assert cfg2.__doc__ == "Example configuration." 
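+
+
+def _config_pattern_sketch():
+    # A minimal sketch of the Config pattern the tests above exercise
+    # (illustration only, not upstream code; `PositiveX` is a hypothetical
+    # name). Annotated keys define the allowed attributes, and the
+    # `_on_setattr` hook validates every assignment, whether it is made
+    # through attribute or item access.
+    class PositiveX(Config):
+        x: int
+
+        def _on_setattr(self, key, value):
+            if value <= 0:
+                raise ValueError("x must be positive")
+            return value
+
+    cfg = PositiveX(x=1)
+    cfg["x"] = 2  # item and attribute access are interchangeable
+    assert cfg.x == 2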
+ + +def test_config_defaults(): + class DefaultConfig(Config): + x: int = 0 + y: int + + cfg = DefaultConfig(y=1) + assert cfg.x == 0 + cfg = DefaultConfig(x=2, y=1) + assert cfg.x == 2 + + +def test_nxconfig(): + assert isinstance(nx.config.backend_priority, BackendPriorities) + assert isinstance(nx.config.backend_priority.algos, list) + assert isinstance(nx.config.backends, Config) + with pytest.raises(TypeError, match="must be a list of backend names"): + nx.config.backend_priority.algos = "nx_loopback" + with pytest.raises(ValueError, match="Unknown backend when setting"): + nx.config.backend_priority.algos = ["this_almost_certainly_is_not_a_backend"] + with pytest.raises(TypeError, match="must be a Config of backend configs"): + nx.config.backends = {} + with pytest.raises(TypeError, match="must be a Config of backend configs"): + nx.config.backends = Config(plausible_backend_name={}) + with pytest.raises(ValueError, match="Unknown backend when setting"): + nx.config.backends = Config(this_almost_certainly_is_not_a_backend=Config()) + with pytest.raises(TypeError, match="must be True or False"): + nx.config.cache_converted_graphs = "bad value" + with pytest.raises(TypeError, match="must be a set of "): + nx.config.warnings_to_ignore = 7 + with pytest.raises(ValueError, match="Unknown warning "): + nx.config.warnings_to_ignore = {"bad value"} + + prev = nx.config.backend_priority + try: + nx.config.backend_priority = ["networkx"] + assert isinstance(nx.config.backend_priority, BackendPriorities) + assert nx.config.backend_priority.algos == ["networkx"] + finally: + nx.config.backend_priority = prev + + +def test_nxconfig_context(): + # We do some special handling so that `nx.config.backend_priority = val` + # actually does `nx.config.backend_priority.algos = val`. 
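+    # Both spellings below are therefore equivalent: `nx.config(backend_priority=val)`
+    # and `nx.config.backend_priority(algos=val)` temporarily set the same list,
+    # each restores the previous value on exit, and invalid values raise
+    # ValueError instead of being applied.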
+ orig = nx.config.backend_priority.algos + val = [] if orig else ["networkx"] + assert orig != val + assert nx.config.backend_priority.algos != val + with nx.config(backend_priority=val): + assert nx.config.backend_priority.algos == val + assert nx.config.backend_priority.algos == orig + with nx.config.backend_priority(algos=val): + assert nx.config.backend_priority.algos == val + assert nx.config.backend_priority.algos == orig + bad = ["bad-backend"] + with pytest.raises(ValueError, match="Unknown backend"): + nx.config.backend_priority = bad + with pytest.raises(ValueError, match="Unknown backend"): + with nx.config(backend_priority=bad): + pass + with pytest.raises(ValueError, match="Unknown backend"): + with nx.config.backend_priority(algos=bad): + pass + + +def test_not_strict(): + class FlexibleConfig(Config, strict=False): + x: int + + cfg = FlexibleConfig(x=1) + assert "_strict" not in cfg + assert len(cfg) == 1 + assert list(cfg) == ["x"] + assert list(cfg.keys()) == ["x"] + assert list(cfg.values()) == [1] + assert list(cfg.items()) == [("x", 1)] + assert cfg.x == 1 + assert cfg["x"] == 1 + assert "x" in cfg + assert hasattr(cfg, "x") + assert "FlexibleConfig(x=1)" in repr(cfg) + assert cfg == FlexibleConfig(x=1) + del cfg.x + assert "FlexibleConfig()" in repr(cfg) + assert len(cfg) == 0 + assert not hasattr(cfg, "x") + assert "x" not in cfg + assert not hasattr(cfg, "y") + assert "y" not in cfg + cfg.y = 2 + assert len(cfg) == 1 + assert list(cfg) == ["y"] + assert list(cfg.keys()) == ["y"] + assert list(cfg.values()) == [2] + assert list(cfg.items()) == [("y", 2)] + assert cfg.y == 2 + assert cfg["y"] == 2 + assert hasattr(cfg, "y") + assert "y" in cfg + del cfg["y"] + assert len(cfg) == 0 + assert list(cfg) == [] + with pytest.raises(AttributeError, match="y"): + del cfg.y + with pytest.raises(KeyError, match="y"): + del cfg["y"] + with pytest.raises(TypeError, match="missing 1 required keyword-only"): + FlexibleConfig() + # Be strict when first creating the config object + with pytest.raises(TypeError, match="unexpected keyword argument 'y'"): + FlexibleConfig(x=1, y=2) + + class FlexibleConfigWithDefault(Config, strict=False): + x: int = 0 + + assert FlexibleConfigWithDefault().x == 0 + assert FlexibleConfigWithDefault(x=1)["x"] == 1 + + +def test_context(): + cfg = Config(x=1) + with cfg(x=2) as c: + assert c.x == 2 + c.x = 3 + assert cfg.x == 3 + assert cfg.x == 1 + + with cfg(x=2) as c: + assert c == cfg + assert cfg.x == 2 + with cfg(x=3) as c2: + assert c2 == cfg + assert cfg.x == 3 + with pytest.raises(RuntimeError, match="context manager without"): + with cfg as c3: # Forgot to call `cfg(...)` + pass + assert cfg.x == 3 + assert cfg.x == 2 + assert cfg.x == 1 + + c = cfg(x=4) # Not yet as context (not recommended, but possible) + assert c == cfg + assert cfg.x == 4 + # Cheat by looking at internal data; context stack should only grow with __enter__ + assert cfg._prev is not None + assert cfg._context_stack == [] + with c: + assert c == cfg + assert cfg.x == 4 + assert cfg.x == 1 + # Cheat again; there was no preceding `cfg(...)` call this time + assert cfg._prev is None + with pytest.raises(RuntimeError, match="context manager without"): + with cfg: + pass + assert cfg.x == 1 diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_decorators.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_decorators.py new file mode 100644 index 0000000..0a4aeab --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_decorators.py @@ -0,0 +1,510 @@ +import os +import pathlib +import random +import tempfile + +import pytest + +import networkx as nx +from networkx.utils.decorators import ( + argmap, + not_implemented_for, + np_random_state, + open_file, + py_random_state, +) +from networkx.utils.misc import PythonRandomInterface, PythonRandomViaNumpyBits + + +def test_not_implemented_decorator(): + @not_implemented_for("directed") + def test_d(G): + pass + + test_d(nx.Graph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_d(nx.DiGraph()) + + @not_implemented_for("undirected") + def test_u(G): + pass + + test_u(nx.DiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_u(nx.Graph()) + + @not_implemented_for("multigraph") + def test_m(G): + pass + + test_m(nx.Graph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_m(nx.MultiGraph()) + + @not_implemented_for("graph") + def test_g(G): + pass + + test_g(nx.MultiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_g(nx.Graph()) + + # not MultiDiGraph (multiple arguments => AND) + @not_implemented_for("directed", "multigraph") + def test_not_md(G): + pass + + test_not_md(nx.Graph()) + test_not_md(nx.DiGraph()) + test_not_md(nx.MultiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_not_md(nx.MultiDiGraph()) + + # Graph only (multiple decorators => OR) + @not_implemented_for("directed") + @not_implemented_for("multigraph") + def test_graph_only(G): + pass + + test_graph_only(nx.Graph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_graph_only(nx.DiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_graph_only(nx.MultiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_graph_only(nx.MultiDiGraph()) + + with pytest.raises(ValueError): + not_implemented_for("directed", "undirected") + + with pytest.raises(ValueError): + not_implemented_for("multigraph", "graph") + + +def test_not_implemented_decorator_key(): + with pytest.raises(KeyError): + + @not_implemented_for("foo") + def test1(G): + pass + + test1(nx.Graph()) + + +def test_not_implemented_decorator_raise(): + with pytest.raises(nx.NetworkXNotImplemented): + + @not_implemented_for("graph") + def test1(G): + pass + + test1(nx.Graph()) + + +class TestOpenFileDecorator: + def setup_method(self): + self.text = ["Blah... 
", "BLAH ", "BLAH!!!!"] + self.fobj = tempfile.NamedTemporaryFile("wb+", delete=False) + self.name = self.fobj.name + + def teardown_method(self): + self.fobj.close() + os.unlink(self.name) + + def write(self, path): + for text in self.text: + path.write(text.encode("ascii")) + + @open_file(1, "r") + def read(self, path): + return path.readlines()[0] + + @staticmethod + @open_file(0, "wb") + def writer_arg0(path): + path.write(b"demo") + + @open_file(1, "wb+") + def writer_arg1(self, path): + self.write(path) + + @open_file(2, "wb") + def writer_arg2default(self, x, path=None): + if path is None: + with tempfile.NamedTemporaryFile("wb+") as fh: + self.write(fh) + else: + self.write(path) + + @open_file(4, "wb") + def writer_arg4default(self, x, y, other="hello", path=None, **kwargs): + if path is None: + with tempfile.NamedTemporaryFile("wb+") as fh: + self.write(fh) + else: + self.write(path) + + @open_file("path", "wb") + def writer_kwarg(self, **kwargs): + path = kwargs.get("path", None) + if path is None: + with tempfile.NamedTemporaryFile("wb+") as fh: + self.write(fh) + else: + self.write(path) + + def test_writer_arg0_str(self): + self.writer_arg0(self.name) + + def test_writer_arg0_fobj(self): + self.writer_arg0(self.fobj) + + def test_writer_arg0_pathlib(self): + self.writer_arg0(pathlib.Path(self.name)) + + def test_writer_arg1_str(self): + self.writer_arg1(self.name) + assert self.read(self.name) == "".join(self.text) + + def test_writer_arg1_fobj(self): + self.writer_arg1(self.fobj) + assert not self.fobj.closed + self.fobj.close() + assert self.read(self.name) == "".join(self.text) + + def test_writer_arg2default_str(self): + self.writer_arg2default(0, path=None) + self.writer_arg2default(0, path=self.name) + assert self.read(self.name) == "".join(self.text) + + def test_writer_arg2default_fobj(self): + self.writer_arg2default(0, path=self.fobj) + assert not self.fobj.closed + self.fobj.close() + assert self.read(self.name) == "".join(self.text) + + def test_writer_arg2default_fobj_path_none(self): + self.writer_arg2default(0, path=None) + + def test_writer_arg4default_fobj(self): + self.writer_arg4default(0, 1, dog="dog", other="other") + self.writer_arg4default(0, 1, dog="dog", other="other", path=self.name) + assert self.read(self.name) == "".join(self.text) + + def test_writer_kwarg_str(self): + self.writer_kwarg(path=self.name) + assert self.read(self.name) == "".join(self.text) + + def test_writer_kwarg_fobj(self): + self.writer_kwarg(path=self.fobj) + self.fobj.close() + assert self.read(self.name) == "".join(self.text) + + def test_writer_kwarg_path_none(self): + self.writer_kwarg(path=None) + + +class TestRandomState: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + @np_random_state(1) + def instantiate_np_random_state(self, random_state): + allowed = (np.random.RandomState, np.random.Generator) + assert isinstance(random_state, allowed) + return random_state.random() + + @py_random_state(1) + def instantiate_py_random_state(self, random_state): + allowed = (random.Random, PythonRandomInterface, PythonRandomViaNumpyBits) + assert isinstance(random_state, allowed) + return random_state.random() + + def test_random_state_None(self): + np.random.seed(42) + rv = np.random.random() + np.random.seed(42) + assert rv == self.instantiate_np_random_state(None) + + random.seed(42) + rv = random.random() + random.seed(42) + assert rv == self.instantiate_py_random_state(None) + + def test_random_state_np_random(self): + np.random.seed(42) 
+ rv = np.random.random() + np.random.seed(42) + assert rv == self.instantiate_np_random_state(np.random) + np.random.seed(42) + assert rv == self.instantiate_py_random_state(np.random) + + def test_random_state_int(self): + np.random.seed(42) + np_rv = np.random.random() + random.seed(42) + py_rv = random.random() + + np.random.seed(42) + seed = 1 + rval = self.instantiate_np_random_state(seed) + rval_expected = np.random.RandomState(seed).rand() + assert rval == rval_expected + # test that global seed wasn't changed in function + assert np_rv == np.random.random() + + random.seed(42) + rval = self.instantiate_py_random_state(seed) + rval_expected = random.Random(seed).random() + assert rval == rval_expected + # test that global seed wasn't changed in function + assert py_rv == random.random() + + def test_random_state_np_random_Generator(self): + np.random.seed(42) + np_rv = np.random.random() + np.random.seed(42) + seed = 1 + + rng = np.random.default_rng(seed) + rval = self.instantiate_np_random_state(rng) + rval_expected = np.random.default_rng(seed).random() + assert rval == rval_expected + + rval = self.instantiate_py_random_state(rng) + rval_expected = np.random.default_rng(seed).random(size=2)[1] + assert rval == rval_expected + # test that global seed wasn't changed in function + assert np_rv == np.random.random() + + def test_random_state_np_random_RandomState(self): + np.random.seed(42) + np_rv = np.random.random() + np.random.seed(42) + seed = 1 + + rng = np.random.RandomState(seed) + rval = self.instantiate_np_random_state(rng) + rval_expected = np.random.RandomState(seed).random() + assert rval == rval_expected + + rval = self.instantiate_py_random_state(rng) + rval_expected = np.random.RandomState(seed).random(size=2)[1] + assert rval == rval_expected + # test that global seed wasn't changed in function + assert np_rv == np.random.random() + + def test_random_state_py_random(self): + seed = 1 + rng = random.Random(seed) + rv = self.instantiate_py_random_state(rng) + assert rv == random.Random(seed).random() + + pytest.raises(ValueError, self.instantiate_np_random_state, rng) + + +def test_random_state_string_arg_index(): + with pytest.raises(nx.NetworkXError): + + @np_random_state("a") + def make_random_state(rs): + pass + + rstate = make_random_state(1) + + +def test_py_random_state_string_arg_index(): + with pytest.raises(nx.NetworkXError): + + @py_random_state("a") + def make_random_state(rs): + pass + + rstate = make_random_state(1) + + +def test_random_state_invalid_arg_index(): + with pytest.raises(nx.NetworkXError): + + @np_random_state(2) + def make_random_state(rs): + pass + + rstate = make_random_state(1) + + +def test_py_random_state_invalid_arg_index(): + with pytest.raises(nx.NetworkXError): + + @py_random_state(2) + def make_random_state(rs): + pass + + rstate = make_random_state(1) + + +class TestArgmap: + class ArgmapError(RuntimeError): + pass + + def test_trivial_function(self): + def do_not_call(x): + raise ArgmapError("do not call this function") + + @argmap(do_not_call) + def trivial_argmap(): + return 1 + + assert trivial_argmap() == 1 + + def test_trivial_iterator(self): + def do_not_call(x): + raise ArgmapError("do not call this function") + + @argmap(do_not_call) + def trivial_argmap(): + yield from (1, 2, 3) + + assert tuple(trivial_argmap()) == (1, 2, 3) + + def test_contextmanager(self): + container = [] + + def contextmanager(x): + nonlocal container + return x, lambda: container.append(x) + + @argmap(contextmanager, 0, 1, 2, try_finally=True) + 
def foo(x, y, z): + return x, y, z + + x, y, z = foo("a", "b", "c") + + # context exits are called in reverse + assert container == ["c", "b", "a"] + + def test_tryfinally_generator(self): + container = [] + + def singleton(x): + return (x,) + + with pytest.raises(nx.NetworkXError): + + @argmap(singleton, 0, 1, 2, try_finally=True) + def foo(x, y, z): + yield from (x, y, z) + + @argmap(singleton, 0, 1, 2) + def foo(x, y, z): + return x + y + z + + q = foo("a", "b", "c") + + assert q == ("a", "b", "c") + + def test_actual_vararg(self): + @argmap(lambda x: -x, 4) + def foo(x, y, *args): + return (x, y) + tuple(args) + + assert foo(1, 2, 3, 4, 5, 6) == (1, 2, 3, 4, -5, 6) + + def test_signature_destroying_intermediate_decorator(self): + def add_one_to_first_bad_decorator(f): + """Bad because it doesn't wrap the f signature (clobbers it)""" + + def decorated(a, *args, **kwargs): + return f(a + 1, *args, **kwargs) + + return decorated + + add_two_to_second = argmap(lambda b: b + 2, 1) + + @add_two_to_second + @add_one_to_first_bad_decorator + def add_one_and_two(a, b): + return a, b + + assert add_one_and_two(5, 5) == (6, 7) + + def test_actual_kwarg(self): + @argmap(lambda x: -x, "arg") + def foo(*, arg): + return arg + + assert foo(arg=3) == -3 + + def test_nested_tuple(self): + def xform(x, y): + u, v = y + return x + u + v, (x + u, x + v) + + # we're testing args and kwargs here, too + @argmap(xform, (0, ("t", 2))) + def foo(a, *args, **kwargs): + return a, args, kwargs + + a, args, kwargs = foo(1, 2, 3, t=4) + + assert a == 1 + 4 + 3 + assert args == (2, 1 + 3) + assert kwargs == {"t": 1 + 4} + + def test_flatten(self): + assert tuple(argmap._flatten([[[[[], []], [], []], [], [], []]], set())) == () + + rlist = ["a", ["b", "c"], [["d"], "e"], "f"] + assert "".join(argmap._flatten(rlist, set())) == "abcdef" + + def test_indent(self): + code = "\n".join( + argmap._indent( + *[ + "try:", + "try:", + "pass#", + "finally:", + "pass#", + "#", + "finally:", + "pass#", + ] + ) + ) + assert ( + code + == """try: + try: + pass# + finally: + pass# + # +finally: + pass#""" + ) + + def test_immediate_raise(self): + @not_implemented_for("directed") + def yield_nodes(G): + yield from G + + G = nx.Graph([(1, 2)]) + D = nx.DiGraph() + + # test first call (argmap is compiled and executed) + with pytest.raises(nx.NetworkXNotImplemented): + node_iter = yield_nodes(D) + + # test second call (argmap is only executed) + with pytest.raises(nx.NetworkXNotImplemented): + node_iter = yield_nodes(D) + + # ensure that generators still make generators + node_iter = yield_nodes(G) + next(node_iter) + next(node_iter) + with pytest.raises(StopIteration): + next(node_iter) diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_heaps.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_heaps.py new file mode 100644 index 0000000..5ea3871 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_heaps.py @@ -0,0 +1,131 @@ +import pytest + +import networkx as nx +from networkx.utils import BinaryHeap, PairingHeap + + +class X: + def __eq__(self, other): + raise self is other + + def __ne__(self, other): + raise self is not other + + def __lt__(self, other): + raise TypeError("cannot compare") + + def __le__(self, other): + raise TypeError("cannot compare") + + def __ge__(self, other): + raise TypeError("cannot compare") + + def __gt__(self, other): + raise TypeError("cannot compare") + + def __hash__(self): + return hash(id(self)) + + +x = X() + + +data = [ # min should 
not invent an element. + ("min", nx.NetworkXError), + # Popping an empty heap should fail. + ("pop", nx.NetworkXError), + # Getting nonexisting elements should return None. + ("get", 0, None), + ("get", x, None), + ("get", None, None), + # Inserting a new key should succeed. + ("insert", x, 1, True), + ("get", x, 1), + ("min", (x, 1)), + # min should not pop the top element. + ("min", (x, 1)), + # Inserting a new key of different type should succeed. + ("insert", 1, -2.0, True), + # int and float values should interop. + ("min", (1, -2.0)), + # pop removes minimum-valued element. + ("insert", 3, -(10**100), True), + ("insert", 4, 5, True), + ("pop", (3, -(10**100))), + ("pop", (1, -2.0)), + # Decrease-insert should succeed. + ("insert", 4, -50, True), + ("insert", 4, -60, False, True), + # Decrease-insert should not create duplicate keys. + ("pop", (4, -60)), + ("pop", (x, 1)), + # Popping all elements should empty the heap. + ("min", nx.NetworkXError), + ("pop", nx.NetworkXError), + # Non-value-changing insert should fail. + ("insert", x, 0, True), + ("insert", x, 0, False, False), + ("min", (x, 0)), + ("insert", x, 0, True, False), + ("min", (x, 0)), + # Failed insert should not create duplicate keys. + ("pop", (x, 0)), + ("pop", nx.NetworkXError), + # Increase-insert should succeed when allowed. + ("insert", None, 0, True), + ("insert", 2, -1, True), + ("min", (2, -1)), + ("insert", 2, 1, True, False), + ("min", (None, 0)), + # Increase-insert should fail when disallowed. + ("insert", None, 2, False, False), + ("min", (None, 0)), + # Failed increase-insert should not create duplicate keys. + ("pop", (None, 0)), + ("pop", (2, 1)), + ("min", nx.NetworkXError), + ("pop", nx.NetworkXError), +] + + +def _test_heap_class(cls, *args, **kwargs): + heap = cls(*args, **kwargs) + # Basic behavioral test + for op in data: + if op[-1] is not nx.NetworkXError: + assert op[-1] == getattr(heap, op[0])(*op[1:-1]) + else: + pytest.raises(op[-1], getattr(heap, op[0]), *op[1:-1]) + # Coverage test. 
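+    # The loops below pin down the insert() contract: it returns True for a
+    # new key or a decreased value; an increase takes effect only when
+    # allow_increase=True is passed, and even then insert() reports False.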
+ for i in range(99, -1, -1): + assert heap.insert(i, i) + for i in range(50): + assert heap.pop() == (i, i) + for i in range(100): + assert heap.insert(i, i) == (i < 50) + for i in range(100): + assert not heap.insert(i, i + 1) + for i in range(50): + assert heap.pop() == (i, i) + for i in range(100): + assert heap.insert(i, i + 1) == (i < 50) + for i in range(49): + assert heap.pop() == (i, i + 1) + assert sorted([heap.pop(), heap.pop()]) == [(49, 50), (50, 50)] + for i in range(51, 100): + assert not heap.insert(i, i + 1, True) + for i in range(51, 70): + assert heap.pop() == (i, i + 1) + for i in range(100): + assert heap.insert(i, i) + for i in range(100): + assert heap.pop() == (i, i) + pytest.raises(nx.NetworkXError, heap.pop) + + +def test_PairingHeap(): + _test_heap_class(PairingHeap) + + +def test_BinaryHeap(): + _test_heap_class(BinaryHeap) diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_mapped_queue.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_mapped_queue.py new file mode 100644 index 0000000..ca9b7e4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_mapped_queue.py @@ -0,0 +1,268 @@ +import pytest + +from networkx.utils.mapped_queue import MappedQueue, _HeapElement + + +def test_HeapElement_gtlt(): + bar = _HeapElement(1.1, "a") + foo = _HeapElement(1, "b") + assert foo < bar + assert bar > foo + assert foo < 1.1 + assert 1 < bar + + +def test_HeapElement_gtlt_tied_priority(): + bar = _HeapElement(1, "a") + foo = _HeapElement(1, "b") + assert foo > bar + assert bar < foo + + +def test_HeapElement_eq(): + bar = _HeapElement(1.1, "a") + foo = _HeapElement(1, "a") + assert foo == bar + assert bar == foo + assert foo == "a" + + +def test_HeapElement_iter(): + foo = _HeapElement(1, "a") + bar = _HeapElement(1.1, (3, 2, 1)) + assert list(foo) == [1, "a"] + assert list(bar) == [1.1, 3, 2, 1] + + +def test_HeapElement_getitem(): + foo = _HeapElement(1, "a") + bar = _HeapElement(1.1, (3, 2, 1)) + assert foo[1] == "a" + assert foo[0] == 1 + assert bar[0] == 1.1 + assert bar[2] == 2 + assert bar[3] == 1 + pytest.raises(IndexError, bar.__getitem__, 4) + pytest.raises(IndexError, foo.__getitem__, 2) + + +class TestMappedQueue: + def setup_method(self): + pass + + def _check_map(self, q): + assert q.position == {elt: pos for pos, elt in enumerate(q.heap)} + + def _make_mapped_queue(self, h): + q = MappedQueue() + q.heap = h + q.position = {elt: pos for pos, elt in enumerate(h)} + return q + + def test_heapify(self): + h = [5, 4, 3, 2, 1, 0] + q = self._make_mapped_queue(h) + q._heapify() + self._check_map(q) + + def test_init(self): + h = [5, 4, 3, 2, 1, 0] + q = MappedQueue(h) + self._check_map(q) + + def test_incomparable(self): + h = [5, 4, "a", 2, 1, 0] + pytest.raises(TypeError, MappedQueue, h) + + def test_len(self): + h = [5, 4, 3, 2, 1, 0] + q = MappedQueue(h) + self._check_map(q) + assert len(q) == 6 + + def test_siftup_leaf(self): + h = [2] + h_sifted = [2] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftup_one_child(self): + h = [2, 0] + h_sifted = [0, 2] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftup_left_child(self): + h = [2, 0, 1] + h_sifted = [0, 2, 1] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftup_right_child(self): + h = [2, 1, 0] + h_sifted = [0, 1, 2] + q = self._make_mapped_queue(h) + 
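+        # _siftup(pos) moves the element at pos down toward the leaves;
+        # _siftdown(start, pos) floats it up toward the root. Both keep the
+        # `position` map in sync, which _check_map verifies.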
q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftup_multiple(self): + h = [0, 1, 2, 4, 3, 5, 6] + h_sifted = [0, 1, 2, 4, 3, 5, 6] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftdown_leaf(self): + h = [2] + h_sifted = [2] + q = self._make_mapped_queue(h) + q._siftdown(0, 0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftdown_single(self): + h = [1, 0] + h_sifted = [0, 1] + q = self._make_mapped_queue(h) + q._siftdown(0, len(h) - 1) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftdown_multiple(self): + h = [1, 2, 3, 4, 5, 6, 7, 0] + h_sifted = [0, 1, 3, 2, 5, 6, 7, 4] + q = self._make_mapped_queue(h) + q._siftdown(0, len(h) - 1) + assert q.heap == h_sifted + self._check_map(q) + + def test_push(self): + to_push = [6, 1, 4, 3, 2, 5, 0] + h_sifted = [0, 2, 1, 6, 3, 5, 4] + q = MappedQueue() + for elt in to_push: + q.push(elt) + assert q.heap == h_sifted + self._check_map(q) + + def test_push_duplicate(self): + to_push = [2, 1, 0] + h_sifted = [0, 2, 1] + q = MappedQueue() + for elt in to_push: + inserted = q.push(elt) + assert inserted + assert q.heap == h_sifted + self._check_map(q) + inserted = q.push(1) + assert not inserted + + def test_pop(self): + h = [3, 4, 6, 0, 1, 2, 5] + h_sorted = sorted(h) + q = self._make_mapped_queue(h) + q._heapify() + popped = [q.pop() for _ in range(len(h))] + assert popped == h_sorted + self._check_map(q) + + def test_remove_leaf(self): + h = [0, 2, 1, 6, 3, 5, 4] + h_removed = [0, 2, 1, 6, 4, 5] + q = self._make_mapped_queue(h) + removed = q.remove(3) + assert q.heap == h_removed + + def test_remove_root(self): + h = [0, 2, 1, 6, 3, 5, 4] + h_removed = [1, 2, 4, 6, 3, 5] + q = self._make_mapped_queue(h) + removed = q.remove(0) + assert q.heap == h_removed + + def test_update_leaf(self): + h = [0, 20, 10, 60, 30, 50, 40] + h_updated = [0, 15, 10, 60, 20, 50, 40] + q = self._make_mapped_queue(h) + removed = q.update(30, 15) + assert q.heap == h_updated + + def test_update_root(self): + h = [0, 20, 10, 60, 30, 50, 40] + h_updated = [10, 20, 35, 60, 30, 50, 40] + q = self._make_mapped_queue(h) + removed = q.update(0, 35) + assert q.heap == h_updated + + +class TestMappedDict(TestMappedQueue): + def _make_mapped_queue(self, h): + priority_dict = {elt: elt for elt in h} + return MappedQueue(priority_dict) + + def test_init(self): + d = {5: 0, 4: 1, "a": 2, 2: 3, 1: 4} + q = MappedQueue(d) + assert q.position == d + + def test_ties(self): + d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4} + q = MappedQueue(d) + assert q.position == {elt: pos for pos, elt in enumerate(q.heap)} + + def test_pop(self): + d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4} + q = MappedQueue(d) + assert q.pop() == _HeapElement(0, 5) + assert q.position == {elt: pos for pos, elt in enumerate(q.heap)} + + def test_empty_pop(self): + q = MappedQueue() + pytest.raises(IndexError, q.pop) + + def test_incomparable_ties(self): + d = {5: 0, 4: 0, "a": 0, 2: 0, 1: 0} + pytest.raises(TypeError, MappedQueue, d) + + def test_push(self): + to_push = [6, 1, 4, 3, 2, 5, 0] + h_sifted = [0, 2, 1, 6, 3, 5, 4] + q = MappedQueue() + for elt in to_push: + q.push(elt, priority=elt) + assert q.heap == h_sifted + self._check_map(q) + + def test_push_duplicate(self): + to_push = [2, 1, 0] + h_sifted = [0, 2, 1] + q = MappedQueue() + for elt in to_push: + inserted = q.push(elt, priority=elt) + assert inserted + assert q.heap == h_sifted + self._check_map(q) + inserted = q.push(1, priority=1) + assert not 
inserted + + def test_update_leaf(self): + h = [0, 20, 10, 60, 30, 50, 40] + h_updated = [0, 15, 10, 60, 20, 50, 40] + q = self._make_mapped_queue(h) + removed = q.update(30, 15, priority=15) + assert q.heap == h_updated + + def test_update_root(self): + h = [0, 20, 10, 60, 30, 50, 40] + h_updated = [10, 20, 35, 60, 30, 50, 40] + q = self._make_mapped_queue(h) + removed = q.update(0, 35, priority=35) + assert q.heap == h_updated diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_misc.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_misc.py new file mode 100644 index 0000000..f9d70d9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_misc.py @@ -0,0 +1,268 @@ +import random +from copy import copy + +import pytest + +import networkx as nx +from networkx.utils import ( + PythonRandomInterface, + PythonRandomViaNumpyBits, + arbitrary_element, + create_py_random_state, + create_random_state, + dict_to_numpy_array, + discrete_sequence, + flatten, + groups, + make_list_of_ints, + pairwise, + powerlaw_sequence, +) +from networkx.utils.misc import _dict_to_numpy_array1, _dict_to_numpy_array2 + +nested_depth = ( + 1, + 2, + (3, 4, ((5, 6, (7,), (8, (9, 10), 11), (12, 13, (14, 15)), 16), 17), 18, 19), + 20, +) + +nested_set = { + (1, 2, 3, 4), + (5, 6, 7, 8, 9), + (10, 11, (12, 13, 14), (15, 16, 17, 18)), + 19, + 20, +} + +nested_mixed = [ + 1, + (2, 3, {4, (5, 6), 7}, [8, 9]), + {10: "foo", 11: "bar", (12, 13): "baz"}, + {(14, 15): "qwe", 16: "asd"}, + (17, (18, "19"), 20), +] + + +@pytest.mark.parametrize("result", [None, [], ["existing"], ["existing1", "existing2"]]) +@pytest.mark.parametrize("nested", [nested_depth, nested_mixed, nested_set]) +def test_flatten(nested, result): + if result is None: + val = flatten(nested, result) + assert len(val) == 20 + else: + _result = copy(result) # because pytest passes parameters as is + nexisting = len(_result) + val = flatten(nested, _result) + assert len(val) == len(_result) == 20 + nexisting + + assert issubclass(type(val), tuple) + + +def test_make_list_of_ints(): + mylist = [1, 2, 3.0, 42, -2] + assert make_list_of_ints(mylist) is mylist + assert make_list_of_ints(mylist) == mylist + assert isinstance(make_list_of_ints(mylist)[2], int) + pytest.raises(nx.NetworkXError, make_list_of_ints, [1, 2, 3, "kermit"]) + pytest.raises(nx.NetworkXError, make_list_of_ints, [1, 2, 3.1]) + + +def test_random_number_distribution(): + # smoke test only + z = powerlaw_sequence(20, exponent=2.5) + z = discrete_sequence(20, distribution=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3]) + + +class TestNumpyArray: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def test_numpy_to_list_of_ints(self): + a = np.array([1, 2, 3], dtype=np.int64) + b = np.array([1.0, 2, 3]) + c = np.array([1.1, 2, 3]) + assert isinstance(make_list_of_ints(a), list) + assert make_list_of_ints(b) == list(b) + B = make_list_of_ints(b) + assert isinstance(B[0], int) + pytest.raises(nx.NetworkXError, make_list_of_ints, c) + + def test__dict_to_numpy_array1(self): + d = {"a": 1, "b": 2} + a = _dict_to_numpy_array1(d, mapping={"a": 0, "b": 1}) + np.testing.assert_allclose(a, np.array([1, 2])) + a = _dict_to_numpy_array1(d, mapping={"b": 0, "a": 1}) + np.testing.assert_allclose(a, np.array([2, 1])) + + a = _dict_to_numpy_array1(d) + np.testing.assert_allclose(a.sum(), 3) + + def test__dict_to_numpy_array2(self): + d = {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}} + + mapping = {"a": 1, "b": 0} + a = 
_dict_to_numpy_array2(d, mapping=mapping) + np.testing.assert_allclose(a, np.array([[20, 10], [2, 1]])) + + a = _dict_to_numpy_array2(d) + np.testing.assert_allclose(a.sum(), 33) + + def test_dict_to_numpy_array_a(self): + d = {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}} + + mapping = {"a": 0, "b": 1} + a = dict_to_numpy_array(d, mapping=mapping) + np.testing.assert_allclose(a, np.array([[1, 2], [10, 20]])) + + mapping = {"a": 1, "b": 0} + a = dict_to_numpy_array(d, mapping=mapping) + np.testing.assert_allclose(a, np.array([[20, 10], [2, 1]])) + + a = _dict_to_numpy_array2(d) + np.testing.assert_allclose(a.sum(), 33) + + def test_dict_to_numpy_array_b(self): + d = {"a": 1, "b": 2} + + mapping = {"a": 0, "b": 1} + a = dict_to_numpy_array(d, mapping=mapping) + np.testing.assert_allclose(a, np.array([1, 2])) + + a = _dict_to_numpy_array1(d) + np.testing.assert_allclose(a.sum(), 3) + + +def test_pairwise(): + nodes = range(4) + node_pairs = [(0, 1), (1, 2), (2, 3)] + node_pairs_cycle = node_pairs + [(3, 0)] + assert list(pairwise(nodes)) == node_pairs + assert list(pairwise(iter(nodes))) == node_pairs + assert list(pairwise(nodes, cyclic=True)) == node_pairs_cycle + empty_iter = iter(()) + assert list(pairwise(empty_iter)) == [] + empty_iter = iter(()) + assert list(pairwise(empty_iter, cyclic=True)) == [] + + +def test_groups(): + many_to_one = dict(zip("abcde", [0, 0, 1, 1, 2])) + actual = groups(many_to_one) + expected = {0: {"a", "b"}, 1: {"c", "d"}, 2: {"e"}} + assert actual == expected + assert {} == groups({}) + + +def test_create_random_state(): + np = pytest.importorskip("numpy") + rs = np.random.RandomState + + assert isinstance(create_random_state(1), rs) + assert isinstance(create_random_state(None), rs) + assert isinstance(create_random_state(np.random), rs) + assert isinstance(create_random_state(rs(1)), rs) + # Support for numpy.random.Generator + rng = np.random.default_rng() + assert isinstance(create_random_state(rng), np.random.Generator) + pytest.raises(ValueError, create_random_state, "a") + + assert np.all(rs(1).rand(10) == create_random_state(1).rand(10)) + + +def test_create_py_random_state(): + pyrs = random.Random + + assert isinstance(create_py_random_state(1), pyrs) + assert isinstance(create_py_random_state(None), pyrs) + assert isinstance(create_py_random_state(pyrs(1)), pyrs) + pytest.raises(ValueError, create_py_random_state, "a") + + np = pytest.importorskip("numpy") + + rs = np.random.RandomState + rng = np.random.default_rng(1000) + rng_explicit = np.random.Generator(np.random.SFC64()) + old_nprs = PythonRandomInterface + nprs = PythonRandomViaNumpyBits + assert isinstance(create_py_random_state(np.random), nprs) + assert isinstance(create_py_random_state(rs(1)), old_nprs) + assert isinstance(create_py_random_state(rng), nprs) + assert isinstance(create_py_random_state(rng_explicit), nprs) + # test default rng input + assert isinstance(PythonRandomInterface(), old_nprs) + assert isinstance(PythonRandomViaNumpyBits(), nprs) + + # VeryLargeIntegers Smoke test (they raise error for np.random) + int64max = 9223372036854775807 # from np.iinfo(np.int64).max + for r in (rng, rs(1)): + prs = create_py_random_state(r) + prs.randrange(3, int64max + 5) + prs.randint(3, int64max + 5) + + +def test_PythonRandomInterface_RandomState(): + np = pytest.importorskip("numpy") + + seed = 42 + rs = np.random.RandomState + rng = PythonRandomInterface(rs(seed)) + rs42 = rs(seed) + + # make sure these functions are same as expected outcome + assert rng.randrange(3, 5) == 
rs42.randint(3, 5)
+    assert rng.choice([1, 2, 3]) == rs42.choice([1, 2, 3])
+    assert rng.gauss(0, 1) == rs42.normal(0, 1)
+    assert rng.expovariate(1.5) == rs42.exponential(1 / 1.5)
+    assert np.all(rng.shuffle([1, 2, 3]) == rs42.shuffle([1, 2, 3]))
+    assert np.all(
+        rng.sample([1, 2, 3], 2) == rs42.choice([1, 2, 3], (2,), replace=False)
+    )
+    assert np.all(
+        [rng.randint(3, 5) for _ in range(100)]
+        == [rs42.randint(3, 6) for _ in range(100)]
+    )
+    assert rng.random() == rs42.random_sample()
+
+
+def test_PythonRandomInterface_Generator():
+    np = pytest.importorskip("numpy")
+
+    seed = 42
+    rng = np.random.default_rng(seed)
+    pri = PythonRandomInterface(np.random.default_rng(seed))
+
+    # make sure these functions are same as expected outcome
+    assert pri.randrange(3, 5) == rng.integers(3, 5)
+    assert pri.choice([1, 2, 3]) == rng.choice([1, 2, 3])
+    assert pri.gauss(0, 1) == rng.normal(0, 1)
+    assert pri.expovariate(1.5) == rng.exponential(1 / 1.5)
+    assert np.all(pri.shuffle([1, 2, 3]) == rng.shuffle([1, 2, 3]))
+    assert np.all(
+        pri.sample([1, 2, 3], 2) == rng.choice([1, 2, 3], (2,), replace=False)
+    )
+    assert np.all(
+        [pri.randint(3, 5) for _ in range(100)]
+        == [rng.integers(3, 6) for _ in range(100)]
+    )
+    assert pri.random() == rng.random()
+
+
+@pytest.mark.parametrize(
+    ("iterable_type", "expected"), ((list, 1), (tuple, 1), (str, "["), (set, 1))
+)
+def test_arbitrary_element(iterable_type, expected):
+    iterable = iterable_type([1, 2, 3])
+    assert arbitrary_element(iterable) == expected
+
+
+@pytest.mark.parametrize(
+    "iterator",
+    ((i for i in range(3)), iter([1, 2, 3])),  # generator
+)
+def test_arbitrary_element_raises(iterator):
+    """Value error is raised when input is an iterator."""
+    with pytest.raises(ValueError, match="from an iterator"):
+        arbitrary_element(iterator)
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_random_sequence.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_random_sequence.py new file mode 100644 index 0000000..1d1b957 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_random_sequence.py @@ -0,0 +1,38 @@
+import pytest
+
+from networkx.utils import (
+    powerlaw_sequence,
+    random_weighted_sample,
+    weighted_choice,
+    zipf_rv,
+)
+
+
+def test_degree_sequences():
+    seq = powerlaw_sequence(10, seed=1)
+    seq = powerlaw_sequence(10)
+    assert len(seq) == 10
+
+
+def test_zipf_rv():
+    r = zipf_rv(2.3, xmin=2, seed=1)
+    r = zipf_rv(2.3, 2, 1)
+    r = zipf_rv(2.3)
+    assert isinstance(r, int)
+    pytest.raises(ValueError, zipf_rv, 0.5)
+    pytest.raises(ValueError, zipf_rv, 2, xmin=0)
+
+
+def test_random_weighted_sample():
+    mapping = {"a": 10, "b": 20}
+    s = random_weighted_sample(mapping, 2, seed=1)
+    s = random_weighted_sample(mapping, 2)
+    assert sorted(s) == sorted(mapping.keys())
+    pytest.raises(ValueError, random_weighted_sample, mapping, 3)
+
+
+def test_random_weighted_choice():
+    mapping = {"a": 10, "b": 0}
+    c = weighted_choice(mapping, seed=1)
+    c = weighted_choice(mapping)
+    assert c == "a"
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_rcm.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_rcm.py new file mode 100644 index 0000000..88702b3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_rcm.py @@ -0,0 +1,63 @@
+import networkx as nx
+from networkx.utils import reverse_cuthill_mckee_ordering
+
+
+def test_reverse_cuthill_mckee():
+    # example graph from
+    # 
http://www.boost.org/doc/libs/1_37_0/libs/graph/example/cuthill_mckee_ordering.cpp
+    G = nx.Graph(
+        [
+            (0, 3),
+            (0, 5),
+            (1, 2),
+            (1, 4),
+            (1, 6),
+            (1, 9),
+            (2, 3),
+            (2, 4),
+            (3, 5),
+            (3, 8),
+            (4, 6),
+            (5, 6),
+            (5, 7),
+            (6, 7),
+        ]
+    )
+    rcm = list(reverse_cuthill_mckee_ordering(G))
+    assert rcm in [[0, 8, 5, 7, 3, 6, 2, 4, 1, 9], [0, 8, 5, 7, 3, 6, 4, 2, 1, 9]]
+
+
+def test_rcm_alternate_heuristic():
+    # example from
+    G = nx.Graph(
+        [
+            (0, 0),
+            (0, 4),
+            (1, 1),
+            (1, 2),
+            (1, 5),
+            (1, 7),
+            (2, 2),
+            (2, 4),
+            (3, 3),
+            (3, 6),
+            (4, 4),
+            (5, 5),
+            (5, 7),
+            (6, 6),
+            (7, 7),
+        ]
+    )
+
+    answers = [
+        [6, 3, 5, 7, 1, 2, 4, 0],
+        [6, 3, 7, 5, 1, 2, 4, 0],
+        [7, 5, 1, 2, 4, 0, 6, 3],
+    ]
+
+    def smallest_degree(G):
+        deg, node = min((d, n) for n, d in G.degree())
+        return node
+
+    rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))
+    assert rcm in answers
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_unionfind.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_unionfind.py new file mode 100644 index 0000000..2d30580 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_unionfind.py @@ -0,0 +1,55 @@
+import networkx as nx
+
+
+def test_unionfind():
+    # Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8
+    # Previously (in 2.x), the UnionFind class could handle mixed types.
+    # But in Python 3.x, this causes a TypeError such as:
+    # TypeError: unorderable types: str() > int()
+    #
+    # Now we just make sure that no exception is raised.
+    x = nx.utils.UnionFind()
+    x.union(0, "a")
+
+
+def test_subtree_union():
+    # See https://github.com/networkx/networkx/pull/3224
+    # (35db1b551ee65780794a357794f521d8768d5049).
+    # Test if subtree unions are handled correctly by to_sets().
+    uf = nx.utils.UnionFind()
+    uf.union(1, 2)
+    uf.union(3, 4)
+    uf.union(4, 5)
+    uf.union(1, 5)
+    assert list(uf.to_sets()) == [{1, 2, 3, 4, 5}]
+
+
+def test_unionfind_weights():
+    # Tests if weights are computed correctly with unions of many elements
+    uf = nx.utils.UnionFind()
+    uf.union(1, 4, 7)
+    uf.union(2, 5, 8)
+    uf.union(3, 6, 9)
+    uf.union(1, 2, 3, 4, 5, 6, 7, 8, 9)
+    assert uf.weights[uf[1]] == 9
+
+
+def test_unbalanced_merge_weights():
+    # Tests if the largest set's root is used as the new root when merging
+    uf = nx.utils.UnionFind()
+    uf.union(1, 2, 3)
+    uf.union(4, 5, 6, 7, 8, 9)
+    assert uf.weights[uf[1]] == 3
+    assert uf.weights[uf[4]] == 6
+    largest_root = uf[4]
+    uf.union(1, 4)
+    assert uf[1] == largest_root
+    assert uf.weights[largest_root] == 9
+
+
+def test_empty_union():
+    # Tests if a null-union does nothing.
+    uf = nx.utils.UnionFind((0, 1))
+    uf.union()
+    assert uf[0] == 0
+    assert uf[1] == 1
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/union_find.py b/.venv/lib/python3.12/site-packages/networkx/utils/union_find.py new file mode 100644 index 0000000..2a07129 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/utils/union_find.py @@ -0,0 +1,106 @@
+"""
+Union-find data structure.
+"""
+
+from networkx.utils import groups
+
+
+class UnionFind:
+    """Union-find data structure.
+
+    Each UnionFind instance X maintains a family of disjoint sets of
+    hashable objects, supporting the following two methods:
+
+    - X[item] returns a name for the set containing the given item.
+      Each set is named by an arbitrarily-chosen one of its members; as
+      long as the set remains unchanged it will keep the same name.
If + the item is not yet part of a set in X, a new singleton set is + created for it. + + - X.union(item1, item2, ...) merges the sets containing each item + into a single larger set. If any item is not yet part of a set + in X, it is added to X as one of the members of the merged set. + + Union-find data structure. Based on Josiah Carlson's code, + https://code.activestate.com/recipes/215912/ + with significant additional changes by D. Eppstein. + http://www.ics.uci.edu/~eppstein/PADS/UnionFind.py + + """ + + def __init__(self, elements=None): + """Create a new empty union-find structure. + + If *elements* is an iterable, this structure will be initialized + with the discrete partition on the given set of elements. + + """ + if elements is None: + elements = () + self.parents = {} + self.weights = {} + for x in elements: + self.weights[x] = 1 + self.parents[x] = x + + def __getitem__(self, object): + """Find and return the name of the set containing the object.""" + + # check for previously unknown object + if object not in self.parents: + self.parents[object] = object + self.weights[object] = 1 + return object + + # find path of objects leading to the root + path = [] + root = self.parents[object] + while root != object: + path.append(object) + object = root + root = self.parents[object] + + # compress the path and return + for ancestor in path: + self.parents[ancestor] = root + return root + + def __iter__(self): + """Iterate through all items ever found or unioned by this structure.""" + return iter(self.parents) + + def to_sets(self): + """Iterates over the sets stored in this structure. + + For example:: + + >>> partition = UnionFind("xyz") + >>> sorted(map(sorted, partition.to_sets())) + [['x'], ['y'], ['z']] + >>> partition.union("x", "y") + >>> sorted(map(sorted, partition.to_sets())) + [['x', 'y'], ['z']] + + """ + # Ensure fully pruned paths + for x in self.parents: + _ = self[x] # Evaluated for side-effect only + + yield from groups(self.parents).values() + + def union(self, *objects): + """Find the sets containing the objects and merge them all.""" + # Find the heaviest root according to its weight. + roots = iter( + sorted( + {self[x] for x in objects}, key=lambda r: self.weights[r], reverse=True + ) + ) + try: + root = next(roots) + except StopIteration: + return + + for r in roots: + self.weights[root] += self.weights[r] + self.parents[r] = root diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/LICENSE.txt b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/LICENSE.txt new file mode 100644 index 0000000..284458b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/LICENSE.txt @@ -0,0 +1,971 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + +The NumPy repository and source distributions bundle several libraries that are +compatibly licensed. We list these here. + +Name: lapack-lite +Files: numpy/linalg/lapack_lite/* +License: BSD-3-Clause + For details, see numpy/linalg/lapack_lite/LICENSE.txt + +Name: dragon4 +Files: numpy/_core/src/multiarray/dragon4.c +License: MIT + For license text, see numpy/_core/src/multiarray/dragon4.c + +Name: libdivide +Files: numpy/_core/include/numpy/libdivide/* +License: Zlib + For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt + + +Note that the following files are vendored in the repository and sdist but not +installed in built numpy packages: + +Name: Meson +Files: vendored-meson/meson/* +License: Apache 2.0 + For license text, see vendored-meson/meson/COPYING + +Name: spin +Files: .spin/cmds.py +License: BSD-3 + For license text, see .spin/LICENSE + +Name: tempita +Files: numpy/_build_utils/tempita/* +License: MIT + For details, see numpy/_build_utils/tempita/LICENCE.txt + +---- + +This binary distribution of NumPy also bundles the following software: + + +Name: OpenBLAS +Files: numpy.libs/libscipy_openblas*.so +Description: bundled as a dynamically linked library +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: LAPACK +Files: numpy.libs/libscipy_openblas*.so +Description: bundled in OpenBLAS +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause-Open-MPI + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: numpy.libs/libgfortran*.so +Description: dynamically linked to files compiled with gcc +Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran +License: GPL-3.0-or-later WITH GCC-exception-3.1 + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. 
For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. + +Name: libquadmath +Files: numpy.libs/libquadmath*.so +Description: dynamically linked to files compiled with gcc +Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath +License: LGPL-2.1-or-later + + GCC Quad-Precision Math Library + Copyright (C) 2010-2019 Free Software Foundation, Inc. + Written by Francois-Xavier Coudert + + This file is part of the libquadmath library. + Libquadmath is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + Libquadmath is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/METADATA b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/METADATA new file mode 100644 index 0000000..8c894f1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/METADATA @@ -0,0 +1,1093 @@ +Metadata-Version: 2.1 +Name: numpy +Version: 2.3.2 +Summary: Fundamental package for array computing in Python +Author: Travis E. Oliphant et al. +Maintainer-Email: NumPy Developers +License: Copyright (c) 2005-2025, NumPy Developers. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---- + + The NumPy repository and source distributions bundle several libraries that are + compatibly licensed. We list these here. + + Name: lapack-lite + Files: numpy/linalg/lapack_lite/* + License: BSD-3-Clause + For details, see numpy/linalg/lapack_lite/LICENSE.txt + + Name: dragon4 + Files: numpy/_core/src/multiarray/dragon4.c + License: MIT + For license text, see numpy/_core/src/multiarray/dragon4.c + + Name: libdivide + Files: numpy/_core/include/numpy/libdivide/* + License: Zlib + For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt + + + Note that the following files are vendored in the repository and sdist but not + installed in built numpy packages: + + Name: Meson + Files: vendored-meson/meson/* + License: Apache 2.0 + For license text, see vendored-meson/meson/COPYING + + Name: spin + Files: .spin/cmds.py + License: BSD-3 + For license text, see .spin/LICENSE + + Name: tempita + Files: numpy/_build_utils/tempita/* + License: MIT + For details, see numpy/_build_utils/tempita/LICENCE.txt + + ---- + + This binary distribution of NumPy also bundles the following software: + + + Name: OpenBLAS + Files: numpy.libs/libscipy_openblas*.so + Description: bundled as a dynamically linked library + Availability: https://github.com/OpenMathLib/OpenBLAS/ + License: BSD-3-Clause + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + Name: LAPACK + Files: numpy.libs/libscipy_openblas*.so + Description: bundled in OpenBLAS + Availability: https://github.com/OpenMathLib/OpenBLAS/ + License: BSD-3-Clause-Open-MPI + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + Name: GCC runtime library + Files: numpy.libs/libgfortran*.so + Description: dynamically linked to files compiled with gcc + Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran + License: GPL-3.0-or-later WITH GCC-exception-3.1 + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . + + ---- + + Full text of license texts referred to above follows (that they are + listed below does not necessarily imply the conditions apply to the + present binary release): + + ---- + + GCC RUNTIME LIBRARY EXCEPTION + + Version 3.1, 31 March 2009 + + Copyright (C) 2009 Free Software Foundation, Inc. + + Everyone is permitted to copy and distribute verbatim copies of this + license document, but changing it is not allowed. + + This GCC Runtime Library Exception ("Exception") is an additional + permission under section 7 of the GNU General Public License, version + 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that + bears a notice placed by the copyright holder of the file stating that + the file is governed by GPLv3 along with this Exception. + + When you use GCC to compile a program, GCC may combine portions of + certain GCC header files and runtime libraries with the compiled + program. The purpose of this Exception is to allow compilation of + non-GPL (including proprietary) programs to use, in this way, the + header files and runtime libraries covered by this Exception. + + 0. Definitions. + + A file is an "Independent Module" if it either requires the Runtime + Library for execution after a Compilation Process, or makes use of an + interface provided by the Runtime Library, but is not otherwise based + on the Runtime Library. + + "GCC" means a version of the GNU Compiler Collection, with or without + modifications, governed by version 3 (or a specified later version) of + the GNU General Public License (GPL) with the option of using any + subsequent versions published by the FSF. + + "GPL-compatible Software" is software whose conditions of propagation, + modification and use would permit combination with GCC in accord with + the license of GCC. 
+ + "Target Code" refers to output from any compiler for a real or virtual + target processor architecture, in executable form or suitable for + input to an assembler, loader, linker and/or execution + phase. Notwithstanding that, Target Code does not include data in any + format that is used as a compiler intermediate representation, or used + for producing a compiler intermediate representation. + + The "Compilation Process" transforms code entirely represented in + non-intermediate languages designed for human-written code, and/or in + Java Virtual Machine byte code, into Target Code. Thus, for example, + use of source code generators and preprocessors need not be considered + part of the Compilation Process, since the Compilation Process can be + understood as starting with the output of the generators or + preprocessors. + + A Compilation Process is "Eligible" if it is done using GCC, alone or + with other GPL-compatible software, or if it is done without using any + work based on GCC. For example, using non-GPL-compatible Software to + optimize any GCC intermediate representations would not qualify as an + Eligible Compilation Process. + + 1. Grant of Additional Permission. + + You have permission to propagate a work of Target Code formed by + combining the Runtime Library with Independent Modules, even if such + propagation would otherwise violate the terms of GPLv3, provided that + all Target Code was generated by Eligible Compilation Processes. You + may then convey such a combination under terms of your choice, + consistent with the licensing of the Independent Modules. + + 2. No Weakening of GCC Copyleft. + + The availability of this Exception does not imply any general + presumption that third-party software is unaffected by the copyleft + requirements of the license of GCC. + + ---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for + software and other kinds of works. + + The licenses for most software and other practical works are designed + to take away your freedom to share and change the works. By contrast, + the GNU General Public License is intended to guarantee your freedom to + share and change all versions of a program--to make sure it remains free + software for all its users. We, the Free Software Foundation, use the + GNU General Public License for most of our software; it applies also to + any other work released this way by its authors. You can apply it to + your programs, too. + + When we speak of free software, we are referring to freedom, not + price. Our General Public Licenses are designed to make sure that you + have the freedom to distribute copies of free software (and charge for + them if you wish), that you receive source code or can get it if you + want it, that you can change the software or use pieces of it in new + free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you + these rights or asking you to surrender the rights. Therefore, you have + certain responsibilities if you distribute copies of the software, or if + you modify it: responsibilities to respect the freedom of others.
+ + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must pass on to the recipients the same + freedoms that you received. You must make sure that they, too, receive + or can get the source code. And you must show them these terms so they + know their rights. + + Developers that use the GNU GPL protect your rights with two steps: + (1) assert copyright on the software, and (2) offer you this License + giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains + that there is no warranty for this free software. For both users' and + authors' sake, the GPL requires that modified versions be marked as + changed, so that their problems will not be attributed erroneously to + authors of previous versions. + + Some devices are designed to deny users access to install or run + modified versions of the software inside them, although the manufacturer + can do so. This is fundamentally incompatible with the aim of + protecting users' freedom to change the software. The systematic + pattern of such abuse occurs in the area of products for individuals to + use, which is precisely where it is most unacceptable. Therefore, we + have designed this version of the GPL to prohibit the practice for those + products. If such problems arise substantially in other domains, we + stand ready to extend this provision to those domains in future versions + of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. + States should not allow patents to restrict development and use of + software on general-purpose computers, but in those that do, we wish to + avoid the special danger that patents applied to a free program could + make it effectively proprietary. To prevent this, the GPL assures that + patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and + modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of + works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this + License. Each licensee is addressed as "you". "Licensees" and + "recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work + in a fashion requiring copyright permission, other than the making of an + exact copy. The resulting work is called a "modified version" of the + earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based + on the Program. + + To "propagate" a work means to do anything with it that, without + permission, would make you directly or secondarily liable for + infringement under applicable copyright law, except executing it on a + computer or modifying a private copy. Propagation includes copying, + distribution (with or without modification), making available to the + public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other + parties to make or receive copies. Mere interaction with a user through + a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" + to the extent that it includes a convenient and prominently visible + feature that (1) displays an appropriate copyright notice, and (2) + tells the user that there is no warranty for the work (except to the + extent that warranties are provided), that licensees may convey the + work under this License, and how to view a copy of this License. If + the interface presents a list of user commands or options, such as a + menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work + for making modifications to it. "Object code" means any non-source + form of a work. + + A "Standard Interface" means an interface that either is an official + standard defined by a recognized standards body, or, in the case of + interfaces specified for a particular programming language, one that + is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other + than the work as a whole, that (a) is included in the normal form of + packaging a Major Component, but which is not part of that Major + Component, and (b) serves only to enable use of the work with that + Major Component, or to implement a Standard Interface for which an + implementation is available to the public in source code form. A + "Major Component", in this context, means a major essential component + (kernel, window system, and so on) of the specific operating system + (if any) on which the executable work runs, or a compiler used to + produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all + the source code needed to generate, install, and (for an executable + work) run the object code and to modify the work, including scripts to + control those activities. However, it does not include the work's + System Libraries, or general-purpose tools or generally available free + programs which are used unmodified in performing those activities but + which are not part of the work. For example, Corresponding Source + includes interface definition files associated with source files for + the work, and the source code for shared libraries and dynamically + linked subprograms that the work is specifically designed to require, + such as by intimate data communication or control flow between those + subprograms and other parts of the work. + + The Corresponding Source need not include anything that users + can regenerate automatically from other parts of the Corresponding + Source. + + The Corresponding Source for a work in source code form is that + same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of + copyright on the Program, and are irrevocable provided the stated + conditions are met. This License explicitly affirms your unlimited + permission to run the unmodified Program. The output from running a + covered work is covered by this License only if the output, given its + content, constitutes a covered work. This License acknowledges your + rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not + convey, without conditions so long as your license otherwise remains + in force. 
You may convey covered works to others for the sole purpose + of having them make modifications exclusively for you, or provide you + with facilities for running those works, provided that you comply with + the terms of this License in conveying all material for which you do + not control copyright. Those thus making or running the covered works + for you must do so exclusively on your behalf, under your direction + and control, on terms that prohibit them from making any copies of + your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under + the conditions stated below. Sublicensing is not allowed; section 10 + makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological + measure under any applicable law fulfilling obligations under article + 11 of the WIPO copyright treaty adopted on 20 December 1996, or + similar laws prohibiting or restricting circumvention of such + measures. + + When you convey a covered work, you waive any legal power to forbid + circumvention of technological measures to the extent such circumvention + is effected by exercising rights under this License with respect to + the covered work, and you disclaim any intention to limit operation or + modification of the work as a means of enforcing, against the work's + users, your or third parties' legal rights to forbid circumvention of + technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you + receive it, in any medium, provided that you conspicuously and + appropriately publish on each copy an appropriate copyright notice; + keep intact all notices stating that this License and any + non-permissive terms added in accord with section 7 apply to the code; + keep intact all notices of the absence of any warranty; and give all + recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, + and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to + produce it from the Program, in the form of source code under the + terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent + works, which are not by their nature extensions of the covered work, + and which are not combined with it such as to form a larger program, + in or on a volume of a storage or distribution medium, is called an + "aggregate" if the compilation and its resulting copyright are not + used to limit the access or legal rights of the compilation's users + beyond what the individual works permit. Inclusion of a covered work + in an aggregate does not cause this License to apply to the other + parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms + of sections 4 and 5, provided that you also convey the + machine-readable Corresponding Source under the terms of this License, + in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded + from the Corresponding Source as a System Library, need not be + included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any + tangible personal property which is normally used for personal, family, + or household purposes, or (2) anything designed or sold for incorporation + into a dwelling. 
In determining whether a product is a consumer product, + doubtful cases shall be resolved in favor of coverage. For a particular + product received by a particular user, "normally used" refers to a + typical or common use of that class of product, regardless of the status + of the particular user or of the way in which the particular user + actually uses, or expects or is expected to use, the product. A product + is a consumer product regardless of whether the product has substantial + commercial, industrial or non-consumer uses, unless such uses represent + the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, + procedures, authorization keys, or other information required to install + and execute modified versions of a covered work in that User Product from + a modified version of its Corresponding Source. The information must + suffice to ensure that the continued functioning of the modified object + code is in no case prevented or interfered with solely because + modification has been made. + + If you convey an object code work under this section in, or with, or + specifically for use in, a User Product, and the conveying occurs as + part of a transaction in which the right of possession and use of the + User Product is transferred to the recipient in perpetuity or for a + fixed term (regardless of how the transaction is characterized), the + Corresponding Source conveyed under this section must be accompanied + by the Installation Information. But this requirement does not apply + if neither you nor any third party retains the ability to install + modified object code on the User Product (for example, the work has + been installed in ROM). + + The requirement to provide Installation Information does not include a + requirement to continue to provide support service, warranty, or updates + for a work that has been modified or installed by the recipient, or for + the User Product in which it has been modified or installed. Access to a + network may be denied when the modification itself materially and + adversely affects the operation of the network or violates the rules and + protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, + in accord with this section must be in a format that is publicly + documented (and with an implementation available to the public in + source code form), and must require no special password or key for + unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this + License by making exceptions from one or more of its conditions. + Additional permissions that are applicable to the entire Program shall + be treated as though they were included in this License, to the extent + that they are valid under applicable law. If additional permissions + apply only to part of the Program, that part may be used separately + under those permissions, but the entire Program remains governed by + this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option + remove any additional permissions from that copy, or from any part of + it. (Additional permissions may be written to require their own + removal in certain cases when you modify the work.) You may place + additional permissions on material, added by you to a covered work, + for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you + add to a covered work, you may (if authorized by the copyright holders of + that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further + restrictions" within the meaning of section 10. If the Program as you + received it, or any part of it, contains a notice stating that it is + governed by this License along with a term that is a further + restriction, you may remove that term. If a license document contains + a further restriction but permits relicensing or conveying under this + License, you may add to a covered work material governed by the terms + of that license document, provided that the further restriction does + not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you + must place, in the relevant source files, a statement of the + additional terms that apply to those files, or a notice indicating + where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the + form of a separately written license, or stated as exceptions; + the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly + provided under this License. Any attempt otherwise to propagate or + modify it is void, and will automatically terminate your rights under + this License (including any patent licenses granted under the third + paragraph of section 11). + + However, if you cease all violation of this License, then your + license from a particular copyright holder is reinstated (a) + provisionally, unless and until the copyright holder explicitly and + finally terminates your license, and (b) permanently, if the copyright + holder fails to notify you of the violation by some reasonable means + prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is + reinstated permanently if the copyright holder notifies you of the + violation by some reasonable means, this is the first time you have + received notice of violation of this License (for any work) from that + copyright holder, and you cure the violation prior to 30 days after + your receipt of the notice. + + Termination of your rights under this section does not terminate the + licenses of parties who have received copies or rights from you under + this License. 
If your rights have been terminated and not permanently + reinstated, you do not qualify to receive new licenses for the same + material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or + run a copy of the Program. Ancillary propagation of a covered work + occurring solely as a consequence of using peer-to-peer transmission + to receive a copy likewise does not require acceptance. However, + nothing other than this License grants you permission to propagate or + modify any covered work. These actions infringe copyright if you do + not accept this License. Therefore, by modifying or propagating a + covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically + receives a license from the original licensors, to run, modify and + propagate that work, subject to this License. You are not responsible + for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an + organization, or substantially all assets of one, or subdividing an + organization, or merging organizations. If propagation of a covered + work results from an entity transaction, each party to that + transaction who receives a copy of the work also receives whatever + licenses to the work the party's predecessor in interest had or could + give under the previous paragraph, plus a right to possession of the + Corresponding Source of the work from the predecessor in interest, if + the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the + rights granted or affirmed under this License. For example, you may + not impose a license fee, royalty, or other charge for exercise of + rights granted under this License, and you may not initiate litigation + (including a cross-claim or counterclaim in a lawsuit) alleging that + any patent claim is infringed by making, using, selling, offering for + sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this + License of the Program or a work on which the Program is based. The + work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims + owned or controlled by the contributor, whether already acquired or + hereafter acquired, that would be infringed by some manner, permitted + by this License, of making, using, or selling its contributor version, + but do not include claims that would be infringed only as a + consequence of further modification of the contributor version. For + purposes of this definition, "control" includes the right to grant + patent sublicenses in a manner consistent with the requirements of + this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free + patent license under the contributor's essential patent claims, to + make, use, sell, offer for sale, import and otherwise run, modify and + propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express + agreement or commitment, however denominated, not to enforce a patent + (such as an express permission to practice a patent or covenant not to + sue for patent infringement). 
To "grant" such a patent license to a + party means to make such an agreement or commitment not to enforce a + patent against the party. + + If you convey a covered work, knowingly relying on a patent license, + and the Corresponding Source of the work is not available for anyone + to copy, free of charge and under the terms of this License, through a + publicly available network server or other readily accessible means, + then you must either (1) cause the Corresponding Source to be so + available, or (2) arrange to deprive yourself of the benefit of the + patent license for this particular work, or (3) arrange, in a manner + consistent with the requirements of this License, to extend the patent + license to downstream recipients. "Knowingly relying" means you have + actual knowledge that, but for the patent license, your conveying the + covered work in a country, or your recipient's use of the covered work + in a country, would infringe one or more identifiable patents in that + country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or + arrangement, you convey, or propagate by procuring conveyance of, a + covered work, and grant a patent license to some of the parties + receiving the covered work authorizing them to use, propagate, modify + or convey a specific copy of the covered work, then the patent license + you grant is automatically extended to all recipients of the covered + work and works based on it. + + A patent license is "discriminatory" if it does not include within + the scope of its coverage, prohibits the exercise of, or is + conditioned on the non-exercise of one or more of the rights that are + specifically granted under this License. You may not convey a covered + work if you are a party to an arrangement with a third party that is + in the business of distributing software, under which you make payment + to the third party based on the extent of your activity of conveying + the work, and under which the third party grants, to any of the + parties who would receive the covered work from you, a discriminatory + patent license (a) in connection with copies of the covered work + conveyed by you (or copies made from those copies), or (b) primarily + for and in connection with specific products or compilations that + contain the covered work, unless you entered into that arrangement, + or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting + any implied license or other defenses to infringement that may + otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot convey a + covered work so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you may + not convey it at all. For example, if you agree to terms that obligate you + to collect a royalty for further conveying from those to whom you convey + the Program, the only way you could satisfy both those terms and this + License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have + permission to link or combine any covered work with a work licensed + under version 3 of the GNU Affero General Public License into a single + combined work, and to convey the resulting work. The terms of this + License will continue to apply to the part which is the covered work, + but the special requirements of the GNU Affero General Public License, + section 13, concerning interaction through a network will apply to the + combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of + the GNU General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the + Program specifies that a certain numbered version of the GNU General + Public License "or any later version" applies to it, you have the + option of following the terms and conditions either of that numbered + version or of any later version published by the Free Software + Foundation. If the Program does not specify a version number of the + GNU General Public License, you may choose any version ever published + by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future + versions of the GNU General Public License can be used, that proxy's + public statement of acceptance of a version permanently authorizes you + to choose that version for the Program. + + Later license versions may give you additional or different + permissions. However, no additional obligations are imposed on any + author or copyright holder as a result of your choosing to follow a + later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY + APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT + HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY + OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM + IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF + ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS + THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY + GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE + USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF + DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD + PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), + EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF + SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided + above cannot be given local legal effect according to their terms, + reviewing courts shall apply local law that most closely approximates + an absolute waiver of all civil liability in connection with the + Program, unless a warranty or assumption of liability accompanies a + copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest + possible use to the public, the best way to achieve this is to make it + free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest + to attach them to the start of each source file to most effectively + state the exclusion of warranty; and each file should have at least + the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + + Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short + notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + + The hypothetical commands `show w' and `show c' should show the appropriate + parts of the General Public License. Of course, your program's commands + might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, + if any, to sign a "copyright disclaimer" for the program, if necessary. + For more information on this, and how to apply and follow the GNU GPL, see + <https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program + into proprietary programs. If your program is a subroutine library, you + may consider it more useful to permit linking proprietary applications with + the library. If this is what you want to do, use the GNU Lesser General + Public License instead of this License. But first, please read + <https://www.gnu.org/licenses/why-not-lgpl.html>. + + Name: libquadmath + Files: numpy.libs/libquadmath*.so + Description: dynamically linked to files compiled with gcc + Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath + License: LGPL-2.1-or-later + + GCC Quad-Precision Math Library + Copyright (C) 2010-2019 Free Software Foundation, Inc. + Written by Francois-Xavier Coudert + + This file is part of the libquadmath library. + Libquadmath is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + Libquadmath is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details.
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html + +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Typing :: Typed +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Project-URL: homepage, https://numpy.org +Project-URL: documentation, https://numpy.org/doc/ +Project-URL: source, https://github.com/numpy/numpy +Project-URL: download, https://pypi.org/project/numpy/#files +Project-URL: tracker, https://github.com/numpy/numpy/issues +Project-URL: release notes, https://numpy.org/doc/stable/release +Requires-Python: >=3.11 +Description-Content-Type: text/markdown + +
+ + +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)]( +https://numfocus.org) +[![PyPI Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=PyPI%20downloads)]( +https://pypi.org/project/numpy/) +[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)]( +https://anaconda.org/conda-forge/numpy) +[![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)]( +https://stackoverflow.com/questions/tagged/numpy) +[![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( +https://doi.org/10.1038/s41586-020-2649-2) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) +[![Typing](https://img.shields.io/pypi/types/numpy)](https://pypi.org/project/numpy/) + + +NumPy is the fundamental package for scientific computing with Python. + +- **Website:** https://numpy.org +- **Documentation:** https://numpy.org/doc +- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion +- **Source code:** https://github.com/numpy/numpy +- **Contributing:** https://numpy.org/devdocs/dev/index.html +- **Bug reports:** https://github.com/numpy/numpy/issues +- **Report a security vulnerability:** https://tidelift.com/docs/security + +It provides: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities + +Testing: + +NumPy requires `pytest` and `hypothesis`. Tests can then be run after installation with: + + python -c "import numpy, sys; sys.exit(numpy.test() is False)" + +Code of Conduct +---------------------- + +NumPy is a community-driven open source project developed by a diverse group of +[contributors](https://numpy.org/teams/). The NumPy leadership has made a strong +commitment to creating an open, inclusive, and positive community. Please read the +[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact +with others in a way that makes our community thrive. + +Call for Contributions +---------------------- + +The NumPy project welcomes your expertise and enthusiasm! + +Small improvements or fixes are always appreciated. If you are considering larger contributions +to the source code, please contact us through the [mailing +list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. + +Writing code isn’t the only way to contribute to NumPy. You can also: +- review pull requests +- help us stay on top of new and old issues +- develop tutorials, presentations, and other educational materials +- maintain and improve [our website](https://github.com/numpy/numpy.org) +- develop graphic design for our brand assets and promotional materials +- translate website content +- help with outreach and onboard new contributors +- write grant proposals and help with other fundraising efforts + +For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/). +If you’re unsure where to start or how your skills fit in, reach out! You can +ask on the mailing list or here, on GitHub, by opening a new issue or leaving a +comment on a relevant issue that is already open. 
+ +Our preferred channels of communication are all public, but if you’d like to +speak to us in private first, contact our community coordinators at +numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for +an invitation). + +We also have a biweekly community call, details of which are announced on the +mailing list. You are very welcome to join. + +If you are new to contributing to open source, [this +guide](https://opensource.guide/how-to-contribute/) helps explain why, what, +and how to successfully get involved. diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/RECORD b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/RECORD new file mode 100644 index 0000000..09e6038 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/RECORD @@ -0,0 +1,1314 @@ +../../../bin/f2py,sha256=s9TqsfwccvWsRXfDx5Q5BrkRBucKpmV64GY_Aatgfkk,222 +../../../bin/numpy-config,sha256=uAW9ztIdPgM-s9SC47cZN94e6t73R0_86Kriene2U5k,222 +numpy-2.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numpy-2.3.2.dist-info/LICENSE.txt,sha256=IEajEw5QsRwBZZs6DZY-auC3Q2_46Jy8_Z6HvGES1ZU,47768 +numpy-2.3.2.dist-info/METADATA,sha256=Y1tmC-sJT0UZehnoBeTGGawb23JdM-SysmhcMAzJYV4,62117 +numpy-2.3.2.dist-info/RECORD,, +numpy-2.3.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy-2.3.2.dist-info/WHEEL,sha256=rJCplGeGFjRoUvxJWhVONRgyGYGZoOkhyej0s3KyjoI,138 +numpy-2.3.2.dist-info/entry_points.txt,sha256=7Cb63gyL2sIRpsHdADpl6xaIW5JTlUI-k_yqEVr0BSw,220 +numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0,sha256=xgkASOzMdjUiwS7wFvgdprYnyzoET1XPBHmoOcQcCYA,2833617 +numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0,sha256=btUTf0Enga14Y0OftUNhP2ILQ8MrYykqACkkYWL1u8Y,250985 +numpy.libs/libscipy_openblas64_-8fb3d286.so,sha256=N7prNzoi_0KETgFPXcu0KBg_IyX9mTNhtB_M9oJBMz0,25050385 +numpy/__config__.py,sha256=ERTBy5gqppmtMAuu_9XGALvjlTL5G9t0ELHY9dIDDkM,5281 +numpy/__config__.pyi,sha256=7nE-kUNs2lWPIpofTastbf2PCMgCka7FCiK5jrFkDYE,2367 +numpy/__init__.cython-30.pxd,sha256=qT7d9_TWkj4UsfpY1uaBUmcYflptcjZfDGZsYJth8rU,47123 +numpy/__init__.pxd,sha256=BFYYkcQUcrl0Ee8ReoQiA0wgtxsWeIGovC8jYeEw5qg,43758 +numpy/__init__.py,sha256=gjanU4Bds0wp75zQNpTgr4g7YyXrT9JGzIPSvguEfok,25226 +numpy/__init__.pyi,sha256=MRetJRVe0uboEY0XxK9-bMqPyrD1J5NXSvh9auIPD6I,213336 +numpy/__pycache__/__config__.cpython-312.pyc,, +numpy/__pycache__/__init__.cpython-312.pyc,, +numpy/__pycache__/_array_api_info.cpython-312.pyc,, +numpy/__pycache__/_configtool.cpython-312.pyc,, +numpy/__pycache__/_distributor_init.cpython-312.pyc,, +numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc,, +numpy/__pycache__/_globals.cpython-312.pyc,, +numpy/__pycache__/_pytesttester.cpython-312.pyc,, +numpy/__pycache__/conftest.cpython-312.pyc,, +numpy/__pycache__/dtypes.cpython-312.pyc,, +numpy/__pycache__/exceptions.cpython-312.pyc,, +numpy/__pycache__/matlib.cpython-312.pyc,, +numpy/__pycache__/version.cpython-312.pyc,, +numpy/_array_api_info.py,sha256=NzJSuf8vutjGSqiqahq3jRI3SxMX4X1cva4J6dFv4EU,10354 +numpy/_array_api_info.pyi,sha256=QP_tYDbjtTOPtJECk3ehRXOQ24QM8TZjAfWX8XAsZCM,4864 +numpy/_configtool.py,sha256=EFRJ3pazTxYhE9op-ocWyKTLZrrpFhfmmS_tWrq8Cxo,1007 +numpy/_configtool.pyi,sha256=d4f22QGwpb1ZtDk-1Sn72ftvo4incC5E2JAikmjzfJI,24 +numpy/_core/__init__.py,sha256=yJ0iy1fXk9ogCFnflCWzBBLwlKSS-xlQWCpWCozaT6c,5542 +numpy/_core/__init__.pyi,sha256=Mj2I4BtqBVNUZVs5o1T58Z7wSaWjfhX0nCl-a0ULjgA,86 +numpy/_core/__pycache__/__init__.cpython-312.pyc,, 
+numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc,, +numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc,, +numpy/_core/__pycache__/_asarray.cpython-312.pyc,, +numpy/_core/__pycache__/_dtype.cpython-312.pyc,, +numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc,, +numpy/_core/__pycache__/_exceptions.cpython-312.pyc,, +numpy/_core/__pycache__/_internal.cpython-312.pyc,, +numpy/_core/__pycache__/_machar.cpython-312.pyc,, +numpy/_core/__pycache__/_methods.cpython-312.pyc,, +numpy/_core/__pycache__/_string_helpers.cpython-312.pyc,, +numpy/_core/__pycache__/_type_aliases.cpython-312.pyc,, +numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc,, +numpy/_core/__pycache__/arrayprint.cpython-312.pyc,, +numpy/_core/__pycache__/cversions.cpython-312.pyc,, +numpy/_core/__pycache__/defchararray.cpython-312.pyc,, +numpy/_core/__pycache__/einsumfunc.cpython-312.pyc,, +numpy/_core/__pycache__/fromnumeric.cpython-312.pyc,, +numpy/_core/__pycache__/function_base.cpython-312.pyc,, +numpy/_core/__pycache__/getlimits.cpython-312.pyc,, +numpy/_core/__pycache__/memmap.cpython-312.pyc,, +numpy/_core/__pycache__/multiarray.cpython-312.pyc,, +numpy/_core/__pycache__/numeric.cpython-312.pyc,, +numpy/_core/__pycache__/numerictypes.cpython-312.pyc,, +numpy/_core/__pycache__/overrides.cpython-312.pyc,, +numpy/_core/__pycache__/printoptions.cpython-312.pyc,, +numpy/_core/__pycache__/records.cpython-312.pyc,, +numpy/_core/__pycache__/shape_base.cpython-312.pyc,, +numpy/_core/__pycache__/strings.cpython-312.pyc,, +numpy/_core/__pycache__/umath.cpython-312.pyc,, +numpy/_core/_add_newdocs.py,sha256=ySKuP_4sVPNLHp1ojgTMhSRWi3d18CcBFHFHkD8Xf-U,208893 +numpy/_core/_add_newdocs.pyi,sha256=r__d_-GHkfjzuZ0qyjDztsKgdc1eIyeN-cBoYVgMBuo,168 +numpy/_core/_add_newdocs_scalars.py,sha256=Z5WcIAXy2Vs8kWLCzgyvxWVH0CAl-O64YFK3ttbU7yc,12600 +numpy/_core/_add_newdocs_scalars.pyi,sha256=ZnIk0TgL0szrv6SPCH-4dF469Q_92UvV5_ek47Oj7HM,573 +numpy/_core/_asarray.py,sha256=fCNHLaaCP-5Ia-RR_bIrHxWY3xklcmvlZiGhJIDiKLM,3911 +numpy/_core/_asarray.pyi,sha256=QHyb8DM_9U0otRugoNIyKjtvTVS3dZLn6DSxGi_ZU4U,1073 +numpy/_core/_dtype.py,sha256=cM6JnjoHLURWCHgN8VmQyjeiiDjcwhB5L_fPMOe1uuM,10547 +numpy/_core/_dtype.pyi,sha256=turm6RyVVEGKm6antqWWnyA0bnS2AuMwmKeFj-9mYHA,1851 +numpy/_core/_dtype_ctypes.py,sha256=KPPlakDsPkuThSOr5qFwW0jJ9VnjbvW4EWhObCHYGIE,3726 +numpy/_core/_dtype_ctypes.pyi,sha256=VwEZFViCPuHlCURv2jpJp9sbHh2hYUpzC_FRZNNGMMw,3682 +numpy/_core/_exceptions.py,sha256=X8Eg1hq1uU8L9wiOwFo2jRq6S0vnjCdgYFHj3hAW9Co,5159 +numpy/_core/_exceptions.pyi,sha256=ESXpijoEK0HrPy0dQYtjO62-Krd0419WLlrDROqwTyU,1900 +numpy/_core/_internal.py,sha256=YZ6nMGVOvfTD1nzk2XqRdz8k05WVnYGiljb1TnHvMq8,28981 +numpy/_core/_internal.pyi,sha256=2V2rXMQocZZHw8z_9HSrUi3LNGxaxA1nm0B0fcofjU8,2654 +numpy/_core/_machar.py,sha256=YUX24XYbxXJ79KrWar27FlDYKfeodr_RCkE7w0bETqs,11569 +numpy/_core/_machar.pyi,sha256=ESXpijoEK0HrPy0dQYtjO62-Krd0419WLlrDROqwTyU,1900 +numpy/_core/_methods.py,sha256=4qiUUES5wnOFeXnPavtqqMVhZ09ZZeSKlwqdPw2eKSI,9430 +numpy/_core/_methods.pyi,sha256=5HzEt2Z0-vxQfS1QJKDlTvNyLXcinNsja-xQiehMGbw,526 +numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so,sha256=-uOqys8wnCdsxMjL3BNzURd2PxxfZSy0i7l-zJwbndg,141888 +numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so,sha256=nq8jsmR1SSQmgjE46CS24s3UzBOHzCL3zBRWI3LzMR4,10808937 +numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so,sha256=LaAKYVcg0fwPuQL89z8q1-GrpQykaaNK4kJuXRArmDs,16800 
+numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so,sha256=L-0ufvXmFouELpWO_1IJOyZp7m1EiznGwGSZC3lGRVA,59592 +numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so,sha256=NGUI17JKGWykF1UY3TsJruMrbH-YCfePwDFQRDbA0_c,2882368 +numpy/_core/_simd.pyi,sha256=2z2sFPgXr3KRzHltbt31HVrhkXM0VwXFp1lUjxaRMAM,669 +numpy/_core/_string_helpers.py,sha256=6Smgoi6oD2CunjwBSr9BZ20HkCnvW6nTPblTOU3pWng,2845 +numpy/_core/_string_helpers.pyi,sha256=xLlLKJHutEYzyKnTG2k7clcWvVUTvD319SjnKmDXuac,358 +numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so,sha256=U__bzTJORJ4S1Ax393Y1ui1vpwNmQKJzNAIkICwK-hk,16936 +numpy/_core/_type_aliases.py,sha256=msFHBkZ2s1wKQyuguK_cF6NBS0_3AOww7j3oh26mo3Q,3489 +numpy/_core/_type_aliases.pyi,sha256=Tn1Ex4bAGQa1HuMx0Vn-tEBl3HDF_uesTzmiSrz81kQ,2388 +numpy/_core/_ufunc_config.py,sha256=hVIyOmLjFYdZQY5plKWuOMk-U7UzeYSEo4ygiXOFcBU,15052 +numpy/_core/_ufunc_config.pyi,sha256=rh1jhYnkafjGvrc3ytC5mOSwRnjwhoggw8yDeLCS3jc,972 +numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so,sha256=7Q7bbK-i26WJdhCDw-22TnLzD9gWwyqa93FEClt5M7A,50312 +numpy/_core/arrayprint.py,sha256=AAAvkrI0U6Pa_wZOnpuVZBpdsCCjpYpcWF8sA_SPYbg,65278 +numpy/_core/arrayprint.pyi,sha256=ogMYnp2ipEfagADzRaRK9ySGAfH_oabGNJegiA6LicY,6971 +numpy/_core/cversions.py,sha256=H_iNIpx9-hY1cQNxqjT2d_5SXZhJbMo_caq4_q6LB7I,347 +numpy/_core/defchararray.py,sha256=1tSvLWEeac20DodpDBxapJKwwczpJG1lVy2qjScIVXg,38007 +numpy/_core/defchararray.pyi,sha256=Mq-ytnNliY2jEYAl_0l5ZTRx9IpNMaJpDmkoerRUILE,27985 +numpy/_core/einsumfunc.py,sha256=heFeCiEKji-qfVk8zAZ1b5bKm-MUMLzCETMQ7yyHBhc,52820 +numpy/_core/einsumfunc.pyi,sha256=b10CKdAeLEryabwRMdiW1cKdNyqWLa5kMV7O2_X8g3A,4893 +numpy/_core/fromnumeric.py,sha256=s0f6WfkIRVwFZMlDrdYb3EjyF9vMGr0bms0Pc-VcOAM,143882 +numpy/_core/fromnumeric.pyi,sha256=VoUF-d31OuZYaRIi-duoYAABOADe4KjbBhFFx3Hd_Mc,42034 +numpy/_core/function_base.py,sha256=QT1pbll_8rf_3ZsGtLQoAeQ1OSqCqeAGtMTzPAE1I_w,19683 +numpy/_core/function_base.pyi,sha256=A9BlWQeiX08iIwDQJ6W1FUhy2qrRPVenXtHiEnPkt0k,7064 +numpy/_core/getlimits.py,sha256=32Qe7tlBFdyiDvdSjG1cp2a0NJ0rSMxeDRij3agiPrg,26101 +numpy/_core/getlimits.pyi,sha256=q30hQ3wDenmxoZUSoSOqyVrZZVGlsixXCHe6QUthbp8,61 +numpy/_core/include/numpy/__multiarray_api.c,sha256=ndBF5wbdd7F8_zWvR52MDO0Qm15_PrCCBlSk4dky4F8,12698 +numpy/_core/include/numpy/__multiarray_api.h,sha256=6ep4M4s0Cxoj4DgJGns-0___TdSqDJoUPnZr0BBYwkU,61639 +numpy/_core/include/numpy/__ufunc_api.c,sha256=Fg7WlH4Ow6jETKRArVL_QF11ABKYz1VpOve56_U3E0w,1755 +numpy/_core/include/numpy/__ufunc_api.h,sha256=J5h9KHdntM27XQdq1PwHwI7V2v-sOx6AIbgCwP8mg9M,13175 +numpy/_core/include/numpy/_neighborhood_iterator_imp.h,sha256=s-Hw_l5WRwKtYvsiIghF0bg-mA_CgWnzFFOYVFJ-q4k,1857 +numpy/_core/include/numpy/_numpyconfig.h,sha256=lfgEF_31SixqOweZEHjn19bN5ng62MSwuVWEXS1_p_U,926 +numpy/_core/include/numpy/_public_dtype_api_table.h,sha256=n6_Kb98SyvsR_X7stiNA6VuGp_c5W1e4fMVcJdO0wis,4574 +numpy/_core/include/numpy/arrayobject.h,sha256=mU5vpcQ95PH1j3bp8KYhJOFHB-GxwRjSUsR7nxlTSRk,204 +numpy/_core/include/numpy/arrayscalars.h,sha256=LlyrZIa_5td11BfqfMCv1hYbiG6__zxxGv1MRj8uIVo,4243 +numpy/_core/include/numpy/dtype_api.h,sha256=Gn37RzObmcTsL6YUYY9aG22Ct8F-r4ZaC53NPFqaIso,19238 +numpy/_core/include/numpy/halffloat.h,sha256=TRZfXgipa-dFppX2uNgkrjrPli-1BfJtadWjAembJ4s,1959 +numpy/_core/include/numpy/ndarrayobject.h,sha256=MnykWmchyS05ler_ZyhFIr_0j6c0IcndEi3X3n0ZWDk,12057 +numpy/_core/include/numpy/ndarraytypes.h,sha256=kS9uirBf_ewXdIgsmRQETk3aQXeSPjLPCa6hlX5By-0,65810 
+numpy/_core/include/numpy/npy_2_compat.h,sha256=wdjB7_-AtW3op67Xbj3EVH6apSF7cRG6h3c5hBz-YMs,8546 +numpy/_core/include/numpy/npy_2_complexcompat.h,sha256=eE9dV_Iq3jEfGGJFH_pQjJnvC6eQ12WgOB7cZMmHByE,857 +numpy/_core/include/numpy/npy_3kcompat.h,sha256=grN6W1n7benj3F2pSAOpl_s6vn1Y50QfAP-DaleD7cA,9648 +numpy/_core/include/numpy/npy_common.h,sha256=-05bavbk44KUjy5Q-qnM5YzU32VJRv0N8ozfCI_SKcE,32586 +numpy/_core/include/numpy/npy_cpu.h,sha256=Vw8mVPm1fGmLdeLV3RoBZnBMMXA8cghgwRdWhlkDLi4,4225 +numpy/_core/include/numpy/npy_endian.h,sha256=vvK7ZlOt0vgqTVrIyviWzoxQz70S-BvflS4Z_k6X5XE,2834 +numpy/_core/include/numpy/npy_math.h,sha256=aeSFs60QbWPy1gIPyHDPrYExifm5mbDAcjP_mLk_PF0,18858 +numpy/_core/include/numpy/npy_no_deprecated_api.h,sha256=0yZrJcQEJ6MCHJInQk5TP9_qZ4t7EfBuoLOJ34IlJd4,678 +numpy/_core/include/numpy/npy_os.h,sha256=hlQsg_7-RkvS3s8OM8KXy99xxyJbCm-W1AYVcdnO1cw,1256 +numpy/_core/include/numpy/numpyconfig.h,sha256=FGuDPIr0gTFYgUzhVMXqq5BIQL-WqgmXfp003cUwpWE,7333 +numpy/_core/include/numpy/random/LICENSE.txt,sha256=-8U59H0M-DvGE3gID7hz1cFGMBJsrL_nVANcOSbapew,1018 +numpy/_core/include/numpy/random/bitgen.h,sha256=49AwKOR552r-NkhuSOF1usb_URiMSRMvD22JF5pKIng,488 +numpy/_core/include/numpy/random/distributions.h,sha256=W5tOyETd0m1W0GdaZ5dJP8fKlBtsTpG23V2Zlmrlqpg,9861 +numpy/_core/include/numpy/random/libdivide.h,sha256=ew9MNhPQd1LsCZiWiFmj9IZ7yOnA3HKOXffDeR9X1jw,80138 +numpy/_core/include/numpy/ufuncobject.h,sha256=BengvqXqiy4ipzz23KQi1Kldy9ybYUs4Sp5yA73VgiU,11780 +numpy/_core/include/numpy/utils.h,sha256=wMNomSH3Dfj0q78PrjLVtFtN-FPo7UJ4o0ifCUO-6Es,1185 +numpy/_core/lib/libnpymath.a,sha256=oXeSGrMy3L_zDbnj58as1hihfFFftHWb73ah3KPeCT4,54312 +numpy/_core/lib/npy-pkg-config/mlib.ini,sha256=_LsWV1eStNqwhdiYPa2538GL46dnfVwT4MrI1zbsoFw,147 +numpy/_core/lib/npy-pkg-config/npymath.ini,sha256=0iMzarBfkkZ_EXO95_kz-SHZRcNIEwIeOjE_esVBkRQ,361 +numpy/_core/lib/pkgconfig/numpy.pc,sha256=85w4PFiYRj_NToadLugN4QNsA7doEeQTZ-ineB4EcYs,191 +numpy/_core/memmap.py,sha256=yIsQ6n9kpZulggRJJFkTbjVwnB4leoyizvUpc2iU4n8,12651 +numpy/_core/memmap.pyi,sha256=_LKjb_PuhcQwpqc2lFaL379DYzQ9PtuKdlVV3jXOYEM,47 +numpy/_core/multiarray.py,sha256=zwHBdyOoxiBRcOhG2QB_xBAYm-p8ARSpQbye9EzrrBo,58155 +numpy/_core/multiarray.pyi,sha256=Uy5Unmczfk7Pyz8Ohgh_5g4ASY7aZ0ZYpmhhmPnG6OA,32150 +numpy/_core/numeric.py,sha256=_DcnvXu6oaHXSi9Q-BV9yGzfx7tc9iCx69r9MnJDm5g,82322 +numpy/_core/numeric.pyi,sha256=ZSWTBi2kdP7BPG3KMGJWJIlqM9BLKFmgq_xgK_GnDUo,19042 +numpy/_core/numerictypes.py,sha256=mKPbsOzX9vyWQEv4jlf4xnlPfP4IYAXeILHFdb2FS0I,15957 +numpy/_core/numerictypes.pyi,sha256=Kp4_fEg_Wj_Yv8xvI7H1TJXrDVsxb96oIH5EmnQyW1c,3270 +numpy/_core/overrides.py,sha256=MtgzOBavG7wzQYCA7O7ArdCJVV72STIb_cvkWBuDLJE,7241 +numpy/_core/overrides.pyi,sha256=2lHte4EbOTDQvknjVfO71RgiLXnOpGQky5j2meS09JU,1713 +numpy/_core/printoptions.py,sha256=NFpvy5bnjbvqnKeqQt0veEExpAAYAVNoiGXH3pglWAc,1056 +numpy/_core/printoptions.pyi,sha256=eNiliCnDuZBxla6X9kwZ-7YiCn-UtMbT-U_qTnw8l9w,594 +numpy/_core/records.py,sha256=hoXCDswM6hbytiGdYGkhRISzQjnqImXcIdGlNuOUDX4,36767 +numpy/_core/records.pyi,sha256=tob9AxABbCXsO--gWXX-pD5Bo50NgCXKOt4JstVESjY,8935 +numpy/_core/shape_base.py,sha256=7yDPrIXTmmBnZMUStHXsq1iJNiGmIxEAcepxQ9o-JVQ,32738 +numpy/_core/shape_base.pyi,sha256=Qgfi1izbvKgRWAojCMXw3HsONgvsryFCsDhAvNI1dZE,4753 +numpy/_core/strings.py,sha256=yjdeNG2e0wpljpnwGISi7NXVLD4ttCM5vAYSSV1yI8k,50642 +numpy/_core/strings.pyi,sha256=Fyjq70ZP70BzV3Ov490dxX5EOv76sgnxA7qVBxeXuRU,13502 +numpy/_core/tests/__pycache__/_locales.cpython-312.pyc,, +numpy/_core/tests/__pycache__/_natype.cpython-312.pyc,, 
+numpy/_core/tests/__pycache__/test__exceptions.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_abc.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_api.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_argparse.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_array_api_info.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_array_coercion.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_array_interface.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arraymethod.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arrayobject.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arrayprint.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_casting_unittests.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_conversion_utils.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cpu_features.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cython.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_datetime.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_defchararray.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_deprecations.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_dlpack.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_dtype.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_einsum.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_errstate.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_extint128.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_function_base.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_getlimits.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_half.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_hashtable.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_indexerrors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_indexing.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_item_selection.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_limited_api.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_longdouble.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_machar.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_mem_overlap.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_mem_policy.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_memmap.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_multiarray.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_multithreading.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_nditer.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_numeric.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_numerictypes.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_overrides.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_print.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_protocols.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_records.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalar_methods.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarinherit.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarmath.cpython-312.pyc,, 
+numpy/_core/tests/__pycache__/test_scalarprint.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_shape_base.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_simd.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_simd_module.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_stringdtype.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_strings.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_ufunc.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath_complex.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_unicode.cpython-312.pyc,, +numpy/_core/tests/_locales.py,sha256=lvHqUJVMsrE7Jh3N_KpO5fGBZgID-l3Zr4-_RrH1ZNM,2176 +numpy/_core/tests/_natype.py,sha256=YCAkuhvWuMjTjt-C0VjA8zzui-KoioNwOmAYnvf6KR0,6525 +numpy/_core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 +numpy/_core/tests/data/generate_umath_validation_data.cpp,sha256=BQakB5o8Mq60zex5ovVO0IatNa7xbF8JvXmtk6373So,5842 +numpy/_core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 +numpy/_core/tests/data/umath-validation-set-README.txt,sha256=pxWwOaGGahaRd-AlAidDfocLyrAiDp0whf5hC7hYwqM,967 +numpy/_core/tests/data/umath-validation-set-arccos.csv,sha256=yBlz8r6RnnAYhdlobzGGo2FKY-DoSTQaP26y8138a3I,61365 +numpy/_core/tests/data/umath-validation-set-arccosh.csv,sha256=0GXe7XG1Z3jXAcK-OlEot_Df3MetDQSlbm3MJ__iMQk,61365 +numpy/_core/tests/data/umath-validation-set-arcsin.csv,sha256=w_Sv2NDn-mLZSAqb56JT2g4bqBzxYAihedWxHuf82uU,61339 +numpy/_core/tests/data/umath-validation-set-arcsinh.csv,sha256=DZrMYoZZZyM1DDyXNUxSlzx6bOgajnRSLWAzxcPck8k,60289 +numpy/_core/tests/data/umath-validation-set-arctan.csv,sha256=0aosXZ-9DYTop0lj4bfcBNwYVvjZdW13hbMRTRRTmV0,60305 +numpy/_core/tests/data/umath-validation-set-arctanh.csv,sha256=HEK9ePx1OkKrXIKkMUV0IxrmsDqIlgKddiI-LvF2J20,61339 +numpy/_core/tests/data/umath-validation-set-cbrt.csv,sha256=v855MTZih-fZp_GuEDst2qaIsxU4a7vlAbeIJy2xKpc,60846 +numpy/_core/tests/data/umath-validation-set-cos.csv,sha256=0PNnDqKkokZ7ERVDgbes8KNZc-ISJrZUlVZc5LkW18E,59122 +numpy/_core/tests/data/umath-validation-set-cosh.csv,sha256=JKC4nKr3wTzA_XNSiQvVUq9zkYy4djvtu2-j4ZZ_7Oc,60869 +numpy/_core/tests/data/umath-validation-set-exp.csv,sha256=rUAWIbvyeKh9rPfp2n0Zq7AKq_nvHpgbgzLjAllhsek,17491 +numpy/_core/tests/data/umath-validation-set-exp2.csv,sha256=djosT-3fTpiN_f_2WOumgMuuKgC_XhpVO-QsUFwI6uU,58624 +numpy/_core/tests/data/umath-validation-set-expm1.csv,sha256=K7jL6N4KQGX71fj5hvYkzcMXk7MmQes8FwrNfyrPpgU,60299 +numpy/_core/tests/data/umath-validation-set-log.csv,sha256=ynzbVbKxFzxWFwxHnxX7Fpm-va09oI3oK1_lTe19g4w,11692 +numpy/_core/tests/data/umath-validation-set-log10.csv,sha256=NOBD-rOWI_FPG4Vmbzu3JtX9UA838f2AaDFA-waiqGA,68922 +numpy/_core/tests/data/umath-validation-set-log1p.csv,sha256=tdbYWPqWIz8BEbIyklynh_tpQJzo970Edd4ek6DsPb8,60303 +numpy/_core/tests/data/umath-validation-set-log2.csv,sha256=39EUD0vFMbwyoXoOhgCmid6NeEAQU7Ff7QFjPsVObIE,68917 +numpy/_core/tests/data/umath-validation-set-sin.csv,sha256=8PUjnQ_YfmxFb42XJrvpvmkeSpEOlEXSmNvIK4VgfAM,58611 +numpy/_core/tests/data/umath-validation-set-sinh.csv,sha256=XOsBUuPcMjiO_pevMalpmd0iRv2gmnh9u7bV9ZLLg8I,60293 +numpy/_core/tests/data/umath-validation-set-tan.csv,sha256=Hv2WUMIscfvQJ5Y5BipuHk4oE4VY6QKbQp_kNRdCqYQ,60299 +numpy/_core/tests/data/umath-validation-set-tanh.csv,sha256=iolZF_MOyWRgYSa-SsD4df5mnyFK18zrICI740SWoTc,60299 
+numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc,, +numpy/_core/tests/examples/cython/checks.pyx,sha256=nw6o0nlj3SfNQP3McS10zVH9UCZiITBdAi5yO4gm9Qo,10774 +numpy/_core/tests/examples/cython/meson.build,sha256=uuXVPKemNVMQ5MiEDqS4BXhwGHa96JHjS50WxZuJS_8,1268 +numpy/_core/tests/examples/cython/setup.py,sha256=JM6UnDql7LsAnRo6p9G-nRz3dfnoy9fHF6YVKy1OzdA,859 +numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc,, +numpy/_core/tests/examples/limited_api/limited_api1.c,sha256=htSR9ER3S8AJqv4EZMsrxQ-SufTIlXNpuFI6MXQs87w,346 +numpy/_core/tests/examples/limited_api/limited_api2.pyx,sha256=1q4I59pdkCmMhLcYngN_XwQnPoLmDEo1uTGnhrLRjDc,203 +numpy/_core/tests/examples/limited_api/limited_api_latest.c,sha256=ltBLbrl1g9XxD2wvN_-g3NhIizc8mxnh2Z6wCyXo-8E,452 +numpy/_core/tests/examples/limited_api/meson.build,sha256=YM5RwW_waFymlWSHFhCCOHO6KCknooN0jCiqScL0i5M,1627 +numpy/_core/tests/examples/limited_api/setup.py,sha256=Y6tgsOF58qe7eG2QmRQHG2wacZWfpbJLT8u-5OamjqA,437 +numpy/_core/tests/test__exceptions.py,sha256=luMT6vPIdf6LuwFNGyT-xLMZaKZEYYOFzFpMaesojoE,2922 +numpy/_core/tests/test_abc.py,sha256=9y2SsJdkPeV0oW6dsROPZOcQ72_mXie1uU2yPN93wzo,2221 +numpy/_core/tests/test_api.py,sha256=NiqlxYyBOZlKVKIWs_vQTg6ZnOk5iE63nbz1GBdHXeI,22954 +numpy/_core/tests/test_argparse.py,sha256=pfFfRr0grfOt-6Y7D8q9yPmz8Fcx4UbUxLpe96Tk9Xg,2870 +numpy/_core/tests/test_array_api_info.py,sha256=PZ2EzS9pq4nLZRAvvUSOb2Ke5p7pb4u4P4HKLRZjstw,3063 +numpy/_core/tests/test_array_coercion.py,sha256=PJ3s7psngDM084R2x7luAHVkHoa31TDiH1FiZpUWSfs,34897 +numpy/_core/tests/test_array_interface.py,sha256=l39VuV4nCdIeV1RUvMtjjPohAgIvJP-V3GQ5MaPrVK8,7843 +numpy/_core/tests/test_arraymethod.py,sha256=my4I9YjpVGLwN1GMbuoEhBZEJN0PuH6R2wtvGHcfoWI,3223 +numpy/_core/tests/test_arrayobject.py,sha256=aVv2eGjunCMEDFgmFujxMpk4xb-zo1MQrFcwQLfblx0,2596 +numpy/_core/tests/test_arrayprint.py,sha256=6UmL93wltbIDKdhF_WcdPRH5mztX0wyzuBy6PYW3R_o,50738 +numpy/_core/tests/test_casting_floatingpoint_errors.py,sha256=cER1YCNEwq67uAPX0QhkJonb5oA4Ws1_t0Z2AWJjYJg,5076 +numpy/_core/tests/test_casting_unittests.py,sha256=HH849h4ox1dejLB4aFX2B9tSGf0WhVvPZBPJT4yTOAA,34336 +numpy/_core/tests/test_conversion_utils.py,sha256=HAIdSRUit1lhSQEn-UVPTwyNxKjP9bSr8NGeHXnp6ew,6362 +numpy/_core/tests/test_cpu_dispatcher.py,sha256=26vob-nCPkjtxf9lRlQvwoTR92lqquyDGPgE5DIoii8,1570 +numpy/_core/tests/test_cpu_features.py,sha256=lS9iIWWznKZgR8-G4ABZqznMTJGC343-FBaCG9ZHXmQ,15703 +numpy/_core/tests/test_custom_dtypes.py,sha256=LZCbBeoyCcluhz_drg5neyiAsoTaK-6DjB4l3LaNnTw,11766 +numpy/_core/tests/test_cython.py,sha256=hLdTcd5wbzMXOx_OyQEzNyFWm-rIcWto7LpCl1SNdIU,10186 +numpy/_core/tests/test_datetime.py,sha256=fVk7HADvcuiFzs19MTF4sAvp96jCBWQ7GWeNBQZKBPs,121786 +numpy/_core/tests/test_defchararray.py,sha256=hmMd5Wv5PjTEIuBXq_DopSqJsnp-qJ8ub5BBGRKIUEw,30629 +numpy/_core/tests/test_deprecations.py,sha256=CayfNUVMMj4BYTIFdYR4xvL2Sy2CTLN7VTABe0HIlxg,17101 +numpy/_core/tests/test_dlpack.py,sha256=Lfi3Xd2umxJ4W8fJht5epHlYWwTKx7MB47i7dcOIpq8,5830 +numpy/_core/tests/test_dtype.py,sha256=e1ZLn0xj8FrlxK3FeHOOsoQ-xV17-FMM7mh7VpuuVhs,78797 +numpy/_core/tests/test_einsum.py,sha256=Sixz-ZogKZmnFz3t49voD6AsCxmxUl_c_DHxT9rdscE,56277 +numpy/_core/tests/test_errstate.py,sha256=czhSWJJ8mdDpkh76pAxU2-d4ebMyopyk2D_CC-2lzI0,4627 +numpy/_core/tests/test_extint128.py,sha256=F6TAH3PlGON3CNz-B4hunClNUTQYQ2R8CkvaX2Zqeo4,5625 +numpy/_core/tests/test_function_base.py,sha256=x6rHdbqXtHj07Oml_5DslnG6y8jm0XfW4RdV0Q_lHHA,17651 
+numpy/_core/tests/test_getlimits.py,sha256=CAHTLA8QIYVXTLWCGAISUZaAJ-xd_cBnSdYaOGuLWn8,6976 +numpy/_core/tests/test_half.py,sha256=QSKuHAfa8NWvl0A51-XcV0UOIvk-ooLy6pndq90hr6k,24425 +numpy/_core/tests/test_hashtable.py,sha256=m9-IRALLhU5liPuAk4v-ZQTVQ4s5XtLhL6xRXf5QTOE,1147 +numpy/_core/tests/test_indexerrors.py,sha256=mU2MJbdpbrcvxLZqZR293So4ZJxMH4apAjqXufRyOis,4726 +numpy/_core/tests/test_indexing.py,sha256=lU0jP4UvEe2_MUiAhy4_GD1zvpdIwUrHviu0MJhW_wQ,55421 +numpy/_core/tests/test_item_selection.py,sha256=AoPUe3llYwKjv3dO1PW1qSml4SWrAAL3fNqpwKAku6w,6631 +numpy/_core/tests/test_limited_api.py,sha256=75nz_t-jBdjKim6j-WW7WsD2rPnJ_KQ-zrRUiP3nVic,3463 +numpy/_core/tests/test_longdouble.py,sha256=FjuntHkYe158dwWr7eYe_mlqkj7sQ9lQXKZ93CKF0Pc,12391 +numpy/_core/tests/test_machar.py,sha256=Aw8icmrolAGmbIuXhUIYd4YvqIRR1I8GkcSx0J2c6yM,1067 +numpy/_core/tests/test_mem_overlap.py,sha256=IGpRF2GnkLQxEiIizsVT0eWUtlgCcJQ4w0-BEjSpT_8,29219 +numpy/_core/tests/test_mem_policy.py,sha256=pL6kBK8fgtRDTfMubFGGWnliTPWnS64uZ9l1H5qI8hk,16794 +numpy/_core/tests/test_memmap.py,sha256=LtghbNqt9AOmAalIyZF3lepthcKircyNfb2-5_Tkj1c,8186 +numpy/_core/tests/test_multiarray.py,sha256=au2BIcxXH1rXMVBm4VKNA3aogJu3Qtd8bAwcoZzpDcM,400390 +numpy/_core/tests/test_multithreading.py,sha256=VkvO2311ch8a_EeF7RTmhAQWvtHXuTZhqLVZZH1ovKI,8601 +numpy/_core/tests/test_nditer.py,sha256=7y1wdYzpGdwEbHRc5xppx8FZ45cKxNrm3JKzUPvkhrE,136568 +numpy/_core/tests/test_nep50_promotions.py,sha256=i6KpABBWFB5PWCdEv8kIjNQd7ryAPINS5m_Tnu7sDj4,10068 +numpy/_core/tests/test_numeric.py,sha256=aM2TfTaSVE2fz0Z3nN72XoxSDvZzAdatwWpLYWGBBws,159748 +numpy/_core/tests/test_numerictypes.py,sha256=r4ZvEN0E8efuqZhx2spCXA5Mr14mK1BRpmOZFRp0LhU,23271 +numpy/_core/tests/test_overrides.py,sha256=0sDSmDWIr88GuCj0gOxdE3l0X_T5Hb5Wj2zfJDkOtvU,27518 +numpy/_core/tests/test_print.py,sha256=_cuM-DIpljOkzErb2ggIgs9HvOYrtpRppaECF6xAo0c,6787 +numpy/_core/tests/test_protocols.py,sha256=pbfumoRNnPhDP6PAPNIgLHUPPlmCdamCo4akkO8afjo,1173 +numpy/_core/tests/test_records.py,sha256=PAMHzIPp2WWDm4JHFQ-cjPBWf4BDuQumIYo7UX-zElk,20547 +numpy/_core/tests/test_regression.py,sha256=fJJnesLRUyPziCbYVM9LfLSS3qAMUz1-mzddhV9Br-U,95565 +numpy/_core/tests/test_scalar_ctors.py,sha256=I3akKp6WdwsTGic8pYQC_c6AxPXPEXStywWOF0n_ivU,6724 +numpy/_core/tests/test_scalar_methods.py,sha256=tx1RoZ03QsWblqg3Dv_JkaBFUOOILKZIqaEsFEs4tfE,9117 +numpy/_core/tests/test_scalarbuffer.py,sha256=2mZblaScwhN8mdlQvUULAKt273B2ia-mjtNmL_2UxfQ,5638 +numpy/_core/tests/test_scalarinherit.py,sha256=OIvSjrltdNSSP2c5HvDQ6pza3aKfmfgtixu1Zbahpcg,2587 +numpy/_core/tests/test_scalarmath.py,sha256=gBHBZ5SQMru1A57FUEaIMk19GFdVLTRXiO9vVh4XVVc,46583 +numpy/_core/tests/test_scalarprint.py,sha256=NS-FQDWICDcuDF5gxTQuG1Td1-EiOXIXufI-dwvKwxU,19705 +numpy/_core/tests/test_shape_base.py,sha256=mRSruY7S84ula25ZoOvbcRg_ea_3C3338e1tmdmv1Uk,31536 +numpy/_core/tests/test_simd.py,sha256=u8xSZ6HNLJ9-siYNIuyd0RA7FbD1BLEmnV5TGUrt1FU,48823 +numpy/_core/tests/test_simd_module.py,sha256=JjXH4Yq-0K-R8FHqVDinNaqY_grb1fQFFyVTHGQ0pBg,3904 +numpy/_core/tests/test_stringdtype.py,sha256=QRBUpyg3vpwECmznarkRC2WT_LTLinmn_LBOWpQxf3I,56863 +numpy/_core/tests/test_strings.py,sha256=16hEUxlHI89-8YsoW9RfI-V4eU-GKwnJXEak-dB7lW8,57959 +numpy/_core/tests/test_ufunc.py,sha256=yO1DbSTyonZWsz8HoXV0E4YN5Xlg-aIHi6xn2gTi928,136356 +numpy/_core/tests/test_umath.py,sha256=D7wSX7JvIk80znwd8GsxYZIzp62It75SBzvKOZHeOXE,193840 +numpy/_core/tests/test_umath_accuracy.py,sha256=QCFAeiPN6rEO8fwDwJun4J1pCKm0bPsQK6-1pTYCMIY,5478 
+numpy/_core/tests/test_umath_complex.py,sha256=LZMd-divBHQQ7dS34obwvmStXa8aNez45VIVTwPg_jM,23627 +numpy/_core/tests/test_unicode.py,sha256=qrQ7UC0yndXFYI7MiJu8y_I5jCK2lxOQcehE289MElk,12967 +numpy/_core/umath.py,sha256=t_SQIHR7dkMF-VRp8dKyroOEd90oqNlzmgGwaH28qW8,2130 +numpy/_core/umath.pyi,sha256=FIqmlQwQIueIrs-_QehV3guNEnJE2LxVs3NPCj38Vdo,2643 +numpy/_distributor_init.py,sha256=FBSJdgVHlQca5BrQEVYPoFm6KSTJhIFnWtWbEkEhTSo,421 +numpy/_distributor_init.pyi,sha256=6IvMzAmr0-Z6oqTkZcgXgrkJrQXVMjBih2AZvLdDgOE,27 +numpy/_expired_attrs_2_0.py,sha256=zP31EXmbwygcOEzyetDEp-RxL9cUfbUUht956zaOSf8,3826 +numpy/_expired_attrs_2_0.pyi,sha256=n2ipDUFTFS4puCD56dlNWGkVkw_b0M6cEyugo4Qh3HM,1253 +numpy/_globals.py,sha256=k5ZVnzUbKNSLPmZ0URYwJN5C_7xIzfMNaaSsBSrPTuI,3091 +numpy/_globals.pyi,sha256=IrHHIXmibXzgK0VUlECQLw4IEkveXSHo_ZWnTkfnLe4,280 +numpy/_pyinstaller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_pyinstaller/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc,, +numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc,, +numpy/_pyinstaller/hook-numpy.py,sha256=MU22pQ4AkUYPQWu5C8pRDpnYXElLJ8R0FGNYJUQpiVE,1362 +numpy/_pyinstaller/hook-numpy.pyi,sha256=tAvtMPovoi-sur0D1NAo3_evSmYKLTh0bgRSC7QrCIk,349 +numpy/_pyinstaller/tests/__init__.py,sha256=pdPbCTRwpCJamlyvIi9HZTlqAvK5HPbGu3oMA0cu2Rs,329 +numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc,, +numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc,, +numpy/_pyinstaller/tests/pyinstaller-smoke.py,sha256=6iL-eHMQaG3rxnS5EgcvrCqElm9aKL07Cjr1FZJSXls,1143 +numpy/_pyinstaller/tests/test_pyinstaller.py,sha256=8K-7QxmfoXCG0NwR0bhIgCNrDjGlrTzWnrR1sR8btgU,1135 +numpy/_pytesttester.py,sha256=DjlYL8uINN2XWa3nnlX6gPGuoLjcx1Bie_PQzbp2cpA,6328 +numpy/_pytesttester.pyi,sha256=VXCuwPYTb9-PF6nxXwibwBbre0hW9jIB4nkzmtm2kls,497 +numpy/_typing/__init__.py,sha256=MG5Wv9dc3ZyOmDfidH5cFtykeyNM77ArC4R3UW7Tn-Y,7188 +numpy/_typing/__pycache__/__init__.cpython-312.pyc,, +numpy/_typing/__pycache__/_add_docstring.cpython-312.pyc,, +numpy/_typing/__pycache__/_array_like.cpython-312.pyc,, +numpy/_typing/__pycache__/_char_codes.cpython-312.pyc,, +numpy/_typing/__pycache__/_dtype_like.cpython-312.pyc,, +numpy/_typing/__pycache__/_extended_precision.cpython-312.pyc,, +numpy/_typing/__pycache__/_nbit.cpython-312.pyc,, +numpy/_typing/__pycache__/_nbit_base.cpython-312.pyc,, +numpy/_typing/__pycache__/_nested_sequence.cpython-312.pyc,, +numpy/_typing/__pycache__/_scalars.cpython-312.pyc,, +numpy/_typing/__pycache__/_shape.cpython-312.pyc,, +numpy/_typing/__pycache__/_ufunc.cpython-312.pyc,, +numpy/_typing/_add_docstring.py,sha256=_3g7D-6HAQ3MT4X6DE07yLua9LqWFhskNVx1TS7X9O4,3999 +numpy/_typing/_array_like.py,sha256=EPZUfJSjamvsWJ6Rs5ZwwA_5FhBpYdoifcVVtVcWPn0,4188 +numpy/_typing/_callable.pyi,sha256=Zq3vN0V7VMwFRjyXl2ITcc8DdWKAB0fSlBQ52wmZrMI,11767 +numpy/_typing/_char_codes.py,sha256=j07npk82Nb7Ira2z7ZTlU3UcOPwt2gM7qZKrPLdjT48,8764 +numpy/_typing/_dtype_like.py,sha256=8M5RekLqdheEjWMIn4RnbkEzsS7jCatCiT0D5hg-53c,3762 +numpy/_typing/_extended_precision.py,sha256=pknUqgak0FBNM-sERPqW-pFGH71_K-iehFSee5oQiqE,434 +numpy/_typing/_nbit.py,sha256=KSbKwOKttob-5ytT5vCVkHrDMn0YHvyptTTyj_6AYcw,632 +numpy/_typing/_nbit_base.py,sha256=nPZpsQltuR5B0iaAYF9qD2he_kXnmssv_RhaUNFsW-s,3058 +numpy/_typing/_nbit_base.pyi,sha256=kHAqTmpYUWbQyTUVRs4NKKcDwiEJgUzWvvT1FQgQ89I,740 
+numpy/_typing/_nested_sequence.py,sha256=so1agYGHd5gDo_IBvvHqBB5lsqGbHqN_imyC5UHU-HI,2505 +numpy/_typing/_scalars.py,sha256=LhXY2BTHmeYKzeIZfpjvuMn-5eOLjU2n9z7z1l5bKf8,944 +numpy/_typing/_shape.py,sha256=6cFv-LbSyG9mlfSBOGGyul9Q_GUrlcHQC9JZa-m20cA,275 +numpy/_typing/_ufunc.py,sha256=HOkaE-6wV0fd3rmHZGC39YAHIIf8tyvlzekD4y4GQxA,156 +numpy/_typing/_ufunc.pyi,sha256=1Ni26dsi2fbH2oNvXDNNXaBPQQzdhkwA7VQ8eyuJS_c,26575 +numpy/_utils/__init__.py,sha256=hVnZ7C0MCSNbMw-Zyq-MKCYStaGX6RzqFMnnh7ed4dE,3477 +numpy/_utils/__init__.pyi,sha256=VxEygNvp90alV8zYsUSuDYNdF7BEucXUx3w55Ef7YXI,726 +numpy/_utils/__pycache__/__init__.cpython-312.pyc,, +numpy/_utils/__pycache__/_convertions.cpython-312.pyc,, +numpy/_utils/__pycache__/_inspect.cpython-312.pyc,, +numpy/_utils/__pycache__/_pep440.cpython-312.pyc,, +numpy/_utils/_convertions.py,sha256=0xMxdeLOziDmHsRM_8luEh4S-kQdMoMg6GxNDDas69k,329 +numpy/_utils/_convertions.pyi,sha256=4l-0UmPCyVA70UJ8WAd2A45HrKFSzgC0sFDBSnKcYiQ,118 +numpy/_utils/_inspect.py,sha256=zFuJABH08D1Kgq_eecYkD1Ogg0OXp1t4oqjZxM0kdLk,7436 +numpy/_utils/_inspect.pyi,sha256=wFajmQpCTXpMbJBbdiiyJMb29HkaMW0jEWLMqbQcQ5k,2255 +numpy/_utils/_pep440.py,sha256=it9P4_oHXWw3BxdoVz7JPMuj5kxF5M7_BJ8Z1m9nu0w,13988 +numpy/_utils/_pep440.pyi,sha256=xzYJoZ6DnjvgaKr8OsBwim77fAJ0xeQJI9XAt75gvfI,3870 +numpy/char/__init__.py,sha256=xs6pprMdmNeXVfuTRkU3nF9qdhutWdPu5oaep2AjWmc,93 +numpy/char/__init__.pyi,sha256=siwqDh7X7u4e0HGx3xg8eDaJVqy0_nac5y8UMzz-BcM,1540 +numpy/char/__pycache__/__init__.cpython-312.pyc,, +numpy/conftest.py,sha256=pXdv-CKocoIEpr0DsYstu7TgqvNdzSvfiDNMlMwmqYk,8577 +numpy/core/__init__.py,sha256=wJNaRF1UFOnZKqiBrsshWLjTGiEZ9rvWlcit0xj7Y0w,1290 +numpy/core/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/__pycache__/__init__.cpython-312.pyc,, +numpy/core/__pycache__/_dtype.cpython-312.pyc,, +numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc,, +numpy/core/__pycache__/_internal.cpython-312.pyc,, +numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc,, +numpy/core/__pycache__/_utils.cpython-312.pyc,, +numpy/core/__pycache__/arrayprint.cpython-312.pyc,, +numpy/core/__pycache__/defchararray.cpython-312.pyc,, +numpy/core/__pycache__/einsumfunc.cpython-312.pyc,, +numpy/core/__pycache__/fromnumeric.cpython-312.pyc,, +numpy/core/__pycache__/function_base.cpython-312.pyc,, +numpy/core/__pycache__/getlimits.cpython-312.pyc,, +numpy/core/__pycache__/multiarray.cpython-312.pyc,, +numpy/core/__pycache__/numeric.cpython-312.pyc,, +numpy/core/__pycache__/numerictypes.cpython-312.pyc,, +numpy/core/__pycache__/overrides.cpython-312.pyc,, +numpy/core/__pycache__/records.cpython-312.pyc,, +numpy/core/__pycache__/shape_base.cpython-312.pyc,, +numpy/core/__pycache__/umath.cpython-312.pyc,, +numpy/core/_dtype.py,sha256=GHBhfVtsVrP7v13IujEz9aGIENkYIdbfuRu-New1UnU,323 +numpy/core/_dtype.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_dtype_ctypes.py,sha256=wX4m37b0zQgxlzT5OjE_uj2E5CpiX9E7HLFpO6h_lDY,351 +numpy/core/_dtype_ctypes.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_internal.py,sha256=qxpHJELXNUcYJkJt1LktQuZm4BwYu4bXnMuBEOp6POU,949 +numpy/core/_multiarray_umath.py,sha256=T88HZgFD5VCuXRCSeLbPoj99nKUSdgyw8xWyf6eqhxQ,2098 +numpy/core/_utils.py,sha256=5fk18JN43Rg6YHvan6QjdrOeOuLtRlLVmP6MadBEJVA,923 +numpy/core/arrayprint.py,sha256=Lbe4smWXYFzd9sO9LLJ5PZS4C3bSvLt6HRtwSE56xN8,339 +numpy/core/defchararray.py,sha256=a9luvvni8gRrGVdKO7U_xwsFFvkzlxnVgxL75jLRmCI,347 +numpy/core/einsumfunc.py,sha256=CNucINgUIrpiLQn4xPI_mogwjfKlFA3h7gwAvRVwb5M,339 
+numpy/core/fromnumeric.py,sha256=5TaonJVuC110qv3f3cqTtmjayTX0BmqJAgoAJn5H3ZI,343 +numpy/core/function_base.py,sha256=vhjhzsEzDd11RHg6pilfMJO3X6k94an5RAJqj-nlzms,351 +numpy/core/getlimits.py,sha256=6nCk4Tw0LjW7joWsprI5LiMzje1gsOjO2lSQ_OwBB8I,335 +numpy/core/multiarray.py,sha256=bjdPLbvJuj61M6TZkbB5NXOCNmH4QbUq6g3ePkKP6TA,793 +numpy/core/numeric.py,sha256=Ctk_QikyB2mM0xI0lBeB8YTUfTwQSXfVdpIMRtunbMo,360 +numpy/core/numerictypes.py,sha256=bXwTwzUahzbHrFGhS5RkJOvb6TYEsQnQC5ww9mN-1Vw,347 +numpy/core/overrides.py,sha256=1FZyb0U6JJuyojtxFvQ7HSJ2rpfhWec0F-X0mapCjc8,335 +numpy/core/overrides.pyi,sha256=-3xfjHfa4UaCuhTVwwRN4EOM5uz9vZR0gMeTVvEdbYI,525 +numpy/core/records.py,sha256=9yfFDxyOc68lXqfbaosgRNlw1dbWP8CRHzIPEtEtSgc,327 +numpy/core/shape_base.py,sha256=2srdQtF1d8LpUbDjGMXT-Tqz2K2NaTO-ZEC4viCYswY,339 +numpy/core/umath.py,sha256=hMVmNrICdqXRiiRG7UMV0Gr-9xYqJGmkONGQn20iK98,319 +numpy/ctypeslib/__init__.py,sha256=WFwMhpV2LJP-IQOspaInhV8c6XPKZwqppE-cvtIpqvU,193 +numpy/ctypeslib/__init__.pyi,sha256=R0tHAk1P0jw-HLYjjKBqXEjDyXhByrtbjrgOxht9tE4,619 +numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc,, +numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc,, +numpy/ctypeslib/_ctypeslib.py,sha256=NtEUpisQhDfETBLAkqYf7Ajq0xiNhZurb5SmGGH54pA,19079 +numpy/ctypeslib/_ctypeslib.pyi,sha256=xS-NLEO6xwjUr-AUWfGxz3N7X5jwIGBVl6RhOUUYZ74,8084 +numpy/doc/__pycache__/ufuncs.cpython-312.pyc,, +numpy/doc/ufuncs.py,sha256=9xt8H34GhrXrFq9cWFUGvJFePa9YuH9Tq1DzAnm2E2E,5414 +numpy/dtypes.py,sha256=zuPwgC0ijF2oDRAOJ6I9JKhaJuhXFAygByLQaoVtT54,1312 +numpy/dtypes.pyi,sha256=sNN4kzUfhArHuKaMRKofBNZ57trl35UaZ51oDWrMmJ4,15544 +numpy/exceptions.py,sha256=x1z7C2RjrDFW8tLewbZjyMiQok0WBm5kKuRPIxVLUjg,7800 +numpy/exceptions.pyi,sha256=MJbCHjwFGps97WaVOPkaoUb8wi-l5OUbcFHdWZgBGbI,751 +numpy/f2py/__init__.py,sha256=cAgUHWgJQZZsfv8co8KBNr_m8B6fpzdBaUNvJeBf_No,2448 +numpy/f2py/__init__.pyi,sha256=UbgqGZKYnDHGHX9MlwBB3aBZ2T470ojrNREIhkwt6gc,132 +numpy/f2py/__main__.py,sha256=6i2jVH2fPriV1aocTY_dUFvWK18qa-zjpnISA-OpF3w,130 +numpy/f2py/__pycache__/__init__.cpython-312.pyc,, +numpy/f2py/__pycache__/__main__.cpython-312.pyc,, +numpy/f2py/__pycache__/__version__.cpython-312.pyc,, +numpy/f2py/__pycache__/_isocbind.cpython-312.pyc,, +numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc,, +numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc,, +numpy/f2py/__pycache__/capi_maps.cpython-312.pyc,, +numpy/f2py/__pycache__/cb_rules.cpython-312.pyc,, +numpy/f2py/__pycache__/cfuncs.cpython-312.pyc,, +numpy/f2py/__pycache__/common_rules.cpython-312.pyc,, +numpy/f2py/__pycache__/crackfortran.cpython-312.pyc,, +numpy/f2py/__pycache__/diagnose.cpython-312.pyc,, +numpy/f2py/__pycache__/f2py2e.cpython-312.pyc,, +numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc,, +numpy/f2py/__pycache__/func2subr.cpython-312.pyc,, +numpy/f2py/__pycache__/rules.cpython-312.pyc,, +numpy/f2py/__pycache__/symbolic.cpython-312.pyc,, +numpy/f2py/__pycache__/use_rules.cpython-312.pyc,, +numpy/f2py/__version__.py,sha256=99S6mSevuhwGmO9ku--7VUJekhN0ot4-J0cZKiHcqpw,48 +numpy/f2py/__version__.pyi,sha256=L4V6f6B-wuPi82B0MzeQsgN0NuHUQs9rKYl1jy3tG7s,45 +numpy/f2py/_backends/__init__.py,sha256=7_bA7c_xDpLc4_8vPfH32-Lxn9fcUTgjQ25srdvwvAM,299 +numpy/f2py/_backends/__init__.pyi,sha256=i4XhDRwbrl0ta6QGJPxhYGfSgugNGdtoWf1_27eSd60,136 +numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc,, +numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc,, +numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc,, +numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc,, 
+numpy/f2py/_backends/_backend.py,sha256=oFXZ8-VwcQSbltl8_pgWLPqCOZ8Y_px7oeTk_BlxJTc,1151 +numpy/f2py/_backends/_backend.pyi,sha256=sU4YiHvGfMkzDFbhZqqQPT-kwJZsWpGemkLxDion7ss,1342 +numpy/f2py/_backends/_distutils.py,sha256=hET0WB4qy-D4BznekGAWhk945k5weq2lGUDR6hriXMo,2385 +numpy/f2py/_backends/_distutils.pyi,sha256=-L8K1KQShPGGd1vgr4DlnYf6AshHFaRzAcgGqKv205g,463 +numpy/f2py/_backends/_meson.py,sha256=VouUQkWRUk74WhDtkf6HR79QoK-Wrx8E7qO7gVpyDnk,8107 +numpy/f2py/_backends/_meson.pyi,sha256=wvYtBdippKeiSeLzaYKehql0_3ThS8T8Aqat03hhjQ4,1869 +numpy/f2py/_backends/meson.build.template,sha256=hQeTapAY0xtni5Li-QaEtWx9DH9WDKah2lcEuSZfLLo,1599 +numpy/f2py/_isocbind.py,sha256=zaBgpfPNRmxVG3doUIlbZIiyB990MsXiwDabrSj9HnQ,2360 +numpy/f2py/_isocbind.pyi,sha256=KuzqHJQk0YSQnRnb8xqnyh8T0DGNnDD6bNI880tadCY,339 +numpy/f2py/_src_pyf.py,sha256=PHpo9D28Kq3q_3-KFX8D3sFD9eX8A1c3LuLNzXzByOw,7695 +numpy/f2py/_src_pyf.pyi,sha256=9NKnovhbLibbQkjCrRnyiTPDw3MBqycOHl1--BNrIqw,1012 +numpy/f2py/auxfuncs.py,sha256=dnaUwrdAv4-LbEiHNbS1vrjQNCO0lBuyWkj3Rt_UizE,26920 +numpy/f2py/auxfuncs.pyi,sha256=7RUoWWaHrqSYEmdNd5zCNnmbjUYE5pCe0FCxMXejbhg,8011 +numpy/f2py/capi_maps.py,sha256=7C-NndI2UbStNGXbhgbWOmr9tLAxfQvw1zf7Z7w5SFk,30079 +numpy/f2py/capi_maps.pyi,sha256=pR0pVZhUxaCpctq7FOWFSAGI_gaLdE-NWAyT96cWWZg,1066 +numpy/f2py/cb_rules.py,sha256=6KbPu9yfJ-7pAa24Ij9H34Ll15Qc8CXTqCFiUJI6R8Y,25051 +numpy/f2py/cb_rules.pyi,sha256=X_it8-Q0188EDlXd-QxhRdc3OUoA2t6V_jgM5TiQC88,495 +numpy/f2py/cfuncs.py,sha256=4J4P12oGpyWZHb1AVKAl7YJ3QUgngwGMCnB1IhrJn7U,52660 +numpy/f2py/cfuncs.pyi,sha256=EiAtSQxw4x-UlxsGKIEOJnld1d7dNYrk0bt_rlqLSp0,802 +numpy/f2py/common_rules.py,sha256=_9yzIolJMGgpd3D94LdBsODnfUskMRgt2v03rECIHJQ,5030 +numpy/f2py/common_rules.pyi,sha256=1uzTkcwiin6dVBbWUiOVB1ZppjKBHoRHG_Byvw-1UbI,323 +numpy/f2py/crackfortran.py,sha256=vbAvWj6XszLS-nU0nOedaNNtwtqvkkM8gqZAP9MvPBI,146879 +numpy/f2py/crackfortran.pyi,sha256=AvV_KPeE9jLG9EdmPdb2u7-gPJXc1H2yWVmmihHzCgM,10276 +numpy/f2py/diagnose.py,sha256=YWNj1vM68e47Lb270wlZk5yrcU-yTlzGaYNPBZ7nTAU,5075 +numpy/f2py/diagnose.pyi,sha256=ZFVCWTwf_xzL736p9FcfCYWftOXcNqSMCmq-K27KNN8,23 +numpy/f2py/f2py2e.py,sha256=krSW4RpZPDHNX2IWLdn28KWzj0lzFNSc_6fScbGQMfI,28763 +numpy/f2py/f2py2e.pyi,sha256=Qt6ZeOYBugJLFpAY3F9K_4hcm0sZt_3APTtdKLKObWA,2153 +numpy/f2py/f90mod_rules.py,sha256=7Z5vorU4whX405xML66hr4i1icCUc9gr6an4R-AMh7M,9810 +numpy/f2py/f90mod_rules.pyi,sha256=r6w0DuH2Jdt8wPdDYAnXZAQmytIYUqPOxVz-QaWwt74,451 +numpy/f2py/func2subr.py,sha256=9igCMMDttIgF1MG6kBOagkjI_SF-UlGjACAj3Ncv0-o,10049 +numpy/f2py/func2subr.pyi,sha256=-MDbOrhanuizf3rlcwBQooCF4GnoGprA8ypeFV_m8d0,386 +numpy/f2py/rules.py,sha256=Irj-13oLGowNHYElFV-TZUs0VEd0NQpRsnomnI1NTx8,63091 +numpy/f2py/rules.pyi,sha256=9GfFmNA8Unlg3pxcGwqwFl7yeKyIcTmx7wiPuiBAT-k,1326 +numpy/f2py/setup.cfg,sha256=Fpn4sjqTl5OT5sp8haqKIRnUcTPZNM6MIvUJBU7BIhg,48 +numpy/f2py/src/fortranobject.c,sha256=kLiHOty8fUruzfOmL5MQeVNFJSGHBjn7W6QbPYgQb30,46356 +numpy/f2py/src/fortranobject.h,sha256=7cfRN_tToAQ1Na13VQ2Kzb2ujMHUAgGsbScnfLVOHqs,5823 +numpy/f2py/symbolic.py,sha256=UuFs411WYSqR7JfbsuyNv__IC9wKqxQAWoWRDeKPcdw,53214 +numpy/f2py/symbolic.pyi,sha256=piZrats8SXrOD1qEADo-mbsc5NZOIaZ27Fl3d3cydTc,6083 +numpy/f2py/tests/__init__.py,sha256=pdPbCTRwpCJamlyvIi9HZTlqAvK5HPbGu3oMA0cu2Rs,329 +numpy/f2py/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-312.pyc,, 
+numpy/f2py/tests/__pycache__/test_block_docstring.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_callback.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_character.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_common.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_crackfortran.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_data.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_docs.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_f2cmap.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_f2py2e.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_isoc.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_kind.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_mixed.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_modules.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_parameter.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_pyf_src.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_quoted_character.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_character.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_complex.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_integer.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_logical.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_real.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_routines.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_size.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_string.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_symbolic.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/util.cpython-312.pyc,, +numpy/f2py/tests/src/abstract_interface/foo.f90,sha256=JFU2w98cB_XNwfrqNtI0yDTmpEdxYO_UEl2pgI_rnt8,658 +numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90,sha256=gvQJIzNtvacWE0dhysxn30-iUeI65Hpq7DiE9oRauz8,105 +numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=s6XLwujiCr6Xi8yBkvLPBXRmo2WsGVohU7K9ALnKUng,7478 +numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=But9r9m4iL7EGq_haMW8IiQ4VivH0TgUozxX4pPvdpE,29 +numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=oBwbGSlbr9MkFyhVO2aldjc01dr9GHrMrSiRQek8U64,460 +numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=rfzw3QdI-eaDSl-hslCgGpd5tHftJOVhXvb21Y9Gf6M,499 +numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=rmT9k4jP9Ru1PLcGqepw9Jc6P9XNXM0axY7o4hi9lUw,269 +numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=r08JeTVmTTExA-hYZ6HzaxVwBn1GMbPAuuwBhBDtJUk,130 +numpy/f2py/tests/src/block_docstring/foo.f,sha256=y7lPCPu7_Fhs_Tf2hfdpDQo1bhtvNSKRaZAOpM_l3dg,97 +numpy/f2py/tests/src/callback/foo.f,sha256=C1hjfpRCQWiOVVzIHqnsYcnLrqQcixrnHCn8hd9GhVk,1254 +numpy/f2py/tests/src/callback/gh17797.f90,sha256=_Nrl0a2HgUbtymGU0twaJ--7rMa1Uco2A3swbWvHoMo,148 +numpy/f2py/tests/src/callback/gh18335.f90,sha256=NraOyKIXyvv_Y-3xGnmTjtNjW2Znsnlk8AViI8zfovc,506 +numpy/f2py/tests/src/callback/gh25211.f,sha256=a2sxlQhtDVbYn8KOKHUYqwc-aCFt7sDPSnJsXFG35uI,179 +numpy/f2py/tests/src/callback/gh25211.pyf,sha256=FWxo0JWQlw519BpZV8PoYeI_FZ_K6C-3Wk6gLrfBPlw,447 +numpy/f2py/tests/src/callback/gh26681.f90,sha256=-cD69x7omk5wvVsfMHlXiZ-pTcaxs2Bl5G9GHA4UJ2M,566 +numpy/f2py/tests/src/cli/gh_22819.pyf,sha256=5rvOfCv-wSosB354LC9pExJmMoSHnbGZGl_rtA2fogA,142 +numpy/f2py/tests/src/cli/hi77.f,sha256=ttyI6vAP3qLnDqy82V04XmoqrXNM6uhMvvLri2p0dq0,71 
+numpy/f2py/tests/src/cli/hiworld.f90,sha256=QWOLPrTxYQu1yrEtyQMbM0fE9M2RmXe7c185KnD5x3o,51 +numpy/f2py/tests/src/common/block.f,sha256=GQ0Pd-VMX3H3a-__f2SuosSdwNXHpBqoGnQDjf8aG9g,224 +numpy/f2py/tests/src/common/gh19161.f90,sha256=BUejyhqpNVfHZHQ-QC7o7ZSo7lQ6YHyX08lSmQqs6YM,193 +numpy/f2py/tests/src/crackfortran/accesstype.f90,sha256=-5Din7YlY1TU7tUHD2p-_DSTxGBpDsWYNeT9WOwGhno,208 +numpy/f2py/tests/src/crackfortran/common_with_division.f,sha256=2LfRa26JEB07_ti-WDmIveq991PxRlL_K6ss28rZDkk,494 +numpy/f2py/tests/src/crackfortran/data_common.f,sha256=ZSUAh3uhn9CCF-cYqK5TNmosBGPfsuHBIEfudgysun4,193 +numpy/f2py/tests/src/crackfortran/data_multiplier.f,sha256=jYrJKZWF_59JF9EMOSALUjn0UupWvp1teuGpcL5s1Sc,197 +numpy/f2py/tests/src/crackfortran/data_stmts.f90,sha256=19YO7OGj0IksyBlmMLZGRBQLjoE3erfkR4tFvhznvvE,693 +numpy/f2py/tests/src/crackfortran/data_with_comments.f,sha256=hoyXw330VHh8duMVmAQZjr1lgLVF4zFCIuEaUIrupv0,175 +numpy/f2py/tests/src/crackfortran/foo_deps.f90,sha256=CaH7mnWTG7FcnJe2vXN_0zDbMadw6NCqK-JJ2HmDjK8,128 +numpy/f2py/tests/src/crackfortran/gh15035.f,sha256=jJly1AzF5L9VxbVQ0vr-sf4LaUo4eQzJguhuemFxnvg,375 +numpy/f2py/tests/src/crackfortran/gh17859.f,sha256=7K5dtOXGuBDAENPNCt-tAGJqTfNKz5OsqVSk16_e7Es,340 +numpy/f2py/tests/src/crackfortran/gh22648.pyf,sha256=qZHPRNQljIeYNwbqPLxREnOrSdVV14f3fnaHqB1M7c0,241 +numpy/f2py/tests/src/crackfortran/gh23533.f,sha256=w3tr_KcY3s7oSWGDmjfMHv5h0RYVGUpyXquNdNFOJQg,126 +numpy/f2py/tests/src/crackfortran/gh23598.f90,sha256=41W6Ire-5wjJTTg6oAo7O1WZfd1Ug9vvNtNgHS5MhEU,101 +numpy/f2py/tests/src/crackfortran/gh23598Warn.f90,sha256=1v-hMCT_K7prhhamoM20nMU9zILam84Hr-imck_dYYk,205 +numpy/f2py/tests/src/crackfortran/gh23879.f90,sha256=LWDJTYR3t9h1IsrKC8dVXZlBfWX7clLeU006X6Ow8oI,332 +numpy/f2py/tests/src/crackfortran/gh27697.f90,sha256=bbnKpDsOuCWluoNodxzCspUQnu169zKTsn4fLTkhwpM,364 +numpy/f2py/tests/src/crackfortran/gh2848.f90,sha256=gPNasx98SIf7Z9ibk_DHiGKCvl7ERtsfoGXiFDT7FbM,282 +numpy/f2py/tests/src/crackfortran/operators.f90,sha256=-Fc-qjW1wBr3Dkvdd5dMTrt0hnjnV-1AYo-NFWcwFSo,1184 +numpy/f2py/tests/src/crackfortran/privatemod.f90,sha256=7bubZGMIn7iD31wDkjF1TlXCUM7naCIK69M9d0e3y-U,174 +numpy/f2py/tests/src/crackfortran/publicmod.f90,sha256=Pnwyf56Qd6W3FUH-ZMgnXEYkb7gn18ptNTdwmGan0Jo,167 +numpy/f2py/tests/src/crackfortran/pubprivmod.f90,sha256=eYpJwBYLKGOxVbKgEqfny1znib-b7uYhxcRXIf7uwXg,165 +numpy/f2py/tests/src/crackfortran/unicode_comment.f90,sha256=aINLh6GlfTwFewxvDoqnMqwuCNb4XAqi5Nj5vXguXYs,98 +numpy/f2py/tests/src/f2cmap/.f2py_f2cmap,sha256=iUOtfHd3OuT1Rz2-yiSgt4uPKGvCt5AzQ1iygJt_yjg,82 +numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90,sha256=iJCD8a8MUTmuPuedbcmxW54Nr4alYuLhksBe1sHS4K0,298 +numpy/f2py/tests/src/isocintrin/isoCtests.f90,sha256=jcw-fzrFh0w5U66uJYfeUW4gv94L5MnWQ_NpsV9y0oI,998 +numpy/f2py/tests/src/kind/foo.f90,sha256=zIHpw1KdkWbTzbXb73hPbCg4N2Htj3XL8DIwM7seXpo,347 +numpy/f2py/tests/src/mixed/foo.f,sha256=90zmbSHloY1XQYcPb8B5d9bv9mCZx8Z8AMTtgDwJDz8,85 +numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=pxKuPzxF3Kn5khyFq9ayCsQiolxB3SaNtcWaK5j6Rv4,179 +numpy/f2py/tests/src/mixed/foo_free.f90,sha256=fIQ71wrBc00JUAVUj_r3QF9SdeNniBiMw6Ly7CGgPWU,139 +numpy/f2py/tests/src/modules/gh25337/data.f90,sha256=9Uz8CHB9i3_mjC3cTOmkTgPAF5tWSwYacG3MUrU-SY0,180 +numpy/f2py/tests/src/modules/gh25337/use_data.f90,sha256=WATiDGAoCKnGgMzm_iMgmfVU0UKOQlk5Fm0iXCmPAkE,179 +numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90,sha256=c7VU4SbK3yWn-6wksP3tDx_Hxh5u_g8UnlDpjU_-tBg,402 
+numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90,sha256=eEU7RgFPh-TnNXEuJFdtJmTF-wPnpbHLQhG4fEeJnag,403 +numpy/f2py/tests/src/modules/module_data_docstring.f90,sha256=tDZ3fUlazLL8ThJm3VwNGJ75QIlLcW70NnMFv-JA4W0,224 +numpy/f2py/tests/src/modules/use_modules.f90,sha256=UsFfx0B2gu_tS-H-BpLWed_yoMDl1kbydMIOz8fvXWA,398 +numpy/f2py/tests/src/negative_bounds/issue_20853.f90,sha256=fdOPhRi7ipygwYCXcda7p_dlrws5Hd2GlpF9EZ-qnck,157 +numpy/f2py/tests/src/parameter/constant_array.f90,sha256=KRg7Gmq_r3B7t3IEgRkP1FT8ve8AuUFWT0WcTlXoN5U,1468 +numpy/f2py/tests/src/parameter/constant_both.f90,sha256=-bBf2eqHb-uFxgo6Q7iAtVUUQzrGFqzhHDNaxwSICfQ,1939 +numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=re7pfzcuaquiOia53UT7qNNrTYu2euGKOF4IhoLmT6g,469 +numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=nEmMLitKoSAG7gBBEQLWumogN-KS3DBZOAZJWcSDnFw,612 +numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=IcxESVLKJUZ1k9uYKoSb8Hfm9-O_4rVnlkiUU2diy8Q,609 +numpy/f2py/tests/src/parameter/constant_real.f90,sha256=quNbDsM1Ts2rN4WtPO67S9Xi_8l2cXabWRO00CPQSSQ,610 +numpy/f2py/tests/src/quoted_character/foo.f,sha256=WjC9D9171fe2f7rkUAZUvik9bkIf9adByfRGzh6V0cM,482 +numpy/f2py/tests/src/regression/AB.inc,sha256=cSNxitwrjTKMiJzhY2AI5FaXJ5y9zDgA27x79jyoI6s,16 +numpy/f2py/tests/src/regression/assignOnlyModule.f90,sha256=c9RvUP1pQ201O_zOXgV0xp_aJF_8llxuA8Uot9z5tr0,608 +numpy/f2py/tests/src/regression/datonly.f90,sha256=9cVvl8zlAuGiqbSHMFzFn6aNWXj2v7sHJdd9A1Oc0qg,392 +numpy/f2py/tests/src/regression/f77comments.f,sha256=bqTsmO8WuSLVFsViIV7Nj7wQbJoZ7IAA3d2tpRDKsnA,626 +numpy/f2py/tests/src/regression/f77fixedform.f95,sha256=hcLZbdozMJ3V9pByVRp3RoeUvZgLMRLFctpZvxK2hTI,139 +numpy/f2py/tests/src/regression/f90continuation.f90,sha256=_W1fj0wXLqT91Q14qpBnM3F7rJKaiSR8upe0mR6_OIE,276 +numpy/f2py/tests/src/regression/incfile.f90,sha256=i7Y1zgMXR9bSxnjeYWSDGeCfsS5jiyn7BLb-wbwjz2U,92 +numpy/f2py/tests/src/regression/inout.f90,sha256=CpHpgMrf0bqA1W3Ozo3vInDz0RP904S7LkpdAH6ODck,277 +numpy/f2py/tests/src/regression/lower_f2py_fortran.f90,sha256=CMQL5RWf9LKnnUDiS-IYa9xc9DGanCYraNq0vGmunOE,100 +numpy/f2py/tests/src/regression/mod_derived_types.f90,sha256=565plqPwWDgnkpSb4-cfZbf3wTM85F2Gocklx5wpGWA,567 +numpy/f2py/tests/src/return_character/foo77.f,sha256=WzDNF3d_hUDSSZjtxd3DtE-bSx1ilOMEviGyYHbcFgM,980 +numpy/f2py/tests/src/return_character/foo90.f90,sha256=ULcETDEt7gXHRzmsMhPsGG4o3lGrcx-FEFaJsPGFKyA,1248 +numpy/f2py/tests/src/return_complex/foo77.f,sha256=8ECRJkfX82oFvGWKbIrCvKjf5QQQClx4sSEvsbkB6A8,973 +numpy/f2py/tests/src/return_complex/foo90.f90,sha256=c1BnrtWwL2dkrTr7wvlEqNDg59SeNMo3gyJuGdRwcDw,1238 +numpy/f2py/tests/src/return_integer/foo77.f,sha256=_8k1evlzBwvgZ047ofpdcbwKdF8Bm3eQ7VYl2Y8b5kA,1178 +numpy/f2py/tests/src/return_integer/foo90.f90,sha256=bzxbYtofivGRYH35Ang9ScnbNsVERN8-6ub5-eI-LGQ,1531 +numpy/f2py/tests/src/return_logical/foo77.f,sha256=FxiF_X0HkyXHzJM2rLyTubZJu4JB-ObLnVqfZwAQFl8,1188 +numpy/f2py/tests/src/return_logical/foo90.f90,sha256=9KmCe7yJYpi4ftkKOM3BCDnPOdBPTbUNrKxY3p37O14,1531 +numpy/f2py/tests/src/return_real/foo77.f,sha256=ZTrzb6oDrIDPlrVWP3Bmtkbz3ffHaaSQoXkfTGtCuFE,933 +numpy/f2py/tests/src/return_real/foo90.f90,sha256=gZuH5lj2lG6gqHlH766KQ3J4-Ero-G4WpOOo2MG3ohU,1194 +numpy/f2py/tests/src/routines/funcfortranname.f,sha256=oGPnHo0zL7kjFnuHw41mWUSXauoeRVPXnYXBb2qljio,123 +numpy/f2py/tests/src/routines/funcfortranname.pyf,sha256=coD8AdLyPK4_cGvQJgE2WJW_jH8EAulZCsMeb-Q1gOk,440 +numpy/f2py/tests/src/routines/subrout.f,sha256=RTexoH7RApv_mhu-RcVwyNiU-DXMTUP8LJAMSn2wQjk,90 
+numpy/f2py/tests/src/routines/subrout.pyf,sha256=c9qv4XtIh4wA9avdkDJuXNwojK-VBPldrNhxlh446Ic,322 +numpy/f2py/tests/src/size/foo.f90,sha256=IlFAQazwBRr3zyT7v36-tV0-fXtB1d7WFp6S1JVMstg,815 +numpy/f2py/tests/src/string/char.f90,sha256=ihr_BH9lY7eXcQpHHDQhFoKcbu7VMOX5QP2Tlr7xlaM,618 +numpy/f2py/tests/src/string/fixed_string.f90,sha256=5n6IkuASFKgYICXY9foCVoqndfAY0AQZFEK8L8ARBGM,695 +numpy/f2py/tests/src/string/gh24008.f,sha256=UA8Pr-_yplfOFmc6m4v9ryFQ8W9OulaglulefkFWD68,217 +numpy/f2py/tests/src/string/gh24662.f90,sha256=-Tp9Kd1avvM7AIr8ZukFA9RVr-wusziAnE8AvG9QQI4,197 +numpy/f2py/tests/src/string/gh25286.f90,sha256=2EpxvC-0_dA58MBfGQcLyHzpZgKcMf_W9c73C_Mqnok,304 +numpy/f2py/tests/src/string/gh25286.pyf,sha256=GjgWKh1fHNdPGRiX5ek60i1XSeZsfFalydWqjISPVV8,381 +numpy/f2py/tests/src/string/gh25286_bc.pyf,sha256=6Y9zU66NfcGhTXlFOdFjCSMSwKXpq5ZfAe3FwpkAsm4,384 +numpy/f2py/tests/src/string/scalar_string.f90,sha256=ACxV2i6iPDk-a6L_Bs4jryVKYJMEGUTitEIYTjbJes4,176 +numpy/f2py/tests/src/string/string.f,sha256=shr3fLVZaa6SyUJFYIF1OZuhff8v5lCwsVNBU2B-3pk,248 +numpy/f2py/tests/src/value_attrspec/gh21665.f90,sha256=JC0FfVXsnB2lZHb-nGbySnxv_9VHAyD0mKaLDowczFU,190 +numpy/f2py/tests/test_abstract_interface.py,sha256=PXNQB0DZdmdZyysJkB8f9GY0_hA3hGkmha8aQBXc1Sk,811 +numpy/f2py/tests/test_array_from_pyobj.py,sha256=N1RJ0yFcLs6cFmdxSjizjfLRTEhdKRhrO9Vx8bcG0GU,23696 +numpy/f2py/tests/test_assumed_shape.py,sha256=8kPoQWn6IfMWNMba0al7a5XopKb3JnvZP3V3P6O2F8o,1467 +numpy/f2py/tests/test_block_docstring.py,sha256=P3K0QqnY0UfUQPc3vDrlP_WlZ6gNJ7iokG-D-ZG9tXQ,584 +numpy/f2py/tests/test_callback.py,sha256=P_5qM1xWOYfjeDgd70cIVpV1h0_tA1AP3kxRZDAeqII,7099 +numpy/f2py/tests/test_character.py,sha256=R6FhfIi85E6L1qwlJtsnTCvNgFRriE3kSXefTwIVgLk,21931 +numpy/f2py/tests/test_common.py,sha256=gr4MF659JBWvSY4eQAqgHnOrVbEpq0ZhGM5Cdbye1L4,644 +numpy/f2py/tests/test_crackfortran.py,sha256=x_E4KmEfBX5SFsNkO_-mUi4W_WuzB-ZFsLOfUdHjLVE,16413 +numpy/f2py/tests/test_data.py,sha256=tete-xcIZHZi5VFjy_pyTjr5AjhQzoyJvLsT9QLYU1M,2895 +numpy/f2py/tests/test_docs.py,sha256=wGsRmCJugExEAvj25pANoLr45S6fkpG4kf47dnfg9Ew,1855 +numpy/f2py/tests/test_f2cmap.py,sha256=zM8lksGAoH-cRvEVRkzciZ4oqH28obd-vvMVUObVjt0,387 +numpy/f2py/tests/test_f2py2e.py,sha256=aGZnZH5USd8FJpG5F1L6bWfUzuUqP954lit5-TDPbeE,27834 +numpy/f2py/tests/test_isoc.py,sha256=g5PLyJuAYwF0obaZ55j_e-CNOODJcADsYFSfxcCl5LM,1434 +numpy/f2py/tests/test_kind.py,sha256=ovQVxbtbbnb-Keo8Dh2LpDyPLbIA1uxiZOzMLo5KMX0,1825 +numpy/f2py/tests/test_mixed.py,sha256=DZcTCCle0o4aopFmGi58KtxzP6NFFci4N-pL3_HLb90,862 +numpy/f2py/tests/test_modules.py,sha256=GaOwxLf8KLdNkWIl9fveT9xg_wvCFdDsel9QiFweCAE,2301 +numpy/f2py/tests/test_parameter.py,sha256=P8hDezlxKN_Cm06oWGkS0pwlJvQz5QYwBsyTEA_Y1PQ,4634 +numpy/f2py/tests/test_pyf_src.py,sha256=xV81hRiGeytFFKeVnp-6O2OrGVdzJyecMEalCQSoDoI,1134 +numpy/f2py/tests/test_quoted_character.py,sha256=x19FhD6ZA7JkDuLuiXi48sGd8b42SPRuwwEY8CVRb24,477 +numpy/f2py/tests/test_regression.py,sha256=APQz3e38jz-AbGEBN5n-P1Wuegx4Da1ze7D7nLLpUL8,6197 +numpy/f2py/tests/test_return_character.py,sha256=t8cxO8LatnBXf2EU-HkfmdxvdHMYDk9DLx3kNUTArC4,1534 +numpy/f2py/tests/test_return_complex.py,sha256=_uWrnSh-IDL8men8X__5srP4wM0RkICr4WVJgoNgrzY,2440 +numpy/f2py/tests/test_return_integer.py,sha256=ng_cpFX4nStcpSFoYdD9GiUdCJSXPU0On2MLOA4uOpQ,1813 +numpy/f2py/tests/test_return_logical.py,sha256=OrS11uAw_asDamL7inRKf-S-7SBG0GTS8Vrqlexrkm0,2048 +numpy/f2py/tests/test_return_real.py,sha256=ynInWwkcRfUe981kGJnrkkZeKK7QFlvkiODoIJj6Jg0,3273 
+numpy/f2py/tests/test_routines.py,sha256=f9pR8FNJgKuBWtzCjlfniWVHJecpW6gSNkGDb2t693c,795 +numpy/f2py/tests/test_semicolon_split.py,sha256=akc4xJiHI6xOCfpCEtFYPMz8qy2K5jODEPyJHYQvLdE,1627 +numpy/f2py/tests/test_size.py,sha256=SjES727lNcCJFePDnh7uBhncOXWOcqHqVPbZPvBO5js,1155 +numpy/f2py/tests/test_string.py,sha256=47wYPuO1NkjhXSbbyS8vBKsNCju5dA9uMjNhGPx-BGg,2938 +numpy/f2py/tests/test_symbolic.py,sha256=dmuYLhhcv-rT-ux_aVrWaJj_Yxmznezl6Enu8-ediK0,18342 +numpy/f2py/tests/test_value_attrspec.py,sha256=4wY9qPXl0JoPGCG7GyyuMDKLfsHAV8KRWGdEk9-ZZT8,330 +numpy/f2py/tests/util.py,sha256=KIDsCW5uZXe6jSdWpY9Ozlqs5-v-eeDsW3P5TDWKDzo,12112 +numpy/f2py/use_rules.py,sha256=emZhSLPbNDyBHnsfKKXDnGz4P_gwrgL0dfCZcD3n9D4,3376 +numpy/f2py/use_rules.pyi,sha256=gIAAemWfcidclVYZUpa6RRmSdUEDw4FDnGPaCNo93Zw,424 +numpy/fft/__init__.py,sha256=OWE0m6H_blyv1wtqQpiXU5kqxF6O2UxxcV5t11U05RE,8291 +numpy/fft/__init__.pyi,sha256=6XgAsd9coqJ3jBOPD3vn1-8AcbMLhjxzQd21xjeqmlA,514 +numpy/fft/__pycache__/__init__.cpython-312.pyc,, +numpy/fft/__pycache__/_helper.cpython-312.pyc,, +numpy/fft/__pycache__/_pocketfft.cpython-312.pyc,, +numpy/fft/__pycache__/helper.cpython-312.pyc,, +numpy/fft/_helper.py,sha256=hIn2ZyEYG4fLB3MGvCPvpSrLXFfh-xO4zGKljk_TQjY,6787 +numpy/fft/_helper.pyi,sha256=1A1kitc5k62ER6X1XLF7PIQL5FiVxxRKu_iCqiQ1kIU,1394 +numpy/fft/_pocketfft.py,sha256=CfpApR9R0SOucql9gp9vXadm_y5cBM-Xnj5trDpvFSE,62598 +numpy/fft/_pocketfft.pyi,sha256=_RIRwdhtixjN4qszZk-xeYn2jmcW_NNAMEJHeETigv0,3174 +numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so,sha256=6EnciecXsqMVlI3QEneGCYNyx6IgbP0TD6gYtz5pFk0,539072 +numpy/fft/helper.py,sha256=RoEADsOnoCgSTL1gE5n-36llz8iwxGzn52af3L-9KEY,611 +numpy/fft/helper.pyi,sha256=KsF45bVyZ4_eJbBFpkER9L8MCWmg7dJuhLqY_7uFNZs,891 +numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/fft/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc,, +numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc,, +numpy/fft/tests/test_helper.py,sha256=LeVDCCdHzFhmCQ5ByMtVyA22GphgTQS5dupuxrLE8X0,6154 +numpy/fft/tests/test_pocketfft.py,sha256=PCF833rSWsXOMWN8wCluhq0aYHU24_tHbuMl1PuO6dE,24446 +numpy/lib/__init__.py,sha256=zYGuqEfPqq7LDbidpxYs8GgCNAmoJ4xQgFvF3XKJ5Rg,3004 +numpy/lib/__init__.pyi,sha256=Z7OsQAZGURd4cI3xnEF37unbOUqtknwEkT8yQTF-AF8,1651 +numpy/lib/__pycache__/__init__.cpython-312.pyc,, +numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_datasource.cpython-312.pyc,, +numpy/lib/__pycache__/_format_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_iotools.cpython-312.pyc,, +numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc,, 
+numpy/lib/__pycache__/_utils_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_version.cpython-312.pyc,, +numpy/lib/__pycache__/array_utils.cpython-312.pyc,, +numpy/lib/__pycache__/format.cpython-312.pyc,, +numpy/lib/__pycache__/introspect.cpython-312.pyc,, +numpy/lib/__pycache__/mixins.cpython-312.pyc,, +numpy/lib/__pycache__/npyio.cpython-312.pyc,, +numpy/lib/__pycache__/recfunctions.cpython-312.pyc,, +numpy/lib/__pycache__/scimath.cpython-312.pyc,, +numpy/lib/__pycache__/stride_tricks.cpython-312.pyc,, +numpy/lib/__pycache__/user_array.cpython-312.pyc,, +numpy/lib/_array_utils_impl.py,sha256=GYWiyNqLQ7DGUSBXz0bbR6AAqZStDIwUe7tsbZ__15M,1697 +numpy/lib/_array_utils_impl.pyi,sha256=AktSeZcFe_XUQ6utYHQyJKG8l8bhM8tQL2Kttj1DjcQ,820 +numpy/lib/_arraypad_impl.py,sha256=z5--XT80TcnDZezHVrdxauJSY3yC4vMDdd7JlO-h3zw,32296 +numpy/lib/_arraypad_impl.pyi,sha256=W98XPsguuf8B924KVVxs6l_EOBM9JKzwTmHL98CKbs0,1837 +numpy/lib/_arraysetops_impl.py,sha256=VFdgpFZJcyJhYFPcTk_LQD_SrqX6poy_shsLKvZigy0,41275 +numpy/lib/_arraysetops_impl.pyi,sha256=gYwt9qhpNQf5krbOK7MAHq9IQn1D4bb-lvbBIK98m3s,12803 +numpy/lib/_arrayterator_impl.py,sha256=HtOADIHuG9ADbbMTgmh4P_muke1V-8E-FNEO3bVOGPA,7218 +numpy/lib/_arrayterator_impl.pyi,sha256=8u0nb5NPpWNib-FlWaXlp6BXBPgTv5__NF30FD_1qmM,1876 +numpy/lib/_datasource.py,sha256=zk-Vbn4JlDHEVa3De6A3NgjnnizSJi-HF0ZvvA6YIo4,22731 +numpy/lib/_datasource.pyi,sha256=135RvD3p-3mHdNp_sZV4aN9brwEFvEM49VE1eHlFEfs,996 +numpy/lib/_format_impl.py,sha256=zcQ3xXxPf7epktsYrcdBbIPuOCh9OPV1g3gB6ghf4rE,36865 +numpy/lib/_format_impl.pyi,sha256=_0lEht2hKbTevv0eGChmYMBTAg-2jAfvrfU9p326VHs,869 +numpy/lib/_function_base_impl.py,sha256=AZGyN29Ecw4LRuU1TNUcPC7cVHO9ye4bJ9FQI7n_Gwc,196425 +numpy/lib/_function_base_impl.pyi,sha256=HY21gmJcUIvTsYL2UUZ8l2MYj34cp_7Mdsmck-FjeEE,24116 +numpy/lib/_histograms_impl.py,sha256=Utu7aAQc7ZpsHn_04ogUnZq1ZcdHfipcq9eRq817oVU,38432 +numpy/lib/_histograms_impl.pyi,sha256=QouOxW0sa_LMJ2hDv5WEO9k95mTMjEvbP2-7swNJxzI,1093 +numpy/lib/_index_tricks_impl.py,sha256=g7Np4E8AG9sgyi9HTUgvOM08pIlAj_cvXw4cc7NrU5I,32186 +numpy/lib/_index_tricks_impl.pyi,sha256=gQwY1mj_Sxk2eo9BXcqJ68F88XvzQB81o0nNUkQ9w9o,6325 +numpy/lib/_iotools.py,sha256=0jtpvpl5L-_1ODI21F-1i19t1e3L-6wJxRd1CSLewL0,30876 +numpy/lib/_iotools.pyi,sha256=69hfBI89W2UP6ozHiSByt-GxTupni-gBRPihFbXSh6Q,3393 +numpy/lib/_nanfunctions_impl.py,sha256=cdOT7dYwjvUpI9iEHTrwzbbtKhP9ZZgOCMirTBeYPUk,71949 +numpy/lib/_nanfunctions_impl.pyi,sha256=j5dyJz_c-SQDxXrL9N2ouKC-DsP_EVDZyLedGXqCpMI,833 +numpy/lib/_npyio_impl.py,sha256=kucazwCufh4mNwECyZxEerxsqa_GxQMz1kYuZURDI8s,99277 +numpy/lib/_npyio_impl.pyi,sha256=WWlGxbobwLgEiD-k58g_Q9K1HW1vDk--AYrBSjjqALE,9388 +numpy/lib/_polynomial_impl.py,sha256=TWiqlG3WDa97tayxQCEltZD9TNhUyFprzL_Umd7Lxso,44134 +numpy/lib/_polynomial_impl.pyi,sha256=9PvnPmeCk45ldiJ8xHwsIVdX9DrjPhY9H7CEFbVJMLQ,6999 +numpy/lib/_scimath_impl.py,sha256=QAU4uM_INzVqCTs-ATEyy1JhREl_wDJn_ygU75YtfgE,15692 +numpy/lib/_scimath_impl.pyi,sha256=pXBZjHPB_FbeBfe9M3N8TjrET_oclGuafWjTHC-xjUs,2774 +numpy/lib/_shape_base_impl.py,sha256=5vkU9rPOwKvSc7TzxdfWtM08uV0m15iHPTxbqcY47Oc,39479 +numpy/lib/_shape_base_impl.pyi,sha256=36gmgbFd1cUmSUfUihFtb1brc2gKLYi8NXDAEzLyBmQ,5412 +numpy/lib/_stride_tricks_impl.py,sha256=y3Uxp3jFzDwmIQ137N2zap7-vW_jONUQmXnbfqrs60A,18025 +numpy/lib/_stride_tricks_impl.pyi,sha256=6rR7IO04w1FPCKUM920r9Kf_A_hpZbIABo6Rcl34tFI,1815 +numpy/lib/_twodim_base_impl.py,sha256=3nOLvCD6cfM6MD3o381F48GB8poqsUGDCDOQlOBQXmY,33925 +numpy/lib/_twodim_base_impl.pyi,sha256=nBRqOTSD21ioBkUw6vtzy1-ZyczJcvybkvG3-hvSIkY,11193 
+numpy/lib/_type_check_impl.py,sha256=WeVfWz_0Klvb2K_6l0x4nHwHBwPYgfcxeZinV_dp_mw,19221 +numpy/lib/_type_check_impl.pyi,sha256=xpZV5LStVGHbEDAcJUbD7iZFE0onwCPZZuwb01P4o_Q,9713 +numpy/lib/_ufunclike_impl.py,sha256=0eemf_EYlLmSa4inNr3iuJ1eoTMqLyIR0n6dQymga3Y,6309 +numpy/lib/_ufunclike_impl.pyi,sha256=SJ7wbjWFI6WL_rp3CNqbZoKoza4Ou4uDwXvpt4iekys,1288 +numpy/lib/_user_array_impl.py,sha256=t3nnrFuvbBizFV1K3C9NNyIM80LU5spA88MlrYJzEok,7697 +numpy/lib/_user_array_impl.pyi,sha256=AZpI9fHHYpLxyYL9ud5YDHcZhxLl-YpfB23i9f154BQ,9110 +numpy/lib/_utils_impl.py,sha256=7BSreRcHNIsUeMj3U1GbqzVjJYKvyuEWHdG_C4TM46Q,23346 +numpy/lib/_utils_impl.pyi,sha256=ckxdUjdGEaa3JAKVQZHYgZ1R3glZZg-ssh90vkV7dJg,371 +numpy/lib/_version.py,sha256=4dUrc9Js0KPEQ5adoYKR5dnP4ffjCDtJUKPqcMauwY4,4851 +numpy/lib/_version.pyi,sha256=vysY5Vl_nh4si6GkMXEoB6pUDl-jJ5g0LpSDa40F124,641 +numpy/lib/array_utils.py,sha256=XbcyhJ9S0IlNnP9Ny6yygLMEACWWUPNOU8vevj1TEpI,144 +numpy/lib/array_utils.pyi,sha256=LfY_fzfTdtjoLIi5FSCDsC5weYrmAHFh7fxFfniupbg,296 +numpy/lib/format.py,sha256=npJ0eJhT7uKNK5a0lCMGfiJv-R4jyNhiIPeZbJcNXBs,477 +numpy/lib/format.pyi,sha256=fh-5SN4MORvjLliV8LwOb3VqG8tFvOaMeG4Vn5CBusA,1482 +numpy/lib/introspect.py,sha256=u-wgfMuYt8GI3AnRNdXs4j4w9eNTsazlqrazS-P7gKA,2749 +numpy/lib/introspect.pyi,sha256=AWVX6b9mzdwsxizOY0LydWKBEpGatHaeeXGc2txYJEM,152 +numpy/lib/mixins.py,sha256=Kff76ScpgWV3cruicI9A7a4zfBnGVmXtwQzMzu5xDEo,7200 +numpy/lib/mixins.pyi,sha256=I3iXqrcHpV4jwsgBGJKT2Ero2SlTSEZZDmfcx3DJ7Cc,3131 +numpy/lib/npyio.py,sha256=eaPvfHGSzUE70TJHHLOCPIX9G5ihMuBEexy6_PNhJ9Q,68 +numpy/lib/npyio.pyi,sha256=qX68dlgy7M2MtAgNSabTV8rWOTXOXCE1_72XcdJq10Y,192 +numpy/lib/recfunctions.py,sha256=T4aa5xXav9ntfw5YmzPiq_YUkh12wGk40XyBLQPCEzU,59539 +numpy/lib/recfunctions.pyi,sha256=NTf4FyM2Kinx56nNHcyGjKUz_RBSJQr-qtZsLKeIYvQ,13216 +numpy/lib/scimath.py,sha256=qjFaQeq0zEIl7gKqOhaj_vmCC_KaFdyTmHdLUUkSp5I,169 +numpy/lib/scimath.pyi,sha256=Fe7sfleFSY0uCGUj5gATxjEoMnva1nJ53YyP1wP11Nk,512 +numpy/lib/stride_tricks.py,sha256=x0_BfwlycBAlR3BvpxTndeP96dHBT_fASbkTTTzBYgI,88 +numpy/lib/stride_tricks.pyi,sha256=FLo0b8NlLPsS58VzjFFchivpBOjjE_meU0EhWEFPQNY,170 +numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/lib/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test__version.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_format.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_io.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc,, 
+numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc,, +numpy/lib/tests/data/py2-np0-objarr.npy,sha256=ZLoI7K3iQpXDkuoDF1Ymyc6Jbw4JngbQKC9grauVRsk,258 +numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 +numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 +numpy/lib/tests/data/py3-objarr.npy,sha256=7mtikKlHXp4unZhM8eBot8Cknlx1BofJdd73Np2PW8o,325 +numpy/lib/tests/data/py3-objarr.npz,sha256=vVRl9_NZ7_q-hjduUr8YWnzRy8ESNlmvMPlaSSC69fk,453 +numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 +numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 +numpy/lib/tests/test__datasource.py,sha256=0vL8l30yb53Wwnt0YbdqvOl2xQf9fc0S-0pRTMAdaYc,10581 +numpy/lib/tests/test__iotools.py,sha256=LTODsFclDQnIbKQb98hEysgVhQ6cs230aj45pA1QYFc,13765 +numpy/lib/tests/test__version.py,sha256=SwXoEqMap603c2jd7ONod0ZOVQeX6T-zArMf03OCHbw,1999 +numpy/lib/tests/test_array_utils.py,sha256=hPXtCjoBKe6MP91sg_04EBpRYg7MITVlCAgD1AScjx8,1118 +numpy/lib/tests/test_arraypad.py,sha256=GzqMIQ0Y8XLYmP5osXzl5W1Pcywy_OK-39STKoCWJc4,56155 +numpy/lib/tests/test_arraysetops.py,sha256=GKotFUbKgEfHybghYP1zIM0RWMqW1pa4cdYlML1seXQ,40445 +numpy/lib/tests/test_arrayterator.py,sha256=1LZmgQQJpndfwh3X2mL4JpaWvKQl9a0WAnQdSpXimhM,1301 +numpy/lib/tests/test_format.py,sha256=BTKd2lUodd8gNznWkh_Hl3mG8Mu8SOFADEqGd5kCw64,41956 +numpy/lib/tests/test_function_base.py,sha256=z2SkeGd9qQjXmaxk6bhoi06qlfxdrDzJEqRsDxIuEoM,171119 +numpy/lib/tests/test_histograms.py,sha256=QkcA46lJ1Y-T3f4-Qn7kn6J9bIid3RLK7NKMrUI3Rpw,33966 +numpy/lib/tests/test_index_tricks.py,sha256=bp4GFjqQ3s_taGDVsCOgs5YU7qtDMhQuPGwvcCxj2sk,20477 +numpy/lib/tests/test_io.py,sha256=8StHTe3-XsyPNBy4IveftRY1Zba2JTW3ALOHg_bEfRw,110989 +numpy/lib/tests/test_loadtxt.py,sha256=1R_xoumDPtPGQYoWh_WWCFKeb3-9WfLIoMHCYQQ0CtQ,40557 +numpy/lib/tests/test_mixins.py,sha256=9r6tgP4Wb6vCDn590PkHmHl-GBAoAL6_-mwp2wbiaO0,7009 +numpy/lib/tests/test_nanfunctions.py,sha256=1GGtPUD8bS5v2FxLr8e0BUgx9k6Iu-8WLZisawPY4Yw,54098 +numpy/lib/tests/test_packbits.py,sha256=REkoSXh9FVVTizyyHWkLqXFLIjt0rynXeixhK8-gBgk,17543 +numpy/lib/tests/test_polynomial.py,sha256=3Z7x5gf2cSb5pN5e0Sb_hZetF3mI5GrTLv-OaN7v0m0,12312 +numpy/lib/tests/test_recfunctions.py,sha256=xYsC_t_tpIpWJvS1pRU2HNxZTO1cJ3QZ1OnXt4ajm0s,43928 +numpy/lib/tests/test_regression.py,sha256=UURtmtwfrxMDF3UY1ZMNbgIJOa38jUzYKCmpYYD8e3Q,7716 +numpy/lib/tests/test_shape_base.py,sha256=ZWHeWCs9x0sD-L03h6kTmUdRHvxHVC-8KOu8KomhyKQ,27406 +numpy/lib/tests/test_stride_tricks.py,sha256=tBErppWSp8jAckkx_zN5ZbAhfKxZJ99cOQxDI9B_xh0,23030 +numpy/lib/tests/test_twodim_base.py,sha256=-djv2iP3W2sB4rAgj9Orl8alGwDFfPvcVu6CNvlKIcg,18925 +numpy/lib/tests/test_type_check.py,sha256=2M6uyLSI-CP13CAylnBn3kbT6nrK6wYWW-Scw13vsAQ,14796 +numpy/lib/tests/test_ufunclike.py,sha256=5a65WfziLpjPJ_yE8zg-A-q08xlyiU8_S1JH8kb-Uyw,3015 +numpy/lib/tests/test_utils.py,sha256=HRZxH8Rs-PxCpMAhgbNOrTfBrsA8B2eTOKypY0Udczw,2374 +numpy/lib/user_array.py,sha256=zs6u6TAXoAySGAZc1qE6fKD4AN-t6urZCaiZaKmHiso,63 +numpy/lib/user_array.pyi,sha256=8C-aTekEYA0bVU7F3turaw1w0j8FfFvDp9xKa9Pfe94,53 +numpy/linalg/__init__.py,sha256=7pVvFwOJFKOArGeUs6MNj3MNqqsx7xx0vt2_7avNAg4,2124 
+numpy/linalg/__init__.pyi,sha256=C3fZHKPSa4wpfRqfTjw3DpzE5p-Czjus48OuMLsDckQ,1060 +numpy/linalg/__pycache__/__init__.cpython-312.pyc,, +numpy/linalg/__pycache__/_linalg.cpython-312.pyc,, +numpy/linalg/__pycache__/linalg.cpython-312.pyc,, +numpy/linalg/_linalg.py,sha256=XSBRz5lsdmZ9olIGvdqGIgnajzI-mSx4eBbKNYsYJ1A,115032 +numpy/linalg/_linalg.pyi,sha256=inijXDOFEzZayOL37HNKOqyH8wCLQaU0r__pO4do7Ag,11141 +numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so,sha256=46UEl6W4XZFSu4ZGDC_RgVSBzAMg4Wi45u-FXiZARdk,231833 +numpy/linalg/_umath_linalg.pyi,sha256=awvRP1FGuomyfeaR0wzHvrXURAI8tUF3u2RRZ24hkXw,1409 +numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so,sha256=yKClyTWZEb-1IENKoh6zVgluRTPO5foLIRXC2LMJd9c,30001 +numpy/linalg/lapack_lite.pyi,sha256=QjaS8R4uu6MiJDcCFNE5EOAYGnFCcrNz873gs2OUXEM,2672 +numpy/linalg/linalg.py,sha256=6NimP68tYa0qBRglWH87_tOh2scshtDpcwfvBvmd6Po,585 +numpy/linalg/linalg.pyi,sha256=8E5sbKeM5Ors7r143mM7A4ui8kFZM0SF7NfUGW1eN-4,932 +numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc,, +numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc,, +numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/linalg/tests/test_deprecations.py,sha256=9p_SRmtxj2zc1doY9Ie3dyy5JzWy-tCQWFoajcAJUmM,640 +numpy/linalg/tests/test_linalg.py,sha256=G2gy9cyvTNbVKGRx2hd9ybPL0MFttglJtd3U0bfe1b0,84312 +numpy/linalg/tests/test_regression.py,sha256=9a96oyeEGQMUxfw_-GUjNWqn51iu4Cf7kllJ0bKp9ws,6704 +numpy/ma/API_CHANGES.txt,sha256=F_4jW8X5cYBbzpcwteymkonTmvzgKKY2kGrHF1AtnrI,3405 +numpy/ma/LICENSE,sha256=BfO4g1GYjs-tEKvpLAxQ5YdcZFLVAJoAhMwpFVH_zKY,1593 +numpy/ma/README.rst,sha256=krf2cvVK_zNQf1d3yVYwg0uDHzTiR4vHbr91zwaAyoI,9874 +numpy/ma/__init__.py,sha256=XpDWYXwauDc49-INsk455D03Uw4p6xFdsdWOn2rt87U,1406 +numpy/ma/__init__.pyi,sha256=QV7F1eN7GQLA2V2vI_bYXC_XhoZl-2IqXHWIqJtXLKU,6946 +numpy/ma/__pycache__/__init__.cpython-312.pyc,, +numpy/ma/__pycache__/core.cpython-312.pyc,, +numpy/ma/__pycache__/extras.cpython-312.pyc,, +numpy/ma/__pycache__/mrecords.cpython-312.pyc,, +numpy/ma/__pycache__/testutils.cpython-312.pyc,, +numpy/ma/core.py,sha256=Te0RIWw8JyG2iJJjeSiG_t1ahKAICDdr7_pl4G6Q1Yc,288881 +numpy/ma/core.pyi,sha256=RxL-vzdzpBB97UqNesAkHjvFxQUom1ARUvKurQgz58I,40459 +numpy/ma/extras.py,sha256=f8qf6t_x9k34OKmHiNIft9PFCyLYMeBSGhiYjhUuIpc,70680 +numpy/ma/extras.pyi,sha256=4nJDP_0yoEtchWJgczd0ubXba76TsGPBOuCRexQgVbE,3794 +numpy/ma/mrecords.py,sha256=00gzzy_xxC408pVZIRUSRhbwqc1UHcyhE-tO2FYM8IE,27073 +numpy/ma/mrecords.pyi,sha256=YW81zL9LDzi-L-2WI7135-HxBzj12n4YgARHh2qZ6Bs,1973 +numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/ma/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_arrayobject.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_core.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_deprecations.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_extras.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_mrecords.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_old_ma.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_subclassing.cpython-312.pyc,, +numpy/ma/tests/test_arrayobject.py,sha256=MSvEcxlsVt4YZ7mVXU8q_hkwM0I7xsxWejEqnUQx6hE,1099 +numpy/ma/tests/test_core.py,sha256=novMpyqUqf9O7970aVB2HUqTBSiUqMQINMas3PbTgjM,219717 
+numpy/ma/tests/test_deprecations.py,sha256=Hye4FMqAdPOOCVnihbs4R8ntLvYJy6WF3LA29876urI,2569 +numpy/ma/tests/test_extras.py,sha256=BnFaTx33kNdLDuLJ74Dt1f7gGsD_noYFmBGA8UelUqI,78435 +numpy/ma/tests/test_mrecords.py,sha256=ZDEv-LbPlx4Qf9NQs8unNXgrdXupRv4IQljf4_vCr34,19894 +numpy/ma/tests/test_old_ma.py,sha256=PMA26SyXJxN0o-pPvyEhl_YF2zRcxuPRMPAXztKCphA,33018 +numpy/ma/tests/test_regression.py,sha256=_eskYMrmSHe-_iODK6mvRD5gN_w6NpAl5agsyIGRRUo,3303 +numpy/ma/tests/test_subclassing.py,sha256=_TQZ4WM2VG-yuITIXeRZbAZrWDHpxtQoLzDKbGRmuHM,16936 +numpy/ma/testutils.py,sha256=vNG1ay689zOktrm-33tyz0bsCLxkJHK6j--2JtHRPq4,10235 +numpy/matlib.py,sha256=_S9N8S2NNsHGQUcloxrxABtJDejHiUyMdMJO7SayPkA,10638 +numpy/matlib.pyi,sha256=d9Tw-ThrWNUgXKGTiQvCjqrkWQSWqHcXUXAxvYENtYk,9602 +numpy/matrixlib/__init__.py,sha256=Ut6IqfjuA-kwwo6HBOYAgFjXXC_h7YV_3HyDsKM72dk,243 +numpy/matrixlib/__init__.pyi,sha256=e9xC6kWhIYoPqa3-tmtxdaq8RLjXrBjpyXLqV-pV9UY,106 +numpy/matrixlib/__pycache__/__init__.cpython-312.pyc,, +numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc,, +numpy/matrixlib/defmatrix.py,sha256=wpw6lZU9X6qp8wAJokDXt2RBrL1eXqlmBt-ojIwYzlU,30875 +numpy/matrixlib/defmatrix.pyi,sha256=ReQicwbCq4EFGM6paj5KoTeFK3fyiBMC4fJLJcP0SI4,478 +numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/matrixlib/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_interaction.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_numeric.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/matrixlib/tests/test_defmatrix.py,sha256=G9v4-cGuAbHVVDCJ2rCUnQrSTUChOih_6ZMV-ZlYsNA,14977 +numpy/matrixlib/tests/test_interaction.py,sha256=BMpaAIeGOJ5EEHWuozBifN8l3Av5RO6jGoaPgdzTiqQ,11874 +numpy/matrixlib/tests/test_masked_matrix.py,sha256=UN212xE5e3G9OuwdOWvRMFT5-z3zIfjQQIIpY26a52k,8787 +numpy/matrixlib/tests/test_matrix_linalg.py,sha256=33UxWKz2NwI2Wt3pP0AyaooZ5tCFpbOePWek3XT0a4U,2149 +numpy/matrixlib/tests/test_multiarray.py,sha256=S5kjzsQR2YgT0qIGrNO1lUDl3o-h0EIdg_g3U3CnuRc,555 +numpy/matrixlib/tests/test_numeric.py,sha256=hZ-r921WDG8Ck8KmT6ulgykjHU1QaGY6gprC2OPo-vg,447 +numpy/matrixlib/tests/test_regression.py,sha256=XnfZ4RoTS49XMUyUlHVMc6wcWImNRja7DT1wTdEk428,934 +numpy/polynomial/__init__.py,sha256=gGSwLNpPCpXfPgiJSsgVoVsJ0AS1c-_MWlGOeiG55sI,6726 +numpy/polynomial/__init__.pyi,sha256=tVWqA3_ZzcTyfp5yIr4ca87Tgx4YtY4660UQi3JhfJI,688 +numpy/polynomial/__pycache__/__init__.cpython-312.pyc,, +numpy/polynomial/__pycache__/_polybase.cpython-312.pyc,, +numpy/polynomial/__pycache__/chebyshev.cpython-312.pyc,, +numpy/polynomial/__pycache__/hermite.cpython-312.pyc,, +numpy/polynomial/__pycache__/hermite_e.cpython-312.pyc,, +numpy/polynomial/__pycache__/laguerre.cpython-312.pyc,, +numpy/polynomial/__pycache__/legendre.cpython-312.pyc,, +numpy/polynomial/__pycache__/polynomial.cpython-312.pyc,, +numpy/polynomial/__pycache__/polyutils.cpython-312.pyc,, +numpy/polynomial/_polybase.py,sha256=b0kCiTgUm8D5QC_LWSm6yNvwC79npDAeksK0vQPciCQ,39358 +numpy/polynomial/_polybase.pyi,sha256=mKbxu6z3iC6NnDNXHPrMm6Vo6RQvrvtCel7S5Mi3Q3Q,8187 +numpy/polynomial/_polytypes.pyi,sha256=e-uO5HmbYsWffZtOKCDgrxEqvUm-YKTqQKXj83m8j6s,22382 
+numpy/polynomial/chebyshev.py,sha256=T0vrDsOrO8Ntxbzf_-0dv_lPyF5c45OjDoVJDzeGBAI,62322 +numpy/polynomial/chebyshev.pyi,sha256=jn21NMBsc4FYvC_5BM4kOfnYEaUSINdq3RyooS-5rjU,4787 +numpy/polynomial/hermite.py,sha256=IguwJittKDh3y0rF1M9lLuIptFXgq-PhaHNTjfE3CnA,54603 +numpy/polynomial/hermite.pyi,sha256=bNrlxTVHTskFUOKDbyrISXbOsmPxxhnAGmZmOF1mLpc,2463 +numpy/polynomial/hermite_e.py,sha256=fhuui2jLc0I5WEEsRDcyw8FKSFxOl9jr8b4yRIxEZqQ,52305 +numpy/polynomial/hermite_e.pyi,sha256=OyjRyzP7tz5sDP-90D4dpn82zJ4zPUCIzhpXaOCpkCY,2555 +numpy/polynomial/laguerre.py,sha256=XJ5dNqWuZNhqwARb_QW4nfrRHyJv1JMCgsP2W4-KE9M,52474 +numpy/polynomial/laguerre.pyi,sha256=_72JssagROc-vwt8W1i3aOo8s5l2v2G2NzMUm14vZnw,2191 +numpy/polynomial/legendre.py,sha256=sMJTmGdewNhccrK9CyfNIIFRgzmY-AJHhgo6zxtGYvo,51129 +numpy/polynomial/legendre.pyi,sha256=dPizRI4HLqAQ8Jms8Ta_HtsUyHV49fk3hFCZNOid1fo,2191 +numpy/polynomial/polynomial.py,sha256=-IICosb2j8ClsIfXPDWgXqLx6WuhU6olocU4JkxN7kI,52196 +numpy/polynomial/polynomial.pyi,sha256=A3oK3wKteiRkUcNEkzgvZQ11HIqloIRoxG2X9rPVZBE,2021 +numpy/polynomial/polyutils.py,sha256=mQEa3oCz9X-d1HaNdXkpBJzXWGzgY42WDMjJOn988O8,22657 +numpy/polynomial/polyutils.pyi,sha256=gnB7TQZclbMGneVVFE1z5LX5Qgs3GCidRTWL97rja-4,10235 +numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/polynomial/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_classes.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_laguerre.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_legendre.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_polynomial.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_polyutils.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_printing.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_symbol.cpython-312.pyc,, +numpy/polynomial/tests/test_chebyshev.py,sha256=gcK5jVv1vG3O-VVMkZKpmweR6_4HQAXtzvbJ_ib0-B8,20650 +numpy/polynomial/tests/test_classes.py,sha256=nBsVHcubheo1s7t-jUXY984ptC2x-aWDPkWED1cUZt4,18552 +numpy/polynomial/tests/test_hermite.py,sha256=sexvJUDmac1JKL8qOeQr70KJTD1KdoJ10LKosFfBqm0,18687 +numpy/polynomial/tests/test_hermite_e.py,sha256=r3QQOUVoBBVgZzCjE3qzIl-wMcl_kI1Nuc-KGNy7rIw,19026 +numpy/polynomial/tests/test_laguerre.py,sha256=8c2h7Lj3F2DtuVuOPlS8ZL-dq_IoFxPrzREbuI5iZqQ,17637 +numpy/polynomial/tests/test_legendre.py,sha256=hMdOs_RzkGihUzg7gDmeM1FxkIT2UIgqkDWanucfMHg,18805 +numpy/polynomial/tests/test_polynomial.py,sha256=Pi_X6ThfxgVbgzyAnu3FcyTIUvpL9ENxRSanyUjgon8,22911 +numpy/polynomial/tests/test_polyutils.py,sha256=gO7B1oPBRRClF7WeXFsLjGwqUl9kjVIv7aAoHlhqVsk,3780 +numpy/polynomial/tests/test_printing.py,sha256=qk76AKCvHHqbsDnHIVf5fxIEH9Va4U9jwJkJ1b67k1o,21403 +numpy/polynomial/tests/test_symbol.py,sha256=ShBdNg9cvYy31fQnrn4gprZUSD0shz5r8zlG8CEq7gs,5375 +numpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/LICENSE.md,sha256=EDFmtiuARDr7nrNIjgUuoGvgz_VmuQjxmeVh_eSa8Z8,3511 +numpy/random/__init__.pxd,sha256=9JbnX540aJNSothGs-7e23ozhilG6U8tINOUEp08M_k,431 +numpy/random/__init__.py,sha256=WFzntztUVNaiXCpQln8twyL8HSFNS7XAWJlJsQXgbqk,7480 +numpy/random/__init__.pyi,sha256=5X5UqSDkeruZafGWv9EnYb0RrjRs49r-TlzV3PPQOjs,2109 +numpy/random/__pycache__/__init__.cpython-312.pyc,, +numpy/random/__pycache__/_pickle.cpython-312.pyc,, 
+numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so,sha256=ofU020DNnyzo2un2GGXqVLQs8MdtqNzYbPtMZmctu6g,323168 +numpy/random/_bounded_integers.pxd,sha256=SH_FwJDigFEInhdliSaNH2H2ZIZoX02xYhNQA81g2-g,1678 +numpy/random/_bounded_integers.pyi,sha256=juqd9PbXs4yg45zMJ7BHAOPQjb7sgEbWE9InBtGZhfo,24 +numpy/random/_common.cpython-312-x86_64-linux-gnu.so,sha256=Rnrdnf0IFga95o_zmZQbvApU_qR2T7xRQy7zkdjN6Zg,258912 +numpy/random/_common.pxd,sha256=7kGArYkBcemrxJcSttwvtDGbimLszdQnZdNvPMgN5xQ,4982 +numpy/random/_common.pyi,sha256=02dQDSAflunmZQFWThDLG3650py_DNqCmxjmkv5_XpA,421 +numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc,, +numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc,, +numpy/random/_examples/cffi/extending.py,sha256=jpIL1njMhf0nehmlMHkgZkIxns2JC9GEDYgAChX87G8,884 +numpy/random/_examples/cffi/parse.py,sha256=PK9vdUxwmvdnFvH3rOpgnnpISwnid7ri5XOmBrMWpJw,1750 +numpy/random/_examples/cython/extending.pyx,sha256=ePnHDNfMQcTUzAqgFiEqrTFr9BoDmbqgjxzrDLvV8fE,2267 +numpy/random/_examples/cython/extending_distributions.pyx,sha256=ahvbdSuRj35DKJRaNFP5JDuPqveBBp-M9mFfF3Wd_M4,3866 +numpy/random/_examples/cython/meson.build,sha256=GxZZT_Lu3nZsgcqo_7sTR_IdMJaHA1fxyjwrQTcodPs,1694 +numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc,, +numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc,, +numpy/random/_examples/numba/extending.py,sha256=Z7Z_Xp7HPE4K5BZ7AwpZ29qvuftFAkvhMtNX53tlMMw,1959 +numpy/random/_examples/numba/extending_distributions.py,sha256=fdePXeUj46yXK0MK1cszxUHQiOTiNuNsrbZqPw4AdGs,2036 +numpy/random/_generator.cpython-312-x86_64-linux-gnu.so,sha256=i9UBaQ-oQR5mcn1vie32w6vN_hBwz3yQmegTTS69QrY,993240 +numpy/random/_generator.pyi,sha256=aFPqfOxIpOIOmdY1xBcUpllMCv20iTq4PN7Ad_gd7HY,24009 +numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so,sha256=M-21VYXZXnU2mi5QOPREsRXTf3z6Y8ng2Y3YlsHDo5Q,137960 +numpy/random/_mt19937.pyi,sha256=ZjOCfOQb1KLDywy8ZHy8pQb1C-DZvStqYK3OOB6rETo,775 +numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so,sha256=6bmiyeXy6EO4jKfNIun6EwNDz2AmTYwTqnf8Gf2UjiU,148272 +numpy/random/_pcg64.pyi,sha256=bIlGJyN2X3gtKEzh6qwDdyXX88al_2vVmCzGNpbNifs,1142 +numpy/random/_philox.cpython-312-x86_64-linux-gnu.so,sha256=TDpf7T5UdYD-rHJmFCSCQoST7nAXFU-OzoUKzemmczA,120808 +numpy/random/_philox.pyi,sha256=xFogUASfSHdviqexIf4bGgkzbryir7Tik7z0XQR9xx4,1005 +numpy/random/_pickle.py,sha256=Lt47ma_vnnJHdnQlc5jZ_DqBHsdKi0QiUNaIkMf95qA,2742 +numpy/random/_pickle.pyi,sha256=5obQY7CZRLMDjOgRtNgzV_Bg5O9E8DK_G74j7J7q6qo,1608 +numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so,sha256=9r3ZwMuqGPV6zHhK0NB4M1VzqDRnzOvS4rN4iNecaJg,89648 +numpy/random/_sfc64.pyi,sha256=wRrbkEGLNhjXa7-LyGNtO5El9c8B_hNRQqF0Kmv6hQM,682 +numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so,sha256=rp-vL6PxsRdLn_ciYNlwJaVGddMm-u0_NGl2TN7rOzU,239072 +numpy/random/bit_generator.pxd,sha256=lArpIXSgTwVnJMYc4XX0NGxegXq3h_QsUDK6qeZKbNc,1007 +numpy/random/bit_generator.pyi,sha256=tX5lVJDp6J5bNzflo-1rNylceD30oDBYtbiYVA1cWOY,3604 +numpy/random/c_distributions.pxd,sha256=UCtqx0Nf-vHuJVaqPlLFURWnaI1vH-vJRE01BZDTL9o,6335 +numpy/random/lib/libnpyrandom.a,sha256=0hYJRONXaBW-JS5wptgs-kvXtPhgwBlh7nhh3JVFxho,71702 +numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so,sha256=nX--1W8U0JMrnCI-P4xRd11fuJtw0lJhvDXMLlfNyeM,785752 +numpy/random/mtrand.pyi,sha256=Ds2d-DloxUUE2wNNMA1w6oqqPsgBilkaRMCLioBTiJA,22687 +numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/__pycache__/__init__.cpython-312.pyc,, 
+numpy/random/tests/__pycache__/test_direct.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_extending.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_random.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc,, +numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc,, +numpy/random/tests/data/generator_pcg64_np121.pkl.gz,sha256=EfQ-X70KkHgBAFX2pIPcCUl4MNP1ZNROaXOU75vdiqM,203 +numpy/random/tests/data/generator_pcg64_np126.pkl.gz,sha256=fN8deNVxX-HELA1eIZ32kdtYvc4hwKya6wv00GJeH0Y,208 +numpy/random/tests/data/mt19937-testset-1.csv,sha256=Xkef402AVB-eZgYQkVtoxERHkxffCA9Jyt_oMbtJGwY,15844 +numpy/random/tests/data/mt19937-testset-2.csv,sha256=nsBEQNnff-aFjHYK4thjvUK4xSXDSfv5aTbcE59pOkE,15825 +numpy/random/tests/data/pcg64-testset-1.csv,sha256=xB00DpknGUTTCxDr9L6aNo9Hs-sfzEMbUSS4t11TTfE,23839 +numpy/random/tests/data/pcg64-testset-2.csv,sha256=NTdzTKvG2U7_WyU_IoQUtMzU3kEvDH39CgnR6VzhTkw,23845 +numpy/random/tests/data/pcg64dxsm-testset-1.csv,sha256=vNSUT-gXS_oEw_awR3O30ziVO4seNPUv1UIZ01SfVnI,23833 +numpy/random/tests/data/pcg64dxsm-testset-2.csv,sha256=uylS8PU2AIKZ185OC04RBr_OePweGRtvn-dE4YN0yYA,23839 +numpy/random/tests/data/philox-testset-1.csv,sha256=SedRaIy5zFadmk71nKrGxCFZ6BwKz8g1A9-OZp3IkkY,23852 +numpy/random/tests/data/philox-testset-2.csv,sha256=dWECt-sbfvaSiK8-Ygp5AqyjoN5i26VEOrXqg01rk3g,23838 +numpy/random/tests/data/sfc64-testset-1.csv,sha256=iHs6iX6KR8bxGwKk-3tedAdMPz6ZW8slDSUECkAqC8Q,23840 +numpy/random/tests/data/sfc64-testset-2.csv,sha256=FIDIDFCaPZfWUSxsJMAe58hPNmMrU27kCd9FhCEYt_k,23833 +numpy/random/tests/data/sfc64_np126.pkl.gz,sha256=MVa1ylFy7DUPgUBK-oIeKSdVl4UYEiN3AZ7G3sdzzaw,290 +numpy/random/tests/test_direct.py,sha256=-ugW0cpuYhFSGVDtAbpEy_uFk-cG0JKFpPpQMDyFJh4,19919 +numpy/random/tests/test_extending.py,sha256=8KgkOAbxrgU9_cj9Qm0F8r9qVEVy438Q-Usp7_HpSLQ,4532 +numpy/random/tests/test_generator_mt19937.py,sha256=X0AEzi3xy6FzyTpTZNT_lXyXS_LWOWFYc9nZ6QtkILQ,117812 +numpy/random/tests/test_generator_mt19937_regressions.py,sha256=QZVFTSN9gnJXN-ye89JfUoov1Cu65r4e32FMmCYje5U,8107 +numpy/random/tests/test_random.py,sha256=YSlHTwu6t7BAjDLZrBz4e8-ynSuV6eOHP9NwxDoZBvU,70298 +numpy/random/tests/test_randomstate.py,sha256=WbZBpZplBlgmhWKXNsj7d0Zw0BHJ2nxEerMRnuwyYnE,85749 +numpy/random/tests/test_randomstate_regression.py,sha256=1NgkJ60dVg8-UZ-ApepKlZGotqgenW_vZ3jqofMOSlw,8010 +numpy/random/tests/test_regression.py,sha256=DqqLLE3_MW04ltPhSXy44oFx_daO9b4I7NgI-WoMc-s,5471 +numpy/random/tests/test_seed_sequence.py,sha256=0lb4LRofbt_wHO-Cs_d1hwp1WcWjOmxH-OePkXST5bc,3310 +numpy/random/tests/test_smoke.py,sha256=epkUF47HanaSZVz9NVUt6xUmKZhJNolPIB-z4MN67Qw,28141 +numpy/rec/__init__.py,sha256=kNAYYoSAA0izpUVRb-18sJw-iKtFq2Rl2U5SOH3pHRM,83 +numpy/rec/__init__.pyi,sha256=1ZL2SbvFSaoXwOK-378QQ0g0XldOjskx2E2uIerEGUI,347 +numpy/rec/__pycache__/__init__.cpython-312.pyc,, +numpy/strings/__init__.py,sha256=o27wHW8jGaUfbDopSyEmYD6Rjeo33AzkGBBTgWrlGH4,83 +numpy/strings/__init__.pyi,sha256=JP8YQR3xZ_mPMdQax7QSR2cZ-N-V7ZDqvOcWIIUP_54,1319 
+numpy/strings/__pycache__/__init__.cpython-312.pyc,, +numpy/testing/__init__.py,sha256=Eqe-Ox-3JSqk6QRnnPPFLCW9Ikqv9OuJDhnm2uGM3zc,581 +numpy/testing/__init__.pyi,sha256=1jr2Gj9BmCdtK4bqNGkwUAuqwC4n2JPOy6lqczK7xpA,2045 +numpy/testing/__pycache__/__init__.cpython-312.pyc,, +numpy/testing/__pycache__/overrides.cpython-312.pyc,, +numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc,, +numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__pycache__/__init__.cpython-312.pyc,, +numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc,, +numpy/testing/_private/__pycache__/utils.cpython-312.pyc,, +numpy/testing/_private/extbuild.py,sha256=Lg1sqA94Q74Ki5u-sx0PEj7urr3YP470-BCiyvJwExQ,7716 +numpy/testing/_private/extbuild.pyi,sha256=aNH6UnAhh4Zny81W45GrAcScB12b6_84y8M0Vdtpm2I,626 +numpy/testing/_private/utils.py,sha256=PZFbAxTSOPFQ_VXMaDCgPFBSEk2kcEGh8GRiBJy_yJg,95707 +numpy/testing/_private/utils.pyi,sha256=9xlm7AQwi1yqOZN_t22jI_G9Ov-0tzX5H0ITHVz0UEE,12943 +numpy/testing/overrides.py,sha256=B8Y8PlpvK71IcSuoubXWj4L5NVmLVSn7WMg1L7xZO8k,2134 +numpy/testing/overrides.pyi,sha256=IQvQLxD-dHcbTQOZEO5bnCtCp8Uv3vj51dl0dZ0htjg,397 +numpy/testing/print_coercion_tables.py,sha256=SboNmCLc5FyV-UR8gKjJc2PIojN1XQTvH0WzDq75M2M,6286 +numpy/testing/print_coercion_tables.pyi,sha256=FRNibMYi0OyLIzKD4RUASZyhlsTY8elN0Q3jcBPEdgE,821 +numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc,, +numpy/testing/tests/test_utils.py,sha256=yb2RpPDZvVagXiwQPFhV2IhwslZRkC-d-Vtb5wbJbbo,69575 +numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/tests/__pycache__/test__all__.cpython-312.pyc,, +numpy/tests/__pycache__/test_configtool.cpython-312.pyc,, +numpy/tests/__pycache__/test_ctypeslib.cpython-312.pyc,, +numpy/tests/__pycache__/test_lazyloading.cpython-312.pyc,, +numpy/tests/__pycache__/test_matlib.cpython-312.pyc,, +numpy/tests/__pycache__/test_numpy_config.cpython-312.pyc,, +numpy/tests/__pycache__/test_numpy_version.cpython-312.pyc,, +numpy/tests/__pycache__/test_public_api.cpython-312.pyc,, +numpy/tests/__pycache__/test_reloading.cpython-312.pyc,, +numpy/tests/__pycache__/test_scripts.cpython-312.pyc,, +numpy/tests/__pycache__/test_warnings.cpython-312.pyc,, +numpy/tests/test__all__.py,sha256=AXbT9VmRSTYq9beba4d1Eom_V9SVXXEtpkBdEW2XCqU,222 +numpy/tests/test_configtool.py,sha256=vJswcByOu52gdUcO5NX_jxYRbCNNO80IihaBrHrP0AU,1739 +numpy/tests/test_ctypeslib.py,sha256=RNTHi3cYOEPQno5zZQ_WyekW5E_0bVuwmn1AFgkDzY8,12375 +numpy/tests/test_lazyloading.py,sha256=mMbie5VOu7S4uQBu66RNA2ipSsAY4C0lyoJXeHclAvk,1160 +numpy/tests/test_matlib.py,sha256=RMduSGHBJuVFmk__Ug_hVeGD4-Y3f28G0tlDt8F7k7c,1854 +numpy/tests/test_numpy_config.py,sha256=y4U3wnNW0Ags4W_ejhQ4CRCPnBc9p-4-B9OFDcLq9fg,1235 +numpy/tests/test_numpy_version.py,sha256=6PIeISx9_Hglpxc3y6KugeAgB4eBkuZC-DFlXt4LocA,1744 +numpy/tests/test_public_api.py,sha256=KqMtjIjq0_lp2ag4FTtulzypCqyZ43kuUlXgzd_Vkxc,27851 +numpy/tests/test_reloading.py,sha256=T0NTsxAZFPY0LuAzbsy0wV_vSIZON7dwWSNjz_yzpDg,2367 +numpy/tests/test_scripts.py,sha256=QpjsWc0vgi-IFLdMr81horvHAnjRI7RhYyO-edHxzcU,1665 +numpy/tests/test_warnings.py,sha256=ynGuW4FOgjLcwdyi5AYCGCrmAu7jZlIQWPNK-0Yr800,2328 
+numpy/typing/__init__.py,sha256=FdaIH47j8uGEA5luTu-DnrOOTFw-3ne2JVHe-yn_7bA,6048 +numpy/typing/__pycache__/__init__.cpython-312.pyc,, +numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc,, +numpy/typing/mypy_plugin.py,sha256=1pcfLxJaYFdCPKQJVwHvdYbZSVdZ7RSIcg1QXHR7nqM,6541 +numpy/typing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/typing/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc,, +numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc,, +numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc,, +numpy/typing/tests/data/fail/arithmetic.pyi,sha256=HJZH7HOYGyw5z1WAm6ujlXfdkOcqM2YVI92Zv-Wkaj0,3695 +numpy/typing/tests/data/fail/array_constructors.pyi,sha256=2917vcb59EaV406oGtA9lSQ8n_KDDyfvMrxj1Na6rPM,1200 +numpy/typing/tests/data/fail/array_like.pyi,sha256=klBpaBcONODcPC1LztdVZ3UuIPwXN8kUK_e9s_QnZoo,496 +numpy/typing/tests/data/fail/array_pad.pyi,sha256=mt-nhrs6f4YP7xgXizti7k_AwFAJ50yK97fMrMAAEds,137 +numpy/typing/tests/data/fail/arrayprint.pyi,sha256=i-0R4ExF_gtfXML5qirbsQLmDwJyg6_37HDYsk6g6tI,616 +numpy/typing/tests/data/fail/arrayterator.pyi,sha256=9Y8aD2lkDO7UcLo9ySR0pnVz0p2ofl2Lq4XTHgoMXxA,463 +numpy/typing/tests/data/fail/bitwise_ops.pyi,sha256=fnn3R8bMqRD3TIwhc793zgMYoXk3pG52JjkPynL52mw,404 +numpy/typing/tests/data/fail/char.pyi,sha256=IrAk7rxT3ixBVwNWHzCWEsW9rF_oCXTOtwIG-q_Vx2A,2800 +numpy/typing/tests/data/fail/chararray.pyi,sha256=aTqYSgwQUGUVUDkOly_Dy5SdOiHdKKTEbo6jD68KaH0,2356 +numpy/typing/tests/data/fail/comparisons.pyi,sha256=zscovvsL89W8wrL43k4z8z1DLrjXmxBbu6lAOpiyhp0,750 +numpy/typing/tests/data/fail/constants.pyi,sha256=OHaBJo6GYW94BlUWKQDat5jiit5ZAy_h-5bb1WUJnLU,78 +numpy/typing/tests/data/fail/datasource.pyi,sha256=7om31_WCptsSn_z7E5p-pKFlswZItyZm9GQ-L5fXWqM,419 +numpy/typing/tests/data/fail/dtype.pyi,sha256=crqAVZmBYLYV9N-ihiOnKCY1QK4iTxX7J4Tviac2Vq4,305 +numpy/typing/tests/data/fail/einsumfunc.pyi,sha256=4QWkwE4sr5bKz5-OCjPzeUcr4V-wpaMsqee2PXDwyjw,458 +numpy/typing/tests/data/fail/flatiter.pyi,sha256=3WblzrQewUBZo1mjTRWzwda_rQ0HSVamtz2XwH2CgCc,715 +numpy/typing/tests/data/fail/fromnumeric.pyi,sha256=eWCbs1dcoJraAA3b5qME09sWWvsSdlDO912_OwQ_M7k,5685 +numpy/typing/tests/data/fail/histograms.pyi,sha256=wgI2CG-P0jbDw4KohR_nbJxqa34PT1e6nmnLi9KbPQM,376 +numpy/typing/tests/data/fail/index_tricks.pyi,sha256=RNZLHeMOpSX594Eem4WyJrM_QouqndGRVj2YQakJN-E,517 +numpy/typing/tests/data/fail/lib_function_base.pyi,sha256=JdvdZlgNUNzlOuY74T6Lt_hNOpveU6U1jhFGB9Iu6ZA,2817 +numpy/typing/tests/data/fail/lib_polynomial.pyi,sha256=Y3jlwigvtr5tFEHvr3SgguMVsYZe8cvsdgKcavgfucs,937 +numpy/typing/tests/data/fail/lib_utils.pyi,sha256=6Oc_wYI0mv0l74p4pEiVtKjkWFNg4WmXjGW7_2zEKS4,98 +numpy/typing/tests/data/fail/lib_version.pyi,sha256=BvABs2aeC6ZHUGkrsowu80Ks22pbxUMwSPJ8c5i7H14,154 +numpy/typing/tests/data/fail/linalg.pyi,sha256=h9bcCeP0ARGONB3iYGkdX-BFPsNI-pZq3C-nfKgbbBU,1381 +numpy/typing/tests/data/fail/ma.pyi,sha256=inPaC4jP7hGPqQJn-rBspeJZnxJz7m1nVDYQxuMI8SE,6364 +numpy/typing/tests/data/fail/memmap.pyi,sha256=uXPLcVx726Bbw93E5kdIc7K0ssvLIZoJfNTULMtAa_8,169 +numpy/typing/tests/data/fail/modules.pyi,sha256=mEBLIY6vZAPIf2BuyJcMAR-FarSkT55FRlusrsR0qCo,603 +numpy/typing/tests/data/fail/multiarray.pyi,sha256=lSV5JiLNz-CxHUlNbF1Bq3x7mOftfr1kiiG2DgtXilE,1656 +numpy/typing/tests/data/fail/ndarray.pyi,sha256=65IDiOprlv-sg375SBmmh6_hYOzlucTVLe42GymniGM,381 +numpy/typing/tests/data/fail/ndarray_misc.pyi,sha256=hSdxKyxweyxAH32DGa_ZnZIXqPNh6CafBK90rjbi8cs,1061 
+numpy/typing/tests/data/fail/nditer.pyi,sha256=nRbQ66HcoKXDKIacbWD9pTq-523GJOqxzJ3r278lDsc,319 +numpy/typing/tests/data/fail/nested_sequence.pyi,sha256=jGjoQhCLr8dbTCPvWkilJKAW0RRMbrY-iEHf24Happo,463 +numpy/typing/tests/data/fail/npyio.pyi,sha256=vPYmFaPCFbr5V2AC3074w8hTCBUYxpSF4fi1sbbfopw,646 +numpy/typing/tests/data/fail/numerictypes.pyi,sha256=NJxviXTJIaDoz7q56dBrHCBNNG-doTu-oIryzwURxHQ,124 +numpy/typing/tests/data/fail/random.pyi,sha256=IvKXQuxZhuK6M0u2x3Y4PhXvLoC8OFnUdoeneaqDiIE,2903 +numpy/typing/tests/data/fail/rec.pyi,sha256=eeFXVvVg4DherRMA1T8KERtTiRN1ZIbarw4Yokb8WrU,741 +numpy/typing/tests/data/fail/scalars.pyi,sha256=EQy8ovBZX49a7AgtRyI8uHgauQoVzAmjE3NALe97tEw,2849 +numpy/typing/tests/data/fail/shape.pyi,sha256=VNucLx9ittp1a0AOyVPd6XKfERm0kq_ND1lOr-LXQ_s,131 +numpy/typing/tests/data/fail/shape_base.pyi,sha256=8366-8mCNii1D0W6YrOhCNxo8rrfqQThO-dIVWNRHvA,157 +numpy/typing/tests/data/fail/stride_tricks.pyi,sha256=g7-DY8Zc8pzTDyOBA-8t6yIFj1FZI9XpvVdbybQN2i0,330 +numpy/typing/tests/data/fail/strings.pyi,sha256=wX9ROrRNhpH9g_ewNGjWuTKU-He4xaNxrtz2Dm3iPo8,2333 +numpy/typing/tests/data/fail/testing.pyi,sha256=m8d2OZZ1DtsHfmnTwvdMRETUfo0lwRzaOXjuyNi08PQ,1399 +numpy/typing/tests/data/fail/twodim_base.pyi,sha256=ROt5iqOp9ENbXlMEG8dzUZxHD3N4lwcbyCffuJ4BLZE,936 +numpy/typing/tests/data/fail/type_check.pyi,sha256=hRXyE4Ywx6zjtSgiHwKRs4k47M4hnPjj7yjVhi91IaU,397 +numpy/typing/tests/data/fail/ufunc_config.pyi,sha256=v5rd68Y2TzLplIOaOXM4h66HqSv8XbapR0b3xaoUOdQ,589 +numpy/typing/tests/data/fail/ufunclike.pyi,sha256=ejCb6kb7mmxPH0QrDsYfdFSLLPFKx0IZ9xSLs3YXOzg,649 +numpy/typing/tests/data/fail/ufuncs.pyi,sha256=XBoxO597ponBkFcCfwCS3s-jKfcnDzC_K5n2uBPrD6E,505 +numpy/typing/tests/data/fail/warnings_and_errors.pyi,sha256=SoFIznFd_xDifIsS0pv0aqS2BvhZaT6xsOA0zJrRJkA,200 +numpy/typing/tests/data/misc/extended_precision.pyi,sha256=n1nzRzRa_oKDdNExxB0qRIQr8MeDIosbLU6Vpgi6ZYo,322 +numpy/typing/tests/data/mypy.ini,sha256=rfUCMP01SsfRLJ-MRGEicI9XW-HJDoTJ_ncaACuKJ0s,245 +numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_like.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/dtype.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/literal.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ma.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/mod.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/modules.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-312.pyc,, 
+numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/nditer.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/numeric.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/random.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/scalars.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/shape.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/simple.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-312.pyc,, +numpy/typing/tests/data/pass/arithmetic.py,sha256=t4UK-TROh0uYPlUNn5CZHdTysECmDZa04uUOCZO58cY,7762 +numpy/typing/tests/data/pass/array_constructors.py,sha256=rfJ8SRB4raElxRjsHBCsZIkZAfqZMie0VE8sSKMgkHg,2447 +numpy/typing/tests/data/pass/array_like.py,sha256=-wTiw2o_rLw1aeT7FSh60RKguhvxKyr_Vv5XNXTYeS4,1032 +numpy/typing/tests/data/pass/arrayprint.py,sha256=y_KkuLz1uM7pv53qfq7GQOuud4LoXE3apK1wtARdVyM,766 +numpy/typing/tests/data/pass/arrayterator.py,sha256=FqcpKdUQBQ0FazHFxr9MsLEZG-jnJVGKWZX2owRr4DQ,393 +numpy/typing/tests/data/pass/bitwise_ops.py,sha256=FmEs_sKaU9ox-5f0NU3_TRIv0XxLQVEZ8rou9VNehb4,964 +numpy/typing/tests/data/pass/comparisons.py,sha256=5aGrNl3D7Yd1m9WVkHrjJtqi7SricTxrEMtmIV9x0aE,3298 +numpy/typing/tests/data/pass/dtype.py,sha256=YDuYAb0oKoJc9eOnKJuoPfLbIKOgEdE04_CYxRS4U5I,1070 +numpy/typing/tests/data/pass/einsumfunc.py,sha256=eXj5L5MWPtQHgrHPsJ36qqrmBHqct9UoujjJCvHnF1k,1370 +numpy/typing/tests/data/pass/flatiter.py,sha256=tpKL_EAjkJoCZ5C0iuIX0dNCwQ9wUq1XlBMP-n2rjM4,203 +numpy/typing/tests/data/pass/fromnumeric.py,sha256=d_hVLyrVDFPVx33aqLIyAGYYQ8XAJFIzrAsE8QCoof4,3991 +numpy/typing/tests/data/pass/index_tricks.py,sha256=dmonWJMUKsXg23zD_mibEEtd4b5ys-sEfT9Fnnq08x8,1402 +numpy/typing/tests/data/pass/lib_user_array.py,sha256=Za_n84msWtV8dqQZhMhvh7lzu5WZvO8ixTPkEqO2Hms,590 +numpy/typing/tests/data/pass/lib_utils.py,sha256=bj1sEA4gsmezqbYdqKnVtKzY_fb64w7PEoZwNvaaUdA,317 +numpy/typing/tests/data/pass/lib_version.py,sha256=HnuGOx7tQA_bcxFIJ3dRoMAR0fockxg4lGqQ4g7LGIw,299 +numpy/typing/tests/data/pass/literal.py,sha256=HSG-2Gf7J5ax3mjTOeh0pAYUrVOqboTkrt2m6ssfqVY,1508 +numpy/typing/tests/data/pass/ma.py,sha256=ZIi85AwntBX7M1LIvl4yEGixAauHAS2GINBR42Ri4Hw,3362 +numpy/typing/tests/data/pass/mod.py,sha256=owFL1fys3LPTWpAlsjS-IzW4sSu98ncp2BnsIetLSrA,1576 +numpy/typing/tests/data/pass/modules.py,sha256=g9PhyLO6rflYHZtmryx1VWTubphN4TAPUSfoiYriTqE,625 +numpy/typing/tests/data/pass/multiarray.py,sha256=MxHax6l94yqlTVZleAqG77ILEbW6wU5osPcHzxJ85ns,1331 +numpy/typing/tests/data/pass/ndarray_conversion.py,sha256=d7cFNUrofdLXh9T_9RG3Esz1XOihWWQNlz5Lb0yt6dM,1525 +numpy/typing/tests/data/pass/ndarray_misc.py,sha256=wBbQDHcpiIlMl-z5ToVOrFpoxrqXQMBq1dFSWfwGJNE,3699 +numpy/typing/tests/data/pass/ndarray_shape_manipulation.py,sha256=37eYwMNqMLwanIW9-63hrokacnSz2K_qtPUlkdpsTjo,640 +numpy/typing/tests/data/pass/nditer.py,sha256=nYO45Lw3ZNbQq75Vht86zzLZ4cWzP3ml0rxDPlYt8_8,63 +numpy/typing/tests/data/pass/numeric.py,sha256=pOwxnmZmdCtDKh9ih0h5GFIUPJwsi97NBs1y5ZAGyUM,1622 
+numpy/typing/tests/data/pass/numerictypes.py,sha256=6x6eN9-5NsSQUSc6rf3fYieS2poYEY0t_ujbwgF9S5Q,331 +numpy/typing/tests/data/pass/random.py,sha256=UJF6epKYGfGq9QlrR9YuA7EK_mI8AQ2osdA4Uhsh1ms,61824 +numpy/typing/tests/data/pass/recfunctions.py,sha256=GwDirrHsL3upfIsAEZakPt95-RLY7BpXqU_KXxi4HhQ,5003 +numpy/typing/tests/data/pass/scalars.py,sha256=pzV3Y20dd6xB9NRsJ0YSdkcvI5XcD8cEWtEo1KTL1SU,3724 +numpy/typing/tests/data/pass/shape.py,sha256=L2iugxTnbm8kmBpaJVYpURKJEAnI7TH2KtuYeqNR9co,445 +numpy/typing/tests/data/pass/simple.py,sha256=lPj620zkTA8Sg893eu2mGuj-Xq2BGZ_1dcmfsVDkz8g,2751 +numpy/typing/tests/data/pass/simple_py3.py,sha256=HuLrc5aphThQkLjU2_19KgGFaXwKOfSzXe0p2xMm8ZI,96 +numpy/typing/tests/data/pass/ufunc_config.py,sha256=uzXOhCl9N4LPV9hV2Iqg_skgkKMbBPBF0GXPU9EMeuE,1205 +numpy/typing/tests/data/pass/ufunclike.py,sha256=U4Aay11VALvm22bWEX0eDWuN5qxJlg_hH5IpOL62M3I,1125 +numpy/typing/tests/data/pass/ufuncs.py,sha256=1Rem_geEm4qyD3XaRA1NAPKwr3YjRq68zbIlC_Xhi9M,422 +numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=ETLZkDTGpZspvwjVYAZlnA1gH4PJ4bSY5PkWyxTjusU,161 +numpy/typing/tests/data/reveal/arithmetic.pyi,sha256=phWM8Fz30fe--KkKI8S9voIbDNHbxIKSzLwRWwvJ7yU,27424 +numpy/typing/tests/data/reveal/array_api_info.pyi,sha256=oWKW0yGS9xKcLZnH2QeeixMBcI74dNIcwZr0bwGmDVM,3017 +numpy/typing/tests/data/reveal/array_constructors.pyi,sha256=fJZwsHVQS-_sEMo6qsLKyKyxuQoGvCeW8TF3xUzv_rw,13041 +numpy/typing/tests/data/reveal/arraypad.pyi,sha256=Dg5ss1cDS_QiNT4YEheHXMa2beM4qBTUb1mq-REkh6A,653 +numpy/typing/tests/data/reveal/arrayprint.pyi,sha256=iUHzZaUrYFGC9QBCxhiEAIJODeqGwG7VCv875il-9gY,777 +numpy/typing/tests/data/reveal/arraysetops.pyi,sha256=Hhe49rLgj0P8SXElncNvLeCv1OqdI-iryB_673w7vL4,4411 +numpy/typing/tests/data/reveal/arrayterator.pyi,sha256=QPRyZzHFmti4HlrJ315dgzBjaet8LqM9il-8uc9e2P8,1039 +numpy/typing/tests/data/reveal/bitwise_ops.pyi,sha256=TjW0vMyXqUy-WoEIMA3AMN_u4IGw5RosOWK_qHMNjes,4911 +numpy/typing/tests/data/reveal/char.pyi,sha256=9QbiMbkKycnZl4f4eKBoF_rAxIUIv3vBcOQyksHJCug,11470 +numpy/typing/tests/data/reveal/chararray.pyi,sha256=4oqRNZt7jIdfbNVgcsWPDVVFrrEYhqjAExaNzPya_lY,5199 +numpy/typing/tests/data/reveal/comparisons.pyi,sha256=mXRfm3ZUsk8YbSPg9ugPSWLGRwzUVy4BEVN7q4K56tc,7195 +numpy/typing/tests/data/reveal/constants.pyi,sha256=AazwlvF--Te1dt35f8lkDLNuo3jQXqmGvddDQ37jAE0,333 +numpy/typing/tests/data/reveal/ctypeslib.pyi,sha256=U9ZO5GnGHxVyv-OWRYWHSXctH7LGHPWDdyNVl_saQEQ,4134 +numpy/typing/tests/data/reveal/datasource.pyi,sha256=B9nCoOPE4fJvBIeInAgUCg5pIsr8IYOu_iToqt6n-Nc,583 +numpy/typing/tests/data/reveal/dtype.pyi,sha256=IdxNE3NIE0YKpVw4yI9lS-wWPmeFyfGCW2V0oyor4zk,5080 +numpy/typing/tests/data/reveal/einsumfunc.pyi,sha256=qPYk5W3lardDdgsQIGyu356iIGDnb0P38UKQDXWQlrk,1926 +numpy/typing/tests/data/reveal/emath.pyi,sha256=fcf0-GftYRByfJFuZC-MvzHlQU4A-f9-kPnxzQt48E0,2125 +numpy/typing/tests/data/reveal/fft.pyi,sha256=uZOJ0ljmmnejfPEwMsfUGDb52NOuTh7Npl7ONwx-Y2k,1601 +numpy/typing/tests/data/reveal/flatiter.pyi,sha256=ZxgdgbRWYXlyxlPOXJzZSHvALqGsK3aV4lf9RePghdA,1347 +numpy/typing/tests/data/reveal/fromnumeric.pyi,sha256=xweKmm6uKVgJF4-AwtM6hGEI_YHosu-8jXnd8yjSfJ4,15066 +numpy/typing/tests/data/reveal/getlimits.pyi,sha256=mH0kk94VBu-O5ZzA1nki80jttDK_EBGOsLQOZo3Rq18,1547 +numpy/typing/tests/data/reveal/histograms.pyi,sha256=Mr7P7JYMWF9jM6w5othyzh8CN3ygd2A-WRoB4jImnzk,1257 +numpy/typing/tests/data/reveal/index_tricks.pyi,sha256=4dvG8RXY5ktKXo1uC_pfPHXBDd7tatTbjCs8xr8M2os,3241 
+numpy/typing/tests/data/reveal/lib_function_base.pyi,sha256=LMCyduuUjX1E7ruBI-B_cEJQ_rUt9ZO21ck22_OLa_c,10112 +numpy/typing/tests/data/reveal/lib_polynomial.pyi,sha256=CrG0zxbY-HddD7D93q5Cow6c_3mx3nVb1ZCcAq5mC4U,5660 +numpy/typing/tests/data/reveal/lib_utils.pyi,sha256=oQCay2NF8pYHD5jNgRZKNjn8uJW4TJqUPIlytOwDSi0,436 +numpy/typing/tests/data/reveal/lib_version.pyi,sha256=y4ZJSLEeS273Zd6fqaE2XNdczTS0-cwIJ2Yn_4Otm44,572 +numpy/typing/tests/data/reveal/linalg.pyi,sha256=w8RdvwTSt40PMQDvlt_tnky4Cu9LnTUXAmdFhZORPpc,5933 +numpy/typing/tests/data/reveal/ma.pyi,sha256=5FCR2aqUpKOtoQcazro_5C-NE2MrywouDrMHirVyHF0,16223 +numpy/typing/tests/data/reveal/matrix.pyi,sha256=ntknd4qkGbaBMMzPlkTeahyg_H8_TDBJQDbd36a_QfY,3040 +numpy/typing/tests/data/reveal/memmap.pyi,sha256=OCcEhR5mvvXk4UhF6lRqmkxU2NcAqJ4nqAuBpcroQ1g,719 +numpy/typing/tests/data/reveal/mod.pyi,sha256=9nJnn1rA_4mbk2JSYyDmQ5pMWWQ9vPDDzWqijlFAG4I,7599 +numpy/typing/tests/data/reveal/modules.pyi,sha256=_Gvxgql5KbJFL1Mj5gFAphzyGC44AkuNZLnYkv-3LRA,1858 +numpy/typing/tests/data/reveal/multiarray.pyi,sha256=oz81sV4JUBbd6memodStUpT11TARzqRXWUs4H0cU-YA,7779 +numpy/typing/tests/data/reveal/nbit_base_example.pyi,sha256=9OqWKUGRGCIt-mywzDmZExTOsM7l3JGw0YAPB9rs_8k,687 +numpy/typing/tests/data/reveal/ndarray_assignability.pyi,sha256=KOl5ActvtUx6h1oTQT3c0EiU5eCDbMD1okQVfxpc4j0,2668 +numpy/typing/tests/data/reveal/ndarray_conversion.pyi,sha256=SAI9kxMNl66L8n7kO3jn7-EL_3Ygn46behqD_dVa5Hw,3309 +numpy/typing/tests/data/reveal/ndarray_misc.pyi,sha256=8jwi9O-iGcojU0xSF_GUYMFRpkRdol5hQza0hkziNXc,8663 +numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi,sha256=z8SRTWdl6fSj_ENNF-M5jZnujUl1180WaFMAanXqCVw,1394 +numpy/typing/tests/data/reveal/nditer.pyi,sha256=yih7UE0OynR7GuVCGgwhzjTjARwOXikDe6Dr4ymRC2g,1898 +numpy/typing/tests/data/reveal/nested_sequence.pyi,sha256=Z2vwweUjoqxR0zUUldOUXsg6mkDfDP1BMyFV2hje5Z8,612 +numpy/typing/tests/data/reveal/npyio.pyi,sha256=p6jJFmcwXuQhshYC70zhg_itI1kLiDu9saUCNwYpFNo,3493 +numpy/typing/tests/data/reveal/numeric.pyi,sha256=0hvPN803QJoO38lYY68of7M-1KGXqdgHy9RdqcHwO-M,5869 +numpy/typing/tests/data/reveal/numerictypes.pyi,sha256=4lnZQTgVtig8UuDwuETyQ6jRFxsYv6tnni2ZJaDyMM0,1331 +numpy/typing/tests/data/reveal/polynomial_polybase.pyi,sha256=V7ulOvXuAcduWTD_7Jg1yPCLvROq8E-10GobfNlKXD8,7925 +numpy/typing/tests/data/reveal/polynomial_polyutils.pyi,sha256=I_4waxJEeUsp5pjnbBN55kqZ2kycK8akD_XvhsgsCGY,10642 +numpy/typing/tests/data/reveal/polynomial_series.pyi,sha256=YowKiIaDd2Je0PjEmXDINUXe4il0r4KDkpzDbYpwG38,6853 +numpy/typing/tests/data/reveal/random.pyi,sha256=xXJobSp5nVBelmrBO_OTvV8XQnbnZjbAyJfrRwlJshg,104296 +numpy/typing/tests/data/reveal/rec.pyi,sha256=E8lxkOQ4qSwwX20Y4d438s5g-kTnNARsZc4f-Y8OhZo,3378 +numpy/typing/tests/data/reveal/scalars.pyi,sha256=5s5Xm1HoA6bwwqK4gfEWqoNk45dAQvxAZLZc2zUhe3A,6378 +numpy/typing/tests/data/reveal/shape.pyi,sha256=ZT6e5LW4nU90tA-Av5NLiyoaPW9NIX_XkWJ-LOOzh84,262 +numpy/typing/tests/data/reveal/shape_base.pyi,sha256=xbnt0jps1djVxVMn4Lj8bxGl-mGvbhqSKFVWYcFApLg,2006 +numpy/typing/tests/data/reveal/stride_tricks.pyi,sha256=Cm9P_F7promu0zGZmo957SOFCZ6Np8wSv5ecR_hB668,1315 +numpy/typing/tests/data/reveal/strings.pyi,sha256=WvSd8xHIdxQdah3Q0ZJUva79jfVngB3UD9yb6awDW8w,9547 +numpy/typing/tests/data/reveal/testing.pyi,sha256=vP3uEWEdFHrfv_Q4OaJ0Oo5gUqUxkkIRVjvJMsqiHs8,8443 +numpy/typing/tests/data/reveal/twodim_base.pyi,sha256=TiBbWXI0xRCgk0bE-Bd4ZryWaLeJIQ5I-6KBjIVoMuE,4237 +numpy/typing/tests/data/reveal/type_check.pyi,sha256=W7rJUEf_iwI0D1FIVjhCEfzIjw_T04qcBYFxuPwnXAo,2392 
+numpy/typing/tests/data/reveal/ufunc_config.pyi,sha256=XoD9fxaMVCGgyMncWKIJssFBO0SmndHsDs0hDXS04A8,1162 +numpy/typing/tests/data/reveal/ufunclike.pyi,sha256=0jwIYSgXn0usVGkzyZz0ttO5tSYfWMYu_U2ByqrzuRQ,1183 +numpy/typing/tests/data/reveal/ufuncs.pyi,sha256=2IYvfPlLCuqgoyNKzbcv3mr-Dva2cyUSWtBWuM77sDk,4789 +numpy/typing/tests/data/reveal/warnings_and_errors.pyi,sha256=5qqRFzPOon1GhU_i5CHDxQLPKVcO2EMhbc851V8Gusc,449 +numpy/typing/tests/test_isfile.py,sha256=yaRIX3JLmwY1cgD-xxKvJjMVVBRmv9QNSXx9kQSoVAc,878 +numpy/typing/tests/test_runtime.py,sha256=YHS0Hgv1v3cip7C14UcsJWLGI37m18MqXrwLmb88Ctc,2919 +numpy/typing/tests/test_typing.py,sha256=VERPf6NJ6gRLoKk0ki-s1wvDS4E--InjNUaj63_Q-00,6289 +numpy/version.py,sha256=Ccbg_VAXfEkqr6IQb13_j4PBikXP57X-Gsn02aWHzg4,293 +numpy/version.pyi,sha256=x3oCrDqM_gQhitdDgfgMhJ-UPabIXk5etqBq8HUwUok,358 diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/REQUESTED b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/WHEEL new file mode 100644 index 0000000..227a38c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: false +Tag: cp312-cp312-manylinux_2_27_x86_64 +Tag: cp312-cp312-manylinux_2_28_x86_64 + diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/entry_points.txt b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/entry_points.txt new file mode 100644 index 0000000..48c4f64 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy-2.3.2.dist-info/entry_points.txt @@ -0,0 +1,13 @@ +[pkg_config] +numpy = numpy._core.lib.pkgconfig + +[array_api] +numpy = numpy + +[pyinstaller40] +hook-dirs = numpy:_pyinstaller_hooks_dir + +[console_scripts] +f2py = numpy.f2py.f2py2e:main +numpy-config = numpy._configtool:main + diff --git a/.venv/lib/python3.12/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 b/.venv/lib/python3.12/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 new file mode 100755 index 0000000..f00c303 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 differ diff --git a/.venv/lib/python3.12/site-packages/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 b/.venv/lib/python3.12/site-packages/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 new file mode 100755 index 0000000..b6063a0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 differ diff --git a/.venv/lib/python3.12/site-packages/numpy.libs/libscipy_openblas64_-8fb3d286.so b/.venv/lib/python3.12/site-packages/numpy.libs/libscipy_openblas64_-8fb3d286.so new file mode 100755 index 0000000..d644909 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy.libs/libscipy_openblas64_-8fb3d286.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__config__.py b/.venv/lib/python3.12/site-packages/numpy/__config__.py new file mode 100644 index 0000000..c02dc19 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/__config__.py @@ -0,0 +1,170 @@ +# This file is generated by numpy's build process +# It contains system_info results at the time of building this package. 
+from enum import Enum +from numpy._core._multiarray_umath import ( + __cpu_features__, + __cpu_baseline__, + __cpu_dispatch__, +) + +__all__ = ["show_config"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)} + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "14.2.1", + "commands": r"cc", + "args": r"", + "linker args": r"", + }, + "cython": { + "name": "cython", + "linker": r"cython", + "version": "3.1.2", + "commands": r"cython", + "args": r"", + "linker args": r"", + }, + "c++": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "14.2.1", + "commands": r"c++", + "args": r"", + "linker args": r"", + }, + }, + "Machine Information": { + "host": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "linux", + }, + "build": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "linux", + }, + "cross-compiled": bool("False".lower().replace("false", "")), + }, + "Build Dependencies": { + "blas": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.30", + "detection method": "pkgconfig", + "include directory": r"/opt/_internal/cpython-3.12.11/lib/python3.12/site-packages/scipy_openblas64/include", + "lib directory": r"/opt/_internal/cpython-3.12.11/lib/python3.12/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64", + "pc file directory": r"/project/.openblas", + }, + "lapack": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.30", + "detection method": "pkgconfig", + "include directory": r"/opt/_internal/cpython-3.12.11/lib/python3.12/site-packages/scipy_openblas64/include", + "lib directory": r"/opt/_internal/cpython-3.12.11/lib/python3.12/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64", + "pc file directory": r"/project/.openblas", + }, + }, + "Python Information": { + "path": r"/tmp/build-env-cy0nuk24/bin/python", + "version": "3.12", + }, + "SIMD Extensions": { + "baseline": __cpu_baseline__, + "found": [ + feature for feature in __cpu_dispatch__ if __cpu_features__[feature] + ], + "not found": [ + feature for feature in __cpu_dispatch__ if not __cpu_features__[feature] + ], + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which NumPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. 
The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) + + +def show_config(mode=DisplayModes.stdout.value): + return show(mode) + + +show_config.__doc__ = show.__doc__ +show_config.__module__ = "numpy" diff --git a/.venv/lib/python3.12/site-packages/numpy/__config__.pyi b/.venv/lib/python3.12/site-packages/numpy/__config__.pyi new file mode 100644 index 0000000..b59bdcd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/__config__.pyi @@ -0,0 +1,102 @@ +from enum import Enum +from types import ModuleType +from typing import Final, NotRequired, TypedDict, overload, type_check_only +from typing import Literal as L + +_CompilerConfigDictValue = TypedDict( + "_CompilerConfigDictValue", + { + "name": str, + "linker": str, + "version": str, + "commands": str, + "args": str, + "linker args": str, + }, +) +_CompilerConfigDict = TypedDict( + "_CompilerConfigDict", + { + "c": _CompilerConfigDictValue, + "cython": _CompilerConfigDictValue, + "c++": _CompilerConfigDictValue, + }, +) +_MachineInformationDict = TypedDict( + "_MachineInformationDict", + { + "host": _MachineInformationDictValue, + "build": _MachineInformationDictValue, + "cross-compiled": NotRequired[L[True]], + }, +) + +@type_check_only +class _MachineInformationDictValue(TypedDict): + cpu: str + family: str + endian: L["little", "big"] + system: str + +_BuildDependenciesDictValue = TypedDict( + "_BuildDependenciesDictValue", + { + "name": str, + "found": NotRequired[L[True]], + "version": str, + "include directory": str, + "lib directory": str, + "openblas configuration": str, + "pc file directory": str, + }, +) + +class _BuildDependenciesDict(TypedDict): + blas: _BuildDependenciesDictValue + lapack: _BuildDependenciesDictValue + +class _PythonInformationDict(TypedDict): + path: str + version: str + +_SIMDExtensionsDict = TypedDict( + "_SIMDExtensionsDict", + { + "baseline": list[str], + "found": list[str], + "not found": list[str], + }, +) + +_ConfigDict = TypedDict( + "_ConfigDict", + { + "Compilers": _CompilerConfigDict, + "Machine Information": _MachineInformationDict, + "Build Dependencies": _BuildDependenciesDict, + "Python Information": _PythonInformationDict, + "SIMD Extensions": _SIMDExtensionsDict, + }, +) + +### + +__all__ = ["show_config"] + +CONFIG: Final[_ConfigDict] = ... + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + +def _check_pyyaml() -> ModuleType: ... + +@overload +def show(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... 
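The `show`/`show_config` interface defined above can be exercised in either display mode. A minimal usage sketch, assuming this NumPy 2.3.2 build is importable from the environment (the `"scipy-openblas"` value is taken from the generated CONFIG dict above):

import numpy as np

# Print the build configuration: rendered as YAML if pyyaml is
# installed, otherwise as indented JSON (with a warning).
np.show_config()

# Fetch the same information as a nested dict and inspect it
# programmatically instead of printing it.
cfg = np.show_config(mode="dicts")
print(cfg["Build Dependencies"]["blas"]["name"])  # "scipy-openblas"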
diff --git a/.venv/lib/python3.12/site-packages/numpy/__init__.cython-30.pxd b/.venv/lib/python3.12/site-packages/numpy/__init__.cython-30.pxd new file mode 100644 index 0000000..86c91cf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/__init__.cython-30.pxd @@ -0,0 +1,1241 @@ +# NumPy static imports for Cython >= 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. +# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + """ + + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VSTRING + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + + NPY_INTP + NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! 
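+    # Usage sketch (an illustration, not part of the upstream header): these
+    # type numbers are what PyArray_TYPE (declared further below) returns,
+    # and import_array() must have run before any PyArray_* call, e.g. in a
+    # user .pyx module that cimports this file:
+    #
+    #     cimport numpy as cnp
+    #     cnp.import_array()  # Cython 3.0+ normally injects this automatically
+    #
+    #     def is_float64(cnp.ndarray a):
+    #         return cnp.PyArray_TYPE(a) == cnp.NPY_DOUBLE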
+ + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef int type_num + + @property + cdef inline npy_intp itemsize(self) noexcept nogil: + return PyDataType_ELSIZE(self) + + @property + cdef inline npy_intp alignment(self) noexcept nogil: + return PyDataType_ALIGNMENT(self) + + # Use fields/names with care as they may be NULL. You must check + # for this using PyDataType_HASFIELDS. + @property + cdef inline object fields(self): + return PyDataType_FIELDS(self) + + @property + cdef inline tuple names(self): + return PyDataType_NAMES(self) + + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. 
+ @property + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: + return PyDataType_SUBARRAY(self) + + @property + cdef inline npy_uint64 flags(self) noexcept nogil: + """The data types flags.""" + return PyDataType_FLAGS(self) + + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + + @property + cdef inline int numiter(self) noexcept nogil: + """The number of arrays that need to be broadcast to the same shape.""" + return PyArray_MultiIter_NUMITER(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """The total broadcasted size.""" + return PyArray_MultiIter_SIZE(self) + + @property + cdef inline npy_intp index(self) noexcept nogil: + """The current (1-d) index into the broadcasted result.""" + return PyArray_MultiIter_INDEX(self) + + @property + cdef inline int nd(self) noexcept nogil: + """The number of dimensions in the broadcasted result.""" + return PyArray_MultiIter_NDIM(self) + + @property + cdef inline npy_intp* dimensions(self) noexcept nogil: + """The shape of the broadcasted result.""" + return PyArray_MultiIter_DIMS(self) + + @property + cdef inline void** iters(self) noexcept nogil: + """An array of iterator objects that holds the iterators for the arrays to be broadcast together. + On return, the iterators are adjusted for broadcasting.""" + return PyArray_MultiIter_ITERS(self) + + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + # NOTE: no field declarations since direct access is deprecated since NumPy 1.7 + # Instead, we use properties that map to the corresponding C-API functions. + + @property + cdef inline PyObject* base(self) noexcept nogil: + """Returns a borrowed reference to the object owning the data/memory. + """ + return PyArray_BASE(self) + + @property + cdef inline dtype descr(self): + """Returns an owned reference to the dtype of the array. + """ + return PyArray_DESCR(self) + + @property + cdef inline int ndim(self) noexcept nogil: + """Returns the number of dimensions in the array. + """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) noexcept nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) noexcept nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) noexcept nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. + For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. + """ + return PyArray_BYTES(self) + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. 
+ # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # 
works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* 
PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter 
(object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
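
The typedef block that follows is what lets Cython code spell NumPy dtypes such as cnp.float64_t in buffer and memoryview declarations. A minimal usage sketch, assuming this pxd is cimported as cnp (the function name dot64 is illustrative, not part of the file):

    cimport numpy as cnp

    def dot64(cnp.float64_t[:] a, cnp.float64_t[:] b):
        # float64_t is the ctypedef of npy_float64 declared below; the typed
        # memoryview accepts any float64 ndarray or compatible buffer.
        cdef Py_ssize_t i, n = a.shape[0]
        cdef cnp.float64_t acc = 0.0
        if b.shape[0] != n:
            raise ValueError("length mismatch")
        for i in range(n):
            acc += a[i] * b[i]
        return acc

Typed memoryviews go through the buffer protocol, so this sketch needs no import_array() call; only code that invokes PyArray_* functions does.
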
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+ctypedef npy_longlong longlong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef float complex cfloat_t
+ctypedef double complex cdouble_t
+ctypedef double complex complex_t
+ctypedef long double complex clongdouble_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+    return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+    return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+    return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*>c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+    return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*>d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+    return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*>d, <void*>e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+    if PyDataType_HASSUBARRAY(d):
+        return <tuple>d.subarray.shape
+    else:
+        return ()
+
+
+cdef extern from "numpy/ndarrayobject.h":
+    PyTypeObject PyTimedeltaArrType_Type
+    PyTypeObject PyDatetimeArrType_Type
+    ctypedef int64_t npy_timedelta
+    ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+    ctypedef struct PyArray_DatetimeMetaData:
+        NPY_DATETIMEUNIT base
+        int64_t num
+
+    ctypedef struct npy_datetimestruct:
+        int64_t year
+        int32_t month, day, hour, min, sec, us, ps, as
+
+    # Iterator API added in v1.6
+    #
+    # These don't match the definition in the C API because Cython can't wrap
+    # function pointers that return functions.
+ # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). 
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + # deprecated + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base) except *: + Py_INCREF(base) # important to do this before stealing the reference below! 
+    PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+    base = PyArray_BASE(arr)
+    if base is NULL:
+        return None
+    return <object>base
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+    try:
+        __pyx_import_array()
+    except Exception:
+        raise ImportError("numpy._core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy._core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy._core.umath failed to import")
+
+
+cdef inline bint is_timedelta64_object(object obj) noexcept:
+    """
+    Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj) noexcept:
+    """
+    Cython equivalent of `isinstance(obj, np.datetime64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil:
+    """
+    returns the int64 value underlying scalar numpy datetime64 object
+
+    Note that to interpret this as a datetime, the corresponding unit is
+    also needed. That can be found using `get_datetime64_unit`.
+    """
+    return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil:
+    """
+    returns the int64 value underlying scalar numpy timedelta64 object
+    """
+    return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil:
+    """
+    returns the unit part of the dtype for a numpy datetime64 object.
+ """ + return (obj).obmeta.base + + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int 
NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char 
*buf, size_t size) diff --git a/.venv/lib/python3.12/site-packages/numpy/__init__.pxd b/.venv/lib/python3.12/site-packages/numpy/__init__.pxd new file mode 100644 index 0000000..eb07641 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/__init__.pxd @@ -0,0 +1,1154 @@ +# NumPy static imports for Cython < 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. +# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.pxd" */ + """ + + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + bint PyObject_TypeCheck(object obj, PyTypeObject* type) + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VSTRING + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + + NPY_INTP 
+ NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS. + # cdef char flags + cdef int type_num + # itemsize/elsize, alignment, fields, names, and subarray must + # use the `PyDataType_*` accessor macros. With Cython 3 you can + # still use getter attributes `dtype.itemsize` + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! + PyObject* base # NOT PUBLIC, DO NOT USE ! 
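
Since direct field access on ndarray is deprecated, metadata and data should go through the PyArray_* macros declared below. A sketch of that pattern, under the following assumptions: sum1d is an illustrative name, the pxd is cimported as cnp, and cnp.import_array() runs at module load, which the comment at the top of this file requires before any PyArray_* call:

    cimport numpy as cnp

    cnp.import_array()  # must run before any PyArray_* call

    cdef double sum1d(cnp.ndarray arr) except? -1.0:
        # Read ndim/dtype/data through macros, not deprecated struct fields.
        cdef Py_ssize_t i, n
        cdef double* data
        cdef double s = 0.0
        if cnp.PyArray_TYPE(arr) != cnp.NPY_DOUBLE or cnp.PyArray_NDIM(arr) != 1:
            raise TypeError("expected a 1-d float64 array")
        if not cnp.PyArray_IS_C_CONTIGUOUS(arr):
            raise ValueError("expected a C-contiguous array")
        n = cnp.PyArray_DIM(arr, 0)
        data = <double*>cnp.PyArray_DATA(arr)
        for i in range(n):
            s += data[i]
        return s
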
+ + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) 
nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, 
npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter 
(object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
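
Before the typedef block below, one usage note for the PyDataMem_Handler declarations just above (which, per the comment there, need the NumPy 1.22 API): NumPy hands the current allocator around as a PyCapsule. The no-op round trip here only demonstrates the call protocol; installing a real custom allocator would mean wrapping a PyDataMem_Handler struct in a capsule, and roundtrip_handler is an illustrative name, not NumPy API:

    cimport numpy as cnp

    def roundtrip_handler():
        # PyDataMem_GetHandler returns the capsule for the allocator used by
        # newly created arrays; PyDataMem_SetHandler installs a capsule and
        # returns the one that was active before.
        current = cnp.PyDataMem_GetHandler()
        previous = cnp.PyDataMem_SetHandler(current)  # reinstall unchanged
        return previous is not None
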
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+ctypedef npy_longlong longlong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef float complex cfloat_t
+ctypedef double complex cdouble_t
+ctypedef double complex complex_t
+ctypedef long double complex clongdouble_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+    return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+    return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+    return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*>c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+    return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*>d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+    return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*>d, <void*>e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+    if PyDataType_HASSUBARRAY(d):
+        return <tuple>d.subarray.shape
+    else:
+        return ()
+
+
+cdef extern from "numpy/ndarrayobject.h":
+    PyTypeObject PyTimedeltaArrType_Type
+    PyTypeObject PyDatetimeArrType_Type
+    ctypedef int64_t npy_timedelta
+    ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+    ctypedef struct PyArray_DatetimeMetaData:
+        NPY_DATETIMEUNIT base
+        int64_t num
+
+    ctypedef struct npy_datetimestruct:
+        int64_t year
+        int32_t month, day, hour, min, sec, us, ps, as
+
+    # Iterator API added in v1.6
+    #
+    # These don't match the definition in the C API because Cython can't wrap
+    # function pointers that return functions.
+ # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). 
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + # deprecated + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base): + Py_INCREF(base) # important to do this before stealing the reference below! 
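+    # PyArray_SetBaseObject (next line) steals a reference to `base`, so the
+    # Py_INCREF above is what keeps the base object alive for `arr`'s lifetime.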
+ PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (<PyDatetimeScalarObject*>obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (<PyTimedeltaScalarObject*>obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object.
+ """ + return (obj).obmeta.base + + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int 
NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const 
char *buf, size_t size) diff --git a/.venv/lib/python3.12/site-packages/numpy/__init__.py b/.venv/lib/python3.12/site-packages/numpy/__init__.py new file mode 100644 index 0000000..8fb2e74 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/__init__.py @@ -0,0 +1,928 @@ +""" +NumPy +===== + +Provides + 1. An array object of arbitrary homogeneous items + 2. Fast mathematical operations over arrays + 3. Linear Algebra, Fourier Transforms, Random Number Generation + +How to use the documentation +---------------------------- +Documentation is available in two forms: docstrings provided +with the code, and a loose standing reference guide, available from +`the NumPy homepage <https://numpy.org>`_. + +We recommend exploring the docstrings using +`IPython <https://ipython.org>`_, an advanced Python shell with +TAB-completion and introspection capabilities. See below for further +instructions. + +The docstring examples assume that `numpy` has been imported as ``np``:: + + >>> import numpy as np + +Code snippets are indicated by three greater-than signs:: + + >>> x = 42 + >>> x = x + 1 + +Use the built-in ``help`` function to view a function's docstring:: + + >>> help(np.sort) + ... # doctest: +SKIP + +For some objects, ``np.info(obj)`` may provide additional help. This is +particularly true if you see the line "Help on ufunc object:" at the top +of the help() page. Ufuncs are implemented in C, not Python, for speed. +The native Python help() does not know how to view their help, but our +np.info() function does. + +Available subpackages +--------------------- +lib + Basic functions used by several sub-packages. +random + Core Random Tools +linalg + Core Linear Algebra Tools +fft + Core FFT routines +polynomial + Polynomial tools +testing + NumPy testing tools +distutils + Enhancements to distutils with support for + Fortran compilers and more (for Python <= 3.11) + +Utilities +--------- +test + Run numpy unittests +show_config + Show numpy build configuration +__version__ + NumPy version string + +Viewing documentation using IPython +----------------------------------- + +Start IPython and import `numpy` usually under the alias ``np``: `import +numpy as np`. Then, directly paste or use the ``%cpaste`` magic to paste +examples into the shell. To see which functions are available in `numpy`, +type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use +``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow +down the list. To view the docstring for a function, use +``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view +the source code). + +Copies vs. in-place operation +----------------------------- +Most of the functions in `numpy` return a copy of the array argument +(e.g., `np.sort`). In-place versions of these functions are often +available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. +Exceptions to this rule are documented. + +""" +import os +import sys +import warnings + +# If a version with git hash was stored, use that instead +from . import version +from ._expired_attrs_2_0 import __expired_attributes__ +from ._globals import _CopyMode, _NoValue +from .version import __version__ + +# We first need to detect if we're being called as part of the numpy setup +# procedure itself in a reliable manner. +try: + __NUMPY_SETUP__ # noqa: B018 +except NameError: + __NUMPY_SETUP__ = False + +if __NUMPY_SETUP__: + sys.stderr.write('Running from numpy source directory.\n') +else: + # Allow distributors to run custom init code before importing numpy._core + from .
import _distributor_init + + try: + from numpy.__config__ import show_config + except ImportError as e: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + + from . import _core + from ._core import ( + False_, + ScalarType, + True_, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + any, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_str, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + asin, + asinh, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + base_repr, + binary_repr, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + block, + bool, + bool_, + broadcast, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + can_cast, + cbrt, + cdouble, + ceil, + character, + choose, + clip, + clongdouble, + complex64, + complex128, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copysign, + copyto, + correlate, + cos, + cosh, + count_nonzero, + cross, + csingle, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + diagonal, + divide, + divmod, + dot, + double, + dtype, + e, + einsum, + einsum_path, + empty, + empty_like, + equal, + errstate, + euler_gamma, + exp, + exp2, + expm1, + fabs, + finfo, + flatiter, + flatnonzero, + flexible, + float16, + float32, + float64, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromstring, + full, + full_like, + gcd, + generic, + geomspace, + get_printoptions, + getbufsize, + geterr, + geterrcall, + greater, + greater_equal, + half, + heaviside, + hstack, + hypot, + identity, + iinfo, + indices, + inexact, + inf, + inner, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + invert, + is_busday, + isclose, + isdtype, + isfinite, + isfortran, + isinf, + isnan, + isnat, + isscalar, + issubdtype, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + linspace, + little_endian, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + matmul, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + memmap, + min, + min_scalar_type, + minimum, + mod, + modf, + moveaxis, + multiply, + nan, + ndarray, + ndim, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ones, + ones_like, + outer, + partition, + permute_dims, + pi, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + putmask, + rad2deg, + radians, + ravel, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + round, + sctypeDict, + searchsorted, + set_printoptions, + setbufsize, + seterr, + seterrcall, + shape, + shares_memory, + short, + sign, + 
signbit, + signedinteger, + sin, + single, + sinh, + size, + sort, + spacing, + sqrt, + square, + squeeze, + stack, + std, + str_, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + tensordot, + timedelta64, + trace, + transpose, + true_divide, + trunc, + typecodes, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + unstack, + ushort, + var, + vdot, + vecdot, + vecmat, + void, + vstack, + where, + zeros, + zeros_like, + ) + + # NOTE: It's still under discussion whether these aliases + # should be removed. + for ta in ["float96", "float128", "complex192", "complex256"]: + try: + globals()[ta] = getattr(_core, ta) + except AttributeError: + pass + del ta + + from . import lib + from . import matrixlib as _mat + from .lib import scimath as emath + from .lib._arraypad_impl import pad + from .lib._arraysetops_impl import ( + ediff1d, + in1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + ) + from .lib._function_base_impl import ( + angle, + append, + asarray_chkfinite, + average, + bartlett, + bincount, + blackman, + copy, + corrcoef, + cov, + delete, + diff, + digitize, + extract, + flip, + gradient, + hamming, + hanning, + i0, + insert, + interp, + iterable, + kaiser, + median, + meshgrid, + percentile, + piecewise, + place, + quantile, + rot90, + select, + sinc, + sort_complex, + trapezoid, + trapz, + trim_zeros, + unwrap, + vectorize, + ) + from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd + from .lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + ravel_multi_index, + s_, + unravel_index, + ) + from .lib._nanfunctions_impl import ( + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ) + from .lib._npyio_impl import ( + fromregex, + genfromtxt, + load, + loadtxt, + packbits, + save, + savetxt, + savez, + savez_compressed, + unpackbits, + ) + from .lib._polynomial_impl import ( + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polysub, + polyval, + roots, + ) + from .lib._shape_base_impl import ( + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + row_stack, + split, + take_along_axis, + tile, + vsplit, + ) + from .lib._stride_tricks_impl import ( + broadcast_arrays, + broadcast_shapes, + broadcast_to, + ) + from .lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + tri, + tril, + tril_indices, + tril_indices_from, + triu, + triu_indices, + triu_indices_from, + vander, + ) + from .lib._type_check_impl import ( + common_type, + imag, + iscomplex, + iscomplexobj, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real, + real_if_close, + typename, + ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from .lib._utils_impl import get_include, info, show_runtime + from .matrixlib import asmatrix, bmat, matrix + + # public submodules are imported lazily, therefore are accessible from + # __getattr__. Note that `distutils` (deprecated) and `array_api` + # (experimental label) are not added here, because `from numpy import *` + # must not raise any warnings - that's too disruptive. 
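+    # For example (illustrative sketch only; `mypkg` and `heavy` are
+    # hypothetical names): the lazy loading below relies on the module-level
+    # __getattr__ hook from PEP 562, roughly:
+    #
+    #     # mypkg/__init__.py
+    #     def __getattr__(attr):
+    #         if attr == "heavy":
+    #             import mypkg.heavy as heavy
+    #             return heavy
+    #         raise AttributeError(
+    #             f"module {__name__!r} has no attribute {attr!r}")
+    #
+    # so `import mypkg` stays cheap and `mypkg.heavy` is only imported on
+    # first attribute access, as numpy's own __getattr__ further down does.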
+ __numpy_submodules__ = { + "linalg", "fft", "dtypes", "random", "polynomial", "ma", + "exceptions", "lib", "ctypeslib", "testing", "typing", + "f2py", "test", "rec", "char", "core", "strings", + } + + # We build warning messages for former attributes + _msg = ( + "module 'numpy' has no attribute '{n}'.\n" + "`np.{n}` was a deprecated alias for the builtin `{n}`. " + "To avoid this error in existing code, use `{n}` by itself. " + "Doing this will not modify any behavior and is safe. {extended_msg}\n" + "The aliases was originally deprecated in NumPy 1.20; for more " + "details and guidance see the original release note at:\n" + " https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations") + + _specific_msg = ( + "If you specifically wanted the numpy scalar type, use `np.{}` here.") + + _int_extended_msg = ( + "When replacing `np.{}`, you may wish to use e.g. `np.int64` " + "or `np.int32` to specify the precision. If you wish to review " + "your current use, check the release note link for " + "additional information.") + + _type_info = [ + ("object", ""), # The NumPy scalar only exists by name. + ("float", _specific_msg.format("float64")), + ("complex", _specific_msg.format("complex128")), + ("str", _specific_msg.format("str_")), + ("int", _int_extended_msg.format("int"))] + + __former_attrs__ = { + n: _msg.format(n=n, extended_msg=extended_msg) + for n, extended_msg in _type_info + } + + # Some of these could be defined right away, but most were aliases to + # the Python objects and only removed in NumPy 1.24. Defining them should + # probably wait for NumPy 1.26 or 2.0. + # When defined, these should possibly not be added to `__all__` to avoid + # import with `from numpy import *`. + __future_scalars__ = {"str", "bytes", "object"} + + __array_api_version__ = "2024.12" + + from ._array_api_info import __array_namespace_info__ + + # now that numpy core module is imported, can initialize limits + _core.getlimits._register_known_types() + + __all__ = list( + __numpy_submodules__ | + set(_core.__all__) | + set(_mat.__all__) | + set(lib._histograms_impl.__all__) | + set(lib._nanfunctions_impl.__all__) | + set(lib._function_base_impl.__all__) | + set(lib._twodim_base_impl.__all__) | + set(lib._shape_base_impl.__all__) | + set(lib._type_check_impl.__all__) | + set(lib._arraysetops_impl.__all__) | + set(lib._ufunclike_impl.__all__) | + set(lib._arraypad_impl.__all__) | + set(lib._utils_impl.__all__) | + set(lib._stride_tricks_impl.__all__) | + set(lib._polynomial_impl.__all__) | + set(lib._npyio_impl.__all__) | + set(lib._index_tricks_impl.__all__) | + {"emath", "show_config", "__version__", "__array_namespace_info__"} + ) + + # Filter out Cython harmless warnings + warnings.filterwarnings("ignore", message="numpy.dtype size changed") + warnings.filterwarnings("ignore", message="numpy.ufunc size changed") + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + + def __getattr__(attr): + # Warn for expired attributes + import warnings + + if attr == "linalg": + import numpy.linalg as linalg + return linalg + elif attr == "fft": + import numpy.fft as fft + return fft + elif attr == "dtypes": + import numpy.dtypes as dtypes + return dtypes + elif attr == "random": + import numpy.random as random + return random + elif attr == "polynomial": + import numpy.polynomial as polynomial + return polynomial + elif attr == "ma": + import numpy.ma as ma + return ma + elif attr == "ctypeslib": + import numpy.ctypeslib as ctypeslib + return ctypeslib + elif attr == "exceptions": + 
import numpy.exceptions as exceptions + return exceptions + elif attr == "testing": + import numpy.testing as testing + return testing + elif attr == "matlib": + import numpy.matlib as matlib + return matlib + elif attr == "f2py": + import numpy.f2py as f2py + return f2py + elif attr == "typing": + import numpy.typing as typing + return typing + elif attr == "rec": + import numpy.rec as rec + return rec + elif attr == "char": + import numpy.char as char + return char + elif attr == "array_api": + raise AttributeError("`numpy.array_api` is not available from " + "numpy 2.0 onwards", name=None) + elif attr == "core": + import numpy.core as core + return core + elif attr == "strings": + import numpy.strings as strings + return strings + elif attr == "distutils": + if 'distutils' in __numpy_submodules__: + import numpy.distutils as distutils + return distutils + else: + raise AttributeError("`numpy.distutils` is not available from " + "Python 3.12 onwards", name=None) + + if attr in __future_scalars__: + # And future warnings for those that will change, but also give + # the AttributeError + warnings.warn( + f"In the future `np.{attr}` will be defined as the " + "corresponding NumPy scalar.", FutureWarning, stacklevel=2) + + if attr in __former_attrs__: + raise AttributeError(__former_attrs__[attr], name=None) + + if attr in __expired_attributes__: + raise AttributeError( + f"`np.{attr}` was removed in the NumPy 2.0 release. " + f"{__expired_attributes__[attr]}", + name=None + ) + + if attr == "chararray": + warnings.warn( + "`np.chararray` is deprecated and will be removed from " + "the main namespace in the future. Use an array with a string " + "or bytes dtype instead.", DeprecationWarning, stacklevel=2) + import numpy.char as char + return char.chararray + + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") + + def __dir__(): + public_symbols = ( + globals().keys() | __numpy_submodules__ + ) + public_symbols -= { + "matrixlib", "matlib", "tests", "conftest", "version", + "distutils", "array_api" + } + return list(public_symbols) + + # Pytest testing + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + def _sanity_check(): + """ + Quick sanity checks for common bugs caused by environment. + There are some cases e.g. with wrong BLAS ABI that cause wrong + results under specific runtime conditions that are not necessarily + achieved during test suite runs, and it is useful to catch those early. + + See https://github.com/numpy/numpy/issues/8577 and other + similar bug reports. + + """ + try: + x = ones(2, dtype=float32) + if not abs(x.dot(x) - float32(2.0)) < 1e-5: + raise AssertionError + except AssertionError: + msg = ("The current Numpy installation ({!r}) fails to " + "pass simple sanity checks. This can be caused for example " + "by incorrect BLAS library being linked in, or by mixing " + "package managers (pip, conda, apt, ...). Search closed " + "numpy issues for similar problems.") + raise RuntimeError(msg.format(__file__)) from None + + _sanity_check() + del _sanity_check + + def _mac_os_check(): + """ + Quick Sanity check for Mac OS look for accelerate build bugs. + Testing numpy polyfit calls init_dgelsd(LAPACK) + """ + try: + c = array([3., 2., 1.]) + x = linspace(0, 2, 5) + y = polyval(c, x) + _ = polyfit(x, y, 2, cov=True) + except ValueError: + pass + + if sys.platform == "darwin": + from . 
import exceptions + with warnings.catch_warnings(record=True) as w: + _mac_os_check() + # Throw runtime error, if the test failed + # Check for warning and report the error_message + if len(w) > 0: + for _wn in w: + if _wn.category is exceptions.RankWarning: + # Ignore other warnings, they may not be relevant (see gh-25433) + error_message = ( + f"{_wn.category.__name__}: {_wn.message}" + ) + msg = ( + "Polyfit sanity test emitted a warning, most likely due " + "to using a buggy Accelerate backend." + "\nIf you compiled yourself, more information is available at:" # noqa: E501 + "\nhttps://numpy.org/devdocs/building/index.html" + "\nOtherwise report this to the vendor " + f"that provided NumPy.\n\n{error_message}\n") + raise RuntimeError(msg) + del _wn + del w + del _mac_os_check + + def hugepage_setup(): + """ + We usually use madvise hugepages support, but on some old kernels it + is slow and thus better avoided. Specifically kernel version 4.6 + had a bug fix which probably fixed this: + https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff + """ + use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None) + if sys.platform == "linux" and use_hugepage is None: + # If there is an issue with parsing the kernel version, + # set use_hugepage to 0. Usage of LooseVersion will handle + # the kernel version parsing better, but avoided since it + # will increase the import time. + # See: #16679 for related discussion. + try: + use_hugepage = 1 + kernel_version = os.uname().release.split(".")[:2] + kernel_version = tuple(int(v) for v in kernel_version) + if kernel_version < (4, 6): + use_hugepage = 0 + except ValueError: + use_hugepage = 0 + elif use_hugepage is None: + # This is not Linux, so it should not matter, just enable anyway + use_hugepage = 1 + else: + use_hugepage = int(use_hugepage) + return use_hugepage + + # Note that this will currently only make a difference on Linux + _core.multiarray._set_madvise_hugepage(hugepage_setup()) + del hugepage_setup + + # Give a warning if NumPy is reloaded or imported on a sub-interpreter + # We do this from python, since the C-module may not be reloaded and + # it is tidier organized. 
+ _core.multiarray._multiarray_umath._reload_guard() + + # TODO: Remove the environment variable entirely now that it is "weak" + if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"): + warnings.warn( + "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 " + "transition and is ignored after NumPy 2.2.", + UserWarning, stacklevel=2) + + # Tell PyInstaller where to find hook-numpy.py + def _pyinstaller_hooks_dir(): + from pathlib import Path + return [str(Path(__file__).with_name("_pyinstaller").resolve())] + + +# Remove symbols imported for internal use +del os, sys, warnings diff --git a/.venv/lib/python3.12/site-packages/numpy/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/__init__.pyi new file mode 100644 index 0000000..341fc49 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/__init__.pyi @@ -0,0 +1,5387 @@ +# ruff: noqa: I001 +import builtins +import sys +import mmap +import ctypes as ct +import array as _array +import datetime as dt +from abc import abstractmethod +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from decimal import Decimal +from fractions import Fraction +from uuid import UUID + +import numpy as np +from numpy.__config__ import show as show_config +from numpy._pytesttester import PytestTester +from numpy._core._internal import _ctypes + +from numpy._typing import ( + # Arrays + ArrayLike, + NDArray, + _SupportsArray, + _NestedSequence, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex128_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeBytes_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + # DTypes + DTypeLike, + _DTypeLike, + _DTypeLikeVoid, + _VoidDTypeLike, + # Shapes + _AnyShape, + _Shape, + _ShapeLike, + # Scalars + _CharLike_co, + _IntLike_co, + _FloatLike_co, + _TD64Like_co, + _NumberLike_co, + _ScalarLike_co, + # `number` precision + NBitBase, + # NOTE: Do not remove the extended precision bit-types even if seemingly unused; + # they're used by the mypy plugin + _128Bit, + _96Bit, + _64Bit, + _32Bit, + _16Bit, + _8Bit, + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitLong, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, + # Character codes + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _LongCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _ULongCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, + _StringCodes, + _UnsignedIntegerCodes, + _SignedIntegerCodes, + _IntegerCodes, + _FloatingCodes, + _ComplexFloatingCodes, + _InexactCodes, + _NumberCodes, + _CharacterCodes, + _FlexibleCodes, + _GenericCodes, + # Ufuncs + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, +) + +from numpy._typing._callable import ( + _BoolOp, + _BoolBitOp, + _BoolSub, + _BoolTrueDiv, + _BoolMod, + _BoolDivMod, + _IntTrueDiv, + _UnsignedIntOp, + _UnsignedIntBitOp, + 
_UnsignedIntMod, + _UnsignedIntDivMod, + _SignedIntOp, + _SignedIntBitOp, + _SignedIntMod, + _SignedIntDivMod, + _FloatOp, + _FloatMod, + _FloatDivMod, + _NumberOp, + _ComparisonOpLT, + _ComparisonOpLE, + _ComparisonOpGT, + _ComparisonOpGE, +) + +# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform +from numpy._typing._extended_precision import ( + float96, + float128, + complex192, + complex256, +) + +from numpy._array_api_info import __array_namespace_info__ + +from collections.abc import ( + Callable, + Iterable, + Iterator, + Mapping, + Sequence, +) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _SupportsBuffer +else: + _SupportsBuffer: TypeAlias = ( + bytes + | bytearray + | memoryview + | _array.array[Any] + | mmap.mmap + | NDArray[Any] + | generic + ) + +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + LiteralString, + Never, + NoReturn, + Protocol, + Self, + SupportsComplex, + SupportsFloat, + SupportsInt, + SupportsIndex, + TypeAlias, + TypedDict, + final, + overload, + type_check_only, +) + +# NOTE: `typing_extensions` and `_typeshed` are always available in `.pyi` stubs, even +# if not available at runtime. This is because the `typeshed` stubs for the standard +# library include `typing_extensions` stubs: +# https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi +from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite +from typing_extensions import CapsuleType, TypeVar + +from numpy import ( + char, + core, + ctypeslib, + dtypes, + exceptions, + f2py, + fft, + lib, + linalg, + ma, + polynomial, + random, + rec, + strings, + testing, + typing, +) + +# available through `__getattr__`, but not in `__all__` or `__dir__` +from numpy import ( + __config__ as __config__, + matlib as matlib, + matrixlib as matrixlib, + version as version, +) +if sys.version_info < (3, 12): + from numpy import distutils as distutils + +from numpy._core.records import ( + record, + recarray, +) + +from numpy._core.function_base import ( + linspace, + logspace, + geomspace, +) + +from numpy._core.fromnumeric import ( + take, + reshape, + choose, + repeat, + put, + swapaxes, + transpose, + matrix_transpose, + partition, + argpartition, + sort, + argsort, + argmax, + argmin, + searchsorted, + resize, + squeeze, + diagonal, + trace, + ravel, + nonzero, + shape, + compress, + clip, + sum, + all, + any, + cumsum, + cumulative_sum, + ptp, + max, + min, + amax, + amin, + prod, + cumprod, + cumulative_prod, + ndim, + size, + around, + round, + mean, + std, + var, +) + +from numpy._core._asarray import ( + require, +) + +from numpy._core._type_aliases import ( + sctypeDict, +) + +from numpy._core._ufunc_config import ( + seterr, + geterr, + setbufsize, + getbufsize, + seterrcall, + geterrcall, + _ErrKind, + _ErrCall, +) + +from numpy._core.arrayprint import ( + set_printoptions, + get_printoptions, + array2string, + format_float_scientific, + format_float_positional, + array_repr, + array_str, + printoptions, +) + +from numpy._core.einsumfunc import ( + einsum, + einsum_path, +) + +from numpy._core.multiarray import ( + array, + empty_like, + empty, + zeros, + concatenate, + inner, + where, + lexsort, + can_cast, + min_scalar_type, + result_type, + dot, + vdot, + bincount, + copyto, + putmask, + packbits, + unpackbits, + shares_memory, + may_share_memory, + asarray, + asanyarray, + ascontiguousarray, + asfortranarray, + arange, + busday_count, + 
busday_offset, + datetime_as_string, + datetime_data, + frombuffer, + fromfile, + fromiter, + is_busday, + promote_types, + fromstring, + frompyfunc, + nested_iters, + flagsobj, +) + +from numpy._core.numeric import ( + zeros_like, + ones, + ones_like, + full, + full_like, + count_nonzero, + isfortran, + argwhere, + flatnonzero, + correlate, + convolve, + outer, + tensordot, + roll, + rollaxis, + moveaxis, + cross, + indices, + fromfunction, + isscalar, + binary_repr, + base_repr, + identity, + allclose, + isclose, + array_equal, + array_equiv, + astype, +) + +from numpy._core.numerictypes import ( + isdtype, + issubdtype, + ScalarType, + typecodes, +) + +from numpy._core.shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + vstack, + unstack, +) + +from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ + +from numpy.lib import ( + scimath as emath, +) + +from numpy.lib._arraypad_impl import ( + pad, +) + +from numpy.lib._arraysetops_impl import ( + ediff1d, + in1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, +) + +from numpy.lib._function_base_impl import ( + select, + piecewise, + trim_zeros, + copy, + iterable, + percentile, + diff, + gradient, + angle, + unwrap, + sort_complex, + flip, + rot90, + extract, + place, + asarray_chkfinite, + average, + digitize, + cov, + corrcoef, + median, + sinc, + hamming, + hanning, + bartlett, + blackman, + kaiser, + trapezoid, + trapz, + i0, + meshgrid, + delete, + insert, + append, + interp, + quantile, +) + +from numpy._globals import _CopyMode + +from numpy.lib._histograms_impl import ( + histogram_bin_edges, + histogram, + histogramdd, +) + +from numpy.lib._index_tricks_impl import ( + ndenumerate, + ndindex, + ravel_multi_index, + unravel_index, + mgrid, + ogrid, + r_, + c_, + s_, + index_exp, + ix_, + fill_diagonal, + diag_indices, + diag_indices_from, +) + +from numpy.lib._nanfunctions_impl import ( + nansum, + nanmax, + nanmin, + nanargmax, + nanargmin, + nanmean, + nanmedian, + nanpercentile, + nanvar, + nanstd, + nanprod, + nancumsum, + nancumprod, + nanquantile, +) + +from numpy.lib._npyio_impl import ( + savetxt, + loadtxt, + genfromtxt, + load, + save, + savez, + savez_compressed, + fromregex, +) + +from numpy.lib._polynomial_impl import ( + poly, + roots, + polyint, + polyder, + polyadd, + polysub, + polymul, + polydiv, + polyval, + polyfit, +) + +from numpy.lib._shape_base_impl import ( + column_stack, + row_stack, + dstack, + array_split, + split, + hsplit, + vsplit, + dsplit, + apply_over_axes, + expand_dims, + apply_along_axis, + kron, + tile, + take_along_axis, + put_along_axis, +) + +from numpy.lib._stride_tricks_impl import ( + broadcast_to, + broadcast_arrays, + broadcast_shapes, +) + +from numpy.lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + tri, + triu, + tril, + vander, + histogram2d, + mask_indices, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, +) + +from numpy.lib._type_check_impl import ( + mintypecode, + real, + imag, + iscomplex, + isreal, + iscomplexobj, + isrealobj, + nan_to_num, + real_if_close, + typename, + common_type, +) + +from numpy.lib._ufunclike_impl import ( + fix, + isposinf, + isneginf, +) + +from numpy.lib._utils_impl import ( + get_include, + info, + show_runtime, +) + +from numpy.matrixlib import ( + asmatrix, + bmat, +) + +__all__ = [ # noqa: RUF022 + # __numpy_submodules__ + "char", "core", 
"ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", + "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", + + # _core.__all__ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert", + "bitwise_left_shift", "bitwise_right_shift", "concat", "pow", "permute_dims", + "memmap", "sctypeDict", "record", "recarray", + + # _core.numeric.__all__ + "newaxis", "ndarray", "flatiter", "nditer", "nested_iters", "ufunc", "arange", + "array", "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "zeros", + "count_nonzero", "empty", "broadcast", "dtype", "fromstring", "fromfile", + "frombuffer", "from_dlpack", "where", "argwhere", "copyto", "concatenate", + "lexsort", "astype", "can_cast", "promote_types", "min_scalar_type", "result_type", + "isfortran", "empty_like", "zeros_like", "ones_like", "correlate", "convolve", + "inner", "dot", "outer", "vdot", "roll", "rollaxis", "moveaxis", "cross", + "tensordot", "little_endian", "fromiter", "array_equal", "array_equiv", "indices", + "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones", + "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_", + "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat", + "shares_memory", "may_share_memory", + "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort", + "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod", + "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim", + "nonzero", "partition", "prod", "ptp", "put", "ravel", "repeat", "reshape", + "resize", "round", "searchsorted", "shape", "size", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "trace", "transpose", "var", + "absolute", "add", "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctan2", + "arctanh", "bitwise_and", "bitwise_or", "bitwise_xor", "cbrt", "ceil", "conj", + "conjugate", "copysign", "cos", "cosh", "bitwise_count", "deg2rad", "degrees", + "divide", "divmod", "e", "equal", "euler_gamma", "exp", "exp2", "expm1", "fabs", + "floor", "floor_divide", "float_power", "fmax", "fmin", "fmod", "frexp", + "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", + "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less", + "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2", + "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum", + "mod", "modf", "multiply", "negative", "nextafter", "not_equal", "pi", "positive", + "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", + "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", + "tanh", "true_divide", "trunc", "ScalarType", "typecodes", "issubdtype", + "datetime_data", "datetime_as_string", "busday_offset", "busday_count", "is_busday", + "busdaycalendar", "isdtype", + "complexfloating", "character", "unsignedinteger", "inexact", "generic", "floating", + "integer", "signedinteger", "number", "flexible", "bool", "float16", "float32", + "float64", "longdouble", "complex64", "complex128", "clongdouble", + "bytes_", "str_", "void", "object_", "datetime64", "timedelta64", "int8", "byte", + "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", + "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", + "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", + "float96", 
"float128", "complex192", "complex256", + "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", + "printoptions", "format_float_positional", "format_float_scientific", "require", + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate", + # _core.function_base.__all__ + "logspace", "linspace", "geomspace", + # _core.getlimits.__all__ + "finfo", "iinfo", + # _core.shape_base.__all__ + "atleast_1d", "atleast_2d", "atleast_3d", "block", "hstack", "stack", "unstack", + "vstack", + # _core.einsumfunc.__all__ + "einsum", "einsum_path", + # matrixlib.__all__ + "matrix", "bmat", "asmatrix", + # lib._histograms_impl.__all__ + "histogram", "histogramdd", "histogram_bin_edges", + # lib._nanfunctions_impl.__all__ + "nansum", "nanmax", "nanmin", "nanargmax", "nanargmin", "nanmean", "nanmedian", + "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod", + "nanquantile", + # lib._function_base_impl.__all__ + "select", "piecewise", "trim_zeros", "copy", "iterable", "percentile", "diff", + "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", + "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", + "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", + "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "interp", "quantile", + # lib._twodim_base_impl.__all__ + "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", + "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", + "triu_indices_from", + # lib._shape_base_impl.__all__ + "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", + "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", + "take_along_axis", "put_along_axis", "row_stack", + # lib._type_check_impl.__all__ + "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", + "real_if_close", "typename", "mintypecode", "common_type", + # lib._arraysetops_impl.__all__ + "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", + # lib._ufunclike_impl.__all__ + "fix", "isneginf", "isposinf", + # lib._arraypad_impl.__all__ + "pad", + # lib._utils_impl.__all__ + "get_include", "info", "show_runtime", + # lib._stride_tricks_impl.__all__ + "broadcast_to", "broadcast_arrays", "broadcast_shapes", + # lib._polynomial_impl.__all__ + "poly", "roots", "polyint", "polyder", "polyadd", "polysub", "polymul", "polydiv", + "polyval", "poly1d", "polyfit", + # lib._npyio_impl.__all__ + "savetxt", "loadtxt", "genfromtxt", "load", "save", "savez", "savez_compressed", + "packbits", "unpackbits", "fromregex", + # lib._index_tricks_impl.__all__ + "ravel_multi_index", "unravel_index", "mgrid", "ogrid", "r_", "c_", "s_", + "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices", + "diag_indices_from", + + # __init__.__all__ + "emath", "show_config", "__version__", "__array_namespace_info__", +] # fmt: skip + +### Constrained types (for internal use only) +# Only use these for functions; never as generic type parameter. 
+ +_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes) +_AnyShapeT = TypeVar( + "_AnyShapeT", + tuple[()], # 0-d + tuple[int], # 1-d + tuple[int, int], # 2-d + tuple[int, int, int], # 3-d + tuple[int, int, int, int], # 4-d + tuple[int, int, int, int, int], # 5-d + tuple[int, int, int, int, int, int], # 6-d + tuple[int, int, int, int, int, int, int], # 7-d + tuple[int, int, int, int, int, int, int, int], # 8-d + tuple[int, ...], # N-d +) +_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) +_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) +_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date | int | None) +_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime) +_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) + +### Type parameters (for internal use only) + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_T_contra = TypeVar("_T_contra", contravariant=True) +_RealT_co = TypeVar("_RealT_co", covariant=True) +_ImagT_co = TypeVar("_ImagT_co", covariant=True) + +_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) + +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) +_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) + +_ArrayT = TypeVar("_ArrayT", bound=ndarray) +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer | np.bool | object_]) +_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) +_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_1DShapeT = TypeVar("_1DShapeT", bound=_1D) +_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) +_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ...
+ +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) +_IntegerT = TypeVar("_IntegerT", bound=integer) +_IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, covariant=True) +_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) + +_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] + +_ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) +_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) +_FlexibleItemT_co = TypeVar( + "_FlexibleItemT_co", + bound=_CharLike_co | tuple[Any, ...], + default=_CharLike_co | tuple[Any, ...], + covariant=True, +) +_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) +_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) + +### Type Aliases (for internal use only) + +_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] +_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] + +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_2Tuple: TypeAlias = tuple[_T, _T] + +_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] +_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] +_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] +_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] +_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] + +_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool +_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co + +_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] 
+ +_UnsignedIntegerCType: TypeAlias = type[ + ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 + | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong + | ct.c_size_t | ct.c_void_p +] # fmt: skip +_SignedIntegerCType: TypeAlias = type[ + ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64 + | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong + | ct.c_ssize_t +] # fmt: skip +_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] +_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType +_NumberCType: TypeAlias = _IntegerCType +_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]] + +# some commonly used builtin types that are known to result in a +# `dtype[object_]`, when their *type* is passed to the `dtype` constructor +# NOTE: `builtins.object` should not be included here +_BuiltinObjectLike: TypeAlias = ( + slice | Decimal | Fraction | UUID + | dt.date | dt.time | dt.timedelta | dt.tzinfo + | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any] +) # fmt: skip + +# Introduce an alias for `dtype` to avoid naming conflicts. +_dtype: TypeAlias = dtype[_ScalarT] + +_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] +# can be anything, is case-insensitive, and only the first character matters +_ByteOrder: TypeAlias = L[ + "S", # swap the current order (default) + "<", "L", "little", # little-endian + ">", "B", "big", # big endian + "=", "N", "native", # native order + "|", "I", # ignore +] # fmt: skip +_DTypeKind: TypeAlias = L[ + "b", # boolean + "i", # signed integer + "u", # unsigned integer + "f", # floating-point + "c", # complex floating-point + "m", # timedelta64 + "M", # datetime64 + "O", # python object + "S", # byte-string (fixed-width) + "U", # unicode-string (fixed-width) + "V", # void + "T", # unicode-string (variable-width) +] +_DTypeChar: TypeAlias = L[ + "?", # bool + "b", # byte + "B", # ubyte + "h", # short + "H", # ushort + "i", # intc + "I", # uintc + "l", # long + "L", # ulong + "q", # longlong + "Q", # ulonglong + "e", # half + "f", # single + "d", # double + "g", # longdouble + "F", # csingle + "D", # cdouble + "G", # clongdouble + "O", # object + "S", # bytes_ (S0) + "a", # bytes_ (deprecated) + "U", # str_ + "V", # void + "M", # datetime64 + "m", # timedelta64 + "c", # bytes_ (S1) + "T", # StringDType +] +_DTypeNum: TypeAlias = L[ + 0, # bool + 1, # byte + 2, # ubyte + 3, # short + 4, # ushort + 5, # intc + 6, # uintc + 7, # long + 8, # ulong + 9, # longlong + 10, # ulonglong + 23, # half + 11, # single + 12, # double + 13, # longdouble + 14, # csingle + 15, # cdouble + 16, # clongdouble + 17, # object + 18, # bytes_ + 19, # str_ + 20, # void + 21, # datetime64 + 22, # timedelta64 + 25, # no type + 256, # user-defined + 2056, # StringDType +] +_DTypeBuiltinKind: TypeAlias = L[0, 1, 2] + +_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] + +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"] + +_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None +_OrderACF: TypeAlias = L["A", "C", "F"] | None +_OrderCF: TypeAlias = L["C", "F"] | None + +_ModeKind: TypeAlias = L["raise", "wrap", "clip"] +_PartitionKind: TypeAlias = L["introselect"] +# in practice, only the first case-insensitive character is considered (so e.g. +# "QuantumSort3000" will be interpreted as quicksort). 
+_SortKind: TypeAlias = L[ + "Q", "quick", "quicksort", + "M", "merge", "mergesort", + "H", "heap", "heapsort", + "S", "stable", "stablesort", +] +_SortSide: TypeAlias = L["left", "right"] + +_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None + +_NDIterFlagsKind: TypeAlias = L[ + "buffered", + "c_index", + "copy_if_overlap", + "common_dtype", + "delay_bufalloc", + "external_loop", + "f_index", + "grow_inner", "growinner", + "multi_index", + "ranged", + "refs_ok", + "reduce_ok", + "zerosize_ok", +] +_NDIterFlagsOp: TypeAlias = L[ + "aligned", + "allocate", + "arraymask", + "copy", + "config", + "nbo", + "no_subtype", + "no_broadcast", + "overlap_assume_elementwise", + "readonly", + "readwrite", + "updateifcopy", + "virtual", + "writeonly", + "writemasked" +] + +_MemMapModeKind: TypeAlias = L[ + "readonly", "r", + "copyonwrite", "c", + "readwrite", "r+", + "write", "w+", +] + +_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] +_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"] +_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] + +_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"] +_DayUnit: TypeAlias = L["W", "D", b"W", b"D"] +_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit] +_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"] +_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"] +_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit] +_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit] +_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit] +_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit] +_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex] + +### TypedDict's (for internal use only) + +@type_check_only +class _FormerAttrsDict(TypedDict): + object: LiteralString + float: LiteralString + complex: LiteralString + str: LiteralString + int: LiteralString + +### Protocols (for internal use only) + +@type_check_only +class _SupportsFileMethods(SupportsFlush, Protocol): + # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile` + def fileno(self) -> SupportsIndex: ... + def tell(self) -> SupportsIndex: ... + def seek(self, offset: int, whence: int, /) -> object: ... + +@type_check_only +class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... + +@type_check_only +class _SupportsItem(Protocol[_T_co]): + def item(self, /) -> _T_co: ... + +@type_check_only +class _SupportsDLPack(Protocol[_T_contra]): + def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... + +@type_check_only +class _HasDType(Protocol[_T_co]): + @property + def dtype(self, /) -> _T_co: ... + +@type_check_only +class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]): + @property + def real(self, /) -> _RealT_co: ... + @property + def imag(self, /) -> _ImagT_co: ... + +@type_check_only +class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): + @property + def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ... 
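+
+# Sketch of how the structural protocols above behave (illustrative only):
+# any object exposing the right attributes matches, with no inheritance
+# required. E.g. a plain class with `real`/`imag` properties satisfies
+# `_HasRealAndImag`:
+#
+#     class Pair:
+#         @property
+#         def real(self) -> float: return 1.0
+#         @property
+#         def imag(self) -> float: return 2.0
+#
+#     p: _HasRealAndImag[float, float] = Pair()  # accepted structurally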
+
+@type_check_only
+class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ...
+
+@type_check_only
+class _HasDateAttributes(Protocol):
+    # The `datetime64` constructor requires an object with the three attributes below,
+    # and thus supports datetime duck typing
+    @property
+    def day(self) -> int: ...
+    @property
+    def month(self) -> int: ...
+    @property
+    def year(self) -> int: ...
+
+### Mixins (for internal use only)
+
+@type_check_only
+class _RealMixin:
+    @property
+    def real(self) -> Self: ...
+    @property
+    def imag(self) -> Self: ...
+
+@type_check_only
+class _RoundMixin:
+    @overload
+    def __round__(self, /, ndigits: None = None) -> int: ...
+    @overload
+    def __round__(self, /, ndigits: SupportsIndex) -> Self: ...
+
+@type_check_only
+class _IntegralMixin(_RealMixin):
+    @property
+    def numerator(self) -> Self: ...
+    @property
+    def denominator(self) -> L[1]: ...
+
+    def is_integer(self, /) -> L[True]: ...
+
+### Public API
+
+__version__: Final[LiteralString] = ...
+
+e: Final[float] = ...
+euler_gamma: Final[float] = ...
+pi: Final[float] = ...
+inf: Final[float] = ...
+nan: Final[float] = ...
+little_endian: Final[builtins.bool] = ...
+False_: Final[np.bool[L[False]]] = ...
+True_: Final[np.bool[L[True]]] = ...
+newaxis: Final[None] = None
+
+# not in __all__
+__NUMPY_SETUP__: Final[L[False]] = False
+__numpy_submodules__: Final[set[LiteralString]] = ...
+__former_attrs__: Final[_FormerAttrsDict] = ...
+__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ...
+__array_api_version__: Final[L["2024.12"]] = "2024.12"
+test: Final[PytestTester] = ...
+
+@type_check_only
+class _DTypeMeta(type):
+    @property
+    def type(cls, /) -> type[generic] | None: ...
+    @property
+    def _abstract(cls, /) -> bool: ...
+    @property
+    def _is_numeric(cls, /) -> bool: ...
+    @property
+    def _parametric(cls, /) -> bool: ...
+    @property
+    def _legacy(cls, /) -> bool: ...
+
+@final
+class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
+    names: tuple[builtins.str, ...] | None
+    def __hash__(self) -> int: ...
+
+    # `None` results in the default dtype
+    @overload
+    def __new__(
+        cls,
+        dtype: type[float64] | None,
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[builtins.str, Any] = ...
+    ) -> dtype[float64]: ...
+
+    # Overload for `dtype` instances, scalar types, and instances that have a
+    # `dtype: dtype[_ScalarT]` attribute
+    @overload
+    def __new__(
+        cls,
+        dtype: _DTypeLike[_ScalarT],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[builtins.str, Any] = ...,
+    ) -> dtype[_ScalarT]: ...
+
+    # Builtin types
+    #
+    # NOTE: Typecheckers act as if `bool <: int <: float <: complex <: object`,
+    # even though at runtime `int`, `float`, and `complex` aren't subtypes of
+    # one another. This makes it impossible to express e.g. "a float that
+    # isn't an int", since type checkers treat `_: float` like `_: float | int`.
+    #
+    # For more details, see:
+    # - https://github.com/numpy/numpy/issues/27032#issuecomment-2278958251
+    # - https://typing.readthedocs.io/en/latest/spec/special-types.html#special-cases-for-float-and-complex
+    @overload
+    def __new__(
+        cls,
+        dtype: type[builtins.bool | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[np.bool]: ...
+    # NOTE: `_: type[int]` also accepts `type[int | bool]`
+    @overload
+    def __new__(
+        cls,
+        dtype: type[int | int_ | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[int_ | np.bool]: ...
+    # NOTE: `_: type[float]` also accepts `type[float | int | bool]`
+    # NOTE: `float64` inherits from `float` at runtime, but this isn't
+    # reflected in these stubs. So an explicit `float64` is required here.
+    @overload
+    def __new__(
+        cls,
+        dtype: type[float | float64 | int_ | np.bool] | None,
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[float64 | int_ | np.bool]: ...
+    # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]`
+    @overload
+    def __new__(
+        cls,
+        dtype: type[complex | complex128 | float64 | int_ | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[complex128 | float64 | int_ | np.bool]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[bytes],  # also includes `type[bytes_]`
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[bytes_]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[str],  # also includes `type[str_]`
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[str_]: ...
+    # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to
+    # be run with the (undocumented) `--disable-memoryview-promotion` flag.
+    # This will be the default in a future mypy release; see:
+    # https://github.com/python/mypy/issues/15313
+    # Pyright / Pylance requires setting `disableBytesTypePromotions=true`,
+    # which is the default in strict mode
+    @overload
+    def __new__(
+        cls,
+        dtype: type[memoryview | void],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[void]: ...
+    # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`,
+    # and is therefore not included here
+    @overload
+    def __new__(
+        cls,
+        dtype: type[_BuiltinObjectLike | object_],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[object_]: ...
+
+    # Unions of builtins.
+    @overload
+    def __new__(
+        cls,
+        dtype: type[bytes | str],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[character]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[bytes | str | memoryview],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[flexible]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ...
+
+    # `unsignedinteger` string-based representations and ctypes
+    @overload
+    def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ...
+    @overload
+    def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ...
+ @overload + def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... + @overload + def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... + @overload + def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... + @overload + def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... + @overload + def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... + # NOTE: We're assuming here that `uint_ptr_t == size_t`, + # an assumption that does not hold in rare cases (same for `ssize_t`) + @overload + def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + @overload + def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + @overload + def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + + # `signedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... + @overload + def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... + @overload + def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... + @overload + def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... + @overload + def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + @overload + def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + @overload + def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... + @overload + def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... + @overload + def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... 
+ @overload + def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + + # `floating` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... + @overload + def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... + @overload + def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... + @overload + def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations + @overload + def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... + @overload + def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... + @overload + def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... + @overload + def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... + @overload + def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + + # Miscellaneous string-based representations and ctypes + @overload + def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... + @overload + def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... + @overload + def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... + @overload + def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... + @overload + def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... + @overload + def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) 
-> dtype[void]: ... + @overload + def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + + # `StringDType` requires special treatment because it has no scalar type + @overload + def __new__( + cls, + dtype: dtypes.StringDType | _StringCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ... + ) -> dtypes.StringDType: ... + + # Combined char-codes and ctypes, analogous to the scalar-type hierarchy + @overload + def __new__( + cls, + dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[unsignedinteger]: ... + @overload + def __new__( + cls, + dtype: _SignedIntegerCodes | _SignedIntegerCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[signedinteger]: ... + @overload + def __new__( + cls, + dtype: _IntegerCodes | _IntegerCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[integer]: ... + @overload + def __new__( + cls, + dtype: _FloatingCodes | _FloatingCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[floating]: ... + @overload + def __new__( + cls, + dtype: _ComplexFloatingCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complexfloating]: ... + @overload + def __new__( + cls, + dtype: _InexactCodes | _FloatingCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[inexact]: ... + @overload + def __new__( + cls, + dtype: _NumberCodes | _NumberCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[number]: ... + @overload + def __new__( + cls, + dtype: _CharacterCodes | type[ct.c_char], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[character]: ... + @overload + def __new__( + cls, + dtype: _FlexibleCodes | type[ct.c_char], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[flexible]: ... + @overload + def __new__( + cls, + dtype: _GenericCodes | _GenericCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[generic]: ... + + # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... + @overload + def __new__( + cls, + dtype: builtins.str, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype: ... + + # Catch-all overload for object-likes + # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some + # (static) type `T` s.t. `object_ <: T <: builtins.object` (`<:` denotes + # the subtyping relation, the (gradual) typing analogue of `issubclass()`). + # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types + @overload + def __new__( + cls, + dtype: type[object], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[object_ | Any]: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... 
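+
+    # How the overloads above resolve under a type checker (illustrative
+    # sketch, not exhaustive):
+    #
+    #     np.dtype(np.float64)   # dtype[float64]
+    #     np.dtype(None)         # dtype[float64] -- the default dtype
+    #     np.dtype(float)        # dtype[float64 | int_ | np.bool], since
+    #                            # `type[float]` also matches int and bool
+    #     np.dtype("uint8")      # dtype[uint8] via the char-code literals
+    #     np.dtype(ct.c_double)  # dtype[double] via the ctypes overloads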
+ + @overload + def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ... + + # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes + @overload + def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ... + @overload + def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + @overload + def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.902. Set the return-type to `dtype` for + # now for non-flexible dtypes. + @overload + def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + @overload + def __rmul__(self, value: SupportsIndex, /) -> dtype: ... + + def __gt__(self, other: DTypeLike, /) -> builtins.bool: ... + def __ge__(self, other: DTypeLike, /) -> builtins.bool: ... + def __lt__(self, other: DTypeLike, /) -> builtins.bool: ... + def __le__(self, other: DTypeLike, /) -> builtins.bool: ... + + # Explicitly defined `__eq__` and `__ne__` to get around mypy's + # `strict_equality` option; even though their signatures are + # identical to their `object`-based counterpart + def __eq__(self, other: Any, /) -> builtins.bool: ... + def __ne__(self, other: Any, /) -> builtins.bool: ... + + @property + def alignment(self) -> int: ... + @property + def base(self) -> dtype: ... + @property + def byteorder(self) -> _ByteOrderChar: ... + @property + def char(self) -> _DTypeChar: ... + @property + def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ... + @property + def fields(self,) -> MappingProxyType[LiteralString, tuple[dtype, int] | tuple[dtype, int, Any]] | None: ... + @property + def flags(self) -> int: ... + @property + def hasobject(self) -> builtins.bool: ... + @property + def isbuiltin(self) -> _DTypeBuiltinKind: ... + @property + def isnative(self) -> builtins.bool: ... + @property + def isalignedstruct(self) -> builtins.bool: ... + @property + def itemsize(self) -> int: ... + @property + def kind(self) -> _DTypeKind: ... + @property + def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ... + @property + def name(self) -> LiteralString: ... + @property + def num(self) -> _DTypeNum: ... + @property + def shape(self) -> _AnyShape: ... + @property + def ndim(self) -> int: ... + @property + def subdtype(self) -> tuple[dtype, _AnyShape] | None: ... + def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ... + @property + def str(self) -> LiteralString: ... + @property + def type(self) -> type[_ScalarT_co]: ... + +@final +class flatiter(Generic[_ArrayT_co]): + __hash__: ClassVar[None] + @property + def base(self) -> _ArrayT_co: ... + @property + def coords(self) -> _Shape: ... + @property + def index(self) -> int: ... + def copy(self) -> _ArrayT_co: ... + def __iter__(self) -> Self: ... + def __next__(self: flatiter[NDArray[_ScalarT]]) -> _ScalarT: ... + def __len__(self) -> int: ... + @overload + def __getitem__( + self: flatiter[NDArray[_ScalarT]], + key: int | integer | tuple[int | integer], + ) -> _ScalarT: ... + @overload + def __getitem__( + self, + key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], + ) -> _ArrayT_co: ... 
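+    # Usage sketch (illustrative): `flatiter` is the type of `ndarray.flat`,
+    # a 1-D, C-order iterator over the array.
+    #
+    #     a = np.arange(6).reshape(2, 3)
+    #     it = a.flat     # flatiter over `a`
+    #     it[4]           # a single element, as the array's scalar type
+    #     it[1:3]         # a copy, per the `__getitem__` overloads above
+    #     it.coords       # the current position as an index tuple
+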
+    # TODO: `__setitem__` operates via `unsafe` casting rules, and can
+    # thus accept any type accepted by the relevant underlying `np.generic`
+    # constructor.
+    # This means that `value` must in reality be a supertype of `npt.ArrayLike`.
+    def __setitem__(
+        self,
+        key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType],
+        value: Any,
+    ) -> None: ...
+    @overload
+    def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DTypeT]: ...
+    @overload
+    def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ...
+    @overload
+    def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = ..., /) -> ndarray[_AnyShape, _DTypeT]: ...
+    @overload
+    def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ...
+
+@type_check_only
+class _ArrayOrScalarCommon:
+    @property
+    def real(self, /) -> Any: ...
+    @property
+    def imag(self, /) -> Any: ...
+    @property
+    def T(self) -> Self: ...
+    @property
+    def mT(self) -> Self: ...
+    @property
+    def data(self) -> memoryview: ...
+    @property
+    def flags(self) -> flagsobj: ...
+    @property
+    def itemsize(self) -> int: ...
+    @property
+    def nbytes(self) -> int: ...
+    @property
+    def device(self) -> L["cpu"]: ...
+
+    def __bool__(self, /) -> builtins.bool: ...
+    def __int__(self, /) -> int: ...
+    def __float__(self, /) -> float: ...
+    def __copy__(self) -> Self: ...
+    def __deepcopy__(self, memo: dict[int, Any] | None, /) -> Self: ...
+
+    # TODO: How to deal with the non-commutative nature of `==` and `!=`?
+    # xref numpy/numpy#17368
+    def __eq__(self, other: Any, /) -> Any: ...
+    def __ne__(self, other: Any, /) -> Any: ...
+
+    def copy(self, order: _OrderKACF = ...) -> Self: ...
+    def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ...
+    def dumps(self) -> bytes: ...
+    def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
+    def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, sep: str = ..., format: str = ...) -> None: ...
+    # generics and 0d arrays return builtin scalars
+    def tolist(self) -> Any: ...
+    def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ...
+
+    @property
+    def __array_interface__(self) -> dict[str, Any]: ...
+    @property
+    def __array_priority__(self) -> float: ...
+    @property
+    def __array_struct__(self) -> CapsuleType: ...  # builtins.PyCapsule
+    def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ...
+    def __setstate__(self, state: tuple[
+        SupportsIndex,  # version
+        _ShapeLike,  # Shape
+        _DTypeT_co,  # DType
+        np.bool,  # F-contiguous
+        bytes | list[Any],  # Data
+    ], /) -> None: ...
+
+    def conj(self) -> Self: ...
+    def conjugate(self) -> Self: ...
+
+    def argsort(
+        self,
+        axis: SupportsIndex | None = ...,
+        kind: _SortKind | None = ...,
+        order: str | Sequence[str] | None = ...,
+        *,
+        stable: bool | None = ...,
+    ) -> NDArray[Any]: ...
+
+    @overload  # axis=None (default), out=None (default), keepdims=False (default)
+    def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ...
+    @overload  # axis=index, out=None (default)
+    def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ...
+    @overload  # axis=index, out=ndarray
+    def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+ @overload + def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... + @overload # axis=index, out=None (default) + def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + @overload # axis=index, out=ndarray + def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + @overload + def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + + @overload # out=None (default) + def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ... + @overload # out=ndarray + def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ... + + # TODO: Annotate kwargs with an unpacked `TypedDict` + @overload # out: None (default) + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload + def clip(self, /, min: None, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload + def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload # out: ndarray + def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ... + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ... + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... 
+ @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + @overload + def max( + self, + /, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def max( + self, + /, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def max( + self, + /, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def min( + self, + /, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def min( + self, + /, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def min( + self, + /, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 0, + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 0, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 0, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 1, + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 1, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 1, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... 
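+
+    # Sketch of the `out=` overload pattern used by the reductions above and
+    # below (illustrative; `buf` is a hypothetical preallocated array):
+    #
+    #     a = np.ones((2, 3))
+    #     a.sum()          # typed Any: result depends on dtype/axis
+    #     buf = np.empty((), dtype=np.float64)
+    #     a.sum(out=buf)   # typed as the array passed via `out`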
+ + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> _ArrayT: ... + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool = False, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> _ArrayT: ... + + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> _ArrayT: ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool = False, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> _ArrayT: ... + +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + @property + def base(self) -> NDArray[Any] | None: ... + @property + def ndim(self) -> int: ... + @property + def size(self) -> int: ... + @property + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @real.setter + def real(self, value: ArrayLike, /) -> None: ... + @property + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @imag.setter + def imag(self, value: ArrayLike, /) -> None: ... + + def __new__( + cls, + shape: _ShapeLike, + dtype: DTypeLike = ..., + buffer: _SupportsBuffer | None = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike | None = ..., + order: _OrderKACF = ..., + ) -> Self: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + @overload + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... + + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + + def __array_function__( + self, + func: Callable[..., Any], + types: Iterable[type], + args: Iterable[Any], + kwargs: Mapping[str, Any], + ) -> Any: ... 
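+
+    # Illustrative sketch: via `_HasDTypeWithRealAndImag`, the `real`/`imag`
+    # properties above derive their element type from the dtype's scalar
+    # type, e.g. (assuming these stubs' scalar definitions):
+    #
+    #     z: NDArray[np.complex128]
+    #     z.real  # ndarray[..., dtype[float64]]
+    #     z.imag  # ndarray[..., dtype[float64]]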
+ + # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__` + # is a pseudo-abstract method the type has been narrowed down in order to + # grant subclasses a bit more flexibility + def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... + + def __array_wrap__( + self, + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: builtins.bool = ..., + /, + ) -> ndarray[_ShapeT, _DTypeT]: ... + + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype]: ... + @overload + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ... + + @overload # flexible | object_ | bool + def __setitem__( + self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + key: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: NDArray[integer], + key: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: NDArray[floating], + key: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: NDArray[complexfloating], + key: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: NDArray[timedelta64], + key: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: NDArray[datetime64], + key: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # void + def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ... + @overload # catch-all + def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ... + + @property + def ctypes(self) -> _ctypes[int]: ... + @property + def shape(self) -> _ShapeT_co: ... + @shape.setter + def shape(self, value: _ShapeLike) -> None: ... + @property + def strides(self) -> _Shape: ... + @strides.setter + def strides(self, value: _ShapeLike) -> None: ... + def byteswap(self, inplace: builtins.bool = ...) -> Self: ... + def fill(self, value: Any) -> None: ... + @property + def flat(self) -> flatiter[Self]: ... + + @overload # use the same output type as that of the underlying `generic` + def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + @overload # special casing for `StringDType`, which has no scalar type + def item( + self: ndarray[Any, dtypes.StringDType], + arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., + /, + *args: SupportsIndex, + ) -> str: ... + + @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` + def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... 
+ @overload + def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... + @overload + def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... + @overload + def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... + @overload + def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... + @overload + def tolist(self, /) -> Any: ... + + @overload + def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... + @overload + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... + + def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... + + def squeeze( + self, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + @overload + def transpose(self, axes: _ShapeLike | None, /) -> Self: ... + @overload + def transpose(self, *axes: SupportsIndex) -> Self: ... + + @overload + def all( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> np.bool: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> np.bool | NDArray[np.bool]: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> np.bool: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> np.bool | NDArray[np.bool]: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> None: ... + @overload + def partition( + self: NDArray[void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload + def argpartition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> NDArray[intp]: ... + @overload + def argpartition( + self: NDArray[void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> NDArray[intp]: ... 
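+
+    # Usage sketch for the partition methods above (illustrative):
+    #
+    #     a = np.array([7, 1, 5, 3])
+    #     a.partition(2)           # in place: a[2] is now the 3rd-smallest
+    #     idx = a.argpartition(2)  # same reordering as indices, NDArray[intp]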
+
+    #
+    def diagonal(
+        self,
+        offset: SupportsIndex = ...,
+        axis1: SupportsIndex = ...,
+        axis2: SupportsIndex = ...,
+    ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+
+    # 1D + 1D returns a scalar;
+    # all other cases with at least one non-0D array return an ndarray.
+    @overload
+    def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ...
+    @overload
+    def dot(self, b: ArrayLike, out: None = ...) -> Any: ...  # type: ignore[misc]
+    @overload
+    def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ...
+
+    # `nonzero()` is deprecated for 0d arrays/generics
+    def nonzero(self) -> tuple[NDArray[intp], ...]: ...
+
+    # `put` is technically available to `generic`,
+    # but is pointless as `generic`s are immutable
+    def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ...
+
+    @overload
+    def searchsorted(  # type: ignore[misc]
+        self,  # >= 1D array
+        v: _ScalarLike_co,  # 0D array-like
+        side: _SortSide = ...,
+        sorter: _ArrayLikeInt_co | None = ...,
+    ) -> intp: ...
+    @overload
+    def searchsorted(
+        self,  # >= 1D array
+        v: ArrayLike,
+        side: _SortSide = ...,
+        sorter: _ArrayLikeInt_co | None = ...,
+    ) -> NDArray[intp]: ...
+
+    def sort(
+        self,
+        axis: SupportsIndex = ...,
+        kind: _SortKind | None = ...,
+        order: str | Sequence[str] | None = ...,
+        *,
+        stable: bool | None = ...,
+    ) -> None: ...
+
+    @overload
+    def trace(
+        self,  # >= 2D array
+        offset: SupportsIndex = ...,
+        axis1: SupportsIndex = ...,
+        axis2: SupportsIndex = ...,
+        dtype: DTypeLike = ...,
+        out: None = ...,
+    ) -> Any: ...
+    @overload
+    def trace(
+        self,  # >= 2D array
+        offset: SupportsIndex = ...,
+        axis1: SupportsIndex = ...,
+        axis2: SupportsIndex = ...,
+        dtype: DTypeLike = ...,
+        out: _ArrayT = ...,
+    ) -> _ArrayT: ...
+
+    @overload
+    def take(  # type: ignore[misc]
+        self: NDArray[_ScalarT],
+        indices: _IntLike_co,
+        axis: SupportsIndex | None = ...,
+        out: None = ...,
+        mode: _ModeKind = ...,
+    ) -> _ScalarT: ...
+    @overload
+    def take(  # type: ignore[misc]
+        self,
+        indices: _ArrayLikeInt_co,
+        axis: SupportsIndex | None = ...,
+        out: None = ...,
+        mode: _ModeKind = ...,
+    ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+    @overload
+    def take(
+        self,
+        indices: _ArrayLikeInt_co,
+        axis: SupportsIndex | None = ...,
+        out: _ArrayT = ...,
+        mode: _ModeKind = ...,
+    ) -> _ArrayT: ...
+
+    @overload
+    def repeat(
+        self,
+        repeats: _ArrayLikeInt_co,
+        axis: None = None,
+    ) -> ndarray[tuple[int], _DTypeT_co]: ...
+    @overload
+    def repeat(
+        self,
+        repeats: _ArrayLikeInt_co,
+        axis: SupportsIndex,
+    ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+
+    def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ...
+    def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ...
+
+    # NOTE: reshape also accepts negative integers, so we can't use integer literals
+    @overload  # (None)
+    def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ...
+    @overload  # (empty_sequence)
+    def reshape(  # type: ignore[overload-overlap]  # mypy false positive
+        self,
+        shape: Sequence[Never],
+        /,
+        *,
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[tuple[()], _DTypeT_co]: ...
+    @overload  # (() | (int) | (int, int) | ....)  # up to 8-d
+    def reshape(
+        self,
+        shape: _AnyShapeT,
+        /,
+        *,
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[_AnyShapeT, _DTypeT_co]: ...
+ @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload + def astype( + self, + dtype: DTypeLike, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, dtype]: ... + + # + @overload # () + def view(self, /) -> Self: ... + @overload # (dtype: T) + def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... + @overload # (dtype: dtype[T]) + def view(self, /, dtype: _DTypeLike[_ScalarT]) -> NDArray[_ScalarT]: ... + @overload # (type: T) + def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + @overload # (_: T) + def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... + @overload # (dtype: ?) + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... + @overload # (dtype: ?, type: type[T]) + def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + + def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... + @overload + def getfield(self, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + @overload + def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + + def __index__(self: NDArray[integer], /) -> int: ... + def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + + def __len__(self) -> int: ... + def __contains__(self, value: object, /) -> builtins.bool: ... + + # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. + @overload + def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ... 
+ @overload # == 1-d & dtype[T \ object_] + def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + @overload # >= 2-d + def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... + @overload # ?-d + def __iter__(self, /) -> Iterator[Any]: ... + + # + @overload + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __le__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... 
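+
+    # Note (illustrative): the rich comparisons above are element-wise and
+    # produce boolean arrays, not a single `bool`:
+    #
+    #     x = np.array([1, 2, 3])
+    #     x < 2   # NDArray[np.bool]: array([ True, False, False])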
+ + # Unary ops + + # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... + @overload + def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... + @overload + def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + + # Binary ops + + # TODO: Support the "1d @ 1d -> scalar" case + @overload + def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __matmul__ + def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
+ @overload + def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __mod__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __mod__ + def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + @overload + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + @overload + def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + @overload # signature equivalent to __divmod__ + def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + @overload + def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + @overload + def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
+ @overload + def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __add__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __add__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload + def __add__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __add__ + def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
+ @overload + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __radd__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __radd__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload + def __radd__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... 
+ @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... + @overload + def __sub__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
+ @overload + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __mul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __mul__ + def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rmul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
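Two runtime behaviours encoded in the arithmetic overloads above are easy to miss: subtracting boolean arrays is rejected outright (the `NoReturn` overloads on `__sub__`/`__rsub__`), and `timedelta64` arrays scale under float multiplication while `datetime64` only combines with `timedelta64`. An illustrative sketch of both (not part of the vendored file; assumes NumPy >= 2.0):

    import numpy as np

    td = np.array([1, 2], dtype="timedelta64[s]")
    print((td * 2.5).dtype)                   # timedelta64[s]: float scaling stays timedelta
    print(np.datetime64("2025-01-01") + td)   # datetime64 + timedelta64 -> datetime64

    try:
        np.array([True]) - np.array([False])  # the overload typed as NoReturn
    except TypeError as e:
        print(e)                              # NumPy points to bitwise_xor instead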
+ + @overload + def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... + @overload + def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... + @overload + def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... + @overload + def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... + @overload + def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... + @overload + def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... + @overload + def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... 
+ @overload + def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... + @overload + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... + @overload + def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... + @overload + def __pow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... + @overload + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... + @overload + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... + @overload + def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + @overload + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... + @overload + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... + @overload + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... + @overload + def __rpow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... + @overload + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... 
# type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... + @overload + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... + @overload + def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + @overload + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __and__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[misc] + @overload + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rand__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __xor__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __or__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __ror__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # `np.generic` does not support inplace operations + + # NOTE: Inplace ops generally use "same_kind" casting w.r.t. to the left + # operand. An exception to this rule are unsigned integers though, which + # also accepts a signed integer for the right operand as long it is a 0D + # object and its value is >= 0 + # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't + # work, as this will lead to `false negatives` when using these inplace ops. + @overload + def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ @overload + def __iadd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + @overload + def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + @overload + def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, / + ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + @overload + def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + @overload + def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__imod__` + @overload + def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__ifloordiv__` + @overload + def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imod__( + self: NDArray[timedelta64], + other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], + /, + ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ @overload + def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__irshift__` + @overload + def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__ilshift__` + @overload + def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__ixor__` and `__ior__` + @overload + def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__iand__` and `__ior__` + @overload + def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__iand__` and `__ixor__` + @overload + def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + @overload + def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imatmul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + def __dlpack__( + self: NDArray[number], + /, + *, + stream: int | Any | None = None, + max_version: tuple[int, int] | None = None, + dl_device: tuple[int, int] | None = None, + copy: builtins.bool | None = None, + ) -> CapsuleType: ... + def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + +# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, +# the `@abstractmethod` decorator is herein used to (forcefully) deny +# the creation of `np.generic` instances. +# The `# type: ignore` comments are necessary to silence mypy errors regarding +# the missing `ABCMeta` metaclass. +# See https://github.com/numpy/numpy-stubs/pull/80 for more details. +class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): + @abstractmethod + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def __hash__(self) -> int: ... + @overload + def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... 
+ @overload + def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + @property + def base(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def size(self) -> L[1]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def strides(self) -> tuple[()]: ... + @property + def flat(self) -> flatiter[ndarray[tuple[int], dtype[Self]]]: ... + + @overload + def item(self, /) -> _ItemT_co: ... + @overload + def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + def tolist(self, /) -> _ItemT_co: ... + + def byteswap(self, inplace: L[False] = ...) -> Self: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> _ScalarT: ... + @overload + def astype( + self, + dtype: DTypeLike, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> Any: ... + + # NOTE: `view` will perform a 0D->scalar cast, + # thus the array `type` is irrelevant to the output type + @overload + def view(self, type: type[NDArray[Any]] = ...) -> Self: ... + @overload + def view( + self, + dtype: _DTypeLike[_ScalarT], + type: type[NDArray[Any]] = ..., + ) -> _ScalarT: ... + @overload + def view( + self, + dtype: DTypeLike, + type: type[NDArray[Any]] = ..., + ) -> Any: ... + + @overload + def getfield( + self, + dtype: _DTypeLike[_ScalarT], + offset: SupportsIndex = ... + ) -> _ScalarT: ... + @overload + def getfield( + self, + dtype: DTypeLike, + offset: SupportsIndex = ... + ) -> Any: ... + + @overload + def take( # type: ignore[misc] + self, + indices: _IntLike_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> Self: ... + @overload + def take( # type: ignore[misc] + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> NDArray[Self]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: _ArrayT = ..., + mode: _ModeKind = ..., + ) -> _ArrayT: ... + + def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + + @overload # (() | []) + def reshape( + self, + shape: tuple[()] | list[Never], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self: ... + @overload # ((1, *(1, ...))@_ShapeT) + def reshape( + self, + shape: _1NShapeT, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_1NShapeT, dtype[Self]]: ... + @overload # (Sequence[index, ...]) # not recommended + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + @overload # _(index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1]], dtype[Self]]: ... 
+ @overload # _(index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index, index, *index) # ndim >= 5 + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + size5: SupportsIndex, + /, + *sizes6_: SupportsIndex, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... + + def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... + def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... + + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _dtype[Self]: ... + +class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): + @abstractmethod + def __init__(self, value: _NumberItemT_co, /) -> None: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + def __abs__(self) -> Self: ... 
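`number` pins the unary operators to `Self`, and the binary operators that follow are declared as attributes typed with callback protocols (`_NumberOp`, `_ComparisonOp*`) rather than as methods, so one declaration is reused across every concrete scalar type. At runtime this is ordinary scalar arithmetic that preserves the concrete type; a sketch (illustrative, assumes NumPy >= 2.0):

    import numpy as np

    x = np.float32(1.5)
    print(type(-x))       # numpy.float32: unary ops return Self
    print(type(x * 2))    # numpy.float32: Python ints promote "weakly" (NEP 50)
    print(x < 2.0)        # True, via the _ComparisonOpLT protocol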
+ + __add__: _NumberOp + __radd__: _NumberOp + __sub__: _NumberOp + __rsub__: _NumberOp + __mul__: _NumberOp + __rmul__: _NumberOp + __floordiv__: _NumberOp + __rfloordiv__: _NumberOp + __pow__: _NumberOp + __rpow__: _NumberOp + __truediv__: _NumberOp + __rtruediv__: _NumberOp + + __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + +class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): + @property + def itemsize(self) -> L[1]: ... + @property + def nbytes(self) -> L[1]: ... + @property + def real(self) -> Self: ... + @property + def imag(self) -> np.bool[L[False]]: ... + + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __init__(self: np.bool[builtins.bool], value: Never, /) -> None: ... + @overload + def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... + @overload + def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... + @overload + def __init__(self: np.bool[builtins.bool], value: object, /) -> None: ... + + def __bool__(self, /) -> _BoolItemT_co: ... + @overload + def __int__(self: np.bool[L[False]], /) -> L[0]: ... + @overload + def __int__(self: np.bool[L[True]], /) -> L[1]: ... + @overload + def __int__(self, /) -> L[0, 1]: ... + def __abs__(self) -> Self: ... + + @overload + def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + @overload + def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __invert__(self, /) -> np.bool: ... + + __add__: _BoolOp[np.bool] + __radd__: _BoolOp[np.bool] + __sub__: _BoolSub + __rsub__: _BoolSub + __mul__: _BoolOp[np.bool] + __rmul__: _BoolOp[np.bool] + __truediv__: _BoolTrueDiv + __rtruediv__: _BoolTrueDiv + __floordiv__: _BoolOp[int8] + __rfloordiv__: _BoolOp[int8] + __pow__: _BoolOp[int8] + __rpow__: _BoolOp[int8] + + __lshift__: _BoolBitOp[int8] + __rlshift__: _BoolBitOp[int8] + __rshift__: _BoolBitOp[int8] + __rrshift__: _BoolBitOp[int8] + + @overload + def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + @overload + def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __and__(self, other: int, /) -> np.bool | intp: ... + __rand__ = __and__ + + @overload + def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + @overload + def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __xor__(self, other: int, /) -> np.bool | intp: ... + __rxor__ = __xor__ + + @overload + def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... 
+ @overload + def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __or__(self, other: int, /) -> np.bool | intp: ... + __ror__ = __or__ + + __mod__: _BoolMod + __rmod__: _BoolMod + __divmod__: _BoolDivMod + __rdivmod__: _BoolDivMod + + __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + +# NOTE: This should _not_ be `Final` or a `TypeAlias` +bool_ = bool + +# NOTE: The `object_` constructor returns the passed object, so instances with type +# `object_` cannot exists (at runtime). +# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't +# be made generic. +@final +class object_(_RealMixin, generic): + @overload + def __new__(cls, nothing_to_see_here: None = None, /) -> None: ... # type: ignore[misc] + @overload + def __new__(cls, stringy: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + @overload + def __new__(cls, array: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + @overload + def __new__(cls, sequence: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + @overload + def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] + @overload # catch-all + def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] + def __init__(self, value: object = ..., /) -> None: ... + def __hash__(self, /) -> int: ... + def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ + def __call__(self, /, *args: object, **kwargs: object) -> Any: ... + + if sys.version_info >= (3, 12): + def __release_buffer__(self, buffer: memoryview, /) -> None: ... + +class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): + @abstractmethod + def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + + # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes + def bit_count(self, /) -> int: ... + def __index__(self, /) -> int: ... + def __invert__(self, /) -> Self: ... + + __truediv__: _IntTrueDiv[_NBit] + __rtruediv__: _IntTrueDiv[_NBit] + def __mod__(self, value: _IntLike_co, /) -> integer: ... + def __rmod__(self, value: _IntLike_co, /) -> integer: ... + # Ensure that objects annotated as `integer` support bit-wise operations + def __lshift__(self, other: _IntLike_co, /) -> integer: ... + def __rlshift__(self, other: _IntLike_co, /) -> integer: ... + def __rshift__(self, other: _IntLike_co, /) -> integer: ... + def __rrshift__(self, other: _IntLike_co, /) -> integer: ... + def __and__(self, other: _IntLike_co, /) -> integer: ... + def __rand__(self, other: _IntLike_co, /) -> integer: ... + def __or__(self, other: _IntLike_co, /) -> integer: ... + def __ror__(self, other: _IntLike_co, /) -> integer: ... + def __xor__(self, other: _IntLike_co, /) -> integer: ... + def __rxor__(self, other: _IntLike_co, /) -> integer: ... + +class signedinteger(integer[_NBit1]): + def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... 
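`integer` routes true division through `_IntTrueDiv`, so dividing integer scalars always leaves the integer domain, while floor division and the bit-wise operators stay integral; `signedinteger` then specialises every operator to its own bit width through the `_SignedInt*` protocols that follow. A sketch of the corresponding runtime behaviour (illustrative, assumes NumPy >= 2.0):

    import numpy as np

    i = np.int32(7)
    print(type(i / 2))    # numpy.float64: true division promotes out of integer
    print(type(i // 2))   # numpy.int32: floor division keeps the bit width
    print(i << 1)         # 14, still a signed integer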
+ + __add__: _SignedIntOp[_NBit1] + __radd__: _SignedIntOp[_NBit1] + __sub__: _SignedIntOp[_NBit1] + __rsub__: _SignedIntOp[_NBit1] + __mul__: _SignedIntOp[_NBit1] + __rmul__: _SignedIntOp[_NBit1] + __floordiv__: _SignedIntOp[_NBit1] + __rfloordiv__: _SignedIntOp[_NBit1] + __pow__: _SignedIntOp[_NBit1] + __rpow__: _SignedIntOp[_NBit1] + __lshift__: _SignedIntBitOp[_NBit1] + __rlshift__: _SignedIntBitOp[_NBit1] + __rshift__: _SignedIntBitOp[_NBit1] + __rrshift__: _SignedIntBitOp[_NBit1] + __and__: _SignedIntBitOp[_NBit1] + __rand__: _SignedIntBitOp[_NBit1] + __xor__: _SignedIntBitOp[_NBit1] + __rxor__: _SignedIntBitOp[_NBit1] + __or__: _SignedIntBitOp[_NBit1] + __ror__: _SignedIntBitOp[_NBit1] + __mod__: _SignedIntMod[_NBit1] + __rmod__: _SignedIntMod[_NBit1] + __divmod__: _SignedIntDivMod[_NBit1] + __rdivmod__: _SignedIntDivMod[_NBit1] + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = intp +long = signedinteger[_NBitLong] +longlong = signedinteger[_NBitLongLong] + +class unsignedinteger(integer[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + + __add__: _UnsignedIntOp[_NBit1] + __radd__: _UnsignedIntOp[_NBit1] + __sub__: _UnsignedIntOp[_NBit1] + __rsub__: _UnsignedIntOp[_NBit1] + __mul__: _UnsignedIntOp[_NBit1] + __rmul__: _UnsignedIntOp[_NBit1] + __floordiv__: _UnsignedIntOp[_NBit1] + __rfloordiv__: _UnsignedIntOp[_NBit1] + __pow__: _UnsignedIntOp[_NBit1] + __rpow__: _UnsignedIntOp[_NBit1] + __lshift__: _UnsignedIntBitOp[_NBit1] + __rlshift__: _UnsignedIntBitOp[_NBit1] + __rshift__: _UnsignedIntBitOp[_NBit1] + __rrshift__: _UnsignedIntBitOp[_NBit1] + __and__: _UnsignedIntBitOp[_NBit1] + __rand__: _UnsignedIntBitOp[_NBit1] + __xor__: _UnsignedIntBitOp[_NBit1] + __rxor__: _UnsignedIntBitOp[_NBit1] + __or__: _UnsignedIntBitOp[_NBit1] + __ror__: _UnsignedIntBitOp[_NBit1] + __mod__: _UnsignedIntMod[_NBit1] + __rmod__: _UnsignedIntMod[_NBit1] + __divmod__: _UnsignedIntDivMod[_NBit1] + __rdivmod__: _UnsignedIntDivMod[_NBit1] + +uint8: TypeAlias = unsignedinteger[_8Bit] +uint16: TypeAlias = unsignedinteger[_16Bit] +uint32: TypeAlias = unsignedinteger[_32Bit] +uint64: TypeAlias = unsignedinteger[_64Bit] + +ubyte: TypeAlias = unsignedinteger[_NBitByte] +ushort: TypeAlias = unsignedinteger[_NBitShort] +uintc: TypeAlias = unsignedinteger[_NBitIntC] +uintp: TypeAlias = unsignedinteger[_NBitIntP] +uint: TypeAlias = uintp +ulong: TypeAlias = unsignedinteger[_NBitLong] +ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): + @abstractmethod + def __init__(self, value: _InexactItemT_co | None = ..., /) -> None: ... + +class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): + def __init__(self, value: _ConvertibleToFloat | None = ..., /) -> None: ... 
+ + __add__: _FloatOp[_NBit1] + __radd__: _FloatOp[_NBit1] + __sub__: _FloatOp[_NBit1] + __rsub__: _FloatOp[_NBit1] + __mul__: _FloatOp[_NBit1] + __rmul__: _FloatOp[_NBit1] + __truediv__: _FloatOp[_NBit1] + __rtruediv__: _FloatOp[_NBit1] + __floordiv__: _FloatOp[_NBit1] + __rfloordiv__: _FloatOp[_NBit1] + __pow__: _FloatOp[_NBit1] + __rpow__: _FloatOp[_NBit1] + __mod__: _FloatMod[_NBit1] + __rmod__: _FloatMod[_NBit1] + __divmod__: _FloatDivMod[_NBit1] + __rdivmod__: _FloatDivMod[_NBit1] + + # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes + def is_integer(self, /) -> builtins.bool: ... + def as_integer_ratio(self, /) -> tuple[int, int]: ... + +float16: TypeAlias = floating[_16Bit] +float32: TypeAlias = floating[_32Bit] + +# either a C `double`, `float`, or `longdouble` +class float64(floating[_64Bit], float): # type: ignore[misc] + def __new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ... + + # + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + # overrides for `floating` and `builtins.float` compatibility (`_RealMixin` doesn't work) + @property + def real(self) -> Self: ... + @property + def imag(self) -> Self: ... + def conjugate(self) -> Self: ... + def __getformat__(self, typestr: L["double", "float"], /) -> str: ... + def __getnewargs__(self, /) -> tuple[float]: ... + + # float64-specific operator overrides + @overload + def __add__(self, other: _Float64_co, /) -> float64: ... + @overload + def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __add__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __radd__(self, other: _Float64_co, /) -> float64: ... + @overload + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __radd__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __sub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __sub__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rsub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rsub__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __mul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __mul__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rmul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
+ @overload + def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rmul__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __truediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __truediv__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __floordiv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... + @overload + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + @overload + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... + @overload + def __rpow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + def __mod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] + def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] + + def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] + def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] + +half: TypeAlias = floating[_NBitHalf] +single: TypeAlias = floating[_NBitSingle] +double: TypeAlias = floating[_NBitDouble] +longdouble: TypeAlias = floating[_NBitLongDouble] + +# The main reason for `complexfloating` having two typevars is cosmetic. 
+# It is used to clarify why `complex128`'s precision is `_64Bit`, the latter
+# describing the two 64-bit floats representing its real and imaginary components.
+
+class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]):
+    @overload
+    def __init__(
+        self,
+        real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ...,
+        imag: complex | SupportsFloat | SupportsIndex = ...,
+        /,
+    ) -> None: ...
+    @overload
+    def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ...
+
+    @property
+    def real(self) -> floating[_NBit1]: ...  # type: ignore[override]
+    @property
+    def imag(self) -> floating[_NBit2]: ...  # type: ignore[override]
+
+    # NOTE: `__complex__` is technically defined in the concrete subtypes
+    def __complex__(self, /) -> complex: ...
+    def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ...  # type: ignore[override]
+
+    @overload
+    def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload
+    def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload
+    def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload
+    def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload + def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __pow__( + self, other: complex | float64 | complex128, mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __pow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + @overload + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rpow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + +complex64: TypeAlias = complexfloating[_32Bit, _32Bit] + +class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] + @overload + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., + imag: complex | SupportsFloat | SupportsIndex = ..., + /, + ) -> Self: ... + @overload + def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ... + + # + @property + def itemsize(self) -> L[16]: ... + @property + def nbytes(self) -> L[16]: ... + + # overrides for `floating` and `builtins.float` compatibility + @property + def real(self) -> float64: ... + @property + def imag(self) -> float64: ... + def conjugate(self) -> Self: ... + def __abs__(self) -> float64: ... # type: ignore[override] + def __getnewargs__(self, /) -> tuple[float, float]: ... + + # complex128-specific operator overrides + @overload + def __add__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __sub__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __mul__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __truediv__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + @overload + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... 
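The widening encoded by these overloads mirrors NumPy's runtime promotion rules: mixing precisions promotes toward the wider component, which is why the mixed overloads return `complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]`. A minimal sketch of the runtime behavior these stubs describe (assuming NumPy >= 2.0; results shown as comments):

    import numpy as np

    c64 = np.complex64(1 + 2j)    # 32-bit real and imaginary components
    c128 = np.complex128(3 - 1j)  # 64-bit real and imaginary components

    # Mixed precision widens: complex64 + complex128 is complex128 at runtime,
    # matching the `_NBit | _64Bit` unions in the overloads above.
    mixed = c64 + c128
    assert isinstance(mixed, np.complex128)

    # abs() drops to the matching floating type, per `__abs__` above, and
    # `real`/`imag` are typed as `floating[_NBit]` rather than complex.
    assert isinstance(abs(c128), np.float64)
    assert isinstance(c128.real, np.float64)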
+ +csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] +cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] +clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] + +class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + @overload + def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ... + @overload + def __init__(self: timedelta64[L[0]], /) -> None: ... + @overload + def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ... + @overload + def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + @overload + def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + @overload + def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + @overload + def __init__( + self: timedelta64[dt.timedelta], + value: dt.timedelta | _IntLike_co, + format: _TimeUnitSpec[_NativeTD64Unit] = ..., + /, + ) -> None: ... + @overload + def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... + + # inherited at runtime from `signedinteger` + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self: timedelta64[int], /) -> int: ... + def __float__(self: timedelta64[int], /) -> float: ... + + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __abs__(self, /) -> Self: ... + + @overload + def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + @overload + def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + __radd__ = __add__ + + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... + __rmul__ = __mul__ + + @overload + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self, x: timedelta64, /) -> timedelta64: ... 
+ + # the L[0] makes __mod__ non-commutative, which the first two overloads reflect + @overload + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + + @overload + def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... + + @overload + def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + @overload + def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... 
+ @overload + def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + + @overload + def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... + @overload + def __truediv__(self, b: timedelta64, /) -> float64: ... + @overload + def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... + @overload + def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... + @overload + def __rtruediv__(self, a: timedelta64, /) -> float64: ... + + @overload + def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... + @overload + def __floordiv__(self, b: timedelta64, /) -> int64: ... + @overload + def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... + @overload + def __rfloordiv__(self, a: timedelta64, /) -> int64: ... + + __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co] + __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co] + __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co] + __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co] + +class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + @overload + def __init__(self, value: datetime64[_DT64ItemT_co], /) -> None: ... + @overload + def __init__(self: datetime64[_AnyDT64Arg], value: _AnyDT64Arg, /) -> None: ... + @overload + def __init__(self: datetime64[None], value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> None: ... + @overload + def __init__(self: datetime64[dt.datetime], value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> None: ... + @overload + def __init__(self: datetime64[dt.date], value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> None: ... + @overload + def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + @overload + def __init__( + self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> None: ... + @overload + def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ... + @overload + def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ... + + @overload + def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... + @overload + def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ... + @overload + def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... 
+ @overload + def __add__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ... + @overload + def __add__(self, x: datetime64[None], /) -> datetime64[None]: ... + @overload + def __add__(self, x: _TD64Like_co, /) -> datetime64: ... + __radd__ = __add__ + + @overload + def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + @overload + def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... + @overload + def __sub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... + @overload + def __sub__(self: datetime64[int], x: timedelta64, /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[int], /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[dt.date | int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __sub__(self: datetime64[dt.date], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + @overload + def __sub__(self, x: timedelta64[None], /) -> datetime64[None]: ... + @overload + def __sub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + @overload + def __sub__(self, x: _TD64Like_co, /) -> datetime64: ... + @overload + def __sub__(self, x: datetime64, /) -> timedelta64: ... + + @overload + def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + @overload + def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... + @overload + def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + @overload + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + @overload + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + @overload + def __rsub__(self, x: datetime64, /) -> timedelta64: ... + + __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co] + __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co] + __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co] + __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co] + +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... + +class void(flexible[bytes | tuple[Any, ...]]): + @overload + def __init__(self, value: _IntLike_co | bytes, /, dtype: None = None) -> None: ... + @overload + def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ... + + @overload + def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... + @overload + def __getitem__(self, key: list[str], /) -> void: ... + def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ... 
+
+    def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ...
+
+class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]):
+    @abstractmethod
+    def __init__(self, value: _CharacterItemT_co = ..., /) -> None: ...
+
+# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart
+
+class bytes_(character[bytes], bytes):
+    @overload
+    def __new__(cls, o: object = ..., /) -> Self: ...
+    @overload
+    def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ...
+
+    #
+    @overload
+    def __init__(self, o: object = ..., /) -> None: ...
+    @overload
+    def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ...
+
+    #
+    def __bytes__(self, /) -> bytes: ...
+
+class str_(character[str], str):
+    @overload
+    def __new__(cls, value: object = ..., /) -> Self: ...
+    @overload
+    def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ...
+
+    #
+    @overload
+    def __init__(self, value: object = ..., /) -> None: ...
+    @overload
+    def __init__(self, value: bytes, /, encoding: str = ..., errors: str = ...) -> None: ...
+
+# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
+@final
+class ufunc:
+    @property
+    def __name__(self) -> LiteralString: ...
+    @property
+    def __qualname__(self) -> LiteralString: ...
+    @property
+    def __doc__(self) -> str: ...
+    @property
+    def nin(self) -> int: ...
+    @property
+    def nout(self) -> int: ...
+    @property
+    def nargs(self) -> int: ...
+    @property
+    def ntypes(self) -> int: ...
+    @property
+    def types(self) -> list[LiteralString]: ...
+    # Broad return type because it has to encompass things like
+    #
+    # >>> np.logical_and.identity is True
+    # True
+    # >>> np.add.identity is 0
+    # True
+    # >>> np.sin.identity is None
+    # True
+    #
+    # and any user-defined ufuncs.
+    @property
+    def identity(self) -> Any: ...
+    # This is None for ufuncs and a string for gufuncs.
+    @property
+    def signature(self) -> LiteralString | None: ...
+
+    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
+    # The next four methods will always exist, but they will just
+    # raise a ValueError for ufuncs that don't accept two input
+    # arguments and return one output argument. Because of that we
+    # can't type them very precisely.
+    def reduce(self, /, *args: Any, **kwargs: Any) -> Any: ...
+    def accumulate(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
+    def reduceat(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
+    def outer(self, *args: Any, **kwargs: Any) -> Any: ...
+    # Similarly, `at` won't be defined for ufuncs that return multiple
+    # outputs, so we can't type it very precisely.
+    def at(self, /, *args: Any, **kwargs: Any) -> None: ...
+
+    #
+    def resolve_dtypes(
+        self,
+        /,
+        dtypes: tuple[dtype | type | None, ...],
+        *,
+        signature: tuple[dtype | None, ...] | None = None,
+        casting: _CastingKind | None = None,
+        reduction: builtins.bool = False,
+    ) -> tuple[dtype, ...]: ...
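The attributes stubbed above are plain runtime introspection, and the caveat about `reduce` and friends is observable: unary ufuncs reject them with a `ValueError`. An illustrative sketch (values in comments are from a recent NumPy):

    import numpy as np

    print(np.add.nin, np.add.nout)  # 2 1
    print(np.add.identity)          # 0
    print(np.logical_and.identity)  # True
    print(np.sin.identity)          # None -- unary ufuncs have no identity
    print(np.add.signature)         # None -- plain element-wise ufunc
    print(np.matmul.signature)      # (n?,k),(k,m?)->(n?,m?) -- gufunc

    try:
        np.sin.reduce([0.0, 1.0])   # sin is unary, so reduce is unsupported
    except ValueError as exc:
        print("reduce rejected:", exc)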
+ +# Parameters: `__name__`, `ntypes` and `identity` +absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] +add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] +arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] +bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] +bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] +ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] +conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] +conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] +copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] +cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] +cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] +degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] +divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] +equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] +exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] +exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] +expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] +fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] +float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] +floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] +fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] +fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None] +fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] +frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None] +gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] +hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] +isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] +isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] +isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] +lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] +less: _UFunc_Nin2_Nout1[L['less'], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] +log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] +log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] +log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] +log: _UFunc_Nin1_Nout1[L['log'], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], 
None, L["(m,n),(n)->(m)"]] +maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] +minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] +mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] +modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] +multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None] +positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] +power: _UFunc_Nin2_Nout1[L['power'], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] +radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] +remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] +rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] +sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] +signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] +sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] +sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] +spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] +square: _UFunc_Nin1_Nout1[L['square'], L[18], None] +subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] +tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] +tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] +true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] + +abs = absolute +acos = arccos +acosh = arccosh +asin = arcsin +asinh = arcsinh +atan = arctan +atanh = arctanh +atan2 = arctan2 +concat = concatenate +bitwise_left_shift = left_shift +bitwise_invert = invert +bitwise_right_shift = right_shift +permute_dims = transpose +pow = power + +class errstate: + def __init__( + self, + *, + call: _ErrCall = ..., + all: _ErrKind | None = ..., + divide: _ErrKind | None = ..., + over: _ErrKind | None = ..., + under: _ErrKind | None = ..., + invalid: _ErrKind | None = ..., + ) -> None: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + /, + ) -> None: ... + def __call__(self, func: _CallableT) -> _CallableT: ... + +# TODO: The type of each `__next__` and `iters` return-type depends +# on the length and dtype of `args`; we can't describe this behavior yet +# as we lack variadics (PEP 646). +@final +class broadcast: + def __new__(cls, *args: ArrayLike) -> broadcast: ... + @property + def index(self) -> int: ... + @property + def iters(self) -> tuple[flatiter[Any], ...]: ... + @property + def nd(self) -> int: ... + @property + def ndim(self) -> int: ... + @property + def numiter(self) -> int: ... + @property + def shape(self) -> _AnyShape: ... + @property + def size(self) -> int: ... + def __next__(self) -> tuple[Any, ...]: ... + def __iter__(self) -> Self: ... + def reset(self) -> None: ... + +@final +class busdaycalendar: + def __new__( + cls, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + ) -> busdaycalendar: ... + @property + def weekmask(self) -> NDArray[np.bool]: ... + @property + def holidays(self) -> NDArray[datetime64]: ... 
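A short sketch of how these three classes behave at runtime: `errstate` works both as a context manager and, via the `__call__` overload above, as a decorator; `broadcast` reports the combined iteration shape without allocating a result; and `busdaycalendar` coerces its inputs into the arrays its properties are typed as. Illustrative only, using the public NumPy API:

    import numpy as np

    with np.errstate(divide="ignore"):
        np.float64(1.0) / np.float64(0.0)  # inf, divide warning suppressed

    @np.errstate(invalid="raise")
    def strict_sqrt(x):
        return np.sqrt(x)  # FloatingPointError for negative input

    print(strict_sqrt(4.0))  # 2.0

    b = np.broadcast(np.empty((3, 1)), np.empty((1, 4)))
    print(b.shape, b.ndim, b.numiter)  # (3, 4) 2 2

    cal = np.busdaycalendar(weekmask="1111100", holidays=["2025-01-01"])
    print(cal.holidays)  # ['2025-01-01'] as datetime64[D]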
+ +class finfo(Generic[_FloatingT_co]): + dtype: Final[dtype[_FloatingT_co]] + bits: Final[int] + eps: Final[_FloatingT_co] + epsneg: Final[_FloatingT_co] + iexp: Final[int] + machep: Final[int] + max: Final[_FloatingT_co] + maxexp: Final[int] + min: Final[_FloatingT_co] + minexp: Final[int] + negep: Final[int] + nexp: Final[int] + nmant: Final[int] + precision: Final[int] + resolution: Final[_FloatingT_co] + smallest_subnormal: Final[_FloatingT_co] + @property + def smallest_normal(self) -> _FloatingT_co: ... + @property + def tiny(self) -> _FloatingT_co: ... + @overload + def __new__(cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]]) -> finfo[floating[_NBit1]]: ... + @overload + def __new__(cls, dtype: complex | type[complex]) -> finfo[float64]: ... + @overload + def __new__(cls, dtype: str) -> finfo[floating]: ... + +class iinfo(Generic[_IntegerT_co]): + dtype: Final[dtype[_IntegerT_co]] + kind: Final[LiteralString] + bits: Final[int] + key: Final[LiteralString] + @property + def min(self) -> int: ... + @property + def max(self) -> int: ... + + @overload + def __new__( + cls, dtype: _IntegerT_co | _DTypeLike[_IntegerT_co] + ) -> iinfo[_IntegerT_co]: ... + @overload + def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... + @overload + def __new__(cls, dtype: str) -> iinfo[Any]: ... + +@final +class nditer: + def __new__( + cls, + op: ArrayLike | Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = ..., + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + op_axes: Sequence[Sequence[SupportsIndex]] | None = ..., + itershape: _ShapeLike | None = ..., + buffersize: SupportsIndex = ..., + ) -> nditer: ... + def __enter__(self) -> nditer: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: ... + def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Any], ...]: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... + @overload + def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + def close(self) -> None: ... + def copy(self) -> nditer: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> builtins.bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... + @property + def dtypes(self) -> tuple[dtype, ...]: ... + @property + def finished(self) -> builtins.bool: ... + @property + def has_delayed_bufalloc(self) -> builtins.bool: ... + @property + def has_index(self) -> builtins.bool: ... + @property + def has_multi_index(self) -> builtins.bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> builtins.bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Any], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Any], ...]: ... 
+ @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Any], ...]: ... + +class memmap(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] + filename: str | None + offset: int + mode: str + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: type[uint8] = ..., + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: int | tuple[int, ...] | None = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[uint8]]: ... + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeLike[_ScalarT], + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: int | tuple[int, ...] | None = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[_ScalarT]]: ... + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: DTypeLike, + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: int | tuple[int, ...] | None = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype]: ... + def __array_finalize__(self, obj: object) -> None: ... + def __array_wrap__( + self, + array: memmap[_ShapeT_co, _DTypeT_co], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: builtins.bool = ..., + ) -> Any: ... + def flush(self) -> None: ... + +# TODO: Add a mypy plugin for managing functions whose output type is dependent +# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`) +class vectorize: + pyfunc: Callable[..., Any] + cache: builtins.bool + signature: LiteralString | None + otypes: LiteralString | None + excluded: set[int | str] + __doc__: str | None + def __init__( + self, + pyfunc: Callable[..., Any], + otypes: str | Iterable[DTypeLike] | None = ..., + doc: str | None = ..., + excluded: Iterable[int | str] | None = ..., + cache: builtins.bool = ..., + signature: str | None = ..., + ) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + +class poly1d: + @property + def variable(self) -> LiteralString: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Any]: ... + @property + def r(self) -> NDArray[Any]: ... + + @property + def coeffs(self) -> NDArray[Any]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Any]) -> None: ... + + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Any]) -> None: ... + + @property + def coef(self) -> NDArray[Any]: ... + @coef.setter + def coef(self, value: NDArray[Any]) -> None: ... + + @property + def coefficients(self) -> NDArray[Any]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Any]) -> None: ... + + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @overload + def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... + @overload + def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... + + @overload + def __call__(self, val: _ScalarLike_co) -> Any: ... + @overload + def __call__(self, val: poly1d) -> poly1d: ... + @overload + def __call__(self, val: ArrayLike) -> NDArray[Any]: ... + + def __init__( + self, + c_or_r: ArrayLike, + r: builtins.bool = ..., + variable: str | None = ..., + ) -> None: ... + def __len__(self) -> int: ... + def __neg__(self) -> poly1d: ... + def __pos__(self) -> poly1d: ... 
+ def __mul__(self, other: ArrayLike, /) -> poly1d: ... + def __rmul__(self, other: ArrayLike, /) -> poly1d: ... + def __add__(self, other: ArrayLike, /) -> poly1d: ... + def __radd__(self, other: ArrayLike, /) -> poly1d: ... + def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike, /) -> poly1d: ... + def __rsub__(self, other: ArrayLike, /) -> poly1d: ... + def __truediv__(self, other: ArrayLike, /) -> poly1d: ... + def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... + def __getitem__(self, val: int, /) -> Any: ... + def __setitem__(self, key: int, val: Any, /) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... + def integ( + self, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ..., + ) -> poly1d: ... + +class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] + + def __new__( + subtype, # pyright: ignore[reportSelfClsParameterName] + data: ArrayLike, + dtype: DTypeLike = ..., + copy: builtins.bool = ..., + ) -> matrix[_2D, Incomplete]: ... + def __array_finalize__(self, obj: object) -> None: ... + + @overload # type: ignore[override] + def __getitem__( + self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / + ) -> Incomplete: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> matrix[_2D, _DTypeT_co]: ... + @overload + def __getitem__(self: matrix[Any, dtype[void]], key: str, /) -> matrix[_2D, dtype]: ... + @overload + def __getitem__(self: matrix[Any, dtype[void]], key: list[str], /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __imul__(self, other: ArrayLike, /) -> Self: ... + + # + def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ipow__(self, other: ArrayLike, /) -> Self: ... # type: ignore[misc, override] + + # keep in sync with `prod` and `mean` + @overload # type: ignore[override] + def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... + @overload + def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `mean` + @overload # type: ignore[override] + def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... 
+ @overload + def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `prod` + @overload # type: ignore[override] + def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... + @overload + def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `var` + @overload # type: ignore[override] + def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def std( + self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 + ) -> matrix[_2D, Incomplete]: ... + @overload + def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def std( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `std` + @overload # type: ignore[override] + def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def var( + self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 + ) -> matrix[_2D, Incomplete]: ... + @overload + def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def var( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `all` + @overload # type: ignore[override] + def any(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def any(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... + @overload + def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `any` + @overload # type: ignore[override] + def all(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def all(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... + @overload + def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `min` and `ptp` + @overload # type: ignore[override] + def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... 
+    @overload
+    def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `max` and `ptp`
+    @overload  # type: ignore[override]
+    def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
+    @overload
+    def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `max` and `min`
+    @overload
+    def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
+    @overload
+    def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `argmin`
+    @overload  # type: ignore[override]
+    def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ...
+    @overload
+    def argmax(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ...
+    @overload
+    def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...
+    @overload
+    def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `argmax`
+    @overload  # type: ignore[override]
+    def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ...
+    @overload
+    def argmin(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ...
+    @overload
+    def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...
+    @overload
+    def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # the second overload handles the (rare) case that the matrix is not 2-d
+    @overload
+    def tolist(self: matrix[_2D, dtype[generic[_T]]]) -> list[list[_T]]: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+    @overload
+    def tolist(self) -> Incomplete: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # these three methods will at least return a `2-d` array of shape (1, n)
+    def squeeze(self, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ...
+    def ravel(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def flatten(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # matrix.T is inherited from _ScalarOrArrayCommon
+    def getT(self) -> Self: ...
+    @property
+    def I(self) -> matrix[_2D, Incomplete]: ...  # noqa: E743
+    def getI(self) -> matrix[_2D, Incomplete]: ...
+    @property
+    def A(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ...
+    def getA(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ...
+    @property
+    def A1(self) -> ndarray[_AnyShape, _DTypeT_co]: ...
+    def getA1(self) -> ndarray[_AnyShape, _DTypeT_co]: ...
+    @property
+    def H(self) -> matrix[_2D, _DTypeT_co]: ...
+    def getH(self) -> matrix[_2D, _DTypeT_co]: ...
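The overloads above encode `np.matrix`'s habit of staying two-dimensional: axis reductions return `(1, n)` or `(n, 1)` matrices, and `ravel`/`flatten` return `(1, n)` matrices rather than 1-d arrays, with `A1` as the escape hatch to a flat `ndarray`. A quick illustration (`np.matrix` itself is discouraged in favor of plain arrays):

    import numpy as np

    m = np.matrix([[1, 2], [3, 4]])
    print(m.sum())          # 10 -- full reduction gives a scalar
    print(m.sum(axis=0))    # [[4 6]] -- stays a (1, 2) matrix
    print(m.ravel().shape)  # (1, 4) -- still 2-d, unlike ndarray.ravel()
    print(m.A1.shape)       # (4,) -- .A1 flattens to a plain 1-d ndarray
    print(m.H)              # conjugate transpose; equals m.T for real data
    print(m.I @ m)          # approximately the 2x2 identity matrix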
+ +def from_dlpack( + x: _SupportsDLPack[None], + /, + *, + device: L["cpu"] | None = None, + copy: builtins.bool | None = None, +) -> NDArray[number | np.bool]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/__config__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/__config__.cpython-312.pyc new file mode 100644 index 0000000..358cdea Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/__config__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..0ed281f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_array_api_info.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_array_api_info.cpython-312.pyc new file mode 100644 index 0000000..e33e3c8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_array_api_info.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_configtool.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_configtool.cpython-312.pyc new file mode 100644 index 0000000..d278020 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_configtool.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_distributor_init.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_distributor_init.cpython-312.pyc new file mode 100644 index 0000000..94ac405 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_distributor_init.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc new file mode 100644 index 0000000..ef9f35c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_globals.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_globals.cpython-312.pyc new file mode 100644 index 0000000..fea8377 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_globals.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_pytesttester.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_pytesttester.cpython-312.pyc new file mode 100644 index 0000000..4107cd4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_pytesttester.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/conftest.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/conftest.cpython-312.pyc new file mode 100644 index 0000000..7617f6d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/conftest.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/dtypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/dtypes.cpython-312.pyc new file mode 100644 index 0000000..ff4a5d7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/dtypes.cpython-312.pyc 
differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..c04c7a6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/matlib.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/matlib.cpython-312.pyc new file mode 100644 index 0000000..c19c0c2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/matlib.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/version.cpython-312.pyc new file mode 100644 index 0000000..d95593b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/version.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_array_api_info.py b/.venv/lib/python3.12/site-packages/numpy/_array_api_info.py new file mode 100644 index 0000000..067e387 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_array_api_info.py @@ -0,0 +1,346 @@ +""" +Array API Inspection namespace + +This is the namespace for inspection functions as defined by the array API +standard. See +https://data-apis.org/array-api/latest/API_specification/inspection.html for +more details. + +""" +from numpy._core import ( + bool, + complex64, + complex128, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + intp, + uint8, + uint16, + uint32, + uint64, +) + + +class __array_namespace_info__: + """ + Get the array API inspection namespace for NumPy. + + The array API inspection namespace defines the following functions: + + - capabilities() + - default_device() + - default_dtypes() + - dtypes() + - devices() + + See + https://data-apis.org/array-api/latest/API_specification/inspection.html + for more details. + + Returns + ------- + info : ModuleType + The array API inspection namespace for NumPy. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + + __module__ = 'numpy' + + def capabilities(self): + """ + Return a dictionary of array API library capabilities. + + The resulting dictionary has the following keys: + + - **"boolean indexing"**: boolean indicating whether an array library + supports boolean indexing. Always ``True`` for NumPy. + + - **"data-dependent shapes"**: boolean indicating whether an array + library supports data-dependent output shapes. Always ``True`` for + NumPy. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html + for more details. + + See Also + -------- + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Returns + ------- + capabilities : dict + A dictionary of array API library capabilities. 
+ + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.capabilities() + {'boolean indexing': True, + 'data-dependent shapes': True, + 'max dimensions': 64} + + """ + return { + "boolean indexing": True, + "data-dependent shapes": True, + "max dimensions": 64, + } + + def default_device(self): + """ + The default device used for new NumPy arrays. + + For NumPy, this always returns ``'cpu'``. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Returns + ------- + device : str + The default device used for new NumPy arrays. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_device() + 'cpu' + + """ + return "cpu" + + def default_dtypes(self, *, device=None): + """ + The default data types used for new NumPy arrays. + + For NumPy, this always returns the following dictionary: + + - **"real floating"**: ``numpy.float64`` + - **"complex floating"**: ``numpy.complex128`` + - **"integral"**: ``numpy.intp`` + - **"indexing"**: ``numpy.intp`` + + Parameters + ---------- + device : str, optional + The device to get the default data types for. For NumPy, only + ``'cpu'`` is allowed. + + Returns + ------- + dtypes : dict + A dictionary describing the default data types used for new NumPy + arrays. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + return { + "real floating": dtype(float64), + "complex floating": dtype(complex128), + "integral": dtype(intp), + "indexing": dtype(intp), + } + + def dtypes(self, *, device=None, kind=None): + """ + The array API data types supported by NumPy. + + Note that this function only returns data types that are defined by + the array API. + + Parameters + ---------- + device : str, optional + The device to get the data types for. For NumPy, only ``'cpu'`` is + allowed. + kind : str or tuple of str, optional + The kind of data types to return. If ``None``, all data types are + returned. If a string, only data types of that kind are returned. + If a tuple, a dictionary containing the union of the given kinds + is returned. The following kinds are supported: + + - ``'bool'``: boolean data types (i.e., ``bool``). + - ``'signed integer'``: signed integer data types (i.e., ``int8``, + ``int16``, ``int32``, ``int64``). + - ``'unsigned integer'``: unsigned integer data types (i.e., + ``uint8``, ``uint16``, ``uint32``, ``uint64``). + - ``'integral'``: integer data types. Shorthand for ``('signed + integer', 'unsigned integer')``. + - ``'real floating'``: real-valued floating-point data types + (i.e., ``float32``, ``float64``). + - ``'complex floating'``: complex floating-point data types (i.e., + ``complex64``, ``complex128``). + - ``'numeric'``: numeric data types. Shorthand for ``('integral', + 'real floating', 'complex floating')``. + + Returns + ------- + dtypes : dict + A dictionary mapping the names of data types to the corresponding + NumPy data types. 
+ + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.dtypes(kind='signed integer') + {'int8': numpy.int8, + 'int16': numpy.int16, + 'int32': numpy.int32, + 'int64': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + if kind is None: + return { + "bool": dtype(bool), + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if kind == "bool": + return {"bool": bool} + if kind == "signed integer": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + } + if kind == "unsigned integer": + return { + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + } + if kind == "integral": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + } + if kind == "real floating": + return { + "float32": dtype(float32), + "float64": dtype(float64), + } + if kind == "complex floating": + return { + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if kind == "numeric": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if isinstance(kind, tuple): + res = {} + for k in kind: + res.update(self.dtypes(kind=k)) + return res + raise ValueError(f"unsupported kind: {kind!r}") + + def devices(self): + """ + The devices supported by NumPy. + + For NumPy, this always returns ``['cpu']``. + + Returns + ------- + devices : list of str + The devices supported by NumPy. 
+ + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.devices() + ['cpu'] + + """ + return ["cpu"] diff --git a/.venv/lib/python3.12/site-packages/numpy/_array_api_info.pyi b/.venv/lib/python3.12/site-packages/numpy/_array_api_info.pyi new file mode 100644 index 0000000..ee9f8a5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_array_api_info.pyi @@ -0,0 +1,207 @@ +from typing import ( + ClassVar, + Literal, + Never, + TypeAlias, + TypedDict, + TypeVar, + final, + overload, + type_check_only, +) + +import numpy as np + +_Device: TypeAlias = Literal["cpu"] +_DeviceLike: TypeAlias = _Device | None + +_Capabilities = TypedDict( + "_Capabilities", + { + "boolean indexing": Literal[True], + "data-dependent shapes": Literal[True], + }, +) + +_DefaultDTypes = TypedDict( + "_DefaultDTypes", + { + "real floating": np.dtype[np.float64], + "complex floating": np.dtype[np.complex128], + "integral": np.dtype[np.intp], + "indexing": np.dtype[np.intp], + }, +) + +_KindBool: TypeAlias = Literal["bool"] +_KindInt: TypeAlias = Literal["signed integer"] +_KindUInt: TypeAlias = Literal["unsigned integer"] +_KindInteger: TypeAlias = Literal["integral"] +_KindFloat: TypeAlias = Literal["real floating"] +_KindComplex: TypeAlias = Literal["complex floating"] +_KindNumber: TypeAlias = Literal["numeric"] +_Kind: TypeAlias = ( + _KindBool + | _KindInt + | _KindUInt + | _KindInteger + | _KindFloat + | _KindComplex + | _KindNumber +) + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_Permute1: TypeAlias = _T1 | tuple[_T1] +_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] +_Permute3: TypeAlias = ( + tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2] + | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] + | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] +) + +@type_check_only +class _DTypesBool(TypedDict): + bool: np.dtype[np.bool] + +@type_check_only +class _DTypesInt(TypedDict): + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + +@type_check_only +class _DTypesUInt(TypedDict): + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + +@type_check_only +class _DTypesInteger(_DTypesInt, _DTypesUInt): ... + +@type_check_only +class _DTypesFloat(TypedDict): + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + +@type_check_only +class _DTypesComplex(TypedDict): + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +@type_check_only +class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ... + +@type_check_only +class _DTypes(_DTypesBool, _DTypesNumber): ... + +@type_check_only +class _DTypesUnion(TypedDict, total=False): + bool: np.dtype[np.bool] + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +_EmptyDict: TypeAlias = dict[Never, Never] + +@final +class __array_namespace_info__: + __module__: ClassVar[Literal['numpy']] + + def capabilities(self) -> _Capabilities: ... + def default_device(self) -> _Device: ... 
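For orientation, the inspection namespace implemented above can be exercised directly. A minimal usage sketch, not part of the patch, assuming NumPy >= 2.0:

    import numpy as np

    info = np.__array_namespace_info__()
    # capabilities() answers the questions posed by the array API standard
    assert info.capabilities()["boolean indexing"] is True
    # NumPy only ever exposes the CPU device
    assert info.default_device() == "cpu" and info.devices() == ["cpu"]
    # the default indexing dtype is intp, per default_dtypes() above
    assert info.default_dtypes()["indexing"] == np.dtype(np.intp)
    # kind accepts a single string or a tuple; a tuple returns the union
    ints = info.dtypes(kind=("signed integer", "unsigned integer"))
    assert set(ints) == {"int8", "int16", "int32", "int64",
                         "uint8", "uint16", "uint32", "uint64"}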
+ def default_dtypes( + self, + *, + device: _DeviceLike = ..., + ) -> _DefaultDTypes: ... + def devices(self) -> list[_Device]: ... + + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: None = ..., + ) -> _DTypes: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindBool], + ) -> _DTypesBool: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindInt], + ) -> _DTypesInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindUInt], + ) -> _DTypesUInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindFloat], + ) -> _DTypesFloat: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindComplex], + ) -> _DTypesComplex: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: ( + _Permute1[_KindInteger] + | _Permute2[_KindInt, _KindUInt] + ), + ) -> _DTypesInteger: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: ( + _Permute1[_KindNumber] + | _Permute3[_KindInteger, _KindFloat, _KindComplex] + ), + ) -> _DTypesNumber: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: tuple[()], + ) -> _EmptyDict: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: tuple[_Kind, ...], + ) -> _DTypesUnion: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_configtool.py b/.venv/lib/python3.12/site-packages/numpy/_configtool.py new file mode 100644 index 0000000..db7831c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_configtool.py @@ -0,0 +1,39 @@ +import argparse +import sys +from pathlib import Path + +from .lib._utils_impl import get_include +from .version import __version__ + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument( + "--version", + action="version", + version=__version__, + help="Print the version and exit.", + ) + parser.add_argument( + "--cflags", + action="store_true", + help="Compile flag needed when using the NumPy headers.", + ) + parser.add_argument( + "--pkgconfigdir", + action="store_true", + help=("Print the pkgconfig directory in which `numpy.pc` is stored " + "(useful for setting $PKG_CONFIG_PATH)."), + ) + args = parser.parse_args() + if not sys.argv[1:]: + parser.print_help() + if args.cflags: + print("-I" + get_include()) + if args.pkgconfigdir: + _path = Path(get_include()) / '..' / 'lib' / 'pkgconfig' + print(_path.resolve()) + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.12/site-packages/numpy/_configtool.pyi b/.venv/lib/python3.12/site-packages/numpy/_configtool.pyi new file mode 100644 index 0000000..7e7363e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_configtool.pyi @@ -0,0 +1 @@ +def main() -> None: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__init__.py b/.venv/lib/python3.12/site-packages/numpy/_core/__init__.py new file mode 100644 index 0000000..d0da7e0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/__init__.py @@ -0,0 +1,186 @@ +""" +Contains the core of NumPy: ndarray, ufuncs, dtypes, etc. + +Please note that this module is private. All functions and objects +are available in the main ``numpy`` namespace - use that instead. 
+ +""" + +import os + +from numpy.version import version as __version__ + +# disables OpenBLAS affinity setting of the main thread that limits +# python threads or processes to one core +env_added = [] +for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: + if envkey not in os.environ: + os.environ[envkey] = '1' + env_added.append(envkey) + +try: + from . import multiarray +except ImportError as exc: + import sys + msg = """ + +IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! + +Importing the numpy C-extensions failed. This error can happen for +many reasons, often due to issues with your setup or how NumPy was +installed. + +We have compiled some common reasons and troubleshooting tips at: + + https://numpy.org/devdocs/user/troubleshooting-importerror.html + +Please note and check the following: + + * The Python version is: Python%d.%d from "%s" + * The NumPy version is: "%s" + +and make sure that they are the versions you expect. +Please carefully study the documentation linked above for further help. + +Original error was: %s +""" % (sys.version_info[0], sys.version_info[1], sys.executable, + __version__, exc) + raise ImportError(msg) from exc +finally: + for envkey in env_added: + del os.environ[envkey] +del envkey +del env_added +del os + +from . import umath + +# Check that multiarray,umath are pure python modules wrapping +# _multiarray_umath and not either of the old c-extension modules +if not (hasattr(multiarray, '_multiarray_umath') and + hasattr(umath, '_multiarray_umath')): + import sys + path = sys.modules['numpy'].__path__ + msg = ("Something is wrong with the numpy installation. " + "While importing we detected an older version of " + "numpy in {}. One method of fixing this is to repeatedly uninstall " + "numpy until none is found, then reinstall this version.") + raise ImportError(msg.format(path)) + +from . import numerictypes as nt +from .numerictypes import sctypeDict, sctypes + +multiarray.set_typeDict(nt.sctypeDict) +from . import ( + _machar, + einsumfunc, + fromnumeric, + function_base, + getlimits, + numeric, + shape_base, +) +from .einsumfunc import * +from .fromnumeric import * +from .function_base import * +from .getlimits import * + +# Note: module name memmap is overwritten by a class with same name +from .memmap import * +from .numeric import * +from .records import recarray, record +from .shape_base import * + +del nt + +# do this after everything else, to minimize the chance of this misleadingly +# appearing in an import-time traceback +# add these for module-freeze analysis (like PyInstaller) +from . 
import ( + _add_newdocs, + _add_newdocs_scalars, + _dtype, + _dtype_ctypes, + _internal, + _methods, +) +from .numeric import absolute as abs + +acos = numeric.arccos +acosh = numeric.arccosh +asin = numeric.arcsin +asinh = numeric.arcsinh +atan = numeric.arctan +atanh = numeric.arctanh +atan2 = numeric.arctan2 +concat = numeric.concatenate +bitwise_left_shift = numeric.left_shift +bitwise_invert = numeric.invert +bitwise_right_shift = numeric.right_shift +permute_dims = numeric.transpose +pow = numeric.power + +__all__ = [ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", + "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat", + "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray" +] +__all__ += numeric.__all__ +__all__ += function_base.__all__ +__all__ += getlimits.__all__ +__all__ += shape_base.__all__ +__all__ += einsumfunc.__all__ + + +def _ufunc_reduce(func): + # Report the `__name__`. pickle will try to find the module. Note that + # pickle supports for this `__name__` to be a `__qualname__`. It may + # make sense to add a `__qualname__` to ufuncs, to allow this more + # explicitly (Numba has ufuncs as attributes). + # See also: https://github.com/dask/distributed/issues/3450 + return func.__name__ + + +def _DType_reconstruct(scalar_type): + # This is a work-around to pickle type(np.dtype(np.float64)), etc. + # and it should eventually be replaced with a better solution, e.g. when + # DTypes become HeapTypes. + return type(dtype(scalar_type)) + + +def _DType_reduce(DType): + # As types/classes, most DTypes can simply be pickled by their name: + if not DType._legacy or DType.__module__ == "numpy.dtypes": + return DType.__name__ + + # However, user defined legacy dtypes (like rational) do not end up in + # `numpy.dtypes` as module and do not have a public class at all. + # For these, we pickle them by reconstructing them from the scalar type: + scalar_type = DType.type + return _DType_reconstruct, (scalar_type,) + + +def __getattr__(name): + # Deprecated 2022-11-22, NumPy 1.25. 
+ if name == "MachAr": + import warnings + warnings.warn( + "The `np._core.MachAr` is considered private API (NumPy 1.24)", + DeprecationWarning, stacklevel=2, + ) + return _machar.MachAr + raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") + + +import copyreg + +copyreg.pickle(ufunc, _ufunc_reduce) +copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct) + +# Unclutter namespace (must keep _*_reconstruct for unpickling) +del copyreg, _ufunc_reduce, _DType_reduce + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/__init__.pyi new file mode 100644 index 0000000..40d9c41 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/__init__.pyi @@ -0,0 +1,2 @@ +# NOTE: The `np._core` namespace is deliberately kept empty due to it +# being private diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..eee68ab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc new file mode 100644 index 0000000..7d03b1a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc new file mode 100644 index 0000000..c22395e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_asarray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_asarray.cpython-312.pyc new file mode 100644 index 0000000..79fc012 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_asarray.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype.cpython-312.pyc new file mode 100644 index 0000000..1ff83ec Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc new file mode 100644 index 0000000..434c6eb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_exceptions.cpython-312.pyc new file mode 100644 index 0000000..37814af Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_internal.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_internal.cpython-312.pyc new file mode 100644 index 0000000..3d1a0ec Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_internal.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_machar.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_machar.cpython-312.pyc new file mode 100644 index 0000000..b466096 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_machar.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_methods.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_methods.cpython-312.pyc new file mode 100644 index 0000000..6d03e14 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_methods.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc new file mode 100644 index 0000000..54369a2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc new file mode 100644 index 0000000..202bc7f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc new file mode 100644 index 0000000..760cbab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/arrayprint.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/arrayprint.cpython-312.pyc new file mode 100644 index 0000000..8117740 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/arrayprint.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/cversions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/cversions.cpython-312.pyc new file mode 100644 index 0000000..532c821 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/cversions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/defchararray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/defchararray.cpython-312.pyc new file mode 100644 index 0000000..2d68f6a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/defchararray.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc new file mode 100644 index 0000000..b17224a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc new file mode 100644 index 0000000..93f3f89 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/function_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/function_base.cpython-312.pyc new file mode 100644 index 0000000..d45dcd3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/function_base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/getlimits.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/getlimits.cpython-312.pyc new file mode 100644 index 0000000..18b6fb6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/getlimits.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/memmap.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/memmap.cpython-312.pyc new file mode 100644 index 0000000..05e2f99 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/memmap.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/multiarray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/multiarray.cpython-312.pyc new file mode 100644 index 0000000..29266a5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/multiarray.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numeric.cpython-312.pyc new file mode 100644 index 0000000..e79b272 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numeric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numerictypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numerictypes.cpython-312.pyc new file mode 100644 index 0000000..8c2feac Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numerictypes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/overrides.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/overrides.cpython-312.pyc new file mode 100644 index 0000000..d4a3473 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/overrides.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/printoptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/printoptions.cpython-312.pyc new file mode 100644 index 0000000..f070192 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/printoptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/records.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/records.cpython-312.pyc new file mode 100644 index 0000000..ac0b55c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/records.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/shape_base.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/shape_base.cpython-312.pyc new file mode 100644 index 0000000..6882a72 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/shape_base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/strings.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/strings.cpython-312.pyc new file mode 100644 index 0000000..0ae9211 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/strings.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/umath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/umath.cpython-312.pyc new file mode 100644 index 0000000..95fbac4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/umath.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.py b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.py new file mode 100644 index 0000000..8f5de4b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.py @@ -0,0 +1,6967 @@ +""" +This is only meant to add docs to objects defined in C-extension modules. +The purpose is to allow easier editing of the docstrings without +requiring a re-compile. + +NOTE: Many of the methods of ndarray have corresponding functions. + If you update these docstrings, please keep also the ones in + _core/fromnumeric.py, matrixlib/defmatrix.py up-to-date. + +""" + +from numpy._core.function_base import add_newdoc +from numpy._core.overrides import get_array_function_like_doc # noqa: F401 + +############################################################################### +# +# flatiter +# +# flatiter needs a toplevel description +# +############################################################################### + +add_newdoc('numpy._core', 'flatiter', + """ + Flat iterator object to iterate over arrays. + + A `flatiter` iterator is returned by ``x.flat`` for any array `x`. + It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in row-major, C-style order (the last + index varying the fastest). The iterator can also be indexed using + basic slicing or advanced indexing. + + See Also + -------- + ndarray.flat : Return a flat iterator over an array. + ndarray.flatten : Returns a flattened copy of an array. + + Notes + ----- + A `flatiter` iterator can not be constructed directly from Python code + by calling the `flatiter` constructor. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> type(fl) + <class 'numpy.flatiter'> + >>> for item in fl: + ... print(item) + ... + 0 + 1 + 2 + 3 + 4 + 5 + + >>> fl[2:4] + array([2, 3]) + + """) + +# flatiter attributes + +add_newdoc('numpy._core', 'flatiter', ('base', + """ + A reference to the array that is iterated over. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(5) + >>> fl = x.flat + >>> fl.base is x + True + + """)) + + +add_newdoc('numpy._core', 'flatiter', ('coords', + """ + An N-dimensional tuple of current coordinates. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> fl.coords + (0, 0) + >>> next(fl) + 0 + >>> fl.coords + (0, 1) + + """)) + + +add_newdoc('numpy._core', 'flatiter', ('index', + """ + Current flat index into the array.
+ + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> fl.index + 0 + >>> next(fl) + 0 + >>> fl.index + 1 + + """)) + +# flatiter functions + +add_newdoc('numpy._core', 'flatiter', ('__array__', + """__array__(type=None) Get array from iterator + + """)) + + +add_newdoc('numpy._core', 'flatiter', ('copy', + """ + copy() + + Get a copy of the iterator as a 1-D array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> fl = x.flat + >>> fl.copy() + array([0, 1, 2, 3, 4, 5]) + + """)) + + +############################################################################### +# +# nditer +# +############################################################################### + +add_newdoc('numpy._core', 'nditer', + """ + nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', + casting='safe', op_axes=None, itershape=None, buffersize=0) + + Efficient multi-dimensional iterator object to iterate over arrays. + To get started using this object, see the + :ref:`introductory guide to array iteration <arrays.nditer>`. + + Parameters + ---------- + op : ndarray or sequence of array_like + The array(s) to iterate over. + + flags : sequence of str, optional + Flags to control the behavior of the iterator. + + * ``buffered`` enables buffering when required. + * ``c_index`` causes a C-order index to be tracked. + * ``f_index`` causes a Fortran-order index to be tracked. + * ``multi_index`` causes a multi-index, or a tuple of indices + with one per iteration dimension, to be tracked. + * ``common_dtype`` causes all the operands to be converted to + a common data type, with copying or buffering as necessary. + * ``copy_if_overlap`` causes the iterator to determine if read + operands have overlap with write operands, and make temporary + copies as necessary to avoid overlap. False positives (needless + copying) are possible in some cases. + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to + be initialized before their values are copied into the buffers. + * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. + * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. + op_flags : list of list of str, optional + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required.
+ * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. + op_dtypes : dtype or tuple of dtype(s), optional + The required data type(s) of the operands. If copying or buffering + is enabled, the data will be converted to/from their original types. + order : {'C', 'F', 'A', 'K'}, optional + Controls the iteration order. 'C' means C order, 'F' means + Fortran order, 'A' means 'F' order if all the arrays are Fortran + contiguous, 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. This also + affects the element memory order of ``allocate`` operands, as they + are allocated to be compatible with iteration order. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy + or buffering. Setting this to 'unsafe' is not recommended, + as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + op_axes : list of list of ints, optional + If provided, is a list of ints or None for each operands. + The list of axes for an operand is a mapping from the dimensions + of the iterator to the dimensions of the operand. A value of + -1 can be placed for entries, causing that dimension to be + treated as `newaxis`. + itershape : tuple of ints, optional + The desired shape of the iterator. This allows ``allocate`` operands + with a dimension mapped by op_axes not corresponding to a dimension + of a different operand to get a value not equal to 1 for that + dimension. + buffersize : int, optional + When buffering is enabled, controls the size of the temporary + buffers. Set to 0 for the default value. + + Attributes + ---------- + dtypes : tuple of dtype(s) + The data types of the values provided in `value`. This may be + different from the operand data types if buffering is enabled. + Valid only before the iterator is closed. + finished : bool + Whether the iteration over the operands is finished or not. + has_delayed_bufalloc : bool + If True, the iterator was created with the ``delay_bufalloc`` flag, + and no reset() function was called on it yet. + has_index : bool + If True, the iterator was created with either the ``c_index`` or + the ``f_index`` flag, and the property `index` can be used to + retrieve it. + has_multi_index : bool + If True, the iterator was created with the ``multi_index`` flag, + and the property `multi_index` can be used to retrieve it. 
+ index + When the ``c_index`` or ``f_index`` flag was used, this property + provides access to the index. Raises a ValueError if accessed + and ``has_index`` is False. + iterationneedsapi : bool + Whether iteration requires access to the Python API, for example + if one of the operands is an object array. + iterindex : int + An index which matches the order of iteration. + itersize : int + Size of the iterator. + itviews + Structured view(s) of `operands` in memory, matching the reordered + and optimized iterator access pattern. Valid only before the iterator + is closed. + multi_index + When the ``multi_index`` flag was used, this property + provides access to the index. Raises a ValueError if accessed + accessed and ``has_multi_index`` is False. + ndim : int + The dimensions of the iterator. + nop : int + The number of iterator operands. + operands : tuple of operand(s) + The array(s) to be iterated over. Valid only before the iterator is + closed. + shape : tuple of ints + Shape tuple, the shape of the iterator. + value + Value of ``operands`` at current iteration. Normally, this is a + tuple of array scalars, but if the flag ``external_loop`` is used, + it is a tuple of one dimensional arrays. + + Notes + ----- + `nditer` supersedes `flatiter`. The iterator implementation behind + `nditer` is also exposed by the NumPy C API. + + The Python exposure supplies two iteration interfaces, one which follows + the Python iterator protocol, and another which mirrors the C-style + do-while pattern. The native Python approach is better in most cases, but + if you need the coordinates or index of an iterator, use the C-style pattern. + + Examples + -------- + Here is how we might write an ``iter_add`` function, using the + Python iterator protocol: + + >>> import numpy as np + + >>> def iter_add_py(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... for (a, b, c) in it: + ... addop(a, b, out=c) + ... return it.operands[2] + + Here is the same function, but following the C-style pattern: + + >>> def iter_add(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... while not it.finished: + ... addop(it[0], it[1], out=it[2]) + ... it.iternext() + ... return it.operands[2] + + Here is an example outer product function: + + >>> def outer_it(x, y, out=None): + ... mulop = np.multiply + ... it = np.nditer([x, y, out], ['external_loop'], + ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], + ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, + ... [-1] * x.ndim + list(range(y.ndim)), + ... None]) + ... with it: + ... for (a, b, c) in it: + ... mulop(a, b, out=c) + ... return it.operands[2] + + >>> a = np.arange(2)+1 + >>> b = np.arange(3)+1 + >>> outer_it(a,b) + array([[1, 2, 3], + [2, 4, 6]]) + + Here is an example function which operates like a "lambda" ufunc: + + >>> def luf(lamdaexpr, *args, **kwargs): + ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' + ... nargs = len(args) + ... op = (kwargs.get('out',None),) + args + ... it = np.nditer(op, ['buffered','external_loop'], + ... [['writeonly','allocate','no_broadcast']] + + ... [['readonly','nbo','aligned']]*nargs, + ... order=kwargs.get('order','K'), + ... casting=kwargs.get('casting','safe'), + ... buffersize=kwargs.get('buffersize',0)) + ... while not it.finished: + ... 
it[0] = lamdaexpr(*it[1:]) + ... it.iternext() + ... return it.operands[0] + + >>> a = np.arange(5) + >>> b = np.ones(5) + >>> luf(lambda i,j:i*i + j/2, a, b) + array([ 0.5, 1.5, 4.5, 9.5, 16.5]) + + If operand flags ``"writeonly"`` or ``"readwrite"`` are used the + operands may be views into the original data with the + `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a + context manager or the `nditer.close` method must be called before + using the result. The temporary data will be written back to the + original data when the :meth:`~object.__exit__` function is called + but not before: + + >>> a = np.arange(6, dtype='i4')[::-2] + >>> with np.nditer(a, [], + ... [['writeonly', 'updateifcopy']], + ... casting='unsafe', + ... op_dtypes=[np.dtype('f4')]) as i: + ... x = i.operands[0] + ... x[:] = [-1, -2, -3] + ... # a still unchanged here + >>> a, x + (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) + + It is important to note that once the iterator is exited, dangling + references (like `x` in the example) may or may not share data with + the original data `a`. If writeback semantics were active, i.e. if + `x.base.flags.writebackifcopy` is `True`, then exiting the iterator + will sever the connection between `x` and `a`, writing to `x` will + no longer write to `a`. If writeback semantics are not active, then + `x.data` will still point at some part of `a.data`, and writing to + one will affect the other. + + Context management and the `close` method appeared in version 1.15.0. + + """) + +# nditer methods + +add_newdoc('numpy._core', 'nditer', ('copy', + """ + copy() + + Get a copy of the iterator in its current state. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10) + >>> y = x + 1 + >>> it = np.nditer([x, y]) + >>> next(it) + (array(0), array(1)) + >>> it2 = it.copy() + >>> next(it2) + (array(1), array(2)) + + """)) + +add_newdoc('numpy._core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + +add_newdoc('numpy._core', 'nditer', ('debug_print', + """ + debug_print() + + Print the current state of the `nditer` instance and debug info to stdout. + + """)) + +add_newdoc('numpy._core', 'nditer', ('enable_external_loop', + """ + enable_external_loop() + + When the "external_loop" was not used during construction, but + is desired, this modifies the iterator to behave as if the flag + was specified. + + """)) + +add_newdoc('numpy._core', 'nditer', ('iternext', + """ + iternext() + + Check whether iterations are left, and perform a single internal iteration + without returning the result. Used in the C-style pattern do-while + pattern. For an example, see `nditer`. + + Returns + ------- + iternext : bool + Whether or not there are iterations left. + + """)) + +add_newdoc('numpy._core', 'nditer', ('remove_axis', + """ + remove_axis(i, /) + + Removes axis `i` from the iterator. Requires that the flag "multi_index" + be enabled. + + """)) + +add_newdoc('numpy._core', 'nditer', ('remove_multi_index', + """ + remove_multi_index() + + When the "multi_index" flag was specified, this removes it, allowing + the internal iteration structure to be optimized further. + + """)) + +add_newdoc('numpy._core', 'nditer', ('reset', + """ + reset() + + Reset the iterator to its initial state. 
+ + """)) + +add_newdoc('numpy._core', 'nested_iters', + """ + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \ + order="K", casting="safe", buffersize=0) + + Create nditers for use in nested loops + + Create a tuple of `nditer` objects which iterate in nested loops over + different axes of the op argument. The first iterator is used in the + outermost loop, the last in the innermost loop. Advancing one will change + the subsequent iterators to point at its new element. + + Parameters + ---------- + op : ndarray or sequence of array_like + The array(s) to iterate over. + + axes : list of list of int + Each item is used as an "op_axes" argument to an nditer + + flags, op_flags, op_dtypes, order, casting, buffersize (optional) + See `nditer` parameters of the same name + + Returns + ------- + iters : tuple of nditer + An nditer for each item in `axes`, outermost first + + See Also + -------- + nditer + + Examples + -------- + + Basic usage. Note how y is the "flattened" version of + [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified + the first iter's axes as [1] + + >>> import numpy as np + >>> a = np.arange(12).reshape(2, 3, 2) + >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) + >>> for x in i: + ... print(i.multi_index) + ... for y in j: + ... print('', j.multi_index, y) + (0,) + (0, 0) 0 + (0, 1) 1 + (1, 0) 6 + (1, 1) 7 + (1,) + (0, 0) 2 + (0, 1) 3 + (1, 0) 8 + (1, 1) 9 + (2,) + (0, 0) 4 + (0, 1) 5 + (1, 0) 10 + (1, 1) 11 + + """) + +add_newdoc('numpy._core', 'nditer', ('close', + """ + close() + + Resolve all writeback semantics in writeable operands. + + See Also + -------- + + :ref:`nditer-context-manager` + + """)) + + +############################################################################### +# +# broadcast +# +############################################################################### + +add_newdoc('numpy._core', 'broadcast', + """ + Produce an object that mimics broadcasting. + + Parameters + ---------- + in1, in2, ... : array_like + Input parameters. + + Returns + ------- + b : broadcast object + Broadcast the input parameters against one another, and + return an object that encapsulates the result. + Amongst others, it has ``shape`` and ``nd`` properties, and + may be used as an iterator. + + See Also + -------- + broadcast_arrays + broadcast_to + broadcast_shapes + + Examples + -------- + + Manually adding two vectors, using broadcasting: + + >>> import numpy as np + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + + >>> out = np.empty(b.shape) + >>> out.flat = [u+v for (u,v) in b] + >>> out + array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) + + Compare against built-in broadcasting: + + >>> x + y + array([[5, 6, 7], + [6, 7, 8], + [7, 8, 9]]) + + """) + +# attributes + +add_newdoc('numpy._core', 'broadcast', ('index', + """ + current index in broadcasted result + + Examples + -------- + + >>> import numpy as np + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (1, 5), (1, 6)) + >>> b.index + 3 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('iters', + """ + tuple of iterators along ``self``'s "components." + + Returns a tuple of `numpy.flatiter` objects, one for each "component" + of ``self``. 
+ + See Also + -------- + numpy.flatiter + + Examples + -------- + + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> row, col = b.iters + >>> next(row), next(col) + (1, 4) + + """)) + +add_newdoc('numpy._core', 'broadcast', ('ndim', + """ + Number of dimensions of broadcasted result. Alias for `nd`. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.ndim + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('nd', + """ + Number of dimensions of broadcasted result. For code intended for NumPy + 1.12.0 and later the more consistent `ndim` is preferred. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.nd + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('numiter', + """ + Number of iterators possessed by the broadcasted result. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.numiter + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('shape', + """ + Shape of broadcasted result. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.shape + (3, 3) + + """)) + +add_newdoc('numpy._core', 'broadcast', ('size', + """ + Total size of broadcasted result. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.size + 9 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('reset', + """ + reset() + + Reset the broadcasted result's iterator(s). + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (2, 4), (3, 4)) + >>> b.index + 3 + >>> b.reset() + >>> b.index + 0 + + """)) + +############################################################################### +# +# numpy functions +# +############################################################################### + +add_newdoc('numpy._core.multiarray', 'array', + """ + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, + like=None) + + Create an array. + + Parameters + ---------- + object : array_like + An array, any object exposing the array interface, an object whose + ``__array__`` method returns an array, or any (nested) sequence. + If object is a scalar, a 0-dimensional array containing object is + returned. + dtype : data-type, optional + The desired data-type for the array. If not given, NumPy will try to use + a default ``dtype`` that can represent the values (by applying promotion + rules when necessary.) + copy : bool, optional + If ``True`` (default), then the array data is copied. If ``None``, + a copy will only be made if ``__array__`` returns a copy, if obj is + a nested sequence, or if a copy is needed to satisfy any of the other + requirements (``dtype``, ``order``, etc.). Note that any copy of + the data is shallow, i.e., for arrays with object dtype, the new + array will point to the same objects. See Examples for `ndarray.copy`. + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``True``. 
+ order : {'K', 'A', 'C', 'F'}, optional + Specify the memory layout of the array. If object is not an array, the + newly created array will be in C order (row major) unless 'F' is + specified, in which case it will be in Fortran order (column major). + If object is an array the following holds. + + ===== ========= =================================================== + order no copy copy=True + ===== ========= =================================================== + 'K' unchanged F & C order preserved, otherwise most similar order + 'A' unchanged F order if input is F and not C, otherwise C order + 'C' C order C order + 'F' F order F order + ===== ========= =================================================== + + When ``copy=None`` and a copy is made for other reasons, the result is + the same as if ``copy=True``, with some exceptions for 'A', see the + Notes section. The default order is 'K'. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + ndmin : int, optional + Specifies the minimum number of dimensions that the resulting + array should have. Ones will be prepended to the shape as + needed to meet this requirement. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + An array object satisfying the specified requirements. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + copy : Return an array copy of the given object. + + + Notes + ----- + When order is 'A' and ``object`` is an array in neither 'C' nor 'F' order, + and a copy is forced by a change in dtype, then the order of the result is + not necessarily 'C' as expected. This is likely a bug. + + Examples + -------- + >>> import numpy as np + >>> np.array([1, 2, 3]) + array([1, 2, 3]) + + Upcasting: + + >>> np.array([1, 2, 3.0]) + array([ 1., 2., 3.]) + + More than one dimension: + + >>> np.array([[1, 2], [3, 4]]) + array([[1, 2], + [3, 4]]) + + Minimum dimensions 2: + + >>> np.array([1, 2, 3], ndmin=2) + array([[1, 2, 3]]) + + Type provided: + + >>> np.array([1, 2, 3], dtype=complex) + array([ 1.+0.j, 2.+0.j, 3.+0.j]) + + Data-type consisting of more than one element: + + >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')]) + >>> x['a'] + array([1, 3], dtype=int32) + + Creating an array from sub-classes: + + >>> np.array(np.asmatrix('1 2; 3 4')) + array([[1, 2], + [3, 4]]) + + >>> np.array(np.asmatrix('1 2; 3 4'), subok=True) + matrix([[1, 2], + [3, 4]]) + + """) + +add_newdoc('numpy._core.multiarray', 'asarray', + """ + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a.
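The three ``copy`` modes of ``np.array`` can be checked empirically. A short sketch, assuming NumPy 2.x behaviour where ``copy=False`` raises when a copy is unavoidable:

    import numpy as np

    a = np.arange(3, dtype=np.float32)
    # copy=None: no copy is made when nothing forces one.
    print(np.shares_memory(np.array(a, copy=None), a))   # True
    # copy=False: a dtype change would force a copy, so this raises.
    try:
        np.array(a, dtype=np.float64, copy=False)
    except ValueError as exc:
        print("copy required:", exc)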
+ 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'K'. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + copy : bool, optional + If ``True``, then the object is copied. If ``None`` then the object is + copied only if needed, i.e. if ``__array__`` returns a copy, if obj + is a nested sequence, or if a copy is needed to satisfy any of + the other requirements (``dtype``, ``order``, etc.). + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``None``. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array interpretation of ``a``. No copy is performed if the input + is already an ndarray with matching dtype and order. If ``a`` is a + subclass of ndarray, a base class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> import numpy as np + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.shares_memory(np.asarray(a, dtype=np.float32), a) + True + >>> np.shares_memory(np.asarray(a, dtype=np.float64), a) + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.recarray, np.ndarray) + True + >>> a = np.array([(1., 2), (3., 4)], dtype='f4,i4').view(np.recarray) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """) + +add_newdoc('numpy._core.multiarray', 'asanyarray', + """ + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + + Convert the input to an ndarray, but pass ndarray subclasses through. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes scalars, lists, lists of tuples, tuples, tuples of tuples, + tuples of lists, and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'C'. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 + + copy : bool, optional + If ``True``, then the object is copied. If ``None`` then the object is + copied only if needed, i.e. 
if ``__array__`` returns a copy, if obj + is a nested sequence, or if a copy is needed to satisfy any of + the other requirements (``dtype``, ``order``, etc.). + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``None``. + + .. versionadded:: 2.1.0 + + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray or an ndarray subclass + Array interpretation of `a`. If `a` is an ndarray or a subclass + of ndarray, it is returned as-is and no copy is performed. + + See Also + -------- + asarray : Similar function which always returns ndarrays. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and + Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> import numpy as np + >>> np.asanyarray(a) + array([1, 2]) + + Instances of `ndarray` subclasses are passed through as-is: + + >>> a = np.array([(1., 2), (3., 4)], dtype='f4,i4').view(np.recarray) + >>> np.asanyarray(a) is a + True + + """) + +add_newdoc('numpy._core.multiarray', 'ascontiguousarray', + """ + ascontiguousarray(a, dtype=None, *, like=None) + + Return a contiguous array (ndim >= 1) in memory (C order). + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + Data-type of returned array. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Contiguous array of same shape and content as `a`, with type `dtype` + if specified. + + See Also + -------- + asfortranarray : Convert input to an ndarray with column-major + memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + Starting with a Fortran-contiguous array: + + >>> import numpy as np + >>> x = np.ones((2, 3), order='F') + >>> x.flags['F_CONTIGUOUS'] + True + + Calling ``ascontiguousarray`` makes a C-contiguous copy: + + >>> y = np.ascontiguousarray(x) + >>> y.flags['C_CONTIGUOUS'] + True + >>> np.may_share_memory(x, y) + False + + Now, starting with a C-contiguous array: + + >>> x = np.ones((2, 3), order='C') + >>> x.flags['C_CONTIGUOUS'] + True + + Then, calling ``ascontiguousarray`` returns the same object: + + >>> y = np.ascontiguousarray(x) + >>> x is y + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """) + +add_newdoc('numpy._core.multiarray', 'asfortranarray', + """ + asfortranarray(a, dtype=None, *, like=None) + + Return an array (ndim >= 1) laid out in Fortran order in memory. + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + By default, the data-type is inferred from the input data. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + The input `a` in Fortran, or column-major, order. + + See Also + -------- + ascontiguousarray : Convert input to a contiguous (C order) array. + asanyarray : Convert input to an ndarray with either row or + column-major memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. 
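The subclass behaviour that separates `asarray` from `asanyarray` is easy to observe. A minimal sketch using `np.recarray`, as in the examples above:

    import numpy as np

    rec = np.array([(1.0, 2)], dtype='f4,i4').view(np.recarray)
    print(type(np.asarray(rec)))     # <class 'numpy.ndarray'>: forced to base class
    print(type(np.asanyarray(rec)))  # <class 'numpy.recarray'>: passed through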
+ + Examples + -------- + Starting with a C-contiguous array: + + >>> import numpy as np + >>> x = np.ones((2, 3), order='C') + >>> x.flags['C_CONTIGUOUS'] + True + + Calling ``asfortranarray`` makes a Fortran-contiguous copy: + + >>> y = np.asfortranarray(x) + >>> y.flags['F_CONTIGUOUS'] + True + >>> np.may_share_memory(x, y) + False + + Now, starting with a Fortran-contiguous array: + + >>> x = np.ones((2, 3), order='F') + >>> x.flags['F_CONTIGUOUS'] + True + + Then, calling ``asfortranarray`` returns the same object: + + >>> y = np.asfortranarray(x) + >>> x is y + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """) + +add_newdoc('numpy._core.multiarray', 'empty', + """ + empty(shape, dtype=float, order='C', *, device=None, like=None) + + Return a new array of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + Desired output data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: 'C' + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data of the given shape, dtype, and + order. Object arrays will be initialized to None. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + Notes + ----- + Unlike other array creation functions (e.g. `zeros`, `ones`, `full`), + `empty` does not initialize the values of the array, and may therefore be + marginally faster. However, the values stored in the newly allocated array + are arbitrary. For reproducible behavior, be sure to set each element of + the array before reading. + + Examples + -------- + >>> import numpy as np + >>> np.empty([2, 2]) + array([[ -9.74499359e+001, 6.69583040e-309], + [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized + + >>> np.empty([2, 2], dtype=int) + array([[-1073741821, -1067949133], + [ 496041986, 19249760]]) #uninitialized + + """) + +add_newdoc('numpy._core.multiarray', 'scalar', + """ + scalar(dtype, obj) + + Return a new scalar array of the given type initialized with obj. + + This function is meant mainly for pickle support. `dtype` must be a + valid data-type descriptor. If `dtype` corresponds to an object + descriptor, then `obj` can be any object, otherwise `obj` must be a + string. If `obj` is not given, it will be interpreted as None for object + type and as zeros for all other types. + + """) + +add_newdoc('numpy._core.multiarray', 'zeros', + """ + zeros(shape, dtype=float, order='C', *, like=None) + + Return a new array of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or tuple of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`.
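The `empty` caveat above is worth demonstrating: contents are arbitrary until written. A hedged sketch of the fill-before-read pattern:

    import numpy as np

    buf = np.empty((2, 2))      # values are whatever happened to be in memory
    buf[...] = 0.0              # explicit initialization before any read
    assert np.array_equal(buf, np.zeros((2, 2)))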
+ order : {'C', 'F'}, optional, default: 'C' + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of zeros with the given shape, dtype, and order. + + See Also + -------- + zeros_like : Return an array of zeros with shape and type of input. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> import numpy as np + >>> np.zeros(5) + array([ 0., 0., 0., 0., 0.]) + + >>> np.zeros((5,), dtype=int) + array([0, 0, 0, 0, 0]) + + >>> np.zeros((2, 1)) + array([[ 0.], + [ 0.]]) + + >>> s = (2,2) + >>> np.zeros(s) + array([[ 0., 0.], + [ 0., 0.]]) + + >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype + array([(0, 0), (0, 0)], + dtype=[('x', '<i4'), ('y', '<i4')]) + + """) + +add_newdoc('numpy._core.multiarray', 'fromstring', + """ + fromstring(string, dtype=float, count=-1, *, sep, like=None) + + A new 1-D array initialized from text data in a string. + + Parameters + ---------- + string : str + A string containing the data. + dtype : data-type, optional + The data type of the array; default: float. For binary input data, + the data must be in exactly this format. Most builtin numeric types are + supported and extension types may be supported. + count : int, optional + Read this number of `dtype` elements from the data. If this is + negative (the default), the count will be determined from the + length of the data. + sep : str, optional + The string separating numbers in the data; extra whitespace between + elements is also ignored. + + .. deprecated:: 1.14 + Passing ``sep=''``, the default, is deprecated since it will + trigger the deprecated binary mode of this function. This mode + interprets `string` as binary bytes, rather than ASCII text with + decimal numbers, an operation which is better spelt + ``frombuffer(string, dtype, count)``. If `string` contains unicode + text, the binary mode of `fromstring` will first encode it into + bytes using utf-8, which will not produce sane results. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + arr : ndarray + The constructed array. + + Raises + ------ + ValueError + If the string is not the correct size to satisfy the requested + `dtype` and `count`. + + See Also + -------- + frombuffer, fromfile, fromiter + + Examples + -------- + >>> import numpy as np + >>> np.fromstring('1 2', dtype=int, sep=' ') + array([1, 2]) + >>> np.fromstring('1, 2', dtype=int, sep=',') + array([1, 2]) + + """) + +add_newdoc('numpy._core.multiarray', 'compare_chararrays', + """ + compare_chararrays(a1, a2, cmp, rstrip) + + Performs element-wise comparison of two string arrays using the + comparison operator specified by `cmp`. + + Parameters + ---------- + a1, a2 : array_like + Arrays to be compared. + cmp : {"<", "<=", "==", ">=", ">", "!="} + Type of comparison. + rstrip : bool + If True, the spaces at the end of strings are removed before the comparison. + + Returns + ------- + out : ndarray + The output array of type bool with the same shape as `a1` and `a2`. + + Raises + ------ + ValueError + If `cmp` is not valid. + TypeError + If at least one of `a1` or `a2` is a non-string array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["a", "b", "cde"]) + >>> b = np.array(["a", "a", "dec"]) + >>> np.char.compare_chararrays(a, b, ">", True) + array([False, True, False]) + + """) + +add_newdoc('numpy._core.multiarray', 'fromiter', + """ + fromiter(iter, dtype, count=-1, *, like=None) + + Create a new 1-dimensional array from an iterable object. + + Parameters + ---------- + iter : iterable object + An iterable object providing data for the array. + dtype : data-type + The data-type of the returned array. + + .. versionchanged:: 1.23 + Object and subarray dtypes are now supported (note that the final + result is not 1-D for a subarray dtype). + + count : int, optional + The number of items to read from *iterable*. The default is -1, + which means all data is read. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + The output array. + + Notes + ----- + Specify `count` to improve performance. It allows ``fromiter`` to + pre-allocate the output array, instead of resizing it on demand. + + Examples + -------- + >>> import numpy as np + >>> iterable = (x*x for x in range(5)) + >>> np.fromiter(iterable, float) + array([ 0., 1., 4., 9., 16.]) + + A carefully constructed subarray dtype will lead to higher dimensional + results: + + >>> iterable = ((x+1, x+2) for x in range(5)) + >>> np.fromiter(iterable, dtype=np.dtype((int, 2))) + array([[1, 2], + [2, 3], + [3, 4], + [4, 5], + [5, 6]]) + + + """) + +add_newdoc('numpy._core.multiarray', 'fromfile', + """ + fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) + + Construct an array from data in a text or binary file.
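As the `fromiter` notes above say, passing ``count`` lets the output be preallocated instead of grown on demand. A minimal sketch:

    import numpy as np

    gen = (i * i for i in range(1000))
    # count=1000 allows a single upfront allocation of the result.
    squares = np.fromiter(gen, dtype=np.int64, count=1000)
    print(squares[:5])   # [ 0  1  4  9 16]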
+ + A highly efficient way of reading binary data with a known data-type, + as well as parsing simply formatted text files. Data written using the + `tofile` method can be read using this function. + + Parameters + ---------- + file : file or str or Path + Open file object or filename. + dtype : data-type + Data type of the returned array. + For binary files, it is used to determine the size and byte-order + of the items in the file. + Most builtin numeric types are supported and extension types may be supported. + count : int + Number of items to read. ``-1`` means all items (i.e., the complete + file). + sep : str + Separator between items if file is a text file. + Empty ("") separator means the file should be treated as binary. + Spaces (" ") in the separator match zero or more whitespace characters. + A separator consisting only of spaces must match at least one + whitespace. + offset : int + The offset (in bytes) from the file's current position. Defaults to 0. + Only permitted for binary files. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + See also + -------- + load, save + ndarray.tofile + loadtxt : More flexible way of loading data from a text file. + + Notes + ----- + Do not rely on the combination of `tofile` and `fromfile` for + data storage, as the binary files generated are not platform + independent. In particular, no byte-order or data-type information is + saved. Data can be stored in the platform independent ``.npy`` format + using `save` and `load` instead. + + Examples + -------- + Construct an ndarray: + + >>> import numpy as np + >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), + ... ('temp', float)]) + >>> x = np.zeros((1,), dtype=dt) + >>> x['time']['min'] = 10; x['temp'] = 98.25 + >>> x + array([((10, 0), 98.25)], + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) + + Save the raw data to disk: + + >>> import tempfile + >>> fname = tempfile.mkstemp()[1] + >>> x.tofile(fname) + + Read the raw data from disk: + + >>> np.fromfile(fname, dtype=dt) + array([((10, 0), 98.25)], + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) + + The recommended way to store and load data: + + >>> np.save(fname, x) + >>> np.load(fname + '.npy') + array([((10, 0), 98.25)], + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) + + """) + +add_newdoc('numpy._core.multiarray', 'frombuffer', + """ + frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) + + Interpret a buffer as a 1-dimensional array. + + Parameters + ---------- + buffer : buffer_like + An object that exposes the buffer interface. + dtype : data-type, optional + Data-type of the returned array; default: float. + count : int, optional + Number of items to read. ``-1`` means all data in the buffer. + offset : int, optional + Start reading the buffer from this offset (in bytes); default: 0. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + + See Also + -------- + ndarray.tobytes + Inverse of this operation, construct Python bytes from the raw data + bytes in the array. + + Notes + ----- + If the buffer has data that is not in machine byte-order, this should + be specified as part of the data-type, e.g.:: + + >>> dt = np.dtype(int) + >>> dt = dt.newbyteorder('>') + >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP + + The data of the resulting array will not be byteswapped, but will be + interpreted correctly. + + This function creates a view into the original object. This should be safe + in general, but it may make sense to copy the result when the original + object is mutable or untrusted. + + Examples + -------- + >>> import numpy as np + >>> s = b'hello world' + >>> np.frombuffer(s, dtype='S1', count=5, offset=6) + array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') + + >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) + array([1, 2], dtype=uint8) + >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) + array([1, 2, 3], dtype=uint8) + + """) + +add_newdoc('numpy._core.multiarray', 'from_dlpack', + """ + from_dlpack(x, /, *, device=None, copy=None) + + Create a NumPy array from an object implementing the ``__dlpack__`` + protocol. Generally, the returned NumPy array is a view of the input + object. See [1]_ and [2]_ for more details. + + Parameters + ---------- + x : object + A Python object that implements the ``__dlpack__`` and + ``__dlpack_device__`` methods. + device : device, optional + Device on which to place the created array. Default: ``None``. + Must be ``"cpu"`` if passed, which may allow importing an array + that is not already CPU available.
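The portability warning above is the key point of `tofile`/`fromfile`: the raw stream carries no dtype or byte-order metadata, so the reader must re-supply the dtype. A small round-trip sketch (the file name is arbitrary):

    import os
    import tempfile
    import numpy as np

    path = os.path.join(tempfile.mkdtemp(), "raw.bin")
    x = np.arange(4, dtype=np.int16)
    x.tofile(path)                            # bare bytes, no metadata
    y = np.fromfile(path, dtype=np.int16)     # dtype must match the writer's
    print(np.array_equal(x, y))               # True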
+ copy : bool, optional + Boolean indicating whether or not to copy the input. If ``True``, + a copy will always be made. If ``False``, the function will never copy, + and will raise ``BufferError`` in case a copy is deemed necessary. + Passing it requests a copy from the exporter who may or may not + implement the capability. + If ``None``, the function will reuse the existing memory buffer if + possible and copy otherwise. Default: ``None``. + + + Returns + ------- + out : ndarray + + References + ---------- + .. [1] Array API documentation, + https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack + + .. [2] Python specification for DLPack, + https://dmlc.github.io/dlpack/latest/python_spec.html + + Examples + -------- + >>> import torch # doctest: +SKIP + >>> x = torch.arange(10) # doctest: +SKIP + >>> # create a view of the torch tensor "x" in NumPy + >>> y = np.from_dlpack(x) # doctest: +SKIP + """) + +add_newdoc('numpy._core.multiarray', 'correlate', + """cross_correlate(a,v, mode=0)""") + +add_newdoc('numpy._core.multiarray', 'arange', + """ + arange([start,] stop[, step,], dtype=None, *, device=None, like=None) + + Return evenly spaced values within a given interval. + + ``arange`` can be called with a varying number of positional arguments: + + * ``arange(stop)``: Values are generated within the half-open interval + ``[0, stop)`` (in other words, the interval including `start` but + excluding `stop`). + * ``arange(start, stop)``: Values are generated within the half-open + interval ``[start, stop)``. + * ``arange(start, stop, step)``: Values are generated within the half-open + interval ``[start, stop)``, with spacing between values given by + ``step``. + + For integer arguments the function is roughly equivalent to the Python + built-in :py:class:`range`, but returns an ndarray rather than a ``range`` + instance. + + When using a non-integer step, such as 0.1, it is often better to use + `numpy.linspace`. + + See the Warning sections below for more information. + + Parameters + ---------- + start : integer or real, optional + Start of interval. The interval includes this value. The default + start value is 0. + stop : integer or real + End of interval. The interval does not include this value, except + in some cases where `step` is not an integer and floating point + round-off affects the length of `out`. + step : integer or real, optional + Spacing between values. For any output `out`, this is the distance + between two adjacent values, ``out[i+1] - out[i]``. The default + step size is 1. If `step` is specified as a positional argument, + `start` must also be given. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + arange : ndarray + Array of evenly spaced values. + + For floating point arguments, the length of the result is + ``ceil((stop - start)/step)``. Because of floating point overflow, + this rule may result in the last element of `out` being greater + than `stop`. + + Warnings + -------- + The length of the output might not be numerically stable. + + Another stability issue is due to the internal implementation of + `numpy.arange`.
+ The actual step value used to populate the array is + ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss + can occur here, due to casting or due to using floating points when + `start` is much larger than `step`. This can lead to unexpected + behaviour. For example:: + + >>> np.arange(0, 5, 0.5, dtype=int) + array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + >>> np.arange(-3, 3, 0.5, dtype=int) + array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + In such cases, the use of `numpy.linspace` should be preferred. + + The built-in :py:class:`range` generates :std:doc:`Python built-in integers + that have arbitrary size <python:c-api/long>`, while `numpy.arange` + produces `numpy.int32` or `numpy.int64` numbers. This may result in + incorrect results for large integer values:: + + >>> power = 40 + >>> modulo = 10000 + >>> x1 = [(n ** power) % modulo for n in range(8)] + >>> x2 = [(n ** power) % modulo for n in np.arange(8)] + >>> print(x1) + [0, 1, 7776, 8801, 6176, 625, 6576, 4001] # correct + >>> print(x2) + [0, 1, 7776, 7185, 0, 5969, 4816, 3361] # incorrect + + See Also + -------- + numpy.linspace : Evenly spaced numbers with careful handling of endpoints. + numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions. + numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. + :ref:`how-to-partition` + + Examples + -------- + >>> import numpy as np + >>> np.arange(3) + array([0, 1, 2]) + >>> np.arange(3.0) + array([ 0., 1., 2.]) + >>> np.arange(3,7) + array([3, 4, 5, 6]) + >>> np.arange(3,7,2) + array([3, 5]) + + """) + +add_newdoc('numpy._core.multiarray', '_get_ndarray_c_version', + """_get_ndarray_c_version() + + Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number. + + """) + +add_newdoc('numpy._core.multiarray', '_reconstruct', + """_reconstruct(subtype, shape, dtype) + + Construct an empty array. Used by Pickles. + + """) + +add_newdoc('numpy._core.multiarray', 'promote_types', + """ + promote_types(type1, type2) + + Returns the data type with the smallest size and smallest scalar + kind to which both ``type1`` and ``type2`` may be safely cast. + The returned data type is always considered "canonical", this mainly + means that the promoted dtype will always be in native byte order. + + This function is symmetric, but rarely associative. + + Parameters + ---------- + type1 : dtype or dtype specifier + First data type. + type2 : dtype or dtype specifier + Second data type. + + Returns + ------- + out : dtype + The promoted data type. + + Notes + ----- + Please see `numpy.result_type` for additional information about promotion. + + Starting in NumPy 1.9, the promote_types function returns a valid string + length when given an integer or float dtype as one argument and a string + dtype as another argument. Previously it always returned the input string + dtype, even if it wasn't long enough to store the max integer/float value + converted to a string. + + .. versionchanged:: 1.23.0 + + NumPy now supports promotion for more structured dtypes. It will now + remove unnecessary padding from a structure dtype and promote included + fields individually.
+ + See Also + -------- + result_type, dtype, can_cast + + Examples + -------- + >>> import numpy as np + >>> np.promote_types('f4', 'f8') + dtype('float64') + + >>> np.promote_types('i8', 'f4') + dtype('float64') + + >>> np.promote_types('>i8', '<c8') + dtype('complex128') + + >>> np.promote_types('i4', 'S8') + dtype('S11') + + An example of a non-associative case: + + >>> p = np.promote_types + >>> p('S', p('i1', 'u1')) + dtype('S6') + >>> p(p('S', 'i1'), 'u1') + dtype('S4') + + """) + +add_newdoc('numpy._core.multiarray', 'c_einsum', + """ + c_einsum(subscripts, *operands, out=None, dtype=None, order='K', + casting='safe') + + *This documentation shadows that of the native python implementation of the `einsum` function, + except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* + + Evaluates the Einstein summation convention on the operands. + + Using the Einstein summation convention, many common multi-dimensional, + linear algebraic array operations can be represented in a simple fashion. + In *implicit* mode `einsum` computes these values. + + In *explicit* mode, `einsum` provides further flexibility to compute + other array operations that might not be considered classical Einstein + summation operations, by disabling, or forcing summation over specified + subscript labels. + + See the notes and examples for clarification. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation as comma separated list of + subscript labels. An implicit (classical Einstein summation) + calculation is performed unless the explicit indicator '->' is + included as well as subscript labels of the precise output form. + operands : list of array_like + These are the arrays for the operation. + out : ndarray, optional + If provided, the calculation is done into this array. + dtype : {data-type, None}, optional + If provided, forces the calculation to use the data type specified. + Note that you may have to also give a more liberal `casting` + parameter to allow the conversions. Default is None. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the output. 'C' means it should + be C contiguous. 'F' means it should be Fortran contiguous, + 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. + 'K' means it should be as close to the layout of the inputs as + is possible, including arbitrarily permuted axes. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Setting this to + 'unsafe' is not recommended, as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Default is 'safe'. + optimize : {False, True, 'greedy', 'optimal'}, optional + Controls if intermediate optimization should occur. No optimization + will occur if False and True will default to the 'greedy' algorithm. + Also accepts an explicit contraction list from the ``np.einsum_path`` + function. See ``np.einsum_path`` for more details. Defaults to False. + + Returns + ------- + output : ndarray + The calculation based on the Einstein summation convention.
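The ``dtype('S11')`` result above follows from the promotion-to-string rule: the promoted string must be wide enough for any value of the integer operand. A one-line check of that reasoning, assuming a 32-bit ``i4``:

    import numpy as np

    # len('-2147483648') == 11, the widest possible str() of an int32.
    print(np.promote_types('i4', 'S8'))   # dtype('S11')
    print(len(str(-2**31)))               # 11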
+ + See Also + -------- + einsum_path, dot, inner, outer, tensordot, linalg.multi_dot + + Notes + ----- + The Einstein summation convention can be used to compute + many multi-dimensional, linear algebraic array operations. `einsum` + provides a succinct way of representing these. + + A non-exhaustive list of these operations, + which can be computed by `einsum`, is shown below along with examples: + + * Trace of an array, :py:func:`numpy.trace`. + * Return a diagonal, :py:func:`numpy.diag`. + * Array axis summations, :py:func:`numpy.sum`. + * Transpositions and permutations, :py:func:`numpy.transpose`. + * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. + * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. + * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. + * Tensor contractions, :py:func:`numpy.tensordot`. + * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. + + The subscripts string is a comma-separated list of subscript labels, + where each label refers to a dimension of the corresponding operand. + Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` + is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label + appears only once, it is not summed, so ``np.einsum('i', a)`` produces a + view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` + describes traditional matrix multiplication and is equivalent to + :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one + operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent + to :py:func:`np.trace(a) <numpy.trace>`. + + In *implicit mode*, the chosen subscripts are important + since the axes of the output are reordered alphabetically. This + means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while + ``np.einsum('ji', a)`` takes its transpose. Additionally, + ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while + ``np.einsum('ij,jh', a, b)`` returns the transpose of the + multiplication since subscript 'h' precedes subscript 'i'. + + In *explicit mode* the output can be directly controlled by + specifying output subscript labels. This requires the + identifier '->' as well as the list of output subscript labels. + This feature increases the flexibility of the function since + summing can be disabled or forced when required. The call + ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) <numpy.sum>` + if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)`` + is like :py:func:`np.diag(a) <numpy.diag>` if ``a`` is a square 2-D array. + The difference is that `einsum` does not allow broadcasting by default. + Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the + order of the output subscript labels and therefore returns matrix + multiplication, unlike the example above in implicit mode. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``np.einsum('...ii->...i', a)``. + ``np.einsum('...i->...', a)`` is like + :py:func:`np.sum(a, axis=-1) <numpy.sum>` for array ``a`` of any shape. + To take the trace along the first and last axes, + you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, one can do + ``np.einsum('ij...,jk...->ik...', a, b)``.
+ + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` + produces a view (changed in version 1.10.0). + + `einsum` also provides an alternative way to provide the subscripts + and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. + If the output shape is not provided in this format `einsum` will be + calculated in implicit mode, otherwise it will be performed explicitly. + The examples below have corresponding `einsum` calls with the two + parameter methods. + + Views returned from einsum are now writeable whenever the input array + is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now + have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>` + and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal + of a 2D array. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[ 4400., 4730.], + [ 4532., 4874.],
+ [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + """) + + +############################################################################## +# +# Documentation for ndarray attributes and methods +# +############################################################################## + + +############################################################################## +# +# ndarray object +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', + """ + ndarray(shape, dtype=float, buffer=None, offset=0, + strides=None, order=None) + + An array object represents a multidimensional, homogeneous array + of fixed-size items. An associated data-type object describes the + format of each element in the array (its byte-order, how many bytes it + occupies in memory, whether it is an integer, a floating point number, + or something else, etc.) + + Arrays should be constructed using `array`, `zeros` or `empty` (refer + to the See Also section below). The parameters given here refer to + a low-level method (`ndarray(...)`) for instantiating an array. + + For more information, refer to the `numpy` module and examine the + methods and attributes of an array. + + Parameters + ---------- + (for the __new__ method; see Notes below) + + shape : tuple of ints + Shape of created array. + dtype : data-type, optional + Any object that can be interpreted as a numpy data type. + buffer : object exposing buffer interface, optional + Used to fill the array with data. + offset : int, optional + Offset of array data in buffer. + strides : tuple of ints, optional + Strides of data in memory. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Attributes + ---------- + T : ndarray + Transpose of the array. + data : buffer + The array's elements, in memory. + dtype : dtype object + Describes the format of the elements in the array. + flags : dict + Dictionary containing information related to memory use, e.g., + 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. + flat : numpy.flatiter object + Flattened version of the array as an iterator. The iterator + allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for + assignment examples; TODO). + imag : ndarray + Imaginary part of the array. + real : ndarray + Real part of the array. + size : int + Number of elements in the array. + itemsize : int + The memory use of each array element in bytes. + nbytes : int + The total number of bytes required to store the array data, + i.e., ``itemsize * size``. + ndim : int + The array's number of dimensions. + shape : tuple of ints + Shape of the array. + strides : tuple of ints + The step-size required to move from one element to the next in + memory. 
For example, a contiguous ``(3, 4)`` array of type + ``int16`` in C-order has strides ``(8, 2)``. This implies that + to move from element to element in memory requires jumps of 2 bytes. + To move from row-to-row, one needs to jump 8 bytes at a time + (``2 * 4``). + ctypes : ctypes object + Class containing properties of the array needed for interaction + with ctypes. + base : ndarray + If the array is a view into another array, that array is its `base` + (unless that array is also a view). The `base` array is where the + array data is actually stored. + + See Also + -------- + array : Construct an array. + zeros : Create an array, each element of which is zero. + empty : Create an array, but leave its allocated memory unchanged (i.e., + it contains "garbage"). + dtype : Create a data-type. + numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>` + w.r.t. its `dtype.type <numpy.dtype.type>`. + + Notes + ----- + There are two modes of creating an array using ``__new__``: + + 1. If `buffer` is None, then only `shape`, `dtype`, and `order` + are used. + 2. If `buffer` is an object exposing the buffer interface, then + all keywords are interpreted. + + No ``__init__`` method is needed because the array is fully initialized + after the ``__new__`` method. + + Examples + -------- + These examples illustrate the low-level `ndarray` constructor. Refer + to the `See Also` section above for easier ways of constructing an + ndarray. + + First mode, `buffer` is None: + + >>> import numpy as np + >>> np.ndarray(shape=(2,2), dtype=float, order='F') + array([[0.0e+000, 0.0e+000], # random + [ nan, 2.5e-323]]) + + Second mode: + + >>> np.ndarray((2,), buffer=np.array([1,2,3]), + ... offset=np.int_().itemsize, + ... dtype=int) # offset = 1*itemsize, i.e. skip first element + array([2, 3]) + + """) + + +############################################################################## +# +# ndarray attributes +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_interface__', + """Array protocol: Python side.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_priority__', + """Array priority.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__', + """Array protocol: C-struct side.""")) + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', + """ + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) + + DLPack Protocol: Part of the Array API. + + """)) + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', + """ + a.__dlpack_device__() + + DLPack Protocol: Part of the Array API. + + """)) + +add_newdoc('numpy._core.multiarray', 'ndarray', ('base', + """ + Base object if memory is from some other object. + + Examples + -------- + The base of an array that owns its memory is None: + + >>> import numpy as np + >>> x = np.array([1,2,3,4]) + >>> x.base is None + True + + Slicing creates a view, whose memory is shared with x: + + >>> y = x[2:] + >>> y.base is x + True + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('ctypes', + """ + An object to simplify the interaction of the array with the ctypes + module. + + This attribute creates an object that makes it easier to use arrays + when calling shared libraries with the ctypes module. The returned + object has, among others, data, shape, and strides attributes (see + Notes below) which themselves return ctypes objects that can be used + as arguments to a shared library.
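The buffer-mode constructor in the second example above is essentially a manual view. A sketch showing that the result shares memory with the source array:

    import numpy as np

    base = np.array([1, 2, 3], dtype=np.int64)
    # Skip one itemsize into base's buffer and expose two elements.
    view = np.ndarray((2,), dtype=np.int64, buffer=base, offset=base.itemsize)
    print(view)        # [2 3]
    view[0] = 99
    print(base)        # [ 1 99  3]: the constructor did not copy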
+ + Parameters + ---------- + None + + Returns + ------- + c : Python object + Possessing attributes data, shape, strides, etc. + + See Also + -------- + numpy.ctypeslib + + Notes + ----- + Below are the public attributes of this object which were documented + in "Guide to NumPy" (we have omitted undocumented public attributes, + as well as documented private attributes): + + .. autoattribute:: numpy._core._internal._ctypes.data + :noindex: + + .. autoattribute:: numpy._core._internal._ctypes.shape + :noindex: + + .. autoattribute:: numpy._core._internal._ctypes.strides + :noindex: + + .. automethod:: numpy._core._internal._ctypes.data_as + :noindex: + + .. automethod:: numpy._core._internal._ctypes.shape_as + :noindex: + + .. automethod:: numpy._core._internal._ctypes.strides_as + :noindex: + + If the ctypes module is not available, then the ctypes attribute + of array objects still returns something useful, but ctypes objects + are not returned and errors may be raised instead. In particular, + the object will still have the ``as_parameter`` attribute which will + return an integer equal to the data attribute. + + Examples + -------- + >>> import numpy as np + >>> import ctypes + >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32) + >>> x + array([[0, 1], + [2, 3]], dtype=int32) + >>> x.ctypes.data + 31962608 # may vary + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)) + <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents + c_uint(0) + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents + c_ulong(4294967296) + >>> x.ctypes.shape + <numpy._core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary + >>> x.ctypes.strides + <numpy._core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('data', + """Python buffer object pointing to the start of the array's data.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dtype', + """ + Data-type of the array's elements. + + .. warning:: + + Setting ``arr.dtype`` is discouraged and may be deprecated in the + future. Setting will replace the ``dtype`` without modifying the + memory (see also `ndarray.view` and `ndarray.astype`). + + Parameters + ---------- + None + + Returns + ------- + d : numpy dtype object + + See Also + -------- + ndarray.astype : Cast the values contained in the array to a new data-type. + ndarray.view : Create a view of the same data but a different data-type. + numpy.dtype + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(4).reshape((2, 2)) + >>> x + array([[0, 1], + [2, 3]]) + >>> x.dtype + dtype('int64') # may vary (OS, bitness) + >>> isinstance(x.dtype, np.dtype) + True + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('imag', + """ + The imaginary part of the array. + + Examples + -------- + >>> import numpy as np + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.imag + array([ 0. , 0.70710678]) + >>> x.imag.dtype + dtype('float64') + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('itemsize', + """ + Length of one array element in bytes. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1,2,3], dtype=np.float64) + >>> x.itemsize + 8 + >>> x = np.array([1,2,3], dtype=np.complex128) + >>> x.itemsize + 16 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('flags', + """ + Information about the memory layout of the array. + + Attributes + ---------- + C_CONTIGUOUS (C) + The data is in a single, C-style contiguous segment. + F_CONTIGUOUS (F) + The data is in a single, Fortran-style contiguous segment.
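A typical use of the ctypes attribute described above is handing the data pointer to a C routine. A minimal sketch where ctypes.memset stands in for a real shared-library call:

    import ctypes
    import numpy as np

    x = np.ones(4, dtype=np.uint8)
    # x.ctypes.data is the integer address of the first element.
    ctypes.memset(x.ctypes.data, 0, x.nbytes)   # zero the buffer in place
    print(x)   # [0 0 0 0]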
+ OWNDATA (O) + The array owns the memory it uses or borrows it from another object. + WRITEABLE (W) + The data area can be written to. Setting this to False locks + the data, making it read-only. A view (slice, etc.) inherits WRITEABLE + from its base array at creation time, but a view of a writeable + array may be subsequently locked while the base array remains writeable. + (The opposite is not true, in that a view of a locked array may not + be made writeable. However, currently, locking a base object does not + lock any views that already reference it, so under that circumstance it + is possible to alter the contents of a locked array via a previously + created writeable view onto it.) Attempting to change a non-writeable + array raises a RuntimeError exception. + ALIGNED (A) + The data and all elements are aligned appropriately for the hardware. + WRITEBACKIFCOPY (X) + This array is a copy of some other array. The C-API function + PyArray_ResolveWritebackIfCopy must be called before deallocating, + so that the base array will be updated with the contents of this array. + FNC + F_CONTIGUOUS and not C_CONTIGUOUS. + FORC + F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). + BEHAVED (B) + ALIGNED and WRITEABLE. + CARRAY (CA) + BEHAVED and C_CONTIGUOUS. + FARRAY (FA) + BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. + + Notes + ----- + The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), + or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag + names are only supported in dictionary access. + + Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be + changed by the user, via direct assignment to the attribute or dictionary + entry, or by calling `ndarray.setflags`. + + The array flags cannot be set arbitrarily: + + - WRITEBACKIFCOPY can only be set ``False``. + - ALIGNED can only be set ``True`` if the data is truly aligned. + - WRITEABLE can only be set ``True`` if the array owns its own memory + or the ultimate owner of the memory exposes a writeable buffer + interface or is a string. + + Arrays can be both C-style and Fortran-style contiguous simultaneously. + This is clear for 1-dimensional arrays, but can also be true for higher + dimensional arrays. + + Even for contiguous arrays a stride for a given dimension + ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` + or the array has no elements. + It does *not* generally hold that ``self.strides[-1] == self.itemsize`` + for C-style contiguous arrays or that ``self.strides[0] == self.itemsize`` + for Fortran-style contiguous arrays. + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('flat', + """ + A 1-D iterator over the array. + + This is a `numpy.flatiter` instance, which acts similarly to, but is not + a subclass of, Python's built-in iterator object. + + See Also + -------- + flatten : Return a copy of the array collapsed into one dimension. + + flatiter + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(1, 7).reshape(2, 3) + >>> x + array([[1, 2, 3], + [4, 5, 6]]) + >>> x.flat[3] + 4 + >>> x.T + array([[1, 4], + [2, 5], + [3, 6]]) + >>> x.T.flat[3] + 5 + >>> type(x.flat) + <class 'numpy.flatiter'> + + An assignment example: + + >>> x.flat = 3; x + array([[3, 3, 3], + [3, 3, 3]]) + >>> x.flat[[1,4]] = 1; x + array([[3, 1, 3], + [3, 1, 3]]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('nbytes', + """ + Total bytes consumed by the elements of the array.
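The WRITEABLE semantics spelled out above (a view can be locked while its base stays writeable) can be shown with `setflags`. A short sketch:

    import numpy as np

    a = np.arange(4)
    v = a[1:]
    v.setflags(write=False)     # lock the view only
    try:
        v[0] = 10
    except ValueError as exc:
        print("view is locked:", exc)
    a[1] = 10                   # base is still writeable
    print(v[0])                 # 10: the view sees the change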
+ + Notes + ----- + Does not include memory consumed by non-element attributes of the + array object. + + See Also + -------- + sys.getsizeof + Memory consumed by the object itself (without parents, in the case of + a view). This does include memory consumed by non-element attributes. + + Examples + -------- + >>> import numpy as np + >>> x = np.zeros((3,5,2), dtype=np.complex128) + >>> x.nbytes + 480 + >>> np.prod(x.shape) * x.itemsize + 480 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('ndim', + """ + Number of array dimensions. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> x.ndim + 1 + >>> y = np.zeros((2, 3, 4)) + >>> y.ndim + 3 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('real', + """ + The real part of the array. + + Examples + -------- + >>> import numpy as np + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.real + array([ 1. , 0.70710678]) + >>> x.real.dtype + dtype('float64') + + See Also + -------- + numpy.real : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('shape', + """ + Tuple of array dimensions. + + The shape property is usually used to get the current shape of an array, + but may also be used to reshape the array in-place by assigning a tuple of + array dimensions to it. As with `numpy.reshape`, one of the new shape + dimensions can be -1, in which case its value is inferred from the size of + the array and the remaining dimensions. Reshaping an array in-place will + fail if a copy is required. + + .. warning:: + + Setting ``arr.shape`` is discouraged and may be deprecated in the + future. Using `ndarray.reshape` is the preferred approach. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3, 4]) + >>> x.shape + (4,) + >>> y = np.zeros((2, 3, 4)) + >>> y.shape + (2, 3, 4) + >>> y.shape = (3, 8) + >>> y + array([[ 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0.]]) + >>> y.shape = (3, 6) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + ValueError: cannot reshape array of size 24 into shape (3,6) + >>> np.zeros((4,2))[::2].shape = (-1,) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + AttributeError: Incompatible shape for in-place modification. Use + `.reshape()` to make a copy with the desired shape. + + See Also + -------- + numpy.shape : Equivalent getter function. + numpy.reshape : Function similar to setting ``shape``. + ndarray.reshape : Method similar to setting ``shape``. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('size', + """ + Number of elements in the array. + + Equal to ``np.prod(a.shape)``, i.e., the product of the array's + dimensions. + + Notes + ----- + `a.size` returns a standard arbitrary precision Python integer. This + may not be the case with other methods of obtaining the same value + (like the suggested ``np.prod(a.shape)``, which returns an instance + of ``np.int_``), and may be relevant if the value is used further in + calculations that may overflow a fixed size integer type. + + Examples + -------- + >>> import numpy as np + >>> x = np.zeros((3, 5, 2), dtype=np.complex128) + >>> x.size + 30 + >>> np.prod(x.shape) + 30 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('strides', + """ + Tuple of bytes to step in each dimension when traversing an array.
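The in-place reshape rules from the `shape` docstring above can be demonstrated directly; assigning to ``.shape`` fails exactly where a copy would be needed, while `reshape` silently copies. A sketch:

    import numpy as np

    y = np.zeros((4, 2))
    y.shape = (2, 4)               # fine: same buffer, new strides
    v = np.zeros((4, 2))[::2]      # non-contiguous view
    flat = v.reshape(-1)           # works; returns a copy here
    try:
        v.shape = (-1,)            # in-place change would need a copy
    except AttributeError as exc:
        print(exc)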
+ + The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` + is:: + + offset = sum(np.array(i) * a.strides) + + A more detailed explanation of strides can be found in + :ref:`arrays.ndarray`. + + .. warning:: + + Setting ``arr.strides`` is discouraged and may be deprecated in the + future. `numpy.lib.stride_tricks.as_strided` should be preferred + to create a new view of the same data in a safer way. + + Notes + ----- + Imagine an array of 32-bit integers (each 4 bytes):: + + x = np.array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]], dtype=np.int32) + + This array is stored in memory as 40 bytes, one after the other + (known as a contiguous block of memory). The strides of an array tell + us how many bytes we have to skip in memory to move to the next position + along a certain axis. For example, we have to skip 4 bytes (1 value) to + move to the next column, but 20 bytes (5 values) to get to the same + position in the next row. As such, the strides for the array `x` will be + ``(20, 4)``. + + See Also + -------- + numpy.lib.stride_tricks.as_strided + + Examples + -------- + >>> import numpy as np + >>> y = np.reshape(np.arange(2 * 3 * 4, dtype=np.int32), (2, 3, 4)) + >>> y + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]], dtype=np.int32) + >>> y.strides + (48, 16, 4) + >>> y[1, 1, 1] + np.int32(17) + >>> offset = sum(y.strides * np.array((1, 1, 1))) + >>> offset // y.itemsize + np.int64(17) + + >>> x = np.reshape(np.arange(5*6*7*8, dtype=np.int32), (5, 6, 7, 8)) + >>> x = x.transpose(2, 3, 1, 0) + >>> x.strides + (32, 4, 224, 1344) + >>> i = np.array([3, 5, 2, 2], dtype=np.int32) + >>> offset = sum(i * x.strides) + >>> x[3, 5, 2, 2] + np.int32(813) + >>> offset // x.itemsize + np.int64(813) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('T', + """ + View of the transposed array. + + Same as ``self.transpose()``. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.T + array([[1, 3], + [2, 4]]) + + >>> a = np.array([1, 2, 3, 4]) + >>> a + array([1, 2, 3, 4]) + >>> a.T + array([1, 2, 3, 4]) + + See Also + -------- + transpose + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('mT', + """ + View of the matrix transposed array. + + The matrix transpose is the transpose of the last two dimensions, even + if the array is of higher dimension. + + .. versionadded:: 2.0 + + Raises + ------ + ValueError + If the array is of dimension less than 2. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.mT + array([[1, 3], + [2, 4]]) + + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> a + array([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> a.mT + array([[[0, 2], + [1, 3]], + + [[4, 6], + [5, 7]]]) + + """)) +############################################################################## +# +# ndarray methods +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', + """ + a.__array__([dtype], *, copy=None) + + For ``dtype`` parameter it returns a new reference to self if + ``dtype`` is not given or it matches array's data type. + A new array of provided data type is returned if ``dtype`` + is different from the current data type of the array. 
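The offset formula in the `strides` docstring above can be recomputed by hand. A minimal sketch that recovers an element's flat position from its index and strides:

    import numpy as np

    y = np.arange(24, dtype=np.int32).reshape(2, 3, 4)   # strides (48, 16, 4)
    idx = (1, 1, 1)
    offset = sum(i * s for i, s in zip(idx, y.strides))  # 48 + 16 + 4 = 68
    print(offset // y.itemsize, y[idx])                  # 17 17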
+    For ``copy`` parameter it returns a new reference to self if
+    ``copy=False`` or ``copy=None`` and copying isn't enforced by ``dtype``
+    parameter. The method returns a new array for ``copy=True``, regardless of
+    ``dtype`` parameter.
+
+    A more detailed explanation of the ``__array__`` interface
+    can be found in :ref:`dunder_array.interface`.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__',
+    """
+    a.__array_finalize__(obj, /)
+
+    Present so subclasses can call super. Does nothing.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__',
+    """
+    a.__array_wrap__(array[, context], /)
+
+    Returns a view of `array` with the same type as self.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__',
+    """
+    a.__copy__()
+
+    Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+
+    Equivalent to ``a.copy(order='K')``.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__',
+    """
+    a.__class_getitem__(item, /)
+
+    Return a parametrized wrapper around the `~numpy.ndarray` type.
+
+    .. versionadded:: 1.22
+
+    Returns
+    -------
+    alias : types.GenericAlias
+        A parametrized `~numpy.ndarray` type.
+
+    Examples
+    --------
+    >>> from typing import Any
+    >>> import numpy as np
+
+    >>> np.ndarray[Any, np.dtype[np.uint8]]
+    numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]]
+
+    See Also
+    --------
+    :pep:`585` : Type hinting generics in standard collections.
+    numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
+                           w.r.t. its `dtype.type <numpy.dtype.type>`.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__',
+    """
+    a.__deepcopy__(memo, /)
+
+    Used if :func:`copy.deepcopy` is called on an array.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__',
+    """
+    a.__reduce__()
+
+    For pickling.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__',
+    """
+    a.__setstate__(state, /)
+
+    For unpickling.
+
+    The `state` argument must be a sequence that contains the following
+    elements:
+
+    Parameters
+    ----------
+    version : int
+        optional pickle version. If omitted defaults to 0.
+    shape : tuple
+    dtype : data-type
+    isFortran : bool
+    rawdata : string or list
+        a binary string with the data (or a list if 'a' is an object array)
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('all',
+    """
+    a.all(axis=None, out=None, keepdims=False, *, where=True)
+
+    Returns True if all elements evaluate to True.
+
+    Refer to `numpy.all` for full documentation.
+
+    See Also
+    --------
+    numpy.all : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('any',
+    """
+    a.any(axis=None, out=None, keepdims=False, *, where=True)
+
+    Returns True if any of the elements of `a` evaluate to True.
+
+    Refer to `numpy.any` for full documentation.
+
+    See Also
+    --------
+    numpy.any : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax',
+    """
+    a.argmax(axis=None, out=None, *, keepdims=False)
+
+    Return indices of the maximum values along the given axis.
+
+    Refer to `numpy.argmax` for full documentation.
+
+    See Also
+    --------
+    numpy.argmax : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin',
+    """
+    a.argmin(axis=None, out=None, *, keepdims=False)
+
+    Return indices of the minimum values along the given axis.
+
+    Refer to `numpy.argmin` for detailed documentation.
+ + See Also + -------- + numpy.argmin : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort', + """ + a.argsort(axis=-1, kind=None, order=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. + + See Also + -------- + numpy.argsort : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', + """ + a.argpartition(kth, axis=-1, kind='introselect', order=None) + + Returns the indices that would partition this array. + + Refer to `numpy.argpartition` for full documentation. + + See Also + -------- + numpy.argpartition : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('astype', + """ + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) + + Copy of the array, cast to a specified type. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout order of the result. + 'C' means C order, 'F' means Fortran order, 'A' + means 'F' order if all the arrays are Fortran contiguous, + 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'unsafe' + for backwards compatibility. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + subok : bool, optional + If True, then sub-classes will be passed-through (default), otherwise + the returned array will be forced to be a base-class array. + copy : bool, optional + By default, astype always returns a newly allocated array. If this + is set to false, and the `dtype`, `order`, and `subok` + requirements are satisfied, the input array is returned instead + of a copy. + + Returns + ------- + arr_t : ndarray + Unless `copy` is False and the other conditions for returning the input + array are satisfied (see description for `copy` input parameter), `arr_t` + is a new array of the same shape as the input array, with dtype, order + given by `dtype`, `order`. + + Raises + ------ + ComplexWarning + When casting from complex to float or int. To avoid this, + one should use ``a.real.astype(t)``. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 2.5]) + >>> x + array([1. , 2. , 2.5]) + + >>> x.astype(int) + array([1, 2, 2]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap', + """ + a.byteswap(inplace=False) + + Swap the bytes of the array elements + + Toggle between low-endian and big-endian data representation by + returning a byteswapped array, optionally swapped in-place. + Arrays of byte-strings are not swapped. The real and imaginary + parts of a complex number are swapped individually. + + Parameters + ---------- + inplace : bool, optional + If ``True``, swap bytes in-place, default is ``False``. + + Returns + ------- + out : ndarray + The byteswapped array. If `inplace` is ``True``, this is + a view to self. 
+ + Examples + -------- + >>> import numpy as np + >>> A = np.array([1, 256, 8755], dtype=np.int16) + >>> list(map(hex, A)) + ['0x1', '0x100', '0x2233'] + >>> A.byteswap(inplace=True) + array([ 256, 1, 13090], dtype=int16) + >>> list(map(hex, A)) + ['0x100', '0x1', '0x3322'] + + Arrays of byte-strings are not swapped + + >>> A = np.array([b'ceg', b'fac']) + >>> A.byteswap() + array([b'ceg', b'fac'], dtype='|S3') + + ``A.view(A.dtype.newbyteorder()).byteswap()`` produces an array with + the same values but different representation in memory + + >>> A = np.array([1, 2, 3],dtype=np.int64) + >>> A.view(np.uint8) + array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0], dtype=uint8) + >>> A.view(A.dtype.newbyteorder()).byteswap(inplace=True) + array([1, 2, 3], dtype='>i8') + >>> A.view(np.uint8) + array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, + 0, 3], dtype=uint8) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('choose', + """ + a.choose(choices, out=None, mode='raise') + + Use an index array to construct a new array from a set of choices. + + Refer to `numpy.choose` for full documentation. + + See Also + -------- + numpy.choose : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', + """ + a.clip(min=None, max=None, out=None, **kwargs) + + Return an array whose values are limited to ``[min, max]``. + One of max or min must be given. + + Refer to `numpy.clip` for full documentation. + + See Also + -------- + numpy.clip : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('compress', + """ + a.compress(condition, axis=None, out=None) + + Return selected slices of this array along given axis. + + Refer to `numpy.compress` for full documentation. + + See Also + -------- + numpy.compress : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('conj', + """ + a.conj() + + Complex-conjugate all elements. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate', + """ + a.conjugate() + + Return the complex conjugate, element-wise. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('copy', + """ + a.copy(order='C') + + Return a copy of the array. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :func:`numpy.copy` are very + similar but have different default values for their order= + arguments, and this function always passes sub-classes through.) + + See also + -------- + numpy.copy : Similar function with different default behavior + numpy.copyto + + Notes + ----- + This function is the preferred method for creating an array copy. The + function :func:`numpy.copy` is similar, but it defaults to using order 'K', + and will not pass sub-classes through by default. 
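+
+    As a small doctest sketch of the ``order`` difference mentioned above
+    (an illustrative addition, not upstream text):
+
+    >>> import numpy as np
+    >>> f = np.ones((2, 3), order='F')
+    >>> f.copy().flags['C_CONTIGUOUS']        # method default is order='C'
+    True
+    >>> np.copy(f).flags['F_CONTIGUOUS']      # function default 'K' keeps the layout
+    True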
+ + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3],[4,5,6]], order='F') + + >>> y = x.copy() + + >>> x.fill(0) + + >>> x + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y + array([[1, 2, 3], + [4, 5, 6]]) + + >>> y.flags['C_CONTIGUOUS'] + True + + For arrays containing Python objects (e.g. dtype=object), + the copy is a shallow one. The new array will contain the + same object which may lead to surprises if that object can + be modified (is mutable): + + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> b = a.copy() + >>> b[2][0] = 10 + >>> a + array([1, 'm', list([10, 3, 4])], dtype=object) + + To ensure all elements within an ``object`` array are copied, + use `copy.deepcopy`: + + >>> import copy + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> c = copy.deepcopy(a) + >>> c[2][0] = 10 + >>> c + array([1, 'm', list([10, 3, 4])], dtype=object) + >>> a + array([1, 'm', list([2, 3, 4])], dtype=object) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', + """ + a.cumprod(axis=None, dtype=None, out=None) + + Return the cumulative product of the elements along the given axis. + + Refer to `numpy.cumprod` for full documentation. + + See Also + -------- + numpy.cumprod : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', + """ + a.cumsum(axis=None, dtype=None, out=None) + + Return the cumulative sum of the elements along the given axis. + + Refer to `numpy.cumsum` for full documentation. + + See Also + -------- + numpy.cumsum : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', + """ + a.diagonal(offset=0, axis1=0, axis2=1) + + Return specified diagonals. In NumPy 1.9 the returned array is a + read-only view instead of a copy as in previous NumPy versions. In + a future version the read-only restriction will be removed. + + Refer to :func:`numpy.diagonal` for full documentation. + + See Also + -------- + numpy.diagonal : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot')) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', + """ + a.dump(file) + + Dump a pickle of the array to the specified file. + The array can be read back with pickle.load or numpy.load. + + Parameters + ---------- + file : str or Path + A string naming the dump file. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', + """ + a.dumps() + + Returns the pickle of the array as a string. + pickle.loads will convert the string back to an array. + + Parameters + ---------- + None + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', + """ + a.fill(value) + + Fill the array with a scalar value. + + Parameters + ---------- + value : scalar + All elements of `a` will be assigned this value. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2]) + >>> a.fill(0) + >>> a + array([0, 0]) + >>> a = np.empty(2) + >>> a.fill(1) + >>> a + array([1., 1.]) + + Fill expects a scalar value and always behaves the same as assigning + to a single array element. The following is a rare example where this + distinction is important: + + >>> a = np.array([None, None], dtype=object) + >>> a[0] = np.array(3) + >>> a + array([array(3), None], dtype=object) + >>> a.fill(np.array(3)) + >>> a + array([array(3), array(3)], dtype=object) + + Where other forms of assignments will unpack the array being assigned: + + >>> a[...] 
= np.array(3) + >>> a + array([3, 3], dtype=object) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', + """ + a.flatten(order='C') + + Return a copy of the array collapsed into one dimension. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. + 'F' means to flatten in column-major (Fortran- + style) order. 'A' means to flatten in column-major + order if `a` is Fortran *contiguous* in memory, + row-major order otherwise. 'K' means to flatten + `a` in the order the elements occur in memory. + The default is 'C'. + + Returns + ------- + y : ndarray + A copy of the input array, flattened to one dimension. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2], [3,4]]) + >>> a.flatten() + array([1, 2, 3, 4]) + >>> a.flatten('F') + array([1, 3, 2, 4]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', + """ + a.getfield(dtype, offset=0) + + Returns a field of the given array as a certain type. + + A field is a view of the array data with a given data-type. The values in + the view are determined by the given type and the offset into the current + array in bytes. The offset needs to be such that the view dtype fits in the + array dtype; for example an array of dtype complex128 has 16-byte elements. + If taking a view with a 32-bit integer (4 bytes), the offset needs to be + between 0 and 12 bytes. + + Parameters + ---------- + dtype : str or dtype + The data type of the view. The dtype size of the view can not be larger + than that of the array itself. + offset : int + Number of bytes to skip before beginning the element view. + + Examples + -------- + >>> import numpy as np + >>> x = np.diag([1.+1.j]*2) + >>> x[1, 1] = 2 + 4.j + >>> x + array([[1.+1.j, 0.+0.j], + [0.+0.j, 2.+4.j]]) + >>> x.getfield(np.float64) + array([[1., 0.], + [0., 2.]]) + + By choosing an offset of 8 bytes we can select the complex part of the + array for our view: + + >>> x.getfield(np.float64, offset=8) + array([[1., 0.], + [0., 4.]]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('item', + """ + a.item(*args) + + Copy an element of an array to a standard Python scalar and return it. + + Parameters + ---------- + \\*args : Arguments (variable number and type) + + * none: in this case, the method only works for arrays + with one element (`a.size == 1`), which element is + copied into a standard Python scalar object and returned. + + * int_type: this argument is interpreted as a flat index into + the array, specifying which element to copy and return. + + * tuple of int_types: functions as does a single int_type argument, + except that the argument is interpreted as an nd-index into the + array. + + Returns + ------- + z : Standard Python scalar object + A copy of the specified element of the array as a suitable + Python scalar + + Notes + ----- + When the data type of `a` is longdouble or clongdouble, item() returns + a scalar array object because there is no available Python scalar that + would not lose information. Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned. + + `item` is very similar to a[args], except, instead of an array scalar, + a standard Python scalar is returned. 
This can be useful for speeding up
+    access to elements of the array and doing arithmetic on elements of the
+    array using Python's optimized math.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.random.seed(123)
+    >>> x = np.random.randint(9, size=(3, 3))
+    >>> x
+    array([[2, 2, 6],
+           [1, 3, 6],
+           [1, 0, 1]])
+    >>> x.item(3)
+    1
+    >>> x.item(7)
+    0
+    >>> x.item((0, 1))
+    2
+    >>> x.item((2, 2))
+    1
+
+    For an array with object dtype, elements are returned as-is.
+
+    >>> a = np.array([np.int64(1)], dtype=object)
+    >>> a.item() #return np.int64
+    np.int64(1)
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('max',
+    """
+    a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Return the maximum along a given axis.
+
+    Refer to `numpy.amax` for full documentation.
+
+    See Also
+    --------
+    numpy.amax : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('mean',
+    """
+    a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
+
+    Returns the average of the array elements along given axis.
+
+    Refer to `numpy.mean` for full documentation.
+
+    See Also
+    --------
+    numpy.mean : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('min',
+    """
+    a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Return the minimum along a given axis.
+
+    Refer to `numpy.amin` for full documentation.
+
+    See Also
+    --------
+    numpy.amin : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero',
+    """
+    a.nonzero()
+
+    Return the indices of the elements that are non-zero.
+
+    Refer to `numpy.nonzero` for full documentation.
+
+    See Also
+    --------
+    numpy.nonzero : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('prod',
+    """
+    a.prod(axis=None, dtype=None, out=None, keepdims=False,
+           initial=1, where=True)
+
+    Return the product of the array elements over the given axis
+
+    Refer to `numpy.prod` for full documentation.
+
+    See Also
+    --------
+    numpy.prod : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('put',
+    """
+    a.put(indices, values, mode='raise')
+
+    Set ``a.flat[n] = values[n]`` for all `n` in indices.
+
+    Refer to `numpy.put` for full documentation.
+
+    See Also
+    --------
+    numpy.put : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel',
+    """
+    a.ravel([order])
+
+    Return a flattened array.
+
+    Refer to `numpy.ravel` for full documentation.
+
+    See Also
+    --------
+    numpy.ravel : equivalent function
+
+    ndarray.flat : a flat iterator on the array.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat',
+    """
+    a.repeat(repeats, axis=None)
+
+    Repeat elements of an array.
+
+    Refer to `numpy.repeat` for full documentation.
+
+    See Also
+    --------
+    numpy.repeat : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape',
+    """
+    a.reshape(shape, /, *, order='C', copy=None)
+
+    Returns an array containing the same data with a new shape.
+
+    Refer to `numpy.reshape` for full documentation.
+
+    See Also
+    --------
+    numpy.reshape : equivalent function
+
+    Notes
+    -----
+    Unlike the free function `numpy.reshape`, this method on `ndarray` allows
+    the elements of the shape parameter to be passed in as separate arguments.
+    For example, ``a.reshape(10, 11)`` is equivalent to
+    ``a.reshape((10, 11))``.
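+
+    A doctest sketch of this equivalence (an illustrative addition, not
+    upstream text; assumes the NumPy 2.x scalar repr):
+
+    >>> import numpy as np
+    >>> a = np.arange(6)
+    >>> (a.reshape(2, 3) == a.reshape((2, 3))).all()
+    np.True_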
+ + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('resize', + """ + a.resize(new_shape, refcheck=True) + + Change shape and size of array in-place. + + Parameters + ---------- + new_shape : tuple of ints, or `n` ints + Shape of resized array. + refcheck : bool, optional + If False, reference count will not be checked. Default is True. + + Returns + ------- + None + + Raises + ------ + ValueError + If `a` does not own its own data or references or views to it exist, + and the data memory must be changed. + PyPy only: will always raise if the data memory must be changed, since + there is no reliable way to determine if references or views to it + exist. + + SystemError + If the `order` keyword argument is specified. This behaviour is a + bug in NumPy. + + See Also + -------- + resize : Return a new array with the specified shape. + + Notes + ----- + This reallocates space for the data area if necessary. + + Only contiguous arrays (data elements consecutive in memory) can be + resized. + + The purpose of the reference count check is to make sure you + do not use this array as a buffer for another Python object and then + reallocate the memory. However, reference counts can increase in + other ways so if you are sure that you have not shared the memory + for this array with another Python object, then you may safely set + `refcheck` to False. + + Examples + -------- + Shrinking an array: array is flattened (in the order that the data are + stored in memory), resized, and reshaped: + + >>> import numpy as np + + >>> a = np.array([[0, 1], [2, 3]], order='C') + >>> a.resize((2, 1)) + >>> a + array([[0], + [1]]) + + >>> a = np.array([[0, 1], [2, 3]], order='F') + >>> a.resize((2, 1)) + >>> a + array([[0], + [2]]) + + Enlarging an array: as above, but missing entries are filled with zeros: + + >>> b = np.array([[0, 1], [2, 3]]) + >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple + >>> b + array([[0, 1, 2], + [3, 0, 0]]) + + Referencing an array prevents resizing... + + >>> c = a + >>> a.resize((1, 1)) + Traceback (most recent call last): + ... + ValueError: cannot resize an array that references or is referenced ... + + Unless `refcheck` is False: + + >>> a.resize((1, 1), refcheck=False) + >>> a + array([[0]]) + >>> c + array([[0]]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('round', + """ + a.round(decimals=0, out=None) + + Return `a` with each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. + + See Also + -------- + numpy.around : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted', + """ + a.searchsorted(v, side='left', sorter=None) + + Find indices where elements of v should be inserted in a to maintain order. + + For full documentation, see `numpy.searchsorted` + + See Also + -------- + numpy.searchsorted : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield', + """ + a.setfield(val, dtype, offset=0) + + Put a value into a specified place in a field defined by a data-type. + + Place `val` into `a`'s field defined by `dtype` and beginning `offset` + bytes into the field. + + Parameters + ---------- + val : object + Value to be placed in field. + dtype : dtype object + Data-type of the field in which to place `val`. + offset : int, optional + The number of bytes into the field at which to place `val`. 
+
+    Returns
+    -------
+    None
+
+    See Also
+    --------
+    getfield
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.eye(3)
+    >>> x.getfield(np.float64)
+    array([[1., 0., 0.],
+           [0., 1., 0.],
+           [0., 0., 1.]])
+    >>> x.setfield(3, np.int32)
+    >>> x.getfield(np.int32)
+    array([[3, 3, 3],
+           [3, 3, 3],
+           [3, 3, 3]], dtype=int32)
+    >>> x
+    array([[1.0e+000, 1.5e-323, 1.5e-323],
+           [1.5e-323, 1.0e+000, 1.5e-323],
+           [1.5e-323, 1.5e-323, 1.0e+000]])
+    >>> x.setfield(np.eye(3), np.int32)
+    >>> x
+    array([[1., 0., 0.],
+           [0., 1., 0.],
+           [0., 0., 1.]])
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags',
+    """
+    a.setflags(write=None, align=None, uic=None)
+
+    Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY,
+    respectively.
+
+    These Boolean-valued flags affect how numpy interprets the memory
+    area used by `a` (see Notes below). The ALIGNED flag can only
+    be set to True if the data is actually aligned according to the type.
+    The WRITEBACKIFCOPY flag can never be set
+    to True. The flag WRITEABLE can only be set to True if the array owns its
+    own memory, or the ultimate owner of the memory exposes a writeable buffer
+    interface, or is a string. (The exception for string is made so that
+    unpickling can be done without copying memory.)
+
+    Parameters
+    ----------
+    write : bool, optional
+        Describes whether or not `a` can be written to.
+    align : bool, optional
+        Describes whether or not `a` is aligned properly for its type.
+    uic : bool, optional
+        Describes whether or not `a` is a copy of another "base" array.
+
+    Notes
+    -----
+    Array flags provide information about how the memory area used
+    for the array is to be interpreted. There are 7 Boolean flags
+    in use, only three of which can be changed by the user:
+    WRITEBACKIFCOPY, WRITEABLE, and ALIGNED.
+
+    WRITEABLE (W) the data area can be written to;
+
+    ALIGNED (A) the data and strides are aligned appropriately for the hardware
+    (as determined by the compiler);
+
+    WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
+    by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
+    called, the base array will be updated with the contents of this array.
+
+    All flags can be accessed using the single (upper case) letter as well
+    as the full name.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> y = np.array([[3, 1, 7],
+    ...               [2, 0, 0],
+    ...               [8, 5, 9]])
+    >>> y
+    array([[3, 1, 7],
+           [2, 0, 0],
+           [8, 5, 9]])
+    >>> y.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : True
+      WRITEABLE : True
+      ALIGNED : True
+      WRITEBACKIFCOPY : False
+    >>> y.setflags(write=0, align=0)
+    >>> y.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : True
+      WRITEABLE : False
+      ALIGNED : False
+      WRITEBACKIFCOPY : False
+    >>> y.setflags(uic=1)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: cannot set WRITEBACKIFCOPY flag to True
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('sort',
+    """
+    a.sort(axis=-1, kind=None, order=None)
+
+    Sort an array in-place. Refer to `numpy.sort` for full documentation.
+
+    Parameters
+    ----------
+    axis : int, optional
+        Axis along which to sort. Default is -1, which means sort along the
+        last axis.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+        and 'mergesort' use timsort under the covers and, in general, the
+        actual implementation will vary with datatype.
The 'mergesort' option
+        is retained for backwards compatibility.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    See Also
+    --------
+    numpy.sort : Return a sorted copy of an array.
+    numpy.argsort : Indirect sort.
+    numpy.lexsort : Indirect stable sort on multiple keys.
+    numpy.searchsorted : Find elements in sorted array.
+    numpy.partition: Partial sort.
+
+    Notes
+    -----
+    See `numpy.sort` for notes on the different sorting algorithms.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1,4], [3,1]])
+    >>> a.sort(axis=1)
+    >>> a
+    array([[1, 4],
+           [1, 3]])
+    >>> a.sort(axis=0)
+    >>> a
+    array([[1, 3],
+           [1, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
+    >>> a.sort(order='y')
+    >>> a
+    array([(b'c', 1), (b'a', 2)],
+          dtype=[('x', 'S1'), ('y', '<i8')])
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('partition',
+    """
+    a.partition(kth, axis=-1, kind='introselect', order=None)
+
+    Partially sorts the elements in the array in such a way that the value of
+    the element in k-th position is in the position it would be in a sorted
+    array. In the output array, all elements smaller than the k-th element
+    are located to the left of this element and all equal or greater are
+    located to its right. The ordering of the elements in the two partitions
+    on either side of the k-th element in the output array is undefined.
+
+    Parameters
+    ----------
+    kth : int or sequence of ints
+        Element index to partition by. The kth element value will be in its
+        final sorted position and all smaller elements will be moved before it
+        and all equal or greater elements behind it.
+        The order of all elements in the partitions is undefined.
+        If provided with a sequence of kth it will partition all elements
+        indexed by kth of them into their sorted position at once.
+    axis : int, optional
+        Axis along which to sort. Default is -1, which means sort along the
+        last axis.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need to be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    See Also
+    --------
+    numpy.partition : Return a partitioned copy of an array.
+    argpartition : Indirect partition.
+    sort : Full sort.
+
+    Notes
+    -----
+    See ``np.partition`` for notes on the different algorithms.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([3, 4, 2, 1])
+    >>> a.partition(3)
+    >>> a
+    array([2, 1, 3, 4]) # may vary
+
+    >>> a.partition((1, 3))
+    >>> a
+    array([1, 2, 3, 4])
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze',
+    """
+    a.squeeze(axis=None)
+
+    Remove axes of length one from `a`.
+
+    Refer to `numpy.squeeze` for full documentation.
+
+    See Also
+    --------
+    numpy.squeeze : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('std',
+    """
+    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+    Returns the standard deviation of the array elements along given axis.
+
+    Refer to `numpy.std` for full documentation.
+
+    See Also
+    --------
+    numpy.std : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('sum',
+    """
+    a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
+
+    Return the sum of the array elements over the given axis.
+
+    Refer to `numpy.sum` for full documentation.
+
+    See Also
+    --------
+    numpy.sum : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes',
+    """
+    a.swapaxes(axis1, axis2)
+
+    Return a view of the array with `axis1` and `axis2` interchanged.
+
+    Refer to `numpy.swapaxes` for full documentation.
+
+    See Also
+    --------
+    numpy.swapaxes : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('take',
+    """
+    a.take(indices, axis=None, out=None, mode='raise')
+
+    Return an array formed from the elements of `a` at the given indices.
+
+    Refer to `numpy.take` for full documentation.
+
+    See Also
+    --------
+    numpy.take : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile',
+    """
+    a.tofile(fid, sep="", format="%s")
+
+    Write array to a file as text or binary (default).
+
+    Data is always written in 'C' order, independent of the order of `a`.
+    The data produced by this method can be recovered using the function
+    fromfile().
+
+    Parameters
+    ----------
+    fid : file or str or Path
+        An open file object, or a string containing a filename.
+    sep : str
+        Separator between array items for text output.
+        If "" (empty), a binary file is written, equivalent to
+        ``file.write(a.tobytes())``.
+    format : str
+        Format string for text file output.
+        Each entry in the array is formatted to text by first converting
+        it to the closest Python type, and then using "format" % item.
+
+    Notes
+    -----
+    This is a convenience function for quick storage of array data.
+    Information on endianness and precision is lost, so this method is not a
+    good choice for files intended to archive data or transport data between
+    machines with different endianness. Some of these problems can be overcome
+    by outputting the data as text files, at the expense of speed and file
+    size.
+
+    When fid is a file object, array contents are directly written to the
+    file, bypassing the file object's ``write`` method. As a result, tofile
+    cannot be used with files objects supporting compression (e.g., GzipFile)
+    or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist',
+    """
+    a.tolist()
+
+    Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
+
+    Return a copy of the array data as a (nested) Python list.
+    Data items are converted to the nearest compatible builtin Python type, via
+    the `~numpy.ndarray.item` function.
+
+    If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
+    not be a list at all, but a simple Python scalar.
+
+    Parameters
+    ----------
+    none
+
+    Returns
+    -------
+    y : object, or list of object, or list of list of object, or ...
+        The possibly nested list of array elements.
+
+    Notes
+    -----
+    The array may be recreated via ``a = np.array(a.tolist())``, although this
+    may sometimes lose precision.
+
+    Examples
+    --------
+    For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+    except that ``tolist`` changes numpy scalars to Python scalars:
+
+    >>> import numpy as np
+    >>> a = np.uint32([1, 2])
+    >>> a_list = list(a)
+    >>> a_list
+    [np.uint32(1), np.uint32(2)]
+    >>> type(a_list[0])
+    <class 'numpy.uint32'>
+    >>> a_tolist = a.tolist()
+    >>> a_tolist
+    [1, 2]
+    >>> type(a_tolist[0])
+    <class 'int'>
+
+    Additionally, for a 2D array, ``tolist`` applies recursively:
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> list(a)
+    [array([1, 2]), array([3, 4])]
+    >>> a.tolist()
+    [[1, 2], [3, 4]]
+
+    The base case for this recursion is a 0D array:
+
+    >>> a = np.array(1)
+    >>> list(a)
+    Traceback (most recent call last):
+      ...
+    TypeError: iteration over a 0-d array
+    >>> a.tolist()
+    1
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', """
+    a.tobytes(order='C')
+
+    Construct Python bytes containing the raw data bytes in the array.
+
+    Constructs Python bytes showing a copy of the raw contents of
+    data memory. The bytes object is produced in C-order by default.
+    This behavior is controlled by the ``order`` parameter.
+
+    Parameters
+    ----------
+    order : {'C', 'F', 'A'}, optional
+        Controls the memory layout of the bytes object. 'C' means C-order,
+        'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
+        Fortran contiguous, 'C' otherwise. Default is 'C'.
+
+    Returns
+    -------
+    s : bytes
+        Python bytes exhibiting a copy of `a`'s raw data.
+
+    See also
+    --------
+    frombuffer
+        Inverse of this operation, construct a 1-dimensional array from Python
+        bytes.
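+
+    A roundtrip sketch with ``frombuffer`` (an illustrative addition, not
+    upstream text):
+
+    >>> import numpy as np
+    >>> a = np.arange(4, dtype=np.int16)
+    >>> np.array_equal(np.frombuffer(a.tobytes(), dtype=np.int16), a)
+    True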
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
+    >>> x.tobytes()
+    b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
+    >>> x.tobytes('C') == x.tobytes()
+    True
+    >>> x.tobytes('F')
+    b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('trace',
+    """
+    a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+
+    Return the sum along diagonals of the array.
+
+    Refer to `numpy.trace` for full documentation.
+
+    See Also
+    --------
+    numpy.trace : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose',
+    """
+    a.transpose(*axes)
+
+    Returns a view of the array with axes transposed.
+
+    Refer to `numpy.transpose` for full documentation.
+
+    Parameters
+    ----------
+    axes : None, tuple of ints, or `n` ints
+
+     * None or no argument: reverses the order of the axes.
+
+     * tuple of ints: `i` in the `j`-th place in the tuple means that the
+       array's `i`-th axis becomes the transposed array's `j`-th axis.
+
+     * `n` ints: same as an n-tuple of the same ints (this form is
+       intended simply as a "convenience" alternative to the tuple form).
+
+    Returns
+    -------
+    p : ndarray
+        View of the array with its axes suitably permuted.
+
+    See Also
+    --------
+    transpose : Equivalent function.
+    ndarray.T : Array property returning the array transposed.
+    ndarray.reshape : Give a new shape to an array without changing its data.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> a
+    array([[1, 2],
+           [3, 4]])
+    >>> a.transpose()
+    array([[1, 3],
+           [2, 4]])
+    >>> a.transpose((1, 0))
+    array([[1, 3],
+           [2, 4]])
+    >>> a.transpose(1, 0)
+    array([[1, 3],
+           [2, 4]])
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> a
+    array([1, 2, 3, 4])
+    >>> a.transpose()
+    array([1, 2, 3, 4])
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('var',
+    """
+    a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+    Returns the variance of the array elements, along given axis.
+
+    Refer to `numpy.var` for full documentation.
+
+    See Also
+    --------
+    numpy.var : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('view',
+    """
+    a.view([dtype][, type])
+
+    New view of array with the same data.
+
+    .. note::
+        Passing None for ``dtype`` is different from omitting the parameter,
+        since the former invokes ``dtype(None)`` which is an alias for
+        ``dtype('float64')``.
+
+    Parameters
+    ----------
+    dtype : data-type or ndarray sub-class, optional
+        Data-type descriptor of the returned view, e.g., float32 or int16.
+        Omitting it results in the view having the same data-type as `a`.
+        This argument can also be specified as an ndarray sub-class, which
+        then specifies the type of the returned object (this is equivalent to
+        setting the ``type`` parameter).
+    type : Python type, optional
+        Type of the returned view, e.g., ndarray or matrix. Again, omission
+        of the parameter results in type preservation.
+
+    Notes
+    -----
+    ``a.view()`` is used two different ways:
+
+    ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+    of the array's memory with a different data-type. This can cause a
+    reinterpretation of the bytes of memory.
+
+    ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+    returns an instance of `ndarray_subclass` that looks at the same array
+    (same shape, dtype, etc.) This does not cause a reinterpretation of the
+    memory.
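+
+    A two-line doctest contrasting these modes (an illustrative addition,
+    not upstream text):
+
+    >>> import numpy as np
+    >>> a = np.zeros(2, dtype=np.int16)
+    >>> a.view(np.uint16).dtype                        # reinterprets the bytes
+    dtype('uint16')
+    >>> a.view(np.recarray).__class__ is np.recarray   # changes only the Python type
+    True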
+
+    For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+    bytes per entry than the previous dtype (for example, converting a regular
+    array to a structured array), then the last axis of ``a`` must be
+    contiguous. This axis will be resized in the result.
+
+    .. versionchanged:: 1.23.0
+        Only the last axis needs to be contiguous. Previously, the entire array
+        had to be C-contiguous.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([(-1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+    Viewing array data using a different type and dtype:
+
+    >>> nonneg = np.dtype([("a", np.uint8), ("b", np.uint8)])
+    >>> y = x.view(dtype=nonneg, type=np.recarray)
+    >>> x["a"]
+    array([-1], dtype=int8)
+    >>> y.a
+    array([255], dtype=uint8)
+
+    Creating a view on a structured array so it can be used in calculations
+
+    >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+    >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+    >>> xv
+    array([[1, 2],
+           [3, 4]], dtype=int8)
+    >>> xv.mean(0)
+    array([2., 3.])
+
+    Making changes to the view changes the underlying array
+
+    >>> xv[0,1] = 20
+    >>> x
+    array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
+
+    Using a view to convert an array to a recarray:
+
+    >>> z = x.view(np.recarray)
+    >>> z.a
+    array([1, 3], dtype=int8)
+
+    Views share data:
+
+    >>> x[0] = (9, 10)
+    >>> z[0]
+    np.record((9, 10), dtype=[('a', 'i1'), ('b', 'i1')])
+
+    Views that change the dtype size (bytes per entry) should normally be
+    avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+    >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16)
+    >>> y = x[:, ::2]
+    >>> y
+    array([[1, 3],
+           [4, 6]], dtype=int16)
+    >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+    Traceback (most recent call last):
+        ...
+    ValueError: To change to a dtype of a different size, the last axis must be contiguous
+    >>> z = y.copy()
+    >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+    array([[(1, 3)],
+           [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+    However, views that change dtype are totally fine for arrays with a
+    contiguous last axis, even if the rest of the axes are not C-contiguous:
+
+    >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4)
+    >>> x.transpose(1, 0, 2).view(np.int16)
+    array([[[ 256,  770],
+            [3340, 3854]],
+
+           [[1284, 1798],
+            [4368, 4882]],
+
+           [[2312, 2826],
+            [5396, 5910]]], dtype=int16)
+
+    """))
+
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy._core.umath', 'frompyfunc',
+    """
+    frompyfunc(func, /, nin, nout, *[, identity])
+
+    Takes an arbitrary Python function and returns a NumPy ufunc.
+
+    Can be used, for example, to add broadcasting to a built-in Python
+    function (see Examples section).
+
+    Parameters
+    ----------
+    func : Python function object
+        An arbitrary Python function.
+    nin : int
+        The number of input arguments.
+    nout : int
+        The number of objects returned by `func`.
+    identity : object, optional
+        The value to use for the `~numpy.ufunc.identity` attribute of the resulting
+        object. If specified, this is equivalent to setting the underlying
+        C ``identity`` field to ``PyUFunc_IdentityValue``.
+        If omitted, the identity is set to ``PyUFunc_None``. Note that this is
+        _not_ equivalent to setting the identity to ``None``, which implies the
+        operation is reorderable.
+
+    Returns
+    -------
+    out : ufunc
+        Returns a NumPy universal function (``ufunc``) object.
+
+    See Also
+    --------
+    vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
+
+    Notes
+    -----
+    The returned ufunc always returns PyObject arrays.
+
+    Examples
+    --------
+    Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+    >>> import numpy as np
+    >>> oct_array = np.frompyfunc(oct, 1, 1)
+    >>> oct_array(np.array((10, 30, 100)))
+    array(['0o12', '0o36', '0o144'], dtype=object)
+    >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+    array(['0o12', '0o36', '0o144'], dtype='<U5')
+
+    """)
+
+add_newdoc('numpy._core.umath', '_add_newdoc_ufunc',
+    """
+    add_ufunc_docstring(ufunc, new_docstring)
+
+    Replace the docstring for a ufunc with new_docstring.
+    This method will only work if the current docstring for
+    the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+    Parameters
+    ----------
+    ufunc : numpy.ufunc
+        A ufunc whose current doc is NULL.
+    new_docstring : string
+        The new docstring for the ufunc.
+
+    Notes
+    -----
+    This method allocates memory for new_docstring on
+    the heap. Technically this creates a memory leak, since this
+    memory will not be reclaimed until the end of the program
+    even if the ufunc itself is removed. However this will only
+    be a problem if the user is repeatedly creating ufuncs with
+    no documentation, adding documentation via add_newdoc_ufunc,
+    and then throwing away the ufunc.
+    """)
+
+add_newdoc('numpy._core.multiarray', 'get_handler_name',
+    """
+    get_handler_name(a: ndarray) -> str,None
+
+    Return the name of the memory handler used by `a`. If not provided, return
+    the name of the memory handler that will be used to allocate data for the
+    next `ndarray` in this context. May return None if `a` does not own its
+    memory, in which case you can traverse ``a.base`` for a memory handler.
+    """)
+
+add_newdoc('numpy._core.multiarray', 'get_handler_version',
+    """
+    get_handler_version(a: ndarray) -> int,None
+
+    Return the version of the memory handler used by `a`. If not provided,
+    return the version of the memory handler that will be used to allocate data
+    for the next `ndarray` in this context. May return None if `a` does not own
+    its memory, in which case you can traverse ``a.base`` for a memory handler.
+    """)
+
+add_newdoc('numpy._core._multiarray_umath', '_array_converter',
+    """
+    _array_converter(*array_likes)
+
+    Helper to convert one or more objects to arrays. Integrates machinery
+    to deal with the ``result_type`` and ``__array_wrap__``.
+
+    The reason for this is that e.g. ``result_type`` needs to convert to arrays
+    to find the ``dtype``. But converting to an array before calling
+    ``result_type`` would incorrectly "forget" whether it was a Python int,
+    float, or complex.
+    """)
+
+add_newdoc(
+    'numpy._core._multiarray_umath', '_array_converter', ('scalar_input',
+    """
+    A tuple which indicates for each input whether it was a scalar that
+    was coerced to a 0-D array (and was not already an array or something
+    converted via a protocol like ``__array__()``).
+    """))
+
+add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('as_arrays',
+    """
+    as_arrays(/, subok=True, pyscalars="convert_if_no_array")
+
+    Return the inputs as arrays or scalars.
+
+    Parameters
+    ----------
+    subok : True or False, optional
+        Whether array subclasses are preserved.
+    pyscalars : {"convert", "preserve", "convert_if_no_array"}, optional
+        To allow NEP 50 weak promotion later, it may be desirable to preserve
+        Python scalars. As default, these are preserved unless all inputs
+        are Python scalars. "convert" enforces an array return.
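+
+    A minimal usage sketch (an illustrative addition, not upstream text;
+    this is a private helper, so the exact output shown is an assumption
+    based on the ``scalar_input`` description above):
+
+    >>> import numpy as np
+    >>> from numpy._core._multiarray_umath import _array_converter
+    >>> conv = _array_converter(2.0, np.arange(3))
+    >>> conv.scalar_input        # the Python float was coerced, the array was not
+    (True, False)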
+ """)) + +add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('result_type', + """result_type(/, extra_dtype=None, ensure_inexact=False) + + Find the ``result_type`` just as ``np.result_type`` would, but taking + into account that the original inputs (before converting to an array) may + have been Python scalars with weak promotion. + + Parameters + ---------- + extra_dtype : dtype instance or class + An additional DType or dtype instance to promote (e.g. could be used + to ensure the result precision is at least float32). + ensure_inexact : True or False + When ``True``, ensures a floating point (or complex) result replacing + the ``arr * 1.`` or ``result_type(..., 0.0)`` pattern. + """)) + +add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('wrap', + """ + wrap(arr, /, to_scalar=None) + + Call ``__array_wrap__`` on ``arr`` if ``arr`` is not the same subclass + as the input the ``__array_wrap__`` method was retrieved from. + + Parameters + ---------- + arr : ndarray + The object to be wrapped. Normally an ndarray or subclass, + although for backward compatibility NumPy scalars are also accepted + (these will be converted to a NumPy array before being passed on to + the ``__array_wrap__`` method). + to_scalar : {True, False, None}, optional + When ``True`` will convert a 0-d array to a scalar via ``result[()]`` + (with a fast-path for non-subclasses). If ``False`` the result should + be an array-like (as ``__array_wrap__`` is free to return a non-array). + By default (``None``), a scalar is returned if all inputs were scalar. + """)) + + +add_newdoc('numpy._core.multiarray', '_get_madvise_hugepage', + """ + _get_madvise_hugepage() -> bool + + Get use of ``madvise (2)`` MADV_HUGEPAGE support when + allocating the array data. Returns the currently set value. + See `global_state` for more information. + """) + +add_newdoc('numpy._core.multiarray', '_set_madvise_hugepage', + """ + _set_madvise_hugepage(enabled: bool) -> bool + + Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when + allocating the array data. Returns the previously set value. + See `global_state` for more information. + """) + + +############################################################################## +# +# Documentation for ufunc attributes and methods +# +############################################################################## + + +############################################################################## +# +# ufunc object +# +############################################################################## + +add_newdoc('numpy._core', 'ufunc', + """ + Functions that operate element by element on whole arrays. + + To see the documentation for a specific ufunc, use `info`. For + example, ``np.info(np.sin)``. Because ufuncs are written in C + (for speed) and linked into Python with NumPy's ufunc facility, + Python's help() function finds this page whenever help() is called + on a ufunc. + + A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. + + **Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)`` + + Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. + + The broadcasting rules are: + + * Dimensions of length 1 may be prepended to either array. + * Arrays may be repeated along dimensions of length 1. + + Parameters + ---------- + *x : array_like + Input arrays. + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location(s) into which the result(s) are stored. 
+ If not provided or None, new array(s) are created by the ufunc. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional, + or a tuple with length equal to the number of outputs (where None + can be used for allocation by the ufunc). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + + where : array_like, optional + This condition is broadcast over the input. At locations where the + condition is True, the `out` array will be set to the ufunc result. + Elsewhere, the `out` array will retain its original value. + Note that if an uninitialized `out` array is created via the default + ``out=None``, locations within it where the condition is False will + remain uninitialized. + **kwargs + For other keyword-only arguments, see the :ref:`ufunc docs `. + + Returns + ------- + r : ndarray or tuple of ndarray + `r` will have the shape that the arrays in `x` broadcast to; if `out` is + provided, it will be returned. If not, `r` will be allocated and + may contain uninitialized values. If the function has more than one + output, then the result will be a tuple of arrays. + + """) + + +############################################################################## +# +# ufunc attributes +# +############################################################################## + +add_newdoc('numpy._core', 'ufunc', ('identity', + """ + The identity value. + + Data attribute containing the identity element for the ufunc, + if it has one. If it does not, the attribute value is None. + + Examples + -------- + >>> import numpy as np + >>> np.add.identity + 0 + >>> np.multiply.identity + 1 + >>> print(np.power.identity) + None + >>> print(np.exp.identity) + None + """)) + +add_newdoc('numpy._core', 'ufunc', ('nargs', + """ + The number of arguments. + + Data attribute containing the number of arguments the ufunc takes, including + optional ones. + + Notes + ----- + Typically this value will be one more than what you might expect + because all ufuncs take the optional "out" argument. + + Examples + -------- + >>> import numpy as np + >>> np.add.nargs + 3 + >>> np.multiply.nargs + 3 + >>> np.power.nargs + 3 + >>> np.exp.nargs + 2 + """)) + +add_newdoc('numpy._core', 'ufunc', ('nin', + """ + The number of inputs. + + Data attribute containing the number of arguments the ufunc treats as input. + + Examples + -------- + >>> import numpy as np + >>> np.add.nin + 2 + >>> np.multiply.nin + 2 + >>> np.power.nin + 2 + >>> np.exp.nin + 1 + """)) + +add_newdoc('numpy._core', 'ufunc', ('nout', + """ + The number of outputs. + + Data attribute containing the number of arguments the ufunc treats as output. + + Notes + ----- + Since all ufuncs can take output arguments, this will always be at least 1. + + Examples + -------- + >>> import numpy as np + >>> np.add.nout + 1 + >>> np.multiply.nout + 1 + >>> np.power.nout + 1 + >>> np.exp.nout + 1 + + """)) + +add_newdoc('numpy._core', 'ufunc', ('ntypes', + """ + The number of types. + + The number of numerical NumPy types - of which there are 18 total - on which + the ufunc can operate. + + See Also + -------- + numpy.ufunc.types + + Examples + -------- + >>> import numpy as np + >>> np.add.ntypes + 22 + >>> np.multiply.ntypes + 23 + >>> np.power.ntypes + 21 + >>> np.exp.ntypes + 10 + >>> np.remainder.ntypes + 16 + + """)) + +add_newdoc('numpy._core', 'ufunc', ('types', + """ + Returns a list with types grouped input->output. 
+
+    Data attribute listing the data-type "Domain-Range" groupings the ufunc can
+    deliver. The data-types are given using the character codes.
+
+    See Also
+    --------
+    numpy.ufunc.ntypes
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.add.types
+    ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', ...
+
+    >>> np.power.types
+    ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ...
+
+    >>> np.exp.types
+    ['e->e', 'f->f', 'd->d', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
+
+    >>> np.remainder.types
+    ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ...
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('signature',
+    """
+    Definition of the core elements a generalized ufunc operates on.
+
+    The signature determines how the dimensions of each input/output array
+    are split into core and loop dimensions:
+
+    1. Each dimension in the signature is matched to a dimension of the
+       corresponding passed-in array, starting from the end of the shape tuple.
+    2. Core dimensions assigned to the same label in the signature must have
+       exactly matching sizes, no broadcasting is performed.
+    3. The core dimensions are removed from all inputs and the remaining
+       dimensions are broadcast together, defining the loop dimensions.
+
+    Notes
+    -----
+    Generalized ufuncs are used internally in many linalg functions, and in
+    the testing suite; the examples below are taken from these.
+    For ufuncs that operate on scalars, the signature is None, which is
+    equivalent to '()' for every argument.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.linalg._umath_linalg.det.signature
+    '(m,m)->()'
+    >>> np.matmul.signature
+    '(n?,k),(k,m?)->(n?,m?)'
+    >>> np.add.signature is None
+    True  # equivalent to '(),()->()'
+    """))
+
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
+add_newdoc('numpy._core', 'ufunc', ('reduce',
+    """
+    reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Reduces `array`'s dimension by one, by applying ufunc along one axis.
+
+    Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
+    :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+    the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
+    ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+    For a one-dimensional array, reduce produces results equivalent to:
+    ::
+
+     r = op.identity # op = ufunc
+     for i in range(len(A)):
+       r = op(r, A[i])
+     return r
+
+    For example, add.reduce() is equivalent to sum().
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a reduction is performed.
+        The default (`axis` = 0) is to perform a reduction over the first
+        dimension of the input array. `axis` may be negative, in
+        which case it counts from the last to the first axis.
+
+        If this is None, a reduction is performed over all the axes.
+        If this is a tuple of ints, a reduction is performed on multiple
+        axes, instead of a single axis or all the axes as before.
+
+        For operations which are either not commutative or not associative,
+        doing a reduction over multiple axes is not well-defined. The
+        ufuncs do not currently raise an exception in this case, but will
+        likely do so in the future.
+ dtype : data-type code, optional + The data type used to perform the operation. Defaults to that of + ``out`` if given, and the data type of ``array`` otherwise (though + upcast to conserve precision for some cases, such as + ``numpy.add.reduce`` for integer or boolean input). + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional + (which is useful especially for object dtype), or a 1-element tuple + (latter for consistency with ``ufunc.__call__``). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `array`. + initial : scalar, optional + The value with which to start the reduction. + If the ufunc has no identity or the dtype is object, this defaults + to None - otherwise it defaults to ufunc.identity. + If ``None`` is given, the first element of the reduction is used, + and an error is thrown if the reduction is empty. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `array`, and selects elements to include in the reduction. Note + that for ufuncs like ``minimum`` that do not have an identity + defined, one has to pass in also ``initial``. + + Returns + ------- + r : ndarray + The reduced array. If `out` was supplied, `r` is a reference to it. + + Examples + -------- + >>> import numpy as np + >>> np.multiply.reduce([2,3,5]) + 30 + + A multi-dimensional array example: + + >>> X = np.arange(8).reshape((2,2,2)) + >>> X + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.add.reduce(X, 0) + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X) # confirm: default axis value is 0 + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X, 1) + array([[ 2, 4], + [10, 12]]) + >>> np.add.reduce(X, 2) + array([[ 1, 5], + [ 9, 13]]) + + You can use the ``initial`` keyword argument to initialize the reduction + with a different value, and ``where`` to select specific elements to include: + + >>> np.add.reduce([10], initial=5) + 15 + >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) + array([14., 14.]) + >>> a = np.array([10., np.nan, 10]) + >>> np.add.reduce(a, where=~np.isnan(a)) + 20.0 + + Allows reductions of empty arrays where they would normally fail, i.e. + for ufuncs without an identity. + + >>> np.minimum.reduce([], initial=np.inf) + inf + >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False]) + array([ 1., 10.]) + >>> np.minimum.reduce([]) + Traceback (most recent call last): + ... + ValueError: zero-size array to reduction operation minimum which has no identity + """)) + +add_newdoc('numpy._core', 'ufunc', ('accumulate', + """ + accumulate(array, axis=0, dtype=None, out=None) + + Accumulate the result of applying the operator to all elements. + + For a one-dimensional array, accumulate produces results equivalent to:: + + r = np.empty(len(A)) + t = op.identity # op = the ufunc being applied to A's elements + for i in range(len(A)): + t = op(t, A[i]) + r[i] = t + return r + + For example, add.accumulate() is equivalent to np.cumsum(). 
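+
+    A doctest sketch of that equivalence (an illustrative addition, not
+    upstream text):
+
+    >>> import numpy as np
+    >>> a = np.array([1, 2, 3, 4])
+    >>> np.array_equal(np.add.accumulate(a), np.cumsum(a))
+    True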
+ + For a multi-dimensional array, accumulate is applied along only one + axis (axis zero by default; see Examples below) so repeated use is + necessary if one wants to accumulate over multiple axes. + + Parameters + ---------- + array : array_like + The array to act on. + axis : int, optional + The axis along which to apply the accumulation; default is zero. + dtype : data-type code, optional + The data-type used to represent the intermediate results. Defaults + to the data-type of the output array if such is provided, or the + data-type of the input array if no output array is provided. + out : ndarray, None, or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + For consistency with ``ufunc.__call__``, if passed as a keyword + argument, can be Ellipses (``out=...``, which has the same effect + as None as an array is always returned), or a 1-element tuple. + + Returns + ------- + r : ndarray + The accumulated values. If `out` was supplied, `r` is a reference to + `out`. + + Examples + -------- + 1-D array examples: + + >>> import numpy as np + >>> np.add.accumulate([2, 3, 5]) + array([ 2, 5, 10]) + >>> np.multiply.accumulate([2, 3, 5]) + array([ 2, 6, 30]) + + 2-D array examples: + + >>> I = np.eye(2) + >>> I + array([[1., 0.], + [0., 1.]]) + + Accumulate along axis 0 (rows), down columns: + + >>> np.add.accumulate(I, 0) + array([[1., 0.], + [1., 1.]]) + >>> np.add.accumulate(I) # no axis specified = axis zero + array([[1., 0.], + [1., 1.]]) + + Accumulate along axis 1 (columns), through rows: + + >>> np.add.accumulate(I, 1) + array([[1., 1.], + [0., 1.]]) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('reduceat', + """ + reduceat(array, indices, axis=0, dtype=None, out=None) + + Performs a (local) reduce with specified slices over a single axis. + + For i in ``range(len(indices))``, `reduceat` computes + ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th + generalized "row" parallel to `axis` in the final result (i.e., in a + 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if + `axis = 1`, it becomes the i-th column). There are three exceptions to this: + + * when ``i = len(indices) - 1`` (so for the last index), + ``indices[i+1] = array.shape[axis]``. + * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is + simply ``array[indices[i]]``. + * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised. + + The shape of the output depends on the size of `indices`, and may be + larger than `array` (this happens if ``len(indices) > array.shape[axis]``). + + Parameters + ---------- + array : array_like + The array to act on. + indices : array_like + Paired indices, comma separated (not colon), specifying slices to + reduce. + axis : int, optional + The axis along which to apply the reduceat. + dtype : data-type code, optional + The data type used to perform the operation. Defaults to that of + ``out`` if given, and the data type of ``array`` otherwise (though + upcast to conserve precision for some cases, such as + ``numpy.add.reduce`` for integer or boolean input). + out : ndarray, None, or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. 
+ For consistency with ``ufunc.__call__``, if passed as a keyword + argument, can be Ellipses (``out=...``, which has the same effect + as None as an array is always returned), or a 1-element tuple. + + Returns + ------- + r : ndarray + The reduced values. If `out` was supplied, `r` is a reference to + `out`. + + Notes + ----- + A descriptive example: + + If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as + ``ufunc.reduceat(array, indices)[::2]`` where `indices` is + ``range(len(array) - 1)`` with a zero placed + in every other element: + ``indices = zeros(2 * len(array) - 1)``, + ``indices[1::2] = range(1, len(array))``. + + Don't be fooled by this attribute's name: `reduceat(array)` is not + necessarily smaller than `array`. + + Examples + -------- + To take the running sum of four successive values: + + >>> import numpy as np + >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] + array([ 6, 10, 14, 18]) + + A 2-D example: + + >>> x = np.linspace(0, 15, 16).reshape(4,4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + + :: + + # reduce such that the result has the following five rows: + # [row1 + row2 + row3] + # [row4] + # [row2] + # [row3] + # [row1 + row2 + row3 + row4] + + >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) + array([[12., 15., 18., 21.], + [12., 13., 14., 15.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [24., 28., 32., 36.]]) + + :: + + # reduce such that result has the following two columns: + # [col1 * col2 * col3, col4] + + >>> np.multiply.reduceat(x, [0, 3], 1) + array([[ 0., 3.], + [ 120., 7.], + [ 720., 11.], + [2184., 15.]]) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('outer', + r""" + outer(A, B, /, **kwargs) + + Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. + + Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of + ``op.outer(A, B)`` is an array of dimension M + N such that: + + .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = + op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) + + For `A` and `B` one-dimensional, this is equivalent to:: + + r = empty(len(A),len(B)) + for i in range(len(A)): + for j in range(len(B)): + r[i,j] = op(A[i], B[j]) # op = ufunc in question + + Parameters + ---------- + A : array_like + First array + B : array_like + Second array + kwargs : any + Arguments to pass on to the ufunc. Typically `dtype` or `out`. + See `ufunc` for a comprehensive overview of all available arguments. + + Returns + ------- + r : ndarray + Output array + + See Also + -------- + numpy.outer : A less powerful version of ``np.multiply.outer`` + that `ravel`\ s all inputs to 1D. This exists + primarily for compatibility with old code. + + tensordot : ``np.tensordot(a, b, axes=((), ()))`` and + ``np.multiply.outer(a, b)`` behave same for all + dimensions of a and b. + + Examples + -------- + >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) + array([[ 4, 5, 6], + [ 8, 10, 12], + [12, 15, 18]]) + + A multi-dimensional example: + + >>> A = np.array([[1, 2, 3], [4, 5, 6]]) + >>> A.shape + (2, 3) + >>> B = np.array([[1, 2, 3, 4]]) + >>> B.shape + (1, 4) + >>> C = np.multiply.outer(A, B) + >>> C.shape; C + (2, 3, 1, 4) + array([[[[ 1, 2, 3, 4]], + [[ 2, 4, 6, 8]], + [[ 3, 6, 9, 12]]], + [[[ 4, 8, 12, 16]], + [[ 5, 10, 15, 20]], + [[ 6, 12, 18, 24]]]]) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('at', + """ + at(a, indices, b=None, /) + + Performs unbuffered in place operation on operand 'a' for elements + specified by 'indices'. 
For addition ufunc, this method is equivalent to + ``a[indices] += b``, except that results are accumulated for elements that + are indexed more than once. For example, ``a[[0,0]] += 1`` will only + increment the first element once because of buffering, whereas + ``add.at(a, [0,0], 1)`` will increment the first element twice. + + Parameters + ---------- + a : array_like + The array to perform in place operation on. + indices : array_like or tuple + Array like index object or slice object for indexing into first + operand. If first operand has multiple dimensions, indices can be a + tuple of array like index objects or slice objects. + b : array_like + Second operand for ufuncs requiring two operands. Operand must be + broadcastable over first operand after indexing or slicing. + + Examples + -------- + Set items 0 and 1 to their negative values: + + >>> import numpy as np + >>> a = np.array([1, 2, 3, 4]) + >>> np.negative.at(a, [0, 1]) + >>> a + array([-1, -2, 3, 4]) + + Increment items 0 and 1, and increment item 2 twice: + + >>> a = np.array([1, 2, 3, 4]) + >>> np.add.at(a, [0, 1, 2, 2], 1) + >>> a + array([2, 3, 5, 4]) + + Add items 0 and 1 in first array to second array, + and store results in first array: + + >>> a = np.array([1, 2, 3, 4]) + >>> b = np.array([1, 2]) + >>> np.add.at(a, [0, 1], b) + >>> a + array([2, 4, 3, 4]) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes', + """ + resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False) + + Find the dtypes NumPy will use for the operation. Both input and + output dtypes are returned and may differ from those provided. + + .. note:: + + This function always applies NEP 50 rules since it is not provided + any actual values. The Python types ``int``, ``float``, and + ``complex`` thus behave weak and should be passed for "untyped" + Python input. + + Parameters + ---------- + dtypes : tuple of dtypes, None, or literal int, float, complex + The input dtypes for each operand. Output operands can be + None, indicating that the dtype must be found. + signature : tuple of DTypes or None, optional + If given, enforces exact DType (classes) of the specific operand. + The ufunc ``dtype`` argument is equivalent to passing a tuple with + only output dtypes set. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + The casting mode when casting is necessary. This is identical to + the ufunc call casting modes. + reduction : boolean + If given, the resolution assumes a reduce operation is happening + which slightly changes the promotion and type resolution rules. + `dtypes` is usually something like ``(None, np.dtype("i2"), None)`` + for reductions (first input is also the output). + + .. note:: + + The default casting mode is "same_kind", however, as of + NumPy 1.24, NumPy uses "unsafe" for reductions. + + Returns + ------- + dtypes : tuple of dtypes + The dtypes which NumPy would use for the calculation. Note that + dtypes may not match the passed in ones (casting is necessary). + + + Examples + -------- + This API requires passing dtypes, define them for convenience: + + >>> import numpy as np + >>> int32 = np.dtype("int32") + >>> float32 = np.dtype("float32") + + The typical ufunc call does not pass an output dtype. 
`numpy.add` has two
+    inputs and one output, so leave the output as ``None`` (not provided):
+
+    >>> np.add.resolve_dtypes((int32, float32, None))
+    (dtype('float64'), dtype('float64'), dtype('float64'))
+
+    The loop found uses "float64" for all operands (including the output);
+    the first input would be cast.
+
+    ``resolve_dtypes`` supports "weak" handling for Python scalars by passing
+    ``int``, ``float``, or ``complex``:
+
+    >>> np.add.resolve_dtypes((float32, float, None))
+    (dtype('float32'), dtype('float32'), dtype('float32'))
+
+    Here the Python ``float`` behaves similarly to a Python value ``0.0``
+    in a ufunc call. (See :ref:`NEP 50 <NEP50>` for details.)
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context',
+    """
+    _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False)
+
+    See `numpy.ufunc.resolve_dtypes` for parameter information. This
+    function is considered *unstable*. You may use it, but the returned
+    information is NumPy version specific and expected to change.
+    Large API/ABI changes are not expected, but a new NumPy version is
+    expected to require updating code using this functionality.
+
+    This function is designed to be used in conjunction with
+    `numpy.ufunc._get_strided_loop`. The calls are split to mirror the C API
+    and allow future improvements.
+
+    Returns
+    -------
+    dtypes : tuple of dtypes
+    call_info :
+        PyCapsule with all necessary information to get access to low level
+        C calls. See `numpy.ufunc._get_strided_loop` for more information.
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop',
+    """
+    _get_strided_loop(call_info, /, *, fixed_strides=None)
+
+    This function fills in the ``call_info`` capsule to include all
+    information necessary to call the low-level strided loop from NumPy.
+
+    See notes for more information.
+
+    Parameters
+    ----------
+    call_info : PyCapsule
+        The PyCapsule returned by `numpy.ufunc._resolve_dtypes_and_context`.
+    fixed_strides : tuple of int or None, optional
+        A tuple with fixed byte strides of all input arrays. NumPy may use
+        this information to find specialized loops, so any call must follow
+        the given stride. Use ``None`` to indicate that the stride is not
+        known (or not fixed) for all calls.
+
+    Notes
+    -----
+    Together with `numpy.ufunc._resolve_dtypes_and_context` this function
+    gives low-level access to the NumPy ufunc loops.
+    The first function does general preparation and returns the required
+    information. It returns this as a C capsule with the version specific
+    name ``numpy_1.24_ufunc_call_info``.
+    The NumPy 1.24 ufunc call info capsule has the following layout::
+
+        typedef struct {
+            PyArrayMethod_StridedLoop *strided_loop;
+            PyArrayMethod_Context *context;
+            NpyAuxData *auxdata;
+
+            /* Flag information (expected to change) */
+            npy_bool requires_pyapi;  /* GIL is required by loop */
+
+            /* Loop doesn't set FPE flags; if not set check FPE flags */
+            npy_bool no_floatingpoint_errors;
+        } ufunc_call_info;
+
+    Note that the first call only fills in the ``context``. The call to
+    ``_get_strided_loop`` fills in all other data. The main thing to note is
+    that the new-style loops return 0 on success, -1 on failure. They are
+    passed context as new first input and ``auxdata`` as (replaced) last.
+
+    Only the ``strided_loop`` signature is considered guaranteed stable
+    for NumPy bug-fix releases. All other API is tied to the experimental
+    API versioning.
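+
+    As rough orientation, a minimal sketch of the intended two-step call
+    sequence from Python, based only on the signatures documented here (the
+    filled-in capsule is usable from C only, so this is illustrative rather
+    than something useful at the Python level):
+
+    >>> import numpy as np
+    >>> int32 = np.dtype("int32")
+    >>> # step 1: resolve dtypes and obtain the call_info capsule
+    >>> dtypes, call_info = np.add._resolve_dtypes_and_context(
+    ...     (int32, int32, None))
+    >>> # step 2: fill the capsule with the strided-loop information;
+    >>> # the strides are not known ahead of time here, so keep the default
+    >>> np.add._get_strided_loop(call_info)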
+
+    The reason for the split call is that cast information is required to
+    decide what the fixed-strides will be.
+
+    NumPy ties the lifetime of the ``auxdata`` information to the capsule.
+
+    """))
+
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'dtype',
+    """
+    dtype(dtype, align=False, copy=False, [metadata])
+
+    Create a data type object.
+
+    A numpy array is homogeneous, and contains elements described by a
+    dtype object. A dtype object can be constructed from different
+    combinations of fundamental numeric types.
+
+    Parameters
+    ----------
+    dtype
+        Object to be converted to a data type object.
+    align : bool, optional
+        Add padding to the fields to match what a C compiler would output
+        for a similar C-struct. Can be ``True`` only if `dtype` is a
+        dictionary or a comma-separated string. If a struct dtype is being
+        created, this also sets a sticky alignment flag ``isalignedstruct``.
+    copy : bool, optional
+        Make a new copy of the data-type object. If ``False``, the result
+        may just be a reference to a built-in data-type object.
+    metadata : dict, optional
+        An optional dictionary with dtype metadata.
+
+    See also
+    --------
+    result_type
+
+    Examples
+    --------
+    Using array-scalar type:
+
+    >>> import numpy as np
+    >>> np.dtype(np.int16)
+    dtype('int16')
+
+    Structured type, one field name 'f1', containing int16:
+
+    >>> np.dtype([('f1', np.int16)])
+    dtype([('f1', '<i2')])
+
+    Structured type, one field named 'f1', in itself containing a structured
+    type with one field:
+
+    >>> np.dtype([('f1', [('f1', np.int16)])])
+    dtype([('f1', [('f1', '<i2')])])
+
+    Structured type, two fields: the first field contains an unsigned int,
+    the second an int32:
+
+    >>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
+    dtype([('f1', '<u8'), ('f2', '<i4')])
+
+    Using array-protocol type strings:
+
+    >>> np.dtype([('a','f8'),('b','S10')])
+    dtype([('a', '<f8'), ('b', 'S10')])
+
+    Using comma-separated field formats. The shape is (2,3):
+
+    >>> np.dtype("i4, (2,3)f8")
+    dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+    Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
+    is a flexible type, here of size 10:
+
+    >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
+    dtype([('hello', '<i8', (3,)), ('world', 'V10')])
+
+    Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
+    the offsets in bytes:
+
+    >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+    dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
+
+    Using dictionaries. Two fields named 'gender' and 'age':
+
+    >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+    dtype([('gender', 'S1'), ('age', 'u1')])
+
+    Offsets in bytes, here 0 and 25:
+
+    >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+    dtype([('surname', 'S25'), ('age', 'u1')])
+
+    """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('alignment',
+    """
+    The required alignment (bytes) of this data-type according to the compiler.
+
+    More information is available in the C-API section of the manual.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> x = np.dtype('i4')
+    >>> x.alignment
+    4
+
+    >>> x = np.dtype(float)
+    >>> x.alignment
+    8
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('byteorder',
+    """
+    A character indicating the byte-order of this data-type object.
+
+    One of:
+
+    ===  ==============
+    '='  native
+    '<'  little-endian
+    '>'  big-endian
+    '|'  not applicable
+    ===  ==============
+
+    All built-in data-type objects have byteorder either '=' or '|'.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> dt = np.dtype('i2')
+    >>> dt.byteorder
+    '='
+    >>> # endian is not relevant for 8 bit numbers
+    >>> np.dtype('i1').byteorder
+    '|'
+    >>> # or ASCII strings
+    >>> np.dtype('S2').byteorder
+    '|'
+    >>> # Even if specific code is given, and it is native
+    >>> # '=' is the byteorder
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = '<' if sys_is_le else '>'
+    >>> swapped_code = '>' if sys_is_le else '<'
+    >>> dt = np.dtype(native_code + 'i2')
+    >>> dt.byteorder
+    '='
+    >>> # Swapped code shows up as itself
+    >>> dt = np.dtype(swapped_code + 'i2')
+    >>> dt.byteorder == swapped_code
+    True
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('char',
+    """A unique character code for each of the 21 different built-in types.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> x = np.dtype(float)
+    >>> x.char
+    'd'
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('descr',
+    """
+    `__array_interface__` description of the data-type.
+
+    The format is that required by the 'descr' key in the
+    `__array_interface__` attribute.
+
+    Warning: This attribute exists specifically for `__array_interface__`,
+    and passing it directly to `numpy.dtype` will not accurately reconstruct
+    some dtypes (e.g., scalar and subarray dtypes).
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> x = np.dtype(float)
+    >>> x.descr
+    [('', '<f8')]
+
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> dt.descr
+    [('name', '<U16'), ('grades', '<f8', (2,))]
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('fields',
+    """
+    Dictionary of named fields defined for this data type, or ``None``.
+
+    The dictionary is indexed by keys that are the names of the fields.
+    Each entry in the dictionary has a tuple fully describing the field::
+
+      (dtype, offset[, title])
+
+    Offset is limited to C int, which is signed and usually 32 bits.
+    If present, the optional title can be any object (if it is a string
+    or unicode then it will also be a key in the fields dictionary,
+    otherwise it's meta-data). Notice also that the first two elements
+    of the tuple can be passed directly as arguments to the
+    ``ndarray.getfield`` and ``ndarray.setfield`` methods.
+
+    See Also
+    --------
+    ndarray.getfield, ndarray.setfield
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> print(dt.fields)
+    {'name': (dtype('|S16'), 0), 'grades': (dtype(('float64',(2,))), 16)}
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('flags',
+    """
+    Bit-flags describing how this data type is to be interpreted.
+
+    Bit-masks are in ``numpy._core.multiarray`` as the constants
+    `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
+    `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
+    of these flags is in C-API documentation; they are largely useful
+    for user-defined data-types.
+
+    The following example demonstrates that operations on this particular
+    dtype require the Python C-API.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+    >>> x.flags
+    16
+    >>> np._core.multiarray.NEEDS_PYAPI
+    16
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('hasobject',
+    """
+    Boolean indicating whether this dtype contains any reference-counted
+    objects in any fields or sub-dtypes.
+
+    Recall that what is actually in the ndarray memory representing
+    the Python object is the memory address of that object (a pointer).
+    Special handling may be required, and this attribute is useful for
+    distinguishing data types that may contain arbitrary Python objects
+    and data-types that won't.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('isbuiltin',
+    """
+    Integer indicating how this dtype relates to the built-in dtypes.
+
+    Read-only.
+
+    = ========================================================================
+    0 if this is a structured array type, with fields
+    1 if this is a dtype compiled into numpy (such as ints, floats etc)
+    2 if the dtype is for a user-defined numpy type
+      A user-defined type uses the numpy C-API machinery to extend
+      numpy to handle a new array type. See
+      :ref:`user.user-defined-data-types` in the NumPy manual.
+ = ======================================================================== + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype('i2') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype('f8') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.isbuiltin + 0 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('isnative', + """ + Boolean indicating whether the byte order of this dtype is native + to the platform. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('isalignedstruct', + """ + Boolean indicating whether the dtype is a struct which maintains + field alignment. This flag is sticky, so when combining multiple + structs together, it is preserved and produces new dtypes which + are also aligned. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('itemsize', + """ + The element size of this data-type object. + + For 18 of the 21 types this number is fixed by the data-type. + For the flexible data-types, this number can be anything. + + Examples + -------- + + >>> import numpy as np + >>> arr = np.array([[1, 2], [3, 4]]) + >>> arr.dtype + dtype('int64') + >>> arr.itemsize + 8 + + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.itemsize + 80 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('kind', + """ + A character code (one of 'biufcmMOSTUV') identifying the general kind of data. + + = ====================== + b boolean + i signed integer + u unsigned integer + f floating-point + c complex floating-point + m timedelta + M datetime + O object + S (byte-)string + T string (StringDType) + U Unicode + V void + = ====================== + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype('i4') + >>> dt.kind + 'i' + >>> dt = np.dtype('f8') + >>> dt.kind + 'f' + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.kind + 'V' + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('metadata', + """ + Either ``None`` or a readonly dictionary of metadata (mappingproxy). + + The metadata field can be set using any dictionary at data-type + creation. NumPy currently has no uniform approach to propagating + metadata; although some array operations preserve it, there is no + guarantee that others will. + + .. warning:: + + Although used in certain projects, this feature was long undocumented + and is not well supported. Some aspects of metadata propagation + are expected to change in the future. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(float, metadata={"key": "value"}) + >>> dt.metadata["key"] + 'value' + >>> arr = np.array([1, 2, 3], dtype=dt) + >>> arr.dtype.metadata + mappingproxy({'key': 'value'}) + + Adding arrays with identical datatypes currently preserves the metadata: + + >>> (arr + arr).dtype.metadata + mappingproxy({'key': 'value'}) + + If the arrays have different dtype metadata, the first one wins: + + >>> dt2 = np.dtype(float, metadata={"key2": "value2"}) + >>> arr2 = np.array([3, 2, 1], dtype=dt2) + >>> print((arr + arr2).dtype.metadata) + {'key': 'value'} + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('name', + """ + A bit-width name for this data-type. + + Un-sized flexible data-type objects do not have this attribute. 
+ + Examples + -------- + + >>> import numpy as np + >>> x = np.dtype(float) + >>> x.name + 'float64' + >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) + >>> x.name + 'void640' + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('names', + """ + Ordered list of field names, or ``None`` if there are no fields. + + The names are ordered according to increasing byte offset. This can be + used, for example, to walk through all of the named fields in offset order. + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.names + ('name', 'grades') + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('num', + """ + A unique number for each of the 21 different built-in types. + + These are roughly ordered from least-to-most precision. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(str) + >>> dt.num + 19 + + >>> dt = np.dtype(float) + >>> dt.num + 12 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('shape', + """ + Shape tuple of the sub-array if this data type describes a sub-array, + and ``()`` otherwise. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(('i4', 4)) + >>> dt.shape + (4,) + + >>> dt = np.dtype(('i4', (2, 3))) + >>> dt.shape + (2, 3) + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('ndim', + """ + Number of dimensions of the sub-array if this data type describes a + sub-array, and ``0`` otherwise. + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype(float) + >>> x.ndim + 0 + + >>> x = np.dtype((float, 8)) + >>> x.ndim + 1 + + >>> x = np.dtype(('i4', (3, 4))) + >>> x.ndim + 2 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('str', + """The array-protocol typestring of this data-type object.""")) + +add_newdoc('numpy._core.multiarray', 'dtype', ('subdtype', + """ + Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and + None otherwise. + + The *shape* is the fixed shape of the sub-array described by this + data type, and *item_dtype* the data type of the array. + + If a field whose dtype object has this attribute is retrieved, + then the extra dimensions implied by *shape* are tacked on to + the end of the retrieved array. + + See Also + -------- + dtype.base + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype('8f') + >>> x.subdtype + (dtype('float32'), (8,)) + + >>> x = np.dtype('i2') + >>> x.subdtype + >>> + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('base', + """ + Returns dtype for the base element of the subarrays, + regardless of their dimension or shape. + + See Also + -------- + dtype.subdtype + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype('8f') + >>> x.base + dtype('float32') + + >>> x = np.dtype('i2') + >>> x.base + dtype('int16') + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('type', + """The type object used to instantiate a scalar of this data-type.""")) + +############################################################################## +# +# dtype methods +# +############################################################################## + +add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder', + """ + newbyteorder(new_order='S', /) + + Return a new dtype with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order specifications + below. 
The default value ('S') results in swapping the current
+        byte order. `new_order` codes can be any of:
+
+        * 'S' - swap dtype from current to opposite endian
+        * {'<', 'little'} - little endian
+        * {'>', 'big'} - big endian
+        * {'=', 'native'} - native order
+        * {'|', 'I'} - ignore (no change to byte order)
+
+    Returns
+    -------
+    new_dtype : dtype
+        New dtype object with the given change to the byte order.
+
+    Notes
+    -----
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    Examples
+    --------
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = '<' if sys_is_le else '>'
+    >>> swapped_code = '>' if sys_is_le else '<'
+    >>> import numpy as np
+    >>> native_dt = np.dtype(native_code+'i2')
+    >>> swapped_dt = np.dtype(swapped_code+'i2')
+    >>> native_dt.newbyteorder('S') == swapped_dt
+    True
+    >>> native_dt.newbyteorder() == swapped_dt
+    True
+    >>> native_dt == swapped_dt.newbyteorder('S')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('=')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('N')
+    True
+    >>> native_dt == native_dt.newbyteorder('|')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+    True
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__class_getitem__',
+    """
+    __class_getitem__(item, /)
+
+    Return a parametrized wrapper around the `~numpy.dtype` type.
+
+    .. versionadded:: 1.22
+
+    Returns
+    -------
+    alias : types.GenericAlias
+        A parametrized `~numpy.dtype` type.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> np.dtype[np.int64]
+    numpy.dtype[numpy.int64]
+
+    See Also
+    --------
+    :pep:`585` : Type hinting generics in standard collections.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__ge__',
+    """
+    __ge__(value, /)
+
+    Return ``self >= value``.
+
+    Equivalent to ``np.can_cast(value, self, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__le__',
+    """
+    __le__(value, /)
+
+    Return ``self <= value``.
+
+    Equivalent to ``np.can_cast(self, value, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__',
+    """
+    __gt__(value, /)
+
+    Return ``self > value``.
+
+    Equivalent to
+    ``self != value and np.can_cast(value, self, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__lt__',
+    """
+    __lt__(value, /)
+
+    Return ``self < value``.
+
+    Equivalent to
+    ``self != value and np.can_cast(self, value, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar',
+    """
+    busdaycalendar(weekmask='1111100', holidays=None)
+
+    A business day calendar object that efficiently stores information
+    defining valid days for the busday family of functions.
+
+    The default valid days are Monday through Friday ("business days").
+
+    A busdaycalendar object can be specified with any set of weekly
+    valid days, plus an optional set of "holiday" dates that will
+    always be invalid.
+
+    Once a busdaycalendar object is created, the weekmask and holidays
+    cannot be modified.
+
+    Parameters
+    ----------
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates, no matter which
+        weekday they fall upon. Holiday dates may be specified in any
+        order, and NaT (not-a-time) dates are ignored. This list is
+        saved in a normalized form that is suited for fast calculations
+        of valid days.
+
+    Returns
+    -------
+    out : busdaycalendar
+        A business day calendar object containing the specified
+        weekmask and holidays values.
+
+    See Also
+    --------
+    is_busday : Returns a boolean array indicating valid days.
+    busday_offset : Applies an offset counted in valid days.
+    busday_count : Counts how many valid days are in a half-open date range.
+
+    Attributes
+    ----------
+    weekmask : (copy) seven-element array of bool
+    holidays : (copy) sorted array of datetime64[D]
+
+    Notes
+    -----
+    Once a busdaycalendar object is created, you cannot modify the
+    weekmask or holidays. The attributes return copies of internal data.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> # Some important days in July
+    ... bdd = np.busdaycalendar(
+    ...             holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+    >>> # Default is Monday to Friday weekdays
+    ... bdd.weekmask
+    array([ True,  True,  True,  True,  True, False, False])
+    >>> # Any holidays already on the weekend are removed
+    ... bdd.holidays
+    array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
+    """)
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar', ('weekmask',
+    """A copy of the seven-element boolean mask indicating valid days."""))
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar', ('holidays',
+    """A copy of the holiday array indicating additional invalid days."""))
+
+add_newdoc('numpy._core.multiarray', 'normalize_axis_index',
+    """
+    normalize_axis_index(axis, ndim, msg_prefix=None)
+
+    Normalizes an axis index, `axis`, such that it is a valid positive index
+    into the shape of an array with `ndim` dimensions. Raises an AxisError
+    with an appropriate message if this is not possible.
+
+    Used internally by all axis-checking logic.
+
+    Parameters
+    ----------
+    axis : int
+        The un-normalized index of the axis. Can be negative.
+    ndim : int
+        The number of dimensions of the array that `axis` should be
+        normalized against.
+    msg_prefix : str, optional
+        A prefix to put before the message, typically the name of the
+        argument.
+
+    Returns
+    -------
+    normalized_axis : int
+        The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+    Raises
+    ------
+    AxisError
+        If the axis index is invalid, when `-ndim <= axis < ndim` is false.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib.array_utils import normalize_axis_index
+    >>> normalize_axis_index(0, ndim=3)
+    0
+    >>> normalize_axis_index(1, ndim=3)
+    1
+    >>> normalize_axis_index(-1, ndim=3)
+    2
+
+    >>> normalize_axis_index(3, ndim=3)
+    Traceback (most recent call last):
+    ...
+    numpy.exceptions.AxisError: axis 3 is out of bounds for array ...
+    >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
+    Traceback (most recent call last):
+    ...
+    numpy.exceptions.AxisError: axes_arg: axis -4 is out of bounds ...
+    """)
+
+add_newdoc('numpy._core.multiarray', 'datetime_data',
+    """
+    datetime_data(dtype, /)
+
+    Get information about the step size of a date or time type.
+
+    The returned tuple can be passed as the second argument of
+    `numpy.datetime64` and `numpy.timedelta64`.
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype object, which must be a `datetime64` or `timedelta64` type.
+
+    Returns
+    -------
+    unit : str
+        The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this
+        dtype is based.
+    count : int
+        The number of base units in a step.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dt_25s = np.dtype('timedelta64[25s]')
+    >>> np.datetime_data(dt_25s)
+    ('s', 25)
+    >>> np.array(10, dt_25s).astype('timedelta64[s]')
+    array(250, dtype='timedelta64[s]')
+
+    The result can be used to construct a datetime that uses the same units
+    as a timedelta
+
+    >>> np.datetime64('2010', np.datetime_data(dt_25s))
+    np.datetime64('2010-01-01T00:00:00','25s')
+    """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+    """
+    Base class for numpy scalar types.
+
+    Class from which most (all?) numpy scalar types are derived. For
+    consistency, exposes the same API as `ndarray`, despite many
+    consequent attributes being either "get-only," or completely irrelevant.
+    This is the class from which it is strongly suggested users should derive
+    custom scalar types.
+
+    """)
+
+# Attributes
+
+def refer_to_array_attribute(attr, method=True):
+    docstring = """
+    Scalar {} identical to the corresponding array attribute.
+
+    Please see `ndarray.{}`.
+ """ + + return attr, docstring.format("method" if method else "attribute", attr) + + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('T', method=False)) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('base', method=False)) + +add_newdoc('numpy._core.numerictypes', 'generic', ('data', + """Pointer to start of data.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('dtype', + """Get array data-descriptor.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('flags', + """The integer value of flags.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('flat', + """A 1-D view of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('imag', + """The imaginary part of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('itemsize', + """The length of one element in bytes.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('ndim', + """The number of array dimensions.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('real', + """The real part of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('shape', + """Tuple of array dimensions.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('size', + """The number of elements in the gentype.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('strides', + """Tuple of bytes steps in each dimension.""")) + +# Methods + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('all')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('any')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('argmax')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('argmin')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('argsort')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('astype')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('byteswap')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('choose')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('clip')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('compress')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('conjugate')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('copy')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('cumprod')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('cumsum')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('diagonal')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('dump')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('dumps')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('fill')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('flatten')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('getfield')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('item')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('max')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('mean')) + 
+add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('min')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('nonzero')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('prod')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('put')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('ravel')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('repeat')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('reshape')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('resize')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('round')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('searchsorted')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('setfield')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('setflags')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('sort')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('squeeze')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('std')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('sum')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('swapaxes')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('take')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('tofile')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('tolist')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('tostring')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('trace')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('transpose')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('var')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('view')) + +add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__', + """ + __class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.number` type. + + .. versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.number` type. + + Examples + -------- + >>> from typing import Any + >>> import numpy as np + + >>> np.signedinteger[Any] + numpy.signedinteger[typing.Any] + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + + """)) + +############################################################################## +# +# Documentation for scalar type abstract base classes in type hierarchy +# +############################################################################## + + +add_newdoc('numpy._core.numerictypes', 'number', + """ + Abstract base class of all numeric scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'integer', + """ + Abstract base class of all integer scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'signedinteger', + """ + Abstract base class of all signed integer scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'unsignedinteger', + """ + Abstract base class of all unsigned integer scalar types. 
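+
+    For instance, concrete unsigned integer scalar types are instances of
+    this base class, while signed ones are not:
+
+    >>> import numpy as np
+    >>> isinstance(np.uint8(1), np.unsignedinteger)
+    True
+    >>> isinstance(np.int8(1), np.unsignedinteger)
+    False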
+ + """) + +add_newdoc('numpy._core.numerictypes', 'inexact', + """ + Abstract base class of all numeric scalar types with a (potentially) + inexact representation of the values in its range, such as + floating-point numbers. + + """) + +add_newdoc('numpy._core.numerictypes', 'floating', + """ + Abstract base class of all floating-point scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'complexfloating', + """ + Abstract base class of all complex number scalar types that are made up of + floating-point numbers. + + """) + +add_newdoc('numpy._core.numerictypes', 'flexible', + """ + Abstract base class of all scalar types without predefined length. + The actual size of these types depends on the specific `numpy.dtype` + instantiation. + + """) + +add_newdoc('numpy._core.numerictypes', 'character', + """ + Abstract base class of all character string scalar types. + + """) + +add_newdoc('numpy._core.multiarray', 'StringDType', + """ + StringDType(*, na_object=np._NoValue, coerce=True) + + Create a StringDType instance. + + StringDType can be used to store UTF-8 encoded variable-width strings in + a NumPy array. + + Parameters + ---------- + na_object : object, optional + Object used to represent missing data. If unset, the array will not + use a missing data sentinel. + coerce : bool, optional + Whether or not items in an array-like passed to an array creation + function that are neither a str or str subtype should be coerced to + str. Defaults to True. If set to False, creating a StringDType + array from an array-like containing entries that are not already + strings will raise an error. + + Examples + -------- + + >>> import numpy as np + + >>> from numpy.dtypes import StringDType + >>> np.array(["hello", "world"], dtype=StringDType()) + array(["hello", "world"], dtype=StringDType()) + + >>> arr = np.array(["hello", None, "world"], + ... dtype=StringDType(na_object=None)) + >>> arr + array(["hello", None, "world"], dtype=StringDType(na_object=None)) + >>> arr[1] is None + True + + >>> arr = np.array(["hello", np.nan, "world"], + ... dtype=StringDType(na_object=np.nan)) + >>> np.isnan(arr) + array([False, True, False]) + + >>> np.array([1.2, object(), "hello world"], + ... dtype=StringDType(coerce=False)) + Traceback (most recent call last): + ... + ValueError: StringDType only allows string data when string coercion is disabled. + + >>> np.array(["hello", "world"], dtype=StringDType(coerce=True)) + array(["hello", "world"], dtype=StringDType(coerce=True)) + """) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.pyi new file mode 100644 index 0000000..b23c3b1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.pyi @@ -0,0 +1,3 @@ +from .overrides import get_array_function_like_doc as get_array_function_like_doc + +def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.py b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.py new file mode 100644 index 0000000..96170d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.py @@ -0,0 +1,390 @@ +""" +This file is separate from ``_add_newdocs.py`` so that it can be mocked out by +our sphinx ``conf.py`` during doc builds, where we want to avoid showing +platform-dependent information. 
+""" +import os +import sys + +from numpy._core import dtype +from numpy._core import numerictypes as _numerictypes +from numpy._core.function_base import add_newdoc + +############################################################################## +# +# Documentation for concrete scalar classes +# +############################################################################## + +def numeric_type_aliases(aliases): + def type_aliases_gen(): + for alias, doc in aliases: + try: + alias_type = getattr(_numerictypes, alias) + except AttributeError: + # The set of aliases that actually exist varies between platforms + pass + else: + yield (alias_type, alias, doc) + return list(type_aliases_gen()) + + +possible_aliases = numeric_type_aliases([ + ('int8', '8-bit signed integer (``-128`` to ``127``)'), + ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'), + ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'), + ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'), + ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), + ('uint8', '8-bit unsigned integer (``0`` to ``255``)'), + ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'), + ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'), + ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'), + ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), + ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), + ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), + ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), + ('float96', '96-bit extended-precision floating-point number type'), + ('float128', '128-bit extended-precision floating-point number type'), + ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), + ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), + ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), + ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), + ]) + + +def _get_platform_and_machine(): + try: + system, _, _, _, machine = os.uname() + except AttributeError: + system = sys.platform + if system == 'win32': + machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \ + or os.environ.get('PROCESSOR_ARCHITECTURE', '') + else: + machine = 'unknown' + return system, machine + + +_system, _machine = _get_platform_and_machine() +_doc_alias_string = f":Alias on this platform ({_system} {_machine}):" + + +def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): + # note: `:field: value` is rST syntax which renders as field lists. 
+
+    o = getattr(_numerictypes, obj)
+
+    character_code = dtype(o).char
+    canonical_name_doc = "" if obj == o.__name__ else \
+        f":Canonical name: `numpy.{obj}`\n    "
+    if fixed_aliases:
+        alias_doc = ''.join(f":Alias: `numpy.{alias}`\n    "
+                            for alias in fixed_aliases)
+    else:
+        alias_doc = ''
+    alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n    "
+                         for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+
+    docstring = f"""
+    {doc.strip()}
+
+    :Character code: ``'{character_code}'``
+    {canonical_name_doc}{alias_doc}
+    """
+
+    add_newdoc('numpy._core.numerictypes', obj, docstring)
+
+
+_bool_docstring = (
+    """
+    Boolean type (True or False), stored as a byte.
+
+    .. warning::
+
+       The :class:`bool` type is not a subclass of the :class:`int_` type
+       (the :class:`bool` is not even a number type). This is different
+       than Python's default implementation of :class:`bool` as a
+       sub-class of :class:`int`.
+    """
+)
+
+add_newdoc_for_scalar_type('bool', [], _bool_docstring)
+
+add_newdoc_for_scalar_type('bool_', [], _bool_docstring)
+
+add_newdoc_for_scalar_type('byte', [],
+    """
+    Signed integer type, compatible with C ``char``.
+    """)
+
+add_newdoc_for_scalar_type('short', [],
+    """
+    Signed integer type, compatible with C ``short``.
+    """)
+
+add_newdoc_for_scalar_type('intc', [],
+    """
+    Signed integer type, compatible with C ``int``.
+    """)
+
+# TODO: These docs probably need an if to highlight the default rather than
+# the C-types (and be correct).
+add_newdoc_for_scalar_type('int_', [],
+    """
+    Default signed integer type, 64bit on 64bit systems and 32bit on 32bit
+    systems.
+    """)
+
+add_newdoc_for_scalar_type('longlong', [],
+    """
+    Signed integer type, compatible with C ``long long``.
+    """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned char``.
+    """)
+
+add_newdoc_for_scalar_type('ushort', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned short``.
+    """)
+
+add_newdoc_for_scalar_type('uintc', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned int``.
+    """)
+
+add_newdoc_for_scalar_type('uint', [],
+    """
+    Unsigned integer type, 64bit on 64bit systems and 32bit on 32bit
+    systems.
+    """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+    """)
+
+add_newdoc_for_scalar_type('half', [],
+    """
+    Half-precision floating-point number type.
+    """)
+
+add_newdoc_for_scalar_type('single', [],
+    """
+    Single-precision floating-point number type, compatible with C ``float``.
+    """)
+
+add_newdoc_for_scalar_type('double', [],
+    """
+    Double-precision floating-point number type, compatible with Python
+    :class:`float` and C ``double``.
+    """)
+
+add_newdoc_for_scalar_type('longdouble', [],
+    """
+    Extended-precision floating-point number type, compatible with C
+    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+    """)
+
+add_newdoc_for_scalar_type('csingle', [],
+    """
+    Complex number type composed of two single-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('cdouble', [],
+    """
+    Complex number type composed of two double-precision floating-point
+    numbers, compatible with Python :class:`complex`.
+    """)
+
+add_newdoc_for_scalar_type('clongdouble', [],
+    """
+    Complex number type composed of two extended-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('object_', [],
+    """
+    Any Python object.
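+
+    For example (any Python object can be stored once ``dtype=object`` is
+    requested; the array then holds references to the objects):
+
+    >>> import numpy as np
+    >>> arr = np.array([{'a': 1}, [1, 2], 'text'], dtype=object)
+    >>> arr.dtype
+    dtype('O')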
+    """)
+
+add_newdoc_for_scalar_type('str_', [],
+    r"""
+    A unicode string.
+
+    This type strips trailing null codepoints.
+
+    >>> s = np.str_("abc\x00")
+    >>> s
+    'abc'
+
+    Unlike the builtin :class:`str`, this supports the
+    :ref:`python:bufferobjects`, exposing its contents as UCS4:
+
+    >>> m = memoryview(np.str_("abc"))
+    >>> m.format
+    '3w'
+    >>> m.tobytes()
+    b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
+    """)
+
+add_newdoc_for_scalar_type('bytes_', [],
+    r"""
+    A byte string.
+
+    When used in arrays, this type strips trailing null bytes.
+    """)
+
+add_newdoc_for_scalar_type('void', [],
+    r"""
+    np.void(length_or_data, /, dtype=None)
+
+    Create a new structured or unstructured void scalar.
+
+    Parameters
+    ----------
+    length_or_data : int, array-like, bytes-like, object
+        One of multiple meanings (see notes). The length or
+        bytes data of an unstructured void. Or alternatively,
+        the data to be stored in the new scalar when `dtype`
+        is provided.
+        This can be an array-like, in which case an array may
+        be returned.
+    dtype : dtype, optional
+        If provided, the dtype of the new scalar. This dtype must
+        be a "void" dtype (i.e. a structured or unstructured void,
+        see also :ref:`defining-structured-types`).
+
+        .. versionadded:: 1.24
+
+    Notes
+    -----
+    For historical reasons and because void scalars can represent both
+    arbitrary byte data and structured dtypes, the void constructor
+    has three calling conventions:
+
+    1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
+       ``\0`` bytes. The 5 can be a Python or NumPy integer.
+    2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
+       The dtype itemsize will match the byte string length, here ``"V10"``.
+    3. When a ``dtype=`` is passed the call is roughly the same as an
+       array creation. However, a void scalar rather than array is returned.
+
+    Please see the examples which show all three different conventions.
+
+    Examples
+    --------
+    >>> np.void(5)
+    np.void(b'\x00\x00\x00\x00\x00')
+    >>> np.void(b'abcd')
+    np.void(b'\x61\x62\x63\x64')
+    >>> np.void((3.2, b'eggs'), dtype="d,S5")
+    np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
+    >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
+    np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
+
+    """)
+
+add_newdoc_for_scalar_type('datetime64', [],
+    """
+    If created from a 64-bit integer, it represents an offset from
+    ``1970-01-01T00:00:00``.
+    If created from a string, the string can be in ISO 8601 date
+    or datetime format.
+
+    When parsing a string to create a datetime object, if the string contains
+    a trailing timezone (a 'Z' or a timezone offset), the timezone will be
+    dropped and a ``UserWarning`` is given.
+
+    Datetime64 objects should be considered to be UTC and therefore have an
+    offset of +0000.
+
+    >>> np.datetime64(10, 'Y')
+    np.datetime64('1980')
+    >>> np.datetime64('1980', 'Y')
+    np.datetime64('1980')
+    >>> np.datetime64(10, 'D')
+    np.datetime64('1970-01-11')
+
+    See :ref:`arrays.datetime` for more information.
+    """)
+
+add_newdoc_for_scalar_type('timedelta64', [],
+    """
+    A timedelta stored as a 64-bit integer.
+
+    See :ref:`arrays.datetime` for more information.
+    """)
+
+add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
+    """
+    integer.is_integer() -> bool
+
+    Return ``True`` if the number is finite with integral value.
+
+    .. versionadded:: 1.22
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.int64(-2).is_integer()
+    True
+    >>> np.uint32(5).is_integer()
+    True
+    """))
+
+# TODO: work out how to put this on the base class, np.floating
+for float_name in ('half', 'single', 'double', 'longdouble'):
+    add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
+        f"""
+        {float_name}.as_integer_ratio() -> (int, int)
+
+        Return a pair of integers, whose ratio is exactly equal to the original
+        floating point number, and with a positive denominator.
+        Raise `OverflowError` on infinities and a `ValueError` on NaNs.
+
+        >>> np.{float_name}(10.0).as_integer_ratio()
+        (10, 1)
+        >>> np.{float_name}(0.0).as_integer_ratio()
+        (0, 1)
+        >>> np.{float_name}(-.25).as_integer_ratio()
+        (-1, 4)
+        """))
+
+    add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
+        f"""
+        {float_name}.is_integer() -> bool
+
+        Return ``True`` if the floating point number is finite with integral
+        value, and ``False`` otherwise.
+
+        .. versionadded:: 1.22
+
+        Examples
+        --------
+        >>> np.{float_name}(-2.0).is_integer()
+        True
+        >>> np.{float_name}(3.2).is_integer()
+        False
+        """))
+
+for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
+                 'int64', 'uint64'):
+    # Add negative examples for signed cases by checking typecode
+    add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
+        f"""
+        {int_name}.bit_count() -> int
+
+        Computes the number of 1-bits in the absolute value of the input.
+        Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
+
+        Examples
+        --------
+        >>> np.{int_name}(127).bit_count()
+        7""" +
+        (f"""
+        >>> np.{int_name}(-127).bit_count()
+        7
+        """ if dtype(int_name).char.islower() else "")))
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.pyi
new file mode 100644
index 0000000..4a06c9b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.pyi
@@ -0,0 +1,16 @@
+from collections.abc import Iterable
+from typing import Final
+
+import numpy as np
+
+possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ...
+_system: Final[str] = ...
+_machine: Final[str] = ...
+_doc_alias_string: Final[str] = ...
+_bool_docstring: Final[str] = ...
+int_name: str = ...
+float_name: str = ...
+
+def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ...
+def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ...
+def _get_platform_and_machine() -> tuple[str, str]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.py b/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.py
new file mode 100644
index 0000000..613c5cf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.py
@@ -0,0 +1,134 @@
+"""
+Functions in the ``as*array`` family that promote array-likes into arrays.
+
+`require` fits this category despite its name not matching this pattern.
+""" +from .multiarray import array, asanyarray +from .overrides import ( + array_function_dispatch, + finalize_array_function_like, + set_module, +) + +__all__ = ["require"] + + +POSSIBLE_FLAGS = { + 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', + 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', + 'A': 'A', 'ALIGNED': 'A', + 'W': 'W', 'WRITEABLE': 'W', + 'O': 'O', 'OWNDATA': 'O', + 'E': 'E', 'ENSUREARRAY': 'E' +} + + +@finalize_array_function_like +@set_module('numpy') +def require(a, dtype=None, requirements=None, *, like=None): + """ + Return an ndarray of the provided type that satisfies requirements. + + This function is useful to be sure that an array with the correct flags + is returned for passing to compiled code (perhaps through ctypes). + + Parameters + ---------- + a : array_like + The object to be converted to a type-and-requirement-satisfying array. + dtype : data-type + The required data-type. If None preserve the current dtype. If your + application requires the data to be in native byteorder, include + a byteorder specification as a part of the dtype specification. + requirements : str or sequence of str + The requirements list can be any of the following + + * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array + * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array + * 'ALIGNED' ('A') - ensure a data-type aligned array + * 'WRITEABLE' ('W') - ensure a writable array + * 'OWNDATA' ('O') - ensure an array that owns its own data + * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array with specified requirements and type if given. + + See Also + -------- + asarray : Convert input to an ndarray. + asanyarray : Convert to an ndarray, but pass through ndarray subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + ndarray.flags : Information about the memory layout of the array. + + Notes + ----- + The returned array will be guaranteed to have the listed requirements + by making a copy if needed. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2,3) + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) + >>> y.flags + C_CONTIGUOUS : False + F_CONTIGUOUS : True + OWNDATA : True + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + if like is not None: + return _require_with_like( + like, + a, + dtype=dtype, + requirements=requirements, + ) + + if not requirements: + return asanyarray(a, dtype=dtype) + + requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements} + + if 'E' in requirements: + requirements.remove('E') + subok = False + else: + subok = True + + order = 'A' + if requirements >= {'C', 'F'}: + raise ValueError('Cannot specify both "C" and "F" order') + elif 'F' in requirements: + order = 'F' + requirements.remove('F') + elif 'C' in requirements: + order = 'C' + requirements.remove('C') + + arr = array(a, dtype=dtype, order=order, copy=None, subok=subok) + + for prop in requirements: + if not arr.flags[prop]: + return arr.copy(order) + return arr + + +_require_with_like = array_function_dispatch()(require) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.pyi new file mode 100644 index 0000000..a4bee00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.pyi @@ -0,0 +1,41 @@ +from collections.abc import Iterable +from typing import Any, Literal, TypeAlias, TypeVar, overload + +from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc + +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +_Requirements: TypeAlias = Literal[ + "C", "C_CONTIGUOUS", "CONTIGUOUS", + "F", "F_CONTIGUOUS", "FORTRAN", + "A", "ALIGNED", + "W", "WRITEABLE", + "O", "OWNDATA" +] +_E: TypeAlias = Literal["E", "ENSUREARRAY"] +_RequirementsWithE: TypeAlias = _Requirements | _E + +@overload +def require( + a: _ArrayT, + dtype: None = ..., + requirements: _Requirements | Iterable[_Requirements] | None = ..., + *, + like: _SupportsArrayFunc = ... +) -> _ArrayT: ... +@overload +def require( + a: object, + dtype: DTypeLike = ..., + requirements: _E | Iterable[_RequirementsWithE] = ..., + *, + like: _SupportsArrayFunc = ... +) -> NDArray[Any]: ... +@overload +def require( + a: object, + dtype: DTypeLike = ..., + requirements: _Requirements | Iterable[_Requirements] | None = ..., + *, + like: _SupportsArrayFunc = ... +) -> NDArray[Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.py b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.py new file mode 100644 index 0000000..6a8a091 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.py @@ -0,0 +1,366 @@ +""" +A place for code to be called from the implementation of np.dtype + +String handling is much easier to do correctly in python. 
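A short illustration of the string handling this module implements (a sketch; printed values shown as comments assume a little-endian platform):

    import numpy as np

    dt = np.dtype([('x', np.int8), ('y', np.float64)])
    print(str(dt))               # [('x', 'i1'), ('y', '<f8')]  -- packed struct: list form
    print(repr(dt))              # dtype([('x', 'i1'), ('y', '<f8')])
    print(np.dtype(np.float64))  # float64  -- native byte order, so the bit name is used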
+""" +import numpy as np + +_kind_to_stem = { + 'u': 'uint', + 'i': 'int', + 'c': 'complex', + 'f': 'float', + 'b': 'bool', + 'V': 'void', + 'O': 'object', + 'M': 'datetime', + 'm': 'timedelta', + 'S': 'bytes', + 'U': 'str', +} + + +def _kind_name(dtype): + try: + return _kind_to_stem[dtype.kind] + except KeyError as e: + raise RuntimeError( + f"internal dtype error, unknown kind {dtype.kind!r}" + ) from None + + +def __str__(dtype): + if dtype.fields is not None: + return _struct_str(dtype, include_align=True) + elif dtype.subdtype: + return _subarray_str(dtype) + elif issubclass(dtype.type, np.flexible) or not dtype.isnative: + return dtype.str + else: + return dtype.name + + +def __repr__(dtype): + arg_str = _construction_repr(dtype, include_align=False) + if dtype.isalignedstruct: + arg_str = arg_str + ", align=True" + return f"dtype({arg_str})" + + +def _unpack_field(dtype, offset, title=None): + """ + Helper function to normalize the items in dtype.fields. + + Call as: + + dtype, offset, title = _unpack_field(*dtype.fields[name]) + """ + return dtype, offset, title + + +def _isunsized(dtype): + # PyDataType_ISUNSIZED + return dtype.itemsize == 0 + + +def _construction_repr(dtype, include_align=False, short=False): + """ + Creates a string repr of the dtype, excluding the 'dtype()' part + surrounding the object. This object may be a string, a list, or + a dict depending on the nature of the dtype. This + is the object passed as the first parameter to the dtype + constructor, and if no additional constructor parameters are + given, will reproduce the exact memory layout. + + Parameters + ---------- + short : bool + If true, this creates a shorter repr using 'kind' and 'itemsize', + instead of the longer type name. + + include_align : bool + If true, this includes the 'align=True' parameter + inside the struct dtype construction dict when needed. Use this flag + if you want a proper repr string without the 'dtype()' part around it. + + If false, this does not preserve the + 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for + struct arrays like the regular repr does, because the 'align' + flag is not part of first dtype constructor parameter. This + mode is intended for a full 'repr', where the 'align=True' is + provided as the second parameter. + """ + if dtype.fields is not None: + return _struct_str(dtype, include_align=include_align) + elif dtype.subdtype: + return _subarray_str(dtype) + else: + return _scalar_str(dtype, short=short) + + +def _scalar_str(dtype, short): + byteorder = _byte_order_str(dtype) + + if dtype.type == np.bool: + if short: + return "'?'" + else: + return "'bool'" + + elif dtype.type == np.object_: + # The object reference may be different sizes on different + # platforms, so it should never include the itemsize here. 
+ return "'O'" + + elif dtype.type == np.bytes_: + if _isunsized(dtype): + return "'S'" + else: + return "'S%d'" % dtype.itemsize + + elif dtype.type == np.str_: + if _isunsized(dtype): + return f"'{byteorder}U'" + else: + return "'%sU%d'" % (byteorder, dtype.itemsize / 4) + + elif dtype.type == str: + return "'T'" + + elif not type(dtype)._legacy: + return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'" + + # unlike the other types, subclasses of void are preserved - but + # historically the repr does not actually reveal the subclass + elif issubclass(dtype.type, np.void): + if _isunsized(dtype): + return "'V'" + else: + return "'V%d'" % dtype.itemsize + + elif dtype.type == np.datetime64: + return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'" + + elif dtype.type == np.timedelta64: + return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'" + + elif dtype.isbuiltin == 2: + return dtype.type.__name__ + + elif np.issubdtype(dtype, np.number): + # Short repr with endianness, like '' """ + # hack to obtain the native and swapped byte order characters + swapped = np.dtype(int).newbyteorder('S') + native = swapped.newbyteorder('S') + + byteorder = dtype.byteorder + if byteorder == '=': + return native.byteorder + if byteorder == 'S': + # TODO: this path can never be reached + return swapped.byteorder + elif byteorder == '|': + return '' + else: + return byteorder + + +def _datetime_metadata_str(dtype): + # TODO: this duplicates the C metastr_to_unicode functionality + unit, count = np.datetime_data(dtype) + if unit == 'generic': + return '' + elif count == 1: + return f'[{unit}]' + else: + return f'[{count}{unit}]' + + +def _struct_dict_str(dtype, includealignedflag): + # unpack the fields dictionary into ls + names = dtype.names + fld_dtypes = [] + offsets = [] + titles = [] + for name in names: + fld_dtype, offset, title = _unpack_field(*dtype.fields[name]) + fld_dtypes.append(fld_dtype) + offsets.append(offset) + titles.append(title) + + # Build up a string to make the dictionary + + if np._core.arrayprint._get_legacy_print_mode() <= 121: + colon = ":" + fieldsep = "," + else: + colon = ": " + fieldsep = ", " + + # First, the names + ret = "{'names'%s[" % colon + ret += fieldsep.join(repr(name) for name in names) + + # Second, the formats + ret += f"], 'formats'{colon}[" + ret += fieldsep.join( + _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes) + + # Third, the offsets + ret += f"], 'offsets'{colon}[" + ret += fieldsep.join("%d" % offset for offset in offsets) + + # Fourth, the titles + if any(title is not None for title in titles): + ret += f"], 'titles'{colon}[" + ret += fieldsep.join(repr(title) for title in titles) + + # Fifth, the itemsize + ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize) + + if (includealignedflag and dtype.isalignedstruct): + # Finally, the aligned flag + ret += ", 'aligned'%sTrue}" % colon + else: + ret += "}" + + return ret + + +def _aligned_offset(offset, alignment): + # round up offset: + return - (-offset // alignment) * alignment + + +def _is_packed(dtype): + """ + Checks whether the structured data type in 'dtype' + has a simple layout, where all the fields are in order, + and follow each other with no alignment padding. + + When this returns true, the dtype can be reconstructed + from a list of the field names and dtypes with no additional + dtype parameters. + + Duplicates the C `is_dtype_struct_simple_unaligned_layout` function. 
+ """ + align = dtype.isalignedstruct + max_alignment = 1 + total_offset = 0 + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + if align: + total_offset = _aligned_offset(total_offset, fld_dtype.alignment) + max_alignment = max(max_alignment, fld_dtype.alignment) + + if fld_offset != total_offset: + return False + total_offset += fld_dtype.itemsize + + if align: + total_offset = _aligned_offset(total_offset, max_alignment) + + return total_offset == dtype.itemsize + + +def _struct_list_str(dtype): + items = [] + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + item = "(" + if title is not None: + item += f"({title!r}, {name!r}), " + else: + item += f"{name!r}, " + # Special case subarray handling here + if fld_dtype.subdtype is not None: + base, shape = fld_dtype.subdtype + item += f"{_construction_repr(base, short=True)}, {shape}" + else: + item += _construction_repr(fld_dtype, short=True) + + item += ")" + items.append(item) + + return "[" + ", ".join(items) + "]" + + +def _struct_str(dtype, include_align): + # The list str representation can't include the 'align=' flag, + # so if it is requested and the struct has the aligned flag set, + # we must use the dict str instead. + if not (include_align and dtype.isalignedstruct) and _is_packed(dtype): + sub = _struct_list_str(dtype) + + else: + sub = _struct_dict_str(dtype, include_align) + + # If the data type isn't the default, void, show it + if dtype.type != np.void: + return f"({dtype.type.__module__}.{dtype.type.__name__}, {sub})" + else: + return sub + + +def _subarray_str(dtype): + base, shape = dtype.subdtype + return f"({_construction_repr(base, short=True)}, {shape})" + + +def _name_includes_bit_suffix(dtype): + if dtype.type == np.object_: + # pointer size varies by system, best to omit it + return False + elif dtype.type == np.bool: + # implied + return False + elif dtype.type is None: + return True + elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): + # unspecified + return False + else: + return True + + +def _name_get(dtype): + # provides dtype.name.__get__, documented as returning a "bit name" + + if dtype.isbuiltin == 2: + # user dtypes don't promise to do anything special + return dtype.type.__name__ + + if not type(dtype)._legacy: + name = type(dtype).__name__ + + elif issubclass(dtype.type, np.void): + # historically, void subclasses preserve their name, eg `record64` + name = dtype.type.__name__ + else: + name = _kind_name(dtype) + + # append bit counts + if _name_includes_bit_suffix(dtype): + name += f"{dtype.itemsize * 8}" + + # append metadata to datetimes + if dtype.type in (np.datetime64, np.timedelta64): + name += _datetime_metadata_str(dtype) + + return name diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.pyi new file mode 100644 index 0000000..6cdd77b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.pyi @@ -0,0 +1,58 @@ +from typing import Final, TypeAlias, TypedDict, overload, type_check_only +from typing import Literal as L + +from typing_extensions import ReadOnly, TypeVar + +import numpy as np + +### + +_T = TypeVar("_T") + +_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] + +@type_check_only +class _KindToStemType(TypedDict): + u: ReadOnly[L["uint"]] + i: ReadOnly[L["int"]] + c: ReadOnly[L["complex"]] + f: ReadOnly[L["float"]] 
+ b: ReadOnly[L["bool"]] + V: ReadOnly[L["void"]] + O: ReadOnly[L["object"]] + M: ReadOnly[L["datetime"]] + m: ReadOnly[L["timedelta"]] + S: ReadOnly[L["bytes"]] + U: ReadOnly[L["str"]] + +### + +_kind_to_stem: Final[_KindToStemType] = ... + +# +def _kind_name(dtype: np.dtype) -> _Name: ... +def __str__(dtype: np.dtype) -> str: ... +def __repr__(dtype: np.dtype) -> str: ... + +# +def _isunsized(dtype: np.dtype) -> bool: ... +def _is_packed(dtype: np.dtype) -> bool: ... +def _name_includes_bit_suffix(dtype: np.dtype) -> bool: ... + +# +def _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ... +def _scalar_str(dtype: np.dtype, short: bool) -> str: ... +def _byte_order_str(dtype: np.dtype) -> str: ... +def _datetime_metadata_str(dtype: np.dtype) -> str: ... +def _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ... +def _struct_list_str(dtype: np.dtype) -> str: ... +def _struct_str(dtype: np.dtype, include_align: bool) -> str: ... +def _subarray_str(dtype: np.dtype) -> str: ... +def _name_get(dtype: np.dtype) -> str: ... + +# +@overload +def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ... +@overload +def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... +def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.py b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.py new file mode 100644 index 0000000..4de6df6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.py @@ -0,0 +1,120 @@ +""" +Conversion from ctypes to dtype. + +In an ideal world, we could achieve this through the PEP3118 buffer protocol, +something like:: + + def dtype_from_ctypes_type(t): + # needed to ensure that the shape of `t` is within memoryview.format + class DummyStruct(ctypes.Structure): + _fields_ = [('a', t)] + + # empty to avoid memory allocation + ctype_0 = (DummyStruct * 0)() + mv = memoryview(ctype_0) + + # convert the struct, and slice back out the field + return _dtype_from_pep3118(mv.format)['a'] + +Unfortunately, this fails because: + +* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782) +* PEP3118 cannot represent unions, but both numpy and ctypes can +* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) +""" + +# We delay-import ctypes for distributions that do not include it. +# While this module is not used unless the user passes in ctypes +# members, it is eagerly imported from numpy/_core/__init__.py. +import numpy as np + + +def _from_ctypes_array(t): + return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,))) + + +def _from_ctypes_structure(t): + for item in t._fields_: + if len(item) > 2: + raise TypeError( + "ctypes bitfields have no dtype equivalent") + + if hasattr(t, "_pack_"): + import ctypes + formats = [] + offsets = [] + names = [] + current_offset = 0 + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + # Each type has a default offset, this is platform dependent + # for some types. 
+ effective_pack = min(t._pack_, ctypes.alignment(ftyp)) + current_offset = ( + (current_offset + effective_pack - 1) // effective_pack + ) * effective_pack + offsets.append(current_offset) + current_offset += ctypes.sizeof(ftyp) + + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) + else: + fields = [] + for fname, ftyp in t._fields_: + fields.append((fname, dtype_from_ctypes_type(ftyp))) + + # by default, ctypes structs are aligned + return np.dtype(fields, align=True) + + +def _from_ctypes_scalar(t): + """ + Return the dtype type with endianness included if it's the case + """ + if getattr(t, '__ctype_be__', None) is t: + return np.dtype('>' + t._type_) + elif getattr(t, '__ctype_le__', None) is t: + return np.dtype('<' + t._type_) + else: + return np.dtype(t._type_) + + +def _from_ctypes_union(t): + import ctypes + formats = [] + offsets = [] + names = [] + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + offsets.append(0) # Union fields are offset to 0 + + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) + + +def dtype_from_ctypes_type(t): + """ + Construct a dtype object from a ctypes type + """ + import _ctypes + if issubclass(t, _ctypes.Array): + return _from_ctypes_array(t) + elif issubclass(t, _ctypes._Pointer): + raise TypeError("ctypes pointers have no dtype equivalent") + elif issubclass(t, _ctypes.Structure): + return _from_ctypes_structure(t) + elif issubclass(t, _ctypes.Union): + return _from_ctypes_union(t) + elif isinstance(getattr(t, '_type_', None), str): + return _from_ctypes_scalar(t) + else: + raise NotImplementedError( + f"Unknown ctypes type {t.__name__}") diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.pyi new file mode 100644 index 0000000..69438a2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.pyi @@ -0,0 +1,83 @@ +import _ctypes +import ctypes as ct +from typing import Any, overload + +import numpy as np + +# +@overload +def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ... 
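These conversions are reachable through the public `np.dtype` constructor, which accepts ctypes types directly; a minimal sketch:

    import ctypes
    import numpy as np

    class Point(ctypes.Structure):
        _fields_ = [('x', ctypes.c_int32), ('y', ctypes.c_int32)]

    dt = np.dtype(Point)                       # routed through dtype_from_ctypes_type
    assert dt.names == ('x', 'y')
    assert dt.itemsize == ctypes.sizeof(Point)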
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
+
+# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtime, see
+# https://github.com/numpy/numpy/issues/28360
+
+#
+def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ...
+def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ...
+def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ...
+
+# keep in sync with `dtype_from_ctypes_type` (minus the first overload)
+@overload
+def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.py b/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.py
new file mode 100644
index 0000000..73b07d2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.py
@@ -0,0 +1,162 @@
+"""
+Various richly-typed exceptions, that also help us deal with string formatting
+in python where it's easier.
+
+By putting the formatting in `__str__`, we also avoid paying the cost for
+users who silence the exceptions.
+"""
+
+def _unpack_tuple(tup):
+    if len(tup) == 1:
+        return tup[0]
+    else:
+        return tup
+
+
+def _display_as_base(cls):
+    """
+    A decorator that makes an exception class look like its base.
+
+    We use this to hide subclasses that are implementation details - the user
+    should catch the base type, which is what the traceback will show them.
+
+    Classes decorated with this decorator are subject to removal without a
+    deprecation warning.
+ """ + assert issubclass(cls, Exception) + cls.__name__ = cls.__base__.__name__ + return cls + + +class UFuncTypeError(TypeError): + """ Base class for all ufunc exceptions """ + def __init__(self, ufunc): + self.ufunc = ufunc + + +@_display_as_base +class _UFuncNoLoopError(UFuncTypeError): + """ Thrown when a ufunc loop cannot be found """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc) + self.dtypes = tuple(dtypes) + + def __str__(self): + return ( + f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature " + f"matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} " + f"-> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}" + ) + + +@_display_as_base +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + """ Thrown when a binary resolution fails """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc, dtypes) + assert len(self.dtypes) == 2 + + def __str__(self): + return ( + "ufunc {!r} cannot use operands with types {!r} and {!r}" + ).format( + self.ufunc.__name__, *self.dtypes + ) + + +@_display_as_base +class _UFuncCastingError(UFuncTypeError): + def __init__(self, ufunc, casting, from_, to): + super().__init__(ufunc) + self.casting = casting + self.from_ = from_ + self.to = to + + +@_display_as_base +class _UFuncInputCastingError(_UFuncCastingError): + """ Thrown when a ufunc input cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.in_i = i + + def __str__(self): + # only show the number if more than one input exists + i_str = f"{self.in_i} " if self.ufunc.nin != 1 else "" + return ( + f"Cannot cast ufunc {self.ufunc.__name__!r} input {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" + ) + + +@_display_as_base +class _UFuncOutputCastingError(_UFuncCastingError): + """ Thrown when a ufunc output cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.out_i = i + + def __str__(self): + # only show the number if more than one output exists + i_str = f"{self.out_i} " if self.ufunc.nout != 1 else "" + return ( + f"Cannot cast ufunc {self.ufunc.__name__!r} output {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" + ) + + +@_display_as_base +class _ArrayMemoryError(MemoryError): + """ Thrown when an array cannot be allocated""" + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + @property + def _total_size(self): + num_bytes = self.dtype.itemsize + for dim in self.shape: + num_bytes *= dim + return num_bytes + + @staticmethod + def _size_to_string(num_bytes): + """ Convert a number of bytes into a binary size string """ + + # https://en.wikipedia.org/wiki/Binary_prefix + LOG2_STEP = 10 + STEP = 1024 + units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + + unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP + unit_val = 1 << (unit_i * LOG2_STEP) + n_units = num_bytes / unit_val + del unit_val + + # ensure we pick a unit that is correct after rounding + if round(n_units) == STEP: + unit_i += 1 + n_units /= STEP + + # deal with sizes so large that we don't have units for them + if unit_i >= len(units): + new_unit_i = len(units) - 1 + n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) + unit_i = new_unit_i + + unit_name = units[unit_i] + # format with a sensible number of digits + if unit_i == 0: + # no decimal point on bytes + return f'{n_units:.0f} {unit_name}' + elif round(n_units) < 1000: + 
# 3 significant figures, if none are dropped to the left of the .
+            return f'{n_units:#.3g} {unit_name}'
+        else:
+            # just give all the digits otherwise
+            return f'{n_units:#.0f} {unit_name}'
+
+    def __str__(self):
+        size_str = self._size_to_string(self._total_size)
+        return (f"Unable to allocate {size_str} for an array with shape "
+                f"{self.shape} and data type {self.dtype}")
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.pyi
new file mode 100644
index 0000000..02637a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.pyi
@@ -0,0 +1,55 @@
+from collections.abc import Iterable
+from typing import Any, Final, TypeVar, overload
+
+import numpy as np
+from numpy import _CastingKind
+from numpy._utils import set_module as set_module
+
+###
+
+_T = TypeVar("_T")
+_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
+_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
+
+###
+
+class UFuncTypeError(TypeError):
+    ufunc: Final[np.ufunc]
+    def __init__(self, /, ufunc: np.ufunc) -> None: ...
+
+class _UFuncNoLoopError(UFuncTypeError):
+    dtypes: tuple[np.dtype, ...]
+    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
+
+class _UFuncBinaryResolutionError(_UFuncNoLoopError):
+    dtypes: tuple[np.dtype, np.dtype]
+    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
+
+class _UFuncCastingError(UFuncTypeError):
+    casting: Final[_CastingKind]
+    from_: Final[np.dtype]
+    to: Final[np.dtype]
+    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...
+
+class _UFuncInputCastingError(_UFuncCastingError):
+    in_i: Final[int]
+    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
+
+class _UFuncOutputCastingError(_UFuncCastingError):
+    out_i: Final[int]
+    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
+
+class _ArrayMemoryError(MemoryError):
+    shape: tuple[int, ...]
+    dtype: np.dtype
+    def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
+    @property
+    def _total_size(self) -> int: ...
+    @staticmethod
+    def _size_to_string(num_bytes: int) -> str: ...
+
+@overload
+def _unpack_tuple(tup: tuple[_T]) -> _T: ...
+@overload
+def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
+def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_internal.py b/.venv/lib/python3.12/site-packages/numpy/_core/_internal.py
new file mode 100644
index 0000000..e00e1b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_internal.py
@@ -0,0 +1,958 @@
+"""
+A place for internal code
+
+Some things are more easily handled in Python.
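Both behaviours described above are easy to observe; a sketch (the second part pokes a private class, shown purely for illustration):

    import numpy as np
    from numpy._core._exceptions import _ArrayMemoryError

    # Message formatting is deferred to __str__: no ufunc loop exists for
    # subtracting strings, and the text is only built when printed.
    try:
        np.subtract(np.array(['a']), np.array(['b']))
    except TypeError as exc:           # a UFuncTypeError subclass
        print(exc)                     # ufunc 'subtract' did not contain a loop ...

    # _size_to_string picks a binary prefix: 8e12 bytes -> '7.28 TiB'
    err = _ArrayMemoryError((10**6, 10**6), np.dtype(np.float64))
    print(err)   # Unable to allocate 7.28 TiB for an array with shape ...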
+ +""" +import ast +import math +import re +import sys +import warnings + +from numpy import _NoValue +from numpy.exceptions import DTypePromotionError + +from .multiarray import StringDType, array, dtype, promote_types + +try: + import ctypes +except ImportError: + ctypes = None + +IS_PYPY = sys.implementation.name == 'pypy' + +if sys.byteorder == 'little': + _nbo = '<' +else: + _nbo = '>' + +def _makenames_list(adict, align): + allfields = [] + + for fname, obj in adict.items(): + n = len(obj) + if not isinstance(obj, tuple) or n not in (2, 3): + raise ValueError("entry not a 2- or 3- tuple") + if n > 2 and obj[2] == fname: + continue + num = int(obj[1]) + if num < 0: + raise ValueError("invalid offset.") + format = dtype(obj[0], align=align) + if n > 2: + title = obj[2] + else: + title = None + allfields.append((fname, format, num, title)) + # sort by offsets + allfields.sort(key=lambda x: x[2]) + names = [x[0] for x in allfields] + formats = [x[1] for x in allfields] + offsets = [x[2] for x in allfields] + titles = [x[3] for x in allfields] + + return names, formats, offsets, titles + +# Called in PyArray_DescrConverter function when +# a dictionary without "names" and "formats" +# fields is used as a data-type descriptor. +def _usefields(adict, align): + try: + names = adict[-1] + except KeyError: + names = None + if names is None: + names, formats, offsets, titles = _makenames_list(adict, align) + else: + formats = [] + offsets = [] + titles = [] + for name in names: + res = adict[name] + formats.append(res[0]) + offsets.append(res[1]) + if len(res) > 2: + titles.append(res[2]) + else: + titles.append(None) + + return dtype({"names": names, + "formats": formats, + "offsets": offsets, + "titles": titles}, align) + + +# construct an array_protocol descriptor list +# from the fields attribute of a descriptor +# This calls itself recursively but should eventually hit +# a descriptor that has no fields and then return +# a simple typestring + +def _array_descr(descriptor): + fields = descriptor.fields + if fields is None: + subdtype = descriptor.subdtype + if subdtype is None: + if descriptor.metadata is None: + return descriptor.str + else: + new = descriptor.metadata.copy() + if new: + return (descriptor.str, new) + else: + return descriptor.str + else: + return (_array_descr(subdtype[0]), subdtype[1]) + + names = descriptor.names + ordered_fields = [fields[x] + (x,) for x in names] + result = [] + offset = 0 + for field in ordered_fields: + if field[1] > offset: + num = field[1] - offset + result.append(('', f'|V{num}')) + offset += num + elif field[1] < offset: + raise ValueError( + "dtype.descr is not defined for types with overlapping or " + "out-of-order fields") + if len(field) > 3: + name = (field[2], field[3]) + else: + name = field[2] + if field[0].subdtype: + tup = (name, _array_descr(field[0].subdtype[0]), + field[0].subdtype[1]) + else: + tup = (name, _array_descr(field[0])) + offset += field[0].itemsize + result.append(tup) + + if descriptor.itemsize > offset: + num = descriptor.itemsize - offset + result.append(('', f'|V{num}')) + + return result + + +# format_re was originally from numarray by J. Todd Miller + +format_re = re.compile(r'(?P[<>|=]?)' + r'(?P *[(]?[ ,0-9]*[)]? 
*)' + r'(?P[<>|=]?)' + r'(?P[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)') +sep_re = re.compile(r'\s*,\s*') +space_re = re.compile(r'\s+$') + +# astr is a string (perhaps comma separated) + +_convorder = {'=': _nbo} + +def _commastring(astr): + startindex = 0 + result = [] + islist = False + while startindex < len(astr): + mo = format_re.match(astr, pos=startindex) + try: + (order1, repeats, order2, dtype) = mo.groups() + except (TypeError, AttributeError): + raise ValueError( + f'format number {len(result) + 1} of "{astr}" is not recognized' + ) from None + startindex = mo.end() + # Separator or ending padding + if startindex < len(astr): + if space_re.match(astr, pos=startindex): + startindex = len(astr) + else: + mo = sep_re.match(astr, pos=startindex) + if not mo: + raise ValueError( + 'format number %d of "%s" is not recognized' % + (len(result) + 1, astr)) + startindex = mo.end() + islist = True + + if order2 == '': + order = order1 + elif order1 == '': + order = order2 + else: + order1 = _convorder.get(order1, order1) + order2 = _convorder.get(order2, order2) + if (order1 != order2): + raise ValueError( + f'inconsistent byte-order specification {order1} and {order2}') + order = order1 + + if order in ('|', '=', _nbo): + order = '' + dtype = order + dtype + if repeats == '': + newitem = dtype + else: + if (repeats[0] == "(" and repeats[-1] == ")" + and repeats[1:-1].strip() != "" + and "," not in repeats): + warnings.warn( + 'Passing in a parenthesized single number for repeats ' + 'is deprecated; pass either a single number or indicate ' + 'a tuple with a comma, like "(2,)".', DeprecationWarning, + stacklevel=2) + newitem = (dtype, ast.literal_eval(repeats)) + + result.append(newitem) + + return result if islist else result[0] + +class dummy_ctype: + + def __init__(self, cls): + self._cls = cls + + def __mul__(self, other): + return self + + def __call__(self, *other): + return self._cls(other) + + def __eq__(self, other): + return self._cls == other._cls + + def __ne__(self, other): + return self._cls != other._cls + +def _getintp_ctype(): + val = _getintp_ctype.cache + if val is not None: + return val + if ctypes is None: + import numpy as np + val = dummy_ctype(np.intp) + else: + char = dtype('n').char + if char == 'i': + val = ctypes.c_int + elif char == 'l': + val = ctypes.c_long + elif char == 'q': + val = ctypes.c_longlong + else: + val = ctypes.c_long + _getintp_ctype.cache = val + return val + + +_getintp_ctype.cache = None + +# Used for .ctypes attribute of ndarray + +class _missing_ctypes: + def cast(self, num, obj): + return num.value + + class c_void_p: + def __init__(self, ptr): + self.value = ptr + + +class _ctypes: + def __init__(self, array, ptr=None): + self._arr = array + + if ctypes: + self._ctypes = ctypes + self._data = self._ctypes.c_void_p(ptr) + else: + # fake a pointer-like object that holds onto the reference + self._ctypes = _missing_ctypes() + self._data = self._ctypes.c_void_p(ptr) + self._data._objects = array + + if self._arr.ndim == 0: + self._zerod = True + else: + self._zerod = False + + def data_as(self, obj): + """ + Return the data pointer cast to a particular c-types object. + For example, calling ``self._as_parameter_`` is equivalent to + ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use + the data as a pointer to a ctypes array of floating-point data: + ``self.data_as(ctypes.POINTER(ctypes.c_double))``. + + The returned pointer will keep a reference to the array. 
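`_commastring` above backs comma-separated dtype specifications: fields get automatic names and parenthesized repeats become sub-array shapes. A small sketch:

    import numpy as np

    dt = np.dtype("i4, (2,3)f8, S5")
    assert dt.names == ('f0', 'f1', 'f2')
    assert dt.fields['f1'][0].shape == (2, 3)   # sub-array dtype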
+ """ + # _ctypes.cast function causes a circular reference of self._data in + # self._data._objects. Attributes of self._data cannot be released + # until gc.collect is called. Make a copy of the pointer first then + # let it hold the array reference. This is a workaround to circumvent + # the CPython bug https://bugs.python.org/issue12836. + ptr = self._ctypes.cast(self._data, obj) + ptr._arr = self._arr + return ptr + + def shape_as(self, obj): + """ + Return the shape tuple as an array of some other c-types + type. For example: ``self.shape_as(ctypes.c_short)``. + """ + if self._zerod: + return None + return (obj * self._arr.ndim)(*self._arr.shape) + + def strides_as(self, obj): + """ + Return the strides tuple as an array of some other + c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. + """ + if self._zerod: + return None + return (obj * self._arr.ndim)(*self._arr.strides) + + @property + def data(self): + """ + A pointer to the memory area of the array as a Python integer. + This memory area may contain data that is not aligned, or not in + correct byte-order. The memory area may not even be writeable. + The array flags and data-type of this array should be respected + when passing this attribute to arbitrary C-code to avoid trouble + that can include Python crashing. User Beware! The value of this + attribute is exactly the same as: + ``self._array_interface_['data'][0]``. + + Note that unlike ``data_as``, a reference won't be kept to the array: + code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a + pointer to a deallocated array, and should be spelt + ``(a + b).ctypes.data_as(ctypes.c_void_p)`` + """ + return self._data.value + + @property + def shape(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the C-integer corresponding to ``dtype('p')`` on this + platform (see `~numpy.ctypeslib.c_intp`). This base-type could be + `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on + the platform. The ctypes array contains the shape of + the underlying array. + """ + return self.shape_as(_getintp_ctype()) + + @property + def strides(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the same as for the shape attribute. This ctypes + array contains the strides information from the underlying array. + This strides information is important for showing how many bytes + must be jumped to get to the next element in the array. + """ + return self.strides_as(_getintp_ctype()) + + @property + def _as_parameter_(self): + """ + Overrides the ctypes semi-magic method + + Enables `c_func(some_array.ctypes)` + """ + return self.data_as(ctypes.c_void_p) + + # Numpy 1.21.0, 2021-05-18 + + def get_data(self): + """Deprecated getter for the `_ctypes.data` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_data" is deprecated. Use "data" instead', + DeprecationWarning, stacklevel=2) + return self.data + + def get_shape(self): + """Deprecated getter for the `_ctypes.shape` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_shape" is deprecated. Use "shape" instead', + DeprecationWarning, stacklevel=2) + return self.shape + + def get_strides(self): + """Deprecated getter for the `_ctypes.strides` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_strides" is deprecated. 
Use "strides" instead', + DeprecationWarning, stacklevel=2) + return self.strides + + def get_as_parameter(self): + """Deprecated getter for the `_ctypes._as_parameter_` property. + + .. deprecated:: 1.21 + """ + warnings.warn( + '"get_as_parameter" is deprecated. Use "_as_parameter_" instead', + DeprecationWarning, stacklevel=2, + ) + return self._as_parameter_ + + +def _newnames(datatype, order): + """ + Given a datatype and an order object, return a new names tuple, with the + order indicated + """ + oldnames = datatype.names + nameslist = list(oldnames) + if isinstance(order, str): + order = [order] + seen = set() + if isinstance(order, (list, tuple)): + for name in order: + try: + nameslist.remove(name) + except ValueError: + if name in seen: + raise ValueError(f"duplicate field name: {name}") from None + else: + raise ValueError(f"unknown field name: {name}") from None + seen.add(name) + return tuple(list(order) + nameslist) + raise ValueError(f"unsupported order value: {order}") + +def _copy_fields(ary): + """Return copy of structured array with padding between fields removed. + + Parameters + ---------- + ary : ndarray + Structured array from which to remove padding bytes + + Returns + ------- + ary_copy : ndarray + Copy of ary with padding bytes removed + """ + dt = ary.dtype + copy_dtype = {'names': dt.names, + 'formats': [dt.fields[name][0] for name in dt.names]} + return array(ary, dtype=copy_dtype, copy=True) + +def _promote_fields(dt1, dt2): + """ Perform type promotion for two structured dtypes. + + Parameters + ---------- + dt1 : structured dtype + First dtype. + dt2 : structured dtype + Second dtype. + + Returns + ------- + out : dtype + The promoted dtype + + Notes + ----- + If one of the inputs is aligned, the result will be. The titles of + both descriptors must match (point to the same field). + """ + # Both must be structured and have the same names in the same order + if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names: + raise DTypePromotionError( + f"field names `{dt1.names}` and `{dt2.names}` mismatch.") + + # if both are identical, we can (maybe!) just return the same dtype. + identical = dt1 is dt2 + new_fields = [] + for name in dt1.names: + field1 = dt1.fields[name] + field2 = dt2.fields[name] + new_descr = promote_types(field1[0], field2[0]) + identical = identical and new_descr is field1[0] + + # Check that the titles match (if given): + if field1[2:] != field2[2:]: + raise DTypePromotionError( + f"field titles of field '{name}' mismatch") + if len(field1) == 2: + new_fields.append((name, new_descr)) + else: + new_fields.append(((field1[2], name), new_descr)) + + res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct) + + # Might as well preserve identity (and metadata) if the dtype is identical + # and the itemsize, offsets are also unmodified. This could probably be + # sped up, but also probably just be removed entirely. + if identical and res.itemsize == dt1.itemsize: + for name in dt1.names: + if dt1.fields[name][1] != res.fields[name][1]: + return res # the dtype changed. + return dt1 + + return res + + +def _getfield_is_safe(oldtype, newtype, offset): + """ Checks safety of getfield for object arrays. + + As in _view_is_safe, we need to check that memory containing objects is not + reinterpreted as a non-object datatype and vice versa. + + Parameters + ---------- + oldtype : data-type + Data type of the original ndarray. 
+ newtype : data-type + Data type of the field being accessed by ndarray.getfield + offset : int + Offset of the field being accessed by ndarray.getfield + + Raises + ------ + TypeError + If the field access is invalid + + """ + if newtype.hasobject or oldtype.hasobject: + if offset == 0 and newtype == oldtype: + return + if oldtype.names is not None: + for name in oldtype.names: + if (oldtype.fields[name][1] == offset and + oldtype.fields[name][0] == newtype): + return + raise TypeError("Cannot get/set field of an object array") + return + +def _view_is_safe(oldtype, newtype): + """ Checks safety of a view involving object arrays, for example when + doing:: + + np.zeros(10, dtype=oldtype).view(newtype) + + Parameters + ---------- + oldtype : data-type + Data type of original ndarray + newtype : data-type + Data type of the view + + Raises + ------ + TypeError + If the new type is incompatible with the old type. + + """ + + # if the types are equivalent, there is no problem. + # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) + if oldtype == newtype: + return + + if newtype.hasobject or oldtype.hasobject: + raise TypeError("Cannot change data-type for array of references.") + return + + +# Given a string containing a PEP 3118 format specifier, +# construct a NumPy dtype + +_pep3118_native_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'h', + 'H': 'H', + 'i': 'i', + 'I': 'I', + 'l': 'l', + 'L': 'L', + 'q': 'q', + 'Q': 'Q', + 'e': 'e', + 'f': 'f', + 'd': 'd', + 'g': 'g', + 'Zf': 'F', + 'Zd': 'D', + 'Zg': 'G', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) + +_pep3118_standard_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'i2', + 'H': 'u2', + 'i': 'i4', + 'I': 'u4', + 'l': 'i4', + 'L': 'u4', + 'q': 'i8', + 'Q': 'u8', + 'e': 'f2', + 'f': 'f', + 'd': 'd', + 'Zf': 'F', + 'Zd': 'D', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) + +_pep3118_unsupported_map = { + 'u': 'UCS-2 strings', + '&': 'pointers', + 't': 'bitfields', + 'X': 'function pointers', +} + +class _Stream: + def __init__(self, s): + self.s = s + self.byteorder = '@' + + def advance(self, n): + res = self.s[:n] + self.s = self.s[n:] + return res + + def consume(self, c): + if self.s[:len(c)] == c: + self.advance(len(c)) + return True + return False + + def consume_until(self, c): + if callable(c): + i = 0 + while i < len(self.s) and not c(self.s[i]): + i = i + 1 + return self.advance(i) + else: + i = self.s.index(c) + res = self.advance(i) + self.advance(len(c)) + return res + + @property + def next(self): + return self.s[0] + + def __bool__(self): + return bool(self.s) + + +def _dtype_from_pep3118(spec): + stream = _Stream(spec) + dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) + return dtype + +def __dtype_from_pep3118(stream, is_subdtype): + field_spec = { + 'names': [], + 'formats': [], + 'offsets': [], + 'itemsize': 0 + } + offset = 0 + common_alignment = 1 + is_padding = False + + # Parse spec + while stream: + value = None + + # End of structure, bail out to upper level + if stream.consume('}'): + break + + # Sub-arrays (1) + shape = None + if stream.consume('('): + shape = stream.consume_until(')') + shape = tuple(map(int, shape.split(','))) + + # Byte order + if stream.next in ('@', '=', '<', '>', '^', '!'): + byteorder = stream.advance(1) + if byteorder == '!': + byteorder = '>' + 
stream.byteorder = byteorder + + # Byte order characters also control native vs. standard type sizes + if stream.byteorder in ('@', '^'): + type_map = _pep3118_native_map + type_map_chars = _pep3118_native_typechars + else: + type_map = _pep3118_standard_map + type_map_chars = _pep3118_standard_typechars + + # Item sizes + itemsize_str = stream.consume_until(lambda c: not c.isdigit()) + if itemsize_str: + itemsize = int(itemsize_str) + else: + itemsize = 1 + + # Data types + is_padding = False + + if stream.consume('T{'): + value, align = __dtype_from_pep3118( + stream, is_subdtype=True) + elif stream.next in type_map_chars: + if stream.next == 'Z': + typechar = stream.advance(2) + else: + typechar = stream.advance(1) + + is_padding = (typechar == 'x') + dtypechar = type_map[typechar] + if dtypechar in 'USV': + dtypechar += '%d' % itemsize + itemsize = 1 + numpy_byteorder = {'@': '=', '^': '='}.get( + stream.byteorder, stream.byteorder) + value = dtype(numpy_byteorder + dtypechar) + align = value.alignment + elif stream.next in _pep3118_unsupported_map: + desc = _pep3118_unsupported_map[stream.next] + raise NotImplementedError( + f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})") + else: + raise ValueError( + f"Unknown PEP 3118 data type specifier {stream.s!r}" + ) + + # + # Native alignment may require padding + # + # Here we assume that the presence of a '@' character implicitly + # implies that the start of the array is *already* aligned. + # + extra_offset = 0 + if stream.byteorder == '@': + start_padding = (-offset) % align + intra_padding = (-value.itemsize) % align + + offset += start_padding + + if intra_padding != 0: + if itemsize > 1 or (shape is not None and _prod(shape) > 1): + # Inject internal padding to the end of the sub-item + value = _add_trailing_padding(value, intra_padding) + else: + # We can postpone the injection of internal padding, + # as the item appears at most once + extra_offset += intra_padding + + # Update common alignment + common_alignment = _lcm(align, common_alignment) + + # Convert itemsize to sub-array + if itemsize != 1: + value = dtype((value, (itemsize,))) + + # Sub-arrays (2) + if shape is not None: + value = dtype((value, shape)) + + # Field name + if stream.consume(':'): + name = stream.consume_until(':') + else: + name = None + + if not (is_padding and name is None): + if name is not None and name in field_spec['names']: + raise RuntimeError( + f"Duplicate field name '{name}' in PEP3118 format" + ) + field_spec['names'].append(name) + field_spec['formats'].append(value) + field_spec['offsets'].append(offset) + + offset += value.itemsize + offset += extra_offset + + field_spec['itemsize'] = offset + + # extra final padding for aligned types + if stream.byteorder == '@': + field_spec['itemsize'] += (-offset) % common_alignment + + # Check if this was a simple 1-item type, and unwrap it + if (field_spec['names'] == [None] + and field_spec['offsets'][0] == 0 + and field_spec['itemsize'] == field_spec['formats'][0].itemsize + and not is_subdtype): + ret = field_spec['formats'][0] + else: + _fix_names(field_spec) + ret = dtype(field_spec) + + # Finished + return ret, common_alignment + +def _fix_names(field_spec): + """ Replace names which are None with the next unused f%d name """ + names = field_spec['names'] + for i, name in enumerate(names): + if name is not None: + continue + + j = 0 + while True: + name = f'f{j}' + if name not in names: + break + j = j + 1 + names[i] = name + +def _add_trailing_padding(value, padding): + """Inject 
the specified number of padding bytes at the end of a dtype""" + if value.fields is None: + field_spec = { + 'names': ['f0'], + 'formats': [value], + 'offsets': [0], + 'itemsize': value.itemsize + } + else: + fields = value.fields + names = value.names + field_spec = { + 'names': names, + 'formats': [fields[name][0] for name in names], + 'offsets': [fields[name][1] for name in names], + 'itemsize': value.itemsize + } + + field_spec['itemsize'] += padding + return dtype(field_spec) + +def _prod(a): + p = 1 + for x in a: + p *= x + return p + +def _gcd(a, b): + """Calculate the greatest common divisor of a and b""" + if not (math.isfinite(a) and math.isfinite(b)): + raise ValueError('Can only find greatest common divisor of ' + f'finite arguments, found "{a}" and "{b}"') + while b: + a, b = b, a % b + return a + +def _lcm(a, b): + return a // _gcd(a, b) * b + +def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): + """ Format the error message for when __array_ufunc__ gives up. """ + args_string = ', '.join([f'{arg!r}' for arg in inputs] + + [f'{k}={v!r}' + for k, v in kwargs.items()]) + args = inputs + kwargs.get('out', ()) + types_string = ', '.join(repr(type(arg).__name__) for arg in args) + return ('operand type(s) all returned NotImplemented from ' + f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}' + ) + + +def array_function_errmsg_formatter(public_api, types): + """ Format the error message for when __array_ufunc__ gives up. """ + func_name = f'{public_api.__module__}.{public_api.__name__}' + return (f"no implementation found for '{func_name}' on types that implement " + f'__array_function__: {list(types)}') + + +def _ufunc_doc_signature_formatter(ufunc): + """ + Builds a signature string which resembles PEP 457 + + This is used to construct the first line of the docstring + """ + + # input arguments are simple + if ufunc.nin == 1: + in_args = 'x' + else: + in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin)) + + # output arguments are both keyword or positional + if ufunc.nout == 0: + out_args = ', /, out=()' + elif ufunc.nout == 1: + out_args = ', /, out=None' + else: + out_args = '[, {positional}], / [, out={default}]'.format( + positional=', '.join( + f'out{i + 1}' for i in range(ufunc.nout)), + default=repr((None,) * ufunc.nout) + ) + + # keyword only args depend on whether this is a gufunc + kwargs = ( + ", casting='same_kind'" + ", order='K'" + ", dtype=None" + ", subok=True" + ) + + # NOTE: gufuncs may or may not support the `axis` parameter + if ufunc.signature is None: + kwargs = f", where=True{kwargs}[, signature]" + else: + kwargs += "[, signature, axes, axis]" + + # join all the parts together + return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})' + + +def npy_ctypes_check(cls): + # determine if a class comes from ctypes, in order to work around + # a bug in the buffer protocol for those objects, bpo-10746 + try: + # ctypes class are new-style, so have an __mro__. This probably fails + # for ctypes classes with multiple inheritance. 
+ if IS_PYPY: + # (..., _ctypes.basics._CData, Bufferable, object) + ctype_base = cls.__mro__[-3] + else: + # # (..., _ctypes._CData, object) + ctype_base = cls.__mro__[-2] + # right now, they're part of the _ctypes module + return '_ctypes' in ctype_base.__module__ + except Exception: + return False + +# used to handle the _NoValue default argument for na_object +# in the C implementation of the __reduce__ method for stringdtype +def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue): + if na_object is _NoValue: + return StringDType(coerce=coerce) + return StringDType(coerce=coerce, na_object=na_object) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_internal.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_internal.pyi new file mode 100644 index 0000000..3038297 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_internal.pyi @@ -0,0 +1,72 @@ +import ctypes as ct +import re +from collections.abc import Callable, Iterable +from typing import Any, Final, Generic, Self, overload + +from typing_extensions import TypeVar, deprecated + +import numpy as np +import numpy.typing as npt +from numpy.ctypeslib import c_intp + +_CastT = TypeVar("_CastT", bound=ct._CanCastTo) +_T_co = TypeVar("_T_co", covariant=True) +_CT = TypeVar("_CT", bound=ct._CData) +_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) + +### + +IS_PYPY: Final[bool] = ... + +format_re: Final[re.Pattern[str]] = ... +sep_re: Final[re.Pattern[str]] = ... +space_re: Final[re.Pattern[str]] = ... + +### + +# TODO: Let the likes of `shape_as` and `strides_as` return `None` +# for 0D arrays once we've got shape-support + +class _ctypes(Generic[_PT_co]): + @overload + def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ... + @overload + def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ... + + # + @property + def data(self) -> _PT_co: ... + @property + def shape(self) -> ct.Array[c_intp]: ... + @property + def strides(self) -> ct.Array[c_intp]: ... + @property + def _as_parameter_(self) -> ct.c_void_p: ... + + # + def data_as(self, /, obj: type[_CastT]) -> _CastT: ... + def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + + # + @deprecated('"get_data" is deprecated. Use "data" instead') + def get_data(self, /) -> _PT_co: ... + @deprecated('"get_shape" is deprecated. Use "shape" instead') + def get_shape(self, /) -> ct.Array[c_intp]: ... + @deprecated('"get_strides" is deprecated. Use "strides" instead') + def get_strides(self, /) -> ct.Array[c_intp]: ... + @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead') + def get_as_parameter(self, /) -> ct.c_void_p: ... + +class dummy_ctype(Generic[_T_co]): + _cls: type[_T_co] + + def __init__(self, /, cls: type[_T_co]) -> None: ... + def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __mul__(self, other: object, /) -> Self: ... + def __call__(self, /, *other: object) -> _T_co: ... + +def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... +def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... +def npy_ctypes_check(cls: type) -> bool: ... 
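The `_ctypes` helper defined earlier in this file is what backs the public `ndarray.ctypes` attribute; a short usage sketch:

    import ctypes
    import numpy as np

    a = np.arange(6, dtype=np.float64).reshape(2, 3)
    ptr = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))  # keeps `a` alive
    assert ptr[5] == 5.0                      # C-contiguous, so element (1, 2)
    assert list(a.ctypes.shape) == [2, 3]     # ctypes array of c_intp
    assert a.ctypes.strides[0] == 24          # 3 columns * 8 bytes per row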
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_machar.py b/.venv/lib/python3.12/site-packages/numpy/_core/_machar.py new file mode 100644 index 0000000..b49742a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_machar.py @@ -0,0 +1,355 @@ +""" +Machine arithmetic - determine the parameters of the +floating-point arithmetic system + +Author: Pearu Peterson, September 2003 + +""" +__all__ = ['MachAr'] + +from ._ufunc_config import errstate +from .fromnumeric import any + +# Need to speed this up...especially for longdouble + +# Deprecated 2021-10-20, NumPy 1.22 +class MachAr: + """ + Diagnosing machine parameters. + + Attributes + ---------- + ibeta : int + Radix in which numbers are represented. + it : int + Number of base-`ibeta` digits in the floating point mantissa M. + machep : int + Exponent of the smallest (most negative) power of `ibeta` that, + added to 1.0, gives something different from 1.0 + eps : float + Floating-point number ``beta**machep`` (floating point precision) + negep : int + Exponent of the smallest power of `ibeta` that, subtracted + from 1.0, gives something different from 1.0. + epsneg : float + Floating-point number ``beta**negep``. + iexp : int + Number of bits in the exponent (including its sign and bias). + minexp : int + Smallest (most negative) power of `ibeta` consistent with there + being no leading zeros in the mantissa. + xmin : float + Floating-point number ``beta**minexp`` (the smallest [in + magnitude] positive floating point number with full precision). + maxexp : int + Smallest (positive) power of `ibeta` that causes overflow. + xmax : float + ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] + usable floating value). + irnd : int + In ``range(6)``, information on what kind of rounding is done + in addition, and on how underflow is handled. + ngrd : int + Number of 'guard digits' used when truncating the product + of two mantissas to fit the representation. + epsilon : float + Same as `eps`. + tiny : float + An alias for `smallest_normal`, kept for backwards compatibility. + huge : float + Same as `xmax`. + precision : float + ``- int(-log10(eps))`` + resolution : float + ``- 10**(-precision)`` + smallest_normal : float + The smallest positive floating point number with 1 as leading bit in + the mantissa following IEEE-754. Same as `xmin`. + smallest_subnormal : float + The smallest positive floating point number with 0 as leading bit in + the mantissa following IEEE-754. + + Parameters + ---------- + float_conv : function, optional + Function that converts an integer or integer array to a float + or float array. Default is `float`. + int_conv : function, optional + Function that converts a float or float array to an integer or + integer array. Default is `int`. + float_to_float : function, optional + Function that converts a float array to float. Default is `float`. + Note that this does not seem to do anything useful in the current + implementation. + float_to_str : function, optional + Function that converts a single float to a string. Default is + ``lambda v:'%24.16e' %v``. + title : str, optional + Title that is printed in the string representation of `MachAr`. + + See Also + -------- + finfo : Machine limits for floating point types. + iinfo : Machine limits for integer types. + + References + ---------- + .. [1] Press, Teukolsky, Vetterling and Flannery, + "Numerical Recipes in C++," 2nd ed, + Cambridge University Press, 2002, p. 31. 
+ + """ + + def __init__(self, float_conv=float, int_conv=int, + float_to_float=float, + float_to_str=lambda v: f'{v:24.16e}', + title='Python floating point number'): + """ + + float_conv - convert integer to float (array) + int_conv - convert float (array) to integer + float_to_float - convert float array to float + float_to_str - convert array float to str + title - description of used floating point numbers + + """ + # We ignore all errors here because we are purposely triggering + # underflow to detect the properties of the running arch. + with errstate(under='ignore'): + self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) + + def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): + max_iterN = 10000 + msg = "Did not converge after %d tries with %s" + one = float_conv(1) + two = one + one + zero = one - one + + # Do we really need to do this? Aren't they 2 and 2.0? + # Determine ibeta and beta + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + b = one + for _ in range(max_iterN): + b = b + b + temp = a + b + itemp = int_conv(temp - a) + if any(itemp != 0): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + ibeta = itemp + beta = float_conv(ibeta) + + # Determine it and irnd + it = -1 + b = one + for _ in range(max_iterN): + it = it + 1 + b = b * beta + temp = b + one + temp1 = temp - b + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + + betah = beta / two + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + temp = a + betah + irnd = 0 + if any(temp - a != zero): + irnd = 1 + tempa = a + beta + temp = tempa + betah + if irnd == 0 and any(temp - tempa != zero): + irnd = 2 + + # Determine negep and epsneg + negep = it + 3 + betain = one / beta + a = one + for i in range(negep): + a = a * betain + b = a + for _ in range(max_iterN): + temp = one - a + if any(temp - one != zero): + break + a = a * beta + negep = negep - 1 + # Prevent infinite loop on PPC with gcc 4.0: + if negep < 0: + raise RuntimeError("could not determine machine tolerance " + "for 'negep', locals() -> %s" % (locals())) + else: + raise RuntimeError(msg % (_, one.dtype)) + negep = -negep + epsneg = a + + # Determine machep and eps + machep = - it - 3 + a = b + + for _ in range(max_iterN): + temp = one + a + if any(temp - one != zero): + break + a = a * beta + machep = machep + 1 + else: + raise RuntimeError(msg % (_, one.dtype)) + eps = a + + # Determine ngrd + ngrd = 0 + temp = one + eps + if irnd == 0 and any(temp * one - one != zero): + ngrd = 1 + + # Determine iexp + i = 0 + k = 1 + z = betain + t = one + eps + nxres = 0 + for _ in range(max_iterN): + y = z + z = y * y + a = z * one # Check here for underflow + temp = z * t + if any(a + a == zero) or any(abs(z) >= y): + break + temp1 = temp * betain + if any(temp1 * beta == z): + break + i = i + 1 + k = k + k + else: + raise RuntimeError(msg % (_, one.dtype)) + if ibeta != 10: + iexp = i + 1 + mx = k + k + else: + iexp = 2 + iz = ibeta + while k >= iz: + iz = iz * ibeta + iexp = iexp + 1 + mx = iz + iz - 1 + + # Determine minexp and xmin + for _ in range(max_iterN): + xmin = y + y = y * betain + a = y * one + temp = y * t + if any((a + a) != zero) and any(abs(y) < xmin): + k = k + 1 + temp1 = temp * betain + if any(temp1 * 
beta == y) and any(temp != y): + nxres = 3 + xmin = y + break + else: + break + else: + raise RuntimeError(msg % (_, one.dtype)) + minexp = -k + + # Determine maxexp, xmax + if mx <= k + k - 3 and ibeta != 10: + mx = mx + mx + iexp = iexp + 1 + maxexp = mx + minexp + irnd = irnd + nxres + if irnd >= 2: + maxexp = maxexp - 2 + i = maxexp + minexp + if ibeta == 2 and not i: + maxexp = maxexp - 1 + if i > 20: + maxexp = maxexp - 1 + if any(a != y): + maxexp = maxexp - 2 + xmax = one - epsneg + if any(xmax * one != xmax): + xmax = one - beta * epsneg + xmax = xmax / (xmin * beta * beta * beta) + i = maxexp + minexp + 3 + for j in range(i): + if ibeta == 2: + xmax = xmax + xmax + else: + xmax = xmax * beta + + smallest_subnormal = abs(xmin / beta ** (it)) + + self.ibeta = ibeta + self.it = it + self.negep = negep + self.epsneg = float_to_float(epsneg) + self._str_epsneg = float_to_str(epsneg) + self.machep = machep + self.eps = float_to_float(eps) + self._str_eps = float_to_str(eps) + self.ngrd = ngrd + self.iexp = iexp + self.minexp = minexp + self.xmin = float_to_float(xmin) + self._str_xmin = float_to_str(xmin) + self.maxexp = maxexp + self.xmax = float_to_float(xmax) + self._str_xmax = float_to_str(xmax) + self.irnd = irnd + + self.title = title + # Commonly used parameters + self.epsilon = self.eps + self.tiny = self.xmin + self.huge = self.xmax + self.smallest_normal = self.xmin + self._str_smallest_normal = float_to_str(self.xmin) + self.smallest_subnormal = float_to_float(smallest_subnormal) + self._str_smallest_subnormal = float_to_str(smallest_subnormal) + + import math + self.precision = int(-math.log10(float_to_float(self.eps))) + ten = two + two + two + two + two + resolution = ten ** (-self.precision) + self.resolution = float_to_float(resolution) + self._str_resolution = float_to_str(resolution) + + def __str__(self): + fmt = ( + 'Machine parameters for %(title)s\n' + '---------------------------------------------------------------------\n' + 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' + 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' + 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' + 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' + 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' + 'smallest_normal=%(smallest_normal)s ' + 'smallest_subnormal=%(smallest_subnormal)s\n' + '---------------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + +if __name__ == '__main__': + print(MachAr()) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_machar.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_machar.pyi new file mode 100644 index 0000000..02637a1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_machar.pyi @@ -0,0 +1,55 @@ +from collections.abc import Iterable +from typing import Any, Final, TypeVar, overload + +import numpy as np +from numpy import _CastingKind +from numpy._utils import set_module as set_module + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) +_ExceptionT = TypeVar("_ExceptionT", bound=Exception) + +### + +class UFuncTypeError(TypeError): + ufunc: Final[np.ufunc] + def __init__(self, /, ufunc: np.ufunc) -> None: ... + +class _UFuncNoLoopError(UFuncTypeError): + dtypes: tuple[np.dtype, ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... 
+ +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + dtypes: tuple[np.dtype, np.dtype] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... + +class _UFuncCastingError(UFuncTypeError): + casting: Final[_CastingKind] + from_: Final[np.dtype] + to: Final[np.dtype] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ... + +class _UFuncInputCastingError(_UFuncCastingError): + in_i: Final[int] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... + +class _UFuncOutputCastingError(_UFuncCastingError): + out_i: Final[int] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... + +class _ArrayMemoryError(MemoryError): + shape: tuple[int, ...] + dtype: np.dtype + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ... + @property + def _total_size(self) -> int: ... + @staticmethod + def _size_to_string(num_bytes: int) -> str: ... + +@overload +def _unpack_tuple(tup: tuple[_T]) -> _T: ... +@overload +def _unpack_tuple(tup: _TupleT) -> _TupleT: ... +def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_methods.py b/.venv/lib/python3.12/site-packages/numpy/_core/_methods.py new file mode 100644 index 0000000..21ad790 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_methods.py @@ -0,0 +1,255 @@ +""" +Array methods which are called by both the C-code for the method +and the Python code for the NumPy-namespace function + +""" +import os +import pickle +import warnings +from contextlib import nullcontext + +import numpy as np +from numpy._core import multiarray as mu +from numpy._core import numerictypes as nt +from numpy._core import umath as um +from numpy._core.multiarray import asanyarray +from numpy._globals import _NoValue + +# save those O(100) nanoseconds! 
+bool_dt = mu.dtype("bool") +umr_maximum = um.maximum.reduce +umr_minimum = um.minimum.reduce +umr_sum = um.add.reduce +umr_prod = um.multiply.reduce +umr_bitwise_count = um.bitwise_count +umr_any = um.logical_or.reduce +umr_all = um.logical_and.reduce + +# Complex types to -> (2,)float view for fast-path computation in _var() +_complex_to_float = { + nt.dtype(nt.csingle): nt.dtype(nt.single), + nt.dtype(nt.cdouble): nt.dtype(nt.double), +} +# Special case for windows: ensure double takes precedence +if nt.dtype(nt.longdouble) != nt.dtype(nt.double): + _complex_to_float.update({ + nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble), + }) + +# avoid keyword arguments to speed up parsing, saves about 15%-20% for very +# small reductions +def _amax(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_maximum(a, axis, None, out, keepdims, initial, where) + +def _amin(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_minimum(a, axis, None, out, keepdims, initial, where) + +def _sum(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_sum(a, axis, dtype, out, keepdims, initial, where) + +def _prod(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_prod(a, axis, dtype, out, keepdims, initial, where) + +def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # By default, return a boolean for any and all + if dtype is None: + dtype = bool_dt + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_any(a, axis, dtype, out, keepdims) + return umr_any(a, axis, dtype, out, keepdims, where=where) + +def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # By default, return a boolean for any and all + if dtype is None: + dtype = bool_dt + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_all(a, axis, dtype, out, keepdims) + return umr_all(a, axis, dtype, out, keepdims, where=where) + +def _count_reduce_items(arr, axis, keepdims=False, where=True): + # fast-path for the default case + if where is True: + # no boolean mask given, calculate items according to axis + if axis is None: + axis = tuple(range(arr.ndim)) + elif not isinstance(axis, tuple): + axis = (axis,) + items = 1 + for ax in axis: + items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)] + items = nt.intp(items) + else: + # TODO: Optimize case when `where` is broadcast along a non-reduction + # axis and full sum is more excessive than needed. + + # guarded to protect circular imports + from numpy.lib._stride_tricks_impl import broadcast_to + # count True values in (potentially broadcasted) boolean mask + items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None, + keepdims) + return items + +def _clip(a, min=None, max=None, out=None, **kwargs): + if a.dtype.kind in "iu": + # If min/max is a Python integer, deal with out-of-bound values here. + # (This enforces NEP 50 rules as no value based promotion is done.) 
+ if type(min) is int and min <= np.iinfo(a.dtype).min: + min = None + if type(max) is int and max >= np.iinfo(a.dtype).max: + max = None + + if min is None and max is None: + # return identity + return um.positive(a, out=out, **kwargs) + elif min is None: + return um.minimum(a, max, out=out, **kwargs) + elif max is None: + return um.maximum(a, min, out=out, **kwargs) + else: + return um.clip(a, min, max, out=out, **kwargs) + +def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + arr = asanyarray(a) + + is_float16_result = False + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): + warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None: + if issubclass(arr.dtype.type, (nt.integer, nt.bool)): + dtype = mu.dtype('f8') + elif issubclass(arr.dtype.type, nt.float16): + dtype = mu.dtype('f4') + is_float16_result = True + + ret = umr_sum(arr, axis, dtype, out, keepdims, where=where) + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + if is_float16_result and out is None: + ret = arr.dtype.type(ret) + elif hasattr(ret, 'dtype'): + if is_float16_result: + ret = arr.dtype.type(ret / rcount) + else: + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True, mean=None): + arr = asanyarray(a) + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + # Make this warning show up on top. + if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None): + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, + stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)): + dtype = mu.dtype('f8') + + if mean is not None: + arrmean = mean + else: + # Compute the mean. + # Note that if dtype is not of inexact type then arraymean will + # not be either. + arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where) + # The shape of rcount has to match arrmean to not change the shape of + # out in broadcasting. Otherwise, it cannot be stored back to arrmean. + if rcount.ndim == 0: + # fast-path for default case when where is True + div = rcount + else: + # matching rcount to arrmean when where is specified as array + div = rcount.reshape(arrmean.shape) + if isinstance(arrmean, mu.ndarray): + arrmean = um.true_divide(arrmean, div, out=arrmean, + casting='unsafe', subok=False) + elif hasattr(arrmean, "dtype"): + arrmean = arrmean.dtype.type(arrmean / rcount) + else: + arrmean = arrmean / rcount + + # Compute sum of squared deviations from mean + # Note that x may not be inexact and that we need it to be an array, + # not a scalar. 
+ x = asanyarray(arr - arrmean) + + if issubclass(arr.dtype.type, (nt.floating, nt.integer)): + x = um.multiply(x, x, out=x) + # Fast-paths for built-in complex types + elif x.dtype in _complex_to_float: + xv = x.view(dtype=(_complex_to_float[x.dtype], (2,))) + um.multiply(xv, xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real).real + # Most general case; includes handling object arrays containing imaginary + # numbers and complex types with non-native byteorder + else: + x = um.multiply(x, um.conjugate(x), out=x).real + + ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where) + + # Compute degrees of freedom and make sure it is not negative. + rcount = um.maximum(rcount - ddof, 0) + + # divide by degrees of freedom + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True, mean=None): + ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean) + + if isinstance(ret, mu.ndarray): + ret = um.sqrt(ret, out=ret) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(um.sqrt(ret)) + else: + ret = um.sqrt(ret) + + return ret + +def _ptp(a, axis=None, out=None, keepdims=False): + return um.subtract( + umr_maximum(a, axis, None, out, keepdims), + umr_minimum(a, axis, None, None, keepdims), + out + ) + +def _dump(self, file, protocol=2): + if hasattr(file, 'write'): + ctx = nullcontext(file) + else: + ctx = open(os.fspath(file), "wb") + with ctx as f: + pickle.dump(self, f, protocol=protocol) + +def _dumps(self, protocol=2): + return pickle.dumps(self, protocol=protocol) + +def _bitwise_count(a, out=None, *, where=True, casting='same_kind', + order='K', dtype=None, subok=True): + return umr_bitwise_count(a, out, where=where, casting=casting, + order=order, dtype=dtype, subok=subok) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_methods.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_methods.pyi new file mode 100644 index 0000000..3c80683 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_methods.pyi @@ -0,0 +1,22 @@ +from collections.abc import Callable +from typing import Any, Concatenate, TypeAlias + +import numpy as np + +from . import _exceptions as _exceptions + +### + +_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] + +### + +bool_dt: np.dtype[np.bool] = ... +umr_maximum: _Reduce2 = ... +umr_minimum: _Reduce2 = ... +umr_sum: _Reduce2 = ... +umr_prod: _Reduce2 = ... +umr_bitwise_count = np.bitwise_count +umr_any: _Reduce2 = ... +umr_all: _Reduce2 = ... +_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ... 
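For orientation, `_var` above is the pipeline behind `ndarray.var`: count the reduced items, form the mean with `umr_sum`, subtract, square (with complex and object fast paths), reduce again, and divide by the clipped degrees of freedom. A rough equivalence check of that recipe under default settings; the array values are arbitrary example data:

    import numpy as np

    a = np.array([1.0, 2.0, 4.0, 8.0])
    mean = a.sum() / a.size                  # umr_sum(...) / rcount
    dev = a - mean                           # x = arr - arrmean
    var = (dev * dev).sum() / (a.size - 1)   # divide by max(rcount - ddof, 0)
    assert np.isclose(var, a.var(ddof=1))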
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..ce78182 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..8b2a78b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..bf24fe0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..b9a7717 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..54bbb83 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_simd.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_simd.pyi new file mode 100644 index 0000000..70bb707 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_simd.pyi @@ -0,0 +1,25 @@ +from types import ModuleType +from typing import TypedDict, type_check_only + +# NOTE: these 5 are only defined on systems with an intel processor +SSE42: ModuleType | None = ... +FMA3: ModuleType | None = ... +AVX2: ModuleType | None = ... +AVX512F: ModuleType | None = ... +AVX512_SKX: ModuleType | None = ... + +baseline: ModuleType | None = ... + +@type_check_only +class SimdTargets(TypedDict): + SSE42: ModuleType | None + AVX2: ModuleType | None + FMA3: ModuleType | None + AVX512F: ModuleType | None + AVX512_SKX: ModuleType | None + baseline: ModuleType | None + +targets: SimdTargets = ... + +def clear_floatstatus() -> None: ... +def get_floatstatus() -> int: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.py b/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.py new file mode 100644 index 0000000..87085d4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.py @@ -0,0 +1,100 @@ +""" +String-handling utilities to avoid locale-dependence. + +Used primarily to generate type name aliases. +""" +# "import string" is costly to import! 
+# Construct the translation tables directly +# "A" = chr(65), "a" = chr(97) +_all_chars = tuple(map(chr, range(256))) +_ascii_upper = _all_chars[65:65 + 26] +_ascii_lower = _all_chars[97:97 + 26] +LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:] +UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:] + + +def english_lower(s): + """ Apply English case rules to convert ASCII strings to all lower case. + + This is an internal utility function to replace calls to str.lower() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + lowered : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_lower + >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' + >>> english_lower('') + '' + """ + lowered = s.translate(LOWER_TABLE) + return lowered + + +def english_upper(s): + """ Apply English case rules to convert ASCII strings to all upper case. + + This is an internal utility function to replace calls to str.upper() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + uppered : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_upper + >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' + >>> english_upper('') + '' + """ + uppered = s.translate(UPPER_TABLE) + return uppered + + +def english_capitalize(s): + """ Apply English case rules to convert the first character of an ASCII + string to upper case. + + This is an internal utility function to replace calls to str.capitalize() + such that we can avoid changing behavior with changing locales. + + Parameters + ---------- + s : str + + Returns + ------- + capitalized : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_capitalize + >>> english_capitalize('int8') + 'Int8' + >>> english_capitalize('Int8') + 'Int8' + >>> english_capitalize('') + '' + """ + if s: + return english_upper(s[0]) + s[1:] + else: + return s diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.pyi new file mode 100644 index 0000000..6a85832 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.pyi @@ -0,0 +1,12 @@ +from typing import Final + +_all_chars: Final[tuple[str, ...]] = ... +_ascii_upper: Final[tuple[str, ...]] = ... +_ascii_lower: Final[tuple[str, ...]] = ... + +LOWER_TABLE: Final[tuple[str, ...]] = ... +UPPER_TABLE: Final[tuple[str, ...]] = ... + +def english_lower(s: str) -> str: ... +def english_upper(s: str) -> str: ... +def english_capitalize(s: str) -> str: ... 
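As the docstrings in `_string_helpers.py` explain, these fixed translate tables keep numpy's type-name generation independent of any locale-sensitive case rules (the Turkish dotted/dotless "I" is the cited example). A small sketch of the same technique, with an illustrative `ascii_lower` name that is not part of the module:

    # Map only A-Z to a-z; every other character passes through untouched.
    table = {ord(c): ord(c) + 32 for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}

    def ascii_lower(s: str) -> str:
        return s.translate(table)

    assert ascii_lower('NumPy 2.0_INT8') == 'numpy 2.0_int8'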
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..10747a3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.py b/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.py new file mode 100644 index 0000000..de6c309 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.py @@ -0,0 +1,119 @@ +""" +Due to compatibility, numpy has a very large number of different naming +conventions for the scalar types (those subclassing from `numpy.generic`). +This file produces a convoluted set of dictionaries mapping names to types, +and sometimes other mappings too. + +.. data:: allTypes + A dictionary of names to types that will be exposed as attributes through + ``np._core.numerictypes.*`` + +.. data:: sctypeDict + Similar to `allTypes`, but maps a broader set of aliases to their types. + +.. data:: sctypes + A dictionary keyed by a "type group" string, providing a list of types + under that group. + +""" + +import numpy._core.multiarray as ma +from numpy._core.multiarray import dtype, typeinfo + +###################################### +# Building `sctypeDict` and `allTypes` +###################################### + +sctypeDict = {} +allTypes = {} +c_names_dict = {} + +_abstract_type_names = { + "generic", "integer", "inexact", "floating", "number", + "flexible", "character", "complexfloating", "unsignedinteger", + "signedinteger" +} + +for _abstract_type_name in _abstract_type_names: + allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name) + +for k, v in typeinfo.items(): + if k.startswith("NPY_") and v not in c_names_dict: + c_names_dict[k[4:]] = v + else: + concrete_type = v.type + allTypes[k] = concrete_type + sctypeDict[k] = concrete_type + +_aliases = { + "double": "float64", + "cdouble": "complex128", + "single": "float32", + "csingle": "complex64", + "half": "float16", + "bool_": "bool", + # Default integer: + "int_": "intp", + "uint": "uintp", +} + +for k, v in _aliases.items(): + sctypeDict[k] = allTypes[v] + allTypes[k] = allTypes[v] + +# extra aliases are added only to `sctypeDict` +# to support dtype name access, such as`np.dtype("float")` +_extra_aliases = { + "float": "float64", + "complex": "complex128", + "object": "object_", + "bytes": "bytes_", + "a": "bytes_", + "int": "int_", + "str": "str_", + "unicode": "str_", +} + +for k, v in _extra_aliases.items(): + sctypeDict[k] = allTypes[v] + +# include extended precision sized aliases +for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]: + longdouble_type: type = allTypes[full_name] + + bits: int = dtype(longdouble_type).itemsize * 8 + base_name: str = "complex" if is_complex else "float" + extended_prec_name: str = f"{base_name}{bits}" + if extended_prec_name not in allTypes: + sctypeDict[extended_prec_name] = longdouble_type + allTypes[extended_prec_name] = longdouble_type + + +#################### +# Building `sctypes` +#################### + +sctypes = {"int": set(), "uint": set(), "float": set(), + "complex": set(), "others": set()} + +for type_info in typeinfo.values(): + if type_info.kind in ["M", "m"]: # exclude timedelta and datetime + continue + + concrete_type = type_info.type + + # find proper group for each 
concrete type + for type_group, abstract_type in [ + ("int", ma.signedinteger), ("uint", ma.unsignedinteger), + ("float", ma.floating), ("complex", ma.complexfloating), + ("others", ma.generic) + ]: + if issubclass(concrete_type, abstract_type): + sctypes[type_group].add(concrete_type) + break + +# sort sctype groups by bitsize +for sctype_key in sctypes.keys(): + sctype_list = list(sctypes[sctype_key]) + sctype_list.sort(key=lambda x: dtype(x).itemsize) + sctypes[sctype_key] = sctype_list diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.pyi new file mode 100644 index 0000000..3c9dac7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.pyi @@ -0,0 +1,97 @@ +from collections.abc import Collection +from typing import Final, TypeAlias, TypedDict, type_check_only +from typing import Literal as L + +import numpy as np + +__all__ = ( + "_abstract_type_names", + "_aliases", + "_extra_aliases", + "allTypes", + "c_names_dict", + "sctypeDict", + "sctypes", +) + +sctypeDict: Final[dict[str, type[np.generic]]] +allTypes: Final[dict[str, type[np.generic]]] + +@type_check_only +class _CNamesDict(TypedDict): + BOOL: np.dtype[np.bool] + HALF: np.dtype[np.half] + FLOAT: np.dtype[np.single] + DOUBLE: np.dtype[np.double] + LONGDOUBLE: np.dtype[np.longdouble] + CFLOAT: np.dtype[np.csingle] + CDOUBLE: np.dtype[np.cdouble] + CLONGDOUBLE: np.dtype[np.clongdouble] + STRING: np.dtype[np.bytes_] + UNICODE: np.dtype[np.str_] + VOID: np.dtype[np.void] + OBJECT: np.dtype[np.object_] + DATETIME: np.dtype[np.datetime64] + TIMEDELTA: np.dtype[np.timedelta64] + BYTE: np.dtype[np.byte] + UBYTE: np.dtype[np.ubyte] + SHORT: np.dtype[np.short] + USHORT: np.dtype[np.ushort] + INT: np.dtype[np.intc] + UINT: np.dtype[np.uintc] + LONG: np.dtype[np.long] + ULONG: np.dtype[np.ulong] + LONGLONG: np.dtype[np.longlong] + ULONGLONG: np.dtype[np.ulonglong] + +c_names_dict: Final[_CNamesDict] + +_AbstractTypeName: TypeAlias = L[ + "generic", + "flexible", + "character", + "number", + "integer", + "inexact", + "unsignedinteger", + "signedinteger", + "floating", + "complexfloating", +] +_abstract_type_names: Final[set[_AbstractTypeName]] + +@type_check_only +class _AliasesType(TypedDict): + double: L["float64"] + cdouble: L["complex128"] + single: L["float32"] + csingle: L["complex64"] + half: L["float16"] + bool_: L["bool"] + int_: L["intp"] + uint: L["intp"] + +_aliases: Final[_AliasesType] + +@type_check_only +class _ExtraAliasesType(TypedDict): + float: L["float64"] + complex: L["complex128"] + object: L["object_"] + bytes: L["bytes_"] + a: L["bytes_"] + int: L["int_"] + str: L["str_"] + unicode: L["str_"] + +_extra_aliases: Final[_ExtraAliasesType] + +@type_check_only +class _SCTypes(TypedDict): + int: Collection[type[np.signedinteger]] + uint: Collection[type[np.unsignedinteger]] + float: Collection[type[np.floating]] + complex: Collection[type[np.complexfloating]] + others: Collection[type[np.flexible | np.bool | np.object_]] + +sctypes: Final[_SCTypes] diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.py b/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.py new file mode 100644 index 0000000..24abecd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.py @@ -0,0 +1,489 @@ +""" +Functions for changing global ufunc configuration + +This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and +`_extobj_contextvar` from umath. 
+""" +import functools + +from numpy._utils import set_module + +from .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj + +__all__ = [ + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate" +] + + +@set_module('numpy') +def seterr(all=None, divide=None, over=None, under=None, invalid=None): + """ + Set how floating-point errors are handled. + + Note that operations on integer scalar types (such as `int16`) are + handled like floating point, and are affected by these settings. + + Parameters + ---------- + all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Set treatment for all types of floating-point errors at once: + + - ignore: Take no action when the exception occurs. + - warn: Print a :exc:`RuntimeWarning` (via the Python `warnings` + module). + - raise: Raise a :exc:`FloatingPointError`. + - call: Call a function specified using the `seterrcall` function. + - print: Print a warning directly to ``stdout``. + - log: Record error in a Log object specified by `seterrcall`. + + The default is not to change the current behavior. + divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for division by zero. + over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point overflow. + under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point underflow. + invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for invalid floating-point operation. + + Returns + ------- + old_settings : dict + Dictionary containing the old settings. + + See also + -------- + seterrcall : Set a callback function for the 'call' mode. + geterr, geterrcall, errstate + + Notes + ----- + The floating-point exceptions are defined in the IEEE 754 standard [1]_: + + - Division by zero: infinite result obtained from finite numbers. + - Overflow: result too large to be expressed. + - Underflow: result so close to zero that some precision + was lost. + - Invalid operation: result is not an expressible number, typically + indicates that a NaN was produced. + + .. [1] https://en.wikipedia.org/wiki/IEEE_754 + + Examples + -------- + >>> import numpy as np + >>> orig_settings = np.seterr(all='ignore') # seterr to known value + >>> np.int16(32000) * np.int16(3) + np.int16(30464) + >>> np.seterr(over='raise') + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + >>> old_settings = np.seterr(all='warn', over='raise') + >>> np.int16(32000) * np.int16(3) + Traceback (most recent call last): + File "", line 1, in + FloatingPointError: overflow encountered in scalar multiply + + >>> old_settings = np.seterr(all='print') + >>> np.geterr() + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} + >>> np.int16(32000) * np.int16(3) + np.int16(30464) + >>> np.seterr(**orig_settings) # restore original + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} + + """ + + old = _get_extobj_dict() + # The errstate doesn't include call and bufsize, so pop them: + old.pop("call", None) + old.pop("bufsize", None) + + extobj = _make_extobj( + all=all, divide=divide, over=over, under=under, invalid=invalid) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def geterr(): + """ + Get the current way of handling floating-point errors. 
+ + Returns + ------- + res : dict + A dictionary with keys "divide", "over", "under", and "invalid", + whose values are from the strings "ignore", "print", "log", "warn", + "raise", and "call". The keys represent possible floating-point + exceptions, and the values define how these exceptions are handled. + + See Also + -------- + geterrcall, seterr, seterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> import numpy as np + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} + >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP + array([nan, 1., 1.]) + RuntimeWarning: invalid value encountered in divide + + >>> oldsettings = np.seterr(all='warn', invalid='raise') + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'warn', 'invalid': 'raise'} + >>> np.arange(3.) / np.arange(3.) + Traceback (most recent call last): + ... + FloatingPointError: invalid value encountered in divide + >>> oldsettings = np.seterr(**oldsettings) # restore original + + """ + res = _get_extobj_dict() + # The "geterr" doesn't include call and bufsize,: + res.pop("call", None) + res.pop("bufsize", None) + return res + + +@set_module('numpy') +def setbufsize(size): + """ + Set the size of the buffer used in ufuncs. + + .. versionchanged:: 2.0 + The scope of setting the buffer is tied to the `numpy.errstate` + context. Exiting a ``with errstate():`` will also restore the bufsize. + + Parameters + ---------- + size : int + Size of buffer. + + Returns + ------- + bufsize : int + Previous size of ufunc buffer in bytes. + + Examples + -------- + When exiting a `numpy.errstate` context manager the bufsize is restored: + + >>> import numpy as np + >>> with np.errstate(): + ... np.setbufsize(4096) + ... print(np.getbufsize()) + ... + 8192 + 4096 + >>> np.getbufsize() + 8192 + + """ + old = _get_extobj_dict()["bufsize"] + extobj = _make_extobj(bufsize=size) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def getbufsize(): + """ + Return the size of the buffer used in ufuncs. + + Returns + ------- + getbufsize : int + Size of ufunc buffer in bytes. + + Examples + -------- + >>> import numpy as np + >>> np.getbufsize() + 8192 + + """ + return _get_extobj_dict()["bufsize"] + + +@set_module('numpy') +def seterrcall(func): + """ + Set the floating-point error callback function or log object. + + There are two ways to capture floating-point error messages. The first + is to set the error-handler to 'call', using `seterr`. Then, set + the function to call using this function. + + The second is to set the error-handler to 'log', using `seterr`. + Floating-point errors then trigger a call to the 'write' method of + the provided object. + + Parameters + ---------- + func : callable f(err, flag) or object with write method + Function to call upon floating-point errors ('call'-mode) or + object whose 'write' method is used to log such message ('log'-mode). + + The call function takes two arguments. The first is a string describing + the type of error (such as "divide by zero", "overflow", "underflow", + or "invalid value"), and the second is the status flag. The flag is a + byte, whose four least-significant bits indicate the type of error, one + of "divide", "over", "under", "invalid":: + + [0 0 0 0 divide over under invalid] + + In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. 
+ + If an object is provided, its write method should take one argument, + a string. + + Returns + ------- + h : callable, log instance or None + The old error handler. + + See Also + -------- + seterr, geterr, geterrcall + + Examples + -------- + Callback upon error: + + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + ... + + >>> import numpy as np + + >>> orig_handler = np.seterrcall(err_handler) + >>> orig_err = np.seterr(all='call') + + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> np.seterrcall(orig_handler) + + >>> np.seterr(**orig_err) + {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'} + + Log error message: + + >>> class Log: + ... def write(self, msg): + ... print("LOG: %s" % msg) + ... + + >>> log = Log() + >>> saved_handler = np.seterrcall(log) + >>> save_err = np.seterr(all='log') + + >>> np.array([1, 2, 3]) / 0.0 + LOG: Warning: divide by zero encountered in divide + array([inf, inf, inf]) + + >>> np.seterrcall(orig_handler) + + >>> np.seterr(**orig_err) + {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'} + + """ + old = _get_extobj_dict()["call"] + extobj = _make_extobj(call=func) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def geterrcall(): + """ + Return the current callback function used on floating-point errors. + + When the error handling for a floating-point error (one of "divide", + "over", "under", or "invalid") is set to 'call' or 'log', the function + that is called or the log instance that is written to is returned by + `geterrcall`. This function or log instance has been set with + `seterrcall`. + + Returns + ------- + errobj : callable, log instance or None + The current error handler. If no handler was set through `seterrcall`, + ``None`` is returned. + + See Also + -------- + seterrcall, seterr, geterr + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> import numpy as np + >>> np.geterrcall() # we did not yet set a handler, returns None + + >>> orig_settings = np.seterr(all='call') + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + >>> old_handler = np.seterrcall(err_handler) + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> cur_handler = np.geterrcall() + >>> cur_handler is err_handler + True + >>> old_settings = np.seterr(**orig_settings) # restore original + >>> old_handler = np.seterrcall(None) # restore original + + """ + return _get_extobj_dict()["call"] + + +class _unspecified: + pass + + +_Unspecified = _unspecified() + + +@set_module('numpy') +class errstate: + """ + errstate(**kwargs) + + Context manager for floating-point error handling. + + Using an instance of `errstate` as a context manager allows statements in + that context to execute with a known error handling behavior. Upon entering + the context the error handling is set with `seterr` and `seterrcall`, and + upon exiting it is reset to what it was before. + + .. versionchanged:: 1.17.0 + `errstate` is also usable as a function decorator, saving + a level of indentation if an entire function is wrapped. + + .. versionchanged:: 2.0 + `errstate` is now fully thread and asyncio safe, but may not be + entered more than once. 
+ It is not safe to decorate async functions using ``errstate``. + + Parameters + ---------- + kwargs : {divide, over, under, invalid} + Keyword arguments. The valid keywords are the possible floating-point + exceptions. Each keyword should have a string value that defines the + treatment for the particular error. Possible values are + {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. + + See Also + -------- + seterr, geterr, seterrcall, geterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> import numpy as np + >>> olderr = np.seterr(all='ignore') # Set error handling to known state. + + >>> np.arange(3) / 0. + array([nan, inf, inf]) + >>> with np.errstate(divide='ignore'): + ... np.arange(3) / 0. + array([nan, inf, inf]) + + >>> np.sqrt(-1) + np.float64(nan) + >>> with np.errstate(invalid='raise'): + ... np.sqrt(-1) + Traceback (most recent call last): + File "", line 2, in + FloatingPointError: invalid value encountered in sqrt + + Outside the context the error handling behavior has not changed: + + >>> np.geterr() + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + >>> olderr = np.seterr(**olderr) # restore original state + + """ + __slots__ = ( + "_all", + "_call", + "_divide", + "_invalid", + "_over", + "_token", + "_under", + ) + + def __init__(self, *, call=_Unspecified, + all=None, divide=None, over=None, under=None, invalid=None): + self._token = None + self._call = call + self._all = all + self._divide = divide + self._over = over + self._under = under + self._invalid = invalid + + def __enter__(self): + # Note that __call__ duplicates much of this logic + if self._token is not None: + raise TypeError("Cannot enter `np.errstate` twice.") + if self._call is _Unspecified: + extobj = _make_extobj( + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + else: + extobj = _make_extobj( + call=self._call, + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + + self._token = _extobj_contextvar.set(extobj) + + def __exit__(self, *exc_info): + _extobj_contextvar.reset(self._token) + + def __call__(self, func): + # We need to customize `__call__` compared to `ContextDecorator` + # because we must store the token per-thread so cannot store it on + # the instance (we could create a new instance for this). + # This duplicates the code from `__enter__`. 
+ @functools.wraps(func) + def inner(*args, **kwargs): + if self._call is _Unspecified: + extobj = _make_extobj( + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + else: + extobj = _make_extobj( + call=self._call, + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + + _token = _extobj_contextvar.set(extobj) + try: + # Call the original, decorated, function: + return func(*args, **kwargs) + finally: + _extobj_contextvar.reset(_token) + + return inner diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.pyi new file mode 100644 index 0000000..1a66131 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.pyi @@ -0,0 +1,32 @@ +from collections.abc import Callable +from typing import Any, Literal, TypeAlias, TypedDict, type_check_only + +from _typeshed import SupportsWrite + +from numpy import errstate as errstate + +_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] +_ErrFunc: TypeAlias = Callable[[str, int], Any] +_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str] + +@type_check_only +class _ErrDict(TypedDict): + divide: _ErrKind + over: _ErrKind + under: _ErrKind + invalid: _ErrKind + +def seterr( + all: _ErrKind | None = ..., + divide: _ErrKind | None = ..., + over: _ErrKind | None = ..., + under: _ErrKind | None = ..., + invalid: _ErrKind | None = ..., +) -> _ErrDict: ... +def geterr() -> _ErrDict: ... +def setbufsize(size: int) -> int: ... +def getbufsize() -> int: ... +def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... +def geterrcall() -> _ErrCall | None: ... + +# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings` diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..51212e8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.py b/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.py new file mode 100644 index 0000000..2a68428 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.py @@ -0,0 +1,1775 @@ +"""Array printing function + +$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ + +""" +__all__ = ["array2string", "array_str", "array_repr", + "set_printoptions", "get_printoptions", "printoptions", + "format_float_positional", "format_float_scientific"] +__docformat__ = 'restructuredtext' + +# +# Written by Konrad Hinsen +# last revision: 1996-3-13 +# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) +# and by Perry Greenfield 2000-4-1 for numarray +# and by Travis Oliphant 2005-8-22 for numpy + + +# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy +# scalars but for different purposes. scalartypes.c.src has str/reprs for when +# the scalar is printed on its own, while arrayprint.py has strs for when +# scalars are printed inside an ndarray. Only the latter strs are currently +# user-customizable. 
+ +import functools +import numbers +import sys + +try: + from _thread import get_ident +except ImportError: + from _dummy_thread import get_ident + +import contextlib +import operator +import warnings + +import numpy as np + +from . import numerictypes as _nt +from .fromnumeric import any +from .multiarray import ( + array, + datetime_as_string, + datetime_data, + dragon4_positional, + dragon4_scientific, + ndarray, +) +from .numeric import asarray, concatenate, errstate +from .numerictypes import complex128, flexible, float64, int_ +from .overrides import array_function_dispatch, set_module +from .printoptions import format_options +from .umath import absolute, isfinite, isinf, isnat + + +def _make_options_dict(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + sign=None, formatter=None, floatmode=None, legacy=None, + override_repr=None): + """ + Make a dictionary out of the non-None arguments, plus conversion of + *legacy* and sanity checks. + """ + + options = {k: v for k, v in list(locals().items()) if v is not None} + + if suppress is not None: + options['suppress'] = bool(suppress) + + modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] + if floatmode not in modes + [None]: + raise ValueError("floatmode option must be one of " + + ", ".join(f'"{m}"' for m in modes)) + + if sign not in [None, '-', '+', ' ']: + raise ValueError("sign option must be one of ' ', '+', or '-'") + + if legacy is False: + options['legacy'] = sys.maxsize + elif legacy == False: # noqa: E712 + warnings.warn( + f"Passing `legacy={legacy!r}` is deprecated.", + FutureWarning, stacklevel=3 + ) + options['legacy'] = sys.maxsize + elif legacy == '1.13': + options['legacy'] = 113 + elif legacy == '1.21': + options['legacy'] = 121 + elif legacy == '1.25': + options['legacy'] = 125 + elif legacy == '2.1': + options['legacy'] = 201 + elif legacy == '2.2': + options['legacy'] = 202 + elif legacy is None: + pass # OK, do nothing. + else: + warnings.warn( + "legacy printing option can currently only be '1.13', '1.21', " + "'1.25', '2.1', '2.2' or `False`", stacklevel=3) + + if threshold is not None: + # forbid the bad threshold arg suggested by stack overflow, gh-12351 + if not isinstance(threshold, numbers.Number): + raise TypeError("threshold must be numeric") + if np.isnan(threshold): + raise ValueError("threshold must be non-NAN, try " + "sys.maxsize for untruncated representation") + + if precision is not None: + # forbid the bad precision arg as suggested by issue #18254 + try: + options['precision'] = operator.index(precision) + except TypeError as e: + raise TypeError('precision must be an integer') from e + + return options + + +@set_module('numpy') +def set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, + infstr=None, formatter=None, sign=None, floatmode=None, + *, legacy=None, override_repr=None): + """ + Set printing options. + + These options determine the way floating point numbers, arrays and + other NumPy objects are displayed. + + Parameters + ---------- + precision : int or None, optional + Number of digits of precision for floating point output (default 8). + May be None if `floatmode` is not `fixed`, to print as many digits as + necessary to uniquely specify the value. + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr (default 1000). + To always use the full repr without summarization, pass `sys.maxsize`. 
+ edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension (default 3). + linewidth : int, optional + The number of characters per line for the purpose of inserting + line breaks (default 75). + suppress : bool, optional + If True, always print floating point numbers using fixed point + notation, in which case numbers equal to zero in the current precision + will print as zero. If False, then scientific notation is used when + absolute value of the smallest number is < 1e-4 or the ratio of the + maximum absolute value to the minimum is > 1e3. The default is False. + nanstr : str, optional + String representation of floating point not-a-number (default nan). + infstr : str, optional + String representation of floating point infinity (default inf). + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. (default '-') + + .. versionchanged:: 2.0 + The sign parameter can now be an integer type, previously + types were floating-point types. + + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'numpystr' : types `numpy.bytes_` and `numpy.str_` + - 'object' : `np.object_` arrays + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values + (default maxprec_equal): + + * 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + * 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + * 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + * 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. This also + enables 1.21 legacy printing mode (described below). + + If set to the string ``'1.21'`` enables 1.21 legacy printing mode. 
+        This approximates numpy 1.21 print output of complex structured dtypes
+        by not inserting spaces after commas that separate fields and after
+        colons.
+
+        If set to ``'1.25'`` approximates printing of 1.25 which mainly means
+        that numeric scalars are printed without their type information, e.g.
+        as ``3.0`` rather than ``np.float64(3.0)``.
+
+        If set to ``'2.1'``, shape information is not given when arrays are
+        summarized (i.e., multiple elements replaced with ``...``).
+
+        If set to ``'2.2'``, the transition to use scientific notation for
+        printing ``np.float16`` and ``np.float32`` types may happen later or
+        not at all for larger values.
+
+        If set to `False`, disables legacy mode.
+
+        Unrecognized strings will be ignored with a warning for forward
+        compatibility.
+
+        .. versionchanged:: 1.22.0
+        .. versionchanged:: 2.2
+
+    override_repr : callable, optional
+        If set, the passed function will be used for generating arrays'
+        repr, and all other options will be ignored.
+
+    See Also
+    --------
+    get_printoptions, printoptions, array2string
+
+    Notes
+    -----
+    `formatter` is always reset with a call to `set_printoptions`.
+
+    Use `printoptions` as a context manager to set the values temporarily.
+
+    Examples
+    --------
+    Floating point precision can be set:
+
+    >>> import numpy as np
+    >>> np.set_printoptions(precision=4)
+    >>> np.array([1.123456789])
+    array([1.1235])
+
+    Long arrays can be summarised:
+
+    >>> np.set_printoptions(threshold=5)
+    >>> np.arange(10)
+    array([0, 1, 2, ..., 7, 8, 9], shape=(10,))
+
+    Small results can be suppressed:
+
+    >>> eps = np.finfo(float).eps
+    >>> x = np.arange(4.)
+    >>> x**2 - (x + eps)**2
+    array([-4.9304e-32, -4.4409e-16,  0.0000e+00,  0.0000e+00])
+    >>> np.set_printoptions(suppress=True)
+    >>> x**2 - (x + eps)**2
+    array([-0., -0.,  0.,  0.])
+
+    A custom formatter can be used to display array elements as desired:
+
+    >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
+    >>> x = np.arange(3)
+    >>> x
+    array([int: 0, int: -1, int: -2])
+    >>> np.set_printoptions()  # formatter gets reset
+    >>> x
+    array([0, 1, 2])
+
+    To put back the default options, you can use:
+
+    >>> np.set_printoptions(edgeitems=3, infstr='inf',
+    ...                     linewidth=75, nanstr='nan', precision=8,
+    ...                     suppress=False, threshold=1000, formatter=None)
+
+    Also to temporarily override options, use `printoptions`
+    as a context manager:
+
+    >>> with np.printoptions(precision=2, suppress=True, threshold=5):
+    ...     np.linspace(0, 10, 10)
+    array([ 0.  ,  1.11,  2.22, ...,  7.78,  8.89, 10.  ], shape=(10,))
+
+    """
+    _set_printoptions(precision, threshold, edgeitems, linewidth, suppress,
+                      nanstr, infstr, formatter, sign, floatmode,
+                      legacy=legacy, override_repr=override_repr)
+
+
+def _set_printoptions(precision=None, threshold=None, edgeitems=None,
+                      linewidth=None, suppress=None, nanstr=None,
+                      infstr=None, formatter=None, sign=None, floatmode=None,
+                      *, legacy=None, override_repr=None):
+    new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
+                                 suppress, nanstr, infstr, sign, formatter,
+                                 floatmode, legacy)
+    # formatter and override_repr are always reset
+    new_opt['formatter'] = formatter
+    new_opt['override_repr'] = override_repr
+
+    updated_opt = format_options.get() | new_opt
+    updated_opt.update(new_opt)
+
+    if updated_opt['legacy'] == 113:
+        updated_opt['sign'] = '-'
+
+    return format_options.set(updated_opt)
+
+
+@set_module('numpy')
+def get_printoptions():
+    """
+    Return the current print options.
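+
+    The returned dictionary also reflects any temporary overrides made with
+    the `printoptions` context manager.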
+ + Returns + ------- + print_opts : dict + Dictionary of current print options with keys + + - precision : int + - threshold : int + - edgeitems : int + - linewidth : int + - suppress : bool + - nanstr : str + - infstr : str + - sign : str + - formatter : dict of callables + - floatmode : str + - legacy : str or False + + For a full description of these options, see `set_printoptions`. + + See Also + -------- + set_printoptions, printoptions + + Examples + -------- + >>> import numpy as np + + >>> np.get_printoptions() + {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} + + >>> np.get_printoptions()['linewidth'] + 75 + >>> np.set_printoptions(linewidth=100) + >>> np.get_printoptions()['linewidth'] + 100 + + """ + opts = format_options.get().copy() + opts['legacy'] = { + 113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1', + 202: '2.2', sys.maxsize: False, + }[opts['legacy']] + return opts + + +def _get_legacy_print_mode(): + """Return the legacy print mode as an int.""" + return format_options.get()['legacy'] + + +@set_module('numpy') +@contextlib.contextmanager +def printoptions(*args, **kwargs): + """Context manager for setting print options. + + Set print options for the scope of the `with` block, and restore the old + options at the end. See `set_printoptions` for the full description of + available options. + + Examples + -------- + >>> import numpy as np + + >>> from numpy.testing import assert_equal + >>> with np.printoptions(precision=2): + ... np.array([2.0]) / 3 + array([0.67]) + + The `as`-clause of the `with`-statement gives the current print options: + + >>> with np.printoptions(precision=2) as opts: + ... assert_equal(opts, np.get_printoptions()) + + See Also + -------- + set_printoptions, get_printoptions + + """ + token = _set_printoptions(*args, **kwargs) + + try: + yield get_printoptions() + finally: + format_options.reset(token) + + +def _leading_trailing(a, edgeitems, index=()): + """ + Keep only the N-D corners (leading and trailing edges) of an array. + + Should be passed a base-class ndarray, since it makes no guarantees about + preserving subclasses. 
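+
+    For example (illustrative): with ``edgeitems=2``,
+    ``_leading_trailing(np.arange(10), 2)`` returns ``array([0, 1, 8, 9])``.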
+ """ + axis = len(index) + if axis == a.ndim: + return a[index] + + if a.shape[axis] > 2 * edgeitems: + return concatenate(( + _leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]), + _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) + ), axis=axis) + else: + return _leading_trailing(a, edgeitems, index + np.index_exp[:]) + + +def _object_format(o): + """ Object arrays containing lists should be printed unambiguously """ + if type(o) is list: + fmt = 'list({!r})' + else: + fmt = '{!r}' + return fmt.format(o) + +def repr_format(x): + if isinstance(x, (np.str_, np.bytes_)): + return repr(x.item()) + return repr(x) + +def str_format(x): + if isinstance(x, (np.str_, np.bytes_)): + return str(x.item()) + return str(x) + +def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy, + formatter, **kwargs): + # note: extra arguments in kwargs are ignored + + # wrapped in lambdas to avoid taking a code path + # with the wrong type of data + formatdict = { + 'bool': lambda: BoolFormat(data), + 'int': lambda: IntegerFormat(data, sign), + 'float': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longfloat': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'complexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longcomplexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'datetime': lambda: DatetimeFormat(data, legacy=legacy), + 'timedelta': lambda: TimedeltaFormat(data), + 'object': lambda: _object_format, + 'void': lambda: str_format, + 'numpystr': lambda: repr_format} + + # we need to wrap values in `formatter` in a lambda, so that the interface + # is the same as the above values. 
+ def indirect(x): + return lambda: x + + if formatter is not None: + fkeys = [k for k in formatter.keys() if formatter[k] is not None] + if 'all' in fkeys: + for key in formatdict.keys(): + formatdict[key] = indirect(formatter['all']) + if 'int_kind' in fkeys: + for key in ['int']: + formatdict[key] = indirect(formatter['int_kind']) + if 'float_kind' in fkeys: + for key in ['float', 'longfloat']: + formatdict[key] = indirect(formatter['float_kind']) + if 'complex_kind' in fkeys: + for key in ['complexfloat', 'longcomplexfloat']: + formatdict[key] = indirect(formatter['complex_kind']) + if 'str_kind' in fkeys: + formatdict['numpystr'] = indirect(formatter['str_kind']) + for key in formatdict.keys(): + if key in fkeys: + formatdict[key] = indirect(formatter[key]) + + return formatdict + +def _get_format_function(data, **options): + """ + find the right formatting function for the dtype_ + """ + dtype_ = data.dtype + dtypeobj = dtype_.type + formatdict = _get_formatdict(data, **options) + if dtypeobj is None: + return formatdict["numpystr"]() + elif issubclass(dtypeobj, _nt.bool): + return formatdict['bool']() + elif issubclass(dtypeobj, _nt.integer): + if issubclass(dtypeobj, _nt.timedelta64): + return formatdict['timedelta']() + else: + return formatdict['int']() + elif issubclass(dtypeobj, _nt.floating): + if issubclass(dtypeobj, _nt.longdouble): + return formatdict['longfloat']() + else: + return formatdict['float']() + elif issubclass(dtypeobj, _nt.complexfloating): + if issubclass(dtypeobj, _nt.clongdouble): + return formatdict['longcomplexfloat']() + else: + return formatdict['complexfloat']() + elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)): + return formatdict['numpystr']() + elif issubclass(dtypeobj, _nt.datetime64): + return formatdict['datetime']() + elif issubclass(dtypeobj, _nt.object_): + return formatdict['object']() + elif issubclass(dtypeobj, _nt.void): + if dtype_.names is not None: + return StructuredVoidFormat.from_data(data, **options) + else: + return formatdict['void']() + else: + return formatdict['numpystr']() + + +def _recursive_guard(fillvalue='...'): + """ + Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs + + Decorates a function such that if it calls itself with the same first + argument, it returns `fillvalue` instead of recursing. + + Largely copied from reprlib.recursive_repr + """ + + def decorating_function(f): + repr_running = set() + + @functools.wraps(f) + def wrapper(self, *args, **kwargs): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + return f(self, *args, **kwargs) + finally: + repr_running.discard(key) + + return wrapper + + return decorating_function + + +# gracefully handle recursive calls, when object arrays contain themselves +@_recursive_guard() +def _array2string(a, options, separator=' ', prefix=""): + # The formatter __init__s in _get_format_function cannot deal with + # subclasses yet, and we also need to avoid recursion issues in + # _formatArray with subclasses which return 0d arrays in place of scalars + data = asarray(a) + if a.shape == (): + a = data + + if a.size > options['threshold']: + summary_insert = "..." 
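+        # keep only the corner items of each dimension; the "..." string
+        # above marks the omitted middle in the final output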
+        data = _leading_trailing(data, options['edgeitems'])
+    else:
+        summary_insert = ""
+
+    # find the right formatting function for the array
+    format_function = _get_format_function(data, **options)
+
+    # skip over "["
+    next_line_prefix = " "
+    # skip over array(
+    next_line_prefix += " " * len(prefix)
+
+    lst = _formatArray(a, format_function, options['linewidth'],
+                       next_line_prefix, separator, options['edgeitems'],
+                       summary_insert, options['legacy'])
+    return lst
+
+
+def _array2string_dispatcher(
+        a, max_line_width=None, precision=None,
+        suppress_small=None, separator=None, prefix=None,
+        style=None, formatter=None, threshold=None,
+        edgeitems=None, sign=None, floatmode=None, suffix=None,
+        *, legacy=None):
+    return (a,)
+
+
+@array_function_dispatch(_array2string_dispatcher, module='numpy')
+def array2string(a, max_line_width=None, precision=None,
+                 suppress_small=None, separator=' ', prefix="",
+                 style=np._NoValue, formatter=None, threshold=None,
+                 edgeitems=None, sign=None, floatmode=None, suffix="",
+                 *, legacy=None):
+    """
+    Return a string representation of an array.
+
+    Parameters
+    ----------
+    a : ndarray
+        Input array.
+    max_line_width : int, optional
+        Inserts newlines if text is longer than `max_line_width`.
+        Defaults to ``numpy.get_printoptions()['linewidth']``.
+    precision : int or None, optional
+        Floating point precision.
+        Defaults to ``numpy.get_printoptions()['precision']``.
+    suppress_small : bool, optional
+        Represent numbers "very close" to zero as zero; default is False.
+        Very close is defined by precision: if the precision is 8, e.g.,
+        numbers smaller (in absolute value) than 5e-9 are represented as
+        zero.
+        Defaults to ``numpy.get_printoptions()['suppress']``.
+    separator : str, optional
+        Inserted between elements.
+    prefix : str, optional
+    suffix : str, optional
+        The lengths of the prefix and suffix strings are used to respectively
+        align and wrap the output. An array is typically printed as::
+
+          prefix + array2string(a) + suffix
+
+        The output is left-padded by the length of the prefix string, and
+        wrapping is forced at the column ``max_line_width - len(suffix)``.
+        It should be noted that the content of the prefix and suffix strings
+        is not included in the output.
+    style : _NoValue, optional
+        Has no effect, do not use.
+
+        .. deprecated:: 1.14.0
+    formatter : dict of callables, optional
+        If not None, the keys should indicate the type(s) that the respective
+        formatting function applies to. Callables should return a string.
+        Types that are not specified (by their corresponding keys) are handled
+        by the default formatters. Individual types for which a formatter
+        can be set are:
+
+        - 'bool'
+        - 'int'
+        - 'timedelta' : a `numpy.timedelta64`
+        - 'datetime' : a `numpy.datetime64`
+        - 'float'
+        - 'longfloat' : 128-bit floats
+        - 'complexfloat'
+        - 'longcomplexfloat' : composed of two 128-bit floats
+        - 'void' : type `numpy.void`
+        - 'numpystr' : types `numpy.bytes_` and `numpy.str_`
+
+        Other keys that can be used to set a group of types at once are:
+
+        - 'all' : sets all types
+        - 'int_kind' : sets 'int'
+        - 'float_kind' : sets 'float' and 'longfloat'
+        - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+        - 'str_kind' : sets 'numpystr'
+    threshold : int, optional
+        Total number of array elements which trigger summarization
+        rather than full repr.
+        Defaults to ``numpy.get_printoptions()['threshold']``.
+    edgeitems : int, optional
+        Number of array items in summary at beginning and end of
+        each dimension.
+        Defaults to ``numpy.get_printoptions()['edgeitems']``.
+    sign : string, either '-', '+', or ' ', optional
+        Controls printing of the sign of floating-point types. If '+', always
+        print the sign of positive values. If ' ', always prints a space
+        (whitespace character) in the sign position of positive values. If
+        '-', omit the sign character of positive values.
+        Defaults to ``numpy.get_printoptions()['sign']``.
+
+        .. versionchanged:: 2.0
+            The sign option now also applies to integer types; previously
+            it applied only to floating-point types.
+
+    floatmode : str, optional
+        Controls the interpretation of the `precision` option for
+        floating-point types.
+        Defaults to ``numpy.get_printoptions()['floatmode']``.
+        Can take the following values:
+
+        - 'fixed': Always print exactly `precision` fractional digits,
+          even if this would print more or fewer digits than
+          necessary to specify the value uniquely.
+        - 'unique': Print the minimum number of fractional digits necessary
+          to represent each value uniquely. Different elements may
+          have a different number of digits. The value of the
+          `precision` option is ignored.
+        - 'maxprec': Print at most `precision` fractional digits, but if
+          an element can be uniquely represented with fewer digits
+          only print it with that many.
+        - 'maxprec_equal': Print at most `precision` fractional digits,
+          but if every element in the array can be uniquely
+          represented with an equal number of fewer digits, use that
+          many digits for all elements.
+    legacy : string or `False`, optional
+        If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This
+        approximates numpy 1.13 print output by including a space in the sign
+        position of floats and different behavior for 0d arrays. If set to
+        `False`, disables legacy mode. Unrecognized strings will be ignored
+        with a warning for forward compatibility.
+
+    Returns
+    -------
+    array_str : str
+        String representation of the array.
+
+    Raises
+    ------
+    TypeError
+        if a callable in `formatter` does not return a string.
+
+    See Also
+    --------
+    array_str, array_repr, set_printoptions, get_printoptions
+
+    Notes
+    -----
+    If a formatter is specified for a certain type, the `precision` keyword is
+    ignored for that type.
+
+    This is a very flexible function; `array_repr` and `array_str` use
+    `array2string` internally, so keywords with the same name should work
+    identically in all three functions.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([1e-16,1,2,3])
+    >>> np.array2string(x, precision=2, separator=',',
+    ...                 suppress_small=True)
+    '[0.,1.,2.,3.]'
+
+    >>> x = np.arange(3.)
+ >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) + '[0.00 1.00 2.00]' + + >>> x = np.arange(3) + >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) + '[0x0 0x1 0x2]' + + """ + + overrides = _make_options_dict(precision, threshold, edgeitems, + max_line_width, suppress_small, None, None, + sign, formatter, floatmode, legacy) + options = format_options.get().copy() + options.update(overrides) + + if options['legacy'] <= 113: + if style is np._NoValue: + style = repr + + if a.shape == () and a.dtype.names is None: + return style(a.item()) + elif style is not np._NoValue: + # Deprecation 11-9-2017 v1.14 + warnings.warn("'style' argument is deprecated and no longer functional" + " except in 1.13 'legacy' mode", + DeprecationWarning, stacklevel=2) + + if options['legacy'] > 113: + options['linewidth'] -= len(suffix) + + # treat as a null array if any of shape elements == 0 + if a.size == 0: + return "[]" + + return _array2string(a, options, separator, prefix) + + +def _extendLine(s, line, word, line_width, next_line_prefix, legacy): + needs_wrap = len(line) + len(word) > line_width + if legacy > 113: + # don't wrap lines if it won't help + if len(line) <= len(next_line_prefix): + needs_wrap = False + + if needs_wrap: + s += line.rstrip() + "\n" + line = next_line_prefix + line += word + return s, line + + +def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): + """ + Extends line with nicely formatted (possibly multi-line) string ``word``. + """ + words = word.splitlines() + if len(words) == 1 or legacy <= 113: + return _extendLine(s, line, word, line_width, next_line_prefix, legacy) + + max_word_length = max(len(word) for word in words) + if (len(line) + max_word_length > line_width and + len(line) > len(next_line_prefix)): + s += line.rstrip() + '\n' + line = next_line_prefix + words[0] + indent = next_line_prefix + else: + indent = len(line) * ' ' + line += words[0] + + for word in words[1::]: + s += line.rstrip() + '\n' + line = indent + word + + suffix_length = max_word_length - len(words[-1]) + line += suffix_length * ' ' + + return s, line + +def _formatArray(a, format_function, line_width, next_line_prefix, + separator, edge_items, summary_insert, legacy): + """formatArray is designed for two modes of operation: + + 1. Full output + + 2. Summarized output + + """ + def recurser(index, hanging_indent, curr_width): + """ + By using this local function, we don't need to recurse with all the + arguments. 
Since this function is not created recursively, the cost is + not significant + """ + axis = len(index) + axes_left = a.ndim - axis + + if axes_left == 0: + return format_function(a[index]) + + # when recursing, add a space to align with the [ added, and reduce the + # length of the line by 1 + next_hanging_indent = hanging_indent + ' ' + if legacy <= 113: + next_width = curr_width + else: + next_width = curr_width - len(']') + + a_len = a.shape[axis] + show_summary = summary_insert and 2 * edge_items < a_len + if show_summary: + leading_items = edge_items + trailing_items = edge_items + else: + leading_items = 0 + trailing_items = a_len + + # stringify the array with the hanging indent on the first line too + s = '' + + # last axis (rows) - wrap elements if they would not fit on one line + if axes_left == 1: + # the length up until the beginning of the separator / bracket + if legacy <= 113: + elem_width = curr_width - len(separator.rstrip()) + else: + elem_width = curr_width - max( + len(separator.rstrip()), len(']') + ) + + line = hanging_indent + for i in range(leading_items): + word = recurser(index + (i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if show_summary: + s, line = _extendLine( + s, line, summary_insert, elem_width, hanging_indent, legacy + ) + if legacy <= 113: + line += ", " + else: + line += separator + + for i in range(trailing_items, 1, -1): + word = recurser(index + (-i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if legacy <= 113: + # width of the separator is not considered on 1.13 + elem_width = curr_width + word = recurser(index + (-1,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + + s += line + + # other axes - insert newlines between rows + else: + s = '' + line_sep = separator.rstrip() + '\n' * (axes_left - 1) + + for i in range(leading_items): + nested = recurser( + index + (i,), next_hanging_indent, next_width + ) + s += hanging_indent + nested + line_sep + + if show_summary: + if legacy <= 113: + # trailing space, fixed nbr of newlines, + # and fixed separator + s += hanging_indent + summary_insert + ", \n" + else: + s += hanging_indent + summary_insert + line_sep + + for i in range(trailing_items, 1, -1): + nested = recurser(index + (-i,), next_hanging_indent, + next_width) + s += hanging_indent + nested + line_sep + + nested = recurser(index + (-1,), next_hanging_indent, next_width) + s += hanging_indent + nested + + # remove the hanging indent, and wrap in [] + s = '[' + s[len(hanging_indent):] + ']' + return s + + try: + # invoke the recursive part with an initial index and prefix + return recurser(index=(), + hanging_indent=next_line_prefix, + curr_width=line_width) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). 
To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + recurser = None + +def _none_or_positive_arg(x, name): + if x is None: + return -1 + if x < 0: + raise ValueError(f"{name} must be >= 0") + return x + +class FloatingFormat: + """ Formatter for subtypes of np.floating """ + def __init__(self, data, precision, floatmode, suppress_small, sign=False, + *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + self._legacy = legacy + if self._legacy <= 113: + # when not 0d, legacy does not support '-' + if data.shape != () and sign == '-': + sign = ' ' + + self.floatmode = floatmode + if floatmode == 'unique': + self.precision = None + else: + self.precision = precision + + self.precision = _none_or_positive_arg(self.precision, 'precision') + + self.suppress_small = suppress_small + self.sign = sign + self.exp_format = False + self.large_exponent = False + self.fillFormat(data) + + def fillFormat(self, data): + # only the finite values are used to compute the number of digits + finite_vals = data[isfinite(data)] + + # choose exponential mode based on the non-zero finite values: + abs_non_zero = absolute(finite_vals[finite_vals != 0]) + if len(abs_non_zero) != 0: + max_val = np.max(abs_non_zero) + min_val = np.min(abs_non_zero) + if self._legacy <= 202: + exp_cutoff_max = 1.e8 + else: + # consider data type while deciding the max cutoff for exp format + exp_cutoff_max = 10.**min(8, np.finfo(data.dtype).precision) + with errstate(over='ignore'): # division can overflow + if max_val >= exp_cutoff_max or (not self.suppress_small and + (min_val < 0.0001 or max_val / min_val > 1000.)): + self.exp_format = True + + # do a first pass of printing all the numbers, to determine sizes + if len(finite_vals) == 0: + self.pad_left = 0 + self.pad_right = 0 + self.trim = '.' + self.exp_size = -1 + self.unique = True + self.min_digits = None + elif self.exp_format: + trim, unique = '.', True + if self.floatmode == 'fixed' or self._legacy <= 113: + trim, unique = 'k', False + strs = (dragon4_scientific(x, precision=self.precision, + unique=unique, trim=trim, sign=self.sign == '+') + for x in finite_vals) + frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) + int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) + self.exp_size = max(len(s) for s in exp_strs) - 1 + + self.trim = 'k' + self.precision = max(len(s) for s in frac_part) + self.min_digits = self.precision + self.unique = unique + + # for back-compat with np 1.13, use 2 spaces & sign and full prec + if self._legacy <= 113: + self.pad_left = 3 + else: + # this should be only 1 or 2. Can be calculated from sign. 
+ self.pad_left = max(len(s) for s in int_part) + # pad_right is only needed for nan length calculation + self.pad_right = self.exp_size + 2 + self.precision + else: + trim, unique = '.', True + if self.floatmode == 'fixed': + trim, unique = 'k', False + strs = (dragon4_positional(x, precision=self.precision, + fractional=True, + unique=unique, trim=trim, + sign=self.sign == '+') + for x in finite_vals) + int_part, frac_part = zip(*(s.split('.') for s in strs)) + if self._legacy <= 113: + self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) + else: + self.pad_left = max(len(s) for s in int_part) + self.pad_right = max(len(s) for s in frac_part) + self.exp_size = -1 + self.unique = unique + + if self.floatmode in ['fixed', 'maxprec_equal']: + self.precision = self.min_digits = self.pad_right + self.trim = 'k' + else: + self.trim = '.' + self.min_digits = 0 + + if self._legacy > 113: + # account for sign = ' ' by adding one to pad_left + if self.sign == ' ' and not any(np.signbit(finite_vals)): + self.pad_left += 1 + + # if there are non-finite values, may need to increase pad_left + if data.size != finite_vals.size: + neginf = self.sign != '-' or any(data[isinf(data)] < 0) + offset = self.pad_right + 1 # +1 for decimal pt + current_options = format_options.get() + self.pad_left = max( + self.pad_left, len(current_options['nanstr']) - offset, + len(current_options['infstr']) + neginf - offset + ) + + def __call__(self, x): + if not np.isfinite(x): + with errstate(invalid='ignore'): + current_options = format_options.get() + if np.isnan(x): + sign = '+' if self.sign == '+' else '' + ret = sign + current_options['nanstr'] + else: # isinf + sign = '-' if x < 0 else '+' if self.sign == '+' else '' + ret = sign + current_options['infstr'] + return ' ' * ( + self.pad_left + self.pad_right + 1 - len(ret) + ) + ret + + if self.exp_format: + return dragon4_scientific(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + exp_digits=self.exp_size) + else: + return dragon4_positional(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + fractional=True, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + pad_right=self.pad_right) + + +@set_module('numpy') +def format_float_scientific(x, precision=None, unique=True, trim='k', + sign=False, pad_left=None, exp_digits=None, + min_digits=None): + """ + Format a floating-point scalar as a decimal string in scientific notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed. If `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. 
+ If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + exp_digits : non-negative integer, optional + Pad the exponent with zeros until it contains at least this + many digits. If omitted, the exponent will be at least 2 digits. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. This only has an effect for + `unique=True`. In that case more digits than necessary to uniquely + identify the value may be printed and rounded unbiased. + + .. versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_positional + + Examples + -------- + >>> import numpy as np + >>> np.format_float_scientific(np.float32(np.pi)) + '3.1415927e+00' + >>> s = np.float32(1.23e24) + >>> np.format_float_scientific(s, unique=False, precision=15) + '1.230000071797338e+24' + >>> np.format_float_scientific(s, exp_digits=4) + '1.23e+0024' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_scientific(x, precision=precision, unique=unique, + trim=trim, sign=sign, pad_left=pad_left, + exp_digits=exp_digits, min_digits=min_digits) + + +@set_module('numpy') +def format_float_positional(x, precision=None, unique=True, + fractional=True, trim='k', sign=False, + pad_left=None, pad_right=None, min_digits=None): + """ + Format a floating-point scalar as a decimal string in positional notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed, or if `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. 
+ If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + fractional : boolean, optional + If `True`, the cutoffs of `precision` and `min_digits` refer to the + total number of digits after the decimal point, including leading + zeros. + If `False`, `precision` and `min_digits` refer to the total number of + significant digits, before or after the decimal point, ignoring leading + zeros. + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + pad_right : non-negative integer, optional + Pad the right side of the string with whitespace until at least that + many characters are to the right of the decimal point. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. Only has an effect if `unique=True` + in which case additional digits past those necessary to uniquely + identify the value may be printed, rounding the last additional digit. + + .. versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_scientific + + Examples + -------- + >>> import numpy as np + >>> np.format_float_positional(np.float32(np.pi)) + '3.1415927' + >>> np.format_float_positional(np.float16(np.pi)) + '3.14' + >>> np.format_float_positional(np.float16(0.3)) + '0.3' + >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) + '0.3000488281' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + pad_right = _none_or_positive_arg(pad_right, 'pad_right') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if not fractional and precision == 0: + raise ValueError("precision must be greater than 0 if " + "fractional=False") + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_positional(x, precision=precision, unique=unique, + fractional=fractional, trim=trim, + sign=sign, pad_left=pad_left, + pad_right=pad_right, min_digits=min_digits) + +class IntegerFormat: + def __init__(self, data, sign='-'): + if data.size > 0: + data_max = np.max(data) + data_min = np.min(data) + data_max_str_len = len(str(data_max)) + if sign == ' ' and data_min < 0: + sign = '-' + if data_max >= 0 and sign in "+ ": + data_max_str_len += 1 + max_str_len = max(data_max_str_len, + len(str(data_min))) + else: + max_str_len = 0 + self.format = f'{{:{sign}{max_str_len}d}}' + + def __call__(self, x): + return self.format.format(x) + +class BoolFormat: + def __init__(self, data, **kwargs): + # add an extra space so " True" and "False" have the same length and + # array elements align nicely when printed, except in 0d arrays + self.truestr = ' True' if data.shape != () else 'True' + + def __call__(self, x): + return self.truestr if x else 
"False" + + +class ComplexFloatingFormat: + """ Formatter for subtypes of np.complexfloating """ + def __init__(self, x, precision, floatmode, suppress_small, + sign=False, *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + floatmode_real = floatmode_imag = floatmode + if legacy <= 113: + floatmode_real = 'maxprec_equal' + floatmode_imag = 'maxprec' + + self.real_format = FloatingFormat( + x.real, precision, floatmode_real, suppress_small, + sign=sign, legacy=legacy + ) + self.imag_format = FloatingFormat( + x.imag, precision, floatmode_imag, suppress_small, + sign='+', legacy=legacy + ) + + def __call__(self, x): + r = self.real_format(x.real) + i = self.imag_format(x.imag) + + # add the 'j' before the terminal whitespace in i + sp = len(i.rstrip()) + i = i[:sp] + 'j' + i[sp:] + + return r + i + + +class _TimelikeFormat: + def __init__(self, data): + non_nat = data[~isnat(data)] + if len(non_nat) > 0: + # Max str length of non-NaT elements + max_str_len = max(len(self._format_non_nat(np.max(non_nat))), + len(self._format_non_nat(np.min(non_nat)))) + else: + max_str_len = 0 + if len(non_nat) < data.size: + # data contains a NaT + max_str_len = max(max_str_len, 5) + self._format = f'%{max_str_len}s' + self._nat = "'NaT'".rjust(max_str_len) + + def _format_non_nat(self, x): + # override in subclass + raise NotImplementedError + + def __call__(self, x): + if isnat(x): + return self._nat + else: + return self._format % self._format_non_nat(x) + + +class DatetimeFormat(_TimelikeFormat): + def __init__(self, x, unit=None, timezone=None, casting='same_kind', + legacy=False): + # Get the unit from the dtype + if unit is None: + if x.dtype.kind == 'M': + unit = datetime_data(x.dtype)[0] + else: + unit = 's' + + if timezone is None: + timezone = 'naive' + self.timezone = timezone + self.unit = unit + self.casting = casting + self.legacy = legacy + + # must be called after the above are configured + super().__init__(x) + + def __call__(self, x): + if self.legacy <= 113: + return self._format_non_nat(x) + return super().__call__(x) + + def _format_non_nat(self, x): + return "'%s'" % datetime_as_string(x, + unit=self.unit, + timezone=self.timezone, + casting=self.casting) + + +class TimedeltaFormat(_TimelikeFormat): + def _format_non_nat(self, x): + return str(x.astype('i8')) + + +class SubArrayFormat: + def __init__(self, format_function, **options): + self.format_function = format_function + self.threshold = options['threshold'] + self.edge_items = options['edgeitems'] + + def __call__(self, a): + self.summary_insert = "..." if a.size > self.threshold else "" + return self.format_array(a) + + def format_array(self, a): + if np.ndim(a) == 0: + return self.format_function(a) + + if self.summary_insert and a.shape[0] > 2 * self.edge_items: + formatted = ( + [self.format_array(a_) for a_ in a[:self.edge_items]] + + [self.summary_insert] + + [self.format_array(a_) for a_ in a[-self.edge_items:]] + ) + else: + formatted = [self.format_array(a_) for a_ in a] + + return "[" + ", ".join(formatted) + "]" + + +class StructuredVoidFormat: + """ + Formatter for structured np.void objects. + + This does not work on structured alias types like + np.dtype(('i4', 'i2,i2')), as alias scalars lose their field information, + and the implementation relies upon np.void.__getitem__. 
+ """ + def __init__(self, format_functions): + self.format_functions = format_functions + + @classmethod + def from_data(cls, data, **options): + """ + This is a second way to initialize StructuredVoidFormat, + using the raw data as input. Added to avoid changing + the signature of __init__. + """ + format_functions = [] + for field_name in data.dtype.names: + format_function = _get_format_function(data[field_name], **options) + if data.dtype[field_name].shape != (): + format_function = SubArrayFormat(format_function, **options) + format_functions.append(format_function) + return cls(format_functions) + + def __call__(self, x): + str_fields = [ + format_function(field) + for field, format_function in zip(x, self.format_functions) + ] + if len(str_fields) == 1: + return f"({str_fields[0]},)" + else: + return f"({', '.join(str_fields)})" + + +def _void_scalar_to_string(x, is_repr=True): + """ + Implements the repr for structured-void scalars. It is called from the + scalartypes.c.src code, and is placed here because it uses the elementwise + formatters defined above. + """ + options = format_options.get().copy() + + if options["legacy"] <= 125: + return StructuredVoidFormat.from_data(array(x), **options)(x) + + if options.get('formatter') is None: + options['formatter'] = {} + options['formatter'].setdefault('float_kind', str) + val_repr = StructuredVoidFormat.from_data(array(x), **options)(x) + if not is_repr: + return val_repr + cls = type(x) + cls_fqn = cls.__module__.replace("numpy", "np") + "." + cls.__name__ + void_dtype = np.dtype((np.void, x.dtype)) + return f"{cls_fqn}({val_repr}, dtype={void_dtype!s})" + + +_typelessdata = [int_, float64, complex128, _nt.bool] + + +def dtype_is_implied(dtype): + """ + Determine if the given dtype is implied by the representation + of its values. + + Parameters + ---------- + dtype : dtype + Data type + + Returns + ------- + implied : bool + True if the dtype is implied by the representation of its values. + + Examples + -------- + >>> import numpy as np + >>> np._core.arrayprint.dtype_is_implied(int) + True + >>> np.array([1, 2, 3], int) + array([1, 2, 3]) + >>> np._core.arrayprint.dtype_is_implied(np.int8) + False + >>> np.array([1, 2, 3], np.int8) + array([1, 2, 3], dtype=int8) + """ + dtype = np.dtype(dtype) + if format_options.get()['legacy'] <= 113 and dtype.type == np.bool: + return False + + # not just void types can be structured, and names are not part of the repr + if dtype.names is not None: + return False + + # should care about endianness *unless size is 1* (e.g., int8, bool) + if not dtype.isnative: + return False + + return dtype.type in _typelessdata + + +def dtype_short_repr(dtype): + """ + Convert a dtype to a short form which evaluates to the same dtype. + + The intent is roughly that the following holds + + >>> from numpy import * + >>> dt = np.int64([1, 2]).dtype + >>> assert eval(dtype_short_repr(dt)) == dt + """ + if type(dtype).__repr__ != np.dtype.__repr__: + # TODO: Custom repr for user DTypes, logic should likely move. 
+        return repr(dtype)
+    if dtype.names is not None:
+        # structured dtypes give a list or tuple repr
+        return str(dtype)
+    elif issubclass(dtype.type, flexible):
+        # handle these separately so they don't give garbage like str256
+        return f"'{str(dtype)}'"
+
+    typename = dtype.name
+    if not dtype.isnative:
+        # deal with cases like dtype('<u2') that are identical to an
+        # established dtype (in this case uint16)
+        # except that they have a different endianness.
+        return f"'{str(dtype)}'"
+    # quote typenames which can't be represented as python variable names
+    if typename and not typename[0].isalpha():
+        typename = repr(typename)
+    return typename
+
+
+def _array_repr_implementation(
+        arr, max_line_width=None, precision=None, suppress_small=None,
+        array2string=array2string):
+    """Internal version of array_repr() that allows overriding array2string."""
+    current_options = format_options.get()
+    override_repr = current_options["override_repr"]
+    if override_repr is not None:
+        return override_repr(arr)
+
+    if max_line_width is None:
+        max_line_width = current_options['linewidth']
+
+    if type(arr) is not ndarray:
+        class_name = type(arr).__name__
+    else:
+        class_name = "array"
+
+    prefix = class_name + "("
+    if (current_options['legacy'] <= 113 and
+            arr.shape == () and not arr.dtype.names):
+        lst = repr(arr.item())
+    elif arr.size > 0 or arr.shape == (0,):
+        lst = array2string(arr, max_line_width, precision, suppress_small,
+                           ', ', prefix, suffix=")")
+    else:  # show zero-length shape unless it is (0,)
+        lst = "[], shape=%s" % (repr(arr.shape),)
+
+    extras = []
+    if (arr.size == 0 and arr.shape != (0,)
+            or (current_options['legacy'] > 210
+                and arr.size > current_options['threshold'])):
+        extras.append(f"shape={arr.shape}")
+    if not dtype_is_implied(arr.dtype) or arr.size == 0:
+        extras.append(f"dtype={dtype_short_repr(arr.dtype)}")
+
+    if not extras:
+        return prefix + lst + ")"
+
+    arr_str = prefix + lst + ","
+    extra_str = ", ".join(extras) + ")"
+    # compute whether we should put extras on a new line: Do so if adding the
+    # extras would extend the last line past max_line_width.
+    # Note: This line gives the correct result even when rfind returns -1.
+    last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
+    spacer = " "
+    if current_options['legacy'] <= 113:
+        if issubclass(arr.dtype.type, flexible):
+            spacer = '\n' + ' ' * len(prefix)
+    elif last_line_len + len(extra_str) + 1 > max_line_width:
+        spacer = '\n' + ' ' * len(prefix)
+
+    return arr_str + spacer + extra_str
+
+
+def _array_repr_dispatcher(
+        arr, max_line_width=None, precision=None, suppress_small=None):
+    return (arr,)
+
+
+@array_function_dispatch(_array_repr_dispatcher, module='numpy')
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+    """
+    Return the string representation of an array.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+    max_line_width : int, optional
+        Inserts newlines if text is longer than `max_line_width`.
+        Defaults to ``numpy.get_printoptions()['linewidth']``.
+    precision : int, optional
+        Floating point precision.
+        Defaults to ``numpy.get_printoptions()['precision']``.
+    suppress_small : bool, optional
+        Represent numbers "very close" to zero as zero; default is False.
+        Very close is defined by precision: if the precision is 8, e.g.,
+        numbers smaller (in absolute value) than 5e-9 are represented as
+        zero.
+        Defaults to ``numpy.get_printoptions()['suppress']``.
+
+    Returns
+    -------
+    string : str
+      The string representation of an array.
+
+    See Also
+    --------
+    array_str, array2string, set_printoptions
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.array_repr(np.array([1,2]))
+    'array([1, 2])'
+    >>> np.array_repr(np.ma.array([0.]))
+    'MaskedArray([0.])'
+    >>> np.array_repr(np.array([], np.int32))
+    'array([], dtype=int32)'
+
+    >>> x = np.array([1e-6, 4e-7, 2, 3])
+    >>> np.array_repr(x, precision=6, suppress_small=True)
+    'array([0.000001,  0.      ,  2.      ,  3.      ])'
+
+    """
+    return _array_repr_implementation(
+        arr, max_line_width, precision, suppress_small)
+
+
+@_recursive_guard()
+def _guarded_repr_or_str(v):
+    if isinstance(v, bytes):
+        return repr(v)
+    return str(v)
+
+
+def _array_str_implementation(
+        a, max_line_width=None, precision=None, suppress_small=None,
+        array2string=array2string):
+    """Internal version of array_str() that allows overriding array2string."""
+    if (format_options.get()['legacy'] <= 113 and
+            a.shape == () and not a.dtype.names):
+        return str(a.item())
+
+    # the str of 0d arrays is a special case: It should appear like a scalar,
+    # so floats are not truncated by `precision`, and strings are not wrapped
+    # in quotes. So we return the str of the scalar value.
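+    # e.g. str(np.array('abc')) gives 'abc' (no quotes) and str(np.array(1/3))
+    # gives '0.3333333333333333' rather than a `precision`-truncated value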
+ if a.shape == (): + # obtain a scalar and call str on it, avoiding problems for subclasses + # for which indexing with () returns a 0d instead of a scalar by using + # ndarray's getindex. Also guard against recursive 0d object arrays. + return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) + + return array2string(a, max_line_width, precision, suppress_small, ' ', "") + + +def _array_str_dispatcher( + a, max_line_width=None, precision=None, suppress_small=None): + return (a,) + + +@array_function_dispatch(_array_str_dispatcher, module='numpy') +def array_str(a, max_line_width=None, precision=None, suppress_small=None): + """ + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function is + similar to `array_repr`, the difference being that `array_repr` also + returns information on the kind of array and its data type. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + + See Also + -------- + array2string, array_repr, set_printoptions + + Examples + -------- + >>> import numpy as np + >>> np.array_str(np.arange(3)) + '[0 1 2]' + + """ + return _array_str_implementation( + a, max_line_width, precision, suppress_small) + + +# needed if __array_function__ is disabled +_array2string_impl = getattr(array2string, '__wrapped__', array2string) +_default_array_str = functools.partial(_array_str_implementation, + array2string=_array2string_impl) +_default_array_repr = functools.partial(_array_repr_implementation, + array2string=_array2string_impl) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.pyi new file mode 100644 index 0000000..fec03a6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.pyi @@ -0,0 +1,238 @@ +from collections.abc import Callable + +# Using a private class is by no means ideal, but it is simply a consequence +# of a `contextlib.context` returning an instance of aforementioned class +from contextlib import _GeneratorContextManager +from typing import ( + Any, + Final, + Literal, + SupportsIndex, + TypeAlias, + TypedDict, + overload, + type_check_only, +) + +from typing_extensions import deprecated + +import numpy as np +from numpy._globals import _NoValueType +from numpy._typing import NDArray, _CharLike_co, _FloatLike_co + +__all__ = [ + "array2string", + "array_repr", + "array_str", + "format_float_positional", + "format_float_scientific", + "get_printoptions", + "printoptions", + "set_printoptions", +] + +### + +_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] +_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] +_Sign: TypeAlias = Literal["-", "+", " "] +_Trim: TypeAlias = Literal["k", ".", "0", "-"] +_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] + +@type_check_only +class _FormatDict(TypedDict, total=False): + bool: 
Callable[[np.bool], str] + int: Callable[[np.integer], str] + timedelta: Callable[[np.timedelta64], str] + datetime: Callable[[np.datetime64], str] + float: Callable[[np.floating], str] + longfloat: Callable[[np.longdouble], str] + complexfloat: Callable[[np.complexfloating], str] + longcomplexfloat: Callable[[np.clongdouble], str] + void: Callable[[np.void], str] + numpystr: Callable[[_CharLike_co], str] + object: Callable[[object], str] + all: Callable[[object], str] + int_kind: Callable[[np.integer], str] + float_kind: Callable[[np.floating], str] + complex_kind: Callable[[np.complexfloating], str] + str_kind: Callable[[_CharLike_co], str] + +@type_check_only +class _FormatOptions(TypedDict): + precision: int + threshold: int + edgeitems: int + linewidth: int + suppress: bool + nanstr: str + infstr: str + formatter: _FormatDict | None + sign: _Sign + floatmode: _FloatMode + legacy: _Legacy + +### + +__docformat__: Final = "restructuredtext" # undocumented + +def set_printoptions( + precision: SupportsIndex | None = ..., + threshold: int | None = ..., + edgeitems: int | None = ..., + linewidth: int | None = ..., + suppress: bool | None = ..., + nanstr: str | None = ..., + infstr: str | None = ..., + formatter: _FormatDict | None = ..., + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + *, + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, +) -> None: ... +def get_printoptions() -> _FormatOptions: ... + +# public numpy export +@overload # no style +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + style: _NoValueType = ..., + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: _Legacy | None = None, +) -> str: ... +@overload # style= (positional), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: Literal["1.13"], +) -> str: ... +@overload # style= (keyword), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + *, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: Literal["1.13"], +) -> str: ... +@overload # style= (positional), legacy!="1.13" +@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: _LegacyNoStyle | None = None, +) -> str: ... 
+@overload # style= (keyword), legacy!="1.13"
+@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
+def array2string(
+    a: NDArray[Any],
+    max_line_width: int | None = None,
+    precision: SupportsIndex | None = None,
+    suppress_small: bool | None = None,
+    separator: str = " ",
+    prefix: str = "",
+    *,
+    style: _ReprFunc,
+    formatter: _FormatDict | None = None,
+    threshold: int | None = None,
+    edgeitems: int | None = None,
+    sign: _Sign | None = None,
+    floatmode: _FloatMode | None = None,
+    suffix: str = "",
+    legacy: _LegacyNoStyle | None = None,
+) -> str: ...
+
+def format_float_scientific(
+    x: _FloatLike_co,
+    precision: int | None = ...,
+    unique: bool = ...,
+    trim: _Trim = "k",
+    sign: bool = ...,
+    pad_left: int | None = ...,
+    exp_digits: int | None = ...,
+    min_digits: int | None = ...,
+) -> str: ...
+def format_float_positional(
+    x: _FloatLike_co,
+    precision: int | None = ...,
+    unique: bool = ...,
+    fractional: bool = ...,
+    trim: _Trim = "k",
+    sign: bool = ...,
+    pad_left: int | None = ...,
+    pad_right: int | None = ...,
+    min_digits: int | None = ...,
+) -> str: ...
+def array_repr(
+    arr: NDArray[Any],
+    max_line_width: int | None = ...,
+    precision: SupportsIndex | None = ...,
+    suppress_small: bool | None = ...,
+) -> str: ...
+def array_str(
+    a: NDArray[Any],
+    max_line_width: int | None = ...,
+    precision: SupportsIndex | None = ...,
+    suppress_small: bool | None = ...,
+) -> str: ...
+def printoptions(
+    precision: SupportsIndex | None = ...,
+    threshold: int | None = ...,
+    edgeitems: int | None = ...,
+    linewidth: int | None = ...,
+    suppress: bool | None = ...,
+    nanstr: str | None = ...,
+    infstr: str | None = ...,
+    formatter: _FormatDict | None = ...,
+    sign: _Sign | None = None,
+    floatmode: _FloatMode | None = None,
+    *,
+    legacy: _Legacy | None = None,
+    override_repr: _ReprFunc | None = None,
+) -> _GeneratorContextManager[_FormatOptions]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/cversions.py b/.venv/lib/python3.12/site-packages/numpy/_core/cversions.py
new file mode 100644
index 0000000..00159c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/cversions.py
@@ -0,0 +1,13 @@
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from os.path import dirname
+
+from code_generators.genapi import fullapi_hash
+from code_generators.numpy_api import full_api
+
+if __name__ == '__main__':
+    curdir = dirname(__file__)
+    print(fullapi_hash(full_api))
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.py b/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.py
new file mode 100644
index 0000000..bde8921
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.py
@@ -0,0 +1,1427 @@
+"""
+This module contains a set of functions for vectorized string
+operations and methods.
+
+.. note::
+   The `chararray` class exists for backwards compatibility with
+   Numarray; it is not recommended for new development. Starting from numpy
+   1.4, if one needs arrays of strings, it is recommended to use arrays of
+   `dtype` `object_`, `bytes_` or `str_`, and use the free functions
+   in the `numpy.char` module for fast vectorized string operations.
+
+Some methods will only be available if the corresponding string method is
+available in your version of Python.
+
+The preferred alias for `defchararray` is `numpy.char`.
+
+"""
+import functools
+
+import numpy as np
+from numpy._core import overrides
+from numpy._core.multiarray import compare_chararrays
+from numpy._core.strings import (
+    _join as join,
+)
+from numpy._core.strings import (
+    _rsplit as rsplit,
+)
+from numpy._core.strings import (
+    _split as split,
+)
+from numpy._core.strings import (
+    _splitlines as splitlines,
+)
+from numpy._utils import set_module
+from numpy.strings import *
+from numpy.strings import (
+    multiply as strings_multiply,
+)
+from numpy.strings import (
+    partition as strings_partition,
+)
+from numpy.strings import (
+    rpartition as strings_rpartition,
+)
+
+from .numeric import array as narray
+from .numeric import asarray as asnarray
+from .numeric import ndarray
+from .numerictypes import bytes_, character, str_
+
+__all__ = [
+    'equal', 'not_equal', 'greater_equal', 'less_equal',
+    'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
+    'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
+    'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
+    'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
+    'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
+    'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
+    'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
+    'array', 'asarray', 'compare_chararrays', 'chararray'
+    ]
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy.char')
+
+
+def _binary_op_dispatcher(x1, x2):
+    return (x1, x2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def equal(x1, x2):
+    """
+    Return (x1 == x2) element-wise.
+
+    Unlike `numpy.equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> y = "aa "
+    >>> x = "aa"
+    >>> np.char.equal(x, y)
+    array(True)
+
+    See Also
+    --------
+    not_equal, greater_equal, less_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '==', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def not_equal(x1, x2):
+    """
+    Return (x1 != x2) element-wise.
+
+    Unlike `numpy.not_equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, greater_equal, less_equal, greater, less
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.not_equal(x1, 'b')
+    array([ True, False,  True])
+
+    """
+    return compare_chararrays(x1, x2, '!=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater_equal(x1, x2):
+    """
+    Return (x1 >= x2) element-wise.
+
+    Unlike `numpy.greater_equal`, this comparison is performed by
+    first stripping whitespace characters from the end of the string.
+    This behavior is provided for backward-compatibility with
+    numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, less_equal, greater, less
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.greater_equal(x1, 'b')
+    array([False,  True,  True])
+
+    """
+    return compare_chararrays(x1, x2, '>=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less_equal(x1, x2):
+    """
+    Return (x1 <= x2) element-wise.
+
+    Unlike `numpy.less_equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, greater, less
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.less_equal(x1, 'b')
+    array([ True,  True, False])
+
+    """
+    return compare_chararrays(x1, x2, '<=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater(x1, x2):
+    """
+    Return (x1 > x2) element-wise.
+
+    Unlike `numpy.greater`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, less
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.greater(x1, 'b')
+    array([False, False,  True])
+
+    """
+    return compare_chararrays(x1, x2, '>', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less(x1, x2):
+    """
+    Return (x1 < x2) element-wise.
+
+    Unlike `numpy.less`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, greater
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.less(x1, 'b')
+    array([ True, False, False])
+
+    """
+    return compare_chararrays(x1, x2, '<', True)
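The rstrip-before-compare behavior that these docstrings describe is easy to confirm interactively. A quick editorial check (not part of the vendored file), contrasting `np.char.equal` with a plain `==` comparison:

```python
import numpy as np

x = np.array(['aa'])
y = np.array(['aa   '])     # same text, trailing whitespace

print(np.char.equal(x, y))  # [ True] -- trailing whitespace stripped first
print(x == y)               # [False] -- ordinary element-wise comparison
```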
+
+
+@set_module("numpy.char")
+def multiply(a, i):
+    """
+    Return (a * i), that is string multiple concatenation,
+    element-wise.
+
+    Values in ``i`` of less than 0 are treated as 0 (which yields an
+    empty string).
+
+    Parameters
+    ----------
+    a : array_like, with `np.bytes_` or `np.str_` dtype
+
+    i : array_like, with any integer dtype
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input types
+
+    Notes
+    -----
+    This is a thin wrapper around np.strings.multiply that raises
+    `ValueError` when ``i`` is not an integer. It only
+    exists for backwards-compatibility.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["a", "b", "c"])
+    >>> np.strings.multiply(a, 3)
+    array(['aaa', 'bbb', 'ccc'], dtype='<U3')
+    >>> i = np.array([1, 2, 3])
+    >>> np.strings.multiply(a, i)
+    array(['a', 'bb', 'ccc'], dtype='<U3')
+    >>> np.strings.multiply(np.array(['a']), i)
+    array(['a', 'aa', 'aaa'], dtype='<U3')
+    >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
+    >>> np.strings.multiply(a, 3)
+    array([['aaa', 'bbb', 'ccc'],
+           ['ddd', 'eee', 'fff']], dtype='<U3')
+    >>> np.strings.multiply(a, i)
+    array([['a', 'bb', 'ccc'],
+           ['d', 'ee', 'fff']], dtype='<U3')
+
+    """
+    try:
+        return strings_multiply(a, i)
+    except TypeError:
+        raise ValueError("Can only multiply by integers")
+
+
+@set_module("numpy.char")
+def partition(a, sep):
+    """
+    Partition each element in `a` around `sep`.
+
+    Calls :meth:`str.partition` element-wise.
+
+    For each element in `a`, split the element at the first
+    occurrence of `sep`, and return three strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, the first part
+    contains the whole string and the other two parts are empty.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+        Input array
+    sep : {str, unicode}
+        Separator to split each string element in `a`.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
+        depending on input types. The output array will have an extra
+        dimension with 3 elements per input element.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array(["Numpy is nice!"])
+    >>> np.char.partition(x, " ")
+    array([['Numpy', ' ', 'is nice!']], dtype='<U8')
+
+    See Also
+    --------
+    str.partition
+
+    """
+    return np.stack(strings_partition(a, sep), axis=-1)
+
+
+@set_module("numpy.char")
+def rpartition(a, sep):
+    """
+    Partition (split) each element around the right-most separator.
+
+    Calls :meth:`str.rpartition` element-wise.
+
+    For each element in `a`, split the element at the last
+    occurrence of `sep`, and return three strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+        Input array
+    sep : str or unicode
+        Right-most separator to split each element in the array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
+        depending on input types. The output array will have an extra
+        dimension with 3 elements per input element.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> np.char.rpartition(a, 'A')
+    array([['aAaAa', 'A', ''],
+           ['  a', 'A', '  '],
+           ['abB', 'A', 'Bba']], dtype='<U5')
+
+    """
+    return np.stack(strings_rpartition(a, sep), axis=-1)
+
+
+@set_module("numpy.char")
+class chararray(ndarray):
+    """
+    chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
+              strides=None, order=None)
+
+    Provides a convenient view on arrays of string and unicode values.
+
+    .. note::
+       The `chararray` class exists for backwards compatibility with
+       Numarray; it is not recommended for new development. Starting from
+       numpy 1.4, if one needs arrays of strings, it is recommended to use
+       arrays of `dtype` `object_`, `bytes_` or `str_`, and use the free
+       functions in the `numpy.char` module for fast vectorized string
+       operations.
+
+    Versus a NumPy array of dtype `bytes_` or `str_`, this
+    class adds the following functionality:
+
+    1) values automatically have whitespace removed from the end
+       when indexed
+
+    2) comparison operators automatically remove whitespace from the
+       end when comparing values
+
+    3) vectorized string operations are provided as methods
+       (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
+       and infix operators (e.g. ``+, *, %``)
+
+    chararrays should be created using `numpy.char.array
+    <numpy.char.array>` or `numpy.char.asarray
+    <numpy.char.asarray>`, rather than this constructor directly.
+
+    This constructor creates the array, using `buffer` (with `offset`
+    and `strides`) if it is not ``None``. If `buffer` is ``None``, then
+    constructs a new array with `strides` in "C order", unless both
+    ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
+    is in "Fortran order".
+
+    Methods
+    -------
+    astype
+    argsort
+    copy
+    count
+    decode
+    dump
+    dumps
+    encode
+    endswith
+    expandtabs
+    fill
+    find
+    flatten
+    getfield
+    index
+    isalnum
+    isalpha
+    isdecimal
+    isdigit
+    islower
+    isnumeric
+    isspace
+    istitle
+    isupper
+    item
+    join
+    ljust
+    lower
+    lstrip
+    nonzero
+    put
+    ravel
+    repeat
+    replace
+    reshape
+    resize
+    rfind
+    rindex
+    rjust
+    rsplit
+    rstrip
+    searchsorted
+    setfield
+    setflags
+    sort
+    split
+    splitlines
+    squeeze
+    startswith
+    strip
+    swapaxes
+    swapcase
+    take
+    title
+    tofile
+    tolist
+    tostring
+    translate
+    transpose
+    upper
+    view
+    zfill
+
+    Parameters
+    ----------
+    shape : tuple
+        Shape of the array.
+    itemsize : int, optional
+        Length of each array element, in number of characters. Default is 1.
+    unicode : bool, optional
+        Are the array elements of type unicode (True) or string (False).
+        Default is False.
+    buffer : object exposing the buffer interface or str, optional
+        Memory address of the start of the array data. Default is None,
+        in which case a new array is created.
+    offset : int, optional
+        Fixed stride displacement from the beginning of an axis.
+        Default is 0. Needs to be >=0.
+    strides : array_like of ints, optional
+        Strides for the array (see `~numpy.ndarray.strides` for
+        full description). Default is None.
+    order : {'C', 'F'}, optional
+        The order in which the array data is stored in memory: 'C' ->
+        "row major" order (the default), 'F' -> "column major"
+        (Fortran) order.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> charar = np.char.chararray((3, 3))
+    >>> charar[:] = 'a'
+    >>> charar
+    chararray([[b'a', b'a', b'a'],
+               [b'a', b'a', b'a'],
+               [b'a', b'a', b'a']], dtype='|S1')
+
+    >>> charar = np.char.chararray(charar.shape, itemsize=5)
+    >>> charar[:] = 'abc'
+    >>> charar
+    chararray([[b'abc', b'abc', b'abc'],
+               [b'abc', b'abc', b'abc'],
+               [b'abc', b'abc', b'abc']], dtype='|S5')
+
+    """
+    def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
+                offset=0, strides=None, order='C'):
+        if unicode:
+            dtype = str_
+        else:
+            dtype = bytes_
+
+        # force itemsize to be a Python int, since using NumPy integer
+        # types results in itemsize.itemsize being used as the size of
+        # strings in the new array.
+ itemsize = int(itemsize) + + if isinstance(buffer, str): + # unicode objects do not have the buffer interface + filler = buffer + buffer = None + else: + filler = None + + if buffer is None: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + order=order) + else: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + buffer=buffer, + offset=offset, strides=strides, + order=order) + if filler is not None: + self[...] = filler + + return self + + def __array_wrap__(self, arr, context=None, return_scalar=False): + # When calling a ufunc (and some other functions), we return a + # chararray if the ufunc output is a string-like array, + # or an ndarray otherwise + if arr.dtype.char in "SUbc": + return arr.view(type(self)) + return arr + + def __array_finalize__(self, obj): + # The b is a special case because it is used for reconstructing. + if self.dtype.char not in 'VSUbc': + raise ValueError("Can only create a chararray from string data.") + + def __getitem__(self, obj): + val = ndarray.__getitem__(self, obj) + if isinstance(val, character): + return val.rstrip() + return val + + # IMPLEMENTATION NOTE: Most of the methods of this class are + # direct delegations to the free functions in this module. + # However, those that return an array of strings should instead + # return a chararray, so some extra wrapping is required. + + def __eq__(self, other): + """ + Return (self == other) element-wise. + + See Also + -------- + equal + """ + return equal(self, other) + + def __ne__(self, other): + """ + Return (self != other) element-wise. + + See Also + -------- + not_equal + """ + return not_equal(self, other) + + def __ge__(self, other): + """ + Return (self >= other) element-wise. + + See Also + -------- + greater_equal + """ + return greater_equal(self, other) + + def __le__(self, other): + """ + Return (self <= other) element-wise. + + See Also + -------- + less_equal + """ + return less_equal(self, other) + + def __gt__(self, other): + """ + Return (self > other) element-wise. + + See Also + -------- + greater + """ + return greater(self, other) + + def __lt__(self, other): + """ + Return (self < other) element-wise. + + See Also + -------- + less + """ + return less(self, other) + + def __add__(self, other): + """ + Return (self + other), that is string concatenation, + element-wise for a pair of array_likes of str or unicode. + + See Also + -------- + add + """ + return add(self, other) + + def __radd__(self, other): + """ + Return (other + self), that is string concatenation, + element-wise for a pair of array_likes of `bytes_` or `str_`. + + See Also + -------- + add + """ + return add(other, self) + + def __mul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __rmul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __mod__(self, i): + """ + Return (self % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of `bytes_` + or `str_`. + + See Also + -------- + mod + """ + return asarray(mod(self, i)) + + def __rmod__(self, other): + return NotImplemented + + def argsort(self, axis=-1, kind=None, order=None): + """ + Return the indices that sort the array lexicographically. 
+ + For full documentation see `numpy.argsort`, for which this method is + in fact merely a "thin wrapper." + + Examples + -------- + >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') + >>> c = c.view(np.char.chararray); c + chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], + dtype='|S5') + >>> c[c.argsort()] + chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], + dtype='|S5') + + """ + return self.__array__().argsort(axis, kind, order) + argsort.__doc__ = ndarray.argsort.__doc__ + + def capitalize(self): + """ + Return a copy of `self` with only the first character of each element + capitalized. + + See Also + -------- + char.capitalize + + """ + return asarray(capitalize(self)) + + def center(self, width, fillchar=' '): + """ + Return a copy of `self` with its elements centered in a + string of length `width`. + + See Also + -------- + center + """ + return asarray(center(self, width, fillchar)) + + def count(self, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + See Also + -------- + char.count + + """ + return count(self, sub, start, end) + + def decode(self, encoding=None, errors=None): + """ + Calls ``bytes.decode`` element-wise. + + See Also + -------- + char.decode + + """ + return decode(self, encoding, errors) + + def encode(self, encoding=None, errors=None): + """ + Calls :meth:`str.encode` element-wise. + + See Also + -------- + char.encode + + """ + return encode(self, encoding, errors) + + def endswith(self, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` ends with `suffix`, otherwise `False`. + + See Also + -------- + char.endswith + + """ + return endswith(self, suffix, start, end) + + def expandtabs(self, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + See Also + -------- + char.expandtabs + + """ + return asarray(expandtabs(self, tabsize)) + + def find(self, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + See Also + -------- + char.find + + """ + return find(self, sub, start, end) + + def index(self, sub, start=0, end=None): + """ + Like `find`, but raises :exc:`ValueError` when the substring is not + found. + + See Also + -------- + char.index + + """ + return index(self, sub, start, end) + + def isalnum(self): + """ + Returns true for each element if all characters in the string + are alphanumeric and there is at least one character, false + otherwise. + + See Also + -------- + char.isalnum + + """ + return isalnum(self) + + def isalpha(self): + """ + Returns true for each element if all characters in the string + are alphabetic and there is at least one character, false + otherwise. + + See Also + -------- + char.isalpha + + """ + return isalpha(self) + + def isdigit(self): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + See Also + -------- + char.isdigit + + """ + return isdigit(self) + + def islower(self): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. 
+ + See Also + -------- + char.islower + + """ + return islower(self) + + def isspace(self): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. + + See Also + -------- + char.isspace + + """ + return isspace(self) + + def istitle(self): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + See Also + -------- + char.istitle + + """ + return istitle(self) + + def isupper(self): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + See Also + -------- + char.isupper + + """ + return isupper(self) + + def join(self, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + See Also + -------- + char.join + + """ + return join(self, seq) + + def ljust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` left-justified in a + string of length `width`. + + See Also + -------- + char.ljust + + """ + return asarray(ljust(self, width, fillchar)) + + def lower(self): + """ + Return an array with the elements of `self` converted to + lowercase. + + See Also + -------- + char.lower + + """ + return asarray(lower(self)) + + def lstrip(self, chars=None): + """ + For each element in `self`, return a copy with the leading characters + removed. + + See Also + -------- + char.lstrip + + """ + return lstrip(self, chars) + + def partition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + partition + """ + return asarray(partition(self, sep)) + + def replace(self, old, new, count=None): + """ + For each element in `self`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + See Also + -------- + char.replace + + """ + return replace(self, old, new, count if count is not None else -1) + + def rfind(self, sub, start=0, end=None): + """ + For each element in `self`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + See Also + -------- + char.rfind + + """ + return rfind(self, sub, start, end) + + def rindex(self, sub, start=0, end=None): + """ + Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is + not found. + + See Also + -------- + char.rindex + + """ + return rindex(self, sub, start, end) + + def rjust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` + right-justified in a string of length `width`. + + See Also + -------- + char.rjust + + """ + return asarray(rjust(self, width, fillchar)) + + def rpartition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + rpartition + """ + return asarray(rpartition(self, sep)) + + def rsplit(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in + the string, using `sep` as the delimiter string. + + See Also + -------- + char.rsplit + + """ + return rsplit(self, sep, maxsplit) + + def rstrip(self, chars=None): + """ + For each element in `self`, return a copy with the trailing + characters removed. + + See Also + -------- + char.rstrip + + """ + return rstrip(self, chars) + + def split(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in the + string, using `sep` as the delimiter string. 
+
+        See Also
+        --------
+        char.split
+
+        """
+        return split(self, sep, maxsplit)
+
+    def splitlines(self, keepends=None):
+        """
+        For each element in `self`, return a list of the lines in the
+        element, breaking at line boundaries.
+
+        See Also
+        --------
+        char.splitlines
+
+        """
+        return splitlines(self, keepends)
+
+    def startswith(self, prefix, start=0, end=None):
+        """
+        Returns a boolean array which is `True` where the string element
+        in `self` starts with `prefix`, otherwise `False`.
+
+        See Also
+        --------
+        char.startswith
+
+        """
+        return startswith(self, prefix, start, end)
+
+    def strip(self, chars=None):
+        """
+        For each element in `self`, return a copy with the leading and
+        trailing characters removed.
+
+        See Also
+        --------
+        char.strip
+
+        """
+        return strip(self, chars)
+
+    def swapcase(self):
+        """
+        For each element in `self`, return a copy of the string with
+        uppercase characters converted to lowercase and vice versa.
+
+        See Also
+        --------
+        char.swapcase
+
+        """
+        return asarray(swapcase(self))
+
+    def title(self):
+        """
+        For each element in `self`, return a titlecased version of the
+        string: words start with uppercase characters, all remaining cased
+        characters are lowercase.
+
+        See Also
+        --------
+        char.title
+
+        """
+        return asarray(title(self))
+
+    def translate(self, table, deletechars=None):
+        """
+        For each element in `self`, return a copy of the string where
+        all characters occurring in the optional argument
+        `deletechars` are removed, and the remaining characters have
+        been mapped through the given translation table.
+
+        See Also
+        --------
+        char.translate
+
+        """
+        return asarray(translate(self, table, deletechars))
+
+    def upper(self):
+        """
+        Return an array with the elements of `self` converted to
+        uppercase.
+
+        See Also
+        --------
+        char.upper
+
+        """
+        return asarray(upper(self))
+
+    def zfill(self, width):
+        """
+        Return the numeric string left-filled with zeros in a string of
+        length `width`.
+
+        See Also
+        --------
+        char.zfill
+
+        """
+        return asarray(zfill(self, width))
+
+    def isnumeric(self):
+        """
+        For each element in `self`, return True if there are only
+        numeric characters in the element.
+
+        See Also
+        --------
+        char.isnumeric
+
+        """
+        return isnumeric(self)
+
+    def isdecimal(self):
+        """
+        For each element in `self`, return True if there are only
+        decimal characters in the element.
+
+        See Also
+        --------
+        char.isdecimal
+
+        """
+        return isdecimal(self)
+
+
+@set_module("numpy.char")
+def array(obj, itemsize=None, copy=True, unicode=None, order=None):
+    """
+    Create a `~numpy.char.chararray`.
+
+    .. note::
+       This class is provided for numarray backward-compatibility.
+       New code (not concerned with numarray compatibility) should use
+       arrays of type `bytes_` or `str_` and use the free functions
+       in :mod:`numpy.char` for fast vectorized string operations instead.
+
+    Versus a NumPy array of dtype `bytes_` or `str_`, this
+    class adds the following functionality:
+
+    1) values automatically have whitespace removed from the end
+       when indexed
+
+    2) comparison operators automatically remove whitespace from the
+       end when comparing values
+
+    3) vectorized string operations are provided as methods
+       (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
+       and infix operators (e.g. ``+, *, %``)
+
+    Parameters
+    ----------
+    obj : array of str or unicode-like
+
+    itemsize : int, optional
+        `itemsize` is the number of characters per scalar in the
+        resulting array. If `itemsize` is None, and `obj` is an
+        object array or a Python list, the `itemsize` will be
+        automatically determined. If `itemsize` is provided and `obj`
+        is of type str or unicode, then the `obj` string will be
+        chunked into `itemsize` pieces.
+
+    copy : bool, optional
+        If true (default), then the object is copied. Otherwise, a copy
+        will only be made if ``__array__`` returns a copy, if obj is a
+        nested sequence, or if a copy is needed to satisfy any of the other
+        requirements (`itemsize`, unicode, `order`, etc.).
+
+    unicode : bool, optional
+        When true, the resulting `~numpy.char.chararray` can contain Unicode
+        characters, when false only 8-bit characters. If unicode is
+        None and `obj` is one of the following:
+
+        - a `~numpy.char.chararray`,
+        - an ndarray of type :class:`str_` or :class:`bytes_`
+        - a Python :class:`str` or :class:`bytes` object,
+
+        then the unicode setting of the output array will be
+        automatically determined.
+
+    order : {'C', 'F', 'A'}, optional
+        Specify the order of the array. If order is 'C' (default), then the
+        array will be in C-contiguous order (last-index varies the
+        fastest). If order is 'F', then the returned array
+        will be in Fortran-contiguous order (first-index varies the
+        fastest). If order is 'A', then the returned array may
+        be in any order (either C-, Fortran-contiguous, or even
+        discontiguous).
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> char_array = np.char.array(['hello', 'world', 'numpy','array'])
+    >>> char_array
+    chararray(['hello', 'world', 'numpy', 'array'], dtype='<U5')
+
+    """
+    if isinstance(obj, (bytes, str)):
+        if unicode is None:
+            if isinstance(obj, str):
+                unicode = True
+            else:
+                unicode = False
+
+        if itemsize is None:
+            itemsize = len(obj)
+        shape = len(obj) // itemsize
+
+        return chararray(shape, itemsize=itemsize, unicode=unicode,
+                         buffer=obj, order=order)
+
+    if isinstance(obj, (list, tuple)):
+        obj = asnarray(obj)
+
+    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
+        # If we just have a vanilla chararray, create a chararray
+        # view around it.
+        if not isinstance(obj, chararray):
+            obj = obj.view(chararray)
+
+        if itemsize is None:
+            itemsize = obj.itemsize
+            # itemsize is in 8-bit chars, so for Unicode, we need
+            # to divide by the size of a single Unicode character,
+            # which for NumPy is always 4
+            if issubclass(obj.dtype.type, str_):
+                itemsize //= 4
+
+        if unicode is None:
+            if issubclass(obj.dtype.type, str_):
+                unicode = True
+            else:
+                unicode = False
+
+        if unicode:
+            dtype = str_
+        else:
+            dtype = bytes_
+
+        if order is not None:
+            obj = asnarray(obj, order=order)
+        if (copy or
+                (itemsize != obj.itemsize) or
+                (not unicode and isinstance(obj, chararray)) or
+                (unicode and not isinstance(obj, chararray))):
+            obj = obj.astype((dtype, int(itemsize)))
+        return obj
+
+    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
+        if itemsize is None:
+            # Since no itemsize was specified, convert the input array to
+            # a list so the ndarray constructor will automatically
+            # determine the itemsize for us.
+            obj = obj.tolist()
+            # Fall through to the default case
+
+    if unicode:
+        dtype = str_
+    else:
+        dtype = bytes_
+
+    if itemsize is None:
+        val = narray(obj, dtype=dtype, order=order, subok=True)
+    else:
+        val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
+
+    return val.view(chararray)
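As a quick sanity check of the constructor just defined (editorial, not part of the vendored file): `np.char.array` infers the unicode flag from its input, and the resulting chararray strips trailing whitespace on indexing, as the class docstring promises.

```python
import numpy as np

u = np.char.array(['hello', 'world'])     # str input  -> unicode chararray
b = np.char.array([b'hello', b'world'])   # bytes input -> bytes chararray
print(u.dtype, b.dtype)                   # <U5 |S5

w = np.char.array(['ab   '])
print(len(w[0]))                          # 2 -- trailing whitespace stripped
```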
+
+
+@set_module("numpy.char")
+def asarray(obj, itemsize=None, unicode=None, order=None):
+    """
+    Convert the input to a `~numpy.char.chararray`, copying the data only
+    if necessary.
+
+    Versus a NumPy array of dtype `bytes_` or `str_`, this
+    class adds the following functionality:
+
+    1) values automatically have whitespace removed from the end
+       when indexed
+
+    2) comparison operators automatically remove whitespace from the
+       end when comparing values
+
+    3) vectorized string operations are provided as methods
+       (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
+       and infix operators (e.g. ``+``, ``*``, ``%``)
+
+    Parameters
+    ----------
+    obj : array of str or unicode-like
+
+    itemsize : int, optional
+        `itemsize` is the number of characters per scalar in the
+        resulting array. If `itemsize` is None, and `obj` is an
+        object array or a Python list, the `itemsize` will be
+        automatically determined. If `itemsize` is provided and `obj`
+        is of type str or unicode, then the `obj` string will be
+        chunked into `itemsize` pieces.
+
+    unicode : bool, optional
+        When true, the resulting `~numpy.char.chararray` can contain Unicode
+        characters, when false only 8-bit characters. If unicode is
+        None and `obj` is one of the following:
+
+        - a `~numpy.char.chararray`,
+        - an ndarray of type `str_` or `unicode_`
+        - a Python str or unicode object,
+
+        then the unicode setting of the output array will be
+        automatically determined.
+
+    order : {'C', 'F'}, optional
+        Specify the order of the array. If order is 'C' (default), then the
+        array will be in C-contiguous order (last-index varies the
+        fastest). If order is 'F', then the returned array
+        will be in Fortran-contiguous order (first-index varies the
+        fastest).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.char.asarray(['hello', 'world'])
+    chararray(['hello', 'world'], dtype='<U5')
+
+    """
+    return array(obj, itemsize, copy=False, unicode=unicode, order=order)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.pyi
new file mode 100644
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.pyi
+class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]):
+    @overload
+    def __new__(
+        subtype,
+        shape: _ShapeLike,
+        itemsize: SupportsIndex | SupportsInt = ...,
+        unicode: L[False] = ...,
+        buffer: _SupportsBuffer = ...,
+        offset: SupportsIndex = ...,
+        strides: _ShapeLike = ...,
+        order: _OrderKACF = ...,
+    ) -> _CharArray[bytes_]: ...
+    @overload
+    def __new__(
+        subtype,
+        shape: _ShapeLike,
+        itemsize: SupportsIndex | SupportsInt = ...,
+        unicode: L[True] = ...,
+        buffer: _SupportsBuffer = ...,
+        offset: SupportsIndex = ...,
+        strides: _ShapeLike = ...,
+        order: _OrderKACF = ...,
+    ) -> _CharArray[str_]: ...
+
+    def __array_finalize__(self, obj: object) -> None: ...
+    def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ...
+    def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ...
+    def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ...
+ + @overload + def __eq__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __eq__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __ne__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __ne__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __ge__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __ge__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __le__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __le__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __gt__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __gt__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __lt__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __lt__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __add__( + self: _CharArray[str_], + other: U_co, + ) -> _CharArray[str_]: ... + @overload + def __add__( + self: _CharArray[bytes_], + other: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def __radd__( + self: _CharArray[str_], + other: U_co, + ) -> _CharArray[str_]: ... + @overload + def __radd__( + self: _CharArray[bytes_], + other: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def center( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = ..., + ) -> _CharArray[str_]: ... + @overload + def center( + self: _CharArray[bytes_], + width: i_co, + fillchar: S_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def count( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def count( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + def decode( + self: _CharArray[bytes_], + encoding: str | None = ..., + errors: str | None = ..., + ) -> _CharArray[str_]: ... + + def encode( + self: _CharArray[str_], + encoding: str | None = ..., + errors: str | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def endswith( + self: _CharArray[str_], + suffix: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[np.bool]: ... + @overload + def endswith( + self: _CharArray[bytes_], + suffix: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[np.bool]: ... + + def expandtabs( + self, + tabsize: i_co = ..., + ) -> Self: ... + + @overload + def find( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def find( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + @overload + def index( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def index( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + @overload + def join( + self: _CharArray[str_], + seq: U_co, + ) -> _CharArray[str_]: ... + @overload + def join( + self: _CharArray[bytes_], + seq: S_co, + ) -> _CharArray[bytes_]: ... 
+ + @overload + def ljust( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = ..., + ) -> _CharArray[str_]: ... + @overload + def ljust( + self: _CharArray[bytes_], + width: i_co, + fillchar: S_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def lstrip( + self: _CharArray[str_], + chars: U_co | None = ..., + ) -> _CharArray[str_]: ... + @overload + def lstrip( + self: _CharArray[bytes_], + chars: S_co | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def partition( + self: _CharArray[str_], + sep: U_co, + ) -> _CharArray[str_]: ... + @overload + def partition( + self: _CharArray[bytes_], + sep: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def replace( + self: _CharArray[str_], + old: U_co, + new: U_co, + count: i_co | None = ..., + ) -> _CharArray[str_]: ... + @overload + def replace( + self: _CharArray[bytes_], + old: S_co, + new: S_co, + count: i_co | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def rfind( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def rfind( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + @overload + def rindex( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def rindex( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + @overload + def rjust( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = ..., + ) -> _CharArray[str_]: ... + @overload + def rjust( + self: _CharArray[bytes_], + width: i_co, + fillchar: S_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def rpartition( + self: _CharArray[str_], + sep: U_co, + ) -> _CharArray[str_]: ... + @overload + def rpartition( + self: _CharArray[bytes_], + sep: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def rsplit( + self: _CharArray[str_], + sep: U_co | None = ..., + maxsplit: i_co | None = ..., + ) -> NDArray[object_]: ... + @overload + def rsplit( + self: _CharArray[bytes_], + sep: S_co | None = ..., + maxsplit: i_co | None = ..., + ) -> NDArray[object_]: ... + + @overload + def rstrip( + self: _CharArray[str_], + chars: U_co | None = ..., + ) -> _CharArray[str_]: ... + @overload + def rstrip( + self: _CharArray[bytes_], + chars: S_co | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def split( + self: _CharArray[str_], + sep: U_co | None = ..., + maxsplit: i_co | None = ..., + ) -> NDArray[object_]: ... + @overload + def split( + self: _CharArray[bytes_], + sep: S_co | None = ..., + maxsplit: i_co | None = ..., + ) -> NDArray[object_]: ... + + def splitlines(self, keepends: b_co | None = ...) -> NDArray[object_]: ... + + @overload + def startswith( + self: _CharArray[str_], + prefix: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[np.bool]: ... + @overload + def startswith( + self: _CharArray[bytes_], + prefix: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[np.bool]: ... + + @overload + def strip( + self: _CharArray[str_], + chars: U_co | None = ..., + ) -> _CharArray[str_]: ... + @overload + def strip( + self: _CharArray[bytes_], + chars: S_co | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def translate( + self: _CharArray[str_], + table: U_co, + deletechars: U_co | None = ..., + ) -> _CharArray[str_]: ... 
+ @overload + def translate( + self: _CharArray[bytes_], + table: S_co, + deletechars: S_co | None = ..., + ) -> _CharArray[bytes_]: ... + + def zfill(self, width: i_co) -> Self: ... + def capitalize(self) -> Self: ... + def title(self) -> Self: ... + def swapcase(self) -> Self: ... + def lower(self) -> Self: ... + def upper(self) -> Self: ... + def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + +# Comparison +@overload +def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... +@overload +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... +@overload +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... +@overload +def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... + +@overload +def capitalize(a: U_co) -> NDArray[str_]: ... +@overload +def capitalize(a: S_co) -> NDArray[bytes_]: ... +@overload +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... 
+@overload +def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... + +def decode( + a: S_co, + encoding: str | None = ..., + errors: str | None = ..., +) -> NDArray[str_]: ... +def encode( + a: U_co | T_co, + encoding: str | None = ..., + errors: str | None = ..., +) -> NDArray[bytes_]: ... + +@overload +def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... +@overload +def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ... +@overload +def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def lower(a: U_co) -> NDArray[str_]: ... +@overload +def lower(a: S_co) -> NDArray[bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def lstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +@overload +def lstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def replace( + a: U_co, + old: U_co, + new: U_co, + count: i_co | None = ..., +) -> NDArray[str_]: ... +@overload +def replace( + a: S_co, + old: S_co, + new: S_co, + count: i_co | None = ..., +) -> NDArray[bytes_]: ... +@overload +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = ..., +) -> _StringDTypeArray: ... +@overload +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = ..., +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rjust( + a: U_co, + width: i_co, + fillchar: U_co = ..., +) -> NDArray[str_]: ... +@overload +def rjust( + a: S_co, + width: i_co, + fillchar: S_co = ..., +) -> NDArray[bytes_]: ... +@overload +def rjust( + a: _StringDTypeSupportsArray, + width: i_co, + fillchar: _StringDTypeSupportsArray = ..., +) -> _StringDTypeArray: ... 
+@overload +def rjust( + a: T_co, + width: i_co, + fillchar: T_co = ..., +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rsplit( + a: U_co, + sep: U_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: S_co, + sep: S_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: _StringDTypeSupportsArray, + sep: _StringDTypeSupportsArray | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: T_co, + sep: T_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... + +@overload +def rstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +@overload +def rstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +@overload +def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def split( + a: U_co, + sep: U_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def split( + a: S_co, + sep: S_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def split( + a: _StringDTypeSupportsArray, + sep: _StringDTypeSupportsArray | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def split( + a: T_co, + sep: T_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... + +def splitlines(a: UST_co, keepends: b_co | None = ...) -> NDArray[np.object_]: ... + +@overload +def strip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +@overload +def strip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +@overload +def strip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def swapcase(a: U_co) -> NDArray[str_]: ... +@overload +def swapcase(a: S_co) -> NDArray[bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def title(a: U_co) -> NDArray[str_]: ... +@overload +def title(a: S_co) -> NDArray[bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def translate( + a: U_co, + table: str, + deletechars: str | None = ..., +) -> NDArray[str_]: ... +@overload +def translate( + a: S_co, + table: str, + deletechars: str | None = ..., +) -> NDArray[bytes_]: ... +@overload +def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: str | None = ..., +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: str | None = ..., +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def upper(a: U_co) -> NDArray[str_]: ... +@overload +def upper(a: S_co) -> NDArray[bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... 
+@overload
+def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def zfill(a: U_co, width: i_co) -> NDArray[str_]: ...
+@overload
+def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ...
+@overload
+def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ...
+@overload
+def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...
+
+# String information
+@overload
+def count(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def count(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def count(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+@overload
+def endswith(
+    a: U_co,
+    suffix: U_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def endswith(
+    a: S_co,
+    suffix: S_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def endswith(
+    a: T_co,
+    suffix: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+
+@overload
+def find(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def find(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def find(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+@overload
+def index(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def index(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def index(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+def isalpha(a: UST_co) -> NDArray[np.bool]: ...
+def isalnum(a: UST_co) -> NDArray[np.bool]: ...
+def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ...
+def isdigit(a: UST_co) -> NDArray[np.bool]: ...
+def islower(a: UST_co) -> NDArray[np.bool]: ...
+def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ...
+def isspace(a: UST_co) -> NDArray[np.bool]: ...
+def istitle(a: UST_co) -> NDArray[np.bool]: ...
+def isupper(a: UST_co) -> NDArray[np.bool]: ...
+
+@overload
+def rfind(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def rfind(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def rfind(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+@overload
+def rindex(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def rindex(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def rindex(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+@overload
+def startswith(
+    a: U_co,
+    prefix: U_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def startswith(
+    a: S_co,
+    prefix: S_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def startswith(
+    a: T_co,
+    prefix: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+
+def str_len(A: UST_co) -> NDArray[int_]: ...
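The overload families above pair str arrays with str arguments (`U_co`) and bytes arrays with bytes arguments (`S_co`), returning integer or boolean arrays. A small editorial illustration of the runtime behavior these stubs describe (the array names are mine):

```python
import numpy as np

a_u = np.array(['spam', 'eggs'])
a_s = np.array([b'spam', b'eggs'])

print(np.char.find(a_u, 'g'))         # [-1  1]       -> NDArray[int_]
print(np.char.str_len(a_s))           # [4 4]         -> NDArray[int_]
print(np.char.startswith(a_u, 'sp'))  # [ True False] -> NDArray[np.bool]
```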
+ +# Overload 1 and 2: str- or bytes-based array-likes +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) +@overload +def array( + obj: U_co, + itemsize: int | None = ..., + copy: bool = ..., + unicode: L[True] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def array( + obj: S_co, + itemsize: int | None = ..., + copy: bool = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[False], + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + *, + unicode: L[False], + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + *, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... + +@overload +def asarray( + obj: U_co, + itemsize: int | None = ..., + unicode: L[True] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: S_co, + itemsize: int | None = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = ..., + *, + unicode: L[False], + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = ..., + *, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.py b/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.py new file mode 100644 index 0000000..8e71e6d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.py @@ -0,0 +1,1498 @@ +""" +Implementation of optimized einsum. 
+
+"""
+import itertools
+import operator
+
+from numpy._core.multiarray import c_einsum
+from numpy._core.numeric import asanyarray, tensordot
+from numpy._core.overrides import array_function_dispatch
+
+__all__ = ['einsum', 'einsum_path']
+
+# importing string for string.ascii_letters would be too slow
+# the first import before caching has been measured to take 800 µs (#23777)
+# symbols begin with uppercase to match ASCII ordering and avoid sorting issues
+einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
+einsum_symbols_set = set(einsum_symbols)
+
+
+def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
+    """
+    Computes the number of FLOPS in the contraction.
+
+    Parameters
+    ----------
+    idx_contraction : iterable
+        The indices involved in the contraction
+    inner : bool
+        Does this contraction require an inner product?
+    num_terms : int
+        The number of terms in a contraction
+    size_dictionary : dict
+        The size of each of the indices in idx_contraction
+
+    Returns
+    -------
+    flop_count : int
+        The total number of FLOPS required for the contraction.
+
+    Examples
+    --------
+
+    >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
+    30
+
+    >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
+    60
+
+    """
+
+    overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
+    op_factor = max(1, num_terms - 1)
+    if inner:
+        op_factor += 1
+
+    return overall_size * op_factor
+
+def _compute_size_by_dict(indices, idx_dict):
+    """
+    Computes the product of the elements in indices based on the dictionary
+    idx_dict.
+
+    Parameters
+    ----------
+    indices : iterable
+        Indices to base the product on.
+    idx_dict : dictionary
+        Dictionary of index sizes
+
+    Returns
+    -------
+    ret : int
+        The resulting product.
+
+    Examples
+    --------
+    >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
+    90
+
+    """
+    ret = 1
+    for i in indices:
+        ret *= idx_dict[i]
+    return ret
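For intuition on the two helpers above, the doctest values follow directly from the size product and the operation factor. A standalone editorial check of that arithmetic:

```python
# Indices 'abc' with sizes {'a': 2, 'b': 3, 'c': 5} span 2 * 3 * 5 = 30
# elements. With two terms and an inner sum, op_factor = (2 - 1) + 1 = 2,
# so the contraction costs 30 * 2 = 60 FLOPs -- the values in the doctests.
sizes = {'a': 2, 'b': 3, 'c': 5}

overall = 1
for idx in 'abc':
    overall *= sizes[idx]

assert overall == 30        # _compute_size_by_dict('abc', sizes)
assert overall * 2 == 60    # _flop_count('abc', True, 2, sizes)
```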
+
+
+def _find_contraction(positions, input_sets, output_set):
+    """
+    Finds the contraction for a given set of input and output sets.
+
+    Parameters
+    ----------
+    positions : iterable
+        Integer positions of terms used in the contraction.
+    input_sets : list
+        List of sets that represent the lhs side of the einsum subscript
+    output_set : set
+        Set that represents the rhs side of the overall einsum subscript
+
+    Returns
+    -------
+    new_result : set
+        The indices of the resulting contraction
+    remaining : list
+        List of sets that have not been contracted, the new set is appended to
+        the end of this list
+    idx_removed : set
+        Indices removed from the entire contraction
+    idx_contraction : set
+        The indices used in the current contraction
+
+    Examples
+    --------
+
+    # A simple dot product test case
+    >>> pos = (0, 1)
+    >>> isets = [set('ab'), set('bc')]
+    >>> oset = set('ac')
+    >>> _find_contraction(pos, isets, oset)
+    ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
+
+    # A more complex case with additional terms in the contraction
+    >>> pos = (0, 2)
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set('ac')
+    >>> _find_contraction(pos, isets, oset)
+    ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
+    """
+
+    idx_contract = set()
+    idx_remain = output_set.copy()
+    remaining = []
+    for ind, value in enumerate(input_sets):
+        if ind in positions:
+            idx_contract |= value
+        else:
+            remaining.append(value)
+            idx_remain |= value
+
+    new_result = idx_remain & idx_contract
+    idx_removed = (idx_contract - new_result)
+    remaining.append(new_result)
+
+    return (new_result, remaining, idx_removed, idx_contract)
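An editorial walk-through of the dot-product doctest above: contracting positions (0, 1) of 'ab,bc->ac' keeps the indices still required by the output and removes the ones that appear only inside the contraction. (The `& oset` shortcut below holds here because every term is contracted, so `idx_remain` is just the output set.)

```python
isets = [set('ab'), set('bc')]
oset = set('ac')

idx_contract = isets[0] | isets[1]       # {'a', 'b', 'c'}
new_result = idx_contract & oset         # {'a', 'c'} -- survives the step
idx_removed = idx_contract - new_result  # {'b'}      -- summed away

assert new_result == set('ac')
assert idx_removed == {'b'}
```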
+
+
+def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
+    """
+    Computes all possible pair contractions, sieves the results based
+    on ``memory_limit`` and returns the lowest-cost path. This algorithm
+    scales factorially with respect to the number of elements in the
+    list ``input_sets``.
+
+    Parameters
+    ----------
+    input_sets : list
+        List of sets that represent the lhs side of the einsum subscript
+    output_set : set
+        Set that represents the rhs side of the overall einsum subscript
+    idx_dict : dictionary
+        Dictionary of index sizes
+    memory_limit : int
+        The maximum number of elements in a temporary array
+
+    Returns
+    -------
+    path : list
+        The optimal contraction order within the memory limit constraint.
+
+    Examples
+    --------
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set()
+    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+    >>> _optimal_path(isets, oset, idx_sizes, 5000)
+    [(0, 2), (0, 1)]
+    """
+
+    full_results = [(0, [], input_sets)]
+    for iteration in range(len(input_sets) - 1):
+        iter_results = []
+
+        # Compute all unique pairs
+        for curr in full_results:
+            cost, positions, remaining = curr
+            for con in itertools.combinations(
+                range(len(input_sets) - iteration), 2
+            ):
+
+                # Find the contraction
+                cont = _find_contraction(con, remaining, output_set)
+                new_result, new_input_sets, idx_removed, idx_contract = cont
+
+                # Sieve the results based on memory_limit
+                new_size = _compute_size_by_dict(new_result, idx_dict)
+                if new_size > memory_limit:
+                    continue
+
+                # Build (total_cost, positions, indices_remaining)
+                total_cost = cost + _flop_count(
+                    idx_contract, idx_removed, len(con), idx_dict
+                )
+                new_pos = positions + [con]
+                iter_results.append((total_cost, new_pos, new_input_sets))
+
+        # Update the combinatorial list; if we did not find anything,
+        # return the best path + remaining contractions
+        if iter_results:
+            full_results = iter_results
+        else:
+            path = min(full_results, key=lambda x: x[0])[1]
+            path += [tuple(range(len(input_sets) - iteration))]
+            return path
+
+    # If we have not found anything return single einsum contraction
+    if len(full_results) == 0:
+        return [tuple(range(len(input_sets)))]
+
+    path = min(full_results, key=lambda x: x[0])[1]
+    return path
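These private planners surface through the public `np.einsum_path` API, which reports the chosen contraction order. An editorial usage sketch with shapes matching the doctest sizes above (a=1, b=2, c=3, d=4); the printed path may vary with shapes and NumPy version:

```python
import numpy as np

A = np.random.rand(1, 2, 4)   # 'abd'
B = np.random.rand(1, 3)      # 'ac'
C = np.random.rand(2, 4, 3)   # 'bdc'

path, report = np.einsum_path('abd,ac,bdc->', A, B, C, optimize='optimal')
print(path)    # e.g. ['einsum_path', (0, 2), (0, 1)]
print(report)  # human-readable cost breakdown
```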
+
+def _parse_possible_contraction(
+        positions, input_sets, output_set, idx_dict,
+        memory_limit, path_cost, naive_cost
+    ):
+    """Compute the cost (removed size + flops) and resultant indices for
+    performing the contraction specified by ``positions``.
+
+    Parameters
+    ----------
+    positions : tuple of int
+        The locations of the proposed tensors to contract.
+    input_sets : list of sets
+        The indices found on each tensor.
+    output_set : set
+        The output indices of the expression.
+    idx_dict : dict
+        Mapping of each index to its size.
+    memory_limit : int
+        The total allowed size for an intermediary tensor.
+    path_cost : int
+        The contraction cost so far.
+    naive_cost : int
+        The cost of the unoptimized expression.
+
+    Returns
+    -------
+    cost : (int, int)
+        A tuple containing the size of any indices removed, and the flop cost.
+    positions : tuple of int
+        The locations of the proposed tensors to contract.
+    new_input_sets : list of sets
+        The resulting new list of indices if this proposed contraction
+        is performed.
+
+    """
+
+    # Find the contraction
+    contract = _find_contraction(positions, input_sets, output_set)
+    idx_result, new_input_sets, idx_removed, idx_contract = contract
+
+    # Sieve the results based on memory_limit
+    new_size = _compute_size_by_dict(idx_result, idx_dict)
+    if new_size > memory_limit:
+        return None
+
+    # Build sort tuple
+    old_sizes = (
+        _compute_size_by_dict(input_sets[p], idx_dict) for p in positions
+    )
+    removed_size = sum(old_sizes) - new_size
+
+    # NB: removed_size used to be just the size of any removed indices i.e.:
+    #     helpers.compute_size_by_dict(idx_removed, idx_dict)
+    cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
+    sort = (-removed_size, cost)
+
+    # Sieve based on total cost as well
+    if (path_cost + cost) > naive_cost:
+        return None
+
+    # Add contraction to possible choices
+    return [sort, positions, new_input_sets]
+
+
+def _update_other_results(results, best):
+    """Update the positions and provisional input_sets of ``results``
+    based on performing the contraction result ``best``. Remove any
+    results involving the tensors just contracted.
+
+    Parameters
+    ----------
+    results : list
+        List of contraction results produced by
+        ``_parse_possible_contraction``.
+    best : list
+        The best contraction of ``results`` i.e. the one that
+        will be performed.
+
+    Returns
+    -------
+    mod_results : list
+        The list of modified results, updated with the outcome of the
+        ``best`` contraction.
+    """
+
+    best_con = best[1]
+    bx, by = best_con
+    mod_results = []
+
+    for cost, (x, y), con_sets in results:
+
+        # Ignore results involving tensors just contracted
+        if x in best_con or y in best_con:
+            continue
+
+        # Update the input_sets
+        del con_sets[by - int(by > x) - int(by > y)]
+        del con_sets[bx - int(bx > x) - int(bx > y)]
+        con_sets.insert(-1, best[2][-1])
+
+        # Update the position indices
+        mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
+        mod_results.append((cost, mod_con, con_sets))
+
+    return mod_results
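The ``sort`` tuple built in `_parse_possible_contraction` is what `_greedy_path` minimizes below: candidates that free the most memory win, with FLOP cost as the tie-breaker. An editorial sketch of that ranking (the candidate values are made up):

```python
# Each candidate is [(-removed_size, flop_cost), positions, new_input_sets].
candidates = [
    [(-100, 500), (0, 1), []],   # frees 100 elements, 500 FLOPs
    [(-100, 300), (1, 2), []],   # frees the same, but cheaper -> wins
    [(-10, 50), (0, 2), []],
]

best = min(candidates, key=lambda x: x[0])
assert best[1] == (1, 2)
```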
+ + Examples + -------- + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set() + >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} + >>> _greedy_path(isets, oset, idx_sizes, 5000) + [(0, 2), (0, 1)] + """ + + # Handle trivial cases that leaked through + if len(input_sets) == 1: + return [(0,)] + elif len(input_sets) == 2: + return [(0, 1)] + + # Build up a naive cost + contract = _find_contraction( + range(len(input_sets)), input_sets, output_set + ) + idx_result, new_input_sets, idx_removed, idx_contract = contract + naive_cost = _flop_count( + idx_contract, idx_removed, len(input_sets), idx_dict + ) + + # Initially iterate over all pairs + comb_iter = itertools.combinations(range(len(input_sets)), 2) + known_contractions = [] + + path_cost = 0 + path = [] + + for iteration in range(len(input_sets) - 1): + + # Iterate over all pairs on the first step, only previously + # found pairs on subsequent steps + for positions in comb_iter: + + # Always initially ignore outer products + if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]): + continue + + result = _parse_possible_contraction( + positions, input_sets, output_set, idx_dict, + memory_limit, path_cost, naive_cost + ) + if result is not None: + known_contractions.append(result) + + # If we do not have a inner contraction, rescan pairs + # including outer products + if len(known_contractions) == 0: + + # Then check the outer products + for positions in itertools.combinations( + range(len(input_sets)), 2 + ): + result = _parse_possible_contraction( + positions, input_sets, output_set, idx_dict, + memory_limit, path_cost, naive_cost + ) + if result is not None: + known_contractions.append(result) + + # If we still did not find any remaining contractions, + # default back to einsum like behavior + if len(known_contractions) == 0: + path.append(tuple(range(len(input_sets)))) + break + + # Sort based on first index + best = min(known_contractions, key=lambda x: x[0]) + + # Now propagate as many unused contractions as possible + # to the next iteration + known_contractions = _update_other_results(known_contractions, best) + + # Next iteration only compute contractions with the new tensor + # All other contractions have been accounted for + input_sets = best[2] + new_tensor_pos = len(input_sets) - 1 + comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos)) + + # Update path and total cost + path.append(best[1]) + path_cost += best[0][1] + + return path + + +def _can_dot(inputs, result, idx_removed): + """ + Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. + + Parameters + ---------- + inputs : list of str + Specifies the subscripts for summation. + result : str + Resulting summation. + idx_removed : set + Indices that are removed in the summation + + + Returns + ------- + type : bool + Returns true if BLAS should and can be used, else False + + Notes + ----- + If the operations is BLAS level 1 or 2 and is not already aligned + we default back to einsum as the memory movement to copy is more + costly than the operation itself. 
+ + + Examples + -------- + + # Standard GEMM operation + >>> _can_dot(['ij', 'jk'], 'ik', set('j')) + True + + # Can use the standard BLAS, but requires odd data movement + >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) + False + + # DDOT where the memory is not aligned + >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) + False + + """ + + # All `dot` calls remove indices + if len(idx_removed) == 0: + return False + + # BLAS can only handle two operands + if len(inputs) != 2: + return False + + input_left, input_right = inputs + + for c in set(input_left + input_right): + # can't deal with repeated indices on same input or more than 2 total + nl, nr = input_left.count(c), input_right.count(c) + if (nl > 1) or (nr > 1) or (nl + nr > 2): + return False + + # can't do implicit summation or dimension collapse e.g. + # "ab,bc->c" (implicitly sum over 'a') + # "ab,ca->ca" (take diagonal of 'a') + if nl + nr - 1 == int(c in result): + return False + + # Build a few temporaries + set_left = set(input_left) + set_right = set(input_right) + keep_left = set_left - idx_removed + keep_right = set_right - idx_removed + rs = len(idx_removed) + + # At this point we are a DOT, GEMV, or GEMM operation + + # Handle inner products + + # DDOT with aligned data + if input_left == input_right: + return True + + # DDOT without aligned data (better to use einsum) + if set_left == set_right: + return False + + # Handle the 4 possible (aligned) GEMV or GEMM cases + + # GEMM or GEMV no transpose + if input_left[-rs:] == input_right[:rs]: + return True + + # GEMM or GEMV transpose both + if input_left[:rs] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose right + if input_left[-rs:] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose left + if input_left[:rs] == input_right[:rs]: + return True + + # Einsum is faster than GEMV if we have to copy data + if not keep_left or not keep_right: + return False + + # We are a matrix-matrix product, but we need to copy data + return True + + +def _parse_einsum_input(operands): + """ + A reproduction of einsum c side einsum parsing in python. + + Returns + ------- + input_strings : str + Parsed input strings + output_string : str + Parsed output string + operands : list of array_like + The operands to use in the numpy contraction + + Examples + -------- + The operand list is simplified to reduce printing: + + >>> np.random.seed(123) + >>> a = np.random.rand(4, 4) + >>> b = np.random.rand(4, 4, 4) + >>> _parse_einsum_input(('...a,...a->...', a, b)) + ('za,xza', 'xz', [a, b]) # may vary + + >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) + ('za,xza', 'xz', [a, b]) # may vary + """ + + if len(operands) == 0: + raise ValueError("No input operands") + + if isinstance(operands[0], str): + subscripts = operands[0].replace(" ", "") + operands = [asanyarray(v) for v in operands[1:]] + + # Ensure all characters are valid + for s in subscripts: + if s in '.,->': + continue + if s not in einsum_symbols: + raise ValueError(f"Character {s} is not a valid symbol.") + + else: + tmp_operands = list(operands) + operand_list = [] + subscript_list = [] + for p in range(len(operands) // 2): + operand_list.append(tmp_operands.pop(0)) + subscript_list.append(tmp_operands.pop(0)) + + output_list = tmp_operands[-1] if len(tmp_operands) else None + operands = [asanyarray(v) for v in operand_list] + subscripts = "" + last = len(subscript_list) - 1 + for num, sub in enumerate(subscript_list): + for s in sub: + if s is Ellipsis: + subscripts += "..." 
+ else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError( + "For this input type lists must contain " + "either int or Ellipsis" + ) from e + subscripts += einsum_symbols[s] + if num != last: + subscripts += "," + + if output_list is not None: + subscripts += "->" + for s in output_list: + if s is Ellipsis: + subscripts += "..." + else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError( + "For this input type lists must contain " + "either int or Ellipsis" + ) from e + subscripts += einsum_symbols[s] + # Check for proper "->" + if ("-" in subscripts) or (">" in subscripts): + invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) + if invalid or (subscripts.count("->") != 1): + raise ValueError("Subscripts can only contain one '->'.") + + # Parse ellipses + if "." in subscripts: + used = subscripts.replace(".", "").replace(",", "").replace("->", "") + unused = list(einsum_symbols_set - set(used)) + ellipse_inds = "".join(unused) + longest = 0 + + if "->" in subscripts: + input_tmp, output_sub = subscripts.split("->") + split_subscripts = input_tmp.split(",") + out_sub = True + else: + split_subscripts = subscripts.split(',') + out_sub = False + + for num, sub in enumerate(split_subscripts): + if "." in sub: + if (sub.count(".") != 3) or (sub.count("...") != 1): + raise ValueError("Invalid Ellipses.") + + # Take into account numerical values + if operands[num].shape == (): + ellipse_count = 0 + else: + ellipse_count = max(operands[num].ndim, 1) + ellipse_count -= (len(sub) - 3) + + if ellipse_count > longest: + longest = ellipse_count + + if ellipse_count < 0: + raise ValueError("Ellipses lengths do not match.") + elif ellipse_count == 0: + split_subscripts[num] = sub.replace('...', '') + else: + rep_inds = ellipse_inds[-ellipse_count:] + split_subscripts[num] = sub.replace('...', rep_inds) + + subscripts = ",".join(split_subscripts) + if longest == 0: + out_ellipse = "" + else: + out_ellipse = ellipse_inds[-longest:] + + if out_sub: + subscripts += "->" + output_sub.replace("...", out_ellipse) + else: + # Special care for outputless ellipses + output_subscript = "" + tmp_subscripts = subscripts.replace(",", "") + for s in sorted(set(tmp_subscripts)): + if s not in (einsum_symbols): + raise ValueError(f"Character {s} is not a valid symbol.") + if tmp_subscripts.count(s) == 1: + output_subscript += s + normal_inds = ''.join(sorted(set(output_subscript) - + set(out_ellipse))) + + subscripts += "->" + out_ellipse + normal_inds + + # Build output string if does not exist + if "->" in subscripts: + input_subscripts, output_subscript = subscripts.split("->") + else: + input_subscripts = subscripts + # Build output subscripts + tmp_subscripts = subscripts.replace(",", "") + output_subscript = "" + for s in sorted(set(tmp_subscripts)): + if s not in einsum_symbols: + raise ValueError(f"Character {s} is not a valid symbol.") + if tmp_subscripts.count(s) == 1: + output_subscript += s + + # Make sure output subscripts are in the input + for char in output_subscript: + if output_subscript.count(char) != 1: + raise ValueError("Output character %s appeared more than once in " + "the output." 
% char)
+        if char not in input_subscripts:
+            raise ValueError(f"Output character {char} did not appear in the input")
+
+    # Make sure the number of operands is equal to the number of terms
+    if len(input_subscripts.split(',')) != len(operands):
+        raise ValueError("Number of einsum subscripts must be equal to the "
+                         "number of operands.")
+
+    return (input_subscripts, output_subscript, operands)
+
+
+def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
+    # NOTE: technically, we should only dispatch on array-like arguments, not
+    # subscripts (given as strings). But separating operands into
+    # arrays/subscripts is a little tricky/slow (given einsum's two supported
+    # signatures), so as a practical shortcut we dispatch on everything.
+    # Strings will be ignored for dispatching since they don't define
+    # __array_function__.
+    return operands
+
+
+@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
+def einsum_path(*operands, optimize='greedy', einsum_call=False):
+    """
+    einsum_path(subscripts, *operands, optimize='greedy')
+
+    Evaluates the lowest cost contraction order for an einsum expression by
+    considering the creation of intermediate arrays.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation.
+    *operands : list of array_like
+        These are the arrays for the operation.
+    optimize : {bool, list, tuple, 'greedy', 'optimal'}
+        Choose the type of path. If a tuple is provided, the second argument is
+        assumed to be the maximum intermediate size created. If only a single
+        argument is provided the largest input or output array size is used
+        as a maximum intermediate size.
+
+        * if a list is given that starts with ``einsum_path``, uses this as the
+          contraction path
+        * if False, no optimization is taken
+        * if True, defaults to the 'greedy' algorithm
+        * 'optimal' An algorithm that combinatorially explores all possible
+          ways of contracting the listed tensors and chooses the least costly
+          path. Scales exponentially with the number of terms in the
+          contraction.
+        * 'greedy' An algorithm that chooses the best pair contraction
+          at each step. Effectively, this algorithm searches the largest inner,
+          Hadamard, and then outer products at each step. Scales cubically with
+          the number of terms in the contraction. Equivalent to the 'optimal'
+          path for most contractions.
+
+        Default is 'greedy'.
+
+    Returns
+    -------
+    path : list of tuples
+        A list representation of the einsum path.
+    string_repr : str
+        A printable representation of the einsum path.
+
+    Notes
+    -----
+    The resulting path indicates which terms of the input contraction should be
+    contracted first; the result of this contraction is then appended to the
+    end of the contraction list. This list can then be iterated over until all
+    intermediate contractions are complete.
+
+    See Also
+    --------
+    einsum, linalg.multi_dot
+
+    Examples
+    --------
+
+    We can begin with a chain dot example. In this case, it is optimal to
+    contract the ``b`` and ``c`` tensors first as represented by the first
+    element of the path ``(1, 2)``. The resulting tensor is added to the end
+    of the contraction and the remaining contraction ``(0, 1)`` is then
+    completed.
+ + >>> np.random.seed(123) + >>> a = np.random.rand(2, 2) + >>> b = np.random.rand(2, 5) + >>> c = np.random.rand(5, 2) + >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') + >>> print(path_info[0]) + ['einsum_path', (1, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ij,jk,kl->il # may vary + Naive scaling: 4 + Optimized scaling: 3 + Naive FLOP count: 1.600e+02 + Optimized FLOP count: 5.600e+01 + Theoretical speedup: 2.857 + Largest intermediate: 4.000e+00 elements + ------------------------------------------------------------------------- + scaling current remaining + ------------------------------------------------------------------------- + 3 kl,jk->jl ij,jl->il + 3 jl,ij->il il->il + + + A more complex index transformation example. + + >>> I = np.random.rand(10, 10, 10, 10) + >>> C = np.random.rand(10, 10) + >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, + ... optimize='greedy') + + >>> print(path_info[0]) + ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary + Naive scaling: 8 + Optimized scaling: 5 + Naive FLOP count: 8.000e+08 + Optimized FLOP count: 8.000e+05 + Theoretical speedup: 1000.000 + Largest intermediate: 1.000e+04 elements + -------------------------------------------------------------------------- + scaling current remaining + -------------------------------------------------------------------------- + 5 abcd,ea->bcde fb,gc,hd,bcde->efgh + 5 bcde,fb->cdef gc,hd,cdef->efgh + 5 cdef,gc->defg hd,defg->efgh + 5 defg,hd->efgh efgh->efgh + """ + + # Figure out what the path really is + path_type = optimize + if path_type is True: + path_type = 'greedy' + if path_type is None: + path_type = False + + explicit_einsum_path = False + memory_limit = None + + # No optimization or a named path algorithm + if (path_type is False) or isinstance(path_type, str): + pass + + # Given an explicit path + elif len(path_type) and (path_type[0] == 'einsum_path'): + explicit_einsum_path = True + + # Path tuple with memory limit + elif ((len(path_type) == 2) and isinstance(path_type[0], str) and + isinstance(path_type[1], (int, float))): + memory_limit = int(path_type[1]) + path_type = path_type[0] + + else: + raise TypeError(f"Did not understand the path: {str(path_type)}") + + # Hidden option, only einsum should call this + einsum_call_arg = einsum_call + + # Python side parsing + input_subscripts, output_subscript, operands = ( + _parse_einsum_input(operands) + ) + + # Build a few useful list and sets + input_list = input_subscripts.split(',') + input_sets = [set(x) for x in input_list] + output_set = set(output_subscript) + indices = set(input_subscripts.replace(',', '')) + + # Get length of each unique dimension and ensure all dimensions are correct + dimension_dict = {} + broadcast_indices = [[] for x in range(len(input_list))] + for tnum, term in enumerate(input_list): + sh = operands[tnum].shape + if len(sh) != len(term): + raise ValueError("Einstein sum subscript %s does not contain the " + "correct number of indices for operand %d." 
+                             % (input_list[tnum], tnum))
+        for cnum, char in enumerate(term):
+            dim = sh[cnum]
+
+            # Build out broadcast indices
+            if dim == 1:
+                broadcast_indices[tnum].append(char)
+
+            if char in dimension_dict.keys():
+                # For broadcasting cases we always want the largest dim size
+                if dimension_dict[char] == 1:
+                    dimension_dict[char] = dim
+                elif dim not in (1, dimension_dict[char]):
+                    raise ValueError("Size of label '%s' for operand %d (%d) "
+                                     "does not match previous terms (%d)."
+                                     % (char, tnum, dim, dimension_dict[char]))
+            else:
+                dimension_dict[char] = dim
+
+    # Convert broadcast inds to sets
+    broadcast_indices = [set(x) for x in broadcast_indices]
+
+    # Compute size of each input array plus the output array
+    size_list = [_compute_size_by_dict(term, dimension_dict)
+                 for term in input_list + [output_subscript]]
+    max_size = max(size_list)
+
+    if memory_limit is None:
+        memory_arg = max_size
+    else:
+        memory_arg = memory_limit
+
+    # Compute naive cost
+    # This isn't quite right, need to look into exactly how einsum does this
+    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
+    naive_cost = _flop_count(
+        indices, inner_product, len(input_list), dimension_dict
+    )
+
+    # Compute the path
+    if explicit_einsum_path:
+        path = path_type[1:]
+    elif (
+        (path_type is False)
+        or (len(input_list) in [1, 2])
+        or (indices == output_set)
+    ):
+        # Nothing to be optimized, leave it to einsum
+        path = [tuple(range(len(input_list)))]
+    elif path_type == "greedy":
+        path = _greedy_path(
+            input_sets, output_set, dimension_dict, memory_arg
+        )
+    elif path_type == "optimal":
+        path = _optimal_path(
+            input_sets, output_set, dimension_dict, memory_arg
+        )
+    else:
+        raise KeyError(f"Path name {path_type} not found")
+
+    cost_list, scale_list, size_list, contraction_list = [], [], [], []
+
+    # Build contraction tuple (positions, gemm, einsum_str, remaining)
+    for cnum, contract_inds in enumerate(path):
+        # Make sure we remove inds from right to left
+        contract_inds = tuple(sorted(contract_inds, reverse=True))
+
+        contract = _find_contraction(contract_inds, input_sets, output_set)
+        out_inds, input_sets, idx_removed, idx_contract = contract
+
+        cost = _flop_count(
+            idx_contract, idx_removed, len(contract_inds), dimension_dict
+        )
+        cost_list.append(cost)
+        scale_list.append(len(idx_contract))
+        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
+
+        bcast = set()
+        tmp_inputs = []
+        for x in contract_inds:
+            tmp_inputs.append(input_list.pop(x))
+            bcast |= broadcast_indices.pop(x)
+
+        new_bcast_inds = bcast - idx_removed
+
+        # If we're broadcasting, nix blas
+        if not len(idx_removed & bcast):
+            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+        else:
+            do_blas = False
+
+        # Last contraction
+        if (cnum - len(path)) == -1:
+            idx_result = output_subscript
+        else:
+            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
+            idx_result = "".join([x[1] for x in sorted(sort_result)])
+
+        input_list.append(idx_result)
+        broadcast_indices.append(new_bcast_inds)
+        einsum_str = ",".join(tmp_inputs) + "->" + idx_result
+
+        contraction = (
+            contract_inds, idx_removed, einsum_str, input_list[:], do_blas
+        )
+        contraction_list.append(contraction)
+
+    opt_cost = sum(cost_list) + 1
+
+    if len(input_list) != 1:
+        # An explicit "einsum_path" is usually trusted, but we detect this
+        # kind of mistake in order to avoid returning an intermediate value.
+        raise RuntimeError(
+            f"Invalid einsum_path is specified: {len(input_list) - 1} more "
+            "operands have to be contracted.")
+
+    if einsum_call_arg:
+        return (operands, contraction_list)
+
+    # Return the path along with a nice string representation
+    overall_contraction = input_subscripts + "->" + output_subscript
+    header = ("scaling", "current", "remaining")
+
+    speedup = naive_cost / opt_cost
+    max_i = max(size_list)
+
+    path_print = f"  Complete contraction:  {overall_contraction}\n"
+    path_print += f"         Naive scaling:  {len(indices)}\n"
+    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
+    path_print += f"      Naive FLOP count:  {naive_cost:.3e}\n"
+    path_print += f"  Optimized FLOP count:  {opt_cost:.3e}\n"
+    path_print += f"   Theoretical speedup:  {speedup:3.3f}\n"
+    path_print += f"  Largest intermediate:  {max_i:.3e} elements\n"
+    path_print += "-" * 74 + "\n"
+    path_print += "%6s %24s %40s\n" % header
+    path_print += "-" * 74
+
+    for n, contraction in enumerate(contraction_list):
+        inds, idx_rm, einsum_str, remaining, blas = contraction
+        remaining_str = ",".join(remaining) + "->" + output_subscript
+        path_run = (scale_list[n], einsum_str, remaining_str)
+        path_print += "\n%4d    %24s %40s" % path_run
+
+    path = ['einsum_path'] + path
+    return (path, path_print)
+
+
+def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
+    # Arguably we dispatch on more arguments than we really should; see note
+    # in _einsum_path_dispatcher for why.
+    yield from operands
+    yield out
+
+
+# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
+def einsum(*operands, out=None, optimize=False, **kwargs):
+    """
+    einsum(subscripts, *operands, out=None, dtype=None, order='K',
+           casting='safe', optimize=False)
+
+    Evaluates the Einstein summation convention on the operands.
+
+    Using the Einstein summation convention, many common multi-dimensional,
+    linear algebraic array operations can be represented in a simple fashion.
+    In *implicit* mode `einsum` computes these values.
+
+    In *explicit* mode, `einsum` provides further flexibility to compute
+    other array operations that might not be considered classical Einstein
+    summation operations, by disabling or forcing summation over specified
+    subscript labels.
+
+    See the notes and examples for clarification.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation as a comma-separated list of
+        subscript labels. An implicit (classical Einstein summation)
+        calculation is performed unless the explicit indicator '->' is
+        included as well as subscript labels of the precise output form.
+    operands : list of array_like
+        These are the arrays for the operation.
+    out : ndarray, optional
+        If provided, the calculation is done into this array.
+    dtype : {data-type, None}, optional
+        If provided, forces the calculation to use the data type specified.
+        Note that you may have to also give a more liberal `casting`
+        parameter to allow the conversions. Default is None.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the output. 'C' means it should
+        be C contiguous. 'F' means it should be Fortran contiguous,
+        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+        'K' means it should be as close to the layout of the inputs as
+        is possible, including arbitrarily permuted axes.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur. Setting this to
+        'unsafe' is not recommended, as it can adversely affect accumulations.
+
+        * 'no' means the data types should not be cast at all.
+        * 'equiv' means only byte-order changes are allowed.
+        * 'safe' means only casts which can preserve values are allowed.
+        * 'same_kind' means only safe casts or casts within a kind,
+          like float64 to float32, are allowed.
+        * 'unsafe' means any data conversions may be done.
+
+        Default is 'safe'.
+    optimize : {False, True, 'greedy', 'optimal'}, optional
+        Controls if intermediate optimization should occur. No optimization
+        will occur if False, and True will default to the 'greedy' algorithm.
+        Also accepts an explicit contraction list from the ``np.einsum_path``
+        function. See ``np.einsum_path`` for more details. Defaults to False.
+
+    Returns
+    -------
+    output : ndarray
+        The calculation based on the Einstein summation convention.
+
+    See Also
+    --------
+    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+    einops :
+        A similar verbose interface is provided by the
+        `einops <https://github.com/arogozhnikov/einops>`_ package to cover
+        additional operations: transpose, reshape/flatten, repeat/tile,
+        squeeze/unsqueeze and reductions.
+    opt_einsum :
+        The `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
+        package optimizes contraction order for einsum-like expressions
+        in a backend-agnostic manner.
+
+    Notes
+    -----
+    The Einstein summation convention can be used to compute
+    many multi-dimensional, linear algebraic array operations. `einsum`
+    provides a succinct way of representing these.
+
+    A non-exhaustive list of these operations,
+    which can be computed by `einsum`, is shown below along with examples:
+
+    * Trace of an array, :py:func:`numpy.trace`.
+    * Return a diagonal, :py:func:`numpy.diag`.
+    * Array axis summations, :py:func:`numpy.sum`.
+    * Transpositions and permutations, :py:func:`numpy.transpose`.
+    * Matrix multiplication and dot product, :py:func:`numpy.matmul`
+      :py:func:`numpy.dot`.
+    * Vector inner and outer products, :py:func:`numpy.inner`
+      :py:func:`numpy.outer`.
+    * Broadcasting, element-wise and scalar multiplication,
+      :py:func:`numpy.multiply`.
+    * Tensor contractions, :py:func:`numpy.tensordot`.
+    * Chained array operations, in efficient calculation order,
+      :py:func:`numpy.einsum_path`.
+
+    The subscripts string is a comma-separated list of subscript labels,
+    where each label refers to a dimension of the corresponding operand.
+    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+    appears only once, it is not summed, so ``np.einsum('i', a)``
+    produces a view of ``a`` with no changes. A further example
+    ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication
+    and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`.
+    Repeated subscript labels in one operand take the diagonal.
+    For example, ``np.einsum('ii', a)`` is equivalent to
+    :py:func:`np.trace(a) <numpy.trace>`.
+
+    In *implicit mode*, the chosen subscripts are important
+    since the axes of the output are reordered alphabetically. This
+    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+    ``np.einsum('ji', a)`` takes its transpose. Additionally,
+    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while
+    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+    multiplication since subscript 'h' precedes subscript 'i'.
+
+    In *explicit mode* the output can be directly controlled by
+    specifying output subscript labels. This requires the
+    identifier '->' as well as the list of output subscript labels.
+
+    This feature increases the flexibility of the function since
+    summing can be disabled or forced when required. The call
+    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) <numpy.sum>`
+    if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)``
+    is like :py:func:`np.diag(a) <numpy.diag>` if ``a`` is a square 2-D array.
+    The difference is that `einsum` does not allow broadcasting by default.
+    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+    order of the output subscript labels and therefore returns matrix
+    multiplication, unlike the example above in implicit mode.
+
+    To enable and control broadcasting, use an ellipsis. Default
+    NumPy-style broadcasting is done by adding an ellipsis
+    to the left of each term, like ``np.einsum('...ii->...i', a)``.
+    ``np.einsum('...i->...', a)`` is like
+    :py:func:`np.sum(a, axis=-1) <numpy.sum>` for array ``a`` of any shape.
+    To take the trace along the first and last axes,
+    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+    product with the left-most indices instead of rightmost, one can do
+    ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+    When there is only one operand, no axes are summed, and no output
+    parameter is provided, a view into the operand is returned instead
+    of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+    produces a view (changed in version 1.10.0).
+
+    `einsum` also provides an alternative way to provide the subscripts and
+    operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+    If the output shape is not provided in this format `einsum` will be
+    calculated in implicit mode, otherwise it will be performed explicitly.
+    The examples below have corresponding `einsum` calls with the two
+    parameter methods.
+
+    Views returned from einsum are now writeable whenever the input array
+    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+    of a 2D array.
+
+    Added the ``optimize`` argument which will optimize the contraction order
+    of an einsum expression. For a contraction with three or more operands
+    this can greatly increase the computational efficiency at the cost of
+    a larger memory footprint during computation.
+
+    Typically a 'greedy' algorithm is applied, which empirical tests have
+    shown returns the optimal path in the majority of cases. In some cases
+    'optimal' will find a better path through a more expensive, exhaustive
+    search. For iterative calculations it may be advisable to calculate
+    the optimal path once and reuse that path by supplying it as an argument.
+    An example is given below.
+
+    See :py:func:`numpy.einsum_path` for more details.
+ + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done + with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + Chained array operations. 
For more complicated contractions, speed ups + might be achieved by repeatedly computing a 'greedy' path or pre-computing + the 'optimal' path and repeatedly applying it, using an `einsum_path` + insertion (since version 1.12.0). Performance improvements can be + particularly significant with larger arrays: + + >>> a = np.ones(64).reshape(2,4,8) + + Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) + + Sub-optimal `einsum` (due to repeated path calculation time): ~330ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, + ... optimize='optimal') + + Greedy `einsum` (faster optimal path approximation): ~160ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') + + Optimal `einsum` (best usage pattern in some use cases): ~110ms + + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, + ... optimize='optimal')[0] + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) + + """ + # Special handling if out is specified + specified_out = out is not None + + # If no optimization, run pure einsum + if optimize is False: + if specified_out: + kwargs['out'] = out + return c_einsum(*operands, **kwargs) + + # Check the kwargs to avoid a more cryptic error later, without having to + # repeat default values here + valid_einsum_kwargs = ['dtype', 'order', 'casting'] + unknown_kwargs = [k for (k, v) in kwargs.items() if + k not in valid_einsum_kwargs] + if len(unknown_kwargs): + raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}") + + # Build the contraction list and operand + operands, contraction_list = einsum_path(*operands, optimize=optimize, + einsum_call=True) + + # Handle order kwarg for output array, c_einsum allows mixed case + output_order = kwargs.pop('order', 'K') + if output_order.upper() == 'A': + if all(arr.flags.f_contiguous for arr in operands): + output_order = 'F' + else: + output_order = 'C' + + # Start contraction loop + for num, contraction in enumerate(contraction_list): + inds, idx_rm, einsum_str, remaining, blas = contraction + tmp_operands = [operands.pop(x) for x in inds] + + # Do we need to deal with the output? + handle_out = specified_out and ((num + 1) == len(contraction_list)) + + # Call tensordot if still possible + if blas: + # Checks have already been handled + input_str, results_index = einsum_str.split('->') + input_left, input_right = input_str.split(',') + + tensor_result = input_left + input_right + for s in idx_rm: + tensor_result = tensor_result.replace(s, "") + + # Find indices to contract over + left_pos, right_pos = [], [] + for s in sorted(idx_rm): + left_pos.append(input_left.find(s)) + right_pos.append(input_right.find(s)) + + # Contract! 
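+            # (tuple(left_pos), tuple(right_pos)) pairs up each removed index
+            # between the two operands so tensordot contracts those axes; any
+            # remaining reordering into the desired output layout is applied
+            # by the c_einsum call below when needed.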
+            new_view = tensordot(
+                *tmp_operands, axes=(tuple(left_pos), tuple(right_pos))
+            )
+
+            # Build a new view if needed
+            if (tensor_result != results_index) or handle_out:
+                if handle_out:
+                    kwargs["out"] = out
+                new_view = c_einsum(
+                    tensor_result + '->' + results_index, new_view, **kwargs
+                )
+
+        # Call einsum
+        else:
+            # If out was specified
+            if handle_out:
+                kwargs["out"] = out
+
+            # Do the contraction
+            new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
+
+        # Append new items and dereference what we can
+        operands.append(new_view)
+        del tmp_operands, new_view
+
+    if specified_out:
+        return out
+    else:
+        return asanyarray(operands[0], order=output_order)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.pyi
new file mode 100644
index 0000000..9653a26
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.pyi
@@ -0,0 +1,184 @@
+from collections.abc import Sequence
+from typing import Any, Literal, TypeAlias, TypeVar, overload
+
+import numpy as np
+from numpy import _OrderKACF, number
+from numpy._typing import (
+    NDArray,
+    _ArrayLikeBool_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeObject_co,
+    _ArrayLikeUInt_co,
+    _DTypeLikeBool,
+    _DTypeLikeComplex,
+    _DTypeLikeComplex_co,
+    _DTypeLikeFloat,
+    _DTypeLikeInt,
+    _DTypeLikeObject,
+    _DTypeLikeUInt,
+)
+
+__all__ = ["einsum", "einsum_path"]
+
+_ArrayT = TypeVar(
+    "_ArrayT",
+    bound=NDArray[np.bool | number],
+)
+
+_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None
+_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"]
+_CastingUnsafe: TypeAlias = Literal["unsafe"]
+
+# TODO: Properly handle the `casting`-based combinatorics
+# TODO: We need to evaluate the content `__subscripts` in order
+# to identify whether an array or a scalar is returned. At a cursory
+# glance this seems like something that can quite easily be done with
+# a mypy plugin.
+# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeBool_co,
+    out: None = ...,
+    dtype: _DTypeLikeBool | None = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeUInt_co,
+    out: None = ...,
+    dtype: _DTypeLikeUInt | None = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeInt_co,
+    out: None = ...,
+    dtype: _DTypeLikeInt | None = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeFloat_co,
+    out: None = ...,
+    dtype: _DTypeLikeFloat | None = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeComplex_co,
+    out: None = ...,
+    dtype: _DTypeLikeComplex | None = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
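+# NOTE: The overloads below mirror the dtype-specific ones above:
+# ``casting="unsafe"`` relaxes the operand/dtype pairing to ``Any``,
+# supplying a concrete ``out`` array binds and returns ``_ArrayT``,
+# and the ``object_`` dtype variants follow at the end.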
+@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + casting: _CastingUnsafe, + dtype: _DTypeLikeComplex_co | None = ..., + out: None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co, + out: _ArrayT, + dtype: _DTypeLikeComplex_co | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayT: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + out: _ArrayT, + casting: _CastingUnsafe, + dtype: _DTypeLikeComplex_co | None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayT: ... + +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeObject_co, + out: None = ..., + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + casting: _CastingUnsafe, + dtype: _DTypeLikeObject | None = ..., + out: None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeObject_co, + out: _ArrayT, + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayT: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + out: _ArrayT, + casting: _CastingUnsafe, + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayT: ... + +# NOTE: `einsum_call` is a hidden kwarg unavailable for public use. +# It is therefore excluded from the signatures below. +# NOTE: In practice the list consists of a `str` (first element) +# and a variable number of integer tuples. +def einsum_path( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co | _DTypeLikeObject, + optimize: _OptimizeKind = "greedy", + einsum_call: Literal[False] = False, +) -> tuple[list[Any], str]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.py b/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.py new file mode 100644 index 0000000..e20d774 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.py @@ -0,0 +1,4269 @@ +"""Module containing non-deprecated functions borrowed from Numeric. + +""" +import functools +import types +import warnings + +import numpy as np +from numpy._utils import set_module + +from . import _methods, overrides +from . import multiarray as mu +from . import numerictypes as nt +from . 
import umath as um +from ._multiarray_umath import _array_converter +from .multiarray import asanyarray, asarray, concatenate + +_dt_ = nt.sctype2char + +# functions that are methods +__all__ = [ + 'all', 'amax', 'amin', 'any', 'argmax', + 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', + 'compress', 'cumprod', 'cumsum', 'cumulative_prod', 'cumulative_sum', + 'diagonal', 'mean', 'max', 'min', 'matrix_transpose', + 'ndim', 'nonzero', 'partition', 'prod', 'ptp', 'put', + 'ravel', 'repeat', 'reshape', 'resize', 'round', + 'searchsorted', 'shape', 'size', 'sort', 'squeeze', + 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', +] + +_gentype = types.GeneratorType +# save away Python sum +_sum_ = sum + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +# functions that are now methods +def _wrapit(obj, method, *args, **kwds): + conv = _array_converter(obj) + # As this already tried the method, subok is maybe quite reasonable here + # but this follows what was done before. TODO: revisit this. + arr, = conv.as_arrays(subok=False) + result = getattr(arr, method)(*args, **kwds) + + return conv.wrap(result, to_scalar=False) + + +def _wrapfunc(obj, method, *args, **kwds): + bound = getattr(obj, method, None) + if bound is None: + return _wrapit(obj, method, *args, **kwds) + + try: + return bound(*args, **kwds) + except TypeError: + # A TypeError occurs if the object does have such a method in its + # class, but its signature is not identical to that of NumPy's. This + # situation has occurred in the case of a downstream library like + # 'pandas'. + # + # Call _wrapit from within the except clause to ensure a potential + # exception has a traceback chain. + return _wrapit(obj, method, *args, **kwds) + + +def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs): + passkwargs = {k: v for k, v in kwargs.items() + if v is not np._NoValue} + + if type(obj) is not mu.ndarray: + try: + reduction = getattr(obj, method) + except AttributeError: + pass + else: + # This branch is needed for reductions like any which don't + # support a dtype. + if dtype is not None: + return reduction(axis=axis, dtype=dtype, out=out, **passkwargs) + else: + return reduction(axis=axis, out=out, **passkwargs) + + return ufunc.reduce(obj, axis, dtype, out, **passkwargs) + + +def _wrapreduction_any_all(obj, ufunc, method, axis, out, **kwargs): + # Same as above function, but dtype is always bool (but never passed on) + passkwargs = {k: v for k, v in kwargs.items() + if v is not np._NoValue} + + if type(obj) is not mu.ndarray: + try: + reduction = getattr(obj, method) + except AttributeError: + pass + else: + return reduction(axis=axis, out=out, **passkwargs) + + return ufunc.reduce(obj, axis, bool, out, **passkwargs) + + +def _take_dispatcher(a, indices, axis=None, out=None, mode=None): + return (a, out) + + +@array_function_dispatch(_take_dispatcher) +def take(a, indices, axis=None, out=None, mode='raise'): + """ + Take elements from an array along an axis. + + When axis is not None, this function does the same thing as "fancy" + indexing (indexing arrays using arrays); however, it can be easier to use + if you need elements along a given axis. A call such as + ``np.take(arr, indices, axis=3)`` is equivalent to + ``arr[:,:,:,indices,...]``. 
+
+    Explained without fancy indexing, this is equivalent to the following use
+    of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
+    indices::
+
+        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+        Nj = indices.shape
+        for ii in ndindex(Ni):
+            for jj in ndindex(Nj):
+                for kk in ndindex(Nk):
+                    out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
+
+    Parameters
+    ----------
+    a : array_like (Ni..., M, Nk...)
+        The source array.
+    indices : array_like (Nj...)
+        The indices of the values to extract.
+        Scalars are also allowed for `indices`.
+    axis : int, optional
+        The axis over which to select values. By default, the flattened
+        input array is used.
+    out : ndarray, optional (Ni..., Nj..., Nk...)
+        If provided, the result will be placed in this array. It should
+        be of the appropriate shape and dtype. Note that `out` is always
+        buffered if `mode='raise'`; use other modes for better performance.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices will behave.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        'clip' mode means that all indices that are too large are replaced
+        by the index that addresses the last element along that axis. Note
+        that this disables indexing with negative numbers.
+
+    Returns
+    -------
+    out : ndarray (Ni..., Nj..., Nk...)
+        The returned array has the same type as `a`.
+
+    See Also
+    --------
+    compress : Take elements using a boolean mask
+    ndarray.take : equivalent method
+    take_along_axis : Take elements by matching the array and the index arrays
+
+    Notes
+    -----
+    By eliminating the inner loop in the description above, and using `s_` to
+    build simple slice objects, `take` can be expressed in terms of applying
+    fancy indexing to each 1-d slice::
+
+        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+        for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+                out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
+
+    For this reason, it is equivalent to (but faster than) the following use
+    of `apply_along_axis`::
+
+        out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = [4, 3, 5, 7, 6, 8]
+    >>> indices = [0, 1, 4]
+    >>> np.take(a, indices)
+    array([4, 3, 6])
+
+    In this example if `a` is an ndarray, "fancy" indexing can be used.
+
+    >>> a = np.array(a)
+    >>> a[indices]
+    array([4, 3, 6])
+
+    If `indices` is not one dimensional, the output also has these dimensions.
+
+    >>> np.take(a, [[0, 1], [2, 3]])
+    array([[4, 3],
+           [5, 7]])
+    """
+    return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
+
+
+def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None,
+                        copy=None):
+    return (a,)
+
+
+@array_function_dispatch(_reshape_dispatcher)
+def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None):
+    """
+    Gives a new shape to an array without changing its data.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be reshaped.
+    shape : int or tuple of ints
+        The new shape should be compatible with the original shape. If
+        an integer, then the result will be a 1-D array of that length.
+        One shape dimension can be -1. In this case, the value is
+        inferred from the length of the array and remaining dimensions.
+    order : {'C', 'F', 'A'}, optional
+        Read the elements of ``a`` using this index order, and place the
+        elements into the reshaped array using this index order. 
'C' + means to read / write the elements using C-like index order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to read / write the + elements using Fortran-like index order, with the first index + changing fastest, and the last index changing slowest. Note that + the 'C' and 'F' options take no account of the memory layout of + the underlying array, and only refer to the order of indexing. + 'A' means to read / write the elements in Fortran-like index + order if ``a`` is Fortran *contiguous* in memory, C-like order + otherwise. + newshape : int or tuple of ints + .. deprecated:: 2.1 + Replaced by ``shape`` argument. Retained for backward + compatibility. + copy : bool, optional + If ``True``, then the array data is copied. If ``None``, a copy will + only be made if it's required by ``order``. For ``False`` it raises + a ``ValueError`` if a copy cannot be avoided. Default: ``None``. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. Note there is no guarantee of the *memory layout* (C- or + Fortran- contiguous) of the returned array. + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + It is not always possible to change the shape of an array without copying + the data. + + The ``order`` keyword gives the index ordering both for *fetching* + the values from ``a``, and then *placing* the values into the output + array. For example, let's say you have an array: + + >>> a = np.arange(6).reshape((3, 2)) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + + You can think of reshaping as first raveling the array (using the given + index order), then inserting the elements from the raveled array into the + new array using the same kind of index ordering as was used for the + raveling. + + >>> np.reshape(a, (2, 3)) # C-like index ordering + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering + array([[0, 4, 3], + [2, 1, 5]]) + >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') + array([[0, 4, 3], + [2, 1, 5]]) + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> np.reshape(a, 6) + array([1, 2, 3, 4, 5, 6]) + >>> np.reshape(a, 6, order='F') + array([1, 4, 2, 5, 3, 6]) + + >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 + array([[1, 2], + [3, 4], + [5, 6]]) + """ + if newshape is None and shape is None: + raise TypeError( + "reshape() missing 1 required positional argument: 'shape'") + if newshape is not None: + if shape is not None: + raise TypeError( + "You cannot specify 'newshape' and 'shape' arguments " + "at the same time.") + # Deprecated in NumPy 2.1, 2024-04-18 + warnings.warn( + "`newshape` keyword argument is deprecated, " + "use `shape=...` or pass shape positionally instead. " + "(deprecated in NumPy 2.1)", + DeprecationWarning, + stacklevel=2, + ) + shape = newshape + if copy is not None: + return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) + return _wrapfunc(a, 'reshape', shape, order=order) + + +def _choose_dispatcher(a, choices, out=None, mode=None): + yield a + yield from choices + yield out + + +@array_function_dispatch(_choose_dispatcher) +def choose(a, choices, out=None, mode='raise'): + """ + Construct an array from an index array and a list of arrays to choose from. 
+ + First of all, if confused or uncertain, definitely look at the Examples - + in its full generality, this function is less simple than it might + seem from the following code description:: + + np.choose(a,c) == np.array([c[a[I]][I] for I in np.ndindex(a.shape)]) + + But this omits some subtleties. Here is a fully general summary: + + Given an "index" array (`a`) of integers and a sequence of ``n`` arrays + (`choices`), `a` and each choice array are first broadcast, as necessary, + to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = + 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` + for each ``i``. Then, a new array with shape ``Ba.shape`` is created as + follows: + + * if ``mode='raise'`` (the default), then, first of all, each element of + ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose + that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)`` + position in ``Ba`` - then the value at the same position in the new array + is the value in ``Bchoices[i]`` at that same position; + + * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed) + integer; modular arithmetic is used to map integers outside the range + `[0, n-1]` back into that range; and then the new array is constructed + as above; + + * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed) + integer; negative integers are mapped to 0; values greater than ``n-1`` + are mapped to ``n-1``; and then the new array is constructed as above. + + Parameters + ---------- + a : int array + This array must contain integers in ``[0, n-1]``, where ``n`` is the + number of choices, unless ``mode=wrap`` or ``mode=clip``, in which + cases any integers are permissible. + choices : sequence of arrays + Choice arrays. `a` and all of the choices must be broadcastable to the + same shape. If `choices` is itself an array (not recommended), then + its outermost dimension (i.e., the one corresponding to + ``choices.shape[0]``) is taken as defining the "sequence". + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if ``mode='raise'``; use other modes for better performance. + mode : {'raise' (default), 'wrap', 'clip'}, optional + Specifies how indices outside ``[0, n-1]`` will be treated: + + * 'raise' : an exception is raised + * 'wrap' : value becomes value mod ``n`` + * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 + + Returns + ------- + merged_array : array + The merged result. + + Raises + ------ + ValueError: shape mismatch + If `a` and each choice array are not all broadcastable to the same + shape. + + See Also + -------- + ndarray.choose : equivalent method + numpy.take_along_axis : Preferable if `choices` is an array + + Notes + ----- + To reduce the chance of misinterpretation, even though the following + "abuse" is nominally supported, `choices` should neither be, nor be + thought of as, a single array, i.e., the outermost sequence-like container + should be either a list or a tuple. + + Examples + -------- + + >>> import numpy as np + >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], + ... [20, 21, 22, 23], [30, 31, 32, 33]] + >>> np.choose([2, 3, 1, 0], choices + ... # the first element of the result will be the first element of the + ... # third (2+1) "array" in choices, namely, 20; the second element + ... # will be the second element of the fourth (3+1) choice array, i.e., + ... 
# 31, etc.
+    ... )
+    array([20, 31, 12,  3])
+    >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
+    array([20, 31, 12,  3])
+    >>> # because there are 4 choice arrays
+    >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
+    array([20,  1, 12,  3])
+    >>> # i.e., 0
+
+    A couple of examples illustrating how choose broadcasts:
+
+    >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
+    >>> choices = [-10, 10]
+    >>> np.choose(a, choices)
+    array([[ 10, -10,  10],
+           [-10,  10, -10],
+           [ 10, -10,  10]])
+
+    >>> # With thanks to Anne Archibald
+    >>> a = np.array([0, 1]).reshape((2,1,1))
+    >>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
+    >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
+    >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
+    array([[[ 1,  1,  1,  1,  1],
+            [ 2,  2,  2,  2,  2],
+            [ 3,  3,  3,  3,  3]],
+           [[-1, -2, -3, -4, -5],
+            [-1, -2, -3, -4, -5],
+            [-1, -2, -3, -4, -5]]])
+
+    """
+    return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
+
+
+def _repeat_dispatcher(a, repeats, axis=None):
+    return (a,)
+
+
+@array_function_dispatch(_repeat_dispatcher)
+def repeat(a, repeats, axis=None):
+    """
+    Repeat each element of an array after itself.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    repeats : int or array of ints
+        The number of repetitions for each element. `repeats` is broadcasted
+        to fit the shape of the given axis.
+    axis : int, optional
+        The axis along which to repeat values. By default, use the
+        flattened input array, and return a flat output array.
+
+    Returns
+    -------
+    repeated_array : ndarray
+        Output array which has the same shape as `a`, except along
+        the given axis.
+
+    See Also
+    --------
+    tile : Tile an array.
+    unique : Find the unique elements of an array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.repeat(3, 4)
+    array([3, 3, 3, 3])
+    >>> x = np.array([[1,2],[3,4]])
+    >>> np.repeat(x, 2)
+    array([1, 1, 2, 2, 3, 3, 4, 4])
+    >>> np.repeat(x, 3, axis=1)
+    array([[1, 1, 1, 2, 2, 2],
+           [3, 3, 3, 4, 4, 4]])
+    >>> np.repeat(x, [1, 2], axis=0)
+    array([[1, 2],
+           [3, 4],
+           [3, 4]])
+
+    """
+    return _wrapfunc(a, 'repeat', repeats, axis=axis)
+
+
+def _put_dispatcher(a, ind, v, mode=None):
+    return (a, ind, v)
+
+
+@array_function_dispatch(_put_dispatcher)
+def put(a, ind, v, mode='raise'):
+    """
+    Replaces specified elements of an array with given values.
+
+    The indexing works on the flattened target array. `put` is roughly
+    equivalent to:
+
+    ::
+
+        a.flat[ind] = v
+
+    Parameters
+    ----------
+    a : ndarray
+        Target array.
+    ind : array_like
+        Target indices, interpreted as integers.
+    v : array_like
+        Values to place in `a` at target indices. If `v` is shorter than
+        `ind` it will be repeated as necessary.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices will behave.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        'clip' mode means that all indices that are too large are replaced
+        by the index that addresses the last element along that axis. Note
+        that this disables indexing with negative numbers. In 'raise' mode,
+        if an exception occurs the target array may still be modified.
+ + See Also + -------- + putmask, place + put_along_axis : Put elements by matching the array and the index arrays + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(5) + >>> np.put(a, [0, 2], [-44, -55]) + >>> a + array([-44, 1, -55, 3, 4]) + + >>> a = np.arange(5) + >>> np.put(a, 22, -5, mode='clip') + >>> a + array([ 0, 1, 2, 3, -5]) + + """ + try: + put = a.put + except AttributeError as e: + raise TypeError(f"argument 1 must be numpy.ndarray, not {type(a)}") from e + + return put(ind, v, mode=mode) + + +def _swapaxes_dispatcher(a, axis1, axis2): + return (a,) + + +@array_function_dispatch(_swapaxes_dispatcher) +def swapaxes(a, axis1, axis2): + """ + Interchange two axes of an array. + + Parameters + ---------- + a : array_like + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. + + Returns + ------- + a_swapped : ndarray + For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is + returned; otherwise a new array is created. For earlier NumPy + versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3]]) + >>> np.swapaxes(x,0,1) + array([[1], + [2], + [3]]) + + >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + >>> np.swapaxes(x,0,2) + array([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]]) + + """ + return _wrapfunc(a, 'swapaxes', axis1, axis2) + + +def _transpose_dispatcher(a, axes=None): + return (a,) + + +@array_function_dispatch(_transpose_dispatcher) +def transpose(a, axes=None): + """ + Returns an array with axes transposed. + + For a 1-D array, this returns an unchanged view of the original array, as a + transposed vector is simply the same vector. + To convert a 1-D array into a 2-D column vector, an additional dimension + must be added, e.g., ``np.atleast_2d(a).T`` achieves this, as does + ``a[:, np.newaxis]``. + For a 2-D array, this is the standard matrix transpose. + For an n-D array, if axes are given, their order indicates how the + axes are permuted (see Examples). If axes are not provided, then + ``transpose(a).shape == a.shape[::-1]``. + + Parameters + ---------- + a : array_like + Input array. + axes : tuple or list of ints, optional + If specified, it must be a tuple or list which contains a permutation + of [0, 1, ..., N-1] where N is the number of axes of `a`. Negative + indices can also be used to specify axes. The i-th axis of the returned + array will correspond to the axis numbered ``axes[i]`` of the input. + If not specified, defaults to ``range(a.ndim)[::-1]``, which reverses + the order of the axes. + + Returns + ------- + p : ndarray + `a` with its axes permuted. A view is returned whenever possible. + + See Also + -------- + ndarray.transpose : Equivalent method. + moveaxis : Move axes of an array to new positions. + argsort : Return the indices that would sort an array. + + Notes + ----- + Use ``transpose(a, argsort(axes))`` to invert the transposition of tensors + when using the `axes` keyword argument. 
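+
+    For instance, a quick sketch of that round trip (shapes chosen
+    arbitrarily):
+
+    >>> a = np.ones((1, 2, 3))
+    >>> axes = (2, 0, 1)
+    >>> np.transpose(np.transpose(a, axes), np.argsort(axes)).shape
+    (1, 2, 3)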
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> a
+    array([[1, 2],
+           [3, 4]])
+    >>> np.transpose(a)
+    array([[1, 3],
+           [2, 4]])
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> a
+    array([1, 2, 3, 4])
+    >>> np.transpose(a)
+    array([1, 2, 3, 4])
+
+    >>> a = np.ones((1, 2, 3))
+    >>> np.transpose(a, (1, 0, 2)).shape
+    (2, 1, 3)
+
+    >>> a = np.ones((2, 3, 4, 5))
+    >>> np.transpose(a).shape
+    (5, 4, 3, 2)
+
+    >>> a = np.arange(3*4*5).reshape((3, 4, 5))
+    >>> np.transpose(a, (-1, 0, -2)).shape
+    (5, 3, 4)
+
+    """
+    return _wrapfunc(a, 'transpose', axes)
+
+
+def _matrix_transpose_dispatcher(x):
+    return (x,)
+
+@array_function_dispatch(_matrix_transpose_dispatcher)
+def matrix_transpose(x, /):
+    """
+    Transposes a matrix (or a stack of matrices) ``x``.
+
+    This function is Array API compatible.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array having shape (..., M, N) and whose two innermost
+        dimensions form ``MxN`` matrices.
+
+    Returns
+    -------
+    out : ndarray
+        An array containing the transpose for each matrix and having shape
+        (..., N, M).
+
+    See Also
+    --------
+    transpose : Generic transpose method.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.matrix_transpose([[1, 2], [3, 4]])
+    array([[1, 3],
+           [2, 4]])
+
+    >>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
+    array([[[1, 3],
+            [2, 4]],
+           [[5, 7],
+            [6, 8]]])
+
+    """
+    x = asanyarray(x)
+    if x.ndim < 2:
+        raise ValueError(
+            f"Input array must be at least 2-dimensional, but it is {x.ndim}"
+        )
+    return swapaxes(x, -1, -2)
+
+
+def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, kth, axis=-1, kind='introselect', order=None):
+    """
+    Return a partitioned copy of an array.
+
+    Creates a copy of the array and partially sorts it in such a way that
+    the value of the element in k-th position is in the position it would be
+    in a sorted array. In the output array, all elements smaller than the
+    k-th element are located to the left of this element and all equal or
+    greater are located to its right. The ordering of the elements in the
+    two partitions on either side of the k-th element in the output array
+    is undefined.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be sorted.
+    kth : int or sequence of ints
+        Element index to partition by. The k-th value of the element
+        will be in its final sorted position and all smaller elements
+        will be moved before it and all equal or greater elements behind
+        it. The order of all elements in the partitions is undefined. If
+        provided with a sequence of k-th it will partition all elements
+        indexed by k-th of them into their sorted position at once.
+
+        .. deprecated:: 1.22.0
+            Passing booleans as index is deprecated.
+    axis : int or None, optional
+        Axis along which to sort. If None, the array is flattened before
+        sorting. The default is -1, which sorts along the last axis.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument
+        specifies which fields to compare first, second, etc. A single
+        field can be specified as a string. Not all fields need be
+        specified, but unspecified fields will still be used, in the
+        order in which they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    partitioned_array : ndarray
+        Array of the same type and shape as `a`.
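+
+        A common use of this guarantee is selecting the ``k`` smallest
+        values without a full sort (a small sketch; values chosen
+        arbitrarily):
+
+        >>> x = np.array([7, 1, 5, 2, 9])
+        >>> np.partition(x, 2)[:3]
+        array([1, 2, 5])  # may vary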
+ + See Also + -------- + ndarray.partition : Method to sort an array in-place. + argpartition : Indirect partition. + sort : Full sorting + + Notes + ----- + The various selection algorithms are characterized by their average + speed, worst case performance, work space size, and whether they are + stable. A stable sort keeps items with the same key in the same + relative order. The available algorithms have the following + properties: + + ================= ======= ============= ============ ======= + kind speed worst case work space stable + ================= ======= ============= ============ ======= + 'introselect' 1 O(n) 0 no + ================= ======= ============= ============ ======= + + All the partition algorithms make temporary copies of the data when + partitioning along any but the last axis. Consequently, + partitioning along the last axis is faster and uses less space than + partitioning along any other axis. + + The sort order for complex numbers is lexicographic. If both the + real and imaginary parts are non-nan then the order is determined by + the real parts except when they are equal, in which case the order + is determined by the imaginary parts. + + The sort order of ``np.nan`` is bigger than ``np.inf``. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) + >>> p = np.partition(a, 4) + >>> p + array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7]) # may vary + + ``p[4]`` is 2; all elements in ``p[:4]`` are less than or equal + to ``p[4]``, and all elements in ``p[5:]`` are greater than or + equal to ``p[4]``. The partition is:: + + [0, 1, 2, 1], [2], [5, 2, 3, 3, 6, 7, 7, 7, 7] + + The next example shows the use of multiple values passed to `kth`. + + >>> p2 = np.partition(a, (4, 8)) + >>> p2 + array([0, 1, 2, 1, 2, 3, 3, 2, 5, 6, 7, 7, 7, 7]) + + ``p2[4]`` is 2 and ``p2[8]`` is 5. All elements in ``p2[:4]`` + are less than or equal to ``p2[4]``, all elements in ``p2[5:8]`` + are greater than or equal to ``p2[4]`` and less than or equal to + ``p2[8]``, and all elements in ``p2[9:]`` are greater than or + equal to ``p2[8]``. The partition is:: + + [0, 1, 2, 1], [2], [3, 3, 2], [5], [6, 7, 7, 7, 7] + """ + if axis is None: + # flatten returns (1, N) for np.matrix, so always use the last axis + a = asanyarray(a).flatten() + axis = -1 + else: + a = asanyarray(a).copy(order="K") + a.partition(kth, axis=axis, kind=kind, order=order) + return a + + +def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_argpartition_dispatcher) +def argpartition(a, kth, axis=-1, kind='introselect', order=None): + """ + Perform an indirect partition along the given axis using the + algorithm specified by the `kind` keyword. It returns an array of + indices of the same shape as `a` that index data along the given + axis in partitioned order. + + Parameters + ---------- + a : array_like + Array to sort. + kth : int or sequence of ints + Element index to partition by. The k-th element will be in its + final sorted position and all smaller elements will be moved + before it and all larger elements behind it. The order of all + elements in the partitions is undefined. If provided with a + sequence of k-th it will partition all of them into their sorted + position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int or None, optional + Axis along which to sort. The default is -1 (the last axis). If + None, the flattened array is used. 
+ kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect' + order : str or list of str, optional + When `a` is an array with fields defined, this argument + specifies which fields to compare first, second, etc. A single + field can be specified as a string, and not all fields need be + specified, but unspecified fields will still be used, in the + order in which they come up in the dtype, to break ties. + + Returns + ------- + index_array : ndarray, int + Array of indices that partition `a` along the specified axis. + If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`. + More generally, ``np.take_along_axis(a, index_array, axis=axis)`` + always yields the partitioned `a`, irrespective of dimensionality. + + See Also + -------- + partition : Describes partition algorithms used. + ndarray.partition : Inplace partition. + argsort : Full indirect sort. + take_along_axis : Apply ``index_array`` from argpartition + to an array as if by calling partition. + + Notes + ----- + The returned indices are not guaranteed to be sorted according to + the values. Furthermore, the default selection algorithm ``introselect`` + is unstable, and hence the returned indices are not guaranteed + to be the earliest/latest occurrence of the element. + + `argpartition` works for real/complex inputs with nan values, + see `partition` for notes on the enhanced sort order and + different selection algorithms. + + Examples + -------- + One dimensional array: + + >>> import numpy as np + >>> x = np.array([3, 4, 2, 1]) + >>> x[np.argpartition(x, 3)] + array([2, 1, 3, 4]) # may vary + >>> x[np.argpartition(x, (1, 3))] + array([1, 2, 3, 4]) # may vary + + >>> x = [3, 4, 2, 1] + >>> np.array(x)[np.argpartition(x, 3)] + array([2, 1, 3, 4]) # may vary + + Multi-dimensional array: + + >>> x = np.array([[3, 4, 2], [1, 3, 1]]) + >>> index_array = np.argpartition(x, kth=1, axis=-1) + >>> # below is the same as np.partition(x, kth=1) + >>> np.take_along_axis(x, index_array, axis=-1) + array([[2, 3, 4], + [1, 1, 3]]) + + """ + return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order) + + +def _sort_dispatcher(a, axis=None, kind=None, order=None, *, stable=None): + return (a,) + + +@array_function_dispatch(_sort_dispatcher) +def sort(a, axis=-1, kind=None, order=None, *, stable=None): + """ + Return a sorted copy of an array. + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort or radix sort under the covers and, + in general, the actual implementation will vary with data type. + The 'mergesort' option is retained for backwards compatibility. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + stable : bool, optional + Sort stability. If ``True``, the returned array will maintain + the relative order of ``a`` values which compare as equal. + If ``False`` or ``None``, this is not guaranteed. 
Internally,
+        this option selects ``kind='stable'``. Default: ``None``.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    sorted_array : ndarray
+        Array of the same type and shape as `a`.
+
+    See Also
+    --------
+    ndarray.sort : Method to sort an array in-place.
+    argsort : Indirect sort.
+    lexsort : Indirect stable sort on multiple keys.
+    searchsorted : Find elements in a sorted array.
+    partition : Partial sort.
+
+    Notes
+    -----
+    The various sorting algorithms are characterized by their average speed,
+    worst case performance, work space size, and whether they are stable. A
+    stable sort keeps items with the same key in the same relative
+    order. The four algorithms implemented in NumPy have the following
+    properties:
+
+    =========== ======= ============= ============ ========
+       kind      speed   worst case    work space   stable
+    =========== ======= ============= ============ ========
+    'quicksort'    1     O(n^2)            0          no
+    'heapsort'     3     O(n*log(n))       0          no
+    'mergesort'    2     O(n*log(n))      ~n/2        yes
+    'timsort'      2     O(n*log(n))      ~n/2        yes
+    =========== ======= ============= ============ ========
+
+    .. note:: The datatype determines which of 'mergesort' or 'timsort'
+        is actually used, even if 'mergesort' is specified. User selection
+        at a finer scale is not currently available.
+
+    For performance, ``sort`` makes a temporary copy if needed to make the
+    data contiguous in memory along the sort axis. For even better
+    performance and reduced memory consumption, ensure that the array is
+    already contiguous along the sort axis.
+
+    The sort order for complex numbers is lexicographic. If both the real
+    and imaginary parts are non-nan then the order is determined by the
+    real parts except when they are equal, in which case the order is
+    determined by the imaginary parts.
+
+    Prior to numpy 1.4.0 sorting real and complex arrays containing nan
+    values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+    values are sorted to the end. The extended sort order is:
+
+    * Real: [R, nan]
+    * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+    where R is a non-nan real value. Complex values with the same nan
+    placements are sorted according to the non-nan part if it exists.
+    Non-nan values are sorted as before.
+
+    quicksort has been changed to
+    `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+    When sorting does not make enough progress it switches to
+    `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+    This implementation makes quicksort O(n*log(n)) in the worst case.
+
+    'stable' automatically chooses the best stable sorting algorithm
+    for the data type being sorted.
+    It, along with 'mergesort', is currently mapped to
+    `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+    or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+    depending on the data type.
+    API forward compatibility currently limits the
+    ability to select the implementation and it is hardwired for the
+    different data types.
+
+    Timsort is added for better performance on already or nearly
+    sorted data. On random data timsort is almost identical to
+    mergesort. It is now used for stable sort while quicksort is still the
+    default sort if none is chosen. For timsort details, refer to
+    CPython's ``Objects/listsort.txt``.
+    'mergesort' and 'stable' are mapped to radix sort for integer data
+    types. Radix sort is an O(n) sort instead of O(n log n).
+
+    NaT now sorts to the end of arrays for consistency with NaN.
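+
+    For instance, a quick illustration of the extended sort order (values
+    chosen arbitrarily):
+
+    >>> np.sort(np.array([3.0, np.nan, 1.0]))
+    array([ 1.,  3., nan])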
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1,4],[3,1]])
+    >>> np.sort(a)                # sort along the last axis
+    array([[1, 4],
+           [1, 3]])
+    >>> np.sort(a, axis=None)     # sort the flattened array
+    array([1, 1, 3, 4])
+    >>> np.sort(a, axis=0)        # sort along the first axis
+    array([[1, 1],
+           [3, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
+    >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
+    ...           ('Galahad', 1.7, 38)]
+    >>> a = np.array(values, dtype=dtype)       # create a structured array
+    >>> np.sort(a, order='height')  # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+           ('Lancelot', 1.8999999999999999, 38)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    Sort by age, then height if ages are equal:
+
+    >>> np.sort(a, order=['age', 'height'])  # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+           ('Arthur', 1.8, 41)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.sort(axis=axis, kind=kind, order=order, stable=stable)
+    return a
+
+
+def _argsort_dispatcher(a, axis=None, kind=None, order=None, *, stable=None):
+    return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind=None, order=None, *, stable=None):
+    """
+    Returns the indices that would sort an array.
+
+    Perform an indirect sort along the given axis using the algorithm
+    specified by the `kind` keyword. It returns an array of indices of
+    the same shape as `a` that index data along the given axis in sorted
+    order.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both
+        'stable' and 'mergesort' use timsort or radix sort under the
+        covers and, in general, the actual implementation will vary with
+        data type. The 'mergesort' option is retained for backwards
+        compatibility.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+    stable : bool, optional
+        Sort stability. If ``True``, the returned array will maintain
+        the relative order of ``a`` values which compare as equal.
+        If ``False`` or ``None``, this is not guaranteed. Internally,
+        this option selects ``kind='stable'``. Default: ``None``.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that sort `a` along the specified `axis`.
+        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the sorted `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    sort : Describes sorting algorithms used.
+    lexsort : Indirect stable sort with multiple keys.
+    ndarray.sort : Inplace sort.
+    argpartition : Indirect partial sort.
+    take_along_axis : Apply ``index_array`` from argsort
+                      to an array as if by calling sort.
+
+    Notes
+    -----
+    See `sort` for notes on the different sorting algorithms.
+
+    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> import numpy as np
+    >>> x = np.array([3, 1, 2])
+    >>> np.argsort(x)
+    array([1, 2, 0])
+
+    Two-dimensional array:
+
+    >>> x = np.array([[0, 3], [2, 2]])
+    >>> x
+    array([[0, 3],
+           [2, 2]])
+
+    >>> ind = np.argsort(x, axis=0)  # sorts along first axis (down)
+    >>> ind
+    array([[0, 1],
+           [1, 0]])
+    >>> np.take_along_axis(x, ind, axis=0)  # same as np.sort(x, axis=0)
+    array([[0, 2],
+           [2, 3]])
+
+    >>> ind = np.argsort(x, axis=1)  # sorts along last axis (across)
+    >>> ind
+    array([[0, 1],
+           [0, 1]])
+    >>> np.take_along_axis(x, ind, axis=1)  # same as np.sort(x, axis=1)
+    array([[0, 3],
+           [2, 2]])
+
+    Indices of the sorted elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
+    >>> ind
+    (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
+    >>> x[ind]  # same as np.sort(x, axis=None)
+    array([0, 2, 2, 3])
+
+    Sorting with keys:
+
+    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+    >>> x
+    array([(1, 0), (0, 1)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    >>> np.argsort(x, order=('x','y'))
+    array([1, 0])
+
+    >>> np.argsort(x, order=('y','x'))
+    array([0, 1])
+
+    """
+    return _wrapfunc(
+        a, 'argsort', axis=axis, kind=kind, order=order, stable=stable
+    )
+
+def _argmax_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
+    return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
+def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Returns the indices of the maximum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the array.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as
+        ``a.shape`` with the dimension along `axis` removed. If `keepdims`
+        is set to True, then the size of `axis` will be 1 with the
+        resulting array having same shape as ``a.shape``.
+
+    See Also
+    --------
+    ndarray.argmax, argmin
+    amax : The maximum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+    take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+                      from argmax to an array as if by calling max.
+ + Notes + ----- + In case of multiple occurrences of the maximum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmax(a) + 5 + >>> np.argmax(a, axis=0) + array([1, 1, 1]) + >>> np.argmax(a, axis=1) + array([2, 2]) + + Indexes of the maximal elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) + >>> ind + (1, 2) + >>> a[ind] + 15 + + >>> b = np.arange(6) + >>> b[1] = 5 + >>> b + array([0, 5, 2, 3, 4, 5]) + >>> np.argmax(b) # Only the first occurrence is returned. + 1 + + >>> x = np.array([[4,2,3], [1,0,3]]) + >>> index_array = np.argmax(x, axis=-1) + >>> # Same as np.amax(x, axis=-1, keepdims=True) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) + array([[4], + [3]]) + >>> # Same as np.amax(x, axis=-1) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), + ... axis=-1).squeeze(axis=-1) + array([4, 3]) + + Setting `keepdims` to `True`, + + >>> x = np.arange(24).reshape((2, 3, 4)) + >>> res = np.argmax(x, axis=1, keepdims=True) + >>> res.shape + (2, 1, 4) + """ + kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {} + return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds) + + +def _argmin_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue): + return (a, out) + + +@array_function_dispatch(_argmin_dispatcher) +def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Returns the indices of the minimum values along an axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise + along the specified axis. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray of ints + Array of indices into the array. It has the same shape as `a.shape` + with the dimension along `axis` removed. If `keepdims` is set to True, + then the size of `axis` will be 1 with the resulting array having same + shape as `a.shape`. + + See Also + -------- + ndarray.argmin, argmax + amin : The minimum value along a given axis. + unravel_index : Convert a flat index into an index tuple. + take_along_axis : Apply ``np.expand_dims(index_array, axis)`` + from argmin to an array as if by calling min. + + Notes + ----- + In case of multiple occurrences of the minimum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmin(a) + 0 + >>> np.argmin(a, axis=0) + array([0, 0, 0]) + >>> np.argmin(a, axis=1) + array([0, 0]) + + Indices of the minimum elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) + >>> ind + (0, 0) + >>> a[ind] + 10 + + >>> b = np.arange(6) + 10 + >>> b[4] = 10 + >>> b + array([10, 11, 12, 13, 10, 15]) + >>> np.argmin(b) # Only the first occurrence is returned. 
+    0
+
+    >>> x = np.array([[4,2,3], [1,0,3]])
+    >>> index_array = np.argmin(x, axis=-1)
+    >>> # Same as np.amin(x, axis=-1, keepdims=True)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+    array([[2],
+           [0]])
+    >>> # Same as np.amin(x, axis=-1)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1),
+    ...                    axis=-1).squeeze(axis=-1)
+    array([2, 0])
+
+    Setting `keepdims` to `True`,
+
+    >>> x = np.arange(24).reshape((2, 3, 4))
+    >>> res = np.argmin(x, axis=1, keepdims=True)
+    >>> res.shape
+    (2, 1, 4)
+    """
+    kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
+    return _wrapfunc(a, 'argmin', axis=axis, out=out, **kwds)
+
+
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+    return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
+def searchsorted(a, v, side='left', sorter=None):
+    """
+    Find indices where elements should be inserted to maintain order.
+
+    Find the indices into a sorted array `a` such that, if the
+    corresponding elements in `v` were inserted before the indices, the
+    order of `a` would be preserved.
+
+    Assuming that `a` is sorted:
+
+    ======  ============================
+    `side`  returned index `i` satisfies
+    ======  ============================
+    left    ``a[i-1] < v <= a[i]``
+    right   ``a[i-1] <= v < a[i]``
+    ======  ============================
+
+    Parameters
+    ----------
+    a : 1-D array_like
+        Input array. If `sorter` is None, then it must be sorted in
+        ascending order, otherwise `sorter` must be an array of indices
+        that sort it.
+    v : array_like
+        Values to insert into `a`.
+    side : {'left', 'right'}, optional
+        If 'left', the index of the first suitable location found is given.
+        If 'right', return the last such index. If there is no suitable
+        index, return either 0 or N (where N is the length of `a`).
+    sorter : 1-D array_like, optional
+        Optional array of integer indices that sort array a into ascending
+        order. They are typically the result of argsort.
+
+    Returns
+    -------
+    indices : int or array of ints
+        Array of insertion points with the same shape as `v`,
+        or an integer if `v` is a scalar.
+
+    See Also
+    --------
+    sort : Return a sorted copy of an array.
+    histogram : Produce histogram from 1-D data.
+
+    Notes
+    -----
+    Binary search is used to find the required insertion points.
+
+    As of NumPy 1.4.0 `searchsorted` works with real/complex arrays
+    containing `nan` values. The enhanced sort order is documented in
+    `sort`.
+
+    This function uses the same algorithm as the builtin python
+    `bisect.bisect_left` (``side='left'``) and `bisect.bisect_right`
+    (``side='right'``) functions, which is also vectorized
+    in the `v` argument.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.searchsorted([11,12,13,14,15], 13)
+    2
+    >>> np.searchsorted([11,12,13,14,15], 13, side='right')
+    3
+    >>> np.searchsorted([11,12,13,14,15], [-10, 20, 12, 13])
+    array([0, 5, 1, 2])
+
+    When `sorter` is used, the returned indices refer to the sorted
+    array of `a` and not `a` itself:
+
+    >>> a = np.array([40, 10, 20, 30])
+    >>> sorter = np.argsort(a)
+    >>> sorter
+    array([1, 2, 3, 0])  # Indices that would sort the array 'a'
+    >>> result = np.searchsorted(a, 25, sorter=sorter)
+    >>> result
+    2
+    >>> a[sorter[result]]
+    30  # The element at index 2 of the sorted array is 30.
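+
+    A sorted array stays sorted when values are inserted at the returned
+    positions (a small sketch; values chosen arbitrarily):
+
+    >>> arr = np.array([1, 3, 5])
+    >>> np.insert(arr, np.searchsorted(arr, 4), 4)
+    array([1, 3, 4, 5])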
+ """ + return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) + + +def _resize_dispatcher(a, new_shape): + return (a,) + + +@array_function_dispatch(_resize_dispatcher) +def resize(a, new_shape): + """ + Return a new array with the specified shape. + + If the new array is larger than the original array, then the new + array is filled with repeated copies of `a`. Note that this behavior + is different from a.resize(new_shape) which fills with zeros instead + of repeated copies of `a`. + + Parameters + ---------- + a : array_like + Array to be resized. + + new_shape : int or tuple of int + Shape of resized array. + + Returns + ------- + reshaped_array : ndarray + The new array is formed from the data in the old array, repeated + if necessary to fill out the required number of elements. The + data are repeated iterating over the array in C-order. + + See Also + -------- + numpy.reshape : Reshape an array without changing the total size. + numpy.pad : Enlarge and pad an array. + numpy.repeat : Repeat elements of an array. + ndarray.resize : resize an array in-place. + + Notes + ----- + When the total size of the array does not change `~numpy.reshape` should + be used. In most other cases either indexing (to reduce the size) + or padding (to increase the size) may be a more appropriate solution. + + Warning: This functionality does **not** consider axes separately, + i.e. it does not apply interpolation/extrapolation. + It fills the return array with the required number of elements, iterating + over `a` in C-order, disregarding axes (and cycling back from the start if + the new shape is larger). This functionality is therefore not suitable to + resize images, or data where each axis represents a separate and distinct + entity. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[0,1],[2,3]]) + >>> np.resize(a,(2,3)) + array([[0, 1, 2], + [3, 0, 1]]) + >>> np.resize(a,(1,4)) + array([[0, 1, 2, 3]]) + >>> np.resize(a,(2,4)) + array([[0, 1, 2, 3], + [0, 1, 2, 3]]) + + """ + if isinstance(new_shape, (int, nt.integer)): + new_shape = (new_shape,) + + a = ravel(a) + + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError( + 'all elements of `new_shape` must be non-negative' + ) + + if a.size == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. + return np.zeros_like(a, shape=new_shape) + + # ceiling division without negating new_size + repeats = (new_size + a.size - 1) // a.size + a = concatenate((a,) * repeats)[:new_size] + + return reshape(a, new_shape) + + +def _squeeze_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_squeeze_dispatcher) +def squeeze(a, axis=None): + """ + Remove axes of length one from `a`. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Selects a subset of the entries of length one in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + Returns + ------- + squeezed : ndarray + The input array, but with all or a subset of the + dimensions of length 1 removed. This is always `a` itself + or a view into `a`. Note that if all axes are squeezed, + the result is a 0d array and not a scalar. 
+ + Raises + ------ + ValueError + If `axis` is not None, and an axis being squeezed is not of length 1 + + See Also + -------- + expand_dims : The inverse operation, adding entries of length one + reshape : Insert, remove, and combine dimensions, and resize existing ones + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[[0], [1], [2]]]) + >>> x.shape + (1, 3, 1) + >>> np.squeeze(x).shape + (3,) + >>> np.squeeze(x, axis=0).shape + (3, 1) + >>> np.squeeze(x, axis=1).shape + Traceback (most recent call last): + ... + ValueError: cannot select an axis to squeeze out which has size + not equal to one + >>> np.squeeze(x, axis=2).shape + (1, 3) + >>> x = np.array([[1234]]) + >>> x.shape + (1, 1) + >>> np.squeeze(x) + array(1234) # 0d array + >>> np.squeeze(x).shape + () + >>> np.squeeze(x)[()] + 1234 + + """ + try: + squeeze = a.squeeze + except AttributeError: + return _wrapit(a, 'squeeze', axis=axis) + if axis is None: + return squeeze() + else: + return squeeze(axis=axis) + + +def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None): + return (a,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(a, offset=0, axis1=0, axis2=1): + """ + Return specified diagonals. + + If `a` is 2-D, returns the diagonal of `a` with the given offset, + i.e., the collection of elements of the form ``a[i, i+offset]``. If + `a` has more than two dimensions, then the axes specified by `axis1` + and `axis2` are used to determine the 2-D sub-array whose diagonal is + returned. The shape of the resulting array can be determined by + removing `axis1` and `axis2` and appending an index to the right equal + to the size of the resulting diagonals. + + In versions of NumPy prior to 1.7, this function always returned a new, + independent array containing a copy of the values in the diagonal. + + In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, + but depending on this fact is deprecated. Writing to the resulting + array continues to work as it used to, but a FutureWarning is issued. + + Starting in NumPy 1.9 it returns a read-only view on the original array. + Attempting to write to the resulting array will produce an error. + + In some future release, it will return a read/write view and writing to + the returned array will alter your original array. The returned array + will have the same type as the input array. + + If you don't write to the array returned by this function, then you can + just ignore all of the above. + + If you depend on the current behavior, then we suggest copying the + returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead + of just ``np.diagonal(a)``. This will work with both past and future + versions of NumPy. + + Parameters + ---------- + a : array_like + Array from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be positive or + negative. Defaults to main diagonal (0). + axis1 : int, optional + Axis to be used as the first axis of the 2-D sub-arrays from which + the diagonals should be taken. Defaults to first axis (0). + axis2 : int, optional + Axis to be used as the second axis of the 2-D sub-arrays from + which the diagonals should be taken. Defaults to second axis (1). 
+ + Returns + ------- + array_of_diagonals : ndarray + If `a` is 2-D, then a 1-D array containing the diagonal and of the + same type as `a` is returned unless `a` is a `matrix`, in which case + a 1-D array rather than a (2-D) `matrix` is returned in order to + maintain backward compatibility. + + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` + are removed, and a new axis inserted at the end corresponding to the + diagonal. + + Raises + ------ + ValueError + If the dimension of `a` is less than 2. + + See Also + -------- + diag : MATLAB work-a-like for 1-D and 2-D arrays. + diagflat : Create diagonal arrays. + trace : Sum along diagonals. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> a.diagonal() + array([0, 3]) + >>> a.diagonal(1) + array([1]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2,2,2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> a.diagonal(0, # Main diagonals of two arrays created by skipping + ... 0, # across the outer(left)-most axis last and + ... 1) # the "middle" (row) axis first. + array([[0, 6], + [1, 7]]) + + The sub-arrays whose main diagonals we just obtained; note that each + corresponds to fixing the right-most (column) axis, and that the + diagonals are "packed" in rows. + + >>> a[:,:,0] # main diagonal is [0 6] + array([[0, 2], + [4, 6]]) + >>> a[:,:,1] # main diagonal is [1 7] + array([[1, 3], + [5, 7]]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.fliplr(a).diagonal() # Horizontal flip + array([2, 4, 6]) + >>> np.flipud(a).diagonal() # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + """ + if isinstance(a, np.matrix): + # Make diagonal of matrix 1-D to preserve backward compatibility. + return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + else: + return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + + +def _trace_dispatcher( + a, offset=None, axis1=None, axis2=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_trace_dispatcher) +def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + Return the sum along diagonals of the array. + + If `a` is 2-D, the sum along its diagonal with the given offset + is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. + + If `a` has more than two dimensions, then the axes specified by axis1 and + axis2 are used to determine the 2-D sub-arrays whose traces are returned. + The shape of the resulting array is the same as that of `a` with `axis1` + and `axis2` removed. + + Parameters + ---------- + a : array_like + Input array, from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be both positive + and negative. Defaults to 0. + axis1, axis2 : int, optional + Axes to be used as the first and second axis of the 2-D sub-arrays + from which the diagonals should be taken. Defaults are the first two + axes of `a`. + dtype : dtype, optional + Determines the data-type of the returned array and of the accumulator + where the elements are summed. If dtype has the value None and `a` is + of integer type of precision less than the default integer + precision, then the default integer precision is used. 
Otherwise, + the precision is the same as that of `a`. + out : ndarray, optional + Array into which the output is placed. Its type is preserved and + it must be of the right shape to hold the output. + + Returns + ------- + sum_along_diagonals : ndarray + If `a` is 2-D, the sum along the diagonal is returned. If `a` has + larger dimensions, then an array of sums along diagonals is returned. + + See Also + -------- + diag, diagonal, diagflat + + Examples + -------- + >>> import numpy as np + >>> np.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2,2,2)) + >>> np.trace(a) + array([6, 8]) + + >>> a = np.arange(24).reshape((2,2,2,3)) + >>> np.trace(a).shape + (2, 3) + + """ + if isinstance(a, np.matrix): + # Get trace of matrix via an array to preserve backward compatibility. + return asarray(a).trace( + offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out + ) + else: + return asanyarray(a).trace( + offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out + ) + + +def _ravel_dispatcher(a, order=None): + return (a,) + + +@array_function_dispatch(_ravel_dispatcher) +def ravel(a, order='C'): + """Return a contiguous flattened array. + + A 1-D array, containing the elements of the input, is returned. A copy is + made only if needed. + + As of NumPy 1.10, the returned array will have the same type as the input + array. (for example, a masked array will be returned for a masked array + input) + + Parameters + ---------- + a : array_like + Input array. The elements in `a` are read in the order specified by + `order`, and packed as a 1-D array. + order : {'C','F', 'A', 'K'}, optional + + The elements of `a` are read using this index order. 'C' means + to index the elements in row-major, C-style order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to index the elements + in column-major, Fortran-style order, with the + first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of + the memory layout of the underlying array, and only refer to + the order of axis indexing. 'A' means to read the elements in + Fortran-like index order if `a` is Fortran *contiguous* in + memory, C-like order otherwise. 'K' means to read the + elements in the order they occur in memory, except for + reversing the data when strides are negative. By default, 'C' + index order is used. + + Returns + ------- + y : array_like + y is a contiguous 1-D array of the same subtype as `a`, + with shape ``(a.size,)``. + Note that matrices are special cased for backward compatibility, + if `a` is a matrix, then y is a 1-D ndarray. + + See Also + -------- + ndarray.flat : 1-D iterator over an array. + ndarray.flatten : 1-D array copy of the elements of an array + in row-major order. + ndarray.reshape : Change the shape of an array without changing its data. + + Notes + ----- + In row-major, C-style order, in two dimensions, the row index + varies the slowest, and the column index the quickest. This can + be generalized to multiple dimensions, where row-major order + implies that the index along the first axis varies slowest, and + the index along the last quickest. The opposite holds for + column-major, Fortran-style index ordering. + + When a view is desired in as many cases as possible, ``arr.reshape(-1)`` + may be preferable. However, ``ravel`` supports ``K`` in the optional + ``order`` argument while ``reshape`` does not. + + Examples + -------- + It is equivalent to ``reshape(-1, order=order)``. 
+ + >>> import numpy as np + >>> x = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.ravel(x) + array([1, 2, 3, 4, 5, 6]) + + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) + + >>> np.ravel(x, order='F') + array([1, 4, 2, 5, 3, 6]) + + When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: + + >>> np.ravel(x.T) + array([1, 4, 2, 5, 3, 6]) + >>> np.ravel(x.T, order='A') + array([1, 2, 3, 4, 5, 6]) + + When ``order`` is 'K', it will preserve orderings that are neither 'C' + nor 'F', but won't reverse axes: + + >>> a = np.arange(3)[::-1]; a + array([2, 1, 0]) + >>> a.ravel(order='C') + array([2, 1, 0]) + >>> a.ravel(order='K') + array([2, 1, 0]) + + >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a + array([[[ 0, 2, 4], + [ 1, 3, 5]], + [[ 6, 8, 10], + [ 7, 9, 11]]]) + >>> a.ravel(order='C') + array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) + >>> a.ravel(order='K') + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + if isinstance(a, np.matrix): + return asarray(a).ravel(order=order) + else: + return asanyarray(a).ravel(order=order) + + +def _nonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_nonzero_dispatcher) +def nonzero(a): + """ + Return the indices of the elements that are non-zero. + + Returns a tuple of arrays, one for each dimension of `a`, + containing the indices of the non-zero elements in that + dimension. The values in `a` are always tested and returned in + row-major, C-style order. + + To group the indices by element, rather than dimension, use `argwhere`, + which returns a row for each non-zero element. + + .. note:: + + When called on a zero-d array or scalar, ``nonzero(a)`` is treated + as ``nonzero(atleast_1d(a))``. + + .. deprecated:: 1.17.0 + + Use `atleast_1d` explicitly if this behavior is deliberate. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Notes + ----- + While the nonzero values can be obtained with ``a[nonzero(a)]``, it is + recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + will correctly handle 0-d arrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) + >>> x + array([[3, 0, 0], + [0, 4, 0], + [5, 6, 0]]) + >>> np.nonzero(x) + (array([0, 1, 2, 2]), array([0, 1, 0, 1])) + + >>> x[np.nonzero(x)] + array([3, 4, 5, 6]) + >>> np.transpose(np.nonzero(x)) + array([[0, 0], + [1, 1], + [2, 0], + [2, 1]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, np.nonzero(a > 3) + yields the indices of the `a` where the condition is true. + + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> a > 3 + array([[False, False, False], + [ True, True, True], + [ True, True, True]]) + >>> np.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + Using this result to index `a` is equivalent to using the mask directly: + + >>> a[np.nonzero(a > 3)] + array([4, 5, 6, 7, 8, 9]) + >>> a[a > 3] # prefer this spelling + array([4, 5, 6, 7, 8, 9]) + + ``nonzero`` can also be called as a method of the array. 
+ + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return _wrapfunc(a, 'nonzero') + + +def _shape_dispatcher(a): + return (a,) + + +@array_function_dispatch(_shape_dispatcher) +def shape(a): + """ + Return the shape of an array. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + shape : tuple of ints + The elements of the shape tuple give the lengths of the + corresponding array dimensions. + + See Also + -------- + len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with + ``N>=1``. + ndarray.shape : Equivalent array method. + + Examples + -------- + >>> import numpy as np + >>> np.shape(np.eye(3)) + (3, 3) + >>> np.shape([[1, 3]]) + (1, 2) + >>> np.shape([0]) + (1,) + >>> np.shape(0) + () + + >>> a = np.array([(1, 2), (3, 4), (5, 6)], + ... dtype=[('x', 'i4'), ('y', 'i4')]) + >>> np.shape(a) + (3,) + >>> a.shape + (3,) + + """ + try: + result = a.shape + except AttributeError: + result = asarray(a).shape + return result + + +def _compress_dispatcher(condition, a, axis=None, out=None): + return (condition, a, out) + + +@array_function_dispatch(_compress_dispatcher) +def compress(condition, a, axis=None, out=None): + """ + Return selected slices of an array along given axis. + + When working along a given axis, a slice along that axis is returned in + `output` for each index where `condition` evaluates to True. When + working on a 1-D array, `compress` is equivalent to `extract`. + + Parameters + ---------- + condition : 1-D array of bools + Array that selects which entries to return. If len(condition) + is less than the size of `a` along the given axis, then output is + truncated to the length of the condition array. + a : array_like + Array from which to extract a part. + axis : int, optional + Axis along which to take slices. If None (default), work on the + flattened array. + out : ndarray, optional + Output array. Its type is preserved and it must be of the right + shape to hold the output. + + Returns + ------- + compressed_array : ndarray + A copy of `a` without the slices along axis for which `condition` + is false. + + See Also + -------- + take, choose, diag, diagonal, select + ndarray.compress : Equivalent method in ndarray + extract : Equivalent method when working on 1-D arrays + :ref:`ufuncs-output-type` + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4], [5, 6]]) + >>> a + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.compress([0, 1], a, axis=0) + array([[3, 4]]) + >>> np.compress([False, True, True], a, axis=0) + array([[3, 4], + [5, 6]]) + >>> np.compress([False, True], a, axis=1) + array([[2], + [4], + [6]]) + + Working on the flattened array does not return slices along an axis but + selects elements. + + >>> np.compress([False, True], a) + array([2]) + + """ + return _wrapfunc(a, 'compress', condition, axis=axis, out=out) + + +def _clip_dispatcher(a, a_min=None, a_max=None, out=None, *, min=None, + max=None, **kwargs): + return (a, a_min, a_max, out, min, max) + + +@array_function_dispatch(_clip_dispatcher) +def clip(a, a_min=np._NoValue, a_max=np._NoValue, out=None, *, + min=np._NoValue, max=np._NoValue, **kwargs): + """ + Clip (limit) the values in an array. + + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. + + Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``. 
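+
+    For instance, the equivalence can be checked directly (a small sketch;
+    values chosen arbitrarily):
+
+    >>> a = np.array([-2, 0, 3, 7])
+    >>> np.array_equal(np.clip(a, 0, 5), np.minimum(5, np.maximum(a, 0)))
+    True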
+ + No check is performed to ensure ``a_min < a_max``. + + Parameters + ---------- + a : array_like + Array containing elements to clip. + a_min, a_max : array_like or None + Minimum and maximum value. If ``None``, clipping is not performed on + the corresponding edge. If both ``a_min`` and ``a_max`` are ``None``, + the elements of the returned array stay the same. Both are broadcasted + against ``a``. + out : ndarray, optional + The results will be placed in this array. It may be the input + array for in-place clipping. `out` must be of the right shape + to hold the output. Its type is preserved. + min, max : array_like or None + Array API compatible alternatives for ``a_min`` and ``a_max`` + arguments. Either ``a_min`` and ``a_max`` or ``min`` and ``max`` + can be passed at the same time. Default: ``None``. + + .. versionadded:: 2.1.0 + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + clipped_array : ndarray + An array with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. + + See Also + -------- + :ref:`ufuncs-output-type` + + Notes + ----- + When `a_min` is greater than `a_max`, `clip` returns an + array in which all values are equal to `a_max`, + as shown in the second example. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> np.clip(a, 8, 1) + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) + array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) + + """ + if a_min is np._NoValue and a_max is np._NoValue: + a_min = None if min is np._NoValue else min + a_max = None if max is np._NoValue else max + elif a_min is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_min'") + elif a_max is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_max'") + elif min is not np._NoValue or max is not np._NoValue: + raise ValueError("Passing `min` or `max` keyword argument when " + "`a_min` and `a_max` are provided is forbidden.") + + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) + + +def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_sum_dispatcher) +def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Sum of array elements over a given axis. + + Parameters + ---------- + a : array_like + Elements to sum. + axis : None or int or tuple of ints, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. If + axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. The dtype of `a` is used by default unless `a` + has an integer dtype of less precision than the default platform + integer. 
In that case, if `a` is signed then the platform integer + is used while if `a` is unsigned then an unsigned integer of the + same precision as the platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + Returns + ------- + sum_along_axis : ndarray + An array with the same shape as `a`, with the specified + axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + See Also + -------- + ndarray.sum : Equivalent method. + add: ``numpy.add.reduce`` equivalent function. + cumsum : Cumulative sum of array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + The sum of an empty array is the neutral element 0: + + >>> np.sum([]) + 0.0 + + For floating point numbers the numerical precision of sum (and + ``np.add.reduce``) is in general limited by directly adding each number + individually to the result causing rounding errors in every step. + However, often numpy will use a numerically better approach (partial + pairwise summation) leading to improved precision in many use-cases. + This improved precision is always provided when no ``axis`` is given. + When ``axis`` is given, it will depend on which axis is summed. + Technically, to provide the best speed possible, the improved precision + is only used when the summation is along the fast axis in memory. + Note that the exact precision may vary depending on other parameters. + In contrast to NumPy, Python's ``math.fsum`` function uses a slower but + more precise approach to summation. + Especially when summing a large number of lower precision floating point + numbers, such as ``float32``, numerical errors can become significant. + In such cases it can be advisable to use `dtype="float64"` to use a higher + precision for the output. 
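+
+    A minimal sketch of that pitfall, with values chosen to make the
+    rounding visible: ``2.0**24`` is where ``float32`` stops being able
+    to represent the next integer, so the two added ones are lost
+    entirely:
+
+    >>> np.sum([2.0**24, 1.0, 1.0], dtype=np.float32)
+    np.float32(16777216.0)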
+ + Examples + -------- + >>> import numpy as np + >>> np.sum([0.5, 1.5]) + 2.0 + >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) + np.int32(1) + >>> np.sum([[0, 1], [0, 5]]) + 6 + >>> np.sum([[0, 1], [0, 5]], axis=0) + array([0, 6]) + >>> np.sum([[0, 1], [0, 5]], axis=1) + array([1, 5]) + >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) + array([1., 5.]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + np.int8(-128) + + You can also start the sum with a value other than zero: + + >>> np.sum([10], initial=5) + 15 + """ + if isinstance(a, _gentype): + # 2018-02-25, 1.15.0 + warnings.warn( + "Calling np.sum(generator) is deprecated, and in the future will " + "give a different result. Use np.sum(np.fromiter(generator)) or " + "the python sum builtin instead.", + DeprecationWarning, stacklevel=2 + ) + + res = _sum_(a) + if out is not None: + out[...] = res + return out + return res + + return _wrapreduction( + a, np.add, 'sum', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where + ) + + +def _any_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=np._NoValue): + return (a, where, out) + + +@array_function_dispatch(_any_dispatcher) +def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether any array element along a given axis evaluates to True. + + Returns single boolean if `axis` is ``None`` + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical OR reduction is performed. + The default (``axis=None``) is to perform a logical OR over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output and its type is preserved + (e.g., if it is of type float, then it will remain so, returning + 1.0 for True and 0.0 for False, regardless of the type of `a`). + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `any` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for any `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + any : bool or ndarray + A new boolean or `ndarray` is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.any : equivalent method + + all : Test whether all elements along a given axis evaluate to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity evaluate + to `True` because these are not equal to zero. + + .. versionchanged:: 2.0 + Before NumPy 2.0, ``any`` did not return booleans for object dtype + input arrays. 
+ This behavior is still available via ``np.logical_or.reduce``. + + Examples + -------- + >>> import numpy as np + >>> np.any([[True, False], [True, True]]) + True + + >>> np.any([[True, False, True ], + ... [False, False, False]], axis=0) + array([ True, False, True]) + + >>> np.any([-1, 0, 5]) + True + + >>> np.any([[np.nan], [np.inf]], axis=1, keepdims=True) + array([[ True], + [ True]]) + + >>> np.any([[True, False], [False, False]], where=[[False], [True]]) + False + + >>> a = np.array([[1, 0, 0], + ... [0, 0, 1], + ... [0, 0, 0]]) + >>> np.any(a, axis=0) + array([ True, False, True]) + >>> np.any(a, axis=1) + array([ True, True, False]) + + >>> o=np.array(False) + >>> z=np.any([-1, 4, 5], out=o) + >>> z, o + (array(True), array(True)) + >>> # Check now that z is a reference to o + >>> z is o + True + >>> id(z), id(o) # identity of z and o # doctest: +SKIP + (191614240, 191614240) + + """ + return _wrapreduction_any_all(a, np.logical_or, 'any', axis, out, + keepdims=keepdims, where=where) + + +def _all_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_all_dispatcher) +def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether all array elements along a given axis evaluate to True. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical AND reduction is performed. + The default (``axis=None``) is to perform a logical AND over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. + It must have the same shape as the expected output and its + type is preserved (e.g., if ``dtype(out)`` is float, the result + will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` + for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `all` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for all `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + all : ndarray, bool + A new boolean or array is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.all : equivalent method + + any : Test whether any element along a given axis evaluates to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to `True` because these are not equal to zero. + + .. versionchanged:: 2.0 + Before NumPy 2.0, ``all`` did not return booleans for object dtype + input arrays. + This behavior is still available via ``np.logical_and.reduce``. 
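+
+    As an illustrative sketch of that difference on an object-dtype array
+    (the legacy path may return an original element rather than a boolean,
+    depending on the NumPy version)::
+
+        obj = np.array([1, 2], dtype=object)
+        np.all(obj)                 # a boolean result on NumPy >= 2.0
+        np.logical_and.reduce(obj)  # the legacy, possibly non-boolean result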
+ + Examples + -------- + >>> import numpy as np + >>> np.all([[True,False],[True,True]]) + False + + >>> np.all([[True,False],[True,True]], axis=0) + array([ True, False]) + + >>> np.all([-1, 4, 5]) + True + + >>> np.all([1.0, np.nan]) + True + + >>> np.all([[True, True], [False, True]], where=[[True], [False]]) + True + + >>> o=np.array(False) + >>> z=np.all([-1, 4, 5], out=o) + >>> id(z), id(o), z + (28293632, 28293632, array(True)) # may vary + + """ + return _wrapreduction_any_all(a, np.logical_and, 'all', axis, out, + keepdims=keepdims, where=where) + + +def _cumulative_func(x, func, axis, dtype, out, include_initial): + x = np.atleast_1d(x) + x_ndim = x.ndim + if axis is None: + if x_ndim >= 2: + raise ValueError("For arrays which have more than one dimension " + "``axis`` argument is required.") + axis = 0 + + if out is not None and include_initial: + item = [slice(None)] * x_ndim + item[axis] = slice(1, None) + func.accumulate(x, axis=axis, dtype=dtype, out=out[tuple(item)]) + item[axis] = 0 + out[tuple(item)] = func.identity + return out + + res = func.accumulate(x, axis=axis, dtype=dtype, out=out) + if include_initial: + initial_shape = list(x.shape) + initial_shape[axis] = 1 + res = np.concat( + [np.full_like(res, func.identity, shape=initial_shape), res], + axis=axis, + ) + + return res + + +def _cumulative_prod_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_prod_dispatcher) +def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative product of elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumprod`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If ``dtype`` is not specified, it + defaults to the dtype of ``x``, unless ``x`` has an integer dtype + with a precision less than that of the default platform integer. + In that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (ones) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_prod_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> np.cumulative_prod(a) # intermediate results 1, 1*2 + ... 
# total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_prod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of ``b``: + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_prod(b, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of ``b``: + + >>> np.cumulative_prod(b, axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial) + + +def _cumulative_sum_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_sum_dispatcher) +def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative sum of the elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumsum`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If ``dtype`` is not specified, it defaults + to the dtype of ``x``, unless ``x`` has an integer dtype with + a precision less than that of the default platform integer. + In that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (zeros) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_sum_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + See Also + -------- + sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumulative_sum(a)[-1]`` may not be equal to ``sum(a)`` for + floating-point values since ``sum`` may use a pairwise summation routine, + reducing the roundoff-error. See `sum` for more information. 
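+
+    A small sketch of ``include_initial`` (the output gains one element
+    along ``axis``, starting from the additive identity 0):
+
+    >>> np.cumulative_sum(np.array([1, 2, 3]), include_initial=True)
+    array([0, 1, 3, 6])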
+ + Examples + -------- + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> a + array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_sum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_sum(b,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumulative_sum(b,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumulative_sum(c)[-1]`` may not be equal to ``sum(c)`` + + >>> c = np.array([1, 2e-9, 3e-9] * 1000000) + >>> np.cumulative_sum(c)[-1] + 1000000.0050045159 + >>> c.sum() + 1000000.0050000029 + + """ + return _cumulative_func(x, um.add, axis, dtype, out, include_initial) + + +def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumsum_dispatcher) +def cumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. + + Returns + ------- + cumsum_along_axis : ndarray. + A new array holding the result is returned unless `out` is + specified, in which case a reference to `out` is returned. The + result has the same size as `a`, and the same shape as `a` if + `axis` is not None or `a` is a 1-d array. + + See Also + -------- + cumulative_sum : Array API compatible alternative for ``cumsum``. + sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point + values since ``sum`` may use a pairwise summation routine, reducing + the roundoff-error. See `sum` for more information. 
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.cumsum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumsum(b)[-1]`` may not be equal to ``sum(b)`` + + >>> b = np.array([1, 2e-9, 3e-9] * 1000000) + >>> b.cumsum()[-1] + 1000000.0050045159 + >>> b.sum() + 1000000.0050000029 + + """ + return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) + + +def _ptp_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_ptp_dispatcher) +def ptp(a, axis=None, out=None, keepdims=np._NoValue): + """ + Range of values (maximum - minimum) along an axis. + + The name of the function comes from the acronym for 'peak to peak'. + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `numpy.int8`, `numpy.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + a : array_like + Input values. + axis : None or int or tuple of ints, optional + Axis along which to find the peaks. By default, flatten the + array. `axis` may be negative, in + which case it counts from the last to the first axis. + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : array_like + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type of the output values will be cast if necessary. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `ptp` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + ptp : ndarray or scalar + The range of a given array - `scalar` if array is one-dimensional + or a new array holding the result along the given axis + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[4, 9, 2, 10], + ... [6, 9, 7, 12]]) + + >>> np.ptp(x, axis=1) + array([8, 6]) + + >>> np.ptp(x, axis=0) + array([2, 0, 5, 2]) + + >>> np.ptp(x) + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.array([[1, 127], + ... [0, 127], + ... [-1, 127], + ... 
[-2, 127]], dtype=np.int8) + >>> np.ptp(y, axis=1) + array([ 126, 127, -128, -127], dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> np.ptp(y, axis=1).view(np.uint8) + array([126, 127, 128, 129], dtype=uint8) + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + return _methods._ptp(a, axis=axis, out=out, **kwargs) + + +def _max_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + +@array_function_dispatch(_max_dispatcher) +@set_module('numpy') +def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. If this is a tuple of ints, the maximum is selected over + multiple axes, instead of a single axis or all the axes as before. + + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the ``max`` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The minimum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + where : array_like of bool, optional + Elements to compare for the maximum. See `~numpy.ufunc.reduce` + for details. + + Returns + ------- + max : ndarray or scalar + Maximum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is an int, the result is an array of dimension + ``a.ndim - 1``. If `axis` is a tuple, the result is an array of + dimension ``a.ndim - len(axis)``. + + See Also + -------- + amin : + The minimum value of an array along a given axis, propagating any NaNs. + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + argmax : + Return the indices of the maximum values. + + nanmin, minimum, fmin + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding max value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmax. + + Don't use `~numpy.max` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than + ``max(a, axis=0)``. 
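+
+    A quick sketch of that comparison (both spellings produce the same
+    values; only the speed differs)::
+
+        a = np.arange(6).reshape((2, 3))
+        np.maximum(a[0], a[1])  # element-wise maximum of the two rows
+        np.max(a, axis=0)       # same values via a reduction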
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.arange(4).reshape((2,2))
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> np.max(a)  # Maximum of the flattened array
+    3
+    >>> np.max(a, axis=0)  # Maxima along the first axis
+    array([2, 3])
+    >>> np.max(a, axis=1)  # Maxima along the second axis
+    array([1, 3])
+    >>> np.max(a, where=[False, True], initial=-1, axis=0)
+    array([-1,  3])
+    >>> b = np.arange(5, dtype=float)
+    >>> b[2] = np.nan
+    >>> np.max(b)
+    np.float64(nan)
+    >>> np.max(b, where=~np.isnan(b), initial=-1)
+    4.0
+    >>> np.nanmax(b)
+    4.0
+
+    You can use an initial value to compute the maximum of an empty slice, or
+    to initialize it to a different value:
+
+    >>> np.max([[-50], [10]], axis=-1, initial=0)
+    array([ 0, 10])
+
+    Notice that the initial value is used as one of the elements for which the
+    maximum is determined, unlike the ``default`` argument of Python's ``max``
+    function, which is only used for empty iterables.
+
+    >>> np.max([5], initial=6)
+    6
+    >>> max([5], default=6)
+    5
+    """
+    return _wrapreduction(a, np.maximum, 'max', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+@array_function_dispatch(_max_dispatcher)
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+         where=np._NoValue):
+    """
+    Return the maximum of an array or maximum along an axis.
+
+    `amax` is an alias of `~numpy.max`.
+
+    See Also
+    --------
+    max : the function that `amax` aliases
+    ndarray.max : equivalent method
+    """
+    return _wrapreduction(a, np.maximum, 'max', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+def _min_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+                    where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_min_dispatcher)
+def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+        where=np._NoValue):
+    """
+    Return the minimum of an array or minimum along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which to operate. By default, flattened input is
+        used.
+
+        If this is a tuple of ints, the minimum is selected over multiple axes,
+        instead of a single axis or all the axes as before.
+    out : ndarray, optional
+        Alternative output array in which to place the result. Must
+        be of the same shape and buffer length as the expected output.
+        See :ref:`ufuncs-output-type` for more details.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the ``min`` method of sub-classes of
+        `ndarray`, however any non-default value will be. If the
+        sub-class' method does not implement `keepdims` any
+        exceptions will be raised.
+
+    initial : scalar, optional
+        The maximum value of an output element. Must be present to allow
+        computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+    where : array_like of bool, optional
+        Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+        for details.
+
+    Returns
+    -------
+    min : ndarray or scalar
+        Minimum of `a`. If `axis` is None, the result is a scalar value.
+        If `axis` is an int, the result is an array of dimension
+        ``a.ndim - 1``. If `axis` is a tuple, the result is an array of
+        dimension ``a.ndim - len(axis)``.
+
+    See Also
+    --------
+    amax :
+        The maximum value of an array along a given axis, propagating any NaNs.
+    nanmin :
+        The minimum value of an array along a given axis, ignoring any NaNs.
+    minimum :
+        Element-wise minimum of two arrays, propagating any NaNs.
+    fmin :
+        Element-wise minimum of two arrays, ignoring any NaNs.
+    argmin :
+        Return the indices of the minimum values.
+
+    nanmax, maximum, fmax
+
+    Notes
+    -----
+    NaN values are propagated, that is if at least one item is NaN, the
+    corresponding min value will be NaN as well. To ignore NaN values
+    (MATLAB behavior), please use nanmin.
+
+    Don't use `~numpy.min` for element-wise comparison of 2 arrays; when
+    ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
+    ``min(a, axis=0)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.arange(4).reshape((2,2))
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> np.min(a)  # Minimum of the flattened array
+    0
+    >>> np.min(a, axis=0)  # Minima along the first axis
+    array([0, 1])
+    >>> np.min(a, axis=1)  # Minima along the second axis
+    array([0, 2])
+    >>> np.min(a, where=[False, True], initial=10, axis=0)
+    array([10,  1])
+
+    >>> b = np.arange(5, dtype=float)
+    >>> b[2] = np.nan
+    >>> np.min(b)
+    np.float64(nan)
+    >>> np.min(b, where=~np.isnan(b), initial=10)
+    0.0
+    >>> np.nanmin(b)
+    0.0
+
+    >>> np.min([[-50], [10]], axis=-1, initial=0)
+    array([-50,   0])
+
+    Notice that the initial value is used as one of the elements for which the
+    minimum is determined, unlike the ``default`` argument of Python's ``min``
+    function, which is only used for empty iterables.
+
+    >>> np.min([6], initial=5)
+    5
+    >>> min([6], default=5)
+    6
+    """
+    return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+@array_function_dispatch(_min_dispatcher)
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+         where=np._NoValue):
+    """
+    Return the minimum of an array or minimum along an axis.
+
+    `amin` is an alias of `~numpy.min`.
+
+    See Also
+    --------
+    min : the function that `amin` aliases
+    ndarray.min : equivalent method
+    """
+    return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+                     initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+         initial=np._NoValue, where=np._NoValue):
+    """
+    Return the product of array elements over a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a product is performed. The default,
+        axis=None, will calculate the product of all the elements in the
+        input array. If axis is negative it counts from the last to the
+        first axis.
+
+        If axis is a tuple of ints, a product is performed on all of the
+        axes specified in the tuple instead of a single axis or all the
+        axes as before.
+    dtype : dtype, optional
+        The type of the returned array, as well as of the accumulator in
+        which the elements are multiplied. The dtype of `a` is used by
+        default unless `a` has an integer dtype of less precision than the
+        default platform integer. In that case, if `a` is signed then the
+        platform integer is used while if `a` is unsigned then an unsigned
+        integer of the same precision as the platform integer is used.
+ out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `prod` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` + for details. + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` + for details. + + Returns + ------- + product_along_axis : ndarray, see `dtype` parameter above. + An array shaped as `a` but with the specified axis removed. + Returns a reference to `out` if specified. + + See Also + -------- + ndarray.prod : equivalent method + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. That means that, on a 32-bit platform: + + >>> x = np.array([536870910, 536870910, 536870910, 536870910]) + >>> np.prod(x) + 16 # may vary + + The product of an empty array is the neutral element 1: + + >>> np.prod([]) + 1.0 + + Examples + -------- + By default, calculate the product of all elements: + + >>> import numpy as np + >>> np.prod([1.,2.]) + 2.0 + + Even when the input array is two-dimensional: + + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> np.prod(a) + 24.0 + + But we can also specify the axis over which to multiply: + + >>> np.prod(a, axis=1) + array([ 2., 12.]) + >>> np.prod(a, axis=0) + array([3., 8.]) + + Or select specific elements to include: + + >>> np.prod([1., np.nan, 3.], where=[True, False, True]) + 3.0 + + If the type of `x` is unsigned, then the output type is + the unsigned platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.uint8) + >>> np.prod(x).dtype == np.uint + True + + If `x` is of a signed integer type, then the output type + is the default platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.int8) + >>> np.prod(x).dtype == int + True + + You can also start the product with a value other than one: + + >>> np.prod([1, 2], initial=5) + 10 + """ + return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where) + + +def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumprod_dispatcher) +def cumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. 
It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + cumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case a reference to out is returned. + + See Also + -------- + cumulative_prod : Array API compatible alternative for ``cumprod``. + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1,2,3]) + >>> np.cumprod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumprod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of `a`: + + >>> np.cumprod(a, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of `a`: + + >>> np.cumprod(a,axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) + + +def _ndim_dispatcher(a): + return (a,) + + +@array_function_dispatch(_ndim_dispatcher) +def ndim(a): + """ + Return the number of dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. If it is not already an ndarray, a conversion is + attempted. + + Returns + ------- + number_of_dimensions : int + The number of dimensions in `a`. Scalars are zero-dimensional. + + See Also + -------- + ndarray.ndim : equivalent method + shape : dimensions of array + ndarray.shape : dimensions of array + + Examples + -------- + >>> import numpy as np + >>> np.ndim([[1,2,3],[4,5,6]]) + 2 + >>> np.ndim(np.array([[1,2,3],[4,5,6]])) + 2 + >>> np.ndim(1) + 0 + + """ + try: + return a.ndim + except AttributeError: + return asarray(a).ndim + + +def _size_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_size_dispatcher) +def size(a, axis=None): + """ + Return the number of elements along a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which the elements are counted. By default, give + the total number of elements. + + Returns + ------- + element_count : int + Number of elements along the specified axis. + + See Also + -------- + shape : dimensions of array + ndarray.shape : dimensions of array + ndarray.size : number of elements in array + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3],[4,5,6]]) + >>> np.size(a) + 6 + >>> np.size(a,1) + 3 + >>> np.size(a,0) + 2 + + """ + if axis is None: + try: + return a.size + except AttributeError: + return asarray(a).size + else: + try: + return a.shape[axis] + except AttributeError: + return asarray(a).shape[axis] + + +def _round_dispatcher(a, decimals=None, out=None): + return (a, out) + + +@array_function_dispatch(_round_dispatcher) +def round(a, decimals=0, out=None): + """ + Evenly round to the given number of decimals. + + Parameters + ---------- + a : array_like + Input data. + decimals : int, optional + Number of decimal places to round to (default: 0). If + decimals is negative, it specifies the number of positions to + the left of the decimal point. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. 
See :ref:`ufuncs-output-type` + for more details. + + Returns + ------- + rounded_array : ndarray + An array of the same type as `a`, containing the rounded values. + Unless `out` was specified, a new array is created. A reference to + the result is returned. + + The real and imaginary parts of complex numbers are rounded + separately. The result of rounding a float is a float. + + See Also + -------- + ndarray.round : equivalent method + around : an alias for this function + ceil, fix, floor, rint, trunc + + + Notes + ----- + For values exactly halfway between rounded decimal values, NumPy + rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, + -0.5 and 0.5 round to 0.0, etc. + + ``np.round`` uses a fast but sometimes inexact algorithm to round + floating-point datatypes. For positive `decimals` it is equivalent to + ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has + error due to the inexact representation of decimal fractions in the IEEE + floating point standard [1]_ and errors introduced when scaling by powers + of ten. For instance, note the extra "1" in the following: + + >>> np.round(56294995342131.5, 3) + 56294995342131.51 + + If your goal is to print such values with a fixed number of decimals, it is + preferable to use numpy's float printing routines to limit the number of + printed decimals: + + >>> np.format_float_positional(56294995342131.5, precision=3) + '56294995342131.5' + + The float printing routines use an accurate but much more computationally + demanding algorithm to compute the number of digits after the decimal + point. + + Alternatively, Python's builtin `round` function uses a more accurate + but slower algorithm for 64-bit floating point values: + + >>> round(56294995342131.5, 3) + 56294995342131.5 + >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 + (16.06, 16.05) + + + References + ---------- + .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, + https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF + + Examples + -------- + >>> import numpy as np + >>> np.round([0.37, 1.64]) + array([0., 2.]) + >>> np.round([0.37, 1.64], decimals=1) + array([0.4, 1.6]) + >>> np.round([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value + array([0., 2., 2., 4., 4.]) + >>> np.round([1,2,3,11], decimals=1) # ndarray of ints is returned + array([ 1, 2, 3, 11]) + >>> np.round([1,2,3,11], decimals=-1) + array([ 0, 0, 0, 10]) + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +@array_function_dispatch(_round_dispatcher) +def around(a, decimals=0, out=None): + """ + Round an array to the given number of decimals. + + `around` is an alias of `~numpy.round`. + + See Also + -------- + ndarray.round : equivalent method + round : alias for this function + ceil, fix, floor, rint, trunc + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_mean_dispatcher) +def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. 
If `a` is not an array, a conversion is attempted.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which the means are computed. The default is to
+        compute the mean of the flattened array.
+
+        If this is a tuple of ints, a mean is performed over multiple axes,
+        instead of a single axis or all the axes as before.
+    dtype : data-type, optional
+        Type to use in computing the mean. For integer inputs, the default
+        is `float64`; for floating point inputs, it is the same as the
+        input dtype.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
+        See :ref:`ufuncs-output-type` for more details.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `mean` method of sub-classes of
+        `ndarray`, however any non-default value will be. If the
+        sub-class' method does not implement `keepdims` any
+        exceptions will be raised.
+
+    where : array_like of bool, optional
+        Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    m : ndarray, see dtype parameter above
+        If `out=None`, returns a new array containing the mean values,
+        otherwise a reference to the output array is returned.
+
+    See Also
+    --------
+    average : Weighted average
+    std, var, nanmean, nanstd, nanvar
+
+    Notes
+    -----
+    The arithmetic mean is the sum of the elements along the axis divided
+    by the number of elements.
+
+    Note that for floating-point input, the mean is computed using the
+    same precision the input has. Depending on the input data, this can
+    cause the results to be inaccurate, especially for `float32` (see
+    example below). Specifying a higher-precision accumulator using the
+    `dtype` keyword can alleviate this issue.
+
+    By default, `float16` results are computed using `float32` intermediates
+    for extra precision.
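+
+    A minimal sketch of the ``float16`` behavior (the result is still
+    returned as ``float16`` even though ``float32`` intermediates are
+    used)::
+
+        x = np.ones(3, dtype=np.float16) / 3
+        np.mean(x)                    # float16 result, float32 accumulation
+        np.mean(x, dtype=np.float64)  # force a float64 accumulator instead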
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.mean(a) + 2.5 + >>> np.mean(a, axis=0) + array([2., 3.]) + >>> np.mean(a, axis=1) + array([1.5, 3.5]) + + In single precision, `mean` can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.mean(a) + np.float32(0.54999924) + + Computing the mean in float64 is more accurate: + + >>> np.mean(a, dtype=np.float64) + 0.55000000074505806 # may vary + + Computing the mean in timedelta64 is available: + + >>> b = np.array([1, 3], dtype="timedelta64[D]") + >>> np.mean(b) + np.timedelta64(2,'D') + + Specifying a where argument: + + >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]]) + >>> np.mean(a) + 12.0 + >>> np.mean(a, where=[[True], [False], [False]]) + 9.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if type(a) is not mu.ndarray: + try: + mean = a.mean + except AttributeError: + pass + else: + return mean(axis=axis, dtype=dtype, out=out, **kwargs) + + return _methods._mean(a, axis=axis, dtype=dtype, + out=out, **kwargs) + + +def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, correction=None): + return (a, where, out, mean) + + +@array_function_dispatch(_std_dispatcher) +def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + r""" + Compute the standard deviation along the specified axis. + + Returns the standard deviation, a measure of the spread of a distribution, + of the array elements. The standard deviation is computed for the + flattened array by default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of these values. + axis : None or int or tuple of ints, optional + Axis or axes along which the standard deviation is computed. The + default is to compute the standard deviation of the flattened array. + If this is a tuple of ints, a standard deviation is performed over + multiple axes, instead of a single axis or all the axes as before. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it is + the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + By default `ddof` is zero. See Notes for details about use of `ddof`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `std` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. 
versionadded:: 1.20.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this std function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard deviation, + otherwise return a reference to the output array. + + See Also + -------- + var, mean, nanmean, nanstd, nanvar + :ref:`ufuncs-output-type` + + Notes + ----- + There are several common variants of the array standard deviation + calculation. Assuming the input `a` is a one-dimensional NumPy array + and ``mean`` is either provided as an argument or computed as + ``a.mean()``, NumPy computes the standard deviation of an array as:: + + N = len(a) + d2 = abs(a - mean)**2 # abs is for complex `a` + var = d2.sum() / (N - ddof) # note use of `ddof` + std = var**0.5 + + Different values of the argument `ddof` are useful in different + contexts. NumPy's default ``ddof=0`` corresponds with the expression: + + .. math:: + + \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N}} + + which is sometimes called the "population standard deviation" in the field + of statistics because it applies the definition of standard deviation to + `a` as if `a` were a complete population of possible observations. + + Many other libraries define the standard deviation of an array + differently, e.g.: + + .. math:: + + \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N - 1}} + + In statistics, the resulting quantity is sometimes called the "sample + standard deviation" because if `a` is a random sample from a larger + population, this calculation provides the square root of an unbiased + estimate of the variance of the population. The use of :math:`N-1` in the + denominator is often called "Bessel's correction" because it corrects for + bias (toward lower values) in the variance estimate introduced when the + sample mean of `a` is used in place of the true mean of the population. + The resulting estimate of the standard deviation is still biased, but less + than it would have been without the correction. For this quantity, use + ``ddof=1``. + + Note that, for complex numbers, `std` takes the absolute + value before squaring, so that the result is always real and nonnegative. + + For floating-point input, the standard deviation is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example below). + Specifying a higher-accuracy accumulator using the `dtype` keyword can + alleviate this issue. 
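+
+    A minimal sketch of the ``ddof``/``correction`` relationship (the two
+    spellings are mutually exclusive ways to request the same divisor)::
+
+        x = np.array([1.0, 2.0, 3.0, 4.0])
+        np.std(x)                # divides by N:     sqrt(1.25)
+        np.std(x, ddof=1)        # divides by N - 1: sqrt(5/3)
+        np.std(x, correction=1)  # Array API spelling of ddof=1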
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.std(a) + 1.1180339887498949 # may vary + >>> np.std(a, axis=0) + array([1., 1.]) + >>> np.std(a, axis=1) + array([0.5, 0.5]) + + In single precision, std() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.std(a) + np.float32(0.45000005) + + Computing the standard deviation in float64 is more accurate: + + >>> np.std(a, dtype=np.float64) + 0.44999999925494177 # may vary + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.std(a) + 2.614064523559687 # may vary + >>> np.std(a, where=[[True], [True], [False]]) + 2.0 + + Using the mean keyword to save computation time: + + >>> import numpy as np + >>> from timeit import timeit + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> mean = np.mean(a, axis=1, keepdims=True) + >>> + >>> g = globals() + >>> n = 10000 + >>> t1 = timeit("std = np.std(a, axis=1, mean=mean)", globals=g, number=n) + >>> t2 = timeit("std = np.std(a, axis=1)", globals=g, number=n) + >>> print(f'Percentage execution time saved {100*(t2-t1)/t2:.0f}%') + #doctest: +SKIP + Percentage execution time saved 30% + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if mean is not np._NoValue: + kwargs['mean'] = mean + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + if type(a) is not mu.ndarray: + try: + std = a.std + except AttributeError: + pass + else: + return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) + + +def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, correction=None): + return (a, where, out, mean) + + +@array_function_dispatch(_var_dispatcher) +def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + r""" + Compute the variance along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + If this is a tuple of ints, a variance is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : {int, float}, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. See notes for details about use of `ddof`. 
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `var` method of sub-classes of
+        `ndarray`, however any non-default value will be. If the
+        sub-class' method does not implement `keepdims` any
+        exceptions will be raised.
+    where : array_like of bool, optional
+        Elements to include in the variance. See `~numpy.ufunc.reduce` for
+        details.
+
+        .. versionadded:: 1.20.0
+
+    mean : array_like, optional
+        Provide the mean to prevent its recalculation. The mean should have
+        a shape as if it was calculated with ``keepdims=True``.
+        The axis for the calculation of the mean should be the same as used in
+        the call to this var function.
+
+        .. versionadded:: 2.0.0
+
+    correction : {int, float}, optional
+        Array API compatible name for the ``ddof`` parameter. Only one of them
+        can be provided at the same time.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    variance : ndarray, see dtype parameter above
+        If ``out=None``, returns a new array containing the variance;
+        otherwise, a reference to the output array is returned.
+
+    See Also
+    --------
+    std, mean, nanmean, nanstd, nanvar
+    :ref:`ufuncs-output-type`
+
+    Notes
+    -----
+    There are several common variants of the array variance calculation.
+    Assuming the input `a` is a one-dimensional NumPy array and ``mean`` is
+    either provided as an argument or computed as ``a.mean()``, NumPy
+    computes the variance of an array as::
+
+        N = len(a)
+        d2 = abs(a - mean)**2  # abs is for complex `a`
+        var = d2.sum() / (N - ddof)  # note use of `ddof`
+
+    Different values of the argument `ddof` are useful in different
+    contexts. NumPy's default ``ddof=0`` corresponds with the expression:
+
+    .. math::
+
+        \frac{\sum_i{|a_i - \bar{a}|^2}}{N}
+
+    which is sometimes called the "population variance" in the field of
+    statistics because it applies the definition of variance to `a` as if `a`
+    were a complete population of possible observations.
+
+    Many other libraries define the variance of an array differently, e.g.:
+
+    .. math::
+
+        \frac{\sum_i{|a_i - \bar{a}|^2}}{N - 1}
+
+    In statistics, the resulting quantity is sometimes called the "sample
+    variance" because if `a` is a random sample from a larger population,
+    this calculation provides an unbiased estimate of the variance of the
+    population. The use of :math:`N-1` in the denominator is often called
+    "Bessel's correction" because it corrects for bias (toward lower values)
+    in the variance estimate introduced when the sample mean of `a` is used
+    in place of the true mean of the population. For this quantity, use
+    ``ddof=1``.
+
+    Note that for complex numbers, the absolute value is taken before
+    squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the variance is computed using the same
+    precision the input has. Depending on the input data, this can cause
+    the results to be inaccurate, especially for `float32` (see example
+    below). Specifying a higher-accuracy accumulator using the ``dtype``
+    keyword can alleviate this issue.
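+
+    A minimal sketch of ``ddof`` for the variance (population versus sample
+    divisor)::
+
+        x = np.array([1.0, 2.0, 3.0, 4.0])
+        np.var(x)          # 1.25       (divides by N)
+        np.var(x, ddof=1)  # 1.6666...  (divides by N - 1)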
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.var(a) + 1.25 + >>> np.var(a, axis=0) + array([1., 1.]) + >>> np.var(a, axis=1) + array([0.25, 0.25]) + + In single precision, var() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.var(a) + np.float32(0.20250003) + + Computing the variance in float64 is more accurate: + + >>> np.var(a, dtype=np.float64) + 0.20249999932944759 # may vary + >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 + 0.2025 + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.var(a) + 6.833333333333333 # may vary + >>> np.var(a, where=[[True], [True], [False]]) + 4.0 + + Using the mean keyword to save computation time: + + >>> import numpy as np + >>> from timeit import timeit + >>> + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> mean = np.mean(a, axis=1, keepdims=True) + >>> + >>> g = globals() + >>> n = 10000 + >>> t1 = timeit("var = np.var(a, axis=1, mean=mean)", globals=g, number=n) + >>> t2 = timeit("var = np.var(a, axis=1)", globals=g, number=n) + >>> print(f'Percentage execution time saved {100*(t2-t1)/t2:.0f}%') + #doctest: +SKIP + Percentage execution time saved 32% + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if mean is not np._NoValue: + kwargs['mean'] = mean + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + if type(a) is not mu.ndarray: + try: + var = a.var + + except AttributeError: + pass + else: + return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.pyi new file mode 100644 index 0000000..f0f8309 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.pyi @@ -0,0 +1,1750 @@ +# ruff: noqa: ANN401 +from collections.abc import Sequence +from typing import ( + Any, + Literal, + Never, + Protocol, + SupportsIndex, + TypeAlias, + TypeVar, + overload, + type_check_only, +) + +from _typeshed import Incomplete +from typing_extensions import deprecated + +import numpy as np +from numpy import ( + _AnyShapeT, + _CastingKind, + _ModeKind, + _OrderACF, + _OrderKACF, + _PartitionKind, + _SortKind, + _SortSide, + complexfloating, + float16, + floating, + generic, + int64, + int_, + intp, + object_, + timedelta64, + uint64, +) +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _BoolLike_co, + _ComplexLike_co, + _DTypeLike, + _IntLike_co, + _NestedSequence, + _NumberLike_co, + _ScalarLike_co, + _ShapeLike, +) + +__all__ = [ + "all", + "amax", + "amin", + "any", + "argmax", + "argmin", + "argpartition", + "argsort", + "around", + "choose", + "clip", + "compress", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "diagonal", + "mean", + "max", + "min", + "matrix_transpose", + "ndim", + "nonzero", + "partition", + "prod", + "ptp", + "put", + "ravel", + "repeat", + 
"reshape", + "resize", + "round", + "searchsorted", + "shape", + "size", + "sort", + "squeeze", + "std", + "sum", + "swapaxes", + "take", + "trace", + "transpose", + "var", +] + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) + +@type_check_only +class _SupportsShape(Protocol[_ShapeT_co]): + # NOTE: it matters that `self` is positional only + @property + def shape(self, /) -> _ShapeT_co: ... + +# a "sequence" that isn't a string, bytes, bytearray, or memoryview +_T = TypeVar("_T") +_PyArray: TypeAlias = list[_T] | tuple[_T, ...] +# `int` also covers `bool` +_PyScalar: TypeAlias = complex | bytes | str + +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _IntLike_co, + axis: None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> _ScalarT: ... +@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> Any: ... +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[_ScalarT]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[Any]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = ..., +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., +) -> _ArrayT: ... + +@overload +def reshape( # shape: index + a: _ArrayLike[_ScalarT], + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def reshape( # shape: (int, ...) @ _AnyShapeT + a: _ArrayLike[_ScalarT], + /, + shape: _AnyShapeT, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload # shape: Sequence[index] +def reshape( + a: _ArrayLike[_ScalarT], + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> NDArray[_ScalarT]: ... +@overload # shape: index +def reshape( + a: ArrayLike, + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[tuple[int], np.dtype]: ... +@overload +def reshape( # shape: (int, ...) @ _AnyShapeT + a: ArrayLike, + /, + shape: _AnyShapeT, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_AnyShapeT, np.dtype]: ... +@overload # shape: Sequence[index] +def reshape( + a: ArrayLike, + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> NDArray[Any]: ... +@overload +@deprecated( + "`newshape` keyword argument is deprecated, " + "use `shape=...` or pass shape positionally instead. " + "(deprecated in NumPy 2.1)", +) +def reshape( + a: ArrayLike, + /, + shape: None = None, + order: _OrderACF = "C", + *, + newshape: _ShapeLike, + copy: bool | None = None, +) -> NDArray[Any]: ... 
+ +@overload +def choose( + a: _IntLike_co, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., +) -> Any: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: _ArrayLike[_ScalarT], + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[_ScalarT]: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[Any]: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: _ArrayT, + mode: _ModeKind = ..., +) -> _ArrayT: ... + +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> NDArray[_ScalarT]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> NDArray[Any]: ... + +def put( + a: NDArray[Any], + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = ..., +) -> None: ... + +@overload +def swapaxes( + a: _ArrayLike[_ScalarT], + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> NDArray[_ScalarT]: ... +@overload +def swapaxes( + a: ArrayLike, + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def transpose( + a: _ArrayLike[_ScalarT], + axes: _ShapeLike | None = ... +) -> NDArray[_ScalarT]: ... +@overload +def transpose( + a: ArrayLike, + axes: _ShapeLike | None = ... +) -> NDArray[Any]: ... + +@overload +def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... + +# +@overload +def partition( + a: _ArrayLike[_ScalarT], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def partition( + a: _ArrayLike[np.void], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[np.void]: ... +@overload +def partition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[Any]: ... + +# +def argpartition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[intp]: ... + +# +@overload +def sort( + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: bool | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def sort( + a: ArrayLike, + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: bool | None = ..., +) -> NDArray[Any]: ... + +def argsort( + a: ArrayLike, + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: bool | None = ..., +) -> NDArray[intp]: ... + +@overload +def argmax( + a: ArrayLike, + axis: None = ..., + out: None = ..., + *, + keepdims: Literal[False] = ..., +) -> intp: ... 
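+# Illustrative sketch, not part of the upstream stub: the overload above
+# types `argmax` without an axis as a single flat index (`intp`); with an
+# integer axis the result shape depends on the input, hence `Any` below:
+#
+#     >>> import numpy as np
+#     >>> a = np.array([[1, 5], [4, 2]])
+#     >>> np.argmax(a)          # flat index into a.ravel() -> 1 (an np.intp)
+#     >>> np.argmax(a, axis=0)  # per-column indices -> array([1, 0])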
+@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = ..., + out: None = ..., + *, + keepdims: bool = ..., +) -> Any: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None, + out: _BoolOrIntArrayT, + *, + keepdims: bool = ..., +) -> _BoolOrIntArrayT: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _BoolOrIntArrayT, + keepdims: bool = ..., +) -> _BoolOrIntArrayT: ... + +@overload +def argmin( + a: ArrayLike, + axis: None = ..., + out: None = ..., + *, + keepdims: Literal[False] = ..., +) -> intp: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = ..., + out: None = ..., + *, + keepdims: bool = ..., +) -> Any: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None, + out: _BoolOrIntArrayT, + *, + keepdims: bool = ..., +) -> _BoolOrIntArrayT: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _BoolOrIntArrayT, + keepdims: bool = ..., +) -> _BoolOrIntArrayT: ... + +@overload +def searchsorted( + a: ArrayLike, + v: _ScalarLike_co, + side: _SortSide = ..., + sorter: _ArrayLikeInt_co | None = ..., # 1D int array +) -> intp: ... +@overload +def searchsorted( + a: ArrayLike, + v: ArrayLike, + side: _SortSide = ..., + sorter: _ArrayLikeInt_co | None = ..., # 1D int array +) -> NDArray[intp]: ... + +# +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ... +@overload +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... +@overload +def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ... +@overload +def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... + +@overload +def squeeze( + a: _ScalarT, + axis: _ShapeLike | None = ..., +) -> _ScalarT: ... +@overload +def squeeze( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = ..., +) -> NDArray[Any]: ... + +@overload +def diagonal( + a: _ArrayLike[_ScalarT], + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., # >= 2D array +) -> NDArray[_ScalarT]: ... +@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., # >= 2D array +) -> NDArray[Any]: ... + +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> Any: ... +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... + +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +@overload +def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ... 
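+# Illustrative sketch, not part of the upstream stub: `_Array1D` (defined
+# just above) encodes that `ravel` always returns a 1-D array, with the
+# scalar type propagated from the input where it is known:
+#
+#     >>> import numpy as np
+#     >>> np.ravel(np.ones((2, 3), dtype=np.float32)).shape  # -> (6,)
+#     >>> np.ravel([[1, 2], [3, 4]])  # -> array([1, 2, 3, 4]), always 1-D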
+@overload
+def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ...
+@overload
+def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np.str_]: ...
+@overload
+def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ...
+@overload
+def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ...
+@overload
+def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ...
+@overload
+def ravel(
+    a: complex | _NestedSequence[complex],
+    order: _OrderKACF = "C",
+) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ...
+@overload
+def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ...
+
+def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ...
+
+# this prevents `Any` from being returned with Pyright
+@overload
+def shape(a: _SupportsShape[Never]) -> _AnyShape: ...
+@overload
+def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ...
+@overload
+def shape(a: _PyScalar) -> tuple[()]: ...
+# `collections.abc.Sequence` can't be used here, since `bytes` and `str` are
+# subtypes of it, which would make the return types incompatible.
+@overload
+def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ...
+@overload
+def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ...
+# this overload will be skipped by typecheckers that don't support PEP 688
+@overload
+def shape(a: memoryview | bytearray) -> tuple[int]: ...
+@overload
+def shape(a: ArrayLike) -> _AnyShape: ...
+
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: _ArrayLike[_ScalarT],
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: ArrayLike,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: ArrayLike,
+    axis: SupportsIndex | None,
+    out: _ArrayT,
+) -> _ArrayT: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: ArrayLike,
+    axis: SupportsIndex | None = ...,
+    *,
+    out: _ArrayT,
+) -> _ArrayT: ...
+
+@overload
+def clip(
+    a: _ScalarT,
+    a_min: ArrayLike | None,
+    a_max: ArrayLike | None,
+    out: None = ...,
+    *,
+    min: ArrayLike | None = ...,
+    max: ArrayLike | None = ...,
+    dtype: None = ...,
+    where: _ArrayLikeBool_co | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[str | None, ...] = ...,
+    casting: _CastingKind = ...,
+) -> _ScalarT: ...
+@overload
+def clip(
+    a: _ScalarLike_co,
+    a_min: ArrayLike | None,
+    a_max: ArrayLike | None,
+    out: None = ...,
+    *,
+    min: ArrayLike | None = ...,
+    max: ArrayLike | None = ...,
+    dtype: None = ...,
+    where: _ArrayLikeBool_co | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[str | None, ...] = ...,
+    casting: _CastingKind = ...,
+) -> Any: ...
+@overload
+def clip(
+    a: _ArrayLike[_ScalarT],
+    a_min: ArrayLike | None,
+    a_max: ArrayLike | None,
+    out: None = ...,
+    *,
+    min: ArrayLike | None = ...,
+    max: ArrayLike | None = ...,
+    dtype: None = ...,
+    where: _ArrayLikeBool_co | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[str | None, ...] = ...,
+    casting: _CastingKind = ...,
+) -> NDArray[_ScalarT]: ...
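+# Illustrative sketch, not part of the upstream stub: the `clip` overloads
+# above and below cover both the positional bounds and the array-API style
+# `min=`/`max=` keywords:
+#
+#     >>> import numpy as np
+#     >>> np.clip(np.arange(5), 1, 3)  # -> array([1, 1, 2, 3, 3])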
+@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: None = ..., + *, + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: None = ..., + where: _ArrayLikeBool_co | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[str | None, ...] = ..., + casting: _CastingKind = ..., +) -> NDArray[Any]: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _ArrayT, + *, + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: DTypeLike = ..., + where: _ArrayLikeBool_co | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[str | None, ...] = ..., + casting: _CastingKind = ..., +) -> _ArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: ArrayLike = ..., + *, + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: DTypeLike, + where: _ArrayLikeBool_co | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[str | None, ...] = ..., + casting: _CastingKind = ..., +) -> Any: ... + +@overload +def sum( + a: _ArrayLike[_ScalarT], + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def sum( + a: _ArrayLike[_ScalarT], + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def sum( + a: ArrayLike, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... + +# keep in sync with `any` +@overload +def all( + a: ArrayLike | None, + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... 
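+# Illustrative sketch, not part of the upstream stub: with the default
+# `axis=None` (the overload above), `all` reduces to a single `np.bool`;
+# an explicit axis yields an array, handled by the overloads below:
+#
+#     >>> import numpy as np
+#     >>> np.all([[True, False], [True, True]])          # -> np.False_
+#     >>> np.all([[True, False], [True, True]], axis=0)  # -> array([ True, False])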
+@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `all` +@overload +def any( + a: ArrayLike | None, + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# +@overload +def cumsum( + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def cumulative_sum( + x: _ArrayLike[_ScalarT], + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = ..., + dtype: _DTypeLike[_ScalarT], + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... 
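+# Illustrative sketch, not part of the upstream stub: unlike `cumsum`, the
+# array-API function `cumulative_sum` can prepend the additive identity via
+# `include_initial`:
+#
+#     >>> import numpy as np
+#     >>> np.cumulative_sum([1, 2, 3])                        # -> array([1, 3, 6])
+#     >>> np.cumulative_sum([1, 2, 3], include_initial=True)  # -> array([0, 1, 3, 6])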
+@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: _ArrayT, + include_initial: bool = ..., +) -> _ArrayT: ... + +@overload +def ptp( + a: _ArrayLike[_ScalarT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., +) -> _ScalarT: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None = ..., + out: None = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... + +@overload +def amax( + a: _ArrayLike[_ScalarT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... + +@overload +def amin( + a: _ArrayLike[_ScalarT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... + +# TODO: `np.prod()``: For object arrays `initial` does not necessarily +# have to be a numerical scalar. +# The only requirement is that it is compatible +# with the `.__mul__()` method(s) of the passed array's elements. + +# Note that the same situation holds for all wrappers around +# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). +@overload +def prod( + a: _ArrayLikeBool_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> int_: ... +@overload +def prod( + a: _ArrayLikeUInt_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> uint64: ... +@overload +def prod( + a: _ArrayLikeInt_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> int64: ... +@overload +def prod( + a: _ArrayLikeFloat_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> floating: ... 
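+# Illustrative sketch, not part of the upstream stub: the `prod` overloads
+# above encode the default accumulator widths (bool promotes to the default
+# integer, signed ints to `int64`, unsigned to `uint64`; exact widths are
+# platform defaults, a 64-bit platform is assumed here):
+#
+#     >>> import numpy as np
+#     >>> np.prod(np.array([2, 3], dtype=np.int8)).dtype  # -> dtype('int64')
+#     >>> np.prod(np.array([True, True])).dtype           # -> dtype('int64')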
+@overload +def prod( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> complexfloating: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... + +@overload +def cumprod( + a: _ArrayLikeBool_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[int_]: ... +@overload +def cumprod( + a: _ArrayLikeUInt_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[uint64]: ... +@overload +def cumprod( + a: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[int64]: ... +@overload +def cumprod( + a: _ArrayLikeFloat_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[floating]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[complexfloating]: ... +@overload +def cumprod( + a: _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[object_]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... 
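+# Illustrative sketch, not part of the upstream stub, expanding on the
+# `np.prod()` TODO above: for object arrays any element type implementing
+# `__mul__` works, e.g. `fractions.Fraction`:
+#
+#     >>> import numpy as np
+#     >>> from fractions import Fraction
+#     >>> np.prod(np.array([Fraction(1, 2), Fraction(2, 3)], dtype=object))
+#     Fraction(1, 3)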
+@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def cumulative_prod( + x: _ArrayLikeBool_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[int_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeUInt_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[uint64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeInt_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[int64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeFloat_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[floating]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[complexfloating]: ... +@overload +def cumulative_prod( + x: _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[object_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: _DTypeLike[_ScalarT], + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: _ArrayT, + include_initial: bool = ..., +) -> _ArrayT: ... + +def ndim(a: ArrayLike) -> int: ... + +def size(a: ArrayLike, axis: int | None = ...) -> int: ... + +@overload +def around( + a: _BoolLike_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> float16: ... +@overload +def around( + a: _NumberOrObjectT, + decimals: SupportsIndex = ..., + out: None = ..., +) -> _NumberOrObjectT: ... +@overload +def around( + a: _ComplexLike_co | object_, + decimals: SupportsIndex = ..., + out: None = ..., +) -> Any: ... +@overload +def around( + a: _ArrayLikeBool_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[float16]: ... +@overload +def around( + a: _ArrayLike[_NumberOrObjectT], + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[_NumberOrObjectT]: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def mean( + a: _ArrayLikeFloat_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> floating: ... 
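+# Illustrative sketch, not part of the upstream stub: per the overload
+# above, `mean` of real (including integer) input is typed as `floating`,
+# since the result is a float even for exact-integer data:
+#
+#     >>> import numpy as np
+#     >>> np.mean([1, 2, 3])  # -> np.float64(2.0)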
+@overload +def mean( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> complexfloating: ... +@overload +def mean( + a: _ArrayLike[np.timedelta64], + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> timedelta64: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None, + keepdims: Literal[True, 1], + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + *, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + out: None = ..., + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... + +@overload +def std( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> floating: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... 
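+# Illustrative sketch, not part of the upstream stub: `correction` is the
+# array-API spelling of `ddof`, and the runtime rejects passing both (see
+# the `var` implementation in fromnumeric.py earlier in this patch):
+#
+#     >>> import numpy as np
+#     >>> x = np.array([1.0, 2.0, 3.0, 4.0])
+#     >>> np.std(x, ddof=1)        # sample std -> np.float64(1.2909...)
+#     >>> np.std(x, correction=1)  # identical result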
+@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def var( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> floating: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... 
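+# Illustrative sketch, not part of the upstream stub: the `mean` keyword
+# accepted by the `var` overloads lets callers reuse a precomputed mean,
+# which must keep the reduced axes (cf. the docstring in fromnumeric.py):
+#
+#     >>> import numpy as np
+#     >>> a = np.arange(12.).reshape(3, 4)
+#     >>> m = np.mean(a, axis=1, keepdims=True)
+#     >>> np.var(a, axis=1, mean=m)  # same values as np.var(a, axis=1)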
+@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... + +max = amax +min = amin +round = around diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/function_base.py b/.venv/lib/python3.12/site-packages/numpy/_core/function_base.py new file mode 100644 index 0000000..12ab2a7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/function_base.py @@ -0,0 +1,545 @@ +import functools +import operator +import types +import warnings + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.multiarray import add_docstring + +from . import numeric as _nx +from .numeric import asanyarray, nan, ndim, result_type + +__all__ = ['logspace', 'linspace', 'geomspace'] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, + dtype=None, axis=None, *, device=None): + return (start, stop) + + +@array_function_dispatch(_linspace_dispatcher) +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, + axis=0, *, device=None): + """ + Return evenly spaced numbers over a specified interval. + + Returns `num` evenly spaced samples, calculated over the + interval [`start`, `stop`]. + + The endpoint of the interval can optionally be excluded. + + .. versionchanged:: 1.20.0 + Values are rounded towards ``-inf`` instead of ``0`` when an + integer ``dtype`` is specified. The old behavior can + still be obtained with ``np.linspace(start, stop, num).astype(int)`` + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The end value of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced samples, so that `stop` is excluded. Note that the step + size changes when `endpoint` is False. + num : int, optional + Number of samples to generate. Default is 50. Must be non-negative. + endpoint : bool, optional + If True, `stop` is the last sample. Otherwise, it is not included. + Default is True. + retstep : bool, optional + If True, return (`samples`, `step`), where `step` is the spacing + between samples. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. 
The inferred dtype will never be
+        an integer; `float` is chosen even if the arguments would produce an
+        array of integers.
+    axis : int, optional
+        The axis in the result to store the samples. Relevant only if start
+        or stop are array-like. By default (0), the samples will be along a
+        new axis inserted at the beginning. Use -1 to get an axis at the end.
+    device : str, optional
+        The device on which to place the created array. Default: None.
+        For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    samples : ndarray
+        There are `num` equally spaced samples in the closed interval
+        ``[start, stop]`` or the half-open interval ``[start, stop)``
+        (depending on whether `endpoint` is True or False).
+    step : float, optional
+        Only returned if `retstep` is True
+
+        Size of spacing between samples.
+
+
+    See Also
+    --------
+    arange : Similar to `linspace`, but uses a step size (instead of the
+             number of samples).
+    geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
+                scale (a geometric progression).
+    logspace : Similar to `geomspace`, but with the end points specified as
+               logarithms.
+    :ref:`how-to-partition`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.linspace(2.0, 3.0, num=5)
+    array([2.  , 2.25, 2.5 , 2.75, 3.  ])
+    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
+    array([2. , 2.2, 2.4, 2.6, 2.8])
+    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
+    (array([2.  , 2.25, 2.5 , 2.75, 3.  ]), 0.25)
+
+    Graphical illustration:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 8
+    >>> y = np.zeros(N)
+    >>> x1 = np.linspace(0, 10, N, endpoint=True)
+    >>> x2 = np.linspace(0, 10, N, endpoint=False)
+    >>> plt.plot(x1, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(x2, y + 0.5, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.ylim([-0.5, 1])
+    (-0.5, 1)
+    >>> plt.show()
+
+    """
+    num = operator.index(num)
+    if num < 0:
+        raise ValueError(
+            f"Number of samples, {num}, must be non-negative."
+        )
+    div = (num - 1) if endpoint else num
+
+    conv = _array_converter(start, stop)
+    start, stop = conv.as_arrays()
+    dt = conv.result_type(ensure_inexact=True)
+
+    if dtype is None:
+        dtype = dt
+        integer_dtype = False
+    else:
+        integer_dtype = _nx.issubdtype(dtype, _nx.integer)
+
+    # Use `dtype=type(dt)` to enforce a floating point evaluation:
+    delta = np.subtract(stop, start, dtype=type(dt))
+    y = _nx.arange(
+        0, num, dtype=dt, device=device
+    ).reshape((-1,) + (1,) * ndim(delta))
+
+    # In-place multiplication y *= delta/div is faster, but prevents
+    # the multiplicand from overriding what class is produced, and thus
+    # prevents, e.g. use of Quantities, see gh-7142. Hence, we multiply
+    # in place only for standard scalar types.
+    if div > 0:
+        _mult_inplace = _nx.isscalar(delta)
+        step = delta / div
+        any_step_zero = (
+            step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any())
+        if any_step_zero:
+            # Special handling for denormal numbers, gh-5437
+            y /= div
+            if _mult_inplace:
+                y *= delta
+            else:
+                y = y * delta
+        elif _mult_inplace:
+            y *= step
+        else:
+            y = y * step
+    else:
+        # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
+        # have an undefined step
+        step = nan
+        # Multiply with delta to allow possible override of output class.
+        y = y * delta
+
+    y += start
+
+    if endpoint and num > 1:
+        y[-1, ...] 
= stop + + if axis != 0: + y = _nx.moveaxis(y, 0, axis) + + if integer_dtype: + _nx.floor(y, out=y) + + y = conv.wrap(y.astype(dtype, copy=False)) + if retstep: + return y, step + else: + return y + + +def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, + dtype=None, axis=None): + return (start, stop, base) + + +@array_function_dispatch(_logspace_dispatcher) +def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, + axis=0): + """ + Return numbers spaced evenly on a log scale. + + In linear space, the sequence starts at ``base ** start`` + (`base` to the power of `start`) and ends with ``base ** stop`` + (see `endpoint` below). + + .. versionchanged:: 1.25.0 + Non-scalar 'base` is now supported + + Parameters + ---------- + start : array_like + ``base ** start`` is the starting value of the sequence. + stop : array_like + ``base ** stop`` is the final value of the sequence, unless `endpoint` + is False. In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + base : array_like, optional + The base of the log space. The step size between the elements in + ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. + Default is 10.0. + dtype : dtype + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred type will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + axis : int, optional + The axis in the result to store the samples. Relevant only if start, + stop, or base are array-like. By default (0), the samples will be + along a new axis inserted at the beginning. Use -1 to get an axis at + the end. + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + arange : Similar to linspace, with the step size specified instead of the + number of samples. Note that, when used with a float endpoint, the + endpoint may or may not be included. + linspace : Similar to logspace, but with the samples uniformly distributed + in linear space, instead of log space. + geomspace : Similar to logspace, but with endpoints specified directly. + :ref:`how-to-partition` + + Notes + ----- + If base is a scalar, logspace is equivalent to the code + + >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) + ... # doctest: +SKIP + >>> power(base, y).astype(dtype) + ... # doctest: +SKIP + + Examples + -------- + >>> import numpy as np + >>> np.logspace(2.0, 3.0, num=4) + array([ 100. , 215.443469 , 464.15888336, 1000. ]) + >>> np.logspace(2.0, 3.0, num=4, endpoint=False) + array([100. , 177.827941 , 316.22776602, 562.34132519]) + >>> np.logspace(2.0, 3.0, num=4, base=2.0) + array([4. , 5.0396842 , 6.34960421, 8. ]) + >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1) + array([[ 4. , 5.0396842 , 6.34960421, 8. ], + [ 9. , 12.98024613, 18.72075441, 27. 
]])
+
+    Graphical illustration:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 10
+    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
+    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
+    >>> y = np.zeros(N)
+    >>> plt.plot(x1, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(x2, y + 0.5, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.ylim([-0.5, 1])
+    (-0.5, 1)
+    >>> plt.show()
+
+    """
+    if not isinstance(base, (float, int)) and np.ndim(base):
+        # If base is non-scalar, broadcast it with the others, since it
+        # may influence how axis is interpreted.
+        ndmax = np.broadcast(start, stop, base).ndim
+        start, stop, base = (
+            np.array(a, copy=None, subok=True, ndmin=ndmax)
+            for a in (start, stop, base)
+        )
+        base = np.expand_dims(base, axis=axis)
+    y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
+    if dtype is None:
+        return _nx.power(base, y)
+    return _nx.power(base, y).astype(dtype, copy=False)
+
+
+def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
+                          axis=None):
+    return (start, stop)
+
+
+@array_function_dispatch(_geomspace_dispatcher)
+def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
+    """
+    Return numbers spaced evenly on a log scale (a geometric progression).
+
+    This is similar to `logspace`, but with endpoints specified directly.
+    Each output sample is a constant multiple of the previous.
+
+    Parameters
+    ----------
+    start : array_like
+        The starting value of the sequence.
+    stop : array_like
+        The final value of the sequence, unless `endpoint` is False.
+        In that case, ``num + 1`` values are spaced over the
+        interval in log-space, of which all but the last (a sequence of
+        length `num`) are returned.
+    num : integer, optional
+        Number of samples to generate. Default is 50.
+    endpoint : boolean, optional
+        If true, `stop` is the last sample. Otherwise, it is not included.
+        Default is True.
+    dtype : dtype
+        The type of the output array. If `dtype` is not given, the data type
+        is inferred from `start` and `stop`. The inferred dtype will never be
+        an integer; `float` is chosen even if the arguments would produce an
+        array of integers.
+    axis : int, optional
+        The axis in the result to store the samples. Relevant only if start
+        or stop are array-like. By default (0), the samples will be along a
+        new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+    Returns
+    -------
+    samples : ndarray
+        `num` samples, equally spaced on a log scale.
+
+    See Also
+    --------
+    logspace : Similar to geomspace, but with endpoints specified using log
+               and base.
+    linspace : Similar to geomspace, but with arithmetic instead of geometric
+               progression.
+    arange : Similar to linspace, with the step size specified instead of the
+             number of samples.
+    :ref:`how-to-partition`
+
+    Notes
+    -----
+    If the inputs or dtype are complex, the output will follow a logarithmic
+    spiral in the complex plane. (There are an infinite number of spirals
+    passing through two points; the output will follow the shortest such path.)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.geomspace(1, 1000, num=4)
+    array([   1.,   10.,  100., 1000.])
+    >>> np.geomspace(1, 1000, num=3, endpoint=False)
+    array([  1.,  10., 100.])
+    >>> np.geomspace(1, 1000, num=4, endpoint=False)
+    array([  1.        
,   5.62341325,  31.6227766 , 177.827941  ])
+    >>> np.geomspace(1, 256, num=9)
+    array([  1.,   2.,   4.,   8.,  16.,  32.,  64., 128., 256.])
+
+    Note that the above may not produce exact integers:
+
+    >>> np.geomspace(1, 256, num=9, dtype=int)
+    array([  1,   2,   4,   7,  16,  32,  63, 127, 256])
+    >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
+    array([  1,   2,   4,   8,  16,  32,  64, 128, 256])
+
+    Negative, decreasing, and complex inputs are allowed:
+
+    >>> np.geomspace(1000, 1, num=4)
+    array([1000.,  100.,   10.,    1.])
+    >>> np.geomspace(-1000, -1, num=4)
+    array([-1000.,  -100.,   -10.,    -1.])
+    >>> np.geomspace(1j, 1000j, num=4)  # Straight line
+    array([0.   +1.j, 0.  +10.j, 0. +100.j, 0.+1000.j])
+    >>> np.geomspace(-1+0j, 1+0j, num=5)  # Circle
+    array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
+            6.12323400e-17+1.00000000e+00j,  7.07106781e-01+7.07106781e-01j,
+            1.00000000e+00+0.00000000e+00j])
+
+    Graphical illustration of `endpoint` parameter:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 10
+    >>> y = np.zeros(N)
+    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.axis([0.5, 2000, 0, 3])
+    [0.5, 2000, 0, 3]
+    >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
+    >>> plt.show()
+
+    """
+    start = asanyarray(start)
+    stop = asanyarray(stop)
+    if _nx.any(start == 0) or _nx.any(stop == 0):
+        raise ValueError('Geometric sequence cannot include zero')
+
+    dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
+    if dtype is None:
+        dtype = dt
+    else:
+        # complex to dtype('complex128'), for instance
+        dtype = _nx.dtype(dtype)
+
+    # Promote both arguments to the same dtype in case, for instance, one is
+    # complex and another is negative and log would produce NaN otherwise.
+    # Copy since we may change things in-place further down.
+    start = start.astype(dt, copy=True)
+    stop = stop.astype(dt, copy=True)
+
+    # Allow negative real values and ensure a consistent result for complex
+    # (including avoiding negligible real or imaginary parts in output) by
+    # rotating start to positive real, calculating, then undoing rotation.
+    out_sign = _nx.sign(start)
+    start /= out_sign
+    stop = stop / out_sign
+
+    log_start = _nx.log10(start)
+    log_stop = _nx.log10(stop)
+    result = logspace(log_start, log_stop, num=num,
+                      endpoint=endpoint, base=10.0, dtype=dt)
+
+    # Make sure the endpoints match the start and stop arguments. This is
+    # necessary because np.exp(np.log(x)) is not necessarily equal to x.
+    if num > 0:
+        result[0] = start
+        if num > 1 and endpoint:
+            result[-1] = stop
+
+    result *= out_sign
+
+    if axis != 0:
+        result = _nx.moveaxis(result, 0, axis)
+
+    return result.astype(dtype, copy=False)
+
+
+def _needs_add_docstring(obj):
+    """
+    Returns true if the only way to set the docstring of `obj` from python is
+    via add_docstring.
+
+    This function errs on the side of being overly conservative.
+    """
+    Py_TPFLAGS_HEAPTYPE = 1 << 9
+
+    if isinstance(obj, (types.FunctionType, types.MethodType, property)):
+        return False
+
+    if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
+        return False
+
+    return True
+
+
+def _add_docstring(obj, doc, warn_on_python):
+    if warn_on_python and not _needs_add_docstring(obj):
+        warnings.warn(
+            f"add_newdoc was used on a pure-python object {obj}. 
" + "Prefer to attach it directly to the source.", + UserWarning, + stacklevel=3) + try: + add_docstring(obj, doc) + except Exception: + pass + + +def add_newdoc(place, obj, doc, warn_on_python=True): + """ + Add documentation to an existing object, typically one defined in C + + The purpose is to allow easier editing of the docstrings without requiring + a re-compile. This exists primarily for internal use within numpy itself. + + Parameters + ---------- + place : str + The absolute name of the module to import from + obj : str or None + The name of the object to add documentation to, typically a class or + function name. + doc : {str, Tuple[str, str], List[Tuple[str, str]]} + If a string, the documentation to apply to `obj` + + If a tuple, then the first element is interpreted as an attribute + of `obj` and the second as the docstring to apply - + ``(method, docstring)`` + + If a list, then each element of the list should be a tuple of length + two - ``[(method1, docstring1), (method2, docstring2), ...]`` + warn_on_python : bool + If True, the default, emit `UserWarning` if this is used to attach + documentation to a pure-python object. + + Notes + ----- + This routine never raises an error if the docstring can't be written, but + will raise an error if the object being documented does not exist. + + This routine cannot modify read-only docstrings, as appear + in new-style classes or built-in functions. Because this + routine never raises an error the caller must check manually + that the docstrings were changed. + + Since this function grabs the ``char *`` from a c-level str object and puts + it into the ``tp_doc`` slot of the type of `obj`, it violates a number of + C-API best-practices, by: + + - modifying a `PyTypeObject` after calling `PyType_Ready` + - calling `Py_INCREF` on the str and losing the reference, so the str + will never be released + + If possible it should be avoided. + """ + new = getattr(__import__(place, globals(), {}, [obj]), obj) + if isinstance(doc, str): + if "${ARRAY_FUNCTION_LIKE}" in doc: + doc = overrides.get_array_function_like_doc(new, doc) + _add_docstring(new, doc.strip(), warn_on_python) + elif isinstance(doc, tuple): + attr, docstring = doc + _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + elif isinstance(doc, list): + for attr, docstring in doc: + _add_docstring( + getattr(new, attr), docstring.strip(), warn_on_python + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/function_base.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/function_base.pyi new file mode 100644 index 0000000..44d1311 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/function_base.pyi @@ -0,0 +1,278 @@ +from typing import Literal as L +from typing import SupportsIndex, TypeAlias, TypeVar, overload + +from _typeshed import Incomplete + +import numpy as np +from numpy._typing import ( + DTypeLike, + NDArray, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _DTypeLike, +) +from numpy._typing._array_like import _DualArrayLike + +__all__ = ["geomspace", "linspace", "logspace"] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] + +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.float64]: ... 
+@overload +def linspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.floating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.complexfloating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.float64], np.float64]: ... +@overload +def linspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.floating], np.floating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[_ScalarT], _ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[Incomplete], Incomplete]: ... + +@overload +def logspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToArrayFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... +@overload +def logspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeFloat_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... 
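+# Illustrative sketch, not part of the upstream stub: as with `linspace`,
+# Python-float arguments resolve to the `float64` overload earlier in this
+# group:
+#
+#     >>> import numpy as np
+#     >>> np.logspace(0, 2, num=3)  # -> array([  1.,  10., 100.])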
+@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + base: _ArrayLikeComplex_co, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... + +@overload +def geomspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... +@overload +def geomspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... + +def add_newdoc( + place: str, + obj: str, + doc: str | tuple[str, str] | list[tuple[str, str]], + warn_on_python: bool = True, +) -> None: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.py b/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.py new file mode 100644 index 0000000..afa2cce --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.py @@ -0,0 +1,748 @@ +"""Machine limits for float32 and float64, and for long double if available... + +""" +__all__ = ['finfo', 'iinfo'] + +import types +import warnings + +from numpy._utils import set_module + +from . import numeric +from .
import numerictypes as ntypes +from ._machar import MachAr +from .numeric import array, inf, nan +from .umath import exp2, isnan, log10, nextafter + + +def _fr0(a): + """fix rank-0 --> rank-1""" + if a.ndim == 0: + a = a.copy() + a.shape = (1,) + return a + + +def _fr1(a): + """fix rank > 0 --> rank-0""" + if a.size == 1: + a = a.copy() + a.shape = () + return a + + +class MachArLike: + """ Object to simulate MachAr instance """ + def __init__(self, ftype, *, eps, epsneg, huge, tiny, + ibeta, smallest_subnormal=None, **kwargs): + self.params = _MACHAR_PARAMS[ftype] + self.ftype = ftype + self.title = self.params['title'] + # Parameter types same as for discovered MachAr object. + if not smallest_subnormal: + self._smallest_subnormal = nextafter( + self.ftype(0), self.ftype(1), dtype=self.ftype) + else: + self._smallest_subnormal = smallest_subnormal + self.epsilon = self.eps = self._float_to_float(eps) + self.epsneg = self._float_to_float(epsneg) + self.xmax = self.huge = self._float_to_float(huge) + self.xmin = self._float_to_float(tiny) + self.smallest_normal = self.tiny = self._float_to_float(tiny) + self.ibeta = self.params['itype'](ibeta) + self.__dict__.update(kwargs) + self.precision = int(-log10(self.eps)) + self.resolution = self._float_to_float( + self._float_conv(10) ** (-self.precision)) + self._str_eps = self._float_to_str(self.eps) + self._str_epsneg = self._float_to_str(self.epsneg) + self._str_xmin = self._float_to_str(self.xmin) + self._str_xmax = self._float_to_str(self.xmax) + self._str_resolution = self._float_to_str(self.resolution) + self._str_smallest_normal = self._float_to_str(self.xmin) + + @property + def smallest_subnormal(self): + """Return the value for the smallest subnormal. + + Returns + ------- + smallest_subnormal : float + value for the smallest subnormal. + + Warns + ----- + UserWarning + If the calculated value for the smallest subnormal is zero. + """ + # Check that the calculated value is not zero; warn if it is. + value = self._smallest_subnormal + if self.ftype(0) == value: + warnings.warn( + f'The value of the smallest subnormal for {self.ftype} type is zero.', + UserWarning, stacklevel=2) + + return self._float_to_float(value) + + @property + def _str_smallest_subnormal(self): + """Return the string representation of the smallest subnormal.""" + return self._float_to_str(self.smallest_subnormal) + + def _float_to_float(self, value): + """Converts a float to a rank-0 array of the target float type. + + Parameters + ---------- + value : float + value to be converted. + """ + return _fr1(self._float_conv(value)) + + def _float_conv(self, value): + """Converts a float to a rank-1 array of the target float type. + + Parameters + ---------- + value : float + value to be converted. + """ + return array([value], self.ftype) + + def _float_to_str(self, value): + """Converts float to str. + + Parameters + ---------- + value : float + value to be converted.
+ """ + return self.params['fmt'] % array(_fr0(value)[0], self.ftype) + + +_convert_to_float = { + ntypes.csingle: ntypes.single, + ntypes.complex128: ntypes.float64, + ntypes.clongdouble: ntypes.longdouble + } + +# Parameters for creating MachAr / MachAr-like objects +_title_fmt = 'numpy {} precision floating point number' +_MACHAR_PARAMS = { + ntypes.double: { + 'itype': ntypes.int64, + 'fmt': '%24.16e', + 'title': _title_fmt.format('double')}, + ntypes.single: { + 'itype': ntypes.int32, + 'fmt': '%15.7e', + 'title': _title_fmt.format('single')}, + ntypes.longdouble: { + 'itype': ntypes.longlong, + 'fmt': '%s', + 'title': _title_fmt.format('long double')}, + ntypes.half: { + 'itype': ntypes.int16, + 'fmt': '%12.5e', + 'title': _title_fmt.format('half')}} + +# Key to identify the floating point type. Key is result of +# +# ftype = np.longdouble # or float64, float32, etc. +# v = (ftype(-1.0) / ftype(10.0)) +# v.view(v.dtype.newbyteorder('<')).tobytes() +# +# Uses division to work around deficiencies in strtold on some platforms. +# See: +# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure + +_KNOWN_TYPES = {} +def _register_type(machar, bytepat): + _KNOWN_TYPES[bytepat] = machar + + +_float_ma = {} + + +def _register_known_types(): + # Known parameters for float16 + # See docstring of MachAr class for description of parameters. + f16 = ntypes.float16 + float16_ma = MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + it=10, + iexp=5, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f16(-10)), + epsneg=exp2(f16(-11)), + huge=f16(65504), + tiny=f16(2 ** -14)) + _register_type(float16_ma, b'f\xae') + _float_ma[16] = float16_ma + + # Known parameters for float32 + f32 = ntypes.float32 + float32_ma = MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + it=23, + iexp=8, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f32(-23)), + epsneg=exp2(f32(-24)), + huge=f32((1 - 2 ** -24) * 2**128), + tiny=exp2(f32(-126))) + _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') + _float_ma[32] = float32_ma + + # Known parameters for float64 + f64 = ntypes.float64 + epsneg_f64 = 2.0 ** -53.0 + tiny_f64 = 2.0 ** -1022.0 + float64_ma = MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + it=52, + iexp=11, + ibeta=2, + irnd=5, + ngrd=0, + eps=2.0 ** -52.0, + epsneg=epsneg_f64, + huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), + tiny=tiny_f64) + _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') + _float_ma[64] = float64_ma + + # Known parameters for IEEE 754 128-bit binary float + ld = ntypes.longdouble + epsneg_f128 = exp2(ld(-113)) + tiny_f128 = exp2(ld(-16382)) + # Ignore runtime error when this is not f128 + with numeric.errstate(all='ignore'): + huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) + float128_ma = MachArLike(ld, + machep=-112, + negep=-113, + minexp=-16382, + maxexp=16384, + it=112, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-112)), + epsneg=epsneg_f128, + huge=huge_f128, + tiny=tiny_f128) + # IEEE 754 128-bit binary float + _register_type(float128_ma, + b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') + _float_ma[128] = float128_ma + + # Known parameters for float80 (Intel 80-bit extended precision) + epsneg_f80 = exp2(ld(-64)) + tiny_f80 = exp2(ld(-16382)) + # Ignore runtime error when this is not f80 + with numeric.errstate(all='ignore'): + huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) + float80_ma = MachArLike(ld, + machep=-63, + negep=-64, + 
minexp=-16382, + maxexp=16384, + it=63, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-63)), + epsneg=epsneg_f80, + huge=huge_f80, + tiny=tiny_f80) + # float80, first 10 bytes containing actual storage + _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') + _float_ma[80] = float80_ma + + # Guessed / known parameters for double double; see: + # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic + # These numbers have the same exponent range as float64, but extended + # number of digits in the significand. + huge_dd = nextafter(ld(inf), ld(0), dtype=ld) + # As the smallest_normal in double double is so hard to calculate we set + # it to NaN. + smallest_normal_dd = nan + # Leave the same value for the smallest subnormal as double + smallest_subnormal_dd = ld(nextafter(0., 1.)) + float_dd_ma = MachArLike(ld, + machep=-105, + negep=-106, + minexp=-1022, + maxexp=1024, + it=105, + iexp=11, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-105)), + epsneg=exp2(ld(-106)), + huge=huge_dd, + tiny=smallest_normal_dd, + smallest_subnormal=smallest_subnormal_dd) + # double double; low, high order (e.g. PPC 64) + _register_type(float_dd_ma, + b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') + # double double; high, low order (e.g. PPC 64 le) + _register_type(float_dd_ma, + b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') + _float_ma['dd'] = float_dd_ma + + +def _get_machar(ftype): + """ Get MachAr instance or MachAr-like instance + + Get parameters for floating point type, by first trying signatures of + various known floating point types, then, if none match, attempting to + identify parameters by analysis. + + Parameters + ---------- + ftype : class + Numpy floating point type class (e.g. ``np.float64``) + + Returns + ------- + ma_like : instance of :class:`MachAr` or :class:`MachArLike` + Object giving floating point parameters for `ftype`. + + Warns + ----- + UserWarning + If the binary signature of the float type is not in the dictionary of + known float types. + """ + params = _MACHAR_PARAMS.get(ftype) + if params is None: + raise ValueError(repr(ftype)) + # Detect known / suspected types + # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold + # may be deficient + key = (ftype(-1.0) / ftype(10.)) + key = key.view(key.dtype.newbyteorder("<")).tobytes() + ma_like = None + if ftype == ntypes.longdouble: + # Could be 80 bit == 10 byte extended precision, where last bytes can + # be random garbage. + # Comparing first 10 bytes to pattern first to avoid branching on the + # random garbage. + ma_like = _KNOWN_TYPES.get(key[:10]) + if ma_like is None: + # see if the full key is known. 
+ ma_like = _KNOWN_TYPES.get(key) + if ma_like is None and len(key) == 16: + # machine limits could be f80 masquerading as np.float128, + # find all keys with length 16 and make new dict, but make the keys + # only 10 bytes long, the last bytes can be random garbage + _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16} + ma_like = _kt.get(key[:10]) + if ma_like is not None: + return ma_like + # Fall back to parameter discovery + warnings.warn( + f'Signature {key} for {ftype} does not match any known type: ' + 'falling back to type probe function.\n' + 'This warning indicates broken support for the dtype!', + UserWarning, stacklevel=2) + return _discovered_machar(ftype) + + +def _discovered_machar(ftype): + """ Create MachAr instance with found information on float types + + TODO: Ideally, MachAr should be retired completely. We currently only + ever use it on systems with broken longdouble (valgrind, WSL). + """ + params = _MACHAR_PARAMS[ftype] + return MachAr(lambda v: array([v], ftype), + lambda v: _fr0(v.astype(params['itype']))[0], + lambda v: array(_fr0(v)[0], ftype), + lambda v: params['fmt'] % array(_fr0(v)[0], ftype), + params['title']) + + +@set_module('numpy') +class finfo: + """ + finfo(dtype) + + Machine limits for floating point types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + dtype : dtype + Returns the dtype for which `finfo` returns information. For complex + input, the returned dtype is the associated ``float*`` dtype for its + real and imaginary components. + eps : float + The difference between 1.0 and the next smallest representable float + larger than 1.0. For example, for 64-bit binary floats in the IEEE-754 + standard, ``eps = 2**-52``, approximately 2.22e-16. + epsneg : float + The difference between 1.0 and the next smallest representable float + less than 1.0. For example, for 64-bit binary floats in the IEEE-754 + standard, ``epsneg = 2**-53``, approximately 1.11e-16. + iexp : int + The number of bits in the exponent portion of the floating point + representation. + machep : int + The exponent that yields `eps`. + max : floating point number of the appropriate type + The largest representable number. + maxexp : int + The smallest positive power of the base (2) that causes overflow. + min : floating point number of the appropriate type + The smallest representable number, typically ``-max``. + minexp : int + The most negative power of the base (2) consistent with there + being no leading 0's in the mantissa. + negep : int + The exponent that yields `epsneg`. + nexp : int + The number of bits in the exponent including its sign and bias. + nmant : int + The number of bits in the mantissa. + precision : int + The approximate number of decimal digits to which this kind of + float is precise. + resolution : floating point number of the appropriate type + The approximate decimal resolution of this type, i.e., + ``10**-precision``. + tiny : float + An alias for `smallest_normal`, kept for backwards compatibility. + smallest_normal : float + The smallest positive floating point number with 1 as leading bit in + the mantissa following IEEE-754 (see Notes). + smallest_subnormal : float + The smallest positive floating point number with 0 as leading bit in + the mantissa following IEEE-754. + + Parameters + ---------- + dtype : float, dtype, or instance + Kind of floating point or complex floating point + data-type about which to get information. + + See Also + -------- + iinfo : The equivalent for integer data types.
+ spacing : The distance between a value and the nearest adjacent number + nextafter : The next floating point value after x1 towards x2 + + Notes + ----- + For developers of NumPy: do not instantiate this at the module level. + The initial calculation of these parameters is expensive and negatively + impacts import times. These objects are cached, so calling ``finfo()`` + repeatedly inside your functions is not a problem. + + Note that ``smallest_normal`` is not actually the smallest positive + representable value in a NumPy floating point type. As in the IEEE-754 + standard [1]_, NumPy floating point types make use of subnormal numbers to + fill the gap between 0 and ``smallest_normal``. However, subnormal numbers + may have significantly reduced precision [2]_. + + This function can also be used for complex data types. If used, + the output will be the same as the corresponding real float type + (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). + The values reported apply to each of the real and imaginary components. + + References + ---------- + .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008, + pp.1-70, 2008, https://doi.org/10.1109/IEEESTD.2008.4610935 + .. [2] Wikipedia, "Denormal Numbers", + https://en.wikipedia.org/wiki/Denormal_number + + Examples + -------- + >>> import numpy as np + >>> np.finfo(np.float64).dtype + dtype('float64') + >>> np.finfo(np.complex64).dtype + dtype('float32') + + """ + + _finfo_cache = {} + + __class_getitem__ = classmethod(types.GenericAlias) + + def __new__(cls, dtype): + try: + obj = cls._finfo_cache.get(dtype) # most common path + if obj is not None: + return obj + except TypeError: + pass + + if dtype is None: + # Deprecated in NumPy 1.25, 2023-01-16 + warnings.warn( + "finfo() dtype cannot be None. This behavior will " + "raise an error in the future. (Deprecated in NumPy 1.25)", + DeprecationWarning, + stacklevel=2 + ) + + try: + dtype = numeric.dtype(dtype) + except TypeError: + # In case a float instance was given + dtype = numeric.dtype(type(dtype)) + + obj = cls._finfo_cache.get(dtype) + if obj is not None: + return obj + dtypes = [dtype] + newdtype = ntypes.obj2sctype(dtype) + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + if not issubclass(dtype, numeric.inexact): + raise ValueError(f"data type {dtype!r} not inexact") + obj = cls._finfo_cache.get(dtype) + if obj is not None: + return obj + if not issubclass(dtype, numeric.floating): + newdtype = _convert_to_float[dtype] + if newdtype is not dtype: + # dtype changed, for example from complex128 to float64 + dtypes.append(newdtype) + dtype = newdtype + + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + # the original dtype was not in the cache, but the new + # dtype is in the cache.
we add the original dtypes to + # the cache and return the result + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + obj = object.__new__(cls)._init(dtype) + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + + def _init(self, dtype): + self.dtype = numeric.dtype(dtype) + machar = _get_machar(dtype) + + for word in ['precision', 'iexp', + 'maxexp', 'minexp', 'negep', + 'machep']: + setattr(self, word, getattr(machar, word)) + for word in ['resolution', 'epsneg', 'smallest_subnormal']: + setattr(self, word, getattr(machar, word).flat[0]) + self.bits = self.dtype.itemsize * 8 + self.max = machar.huge.flat[0] + self.min = -self.max + self.eps = machar.eps.flat[0] + self.nexp = machar.iexp + self.nmant = machar.it + self._machar = machar + self._str_tiny = machar._str_xmin.strip() + self._str_max = machar._str_xmax.strip() + self._str_epsneg = machar._str_epsneg.strip() + self._str_eps = machar._str_eps.strip() + self._str_resolution = machar._str_resolution.strip() + self._str_smallest_normal = machar._str_smallest_normal.strip() + self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() + return self + + def __str__(self): + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'precision = %(precision)3s resolution = %(_str_resolution)s\n' + 'machep = %(machep)6s eps = %(_str_eps)s\n' + 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' + 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' + 'maxexp = %(maxexp)6s max = %(_str_max)s\n' + 'nexp = %(nexp)6s min = -max\n' + 'smallest_normal = %(_str_smallest_normal)s ' + 'smallest_subnormal = %(_str_smallest_subnormal)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + def __repr__(self): + c = self.__class__.__name__ + d = self.__dict__.copy() + d['klass'] = c + return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," + " max=%(_str_max)s, dtype=%(dtype)s)") % d) + + @property + def smallest_normal(self): + """Return the value for the smallest normal. + + Returns + ------- + smallest_normal : float + Value for the smallest normal. + + Warns + ----- + UserWarning + If the calculated value for the smallest normal is requested for + double-double. + """ + # This check is necessary because the value for smallest_normal is + # platform dependent for longdouble types. + if isnan(self._machar.smallest_normal.flat[0]): + warnings.warn( + 'The value of smallest normal is undefined for double double', + UserWarning, stacklevel=2) + return self._machar.smallest_normal.flat[0] + + @property + def tiny(self): + """Return the value for tiny, alias of smallest_normal. + + Returns + ------- + tiny : float + Value for the smallest normal, alias of smallest_normal. + + Warns + ----- + UserWarning + If the calculated value for the smallest normal is requested for + double-double. + """ + return self.smallest_normal + + +@set_module('numpy') +class iinfo: + """ + iinfo(type) + + Machine limits for integer types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + dtype : dtype + Returns the dtype for which `iinfo` returns information. + min : int + The smallest integer expressible by the type. + max : int + The largest integer expressible by the type. + + Parameters + ---------- + int_type : integer type, dtype, or instance + The kind of integer data type to get information about. + + See Also + -------- + finfo : The equivalent for floating point data types. 
+ + Examples + -------- + With types: + + >>> import numpy as np + >>> ii16 = np.iinfo(np.int16) + >>> ii16.min + -32768 + >>> ii16.max + 32767 + >>> ii32 = np.iinfo(np.int32) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + With instances: + + >>> ii32 = np.iinfo(np.int32(10)) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + """ + + _min_vals = {} + _max_vals = {} + + __class_getitem__ = classmethod(types.GenericAlias) + + def __init__(self, int_type): + try: + self.dtype = numeric.dtype(int_type) + except TypeError: + self.dtype = numeric.dtype(type(int_type)) + self.kind = self.dtype.kind + self.bits = self.dtype.itemsize * 8 + self.key = "%s%d" % (self.kind, self.bits) + if self.kind not in 'iu': + raise ValueError(f"Invalid integer data type {self.kind!r}.") + + @property + def min(self): + """Minimum value of given dtype.""" + if self.kind == 'u': + return 0 + else: + try: + val = iinfo._min_vals[self.key] + except KeyError: + val = int(-(1 << (self.bits - 1))) + iinfo._min_vals[self.key] = val + return val + + @property + def max(self): + """Maximum value of given dtype.""" + try: + val = iinfo._max_vals[self.key] + except KeyError: + if self.kind == 'u': + val = int((1 << self.bits) - 1) + else: + val = int((1 << (self.bits - 1)) - 1) + iinfo._max_vals[self.key] = val + return val + + def __str__(self): + """String representation.""" + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'min = %(min)s\n' + 'max = %(max)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + def __repr__(self): + return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, + self.min, self.max, self.dtype) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.pyi new file mode 100644 index 0000000..9d79b17 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.pyi @@ -0,0 +1,3 @@ +from numpy import finfo, iinfo + +__all__ = ["finfo", "iinfo"] diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.c b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.c new file mode 100644 index 0000000..8398c62 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.c @@ -0,0 +1,376 @@ + +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyArray_API[] = { + (void *) PyArray_GetNDArrayCVersion, + NULL, + (void *) &PyArray_Type, + (void *) &PyArrayDescr_Type, + NULL, + (void *) &PyArrayIter_Type, + (void *) &PyArrayMultiIter_Type, + (int *) &NPY_NUMUSERTYPES, + (void *) &PyBoolArrType_Type, + (void *) &_PyArrayScalar_BoolValues, + (void *) &PyGenericArrType_Type, + (void *) &PyNumberArrType_Type, + (void *) &PyIntegerArrType_Type, + (void *) &PySignedIntegerArrType_Type, + (void *) &PyUnsignedIntegerArrType_Type, + (void *) &PyInexactArrType_Type, + (void *) &PyFloatingArrType_Type, + (void *) &PyComplexFloatingArrType_Type, + (void *) &PyFlexibleArrType_Type, + (void *) &PyCharacterArrType_Type, + (void *) &PyByteArrType_Type, + (void *) &PyShortArrType_Type, + (void *) &PyIntArrType_Type, + (void *) &PyLongArrType_Type, + (void *) &PyLongLongArrType_Type, + (void *) &PyUByteArrType_Type, + (void *) &PyUShortArrType_Type, + (void *) &PyUIntArrType_Type, + (void *) 
&PyULongArrType_Type, + (void *) &PyULongLongArrType_Type, + (void *) &PyFloatArrType_Type, + (void *) &PyDoubleArrType_Type, + (void *) &PyLongDoubleArrType_Type, + (void *) &PyCFloatArrType_Type, + (void *) &PyCDoubleArrType_Type, + (void *) &PyCLongDoubleArrType_Type, + (void *) &PyObjectArrType_Type, + (void *) &PyStringArrType_Type, + (void *) &PyUnicodeArrType_Type, + (void *) &PyVoidArrType_Type, + NULL, + NULL, + (void *) PyArray_INCREF, + (void *) PyArray_XDECREF, + (void *) PyArray_SetStringFunction, + (void *) PyArray_DescrFromType, + (void *) PyArray_TypeObjectFromType, + (void *) PyArray_Zero, + (void *) PyArray_One, + (void *) PyArray_CastToType, + (void *) PyArray_CopyInto, + (void *) PyArray_CopyAnyInto, + (void *) PyArray_CanCastSafely, + (void *) PyArray_CanCastTo, + (void *) PyArray_ObjectType, + (void *) PyArray_DescrFromObject, + (void *) PyArray_ConvertToCommonType, + (void *) PyArray_DescrFromScalar, + (void *) PyArray_DescrFromTypeObject, + (void *) PyArray_Size, + (void *) PyArray_Scalar, + (void *) PyArray_FromScalar, + (void *) PyArray_ScalarAsCtype, + (void *) PyArray_CastScalarToCtype, + (void *) PyArray_CastScalarDirect, + (void *) PyArray_Pack, + NULL, + NULL, + NULL, + (void *) PyArray_FromAny, + (void *) PyArray_EnsureArray, + (void *) PyArray_EnsureAnyArray, + (void *) PyArray_FromFile, + (void *) PyArray_FromString, + (void *) PyArray_FromBuffer, + (void *) PyArray_FromIter, + (void *) PyArray_Return, + (void *) PyArray_GetField, + (void *) PyArray_SetField, + (void *) PyArray_Byteswap, + (void *) PyArray_Resize, + NULL, + NULL, + NULL, + (void *) PyArray_CopyObject, + (void *) PyArray_NewCopy, + (void *) PyArray_ToList, + (void *) PyArray_ToString, + (void *) PyArray_ToFile, + (void *) PyArray_Dump, + (void *) PyArray_Dumps, + (void *) PyArray_ValidType, + (void *) PyArray_UpdateFlags, + (void *) PyArray_New, + (void *) PyArray_NewFromDescr, + (void *) PyArray_DescrNew, + (void *) PyArray_DescrNewFromType, + (void *) PyArray_GetPriority, + (void *) PyArray_IterNew, + (void *) PyArray_MultiIterNew, + (void *) PyArray_PyIntAsInt, + (void *) PyArray_PyIntAsIntp, + (void *) PyArray_Broadcast, + NULL, + (void *) PyArray_FillWithScalar, + (void *) PyArray_CheckStrides, + (void *) PyArray_DescrNewByteorder, + (void *) PyArray_IterAllButAxis, + (void *) PyArray_CheckFromAny, + (void *) PyArray_FromArray, + (void *) PyArray_FromInterface, + (void *) PyArray_FromStructInterface, + (void *) PyArray_FromArrayAttr, + (void *) PyArray_ScalarKind, + (void *) PyArray_CanCoerceScalar, + NULL, + (void *) PyArray_CanCastScalar, + NULL, + (void *) PyArray_RemoveSmallest, + (void *) PyArray_ElementStrides, + (void *) PyArray_Item_INCREF, + (void *) PyArray_Item_XDECREF, + NULL, + (void *) PyArray_Transpose, + (void *) PyArray_TakeFrom, + (void *) PyArray_PutTo, + (void *) PyArray_PutMask, + (void *) PyArray_Repeat, + (void *) PyArray_Choose, + (void *) PyArray_Sort, + (void *) PyArray_ArgSort, + (void *) PyArray_SearchSorted, + (void *) PyArray_ArgMax, + (void *) PyArray_ArgMin, + (void *) PyArray_Reshape, + (void *) PyArray_Newshape, + (void *) PyArray_Squeeze, + (void *) PyArray_View, + (void *) PyArray_SwapAxes, + (void *) PyArray_Max, + (void *) PyArray_Min, + (void *) PyArray_Ptp, + (void *) PyArray_Mean, + (void *) PyArray_Trace, + (void *) PyArray_Diagonal, + (void *) PyArray_Clip, + (void *) PyArray_Conjugate, + (void *) PyArray_Nonzero, + (void *) PyArray_Std, + (void *) PyArray_Sum, + (void *) PyArray_CumSum, + (void *) PyArray_Prod, + (void *) PyArray_CumProd, + 
(void *) PyArray_All, + (void *) PyArray_Any, + (void *) PyArray_Compress, + (void *) PyArray_Flatten, + (void *) PyArray_Ravel, + (void *) PyArray_MultiplyList, + (void *) PyArray_MultiplyIntList, + (void *) PyArray_GetPtr, + (void *) PyArray_CompareLists, + (void *) PyArray_AsCArray, + NULL, + NULL, + (void *) PyArray_Free, + (void *) PyArray_Converter, + (void *) PyArray_IntpFromSequence, + (void *) PyArray_Concatenate, + (void *) PyArray_InnerProduct, + (void *) PyArray_MatrixProduct, + NULL, + (void *) PyArray_Correlate, + NULL, + (void *) PyArray_DescrConverter, + (void *) PyArray_DescrConverter2, + (void *) PyArray_IntpConverter, + (void *) PyArray_BufferConverter, + (void *) PyArray_AxisConverter, + (void *) PyArray_BoolConverter, + (void *) PyArray_ByteorderConverter, + (void *) PyArray_OrderConverter, + (void *) PyArray_EquivTypes, + (void *) PyArray_Zeros, + (void *) PyArray_Empty, + (void *) PyArray_Where, + (void *) PyArray_Arange, + (void *) PyArray_ArangeObj, + (void *) PyArray_SortkindConverter, + (void *) PyArray_LexSort, + (void *) PyArray_Round, + (void *) PyArray_EquivTypenums, + (void *) PyArray_RegisterDataType, + (void *) PyArray_RegisterCastFunc, + (void *) PyArray_RegisterCanCast, + (void *) PyArray_InitArrFuncs, + (void *) PyArray_IntTupleFromIntp, + NULL, + (void *) PyArray_ClipmodeConverter, + (void *) PyArray_OutputConverter, + (void *) PyArray_BroadcastToShape, + NULL, + NULL, + (void *) PyArray_DescrAlignConverter, + (void *) PyArray_DescrAlignConverter2, + (void *) PyArray_SearchsideConverter, + (void *) PyArray_CheckAxis, + (void *) PyArray_OverflowMultiplyList, + NULL, + (void *) PyArray_MultiIterFromObjects, + (void *) PyArray_GetEndianness, + (void *) PyArray_GetNDArrayCFeatureVersion, + (void *) PyArray_Correlate2, + (void *) PyArray_NeighborhoodIterNew, + (void *) &PyTimeIntegerArrType_Type, + (void *) &PyDatetimeArrType_Type, + (void *) &PyTimedeltaArrType_Type, + (void *) &PyHalfArrType_Type, + (void *) &NpyIter_Type, + NULL, + NULL, + NULL, + NULL, + (void *) NpyIter_GetTransferFlags, + (void *) NpyIter_New, + (void *) NpyIter_MultiNew, + (void *) NpyIter_AdvancedNew, + (void *) NpyIter_Copy, + (void *) NpyIter_Deallocate, + (void *) NpyIter_HasDelayedBufAlloc, + (void *) NpyIter_HasExternalLoop, + (void *) NpyIter_EnableExternalLoop, + (void *) NpyIter_GetInnerStrideArray, + (void *) NpyIter_GetInnerLoopSizePtr, + (void *) NpyIter_Reset, + (void *) NpyIter_ResetBasePointers, + (void *) NpyIter_ResetToIterIndexRange, + (void *) NpyIter_GetNDim, + (void *) NpyIter_GetNOp, + (void *) NpyIter_GetIterNext, + (void *) NpyIter_GetIterSize, + (void *) NpyIter_GetIterIndexRange, + (void *) NpyIter_GetIterIndex, + (void *) NpyIter_GotoIterIndex, + (void *) NpyIter_HasMultiIndex, + (void *) NpyIter_GetShape, + (void *) NpyIter_GetGetMultiIndex, + (void *) NpyIter_GotoMultiIndex, + (void *) NpyIter_RemoveMultiIndex, + (void *) NpyIter_HasIndex, + (void *) NpyIter_IsBuffered, + (void *) NpyIter_IsGrowInner, + (void *) NpyIter_GetBufferSize, + (void *) NpyIter_GetIndexPtr, + (void *) NpyIter_GotoIndex, + (void *) NpyIter_GetDataPtrArray, + (void *) NpyIter_GetDescrArray, + (void *) NpyIter_GetOperandArray, + (void *) NpyIter_GetIterView, + (void *) NpyIter_GetReadFlags, + (void *) NpyIter_GetWriteFlags, + (void *) NpyIter_DebugPrint, + (void *) NpyIter_IterationNeedsAPI, + (void *) NpyIter_GetInnerFixedStrideArray, + (void *) NpyIter_RemoveAxis, + (void *) NpyIter_GetAxisStrideArray, + (void *) NpyIter_RequiresBuffering, + (void *) 
NpyIter_GetInitialDataPtrArray, + (void *) NpyIter_CreateCompatibleStrides, + (void *) PyArray_CastingConverter, + (void *) PyArray_CountNonzero, + (void *) PyArray_PromoteTypes, + (void *) PyArray_MinScalarType, + (void *) PyArray_ResultType, + (void *) PyArray_CanCastArrayTo, + (void *) PyArray_CanCastTypeTo, + (void *) PyArray_EinsteinSum, + (void *) PyArray_NewLikeArray, + NULL, + (void *) PyArray_ConvertClipmodeSequence, + (void *) PyArray_MatrixProduct2, + (void *) NpyIter_IsFirstVisit, + (void *) PyArray_SetBaseObject, + (void *) PyArray_CreateSortedStridePerm, + (void *) PyArray_RemoveAxesInPlace, + (void *) PyArray_DebugPrint, + (void *) PyArray_FailUnlessWriteable, + (void *) PyArray_SetUpdateIfCopyBase, + (void *) PyDataMem_NEW, + (void *) PyDataMem_FREE, + (void *) PyDataMem_RENEW, + NULL, + (NPY_CASTING *) &NPY_DEFAULT_ASSIGN_CASTING, + NULL, + NULL, + NULL, + (void *) PyArray_Partition, + (void *) PyArray_ArgPartition, + (void *) PyArray_SelectkindConverter, + (void *) PyDataMem_NEW_ZEROED, + (void *) PyArray_CheckAnyScalarExact, + NULL, + (void *) PyArray_ResolveWritebackIfCopy, + (void *) PyArray_SetWritebackIfCopyBase, + (void *) PyDataMem_SetHandler, + (void *) PyDataMem_GetHandler, + (PyObject* *) &PyDataMem_DefaultHandler, + (void *) NpyDatetime_ConvertDatetime64ToDatetimeStruct, + (void *) NpyDatetime_ConvertDatetimeStructToDatetime64, + (void *) NpyDatetime_ConvertPyDateTimeToDatetimeStruct, + (void *) NpyDatetime_GetDatetimeISO8601StrLen, + (void *) NpyDatetime_MakeISO8601Datetime, + (void *) NpyDatetime_ParseISO8601Datetime, + (void *) NpyString_load, + (void *) NpyString_pack, + (void *) NpyString_pack_null, + (void *) NpyString_acquire_allocator, + (void *) NpyString_acquire_allocators, + (void *) NpyString_release_allocator, + (void *) NpyString_release_allocators, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + (void *) PyArray_GetDefaultDescr, + (void *) PyArrayInitDTypeMeta_FromSpec, + (void *) PyArray_CommonDType, + (void *) PyArray_PromoteDTypeSequence, + (void *) _PyDataType_GetArrFuncs, + NULL, + NULL, + NULL +}; diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.h new file mode 100644 index 0000000..34363fb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.h @@ -0,0 +1,1622 @@ + +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ + (void); +extern NPY_NO_EXPORT PyTypeObject PyArray_Type; + +extern NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull; +#define PyArrayDescr_Type (*(PyTypeObject *)(&PyArrayDescr_TypeFull)) + +extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; + +extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; + +extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; + +extern NPY_NO_EXPORT PyBoolScalarObject 
_PyArrayScalar_BoolValues[2]; + +extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; + +NPY_NO_EXPORT int PyArray_INCREF \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_XDECREF \ + (PyArrayObject *); +NPY_NO_EXPORT void PyArray_SetStringFunction \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ + (int); +NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ + (int); +NPY_NO_EXPORT char * PyArray_Zero \ + (PyArrayObject *); +NPY_NO_EXPORT char * PyArray_One \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CastToType \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_CopyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyAnyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CanCastSafely \ + (int, int); +NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_ObjectType \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ + (PyObject *, int *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ + (PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_Size \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Scalar \ + (void *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_ScalarAsCtype \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_CastScalarToCtype \ + (PyObject *, void *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_CastScalarDirect \ + (PyObject *, PyArray_Descr *, void *, int); 
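+/*
+ * Editor's sketch (not generated API code, kept inert under #if 0): how
+ * client extension code typically reaches the functions prototyped above.
+ * Outside of numpy itself these names resolve through the PyArray_API
+ * pointer table defined in the #else branch of this header, after
+ * import_array() has run. The helper name `cast_to_float64` is
+ * hypothetical.
+ */
+#if 0
+static PyObject *
+cast_to_float64(PyArrayObject *arr)
+{
+    /* PyArray_DescrFromType returns a new reference. */
+    PyArray_Descr *descr = PyArray_DescrFromType(NPY_FLOAT64);
+    if (descr == NULL) {
+        return NULL;
+    }
+    /* PyArray_CastToType steals the descr reference, as flagged by the
+       NPY_STEALS_REF_TO_ARG(2) annotation on its prototype; the final
+       argument requests Fortran order when nonzero. */
+    return PyArray_CastToType(arr, descr, 0);
+}
+#endif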
+NPY_NO_EXPORT int PyArray_Pack \ + (PyArray_Descr *, void *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ + (PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromFile \ + (FILE *, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromString \ + (char *, npy_intp, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ + (PyObject *, PyArray_Descr *, npy_intp, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ + (PyObject *, PyArray_Descr *, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_GetField \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetField \ + (PyArrayObject *, PyArray_Descr *, int, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Byteswap \ + (PyArrayObject *, npy_bool); +NPY_NO_EXPORT PyObject * PyArray_Resize \ + (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order)); +NPY_NO_EXPORT int PyArray_CopyObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_NewCopy \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_ToList \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ToString \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT int PyArray_ToFile \ + (PyArrayObject *, FILE *, char *, char *); +NPY_NO_EXPORT int PyArray_Dump \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Dumps \ + (PyObject *, int); +NPY_NO_EXPORT int PyArray_ValidType \ + (int); +NPY_NO_EXPORT void PyArray_UpdateFlags \ + (PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_New \ + (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_NewFromDescr \ + (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ + (PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ + (int); +NPY_NO_EXPORT double PyArray_GetPriority \ + (PyObject *, double); +NPY_NO_EXPORT PyObject * PyArray_IterNew \ + (PyObject *); +NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \ + (int, ...); +NPY_NO_EXPORT int PyArray_PyIntAsInt \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ + (PyObject *); +NPY_NO_EXPORT int PyArray_Broadcast \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_FillWithScalar \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ + (int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ + (PyArray_Descr *, char); +NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ + (PyObject *, int *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ + (PyObject *, 
PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ + (int, PyArrayObject **); +NPY_NO_EXPORT int PyArray_CanCoerceScalar \ + (int, int, NPY_SCALARKIND); +NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ + (PyTypeObject *, PyTypeObject *); +NPY_NO_EXPORT int PyArray_RemoveSmallest \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_ElementStrides \ + (PyObject *); +NPY_NO_EXPORT void PyArray_Item_INCREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_Item_XDECREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT PyObject * PyArray_Transpose \ + (PyArrayObject *, PyArray_Dims *); +NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ + (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutTo \ + (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutMask \ + (PyArrayObject *, PyObject*, PyObject*); +NPY_NO_EXPORT PyObject * PyArray_Repeat \ + (PyArrayObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Choose \ + (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT int PyArray_Sort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgSort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ + (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMax \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMin \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Reshape \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Newshape \ + (PyArrayObject *, PyArray_Dims *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Squeeze \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ + (PyArrayObject *, PyArray_Descr *, PyTypeObject *); +NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ + (PyArrayObject *, int, int); +NPY_NO_EXPORT PyObject * PyArray_Max \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Min \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Ptp \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Mean \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Trace \ + (PyArrayObject *, int, int, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Diagonal \ + (PyArrayObject *, int, int, int); +NPY_NO_EXPORT PyObject * PyArray_Clip \ + (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Conjugate \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Nonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Std \ + (PyArrayObject *, int, int, PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Sum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumSum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Prod \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumProd \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_All \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Any \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Compress \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Flatten \ + (PyArrayObject *, NPY_ORDER); 
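+/*
+ * Editor's sketch (not generated API code, kept inert under #if 0): the
+ * reduction entry points above mirror the Python-level array methods.
+ * For example, a sum over axis 0 accumulated in double precision. The
+ * helper name `sum_axis0` is hypothetical.
+ */
+#if 0
+static PyObject *
+sum_axis0(PyArrayObject *arr)
+{
+    /* Arguments: array, axis, accumulator type number, and an optional
+       output array (NULL allocates a new result). */
+    return PyArray_Sum(arr, 0, NPY_DOUBLE, NULL);
+}
+#endif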
+NPY_NO_EXPORT PyObject * PyArray_Ravel \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT int PyArray_MultiplyIntList \ + (int const *, int); +NPY_NO_EXPORT void * PyArray_GetPtr \ + (PyArrayObject *, npy_intp const*); +NPY_NO_EXPORT int PyArray_CompareLists \ + (npy_intp const *, npy_intp const *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ + (PyObject **, void *, npy_intp *, int, PyArray_Descr*); +NPY_NO_EXPORT int PyArray_Free \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_Converter \ + (PyObject *, PyObject **); +NPY_NO_EXPORT int PyArray_IntpFromSequence \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT PyObject * PyArray_Concatenate \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Correlate \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT int PyArray_DescrConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_IntpConverter \ + (PyObject *, PyArray_Dims *); +NPY_NO_EXPORT int PyArray_BufferConverter \ + (PyObject *, PyArray_Chunk *); +NPY_NO_EXPORT int PyArray_AxisConverter \ + (PyObject *, int *); +NPY_NO_EXPORT int PyArray_BoolConverter \ + (PyObject *, npy_bool *); +NPY_NO_EXPORT int PyArray_ByteorderConverter \ + (PyObject *, char *); +NPY_NO_EXPORT int PyArray_OrderConverter \ + (PyObject *, NPY_ORDER *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_Where \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Arange \ + (double, double, double, int); +NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ + (PyObject *, PyObject *, PyObject *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_SortkindConverter \ + (PyObject *, NPY_SORTKIND *); +NPY_NO_EXPORT PyObject * PyArray_LexSort \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Round \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ + (int, int); +NPY_NO_EXPORT int PyArray_RegisterDataType \ + (PyArray_DescrProto *); +NPY_NO_EXPORT int PyArray_RegisterCastFunc \ + (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); +NPY_NO_EXPORT int PyArray_RegisterCanCast \ + (PyArray_Descr *, int, NPY_SCALARKIND); +NPY_NO_EXPORT void PyArray_InitArrFuncs \ + (PyArray_ArrFuncs *); +NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ + (int, npy_intp const *); +NPY_NO_EXPORT int PyArray_ClipmodeConverter \ + (PyObject *, NPY_CLIPMODE *); +NPY_NO_EXPORT int PyArray_OutputConverter \ + (PyObject *, PyArrayObject **); +NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT int PyArray_DescrAlignConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_SearchsideConverter \ + (PyObject *, void *); +NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ + (PyArrayObject *, int *, int); +NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT PyObject* 
PyArray_MultiIterFromObjects \ + (PyObject **, int, int, ...); +NPY_NO_EXPORT int PyArray_GetEndianness \ + (void); +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ + (void); +NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ + (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*); +extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; + +NPY_NO_EXPORT NPY_ARRAYMETHOD_FLAGS NpyIter_GetTransferFlags \ + (NpyIter *); +NPY_NO_EXPORT NpyIter * NpyIter_New \ + (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); +NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); +NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); +NPY_NO_EXPORT NpyIter * NpyIter_Copy \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Deallocate \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Reset \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_ResetBasePointers \ + (NpyIter *, char **, char **); +NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ + (NpyIter *, npy_intp, npy_intp, char **); +NPY_NO_EXPORT int NpyIter_GetNDim \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetNOp \ + (NpyIter *); +NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ + (NpyIter *, char **); +NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ + (NpyIter *, npy_intp *, npy_intp *); +NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIterIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetShape \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ + (NpyIter *, npy_intp const *); +NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ + (NpyIter *); +NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT void NpyIter_GetReadFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_GetWriteFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_DebugPrint \ + (NpyIter *); 
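+/*
+ * Editor's sketch (not generated API code, kept inert under #if 0): the
+ * canonical single-operand NpyIter loop built from the prototypes above.
+ * For brevity it assumes a non-empty array; real callers should also
+ * check NpyIter_GetIterSize(). The helper name `count_elements` is
+ * hypothetical.
+ */
+#if 0
+static npy_intp
+count_elements(PyArrayObject *arr)
+{
+    NpyIter *iter = NpyIter_New(arr, NPY_ITER_READONLY, NPY_KEEPORDER,
+                                NPY_NO_CASTING, NULL);
+    if (iter == NULL) {
+        return -1;
+    }
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+    if (iternext == NULL) {
+        NpyIter_Deallocate(iter);
+        return -1;
+    }
+    char **dataptr = NpyIter_GetDataPtrArray(iter);
+    npy_intp count = 0;
+    do {
+        /* dataptr[0] points at the current element of `arr`. */
+        count++;
+    } while (iternext(iter));
+    NpyIter_Deallocate(iter);
+    return count;
+}
+#endif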
+NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveAxis \ + (NpyIter *, int); +NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ + (NpyIter *, int); +NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \ + (NpyIter *); +NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ + (NpyIter *, npy_intp, npy_intp *); +NPY_NO_EXPORT int PyArray_CastingConverter \ + (PyObject *, NPY_CASTING *); +NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ + (npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[]); +NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ + (PyArrayObject *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ + (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ + (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_NewLikeArray \ + (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ + (PyObject *, NPY_CLIPMODE *, int); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ + (PyObject *, PyObject *, PyArrayObject*); +NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ + (NpyIter *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ + (int, npy_intp const *, npy_stride_sort_item *); +NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ + (PyArrayObject *, const npy_bool *); +NPY_NO_EXPORT void PyArray_DebugPrint \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ + (PyArrayObject *, const char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT void * PyDataMem_NEW \ + (size_t); +NPY_NO_EXPORT void PyDataMem_FREE \ + (void *); +NPY_NO_EXPORT void * PyDataMem_RENEW \ + (void *, size_t); +extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; + +NPY_NO_EXPORT int PyArray_Partition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT int PyArray_SelectkindConverter \ + (PyObject *, NPY_SELECTKIND *); +NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ + (size_t, size_t); +NPY_NO_EXPORT int PyArray_CheckAnyScalarExact \ + (PyObject *); +NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyDataMem_SetHandler \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyDataMem_GetHandler \ + (void); +extern NPY_NO_EXPORT PyObject* PyDataMem_DefaultHandler; + +NPY_NO_EXPORT int NpyDatetime_ConvertDatetime64ToDatetimeStruct \ + (PyArray_DatetimeMetaData *, npy_datetime, npy_datetimestruct *); +NPY_NO_EXPORT int NpyDatetime_ConvertDatetimeStructToDatetime64 \ + (PyArray_DatetimeMetaData *, const npy_datetimestruct *, npy_datetime *); +NPY_NO_EXPORT int 
NpyDatetime_ConvertPyDateTimeToDatetimeStruct \ + (PyObject *, npy_datetimestruct *, NPY_DATETIMEUNIT *, int); +NPY_NO_EXPORT int NpyDatetime_GetDatetimeISO8601StrLen \ + (int, NPY_DATETIMEUNIT); +NPY_NO_EXPORT int NpyDatetime_MakeISO8601Datetime \ + (npy_datetimestruct *, char *, npy_intp, int, int, NPY_DATETIMEUNIT, int, NPY_CASTING); +NPY_NO_EXPORT int NpyDatetime_ParseISO8601Datetime \ + (char const *, Py_ssize_t, NPY_DATETIMEUNIT, NPY_CASTING, npy_datetimestruct *, NPY_DATETIMEUNIT *, npy_bool *); +NPY_NO_EXPORT int NpyString_load \ + (npy_string_allocator *, const npy_packed_static_string *, npy_static_string *); +NPY_NO_EXPORT int NpyString_pack \ + (npy_string_allocator *, npy_packed_static_string *, const char *, size_t); +NPY_NO_EXPORT int NpyString_pack_null \ + (npy_string_allocator *, npy_packed_static_string *); +NPY_NO_EXPORT npy_string_allocator * NpyString_acquire_allocator \ + (const PyArray_StringDTypeObject *); +NPY_NO_EXPORT void NpyString_acquire_allocators \ + (size_t, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]); +NPY_NO_EXPORT void NpyString_release_allocator \ + (npy_string_allocator *); +NPY_NO_EXPORT void NpyString_release_allocators \ + (size_t, npy_string_allocator *allocators[]); +NPY_NO_EXPORT PyArray_Descr * PyArray_GetDefaultDescr \ + (PyArray_DTypeMeta *); +NPY_NO_EXPORT int PyArrayInitDTypeMeta_FromSpec \ + (PyArray_DTypeMeta *, PyArrayDTypeMeta_Spec *); +NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_CommonDType \ + (PyArray_DTypeMeta *, PyArray_DTypeMeta *); +NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_PromoteDTypeSequence \ + (npy_intp, PyArray_DTypeMeta **); +NPY_NO_EXPORT PyArray_ArrFuncs * _PyDataType_GetArrFuncs \ + (const PyArray_Descr *); + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) + #define PyArray_API PY_ARRAY_UNIQUE_SYMBOL + #define _NPY_VERSION_CONCAT_HELPER2(x, y) x ## y + #define _NPY_VERSION_CONCAT_HELPER(arg) \ + _NPY_VERSION_CONCAT_HELPER2(arg, PyArray_RUNTIME_VERSION) + #define PyArray_RUNTIME_VERSION \ + _NPY_VERSION_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL) +#endif + +/* By default do not export API in an .so (was never the case on windows) */ +#ifndef NPY_API_SYMBOL_ATTRIBUTE + #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API; +extern NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API; +NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION; +#else +static void **PyArray_API = NULL; +static int PyArray_RUNTIME_VERSION = 0; +#endif +#endif + +#define PyArray_GetNDArrayCVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[0]) +#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) +#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) +#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) +#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) +#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) +#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) +#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) +#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) +#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) +#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) +#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) +#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) +#define 
PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) +#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) +#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) +#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) +#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) +#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) +#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) +#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) +#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) +#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) +#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) +#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) +#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) +#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) +#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) +#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) +#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) +#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) +#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) +#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) +#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) +#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) +#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) +#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) +#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) +#define PyArray_INCREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[42]) +#define PyArray_XDECREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[43]) +#define PyArray_SetStringFunction \ + (*(void (*)(PyObject *, int)) \ + PyArray_API[44]) +#define PyArray_DescrFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[45]) +#define PyArray_TypeObjectFromType \ + (*(PyObject * (*)(int)) \ + PyArray_API[46]) +#define PyArray_Zero \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[47]) +#define PyArray_One \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[48]) +#define PyArray_CastToType \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[49]) +#define PyArray_CopyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[50]) +#define PyArray_CopyAnyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[51]) +#define PyArray_CanCastSafely \ + (*(int (*)(int, int)) \ + PyArray_API[52]) +#define PyArray_CanCastTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[53]) +#define PyArray_ObjectType \ + (*(int (*)(PyObject *, int)) \ + PyArray_API[54]) +#define PyArray_DescrFromObject \ + (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[55]) +#define PyArray_ConvertToCommonType \ + (*(PyArrayObject ** (*)(PyObject *, int *)) \ + PyArray_API[56]) +#define PyArray_DescrFromScalar \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[57]) +#define PyArray_DescrFromTypeObject \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[58]) +#define PyArray_Size \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[59]) +#define PyArray_Scalar \ + (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ + PyArray_API[60]) +#define PyArray_FromScalar \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[61]) +#define PyArray_ScalarAsCtype \ + (*(void (*)(PyObject *, void *)) \ + 
PyArray_API[62]) +#define PyArray_CastScalarToCtype \ + (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ + PyArray_API[63]) +#define PyArray_CastScalarDirect \ + (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ + PyArray_API[64]) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_Pack \ + (*(int (*)(PyArray_Descr *, void *, PyObject *)) \ + PyArray_API[65]) +#endif +#define PyArray_FromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[69]) +#define PyArray_EnsureArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[70]) +#define PyArray_EnsureAnyArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[71]) +#define PyArray_FromFile \ + (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[72]) +#define PyArray_FromString \ + (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[73]) +#define PyArray_FromBuffer \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ + PyArray_API[74]) +#define PyArray_FromIter \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ + PyArray_API[75]) +#define PyArray_Return \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[76]) +#define PyArray_GetField \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[77]) +#define PyArray_SetField \ + (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ + PyArray_API[78]) +#define PyArray_Byteswap \ + (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ + PyArray_API[79]) +#define PyArray_Resize \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \ + PyArray_API[80]) +#define PyArray_CopyObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[84]) +#define PyArray_NewCopy \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[85]) +#define PyArray_ToList \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[86]) +#define PyArray_ToString \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[87]) +#define PyArray_ToFile \ + (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ + PyArray_API[88]) +#define PyArray_Dump \ + (*(int (*)(PyObject *, PyObject *, int)) \ + PyArray_API[89]) +#define PyArray_Dumps \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[90]) +#define PyArray_ValidType \ + (*(int (*)(int)) \ + PyArray_API[91]) +#define PyArray_UpdateFlags \ + (*(void (*)(PyArrayObject *, int)) \ + PyArray_API[92]) +#define PyArray_New \ + (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \ + PyArray_API[93]) +#define PyArray_NewFromDescr \ + (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \ + PyArray_API[94]) +#define PyArray_DescrNew \ + (*(PyArray_Descr * (*)(PyArray_Descr *)) \ + PyArray_API[95]) +#define PyArray_DescrNewFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[96]) +#define PyArray_GetPriority \ + (*(double (*)(PyObject *, double)) \ + PyArray_API[97]) +#define PyArray_IterNew \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[98]) +#define PyArray_MultiIterNew \ + (*(PyObject* (*)(int, ...)) \ + PyArray_API[99]) +#define PyArray_PyIntAsInt \ + (*(int (*)(PyObject *)) \ + PyArray_API[100]) +#define PyArray_PyIntAsIntp \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[101]) +#define PyArray_Broadcast \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[102]) +#define PyArray_FillWithScalar \ 
+ (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[104]) +#define PyArray_CheckStrides \ + (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *)) \ + PyArray_API[105]) +#define PyArray_DescrNewByteorder \ + (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ + PyArray_API[106]) +#define PyArray_IterAllButAxis \ + (*(PyObject * (*)(PyObject *, int *)) \ + PyArray_API[107]) +#define PyArray_CheckFromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[108]) +#define PyArray_FromArray \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[109]) +#define PyArray_FromInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[110]) +#define PyArray_FromStructInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[111]) +#define PyArray_FromArrayAttr \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ + PyArray_API[112]) +#define PyArray_ScalarKind \ + (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ + PyArray_API[113]) +#define PyArray_CanCoerceScalar \ + (*(int (*)(int, int, NPY_SCALARKIND)) \ + PyArray_API[114]) +#define PyArray_CanCastScalar \ + (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ + PyArray_API[116]) +#define PyArray_RemoveSmallest \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[118]) +#define PyArray_ElementStrides \ + (*(int (*)(PyObject *)) \ + PyArray_API[119]) +#define PyArray_Item_INCREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[120]) +#define PyArray_Item_XDECREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[121]) +#define PyArray_Transpose \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ + PyArray_API[123]) +#define PyArray_TakeFrom \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[124]) +#define PyArray_PutTo \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ + PyArray_API[125]) +#define PyArray_PutMask \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ + PyArray_API[126]) +#define PyArray_Repeat \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ + PyArray_API[127]) +#define PyArray_Choose \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[128]) +#define PyArray_Sort \ + (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[129]) +#define PyArray_ArgSort \ + (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[130]) +#define PyArray_SearchSorted \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ + PyArray_API[131]) +#define PyArray_ArgMax \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[132]) +#define PyArray_ArgMin \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[133]) +#define PyArray_Reshape \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[134]) +#define PyArray_Newshape \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ + PyArray_API[135]) +#define PyArray_Squeeze \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[136]) +#define PyArray_View \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ + PyArray_API[137]) +#define PyArray_SwapAxes \ + (*(PyObject * (*)(PyArrayObject *, int, int)) \ + PyArray_API[138]) +#define PyArray_Max \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[139]) +#define PyArray_Min \ + (*(PyObject * (*)(PyArrayObject *, 
int, PyArrayObject *)) \ + PyArray_API[140]) +#define PyArray_Ptp \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[141]) +#define PyArray_Mean \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[142]) +#define PyArray_Trace \ + (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ + PyArray_API[143]) +#define PyArray_Diagonal \ + (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ + PyArray_API[144]) +#define PyArray_Clip \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ + PyArray_API[145]) +#define PyArray_Conjugate \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[146]) +#define PyArray_Nonzero \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[147]) +#define PyArray_Std \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ + PyArray_API[148]) +#define PyArray_Sum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[149]) +#define PyArray_CumSum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[150]) +#define PyArray_Prod \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[151]) +#define PyArray_CumProd \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[152]) +#define PyArray_All \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[153]) +#define PyArray_Any \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[154]) +#define PyArray_Compress \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[155]) +#define PyArray_Flatten \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[156]) +#define PyArray_Ravel \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[157]) +#define PyArray_MultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[158]) +#define PyArray_MultiplyIntList \ + (*(int (*)(int const *, int)) \ + PyArray_API[159]) +#define PyArray_GetPtr \ + (*(void * (*)(PyArrayObject *, npy_intp const*)) \ + PyArray_API[160]) +#define PyArray_CompareLists \ + (*(int (*)(npy_intp const *, npy_intp const *, int)) \ + PyArray_API[161]) +#define PyArray_AsCArray \ + (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ + PyArray_API[162]) +#define PyArray_Free \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[165]) +#define PyArray_Converter \ + (*(int (*)(PyObject *, PyObject **)) \ + PyArray_API[166]) +#define PyArray_IntpFromSequence \ + (*(int (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[167]) +#define PyArray_Concatenate \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[168]) +#define PyArray_InnerProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[169]) +#define PyArray_MatrixProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[170]) +#define PyArray_Correlate \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[172]) +#define PyArray_DescrConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[174]) +#define PyArray_DescrConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[175]) +#define PyArray_IntpConverter \ + (*(int (*)(PyObject *, PyArray_Dims *)) \ + PyArray_API[176]) +#define PyArray_BufferConverter \ + (*(int (*)(PyObject *, PyArray_Chunk *)) \ + PyArray_API[177]) +#define PyArray_AxisConverter \ + (*(int (*)(PyObject *, int *)) \ + PyArray_API[178]) 
+#define PyArray_BoolConverter \ + (*(int (*)(PyObject *, npy_bool *)) \ + PyArray_API[179]) +#define PyArray_ByteorderConverter \ + (*(int (*)(PyObject *, char *)) \ + PyArray_API[180]) +#define PyArray_OrderConverter \ + (*(int (*)(PyObject *, NPY_ORDER *)) \ + PyArray_API[181]) +#define PyArray_EquivTypes \ + (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[182]) +#define PyArray_Zeros \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[183]) +#define PyArray_Empty \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[184]) +#define PyArray_Where \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ + PyArray_API[185]) +#define PyArray_Arange \ + (*(PyObject * (*)(double, double, double, int)) \ + PyArray_API[186]) +#define PyArray_ArangeObj \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ + PyArray_API[187]) +#define PyArray_SortkindConverter \ + (*(int (*)(PyObject *, NPY_SORTKIND *)) \ + PyArray_API[188]) +#define PyArray_LexSort \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[189]) +#define PyArray_Round \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[190]) +#define PyArray_EquivTypenums \ + (*(unsigned char (*)(int, int)) \ + PyArray_API[191]) +#define PyArray_RegisterDataType \ + (*(int (*)(PyArray_DescrProto *)) \ + PyArray_API[192]) +#define PyArray_RegisterCastFunc \ + (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ + PyArray_API[193]) +#define PyArray_RegisterCanCast \ + (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ + PyArray_API[194]) +#define PyArray_InitArrFuncs \ + (*(void (*)(PyArray_ArrFuncs *)) \ + PyArray_API[195]) +#define PyArray_IntTupleFromIntp \ + (*(PyObject * (*)(int, npy_intp const *)) \ + PyArray_API[196]) +#define PyArray_ClipmodeConverter \ + (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ + PyArray_API[198]) +#define PyArray_OutputConverter \ + (*(int (*)(PyObject *, PyArrayObject **)) \ + PyArray_API[199]) +#define PyArray_BroadcastToShape \ + (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[200]) +#define PyArray_DescrAlignConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[203]) +#define PyArray_DescrAlignConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[204]) +#define PyArray_SearchsideConverter \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[205]) +#define PyArray_CheckAxis \ + (*(PyObject * (*)(PyArrayObject *, int *, int)) \ + PyArray_API[206]) +#define PyArray_OverflowMultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[207]) +#define PyArray_MultiIterFromObjects \ + (*(PyObject* (*)(PyObject **, int, int, ...)) \ + PyArray_API[209]) +#define PyArray_GetEndianness \ + (*(int (*)(void)) \ + PyArray_API[210]) +#define PyArray_GetNDArrayCFeatureVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[211]) +#define PyArray_Correlate2 \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[212]) +#define PyArray_NeighborhoodIterNew \ + (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \ + PyArray_API[213]) +#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) +#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) +#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) +#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) +#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) + +#if NPY_FEATURE_VERSION >= 
NPY_2_3_API_VERSION +#define NpyIter_GetTransferFlags \ + (*(NPY_ARRAYMETHOD_FLAGS (*)(NpyIter *)) \ + PyArray_API[223]) +#endif +#define NpyIter_New \ + (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ + PyArray_API[224]) +#define NpyIter_MultiNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ + PyArray_API[225]) +#define NpyIter_AdvancedNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ + PyArray_API[226]) +#define NpyIter_Copy \ + (*(NpyIter * (*)(NpyIter *)) \ + PyArray_API[227]) +#define NpyIter_Deallocate \ + (*(int (*)(NpyIter *)) \ + PyArray_API[228]) +#define NpyIter_HasDelayedBufAlloc \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[229]) +#define NpyIter_HasExternalLoop \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[230]) +#define NpyIter_EnableExternalLoop \ + (*(int (*)(NpyIter *)) \ + PyArray_API[231]) +#define NpyIter_GetInnerStrideArray \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[232]) +#define NpyIter_GetInnerLoopSizePtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[233]) +#define NpyIter_Reset \ + (*(int (*)(NpyIter *, char **)) \ + PyArray_API[234]) +#define NpyIter_ResetBasePointers \ + (*(int (*)(NpyIter *, char **, char **)) \ + PyArray_API[235]) +#define NpyIter_ResetToIterIndexRange \ + (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ + PyArray_API[236]) +#define NpyIter_GetNDim \ + (*(int (*)(NpyIter *)) \ + PyArray_API[237]) +#define NpyIter_GetNOp \ + (*(int (*)(NpyIter *)) \ + PyArray_API[238]) +#define NpyIter_GetIterNext \ + (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ + PyArray_API[239]) +#define NpyIter_GetIterSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[240]) +#define NpyIter_GetIterIndexRange \ + (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ + PyArray_API[241]) +#define NpyIter_GetIterIndex \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[242]) +#define NpyIter_GotoIterIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[243]) +#define NpyIter_HasMultiIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[244]) +#define NpyIter_GetShape \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[245]) +#define NpyIter_GetGetMultiIndex \ + (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ + PyArray_API[246]) +#define NpyIter_GotoMultiIndex \ + (*(int (*)(NpyIter *, npy_intp const *)) \ + PyArray_API[247]) +#define NpyIter_RemoveMultiIndex \ + (*(int (*)(NpyIter *)) \ + PyArray_API[248]) +#define NpyIter_HasIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[249]) +#define NpyIter_IsBuffered \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[250]) +#define NpyIter_IsGrowInner \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[251]) +#define NpyIter_GetBufferSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[252]) +#define NpyIter_GetIndexPtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[253]) +#define NpyIter_GotoIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[254]) +#define NpyIter_GetDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[255]) +#define NpyIter_GetDescrArray \ + (*(PyArray_Descr ** (*)(NpyIter *)) \ + PyArray_API[256]) +#define NpyIter_GetOperandArray \ + (*(PyArrayObject ** (*)(NpyIter *)) \ + PyArray_API[257]) +#define NpyIter_GetIterView \ + (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ + PyArray_API[258]) +#define NpyIter_GetReadFlags \ + (*(void (*)(NpyIter *, char *)) 
\ + PyArray_API[259]) +#define NpyIter_GetWriteFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[260]) +#define NpyIter_DebugPrint \ + (*(void (*)(NpyIter *)) \ + PyArray_API[261]) +#define NpyIter_IterationNeedsAPI \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[262]) +#define NpyIter_GetInnerFixedStrideArray \ + (*(void (*)(NpyIter *, npy_intp *)) \ + PyArray_API[263]) +#define NpyIter_RemoveAxis \ + (*(int (*)(NpyIter *, int)) \ + PyArray_API[264]) +#define NpyIter_GetAxisStrideArray \ + (*(npy_intp * (*)(NpyIter *, int)) \ + PyArray_API[265]) +#define NpyIter_RequiresBuffering \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[266]) +#define NpyIter_GetInitialDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[267]) +#define NpyIter_CreateCompatibleStrides \ + (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ + PyArray_API[268]) +#define PyArray_CastingConverter \ + (*(int (*)(PyObject *, NPY_CASTING *)) \ + PyArray_API[269]) +#define PyArray_CountNonzero \ + (*(npy_intp (*)(PyArrayObject *)) \ + PyArray_API[270]) +#define PyArray_PromoteTypes \ + (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[271]) +#define PyArray_MinScalarType \ + (*(PyArray_Descr * (*)(PyArrayObject *)) \ + PyArray_API[272]) +#define PyArray_ResultType \ + (*(PyArray_Descr * (*)(npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[])) \ + PyArray_API[273]) +#define PyArray_CanCastArrayTo \ + (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[274]) +#define PyArray_CanCastTypeTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[275]) +#define PyArray_EinsteinSum \ + (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ + PyArray_API[276]) +#define PyArray_NewLikeArray \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ + PyArray_API[277]) +#define PyArray_ConvertClipmodeSequence \ + (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ + PyArray_API[279]) +#define PyArray_MatrixProduct2 \ + (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ + PyArray_API[280]) +#define NpyIter_IsFirstVisit \ + (*(npy_bool (*)(NpyIter *, int)) \ + PyArray_API[281]) +#define PyArray_SetBaseObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[282]) +#define PyArray_CreateSortedStridePerm \ + (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \ + PyArray_API[283]) +#define PyArray_RemoveAxesInPlace \ + (*(void (*)(PyArrayObject *, const npy_bool *)) \ + PyArray_API[284]) +#define PyArray_DebugPrint \ + (*(void (*)(PyArrayObject *)) \ + PyArray_API[285]) +#define PyArray_FailUnlessWriteable \ + (*(int (*)(PyArrayObject *, const char *)) \ + PyArray_API[286]) +#define PyArray_SetUpdateIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[287]) +#define PyDataMem_NEW \ + (*(void * (*)(size_t)) \ + PyArray_API[288]) +#define PyDataMem_FREE \ + (*(void (*)(void *)) \ + PyArray_API[289]) +#define PyDataMem_RENEW \ + (*(void * (*)(void *, size_t)) \ + PyArray_API[290]) +#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) +#define PyArray_Partition \ + (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[296]) +#define PyArray_ArgPartition \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[297]) +#define PyArray_SelectkindConverter \ + (*(int (*)(PyObject *, NPY_SELECTKIND *)) \ + PyArray_API[298]) +#define 
PyDataMem_NEW_ZEROED \ + (*(void * (*)(size_t, size_t)) \ + PyArray_API[299]) +#define PyArray_CheckAnyScalarExact \ + (*(int (*)(PyObject *)) \ + PyArray_API[300]) +#define PyArray_ResolveWritebackIfCopy \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[302]) +#define PyArray_SetWritebackIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[303]) + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION +#define PyDataMem_SetHandler \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[304]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION +#define PyDataMem_GetHandler \ + (*(PyObject * (*)(void)) \ + PyArray_API[305]) +#endif +#define PyDataMem_DefaultHandler (*(PyObject* *)PyArray_API[306]) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertDatetime64ToDatetimeStruct \ + (*(int (*)(PyArray_DatetimeMetaData *, npy_datetime, npy_datetimestruct *)) \ + PyArray_API[307]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertDatetimeStructToDatetime64 \ + (*(int (*)(PyArray_DatetimeMetaData *, const npy_datetimestruct *, npy_datetime *)) \ + PyArray_API[308]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertPyDateTimeToDatetimeStruct \ + (*(int (*)(PyObject *, npy_datetimestruct *, NPY_DATETIMEUNIT *, int)) \ + PyArray_API[309]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_GetDatetimeISO8601StrLen \ + (*(int (*)(int, NPY_DATETIMEUNIT)) \ + PyArray_API[310]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_MakeISO8601Datetime \ + (*(int (*)(npy_datetimestruct *, char *, npy_intp, int, int, NPY_DATETIMEUNIT, int, NPY_CASTING)) \ + PyArray_API[311]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ParseISO8601Datetime \ + (*(int (*)(char const *, Py_ssize_t, NPY_DATETIMEUNIT, NPY_CASTING, npy_datetimestruct *, NPY_DATETIMEUNIT *, npy_bool *)) \ + PyArray_API[312]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_load \ + (*(int (*)(npy_string_allocator *, const npy_packed_static_string *, npy_static_string *)) \ + PyArray_API[313]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_pack \ + (*(int (*)(npy_string_allocator *, npy_packed_static_string *, const char *, size_t)) \ + PyArray_API[314]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_pack_null \ + (*(int (*)(npy_string_allocator *, npy_packed_static_string *)) \ + PyArray_API[315]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_acquire_allocator \ + (*(npy_string_allocator * (*)(const PyArray_StringDTypeObject *)) \ + PyArray_API[316]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_acquire_allocators \ + (*(void (*)(size_t, PyArray_Descr *const descrs[], npy_string_allocator *allocators[])) \ + PyArray_API[317]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_release_allocator \ + (*(void (*)(npy_string_allocator *)) \ + PyArray_API[318]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_release_allocators \ + (*(void (*)(size_t, npy_string_allocator *allocators[])) \ + PyArray_API[319]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_GetDefaultDescr \ + (*(PyArray_Descr * (*)(PyArray_DTypeMeta *)) \ + PyArray_API[361]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define 
PyArrayInitDTypeMeta_FromSpec \ + (*(int (*)(PyArray_DTypeMeta *, PyArrayDTypeMeta_Spec *)) \ + PyArray_API[362]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_CommonDType \ + (*(PyArray_DTypeMeta * (*)(PyArray_DTypeMeta *, PyArray_DTypeMeta *)) \ + PyArray_API[363]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_PromoteDTypeSequence \ + (*(PyArray_DTypeMeta * (*)(npy_intp, PyArray_DTypeMeta **)) \ + PyArray_API[364]) +#endif +#define _PyDataType_GetArrFuncs \ + (*(PyArray_ArrFuncs * (*)(const PyArray_Descr *)) \ + PyArray_API[365]) + +/* + * The DType classes are inconvenient for the Python generation, so they are + * exposed manually in the header below (may be moved). + */ +#include "numpy/_public_dtype_api_table.h" + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); + PyObject *c_api; + if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { + PyErr_Clear(); + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + } + + if (numpy == NULL) { + return -1; + } + + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not a PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* + * On exceedingly few platforms these sizes may not match, in which case + * we do not support older NumPy versions at all. + */ + if (sizeof(Py_ssize_t) != sizeof(Py_intptr_t) && + PyArray_RUNTIME_VERSION < NPY_2_0_API_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "module compiled against NumPy 2.0 but running on NumPy 1.x. " + "Unfortunately, this is not supported on niche platforms where " + "`sizeof(size_t) != sizeof(intptr_t)`."); + } + /* + * Perform runtime check of C API version. As of now NumPy 2.0 is ABI + * backwards compatible (in the exposed feature subset!) for all practical + * purposes. + */ + if (NPY_VERSION < PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version 0x%x but this version of numpy is 0x%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + PyArray_RUNTIME_VERSION = (int)PyArray_GetNDArrayCFeatureVersion(); + if (NPY_FEATURE_VERSION > PyArray_RUNTIME_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "module was compiled against NumPy C-API version 0x%x " + "(NumPy " NPY_FEATURE_VERSION_STRING ") " + "but the running NumPy has C-API version 0x%x. 
" + "Check the section C-API incompatibility at the " + "Troubleshooting ImportError section at " + "https://numpy.org/devdocs/user/troubleshooting-importerror.html" + "#c-api-incompatibility " + "for indications on how to solve this problem.", + (int)NPY_FEATURE_VERSION, PyArray_RUNTIME_VERSION); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as big endian, but " + "detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as little endian, but " + "detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#define import_array() { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString( \ + PyExc_ImportError, \ + "numpy._core.multiarray failed to import" \ + ); \ + return NULL; \ + } \ +} + +#define import_array1(ret) { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString( \ + PyExc_ImportError, \ + "numpy._core.multiarray failed to import" \ + ); \ + return ret; \ + } \ +} + +#define import_array2(msg, ret) { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString(PyExc_ImportError, msg); \ + return ret; \ + } \ +} + +#endif + +#endif diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.c b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.c new file mode 100644 index 0000000..10fcbc4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.c @@ -0,0 +1,54 @@ + +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyUFunc_API[] = { + (void *) &PyUFunc_Type, + (void *) PyUFunc_FromFuncAndData, + (void *) PyUFunc_RegisterLoopForType, + NULL, + (void *) PyUFunc_f_f_As_d_d, + (void *) PyUFunc_d_d, + (void *) PyUFunc_f_f, + (void *) PyUFunc_g_g, + (void *) PyUFunc_F_F_As_D_D, + (void *) PyUFunc_F_F, + (void *) PyUFunc_D_D, + (void *) PyUFunc_G_G, + (void *) PyUFunc_O_O, + (void *) PyUFunc_ff_f_As_dd_d, + (void *) PyUFunc_ff_f, + (void *) PyUFunc_dd_d, + (void *) PyUFunc_gg_g, + (void *) PyUFunc_FF_F_As_DD_D, + (void *) PyUFunc_DD_D, + (void *) PyUFunc_FF_F, + (void *) PyUFunc_GG_G, + (void *) PyUFunc_OO_O, + (void *) PyUFunc_O_O_method, + (void *) PyUFunc_OO_O_method, + (void *) PyUFunc_On_Om, + NULL, + NULL, + (void *) PyUFunc_clearfperr, + (void *) PyUFunc_getfperr, + NULL, + (void *) PyUFunc_ReplaceLoopBySignature, + (void *) PyUFunc_FromFuncAndDataAndSignature, + NULL, + (void *) PyUFunc_e_e, + (void *) PyUFunc_e_e_As_f_f, + (void *) PyUFunc_e_e_As_d_d, + (void *) PyUFunc_ee_e, + (void *) PyUFunc_ee_e_As_ff_f, + (void *) PyUFunc_ee_e_As_dd_d, + (void *) PyUFunc_DefaultTypeResolver, + (void *) PyUFunc_ValidateCasting, + (void *) PyUFunc_RegisterLoopForDescr, + (void *) PyUFunc_FromFuncAndDataAndSignatureAndIdentity, + (void *) PyUFunc_AddLoopFromSpec, + (void *) PyUFunc_AddPromoter, + (void *) PyUFunc_AddWrappingLoop, + (void *) PyUFunc_GiveFloatingpointErrors +}; diff --git 
a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.h new file mode 100644 index 0000000..b05dce3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.h @@ -0,0 +1,341 @@ + +#ifdef _UMATHMODULE + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ + (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *); +NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_g_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_G_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_gg_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_GG_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_On_Om \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_clearfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_getfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ + (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *); +NPY_NO_EXPORT void PyUFunc_e_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ + (char **, 
npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_ValidateCasting \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ + (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *); +NPY_NO_EXPORT int PyUFunc_AddLoopFromSpec \ + (PyObject *, PyArrayMethod_Spec *); +NPY_NO_EXPORT int PyUFunc_AddPromoter \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT int PyUFunc_AddWrappingLoop \ + (PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *); +NPY_NO_EXPORT int PyUFunc_GiveFloatingpointErrors \ + (const char *, int); + +#else + +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL +#endif + +/* By default do not export API in an .so (was never the case on windows) */ +#ifndef NPY_API_SYMBOL_ATTRIBUTE + #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) +extern NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API; +#else +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API; +#else +static void **PyUFunc_API=NULL; +#endif +#endif + +#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) +#define PyUFunc_FromFuncAndData \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int)) \ + PyUFunc_API[1]) +#define PyUFunc_RegisterLoopForType \ + (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \ + PyUFunc_API[2]) +#define PyUFunc_f_f_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[4]) +#define PyUFunc_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[5]) +#define PyUFunc_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[6]) +#define PyUFunc_g_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[7]) +#define PyUFunc_F_F_As_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[8]) +#define PyUFunc_F_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[9]) +#define PyUFunc_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[10]) +#define PyUFunc_G_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[11]) +#define PyUFunc_O_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[12]) +#define PyUFunc_ff_f_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[13]) +#define PyUFunc_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[14]) +#define PyUFunc_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[15]) +#define PyUFunc_gg_g \ + (*(void (*)(char **, npy_intp const *, 
npy_intp const *, void *)) \ + PyUFunc_API[16]) +#define PyUFunc_FF_F_As_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[17]) +#define PyUFunc_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[18]) +#define PyUFunc_FF_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[19]) +#define PyUFunc_GG_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[20]) +#define PyUFunc_OO_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[21]) +#define PyUFunc_O_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[22]) +#define PyUFunc_OO_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[23]) +#define PyUFunc_On_Om \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[24]) +#define PyUFunc_clearfperr \ + (*(void (*)(void)) \ + PyUFunc_API[27]) +#define PyUFunc_getfperr \ + (*(int (*)(void)) \ + PyUFunc_API[28]) +#define PyUFunc_ReplaceLoopBySignature \ + (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \ + PyUFunc_API[30]) +#define PyUFunc_FromFuncAndDataAndSignature \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *)) \ + PyUFunc_API[31]) +#define PyUFunc_e_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[33]) +#define PyUFunc_e_e_As_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[34]) +#define PyUFunc_e_e_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[35]) +#define PyUFunc_ee_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[36]) +#define PyUFunc_ee_e_As_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[37]) +#define PyUFunc_ee_e_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[38]) +#define PyUFunc_DefaultTypeResolver \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ + PyUFunc_API[39]) +#define PyUFunc_ValidateCasting \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *)) \ + PyUFunc_API[40]) +#define PyUFunc_RegisterLoopForDescr \ + (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ + PyUFunc_API[41]) + +#if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION +#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \ + PyUFunc_API[42]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddLoopFromSpec \ + (*(int (*)(PyObject *, PyArrayMethod_Spec *)) \ + PyUFunc_API[43]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddPromoter \ + (*(int (*)(PyObject *, PyObject *, PyObject *)) \ + PyUFunc_API[44]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddWrappingLoop \ + (*(int (*)(PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *)) \ + PyUFunc_API[45]) 
+#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_GiveFloatingpointErrors \ + (*(int (*)(const char *, int)) \ + PyUFunc_API[46]) +#endif + +static inline int +_import_umath(void) +{ + PyObject *c_api; + PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); + if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { + PyErr_Clear(); + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + } + + if (numpy == NULL) { + PyErr_SetString(PyExc_ImportError, + "_multiarray_umath failed to import"); + return -1; + } + + c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not a PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyUFunc_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); + return -1; + } + return 0; +} + +#define import_umath() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + return NULL;\ + }\ + } while(0) + +#define import_umath1(ret) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + return ret;\ + }\ + } while(0) + +#define import_umath2(ret, msg) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError, msg);\ + return ret;\ + }\ + } while(0) + +#define import_ufunc() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + }\ + } while(0) + + +static inline int +PyUFunc_ImportUFuncAPI() +{ + if (NPY_UNLIKELY(PyUFunc_API == NULL)) { + import_umath1(-1); + } + return 0; +} + +#endif diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h new file mode 100644 index 0000000..b365cb5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h @@ -0,0 +1,90 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#error You should not include this header directly +#endif +/* + * Private API (here for inline) + */ +static inline int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); + +/* + * Update to next item of the iterator + * + * Note: this simply increments the coordinates vector, last dimension + * incremented first, i.e., for dimension 3 + * ... + * -1, -1, -1 + * -1, -1, 0 + * -1, -1, 1 + * .... + * -1, 0, -1 + * -1, 0, 0 + * .... + * 0, -1, -1 + * 0, -1, 0 + * .... 
+ */ +#define _UPDATE_COORD_ITER(c) \ + wb = iter->coordinates[c] < iter->bounds[c][1]; \ + if (wb) { \ + iter->coordinates[c] += 1; \ + return 0; \ + } \ + else { \ + iter->coordinates[c] = iter->bounds[c][0]; \ + } + +static inline int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i, wb; + + for (i = iter->nd - 1; i >= 0; --i) { + _UPDATE_COORD_ITER(i) + } + + return 0; +} + +/* + * Version optimized for 2d arrays, manual loop unrolling + */ +static inline int +_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp wb; + + _UPDATE_COORD_ITER(1) + _UPDATE_COORD_ITER(0) + + return 0; +} +#undef _UPDATE_COORD_ITER + +/* + * Advance to the next neighbour + */ +static inline int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) +{ + _PyArrayNeighborhoodIter_IncrCoord (iter); + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} + +/* + * Reset functions + */ +static inline int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i; + + for (i = 0; i < iter->nd; ++i) { + iter->coordinates[i] = iter->bounds[i][0]; + } + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_numpyconfig.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_numpyconfig.h new file mode 100644 index 0000000..16a4b44 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_numpyconfig.h @@ -0,0 +1,33 @@ +#define NPY_HAVE_ENDIAN_H 1 + +#define NPY_SIZEOF_SHORT 2 +#define NPY_SIZEOF_INT 4 +#define NPY_SIZEOF_LONG 8 +#define NPY_SIZEOF_FLOAT 4 +#define NPY_SIZEOF_COMPLEX_FLOAT 8 +#define NPY_SIZEOF_DOUBLE 8 +#define NPY_SIZEOF_COMPLEX_DOUBLE 16 +#define NPY_SIZEOF_LONGDOUBLE 16 +#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 +#define NPY_SIZEOF_PY_INTPTR_T 8 +#define NPY_SIZEOF_INTP 8 +#define NPY_SIZEOF_UINTP 8 +#define NPY_SIZEOF_WCHAR_T 4 +#define NPY_SIZEOF_OFF_T 8 +#define NPY_SIZEOF_PY_LONG_LONG 8 +#define NPY_SIZEOF_LONGLONG 8 + +/* + * Defined to 1 or 0. Note that Pyodide hardcodes NPY_NO_SMP (and other defines + * in this header) for better cross-compilation, so don't rename them without a + * good reason. + */ +#define NPY_NO_SMP 0 + +#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#define NPY_ABI_VERSION 0x02000000 +#define NPY_API_VERSION 0x00000014 + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_public_dtype_api_table.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_public_dtype_api_table.h new file mode 100644 index 0000000..51f3905 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_public_dtype_api_table.h @@ -0,0 +1,86 @@ +/* + * Public exposure of the DType Classes. These are tricky to expose + * via the Python API, so they are exposed through this header for now. + * + * These definitions are only relevant for the public API and we reserve + * the slots 320-360 in the API table generation for this (currently). + * + * TODO: This file should be consolidated with the API table generation + * (although not sure the current generation is worth preserving). 
+ */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ +#define NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ + +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + +/* All of these require NumPy 2.0 support */ +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + +/* + * The type of the DType metaclass + */ +#define PyArrayDTypeMeta_Type (*(PyTypeObject *)(PyArray_API + 320)[0]) +/* + * NumPy's builtin DTypes: + */ +#define PyArray_BoolDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[1]) +/* Integers */ +#define PyArray_ByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[2]) +#define PyArray_UByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[3]) +#define PyArray_ShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[4]) +#define PyArray_UShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[5]) +#define PyArray_IntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[6]) +#define PyArray_UIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[7]) +#define PyArray_LongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[8]) +#define PyArray_ULongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[9]) +#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[10]) +#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[11]) +/* Integer aliases */ +#define PyArray_Int8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[12]) +#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[13]) +#define PyArray_Int16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[14]) +#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[15]) +#define PyArray_Int32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[16]) +#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[17]) +#define PyArray_Int64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[18]) +#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[19]) +#define PyArray_IntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[20]) +#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[21]) +/* Floats */ +#define PyArray_HalfDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[22]) +#define PyArray_FloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[23]) +#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[24]) +#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[25]) +/* Complex */ +#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[26]) +#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[27]) +#define PyArray_CLongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[28]) +/* String/Bytes */ +#define PyArray_BytesDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[29]) +#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[30]) +/* Datetime/Timedelta */ +#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[31]) +#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[32]) +/* Object/Void */ +#define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33]) +#define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34]) +/* Python types (used as markers for scalars) */ +#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) +#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) +#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Default integer type */ +#define PyArray_DefaultIntDType (*(PyArray_DTypeMeta 
*)(PyArray_API + 320)[38]) +/* New non-legacy DTypes follow in the order they were added */ +#define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39]) + +/* NOTE: offset 40 is free */ + +/* Need to start with a larger offset again for the abstract classes: */ +#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366]) +#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367]) +#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368]) + +#endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */ + +#endif /* NPY_INTERNAL_BUILD */ +#endif /* NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayobject.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayobject.h new file mode 100644 index 0000000..97d9359 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayobject.h @@ -0,0 +1,7 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define Py_ARRAYOBJECT_H + +#include "ndarrayobject.h" + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayscalars.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayscalars.h new file mode 100644 index 0000000..ff04806 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayscalars.h @@ -0,0 +1,196 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ + +#ifndef _MULTIARRAYMODULE +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; +#endif + + +typedef struct { + PyObject_HEAD + signed char obval; +} PyByteScalarObject; + + +typedef struct { + PyObject_HEAD + short obval; +} PyShortScalarObject; + + +typedef struct { + PyObject_HEAD + int obval; +} PyIntScalarObject; + + +typedef struct { + PyObject_HEAD + long obval; +} PyLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longlong obval; +} PyLongLongScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned char obval; +} PyUByteScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned short obval; +} PyUShortScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned int obval; +} PyUIntScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned long obval; +} PyULongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_ulonglong obval; +} PyULongLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_half obval; +} PyHalfScalarObject; + + +typedef struct { + PyObject_HEAD + float obval; +} PyFloatScalarObject; + + +typedef struct { + PyObject_HEAD + double obval; +} PyDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longdouble obval; +} PyLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cfloat obval; +} PyCFloatScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cdouble obval; +} PyCDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_clongdouble obval; +} PyCLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + PyObject * obval; +} PyObjectScalarObject; + +typedef struct { + PyObject_HEAD + npy_datetime obval; + PyArray_DatetimeMetaData obmeta; +} PyDatetimeScalarObject; + +typedef struct { + PyObject_HEAD + npy_timedelta obval; + PyArray_DatetimeMetaData obmeta; +} PyTimedeltaScalarObject; + + +typedef struct { + PyObject_HEAD + char obval; +} PyScalarObject; + +#define 
PyStringScalarObject PyBytesObject +#ifndef Py_LIMITED_API +typedef struct { + /* note that the PyObject_HEAD macro lives right here */ + PyUnicodeObject base; + Py_UCS4 *obval; + #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + char *buffer_fmt; + #endif +} PyUnicodeScalarObject; +#endif + + +typedef struct { + PyObject_VAR_HEAD + char *obval; +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* Internally use the subclass to allow accessing names/fields */ + _PyArray_LegacyDescr *descr; +#else + PyArray_Descr *descr; +#endif + int flags; + PyObject *base; + #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + void *_buffer_info; /* private buffer info, tagged to allow warning */ + #endif +} PyVoidScalarObject; + +/* Macros + PyScalarObject + PyArrType_Type + are defined in ndarrayobject.h +*/ + +#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) +#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) +#define PyArrayScalar_FromLong(i) \ + ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ + return Py_INCREF(PyArrayScalar_FromLong(i)), \ + PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_FALSE \ + return Py_INCREF(PyArrayScalar_False), \ + PyArrayScalar_False +#define PyArrayScalar_RETURN_TRUE \ + return Py_INCREF(PyArrayScalar_True), \ + PyArrayScalar_True + +#define PyArrayScalar_New(cls) \ + Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) +#ifndef Py_LIMITED_API +/* For the limited API, use PyArray_ScalarAsCtype instead */ +#define PyArrayScalar_VAL(obj, cls) \ + ((Py##cls##ScalarObject *)obj)->obval +#define PyArrayScalar_ASSIGN(obj, cls, val) \ + PyArrayScalar_VAL(obj, cls) = val +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/dtype_api.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/dtype_api.h new file mode 100644 index 0000000..b37c9fb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/dtype_api.h @@ -0,0 +1,480 @@ +/* + * The public DType API + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ +#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ + +struct PyArrayMethodObject_tag; + +/* + * Largely opaque struct for DType classes (i.e. metaclass instances). + * The internal definition is currently in `ndarraytypes.h` (export is a bit + * more complex because `PyArray_Descr` is a DTypeMeta internally but not + * externally). + */ +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + +#ifndef Py_LIMITED_API + + typedef struct PyArray_DTypeMeta_tag { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance, for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* Copy of the legacy DTypes type number, usually invalid. */ + int type_num; + + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* + * DType flags to signal legacy, parametric, or + * abstract. But plenty of space for additional information/flags. + */ + npy_uint64 flags; + + /* + * Use indirection in order to allow a fixed size for this struct. + * A stable ABI size makes creating a static DType less painful + * while also ensuring flexibility for all opaque API (with one + * indirection due the pointer lookup). 
+ */ + void *dt_slots; + /* Allow growing (at the moment also beyond this) */ + void *reserved[3]; + } PyArray_DTypeMeta; + +#else + +typedef PyTypeObject PyArray_DTypeMeta; + +#endif /* Py_LIMITED_API */ + +#endif /* not internal build */ + +/* + * ****************************************************** + * ArrayMethod API (Casting and UFuncs) + * ****************************************************** + */ + + +typedef enum { + /* Flag for whether the GIL is required */ + NPY_METH_REQUIRES_PYAPI = 1 << 0, + /* + * Some functions cannot set floating point error flags, this flag + * gives us the option (not requirement) to skip floating point error + * setup/check. No function should set error flags and ignore them + * since it would interfere with chaining operations (e.g. casting). + */ + NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1, + /* Whether the method supports unaligned access (not runtime) */ + NPY_METH_SUPPORTS_UNALIGNED = 1 << 2, + /* + * Used for reductions to allow reordering the operation. At this point + * assume that if set, it also applies to normal operations though! + */ + NPY_METH_IS_REORDERABLE = 1 << 3, + /* + * Private flag for now for *logic* functions. The logical functions + * `logical_or` and `logical_and` can always cast the inputs to booleans + * "safely" (because that is how the cast to bool is defined). + * @seberg: I am not sure this is the best way to handle this, so its + * private for now (also it is very limited anyway). + * There is one "exception". NA aware dtypes cannot cast to bool + * (hopefully), so the `??->?` loop should error even with this flag. + * But a second NA fallback loop will be necessary. + */ + _NPY_METH_FORCE_CAST_INPUTS = 1 << 17, + + /* All flags which can change at runtime */ + NPY_METH_RUNTIME_FLAGS = ( + NPY_METH_REQUIRES_PYAPI | + NPY_METH_NO_FLOATINGPOINT_ERRORS), +} NPY_ARRAYMETHOD_FLAGS; + + +typedef struct PyArrayMethod_Context_tag { + /* The caller, which is typically the original ufunc. May be NULL */ + PyObject *caller; + /* The method "self". Currently an opaque object. */ + struct PyArrayMethodObject_tag *method; + + /* Operand descriptors, filled in by resolve_descriptors */ + PyArray_Descr *const *descriptors; + /* Structure may grow (this is harmless for DType authors) */ +} PyArrayMethod_Context; + + +/* + * The main object for creating a new ArrayMethod. We use the typical `slots` + * mechanism used by the Python limited API (see below for the slot defs). + */ +typedef struct { + const char *name; + int nin, nout; + NPY_CASTING casting; + NPY_ARRAYMETHOD_FLAGS flags; + PyArray_DTypeMeta **dtypes; + PyType_Slot *slots; +} PyArrayMethod_Spec; + + +/* + * ArrayMethod slots + * ----------------- + * + * SLOTS IDs For the ArrayMethod creation, once fully public, IDs are fixed + * but can be deprecated and arbitrarily extended. + */ +#define _NPY_METH_resolve_descriptors_with_scalars 1 +#define NPY_METH_resolve_descriptors 2 +#define NPY_METH_get_loop 3 +#define NPY_METH_get_reduction_initial 4 +/* specific loops for constructions/default get_loop: */ +#define NPY_METH_strided_loop 5 +#define NPY_METH_contiguous_loop 6 +#define NPY_METH_unaligned_strided_loop 7 +#define NPY_METH_unaligned_contiguous_loop 8 +#define NPY_METH_contiguous_indexed_loop 9 +#define _NPY_METH_static_data 10 + + +/* + * The resolve descriptors function, must be able to handle NULL values for + * all output (but not input) `given_descrs` and fill `loop_descrs`. + * Return -1 on error or 0 if the operation is not possible without an error + * set. 
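Tying the pieces above together: a minimal sketch, not taken from NumPy itself, of a one-input/one-output method spec whose only slot is an `NPY_METH_strided_loop`. The loop, the names, and the `negate_dtypes` table (which would be filled with DType references at registration time) are all hypothetical:

```c
/* Hypothetical strided loop: negate double -> double. */
static int
negate_strided_loop(PyArrayMethod_Context *NPY_UNUSED(context),
                    char *const data[], const npy_intp dimensions[],
                    const npy_intp strides[], NpyAuxData *NPY_UNUSED(auxdata))
{
    npy_intp N = dimensions[0];
    char *in = data[0], *out = data[1];

    while (N--) {
        *(double *)out = -*(double *)in;
        in += strides[0];
        out += strides[1];
    }
    return 0;
}

static PyType_Slot negate_slots[] = {
    {NPY_METH_strided_loop, &negate_strided_loop},
    {0, NULL}
};

/* Filled at runtime, e.g. with two references to the double DType. */
static PyArray_DTypeMeta *negate_dtypes[2];

static PyArrayMethod_Spec negate_spec = {
    .name = "negate_double",
    .nin = 1, .nout = 1,
    .casting = NPY_NO_CASTING,
    .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS,
    .dtypes = negate_dtypes,
    .slots = negate_slots,
};
```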
(This may still be in flux.) + * Otherwise must return the "casting safety", for normal functions, this is + * almost always "safe" (or even "equivalent"?). + * + * `resolve_descriptors` is optional if all output DTypes are non-parametric. + */ +typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( + /* "method" is currently opaque (necessary e.g. to wrap Python) */ + struct PyArrayMethodObject_tag *method, + /* DTypes the method was created for */ + PyArray_DTypeMeta *const *dtypes, + /* Input descriptors (instances). Outputs may be NULL. */ + PyArray_Descr *const *given_descrs, + /* Exact loop descriptors to use, must not hold references on error */ + PyArray_Descr **loop_descrs, + npy_intp *view_offset); + + +/* + * Rarely needed, slightly more powerful version of `resolve_descriptors`. + * See also `PyArrayMethod_ResolveDescriptors` for details on shared arguments. + * + * NOTE: This function is private now as it is unclear how and what to pass + * exactly as additional information to allow dealing with the scalars. + * See also gh-24915. + */ +typedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)( + struct PyArrayMethodObject_tag *method, + PyArray_DTypeMeta *const *dtypes, + /* Unlike above, these can have any DType and we may allow NULL. */ + PyArray_Descr *const *given_descrs, + /* + * Input scalars or NULL. Only ever passed for python scalars. + * WARNING: In some cases, a loop may be explicitly selected and the + * value passed is not available (NULL) or does not have the + * expected type. + */ + PyObject *const *input_scalars, + PyArray_Descr **loop_descrs, + npy_intp *view_offset); + + + +typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); + + +typedef int (PyArrayMethod_GetLoop)( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags); + +/** + * Query an ArrayMethod for the initial value for use in reduction. + * + * @param context The arraymethod context, mainly to access the descriptors. + * @param reduction_is_empty Whether the reduction is empty. When it is, the + * value returned may differ. In this case it is a "default" value that + * may differ from the "identity" value normally used. For example: + * - `0.0` is the default for `sum([])`. But `-0.0` is the correct + * identity otherwise as it preserves the sign for `sum([-0.0])`. + * - We use no identity for object, but return the default of `0` and `1` + * for the empty `sum([], dtype=object)` and `prod([], dtype=object)`. + * This allows `np.sum(np.array(["a", "b"], dtype=object))` to work. + * - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN` + * not a good *default* when there are no items. + * @param initial Pointer to initial data to be filled (if possible) + * + * @returns -1, 0, or 1 indicating error, no initial value, and initial being + * successfully filled. Errors must not be given where 0 is correct, NumPy + * may call this even when not strictly necessary. 
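As a concrete reading of the contract above, a sketch for a hypothetical float64 sum-like reduction (the typedef itself follows immediately below):

```c
/* Sketch: initial value for a hypothetical float64 sum-like reduction. */
static int
sum_get_reduction_initial(PyArrayMethod_Context *NPY_UNUSED(context),
                          npy_bool reduction_is_empty, void *initial)
{
    /* Default 0.0 for the empty reduction, sign-preserving identity
     * -0.0 otherwise, mirroring the sum example in the comment above. */
    *(double *)initial = reduction_is_empty ? 0.0 : -0.0;
    return 1;  /* 1 == initial value was filled */
}
```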
+ */ +typedef int (PyArrayMethod_GetReductionInitial)( + PyArrayMethod_Context *context, npy_bool reduction_is_empty, + void *initial); + +/* + * The following functions are only used by the wrapping array method defined + * in umath/wrapping_array_method.c + */ + + +/* + * The function to convert the given descriptors (passed in to + * `resolve_descriptors`) and translates them for the wrapped loop. + * The new descriptors MUST be viewable with the old ones, `NULL` must be + * supported (for outputs) and should normally be forwarded. + * + * The function must clean up on error. + * + * NOTE: We currently assume that this translation gives "viewable" results. + * I.e. there is no additional casting related to the wrapping process. + * In principle that could be supported, but not sure it is useful. + * This currently also means that e.g. alignment must apply identically + * to the new dtypes. + * + * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast` + * there is no way to "pass out" the result of this function. This means + * it will be called twice for every ufunc call. + * (I am considering including `auxdata` as an "optional" parameter to + * `resolve_descriptors`, so that it can be filled there if not NULL.) + */ +typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, + PyArray_DTypeMeta *const wrapped_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]); + +/** + * The function to convert the actual loop descriptors (as returned by the + * original `resolve_descriptors` function) to the ones the output array + * should use. + * This function must return "viewable" types, it must not mutate them in any + * form that would break the inner-loop logic. Does not need to support NULL. + * + * The function must clean up on error. + * + * @param nin Number of input arguments + * @param nout Number of output arguments + * @param new_dtypes The DTypes of the output (usually probably not needed) + * @param given_descrs Original given_descrs to the resolver, necessary to + * fetch any information related to the new dtypes from the original. + * @param original_descrs The `loop_descrs` returned by the wrapped loop. + * @param loop_descrs The output descriptors, compatible to `original_descrs`. + * + * @returns 0 on success, -1 on failure. + */ +typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout, + PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[], + PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]); + + + +/* + * A traverse loop working on a single array. This is similar to the general + * strided-loop function. This is designed for loops that need to visit every + * element of a single array. + * + * Currently this is used for array clearing, via the NPY_DT_get_clear_loop + * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook. + * These are most useful for handling arrays storing embedded references to + * python objects or heap-allocated data. + * + * The `void *traverse_context` is passed in because we may need to pass in + * Interpreter state or similar in the future, but we don't want to pass in + * a full context (with pointers to dtypes, method, caller which all make + * no sense for a traverse function). + * + * We assume for now that this context can be just passed through in the + * the future (for structured dtypes). 
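A sketch of what a conforming loop looks like for the typedef that follows, assuming a dtype whose buffer stores embedded `PyObject *` values that need clearing:

```c
/* Sketch: a clear-loop for data holding embedded PyObject pointers. */
static int
object_clear_loop(void *NPY_UNUSED(traverse_context),
                  const PyArray_Descr *NPY_UNUSED(descr),
                  char *data, npy_intp size, npy_intp stride,
                  NpyAuxData *NPY_UNUSED(auxdata))
{
    while (size--) {
        PyObject *obj;
        memcpy(&obj, data, sizeof(obj));  /* buffer may be unaligned */
        Py_XDECREF(obj);
        memset(data, 0, sizeof(obj));     /* leave the slot cleared */
        data += stride;
    }
    return 0;
}
```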
+ * + */ +typedef int (PyArrayMethod_TraverseLoop)( + void *traverse_context, const PyArray_Descr *descr, char *data, + npy_intp size, npy_intp stride, NpyAuxData *auxdata); + + +/* + * Simplified get_loop function specific to dtype traversal + * + * It should set the flags needed for the traversal loop and set out_loop to the + * loop function, which must be a valid PyArrayMethod_TraverseLoop + * pointer. Currently this is used for zero-filling and clearing arrays storing + * embedded references. + * + */ +typedef int (PyArrayMethod_GetTraverseLoop)( + void *traverse_context, const PyArray_Descr *descr, + int aligned, npy_intp fixed_stride, + PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, + NPY_ARRAYMETHOD_FLAGS *flags); + + +/* + * Type of the C promoter function, which must be wrapped into a + * PyCapsule with name "numpy._ufunc_promoter". + * + * Note that currently the output dtypes are always NULL unless they are + * also part of the signature. This is an implementation detail and could + * change in the future. However, in general promoters should not have a + * need for output dtypes. + * (There are potential use-cases, these are currently unsupported.) + */ +typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]); + +/* + * **************************** + * DTYPE API + * **************************** + */ + +#define NPY_DT_ABSTRACT 1 << 1 +#define NPY_DT_PARAMETRIC 1 << 2 +#define NPY_DT_NUMERIC 1 << 3 + +/* + * These correspond to slots in the NPY_DType_Slots struct and must + * be in the same order as the members of that struct. If new slots + * get added or old slots get removed NPY_NUM_DTYPE_SLOTS must also + * be updated + */ + +#define NPY_DT_discover_descr_from_pyobject 1 +// this slot is considered private because its API hasn't been decided +#define _NPY_DT_is_known_scalar_type 2 +#define NPY_DT_default_descr 3 +#define NPY_DT_common_dtype 4 +#define NPY_DT_common_instance 5 +#define NPY_DT_ensure_canonical 6 +#define NPY_DT_setitem 7 +#define NPY_DT_getitem 8 +#define NPY_DT_get_clear_loop 9 +#define NPY_DT_get_fill_zero_loop 10 +#define NPY_DT_finalize_descr 11 + +// These PyArray_ArrFunc slots will be deprecated and replaced eventually +// getitem and setitem can be defined as a performance optimization; +// by default the user dtypes call `legacy_getitem_using_DType` and +// `legacy_setitem_using_DType`, respectively. This functionality is +// only supported for basic NumPy DTypes. 
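Putting the slot IDs above to use: a sketch of a slot table for a hypothetical user DType. All four callbacks are assumed to be defined elsewhere with the `PyArrayDTypeMeta_*` typedef signatures that appear later in this header:

```c
/* Hypothetical callbacks, matching the typedefs further down. */
static PyArray_Descr *mydtype_default_descr(PyArray_DTypeMeta *cls);
static PyArray_DTypeMeta *mydtype_common_dtype(PyArray_DTypeMeta *dt1,
                                               PyArray_DTypeMeta *dt2);
static int mydtype_setitem(PyArray_Descr *descr, PyObject *obj, char *ptr);
static PyObject *mydtype_getitem(PyArray_Descr *descr, char *ptr);

/* Sketch: zero-terminated slot table for the hypothetical DType. */
static PyType_Slot mydtype_slots[] = {
    {NPY_DT_default_descr, &mydtype_default_descr},
    {NPY_DT_common_dtype, &mydtype_common_dtype},
    {NPY_DT_setitem, &mydtype_setitem},
    {NPY_DT_getitem, &mydtype_getitem},
    {0, NULL}
};
```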
+ + +// used to separate dtype slots from arrfuncs slots +// intended only for internal use but defined here for clarity +#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10) + +// Cast is disabled +// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET + +#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET + +// Copyswap is disabled +// #define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET + +// Casting related slots are disabled. See +// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163 +// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET + +// These are deprecated in NumPy 1.19, so are disabled here. +// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET + + +// TODO: These slots probably still need some thought, and/or a way to "grow"? +typedef struct { + PyTypeObject *typeobj; /* type of python scalar or NULL */ + int flags; /* flags, including parametric and abstract */ + /* NULL terminated cast definitions. Use NULL for the newly created DType */ + PyArrayMethod_Spec **casts; + PyType_Slot *slots; + /* Baseclass or NULL (will always subclass `np.dtype`) */ + PyTypeObject *baseclass; +} PyArrayDTypeMeta_Spec; + + +typedef PyArray_Descr *(PyArrayDTypeMeta_DiscoverDescrFromPyobject)( + PyArray_DTypeMeta *cls, PyObject *obj); + +/* + * Before making this public, we should decide whether it should pass + * the type, or allow looking at the object. A possible use-case: + * `np.array(np.array([0]), dtype=np.ndarray)` + * Could consider arrays that are not `dtype=ndarray` "scalars". + */ +typedef int (PyArrayDTypeMeta_IsKnownScalarType)( + PyArray_DTypeMeta *cls, PyTypeObject *obj); + +typedef PyArray_Descr *(PyArrayDTypeMeta_DefaultDescriptor)(PyArray_DTypeMeta *cls); +typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)( + PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2); + + +/* + * Convenience utility for getting a reference to the DType metaclass associated + * with a dtype instance. 
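Continuing the hypothetical DType from the previous sketch, the `PyArrayDTypeMeta_Spec` above would be filled in roughly like this (registration itself goes through the API table, e.g. `PyArrayInitDTypeMeta_FromSpec`, which is not declared in this header):

```c
/* Sketch: spec for the hypothetical DType; no extra casts are defined. */
static PyArrayMethod_Spec *mydtype_casts[] = { NULL };  /* NULL-terminated */

static PyArrayDTypeMeta_Spec mydtype_spec = {
    .typeobj = NULL,     /* set to the scalar's PyTypeObject at runtime */
    .flags = NPY_DT_NUMERIC,
    .casts = mydtype_casts,
    .slots = mydtype_slots,
    .baseclass = NULL,   /* subclass plain np.dtype */
};
```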
+ */ +#define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr)) + +static inline PyArray_DTypeMeta * +NPY_DT_NewRef(PyArray_DTypeMeta *o) { + Py_INCREF((PyObject *)o); + return o; +} + + +typedef PyArray_Descr *(PyArrayDTypeMeta_CommonInstance)( + PyArray_Descr *dtype1, PyArray_Descr *dtype2); +typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype); +/* + * Returns either a new reference to *dtype* or a new descriptor instance + * initialized with the same parameters as *dtype*. The caller cannot know + * which choice a dtype will make. This function is called just before the + * array buffer is created for a newly created array, it is not called for + * views and the descriptor returned by this function is attached to the array. + */ +typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype); + +/* + * TODO: These two functions are currently only used for experimental DType + * API support. Their relation should be "reversed": NumPy should + * always use them internally. + * There are open points about "casting safety" though, e.g. setting + * elements is currently always unsafe. + */ +typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *); +typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *); + +#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/halffloat.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/halffloat.h new file mode 100644 index 0000000..9504016 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/halffloat.h @@ -0,0 +1,70 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Half-precision routines + */ + +/* Conversions */ +float npy_half_to_float(npy_half h); +double npy_half_to_double(npy_half h); +npy_half npy_float_to_half(float f); +npy_half npy_double_to_half(double d); +/* Comparisons */ +int npy_half_eq(npy_half h1, npy_half h2); +int npy_half_ne(npy_half h1, npy_half h2); +int npy_half_le(npy_half h1, npy_half h2); +int npy_half_lt(npy_half h1, npy_half h2); +int npy_half_ge(npy_half h1, npy_half h2); +int npy_half_gt(npy_half h1, npy_half h2); +/* faster *_nonan variants for when you know h1 and h2 are not NaN */ +int npy_half_eq_nonan(npy_half h1, npy_half h2); +int npy_half_lt_nonan(npy_half h1, npy_half h2); +int npy_half_le_nonan(npy_half h1, npy_half h2); +/* Miscellaneous functions */ +int npy_half_iszero(npy_half h); +int npy_half_isnan(npy_half h); +int npy_half_isinf(npy_half h); +int npy_half_isfinite(npy_half h); +int npy_half_signbit(npy_half h); +npy_half npy_half_copysign(npy_half x, npy_half y); +npy_half npy_half_spacing(npy_half h); +npy_half npy_half_nextafter(npy_half x, npy_half y); +npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus); + +/* + * Half-precision constants + */ + +#define NPY_HALF_ZERO (0x0000u) +#define NPY_HALF_PZERO (0x0000u) +#define NPY_HALF_NZERO (0x8000u) +#define NPY_HALF_ONE (0x3c00u) +#define NPY_HALF_NEGONE (0xbc00u) +#define NPY_HALF_PINF (0x7c00u) +#define NPY_HALF_NINF (0xfc00u) +#define NPY_HALF_NAN (0x7e00u) + +#define NPY_MAX_HALF (0x7bffu) + +/* + * Bit-level conversions + */ + +npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); +npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); +npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); +npy_uint64 
npy_halfbits_to_doublebits(npy_uint16 h); + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarrayobject.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarrayobject.h new file mode 100644 index 0000000..f06bafe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarrayobject.h @@ -0,0 +1,304 @@ +/* + * DON'T INCLUDE THIS DIRECTLY. + */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "ndarraytypes.h" +#include "dtype_api.h" + +/* Includes the "function" C-API -- these are all stored in a + list of pointers --- one for each file + The two lists are concatenated into one in multiarray. + + They are available as import_array() +*/ + +#include "__multiarray_api.h" + +/* + * Include any definitions which are defined differently for 1.x and 2.x + * (Symbols only available on 2.x are not there, but rather guarded.) + */ +#include "npy_2_compat.h" + +/* C-API that requires previous API to be defined */ + +#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) + +#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) +#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) + +#define PyArray_HasArrayInterfaceType(op, type, context, out) \ + ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromArrayAttr(op, type, context)) != \ + Py_NotImplemented)) + +#define PyArray_HasArrayInterface(op, out) \ + PyArray_HasArrayInterfaceType(op, NULL, NULL, out) + +#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ + (PyArray_NDIM((PyArrayObject *)op) == 0)) + +#define PyArray_IsScalar(obj, cls) \ + (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) + +#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ + PyArray_IsZeroDim(m)) +#define PyArray_IsPythonNumber(obj) \ + (PyFloat_Check(obj) || PyComplex_Check(obj) || \ + PyLong_Check(obj) || PyBool_Check(obj)) +#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \ + || PyArray_IsScalar((obj), Integer)) +#define PyArray_IsPythonScalar(obj) \ + (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \ + PyUnicode_Check(obj)) + +#define PyArray_IsAnyScalar(obj) \ + (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) + +#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ + PyArray_CheckScalar(obj)) + + +#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \ + Py_INCREF(m), (m) : \ + (PyArrayObject *)(PyArray_Copy(m))) + +#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ + PyArray_CompareLists(PyArray_DIMS(a1), \ + PyArray_DIMS(a2), \ + PyArray_NDIM(a1))) + +#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) +#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) +#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) + +#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ + NULL) + +#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ + PyArray_DescrFromType(type), 0, 0, 0, NULL) + +#define PyArray_FROM_OTF(m, type, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? 
\ + ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) + +#define PyArray_FROMANY(m, type, min, max, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) + +#define PyArray_ZEROS(m, dims, type, is_f_order) \ + PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_EMPTY(m, dims, type, is_f_order) \ + PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ + PyArray_NBYTES(obj)) + +#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT, NULL) + +#define PyArray_EquivArrTypes(a1, a2) \ + PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) + +#define PyArray_EquivByteorders(b1, b2) \ + (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) + +#define PyArray_SimpleNew(nd, dims, typenum) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) + +#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ + data, 0, NPY_ARRAY_CARRAY, NULL) + +#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ + PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ + NULL, NULL, 0, NULL) + +#define PyArray_ToScalar(data, arr) \ + PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) + + +/* These might be faster without the dereferencing of obj + going on inside -- of course an optimizing compiler should + inline the constants inside a for loop making it a moot point +*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0])) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1])) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2])) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2] + \ + (l)*PyArray_STRIDES(obj)[3])) + +static inline void +PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) +{ + PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; + if (fa && fa->base) { + if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) { + PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); + Py_DECREF(fa->base); + fa->base = NULL; + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); + } + } +} + +#define PyArray_DESCR_REPLACE(descr) do { \ + PyArray_Descr *_new_; \ + _new_ = PyArray_DescrNew(descr); \ + Py_XDECREF(descr); \ + descr = _new_; \ + } while(0) + +/* Copy should always return contiguous array */ +#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) + +#define PyArray_FromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_BEHAVED | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_ENSURECOPY | \ + NPY_ARRAY_DEFAULT | 
\ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_Cast(mp, type_num) \ + PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) + +#define PyArray_Take(ap, items, axis) \ + PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) + +#define PyArray_Put(ap, items, values) \ + PyArray_PutTo(ap, items, values, NPY_RAISE) + + +/* + Check to see if this key in the dictionary is the "title" + entry of the tuple (i.e. a duplicate dictionary entry in the fields + dict). +*/ + +static inline int +NPY_TITLE_KEY_check(PyObject *key, PyObject *value) +{ + PyObject *title; + if (PyTuple_Size(value) != 3) { + return 0; + } + title = PyTuple_GetItem(value, 2); + if (key == title) { + return 1; + } +#ifdef PYPY_VERSION + /* + * On PyPy, dictionary keys do not always preserve object identity. + * Fall back to comparison by value. + */ + if (PyUnicode_Check(title) && PyUnicode_Check(key)) { + return PyUnicode_Compare(title, key) == 0 ? 1 : 0; + } +#endif + return 0; +} + +/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */ +#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value))) + +#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) +#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) + + +/* + * These macros and functions unfortunately require runtime version checks + * that are only defined in `npy_2_compat.h`. For that reasons they cannot be + * part of `ndarraytypes.h` which tries to be self contained. + */ + +static inline npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return PyDataType_ELSIZE(((PyArrayObject_fields *)arr)->descr); +} + +#define PyDataType_HASFIELDS(obj) (PyDataType_ISLEGACY((PyArray_Descr*)(obj)) && PyDataType_NAMES((PyArray_Descr*)(obj)) != NULL) +#define PyDataType_HASSUBARRAY(dtype) (PyDataType_ISLEGACY(dtype) && PyDataType_SUBARRAY(dtype) != NULL) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ + !PyDataType_HASFIELDS(dtype)) + +#define PyDataType_FLAGCHK(dtype, flag) \ + ((PyDataType_FLAGS(dtype) & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) +/* The internal copy of this is now defined in `dtypemeta.h` */ +/* + * `PyArray_Scalar` is the same as this function but converts will convert + * most NumPy types to Python scalars. + */ +static inline PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +/* + * SETITEM should only be used if it is known that the value is a scalar + * and of a type understood by the arrays dtype. + * Use `PyArray_Pack` if the value may be of a different dtype. 
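A small sketch of the boxed-element round trip using `PyArray_GETITEM` above and `PyArray_SETITEM` defined just below; it copies the first element of `src` into the first element of `dst` and assumes the dtypes are compatible:

```c
/* Sketch: boxed copy of element 0; assumes compatible array dtypes. */
static int
copy_first_item(PyArrayObject *dst, PyArrayObject *src)
{
    PyObject *item = PyArray_GETITEM(src, (const char *)PyArray_DATA(src));
    int ret;

    if (item == NULL) {
        return -1;
    }
    ret = PyArray_SETITEM(dst, (char *)PyArray_DATA(dst), item);
    Py_DECREF(item);
    return ret;
}
```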
+ */ +static inline int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem(v, itemptr, arr); +} +#endif /* not internal */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarraytypes.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarraytypes.h new file mode 100644 index 0000000..baa4240 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarraytypes.h @@ -0,0 +1,1950 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ + +#include "npy_common.h" +#include "npy_endian.h" +#include "npy_cpu.h" +#include "utils.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN + +/* Always allow threading unless it was explicitly disabled at build time */ +#if !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + * + * As of NumPy 2.0, we strongly discourage the downstream use of NPY_MAXDIMS, + * but since auditing everything seems a big ask, define it as 64. + * A future version could: + * - Increase or remove the limit and require recompilation (like 2.0 did) + * - Deprecate or remove the macro but keep the limit (at basically any time) + */ +#define NPY_MAXDIMS 64 +/* We cannot change this as it would break ABI: */ +#define NPY_MAXDIMS_LEGACY_ITERS 32 +/* NPY_MAXARGS is version dependent and defined in npy_2_compat.h */ + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. + */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_CHAR, /* Deprecated, will raise if used */ + + /* The number of *legacy* dtypes */ + NPY_NTYPES_LEGACY=24, + + /* assign a high value to avoid changing this in the + future when new dtypes are added */ + NPY_NOTYPE=25, + + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21, + + /* + * New DTypes which do not share the legacy layout + * (added after NumPy 2.0). VSTRING is the first of these + * we may open up a block for user-defined dtypes in the + * future. 
+ */ + NPY_VSTRING=2056, +}; + + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_DEPRECATED_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * New non-legacy DTypes + */ + NPY_VSTRINGLTR = 'T', + + /* + * Note, we removed `NPY_INTPLTR` due to changing its definition + * to 'n', rather than 'p'. On any typical platform this is the + * same integer. 'n' should be used for the `np.intp` with the same + * size as `size_t` while 'p' remains pointer sized. + * + * 'p', 'P', 'n', and 'N' are valid and defined explicitly + * in `arraytypes.c.src`. + */ + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. + */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c', + +}; + +/* + * Changing this may break Numpy API compatibility + * due to changing offsets in PyArray_ArrFuncs, so be + * careful. Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will + * depend on the data type. 
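For consumers, the sort kinds enumerated just below are typically passed straight to `PyArray_Sort`; a minimal fragment as a sketch:

```c
/* Sketch: stable in-place sort along the last axis of `arr`. */
if (PyArray_Sort(arr, -1, NPY_STABLESORT) < 0) {
    return NULL;  /* a Python exception is already set */
}
```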
+ */ +typedef enum { + _NPY_SORT_UNDEFINED=-1, + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2, + NPY_STABLESORT=2, +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_STABLESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0 +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + _NPY_ERROR_OCCURRED_IN_CAST = -1, + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +typedef enum { + NPY_VALID=0, + NPY_SAME=1, + NPY_FULL=2 +} NPY_CORRELATEMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1) + +/* The FR in the unit names stands for frequency */ +typedef enum { + /* Force signed enum type, must be -1 for code compatibility */ + NPY_FR_ERROR = -1, /* error or undetermined */ + + /* Start of valid units */ + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10, /* nanoseconds */ + NPY_FR_ps = 11, /* picoseconds */ + NPY_FR_fs = 12, /* femtoseconds */ + NPY_FR_as = 13, /* attoseconds */ + NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. 
*/ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +/* +* Macros to define how array, and dimension/strides data is +* allocated. These should be made private +*/ + +#define NPY_USE_PYMEM 1 + + +#if NPY_USE_PYMEM == 1 +/* use the Raw versions which are safe to call with the GIL released */ +#define PyArray_malloc PyMem_RawMalloc +#define PyArray_free PyMem_RawFree +#define PyArray_realloc PyMem_RawRealloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
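Fleshing out the `constant_multiplier_aux_data` example sketched in the comment above: the `NpyAuxData` base must come first with its `free` and `clone` pointers filled in, so that `NPY_AUXDATA_FREE`/`NPY_AUXDATA_CLONE` work. A minimal sketch:

```c
typedef struct {
    NpyAuxData base;
    double constant;
} constant_multiplier_aux_data;

static void
cm_free(NpyAuxData *auxdata)
{
    PyMem_RawFree(auxdata);
}

static NpyAuxData *
cm_clone(NpyAuxData *auxdata)
{
    NpyAuxData *clone = PyMem_RawMalloc(sizeof(constant_multiplier_aux_data));
    if (clone != NULL) {
        memcpy(clone, auxdata, sizeof(constant_multiplier_aux_data));
    }
    return clone;
}

static constant_multiplier_aux_data *
cm_new(double constant)
{
    /* Zero-fill so the reserved expansion slots start out NULL. */
    constant_multiplier_aux_data *aux = PyMem_RawCalloc(1, sizeof(*aux));
    if (aux != NULL) {
        aux->base.free = &cm_free;
        aux->base.clone = &cm_clone;
        aux->constant = constant;
    }
    return aux;
}
```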
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. Should return 0 on success + * and -1 on failure. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + void *_unused1; + void *_unused2; + void *_unused3; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + + +/* The item must be reference counted when it is inserted or extracted. */ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. 
*/ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +/* + * Public version of the Descriptor struct as of 2.x + */ +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). + */ + char byteorder; + /* Former flags flags space (unused) to ensure type_num is stable. */ + char _former_flags; + /* number representing this type */ + int type_num; + /* Space for dtype instance specific flags. */ + npy_uint64 flags; + /* element size (itemsize) for this type */ + npy_intp elsize; + /* alignment needed for this type */ + npy_intp alignment; + /* metadata dict or NULL */ + PyObject *metadata; + /* Cached hash value (-1 if not yet computed). */ + npy_hash_t hash; + /* Unused slot (must be initialized to NULL) for future use */ + void *reserved_null[2]; +} PyArray_Descr; + +#else /* 1.x and 2.x compatible version (only shared fields): */ + +typedef struct _PyArray_Descr { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; +} PyArray_Descr; + +/* To access modified fields, define the full 2.0 struct: */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; + npy_uint64 flags; + npy_intp elsize; + npy_intp alignment; + PyObject *metadata; + npy_hash_t hash; + void *reserved_null[2]; +} _PyArray_DescrNumPy2; + +#endif /* 1.x and 2.x compatible version */ + +/* + * Semi-private struct with additional field of legacy descriptors (must + * check NPY_DT_is_legacy before casting/accessing). The struct is also not + * valid when running on 1.x (i.e. in public API use). + */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; + npy_uint64 flags; + npy_intp elsize; + npy_intp alignment; + PyObject *metadata; + npy_hash_t hash; + void *reserved_null[2]; + struct _arr_descr *subarray; + PyObject *fields; + PyObject *names; + NpyAuxData *c_metadata; +} _PyArray_LegacyDescr; + + +/* + * Umodified PyArray_Descr struct identical to NumPy 1.x. This struct is + * used as a prototype for registering a new legacy DType. + * It is also used to access the fields in user code running on 1.x. 
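One common use of the item flags above is deciding whether a raw `memcpy` of elements is safe; a sketch using the `PyDataType_FLAGCHK` helper from ndarrayobject.h earlier in this patch:

```c
/* Sketch: items can be moved with memcpy only if they hold no Python
 * references and need no initialization. */
static int
can_memcpy_items(PyArray_Descr *descr)
{
    return !PyDataType_FLAGCHK(descr, NPY_ITEM_REFCOUNT) &&
           !PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT);
}
```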
+ */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char flags; + int type_num; + int elsize; + int alignment; + struct _arr_descr *subarray; + PyObject *fields; + PyObject *names; + PyArray_ArrFuncs *f; + PyObject *metadata; + NpyAuxData *c_metadata; + npy_hash_t hash; +} PyArray_DescrProto; + + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * Memory handler structure for array data. + */ +/* The declaration of free differs from PyMemAllocatorEx */ +typedef struct { + void *ctx; + void* (*malloc) (void *ctx, size_t size); + void* (*calloc) (void *ctx, size_t nelem, size_t elsize); + void* (*realloc) (void *ctx, void *ptr, size_t new_size); + void (*free) (void *ctx, void *ptr, size_t size); + /* + * This is the end of the version=1 struct. Only add new fields after + * this line + */ +} PyDataMemAllocator; + +typedef struct { + char name[127]; /* multiple of 64 to keep the struct aligned */ + uint8_t version; /* currently 1 */ + PyDataMemAllocator allocator; +} PyDataMem_Handler; + + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. Except in the + * case of WRITEBACKIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that should be + * decref'd on deletion + * + * For WRITEBACKIFCOPY flag this is an + * array to-be-updated upon calling + * PyArray_ResolveWritebackIfCopy + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + void *_buffer_info; /* private buffer info, tagged to allow warning */ +#endif + /* + * For malloc/calloc/realloc/free per object + */ +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + PyObject *mem_handler; +#endif +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. 
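A sketch of a custom allocator plugged into the version-1 handler layout above; it only counts live allocations on top of the C library allocator (assumes `<stdlib.h>`). Installation happens elsewhere, via `PyDataMem_SetHandler` with the handler wrapped in a capsule; that function lives in the API table, not this header:

```c
static size_t live_allocations;  /* toy bookkeeping for the sketch */

static void *
counting_malloc(void *ctx, size_t size)
{
    (void)ctx;
    live_allocations++;
    return malloc(size);
}

static void *
counting_calloc(void *ctx, size_t nelem, size_t elsize)
{
    (void)ctx;
    live_allocations++;
    return calloc(nelem, elsize);
}

static void *
counting_realloc(void *ctx, void *ptr, size_t new_size)
{
    (void)ctx;
    return realloc(ptr, new_size);
}

static void
counting_free(void *ctx, void *ptr, size_t size)
{
    (void)ctx; (void)size;
    live_allocations--;
    free(ptr);
}

static PyDataMem_Handler counting_handler = {
    "counting_allocator",   /* name */
    1,                      /* version */
    {NULL, counting_malloc, counting_calloc, counting_realloc, counting_free},
};
```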
+ */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +/* + * Removed 2020-Nov-25, NumPy 1.20 + * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + * + * The above macro was removed as it gave a false sense of a stable ABI + * with respect to the structures size. If you require a runtime constant, + * you can use `PyArray_Type.tp_basicsize` instead. Otherwise, please + * see the PyArrayObject documentation or ask the NumPy developers for + * information on how to correctly replace the macro in a way that is + * compatible with multiple NumPy versions. + */ + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This structure contains an exploded view of a timedelta value */ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * A higher dimensional array always has the same contiguity flags as + * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are + * effectively ignored when checking for contiguity. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. See also: NPY_ARRAY_ENSURENOCOPY = 0x4000. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. 
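+ * (Editor's illustrative note, not upstream text: such a requirement is
+ * passed to a converter; assuming obj is an array-like PyObject*,
+ *
+ *     PyObject *res = PyArray_FromAny(obj, NULL, 0, 0,
+ *                                     NPY_ARRAY_ELEMENTSTRIDES |
+ *                                     NPY_ARRAY_ALIGNED, NULL);
+ *
+ * requests element-sized strides on the result.)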
+ * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropriate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when PyArray_ResolveWritebackIfCopy is called. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 + +/* + * No copy may be made while converting from an object/array (result is a view) + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURENOCOPY 0x4000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. + */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. 
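+ *
+ * Editor's illustrative note (not upstream text): the flag-test macros
+ * below simply wrap PyArray_CHKFLAGS, so a typical guard reads:
+ *
+ *     if (PyArray_ISCONTIGUOUS(arr) && PyArray_ISWRITEABLE(arr)) {
+ *         memcpy(PyArray_DATA(arr), src, nbytes);  // src, nbytes hypothetical
+ *     }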
+ */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS) + +/* the variable is used in some places, so always define it */ +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do { if (_save) \ + { PyEval_RestoreThread(_save); _save = NULL;} } while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \ + { _save = PyEval_SaveThread();} } while (0); + + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 +/* + * If output operands overlap with other operands (based on heuristics that + * has false positives but no false negatives), make temporary copies to + * eliminate overlap. 
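+ * (Editor's illustrative sketch, not upstream text: global and per-operand
+ * flags are OR'd into one mask at construction time; for a single operand
+ * arr, a PyArrayObject*, one might write
+ *
+ *     NpyIter *iter = NpyIter_New(arr,
+ *             NPY_ITER_READWRITE | NPY_ITER_BUFFERED |
+ *             NPY_ITER_EXTERNAL_LOOP | NPY_ITER_COPY_IF_OVERLAP,
+ *             NPY_KEEPORDER, NPY_NO_CASTING, NULL);
+ *
+ * to request buffered, overlap-safe iteration.)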
+ */ +#define NPY_ITER_COPY_IF_OVERLAP 0x00002000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 +/* Assume iterator order data access for COPY_IF_OVERLAP */ +#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)( + PyArrayIterObject* iter, const npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + 
(it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_ITEMSIZE(_PyAIT(it)->ao); \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp)(ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_ITEMSIZE(_PyAIT(it)->ao); \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->coordinates[__npy_i] = \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]); \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. + */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; /* dimensions */ + /* + * Space for the individual iterators, do not specify size publicly + * to allow changing it more easily. + * One reason is that Cython uses this for checks and only allows + * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS + * to be runtime dependent. + */ +#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + PyArrayIterObject *iters[64]; +#elif defined(__cplusplus) + /* + * C++ doesn't strictly support flexible members and gives compilers + * warnings (pedantic only), so we lie. We can't make it 64 because + * then Cython is unhappy (larger struct at runtime is OK smaller not). 
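+ * Editor's note (illustrative, not upstream text): consequently, downstream
+ * code should not assume any particular capacity here and should use the
+ * accessors defined below, e.g. PyArray_MultiIter_NUMITER(multi) and
+ * PyArray_MultiIter_ITERS(multi).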
+ */ + PyArrayIterObject *iters[32]; +#else + PyArrayIterObject *iters[]; +#endif +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + + +static NPY_INLINE int +PyArray_MultiIter_NUMITER(PyArrayMultiIterObject *multi) +{ + return multi->numiter; +} + + +static NPY_INLINE npy_intp +PyArray_MultiIter_SIZE(PyArrayMultiIterObject *multi) +{ + return multi->size; +} + + +static NPY_INLINE npy_intp +PyArray_MultiIter_INDEX(PyArrayMultiIterObject *multi) +{ + return multi->index; +} + + +static NPY_INLINE int +PyArray_MultiIter_NDIM(PyArrayMultiIterObject *multi) +{ + return multi->nd; +} + + +static NPY_INLINE npy_intp * +PyArray_MultiIter_DIMS(PyArrayMultiIterObject *multi) +{ + return multi->dimensions; +} + + +static NPY_INLINE void ** +PyArray_MultiIter_ITERS(PyArrayMultiIterObject *multi) +{ + return (void**)multi->iters; +} + + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation 
of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static inline int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static inline int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static inline int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#include "_neighborhood_iterator_imp.h" +#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ + + + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE +/* default integer type defined in npy_2_compat header */ + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +static inline int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static inline void * +PyArray_DATA(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static inline char * +PyArray_BYTES(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static inline npy_intp * +PyArray_DIMS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static inline npy_intp * +PyArray_STRIDES(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static inline npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static inline npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static inline NPY_RETURNS_BORROWED_REF PyObject * +PyArray_BASE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static inline NPY_RETURNS_BORROWED_REF PyArray_Descr * +PyArray_DESCR(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static inline int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + + +static inline int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static inline int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & flags) == flags; +} + +static inline PyArray_Descr * +PyArray_DTYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static inline npy_intp * +PyArray_SHAPE(const PyArrayObject *arr) +{ + 
return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static inline void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static inline void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + static inline NPY_RETURNS_BORROWED_REF PyObject * + PyArray_HANDLER(PyArrayObject *arr) + { + return ((PyArrayObject_fields *)arr)->mem_handler; + } +#endif + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define PyDataType_ISLEGACY(dtype) ((dtype)->type_num < NPY_VSTRING && ((dtype)->type_num >= 0)) +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) +/* + * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED, + * SUBARRAY, NAMES, FIELDS, C_METADATA, and 
METADATA require version specific + * lookup and are defined in npy_2_compat.h. + */ + + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) + +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's stored in the + * PyCapsule returned by an array's __array_struct__ attribute. See + * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. 
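+ *
+ * Editor's illustrative sketch (not upstream text): a consumer usually
+ * unwraps the capsule along these lines, with obj the exporting object:
+ *
+ *     PyObject *cap = PyObject_GetAttrString(obj, "__array_struct__");
+ *     PyArrayInterface *inter =
+ *             (PyArrayInterface *)PyCapsule_GetPointer(cap, NULL);
+ *     if (inter != NULL && inter->two == 2) {
+ *         // use inter->nd, inter->shape, inter->strides, inter->data
+ *     }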
+ ************************************************************/
+typedef struct {
+    int two;              /*
+                           * contains the integer 2 as a sanity
+                           * check
+                           */
+
+    int nd;               /* number of dimensions */
+
+    char typekind;        /*
+                           * kind in array --- character code of
+                           * typestr
+                           */
+
+    int itemsize;         /* size of each element */
+
+    int flags;            /*
+                           * how the data should be interpreted. Valid
+                           * flags are CONTIGUOUS (1), F_CONTIGUOUS (2),
+                           * ALIGNED (0x100), NOTSWAPPED (0x200), and
+                           * WRITEABLE (0x400). ARR_HAS_DESCR (0x800)
+                           * states that the arrdescr field is present
+                           * in the structure
+                           */
+
+    npy_intp *shape;      /*
+                           * A length-nd array of shape
+                           * information
+                           */
+
+    npy_intp *strides;    /* A length-nd array of stride information */
+
+    void *data;           /* A pointer to the first element of the array */
+
+    PyObject *descr;      /*
+                           * A list of fields or NULL (ignored if flags
+                           * does not have the ARR_HAS_DESCR flag set)
+                           */
+} PyArrayInterface;
+
+
+/****************************************
+ * NpyString
+ *
+ * Types used by the NpyString API.
+ ****************************************/
+
+/*
+ * A "packed" encoded string. The string data must be accessed by first
+ * unpacking the string.
+ */
+typedef struct npy_packed_static_string npy_packed_static_string;
+
+/*
+ * An unpacked read-only view onto the data in a packed string
+ */
+typedef struct npy_unpacked_static_string {
+    size_t size;
+    const char *buf;
+} npy_static_string;
+
+/*
+ * Handles heap allocations for static strings.
+ */
+typedef struct npy_string_allocator npy_string_allocator;
+
+typedef struct {
+    PyArray_Descr base;
+    // The object representing a null value
+    PyObject *na_object;
+    // Flag indicating whether or not to coerce arbitrary objects to strings
+    char coerce;
+    // Flag indicating whether the na object is NaN-like
+    char has_nan_na;
+    // Flag indicating whether the na object is a string
+    char has_string_na;
+    // If nonzero, indicates that this instance is already owned by an array
+    char array_owned;
+    // The string data to use when a default string is needed
+    npy_static_string default_string;
+    // The name of the missing data object, if any
+    npy_static_string na_name;
+    // The allocator should only be directly accessed after acquiring the
+    // allocator_lock, and the lock should be released immediately after
+    // the allocator is no longer needed
+    npy_string_allocator *allocator;
+} PyArray_StringDTypeObject;
+
+/*
+ * PyArray_DTypeMeta related definitions.
+ *
+ * As of now, this API is preliminary and will be extended as necessary.
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+    /*
+     * The structures defined in this block are currently considered
+     * private API and may change without warning!
+     * Part of this (at least the size) is expected to become public API
+     * without further modifications.
+     */
+    /* TODO: Make this definition public in the API as soon as it's settled */
+    NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type;
+
+    /*
+     * While NumPy DTypes would not need to be heap types, the plan is to
+     * make DTypes available in Python, at which point they will be heap
+     * types. Since we also wish to add fields to the DType class, this
+     * looks like a typical instance definition, but with PyHeapTypeObject
+     * instead of only the PyObject_HEAD.
+     * This must only be exposed after extremely careful consideration,
+     * since it is a fairly complex construct which it may be better to
+     * allow refactoring of.
+ */ + typedef struct { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance, for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* Copy of the legacy DTypes type number, usually invalid. */ + int type_num; + + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* + * DType flags to signal legacy, parametric, or + * abstract. But plenty of space for additional information/flags. + */ + npy_uint64 flags; + + /* + * Use indirection in order to allow a fixed size for this struct. + * A stable ABI size makes creating a static DType less painful + * while also ensuring flexibility for all opaque API (with one + * indirection due the pointer lookup). + */ + void *dt_slots; + void *reserved[3]; + } PyArray_DTypeMeta; + +#endif /* NPY_INTERNAL_BUILD */ + + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." +#endif +#define NPY_DEPRECATED_INCLUDES +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + * Then in the npy_1_9_deprecated_api.h header add something like this + * -------------------- + * #ifndef NPY_DEPRECATED_INCLUDES + * #error "Should never include npy_*_*_deprecated_api directly." + * #endif + * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * + * #ifndef NPY_NO_DEPRECATED_API + * #if defined(_WIN32) + * #define _WARN___STR2__(x) #x + * #define _WARN___STR1__(x) _WARN___STR2__(x) + * #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + * #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") + * #else + * #warning "Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" + * #endif + * #endif + * -------------------- + */ +#undef NPY_DEPRECATED_INCLUDES + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_compat.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_compat.h new file mode 100644 index 0000000..e39e65a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_compat.h @@ -0,0 +1,249 @@ +/* + * This header file defines relevant features which: + * - Require runtime inspection depending on the NumPy version. + * - May be needed when compiling with an older version of NumPy to allow + * a smooth transition. + * + * As such, it is shipped with NumPy 2.0, but designed to be vendored in full + * or parts by downstream projects. + * + * It must be included after any other includes. `import_array()` must have + * been called in the scope or version dependency will misbehave, even when + * only `PyUFunc_` API is used. 
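+ *
+ * Editor's illustrative note (not upstream text): per the rules above, a
+ * typical downstream include order is
+ *
+ *     #include <numpy/ndarrayobject.h>
+ *     #include <numpy/npy_2_compat.h>
+ *
+ * with PyArray_ImportNumPyAPI() (defined below) called in module init.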
+ *
+ * If required, complicated defs (with inline functions) should be written as:
+ *
+ *     #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+ *         Simple definition when the NumPy 2.0 API is guaranteed.
+ *     #else
+ *         static inline definition of a 1.x compatibility shim
+ *         #if NPY_ABI_VERSION < 0x02000000
+ *             Make the 1.x compatibility shim the public API (1.x only branch)
+ *         #else
+ *             Runtime dispatched version (1.x or 2.x)
+ *         #endif
+ *     #endif
+ *
+ * An internal build always passes NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_
+
+/*
+ * New macros for accessing the real and imaginary parts of a complex number
+ * can be found in "npy_2_complexcompat.h".
+ */
+
+
+/*
+ * This header is meant to be included by downstream directly for 1.x compat.
+ * In that case we need to ensure that users first included the full headers
+ * and not just `ndarraytypes.h`.
+ */
+
+#ifndef NPY_FEATURE_VERSION
+  #error "The NumPy 2 compat header requires `import_array()` for which "  \
+         "the `ndarraytypes.h` header include is not sufficient.  Please "  \
+         "include it after `numpy/ndarrayobject.h` or similar.\n"  \
+         "To simplify inclusion, you may use `PyArray_ImportNumPyAPI()` "  \
+         "which is defined in the compat header and is lightweight (can be "  \
+         "called repeatedly)."
+#endif
+
+#if NPY_ABI_VERSION < 0x02000000
+  /*
+   * Define the 2.0 feature version as it is needed below to decide whether
+   * we compile for both 1.x and 2.x (defining it guarantees 1.x only).
+   */
+  #define NPY_2_0_API_VERSION 0x00000012
+  /*
+   * If we are compiling with NumPy 1.x, `PyArray_RUNTIME_VERSION` does not
+   * exist, so we pretend that `PyArray_RUNTIME_VERSION` is
+   * `NPY_FEATURE_VERSION`.
+   * This allows downstream to use `PyArray_RUNTIME_VERSION` if they need to.
+   */
+  #define PyArray_RUNTIME_VERSION NPY_FEATURE_VERSION
+  /* Compiling on NumPy 1.x where these are the same: */
+  #define PyArray_DescrProto PyArray_Descr
+#endif
+
+
+/*
+ * Define a better way to call `_import_array()` to simplify backporting as
+ * we now require imports more often (necessary to make the ABI flexible).
+ */
+#ifdef import_array1
+
+static inline int
+PyArray_ImportNumPyAPI(void)
+{
+    if (NPY_UNLIKELY(PyArray_API == NULL)) {
+        import_array1(-1);
+    }
+    return 0;
+}
+
+#endif  /* import_array1 */
+
+
+/*
+ * NPY_DEFAULT_INT
+ *
+ * The default integer has changed; `NPY_DEFAULT_INT` is available at runtime
+ * for use as a type number, e.g. `PyArray_DescrFromType(NPY_DEFAULT_INT)`.
+ *
+ * NPY_RAVEL_AXIS
+ *
+ * This was introduced in NumPy 2.0 to allow indicating that an axis should
+ * be raveled in an operation.  Before NumPy 2.0, NPY_MAXDIMS was used for
+ * this purpose.
+ *
+ * NPY_MAXDIMS
+ *
+ * A constant indicating the maximum number of dimensions allowed when
+ * creating an ndarray.
+ *
+ * NPY_NTYPES_LEGACY
+ *
+ * The number of built-in NumPy dtypes.
+ */
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+    #define NPY_DEFAULT_INT NPY_INTP
+    #define NPY_RAVEL_AXIS NPY_MIN_INT
+    #define NPY_MAXARGS 64
+
+#elif NPY_ABI_VERSION < 0x02000000
+    #define NPY_DEFAULT_INT NPY_LONG
+    #define NPY_RAVEL_AXIS 32
+    #define NPY_MAXARGS 32
+
+    /* Aliases of 2.x names to their 1.x only equivalent names */
+    #define NPY_NTYPES NPY_NTYPES_LEGACY
+    #define PyArray_DescrProto PyArray_Descr
+    #define _PyArray_LegacyDescr PyArray_Descr
+    /* The NumPy 2 definition always works, but add it for 1.x only */
+    #define PyDataType_ISLEGACY(dtype) (1)
+#else
+    #define NPY_DEFAULT_INT  \
+        (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ?
NPY_INTP : NPY_LONG) + #define NPY_RAVEL_AXIS \ + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) + #define NPY_MAXARGS \ + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 64 : 32) +#endif + + +/* + * Access inline functions for descriptor fields. Except for the first + * few fields, these needed to be moved (elsize, alignment) for + * additional space. Or they are descriptor specific and are not generally + * available anymore (metadata, c_metadata, subarray, names, fields). + * + * Most of these are defined via the `DESCR_ACCESSOR` macro helper. + */ +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION || NPY_ABI_VERSION < 0x02000000 + /* Compiling for 1.x or 2.x only, direct field access is OK: */ + + static inline void + PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size) + { + dtype->elsize = size; + } + + static inline npy_uint64 + PyDataType_FLAGS(const PyArray_Descr *dtype) + { + #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + return dtype->flags; + #else + return (unsigned char)dtype->flags; /* Need unsigned cast on 1.x */ + #endif + } + + #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \ + static inline type \ + PyDataType_##FIELD(const PyArray_Descr *dtype) { \ + if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \ + return (type)0; \ + } \ + return ((_PyArray_LegacyDescr *)dtype)->field; \ + } +#else /* compiling for both 1.x and 2.x */ + + static inline void + PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + ((_PyArray_DescrNumPy2 *)dtype)->elsize = size; + } + else { + ((PyArray_DescrProto *)dtype)->elsize = (int)size; + } + } + + static inline npy_uint64 + PyDataType_FLAGS(const PyArray_Descr *dtype) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + return ((_PyArray_DescrNumPy2 *)dtype)->flags; + } + else { + return (unsigned char)((PyArray_DescrProto *)dtype)->flags; + } + } + + /* Cast to LegacyDescr always fine but needed when `legacy_only` */ + #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \ + static inline type \ + PyDataType_##FIELD(const PyArray_Descr *dtype) { \ + if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \ + return (type)0; \ + } \ + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { \ + return ((_PyArray_LegacyDescr *)dtype)->field; \ + } \ + else { \ + return ((PyArray_DescrProto *)dtype)->field; \ + } \ + } +#endif + +DESCR_ACCESSOR(ELSIZE, elsize, npy_intp, 0) +DESCR_ACCESSOR(ALIGNMENT, alignment, npy_intp, 0) +DESCR_ACCESSOR(METADATA, metadata, PyObject *, 1) +DESCR_ACCESSOR(SUBARRAY, subarray, PyArray_ArrayDescr *, 1) +DESCR_ACCESSOR(NAMES, names, PyObject *, 1) +DESCR_ACCESSOR(FIELDS, fields, PyObject *, 1) +DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1) + +#undef DESCR_ACCESSOR + + +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + return _PyDataType_GetArrFuncs(descr); + } +#elif NPY_ABI_VERSION < 0x02000000 + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + return descr->f; + } +#else + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + return _PyDataType_GetArrFuncs(descr); + } + else { + return ((PyArray_DescrProto *)descr)->f; + } + } +#endif + + +#endif /* not internal build */ + +#endif /* 
NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_complexcompat.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_complexcompat.h
new file mode 100644
index 0000000..0b50901
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_complexcompat.h
@@ -0,0 +1,28 @@
+/* This header is designed to be copy-pasted into downstream packages, since
+   it provides a compatibility layer between the old C struct complex types
+   and the new native C99 complex types.  The new macros are in
+   numpy/npy_math.h, which is why it is included here. */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_
+
+#include <numpy/npy_math.h>
+
+#ifndef NPY_CSETREALF
+#define NPY_CSETREALF(c, r) (c)->real = (r)
+#endif
+#ifndef NPY_CSETIMAGF
+#define NPY_CSETIMAGF(c, i) (c)->imag = (i)
+#endif
+#ifndef NPY_CSETREAL
+#define NPY_CSETREAL(c, r)  (c)->real = (r)
+#endif
+#ifndef NPY_CSETIMAG
+#define NPY_CSETIMAG(c, i)  (c)->imag = (i)
+#endif
+#ifndef NPY_CSETREALL
+#define NPY_CSETREALL(c, r) (c)->real = (r)
+#endif
+#ifndef NPY_CSETIMAGL
+#define NPY_CSETIMAGL(c, i) (c)->imag = (i)
+#endif
+
+#endif
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_3kcompat.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_3kcompat.h
new file mode 100644
index 0000000..c2bf74f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_3kcompat.h
@@ -0,0 +1,374 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting different minor versions of Python 3.
+ * It was originally used to support the transition from Python 2,
+ * hence the "3k" naming.
+ *
+ * If you want to use this for your own projects, it's recommended to make a
+ * copy of it. We don't provide backwards compatibility guarantees.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
+
+#include <Python.h>
+#include <stdio.h>
+
+#include "npy_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Python 3.13 removes _PyLong_AsInt */
+static inline int
+Npy__PyLong_AsInt(PyObject *obj)
+{
+    int overflow;
+    long result = PyLong_AsLongAndOverflow(obj, &overflow);
+
+    /* INT_MAX and INT_MIN are defined in Python.h */
+    if (overflow || result > INT_MAX || result < INT_MIN) {
+        /* XXX: could be cute and give a different
+           message for overflow == -1 */
+        PyErr_SetString(PyExc_OverflowError,
+                        "Python int too large to convert to C int");
+        return -1;
+    }
+    return (int)result;
+}
+
+#if defined _MSC_VER && _MSC_VER >= 1900
+
+#include <stdlib.h>
+
+/*
+ * Macros to protect CRT calls against instant termination when passed an
+ * invalid parameter (https://bugs.python.org/issue23524).
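+ *
+ * Editor's illustrative note: the intended pattern brackets the raw CRT
+ * call, as npy_PyFile_Dup2 below does for _fdopen:
+ *
+ *     NPY_BEGIN_SUPPRESS_IPH
+ *     handle = _fdopen(fd, mode);
+ *     NPY_END_SUPPRESS_IPH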
+ */ +extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; +#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \ + _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler); +#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); } + +#else + +#define NPY_BEGIN_SUPPRESS_IPH +#define NPY_END_SUPPRESS_IPH + +#endif /* _MSC_VER >= 1900 */ + +/* + * PyFile_* compatibility + */ + +/* + * Get a FILE* handle to the file represented by the Python object + */ +static inline FILE* +npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) +{ + int fd, fd2, unbuf; + Py_ssize_t fd2_tmp; + PyObject *ret, *os, *io, *io_raw; + npy_off_t pos; + FILE *handle; + + /* Flush first to ensure things end up in the file in the correct order */ + ret = PyObject_CallMethod(file, "flush", ""); + if (ret == NULL) { + return NULL; + } + Py_DECREF(ret); + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return NULL; + } + + /* + * The handle needs to be dup'd because we have to call fclose + * at the end + */ + os = PyImport_ImportModule("os"); + if (os == NULL) { + return NULL; + } + ret = PyObject_CallMethod(os, "dup", "i", fd); + Py_DECREF(os); + if (ret == NULL) { + return NULL; + } + fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError); + Py_DECREF(ret); + if (fd2_tmp == -1 && PyErr_Occurred()) { + return NULL; + } + if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) { + PyErr_SetString(PyExc_IOError, + "Getting an 'int' from os.dup() failed"); + return NULL; + } + fd2 = (int)fd2_tmp; + + /* Convert to FILE* handle */ +#ifdef _WIN32 + NPY_BEGIN_SUPPRESS_IPH + handle = _fdopen(fd2, mode); + NPY_END_SUPPRESS_IPH +#else + handle = fdopen(fd2, mode); +#endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object via " + "_fdopen failed. 
If you built NumPy, you probably " + "linked with the wrong debug/release runtime"); + return NULL; + } + + /* Record the original raw file handle position */ + *orig_pos = npy_ftell(handle); + if (*orig_pos == -1) { + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + fclose(handle); + return NULL; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + fclose(handle); + return NULL; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return handle; + } + else { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + fclose(handle); + return NULL; + } + } + + /* Seek raw handle to the Python-side position */ + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyLong_AsLongLong(ret); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; + } + if (npy_fseek(handle, pos, SEEK_SET) == -1) { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + fclose(handle); + return NULL; + } + return handle; +} + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static inline int +npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) +{ + int fd, unbuf; + PyObject *ret, *io, *io_raw; + npy_off_t position; + + position = npy_ftell(handle); + + /* Close the FILE* handle */ + fclose(handle); + + /* + * Restore original file handle position, in order to not confuse + * Python-side data structures + */ + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return -1; + } + + if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { + + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + return -1; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + return -1; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return 0; + } + else { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + return -1; + } + } + + if (position == -1) { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + return -1; + } + + /* Seek Python-side handle to the FILE* handle position */ + ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static inline PyObject* +npy_PyFile_OpenFile(PyObject *filename, const char *mode) +{ + PyObject *open; + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + if (open == NULL) { + return NULL; + } + return PyObject_CallFunction(open, "Os", filename, mode); +} + +static inline int +npy_PyFile_CloseFile(PyObject *file) +{ + PyObject *ret; + + ret = PyObject_CallMethod(file, "close", NULL); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +/* This is a copy of _PyErr_ChainExceptions, which + * is no longer exported from Python3.12 + */ +static inline void +npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != 
NULL) {
+            PyException_SetTraceback(val, tb);
+            Py_DECREF(tb);
+        }
+        Py_DECREF(exc);
+        PyErr_NormalizeException(&exc2, &val2, &tb2);
+        PyException_SetContext(val2, val);
+        PyErr_Restore(exc2, val2, tb2);
+    }
+    else {
+        PyErr_Restore(exc, val, tb);
+    }
+}
+
+/* This is a copy of _PyErr_ChainExceptions, with
+ * __cause__ used instead of __context__
+ */
+static inline void
+npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
+{
+    if (exc == NULL)
+        return;
+
+    if (PyErr_Occurred()) {
+        PyObject *exc2, *val2, *tb2;
+        PyErr_Fetch(&exc2, &val2, &tb2);
+        PyErr_NormalizeException(&exc, &val, &tb);
+        if (tb != NULL) {
+            PyException_SetTraceback(val, tb);
+            Py_DECREF(tb);
+        }
+        Py_DECREF(exc);
+        PyErr_NormalizeException(&exc2, &val2, &tb2);
+        PyException_SetCause(val2, val);
+        PyErr_Restore(exc2, val2, tb2);
+    }
+    else {
+        PyErr_Restore(exc, val, tb);
+    }
+}
+
+/*
+ * PyCObject functions adapted to PyCapsules.
+ *
+ * The main job here is to get rid of the improved error handling
+ * of PyCapsules. It's a shame...
+ */
+static inline PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+    PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+    if (ret == NULL) {
+        PyErr_Clear();
+    }
+    return ret;
+}
+
+static inline PyObject *
+NpyCapsule_FromVoidPtrAndDesc(void *ptr, void *context, void (*dtor)(PyObject *))
+{
+    PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
+    if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
+        PyErr_Clear();
+        Py_DECREF(ret);
+        ret = NULL;
+    }
+    return ret;
+}
+
+static inline void *
+NpyCapsule_AsVoidPtr(PyObject *obj)
+{
+    void *ret = PyCapsule_GetPointer(obj, NULL);
+    if (ret == NULL) {
+        PyErr_Clear();
+    }
+    return ret;
+}
+
+static inline void *
+NpyCapsule_GetDesc(PyObject *obj)
+{
+    return PyCapsule_GetContext(obj);
+}
+
+static inline int
+NpyCapsule_Check(PyObject *ptr)
+{
+    return PyCapsule_CheckExact(ptr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_common.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_common.h
new file mode 100644
index 0000000..e2556a0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_common.h
@@ -0,0 +1,977 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
+
+/* need Python.h for npy_intp, npy_uintp */
+#include <Python.h>
+
+/* numpyconfig.h is auto-generated */
+#include "numpyconfig.h"
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+
+/*
+ * using static inline modifiers when defining npy_math functions
+ * allows the compiler to make optimizations when possible
+ */
+#ifndef NPY_INLINE_MATH
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+    #define NPY_INLINE_MATH 1
+#else
+    #define NPY_INLINE_MATH 0
+#endif
+#endif
+
+/*
+ * gcc does not unroll even with -O3;
+ * use with care, unrolling on modern CPUs rarely speeds things up
+ */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS
+#define NPY_GCC_UNROLL_LOOPS \
+    __attribute__((optimize("unroll-loops")))
+#else
+#define NPY_GCC_UNROLL_LOOPS
+#endif
+
+/* highest gcc optimization level, enables the autovectorizer */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3
+#define NPY_GCC_OPT_3 __attribute__((optimize("O3")))
+#else
+#define NPY_GCC_OPT_3
+#endif
+
+/*
+ * mark an argument (starting from 1) that must not be NULL and is not checked
+ * DO NOT USE IF FUNCTION CHECKS FOR NULL!!
the compiler will remove the check + */ +#ifdef HAVE_ATTRIBUTE_NONNULL +#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) +#else +#define NPY_GCC_NONNULL(n) +#endif + +/* + * give a hint to the compiler which branch is more likely or unlikely + * to occur, e.g. rare error cases: + * + * if (NPY_UNLIKELY(failure == 0)) + * return NULL; + * + * the double !! is to cast the expression (e.g. NULL) to a boolean required by + * the intrinsic + */ +#ifdef HAVE___BUILTIN_EXPECT +#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) +#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define NPY_LIKELY(x) (x) +#define NPY_UNLIKELY(x) (x) +#endif + +#ifdef HAVE___BUILTIN_PREFETCH +/* unlike _mm_prefetch also works on non-x86 */ +#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc)) +#else +#ifdef NPY_HAVE_SSE +/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */ +#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \ + (loc == 1 ? _MM_HINT_T2 : \ + (loc == 2 ? _MM_HINT_T1 : \ + (loc == 3 ? _MM_HINT_T0 : -1)))) +#else +#define NPY_PREFETCH(x, rw,loc) +#endif +#endif + +/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */ +#if defined(_MSC_VER) && !defined(__clang__) + #define NPY_INLINE __inline +/* clang included here to handle clang-cl on Windows */ +#elif defined(__GNUC__) || defined(__clang__) + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif +#else + #define NPY_INLINE +#endif + +#ifdef _MSC_VER + #define NPY_FINLINE static __forceinline +#elif defined(__GNUC__) + #define NPY_FINLINE static inline __attribute__((always_inline)) +#else + #define NPY_FINLINE static +#endif + +#if defined(_MSC_VER) + #define NPY_NOINLINE static __declspec(noinline) +#elif defined(__GNUC__) || defined(__clang__) + #define NPY_NOINLINE static __attribute__((noinline)) +#else + #define NPY_NOINLINE static +#endif + +#ifdef __cplusplus + #define NPY_TLS thread_local +#elif defined(HAVE_THREAD_LOCAL) + #define NPY_TLS thread_local +#elif defined(HAVE__THREAD_LOCAL) + #define NPY_TLS _Thread_local +#elif defined(HAVE___THREAD) + #define NPY_TLS __thread +#elif defined(HAVE___DECLSPEC_THREAD_) + #define NPY_TLS __declspec(thread) +#else + #define NPY_TLS +#endif + +#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE + #define NPY_RETURNS_BORROWED_REF \ + __attribute__((cpychecker_returns_borrowed_ref)) +#else + #define NPY_RETURNS_BORROWED_REF +#endif + +#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE + #define NPY_STEALS_REF_TO_ARG(n) \ + __attribute__((cpychecker_steals_reference_to_arg(n))) +#else + #define NPY_STEALS_REF_TO_ARG(n) +#endif + +/* 64 bit file position support, also on win-amd64. 
Issue gh-2256 */ +#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \ + defined(__MINGW32__) || defined(__MINGW64__) + #include <io.h> + + #define npy_fseek _fseeki64 + #define npy_ftell _ftelli64 + #define npy_lseek _lseeki64 + #define npy_off_t npy_int64 + + #if NPY_SIZEOF_INT == 8 + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_LONG == 8 + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_LONGLONG == 8 + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#else +#ifdef HAVE_FSEEKO + #define npy_fseek fseeko +#else + #define npy_fseek fseek +#endif +#ifdef HAVE_FTELLO + #define npy_ftell ftello +#else + #define npy_ftell ftell +#endif + #include <sys/types.h> + #ifndef _WIN32 + #include <unistd.h> + #endif + #define npy_lseek lseek + #define npy_off_t off_t + + #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT + #define NPY_OFF_T_PYFMT "h" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#endif + +/* enums for detected endianness */ +enum { + NPY_CPU_UNKNOWN_ENDIAN, + NPY_CPU_LITTLE, + NPY_CPU_BIG +}; + +/* + * This is to typedef npy_intp to the appropriate size for Py_ssize_t. + * (Before NumPy 2.0 we used Py_intptr_t and Py_uintptr_t from `pyport.h`.) + */ +typedef Py_ssize_t npy_intp; +typedef size_t npy_uintp; + +/* + * Define sizes that were not defined in numpyconfig.h. + */ +#define NPY_SIZEOF_CHAR 1 +#define NPY_SIZEOF_BYTE 1 +#define NPY_SIZEOF_DATETIME 8 +#define NPY_SIZEOF_TIMEDELTA 8 +#define NPY_SIZEOF_HALF 2 +#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT +#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE +#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE + +#ifdef constchar +#undef constchar +#endif + +#define NPY_SSIZE_T_PYFMT "n" +#define constchar char + +/* NPY_INTP_FMT Note: + * Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf, + * NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those + * functions use different formatting codes that are portably specified + * according to the Python documentation. See issue gh-2388. + */ +#if NPY_SIZEOF_INTP == NPY_SIZEOF_LONG + #define NPY_INTP NPY_LONG + #define NPY_UINTP NPY_ULONG + #define PyIntpArrType_Type PyLongArrType_Type + #define PyUIntpArrType_Type PyULongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONG + #define NPY_MIN_INTP NPY_MIN_LONG + #define NPY_MAX_UINTP NPY_MAX_ULONG + #define NPY_INTP_FMT "ld" +#elif NPY_SIZEOF_INTP == NPY_SIZEOF_INT + #define NPY_INTP NPY_INT + #define NPY_UINTP NPY_UINT + #define PyIntpArrType_Type PyIntArrType_Type + #define PyUIntpArrType_Type PyUIntArrType_Type + #define NPY_MAX_INTP NPY_MAX_INT + #define NPY_MIN_INTP NPY_MIN_INT + #define NPY_MAX_UINTP NPY_MAX_UINT + #define NPY_INTP_FMT "d" +#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_INTP == NPY_SIZEOF_LONGLONG) + #define NPY_INTP NPY_LONGLONG + #define NPY_UINTP NPY_ULONGLONG + #define PyIntpArrType_Type PyLongLongArrType_Type + #define PyUIntpArrType_Type PyULongLongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONGLONG + #define NPY_MIN_INTP NPY_MIN_LONGLONG + #define NPY_MAX_UINTP NPY_MAX_ULONGLONG + #define NPY_INTP_FMT "lld" +#else + #error "Failed to correctly define NPY_INTP and NPY_UINTP" +#endif + + +/* + * Some platforms don't define bool, long long, or long double. + * Handle that here.
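 + * + * (Editorial example for the NPY_INTP_FMT note above, with a hypothetical + * npy_intp n: + * PyErr_Format(PyExc_ValueError, "bad length %" NPY_INTP_FMT, n); + * the macro expands to the format code PyErr_Format expects at that width.)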
+ */ +#define NPY_BYTE_FMT "hhd" +#define NPY_UBYTE_FMT "hhu" +#define NPY_SHORT_FMT "hd" +#define NPY_USHORT_FMT "hu" +#define NPY_INT_FMT "d" +#define NPY_UINT_FMT "u" +#define NPY_LONG_FMT "ld" +#define NPY_ULONG_FMT "lu" +#define NPY_HALF_FMT "g" +#define NPY_FLOAT_FMT "g" +#define NPY_DOUBLE_FMT "g" + + +#ifdef PY_LONG_LONG +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +# ifdef _MSC_VER +# define NPY_LONGLONG_FMT "I64d" +# define NPY_ULONGLONG_FMT "I64u" +# else +# define NPY_LONGLONG_FMT "lld" +# define NPY_ULONGLONG_FMT "llu" +# endif +# ifdef _MSC_VER +# define NPY_LONGLONG_SUFFIX(x) (x##i64) +# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) +# else +# define NPY_LONGLONG_SUFFIX(x) (x##LL) +# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) +# endif +#else +typedef long npy_longlong; +typedef unsigned long npy_ulonglong; +# define NPY_LONGLONG_SUFFIX(x) (x##L) +# define NPY_ULONGLONG_SUFFIX(x) (x##UL) +#endif + + +typedef unsigned char npy_bool; +#define NPY_FALSE 0 +#define NPY_TRUE 1 +/* + * `NPY_SIZEOF_LONGDOUBLE` isn't usually equal to sizeof(long double). + * In certain cases, it may be forced to be equal to sizeof(double), + * even against the compiler implementation, and the same goes for + * `complex long double`. + * + * Therefore, avoid `long double`, use `npy_longdouble` instead, + * and when it comes to standard math functions make sure to use + * the double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`. + * For example: + * npy_longdouble *ptr, x; + * #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + * npy_longdouble r = modf(x, ptr); + * #else + * npy_longdouble r = modfl(x, ptr); + * #endif + * + * See https://github.com/numpy/numpy/issues/20348 + */ +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + #define NPY_LONGDOUBLE_FMT "g" + #define longdouble_t double + typedef double npy_longdouble; +#else + #define NPY_LONGDOUBLE_FMT "Lg" + #define longdouble_t long double + typedef long double npy_longdouble; +#endif + +#ifndef Py_USING_UNICODE +#error Must use Python with unicode enabled.
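+/* Editorial sketch, not part of the upstream header: NPY_LONGLONG_SUFFIX above + * attaches the platform-correct integer-constant suffix, e.g. + * npy_longlong one_tib = NPY_LONGLONG_SUFFIX(1) << 40; + * so the shift is carried out at full 64-bit width. */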
+#endif + + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +typedef Py_hash_t npy_hash_t; +#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP + +#if defined(__cplusplus) + +typedef struct +{ + double _Val[2]; +} npy_cdouble; + +typedef struct +{ + float _Val[2]; +} npy_cfloat; + +typedef struct +{ + long double _Val[2]; +} npy_clongdouble; + +#else + +#include <complex.h> + + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +typedef _Dcomplex npy_cdouble; +typedef _Fcomplex npy_cfloat; +typedef _Lcomplex npy_clongdouble; +#else /* !defined(_MSC_VER) || defined(__INTEL_COMPILER) */ +typedef double _Complex npy_cdouble; +typedef float _Complex npy_cfloat; +typedef longdouble_t _Complex npy_clongdouble; +#endif + +#endif + +/* + * numarray-style bit-width typedefs + */ +#define NPY_MAX_INT8 127 +#define NPY_MIN_INT8 -128 +#define NPY_MAX_UINT8 255 +#define NPY_MAX_INT16 32767 +#define NPY_MIN_INT16 -32768 +#define NPY_MAX_UINT16 65535 +#define NPY_MAX_INT32 2147483647 +#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) +#define NPY_MAX_UINT32 4294967295U +#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) +#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) +#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define NPY_MIN_DATETIME NPY_MIN_INT64 +#define NPY_MAX_DATETIME NPY_MAX_INT64 +#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 +#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 + + /* Need to find the number of bits for each type and + make definitions accordingly. + + C states that sizeof(char) == 1 by definition + + So, just using the sizeof keyword won't help. + + It also looks like Python itself uses sizeof(char) quite a + bit, which by definition should be 1 all the time.
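 + + (Editorial aside: sizeof() counts chars, not bits. On a DSP with 16-bit + chars, sizeof(int) can legally be 1 even though int holds 16 bits, so + byte counts alone cannot pin down the bit widths.)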
+ + Idea: Make Use of CHAR_BIT which should tell us how many + BITS per CHARACTER + */ + + /* Include platform definitions -- These are in the C89/90 standard */ +#include <limits.h> +#define NPY_MAX_BYTE SCHAR_MAX +#define NPY_MIN_BYTE SCHAR_MIN +#define NPY_MAX_UBYTE UCHAR_MAX +#define NPY_MAX_SHORT SHRT_MAX +#define NPY_MIN_SHORT SHRT_MIN +#define NPY_MAX_USHORT USHRT_MAX +#define NPY_MAX_INT INT_MAX +#ifndef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#endif +#define NPY_MIN_INT INT_MIN +#define NPY_MAX_UINT UINT_MAX +#define NPY_MAX_LONG LONG_MAX +#define NPY_MIN_LONG LONG_MIN +#define NPY_MAX_ULONG ULONG_MAX + +#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT) +#define NPY_BITSOF_CHAR CHAR_BIT +#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT) +#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) +#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) +#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) +#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) +#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT) +#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) +#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) +#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) +#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT) +#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) +#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) + +#if NPY_BITSOF_LONG == 8 +#define NPY_INT8 NPY_LONG +#define NPY_UINT8 NPY_ULONG + typedef long npy_int8; + typedef unsigned long npy_uint8; +#define PyInt8ScalarObject PyLongScalarObject +#define PyInt8ArrType_Type PyLongArrType_Type +#define PyUInt8ScalarObject PyULongScalarObject +#define PyUInt8ArrType_Type PyULongArrType_Type +#define NPY_INT8_FMT NPY_LONG_FMT +#define NPY_UINT8_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 16 +#define NPY_INT16 NPY_LONG +#define NPY_UINT16 NPY_ULONG + typedef long npy_int16; + typedef unsigned long npy_uint16; +#define PyInt16ScalarObject PyLongScalarObject +#define PyInt16ArrType_Type PyLongArrType_Type +#define PyUInt16ScalarObject PyULongScalarObject +#define PyUInt16ArrType_Type PyULongArrType_Type +#define NPY_INT16_FMT NPY_LONG_FMT +#define NPY_UINT16_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 32 +#define NPY_INT32 NPY_LONG +#define NPY_UINT32 NPY_ULONG + typedef long npy_int32; + typedef unsigned long npy_uint32; + typedef unsigned long npy_ucs4; +#define PyInt32ScalarObject PyLongScalarObject +#define PyInt32ArrType_Type PyLongArrType_Type +#define PyUInt32ScalarObject PyULongScalarObject +#define PyUInt32ArrType_Type PyULongArrType_Type +#define NPY_INT32_FMT NPY_LONG_FMT +#define NPY_UINT32_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 64 +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG + typedef long npy_int64; + typedef unsigned long npy_uint64; +#define PyInt64ScalarObject PyLongScalarObject +#define PyInt64ArrType_Type PyLongArrType_Type +#define PyUInt64ScalarObject PyULongScalarObject +#define PyUInt64ArrType_Type PyULongArrType_Type +#define NPY_INT64_FMT NPY_LONG_FMT +#define NPY_UINT64_FMT NPY_ULONG_FMT +#define MyPyLong_FromInt64 PyLong_FromLong +#define MyPyLong_AsInt64 PyLong_AsLong +#endif + +#if NPY_BITSOF_LONGLONG == 8 +# ifndef NPY_INT8 +# define NPY_INT8 NPY_LONGLONG +# define NPY_UINT8 NPY_ULONGLONG + typedef npy_longlong npy_int8; + typedef npy_ulonglong npy_uint8;
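+/* editorial note: this branch is only reachable on an exotic ABI whose long + long is 8 bits wide; the aliases below then map it onto the int8 names */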
+# define PyInt8ScalarObject PyLongLongScalarObject +# define PyInt8ArrType_Type PyLongLongArrType_Type +# define PyUInt8ScalarObject PyULongLongScalarObject +# define PyUInt8ArrType_Type PyULongLongArrType_Type +#define NPY_INT8_FMT NPY_LONGLONG_FMT +#define NPY_UINT8_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT8 +# define NPY_MIN_LONGLONG NPY_MIN_INT8 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 +#elif NPY_BITSOF_LONGLONG == 16 +# ifndef NPY_INT16 +# define NPY_INT16 NPY_LONGLONG +# define NPY_UINT16 NPY_ULONGLONG + typedef npy_longlong npy_int16; + typedef npy_ulonglong npy_uint16; +# define PyInt16ScalarObject PyLongLongScalarObject +# define PyInt16ArrType_Type PyLongLongArrType_Type +# define PyUInt16ScalarObject PyULongLongScalarObject +# define PyUInt16ArrType_Type PyULongLongArrType_Type +#define NPY_INT16_FMT NPY_LONGLONG_FMT +#define NPY_UINT16_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT16 +# define NPY_MIN_LONGLONG NPY_MIN_INT16 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 +#elif NPY_BITSOF_LONGLONG == 32 +# ifndef NPY_INT32 +# define NPY_INT32 NPY_LONGLONG +# define NPY_UINT32 NPY_ULONGLONG + typedef npy_longlong npy_int32; + typedef npy_ulonglong npy_uint32; + typedef npy_ulonglong npy_ucs4; +# define PyInt32ScalarObject PyLongLongScalarObject +# define PyInt32ArrType_Type PyLongLongArrType_Type +# define PyUInt32ScalarObject PyULongLongScalarObject +# define PyUInt32ArrType_Type PyULongLongArrType_Type +#define NPY_INT32_FMT NPY_LONGLONG_FMT +#define NPY_UINT32_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT32 +# define NPY_MIN_LONGLONG NPY_MIN_INT32 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 +#elif NPY_BITSOF_LONGLONG == 64 +# ifndef NPY_INT64 +# define NPY_INT64 NPY_LONGLONG +# define NPY_UINT64 NPY_ULONGLONG + typedef npy_longlong npy_int64; + typedef npy_ulonglong npy_uint64; +# define PyInt64ScalarObject PyLongLongScalarObject +# define PyInt64ArrType_Type PyLongLongArrType_Type +# define PyUInt64ScalarObject PyULongLongScalarObject +# define PyUInt64ArrType_Type PyULongLongArrType_Type +#define NPY_INT64_FMT NPY_LONGLONG_FMT +#define NPY_UINT64_FMT NPY_ULONGLONG_FMT +# define MyPyLong_FromInt64 PyLong_FromLongLong +# define MyPyLong_AsInt64 PyLong_AsLongLong +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT64 +# define NPY_MIN_LONGLONG NPY_MIN_INT64 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 +#endif + +#if NPY_BITSOF_INT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_INT +#define NPY_UINT8 NPY_UINT + typedef int npy_int8; + typedef unsigned int npy_uint8; +# define PyInt8ScalarObject PyIntScalarObject +# define PyInt8ArrType_Type PyIntArrType_Type +# define PyUInt8ScalarObject PyUIntScalarObject +# define PyUInt8ArrType_Type PyUIntArrType_Type +#define NPY_INT8_FMT NPY_INT_FMT +#define NPY_UINT8_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_INT +#define NPY_UINT16 NPY_UINT + typedef int npy_int16; + typedef unsigned int npy_uint16; +# define PyInt16ScalarObject PyIntScalarObject +# define PyInt16ArrType_Type PyIntArrType_Type +# define PyUInt16ScalarObject PyIntUScalarObject +# define PyUInt16ArrType_Type PyIntUArrType_Type +#define NPY_INT16_FMT NPY_INT_FMT +#define NPY_UINT16_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT + typedef int npy_int32; + typedef unsigned int npy_uint32; + typedef unsigned int npy_ucs4; +# define PyInt32ScalarObject PyIntScalarObject +# define 
PyInt32ArrType_Type PyIntArrType_Type +# define PyUInt32ScalarObject PyUIntScalarObject +# define PyUInt32ArrType_Type PyUIntArrType_Type +#define NPY_INT32_FMT NPY_INT_FMT +#define NPY_UINT32_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_INT +#define NPY_UINT64 NPY_UINT + typedef int npy_int64; + typedef unsigned int npy_uint64; +# define PyInt64ScalarObject PyIntScalarObject +# define PyInt64ArrType_Type PyIntArrType_Type +# define PyUInt64ScalarObject PyUIntScalarObject +# define PyUInt64ArrType_Type PyUIntArrType_Type +#define NPY_INT64_FMT NPY_INT_FMT +#define NPY_UINT64_FMT NPY_UINT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#endif + +#if NPY_BITSOF_SHORT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_SHORT +#define NPY_UINT8 NPY_USHORT + typedef short npy_int8; + typedef unsigned short npy_uint8; +# define PyInt8ScalarObject PyShortScalarObject +# define PyInt8ArrType_Type PyShortArrType_Type +# define PyUInt8ScalarObject PyUShortScalarObject +# define PyUInt8ArrType_Type PyUShortArrType_Type +#define NPY_INT8_FMT NPY_SHORT_FMT +#define NPY_UINT8_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT + typedef short npy_int16; + typedef unsigned short npy_uint16; +# define PyInt16ScalarObject PyShortScalarObject +# define PyInt16ArrType_Type PyShortArrType_Type +# define PyUInt16ScalarObject PyUShortScalarObject +# define PyUInt16ArrType_Type PyUShortArrType_Type +#define NPY_INT16_FMT NPY_SHORT_FMT +#define NPY_UINT16_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_SHORT +#define NPY_UINT32 NPY_USHORT + typedef short npy_int32; + typedef unsigned short npy_uint32; + typedef unsigned short npy_ucs4; +# define PyInt32ScalarObject PyShortScalarObject +# define PyInt32ArrType_Type PyShortArrType_Type +# define PyUInt32ScalarObject PyUShortScalarObject +# define PyUInt32ArrType_Type PyUShortArrType_Type +#define NPY_INT32_FMT NPY_SHORT_FMT +#define NPY_UINT32_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_SHORT +#define NPY_UINT64 NPY_USHORT + typedef short npy_int64; + typedef unsigned short npy_uint64; +# define PyInt64ScalarObject PyShortScalarObject +# define PyInt64ArrType_Type PyShortArrType_Type +# define PyUInt64ScalarObject PyUShortScalarObject +# define PyUInt64ArrType_Type PyUShortArrType_Type +#define NPY_INT64_FMT NPY_SHORT_FMT +#define NPY_UINT64_FMT NPY_USHORT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#endif + + +#if NPY_BITSOF_CHAR == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE + typedef signed char npy_int8; + typedef unsigned char npy_uint8; +# define PyInt8ScalarObject PyByteScalarObject +# define PyInt8ArrType_Type PyByteArrType_Type +# define PyUInt8ScalarObject PyUByteScalarObject +# define PyUInt8ArrType_Type PyUByteArrType_Type +#define NPY_INT8_FMT NPY_BYTE_FMT +#define NPY_UINT8_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_BYTE +#define NPY_UINT16 NPY_UBYTE + typedef signed char npy_int16; + typedef unsigned char npy_uint16; +# define PyInt16ScalarObject PyByteScalarObject +# define PyInt16ArrType_Type PyByteArrType_Type +# define PyUInt16ScalarObject PyUByteScalarObject +# define PyUInt16ArrType_Type PyUByteArrType_Type +#define 
NPY_INT16_FMT NPY_BYTE_FMT +#define NPY_UINT16_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_BYTE +#define NPY_UINT32 NPY_UBYTE + typedef signed char npy_int32; + typedef unsigned char npy_uint32; + typedef unsigned char npy_ucs4; +# define PyInt32ScalarObject PyByteScalarObject +# define PyInt32ArrType_Type PyByteArrType_Type +# define PyUInt32ScalarObject PyUByteScalarObject +# define PyUInt32ArrType_Type PyUByteArrType_Type +#define NPY_INT32_FMT NPY_BYTE_FMT +#define NPY_UINT32_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_BYTE +#define NPY_UINT64 NPY_UBYTE + typedef signed char npy_int64; + typedef unsigned char npy_uint64; +# define PyInt64ScalarObject PyByteScalarObject +# define PyInt64ArrType_Type PyByteArrType_Type +# define PyUInt64ScalarObject PyUByteScalarObject +# define PyUInt64ArrType_Type PyUByteArrType_Type +#define NPY_INT64_FMT NPY_BYTE_FMT +#define NPY_UINT64_FMT NPY_UBYTE_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_CHAR == 128 +#endif + + + +#if NPY_BITSOF_DOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_DOUBLE +#define NPY_COMPLEX64 NPY_CDOUBLE + typedef double npy_float32; + typedef npy_cdouble npy_complex64; +# define PyFloat32ScalarObject PyDoubleScalarObject +# define PyComplex64ScalarObject PyCDoubleScalarObject +# define PyFloat32ArrType_Type PyDoubleArrType_Type +# define PyComplex64ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX128 NPY_CDOUBLE + typedef double npy_float64; + typedef npy_cdouble npy_complex128; +# define PyFloat64ScalarObject PyDoubleScalarObject +# define PyComplex128ScalarObject PyCDoubleScalarObject +# define PyFloat64ArrType_Type PyDoubleArrType_Type +# define PyComplex128ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_DOUBLE +#define NPY_COMPLEX160 NPY_CDOUBLE + typedef double npy_float80; + typedef npy_cdouble npy_complex160; +# define PyFloat80ScalarObject PyDoubleScalarObject +# define PyComplex160ScalarObject PyCDoubleScalarObject +# define PyFloat80ArrType_Type PyDoubleArrType_Type +# define PyComplex160ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_DOUBLE +#define NPY_COMPLEX192 NPY_CDOUBLE + typedef double npy_float96; + typedef npy_cdouble npy_complex192; +# define PyFloat96ScalarObject PyDoubleScalarObject +# define PyComplex192ScalarObject PyCDoubleScalarObject +# define PyFloat96ArrType_Type PyDoubleArrType_Type +# define PyComplex192ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_DOUBLE +#define NPY_COMPLEX256 NPY_CDOUBLE + typedef double npy_float128; + typedef npy_cdouble npy_complex256; +# define PyFloat128ScalarObject PyDoubleScalarObject +# define PyComplex256ScalarObject PyCDoubleScalarObject +# define PyFloat128ArrType_Type PyDoubleArrType_Type +# define PyComplex256ArrType_Type 
PyCDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT +#endif +#endif + + + +#if NPY_BITSOF_FLOAT == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_COMPLEX64 NPY_CFLOAT + typedef float npy_float32; + typedef npy_cfloat npy_complex64; +# define PyFloat32ScalarObject PyFloatScalarObject +# define PyComplex64ScalarObject PyCFloatScalarObject +# define PyFloat32ArrType_Type PyFloatArrType_Type +# define PyComplex64ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT32_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_FLOAT +#define NPY_COMPLEX128 NPY_CFLOAT + typedef float npy_float64; + typedef npy_cfloat npy_complex128; +# define PyFloat64ScalarObject PyFloatScalarObject +# define PyComplex128ScalarObject PyCFloatScalarObject +# define PyFloat64ArrType_Type PyFloatArrType_Type +# define PyComplex128ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT64_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_FLOAT +#define NPY_COMPLEX160 NPY_CFLOAT + typedef float npy_float80; + typedef npy_cfloat npy_complex160; +# define PyFloat80ScalarObject PyFloatScalarObject +# define PyComplex160ScalarObject PyCFloatScalarObject +# define PyFloat80ArrType_Type PyFloatArrType_Type +# define PyComplex160ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT80_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_FLOAT +#define NPY_COMPLEX192 NPY_CFLOAT + typedef float npy_float96; + typedef npy_cfloat npy_complex192; +# define PyFloat96ScalarObject PyFloatScalarObject +# define PyComplex192ScalarObject PyCFloatScalarObject +# define PyFloat96ArrType_Type PyFloatArrType_Type +# define PyComplex192ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT96_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_FLOAT +#define NPY_COMPLEX256 NPY_CFLOAT + typedef float npy_float128; + typedef npy_cfloat npy_complex256; +# define PyFloat128ScalarObject PyFloatScalarObject +# define PyComplex256ScalarObject PyCFloatScalarObject +# define PyFloat128ArrType_Type PyFloatArrType_Type +# define PyComplex256ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT128_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT +#endif +#endif + +/* half/float16 isn't a floating-point type in C */ +#define NPY_FLOAT16 NPY_HALF +typedef npy_uint16 npy_half; +typedef npy_half npy_float16; + +#if NPY_BITSOF_LONGDOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_LONGDOUBLE +#define NPY_COMPLEX64 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float32; + typedef npy_clongdouble npy_complex64; +# define PyFloat32ScalarObject PyLongDoubleScalarObject +# define PyComplex64ScalarObject PyCLongDoubleScalarObject +# define PyFloat32ArrType_Type PyLongDoubleArrType_Type +# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_LONGDOUBLE +#define NPY_COMPLEX128 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float64; + typedef npy_clongdouble npy_complex128; +# define PyFloat64ScalarObject PyLongDoubleScalarObject +# define 
PyComplex128ScalarObject PyCLongDoubleScalarObject +# define PyFloat64ArrType_Type PyLongDoubleArrType_Type +# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_LONGDOUBLE +#define NPY_COMPLEX160 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float80; + typedef npy_clongdouble npy_complex160; +# define PyFloat80ScalarObject PyLongDoubleScalarObject +# define PyComplex160ScalarObject PyCLongDoubleScalarObject +# define PyFloat80ArrType_Type PyLongDoubleArrType_Type +# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_LONGDOUBLE +#define NPY_COMPLEX192 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float96; + typedef npy_clongdouble npy_complex192; +# define PyFloat96ScalarObject PyLongDoubleScalarObject +# define PyComplex192ScalarObject PyCLongDoubleScalarObject +# define PyFloat96ArrType_Type PyLongDoubleArrType_Type +# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_LONGDOUBLE +#define NPY_COMPLEX256 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float128; + typedef npy_clongdouble npy_complex256; +# define PyFloat128ScalarObject PyLongDoubleScalarObject +# define PyComplex256ScalarObject PyCLongDoubleScalarObject +# define PyFloat128ArrType_Type PyLongDoubleArrType_Type +# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT +#endif +#endif + +/* datetime typedefs */ +typedef npy_int64 npy_timedelta; +typedef npy_int64 npy_datetime; +#define NPY_DATETIME_FMT NPY_INT64_FMT +#define NPY_TIMEDELTA_FMT NPY_INT64_FMT + +/* End of typedefs for numarray style bit-width names */ + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_cpu.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_cpu.h new file mode 100644 index 0000000..91cf2d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_cpu.h @@ -0,0 +1,124 @@ +/* + * This set (target) cpu specific macros: + * - Possible values: + * NPY_CPU_X86 + * NPY_CPU_AMD64 + * NPY_CPU_PPC + * NPY_CPU_PPC64 + * NPY_CPU_PPC64LE + * NPY_CPU_SPARC + * NPY_CPU_S390 + * NPY_CPU_IA64 + * NPY_CPU_HPPA + * NPY_CPU_ALPHA + * NPY_CPU_ARMEL + * NPY_CPU_ARMEB + * NPY_CPU_SH_LE + * NPY_CPU_SH_BE + * NPY_CPU_ARCEL + * NPY_CPU_ARCEB + * NPY_CPU_RISCV64 + * NPY_CPU_RISCV32 + * NPY_CPU_LOONGARCH + * NPY_CPU_WASM + */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ + +#include "numpyconfig.h" + +#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) + /* + * __i386__ is defined by gcc and Intel compiler on Linux, + * _M_IX86 by VS compiler, + * i386 by Sun compilers on opensolaris at least + */ + #define NPY_CPU_X86 +#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) + /* + * both __x86_64__ and __amd64__ are defined by gcc + * __x86_64 defined by sun compiler on opensolaris at least + * _M_AMD64 defined by MS 
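 + * (i.e. Visual C++; editorial aside: MinGW-w64 also defines __x86_64__, so + * it is caught by the gcc-style macros above rather than by _M_AMD64)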
compiler + */ + #define NPY_CPU_AMD64 +#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_PPC64LE +#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_PPC64 +#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) + /* + * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, + * but can't find it ATM + * _ARCH_PPC is used by at least gcc on AIX + * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check + * for those specifically first before defaulting to ppc + */ + #define NPY_CPU_PPC +#elif defined(__sparc__) || defined(__sparc) + /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ + #define NPY_CPU_SPARC +#elif defined(__s390__) + #define NPY_CPU_S390 +#elif defined(__ia64) + #define NPY_CPU_IA64 +#elif defined(__hppa) + #define NPY_CPU_HPPA +#elif defined(__alpha__) + #define NPY_CPU_ALPHA +#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64) + /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */ + #if defined(__ARMEB__) || defined(__AARCH64EB__) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEB_AARCH32 + #elif defined(__ARM_64BIT_STATE) + #define NPY_CPU_ARMEB_AARCH64 + #else + #define NPY_CPU_ARMEB + #endif + #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEL_AARCH32 + #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__) + #define NPY_CPU_ARMEL_AARCH64 + #else + #define NPY_CPU_ARMEL + #endif + #else + # error Unknown ARM CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) + #endif +#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_SH_LE +#elif defined(__sh__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_SH_BE +#elif defined(__MIPSEL__) + #define NPY_CPU_MIPSEL +#elif defined(__MIPSEB__) + #define NPY_CPU_MIPSEB +#elif defined(__or1k__) + #define NPY_CPU_OR1K +#elif defined(__mc68000__) + #define NPY_CPU_M68K +#elif defined(__arc__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_ARCEL +#elif defined(__arc__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_ARCEB +#elif defined(__riscv) + #if __riscv_xlen == 64 + #define NPY_CPU_RISCV64 + #elif __riscv_xlen == 32 + #define NPY_CPU_RISCV32 + #endif +#elif defined(__loongarch_lp64) + #define NPY_CPU_LOONGARCH64 +#elif defined(__EMSCRIPTEN__) + /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + #define NPY_CPU_WASM +#else + #error Unknown CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) +#endif + +#define NPY_ALIGNMENT_REQUIRED 1 + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_endian.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_endian.h new file mode 100644 index 0000000..0926212 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_endian.h @@ -0,0 +1,78 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ + +/* + * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in + * endian.h + */ + +#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H) + /* Use endian.h if available */ + + #if defined(NPY_HAVE_ENDIAN_H) + #include <endian.h> + #elif defined(NPY_HAVE_SYS_ENDIAN_H) + #include <sys/endian.h> + #endif + + #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && 
defined(LITTLE_ENDIAN) + #define NPY_BYTE_ORDER BYTE_ORDER + #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN + #define NPY_BIG_ENDIAN BIG_ENDIAN + #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) + #define NPY_BYTE_ORDER _BYTE_ORDER + #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN + #define NPY_BIG_ENDIAN _BIG_ENDIAN + #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) + #define NPY_BYTE_ORDER __BYTE_ORDER + #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN + #define NPY_BIG_ENDIAN __BIG_ENDIAN + #endif +#endif + +#ifndef NPY_BYTE_ORDER + /* Set endianness info using target CPU */ + #include "npy_cpu.h" + + #define NPY_LITTLE_ENDIAN 1234 + #define NPY_BIG_ENDIAN 4321 + + #if defined(NPY_CPU_X86) \ + || defined(NPY_CPU_AMD64) \ + || defined(NPY_CPU_IA64) \ + || defined(NPY_CPU_ALPHA) \ + || defined(NPY_CPU_ARMEL) \ + || defined(NPY_CPU_ARMEL_AARCH32) \ + || defined(NPY_CPU_ARMEL_AARCH64) \ + || defined(NPY_CPU_SH_LE) \ + || defined(NPY_CPU_MIPSEL) \ + || defined(NPY_CPU_PPC64LE) \ + || defined(NPY_CPU_ARCEL) \ + || defined(NPY_CPU_RISCV64) \ + || defined(NPY_CPU_RISCV32) \ + || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_WASM) + #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN + + #elif defined(NPY_CPU_PPC) \ + || defined(NPY_CPU_SPARC) \ + || defined(NPY_CPU_S390) \ + || defined(NPY_CPU_HPPA) \ + || defined(NPY_CPU_PPC64) \ + || defined(NPY_CPU_ARMEB) \ + || defined(NPY_CPU_ARMEB_AARCH32) \ + || defined(NPY_CPU_ARMEB_AARCH64) \ + || defined(NPY_CPU_SH_BE) \ + || defined(NPY_CPU_MIPSEB) \ + || defined(NPY_CPU_OR1K) \ + || defined(NPY_CPU_M68K) \ + || defined(NPY_CPU_ARCEB) + #define NPY_BYTE_ORDER NPY_BIG_ENDIAN + + #else + #error Unknown CPU: can not set endianness + #endif + +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_math.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_math.h new file mode 100644 index 0000000..abc784b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_math.h @@ -0,0 +1,602 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ + +#include <numpy/npy_common.h> + +#include <math.h> + +/* By adding static inline specifiers to npy_math function definitions when + appropriate, the compiler is given the opportunity to optimize */ +#if NPY_INLINE_MATH +#define NPY_INPLACE static inline +#else +#define NPY_INPLACE +#endif + + +#ifdef __cplusplus +extern "C" { +#endif + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) + +/* + * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 + * for INFINITY) + * + * XXX: I should test whether INFINITY and NAN are available on the platform + */ +static inline float __npy_inff(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; + return __bint.__f; +} + +static inline float __npy_nanf(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; + return __bint.__f; +} + +static inline float __npy_pzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; + return __bint.__f; +} + +static inline float __npy_nzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; + return __bint.__f; +} + +#define NPY_INFINITYF __npy_inff() +#define NPY_NANF __npy_nanf() +#define NPY_PZEROF __npy_pzerof() +#define NPY_NZEROF __npy_nzerof() + +#define NPY_INFINITY ((npy_double)NPY_INFINITYF) +#define NPY_NAN
((npy_double)NPY_NANF) +#define NPY_PZERO ((npy_double)NPY_PZEROF) +#define NPY_NZERO ((npy_double)NPY_NZEROF) + +#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) +#define NPY_NANL ((npy_longdouble)NPY_NANF) +#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) +#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) + +/* + * Useful constants + */ +#define NPY_E 2.718281828459045235360287471352662498 /* e */ +#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ +#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ +#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ +#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ +#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ +#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ +#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ +#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ +#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ +#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ +#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ +#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ + +#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ +#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ +#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ +#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ +#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ +#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ +#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ +#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ +#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ +#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ +#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */ +#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ +#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ + +#define NPY_El 2.718281828459045235360287471352662498L /* e */ +#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ +#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ +#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ +#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ +#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ +#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ +#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ +#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ +#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ +#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */ +#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ +#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ + +/* + * Integer functions. 
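 + * + * (Editorial examples: npy_gcd(12, 18) == 6, npy_lcm(4, 6) == 12, + * npy_popcount(0xF0) == 4; the u/l/ll-suffixed variants cover the unsigned, + * long and long long widths declared below.)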
+ */ +NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); + +NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b); +NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b); +NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); + +NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a); +NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a); +NPY_INPLACE uint8_t npy_popcountu(npy_uint a); +NPY_INPLACE uint8_t npy_popcountul(npy_ulong a); +NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a); +NPY_INPLACE uint8_t npy_popcounthh(npy_byte a); +NPY_INPLACE uint8_t npy_popcounth(npy_short a); +NPY_INPLACE uint8_t npy_popcount(npy_int a); +NPY_INPLACE uint8_t npy_popcountl(npy_long a); +NPY_INPLACE uint8_t npy_popcountll(npy_longlong a); + +/* + * C99 double math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE double npy_sin(double x); +NPY_INPLACE double npy_cos(double x); +NPY_INPLACE double npy_tan(double x); +NPY_INPLACE double npy_hypot(double x, double y); +NPY_INPLACE double npy_log2(double x); +NPY_INPLACE double npy_atan2(double x, double y); + +/* Mandatory C99 double math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ +#define npy_sinh sinh +#define npy_cosh cosh +#define npy_tanh tanh +#define npy_asin asin +#define npy_acos acos +#define npy_atan atan +#define npy_log log +#define npy_log10 log10 +#define npy_cbrt cbrt +#define npy_fabs fabs +#define npy_ceil ceil +#define npy_fmod fmod +#define npy_floor floor +#define npy_expm1 expm1 +#define npy_log1p log1p +#define npy_acosh acosh +#define npy_asinh asinh +#define npy_atanh atanh +#define npy_rint rint +#define npy_trunc trunc +#define npy_exp2 exp2 +#define npy_frexp frexp +#define npy_ldexp ldexp +#define 
npy_copysign copysign +#define npy_exp exp +#define npy_sqrt sqrt +#define npy_pow pow +#define npy_modf modf +#define npy_nextafter nextafter + +double npy_spacing(double x); + +/* + * IEEE 754 fpu handling + */ + +/* use builtins to avoid function calls in tight loops + * only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISNAN + #define npy_isnan(x) __builtin_isnan(x) +#else + #define npy_isnan(x) isnan(x) +#endif + + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISFINITE + #define npy_isfinite(x) __builtin_isfinite(x) +#else + #define npy_isfinite(x) isfinite((x)) +#endif + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISINF + #define npy_isinf(x) __builtin_isinf(x) +#else + #define npy_isinf(x) isinf((x)) +#endif + +#define npy_signbit(x) signbit((x)) + +/* + * float C99 math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE float npy_sinf(float x); +NPY_INPLACE float npy_cosf(float x); +NPY_INPLACE float npy_tanf(float x); +NPY_INPLACE float npy_expf(float x); +NPY_INPLACE float npy_sqrtf(float x); +NPY_INPLACE float npy_hypotf(float x, float y); +NPY_INPLACE float npy_log2f(float x); +NPY_INPLACE float npy_atan2f(float x, float y); +NPY_INPLACE float npy_powf(float x, float y); +NPY_INPLACE float npy_modff(float x, float* y); + +/* Mandatory C99 float math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ + +#define npy_sinhf sinhf +#define npy_coshf coshf +#define npy_tanhf tanhf +#define npy_asinf asinf +#define npy_acosf acosf +#define npy_atanf atanf +#define npy_logf logf +#define npy_log10f log10f +#define npy_cbrtf cbrtf +#define npy_fabsf fabsf +#define npy_ceilf ceilf +#define npy_fmodf fmodf +#define npy_floorf floorf +#define npy_expm1f expm1f +#define npy_log1pf log1pf +#define npy_asinhf asinhf +#define npy_acoshf acoshf +#define npy_atanhf atanhf +#define npy_rintf rintf +#define npy_truncf truncf +#define npy_exp2f exp2f +#define npy_frexpf frexpf +#define npy_ldexpf ldexpf +#define npy_copysignf copysignf +#define npy_nextafterf nextafterf + +float npy_spacingf(float x); + +/* + * long double C99 double math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); + +/* Mandatory C99 double math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ +#define npy_sinhl sinhl +#define npy_coshl coshl +#define npy_tanhl tanhl +#define npy_fabsl fabsl +#define npy_floorl floorl +#define npy_ceill ceill +#define npy_rintl rintl +#define npy_truncl truncl +#define npy_cbrtl cbrtl +#define npy_log10l log10l +#define npy_logl logl +#define npy_expm1l expm1l +#define npy_asinl asinl +#define npy_acosl acosl +#define npy_atanl atanl +#define npy_asinhl asinhl +#define npy_acoshl acoshl +#define 
npy_atanhl atanhl +#define npy_log1pl log1pl +#define npy_exp2l exp2l +#define npy_fmodl fmodl +#define npy_frexpl frexpl +#define npy_ldexpl ldexpl +#define npy_copysignl copysignl +#define npy_nextafterl nextafterl + +npy_longdouble npy_spacingl(npy_longdouble x); + +/* + * Non standard functions + */ +NPY_INPLACE double npy_deg2rad(double x); +NPY_INPLACE double npy_rad2deg(double x); +NPY_INPLACE double npy_logaddexp(double x, double y); +NPY_INPLACE double npy_logaddexp2(double x, double y); +NPY_INPLACE double npy_divmod(double x, double y, double *modulus); +NPY_INPLACE double npy_heaviside(double x, double h0); + +NPY_INPLACE float npy_deg2radf(float x); +NPY_INPLACE float npy_rad2degf(float x); +NPY_INPLACE float npy_logaddexpf(float x, float y); +NPY_INPLACE float npy_logaddexp2f(float x, float y); +NPY_INPLACE float npy_divmodf(float x, float y, float *modulus); +NPY_INPLACE float npy_heavisidef(float x, float h0); + +NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y, + npy_longdouble *modulus); +NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); + +#define npy_degrees npy_rad2deg +#define npy_degreesf npy_rad2degf +#define npy_degreesl npy_rad2degl + +#define npy_radians npy_deg2rad +#define npy_radiansf npy_deg2radf +#define npy_radiansl npy_deg2radl + +/* + * Complex declarations + */ + +static inline double npy_creal(const npy_cdouble z) +{ +#if defined(__cplusplus) + return z._Val[0]; +#else + return creal(z); +#endif +} + +static inline void npy_csetreal(npy_cdouble *z, const double r) +{ + ((double *) z)[0] = r; +} + +static inline double npy_cimag(const npy_cdouble z) +{ +#if defined(__cplusplus) + return z._Val[1]; +#else + return cimag(z); +#endif +} + +static inline void npy_csetimag(npy_cdouble *z, const double i) +{ + ((double *) z)[1] = i; +} + +static inline float npy_crealf(const npy_cfloat z) +{ +#if defined(__cplusplus) + return z._Val[0]; +#else + return crealf(z); +#endif +} + +static inline void npy_csetrealf(npy_cfloat *z, const float r) +{ + ((float *) z)[0] = r; +} + +static inline float npy_cimagf(const npy_cfloat z) +{ +#if defined(__cplusplus) + return z._Val[1]; +#else + return cimagf(z); +#endif +} + +static inline void npy_csetimagf(npy_cfloat *z, const float i) +{ + ((float *) z)[1] = i; +} + +static inline npy_longdouble npy_creall(const npy_clongdouble z) +{ +#if defined(__cplusplus) + return (npy_longdouble)z._Val[0]; +#else + return creall(z); +#endif +} + +static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) +{ + ((longdouble_t *) z)[0] = r; +} + +static inline npy_longdouble npy_cimagl(const npy_clongdouble z) +{ +#if defined(__cplusplus) + return (npy_longdouble)z._Val[1]; +#else + return cimagl(z); +#endif +} + +static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i) +{ + ((longdouble_t *) z)[1] = i; +} + +#define NPY_CSETREAL(z, r) npy_csetreal(z, r) +#define NPY_CSETIMAG(z, i) npy_csetimag(z, i) +#define NPY_CSETREALF(z, r) npy_csetrealf(z, r) +#define NPY_CSETIMAGF(z, i) npy_csetimagf(z, i) +#define NPY_CSETREALL(z, r) npy_csetreall(z, r) +#define NPY_CSETIMAGL(z, i) npy_csetimagl(z, i) + +static inline npy_cdouble npy_cpack(double x, double y) +{ + npy_cdouble z; + 
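 + /* editorial comment: go through the setters so the stores are valid for + both the C99 _Complex layout and the C++/MSVC struct representation */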
npy_csetreal(&z, x); + npy_csetimag(&z, y); + return z; +} + +static inline npy_cfloat npy_cpackf(float x, float y) +{ + npy_cfloat z; + npy_csetrealf(&z, x); + npy_csetimagf(&z, y); + return z; +} + +static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y) +{ + npy_clongdouble z; + npy_csetreall(&z, x); + npy_csetimagl(&z, y); + return z; +} + +/* + * Double precision complex functions + */ +double npy_cabs(npy_cdouble z); +double npy_carg(npy_cdouble z); + +npy_cdouble npy_cexp(npy_cdouble z); +npy_cdouble npy_clog(npy_cdouble z); +npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); + +npy_cdouble npy_csqrt(npy_cdouble z); + +npy_cdouble npy_ccos(npy_cdouble z); +npy_cdouble npy_csin(npy_cdouble z); +npy_cdouble npy_ctan(npy_cdouble z); + +npy_cdouble npy_ccosh(npy_cdouble z); +npy_cdouble npy_csinh(npy_cdouble z); +npy_cdouble npy_ctanh(npy_cdouble z); + +npy_cdouble npy_cacos(npy_cdouble z); +npy_cdouble npy_casin(npy_cdouble z); +npy_cdouble npy_catan(npy_cdouble z); + +npy_cdouble npy_cacosh(npy_cdouble z); +npy_cdouble npy_casinh(npy_cdouble z); +npy_cdouble npy_catanh(npy_cdouble z); + +/* + * Single precision complex functions + */ +float npy_cabsf(npy_cfloat z); +float npy_cargf(npy_cfloat z); + +npy_cfloat npy_cexpf(npy_cfloat z); +npy_cfloat npy_clogf(npy_cfloat z); +npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); + +npy_cfloat npy_csqrtf(npy_cfloat z); + +npy_cfloat npy_ccosf(npy_cfloat z); +npy_cfloat npy_csinf(npy_cfloat z); +npy_cfloat npy_ctanf(npy_cfloat z); + +npy_cfloat npy_ccoshf(npy_cfloat z); +npy_cfloat npy_csinhf(npy_cfloat z); +npy_cfloat npy_ctanhf(npy_cfloat z); + +npy_cfloat npy_cacosf(npy_cfloat z); +npy_cfloat npy_casinf(npy_cfloat z); +npy_cfloat npy_catanf(npy_cfloat z); + +npy_cfloat npy_cacoshf(npy_cfloat z); +npy_cfloat npy_casinhf(npy_cfloat z); +npy_cfloat npy_catanhf(npy_cfloat z); + + +/* + * Extended precision complex functions + */ +npy_longdouble npy_cabsl(npy_clongdouble z); +npy_longdouble npy_cargl(npy_clongdouble z); + +npy_clongdouble npy_cexpl(npy_clongdouble z); +npy_clongdouble npy_clogl(npy_clongdouble z); +npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); + +npy_clongdouble npy_csqrtl(npy_clongdouble z); + +npy_clongdouble npy_ccosl(npy_clongdouble z); +npy_clongdouble npy_csinl(npy_clongdouble z); +npy_clongdouble npy_ctanl(npy_clongdouble z); + +npy_clongdouble npy_ccoshl(npy_clongdouble z); +npy_clongdouble npy_csinhl(npy_clongdouble z); +npy_clongdouble npy_ctanhl(npy_clongdouble z); + +npy_clongdouble npy_cacosl(npy_clongdouble z); +npy_clongdouble npy_casinl(npy_clongdouble z); +npy_clongdouble npy_catanl(npy_clongdouble z); + +npy_clongdouble npy_cacoshl(npy_clongdouble z); +npy_clongdouble npy_casinhl(npy_clongdouble z); +npy_clongdouble npy_catanhl(npy_clongdouble z); + + +/* + * Functions that set the floating point error + * status word. + */ + +/* + * platform-dependent code translates floating point + * status to an integer sum of these values + */ +#define NPY_FPE_DIVIDEBYZERO 1 +#define NPY_FPE_OVERFLOW 2 +#define NPY_FPE_UNDERFLOW 4 +#define NPY_FPE_INVALID 8 + +int npy_clear_floatstatus_barrier(char*); +int npy_get_floatstatus_barrier(char*); +/* + * use caution with these - clang and gcc8.1 are known to reorder calls + * to this form of the function which can defeat the check. 
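 + * (Editorial illustration: without the barrier argument the compiler may + * hoist the status read above the arithmetic whose flags it is meant to + * observe, returning a stale result.)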
The _barrier + * form of the call is preferable, where the argument is + * (char*)&local_variable + */ +int npy_clear_floatstatus(void); +int npy_get_floatstatus(void); + +void npy_set_floatstatus_divbyzero(void); +void npy_set_floatstatus_overflow(void); +void npy_set_floatstatus_underflow(void); +void npy_set_floatstatus_invalid(void); + +#ifdef __cplusplus +} +#endif + +#if NPY_INLINE_MATH +#include "npy_math_internal.h" +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_no_deprecated_api.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_no_deprecated_api.h new file mode 100644 index 0000000..39658c0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_no_deprecated_api.h @@ -0,0 +1,20 @@ +/* + * This include file is provided for inclusion in Cython *.pyd files where + * one would like to define the NPY_NO_DEPRECATED_API macro. It can be + * included by + * + * cdef extern from "npy_no_deprecated_api.h": pass + * + */ +#ifndef NPY_NO_DEPRECATED_API + +/* put this check here since there may be multiple includes in C extensions. */ +#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_) +#error "npy_no_deprecated_api.h" must be first among numpy includes. +#else +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif + +#endif /* NPY_NO_DEPRECATED_API */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_os.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_os.h new file mode 100644 index 0000000..0ce5d78 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_os.h @@ -0,0 +1,42 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ + +#if defined(linux) || defined(__linux) || defined(__linux__) + #define NPY_OS_LINUX +#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__) || defined(__DragonFly__) + #define NPY_OS_BSD + #ifdef __FreeBSD__ + #define NPY_OS_FREEBSD + #elif defined(__NetBSD__) + #define NPY_OS_NETBSD + #elif defined(__OpenBSD__) + #define NPY_OS_OPENBSD + #elif defined(__DragonFly__) + #define NPY_OS_DRAGONFLY + #endif +#elif defined(sun) || defined(__sun) + #define NPY_OS_SOLARIS +#elif defined(__CYGWIN__) + #define NPY_OS_CYGWIN +/* We are on Windows.*/ +#elif defined(_WIN32) + /* We are using MinGW (64-bit or 32-bit)*/ + #if defined(__MINGW32__) || defined(__MINGW64__) + #define NPY_OS_MINGW + /* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/ + #elif defined(_WIN64) + #define NPY_OS_WIN64 + /* Otherwise assume we are targeting 32-bit Windows*/ + #else + #define NPY_OS_WIN32 + #endif +#elif defined(__APPLE__) + #define NPY_OS_DARWIN +#elif defined(__HAIKU__) + #define NPY_OS_HAIKU +#else + #define NPY_OS_UNKNOWN +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/numpyconfig.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/numpyconfig.h new file mode 100644 index 0000000..ba44c28 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/numpyconfig.h @@ -0,0 +1,182 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ + +#include "_numpyconfig.h" + +/* + * On Mac OS X, because there is only one 
configuration stage for all the archs + * in universal builds, any macro which depends on the arch needs to be + * hardcoded. + * + * Note that distutils/pip will attempt a universal2 build when Python itself + * is built as universal2, hence this hardcoding is needed even if we do not + * support universal2 wheels anymore (see gh-22796). + * This code block can be removed after we have dropped the setup.py based + * build completely. + */ +#ifdef __APPLE__ + #undef NPY_SIZEOF_LONG + + #ifdef __LP64__ + #define NPY_SIZEOF_LONG 8 + #else + #define NPY_SIZEOF_LONG 4 + #endif + + #undef NPY_SIZEOF_LONGDOUBLE + #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE + #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE + #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE + #endif + #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE + #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE + #endif + + #if defined(__arm64__) + #define NPY_SIZEOF_LONGDOUBLE 8 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16 + #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1 + #elif defined(__x86_64) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1 + #elif defined (__i386) + #define NPY_SIZEOF_LONGDOUBLE 12 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24 + #elif defined(__ppc__) || defined (__ppc64__) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #else + #error "unknown architecture" + #endif +#endif + + +/** + * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro, + * we include API version numbers for specific versions of NumPy. + * To exclude all API that was deprecated as of 1.7, add the following before + * #including any NumPy headers: + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * The same is true for NPY_TARGET_VERSION, although NumPy will default to + * a backwards compatible build anyway. + */ +#define NPY_1_7_API_VERSION 0x00000007 +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_1_9_API_VERSION 0x00000009 +#define NPY_1_10_API_VERSION 0x0000000a +#define NPY_1_11_API_VERSION 0x0000000a +#define NPY_1_12_API_VERSION 0x0000000a +#define NPY_1_13_API_VERSION 0x0000000b +#define NPY_1_14_API_VERSION 0x0000000c +#define NPY_1_15_API_VERSION 0x0000000c +#define NPY_1_16_API_VERSION 0x0000000d +#define NPY_1_17_API_VERSION 0x0000000d +#define NPY_1_18_API_VERSION 0x0000000d +#define NPY_1_19_API_VERSION 0x0000000d +#define NPY_1_20_API_VERSION 0x0000000e +#define NPY_1_21_API_VERSION 0x0000000e +#define NPY_1_22_API_VERSION 0x0000000f +#define NPY_1_23_API_VERSION 0x00000010 +#define NPY_1_24_API_VERSION 0x00000010 +#define NPY_1_25_API_VERSION 0x00000011 +#define NPY_2_0_API_VERSION 0x00000012 +#define NPY_2_1_API_VERSION 0x00000013 +#define NPY_2_2_API_VERSION 0x00000013 +#define NPY_2_3_API_VERSION 0x00000014 + + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version we are compiling to be compatible with. The version + * Number is always increased when the API changes via: `NPY_API_VERSION` + * (and should maybe just track the NumPy version). + * + * If we have an internal build, we always target the current version of + * course. + * + * For downstream users, we default to an older version to provide them with + * maximum compatibility by default. 
Downstream can choose to extend that + * default, or narrow it down if they wish to use newer API. If you adjust + * this, consider the Python version support (example for 1.25.x): + * + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 3.6 3.7 + * + * Users of the stable ABI may wish to target the last Python that is not + * end of life. This would be 3.8 at NumPy 1.25 release time. + * 1.17 as default was the choice of oldest-support-numpy at the time and + * has in practice no limit (compared to 1.19). Even earlier becomes legacy. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* NumPy internal build, always use current version. */ + #define NPY_FEATURE_VERSION NPY_API_VERSION +#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION + /* user provided a target version, use it */ + #define NPY_FEATURE_VERSION NPY_TARGET_VERSION +#else + /* Use the default (increase when dropping Python 3.11 support) */ + #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION +#endif + +/* Sanity check the (requested) feature version */ +#if NPY_FEATURE_VERSION > NPY_API_VERSION + #error "NPY_TARGET_VERSION higher than NumPy headers!" +#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION + /* No support for irrelevant old targets, no need for error, but warn. */ + #ifndef _MSC_VER + #warning "Requested NumPy target lower than supported NumPy 1.15." + #else + #define _WARN___STR2__(x) #x + #define _WARN___STR1__(x) _WARN___STR2__(x) + #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.") + #endif +#endif + +/* + * We define a human readable translation to the Python version of NumPy + * for error messages (and also to allow grepping the binaries for conda). + */ +#if NPY_FEATURE_VERSION == NPY_1_7_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.7" +#elif NPY_FEATURE_VERSION == NPY_1_8_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.8" +#elif NPY_FEATURE_VERSION == NPY_1_9_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.9" +#elif NPY_FEATURE_VERSION == NPY_1_10_API_VERSION /* also 1.11, 1.12 */ + #define NPY_FEATURE_VERSION_STRING "1.10" +#elif NPY_FEATURE_VERSION == NPY_1_13_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.13" +#elif NPY_FEATURE_VERSION == NPY_1_14_API_VERSION /* also 1.15 */ + #define NPY_FEATURE_VERSION_STRING "1.14" +#elif NPY_FEATURE_VERSION == NPY_1_16_API_VERSION /* also 1.17, 1.18, 1.19 */ + #define NPY_FEATURE_VERSION_STRING "1.16" +#elif NPY_FEATURE_VERSION == NPY_1_20_API_VERSION /* also 1.21 */ + #define NPY_FEATURE_VERSION_STRING "1.20" +#elif NPY_FEATURE_VERSION == NPY_1_22_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.22" +#elif NPY_FEATURE_VERSION == NPY_1_23_API_VERSION /* also 1.24 */ + #define NPY_FEATURE_VERSION_STRING "1.23" +#elif NPY_FEATURE_VERSION == NPY_1_25_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.25" +#elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.0" +#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.1" +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.3" +#else + #error "Missing version string define for new NumPy version." 
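+/* + * Illustrative note added by this editor (not part of the upstream NumPy + * header): a downstream extension would normally pin both macros before + * including any NumPy header, e.g. + * + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * #define NPY_TARGET_VERSION NPY_1_22_API_VERSION + * #include <numpy/ndarrayobject.h> + * + * so that NPY_FEATURE_VERSION above resolves to the pinned target and the + * matching NPY_FEATURE_VERSION_STRING is selected. + */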
+#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/LICENSE.txt b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/LICENSE.txt new file mode 100644 index 0000000..d72a7c3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/LICENSE.txt @@ -0,0 +1,21 @@ + zlib License + ------------ + + Copyright (C) 2010 - 2019 ridiculous_fish, + Copyright (C) 2016 - 2019 Kim Walisch, + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/bitgen.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/bitgen.h new file mode 100644 index 0000000..162dd5c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/bitgen.h @@ -0,0 +1,20 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ + +#pragma once +#include <stddef.h> +#include <stdbool.h> +#include <stdint.h> + +/* Must match the declaration in numpy/random/<any>.pxd */ + +typedef struct bitgen { + void *state; + uint64_t (*next_uint64)(void *st); + uint32_t (*next_uint32)(void *st); + double (*next_double)(void *st); + uint64_t (*next_raw)(void *st); +} bitgen_t; + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/distributions.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/distributions.h new file mode 100644 index 0000000..e7fa4bd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/distributions.h @@ -0,0 +1,209 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <Python.h> +#include "numpy/npy_common.h" +#include <stddef.h> +#include <stdbool.h> +#include <stdint.h> + +#include "numpy/npy_math.h" +#include "numpy/random/bitgen.h" + +/* + * RAND_INT_TYPE is used to share integer generators with RandomState which + * used long in place of int64_t. If changing a distribution that uses + * RAND_INT_TYPE, then the original unmodified copy must be retained for + * use in RandomState by copying to the legacy distributions source file. + */ +#ifdef NP_RANDOM_LEGACY +#define RAND_INT_TYPE long +#define RAND_INT_MAX LONG_MAX +#else +#define RAND_INT_TYPE int64_t +#define RAND_INT_MAX INT64_MAX +#endif + +#ifdef _MSC_VER +#define DECLDIR __declspec(dllexport) +#else +#define DECLDIR extern +#endif + +#ifndef MIN +#define MIN(x, y) (((x) < (y)) ? x : y) +#define MAX(x, y) (((x) > (y)) ?
x : y) +#endif + +#ifndef M_PI +#define M_PI 3.14159265358979323846264338328 +#endif + +typedef struct s_binomial_t { + int has_binomial; /* !=0: following parameters initialized for binomial */ + double psave; + RAND_INT_TYPE nsave; + double r; + double q; + double fm; + RAND_INT_TYPE m; + double p1; + double xm; + double xl; + double xr; + double c; + double laml; + double lamr; + double p2; + double p3; + double p4; +} binomial_t; + +DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state); +DECLDIR double random_standard_uniform(bitgen_t *bitgen_state); +DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state); +DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state); +DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state); +DECLDIR uint64_t random_uint(bitgen_t *bitgen_state); + +DECLDIR double random_standard_exponential(bitgen_t *bitgen_state); +DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR double random_standard_normal(bitgen_t *bitgen_state); +DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape); +DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape); + +DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale); + +DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale); +DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale); + +DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale); +DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range); +DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b); +DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df); +DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden); +DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state); +DECLDIR double random_pareto(bitgen_t *bitgen_state, double a); +DECLDIR double random_weibull(bitgen_t *bitgen_state, double a); +DECLDIR double random_power(bitgen_t *bitgen_state, double a); +DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma); +DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode); +DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df); +DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc); +DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc); +DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale); +DECLDIR double 
random_vonmises(bitgen_t *bitgen_state, double mu, double kappa); +DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right); + +DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam); +DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, + double p); + +DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial); + +DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p); +DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); +DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, + int64_t good, int64_t bad, int64_t sample); +DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max); + +/* Generate random uint64 numbers in closed interval [off, off + rng]. */ +DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, uint64_t mask, + bool use_masked); + +/* Generate random uint32 numbers in closed interval [off, off + rng]. */ +DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, + uint32_t off, uint32_t rng, + uint32_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, + uint16_t off, uint16_t rng, + uint16_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, uint8_t mask, + bool use_masked, int *bcnt, + uint32_t *buf); +DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_bool mask, + bool use_masked, int *bcnt, + uint32_t *buf); + +DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, npy_intp cnt, + bool use_masked, uint64_t *out); +DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off, + uint32_t rng, npy_intp cnt, + bool use_masked, uint32_t *out); +DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off, + uint16_t rng, npy_intp cnt, + bool use_masked, uint16_t *out); +DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, npy_intp cnt, + bool use_masked, uint8_t *out); +DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_intp cnt, + bool use_masked, npy_bool *out); + +DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, + double *pix, npy_intp d, binomial_t *binomial); + +/* multivariate hypergeometric, "count" method */ +DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* multivariate hypergeometric, "marginals" method */ +DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* Common to legacy-distributions.c and distributions.c but not exported */ + +RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +double 
random_loggam(double x); +static inline double next_double(bitgen_t *bitgen_state) { + return bitgen_state->next_double(bitgen_state->state); +} + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/libdivide.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/libdivide.h new file mode 100644 index 0000000..f4eb803 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/libdivide.h @@ -0,0 +1,2079 @@ +// libdivide.h - Optimized integer division +// https://libdivide.com +// +// Copyright (C) 2010 - 2019 ridiculous_fish, +// Copyright (C) 2016 - 2019 Kim Walisch, +// +// libdivide is dual-licensed under the Boost or zlib licenses. +// You may use libdivide under the terms of either of these. +// See LICENSE.txt for more details. + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ + +#define LIBDIVIDE_VERSION "3.0" +#define LIBDIVIDE_VERSION_MAJOR 3 +#define LIBDIVIDE_VERSION_MINOR 0 + +#include <stdint.h> + +#if defined(__cplusplus) + #include <cstdlib> + #include <cstdio> + #include <type_traits> +#else + #include <stdlib.h> + #include <stdio.h> +#endif + +#if defined(LIBDIVIDE_AVX512) + #include <immintrin.h> +#elif defined(LIBDIVIDE_AVX2) + #include <immintrin.h> +#elif defined(LIBDIVIDE_SSE2) + #include <emmintrin.h> +#endif + +#if defined(_MSC_VER) + #include <intrin.h> + // disable warning C4146: unary minus operator applied + // to unsigned type, result still unsigned + #pragma warning(disable: 4146) + #define LIBDIVIDE_VC +#endif + +#if !defined(__has_builtin) + #define __has_builtin(x) 0 +#endif + +#if defined(__SIZEOF_INT128__) + #define HAS_INT128_T + // clang-cl on Windows does not yet support 128-bit division + #if !(defined(__clang__) && defined(LIBDIVIDE_VC)) + #define HAS_INT128_DIV + #endif +#endif + +#if defined(__x86_64__) || defined(_M_X64) + #define LIBDIVIDE_X86_64 +#endif + +#if defined(__i386__) + #define LIBDIVIDE_i386 +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define LIBDIVIDE_GCC_STYLE_ASM +#endif + +#if defined(__cplusplus) || defined(LIBDIVIDE_VC) + #define LIBDIVIDE_FUNCTION __FUNCTION__ +#else + #define LIBDIVIDE_FUNCTION __func__ +#endif + +#define LIBDIVIDE_ERROR(msg) \ + do { \ + fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, msg); \ + abort(); \ + } while (0) + +#if defined(LIBDIVIDE_ASSERTIONS_ON) + #define LIBDIVIDE_ASSERT(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, #x); \ + abort(); \ + } \ + } while (0) +#else + #define LIBDIVIDE_ASSERT(x) +#endif + +#ifdef __cplusplus +namespace libdivide { +#endif + +// pack divider structs to prevent compilers from padding. +// This reduces memory usage by up to 43% when using a large +// array of libdivide dividers and improves performance +// by up to 10% because of reduced memory bandwidth.
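+// Illustrative usage sketch added by this editor (not part of upstream +// libdivide): a divider is generated once and then reused for many divisions, +// amortizing the cost of computing the magic number, e.g. +// +// struct libdivide_u64_t d = libdivide_u64_gen(7); // precompute once +// uint64_t q = libdivide_u64_do(numer, &d); // same as numer / 7 +// +// (numer is hypothetical; the gen/do entry points are declared below.)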
+#pragma pack(push, 1) + +struct libdivide_u32_t { + uint32_t magic; + uint8_t more; +}; + +struct libdivide_s32_t { + int32_t magic; + uint8_t more; +}; + +struct libdivide_u64_t { + uint64_t magic; + uint8_t more; +}; + +struct libdivide_s64_t { + int64_t magic; + uint8_t more; +}; + +struct libdivide_u32_branchfree_t { + uint32_t magic; + uint8_t more; +}; + +struct libdivide_s32_branchfree_t { + int32_t magic; + uint8_t more; +}; + +struct libdivide_u64_branchfree_t { + uint64_t magic; + uint8_t more; +}; + +struct libdivide_s64_branchfree_t { + int64_t magic; + uint8_t more; +}; + +#pragma pack(pop) + +// Explanation of the "more" field: +// +// * Bits 0-5 is the shift value (for shift path or mult path). +// * Bit 6 is the add indicator for mult path. +// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative +// divisor indicator so that we can efficiently use sign extension to +// create a bitmask with all bits set to 1 (if the divisor is negative) +// or 0 (if the divisor is positive). +// +// u32: [0-4] shift value +// [5] ignored +// [6] add indicator +// magic number of 0 indicates shift path +// +// s32: [0-4] shift value +// [5] ignored +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// u64: [0-5] shift value +// [6] add indicator +// magic number of 0 indicates shift path +// +// s64: [0-5] shift value +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// In s32 and s64 branchfull, but not branchfree modes, the magic number is +// negated according to whether the divisor is negated. In branchfree +// strategy, it is not negated. + +enum { + LIBDIVIDE_32_SHIFT_MASK = 0x1F, + LIBDIVIDE_64_SHIFT_MASK = 0x3F, + LIBDIVIDE_ADD_MARKER = 0x40, + LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 +}; + +static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d); +static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d); +static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d); +static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d); + +static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d); +static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d); +static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d); +static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d); + +static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom); + +static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t
*denom); +static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) { + uint64_t xl = x, yl = y; + uint64_t rl = xl * yl; + return (uint32_t)(rl >> 32); +} + +static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) { + int64_t xl = x, yl = y; + int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) + __uint128_t xl = x, yl = y; + __uint128_t rl = xl * yl; + return (uint64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; + uint64_t temp = x1y0 + x0y0_hi; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); +#endif +} + +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) + __int128_t xl = x, yl = y; + __int128_t rl = xl * yl; + return (int64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); +#endif +} + +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros + return __builtin_clz(val); +#elif defined(LIBDIVIDE_VC) + unsigned long result; + if (_BitScanReverse(&result, val)) { + return 31 - result; + } + return 0; +#else + if (val == 0) + return 32; + int32_t result = 8; + uint32_t hi = 0xFFU << 24; + while ((val & hi) == 0) { + hi >>= 8; + result += 8; + } + while (val & hi) { + result -= 1; + hi <<= 1; + } + return result; +#endif +} + +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros + return __builtin_clzll(val); +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) + unsigned long result; + if (_BitScanReverse64(&result, val)) { + return 63 - result; + } + return 0; +#else + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + 
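// the high word is zero here: all 32 of its bits are leading zeros, so add 32 and count the remainder in the low word +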
return 32 + libdivide_count_leading_zeros32(lo); +#endif +} + +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. +// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint32_t result; + __asm__("divl %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#else + uint64_t n = ((uint64_t)u1 << 32) | u0; + uint32_t result = (uint32_t)(n / v); + *r = (uint32_t)(n - result * (uint64_t)v); + return result; +#endif +} + +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. +// Returns the quotient directly and the remainder in *r +static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint64_t result; + __asm__("divq %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; +#else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm + + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm + + // If overflow, set rem. to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } + + // count leading zeros + s = libdivide_count_leading_zeros64(v); + if (s > 0) { + // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left + } else { + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. 
+ un64 = u1; + un10 = u0; + } + + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; + + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; + + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { + q1 = q1 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; + + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { + q0 = q0 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif +} + +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. +static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; +#else + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; + + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) 
are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} + +////////// UINT32 + +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint8_t more; + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint32_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. + } + return result; +} + +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + +uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint32_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_32_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. 
+ return q >> more; + } + } +} + +uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + uint32_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + return 1U << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(32 + shift) + // Therefore we have d = 2^(32 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint32_t hi_dividend = 1U << shift; + uint32_t rem_ignored; + return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling the + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } +} + +uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + return 1U << (shift + 1); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling the + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } +} + +/////////// UINT64 + +static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u64_t result; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm.
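+ // (Added note: for a power-of-two divisor the magic number stays 0, which + // is the marker the do() and recover() paths use to take the pure-shift + // route.)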
+ result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint64_t proposed_m, rem; + uint8_t more; + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint64_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. + } + return result; +} + +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + +uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint64_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_64_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. + return q >> more; + } + } +} + +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + uint64_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. 
+ // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +/////////// SINT32 + +static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s32_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint32_t ud = (uint32_t)d; + uint32_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and normal paths are exactly the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? 
LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + LIBDIVIDE_ASSERT(floor_log_2_d >= 1); + + uint8_t more; + // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word + // is 0 and the high word is floor_log_2_d - 1 + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem); + const uint32_t e = absD - rem; + + // We are going to start with a power of floor_log_2_d - 1. + // This works if e < 2**floor_log_2_d. + if (!branchfree && e < (1U << floor_log_2_d)) { + // This power works + more = floor_log_2_d - 1; + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + + proposed_m += 1; + int32_t magic = (int32_t)proposed_m; + + // Mark if we are negative. Note we only negate the magic number in the + // branchfull case. + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; + } + return result; +} + +struct libdivide_s32_t libdivide_s32_gen(int32_t d) { + return libdivide_internal_s32_gen(d, 0); +} + +struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) { + struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1); + struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more}; + return result; +} + +int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + uint32_t sign = (int8_t)more >> 7; + uint32_t mask = (1U << shift) - 1; + uint32_t uq = numer + ((numer >> 31) & mask); + int32_t q = (int32_t)uq; + q >>= shift; + q = (q ^ sign) - sign; + return q; + } else { + uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + // q += (more < 0 ?
-numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; + } + int32_t q = (int32_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? -result : result; + } +} + +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); +} + +///////////// SINT64 + +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s64_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint64_t ud = (uint64_t)d; + uint64_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and non-branchfree cases are the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? 
LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word + // is 0 and the high word is floor_log_2_d - 1 + uint8_t more; + uint64_t rem, proposed_m; + proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem); + const uint64_t e = absD - rem; + + // We are going to start with a power of floor_log_2_d - 1. + // This works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d - 1; + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; + // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we + // also set ADD_MARKER; this is an annoying optimization that + // enables algorithm #4 to avoid the mask. However we always set it + // in the branchfree case + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + proposed_m += 1; + int64_t magic = (int64_t)proposed_m; + + // Mark if we are negative + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; + } + return result; +} + +struct libdivide_s64_t libdivide_s64_gen(int64_t d) { + return libdivide_internal_s64_gen(d, 0); +} + +struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) { + struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1); + struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more}; + return ret; +} + +int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { // shift path + uint64_t mask = (1ULL << shift) - 1; + uint64_t uq = numer + ((numer >> 63) & mask); + int64_t q = (int64_t)uq; + q >>= shift; + // must be arithmetic shift and then sign-extend + int64_t sign = (int8_t)more >> 7; + q = (q ^ sign) - sign; + return q; + } else { + uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint64_t)numer ^ sign) - sign; + } + int64_t q = (int64_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + int64_t magic = denom->magic; + int64_t q = libdivide_mullhi_s64(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2.
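+ // (Added note: the fix-up below is branchless: q_sign is all ones exactly + // when q < 0, so negative quotients gain (1ULL << shift) - is_power_of_2 + // while non-negative quotients gain 0, rounding toward zero before the + // shift.)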
+ uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n_hi = 1ULL << shift, n_lo = 0; + uint64_t rem_ignored; + uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored); + int64_t result = (int64_t)(q + 1); + if (negative_divisor) { + result = -result; + } + return result; + } +} + +int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) { + return libdivide_s64_recover((const struct libdivide_s64_t *)denom); +} + +#if defined(LIBDIVIDE_AVX512) + +static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom); +static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom); +static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom); +static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom); + +static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline __m512i libdivide_s64_signbits(__m512i v) { + return _mm512_srai_epi64(v, 63); +} + +static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) { + return _mm512_srai_epi64(v, amt); +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) {
+    __m512i lomask = _mm512_set1_epi64(0xffffffff);
+    __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1);
+    __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1);
+    __m512i w0 = _mm512_mul_epu32(x, y);
+    __m512i w1 = _mm512_mul_epu32(x, yh);
+    __m512i w2 = _mm512_mul_epu32(xh, y);
+    __m512i w3 = _mm512_mul_epu32(xh, yh);
+    __m512i w0h = _mm512_srli_epi64(w0, 32);
+    __m512i s1 = _mm512_add_epi64(w1, w0h);
+    __m512i s1l = _mm512_and_si512(s1, lomask);
+    __m512i s1h = _mm512_srli_epi64(s1, 32);
+    __m512i s2 = _mm512_add_epi64(w2, s1l);
+    __m512i s2h = _mm512_srli_epi64(s2, 32);
+    __m512i hi = _mm512_add_epi64(w3, s1h);
+    hi = _mm512_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) {
+    __m512i p = libdivide_mullhi_u64_vector(x, y);
+    __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y);
+    __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x);
+    p = _mm512_sub_epi64(p, t1);
+    p = _mm512_sub_epi64(p, t2);
+    return p;
+}
+
+////////// UINT32
+
+__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm512_srli_epi32(numers, more);
+    }
+    else {
+        __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+            __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+            return _mm512_srli_epi32(t, shift);
+        }
+        else {
+            return _mm512_srli_epi32(q, more);
+        }
+    }
+}
+
+__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) {
+    __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+    __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+    return _mm512_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm512_srli_epi64(numers, more);
+    }
+    else {
+        __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint64_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+            __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+            return _mm512_srli_epi64(t, shift);
+        }
+        else {
+            return _mm512_srli_epi64(q, more);
+        }
+    }
+}
+
+__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) {
+    __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+    __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+    return _mm512_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+        uint32_t mask = (1U << shift) - 1;
+        __m512i roundToZeroTweak = _mm512_set1_epi32(mask);
+        // q = numer + ((numer >> 31) & roundToZeroTweak);
+        __m512i q = _mm512_add_epi32(numers,
_mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to 
do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
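+// (AVX2 has no 64x64->128 multiply; _mm256_mul_epu32 multiplies only the
+// low 32 bits of each 64-bit lane, so the high word is assembled from four
+// partial products exactly as in the AVX512 variant above.)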
+// https://stackoverflow.com/a/28827013
+static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) {
+    __m256i lomask = _mm256_set1_epi64x(0xffffffff);
+    __m256i xh = _mm256_shuffle_epi32(x, 0xB1);        // x0l, x0h, x1l, x1h
+    __m256i yh = _mm256_shuffle_epi32(y, 0xB1);        // y0l, y0h, y1l, y1h
+    __m256i w0 = _mm256_mul_epu32(x, y);               // x0l*y0l, x1l*y1l
+    __m256i w1 = _mm256_mul_epu32(x, yh);              // x0l*y0h, x1l*y1h
+    __m256i w2 = _mm256_mul_epu32(xh, y);              // x0h*y0l, x1h*y1l
+    __m256i w3 = _mm256_mul_epu32(xh, yh);             // x0h*y0h, x1h*y1h
+    __m256i w0h = _mm256_srli_epi64(w0, 32);
+    __m256i s1 = _mm256_add_epi64(w1, w0h);
+    __m256i s1l = _mm256_and_si256(s1, lomask);
+    __m256i s1h = _mm256_srli_epi64(s1, 32);
+    __m256i s2 = _mm256_add_epi64(w2, s1l);
+    __m256i s2h = _mm256_srli_epi64(s2, 32);
+    __m256i hi = _mm256_add_epi64(w3, s1h);
+    hi = _mm256_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) {
+    __m256i p = libdivide_mullhi_u64_vector(x, y);
+    __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y);
+    __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x);
+    p = _mm256_sub_epi64(p, t1);
+    p = _mm256_sub_epi64(p, t2);
+    return p;
+}
+
+////////// UINT32
+
+__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm256_srli_epi32(numers, more);
+    }
+    else {
+        __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+            __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+            return _mm256_srli_epi32(t, shift);
+        }
+        else {
+            return _mm256_srli_epi32(q, more);
+        }
+    }
+}
+
+__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) {
+    __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+    __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+    return _mm256_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm256_srli_epi64(numers, more);
+    }
+    else {
+        __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint64_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+            __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+            return _mm256_srli_epi64(t, shift);
+        }
+        else {
+            return _mm256_srli_epi64(q, more);
+        }
+    }
+}
+
+__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) {
+    __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+    __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+    return _mm256_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+        uint32_t mask = (1U << shift) - 1;
+        __m256i roundToZeroTweak = _mm256_set1_epi32(mask);
+        // q = numer + ((numer >>
31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, 
numers); // q += numers
+
+    // If q is non-negative, we have nothing to do.
+    // If q is negative, we want to add either (2**shift)-1 if d is
+    // a power of 2, or (2**shift) if it is not a power of 2.
+    uint32_t is_power_of_2 = (magic == 0);
+    __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+    __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2);
+    q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
+    q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+    q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+#elif defined(LIBDIVIDE_SSE2)
+
+static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom);
+static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom);
+static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom);
+static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom);
+
+static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+// Implementation of _mm_srai_epi64(v, 63) (from AVX512).
+static inline __m128i libdivide_s64_signbits(__m128i v) {
+    __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+    __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31);
+    return signBits;
+}
+
+// Implementation of _mm_srai_epi64 (from AVX512).
+static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) {
+    const int b = 64 - amt;
+    __m128i m = _mm_set1_epi64x(1ULL << (b - 1));
+    __m128i x = _mm_srli_epi64(v, amt);
+    __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m);
+    return result;
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) {
+    __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32);
+    __m128i a1X3X = _mm_srli_epi64(a, 32);
+    __m128i mask = _mm_set_epi32(-1, 0, -1, 0);
+    __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask);
+    return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// SSE2 does not have a signed multiplication instruction, but we can convert
+// unsigned to signed pretty efficiently. Again, b is just a 32 bit value
+// repeated four times.
+static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) {
+    __m128i p = libdivide_mullhi_u32_vector(a, b);
+    // t1 = (a >> 31) & b, arithmetic shift
+    __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b);
+    __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a);
+    p = _mm_sub_epi32(p, t1);
+    p = _mm_sub_epi32(p, t2);
+    return p;
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
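+// For the signed variants, the usual correction of an unsigned high multiply
+// applies: interpreting an n-bit word as signed subtracts 2^n when its sign
+// bit is set, so
+//   mullhi_s(x, y) = mullhi_u(x, y) - (x < 0 ? y : 0) - (y < 0 ? x : 0),
+// which is what the sign-mask subtractions above and in
+// libdivide_mullhi_s64_vector below compute.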
+// https://stackoverflow.com/a/28827013
+static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) {
+    __m128i lomask = _mm_set1_epi64x(0xffffffff);
+    __m128i xh = _mm_shuffle_epi32(x, 0xB1);        // x0l, x0h, x1l, x1h
+    __m128i yh = _mm_shuffle_epi32(y, 0xB1);        // y0l, y0h, y1l, y1h
+    __m128i w0 = _mm_mul_epu32(x, y);               // x0l*y0l, x1l*y1l
+    __m128i w1 = _mm_mul_epu32(x, yh);              // x0l*y0h, x1l*y1h
+    __m128i w2 = _mm_mul_epu32(xh, y);              // x0h*y0l, x1h*y1l
+    __m128i w3 = _mm_mul_epu32(xh, yh);             // x0h*y0h, x1h*y1h
+    __m128i w0h = _mm_srli_epi64(w0, 32);
+    __m128i s1 = _mm_add_epi64(w1, w0h);
+    __m128i s1l = _mm_and_si128(s1, lomask);
+    __m128i s1h = _mm_srli_epi64(s1, 32);
+    __m128i s2 = _mm_add_epi64(w2, s1l);
+    __m128i s2h = _mm_srli_epi64(s2, 32);
+    __m128i hi = _mm_add_epi64(w3, s1h);
+    hi = _mm_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) {
+    __m128i p = libdivide_mullhi_u64_vector(x, y);
+    __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y);
+    __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x);
+    p = _mm_sub_epi64(p, t1);
+    p = _mm_sub_epi64(p, t2);
+    return p;
+}
+
+////////// UINT32
+
+__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm_srli_epi32(numers, more);
+    }
+    else {
+        __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+            __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+            return _mm_srli_epi32(t, shift);
+        }
+        else {
+            return _mm_srli_epi32(q, more);
+        }
+    }
+}
+
+__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) {
+    __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+    __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+    return _mm_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm_srli_epi64(numers, more);
+    }
+    else {
+        __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint64_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+            __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+            return _mm_srli_epi64(t, shift);
+        }
+        else {
+            return _mm_srli_epi64(q, more);
+        }
+    }
+}
+
+__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) {
+    __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+    __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+    return _mm_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+        uint32_t mask = (1U << shift) - 1;
+        __m128i roundToZeroTweak = _mm_set1_epi32(mask);
+        // q = numer + ((numer >> 31) & roundToZeroTweak);
+        __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
+        q =
_mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
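+    // Scalar equivalent of the vector statements below:
+    //   q += (q >> 63) & ((1ULL << shift) - is_power_of_2);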
+    uint32_t is_power_of_2 = (magic == 0);
+    __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+    __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
+    q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+    q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+    q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+#endif
+
+/////////// C++ stuff
+
+#ifdef __cplusplus
+
+// The C++ divider class is templated on both an integer type
+// (like uint64_t) and an algorithm type.
+// * BRANCHFULL is the default algorithm type.
+// * BRANCHFREE is the branchfree algorithm type.
+enum {
+    BRANCHFULL,
+    BRANCHFREE
+};
+
+#if defined(LIBDIVIDE_AVX512)
+    #define LIBDIVIDE_VECTOR_TYPE __m512i
+#elif defined(LIBDIVIDE_AVX2)
+    #define LIBDIVIDE_VECTOR_TYPE __m256i
+#elif defined(LIBDIVIDE_SSE2)
+    #define LIBDIVIDE_VECTOR_TYPE __m128i
+#endif
+
+#if !defined(LIBDIVIDE_VECTOR_TYPE)
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+#else
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+        LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+            return libdivide_##ALGO##_do_vector(n, &denom); \
+        }
+#endif
+
+// The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+// and algorithm types) that redirect to libdivide's C API.
+#define DISPATCHER_GEN(T, ALGO) \
+    libdivide_##ALGO##_t denom; \
+    dispatcher() { } \
+    dispatcher(T d) \
+        : denom(libdivide_##ALGO##_gen(d)) \
+    { } \
+    T divide(T n) const { \
+        return libdivide_##ALGO##_do(n, &denom); \
+    } \
+    LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+    T recover() const { \
+        return libdivide_##ALGO##_recover(&denom); \
+    }
+
+// The dispatcher selects a specific division algorithm for a given
+// type and ALGO using partial template specialization.
+template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+// This is the main divider class for use by the user (C++ API).
+// The actual division algorithm is selected using the dispatcher struct
+// based on the integer and algorithm template parameters.
+template<typename T, int ALGO = BRANCHFULL>
+class divider {
+public:
+    // We leave the default constructor empty so that creating
+    // an array of dividers and then initializing them
+    // later doesn't slow us down.
+    divider() { }
+
+    // Constructor that takes the divisor as a parameter
+    divider(T d) : div(d) { }
+
+    // Divides n by the divisor
+    T divide(T n) const {
+        return div.divide(n);
+    }
+
+    // Recovers the divisor, returns the value that was
+    // used to initialize this divider object.
+    T recover() const {
+        return div.recover();
+    }
+
+    bool operator==(const divider& other) const {
+        return div.denom.magic == other.div.denom.magic &&
+               div.denom.more == other.div.denom.more;
+    }
+
+    bool operator!=(const divider& other) const {
+        return !(*this == other);
+    }
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Treats the vector as packed integer values with the same type as
+    // the divider (e.g.
s32, u32, s64, u64) and divides each of
+    // them by the divider, returning the packed quotients.
+    LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+        return div.divide(n);
+    }
+#endif
+
+private:
+    // Storage for the actual divisor
+    dispatcher<std::is_integral<T>::value,
+               std::is_signed<T>::value, sizeof(T), ALGO> div;
+};
+
+// Overload of operator / for scalar division
+template<typename T, int ALGO>
+T operator/(T n, const divider<T, ALGO>& div) {
+    return div.divide(n);
+}
+
+// Overload of operator /= for scalar division
+template<typename T, int ALGO>
+T& operator/=(T& n, const divider<T, ALGO>& div) {
+    n = div.divide(n);
+    return n;
+}
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Overload of operator / for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+        return div.divide(n);
+    }
+    // Overload of operator /= for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+        n = div.divide(n);
+        return n;
+    }
+#endif
+
+// libdivide::branchfree_divider
+template <typename T>
+using branchfree_divider = divider<T, BRANCHFREE>;
+
+} // namespace libdivide
+
+#endif // __cplusplus
+
+#endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ufuncobject.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ufuncobject.h
new file mode 100644
index 0000000..f5f82b5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ufuncobject.h
@@ -0,0 +1,343 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+
+#include <numpy/npy_math.h>
+#include <numpy/npy_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The legacy generic inner loop for a standard element-wise or
+ * generalized ufunc.
+ */
+typedef void (*PyUFuncGenericFunction)
+            (char **args,
+             npy_intp const *dimensions,
+             npy_intp const *strides,
+             void *innerloopdata);
+
+/*
+ * The most generic one-dimensional inner loop for
+ * a masked standard element-wise ufunc. "Masked" here means that it skips
+ * doing calculations on any items for which the maskptr array has a true
+ * value.
+ */
+typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
+                char **dataptrs, npy_intp *strides,
+                char *maskptr, npy_intp mask_stride,
+                npy_intp count,
+                NpyAuxData *innerloopdata);
+
+/* Forward declaration for the type resolver and loop selector typedefs */
+struct _tagPyUFuncObject;
+
+/*
+ * Given the operands for calling a ufunc, should determine the
+ * calculation input and output data types and return an inner loop function.
+ * This function should validate that the casting rule is being followed,
+ * and fail if it is not.
+ *
+ * For backwards compatibility, the regular type resolution function does not
+ * support auxiliary data with object semantics. The type resolution call
+ * which returns a masked generic function returns a standard NpyAuxData
+ * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
+ * work.
+ *
+ * ufunc:             The ufunc object.
+ * casting:           The 'casting' parameter provided to the ufunc.
+ * operands:          An array of length (ufunc->nin + ufunc->nout),
+ *                    with the output parameters possibly NULL.
+ * type_tup:          Either NULL, or the type_tup passed to the ufunc.
+ * out_dtypes:        An array which should be populated with new
+ *                    references to (ufunc->nin + ufunc->nout) new
+ *                    dtypes, one for each input and output. These
+ *                    dtypes should all be in native-endian format.
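+ *
+ *                    For instance, a resolver for a two-input, one-output
+ *                    ufunc called on int32 operands would typically fill
+ *                    out_dtypes with three new references to the int32
+ *                    descriptor.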
+ * + * Should return 0 on success, -1 on failure (with exception set), + * or -2 if Py_NotImplemented should be returned. + */ +typedef int (PyUFunc_TypeResolutionFunc)( + struct _tagPyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + +/* + * This is the signature for the functions that may be assigned to the + * `process_core_dims_func` field of the PyUFuncObject structure. + * Implementation of this function is optional. This function is only used + * by generalized ufuncs (i.e. those with the field `core_enabled` set to 1). + * The function is called by the ufunc during the processing of the arguments + * of a call of the ufunc. The function can check the core dimensions of the + * input and output arrays and return -1 with an exception set if any + * requirements are not satisfied. If the caller of the ufunc didn't provide + * output arrays, the core dimensions associated with the output arrays (i.e. + * those that are not also used in input arrays) will have the value -1 in + * `core_dim_sizes`. This function can replace any output core dimensions + * that are -1 with a value that is appropriate for the ufunc. + * + * Parameter Description + * --------------- ------------------------------------------------------ + * ufunc The ufunc object + * core_dim_sizes An array with length `ufunc->core_num_dim_ix`. + * The core dimensions of the arrays passed to the ufunc + * will have been set. If the caller of the ufunc didn't + * provide the output array(s), the output-only core + * dimensions will have the value -1. + * + * The function must not change any element in `core_dim_sizes` that is + * not -1 on input. Doing so will result in incorrect output from the + * ufunc, and could result in a crash of the Python interpreter. + * + * The function must return 0 on success, -1 on failure (with an exception + * set). + */ +typedef int (PyUFunc_ProcessCoreDimsFunc)( + struct _tagPyUFuncObject *ufunc, + npy_intp *core_dim_sizes); + +typedef struct _tagPyUFuncObject { + PyObject_HEAD + /* + * nin: Number of inputs + * nout: Number of outputs + * nargs: Always nin + nout (Why is it stored?) + */ + int nin, nout, nargs; + + /* + * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero + * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone, + * PyUFunc_IdentityValue. 
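+     * For example, np.add uses PyUFunc_Zero and np.multiply uses
+     * PyUFunc_One, which is what gives add.reduce and multiply.reduce
+     * their values on empty arrays.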
+     */
+    int identity;
+
+    /* Array of one-dimensional core loops */
+    PyUFuncGenericFunction *functions;
+    /* Array of funcdata that gets passed into the functions */
+    void *const *data;
+    /* The number of elements in 'functions' and 'data' */
+    int ntypes;
+
+    /* Used to be unused field 'check_return' */
+    int reserved1;
+
+    /* The name of the ufunc */
+    const char *name;
+
+    /* Array of type numbers, of size ('nargs' * 'ntypes') */
+    const char *types;
+
+    /* Documentation string */
+    const char *doc;
+
+    void *ptr;
+    PyObject *obj;
+    PyObject *userloops;
+
+    /* generalized ufunc parameters */
+
+    /* 0 for scalar ufunc; 1 for generalized ufunc */
+    int core_enabled;
+    /* number of distinct dimension names in signature */
+    int core_num_dim_ix;
+
+    /*
+     * dimension indices of input/output argument k are stored in
+     * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
+     */
+
+    /* numbers of core dimensions of each argument */
+    int *core_num_dims;
+    /*
+     * dimension indices in a flattened form; indices
+     * are in the range of [0,core_num_dim_ix)
+     */
+    int *core_dim_ixs;
+    /*
+     * positions of 1st core dimensions of each
+     * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
+     */
+    int *core_offsets;
+    /* signature string for printing purposes */
+    char *core_signature;
+
+    /*
+     * A function which resolves the types and fills an array
+     * with the dtypes for the inputs and outputs.
+     */
+    PyUFunc_TypeResolutionFunc *type_resolver;
+
+    /* A dictionary to monkeypatch ufuncs */
+    PyObject *dict;
+
+    /*
+     * This was blocked off to be the "new" inner loop selector in 1.7,
+     * but this was never implemented. (This is also why the above
+     * selector is called the "legacy" selector.)
+     */
+    #ifndef Py_LIMITED_API
+    vectorcallfunc vectorcall;
+    #else
+    void *vectorcall;
+    #endif
+
+    /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */
+    void *reserved3;
+
+    /*
+     * List of flags for each operand when ufunc is called by nditer object.
+     * These flags will be used in addition to the default flags for each
+     * operand set by nditer object.
+     */
+    npy_uint32 *op_flags;
+
+    /*
+     * List of global flags used when ufunc is called by nditer object.
+     * These flags will be used in addition to the default global flags
+     * set by nditer object.
+     */
+    npy_uint32 iter_flags;
+
+    /* New in NPY_API_VERSION 0x0000000D and above */
+    #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
+    /*
+     * for each core_num_dim_ix distinct dimension names,
+     * the possible "frozen" size (-1 if not frozen).
+     */
+    npy_intp *core_dim_sizes;
+
+    /*
+     * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
+     */
+    npy_uint32 *core_dim_flags;
+
+    /* Identity for reduction, when identity == PyUFunc_IdentityValue */
+    PyObject *identity_value;
+    #endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */
+
+    /* New in NPY_API_VERSION 0x0000000F and above */
+    #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+    /* New private fields related to dispatching */
+    void *_dispatch_cache;
+    /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
+    PyObject *_loops;
+    #endif
+    #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION
+    /*
+     * Optional function to process core dimensions of a gufunc.
+     */
+    PyUFunc_ProcessCoreDimsFunc *process_core_dims_func;
+    #endif
+} PyUFuncObject;
+
+#include "arrayobject.h"
+/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
+/* the core dimension's size will be determined by the operands.
*/ +#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002 +/* the core dimension may be absent */ +#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004 +/* flags inferred during execution */ +#define UFUNC_CORE_DIM_MISSING 0x00040000 + + +#define UFUNC_OBJ_ISOBJECT 1 +#define UFUNC_OBJ_NEEDS_API 2 + + +#if NPY_ALLOW_THREADS +#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); +#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); +#else +#define NPY_LOOP_BEGIN_THREADS +#define NPY_LOOP_END_THREADS +#endif + +/* + * UFunc has unit of 0, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_Zero 0 +/* + * UFunc has unit of 1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_One 1 +/* + * UFunc has unit of -1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. Intended for + * bitwise_and reduction. + */ +#define PyUFunc_MinusOne 2 +/* + * UFunc has no unit, and the order of operations cannot be reordered. + * This case does not allow reduction with multiple axes at once. + */ +#define PyUFunc_None -1 +/* + * UFunc has no unit, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_ReorderableNone -2 +/* + * UFunc unit is an identity_value, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_IdentityValue -3 + + +#define UFUNC_REDUCE 0 +#define UFUNC_ACCUMULATE 1 +#define UFUNC_REDUCEAT 2 +#define UFUNC_OUTER 3 + + +typedef struct { + int nin; + int nout; + PyObject *callable; +} PyUFunc_PyFuncData; + +/* A linked-list of function information for + user-defined 1-d loops. + */ +typedef struct _loop1d_info { + PyUFuncGenericFunction func; + void *data; + int *arg_types; + struct _loop1d_info *next; + int nargs; + PyArray_Descr **arg_dtypes; +} PyUFunc_Loop1d; + + +#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" + +/* THESE MACROS ARE DEPRECATED. + * Use npy_set_floatstatus_* in the npymath library. 
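+ * (e.g. npy_set_floatstatus_divbyzero, npy_set_floatstatus_overflow,
+ * npy_set_floatstatus_underflow and npy_set_floatstatus_invalid).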
+ */
+#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
+#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
+#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
+#define UFUNC_FPE_INVALID NPY_FPE_INVALID
+
+/* Make sure it gets defined if it isn't already */
+#ifndef UFUNC_NOFPE
+/* Clear the floating point exception default of Borland C++ */
+#if defined(__BORLANDC__)
+#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
+#else
+#define UFUNC_NOFPE
+#endif
+#endif
+
+#include "__ufunc_api.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/utils.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/utils.h
new file mode 100644
index 0000000..97f0609
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/utils.h
@@ -0,0 +1,37 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+
+#ifndef __COMP_NPY_UNUSED
+    #if defined(__GNUC__)
+        #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+    #elif defined(__ICC)
+        #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+    #elif defined(__clang__)
+        #define __COMP_NPY_UNUSED __attribute__ ((unused))
+    #else
+        #define __COMP_NPY_UNUSED
+    #endif
+#endif
+
+#if defined(__GNUC__) || defined(__ICC) || defined(__clang__)
+    #define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
+#elif defined(_MSC_VER)
+    #define NPY_DECL_ALIGNED(x) __declspec(align(x))
+#else
+    #define NPY_DECL_ALIGNED(x)
+#endif
+
+/* Use this to tag a variable as not used. It will remove unused-variable
+ * warnings on supported platforms (see __COMP_NPY_UNUSED) and mangle the
+ * variable to avoid accidental use */
+#define NPY_UNUSED(x) __NPY_UNUSED_TAGGED ## x __COMP_NPY_UNUSED
+#define NPY_EXPAND(x) x
+
+#define NPY_STRINGIFY(x) #x
+#define NPY_TOSTRING(x) NPY_STRINGIFY(x)
+
+#define NPY_CAT__(a, b) a ## b
+#define NPY_CAT_(a, b) NPY_CAT__(a, b)
+#define NPY_CAT(a, b) NPY_CAT_(a, b)
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/lib/libnpymath.a b/.venv/lib/python3.12/site-packages/numpy/_core/lib/libnpymath.a
new file mode 100644
index 0000000..f8d561d
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/lib/libnpymath.a differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/mlib.ini b/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/mlib.ini
new file mode 100644
index 0000000..5840f5e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/mlib.ini
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=-lm
+Cflags=
+
+[msvc]
+Libs=m.lib
+Cflags=
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/npymath.ini b/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/npymath.ini
new file mode 100644
index 0000000..8d879e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/npymath.ini
@@ -0,0 +1,20 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+pkgname=numpy._core
+prefix=${pkgdir}
+libdir=${prefix}/lib
+includedir=${prefix}/include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
diff --git
a/.venv/lib/python3.12/site-packages/numpy/_core/lib/pkgconfig/numpy.pc b/.venv/lib/python3.12/site-packages/numpy/_core/lib/pkgconfig/numpy.pc
new file mode 100644
index 0000000..3a4fdbc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/lib/pkgconfig/numpy.pc
@@ -0,0 +1,7 @@
+prefix=${pcfiledir}/../..
+includedir=${prefix}/include
+
+Name: numpy
+Description: NumPy is the fundamental package for scientific computing with Python.
+Version: 2.3.2
+Cflags: -I${includedir}
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/memmap.py b/.venv/lib/python3.12/site-packages/numpy/_core/memmap.py
new file mode 100644
index 0000000..8cfa7f9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/memmap.py
@@ -0,0 +1,363 @@
+import operator
+from contextlib import nullcontext
+
+import numpy as np
+from numpy._utils import set_module
+
+from .numeric import dtype, ndarray, uint8
+
+__all__ = ['memmap']
+
+dtypedescr = dtype
+valid_filemodes = ["r", "c", "r+", "w+"]
+writeable_filemodes = ["r+", "w+"]
+
+mode_equivalents = {
+    "readonly": "r",
+    "copyonwrite": "c",
+    "readwrite": "r+",
+    "write": "w+"
+    }
+
+
+@set_module('numpy')
+class memmap(ndarray):
+    """Create a memory-map to an array stored in a *binary* file on disk.
+
+    Memory-mapped files are used for accessing small segments of large files
+    on disk, without reading the entire file into memory. NumPy's
+    memmaps are array-like objects. This differs from Python's ``mmap``
+    module, which uses file-like objects.
+
+    This subclass of ndarray has some unpleasant interactions with
+    some operations, because it doesn't quite fit properly as a subclass.
+    An alternative to using this subclass is to create the ``mmap``
+    object yourself, then create an ndarray with ndarray.__new__ directly,
+    passing the object created in its 'buffer=' parameter.
+
+    This class may at some point be turned into a factory function
+    which returns a view into an mmap buffer.
+
+    Flush the memmap instance to write the changes to the file. Currently there
+    is no API to close the underlying ``mmap``. It is tricky to ensure the
+    resource is actually closed, since it may be shared between different
+    memmap instances.
+
+
+    Parameters
+    ----------
+    filename : str, file-like object, or pathlib.Path instance
+        The file name or file object to be used as the array data buffer.
+    dtype : data-type, optional
+        The data-type used to interpret the file contents.
+        Default is `uint8`.
+    mode : {'r+', 'r', 'w+', 'c'}, optional
+        The file is opened in this mode:
+
+        +------+-------------------------------------------------------------+
+        | 'r'  | Open existing file for reading only.                        |
+        +------+-------------------------------------------------------------+
+        | 'r+' | Open existing file for reading and writing.                 |
+        +------+-------------------------------------------------------------+
+        | 'w+' | Create or overwrite existing file for reading and writing.  |
+        |      | If ``mode == 'w+'`` then `shape` must also be specified.    |
+        +------+-------------------------------------------------------------+
+        | 'c'  | Copy-on-write: assignments affect data in memory, but       |
+        |      | changes are not saved to disk. The file on disk is          |
+        |      | read-only.                                                  |
+        +------+-------------------------------------------------------------+
+
+        Default is 'r+'.
+    offset : int, optional
+        In the file, array data starts at this offset. Since `offset` is
+        measured in bytes, it should normally be a multiple of the byte-size
+        of `dtype`.
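+        For example, with ``dtype=np.float32`` (itemsize 4 bytes), an
+        ``offset`` of 16 skips the first four float32 elements in the file.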
When ``mode != 'r'``, even positive offsets beyond end of + file are valid; The file will be extended to accommodate the + additional data. By default, ``memmap`` will start at the beginning of + the file, even if ``filename`` is a file pointer ``fp`` and + ``fp.tell() != 0``. + shape : int or sequence of ints, optional + The desired shape of the array. If ``mode == 'r'`` and the number + of remaining bytes after `offset` is not a multiple of the byte-size + of `dtype`, you must specify `shape`. By default, the returned array + will be 1-D with the number of elements determined by file size + and data-type. + + .. versionchanged:: 2.0 + The shape parameter can now be any integer sequence type, previously + types were limited to tuple and int. + + order : {'C', 'F'}, optional + Specify the order of the ndarray memory layout: + :term:`row-major`, C-style or :term:`column-major`, + Fortran-style. This only has an effect if the shape is + greater than 1-D. The default order is 'C'. + + Attributes + ---------- + filename : str or pathlib.Path instance + Path to the mapped file. + offset : int + Offset position in the file. + mode : str + File mode. + + Methods + ------- + flush + Flush any changes in memory to file on disk. + When you delete a memmap object, flush is called first to write + changes to disk. + + + See also + -------- + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + The memmap object can be used anywhere an ndarray is accepted. + Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns + ``True``. + + Memory-mapped files cannot be larger than 2GB on 32-bit systems. + + When a memmap causes a file to be created or extended beyond its + current size in the filesystem, the contents of the new part are + unspecified. On systems with POSIX filesystem semantics, the extended + part will be filled with zero bytes. + + Examples + -------- + >>> import numpy as np + >>> data = np.arange(12, dtype='float32') + >>> data.resize((3,4)) + + This example uses a temporary file so that doctest doesn't write + files to your directory. You would use a 'normal' filename. 
+ + >>> from tempfile import mkdtemp + >>> import os.path as path + >>> filename = path.join(mkdtemp(), 'newfile.dat') + + Create a memmap with dtype and shape that matches our data: + + >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp + memmap([[0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], dtype=float32) + + Write data to memmap array: + + >>> fp[:] = data[:] + >>> fp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + >>> fp.filename == path.abspath(filename) + True + + Flushes memory changes to disk in order to read them back + + >>> fp.flush() + + Load the memmap and verify data was stored: + + >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Read-only memmap: + + >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr.flags.writeable + False + + Copy-on-write memmap: + + >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc.flags.writeable + True + + It's possible to assign to copy-on-write array, but values are only + written into the memory copy of the array, and not written to disk: + + >>> fpc + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + >>> fpc[0,:] = 0 + >>> fpc + memmap([[ 0., 0., 0., 0.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + File on disk is unchanged: + + >>> fpr + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Offset into a memmap: + + >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo + memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) + + """ + + __array_priority__ = -100.0 + + def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + shape=None, order='C'): + # Import here to minimize 'import numpy' overhead + import mmap + import os.path + try: + mode = mode_equivalents[mode] + except KeyError as e: + if mode not in valid_filemodes: + all_modes = valid_filemodes + list(mode_equivalents.keys()) + raise ValueError( + f"mode must be one of {all_modes!r} (got {mode!r})" + ) from None + + if mode == 'w+' and shape is None: + raise ValueError("shape must be given if mode == 'w+'") + + if hasattr(filename, 'read'): + f_ctx = nullcontext(filename) + else: + f_ctx = open( + os.fspath(filename), + ('r' if mode == 'c' else mode) + 'b' + ) + + with f_ctx as fid: + fid.seek(0, 2) + flen = fid.tell() + descr = dtypedescr(dtype) + _dbytes = descr.itemsize + + if shape is None: + bytes = flen - offset + if bytes % _dbytes: + raise ValueError("Size of available data is not a " + "multiple of the data-type size.") + size = bytes // _dbytes + shape = (size,) + else: + if not isinstance(shape, (tuple, list)): + try: + shape = [operator.index(shape)] + except TypeError: + pass + shape = tuple(shape) + size = np.intp(1) # avoid overflows + for k in shape: + size *= k + + bytes = int(offset + size * _dbytes) + + if mode in ('w+', 'r+'): + # gh-27723 + # if bytes == 0, we write out 1 byte to allow empty memmap. + bytes = max(bytes, 1) + if flen < bytes: + fid.seek(bytes - 1, 0) + fid.write(b'\0') + fid.flush() + + if mode == 'c': + acc = mmap.ACCESS_COPY + elif mode == 'r': + acc = mmap.ACCESS_READ + else: + acc = mmap.ACCESS_WRITE + + start = offset - offset % mmap.ALLOCATIONGRANULARITY + bytes -= start + # bytes == 0 is problematic as in mmap length=0 maps the full file. 
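+            # The adjustment below keeps the mapping non-empty by backing
+            # `start` up one allocation granule; `array_offset` still points
+            # at the requested data within the enlarged mapping.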
+ # See PR gh-27723 for a more detailed explanation. + if bytes == 0 and start > 0: + bytes += mmap.ALLOCATIONGRANULARITY + start -= mmap.ALLOCATIONGRANULARITY + array_offset = offset - start + mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) + + self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + offset=array_offset, order=order) + self._mmap = mm + self.offset = offset + self.mode = mode + + if isinstance(filename, os.PathLike): + # special case - if we were constructed with a pathlib.path, + # then filename is a path object, not a string + self.filename = filename.resolve() + elif hasattr(fid, "name") and isinstance(fid.name, str): + # py3 returns int for TemporaryFile().name + self.filename = os.path.abspath(fid.name) + # same as memmap copies (e.g. memmap + 1) + else: + self.filename = None + + return self + + def __array_finalize__(self, obj): + if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): + self._mmap = obj._mmap + self.filename = obj.filename + self.offset = obj.offset + self.mode = obj.mode + else: + self._mmap = None + self.filename = None + self.offset = None + self.mode = None + + def flush(self): + """ + Write any changes in the array to the file on disk. + + For further information, see `memmap`. + + Parameters + ---------- + None + + See Also + -------- + memmap + + """ + if self.base is not None and hasattr(self.base, 'flush'): + self.base.flush() + + def __array_wrap__(self, arr, context=None, return_scalar=False): + arr = super().__array_wrap__(arr, context) + + # Return a memmap if a memmap was given as the output of the + # ufunc. Leave the arr class unchanged if self is not a memmap + # to keep original memmap subclasses behavior + if self is arr or type(self) is not memmap: + return arr + + # Return scalar instead of 0d memmap, e.g. for np.sum with + # axis=None (note that subclasses will not reach here) + if return_scalar: + return arr[()] + + # Return ndarray otherwise + return arr.view(np.ndarray) + + def __getitem__(self, index): + res = super().__getitem__(index) + if type(res) is memmap and res._mmap is None: + return res.view(type=ndarray) + return res diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/memmap.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/memmap.pyi new file mode 100644 index 0000000..0b31328 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/memmap.pyi @@ -0,0 +1,3 @@ +from numpy import memmap + +__all__ = ["memmap"] diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.py b/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.py new file mode 100644 index 0000000..236ca7e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.py @@ -0,0 +1,1762 @@ +""" +Create the numpy._core.multiarray namespace for backward compatibility. +In v1.16 the multiarray and umath c-extension modules were merged into +a single _multiarray_umath extension module. So we replicate the old +namespace by importing from the extension module. + +""" + +import functools + +from . import _multiarray_umath, overrides +from ._multiarray_umath import * # noqa: F403 + +# These imports are needed for backward compatibility, +# do not change them. 
issue gh-15518 +# _get_ndarray_c_version is semi-public, on purpose not added to __all__ +from ._multiarray_umath import ( # noqa: F401 + _ARRAY_API, + _flagdict, + _get_madvise_hugepage, + _get_ndarray_c_version, + _monotonicity, + _place, + _reconstruct, + _set_madvise_hugepage, + _vec_string, + from_dlpack, +) + +__all__ = [ + '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', + 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', + 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', + 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', + '_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string', + '_monotonicity', 'add_docstring', 'arange', 'array', 'asarray', + 'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount', + 'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', + 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', + 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', + 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', + 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', + 'frombuffer', 'fromfile', 'fromiter', 'fromstring', + 'get_handler_name', 'get_handler_version', 'inner', 'interp', + 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'vecdot', + 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', + 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', + 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', + 'set_typeDict', 'shares_memory', 'typeinfo', + 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros'] + +# For backward compatibility, make sure pickle imports +# these functions from here +_reconstruct.__module__ = 'numpy._core.multiarray' +scalar.__module__ = 'numpy._core.multiarray' + + +from_dlpack.__module__ = 'numpy' +arange.__module__ = 'numpy' +array.__module__ = 'numpy' +asarray.__module__ = 'numpy' +asanyarray.__module__ = 'numpy' +ascontiguousarray.__module__ = 'numpy' +asfortranarray.__module__ = 'numpy' +datetime_data.__module__ = 'numpy' +empty.__module__ = 'numpy' +frombuffer.__module__ = 'numpy' +fromfile.__module__ = 'numpy' +fromiter.__module__ = 'numpy' +frompyfunc.__module__ = 'numpy' +fromstring.__module__ = 'numpy' +may_share_memory.__module__ = 'numpy' +nested_iters.__module__ = 'numpy' +promote_types.__module__ = 'numpy' +zeros.__module__ = 'numpy' +normalize_axis_index.__module__ = 'numpy.lib.array_utils' +add_docstring.__module__ = 'numpy.lib' +compare_chararrays.__module__ = 'numpy.char' + + +def _override___module__(): + namespace_names = globals() + for ufunc_name in [ + 'absolute', 'arccos', 'arccosh', 'add', 'arcsin', 'arcsinh', 'arctan', + 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'invert', + 'left_shift', 'bitwise_or', 'right_shift', 'bitwise_xor', 'cbrt', + 'ceil', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', + 'divide', 'divmod', 'equal', 'exp', 'exp2', 'expm1', 'fabs', + 'float_power', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod', + 'frexp', 'gcd', 'greater', 'greater_equal', 'heaviside', 'hypot', + 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less', + 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', + 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'power', 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', 
+ 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', + 'trunc', 'vecdot', 'vecmat', + ]: + ufunc = namespace_names[ufunc_name] + ufunc.__module__ = "numpy" + ufunc.__qualname__ = ufunc_name + + +_override___module__() + + +# We can't verify dispatcher signatures because NumPy's C functions don't +# support introspection. +array_function_from_c_func_and_dispatcher = functools.partial( + overrides.array_function_from_dispatcher, + module='numpy', docs_from_dispatcher=True, verify=False) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) +def empty_like( + prototype, dtype=None, order=None, subok=None, shape=None, *, device=None +): + """ + empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *, + device=None) + + Return a new array with the same shape and type as a given array. + + Parameters + ---------- + prototype : array_like + The shape and data-type of `prototype` define these same attributes + of the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `prototype` is Fortran + contiguous, 'C' otherwise. 'K' means match the layout of `prototype` + as closely as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `prototype`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data with the same + shape and type as `prototype`. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + + Notes + ----- + Unlike other array creation functions (e.g. `zeros_like`, `ones_like`, + `full_like`), `empty_like` does not initialize the values of the array, + and may therefore be marginally faster. However, the values stored in the + newly allocated array are arbitrary. For reproducible behavior, be sure + to set each element of the array before reading. + + Examples + -------- + >>> import numpy as np + >>> a = ([1,2,3], [4,5,6]) # a is array-like + >>> np.empty_like(a) + array([[-1073741821, -1073741821, 3], # uninitialized + [ 0, 0, -1073741821]]) + >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) + >>> np.empty_like(a) + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized + [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) + + """ + return (prototype,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) +def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): + """ + concatenate( + (a1, a2, ...), + axis=0, + out=None, + dtype=None, + casting="same_kind" + ) + + Join a sequence of arrays along an existing axis. + + Parameters + ---------- + a1, a2, ... 
: sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. If axis is None, + arrays are flattened before use. Default is 0. + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.20.0 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + For a description of the options, please see :term:`casting`. + + .. versionadded:: 1.20.0 + + Returns + ------- + res : ndarray + The concatenated array. + + See Also + -------- + ma.concatenate : Concatenate function that preserves input masks. + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. + split : Split array into a list of multiple sub-arrays of equal size. + hsplit : Split array into multiple sub-arrays horizontally (column wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + stack : Stack a sequence of arrays along a new axis. + block : Assemble arrays from blocks. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + column_stack : Stack 1-D arrays as columns into a 2-D array. + + Notes + ----- + When one or more of the arrays to be concatenated is a MaskedArray, + this function will return a MaskedArray object instead of an ndarray, + but the input masks are *not* preserved. In cases where a MaskedArray + is expected as input, use the ma.concatenate function from the masked + array module instead. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> b = np.array([[5, 6]]) + >>> np.concatenate((a, b), axis=0) + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.concatenate((a, b.T), axis=1) + array([[1, 2, 5], + [3, 4, 6]]) + >>> np.concatenate((a, b), axis=None) + array([1, 2, 3, 4, 5, 6]) + + This function will not preserve masking of MaskedArray inputs. + + >>> a = np.ma.arange(3) + >>> a[1] = np.ma.masked + >>> b = np.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + array([2, 3, 4]) + >>> np.concatenate([a, b]) + masked_array(data=[0, 1, 2, 2, 3, 4], + mask=False, + fill_value=999999) + >>> np.ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) +def inner(a, b): + """ + inner(a, b, /) + + Inner product of two arrays. + + Ordinary inner product of vectors for 1-D arrays (without complex + conjugation), in higher dimensions a sum product over the last axes. + + Parameters + ---------- + a, b : array_like + If `a` and `b` are nonscalar, their last dimensions must match. 
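+        For instance, shapes ``(2, 3)`` and ``(4, 3)`` are compatible
+        because the last dimensions agree (an illustrative doctest):
+
+        >>> np.inner(np.ones((2, 3)), np.ones((4, 3))).shape
+        (2, 4)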
+ + Returns + ------- + out : ndarray + If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` + + Raises + ------ + ValueError + If both `a` and `b` are nonscalar and their last dimensions have + different sizes. + + See Also + -------- + tensordot : Sum products over arbitrary axes. + dot : Generalised matrix product, using second last dimension of `b`. + vecdot : Vector dot product of two arrays. + einsum : Einstein summation convention. + + Notes + ----- + For vectors (1-D arrays) it computes the ordinary inner-product:: + + np.inner(a, b) = sum(a[:]*b[:]) + + More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: + + np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) + + or explicitly:: + + np.inner(a, b)[i0,...,ir-2,j0,...,js-2] + = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) + + In addition `a` or `b` may be scalars, in which case:: + + np.inner(a,b) = a*b + + Examples + -------- + Ordinary inner product for vectors: + + >>> import numpy as np + >>> a = np.array([1,2,3]) + >>> b = np.array([0,1,0]) + >>> np.inner(a, b) + 2 + + Some multidimensional examples: + + >>> a = np.arange(24).reshape((2,3,4)) + >>> b = np.arange(4) + >>> c = np.inner(a, b) + >>> c.shape + (2, 3) + >>> c + array([[ 14, 38, 62], + [ 86, 110, 134]]) + + >>> a = np.arange(2).reshape((1,1,2)) + >>> b = np.arange(6).reshape((3,2)) + >>> c = np.inner(a, b) + >>> c.shape + (1, 1, 3) + >>> c + array([[[1, 3, 5]]]) + + An example where `b` is a scalar: + + >>> np.inner(np.eye(2), 7) + array([[7., 0.], + [0., 7.]]) + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) +def where(condition, x=None, y=None): + """ + where(condition, [x, y], /) + + Return elements chosen from `x` or `y` depending on `condition`. + + .. note:: + When only `condition` is provided, this function is a shorthand for + ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be + preferred, as it behaves correctly for subclasses. The rest of this + documentation covers only the case where all three arguments are + provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : ndarray + An array with elements from `x` where `condition` is True, and elements + from `y` elsewhere. + + See Also + -------- + choose + nonzero : The function that is called when x and y are omitted + + Notes + ----- + If all the arrays are 1-D, `where` is equivalent to:: + + [xv if c else yv + for c, xv, yv in zip(condition, x, y)] + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.where(a < 5, a, 10*a) + array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) + + This can be used on multidimensional arrays too: + + >>> np.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + array([[1, 8], + [3, 4]]) + + The shapes of x, y, and the condition are broadcast together: + + >>> x, y = np.ogrid[:3, :4] + >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast + array([[10, 0, 0, 0], + [10, 11, 1, 1], + [10, 11, 12, 2]]) + + >>> a = np.array([[0, 1, 2], + ... [0, 2, 4], + ... 
[0, 3, 6]]) + >>> np.where(a < 4, a, -1) # -1 is broadcast + array([[ 0, 1, 2], + [ 0, 2, -1], + [ 0, 3, -1]]) + """ + return (condition, x, y) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) +def lexsort(keys, axis=None): + """ + lexsort(keys, axis=-1) + + Perform an indirect stable sort using a sequence of keys. + + Given multiple sorting keys, lexsort returns an array of integer indices + that describes the sort order by multiple keys. The last key in the + sequence is used for the primary sort order, ties are broken by the + second-to-last key, and so on. + + Parameters + ---------- + keys : (k, m, n, ...) array-like + The `k` keys to be sorted. The *last* key (e.g, the last + row if `keys` is a 2D array) is the primary sort key. + Each element of `keys` along the zeroth axis must be + an array-like object of the same shape. + axis : int, optional + Axis to be indirectly sorted. By default, sort over the last axis + of each sequence. Separate slices along `axis` sorted over + independently; see last example. + + Returns + ------- + indices : (m, n, ...) ndarray of ints + Array of indices that sort the keys along the specified axis. + + See Also + -------- + argsort : Indirect sort. + ndarray.sort : In-place sort. + sort : Return a sorted copy of an array. + + Examples + -------- + Sort names: first by surname, then by name. + + >>> import numpy as np + >>> surnames = ('Hertz', 'Galilei', 'Hertz') + >>> first_names = ('Heinrich', 'Galileo', 'Gustav') + >>> ind = np.lexsort((first_names, surnames)) + >>> ind + array([1, 2, 0]) + + >>> [surnames[i] + ", " + first_names[i] for i in ind] + ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] + + Sort according to two numerical keys, first by elements + of ``a``, then breaking ties according to elements of ``b``: + + >>> a = [1, 5, 1, 4, 3, 4, 4] # First sequence + >>> b = [9, 4, 0, 4, 0, 2, 1] # Second sequence + >>> ind = np.lexsort((b, a)) # Sort by `a`, then by `b` + >>> ind + array([2, 0, 4, 6, 5, 3, 1]) + >>> [(a[i], b[i]) for i in ind] + [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] + + Compare against `argsort`, which would sort each key independently. + + >>> np.argsort((b, a), kind='stable') + array([[2, 4, 6, 5, 1, 3, 0], + [0, 2, 4, 3, 5, 6, 1]]) + + To sort lexicographically with `argsort`, we would need to provide a + structured array. + + >>> x = np.array([(ai, bi) for ai, bi in zip(a, b)], + ... dtype = np.dtype([('x', int), ('y', int)])) + >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) + array([2, 0, 4, 6, 5, 3, 1]) + + The zeroth axis of `keys` always corresponds with the sequence of keys, + so 2D arrays are treated just like other sequences of keys. + + >>> arr = np.asarray([b, a]) + >>> ind2 = np.lexsort(arr) + >>> np.testing.assert_equal(ind2, ind) + + Accordingly, the `axis` parameter refers to an axis of *each* key, not of + the `keys` argument itself. For instance, the array ``arr`` is treated as + a sequence of two 1-D keys, so specifying ``axis=0`` is equivalent to + using the default axis, ``axis=-1``. + + >>> np.testing.assert_equal(np.lexsort(arr, axis=0), + ... np.lexsort(arr, axis=-1)) + + For higher-dimensional arrays, the axis parameter begins to matter. The + resulting array has the same shape as each key, and the values are what + we would expect if `lexsort` were performed on corresponding slices + of the keys independently. For instance, + + >>> x = [[1, 2, 3, 4], + ... [4, 3, 2, 1], + ... [2, 1, 4, 3]] + >>> y = [[2, 2, 1, 1], + ... [1, 2, 1, 2], + ... 
[1, 1, 2, 1]] + >>> np.lexsort((x, y), axis=1) + array([[2, 3, 0, 1], + [2, 0, 3, 1], + [1, 0, 3, 2]]) + + Each row of the result is what we would expect if we were to perform + `lexsort` on the corresponding row of the keys: + + >>> for i in range(3): + ... print(np.lexsort((x[i], y[i]))) + [2 3 0 1] + [2 0 3 1] + [1 0 3 2] + + """ + if isinstance(keys, tuple): + return keys + else: + return (keys,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) +def can_cast(from_, to, casting=None): + """ + can_cast(from_, to, casting='safe') + + Returns True if cast between data types can occur according to the + casting rule. + + Parameters + ---------- + from_ : dtype, dtype specifier, NumPy scalar, or array + Data type, NumPy scalar, or array to cast from. + to : dtype or dtype specifier + Data type to cast to. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Returns + ------- + out : bool + True if cast can occur according to the casting rule. + + Notes + ----- + .. versionchanged:: 2.0 + This function does not support Python scalars anymore and does not + apply any value-based logic for 0-D arrays and NumPy scalars. + + See also + -------- + dtype, result_type + + Examples + -------- + Basic examples + + >>> import numpy as np + >>> np.can_cast(np.int32, np.int64) + True + >>> np.can_cast(np.float64, complex) + True + >>> np.can_cast(complex, float) + False + + >>> np.can_cast('i8', 'f8') + True + >>> np.can_cast('i8', 'f4') + False + >>> np.can_cast('i4', 'S4') + False + + """ + return (from_,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) +def min_scalar_type(a): + """ + min_scalar_type(a, /) + + For scalar ``a``, returns the data type with the smallest size + and smallest scalar kind which can hold its value. For non-scalar + array ``a``, returns the vector's dtype unmodified. + + Floating point values are not demoted to integers, + and complex values are not demoted to floats. + + Parameters + ---------- + a : scalar or array_like + The value whose minimal data type is to be found. + + Returns + ------- + out : dtype + The minimal data type. + + See Also + -------- + result_type, promote_types, dtype, can_cast + + Examples + -------- + >>> import numpy as np + >>> np.min_scalar_type(10) + dtype('uint8') + + >>> np.min_scalar_type(-260) + dtype('int16') + + >>> np.min_scalar_type(3.1) + dtype('float16') + + >>> np.min_scalar_type(1e50) + dtype('float64') + + >>> np.min_scalar_type(np.arange(4,dtype='f8')) + dtype('float64') + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) +def result_type(*arrays_and_dtypes): + """ + result_type(*arrays_and_dtypes) + + Returns the type that results from applying the NumPy + type promotion rules to the arguments. + + Type promotion in NumPy works similarly to the rules in languages + like C++, with some slight differences. When both scalars and + arrays are used, the array's type takes precedence and the actual value + of the scalar is taken into account. 
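+
+    A short illustration of both cases (a Python scalar with an array, and
+    two arrays):
+
+    >>> np.result_type(3, np.arange(3, dtype='f4'))
+    dtype('float32')
+    >>> np.result_type(np.arange(3, dtype='i1'), np.arange(3, dtype='u1'))
+    dtype('int16')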
+ + For example, calculating 3*a, where a is an array of 32-bit floats, + intuitively should result in a 32-bit float output. If the 3 is a + 32-bit integer, the NumPy rules indicate it can't convert losslessly + into a 32-bit float, so a 64-bit float should be the result type. + By examining the value of the constant, '3', we see that it fits in + an 8-bit integer, which can be cast losslessly into the 32-bit float. + + Parameters + ---------- + arrays_and_dtypes : list of arrays and dtypes + The operands of some operation whose result type is needed. + + Returns + ------- + out : dtype + The result type. + + See also + -------- + dtype, promote_types, min_scalar_type, can_cast + + Notes + ----- + The specific algorithm used is as follows. + + Categories are determined by first checking which of boolean, + integer (int/uint), or floating point (float/complex) the maximum + kind of all the arrays and the scalars are. + + If there are only scalars or the maximum category of the scalars + is higher than the maximum category of the arrays, + the data types are combined with :func:`promote_types` + to produce the return value. + + Otherwise, `min_scalar_type` is called on each scalar, and + the resulting data types are all combined with :func:`promote_types` + to produce the return value. + + The set of int values is not a subset of the uint values for types + with the same number of bits, something not reflected in + :func:`min_scalar_type`, but handled as a special case in `result_type`. + + Examples + -------- + >>> import numpy as np + >>> np.result_type(3, np.arange(7, dtype='i1')) + dtype('int8') + + >>> np.result_type('i4', 'c8') + dtype('complex128') + + >>> np.result_type(3.0, -2) + dtype('float64') + + """ + return arrays_and_dtypes + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) +def dot(a, b, out=None): + """ + dot(a, b, out=None) + + Dot product of two arrays. Specifically, + + - If both `a` and `b` are 1-D arrays, it is inner product of vectors + (without complex conjugation). + + - If both `a` and `b` are 2-D arrays, it is matrix multiplication, + but using :func:`matmul` or ``a @ b`` is preferred. + + - If either `a` or `b` is 0-D (scalar), it is equivalent to + :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is + preferred. + + - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over + the last axis of `a` and `b`. + + - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a + sum product over the last axis of `a` and the second-to-last axis of + `b`:: + + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) + + It uses an optimized BLAS library when possible (see `numpy.linalg`). + + Parameters + ---------- + a : array_like + First argument. + b : array_like + Second argument. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of `a` and `b`. If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + If `out` is given, then it is returned. 
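+
+        An illustrative use of ``out`` (its shape, dtype and memory layout
+        must match the result exactly):
+
+        >>> a = np.ones((2, 2))
+        >>> b = np.ones((2, 2))
+        >>> res = np.empty((2, 2))
+        >>> np.dot(a, b, out=res)
+        array([[2., 2.],
+               [2., 2.]])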
+ + Raises + ------ + ValueError + If the last dimension of `a` is not the same size as + the second-to-last dimension of `b`. + + See Also + -------- + vdot : Complex-conjugating dot product. + vecdot : Vector dot product of two arrays. + tensordot : Sum products over arbitrary axes. + einsum : Einstein summation convention. + matmul : '@' operator as method with out parameter. + linalg.multi_dot : Chained dot product. + + Examples + -------- + >>> import numpy as np + >>> np.dot(3, 4) + 12 + + Neither argument is complex-conjugated: + + >>> np.dot([2j, 3j], [2j, 3j]) + (-13+0j) + + For 2-D arrays it is the matrix product: + + >>> a = [[1, 0], [0, 1]] + >>> b = [[4, 1], [2, 2]] + >>> np.dot(a, b) + array([[4, 1], + [2, 2]]) + + >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) + >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) + >>> np.dot(a, b)[2,3,2,1,2,2] + 499128 + >>> sum(a[2,3,2,:] * b[1,2,:,2]) + 499128 + + """ + return (a, b, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) +def vdot(a, b): + r""" + vdot(a, b, /) + + Return the dot product of two vectors. + + The `vdot` function handles complex numbers differently than `dot`: + if the first argument is complex, it is replaced by its complex conjugate + in the dot product calculation. `vdot` also handles multidimensional + arrays differently than `dot`: it does not perform a matrix product, but + flattens the arguments to 1-D arrays before taking a vector dot product. + + Consequently, when the arguments are 2-D arrays of the same shape, this + function effectively returns their + `Frobenius inner product <https://en.wikipedia.org/wiki/Frobenius_inner_product>`_ + (also known as the *trace inner product* or the *standard inner product* + on a vector space of matrices). + + Parameters + ---------- + a : array_like + If `a` is complex the complex conjugate is taken before calculation + of the dot product. + b : array_like + Second argument to the dot product. + + Returns + ------- + output : ndarray + Dot product of `a` and `b`. Can be an int, float, or + complex depending on the types of `a` and `b`. + + See Also + -------- + dot : Return the dot product without using the complex conjugate of the + first argument. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+2j,3+4j]) + >>> b = np.array([5+6j,7+8j]) + >>> np.vdot(a, b) + (70-8j) + >>> np.vdot(b, a) + (70+8j) + + Note that higher-dimensional arrays are flattened! + + >>> a = np.array([[1, 4], [5, 6]]) + >>> b = np.array([[4, 1], [2, 2]]) + >>> np.vdot(a, b) + 30 + >>> np.vdot(b, a) + 30 + >>> 1*4 + 4*1 + 5*2 + 6*2 + 30 + + """ # noqa: E501 + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) +def bincount(x, weights=None, minlength=None): + """ + bincount(x, /, weights=None, minlength=0) + + Count number of occurrences of each value in array of non-negative ints. + + The number of bins (of size 1) is one larger than the largest value in + `x`. If `minlength` is specified, there will be at least this number + of bins in the output array (though it will be longer if necessary, + depending on the contents of `x`). + Each bin gives the number of occurrences of its index value in `x`. + If `weights` is specified the input array is weighted by it, i.e. if a + value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead + of ``out[n] += 1``. + + Parameters + ---------- + x : array_like, 1 dimension, nonnegative ints + Input array. + weights : array_like, optional + Weights, array of the same shape as `x`.
+ minlength : int, optional + A minimum number of bins for the output array. + + Returns + ------- + out : ndarray of ints + The result of binning the input array. + The length of `out` is equal to ``np.amax(x)+1``. + + Raises + ------ + ValueError + If the input is not 1-dimensional, or contains elements with negative + values, or if `minlength` is negative. + TypeError + If the type of the input is float or complex. + + See Also + -------- + histogram, digitize, unique + + Examples + -------- + >>> import numpy as np + >>> np.bincount(np.arange(5)) + array([1, 1, 1, 1, 1]) + >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) + array([1, 3, 1, 1, 0, 0, 0, 1]) + + >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) + >>> np.bincount(x).size == np.amax(x)+1 + True + + The input array needs to be of integer dtype, otherwise a + TypeError is raised: + + >>> np.bincount(np.arange(5, dtype=float)) + Traceback (most recent call last): + ... + TypeError: Cannot cast array data from dtype('float64') to dtype('int64') + according to the rule 'safe' + + A possible use of ``bincount`` is to perform sums over + variable-size chunks of an array, using the ``weights`` keyword. + + >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights + >>> x = np.array([0, 1, 1, 2, 2, 2]) + >>> np.bincount(x, weights=w) + array([ 0.3, 0.7, 1.1]) + + """ + return (x, weights) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) +def ravel_multi_index(multi_index, dims, mode=None, order=None): + """ + ravel_multi_index(multi_index, dims, mode='raise', order='C') + + Converts a tuple of index arrays into an array of flat + indices, applying boundary modes to the multi-index. + + Parameters + ---------- + multi_index : tuple of array_like + A tuple of integer arrays, one array for each dimension. + dims : tuple of ints + The shape of array into which the indices from ``multi_index`` apply. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices are handled. Can specify + either one mode or a tuple of modes, one mode per index. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + In 'clip' mode, a negative index which would normally + wrap will clip to 0 instead. + order : {'C', 'F'}, optional + Determines whether the multi-index should be viewed as + indexing in row-major (C-style) or column-major + (Fortran-style) order. + + Returns + ------- + raveled_indices : ndarray + An array of indices into the flattened version of an array + of dimensions ``dims``. + + See Also + -------- + unravel_index + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([[3,6,6],[4,5,1]]) + >>> np.ravel_multi_index(arr, (7,6)) + array([22, 41, 37]) + >>> np.ravel_multi_index(arr, (7,6), order='F') + array([31, 41, 13]) + >>> np.ravel_multi_index(arr, (4,6), mode='clip') + array([22, 23, 19]) + >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) + array([12, 13, 13]) + + >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) + 1621 + """ + return multi_index + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) +def unravel_index(indices, shape=None, order=None): + """ + unravel_index(indices, shape, order='C') + + Converts a flat index or array of flat indices into a tuple + of coordinate arrays. + + Parameters + ---------- + indices : array_like + An integer array whose elements are indices into the flattened + version of an array of dimensions ``shape``. 
Before version 1.6.0, + this function accepted just one index value. + shape : tuple of ints + The shape of the array to use for unraveling ``indices``. + order : {'C', 'F'}, optional + Determines whether the indices should be viewed as indexing in + row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + unraveled_coords : tuple of ndarray + Each array in the tuple has the same shape as the ``indices`` + array. + + See Also + -------- + ravel_multi_index + + Examples + -------- + >>> import numpy as np + >>> np.unravel_index([22, 41, 37], (7,6)) + (array([3, 6, 6]), array([4, 5, 1])) + >>> np.unravel_index([31, 41, 13], (7,6), order='F') + (array([3, 6, 6]), array([4, 5, 1])) + + >>> np.unravel_index(1621, (6,7,8,9)) + (3, 1, 4, 1) + + """ + return (indices,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) +def copyto(dst, src, casting=None, where=None): + """ + copyto(dst, src, casting='same_kind', where=True) + + Copies values from one array to another, broadcasting as necessary. + + Raises a TypeError if the `casting` rule is violated, and if + `where` is provided, it selects which elements to copy. + + Parameters + ---------- + dst : ndarray + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `dst`, and selects elements to copy from `src` to `dst` + wherever it contains the value True. + + Examples + -------- + >>> import numpy as np + >>> A = np.array([4, 5, 6]) + >>> B = [1, 2, 3] + >>> np.copyto(A, B) + >>> A + array([1, 2, 3]) + + >>> A = np.array([[1, 2, 3], [4, 5, 6]]) + >>> B = [[4, 5, 6], [7, 8, 9]] + >>> np.copyto(A, B) + >>> A + array([[4, 5, 6], + [7, 8, 9]]) + + """ + return (dst, src, where) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) +def putmask(a, /, mask, values): + """ + putmask(a, mask, values) + + Changes elements of an array based on conditional and input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + + If `values` is not the same size as `a` and `mask` then it will repeat. + This gives behavior different from ``a[mask] = values``. + + Parameters + ---------- + a : ndarray + Target array. + mask : array_like + Boolean mask array. It has to be the same shape as `a`. + values : array_like + Values to put into `a` where `mask` is True. If `values` is smaller + than `a` it will be repeated. 
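+        For contrast, plain boolean-mask assignment does not repeat its
+        values, so the count must match the selection (an illustrative
+        doctest):
+
+        >>> x = np.arange(5)
+        >>> x[x > 1] = [-33, -44, -55]
+        >>> x
+        array([  0,   1, -33, -44, -55])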
+ + See Also + -------- + place, put, take, copyto + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> np.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = np.arange(5) + >>> np.putmask(x, x>1, [-33, -44]) + >>> x + array([ 0, 1, -33, -44, -33]) + + """ + return (a, mask, values) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) +def packbits(a, axis=None, bitorder='big'): + """ + packbits(a, /, axis=None, bitorder='big') + + Packs the elements of a binary-valued array into bits in a uint8 array. + + The result is padded to full bytes by inserting zero bits at the end. + + Parameters + ---------- + a : array_like + An array of integers or booleans whose elements should be packed to + bits. + axis : int, optional + The dimension over which bit-packing is done. + ``None`` implies packing the flattened array. + bitorder : {'big', 'little'}, optional + The order of the input bits. 'big' will mimic bin(val), + ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will + reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. + Defaults to 'big'. + + Returns + ------- + packed : ndarray + Array of type uint8 whose elements represent bits corresponding to the + logical (0 or nonzero) value of the input elements. The shape of + `packed` has the same number of dimensions as the input (unless `axis` + is None, in which case the output is 1-D). + + See Also + -------- + unpackbits: Unpacks elements of a uint8 array into a binary-valued output + array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = np.packbits(a, axis=-1) + >>> b + array([[[160], + [ 64]], + [[192], + [ 32]]], dtype=uint8) + + Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, + and 32 = 0010 0000. + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) +def unpackbits(a, axis=None, count=None, bitorder='big'): + """ + unpackbits(a, /, axis=None, count=None, bitorder='big') + + Unpacks elements of a uint8 array into a binary-valued output array. + + Each element of `a` represents a bit-field that should be unpacked + into a binary-valued output array. The shape of the output array is + either 1-D (if `axis` is ``None``) or the same shape as the input + array with unpacking done along the axis specified. + + Parameters + ---------- + a : ndarray, uint8 type + Input array. + axis : int, optional + The dimension over which bit-unpacking is done. + ``None`` implies unpacking the flattened array. + count : int or None, optional + The number of elements to unpack along `axis`, provided as a way + of undoing the effect of packing a size that is not a multiple + of eight. A non-negative number means to only unpack `count` + bits. A negative number means to trim off that many bits from + the end. ``None`` means to unpack the entire array (the + default). Counts larger than the available number of bits will + add zero padding to the output. Negative counts must not + exceed the available number of bits. + bitorder : {'big', 'little'}, optional + The order of the returned bits. 'big' will mimic bin(val), + ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse + the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. + Defaults to 'big'. + + Returns + ------- + unpacked : ndarray, uint8 type + The elements are binary-valued (0 or 1). 
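+
+    For instance, ``bitorder='little'`` reverses the bit order within each
+    byte (an illustrative doctest):
+
+    >>> np.unpackbits(np.array([3], dtype=np.uint8), bitorder='little')
+    array([1, 1, 0, 0, 0, 0, 0, 0], dtype=uint8)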
+ + See Also + -------- + packbits : Packs the elements of a binary-valued array into bits in + a uint8 array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[2], [7], [23]], dtype=np.uint8) + >>> a + array([[ 2], + [ 7], + [23]], dtype=uint8) + >>> b = np.unpackbits(a, axis=1) + >>> b + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) + >>> c = np.unpackbits(a, axis=1, count=-3) + >>> c + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0]], dtype=uint8) + + >>> p = np.packbits(b, axis=0) + >>> np.unpackbits(p, axis=0) + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) + >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0])) + True + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) +def shares_memory(a, b, max_work=None): + """ + shares_memory(a, b, /, max_work=None) + + Determine if two arrays share memory. + + .. warning:: + + This function can be exponentially slow for some inputs, unless + `max_work` is set to zero or a positive integer. + If in doubt, use `numpy.may_share_memory` instead. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem (maximum number + of candidate solutions to consider). The following special + values are recognized: + + max_work=-1 (default) + The problem is solved exactly. In this case, the function returns + True only if there is an element shared between the arrays. Finding + the exact solution may take extremely long in some cases. + max_work=0 + Only the memory bounds of a and b are checked. + This is equivalent to using ``may_share_memory()``. + + Raises + ------ + numpy.exceptions.TooHardError + Exceeded max_work. + + Returns + ------- + out : bool + + See Also + -------- + may_share_memory + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3, 4]) + >>> np.shares_memory(x, np.array([5, 6, 7])) + False + >>> np.shares_memory(x[::2], x) + True + >>> np.shares_memory(x[::2], x[1::2]) + False + + Checking whether two arrays share memory is NP-complete, and + runtime may increase exponentially in the number of + dimensions. Hence, `max_work` should generally be set to a finite + number, as it is possible to construct examples that take + extremely long to run: + + >>> from numpy.lib.stride_tricks import as_strided + >>> x = np.zeros([192163377], dtype=np.int8) + >>> x1 = as_strided( + ... x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049)) + >>> x2 = as_strided( + ... x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1)) + >>> np.shares_memory(x1, x2, max_work=1000) + Traceback (most recent call last): + ... + numpy.exceptions.TooHardError: Exceeded max_work + + Running ``np.shares_memory(x1, x2)`` without `max_work` set takes + around 1 minute for this case. It is possible to find problems + that take still significantly longer. + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) +def may_share_memory(a, b, max_work=None): + """ + may_share_memory(a, b, /, max_work=None) + + Determine if two arrays might share memory + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. 
+ + Only the memory bounds of a and b are checked by default. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem. See + `shares_memory` for details. Default for ``may_share_memory`` + is to do a bounds check. + + Returns + ------- + out : bool + + See Also + -------- + shares_memory + + Examples + -------- + >>> import numpy as np + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + >>> x = np.zeros([3, 4]) + >>> np.may_share_memory(x[:,0], x[:,1]) + True + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) +def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): + """ + is_busday( + dates, + weekmask='1111100', + holidays=None, + busdaycal=None, + out=None + ) + + Calculates which of the given dates are valid days, and which are not. + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of bool, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of bool + An array with the same shape as ``dates``, containing True for + each valid day, and False for each invalid day. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> import numpy as np + >>> # The weekdays are Friday, Saturday, and Monday + ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + array([False, False, True]) + """ + return (dates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) +def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_offset( + dates, + offsets, + roll='raise', + weekmask='1111100', + holidays=None, + busdaycal=None, + out=None + ) + + First adjusts the date to fall on a valid day according to + the ``roll`` rule, then applies offsets to the given dates + counted in valid days. + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + offsets : array_like of int + The array of offsets, which is broadcast with ``dates``. + roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', \ + 'modifiedfollowing', 'modifiedpreceding'}, optional + How to treat dates that do not fall on a valid day. The default + is 'raise'. 
+ + * 'raise' means to raise an exception for an invalid day. + * 'nat' means to return a NaT (not-a-time) for an invalid day. + * 'forward' and 'following' mean to take the first valid day + later in time. + * 'backward' and 'preceding' mean to take the first valid day + earlier in time. + * 'modifiedfollowing' means to take the first valid day + later in time unless it is across a Month boundary, in which + case to take the first valid day earlier in time. + * 'modifiedpreceding' means to take the first valid day + earlier in time unless it is across a Month boundary, in which + case to take the first valid day later in time. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of datetime64[D], optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of datetime64[D] + An array with a shape from broadcasting ``dates`` and ``offsets`` + together, containing the dates with offsets applied. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> import numpy as np + >>> # First business day in October 2011 (not accounting for holidays) + ... np.busday_offset('2011-10', 0, roll='forward') + np.datetime64('2011-10-03') + >>> # Last business day in February 2012 (not accounting for holidays) + ... np.busday_offset('2012-03', -1, roll='forward') + np.datetime64('2012-02-29') + >>> # Third Wednesday in January 2011 + ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') + np.datetime64('2011-01-19') + >>> # 2012 Mother's Day in Canada and the U.S. + ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') + np.datetime64('2012-05-13') + + >>> # First business day on or after a date + ... np.busday_offset('2011-03-20', 0, roll='forward') + np.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 0, roll='forward') + np.datetime64('2011-03-22') + >>> # First business day after a date + ... 
np.busday_offset('2011-03-20', 1, roll='backward') + np.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 1, roll='backward') + np.datetime64('2011-03-23') + """ + return (dates, offsets, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) +def busday_count(begindates, enddates, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_count( + begindates, + enddates, + weekmask='1111100', + holidays=[], + busdaycal=None, + out=None + ) + + Counts the number of valid days between `begindates` and + `enddates`, not including the day of `enddates`. + + If ``enddates`` specifies a date value that is earlier than the + corresponding ``begindates`` date value, the count will be negative. + + Parameters + ---------- + begindates : array_like of datetime64[D] + The array of the first dates for counting. + enddates : array_like of datetime64[D] + The array of the end dates for counting, which are excluded + from the count themselves. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of int, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of int + An array with a shape from broadcasting ``begindates`` and ``enddates`` + together, containing the number of valid days between + the begin and end dates. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_offset : Applies an offset counted in valid days. + + Examples + -------- + >>> import numpy as np + >>> # Number of weekdays in January 2011 + ... np.busday_count('2011-01', '2011-02') + 21 + >>> # Number of weekdays in 2011 + >>> np.busday_count('2011', '2012') + 260 + >>> # Number of Saturdays in 2011 + ... np.busday_count('2011', '2012', weekmask='Sat') + 53 + """ + return (begindates, enddates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher( + _multiarray_umath.datetime_as_string) +def datetime_as_string(arr, unit=None, timezone=None, casting=None): + """ + datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') + + Convert an array of datetimes into an array of strings. + + Parameters + ---------- + arr : array_like of datetime64 + The array of UTC timestamps to format. + unit : str + One of None, 'auto', or + a :ref:`datetime unit <arrays.dtypes.dateunits>`. + timezone : {'naive', 'UTC', 'local'} or tzinfo + Timezone information to use when displaying the datetime. If 'UTC', + end with a Z to indicate UTC time. If 'local', convert to the local + timezone first, and suffix with a +-#### timezone offset.
If a tzinfo + object, then do as with 'local', but use the specified timezone. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'} + Casting to allow when changing between datetime units. + + Returns + ------- + str_arr : ndarray + An array of strings the same shape as `arr`. + + Examples + -------- + >>> import numpy as np + >>> import pytz + >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') + >>> d + array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', + '2002-10-27T07:30'], dtype='datetime64[m]') + + Setting the timezone to UTC shows the same information, but with a Z suffix + + >>> np.datetime_as_string(d, timezone='UTC') + array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', + '2002-10-27T07:30Z'], dtype='<U35') + + Note that we picked datetimes during a DST boundary, so by setting + the timezone to 'US/Eastern', we see the amount of the offset change + + >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', + '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39') + + Passing in a unit will change the precision + + >>> np.datetime_as_string(d, unit='h') + array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], + dtype='<U32') + >>> np.datetime_as_string(d, unit='s') + array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', + '2002-10-27T07:30:00'], dtype='<U38') + + 'casting' can be used to specify whether precision can be changed + + >>> np.datetime_as_string(d, unit='h', casting='safe') + Traceback (most recent call last): + ... + TypeError: Cannot create a datetime string as units 'h' from a NumPy + datetime with units 'm' according to the rule 'safe' + """ + return (arr,) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.pyi new file mode 100644 index 0000000..13a3f00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.pyi @@ -0,0 +1,1285 @@ +# TODO: Sort out any and all missing functions in this namespace +import datetime as dt +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Any, + ClassVar, + Final, + Protocol, + SupportsIndex, + TypeAlias, + TypedDict, + TypeVar, + Unpack, + final, + overload, + type_check_only, +) +from typing import ( + Literal as L, +) + +from _typeshed import StrOrBytesPath, SupportsLenAndGetItem +from typing_extensions import CapsuleType + +import numpy as np +from numpy import ( # type: ignore[attr-defined] + _AnyShapeT, + _CastingKind, + _CopyMode, + _ModeKind, + _NDIterFlagsKind, + _NDIterFlagsOp, + _OrderCF, + _OrderKACF, + _SupportsBuffer, + _SupportsFileMethods, + broadcast, + # Re-exports + busdaycalendar, + complexfloating, + correlate, + count_nonzero, + datetime64, + dtype, + flatiter, + float64, + floating, + from_dlpack, + generic, + int_, + interp, + intp, + matmul, + ndarray, + nditer, + signedinteger, + str_, + timedelta64, + # The rest + ufunc, + uint8, + unsignedinteger, + vecdot, +) +from numpy import ( + einsum as c_einsum, +) +from numpy._typing import ( + ArrayLike, + # DTypes + DTypeLike, + # Arrays + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeBytes_co, + _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _DTypeLike, + _FloatLike_co, + _IntLike_co, + _NestedSequence, + _ScalarLike_co, + # Shapes + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, + _TD64Like_co, +) +from numpy._typing._ufunc import ( + _2PTuple, + _PyFunc_Nin1_Nout1, + _PyFunc_Nin1P_Nout2P, + _PyFunc_Nin2_Nout1, + _PyFunc_Nin3P_Nout1, +) +from numpy.lib._array_utils_impl import normalize_axis_index + +__all__ = [
+ "_ARRAY_API", + "ALLOW_THREADS", + "BUFSIZE", + "CLIP", + "DATETIMEUNITS", + "ITEM_HASOBJECT", + "ITEM_IS_POINTER", + "LIST_PICKLE", + "MAXDIMS", + "MAY_SHARE_BOUNDS", + "MAY_SHARE_EXACT", + "NEEDS_INIT", + "NEEDS_PYAPI", + "RAISE", + "USE_GETITEM", + "USE_SETITEM", + "WRAP", + "_flagdict", + "from_dlpack", + "_place", + "_reconstruct", + "_vec_string", + "_monotonicity", + "add_docstring", + "arange", + "array", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "bincount", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "can_cast", + "compare_chararrays", + "concatenate", + "copyto", + "correlate", + "correlate2", + "count_nonzero", + "c_einsum", + "datetime_as_string", + "datetime_data", + "dot", + "dragon4_positional", + "dragon4_scientific", + "dtype", + "empty", + "empty_like", + "error", + "flagsobj", + "flatiter", + "format_longfloat", + "frombuffer", + "fromfile", + "fromiter", + "fromstring", + "get_handler_name", + "get_handler_version", + "inner", + "interp", + "interp_complex", + "is_busday", + "lexsort", + "matmul", + "vecdot", + "may_share_memory", + "min_scalar_type", + "ndarray", + "nditer", + "nested_iters", + "normalize_axis_index", + "packbits", + "promote_types", + "putmask", + "ravel_multi_index", + "result_type", + "scalar", + "set_datetimeparse_function", + "set_typeDict", + "shares_memory", + "typeinfo", + "unpackbits", + "unravel_index", + "vdot", + "where", + "zeros", +] + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) +_ArrayT_co = TypeVar( + "_ArrayT_co", + bound=ndarray[Any, Any], + covariant=True, +) +_ReturnType = TypeVar("_ReturnType") +_IDType = TypeVar("_IDType") +_Nin = TypeVar("_Nin", bound=int) +_Nout = TypeVar("_Nout", bound=int) + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] +_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] + +# Valid time units +_UnitKind: TypeAlias = L[ + "Y", + "M", + "D", + "h", + "m", + "s", + "ms", + "us", "μs", + "ns", + "ps", + "fs", + "as", +] +_RollKind: TypeAlias = L[ # `raise` is deliberately excluded + "nat", + "forward", + "following", + "backward", + "preceding", + "modifiedfollowing", + "modifiedpreceding", +] + +@type_check_only +class _SupportsArray(Protocol[_ArrayT_co]): + def __array__(self, /) -> _ArrayT_co: ... + +@type_check_only +class _KwargsEmpty(TypedDict, total=False): + device: L["cpu"] | None + like: _SupportsArrayFunc | None + +@type_check_only +class _ConstructorEmpty(Protocol): + # 1-D shape + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array1D[float64]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[tuple[int], _DTypeT]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array1D[_ScalarT]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array1D[Any]: ... 
+ + # known shape + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_AnyShapeT, float64]: ... + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[_AnyShapeT, _DTypeT]: ... + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_AnyShapeT, _ScalarT]: ... + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_AnyShapeT, Any]: ... + + # unknown shape + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[float64]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[Any, _DTypeT]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[_ScalarT]: ... + @overload + def __call__( + self, + /, + shape: _ShapeLike, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[Any]: ... + +# using `Final` or `TypeAlias` will break stubtest +error = Exception + +# from ._multiarray_umath +ITEM_HASOBJECT: Final = 1 +LIST_PICKLE: Final = 2 +ITEM_IS_POINTER: Final = 4 +NEEDS_INIT: Final = 8 +NEEDS_PYAPI: Final = 16 +USE_GETITEM: Final = 32 +USE_SETITEM: Final = 64 +DATETIMEUNITS: Final[CapsuleType] +_ARRAY_API: Final[CapsuleType] +_flagdict: Final[dict[str, int]] +_monotonicity: Final[Callable[..., object]] +_place: Final[Callable[..., object]] +_reconstruct: Final[Callable[..., object]] +_vec_string: Final[Callable[..., object]] +correlate2: Final[Callable[..., object]] +dragon4_positional: Final[Callable[..., object]] +dragon4_scientific: Final[Callable[..., object]] +interp_complex: Final[Callable[..., object]] +set_datetimeparse_function: Final[Callable[..., object]] +def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... +def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... +def format_longfloat(x: np.longdouble, precision: int) -> str: ... +def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... +def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... +typeinfo: Final[dict[str, np.dtype[np.generic]]] + +ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) +BUFSIZE: L[8192] +CLIP: L[0] +WRAP: L[1] +RAISE: L[2] +MAXDIMS: L[32] +MAY_SHARE_BOUNDS: L[0] +MAY_SHARE_EXACT: L[-1] +tracemalloc_domain: L[389047] + +zeros: Final[_ConstructorEmpty] +empty: Final[_ConstructorEmpty] + +@overload +def empty_like( + prototype: _ArrayT, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> _ArrayT: ... +@overload +def empty_like( + prototype: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... 
+@overload
+def empty_like(
+    prototype: Any,
+    dtype: _DTypeLike[_ScalarT],
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: _ShapeLike | None = ...,
+    *,
+    device: L["cpu"] | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def empty_like(
+    prototype: Any,
+    dtype: DTypeLike | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: _ShapeLike | None = ...,
+    *,
+    device: L["cpu"] | None = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def array(
+    object: _ArrayT,
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: L[True],
+    ndmin: int = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> _ArrayT: ...
+@overload
+def array(
+    object: _SupportsArray[_ArrayT],
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: L[True],
+    ndmin: L[0] = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> _ArrayT: ...
+@overload
+def array(
+    object: _ArrayLike[_ScalarT],
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def array(
+    object: Any,
+    dtype: _DTypeLike[_ScalarT],
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def array(
+    object: Any,
+    dtype: DTypeLike | None = ...,
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> NDArray[Any]: ...
+
+#
+@overload
+def ravel_multi_index(
+    multi_index: SupportsLenAndGetItem[_IntLike_co],
+    dims: _ShapeLike,
+    mode: _ModeKind | tuple[_ModeKind, ...] = "raise",
+    order: _OrderCF = "C",
+) -> intp: ...
+@overload
+def ravel_multi_index(
+    multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co],
+    dims: _ShapeLike,
+    mode: _ModeKind | tuple[_ModeKind, ...] = "raise",
+    order: _OrderCF = "C",
+) -> NDArray[intp]: ...
+
+#
+@overload
+def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ...
+@overload
+def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ...
+
+# NOTE: Allow any sequence of array-like objects
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: _ArrayLike[_ScalarT],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    casting: _CastingKind | None = ...
+) -> NDArray[_ScalarT]: ...
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: SupportsLenAndGetItem[ArrayLike],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+    *,
+    dtype: _DTypeLike[_ScalarT],
+    casting: _CastingKind | None = ...
+) -> NDArray[_ScalarT]: ...
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: SupportsLenAndGetItem[ArrayLike],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind | None = ...
+) -> NDArray[Any]: ...
+@overload
+def concatenate(
+    arrays: SupportsLenAndGetItem[ArrayLike],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: _ArrayT = ...,
+    *,
+    dtype: DTypeLike = ...,
+    casting: _CastingKind | None = ...
+) -> _ArrayT: ...
+
+def inner(
+    a: ArrayLike,
+    b: ArrayLike,
+    /,
+) -> Any: ...
+
+@overload
+def where(
+    condition: ArrayLike,
+    /,
+) -> tuple[NDArray[intp], ...]: ...
+@overload +def where( + condition: ArrayLike, + x: ArrayLike, + y: ArrayLike, + /, +) -> NDArray[Any]: ... + +def lexsort( + keys: ArrayLike, + axis: SupportsIndex | None = ..., +) -> Any: ... + +def can_cast( + from_: ArrayLike | DTypeLike, + to: DTypeLike, + casting: _CastingKind | None = ..., +) -> bool: ... + +def min_scalar_type(a: ArrayLike, /) -> dtype: ... + +def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike) -> dtype: ... + +@overload +def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... +@overload +def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... + +@overload +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... +@overload +def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ... +@overload +def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... + +def bincount( + x: ArrayLike, + /, + weights: ArrayLike | None = ..., + minlength: SupportsIndex = ..., +) -> NDArray[intp]: ... + +def copyto( + dst: NDArray[Any], + src: ArrayLike, + casting: _CastingKind | None = ..., + where: _ArrayLikeBool_co | None = ..., +) -> None: ... + +def putmask( + a: NDArray[Any], + /, + mask: _ArrayLikeBool_co, + values: ArrayLike, +) -> None: ... + +def packbits( + a: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = ..., + bitorder: L["big", "little"] = ..., +) -> NDArray[uint8]: ... + +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: SupportsIndex | None = ..., + count: SupportsIndex | None = ..., + bitorder: L["big", "little"] = ..., +) -> NDArray[uint8]: ... + +def shares_memory( + a: object, + b: object, + /, + max_work: int | None = ..., +) -> bool: ... + +def may_share_memory( + a: object, + b: object, + /, + max_work: int | None = ..., +) -> bool: ... + +@overload +def asarray( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def asanyarray( + a: _ArrayT, # Preserve subclass-information + dtype: None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _ArrayT: ... +@overload +def asanyarray( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... 
+@overload +def asanyarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asanyarray( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def ascontiguousarray( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ascontiguousarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ascontiguousarray( + a: Any, + dtype: DTypeLike | None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def asfortranarray( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asfortranarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asfortranarray( + a: Any, + dtype: DTypeLike | None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... + +# `sep` is a de facto mandatory argument, as its default value is deprecated +@overload +def fromstring( + string: str | bytes, + dtype: None = ..., + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def fromstring( + string: str | bytes, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromstring( + string: str | bytes, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any], _ReturnType], /, + nin: L[1], + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any], _ReturnType], /, + nin: L[1], + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any, Any], _ReturnType], /, + nin: L[2], + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any, Any], _ReturnType], /, + nin: L[2], + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[..., _ReturnType], /, + nin: _Nin, + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[..., _ReturnType], /, + nin: _Nin, + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ... 
+@overload +def frompyfunc( + func: Callable[..., _2PTuple[_ReturnType]], /, + nin: _Nin, + nout: _Nout, + *, + identity: None = ..., +) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... +@overload +def frompyfunc( + func: Callable[..., _2PTuple[_ReturnType]], /, + nin: _Nin, + nout: _Nout, + *, + identity: _IDType, +) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ... +@overload +def frompyfunc( + func: Callable[..., Any], /, + nin: SupportsIndex, + nout: SupportsIndex, + *, + identity: object | None = ..., +) -> ufunc: ... + +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: None = ..., + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def fromiter( + iter: Iterable[Any], + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromiter( + iter: Iterable[Any], + dtype: DTypeLike, + count: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: None = ..., + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def arange( # type: ignore[misc] + stop: _IntLike_co, + /, *, + dtype: None = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[signedinteger]: ... +@overload +def arange( # type: ignore[misc] + start: _IntLike_co, + stop: _IntLike_co, + step: _IntLike_co = ..., + dtype: None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[signedinteger]: ... +@overload +def arange( # type: ignore[misc] + stop: _FloatLike_co, + /, *, + dtype: None = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[floating]: ... +@overload +def arange( # type: ignore[misc] + start: _FloatLike_co, + stop: _FloatLike_co, + step: _FloatLike_co = ..., + dtype: None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[floating]: ... +@overload +def arange( + stop: _TD64Like_co, + /, *, + dtype: None = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[timedelta64]: ... 
+@overload +def arange( + start: _TD64Like_co, + stop: _TD64Like_co, + step: _TD64Like_co = ..., + dtype: None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[timedelta64]: ... +@overload +def arange( # both start and stop must always be specified for datetime64 + start: datetime64, + stop: datetime64, + step: datetime64 = ..., + dtype: None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[datetime64]: ... +@overload +def arange( + stop: Any, + /, *, + dtype: _DTypeLike[_ScalarT], + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[_ScalarT]: ... +@overload +def arange( + start: Any, + stop: Any, + step: Any = ..., + dtype: _DTypeLike[_ScalarT] = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[_ScalarT]: ... +@overload +def arange( + stop: Any, /, + *, + dtype: DTypeLike | None = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[Any]: ... +@overload +def arange( + start: Any, + stop: Any, + step: Any = ..., + dtype: DTypeLike | None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[Any]: ... + +def datetime_data( + dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, +) -> tuple[str, int]: ... + +# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here + +@overload +def busday_count( # type: ignore[misc] + begindates: _ScalarLike_co | dt.date, + enddates: _ScalarLike_co | dt.date, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> int_: ... +@overload +def busday_count( # type: ignore[misc] + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> NDArray[int_]: ... +@overload +def busday_count( + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: _ArrayT = ..., +) -> _ArrayT: ... + +# `roll="raise"` is (more or less?) equivalent to `casting="safe"` +@overload +def busday_offset( # type: ignore[misc] + dates: datetime64 | dt.date, + offsets: _TD64Like_co | dt.timedelta, + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> datetime64: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> NDArray[datetime64]: ... 
+@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: _ArrayT = ..., +) -> _ArrayT: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ScalarLike_co | dt.date, + offsets: _ScalarLike_co | dt.timedelta, + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> datetime64: ... +@overload +def busday_offset( # type: ignore[misc] + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> NDArray[datetime64]: ... +@overload +def busday_offset( + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: _ArrayT = ..., +) -> _ArrayT: ... + +@overload +def is_busday( # type: ignore[misc] + dates: _ScalarLike_co | dt.date, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> np.bool: ... +@overload +def is_busday( # type: ignore[misc] + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def is_busday( + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: _ArrayT = ..., +) -> _ArrayT: ... + +@overload +def datetime_as_string( # type: ignore[misc] + arr: datetime64 | dt.date, + unit: L["auto"] | _UnitKind | None = ..., + timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., + casting: _CastingKind = ..., +) -> str_: ... +@overload +def datetime_as_string( + arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], + unit: L["auto"] | _UnitKind | None = ..., + timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., + casting: _CastingKind = ..., +) -> NDArray[str_]: ... + +@overload +def compare_chararrays( + a1: _ArrayLikeStr_co, + a2: _ArrayLikeStr_co, + cmp: L["<", "<=", "==", ">=", ">", "!="], + rstrip: bool, +) -> NDArray[np.bool]: ... +@overload +def compare_chararrays( + a1: _ArrayLikeBytes_co, + a2: _ArrayLikeBytes_co, + cmp: L["<", "<=", "==", ">=", ">", "!="], + rstrip: bool, +) -> NDArray[np.bool]: ... + +def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... 
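
[Editor's note] The business-day and datetime stubs above distinguish scalar from array inputs purely by overload order. A minimal usage sketch of how those overloads are expected to resolve at runtime (illustrative only, not part of the vendored file; the `busday_offset` value follows the upstream NumPy documentation example):

import numpy as np

d = np.datetime64("2002-10-27T04:30")
# Scalar overload: a single datetime64 yields a np.str_ scalar.
s = np.datetime_as_string(d, unit="s")
# Array overload: an array of datetime64 yields an NDArray[np.str_].
v = np.datetime_as_string(np.array([d, d]))
# roll="forward" moves a non-business day to the next business day;
# 2011-03-20 is a Sunday, so this evaluates to datetime64('2011-03-21').
nxt = np.busday_offset("2011-03-20", 0, roll="forward")
print(s, v, nxt)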
+ +_GetItemKeys: TypeAlias = L[ + "C", "CONTIGUOUS", "C_CONTIGUOUS", + "F", "FORTRAN", "F_CONTIGUOUS", + "W", "WRITEABLE", + "B", "BEHAVED", + "O", "OWNDATA", + "A", "ALIGNED", + "X", "WRITEBACKIFCOPY", + "CA", "CARRAY", + "FA", "FARRAY", + "FNC", + "FORC", +] +_SetItemKeys: TypeAlias = L[ + "A", "ALIGNED", + "W", "WRITEABLE", + "X", "WRITEBACKIFCOPY", +] + +@final +class flagsobj: + __hash__: ClassVar[None] # type: ignore[assignment] + aligned: bool + # NOTE: deprecated + # updateifcopy: bool + writeable: bool + writebackifcopy: bool + @property + def behaved(self) -> bool: ... + @property + def c_contiguous(self) -> bool: ... + @property + def carray(self) -> bool: ... + @property + def contiguous(self) -> bool: ... + @property + def f_contiguous(self) -> bool: ... + @property + def farray(self) -> bool: ... + @property + def fnc(self) -> bool: ... + @property + def forc(self) -> bool: ... + @property + def fortran(self) -> bool: ... + @property + def num(self) -> int: ... + @property + def owndata(self) -> bool: ... + def __getitem__(self, key: _GetItemKeys) -> bool: ... + def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... + +def nested_iters( + op: ArrayLike | Sequence[ArrayLike], + axes: Sequence[Sequence[SupportsIndex]], + flags: Sequence[_NDIterFlagsKind] | None = ..., + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + buffersize: SupportsIndex = ..., +) -> tuple[nditer, ...]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/numeric.py b/.venv/lib/python3.12/site-packages/numpy/_core/numeric.py new file mode 100644 index 0000000..964447f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/numeric.py @@ -0,0 +1,2760 @@ +import builtins +import functools +import itertools +import math +import numbers +import operator +import sys +import warnings + +import numpy as np +from numpy.exceptions import AxisError + +from . import multiarray, numerictypes, overrides, shape_base, umath +from . 
import numerictypes as nt +from ._ufunc_config import errstate +from .multiarray import ( # noqa: F401 + ALLOW_THREADS, + BUFSIZE, + CLIP, + MAXDIMS, + MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT, + RAISE, + WRAP, + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + broadcast, + can_cast, + concatenate, + copyto, + dot, + dtype, + empty, + empty_like, + flatiter, + from_dlpack, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + matmul, + may_share_memory, + min_scalar_type, + ndarray, + nditer, + nested_iters, + normalize_axis_index, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + vecdot, + where, + zeros, +) +from .overrides import finalize_array_function_like, set_module +from .umath import NAN, PINF, invert, multiply, sin + +bitwise_not = invert +ufunc = type(sin) +newaxis = None + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', + 'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray', + 'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', + 'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where', + 'argwhere', 'copyto', 'concatenate', 'lexsort', 'astype', + 'can_cast', 'promote_types', 'min_scalar_type', + 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', + 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', + 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', + 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', + 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'identity', 'allclose', 'putmask', + 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not', + 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory', + 'may_share_memory'] + + +def _zeros_like_dispatcher( + a, dtype=None, order=None, subok=None, shape=None, *, device=None +): + return (a,) + + +@array_function_dispatch(_zeros_like_dispatcher) +def zeros_like( + a, dtype=None, order='K', subok=True, shape=None, *, device=None +): + """ + Return an array of zeros with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of zeros with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. 
+ full_like : Return a new array with shape of input filled with value. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.zeros_like(x) + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.zeros_like(y) + array([0., 0., 0.]) + + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + # needed instead of a 0 to get same result as zeros for string dtypes + z = zeros(1, dtype=res.dtype) + multiarray.copyto(res, z, casting='unsafe') + return res + + +@finalize_array_function_like +@set_module('numpy') +def ones(shape, dtype=None, order='C', *, device=None, like=None): + """ + Return a new array of given shape and type, filled with ones. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: C + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of ones with the given shape, dtype, and order. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + empty : Return a new uninitialized array. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> import numpy as np + >>> np.ones(5) + array([1., 1., 1., 1., 1.]) + + >>> np.ones((5,), dtype=int) + array([1, 1, 1, 1, 1]) + + >>> np.ones((2, 1)) + array([[1.], + [1.]]) + + >>> s = (2,2) + >>> np.ones(s) + array([[1., 1.], + [1., 1.]]) + + """ + if like is not None: + return _ones_with_like( + like, shape, dtype=dtype, order=order, device=device + ) + + a = empty(shape, dtype, order, device=device) + multiarray.copyto(a, 1, casting='unsafe') + return a + + +_ones_with_like = array_function_dispatch()(ones) + + +def _ones_like_dispatcher( + a, dtype=None, order=None, subok=None, shape=None, *, device=None +): + return (a,) + + +@array_function_dispatch(_ones_like_dispatcher) +def ones_like( + a, dtype=None, order='K', subok=True, shape=None, *, device=None +): + """ + Return an array of ones with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. 
If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of ones with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + ones : Return a new array setting values to one. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.ones_like(x) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.ones_like(y) + array([1., 1., 1.]) + + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + multiarray.copyto(res, 1, casting='unsafe') + return res + + +def _full_dispatcher( + shape, fill_value, dtype=None, order=None, *, device=None, like=None +): + return (like,) + + +@finalize_array_function_like +@set_module('numpy') +def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None): + """ + Return a new array of given shape and type, filled with `fill_value`. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + fill_value : scalar or array_like + Fill value. + dtype : data-type, optional + The desired data-type for the array The default, None, means + ``np.array(fill_value).dtype``. + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the given shape, dtype, and order. + + See Also + -------- + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> import numpy as np + >>> np.full((2, 2), np.inf) + array([[inf, inf], + [inf, inf]]) + >>> np.full((2, 2), 10) + array([[10, 10], + [10, 10]]) + + >>> np.full((2, 2), [1, 2]) + array([[1, 2], + [1, 2]]) + + """ + if like is not None: + return _full_with_like( + like, shape, fill_value, dtype=dtype, order=order, device=device + ) + + if dtype is None: + fill_value = asarray(fill_value) + dtype = fill_value.dtype + a = empty(shape, dtype, order, device=device) + multiarray.copyto(a, fill_value, casting='unsafe') + return a + + +_full_with_like = array_function_dispatch()(full) + + +def _full_like_dispatcher( + a, fill_value, dtype=None, order=None, subok=None, shape=None, + *, device=None +): + return (a,) + + +@array_function_dispatch(_full_like_dispatcher) +def full_like( + a, fill_value, dtype=None, order='K', subok=True, shape=None, + *, device=None +): + """ + Return a full array with the same shape and type as a given array. 
+ + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + fill_value : array_like + Fill value. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6, dtype=int) + >>> np.full_like(x, 1) + array([1, 1, 1, 1, 1, 1]) + >>> np.full_like(x, 0.1) + array([0, 0, 0, 0, 0, 0]) + >>> np.full_like(x, 0.1, dtype=np.double) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> np.full_like(x, np.nan, dtype=np.double) + array([nan, nan, nan, nan, nan, nan]) + + >>> y = np.arange(6, dtype=np.double) + >>> np.full_like(y, 0.1) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + + >>> y = np.zeros([2, 2, 3], dtype=int) + >>> np.full_like(y, [0, 0, 255]) + array([[[ 0, 0, 255], + [ 0, 0, 255]], + [[ 0, 0, 255], + [ 0, 0, 255]]]) + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + multiarray.copyto(res, fill_value, casting='unsafe') + return res + + +def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_count_nonzero_dispatcher) +def count_nonzero(a, axis=None, *, keepdims=False): + """ + Counts the number of non-zero values in the array ``a``. + + The word "non-zero" is in reference to the Python 2.x + built-in method ``__nonzero__()`` (renamed ``__bool__()`` + in Python 3.x) of Python objects that tests an object's + "truthfulness". For example, any number is considered + truthful if it is nonzero, whereas any string is considered + truthful if it is not the empty string. Thus, this function + (recursively) counts how many elements in ``a`` (and in + sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` + method evaluated to ``True``. + + Parameters + ---------- + a : array_like + The array for which to count non-zeros. + axis : int or tuple, optional + Axis or tuple of axes along which to count non-zeros. + Default is None, meaning that non-zeros will be counted + along a flattened version of ``a``. + keepdims : bool, optional + If this is set to True, the axes that are counted are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. 
+ + Returns + ------- + count : int or array of int + Number of non-zero values in the array along a given axis. + Otherwise, the total number of non-zero values in the array + is returned. + + See Also + -------- + nonzero : Return the coordinates of all the non-zero values. + + Examples + -------- + >>> import numpy as np + >>> np.count_nonzero(np.eye(4)) + 4 + >>> a = np.array([[0, 1, 7, 0], + ... [3, 0, 2, 19]]) + >>> np.count_nonzero(a) + 5 + >>> np.count_nonzero(a, axis=0) + array([1, 1, 2, 1]) + >>> np.count_nonzero(a, axis=1) + array([2, 3]) + >>> np.count_nonzero(a, axis=1, keepdims=True) + array([[2], + [3]]) + """ + if axis is None and not keepdims: + return multiarray.count_nonzero(a) + + a = asanyarray(a) + + # TODO: this works around .astype(bool) not working properly (gh-9847) + if np.issubdtype(a.dtype, np.character): + a_bool = a != a.dtype.type() + else: + a_bool = a.astype(np.bool, copy=False) + + return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims) + + +@set_module('numpy') +def isfortran(a): + """ + Check if the array is Fortran contiguous but *not* C contiguous. + + This function is obsolete. If you only want to check if an array is Fortran + contiguous use ``a.flags.f_contiguous`` instead. + + Parameters + ---------- + a : ndarray + Input array. + + Returns + ------- + isfortran : bool + Returns True if the array is Fortran contiguous but *not* C contiguous. + + + Examples + -------- + + np.array allows to specify whether the array is written in C-contiguous + order (last index varies the fastest), or FORTRAN-contiguous order in + memory (first index varies the fastest). + + >>> import numpy as np + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + + >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') + >>> b + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(b) + True + + + The transpose of a C-ordered array is a FORTRAN-ordered array. + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + >>> b = a.T + >>> b + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.isfortran(b) + True + + C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. + + >>> np.isfortran(np.array([1, 2], order='F')) + False + + """ + return a.flags.fnc + + +def _argwhere_dispatcher(a): + return (a,) + + +@array_function_dispatch(_argwhere_dispatcher) +def argwhere(a): + """ + Find the indices of array elements that are non-zero, grouped by element. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + index_array : (N, a.ndim) ndarray + Indices of elements that are non-zero. Indices are grouped by element. + This array will have shape ``(N, a.ndim)`` where ``N`` is the number of + non-zero items. + + See Also + -------- + where, nonzero + + Notes + ----- + ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, + but produces a result of the correct shape for a 0D array. + + The output of ``argwhere`` is not suitable for indexing arrays. + For this purpose use ``nonzero(a)`` instead. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2,3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.argwhere(x>1) + array([[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + """ + # nonzero does not behave well on 0d, so promote to 1d + if np.ndim(a) == 0: + a = shape_base.atleast_1d(a) + # then remove the added dimension + return argwhere(a)[:, :0] + return transpose(nonzero(a)) + + +def _flatnonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_flatnonzero_dispatcher) +def flatnonzero(a): + """ + Return indices that are non-zero in the flattened version of a. + + This is equivalent to ``np.nonzero(np.ravel(a))[0]``. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + res : ndarray + Output array, containing the indices of the elements of ``a.ravel()`` + that are non-zero. + + See Also + -------- + nonzero : Return the indices of the non-zero elements of the input array. + ravel : Return a 1-D array containing the elements of the input array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(-2, 3) + >>> x + array([-2, -1, 0, 1, 2]) + >>> np.flatnonzero(x) + array([0, 1, 3, 4]) + + Use the indices of the non-zero elements as an index array to extract + these elements: + + >>> x.ravel()[np.flatnonzero(x)] + array([-2, -1, 1, 2]) + + """ + return np.nonzero(np.ravel(a))[0] + + +def _correlate_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_correlate_dispatcher) +def correlate(a, v, mode='valid'): + r""" + Cross-correlation of two 1-dimensional sequences. + + This function computes the correlation as generally defined in signal + processing texts [1]_: + + .. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n + + with a and v sequences being zero-padded where necessary and + :math:`\overline v` denoting complex conjugation. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + + Returns + ------- + out : ndarray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + convolve : Discrete, linear convolution of two one-dimensional sequences. + scipy.signal.correlate : uses FFT which has superior performance + on large arrays. + + Notes + ----- + The definition of correlation above is not unique and sometimes + correlation may be defined differently. Another common definition is [1]_: + + .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}} + + which is related to :math:`c_k` by :math:`c'_k = c_{-k}`. + + `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) + because it does not use the FFT to compute the convolution; in that case, + `scipy.signal.correlate` might be preferable. + + References + ---------- + .. [1] Wikipedia, "Cross-correlation", + https://en.wikipedia.org/wiki/Cross-correlation + + Examples + -------- + >>> import numpy as np + >>> np.correlate([1, 2, 3], [0, 1, 0.5]) + array([3.5]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") + array([2. , 3.5, 3. ]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") + array([0.5, 2. , 3.5, 3. , 0. 
]) + + Using complex sequences: + + >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') + array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) + + Note that you get the time reversed, complex conjugated result + (:math:`\overline{c_{-k}}`) when the two input sequences a and v change + places: + + >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') + array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) + + """ + return multiarray.correlate2(a, v, mode) + + +def _convolve_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_convolve_dispatcher) +def convolve(a, v, mode='full'): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + The convolution operator is often seen in signal processing, where it + models the effect of a linear time-invariant system on a signal [1]_. In + probability theory, the sum of two independent random variables is + distributed according to the convolution of their individual + distributions. + + If `v` is longer than `a`, the arrays are swapped before computation. + + Parameters + ---------- + a : (N,) array_like + First one-dimensional input array. + v : (M,) array_like + Second one-dimensional input array. + mode : {'full', 'valid', 'same'}, optional + 'full': + By default, mode is 'full'. This returns the convolution + at each point of overlap, with an output shape of (N+M-1,). At + the end-points of the convolution, the signals do not overlap + completely, and boundary effects may be seen. + + 'same': + Mode 'same' returns output of length ``max(M, N)``. Boundary + effects are still visible. + + 'valid': + Mode 'valid' returns output of length + ``max(M, N) - min(M, N) + 1``. The convolution product is only given + for points where the signals overlap completely. Values outside + the signal boundary have no effect. + + Returns + ------- + out : ndarray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier + Transform. + scipy.linalg.toeplitz : Used to construct the convolution operator. + polymul : Polynomial multiplication. Same output as convolve, but also + accepts poly1d objects as input. + + Notes + ----- + The discrete convolution operation is defined as + + .. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m} + + It can be shown that a convolution :math:`x(t) * y(t)` in time/space + is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier + domain, after appropriate padding (padding is necessary to prevent + circular convolution). Since multiplication is more efficient (faster) + than convolution, the function `scipy.signal.fftconvolve` exploits the + FFT to calculate the convolution of large data-sets. + + References + ---------- + .. [1] Wikipedia, "Convolution", + https://en.wikipedia.org/wiki/Convolution + + Examples + -------- + Note how the convolution operator flips the second array + before "sliding" the two across one another: + + >>> import numpy as np + >>> np.convolve([1, 2, 3], [0, 1, 0.5]) + array([0. , 1. , 2.5, 4. , 1.5]) + + Only return the middle values of the convolution. + Contains boundary effects, where zeros are taken + into account: + + >>> np.convolve([1,2,3],[0,1,0.5], 'same') + array([1. , 2.5, 4. 
]) + + The two arrays are of the same length, so there + is only one position where they completely overlap: + + >>> np.convolve([1,2,3],[0,1,0.5], 'valid') + array([2.5]) + + """ + a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1) + if (len(v) > len(a)): + a, v = v, a + if len(a) == 0: + raise ValueError('a cannot be empty') + if len(v) == 0: + raise ValueError('v cannot be empty') + return multiarray.correlate(a, v[::-1], mode) + + +def _outer_dispatcher(a, b, out=None): + return (a, b, out) + + +@array_function_dispatch(_outer_dispatcher) +def outer(a, b, out=None): + """ + Compute the outer product of two vectors. + + Given two vectors `a` and `b` of length ``M`` and ``N``, respectively, + the outer product [1]_ is:: + + [[a_0*b_0 a_0*b_1 ... a_0*b_{N-1} ] + [a_1*b_0 . + [ ... . + [a_{M-1}*b_0 a_{M-1}*b_{N-1} ]] + + Parameters + ---------- + a : (M,) array_like + First input vector. Input is flattened if + not already 1-dimensional. + b : (N,) array_like + Second input vector. Input is flattened if + not already 1-dimensional. + out : (M, N) ndarray, optional + A location where the result is stored + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + inner + einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. + ufunc.outer : A generalization to dimensions other than 1D and other + operations. ``np.multiply.outer(a.ravel(), b.ravel())`` + is the equivalent. + linalg.outer : An Array API compatible variation of ``np.outer``, + which accepts 1-dimensional inputs only. + tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))`` + is the equivalent. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd + ed., Baltimore, MD, Johns Hopkins University Press, 1996, + pg. 8. + + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> import numpy as np + >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + + """ + a = asarray(a) + b = asarray(b) + return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out) + + +def _tensordot_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(a, b, axes=2): + """ + Compute tensor dot product along specified axes. + + Given two tensors, `a` and `b`, and an array_like object containing + two array_like objects, ``(a_axes, b_axes)``, sum the products of + `a`'s and `b`'s elements (components) over the axes specified by + ``a_axes`` and ``b_axes``. 
The third argument can be a single non-negative + integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions + of `a` and the first ``N`` dimensions of `b` are summed over. + + Parameters + ---------- + a, b : array_like + Tensors to "dot". + + axes : int or (2,) array_like + * integer_like + If an int N, sum over the last N axes of `a` and the first N axes + of `b` in order. The sizes of the corresponding axes must match. + * (2,) array_like + Or, a list of axes to be summed over, first sequence applying to `a`, + second to `b`. Both elements array_like must be of the same length. + + Returns + ------- + output : ndarray + The tensor dot product of the input. + + See Also + -------- + dot, einsum + + Notes + ----- + Three common use cases are: + * ``axes = 0`` : tensor product :math:`a\\otimes b` + * ``axes = 1`` : tensor dot product :math:`a\\cdot b` + * ``axes = 2`` : (default) tensor double contraction :math:`a:b` + + When `axes` is integer_like, the sequence of axes for evaluation + will be: from the -Nth axis to the -1th axis in `a`, + and from the 0th axis to (N-1)th axis in `b`. + For example, ``axes = 2`` is the equal to + ``axes = [[-2, -1], [0, 1]]``. + When N-1 is smaller than 0, or when -N is larger than -1, + the element of `a` and `b` are defined as the `axes`. + + When there is more than one axis to sum over - and they are not the last + (first) axes of `a` (`b`) - the argument `axes` should consist of + two sequences of the same length, with the first axis to sum over given + first in both sequences, the second axis second, and so forth. + The calculation can be referred to ``numpy.einsum``. + + The shape of the result consists of the non-contracted axes of the + first tensor, followed by the non-contracted axes of the second. + + Examples + -------- + An example on integer_like: + + >>> a_0 = np.array([[1, 2], [3, 4]]) + >>> b_0 = np.array([[5, 6], [7, 8]]) + >>> c_0 = np.tensordot(a_0, b_0, axes=0) + >>> c_0.shape + (2, 2, 2, 2) + >>> c_0 + array([[[[ 5, 6], + [ 7, 8]], + [[10, 12], + [14, 16]]], + [[[15, 18], + [21, 24]], + [[20, 24], + [28, 32]]]]) + + An example on array_like: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) + >>> c.shape + (5, 2) + >>> c + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + A slower but equivalent way of computing the same... + + >>> d = np.zeros((5,2)) + >>> for i in range(5): + ... for j in range(2): + ... for k in range(3): + ... for n in range(4): + ... d[i,j] += a[k,n,i] * b[n,k,j] + >>> c == d + array([[ True, True], + [ True, True], + [ True, True], + [ True, True], + [ True, True]]) + + An extended example taking advantage of the overloading of + and \\*: + + >>> a = np.array(range(1, 9)) + >>> a.shape = (2, 2, 2) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A.shape = (2, 2) + >>> a; A + array([[[1, 2], + [3, 4]], + [[5, 6], + [7, 8]]]) + array([['a', 'b'], + ['c', 'd']], dtype=object) + + >>> np.tensordot(a, A) # third argument default is 2 for double-contraction + array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, 1) + array([[['acc', 'bdd'], + ['aaacccc', 'bbbdddd']], + [['aaaaacccccc', 'bbbbbdddddd'], + ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) + array([[[[['a', 'b'], + ['c', 'd']], + ... 
+ + >>> np.tensordot(a, A, (0, 1)) + array([[['abbbbb', 'cddddd'], + ['aabbbbbb', 'ccdddddd']], + [['aaabbbbbbb', 'cccddddddd'], + ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, (2, 1)) + array([[['abb', 'cdd'], + ['aaabbbb', 'cccdddd']], + [['aaaaabbbbbb', 'cccccdddddd'], + ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, ((0, 1), (0, 1))) + array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, ((2, 1), (1, 0))) + array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) + + """ + try: + iter(axes) + except Exception: + axes_a = list(range(-axes, 0)) + axes_b = list(range(axes)) + else: + axes_a, axes_b = axes + try: + na = len(axes_a) + axes_a = list(axes_a) + except TypeError: + axes_a = [axes_a] + na = 1 + try: + nb = len(axes_b) + axes_b = list(axes_b) + except TypeError: + axes_b = [axes_b] + nb = 1 + + a, b = asarray(a), asarray(b) + as_ = a.shape + nda = a.ndim + bs = b.shape + ndb = b.ndim + equal = True + if na != nb: + equal = False + else: + for k in range(na): + if as_[axes_a[k]] != bs[axes_b[k]]: + equal = False + break + if axes_a[k] < 0: + axes_a[k] += nda + if axes_b[k] < 0: + axes_b[k] += ndb + if not equal: + raise ValueError("shape-mismatch for sum") + + # Move the axes to sum over to the end of "a" + # and to the front of "b" + notin = [k for k in range(nda) if k not in axes_a] + newaxes_a = notin + axes_a + N2 = math.prod(as_[axis] for axis in axes_a) + newshape_a = (math.prod(as_[ax] for ax in notin), N2) + olda = [as_[axis] for axis in notin] + + notin = [k for k in range(ndb) if k not in axes_b] + newaxes_b = axes_b + notin + N2 = math.prod(bs[axis] for axis in axes_b) + newshape_b = (N2, math.prod(bs[ax] for ax in notin)) + oldb = [bs[axis] for axis in notin] + + at = a.transpose(newaxes_a).reshape(newshape_a) + bt = b.transpose(newaxes_b).reshape(newshape_b) + res = dot(at, bt) + return res.reshape(olda + oldb) + + +def _roll_dispatcher(a, shift, axis=None): + return (a,) + + +@array_function_dispatch(_roll_dispatcher) +def roll(a, shift, axis=None): + """ + Roll array elements along a given axis. + + Elements that roll beyond the last position are re-introduced at + the first. + + Parameters + ---------- + a : array_like + Input array. + shift : int or tuple of ints + The number of places by which elements are shifted. If a tuple, + then `axis` must be a tuple of the same size, and each of the + given axes is shifted by the corresponding number. If an int + while `axis` is a tuple of ints, then the same value is used for + all given axes. + axis : int or tuple of ints, optional + Axis or axes along which elements are shifted. By default, the + array is flattened before shifting, after which the original + shape is restored. + + Returns + ------- + res : ndarray + Output array, with the same shape as `a`. + + See Also + -------- + rollaxis : Roll the specified axis backwards, until it lies in a + given position. + + Notes + ----- + Supports rolling over multiple dimensions simultaneously. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10) + >>> np.roll(x, 2) + array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) + >>> np.roll(x, -2) + array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) + + >>> x2 = np.reshape(x, (2, 5)) + >>> x2 + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> np.roll(x2, 1) + array([[9, 0, 1, 2, 3], + [4, 5, 6, 7, 8]]) + >>> np.roll(x2, -1) + array([[1, 2, 3, 4, 5], + [6, 7, 8, 9, 0]]) + >>> np.roll(x2, 1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, -1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, 1, axis=1) + array([[4, 0, 1, 2, 3], + [9, 5, 6, 7, 8]]) + >>> np.roll(x2, -1, axis=1) + array([[1, 2, 3, 4, 0], + [6, 7, 8, 9, 5]]) + >>> np.roll(x2, (1, 1), axis=(1, 0)) + array([[9, 5, 6, 7, 8], + [4, 0, 1, 2, 3]]) + >>> np.roll(x2, (2, 1), axis=(1, 0)) + array([[8, 9, 5, 6, 7], + [3, 4, 0, 1, 2]]) + + """ + a = asanyarray(a) + if axis is None: + return roll(a.ravel(), shift, 0).reshape(a.shape) + + else: + axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + broadcasted = broadcast(shift, axis) + if broadcasted.ndim > 1: + raise ValueError( + "'shift' and 'axis' should be scalars or 1D sequences") + shifts = dict.fromkeys(range(a.ndim), 0) + for sh, ax in broadcasted: + shifts[ax] += int(sh) + + rolls = [((slice(None), slice(None)),)] * a.ndim + for ax, offset in shifts.items(): + offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. + if offset: + # (original, result), (original, result) + rolls[ax] = ((slice(None, -offset), slice(offset, None)), + (slice(-offset, None), slice(None, offset))) + + result = empty_like(a) + for indices in itertools.product(*rolls): + arr_index, res_index = zip(*indices) + result[res_index] = a[arr_index] + + return result + + +def _rollaxis_dispatcher(a, axis, start=None): + return (a,) + + +@array_function_dispatch(_rollaxis_dispatcher) +def rollaxis(a, axis, start=0): + """ + Roll the specified axis backwards, until it lies in a given position. + + This function continues to be supported for backward compatibility, but you + should prefer `moveaxis`. The `moveaxis` function was added in NumPy + 1.11. + + Parameters + ---------- + a : ndarray + Input array. + axis : int + The axis to be rolled. The positions of the other axes do not + change relative to one another. + start : int, optional + When ``start <= axis``, the axis is rolled back until it lies in + this position. When ``start > axis``, the axis is rolled until it + lies before this position. The default, 0, results in a "complete" + roll. The following table describes how negative values of ``start`` + are interpreted: + + .. table:: + :align: left + + +-------------------+----------------------+ + | ``start`` | Normalized ``start`` | + +===================+======================+ + | ``-(arr.ndim+1)`` | raise ``AxisError`` | + +-------------------+----------------------+ + | ``-arr.ndim`` | 0 | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``-1`` | ``arr.ndim-1`` | + +-------------------+----------------------+ + | ``0`` | ``0`` | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``arr.ndim`` | ``arr.ndim`` | + +-------------------+----------------------+ + | ``arr.ndim + 1`` | raise ``AxisError`` | + +-------------------+----------------------+ + + .. |vdots| unicode:: U+22EE .. 
Vertical Ellipsis + + Returns + ------- + res : ndarray + For NumPy >= 1.10.0 a view of `a` is always returned. For earlier + NumPy versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + See Also + -------- + moveaxis : Move array axes to new positions. + roll : Roll the elements of an array by a number of positions along a + given axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.ones((3,4,5,6)) + >>> np.rollaxis(a, 3, 1).shape + (3, 6, 4, 5) + >>> np.rollaxis(a, 2).shape + (5, 3, 4, 6) + >>> np.rollaxis(a, 1, 4).shape + (3, 5, 6, 4) + + """ + n = a.ndim + axis = normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise AxisError(msg % ('start', -n, 'start', n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + return a[...] + axes = list(range(n)) + axes.remove(axis) + axes.insert(start, axis) + return a.transpose(axes) + + +@set_module("numpy.lib.array_utils") +def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): + """ + Normalizes an axis argument into a tuple of non-negative integer axes. + + This handles shorthands such as ``1`` and converts them to ``(1,)``, + as well as performing the handling of negative indices covered by + `normalize_axis_index`. + + By default, this forbids axes from being specified multiple times. + + Used internally by multi-axis-checking logic. + + Parameters + ---------- + axis : int, iterable of int + The un-normalized index or indices of the axis. + ndim : int + The number of dimensions of the array that `axis` should be normalized + against. + argname : str, optional + A prefix to put before the error message, typically the name of the + argument. + allow_duplicate : bool, optional + If False, the default, disallow an axis from being specified twice. + + Returns + ------- + normalized_axes : tuple of int + The normalized axis index, such that `0 <= normalized_axis < ndim` + + Raises + ------ + AxisError + If any axis provided is out of range + ValueError + If an axis is repeated + + See also + -------- + normalize_axis_index : normalizing a single scalar axis + """ + # Optimization to speed-up the most common cases. + if not isinstance(axis, (tuple, list)): + try: + axis = [operator.index(axis)] + except TypeError: + pass + # Going via an iterator directly is slower than via list comprehension. + axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis) + if not allow_duplicate and len(set(axis)) != len(axis): + if argname: + raise ValueError(f'repeated axis in `{argname}` argument') + else: + raise ValueError('repeated axis') + return axis + + +def _moveaxis_dispatcher(a, source, destination): + return (a,) + + +@array_function_dispatch(_moveaxis_dispatcher) +def moveaxis(a, source, destination): + """ + Move axes of an array to new positions. + + Other axes remain in their original order. + + Parameters + ---------- + a : np.ndarray + The array whose axes should be reordered. + source : int or sequence of int + Original positions of the axes to move. These must be unique. + destination : int or sequence of int + Destination positions for each of the original axes. These must also be + unique. + + Returns + ------- + result : np.ndarray + Array with moved axes. This array is a view of the input array. + + See Also + -------- + transpose : Permute the dimensions of an array. 
+ swapaxes : Interchange two axes of an array. + + Examples + -------- + >>> import numpy as np + >>> x = np.zeros((3, 4, 5)) + >>> np.moveaxis(x, 0, -1).shape + (4, 5, 3) + >>> np.moveaxis(x, -1, 0).shape + (5, 3, 4) + + These all achieve the same result: + + >>> np.transpose(x).shape + (5, 4, 3) + >>> np.swapaxes(x, 0, -1).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1], [-1, -2]).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape + (5, 4, 3) + + """ + try: + # allow duck-array types if they define transpose + transpose = a.transpose + except AttributeError: + a = asarray(a) + transpose = a.transpose + + source = normalize_axis_tuple(source, a.ndim, 'source') + destination = normalize_axis_tuple(destination, a.ndim, 'destination') + if len(source) != len(destination): + raise ValueError('`source` and `destination` arguments must have ' + 'the same number of elements') + + order = [n for n in range(a.ndim) if n not in source] + + for dest, src in sorted(zip(destination, source)): + order.insert(dest, src) + + result = transpose(order) + return result + + +def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None): + return (a, b) + + +@array_function_dispatch(_cross_dispatcher) +def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): + """ + Return the cross product of two (arrays of) vectors. + + The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular + to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors + are defined by the last axis of `a` and `b` by default, and these axes + can have dimensions 2 or 3. Where the dimension of either `a` or `b` is + 2, the third component of the input vector is assumed to be zero and the + cross product calculated accordingly. In cases where both input vectors + have dimension 2, the z-component of the cross product is returned. + + Parameters + ---------- + a : array_like + Components of the first vector(s). + b : array_like + Components of the second vector(s). + axisa : int, optional + Axis of `a` that defines the vector(s). By default, the last axis. + axisb : int, optional + Axis of `b` that defines the vector(s). By default, the last axis. + axisc : int, optional + Axis of `c` containing the cross product vector(s). Ignored if + both input vectors have dimension 2, as the return is scalar. + By default, the last axis. + axis : int, optional + If defined, the axis of `a`, `b` and `c` that defines the vector(s) + and cross product(s). Overrides `axisa`, `axisb` and `axisc`. + + Returns + ------- + c : ndarray + Vector cross product(s). + + Raises + ------ + ValueError + When the dimension of the vector(s) in `a` and/or `b` does not + equal 2 or 3. + + See Also + -------- + inner : Inner product + outer : Outer product. + linalg.cross : An Array API compatible variation of ``np.cross``, + which accepts (arrays of) 3-element vectors only. + ix_ : Construct index arrays. + + Notes + ----- + Supports full broadcasting of the inputs. + + Dimension-2 input arrays were deprecated in 2.0.0. If you do need this + functionality, you can use:: + + def cross2d(x, y): + return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] + + Examples + -------- + Vector cross-product. + + >>> import numpy as np + >>> x = [1, 2, 3] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([-3, 6, -3]) + + One vector with dimension 2. 
+
+    >>> x = [1, 2]
+    >>> y = [4, 5, 6]
+    >>> np.cross(x, y)
+    array([12, -6, -3])
+
+    Equivalently:
+
+    >>> x = [1, 2, 0]
+    >>> y = [4, 5, 6]
+    >>> np.cross(x, y)
+    array([12, -6, -3])
+
+    Both vectors with dimension 2.
+
+    >>> x = [1,2]
+    >>> y = [4,5]
+    >>> np.cross(x, y)
+    array(-3)
+
+    Multiple vector cross-products. Note that the direction of the cross
+    product vector is defined by the *right-hand rule*.
+
+    >>> x = np.array([[1,2,3], [4,5,6]])
+    >>> y = np.array([[4,5,6], [1,2,3]])
+    >>> np.cross(x, y)
+    array([[-3,  6, -3],
+           [ 3, -6,  3]])
+
+    The orientation of `c` can be changed using the `axisc` keyword.
+
+    >>> np.cross(x, y, axisc=0)
+    array([[-3,  3],
+           [ 6, -6],
+           [-3,  3]])
+
+    Change the vector definition of `x` and `y` using `axisa` and `axisb`.
+
+    >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
+    >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
+    >>> np.cross(x, y)
+    array([[ -6,  12,  -6],
+           [  0,   0,   0],
+           [  6, -12,   6]])
+    >>> np.cross(x, y, axisa=0, axisb=0)
+    array([[-24,  48, -24],
+           [-30,  60, -30],
+           [-36,  72, -36]])
+
+    """
+    if axis is not None:
+        axisa, axisb, axisc = (axis,) * 3
+    a = asarray(a)
+    b = asarray(b)
+
+    if (a.ndim < 1) or (b.ndim < 1):
+        raise ValueError("At least one array has zero dimension")
+
+    # Check axisa and axisb are within bounds
+    axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
+    axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
+
+    # Move working axis to the end of the shape
+    a = moveaxis(a, axisa, -1)
+    b = moveaxis(b, axisb, -1)
+    msg = ("incompatible dimensions for cross product\n"
+           "(dimension must be 2 or 3)")
+    if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
+        raise ValueError(msg)
+    if a.shape[-1] == 2 or b.shape[-1] == 2:
+        # Deprecated in NumPy 2.0, 2023-09-26
+        warnings.warn(
+            "Arrays of 2-dimensional vectors are deprecated. Use arrays of "
+            "3-dimensional vectors instead. (deprecated in NumPy 2.0)",
+            DeprecationWarning, stacklevel=2
+        )
+
+    # Create the output array
+    shape = broadcast(a[..., 0], b[..., 0]).shape
+    if a.shape[-1] == 3 or b.shape[-1] == 3:
+        shape += (3,)
+        # Check axisc is within bounds
+        axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
+    dtype = promote_types(a.dtype, b.dtype)
+    cp = empty(shape, dtype)
+
+    # recast arrays as dtype
+    a = a.astype(dtype)
+    b = b.astype(dtype)
+
+    # create local aliases for readability
+    a0 = a[..., 0]
+    a1 = a[..., 1]
+    if a.shape[-1] == 3:
+        a2 = a[..., 2]
+    b0 = b[..., 0]
+    b1 = b[..., 1]
+    if b.shape[-1] == 3:
+        b2 = b[..., 2]
+    if cp.ndim != 0 and cp.shape[-1] == 3:
+        cp0 = cp[..., 0]
+        cp1 = cp[..., 1]
+        cp2 = cp[..., 2]
+
+    if a.shape[-1] == 2:
+        if b.shape[-1] == 2:
+            # a0 * b1 - a1 * b0
+            multiply(a0, b1, out=cp)
+            cp -= a1 * b0
+            return cp
+        else:
+            assert b.shape[-1] == 3
+            # cp0 = a1 * b2 - 0  (a2 = 0)
+            # cp1 = 0 - a0 * b2  (a2 = 0)
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a1, b2, out=cp0)
+            multiply(a0, b2, out=cp1)
+            negative(cp1, out=cp1)
+            multiply(a0, b1, out=cp2)
+            cp2 -= a1 * b0
+    else:
+        assert a.shape[-1] == 3
+        if b.shape[-1] == 3:
+            # cp0 = a1 * b2 - a2 * b1
+            # cp1 = a2 * b0 - a0 * b2
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a1, b2, out=cp0)
+            tmp = array(a2 * b1)
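+            # NOTE: ``tmp`` is reused as scratch space by the ``out=tmp``
+            # calls below, so each remaining product term is formed without
+            # allocating a new array.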
+            cp0 -= tmp
+            multiply(a2, b0, out=cp1)
+            multiply(a0, b2, out=tmp)
+            cp1 -= tmp
+            multiply(a0, b1, out=cp2)
+            multiply(a1, b0, out=tmp)
+            cp2 -= tmp
+        else:
+            assert b.shape[-1] == 2
+            # cp0 = 0 - a2 * b1  (b2 = 0)
+            # cp1 = a2 * b0 - 0  (b2 = 0)
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a2, b1, out=cp0)
+            negative(cp0, out=cp0)
+            multiply(a2, b0, out=cp1)
+            multiply(a0, b1, out=cp2)
+            cp2 -= a1 * b0
+
+    return moveaxis(cp, -1, axisc)
+
+
+little_endian = (sys.byteorder == 'little')
+
+
+@set_module('numpy')
+def indices(dimensions, dtype=int, sparse=False):
+    """
+    Return an array representing the indices of a grid.
+
+    Compute an array where the subarrays contain index values 0, 1, ...
+    varying only along the corresponding axis.
+
+    Parameters
+    ----------
+    dimensions : sequence of ints
+        The shape of the grid.
+    dtype : dtype, optional
+        Data type of the result.
+    sparse : boolean, optional
+        Return a sparse representation of the grid instead of a dense
+        representation. Default is False.
+
+    Returns
+    -------
+    grid : one ndarray or tuple of ndarrays
+        If sparse is False:
+            Returns one array of grid indices,
+            ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+        If sparse is True:
+            Returns a tuple of arrays, with
+            ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
+            dimensions[i] in the ith place
+
+    See Also
+    --------
+    mgrid, ogrid, meshgrid
+
+    Notes
+    -----
+    The output shape in the dense case is obtained by prepending the number
+    of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
+    is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
+    ``(N, r0, ..., rN-1)``.
+
+    Each subarray ``grid[k]`` contains the N-D array of indices along the
+    ``k``-th axis. Explicitly::
+
+        grid[k, i0, i1, ..., iN-1] = ik
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> grid = np.indices((2, 3))
+    >>> grid.shape
+    (2, 2, 3)
+    >>> grid[0]  # row indices
+    array([[0, 0, 0],
+           [1, 1, 1]])
+    >>> grid[1]  # column indices
+    array([[0, 1, 2],
+           [0, 1, 2]])
+
+    The indices can be used as an index into an array.
+
+    >>> x = np.arange(20).reshape(5, 4)
+    >>> row, col = np.indices((2, 3))
+    >>> x[row, col]
+    array([[0, 1, 2],
+           [4, 5, 6]])
+
+    Note that it would be more straightforward in the above example to
+    extract the required elements directly with ``x[:2, :3]``.
+
+    If sparse is set to true, the grid will be returned in a sparse
+    representation.
+
+    >>> i, j = np.indices((2, 3), sparse=True)
+    >>> i.shape
+    (2, 1)
+    >>> j.shape
+    (1, 3)
+    >>> i  # row indices
+    array([[0],
+           [1]])
+    >>> j  # column indices
+    array([[0, 1, 2]])
+
+    """
+    dimensions = tuple(dimensions)
+    N = len(dimensions)
+    shape = (1,) * N
+    if sparse:
+        res = ()
+    else:
+        res = empty((N,) + dimensions, dtype=dtype)
+    for i, dim in enumerate(dimensions):
+        idx = arange(dim, dtype=dtype).reshape(
+            shape[:i] + (dim,) + shape[i + 1:]
+        )
+        if sparse:
+            res = res + (idx,)
+        else:
+            res[i] = idx
+    return res
+
+
+@finalize_array_function_like
+@set_module('numpy')
+def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
+    """
+    Construct an array by executing a function over each coordinate.
+
+    The resulting array therefore has a value ``fn(x, y, z)`` at
+    coordinate ``(x, y, z)``.
+
+    Parameters
+    ----------
+    function : callable
+        The function is called with N parameters, where N is the rank of
+        `shape`. Each parameter represents the coordinates of the array
+        varying along a specific axis. 
For example, if `shape` + were ``(2, 2)``, then the parameters would be + ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` + shape : (N,) tuple of ints + Shape of the output array, which also determines the shape of + the coordinate arrays passed to `function`. + dtype : data-type, optional + Data-type of the coordinate arrays passed to `function`. + By default, `dtype` is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + fromfunction : any + The result of the call to `function` is passed back directly. + Therefore the shape of `fromfunction` is completely determined by + `function`. If `function` returns a scalar value, the shape of + `fromfunction` would not match the `shape` parameter. + + See Also + -------- + indices, meshgrid + + Notes + ----- + Keywords other than `dtype` and `like` are passed to `function`. + + Examples + -------- + >>> import numpy as np + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + array([[0., 0.], + [1., 1.]]) + + >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + array([[0., 1.], + [0., 1.]]) + + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + array([[ True, False, False], + [False, True, False], + [False, False, True]]) + + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4]]) + + """ + if like is not None: + return _fromfunction_with_like( + like, function, shape, dtype=dtype, **kwargs) + + args = indices(shape, dtype=dtype) + return function(*args, **kwargs) + + +_fromfunction_with_like = array_function_dispatch()(fromfunction) + + +def _frombuffer(buf, dtype, shape, order, axis_order=None): + array = frombuffer(buf, dtype=dtype) + if order == 'K' and axis_order is not None: + return array.reshape(shape, order='C').transpose(axis_order) + return array.reshape(shape, order=order) + + +@set_module('numpy') +def isscalar(element): + """ + Returns True if the type of `element` is a scalar type. + + Parameters + ---------- + element : any + Input argument, can be of any type and shape. + + Returns + ------- + val : bool + True if `element` is a scalar type, False if it is not. + + See Also + -------- + ndim : Get the number of dimensions of an array + + Notes + ----- + If you need a stricter way to identify a *numerical* scalar, use + ``isinstance(x, numbers.Number)``, as that returns ``False`` for most + non-numerical elements such as strings. + + In most cases ``np.ndim(x) == 0`` should be used instead of this function, + as that will also return true for 0d arrays. This is how numpy overloads + functions in the style of the ``dx`` arguments to `gradient` and + the ``bins`` argument to `histogram`. 
Some key differences:
+
+    +------------------------------------+---------------+-------------------+
+    | x                                  |``isscalar(x)``|``np.ndim(x) == 0``|
+    +====================================+===============+===================+
+    | PEP 3141 numeric objects           | ``True``      | ``True``          |
+    | (including builtins)               |               |                   |
+    +------------------------------------+---------------+-------------------+
+    | builtin string and buffer objects  | ``True``      | ``True``          |
+    +------------------------------------+---------------+-------------------+
+    | other builtin objects, like        | ``False``     | ``True``          |
+    | `pathlib.Path`, `Exception`,       |               |                   |
+    | the result of `re.compile`         |               |                   |
+    +------------------------------------+---------------+-------------------+
+    | third-party objects like           | ``False``     | ``True``          |
+    | `matplotlib.figure.Figure`         |               |                   |
+    +------------------------------------+---------------+-------------------+
+    | zero-dimensional numpy arrays      | ``False``     | ``True``          |
+    +------------------------------------+---------------+-------------------+
+    | other numpy arrays                 | ``False``     | ``False``         |
+    +------------------------------------+---------------+-------------------+
+    | `list`, `tuple`, and other         | ``False``     | ``False``         |
+    | sequence objects                   |               |                   |
+    +------------------------------------+---------------+-------------------+
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> np.isscalar(3.1)
+    True
+
+    >>> np.isscalar(np.array(3.1))
+    False
+
+    >>> np.isscalar([3.1])
+    False
+
+    >>> np.isscalar(False)
+    True
+
+    >>> np.isscalar('numpy')
+    True
+
+    NumPy supports PEP 3141 numbers:
+
+    >>> from fractions import Fraction
+    >>> np.isscalar(Fraction(5, 17))
+    True
+    >>> from numbers import Number
+    >>> np.isscalar(Number())
+    True
+
+    """
+    return (isinstance(element, generic)
+            or type(element) in ScalarType
+            or isinstance(element, numbers.Number))
+
+
+@set_module('numpy')
+def binary_repr(num, width=None):
+    """
+    Return the binary representation of the input number as a string.
+
+    For negative numbers, if width is not given, a minus sign is added to the
+    front. If width is given, the two's complement of the number is
+    returned, with respect to that width.
+
+    In a two's-complement system negative numbers are represented by the two's
+    complement of the absolute value. This is the most common method of
+    representing signed integers on computers [1]_. An N-bit two's-complement
+    system can represent every integer in the range
+    :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
+
+    Parameters
+    ----------
+    num : int
+        Only an integer decimal number can be used.
+    width : int, optional
+        The length of the returned string if `num` is positive, or the length
+        of the two's complement if `num` is negative, provided that `width` is
+        at least a sufficient number of bits for `num` to be represented in
+        the designated form. If the `width` value is insufficient, an error is
+        raised.
+
+    Returns
+    -------
+    bin : str
+        Binary representation of `num` or two's complement of `num`.
+
+    See Also
+    --------
+    base_repr: Return a string representation of a number in the given base
+        system.
+    bin: Python's built-in binary representation generator of an integer.
+
+    Notes
+    -----
+    `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
+    faster.
+
+    References
+    ----------
+    .. 
[1] Wikipedia, "Two's complement", + https://en.wikipedia.org/wiki/Two's_complement + + Examples + -------- + >>> import numpy as np + >>> np.binary_repr(3) + '11' + >>> np.binary_repr(-3) + '-11' + >>> np.binary_repr(3, width=4) + '0011' + + The two's complement is returned when the input number is negative and + width is specified: + + >>> np.binary_repr(-3, width=3) + '101' + >>> np.binary_repr(-3, width=5) + '11101' + + """ + def err_if_insufficient(width, binwidth): + if width is not None and width < binwidth: + raise ValueError( + f"Insufficient bit {width=} provided for {binwidth=}" + ) + + # Ensure that num is a Python integer to avoid overflow or unwanted + # casts to floating point. + num = operator.index(num) + + if num == 0: + return '0' * (width or 1) + + elif num > 0: + binary = f'{num:b}' + binwidth = len(binary) + outwidth = (binwidth if width is None + else builtins.max(binwidth, width)) + err_if_insufficient(width, binwidth) + return binary.zfill(outwidth) + + elif width is None: + return f'-{-num:b}' + + else: + poswidth = len(f'{-num:b}') + + # See gh-8679: remove extra digit + # for numbers at boundaries. + if 2**(poswidth - 1) == -num: + poswidth -= 1 + + twocomp = 2**(poswidth + 1) + num + binary = f'{twocomp:b}' + binwidth = len(binary) + + outwidth = builtins.max(binwidth, width) + err_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary + + +@set_module('numpy') +def base_repr(number, base=2, padding=0): + """ + Return a string representation of a number in the given base system. + + Parameters + ---------- + number : int + The value to convert. Positive and negative values are handled. + base : int, optional + Convert `number` to the `base` number system. The valid range is 2-36, + the default value is 2. + padding : int, optional + Number of zeros padded on the left. Default is 0 (no padding). + + Returns + ------- + out : str + String representation of `number` in `base` system. + + See Also + -------- + binary_repr : Faster version of `base_repr` for base 2. + + Examples + -------- + >>> import numpy as np + >>> np.base_repr(5) + '101' + >>> np.base_repr(6, 5) + '11' + >>> np.base_repr(7, base=5, padding=3) + '00012' + + >>> np.base_repr(10, base=16) + 'A' + >>> np.base_repr(32, base=16) + '20' + + """ + digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + if base > len(digits): + raise ValueError("Bases greater than 36 not handled in base_repr.") + elif base < 2: + raise ValueError("Bases less than 2 not handled in base_repr.") + + num = abs(int(number)) + res = [] + while num: + res.append(digits[num % base]) + num //= base + if padding: + res.append('0' * padding) + if number < 0: + res.append('-') + return ''.join(reversed(res or '0')) + + +# These are all essentially abbreviations +# These might wind up in a special abbreviations module + + +def _maketup(descr, val): + dt = dtype(descr) + # Place val in all scalar tuples: + fields = dt.fields + if fields is None: + return val + else: + res = [_maketup(fields[name][0], val) for name in dt.names] + return tuple(res) + + +@finalize_array_function_like +@set_module('numpy') +def identity(n, dtype=None, *, like=None): + """ + Return the identity array. + + The identity array is a square array with ones on + the main diagonal. + + Parameters + ---------- + n : int + Number of rows (and columns) in `n` x `n` output. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + ${ARRAY_FUNCTION_LIKE} + + .. 
versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + `n` x `n` array with its main diagonal set to one, + and all other elements 0. + + Examples + -------- + >>> import numpy as np + >>> np.identity(3) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + """ + if like is not None: + return _identity_with_like(like, n, dtype=dtype) + + from numpy import eye + return eye(n, dtype=dtype, like=like) + + +_identity_with_like = array_function_dispatch()(identity) + + +def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b, rtol, atol) + + +@array_function_dispatch(_allclose_dispatcher) +def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. warning:: The default `atol` is not appropriate for comparing numbers + with magnitudes much smaller than one (see Notes). + + NaNs are treated as equal if they are in the same place and if + ``equal_nan=True``. Infs are treated as equal if they are in the same + place and of the same sign in both arrays. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : array_like + The relative tolerance parameter (see Notes). + atol : array_like + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + allclose : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. + + See Also + -------- + isclose, all, any, equal + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True.:: + + absolute(a - b) <= (atol + rtol * absolute(b)) + + The above equation is not symmetric in `a` and `b`, so that + ``allclose(a, b)`` might be different from ``allclose(b, a)`` in + some rare cases. + + The default value of `atol` is not appropriate when the reference value + `b` has magnitude smaller than one. For example, it is unlikely that + ``a = 1e-9`` and ``b = 2e-9`` should be considered "close", yet + ``allclose(1e-9, 2e-9)`` is ``True`` with default settings. Be sure + to select `atol` for the use case at hand, especially for defining the + threshold below which a non-zero value in `a` will be considered "close" + to a very small or zero value in `b`. + + The comparison of `a` and `b` uses standard broadcasting, which + means that `a` and `b` need not have the same shape in order for + ``allclose(a, b)`` to evaluate to True. The same is true for + `equal` but not `array_equal`. + + `allclose` is not defined for non-numeric data types. + `bool` is considered a numeric data-type for this purpose. 
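+
+    Internally, ``allclose`` reduces `isclose` with ``all``, so its
+    broadcasting and tolerance semantics are exactly those of `isclose`.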
+ + Examples + -------- + >>> import numpy as np + >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) + False + + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) + True + + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) + False + + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) + False + + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + True + + + """ + res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) + return builtins.bool(res) + + +def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b, rtol, atol) + + +@array_function_dispatch(_isclose_dispatcher) +def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns a boolean array where two arrays are element-wise equal within a + tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. warning:: The default `atol` is not appropriate for comparing numbers + with magnitudes much smaller than one (see Notes). + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : array_like + The relative tolerance parameter (see Notes). + atol : array_like + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + y : array_like + Returns a boolean array of where `a` and `b` are equal within the + given tolerance. If both `a` and `b` are scalars, returns a single + boolean value. + + See Also + -------- + allclose + math.isclose + + Notes + ----- + For finite values, isclose uses the following equation to test whether + two floating point values are equivalent.:: + + absolute(a - b) <= (atol + rtol * absolute(b)) + + Unlike the built-in `math.isclose`, the above equation is not symmetric + in `a` and `b` -- it assumes `b` is the reference value -- so that + `isclose(a, b)` might be different from `isclose(b, a)`. + + The default value of `atol` is not appropriate when the reference value + `b` has magnitude smaller than one. For example, it is unlikely that + ``a = 1e-9`` and ``b = 2e-9`` should be considered "close", yet + ``isclose(1e-9, 2e-9)`` is ``True`` with default settings. Be sure + to select `atol` for the use case at hand, especially for defining the + threshold below which a non-zero value in `a` will be considered "close" + to a very small or zero value in `b`. + + `isclose` is not defined for non-numeric data types. + :class:`bool` is considered a numeric data-type for this purpose. + + Examples + -------- + >>> import numpy as np + >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) + array([ True, False]) + + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) + array([ True, True]) + + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) + array([False, True]) + + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) + array([ True, False]) + + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + array([ True, True]) + + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) + array([ True, False]) + + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) + array([False, False]) + + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) + array([ True, True]) + + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) + array([False, True]) + + """ + # Turn all but python scalars into arrays. 
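+    # (A 0-d result is collapsed back to a scalar at the end of this
+    # function, via the ``result[()]`` indexing.)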
+ x, y, atol, rtol = ( + a if isinstance(a, (int, float, complex)) else asanyarray(a) + for a in (a, b, atol, rtol)) + + # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). + # This will cause casting of x later. Also, make sure to allow subclasses + # (e.g., for numpy.ma). + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if (dtype := getattr(y, "dtype", None)) is not None and dtype.kind != "m": + dt = multiarray.result_type(y, 1.) + y = asanyarray(y, dtype=dt) + elif isinstance(y, int): + y = float(y) + + # atol and rtol can be arrays + if not (np.all(np.isfinite(atol)) and np.all(np.isfinite(rtol))): + err_s = np.geterr()["invalid"] + err_msg = f"One of rtol or atol is not valid, atol: {atol}, rtol: {rtol}" + + if err_s == "warn": + warnings.warn(err_msg, RuntimeWarning, stacklevel=2) + elif err_s == "raise": + raise FloatingPointError(err_msg) + elif err_s == "print": + print(err_msg) + + with errstate(invalid='ignore'): + + result = (less_equal(abs(x - y), atol + rtol * abs(y)) + & isfinite(y) + | (x == y)) + if equal_nan: + result |= isnan(x) & isnan(y) + + return result[()] # Flatten 0d arrays to scalars + + +def _array_equal_dispatcher(a1, a2, equal_nan=None): + return (a1, a2) + + +_no_nan_types = { + # should use np.dtype.BoolDType, but as of writing + # that fails the reloading test. + type(dtype(nt.bool)), + type(dtype(nt.int8)), + type(dtype(nt.int16)), + type(dtype(nt.int32)), + type(dtype(nt.int64)), +} + + +def _dtype_cannot_hold_nan(dtype): + return type(dtype) in _no_nan_types + + +@array_function_dispatch(_array_equal_dispatcher) +def array_equal(a1, a2, equal_nan=False): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + equal_nan : bool + Whether to compare NaN's as equal. If the dtype of a1 and a2 is + complex, values will be considered equal if either the real or the + imaginary component of a given value is ``nan``. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> import numpy as np + + >>> np.array_equal([1, 2], [1, 2]) + True + + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + + >>> np.array_equal([1, 2], [1, 2, 3]) + False + + >>> np.array_equal([1, 2], [1, 4]) + False + + >>> a = np.array([1, np.nan]) + >>> np.array_equal(a, a) + False + + >>> np.array_equal(a, a, equal_nan=True) + True + + When ``equal_nan`` is True, complex values with nan components are + considered equal if either the real *or* the imaginary components are nan. + + >>> a = np.array([1 + 1j]) + >>> b = a.copy() + >>> a.real = np.nan + >>> b.imag = np.nan + >>> np.array_equal(a, b, equal_nan=True) + True + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + if a1.shape != a2.shape: + return False + if not equal_nan: + return builtins.bool((asanyarray(a1 == a2)).all()) + + if a1 is a2: + # nan will compare equal so an array will compare equal to itself. 
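+        # (This also short-circuits the element-wise comparison below when
+        # the same object is passed for both arguments.)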
+ return True + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + + # Handling NaN values if equal_nan is True + a1nan, a2nan = isnan(a1), isnan(a2) + # NaN's occur at different locations + if not (a1nan == a2nan).all(): + return False + # Shapes of a1, a2 and masks are guaranteed to be consistent by this point + return builtins.bool((a1[~a1nan] == a2[~a1nan]).all()) + + +def _array_equiv_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_array_equiv_dispatcher) +def array_equiv(a1, a2): + """ + Returns True if input arrays are shape consistent and all elements equal. + + Shape consistent means they are either the same shape, or one input array + can be broadcasted to create the same shape as the other one. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + out : bool + True if equivalent, False otherwise. + + Examples + -------- + >>> import numpy as np + >>> np.array_equiv([1, 2], [1, 2]) + True + >>> np.array_equiv([1, 2], [1, 3]) + False + + Showing the shape equivalence: + + >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) + True + >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) + False + + >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + try: + multiarray.broadcast(a1, a2) + except Exception: + return False + + return builtins.bool(asanyarray(a1 == a2).all()) + + +def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): + return (x, dtype) + + +@array_function_dispatch(_astype_dispatcher) +def astype(x, dtype, /, *, copy=True, device=None): + """ + Copies an array to a specified data type. + + This function is an Array API compatible alternative to + `numpy.ndarray.astype`. + + Parameters + ---------- + x : ndarray + Input NumPy array to cast. ``array_likes`` are explicitly not + supported here. + dtype : dtype + Data type of the result. + copy : bool, optional + Specifies whether to copy an array when the specified dtype matches + the data type of the input array ``x``. If ``True``, a newly allocated + array must always be returned. If ``False`` and the specified dtype + matches the data type of the input array, the input array must be + returned; otherwise, a newly allocated array must be returned. + Defaults to ``True``. + device : str, optional + The device on which to place the returned array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 + + Returns + ------- + out : ndarray + An array having the specified data type. + + See Also + -------- + ndarray.astype + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([1, 2, 3]); arr + array([1, 2, 3]) + >>> np.astype(arr, np.float64) + array([1., 2., 3.]) + + Non-copy case: + + >>> arr = np.array([1, 2, 3]) + >>> arr_noncpy = np.astype(arr, arr.dtype, copy=False) + >>> np.shares_memory(arr, arr_noncpy) + True + + """ + if not (isinstance(x, np.ndarray) or isscalar(x)): + raise TypeError( + "Input should be a NumPy array or scalar. " + f"It is a {type(x)} instead." + ) + if device is not None and device != "cpu": + raise ValueError( + 'Device not understood. 
Only "cpu" is allowed, but received:' + f' {device}' + ) + return x.astype(dtype, copy=copy) + + +inf = PINF +nan = NAN +False_ = nt.bool(False) +True_ = nt.bool(True) + + +def extend_all(module): + existing = set(__all__) + mall = module.__all__ + for a in mall: + if a not in existing: + __all__.append(a) + + +from . import _asarray, _ufunc_config, arrayprint, fromnumeric +from ._asarray import * +from ._ufunc_config import * +from .arrayprint import * +from .fromnumeric import * +from .numerictypes import * +from .umath import * + +extend_all(fromnumeric) +extend_all(umath) +extend_all(numerictypes) +extend_all(arrayprint) +extend_all(_asarray) +extend_all(_ufunc_config) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/numeric.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/numeric.pyi new file mode 100644 index 0000000..919fe19 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/numeric.pyi @@ -0,0 +1,882 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + Final, + Never, + NoReturn, + SupportsAbs, + SupportsIndex, + TypeAlias, + TypeGuard, + TypeVar, + Unpack, + overload, +) +from typing import Literal as L + +import numpy as np +from numpy import ( + False_, + True_, + _OrderCF, + _OrderKACF, + # re-exports + bitwise_not, + broadcast, + complexfloating, + dtype, + flatiter, + float64, + floating, + from_dlpack, + # other + generic, + inf, + int_, + intp, + little_endian, + matmul, + nan, + ndarray, + nditer, + newaxis, + object_, + signedinteger, + timedelta64, + ufunc, + unsignedinteger, + vecdot, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _DTypeLike, + _NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, +) + +from .fromnumeric import all as all +from .fromnumeric import any as any +from .fromnumeric import argpartition as argpartition +from .fromnumeric import matrix_transpose as matrix_transpose +from .fromnumeric import mean as mean +from .multiarray import ( + # other + _Array, + _ConstructorEmpty, + _KwargsEmpty, + # re-exports + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + can_cast, + concatenate, + copyto, + dot, + empty, + empty_like, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + may_share_memory, + min_scalar_type, + nested_iters, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + where, + zeros, +) + +__all__ = [ + "newaxis", + "ndarray", + "flatiter", + "nditer", + "nested_iters", + "ufunc", + "arange", + "array", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "zeros", + "count_nonzero", + "empty", + "broadcast", + "dtype", + "fromstring", + "fromfile", + "frombuffer", + "from_dlpack", + "where", + "argwhere", + "copyto", + "concatenate", + "lexsort", + "astype", + "can_cast", + "promote_types", + "min_scalar_type", + "result_type", + "isfortran", + "empty_like", + "zeros_like", + "ones_like", + "correlate", + "convolve", + "inner", + "dot", + "outer", + "vdot", + "roll", + "rollaxis", + "moveaxis", + "cross", + "tensordot", + "little_endian", + "fromiter", + "array_equal", + "array_equiv", + "indices", + "fromfunction", + "isclose", + "isscalar", + "binary_repr", + "base_repr", + "ones", + "identity", + "allclose", + "putmask", + "flatnonzero", + "inf", + "nan", + 
"False_", + "True_", + "bitwise_not", + "full", + "full_like", + "matmul", + "vecdot", + "shares_memory", + "may_share_memory", +] + +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_AnyShapeT = TypeVar( + "_AnyShapeT", + tuple[()], + tuple[int], + tuple[int, int], + tuple[int, int, int], + tuple[int, int, int, int], + tuple[int, ...], +) + +_CorrelateMode: TypeAlias = L["valid", "same", "full"] + +@overload +def zeros_like( + a: _ArrayT, + dtype: None = ..., + order: _OrderKACF = ..., + subok: L[True] = ..., + shape: None = ..., + *, + device: L["cpu"] | None = ..., +) -> _ArrayT: ... +@overload +def zeros_like( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def zeros_like( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def zeros_like( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[Any]: ... + +ones: Final[_ConstructorEmpty] + +@overload +def ones_like( + a: _ArrayT, + dtype: None = ..., + order: _OrderKACF = ..., + subok: L[True] = ..., + shape: None = ..., + *, + device: L["cpu"] | None = ..., +) -> _ArrayT: ... +@overload +def ones_like( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ones_like( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ones_like( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[Any]: ... + +# TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview +# 1-D shape +@overload +def full( + shape: SupportsIndex, + fill_value: _ScalarT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[int], _ScalarT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[tuple[int], _DTypeT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[int], _ScalarT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[int], Any]: ... +# known shape +@overload +def full( + shape: _AnyShapeT, + fill_value: _ScalarT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_AnyShapeT, _ScalarT]: ... 
+@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[_AnyShapeT, _DTypeT]: ... +@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_AnyShapeT, _ScalarT]: ... +@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_AnyShapeT, Any]: ... +# unknown shape +@overload +def full( + shape: _ShapeLike, + fill_value: _ScalarT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> NDArray[_ScalarT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[Any, _DTypeT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> NDArray[_ScalarT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> NDArray[Any]: ... + +@overload +def full_like( + a: _ArrayT, + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: L[True] = ..., + shape: None = ..., + *, + device: L["cpu"] | None = ..., +) -> _ArrayT: ... +@overload +def full_like( + a: _ArrayLike[_ScalarT], + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def full_like( + a: Any, + fill_value: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def full_like( + a: Any, + fill_value: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[Any]: ... + +# +@overload +def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> np.intp: ... +@overload +def count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ... +@overload +def count_nonzero( + a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] +) -> NDArray[np.intp]: ... +@overload +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... + +# +def isfortran(a: NDArray[Any] | generic) -> bool: ... + +def argwhere(a: ArrayLike) -> NDArray[intp]: ... + +def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... + +@overload +def correlate( + a: _ArrayLike[Never], + v: _ArrayLike[Never], + mode: _CorrelateMode = ..., +) -> NDArray[Any]: ... +@overload +def correlate( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = ..., +) -> NDArray[np.bool]: ... +@overload +def correlate( + a: _ArrayLikeUInt_co, + v: _ArrayLikeUInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def correlate( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[signedinteger]: ... 
+@overload +def correlate( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = ..., +) -> NDArray[floating]: ... +@overload +def correlate( + a: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, + mode: _CorrelateMode = ..., +) -> NDArray[complexfloating]: ... +@overload +def correlate( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = ..., +) -> NDArray[timedelta64]: ... +@overload +def correlate( + a: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, + mode: _CorrelateMode = ..., +) -> NDArray[object_]: ... + +@overload +def convolve( + a: _ArrayLike[Never], + v: _ArrayLike[Never], + mode: _CorrelateMode = ..., +) -> NDArray[Any]: ... +@overload +def convolve( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = ..., +) -> NDArray[np.bool]: ... +@overload +def convolve( + a: _ArrayLikeUInt_co, + v: _ArrayLikeUInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def convolve( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[signedinteger]: ... +@overload +def convolve( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = ..., +) -> NDArray[floating]: ... +@overload +def convolve( + a: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, + mode: _CorrelateMode = ..., +) -> NDArray[complexfloating]: ... +@overload +def convolve( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = ..., +) -> NDArray[timedelta64]: ... +@overload +def convolve( + a: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, + mode: _CorrelateMode = ..., +) -> NDArray[object_]: ... + +@overload +def outer( + a: _ArrayLike[Never], + b: _ArrayLike[Never], + out: None = ..., +) -> NDArray[Any]: ... +@overload +def outer( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def outer( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + out: None = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def outer( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + out: None = ..., +) -> NDArray[signedinteger]: ... +@overload +def outer( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating]: ... +@overload +def outer( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + out: None = ..., +) -> NDArray[complexfloating]: ... +@overload +def outer( + a: _ArrayLikeTD64_co, + b: _ArrayLikeTD64_co, + out: None = ..., +) -> NDArray[timedelta64]: ... +@overload +def outer( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def outer( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def tensordot( + a: _ArrayLike[Never], + b: _ArrayLike[Never], + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[Any]: ... +@overload +def tensordot( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[np.bool]: ... +@overload +def tensordot( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[signedinteger]: ... 
+@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[floating]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[complexfloating]: ... +@overload +def tensordot( + a: _ArrayLikeTD64_co, + b: _ArrayLikeTD64_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[timedelta64]: ... +@overload +def tensordot( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[object_]: ... + +@overload +def roll( + a: _ArrayLike[_ScalarT], + shift: _ShapeLike, + axis: _ShapeLike | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def roll( + a: ArrayLike, + shift: _ShapeLike, + axis: _ShapeLike | None = ..., +) -> NDArray[Any]: ... + +def rollaxis( + a: NDArray[_ScalarT], + axis: int, + start: int = ..., +) -> NDArray[_ScalarT]: ... + +def moveaxis( + a: NDArray[_ScalarT], + source: _ShapeLike, + destination: _ShapeLike, +) -> NDArray[_ScalarT]: ... + +@overload +def cross( + a: _ArrayLike[Never], + b: _ArrayLike[Never], + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[Any]: ... +@overload +def cross( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NoReturn: ... +@overload +def cross( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def cross( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[signedinteger]: ... +@overload +def cross( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[floating]: ... +@overload +def cross( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def cross( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[object_]: ... + +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = ..., + sparse: L[False] = ..., +) -> NDArray[int_]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int], + sparse: L[True], +) -> tuple[NDArray[int_], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = ..., + *, + sparse: L[True], +) -> tuple[NDArray[int_], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: L[False] = ..., +) -> NDArray[_ScalarT]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: L[True], +) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike = ..., + sparse: L[False] = ..., +) -> NDArray[Any]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike, + sparse: L[True], +) -> tuple[NDArray[Any], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike = ..., + *, + sparse: L[True], +) -> tuple[NDArray[Any], ...]: ... 
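+
+# The ``indices`` overloads above mirror the runtime behaviour: a single
+# ndarray is returned for ``sparse=False`` and a tuple of ndarrays for
+# ``sparse=True``.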
+ +def fromfunction( + function: Callable[..., _T], + shape: Sequence[int], + *, + dtype: DTypeLike = ..., + like: _SupportsArrayFunc | None = ..., + **kwargs: Any, +) -> _T: ... + +def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... + +def binary_repr(num: SupportsIndex, width: int | None = ...) -> str: ... + +def base_repr( + number: SupportsAbs[float], + base: float = ..., + padding: SupportsIndex | None = ..., +) -> str: ... + +@overload +def identity( + n: int, + dtype: None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def identity( + n: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def identity( + n: int, + dtype: DTypeLike | None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +def allclose( + a: ArrayLike, + b: ArrayLike, + rtol: ArrayLike = ..., + atol: ArrayLike = ..., + equal_nan: bool = ..., +) -> bool: ... + +@overload +def isclose( + a: _ScalarLike_co, + b: _ScalarLike_co, + rtol: ArrayLike = ..., + atol: ArrayLike = ..., + equal_nan: bool = ..., +) -> np.bool: ... +@overload +def isclose( + a: ArrayLike, + b: ArrayLike, + rtol: ArrayLike = ..., + atol: ArrayLike = ..., + equal_nan: bool = ..., +) -> NDArray[np.bool]: ... + +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... + +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... + +@overload +def astype( + x: ndarray[_ShapeT, dtype], + dtype: _DTypeLike[_ScalarT], + /, + *, + copy: bool = ..., + device: L["cpu"] | None = ..., +) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... +@overload +def astype( + x: ndarray[_ShapeT, dtype], + dtype: DTypeLike, + /, + *, + copy: bool = ..., + device: L["cpu"] | None = ..., +) -> ndarray[_ShapeT, dtype]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/numerictypes.py b/.venv/lib/python3.12/site-packages/numpy/_core/numerictypes.py new file mode 100644 index 0000000..135dc1b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/numerictypes.py @@ -0,0 +1,633 @@ +""" +numerictypes: Define the numeric type objects + +This module is designed so "from numerictypes import \\*" is safe. 
+Exported symbols include: + + Dictionary with all registered number types (including aliases): + sctypeDict + + Type objects (not all will be available, depends on platform): + see variable sctypes for which ones you have + + Bit-width names + + int8 int16 int32 int64 + uint8 uint16 uint32 uint64 + float16 float32 float64 float96 float128 + complex64 complex128 complex192 complex256 + datetime64 timedelta64 + + c-based names + + bool + + object_ + + void, str_ + + byte, ubyte, + short, ushort + intc, uintc, + intp, uintp, + int_, uint, + longlong, ulonglong, + + single, csingle, + double, cdouble, + longdouble, clongdouble, + + As part of the type-hierarchy: xx -- is bit-width + + generic + +-> bool (kind=b) + +-> number + | +-> integer + | | +-> signedinteger (intxx) (kind=i) + | | | byte + | | | short + | | | intc + | | | intp + | | | int_ + | | | longlong + | | \\-> unsignedinteger (uintxx) (kind=u) + | | ubyte + | | ushort + | | uintc + | | uintp + | | uint + | | ulonglong + | +-> inexact + | +-> floating (floatxx) (kind=f) + | | half + | | single + | | double + | | longdouble + | \\-> complexfloating (complexxx) (kind=c) + | csingle + | cdouble + | clongdouble + +-> flexible + | +-> character + | | bytes_ (kind=S) + | | str_ (kind=U) + | | + | \\-> void (kind=V) + \\-> object_ (not used much) (kind=O) + +""" +import numbers +import warnings + +from numpy._utils import set_module + +from . import multiarray as ma +from .multiarray import ( + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + dtype, + is_busday, + ndarray, +) + +# we add more at the bottom +__all__ = [ + 'ScalarType', 'typecodes', 'issubdtype', 'datetime_data', + 'datetime_as_string', 'busday_offset', 'busday_count', + 'is_busday', 'busdaycalendar', 'isdtype' +] + +# we don't need all these imports, but we need to keep them for compatibility +# for users using np._core.numerictypes.UPPER_TABLE +# we don't export these for import *, but we do want them accessible +# as numerictypes.bool, etc. +from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029 + +from ._dtype import _kind_name +from ._string_helpers import ( # noqa: F401 + LOWER_TABLE, + UPPER_TABLE, + english_capitalize, + english_lower, + english_upper, +) +from ._type_aliases import allTypes, sctypeDict, sctypes + +# We use this later +generic = allTypes['generic'] + +genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', + 'int32', 'uint32', 'int64', 'uint64', + 'float16', 'float32', 'float64', 'float96', 'float128', + 'complex64', 'complex128', 'complex192', 'complex256', + 'object'] + +@set_module('numpy') +def maximum_sctype(t): + """ + Return the scalar type of highest precision of the same kind as the input. + + .. deprecated:: 2.0 + Use an explicit dtype like int64 or float64 instead. + + Parameters + ---------- + t : dtype or dtype specifier + The input data type. This can be a `dtype` object or an object that + is convertible to a `dtype`. + + Returns + ------- + out : dtype + The highest precision data type of the same kind (`dtype.kind`) as `t`. 
+
+    See Also
+    --------
+    obj2sctype, mintypecode, sctype2char
+    dtype
+
+    Examples
+    --------
+    >>> from numpy._core.numerictypes import maximum_sctype
+    >>> maximum_sctype(int)
+    <class 'numpy.int64'>
+    >>> maximum_sctype(np.uint8)
+    <class 'numpy.uint8'>
+    >>> maximum_sctype(complex)
+    <class 'numpy.complex256'> # may vary
+    >>> maximum_sctype(str)
+    <class 'numpy.str_'>
+    >>> maximum_sctype('i2')
+    <class 'numpy.int64'>
+    >>> maximum_sctype('f4')
+    <class 'numpy.float128'> # may vary
+
+    """
+
+    # Deprecated in NumPy 2.0, 2023-07-11
+    warnings.warn(
+        "`maximum_sctype` is deprecated. Use an explicit dtype like int64 "
+        "or float64 instead. (deprecated in NumPy 2.0)",
+        DeprecationWarning,
+        stacklevel=2
+    )
+
+    g = obj2sctype(t)
+    if g is None:
+        return t
+    t = g
+    base = _kind_name(dtype(t))
+    if base in sctypes:
+        return sctypes[base][-1]
+    else:
+        return t
+
+
+@set_module('numpy')
+def issctype(rep):
+    """
+    Determines whether the given object represents a scalar data-type.
+
+    Parameters
+    ----------
+    rep : any
+        If `rep` is an instance of a scalar dtype, True is returned. If not,
+        False is returned.
+
+    Returns
+    -------
+    out : bool
+        Boolean result of check whether `rep` is a scalar dtype.
+
+    See Also
+    --------
+    issubsctype, issubdtype, obj2sctype, sctype2char
+
+    Examples
+    --------
+    >>> from numpy._core.numerictypes import issctype
+    >>> issctype(np.int32)
+    True
+    >>> issctype(list)
+    False
+    >>> issctype(1.1)
+    False
+
+    Strings are also a scalar type:
+
+    >>> issctype(np.dtype('str'))
+    True
+
+    """
+    if not isinstance(rep, (type, dtype)):
+        return False
+    try:
+        res = obj2sctype(rep)
+        if res and res != object_:
+            return True
+        else:
+            return False
+    except Exception:
+        return False
+
+
+def obj2sctype(rep, default=None):
+    """
+    Return the scalar dtype or NumPy equivalent of Python type of an object.
+
+    Parameters
+    ----------
+    rep : any
+        The object of which the type is returned.
+    default : any, optional
+        If given, this is returned for objects whose types can not be
+        determined. If not given, None is returned for those objects.
+
+    Returns
+    -------
+    dtype : dtype or Python type
+        The data type of `rep`.
+
+    See Also
+    --------
+    sctype2char, issctype, issubsctype, issubdtype
+
+    Examples
+    --------
+    >>> from numpy._core.numerictypes import obj2sctype
+    >>> obj2sctype(np.int32)
+    <class 'numpy.int32'>
+    >>> obj2sctype(np.array([1., 2.]))
+    <class 'numpy.float64'>
+    >>> obj2sctype(np.array([1.j]))
+    <class 'numpy.complex128'>
+
+    >>> obj2sctype(dict)
+    <class 'numpy.object_'>
+    >>> obj2sctype('string')
+
+    >>> obj2sctype(1, default=list)
+    <class 'list'>
+
+    """
+    # prevent abstract classes being upcast
+    if isinstance(rep, type) and issubclass(rep, generic):
+        return rep
+    # extract dtype from arrays
+    if isinstance(rep, ndarray):
+        return rep.dtype.type
+    # fall back on dtype to convert
+    try:
+        res = dtype(rep)
+    except Exception:
+        return default
+    else:
+        return res.type
+
+
+@set_module('numpy')
+def issubclass_(arg1, arg2):
+    """
+    Determine if a class is a subclass of a second class.
+
+    `issubclass_` is equivalent to the Python built-in ``issubclass``,
+    except that it returns False instead of raising a TypeError if one
+    of the arguments is not a class.
+
+    Parameters
+    ----------
+    arg1 : class
+        Input class. True is returned if `arg1` is a subclass of `arg2`.
+    arg2 : class or tuple of classes.
+        Input class. If a tuple of classes, True is returned if `arg1` is a
+        subclass of any of the tuple elements.
+
+    Returns
+    -------
+    out : bool
+        Whether `arg1` is a subclass of `arg2` or not.
+ + See Also + -------- + issubsctype, issubdtype, issctype + + Examples + -------- + >>> np.issubclass_(np.int32, int) + False + >>> np.issubclass_(np.int32, float) + False + >>> np.issubclass_(np.float64, float) + True + + """ + try: + return issubclass(arg1, arg2) + except TypeError: + return False + + +@set_module('numpy') +def issubsctype(arg1, arg2): + """ + Determine if the first argument is a subclass of the second argument. + + Parameters + ---------- + arg1, arg2 : dtype or dtype specifier + Data-types. + + Returns + ------- + out : bool + The result. + + See Also + -------- + issctype, issubdtype, obj2sctype + + Examples + -------- + >>> from numpy._core import issubsctype + >>> issubsctype('S8', str) + False + >>> issubsctype(np.array([1]), int) + True + >>> issubsctype(np.array([1]), float) + False + + """ + return issubclass(obj2sctype(arg1), obj2sctype(arg2)) + + +class _PreprocessDTypeError(Exception): + pass + + +def _preprocess_dtype(dtype): + """ + Preprocess dtype argument by: + 1. fetching type from a data type + 2. verifying that types are built-in NumPy dtypes + """ + if isinstance(dtype, ma.dtype): + dtype = dtype.type + if isinstance(dtype, ndarray) or dtype not in allTypes.values(): + raise _PreprocessDTypeError + return dtype + + +@set_module('numpy') +def isdtype(dtype, kind): + """ + Determine if a provided dtype is of a specified data type ``kind``. + + This function only supports built-in NumPy's data types. + Third-party dtypes are not yet supported. + + Parameters + ---------- + dtype : dtype + The input dtype. + kind : dtype or str or tuple of dtypes/strs. + dtype or dtype kind. Allowed dtype kinds are: + * ``'bool'`` : boolean kind + * ``'signed integer'`` : signed integer data types + * ``'unsigned integer'`` : unsigned integer data types + * ``'integral'`` : integer data types + * ``'real floating'`` : real-valued floating-point data types + * ``'complex floating'`` : complex floating-point data types + * ``'numeric'`` : numeric data types + + Returns + ------- + out : bool + + See Also + -------- + issubdtype + + Examples + -------- + >>> import numpy as np + >>> np.isdtype(np.float32, np.float64) + False + >>> np.isdtype(np.float32, "real floating") + True + >>> np.isdtype(np.complex128, ("real floating", "complex floating")) + True + + """ + try: + dtype = _preprocess_dtype(dtype) + except _PreprocessDTypeError: + raise TypeError( + "dtype argument must be a NumPy dtype, " + f"but it is a {type(dtype)}." + ) from None + + input_kinds = kind if isinstance(kind, tuple) else (kind,) + + processed_kinds = set() + + for kind in input_kinds: + if kind == "bool": + processed_kinds.add(allTypes["bool"]) + elif kind == "signed integer": + processed_kinds.update(sctypes["int"]) + elif kind == "unsigned integer": + processed_kinds.update(sctypes["uint"]) + elif kind == "integral": + processed_kinds.update(sctypes["int"] + sctypes["uint"]) + elif kind == "real floating": + processed_kinds.update(sctypes["float"]) + elif kind == "complex floating": + processed_kinds.update(sctypes["complex"]) + elif kind == "numeric": + processed_kinds.update( + sctypes["int"] + sctypes["uint"] + + sctypes["float"] + sctypes["complex"] + ) + elif isinstance(kind, str): + raise ValueError( + "kind argument is a string, but" + f" {kind!r} is not a known kind name." + ) + else: + try: + kind = _preprocess_dtype(kind) + except _PreprocessDTypeError: + raise TypeError( + "kind argument must be comprised of " + "NumPy dtypes or strings only, " + f"but is a {type(kind)}." 
+ ) from None + processed_kinds.add(kind) + + return dtype in processed_kinds + + +@set_module('numpy') +def issubdtype(arg1, arg2): + r""" + Returns True if first argument is a typecode lower/equal in type hierarchy. + + This is like the builtin :func:`issubclass`, but for `dtype`\ s. + + Parameters + ---------- + arg1, arg2 : dtype_like + `dtype` or object coercible to one + + Returns + ------- + out : bool + + See Also + -------- + :ref:`arrays.scalars` : Overview of the numpy type hierarchy. + + Examples + -------- + `issubdtype` can be used to check the type of arrays: + + >>> ints = np.array([1, 2, 3], dtype=np.int32) + >>> np.issubdtype(ints.dtype, np.integer) + True + >>> np.issubdtype(ints.dtype, np.floating) + False + + >>> floats = np.array([1, 2, 3], dtype=np.float32) + >>> np.issubdtype(floats.dtype, np.integer) + False + >>> np.issubdtype(floats.dtype, np.floating) + True + + Similar types of different sizes are not subdtypes of each other: + + >>> np.issubdtype(np.float64, np.float32) + False + >>> np.issubdtype(np.float32, np.float64) + False + + but both are subtypes of `floating`: + + >>> np.issubdtype(np.float64, np.floating) + True + >>> np.issubdtype(np.float32, np.floating) + True + + For convenience, dtype-like objects are allowed too: + + >>> np.issubdtype('S1', np.bytes_) + True + >>> np.issubdtype('i4', np.signedinteger) + True + + """ + if not issubclass_(arg1, generic): + arg1 = dtype(arg1).type + if not issubclass_(arg2, generic): + arg2 = dtype(arg2).type + + return issubclass(arg1, arg2) + + +@set_module('numpy') +def sctype2char(sctype): + """ + Return the string representation of a scalar dtype. + + Parameters + ---------- + sctype : scalar dtype or object + If a scalar dtype, the corresponding string character is + returned. If an object, `sctype2char` tries to infer its scalar type + and then return the corresponding string character. + + Returns + ------- + typechar : str + The string character corresponding to the scalar type. + + Raises + ------ + ValueError + If `sctype` is an object for which the type can not be inferred. + + See Also + -------- + obj2sctype, issctype, issubsctype, mintypecode + + Examples + -------- + >>> from numpy._core.numerictypes import sctype2char + >>> for sctype in [np.int32, np.double, np.cdouble, np.bytes_, np.ndarray]: + ... 
print(sctype2char(sctype)) + l # may vary + d + D + S + O + + >>> x = np.array([1., 2-1.j]) + >>> sctype2char(x) + 'D' + >>> sctype2char(list) + 'O' + + """ + sctype = obj2sctype(sctype) + if sctype is None: + raise ValueError("unrecognized type") + if sctype not in sctypeDict.values(): + # for compatibility + raise KeyError(sctype) + return dtype(sctype).char + + +def _scalar_type_key(typ): + """A ``key`` function for `sorted`.""" + dt = dtype(typ) + return (dt.kind.lower(), dt.itemsize) + + +ScalarType = [int, float, complex, bool, bytes, str, memoryview] +ScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key) +ScalarType = tuple(ScalarType) + + +# Now add the types we've determined to this module +for key in allTypes: + globals()[key] = allTypes[key] + __all__.append(key) + +del key + +typecodes = {'Character': 'c', + 'Integer': 'bhilqnp', + 'UnsignedInteger': 'BHILQNP', + 'Float': 'efdg', + 'Complex': 'FDG', + 'AllInteger': 'bBhHiIlLqQnNpP', + 'AllFloat': 'efdgFDG', + 'Datetime': 'Mm', + 'All': '?bhilqnpBHILQNPefdgFDGSUVOMm'} + +# backwards compatibility --- deprecated name +# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py) +typeDict = sctypeDict + +def _register_types(): + numbers.Integral.register(integer) + numbers.Complex.register(inexact) + numbers.Real.register(floating) + numbers.Number.register(number) + + +_register_types() diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/numerictypes.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/numerictypes.pyi new file mode 100644 index 0000000..753fe34 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/numerictypes.pyi @@ -0,0 +1,192 @@ +import builtins +from typing import Any, TypedDict, type_check_only +from typing import Literal as L + +import numpy as np +from numpy import ( + bool, + bool_, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complexfloating, + csingle, + datetime64, + double, + dtype, + flexible, + float16, + float32, + float64, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from numpy._typing import DTypeLike +from numpy._typing._extended_precision import complex192, complex256, float96, float128 + +from ._type_aliases import sctypeDict # noqa: F401 +from .multiarray import ( + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + is_busday, +) + +__all__ = [ + "ScalarType", + "typecodes", + "issubdtype", + "datetime_data", + "datetime_as_string", + "busday_offset", + "busday_count", + "is_busday", + "busdaycalendar", + "isdtype", + "generic", + "unsignedinteger", + "character", + "inexact", + "number", + "integer", + "flexible", + "complexfloating", + "signedinteger", + "floating", + "bool", + "float16", + "float32", + "float64", + "longdouble", + "complex64", + "complex128", + "clongdouble", + "bytes_", + "str_", + "void", + "object_", + "datetime64", + "timedelta64", + "int8", + "byte", + "uint8", + "ubyte", + "int16", + "short", + "uint16", + "ushort", + "int32", + "intc", + "uint32", + "uintc", + "int64", + "long", + "uint64", + "ulong", + "longlong", + "ulonglong", + "intp", + "uintp", + "double", + "cdouble", + "single", + "csingle", + "half", + "bool_", + "int_", 
+ "uint", + "float96", + "float128", + "complex192", + "complex256", +] + +@type_check_only +class _TypeCodes(TypedDict): + Character: L['c'] + Integer: L['bhilqnp'] + UnsignedInteger: L['BHILQNP'] + Float: L['efdg'] + Complex: L['FDG'] + AllInteger: L['bBhHiIlLqQnNpP'] + AllFloat: L['efdgFDG'] + Datetime: L['Mm'] + All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] + +def isdtype(dtype: dtype | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... + +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... + +typecodes: _TypeCodes +ScalarType: tuple[ + type[int], + type[float], + type[complex], + type[builtins.bool], + type[bytes], + type[str], + type[memoryview], + type[np.bool], + type[csingle], + type[cdouble], + type[clongdouble], + type[half], + type[single], + type[double], + type[longdouble], + type[byte], + type[short], + type[intc], + type[long], + type[longlong], + type[timedelta64], + type[datetime64], + type[object_], + type[bytes_], + type[str_], + type[ubyte], + type[ushort], + type[uintc], + type[ulong], + type[ulonglong], + type[void], +] diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/overrides.py b/.venv/lib/python3.12/site-packages/numpy/_core/overrides.py new file mode 100644 index 0000000..6414710 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/overrides.py @@ -0,0 +1,183 @@ +"""Implementation of __array_function__ overrides from NEP-18.""" +import collections +import functools + +from numpy._core._multiarray_umath import ( + _ArrayFunctionDispatcher, + _get_implementing_args, + add_docstring, +) +from numpy._utils import set_module # noqa: F401 +from numpy._utils._inspect import getargspec + +ARRAY_FUNCTIONS = set() + +array_function_like_doc = ( + """like : array_like, optional + Reference object to allow the creation of arrays which are not + NumPy arrays. If an array-like passed in as ``like`` supports + the ``__array_function__`` protocol, the result will be defined + by it. In this case, it ensures the creation of an array object + compatible with that passed in via this argument.""" +) + +def get_array_function_like_doc(public_api, docstring_template=""): + ARRAY_FUNCTIONS.add(public_api) + docstring = public_api.__doc__ or docstring_template + return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc) + +def finalize_array_function_like(public_api): + public_api.__doc__ = get_array_function_like_doc(public_api) + return public_api + + +add_docstring( + _ArrayFunctionDispatcher, + """ + Class to wrap functions with checks for __array_function__ overrides. + + All arguments are required, and can only be passed by position. + + Parameters + ---------- + dispatcher : function or None + The dispatcher function that returns a single sequence-like object + of all arguments relevant. It must have the same signature (except + the default values) as the actual implementation. + If ``None``, this is a ``like=`` dispatcher and the + ``_ArrayFunctionDispatcher`` must be called with ``like`` as the + first (additional and positional) argument. + implementation : function + Function that implements the operation on NumPy arrays without + overrides. Arguments passed calling the ``_ArrayFunctionDispatcher`` + will be forwarded to this (and the ``dispatcher``) as if using + ``*args, **kwargs``. + + Attributes + ---------- + _implementation : function + The original implementation passed in. 
+    """)
+
+
+# exposed for testing purposes; used internally by _ArrayFunctionDispatcher
+add_docstring(
+    _get_implementing_args,
+    """
+    Collect arguments on which to call __array_function__.
+
+    Parameters
+    ----------
+    relevant_args : iterable of array-like
+        Iterable of possibly array-like arguments to check for
+        __array_function__ methods.
+
+    Returns
+    -------
+    Sequence of arguments with __array_function__ methods, in the order in
+    which they should be called.
+    """)
+
+
+ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
+
+
+def verify_matching_signatures(implementation, dispatcher):
+    """Verify that a dispatcher function has the right signature."""
+    implementation_spec = ArgSpec(*getargspec(implementation))
+    dispatcher_spec = ArgSpec(*getargspec(dispatcher))
+
+    if (implementation_spec.args != dispatcher_spec.args or
+            implementation_spec.varargs != dispatcher_spec.varargs or
+            implementation_spec.keywords != dispatcher_spec.keywords or
+            (bool(implementation_spec.defaults) !=
+             bool(dispatcher_spec.defaults)) or
+            (implementation_spec.defaults is not None and
+             len(implementation_spec.defaults) !=
+             len(dispatcher_spec.defaults))):
+        raise RuntimeError('implementation and dispatcher for %s have '
+                           'different function signatures' % implementation)
+
+    if implementation_spec.defaults is not None:
+        if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
+            raise RuntimeError('dispatcher functions can only use None for '
+                               'default argument values')
+
+
+def array_function_dispatch(dispatcher=None, module=None, verify=True,
+                            docs_from_dispatcher=False):
+    """Decorator for adding dispatch with the __array_function__ protocol.
+
+    See NEP-18 for example usage.
+
+    Parameters
+    ----------
+    dispatcher : callable or None
+        Function that when called like ``dispatcher(*args, **kwargs)`` with
+        arguments from the NumPy function call returns an iterable of
+        array-like arguments to check for ``__array_function__``.
+
+        If `None`, the first argument is used as the single `like=` argument
+        and not passed on.  A function implementing `like=` must call its
+        dispatcher with `like` as the first non-keyword argument.
+    module : str, optional
+        __module__ attribute to set on new function, e.g., ``module='numpy'``.
+        By default, module is copied from the decorated function.
+    verify : bool, optional
+        If True, verify that the signatures of the dispatcher and the
+        decorated function match exactly: all required and optional arguments
+        should appear in order with the same names, but the default values
+        for all optional arguments should be ``None``. Only disable
+        verification if the dispatcher's signature needs to deviate for some
+        particular reason, e.g., because the function has a signature like
+        ``func(*args, **kwargs)``.
+    docs_from_dispatcher : bool, optional
+        If True, copy docs from the dispatcher function onto the dispatched
+        function, rather than from the implementation. This is useful for
+        functions defined in C, which otherwise don't have docstrings.
+
+    Returns
+    -------
+    Function suitable for decorating the implementation of a NumPy function.
+ + """ + def decorator(implementation): + if verify: + if dispatcher is not None: + verify_matching_signatures(implementation, dispatcher) + else: + # Using __code__ directly similar to verify_matching_signature + co = implementation.__code__ + last_arg = co.co_argcount + co.co_kwonlyargcount - 1 + last_arg = co.co_varnames[last_arg] + if last_arg != "like" or co.co_kwonlyargcount == 0: + raise RuntimeError( + "__array_function__ expects `like=` to be the last " + "argument and a keyword-only argument. " + f"{implementation} does not seem to comply.") + + if docs_from_dispatcher: + add_docstring(implementation, dispatcher.__doc__) + + public_api = _ArrayFunctionDispatcher(dispatcher, implementation) + public_api = functools.wraps(implementation)(public_api) + + if module is not None: + public_api.__module__ = module + + ARRAY_FUNCTIONS.add(public_api) + + return public_api + + return decorator + + +def array_function_from_dispatcher( + implementation, module=None, verify=True, docs_from_dispatcher=True): + """Like array_function_dispatcher, but with function arguments flipped.""" + + def decorator(dispatcher): + return array_function_dispatch( + dispatcher, module, verify=verify, + docs_from_dispatcher=docs_from_dispatcher)(implementation) + return decorator diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/overrides.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/overrides.pyi new file mode 100644 index 0000000..0545319 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/overrides.pyi @@ -0,0 +1,48 @@ +from collections.abc import Callable, Iterable +from typing import Any, Final, NamedTuple, ParamSpec, TypeVar + +from numpy._typing import _SupportsArrayFunc + +_T = TypeVar("_T") +_Tss = ParamSpec("_Tss") +_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) + +### + +ARRAY_FUNCTIONS: set[Callable[..., Any]] = ... +array_function_like_doc: Final[str] = ... + +class ArgSpec(NamedTuple): + args: list[str] + varargs: str | None + keywords: str | None + defaults: tuple[Any, ...] + +def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... + +# +def verify_matching_signatures( + implementation: Callable[_Tss, object], + dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], +) -> None: ... + +# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with +# the original wrapped callable stored in the `._implementation` attribute. It checks +# for any `__array_function__` of the values of specific arguments that the dispatcher +# specifies. Since the dispatcher only returns an iterable of passed array-like args, +# this overridable behaviour is impossible to annotate. +def array_function_dispatch( + dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = False, +) -> Callable[[_FuncT], _FuncT]: ... + +# +def array_function_from_dispatcher( + implementation: Callable[_Tss, _T], + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = True, +) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... 
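As a usage note for the dispatch machinery above: a minimal sketch of applying `array_function_dispatch` to a plain Python implementation, assuming only the NEP-18 behaviour documented in `overrides.py` (`my_sum` and `_my_sum_dispatcher` are hypothetical names chosen for illustration, not NumPy APIs):

    import numpy as np
    from numpy._core.overrides import array_function_dispatch


    def _my_sum_dispatcher(a, axis=None):
        # Return the arguments whose __array_function__ should be consulted;
        # the signature mirrors the implementation, with None defaults.
        return (a,)


    @array_function_dispatch(_my_sum_dispatcher, module='numpy')
    def my_sum(a, axis=None):
        # Plain implementation, used when no argument overrides the protocol.
        return np.add.reduce(np.asarray(a), axis=axis)


    print(my_sum([1, 2, 3]))  # -> 6; ndarray subclasses may intercept
                              # the call via __array_function__

Note that the default `verify=True` runs exactly the `verify_matching_signatures` check defined above, which is why the dispatcher repeats the implementation's signature with `None` defaults.
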
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/printoptions.py b/.venv/lib/python3.12/site-packages/numpy/_core/printoptions.py new file mode 100644 index 0000000..5d6f963 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/printoptions.py @@ -0,0 +1,32 @@ +""" +Stores and defines the low-level format_options context variable. + +This is defined in its own file outside of the arrayprint module +so we can import it from C while initializing the multiarray +C module during import without introducing circular dependencies. +""" + +import sys +from contextvars import ContextVar + +__all__ = ["format_options"] + +default_format_options_dict = { + "edgeitems": 3, # repr N leading and trailing items of each dimension + "threshold": 1000, # total items > triggers array summarization + "floatmode": "maxprec", + "precision": 8, # precision of floating point representations + "suppress": False, # suppress printing small floating values in exp format + "linewidth": 75, + "nanstr": "nan", + "infstr": "inf", + "sign": "-", + "formatter": None, + # Internally stored as an int to simplify comparisons; converted from/to + # str/False on the way in/out. + 'legacy': sys.maxsize, + 'override_repr': None, +} + +format_options = ContextVar( + "format_options", default=default_format_options_dict) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/printoptions.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/printoptions.pyi new file mode 100644 index 0000000..bd7c7b4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/printoptions.pyi @@ -0,0 +1,28 @@ +from collections.abc import Callable +from contextvars import ContextVar +from typing import Any, Final, TypedDict + +from .arrayprint import _FormatDict + +__all__ = ["format_options"] + +### + +class _FormatOptionsDict(TypedDict): + edgeitems: int + threshold: int + floatmode: str + precision: int + suppress: bool + linewidth: int + nanstr: str + infstr: str + sign: str + formatter: _FormatDict | None + legacy: int + override_repr: Callable[[Any], str] | None + +### + +default_format_options_dict: Final[_FormatOptionsDict] = ... +format_options: ContextVar[_FormatOptionsDict] diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/records.py b/.venv/lib/python3.12/site-packages/numpy/_core/records.py new file mode 100644 index 0000000..39bcf4b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/records.py @@ -0,0 +1,1089 @@ +""" +This module contains a set of functions for record arrays. +""" +import os +import warnings +from collections import Counter +from contextlib import nullcontext + +from numpy._utils import set_module + +from . import numeric as sb +from . 
import numerictypes as nt +from .arrayprint import _get_legacy_print_mode + +# All of the functions allow formats to be a dtype +__all__ = [ + 'record', 'recarray', 'format_parser', 'fromarrays', 'fromrecords', + 'fromstring', 'fromfile', 'array', 'find_duplicate', +] + + +ndarray = sb.ndarray + +_byteorderconv = {'b': '>', + 'l': '<', + 'n': '=', + 'B': '>', + 'L': '<', + 'N': '=', + 'S': 's', + 's': 's', + '>': '>', + '<': '<', + '=': '=', + '|': '|', + 'I': '|', + 'i': '|'} + +# formats regular expression +# allows multidimensional spec with a tuple syntax in front +# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' +# are equally allowed + +numfmt = nt.sctypeDict + + +@set_module('numpy.rec') +def find_duplicate(list): + """Find duplication in a list, return a list of duplicated elements""" + return [ + item + for item, counts in Counter(list).items() + if counts > 1 + ] + + +@set_module('numpy.rec') +class format_parser: + """ + Class to convert formats, names, titles description to a dtype. + + After constructing the format_parser object, the dtype attribute is + the converted data-type: + ``dtype = format_parser(formats, names, titles).dtype`` + + Attributes + ---------- + dtype : dtype + The converted data-type. + + Parameters + ---------- + formats : str or list of str + The format description, either specified as a string with + comma-separated format descriptions in the form ``'f8, i4, S5'``, or + a list of format description strings in the form + ``['f8', 'i4', 'S5']``. + names : str or list/tuple of str + The field names, either specified as a comma-separated string in the + form ``'col1, col2, col3'``, or as a list or tuple of strings in the + form ``['col1', 'col2', 'col3']``. + An empty list can be used, in that case default field names + ('f0', 'f1', ...) are used. + titles : sequence + Sequence of title strings. An empty list can be used to leave titles + out. + aligned : bool, optional + If True, align the fields by padding as the C-compiler would. + Default is False. + byteorder : str, optional + If specified, all the fields will be changed to the + provided byte-order. Otherwise, the default byte-order is + used. For all available string specifiers, see `dtype.newbyteorder`. + + See Also + -------- + numpy.dtype, numpy.typename + + Examples + -------- + >>> import numpy as np + >>> np.rec.format_parser(['>> np.rec.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + ... []).dtype + dtype([('col1', '>> np.rec.format_parser([' len(titles): + self._titles += [None] * (self._nfields - len(titles)) + + def _createdtype(self, byteorder): + dtype = sb.dtype({ + 'names': self._names, + 'formats': self._f_formats, + 'offsets': self._offsets, + 'titles': self._titles, + }) + if byteorder is not None: + byteorder = _byteorderconv[byteorder[0]] + dtype = dtype.newbyteorder(byteorder) + + self.dtype = dtype + + +class record(nt.void): + """A data-type scalar that allows field access as attribute lookup. 
+ """ + + # manually set name and module so that this class's type shows up + # as numpy.record when printed + __name__ = 'record' + __module__ = 'numpy' + + def __repr__(self): + if _get_legacy_print_mode() <= 113: + return self.__str__() + return super().__repr__() + + def __str__(self): + if _get_legacy_print_mode() <= 113: + return str(self.item()) + return super().__str__() + + def __getattribute__(self, attr): + if attr in ('setfield', 'getfield', 'dtype'): + return nt.void.__getattribute__(self, attr) + try: + return nt.void.__getattribute__(self, attr) + except AttributeError: + pass + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + obj = self.getfield(*res[:2]) + # if it has fields return a record, + # otherwise return the object + try: + dt = obj.dtype + except AttributeError: + # happens if field is Object type + return obj + if dt.names is not None: + return obj.view((self.__class__, obj.dtype)) + return obj + else: + raise AttributeError(f"'record' object has no attribute '{attr}'") + + def __setattr__(self, attr, val): + if attr in ('setfield', 'getfield', 'dtype'): + raise AttributeError(f"Cannot set '{attr}' attribute") + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + return self.setfield(val, *res[:2]) + elif getattr(self, attr, None): + return nt.void.__setattr__(self, attr, val) + else: + raise AttributeError(f"'record' object has no attribute '{attr}'") + + def __getitem__(self, indx): + obj = nt.void.__getitem__(self, indx) + + # copy behavior of record.__getattribute__, + if isinstance(obj, nt.void) and obj.dtype.names is not None: + return obj.view((self.__class__, obj.dtype)) + else: + # return a single element + return obj + + def pprint(self): + """Pretty-print all fields.""" + # pretty-print all fields + names = self.dtype.names + maxlen = max(len(name) for name in names) + fmt = '%% %ds: %%s' % maxlen + rows = [fmt % (name, getattr(self, name)) for name in names] + return "\n".join(rows) + +# The recarray is almost identical to a standard array (which supports +# named fields already) The biggest difference is that it can use +# attribute-lookup to find the fields and it is constructed using +# a record. + +# If byteorder is given it forces a particular byteorder on all +# the fields (and any subfields) + + +@set_module("numpy.rec") +class recarray(ndarray): + """Construct an ndarray that allows field access using attributes. + + Arrays may have a data-types containing fields, analogous + to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, + where each entry in the array is a pair of ``(int, float)``. Normally, + these attributes are accessed using dictionary lookups such as ``arr['x']`` + and ``arr['y']``. Record arrays allow the fields to be accessed as members + of the array, using ``arr.x`` and ``arr.y``. + + Parameters + ---------- + shape : tuple + Shape of output array. + dtype : data-type, optional + The desired data-type. By default, the data-type is determined + from `formats`, `names`, `titles`, `aligned` and `byteorder`. + formats : list of data-types, optional + A list containing the data-types for the different columns, e.g. + ``['i4', 'f8', 'i4']``. `formats` does *not* support the new + convention of using types directly, i.e. ``(int, float, int)``. + Note that `formats` must be a list, not a tuple. + Given that `formats` is somewhat limited, we recommend specifying + `dtype` instead. 
+ names : tuple of str, optional + The name of each column, e.g. ``('x', 'y', 'z')``. + buf : buffer, optional + By default, a new array is created of the given shape and data-type. + If `buf` is specified and is an object exposing the buffer interface, + the array will use the memory from the existing buffer. In this case, + the `offset` and `strides` keywords are available. + + Other Parameters + ---------------- + titles : tuple of str, optional + Aliases for column names. For example, if `names` were + ``('x', 'y', 'z')`` and `titles` is + ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then + ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. + byteorder : {'<', '>', '='}, optional + Byte-order for all fields. + aligned : bool, optional + Align the fields in memory as the C-compiler would. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + offset : int, optional + Start reading buffer (`buf`) from this offset onwards. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + rec : recarray + Empty array of the given shape and type. + + See Also + -------- + numpy.rec.fromrecords : Construct a record array from data. + numpy.record : fundamental data-type for `recarray`. + numpy.rec.format_parser : determine data-type from formats, names, titles. + + Notes + ----- + This constructor can be compared to ``empty``: it creates a new record + array but does not fill it with data. To create a record array from data, + use one of the following methods: + + 1. Create a standard ndarray and convert it to a record array, + using ``arr.view(np.recarray)`` + 2. Use the `buf` keyword. + 3. Use `np.rec.fromrecords`. + + Examples + -------- + Create an array with two fields, ``x`` and ``y``: + + >>> import numpy as np + >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x + array([(1., 2), (3., 4)], dtype=[('x', '>> x['x'] + array([1., 3.]) + + View the array as a record array: + + >>> x = x.view(np.recarray) + + >>> x.x + array([1., 3.]) + + >>> x.y + array([2, 4]) + + Create a new, empty record array: + + >>> np.recarray((2,), + ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP + rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), + (3471280, 1.2134086255804012e-316, 0)], + dtype=[('x', ' 0 or self.shape == (0,): + lst = sb.array2string( + self, separator=', ', prefix=prefix, suffix=',') + else: + # show zero-length shape unless it is (0,) + lst = f"[], shape={repr(self.shape)}" + + lf = '\n' + ' ' * len(prefix) + if _get_legacy_print_mode() <= 113: + lf = ' ' + lf # trailing space + return fmt % (lst, lf, repr_dtype) + + def field(self, attr, val=None): + if isinstance(attr, int): + names = ndarray.__getattribute__(self, 'dtype').names + attr = names[attr] + + fielddict = ndarray.__getattribute__(self, 'dtype').fields + + res = fielddict[attr][:2] + + if val is None: + obj = self.getfield(*res) + if obj.dtype.names is not None: + return obj + return obj.view(ndarray) + else: + return self.setfield(val, *res) + + +def _deprecate_shape_0_as_None(shape): + if shape == 0: + warnings.warn( + "Passing `shape=0` to have the shape be inferred is deprecated, " + "and in future will be equivalent to `shape=(0,)`. 
To infer " + "the shape and suppress this warning, pass `shape=None` instead.", + FutureWarning, stacklevel=3) + return None + else: + return shape + + +@set_module("numpy.rec") +def fromarrays(arrayList, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create a record array from a (flat) list of arrays + + Parameters + ---------- + arrayList : list or tuple + List of array-like objects (such as lists, tuples, + and ndarrays). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of the resulting array. If not provided, inferred from + ``arrayList[0]``. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.rec.format_parser` to construct a dtype. See that function for + detailed documentation. + + Returns + ------- + np.recarray + Record array consisting of given arrayList columns. + + Examples + -------- + >>> x1=np.array([1,2,3,4]) + >>> x2=np.array(['a','dd','xyz','12']) + >>> x3=np.array([1.1,2,3,4]) + >>> r = np.rec.fromarrays([x1,x2,x3],names='a,b,c') + >>> print(r[1]) + (2, 'dd', 2.0) # may vary + >>> x1[1]=34 + >>> r.a + array([1, 2, 3, 4]) + + >>> x1 = np.array([1, 2, 3, 4]) + >>> x2 = np.array(['a', 'dd', 'xyz', '12']) + >>> x3 = np.array([1.1, 2, 3,4]) + >>> r = np.rec.fromarrays( + ... [x1, x2, x3], + ... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)])) + >>> r + rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ), + (4, b'12', 4. )], + dtype=[('a', ' 0: + shape = shape[:-nn] + + _array = recarray(shape, descr) + + # populate the record array (makes a copy) + for k, obj in enumerate(arrayList): + nn = descr[k].ndim + testshape = obj.shape[:obj.ndim - nn] + name = _names[k] + if testshape != shape: + raise ValueError(f'array-shape mismatch in array {k} ("{name}")') + + _array[name] = obj + + return _array + + +@set_module("numpy.rec") +def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None): + """Create a recarray from a list of records in text form. + + Parameters + ---------- + recList : sequence + data in the same field may be heterogeneous - they will be promoted + to the highest data type. + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + If both `formats` and `dtype` are None, then this will auto-detect + formats. Use list of tuples rather than list of lists for faster + processing. + + Returns + ------- + np.recarray + record array consisting of given recList rows. + + Examples + -------- + >>> r=np.rec.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], + ... names='col1,col2,col3') + >>> print(r[0]) + (456, 'dbe', 1.2) + >>> r.col1 + array([456, 2]) + >>> r.col2 + array(['dbe', 'de'], dtype='>> import pickle + >>> pickle.loads(pickle.dumps(r)) + rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)], + dtype=[('col1', ' 1: + raise ValueError("Can only deal with 1-d array.") + _array = recarray(shape, descr) + for k in range(_array.size): + _array[k] = tuple(recList[k]) + # list of lists instead of list of tuples ? + # 2018-02-07, 1.14.1 + warnings.warn( + "fromrecords expected a list of tuples, may have received a list " + "of lists instead. 
In the future that will raise an error", + FutureWarning, stacklevel=2) + return _array + else: + if shape is not None and retval.shape != shape: + retval.shape = shape + + res = retval.view(recarray) + + return res + + +@set_module("numpy.rec") +def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + r"""Create a record array from binary data + + Note that despite the name of this function it does not accept `str` + instances. + + Parameters + ---------- + datastring : bytes-like + Buffer of binary data + dtype : data-type, optional + Valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the buffer to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + + Returns + ------- + np.recarray + Record array view into the data in datastring. This will be readonly + if `datastring` is readonly. + + See Also + -------- + numpy.frombuffer + + Examples + -------- + >>> a = b'\x01\x02\x03abc' + >>> np.rec.fromstring(a, dtype='u1,u1,u1,S3') + rec.array([(1, 2, 3, b'abc')], + dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')]) + + >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64), + ... ('GradeLevel', np.int32)] + >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), + ... ('Aadi', 66.6, 6)], dtype=grades_dtype) + >>> np.rec.fromstring(grades_array.tobytes(), dtype=grades_dtype) + rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)], + dtype=[('Name', '>> s = '\x01\x02\x03abc' + >>> np.rec.fromstring(s, dtype='u1,u1,u1,S3') + Traceback (most recent call last): + ... + TypeError: a bytes-like object is required, not 'str' + """ + + if dtype is None and formats is None: + raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + itemsize = descr.itemsize + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape in (None, -1): + shape = (len(datastring) - offset) // itemsize + + _array = recarray(shape, descr, buf=datastring, offset=offset) + return _array + +def get_remaining_size(fd): + pos = fd.tell() + try: + fd.seek(0, 2) + return fd.tell() - pos + finally: + fd.seek(pos, 0) + + +@set_module("numpy.rec") +def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create an array from binary file data + + Parameters + ---------- + fd : str or file type + If file is a string or a path-like object then that file is opened, + else it is assumed to be a file object. The file object must + support random access (i.e. it must have tell and seek methods). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + offset : int, optional + Position in the file to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation + + Returns + ------- + np.recarray + record array consisting of data enclosed in file. 
+ + Examples + -------- + >>> from tempfile import TemporaryFile + >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a[5] = (0.5,10,'abcde') + >>> + >>> fd=TemporaryFile() + >>> a = a.view(a.dtype.newbyteorder('<')) + >>> a.tofile(fd) + >>> + >>> _ = fd.seek(0) + >>> r=np.rec.fromfile(fd, formats='f8,i4,a5', shape=10, + ... byteorder='<') + >>> print(r[5]) + (0.5, 10, b'abcde') + >>> r.shape + (10,) + """ + + if dtype is None and formats is None: + raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape is None: + shape = (-1,) + elif isinstance(shape, int): + shape = (shape,) + + if hasattr(fd, 'readinto'): + # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase + # interface. Example of fd: gzip, BytesIO, BufferedReader + # file already opened + ctx = nullcontext(fd) + else: + # open file + ctx = open(os.fspath(fd), 'rb') + + with ctx as fd: + if offset > 0: + fd.seek(offset, 1) + size = get_remaining_size(fd) + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser( + formats, names, titles, aligned, byteorder + ).dtype + + itemsize = descr.itemsize + + shapeprod = sb.array(shape).prod(dtype=nt.intp) + shapesize = shapeprod * itemsize + if shapesize < 0: + shape = list(shape) + shape[shape.index(-1)] = size // -shapesize + shape = tuple(shape) + shapeprod = sb.array(shape).prod(dtype=nt.intp) + + nbytes = shapeprod * itemsize + + if nbytes > size: + raise ValueError( + "Not enough bytes left in file for specified " + "shape and type." + ) + + # create the array + _array = recarray(shape, descr) + nbytesread = fd.readinto(_array.data) + if nbytesread != nbytes: + raise OSError("Didn't read as many bytes as expected") + + return _array + + +@set_module("numpy.rec") +def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, copy=True): + """ + Construct a record array from a wide-variety of objects. + + A general-purpose record array constructor that dispatches to the + appropriate `recarray` creation function based on the inputs (see Notes). + + Parameters + ---------- + obj : any + Input object. See Notes for details on how various input types are + treated. + dtype : data-type, optional + Valid dtype for array. + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the file or buffer to start reading from. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + copy : bool, optional + Whether to copy the input object (True), or to use a reference instead. + This option only applies when the input is an ndarray or recarray. + Defaults to True. + + Returns + ------- + np.recarray + Record array created from the specified object. + + Notes + ----- + If `obj` is ``None``, then call the `~numpy.recarray` constructor. If + `obj` is a string, then call the `fromstring` constructor. If `obj` is a + list or a tuple, then if the first object is an `~numpy.ndarray`, call + `fromarrays`, otherwise call `fromrecords`. 
If `obj` is a + `~numpy.recarray`, then make a copy of the data in the recarray + (if ``copy=True``) and use the new formats, names, and titles. If `obj` + is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then + return ``obj.view(recarray)``, making a copy of the data if ``copy=True``. + + Examples + -------- + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> a + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> np.rec.array(a) + rec.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], + dtype=int64) + + >>> b = [(1, 1), (2, 4), (3, 9)] + >>> c = np.rec.array(b, formats = ['i2', 'f2'], names = ('x', 'y')) + >>> c + rec.array([(1, 1.), (2, 4.), (3, 9.)], + dtype=[('x', '>> c.x + array([1, 2, 3], dtype=int16) + + >>> c.y + array([1., 4., 9.], dtype=float16) + + >>> r = np.rec.array(['abc','def'], names=['col1','col2']) + >>> print(r.col1) + abc + + >>> r.col1 + array('abc', dtype='>> r.col2 + array('def', dtype=' object: ... + def tell(self, /) -> int: ... + def readinto(self, buffer: memoryview, /) -> int: ... + +### + +# exported in `numpy.rec` +class record(np.void): + def __getattribute__(self, attr: str) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike) -> None: ... + def pprint(self) -> str: ... + @overload + def __getitem__(self, key: str | SupportsIndex) -> Any: ... + @overload + def __getitem__(self, key: list[str]) -> record: ... + +# exported in `numpy.rec` +class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: None = None, + buf: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + order: _OrderKACF = "C", + ) -> _RecArray[record]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: DTypeLike, + buf: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + byteorder: None = None, + aligned: Literal[False] = False, + order: _OrderKACF = "C", + ) -> _RecArray[Any]: ... + def __array_finalize__(self, /, obj: object) -> None: ... + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + + # + @overload + def field(self, /, attr: int | str, val: ArrayLike) -> None: ... + @overload + def field(self, /, attr: int | str, val: None = None) -> Any: ... + +# exported in `numpy.rec` +class format_parser: + dtype: np.dtype[np.void] + def __init__( + self, + /, + formats: DTypeLike, + names: str | Sequence[str] | None, + titles: str | Sequence[str] | None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + ) -> None: ... + +# exported in `numpy.rec` +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[Any]: ... 
+@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: None = None, + shape: _ShapeLike | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[record]: ... +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: None = None, + shape: _ShapeLike | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def fromstring( + datastring: _SupportsBuffer, + dtype: DTypeLike, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[record]: ... +@overload +def fromstring( + datastring: _SupportsBuffer, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def fromfile( + fd: StrOrBytesPath | _SupportsReadInto, + dtype: DTypeLike, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[Any]: ... +@overload +def fromfile( + fd: StrOrBytesPath | _SupportsReadInto, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def array( + obj: _ScalarT | NDArray[_ScalarT], + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[_ScalarT]: ... +@overload +def array( + obj: ArrayLike, + dtype: DTypeLike, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: ArrayLike, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... 
+@overload +def array( + obj: None, + dtype: DTypeLike, + shape: _ShapeLike, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: None, + dtype: None = None, + *, + shape: _ShapeLike, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: DTypeLike, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +def find_duplicate(list: Iterable[_T]) -> list[_T]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/shape_base.py b/.venv/lib/python3.12/site-packages/numpy/_core/shape_base.py new file mode 100644 index 0000000..c2a0f0d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/shape_base.py @@ -0,0 +1,998 @@ +__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', + 'stack', 'unstack', 'vstack'] + +import functools +import itertools +import operator + +from . import fromnumeric as _from_nx +from . import numeric as _nx +from . import overrides +from .multiarray import array, asanyarray, normalize_axis_index + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _atleast_1d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_1d_dispatcher) +def atleast_1d(*arys): + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst + higher-dimensional inputs are preserved. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 1``. + Copies are made only if necessary. + + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_1d(1.0) + array([1.]) + + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]) + >>> np.atleast_1d(x) is x + True + + >>> np.atleast_1d(1, [3, 4]) + (array([1]), array([3, 4])) + + """ + if len(arys) == 1: + result = asanyarray(arys[0]) + if result.ndim == 0: + result = result.reshape(1) + return result + res = [] + for ary in arys: + result = asanyarray(ary) + if result.ndim == 0: + result = result.reshape(1) + res.append(result) + return tuple(res) + + +def _atleast_2d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_2d_dispatcher) +def atleast_2d(*arys): + """ + View inputs as arrays with at least two dimensions. 
+ + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have two or more dimensions are + preserved. + + Returns + ------- + res, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 2``. + Copies are avoided where possible, and views with two or more + dimensions are returned. + + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_2d(3.0) + array([[3.]]) + + >>> x = np.arange(3.0) + >>> np.atleast_2d(x) + array([[0., 1., 2.]]) + >>> np.atleast_2d(x).base is x + True + + >>> np.atleast_2d(1, [1, 2], [[1, 2]]) + (array([[1]]), array([[1, 2]]), array([[1, 2]])) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return tuple(res) + + +def _atleast_3d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_3d_dispatcher) +def atleast_3d(*arys): + """ + View inputs as arrays with at least three dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted to + arrays. Arrays that already have three or more dimensions are + preserved. + + Returns + ------- + res1, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are + avoided where possible, and views with three or more dimensions are + returned. For example, a 1-D array of shape ``(N,)`` becomes a view + of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a + view of shape ``(M, N, 1)``. + + See Also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_3d(3.0) + array([[[3.]]]) + + >>> x = np.arange(3.0) + >>> np.atleast_3d(x).shape + (1, 3, 1) + + >>> x = np.arange(12.0).reshape(4,3) + >>> np.atleast_3d(x).shape + (4, 3, 1) + >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself + True + + >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): + ... print(arr, arr.shape) # doctest: +SKIP + ... + [[[1] + [2]]] (1, 2, 1) + [[[1] + [2]]] (1, 2, 1) + [[[1 2]]] (1, 1, 2) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :, _nx.newaxis] + elif ary.ndim == 2: + result = ary[:, :, _nx.newaxis] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return tuple(res) + + +def _arrays_for_stack_dispatcher(arrays): + if not hasattr(arrays, "__getitem__"): + raise TypeError('arrays to stack must be passed as a "sequence" type ' + 'such as list or tuple.') + + return tuple(arrays) + + +def _vhstack_dispatcher(tup, *, dtype=None, casting=None): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_vhstack_dispatcher) +def vstack(tup, *, dtype=None, casting="same_kind"): + """ + Stack arrays in sequence vertically (row wise). + + This is equivalent to concatenation along the first axis after 1-D arrays + of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by + `vsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). 
The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the first axis. + 1-D arrays must have the same length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 2-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.vstack((a,b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[4], [5], [6]]) + >>> np.vstack((a,b)) + array([[1], + [2], + [3], + [4], + [5], + [6]]) + + """ + arrs = atleast_2d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) + + +@array_function_dispatch(_vhstack_dispatcher) +def hstack(tup, *, dtype=None, casting="same_kind"): + """ + Stack arrays in sequence horizontally (column wise). + + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. Rebuilds arrays divided + by `hsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). 
+ dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + hsplit : Split an array into multiple sub-arrays + horizontally (column-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((4,5,6)) + >>> np.hstack((a,b)) + array([1, 2, 3, 4, 5, 6]) + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[4],[5],[6]]) + >>> np.hstack((a,b)) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrs = atleast_1d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" + if arrs and arrs[0].ndim == 1: + return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) + else: + return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting) + + +def _stack_dispatcher(arrays, axis=None, out=None, *, + dtype=None, casting=None): + arrays = _arrays_for_stack_dispatcher(arrays) + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_dispatch(_stack_dispatcher) +def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): + """ + Join a sequence of arrays along a new axis. + + The ``axis`` parameter specifies the index of the new axis in the + dimensions of the result. For example, if ``axis=0`` it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + Parameters + ---------- + arrays : sequence of ndarrays + Each array must have the same shape. In the case of a single ndarray + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + axis : int, optional + The axis in the result array along which the input arrays are stacked. + + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what stack would have returned if no + out argument were specified. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + + Returns + ------- + stacked : ndarray + The stacked array has one more dimension than the input arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. + unstack : Split an array into a tuple of sub-arrays along an axis. 
+ + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)] + >>> np.stack(arrays, axis=0).shape + (10, 3, 4) + + >>> np.stack(arrays, axis=1).shape + (3, 10, 4) + + >>> np.stack(arrays, axis=2).shape + (3, 4, 10) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.stack((a, b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.stack((a, b), axis=-1) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrays = [asanyarray(arr) for arr in arrays] + if not arrays: + raise ValueError('need at least one array to stack') + + shapes = {arr.shape for arr in arrays} + if len(shapes) != 1: + raise ValueError('all input arrays must have the same shape') + + result_ndim = arrays[0].ndim + 1 + axis = normalize_axis_index(axis, result_ndim) + + sl = (slice(None),) * axis + (_nx.newaxis,) + expanded_arrays = [arr[sl] for arr in arrays] + return _nx.concatenate(expanded_arrays, axis=axis, out=out, + dtype=dtype, casting=casting) + +def _unstack_dispatcher(x, /, *, axis=None): + return (x,) + +@array_function_dispatch(_unstack_dispatcher) +def unstack(x, /, *, axis=0): + """ + Split an array into a sequence of arrays along the given axis. + + The ``axis`` parameter specifies the dimension along which the array will + be split. For example, if ``axis=0`` (the default) it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + The result is a tuple of arrays split along ``axis``. + + .. versionadded:: 2.1.0 + + Parameters + ---------- + x : ndarray + The array to be unstacked. + axis : int, optional + Axis along which the array will be split. Default: ``0``. + + Returns + ------- + unstacked : tuple of ndarrays + The unstacked arrays. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. + + Notes + ----- + ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e., + ``stack(unstack(x, axis=axis), axis=axis) == x``. + + This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``, since + iterating on an array iterates along the first axis. + + Examples + -------- + >>> arr = np.arange(24).reshape((2, 3, 4)) + >>> np.unstack(arr) + (array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]), + array([[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]])) + >>> np.unstack(arr, axis=1) + (array([[ 0, 1, 2, 3], + [12, 13, 14, 15]]), + array([[ 4, 5, 6, 7], + [16, 17, 18, 19]]), + array([[ 8, 9, 10, 11], + [20, 21, 22, 23]])) + >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1) + >>> arr2.shape + (2, 3, 4) + >>> np.all(arr == arr2) + np.True_ + + """ + if x.ndim == 0: + raise ValueError("Input array must be at least 1-d.") + return tuple(_nx.moveaxis(x, axis, 0)) + + +# Internal functions to eliminate the overhead of repeated dispatch in one of +# the two possible paths inside np.block. +# Use getattr to protect against __array_function__ being disabled. +_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) +_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) +_concatenate = getattr(_from_nx.concatenate, + '__wrapped__', _from_nx.concatenate) + + +def _block_format_index(index): + """ + Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. 
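+
+    For instance (an editorial example based on the implementation below;
+    a trailing ``None``, which marks an empty list, is skipped)::
+
+        >>> _block_format_index([0, 1, None])
+        'arrays[0][1]'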
+ """ + idx_str = ''.join(f'[{i}]' for i in index if i is not None) + return 'arrays' + idx_str + + +def _block_check_depths_match(arrays, parent_index=[]): + """ + Recursive function checking that the depths of nested lists in `arrays` + all match. Mismatch raises a ValueError as described in the block + docstring below. + + The entire index (rather than just the depth) needs to be calculated + for each innermost list, in case an error needs to be raised, so that + the index of the offending list can be printed as part of the error. + + Parameters + ---------- + arrays : nested list of arrays + The arrays to check + parent_index : list of int + The full index of `arrays` within the nested lists passed to + `_block_check_depths_match` at the top of the recursion. + + Returns + ------- + first_index : list of int + The full index of an element from the bottom of the nesting in + `arrays`. If any element at the bottom is an empty list, this will + refer to it, and the last index along the empty axis will be None. + max_arr_ndim : int + The maximum of the ndims of the arrays nested in `arrays`. + final_size: int + The number of elements in the final array. This is used the motivate + the choice of algorithm used using benchmarking wisdom. + + """ + if isinstance(arrays, tuple): + # not strictly necessary, but saves us from: + # - more than one way to do things - no point treating tuples like + # lists + # - horribly confusing behaviour that results when tuples are + # treated like ndarray + raise TypeError( + f'{_block_format_index(parent_index)} is a tuple. ' + 'Only lists can be used to arrange blocks, and np.block does ' + 'not allow implicit conversion from tuple to ndarray.' + ) + elif isinstance(arrays, list) and len(arrays) > 0: + idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) + for i, arr in enumerate(arrays)) + + first_index, max_arr_ndim, final_size = next(idxs_ndims) + for index, ndim, size in idxs_ndims: + final_size += size + if ndim > max_arr_ndim: + max_arr_ndim = ndim + if len(index) != len(first_index): + raise ValueError( + "List depths are mismatched. First element was at " + f"depth {len(first_index)}, but there is an element at " + f"depth {len(index)} ({_block_format_index(index)})" + ) + # propagate our flag that indicates an empty list at the bottom + if index[-1] is None: + first_index = index + + return first_index, max_arr_ndim, final_size + elif isinstance(arrays, list) and len(arrays) == 0: + # We've 'bottomed out' on an empty list + return parent_index + [None], 0, 0 + else: + # We've 'bottomed out' - arrays is either a scalar or an array + size = _size(arrays) + return parent_index, _ndim(arrays), size + + +def _atleast_nd(a, ndim): + # Ensures `a` has at least `ndim` dimensions by prepending + # ones to `a.shape` as necessary + return array(a, ndmin=ndim, copy=None, subok=True) + + +def _accumulate(values): + return list(itertools.accumulate(values)) + + +def _concatenate_shapes(shapes, axis): + """Given array shapes, return the resulting shape and slices prefixes. + + These help in nested concatenation. + + Returns + ------- + shape: tuple of int + This tuple satisfies:: + + shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis) + shape == concatenate(arrs, axis).shape + + slice_prefixes: tuple of (slice(start, end), ) + For a list of arrays being concatenated, this returns the slice + in the larger array at axis that needs to be sliced into. 
+ + For example, the following holds:: + + ret = concatenate([a, b, c], axis) + _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis) + + ret[(slice(None),) * axis + sl_a] == a + ret[(slice(None),) * axis + sl_b] == b + ret[(slice(None),) * axis + sl_c] == c + + These are called slice prefixes since they are used in the recursive + blocking algorithm to compute the left-most slices during the + recursion. Therefore, they must be prepended to rest of the slice + that was computed deeper in the recursion. + + These are returned as tuples to ensure that they can quickly be added + to existing slice tuple without creating a new tuple every time. + + """ + # Cache a result that will be reused. + shape_at_axis = [shape[axis] for shape in shapes] + + # Take a shape, any shape + first_shape = shapes[0] + first_shape_pre = first_shape[:axis] + first_shape_post = first_shape[axis + 1:] + + if any(shape[:axis] != first_shape_pre or + shape[axis + 1:] != first_shape_post for shape in shapes): + raise ValueError( + f'Mismatched array shapes in block along axis {axis}.') + + shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1:]) + + offsets_at_axis = _accumulate(shape_at_axis) + slice_prefixes = [(slice(start, end),) + for start, end in zip([0] + offsets_at_axis, + offsets_at_axis)] + return shape, slice_prefixes + + +def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): + """ + Returns the shape of the final array, along with a list + of slices and a list of arrays that can be used for assignment inside the + new array + + Parameters + ---------- + arrays : nested list of arrays + The arrays to check + max_depth : list of int + The number of nested lists + result_ndim : int + The number of dimensions in thefinal array. + + Returns + ------- + shape : tuple of int + The shape that the final array will take on. + slices: list of tuple of slices + The slices into the full array required for assignment. These are + required to be prepended with ``(Ellipsis, )`` to obtain to correct + final index. + arrays: list of ndarray + The data to assign to each slice of the full array + + """ + if depth < max_depth: + shapes, slices, arrays = zip( + *[_block_info_recursion(arr, max_depth, result_ndim, depth + 1) + for arr in arrays]) + + axis = result_ndim - max_depth + depth + shape, slice_prefixes = _concatenate_shapes(shapes, axis) + + # Prepend the slice prefix and flatten the slices + slices = [slice_prefix + the_slice + for slice_prefix, inner_slices in zip(slice_prefixes, slices) + for the_slice in inner_slices] + + # Flatten the array list + arrays = functools.reduce(operator.add, arrays) + + return shape, slices, arrays + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + # Return the slice and the array inside a list to be consistent with + # the recursive case. + arr = _atleast_nd(arrays, result_ndim) + return arr.shape, [()], [arr] + + +def _block(arrays, max_depth, result_ndim, depth=0): + """ + Internal implementation of block based on repeated concatenation. + `arrays` is the argument passed to + block. `max_depth` is the depth of nested lists within `arrays` and + `result_ndim` is the greatest of the dimensions of the arrays in + `arrays` and the depth of the lists in `arrays` (see block docstring + for details). 
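+
+    For instance (an editorial sketch; `_block` is private, so this only
+    illustrates the axis arithmetic): with ``max_depth=2`` the innermost
+    lists are concatenated along ``axis=-1`` first, then the outer level
+    along ``axis=-2``::
+
+        >>> import numpy as np
+        >>> a, b = np.ones((2, 2)), np.zeros((2, 2))
+        >>> _block([[a, b]], 2, 2).shape
+        (2, 4)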
+ """ + if depth < max_depth: + arrs = [_block(arr, max_depth, result_ndim, depth + 1) + for arr in arrays] + return _concatenate(arrs, axis=-(max_depth - depth)) + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + return _atleast_nd(arrays, result_ndim) + + +def _block_dispatcher(arrays): + # Use type(...) is list to match the behavior of np.block(), which special + # cases list specifically rather than allowing for generic iterables or + # tuple. Also, we know that list.__array_function__ will never exist. + if isinstance(arrays, list): + for subarrays in arrays: + yield from _block_dispatcher(subarrays) + else: + yield arrays + + +@array_function_dispatch(_block_dispatcher) +def block(arrays): + """ + Assemble an nd-array from nested lists of blocks. + + Blocks in the innermost lists are concatenated (see `concatenate`) along + the last dimension (-1), then these are concatenated along the + second-last dimension (-2), and so on until the outermost list is reached. + + Blocks can be of any dimension, but will not be broadcasted using + the normal rules. Instead, leading axes of size 1 are inserted, + to make ``block.ndim`` the same for all blocks. This is primarily useful + for working with scalars, and means that code like ``np.block([v, 1])`` + is valid, where ``v.ndim == 1``. + + When the nested list is two levels deep, this allows block matrices to be + constructed from their components. + + Parameters + ---------- + arrays : nested list of array_like or scalars (but not tuples) + If passed a single ndarray or scalar (a nested list of depth 0), this + is returned unmodified (and not copied). + + Elements shapes must match along the appropriate axes (without + broadcasting), but leading 1s will be prepended to the shape as + necessary to make the dimensions match. + + Returns + ------- + block_array : ndarray + The array assembled from the given blocks. + + The dimensionality of the output is equal to the greatest of: + + * the dimensionality of all the inputs + * the depth to which the input list is nested + + Raises + ------ + ValueError + * If list depths are mismatched - for instance, ``[[a, b], c]`` is + illegal, and should be spelt ``[[a, b], [c]]`` + * If lists are empty - for instance, ``[[a, b], []]`` + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + + Notes + ----- + When called with only scalars, ``np.block`` is equivalent to an ndarray + call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to + ``np.array([[1, 2], [3, 4]])``. + + This function does not enforce that the blocks lie on a fixed grid. 
+ ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: + + AAAbb + AAAbb + cccDD + + But is also allowed to produce, for some ``a, b, c, d``:: + + AAAbb + AAAbb + cDDDD + + Since concatenation happens along the last axis first, `block` is *not* + capable of producing the following directly:: + + AAAbb + cccbb + cccDD + + Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is + equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. + + Examples + -------- + The most common use of this function is to build a block matrix: + + >>> import numpy as np + >>> A = np.eye(2) * 2 + >>> B = np.eye(3) * 3 + >>> np.block([ + ... [A, np.zeros((2, 3))], + ... [np.ones((3, 2)), B ] + ... ]) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [1., 1., 3., 0., 0.], + [1., 1., 0., 3., 0.], + [1., 1., 0., 0., 3.]]) + + With a list of depth 1, `block` can be used as `hstack`: + + >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) + array([1, 2, 3]) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([a, b, 10]) # hstack([a, b, 10]) + array([ 1, 2, 3, 4, 5, 6, 10]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([A, B]) # hstack([A, B]) + array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + + With a list of depth 2, `block` can be used in place of `vstack`: + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([[a], [b]]) # vstack([a, b]) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([[A], [B]]) # vstack([A, B]) + array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + + It can also be used in place of `atleast_1d` and `atleast_2d`: + + >>> a = np.array(0) + >>> b = np.array([1]) + >>> np.block([a]) # atleast_1d(a) + array([0]) + >>> np.block([b]) # atleast_1d(b) + array([1]) + + >>> np.block([[a]]) # atleast_2d(a) + array([[0]]) + >>> np.block([[b]]) # atleast_2d(b) + array([[1]]) + + + """ + arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) + + # It was found through benchmarking that making an array of final size + # around 256x256 was faster by straight concatenation on a + # i7-7700HQ processor and dual channel ram 2400MHz. + # It didn't seem to matter heavily on the dtype used. + # + # A 2D array using repeated concatenation requires 2 copies of the array. + # + # The fastest algorithm will depend on the ratio of CPU power to memory + # speed. + # One can monitor the results of the benchmark + # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d + # to tune this parameter until a C version of the `_block_info_recursion` + # algorithm is implemented which would likely be faster than the python + # version. + if list_ndim * final_size > (2 * 512 * 512): + return _block_slicing(arrays, list_ndim, result_ndim) + else: + return _block_concatenate(arrays, list_ndim, result_ndim) + + +# These helper functions are mostly used for testing. +# They allow us to write tests that directly call `_block_slicing` +# or `_block_concatenate` without blocking large arrays to force the wisdom +# to trigger the desired path. 
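+# An editorial sketch (not part of the vendored file): the two code paths
+# chosen by the `2 * 512 * 512` threshold above are interchangeable in
+# result, differing only in speed. Assuming the private helpers below keep
+# their signatures:
+#
+#     >>> import numpy as np
+#     >>> arrays = [[np.ones((2, 2)), np.zeros((2, 2))]]
+#     >>> arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+#     >>> np.array_equal(_block_slicing(arrays, list_ndim, result_ndim),
+#     ...                _block_concatenate(arrays, list_ndim, result_ndim))
+#     True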
+def _block_setup(arrays): + """ + Returns + (`arrays`, list_ndim, result_ndim, final_size) + """ + bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) + list_ndim = len(bottom_index) + if bottom_index and bottom_index[-1] is None: + raise ValueError( + f'List at {_block_format_index(bottom_index)} cannot be empty' + ) + result_ndim = max(arr_ndim, list_ndim) + return arrays, list_ndim, result_ndim, final_size + + +def _block_slicing(arrays, list_ndim, result_ndim): + shape, slices, arrays = _block_info_recursion( + arrays, list_ndim, result_ndim) + dtype = _nx.result_type(*[arr.dtype for arr in arrays]) + + # Test preferring F only in the case that all input arrays are F + F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) + C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) + order = 'F' if F_order and not C_order else 'C' + result = _nx.empty(shape=shape, dtype=dtype, order=order) + # Note: In a c implementation, the function + # PyArray_CreateMultiSortedStridePerm could be used for more advanced + # guessing of the desired order. + + for the_slice, arr in zip(slices, arrays): + result[(Ellipsis,) + the_slice] = arr + return result + + +def _block_concatenate(arrays, list_ndim, result_ndim): + result = _block(arrays, list_ndim, result_ndim) + if list_ndim == 0: + # Catch an edge case where _block returns a view because + # `arrays` is a single numpy array and not a list of numpy arrays. + # This might copy scalars or lists twice, but this isn't a likely + # usecase for those interested in performance + result = result.copy() + return result diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/shape_base.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/shape_base.pyi new file mode 100644 index 0000000..c2c9c96 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/shape_base.pyi @@ -0,0 +1,175 @@ +from collections.abc import Sequence +from typing import Any, SupportsIndex, TypeVar, overload + +from numpy import _CastingKind, generic +from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike + +__all__ = [ + "atleast_1d", + "atleast_2d", + "atleast_3d", + "block", + "hstack", + "stack", + "unstack", + "vstack", +] + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=generic) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +### + +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... 
+@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# +@overload +def vstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... + +@overload +def hstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... + +@overload +def stack( + arrays: Sequence[_ArrayLike[_ScalarT]], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex, + out: _ArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... + +@overload +def unstack( + array: _ArrayLike[_ScalarT], + /, + *, + axis: int = ..., +) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def unstack( + array: ArrayLike, + /, + *, + axis: int = ..., +) -> tuple[NDArray[Any], ...]: ... + +@overload +def block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def block(arrays: ArrayLike) -> NDArray[Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/strings.py b/.venv/lib/python3.12/site-packages/numpy/_core/strings.py new file mode 100644 index 0000000..b4dc165 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/strings.py @@ -0,0 +1,1823 @@ +""" +This module contains a set of functions for vectorized string +operations. 
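+
+An editorial illustration (not part of the upstream docstring): the functions
+here broadcast over array elements much like other ufuncs, e.g.::
+
+    >>> import numpy as np
+    >>> np.strings.upper(np.array(['numpy', 'is', 'nice'])).tolist()
+    ['NUMPY', 'IS', 'NICE']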
+""" + +import functools +import sys + +import numpy as np +from numpy import ( + add, + equal, + greater, + greater_equal, + less, + less_equal, + not_equal, +) +from numpy import ( + multiply as _multiply_ufunc, +) +from numpy._core.multiarray import _vec_string +from numpy._core.overrides import array_function_dispatch, set_module +from numpy._core.umath import ( + _center, + _expandtabs, + _expandtabs_length, + _ljust, + _lstrip_chars, + _lstrip_whitespace, + _partition, + _partition_index, + _replace, + _rjust, + _rpartition, + _rpartition_index, + _rstrip_chars, + _rstrip_whitespace, + _slice, + _strip_chars, + _strip_whitespace, + _zfill, + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + str_len, +) +from numpy._core.umath import ( + count as _count_ufunc, +) +from numpy._core.umath import ( + endswith as _endswith_ufunc, +) +from numpy._core.umath import ( + find as _find_ufunc, +) +from numpy._core.umath import ( + index as _index_ufunc, +) +from numpy._core.umath import ( + rfind as _rfind_ufunc, +) +from numpy._core.umath import ( + rindex as _rindex_ufunc, +) +from numpy._core.umath import ( + startswith as _startswith_ufunc, +) + + +def _override___module__(): + for ufunc in [ + isalnum, isalpha, isdecimal, isdigit, islower, isnumeric, isspace, + istitle, isupper, str_len, + ]: + ufunc.__module__ = "numpy.strings" + ufunc.__qualname__ = ufunc.__name__ + + +_override___module__() + + +__all__ = [ + # UFuncs + "equal", "not_equal", "less", "less_equal", "greater", "greater_equal", + "add", "multiply", "isalpha", "isdigit", "isspace", "isalnum", "islower", + "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", + "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", + "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust", + "zfill", "partition", "rpartition", "slice", + + # _vec_string - Will gradually become ufuncs as well + "upper", "lower", "swapcase", "capitalize", "title", + + # _vec_string - Will probably not become ufuncs + "mod", "decode", "encode", "translate", + + # Removed from namespace until behavior has been crystallized + # "join", "split", "rsplit", "splitlines", +] + + +MAX = np.iinfo(np.int64).max + +array_function_dispatch = functools.partial( + array_function_dispatch, module='numpy.strings') + + +def _get_num_chars(a): + """ + Helper function that returns the number of characters per field in + a string or unicode array. This is to abstract out the fact that + for a unicode array this is itemsize / 4. + """ + if issubclass(a.dtype.type, np.str_): + return a.itemsize // 4 + return a.itemsize + + +def _to_bytes_or_str_array(result, output_dtype_like): + """ + Helper function to cast a result back into an array + with the appropriate dtype if an object array must be used + as an intermediary. + """ + output_dtype_like = np.asarray(output_dtype_like) + if result.size == 0: + # Calling asarray & tolist in an empty array would result + # in losing shape information + return result.astype(output_dtype_like.dtype) + ret = np.asarray(result.tolist()) + if isinstance(output_dtype_like.dtype, np.dtypes.StringDType): + return ret.astype(type(output_dtype_like.dtype)) + return ret.astype(type(output_dtype_like.dtype)(_get_num_chars(ret))) + + +def _clean_args(*args): + """ + Helper function for delegating arguments to Python string + functions. + + Many of the Python string operations that have optional arguments + do not use 'None' to indicate a default value. 
In these cases,
+    we need to remove all None arguments, and those following them.
+    """
+    newargs = []
+    for chk in args:
+        if chk is None:
+            break
+        newargs.append(chk)
+    return newargs
+
+
+def _multiply_dispatcher(a, i):
+    return (a,)
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_multiply_dispatcher)
+def multiply(a, i):
+    """
+    Return (a * i), that is string multiple concatenation,
+    element-wise.
+
+    Values in ``i`` of less than 0 are treated as 0 (which yields an
+    empty string).
+
+    Parameters
+    ----------
+    a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype
+
+    i : array_like, with any integer dtype
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
+        depending on input types
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["a", "b", "c"])
+    >>> np.strings.multiply(a, 3)
+    array(['aaa', 'bbb', 'ccc'], dtype='<U3')
+    >>> i = np.array([1, 2, 3])
+    >>> np.strings.multiply(a, i)
+    array(['a', 'bb', 'ccc'], dtype='<U3')
+    >>> np.strings.multiply(np.array(['a']), i)
+    array(['a', 'aa', 'aaa'], dtype='<U3')
+    >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
+    >>> np.strings.multiply(a, 3)
+    array([['aaa', 'bbb', 'ccc'],
+           ['ddd', 'eee', 'fff']], dtype='<U3')
+    >>> np.strings.multiply(a, i)
+    array([['a', 'bb', 'ccc'],
+           ['d', 'ee', 'fff']], dtype='<U3')
+
+    """
+    a = np.asanyarray(a)
+
+    i = np.asanyarray(i)
+    if not np.issubdtype(i.dtype, np.integer):
+        raise TypeError(f"unsupported type {i.dtype} for operand 'i'")
+    i = np.maximum(i, 0)
+
+    # delegate to stringdtype loops that also do overflow checking
+    if a.dtype.char == "T":
+        return a * i
+
+    a_len = str_len(a)
+
+    # Ensure we can do a_len * i without overflow.
+    if np.any(a_len > sys.maxsize / np.maximum(i, 1)):
+        raise OverflowError("Overflow encountered in string multiply")
+
+    buffersizes = a_len * i
+    out_dtype = f"{a.dtype.char}{buffersizes.max()}"
+    out = np.empty_like(a, shape=buffersizes.shape, dtype=out_dtype)
+    return _multiply_ufunc(a, i, out=out)
+
+
+def _mod_dispatcher(a, values):
+    return (a, values)
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_mod_dispatcher)
+def mod(a, values):
+    """
+    Return (a % i), that is pre-Python 2.6 string formatting
+    (interpolation), element-wise for a pair of array_likes of str
+    or unicode.
+
+    Parameters
+    ----------
+    a : array_like, with `np.bytes_` or `np.str_` dtype
+
+    values : array_like of values
+        These values will be element-wise interpolated into the string.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
+        depending on input types
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["NumPy is a %s library"])
+    >>> np.strings.mod(a, values=["Python"])
+    array(['NumPy is a Python library'], dtype='<U25')
+
+    >>> a = np.array([b'%d bytes', b'%d bits'])
+    >>> values = np.array([8, 64])
+    >>> np.strings.mod(a, values)
+    array([b'8 bytes', b'64 bits'], dtype='|S7')
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, np.object_, '__mod__', (values,)), a)
+
+
+@set_module("numpy.strings")
+def find(a, sub, start=0, end=None):
+    """
+    For each element, return the lowest index in the string where
+    substring ``sub`` is found, such that ``sub`` is contained in the
+    range [``start``, ``end``).
+
+    Parameters
+    ----------
+    a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype
+
+    sub : array_like, with `np.bytes_` or `np.str_` dtype
+        The substring to search for.
+
+    start, end : array_like, with any integer dtype
+        The range to look in, interpreted as in slice notation.
+ + Returns + ------- + y : ndarray + Output array of ints + + See Also + -------- + str.find + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["NumPy is a Python library"]) + >>> np.strings.find(a, "Python") + array([11]) + + """ + end = end if end is not None else MAX + return _find_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def rfind(a, sub, start=0, end=None): + """ + For each element, return the highest index in the string where + substring ``sub`` is found, such that ``sub`` is contained in the + range [``start``, ``end``). + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + The substring to search for. + + start, end : array_like, with any integer dtype + The range to look in, interpreted as in slice notation. + + Returns + ------- + y : ndarray + Output array of ints + + See Also + -------- + str.rfind + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["Computer Science"]) + >>> np.strings.rfind(a, "Science", start=0, end=None) + array([9]) + >>> np.strings.rfind(a, "Science", start=0, end=8) + array([-1]) + >>> b = np.array(["Computer Science", "Science"]) + >>> np.strings.rfind(b, "Science", start=0, end=None) + array([9, 0]) + + """ + end = end if end is not None else MAX + return _rfind_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def index(a, sub, start=0, end=None): + """ + Like `find`, but raises :exc:`ValueError` when the substring is not found. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + start, end : array_like, with any integer dtype, optional + + Returns + ------- + out : ndarray + Output array of ints. + + See Also + -------- + find, str.index + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["Computer Science"]) + >>> np.strings.index(a, "Science", start=0, end=None) + array([9]) + + """ + end = end if end is not None else MAX + return _index_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def rindex(a, sub, start=0, end=None): + """ + Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is + not found. + + Parameters + ---------- + a : array-like, with `np.bytes_` or `np.str_` dtype + + sub : array-like, with `np.bytes_` or `np.str_` dtype + + start, end : array-like, with any integer dtype, optional + + Returns + ------- + out : ndarray + Output array of ints. + + See Also + -------- + rfind, str.rindex + + Examples + -------- + >>> a = np.array(["Computer Science"]) + >>> np.strings.rindex(a, "Science", start=0, end=None) + array([9]) + + """ + end = end if end is not None else MAX + return _rindex_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def count(a, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring ``sub`` in the range [``start``, ``end``). + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + The substring to search for. + + start, end : array_like, with any integer dtype + The range to look in, interpreted as in slice notation. 
+
+    Returns
+    -------
+    y : ndarray
+        Output array of ints
+
+    See Also
+    --------
+    str.count
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.strings.count(c, 'A')
+    array([3, 1, 1])
+    >>> np.strings.count(c, 'aA')
+    array([3, 1, 0])
+    >>> np.strings.count(c, 'A', start=1, end=4)
+    array([2, 1, 1])
+    >>> np.strings.count(c, 'A', start=1, end=3)
+    array([1, 0, 0])
+
+    """
+    end = end if end is not None else MAX
+    return _count_ufunc(a, sub, start, end)
+
+
+@set_module("numpy.strings")
+def startswith(a, prefix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in ``a`` starts with ``prefix``, otherwise `False`.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    prefix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    start, end : array_like, with any integer dtype
+        With ``start``, test beginning at that position. With ``end``,
+        stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.startswith
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> s = np.array(['foo', 'bar'])
+    >>> s
+    array(['foo', 'bar'], dtype='<U3')
+    >>> np.strings.startswith(s, 'fo')
+    array([ True, False])
+    >>> np.strings.startswith(s, 'o', start=1, end=2)
+    array([ True, False])
+
+    """
+    end = end if end is not None else MAX
+    return _startswith_ufunc(a, prefix, start, end)
+
+
+@set_module("numpy.strings")
+def endswith(a, suffix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in ``a`` ends with ``suffix``, otherwise `False`.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    suffix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    start, end : array_like, with any integer dtype
+        With ``start``, test beginning at that position. With ``end``,
+        stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.endswith
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> s = np.array(['foo', 'bar'])
+    >>> s
+    array(['foo', 'bar'], dtype='<U3')
+    >>> np.strings.endswith(s, 'ar')
+    array([False,  True])
+    >>> np.strings.endswith(s, 'a', start=1, end=2)
+    array([False,  True])
+
+    """
+    end = end if end is not None else MAX
+    return _endswith_ufunc(a, suffix, start, end)
+
+
+def _code_dispatcher(a, encoding=None, errors=None):
+    return (a,)
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_code_dispatcher)
+def decode(a, encoding=None, errors=None):
+    r"""
+    Calls :meth:`bytes.decode` element-wise.
+
+    The set of available codecs comes from the Python standard library,
+    and may be extended at runtime. For more information, see the
+    :mod:`codecs` module.
+
+    Parameters
+    ----------
+    a : array_like, with ``bytes_`` dtype
+
+    encoding : str, optional
+        The name of an encoding
+
+    errors : str, optional
+        Specifies how to handle encoding errors
+
+    Returns
+    -------
+    out : ndarray
+
+    See Also
+    --------
+    :py:meth:`bytes.decode`
+
+    Notes
+    -----
+    The type of the result will depend on the encoding specified.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
+    ...
b'\x81\x82\xc2\xc1\xc2\x82\x81']) + >>> c + array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', + b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') + >>> np.strings.decode(c, encoding='cp037') + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.encode(a, encoding='cp037') + array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', + b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') + + """ + return _to_bytes_or_str_array( + _vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)), + np.bytes_(b'')) + + +def _expandtabs_dispatcher(a, tabsize=None): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_expandtabs_dispatcher) +def expandtabs(a, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + Calls :meth:`str.expandtabs` element-wise. + + Return a copy of each string element where all tab characters are + replaced by one or more spaces, depending on the current column + and the given `tabsize`. The column number is reset to zero after + each newline occurring in the string. This doesn't understand other + non-printing characters or escape sequences. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array + tabsize : int, optional + Replace tabs with `tabsize` number of spaces. If not given defaults + to 8 spaces. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type + + See Also + -------- + str.expandtabs + + Examples + -------- + >>> import numpy as np + >>> a = np.array(['\t\tHello\tworld']) + >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP + array([' Hello world'], dtype='>> import numpy as np + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) + array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='>> np.strings.center(c, width=9, fillchar='*') + array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.ljust(c, width=3) + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.ljust(c, width=9) + array(['aAaAaA ', ' aA ', 'abBABba '], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rjust(a, width=3) + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.rjust(a, width=9) + array([' aAaAaA', ' aA ', ' abBABba'], dtype='>> import numpy as np + >>> np.strings.zfill(['1', '-1', '+1'], 3) + array(['001', '-01', '+01'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.lstrip(c, 'a') + array(['AaAaA', ' aA ', 'bBABba'], dtype='>> np.strings.lstrip(c, 'A') # leaves c unchanged + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c, '')).all() + np.False_ + >>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c)).all() + np.True_ + + """ + if chars is None: + return _lstrip_whitespace(a) + return _lstrip_chars(a, chars) + + +@set_module("numpy.strings") +def rstrip(a, chars=None): + """ + For each element in `a`, return a copy with the trailing characters + removed. 
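+
+    An editorial illustration (not part of the upstream docstring): with the
+    default ``chars=None``, trailing whitespace is removed, mirroring
+    :meth:`str.rstrip`::
+
+        >>> import numpy as np
+        >>> np.strings.rstrip(np.array(['ab ', 'cd  '])).tolist()
+        ['ab', 'cd']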
+ + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + chars : scalar with the same dtype as ``a``, optional + The ``chars`` argument is a string specifying the set of + characters to be removed. If ``None``, the ``chars`` + argument defaults to removing whitespace. The ``chars`` argument + is not a prefix or suffix; rather, all combinations of its + values are stripped. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.rstrip + + Examples + -------- + >>> import numpy as np + >>> c = np.array(['aAaAaA', 'abBABba']) + >>> c + array(['aAaAaA', 'abBABba'], dtype='>> np.strings.rstrip(c, 'a') + array(['aAaAaA', 'abBABb'], dtype='>> np.strings.rstrip(c, 'A') + array(['aAaAa', 'abBABba'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.strip(c) + array(['aAaAaA', 'aA', 'abBABba'], dtype='>> np.strings.strip(c, 'a') + array(['AaAaA', ' aA ', 'bBABb'], dtype='>> np.strings.strip(c, 'A') + array(['aAaAa', ' aA ', 'abBABba'], dtype='>> import numpy as np + >>> c = np.array(['a1b c', '1bca', 'bca1']); c + array(['a1b c', '1bca', 'bca1'], dtype='>> np.strings.upper(c) + array(['A1B C', '1BCA', 'BCA1'], dtype='>> import numpy as np + >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c + array(['A1B C', '1BCA', 'BCA1'], dtype='>> np.strings.lower(c) + array(['a1b c', '1bca', 'bca1'], dtype='>> import numpy as np + >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c + array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], + dtype='|S5') + >>> np.strings.swapcase(c) + array(['A1b C', '1B cA', 'B cA1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'swapcase') + + +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) +def capitalize(a): + """ + Return a copy of ``a`` with only the first character of each element + capitalized. + + Calls :meth:`str.capitalize` element-wise. + + For byte strings, this method is locale-dependent. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array of strings to capitalize. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.capitalize + + Examples + -------- + >>> import numpy as np + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], + dtype='|S4') + >>> np.strings.capitalize(c) + array(['A1b2', '1b2a', 'B2a1', '2a1b'], + dtype='|S4') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'capitalize') + + +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) +def title(a): + """ + Return element-wise title cased version of string or unicode. + + Title case words start with uppercase characters, all remaining cased + characters are lowercase. + + Calls :meth:`str.title` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array. 
+ + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.title + + Examples + -------- + >>> import numpy as np + >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c + array(['a1b c', '1b ca', 'b ca1', 'ca1b'], + dtype='|S5') + >>> np.strings.title(c) + array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'title') + + +def _replace_dispatcher(a, old, new, count=None): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_replace_dispatcher) +def replace(a, old, new, count=-1): + """ + For each element in ``a``, return a copy of the string with + occurrences of substring ``old`` replaced by ``new``. + + Parameters + ---------- + a : array_like, with ``bytes_`` or ``str_`` dtype + + old, new : array_like, with ``bytes_`` or ``str_`` dtype + + count : array_like, with ``int_`` dtype + If the optional argument ``count`` is given, only the first + ``count`` occurrences are replaced. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.replace + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["That is a mango", "Monkeys eat mangos"]) + >>> np.strings.replace(a, 'mango', 'banana') + array(['That is a banana', 'Monkeys eat bananas'], dtype='>> a = np.array(["The dish is fresh", "This is it"]) + >>> np.strings.replace(a, 'is', 'was') + array(['The dwash was fresh', 'Thwas was it'], dtype='>> import numpy as np + >>> np.strings.join('-', 'osd') # doctest: +SKIP + array('o-s-d', dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP + array(['g-h-c', 'o.s.d'], dtype='>> import numpy as np + >>> x = np.array("Numpy is nice!") + >>> np.strings.split(x, " ") # doctest: +SKIP + array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP + + >>> np.strings.split(x, " ", 1) # doctest: +SKIP + array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP + + See Also + -------- + str.split, rsplit + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, np.object_, 'split', [sep] + _clean_args(maxsplit)) + + +@array_function_dispatch(_split_dispatcher) +def _rsplit(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. + + Calls :meth:`str.rsplit` element-wise. + + Except for splitting from the right, `rsplit` + behaves like `split`. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sep : str or unicode, optional + If `sep` is not specified or None, any whitespace string + is a separator. + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done, + the rightmost ones. 
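One behaviour worth calling out from the ``replace`` documentation above: ``old`` matches anywhere, including inside words, unless ``count`` limits it. A small sketch (NumPy >= 2.0 assumed; the split helpers in this module are private, note the leading underscores, so the long-standing ``np.char.split`` is used for the last line):

import numpy as np

a = np.array(["The dish is fresh", "This is it"])
print(np.strings.replace(a, 'is', 'was'))           # also rewrites 'dish' and 'This'
print(np.strings.replace(a, 'is', 'was', count=1))  # only the first match per element

x = np.array("Numpy is nice!")
print(np.char.split(x, " ", 1))  # 0-d object array holding list(['Numpy', 'is nice!'])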
+ + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.rsplit, split + + Examples + -------- + >>> import numpy as np + >>> a = np.array(['aAaAaA', 'abBABba']) + >>> np.strings.rsplit(a, 'A') # doctest: +SKIP + array([list(['a', 'a', 'a', '']), # doctest: +SKIP + list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit)) + + +def _splitlines_dispatcher(a, keepends=None): + return (a,) + + +@array_function_dispatch(_splitlines_dispatcher) +def _splitlines(a, keepends=None): + """ + For each element in `a`, return a list of the lines in the + element, breaking at line boundaries. + + Calls :meth:`str.splitlines` element-wise. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + keepends : bool, optional + Line breaks are not included in the resulting list unless + keepends is given and true. + + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.splitlines + + Examples + -------- + >>> np.char.splitlines("first line\\nsecond line") + array(list(['first line', 'second line']), dtype=object) + >>> a = np.array(["first\\nsecond", "third\\nfourth"]) + >>> np.char.splitlines(a) + array([list(['first', 'second']), list(['third', 'fourth'])], dtype=object) + + """ + return _vec_string( + a, np.object_, 'splitlines', _clean_args(keepends)) + + +def _partition_dispatcher(a, sep): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_partition_dispatcher) +def partition(a, sep): + """ + Partition each element in ``a`` around ``sep``. + + For each element in ``a``, split the element at the first + occurrence of ``sep``, and return a 3-tuple containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, the first item of + the tuple will contain the whole string, and the second and third + ones will be the empty string. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array + sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Separator to split each string element in ``a``. 
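As the ``partition`` docstring above notes, the result is a 3-tuple of arrays rather than an array of per-element tuples, which keeps each piece in a contiguous string array. A minimal sketch (NumPy >= 2.0 assumed):

import numpy as np

x = np.array(["Numpy is nice!"])
before, sep, after = np.strings.partition(x, " ")
print(before)  # ['Numpy']
print(sep)     # [' ']
print(after)   # ['is nice!']

# When the separator is missing, the last two pieces are empty strings.
print(np.strings.partition(np.array(["nospace"]), " ")[2])  # ['']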
+ + Returns + ------- + out : 3-tuple: + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + part before the separator + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + separator + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + part after the separator + + See Also + -------- + str.partition + + Examples + -------- + >>> import numpy as np + >>> x = np.array(["Numpy is nice!"]) + >>> np.strings.partition(x, " ") + (array(['Numpy'], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rpartition(a, 'A') + (array(['aAaAa', ' a', 'abB'], dtype='>> import numpy as np + >>> a = np.array(['a1b c', '1bca', 'bca1']) + >>> table = a[0].maketrans('abc', '123') + >>> deletechars = ' ' + >>> np.char.translate(a, table, deletechars) + array(['112 3', '1231', '2311'], dtype='>> import numpy as np + >>> a = np.array(['hello', 'world']) + >>> np.strings.slice(a, 2) + array(['he', 'wo'], dtype='>> np.strings.slice(a, 1, 5, 2) + array(['el', 'ol'], dtype='>> np.strings.slice(a, np.array([1, 2]), np.array([4, 5])) + array(['ell', 'rld'], dtype='>> b = np.array(['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'], + ... dtype=np.dtypes.StringDType()) + >>> np.strings.slice(b, -2) + array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType()) + + >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3]) + array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType()) + + >>> np.strings.slice(b, None, None, -1) + array(['dlrow olleh', 'εμσόκ υοσ αιεγ', '界世好你', '🌍 👋'], + dtype=StringDType()) + + """ + # Just like in the construction of a regular slice object, if only start + # is specified then start will become stop, see logic in slice_new. + if stop is None: + stop = start + start = None + + # adjust start, stop, step to be integers, see logic in PySlice_Unpack + if step is None: + step = 1 + step = np.asanyarray(step) + if not np.issubdtype(step.dtype, np.integer): + raise TypeError(f"unsupported type {step.dtype} for operand 'step'") + if np.any(step == 0): + raise ValueError("slice step cannot be zero") + + if start is None: + start = np.where(step < 0, np.iinfo(np.intp).max, 0) + + if stop is None: + stop = np.where(step < 0, np.iinfo(np.intp).min, np.iinfo(np.intp).max) + + return _slice(a, start, stop, step) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/strings.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/strings.pyi new file mode 100644 index 0000000..b187ce7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/strings.pyi @@ -0,0 +1,511 @@ +from typing import TypeAlias, overload + +import numpy as np +from numpy._typing import NDArray, _AnyShape, _SupportsArray +from numpy._typing import _ArrayLikeAnyString_co as UST_co +from numpy._typing import _ArrayLikeBytes_co as S_co +from numpy._typing import _ArrayLikeInt_co as i_co +from numpy._typing import _ArrayLikeStr_co as U_co +from numpy._typing import _ArrayLikeString_co as T_co + +__all__ = [ + "add", + "capitalize", + "center", + "count", + "decode", + "encode", + "endswith", + "equal", + "expandtabs", + "find", + "greater", + "greater_equal", + "index", + "isalnum", + "isalpha", + "isdecimal", + "isdigit", + "islower", + "isnumeric", + "isspace", + "istitle", + "isupper", + "less", + "less_equal", + "ljust", + "lower", + "lstrip", + "mod", + "multiply", + "not_equal", + "partition", + "replace", + "rfind", + "rindex", + "rjust", + "rpartition", + "rstrip", + "startswith", + "str_len", + "strip", + 
"swapcase", + "title", + "translate", + "upper", + "zfill", + "slice", +] + +_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] +_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] +_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray + +@overload +def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... +@overload +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... +@overload +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def mod(a: U_co, value: object) -> NDArray[np.str_]: ... +@overload +def mod(a: S_co, value: object) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ... + +def isalpha(x: UST_co) -> NDArray[np.bool]: ... +def isalnum(a: UST_co) -> NDArray[np.bool]: ... +def isdigit(x: UST_co) -> NDArray[np.bool]: ... +def isspace(x: UST_co) -> NDArray[np.bool]: ... +def isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ... +def isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ... +def islower(a: UST_co) -> NDArray[np.bool]: ... +def istitle(a: UST_co) -> NDArray[np.bool]: ... +def isupper(a: UST_co) -> NDArray[np.bool]: ... + +def str_len(x: UST_co) -> NDArray[np.int_]: ... + +@overload +def find( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def find( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def find( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def rfind( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... 
+@overload +def rfind( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def rfind( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def index( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def index( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def index( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def rindex( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def rindex( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def rindex( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def count( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def count( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def count( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def startswith( + a: U_co, + prefix: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: S_co, + prefix: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: T_co, + prefix: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... + +@overload +def endswith( + a: U_co, + suffix: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: S_co, + suffix: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: T_co, + suffix: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... + +def decode( + a: S_co, + encoding: str | None = None, + errors: str | None = None, +) -> NDArray[np.str_]: ... +def encode( + a: U_co | T_co, + encoding: str | None = None, + errors: str | None = None, +) -> NDArray[np.bytes_]: ... + +@overload +def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def center(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def ljust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... 
+ +@overload +def rjust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def rjust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def rjust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def rstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def strip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def strip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ... +@overload +def zfill(a: S_co, width: i_co) -> NDArray[np.bytes_]: ... +@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def upper(a: U_co) -> NDArray[np.str_]: ... +@overload +def upper(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def lower(a: U_co) -> NDArray[np.str_]: ... +@overload +def lower(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def swapcase(a: U_co) -> NDArray[np.str_]: ... +@overload +def swapcase(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def capitalize(a: U_co) -> NDArray[np.str_]: ... +@overload +def capitalize(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def title(a: U_co) -> NDArray[np.str_]: ... +@overload +def title(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def replace( + a: U_co, + old: U_co, + new: U_co, + count: i_co = ..., +) -> NDArray[np.str_]: ... +@overload +def replace( + a: S_co, + old: S_co, + new: S_co, + count: i_co = ..., +) -> NDArray[np.bytes_]: ... 
+@overload +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = ..., +) -> _StringDTypeArray: ... +@overload +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = ..., +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... +@overload +def partition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rpartition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... +@overload +def rpartition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def translate( + a: U_co, + table: str, + deletechars: str | None = None, +) -> NDArray[np.str_]: ... +@overload +def translate( + a: S_co, + table: str, + deletechars: str | None = None, +) -> NDArray[np.bytes_]: ... +@overload +def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: str | None = None, +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: str | None = None, +) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.str_]: ... # type: ignore[overload-overlap] +@overload +def slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeOrUnicodeArray: ... 
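The ``slice`` implementation shown earlier normalises its arguments the way Python's built-in ``slice`` does: a single positional argument is interpreted as ``stop``, and open-ended bounds are encoded as ``np.iinfo(np.intp)`` sentinels chosen by the sign of ``step``. A sketch of the resulting semantics (``np.strings.slice`` is a recent addition; NumPy >= 2.3 assumed):

import numpy as np

a = np.array(['hello', 'world'])
assert (np.strings.slice(a, 2) == np.strings.slice(a, None, 2)).all()  # ['he' 'wo']
print(np.strings.slice(a, None, None, -1))  # reversed: ['olleh' 'dlrow']

# start/stop broadcast elementwise, like any other ufunc operand
print(np.strings.slice(a, np.array([1, 2]), np.array([4, 5])))  # ['ell' 'rld']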
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/_locales.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/_locales.cpython-312.pyc new file mode 100644 index 0000000..2ff2bc1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/_locales.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/_natype.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/_natype.cpython-312.pyc new file mode 100644 index 0000000..3c399d9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/_natype.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-312.pyc new file mode 100644 index 0000000..c699888 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_abc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_abc.cpython-312.pyc new file mode 100644 index 0000000..40e9375 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_abc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_api.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_api.cpython-312.pyc new file mode 100644 index 0000000..a58ff81 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_api.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_argparse.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_argparse.cpython-312.pyc new file mode 100644 index 0000000..7719c19 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_argparse.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_api_info.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_api_info.cpython-312.pyc new file mode 100644 index 0000000..51fb4f5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_api_info.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_coercion.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_coercion.cpython-312.pyc new file mode 100644 index 0000000..e945dff Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_coercion.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_interface.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_interface.cpython-312.pyc new file mode 100644 index 0000000..f5dc08a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_array_interface.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arraymethod.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arraymethod.cpython-312.pyc new file mode 100644 index 0000000..c429d91 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arraymethod.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arrayobject.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arrayobject.cpython-312.pyc new file mode 100644 index 0000000..f609ac4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arrayobject.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arrayprint.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arrayprint.cpython-312.pyc new file mode 100644 index 0000000..207c4fd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_arrayprint.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-312.pyc new file mode 100644 index 0000000..d824fdf Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_casting_unittests.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_casting_unittests.cpython-312.pyc new file mode 100644 index 0000000..ae0cfba Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_casting_unittests.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_conversion_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_conversion_utils.cpython-312.pyc new file mode 100644 index 0000000..4701a48 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_conversion_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-312.pyc new file mode 100644 index 0000000..2147ee6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cpu_features.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cpu_features.cpython-312.pyc new file mode 100644 index 0000000..986635b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cpu_features.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-312.pyc new file mode 100644 index 0000000..7b8947c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cython.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cython.cpython-312.pyc new file mode 100644 index 0000000..111302f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_cython.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_datetime.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_datetime.cpython-312.pyc new file mode 100644 index 0000000..25d54fb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_datetime.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_defchararray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_defchararray.cpython-312.pyc new file mode 100644 index 0000000..eea0d51 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_defchararray.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_deprecations.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_deprecations.cpython-312.pyc new file mode 100644 index 0000000..5a5df17 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_deprecations.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_dlpack.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_dlpack.cpython-312.pyc new file mode 100644 index 0000000..06d72d1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_dlpack.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_dtype.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_dtype.cpython-312.pyc new file mode 100644 index 0000000..6c7ad88 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_dtype.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_einsum.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_einsum.cpython-312.pyc new file mode 100644 index 0000000..35cb55a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_einsum.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_errstate.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_errstate.cpython-312.pyc new file mode 100644 index 0000000..604b57e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_errstate.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_extint128.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_extint128.cpython-312.pyc new file mode 100644 index 0000000..aa90f69 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_extint128.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_function_base.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_function_base.cpython-312.pyc new file mode 100644 index 0000000..31c8c22 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_function_base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_getlimits.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_getlimits.cpython-312.pyc new file mode 100644 index 0000000..6a52fff Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_getlimits.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_half.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_half.cpython-312.pyc new file mode 100644 index 0000000..df043c2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_half.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_hashtable.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_hashtable.cpython-312.pyc new file mode 100644 index 0000000..d465e90 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_hashtable.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_indexerrors.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_indexerrors.cpython-312.pyc new file mode 100644 index 0000000..d522867 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_indexerrors.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_indexing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_indexing.cpython-312.pyc new file mode 100644 index 0000000..773f201 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_indexing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_item_selection.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_item_selection.cpython-312.pyc new file mode 100644 index 0000000..b75140e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_item_selection.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_limited_api.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_limited_api.cpython-312.pyc new file mode 100644 index 0000000..41fc8fa Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_limited_api.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_longdouble.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_longdouble.cpython-312.pyc new file mode 100644 index 0000000..99e0d83 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_longdouble.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_machar.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_machar.cpython-312.pyc new file mode 100644 index 0000000..790d2e0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_machar.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_mem_overlap.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_mem_overlap.cpython-312.pyc new file mode 100644 index 0000000..7f0fb6f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_mem_overlap.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_mem_policy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_mem_policy.cpython-312.pyc new file mode 100644 index 0000000..064f3ca Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_mem_policy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_memmap.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_memmap.cpython-312.pyc new file mode 100644 index 0000000..574af4a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_memmap.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_multiarray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_multiarray.cpython-312.pyc new file mode 100644 index 0000000..9532db8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_multiarray.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_multithreading.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_multithreading.cpython-312.pyc new file mode 100644 index 0000000..fedc155 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_multithreading.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_nditer.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_nditer.cpython-312.pyc new file mode 100644 index 0000000..a33cd89 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_nditer.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-312.pyc new file mode 100644 index 0000000..5325076 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_numeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_numeric.cpython-312.pyc new file mode 100644 index 0000000..22d62b8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_numeric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_numerictypes.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_numerictypes.cpython-312.pyc new file mode 100644 index 0000000..fd3ae3d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_numerictypes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_overrides.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_overrides.cpython-312.pyc new file mode 100644 index 0000000..7115dcb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_overrides.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_print.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_print.cpython-312.pyc new file mode 100644 index 0000000..68530ec Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_print.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_protocols.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_protocols.cpython-312.pyc new file mode 100644 index 0000000..4610ff8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_protocols.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_records.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_records.cpython-312.pyc new file mode 100644 index 0000000..a700a7f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_records.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-312.pyc new file mode 100644 index 0000000..996363b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-312.pyc new file mode 100644 index 0000000..67d8c55 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-312.pyc new file mode 100644 index 0000000..9efa210 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-312.pyc new file mode 100644 index 0000000..d0b6eed Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarinherit.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarinherit.cpython-312.pyc new file mode 100644 index 0000000..5f57cb4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarinherit.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarmath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarmath.cpython-312.pyc new file mode 100644 index 0000000..d5509d0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarmath.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-312.pyc new file mode 100644 index 0000000..585a575 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_shape_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_shape_base.cpython-312.pyc new file mode 100644 index 0000000..b06fceb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_shape_base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_simd.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_simd.cpython-312.pyc new file mode 100644 index 0000000..7f2c07d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_simd.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_simd_module.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_simd_module.cpython-312.pyc new file mode 100644 index 0000000..b05f5f9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_simd_module.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_stringdtype.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_stringdtype.cpython-312.pyc new file mode 100644 index 0000000..5484abe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_stringdtype.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_strings.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_strings.cpython-312.pyc new file mode 100644 index 0000000..f2f2ee6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_strings.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_ufunc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_ufunc.cpython-312.pyc new file mode 100644 index 0000000..168a209 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_ufunc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath.cpython-312.pyc new file mode 
100644 index 0000000..a4b0db6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-312.pyc new file mode 100644 index 0000000..2af157b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath_complex.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath_complex.cpython-312.pyc new file mode 100644 index 0000000..6c038d1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_umath_complex.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_unicode.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_unicode.cpython-312.pyc new file mode 100644 index 0000000..0fcf3c8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/__pycache__/test_unicode.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/_locales.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/_locales.py new file mode 100644 index 0000000..debda96 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/_locales.py @@ -0,0 +1,72 @@ +"""Provide class for testing in French locale + +""" +import locale +import sys + +import pytest + +__ALL__ = ['CommaDecimalPointLocale'] + + +def find_comma_decimal_point_locale(): + """See if platform has a decimal point as comma locale. + + Find a locale that uses a comma instead of a period as the + decimal point. + + Returns + ------- + old_locale: str + Locale when the function was called. + new_locale: {str, None) + First French locale found, None if none found. + + """ + if sys.platform == 'win32': + locales = ['FRENCH'] + else: + locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] + + old_locale = locale.getlocale(locale.LC_NUMERIC) + new_locale = None + try: + for loc in locales: + try: + locale.setlocale(locale.LC_NUMERIC, loc) + new_locale = loc + break + except locale.Error: + pass + finally: + locale.setlocale(locale.LC_NUMERIC, locale=old_locale) + return old_locale, new_locale + + +class CommaDecimalPointLocale: + """Sets LC_NUMERIC to a locale with comma as decimal point. + + Classes derived from this class have setup and teardown methods that run + tests with locale.LC_NUMERIC set to a locale where commas (',') are used as + the decimal point instead of periods ('.'). On exit the locale is restored + to the initial locale. It also serves as context manager with the same + effect. If no such locale is available, the test is skipped. 
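+
+    A minimal usage sketch, assuming one of the comma-decimal locales
+    probed above is actually installed (otherwise the block is skipped)::
+
+        with CommaDecimalPointLocale():
+            assert locale.str(1.5) == '1,5'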
+ + """ + (cur_locale, tst_locale) = find_comma_decimal_point_locale() + + def setup_method(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def teardown_method(self): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) + + def __enter__(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def __exit__(self, type, value, traceback): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/_natype.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/_natype.py new file mode 100644 index 0000000..1c2175b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/_natype.py @@ -0,0 +1,205 @@ +# Vendored implementation of pandas.NA, adapted from pandas/_libs/missing.pyx +# +# This is vendored to avoid adding pandas as a test dependency. + +__all__ = ["pd_NA"] + +import numbers + +import numpy as np + + +def _create_binary_propagating_op(name, is_divmod=False): + is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"] + + def method(self, other): + if ( + other is pd_NA + or isinstance(other, (str, bytes, numbers.Number, np.bool)) + or (isinstance(other, np.ndarray) and not other.shape) + ): + # Need the other.shape clause to handle NumPy scalars, + # since we do a setitem on `out` below, which + # won't work for NumPy scalars. + if is_divmod: + return pd_NA, pd_NA + else: + return pd_NA + + elif isinstance(other, np.ndarray): + out = np.empty(other.shape, dtype=object) + out[:] = pd_NA + + if is_divmod: + return out, out.copy() + else: + return out + + elif is_cmp and isinstance(other, (np.datetime64, np.timedelta64)): + return pd_NA + + elif isinstance(other, np.datetime64): + if name in ["__sub__", "__rsub__"]: + return pd_NA + + elif isinstance(other, np.timedelta64): + if name in ["__sub__", "__rsub__", "__add__", "__radd__"]: + return pd_NA + + return NotImplemented + + method.__name__ = name + return method + + +def _create_unary_propagating_op(name: str): + def method(self): + return pd_NA + + method.__name__ = name + return method + + +class NAType: + def __repr__(self) -> str: + return "" + + def __format__(self, format_spec) -> str: + try: + return self.__repr__().__format__(format_spec) + except ValueError: + return self.__repr__() + + def __bool__(self): + raise TypeError("boolean value of NA is ambiguous") + + def __hash__(self): + exponent = 31 if is_32bit else 61 + return 2**exponent - 1 + + def __reduce__(self): + return "pd_NA" + + # Binary arithmetic and comparison ops -> propagate + + __add__ = _create_binary_propagating_op("__add__") + __radd__ = _create_binary_propagating_op("__radd__") + __sub__ = _create_binary_propagating_op("__sub__") + __rsub__ = _create_binary_propagating_op("__rsub__") + __mul__ = _create_binary_propagating_op("__mul__") + __rmul__ = _create_binary_propagating_op("__rmul__") + __matmul__ = _create_binary_propagating_op("__matmul__") + __rmatmul__ = _create_binary_propagating_op("__rmatmul__") + __truediv__ = _create_binary_propagating_op("__truediv__") + __rtruediv__ = _create_binary_propagating_op("__rtruediv__") + __floordiv__ = _create_binary_propagating_op("__floordiv__") + __rfloordiv__ = _create_binary_propagating_op("__rfloordiv__") + __mod__ = _create_binary_propagating_op("__mod__") + __rmod__ = _create_binary_propagating_op("__rmod__") + 
__divmod__ = _create_binary_propagating_op("__divmod__", is_divmod=True) + __rdivmod__ = _create_binary_propagating_op("__rdivmod__", is_divmod=True) + # __lshift__ and __rshift__ are not implemented + + __eq__ = _create_binary_propagating_op("__eq__") + __ne__ = _create_binary_propagating_op("__ne__") + __le__ = _create_binary_propagating_op("__le__") + __lt__ = _create_binary_propagating_op("__lt__") + __gt__ = _create_binary_propagating_op("__gt__") + __ge__ = _create_binary_propagating_op("__ge__") + + # Unary ops + + __neg__ = _create_unary_propagating_op("__neg__") + __pos__ = _create_unary_propagating_op("__pos__") + __abs__ = _create_unary_propagating_op("__abs__") + __invert__ = _create_unary_propagating_op("__invert__") + + # pow has special + def __pow__(self, other): + if other is pd_NA: + return pd_NA + elif isinstance(other, (numbers.Number, np.bool)): + if other == 0: + # returning positive is correct for +/- 0. + return type(other)(1) + else: + return pd_NA + elif util.is_array(other): + return np.where(other == 0, other.dtype.type(1), pd_NA) + + return NotImplemented + + def __rpow__(self, other): + if other is pd_NA: + return pd_NA + elif isinstance(other, (numbers.Number, np.bool)): + if other == 1: + return other + else: + return pd_NA + elif util.is_array(other): + return np.where(other == 1, other, pd_NA) + return NotImplemented + + # Logical ops using Kleene logic + + def __and__(self, other): + if other is False: + return False + elif other is True or other is pd_NA: + return pd_NA + return NotImplemented + + __rand__ = __and__ + + def __or__(self, other): + if other is True: + return True + elif other is False or other is pd_NA: + return pd_NA + return NotImplemented + + __ror__ = __or__ + + def __xor__(self, other): + if other is False or other is True or other is pd_NA: + return pd_NA + return NotImplemented + + __rxor__ = __xor__ + + __array_priority__ = 1000 + _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + types = self._HANDLED_TYPES + (NAType,) + for x in inputs: + if not isinstance(x, types): + return NotImplemented + + if method != "__call__": + raise ValueError(f"ufunc method '{method}' not supported for NA") + result = maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is NotImplemented: + # For a NumPy ufunc that's not a binop, like np.logaddexp + index = next(i for i, x in enumerate(inputs) if x is pd_NA) + result = np.broadcast_arrays(*inputs)[index] + if result.ndim == 0: + result = result.item() + if ufunc.nout > 1: + result = (pd_NA,) * ufunc.nout + + return result + + +pd_NA = NAType() + + +def get_stringdtype_dtype(na_object, coerce=True): + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object is pd_NA or na_object != "unset": + return np.dtypes.StringDType(na_object=na_object, coerce=coerce) + else: + return np.dtypes.StringDType(coerce=coerce) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/astype_copy.pkl b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/astype_copy.pkl new file mode 100644 index 0000000..7397c97 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/astype_copy.pkl differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/generate_umath_validation_data.cpp b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/generate_umath_validation_data.cpp new file mode 100644 index 0000000..88ff45e --- 
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/astype_copy.pkl b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/astype_copy.pkl
new file mode 100644
index 0000000..7397c97
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/astype_copy.pkl differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/generate_umath_validation_data.cpp b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/generate_umath_validation_data.cpp
new file mode 100644
index 0000000..88ff45e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/generate_umath_validation_data.cpp
@@ -0,0 +1,170 @@
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <limits>
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string>
+#include <vector>
+
+struct ufunc {
+    std::string name;
+    double (*f32func)(double);
+    long double (*f64func)(long double);
+    float f32ulp;
+    float f64ulp;
+};
+
+template <typename T>
+T
+RandomFloat(T a, T b)
+{
+    T random = ((T)rand()) / (T)RAND_MAX;
+    T diff = b - a;
+    T r = random * diff;
+    return a + r;
+}
+
+template <typename T>
+void
+append_random_array(std::vector<T> &arr, T min, T max, size_t N)
+{
+    for (size_t ii = 0; ii < N; ++ii)
+        arr.emplace_back(RandomFloat(min, max));
+}
+
+template <typename T1, typename T2>
+std::vector<T1>
+computeTrueVal(const std::vector<T1> &in, T2 (*mathfunc)(T2))
+{
+    std::vector<T1> out;
+    for (T1 elem : in) {
+        T2 elem_d = (T2)elem;
+        T1 out_elem = (T1)mathfunc(elem_d);
+        out.emplace_back(out_elem);
+    }
+    return out;
+}
+
+/*
+ * FP range:
+ * [-inf, -maxflt, -1., -minflt, -minden, 0., minden, minflt, 1., maxflt, inf]
+ */
+
+#define MINDEN std::numeric_limits<T>::denorm_min()
+#define MINFLT std::numeric_limits<T>::min()
+#define MAXFLT std::numeric_limits<T>::max()
+#define INF std::numeric_limits<T>::infinity()
+#define qNAN std::numeric_limits<T>::quiet_NaN()
+#define sNAN std::numeric_limits<T>::signaling_NaN()
+
+template <typename T>
+std::vector<T>
+generate_input_vector(std::string func)
+{
+    std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT,
+                            -MAXFLT, INF, -INF, qNAN, sNAN,
+                            -1.0, 1.0, 0.0, -0.0};
+
+    // [-1.0, 1.0]
+    if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) {
+        append_random_array<T>(input, -1.0, 1.0, 700);
+    }
+    // (0.0, INF]
+    else if ((func == "log2") || (func == "log10")) {
+        append_random_array<T>(input, 0.0, 1.0, 200);
+        append_random_array<T>(input, MINDEN, MINFLT, 200);
+        append_random_array<T>(input, MINFLT, 1.0, 200);
+        append_random_array<T>(input, 1.0, MAXFLT, 200);
+    }
+    // (-1.0, INF]
+    else if (func == "log1p") {
+        append_random_array<T>(input, -1.0, 1.0, 200);
+        append_random_array<T>(input, -MINFLT, -MINDEN, 100);
+        append_random_array<T>(input, -1.0, -MINFLT, 100);
+        append_random_array<T>(input, MINDEN, MINFLT, 100);
+        append_random_array<T>(input, MINFLT, 1.0, 100);
+        append_random_array<T>(input, 1.0, MAXFLT, 100);
+    }
+    // [1.0, INF]
+    else if (func == "arccosh") {
+        append_random_array<T>(input, 1.0, 2.0, 400);
+        append_random_array<T>(input, 2.0, MAXFLT, 300);
+    }
+    // [-INF, INF]
+    else {
+        append_random_array<T>(input, -1.0, 1.0, 100);
+        append_random_array<T>(input, MINDEN, MINFLT, 100);
+        append_random_array<T>(input, -MINFLT, -MINDEN, 100);
+        append_random_array<T>(input, MINFLT, 1.0, 100);
+        append_random_array<T>(input, -1.0, -MINFLT, 100);
+        append_random_array<T>(input, 1.0, MAXFLT, 100);
+        append_random_array<T>(input, -MAXFLT, -100.0, 100);
+    }
+
+    std::random_shuffle(input.begin(), input.end());
+    return input;
+}
+
+int
+main()
+{
+    srand(42);
+    std::vector<struct ufunc> umathfunc = {
+        {"sin", sin, sin, 1.49, 1.00},
+        {"cos", cos, cos, 1.49, 1.00},
+        {"tan", tan, tan, 3.91, 1.00},
+        {"arcsin", asin, asin, 3.12, 1.00},
+        {"arccos", acos, acos, 2.1, 1.00},
+        {"arctan", atan, atan, 2.3, 1.00},
+        {"sinh", sinh, sinh, 1.55, 1.00},
+        {"cosh", cosh, cosh, 2.48, 1.00},
+        {"tanh", tanh, tanh, 1.38, 2.00},
+        {"arcsinh", asinh, asinh, 1.01, 1.00},
+        {"arccosh", acosh, acosh, 1.16, 1.00},
+        {"arctanh", atanh, atanh, 1.45, 1.00},
+        {"cbrt", cbrt, cbrt, 1.94, 2.00},
+        //{"exp",exp,exp,3.76,1.00},
+        {"exp2", exp2, exp2, 1.01, 1.00},
+        {"expm1", expm1, expm1, 2.62, 1.00},
+        //{"log",log,log,1.84,1.00},
+        {"log10", log10, log10, 3.5, 1.00},
+        {"log1p", log1p, log1p, 1.96, 1.0},
+        {"log2", log2, log2, 2.12, 1.00},
+    };
+
+    for (int ii = 0; ii < umathfunc.size(); ++ii) {
+        // ignore sin/cos
+        if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) {
+            std::string fileName =
+                    "umath-validation-set-" + umathfunc[ii].name + ".csv";
+            std::ofstream txtOut;
+            txtOut.open(fileName, std::ofstream::trunc);
+            txtOut << "dtype,input,output,ulperrortol" << std::endl;
+
+            // Single Precision
+            auto f32in = generate_input_vector<float>(umathfunc[ii].name);
+            auto f32out = computeTrueVal<float, double>(f32in,
+                                                        umathfunc[ii].f32func);
+            for (int jj = 0; jj < f32in.size(); ++jj) {
+                txtOut << "np.float32" << std::hex << ",0x"
+                       << *reinterpret_cast<uint32_t *>(&f32in[jj]) << ",0x"
+                       << *reinterpret_cast<uint32_t *>(&f32out[jj]) << ","
+                       << ceil(umathfunc[ii].f32ulp) << std::endl;
+            }
+
+            // Double Precision
+            auto f64in = generate_input_vector<double>(umathfunc[ii].name);
+            auto f64out = computeTrueVal<double, long double>(
+                    f64in, umathfunc[ii].f64func);
+            for (int jj = 0; jj < f64in.size(); ++jj) {
+                txtOut << "np.float64" << std::hex << ",0x"
+                       << *reinterpret_cast<uint64_t *>(&f64in[jj]) << ",0x"
+                       << *reinterpret_cast<uint64_t *>(&f64out[jj]) << ","
+                       << ceil(umathfunc[ii].f64ulp) << std::endl;
+            }
+            txtOut.close();
+        }
+    }
+    return 0;
+}
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/recarray_from_file.fits b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/recarray_from_file.fits
new file mode 100644
index 0000000..ca48ee8
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/recarray_from_file.fits differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-README.txt b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-README.txt
new file mode 100644
index 0000000..cfc9e41
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-README.txt
@@ -0,0 +1,15 @@
+Steps to validate transcendental functions:
+1) Add a file 'umath-validation-set-<ufuncname>.txt', where ufuncname is the name of
+   the function in NumPy you want to validate
+2) The file should contain 4 columns: dtype,input,expected output,ulperror
+   a. dtype: one of np.float16, np.float32, np.float64
+   b. input: floating point input to ufunc in hex. Example: 0x414570a4
+      represents 12.340000152587890625
+   c. expected output: floating point output for the corresponding input in hex.
+      This should be computed using a high(er) precision library and then rounded to
+      the same format as the input.
+   d. ulperror: expected maximum ulp error of the function. This
+      should be the same across all rows of the same dtype. Otherwise, the function is
+      tested for the maximum ulp error among all entries of that dtype.
+3) Add the file umath-validation-set-<ufuncname>.txt to the test file test_umath_accuracy.py,
+   which will then validate your ufunc.
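
The hex encoding the README describes is simply the IEEE-754 bit pattern of each value. As a sketch, one row of these CSV files can be decoded and checked in Python as follows; the ULP computation below is illustrative (it is valid for same-sign finite values) and is not necessarily how test_umath_accuracy.py implements the comparison:

    import numpy as np

    # First float32 row of the arccos table below: arccos(-0.10815...) ~= 1.6794
    row = "np.float32,0xbddd7f50,0x3fd6eec2,3"
    _, hex_in, hex_out, ulp_tol = row.split(",")

    x = np.uint32(int(hex_in, 16)).view(np.float32)
    expected = np.uint32(int(hex_out, 16)).view(np.float32)
    computed = np.arccos(x)

    # ULP distance via the integer bit patterns of the two floats
    err = abs(int(computed.view(np.uint32)) - int(expected.view(np.uint32)))
    assert err <= int(ulp_tol)

For np.float64 rows the same idea applies with np.uint64 in place of np.uint32.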
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arccos.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arccos.csv new file mode 100644 index 0000000..82c8595 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arccos.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbddd7f50,0x3fd6eec2,3 +np.float32,0xbe32a20c,0x3fdf8182,3 +np.float32,0xbf607c09,0x4028f84f,3 +np.float32,0x3f25d906,0x3f5db544,3 +np.float32,0x3f01cec8,0x3f84febf,3 +np.float32,0x3f1d5c6e,0x3f68a735,3 +np.float32,0xbf0cab89,0x4009c36d,3 +np.float32,0xbf176b40,0x400d0941,3 +np.float32,0x3f3248b2,0x3f4ce6d4,3 +np.float32,0x3f390b48,0x3f434e0d,3 +np.float32,0xbe261698,0x3fddea43,3 +np.float32,0x3f0e1154,0x3f7b848b,3 +np.float32,0xbf379a3c,0x4017b764,3 +np.float32,0xbeda6f2c,0x4000bd62,3 +np.float32,0xbf6a0c3f,0x402e5d5a,3 +np.float32,0x3ef1d700,0x3f8a17b7,3 +np.float32,0xbf6f4f65,0x4031d30d,3 +np.float32,0x3f2c9eee,0x3f54adfd,3 +np.float32,0x3f3cfb18,0x3f3d8a1e,3 +np.float32,0x3ba80800,0x3fc867d2,3 +np.float32,0x3e723b08,0x3faa7e4d,3 +np.float32,0xbf65820f,0x402bb054,3 +np.float32,0xbee64e7a,0x40026410,3 +np.float32,0x3cb15140,0x3fc64a87,3 +np.float32,0x3f193660,0x3f6ddf2a,3 +np.float32,0xbf0e5b52,0x400a44f7,3 +np.float32,0x3ed55f14,0x3f920a4b,3 +np.float32,0x3dd11a80,0x3fbbf85c,3 +np.float32,0xbf4f5c4b,0x4020f4f9,3 +np.float32,0x3f787532,0x3e792e87,3 +np.float32,0x3f40e6ac,0x3f37a74f,3 +np.float32,0x3f1c1318,0x3f6a47b6,3 +np.float32,0xbe3c48d8,0x3fe0bb70,3 +np.float32,0xbe94d4bc,0x3feed08e,3 +np.float32,0xbe5c3688,0x3fe4ce26,3 +np.float32,0xbf6fe026,0x403239cb,3 +np.float32,0x3ea5983c,0x3f9ee7bf,3 +np.float32,0x3f1471e6,0x3f73c5bb,3 +np.float32,0x3f0e2622,0x3f7b6b87,3 +np.float32,0xbf597180,0x40257ad1,3 +np.float32,0xbeb5321c,0x3ff75d34,3 +np.float32,0x3f5afcd2,0x3f0b6012,3 +np.float32,0xbef2ff88,0x40042e14,3 +np.float32,0xbedc747e,0x400104f5,3 +np.float32,0xbee0c2f4,0x40019dfc,3 +np.float32,0xbf152cd8,0x400c57dc,3 +np.float32,0xbf6cf9e2,0x40303bbe,3 +np.float32,0x3ed9cd74,0x3f90d1a1,3 +np.float32,0xbf754406,0x4036767f,3 +np.float32,0x3f59c5c2,0x3f0db42f,3 +np.float32,0x3f2eefd8,0x3f518684,3 +np.float32,0xbf156bf9,0x400c6b49,3 +np.float32,0xbd550790,0x3fcfb8dc,3 +np.float32,0x3ede58fc,0x3f8f8f77,3 +np.float32,0xbf00ac19,0x40063c4b,3 +np.float32,0x3f4d25ba,0x3f24280e,3 +np.float32,0xbe9568be,0x3feef73c,3 +np.float32,0x3f67d154,0x3ee05547,3 +np.float32,0x3f617226,0x3efcb4f4,3 +np.float32,0xbf3ab41a,0x4018d6cc,3 +np.float32,0xbf3186fe,0x401592cd,3 +np.float32,0x3de3ba50,0x3fbacca9,3 +np.float32,0x3e789f98,0x3fa9ab97,3 +np.float32,0x3f016e08,0x3f8536d8,3 +np.float32,0x3e8b618c,0x3fa5c571,3 +np.float32,0x3eff97bc,0x3f8628a9,3 +np.float32,0xbf6729f0,0x402ca32f,3 +np.float32,0xbebec146,0x3ff9eddc,3 +np.float32,0x3ddb2e60,0x3fbb563a,3 +np.float32,0x3caa8e40,0x3fc66595,3 +np.float32,0xbf5973f2,0x40257bfa,3 +np.float32,0xbdd82c70,0x3fd69916,3 +np.float32,0xbedf4c82,0x400169ef,3 +np.float32,0x3ef8f22c,0x3f881184,3 +np.float32,0xbf1d74d4,0x400eedc9,3 +np.float32,0x3f2e10a6,0x3f52b790,3 +np.float32,0xbf08ecc0,0x4008a628,3 +np.float32,0x3ecb7db4,0x3f94be9f,3 +np.float32,0xbf052ded,0x40078bfc,3 +np.float32,0x3f2ee78a,0x3f5191e4,3 +np.float32,0xbf56f4e1,0x40245194,3 +np.float32,0x3f600a3e,0x3f014a25,3 +np.float32,0x3f3836f8,0x3f44808b,3 +np.float32,0x3ecabfbc,0x3f94f25c,3 +np.float32,0x3c70f500,0x3fc72dec,3 +np.float32,0x3f17c444,0x3f6fabf0,3 +np.float32,0xbf4c22a5,0x401f9a09,3 
+np.float32,0xbe4205dc,0x3fe1765a,3 +np.float32,0x3ea49138,0x3f9f2d36,3 +np.float32,0xbece0082,0x3ffe106b,3 +np.float32,0xbe387578,0x3fe03eef,3 +np.float32,0xbf2b6466,0x40137a30,3 +np.float32,0xbe9dadb2,0x3ff12204,3 +np.float32,0xbf56b3f2,0x402433bb,3 +np.float32,0xbdf9b4d8,0x3fd8b51f,3 +np.float32,0x3f58a596,0x3f0fd4b4,3 +np.float32,0xbedf5748,0x40016b6e,3 +np.float32,0x3f446442,0x3f32476f,3 +np.float32,0x3f5be886,0x3f099658,3 +np.float32,0x3ea1e44c,0x3f9fe1de,3 +np.float32,0xbf11e9b8,0x400b585f,3 +np.float32,0xbf231f8f,0x4010befb,3 +np.float32,0xbf4395ea,0x401c2dd0,3 +np.float32,0x3e9e7784,0x3fa0c8a6,3 +np.float32,0xbe255184,0x3fddd14c,3 +np.float32,0x3f70d25e,0x3eb13148,3 +np.float32,0x3f220cdc,0x3f62a722,3 +np.float32,0xbd027bf0,0x3fcd23e7,3 +np.float32,0x3e4ef8b8,0x3faf02d2,3 +np.float32,0xbf76fc6b,0x40380728,3 +np.float32,0xbf57e761,0x4024c1cd,3 +np.float32,0x3ed4fc20,0x3f922580,3 +np.float32,0xbf09b64a,0x4008e1db,3 +np.float32,0x3f21ca62,0x3f62fcf5,3 +np.float32,0xbe55f610,0x3fe40170,3 +np.float32,0xbc0def80,0x3fca2bbb,3 +np.float32,0xbebc8764,0x3ff9547b,3 +np.float32,0x3ec1b200,0x3f9766d1,3 +np.float32,0xbf4ee44e,0x4020c1ee,3 +np.float32,0xbea85852,0x3ff3f22a,3 +np.float32,0xbf195c0c,0x400da3d3,3 +np.float32,0xbf754b5d,0x40367ce8,3 +np.float32,0xbdcbfe50,0x3fd5d52b,3 +np.float32,0xbf1adb87,0x400e1be3,3 +np.float32,0xbf6f8491,0x4031f898,3 +np.float32,0xbf6f9ae7,0x4032086e,3 +np.float32,0xbf52b3f0,0x40226790,3 +np.float32,0xbf698452,0x402e09f4,3 +np.float32,0xbf43dc9a,0x401c493a,3 +np.float32,0xbf165f7f,0x400cb664,3 +np.float32,0x3e635468,0x3fac682f,3 +np.float32,0xbe8cf2b6,0x3fecc28a,3 +np.float32,0x7f7fffff,0x7fc00000,3 +np.float32,0xbf4c6513,0x401fb597,3 +np.float32,0xbf02b8f8,0x4006d47e,3 +np.float32,0x3ed3759c,0x3f9290c8,3 +np.float32,0xbf2a7a5f,0x40132b98,3 +np.float32,0xbae65000,0x3fc9496f,3 +np.float32,0x3f65f5ea,0x3ee8ef07,3 +np.float32,0xbe7712fc,0x3fe84106,3 +np.float32,0xbb9ff700,0x3fc9afd2,3 +np.float32,0x3d8d87a0,0x3fc03592,3 +np.float32,0xbefc921c,0x40058c23,3 +np.float32,0xbf286566,0x401279d8,3 +np.float32,0x3f53857e,0x3f192eaf,3 +np.float32,0xbee9b0f4,0x4002dd90,3 +np.float32,0x3f4041f8,0x3f38a14a,3 +np.float32,0x3f54ea96,0x3f16b02d,3 +np.float32,0x3ea50ef8,0x3f9f0c01,3 +np.float32,0xbeaad2dc,0x3ff49a4a,3 +np.float32,0xbec428c8,0x3ffb636f,3 +np.float32,0xbda46178,0x3fd358c7,3 +np.float32,0xbefacfc4,0x40054b7f,3 +np.float32,0xbf7068f9,0x40329c85,3 +np.float32,0x3f70b850,0x3eb1caa7,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x80000000,0x3fc90fdb,3 +np.float32,0x3f68d5c8,0x3edb7cf3,3 +np.float32,0x3d9443d0,0x3fbfc98a,3 +np.float32,0xff7fffff,0x7fc00000,3 +np.float32,0xbeee7ba8,0x40038a5e,3 +np.float32,0xbf0aaaba,0x40092a73,3 +np.float32,0x3f36a4e8,0x3f46c0ee,3 +np.float32,0x3ed268e4,0x3f92da82,3 +np.float32,0xbee6002c,0x4002591b,3 +np.float32,0xbe8f2752,0x3fed5576,3 +np.float32,0x3f525912,0x3f1b40e0,3 +np.float32,0xbe8e151e,0x3fed0e16,3 +np.float32,0x1,0x3fc90fdb,3 +np.float32,0x3ee23b84,0x3f8e7ae1,3 +np.float32,0xbf5961ca,0x40257361,3 +np.float32,0x3f6bbca0,0x3ecd14cd,3 +np.float32,0x3e27b230,0x3fb4014d,3 +np.float32,0xbf183bb8,0x400d49fc,3 +np.float32,0x3f57759c,0x3f120b68,3 +np.float32,0xbd6994c0,0x3fd05d84,3 +np.float32,0xbf1dd684,0x400f0cc8,3 +np.float32,0xbececc1c,0x3ffe480a,3 +np.float32,0xbf48855f,0x401e206d,3 +np.float32,0x3f28c922,0x3f59d382,3 +np.float32,0xbf65c094,0x402bd3b0,3 +np.float32,0x3f657d42,0x3eeb11dd,3 +np.float32,0xbed32d4e,0x3fff7b15,3 +np.float32,0xbf31af02,0x4015a0b1,3 +np.float32,0x3d89eb00,0x3fc06f7f,3 
+np.float32,0x3dac2830,0x3fbe4a17,3 +np.float32,0x3f7f7cb6,0x3d81a7df,3 +np.float32,0xbedbb570,0x4000ea82,3 +np.float32,0x3db37830,0x3fbdd4a8,3 +np.float32,0xbf376f48,0x4017a7fd,3 +np.float32,0x3f319f12,0x3f4dd2c9,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x3f1b4f70,0x3f6b3e31,3 +np.float32,0x3e33c880,0x3fb278d1,3 +np.float32,0x3f2796e0,0x3f5b69bd,3 +np.float32,0x3f4915d6,0x3f2ad4d0,3 +np.float32,0x3e4db120,0x3faf2ca0,3 +np.float32,0x3ef03dd4,0x3f8a8ba9,3 +np.float32,0x3e96ca88,0x3fa2cbf7,3 +np.float32,0xbeb136ce,0x3ff64d2b,3 +np.float32,0xbf2f3938,0x4014c75e,3 +np.float32,0x3f769dde,0x3e8b0d76,3 +np.float32,0x3f67cec8,0x3ee06148,3 +np.float32,0x3f0a1ade,0x3f80204e,3 +np.float32,0x3e4b9718,0x3faf7144,3 +np.float32,0x3cccb480,0x3fc5dcf3,3 +np.float32,0x3caeb740,0x3fc654f0,3 +np.float32,0x3f684e0e,0x3ede0678,3 +np.float32,0x3f0ba93c,0x3f7e6663,3 +np.float32,0xbf12bbc4,0x400b985e,3 +np.float32,0xbf2a8e1a,0x40133235,3 +np.float32,0x3f42029c,0x3f35f5c5,3 +np.float32,0x3eed1728,0x3f8b6f9c,3 +np.float32,0xbe5779ac,0x3fe432fd,3 +np.float32,0x3f6ed8b8,0x3ebc7e4b,3 +np.float32,0x3eea25b0,0x3f8c43c7,3 +np.float32,0x3f1988a4,0x3f6d786b,3 +np.float32,0xbe751674,0x3fe7ff8a,3 +np.float32,0xbe9f7418,0x3ff1997d,3 +np.float32,0x3dca11d0,0x3fbc6979,3 +np.float32,0x3f795226,0x3e6a6cab,3 +np.float32,0xbea780e0,0x3ff3b926,3 +np.float32,0xbed92770,0x4000901e,3 +np.float32,0xbf3e9f8c,0x401a49f8,3 +np.float32,0x3f0f7054,0x3f79ddb2,3 +np.float32,0x3a99d400,0x3fc8e966,3 +np.float32,0xbef082b0,0x4003d3c6,3 +np.float32,0xbf0d0790,0x4009defb,3 +np.float32,0xbf1649da,0x400cafb4,3 +np.float32,0xbea5aca8,0x3ff33d5c,3 +np.float32,0xbf4e1843,0x40206ba1,3 +np.float32,0xbe3d7d5c,0x3fe0e2ad,3 +np.float32,0xbf0e802d,0x400a500e,3 +np.float32,0xbf0de8f0,0x400a2295,3 +np.float32,0xbf3016ba,0x4015137e,3 +np.float32,0x3f36b1ea,0x3f46ae5d,3 +np.float32,0xbd27f170,0x3fce4fc7,3 +np.float32,0x3e96ec54,0x3fa2c31f,3 +np.float32,0x3eb4dfdc,0x3f9ad87d,3 +np.float32,0x3f5cac6c,0x3f0815cc,3 +np.float32,0xbf0489aa,0x40075bf1,3 +np.float32,0x3df010c0,0x3fba05f5,3 +np.float32,0xbf229f4a,0x4010956a,3 +np.float32,0x3f75e474,0x3e905a99,3 +np.float32,0xbcece6a0,0x3fccc397,3 +np.float32,0xbdb41528,0x3fd454e7,3 +np.float32,0x3ec8b2f8,0x3f958118,3 +np.float32,0x3f5eaa70,0x3f041a1d,3 +np.float32,0xbf32e1cc,0x40160b91,3 +np.float32,0xbe8e6026,0x3fed219c,3 +np.float32,0x3e6b3160,0x3fab65e3,3 +np.float32,0x3e6d7460,0x3fab1b81,3 +np.float32,0xbf13fbde,0x400bfa3b,3 +np.float32,0xbe8235ec,0x3fe9f9e3,3 +np.float32,0x3d71c4a0,0x3fc18096,3 +np.float32,0x3eb769d0,0x3f9a2aa0,3 +np.float32,0xbf68cb3b,0x402d99e4,3 +np.float32,0xbd917610,0x3fd22932,3 +np.float32,0x3d3cba60,0x3fc3297f,3 +np.float32,0xbf383cbe,0x4017f1cc,3 +np.float32,0xbeee96d0,0x40038e34,3 +np.float32,0x3ec89cb4,0x3f958725,3 +np.float32,0x3ebf92d8,0x3f97f95f,3 +np.float32,0x3f30f3da,0x3f4ec021,3 +np.float32,0xbd26b560,0x3fce45e4,3 +np.float32,0xbec0eb12,0x3ffa8330,3 +np.float32,0x3f6d592a,0x3ec4a6c1,3 +np.float32,0x3ea6d39c,0x3f9e9463,3 +np.float32,0x3e884184,0x3fa6951e,3 +np.float32,0x3ea566c4,0x3f9ef4d1,3 +np.float32,0x3f0c8f4c,0x3f7d5380,3 +np.float32,0x3f28e1ba,0x3f59b2cb,3 +np.float32,0x3f798538,0x3e66e1c3,3 +np.float32,0xbe2889b8,0x3fde39b8,3 +np.float32,0x3f3da05e,0x3f3c949c,3 +np.float32,0x3f24d700,0x3f5f073e,3 +np.float32,0xbe5b5768,0x3fe4b198,3 +np.float32,0xbed3b03a,0x3fff9f05,3 +np.float32,0x3e8a1c4c,0x3fa619eb,3 +np.float32,0xbf075d24,0x40083030,3 +np.float32,0x3f765648,0x3e8d1f52,3 +np.float32,0xbf70fc5e,0x403308bb,3 +np.float32,0x3f557ae8,0x3f15ab76,3 
+np.float32,0x3f02f7ea,0x3f84521c,3 +np.float32,0x3f7ebbde,0x3dcbc5c5,3 +np.float32,0xbefbdfc6,0x40057285,3 +np.float32,0x3ec687ac,0x3f9617d9,3 +np.float32,0x3e4831c8,0x3fafe01b,3 +np.float32,0x3e25cde0,0x3fb43ea8,3 +np.float32,0x3e4f2ab8,0x3faefc70,3 +np.float32,0x3ea60ae4,0x3f9ec973,3 +np.float32,0xbf1ed55f,0x400f5dde,3 +np.float32,0xbf5ad4aa,0x40262479,3 +np.float32,0x3e8b3594,0x3fa5d0de,3 +np.float32,0x3f3a77aa,0x3f413c80,3 +np.float32,0xbf07512b,0x40082ca9,3 +np.float32,0x3f33d990,0x3f4ab5e5,3 +np.float32,0x3f521556,0x3f1bb78f,3 +np.float32,0xbecf6036,0x3ffe7086,3 +np.float32,0x3db91bd0,0x3fbd7a11,3 +np.float32,0x3ef63a74,0x3f88d839,3 +np.float32,0xbf2f1116,0x4014b99c,3 +np.float32,0xbf17fdc0,0x400d36b9,3 +np.float32,0xbe87df2c,0x3feb7117,3 +np.float32,0x80800000,0x3fc90fdb,3 +np.float32,0x3ee24c1c,0x3f8e7641,3 +np.float32,0x3f688dce,0x3edcd644,3 +np.float32,0xbf0f4e1c,0x400a8e1b,3 +np.float32,0x0,0x3fc90fdb,3 +np.float32,0x3f786eba,0x3e7999d4,3 +np.float32,0xbf404f80,0x401aeca8,3 +np.float32,0xbe9ffb6a,0x3ff1bd18,3 +np.float32,0x3f146bfc,0x3f73ccfd,3 +np.float32,0xbe47d630,0x3fe233ee,3 +np.float32,0xbe95847c,0x3feefe7c,3 +np.float32,0xbf135df0,0x400bc9e5,3 +np.float32,0x3ea19f3c,0x3f9ff411,3 +np.float32,0x3f235e20,0x3f60f247,3 +np.float32,0xbec789ec,0x3ffc4def,3 +np.float32,0x3f04b656,0x3f834db6,3 +np.float32,0x3dfaf440,0x3fb95679,3 +np.float32,0xbe4a7f28,0x3fe28abe,3 +np.float32,0x3ed4850c,0x3f92463b,3 +np.float32,0x3ec4ba5c,0x3f9694dd,3 +np.float32,0xbce24ca0,0x3fcc992b,3 +np.float32,0xbf5b7c6e,0x402675a0,3 +np.float32,0xbea3ce2a,0x3ff2bf04,3 +np.float32,0x3db02c60,0x3fbe0998,3 +np.float32,0x3c47b780,0x3fc78069,3 +np.float32,0x3ed33b20,0x3f92a0d5,3 +np.float32,0xbf4556d7,0x401cdcde,3 +np.float32,0xbe1b6e28,0x3fdc90ec,3 +np.float32,0xbf3289b7,0x4015ecd0,3 +np.float32,0x3df3f240,0x3fb9c76d,3 +np.float32,0x3eefa7d0,0x3f8ab61d,3 +np.float32,0xbe945838,0x3feeb006,3 +np.float32,0xbf0b1386,0x400949a3,3 +np.float32,0x3f77e546,0x3e812cc1,3 +np.float32,0x3e804ba0,0x3fa8a480,3 +np.float32,0x3f43dcea,0x3f331a06,3 +np.float32,0x3eb87450,0x3f99e33c,3 +np.float32,0x3e5f4898,0x3facecea,3 +np.float32,0x3f646640,0x3eeff10e,3 +np.float32,0x3f1aa832,0x3f6c1051,3 +np.float32,0xbebf6bfa,0x3ffa1bdc,3 +np.float32,0xbb77f300,0x3fc98bd4,3 +np.float32,0x3f3587fe,0x3f485645,3 +np.float32,0x3ef85f34,0x3f883b8c,3 +np.float32,0x3f50e584,0x3f1dc82c,3 +np.float32,0x3f1d30a8,0x3f68deb0,3 +np.float32,0x3ee75a78,0x3f8d0c86,3 +np.float32,0x3f2c023a,0x3f5581e1,3 +np.float32,0xbf074e34,0x40082bca,3 +np.float32,0xbead71f0,0x3ff54c6d,3 +np.float32,0xbf39ed88,0x40188e69,3 +np.float32,0x3f5d2fe6,0x3f07118b,3 +np.float32,0xbf1f79f8,0x400f9267,3 +np.float32,0x3e900c58,0x3fa48e99,3 +np.float32,0xbf759cb2,0x4036c47b,3 +np.float32,0x3f63329c,0x3ef5359c,3 +np.float32,0xbf5d6755,0x40276709,3 +np.float32,0x3f2ce31c,0x3f54519a,3 +np.float32,0x7f800000,0x7fc00000,3 +np.float32,0x3f1bf50e,0x3f6a6d9a,3 +np.float32,0x3f258334,0x3f5e25d8,3 +np.float32,0xbf661a3f,0x402c06ac,3 +np.float32,0x3d1654c0,0x3fc45cef,3 +np.float32,0xbef14a36,0x4003f009,3 +np.float32,0xbf356051,0x4016ec3a,3 +np.float32,0x3f6ccc42,0x3ec79193,3 +np.float32,0xbf2fe3d6,0x401501f9,3 +np.float32,0x3deedc80,0x3fba195b,3 +np.float32,0x3f2e5a28,0x3f52533e,3 +np.float32,0x3e6b68b8,0x3fab5ec8,3 +np.float32,0x3e458240,0x3fb037b7,3 +np.float32,0xbf24bab0,0x401144cb,3 +np.float32,0x3f600f4c,0x3f013fb2,3 +np.float32,0x3f021a04,0x3f84d316,3 +np.float32,0x3f741732,0x3e9cc948,3 +np.float32,0x3f0788aa,0x3f81a5b0,3 +np.float32,0x3f28802c,0x3f5a347c,3 
+np.float32,0x3c9eb400,0x3fc69500,3 +np.float32,0x3e5d11e8,0x3fad357a,3 +np.float32,0x3d921250,0x3fbfecb9,3 +np.float32,0x3f354866,0x3f48b066,3 +np.float32,0xbf72cf43,0x40346d84,3 +np.float32,0x3eecdbb8,0x3f8b805f,3 +np.float32,0xbee585d0,0x400247fd,3 +np.float32,0x3e3607a8,0x3fb22fc6,3 +np.float32,0xbf0cb7d6,0x4009c71c,3 +np.float32,0xbf56b230,0x402432ec,3 +np.float32,0xbf4ced02,0x401fee29,3 +np.float32,0xbf3a325c,0x4018a776,3 +np.float32,0x3ecae8bc,0x3f94e732,3 +np.float32,0xbe48c7e8,0x3fe252bd,3 +np.float32,0xbe175d7c,0x3fdc0d5b,3 +np.float32,0x3ea78dac,0x3f9e632d,3 +np.float32,0xbe7434a8,0x3fe7e279,3 +np.float32,0x3f1f9e02,0x3f65c7b9,3 +np.float32,0xbe150f2c,0x3fdbc2c2,3 +np.float32,0x3ee13480,0x3f8ec423,3 +np.float32,0x3ecb7d54,0x3f94beb9,3 +np.float32,0x3f1cef42,0x3f693181,3 +np.float32,0xbf1ec06a,0x400f5730,3 +np.float32,0xbe112acc,0x3fdb44e8,3 +np.float32,0xbe77b024,0x3fe85545,3 +np.float32,0x3ec86fe0,0x3f959353,3 +np.float32,0x3f36b326,0x3f46ac9a,3 +np.float32,0x3e581a70,0x3fadd829,3 +np.float32,0xbf032c0c,0x4006f5f9,3 +np.float32,0xbf43b1fd,0x401c38b1,3 +np.float32,0x3f3701b4,0x3f463c5c,3 +np.float32,0x3f1a995a,0x3f6c22f1,3 +np.float32,0xbf05de0b,0x4007bf97,3 +np.float32,0x3d4bd960,0x3fc2b063,3 +np.float32,0x3f0e1618,0x3f7b7ed0,3 +np.float32,0x3edfd420,0x3f8f2628,3 +np.float32,0xbf6662fe,0x402c3047,3 +np.float32,0x3ec0690c,0x3f97bf9b,3 +np.float32,0xbeaf4146,0x3ff5c7a0,3 +np.float32,0x3f5e7764,0x3f04816d,3 +np.float32,0xbedd192c,0x40011bc5,3 +np.float32,0x3eb76350,0x3f9a2c5e,3 +np.float32,0xbed8108c,0x400069a5,3 +np.float32,0xbe59f31c,0x3fe48401,3 +np.float32,0xbea3e1e6,0x3ff2c439,3 +np.float32,0x3e26d1f8,0x3fb41db5,3 +np.float32,0x3f3a0a7c,0x3f41dba5,3 +np.float32,0x3ebae068,0x3f993ce4,3 +np.float32,0x3f2d8e30,0x3f536942,3 +np.float32,0xbe838bbe,0x3fea5247,3 +np.float32,0x3ebe4420,0x3f98538f,3 +np.float32,0xbcc59b80,0x3fcc265c,3 +np.float32,0x3eebb5c8,0x3f8bd334,3 +np.float32,0xbafc3400,0x3fc94ee8,3 +np.float32,0xbf63ddc1,0x402ac683,3 +np.float32,0xbeabdf80,0x3ff4e18f,3 +np.float32,0x3ea863f0,0x3f9e2a78,3 +np.float32,0x3f45b292,0x3f303bc1,3 +np.float32,0xbe68aa60,0x3fe666bf,3 +np.float32,0x3eb9de18,0x3f998239,3 +np.float32,0xbf719d85,0x4033815e,3 +np.float32,0x3edef9a8,0x3f8f62db,3 +np.float32,0xbd7781c0,0x3fd0cd1e,3 +np.float32,0x3f0b3b90,0x3f7ee92a,3 +np.float32,0xbe3eb3b4,0x3fe10a27,3 +np.float32,0xbf31a4c4,0x40159d23,3 +np.float32,0x3e929434,0x3fa3e5b0,3 +np.float32,0xbeb1a90e,0x3ff66b9e,3 +np.float32,0xbeba9b5e,0x3ff8d048,3 +np.float32,0xbf272a84,0x4012119e,3 +np.float32,0x3f1ebbd0,0x3f66e889,3 +np.float32,0x3ed3cdc8,0x3f927893,3 +np.float32,0xbf50dfce,0x40219b58,3 +np.float32,0x3f0c02de,0x3f7dfb62,3 +np.float32,0xbf694de3,0x402de8d2,3 +np.float32,0xbeaeb13e,0x3ff5a14f,3 +np.float32,0xbf61aa7a,0x40299702,3 +np.float32,0xbf13d159,0x400bed35,3 +np.float32,0xbeecd034,0x40034e0b,3 +np.float32,0xbe50c2e8,0x3fe35761,3 +np.float32,0x3f714406,0x3eae8e57,3 +np.float32,0xbf1ca486,0x400eabd8,3 +np.float32,0x3f5858cc,0x3f106497,3 +np.float32,0x3f670288,0x3ee41c84,3 +np.float32,0xbf20bd2c,0x400ff9f5,3 +np.float32,0xbe29afd8,0x3fde5eff,3 +np.float32,0xbf635e6a,0x402a80f3,3 +np.float32,0x3e82b7b0,0x3fa80446,3 +np.float32,0x3e982e7c,0x3fa26ece,3 +np.float32,0x3d9f0e00,0x3fbf1c6a,3 +np.float32,0x3e8299b4,0x3fa80c07,3 +np.float32,0xbf0529c1,0x40078ac3,3 +np.float32,0xbf403b8a,0x401ae519,3 +np.float32,0xbe57e09c,0x3fe44027,3 +np.float32,0x3ea1c8f4,0x3f9fe913,3 +np.float32,0xbe216a94,0x3fdd52d0,3 +np.float32,0x3f59c442,0x3f0db709,3 +np.float32,0xbd636260,0x3fd02bdd,3 
+np.float32,0xbdbbc788,0x3fd4d08d,3 +np.float32,0x3dd19560,0x3fbbf0a3,3 +np.float32,0x3f060ad4,0x3f828641,3 +np.float32,0x3b102e00,0x3fc8c7c4,3 +np.float32,0x3f42b3b8,0x3f34e5a6,3 +np.float32,0x3f0255ac,0x3f84b071,3 +np.float32,0xbf014898,0x40066996,3 +np.float32,0x3e004dc0,0x3fb8fb51,3 +np.float32,0xbf594ff8,0x40256af2,3 +np.float32,0x3efafddc,0x3f877b80,3 +np.float32,0xbf5f0780,0x40283899,3 +np.float32,0x3ee95e54,0x3f8c7bcc,3 +np.float32,0x3eba2f0c,0x3f996c80,3 +np.float32,0x3f37721c,0x3f459b68,3 +np.float32,0x3e2be780,0x3fb378bf,3 +np.float32,0x3e550270,0x3fae3d69,3 +np.float32,0x3e0f9500,0x3fb70e0a,3 +np.float32,0xbf51974a,0x4021eaf4,3 +np.float32,0x3f393832,0x3f430d05,3 +np.float32,0x3f3df16a,0x3f3c1bd8,3 +np.float32,0xbd662340,0x3fd041ed,3 +np.float32,0x3f7e8418,0x3ddc9fce,3 +np.float32,0xbf392734,0x40184672,3 +np.float32,0x3ee3b278,0x3f8e124e,3 +np.float32,0x3eed4808,0x3f8b61d2,3 +np.float32,0xbf6fccbd,0x40322beb,3 +np.float32,0x3e3ecdd0,0x3fb1123b,3 +np.float32,0x3f4419e0,0x3f32bb45,3 +np.float32,0x3f595e00,0x3f0e7914,3 +np.float32,0xbe8c1486,0x3fec88c6,3 +np.float32,0xbf800000,0x40490fdb,3 +np.float32,0xbdaf5020,0x3fd4084d,3 +np.float32,0xbf407660,0x401afb63,3 +np.float32,0x3f0c3aa8,0x3f7db8b8,3 +np.float32,0xbcdb5980,0x3fcc7d5b,3 +np.float32,0x3f4738d4,0x3f2dd1ed,3 +np.float32,0x3f4d7064,0x3f23ab14,3 +np.float32,0xbeb1d576,0x3ff67774,3 +np.float32,0xbf507166,0x40216bb3,3 +np.float32,0x3e86484c,0x3fa71813,3 +np.float32,0x3f09123e,0x3f80bd35,3 +np.float32,0xbe9abe0e,0x3ff05cb2,3 +np.float32,0x3f3019dc,0x3f4fed21,3 +np.float32,0xbe99e00e,0x3ff0227d,3 +np.float32,0xbf155ec5,0x400c6739,3 +np.float32,0x3f5857ba,0x3f106698,3 +np.float32,0x3edf619c,0x3f8f45fb,3 +np.float32,0xbf5ab76a,0x40261664,3 +np.float32,0x3e54b5a8,0x3fae4738,3 +np.float32,0xbee92772,0x4002ca40,3 +np.float32,0x3f2fd610,0x3f504a7a,3 +np.float32,0xbf38521c,0x4017f97e,3 +np.float32,0xff800000,0x7fc00000,3 +np.float32,0x3e2da348,0x3fb34077,3 +np.float32,0x3f2f85fa,0x3f50b894,3 +np.float32,0x3e88f9c8,0x3fa66551,3 +np.float32,0xbf61e570,0x4029b648,3 +np.float32,0xbeab362c,0x3ff4b4a1,3 +np.float32,0x3ec6c310,0x3f9607bd,3 +np.float32,0x3f0d7bda,0x3f7c3810,3 +np.float32,0xbeba5d36,0x3ff8bf99,3 +np.float32,0x3f4b0554,0x3f27adda,3 +np.float32,0x3f60f5dc,0x3efebfb3,3 +np.float32,0x3f36ce2c,0x3f468603,3 +np.float32,0xbe70afac,0x3fe76e8e,3 +np.float32,0x3f673350,0x3ee339b5,3 +np.float32,0xbe124cf0,0x3fdb698c,3 +np.float32,0xbf1243dc,0x400b73d0,3 +np.float32,0x3f3c8850,0x3f3e3407,3 +np.float32,0x3ea02f24,0x3fa05500,3 +np.float32,0xbeffed34,0x400607db,3 +np.float32,0x3f5c75c2,0x3f08817c,3 +np.float32,0x3f4b2fbe,0x3f27682d,3 +np.float32,0x3ee47c34,0x3f8dd9f9,3 +np.float32,0x3f50d48c,0x3f1de584,3 +np.float32,0x3f12dc5e,0x3f75b628,3 +np.float32,0xbefe7e4a,0x4005d2f4,3 +np.float32,0xbec2e846,0x3ffb0cbc,3 +np.float32,0xbedc3036,0x4000fb80,3 +np.float32,0xbf48aedc,0x401e311f,3 +np.float32,0x3f6e032e,0x3ec11363,3 +np.float32,0xbf60de15,0x40292b72,3 +np.float32,0x3f06585e,0x3f8258ba,3 +np.float32,0x3ef49b98,0x3f894e66,3 +np.float32,0x3cc5fe00,0x3fc5f7cf,3 +np.float32,0xbf7525c5,0x40365c2c,3 +np.float32,0x3f64f9f8,0x3eed5fb2,3 +np.float32,0x3e8849c0,0x3fa692fb,3 +np.float32,0x3e50c878,0x3faec79e,3 +np.float32,0x3ed61530,0x3f91d831,3 +np.float32,0xbf54872e,0x40233724,3 +np.float32,0xbf52ee7f,0x4022815e,3 +np.float32,0xbe708c24,0x3fe769fc,3 +np.float32,0xbf26fc54,0x40120260,3 +np.float32,0x3f226e8a,0x3f6228db,3 +np.float32,0xbef30406,0x40042eb8,3 +np.float32,0x3f5d996c,0x3f063f5f,3 +np.float32,0xbf425f9c,0x401bb618,3 
+np.float32,0x3e4bb260,0x3faf6dc9,3 +np.float32,0xbe52d5a4,0x3fe39b29,3 +np.float32,0xbe169cf0,0x3fdbf505,3 +np.float32,0xbedfc422,0x40017a8e,3 +np.float32,0x3d8ffef0,0x3fc00e05,3 +np.float32,0xbf12bdab,0x400b98f2,3 +np.float32,0x3f295d0a,0x3f590e88,3 +np.float32,0x3f49d8e4,0x3f2998aa,3 +np.float32,0xbef914f4,0x40050c12,3 +np.float32,0xbf4ea2b5,0x4020a61e,3 +np.float32,0xbf3a89e5,0x4018c762,3 +np.float32,0x3e8707b4,0x3fa6e67a,3 +np.float32,0x3ac55400,0x3fc8de86,3 +np.float32,0x800000,0x3fc90fdb,3 +np.float32,0xbeb9762c,0x3ff8819b,3 +np.float32,0xbebbe23c,0x3ff92815,3 +np.float32,0xbf598c88,0x402587a1,3 +np.float32,0x3e95d864,0x3fa30b4a,3 +np.float32,0x3f7f6f40,0x3d882486,3 +np.float32,0xbf53658c,0x4022b604,3 +np.float32,0xbf2a35f2,0x401314ad,3 +np.float32,0x3eb14380,0x3f9bcf28,3 +np.float32,0x3f0e0c64,0x3f7b8a7a,3 +np.float32,0x3d349920,0x3fc36a9a,3 +np.float32,0xbec2092c,0x3ffad071,3 +np.float32,0xbe1d08e8,0x3fdcc4e0,3 +np.float32,0xbf008968,0x40063243,3 +np.float32,0xbefad582,0x40054c51,3 +np.float32,0xbe52d010,0x3fe39a72,3 +np.float32,0x3f4afdac,0x3f27ba6b,3 +np.float32,0x3f6c483c,0x3eca4408,3 +np.float32,0xbef3cb68,0x40044b0c,3 +np.float32,0x3e94687c,0x3fa36b6f,3 +np.float32,0xbf64ae5c,0x402b39bb,3 +np.float32,0xbf0022b4,0x40061497,3 +np.float32,0x80000001,0x3fc90fdb,3 +np.float32,0x3f25bcd0,0x3f5dda4b,3 +np.float32,0x3ed91b40,0x3f9102d7,3 +np.float32,0x3f800000,0x0,3 +np.float32,0xbebc6aca,0x3ff94cca,3 +np.float32,0x3f239e9a,0x3f609e7d,3 +np.float32,0xbf7312be,0x4034a305,3 +np.float32,0x3efd16d0,0x3f86e148,3 +np.float32,0x3f52753a,0x3f1b0f72,3 +np.float32,0xbde58960,0x3fd7702c,3 +np.float32,0x3ef88580,0x3f883099,3 +np.float32,0x3eebaefc,0x3f8bd51e,3 +np.float32,0x3e877d2c,0x3fa6c807,3 +np.float32,0x3f1a0324,0x3f6cdf32,3 +np.float32,0xbedfe20a,0x40017eb6,3 +np.float32,0x3f205a3c,0x3f64d69d,3 +np.float32,0xbeed5b7c,0x400361b0,3 +np.float32,0xbf69ba10,0x402e2ad0,3 +np.float32,0x3c4fe200,0x3fc77014,3 +np.float32,0x3f043310,0x3f839a69,3 +np.float32,0xbeaf359a,0x3ff5c485,3 +np.float32,0x3db3f110,0x3fbdcd12,3 +np.float32,0x3e24af88,0x3fb462ed,3 +np.float32,0xbf34e858,0x4016c1c8,3 +np.float32,0x3f3334f2,0x3f4b9cd0,3 +np.float32,0xbf145882,0x400c16a2,3 +np.float32,0xbf541c38,0x40230748,3 +np.float32,0x3eba7e10,0x3f99574b,3 +np.float32,0xbe34c6e0,0x3fdfc731,3 +np.float32,0xbe957abe,0x3feefbf0,3 +np.float32,0xbf595a59,0x40256fdb,3 +np.float32,0xbdedc7b8,0x3fd7f4f0,3 +np.float32,0xbf627c02,0x402a06a9,3 +np.float32,0x3f339b78,0x3f4b0d18,3 +np.float32,0xbf2df6d2,0x40145929,3 +np.float32,0x3f617726,0x3efc9fd8,3 +np.float32,0xbee3a8fc,0x40020561,3 +np.float32,0x3efe9f68,0x3f867043,3 +np.float32,0xbf2c3e76,0x4013c3ba,3 +np.float32,0xbf218f28,0x40103d84,3 +np.float32,0xbf1ea847,0x400f4f7f,3 +np.float32,0x3ded9160,0x3fba2e31,3 +np.float32,0x3bce1b00,0x3fc841bf,3 +np.float32,0xbe90566e,0x3feda46a,3 +np.float32,0xbf5ea2ba,0x4028056b,3 +np.float32,0x3f538e62,0x3f191ee6,3 +np.float32,0xbf59e054,0x4025af74,3 +np.float32,0xbe8c98ba,0x3fecab24,3 +np.float32,0x3ee7bdb0,0x3f8cf0b7,3 +np.float32,0xbf2eb828,0x40149b2b,3 +np.float32,0xbe5eb904,0x3fe52068,3 +np.float32,0xbf16b422,0x400cd08d,3 +np.float32,0x3f1ab9b4,0x3f6bfa58,3 +np.float32,0x3dc23040,0x3fbce82a,3 +np.float32,0xbf29d9e7,0x4012f5e5,3 +np.float32,0xbf38f30a,0x40183393,3 +np.float32,0x3e88e798,0x3fa66a09,3 +np.float32,0x3f1d07e6,0x3f69124f,3 +np.float32,0xbe1d3d34,0x3fdccb7e,3 +np.float32,0xbf1715be,0x400ceec2,3 +np.float32,0x3f7a0eac,0x3e5d11f7,3 +np.float32,0xbe764924,0x3fe82707,3 +np.float32,0xbf01a1f8,0x4006837c,3 +np.float32,0x3f2be730,0x3f55a661,3 
+np.float32,0xbf7bb070,0x403d4ce5,3 +np.float32,0xbd602110,0x3fd011c9,3 +np.float32,0x3f5d080c,0x3f07609d,3 +np.float32,0xbda20400,0x3fd332d1,3 +np.float32,0x3f1c62da,0x3f69e308,3 +np.float32,0xbf2c6916,0x4013d223,3 +np.float32,0xbf44f8fd,0x401cb816,3 +np.float32,0x3f4da392,0x3f235539,3 +np.float32,0x3e9e8aa0,0x3fa0c3a0,3 +np.float32,0x3e9633c4,0x3fa2f366,3 +np.float32,0xbf0422ab,0x40073ddd,3 +np.float32,0x3f518386,0x3f1cb603,3 +np.float32,0x3f24307a,0x3f5fe096,3 +np.float32,0xbdfb4220,0x3fd8ce24,3 +np.float32,0x3f179d28,0x3f6fdc7d,3 +np.float32,0xbecc2df0,0x3ffd911e,3 +np.float32,0x3f3dff0c,0x3f3c0782,3 +np.float32,0xbf58c4d8,0x4025295b,3 +np.float32,0xbdcf8438,0x3fd60dd3,3 +np.float32,0xbeeaf1b2,0x40030aa7,3 +np.float32,0xbf298a28,0x4012db45,3 +np.float32,0x3f6c4dec,0x3eca2678,3 +np.float32,0x3f4d1ac8,0x3f243a59,3 +np.float32,0x3f62cdfa,0x3ef6e8f8,3 +np.float32,0xbee8acce,0x4002b909,3 +np.float32,0xbd5f2af0,0x3fd00a15,3 +np.float32,0x3f5fde8e,0x3f01a453,3 +np.float32,0x3e95233c,0x3fa33aa4,3 +np.float32,0x3ecd2a60,0x3f9449be,3 +np.float32,0x3f10aa86,0x3f78619d,3 +np.float32,0x3f3888e8,0x3f440a70,3 +np.float32,0x3eeb5bfc,0x3f8bec7d,3 +np.float32,0xbe12d654,0x3fdb7ae6,3 +np.float32,0x3eca3110,0x3f951931,3 +np.float32,0xbe2d1b7c,0x3fdece05,3 +np.float32,0xbf29e9db,0x4012fb3a,3 +np.float32,0xbf0c50b8,0x4009a845,3 +np.float32,0xbed9f0e4,0x4000abef,3 +np.float64,0x3fd078ec5ba0f1d8,0x3ff4f7c00595a4d3,1 +np.float64,0xbfdbc39743b7872e,0x400027f85bce43b2,1 +np.float64,0xbfacd2707c39a4e0,0x3ffa08ae1075d766,1 +np.float64,0xbfc956890f32ad14,0x3ffc52308e7285fd,1 +np.float64,0xbf939c2298273840,0x3ff9706d18e6ea6b,1 +np.float64,0xbfe0d7048961ae09,0x4000fff4406bd395,1 +np.float64,0xbfe9d19b86f3a337,0x4004139bc683a69f,1 +np.float64,0x3fd35c7f90a6b900,0x3ff437220e9123f8,1 +np.float64,0x3fdddca171bbb944,0x3ff15da61e61ec08,1 +np.float64,0x3feb300de9f6601c,0x3fe1c6fadb68cdca,1 +np.float64,0xbfef1815327e302a,0x400739808fc6f964,1 +np.float64,0xbfe332d78e6665af,0x4001b6c4ef922f7c,1 +np.float64,0xbfedbf4dfb7b7e9c,0x40061cefed62a58b,1 +np.float64,0xbfd8dcc7e3b1b990,0x3fff84307713c2c3,1 +np.float64,0xbfedaf161c7b5e2c,0x400612027c1b2b25,1 +np.float64,0xbfed9bde897b37bd,0x4006053f05bd7d26,1 +np.float64,0xbfe081ebc26103d8,0x4000e70755eb66e0,1 +np.float64,0xbfe0366f9c606cdf,0x4000d11212f29afd,1 +np.float64,0xbfc7c115212f822c,0x3ffc1e8c9d58f7db,1 +np.float64,0x3fd8dd9a78b1bb34,0x3ff2bf8d0f4c9376,1 +np.float64,0xbfe54eff466a9dfe,0x4002655950b611f4,1 +np.float64,0xbfe4aad987e955b3,0x40022efb19882518,1 +np.float64,0x3f70231ca0204600,0x3ff911d834e7abf4,1 +np.float64,0x3fede01d047bc03a,0x3fd773cecbd8561b,1 +np.float64,0xbfd6a00d48ad401a,0x3ffee9fd7051633f,1 +np.float64,0x3fd44f3d50a89e7c,0x3ff3f74dd0fc9c91,1 +np.float64,0x3fe540f0d0ea81e2,0x3feb055a7c7d43d6,1 +np.float64,0xbf3ba2e200374800,0x3ff923b582650c6c,1 +np.float64,0x3fe93b2d3f72765a,0x3fe532fa15331072,1 +np.float64,0x3fee8ce5a17d19cc,0x3fd35666eefbe336,1 +np.float64,0x3fe55d5f8feabac0,0x3feadf3dcfe251d4,1 +np.float64,0xbfd1d2ede8a3a5dc,0x3ffda600041ac884,1 +np.float64,0xbfee41186e7c8231,0x40067a625cc6f64d,1 +np.float64,0x3fe521a8b9ea4352,0x3feb2f1a6c8084e5,1 +np.float64,0x3fc65378ef2ca6f0,0x3ff653dfe81ee9f2,1 +np.float64,0x3fdaba0fbcb57420,0x3ff23d630995c6ba,1 +np.float64,0xbfe6b7441d6d6e88,0x4002e182539a2994,1 +np.float64,0x3fda00b6dcb4016c,0x3ff2703d516f28e7,1 +np.float64,0xbfe8699f01f0d33e,0x400382326920ea9e,1 +np.float64,0xbfef5889367eb112,0x4007832af5983793,1 +np.float64,0x3fefb57c8aff6afa,0x3fc14700ab38dcef,1 +np.float64,0xbfda0dfdaab41bfc,0x3fffd75b6fd497f6,1 
+np.float64,0xbfb059c36620b388,0x3ffa27c528b97a42,1 +np.float64,0xbfdd450ab1ba8a16,0x40005dcac6ab50fd,1 +np.float64,0xbfe54d6156ea9ac2,0x400264ce9f3f0fb9,1 +np.float64,0xbfe076e94760edd2,0x4000e3d1374884da,1 +np.float64,0xbfc063286720c650,0x3ffb2fd1d6bff0ef,1 +np.float64,0xbfe24680f2e48d02,0x40016ddfbb5bcc0e,1 +np.float64,0xbfdc9351d2b926a4,0x400044e3756fb765,1 +np.float64,0x3fefb173d8ff62e8,0x3fc1bd5626f80850,1 +np.float64,0x3fe77c117a6ef822,0x3fe7e57089bad2ec,1 +np.float64,0xbfddbcebf7bb79d8,0x40006eadb60406b3,1 +np.float64,0xbfecf6625ff9ecc5,0x40059e6c6961a6db,1 +np.float64,0x3fdc8950b8b912a0,0x3ff1bcfb2e27795b,1 +np.float64,0xbfeb2fa517765f4a,0x4004b00aee3e6888,1 +np.float64,0x3fd0efc88da1df90,0x3ff4d8f7cbd8248a,1 +np.float64,0xbfe6641a2becc834,0x4002c43362c1bd0f,1 +np.float64,0xbfe28aec0fe515d8,0x400182c91d4df039,1 +np.float64,0xbfd5ede8d0abdbd2,0x3ffeba7baef05ae8,1 +np.float64,0xbfbd99702a3b32e0,0x3ffafca21c1053f1,1 +np.float64,0x3f96f043f82de080,0x3ff8c6384d5eb610,1 +np.float64,0xbfe5badbc9eb75b8,0x400289c8cd5873d1,1 +np.float64,0x3fe5c6bf95eb8d80,0x3fea5093e9a3e43e,1 +np.float64,0x3fb1955486232ab0,0x3ff8086d4c3e71d5,1 +np.float64,0xbfea145f397428be,0x4004302237a35871,1 +np.float64,0xbfdabe685db57cd0,0x400003e2e29725fb,1 +np.float64,0xbfefc79758ff8f2f,0x400831814e23bfc8,1 +np.float64,0x3fd7edb66cafdb6c,0x3ff3006c5123bfaf,1 +np.float64,0xbfeaf7644bf5eec8,0x400495a7963ce4ed,1 +np.float64,0x3fdf838d78bf071c,0x3ff0e527eed73800,1 +np.float64,0xbfd1a0165ba3402c,0x3ffd98c5ab76d375,1 +np.float64,0x3fd75b67a9aeb6d0,0x3ff327c8d80b17cf,1 +np.float64,0x3fc2aa9647255530,0x3ff6ca854b157df1,1 +np.float64,0xbfe0957fd4612b00,0x4000ecbf3932becd,1 +np.float64,0x3fda1792c0b42f24,0x3ff269fbb2360487,1 +np.float64,0x3fd480706ca900e0,0x3ff3ea53a6aa3ae8,1 +np.float64,0xbfd0780ed9a0f01e,0x3ffd4bfd544c7d47,1 +np.float64,0x3feeec0cd77dd81a,0x3fd0a8a241fdb441,1 +np.float64,0x3fcfa933e93f5268,0x3ff5223478621a6b,1 +np.float64,0x3fdad2481fb5a490,0x3ff236b86c6b2b49,1 +np.float64,0x3fe03b129de07626,0x3ff09f21fb868451,1 +np.float64,0xbfc01212cd202424,0x3ffb259a07159ae9,1 +np.float64,0x3febdb912df7b722,0x3fe0768e20dac8c9,1 +np.float64,0xbfbf2148763e4290,0x3ffb154c361ce5bf,1 +np.float64,0xbfb1a7eb1e234fd8,0x3ffa3cb37ac4a176,1 +np.float64,0xbfe26ad1ec64d5a4,0x400178f480ecce8d,1 +np.float64,0x3fe6d1cd1b6da39a,0x3fe8dc20ec4dad3b,1 +np.float64,0xbfede0e53dfbc1ca,0x4006340d3bdd7c97,1 +np.float64,0xbfe8fd1bd9f1fa38,0x4003bc3477f93f40,1 +np.float64,0xbfe329d0f26653a2,0x4001b3f345af5648,1 +np.float64,0xbfe4bb20eee97642,0x40023451404d6d08,1 +np.float64,0x3fb574832e2ae900,0x3ff7ca4bed0c7110,1 +np.float64,0xbfdf3c098fbe7814,0x4000a525bb72d659,1 +np.float64,0x3fa453e6d428a7c0,0x3ff87f512bb9b0c6,1 +np.float64,0x3faaec888435d920,0x3ff84a7d9e4def63,1 +np.float64,0xbfcdc240df3b8480,0x3ffce30ece754e7f,1 +np.float64,0xbf8c3220f0386440,0x3ff95a600ae6e157,1 +np.float64,0x3fe806076c700c0e,0x3fe71784a96c76eb,1 +np.float64,0x3fedf9b0e17bf362,0x3fd6e35fc0a7b6c3,1 +np.float64,0xbfe1b48422636908,0x400141bd8ed251bc,1 +np.float64,0xbfe82e2817705c50,0x40036b5a5556d021,1 +np.float64,0xbfc8ef8ff931df20,0x3ffc450ffae7ce58,1 +np.float64,0xbfe919fa94f233f5,0x4003c7cce4697fe8,1 +np.float64,0xbfc3ace4a72759c8,0x3ffb9a197bb22651,1 +np.float64,0x3fe479f71ee8f3ee,0x3fec0bd2f59097aa,1 +np.float64,0xbfeeb54a967d6a95,0x4006da12c83649c5,1 +np.float64,0x3fe5e74ea8ebce9e,0x3fea2407cef0f08c,1 +np.float64,0x3fb382baf2270570,0x3ff7e98213b921ba,1 +np.float64,0xbfdd86fd3cbb0dfa,0x40006712952ddbcf,1 +np.float64,0xbfd250eb52a4a1d6,0x3ffdc6d56253b1cd,1 
+np.float64,0x3fea30c4ed74618a,0x3fe3962deba4f30e,1 +np.float64,0x3fc895963d312b30,0x3ff60a5d52fcbccc,1 +np.float64,0x3fe9cc4f6273989e,0x3fe442740942c80f,1 +np.float64,0xbfe8769f5cf0ed3f,0x4003873b4cb5bfce,1 +np.float64,0xbfe382f3726705e7,0x4001cfeb3204d110,1 +np.float64,0x3fbfe9a9163fd350,0x3ff7220bd2b97c8f,1 +np.float64,0xbfca6162bb34c2c4,0x3ffc743f939358f1,1 +np.float64,0x3fe127a014e24f40,0x3ff0147c4bafbc39,1 +np.float64,0x3fee9cdd2a7d39ba,0x3fd2e9ef45ab122f,1 +np.float64,0x3fa9ffb97c33ff80,0x3ff851e69fa3542c,1 +np.float64,0x3fd378f393a6f1e8,0x3ff42faafa77de56,1 +np.float64,0xbfe4df1e1669be3c,0x400240284df1c321,1 +np.float64,0x3fed0ed79bfa1db0,0x3fdba89060aa96fb,1 +np.float64,0x3fdef2ee52bde5dc,0x3ff10e942244f4f1,1 +np.float64,0xbfdab38f3ab5671e,0x40000264d8d5b49b,1 +np.float64,0x3fbe95a96e3d2b50,0x3ff73774cb59ce2d,1 +np.float64,0xbfe945653af28aca,0x4003d9657bf129c2,1 +np.float64,0xbfb18f3f2a231e80,0x3ffa3b27cba23f50,1 +np.float64,0xbfef50bf22fea17e,0x40077998a850082c,1 +np.float64,0xbfc52b8c212a5718,0x3ffbca8d6560a2da,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3fc1e3a02d23c740,0x3ff6e3a5fcac12a4,1 +np.float64,0xbfeb5e4ea5f6bc9d,0x4004c65abef9426f,1 +np.float64,0xbfe425b132684b62,0x400203c29608b00d,1 +np.float64,0xbfbfa1c19e3f4380,0x3ffb1d6367711158,1 +np.float64,0x3fbba2776e3744f0,0x3ff766f6df586fad,1 +np.float64,0xbfb5d0951e2ba128,0x3ffa7f712480b25e,1 +np.float64,0xbfe949fdab7293fb,0x4003db4530a18507,1 +np.float64,0xbfcf13519b3e26a4,0x3ffd0e6f0a6c38ee,1 +np.float64,0x3f91e6d72823cdc0,0x3ff8da5f08909b6e,1 +np.float64,0x3f78a2e360314600,0x3ff909586727caef,1 +np.float64,0xbfe1ae7e8fe35cfd,0x40013fef082caaa3,1 +np.float64,0x3fe97a6dd1f2f4dc,0x3fe4cb4b99863478,1 +np.float64,0xbfcc1e1e69383c3c,0x3ffcad250a949843,1 +np.float64,0x3faccb797c399700,0x3ff83b8066b49330,1 +np.float64,0x3fe7a2647a6f44c8,0x3fe7acceae6ec425,1 +np.float64,0xbfec3bfcf0f877fa,0x4005366af5a7175b,1 +np.float64,0xbfe2310b94646217,0x400167588fceb228,1 +np.float64,0x3feb167372762ce6,0x3fe1f74c0288fad8,1 +np.float64,0xbfb722b4ee2e4568,0x3ffa94a81b94dfca,1 +np.float64,0x3fc58da9712b1b50,0x3ff66cf8f072aa14,1 +np.float64,0xbfe7fff9d6effff4,0x400359d01b8141de,1 +np.float64,0xbfd56691c5aacd24,0x3ffe9686697797e8,1 +np.float64,0x3fe3ab0557e7560a,0x3fed1593959ef8e8,1 +np.float64,0x3fdd458995ba8b14,0x3ff1883d6f22a322,1 +np.float64,0x3fe7bbed2cef77da,0x3fe786d618094cda,1 +np.float64,0x3fa31a30c4263460,0x3ff88920b936fd79,1 +np.float64,0x8010000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfdc5effbdb8be00,0x40003d95fe0dff11,1 +np.float64,0x3febfdad7e77fb5a,0x3fe030b5297dbbdd,1 +np.float64,0x3fe4f3f3b2e9e7e8,0x3feb6bc59eeb2be2,1 +np.float64,0xbfe44469fd6888d4,0x40020daa5488f97a,1 +np.float64,0xbfe19fddb0e33fbc,0x40013b8c902b167b,1 +np.float64,0x3fa36ad17c26d5a0,0x3ff8869b3e828134,1 +np.float64,0x3fcf23e6c93e47d0,0x3ff5336491a65d1e,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfe375f4cee6ebea,0x4001cbd2ba42e8b5,1 +np.float64,0xbfaef1215c3de240,0x3ffa19ab02081189,1 +np.float64,0xbfec39c59c78738b,0x4005353dc38e3d78,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfec09bb7b781377,0x40051c0a5754cb3a,1 +np.float64,0x3fe8301f2870603e,0x3fe6d783c5ef0944,1 +np.float64,0xbfed418c987a8319,0x4005cbae1b8693d1,1 +np.float64,0xbfdc16e7adb82dd0,0x4000338b634eaf03,1 +np.float64,0x3fd5d361bdaba6c4,0x3ff390899300a54c,1 +np.float64,0xbff0000000000000,0x400921fb54442d18,1 +np.float64,0x3fd5946232ab28c4,0x3ff3a14767813f29,1 +np.float64,0x3fe833e5fef067cc,0x3fe6d1be720edf2d,1 
+np.float64,0x3fedf746a67bee8e,0x3fd6f127fdcadb7b,1 +np.float64,0x3fd90353d3b206a8,0x3ff2b54f7d369ba9,1 +np.float64,0x3fec4b4b72f89696,0x3fdf1b38d2e93532,1 +np.float64,0xbfe9c67596f38ceb,0x40040ee5f524ce03,1 +np.float64,0x3fd350d91aa6a1b4,0x3ff43a303c0da27f,1 +np.float64,0x3fd062603ba0c4c0,0x3ff4fd9514b935d8,1 +np.float64,0xbfe24c075f64980e,0x40016f8e9f2663b3,1 +np.float64,0x3fdaa546eeb54a8c,0x3ff2431a88fef1d5,1 +np.float64,0x3fe92b8151f25702,0x3fe54c67e005cbf9,1 +np.float64,0xbfe1be8b8a637d17,0x400144c078f67c6e,1 +np.float64,0xbfe468a1d7e8d144,0x40021964b118cbf4,1 +np.float64,0xbfdc6de4fab8dbca,0x40003fa9e27893d8,1 +np.float64,0xbfe3c2788ae784f1,0x4001e407ba3aa956,1 +np.float64,0xbfe2bf1542e57e2a,0x400192d4a9072016,1 +np.float64,0xbfe6982f4c6d305e,0x4002d681b1991bbb,1 +np.float64,0x3fdbceb1c4b79d64,0x3ff1f0f117b9d354,1 +np.float64,0x3fdb3705e7b66e0c,0x3ff21af01ca27ace,1 +np.float64,0x3fe3e6358ee7cc6c,0x3fecca4585053983,1 +np.float64,0xbfe16d6a9a62dad5,0x40012c7988aee247,1 +np.float64,0xbfce66e4413ccdc8,0x3ffcf83b08043a0c,1 +np.float64,0xbfeb6cd46876d9a9,0x4004cd61733bfb79,1 +np.float64,0xbfdb1cdd64b639ba,0x400010e6cf087cb7,1 +np.float64,0xbfe09e4e30e13c9c,0x4000ef5277c47721,1 +np.float64,0xbfee88dd127d11ba,0x4006b3cd443643ac,1 +np.float64,0xbf911e06c8223c00,0x3ff966744064fb05,1 +np.float64,0xbfe8f22bc471e458,0x4003b7d5513af295,1 +np.float64,0x3fe3d7329567ae66,0x3fecdd6c241f83ee,1 +np.float64,0x3fc8a9404b315280,0x3ff607dc175edf3f,1 +np.float64,0x3fe7eb80ad6fd702,0x3fe73f8fdb3e6a6c,1 +np.float64,0x3fef0931e37e1264,0x3fcf7fde80a3c5ab,1 +np.float64,0x3fe2ed3c3fe5da78,0x3fee038334cd1860,1 +np.float64,0x3fe251fdb8e4a3fc,0x3feec26dc636ac31,1 +np.float64,0x3feb239436764728,0x3fe1de9462455da7,1 +np.float64,0xbfe63fd7eeec7fb0,0x4002b78cfa3d2fa6,1 +np.float64,0x3fdd639cb5bac738,0x3ff17fc7d92b3eee,1 +np.float64,0x3fd0a7a13fa14f44,0x3ff4eba95c559c84,1 +np.float64,0x3fe804362d70086c,0x3fe71a44cd91ffa4,1 +np.float64,0xbfe0fecf6e61fd9f,0x40010bac8edbdc4f,1 +np.float64,0x3fcb74acfd36e958,0x3ff5ac84437f1b7c,1 +np.float64,0x3fe55053e1eaa0a8,0x3feaf0bf76304c30,1 +np.float64,0x3fc06b508d20d6a0,0x3ff7131da17f3902,1 +np.float64,0x3fdd78750fbaf0ec,0x3ff179e97fbf7f65,1 +np.float64,0x3fe44cb946689972,0x3fec46859b5da6be,1 +np.float64,0xbfeb165a7ff62cb5,0x4004a41c9cc9589e,1 +np.float64,0x3fe01ffb2b603ff6,0x3ff0aed52bf1c3c1,1 +np.float64,0x3f983c60a83078c0,0x3ff8c107805715ab,1 +np.float64,0x3fd8b5ff13b16c00,0x3ff2ca4a837a476a,1 +np.float64,0x3fc80510a1300a20,0x3ff61cc3b4af470b,1 +np.float64,0xbfd3935b06a726b6,0x3ffe1b3a2066f473,1 +np.float64,0xbfdd4a1f31ba943e,0x40005e81979ed445,1 +np.float64,0xbfa76afdd42ed600,0x3ff9dd63ffba72d2,1 +np.float64,0x3fe7e06d496fc0da,0x3fe7503773566707,1 +np.float64,0xbfea5fbfe874bf80,0x40045106af6c538f,1 +np.float64,0x3fee000c487c0018,0x3fd6bef1f8779d88,1 +np.float64,0xbfb39f4ee2273ea0,0x3ffa5c3f2b3888ab,1 +np.float64,0x3feb9247b0772490,0x3fe1092d2905efce,1 +np.float64,0x3fdaa39b4cb54738,0x3ff243901da0da17,1 +np.float64,0x3fcd5b2b493ab658,0x3ff56e262e65b67d,1 +np.float64,0x3fcf82512f3f04a0,0x3ff52738847c55f2,1 +np.float64,0x3fe2af5e0c655ebc,0x3fee4ffab0c82348,1 +np.float64,0xbfec0055d0f800ac,0x4005172d325933e8,1 +np.float64,0x3fe71da9336e3b52,0x3fe86f2e12f6e303,1 +np.float64,0x3fbefab0723df560,0x3ff731188ac716ec,1 +np.float64,0xbfe11dca28623b94,0x400114d3d4ad370d,1 +np.float64,0x3fbcbda8ca397b50,0x3ff755281078abd4,1 +np.float64,0x3fe687c7126d0f8e,0x3fe945099a7855cc,1 +np.float64,0xbfecde510579bca2,0x400590606e244591,1 +np.float64,0xbfd72de681ae5bce,0x3fff0ff797ad1755,1 
+np.float64,0xbfe7c0f7386f81ee,0x40034226e0805309,1 +np.float64,0x3fd8d55619b1aaac,0x3ff2c1cb3267b14e,1 +np.float64,0x3fecd7a2ad79af46,0x3fdcabbffeaa279e,1 +np.float64,0x3fee7fb1a8fcff64,0x3fd3ae620286fe19,1 +np.float64,0xbfc5f3a3592be748,0x3ffbe3ed204d9842,1 +np.float64,0x3fec9e5527793caa,0x3fddb00bc8687e4b,1 +np.float64,0x3fc35dc70f26bb90,0x3ff6b3ded7191e33,1 +np.float64,0x3fda91c07ab52380,0x3ff24878848fec8f,1 +np.float64,0xbfe12cde1fe259bc,0x4001194ab99d5134,1 +np.float64,0xbfd35ab736a6b56e,0x3ffe0c5ce8356d16,1 +np.float64,0x3fc9c94123339280,0x3ff5e3239f3ad795,1 +np.float64,0xbfe72f54926e5ea9,0x40030c95d1d02b56,1 +np.float64,0xbfee283186fc5063,0x40066786bd0feb79,1 +np.float64,0xbfe7b383f56f6708,0x40033d23ef0e903d,1 +np.float64,0x3fd6037327ac06e8,0x3ff383bf2f311ddb,1 +np.float64,0x3fe0e344b561c68a,0x3ff03cd90fd4ba65,1 +np.float64,0xbfef0ff54b7e1feb,0x400730fa5fce381e,1 +np.float64,0x3fd269929da4d324,0x3ff476b230136d32,1 +np.float64,0xbfbc5fb9f638bf70,0x3ffae8e63a4e3234,1 +np.float64,0xbfe2e8bc84e5d179,0x40019fb5874f4310,1 +np.float64,0xbfd7017413ae02e8,0x3fff040d843c1531,1 +np.float64,0x3fefd362fa7fa6c6,0x3fbababc3ddbb21d,1 +np.float64,0x3fecb62ed3f96c5e,0x3fdd44ba77ccff94,1 +np.float64,0xbfb16fad5222df58,0x3ffa392d7f02b522,1 +np.float64,0x3fbcf4abc639e950,0x3ff751b23c40e27f,1 +np.float64,0x3fe128adbce2515c,0x3ff013dc91db04b5,1 +np.float64,0x3fa5dd9d842bbb40,0x3ff87300c88d512f,1 +np.float64,0xbfe61efcaf6c3dfa,0x4002ac27117f87c9,1 +np.float64,0x3feffe1233fffc24,0x3f9638d3796a4954,1 +np.float64,0xbfe78548b66f0a92,0x40032c0447b7bfe2,1 +np.float64,0x3fe7bd38416f7a70,0x3fe784e86d6546b6,1 +np.float64,0x3fe0d6bc5961ad78,0x3ff0443899e747ac,1 +np.float64,0xbfd0bb6e47a176dc,0x3ffd5d6dff390d41,1 +np.float64,0xbfec1d16b8f83a2e,0x40052620378d3b78,1 +np.float64,0x3fe9bbec20f377d8,0x3fe45e167c7a3871,1 +np.float64,0xbfeed81d9dfdb03b,0x4006f9dec2db7310,1 +np.float64,0xbfe1e35179e3c6a3,0x40014fd1b1186ac0,1 +np.float64,0xbfc9c7e605338fcc,0x3ffc60a6bd1a7126,1 +np.float64,0x3feec92810fd9250,0x3fd1afde414ab338,1 +np.float64,0xbfeb9f1d90773e3b,0x4004e606b773f5b0,1 +np.float64,0x3fcbabdf6b3757c0,0x3ff5a573866404af,1 +np.float64,0x3fe9f4e1fff3e9c4,0x3fe3fd7b6712dd7b,1 +np.float64,0xbfe6c0175ded802e,0x4002e4a4dc12f3fe,1 +np.float64,0xbfeefc96f37df92e,0x40071d367cd721ff,1 +np.float64,0xbfeaab58dc7556b2,0x400472ce37e31e50,1 +np.float64,0xbfc62668772c4cd0,0x3ffbea5e6c92010a,1 +np.float64,0x3fafe055fc3fc0a0,0x3ff822ce6502519a,1 +np.float64,0x3fd7b648ffaf6c90,0x3ff30f5a42f11418,1 +np.float64,0xbfe934fe827269fd,0x4003d2b9fed9e6ad,1 +np.float64,0xbfe6d691f2edad24,0x4002eca6a4b1797b,1 +np.float64,0x3fc7e62ced2fcc58,0x3ff620b1f44398b7,1 +np.float64,0xbfc89be9f33137d4,0x3ffc3a67a497f59c,1 +np.float64,0xbfe7793d536ef27a,0x40032794bf14dd64,1 +np.float64,0x3fde55a02dbcab40,0x3ff13b5f82d223e4,1 +np.float64,0xbfc8eabd7b31d57c,0x3ffc4472a81cb6d0,1 +np.float64,0x3fddcb5468bb96a8,0x3ff162899c381f2e,1 +np.float64,0xbfec7554d8f8eaaa,0x40055550e18ec463,1 +np.float64,0x3fd0b6e8b6a16dd0,0x3ff4e7b4781a50e3,1 +np.float64,0x3fedaae01b7b55c0,0x3fd8964916cdf53d,1 +np.float64,0x3fe0870f8a610e20,0x3ff072e7db95c2a2,1 +np.float64,0xbfec3e3ce2787c7a,0x4005379d0f6be873,1 +np.float64,0xbfe65502586caa04,0x4002beecff89147f,1 +np.float64,0xbfe0df39a961be74,0x4001025e36d1c061,1 +np.float64,0xbfb5d8edbe2bb1d8,0x3ffa7ff72b7d6a2b,1 +np.float64,0xbfde89574bbd12ae,0x40008ba4cd74544d,1 +np.float64,0xbfe72938f0ee5272,0x40030a5efd1acb6d,1 +np.float64,0xbfcd500d133aa01c,0x3ffcd462f9104689,1 +np.float64,0x3fe0350766606a0e,0x3ff0a2a3664e2c14,1 
+np.float64,0xbfc892fb573125f8,0x3ffc3944641cc69d,1 +np.float64,0xbfba7dc7c634fb90,0x3ffaca9a6a0ffe61,1 +np.float64,0xbfeac94478759289,0x40048068a8b83e45,1 +np.float64,0xbfe8f60c1af1ec18,0x4003b961995b6e51,1 +np.float64,0x3fea1c0817743810,0x3fe3ba28c1643cf7,1 +np.float64,0xbfe42a0fefe85420,0x4002052aadd77f01,1 +np.float64,0x3fd2c61c56a58c38,0x3ff45e84cb9a7fa9,1 +np.float64,0xbfd83fb7cdb07f70,0x3fff59ab4790074c,1 +np.float64,0x3fd95e630fb2bcc8,0x3ff29c8bee1335ad,1 +np.float64,0x3feee88f387dd11e,0x3fd0c3ad3ded4094,1 +np.float64,0x3fe061291160c252,0x3ff0890010199bbc,1 +np.float64,0xbfdc7db3b5b8fb68,0x400041dea3759443,1 +np.float64,0x3fee23b320fc4766,0x3fd5ee73d7aa5c56,1 +np.float64,0xbfdc25c590b84b8c,0x4000359cf98a00b4,1 +np.float64,0xbfd63cbfd2ac7980,0x3ffecf7b9cf99b3c,1 +np.float64,0xbfbeb3c29a3d6788,0x3ffb0e66ecc0fc3b,1 +np.float64,0xbfd2f57fd6a5eb00,0x3ffdf1d7c79e1532,1 +np.float64,0xbfab3eda9c367db0,0x3ff9fc0c875f42e9,1 +np.float64,0xbfe12df1c6e25be4,0x4001199c673e698c,1 +np.float64,0x3fef8ab23a7f1564,0x3fc5aff358c59f1c,1 +np.float64,0x3fe562f50feac5ea,0x3fead7bce205f7d9,1 +np.float64,0x3fdc41adbeb8835c,0x3ff1d0f71341b8f2,1 +np.float64,0x3fe2748967e4e912,0x3fee9837f970ff9e,1 +np.float64,0xbfdaa89d57b5513a,0x400000e3889ba4cf,1 +np.float64,0x3fdf2a137dbe5428,0x3ff0fecfbecbbf86,1 +np.float64,0xbfea1fdcd2f43fba,0x4004351974b32163,1 +np.float64,0xbfe34a93a3e69528,0x4001be323946a3e0,1 +np.float64,0x3fe929bacff25376,0x3fe54f47bd7f4cf2,1 +np.float64,0xbfd667fbd6accff8,0x3ffedb04032b3a1a,1 +np.float64,0xbfeb695796f6d2af,0x4004cbb08ec6f525,1 +np.float64,0x3fd204df2ea409c0,0x3ff490f51e6670f5,1 +np.float64,0xbfd89a2757b1344e,0x3fff722127b988c4,1 +np.float64,0xbfd0787187a0f0e4,0x3ffd4c16dbe94f32,1 +np.float64,0x3fd44239bfa88474,0x3ff3fabbfb24b1fa,1 +np.float64,0xbfeb0b3489f61669,0x40049ee33d811d33,1 +np.float64,0x3fdcf04eaab9e09c,0x3ff1a02a29996c4e,1 +np.float64,0x3fd4c51e4fa98a3c,0x3ff3d8302c68fc9a,1 +np.float64,0x3fd1346645a268cc,0x3ff4c72b4970ecaf,1 +np.float64,0x3fd6a89d09ad513c,0x3ff357af6520afac,1 +np.float64,0xbfba0f469a341e90,0x3ffac3a8f41bed23,1 +np.float64,0xbfe13f8ddce27f1c,0x40011ed557719fd6,1 +np.float64,0x3fd43e5e26a87cbc,0x3ff3fbc040fc30dc,1 +np.float64,0x3fe838125a707024,0x3fe6cb5c987248f3,1 +np.float64,0x3fe128c30c625186,0x3ff013cff238dd1b,1 +np.float64,0xbfcd4718833a8e30,0x3ffcd33c96bde6f9,1 +np.float64,0x3fe43fcd08e87f9a,0x3fec573997456ec1,1 +np.float64,0xbfe9a29104734522,0x4003ffd502a1b57f,1 +np.float64,0xbfe4709d7968e13b,0x40021bfc5cd55af4,1 +np.float64,0x3fd21c3925a43874,0x3ff48adf48556cbb,1 +np.float64,0x3fe9a521b2734a44,0x3fe4844fc054e839,1 +np.float64,0xbfdfa6a912bf4d52,0x4000b4730ad8521e,1 +np.float64,0x3fe3740702e6e80e,0x3fed5b106283b6ed,1 +np.float64,0x3fd0a3aa36a14754,0x3ff4ecb02a5e3f49,1 +np.float64,0x3fdcb903d0b97208,0x3ff1afa5d692c5b9,1 +np.float64,0xbfe7d67839efacf0,0x40034a3146abf6f2,1 +np.float64,0x3f9981c6d8330380,0x3ff8bbf1853d7b90,1 +np.float64,0xbfe9d4191673a832,0x400414a9ab453c5d,1 +np.float64,0x3fef0a1e5c7e143c,0x3fcf70b02a54c415,1 +np.float64,0xbfd996dee6b32dbe,0x3fffb6cf707ad8e4,1 +np.float64,0x3fe19bef17e337de,0x3fef9e70d4fcedae,1 +np.float64,0x3fe34a59716694b2,0x3fed8f6d5cfba474,1 +np.float64,0x3fdf27e27cbe4fc4,0x3ff0ff70500e0c7c,1 +np.float64,0xbfe19df87fe33bf1,0x40013afb401de24c,1 +np.float64,0xbfbdfd97ba3bfb30,0x3ffb02ef8c225e57,1 +np.float64,0xbfe3d3417267a683,0x4001e95ed240b0f8,1 +np.float64,0x3fe566498b6acc94,0x3fead342957d4910,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x3feb329bd8766538,0x3fe1c2225aafe3b4,1 
+np.float64,0xbfc19ca703233950,0x3ffb575b5df057b9,1 +np.float64,0x3fe755027d6eaa04,0x3fe81eb99c262e00,1 +np.float64,0xbfe6c2b8306d8570,0x4002e594199f9eec,1 +np.float64,0x3fd69438e6ad2870,0x3ff35d2275ae891d,1 +np.float64,0x3fda3e7285b47ce4,0x3ff25f5573dd47ae,1 +np.float64,0x3fe7928a166f2514,0x3fe7c4490ef4b9a9,1 +np.float64,0xbfd4eb71b9a9d6e4,0x3ffe75e8ccb74be1,1 +np.float64,0xbfcc3a07f1387410,0x3ffcb0b8af914a5b,1 +np.float64,0xbfe6e80225edd004,0x4002f2e26eae8999,1 +np.float64,0xbfb347728a268ee8,0x3ffa56bd526a12db,1 +np.float64,0x3fe5140ead6a281e,0x3feb4132c9140a1c,1 +np.float64,0xbfc147f125228fe4,0x3ffb4cab18b9050f,1 +np.float64,0xbfcb9145b537228c,0x3ffc9b1b6227a8c9,1 +np.float64,0xbfda84ef4bb509de,0x3ffff7f8a674e17d,1 +np.float64,0x3fd2eb6bbfa5d6d8,0x3ff454c225529d7e,1 +np.float64,0x3fe18c95f1e3192c,0x3fefb0cf0efba75a,1 +np.float64,0x3fe78606efef0c0e,0x3fe7d6c3a092d64c,1 +np.float64,0x3fbad5119a35aa20,0x3ff773dffe3ce660,1 +np.float64,0x3fd0cf5903a19eb4,0x3ff4e15fd21fdb42,1 +np.float64,0xbfd85ce90bb0b9d2,0x3fff618ee848e974,1 +np.float64,0x3fe90e11b9f21c24,0x3fe57be62f606f4a,1 +np.float64,0x3fd7a2040faf4408,0x3ff314ce85457ec2,1 +np.float64,0xbfd73fba69ae7f74,0x3fff14bff3504811,1 +np.float64,0x3fa04b4bd42096a0,0x3ff89f9b52f521a2,1 +np.float64,0xbfd7219ce5ae433a,0x3fff0cac0b45cc18,1 +np.float64,0xbfe0cf4661e19e8d,0x4000fdadb14e3c22,1 +np.float64,0x3fd07469fea0e8d4,0x3ff4f8eaa9b2394a,1 +np.float64,0x3f9b05c5d8360b80,0x3ff8b5e10672db5c,1 +np.float64,0x3fe4c25b916984b8,0x3febad29bd0e25e2,1 +np.float64,0xbfde8b4891bd1692,0x40008beb88d5c409,1 +np.float64,0xbfe199a7efe33350,0x400139b089aee21c,1 +np.float64,0x3fecdad25cf9b5a4,0x3fdc9d062867e8c3,1 +np.float64,0xbfe979b277f2f365,0x4003eedb061e25a4,1 +np.float64,0x3fc8c7311f318e60,0x3ff6040b9aeaad9d,1 +np.float64,0x3fd2b605b8a56c0c,0x3ff462b9a955c224,1 +np.float64,0x3fc073b6ad20e770,0x3ff7120e9f2fd63c,1 +np.float64,0xbfec60ede678c1dc,0x40054a3863e24dc2,1 +np.float64,0x3fe225171be44a2e,0x3feef910dca420ea,1 +np.float64,0xbfd7529762aea52e,0x3fff19d00661f650,1 +np.float64,0xbfd781783daf02f0,0x3fff2667b90be461,1 +np.float64,0x3fe3f6ec6d67edd8,0x3fecb4e814a2e33a,1 +np.float64,0x3fece6702df9cce0,0x3fdc6719d92a50d2,1 +np.float64,0xbfb5c602ce2b8c08,0x3ffa7ec761ba856a,1 +np.float64,0xbfd61f0153ac3e02,0x3ffec78e3b1a6c4d,1 +np.float64,0xbfec3462b2f868c5,0x400532630bbd7050,1 +np.float64,0xbfdd248485ba490a,0x400059391c07c1bb,1 +np.float64,0xbfd424921fa84924,0x3ffe416a85d1dcdf,1 +np.float64,0x3fbb23a932364750,0x3ff76eef79209f7f,1 +np.float64,0x3fca248b0f344918,0x3ff5d77c5c1b4e5e,1 +np.float64,0xbfe69af4a4ed35ea,0x4002d77c2e4fbd4e,1 +np.float64,0x3fdafe3cdcb5fc78,0x3ff22a9be6efbbf2,1 +np.float64,0xbfebba3377f77467,0x4004f3836e1fe71a,1 +np.float64,0xbfe650fae06ca1f6,0x4002bd851406377c,1 +np.float64,0x3fda630007b4c600,0x3ff2554f1832bd94,1 +np.float64,0xbfda8107d9b50210,0x3ffff6e6209659f3,1 +np.float64,0x3fea759a02f4eb34,0x3fe31d1a632c9aae,1 +np.float64,0x3fbf88149e3f1030,0x3ff728313aa12ccb,1 +np.float64,0x3f7196d2a0232e00,0x3ff910647e1914c1,1 +np.float64,0x3feeae51d17d5ca4,0x3fd2709698d31f6f,1 +np.float64,0xbfd73cd663ae79ac,0x3fff13f96300b55a,1 +np.float64,0x3fd4fc5f06a9f8c0,0x3ff3c99359854b97,1 +np.float64,0x3fb29f5d6e253ec0,0x3ff7f7c20e396b20,1 +np.float64,0xbfd757c82aaeaf90,0x3fff1b34c6141e98,1 +np.float64,0x3fc56fd4cf2adfa8,0x3ff670c145122909,1 +np.float64,0x3fc609a2f52c1348,0x3ff65d3ef3cade2c,1 +np.float64,0xbfe1de631163bcc6,0x40014e5528fadb73,1 +np.float64,0xbfe7eb4a726fd695,0x40035202f49d95c4,1 +np.float64,0xbfc9223771324470,0x3ffc4b84d5e263b9,1 
+np.float64,0x3fee91a8a87d2352,0x3fd3364befde8de6,1 +np.float64,0x3fbc9784fe392f10,0x3ff7578e29f6a1b2,1 +np.float64,0xbfec627c2c78c4f8,0x40054b0ff2cb9c55,1 +np.float64,0xbfb8b406a6316810,0x3ffaadd97062fb8c,1 +np.float64,0xbfecf98384f9f307,0x4005a043d9110d79,1 +np.float64,0xbfe5834bab6b0698,0x400276f114aebee4,1 +np.float64,0xbfd90f391eb21e72,0x3fff91e26a8f48f3,1 +np.float64,0xbfee288ce2fc511a,0x400667cb09aa04b3,1 +np.float64,0x3fd5aa5e32ab54bc,0x3ff39b7080a52214,1 +np.float64,0xbfee7ef907fcfdf2,0x4006ab96a8eba4c5,1 +np.float64,0x3fd6097973ac12f4,0x3ff3822486978bd1,1 +np.float64,0xbfe02d14b8e05a2a,0x4000ce5be53047b1,1 +np.float64,0xbf9c629a6838c540,0x3ff993897728c3f9,1 +np.float64,0xbfee2024667c4049,0x40066188782fb1f0,1 +np.float64,0xbfa42a88fc285510,0x3ff9c35a4bbce104,1 +np.float64,0x3fa407af5c280f60,0x3ff881b360d8eea1,1 +np.float64,0x3fed0ba42cfa1748,0x3fdbb7d55609175f,1 +np.float64,0xbfdd0b5844ba16b0,0x400055b0bb59ebb2,1 +np.float64,0x3fd88d97e6b11b30,0x3ff2d53c1ecb8f8c,1 +np.float64,0xbfeb7a915ef6f523,0x4004d410812eb84c,1 +np.float64,0xbfb5f979ca2bf2f0,0x3ffa8201d73cd4ca,1 +np.float64,0x3fb3b65dd6276cc0,0x3ff7e64576199505,1 +np.float64,0x3fcd47a7793a8f50,0x3ff570a7b672f160,1 +np.float64,0xbfa41dd30c283ba0,0x3ff9c2f488127eb3,1 +np.float64,0x3fe4b1ea1f6963d4,0x3febc2bed7760427,1 +np.float64,0xbfdd0f81d2ba1f04,0x400056463724b768,1 +np.float64,0x3fd15d93f7a2bb28,0x3ff4bc7a24eacfd7,1 +np.float64,0xbfe3213af8e64276,0x4001b14579dfded3,1 +np.float64,0x3fd90dfbeab21bf8,0x3ff2b26a6c2c3bb3,1 +np.float64,0xbfd02d54bca05aaa,0x3ffd38ab3886b203,1 +np.float64,0x3fc218dcad2431b8,0x3ff6dced56d5b417,1 +np.float64,0x3fea5edf71f4bdbe,0x3fe3455ee09f27e6,1 +np.float64,0x3fa74319042e8640,0x3ff867d224545438,1 +np.float64,0x3fd970ad92b2e15c,0x3ff2979084815dc1,1 +np.float64,0x3fce0a4bf73c1498,0x3ff557a4df32df3e,1 +np.float64,0x3fef5c8e10feb91c,0x3fc99ca0eeaaebe4,1 +np.float64,0xbfedae997ffb5d33,0x400611af18f407ab,1 +np.float64,0xbfbcf07d6239e0f8,0x3ffaf201177a2d36,1 +np.float64,0xbfc3c52541278a4c,0x3ffb9d2af0408e4a,1 +np.float64,0x3fe4ef44e4e9de8a,0x3feb71f7331255e5,1 +np.float64,0xbfccd9f5f539b3ec,0x3ffcc53a99339592,1 +np.float64,0xbfda32c745b4658e,0x3fffe16e8727ef89,1 +np.float64,0xbfef54932a7ea926,0x40077e4605e61ca1,1 +np.float64,0x3fe9d4ae3573a95c,0x3fe4344a069a3fd0,1 +np.float64,0x3fda567e73b4acfc,0x3ff258bd77a663c7,1 +np.float64,0xbfd5bcac5eab7958,0x3ffead6379c19c52,1 +np.float64,0xbfee5e56f97cbcae,0x40069131fc54018d,1 +np.float64,0x3fc2d4413925a880,0x3ff6c54163816298,1 +np.float64,0xbfe9ddf6e873bbee,0x400418d8c722f7c5,1 +np.float64,0x3fdaf2a683b5e54c,0x3ff22dcda599d69c,1 +np.float64,0xbfca69789f34d2f0,0x3ffc7547ff10b1a6,1 +np.float64,0x3fed076f62fa0ede,0x3fdbcbda03c1d72a,1 +np.float64,0xbfcb38326f367064,0x3ffc8fb55dadeae5,1 +np.float64,0x3fe1938705e3270e,0x3fefa88130c5adda,1 +np.float64,0x3feaffae3b75ff5c,0x3fe221e3da537c7e,1 +np.float64,0x3fefc94acb7f9296,0x3fbd9a360ace67b4,1 +np.float64,0xbfe8bddeb0f17bbe,0x4003a316685c767e,1 +np.float64,0x3fbe10fbee3c21f0,0x3ff73fceb10650f5,1 +np.float64,0x3fde9126c1bd224c,0x3ff12a742f734d0a,1 +np.float64,0xbfe9686c91f2d0d9,0x4003e7bc6ee77906,1 +np.float64,0xbfb1ba4892237490,0x3ffa3dda064c2509,1 +np.float64,0xbfe2879100e50f22,0x400181c1a5b16f0f,1 +np.float64,0x3fd1cd40b6a39a80,0x3ff49f70e3064e95,1 +np.float64,0xbfc965869132cb0c,0x3ffc5419f3b43701,1 +np.float64,0x3fea7a6f2874f4de,0x3fe31480fb2dd862,1 +np.float64,0x3fc3bc56892778b0,0x3ff6a7e8fa0e8b0e,1 +np.float64,0x3fec1ed451f83da8,0x3fdfd78e564b8ad7,1 +np.float64,0x3feb77d16df6efa2,0x3fe13d083344e45e,1 
+np.float64,0xbfe822e7c67045d0,0x400367104a830cf6,1 +np.float64,0x8000000000000001,0x3ff921fb54442d18,1 +np.float64,0xbfd4900918a92012,0x3ffe5dc0e19737b4,1 +np.float64,0x3fed184187fa3084,0x3fdb7b7a39f234f4,1 +np.float64,0x3fecef846179df08,0x3fdc3cb2228c3682,1 +np.float64,0xbfe2d2aed165a55e,0x400198e21c5b861b,1 +np.float64,0x7ff0000000000000,0x7ff8000000000000,1 +np.float64,0xbfee9409a07d2813,0x4006bd358232d073,1 +np.float64,0xbfecedc2baf9db86,0x4005995df566fc21,1 +np.float64,0x3fe6d857396db0ae,0x3fe8d2cb8794aa99,1 +np.float64,0xbf9a579e7834af40,0x3ff98b5cc8021e1c,1 +np.float64,0x3fc664fefb2cca00,0x3ff651a664ccf8fa,1 +np.float64,0xbfe8a7aa0e714f54,0x40039a5b4df938a0,1 +np.float64,0xbfdf27d380be4fa8,0x4000a241074dbae6,1 +np.float64,0x3fe00ddf55e01bbe,0x3ff0b94eb1ea1851,1 +np.float64,0x3feb47edbff68fdc,0x3fe199822d075959,1 +np.float64,0x3fb4993822293270,0x3ff7d80c838186d0,1 +np.float64,0xbfca2cd1473459a4,0x3ffc6d88c8de3d0d,1 +np.float64,0xbfea7d9c7674fb39,0x40045e4559e9e52d,1 +np.float64,0x3fe0dce425e1b9c8,0x3ff04099cab23289,1 +np.float64,0x3fd6bb7e97ad76fc,0x3ff352a30434499c,1 +np.float64,0x3fd4a4f16da949e4,0x3ff3e0b07432c9aa,1 +np.float64,0x8000000000000000,0x3ff921fb54442d18,1 +np.float64,0x3fe688f5b56d11ec,0x3fe9435f63264375,1 +np.float64,0xbfdf5a427ebeb484,0x4000a97a6c5d4abc,1 +np.float64,0xbfd1f3483fa3e690,0x3ffdae6c8a299383,1 +np.float64,0xbfeac920db759242,0x4004805862be51ec,1 +np.float64,0x3fef5bc711feb78e,0x3fc9ac40fba5b93b,1 +np.float64,0x3fe4bd9e12e97b3c,0x3febb363c787d381,1 +np.float64,0x3fef6a59ab7ed4b4,0x3fc880f1324eafce,1 +np.float64,0x3fc07a362120f470,0x3ff7113cf2c672b3,1 +np.float64,0xbfe4d6dbe2e9adb8,0x40023d6f6bea44b7,1 +np.float64,0xbfec2d6a15785ad4,0x40052eb425cc37a2,1 +np.float64,0x3fc90dae05321b60,0x3ff5fb10015d2934,1 +np.float64,0xbfa9239f74324740,0x3ff9eb2d057068ea,1 +np.float64,0xbfeb4fc8baf69f92,0x4004bf5e17fb08a4,1 +np.float64,0x0,0x3ff921fb54442d18,1 +np.float64,0x3faaf1884c35e320,0x3ff84a5591dbe1f3,1 +np.float64,0xbfed842561fb084b,0x4005f5c0a19116ce,1 +np.float64,0xbfc64850c32c90a0,0x3ffbeeac2ee70f9a,1 +np.float64,0x3fd7d879f5afb0f4,0x3ff306254c453436,1 +np.float64,0xbfdabaa586b5754c,0x4000035e6ac83a2b,1 +np.float64,0xbfebfeefa977fddf,0x4005167446fb9faf,1 +np.float64,0xbfe9383462727069,0x4003d407aa6a1577,1 +np.float64,0x3fe108dfb6e211c0,0x3ff026ac924b281d,1 +np.float64,0xbf85096df02a12c0,0x3ff94c0e60a22ede,1 +np.float64,0xbfe3121cd566243a,0x4001ac8f90db5882,1 +np.float64,0xbfd227f62aa44fec,0x3ffdbc26bb175dcc,1 +np.float64,0x3fd931af2cb26360,0x3ff2a8b62dfe003c,1 +np.float64,0xbfd9b794e3b36f2a,0x3fffbfbc89ec013d,1 +np.float64,0x3fc89b2e6f313660,0x3ff609a6e67f15f2,1 +np.float64,0x3fc0b14a8f216298,0x3ff70a4b6905aad2,1 +np.float64,0xbfeda11a657b4235,0x400608b3f9fff574,1 +np.float64,0xbfed2ee9ec7a5dd4,0x4005c040b7c02390,1 +np.float64,0xbfef7819d8fef034,0x4007ac6bf75cf09d,1 +np.float64,0xbfcc4720fb388e40,0x3ffcb2666a00b336,1 +np.float64,0xbfe05dec4be0bbd8,0x4000dc8a25ca3760,1 +np.float64,0x3fb093416e212680,0x3ff81897b6d8b374,1 +np.float64,0xbfc6ab89332d5714,0x3ffbfb4559d143e7,1 +np.float64,0x3fc51948512a3290,0x3ff67bb9df662c0a,1 +np.float64,0x3fed4d94177a9b28,0x3fda76c92f0c0132,1 +np.float64,0x3fdd195fbeba32c0,0x3ff194a5586dd18e,1 +np.float64,0x3fe3f82799e7f050,0x3fecb354c2faf55c,1 +np.float64,0x3fecac2169f95842,0x3fdd7222296cb7a7,1 +np.float64,0x3fe3d3f36fe7a7e6,0x3fece18f45e30dd7,1 +np.float64,0x3fe31ff63d663fec,0x3fedc46c77d30c6a,1 +np.float64,0xbfe3120c83e62419,0x4001ac8a7c4aa742,1 +np.float64,0x3fe7c1a7976f8350,0x3fe77e4a9307c9f8,1 
+np.float64,0x3fe226fe9de44dfe,0x3feef6c0f3cb00fa,1 +np.float64,0x3fd5c933baab9268,0x3ff3933e8a37de42,1 +np.float64,0x3feaa98496f5530a,0x3fe2c003832ebf21,1 +np.float64,0xbfc6f80a2f2df014,0x3ffc04fd54cb1317,1 +np.float64,0x3fde5e18d0bcbc30,0x3ff138f7b32a2ca3,1 +np.float64,0xbfe30c8dd566191c,0x4001aad4af935a78,1 +np.float64,0x3fbe8d196e3d1a30,0x3ff737fec8149ecc,1 +np.float64,0x3feaee6731f5dcce,0x3fe241fa42cce22d,1 +np.float64,0x3fef9cc46cff3988,0x3fc3f17b708dbdbb,1 +np.float64,0xbfdb181bdeb63038,0x4000103ecf405602,1 +np.float64,0xbfc58de0ed2b1bc0,0x3ffbd704c14e15cd,1 +np.float64,0xbfee05d5507c0bab,0x40064e480faba6d8,1 +np.float64,0x3fe27d0ffa64fa20,0x3fee8dc71ef79f2c,1 +np.float64,0xbfe4f7ad4c69ef5a,0x400248456cd09a07,1 +np.float64,0xbfe4843e91e9087d,0x4002225f3e139c84,1 +np.float64,0x3fe7158b9c6e2b18,0x3fe87ae845c5ba96,1 +np.float64,0xbfea64316074c863,0x400452fd2bc23a44,1 +np.float64,0xbfc9f3ae4133e75c,0x3ffc663d482afa42,1 +np.float64,0xbfd5e18513abc30a,0x3ffeb72fc76d7071,1 +np.float64,0xbfd52f6438aa5ec8,0x3ffe87e5b18041e5,1 +np.float64,0xbfea970650f52e0d,0x400469a4a6758154,1 +np.float64,0xbfe44321b7e88644,0x40020d404a2141b1,1 +np.float64,0x3fdf5a39bbbeb474,0x3ff0f10453059dbd,1 +np.float64,0xbfa1d4069423a810,0x3ff9b0a2eacd2ce2,1 +np.float64,0xbfc36d16a326da2c,0x3ffb92077d41d26a,1 +np.float64,0x1,0x3ff921fb54442d18,1 +np.float64,0x3feb232a79764654,0x3fe1df5beeb249d0,1 +np.float64,0xbfed2003d5fa4008,0x4005b737c2727583,1 +np.float64,0x3fd5b093a3ab6128,0x3ff399ca2db1d96d,1 +np.float64,0x3fca692c3d34d258,0x3ff5ceb86b79223e,1 +np.float64,0x3fd6bbdf89ad77c0,0x3ff3528916df652d,1 +np.float64,0xbfefdadd46ffb5bb,0x40085ee735e19f19,1 +np.float64,0x3feb69fb2676d3f6,0x3fe157ee0c15691e,1 +np.float64,0x3fe44c931f689926,0x3fec46b6f5e3f265,1 +np.float64,0xbfc43ddbcb287bb8,0x3ffbac71d268d74d,1 +np.float64,0x3fe6e16d43edc2da,0x3fe8c5cf0f0daa66,1 +np.float64,0x3fe489efc76913e0,0x3febf704ca1ac2a6,1 +np.float64,0xbfe590aadceb2156,0x40027b764205cf78,1 +np.float64,0xbf782e8aa0305d00,0x3ff93a29e81928ab,1 +np.float64,0x3fedcb80cffb9702,0x3fd7e5d1f98a418b,1 +np.float64,0x3fe075858060eb0c,0x3ff07d23ab46b60f,1 +np.float64,0x3fe62a68296c54d0,0x3fe9c77f7068043b,1 +np.float64,0x3feff16a3c7fe2d4,0x3fae8e8a739cc67a,1 +np.float64,0xbfd6ed93e3addb28,0x3ffefebab206fa99,1 +np.float64,0x3fe40d8ccf681b1a,0x3fec97e9cd29966d,1 +np.float64,0x3fd6408210ac8104,0x3ff3737a7d374107,1 +np.float64,0x3fec8023b8f90048,0x3fde35ebfb2b3afd,1 +np.float64,0xbfe13babd4627758,0x40011dae5c07c56b,1 +np.float64,0xbfd2183e61a4307c,0x3ffdb80dd747cfbe,1 +np.float64,0x3feae8eb1d75d1d6,0x3fe24c1f6e42ae77,1 +np.float64,0xbfea559b9c74ab37,0x40044c8e5e123b20,1 +np.float64,0xbfd12c9d57a2593a,0x3ffd7ac6222f561c,1 +np.float64,0x3fe32eb697e65d6e,0x3fedb202693875b6,1 +np.float64,0xbfde0808c3bc1012,0x4000794bd8616ea3,1 +np.float64,0x3fe14958a06292b2,0x3ff0007b40ac648a,1 +np.float64,0x3fe3d388a6e7a712,0x3fece21751a6dd7c,1 +np.float64,0x3fe7ad7897ef5af2,0x3fe79c5b3da302a7,1 +np.float64,0x3fec75527e78eaa4,0x3fde655de0cf0508,1 +np.float64,0x3fea920d4c75241a,0x3fe2ea48f031d908,1 +np.float64,0x7fefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfc17a68cb22f4d0,0x3ffb530925f41aa0,1 +np.float64,0xbfe1c93166e39263,0x400147f3cb435dec,1 +np.float64,0x3feb97c402f72f88,0x3fe0fe5b561bf869,1 +np.float64,0x3fb58ff5162b1ff0,0x3ff7c8933fa969dc,1 +np.float64,0x3fe68e2beded1c58,0x3fe93c075283703b,1 +np.float64,0xbf94564cc828aca0,0x3ff97355e5ee35db,1 +np.float64,0x3fd31061c9a620c4,0x3ff44b150ec96998,1 +np.float64,0xbfc7d0c89f2fa190,0x3ffc208bf4eddc4d,1 
+np.float64,0x3fe5736f1d6ae6de,0x3feac18f84992d1e,1 +np.float64,0x3fdb62e480b6c5c8,0x3ff20ecfdc4afe7c,1 +np.float64,0xbfc417228b282e44,0x3ffba78afea35979,1 +np.float64,0x3f8f5ba1303eb780,0x3ff8e343714630ff,1 +np.float64,0x3fe8e99126f1d322,0x3fe5b6511d4c0798,1 +np.float64,0xbfe2ec08a1e5d812,0x4001a0bb28a85875,1 +np.float64,0x3fea3b46cf74768e,0x3fe383dceaa74296,1 +np.float64,0xbfe008b5ed60116c,0x4000c3d62c275d40,1 +np.float64,0xbfcd9f8a4b3b3f14,0x3ffcde98d6484202,1 +np.float64,0xbfdb5fb112b6bf62,0x40001a22137ef1c9,1 +np.float64,0xbfe9079565f20f2b,0x4003c0670c92e401,1 +np.float64,0xbfce250dc53c4a1c,0x3ffcefc2b3dc3332,1 +np.float64,0x3fe9ba85d373750c,0x3fe4607131b28773,1 +np.float64,0x10000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfeb9ef42c773de8,0x4004e5f239203ad8,1 +np.float64,0xbfd6bf457dad7e8a,0x3ffef2563d87b18d,1 +np.float64,0x3fe4de9aa5e9bd36,0x3feb87f97defb04a,1 +np.float64,0x3fedb4f67cfb69ec,0x3fd8603c465bffac,1 +np.float64,0x3fe7b6d9506f6db2,0x3fe78e670c7bdb67,1 +np.float64,0x3fe071717460e2e2,0x3ff07f84472d9cc5,1 +np.float64,0xbfed2e79dbfa5cf4,0x4005bffc6f9ad24f,1 +np.float64,0x3febb8adc377715c,0x3fe0bcebfbd45900,1 +np.float64,0xbfee2cffd87c5a00,0x40066b20a037c478,1 +np.float64,0x3fef7e358d7efc6c,0x3fc6d0ba71a542a8,1 +np.float64,0xbfef027eef7e04fe,0x400723291cb00a7a,1 +np.float64,0x3fac96da34392dc0,0x3ff83d260a936c6a,1 +np.float64,0x3fe9dba94a73b752,0x3fe428736b94885e,1 +np.float64,0x3fed37581efa6eb0,0x3fdae49dcadf1d90,1 +np.float64,0xbfe6e61037edcc20,0x4002f23031b8d522,1 +np.float64,0xbfdea7204dbd4e40,0x40008fe1f37918b7,1 +np.float64,0x3feb9f8edb773f1e,0x3fe0eef20bd4387b,1 +np.float64,0x3feeb0b6ed7d616e,0x3fd25fb3b7a525d6,1 +np.float64,0xbfd7ce9061af9d20,0x3fff3b25d531aa2b,1 +np.float64,0xbfc806b509300d6c,0x3ffc2768743a8360,1 +np.float64,0xbfa283882c250710,0x3ff9b61fda28914a,1 +np.float64,0x3fdec70050bd8e00,0x3ff11b1d769b578f,1 +np.float64,0xbfc858a44930b148,0x3ffc31d6758b4721,1 +np.float64,0x3fdc321150b86424,0x3ff1d5504c3c91e4,1 +np.float64,0x3fd9416870b282d0,0x3ff2a46f3a850f5b,1 +np.float64,0x3fdd756968baead4,0x3ff17ac510a5573f,1 +np.float64,0xbfedfd632cfbfac6,0x400648345a2f89b0,1 +np.float64,0x3fd6874285ad0e84,0x3ff36098ebff763f,1 +np.float64,0x3fe6daacc9edb55a,0x3fe8cf75fae1e35f,1 +np.float64,0x3fe53f19766a7e32,0x3feb07d0e97cd55b,1 +np.float64,0x3fd13cc36ca27988,0x3ff4c4ff801b1faa,1 +np.float64,0x3fe4f21cbce9e43a,0x3feb6e34a72ef529,1 +np.float64,0xbfc21c1cc9243838,0x3ffb67726394ca89,1 +np.float64,0x3fe947a3f2728f48,0x3fe51eae4660e23c,1 +np.float64,0xbfce78cd653cf19c,0x3ffcfa89194b3f5e,1 +np.float64,0x3fe756f049eeade0,0x3fe81be7f2d399e2,1 +np.float64,0xbfcc727cf138e4f8,0x3ffcb7f547841bb0,1 +np.float64,0xbfc2d8d58f25b1ac,0x3ffb7f496cc72458,1 +np.float64,0xbfcfd0e4653fa1c8,0x3ffd26e1309bc80b,1 +np.float64,0xbfe2126c106424d8,0x40015e0e01db6a4a,1 +np.float64,0x3fe580e4306b01c8,0x3feaaf683ce51aa5,1 +np.float64,0x3fcea8a1b93d5140,0x3ff543456c0d28c7,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0xbfd9d5da72b3abb4,0x3fffc8013113f968,1 +np.float64,0xbfe1fdfcea63fbfa,0x400157def2e4808d,1 +np.float64,0xbfc0022e0720045c,0x3ffb239963e7cbf2,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arccosh.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arccosh.csv new file mode 100644 index 0000000..1b3eda4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arccosh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol 
+np.float32,0x3f83203f,0x3e61d9d6,2 +np.float32,0x3f98dea1,0x3f1d1af6,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x7eba99af,0x42b0d032,2 +np.float32,0x3fc95a13,0x3f833650,2 +np.float32,0x3fce9a45,0x3f8771e1,2 +np.float32,0x3fc1bd96,0x3f797811,2 +np.float32,0x7eba2391,0x42b0ceed,2 +np.float32,0x7d4e8f15,0x42acdb8c,2 +np.float32,0x3feca42e,0x3f9cc88e,2 +np.float32,0x7e2b314e,0x42af412e,2 +np.float32,0x7f7fffff,0x42b2d4fc,2 +np.float32,0x3f803687,0x3d6c4380,2 +np.float32,0x3fa0edbd,0x3f33e706,2 +np.float32,0x3faa8074,0x3f4b3d3c,2 +np.float32,0x3fa0c49e,0x3f337af3,2 +np.float32,0x3f8c9ec4,0x3ee18812,2 +np.float32,0x7efef78e,0x42b17006,2 +np.float32,0x3fc75720,0x3f818aa4,2 +np.float32,0x7f52d4c8,0x42b27198,2 +np.float32,0x3f88f21e,0x3ebe52b0,2 +np.float32,0x3ff7a042,0x3fa3a07a,2 +np.float32,0x7f52115c,0x42b26fbd,2 +np.float32,0x3fc6bf6f,0x3f810b42,2 +np.float32,0x3fd105d0,0x3f895649,2 +np.float32,0x3fee7c2a,0x3f9df66e,2 +np.float32,0x7f0ff9a5,0x42b1ae4f,2 +np.float32,0x7e81f075,0x42b016e7,2 +np.float32,0x3fa57d65,0x3f3f70c6,2 +np.float32,0x80800000,0xffc00000,2 +np.float32,0x7da239f5,0x42adc2bf,2 +np.float32,0x3f9e432c,0x3f2cbd80,2 +np.float32,0x3ff2839b,0x3fa07ee4,2 +np.float32,0x3fec8aef,0x3f9cb850,2 +np.float32,0x7d325893,0x42ac905b,2 +np.float32,0x3fa27431,0x3f37dade,2 +np.float32,0x3fce7408,0x3f8753ae,2 +np.float32,0x3fde6684,0x3f93353f,2 +np.float32,0x3feb9a3e,0x3f9c1cff,2 +np.float32,0x7deb34bb,0x42ae80f0,2 +np.float32,0x3fed9300,0x3f9d61b7,2 +np.float32,0x7f35e253,0x42b225fb,2 +np.float32,0x7e6db57f,0x42afe93f,2 +np.float32,0x3fa41f08,0x3f3c10bc,2 +np.float32,0x3fb0d4da,0x3f590de3,2 +np.float32,0x3fb5c690,0x3f632351,2 +np.float32,0x3fcde9ce,0x3f86e638,2 +np.float32,0x3f809c7b,0x3dc81161,2 +np.float32,0x3fd77291,0x3f8e3226,2 +np.float32,0x3fc21a06,0x3f7a1a82,2 +np.float32,0x3fba177e,0x3f6b8139,2 +np.float32,0x7f370dff,0x42b22944,2 +np.float32,0x3fe5bfcc,0x3f9841c1,2 +np.float32,0x3feb0caa,0x3f9bc139,2 +np.float32,0x7f4fe5c3,0x42b26a6c,2 +np.float32,0x7f1e1419,0x42b1de28,2 +np.float32,0x7f5e3c96,0x42b28c92,2 +np.float32,0x3f8cd313,0x3ee3521e,2 +np.float32,0x3fa97824,0x3f48e049,2 +np.float32,0x7d8ca281,0x42ad799e,2 +np.float32,0x3f96b51b,0x3f165193,2 +np.float32,0x3f81328a,0x3e0bf504,2 +np.float32,0x3ff60bf3,0x3fa2ab45,2 +np.float32,0x3ff9b629,0x3fa4e107,2 +np.float32,0x3fecacfc,0x3f9cce37,2 +np.float32,0x3fba8804,0x3f6c5600,2 +np.float32,0x3f81f752,0x3e333fdd,2 +np.float32,0x3fb5b262,0x3f62fb46,2 +np.float32,0x3fa21bc0,0x3f36f7e6,2 +np.float32,0x3fbc87bb,0x3f7011dc,2 +np.float32,0x3fe18b32,0x3f9565ae,2 +np.float32,0x7dfb6dd5,0x42aea316,2 +np.float32,0x3fb7c602,0x3f670ee3,2 +np.float32,0x7efeb6a2,0x42b16f84,2 +np.float32,0x3fa56180,0x3f3f2ca4,2 +np.float32,0x3f8dcaff,0x3eeb9ac0,2 +np.float32,0x7e876238,0x42b02beb,2 +np.float32,0x7f0bb67d,0x42b19eec,2 +np.float32,0x3faca01c,0x3f4fffa5,2 +np.float32,0x3fdb57ee,0x3f9108b8,2 +np.float32,0x3fe3bade,0x3f96e4b7,2 +np.float32,0x7f7aa2dd,0x42b2ca25,2 +np.float32,0x3fed92ec,0x3f9d61aa,2 +np.float32,0x7eb789b1,0x42b0c7b9,2 +np.float32,0x7f7f16e4,0x42b2d329,2 +np.float32,0x3fb6647e,0x3f645b84,2 +np.float32,0x3f99335e,0x3f1e1d96,2 +np.float32,0x7e690a11,0x42afdf17,2 +np.float32,0x7dff2f95,0x42aeaaae,2 +np.float32,0x7f70adfd,0x42b2b564,2 +np.float32,0x3fe92252,0x3f9a80fe,2 +np.float32,0x3fef54ce,0x3f9e7fe5,2 +np.float32,0x3ff24eaa,0x3fa05df9,2 +np.float32,0x7f04565a,0x42b18328,2 +np.float32,0x3fcb8b80,0x3f85007f,2 +np.float32,0x3fcd4d0a,0x3f866983,2 +np.float32,0x3fbe7d82,0x3f73a911,2 +np.float32,0x3f8a7a8a,0x3ecdc8f6,2 
+np.float32,0x3f912441,0x3f030d56,2 +np.float32,0x3f9b29d6,0x3f23f663,2 +np.float32,0x3fab7f36,0x3f4d7c6c,2 +np.float32,0x7dfedafc,0x42aeaa04,2 +np.float32,0x3fe190c0,0x3f956982,2 +np.float32,0x3f927515,0x3f07e0bb,2 +np.float32,0x3ff6442a,0x3fa2cd7e,2 +np.float32,0x7f6656d0,0x42b29ee8,2 +np.float32,0x3fe29aa0,0x3f96201f,2 +np.float32,0x3fa4a247,0x3f3d5687,2 +np.float32,0x3fa1cf19,0x3f363226,2 +np.float32,0x3fc20037,0x3f79ed36,2 +np.float32,0x7cc1241a,0x42ab5645,2 +np.float32,0x3fafd540,0x3f56f25a,2 +np.float32,0x7e5b3f5f,0x42afbfdb,2 +np.float32,0x7f48de5f,0x42b258d0,2 +np.float32,0x3fce1ca0,0x3f870e85,2 +np.float32,0x7ee40bb2,0x42b136e4,2 +np.float32,0x7ecdb133,0x42b10212,2 +np.float32,0x3f9f181c,0x3f2f02ca,2 +np.float32,0x3f936cbf,0x3f0b4f63,2 +np.float32,0x3fa4f8ea,0x3f3e2c2f,2 +np.float32,0x3fcc03e2,0x3f8561ac,2 +np.float32,0x3fb801f2,0x3f67831b,2 +np.float32,0x7e141dad,0x42aef70c,2 +np.float32,0x3fe8c04e,0x3f9a4087,2 +np.float32,0x3f8548d5,0x3e929f37,2 +np.float32,0x7f148d7d,0x42b1be56,2 +np.float32,0x3fd2c9a2,0x3f8ab1ed,2 +np.float32,0x7eb374fd,0x42b0bc36,2 +np.float32,0x7f296d36,0x42b201a7,2 +np.float32,0x3ff138e2,0x3f9fb09d,2 +np.float32,0x3ff42898,0x3fa18347,2 +np.float32,0x7da8c5e1,0x42add700,2 +np.float32,0x7dcf72c4,0x42ae40a4,2 +np.float32,0x7ea571fc,0x42b09296,2 +np.float32,0x3fc0953d,0x3f776ba3,2 +np.float32,0x7f1773dd,0x42b1c83c,2 +np.float32,0x7ef53b68,0x42b15c17,2 +np.float32,0x3f85d69f,0x3e9a0f3a,2 +np.float32,0x7e8b9a05,0x42b03ba0,2 +np.float32,0x3ff07d20,0x3f9f3ad2,2 +np.float32,0x7e8da32c,0x42b0430a,2 +np.float32,0x7ef96004,0x42b164ab,2 +np.float32,0x3fdfaa62,0x3f941837,2 +np.float32,0x7f0057c5,0x42b17377,2 +np.float32,0x3fb2663f,0x3f5c5065,2 +np.float32,0x3fd3d8c3,0x3f8b8055,2 +np.float32,0x1,0xffc00000,2 +np.float32,0x3fd536c1,0x3f8c8862,2 +np.float32,0x3f91b953,0x3f053619,2 +np.float32,0x3fb3305c,0x3f5deee1,2 +np.float32,0x7ecd86b9,0x42b101a8,2 +np.float32,0x3fbf71c5,0x3f75624d,2 +np.float32,0x3ff5f0f4,0x3fa29ad2,2 +np.float32,0x3fe50389,0x3f97c328,2 +np.float32,0x3fa325a1,0x3f399e69,2 +np.float32,0x3fe4397a,0x3f973a9f,2 +np.float32,0x3f8684c6,0x3ea2b784,2 +np.float32,0x7f25ae00,0x42b1f634,2 +np.float32,0x3ff7cbf7,0x3fa3badb,2 +np.float32,0x7f73f0e0,0x42b2bc48,2 +np.float32,0x3fc88b70,0x3f828b92,2 +np.float32,0x3fb01c16,0x3f578886,2 +np.float32,0x7e557623,0x42afb229,2 +np.float32,0x3fcbcd5b,0x3f8535b4,2 +np.float32,0x7f7157e4,0x42b2b6cd,2 +np.float32,0x7f51d9d4,0x42b26f36,2 +np.float32,0x7f331a3b,0x42b21e17,2 +np.float32,0x7f777fb5,0x42b2c3b2,2 +np.float32,0x3f832001,0x3e61d11f,2 +np.float32,0x7f2cd055,0x42b20bca,2 +np.float32,0x3f89831f,0x3ec42f76,2 +np.float32,0x7f21da33,0x42b1ea3d,2 +np.float32,0x3f99e416,0x3f20330a,2 +np.float32,0x7f2c8ea1,0x42b20b07,2 +np.float32,0x7f462c98,0x42b251e6,2 +np.float32,0x7f4fdb3f,0x42b26a52,2 +np.float32,0x3fcc1338,0x3f856e07,2 +np.float32,0x3f823673,0x3e3e20da,2 +np.float32,0x7dbfe89d,0x42ae18c6,2 +np.float32,0x3fc9b04c,0x3f837d38,2 +np.float32,0x7dba3213,0x42ae094d,2 +np.float32,0x7ec5a483,0x42b0eda1,2 +np.float32,0x3fbc4d14,0x3f6fa543,2 +np.float32,0x3fc85ce2,0x3f8264f1,2 +np.float32,0x7f77c816,0x42b2c447,2 +np.float32,0x3f9c9281,0x3f280492,2 +np.float32,0x7f49b3e2,0x42b25aef,2 +np.float32,0x3fa7e4da,0x3f45347c,2 +np.float32,0x7e0c9df5,0x42aedc72,2 +np.float32,0x7f21fd1a,0x42b1eaab,2 +np.float32,0x7f7c63ad,0x42b2cdb6,2 +np.float32,0x7f4eb80a,0x42b26783,2 +np.float32,0x7e98038c,0x42b0673c,2 +np.float32,0x7e89ba08,0x42b034b4,2 +np.float32,0x3ffc06ba,0x3fa64094,2 +np.float32,0x3fae63f6,0x3f53db36,2 
+np.float32,0x3fbc2d30,0x3f6f6a1c,2 +np.float32,0x7de0e5e5,0x42ae69fe,2 +np.float32,0x7e09ed18,0x42aed28d,2 +np.float32,0x3fea78f8,0x3f9b6129,2 +np.float32,0x7dfe0bcc,0x42aea863,2 +np.float32,0x7ee21d03,0x42b13289,2 +np.float32,0x3fcc3aed,0x3f858dfc,2 +np.float32,0x3fe6b3ba,0x3f98e4ea,2 +np.float32,0x3f90f25f,0x3f025225,2 +np.float32,0x7f1bcaf4,0x42b1d6b3,2 +np.float32,0x3f83ac81,0x3e74c20e,2 +np.float32,0x3f98681d,0x3f1bae16,2 +np.float32,0x3fe1f2d9,0x3f95ad08,2 +np.float32,0x3fa279d7,0x3f37e951,2 +np.float32,0x3feb922a,0x3f9c17c4,2 +np.float32,0x7f1c72e8,0x42b1d8da,2 +np.float32,0x3fea156b,0x3f9b2038,2 +np.float32,0x3fed6bda,0x3f9d48aa,2 +np.float32,0x3fa86142,0x3f46589c,2 +np.float32,0x3ff16bc2,0x3f9fd072,2 +np.float32,0x3fbebf65,0x3f74207b,2 +np.float32,0x7e7b78b5,0x42b00610,2 +np.float32,0x3ff51ab8,0x3fa217f0,2 +np.float32,0x3f8361bb,0x3e6adf07,2 +np.float32,0x7edbceed,0x42b1240e,2 +np.float32,0x7f10e2c0,0x42b1b18a,2 +np.float32,0x3fa7bc58,0x3f44d4ef,2 +np.float32,0x3f813bde,0x3e0e1138,2 +np.float32,0x7f30d5b9,0x42b21791,2 +np.float32,0x3fb4f450,0x3f61806a,2 +np.float32,0x7eee02c4,0x42b14cca,2 +np.float32,0x7ec74b62,0x42b0f1e4,2 +np.float32,0x3ff96bca,0x3fa4b498,2 +np.float32,0x7f50e304,0x42b26cda,2 +np.float32,0x7eb14c57,0x42b0b603,2 +np.float32,0x7c3f0733,0x42a9edbf,2 +np.float32,0x7ea57acb,0x42b092b1,2 +np.float32,0x7f2788dc,0x42b1fbe7,2 +np.float32,0x3fa39f14,0x3f3ad09b,2 +np.float32,0x3fc3a7e0,0x3f7ccfa0,2 +np.float32,0x3fe70a73,0x3f991eb0,2 +np.float32,0x7f4831f7,0x42b25718,2 +np.float32,0x3fe947d0,0x3f9a999c,2 +np.float32,0x7ef2b1c7,0x42b156c4,2 +np.float32,0x3fede0ea,0x3f9d937f,2 +np.float32,0x3f9fef8e,0x3f314637,2 +np.float32,0x3fc313c5,0x3f7bcebd,2 +np.float32,0x7ee99337,0x42b14328,2 +np.float32,0x7eb9042e,0x42b0cbd5,2 +np.float32,0x3fc9d3dc,0x3f839a69,2 +np.float32,0x3fb2c018,0x3f5d091d,2 +np.float32,0x3fcc4e8f,0x3f859dc5,2 +np.float32,0x3fa9363b,0x3f484819,2 +np.float32,0x7f72ce2e,0x42b2b9e4,2 +np.float32,0x7e639326,0x42afd2f1,2 +np.float32,0x7f4595d3,0x42b25060,2 +np.float32,0x7f6d0ac4,0x42b2ad97,2 +np.float32,0x7f1bda0d,0x42b1d6e5,2 +np.float32,0x3fd85ffd,0x3f8ee0ed,2 +np.float32,0x3f91d53f,0x3f059c8e,2 +np.float32,0x7d06e103,0x42ac0155,2 +np.float32,0x3fb83126,0x3f67de6e,2 +np.float32,0x7d81ce1f,0x42ad5097,2 +np.float32,0x7f79cb3b,0x42b2c86b,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x3fdbfffd,0x3f918137,2 +np.float32,0x7f4ecb1c,0x42b267b2,2 +np.float32,0x3fc2c122,0x3f7b3ed3,2 +np.float32,0x7f415854,0x42b24544,2 +np.float32,0x7e3d988b,0x42af7575,2 +np.float32,0x3f83ca99,0x3e789fcb,2 +np.float32,0x7f274f70,0x42b1fb38,2 +np.float32,0x7f0d20e6,0x42b1a416,2 +np.float32,0x3fdf3a1d,0x3f93c9c1,2 +np.float32,0x7efaa13e,0x42b1673d,2 +np.float32,0x3fb20b15,0x3f5b9434,2 +np.float32,0x3f86af9f,0x3ea4c664,2 +np.float32,0x3fe4fcb0,0x3f97be8a,2 +np.float32,0x3f920683,0x3f065085,2 +np.float32,0x3fa4b278,0x3f3d7e8b,2 +np.float32,0x3f8077a8,0x3daef77f,2 +np.float32,0x7e865be4,0x42b02807,2 +np.float32,0x3fcea7e2,0x3f877c9f,2 +np.float32,0x7e7e9db1,0x42b00c6d,2 +np.float32,0x3f9819aa,0x3f1aba7e,2 +np.float32,0x7f2b6c4b,0x42b207a7,2 +np.float32,0x7ef85e3e,0x42b16299,2 +np.float32,0x3fbd8290,0x3f71df8b,2 +np.float32,0x3fbbb615,0x3f6e8c8c,2 +np.float32,0x7f1bc7f5,0x42b1d6a9,2 +np.float32,0x3fbb4fea,0x3f6dcdad,2 +np.float32,0x3fb67e09,0x3f648dd1,2 +np.float32,0x3fc83495,0x3f824374,2 +np.float32,0x3fe52980,0x3f97dcbc,2 +np.float32,0x3f87d893,0x3eb25d7c,2 +np.float32,0x3fdb805a,0x3f9125c0,2 +np.float32,0x3fb33f0f,0x3f5e0ce1,2 +np.float32,0x3facc524,0x3f50516b,2 
+np.float32,0x3ff40484,0x3fa16d0e,2 +np.float32,0x3ff078bf,0x3f9f3811,2 +np.float32,0x7f736747,0x42b2bb27,2 +np.float32,0x7f55768b,0x42b277f3,2 +np.float32,0x80000001,0xffc00000,2 +np.float32,0x7f6463d1,0x42b29a8e,2 +np.float32,0x3f8f8b59,0x3ef9d792,2 +np.float32,0x3f8a6f4d,0x3ecd5bf4,2 +np.float32,0x3fe958d9,0x3f9aa4ca,2 +np.float32,0x7f1e2ce2,0x42b1de78,2 +np.float32,0x3fb8584a,0x3f682a05,2 +np.float32,0x7dea3dc6,0x42ae7ed5,2 +np.float32,0x7f53a815,0x42b27399,2 +np.float32,0x7e0cf986,0x42aeddbf,2 +np.float32,0x7f3afb71,0x42b23422,2 +np.float32,0x3fd87d6e,0x3f8ef685,2 +np.float32,0x3ffcaa46,0x3fa6a0d7,2 +np.float32,0x7eecd276,0x42b14a3a,2 +np.float32,0x3ffc30b4,0x3fa65951,2 +np.float32,0x7e9c85e2,0x42b07634,2 +np.float32,0x3f95d862,0x3f1383de,2 +np.float32,0x7ef21410,0x42b15577,2 +np.float32,0x3fbfa1b5,0x3f75b86e,2 +np.float32,0x3fd6d90f,0x3f8dc086,2 +np.float32,0x0,0xffc00000,2 +np.float32,0x7e885dcd,0x42b02f9f,2 +np.float32,0x3fb3e057,0x3f5f54bf,2 +np.float32,0x7f40afdd,0x42b24385,2 +np.float32,0x3fb795c2,0x3f66b120,2 +np.float32,0x3fba7c11,0x3f6c3f73,2 +np.float32,0x3ffef620,0x3fa7f828,2 +np.float32,0x7d430508,0x42acbe1e,2 +np.float32,0x3f8d2892,0x3ee6369f,2 +np.float32,0x3fbea139,0x3f73e9d5,2 +np.float32,0x3ffaa928,0x3fa571b9,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x7f16f9ce,0x42b1c69f,2 +np.float32,0x3fa8f753,0x3f47b657,2 +np.float32,0x3fd48a63,0x3f8c06ac,2 +np.float32,0x7f13419e,0x42b1b9d9,2 +np.float32,0x3fdf1526,0x3f93afde,2 +np.float32,0x3f903c8b,0x3eff3be8,2 +np.float32,0x7f085323,0x42b1925b,2 +np.float32,0x7cdbe309,0x42ab98ac,2 +np.float32,0x3fba2cfd,0x3f6ba9f1,2 +np.float32,0x7f5a805d,0x42b283e4,2 +np.float32,0x7f6753dd,0x42b2a119,2 +np.float32,0x3fed9f02,0x3f9d6964,2 +np.float32,0x3f96422c,0x3f14ddba,2 +np.float32,0x7f22f2a9,0x42b1edb1,2 +np.float32,0x3fe3fcfd,0x3f97119d,2 +np.float32,0x7e018ad0,0x42aeb271,2 +np.float32,0x7db896f5,0x42ae04de,2 +np.float32,0x7e55c795,0x42afb2ec,2 +np.float32,0x7f58ef8d,0x42b28036,2 +np.float32,0x7f24a16a,0x42b1f2f3,2 +np.float32,0x3fcf714c,0x3f881b09,2 +np.float32,0x3fcdd056,0x3f86d200,2 +np.float32,0x7f02fad0,0x42b17de0,2 +np.float32,0x7eeab877,0x42b145a9,2 +np.float32,0x3fd6029d,0x3f8d20f7,2 +np.float32,0x3fd4f8cd,0x3f8c59d6,2 +np.float32,0x3fb29d4a,0x3f5cc1a5,2 +np.float32,0x3fb11e2d,0x3f59a77a,2 +np.float32,0x7eded576,0x42b12b0e,2 +np.float32,0x7f26c2a5,0x42b1f988,2 +np.float32,0x3fb6165b,0x3f63c151,2 +np.float32,0x7f3bca47,0x42b23657,2 +np.float32,0x7d8c93bf,0x42ad7968,2 +np.float32,0x3f8ede02,0x3ef47176,2 +np.float32,0x3fbef762,0x3f7485b9,2 +np.float32,0x7f1419af,0x42b1bcc6,2 +np.float32,0x7d9e8c79,0x42adb701,2 +np.float32,0x3fa26336,0x3f37af63,2 +np.float32,0x7f5f5590,0x42b28f18,2 +np.float32,0x3fddc93a,0x3f92c651,2 +np.float32,0x3ff0a5fc,0x3f9f547f,2 +np.float32,0x3fb2f6b8,0x3f5d790e,2 +np.float32,0x3ffe59a4,0x3fa79d2c,2 +np.float32,0x7e4df848,0x42af9fde,2 +np.float32,0x3fb0ab3b,0x3f58b678,2 +np.float32,0x7ea54d47,0x42b09225,2 +np.float32,0x3fdd6404,0x3f927eb2,2 +np.float32,0x3f846dc0,0x3e864caa,2 +np.float32,0x7d046aee,0x42abf7e7,2 +np.float32,0x7f7c5a05,0x42b2cda3,2 +np.float32,0x3faf6126,0x3f55fb21,2 +np.float32,0x7f36a910,0x42b22829,2 +np.float32,0x3fdc7b36,0x3f91d938,2 +np.float32,0x3fff443e,0x3fa82577,2 +np.float32,0x7ee7154a,0x42b13daa,2 +np.float32,0x3f944742,0x3f0e435c,2 +np.float32,0x7f5b510a,0x42b285cc,2 +np.float32,0x3f9bc940,0x3f25c4d2,2 +np.float32,0x3fee4782,0x3f9dd4ea,2 +np.float32,0x3fcfc2dd,0x3f885aea,2 +np.float32,0x7eab65cf,0x42b0a4af,2 +np.float32,0x3f9cf908,0x3f292689,2 
+np.float32,0x7ed35501,0x42b10feb,2 +np.float32,0x7dabb70a,0x42addfd9,2 +np.float32,0x7f348919,0x42b2222b,2 +np.float32,0x3fb137d4,0x3f59dd17,2 +np.float32,0x7e7b36c9,0x42b0058a,2 +np.float32,0x7e351fa4,0x42af5e0d,2 +np.float32,0x3f973c0c,0x3f18011e,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0x3f9b0a4b,0x3f239a33,2 +np.float32,0x3f87c4cf,0x3eb17e7e,2 +np.float32,0x7ef67760,0x42b15eaa,2 +np.float32,0x3fc4d2c8,0x3f7ed20f,2 +np.float32,0x7e940dac,0x42b059b8,2 +np.float32,0x7f6e6a52,0x42b2b08d,2 +np.float32,0x3f838752,0x3e6fe4b2,2 +np.float32,0x3fd8f046,0x3f8f4a94,2 +np.float32,0x3fa82112,0x3f45c223,2 +np.float32,0x3fd49b16,0x3f8c1345,2 +np.float32,0x7f02a941,0x42b17ca1,2 +np.float32,0x3f8a9d2c,0x3ecf1768,2 +np.float32,0x7c9372e3,0x42aacc0f,2 +np.float32,0x3fd260b3,0x3f8a619a,2 +np.float32,0x3f8a1b88,0x3eca27cb,2 +np.float32,0x7d25d510,0x42ac6b1c,2 +np.float32,0x7ef5a578,0x42b15cf5,2 +np.float32,0x3fe6625d,0x3f98ae9a,2 +np.float32,0x3ff53240,0x3fa22658,2 +np.float32,0x3f8bb2e6,0x3ed944cf,2 +np.float32,0x7f4679b1,0x42b252ad,2 +np.float32,0x3fa8db30,0x3f4774fc,2 +np.float32,0x7ee5fafd,0x42b13b37,2 +np.float32,0x3fc405e0,0x3f7d71fb,2 +np.float32,0x3f9303cd,0x3f09ddfd,2 +np.float32,0x7f486e67,0x42b257b2,2 +np.float32,0x7e73f12b,0x42aff680,2 +np.float32,0x3fe80f8b,0x3f99cbe4,2 +np.float32,0x3f84200a,0x3e81a3f3,2 +np.float32,0x3fa14e5c,0x3f34e3ce,2 +np.float32,0x3fda22ec,0x3f9029bb,2 +np.float32,0x3f801772,0x3d1aef98,2 +np.float32,0x7eaa1428,0x42b0a0bb,2 +np.float32,0x3feae0b3,0x3f9ba4aa,2 +np.float32,0x7ea439b4,0x42b08ecc,2 +np.float32,0x3fa28b1c,0x3f381579,2 +np.float32,0x7e8af247,0x42b03937,2 +np.float32,0x3fd19216,0x3f89c2b7,2 +np.float32,0x7f6ea033,0x42b2b100,2 +np.float32,0x3fad4fbf,0x3f518224,2 +np.float32,0x3febd940,0x3f9c45bd,2 +np.float32,0x7f4643a3,0x42b25221,2 +np.float32,0x7ec34478,0x42b0e771,2 +np.float32,0x7f18c83b,0x42b1ccb5,2 +np.float32,0x3fc665ad,0x3f80bf94,2 +np.float32,0x3ff0a999,0x3f9f56c4,2 +np.float32,0x3faf1cd2,0x3f5568fe,2 +np.float32,0x7ecd9dc6,0x42b101e1,2 +np.float32,0x3faad282,0x3f4bf754,2 +np.float32,0x3ff905a0,0x3fa47771,2 +np.float32,0x7f596481,0x42b28149,2 +np.float32,0x7f1cb31f,0x42b1d9ac,2 +np.float32,0x7e266719,0x42af32a6,2 +np.float32,0x7eccce06,0x42b0ffdb,2 +np.float32,0x3f9b6f71,0x3f24c102,2 +np.float32,0x3f80e4ba,0x3df1d6bc,2 +np.float32,0x3f843d51,0x3e836a60,2 +np.float32,0x7f70bd88,0x42b2b585,2 +np.float32,0x3fe4cc96,0x3f979e18,2 +np.float32,0x3ff737c7,0x3fa36151,2 +np.float32,0x3ff1197e,0x3f9f9cf4,2 +np.float32,0x7f08e190,0x42b19471,2 +np.float32,0x3ff1542e,0x3f9fc1b2,2 +np.float32,0x3ff6673c,0x3fa2e2d2,2 +np.float32,0xbf800000,0xffc00000,2 +np.float32,0x7e3f9ba7,0x42af7add,2 +np.float32,0x7f658ff6,0x42b29d2d,2 +np.float32,0x3f93441c,0x3f0ac0d9,2 +np.float32,0x7f526a74,0x42b27096,2 +np.float32,0x7f5b00c8,0x42b28511,2 +np.float32,0x3ff212f8,0x3fa038cf,2 +np.float32,0x7e0bd60d,0x42aed998,2 +np.float32,0x7f71ef7f,0x42b2b80e,2 +np.float32,0x7f7a897e,0x42b2c9f1,2 +np.float32,0x7e8b76a6,0x42b03b1e,2 +np.float32,0x7efa0da3,0x42b1660f,2 +np.float32,0x3fce9166,0x3f876ae0,2 +np.float32,0x3fc4163d,0x3f7d8e30,2 +np.float32,0x3fdb3784,0x3f90f16b,2 +np.float32,0x7c5f177b,0x42aa3d30,2 +np.float32,0x3fc6276d,0x3f808af5,2 +np.float32,0x7bac9cc2,0x42a856f4,2 +np.float32,0x3fe5876f,0x3f981bea,2 +np.float32,0x3fef60e3,0x3f9e878a,2 +np.float32,0x3fb23cd8,0x3f5bfb06,2 +np.float32,0x3fe114e2,0x3f951402,2 +np.float32,0x7ca8ef04,0x42ab11b4,2 +np.float32,0x7d93c2ad,0x42ad92ec,2 +np.float32,0x3fe5bb8a,0x3f983ee6,2 +np.float32,0x7f0182fd,0x42b1781b,2 
+np.float32,0x7da63bb2,0x42adcf3d,2 +np.float32,0x3fac46b7,0x3f4f399e,2 +np.float32,0x7f7a5d8f,0x42b2c997,2 +np.float32,0x7f76572e,0x42b2c14b,2 +np.float32,0x7f42d53e,0x42b24931,2 +np.float32,0x7f7ffd00,0x42b2d4f6,2 +np.float32,0x3fc346c3,0x3f7c2756,2 +np.float32,0x7f1f6ae3,0x42b1e27a,2 +np.float32,0x3f87fb56,0x3eb3e2ee,2 +np.float32,0x3fed17a2,0x3f9d12b4,2 +np.float32,0x7f5ea903,0x42b28d8c,2 +np.float32,0x3f967f82,0x3f15a4ab,2 +np.float32,0x7d3b540c,0x42aca984,2 +np.float32,0x7f56711a,0x42b27a4a,2 +np.float32,0x7f122223,0x42b1b5ee,2 +np.float32,0x3fd6fa34,0x3f8dd919,2 +np.float32,0x3fadd62e,0x3f52a7b3,2 +np.float32,0x3fb7bf0c,0x3f67015f,2 +np.float32,0x7edf4ba7,0x42b12c1d,2 +np.float32,0x7e33cc65,0x42af5a4b,2 +np.float32,0x3fa6be17,0x3f427831,2 +np.float32,0x3fa07aa8,0x3f32b7d4,2 +np.float32,0x3fa4a3af,0x3f3d5a01,2 +np.float32,0x3fdbb267,0x3f9149a8,2 +np.float32,0x7ed45e25,0x42b1126c,2 +np.float32,0x3fe3f432,0x3f970ba6,2 +np.float32,0x7f752080,0x42b2bec3,2 +np.float32,0x3f872747,0x3eaa62ea,2 +np.float32,0x7e52175d,0x42afaa03,2 +np.float32,0x3fdc766c,0x3f91d5ce,2 +np.float32,0x7ecd6841,0x42b1015c,2 +np.float32,0x7f3d6c40,0x42b23ac6,2 +np.float32,0x3fb80c14,0x3f6796b9,2 +np.float32,0x3ff6ad56,0x3fa30d68,2 +np.float32,0x3fda44c3,0x3f90423e,2 +np.float32,0x3fdcba0c,0x3f9205fc,2 +np.float32,0x7e14a720,0x42aef8e6,2 +np.float32,0x3fe9e489,0x3f9b0047,2 +np.float32,0x7e69f933,0x42afe123,2 +np.float32,0x3ff3ee6d,0x3fa15f71,2 +np.float32,0x3f8538cd,0x3e91c1a7,2 +np.float32,0x3fdc3f07,0x3f91ae46,2 +np.float32,0x3fba2ef0,0x3f6bada2,2 +np.float32,0x7da64cd8,0x42adcf71,2 +np.float32,0x3fc34bd2,0x3f7c301d,2 +np.float32,0x3fa273aa,0x3f37d984,2 +np.float32,0x3ff0338c,0x3f9f0c86,2 +np.float32,0x7ed62cef,0x42b116c3,2 +np.float32,0x3f911e7e,0x3f02f7c6,2 +np.float32,0x7c8514c9,0x42aa9792,2 +np.float32,0x3fea2a74,0x3f9b2df5,2 +np.float32,0x3fe036f8,0x3f947a25,2 +np.float32,0x7c5654bf,0x42aa28ad,2 +np.float32,0x3fd9e423,0x3f8ffc32,2 +np.float32,0x7eec0439,0x42b1487b,2 +np.float32,0x3fc580f4,0x3f7ffb62,2 +np.float32,0x3fb0e316,0x3f592bbe,2 +np.float32,0x7c4cfb7d,0x42aa11d8,2 +np.float32,0x3faf9704,0x3f566e00,2 +np.float32,0x3fa7cf8a,0x3f45023d,2 +np.float32,0x7f7b724d,0x42b2cbcc,2 +np.float32,0x7f05bfe3,0x42b18897,2 +np.float32,0x3f90bde3,0x3f018bf3,2 +np.float32,0x7c565479,0x42aa28ad,2 +np.float32,0x3f94b517,0x3f0fb8e5,2 +np.float32,0x3fd6aadd,0x3f8d9e3c,2 +np.float32,0x7f09b37c,0x42b1977f,2 +np.float32,0x7f2b45ea,0x42b20734,2 +np.float32,0x3ff1d15e,0x3fa00fe9,2 +np.float32,0x3f99bce6,0x3f1fbd6c,2 +np.float32,0x7ecd1f76,0x42b100a7,2 +np.float32,0x7f443e2b,0x42b24ce2,2 +np.float32,0x7da7d6a5,0x42add428,2 +np.float32,0x7ebe0193,0x42b0d975,2 +np.float32,0x7ee13c43,0x42b1308b,2 +np.float32,0x3f8adf1b,0x3ed18e0c,2 +np.float32,0x7f76ce65,0x42b2c242,2 +np.float32,0x7e34f43d,0x42af5d92,2 +np.float32,0x7f306b76,0x42b2165d,2 +np.float32,0x7e1fd07f,0x42af1df7,2 +np.float32,0x3fab9a41,0x3f4db909,2 +np.float32,0x3fc23d1a,0x3f7a5803,2 +np.float32,0x3f8b7403,0x3ed70245,2 +np.float32,0x3f8c4dd6,0x3edebbae,2 +np.float32,0x3fe5f411,0x3f9864cd,2 +np.float32,0x3f88128b,0x3eb4e508,2 +np.float32,0x3fcb09de,0x3f84976f,2 +np.float32,0x7f32f2f5,0x42b21da6,2 +np.float32,0x3fe75610,0x3f9950f6,2 +np.float32,0x3f993edf,0x3f1e408d,2 +np.float32,0x3fc4a9d7,0x3f7e8be9,2 +np.float32,0x7f74551a,0x42b2bd1a,2 +np.float32,0x7de87129,0x42ae7ae2,2 +np.float32,0x7f18bbbd,0x42b1cc8c,2 +np.float32,0x7e7e1dd4,0x42b00b6c,2 +np.float32,0x3ff6e55b,0x3fa32f64,2 +np.float32,0x3fa634c8,0x3f412df3,2 +np.float32,0x3fd0fb7c,0x3f894e49,2 
+np.float32,0x3ff4f6a6,0x3fa201d7,2 +np.float32,0x7f69d418,0x42b2a69a,2 +np.float32,0x7cb9632d,0x42ab414a,2 +np.float32,0x3fc57d36,0x3f7ff503,2 +np.float32,0x7e9e2ed7,0x42b07b9b,2 +np.float32,0x7f2e6868,0x42b2107d,2 +np.float32,0x3fa3169a,0x3f39785d,2 +np.float32,0x7f03cde0,0x42b18117,2 +np.float32,0x7f6d75d2,0x42b2ae7f,2 +np.float32,0x3ff483f2,0x3fa1bb75,2 +np.float32,0x7f1b39f7,0x42b1d4d6,2 +np.float32,0x3f8c7a7d,0x3ee0481e,2 +np.float32,0x3f989095,0x3f1c2b19,2 +np.float32,0x3fa4cbfd,0x3f3dbd87,2 +np.float32,0x7f75b00f,0x42b2bfef,2 +np.float32,0x3f940724,0x3f0d6756,2 +np.float32,0x7f5e5a1a,0x42b28cd6,2 +np.float32,0x800000,0xffc00000,2 +np.float32,0x7edd1d29,0x42b12716,2 +np.float32,0x3fa3e9e4,0x3f3b8c16,2 +np.float32,0x7e46d70e,0x42af8dd5,2 +np.float32,0x3f824745,0x3e40ec1e,2 +np.float32,0x3fd67623,0x3f8d770a,2 +np.float32,0x3fe9a6f3,0x3f9ad7fa,2 +np.float32,0x3fdda67c,0x3f92adc1,2 +np.float32,0x7ccb6c9a,0x42ab70d4,2 +np.float32,0x3ffd364a,0x3fa6f2fe,2 +np.float32,0x7e02424c,0x42aeb545,2 +np.float32,0x3fb6d2f2,0x3f6534a1,2 +np.float32,0x3fe1fe26,0x3f95b4cc,2 +np.float32,0x7e93ac57,0x42b05867,2 +np.float32,0x7f7b3433,0x42b2cb4d,2 +np.float32,0x3fb76803,0x3f66580d,2 +np.float32,0x3f9af881,0x3f23661b,2 +np.float32,0x3fd58062,0x3f8cbf98,2 +np.float32,0x80000000,0xffc00000,2 +np.float32,0x7f1af8f4,0x42b1d3ff,2 +np.float32,0x3fe66bba,0x3f98b4dc,2 +np.float32,0x7f6bd7bf,0x42b2aaff,2 +np.float32,0x3f84f79a,0x3e8e2e49,2 +np.float32,0x7e475b06,0x42af8f28,2 +np.float32,0x3faff89b,0x3f573d5e,2 +np.float32,0x7de5aa77,0x42ae74bb,2 +np.float32,0x3f8e9e42,0x3ef26cd2,2 +np.float32,0x3fb1cec3,0x3f5b1740,2 +np.float32,0x3f8890d6,0x3eba4821,2 +np.float32,0x3f9b39e9,0x3f242547,2 +np.float32,0x3fc895a4,0x3f829407,2 +np.float32,0x7f77943c,0x42b2c3dc,2 +np.float32,0x7f390d58,0x42b22ed2,2 +np.float32,0x3fe7e160,0x3f99ad58,2 +np.float32,0x3f93d2a0,0x3f0cb205,2 +np.float32,0x7f29499b,0x42b2013c,2 +np.float32,0x3f8c11b2,0x3edca10f,2 +np.float32,0x7e898ef8,0x42b03413,2 +np.float32,0x3fdff942,0x3f944f34,2 +np.float32,0x7f3d602f,0x42b23aa5,2 +np.float32,0x3f8a50f3,0x3ecc345b,2 +np.float32,0x3fa1f86d,0x3f369ce4,2 +np.float32,0x3f97ad95,0x3f19681d,2 +np.float32,0x3ffad1e0,0x3fa589e5,2 +np.float32,0x3fa70590,0x3f432311,2 +np.float32,0x7e6840cb,0x42afdd5c,2 +np.float32,0x3fd4036d,0x3f8ba0aa,2 +np.float32,0x7f7cc953,0x42b2ce84,2 +np.float32,0x7f228e1e,0x42b1ec74,2 +np.float32,0x7e37a866,0x42af652a,2 +np.float32,0x3fda22d0,0x3f9029a7,2 +np.float32,0x7f736bff,0x42b2bb31,2 +np.float32,0x3f9833b6,0x3f1b0b8e,2 +np.float32,0x7f466001,0x42b2526a,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0x7dd62bcd,0x42ae50f8,2 +np.float32,0x7f1d2bfe,0x42b1db36,2 +np.float32,0x7ecffe9e,0x42b107c5,2 +np.float32,0x7ebefe0a,0x42b0dc1b,2 +np.float32,0x7f45c63d,0x42b250dd,2 +np.float32,0x7f601af0,0x42b290db,2 +np.float32,0x3fcbb88a,0x3f8524e5,2 +np.float32,0x7ede55ff,0x42b129e8,2 +np.float32,0x7ea5dd5a,0x42b093e2,2 +np.float32,0x3ff53857,0x3fa22a12,2 +np.float32,0x3f8dbd6a,0x3eeb28a4,2 +np.float32,0x3fd1b467,0x3f89dd2c,2 +np.float32,0x3fe0423f,0x3f9481fc,2 +np.float32,0x3f84b421,0x3e8a6174,2 +np.float32,0x7f4efc97,0x42b2682c,2 +np.float32,0x7f601b33,0x42b290dc,2 +np.float32,0x3f94f240,0x3f108719,2 +np.float32,0x7decd251,0x42ae8471,2 +np.float32,0x3fdc457c,0x3f91b2e2,2 +np.float32,0x3f92a966,0x3f089c5a,2 +np.float32,0x3fc9732f,0x3f834afc,2 +np.float32,0x3f97948f,0x3f19194e,2 +np.float32,0x7f0824a1,0x42b191ac,2 +np.float32,0x7f0365a5,0x42b17f81,2 +np.float32,0x3f800000,0x0,2 +np.float32,0x7f0054c6,0x42b1736b,2 +np.float32,0x3fe86544,0x3f9a0484,2 
+np.float32,0x7e95f844,0x42b0604e,2 +np.float32,0x3fce8602,0x3f8761e2,2 +np.float32,0x3fc726c8,0x3f81621d,2 +np.float32,0x3fcf6b03,0x3f88161b,2 +np.float32,0x3fceb843,0x3f87898a,2 +np.float32,0x3fe2f8b2,0x3f966071,2 +np.float32,0x7f3c8e7f,0x42b2386d,2 +np.float32,0x3fcee13a,0x3f87a9d2,2 +np.float32,0x3fc4df27,0x3f7ee73c,2 +np.float32,0x3ffde486,0x3fa758e3,2 +np.float32,0x3fa91be0,0x3f480b17,2 +np.float32,0x7f2a5a7d,0x42b20472,2 +np.float32,0x7e278d80,0x42af362d,2 +np.float32,0x3f96d091,0x3f16a9d5,2 +np.float32,0x7e925225,0x42b053b2,2 +np.float32,0x7f7ef83a,0x42b2d2ec,2 +np.float32,0x7eb4923a,0x42b0bf61,2 +np.float32,0x7e98bf19,0x42b069b3,2 +np.float32,0x3fac93a2,0x3f4fe410,2 +np.float32,0x7f46389c,0x42b25205,2 +np.float32,0x3f9fd447,0x3f30fd54,2 +np.float32,0x3fef42d4,0x3f9e7483,2 +np.float32,0x7f482174,0x42b256ed,2 +np.float32,0x3f97aedb,0x3f196c1e,2 +np.float32,0x7f764edd,0x42b2c13a,2 +np.float32,0x3f9117b5,0x3f02de5c,2 +np.float32,0x3fc7984e,0x3f81c12d,2 +np.float64,0x3ff1e2cb7463c597,0x3fdec6caf39e0c0e,1 +np.float64,0x3ffe4f89789c9f13,0x3ff40f4b1da0f3e9,1 +np.float64,0x7f6a5c9ac034b935,0x408605e51703c145,1 +np.float64,0x7fdcb6ece3b96dd9,0x40862d6521e16d60,1 +np.float64,0x3ff6563e182cac7c,0x3feb9d8210f3fa88,1 +np.float64,0x7fde32025f3c6404,0x40862dcc1d1a9b7f,1 +np.float64,0x7fd755ed35aeabd9,0x40862bbc5522b779,1 +np.float64,0x3ff5c81f4bcb903e,0x3fea71f10b954ea3,1 +np.float64,0x3fffe805d35fd00c,0x3ff50463a1ba2938,1 +np.float64,0x7fd045a1c1a08b43,0x408628d9f431f2f5,1 +np.float64,0x3ff49f7dd9893efc,0x3fe7c6736e17ea8e,1 +np.float64,0x7fccfbc1fd39f783,0x408627eca79acf51,1 +np.float64,0x3ff1af0a00035e14,0x3fdd1c0e7d5706ea,1 +np.float64,0x7fe7bd17162f7a2d,0x4086316af683502b,1 +np.float64,0x3ff0941b8d012837,0x3fd128d274065ac0,1 +np.float64,0x3ffa0c5d98b418bb,0x3ff11af9c8edd17f,1 +np.float64,0x3ffad9733355b2e6,0x3ff1b6d1307acb42,1 +np.float64,0x3ffabb2a33d57654,0x3ff1a0442b034e50,1 +np.float64,0x3ff36118b0c6c231,0x3fe472b7dfb23516,1 +np.float64,0x3ff2441d3664883a,0x3fe0d61145608f0c,1 +np.float64,0x7fe039862d20730b,0x40862e5f8ed752d3,1 +np.float64,0x7fb1dde24023bbc4,0x40861e824cdb0664,1 +np.float64,0x7face6335839cc66,0x40861ccf90a26e16,1 +np.float64,0x3ffb5d0e1af6ba1c,0x3ff2170f6f42fafe,1 +np.float64,0x3ff5c2c6a50b858d,0x3fea665aabf04407,1 +np.float64,0x3ffabb409db57681,0x3ff1a054ea32bfc3,1 +np.float64,0x3ff1e054e983c0aa,0x3fdeb30c17286cb6,1 +np.float64,0x7fe467f73268cfed,0x4086303529e52e9b,1 +np.float64,0x7fe0e86bf961d0d7,0x40862eb40788b04a,1 +np.float64,0x3ffb743542f6e86a,0x3ff227b4ea5acee0,1 +np.float64,0x3ff2de6826e5bcd0,0x3fe2e31fcde0a96c,1 +np.float64,0x7fd6b27ccfad64f9,0x40862b8385697c31,1 +np.float64,0x7fe0918e8d21231c,0x40862e8a82d9517a,1 +np.float64,0x7fd0ca0395a19406,0x4086291a0696ed33,1 +np.float64,0x3ffb042496960849,0x3ff1d658c928abfc,1 +np.float64,0x3ffcd0409799a081,0x3ff31877df0cb245,1 +np.float64,0x7fe429bd06685379,0x4086301c9f259934,1 +np.float64,0x3ff933076092660f,0x3ff06d2e5f4d9ab7,1 +np.float64,0x7feaefcb28f5df95,0x4086326dccf88e6f,1 +np.float64,0x7fb5f2c1f82be583,0x40862027ac02a39d,1 +np.float64,0x3ffb5d9e3bd6bb3c,0x3ff21777501d097e,1 +np.float64,0x10000000000000,0xfff8000000000000,1 +np.float64,0x3ff70361596e06c3,0x3fecf675ceda7e19,1 +np.float64,0x3ff71a21b5ee3444,0x3fed224fa048d9a9,1 +np.float64,0x3ffb102b86762057,0x3ff1df2cc9390518,1 +np.float64,0x7feaaeb35c355d66,0x4086325a60704a90,1 +np.float64,0x7fd9a3d0a93347a0,0x40862c7d300fc076,1 +np.float64,0x7fabcf159c379e2a,0x40861c80cdbbff27,1 +np.float64,0x7fd1c066ec2380cd,0x4086298c3006fee6,1 
+np.float64,0x3ff3d5ae2d67ab5c,0x3fe5bc16447428db,1 +np.float64,0x3ff4b76add696ed6,0x3fe800f5bbf21376,1 +np.float64,0x3ff60d89ee0c1b14,0x3feb063fdebe1a68,1 +np.float64,0x7f1d2648003a4c8f,0x4085eaf9238af95a,1 +np.float64,0x7fe8b45f6df168be,0x408631bca5abf6d6,1 +np.float64,0x7fe9ea5308f3d4a5,0x4086321ea2bd3af9,1 +np.float64,0x7fcb6ba5a636d74a,0x4086277b208075ed,1 +np.float64,0x3ff621cfd74c43a0,0x3feb30d59baf5919,1 +np.float64,0x3ff7bc8ca0af7919,0x3fee524da8032896,1 +np.float64,0x7fda22dd0c3445b9,0x40862ca47326d063,1 +np.float64,0x7fd02ed4b2a05da8,0x408628ceb6919421,1 +np.float64,0x3ffe64309fdcc861,0x3ff41c1b18940709,1 +np.float64,0x3ffee4042abdc808,0x3ff46a6005bccb41,1 +np.float64,0x3ff078145b00f029,0x3fceeb3d6bfae0eb,1 +np.float64,0x7fda20fd20b441f9,0x40862ca3e03b990b,1 +np.float64,0x3ffa9e9e9af53d3d,0x3ff18ade3cbee789,1 +np.float64,0x3ff0a1062501420c,0x3fd1e32de6d18c0d,1 +np.float64,0x3ff3bdf118477be2,0x3fe57ad89b7fdf8b,1 +np.float64,0x3ff101c0d5c20382,0x3fd6965d3539be47,1 +np.float64,0x7feba3b53b774769,0x408632a28c7aca4d,1 +np.float64,0x3ff598db5d4b31b7,0x3fea0aa65c0b421a,1 +np.float64,0x3ff5fdfbb72bfbf8,0x3feae55accde4a5e,1 +np.float64,0x7fe5bae53aab75c9,0x408630b5e7a5b92a,1 +np.float64,0x3ff8f668afd1ecd2,0x3ff03af686666c9c,1 +np.float64,0x3ff5ba72dd2b74e6,0x3fea5441f223c093,1 +np.float64,0x3ff8498147109302,0x3fef4e45d501601d,1 +np.float64,0x7feddcfa5efbb9f4,0x4086334106a6e76b,1 +np.float64,0x7fd1a30200234603,0x4086297ee5cc562c,1 +np.float64,0x3ffffa8ee07ff51e,0x3ff50f1dc46f1303,1 +np.float64,0x7fef7ed00ebefd9f,0x408633ae01dabe52,1 +np.float64,0x3ffb6e062276dc0c,0x3ff22344c58c2016,1 +np.float64,0x7fcf2b59943e56b2,0x4086288190dd5eeb,1 +np.float64,0x3ffa589f9254b13f,0x3ff155cc081eee0b,1 +np.float64,0x3ff05415ca60a82c,0x3fc9e45565baef0a,1 +np.float64,0x7feb34bed576697d,0x408632822d5a178c,1 +np.float64,0x3ff3993845c73270,0x3fe51423baf246c3,1 +np.float64,0x3ff88367aaf106d0,0x3fefb2d9ca9f1192,1 +np.float64,0x7fef364304fe6c85,0x4086339b7ed82997,1 +np.float64,0x7fcba2c317374585,0x4086278b24e42934,1 +np.float64,0x3ff1aef885e35df1,0x3fdd1b79f55b20c0,1 +np.float64,0x7fe19367886326ce,0x40862f035f867445,1 +np.float64,0x3ff3c8295e279053,0x3fe5970aa670d32e,1 +np.float64,0x3ff6edda164ddbb4,0x3feccca9eb59d6b9,1 +np.float64,0x7fdeaea940bd5d52,0x40862dece02d151b,1 +np.float64,0x7fea9d6324353ac5,0x408632552ddf0d4f,1 +np.float64,0x7fe60e39e66c1c73,0x408630d45b1ad0c4,1 +np.float64,0x7fde06325abc0c64,0x40862dc07910038c,1 +np.float64,0x7f9ec89d303d9139,0x408617c55ea4c576,1 +np.float64,0x3ff9801930530032,0x3ff0abe5be046051,1 +np.float64,0x3ff4d5859689ab0b,0x3fe849a7f7a19fa3,1 +np.float64,0x3ff38afbc48715f8,0x3fe4ebb7710cbab9,1 +np.float64,0x3ffd88a0e77b1142,0x3ff3916964407e21,1 +np.float64,0x1,0xfff8000000000000,1 +np.float64,0x3ff5db59e58bb6b4,0x3fea9b6b5ccc116f,1 +np.float64,0x3ffd4b05b15a960c,0x3ff369792f661a90,1 +np.float64,0x7fdcebc4fb39d789,0x40862d73cd623378,1 +np.float64,0x3ff5b56f944b6adf,0x3fea4955d6b06ca3,1 +np.float64,0x7fd4e4abf2a9c957,0x40862ad9e9da3c61,1 +np.float64,0x7fe08e0d6aa11c1a,0x40862e88d17ef277,1 +np.float64,0x3ff0dfc97da1bf93,0x3fd50f9004136d8f,1 +np.float64,0x7fdec38eaebd871c,0x40862df2511e26b4,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3ff21865504430cb,0x3fe033fe3cf3947a,1 +np.float64,0x7fdc139708b8272d,0x40862d371cfbad03,1 +np.float64,0x7fe1fe3be3a3fc77,0x40862f336e3ba63a,1 +np.float64,0x7fd9fa2493b3f448,0x40862c97f2960be9,1 +np.float64,0x3ff0a027db414050,0x3fd1d6e54a707c87,1 +np.float64,0x3ff568b16f4ad163,0x3fe99f5c6d7b6e18,1 
+np.float64,0x3ffe2f82877c5f05,0x3ff3fb54bd0da753,1 +np.float64,0x7fbaf5778435eaee,0x408621ccc9e2c1be,1 +np.float64,0x7fc5aaf8362b55ef,0x40862598e7072a49,1 +np.float64,0x7fe0ebfdd4a1d7fb,0x40862eb5b7bf99d5,1 +np.float64,0x7fd8efeb5931dfd6,0x40862c444636f408,1 +np.float64,0x3ff361a308c6c346,0x3fe4744cae63e6df,1 +np.float64,0x7fef287d39be50f9,0x40863397f65c807e,1 +np.float64,0x7fe72c4a14ae5893,0x4086313992e52082,1 +np.float64,0x3ffd1be44cba37c8,0x3ff34a9a45239eb9,1 +np.float64,0x3ff50369c18a06d4,0x3fe8b69319f091f1,1 +np.float64,0x3ffb333c25766678,0x3ff1f8c78eeb28f1,1 +np.float64,0x7fe12050416240a0,0x40862ece4e2f2f24,1 +np.float64,0x7fe348f5526691ea,0x40862fc16fbe7b6c,1 +np.float64,0x3ff343cc4d068799,0x3fe41c2a30cab7d2,1 +np.float64,0x7fd1b0daaa2361b4,0x408629852b3104ff,1 +np.float64,0x3ff6a41f37ad483e,0x3fec3b36ee6c6d4a,1 +np.float64,0x3ffad9439435b287,0x3ff1b6add9a1b3d7,1 +np.float64,0x7fbeb9a2f23d7345,0x408622d89ac1eaba,1 +np.float64,0x3ffab3d39fb567a7,0x3ff19ac75b4427f3,1 +np.float64,0x3ff890003ed12000,0x3fefc8844471c6ad,1 +np.float64,0x3ffc9f595e593eb2,0x3ff2f7a8699f06d8,1 +np.float64,0x7fe2224ef6e4449d,0x40862f43684a154a,1 +np.float64,0x3ffa67ba08d4cf74,0x3ff161525778df99,1 +np.float64,0x7fe87e24b570fc48,0x408631ab02b159fb,1 +np.float64,0x7fd6e99be92dd337,0x40862b96dba73685,1 +np.float64,0x7fe90f39fdf21e73,0x408631d9dbd36c1e,1 +np.float64,0x3ffb7806abd6f00e,0x3ff22a719b0f4c46,1 +np.float64,0x3ffa511ba3d4a238,0x3ff1500c124f6e17,1 +np.float64,0x3ff5d7a569abaf4b,0x3fea937391c280e8,1 +np.float64,0x7fc4279d20284f39,0x40862504a5cdcb96,1 +np.float64,0x3ffe8791b1fd0f24,0x3ff431f1ed7eaba0,1 +np.float64,0x7fe3b2f5276765e9,0x40862fecf15e2535,1 +np.float64,0x7feeab0e7abd561c,0x408633778044cfbc,1 +np.float64,0x7fdba88531375109,0x40862d1860306d7a,1 +np.float64,0x7fe7b19b3def6335,0x4086316716d6890b,1 +np.float64,0x3ff9e9437413d287,0x3ff0ff89431c748c,1 +np.float64,0x3ff960716a52c0e3,0x3ff092498028f802,1 +np.float64,0x3ff271bf56a4e37f,0x3fe1786fc8dd775d,1 +np.float64,0x3fff2a6578be54cb,0x3ff494bbe303eeb5,1 +np.float64,0x3ffd842eb5fb085e,0x3ff38e8b7ba42bc5,1 +np.float64,0x3ff91600e5d22c02,0x3ff0553c6a6b3d93,1 +np.float64,0x3ff9153f45f22a7e,0x3ff0549c0eaecf95,1 +np.float64,0x7fe0ab319da15662,0x40862e96da3b19f9,1 +np.float64,0x3ff06acd1f60d59a,0x3fcd2aca543d2772,1 +np.float64,0x3ffb3e7a54d67cf4,0x3ff200f288cd391b,1 +np.float64,0x3ffd01356f1a026b,0x3ff339003462a56c,1 +np.float64,0x3ffacd35def59a6c,0x3ff1adb8d32b3ec0,1 +np.float64,0x3ff6f953264df2a6,0x3fece2f992948d6e,1 +np.float64,0x3ff0fa91f5a1f524,0x3fd64609a28f1590,1 +np.float64,0x7fd1b7610ca36ec1,0x408629881e03dc7d,1 +np.float64,0x3ff4317fb7c86300,0x3fe6b086ed265887,1 +np.float64,0x3ff3856198070ac3,0x3fe4dbb6bc88b9e3,1 +np.float64,0x7fed7fc4573aff88,0x40863327e7013a81,1 +np.float64,0x3ffe53cbbf5ca798,0x3ff411f07a29b1f4,1 +np.float64,0x3ff092195b012433,0x3fd10b1c0b4b14fe,1 +np.float64,0x3ff1a3171163462e,0x3fdcb5c301d5d40d,1 +np.float64,0x3ffa1401f1742804,0x3ff120eb319e9faa,1 +np.float64,0x7fd352f6f426a5ed,0x40862a3a048feb6d,1 +np.float64,0x7fd4ee246fa9dc48,0x40862add895d808f,1 +np.float64,0x3ff0675cfa00ceba,0x3fccb2222c5493ca,1 +np.float64,0x3ffe5cb38f3cb967,0x3ff417773483d161,1 +np.float64,0x7fe11469ea2228d3,0x40862ec8bd3e497f,1 +np.float64,0x3fff13cba67e2798,0x3ff4872fe2c26104,1 +np.float64,0x3ffb73d3d316e7a8,0x3ff2276f08612ea2,1 +np.float64,0x7febfb70f237f6e1,0x408632bbc9450721,1 +np.float64,0x3ff84a0d87b0941b,0x3fef4f3b707e3145,1 +np.float64,0x7fd71fd5082e3fa9,0x40862ba9b4091172,1 +np.float64,0x3ff560737d8ac0e7,0x3fe98cc9c9ba2f61,1 
+np.float64,0x3ff46a266ae8d44d,0x3fe74190e5234822,1 +np.float64,0x7fe8cc9225719923,0x408631c477db9708,1 +np.float64,0x3ff871de5930e3bc,0x3fef948f7d00fbef,1 +np.float64,0x3ffd0bc7895a178f,0x3ff33ffc18357721,1 +np.float64,0x3ff66099f9ccc134,0x3febb2bc775b4720,1 +np.float64,0x7fe91f1be9723e37,0x408631deec3a5c9e,1 +np.float64,0x7fd60462f12c08c5,0x40862b4537e1c1c6,1 +np.float64,0x3ff053100ba0a620,0x3fc9bc0c21e2284f,1 +np.float64,0x7fd864c611b0c98b,0x40862c1724506255,1 +np.float64,0x7fd191decb2323bd,0x408629771bfb68cc,1 +np.float64,0x3ff792a1656f2543,0x3fee054f2e135fcf,1 +np.float64,0x7fd03625cea06c4b,0x408628d253b840e3,1 +np.float64,0x7fc3967716272ced,0x408624ca35451042,1 +np.float64,0x7fe6636cb32cc6d8,0x408630f3073a22a7,1 +np.float64,0x3ffc2d3976585a73,0x3ff2a9d4c0dae607,1 +np.float64,0x3fffd10ee79fa21e,0x3ff4f70db69888be,1 +np.float64,0x3ff1d4fcae23a9f9,0x3fde57675007b23c,1 +np.float64,0x3ffa5da19e14bb43,0x3ff1599f74d1c113,1 +np.float64,0x3ff7f4eb0d6fe9d6,0x3feeb85189659e99,1 +np.float64,0x7fbcca44d8399489,0x408622536234f7c1,1 +np.float64,0x7fef5f97ec3ebf2f,0x408633a60fdde0d7,1 +np.float64,0x7fde4a66da3c94cd,0x40862dd290ebc184,1 +np.float64,0x3ff072957a40e52b,0x3fce34d913d87613,1 +np.float64,0x3ff2bc4c9dc57899,0x3fe27497e6ebe27d,1 +np.float64,0x7fd7d152b4afa2a4,0x40862be63469eecd,1 +np.float64,0x3ff957d768f2afaf,0x3ff08b4ad8062a73,1 +np.float64,0x7fe4bc5f45a978be,0x40863055fd66e4eb,1 +np.float64,0x7fc90de345321bc6,0x408626c24ce7e370,1 +np.float64,0x3ff2d7a37d85af47,0x3fe2cd6a40b544a0,1 +np.float64,0x7fe536ea1f6a6dd3,0x40863084bade76a3,1 +np.float64,0x3fff970c9cdf2e19,0x3ff4d524572356dd,1 +np.float64,0x3ffe173ae63c2e76,0x3ff3ec1ee35ad28c,1 +np.float64,0x3ff714025cce2805,0x3fed168aedff4a2b,1 +np.float64,0x7fce7b414c3cf682,0x40862853dcdd19d4,1 +np.float64,0x3ff019623f2032c4,0x3fbc7c602df0bbaf,1 +np.float64,0x3ff72f57fd0e5eb0,0x3fed4ae75f697432,1 +np.float64,0x3ff283778e8506ef,0x3fe1b5c5725b0dfd,1 +np.float64,0x3ff685a29aed0b45,0x3febfdfdedd581e2,1 +np.float64,0x3ff942d24fb285a4,0x3ff07a224c3ecfaf,1 +np.float64,0x3ff2e4a9f465c954,0x3fe2f71905399e8f,1 +np.float64,0x7fdfa1c7fa3f438f,0x40862e2b4e06f098,1 +np.float64,0x3ff49b59c26936b4,0x3fe7bc41c8c1e59d,1 +np.float64,0x3ff2102d3704205a,0x3fe014bf7e28924e,1 +np.float64,0x3ff88de3b8311bc8,0x3fefc4e3e0a15a89,1 +np.float64,0x7fea5ba25374b744,0x40863241519c9b66,1 +np.float64,0x3fffe5df637fcbbf,0x3ff5032488f570f9,1 +np.float64,0x7fe67cfefe6cf9fd,0x408630fc25333cb4,1 +np.float64,0x3ff090bf2b01217e,0x3fd0f6fcf1092b4a,1 +np.float64,0x7fecd75bc5f9aeb7,0x408632f9b6c2e013,1 +np.float64,0x7fe15df38c62bbe6,0x40862eeae5ac944b,1 +np.float64,0x3ff4757875a8eaf1,0x3fe75e0eafbe28ce,1 +np.float64,0x7fecca8a51b99514,0x408632f627c23923,1 +np.float64,0x3ff91ca529d2394a,0x3ff05abb327fd1ca,1 +np.float64,0x3ffb962993b72c53,0x3ff23ff831717579,1 +np.float64,0x3ffd548a2c7aa914,0x3ff36fac7f56d716,1 +np.float64,0x7fbafb5cb035f6b8,0x408621ce898a02fb,1 +np.float64,0x3ff1d86daca3b0db,0x3fde73536c29218c,1 +np.float64,0x7fa8d0f8f431a1f1,0x40861b97a03c3a18,1 +np.float64,0x3ff44f1067489e21,0x3fe6fcbd8144ab2a,1 +np.float64,0x7fec062b07380c55,0x408632bed9c6ce85,1 +np.float64,0x3ff7e11e0fcfc23c,0x3fee94ada7efaac4,1 +np.float64,0x7fe77505c1aeea0b,0x4086315287dda0ba,1 +np.float64,0x7fc465af2728cb5d,0x4086251d236107f7,1 +np.float64,0x3ffe811c4a7d0238,0x3ff42df7e8b6cf2d,1 +np.float64,0x7fe05a471260b48d,0x40862e6fa502738b,1 +np.float64,0x7fec32cd9778659a,0x408632cb8d98c5a3,1 +np.float64,0x7fd203a220a40743,0x408629aa43b010c0,1 +np.float64,0x7fed71f7d17ae3ef,0x4086332428207101,1 
+np.float64,0x3ff3918999e72313,0x3fe4fe5e8991402f,1 +np.float64,0x3ff3ecae38c7d95c,0x3fe5fa787d887981,1 +np.float64,0x7fd65345b82ca68a,0x40862b61aed8c64e,1 +np.float64,0x3ff1efdd01c3dfba,0x3fdf2eae36139204,1 +np.float64,0x3ffba9344f375268,0x3ff24d7fdcfc313b,1 +np.float64,0x7fd0469b35208d35,0x408628da6ed24bdd,1 +np.float64,0x7fe525782daa4aef,0x4086307e240c8b30,1 +np.float64,0x3ff8e473d371c8e8,0x3ff02beebd4171c7,1 +np.float64,0x3ff59a43898b3487,0x3fea0dc0a6acea0a,1 +np.float64,0x7fef50c7263ea18d,0x408633a247d7cd42,1 +np.float64,0x7fe8b5a301f16b45,0x408631bd0e71c855,1 +np.float64,0x3ff209369de4126d,0x3fdff4264334446b,1 +np.float64,0x3ffbe2ff4437c5fe,0x3ff2763b356814c7,1 +np.float64,0x3ff55938156ab270,0x3fe97c70514f91bf,1 +np.float64,0x3fff5d8bf81ebb18,0x3ff4b333b230672a,1 +np.float64,0x3ff16a317bc2d463,0x3fdab84e7faa468f,1 +np.float64,0x3ff7e64f8dafcc9f,0x3fee9e0bd57e9566,1 +np.float64,0x7fef4dc065be9b80,0x408633a181e25abb,1 +np.float64,0x3ff64a24a62c9449,0x3feb849ced76437e,1 +np.float64,0x7fc3cb85ef27970b,0x408624dfc39c8f74,1 +np.float64,0x7fec2162a77842c4,0x408632c69b0d43b6,1 +np.float64,0x7feccee6dc399dcd,0x408632f75de98c46,1 +np.float64,0x7faff4f5f43fe9eb,0x40861d9d89be14c9,1 +np.float64,0x7fee82df60fd05be,0x4086336cfdeb7317,1 +np.float64,0x3ffe54588d9ca8b1,0x3ff41247eb2f75ca,1 +np.float64,0x3ffe5615b55cac2c,0x3ff4135c4eb11620,1 +np.float64,0x3ffdaf9a6a1b5f35,0x3ff3aa70e50d1692,1 +np.float64,0x3ff69c045f4d3809,0x3fec2b00734e2cde,1 +np.float64,0x7fd049239aa09246,0x408628dbad6dd995,1 +np.float64,0x3ff2acbe8465597d,0x3fe24138652195e1,1 +np.float64,0x3ffb288302365106,0x3ff1f0f86ca7e5d1,1 +np.float64,0x3fff6fe8d87edfd2,0x3ff4be136acf53c5,1 +np.float64,0x3ffc87c8bfb90f92,0x3ff2e7bbd65867cb,1 +np.float64,0x3ff173327ca2e665,0x3fdb0b945abb00d7,1 +np.float64,0x3ff9a5cf7a134b9f,0x3ff0ca2450f07c78,1 +np.float64,0x7faf782b043ef055,0x40861d7e0e9b35ef,1 +np.float64,0x3ffa0874975410e9,0x3ff117ee3dc8f5ba,1 +np.float64,0x7fc710fc7f2e21f8,0x40862618fed167fb,1 +np.float64,0x7feb73f4c876e7e9,0x40863294ae3ac1eb,1 +np.float64,0x8000000000000000,0xfff8000000000000,1 +np.float64,0x7fb46615c028cc2b,0x40861f91bade4dad,1 +np.float64,0x7fc26b064624d60c,0x4086244c1b76c938,1 +np.float64,0x3ff06ab9fa40d574,0x3fcd282fd971d1b4,1 +np.float64,0x3ff61da7410c3b4e,0x3feb28201031af02,1 +np.float64,0x3ffec7ba1b9d8f74,0x3ff459342511f952,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe5d570422baae0,0x408630bfa75008c9,1 +np.float64,0x3ffa895832f512b0,0x3ff17ad41555dccb,1 +np.float64,0x7fd343ac21a68757,0x40862a33ad59947a,1 +np.float64,0x3ffc1eeb37383dd6,0x3ff29ff29e55a006,1 +np.float64,0x7fee3c5c507c78b8,0x4086335a6b768090,1 +np.float64,0x7fe96d774a32daee,0x408631f7b9937e36,1 +np.float64,0x7fb878362430f06b,0x40862106603497b6,1 +np.float64,0x7fec0a79c03814f3,0x408632c01479905e,1 +np.float64,0x3ffa2f143c145e28,0x3ff135e25d902e1a,1 +np.float64,0x3ff14ccff80299a0,0x3fd9a0cd3397b14c,1 +np.float64,0x3ff97980dcb2f302,0x3ff0a6942a8133ab,1 +np.float64,0x3ff872e2d1f0e5c6,0x3fef96526eb2f756,1 +np.float64,0x7fdf1c9b46be3936,0x40862e0957fee329,1 +np.float64,0x7fcab6525d356ca4,0x408627458791f029,1 +np.float64,0x3ff964e74a52c9ce,0x3ff095e8845d523c,1 +np.float64,0x3ffb3aa23c967544,0x3ff1fe282d897c13,1 +np.float64,0x7fdd8a36afbb146c,0x40862d9f2b05f61b,1 +np.float64,0x3ffea39f42fd473e,0x3ff4432a48176399,1 +np.float64,0x7fea614f68b4c29e,0x408632430a750385,1 +np.float64,0x7feeafb86abd5f70,0x40863378b79f70cf,1 +np.float64,0x3ff80bc94eb01792,0x3feee138e9d626bd,1 +np.float64,0x7fcaca74743594e8,0x4086274b8ce4d1e1,1 
+np.float64,0x3ff8b14815316290,0x3ff000b3526c8321,1 +np.float64,0x7fc698eb5f2d31d6,0x408625eeec86cd2b,1 +np.float64,0x7fe15429a3e2a852,0x40862ee6621205b8,1 +np.float64,0x7fee37f81b7c6fef,0x4086335941ed80dd,1 +np.float64,0x3ff8097ab3f012f6,0x3feedd1bafc3196e,1 +np.float64,0x7fe7c889ceaf9113,0x4086316ed13f2394,1 +np.float64,0x7fceca94513d9528,0x4086286893a06824,1 +np.float64,0x3ff593a103cb2742,0x3fe9ff1af4f63cc9,1 +np.float64,0x7fee237d24bc46f9,0x40863353d4142c87,1 +np.float64,0x3ffbf71e4777ee3c,0x3ff2844c0ed9f4d9,1 +np.float64,0x3ff490c65c09218d,0x3fe7a2216d9f69fd,1 +np.float64,0x3fff5ceaf1feb9d6,0x3ff4b2d430a90110,1 +np.float64,0x3ff55baecceab75e,0x3fe98203980666c4,1 +np.float64,0x3ff511bc306a2378,0x3fe8d81ce7be7b50,1 +np.float64,0x3ff38f83dcc71f08,0x3fe4f89f130d5f87,1 +np.float64,0x3ff73a3676ee746d,0x3fed5f98a65107ee,1 +np.float64,0x7fc27e50c824fca1,0x408624547828bc49,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x3fff38959ebe712b,0x3ff49d362c7ba16a,1 +np.float64,0x3ffad6d23a75ada4,0x3ff1b4dda6394ed0,1 +np.float64,0x3ffe77c6c2dcef8e,0x3ff4283698835ecb,1 +np.float64,0x3fff5feb413ebfd6,0x3ff4b49bcbdb3aa9,1 +np.float64,0x3ff0d30aa161a615,0x3fd4751bcdd7d727,1 +np.float64,0x3ff51e07e00a3c10,0x3fe8f4bd1408d694,1 +np.float64,0x8010000000000000,0xfff8000000000000,1 +np.float64,0x7fd231d2fe2463a5,0x408629beaceafcba,1 +np.float64,0x3fff6b4aee1ed696,0x3ff4bb58544bf8eb,1 +np.float64,0x3ff91fcd2f323f9a,0x3ff05d56e33db6b3,1 +np.float64,0x3ff3b889ab477113,0x3fe56bdeab74cce5,1 +np.float64,0x3ff99bfe30d337fc,0x3ff0c24bbf265561,1 +np.float64,0x3ffbe9e5eaf7d3cc,0x3ff27b0fe60f827a,1 +np.float64,0x7fd65678e92cacf1,0x40862b62d44fe8b6,1 +np.float64,0x7fd9cc477233988e,0x40862c89c638ee48,1 +np.float64,0x3ffc123c72d82479,0x3ff297294d05cbc0,1 +np.float64,0x3ff58abad58b1576,0x3fe9eb65da2a867a,1 +np.float64,0x7fe534887b2a6910,0x40863083d4ec2877,1 +np.float64,0x7fe1d3dcb123a7b8,0x40862f208116c55e,1 +np.float64,0x7fd4d570dba9aae1,0x40862ad412c413cd,1 +np.float64,0x3fffce7d3fdf9cfa,0x3ff4f58f02451928,1 +np.float64,0x3ffa76901c74ed20,0x3ff16c9a5851539c,1 +np.float64,0x7fdd88ffa23b11fe,0x40862d9ed6c6f426,1 +np.float64,0x3ff09fdbb9e13fb7,0x3fd1d2ae4fcbf713,1 +np.float64,0x7fe64567772c8ace,0x408630e845dbc290,1 +np.float64,0x7fb1a849ba235092,0x40861e6a291535b2,1 +np.float64,0x3ffaddb105f5bb62,0x3ff1b9f68f4c419b,1 +np.float64,0x7fd2fc3d5025f87a,0x40862a15cbc1df75,1 +np.float64,0x7fdea7d872bd4fb0,0x40862deb190b2c50,1 +np.float64,0x7fd50ea97eaa1d52,0x40862ae9edc4c812,1 +np.float64,0x3fff659c245ecb38,0x3ff4b7fb18b31aea,1 +np.float64,0x3ff3f1fbb7c7e3f7,0x3fe608bd9d76268c,1 +np.float64,0x3ff76869d9aed0d4,0x3fedb6c23d3a317b,1 +np.float64,0x7fedd4efe93ba9df,0x4086333edeecaa43,1 +np.float64,0x3ff9a5bd4eb34b7a,0x3ff0ca15d02bc960,1 +np.float64,0x3ffd9359cc5b26b4,0x3ff39850cb1a6b6c,1 +np.float64,0x7fe912d0427225a0,0x408631db00e46272,1 +np.float64,0x3ffb3802fe567006,0x3ff1fc4093646465,1 +np.float64,0x3ff02cc38a205987,0x3fc2e8182802a07b,1 +np.float64,0x3ffda953dd1b52a8,0x3ff3a66c504cf207,1 +np.float64,0x7fe0a487e4a1490f,0x40862e93a6f20152,1 +np.float64,0x7fed265ed1fa4cbd,0x4086330f838ae431,1 +np.float64,0x7fd0000114200001,0x408628b76ec48b5c,1 +np.float64,0x3ff2c262786584c5,0x3fe288860d354b0f,1 +np.float64,0x8000000000000001,0xfff8000000000000,1 +np.float64,0x3ffdae9f075b5d3e,0x3ff3a9d006ae55c1,1 +np.float64,0x3ffb69c72156d38e,0x3ff22037cbb85e5b,1 +np.float64,0x7feeae255f7d5c4a,0x408633784e89bc05,1 +np.float64,0x7feb13927c362724,0x408632786630c55d,1 +np.float64,0x7fef49e072be93c0,0x408633a08451d476,1 
+np.float64,0x3fff23d6337e47ac,0x3ff490ceb6e634ae,1 +np.float64,0x3ffba82cf8f7505a,0x3ff24cc51c73234d,1 +np.float64,0x7fe948719ef290e2,0x408631ec0b36476e,1 +np.float64,0x3ff41926c5e8324e,0x3fe670e14bbda8cd,1 +np.float64,0x3ff91f09c1523e14,0x3ff05cb5731878da,1 +np.float64,0x3ff6ae6afccd5cd6,0x3fec4fbeca764086,1 +np.float64,0x3ff927f7e0f24ff0,0x3ff06413eeb8eb1e,1 +np.float64,0x3ff19dd2b9e33ba5,0x3fdc882f97994600,1 +np.float64,0x7fe8e502c5b1ca05,0x408631cc56526fff,1 +np.float64,0x7feb49f70fb693ed,0x4086328868486fcd,1 +np.float64,0x3ffd942d535b285a,0x3ff398d8d89f52ca,1 +np.float64,0x7fc3b9c5c627738b,0x408624d893e692ca,1 +np.float64,0x7fea0780ff340f01,0x408632279fa46704,1 +np.float64,0x7fe4c90066a99200,0x4086305adb47a598,1 +np.float64,0x7fdb209113364121,0x40862cf0ab64fd7d,1 +np.float64,0x3ff38617e5470c30,0x3fe4ddc0413b524f,1 +np.float64,0x7fea1b5b803436b6,0x4086322db767f091,1 +np.float64,0x7fe2004898e40090,0x40862f3457795dc5,1 +np.float64,0x3ff3c4360ac7886c,0x3fe58c29843a4c75,1 +np.float64,0x3ff504bc168a0978,0x3fe8b9ada7f698e6,1 +np.float64,0x3ffd3e936fda7d27,0x3ff3615912c5b4ac,1 +np.float64,0x3ffbdc52fb97b8a6,0x3ff2718dae5f1f2b,1 +np.float64,0x3fffef6d84ffdedb,0x3ff508adbc8556cf,1 +np.float64,0x3ff23b65272476ca,0x3fe0b646ed2579eb,1 +np.float64,0x7fe4633068a8c660,0x408630334a4b7ff7,1 +np.float64,0x3ff769b754aed36f,0x3fedb932af0223f9,1 +np.float64,0x7fe7482d92ee905a,0x408631432de1b057,1 +np.float64,0x3ff5dd682aabbad0,0x3fea9fd5e506a86d,1 +np.float64,0x7fd68399a2ad0732,0x40862b72ed89805d,1 +np.float64,0x3ffad7acc3d5af5a,0x3ff1b57fe632c948,1 +np.float64,0x3ffc68e43698d1c8,0x3ff2d2be6f758761,1 +np.float64,0x3ff4e517fbc9ca30,0x3fe86eddf5e63a58,1 +np.float64,0x3ff34c63c56698c8,0x3fe435b74ccd6a13,1 +np.float64,0x7fea9456c17528ad,0x4086325275237015,1 +np.float64,0x7fee6573f2fccae7,0x4086336543760346,1 +np.float64,0x7fd5496fb9aa92de,0x40862b0023235667,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3ffb70e31256e1c6,0x3ff22552f54b13e0,1 +np.float64,0x3ff66a33988cd467,0x3febc656da46a1ca,1 +np.float64,0x3fff0af2eb1e15e6,0x3ff481dec325f5c8,1 +np.float64,0x3ff6a0233d0d4046,0x3fec33400958eda1,1 +np.float64,0x7fdb11e2d5b623c5,0x40862cec55e405f9,1 +np.float64,0x3ffb8a015ad71402,0x3ff2374d7b563a72,1 +np.float64,0x3ff1807d8ce300fb,0x3fdb849e4bce8335,1 +np.float64,0x3ffefd535e3dfaa6,0x3ff479aaac6ffe79,1 +np.float64,0x3ff701e23a6e03c4,0x3fecf39072d96fc7,1 +np.float64,0x3ff4ac809f895901,0x3fe7e6598f2335a5,1 +np.float64,0x3ff0309f26a0613e,0x3fc3b3f4b2783690,1 +np.float64,0x3ff241dd0ce483ba,0x3fe0cde2cb639144,1 +np.float64,0x3ffabce63fb579cc,0x3ff1a18fe2a2da59,1 +np.float64,0x3ffd84b967db0973,0x3ff38ee4f240645d,1 +np.float64,0x7fc3f88b9a27f116,0x408624f1e10cdf3f,1 +np.float64,0x7fe1d5fd5923abfa,0x40862f2175714a3a,1 +np.float64,0x7fe487b145690f62,0x4086304190700183,1 +np.float64,0x7fe7997feaef32ff,0x4086315eeefdddd2,1 +np.float64,0x3ff8f853b671f0a8,0x3ff03c907353a8da,1 +np.float64,0x7fca4c23b5349846,0x408627257ace5778,1 +np.float64,0x7fe0c9bf3a21937d,0x40862ea576c3ea43,1 +np.float64,0x7fc442b389288566,0x4086250f5f126ec9,1 +np.float64,0x7fc6d382ed2da705,0x40862603900431b0,1 +np.float64,0x7fe40b069068160c,0x4086301066468124,1 +np.float64,0x3ff7f62a146fec54,0x3feeba8dfc4363fe,1 +np.float64,0x3ff721e8e94e43d2,0x3fed313a6755d34f,1 +np.float64,0x7fe579feaf2af3fc,0x4086309ddefb6112,1 +np.float64,0x3ffe2c6bde5c58d8,0x3ff3f9665dc9a16e,1 +np.float64,0x7fcf9998ed3f3331,0x4086289dab274788,1 +np.float64,0x7fdb03af2236075d,0x40862ce82252e490,1 +np.float64,0x7fe72799392e4f31,0x40863137f428ee71,1 
+np.float64,0x7f9f2190603e4320,0x408617dc5b3b3c3c,1 +np.float64,0x3ff69c56d52d38ae,0x3fec2ba59fe938b2,1 +np.float64,0x7fdcde27bf39bc4e,0x40862d70086cd06d,1 +np.float64,0x3ff654d6b8eca9ae,0x3feb9aa0107609a6,1 +np.float64,0x7fdf69d967bed3b2,0x40862e1d1c2b94c2,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0x7fedfd073f3bfa0d,0x40863349980c2c8b,1 +np.float64,0x7f7c1856803830ac,0x40860bf312b458c7,1 +np.float64,0x7fe9553f1bb2aa7d,0x408631f0173eadd5,1 +np.float64,0x3ff6e92efc2dd25e,0x3fecc38f98e7e1a7,1 +np.float64,0x7fe9719ac532e335,0x408631f906cd79c3,1 +np.float64,0x3ff60e56ae4c1cad,0x3feb07ef8637ec7e,1 +np.float64,0x3ff0d0803501a100,0x3fd455c0af195a9c,1 +np.float64,0x7fe75248a3eea490,0x40863146a614aec1,1 +np.float64,0x7fdff61ead3fec3c,0x40862e408643d7aa,1 +np.float64,0x7fed4ac7a4fa958e,0x408633197b5cf6ea,1 +np.float64,0x7fe58d44562b1a88,0x408630a5098d1bbc,1 +np.float64,0x7fd89dcdb1b13b9a,0x40862c29c2979288,1 +np.float64,0x3ff205deda240bbe,0x3fdfda67c84fd3a8,1 +np.float64,0x7fdf84c15abf0982,0x40862e23f361923d,1 +np.float64,0x3ffe012b3afc0256,0x3ff3de3dfa5f47ce,1 +np.float64,0x3ffe2f3512dc5e6a,0x3ff3fb245206398e,1 +np.float64,0x7fed6174c2bac2e9,0x4086331faa699617,1 +np.float64,0x3ff1f30f8783e61f,0x3fdf47e06f2c40d1,1 +np.float64,0x3ff590da9eab21b5,0x3fe9f8f7b4baf3c2,1 +np.float64,0x3ffb3ca1eb967944,0x3ff1ff9baf66d704,1 +np.float64,0x7fe50ba9a5aa1752,0x408630745ab7fd3c,1 +np.float64,0x3ff43743a4a86e87,0x3fe6bf7ae80b1dda,1 +np.float64,0x3ff47e1a24e8fc34,0x3fe773acca44c7d6,1 +np.float64,0x3ff589ede9eb13dc,0x3fe9e99f28fab3a4,1 +np.float64,0x3ff72f2cbf8e5e5a,0x3fed4a94e7edbf24,1 +np.float64,0x3ffa4f9bbc549f38,0x3ff14ee60aea45d3,1 +np.float64,0x3ff975dae732ebb6,0x3ff0a3a1fbd7284a,1 +np.float64,0x7fbcf14ee039e29d,0x4086225e33f3793e,1 +np.float64,0x3ff10e027f621c05,0x3fd71cce2452b4e0,1 +np.float64,0x3ff33ea193067d43,0x3fe40cbac4daaddc,1 +np.float64,0x7fbef8f2263df1e3,0x408622e905c8e1b4,1 +np.float64,0x3fff7f5bfe3efeb8,0x3ff4c732e83df253,1 +np.float64,0x3ff5700a6b4ae015,0x3fe9afdd7b8b82b0,1 +np.float64,0x3ffd5099da5aa134,0x3ff36d1bf26e55bf,1 +np.float64,0x3ffed8e0f89db1c2,0x3ff4639ff065107a,1 +np.float64,0x3fff9d0c463f3a18,0x3ff4d8a9f297cf52,1 +np.float64,0x3ff23db5b2e47b6b,0x3fe0bebdd48f961a,1 +np.float64,0x3ff042bff1e08580,0x3fc713bf24cc60ef,1 +np.float64,0x7feb4fe97a769fd2,0x4086328a26675646,1 +np.float64,0x3ffeafbfeedd5f80,0x3ff44a955a553b1c,1 +np.float64,0x3ff83fb524507f6a,0x3fef3d1729ae0976,1 +np.float64,0x3ff1992294433245,0x3fdc5f5ce53dd197,1 +np.float64,0x7fe89fe629b13fcb,0x408631b601a83867,1 +np.float64,0x7fe53e4d74aa7c9a,0x40863087839b52f1,1 +np.float64,0x3ff113713e6226e2,0x3fd757631ca7cd09,1 +np.float64,0x7fd4a0b7a629416e,0x40862abfba27a09b,1 +np.float64,0x3ff184c6e2a3098e,0x3fdbab2e3966ae57,1 +np.float64,0x3ffafbbf77f5f77f,0x3ff1d02bb331d9f9,1 +np.float64,0x3ffc6099a358c134,0x3ff2cd16941613d1,1 +np.float64,0x3ffb7c441ef6f888,0x3ff22d7b12e31432,1 +np.float64,0x3ff625ba5eec4b75,0x3feb39060e55fb79,1 +np.float64,0x7fde879acbbd0f35,0x40862de2aab4d72d,1 +np.float64,0x7f930aed982615da,0x408613edb6df8528,1 +np.float64,0x7fa4b82dac29705a,0x40861a261c0a9aae,1 +np.float64,0x7fced5c16b3dab82,0x4086286b7a73e611,1 +np.float64,0x7fe133749d2266e8,0x40862ed73a41b112,1 +np.float64,0x3ff2d8146ea5b029,0x3fe2ced55dbf997d,1 +np.float64,0x3ff60dac77ac1b59,0x3feb0688b0e54c7b,1 +np.float64,0x3ff275d9b024ebb3,0x3fe186b87258b834,1 +np.float64,0x3ff533e6500a67cd,0x3fe92746c8b50ddd,1 +np.float64,0x7fe370896666e112,0x40862fd1ca144736,1 +np.float64,0x7fee7695357ced29,0x40863369c459420e,1 
+np.float64,0x7fd1e0528023c0a4,0x4086299a85caffd0,1 +np.float64,0x7fd05c7b24a0b8f5,0x408628e52824386f,1 +np.float64,0x3ff11dcc3b023b98,0x3fd7c56c8cef1be1,1 +np.float64,0x7fc9d9fae933b3f5,0x408627027404bc5f,1 +np.float64,0x7fe2359981246b32,0x40862f4be675e90d,1 +np.float64,0x3ffb10a949962152,0x3ff1df88f83b8cde,1 +np.float64,0x3ffa65b53654cb6a,0x3ff15fc8956ccc87,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x7fad97ef703b2fde,0x40861d002f3d02da,1 +np.float64,0x3ff57aaf93aaf55f,0x3fe9c7b01f194edb,1 +np.float64,0x7fe9ecd73f33d9ad,0x4086321f69917205,1 +np.float64,0x3ff0dcb79c61b96f,0x3fd4eac86a7a9c38,1 +np.float64,0x7fee9c12ffbd3825,0x4086337396cd706d,1 +np.float64,0x3ff52c40af4a5881,0x3fe915a8a7de8f00,1 +np.float64,0x3ffbcfff59779ffe,0x3ff268e523fe8dda,1 +np.float64,0x7fe014cb4b602996,0x40862e4d5de42a03,1 +np.float64,0x7fae2370e83c46e1,0x40861d258dd5b3ee,1 +np.float64,0x7fe9e33602f3c66b,0x4086321c704ac2bb,1 +np.float64,0x3ff648acd74c915a,0x3feb8195ca53bcaa,1 +np.float64,0x7fe385f507670be9,0x40862fda95ebaf44,1 +np.float64,0x3ffb0e382c361c70,0x3ff1ddbea963e0a7,1 +np.float64,0x3ff47d6b6ae8fad7,0x3fe771f80ad37cd2,1 +np.float64,0x3ffca7d538f94faa,0x3ff2fd5f62e851ac,1 +np.float64,0x3ff83e949c107d29,0x3fef3b1c5bbac99b,1 +np.float64,0x7fc6fb933a2df725,0x408626118e51a286,1 +np.float64,0x7fe43a1454e87428,0x4086302318512d9b,1 +np.float64,0x7fe51fe32aaa3fc5,0x4086307c07271348,1 +np.float64,0x3ff35e563966bcac,0x3fe46aa2856ef85f,1 +np.float64,0x3ff84dd4e4909baa,0x3fef55d86d1d5c2e,1 +np.float64,0x7febe3d84077c7b0,0x408632b507686f03,1 +np.float64,0x3ff6aca2e32d5946,0x3fec4c32a2368ee3,1 +np.float64,0x7fe7070e3e6e0e1b,0x4086312caddb0454,1 +np.float64,0x7fd3657f2aa6cafd,0x40862a41acf47e70,1 +np.float64,0x3ff61534456c2a68,0x3feb1663900af13b,1 +np.float64,0x3ff8bc556eb178ab,0x3ff00a16b5403f88,1 +np.float64,0x3ffa7782e3f4ef06,0x3ff16d529c94a438,1 +np.float64,0x7fc15785ed22af0b,0x408623d0cd94fb86,1 +np.float64,0x3ff2e3eeb6e5c7dd,0x3fe2f4c4876d3edf,1 +np.float64,0x3ff2e4e17e85c9c3,0x3fe2f7c9e437b22e,1 +np.float64,0x7feb3aaf67f6755e,0x40863283ec4a0d76,1 +np.float64,0x7fe89efcf7313df9,0x408631b5b5e41263,1 +np.float64,0x3ffcc6fad4f98df6,0x3ff31245778dff6d,1 +np.float64,0x3ff356114466ac22,0x3fe45253d040a024,1 +np.float64,0x3ff81c70d2d038e2,0x3feefed71ebac776,1 +np.float64,0x7fdb75c96136eb92,0x40862d09a603f03e,1 +np.float64,0x3ff340f91b8681f2,0x3fe413bb6e6d4a54,1 +np.float64,0x3fff906079df20c1,0x3ff4d13869d16bc7,1 +np.float64,0x3ff226a42d644d48,0x3fe0698d316f1ac0,1 +np.float64,0x3ff948abc3b29158,0x3ff07eeb0b3c81ba,1 +np.float64,0x3ffc25df1fb84bbe,0x3ff2a4c13ad4edad,1 +np.float64,0x7fe07ea3b960fd46,0x40862e815b4cf43d,1 +np.float64,0x3ff497d3dae92fa8,0x3fe7b3917bf10311,1 +np.float64,0x7fea561db1f4ac3a,0x4086323fa4aef2a9,1 +np.float64,0x7fd1b49051236920,0x40862986d8759ce5,1 +np.float64,0x7f7ba3bd6037477a,0x40860bd19997fd90,1 +np.float64,0x3ff01126dd00224e,0x3fb76b67938dfb11,1 +np.float64,0x3ff29e1105053c22,0x3fe2102a4c5fa102,1 +np.float64,0x3ff9de2a6553bc55,0x3ff0f6cfe4dea30e,1 +np.float64,0x7fc558e7d42ab1cf,0x4086257a608fc055,1 +np.float64,0x3ff79830a74f3061,0x3fee0f93db153d65,1 +np.float64,0x7fe2661648e4cc2c,0x40862f6117a71eb2,1 +np.float64,0x3ff140cf4262819e,0x3fd92aefedae1ab4,1 +np.float64,0x3ff5f36251abe6c5,0x3feaced481ceaee3,1 +np.float64,0x7fc80911d5301223,0x4086266d4757f768,1 +np.float64,0x3ff9079a6c320f35,0x3ff04949d21ebe1e,1 +np.float64,0x3ffde8d2e09bd1a6,0x3ff3cedca8a5db5d,1 +np.float64,0x3ffadd1de375ba3c,0x3ff1b989790e8d93,1 +np.float64,0x3ffdbc40ee1b7882,0x3ff3b286b1c7da57,1 
+np.float64,0x3ff8ff514771fea2,0x3ff04264add00971,1 +np.float64,0x7fefd7d0e63fafa1,0x408633c47d9f7ae4,1 +np.float64,0x3ffc47798c588ef3,0x3ff2bbe441fa783a,1 +np.float64,0x7fe6ebc55b6dd78a,0x408631232d9abf31,1 +np.float64,0xbff0000000000000,0xfff8000000000000,1 +np.float64,0x7fd378e4afa6f1c8,0x40862a49a8f98cb4,1 +np.float64,0x0,0xfff8000000000000,1 +np.float64,0x3ffe88ed7efd11db,0x3ff432c7ecb95492,1 +np.float64,0x3ff4f5509289eaa1,0x3fe8955a11656323,1 +np.float64,0x7fda255b41344ab6,0x40862ca53676a23e,1 +np.float64,0x3ffebe85b9bd7d0c,0x3ff453992cd55dea,1 +np.float64,0x3ff5d6180b8bac30,0x3fea901c2160c3bc,1 +np.float64,0x3ffcdfb8fcf9bf72,0x3ff322c83b3bc735,1 +np.float64,0x3ff3c91c26679238,0x3fe599a652b7cf59,1 +np.float64,0x7fc389f7a62713ee,0x408624c518edef93,1 +np.float64,0x3ffe1245ba1c248c,0x3ff3e901b2c4a47a,1 +np.float64,0x7fe1e76e95e3cedc,0x40862f29446f9eff,1 +np.float64,0x3ff02ae4f92055ca,0x3fc28221abd63daa,1 +np.float64,0x7fbf648a143ec913,0x40862304a0619d03,1 +np.float64,0x3ff2be7ef8657cfe,0x3fe27bcc6c97522e,1 +np.float64,0x3ffa7595e514eb2c,0x3ff16bdc64249ad1,1 +np.float64,0x3ff4ee130049dc26,0x3fe884354cbad8c9,1 +np.float64,0x3ff19211fc232424,0x3fdc2160bf3eae40,1 +np.float64,0x3ffec215aedd842c,0x3ff455c4cdd50c32,1 +np.float64,0x7fe7cb50ffaf96a1,0x4086316fc06a53af,1 +np.float64,0x3fffa679161f4cf2,0x3ff4de30ba7ac5b8,1 +np.float64,0x7fdcb459763968b2,0x40862d646a21011d,1 +np.float64,0x3ff9f338d6d3e672,0x3ff1075835d8f64e,1 +np.float64,0x3ff8de3319d1bc66,0x3ff026ae858c0458,1 +np.float64,0x7fee0199d33c0333,0x4086334ad03ac683,1 +np.float64,0x3ffc06076c380c0f,0x3ff28eaec3814faa,1 +np.float64,0x3ffe9e2e235d3c5c,0x3ff43fd4d2191a7f,1 +np.float64,0x3ffd93b06adb2761,0x3ff398888239cde8,1 +np.float64,0x7fefe4b71cffc96d,0x408633c7ba971b92,1 +np.float64,0x7fb2940352252806,0x40861ed244bcfed6,1 +np.float64,0x3ffba4647e3748c9,0x3ff24a15f02e11b9,1 +np.float64,0x7fd2d9543725b2a7,0x40862a0708446596,1 +np.float64,0x7fc04997f120932f,0x4086235055d35251,1 +np.float64,0x3ff6d14313ada286,0x3fec94b177f5d3fc,1 +np.float64,0x3ff279fc8684f3f9,0x3fe19511c3e5b9a8,1 +np.float64,0x3ff42f4609085e8c,0x3fe6aabe526ce2bc,1 +np.float64,0x7fc1c6c62a238d8b,0x408624037de7f6ec,1 +np.float64,0x7fe31ff4b8e63fe8,0x40862fb05b40fd16,1 +np.float64,0x7fd2a8825fa55104,0x408629f234d460d6,1 +np.float64,0x3ffe8c1d725d183b,0x3ff434bdc444143f,1 +np.float64,0x3ff0e9dc3e21d3b8,0x3fd58676e2c13fc9,1 +np.float64,0x3ffed03172fda063,0x3ff45e59f7aa6c8b,1 +np.float64,0x7fd74621962e8c42,0x40862bb6e90d66f8,1 +np.float64,0x3ff1faa29663f545,0x3fdf833a2c5efde1,1 +np.float64,0x7fda02834db40506,0x40862c9a860d6747,1 +np.float64,0x7f709b2fc021365f,0x408607be328eb3eb,1 +np.float64,0x7fec0d58aa381ab0,0x408632c0e61a1af6,1 +np.float64,0x3ff524d1720a49a3,0x3fe90479968d40fd,1 +np.float64,0x7fd64cb3b32c9966,0x40862b5f53c4b0b4,1 +np.float64,0x3ff9593e3ed2b27c,0x3ff08c6eea5f6e8b,1 +np.float64,0x3ff7de8b1f6fbd16,0x3fee9007abcfdf7b,1 +np.float64,0x7fe8d816d6b1b02d,0x408631c82e38a894,1 +np.float64,0x7fd726bbe22e4d77,0x40862bac16ee8d52,1 +np.float64,0x7fa70b07d42e160f,0x40861affcc4265e2,1 +np.float64,0x7fe18b4091e31680,0x40862effa8bce66f,1 +np.float64,0x3ff830253010604a,0x3fef21b2eaa75758,1 +np.float64,0x3fffcade407f95bc,0x3ff4f3734b24c419,1 +np.float64,0x3ff8c17cecb182fa,0x3ff00e75152d7bda,1 +np.float64,0x7fdad9b9d035b373,0x40862cdbabb793ba,1 +np.float64,0x3ff9f9e154f3f3c2,0x3ff10c8dfdbd2510,1 +np.float64,0x3ff465e162e8cbc3,0x3fe736c751c75b73,1 +np.float64,0x3ff9b4cd8493699b,0x3ff0d616235544b8,1 +np.float64,0x7fe557c4a56aaf88,0x4086309114ed12d9,1 
+np.float64,0x7fe5999133eb3321,0x408630a9991a9b54,1 +np.float64,0x7fe7c9009e2f9200,0x4086316ef9359a47,1 +np.float64,0x3ff8545cabd0a8ba,0x3fef6141f1030c36,1 +np.float64,0x3ffa1f1712943e2e,0x3ff129849d492ce3,1 +np.float64,0x7fea803a14750073,0x4086324c652c276c,1 +np.float64,0x3ff5b6f97fcb6df3,0x3fea4cb0b97b18e9,1 +np.float64,0x7fc2efdfc425dfbf,0x40862485036a5c6e,1 +np.float64,0x7fe2c78e5be58f1c,0x40862f8b0a5e7baf,1 +np.float64,0x7fe80d7fff301aff,0x40863185e234060a,1 +np.float64,0x3ffd895d457b12ba,0x3ff391e2cac7a3f8,1 +np.float64,0x3ff44c9764a8992f,0x3fe6f6690396c232,1 +np.float64,0x3ff731688b8e62d1,0x3fed4ed70fac3839,1 +np.float64,0x3ff060200460c040,0x3fcbad4a07d97f0e,1 +np.float64,0x3ffbd2f70a17a5ee,0x3ff26afb46ade929,1 +np.float64,0x7febe9e841f7d3d0,0x408632b6c465ddd9,1 +np.float64,0x3ff2532f8be4a65f,0x3fe10c6cd8d64cf4,1 +np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,1 +np.float64,0x3ff3a1ae3a47435c,0x3fe52c00210cc459,1 +np.float64,0x7fe9c34ae6b38695,0x408632128d150149,1 +np.float64,0x3fff311029fe6220,0x3ff498b852f30bff,1 +np.float64,0x3ffd4485a1ba890c,0x3ff3653b6fa701cd,1 +np.float64,0x7fd52718b1aa4e30,0x40862af330d9c68c,1 +np.float64,0x3ff10b695a4216d3,0x3fd7009294e367b7,1 +np.float64,0x3ffdf73de59bee7c,0x3ff3d7fa96d2c1ae,1 +np.float64,0x3ff2f1c75965e38f,0x3fe320aaff3db882,1 +np.float64,0x3ff2a56a5a854ad5,0x3fe228cc4ad7e7a5,1 +np.float64,0x7fe60cd1cf6c19a3,0x408630d3d87a04b3,1 +np.float64,0x3ff89fa65c113f4c,0x3fefe3543773180c,1 +np.float64,0x3ffd253130ba4a62,0x3ff350b76ba692a0,1 +np.float64,0x7feaad7051f55ae0,0x40863259ff932d62,1 +np.float64,0x7fd9cc37cf33986f,0x40862c89c15f963b,1 +np.float64,0x3ff8c08de771811c,0x3ff00daa9c17acd7,1 +np.float64,0x7fea58b25d34b164,0x408632406d54cc6f,1 +np.float64,0x7fe5f161fd2be2c3,0x408630c9ddf272a5,1 +np.float64,0x3ff5840dbf8b081c,0x3fe9dc9117b4cbc7,1 +np.float64,0x3ff3fd762307faec,0x3fe6277cd530c640,1 +np.float64,0x3ff9095c98b212b9,0x3ff04abff170ac24,1 +np.float64,0x7feaac66017558cb,0x40863259afb4f8ce,1 +np.float64,0x7fd78f96bcaf1f2c,0x40862bd00175fdf9,1 +np.float64,0x3ffaca27e0959450,0x3ff1ab72b8f8633e,1 +np.float64,0x3ffb7f18cb96fe32,0x3ff22f81bcb8907b,1 +np.float64,0x3ffcce48d1199c92,0x3ff317276f62c0b2,1 +np.float64,0x3ffcb9a7f3797350,0x3ff30958e0d6a34d,1 +np.float64,0x7fda569ef6b4ad3d,0x40862cb43b33275a,1 +np.float64,0x7fde9f0893bd3e10,0x40862de8cc036283,1 +np.float64,0x3ff428be3928517c,0x3fe699bb5ab58904,1 +np.float64,0x7fa4d3344029a668,0x40861a3084989291,1 +np.float64,0x3ff03607bd006c0f,0x3fc4c4840cf35f48,1 +np.float64,0x3ff2b1335c056267,0x3fe25000846b75a2,1 +np.float64,0x7fe0cb8bd8e19717,0x40862ea65237d496,1 +np.float64,0x3fff4b1b7b9e9637,0x3ff4a83fb08e7b24,1 +np.float64,0x7fe7526140aea4c2,0x40863146ae86069c,1 +np.float64,0x7fbfcfb7c23f9f6f,0x4086231fc246ede5,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arcsin.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arcsin.csv new file mode 100644 index 0000000..75d5707 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arcsin.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbe7d3a7c,0xbe7fe217,4 +np.float32,0x3dc102f0,0x3dc14c60,4 +np.float32,0xbe119c28,0xbe121aef,4 +np.float32,0xbe51cd68,0xbe534c75,4 +np.float32,0x3c04a300,0x3c04a35f,4 +np.float32,0xbf4f0b62,0xbf712a69,4 +np.float32,0x3ef61a5c,0x3f005cf6,4 +np.float32,0xbf13024c,0xbf1c97df,4 +np.float32,0x3e93b580,0x3e95d6b5,4 +np.float32,0x3e44e7b8,0x3e4623a5,4 +np.float32,0xbe35df20,0xbe36d773,4 
+np.float32,0x3eecd2c0,0x3ef633cf,4 +np.float32,0x3f2772ba,0x3f36862a,4 +np.float32,0x3e211ea8,0x3e21cac5,4 +np.float32,0x3e3b3d90,0x3e3c4cc6,4 +np.float32,0x3f37c962,0x3f4d018c,4 +np.float32,0x3e92ad88,0x3e94c31a,4 +np.float32,0x3f356ffc,0x3f49a766,4 +np.float32,0x3f487ba2,0x3f665254,4 +np.float32,0x3f061c46,0x3f0d27ae,4 +np.float32,0xbee340a2,0xbeeb7722,4 +np.float32,0xbe85aede,0xbe874026,4 +np.float32,0x3f34cf9a,0x3f48c474,4 +np.float32,0x3e29a690,0x3e2a6fbd,4 +np.float32,0xbeb29428,0xbeb669d1,4 +np.float32,0xbe606d40,0xbe624370,4 +np.float32,0x3dae6860,0x3dae9e85,4 +np.float32,0xbf04872b,0xbf0b4d25,4 +np.float32,0x3f2080e2,0x3f2d7ab0,4 +np.float32,0xbec77dcc,0xbecceb27,4 +np.float32,0x3e0dda10,0x3e0e4f38,4 +np.float32,0xbefaf970,0xbf03262c,4 +np.float32,0x3f576a0c,0x3f7ffee6,4 +np.float32,0x3f222382,0x3f2f95d6,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3e41c468,0x3e42f14e,4 +np.float32,0xbf2f64dd,0xbf4139a8,4 +np.float32,0xbf60ef90,0xbf895956,4 +np.float32,0xbf67c855,0xbf90eff0,4 +np.float32,0xbed35aee,0xbed9df00,4 +np.float32,0xbf2c7d92,0xbf3d448f,4 +np.float32,0x3f7b1604,0x3faff122,4 +np.float32,0xbf7c758b,0xbfb3bf87,4 +np.float32,0x3ecda1c8,0x3ed39acf,4 +np.float32,0x3f3af8ae,0x3f519fcb,4 +np.float32,0xbf16e6a3,0xbf2160fd,4 +np.float32,0x3f0c97d2,0x3f14d668,4 +np.float32,0x3f0a8060,0x3f1257b9,4 +np.float32,0x3f27905a,0x3f36ad57,4 +np.float32,0x3eeaeba4,0x3ef40efe,4 +np.float32,0x3e58dde0,0x3e5a8580,4 +np.float32,0xbf0cabe2,0xbf14ee6b,4 +np.float32,0xbe805ca8,0xbe81bf03,4 +np.float32,0x3f5462ba,0x3f7a7b85,4 +np.float32,0xbee235d0,0xbeea4d8b,4 +np.float32,0xbe880cb0,0xbe89b426,4 +np.float32,0x80000001,0x80000001,4 +np.float32,0x3f208c00,0x3f2d88f6,4 +np.float32,0xbf34f3d2,0xbf48f7a2,4 +np.float32,0x3f629428,0x3f8b1763,4 +np.float32,0xbf52a900,0xbf776b4a,4 +np.float32,0xbd17f8d0,0xbd1801be,4 +np.float32,0xbef7cada,0xbf0153d1,4 +np.float32,0x3f7d3b90,0x3fb63967,4 +np.float32,0xbd6a20b0,0xbd6a4160,4 +np.float32,0x3f740496,0x3fa1beb7,4 +np.float32,0x3ed8762c,0x3edf7dd9,4 +np.float32,0x3f53b066,0x3f793d42,4 +np.float32,0xbe9de718,0xbea084f9,4 +np.float32,0x3ea3ae90,0x3ea69b4b,4 +np.float32,0x3f1b8f00,0x3f273183,4 +np.float32,0x3f5cd6ac,0x3f852ead,4 +np.float32,0x3f29d510,0x3f39b169,4 +np.float32,0x3ee2a934,0x3eeace33,4 +np.float32,0x3eecac94,0x3ef608c2,4 +np.float32,0xbea915e2,0xbeac5203,4 +np.float32,0xbd316e90,0xbd317cc8,4 +np.float32,0xbf70b495,0xbf9c97b6,4 +np.float32,0xbe80d976,0xbe823ff3,4 +np.float32,0x3e9205f8,0x3e94143f,4 +np.float32,0x3f49247e,0x3f676296,4 +np.float32,0x3d9030c0,0x3d904f50,4 +np.float32,0x3e4df058,0x3e4f5a5c,4 +np.float32,0xbe1fd360,0xbe207b58,4 +np.float32,0xbf69dc7c,0xbf937006,4 +np.float32,0x3f36babe,0x3f4b7df3,4 +np.float32,0xbe8c9758,0xbe8e6bb7,4 +np.float32,0xbf4de72d,0xbf6f3c20,4 +np.float32,0xbecdad68,0xbed3a780,4 +np.float32,0xbf73e2cf,0xbfa18702,4 +np.float32,0xbece16a8,0xbed41a75,4 +np.float32,0x3f618a96,0x3f89fc6d,4 +np.float32,0xbf325853,0xbf454ea9,4 +np.float32,0x3f138568,0x3f1d3828,4 +np.float32,0xbf56a6e9,0xbf7e9748,4 +np.float32,0x3ef5d594,0x3f0035bf,4 +np.float32,0xbf408220,0xbf59dfaa,4 +np.float32,0xbed120e6,0xbed76dd5,4 +np.float32,0xbf6dbda5,0xbf986cee,4 +np.float32,0x3f744a38,0x3fa23282,4 +np.float32,0xbe4b56d8,0xbe4cb329,4 +np.float32,0x3f54c5f2,0x3f7b2d97,4 +np.float32,0xbd8b1c90,0xbd8b3801,4 +np.float32,0x3ee19a48,0x3ee9a03b,4 +np.float32,0x3f48460e,0x3f65fc3d,4 +np.float32,0x3eb541c0,0x3eb9461e,4 +np.float32,0xbea7d098,0xbeaaf98c,4 +np.float32,0xbda99e40,0xbda9d00c,4 +np.float32,0xbefb2ca6,0xbf03438d,4 
+np.float32,0x3f4256be,0x3f5cab0b,4 +np.float32,0xbdbdb198,0xbdbdf74d,4 +np.float32,0xbf325b5f,0xbf4552e9,4 +np.float32,0xbf704d1a,0xbf9c00b4,4 +np.float32,0x3ebb1d04,0x3ebf8cf8,4 +np.float32,0xbed03566,0xbed66bf1,4 +np.float32,0x3e8fcee8,0x3e91c501,4 +np.float32,0xbf2e1eec,0xbf3f7b9d,4 +np.float32,0x3f33c4d2,0x3f474cac,4 +np.float32,0x3f598ef4,0x3f8201b4,4 +np.float32,0x3e09bb30,0x3e0a2660,4 +np.float32,0x3ed4e228,0x3edb8cdb,4 +np.float32,0x3eb7a190,0x3ebbd0a1,4 +np.float32,0xbd9ae630,0xbd9b0c18,4 +np.float32,0x3f43020e,0x3f5db2d7,4 +np.float32,0xbec06ac0,0xbec542d4,4 +np.float32,0x3f3dfde0,0x3f561674,4 +np.float32,0xbf64084a,0xbf8cabe6,4 +np.float32,0xbd6f95b0,0xbd6fb8b7,4 +np.float32,0x3f268640,0x3f354e2d,4 +np.float32,0xbe72b4bc,0xbe7509b2,4 +np.float32,0xbf3414fa,0xbf47bd5a,4 +np.float32,0xbf375218,0xbf4c566b,4 +np.float32,0x3f203c1a,0x3f2d2273,4 +np.float32,0xbd503530,0xbd504c2b,4 +np.float32,0xbc45e540,0xbc45e67b,4 +np.float32,0xbf175c4f,0xbf21f2c6,4 +np.float32,0x3f7432a6,0x3fa20b2b,4 +np.float32,0xbf43367f,0xbf5e03d8,4 +np.float32,0x3eb3997c,0x3eb780c4,4 +np.float32,0x3e5574c8,0x3e570878,4 +np.float32,0xbf04b57b,0xbf0b8349,4 +np.float32,0x3f6216d8,0x3f8a914b,4 +np.float32,0xbf57a237,0xbf80337d,4 +np.float32,0xbee1403a,0xbee93bee,4 +np.float32,0xbeaf9b9a,0xbeb33f3b,4 +np.float32,0xbf109374,0xbf19a223,4 +np.float32,0xbeae6824,0xbeb1f810,4 +np.float32,0xbcff9320,0xbcff9dbe,4 +np.float32,0x3ed205c0,0x3ed868a9,4 +np.float32,0x3d897c30,0x3d8996ad,4 +np.float32,0xbf2899d2,0xbf380d4c,4 +np.float32,0xbf54cb0b,0xbf7b36c2,4 +np.float32,0x3ea8e8ec,0x3eac2262,4 +np.float32,0x3ef5e1a0,0x3f003c9d,4 +np.float32,0xbf00c81e,0xbf06f1e2,4 +np.float32,0xbf346775,0xbf483181,4 +np.float32,0x3f7a4fe4,0x3fae077c,4 +np.float32,0x3f00776e,0x3f06948f,4 +np.float32,0xbe0a3078,0xbe0a9cbc,4 +np.float32,0xbeba0b06,0xbebe66be,4 +np.float32,0xbdff4e38,0xbdfff8b2,4 +np.float32,0xbe927f70,0xbe9492ff,4 +np.float32,0x3ebb07e0,0x3ebf7642,4 +np.float32,0x3ebcf8e0,0x3ec18c95,4 +np.float32,0x3f49bdfc,0x3f685b51,4 +np.float32,0x3cbc29c0,0x3cbc2dfd,4 +np.float32,0xbe9e951a,0xbea13bf1,4 +np.float32,0xbe8c237c,0xbe8df33d,4 +np.float32,0x3e17f198,0x3e1881c4,4 +np.float32,0xbd0b5220,0xbd0b5902,4 +np.float32,0xbf34c4a2,0xbf48b4f5,4 +np.float32,0xbedaa814,0xbee1ea94,4 +np.float32,0x3ebf5d6c,0x3ec42053,4 +np.float32,0x3cd04b40,0x3cd050ff,4 +np.float32,0xbec33fe0,0xbec85244,4 +np.float32,0xbf00b27a,0xbf06d8d8,4 +np.float32,0x3f15d7be,0x3f201243,4 +np.float32,0xbe3debd0,0xbe3f06f7,4 +np.float32,0xbea81704,0xbeab4418,4 +np.float32,0x1,0x1,4 +np.float32,0x3f49e6ba,0x3f689d8b,4 +np.float32,0x3f351030,0x3f491fc0,4 +np.float32,0x3e607de8,0x3e625482,4 +np.float32,0xbe8dbbe4,0xbe8f9c0e,4 +np.float32,0x3edbf350,0x3ee35924,4 +np.float32,0xbf0c84c4,0xbf14bf9c,4 +np.float32,0x3eb218b0,0x3eb5e61a,4 +np.float32,0x3e466dd0,0x3e47b138,4 +np.float32,0xbe8ece94,0xbe90ba01,4 +np.float32,0xbe82ec2a,0xbe84649a,4 +np.float32,0xbf7e1f10,0xbfb98b9e,4 +np.float32,0xbf2d00ea,0xbf3df688,4 +np.float32,0x3db7cdd0,0x3db80d36,4 +np.float32,0xbe388b98,0xbe398f25,4 +np.float32,0xbd86cb40,0xbd86e436,4 +np.float32,0x7f7fffff,0x7fc00000,4 +np.float32,0x3f472a60,0x3f6436c6,4 +np.float32,0xbf5b2c1d,0xbf838d87,4 +np.float32,0x3f0409ea,0x3f0abad8,4 +np.float32,0x3f47dd0e,0x3f6553f0,4 +np.float32,0x3e3eab00,0x3e3fc98a,4 +np.float32,0xbf7c2a7f,0xbfb2e19b,4 +np.float32,0xbeda0048,0xbee13112,4 +np.float32,0x3f46600a,0x3f62f5b2,4 +np.float32,0x3f45aef4,0x3f61de43,4 +np.float32,0x3dd40a50,0x3dd46bc4,4 +np.float32,0xbf6cdd0b,0xbf974191,4 +np.float32,0x3f78de4c,0x3faac725,4 
+np.float32,0x3f3c39a4,0x3f53777f,4 +np.float32,0xbe2a30ec,0xbe2afc0b,4 +np.float32,0xbf3c0ef0,0xbf533887,4 +np.float32,0x3ecb6548,0x3ed12a53,4 +np.float32,0x3eb994e8,0x3ebde7fc,4 +np.float32,0x3d4c1ee0,0x3d4c3487,4 +np.float32,0xbf52cb6d,0xbf77a7eb,4 +np.float32,0x3eb905d4,0x3ebd4e80,4 +np.float32,0x3e712428,0x3e736d72,4 +np.float32,0xbf79ee6e,0xbfad22be,4 +np.float32,0x3de6f8b0,0x3de776c1,4 +np.float32,0x3e9b2898,0x3e9da325,4 +np.float32,0x3ea09b20,0x3ea35d20,4 +np.float32,0x3d0ea9a0,0x3d0eb103,4 +np.float32,0xbd911500,0xbd913423,4 +np.float32,0x3e004618,0x3e009c97,4 +np.float32,0x3f5e0e5a,0x3f86654c,4 +np.float32,0x3f2e6300,0x3f3fd88b,4 +np.float32,0x3e0cf5d0,0x3e0d68c3,4 +np.float32,0x3d6a16c0,0x3d6a376c,4 +np.float32,0x3f7174aa,0x3f9db53c,4 +np.float32,0xbe04bba0,0xbe051b81,4 +np.float32,0xbe6fdcb4,0xbe721c92,4 +np.float32,0x3f4379f0,0x3f5e6c31,4 +np.float32,0xbf680098,0xbf913257,4 +np.float32,0xbf3c31ca,0xbf536bea,4 +np.float32,0x3f59db58,0x3f824a4e,4 +np.float32,0xbf3ffc84,0xbf591554,4 +np.float32,0x3d1d5160,0x3d1d5b48,4 +np.float32,0x3f6c64ae,0x3f96a3da,4 +np.float32,0xbf1b49fd,0xbf26daaa,4 +np.float32,0x3ec80be0,0x3ecd8576,4 +np.float32,0x3f3becc0,0x3f530629,4 +np.float32,0xbea93890,0xbeac76c1,4 +np.float32,0x3f5b3acc,0x3f839bbd,4 +np.float32,0xbf5d6818,0xbf85bef9,4 +np.float32,0x3f794266,0x3fab9fa6,4 +np.float32,0xbee8eb7c,0xbef1cf3b,4 +np.float32,0xbf360a06,0xbf4a821e,4 +np.float32,0x3f441cf6,0x3f5f693d,4 +np.float32,0x3e60de40,0x3e62b742,4 +np.float32,0xbebb3d7e,0xbebfafdc,4 +np.float32,0x3e56a3a0,0x3e583e28,4 +np.float32,0x3f375bfe,0x3f4c6499,4 +np.float32,0xbf384d7d,0xbf4dbf9a,4 +np.float32,0x3efb03a4,0x3f032c06,4 +np.float32,0x3f1d5d10,0x3f29794d,4 +np.float32,0xbe25f7dc,0xbe26b41d,4 +np.float32,0x3f6d2f88,0x3f97aebb,4 +np.float32,0xbe9fa100,0xbea255cb,4 +np.float32,0xbf21dafa,0xbf2f382a,4 +np.float32,0x3d3870e0,0x3d3880d9,4 +np.float32,0x3eeaf00c,0x3ef413f4,4 +np.float32,0xbc884ea0,0xbc88503c,4 +np.float32,0xbf7dbdad,0xbfb80b6d,4 +np.float32,0xbf4eb713,0xbf709b46,4 +np.float32,0xbf1c0ad4,0xbf27cd92,4 +np.float32,0x3f323088,0x3f451737,4 +np.float32,0x3e405d88,0x3e4183e1,4 +np.float32,0x3d7ad580,0x3d7afdb4,4 +np.float32,0xbf207338,0xbf2d6927,4 +np.float32,0xbecf7948,0xbed59e1a,4 +np.float32,0x3f16ff94,0x3f217fde,4 +np.float32,0xbdf19588,0xbdf225dd,4 +np.float32,0xbf4d9654,0xbf6eb442,4 +np.float32,0xbf390b9b,0xbf4ed220,4 +np.float32,0xbe155a74,0xbe15e354,4 +np.float32,0x3f519e4c,0x3f759850,4 +np.float32,0xbee3f08c,0xbeec3b84,4 +np.float32,0xbf478be7,0xbf64d23b,4 +np.float32,0xbefdee50,0xbf04d92a,4 +np.float32,0x3e8def78,0x3e8fd1bc,4 +np.float32,0x3e3df2a8,0x3e3f0dee,4 +np.float32,0xbf413e22,0xbf5afd97,4 +np.float32,0xbf1b8bc4,0xbf272d71,4 +np.float32,0xbf31e5be,0xbf44af22,4 +np.float32,0x3de7e080,0x3de86010,4 +np.float32,0xbf5ddf7e,0xbf863645,4 +np.float32,0x3f3eba6a,0x3f57306e,4 +np.float32,0xff7fffff,0x7fc00000,4 +np.float32,0x3ec22d5c,0x3ec72973,4 +np.float32,0x80800000,0x80800000,4 +np.float32,0x3f032e0c,0x3f09ba82,4 +np.float32,0x3d74bd60,0x3d74e2b7,4 +np.float32,0xbea0d61e,0xbea39b42,4 +np.float32,0xbefdfa78,0xbf04e02a,4 +np.float32,0x3e5cb220,0x3e5e70ec,4 +np.float32,0xbe239e54,0xbe2452a4,4 +np.float32,0x3f452738,0x3f61090e,4 +np.float32,0x3e99a2e0,0x3e9c0a66,4 +np.float32,0x3e4394d8,0x3e44ca5f,4 +np.float32,0x3f4472e2,0x3f5fef14,4 +np.float32,0xbf46bc70,0xbf638814,4 +np.float32,0xbf0b910f,0xbf139c7a,4 +np.float32,0x3f36b4a6,0x3f4b753f,4 +np.float32,0x3e0bf478,0x3e0c64f6,4 +np.float32,0x3ce02480,0x3ce02ba9,4 +np.float32,0xbd904b10,0xbd9069b1,4 
+np.float32,0xbf7f5d72,0xbfc00b70,4 +np.float32,0x3f62127e,0x3f8a8ca8,4 +np.float32,0xbf320253,0xbf44d6e4,4 +np.float32,0x3f2507be,0x3f335833,4 +np.float32,0x3f299284,0x3f395887,4 +np.float32,0xbd8211b0,0xbd82281d,4 +np.float32,0xbd3374c0,0xbd338376,4 +np.float32,0x3f36c56a,0x3f4b8d30,4 +np.float32,0xbf51f704,0xbf76331f,4 +np.float32,0xbe9871ca,0xbe9acab2,4 +np.float32,0xbe818d8c,0xbe82fa0f,4 +np.float32,0x3f08b958,0x3f103c18,4 +np.float32,0x3f22559a,0x3f2fd698,4 +np.float32,0xbf11f388,0xbf1b4db8,4 +np.float32,0x3ebe1990,0x3ec2c359,4 +np.float32,0xbe75ab38,0xbe7816b6,4 +np.float32,0x3e96102c,0x3e984c99,4 +np.float32,0xbe80d9d2,0xbe824052,4 +np.float32,0x3ef47588,0x3efeda7f,4 +np.float32,0xbe45e524,0xbe4725ea,4 +np.float32,0x3f7f9e7a,0x3fc213ff,4 +np.float32,0x3f1d3c36,0x3f294faa,4 +np.float32,0xbf3c58db,0xbf53a591,4 +np.float32,0x3f0d3d20,0x3f159c69,4 +np.float32,0x3f744be6,0x3fa23552,4 +np.float32,0x3f2e0cea,0x3f3f630e,4 +np.float32,0x3e193c10,0x3e19cff7,4 +np.float32,0xbf4150ac,0xbf5b19dd,4 +np.float32,0xbf145f72,0xbf1e4355,4 +np.float32,0xbb76cc00,0xbb76cc26,4 +np.float32,0x3f756780,0x3fa41b3e,4 +np.float32,0x3ea9b868,0x3eacfe3c,4 +np.float32,0x3d07c920,0x3d07cf7f,4 +np.float32,0xbf2263d4,0xbf2fe8ff,4 +np.float32,0x3e53b3f8,0x3e553daa,4 +np.float32,0xbf785be8,0xbfa9b5ba,4 +np.float32,0x3f324f7a,0x3f454254,4 +np.float32,0xbf2188f2,0xbf2ece5b,4 +np.float32,0xbe33781c,0xbe3466a2,4 +np.float32,0xbd3cf120,0xbd3d024c,4 +np.float32,0x3f06b18a,0x3f0dd70f,4 +np.float32,0x3f40d63e,0x3f5a5f6a,4 +np.float32,0x3f752340,0x3fa3a41e,4 +np.float32,0xbe1cf1c0,0xbe1d90bc,4 +np.float32,0xbf02d948,0xbf0957d7,4 +np.float32,0x3f73bed0,0x3fa14bf7,4 +np.float32,0x3d914920,0x3d916864,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0xbe67a5d8,0xbe69aba7,4 +np.float32,0x3f689c4a,0x3f91eb9f,4 +np.float32,0xbf196e00,0xbf248601,4 +np.float32,0xbf50dacb,0xbf7444fe,4 +np.float32,0x3f628b86,0x3f8b0e1e,4 +np.float32,0x3f6ee2f2,0x3f99fe7f,4 +np.float32,0x3ee5df40,0x3eee6492,4 +np.float32,0x3f501746,0x3f72f41b,4 +np.float32,0xbf1f0f18,0xbf2ba164,4 +np.float32,0xbf1a8bfd,0xbf25ec01,4 +np.float32,0xbd4926f0,0xbd493ba9,4 +np.float32,0xbf4e364f,0xbf6fc17b,4 +np.float32,0x3e50c578,0x3e523ed4,4 +np.float32,0x3f65bf10,0x3f8e95ce,4 +np.float32,0xbe8d75a2,0xbe8f52f2,4 +np.float32,0xbf3f557e,0xbf581962,4 +np.float32,0xbeff2bfc,0xbf05903a,4 +np.float32,0x3f5e8bde,0x3f86e3d8,4 +np.float32,0xbf7a0012,0xbfad4b9b,4 +np.float32,0x3edefce0,0x3ee6b790,4 +np.float32,0xbf0003de,0xbf060f09,4 +np.float32,0x3efc4650,0x3f03e548,4 +np.float32,0x3f4582e4,0x3f6198f5,4 +np.float32,0x3f10086c,0x3f18f9d0,4 +np.float32,0x3f1cd304,0x3f28ca77,4 +np.float32,0x3f683366,0x3f916e8d,4 +np.float32,0xbed49392,0xbedb3675,4 +np.float32,0xbf6fe5f6,0xbf9b6c0e,4 +np.float32,0xbf59b416,0xbf8224f6,4 +np.float32,0x3d20c960,0x3d20d3f4,4 +np.float32,0x3f6b00d6,0x3f94dbe7,4 +np.float32,0x3f6c26ae,0x3f965352,4 +np.float32,0xbf370ea6,0xbf4bf5dd,4 +np.float32,0x3dfe7230,0x3dff1af1,4 +np.float32,0xbefc21a8,0xbf03d038,4 +np.float32,0x3f16a990,0x3f21156a,4 +np.float32,0xbef8ac0c,0xbf01d48f,4 +np.float32,0x3f170de8,0x3f21919d,4 +np.float32,0x3db9ef80,0x3dba3122,4 +np.float32,0x3d696400,0x3d698461,4 +np.float32,0x3f007aa2,0x3f069843,4 +np.float32,0x3f22827c,0x3f3010a9,4 +np.float32,0x3f3650dc,0x3f4ae6f1,4 +np.float32,0xbf1d8037,0xbf29a5e1,4 +np.float32,0xbf08fdc4,0xbf108d0e,4 +np.float32,0xbd8df350,0xbd8e1079,4 +np.float32,0xbf36bb32,0xbf4b7e98,4 +np.float32,0x3f2e3756,0x3f3f9ced,4 +np.float32,0x3d5a6f20,0x3d5a89aa,4 +np.float32,0x3f55d568,0x3f7d1889,4 
+np.float32,0x3e1ed110,0x3e1f75d9,4 +np.float32,0x3e7386b8,0x3e75e1dc,4 +np.float32,0x3f48ea0e,0x3f670434,4 +np.float32,0x3e921fb0,0x3e942f14,4 +np.float32,0xbf0d4d0b,0xbf15af7f,4 +np.float32,0x3f179ed2,0x3f224549,4 +np.float32,0xbf3a328e,0xbf507e6d,4 +np.float32,0xbf74591a,0xbfa24b6e,4 +np.float32,0x3ec7d1c4,0x3ecd4657,4 +np.float32,0xbf6ecbed,0xbf99de85,4 +np.float32,0x3db0bd00,0x3db0f559,4 +np.float32,0x7f800000,0x7fc00000,4 +np.float32,0x3e0373b8,0x3e03d0d6,4 +np.float32,0xbf439784,0xbf5e9a04,4 +np.float32,0xbef97a9e,0xbf024ac6,4 +np.float32,0x3e4d71a8,0x3e4ed90a,4 +np.float32,0xbf14d868,0xbf1ed7e3,4 +np.float32,0xbf776870,0xbfa7ce37,4 +np.float32,0xbe32a500,0xbe339038,4 +np.float32,0xbf326d8a,0xbf456c3d,4 +np.float32,0xbe9b758c,0xbe9df3e7,4 +np.float32,0x3d9515a0,0x3d95376a,4 +np.float32,0x3e3f7320,0x3e40953e,4 +np.float32,0xbee57e7e,0xbeedf84f,4 +np.float32,0x3e821e94,0x3e838ffd,4 +np.float32,0x3f74beaa,0x3fa2f721,4 +np.float32,0xbe9b7672,0xbe9df4d9,4 +np.float32,0x3f4041fc,0x3f597e71,4 +np.float32,0xbe9ea7c4,0xbea14f92,4 +np.float32,0xbf800000,0xbfc90fdb,4 +np.float32,0x3e04fb90,0x3e055bfd,4 +np.float32,0xbf14d3d6,0xbf1ed245,4 +np.float32,0xbe84ebec,0xbe86763e,4 +np.float32,0x3f08e568,0x3f107039,4 +np.float32,0x3d8dc9e0,0x3d8de6ef,4 +np.float32,0x3ea4549c,0x3ea74a94,4 +np.float32,0xbebd2806,0xbec1bf51,4 +np.float32,0x3f311a26,0x3f439498,4 +np.float32,0xbf3d2222,0xbf54cf7e,4 +np.float32,0x3e00c500,0x3e011c81,4 +np.float32,0xbe35ed1c,0xbe36e5a9,4 +np.float32,0xbd4ec020,0xbd4ed6a0,4 +np.float32,0x3e1eb088,0x3e1f54eb,4 +np.float32,0x3cf94840,0x3cf9521a,4 +np.float32,0xbf010c5d,0xbf0740e0,4 +np.float32,0xbf3bd63b,0xbf52e502,4 +np.float32,0x3f233f30,0x3f310542,4 +np.float32,0x3ea24128,0x3ea519d7,4 +np.float32,0x3f478b38,0x3f64d124,4 +np.float32,0x3f1e0c6c,0x3f2a57ec,4 +np.float32,0xbf3ad294,0xbf51680a,4 +np.float32,0x3ede0554,0x3ee5a4b4,4 +np.float32,0x3e451a98,0x3e46577d,4 +np.float32,0x3f520164,0x3f764542,4 +np.float32,0x0,0x0,4 +np.float32,0xbd056cd0,0xbd0572db,4 +np.float32,0xbf58b018,0xbf812f5e,4 +np.float32,0x3e036eb0,0x3e03cbc3,4 +np.float32,0x3d1377a0,0x3d137fc9,4 +np.float32,0xbf692d3a,0xbf929a2c,4 +np.float32,0xbec60fb8,0xbecb5dea,4 +np.float32,0x3ed23340,0x3ed89a8e,4 +np.float32,0x3c87f040,0x3c87f1d9,4 +np.float32,0x3dac62f0,0x3dac9737,4 +np.float32,0xbed97c16,0xbee09f02,4 +np.float32,0xbf2d5f3c,0xbf3e769c,4 +np.float32,0xbc3b7c40,0xbc3b7d4c,4 +np.float32,0x3ed998ec,0x3ee0bedd,4 +np.float32,0x3dd86630,0x3dd8cdcb,4 +np.float32,0x3e8b4304,0x3e8d09ea,4 +np.float32,0x3f51e6b0,0x3f761697,4 +np.float32,0x3ec51f24,0x3eca5923,4 +np.float32,0xbf647430,0xbf8d2307,4 +np.float32,0x3f253d9c,0x3f339eb2,4 +np.float32,0x3dc969d0,0x3dc9bd4b,4 +np.float32,0xbc2f1300,0xbc2f13da,4 +np.float32,0xbf170007,0xbf21806d,4 +np.float32,0x3f757d10,0x3fa4412e,4 +np.float32,0xbe7864ac,0xbe7ae564,4 +np.float32,0x3f2ffe90,0x3f420cfb,4 +np.float32,0xbe576138,0xbe590012,4 +np.float32,0xbf517a21,0xbf755959,4 +np.float32,0xbf159cfe,0xbf1fc9d5,4 +np.float32,0xbf638b2a,0xbf8c22cf,4 +np.float32,0xff800000,0x7fc00000,4 +np.float32,0x3ed19ca0,0x3ed7f569,4 +np.float32,0x3f7c4460,0x3fb32d26,4 +np.float32,0x3ebfae6c,0x3ec477ab,4 +np.float32,0x3dd452d0,0x3dd4b4a8,4 +np.float32,0x3f471482,0x3f6413fb,4 +np.float32,0xbf49d704,0xbf6883fe,4 +np.float32,0xbd42c4e0,0xbd42d7af,4 +np.float32,0xbeb02994,0xbeb3d668,4 +np.float32,0x3f4d1fd8,0x3f6dedd2,4 +np.float32,0x3efb591c,0x3f035d11,4 +np.float32,0x80000000,0x80000000,4 +np.float32,0xbf50f782,0xbf7476ad,4 +np.float32,0x3d7232c0,0x3d7256f0,4 +np.float32,0x3f649460,0x3f8d46bb,4 
+np.float32,0x3f5561bc,0x3f7c46a9,4 +np.float32,0x3e64f6a0,0x3e66ea5d,4 +np.float32,0x3e5b0470,0x3e5cb8f9,4 +np.float32,0xbe9b6b2c,0xbe9de904,4 +np.float32,0x3f6c33f4,0x3f966486,4 +np.float32,0x3f5cee54,0x3f854613,4 +np.float32,0x3ed3e044,0x3eda716e,4 +np.float32,0xbf3cac7f,0xbf542131,4 +np.float32,0x3c723500,0x3c723742,4 +np.float32,0x3de59900,0x3de614d3,4 +np.float32,0xbdf292f8,0xbdf32517,4 +np.float32,0x3f05c8b2,0x3f0cc59b,4 +np.float32,0xbf1ab182,0xbf261b14,4 +np.float32,0xbda396f0,0xbda3c39a,4 +np.float32,0xbf270ed0,0xbf360231,4 +np.float32,0x3f2063e6,0x3f2d557e,4 +np.float32,0x3c550280,0x3c550409,4 +np.float32,0xbe103b48,0xbe10b679,4 +np.float32,0xbebae390,0xbebf4f40,4 +np.float32,0x3f3bc868,0x3f52d0aa,4 +np.float32,0xbd62f880,0xbd631647,4 +np.float32,0xbe7a38f4,0xbe7cc833,4 +np.float32,0x3f09d796,0x3f118f39,4 +np.float32,0xbf5fa558,0xbf8802d0,4 +np.float32,0x3f111cc8,0x3f1a48b0,4 +np.float32,0x3e831958,0x3e849356,4 +np.float32,0xbf614dbd,0xbf89bc3b,4 +np.float32,0xbd521510,0xbd522cac,4 +np.float32,0x3f05af22,0x3f0ca7a0,4 +np.float32,0xbf1ac60e,0xbf2634df,4 +np.float32,0xbf6bd05e,0xbf95e3fe,4 +np.float32,0xbd1fa6e0,0xbd1fb13b,4 +np.float32,0xbeb82f7a,0xbebc68b1,4 +np.float32,0xbd92aaf8,0xbd92cb23,4 +np.float32,0xbe073a54,0xbe079fbf,4 +np.float32,0xbf198655,0xbf24a468,4 +np.float32,0x3f62f6d8,0x3f8b81ba,4 +np.float32,0x3eef4310,0x3ef8f4f9,4 +np.float32,0x3e8988e0,0x3e8b3eae,4 +np.float32,0xbf3ddba5,0xbf55e367,4 +np.float32,0x3dc6d2e0,0x3dc7232b,4 +np.float32,0xbf31040e,0xbf437601,4 +np.float32,0x3f1bb74a,0x3f276442,4 +np.float32,0xbf0075d2,0xbf0692b3,4 +np.float32,0xbf606ce0,0xbf88d0ff,4 +np.float32,0xbf083856,0xbf0fa39d,4 +np.float32,0xbdb25b20,0xbdb2950a,4 +np.float32,0xbeb86860,0xbebca5ae,4 +np.float32,0x3de83160,0x3de8b176,4 +np.float32,0xbf33a98f,0xbf472664,4 +np.float32,0x3e7795f8,0x3e7a1058,4 +np.float32,0x3e0ca6f8,0x3e0d192a,4 +np.float32,0xbf1aef60,0xbf2668c3,4 +np.float32,0xbda53b58,0xbda5695e,4 +np.float32,0xbf178096,0xbf221fc5,4 +np.float32,0xbf0a4159,0xbf120ccf,4 +np.float32,0x3f7bca36,0x3fb1d0df,4 +np.float32,0xbef94360,0xbf022b26,4 +np.float32,0xbef16f36,0xbefb6ad6,4 +np.float32,0x3f53a7e6,0x3f792e25,4 +np.float32,0xbf7c536f,0xbfb35993,4 +np.float32,0xbe84aaa0,0xbe8632a2,4 +np.float32,0x3ecb3998,0x3ed0fab9,4 +np.float32,0x3f539304,0x3f79090a,4 +np.float32,0xbf3c7816,0xbf53d3b3,4 +np.float32,0xbe7a387c,0xbe7cc7b7,4 +np.float32,0x3f7000e4,0x3f9b92b1,4 +np.float32,0x3e08fd70,0x3e0966e5,4 +np.float32,0x3db97ba0,0x3db9bcc8,4 +np.float32,0xbee99056,0xbef2886a,4 +np.float32,0xbf0668da,0xbf0d819e,4 +np.float32,0x3e58a408,0x3e5a4a51,4 +np.float32,0x3f3440b8,0x3f47faed,4 +np.float32,0xbf19a2ce,0xbf24c7ff,4 +np.float32,0xbe75e990,0xbe7856ee,4 +np.float32,0x3f3c865c,0x3f53e8cb,4 +np.float32,0x3e5e03d0,0x3e5fcac9,4 +np.float32,0x3edb8e34,0x3ee2e932,4 +np.float32,0xbf7e1f5f,0xbfb98ce4,4 +np.float32,0xbf7372ff,0xbfa0d0ae,4 +np.float32,0xbf3ee850,0xbf577548,4 +np.float32,0x3ef19658,0x3efb9737,4 +np.float32,0xbe8088de,0xbe81ecaf,4 +np.float32,0x800000,0x800000,4 +np.float32,0xbde39dd8,0xbde4167a,4 +np.float32,0xbf065d7a,0xbf0d7441,4 +np.float32,0xbde52c78,0xbde5a79b,4 +np.float32,0xbe3a28c0,0xbe3b333e,4 +np.float32,0x3f6e8b3c,0x3f998516,4 +np.float32,0x3f3485c2,0x3f485c39,4 +np.float32,0x3e6f2c68,0x3e71673e,4 +np.float32,0xbe4ec9cc,0xbe50385e,4 +np.float32,0xbf1c3bb0,0xbf280b39,4 +np.float32,0x3ec8ea18,0x3ece76f7,4 +np.float32,0x3e26b5f8,0x3e2774c9,4 +np.float32,0x3e1e4a38,0x3e1eed5c,4 +np.float32,0xbee7a106,0xbef05c6b,4 +np.float32,0xbf305928,0xbf4289d8,4 
+np.float32,0x3f0c431c,0x3f147118,4 +np.float32,0xbe57ba6c,0xbe595b52,4 +np.float32,0x3eabc9cc,0x3eaf2fc7,4 +np.float32,0xbef1ed24,0xbefbf9ae,4 +np.float32,0xbf61b576,0xbf8a29cc,4 +np.float32,0x3e9c1ff4,0x3e9ea6cb,4 +np.float32,0x3f6c53b2,0x3f968dbe,4 +np.float32,0x3e2d1b80,0x3e2df156,4 +np.float32,0x3e9f2f70,0x3ea1de4a,4 +np.float32,0xbf5861ee,0xbf80e61a,4 +np.float32,0x3f429144,0x3f5d0505,4 +np.float32,0x3e235cc8,0x3e24103e,4 +np.float32,0xbf354879,0xbf496f6a,4 +np.float32,0xbf20a146,0xbf2da447,4 +np.float32,0x3e8d8968,0x3e8f6785,4 +np.float32,0x3f3fbc94,0x3f58b4c1,4 +np.float32,0x3f2c5f50,0x3f3d1b9f,4 +np.float32,0x3f7bf0f8,0x3fb23d23,4 +np.float32,0xbf218282,0xbf2ec60f,4 +np.float32,0x3f2545aa,0x3f33a93e,4 +np.float32,0xbf4b17be,0xbf6a9018,4 +np.float32,0xbb9df700,0xbb9df728,4 +np.float32,0x3f685d54,0x3f91a06c,4 +np.float32,0x3efdfe2c,0x3f04e24c,4 +np.float32,0x3ef1c5a0,0x3efbccd9,4 +np.float32,0xbf41d731,0xbf5be76e,4 +np.float32,0x3ebd1360,0x3ec1a919,4 +np.float32,0xbf706bd4,0xbf9c2d58,4 +np.float32,0x3ea525e4,0x3ea8279d,4 +np.float32,0xbe51f1b0,0xbe537186,4 +np.float32,0x3f5e8cf6,0x3f86e4f4,4 +np.float32,0xbdad2520,0xbdad5a19,4 +np.float32,0xbf5c5704,0xbf84b0e5,4 +np.float32,0x3f47b54e,0x3f65145e,4 +np.float32,0x3eb4fc78,0x3eb8fc0c,4 +np.float32,0x3dca1450,0x3dca68a1,4 +np.float32,0x3eb02a74,0x3eb3d757,4 +np.float32,0x3f74ae6a,0x3fa2db75,4 +np.float32,0x3f800000,0x3fc90fdb,4 +np.float32,0xbdb46a00,0xbdb4a5f2,4 +np.float32,0xbe9f2ba6,0xbea1da4e,4 +np.float32,0x3f0afa70,0x3f12e8f7,4 +np.float32,0xbf677b20,0xbf909547,4 +np.float32,0x3eff9188,0x3f05cacf,4 +np.float32,0x3f720562,0x3f9e911b,4 +np.float32,0xbf7180d8,0xbf9dc794,4 +np.float32,0xbee7d076,0xbef0919d,4 +np.float32,0x3f0432ce,0x3f0aea95,4 +np.float32,0x3f3bc4c8,0x3f52cb54,4 +np.float32,0xbea72f30,0xbeaa4ebe,4 +np.float32,0x3e90ed00,0x3e92ef33,4 +np.float32,0xbda63670,0xbda6654a,4 +np.float32,0xbf5a6f85,0xbf82d7e0,4 +np.float32,0x3e6e8808,0x3e70be34,4 +np.float32,0xbf4f3822,0xbf71768f,4 +np.float32,0x3e5c8a68,0x3e5e483f,4 +np.float32,0xbf0669d4,0xbf0d82c4,4 +np.float32,0xbf79f77c,0xbfad37b0,4 +np.float32,0x3f25c82c,0x3f345453,4 +np.float32,0x3f1b2948,0x3f26b188,4 +np.float32,0x3ef7e288,0x3f016159,4 +np.float32,0x3c274280,0x3c27433e,4 +np.float32,0xbf4c8fa0,0xbf6cfd5e,4 +np.float32,0x3ea4ccb4,0x3ea7c966,4 +np.float32,0xbf7b157e,0xbfafefca,4 +np.float32,0xbee4c2b0,0xbeed264d,4 +np.float32,0xbc1fd640,0xbc1fd6e6,4 +np.float32,0x3e892308,0x3e8ad4f6,4 +np.float32,0xbf3f69c7,0xbf5837ed,4 +np.float32,0x3ec879e8,0x3ecdfd05,4 +np.float32,0x3f07a8c6,0x3f0efa30,4 +np.float32,0x3f67b880,0x3f90dd4d,4 +np.float32,0x3e8a11c8,0x3e8bccd5,4 +np.float32,0x3f7df6fc,0x3fb8e935,4 +np.float32,0xbef3e498,0xbefe3599,4 +np.float32,0xbf18ad7d,0xbf2395d8,4 +np.float32,0x3f2bce74,0x3f3c57f5,4 +np.float32,0xbf38086e,0xbf4d5c2e,4 +np.float32,0x3f772d7a,0x3fa75c35,4 +np.float32,0xbf3b6e24,0xbf524c00,4 +np.float32,0xbdd39108,0xbdd3f1d4,4 +np.float32,0xbf691f6b,0xbf928974,4 +np.float32,0x3f146188,0x3f1e45e4,4 +np.float32,0xbf56045b,0xbf7d6e03,4 +np.float32,0xbf4b2ee4,0xbf6ab622,4 +np.float32,0xbf3fa3f6,0xbf588f9d,4 +np.float32,0x3f127bb0,0x3f1bf398,4 +np.float32,0x3ed858a0,0x3edf5d3e,4 +np.float32,0xbd6de3b0,0xbd6e05fa,4 +np.float32,0xbecc662c,0xbed24261,4 +np.float32,0xbd6791d0,0xbd67b170,4 +np.float32,0xbf146016,0xbf1e441e,4 +np.float32,0xbf61f04c,0xbf8a6841,4 +np.float32,0xbe7f16d0,0xbe80e6e7,4 +np.float32,0xbebf93e6,0xbec45b10,4 +np.float32,0xbe8a59fc,0xbe8c17d1,4 +np.float32,0xbebc7a0c,0xbec10426,4 +np.float32,0xbf2a682e,0xbf3a7649,4 
+np.float32,0xbe18d0cc,0xbe19637b,4 +np.float32,0x3d7f5100,0x3d7f7b66,4 +np.float32,0xbf10f5fa,0xbf1a1998,4 +np.float32,0x3f25e956,0x3f347fdc,4 +np.float32,0x3e6e8658,0x3e70bc78,4 +np.float32,0x3f21a5de,0x3f2ef3a5,4 +np.float32,0xbf4e71d4,0xbf702607,4 +np.float32,0xbf49d6b6,0xbf688380,4 +np.float32,0xbdb729c0,0xbdb7687c,4 +np.float32,0xbf63e1f4,0xbf8c81c7,4 +np.float32,0x3dda6cb0,0x3ddad73e,4 +np.float32,0x3ee1bc40,0x3ee9c612,4 +np.float32,0x3ebdb5f8,0x3ec2581b,4 +np.float32,0x3f7d9576,0x3fb77646,4 +np.float32,0x3e087140,0x3e08d971,4 +np.float64,0xbfdba523cfb74a48,0xbfdc960ddd9c0506,1 +np.float64,0x3fb51773622a2ee0,0x3fb51d93f77089d5,1 +np.float64,0x3fc839f6d33073f0,0x3fc85f9a47dfe8e6,1 +np.float64,0xbfecba2d82f9745b,0xbff1d55416c6c993,1 +np.float64,0x3fd520fe47aa41fc,0x3fd58867f1179634,1 +np.float64,0x3fe1b369c56366d4,0x3fe2c1ac9dd2c45a,1 +np.float64,0xbfec25a7cd784b50,0xbff133417389b12d,1 +np.float64,0xbfd286342ea50c68,0xbfd2cb0bca22e66d,1 +np.float64,0x3fd5f6fe5eabedfc,0x3fd66bad16680d08,1 +np.float64,0xbfe863a87570c751,0xbfebbb9b637eb6dc,1 +np.float64,0x3fc97f5b4d32feb8,0x3fc9ab5066d8eaec,1 +np.float64,0xbfcb667af936ccf4,0xbfcb9d3017047a1d,1 +np.float64,0xbfd1b7b9afa36f74,0xbfd1f3c175706154,1 +np.float64,0x3fef97385b7f2e70,0x3ff6922a1a6c709f,1 +np.float64,0xbfd13e4205a27c84,0xbfd1757c993cdb74,1 +np.float64,0xbfd18d88aca31b12,0xbfd1c7dd75068f7d,1 +np.float64,0x3fe040ce0f60819c,0x3fe10c59d2a27089,1 +np.float64,0xbfddc7deddbb8fbe,0xbfdef9de5baecdda,1 +np.float64,0xbfcf6e96193edd2c,0xbfcfc1bb7396b9a3,1 +np.float64,0x3fd544f494aa89e8,0x3fd5ae850e2b37dd,1 +np.float64,0x3fe15b381fe2b670,0x3fe25841c7bfe2af,1 +np.float64,0xbfde793420bcf268,0xbfdfc2ddc7b4a341,1 +np.float64,0x3fd0d5db30a1abb8,0x3fd1092cef4aa4fb,1 +np.float64,0x3fe386a08c670d42,0x3fe50059bbf7f491,1 +np.float64,0xbfe0aae3a96155c8,0xbfe1880ef13e95ce,1 +np.float64,0xbfe80eeb03f01dd6,0xbfeb39e9f107e944,1 +np.float64,0xbfd531af3caa635e,0xbfd59a178f17552a,1 +np.float64,0x3fcced14ab39da28,0x3fcd2d9a806337ef,1 +np.float64,0xbfdb4c71bcb698e4,0xbfdc33d9d9daf708,1 +np.float64,0xbfde7375ecbce6ec,0xbfdfbc5611bc48ff,1 +np.float64,0x3fecc5707a798ae0,0x3ff1e2268d778017,1 +np.float64,0x3fe8f210a1f1e422,0x3fec9b3349a5baa2,1 +np.float64,0x3fe357f9b8e6aff4,0x3fe4c5a0b89a9228,1 +np.float64,0xbfe0f863b761f0c8,0xbfe1e3283494c3d4,1 +np.float64,0x3fd017c395a02f88,0x3fd044761f2f4a66,1 +np.float64,0x3febeb4746f7d68e,0x3ff0f6b955e7feb6,1 +np.float64,0xbfbdaaeeae3b55e0,0xbfbdbc0950109261,1 +np.float64,0xbfea013095f40261,0xbfee5b8fe8ad8593,1 +np.float64,0xbfe9f87b7973f0f7,0xbfee4ca3a8438d72,1 +np.float64,0x3fd37f77cfa6fef0,0x3fd3d018c825f057,1 +np.float64,0x3fb0799cee20f340,0x3fb07c879e7cb63f,1 +np.float64,0xbfdcfd581cb9fab0,0xbfde15e35314b52d,1 +np.float64,0xbfd49781b8a92f04,0xbfd4f6fa1516fefc,1 +np.float64,0x3fb3fcb6d627f970,0x3fb401ed44a713a8,1 +np.float64,0x3fd5737ef8aae6fc,0x3fd5dfe42d4416c7,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfe56ae780ead5cf,0xbfe776ea5721b900,1 +np.float64,0x3fd4567786a8acf0,0x3fd4b255421c161a,1 +np.float64,0x3fef6fb58cfedf6c,0x3ff62012dfcf0a33,1 +np.float64,0xbfd1dbcd3da3b79a,0xbfd2194fd628f74d,1 +np.float64,0x3fd9350016b26a00,0x3fd9e8b01eb023e9,1 +np.float64,0xbfe4fb3a69e9f675,0xbfe6e1d2c9eca56c,1 +np.float64,0x3fe9fe0f73f3fc1e,0x3fee5631cfd39772,1 +np.float64,0xbfd51c1bc6aa3838,0xbfd5833b3bd53543,1 +np.float64,0x3fc64158e12c82b0,0x3fc65e7352f237d7,1 +np.float64,0x3fd0d8ee1ba1b1dc,0x3fd10c5c99a16f0e,1 +np.float64,0x3fd5554e15aaaa9c,0x3fd5bfdb9ec9e873,1 
+np.float64,0x3fe61ce209ec39c4,0x3fe869bc4c28437d,1 +np.float64,0xbfe4e42c8c69c859,0xbfe6c356dac7e2db,1 +np.float64,0xbfe157021062ae04,0xbfe2533ed39f4212,1 +np.float64,0x3fe844066cf0880c,0x3feb8aea0b7bd0a4,1 +np.float64,0x3fe55016586aa02c,0x3fe752e4b2a67b9f,1 +np.float64,0x3fdabce619b579cc,0x3fdb95809bc789d9,1 +np.float64,0x3fee03bae37c0776,0x3ff3778ba38ca882,1 +np.float64,0xbfeb2f5844f65eb0,0xbff03dd1b767d3c8,1 +np.float64,0x3fedcfdbaffb9fb8,0x3ff32e81d0639164,1 +np.float64,0x3fe06fc63ee0df8c,0x3fe142fc27f92eaf,1 +np.float64,0x3fe7ce90fd6f9d22,0x3fead8f832bbbf5d,1 +np.float64,0xbfbc0015ce380028,0xbfbc0e7470e06e86,1 +np.float64,0xbfe9b3de90f367bd,0xbfedd857931dfc6b,1 +np.float64,0xbfcb588f5936b120,0xbfcb8ef0124a4f21,1 +np.float64,0x3f8d376a503a6f00,0x3f8d37ab43e7988d,1 +np.float64,0xbfdb123a40b62474,0xbfdbf38b6cf5db92,1 +np.float64,0xbfee7da6be7cfb4e,0xbff433042cd9d5eb,1 +np.float64,0xbfc4c9e01b2993c0,0xbfc4e18dbafe37ef,1 +np.float64,0x3fedd42faffba860,0x3ff334790cd18a19,1 +np.float64,0x3fe9cdf772f39bee,0x3fee044f87b856ab,1 +np.float64,0x3fe0245881e048b2,0x3fe0eb5a1f739c8d,1 +np.float64,0xbfe4712bd9e8e258,0xbfe62cb3d82034aa,1 +np.float64,0x3fe9a16b46f342d6,0x3fedb972b2542551,1 +np.float64,0xbfe57ab4536af568,0xbfe78c34b03569c2,1 +np.float64,0x3fb6d6ceb22dada0,0x3fb6de976964d6dd,1 +np.float64,0x3fc3ac23a3275848,0x3fc3c02de53919b8,1 +np.float64,0xbfccb531e7396a64,0xbfccf43ec69f6281,1 +np.float64,0xbfd2f07fc8a5e100,0xbfd33a35a8c41b62,1 +np.float64,0xbfe3e5dd04e7cbba,0xbfe57940157c27ba,1 +np.float64,0x3feefe40757dfc80,0x3ff51bc72b846af6,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fecb7b766796f6e,0x3ff1d28972a0fc7e,1 +np.float64,0xbfea1bf1357437e2,0xbfee89a6532bfd71,1 +np.float64,0xbfca3983b7347308,0xbfca696463b791ef,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbf886b45d030d680,0xbf886b6bbc04314b,1 +np.float64,0x3fd5224bb5aa4498,0x3fd589c92e82218f,1 +np.float64,0xbfec799874f8f331,0xbff18d5158b8e640,1 +np.float64,0xbf88124410302480,0xbf88126863350a16,1 +np.float64,0xbfe37feaaa66ffd6,0xbfe4f7e24382e79d,1 +np.float64,0x3fd777eca1aeefd8,0x3fd8076ead6d55dc,1 +np.float64,0x3fecaaeb3af955d6,0x3ff1c4159fa3e965,1 +np.float64,0xbfeb81e4e6f703ca,0xbff08d4e4c77fada,1 +np.float64,0xbfd7d0a0edafa142,0xbfd866e37010312e,1 +np.float64,0x3feda48c00fb4918,0x3ff2f3fd33c36307,1 +np.float64,0x3feb87ecc4770fda,0x3ff09336e490deda,1 +np.float64,0xbfefd78ad27faf16,0xbff78abbafb50ac1,1 +np.float64,0x3fe58e918c6b1d24,0x3fe7a70b38cbf016,1 +np.float64,0x3fda163b95b42c78,0x3fdade86b88ba4ee,1 +np.float64,0x3fe8fc1aaf71f836,0x3fecab3f93b59df5,1 +np.float64,0xbf8de56f903bcac0,0xbf8de5b527cec797,1 +np.float64,0xbfec112db2f8225b,0xbff11dd648de706f,1 +np.float64,0x3fc3214713264290,0x3fc333b1c862f7d0,1 +np.float64,0xbfeb5e5836f6bcb0,0xbff06ac364b49177,1 +np.float64,0x3fc23d9777247b30,0x3fc24d8ae3bcb615,1 +np.float64,0xbfdf0eed65be1dda,0xbfe036cea9b9dfb6,1 +np.float64,0xbfb2d5c85a25ab90,0xbfb2da24bb409ff3,1 +np.float64,0xbfecdda0c3f9bb42,0xbff1fdf94fc6e89e,1 +np.float64,0x3fdfe79154bfcf24,0x3fe0b338e0476a9d,1 +np.float64,0xbfd712ac6bae2558,0xbfd79abde21f287b,1 +np.float64,0x3fea3f148a747e2a,0x3feec6bed9d4fa04,1 +np.float64,0x3fd4879e4ca90f3c,0x3fd4e632fa4e2edd,1 +np.float64,0x3fe9137a9e7226f6,0x3fecd0c441088d6a,1 +np.float64,0xbfc75bf4ef2eb7e8,0xbfc77da8347d742d,1 +np.float64,0xbfd94090a0b28122,0xbfd9f5458816ed5a,1 +np.float64,0x3fde439cbcbc8738,0x3fdf85fbf496b61f,1 +np.float64,0xbfe18bacdce3175a,0xbfe29210e01237f7,1 +np.float64,0xbfd58ec413ab1d88,0xbfd5fcd838f0a934,1 
+np.float64,0xbfeae5af2d75cb5e,0xbfeff1de1b4a06be,1 +np.float64,0x3fb64d1a162c9a30,0x3fb65458fb831354,1 +np.float64,0x3fc18b1e15231640,0x3fc1994c6ffd7a6a,1 +np.float64,0xbfd7b881bcaf7104,0xbfd84ce89a9ee8c7,1 +np.float64,0x3feb916a40f722d4,0x3ff09c8aa851d7c4,1 +np.float64,0x3fdab5fbb5b56bf8,0x3fdb8de43961bbde,1 +np.float64,0x3fe4f35402e9e6a8,0x3fe6d75dc5082894,1 +np.float64,0x3fe2fdb2e5e5fb66,0x3fe454e32a5d2182,1 +np.float64,0x3fe8607195f0c0e4,0x3febb6a4c3bf6a5c,1 +np.float64,0x3fd543ca9aaa8794,0x3fd5ad49203ae572,1 +np.float64,0x3fe8e05ca1f1c0ba,0x3fec7eff123dcc58,1 +np.float64,0x3fe298b6ca65316e,0x3fe3d81d2927c4dd,1 +np.float64,0x3fcfecea733fd9d8,0x3fd0220f1d0faf78,1 +np.float64,0xbfe2e739f065ce74,0xbfe439004e73772a,1 +np.float64,0xbfd1ae6b82a35cd8,0xbfd1ea129a5ee756,1 +np.float64,0xbfeb7edff576fdc0,0xbff08a5a638b8a8b,1 +np.float64,0x3fe5b645ff6b6c8c,0x3fe7dcee1faefe3f,1 +np.float64,0xbfd478427ba8f084,0xbfd4d5fc7c239e60,1 +np.float64,0xbfe39904e3e7320a,0xbfe517972b30b1e5,1 +np.float64,0xbfd3b75b6ba76eb6,0xbfd40acf20a6e074,1 +np.float64,0x3fd596267aab2c4c,0x3fd604b01faeaf75,1 +np.float64,0x3fe134463762688c,0x3fe229fc36784a72,1 +np.float64,0x3fd25dadf7a4bb5c,0x3fd2a0b9e04ea060,1 +np.float64,0xbfc05d3e0b20ba7c,0xbfc068bd2bb9966f,1 +np.float64,0x3f8cf517b039ea00,0x3f8cf556ed74b163,1 +np.float64,0x3fda87361cb50e6c,0x3fdb5a75af897e7f,1 +np.float64,0x3fe53e1926ea7c32,0x3fe73acf01b8ff31,1 +np.float64,0x3fe2e94857e5d290,0x3fe43b8cc820f9c7,1 +np.float64,0x3fd81fe6acb03fcc,0x3fd8bc623c0068cf,1 +np.float64,0xbfddf662c3bbecc6,0xbfdf2e76dc90786e,1 +np.float64,0x3fece174fbf9c2ea,0x3ff2026a1a889580,1 +np.float64,0xbfdc83c5b8b9078c,0xbfdd8dcf6ee3b7da,1 +np.float64,0x3feaf5448f75ea8a,0x3ff0075b108bcd0d,1 +np.float64,0xbfebf32f7ef7e65f,0xbff0fed42aaa826a,1 +np.float64,0x3fe389e5e8e713cc,0x3fe5047ade055ccb,1 +np.float64,0x3f635cdcc026ba00,0x3f635cddeea082ce,1 +np.float64,0x3fae580f543cb020,0x3fae5c9d5108a796,1 +np.float64,0x3fec9fafce793f60,0x3ff1b77bec654f00,1 +np.float64,0x3fb19d226e233a40,0x3fb1a0b32531f7ee,1 +np.float64,0xbfdf9a71e7bf34e4,0xbfe086cef88626c7,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xbfef170ba2fe2e17,0xbff54ed4675f5b8a,1 +np.float64,0xbfcc6e2f8f38dc60,0xbfccab65fc34d183,1 +np.float64,0x3fee756c4bfcead8,0x3ff4258782c137e6,1 +np.float64,0xbfd461c218a8c384,0xbfd4be3e391f0ff4,1 +np.float64,0xbfe3b64686e76c8d,0xbfe53caa16d6c90f,1 +np.float64,0xbfc1c65d8d238cbc,0xbfc1d51e58f82403,1 +np.float64,0x3fe6e06c63edc0d8,0x3fe97cb832eeb6a2,1 +np.float64,0xbfc9fc20b933f840,0xbfca2ab004312d85,1 +np.float64,0xbfe29aa6df65354e,0xbfe3da7ecf3ba466,1 +np.float64,0x3fea4df7d1749bf0,0x3feee0d448bd4746,1 +np.float64,0xbfedec6161fbd8c3,0xbff3563e1d943aa2,1 +np.float64,0x3fdb6f0437b6de08,0x3fdc5a1888b1213d,1 +np.float64,0xbfe270cbd3e4e198,0xbfe3a72ac27a0b0c,1 +np.float64,0xbfdfff8068bfff00,0xbfe0c1088e3b8983,1 +np.float64,0xbfd28edbe6a51db8,0xbfd2d416c8ed363e,1 +np.float64,0xbfb4e35f9229c6c0,0xbfb4e9531d2a737f,1 +np.float64,0xbfee6727e97cce50,0xbff40e7717576e46,1 +np.float64,0xbfddb5fbddbb6bf8,0xbfdee5aad78f5361,1 +np.float64,0xbfdf9d3e9dbf3a7e,0xbfe0886b191f2957,1 +np.float64,0x3fa57e77042afce0,0x3fa5801518ea9342,1 +np.float64,0x3f95c4e4882b89c0,0x3f95c55003c8e714,1 +np.float64,0x3fd9b10f61b36220,0x3fda6fe5d635a8aa,1 +np.float64,0xbfe2973411652e68,0xbfe3d641fe9885fd,1 +np.float64,0xbfee87bd5a7d0f7b,0xbff443bea81b3fff,1 +np.float64,0x3f9ea064c83d40c0,0x3f9ea19025085b2f,1 +np.float64,0xbfe4b823dfe97048,0xbfe689623d30dc75,1 +np.float64,0xbfa06a326c20d460,0xbfa06aeacbcd3eb8,1 
+np.float64,0x3fe1e5c4c1e3cb8a,0x3fe2fe44b822f20e,1 +np.float64,0x3f99dafaa833b600,0x3f99dbaec10a1a0a,1 +np.float64,0xbfed7cb3877af967,0xbff2bfe9e556aaf9,1 +np.float64,0x3fd604f2e2ac09e4,0x3fd67a89408ce6ba,1 +np.float64,0x3fec57b60f78af6c,0x3ff16881f46d60f7,1 +np.float64,0xbfea2e3a17745c74,0xbfeea95c7190fd42,1 +np.float64,0xbfd60a7c37ac14f8,0xbfd6806ed642de35,1 +np.float64,0xbfe544b9726a8973,0xbfe743ac399d81d7,1 +np.float64,0xbfd13520faa26a42,0xbfd16c02034a8fe0,1 +np.float64,0xbfea9ea59ff53d4b,0xbfef70538ee12e00,1 +np.float64,0x3fd66633f8accc68,0x3fd6e23c13ab0e9e,1 +np.float64,0xbfe4071bd3e80e38,0xbfe5a3c9ba897d81,1 +np.float64,0xbfbe1659fa3c2cb0,0xbfbe2831d4fed196,1 +np.float64,0xbfd3312777a6624e,0xbfd37df09b9baeba,1 +np.float64,0x3fd13997caa27330,0x3fd170a4900c8907,1 +np.float64,0xbfe7cbc235ef9784,0xbfead4c4d6cbf129,1 +np.float64,0xbfe1456571628acb,0xbfe23e4ec768c8e2,1 +np.float64,0xbfedf1a044fbe340,0xbff35da96773e176,1 +np.float64,0x3fce38b1553c7160,0x3fce8270709774f9,1 +np.float64,0xbfecb01761f9602f,0xbff1c9e9d382f1f8,1 +np.float64,0xbfe0a03560e1406b,0xbfe17b8d5a1ca662,1 +np.float64,0x3fe50f37cbea1e70,0x3fe6fc55e1ae7da6,1 +np.float64,0xbfe12d64a0625aca,0xbfe221d3a7834e43,1 +np.float64,0xbf6fb288403f6500,0xbf6fb28d6f389db6,1 +np.float64,0x3fda831765b50630,0x3fdb55eecae58ca9,1 +np.float64,0x3fe1a0fe4c6341fc,0x3fe2ab9564304425,1 +np.float64,0xbfef2678a77e4cf1,0xbff56ff42b2797bb,1 +np.float64,0xbfab269c1c364d40,0xbfab29df1cd48779,1 +np.float64,0x3fe8ec82a271d906,0x3fec92567d7a6675,1 +np.float64,0xbfc235115f246a24,0xbfc244ee567682ea,1 +np.float64,0x3feef5bf8d7deb80,0x3ff50ad4875ee9bd,1 +np.float64,0x3fe768b5486ed16a,0x3fea421356160e65,1 +np.float64,0xbfd4255684a84aae,0xbfd47e8baf7ec7f6,1 +np.float64,0x3fc7f67f2b2fed00,0x3fc81ae83cf92dd5,1 +np.float64,0x3fe9b1b19a736364,0x3fedd4b0e24ee741,1 +np.float64,0x3fb27eb9e624fd70,0x3fb282dacd89ce28,1 +np.float64,0xbfd490b710a9216e,0xbfd4efcdeb213458,1 +np.float64,0xbfd1347b2ca268f6,0xbfd16b55dece2d38,1 +np.float64,0x3fc6a5668d2d4ad0,0x3fc6c41452c0c087,1 +np.float64,0xbfca7b209f34f640,0xbfcaac710486f6bd,1 +np.float64,0x3fc23a1a47247438,0x3fc24a047fd4c27a,1 +np.float64,0x3fdb1413a8b62828,0x3fdbf595e2d994bc,1 +np.float64,0xbfea69b396f4d367,0xbfef11bdd2b0709a,1 +np.float64,0x3fd14c9958a29934,0x3fd1846161b10422,1 +np.float64,0xbfe205f44be40be8,0xbfe325283aa3c6a8,1 +np.float64,0x3fecd03c9ef9a07a,0x3ff1ee85aaf52a01,1 +np.float64,0x3fe34281d7e68504,0x3fe4aab63e6de816,1 +np.float64,0xbfe120e2376241c4,0xbfe213023ab03939,1 +np.float64,0xbfe951edc4f2a3dc,0xbfed3615e38576f8,1 +np.float64,0x3fe5a2286f6b4450,0x3fe7c196e0ec10ed,1 +np.float64,0xbfed7a3e1f7af47c,0xbff2bcc0793555d2,1 +np.float64,0x3fe050274960a04e,0x3fe11e2e256ea5cc,1 +np.float64,0xbfcfa71f653f4e40,0xbfcffc11483d6a06,1 +np.float64,0x3f6ead2e403d5a00,0x3f6ead32f314c052,1 +np.float64,0x3fe3a2a026674540,0x3fe523bfe085f6ec,1 +np.float64,0xbfe294a62e65294c,0xbfe3d31ebd0b4ca2,1 +np.float64,0xbfb4894d06291298,0xbfb48ef4b8e256b8,1 +np.float64,0xbfc0c042c1218084,0xbfc0cc98ac2767c4,1 +np.float64,0xbfc6a32cb52d4658,0xbfc6c1d1597ed06b,1 +np.float64,0xbfd30f7777a61eee,0xbfd35aa39fee34eb,1 +np.float64,0x3fe7fc2c2eeff858,0x3feb1d8a558b5537,1 +np.float64,0x7fefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfdadf917bb5bf22,0xbfdbbbae9a9f67a0,1 +np.float64,0xbfcf0395e13e072c,0xbfcf5366015f7362,1 +np.float64,0xbfe8644c9170c899,0xbfebbc98e74a227d,1 +np.float64,0x3fc3b2d8e52765b0,0x3fc3c6f7d44cffaa,1 +np.float64,0x3fc57407b92ae810,0x3fc58e12ccdd47a1,1 +np.float64,0x3fd56a560daad4ac,0x3fd5d62b8dfcc058,1 
+np.float64,0x3fd595deefab2bbc,0x3fd6046420b2f79b,1 +np.float64,0xbfd5360f50aa6c1e,0xbfd59ebaacd815b8,1 +np.float64,0x3fdfb6aababf6d54,0x3fe0970b8aac9f61,1 +np.float64,0x3ff0000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfeb3a8958f67513,0xbff04872e8278c79,1 +np.float64,0x3f9e1ea6683c3d40,0x3f9e1fc326186705,1 +np.float64,0x3fe6b6d5986d6dac,0x3fe94175bd60b19d,1 +np.float64,0xbfee4d90b77c9b21,0xbff3e60e9134edc2,1 +np.float64,0x3fd806ce0cb00d9c,0x3fd8a14c4855a8f5,1 +np.float64,0x3fd54acc75aa9598,0x3fd5b4b72fcbb5df,1 +np.float64,0xbfe59761f16b2ec4,0xbfe7b2fa5d0244ac,1 +np.float64,0xbfcd4fa3513a9f48,0xbfcd92d0814a5383,1 +np.float64,0xbfdc827523b904ea,0xbfdd8c577b53053c,1 +np.float64,0xbfd4bb7f34a976fe,0xbfd51d00d9a99360,1 +np.float64,0xbfe818bc87f03179,0xbfeb48d1ea0199c5,1 +np.float64,0xbfa8a2e15c3145c0,0xbfa8a5510ba0e45c,1 +np.float64,0xbfb6d15f422da2c0,0xbfb6d922689da015,1 +np.float64,0x3fcd04eaab3a09d8,0x3fcd46131746ef08,1 +np.float64,0x3fcfb5cfbb3f6ba0,0x3fd0059d308237f3,1 +np.float64,0x3fe8dcf609f1b9ec,0x3fec7997973010b6,1 +np.float64,0xbfdf1834d7be306a,0xbfe03c1d4e2b48f0,1 +np.float64,0x3fee82ae50fd055c,0x3ff43b545066fe1a,1 +np.float64,0xbfde039c08bc0738,0xbfdf3d6ed4d2ee5c,1 +np.float64,0x3fec07389bf80e72,0x3ff1137ed0acd161,1 +np.float64,0xbfef44c010fe8980,0xbff5b488ad22a4c5,1 +np.float64,0x3f76e722e02dce00,0x3f76e72ab2759d88,1 +np.float64,0xbfcaa9e6053553cc,0xbfcadc41125fca93,1 +np.float64,0x3fed6088147ac110,0x3ff29c06c4ef35fc,1 +np.float64,0x3fd32bd836a657b0,0x3fd3785fdb75909f,1 +np.float64,0xbfeedbb1d97db764,0xbff4d87f6c82a93c,1 +np.float64,0xbfe40f31d5e81e64,0xbfe5ae292cf258a2,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbfeb2b25bc76564c,0xbff039d81388550c,1 +np.float64,0x3fec5008fa78a012,0x3ff1604195801da3,1 +np.float64,0x3fce2d4f293c5aa0,0x3fce76b99c2db4da,1 +np.float64,0xbfdc435412b886a8,0xbfdd45e7b7813f1e,1 +np.float64,0x3fdf2c9d06be593c,0x3fe047cb03c141b6,1 +np.float64,0x3fddefc61ebbdf8c,0x3fdf26fb8fad9fae,1 +np.float64,0x3fab50218436a040,0x3fab537395eaf3bb,1 +np.float64,0xbfd5b95a8fab72b6,0xbfd62a191a59343a,1 +np.float64,0x3fdbf803b4b7f008,0x3fdcf211578e98c3,1 +np.float64,0xbfec8c255979184b,0xbff1a1bee108ed30,1 +np.float64,0x3fe33cdaffe679b6,0x3fe4a3a318cd994f,1 +np.float64,0x3fd8cf585cb19eb0,0x3fd97a408bf3c38c,1 +np.float64,0x3fe919dde07233bc,0x3fecdb0ea13a2455,1 +np.float64,0xbfd5ba35e4ab746c,0xbfd62b024805542d,1 +np.float64,0x3fd2f933e7a5f268,0x3fd343527565e97c,1 +np.float64,0xbfe5b9f8ddeb73f2,0xbfe7e1f772c3e438,1 +np.float64,0x3fe843cd92f0879c,0x3feb8a92d68eae3e,1 +np.float64,0xbfd096b234a12d64,0xbfd0c7beca2c6605,1 +np.float64,0xbfef3363da7e66c8,0xbff58c98dde6c27c,1 +np.float64,0x3fd51b01ddaa3604,0x3fd582109d89ead1,1 +np.float64,0x3fea0f10ff741e22,0x3fee736c2d2a2067,1 +np.float64,0x3fc276e7b724edd0,0x3fc28774520bc6d4,1 +np.float64,0xbfef9abc9f7f3579,0xbff69d49762b1889,1 +np.float64,0x3fe1539ec0e2a73e,0x3fe24f370b7687d0,1 +np.float64,0x3fad72350c3ae460,0x3fad765e7766682a,1 +np.float64,0x3fa289a47c251340,0x3fa28aae12f41646,1 +np.float64,0xbfe5c488e5eb8912,0xbfe7f05d7e7dcddb,1 +np.float64,0xbfc22ef1d7245de4,0xbfc23ebeb990a1b8,1 +np.float64,0x3fe59a0b80eb3418,0x3fe7b695fdcba1de,1 +np.float64,0xbfe9cad619f395ac,0xbfedff0514d91e2c,1 +np.float64,0x3fc8bc74eb3178e8,0x3fc8e48cb22da666,1 +np.float64,0xbfc5389a3f2a7134,0xbfc551cd6febc544,1 +np.float64,0x3fce82feb33d0600,0x3fceceecce2467ef,1 +np.float64,0x3fda346791b468d0,0x3fdaff95154a4ca6,1 +np.float64,0x3fd04501fea08a04,0x3fd073397b32607e,1 +np.float64,0xbfb6be498a2d7c90,0xbfb6c5f93aeb0e57,1 
+np.float64,0x3fe1f030dd63e062,0x3fe30ad8fb97cce0,1 +np.float64,0xbfee3fb36dfc7f67,0xbff3d0a5e380b86f,1 +np.float64,0xbfa876773c30ecf0,0xbfa878d9d3df6a3f,1 +np.float64,0x3fdb58296eb6b054,0x3fdc40ceffb17f82,1 +np.float64,0xbfea16b5d8742d6c,0xbfee809b99fd6adc,1 +np.float64,0xbfdc5062b6b8a0c6,0xbfdd547623275fdb,1 +np.float64,0x3fef6db242fedb64,0x3ff61ab4cdaef467,1 +np.float64,0xbfc9f778f933eef0,0xbfca25eef1088167,1 +np.float64,0xbfd22063eba440c8,0xbfd260c8766c69cf,1 +np.float64,0x3fdd2379f2ba46f4,0x3fde40b025cb1ffa,1 +np.float64,0xbfea967af2f52cf6,0xbfef61a178774636,1 +np.float64,0x3fe4f5b49fe9eb6a,0x3fe6da8311a5520e,1 +np.float64,0x3feccde17b799bc2,0x3ff1ebd0ea228b71,1 +np.float64,0x3fe1bb76506376ec,0x3fe2cb56fca01840,1 +np.float64,0xbfef94e583ff29cb,0xbff68aeab8ba75a2,1 +np.float64,0x3fed024a55fa0494,0x3ff228ea5d456e9d,1 +np.float64,0xbfe877b2a8f0ef65,0xbfebdaa1a4712459,1 +np.float64,0x3fef687a8d7ed0f6,0x3ff60cf5fef8d448,1 +np.float64,0xbfeeb2dc8afd65b9,0xbff48dda6a906cd6,1 +np.float64,0x3fdb2e28aeb65c50,0x3fdc12620655eb7a,1 +np.float64,0x3fedc1863afb830c,0x3ff31ae823315e83,1 +np.float64,0xbfe6b1bb546d6376,0xbfe93a38163e3a59,1 +np.float64,0x3fe479c78468f390,0x3fe637e5c0fc5730,1 +np.float64,0x3fbad1fade35a3f0,0x3fbade9a43ca05cf,1 +np.float64,0xbfe2d1c563e5a38b,0xbfe41e712785900c,1 +np.float64,0xbfc08c33ed211868,0xbfc09817a752d500,1 +np.float64,0xbfecce0935f99c12,0xbff1ebfe84524037,1 +np.float64,0x3fce4ef0e73c9de0,0x3fce995638a3dc48,1 +np.float64,0xbfd2fb2343a5f646,0xbfd345592517ca18,1 +np.float64,0x3fd848f7cdb091f0,0x3fd8e8bee5f7b49a,1 +np.float64,0x3fe532b7d2ea6570,0x3fe72b9ac747926a,1 +np.float64,0x3fd616aadcac2d54,0x3fd68d692c5cad42,1 +np.float64,0x3fd7720eb3aee41c,0x3fd801206a0e1e43,1 +np.float64,0x3fee835a35fd06b4,0x3ff43c7175eb7a54,1 +np.float64,0xbfe2e8f70b65d1ee,0xbfe43b2800a947a7,1 +np.float64,0xbfed38f45d7a71e9,0xbff26acd6bde7174,1 +np.float64,0xbfc0c62661218c4c,0xbfc0d28964d66120,1 +np.float64,0x3fe97940bef2f282,0x3fed76b986a74ee3,1 +np.float64,0x3fc96f7dc532def8,0x3fc99b20044c8fcf,1 +np.float64,0xbfd60201eeac0404,0xbfd677675efaaedc,1 +np.float64,0x3fe63c0867ec7810,0x3fe894f060200140,1 +np.float64,0xbfef6144b37ec289,0xbff5fa589a515ba8,1 +np.float64,0xbfde2da0c8bc5b42,0xbfdf6d0b59e3232a,1 +np.float64,0xbfd7401612ae802c,0xbfd7cb74ddd413b9,1 +np.float64,0x3fe41c012de83802,0x3fe5be9d87da3f82,1 +np.float64,0x3fdf501609bea02c,0x3fe05c1d96a2270b,1 +np.float64,0x3fcf9fa1233f3f40,0x3fcff45598e72f07,1 +np.float64,0x3fd4e3895ea9c714,0x3fd547580d8392a2,1 +np.float64,0x3fe1e8ff5fe3d1fe,0x3fe3022a0b86a2ab,1 +np.float64,0xbfe0aa55956154ab,0xbfe18768823da589,1 +np.float64,0x3fb2a0aa26254150,0x3fb2a4e1faff1c93,1 +np.float64,0x3fd3823417a70468,0x3fd3d2f808dbb167,1 +np.float64,0xbfaed323643da640,0xbfaed7e9bef69811,1 +np.float64,0x3fe661e8c4ecc3d2,0x3fe8c9c535f43c16,1 +np.float64,0xbfa429777c2852f0,0xbfa42acd38ba02a6,1 +np.float64,0x3fb5993ea22b3280,0x3fb59fd353e47397,1 +np.float64,0x3fee62d21efcc5a4,0x3ff40788f9278ade,1 +np.float64,0xbf813fb810227f80,0xbf813fc56d8f3c53,1 +np.float64,0x3fd56205deaac40c,0x3fd5cd59671ef193,1 +np.float64,0x3fd31a4de5a6349c,0x3fd365fe401b66e8,1 +np.float64,0xbfec7cc7a478f98f,0xbff190cf69703ca4,1 +np.float64,0xbf755881a02ab100,0xbf755887f52e7794,1 +np.float64,0x3fdd1c92e6ba3924,0x3fde38efb4e8605c,1 +np.float64,0x3fdf49da80be93b4,0x3fe0588af8dd4a34,1 +np.float64,0x3fe1fcdbf2e3f9b8,0x3fe31a27b9d273f2,1 +np.float64,0x3fe2a0f18be541e4,0x3fe3e23b159ce20f,1 +np.float64,0xbfed0f1561fa1e2b,0xbff23820fc0a54ca,1 +np.float64,0x3fe34a006c669400,0x3fe4b419b9ed2b83,1 
+np.float64,0xbfd51be430aa37c8,0xbfd583005a4d62e7,1 +np.float64,0x3fe5ec4e336bd89c,0x3fe826caad6b0f65,1 +np.float64,0xbfdad71b1fb5ae36,0xbfdbb25bef8b53d8,1 +np.float64,0xbfe8eac2d871d586,0xbfec8f8cac7952f9,1 +np.float64,0xbfe1d5aef663ab5e,0xbfe2eae14b7ccdfd,1 +np.float64,0x3fec11d3157823a6,0x3ff11e8279506753,1 +np.float64,0xbfe67ff1166cffe2,0xbfe8f3e61c1dfd32,1 +np.float64,0xbfd101eecda203de,0xbfd136e0e9557022,1 +np.float64,0x3fde6c9e5cbcd93c,0x3fdfb48ee7efe134,1 +np.float64,0x3fec3ede9c787dbe,0x3ff14dead1e5cc1c,1 +np.float64,0x3fe7a022086f4044,0x3fea93ce2980b161,1 +np.float64,0xbfc3b2b1b7276564,0xbfc3c6d02d60bb21,1 +np.float64,0x7ff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fe60b5647ec16ac,0x3fe8517ef0544b40,1 +np.float64,0xbfd20ab654a4156c,0xbfd24a2f1b8e4932,1 +np.float64,0xbfe4aa1e2f69543c,0xbfe677005cbd2646,1 +np.float64,0xbfc831cc0b306398,0xbfc8574910d0b86d,1 +np.float64,0xbfc3143495262868,0xbfc3267961b79198,1 +np.float64,0x3fc14d64c1229ac8,0x3fc15afea90a319d,1 +np.float64,0x3fc0a5a207214b48,0x3fc0b1bd2f15c1b0,1 +np.float64,0xbfc0b8351521706c,0xbfc0c4792672d6db,1 +np.float64,0xbfdc383600b8706c,0xbfdd398429e163bd,1 +np.float64,0x3fd9e17321b3c2e8,0x3fdaa4c4d140a622,1 +np.float64,0xbfd44f079ea89e10,0xbfd4aa7d6deff4ab,1 +np.float64,0xbfc3de52a927bca4,0xbfc3f2f8f65f4c3f,1 +np.float64,0x3fe7779d566eef3a,0x3fea57f8592dbaad,1 +np.float64,0xbfe309039e661207,0xbfe462f47f9a64e5,1 +np.float64,0x3fd8e06d08b1c0dc,0x3fd98cc946e440a6,1 +np.float64,0x3fdde66c9ebbccd8,0x3fdf1c68009a8dc1,1 +np.float64,0x3fd4369c6ba86d38,0x3fd490bf460a69e4,1 +np.float64,0xbfe132252fe2644a,0xbfe22775e109cc2e,1 +np.float64,0x3fee15483c7c2a90,0x3ff39111de89036f,1 +np.float64,0xbfc1d5ee8123abdc,0xbfc1e4d66c6871a5,1 +np.float64,0x3fc851c52b30a388,0x3fc877d93fb4ae1a,1 +np.float64,0x3fdaade707b55bd0,0x3fdb85001661fffe,1 +np.float64,0xbfe79fb7f96f3f70,0xbfea9330ec27ac10,1 +np.float64,0xbfe8b0f725f161ee,0xbfec3411c0e4517a,1 +np.float64,0xbfea79f5f374f3ec,0xbfef2e9dd9270488,1 +np.float64,0x3fe0b5fe5b616bfc,0x3fe19512a36a4534,1 +np.float64,0xbfad7c622c3af8c0,0xbfad808fea96a804,1 +np.float64,0xbfe3e24dbce7c49c,0xbfe574b4c1ea9818,1 +np.float64,0xbfe80b038af01607,0xbfeb33fec279576a,1 +np.float64,0xbfef69e2ea7ed3c6,0xbff610a5593a18bc,1 +np.float64,0x3fdcc0bb39b98178,0x3fddd1f8c9a46430,1 +np.float64,0xbfba39976a347330,0xbfba4563bb5369a4,1 +np.float64,0xbfebf9768ef7f2ed,0xbff10548ab725f74,1 +np.float64,0xbfec21c066f84381,0xbff12f2803ba052f,1 +np.float64,0xbfca216a6b3442d4,0xbfca50c5e1e5748e,1 +np.float64,0x3fd5e40da4abc81c,0x3fd65783f9a22946,1 +np.float64,0x3fc235ca17246b98,0x3fc245a8f453173f,1 +np.float64,0x3fecb5b867796b70,0x3ff1d046a0bfda69,1 +np.float64,0x3fcb457fef368b00,0x3fcb7b6daa8165a7,1 +np.float64,0xbfa5ed6f7c2bdae0,0xbfa5ef27244e2e42,1 +np.float64,0x3fecf618a1f9ec32,0x3ff21a86cc104542,1 +np.float64,0x3fe9d95413f3b2a8,0x3fee178dcafa11fc,1 +np.float64,0xbfe93a5357f274a7,0xbfed0f9a565da84a,1 +np.float64,0xbfeb9e45ff773c8c,0xbff0a93cab8e258d,1 +np.float64,0x3fcbd9d0bd37b3a0,0x3fcc134e87cae241,1 +np.float64,0x3fe55d4db76aba9c,0x3fe764a0e028475a,1 +np.float64,0xbfc8a6fc71314df8,0xbfc8ceaafbfc59a7,1 +np.float64,0x3fe0615fa660c2c0,0x3fe1323611c4cbc2,1 +np.float64,0x3fb965558632cab0,0x3fb9700b84de20ab,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fe76776c6eeceee,0x3fea40403e24a9f1,1 +np.float64,0x3fe3b7f672676fec,0x3fe53ece71a1a1b1,1 +np.float64,0xbfa9b82ba4337050,0xbfa9baf15394ca64,1 +np.float64,0xbfe31faf49663f5e,0xbfe47f31b1ca73dc,1 +np.float64,0xbfcc4c6beb3898d8,0xbfcc88c5f814b2c1,1 
+np.float64,0x3fd481530aa902a8,0x3fd4df8df03bc155,1 +np.float64,0x3fd47593b8a8eb28,0x3fd4d327ab78a1a8,1 +np.float64,0x3fd70e6ccbae1cd8,0x3fd7962fe8b63d46,1 +np.float64,0x3fd25191f7a4a324,0x3fd2941623c88e02,1 +np.float64,0x3fd0603ef0a0c07c,0x3fd08f64e97588dc,1 +np.float64,0xbfc653bae52ca774,0xbfc6711e5e0d8ea9,1 +np.float64,0xbfd11db8fea23b72,0xbfd153b63c6e8812,1 +np.float64,0xbfea9bde25f537bc,0xbfef6b52268e139a,1 +np.float64,0x1,0x1,1 +np.float64,0xbfefd3806d7fa701,0xbff776dcef9583ca,1 +np.float64,0xbfe0fb8cfde1f71a,0xbfe1e6e2e774a8f8,1 +np.float64,0x3fea384534f4708a,0x3feebadaa389be0d,1 +np.float64,0x3feff761c97feec4,0x3ff866157b9d072d,1 +np.float64,0x3fe7131ccb6e263a,0x3fe9c58b4389f505,1 +np.float64,0x3fe9084f7872109e,0x3fecbed0355dbc8f,1 +np.float64,0x3f708e89e0211d00,0x3f708e8cd4946b9e,1 +np.float64,0xbfe39185f067230c,0xbfe50e1cd178244d,1 +np.float64,0x3fd67cc1a9acf984,0x3fd6fa514784b48c,1 +np.float64,0xbfecaef005f95de0,0xbff1c89c9c3ef94a,1 +np.float64,0xbfe12eec81e25dd9,0xbfe223a4285bba9a,1 +np.float64,0x3fbe7f9faa3cff40,0x3fbe92363525068d,1 +np.float64,0xbfe1950b2b632a16,0xbfe29d45fc1e4ce9,1 +np.float64,0x3fe45049e6e8a094,0x3fe6020de759e383,1 +np.float64,0x3fe4d10c8969a21a,0x3fe6aa1fe42cbeb9,1 +np.float64,0xbfe9d04658f3a08d,0xbfee08370a0dbf0c,1 +np.float64,0x3fe14fb314e29f66,0x3fe24a8d73663521,1 +np.float64,0xbfef4abfe4fe9580,0xbff5c2c1ff1250ca,1 +np.float64,0xbfe6162b366c2c56,0xbfe86073ac3c6243,1 +np.float64,0x3feffe781e7ffcf0,0x3ff8d2cbedd6a1b5,1 +np.float64,0xbff0000000000000,0xbff921fb54442d18,1 +np.float64,0x3fc1dc45ad23b888,0x3fc1eb3d9bddda58,1 +np.float64,0xbfe793f6fcef27ee,0xbfea81c93d65aa64,1 +np.float64,0x3fdef6d2bbbdeda4,0x3fe029079d42efb5,1 +np.float64,0xbfdf0ac479be1588,0xbfe0346dbc95963f,1 +np.float64,0xbfd33927d7a67250,0xbfd38653f90a5b73,1 +np.float64,0xbfe248b072e49161,0xbfe37631ef6572e1,1 +np.float64,0xbfc8ceb6af319d6c,0xbfc8f7288657f471,1 +np.float64,0x3fdd7277fcbae4f0,0x3fde99886e6766ef,1 +np.float64,0xbfe0d30c6561a619,0xbfe1b72f90bf53d6,1 +np.float64,0xbfcb0fe07d361fc0,0xbfcb448e2eae9542,1 +np.float64,0xbfe351f57fe6a3eb,0xbfe4be13eef250f2,1 +np.float64,0x3fe85ec02cf0bd80,0x3febb407e2e52e4c,1 +np.float64,0x3fc8bc59b53178b0,0x3fc8e470f65800ec,1 +np.float64,0xbfd278d447a4f1a8,0xbfd2bd133c9c0620,1 +np.float64,0x3feda5cfd87b4ba0,0x3ff2f5ab4324f43f,1 +np.float64,0xbfd2b32a36a56654,0xbfd2fa09c36afd34,1 +np.float64,0xbfed4a81cb7a9504,0xbff28077a4f4fff4,1 +np.float64,0x3fdf079bf9be0f38,0x3fe0329f7fb13f54,1 +np.float64,0x3fd14097f6a28130,0x3fd177e9834ec23f,1 +np.float64,0xbfaeab11843d5620,0xbfaeafc5531eb6b5,1 +np.float64,0xbfac3f8c14387f20,0xbfac433893d53360,1 +np.float64,0xbfc139d7ed2273b0,0xbfc14743adbbe660,1 +np.float64,0x3fe78cb02cef1960,0x3fea7707f76edba9,1 +np.float64,0x3fefe16b41ffc2d6,0x3ff7bff36a7aa7b8,1 +np.float64,0x3fec5260d378a4c2,0x3ff162c588b0da38,1 +np.float64,0x3fedb146f17b628e,0x3ff304f90d3a15d1,1 +np.float64,0x3fd1fd45f7a3fa8c,0x3fd23c2dc3929e20,1 +np.float64,0x3fe0898a5ee11314,0x3fe1610c63e726eb,1 +np.float64,0x3fe7719946eee332,0x3fea4f205eecb59f,1 +np.float64,0x3fe955218972aa44,0x3fed3b530c1f7651,1 +np.float64,0x3fe0ccbf4461997e,0x3fe1afc7b4587836,1 +np.float64,0xbfe9204314f24086,0xbfece5605780e346,1 +np.float64,0xbfe552017feaa403,0xbfe755773cbd74d5,1 +np.float64,0x3fd8ce4b32b19c98,0x3fd9791c8dd44eae,1 +np.float64,0x3fef89acd9ff135a,0x3ff668f78adf7ced,1 +np.float64,0x3fc9d713ad33ae28,0x3fca04da6c293bbd,1 +np.float64,0xbfe22d9c4de45b38,0xbfe3553effadcf92,1 +np.float64,0x3fa5cda38c2b9b40,0x3fa5cf53c5787482,1 
+np.float64,0x3fa878ebdc30f1e0,0x3fa87b4f2bf1d4c3,1 +np.float64,0x3fe8030353700606,0x3feb27e196928789,1 +np.float64,0x3fb50607222a0c10,0x3fb50c188ce391e6,1 +np.float64,0x3fd9ba4ab4b37494,0x3fda79fa8bd40f45,1 +np.float64,0x3fb564598e2ac8b0,0x3fb56abe42d1ba13,1 +np.float64,0xbfd1177c83a22efa,0xbfd14d3d7ef30cc4,1 +np.float64,0xbfd952cec7b2a59e,0xbfda09215d17c0ac,1 +np.float64,0x3fe1d8066663b00c,0x3fe2edb35770b8dd,1 +np.float64,0xbfc89427a3312850,0xbfc8bb7a7c389497,1 +np.float64,0xbfe86ebfd3f0dd80,0xbfebccc2ba0f506c,1 +np.float64,0x3fc390578b2720b0,0x3fc3a40cb7f5f728,1 +np.float64,0xbfd122f9b8a245f4,0xbfd15929dc57a897,1 +np.float64,0x3f8d0636d03a0c80,0x3f8d06767de576df,1 +np.float64,0xbfe4b55d8b696abb,0xbfe685be537a9637,1 +np.float64,0xbfdfd51cf9bfaa3a,0xbfe0a894fcff0c76,1 +np.float64,0xbfd37c1f52a6f83e,0xbfd3cc9593c37aad,1 +np.float64,0x3fd0e8283ea1d050,0x3fd11c25c800785a,1 +np.float64,0x3fd3160784a62c10,0x3fd36183a6c2880c,1 +np.float64,0x3fd4c66e57a98cdc,0x3fd5288fe3394eff,1 +np.float64,0x3fee2f7e3afc5efc,0x3ff3b8063eb30cdc,1 +np.float64,0xbfe526773a6a4cee,0xbfe71b4364215b18,1 +np.float64,0x3fea01181e740230,0x3fee5b65eccfd130,1 +np.float64,0xbfe51c03f76a3808,0xbfe70d5919d37587,1 +np.float64,0x3fd97e1375b2fc28,0x3fda3845da40b22b,1 +np.float64,0x3fd5c14a14ab8294,0x3fd632890d07ed03,1 +np.float64,0xbfec9b474279368e,0xbff1b28f50584fe3,1 +np.float64,0x3fe0139ca860273a,0x3fe0d7fc377f001c,1 +np.float64,0x3fdb080c9db61018,0x3fdbe85056358fa0,1 +np.float64,0xbfdd72ceb1bae59e,0xbfde99ea171661eb,1 +np.float64,0xbfe64e934fec9d26,0xbfe8aec2ef24be63,1 +np.float64,0x3fd1036a93a206d4,0x3fd1386adabe01bd,1 +np.float64,0x3febc9d4a5f793aa,0x3ff0d4c069f1e67d,1 +np.float64,0xbfe547a16fea8f43,0xbfe747902fe6fb4d,1 +np.float64,0x3fc289b0f9251360,0x3fc29a709de6bdd9,1 +np.float64,0xbfe694494a6d2892,0xbfe9108f3dc133e2,1 +np.float64,0x3fd827dfe4b04fc0,0x3fd8c4fe40532b91,1 +np.float64,0xbfe8b89418f17128,0xbfec400c5a334b2e,1 +np.float64,0x3fed5605147aac0a,0x3ff28ed1f612814a,1 +np.float64,0xbfed36af31fa6d5e,0xbff26804e1f71af0,1 +np.float64,0x3fdbb01c02b76038,0x3fdca2381558bbf0,1 +np.float64,0x3fe2a951666552a2,0x3fe3ec88f780f9e6,1 +np.float64,0x3fe662defbecc5be,0x3fe8cb1dbfca98ab,1 +np.float64,0x3fd098b1b3a13164,0x3fd0c9d064e4eaf2,1 +np.float64,0x3fefa10edeff421e,0x3ff6b1c6187b18a8,1 +np.float64,0xbfec4feb7a789fd7,0xbff16021ef37a219,1 +np.float64,0x3fd8e415bbb1c82c,0x3fd990c1f8b786bd,1 +np.float64,0xbfead5a09275ab41,0xbfefd44fab5b4f6e,1 +np.float64,0xbfe8666c16f0ccd8,0xbfebbfe0c9f2a9ae,1 +np.float64,0x3fdc962132b92c44,0x3fdda2525a6f406c,1 +np.float64,0xbfe2037f03e406fe,0xbfe3222ec2a3449e,1 +np.float64,0xbfec82c27e790585,0xbff197626ea9df1e,1 +np.float64,0x3fd2b4e03ca569c0,0x3fd2fbd3c7fda23e,1 +np.float64,0xbfe9b0dee5f361be,0xbfedd34f6d3dfe8a,1 +np.float64,0x3feef45cd17de8ba,0x3ff508180687b591,1 +np.float64,0x3f82c39bf0258700,0x3f82c3ad24c3b3f1,1 +np.float64,0xbfca848cfd350918,0xbfcab612ce258546,1 +np.float64,0x3fd6442aaaac8854,0x3fd6bdea54016e48,1 +np.float64,0x3fe550799e6aa0f4,0x3fe75369c9ea5b1e,1 +np.float64,0xbfe0e9d5a361d3ac,0xbfe1d20011139d89,1 +np.float64,0x3fbfc9ff1e3f9400,0x3fbfdf0ea6885c80,1 +np.float64,0xbfa187e8b4230fd0,0xbfa188c95072092e,1 +np.float64,0x3fcd28c9533a5190,0x3fcd6ae879c21b47,1 +np.float64,0x3fc6227ec52c4500,0x3fc63f1fbb441d29,1 +np.float64,0x3fe9b7a2ed736f46,0x3feddeab49b2d176,1 +np.float64,0x3fd4aee93da95dd4,0x3fd50fb3b71e0339,1 +np.float64,0xbfe164dacf62c9b6,0xbfe263bb2f7dd5d9,1 +np.float64,0x3fec62e525f8c5ca,0x3ff17496416d9921,1 +np.float64,0x3fdd363ee0ba6c7c,0x3fde55c6a49a5f86,1 
+np.float64,0x3fe65cbf75ecb97e,0x3fe8c28d31ff3ebd,1 +np.float64,0xbfe76d27ca6eda50,0xbfea4899e3661425,1 +np.float64,0xbfc305738d260ae8,0xbfc3178dcfc9d30f,1 +np.float64,0xbfd3aa2a54a75454,0xbfd3fcf1e1ce8328,1 +np.float64,0x3fd1609fc9a2c140,0x3fd1992efa539b9f,1 +np.float64,0xbfac1291bc382520,0xbfac162cc7334b4d,1 +np.float64,0xbfedb461ea7b68c4,0xbff309247850455d,1 +np.float64,0xbfe8d2adf8f1a55c,0xbfec6947be90ba92,1 +np.float64,0xbfd7128965ae2512,0xbfd79a9855bcfc5a,1 +np.float64,0x3fe8deb09471bd62,0x3fec7c56b3aee531,1 +np.float64,0xbfe5f4d329ebe9a6,0xbfe8327ea8189af8,1 +np.float64,0xbfd3b46ac9a768d6,0xbfd407b80b12ff17,1 +np.float64,0x3fec899d7cf9133a,0x3ff19ef26baca36f,1 +np.float64,0xbfec192fd5783260,0xbff126306e507fd0,1 +np.float64,0x3fe945bdaef28b7c,0x3fed222f787310bf,1 +np.float64,0xbfeff9635d7ff2c7,0xbff87d6773f318eb,1 +np.float64,0xbfd604b81cac0970,0xbfd67a4aa852559a,1 +np.float64,0x3fcd1cc9d53a3990,0x3fcd5e962e237c24,1 +np.float64,0xbfed77b0fffaef62,0xbff2b97a1c9b6483,1 +np.float64,0xbfc9c69325338d28,0xbfc9f401500402fb,1 +np.float64,0xbfdf97e246bf2fc4,0xbfe0855601ea9db3,1 +np.float64,0x3fc7e6304f2fcc60,0x3fc80a4e718504cd,1 +np.float64,0x3fec3b599e7876b4,0x3ff14a2d1b9c68e6,1 +np.float64,0xbfe98618e1f30c32,0xbfed8bfbb31c394a,1 +np.float64,0xbfe59b3c0feb3678,0xbfe7b832d6df81de,1 +np.float64,0xbfe54ce2fe6a99c6,0xbfe74e9a85be4116,1 +np.float64,0x3fc9db49cb33b690,0x3fca092737ef500a,1 +np.float64,0xbfb4a922ae295248,0xbfb4aee4e39078a9,1 +np.float64,0xbfd0e542e0a1ca86,0xbfd11925208d66af,1 +np.float64,0x3fd70543f2ae0a88,0x3fd78c5e9238a3ee,1 +np.float64,0x3fd67f7a7facfef4,0x3fd6fd3998df8545,1 +np.float64,0xbfe40b643d6816c8,0xbfe5a947e427f298,1 +np.float64,0xbfcd85f69b3b0bec,0xbfcdcaa24b75f1a3,1 +np.float64,0x3fec705fb4f8e0c0,0x3ff1833c82163ee2,1 +np.float64,0x3fb37650ea26eca0,0x3fb37b20c16fb717,1 +np.float64,0x3fe5ebfa55ebd7f4,0x3fe826578d716e70,1 +np.float64,0x3fe991dfe5f323c0,0x3fed9f8a4bf1f588,1 +np.float64,0xbfd658bd0aacb17a,0xbfd6d3dd06e54900,1 +np.float64,0xbfc24860252490c0,0xbfc258701a0b9290,1 +np.float64,0xbfefb8d763ff71af,0xbff705b6ea4a569d,1 +np.float64,0x3fb8fcb4ae31f970,0x3fb906e809e7899f,1 +np.float64,0x3fce6343cb3cc688,0x3fceae41d1629625,1 +np.float64,0xbfd43d5a11a87ab4,0xbfd497da25687e07,1 +np.float64,0xbfe9568851f2ad11,0xbfed3d9e5fe83a76,1 +np.float64,0x3fe1b66153e36cc2,0x3fe2c53c7e016271,1 +np.float64,0x3fef27452bfe4e8a,0x3ff571b3486ed416,1 +np.float64,0x3fca87c0a7350f80,0x3fcab958a7bb82d4,1 +np.float64,0xbfd8776a8fb0eed6,0xbfd91afaf2f50edf,1 +np.float64,0x3fe9522a76f2a454,0x3fed3679264e1525,1 +np.float64,0x3fea14ff2cf429fe,0x3fee7da6431cc316,1 +np.float64,0x3fe970618bf2e0c4,0x3fed68154d54dd97,1 +np.float64,0x3fd3410cfca68218,0x3fd38e9b21792240,1 +np.float64,0xbf6a8070c0350100,0xbf6a8073c7c34517,1 +np.float64,0xbfbe449de23c8938,0xbfbe56c8e5e4d98b,1 +np.float64,0x3fedbc92e27b7926,0x3ff314313216d8e6,1 +np.float64,0xbfe3be4706677c8e,0xbfe546d3ceb85aea,1 +np.float64,0x3fe30cd6d76619ae,0x3fe467b6f2664a8d,1 +np.float64,0x3fd7d69b21afad38,0x3fd86d54284d05ad,1 +np.float64,0xbfe501001fea0200,0xbfe6e978afcff4d9,1 +np.float64,0xbfe44ba3d8e89748,0xbfe5fc0a31cd1e3e,1 +np.float64,0x3fec52f7c078a5f0,0x3ff16367acb209b2,1 +np.float64,0xbfcb19efcb3633e0,0xbfcb4ed9235a7d47,1 +np.float64,0xbfab86796c370cf0,0xbfab89df7bf15710,1 +np.float64,0xbfb962feda32c600,0xbfb96db1e1679c98,1 +np.float64,0x3fe0dd14e861ba2a,0x3fe1c2fc72810567,1 +np.float64,0x3fe41bcc6de83798,0x3fe5be59b7f9003b,1 +np.float64,0x3fc82f4c4f305e98,0x3fc854bd9798939f,1 +np.float64,0xbfcd143a613a2874,0xbfcd55cbd1619d84,1 
+np.float64,0xbfd52da61baa5b4c,0xbfd595d0b3543439,1 +np.float64,0xbfb71b4a8e2e3698,0xbfb7235a4ab8432f,1 +np.float64,0xbfec141a19782834,0xbff120e1e39fc856,1 +np.float64,0xbfdba9319db75264,0xbfdc9a8ca2578bb2,1 +np.float64,0xbfbce5d74639cbb0,0xbfbcf5a4878cfa51,1 +np.float64,0x3fde67f7b3bccff0,0x3fdfaf45a9f843ad,1 +np.float64,0xbfe12d87bc625b10,0xbfe221fd4476eb71,1 +np.float64,0x3fe35b8f6be6b71e,0x3fe4ca20f65179e1,1 +np.float64,0xbfdbada1d3b75b44,0xbfdc9f78b19f93d1,1 +np.float64,0xbfc60159c52c02b4,0xbfc61d79b879f598,1 +np.float64,0x3fd6b81c38ad7038,0x3fd739c27bfa16d8,1 +np.float64,0xbfd646a253ac8d44,0xbfd6c08c19612bbb,1 +np.float64,0xbfe6babef0ed757e,0xbfe94703d0bfa311,1 +np.float64,0xbfed5671f1faace4,0xbff28f5a3f3683d0,1 +np.float64,0x3fc01d1e85203a40,0x3fc02817ec0dfd38,1 +np.float64,0xbfe9188a61f23115,0xbfecd8eb5da84223,1 +np.float64,0x3fdca3bab9b94774,0x3fddb1868660c239,1 +np.float64,0xbfa255750c24aaf0,0xbfa25675f7b36343,1 +np.float64,0x3fb3602db626c060,0x3fb364ed2d5b2876,1 +np.float64,0xbfd30a14bda6142a,0xbfd354ff703b8862,1 +np.float64,0xbfe1cfe381639fc7,0xbfe2e3e720b968c8,1 +np.float64,0xbfd2af6a4fa55ed4,0xbfd2f61e190bcd1f,1 +np.float64,0xbfe93c50937278a1,0xbfed12d64bb10d73,1 +np.float64,0x3fddd8bc44bbb178,0x3fdf0ced7f9005cc,1 +np.float64,0x3fdb2bc73cb65790,0x3fdc0fc0e18e425e,1 +np.float64,0xbfd073f6aba0e7ee,0xbfd0a3cb5468a961,1 +np.float64,0x3fed4bad7b7a975a,0x3ff281ebeb75e414,1 +np.float64,0xbfdc75b50bb8eb6a,0xbfdd7e1a7631cb22,1 +np.float64,0x3fd458a90fa8b154,0x3fd4b4a5817248ce,1 +np.float64,0x3feead5db57d5abc,0x3ff484286fab55ff,1 +np.float64,0x3fb3894382271280,0x3fb38e217b4e7905,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfe428212ae85042,0xbfe5ce36f226bea8,1 +np.float64,0xbfc08b39f7211674,0xbfc0971b93ebc7ad,1 +np.float64,0xbfc2e7cf5525cfa0,0xbfc2f994eb72b623,1 +np.float64,0xbfdb0d85afb61b0c,0xbfdbee5a2de3c5db,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0xbfd0d36af7a1a6d6,0xbfd106a5f05ef6ff,1 +np.float64,0xbfc333d0912667a0,0xbfc3467162b7289a,1 +np.float64,0x3fcdababc53b5758,0x3fcdf16458c20fa8,1 +np.float64,0x3fd0821b38a10438,0x3fd0b26e3e0b9185,1 +np.float64,0x0,0x0,1 +np.float64,0x3feb7f70edf6fee2,0x3ff08ae81854bf20,1 +np.float64,0x3fe6e075716dc0ea,0x3fe97cc5254be6ff,1 +np.float64,0x3fea13b682f4276e,0x3fee7b6f18073b5b,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arcsinh.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arcsinh.csv new file mode 100644 index 0000000..9eedb1a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arcsinh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbf24142a,0xbf1a85ef,2 +np.float32,0x3e71cf91,0x3e6f9e37,2 +np.float32,0xe52a7,0xe52a7,2 +np.float32,0x3ef1e074,0x3ee9add9,2 +np.float32,0x806160ac,0x806160ac,2 +np.float32,0x7e2d59a2,0x42af4798,2 +np.float32,0xbf32cac9,0xbf26bf96,2 +np.float32,0x3f081701,0x3f026142,2 +np.float32,0x3f23cc88,0x3f1a499c,2 +np.float32,0xbf090d94,0xbf033ad0,2 +np.float32,0x803af2fc,0x803af2fc,2 +np.float32,0x807eb17e,0x807eb17e,2 +np.float32,0x5c0d8e,0x5c0d8e,2 +np.float32,0x3f7b79d2,0x3f5e6b1d,2 +np.float32,0x806feeae,0x806feeae,2 +np.float32,0x3e4b423a,0x3e49f274,2 +np.float32,0x3f49e5ac,0x3f394a41,2 +np.float32,0x3f18cd4e,0x3f10ef35,2 +np.float32,0xbed75734,0xbed17322,2 +np.float32,0x7f591151,0x42b28085,2 +np.float32,0xfefe9da6,0xc2b16f51,2 +np.float32,0xfeac90fc,0xc2b0a82a,2 +np.float32,0x805c198e,0x805c198e,2 
+np.float32,0x7f66d6df,0x42b2a004,2 +np.float32,0x505438,0x505438,2 +np.float32,0xbf39a209,0xbf2c5255,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xc84cb,0xc84cb,2 +np.float32,0x7f07d6f5,0x42b19088,2 +np.float32,0x79d7e4,0x79d7e4,2 +np.float32,0xff32f6a0,0xc2b21db1,2 +np.float32,0x7c005c05,0x42a9222e,2 +np.float32,0x3ec449aa,0x3ebfc5ae,2 +np.float32,0x800ec323,0x800ec323,2 +np.float32,0xff1c904c,0xc2b1d93a,2 +np.float32,0x7f4eca52,0x42b267b0,2 +np.float32,0x3ee06540,0x3ed9c514,2 +np.float32,0x6aab4,0x6aab4,2 +np.float32,0x3e298d8c,0x3e28c99e,2 +np.float32,0xbf38d162,0xbf2ba94a,2 +np.float32,0x2d9083,0x2d9083,2 +np.float32,0x7eae5032,0x42b0ad52,2 +np.float32,0x3ead5b3c,0x3eaa3443,2 +np.float32,0x806fef66,0x806fef66,2 +np.float32,0x3f5b614e,0x3f46ca71,2 +np.float32,0xbf4c906a,0xbf3b60fc,2 +np.float32,0x8049453e,0x8049453e,2 +np.float32,0x3d305220,0x3d304432,2 +np.float32,0x2e1a89,0x2e1a89,2 +np.float32,0xbf4e74ec,0xbf3cdacf,2 +np.float32,0x807a827a,0x807a827a,2 +np.float32,0x80070745,0x80070745,2 +np.float32,0xbe1ba2fc,0xbe1b0b28,2 +np.float32,0xbe5131d0,0xbe4fc421,2 +np.float32,0x5bfd98,0x5bfd98,2 +np.float32,0xbd8e1a48,0xbd8dfd27,2 +np.float32,0x8006c160,0x8006c160,2 +np.float32,0x346490,0x346490,2 +np.float32,0xbdbdf060,0xbdbdaaf0,2 +np.float32,0x3ea9d0c4,0x3ea6d8c7,2 +np.float32,0xbf2aaa28,0xbf200916,2 +np.float32,0xbf160c26,0xbf0e9047,2 +np.float32,0x80081fd4,0x80081fd4,2 +np.float32,0x7db44283,0x42adf8b6,2 +np.float32,0xbf1983f8,0xbf118bf5,2 +np.float32,0x2c4a35,0x2c4a35,2 +np.float32,0x6165a7,0x6165a7,2 +np.float32,0xbe776b44,0xbe75129f,2 +np.float32,0xfe81841a,0xc2b0153b,2 +np.float32,0xbf7d1b2f,0xbf5f9461,2 +np.float32,0x80602d36,0x80602d36,2 +np.float32,0xfe8d5046,0xc2b041dd,2 +np.float32,0xfe5037bc,0xc2afa56d,2 +np.float32,0x4bbea6,0x4bbea6,2 +np.float32,0xfea039de,0xc2b0822d,2 +np.float32,0x7ea627a4,0x42b094c7,2 +np.float32,0x3f556198,0x3f423591,2 +np.float32,0xfedbae04,0xc2b123c1,2 +np.float32,0xbe30432c,0xbe2f6744,2 +np.float32,0x80202c77,0x80202c77,2 +np.float32,0xff335cc1,0xc2b21ed5,2 +np.float32,0x3e1e1ebe,0x3e1d7f95,2 +np.float32,0x8021c9c0,0x8021c9c0,2 +np.float32,0x7dc978,0x7dc978,2 +np.float32,0xff6cfabc,0xc2b2ad75,2 +np.float32,0x7f2bd542,0x42b208e0,2 +np.float32,0x53bf33,0x53bf33,2 +np.float32,0x804e04bb,0x804e04bb,2 +np.float32,0x3f30d2f9,0x3f2521ca,2 +np.float32,0x3dfde876,0x3dfd4316,2 +np.float32,0x46f8b1,0x46f8b1,2 +np.float32,0xbd5f9e20,0xbd5f81ba,2 +np.float32,0x807d6a22,0x807d6a22,2 +np.float32,0xff3881da,0xc2b22d50,2 +np.float32,0x1b1cb5,0x1b1cb5,2 +np.float32,0x3f75f2d0,0x3f5a7435,2 +np.float32,0xfee39c1a,0xc2b135e9,2 +np.float32,0x7f79f14a,0x42b2c8b9,2 +np.float32,0x8000e2d1,0x8000e2d1,2 +np.float32,0xab779,0xab779,2 +np.float32,0xbede6690,0xbed7f102,2 +np.float32,0x76e20d,0x76e20d,2 +np.float32,0x3ed714cb,0x3ed135e9,2 +np.float32,0xbeaa6f44,0xbea76f31,2 +np.float32,0x7f7dc8b1,0x42b2d089,2 +np.float32,0x108cb2,0x108cb2,2 +np.float32,0x7d37ba82,0x42ac9f94,2 +np.float32,0x3f31d068,0x3f25f221,2 +np.float32,0x8010a331,0x8010a331,2 +np.float32,0x3f2fdc7c,0x3f2456cd,2 +np.float32,0x7f7a9a67,0x42b2ca13,2 +np.float32,0x3f2acb31,0x3f202492,2 +np.float32,0x7f54fa94,0x42b276c9,2 +np.float32,0x3ebf8a70,0x3ebb553c,2 +np.float32,0x7f75b1a7,0x42b2bff2,2 +np.float32,0x7daebe07,0x42ade8cc,2 +np.float32,0xbd3a3ef0,0xbd3a2e86,2 +np.float32,0x8078ec9e,0x8078ec9e,2 +np.float32,0x3eda206a,0x3ed403ec,2 +np.float32,0x3f7248f2,0x3f57cd77,2 +np.float32,0x805d55ba,0x805d55ba,2 +np.float32,0xff30dc3e,0xc2b217a3,2 +np.float32,0xbe12b27c,0xbe123333,2 
+np.float32,0xbf6ed9cf,0xbf554cd0,2 +np.float32,0xbed9eb5c,0xbed3d31c,2 +np.float32,0xbf1c9aea,0xbf14307b,2 +np.float32,0x3f540ac4,0x3f412de2,2 +np.float32,0x800333ac,0x800333ac,2 +np.float32,0x3f74cdb4,0x3f59a09a,2 +np.float32,0xbf41dc41,0xbf32ee6f,2 +np.float32,0xff2c7804,0xc2b20ac4,2 +np.float32,0x514493,0x514493,2 +np.float32,0xbddf1220,0xbddea1cf,2 +np.float32,0xfeaf74de,0xc2b0b0ab,2 +np.float32,0xfe5dfb30,0xc2afc633,2 +np.float32,0xbf4785c4,0xbf376bdb,2 +np.float32,0x80191cd3,0x80191cd3,2 +np.float32,0xfe44f708,0xc2af88fb,2 +np.float32,0x3d4cd8a0,0x3d4cc2ca,2 +np.float32,0x7f572eff,0x42b27c0f,2 +np.float32,0x8031bacb,0x8031bacb,2 +np.float32,0x7f2ea684,0x42b21133,2 +np.float32,0xbea1976a,0xbe9f05bb,2 +np.float32,0x3d677b41,0x3d675bc1,2 +np.float32,0x3f61bf24,0x3f4b9870,2 +np.float32,0x7ef55ddf,0x42b15c5f,2 +np.float32,0x3eabcb20,0x3ea8b91c,2 +np.float32,0xff73d9ec,0xc2b2bc18,2 +np.float32,0x77b9f5,0x77b9f5,2 +np.float32,0x4c6c6c,0x4c6c6c,2 +np.float32,0x7ed09c94,0x42b10949,2 +np.float32,0xdeeec,0xdeeec,2 +np.float32,0x7eac5858,0x42b0a782,2 +np.float32,0x7e190658,0x42af07bd,2 +np.float32,0xbe3c8980,0xbe3b7ce2,2 +np.float32,0x8059e86e,0x8059e86e,2 +np.float32,0xff201836,0xc2b1e4a5,2 +np.float32,0xbeac109c,0xbea8fafb,2 +np.float32,0x7edd1e2b,0x42b12718,2 +np.float32,0x639cd8,0x639cd8,2 +np.float32,0x3f5e4cae,0x3f490059,2 +np.float32,0x3d84c185,0x3d84a9c4,2 +np.float32,0xbe8c1130,0xbe8a605b,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3f1da5e4,0x3f151404,2 +np.float32,0x7f75a873,0x42b2bfdf,2 +np.float32,0xbd873540,0xbd871c28,2 +np.float32,0xbe8e5e10,0xbe8c9808,2 +np.float32,0x7f004bf2,0x42b17347,2 +np.float32,0x800000,0x800000,2 +np.float32,0xbf6d6b79,0xbf544095,2 +np.float32,0x7ed7b563,0x42b11a6a,2 +np.float32,0x80693745,0x80693745,2 +np.float32,0x3ee0f608,0x3eda49a8,2 +np.float32,0xfe1285a4,0xc2aef181,2 +np.float32,0x72d946,0x72d946,2 +np.float32,0x6a0dca,0x6a0dca,2 +np.float32,0x3f5c9df6,0x3f47ba99,2 +np.float32,0xff002af6,0xc2b172c4,2 +np.float32,0x3f4ac98f,0x3f39fd0a,2 +np.float32,0x8066acf7,0x8066acf7,2 +np.float32,0xbcaa4e60,0xbcaa4b3c,2 +np.float32,0x80162813,0x80162813,2 +np.float32,0xff34b318,0xc2b222a2,2 +np.float32,0x7f1ce33c,0x42b1da49,2 +np.float32,0x3f0e55ab,0x3f07ddb0,2 +np.float32,0x7c75d996,0x42aa6eec,2 +np.float32,0xbf221bc6,0xbf18dc89,2 +np.float32,0x3f5a1a4c,0x3f45d1d4,2 +np.float32,0x7f2451b8,0x42b1f1fb,2 +np.float32,0x3ec55ca0,0x3ec0c655,2 +np.float32,0x3f752dc2,0x3f59e600,2 +np.float32,0xbe33f638,0xbe330c4d,2 +np.float32,0x3e2a9148,0x3e29c9d8,2 +np.float32,0x3f3362a1,0x3f273c01,2 +np.float32,0x5f83b3,0x5f83b3,2 +np.float32,0x3e362488,0x3e353216,2 +np.float32,0x140bcf,0x140bcf,2 +np.float32,0x7e3e96df,0x42af7822,2 +np.float32,0xbebc7082,0xbeb86ce6,2 +np.float32,0xbe92a92e,0xbe90b9d2,2 +np.float32,0xff3d8afc,0xc2b23b19,2 +np.float32,0x804125e3,0x804125e3,2 +np.float32,0x3f3675d1,0x3f29bedb,2 +np.float32,0xff70bb09,0xc2b2b57f,2 +np.float32,0x3f29681c,0x3f1efcd2,2 +np.float32,0xbdc70380,0xbdc6b3a8,2 +np.float32,0x54e0dd,0x54e0dd,2 +np.float32,0x3d545de0,0x3d54458c,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x8014a4c2,0x8014a4c2,2 +np.float32,0xbe93f58a,0xbe91f938,2 +np.float32,0x17de33,0x17de33,2 +np.float32,0xfefb679a,0xc2b168d2,2 +np.float32,0xbf23423e,0xbf19d511,2 +np.float32,0x7e893fa1,0x42b032ec,2 +np.float32,0x3f44fe2d,0x3f356bda,2 +np.float32,0xbebb2e78,0xbeb73e8f,2 +np.float32,0x3f5632e0,0x3f42d633,2 +np.float32,0x3ddd8698,0x3ddd1896,2 +np.float32,0x80164ea7,0x80164ea7,2 +np.float32,0x80087b37,0x80087b37,2 +np.float32,0xbf06ab1e,0xbf011f95,2 
+np.float32,0x3db95524,0x3db9149f,2 +np.float32,0x7aa1fbb3,0x42a570a1,2 +np.float32,0xbd84fc48,0xbd84e467,2 +np.float32,0x3d65c6f5,0x3d65a826,2 +np.float32,0xfe987800,0xc2b068c4,2 +np.float32,0x7ec59532,0x42b0ed7a,2 +np.float32,0x3ea0232c,0x3e9da29a,2 +np.float32,0x80292a08,0x80292a08,2 +np.float32,0x734cfe,0x734cfe,2 +np.float32,0x3f3b6d63,0x3f2dc596,2 +np.float32,0x3f27bcc1,0x3f1d97e6,2 +np.float32,0xfe1da554,0xc2af16f9,2 +np.float32,0x7c91f5,0x7c91f5,2 +np.float32,0xfe4e78cc,0xc2afa11e,2 +np.float32,0x7e4b4e08,0x42af9933,2 +np.float32,0xfe0949ec,0xc2aed02e,2 +np.float32,0x7e2f057f,0x42af4c81,2 +np.float32,0xbf200ae0,0xbf171ce1,2 +np.float32,0x3ebcc244,0x3eb8b99e,2 +np.float32,0xbf68f58d,0xbf50f7aa,2 +np.float32,0x4420b1,0x4420b1,2 +np.float32,0x3f5b61bf,0x3f46cac7,2 +np.float32,0x3fec78,0x3fec78,2 +np.float32,0x7f4183c8,0x42b245b7,2 +np.float32,0xbf10587c,0xbf099ee2,2 +np.float32,0x0,0x0,2 +np.float32,0x7ec84dc3,0x42b0f47a,2 +np.float32,0x3f5fbd7b,0x3f4a166d,2 +np.float32,0xbd884eb8,0xbd883502,2 +np.float32,0xfe3f10a4,0xc2af7969,2 +np.float32,0xff3f4920,0xc2b23fc9,2 +np.float32,0x8013900f,0x8013900f,2 +np.float32,0x8003529d,0x8003529d,2 +np.float32,0xbf032384,0xbefbfb3c,2 +np.float32,0xff418c7c,0xc2b245ce,2 +np.float32,0xbec0aad0,0xbebc633b,2 +np.float32,0xfdbff178,0xc2ae18de,2 +np.float32,0x68ab15,0x68ab15,2 +np.float32,0xbdfc4a88,0xbdfba848,2 +np.float32,0xbf5adec6,0xbf466747,2 +np.float32,0x807d5dcc,0x807d5dcc,2 +np.float32,0x61d144,0x61d144,2 +np.float32,0x807e3a03,0x807e3a03,2 +np.float32,0x1872f2,0x1872f2,2 +np.float32,0x7f2a272c,0x42b203d8,2 +np.float32,0xfe7f8314,0xc2b00e3a,2 +np.float32,0xbe42aeac,0xbe418737,2 +np.float32,0x8024b614,0x8024b614,2 +np.float32,0xbe41b6b8,0xbe40939a,2 +np.float32,0xa765c,0xa765c,2 +np.float32,0x7ea74f4b,0x42b09853,2 +np.float32,0x7f7ef631,0x42b2d2e7,2 +np.float32,0x7eaef5e6,0x42b0af38,2 +np.float32,0xff733d85,0xc2b2bacf,2 +np.float32,0x537ac0,0x537ac0,2 +np.float32,0xbeca4790,0xbec55b1d,2 +np.float32,0x80117314,0x80117314,2 +np.float32,0xfe958536,0xc2b05ec5,2 +np.float32,0x8066ecc2,0x8066ecc2,2 +np.float32,0xbf56baf3,0xbf433e82,2 +np.float32,0x1f7fd7,0x1f7fd7,2 +np.float32,0x3e942104,0x3e9222fc,2 +np.float32,0xfeaffe82,0xc2b0b23c,2 +np.float32,0xfe0e02b0,0xc2aee17e,2 +np.float32,0xbf800000,0xbf61a1b3,2 +np.float32,0x800b7e49,0x800b7e49,2 +np.float32,0x6c514f,0x6c514f,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x7f7d9a45,0x42b2d02b,2 +np.float32,0x800c9c69,0x800c9c69,2 +np.float32,0x274b14,0x274b14,2 +np.float32,0xbf4b22b0,0xbf3a42e2,2 +np.float32,0x63e5ae,0x63e5ae,2 +np.float32,0xbe18facc,0xbe186a90,2 +np.float32,0x7e137351,0x42aef4bd,2 +np.float32,0x80518ffd,0x80518ffd,2 +np.float32,0xbf0a8ffc,0xbf048f0d,2 +np.float32,0x841d,0x841d,2 +np.float32,0x7edfdc9e,0x42b12d69,2 +np.float32,0xfd1092b0,0xc2ac24de,2 +np.float32,0x7e2c9bdf,0x42af4566,2 +np.float32,0x7f7fffff,0x42b2d4fc,2 +np.float32,0x3f4954a6,0x3f38d853,2 +np.float32,0xbe83efd2,0xbe8284c3,2 +np.float32,0x800e8e02,0x800e8e02,2 +np.float32,0x78ad39,0x78ad39,2 +np.float32,0x7eb0f967,0x42b0b514,2 +np.float32,0xbe39aa94,0xbe38a9ee,2 +np.float32,0x80194e7b,0x80194e7b,2 +np.float32,0x3cf3a340,0x3cf39a0f,2 +np.float32,0x3ed3117a,0x3ecd8173,2 +np.float32,0x7f530b11,0x42b2721c,2 +np.float32,0xff756ba2,0xc2b2bf60,2 +np.float32,0x15ea25,0x15ea25,2 +np.float32,0x803cbb64,0x803cbb64,2 +np.float32,0x3f34722d,0x3f281a2c,2 +np.float32,0x3ddd88e0,0x3ddd1adb,2 +np.float32,0x3f54244c,0x3f41418b,2 +np.float32,0x3e0adb98,0x3e0a6f8b,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x58902b,0x58902b,2 
+np.float32,0xfe3b50b8,0xc2af6f43,2 +np.float32,0xfe0846d0,0xc2aecc64,2 +np.float32,0xbe0299d0,0xbe023fd4,2 +np.float32,0x18dde6,0x18dde6,2 +np.float32,0x8039fe8b,0x8039fe8b,2 +np.float32,0x8015d179,0x8015d179,2 +np.float32,0x3f551322,0x3f41f947,2 +np.float32,0x2ab387,0x2ab387,2 +np.float32,0xbf7e311e,0xbf6059d0,2 +np.float32,0xbdba58a8,0xbdba1713,2 +np.float32,0xbf1d008a,0xbf148724,2 +np.float32,0xbf6b9c97,0xbf52ec98,2 +np.float32,0x802acf04,0x802acf04,2 +np.float32,0x1,0x1,2 +np.float32,0xbe9e16d6,0xbe9bade3,2 +np.float32,0xbf048a14,0xbefe78c7,2 +np.float32,0x7e432ad3,0x42af8449,2 +np.float32,0xbdcc7fe0,0xbdcc2944,2 +np.float32,0x6dfc27,0x6dfc27,2 +np.float32,0xfef6eed8,0xc2b15fa1,2 +np.float32,0xbeeff6e8,0xbee7f2e4,2 +np.float32,0x7e3a6ca8,0x42af6cd2,2 +np.float32,0xff2c82e8,0xc2b20ae4,2 +np.float32,0x3e9f8d74,0x3e9d13b0,2 +np.float32,0x7ea36191,0x42b08c29,2 +np.float32,0x7f734bed,0x42b2baed,2 +np.float32,0x7f2df96d,0x42b20f37,2 +np.float32,0x5036fd,0x5036fd,2 +np.float32,0x806eab38,0x806eab38,2 +np.float32,0xbe9db90e,0xbe9b5446,2 +np.float32,0xfeef6fac,0xc2b14fd9,2 +np.float32,0xc2bf7,0xc2bf7,2 +np.float32,0xff53ec3d,0xc2b2743d,2 +np.float32,0x7e837637,0x42b01cde,2 +np.float32,0xbefb5934,0xbef23662,2 +np.float32,0x3f6cec80,0x3f53e371,2 +np.float32,0x3e86e7de,0x3e85643f,2 +np.float32,0x3f09cb42,0x3f03e1ef,2 +np.float32,0xbec3d236,0xbebf5620,2 +np.float32,0xfedef246,0xc2b12b50,2 +np.float32,0xbf08d6a8,0xbf030a62,2 +np.float32,0x8036cbf9,0x8036cbf9,2 +np.float32,0x3f74d3e3,0x3f59a512,2 +np.float32,0x6a600c,0x6a600c,2 +np.float32,0xfd1295b0,0xc2ac2bf1,2 +np.float32,0xbeb61142,0xbeb26efa,2 +np.float32,0x80216556,0x80216556,2 +np.float32,0xbf1fa0f6,0xbf16c30a,2 +np.float32,0x3e0af8e1,0x3e0a8c90,2 +np.float32,0x80434709,0x80434709,2 +np.float32,0x49efd9,0x49efd9,2 +np.float32,0x7f7cce6c,0x42b2ce8f,2 +np.float32,0x6e5450,0x6e5450,2 +np.float32,0x7f0fc115,0x42b1ad86,2 +np.float32,0x632db0,0x632db0,2 +np.float32,0x3f6f4c2a,0x3f55a064,2 +np.float32,0x7ec4f273,0x42b0ebd3,2 +np.float32,0x61ae1e,0x61ae1e,2 +np.float32,0x5f47c4,0x5f47c4,2 +np.float32,0xbf3c8f62,0xbf2eaf54,2 +np.float32,0xfca38900,0xc2ab0113,2 +np.float32,0x3ec89d52,0x3ec3ce78,2 +np.float32,0xbe0e3f70,0xbe0dcb53,2 +np.float32,0x805d3156,0x805d3156,2 +np.float32,0x3eee33f8,0x3ee65a4e,2 +np.float32,0xbeda7e9a,0xbed45a90,2 +np.float32,0x7e2fac7b,0x42af4e69,2 +np.float32,0x7efd0e28,0x42b16c2c,2 +np.float32,0x3f0c7b17,0x3f063e46,2 +np.float32,0xbf395bec,0xbf2c198f,2 +np.float32,0xfdf1c3f8,0xc2ae8f05,2 +np.float32,0xbe11f4e4,0xbe117783,2 +np.float32,0x7eddc901,0x42b128a3,2 +np.float32,0x3f4bad09,0x3f3aaf33,2 +np.float32,0xfefb5d76,0xc2b168bd,2 +np.float32,0x3ed3a4cf,0x3ece09a3,2 +np.float32,0x7ec582e4,0x42b0ed4a,2 +np.float32,0x3dc2268a,0x3dc1dc64,2 +np.float32,0x3ef9b17c,0x3ef0b9c9,2 +np.float32,0x2748ac,0x2748ac,2 +np.float32,0xfed6a602,0xc2b117e4,2 +np.float32,0xbefc9c36,0xbef35832,2 +np.float32,0x7e0476,0x7e0476,2 +np.float32,0x804be1a0,0x804be1a0,2 +np.float32,0xbefbc1c2,0xbef2943a,2 +np.float32,0xbd4698f0,0xbd46850a,2 +np.float32,0x688627,0x688627,2 +np.float32,0x3f7f7685,0x3f61406f,2 +np.float32,0x827fb,0x827fb,2 +np.float32,0x3f503264,0x3f3e34fd,2 +np.float32,0x7f5458d1,0x42b27543,2 +np.float32,0x800ac01f,0x800ac01f,2 +np.float32,0x6188dd,0x6188dd,2 +np.float32,0x806ac0ba,0x806ac0ba,2 +np.float32,0xbe14493c,0xbe13c5cc,2 +np.float32,0x3f77542c,0x3f5b72ae,2 +np.float32,0xfeaacab6,0xc2b0a2df,2 +np.float32,0x7f2893d5,0x42b1ff15,2 +np.float32,0x66b528,0x66b528,2 +np.float32,0xbf653e24,0xbf4e3573,2 +np.float32,0x801a2853,0x801a2853,2 
+np.float32,0x3f3d8c98,0x3f2f7b04,2 +np.float32,0xfdffbad8,0xc2aeabc5,2 +np.float32,0x3dd50f,0x3dd50f,2 +np.float32,0x3f325a4c,0x3f266353,2 +np.float32,0xfcc48ec0,0xc2ab5f3f,2 +np.float32,0x3e6f5b9a,0x3e6d3ae5,2 +np.float32,0x3dbcd62b,0x3dbc91ee,2 +np.float32,0xbf7458d9,0xbf594c1c,2 +np.float32,0xff5adb24,0xc2b284b9,2 +np.float32,0x807b246d,0x807b246d,2 +np.float32,0x3f800000,0x3f61a1b3,2 +np.float32,0x231a28,0x231a28,2 +np.float32,0xbdc66258,0xbdc61341,2 +np.float32,0x3c84b4b4,0x3c84b338,2 +np.float32,0xbf215894,0xbf183783,2 +np.float32,0xff4ee298,0xc2b267ec,2 +np.float32,0x801ef52e,0x801ef52e,2 +np.float32,0x1040b0,0x1040b0,2 +np.float32,0xff545582,0xc2b2753b,2 +np.float32,0x3f3b9dda,0x3f2decaf,2 +np.float32,0x730f99,0x730f99,2 +np.float32,0xff7fffff,0xc2b2d4fc,2 +np.float32,0xff24cc5e,0xc2b1f379,2 +np.float32,0xbe9b456a,0xbe98fc0b,2 +np.float32,0x188fb,0x188fb,2 +np.float32,0x3f5c7ce2,0x3f47a18a,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x806ea4da,0x806ea4da,2 +np.float32,0xfe810570,0xc2b01345,2 +np.float32,0x8036af89,0x8036af89,2 +np.float32,0x8043cec6,0x8043cec6,2 +np.float32,0x80342bb3,0x80342bb3,2 +np.float32,0x1a2bd4,0x1a2bd4,2 +np.float32,0x3f6248c2,0x3f4bff9a,2 +np.float32,0x8024eb35,0x8024eb35,2 +np.float32,0x7ea55872,0x42b09247,2 +np.float32,0x806d6e56,0x806d6e56,2 +np.float32,0x25c21a,0x25c21a,2 +np.float32,0x3f4e95f3,0x3f3cf483,2 +np.float32,0x15ca38,0x15ca38,2 +np.float32,0x803f01b2,0x803f01b2,2 +np.float32,0xbe731634,0xbe70dc10,2 +np.float32,0x3e80cee4,0x3e7ef933,2 +np.float32,0x3ef6dda5,0x3eee2e7b,2 +np.float32,0x3f3dfdc2,0x3f2fd5ed,2 +np.float32,0xff0492a7,0xc2b18411,2 +np.float32,0xbf1d0adf,0xbf148ff3,2 +np.float32,0xfcf75460,0xc2abd4e3,2 +np.float32,0x3f46fca6,0x3f36ffa6,2 +np.float32,0xbe63b5c0,0xbe61dfb3,2 +np.float32,0xff019bec,0xc2b1787d,2 +np.float32,0x801f14a9,0x801f14a9,2 +np.float32,0x3f176cfa,0x3f0fc051,2 +np.float32,0x3f69d976,0x3f51a015,2 +np.float32,0x3f4917cb,0x3f38a87a,2 +np.float32,0x3b2a0bea,0x3b2a0bdd,2 +np.float32,0xbf41d857,0xbf32eb50,2 +np.float32,0xbf08841a,0xbf02c18f,2 +np.float32,0x7ec86f14,0x42b0f4d0,2 +np.float32,0xbf7d15d1,0xbf5f9090,2 +np.float32,0xbd080550,0xbd07feea,2 +np.float32,0xbf6f1bef,0xbf557d26,2 +np.float32,0xfebc282c,0xc2b0d473,2 +np.float32,0x3e68d2f5,0x3e66dd03,2 +np.float32,0x3f3ed8fe,0x3f3085d5,2 +np.float32,0xff2f78ae,0xc2b2139a,2 +np.float32,0xff647a70,0xc2b29ac1,2 +np.float32,0xfd0859a0,0xc2ac06e2,2 +np.float32,0x3ea578a8,0x3ea2b7e1,2 +np.float32,0x6c58c6,0x6c58c6,2 +np.float32,0xff23f26a,0xc2b1f0d2,2 +np.float32,0x800902a4,0x800902a4,2 +np.float32,0xfe8ba64e,0xc2b03bcd,2 +np.float32,0x3f091143,0x3f033e0f,2 +np.float32,0x8017c4bd,0x8017c4bd,2 +np.float32,0xbf708fd4,0xbf568c8c,2 +np.float32,0x3be1d8,0x3be1d8,2 +np.float32,0x80091f07,0x80091f07,2 +np.float32,0x68eabe,0x68eabe,2 +np.float32,0xfe9ab2c8,0xc2b07033,2 +np.float32,0x3eabe752,0x3ea8d3d7,2 +np.float32,0xbf7adcb2,0xbf5dfaf5,2 +np.float32,0x801ecc01,0x801ecc01,2 +np.float32,0xbf5570a9,0xbf424123,2 +np.float32,0x3e89eecd,0x3e88510e,2 +np.float32,0xfeb2feee,0xc2b0bae4,2 +np.float32,0xbeb25ec2,0xbeaef22b,2 +np.float32,0x201e49,0x201e49,2 +np.float32,0x800a35f6,0x800a35f6,2 +np.float32,0xbf02d449,0xbefb6e2a,2 +np.float32,0x3f062bea,0x3f00aef6,2 +np.float32,0x7f5219ff,0x42b26fd2,2 +np.float32,0xbd4561d0,0xbd454e47,2 +np.float32,0x3f6c4789,0x3f536a4b,2 +np.float32,0x7f58b06d,0x42b27fa1,2 +np.float32,0x7f132f39,0x42b1b999,2 +np.float32,0x3e05dcb4,0x3e057bd8,2 +np.float32,0x7f526045,0x42b2707d,2 +np.float32,0x3f6117d0,0x3f4b1adb,2 +np.float32,0xbf21f47d,0xbf18bb57,2 
+np.float32,0x1a26d6,0x1a26d6,2 +np.float32,0x46b114,0x46b114,2 +np.float32,0x3eb24518,0x3eaed9ef,2 +np.float32,0xfe2139c8,0xc2af2278,2 +np.float32,0xbf7c36fb,0xbf5ef1f6,2 +np.float32,0x3f193834,0x3f114af7,2 +np.float32,0xff3ea650,0xc2b23e14,2 +np.float32,0xfeeb3bca,0xc2b146c7,2 +np.float32,0x7e8b8ca0,0x42b03b6f,2 +np.float32,0x3eed903d,0x3ee5c5d2,2 +np.float32,0xbdc73740,0xbdc6e72a,2 +np.float32,0x7e500307,0x42afa4ec,2 +np.float32,0xe003c,0xe003c,2 +np.float32,0x3e612bb4,0x3e5f64fd,2 +np.float32,0xfd81e248,0xc2ad50e6,2 +np.float32,0x766a4f,0x766a4f,2 +np.float32,0x3e8708c9,0x3e858414,2 +np.float32,0xbf206c58,0xbf176f7f,2 +np.float32,0x7e93aeb0,0x42b0586f,2 +np.float32,0xfd9d36b8,0xc2adb2ad,2 +np.float32,0xff1f4e0e,0xc2b1e21d,2 +np.float32,0x3f22bd5a,0x3f1964f8,2 +np.float32,0x7f6a517a,0x42b2a7ad,2 +np.float32,0xff6ca773,0xc2b2acc1,2 +np.float32,0x7f6bf453,0x42b2ab3d,2 +np.float32,0x3edfdd64,0x3ed9489f,2 +np.float32,0xbeafc5ba,0xbeac7daa,2 +np.float32,0x7d862039,0x42ad615b,2 +np.float32,0xbe9d2002,0xbe9ac1fc,2 +np.float32,0xbdcc54c0,0xbdcbfe5b,2 +np.float32,0xbf1bc0aa,0xbf13762a,2 +np.float32,0xbf4679ce,0xbf36984b,2 +np.float32,0x3ef45696,0x3eebe713,2 +np.float32,0xff6eb999,0xc2b2b137,2 +np.float32,0xbe4b2e4c,0xbe49dee8,2 +np.float32,0x3f498951,0x3f3901b7,2 +np.float32,0xbe9692f4,0xbe947be1,2 +np.float32,0xbf44ce26,0xbf3545c8,2 +np.float32,0x805787a8,0x805787a8,2 +np.float32,0xbf342650,0xbf27dc26,2 +np.float32,0x3edafbf0,0x3ed4cdd2,2 +np.float32,0x3f6fb858,0x3f55ef63,2 +np.float32,0xff227d0a,0xc2b1ec3f,2 +np.float32,0xfeb9a202,0xc2b0cd89,2 +np.float32,0x7f5b12c1,0x42b2853b,2 +np.float32,0x584578,0x584578,2 +np.float32,0x7ec0b76f,0x42b0e0b5,2 +np.float32,0x3f57f54b,0x3f442f10,2 +np.float32,0x7eef3620,0x42b14f5d,2 +np.float32,0x4525b5,0x4525b5,2 +np.float32,0x801bd407,0x801bd407,2 +np.float32,0xbed1f166,0xbecc7703,2 +np.float32,0x3f57e732,0x3f442449,2 +np.float32,0x80767cd5,0x80767cd5,2 +np.float32,0xbef1a7d2,0xbee97aa3,2 +np.float32,0x3dd5b1af,0x3dd54ee6,2 +np.float32,0x960c,0x960c,2 +np.float32,0x7c392d41,0x42a9ddd1,2 +np.float32,0x3f5c9a34,0x3f47b7c1,2 +np.float32,0x3f5cecee,0x3f47f667,2 +np.float32,0xbee482ce,0xbedd8899,2 +np.float32,0x8066ba7e,0x8066ba7e,2 +np.float32,0x7ed76127,0x42b119a2,2 +np.float32,0x805ca40b,0x805ca40b,2 +np.float32,0x7f5ed5d1,0x42b28df3,2 +np.float32,0xfe9e1b1e,0xc2b07b5b,2 +np.float32,0x3f0201a2,0x3ef9f6c4,2 +np.float32,0xbf2e6430,0xbf232039,2 +np.float32,0x80326b4d,0x80326b4d,2 +np.float32,0x3f11dc7c,0x3f0af06e,2 +np.float32,0xbe89c42e,0xbe8827e6,2 +np.float32,0x3f3c69f8,0x3f2e9133,2 +np.float32,0x806326a9,0x806326a9,2 +np.float32,0x3f1c5286,0x3f13f2b6,2 +np.float32,0xff5c0ead,0xc2b28786,2 +np.float32,0xff32b952,0xc2b21d01,2 +np.float32,0x7dd27c4e,0x42ae4815,2 +np.float32,0xbf7a6816,0xbf5da7a2,2 +np.float32,0xfeac72f8,0xc2b0a7d1,2 +np.float32,0x335ad7,0x335ad7,2 +np.float32,0xbe682da4,0xbe663bcc,2 +np.float32,0x3f2df244,0x3f22c208,2 +np.float32,0x80686e8e,0x80686e8e,2 +np.float32,0x7f50120f,0x42b26ad9,2 +np.float32,0x3dbc596a,0x3dbc15b3,2 +np.float32,0xbf4f2868,0xbf3d666d,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xff66c059,0xc2b29fd2,2 +np.float32,0xfe8bbcaa,0xc2b03c1f,2 +np.float32,0x3ece6a51,0x3ec93271,2 +np.float32,0x7f06cd26,0x42b18c9a,2 +np.float32,0x7e41e6dc,0x42af80f5,2 +np.float32,0x7d878334,0x42ad669f,2 +np.float32,0xfe8c5c4c,0xc2b03e67,2 +np.float32,0x337a05,0x337a05,2 +np.float32,0x3e63801d,0x3e61ab58,2 +np.float32,0x62c315,0x62c315,2 +np.float32,0x802aa888,0x802aa888,2 +np.float32,0x80038b43,0x80038b43,2 +np.float32,0xff5c1271,0xc2b2878f,2 
+np.float32,0xff4184a5,0xc2b245b9,2 +np.float32,0x7ef58f4b,0x42b15cc6,2 +np.float32,0x7f42d8ac,0x42b2493a,2 +np.float32,0x806609f2,0x806609f2,2 +np.float32,0x801e763b,0x801e763b,2 +np.float32,0x7f2bc073,0x42b208a2,2 +np.float32,0x801d7d7f,0x801d7d7f,2 +np.float32,0x7d415dc1,0x42acb9c2,2 +np.float32,0xbf624ff9,0xbf4c0502,2 +np.float32,0xbf603afd,0xbf4a74e2,2 +np.float32,0x8007fe42,0x8007fe42,2 +np.float32,0x800456db,0x800456db,2 +np.float32,0x620871,0x620871,2 +np.float32,0x3e9c6c1e,0x3e9a15fa,2 +np.float32,0x4245d,0x4245d,2 +np.float32,0x8035bde9,0x8035bde9,2 +np.float32,0xbf597418,0xbf45533c,2 +np.float32,0x3c730f80,0x3c730d38,2 +np.float32,0x3f7cd8ed,0x3f5f6540,2 +np.float32,0x807e49c3,0x807e49c3,2 +np.float32,0x3d6584c0,0x3d65660c,2 +np.float32,0xff42a744,0xc2b248b8,2 +np.float32,0xfedc6f56,0xc2b12583,2 +np.float32,0x806263a4,0x806263a4,2 +np.float32,0x175a17,0x175a17,2 +np.float32,0x3f1e8537,0x3f15d208,2 +np.float32,0x4055b5,0x4055b5,2 +np.float32,0x438aa6,0x438aa6,2 +np.float32,0x8038507f,0x8038507f,2 +np.float32,0xbed75348,0xbed16f85,2 +np.float32,0x7f07b7d6,0x42b19012,2 +np.float32,0xfe8b9d30,0xc2b03bac,2 +np.float32,0x805c501c,0x805c501c,2 +np.float32,0x3ef22b1d,0x3ee9f159,2 +np.float32,0x802b6759,0x802b6759,2 +np.float32,0x45281a,0x45281a,2 +np.float32,0xbf7e9970,0xbf60a3cf,2 +np.float32,0xbf14d152,0xbf0d8062,2 +np.float32,0x3d9ff950,0x3d9fcfc8,2 +np.float32,0x7865d9,0x7865d9,2 +np.float32,0xbee67fa4,0xbedf58eb,2 +np.float32,0x7dc822d1,0x42ae2e44,2 +np.float32,0x3f3af0fe,0x3f2d612c,2 +np.float32,0xbefea106,0xbef5274e,2 +np.float32,0xbf758a3f,0xbf5a28c5,2 +np.float32,0xbf331bdd,0xbf270209,2 +np.float32,0x7f51c901,0x42b26f0d,2 +np.float32,0x3f67c33b,0x3f5014d8,2 +np.float32,0xbbc9d980,0xbbc9d92c,2 +np.float32,0xbc407540,0xbc40741e,2 +np.float32,0x7eed9a3c,0x42b14be9,2 +np.float32,0x1be0fe,0x1be0fe,2 +np.float32,0xbf6b4913,0xbf52af1f,2 +np.float32,0xbda8eba8,0xbda8bac6,2 +np.float32,0x8004bcea,0x8004bcea,2 +np.float32,0xff6f6afe,0xc2b2b2b3,2 +np.float32,0xbf205810,0xbf175e50,2 +np.float32,0x80651944,0x80651944,2 +np.float32,0xbec73016,0xbec27a3f,2 +np.float32,0x5701b9,0x5701b9,2 +np.float32,0xbf1062ce,0xbf09a7df,2 +np.float32,0x3e0306ae,0x3e02abd1,2 +np.float32,0x7bfc62,0x7bfc62,2 +np.float32,0xbf48dd3c,0xbf387a6b,2 +np.float32,0x8009573e,0x8009573e,2 +np.float32,0x660a2c,0x660a2c,2 +np.float32,0xff2280da,0xc2b1ec4b,2 +np.float32,0xbf7034fe,0xbf564a54,2 +np.float32,0xbeeb448e,0xbee3b045,2 +np.float32,0xff4e949c,0xc2b2672b,2 +np.float32,0xbf3c4486,0xbf2e7309,2 +np.float32,0x7eb086d8,0x42b0b3c8,2 +np.float32,0x7eac8aca,0x42b0a817,2 +np.float32,0xfd3d2d60,0xc2acae8b,2 +np.float32,0xbf363226,0xbf2987bd,2 +np.float32,0x7f02e524,0x42b17d8c,2 +np.float32,0x8049a148,0x8049a148,2 +np.float32,0x147202,0x147202,2 +np.float32,0x8031d3f6,0x8031d3f6,2 +np.float32,0xfe78bf68,0xc2b0007d,2 +np.float32,0x7ebd16d0,0x42b0d6fb,2 +np.float32,0xbdaed2e8,0xbdae9cbb,2 +np.float32,0x802833ae,0x802833ae,2 +np.float32,0x7f62adf6,0x42b296b5,2 +np.float32,0xff2841c0,0xc2b1fe1b,2 +np.float32,0xbeb2c47e,0xbeaf523b,2 +np.float32,0x7e42a36e,0x42af82e6,2 +np.float32,0x41ea29,0x41ea29,2 +np.float32,0xbcaaa800,0xbcaaa4d7,2 +np.float64,0x3fed71f27ebae3e5,0x3fea5c6095012ca6,1 +np.float64,0x224dc392449b9,0x224dc392449b9,1 +np.float64,0x3fdf897a7d3f12f5,0x3fde620339360992,1 +np.float64,0xbfe1f99a5123f334,0xbfe124a57cfaf556,1 +np.float64,0xbfd9725c3bb2e4b8,0xbfd8d1e3f75110c7,1 +np.float64,0x3fe38977546712ee,0x3fe27d9d37f4b91f,1 +np.float64,0xbfc36c29e526d854,0xbfc3594743ee45c4,1 
+np.float64,0xbfe5cbec332b97d8,0xbfe4638802316849,1 +np.float64,0x2ff35efe5fe6d,0x2ff35efe5fe6d,1 +np.float64,0x7fd3f828e227f051,0x40862a7d4a40b1e0,1 +np.float64,0xffd06fc11620df82,0xc08628ee8f1bf6c8,1 +np.float64,0x3fe5321bf4aa6438,0x3fe3e3d9fa453199,1 +np.float64,0xffd07a323ca0f464,0xc08628f3a2930f8c,1 +np.float64,0x3fdf7abe7abef57c,0x3fde54cb193d49cb,1 +np.float64,0x40941f1881285,0x40941f1881285,1 +np.float64,0xffef18defc7e31bd,0xc0863393f2c9f061,1 +np.float64,0xbfe379f871e6f3f1,0xbfe270620cb68347,1 +np.float64,0xffec829848f90530,0xc08632e210edaa2b,1 +np.float64,0x80070c00574e1801,0x80070c00574e1801,1 +np.float64,0xffce7654b23ceca8,0xc086285291e89975,1 +np.float64,0x7fc9932daa33265a,0x408626ec6cc2b807,1 +np.float64,0x355ee98c6abde,0x355ee98c6abde,1 +np.float64,0x3fac54962c38a920,0x3fac50e40b6c19f2,1 +np.float64,0x800857984af0af31,0x800857984af0af31,1 +np.float64,0x7fea6a3d55f4d47a,0x40863245bf39f179,1 +np.float64,0x3fdb8fab33371f56,0x3fdac5ffc9e1c347,1 +np.float64,0x800a887a7bf510f5,0x800a887a7bf510f5,1 +np.float64,0xbfbdbda3c63b7b48,0xbfbdac9dd5a2d3e8,1 +np.float64,0xbfd4a2457b29448a,0xbfd44acb3b316d6d,1 +np.float64,0x7fd5329a502a6534,0x40862af789b528b5,1 +np.float64,0x3fd96a7bceb2d4f8,0x3fd8ca92104d6cd6,1 +np.float64,0x3fde6a0cd6bcd41a,0x3fdd5f4b85abf749,1 +np.float64,0xbfc7faaff32ff560,0xbfc7d7560b8c4a52,1 +np.float64,0x7fec381b2f787035,0x408632cd0e9c095c,1 +np.float64,0x1fc2eb543f85e,0x1fc2eb543f85e,1 +np.float64,0x7ac6000af58c1,0x7ac6000af58c1,1 +np.float64,0xffe060a87920c150,0xc0862e72c37d5a4e,1 +np.float64,0xbfb7d8c89e2fb190,0xbfb7cffd3c3f8e3a,1 +np.float64,0x3fd91033deb22068,0x3fd87695b067aa1e,1 +np.float64,0x3fec1aff01b835fe,0x3fe95d5cbd729af7,1 +np.float64,0x7fb97f69ec32fed3,0x4086215aaae5c697,1 +np.float64,0x7feaf1e4e5f5e3c9,0x4086326e6ca6a2bb,1 +np.float64,0x800537e44d0a6fc9,0x800537e44d0a6fc9,1 +np.float64,0x800b2a0d0d36541a,0x800b2a0d0d36541a,1 +np.float64,0x3fe2193846e43270,0x3fe140308550138e,1 +np.float64,0x5e2a0a32bc542,0x5e2a0a32bc542,1 +np.float64,0xffe5888b09eb1116,0xc08630a348783aa3,1 +np.float64,0xbfceb9b5033d736c,0xbfce701049c10435,1 +np.float64,0x7fe5d68589abad0a,0x408630c00ce63f23,1 +np.float64,0x8009b5457ff36a8b,0x8009b5457ff36a8b,1 +np.float64,0xbfb5518c2e2aa318,0xbfb54b42638ca718,1 +np.float64,0x3f9c58469838b080,0x3f9c575974fbcd7b,1 +np.float64,0x3fe8db4b4731b697,0x3fe6dc9231587966,1 +np.float64,0x8007d0f77f4fa1f0,0x8007d0f77f4fa1f0,1 +np.float64,0x7fe79eef542f3dde,0x40863160c673c67f,1 +np.float64,0xffbdc0b6163b8170,0xc0862296be4bf032,1 +np.float64,0x3fbb8d3312371a66,0x3fbb7fa76fb4cf8d,1 +np.float64,0xffd8a0eedbb141de,0xc0862c2ac6e512f0,1 +np.float64,0x7fee99d8d87d33b1,0x4086337301c4c8df,1 +np.float64,0xffe7479b552e8f36,0xc0863142fba0f0ec,1 +np.float64,0xffedf8ef4abbf1de,0xc08633488068fe69,1 +np.float64,0x895c4d9f12b8a,0x895c4d9f12b8a,1 +np.float64,0x29b4caf05369a,0x29b4caf05369a,1 +np.float64,0xbfefb90d657f721b,0xbfec01efa2425b35,1 +np.float64,0xde07c3bdbc0f9,0xde07c3bdbc0f9,1 +np.float64,0x7feae9fd02f5d3f9,0x4086326c1368ed5a,1 +np.float64,0x3feab792da756f26,0x3fe84f6e15338ed7,1 +np.float64,0xbfeff8ed72fff1db,0xbfec2f35da06daaf,1 +np.float64,0x8004b2c132896583,0x8004b2c132896583,1 +np.float64,0xbf9fcb00103f9600,0xbf9fc9b1751c569e,1 +np.float64,0x4182b72e83058,0x4182b72e83058,1 +np.float64,0x90820d812105,0x90820d812105,1 +np.float64,0xbfdec9a0ba3d9342,0xbfddb585df607ce1,1 +np.float64,0x7fdc0a69a03814d2,0x40862d347f201b63,1 +np.float64,0xbfef0708937e0e11,0xbfeb82d27f8ea97f,1 +np.float64,0xffda57e4ddb4afca,0xc0862cb49e2e0c4c,1 
+np.float64,0xbfa30b9af4261730,0xbfa30a7b4a633060,1 +np.float64,0x7feb57fcc4b6aff9,0x4086328c83957a0b,1 +np.float64,0x7fe6759153eceb22,0x408630f980433963,1 +np.float64,0x7fdd3278c8ba64f1,0x40862d87445243e9,1 +np.float64,0xd3b8e6b9a771d,0xd3b8e6b9a771d,1 +np.float64,0x6267dc88c4cfc,0x6267dc88c4cfc,1 +np.float64,0x7fedd3cf00bba79d,0x4086333e91712ff5,1 +np.float64,0xffbe512ce03ca258,0xc08622bd39314cea,1 +np.float64,0xbfe71742ca6e2e86,0xbfe572ccbf2d010d,1 +np.float64,0x8002fb048c65f60a,0x8002fb048c65f60a,1 +np.float64,0x800d9d9ddf7b3b3c,0x800d9d9ddf7b3b3c,1 +np.float64,0xbfeaf6230df5ec46,0xbfe87f5d751ec3d5,1 +np.float64,0xbfe69973a42d32e8,0xbfe50c680f7002fe,1 +np.float64,0x3fe309cf87e613a0,0x3fe21048714ce1ac,1 +np.float64,0x800435d17a286ba4,0x800435d17a286ba4,1 +np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,1 +np.float64,0x3fe36ade1766d5bc,0x3fe26379fb285dde,1 +np.float64,0x3f98d8d94831b1c0,0x3f98d839885dc527,1 +np.float64,0xbfd08f7ae5211ef6,0xbfd0618ab5293e1e,1 +np.float64,0xbfcf630bd53ec618,0xbfcf14a0cd20704d,1 +np.float64,0xbfe58f0ca6eb1e1a,0xbfe4312225df8e28,1 +np.float64,0xffef4f6406be9ec7,0xc08633a1ed1d27e5,1 +np.float64,0x7fe10120b3e20240,0x40862ebfaf94e6e8,1 +np.float64,0xffe96c52fbb2d8a5,0xc08631f75d9a59a0,1 +np.float64,0xbfe448a333e89146,0xbfe31fee44c3ec43,1 +np.float64,0x80045ff4e788bfeb,0x80045ff4e788bfeb,1 +np.float64,0x7fefaa2f823f545e,0x408633b8fea29524,1 +np.float64,0xffea6b8bf234d717,0xc0863246248e5960,1 +np.float64,0xbfdb085d80b610bc,0xbfda498b15b43eec,1 +np.float64,0xbfd5e12da3abc25c,0xbfd57970e2b8aecc,1 +np.float64,0x3fcc84928a390925,0x3fcc497c417a89f3,1 +np.float64,0xbfdcb713bf396e28,0xbfdbd46c5e731fd9,1 +np.float64,0xffdf50c0453ea180,0xc0862e16b5562f25,1 +np.float64,0x800342c2f7268587,0x800342c2f7268587,1 +np.float64,0x7feb8b6d743716da,0x4086329b8248de2c,1 +np.float64,0x800a9b18b4953632,0x800a9b18b4953632,1 +np.float64,0xffedaf0d12fb5e19,0xc0863334af82de1a,1 +np.float64,0x800aebda4ab5d7b5,0x800aebda4ab5d7b5,1 +np.float64,0xbfa9f5848433eb10,0xbfa9f2ac7ac065d4,1 +np.float64,0x3fea375928f46eb2,0x3fe7ec9f10eeac7d,1 +np.float64,0x3fd6c213fead8428,0x3fd64dcc1eff5f1b,1 +np.float64,0xbfa0476f44208ee0,0xbfa046bb986007ac,1 +np.float64,0x6c8e18aed91c4,0x6c8e18aed91c4,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x7fea86b5ba350d6a,0x4086324e59f13027,1 +np.float64,0x2316c3b0462d9,0x2316c3b0462d9,1 +np.float64,0x3fec4e3281389c65,0x3fe983c5c9d65940,1 +np.float64,0x3fbb87c47f772,0x3fbb87c47f772,1 +np.float64,0x8004af00fdc95e03,0x8004af00fdc95e03,1 +np.float64,0xbfd316db9ba62db8,0xbfd2d12765b9d155,1 +np.float64,0x3fec1a7a99f834f6,0x3fe95cf941889b3d,1 +np.float64,0x3feff7e1477fefc3,0x3fec2e782392d4b9,1 +np.float64,0xbfc683ea042d07d4,0xbfc66698cfa5026e,1 +np.float64,0x3fdbc8aaa9b79154,0x3fdafa50e6fc3fff,1 +np.float64,0xfb3b630ff676d,0xfb3b630ff676d,1 +np.float64,0x7fe715ef8eae2bde,0x40863131d794b41f,1 +np.float64,0x7fefa06c11bf40d7,0x408633b686c7996a,1 +np.float64,0x80002a40f5205483,0x80002a40f5205483,1 +np.float64,0x7fe95f3c74b2be78,0x408631f33e37bf76,1 +np.float64,0x3fb2977b32252ef0,0x3fb2934eaf5a4be8,1 +np.float64,0x3fc0f3dbc821e7b8,0x3fc0e745288c84c3,1 +np.float64,0x3fda98da56b531b5,0x3fd9e2b19447dacc,1 +np.float64,0x3f95b9d5202b73aa,0x3f95b96a53282949,1 +np.float64,0x3fdc1ace7738359d,0x3fdb4597d31df7ff,1 +np.float64,0xffeac5bb2e358b76,0xc0863261452ab66c,1 +np.float64,0xbfefb1b78f7f636f,0xbfebfcb9be100ced,1 +np.float64,0xf5c9e191eb93c,0xf5c9e191eb93c,1 +np.float64,0x3fe83a977630752f,0x3fe65d0df90ff6ef,1 +np.float64,0x3fc317515d262ea0,0x3fc3056072b719f0,1 
+np.float64,0x7fe2dcfab225b9f4,0x40862f94257c28a2,1 +np.float64,0xca2b115794562,0xca2b115794562,1 +np.float64,0x3fd495301aa92a60,0x3fd43e57108761d5,1 +np.float64,0x800ccc4293199885,0x800ccc4293199885,1 +np.float64,0xc8d3173d91a63,0xc8d3173d91a63,1 +np.float64,0xbf2541bb7e4a8,0xbf2541bb7e4a8,1 +np.float64,0xbfe9a330df334662,0xbfe779816573f5be,1 +np.float64,0xffd5e4c8252bc990,0xc0862b39b3ca5d72,1 +np.float64,0x3fe90f3a53721e75,0x3fe70585ae09531d,1 +np.float64,0xbfe2b5ddc7a56bbc,0xbfe1c7fa91a675ed,1 +np.float64,0xbf981a0360303400,0xbf9819719345073a,1 +np.float64,0x19174b0e322ea,0x19174b0e322ea,1 +np.float64,0xbfd2f71a1725ee34,0xbfd2b2b6f7cd10b1,1 +np.float64,0x80056e83236add07,0x80056e83236add07,1 +np.float64,0x7fe4bc41d9697883,0x40863055f20ce0cb,1 +np.float64,0xffe76e06c46edc0d,0xc086315024b25559,1 +np.float64,0x3fe3c4f0f96789e2,0x3fe2b04b584609bf,1 +np.float64,0x3fe6cfc533ed9f8a,0x3fe538b4d784d5ee,1 +np.float64,0x7fd234a640a4694c,0x408629bfead4f0b2,1 +np.float64,0x3fdbc49c9ab78939,0x3fdaf698a83d08e2,1 +np.float64,0x3fe4c5336ee98a66,0x3fe388c6ddb60e0a,1 +np.float64,0xf4b9497be9729,0xf4b9497be9729,1 +np.float64,0x3fb312be12262580,0x3fb30e3c847c1d16,1 +np.float64,0x3fe9554218f2aa84,0x3fe73c8b311c7a98,1 +np.float64,0xff899816a0333040,0xc08610bfb2cd8559,1 +np.float64,0x8006008ad52c0116,0x8006008ad52c0116,1 +np.float64,0x3fd7d47be4afa8f8,0x3fd74fa71ec17fd0,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xdf2a9943be553,0xdf2a9943be553,1 +np.float64,0xbfeb86bf1eb70d7e,0xbfe8ed797580ba5c,1 +np.float64,0x800e2c0c28bc5818,0x800e2c0c28bc5818,1 +np.float64,0xbfe2be65d4657ccc,0xbfe1cf578dec2323,1 +np.float64,0xbfedea3a5afbd475,0xbfeab490bf05e585,1 +np.float64,0xbfe04b1583a0962b,0xbfdf523dfd7be25c,1 +np.float64,0x75929bb4eb254,0x75929bb4eb254,1 +np.float64,0x3fd7b4968caf692d,0x3fd731c0938ff97c,1 +np.float64,0x60bd8fd2c17b3,0x60bd8fd2c17b3,1 +np.float64,0xbfdaf15e70b5e2bc,0xbfda345a95ce18fe,1 +np.float64,0x7fdd7c35c2baf86b,0x40862d9b5f40c6b2,1 +np.float64,0x7feeb4d2ab7d69a4,0x4086337a0c0dffaf,1 +np.float64,0xffe65b5a1decb6b4,0xc08630f024420efb,1 +np.float64,0x7feb272b30764e55,0x4086327e2e553aa2,1 +np.float64,0x3fd27513e8a4ea28,0x3fd235ea49670f6a,1 +np.float64,0x3fe6541a6aeca834,0x3fe4d3a5b69fd1b6,1 +np.float64,0xbfe0c6ca0f618d94,0xbfe017058259efdb,1 +np.float64,0x7fc1bf07b7237e0e,0x4086240000fa5a52,1 +np.float64,0x7fe96af9c0f2d5f3,0x408631f6f0f4faa2,1 +np.float64,0x3fe0728be7a0e518,0x3fdf9881a5869de9,1 +np.float64,0xffe8ea4441b1d488,0xc08631ce0685ae7e,1 +np.float64,0xffd0b973f02172e8,0xc08629121e7fdf85,1 +np.float64,0xffe37b907a26f720,0xc0862fd6529401a0,1 +np.float64,0x3fe0ee826461dd05,0x3fe03a2a424a1b40,1 +np.float64,0xbfe8073c92300e79,0xbfe6340cbd179ac1,1 +np.float64,0x800768383f8ed071,0x800768383f8ed071,1 +np.float64,0x8002e467c7c5c8d0,0x8002e467c7c5c8d0,1 +np.float64,0xbfd8d53ea5b1aa7e,0xbfd83fa7243289d7,1 +np.float64,0xffebefce2bb7df9c,0xc08632b874f4f8dc,1 +np.float64,0xffe3be9eb9277d3d,0xc0862ff1ac70ad0b,1 +np.float64,0xffe2f8a82e65f150,0xc0862f9fd9e77d86,1 +np.float64,0xbfa01d151c203a30,0xbfa01c66dc13a70a,1 +np.float64,0x800877062d30ee0d,0x800877062d30ee0d,1 +np.float64,0xaade16a755bc3,0xaade16a755bc3,1 +np.float64,0xbfeb1abc70363579,0xbfe89b52c3b003aa,1 +np.float64,0x80097d0b2ad2fa17,0x80097d0b2ad2fa17,1 +np.float64,0x8001499907429333,0x8001499907429333,1 +np.float64,0x3fe8db2aaf71b656,0x3fe6dc7873f1b235,1 +np.float64,0x5cfeadc4b9fd6,0x5cfeadc4b9fd6,1 +np.float64,0xff3f77d1fe7ef,0xff3f77d1fe7ef,1 +np.float64,0xffeecd56f9bd9aad,0xc08633806cb1163d,1 
+np.float64,0xbf96f3ca582de7a0,0xbf96f34c6b8e1c85,1 +np.float64,0x7ed6b44afdad7,0x7ed6b44afdad7,1 +np.float64,0x80071808da4e3012,0x80071808da4e3012,1 +np.float64,0x3feb8aee2bf715dc,0x3fe8f0a55516615c,1 +np.float64,0x800038f62e2071ed,0x800038f62e2071ed,1 +np.float64,0x3fb13f9af2227f30,0x3fb13c456ced8e08,1 +np.float64,0xffd584d1812b09a4,0xc0862b165558ec0c,1 +np.float64,0x800b20c30fb64186,0x800b20c30fb64186,1 +np.float64,0x80024f9646e49f2d,0x80024f9646e49f2d,1 +np.float64,0xffefffffffffffff,0xc08633ce8fb9f87e,1 +np.float64,0x3fdddbcb5bbbb797,0x3fdcde981111f650,1 +np.float64,0xffed14077f3a280e,0xc086330a795ad634,1 +np.float64,0x800fec2da7ffd85b,0x800fec2da7ffd85b,1 +np.float64,0x3fe8205ffc7040c0,0x3fe6482318d217f9,1 +np.float64,0x3013e5226027d,0x3013e5226027d,1 +np.float64,0xffe4e5aad469cb55,0xc0863065dc2fb4e3,1 +np.float64,0x5cb0f7b2b9620,0x5cb0f7b2b9620,1 +np.float64,0xbfeb4537d2768a70,0xbfe8bbb2c1d3bff9,1 +np.float64,0xbfd859e297b0b3c6,0xbfd7cc807948bf9d,1 +np.float64,0x71f00b8ce3e02,0x71f00b8ce3e02,1 +np.float64,0xf5c1b875eb837,0xf5c1b875eb837,1 +np.float64,0xa0f35c8141e8,0xa0f35c8141e8,1 +np.float64,0xffe24860b42490c1,0xc0862f54222f616e,1 +np.float64,0xffcd9ae8583b35d0,0xc08628181e643a42,1 +np.float64,0x7fe9b710c7736e21,0x4086320ec033490f,1 +np.float64,0x3fd2b9ca1d257394,0x3fd277e631f0c0b3,1 +np.float64,0x23559bfc46ab4,0x23559bfc46ab4,1 +np.float64,0x8002adf75e455bef,0x8002adf75e455bef,1 +np.float64,0xbfefa4d75cbf49af,0xbfebf392e51d6a1a,1 +np.float64,0xffcfef263e3fde4c,0xc08628b336adb611,1 +np.float64,0x80061acaa8ec3596,0x80061acaa8ec3596,1 +np.float64,0x7fc1b33be0236677,0x408623faaddcc17e,1 +np.float64,0x7fe3a84083675080,0x40862fe8972e41e1,1 +np.float64,0xbfe756c1276ead82,0xbfe5a6318b061e1b,1 +np.float64,0xbfae4b71b43c96e0,0xbfae46ed0b6203a4,1 +np.float64,0x800421c6d0a8438e,0x800421c6d0a8438e,1 +np.float64,0x8009ad56fe335aae,0x8009ad56fe335aae,1 +np.float64,0xbfe71afc976e35f9,0xbfe575d21f3d7193,1 +np.float64,0x7fec0bbe4c38177c,0x408632c0710f1d8a,1 +np.float64,0x750e1daeea1c4,0x750e1daeea1c4,1 +np.float64,0x800501d4240a03a9,0x800501d4240a03a9,1 +np.float64,0x800794955cef292b,0x800794955cef292b,1 +np.float64,0x3fdf8a87f5bf1510,0x3fde62f4f00cfa19,1 +np.float64,0xbfebebdbc7f7d7b8,0xbfe939e51ba1340c,1 +np.float64,0xbfe3a16217a742c4,0xbfe292039dd08a71,1 +np.float64,0x3fed6cd04c3ad9a1,0x3fea58995973f74b,1 +np.float64,0xffcad8787335b0f0,0xc086274fbb35dd37,1 +np.float64,0x3fcb178e3d362f1c,0x3fcae4c9f3e6dddc,1 +np.float64,0xbfcadc669435b8cc,0xbfcaaae7cf075420,1 +np.float64,0x7fe0e3906321c720,0x40862eb1bacc5c43,1 +np.float64,0xff8ad5edb035abc0,0xc0861120b6404d0b,1 +np.float64,0x3fe175a21562eb44,0x3fe0b13120a46549,1 +np.float64,0xbfeb4c4a5f769895,0xbfe8c1147f1c9d8f,1 +np.float64,0x7fca22f4e63445e9,0x40862718e9b4094e,1 +np.float64,0x3fe4269d0c684d3a,0x3fe3032aa2015c53,1 +np.float64,0x3fef551c09beaa38,0x3febbabe03f49c83,1 +np.float64,0xffd843df9fb087c0,0xc0862c0c52d5e5d9,1 +np.float64,0x7fc497e2ca292fc5,0x40862530bbd9fcc7,1 +np.float64,0x3fee02919efc0523,0x3feac655588a4acd,1 +np.float64,0x7fed1e52c0fa3ca5,0x4086330d4ddd8a2c,1 +np.float64,0xba04d4ef7409b,0xba04d4ef7409b,1 +np.float64,0x3fee22d0937c45a2,0x3feaddd4ca66b447,1 +np.float64,0xffeb2558cf764ab1,0xc086327da4e84053,1 +np.float64,0xbfe103d987e207b3,0xbfe04d04818ad1ff,1 +np.float64,0x3f9fd7fed03faffe,0x3f9fd6ae9a45be84,1 +np.float64,0x800a53ec4c34a7d9,0x800a53ec4c34a7d9,1 +np.float64,0xbfe2feb17f65fd63,0xbfe206b9d33a78a2,1 +np.float64,0x989bdd613139,0x989bdd613139,1 +np.float64,0xbfdd0ad3fb3a15a8,0xbfdc20c32a530741,1 
+np.float64,0xbfc4222163284444,0xbfc40d1c612784b5,1 +np.float64,0xc30cf5c78619f,0xc30cf5c78619f,1 +np.float64,0x3fe913bd6732277b,0x3fe70912f76bad71,1 +np.float64,0x98f175f531e2f,0x98f175f531e2f,1 +np.float64,0x3fed8c1f717b183f,0x3fea6f9fb3af3423,1 +np.float64,0x7fee46b085bc8d60,0x4086335d269eb7e9,1 +np.float64,0x8007480f564e901f,0x8007480f564e901f,1 +np.float64,0xc9b96e179372e,0xc9b96e179372e,1 +np.float64,0x3fe44deac4289bd6,0x3fe32463a74a69e7,1 +np.float64,0x80021d6c5c243ad9,0x80021d6c5c243ad9,1 +np.float64,0xbfebc805a6f7900b,0xbfe91edcf65a1c19,1 +np.float64,0x80044748adc88e92,0x80044748adc88e92,1 +np.float64,0x4007ee44800fe,0x4007ee44800fe,1 +np.float64,0xbfe24307a4648610,0xbfe1648ad5c47b6f,1 +np.float64,0xbfee6d3a93fcda75,0xbfeb13e1a3196e78,1 +np.float64,0x3fe49a287f293451,0x3fe364a11b9f0068,1 +np.float64,0x80052b37ceaa5670,0x80052b37ceaa5670,1 +np.float64,0xbfd42be893a857d2,0xbfd3da05dac7c286,1 +np.float64,0xffb4bbe4ac2977c8,0xc0861fb31bda6956,1 +np.float64,0xbfc732a4142e6548,0xbfc7129a4eafa399,1 +np.float64,0x7fd0696791a0d2ce,0x408628eb7756cb9c,1 +np.float64,0x3fe46c8f8d68d91f,0x3fe33e3df16187c1,1 +np.float64,0x3fe3a28f1ce7451e,0x3fe293043238d08c,1 +np.float64,0xffedc4eb723b89d6,0xc086333a92258c15,1 +np.float64,0x8000d15b4c41a2b7,0x8000d15b4c41a2b7,1 +np.float64,0xffeb73450236e689,0xc08632947b0148ab,1 +np.float64,0xffe68cf4722d19e8,0xc0863101d08d77bd,1 +np.float64,0x800c70eb4698e1d7,0x800c70eb4698e1d7,1 +np.float64,0xffa94387ff529,0xffa94387ff529,1 +np.float64,0x7fe3835d996706ba,0x40862fd985ff8e7d,1 +np.float64,0x3fe55e476feabc8e,0x3fe408a15594ec52,1 +np.float64,0xffc69672222d2ce4,0xc08625ee0c4c0f6a,1 +np.float64,0xbf9d900b883b2020,0xbf9d8efe811d36df,1 +np.float64,0xbfdb9b9755b7372e,0xbfdad0f2aa2cb110,1 +np.float64,0xffeade6073b5bcc0,0xc08632689f17a25d,1 +np.float64,0xffd1d6a6baa3ad4e,0xc086299630a93a7b,1 +np.float64,0x7fd05ba25620b744,0x408628e4be1ef845,1 +np.float64,0xbfc7d422d52fa844,0xbfc7b170a61531bf,1 +np.float64,0x3fd5196797aa32d0,0x3fd4bc0f0e7d8e1d,1 +np.float64,0x617594a4c2eb3,0x617594a4c2eb3,1 +np.float64,0x7fd779bc4caef378,0x40862bc89271b882,1 +np.float64,0xffd2fb262ba5f64c,0xc0862a15561e9524,1 +np.float64,0x72fd661ae5fad,0x72fd661ae5fad,1 +np.float64,0x3fecf441f339e884,0x3fe9ff880d584f64,1 +np.float64,0x7fc3a8968827512c,0x408624d198b05c61,1 +np.float64,0x3fe7a25c56ef44b9,0x3fe5e32509a7c32d,1 +np.float64,0x7fd117d514222fa9,0x4086293ec640d5f2,1 +np.float64,0x3fe37dfe5ee6fbfc,0x3fe273d1bcaa1ef0,1 +np.float64,0xbfed4cd19d7a99a3,0xbfea41064cba4c8b,1 +np.float64,0x8003ff12aaa7fe26,0x8003ff12aaa7fe26,1 +np.float64,0x3fcbc3d1193787a2,0x3fcb8d39e3e88264,1 +np.float64,0xe9ba1a91d3744,0xe9ba1a91d3744,1 +np.float64,0x8002ab71998556e4,0x8002ab71998556e4,1 +np.float64,0x800110057922200c,0x800110057922200c,1 +np.float64,0xbfe3b7af19a76f5e,0xbfe2a502fc0a2882,1 +np.float64,0x7fd9de9d5e33bd3a,0x40862c8f73cccabf,1 +np.float64,0xbfba0f0a86341e18,0xbfba0392f44c2771,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7fe5d162e96ba2c5,0x408630be2b15e01b,1 +np.float64,0x800b7f0eac76fe1e,0x800b7f0eac76fe1e,1 +np.float64,0xff98bed150317da0,0xc086160633164f5f,1 +np.float64,0x3fef91fd70ff23fb,0x3febe629709d0ae7,1 +np.float64,0x7fe5bea7f16b7d4f,0x408630b749f445e9,1 +np.float64,0xbfe3dc428467b885,0xbfe2c41ea93fab07,1 +np.float64,0xbfeba1fbfcf743f8,0xbfe9021b52851bb9,1 +np.float64,0x7fd2fb2108a5f641,0x40862a1553f45830,1 +np.float64,0x7feb8199a4370332,0x40863298a7169dad,1 +np.float64,0x800f97ff8d7f2fff,0x800f97ff8d7f2fff,1 +np.float64,0x3fd5e20b6b2bc417,0x3fd57a42bd1c0993,1 
+np.float64,0x8006b4072dad680f,0x8006b4072dad680f,1 +np.float64,0x605dccf2c0bba,0x605dccf2c0bba,1 +np.float64,0x3fc705ed142e0bda,0x3fc6e69971d86f73,1 +np.float64,0xffd2ba1aad257436,0xc08629f9bc918f8b,1 +np.float64,0x8002954e23c52a9d,0x8002954e23c52a9d,1 +np.float64,0xbfecc65da7798cbb,0xbfe9dd745be18562,1 +np.float64,0x7fc66110482cc220,0x408625db0db57ef8,1 +np.float64,0x3fcd09446d3a1289,0x3fcccaf2dd0a41ea,1 +np.float64,0x3febe7095437ce13,0x3fe93642d1e73b2a,1 +np.float64,0x8004773c7da8ee7a,0x8004773c7da8ee7a,1 +np.float64,0x8001833241230665,0x8001833241230665,1 +np.float64,0x3fe6a262db6d44c6,0x3fe513b3dab5adce,1 +np.float64,0xe6282cc1cc506,0xe6282cc1cc506,1 +np.float64,0x800b9d8553973b0b,0x800b9d8553973b0b,1 +np.float64,0x3fdfbe0c7b3f7c19,0x3fde912375d867a8,1 +np.float64,0x7fd5ac11ebab5823,0x40862b24dfc6d08e,1 +np.float64,0x800e4b7cb1fc96f9,0x800e4b7cb1fc96f9,1 +np.float64,0x3fe14706da628e0e,0x3fe0883aec2a917a,1 +np.float64,0x7fc963f97532c7f2,0x408626dd9b0cafe1,1 +np.float64,0xbfe9c250b5b384a2,0xbfe791c5eabcb05d,1 +np.float64,0x3fe8d16e6c71a2dd,0x3fe6d4c7a33a0bf4,1 +np.float64,0x3fe474ae4628e95d,0x3fe34515c93f4733,1 +np.float64,0x3fbf3257ee3e64b0,0x3fbf1eb530e126ea,1 +np.float64,0x8005f089b3abe114,0x8005f089b3abe114,1 +np.float64,0x3fece07bccf9c0f8,0x3fe9f0dc228124d5,1 +np.float64,0xbfc52521632a4a44,0xbfc50ccebdf59c2c,1 +np.float64,0x7fdf53beb13ea77c,0x40862e177918195e,1 +np.float64,0x8003d9f6ad07b3ee,0x8003d9f6ad07b3ee,1 +np.float64,0xffeacf96bbb59f2d,0xc086326436b38b1a,1 +np.float64,0xdccaea29b995e,0xdccaea29b995e,1 +np.float64,0x5948d21eb291b,0x5948d21eb291b,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x7fef6d2c543eda58,0x408633a98593cdf5,1 +np.float64,0x7feda454f47b48a9,0x40863331cb6dc9f7,1 +np.float64,0x3fdd377cecba6ef8,0x3fdc4968f74a9c83,1 +np.float64,0x800644096d4c8814,0x800644096d4c8814,1 +np.float64,0xbfe33ca15ae67942,0xbfe23be5de832bd8,1 +np.float64,0xffce9582bd3d2b04,0xc086285abdf9bf9d,1 +np.float64,0x3fe6621e86acc43d,0x3fe4df231bfa93e1,1 +np.float64,0xee7d19e9dcfa3,0xee7d19e9dcfa3,1 +np.float64,0x800be5997277cb33,0x800be5997277cb33,1 +np.float64,0x82069041040e,0x82069041040e,1 +np.float64,0x800d6efdc19addfc,0x800d6efdc19addfc,1 +np.float64,0x7fb27770ee24eee1,0x40861ec5ed91b839,1 +np.float64,0x3fd506064caa0c0d,0x3fd4a9a66353fefd,1 +np.float64,0xbfeca9b36bf95367,0xbfe9c81f03ba37b8,1 +np.float64,0xffeab1b7bab5636f,0xc086325b47f61f2b,1 +np.float64,0xffc99f5b2e333eb8,0xc08626f03b08b412,1 +np.float64,0x3fbf1a71bc3e34e3,0x3fbf06fbcaa5de58,1 +np.float64,0x3fe75015736ea02b,0x3fe5a0cd8d763d8d,1 +np.float64,0xffe6a7442fad4e88,0xc086310b20addba4,1 +np.float64,0x3fe5d62ff86bac60,0x3fe46c033195bf28,1 +np.float64,0x7fd0b1f0362163df,0x4086290e857dc1be,1 +np.float64,0xbe0353737c06b,0xbe0353737c06b,1 +np.float64,0x7fec912d8739225a,0x408632e627704635,1 +np.float64,0xded8ba2fbdb18,0xded8ba2fbdb18,1 +np.float64,0x7fec0b53fdf816a7,0x408632c052bc1bd2,1 +np.float64,0x7fe9640d12b2c819,0x408631f4c2ba54d8,1 +np.float64,0x800be714eeb7ce2a,0x800be714eeb7ce2a,1 +np.float64,0xbfcf444a793e8894,0xbfcef6c126b54853,1 +np.float64,0xffeb20cf1bf6419e,0xc086327c4e6ffe80,1 +np.float64,0xc07de22180fd,0xc07de22180fd,1 +np.float64,0xffed129d387a253a,0xc086330a15ad0adb,1 +np.float64,0x3fd9e94fedb3d2a0,0x3fd94049924706a8,1 +np.float64,0x7fe6ba488c2d7490,0x40863111d51e7861,1 +np.float64,0xbfebbdf25db77be5,0xbfe91740ad7ba521,1 +np.float64,0x7fbc6c3c4838d878,0x40862239160cb613,1 +np.float64,0xbfefa82ecebf505e,0xbfebf5f31957dffd,1 +np.float64,0x800bebeb7ad7d7d7,0x800bebeb7ad7d7d7,1 
+np.float64,0x7fecccc6f8f9998d,0x408632f6c6da8aac,1 +np.float64,0xcbe4926197ca,0xcbe4926197ca,1 +np.float64,0x2c5d9fd858bb5,0x2c5d9fd858bb5,1 +np.float64,0xbfe9fb021073f604,0xbfe7bddc61f1151a,1 +np.float64,0xbfebb18572f7630b,0xbfe90ddc5002313f,1 +np.float64,0x13bb0d3227763,0x13bb0d3227763,1 +np.float64,0x3feefa5e5cbdf4bd,0x3feb79b9e8ce16bf,1 +np.float64,0x3fc97f086132fe10,0x3fc9549fc8e15ecb,1 +np.float64,0xffe70887c06e110f,0xc086312d30fd31cf,1 +np.float64,0xa00c113540182,0xa00c113540182,1 +np.float64,0x800950984772a131,0x800950984772a131,1 +np.float64,0x1,0x1,1 +np.float64,0x3fd83b4026b07680,0x3fd7afdc659d9a34,1 +np.float64,0xbfe32348fbe64692,0xbfe226292a706a1a,1 +np.float64,0x800b894dcc77129c,0x800b894dcc77129c,1 +np.float64,0xeb2ca419d6595,0xeb2ca419d6595,1 +np.float64,0xbff0000000000000,0xbfec34366179d427,1 +np.float64,0x3feb269e99f64d3d,0x3fe8a4634b927a21,1 +np.float64,0xbfe83149d7706294,0xbfe655a2b245254e,1 +np.float64,0xbfe6eef3ca6ddde8,0xbfe5521310e24d16,1 +np.float64,0x3fea89a4b7b51349,0x3fe82c1fc69edcec,1 +np.float64,0x800f2a8bf17e5518,0x800f2a8bf17e5518,1 +np.float64,0x800f71fac29ee3f6,0x800f71fac29ee3f6,1 +np.float64,0xe7cb31f1cf966,0xe7cb31f1cf966,1 +np.float64,0x3b0f8752761f2,0x3b0f8752761f2,1 +np.float64,0x3fea27dea3744fbd,0x3fe7e0a4705476b2,1 +np.float64,0xbfa97c019c32f800,0xbfa97950c1257b92,1 +np.float64,0xffeff13647ffe26c,0xc08633cadc7105ed,1 +np.float64,0x3feee162353dc2c4,0x3feb67c2da0fbce8,1 +np.float64,0x80088c0807911810,0x80088c0807911810,1 +np.float64,0x3fe936ab1db26d56,0x3fe72489bc69719d,1 +np.float64,0xa2f84bd545f0a,0xa2f84bd545f0a,1 +np.float64,0xbfed445ed27a88be,0xbfea3acac0aaf482,1 +np.float64,0x800faf3e69df5e7d,0x800faf3e69df5e7d,1 +np.float64,0x3fc145a330228b46,0x3fc13853f11b1c90,1 +np.float64,0xbfe25ec5abe4bd8c,0xbfe17c9e9b486f07,1 +np.float64,0x3fe119b160e23363,0x3fe0604b10178966,1 +np.float64,0x7fe0cbf2836197e4,0x40862ea6831e5f4a,1 +np.float64,0x3fe75dd3b4eebba8,0x3fe5abe80fd628fb,1 +np.float64,0x3f7c391000387220,0x3f7c39015d8f3a36,1 +np.float64,0x899d9cad133b4,0x899d9cad133b4,1 +np.float64,0x3fe5f0e34febe1c6,0x3fe4820cefe138fc,1 +np.float64,0x7fe060dfdba0c1bf,0x40862e72de8afcd0,1 +np.float64,0xbfae42f7103c85f0,0xbfae3e7630819c60,1 +np.float64,0x35f1f2c06be5,0x35f1f2c06be5,1 +np.float64,0xffc5194d362a329c,0xc086256266c8b7ad,1 +np.float64,0xbfda034f1b34069e,0xbfd95860a44c43ad,1 +np.float64,0x32bcebca6579e,0x32bcebca6579e,1 +np.float64,0xbfd1751ebca2ea3e,0xbfd13f79f45bf75c,1 +np.float64,0x3fee4fa1e5bc9f44,0x3feafe69e0d6c1c7,1 +np.float64,0x7f9c03cd5038079a,0x4086170459172900,1 +np.float64,0x7fc5fb6d6d2bf6da,0x408625b6651cfc73,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xffd1a8162ca3502c,0xc0862981333931ad,1 +np.float64,0x7fc415c198282b82,0x408624fd8c155d1b,1 +np.float64,0xffda37fbe7b46ff8,0xc0862caae7865c43,1 +np.float64,0xbfef4312257e8624,0xbfebadd89f3ee31c,1 +np.float64,0xbfec45e1fd788bc4,0xbfe97d8b14db6274,1 +np.float64,0xbfe6fdcfd26dfba0,0xbfe55e25b770d00a,1 +np.float64,0x7feb66d424f6cda7,0x40863290d9ff7ea2,1 +np.float64,0x8b08a29916115,0x8b08a29916115,1 +np.float64,0xffe12ca25c625944,0xc0862ed40d769f72,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x804925e100925,0x804925e100925,1 +np.float64,0xcebf3e019d9,0xcebf3e019d9,1 +np.float64,0xbfd5d75d4aabaeba,0xbfd57027671dedf7,1 +np.float64,0x800b829ecd37053e,0x800b829ecd37053e,1 +np.float64,0x800b1205daf6240c,0x800b1205daf6240c,1 +np.float64,0x3fdf7e9889befd31,0x3fde583fdff406c3,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 
+np.float64,0x3fdc09760d3812ec,0x3fdb35b55c8090c6,1 +np.float64,0x800c4d99e4f89b34,0x800c4d99e4f89b34,1 +np.float64,0xffbaa6772e354cf0,0xc08621b535badb2f,1 +np.float64,0xbfc91188fd322310,0xbfc8e933b5d25ea7,1 +np.float64,0xffc1b947f4237290,0xc08623fd69164251,1 +np.float64,0x3fc6ab3b252d5678,0x3fc68d50bbac106d,1 +np.float64,0xffac8eb968391d70,0xc0861cb734833355,1 +np.float64,0xffe29a35c365346b,0xc0862f77a1aed6d8,1 +np.float64,0x3fde14b9543c2973,0x3fdd122697779015,1 +np.float64,0xbf10f5400021e000,0xbf10f53fffef1383,1 +np.float64,0xffe0831aa3e10635,0xc0862e838553d0ca,1 +np.float64,0x3fccbadbcf3975b8,0x3fcc7e768d0154ec,1 +np.float64,0x3fe092ef66e125df,0x3fdfd212a7116c9b,1 +np.float64,0xbfd727f039ae4fe0,0xbfd6adad040b2334,1 +np.float64,0xbfe4223b93a84477,0xbfe2ff7587364db4,1 +np.float64,0x3f4e5c3a003cb874,0x3f4e5c39b75c70f7,1 +np.float64,0x800e76b1a87ced63,0x800e76b1a87ced63,1 +np.float64,0x3fed2b7368fa56e7,0x3fea2863b9131b8c,1 +np.float64,0xffadb76ec43b6ee0,0xc0861d08ae79f20c,1 +np.float64,0x800b6a0cd1f6d41a,0x800b6a0cd1f6d41a,1 +np.float64,0xffee6aa943fcd552,0xc0863366a24250d5,1 +np.float64,0xbfe68cbc4e6d1978,0xbfe502040591aa5b,1 +np.float64,0xff859a38002b3480,0xc0860f64726235cc,1 +np.float64,0x3474d13e68e9b,0x3474d13e68e9b,1 +np.float64,0xffc11d49f6223a94,0xc08623b5c2df9712,1 +np.float64,0x800d82d019bb05a0,0x800d82d019bb05a0,1 +np.float64,0xbfe2af0192255e03,0xbfe1c20e38106388,1 +np.float64,0x3fe97d13c032fa28,0x3fe75bba11a65f86,1 +np.float64,0x7fcd457e133a8afb,0x40862800e80f5863,1 +np.float64,0x9d7254cf3ae4b,0x9d7254cf3ae4b,1 +np.float64,0x8003047675a608ee,0x8003047675a608ee,1 +np.float64,0x3fead6cd7d75ad9a,0x3fe8676138e5ff93,1 +np.float64,0x3fea6ee3b0f4ddc7,0x3fe817838a2bcbe3,1 +np.float64,0x3feed0edea7da1dc,0x3feb5bea3cb12fe2,1 +np.float64,0x88003fe510008,0x88003fe510008,1 +np.float64,0x3fe64cadc56c995c,0x3fe4cd8ead87fc79,1 +np.float64,0xaae30c5955c62,0xaae30c5955c62,1 +np.float64,0x7fc8c97cae3192f8,0x408626ac579f4fc5,1 +np.float64,0xbfc2bc0e8b25781c,0xbfc2ab188fdab7dc,1 +np.float64,0xc8f8e5e791f1d,0xc8f8e5e791f1d,1 +np.float64,0x3fecfaa5d6f9f54c,0x3fea0444dabe5a15,1 +np.float64,0xbfeb93740ff726e8,0xbfe8f71a9ab13baf,1 +np.float64,0xffd951236c32a246,0xc0862c633a4661eb,1 +np.float64,0x3fddbc5fcd3b78c0,0x3fdcc21c1a0a9246,1 +np.float64,0xbfd242443da48488,0xbfd20512d91f7924,1 +np.float64,0x2a3689b2546d2,0x2a3689b2546d2,1 +np.float64,0xffe24c67382498ce,0xc0862f55e4ea6283,1 +np.float64,0x800cbfce22197f9c,0x800cbfce22197f9c,1 +np.float64,0x8002269428044d29,0x8002269428044d29,1 +np.float64,0x7fd44babbd289756,0x40862a9e79b51c3b,1 +np.float64,0x3feea056a27d40ad,0x3feb38dcddb682f0,1 +np.float64,0xffeca8174b39502e,0xc08632ec8f88a5b2,1 +np.float64,0x7fbe0853a03c10a6,0x408622a9e8d53a9e,1 +np.float64,0xbfa9704b2432e090,0xbfa96d9dfc8c0cc2,1 +np.float64,0x800bda28fab7b452,0x800bda28fab7b452,1 +np.float64,0xbfb0ffa2f621ff48,0xbfb0fc71f405e82a,1 +np.float64,0xbfe66c04216cd808,0xbfe4e73ea3b58cf6,1 +np.float64,0x3fe336ea5d266dd5,0x3fe236ffcf078c62,1 +np.float64,0xbfe7729ae6aee536,0xbfe5bcad4b8ac62d,1 +np.float64,0x558cfc96ab1a0,0x558cfc96ab1a0,1 +np.float64,0xbfe7d792aaefaf26,0xbfe60de1b8f0279d,1 +np.float64,0xffd19ef6bda33dee,0xc086297d0ffee3c7,1 +np.float64,0x666b3ab4ccd68,0x666b3ab4ccd68,1 +np.float64,0xffa3d89e3c27b140,0xc08619cdeb2c1e49,1 +np.float64,0xbfb1728f7f62f,0xbfb1728f7f62f,1 +np.float64,0x3fc76319f32ec634,0x3fc74247bd005e20,1 +np.float64,0xbfbf1caee23e3960,0xbfbf0934c13d70e2,1 +np.float64,0x7fe79626f32f2c4d,0x4086315dcc68a5cb,1 +np.float64,0xffee78c4603cf188,0xc086336a572c05c2,1 
+np.float64,0x3fce546eda3ca8de,0x3fce0d8d737fd31d,1 +np.float64,0xa223644d4446d,0xa223644d4446d,1 +np.float64,0x3fecea878b79d510,0x3fe9f850d50973f6,1 +np.float64,0x3fc20e0ea1241c1d,0x3fc1fedda87c5e75,1 +np.float64,0xffd1c5a99ca38b54,0xc086298e8e94cd47,1 +np.float64,0x7feb2c299d765852,0x4086327fa6db2808,1 +np.float64,0xcaf9d09595f3a,0xcaf9d09595f3a,1 +np.float64,0xbfe293bf21e5277e,0xbfe1aa7f6ac274ef,1 +np.float64,0xbfbaa3c8ce354790,0xbfba97891df19c01,1 +np.float64,0x3faf5784543eaf09,0x3faf5283acc7d71d,1 +np.float64,0x7fc014f8f62029f1,0x40862336531c662d,1 +np.float64,0xbfe0d9ac2d61b358,0xbfe027bce36699ca,1 +np.float64,0x8003e112ff27c227,0x8003e112ff27c227,1 +np.float64,0xffec0d4151381a82,0xc08632c0df718dd0,1 +np.float64,0x7fa2156fb0242ade,0x4086190f7587d708,1 +np.float64,0xd698358dad307,0xd698358dad307,1 +np.float64,0xbfed8d1b0efb1a36,0xbfea70588ef9ba18,1 +np.float64,0xbfd2cae6a92595ce,0xbfd28851e2185dee,1 +np.float64,0xffe7a36764ef46ce,0xc086316249c9287a,1 +np.float64,0xbfdb8ad8e5b715b2,0xbfdac19213c14315,1 +np.float64,0x3b5dba6076bc,0x3b5dba6076bc,1 +np.float64,0x800e6e8347bcdd07,0x800e6e8347bcdd07,1 +np.float64,0x800bea9f3fb7d53f,0x800bea9f3fb7d53f,1 +np.float64,0x7fb6d0e5fc2da1cb,0x4086207714c4ab85,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe2aa1e1465543c,0xbfe1bdd550ef2966,1 +np.float64,0x7fd3f6a47fa7ed48,0x40862a7caea33055,1 +np.float64,0x800094e292c129c6,0x800094e292c129c6,1 +np.float64,0x800e1500ecbc2a02,0x800e1500ecbc2a02,1 +np.float64,0xbfd8ff6f97b1fee0,0xbfd866f84346ecdc,1 +np.float64,0x681457d0d028c,0x681457d0d028c,1 +np.float64,0x3feed0b5987da16b,0x3feb5bc1ab424984,1 +np.float64,0x3fdbcb34cdb79668,0x3fdafca540f32c06,1 +np.float64,0xbfdc9eacdcb93d5a,0xbfdbbe274aa8aeb0,1 +np.float64,0xffe6e35d526dc6ba,0xc08631203df38ed2,1 +np.float64,0x3fcac1cc65358398,0x3fca90de41889613,1 +np.float64,0xbfebf07a55b7e0f5,0xbfe93d6007db0c67,1 +np.float64,0xbfd7a7b1e7af4f64,0xbfd725a9081c22cb,1 +np.float64,0x800232bd7de4657c,0x800232bd7de4657c,1 +np.float64,0x7fb1dae43c23b5c7,0x40861e80f5c0a64e,1 +np.float64,0x8013ded70027c,0x8013ded70027c,1 +np.float64,0x7fc4373a59286e74,0x4086250ad60575d0,1 +np.float64,0xbfe9980fd6733020,0xbfe770d1352d0ed3,1 +np.float64,0x8008a66b8dd14cd7,0x8008a66b8dd14cd7,1 +np.float64,0xbfaebc67f83d78d0,0xbfaeb7b015848478,1 +np.float64,0xffd0c52762218a4e,0xc0862917b564afc6,1 +np.float64,0xbfd503860aaa070c,0xbfd4a74618441561,1 +np.float64,0x5bdacabcb7b5a,0x5bdacabcb7b5a,1 +np.float64,0xf3623cffe6c48,0xf3623cffe6c48,1 +np.float64,0x7fe16c6c7ea2d8d8,0x40862ef18d90201f,1 +np.float64,0x3ff0000000000000,0x3fec34366179d427,1 +np.float64,0x7fe19cbc84233978,0x40862f079dcbc169,1 +np.float64,0x3fcfd3d6933fa7ad,0x3fcf822187907f6b,1 +np.float64,0x8007d65d672facbc,0x8007d65d672facbc,1 +np.float64,0xffca6115aa34c22c,0xc086272bd7728750,1 +np.float64,0xbfe77ab1556ef562,0xbfe5c332fb55b66e,1 +np.float64,0x8001ed797c23daf4,0x8001ed797c23daf4,1 +np.float64,0x7fdd3d16cb3a7a2d,0x40862d8a2c869281,1 +np.float64,0x75f36beaebe6e,0x75f36beaebe6e,1 +np.float64,0xffda3c2798b47850,0xc0862cac2d3435df,1 +np.float64,0xbfa37cc3c426f980,0xbfa37b8f9d3ec4b7,1 +np.float64,0x80030ea8bd061d52,0x80030ea8bd061d52,1 +np.float64,0xffe41f7617683eec,0xc08630188a3e135e,1 +np.float64,0x800e40590dfc80b2,0x800e40590dfc80b2,1 +np.float64,0x3fea950d80f52a1c,0x3fe834e74481e66f,1 +np.float64,0xffec95e39a792bc6,0xc08632e779150084,1 +np.float64,0xbfd54310ecaa8622,0xbfd4e39c4d767002,1 +np.float64,0xffd40c9971a81932,0xc0862a85764eb2f4,1 +np.float64,0xb0a2230761445,0xb0a2230761445,1 +np.float64,0x80092973661252e7,0x80092973661252e7,1 
+np.float64,0x7fb13b030a227605,0x40861e380aeb5549,1 +np.float64,0x3fbd5d8db23abb1b,0x3fbd4d2a0b94af36,1 +np.float64,0xbfd6cb8567ad970a,0xbfd656b19ab8fa61,1 +np.float64,0xbfe7c0fd346f81fa,0xbfe5fbc28807c794,1 +np.float64,0xffd586579eab0cb0,0xc0862b16e65c0754,1 +np.float64,0x8000e52da461ca5c,0x8000e52da461ca5c,1 +np.float64,0x3fc69d17112d3a2e,0x3fc67f63fe1fea1c,1 +np.float64,0x3fd36ba892a6d750,0x3fd3225be1fa87af,1 +np.float64,0x7fe2850598e50a0a,0x40862f6e7fcd6c1a,1 +np.float64,0x80074a4dacce949c,0x80074a4dacce949c,1 +np.float64,0x3fe25eea4d64bdd5,0x3fe17cbe5fefbd4e,1 +np.float64,0xbfe250c08be4a181,0xbfe17074c520e5de,1 +np.float64,0x8000f5665481eacd,0x8000f5665481eacd,1 +np.float64,0x7fdb3172f83662e5,0x40862cf5a46764f1,1 +np.float64,0x7fd8ed82d631db05,0x40862c4380658afa,1 +np.float64,0xffec5163feb8a2c7,0xc08632d4366aab06,1 +np.float64,0x800ff14ac6ffe296,0x800ff14ac6ffe296,1 +np.float64,0xbfc7cc7aea2f98f4,0xbfc7a9e9cb38f023,1 +np.float64,0xbfd50cdfc32a19c0,0xbfd4b0282b452fb2,1 +np.float64,0xbfec256d75b84adb,0xbfe965328c1860b2,1 +np.float64,0xffe860c4cdb0c189,0xc08631a164b7059a,1 +np.float64,0xbfe23de164247bc3,0xbfe16011bffa4651,1 +np.float64,0xcc96b39d992d7,0xcc96b39d992d7,1 +np.float64,0xbfec43acf938875a,0xbfe97be3a13b50c3,1 +np.float64,0xc4f587bb89eb1,0xc4f587bb89eb1,1 +np.float64,0xbfcd971d9a3b2e3c,0xbfcd5537ad15dab4,1 +np.float64,0xffcaf00d8035e01c,0xc0862756bf2cdf8f,1 +np.float64,0x8008c26f93f184e0,0x8008c26f93f184e0,1 +np.float64,0xfff0000000000000,0xfff0000000000000,1 +np.float64,0xbfd13552c3a26aa6,0xbfd101e5e252eb7b,1 +np.float64,0x7fe497235e292e46,0x4086304792fb423a,1 +np.float64,0x7fd6dc0192adb802,0x40862b921a5e935d,1 +np.float64,0xf16d49a1e2da9,0xf16d49a1e2da9,1 +np.float64,0xffef6b1b71bed636,0xc08633a8feed0178,1 +np.float64,0x7fe15ec62f62bd8b,0x40862eeb46b193dc,1 +np.float64,0x3fef4369ec7e86d4,0x3febae1768be52cc,1 +np.float64,0x4f84e8e89f09e,0x4f84e8e89f09e,1 +np.float64,0xbfe19e71ade33ce4,0xbfe0d4fad05e0ebc,1 +np.float64,0xbfe7e1df1defc3be,0xbfe616233e15b3d0,1 +np.float64,0x7fe9349afdb26935,0x408631e5c1c5c6cd,1 +np.float64,0xff90c35ac82186c0,0xc08612e896a06467,1 +np.float64,0xbfe88bf8807117f1,0xbfe69dc786464422,1 +np.float64,0x3feaf9ff6475f3fe,0x3fe8825132410d18,1 +np.float64,0x9ff487a33fe91,0x9ff487a33fe91,1 +np.float64,0x7fedb30159bb6602,0x40863335c0419322,1 +np.float64,0x800bddf6ed77bbee,0x800bddf6ed77bbee,1 +np.float64,0x3fd919df133233be,0x3fd87f963b9584ce,1 +np.float64,0x7fd64da3b52c9b46,0x40862b5fa9dd3b6d,1 +np.float64,0xbfce288db43c511c,0xbfcde2d953407ae8,1 +np.float64,0x3fe88bc72771178e,0x3fe69da05e9e9b4e,1 +np.float64,0x800feafe259fd5fc,0x800feafe259fd5fc,1 +np.float64,0x3febbbff4a7777ff,0x3fe915c78f6a280f,1 +np.float64,0xbfefbde4417f7bc9,0xbfec055f4fb2cd21,1 +np.float64,0xf13ca103e2794,0xf13ca103e2794,1 +np.float64,0x3fe6423884ec8471,0x3fe4c4f97eaa876a,1 +np.float64,0x800ca01c8cb94039,0x800ca01c8cb94039,1 +np.float64,0x3fbc5073f638a0e0,0x3fbc41c163ac0001,1 +np.float64,0xbfda0d83cfb41b08,0xbfd961d4cacc82cf,1 +np.float64,0x800f37b8f17e6f72,0x800f37b8f17e6f72,1 +np.float64,0x7fe0b08cd7216119,0x40862e996becb771,1 +np.float64,0xffd4222a40a84454,0xc0862a8e0c984917,1 +np.float64,0x7feb3df98ff67bf2,0x40863284e3a86ee6,1 +np.float64,0x8001d5d291e3aba6,0x8001d5d291e3aba6,1 +np.float64,0xbfd3c21629a7842c,0xbfd3750095a5894a,1 +np.float64,0xbfd069eb48a0d3d6,0xbfd03d2b1c2ae9db,1 +np.float64,0xffeb1be2973637c4,0xc086327ada954662,1 +np.float64,0x3fc659f97e2cb3f3,0x3fc63d497a451f10,1 +np.float64,0xbfeb624bc776c498,0xbfe8d1cf7c0626ca,1 +np.float64,0xffeedf26e23dbe4d,0xc08633850baab425,1 
+np.float64,0xffe70da48a6e1b48,0xc086312ef75d5036,1 +np.float64,0x2b4f4830569ea,0x2b4f4830569ea,1 +np.float64,0xffe82e7fcfb05cff,0xc0863190d4771f75,1 +np.float64,0x3fcc2c1fd5385840,0x3fcbf3211ddc5123,1 +np.float64,0x7fe22ced5a6459da,0x40862f481629ee6a,1 +np.float64,0x7fe13d2895e27a50,0x40862edbbc411899,1 +np.float64,0x3fd54c4280aa9884,0x3fd4ec55a946c5d7,1 +np.float64,0xffd75b8e01aeb71c,0xc0862bbe42d76e5e,1 +np.float64,0x7f1d5376fe3ab,0x7f1d5376fe3ab,1 +np.float64,0x3fe6ec6c902dd8d9,0x3fe55004f35192bd,1 +np.float64,0x5634504aac68b,0x5634504aac68b,1 +np.float64,0x3feedb0d83bdb61b,0x3feb633467467ce6,1 +np.float64,0x3fddb1c0dcbb6380,0x3fdcb87a02daf1fa,1 +np.float64,0xbfa832da443065b0,0xbfa8308c70257209,1 +np.float64,0x87a9836b0f531,0x87a9836b0f531,1
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arctan.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arctan.csv
new file mode 100644
index 0000000..c03e144
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arctan.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3f338252,0x3f1c8d9c,3 +np.float32,0x7e569df2,0x3fc90fdb,3 +np.float32,0xbf347e25,0xbf1d361f,3 +np.float32,0xbf0a654e,0xbefdbfd2,3 +np.float32,0x8070968e,0x8070968e,3 +np.float32,0x803cfb27,0x803cfb27,3 +np.float32,0x8024362e,0x8024362e,3 +np.float32,0xfd55dca0,0xbfc90fdb,3 +np.float32,0x592b82,0x592b82,3 +np.float32,0x802eb8e1,0x802eb8e1,3 +np.float32,0xbc5fef40,0xbc5febae,3 +np.float32,0x3f1f6ce8,0x3f0e967c,3 +np.float32,0x20bedc,0x20bedc,3 +np.float32,0xbf058860,0xbef629c7,3 +np.float32,0x311504,0x311504,3 +np.float32,0xbd23f560,0xbd23defa,3 +np.float32,0x800ff4e8,0x800ff4e8,3 +np.float32,0x355009,0x355009,3 +np.float32,0x3f7be42e,0x3f46fdb3,3 +np.float32,0xbf225f7c,0xbf10b364,3 +np.float32,0x8074fa9e,0x8074fa9e,3 +np.float32,0xbea4b418,0xbe9f59ce,3 +np.float32,0xbe909c14,0xbe8cf045,3 +np.float32,0x80026bee,0x80026bee,3 +np.float32,0x3d789c20,0x3d784e25,3 +np.float32,0x7f56a4ba,0x3fc90fdb,3 +np.float32,0xbf70d141,0xbf413db7,3 +np.float32,0xbf2c4886,0xbf17a505,3 +np.float32,0x7e2993bf,0x3fc90fdb,3 +np.float32,0xbe2c8a30,0xbe2aef28,3 +np.float32,0x803f82d9,0x803f82d9,3 +np.float32,0x3f062fbc,0x3ef730a1,3 +np.float32,0x3f349ee0,0x3f1d4bfa,3 +np.float32,0x3eccfb69,0x3ec2f9e8,3 +np.float32,0x7e8a85dd,0x3fc90fdb,3 +np.float32,0x25331,0x25331,3 +np.float32,0x464f19,0x464f19,3 +np.float32,0x8035c818,0x8035c818,3 +np.float32,0x802e5799,0x802e5799,3 +np.float32,0x64e1c0,0x64e1c0,3 +np.float32,0x701cc2,0x701cc2,3 +np.float32,0x265c57,0x265c57,3 +np.float32,0x807a053f,0x807a053f,3 +np.float32,0x3bd2c412,0x3bd2c354,3 +np.float32,0xff28f1c8,0xbfc90fdb,3 +np.float32,0x7f08f08b,0x3fc90fdb,3 +np.float32,0x800c50e4,0x800c50e4,3 +np.float32,0x369674,0x369674,3 +np.float32,0xbf5b7db3,0xbf3571bf,3 +np.float32,0x7edcf5e2,0x3fc90fdb,3 +np.float32,0x800e5d4b,0x800e5d4b,3 +np.float32,0x80722554,0x80722554,3 +np.float32,0x693f33,0x693f33,3 +np.float32,0x800844e4,0x800844e4,3 +np.float32,0xbf111b82,0xbf0402ec,3 +np.float32,0x7df9c9ac,0x3fc90fdb,3 +np.float32,0xbf6619a6,0xbf3b6f57,3 +np.float32,0x8002fafe,0x8002fafe,3 +np.float32,0xfe1e67f8,0xbfc90fdb,3 +np.float32,0x3f7f4bf8,0x3f48b5b7,3 +np.float32,0x7f017b20,0x3fc90fdb,3 +np.float32,0x2d9b07,0x2d9b07,3 +np.float32,0x803aa174,0x803aa174,3 +np.float32,0x7d530336,0x3fc90fdb,3 +np.float32,0x80662195,0x80662195,3 +np.float32,0xfd5ebcf0,0xbfc90fdb,3 +np.float32,0xbe7b8dcc,0xbe76ab59,3 +np.float32,0x7f2bacaf,0x3fc90fdb,3
+np.float32,0x3f194fc4,0x3f0a229e,3 +np.float32,0x7ee21cdf,0x3fc90fdb,3 +np.float32,0x3f5a17fc,0x3f34a307,3 +np.float32,0x7f100c58,0x3fc90fdb,3 +np.float32,0x7e9128f5,0x3fc90fdb,3 +np.float32,0xbf2107c6,0xbf0fbdb4,3 +np.float32,0xbd29c800,0xbd29af22,3 +np.float32,0xbf5af499,0xbf3522a6,3 +np.float32,0x801bde44,0x801bde44,3 +np.float32,0xfeb4761a,0xbfc90fdb,3 +np.float32,0x3d88aa1b,0x3d887650,3 +np.float32,0x7eba5e0b,0x3fc90fdb,3 +np.float32,0x803906bd,0x803906bd,3 +np.float32,0x80101512,0x80101512,3 +np.float32,0x7e898f83,0x3fc90fdb,3 +np.float32,0x806406d3,0x806406d3,3 +np.float32,0x7ed20fc0,0x3fc90fdb,3 +np.float32,0x20827d,0x20827d,3 +np.float32,0x3f361359,0x3f1e43fe,3 +np.float32,0xfe4ef8d8,0xbfc90fdb,3 +np.float32,0x805e7d2d,0x805e7d2d,3 +np.float32,0xbe4316b0,0xbe40c745,3 +np.float32,0xbf0a1c06,0xbefd4e5a,3 +np.float32,0x3e202860,0x3e1edee1,3 +np.float32,0xbeb32a2c,0xbeac5899,3 +np.float32,0xfe528838,0xbfc90fdb,3 +np.float32,0x2f73e2,0x2f73e2,3 +np.float32,0xbe16e010,0xbe15cc27,3 +np.float32,0x3f50d6c5,0x3f2f2d75,3 +np.float32,0xbe88a6a2,0xbe8589c7,3 +np.float32,0x3ee36060,0x3ed5fb36,3 +np.float32,0x6c978b,0x6c978b,3 +np.float32,0x7f1b735f,0x3fc90fdb,3 +np.float32,0x3dad8256,0x3dad1885,3 +np.float32,0x807f5094,0x807f5094,3 +np.float32,0x65c358,0x65c358,3 +np.float32,0xff315ce4,0xbfc90fdb,3 +np.float32,0x7411a6,0x7411a6,3 +np.float32,0x80757b04,0x80757b04,3 +np.float32,0x3eec73a6,0x3edd82f4,3 +np.float32,0xfe9f69e8,0xbfc90fdb,3 +np.float32,0x801f4fa8,0x801f4fa8,3 +np.float32,0xbf6f2fae,0xbf405f79,3 +np.float32,0xfea206b6,0xbfc90fdb,3 +np.float32,0x3f257301,0x3f12e1ee,3 +np.float32,0x7ea6a506,0x3fc90fdb,3 +np.float32,0x80800000,0x80800000,3 +np.float32,0xff735c2d,0xbfc90fdb,3 +np.float32,0x80197f95,0x80197f95,3 +np.float32,0x7f4a354f,0x3fc90fdb,3 +np.float32,0xff320c00,0xbfc90fdb,3 +np.float32,0x3f2659de,0x3f138484,3 +np.float32,0xbe5451bc,0xbe515a52,3 +np.float32,0x3f6e228c,0x3f3fcf7c,3 +np.float32,0x66855a,0x66855a,3 +np.float32,0x8034b3a3,0x8034b3a3,3 +np.float32,0xbe21a2fc,0xbe20505d,3 +np.float32,0x7f79e2dc,0x3fc90fdb,3 +np.float32,0xbe19a8e0,0xbe18858c,3 +np.float32,0x10802c,0x10802c,3 +np.float32,0xfeee579e,0xbfc90fdb,3 +np.float32,0x3f3292c8,0x3f1becc0,3 +np.float32,0xbf595a71,0xbf34350a,3 +np.float32,0xbf7c3373,0xbf4725f4,3 +np.float32,0xbdd30938,0xbdd24b36,3 +np.float32,0x153a17,0x153a17,3 +np.float32,0x807282a0,0x807282a0,3 +np.float32,0xfe817322,0xbfc90fdb,3 +np.float32,0x3f1b3628,0x3f0b8771,3 +np.float32,0x41be8f,0x41be8f,3 +np.float32,0x7f4a8343,0x3fc90fdb,3 +np.float32,0x3dc4ea2b,0x3dc44fae,3 +np.float32,0x802aac25,0x802aac25,3 +np.float32,0xbf20e1d7,0xbf0fa284,3 +np.float32,0xfd91a1b0,0xbfc90fdb,3 +np.float32,0x3f0d5476,0x3f012265,3 +np.float32,0x21c916,0x21c916,3 +np.float32,0x807df399,0x807df399,3 +np.float32,0x7e207b4c,0x3fc90fdb,3 +np.float32,0x8055f8ff,0x8055f8ff,3 +np.float32,0x7edf3b01,0x3fc90fdb,3 +np.float32,0x803a8df3,0x803a8df3,3 +np.float32,0x3ce3b002,0x3ce3a101,3 +np.float32,0x3f62dd54,0x3f39a248,3 +np.float32,0xff33ae10,0xbfc90fdb,3 +np.float32,0x7e3de69d,0x3fc90fdb,3 +np.float32,0x8024581e,0x8024581e,3 +np.float32,0xbf4ac99d,0xbf2b807a,3 +np.float32,0x3f157d19,0x3f074d8c,3 +np.float32,0xfed383f4,0xbfc90fdb,3 +np.float32,0xbf5a39fa,0xbf34b6b8,3 +np.float32,0x800d757d,0x800d757d,3 +np.float32,0x807d606b,0x807d606b,3 +np.float32,0x3e828f89,0x3e7fac2d,3 +np.float32,0x7a6604,0x7a6604,3 +np.float32,0x7dc7e72b,0x3fc90fdb,3 +np.float32,0x80144146,0x80144146,3 +np.float32,0x7c2eed69,0x3fc90fdb,3 +np.float32,0x3f5b4d8c,0x3f3555fc,3 
+np.float32,0xfd8b7778,0xbfc90fdb,3 +np.float32,0xfc9d9140,0xbfc90fdb,3 +np.float32,0xbea265d4,0xbe9d4232,3 +np.float32,0xbe9344d0,0xbe8f65da,3 +np.float32,0x3f71f19a,0x3f41d65b,3 +np.float32,0x804a3f59,0x804a3f59,3 +np.float32,0x3e596290,0x3e563476,3 +np.float32,0x3e994ee4,0x3e94f546,3 +np.float32,0xbc103e00,0xbc103d0c,3 +np.float32,0xbf1cd896,0xbf0cb889,3 +np.float32,0x7f52b080,0x3fc90fdb,3 +np.float32,0xff584452,0xbfc90fdb,3 +np.float32,0x58b26b,0x58b26b,3 +np.float32,0x3f23cd4c,0x3f11b799,3 +np.float32,0x707d7,0x707d7,3 +np.float32,0xff732cff,0xbfc90fdb,3 +np.float32,0x3e41c2a6,0x3e3f7f0f,3 +np.float32,0xbf7058e9,0xbf40fdcf,3 +np.float32,0x7dca9857,0x3fc90fdb,3 +np.float32,0x7f0eb44b,0x3fc90fdb,3 +np.float32,0x8000405c,0x8000405c,3 +np.float32,0x4916ab,0x4916ab,3 +np.float32,0x4811a8,0x4811a8,3 +np.float32,0x3d69bf,0x3d69bf,3 +np.float32,0xfeadcf1e,0xbfc90fdb,3 +np.float32,0x3e08dbbf,0x3e080d58,3 +np.float32,0xff031f88,0xbfc90fdb,3 +np.float32,0xbe09cab8,0xbe08f818,3 +np.float32,0x21d7cd,0x21d7cd,3 +np.float32,0x3f23230d,0x3f113ea9,3 +np.float32,0x7e8a48d4,0x3fc90fdb,3 +np.float32,0x413869,0x413869,3 +np.float32,0x7e832990,0x3fc90fdb,3 +np.float32,0x800f5c09,0x800f5c09,3 +np.float32,0x7f5893b6,0x3fc90fdb,3 +np.float32,0x7f06b5b1,0x3fc90fdb,3 +np.float32,0xbe1cbee8,0xbe1b89d6,3 +np.float32,0xbf279f14,0xbf1468a8,3 +np.float32,0xfea86060,0xbfc90fdb,3 +np.float32,0x3e828174,0x3e7f91bb,3 +np.float32,0xff682c82,0xbfc90fdb,3 +np.float32,0x4e20f3,0x4e20f3,3 +np.float32,0x7f17d7e9,0x3fc90fdb,3 +np.float32,0x80671f92,0x80671f92,3 +np.float32,0x7f6dd100,0x3fc90fdb,3 +np.float32,0x3f219a4d,0x3f102695,3 +np.float32,0x803c9808,0x803c9808,3 +np.float32,0x3c432ada,0x3c43287d,3 +np.float32,0xbd3db450,0xbd3d91a2,3 +np.float32,0x3baac135,0x3baac0d0,3 +np.float32,0xff7fffe1,0xbfc90fdb,3 +np.float32,0xfe38a6f4,0xbfc90fdb,3 +np.float32,0x3dfb0a04,0x3df9cb04,3 +np.float32,0x800b05c2,0x800b05c2,3 +np.float32,0x644163,0x644163,3 +np.float32,0xff03a025,0xbfc90fdb,3 +np.float32,0x3f7d506c,0x3f47b641,3 +np.float32,0xff0e682a,0xbfc90fdb,3 +np.float32,0x3e09b7b0,0x3e08e567,3 +np.float32,0x7f72a216,0x3fc90fdb,3 +np.float32,0x7f800000,0x3fc90fdb,3 +np.float32,0x8050a281,0x8050a281,3 +np.float32,0x7edafa2f,0x3fc90fdb,3 +np.float32,0x3f4e0df6,0x3f2d7f2f,3 +np.float32,0xbf6728e0,0xbf3c050f,3 +np.float32,0x3e904ce4,0x3e8ca6eb,3 +np.float32,0x0,0x0,3 +np.float32,0xfd215070,0xbfc90fdb,3 +np.float32,0x7e406b15,0x3fc90fdb,3 +np.float32,0xbf2803c9,0xbf14af18,3 +np.float32,0x5950c8,0x5950c8,3 +np.float32,0xbeddcec8,0xbed14faa,3 +np.float32,0xbec6457e,0xbebd2aa5,3 +np.float32,0xbf42843c,0xbf2656db,3 +np.float32,0x3ee9cba8,0x3edb5163,3 +np.float32,0xbe30c954,0xbe2f0f90,3 +np.float32,0xbeee6b44,0xbedf216f,3 +np.float32,0xbe35d818,0xbe33f7cd,3 +np.float32,0xbe47c630,0xbe454bc6,3 +np.float32,0x801b146f,0x801b146f,3 +np.float32,0x7f6788da,0x3fc90fdb,3 +np.float32,0x3eaef088,0x3ea8927d,3 +np.float32,0x3eb5983e,0x3eae81fc,3 +np.float32,0x40b51d,0x40b51d,3 +np.float32,0xfebddd04,0xbfc90fdb,3 +np.float32,0x3e591aee,0x3e55efea,3 +np.float32,0xbe2b6b48,0xbe29d81f,3 +np.float32,0xff4a8826,0xbfc90fdb,3 +np.float32,0x3e791df0,0x3e745eac,3 +np.float32,0x7c8f681f,0x3fc90fdb,3 +np.float32,0xfe7a15c4,0xbfc90fdb,3 +np.float32,0x3c8963,0x3c8963,3 +np.float32,0x3f0afa0a,0x3efea5cc,3 +np.float32,0xbf0d2680,0xbf00ff29,3 +np.float32,0x3dc306b0,0x3dc27096,3 +np.float32,0x7f4cf105,0x3fc90fdb,3 +np.float32,0xbe196060,0xbe183ea4,3 +np.float32,0x5caf1c,0x5caf1c,3 +np.float32,0x801f2852,0x801f2852,3 +np.float32,0xbe01aa0c,0xbe00fa53,3 
+np.float32,0x3f0cfd32,0x3f00df7a,3 +np.float32,0x7d82038e,0x3fc90fdb,3 +np.float32,0x7f7b927f,0x3fc90fdb,3 +np.float32,0xbe93b2e4,0xbe8fcb7f,3 +np.float32,0x1ffe8c,0x1ffe8c,3 +np.float32,0x3faaf6,0x3faaf6,3 +np.float32,0x3e32b1b8,0x3e30e9ab,3 +np.float32,0x802953c0,0x802953c0,3 +np.float32,0xfe5d9844,0xbfc90fdb,3 +np.float32,0x3e1a59d0,0x3e193292,3 +np.float32,0x801c6edc,0x801c6edc,3 +np.float32,0x1ecf41,0x1ecf41,3 +np.float32,0xfe56b09c,0xbfc90fdb,3 +np.float32,0x7e878351,0x3fc90fdb,3 +np.float32,0x3f401e2c,0x3f24cfcb,3 +np.float32,0xbf204a40,0xbf0f35bb,3 +np.float32,0x3e155a98,0x3e144ee1,3 +np.float32,0xbf34f929,0xbf1d8838,3 +np.float32,0x801bbf70,0x801bbf70,3 +np.float32,0x7e7c9730,0x3fc90fdb,3 +np.float32,0x7cc23432,0x3fc90fdb,3 +np.float32,0xbf351638,0xbf1d9b97,3 +np.float32,0x80152094,0x80152094,3 +np.float32,0x3f2d731c,0x3f187219,3 +np.float32,0x804ab0b7,0x804ab0b7,3 +np.float32,0x37d6db,0x37d6db,3 +np.float32,0xbf3ccc56,0xbf22acbf,3 +np.float32,0x3e546f8c,0x3e5176e7,3 +np.float32,0xbe90e87e,0xbe8d3707,3 +np.float32,0x48256c,0x48256c,3 +np.float32,0x7e2468d0,0x3fc90fdb,3 +np.float32,0x807af47e,0x807af47e,3 +np.float32,0x3ed4b221,0x3ec996f0,3 +np.float32,0x3d3b1956,0x3d3af811,3 +np.float32,0xbe69d93c,0xbe65e7f0,3 +np.float32,0xff03ff14,0xbfc90fdb,3 +np.float32,0x801e79dc,0x801e79dc,3 +np.float32,0x3f467c53,0x3f28d63d,3 +np.float32,0x3eab6baa,0x3ea56a1c,3 +np.float32,0xbf15519c,0xbf072d1c,3 +np.float32,0x7f0bd8e8,0x3fc90fdb,3 +np.float32,0xbe1e0d1c,0xbe1cd053,3 +np.float32,0x8016edab,0x8016edab,3 +np.float32,0x7ecaa09b,0x3fc90fdb,3 +np.float32,0x3f72e6d9,0x3f4257a8,3 +np.float32,0xbefe787e,0xbeec29a4,3 +np.float32,0xbee989e8,0xbedb1af9,3 +np.float32,0xbe662db0,0xbe626a45,3 +np.float32,0x495bf7,0x495bf7,3 +np.float32,0x26c379,0x26c379,3 +np.float32,0x7f54d41a,0x3fc90fdb,3 +np.float32,0x801e7dd9,0x801e7dd9,3 +np.float32,0x80000000,0x80000000,3 +np.float32,0xfa3d3000,0xbfc90fdb,3 +np.float32,0xfa3cb800,0xbfc90fdb,3 +np.float32,0x264894,0x264894,3 +np.float32,0xff6de011,0xbfc90fdb,3 +np.float32,0x7e9045b2,0x3fc90fdb,3 +np.float32,0x3f2253a8,0x3f10aaf4,3 +np.float32,0xbd462bf0,0xbd460469,3 +np.float32,0x7f1796af,0x3fc90fdb,3 +np.float32,0x3e718858,0x3e6d3279,3 +np.float32,0xff437d7e,0xbfc90fdb,3 +np.float32,0x805ae7cb,0x805ae7cb,3 +np.float32,0x807e32e9,0x807e32e9,3 +np.float32,0x3ee0bafc,0x3ed3c453,3 +np.float32,0xbf721dee,0xbf41edc3,3 +np.float32,0xfec9f792,0xbfc90fdb,3 +np.float32,0x7f050720,0x3fc90fdb,3 +np.float32,0x182261,0x182261,3 +np.float32,0x3e39e678,0x3e37e5be,3 +np.float32,0x7e096e4b,0x3fc90fdb,3 +np.float32,0x103715,0x103715,3 +np.float32,0x3f7e7741,0x3f484ae4,3 +np.float32,0x3e29aea5,0x3e28277c,3 +np.float32,0x58c183,0x58c183,3 +np.float32,0xff72fdb2,0xbfc90fdb,3 +np.float32,0xbd9a9420,0xbd9a493c,3 +np.float32,0x7f1e07e7,0x3fc90fdb,3 +np.float32,0xff79f522,0xbfc90fdb,3 +np.float32,0x7c7d0e96,0x3fc90fdb,3 +np.float32,0xbeba9e8e,0xbeb2f504,3 +np.float32,0xfd880a80,0xbfc90fdb,3 +np.float32,0xff7f2a33,0xbfc90fdb,3 +np.float32,0x3e861ae0,0x3e83289c,3 +np.float32,0x7f0161c1,0x3fc90fdb,3 +np.float32,0xfe844ff8,0xbfc90fdb,3 +np.float32,0xbebf4b98,0xbeb7128e,3 +np.float32,0x652bee,0x652bee,3 +np.float32,0xff188a4b,0xbfc90fdb,3 +np.float32,0xbf800000,0xbf490fdb,3 +np.float32,0x80418711,0x80418711,3 +np.float32,0xbeb712d4,0xbeafd1f6,3 +np.float32,0xbf7cee28,0xbf478491,3 +np.float32,0xfe66c59c,0xbfc90fdb,3 +np.float32,0x4166a2,0x4166a2,3 +np.float32,0x3dfa1a2c,0x3df8deb5,3 +np.float32,0xbdbfbcb8,0xbdbf2e0f,3 +np.float32,0xfe60ef70,0xbfc90fdb,3 +np.float32,0xfe009444,0xbfc90fdb,3 
+np.float32,0xfeb27aa0,0xbfc90fdb,3 +np.float32,0xbe99f7bc,0xbe95902b,3 +np.float32,0x8043d28d,0x8043d28d,3 +np.float32,0xfe5328c4,0xbfc90fdb,3 +np.float32,0x8017b27e,0x8017b27e,3 +np.float32,0x3ef1d2cf,0x3ee1ebd7,3 +np.float32,0x805ddd90,0x805ddd90,3 +np.float32,0xbf424263,0xbf262d17,3 +np.float32,0xfc99dde0,0xbfc90fdb,3 +np.float32,0xbf7ec13b,0xbf487015,3 +np.float32,0xbef727ea,0xbee64377,3 +np.float32,0xff15ce95,0xbfc90fdb,3 +np.float32,0x1fbba4,0x1fbba4,3 +np.float32,0x3f3b2368,0x3f2198a9,3 +np.float32,0xfefda26e,0xbfc90fdb,3 +np.float32,0x801519ad,0x801519ad,3 +np.float32,0x80473fa2,0x80473fa2,3 +np.float32,0x7e7a8bc1,0x3fc90fdb,3 +np.float32,0x3e8a9289,0x3e87548a,3 +np.float32,0x3ed68987,0x3ecb2872,3 +np.float32,0x805bca66,0x805bca66,3 +np.float32,0x8079c4e3,0x8079c4e3,3 +np.float32,0x3a2510,0x3a2510,3 +np.float32,0x7eedc598,0x3fc90fdb,3 +np.float32,0x80681956,0x80681956,3 +np.float32,0xff64c778,0xbfc90fdb,3 +np.float32,0x806bbc46,0x806bbc46,3 +np.float32,0x433643,0x433643,3 +np.float32,0x705b92,0x705b92,3 +np.float32,0xff359392,0xbfc90fdb,3 +np.float32,0xbee78672,0xbed96fa7,3 +np.float32,0x3e21717b,0x3e202010,3 +np.float32,0xfea13c34,0xbfc90fdb,3 +np.float32,0x2c8895,0x2c8895,3 +np.float32,0x3ed33290,0x3ec84f7c,3 +np.float32,0x3e63031e,0x3e5f662e,3 +np.float32,0x7e30907b,0x3fc90fdb,3 +np.float32,0xbe293708,0xbe27b310,3 +np.float32,0x3ed93738,0x3ecd6ea3,3 +np.float32,0x9db7e,0x9db7e,3 +np.float32,0x3f7cd1b8,0x3f47762c,3 +np.float32,0x3eb5143c,0x3eae0cb0,3 +np.float32,0xbe69b234,0xbe65c2d7,3 +np.float32,0x3f6e74de,0x3f3ffb97,3 +np.float32,0x5d0559,0x5d0559,3 +np.float32,0x3e1e8c30,0x3e1d4c70,3 +np.float32,0xbf2d1878,0xbf1833ef,3 +np.float32,0xff2adf82,0xbfc90fdb,3 +np.float32,0x8012e2c1,0x8012e2c1,3 +np.float32,0x7f031be3,0x3fc90fdb,3 +np.float32,0x805ff94e,0x805ff94e,3 +np.float32,0x3e9d5b27,0x3e98aa31,3 +np.float32,0x3f56d5cf,0x3f32bc9e,3 +np.float32,0x3eaa0412,0x3ea4267f,3 +np.float32,0xbe899ea4,0xbe86712f,3 +np.float32,0x800f2f48,0x800f2f48,3 +np.float32,0x3f1c2269,0x3f0c33ea,3 +np.float32,0x3f4a5f64,0x3f2b3f28,3 +np.float32,0x80739318,0x80739318,3 +np.float32,0x806e9b47,0x806e9b47,3 +np.float32,0x3c8cd300,0x3c8ccf73,3 +np.float32,0x7f39a39d,0x3fc90fdb,3 +np.float32,0x3ec95d61,0x3ebfd9dc,3 +np.float32,0xff351ff8,0xbfc90fdb,3 +np.float32,0xff3a8f58,0xbfc90fdb,3 +np.float32,0x7f313ec0,0x3fc90fdb,3 +np.float32,0x803aed13,0x803aed13,3 +np.float32,0x7f771d9b,0x3fc90fdb,3 +np.float32,0x8045a6d6,0x8045a6d6,3 +np.float32,0xbc85f280,0xbc85ef72,3 +np.float32,0x7e9c68f5,0x3fc90fdb,3 +np.float32,0xbf0f9379,0xbf02d975,3 +np.float32,0x7e97bcb1,0x3fc90fdb,3 +np.float32,0x804a07d5,0x804a07d5,3 +np.float32,0x802e6117,0x802e6117,3 +np.float32,0x7ed5e388,0x3fc90fdb,3 +np.float32,0x80750455,0x80750455,3 +np.float32,0xff4a8325,0xbfc90fdb,3 +np.float32,0xbedb6866,0xbecf497c,3 +np.float32,0x52ea3b,0x52ea3b,3 +np.float32,0xff773172,0xbfc90fdb,3 +np.float32,0xbeaa8ff0,0xbea4a46e,3 +np.float32,0x7eef2058,0x3fc90fdb,3 +np.float32,0x3f712472,0x3f4169d3,3 +np.float32,0xff6c8608,0xbfc90fdb,3 +np.float32,0xbf6eaa41,0xbf40182a,3 +np.float32,0x3eb03c24,0x3ea9bb34,3 +np.float32,0xfe118cd4,0xbfc90fdb,3 +np.float32,0x3e5b03b0,0x3e57c378,3 +np.float32,0x7f34d92d,0x3fc90fdb,3 +np.float32,0x806c3418,0x806c3418,3 +np.float32,0x7f3074e3,0x3fc90fdb,3 +np.float32,0x8002df02,0x8002df02,3 +np.float32,0x3f6df63a,0x3f3fb7b7,3 +np.float32,0xfd2b4100,0xbfc90fdb,3 +np.float32,0x80363d5c,0x80363d5c,3 +np.float32,0xbeac1f98,0xbea60bd6,3 +np.float32,0xff7fffff,0xbfc90fdb,3 +np.float32,0x80045097,0x80045097,3 
+np.float32,0xfe011100,0xbfc90fdb,3 +np.float32,0x80739ef5,0x80739ef5,3 +np.float32,0xff3976ed,0xbfc90fdb,3 +np.float32,0xbe18e3a0,0xbe17c49e,3 +np.float32,0xbe289294,0xbe2712f6,3 +np.float32,0x3f1d41e7,0x3f0d050e,3 +np.float32,0x39364a,0x39364a,3 +np.float32,0x8072b77e,0x8072b77e,3 +np.float32,0x3f7cfec0,0x3f478cf6,3 +np.float32,0x2f68f6,0x2f68f6,3 +np.float32,0xbf031fb8,0xbef25c84,3 +np.float32,0xbf0b842c,0xbeff7afc,3 +np.float32,0x3f081e7e,0x3efa3676,3 +np.float32,0x7f7fffff,0x3fc90fdb,3 +np.float32,0xff15da0e,0xbfc90fdb,3 +np.float32,0x3d2001b2,0x3d1fece1,3 +np.float32,0x7f76efef,0x3fc90fdb,3 +np.float32,0x3f2405dd,0x3f11dfb7,3 +np.float32,0xa0319,0xa0319,3 +np.float32,0x3e23d2bd,0x3e227255,3 +np.float32,0xbd4d4c50,0xbd4d205e,3 +np.float32,0x382344,0x382344,3 +np.float32,0x21bbf,0x21bbf,3 +np.float32,0xbf209e82,0xbf0f7239,3 +np.float32,0xff03bf9f,0xbfc90fdb,3 +np.float32,0x7b1789,0x7b1789,3 +np.float32,0xff314944,0xbfc90fdb,3 +np.float32,0x1a63eb,0x1a63eb,3 +np.float32,0x803dc983,0x803dc983,3 +np.float32,0x3f0ff558,0x3f0323dc,3 +np.float32,0x3f544f2c,0x3f313f58,3 +np.float32,0xff032948,0xbfc90fdb,3 +np.float32,0x7f4933cc,0x3fc90fdb,3 +np.float32,0x7f14c5ed,0x3fc90fdb,3 +np.float32,0x803aeebf,0x803aeebf,3 +np.float32,0xbf0d4c0f,0xbf011bf5,3 +np.float32,0xbeaf8de2,0xbea91f57,3 +np.float32,0xff3ae030,0xbfc90fdb,3 +np.float32,0xbb362d00,0xbb362ce1,3 +np.float32,0x3d1f79e0,0x3d1f6544,3 +np.float32,0x3f56e9d9,0x3f32c860,3 +np.float32,0x3f723e5e,0x3f41fee2,3 +np.float32,0x4c0179,0x4c0179,3 +np.float32,0xfee36132,0xbfc90fdb,3 +np.float32,0x619ae6,0x619ae6,3 +np.float32,0xfde5d670,0xbfc90fdb,3 +np.float32,0xff079ac5,0xbfc90fdb,3 +np.float32,0x3e974fbd,0x3e931fae,3 +np.float32,0x8020ae6b,0x8020ae6b,3 +np.float32,0x6b5af1,0x6b5af1,3 +np.float32,0xbeb57cd6,0xbeae69a3,3 +np.float32,0x806e7eb2,0x806e7eb2,3 +np.float32,0x7e666edb,0x3fc90fdb,3 +np.float32,0xbf458c18,0xbf283ff0,3 +np.float32,0x3e50518e,0x3e4d8399,3 +np.float32,0x3e9ce224,0x3e983b98,3 +np.float32,0x3e6bc067,0x3e67b6c6,3 +np.float32,0x13783d,0x13783d,3 +np.float32,0xff3d518c,0xbfc90fdb,3 +np.float32,0xfeba5968,0xbfc90fdb,3 +np.float32,0xbf0b9f76,0xbeffa50f,3 +np.float32,0xfe174900,0xbfc90fdb,3 +np.float32,0x3f38bb0a,0x3f200527,3 +np.float32,0x7e94a77d,0x3fc90fdb,3 +np.float32,0x29d776,0x29d776,3 +np.float32,0xbf4e058d,0xbf2d7a15,3 +np.float32,0xbd94abc8,0xbd946923,3 +np.float32,0xbee62db0,0xbed85124,3 +np.float32,0x800000,0x800000,3 +np.float32,0xbef1df7e,0xbee1f636,3 +np.float32,0xbcf3cd20,0xbcf3bab5,3 +np.float32,0x80007b05,0x80007b05,3 +np.float32,0x3d9b3f2e,0x3d9af351,3 +np.float32,0xbf714a68,0xbf417dee,3 +np.float32,0xbf2a2d37,0xbf163069,3 +np.float32,0x8055104f,0x8055104f,3 +np.float32,0x7f5c40d7,0x3fc90fdb,3 +np.float32,0x1,0x1,3 +np.float32,0xff35f3a6,0xbfc90fdb,3 +np.float32,0xd9c7c,0xd9c7c,3 +np.float32,0xbf440cfc,0xbf274f22,3 +np.float32,0x8050ac43,0x8050ac43,3 +np.float32,0x63ee16,0x63ee16,3 +np.float32,0x7d90419b,0x3fc90fdb,3 +np.float32,0xfee22198,0xbfc90fdb,3 +np.float32,0xc2ead,0xc2ead,3 +np.float32,0x7f5cd6a6,0x3fc90fdb,3 +np.float32,0x3f6fab7e,0x3f40a184,3 +np.float32,0x3ecf998c,0x3ec53a73,3 +np.float32,0x7e5271f0,0x3fc90fdb,3 +np.float32,0x67c016,0x67c016,3 +np.float32,0x2189c8,0x2189c8,3 +np.float32,0x27d892,0x27d892,3 +np.float32,0x3f0d02c4,0x3f00e3c0,3 +np.float32,0xbf69ebca,0xbf3d8862,3 +np.float32,0x3e60c0d6,0x3e5d3ebb,3 +np.float32,0x3f45206c,0x3f27fc66,3 +np.float32,0xbf6b47dc,0xbf3e4592,3 +np.float32,0xfe9be2e2,0xbfc90fdb,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0xff271562,0xbfc90fdb,3 
+np.float32,0x3e2e5270,0x3e2caaaf,3 +np.float32,0x80222934,0x80222934,3 +np.float32,0xbd01d220,0xbd01c701,3 +np.float32,0x223aa0,0x223aa0,3 +np.float32,0x3f4b5a7e,0x3f2bd967,3 +np.float32,0x3f217d85,0x3f101200,3 +np.float32,0xbf57663a,0xbf331144,3 +np.float32,0x3f219862,0x3f102536,3 +np.float32,0x28a28c,0x28a28c,3 +np.float32,0xbf3f55f4,0xbf244f86,3 +np.float32,0xbf3de287,0xbf236092,3 +np.float32,0xbf1c1ce2,0xbf0c2fe3,3 +np.float32,0x80000001,0x80000001,3 +np.float32,0x3db695d0,0x3db61a90,3 +np.float32,0x6c39bf,0x6c39bf,3 +np.float32,0x7e33a12f,0x3fc90fdb,3 +np.float32,0x67623a,0x67623a,3 +np.float32,0x3e45dc54,0x3e4373b6,3 +np.float32,0x7f62fa68,0x3fc90fdb,3 +np.float32,0x3f0e1d01,0x3f01bbe5,3 +np.float32,0x3f13dc69,0x3f0615f5,3 +np.float32,0x246703,0x246703,3 +np.float32,0xbf1055b5,0xbf036d07,3 +np.float32,0x7f46d3d0,0x3fc90fdb,3 +np.float32,0x3d2b8086,0x3d2b66e5,3 +np.float32,0xbf03be44,0xbef35776,3 +np.float32,0x3f800000,0x3f490fdb,3 +np.float32,0xbec8d226,0xbebf613d,3 +np.float32,0x3d8faf00,0x3d8f72d4,3 +np.float32,0x170c4e,0x170c4e,3 +np.float32,0xff14c0f0,0xbfc90fdb,3 +np.float32,0xff16245d,0xbfc90fdb,3 +np.float32,0x7f44ce6d,0x3fc90fdb,3 +np.float32,0xbe8175d8,0xbe7d9aeb,3 +np.float32,0x3df7a4a1,0x3df67254,3 +np.float32,0xfe2cc46c,0xbfc90fdb,3 +np.float32,0x3f284e63,0x3f14e335,3 +np.float32,0x7e46e5d6,0x3fc90fdb,3 +np.float32,0x397be4,0x397be4,3 +np.float32,0xbf2560bc,0xbf12d50b,3 +np.float32,0x3ed9b8c1,0x3ecddc60,3 +np.float32,0xfec18c5a,0xbfc90fdb,3 +np.float32,0x64894d,0x64894d,3 +np.float32,0x36a65d,0x36a65d,3 +np.float32,0x804ffcd7,0x804ffcd7,3 +np.float32,0x800f79e4,0x800f79e4,3 +np.float32,0x5d45ac,0x5d45ac,3 +np.float32,0x6cdda0,0x6cdda0,3 +np.float32,0xbf7f2077,0xbf489fe5,3 +np.float32,0xbf152f78,0xbf0713a1,3 +np.float32,0x807bf344,0x807bf344,3 +np.float32,0x3f775023,0x3f44a4d8,3 +np.float32,0xbf3edf67,0xbf240365,3 +np.float32,0x7eed729c,0x3fc90fdb,3 +np.float32,0x14cc29,0x14cc29,3 +np.float32,0x7edd7b6b,0x3fc90fdb,3 +np.float32,0xbf3c6e2c,0xbf226fb7,3 +np.float32,0x51b9ad,0x51b9ad,3 +np.float32,0x3f617ee8,0x3f38dd7c,3 +np.float32,0xff800000,0xbfc90fdb,3 +np.float32,0x7f440ea0,0x3fc90fdb,3 +np.float32,0x3e639893,0x3e5ff49e,3 +np.float32,0xbd791bb0,0xbd78cd3c,3 +np.float32,0x8059fcbc,0x8059fcbc,3 +np.float32,0xbf7d1214,0xbf4796bd,3 +np.float32,0x3ef368fa,0x3ee33788,3 +np.float32,0xbecec0f4,0xbec48055,3 +np.float32,0xbc83d940,0xbc83d656,3 +np.float32,0xbce01220,0xbce003d4,3 +np.float32,0x803192a5,0x803192a5,3 +np.float32,0xbe40e0c0,0xbe3ea4f0,3 +np.float32,0xfb692600,0xbfc90fdb,3 +np.float32,0x3f1bec65,0x3f0c0c88,3 +np.float32,0x7f042798,0x3fc90fdb,3 +np.float32,0xbe047374,0xbe03b83b,3 +np.float32,0x7f7c6630,0x3fc90fdb,3 +np.float32,0x7f58dae3,0x3fc90fdb,3 +np.float32,0x80691c92,0x80691c92,3 +np.float32,0x7dbe76,0x7dbe76,3 +np.float32,0xbf231384,0xbf11339d,3 +np.float32,0xbef4acf8,0xbee43f8b,3 +np.float32,0x3ee9f9d0,0x3edb7793,3 +np.float32,0x3f0064f6,0x3eee04a8,3 +np.float32,0x313732,0x313732,3 +np.float32,0xfd58cf80,0xbfc90fdb,3 +np.float32,0x3f7a2bc9,0x3f461d30,3 +np.float32,0x7f7681af,0x3fc90fdb,3 +np.float32,0x7f504211,0x3fc90fdb,3 +np.float32,0xfeae0c00,0xbfc90fdb,3 +np.float32,0xbee14396,0xbed436d1,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x693406,0x693406,3 +np.float32,0x3eb4a679,0x3eadab1b,3 +np.float32,0x550505,0x550505,3 +np.float32,0xfd493d10,0xbfc90fdb,3 +np.float32,0x3f4fc907,0x3f2e8b2c,3 +np.float32,0x80799aa4,0x80799aa4,3 +np.float32,0xff1ea89b,0xbfc90fdb,3 +np.float32,0xff424510,0xbfc90fdb,3 +np.float32,0x7f68d026,0x3fc90fdb,3 
+np.float32,0xbea230ca,0xbe9d1200,3 +np.float32,0x7ea585da,0x3fc90fdb,3 +np.float32,0x3f3db211,0x3f23414c,3 +np.float32,0xfea4d964,0xbfc90fdb,3 +np.float32,0xbf17fe18,0xbf092984,3 +np.float32,0x7cc8a2,0x7cc8a2,3 +np.float32,0xff0330ba,0xbfc90fdb,3 +np.float32,0x3f769835,0x3f444592,3 +np.float32,0xeb0ac,0xeb0ac,3 +np.float32,0x7f7e45de,0x3fc90fdb,3 +np.float32,0xbdb510a8,0xbdb49873,3 +np.float32,0x3ebf900b,0x3eb74e9c,3 +np.float32,0xbf21bbce,0xbf103e89,3 +np.float32,0xbf3f4682,0xbf24459d,3 +np.float32,0x7eb6e9c8,0x3fc90fdb,3 +np.float32,0xbf42532d,0xbf2637be,3 +np.float32,0xbd3b2600,0xbd3b04b4,3 +np.float32,0x3f1fa9aa,0x3f0ec23e,3 +np.float32,0x7ed6a0f1,0x3fc90fdb,3 +np.float32,0xff4759a1,0xbfc90fdb,3 +np.float32,0x6d26e3,0x6d26e3,3 +np.float32,0xfe1108e0,0xbfc90fdb,3 +np.float32,0xfdf76900,0xbfc90fdb,3 +np.float32,0xfec66f22,0xbfc90fdb,3 +np.float32,0xbf3d097f,0xbf22d458,3 +np.float32,0x3d85be25,0x3d858d99,3 +np.float32,0x7f36739f,0x3fc90fdb,3 +np.float32,0x7bc0a304,0x3fc90fdb,3 +np.float32,0xff48dd90,0xbfc90fdb,3 +np.float32,0x48cab0,0x48cab0,3 +np.float32,0x3ed3943c,0x3ec8a2ef,3 +np.float32,0xbf61488e,0xbf38bede,3 +np.float32,0x3f543df5,0x3f313525,3 +np.float32,0x5cf2ca,0x5cf2ca,3 +np.float32,0x572686,0x572686,3 +np.float32,0x80369c7c,0x80369c7c,3 +np.float32,0xbd2c1d20,0xbd2c0338,3 +np.float32,0x3e255428,0x3e23ea0b,3 +np.float32,0xbeba9ee0,0xbeb2f54c,3 +np.float32,0x8015c165,0x8015c165,3 +np.float32,0x3d31f488,0x3d31d7e6,3 +np.float32,0x3f68591c,0x3f3cac43,3 +np.float32,0xf5ed5,0xf5ed5,3 +np.float32,0xbf3b1d34,0xbf21949e,3 +np.float32,0x1f0343,0x1f0343,3 +np.float32,0x3f0e52b5,0x3f01e4ef,3 +np.float32,0x7f57c596,0x3fc90fdb,3 +np.float64,0x7fd8e333ddb1c667,0x3ff921fb54442d18,1 +np.float64,0x800bcc9cdad7993a,0x800bcc9cdad7993a,1 +np.float64,0x3fcd6f81df3adf00,0x3fcceebbafc5d55e,1 +np.float64,0x3fed7338a57ae671,0x3fe7ce3e5811fc0a,1 +np.float64,0x7fe64994fcac9329,0x3ff921fb54442d18,1 +np.float64,0xfa5a6345f4b4d,0xfa5a6345f4b4d,1 +np.float64,0xe9dcd865d3b9b,0xe9dcd865d3b9b,1 +np.float64,0x7fea6cffabf4d9fe,0x3ff921fb54442d18,1 +np.float64,0xa9e1de6153c3c,0xa9e1de6153c3c,1 +np.float64,0xab6bdc5356d7c,0xab6bdc5356d7c,1 +np.float64,0x80062864a02c50ca,0x80062864a02c50ca,1 +np.float64,0xbfdac03aa7b58076,0xbfd9569f3230128d,1 +np.float64,0xbfe61b77752c36ef,0xbfe3588f51b8be8f,1 +np.float64,0x800bc854c8d790aa,0x800bc854c8d790aa,1 +np.float64,0x3feed1a2da3da346,0x3fe887f9b8ea031f,1 +np.float64,0x3fe910d3697221a7,0x3fe54365a53d840e,1 +np.float64,0x7fe7ab4944ef5692,0x3ff921fb54442d18,1 +np.float64,0x3fa462f1a028c5e3,0x3fa460303a6a4e69,1 +np.float64,0x800794f1a3af29e4,0x800794f1a3af29e4,1 +np.float64,0x3fee6fe7fafcdfd0,0x3fe854f863816d55,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7f336472fe66d,0x7f336472fe66d,1 +np.float64,0xffb1623ac822c478,0xbff921fb54442d18,1 +np.float64,0x3fbacd68ce359ad2,0x3fbab480b3638846,1 +np.float64,0xffd5c02706ab804e,0xbff921fb54442d18,1 +np.float64,0xbfd4daf03d29b5e0,0xbfd42928f069c062,1 +np.float64,0x800c6e85dbd8dd0c,0x800c6e85dbd8dd0c,1 +np.float64,0x800e3599c5bc6b34,0x800e3599c5bc6b34,1 +np.float64,0x2c0d654c581ad,0x2c0d654c581ad,1 +np.float64,0xbfdd3eb13fba7d62,0xbfdb6e8143302de7,1 +np.float64,0x800b60cb8776c197,0x800b60cb8776c197,1 +np.float64,0x80089819ad113034,0x80089819ad113034,1 +np.float64,0x29fe721453fcf,0x29fe721453fcf,1 +np.float64,0x3fe8722f4df0e45f,0x3fe4e026d9eadb4d,1 +np.float64,0xffd1fbcd01a3f79a,0xbff921fb54442d18,1 +np.float64,0x7fc74e1e982e9c3c,0x3ff921fb54442d18,1 +np.float64,0x800c09d3d15813a8,0x800c09d3d15813a8,1 
+np.float64,0xbfeee4578b3dc8af,0xbfe891ab3d6c3ce4,1 +np.float64,0xffdd01a6f33a034e,0xbff921fb54442d18,1 +np.float64,0x7fcc130480382608,0x3ff921fb54442d18,1 +np.float64,0xffcbb6bd1d376d7c,0xbff921fb54442d18,1 +np.float64,0xc068a53780d15,0xc068a53780d15,1 +np.float64,0xbfc974f15532e9e4,0xbfc92100b355f3e7,1 +np.float64,0x3fe6da79442db4f3,0x3fe3d87393b082e7,1 +np.float64,0xd9d9be4db3b38,0xd9d9be4db3b38,1 +np.float64,0x5ea50a20bd4a2,0x5ea50a20bd4a2,1 +np.float64,0xbfe5597f7d2ab2ff,0xbfe2d3ccc544b52b,1 +np.float64,0x80019364e4e326cb,0x80019364e4e326cb,1 +np.float64,0x3fed2902c3fa5206,0x3fe7a5e1df07e5c1,1 +np.float64,0xbfa7b72b5c2f6e50,0xbfa7b2d545b3cc1f,1 +np.float64,0xffdb60dd43b6c1ba,0xbff921fb54442d18,1 +np.float64,0x81a65d8b034cc,0x81a65d8b034cc,1 +np.float64,0x8000c30385818608,0x8000c30385818608,1 +np.float64,0x6022f5f4c045f,0x6022f5f4c045f,1 +np.float64,0x8007a2bb810f4578,0x8007a2bb810f4578,1 +np.float64,0x7fdc68893238d111,0x3ff921fb54442d18,1 +np.float64,0x7fd443454ea8868a,0x3ff921fb54442d18,1 +np.float64,0xffe6b04209ed6084,0xbff921fb54442d18,1 +np.float64,0x7fcd9733d13b2e67,0x3ff921fb54442d18,1 +np.float64,0xf5ee80a9ebdd0,0xf5ee80a9ebdd0,1 +np.float64,0x3fe3788e8de6f11e,0x3fe17dec7e6843a0,1 +np.float64,0x3fee36f62f7c6dec,0x3fe836f832515b43,1 +np.float64,0xf6cb49aded969,0xf6cb49aded969,1 +np.float64,0x3fd2b15ea4a562bc,0x3fd22fdc09920e67,1 +np.float64,0x7fccf6aef139ed5d,0x3ff921fb54442d18,1 +np.float64,0x3fd396b8ce272d72,0x3fd3026118857bd4,1 +np.float64,0x7fe53d3c80ea7a78,0x3ff921fb54442d18,1 +np.float64,0x3feae88fc4f5d120,0x3fe65fb04b18ef7a,1 +np.float64,0x3fedc643747b8c86,0x3fe7fafa6c20e25a,1 +np.float64,0xffdb2dc0df365b82,0xbff921fb54442d18,1 +np.float64,0xbfa2af3658255e70,0xbfa2ad17348f4253,1 +np.float64,0x3f8aa77b30354ef6,0x3f8aa71892336a69,1 +np.float64,0xbfdd1b1efbba363e,0xbfdb510dcd186820,1 +np.float64,0x800f50d99c5ea1b3,0x800f50d99c5ea1b3,1 +np.float64,0xff6ed602403dac00,0xbff921fb54442d18,1 +np.float64,0x800477d71aa8efaf,0x800477d71aa8efaf,1 +np.float64,0xbfe729a9e86e5354,0xbfe40ca78d9eefcf,1 +np.float64,0x3fd81ab2d4303566,0x3fd70d7e3937ea22,1 +np.float64,0xb617cbab6c2fa,0xb617cbab6c2fa,1 +np.float64,0x7fefffffffffffff,0x3ff921fb54442d18,1 +np.float64,0xffa40933ac281260,0xbff921fb54442d18,1 +np.float64,0xbfe1ede621e3dbcc,0xbfe057bb2b341ced,1 +np.float64,0xbfec700f03b8e01e,0xbfe73fb190bc722e,1 +np.float64,0x6e28af02dc517,0x6e28af02dc517,1 +np.float64,0x3fe37ad37ae6f5a7,0x3fe17f94674818a9,1 +np.float64,0x8000cbdeeae197bf,0x8000cbdeeae197bf,1 +np.float64,0x3fe8fd1f01f1fa3e,0x3fe5372bbec5d72c,1 +np.float64,0x3f8f9229103f2452,0x3f8f918531894256,1 +np.float64,0x800536858e0a6d0c,0x800536858e0a6d0c,1 +np.float64,0x7fe82bb4f9f05769,0x3ff921fb54442d18,1 +np.float64,0xffc1c2fb592385f8,0xbff921fb54442d18,1 +np.float64,0x7f924ddfc0249bbf,0x3ff921fb54442d18,1 +np.float64,0xffd5e125c52bc24c,0xbff921fb54442d18,1 +np.float64,0xbfef0d8738be1b0e,0xbfe8a6ef17b16c10,1 +np.float64,0x3fc9c8875233910f,0x3fc9715e708503cb,1 +np.float64,0xbfe2d926f4e5b24e,0xbfe108956e61cbb3,1 +np.float64,0x7fd61c496dac3892,0x3ff921fb54442d18,1 +np.float64,0x7fed545c6b7aa8b8,0x3ff921fb54442d18,1 +np.float64,0x8003746fea86e8e1,0x8003746fea86e8e1,1 +np.float64,0x3fdf515e75bea2bd,0x3fdd201a5585caa3,1 +np.float64,0xffda87c8ee350f92,0xbff921fb54442d18,1 +np.float64,0xffc675d8e22cebb0,0xbff921fb54442d18,1 +np.float64,0xffcdc173433b82e8,0xbff921fb54442d18,1 +np.float64,0xffed9df1517b3be2,0xbff921fb54442d18,1 +np.float64,0x3fd6a2eec72d45de,0x3fd5c1f1d7dcddcf,1 +np.float64,0xffec116a66f822d4,0xbff921fb54442d18,1 
+np.float64,0x8007c2a2458f8545,0x8007c2a2458f8545,1 +np.float64,0x3fe4ee80d969dd02,0x3fe2895076094668,1 +np.float64,0x3fe3cae7116795ce,0x3fe1b9c07e0d03a7,1 +np.float64,0xbfd81bf8d8b037f2,0xbfd70e9bbbb4ca57,1 +np.float64,0x800c88ccd1f9119a,0x800c88ccd1f9119a,1 +np.float64,0xffdab2aee2b5655e,0xbff921fb54442d18,1 +np.float64,0x3fe743d227ee87a4,0x3fe41dcaef186d96,1 +np.float64,0x3fb060fd0220c1fa,0x3fb05b47f56ebbb4,1 +np.float64,0xbfd3f03772a7e06e,0xbfd3541522377291,1 +np.float64,0x190a5ae03216,0x190a5ae03216,1 +np.float64,0x3fe48c71916918e4,0x3fe24442f45b3183,1 +np.float64,0x800862470590c48e,0x800862470590c48e,1 +np.float64,0x7fd3ced89d279db0,0x3ff921fb54442d18,1 +np.float64,0x3feb3d9b4ab67b37,0x3fe69140cf2623f7,1 +np.float64,0xbc3f296b787e5,0xbc3f296b787e5,1 +np.float64,0xbfed6b905dfad721,0xbfe7ca1881a8c0fd,1 +np.float64,0xbfe621c2aaac4386,0xbfe35cd1969a82db,1 +np.float64,0x8009e7b17593cf63,0x8009e7b17593cf63,1 +np.float64,0x80045f580ca8beb1,0x80045f580ca8beb1,1 +np.float64,0xbfea2e177e745c2f,0xbfe5f13971633339,1 +np.float64,0x3fee655787fccab0,0x3fe84f6b98b6de26,1 +np.float64,0x3fc9cde92f339bd0,0x3fc9768a88b2c97c,1 +np.float64,0x3fc819c3b3303388,0x3fc7d25e1526e731,1 +np.float64,0x3fd3e848d2a7d090,0x3fd34cd9e6af558f,1 +np.float64,0x3fe19dacac633b5a,0x3fe01a6b4d27adc2,1 +np.float64,0x800b190da316321c,0x800b190da316321c,1 +np.float64,0xd5c69711ab8d3,0xd5c69711ab8d3,1 +np.float64,0xbfdc31bed7b8637e,0xbfda8ea3c1309d6d,1 +np.float64,0xbfd02ba007a05740,0xbfcfad86f0d756dc,1 +np.float64,0x3fe874473d70e88e,0x3fe4e1793cd82123,1 +np.float64,0xffb465585c28cab0,0xbff921fb54442d18,1 +np.float64,0xbfb5d8e13e2bb1c0,0xbfb5cb5c7807fc4d,1 +np.float64,0xffe80f933bf01f26,0xbff921fb54442d18,1 +np.float64,0x7feea783f5fd4f07,0x3ff921fb54442d18,1 +np.float64,0xbfae6665f43cccd0,0xbfae5d45b0a6f90a,1 +np.float64,0x800bd6ef5a77addf,0x800bd6ef5a77addf,1 +np.float64,0x800d145babda28b8,0x800d145babda28b8,1 +np.float64,0x39de155473bc3,0x39de155473bc3,1 +np.float64,0x3fefbd6bb1ff7ad8,0x3fe9008e73a3296e,1 +np.float64,0x3fc40bca3d281798,0x3fc3e2710e167007,1 +np.float64,0x3fcae0918335c120,0x3fca7e09e704a678,1 +np.float64,0x51287fbea2511,0x51287fbea2511,1 +np.float64,0x7fa6bc33a82d7866,0x3ff921fb54442d18,1 +np.float64,0xe72a2bebce546,0xe72a2bebce546,1 +np.float64,0x3fe1c8fd686391fa,0x3fe03b9622aeb4e3,1 +np.float64,0x3fe2a73ac3654e76,0x3fe0e36bc1ee4ac4,1 +np.float64,0x59895218b312b,0x59895218b312b,1 +np.float64,0xc6dc25c78db85,0xc6dc25c78db85,1 +np.float64,0xbfc06cfac520d9f4,0xbfc0561f85d2c907,1 +np.float64,0xbfea912dc4f5225c,0xbfe62c3b1c01c793,1 +np.float64,0x3fb78ce89a2f19d0,0x3fb77bfcb65a67d3,1 +np.float64,0xbfece5cdea39cb9c,0xbfe78103d24099e5,1 +np.float64,0x30d3054e61a61,0x30d3054e61a61,1 +np.float64,0xbfd3fe26fba7fc4e,0xbfd360c8447c4f7a,1 +np.float64,0x800956072a92ac0f,0x800956072a92ac0f,1 +np.float64,0x7fe639b3b6ec7366,0x3ff921fb54442d18,1 +np.float64,0x800ee30240bdc605,0x800ee30240bdc605,1 +np.float64,0x7fef6af0d2bed5e1,0x3ff921fb54442d18,1 +np.float64,0xffefce8725ff9d0d,0xbff921fb54442d18,1 +np.float64,0x3fe2e311da65c624,0x3fe10ff1623089dc,1 +np.float64,0xbfe7e5cbe56fcb98,0xbfe486c3daeda67c,1 +np.float64,0x80095bc14472b783,0x80095bc14472b783,1 +np.float64,0xffef0cb4553e1968,0xbff921fb54442d18,1 +np.float64,0xe3e60567c7cc1,0xe3e60567c7cc1,1 +np.float64,0xffde919f06bd233e,0xbff921fb54442d18,1 +np.float64,0x3fe3f9632e27f2c6,0x3fe1db49ebd21c4e,1 +np.float64,0x9dee9a233bdd4,0x9dee9a233bdd4,1 +np.float64,0xbfe3bb0602e7760c,0xbfe1ae41b6d4c488,1 +np.float64,0x3fc46945a128d288,0x3fc43da54c6c6a2a,1 
+np.float64,0x7fdef149ac3de292,0x3ff921fb54442d18,1 +np.float64,0x800a96c76d752d8f,0x800a96c76d752d8f,1 +np.float64,0x3f971a32382e3464,0x3f9719316b9e9baf,1 +np.float64,0x7fe97bcf15b2f79d,0x3ff921fb54442d18,1 +np.float64,0x7fea894558f5128a,0x3ff921fb54442d18,1 +np.float64,0x3fc9e3be1933c780,0x3fc98b847c3923eb,1 +np.float64,0x3f7accac40359959,0x3f7acc9330741b64,1 +np.float64,0xa80c136950183,0xa80c136950183,1 +np.float64,0x3fe408732b2810e6,0x3fe1e61e7cbc8824,1 +np.float64,0xffa775bc042eeb80,0xbff921fb54442d18,1 +np.float64,0x3fbf04bd223e0980,0x3fbede37b8fc697e,1 +np.float64,0x7fd999b34c333366,0x3ff921fb54442d18,1 +np.float64,0xe72146dfce429,0xe72146dfce429,1 +np.float64,0x4f511ee49ea24,0x4f511ee49ea24,1 +np.float64,0xffb3e6e58827cdc8,0xbff921fb54442d18,1 +np.float64,0x3fd1f180cfa3e300,0x3fd17e85b2871de2,1 +np.float64,0x97c8e45b2f91d,0x97c8e45b2f91d,1 +np.float64,0xbfeeb20e88fd641d,0xbfe8778f878440bf,1 +np.float64,0xbfe1fc6dee23f8dc,0xbfe062c815a93cde,1 +np.float64,0xab4bf71f5697f,0xab4bf71f5697f,1 +np.float64,0xa9675a2952cec,0xa9675a2952cec,1 +np.float64,0xbfef3ea4a33e7d49,0xbfe8c02743ebc1b6,1 +np.float64,0x3fe22a2eafa4545d,0x3fe08577afca52a9,1 +np.float64,0x3fe8a08daaf1411c,0x3fe4fd5a34f05305,1 +np.float64,0xbfc6cda77b2d9b50,0xbfc6910bcfa0cf4f,1 +np.float64,0x3fec398394387307,0x3fe7211dd5276500,1 +np.float64,0x3fe36c95c626d92c,0x3fe1752e5aa2357b,1 +np.float64,0xffd8b9e7073173ce,0xbff921fb54442d18,1 +np.float64,0xffe19f043ae33e08,0xbff921fb54442d18,1 +np.float64,0x800e3640709c6c81,0x800e3640709c6c81,1 +np.float64,0x3fe7d6c20aafad84,0x3fe47d1a3307d9c8,1 +np.float64,0x80093fd63b727fad,0x80093fd63b727fad,1 +np.float64,0xffe1a671a4634ce3,0xbff921fb54442d18,1 +np.float64,0xbfe53a6b386a74d6,0xbfe2be41859cb10d,1 +np.float64,0xbfed149a097a2934,0xbfe79ab7e3e93c1c,1 +np.float64,0x7fc2769a5724ed34,0x3ff921fb54442d18,1 +np.float64,0xffd01e4e99a03c9e,0xbff921fb54442d18,1 +np.float64,0xa61f38434c3e7,0xa61f38434c3e7,1 +np.float64,0x800ad4ac5195a959,0x800ad4ac5195a959,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x80034a45b6c6948c,0x80034a45b6c6948c,1 +np.float64,0x6350b218c6a17,0x6350b218c6a17,1 +np.float64,0xfff0000000000000,0xbff921fb54442d18,1 +np.float64,0x3fe363e759e6c7cf,0x3fe16ed58d80f9ce,1 +np.float64,0xffe3b98e59e7731c,0xbff921fb54442d18,1 +np.float64,0x3fdbf7b40337ef68,0x3fda5df7ad3c80f9,1 +np.float64,0xbfe9cdf784739bef,0xbfe5b74f346ef93d,1 +np.float64,0xbfc321bea326437c,0xbfc2fdc0d4ff7561,1 +np.float64,0xbfe40f77d2a81ef0,0xbfe1eb28c4ae4dde,1 +np.float64,0x7fe071806960e300,0x3ff921fb54442d18,1 +np.float64,0x7fd269006ea4d200,0x3ff921fb54442d18,1 +np.float64,0x80017a56e0e2f4af,0x80017a56e0e2f4af,1 +np.float64,0x8004b4ea09a969d5,0x8004b4ea09a969d5,1 +np.float64,0xbfedbb01e63b7604,0xbfe7f4f0e84297df,1 +np.float64,0x3fe44454826888a9,0x3fe210ff6d005706,1 +np.float64,0xbfe0e77e6ea1cefd,0xbfdf1a977da33402,1 +np.float64,0xbfed6d4c8c3ada99,0xbfe7cb0932093f60,1 +np.float64,0x1d74cb9e3ae9a,0x1d74cb9e3ae9a,1 +np.float64,0x80082a785d1054f1,0x80082a785d1054f1,1 +np.float64,0x3fe58393266b0726,0x3fe2f0d8e91d4887,1 +np.float64,0xffe4028899680510,0xbff921fb54442d18,1 +np.float64,0x783a2e5af0746,0x783a2e5af0746,1 +np.float64,0x7fcdce88e73b9d11,0x3ff921fb54442d18,1 +np.float64,0x3fc58672a72b0ce5,0x3fc5535e090e56e2,1 +np.float64,0x800889c839b11391,0x800889c839b11391,1 +np.float64,0xffe5e05c466bc0b8,0xbff921fb54442d18,1 +np.float64,0xbfcbef6ebe37dedc,0xbfcb810752468f49,1 +np.float64,0xffe9408563b2810a,0xbff921fb54442d18,1 +np.float64,0xbfee4738367c8e70,0xbfe83f8e5dd7602f,1 
+np.float64,0xbfe4aeb587295d6b,0xbfe25c7a0c76a454,1 +np.float64,0xffc9aea0a7335d40,0xbff921fb54442d18,1 +np.float64,0xe1e02199c3c04,0xe1e02199c3c04,1 +np.float64,0xbfbd9400783b2800,0xbfbd729345d1d14f,1 +np.float64,0x7a5418bcf4a84,0x7a5418bcf4a84,1 +np.float64,0x3fdc1c2fa5b83860,0x3fda7c935965ae72,1 +np.float64,0x80076a9f58ced53f,0x80076a9f58ced53f,1 +np.float64,0x3fedc4bf957b897f,0x3fe7fa2a83148f1c,1 +np.float64,0x800981b8a9d30372,0x800981b8a9d30372,1 +np.float64,0xffe1082311621046,0xbff921fb54442d18,1 +np.float64,0xe0091f89c0124,0xe0091f89c0124,1 +np.float64,0xbfce8d674f3d1ad0,0xbfcdfdbf2ddaa0ca,1 +np.float64,0x800516e72eaa2dcf,0x800516e72eaa2dcf,1 +np.float64,0xffe61ee64c6c3dcc,0xbff921fb54442d18,1 +np.float64,0x7fed2683cafa4d07,0x3ff921fb54442d18,1 +np.float64,0xffd4faf27729f5e4,0xbff921fb54442d18,1 +np.float64,0x7fe308fa842611f4,0x3ff921fb54442d18,1 +np.float64,0x3fc612a62b2c2550,0x3fc5db9ddbd4e159,1 +np.float64,0xbfe5b01e766b603d,0xbfe30f72a875e988,1 +np.float64,0x3fc2dd8b9a25bb17,0x3fc2bb06246b9f78,1 +np.float64,0x8170908102e12,0x8170908102e12,1 +np.float64,0x800c1c8a8a583915,0x800c1c8a8a583915,1 +np.float64,0xffe5d91e8b6bb23c,0xbff921fb54442d18,1 +np.float64,0xffd140adee22815c,0xbff921fb54442d18,1 +np.float64,0xbfe2f1f5f8e5e3ec,0xbfe11afa5d749952,1 +np.float64,0xbfed6d1d587ada3b,0xbfe7caef9ecf7651,1 +np.float64,0x3fe9b85e67f370bd,0x3fe5aa3474768982,1 +np.float64,0x7fdc8932edb91265,0x3ff921fb54442d18,1 +np.float64,0x7fd136bc54a26d78,0x3ff921fb54442d18,1 +np.float64,0x800a1ea12a343d43,0x800a1ea12a343d43,1 +np.float64,0x3fec6a5c1b78d4b8,0x3fe73c82235c3f8f,1 +np.float64,0x800fbf6a00df7ed4,0x800fbf6a00df7ed4,1 +np.float64,0xbfd0e6e0cda1cdc2,0xbfd0864bf8cad294,1 +np.float64,0x3fc716df482e2dbf,0x3fc6d7fbfd4a8470,1 +np.float64,0xbfe75990936eb321,0xbfe42bffec3fa0d7,1 +np.float64,0x3fd58e54a02b1ca9,0x3fd4cace1107a5cc,1 +np.float64,0xbfc9c04136338084,0xbfc9696ad2591d54,1 +np.float64,0xdd1f0147ba3e0,0xdd1f0147ba3e0,1 +np.float64,0x5c86a940b90e,0x5c86a940b90e,1 +np.float64,0xbfecae3b8e795c77,0xbfe7624d4988c612,1 +np.float64,0xffd0370595206e0c,0xbff921fb54442d18,1 +np.float64,0xbfdc26d443384da8,0xbfda857ecd33ba9f,1 +np.float64,0xbfd1c849d9a39094,0xbfd15849449cc378,1 +np.float64,0xffee04acdb3c0959,0xbff921fb54442d18,1 +np.float64,0xbfded1056dbda20a,0xbfdcb83b30e1528c,1 +np.float64,0x7fb7b826622f704c,0x3ff921fb54442d18,1 +np.float64,0xbfee4df8ae7c9bf1,0xbfe8431df9dfd05d,1 +np.float64,0x7fe7f3670e2fe6cd,0x3ff921fb54442d18,1 +np.float64,0x8008ac9ae0d15936,0x8008ac9ae0d15936,1 +np.float64,0x800dce9f3b3b9d3f,0x800dce9f3b3b9d3f,1 +np.float64,0x7fbb19db203633b5,0x3ff921fb54442d18,1 +np.float64,0x3fe56c7f302ad8fe,0x3fe2e0eec3ad45fd,1 +np.float64,0x7fe82c05c570580b,0x3ff921fb54442d18,1 +np.float64,0xc0552b7780aa6,0xc0552b7780aa6,1 +np.float64,0x39d40e3073a83,0x39d40e3073a83,1 +np.float64,0x3fd8db54d731b6aa,0x3fd7b589b3ee9b20,1 +np.float64,0xffcdd355233ba6ac,0xbff921fb54442d18,1 +np.float64,0x3fbe97b3a43d2f67,0x3fbe72bca9be0348,1 +np.float64,0xbff0000000000000,0xbfe921fb54442d18,1 +np.float64,0xbfb4f55e6229eac0,0xbfb4e96df18a75a7,1 +np.float64,0xbfc66399ba2cc734,0xbfc62a3298bd96fc,1 +np.float64,0x3fd00988bb201311,0x3fcf6d67a9374c38,1 +np.float64,0x7fe471867d28e30c,0x3ff921fb54442d18,1 +np.float64,0xbfe38e0e64271c1d,0xbfe18d9888b7523b,1 +np.float64,0x8009dc127573b825,0x8009dc127573b825,1 +np.float64,0x800047bde4608f7d,0x800047bde4608f7d,1 +np.float64,0xffeede42c77dbc85,0xbff921fb54442d18,1 +np.float64,0xd8cf6d13b19ee,0xd8cf6d13b19ee,1 +np.float64,0xbfd08fb302a11f66,0xbfd034b1f8235e23,1 
+np.float64,0x7fdb404c0b368097,0x3ff921fb54442d18,1 +np.float64,0xbfd6ba0438ad7408,0xbfd5d673e3276ec1,1 +np.float64,0xffd9568027b2ad00,0xbff921fb54442d18,1 +np.float64,0xbfb313b73e262770,0xbfb30ab4acb4fa67,1 +np.float64,0xbfe2dc1a15e5b834,0xbfe10ac5f8f3acd3,1 +np.float64,0xbfee426bf4bc84d8,0xbfe83d061df91edd,1 +np.float64,0xd9142c2fb2286,0xd9142c2fb2286,1 +np.float64,0x7feb0d11dff61a23,0x3ff921fb54442d18,1 +np.float64,0x800fea5b509fd4b7,0x800fea5b509fd4b7,1 +np.float64,0x3fe1a8818da35103,0x3fe022ba1bdf366e,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xbfd8fc6de6b1f8dc,0xbfd7d24726ed8dcc,1 +np.float64,0xf4b3dc2de967c,0xf4b3dc2de967c,1 +np.float64,0x8af0409b15e08,0x8af0409b15e08,1 +np.float64,0x3fb21e6934243cd2,0x3fb216b065f8709a,1 +np.float64,0x3fc53069392a60d2,0x3fc4ffa931211fb9,1 +np.float64,0xffc955812c32ab04,0xbff921fb54442d18,1 +np.float64,0xbfe3de42b1a7bc86,0xbfe1c7bd1324de75,1 +np.float64,0x1dc149a03b82a,0x1dc149a03b82a,1 +np.float64,0x8001bc5a24a378b5,0x8001bc5a24a378b5,1 +np.float64,0x3da14c407b44,0x3da14c407b44,1 +np.float64,0x80025e8da924bd1c,0x80025e8da924bd1c,1 +np.float64,0xbfcb0141c9360284,0xbfca9d572ea5e1f3,1 +np.float64,0xc90036fd92007,0xc90036fd92007,1 +np.float64,0x138312c427063,0x138312c427063,1 +np.float64,0x800dda3a963bb475,0x800dda3a963bb475,1 +np.float64,0x3fe9339934f26732,0x3fe558e723291f78,1 +np.float64,0xbfea8357027506ae,0xbfe6240826faaf48,1 +np.float64,0x7fe04735cae08e6b,0x3ff921fb54442d18,1 +np.float64,0x3fe29aca3c653594,0x3fe0da214c8bc6a4,1 +np.float64,0x3fbe1f09a03c3e13,0x3fbdfbbefef0155b,1 +np.float64,0x816ee4ad02ddd,0x816ee4ad02ddd,1 +np.float64,0xffddd1b31d3ba366,0xbff921fb54442d18,1 +np.float64,0x3fe2e01e0625c03c,0x3fe10dc0bd6677c2,1 +np.float64,0x3fec6bcf1978d79e,0x3fe73d518cddeb7c,1 +np.float64,0x7fe01aaaf8603555,0x3ff921fb54442d18,1 +np.float64,0xdf300cc5be602,0xdf300cc5be602,1 +np.float64,0xbfe71c01a36e3804,0xbfe403af80ce47b8,1 +np.float64,0xffa5be00ac2b7c00,0xbff921fb54442d18,1 +np.float64,0xbfda9ba711b5374e,0xbfd93775e3ac6bda,1 +np.float64,0xbfe56d8a27eadb14,0xbfe2e1a7185e8e6d,1 +np.float64,0x800f1bc937be3792,0x800f1bc937be3792,1 +np.float64,0x800a61d93c74c3b3,0x800a61d93c74c3b3,1 +np.float64,0x7fe71a52fcae34a5,0x3ff921fb54442d18,1 +np.float64,0x7fb4aef256295de4,0x3ff921fb54442d18,1 +np.float64,0x3fe6c1e861ed83d1,0x3fe3c828f281a7ef,1 +np.float64,0x3fba128402342508,0x3fb9fb94cf141860,1 +np.float64,0x3fee55a7ecfcab50,0x3fe8472a9af893ee,1 +np.float64,0x3fe586f31b2b0de6,0x3fe2f32bce9e91bc,1 +np.float64,0xbfbb1d1442363a28,0xbfbb034c7729d5f2,1 +np.float64,0xc78b4d3f8f16a,0xc78b4d3f8f16a,1 +np.float64,0x7fdbc277d4b784ef,0x3ff921fb54442d18,1 +np.float64,0xbfa728ca2c2e5190,0xbfa724c04e73ccbd,1 +np.float64,0x7fefc7b2143f8f63,0x3ff921fb54442d18,1 +np.float64,0x3fd153a3dda2a748,0x3fd0ebccd33a4dca,1 +np.float64,0xbfe18a6eace314de,0xbfe00ba32ec89d30,1 +np.float64,0x7feef518537dea30,0x3ff921fb54442d18,1 +np.float64,0x8005f007cd4be010,0x8005f007cd4be010,1 +np.float64,0x7fd890b840b12170,0x3ff921fb54442d18,1 +np.float64,0x7feed0582ebda0af,0x3ff921fb54442d18,1 +np.float64,0x1013f53220280,0x1013f53220280,1 +np.float64,0xbfe77273986ee4e7,0xbfe43c375a8bf6de,1 +np.float64,0x7fe3ab8918675711,0x3ff921fb54442d18,1 +np.float64,0xbfc6ad515b2d5aa4,0xbfc671b2f7f86624,1 +np.float64,0x7fcd86231d3b0c45,0x3ff921fb54442d18,1 +np.float64,0xffe2523299a4a464,0xbff921fb54442d18,1 +np.float64,0x7fcadc5a1b35b8b3,0x3ff921fb54442d18,1 +np.float64,0x3fe5e020c4ebc042,0x3fe330418eec75bd,1 +np.float64,0x7fe332a9dc266553,0x3ff921fb54442d18,1 
+np.float64,0xfa11dc21f425,0xfa11dc21f425,1 +np.float64,0xbec800177d900,0xbec800177d900,1 +np.float64,0x3fcadd057835ba0b,0x3fca7aa42face8bc,1 +np.float64,0xbfe6b9a206ad7344,0xbfe3c2a9719803de,1 +np.float64,0x3fbb4250b63684a0,0x3fbb281e9cefc519,1 +np.float64,0x7fef8787517f0f0e,0x3ff921fb54442d18,1 +np.float64,0x8001315c2d6262b9,0x8001315c2d6262b9,1 +np.float64,0xbfd94e3cf2b29c7a,0xbfd819257d36f56c,1 +np.float64,0xf1f325abe3e65,0xf1f325abe3e65,1 +np.float64,0x7fd6c07079ad80e0,0x3ff921fb54442d18,1 +np.float64,0x7fe328b075a65160,0x3ff921fb54442d18,1 +np.float64,0x7fe7998f812f331e,0x3ff921fb54442d18,1 +np.float64,0xffe026bb65604d76,0xbff921fb54442d18,1 +np.float64,0xffd6c06de8ad80dc,0xbff921fb54442d18,1 +np.float64,0x3fcd5a37bf3ab46f,0x3fccda82935d98ce,1 +np.float64,0xffc3e5a45227cb48,0xbff921fb54442d18,1 +np.float64,0x3febf7dd8177efbc,0x3fe6fc0bb999883e,1 +np.float64,0x7fd7047ea92e08fc,0x3ff921fb54442d18,1 +np.float64,0x35b3fc406b680,0x35b3fc406b680,1 +np.float64,0x7fd52e97632a5d2e,0x3ff921fb54442d18,1 +np.float64,0x3fd464d401a8c9a8,0x3fd3be2967fc97c3,1 +np.float64,0x800e815b2ebd02b6,0x800e815b2ebd02b6,1 +np.float64,0x3fca8428af350850,0x3fca257b466b8970,1 +np.float64,0x8007b7526f6f6ea6,0x8007b7526f6f6ea6,1 +np.float64,0x82f60a8f05ec2,0x82f60a8f05ec2,1 +np.float64,0x3fb71a5d0a2e34c0,0x3fb70a629ef8e2a2,1 +np.float64,0x7fc8570c7d30ae18,0x3ff921fb54442d18,1 +np.float64,0x7fe5528e77eaa51c,0x3ff921fb54442d18,1 +np.float64,0xffc20dbbf1241b78,0xbff921fb54442d18,1 +np.float64,0xeb13368fd6267,0xeb13368fd6267,1 +np.float64,0x7fe7d529056faa51,0x3ff921fb54442d18,1 +np.float64,0x3fecd02eabf9a05d,0x3fe77516f0ba1ac4,1 +np.float64,0x800fcba6a09f974d,0x800fcba6a09f974d,1 +np.float64,0x7fe7e8e015afd1bf,0x3ff921fb54442d18,1 +np.float64,0xbfd271a382a4e348,0xbfd1f513a191c595,1 +np.float64,0x9f1014013e21,0x9f1014013e21,1 +np.float64,0x3fc05da47f20bb49,0x3fc04708a13a3a47,1 +np.float64,0x3fe0f427dda1e850,0x3fdf2e60ba8678b9,1 +np.float64,0xbfecb29fa539653f,0xbfe764bc791c45dd,1 +np.float64,0x45881ec68b104,0x45881ec68b104,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fe9c67ee1338cfe,0x3fe5b2c7b3df6ce8,1 +np.float64,0x7fedb8fef6bb71fd,0x3ff921fb54442d18,1 +np.float64,0x3fe54f6aaaea9ed6,0x3fe2ccd1df2abaa9,1 +np.float64,0x7feff58a1bbfeb13,0x3ff921fb54442d18,1 +np.float64,0x7fe3b62827276c4f,0x3ff921fb54442d18,1 +np.float64,0x3fe5feb682ebfd6d,0x3fe345105bc6d980,1 +np.float64,0x3fe49f38d9693e72,0x3fe2518b2824757f,1 +np.float64,0x8006bfd27c6d7fa6,0x8006bfd27c6d7fa6,1 +np.float64,0x3fc13409e2226814,0x3fc119ce0c01a5a2,1 +np.float64,0x95f8c7212bf19,0x95f8c7212bf19,1 +np.float64,0x3fd9f0fa6133e1f5,0x3fd8a567515edecf,1 +np.float64,0x3fef95cbe5ff2b98,0x3fe8ec88c768ba0b,1 +np.float64,0x3fbed28bba3da510,0x3fbeacbf136e51c2,1 +np.float64,0xbfd3987aeca730f6,0xbfd303fca58e3e60,1 +np.float64,0xbfed0f90cbfa1f22,0xbfe797f59249410d,1 +np.float64,0xffe55d8cbf2abb19,0xbff921fb54442d18,1 +np.float64,0x3feb4d9fc6769b40,0x3fe69a88131a1f1f,1 +np.float64,0x80085569acd0aad4,0x80085569acd0aad4,1 +np.float64,0x20557a6e40ab0,0x20557a6e40ab0,1 +np.float64,0x3fead2fd5df5a5fb,0x3fe653091f33b27f,1 +np.float64,0x3fe7b9983eaf7330,0x3fe46a50c4b5235e,1 +np.float64,0xffdad237ffb5a470,0xbff921fb54442d18,1 +np.float64,0xbfe5cc39a4eb9874,0xbfe322ad3a903f93,1 +np.float64,0x800ad6eecb35adde,0x800ad6eecb35adde,1 +np.float64,0xffec620f6438c41e,0xbff921fb54442d18,1 +np.float64,0xbfe5ef29122bde52,0xbfe33a7dfcc255e2,1 +np.float64,0x3fd451e7d0a8a3d0,0x3fd3acfa4939af10,1 +np.float64,0x8003ea93c127d528,0x8003ea93c127d528,1 
+np.float64,0x800b48d37c9691a7,0x800b48d37c9691a7,1 +np.float64,0x3fe7e202acafc405,0x3fe484558246069b,1 +np.float64,0x80070c9b686e1938,0x80070c9b686e1938,1 +np.float64,0xbfda90bbc6352178,0xbfd92e25fcd12288,1 +np.float64,0x800e1ffebb1c3ffe,0x800e1ffebb1c3ffe,1 +np.float64,0x3ff0000000000000,0x3fe921fb54442d18,1 +np.float64,0xffd8cfdd46319fba,0xbff921fb54442d18,1 +np.float64,0x7fd8cd4182319a82,0x3ff921fb54442d18,1 +np.float64,0x3fed8bb778bb176f,0x3fe7db7c77c4c694,1 +np.float64,0x3fc74a70302e94e0,0x3fc709e95d6defec,1 +np.float64,0x3fe87269d070e4d4,0x3fe4e04bcc4a2137,1 +np.float64,0x7fb48223f6290447,0x3ff921fb54442d18,1 +np.float64,0xffe8ec444b71d888,0xbff921fb54442d18,1 +np.float64,0x7fde17d280bc2fa4,0x3ff921fb54442d18,1 +np.float64,0x3fd1cbde01a397bc,0x3fd15b9bb7b3147b,1 +np.float64,0x800883a64451074d,0x800883a64451074d,1 +np.float64,0x7fe3160a3f262c13,0x3ff921fb54442d18,1 +np.float64,0xbfe051d4d9a0a3aa,0xbfde2ecf14dc75fb,1 +np.float64,0xbfd89de689b13bce,0xbfd780176d1a28a3,1 +np.float64,0x3fecde2bf779bc58,0x3fe77ccf10bdd8e2,1 +np.float64,0xffe75774dc6eaee9,0xbff921fb54442d18,1 +np.float64,0x7fe834414d706882,0x3ff921fb54442d18,1 +np.float64,0x1,0x1,1 +np.float64,0xbfea5e4e4a74bc9c,0xbfe60e0601711835,1 +np.float64,0xffec248d4cb8491a,0xbff921fb54442d18,1 +np.float64,0xffd9942c2c332858,0xbff921fb54442d18,1 +np.float64,0xa9db36a553b67,0xa9db36a553b67,1 +np.float64,0x7fec630718b8c60d,0x3ff921fb54442d18,1 +np.float64,0xbfd062188f20c432,0xbfd009ecd652be89,1 +np.float64,0x8001b84e3023709d,0x8001b84e3023709d,1 +np.float64,0xbfe9e26d7cb3c4db,0xbfe5c3b157ecf668,1 +np.float64,0xbfef66ddf33ecdbc,0xbfe8d4b1f6410a24,1 +np.float64,0x3fd8d7109431ae21,0x3fd7b1d4860719a2,1 +np.float64,0xffee0f53107c1ea5,0xbff921fb54442d18,1 +np.float64,0x80000b4fd60016a0,0x80000b4fd60016a0,1 +np.float64,0xbfd99ff6e5333fee,0xbfd85fb3cbdaa049,1 +np.float64,0xbfe9cfd268339fa5,0xbfe5b86ef021a1b1,1 +np.float64,0xe32eace1c65d6,0xe32eace1c65d6,1 +np.float64,0xffc81f6627303ecc,0xbff921fb54442d18,1 +np.float64,0x7fe98dadde331b5b,0x3ff921fb54442d18,1 +np.float64,0xbfbcebd11e39d7a0,0xbfbccc8ec47883c7,1 +np.float64,0x7fe164880f22c90f,0x3ff921fb54442d18,1 +np.float64,0x800467c0cae8cf82,0x800467c0cae8cf82,1 +np.float64,0x800071e4b140e3ca,0x800071e4b140e3ca,1 +np.float64,0xbfc87a7eae30f4fc,0xbfc82fbc55bb0f24,1 +np.float64,0xffb2e0e23225c1c8,0xbff921fb54442d18,1 +np.float64,0x20ef338041df,0x20ef338041df,1 +np.float64,0x7fe6de71ca6dbce3,0x3ff921fb54442d18,1 +np.float64,0x5d1fa026ba3f5,0x5d1fa026ba3f5,1 +np.float64,0xffd112a9ce222554,0xbff921fb54442d18,1 +np.float64,0x3fb351f66626a3ed,0x3fb3489ab578c452,1 +np.float64,0x7fef7b2bd3bef657,0x3ff921fb54442d18,1 +np.float64,0xffe144f5d4e289eb,0xbff921fb54442d18,1 +np.float64,0xffd63a6750ac74ce,0xbff921fb54442d18,1 +np.float64,0x7fd2d8bb25a5b175,0x3ff921fb54442d18,1 +np.float64,0x3fec5920a078b242,0x3fe732dcffcf6521,1 +np.float64,0x80009a8b7f813518,0x80009a8b7f813518,1 +np.float64,0x3fdea220893d4441,0x3fdc921edf6bf3d8,1 +np.float64,0x8006cee2208d9dc5,0x8006cee2208d9dc5,1 +np.float64,0xdd0b0081ba17,0xdd0b0081ba17,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfdac33955358672,0xbfd9592bce7daf1f,1 +np.float64,0x7fe8301d7170603a,0x3ff921fb54442d18,1 +np.float64,0xbfc1d34d8523a69c,0xbfc1b62449af9684,1 +np.float64,0x800c62239458c447,0x800c62239458c447,1 +np.float64,0xffd398c009a73180,0xbff921fb54442d18,1 +np.float64,0xbfe0c6d9ee218db4,0xbfdee777557f4401,1 +np.float64,0x3feccdd373799ba7,0x3fe773c9c2263f89,1 +np.float64,0xbfd21898bda43132,0xbfd1a2be8545fcc5,1 
+np.float64,0x3fd77019b62ee033,0x3fd67793cabdf267,1 +np.float64,0x7fa609cad42c1395,0x3ff921fb54442d18,1 +np.float64,0x7fb4eaea5a29d5d4,0x3ff921fb54442d18,1 +np.float64,0x3fc570dc9a2ae1b9,0x3fc53e5f6218a799,1 +np.float64,0x800344ae8466895e,0x800344ae8466895e,1 +np.float64,0xbfc7c985252f930c,0xbfc784d60fa27bac,1 +np.float64,0xffaa2929fc345250,0xbff921fb54442d18,1 +np.float64,0xffe63e5ee9ac7cbe,0xbff921fb54442d18,1 +np.float64,0x73f0280ce7e06,0x73f0280ce7e06,1 +np.float64,0xffc525f8822a4bf0,0xbff921fb54442d18,1 +np.float64,0x7fd744d00aae899f,0x3ff921fb54442d18,1 +np.float64,0xbfe0fe590761fcb2,0xbfdf3e493e8b1f32,1 +np.float64,0xfae04ae7f5c0a,0xfae04ae7f5c0a,1 +np.float64,0xef821939df043,0xef821939df043,1 +np.float64,0x7fef6135843ec26a,0x3ff921fb54442d18,1 +np.float64,0xbfebf34dcbf7e69c,0xbfe6f97588a8f911,1 +np.float64,0xbfeec0b498fd8169,0xbfe87f2eceeead12,1 +np.float64,0x7fb67161b42ce2c2,0x3ff921fb54442d18,1 +np.float64,0x3fdcfd998639fb33,0x3fdb38934927c096,1 +np.float64,0xffda5960bc34b2c2,0xbff921fb54442d18,1 +np.float64,0xbfe11f8c71223f19,0xbfdf71fe770c96ab,1 +np.float64,0x3fe4ac1bab695838,0x3fe25aa4517b8322,1 +np.float64,0x3f730458a02608b1,0x3f73044fabb5e999,1 +np.float64,0x3fdb14ffcdb62a00,0x3fd99ea6c241a3ed,1 +np.float64,0xbfc93208cd326410,0xbfc8e09d78b6d4db,1 +np.float64,0x19e734dc33ce8,0x19e734dc33ce8,1 +np.float64,0x3fe5e98428abd308,0x3fe336a6a085eb55,1 +np.float64,0x7fec672a1378ce53,0x3ff921fb54442d18,1 +np.float64,0x800f8bd8d4ff17b2,0x800f8bd8d4ff17b2,1 +np.float64,0xbfe5a12e4e6b425c,0xbfe30533f99d5d06,1 +np.float64,0x75a34cb0eb46a,0x75a34cb0eb46a,1 +np.float64,0x7fe1d21d16a3a439,0x3ff921fb54442d18,1 +np.float64,0x7ff0000000000000,0x3ff921fb54442d18,1 +np.float64,0xffe0f50db261ea1b,0xbff921fb54442d18,1 +np.float64,0xbfd9dc22feb3b846,0xbfd8937ec965a501,1 +np.float64,0x8009d68e48d3ad1d,0x8009d68e48d3ad1d,1 +np.float64,0xbfe2eba620e5d74c,0xbfe1164d7d273c60,1 +np.float64,0x992efa09325e0,0x992efa09325e0,1 +np.float64,0x3fdab640ea356c82,0x3fd94e20cab88db2,1 +np.float64,0x69a6f04ad34df,0x69a6f04ad34df,1 +np.float64,0x3fe397df25272fbe,0x3fe194bd1a3a6192,1 +np.float64,0xebcce9fdd799d,0xebcce9fdd799d,1 +np.float64,0x3fbb49490c369292,0x3fbb2f02eccc497d,1 +np.float64,0xffd871f980b0e3f4,0xbff921fb54442d18,1 +np.float64,0x800348f6966691ee,0x800348f6966691ee,1 +np.float64,0xbfebc270a7f784e1,0xbfe6dda8d0d80f26,1 +np.float64,0xffd6d559b1adaab4,0xbff921fb54442d18,1 +np.float64,0x3fec3635c0b86c6c,0x3fe71f420256e43e,1 +np.float64,0x7fbc82ad7039055a,0x3ff921fb54442d18,1 +np.float64,0x7f873050602e60a0,0x3ff921fb54442d18,1 +np.float64,0x3fca44b8c3348970,0x3fc9e8a1a1a2d96e,1 +np.float64,0x3fe0fc308fe1f861,0x3fdf3aeb469ea225,1 +np.float64,0x7fefc27de8bf84fb,0x3ff921fb54442d18,1 +np.float64,0x8005f3f3916be7e8,0x8005f3f3916be7e8,1 +np.float64,0xbfd4278c7c284f18,0xbfd38678988873b6,1 +np.float64,0x435eafc486bd7,0x435eafc486bd7,1 +np.float64,0xbfd01f5199203ea4,0xbfcf96631f2108a3,1 +np.float64,0xffd5ee9185abdd24,0xbff921fb54442d18,1 +np.float64,0xffedb363257b66c5,0xbff921fb54442d18,1 +np.float64,0x800d68e6e11ad1ce,0x800d68e6e11ad1ce,1 +np.float64,0xbfcf687f8e3ed100,0xbfceccb771b0d39a,1 +np.float64,0x7feb3b9ef2f6773d,0x3ff921fb54442d18,1 +np.float64,0x3fe15ec5ca62bd8c,0x3fdfd3fab9d96f81,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xd2386f81a470e,0xd2386f81a470e,1 +np.float64,0xb9feed4573fde,0xb9feed4573fde,1 +np.float64,0x3fe7ed25c9efda4c,0x3fe48b7b72db4014,1 +np.float64,0xbfe01478726028f1,0xbfddcd1f5a2efc59,1 +np.float64,0x9946d02f328da,0x9946d02f328da,1 
+np.float64,0xbfe3bb67f06776d0,0xbfe1ae88aa81c5a6,1 +np.float64,0xbfd3fd8a4c27fb14,0xbfd3603982e3b78d,1 +np.float64,0xffd5c3ab912b8758,0xbff921fb54442d18,1 +np.float64,0xffd5f502b12bea06,0xbff921fb54442d18,1 +np.float64,0xbfc64981ec2c9304,0xbfc610e0382b1fa6,1 +np.float64,0xffec42e3413885c6,0xbff921fb54442d18,1 +np.float64,0x80084eb4ed109d6a,0x80084eb4ed109d6a,1 +np.float64,0xbfd17cac9fa2f95a,0xbfd112020588a4b3,1 +np.float64,0xbfd06c1359a0d826,0xbfd0134a28aa9a66,1 +np.float64,0x7fdc3d7c03b87af7,0x3ff921fb54442d18,1 +np.float64,0x7bdf5aaaf7bec,0x7bdf5aaaf7bec,1 +np.float64,0xbfee3cd966fc79b3,0xbfe83a14bc07ac3b,1 +np.float64,0x7fec910da3f9221a,0x3ff921fb54442d18,1 +np.float64,0xffb4ea667029d4d0,0xbff921fb54442d18,1 +np.float64,0x800103d7cce207b0,0x800103d7cce207b0,1 +np.float64,0x7fbb229a6c364534,0x3ff921fb54442d18,1 +np.float64,0x0,0x0,1 +np.float64,0xffd8fccd0331f99a,0xbff921fb54442d18,1 +np.float64,0xbfd0784ae1a0f096,0xbfd01ebff62e39ad,1 +np.float64,0xbfed2ec9b3ba5d93,0xbfe7a9099410bc76,1 +np.float64,0x800690b8d16d2172,0x800690b8d16d2172,1 +np.float64,0x7fc061b26520c364,0x3ff921fb54442d18,1 +np.float64,0x8007ec47054fd88f,0x8007ec47054fd88f,1 +np.float64,0x775546b6eeaa9,0x775546b6eeaa9,1 +np.float64,0x8005e00fb56bc020,0x8005e00fb56bc020,1 +np.float64,0xbfe510f8d0ea21f2,0xbfe2a16862b5a37f,1 +np.float64,0xffd87a6bf3b0f4d8,0xbff921fb54442d18,1 +np.float64,0x800906e3d0520dc8,0x800906e3d0520dc8,1 +np.float64,0x2296f000452f,0x2296f000452f,1 +np.float64,0xbfe3189fa2e63140,0xbfe1378c0e005be4,1 +np.float64,0xb4d2447f69a49,0xb4d2447f69a49,1 +np.float64,0xffd056a24a20ad44,0xbff921fb54442d18,1 +np.float64,0xbfe3b23fe4e76480,0xbfe1a7e5840fcbeb,1 +np.float64,0x80018ee270831dc6,0x80018ee270831dc6,1 +np.float64,0x800df89f245bf13e,0x800df89f245bf13e,1 +np.float64,0x3fee1409d7bc2814,0x3fe824779d133232,1 +np.float64,0xbfef8d81667f1b03,0xbfe8e85523620368,1 +np.float64,0xffd8a6519b314ca4,0xbff921fb54442d18,1 +np.float64,0x7fc7bc86f32f790d,0x3ff921fb54442d18,1 +np.float64,0xffea6159e674c2b3,0xbff921fb54442d18,1 +np.float64,0x3fe153c3fba2a788,0x3fdfc2f74769d300,1 +np.float64,0xffc4261ef3284c3c,0xbff921fb54442d18,1 +np.float64,0x7fe8a8961ff1512b,0x3ff921fb54442d18,1 +np.float64,0xbfe3fb1fd167f640,0xbfe1dc89dcb7ecdf,1 +np.float64,0x3fd88577c2b10af0,0x3fd76acc09660704,1 +np.float64,0x3fe128ec27e251d8,0x3fdf808fc7ebcd8f,1 +np.float64,0xbfed6ca7c4fad950,0xbfe7caafe9a3e213,1 +np.float64,0xbf9a3912b8347220,0xbf9a379b3349352e,1 +np.float64,0xbfd724d7bcae49b0,0xbfd6351efa2a5fc5,1 +np.float64,0xbfed59700a7ab2e0,0xbfe7c043014c694c,1 +np.float64,0x8002ad435bc55a87,0x8002ad435bc55a87,1 +np.float64,0xffe46ed345a8dda6,0xbff921fb54442d18,1 +np.float64,0x7fd2f1d1d825e3a3,0x3ff921fb54442d18,1 +np.float64,0xbfea0265e23404cc,0xbfe5d6fb3fd30464,1 +np.float64,0xbfd17e049122fc0a,0xbfd113421078bbae,1 +np.float64,0xffea03b986b40772,0xbff921fb54442d18,1 +np.float64,0x800b55331a16aa67,0x800b55331a16aa67,1 +np.float64,0xbfc6fcafbf2df960,0xbfc6be9ecd0ebc1f,1 +np.float64,0xd6a36017ad46c,0xd6a36017ad46c,1 +np.float64,0xbfe9ba86dfb3750e,0xbfe5ab840cb0ef86,1 +np.float64,0x75c4a108eb895,0x75c4a108eb895,1 +np.float64,0x8008d6bc8051ad79,0x8008d6bc8051ad79,1 +np.float64,0xbfd3dc5984a7b8b4,0xbfd341f78e0528ec,1 +np.float64,0xffe1cbb01aa39760,0xbff921fb54442d18,1 +np.float64,0x3fc7e292f52fc526,0x3fc79d0ce9365767,1 +np.float64,0xbfcbeae2bd37d5c4,0xbfcb7cb034f82467,1 +np.float64,0x8000f0c62e21e18d,0x8000f0c62e21e18d,1 +np.float64,0xbfe23d8bc6247b18,0xbfe09418ee35c3c7,1 +np.float64,0x717394bae2e73,0x717394bae2e73,1 
+np.float64,0xffa2ef1cc425de40,0xbff921fb54442d18,1 +np.float64,0x3fd938c229b27184,0x3fd806900735c99d,1 +np.float64,0x800bf3ec8a77e7d9,0x800bf3ec8a77e7d9,1 +np.float64,0xffeef41dd57de83b,0xbff921fb54442d18,1 +np.float64,0x8008df97e5b1bf30,0x8008df97e5b1bf30,1 +np.float64,0xffe9ab9d0db35739,0xbff921fb54442d18,1 +np.float64,0x99ff391333fe7,0x99ff391333fe7,1 +np.float64,0x3fb864b4a630c969,0x3fb851e883ea2cf9,1 +np.float64,0x22c1230a45825,0x22c1230a45825,1 +np.float64,0xff2336fbfe467,0xff2336fbfe467,1 +np.float64,0xbfd488f4cea911ea,0xbfd3def0490f5414,1 +np.float64,0x3fa379c78426f38f,0x3fa377607370800b,1 +np.float64,0xbfb0873302210e68,0xbfb08155b78dfd53,1 +np.float64,0xbfdf9ff7c2bf3ff0,0xbfdd5f658e357ad2,1 +np.float64,0x800978719192f0e4,0x800978719192f0e4,1 +np.float64,0xbfba8759ea350eb0,0xbfba6f325013b9e5,1 +np.float64,0xbfdd3e6b06ba7cd6,0xbfdb6e472b6091b0,1 +np.float64,0x7fe0c334a7a18668,0x3ff921fb54442d18,1 +np.float64,0xbfeb971feb772e40,0xbfe6c4e0f61404d1,1 +np.float64,0x3fe2a50968e54a13,0x3fe0e1c8b8d96e85,1 +np.float64,0x800fa9c5515f538b,0x800fa9c5515f538b,1 +np.float64,0x800f8532fbbf0a66,0x800f8532fbbf0a66,1 +np.float64,0x167d6f1e2cfaf,0x167d6f1e2cfaf,1 +np.float64,0xffee88e769fd11ce,0xbff921fb54442d18,1 +np.float64,0xbfeecc8529fd990a,0xbfe885520cdad8ea,1 +np.float64,0xffefffffffffffff,0xbff921fb54442d18,1 +np.float64,0xbfef6a566afed4ad,0xbfe8d6767b4c4235,1 +np.float64,0xffec12415af82482,0xbff921fb54442d18,1 +np.float64,0x3678a20a6cf15,0x3678a20a6cf15,1 +np.float64,0xffe468d54ee8d1aa,0xbff921fb54442d18,1 +np.float64,0x800ad6006795ac01,0x800ad6006795ac01,1 +np.float64,0x8001d5b61063ab6d,0x8001d5b61063ab6d,1 +np.float64,0x800dfcd1863bf9a3,0x800dfcd1863bf9a3,1 +np.float64,0xc9fbff6f93f80,0xc9fbff6f93f80,1 +np.float64,0xffe55c20f9eab842,0xbff921fb54442d18,1 +np.float64,0xbfcb596b6536b2d8,0xbfcaf1b339c5c615,1 +np.float64,0xbfe092689ea124d1,0xbfde94fa58946e51,1 +np.float64,0x3fe9ec733af3d8e6,0x3fe5c9bf5dee2623,1 +np.float64,0x3fe30f3d83261e7b,0x3fe1309fd6620e03,1 +np.float64,0xffd31d7f84263b00,0xbff921fb54442d18,1 +np.float64,0xbfe88d2d3e711a5a,0xbfe4f12b5a136178,1 +np.float64,0xffc81e4ce1303c98,0xbff921fb54442d18,1 +np.float64,0xffe5b96ebfab72dd,0xbff921fb54442d18,1 +np.float64,0x512f0502a25e1,0x512f0502a25e1,1 +np.float64,0x7fa3a376982746ec,0x3ff921fb54442d18,1 +np.float64,0x80005b5f2f60b6bf,0x80005b5f2f60b6bf,1 +np.float64,0xc337cc69866fa,0xc337cc69866fa,1 +np.float64,0x3fe7719c4caee339,0x3fe43bab42b19e64,1 +np.float64,0x7fde7ec1d93cfd83,0x3ff921fb54442d18,1 +np.float64,0x3fd2f38f3825e71e,0x3fd26cc7b1dd0acb,1 +np.float64,0x7fce298b993c5316,0x3ff921fb54442d18,1 +np.float64,0x56ae3b2cad5c8,0x56ae3b2cad5c8,1 +np.float64,0x3fe9299f2bf2533e,0x3fe552bddd999e72,1 +np.float64,0x7feff3a4823fe748,0x3ff921fb54442d18,1 +np.float64,0xbfd05c670aa0b8ce,0xbfd00494d78e9e97,1 +np.float64,0xffe745323eae8a64,0xbff921fb54442d18,1
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arctanh.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arctanh.csv
new file mode 100644
index 0000000..68ecaab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-arctanh.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3ee82930,0x3efa60fd,2 +np.float32,0x3f0aa640,0x3f1b3e13,2 +np.float32,0x3ec1a21c,0x3ecbbf8d,2 +np.float32,0x3cdb1740,0x3cdb24a1,2 +np.float32,0xbf28b6f3,0xbf4a86ac,2 +np.float32,0xbe490dcc,0xbe4bb2eb,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xbf44f9dd,0xbf826ce1,2
+np.float32,0xbf1d66c4,0xbf37786b,2 +np.float32,0x3f0ad26a,0x3f1b7c9b,2 +np.float32,0x3f7b6c54,0x4016aab0,2 +np.float32,0xbf715bb8,0xbfe1a0bc,2 +np.float32,0xbee8a562,0xbefafd6a,2 +np.float32,0x3db94d00,0x3db9cf16,2 +np.float32,0x3ee2970c,0x3ef368b3,2 +np.float32,0x3f3f8614,0x3f77fdca,2 +np.float32,0xbf1fb5f0,0xbf3b3789,2 +np.float32,0x3f798dc0,0x400b96bb,2 +np.float32,0x3e975d64,0x3e9c0573,2 +np.float32,0xbe3f1908,0xbe415d1f,2 +np.float32,0x3f2cea38,0x3f52192e,2 +np.float32,0x3e82f1ac,0x3e85eaa1,2 +np.float32,0x3eab6b30,0x3eb24acd,2 +np.float32,0xbe9bb90c,0xbea0cf5f,2 +np.float32,0xbf43e847,0xbf81202f,2 +np.float32,0xbd232fa0,0xbd2345c0,2 +np.float32,0xbbabbc00,0xbbabbc67,2 +np.float32,0xbf0b2975,0xbf1bf808,2 +np.float32,0xbef5ab0a,0xbf05d305,2 +np.float32,0x3f2cad16,0x3f51a8e2,2 +np.float32,0xbef75940,0xbf06eb08,2 +np.float32,0xbf0c1216,0xbf1d4325,2 +np.float32,0x3e7bdc08,0x3e8090c2,2 +np.float32,0x3da14e10,0x3da1a3c5,2 +np.float32,0x3f627412,0x3fb2bf21,2 +np.float32,0xbd6d08c0,0xbd6d4ca0,2 +np.float32,0x3f3e2368,0x3f74df8b,2 +np.float32,0xbe0df104,0xbe0edc77,2 +np.float32,0x3e8a265c,0x3e8da833,2 +np.float32,0xbdccdbb0,0xbdcd8ba8,2 +np.float32,0x3eb080c4,0x3eb80a44,2 +np.float32,0x3e627800,0x3e6645fe,2 +np.float32,0xbd8be0b0,0xbd8c1886,2 +np.float32,0xbf3282ac,0xbf5cae8c,2 +np.float32,0xbe515910,0xbe545707,2 +np.float32,0xbf2e64ac,0xbf54d637,2 +np.float32,0x3e0fc230,0x3e10b6de,2 +np.float32,0x3eb13ca0,0x3eb8df94,2 +np.float32,0x3f07a3ca,0x3f170572,2 +np.float32,0x3f2c7026,0x3f513935,2 +np.float32,0x3f3c4ec8,0x3f70d67c,2 +np.float32,0xbee9cce8,0xbefc724f,2 +np.float32,0xbe53ca60,0xbe56e3f3,2 +np.float32,0x3dd9e9a0,0x3ddabd98,2 +np.float32,0x3f38b8d4,0x3f69319b,2 +np.float32,0xbe176dc4,0xbe188c1d,2 +np.float32,0xbf322f2e,0xbf5c0c51,2 +np.float32,0xbe9b8676,0xbea097a2,2 +np.float32,0xbca44280,0xbca44823,2 +np.float32,0xbe2b0248,0xbe2ca036,2 +np.float32,0x3d101e80,0x3d102dbd,2 +np.float32,0xbf4eb610,0xbf8f526d,2 +np.float32,0xbec32a50,0xbecd89d1,2 +np.float32,0x3d549100,0x3d54c1ee,2 +np.float32,0x3f78e55e,0x40087025,2 +np.float32,0x3e592798,0x3e5c802d,2 +np.float32,0x3de045d0,0x3de12cfb,2 +np.float32,0xbdad28e0,0xbdad92f7,2 +np.float32,0x3e9a69e0,0x3e9f5e59,2 +np.float32,0x3e809778,0x3e836716,2 +np.float32,0xbf3278d9,0xbf5c9b6d,2 +np.float32,0x3f39fa00,0x3f6bd4a5,2 +np.float32,0xbec8143c,0xbed34ffa,2 +np.float32,0x3ddb7f40,0x3ddc57e6,2 +np.float32,0x3f0e8342,0x3f20c634,2 +np.float32,0x3f353dda,0x3f6213a4,2 +np.float32,0xbe96b400,0xbe9b4bea,2 +np.float32,0x3e626580,0x3e66328a,2 +np.float32,0xbde091c8,0xbde179df,2 +np.float32,0x3eb47b5c,0x3ebc91ca,2 +np.float32,0xbf282182,0xbf497f2f,2 +np.float32,0x3ea9f64c,0x3eb0a748,2 +np.float32,0x3f28dd4e,0x3f4aca86,2 +np.float32,0xbf71de18,0xbfe3f587,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xbf6696a6,0xbfbcf11a,2 +np.float32,0xbc853ae0,0xbc853de2,2 +np.float32,0xbeced246,0xbedb51b8,2 +np.float32,0x3f3472a4,0x3f607e00,2 +np.float32,0xbee90124,0xbefb7117,2 +np.float32,0x3eb45b90,0x3ebc6d7c,2 +np.float32,0xbe53ead0,0xbe5705d6,2 +np.float32,0x3f630c80,0x3fb420e2,2 +np.float32,0xbf408cd0,0xbf7a56a2,2 +np.float32,0x3dda4ed0,0x3ddb23f1,2 +np.float32,0xbf37ae88,0xbf67096b,2 +np.float32,0xbdd48c28,0xbdd550c9,2 +np.float32,0xbf5745b0,0xbf9cb4a4,2 +np.float32,0xbf44e6fc,0xbf8255c1,2 +np.float32,0x3f5c8e6a,0x3fa65020,2 +np.float32,0xbea45fe8,0xbeaa6630,2 +np.float32,0x3f08bdee,0x3f188ef5,2 +np.float32,0x3ec77e74,0x3ed29f4b,2 +np.float32,0xbf1a1d3c,0xbf324029,2 +np.float32,0x3cad7340,0x3cad79e3,2 +np.float32,0xbf4fac2e,0xbf90b72a,2 
+np.float32,0x3f58516e,0x3f9e8330,2 +np.float32,0x3f442008,0x3f816391,2 +np.float32,0xbf6e0c6c,0xbfd42854,2 +np.float32,0xbf266f7a,0xbf4689b2,2 +np.float32,0x3eb7e2f0,0x3ec077ba,2 +np.float32,0xbf320fd0,0xbf5bcf83,2 +np.float32,0xbf6a76b9,0xbfc80a11,2 +np.float32,0xbf2a91b4,0xbf4dd526,2 +np.float32,0x3f176e30,0x3f2e150e,2 +np.float32,0xbdcccad0,0xbdcd7a9c,2 +np.float32,0x3f60a8a4,0x3faebbf7,2 +np.float32,0x3d9706f0,0x3d974d40,2 +np.float32,0x3ef3cd34,0x3f049d58,2 +np.float32,0xbf73c615,0xbfed79fe,2 +np.float32,0x3df1b170,0x3df2d31b,2 +np.float32,0x3f632a46,0x3fb466c7,2 +np.float32,0xbf3ea18e,0xbf75f9ce,2 +np.float32,0xbf3ea05c,0xbf75f71f,2 +np.float32,0xbdd76750,0xbdd83403,2 +np.float32,0xbca830c0,0xbca836cd,2 +np.float32,0x3f1d4162,0x3f373c59,2 +np.float32,0x3c115700,0x3c1157fa,2 +np.float32,0x3dae8ab0,0x3daef758,2 +np.float32,0xbcad5020,0xbcad56bf,2 +np.float32,0x3ee299c4,0x3ef36c15,2 +np.float32,0xbf7f566c,0xc054c3bd,2 +np.float32,0x3f0cc698,0x3f1e4557,2 +np.float32,0xbe75c648,0xbe7aaa04,2 +np.float32,0x3ea29238,0x3ea86417,2 +np.float32,0x3f09d9c0,0x3f1a1d61,2 +np.float32,0x3f67275c,0x3fbe74b3,2 +np.float32,0x3e1a4e18,0x3e1b7d3a,2 +np.float32,0xbef6e3fc,0xbf069e98,2 +np.float32,0xbf6038ac,0xbfadc9fd,2 +np.float32,0xbe46bdd4,0xbe494b7f,2 +np.float32,0xbf4df1f4,0xbf8e3a98,2 +np.float32,0x3d094dc0,0x3d095aed,2 +np.float32,0x3f44c7d2,0x3f822fa3,2 +np.float32,0xbea30816,0xbea8e737,2 +np.float32,0xbe3c27c4,0xbe3e511b,2 +np.float32,0x3f3bb47c,0x3f6f8789,2 +np.float32,0xbe423760,0xbe4498c3,2 +np.float32,0x3ece1a74,0x3eda7634,2 +np.float32,0x3f14d1f6,0x3f2a1a89,2 +np.float32,0xbf4d9e8f,0xbf8dc4c1,2 +np.float32,0xbe92968e,0xbe96cd7f,2 +np.float32,0x3e99e6c0,0x3e9ece26,2 +np.float32,0xbf397361,0xbf6ab878,2 +np.float32,0xbf4fcea4,0xbf90e99f,2 +np.float32,0x3de37640,0x3de46779,2 +np.float32,0x3eb1b604,0x3eb9698c,2 +np.float32,0xbf52d0a2,0xbf957361,2 +np.float32,0xbe20435c,0xbe21975a,2 +np.float32,0x3f437a58,0x3f809bf1,2 +np.float32,0x3f27d1cc,0x3f48f335,2 +np.float32,0x3f7d4ff2,0x4027d1e2,2 +np.float32,0xbef732e4,0xbf06d205,2 +np.float32,0x3f4a0ae6,0x3f88e18e,2 +np.float32,0x3f800000,0x7f800000,2 +np.float32,0x3e3e56a0,0x3e4093ba,2 +np.float32,0xbed2fcfa,0xbee0517d,2 +np.float32,0xbe0e0114,0xbe0eecd7,2 +np.float32,0xbe808574,0xbe8353db,2 +np.float32,0x3f572e2a,0x3f9c8c86,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x3f3f3c82,0x3f775703,2 +np.float32,0xbf6e2482,0xbfd4818b,2 +np.float32,0xbf3943b0,0xbf6a5439,2 +np.float32,0x3f6e42ac,0x3fd4f1ea,2 +np.float32,0x3eb676c4,0x3ebed619,2 +np.float32,0xbe5e56c4,0xbe61ef6c,2 +np.float32,0x3eea200c,0x3efcdb65,2 +np.float32,0x3e3d2c78,0x3e3f5ef8,2 +np.float32,0xbdfd8fb0,0xbdfede71,2 +np.float32,0xbee69c8a,0xbef86e89,2 +np.float32,0x3e9efca0,0x3ea46a1c,2 +np.float32,0x3e4c2498,0x3e4ee9ee,2 +np.float32,0xbf3cc93c,0xbf71e21d,2 +np.float32,0x3ee0d77c,0x3ef13d2b,2 +np.float32,0xbefbcd2a,0xbf09d6a3,2 +np.float32,0x3f6dbe5c,0x3fd30a3e,2 +np.float32,0x3dae63e0,0x3daed03f,2 +np.float32,0xbd5001e0,0xbd502fb9,2 +np.float32,0x3f59632a,0x3fa067c8,2 +np.float32,0x3f0d355a,0x3f1ee452,2 +np.float32,0x3f2cbe5c,0x3f51c896,2 +np.float32,0x3c5e6e80,0x3c5e7200,2 +np.float32,0xbe8ac49c,0xbe8e52f0,2 +np.float32,0x3f54e576,0x3f98c0e6,2 +np.float32,0xbeaa0762,0xbeb0ba7c,2 +np.float32,0x3ec81e88,0x3ed35c21,2 +np.float32,0x3f5a6738,0x3fa23fb6,2 +np.float32,0xbf24a682,0xbf43784a,2 +np.float32,0x1,0x1,2 +np.float32,0x3ee6bc24,0x3ef89630,2 +np.float32,0x3f19444a,0x3f30ecf5,2 +np.float32,0x3ec1fc70,0x3ecc28fc,2 +np.float32,0xbf706e14,0xbfdd92fb,2 +np.float32,0x3eccb630,0x3ed8cd98,2 
+np.float32,0xbcdf7aa0,0xbcdf88d3,2 +np.float32,0xbe450da8,0xbe478a8e,2 +np.float32,0x3ec9c210,0x3ed54c0b,2 +np.float32,0xbf3b86ca,0xbf6f24d1,2 +np.float32,0x3edcc7a0,0x3eec3a5c,2 +np.float32,0x3f075d5c,0x3f16a39a,2 +np.float32,0xbf5719ce,0xbf9c69de,2 +np.float32,0x3f62cb22,0x3fb3885a,2 +np.float32,0x3f639216,0x3fb55c93,2 +np.float32,0xbf473ee7,0xbf85413a,2 +np.float32,0xbf01b66c,0xbf0eea86,2 +np.float32,0x3e872d80,0x3e8a74f8,2 +np.float32,0xbf60957e,0xbfae925c,2 +np.float32,0xbf6847b2,0xbfc1929b,2 +np.float32,0x3f78bb94,0x4007b363,2 +np.float32,0xbf47efdb,0xbf8622db,2 +np.float32,0xbe1f2308,0xbe206fd6,2 +np.float32,0xbf414926,0xbf7c0a7e,2 +np.float32,0x3eecc268,0x3f00194d,2 +np.float32,0x3eb086d0,0x3eb81120,2 +np.float32,0xbef1af80,0xbf033ff5,2 +np.float32,0xbf454e56,0xbf82d4aa,2 +np.float32,0x3e622560,0x3e65ef20,2 +np.float32,0x3f50d2b2,0x3f926a83,2 +np.float32,0x3eb2c45c,0x3eba9d2c,2 +np.float32,0x3e42d1a0,0x3e4538c9,2 +np.float32,0xbf24cc5c,0xbf43b8e3,2 +np.float32,0x3e8c6464,0x3e90141a,2 +np.float32,0xbf3abff2,0xbf6d79c5,2 +np.float32,0xbec8f2e6,0xbed456fa,2 +np.float32,0xbf787b38,0xc00698b4,2 +np.float32,0xbf58d5cd,0xbf9f6c03,2 +np.float32,0x3df4ee20,0x3df61ba8,2 +np.float32,0xbf34581e,0xbf604951,2 +np.float32,0xbeba5cf4,0xbec35119,2 +np.float32,0xbf76c22d,0xbfffc51c,2 +np.float32,0x3ef63b2c,0x3f0630b4,2 +np.float32,0x3eeadb64,0x3efdc877,2 +np.float32,0x3dfd8c70,0x3dfedb24,2 +np.float32,0x3f441600,0x3f81576d,2 +np.float32,0x3f23a0d8,0x3f41bbf6,2 +np.float32,0x3cb84d40,0x3cb85536,2 +np.float32,0xbf25cb5c,0xbf456e38,2 +np.float32,0xbc108540,0xbc108636,2 +np.float32,0xbc5b9140,0xbc5b949e,2 +np.float32,0xbf62ff40,0xbfb401dd,2 +np.float32,0x3e8e0710,0x3e91d93e,2 +np.float32,0x3f1b6ae0,0x3f344dfd,2 +np.float32,0xbf4dbbbe,0xbf8dedea,2 +np.float32,0x3f1a5fb2,0x3f32a880,2 +np.float32,0xbe56bd00,0xbe59f8cb,2 +np.float32,0xbf490a5c,0xbf87902d,2 +np.float32,0xbf513072,0xbf92f717,2 +np.float32,0x3e73ee28,0x3e78b542,2 +np.float32,0x3f0a4c7a,0x3f1abf2c,2 +np.float32,0x3e10d5c8,0x3e11d00b,2 +np.float32,0xbf771aac,0xc001207e,2 +np.float32,0x3efe2f54,0x3f0b6a46,2 +np.float32,0xbea5f3ea,0xbeac291f,2 +np.float32,0xbf1a73e8,0xbf32c845,2 +np.float32,0x3ebcc82c,0x3ec61c4f,2 +np.float32,0xbf24f492,0xbf43fd9a,2 +np.float32,0x3ecbd908,0x3ed7c691,2 +np.float32,0x3f461c5e,0x3f83d3f0,2 +np.float32,0x3eed0524,0x3f0043c1,2 +np.float32,0x3d06e840,0x3d06f4bf,2 +np.float32,0x3eb6c974,0x3ebf34d7,2 +np.float32,0xbf1c85e1,0xbf36100f,2 +np.float32,0x3ed697d0,0x3ee4ad04,2 +np.float32,0x3eab0484,0x3eb1d733,2 +np.float32,0xbf3b02f2,0xbf6e0935,2 +np.float32,0xbeeab154,0xbefd9334,2 +np.float32,0xbf695372,0xbfc49881,2 +np.float32,0x3e8aaa7c,0x3e8e36be,2 +np.float32,0xbf208754,0xbf3c8f7b,2 +np.float32,0xbe0dbf28,0xbe0ea9a1,2 +np.float32,0x3ca780c0,0x3ca786ba,2 +np.float32,0xbeb320b4,0xbebb065e,2 +np.float32,0x3f13c698,0x3f288821,2 +np.float32,0xbe8cbbec,0xbe9072c4,2 +np.float32,0x3f1ed534,0x3f39c8df,2 +np.float32,0x3e1ca450,0x3e1de190,2 +np.float32,0x3f54be1c,0x3f988134,2 +np.float32,0x3f34e4ee,0x3f6161b4,2 +np.float32,0xbf7e6913,0xc038b246,2 +np.float32,0x3d3c3f20,0x3d3c6119,2 +np.float32,0x3ca9dc80,0x3ca9e2bc,2 +np.float32,0xbf577ea2,0xbf9d161a,2 +np.float32,0xbedb22c8,0xbeea3644,2 +np.float32,0x3f22a044,0x3f400bfa,2 +np.float32,0xbe214b8c,0xbe22a637,2 +np.float32,0x3e8cd300,0x3e908bbc,2 +np.float32,0xbec4d214,0xbecf7a58,2 +np.float32,0x3e9399a4,0x3e97e7e4,2 +np.float32,0xbee6a1a2,0xbef874ed,2 +np.float32,0xbf323742,0xbf5c1bfd,2 +np.float32,0x3f48b882,0x3f8725ac,2 +np.float32,0xbf4d4dba,0xbf8d532e,2 
+np.float32,0xbf59640a,0xbfa0695a,2 +np.float32,0xbf2ad562,0xbf4e4f03,2 +np.float32,0x3e317d98,0x3e334d03,2 +np.float32,0xbf6a5b71,0xbfc7b5a2,2 +np.float32,0x3e87b434,0x3e8b05cf,2 +np.float32,0xbf1c344c,0xbf358dee,2 +np.float32,0x3e449428,0x3e470c65,2 +np.float32,0xbf2c0f2f,0xbf508808,2 +np.float32,0xbec5b5ac,0xbed0859c,2 +np.float32,0xbf4aa956,0xbf89b4b1,2 +np.float32,0x3f6dd374,0x3fd35717,2 +np.float32,0x3f45f76c,0x3f83a5ef,2 +np.float32,0xbed1fba8,0xbedf1bd5,2 +np.float32,0xbd26b2d0,0xbd26ca66,2 +np.float32,0xbe9817c2,0xbe9cd1c3,2 +np.float32,0x3e725988,0x3e770875,2 +np.float32,0xbf1a8ded,0xbf32f132,2 +np.float32,0xbe695860,0xbe6d83d3,2 +np.float32,0x3d8cecd0,0x3d8d25ea,2 +np.float32,0x3f574706,0x3f9cb6ec,2 +np.float32,0xbf5c5a1f,0xbfa5eaf3,2 +np.float32,0x3e7a7c88,0x3e7fab83,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0x3f66396a,0x3fbbfbb0,2 +np.float32,0x3ed6e588,0x3ee50b53,2 +np.float32,0xbb56d500,0xbb56d532,2 +np.float32,0x3ebd23fc,0x3ec6869a,2 +np.float32,0xbf70d490,0xbfdf4af5,2 +np.float32,0x3e514f88,0x3e544d15,2 +np.float32,0x3e660f98,0x3e6a0dac,2 +np.float32,0xbf034da1,0xbf1110bb,2 +np.float32,0xbf60d9be,0xbfaf2714,2 +np.float32,0x3df67b10,0x3df7ae64,2 +np.float32,0xbeeedc0a,0xbf017010,2 +np.float32,0xbe149224,0xbe15a072,2 +np.float32,0x3f455084,0x3f82d759,2 +np.float32,0x3f210f9e,0x3f3d7093,2 +np.float32,0xbeaea3e0,0xbeb5edd3,2 +np.float32,0x3e0724b0,0x3e07efad,2 +np.float32,0x3f09a784,0x3f19d6ac,2 +np.float32,0xbf044340,0xbf125ee8,2 +np.float32,0xbf71adc9,0xbfe315fe,2 +np.float32,0x3efd3870,0x3f0ac6a8,2 +np.float32,0xbf53c7a6,0xbf96f6df,2 +np.float32,0xbf3cf784,0xbf7247af,2 +np.float32,0x3e0ce9e0,0x3e0dd035,2 +np.float32,0xbd3051a0,0xbd306d89,2 +np.float32,0x3ecab804,0x3ed66f77,2 +np.float32,0x3e984350,0x3e9d0189,2 +np.float32,0x3edd1c00,0x3eeca20b,2 +np.float32,0xbe8e22a0,0xbe91f71b,2 +np.float32,0x3ebebc18,0x3ec85fd6,2 +np.float32,0xba275c00,0xba275c01,2 +np.float32,0x3f1d8190,0x3f37a385,2 +np.float32,0x3f17343e,0x3f2dbbfe,2 +np.float32,0x3caa8000,0x3caa864e,2 +np.float32,0x3e7a7308,0x3e7fa168,2 +np.float32,0x3f7359a6,0x3feb3e1a,2 +np.float32,0xbf7ad15a,0xc012a743,2 +np.float32,0xbf122efb,0xbf262812,2 +np.float32,0xbf03ba04,0xbf11a3fa,2 +np.float32,0x3ed7a90c,0x3ee5f8d4,2 +np.float32,0xbe23e318,0xbe254eed,2 +np.float32,0xbe2866f4,0xbe29f20a,2 +np.float32,0xbeaedff2,0xbeb631d0,2 +np.float32,0x0,0x0,2 +np.float32,0x3ef2a034,0x3f03dafd,2 +np.float32,0x3f35806c,0x3f62994e,2 +np.float32,0xbf655e19,0xbfb9c718,2 +np.float32,0x3f5d54ce,0x3fa7d4f4,2 +np.float32,0x3f33e64a,0x3f5f67e3,2 +np.float32,0x3ebf4010,0x3ec8f923,2 +np.float32,0xbe050dc8,0xbe05cf70,2 +np.float32,0x3f61693e,0x3fb063b0,2 +np.float32,0xbd94ac00,0xbd94ef12,2 +np.float32,0x3e9de008,0x3ea32f61,2 +np.float32,0xbe3d042c,0xbe3f3540,2 +np.float32,0x3e8fdfc0,0x3e93d9e4,2 +np.float32,0x3f28bc48,0x3f4a9019,2 +np.float32,0x3edea928,0x3eee8b09,2 +np.float32,0xbf05f673,0xbf14b362,2 +np.float32,0xbf360730,0xbf63a914,2 +np.float32,0xbe3fb454,0xbe41fe0a,2 +np.float32,0x3f6d99a8,0x3fd28552,2 +np.float32,0xbf3ae866,0xbf6dd052,2 +np.float32,0x3f5b1164,0x3fa37aec,2 +np.float32,0xbf64a451,0xbfb7f61b,2 +np.float32,0xbdd79bd0,0xbdd86919,2 +np.float32,0x3e89fc00,0x3e8d7a85,2 +np.float32,0x3f4bf690,0x3f8b77ea,2 +np.float32,0x3cbdf280,0x3cbdfb38,2 +np.float32,0x3f138f98,0x3f2835b4,2 +np.float32,0xbe33967c,0xbe3576bc,2 +np.float32,0xbf298164,0xbf4bedda,2 +np.float32,0x3e9955cc,0x3e9e2edb,2 +np.float32,0xbf79b383,0xc00c56c0,2 +np.float32,0x3ea0834c,0x3ea61aea,2 +np.float32,0xbf511184,0xbf92c89a,2 +np.float32,0x3f4d9fba,0x3f8dc666,2 
+np.float32,0x3f3387c2,0x3f5ead80,2 +np.float32,0x3e3f7360,0x3e41babb,2 +np.float32,0xbf3cc4d6,0xbf71d879,2 +np.float32,0x3f2e4402,0x3f54994e,2 +np.float32,0x3e6a7118,0x3e6eabff,2 +np.float32,0xbf05d83e,0xbf1489cc,2 +np.float32,0xbdce4fd8,0xbdcf039a,2 +np.float32,0xbf03e2f4,0xbf11dbaf,2 +np.float32,0x3f1ea0a0,0x3f397375,2 +np.float32,0x3f7aff54,0x4013cb1b,2 +np.float32,0x3f5ef158,0x3fab1801,2 +np.float32,0xbe33bcc8,0xbe359e40,2 +np.float32,0xbf04dd0e,0xbf133111,2 +np.float32,0xbf14f887,0xbf2a54d1,2 +np.float32,0x3f75c37a,0x3ff9196e,2 +np.float32,0x3f35c3c8,0x3f6320f2,2 +np.float32,0x3f53bb94,0x3f96e3c3,2 +np.float32,0x3f4d473e,0x3f8d4a19,2 +np.float32,0xbdfe19e0,0xbdff6ac9,2 +np.float32,0xbf7f0cc4,0xc049342d,2 +np.float32,0xbdbfc778,0xbdc057bb,2 +np.float32,0xbf7575b7,0xbff73067,2 +np.float32,0xbe9df488,0xbea34609,2 +np.float32,0xbefbd3c6,0xbf09daff,2 +np.float32,0x3f19962c,0x3f316cbd,2 +np.float32,0x3f7acec6,0x40129732,2 +np.float32,0xbf5db7de,0xbfa89a21,2 +np.float32,0x3f62f444,0x3fb3e830,2 +np.float32,0xbf522adb,0xbf94737f,2 +np.float32,0xbef6ceb2,0xbf0690ba,2 +np.float32,0xbf57c41e,0xbf9d8db0,2 +np.float32,0x3eb3360c,0x3ebb1eb0,2 +np.float32,0x3f29327e,0x3f4b618e,2 +np.float32,0xbf08d099,0xbf18a916,2 +np.float32,0x3ea21014,0x3ea7d369,2 +np.float32,0x3f39e516,0x3f6ba861,2 +np.float32,0x3e7c4f28,0x3e80ce08,2 +np.float32,0xbec5a7f8,0xbed07582,2 +np.float32,0xbf0b1b46,0xbf1be3e7,2 +np.float32,0xbef0e0ec,0xbf02bb2e,2 +np.float32,0x3d835a30,0x3d838869,2 +np.float32,0x3f08aa40,0x3f18736e,2 +np.float32,0x3eb0e4c8,0x3eb87bcd,2 +np.float32,0x3eb3821c,0x3ebb7564,2 +np.float32,0xbe3a7320,0xbe3c8d5a,2 +np.float32,0x3e43f8c0,0x3e466b10,2 +np.float32,0x3e914288,0x3e955b69,2 +np.float32,0x3ec7d800,0x3ed308e7,2 +np.float32,0x3e603df8,0x3e63eef2,2 +np.float32,0x3f225cac,0x3f3f9ac6,2 +np.float32,0x3e3db8f0,0x3e3ff06b,2 +np.float32,0x3f358d78,0x3f62b38c,2 +np.float32,0xbed9bd64,0xbee88158,2 +np.float32,0x800000,0x800000,2 +np.float32,0x3f1adfce,0x3f337230,2 +np.float32,0xbefdc346,0xbf0b229d,2 +np.float32,0xbf091018,0xbf190208,2 +np.float32,0xbf800000,0xff800000,2 +np.float32,0x3f27c2c4,0x3f48d8db,2 +np.float32,0x3ef59c80,0x3f05c993,2 +np.float32,0x3e18a340,0x3e19c893,2 +np.float32,0x3f209610,0x3f3ca7c5,2 +np.float32,0x3f69cc22,0x3fc60087,2 +np.float32,0xbf66cf07,0xbfbd8721,2 +np.float32,0xbf768098,0xbffdfcc4,2 +np.float32,0x3df27a40,0x3df39ec4,2 +np.float32,0x3daf5bd0,0x3dafca02,2 +np.float32,0x3f53f2be,0x3f973b41,2 +np.float32,0xbf7edcbc,0xc0436ce3,2 +np.float32,0xbdf61db8,0xbdf74fae,2 +np.float32,0x3e2c9328,0x3e2e3cb2,2 +np.float32,0x3f1a4570,0x3f327f41,2 +np.float32,0xbf766306,0xbffd32f1,2 +np.float32,0xbf468b9d,0xbf845f0f,2 +np.float32,0x3e398970,0x3e3b9bb1,2 +np.float32,0xbbefa900,0xbbefaa18,2 +np.float32,0xbf54c989,0xbf9893ad,2 +np.float32,0x3f262cf6,0x3f46169d,2 +np.float32,0x3f638a8a,0x3fb54a98,2 +np.float32,0xbeb36c78,0xbebb5cb8,2 +np.float32,0xbeac4d42,0xbeb34993,2 +np.float32,0x3f1d1942,0x3f36fbf2,2 +np.float32,0xbf5d49ba,0xbfa7bf07,2 +np.float32,0xbf182b5c,0xbf2f38d0,2 +np.float32,0x3f41a742,0x3f7ce5ef,2 +np.float32,0x3f0b9a6c,0x3f1c9898,2 +np.float32,0x3e847494,0x3e8788f3,2 +np.float32,0xbde41608,0xbde50941,2 +np.float32,0x3f693944,0x3fc44b5a,2 +np.float32,0x3f0386b2,0x3f115e37,2 +np.float32,0x3f3a08b0,0x3f6bf3c1,2 +np.float32,0xbf78ee64,0xc0089977,2 +np.float32,0xbf013a11,0xbf0e436e,2 +np.float32,0x3f00668e,0x3f0d2836,2 +np.float32,0x3e6d9850,0x3e720081,2 +np.float32,0x3eacf578,0x3eb4075d,2 +np.float32,0x3f18aef8,0x3f3004b4,2 +np.float32,0x3de342f0,0x3de43385,2 
+np.float32,0x3e56cee8,0x3e5a0b85,2 +np.float32,0xbf287912,0xbf4a1966,2 +np.float32,0x3e92c948,0x3e9704c2,2 +np.float32,0x3c07d080,0x3c07d14c,2 +np.float32,0xbe90f6a0,0xbe9508e0,2 +np.float32,0x3e8b4f28,0x3e8ee884,2 +np.float32,0xbf35b56c,0xbf6303ff,2 +np.float32,0xbef512b8,0xbf057027,2 +np.float32,0x3e36c630,0x3e38c0cd,2 +np.float32,0x3f0b3ca8,0x3f1c134a,2 +np.float32,0x3e4cd610,0x3e4fa2c5,2 +np.float32,0xbf5a8372,0xbfa273a3,2 +np.float32,0xbecaad3c,0xbed662ae,2 +np.float32,0xbec372d2,0xbecddeac,2 +np.float32,0x3f6fb2b2,0x3fda8a22,2 +np.float32,0x3f365f28,0x3f645b5a,2 +np.float32,0xbecd00fa,0xbed926a4,2 +np.float32,0xbebafa32,0xbec40672,2 +np.float32,0xbf235b73,0xbf4146c4,2 +np.float32,0x3f7a4658,0x400f6e2c,2 +np.float32,0x3f35e824,0x3f636a54,2 +np.float32,0x3cb87640,0x3cb87e3c,2 +np.float32,0xbf296288,0xbf4bb6ee,2 +np.float32,0x7f800000,0xffc00000,2 +np.float32,0xbf4de86e,0xbf8e2d1a,2 +np.float32,0xbf4ace12,0xbf89e5f3,2 +np.float32,0x3d65a300,0x3d65e0b5,2 +np.float32,0xbe10c534,0xbe11bf21,2 +np.float32,0xbeba3c1c,0xbec32b3e,2 +np.float32,0x3e87eaf8,0x3e8b40b8,2 +np.float32,0x3d5c3bc0,0x3d5c722d,2 +np.float32,0x3e8c14b8,0x3e8fbdf8,2 +np.float32,0xbf06c6f0,0xbf15d327,2 +np.float32,0xbe0f1e30,0xbe100f96,2 +np.float32,0xbee244b0,0xbef30251,2 +np.float32,0x3f2a21b0,0x3f4d0c1d,2 +np.float32,0xbf5f7f81,0xbfac408e,2 +np.float32,0xbe3dba2c,0xbe3ff1b2,2 +np.float32,0x3f3ffc22,0x3f790abf,2 +np.float32,0x3edc3dac,0x3eeb90fd,2 +np.float32,0x7f7fffff,0xffc00000,2 +np.float32,0x3ecfaaac,0x3edc5485,2 +np.float32,0x3f0affbe,0x3f1bbcd9,2 +np.float32,0x3f5f2264,0x3fab7dca,2 +np.float32,0x3f37394c,0x3f66186c,2 +np.float32,0xbe6b2f6c,0xbe6f74e3,2 +np.float32,0x3f284772,0x3f49c1f1,2 +np.float32,0xbdf27bc8,0xbdf3a051,2 +np.float32,0xbc8b14e0,0xbc8b184c,2 +np.float32,0x3f6a867c,0x3fc83b07,2 +np.float32,0x3f1ec876,0x3f39b429,2 +np.float32,0x3f6fd9a8,0x3fdb28d6,2 +np.float32,0xbf473cca,0xbf853e8c,2 +np.float32,0x3e23eff8,0x3e255c23,2 +np.float32,0x3ebefdfc,0x3ec8ac5d,2 +np.float32,0x3f6c8c22,0x3fced2b1,2 +np.float32,0x3f168388,0x3f2cad44,2 +np.float32,0xbece2410,0xbeda81ac,2 +np.float32,0x3f5532f0,0x3f993eea,2 +np.float32,0x3ef1938c,0x3f032dfa,2 +np.float32,0xbef05268,0xbf025fba,2 +np.float32,0x3f552e4a,0x3f993754,2 +np.float32,0x3e9ed068,0x3ea4392d,2 +np.float32,0xbe1a0c24,0xbe1b39be,2 +np.float32,0xbf2623aa,0xbf46068c,2 +np.float32,0xbe1cc300,0xbe1e00fc,2 +np.float32,0xbe9c0576,0xbea12397,2 +np.float32,0xbd827338,0xbd82a07e,2 +np.float32,0x3f0fc31a,0x3f229786,2 +np.float32,0x3e577810,0x3e5abc7d,2 +np.float32,0x3e0e1cb8,0x3e0f0906,2 +np.float32,0x3e84d344,0x3e87ee73,2 +np.float32,0xbf39c45e,0xbf6b6337,2 +np.float32,0x3edfb25c,0x3eefd273,2 +np.float32,0x3e016398,0x3e021596,2 +np.float32,0xbefeb1be,0xbf0bc0de,2 +np.float32,0x3f37e104,0x3f677196,2 +np.float32,0x3f545316,0x3f97d500,2 +np.float32,0xbefc165a,0xbf0a06ed,2 +np.float32,0xbf0923e6,0xbf191dcd,2 +np.float32,0xbf386508,0xbf68831f,2 +np.float32,0xbf3d4630,0xbf72f4e1,2 +np.float32,0x3f3dbe82,0x3f73ff13,2 +np.float32,0xbf703de4,0xbfdcc7e2,2 +np.float32,0xbf531482,0xbf95dd1a,2 +np.float32,0xbf0af1b6,0xbf1ba8f4,2 +np.float32,0xbec8fd9c,0xbed463a4,2 +np.float32,0xbe230320,0xbe24691a,2 +np.float32,0xbf7de541,0xc02faf38,2 +np.float32,0x3efd2360,0x3f0ab8b7,2 +np.float32,0x3db7f350,0x3db87291,2 +np.float32,0x3e74c510,0x3e799924,2 +np.float32,0x3da549c0,0x3da5a5fc,2 +np.float32,0x3e8a3bc4,0x3e8dbf4a,2 +np.float32,0xbf69f086,0xbfc66e84,2 +np.float32,0x3f323f8e,0x3f5c2c17,2 +np.float32,0x3ec0ae3c,0x3ecaa334,2 +np.float32,0xbebe8966,0xbec824fc,2 
+np.float32,0x3f34691e,0x3f606b13,2 +np.float32,0x3f13790e,0x3f2813f5,2 +np.float32,0xbf61c027,0xbfb12618,2 +np.float32,0x3e90c690,0x3e94d4a1,2 +np.float32,0xbefce8f0,0xbf0a920e,2 +np.float32,0xbf5c0e8a,0xbfa559a7,2 +np.float32,0x3f374f60,0x3f6645b6,2 +np.float32,0x3f25f6fa,0x3f45b967,2 +np.float32,0x3f2421aa,0x3f42963a,2 +np.float32,0x3ebfa328,0x3ec96c57,2 +np.float32,0x3e3bef28,0x3e3e1685,2 +np.float32,0x3ea3fa3c,0x3ea9f4dd,2 +np.float32,0x3f362b8e,0x3f63f2b2,2 +np.float32,0xbedcef18,0xbeec6ada,2 +np.float32,0xbdd29c88,0xbdd35bd0,2 +np.float32,0x3f261aea,0x3f45f76f,2 +np.float32,0xbe62c470,0xbe66965e,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0xbee991aa,0xbefc277b,2 +np.float32,0xbf571960,0xbf9c6923,2 +np.float32,0xbe6fb410,0xbe743b41,2 +np.float32,0x3eb1bed0,0x3eb9738d,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3eddcbe4,0x3eed7a69,2 +np.float32,0xbf2a81ba,0xbf4db86d,2 +np.float32,0x3f74da54,0x3ff38737,2 +np.float32,0xbeb6bff4,0xbebf29f4,2 +np.float32,0x3f445752,0x3f81a698,2 +np.float32,0x3ed081b4,0x3edd5618,2 +np.float32,0xbee73802,0xbef931b4,2 +np.float32,0xbd13f2a0,0xbd14031c,2 +np.float32,0xbb4d1200,0xbb4d122c,2 +np.float32,0xbee8777a,0xbefac393,2 +np.float32,0x3f42047c,0x3f7dc06c,2 +np.float32,0xbd089270,0xbd089f67,2 +np.float32,0xbf628c16,0xbfb2f66b,2 +np.float32,0x3e72e098,0x3e77978d,2 +np.float32,0x3ed967cc,0x3ee818e4,2 +np.float32,0x3e284c80,0x3e29d6d9,2 +np.float32,0x3f74e8ba,0x3ff3dbef,2 +np.float32,0x3f013e86,0x3f0e4969,2 +np.float32,0xbf610d4f,0xbfaf983c,2 +np.float32,0xbf3c8d36,0xbf715eba,2 +np.float32,0xbedbc756,0xbeeaffdb,2 +np.float32,0x3e143ec8,0x3e154b4c,2 +np.float32,0xbe1c9808,0xbe1dd4fc,2 +np.float32,0xbe887a1e,0xbe8bdac5,2 +np.float32,0xbe85c4bc,0xbe88f17a,2 +np.float32,0x3f35967e,0x3f62c5b4,2 +np.float32,0x3ea2c4a4,0x3ea89c2d,2 +np.float32,0xbc8703c0,0xbc8706e1,2 +np.float32,0xbf13d52c,0xbf289dff,2 +np.float32,0xbf63bb56,0xbfb5bf29,2 +np.float32,0xbf61c5ef,0xbfb13319,2 +np.float32,0xbf128410,0xbf26a675,2 +np.float32,0x3f03fcf2,0x3f11ff13,2 +np.float32,0xbe49c924,0xbe4c75cd,2 +np.float32,0xbf211a9c,0xbf3d82c5,2 +np.float32,0x3f7e9d52,0x403d1b42,2 +np.float32,0x3edfefd4,0x3ef01e71,2 +np.float32,0x3ebc5bd8,0x3ec59efb,2 +np.float32,0x3d7b02e0,0x3d7b537f,2 +np.float32,0xbf1163ba,0xbf24fb43,2 +np.float32,0x3f5072f2,0x3f91dbf1,2 +np.float32,0xbee700ce,0xbef8ec60,2 +np.float32,0x3f534168,0x3f962359,2 +np.float32,0x3e6d6c40,0x3e71d1ef,2 +np.float32,0x3def9d70,0x3df0b7a8,2 +np.float32,0x3e89cf80,0x3e8d4a8a,2 +np.float32,0xbf687ca7,0xbfc2290f,2 +np.float32,0x3f35e134,0x3f635c51,2 +np.float32,0x3e59eef8,0x3e5d50fa,2 +np.float32,0xbf65c9e1,0xbfbada61,2 +np.float32,0xbf759292,0xbff7e43d,2 +np.float32,0x3f4635a0,0x3f83f372,2 +np.float32,0x3f29baaa,0x3f4c53f1,2 +np.float32,0x3f6b15a6,0x3fc9fe04,2 +np.float32,0x3edabc88,0x3ee9b922,2 +np.float32,0x3ef382e0,0x3f046d4d,2 +np.float32,0xbe351310,0xbe36ff7f,2 +np.float32,0xbf05c935,0xbf14751c,2 +np.float32,0xbf0e7c50,0xbf20bc24,2 +np.float32,0xbf69bc94,0xbfc5d1b8,2 +np.float32,0xbed41aca,0xbee1aa23,2 +np.float32,0x3f518c08,0x3f938162,2 +np.float32,0xbf3d7974,0xbf73661a,2 +np.float32,0x3f1951a6,0x3f3101c9,2 +np.float32,0xbeb3f436,0xbebbf787,2 +np.float32,0xbf77a190,0xc0031d43,2 +np.float32,0x3eb5b3cc,0x3ebdf6e7,2 +np.float32,0xbed534b4,0xbee2fed2,2 +np.float32,0xbe53e1b8,0xbe56fc56,2 +np.float32,0x3f679e20,0x3fbfb91c,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0xbf7b9bcb,0xc0180073,2 +np.float32,0xbf5635e8,0xbf9aea15,2 +np.float32,0xbe5a3318,0xbe5d9856,2 +np.float32,0xbe003284,0xbe00df9a,2 
+np.float32,0x3eb119a4,0x3eb8b7d6,2 +np.float32,0xbf3bccf8,0xbf6fbc84,2 +np.float32,0x3f36f600,0x3f658ea8,2 +np.float32,0x3f1ea834,0x3f397fc2,2 +np.float32,0xbe7cfb54,0xbe8129b3,2 +np.float32,0xbe9b3746,0xbea0406a,2 +np.float32,0x3edc0f90,0x3eeb586c,2 +np.float32,0x3e1842e8,0x3e19660c,2 +np.float32,0xbd8f10b0,0xbd8f4c70,2 +np.float32,0xbf064aca,0xbf1527a2,2 +np.float32,0x3e632e58,0x3e6705be,2 +np.float32,0xbef28ba4,0xbf03cdbb,2 +np.float32,0x3f27b21e,0x3f48bbaf,2 +np.float32,0xbe6f30d4,0xbe73b06e,2 +np.float32,0x3f3e6cb0,0x3f75834b,2 +np.float32,0xbf264aa5,0xbf4649f0,2 +np.float32,0xbf690775,0xbfc3b978,2 +np.float32,0xbf3e4a38,0xbf753632,2 +np.float64,0x3fe12bbe8c62577e,0x3fe32de8e5f961b0,1 +np.float64,0x3fc9b8909b337120,0x3fca1366da00efff,1 +np.float64,0x3feaee4245f5dc84,0x3ff3a011ea0432f3,1 +np.float64,0xbfe892c000f12580,0xbff03e5adaed6f0c,1 +np.float64,0xbf9be8de4837d1c0,0xbf9beaa367756bd1,1 +np.float64,0x3fe632e58fec65cc,0x3feb5ccc5114ca38,1 +np.float64,0x3fe78a0ef7ef141e,0x3fee1b4521d8eb6c,1 +np.float64,0x3feec27a65fd84f4,0x3fff643c8318e81e,1 +np.float64,0x3fbed6efce3dade0,0x3fbefd76cff00111,1 +np.float64,0xbfe3a05fab6740c0,0xbfe6db078aeeb0ca,1 +np.float64,0x3fdca11a56b94234,0x3fdece9e6eacff1b,1 +np.float64,0x3fe0fb15aae1f62c,0x3fe2e9e095ec2089,1 +np.float64,0x3fede12abf7bc256,0x3ffafd0ff4142807,1 +np.float64,0x3feb919edcf7233e,0x3ff4c9aa0bc2432f,1 +np.float64,0x3fd39633b5a72c68,0x3fd43c2e6d5f441c,1 +np.float64,0x3fd9efcbfeb3df98,0x3fdb83f03e58f91c,1 +np.float64,0x3fe2867a36650cf4,0x3fe525858c8ce72e,1 +np.float64,0x3fdacbb8f3b59770,0x3fdc8cd431b6e3ff,1 +np.float64,0x3fcc120503382408,0x3fcc88a8fa43e1c6,1 +np.float64,0xbfd99ff4eab33fea,0xbfdb24a20ae3687d,1 +np.float64,0xbfe8caf0157195e0,0xbff083b8dd0941d3,1 +np.float64,0x3fddc9bf92bb9380,0x3fe022aac0f761d5,1 +np.float64,0x3fe2dbb66e65b76c,0x3fe5a6e7caf3f1f2,1 +np.float64,0x3fe95f5c4a72beb8,0x3ff1444697e96138,1 +np.float64,0xbfc6b163d92d62c8,0xbfc6ef6e006658a1,1 +np.float64,0x3fdf1b2616be364c,0x3fe0fcbd2848c9e8,1 +np.float64,0xbfdca1ccf7b9439a,0xbfdecf7dc0eaa663,1 +np.float64,0x3fe078d6a260f1ae,0x3fe236a7c66ef6c2,1 +np.float64,0x3fdf471bb9be8e38,0x3fe11990ec74e704,1 +np.float64,0xbfe417626be82ec5,0xbfe79c9aa5ed2e2f,1 +np.float64,0xbfeb9cf5677739eb,0xbff4dfc24c012c90,1 +np.float64,0x3f8d9142b03b2280,0x3f8d91c9559d4779,1 +np.float64,0x3fb052c67220a590,0x3fb05873c90d1cd6,1 +np.float64,0x3fd742e2c7ae85c4,0x3fd860128947d15d,1 +np.float64,0x3fec2e2a2bf85c54,0x3ff60eb554bb8d71,1 +np.float64,0xbfeb2b8bc8f65718,0xbff40b734679497a,1 +np.float64,0x3fe25f8e0d64bf1c,0x3fe4eb381d077803,1 +np.float64,0x3fe56426256ac84c,0x3fe9dafbe79370f0,1 +np.float64,0x3feecc1e5d7d983c,0x3fffa49bedc7aa25,1 +np.float64,0xbfc88ce94b3119d4,0xbfc8dbba0fdee2d2,1 +np.float64,0xbfabcf51ac379ea0,0xbfabd6552aa63da3,1 +np.float64,0xbfccc8b849399170,0xbfcd48d6ff057a4d,1 +np.float64,0x3fd2f831e8a5f064,0x3fd38e67b0dda905,1 +np.float64,0x3fcafdcd6135fb98,0x3fcb670ae2ef4d36,1 +np.float64,0x3feda6042efb4c08,0x3ffa219442ac4ea5,1 +np.float64,0x3fed382b157a7056,0x3ff8bc01bc6d10bc,1 +np.float64,0x3fed858a50fb0b14,0x3ff9b1c05cb6cc0f,1 +np.float64,0x3fcc3960653872c0,0x3fccb2045373a3d1,1 +np.float64,0xbfec5177e478a2f0,0xbff65eb4557d94eb,1 +np.float64,0x3feafe0d5e75fc1a,0x3ff3bb4a260a0dcb,1 +np.float64,0x3fe08bc87ee11790,0x3fe25078aac99d31,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0x3f79985ce0333100,0x3f799872b591d1cb,1 +np.float64,0xbfd4001cf9a8003a,0xbfd4b14b9035b94f,1 +np.float64,0x3fe54a17e6ea9430,0x3fe9ac0f18682343,1 
+np.float64,0xbfb4e07fea29c100,0xbfb4ec6520dd0689,1 +np.float64,0xbfed2b6659fa56cd,0xbff895ed57dc1450,1 +np.float64,0xbfe81fc8b5f03f92,0xbfef6b95e72a7a7c,1 +np.float64,0xbfe6aced16ed59da,0xbfec4ce131ee3704,1 +np.float64,0xbfe599f30ceb33e6,0xbfea3d07c1cd78e2,1 +np.float64,0xbfe0ff278b61fe4f,0xbfe2ef8b5efa89ed,1 +np.float64,0xbfe3e9406467d281,0xbfe750e43e841736,1 +np.float64,0x3fcc6b52cf38d6a8,0x3fcce688f4fb2cf1,1 +np.float64,0xbfc890e8133121d0,0xbfc8dfdfee72d258,1 +np.float64,0x3fe46e81dbe8dd04,0x3fe82e09783811a8,1 +np.float64,0x3fd94455e5b288ac,0x3fdab7cef2de0b1f,1 +np.float64,0xbfe82151fff042a4,0xbfef6f254c9696ca,1 +np.float64,0x3fcee1ac1d3dc358,0x3fcf80a6ed07070a,1 +np.float64,0x3fcce8f90939d1f0,0x3fcd6ad18d34f8b5,1 +np.float64,0x3fd6afe56fad5fcc,0x3fd7b7567526b1fb,1 +np.float64,0x3fb1a77092234ee0,0x3fb1ae9fe0d176fc,1 +np.float64,0xbfeb758b0d76eb16,0xbff493d105652edc,1 +np.float64,0xbfb857c24e30af88,0xbfb86aa4da3be53f,1 +np.float64,0x3fe89064eff120ca,0x3ff03b7c5b3339a8,1 +np.float64,0xbfc1bd2fef237a60,0xbfc1da99893473ed,1 +np.float64,0xbfe5ad6e2eeb5adc,0xbfea60ed181b5c05,1 +np.float64,0x3fd5a66358ab4cc8,0x3fd6899e640aeb1f,1 +np.float64,0xbfe198e832e331d0,0xbfe3c8c9496d0de5,1 +np.float64,0xbfdaa5c0d7b54b82,0xbfdc5ed7d3c5ce49,1 +np.float64,0x3fcceccb6939d998,0x3fcd6ed88c2dd3a5,1 +np.float64,0xbfe44413eae88828,0xbfe7e6cd32b34046,1 +np.float64,0xbfc7cbeccf2f97d8,0xbfc8139a2626edae,1 +np.float64,0x3fbf31e4fa3e63d0,0x3fbf59c6e863255e,1 +np.float64,0x3fdf03fa05be07f4,0x3fe0ed953f7989ad,1 +np.float64,0x3fe7f4eaceefe9d6,0x3fef092ca7e2ac39,1 +np.float64,0xbfc084e9d92109d4,0xbfc09ca10fd6aaea,1 +np.float64,0xbf88cfbf70319f80,0xbf88d00effa6d897,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfa0176e9c202ee0,0xbfa018ca0a6ceef3,1 +np.float64,0xbfd88d0815b11a10,0xbfd9dfc6c6bcbe4e,1 +np.float64,0x3fe89f7730713eee,0x3ff04de52fb536f3,1 +np.float64,0xbfedc9707bfb92e1,0xbffaa25fcf9dd6da,1 +np.float64,0x3fe936d1a6726da4,0x3ff10e40c2d94bc9,1 +np.float64,0x3fdb64aec7b6c95c,0x3fdd473177317b3f,1 +np.float64,0xbfee4f9aaefc9f35,0xbffcdd212667003c,1 +np.float64,0x3fe3730067e6e600,0x3fe692b0a0babf5f,1 +np.float64,0xbfc257e58924afcc,0xbfc27871f8c218d7,1 +np.float64,0x3fe62db12dec5b62,0x3feb52c61b97d9f6,1 +np.float64,0xbfe3ff491367fe92,0xbfe774f1b3a96fd6,1 +np.float64,0x3fea43255274864a,0x3ff28b0c4b7b8d21,1 +np.float64,0xbfea37923c746f24,0xbff27962159f2072,1 +np.float64,0x3fcd0ac3c73a1588,0x3fcd8e6f8de41755,1 +np.float64,0xbfdccafde6b995fc,0xbfdf030fea8a0630,1 +np.float64,0x3fdba35268b746a4,0x3fdd94094f6f50c1,1 +np.float64,0x3fc68ea1d92d1d40,0x3fc6cb8d07cbb0e4,1 +np.float64,0xbfb88b1f6e311640,0xbfb89e7af4e58778,1 +np.float64,0xbfedc7cadffb8f96,0xbffa9c3766227956,1 +np.float64,0x3fe7928d3eef251a,0x3fee2dcf2ac7961b,1 +np.float64,0xbfeff42ede7fe85e,0xc00cef6b0f1e8323,1 +np.float64,0xbfebf07fa477e0ff,0xbff5893f99e15236,1 +np.float64,0x3fe3002ab9660056,0x3fe5defba550c583,1 +np.float64,0x3feb8f4307f71e86,0x3ff4c517ec8d6de9,1 +np.float64,0x3fd3c16f49a782e0,0x3fd46becaacf74da,1 +np.float64,0x3fc7613df12ec278,0x3fc7a52b2a3c3368,1 +np.float64,0xbfe33af560e675eb,0xbfe63a6528ff1587,1 +np.float64,0xbfde86495abd0c92,0xbfe09bd7ba05b461,1 +np.float64,0x3fe1e7fb4ee3cff6,0x3fe43b04311c0ab6,1 +np.float64,0xbfc528b6bd2a516c,0xbfc55ae0a0c184c8,1 +np.float64,0xbfd81025beb0204c,0xbfd94dd72d804613,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x3fc1151c47222a38,0x3fc12f5aad80a6bf,1 +np.float64,0x3feafa136775f426,0x3ff3b46854da0b3a,1 +np.float64,0x3fed2da0747a5b40,0x3ff89c85b658459e,1 
+np.float64,0x3fda2a4b51b45498,0x3fdbca0d908ddbbd,1 +np.float64,0xbfd04cf518a099ea,0xbfd0aae0033b9e4c,1 +np.float64,0xbfb9065586320ca8,0xbfb91adb7e31f322,1 +np.float64,0xbfd830b428b06168,0xbfd973ca3c484d8d,1 +np.float64,0x3fc952f7ed32a5f0,0x3fc9a9994561fc1a,1 +np.float64,0xbfeb06c83c760d90,0xbff3ca77b326df20,1 +np.float64,0xbfeb1c98ac763931,0xbff3f0d0900f6149,1 +np.float64,0x3fdf061dbebe0c3c,0x3fe0eefb32b48d17,1 +np.float64,0xbf9acbaf28359760,0xbf9acd4024be9fec,1 +np.float64,0x3fec0adde2f815bc,0x3ff5c1628423794d,1 +np.float64,0xbfc4bc750d2978ec,0xbfc4eba43f590b94,1 +np.float64,0x3fdbe47878b7c8f0,0x3fdde44a2b500d73,1 +np.float64,0x3fe160d18162c1a4,0x3fe378cff08f18f0,1 +np.float64,0x3fc3b58dfd276b18,0x3fc3de01d3802de9,1 +np.float64,0x3fa860343430c060,0x3fa864ecd07ec962,1 +np.float64,0x3fcaebfb4b35d7f8,0x3fcb546512d1b4c7,1 +np.float64,0x3fe3fda558e7fb4a,0x3fe772412e5776de,1 +np.float64,0xbfe8169f2c702d3e,0xbfef5666c9a10f6d,1 +np.float64,0x3feda78e9efb4f1e,0x3ffa270712ded769,1 +np.float64,0xbfda483161b49062,0xbfdbedfbf2e850ba,1 +np.float64,0x3fd7407cf3ae80f8,0x3fd85d4f52622743,1 +np.float64,0xbfd63de4d4ac7bca,0xbfd73550a33e3c32,1 +np.float64,0xbfd9c30b90b38618,0xbfdb4e7695c856f3,1 +np.float64,0x3fcd70c00b3ae180,0x3fcdfa0969e0a119,1 +np.float64,0x3feb4f127f769e24,0x3ff44bf42514e0f4,1 +np.float64,0xbfec1db44af83b69,0xbff5ea54aed1f8e9,1 +np.float64,0x3fd68ff051ad1fe0,0x3fd792d0ed6d6122,1 +np.float64,0x3fe0a048a5614092,0x3fe26c80a826b2a2,1 +np.float64,0x3fd59f3742ab3e70,0x3fd6818563fcaf80,1 +np.float64,0x3fca26ecf9344dd8,0x3fca867ceb5d7ba8,1 +np.float64,0x3fdc1d547ab83aa8,0x3fde2a9cea866484,1 +np.float64,0xbfc78df6312f1bec,0xbfc7d3719b698a39,1 +np.float64,0x3fe754e72b6ea9ce,0x3feda89ea844a2e5,1 +np.float64,0x3fe740c1a4ee8184,0x3fed7dc56ec0c425,1 +np.float64,0x3fe77566a9eeeace,0x3fedee6f408df6de,1 +np.float64,0xbfbbf5bf8e37eb80,0xbfbc126a223781b4,1 +np.float64,0xbfe0acb297615965,0xbfe27d86681ca2b5,1 +np.float64,0xbfc20a0487241408,0xbfc228f5f7d52ce8,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x3fef98a4dbff314a,0x40043cfb60bd46fa,1 +np.float64,0x3fd059102ca0b220,0x3fd0b7d2be6d7822,1 +np.float64,0x3fe89f18a1f13e32,0x3ff04d714bbbf400,1 +np.float64,0x3fd45b6275a8b6c4,0x3fd516a44a276a4b,1 +np.float64,0xbfe04463e86088c8,0xbfe1ef9dfc9f9a53,1 +np.float64,0xbfe086e279610dc5,0xbfe249c9c1040a13,1 +np.float64,0x3f89c9b110339380,0x3f89ca0a641454b5,1 +np.float64,0xbfb5f5b4322beb68,0xbfb6038dc3fd1516,1 +np.float64,0x3fe6eae76f6dd5ce,0x3feccabae04d5c14,1 +np.float64,0x3fa9ef6c9c33dee0,0x3fa9f51c9a8c8a2f,1 +np.float64,0xbfe171b45f62e368,0xbfe390ccc4c01bf6,1 +np.float64,0x3fb2999442253330,0x3fb2a1fc006804b5,1 +np.float64,0x3fd124bf04a24980,0x3fd1927abb92472d,1 +np.float64,0xbfe6e05938edc0b2,0xbfecb519ba78114f,1 +np.float64,0x3fed466ee6fa8cde,0x3ff8e75405b50490,1 +np.float64,0xbfb999aa92333358,0xbfb9afa4f19f80a2,1 +np.float64,0xbfe98969ed7312d4,0xbff17d887b0303e7,1 +np.float64,0x3fe782843e6f0508,0x3fee0adbeebe3486,1 +np.float64,0xbfe232fcc26465fa,0xbfe4a90a68d46040,1 +np.float64,0x3fd190a90fa32154,0x3fd206f56ffcdca2,1 +np.float64,0xbfc4f8b75929f170,0xbfc5298b2d4e7740,1 +np.float64,0xbfba3a63d63474c8,0xbfba520835c2fdc2,1 +np.float64,0xbfb7708eea2ee120,0xbfb781695ec17846,1 +np.float64,0x3fed9fb7a5fb3f70,0x3ffa0b717bcd1609,1 +np.float64,0xbfc1b158cd2362b0,0xbfc1ce87345f3473,1 +np.float64,0x3f963478082c6900,0x3f96355c3000953b,1 +np.float64,0x3fc5050e532a0a20,0x3fc536397f38f616,1 +np.float64,0x3fe239f9eee473f4,0x3fe4b360da3b2faa,1 +np.float64,0xbfd66bd80eacd7b0,0xbfd769a29fd784c0,1 
+np.float64,0x3fc57cdad52af9b8,0x3fc5b16b937f5f72,1 +np.float64,0xbfd3c36a0aa786d4,0xbfd46e1cd0b4eddc,1 +np.float64,0x3feff433487fe866,0x400cf0ea1def3161,1 +np.float64,0xbfed5577807aaaef,0xbff915e8f6bfdf22,1 +np.float64,0xbfca0dd3eb341ba8,0xbfca6c4d11836cb6,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbf974deaa82e9be0,0xbf974ef26a3130d1,1 +np.float64,0xbfe7f425e1efe84c,0xbfef076cb00d649d,1 +np.float64,0xbfe4413605e8826c,0xbfe7e20448b8a4b1,1 +np.float64,0xbfdfad202cbf5a40,0xbfe15cd9eb2be707,1 +np.float64,0xbfe43261ee6864c4,0xbfe7c952c951fe33,1 +np.float64,0xbfec141225782824,0xbff5d54d33861d98,1 +np.float64,0x3fd0f47abaa1e8f4,0x3fd15e8691a7f1c2,1 +np.float64,0x3fd378f0baa6f1e0,0x3fd41bea4a599081,1 +np.float64,0xbfb52523462a4a48,0xbfb5317fa7f436e2,1 +np.float64,0x3fcb30797d3660f0,0x3fcb9c174ea401ff,1 +np.float64,0xbfd48480dea90902,0xbfd5446e02c8b329,1 +np.float64,0xbfee4ae3ab7c95c7,0xbffcc650340ba274,1 +np.float64,0xbfeab086d075610e,0xbff3387f4e83ae26,1 +np.float64,0x3fa17cddf422f9c0,0x3fa17e9bf1b25736,1 +np.float64,0xbfe3064536e60c8a,0xbfe5e86aa5244319,1 +np.float64,0x3feb2882c5765106,0x3ff40604c7d97d44,1 +np.float64,0xbfa6923ff42d2480,0xbfa695ff57b2fc3f,1 +np.float64,0xbfa8bdbdcc317b80,0xbfa8c2ada0d94aa7,1 +np.float64,0x3fe7f16b8e6fe2d8,0x3fef013948c391a6,1 +np.float64,0x3fe4e7169f69ce2e,0x3fe8fceef835050a,1 +np.float64,0x3fed877638fb0eec,0x3ff9b83694127959,1 +np.float64,0xbfe0cc9ecf61993e,0xbfe2a978234cbde5,1 +np.float64,0xbfe977e79672efcf,0xbff16589ea494a38,1 +np.float64,0xbfe240130ae48026,0xbfe4bc69113e0d7f,1 +np.float64,0x3feb1e9b70763d36,0x3ff3f4615938a491,1 +np.float64,0xbfdf197dfcbe32fc,0xbfe0fba78a0fc816,1 +np.float64,0xbfee0f8543fc1f0a,0xbffbb9d9a4ee5387,1 +np.float64,0x3fe88d2191f11a44,0x3ff037843b5b6313,1 +np.float64,0xbfd11bb850a23770,0xbfd188c1cef40007,1 +np.float64,0xbfa1b36e9c2366e0,0xbfa1b53d1d8a8bc4,1 +np.float64,0xbfea2d70d9f45ae2,0xbff26a0629e36b3e,1 +np.float64,0xbfd9188703b2310e,0xbfda83f9ddc18348,1 +np.float64,0xbfee194894fc3291,0xbffbe3c83b61e7cb,1 +np.float64,0xbfe093b4a9e1276a,0xbfe25b4ad6f8f83d,1 +np.float64,0x3fea031489f4062a,0x3ff22accc000082e,1 +np.float64,0xbfc6c0827b2d8104,0xbfc6ff0a94326381,1 +np.float64,0x3fef5cd340feb9a6,0x4002659c5a1b34af,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x3fd97cb533b2f96c,0x3fdafab28aaae8e3,1 +np.float64,0x3fe2123334642466,0x3fe478bd83a8ce02,1 +np.float64,0xbfd9a69637b34d2c,0xbfdb2c87c6b6fb8c,1 +np.float64,0x3fc58def7f2b1be0,0x3fc5c2ff724a9f61,1 +np.float64,0xbfedd5da1f7babb4,0xbffad15949b7fb22,1 +np.float64,0x3fe90e92a0721d26,0x3ff0d9b64323efb8,1 +np.float64,0x3fd34b9442a69728,0x3fd3e9f8fe80654e,1 +np.float64,0xbfc5f509ab2bea14,0xbfc62d2ad325c59f,1 +np.float64,0x3feb245634f648ac,0x3ff3fe91a46acbe1,1 +np.float64,0x3fd101e539a203cc,0x3fd16cf52ae6d203,1 +np.float64,0xbfc51e9ba72a3d38,0xbfc5507d00521ba3,1 +np.float64,0x3fe5fe1683ebfc2e,0x3feaf7dd8b1f92b0,1 +np.float64,0x3fc362e59126c5c8,0x3fc389601814170b,1 +np.float64,0x3fea34dbd77469b8,0x3ff27542eb721e7e,1 +np.float64,0xbfc13ed241227da4,0xbfc159d42c0a35a9,1 +np.float64,0xbfe6df118cedbe23,0xbfecb27bb5d3f784,1 +np.float64,0x3fd92895f6b2512c,0x3fda96f5f94b625e,1 +np.float64,0xbfe7ea3aa76fd476,0xbfeef0e93939086e,1 +np.float64,0xbfc855498330aa94,0xbfc8a1ff690c9533,1 +np.float64,0x3fd9f27b3ab3e4f8,0x3fdb8726979afc3b,1 +np.float64,0x3fc65d52232cbaa8,0x3fc698ac4367afba,1 +np.float64,0x3fd1271dd0a24e3c,0x3fd195087649d54e,1 +np.float64,0xbfe983445df30689,0xbff175158b773b90,1 +np.float64,0xbfe0d9b13261b362,0xbfe2bb8908fc9e6e,1 
+np.float64,0x3fd7671f2aaece40,0x3fd889dccbf21629,1 +np.float64,0x3fe748aebfee915e,0x3fed8e970d94c17d,1 +np.float64,0x3fea756e4e74eadc,0x3ff2d947ef3a54f4,1 +np.float64,0x3fde22311cbc4464,0x3fe05b4ce9df1fdd,1 +np.float64,0x3fe2b55ec1e56abe,0x3fe56c6849e3985a,1 +np.float64,0x3fed7b47437af68e,0x3ff98f8e82de99a0,1 +np.float64,0x3fec8184b179030a,0x3ff6d03aaf0135ba,1 +np.float64,0x3fc9ea825533d508,0x3fca4776d7190e71,1 +np.float64,0xbfe8ddd58b71bbab,0xbff09b770ed7bc9a,1 +np.float64,0xbfed41741bfa82e8,0xbff8d81c2a9fc615,1 +np.float64,0x3fe0a73888e14e72,0x3fe27602ad9a3726,1 +np.float64,0xbfe9d0a565f3a14b,0xbff1e1897b628f66,1 +np.float64,0x3fda12b381b42568,0x3fdbadbec22fbd5a,1 +np.float64,0x3fef0081187e0102,0x4000949eff8313c2,1 +np.float64,0x3fef6942b67ed286,0x4002b7913eb1ee76,1 +np.float64,0x3fda10f882b421f0,0x3fdbababa2d6659d,1 +np.float64,0x3fe5828971eb0512,0x3fea122b5088315a,1 +np.float64,0x3fe9d4b53ff3a96a,0x3ff1e75c148bda01,1 +np.float64,0x3fe95d246bf2ba48,0x3ff1414a61a136ec,1 +np.float64,0x3f9e575eb83caec0,0x3f9e59a4f17179e3,1 +np.float64,0x3fdb0a20b5b61440,0x3fdcd8a56178a17f,1 +np.float64,0xbfdef425e3bde84c,0xbfe0e33eeacf3861,1 +np.float64,0x3fd6afcf6bad5fa0,0x3fd7b73d47288347,1 +np.float64,0x3fe89256367124ac,0x3ff03dd9f36ce40e,1 +np.float64,0x3fe7e560fcefcac2,0x3feee5ef8688b60b,1 +np.float64,0x3fedef55e1fbdeac,0x3ffb350ee1df986b,1 +np.float64,0xbfe44b926de89725,0xbfe7f3539910c41f,1 +np.float64,0x3fc58310f32b0620,0x3fc5b7cfdba15bd0,1 +np.float64,0x3f736d256026da00,0x3f736d2eebe91a90,1 +np.float64,0x3feb012d2076025a,0x3ff3c0b5d21a7259,1 +np.float64,0xbfe466a6c468cd4e,0xbfe820c9c197601f,1 +np.float64,0x3fe1aba8aa635752,0x3fe3e3b73920f64c,1 +np.float64,0x3fe5597c336ab2f8,0x3fe9c7bc4b765b15,1 +np.float64,0x3fe1004ac5e20096,0x3fe2f12116e99821,1 +np.float64,0x3fecbc67477978ce,0x3ff76377434dbdad,1 +np.float64,0x3fe0e64515e1cc8a,0x3fe2ccf5447c1579,1 +np.float64,0x3febcfa874f79f50,0x3ff54528f0822144,1 +np.float64,0x3fc36915ed26d228,0x3fc38fb5b28d3f72,1 +np.float64,0xbfe01213e5e02428,0xbfe1ac0e1e7418f1,1 +np.float64,0x3fcd97875b3b2f10,0x3fce22fe3fc98702,1 +np.float64,0xbfe30383c5e60708,0xbfe5e427e62cc957,1 +np.float64,0xbfde339bf9bc6738,0xbfe0667f337924f5,1 +np.float64,0xbfda7c1c49b4f838,0xbfdc2c8801ce654a,1 +np.float64,0x3fb6b3489e2d6690,0x3fb6c29650387b92,1 +np.float64,0xbfe1fd4d76e3fa9b,0xbfe45a1f60077678,1 +np.float64,0xbf67c5e0402f8c00,0xbf67c5e49fce115a,1 +np.float64,0xbfd4f9aa2da9f354,0xbfd5c759603d0b9b,1 +np.float64,0x3fe83c227bf07844,0x3fefada9f1bd7fa9,1 +np.float64,0xbf97f717982fee20,0xbf97f836701a8cd5,1 +np.float64,0x3fe9688a2472d114,0x3ff150aa575e7d51,1 +np.float64,0xbfc5a9779d2b52f0,0xbfc5df56509c48b1,1 +np.float64,0xbfe958d5f472b1ac,0xbff13b813f9bee20,1 +np.float64,0xbfd7b3b944af6772,0xbfd8e276c2b2920f,1 +np.float64,0x3fed10198e7a2034,0x3ff8469c817572f0,1 +np.float64,0xbfeeecc4517dd989,0xc000472b1f858be3,1 +np.float64,0xbfdbcce47eb799c8,0xbfddc734aa67812b,1 +np.float64,0xbfd013ee24a027dc,0xbfd06df3089384ca,1 +np.float64,0xbfd215f2bfa42be6,0xbfd29774ffe26a74,1 +np.float64,0x3fdfd0ae67bfa15c,0x3fe1746e3a963a9f,1 +np.float64,0xbfc84aa10b309544,0xbfc896f0d25b723a,1 +np.float64,0xbfcd0c627d3a18c4,0xbfcd9024c73747a9,1 +np.float64,0x3fd87df6dbb0fbec,0x3fd9ce1dde757f31,1 +np.float64,0xbfdad85e05b5b0bc,0xbfdc9c2addb6ce47,1 +np.float64,0xbfee4f8977fc9f13,0xbffcdccd68e514b3,1 +np.float64,0x3fa5c290542b8520,0x3fa5c5ebdf09ca70,1 +np.float64,0xbfd7e401d2afc804,0xbfd91a7e4eb5a026,1 +np.float64,0xbfe33ff73b667fee,0xbfe6423cc6eb07d7,1 +np.float64,0x3fdfb7d6c4bf6fac,0x3fe163f2e8175177,1 
+np.float64,0xbfd515d69eaa2bae,0xbfd5e6eedd6a1598,1 +np.float64,0x3fb322232e264440,0x3fb32b49d91c3cbe,1 +np.float64,0xbfe20ac39e641587,0xbfe46dd4b3803f19,1 +np.float64,0x3fe282dc18e505b8,0x3fe520152120c297,1 +np.float64,0xbfc905a4cd320b48,0xbfc95929b74865fb,1 +np.float64,0x3fe0ae3b83615c78,0x3fe27fa1dafc825b,1 +np.float64,0xbfc1bfed0f237fdc,0xbfc1dd6466225cdf,1 +np.float64,0xbfeca4d47d7949a9,0xbff72761a34fb682,1 +np.float64,0xbfe8cf8c48f19f18,0xbff0897ebc003626,1 +np.float64,0xbfe1aaf0a36355e2,0xbfe3e2ae7b17a286,1 +np.float64,0x3fe2ca442e659488,0x3fe58c3a2fb4f14a,1 +np.float64,0xbfda3c2deeb4785c,0xbfdbdf89fe96a243,1 +np.float64,0xbfdc12bfecb82580,0xbfde1d81dea3c221,1 +np.float64,0xbfe2d6d877e5adb1,0xbfe59f73e22c1fc7,1 +np.float64,0x3fe5f930636bf260,0x3feaee96a462e4de,1 +np.float64,0x3fcf3c0ea53e7820,0x3fcfe0b0f92be7e9,1 +np.float64,0xbfa5bb90f42b7720,0xbfa5bee9424004cc,1 +np.float64,0xbfe2fb3a3265f674,0xbfe5d75b988bb279,1 +np.float64,0x3fcaec7aab35d8f8,0x3fcb54ea582fff6f,1 +np.float64,0xbfd8d3228db1a646,0xbfda322297747fbc,1 +np.float64,0x3fedd2e0ad7ba5c2,0x3ffac6002b65c424,1 +np.float64,0xbfd9edeca2b3dbda,0xbfdb81b2b7785e33,1 +np.float64,0xbfef5febb17ebfd7,0xc002796b15950960,1 +np.float64,0x3fde22f787bc45f0,0x3fe05bcc624b9ba2,1 +np.float64,0xbfc716a4ab2e2d48,0xbfc758073839dd44,1 +np.float64,0xbf9bed852837db00,0xbf9bef4b2a3f3bdc,1 +np.float64,0x3fef8f88507f1f10,0x4003e5e566444571,1 +np.float64,0xbfdc1bbed6b8377e,0xbfde28a64e174e60,1 +np.float64,0x3fe02d30eae05a62,0x3fe1d064ec027cd3,1 +np.float64,0x3fd9dbb500b3b76c,0x3fdb6bea40162279,1 +np.float64,0x3fe353ff1d66a7fe,0x3fe661b3358c925e,1 +np.float64,0x3fac3ebfb4387d80,0x3fac4618effff2b0,1 +np.float64,0x3fe63cf0ba6c79e2,0x3feb7030cff5f434,1 +np.float64,0x3fd0e915f8a1d22c,0x3fd152464597b510,1 +np.float64,0xbfd36987cda6d310,0xbfd40af049d7621e,1 +np.float64,0xbfdc5b4dc7b8b69c,0xbfde7790a35da2bc,1 +np.float64,0x3feee7ff4a7dcffe,0x40003545989e07c7,1 +np.float64,0xbfeb2c8308765906,0xbff40d2e6469249e,1 +np.float64,0x3fe535a894ea6b52,0x3fe98781648550d0,1 +np.float64,0xbfef168eb9fe2d1d,0xc000f274ed3cd312,1 +np.float64,0x3fc3e2d98927c5b0,0x3fc40c6991b8900c,1 +np.float64,0xbfcd8fe3e73b1fc8,0xbfce1aec7f9b7f7d,1 +np.float64,0xbfd55d8c3aaabb18,0xbfd6378132ee4892,1 +np.float64,0xbfe424a66168494d,0xbfe7b289d72c98b3,1 +np.float64,0x3fd81af13eb035e4,0x3fd95a6a9696ab45,1 +np.float64,0xbfe3016722e602ce,0xbfe5e0e46db228cd,1 +np.float64,0x3fe9a20beff34418,0x3ff19faca17fc468,1 +np.float64,0xbfe2124bc7e42498,0xbfe478e19927e723,1 +np.float64,0x3fd96f8622b2df0c,0x3fdaeb08da6b08ae,1 +np.float64,0x3fecd6796579acf2,0x3ff7a7d02159e181,1 +np.float64,0x3fe60015df6c002c,0x3feafba6f2682a61,1 +np.float64,0x3fc7181cf72e3038,0x3fc7598c2cc3c3b4,1 +np.float64,0xbfce6e2e0b3cdc5c,0xbfcf0621b3e37115,1 +np.float64,0xbfe52a829e6a5505,0xbfe973a785980af9,1 +np.float64,0x3fed4bbac37a9776,0x3ff8f7a0e68a2bbe,1 +np.float64,0x3fabdfaacc37bf60,0x3fabe6bab42bd246,1 +np.float64,0xbfcd9598cb3b2b30,0xbfce20f3c4c2c261,1 +np.float64,0x3fd717d859ae2fb0,0x3fd82e88eca09ab1,1 +np.float64,0x3fe28ccb18e51996,0x3fe52f071d2694fd,1 +np.float64,0xbfe43f064ae87e0c,0xbfe7de5eab36b5b9,1 +np.float64,0x7fefffffffffffff,0xfff8000000000000,1 +np.float64,0xbfb39b045a273608,0xbfb3a4dd3395fdd5,1 +np.float64,0xbfb3358bae266b18,0xbfb33ece5e95970a,1 +np.float64,0xbfeeafb6717d5f6d,0xbffeec3f9695b575,1 +np.float64,0xbfe7a321afef4644,0xbfee522dd80f41f4,1 +np.float64,0x3fe3a17e5be742fc,0x3fe6dcd32af51e92,1 +np.float64,0xbfc61694bd2c2d28,0xbfc64fbbd835f6e7,1 +np.float64,0xbfd795906faf2b20,0xbfd8bf89b370655c,1 
+np.float64,0xbfe4b39b59e96736,0xbfe8a3c5c645b6e3,1 +np.float64,0x3fd310af3ba62160,0x3fd3a9442e825e1c,1 +np.float64,0xbfd45198a6a8a332,0xbfd50bc10311a0a3,1 +np.float64,0x3fd0017eaaa002fc,0x3fd05a472a837999,1 +np.float64,0xbfea974d98752e9b,0xbff30f67f1835183,1 +np.float64,0xbf978f60582f1ec0,0xbf979070e1c2b59d,1 +np.float64,0x3fe1c715d4e38e2c,0x3fe40b479e1241a2,1 +np.float64,0xbfccb965cd3972cc,0xbfcd38b40c4a352d,1 +np.float64,0xbfd9897048b312e0,0xbfdb09d55624c2a3,1 +np.float64,0x3fe7f5de4befebbc,0x3fef0b56be259f9c,1 +np.float64,0x3fcc6c6d4338d8d8,0x3fcce7b20ed68a78,1 +np.float64,0xbfe63884046c7108,0xbfeb67a3b945c3ee,1 +np.float64,0xbfce64e2ad3cc9c4,0xbfcefc47fae2e81f,1 +np.float64,0x3fefeb57b27fd6b0,0x400ab2eac6321cfb,1 +np.float64,0x3fe679627e6cf2c4,0x3febe6451b6ee0c4,1 +np.float64,0x3fc5f710172bee20,0x3fc62f40f85cb040,1 +np.float64,0x3fc34975e52692e8,0x3fc36f58588c7fa2,1 +np.float64,0x3fe8a3784cf146f0,0x3ff052ced9bb9406,1 +np.float64,0x3fd11a607ca234c0,0x3fd1874f876233fe,1 +np.float64,0x3fb2d653f625aca0,0x3fb2df0f4c9633f3,1 +np.float64,0x3fe555f39eeaabe8,0x3fe9c15ee962a28c,1 +np.float64,0xbfea297e3bf452fc,0xbff264107117f709,1 +np.float64,0x3fe1581cdde2b03a,0x3fe36c79acedf99c,1 +np.float64,0x3fd4567063a8ace0,0x3fd51123dbd9106f,1 +np.float64,0x3fa3883aec271080,0x3fa38aa86ec71218,1 +np.float64,0x3fe40e5d7de81cba,0x3fe78dbb9b568850,1 +np.float64,0xbfe9a2f7347345ee,0xbff1a0f4faa05041,1 +np.float64,0x3f9eef03a83dde00,0x3f9ef16caa0c1478,1 +np.float64,0xbfcb4641d1368c84,0xbfcbb2e7ff8c266d,1 +np.float64,0xbfa8403b2c308070,0xbfa844e148b735b7,1 +np.float64,0xbfe1875cd6e30eba,0xbfe3afadc08369f5,1 +np.float64,0xbfdd3c3d26ba787a,0xbfdf919b3e296766,1 +np.float64,0x3fcd6c4c853ad898,0x3fcdf55647b518b8,1 +np.float64,0xbfe360a173e6c143,0xbfe6759eb3a08cf2,1 +np.float64,0x3fe5a13147eb4262,0x3fea4a5a060f5adb,1 +np.float64,0x3feb3cdd7af679ba,0x3ff42aae0cf61234,1 +np.float64,0x3fe5205128ea40a2,0x3fe9618f3d0c54af,1 +np.float64,0x3fce35343f3c6a68,0x3fcec9c4e612b050,1 +np.float64,0xbfc345724d268ae4,0xbfc36b3ce6338e6a,1 +np.float64,0x3fedc4fc0e7b89f8,0x3ffa91c1d775c1f7,1 +np.float64,0x3fe41fbf21683f7e,0x3fe7aa6c174a0e65,1 +np.float64,0xbfc7a1a5d32f434c,0xbfc7e7d27a4c5241,1 +np.float64,0x3fd3e33eaca7c67c,0x3fd4915264441e2f,1 +np.float64,0x3feb3f02f6f67e06,0x3ff42e942249e596,1 +np.float64,0x3fdb75fcb0b6ebf8,0x3fdd5c63f98b6275,1 +np.float64,0x3fd6476603ac8ecc,0x3fd74020b164cf38,1 +np.float64,0x3fed535372faa6a6,0x3ff90f3791821841,1 +np.float64,0x3fe8648ead70c91e,0x3ff006a62befd7ed,1 +np.float64,0x3fd0f90760a1f210,0x3fd1636b39bb1525,1 +np.float64,0xbfca052443340a48,0xbfca633d6e777ae0,1 +np.float64,0xbfa6a5e3342d4bc0,0xbfa6a9ac6a488f5f,1 +np.float64,0x3fd5598038aab300,0x3fd632f35c0c3d52,1 +np.float64,0xbfdf66218fbecc44,0xbfe12df83b19f300,1 +np.float64,0x3fe78e15b56f1c2c,0x3fee240d12489cd1,1 +np.float64,0x3fe3d6a7b3e7ad50,0x3fe7329dcf7401e2,1 +np.float64,0xbfddb8e97bbb71d2,0xbfe017ed6d55a673,1 +np.float64,0xbfd57afd55aaf5fa,0xbfd658a9607c3370,1 +np.float64,0xbfdba4c9abb74994,0xbfdd95d69e5e8814,1 +np.float64,0xbfe71d8090ee3b01,0xbfed3390be6d2eef,1 +np.float64,0xbfc738ac0f2e7158,0xbfc77b3553b7c026,1 +np.float64,0x3f873656302e6c80,0x3f873697556ae011,1 +np.float64,0x3fe559491d6ab292,0x3fe9c7603b12c608,1 +np.float64,0xbfe262776864c4ef,0xbfe4ef905dda8599,1 +np.float64,0x3fe59d8917eb3b12,0x3fea439f44b7573f,1 +np.float64,0xbfd4b5afb5a96b60,0xbfd57b4e3df4dbc8,1 +np.float64,0x3fe81158447022b0,0x3fef4a3cea3eb6a9,1 +np.float64,0xbfeb023441f60468,0xbff3c27f0fc1a4dc,1 +np.float64,0x3fefb212eaff6426,0x40055fc6d949cf44,1 
+np.float64,0xbfe1300ac1e26016,0xbfe333f297a1260e,1 +np.float64,0xbfeae0a2f575c146,0xbff388d58c380b8c,1 +np.float64,0xbfeddd8e55fbbb1d,0xbffaef045b2e21d9,1 +np.float64,0x3fec7c6c1d78f8d8,0x3ff6c3ebb019a8e5,1 +np.float64,0xbfe27e071f64fc0e,0xbfe518d2ff630f33,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fc5872abf2b0e58,0x3fc5bc083105db76,1 +np.float64,0x3fe65114baeca22a,0x3feb9745b82ef15a,1 +np.float64,0xbfc783abe52f0758,0xbfc7c8cb23f93e79,1 +np.float64,0x3fe4b7a5dd696f4c,0x3fe8aab9d492f0ca,1 +np.float64,0xbf91a8e8a82351e0,0xbf91a95b6ae806f1,1 +np.float64,0xbfee482eb77c905d,0xbffcb952830e715a,1 +np.float64,0x3fba0eee2a341de0,0x3fba261d495e3a1b,1 +np.float64,0xbfeb8876ae7710ed,0xbff4b7f7f4343506,1 +np.float64,0xbfe4d29e46e9a53c,0xbfe8d9547a601ba7,1 +np.float64,0xbfe12413b8e24828,0xbfe3232656541d10,1 +np.float64,0x3fc0bd8f61217b20,0x3fc0d63f937f0aa4,1 +np.float64,0xbfd3debafda7bd76,0xbfd48c534e5329e4,1 +np.float64,0x3fc0f92de921f258,0x3fc112eb7d47349b,1 +np.float64,0xbfe576b95f6aed72,0xbfe9fca859239b3c,1 +np.float64,0x3fd10e520da21ca4,0x3fd17a546e4152f7,1 +np.float64,0x3fcef917eb3df230,0x3fcf998677a8fa8f,1 +np.float64,0x3fdfcf863abf9f0c,0x3fe173a98af1cb13,1 +np.float64,0x3fc28c4b4f251898,0x3fc2adf43792e917,1 +np.float64,0x3fceb837ad3d7070,0x3fcf54a63b7d8c5c,1 +np.float64,0x3fc0140a05202818,0x3fc029e4f75330cb,1 +np.float64,0xbfd76c3362aed866,0xbfd88fb9e790b4e8,1 +np.float64,0xbfe475300868ea60,0xbfe8395334623e1f,1 +np.float64,0x3fea70b9b4f4e174,0x3ff2d1dad92173ba,1 +np.float64,0xbfe2edbd4965db7a,0xbfe5c29449a9365d,1 +np.float64,0xbfddf86f66bbf0de,0xbfe0408439cada9b,1 +np.float64,0xbfb443cdfa288798,0xbfb44eae796ad3ea,1 +np.float64,0xbf96a8a0482d5140,0xbf96a992b6ef073b,1 +np.float64,0xbfd279db2fa4f3b6,0xbfd3043db6acbd9e,1 +np.float64,0x3fe5d99088ebb322,0x3feab30be14e1605,1 +np.float64,0xbfe1a917abe35230,0xbfe3e0063d0f5f63,1 +np.float64,0x3fc77272f52ee4e8,0x3fc7b6f8ab6f4591,1 +np.float64,0x3fd6b62146ad6c44,0x3fd7be77eef8390a,1 +np.float64,0xbfe39fd9bc673fb4,0xbfe6da30dc4eadde,1 +np.float64,0x3fe35545c066aa8c,0x3fe663b5873e4d4b,1 +np.float64,0xbfcbbeffb3377e00,0xbfcc317edf7f6992,1 +np.float64,0xbfe28a58366514b0,0xbfe52b5734579ffa,1 +np.float64,0xbfbf0c87023e1910,0xbfbf33d970a0dfa5,1 +np.float64,0xbfd31144cba6228a,0xbfd3a9e84f9168f9,1 +np.float64,0xbfe5c044056b8088,0xbfea83d607c1a88a,1 +np.float64,0x3fdaabdf18b557c0,0x3fdc663ee8eddc83,1 +np.float64,0xbfeb883006f71060,0xbff4b76feff615be,1 +np.float64,0xbfebaef41d775de8,0xbff5034111440754,1 +np.float64,0x3fd9b6eb3bb36dd8,0x3fdb3fff5071dacf,1 +np.float64,0x3fe4e33c45e9c678,0x3fe8f637779ddedf,1 +np.float64,0x3fe52213a06a4428,0x3fe964adeff5c14e,1 +np.float64,0x3fe799254cef324a,0x3fee3c3ecfd3cdc5,1 +np.float64,0x3fd0533f35a0a680,0x3fd0b19a003469d3,1 +np.float64,0x3fec7ef5c7f8fdec,0x3ff6ca0abe055048,1 +np.float64,0xbfd1b5da82a36bb6,0xbfd22f357acbee79,1 +np.float64,0xbfd8f9c652b1f38c,0xbfda5faacbce9cf9,1 +np.float64,0x3fc8fc818b31f900,0x3fc94fa9a6aa53c8,1 +np.float64,0x3fcf42cc613e8598,0x3fcfe7dc128f33f2,1 +np.float64,0x3fd393a995a72754,0x3fd4396127b19305,1 +np.float64,0x3fec7b7df9f8f6fc,0x3ff6c1ae51753ef2,1 +np.float64,0x3fc07f175b20fe30,0x3fc096b55c11568c,1 +np.float64,0xbf979170082f22e0,0xbf979280d9555f44,1 +np.float64,0xbfb9d110c633a220,0xbfb9e79ba19b3c4a,1 +np.float64,0x3fedcd7d417b9afa,0x3ffab19734e86d58,1 +np.float64,0xbfec116f27f822de,0xbff5cf9425cb415b,1 +np.float64,0xbfec4fa0bef89f42,0xbff65a771982c920,1 +np.float64,0x3f94d4452829a880,0x3f94d501789ad11c,1 +np.float64,0xbfefe5ede27fcbdc,0xc009c440d3c2a4ce,1 
+np.float64,0xbfe7e5f7b5efcbf0,0xbfeee74449aee1db,1 +np.float64,0xbfeb71dc8976e3b9,0xbff48cd84ea54ed2,1 +np.float64,0xbfe4cdb65f699b6c,0xbfe8d0d3bce901ef,1 +np.float64,0x3fb78ef1ee2f1de0,0x3fb7a00e7d183c48,1 +np.float64,0x3fb681864a2d0310,0x3fb6906fe64b4cd7,1 +np.float64,0xbfd2ad3b31a55a76,0xbfd33c57b5985399,1 +np.float64,0x3fdcdaaa95b9b554,0x3fdf16b99628db1e,1 +np.float64,0x3fa4780b7428f020,0x3fa47ad6ce9b8081,1 +np.float64,0x3fc546b0ad2a8d60,0x3fc579b361b3b18f,1 +np.float64,0x3feaf98dd6f5f31c,0x3ff3b38189c3539c,1 +np.float64,0x3feb0b2eca76165e,0x3ff3d22797083f9a,1 +np.float64,0xbfdc02ae3ab8055c,0xbfde099ecb5dbacf,1 +np.float64,0x3fd248bf17a49180,0x3fd2ceb77b346d1d,1 +np.float64,0x3fe349d666e693ac,0x3fe651b9933a8853,1 +np.float64,0xbfca526fc534a4e0,0xbfcab3e83f0d9b93,1 +np.float64,0x3fc156421722ac88,0x3fc171b38826563b,1 +np.float64,0xbfe4244569e8488b,0xbfe7b1e93e7d4f92,1 +np.float64,0x3fe010faabe021f6,0x3fe1aa961338886d,1 +np.float64,0xbfc52dacb72a5b58,0xbfc55ffa50eba380,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fea1d4865f43a90,0x3ff251b839eb4817,1 +np.float64,0xbfa0f65c8421ecc0,0xbfa0f7f37c91be01,1 +np.float64,0x3fcab29c0b356538,0x3fcb1863edbee184,1 +np.float64,0x3fe7949162ef2922,0x3fee323821958b88,1 +np.float64,0x3fdaf9288ab5f250,0x3fdcc400190a4839,1 +np.float64,0xbfe13ece6be27d9d,0xbfe348ba07553179,1 +np.float64,0x3f8a0c4fd0341880,0x3f8a0cabdf710185,1 +np.float64,0x3fdd0442a2ba0884,0x3fdf4b016c4da452,1 +np.float64,0xbfaf06d2343e0da0,0xbfaf1090b1600422,1 +np.float64,0xbfd3b65225a76ca4,0xbfd45fa49ae76cca,1 +np.float64,0x3fef5d75fefebaec,0x400269a5e7c11891,1 +np.float64,0xbfe048e35ce091c6,0xbfe1f5af45dd64f8,1 +np.float64,0xbfe27d4599e4fa8b,0xbfe517b07843d04c,1 +np.float64,0xbfe6f2a637ede54c,0xbfecdaa730462576,1 +np.float64,0x3fc63fbb752c7f78,0x3fc67a2854974109,1 +np.float64,0x3fedda6bfbfbb4d8,0x3ffae2e6131f3475,1 +np.float64,0x3fe7a6f5286f4dea,0x3fee5a9b1ef46016,1 +np.float64,0xbfd4ea8bcea9d518,0xbfd5b66ab7e5cf00,1 +np.float64,0x3fdc116568b822cc,0x3fde1bd4d0d9fd6c,1 +np.float64,0x3fdc45cb1bb88b98,0x3fde5cd1d2751032,1 +np.float64,0x3feabd932f757b26,0x3ff34e06e56a62a1,1 +np.float64,0xbfae5dbe0c3cbb80,0xbfae66e062ac0d65,1 +np.float64,0xbfdb385a00b670b4,0xbfdd10fedf3a58a7,1 +np.float64,0xbfebb14755f7628f,0xbff507e123a2b47c,1 +np.float64,0x3fe6de2fdfedbc60,0x3fecb0ae6e131da2,1 +np.float64,0xbfd86de640b0dbcc,0xbfd9bb4dbf0bf6af,1 +np.float64,0x3fe39e86d9e73d0e,0x3fe6d811c858d5d9,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0x3fa8101684302020,0x3fa814a12176e937,1 +np.float64,0x3fefdd5ad37fbab6,0x4008a08c0b76fbb5,1 +np.float64,0x3fe645c727ec8b8e,0x3feb814ebc470940,1 +np.float64,0x3fe3ba79dce774f4,0x3fe70500db564cb6,1 +np.float64,0xbfe0e5a254e1cb44,0xbfe2cc13940c6d9a,1 +np.float64,0x3fe2cac62465958c,0x3fe58d008c5e31f8,1 +np.float64,0xbfd3ffb531a7ff6a,0xbfd4b0d88cff2040,1 +np.float64,0x3fe0929104612522,0x3fe259bc42dce788,1 +np.float64,0x1,0x1,1 +np.float64,0xbfe7db77e6efb6f0,0xbfeecf93e8a61cb3,1 +np.float64,0xbfe37e9559e6fd2a,0xbfe6a514e29cb7aa,1 +np.float64,0xbfc53a843f2a7508,0xbfc56d2e9ad8b716,1 +np.float64,0xbfedb04485fb6089,0xbffa4615d4334ec3,1 +np.float64,0xbfc44349b1288694,0xbfc46f484b6f1cd6,1 +np.float64,0xbfe265188264ca31,0xbfe4f37d61cd9e17,1 +np.float64,0xbfd030351da0606a,0xbfd08c2537287ee1,1 +np.float64,0x3fd8fb131db1f628,0x3fda613363ca601e,1 +np.float64,0xbff0000000000000,0xfff0000000000000,1 +np.float64,0xbfe48d9a60691b35,0xbfe862c02d8fec1e,1 +np.float64,0x3fd185e050a30bc0,0x3fd1fb4c614ddb07,1 
+np.float64,0xbfe4a5807e694b01,0xbfe88b8ff2d6caa7,1 +np.float64,0xbfc934d7ad3269b0,0xbfc98a405d25a666,1 +np.float64,0xbfea0e3c62741c79,0xbff23b4bd3a7b15d,1 +np.float64,0x3fe7244071ee4880,0x3fed41b27ba6bb22,1 +np.float64,0xbfd419f81ba833f0,0xbfd4cdf71b4533a3,1 +np.float64,0xbfe1e73a34e3ce74,0xbfe439eb15fa6baf,1 +np.float64,0x3fcdd9a63f3bb350,0x3fce68e1c401eff0,1 +np.float64,0x3fd1b5960ba36b2c,0x3fd22eeb566f1976,1 +np.float64,0x3fe9ad18e0735a32,0x3ff1af23c534260d,1 +np.float64,0xbfd537918aaa6f24,0xbfd60ccc8df0962b,1 +np.float64,0x3fcba3d3c73747a8,0x3fcc14fd5e5c49ad,1 +np.float64,0x3fd367e3c0a6cfc8,0x3fd40921b14e288e,1 +np.float64,0x3fe94303c6f28608,0x3ff11e62db2db6ac,1 +np.float64,0xbfcc5f77fd38bef0,0xbfccda110c087519,1 +np.float64,0xbfd63b74d7ac76ea,0xbfd7328af9f37402,1 +np.float64,0xbfe5321289ea6425,0xbfe9811ce96609ad,1 +np.float64,0xbfde910879bd2210,0xbfe0a2cd0ed1d368,1 +np.float64,0xbfcc9d9bad393b38,0xbfcd1b722a0b1371,1 +np.float64,0xbfe6dd39e16dba74,0xbfecaeb7c8c069f6,1 +np.float64,0xbfe98316eff3062e,0xbff174d7347d48bf,1 +np.float64,0xbfda88f8d1b511f2,0xbfdc3c0e75dad903,1 +np.float64,0x3fd400d8c2a801b0,0x3fd4b21bacff1f5d,1 +np.float64,0xbfe1ed335863da66,0xbfe4429e45e99779,1 +np.float64,0xbf3423a200284800,0xbf3423a20acb0342,1 +np.float64,0xbfe97bc59672f78b,0xbff16ad1adc44a33,1 +np.float64,0xbfeeca60d7fd94c2,0xbfff98d7f18f7728,1 +np.float64,0x3fd1eb13b2a3d628,0x3fd268e6ff4d56ce,1 +np.float64,0xbfa5594c242ab2a0,0xbfa55c77d6740a39,1 +np.float64,0x3fe72662006e4cc4,0x3fed462a9dedbfee,1 +np.float64,0x3fef4bb221fe9764,0x4001fe4f4cdfedb2,1 +np.float64,0xbfe938d417f271a8,0xbff110e78724ca2b,1 +np.float64,0xbfcc29ab2f385358,0xbfcca182140ef541,1 +np.float64,0x3fe18cd42c6319a8,0x3fe3b77e018165e7,1 +np.float64,0xbfec6c5cae78d8b9,0xbff69d8e01309b48,1 +np.float64,0xbfd5723da7aae47c,0xbfd64ecde17da471,1 +np.float64,0xbfe3096722e612ce,0xbfe5ed43634f37ff,1 +np.float64,0xbfdacaceb1b5959e,0xbfdc8bb826bbed39,1 +np.float64,0x3fc59a57cb2b34b0,0x3fc5cfc4a7c9bac8,1 +np.float64,0x3f84adce10295b80,0x3f84adfc1f1f6e97,1 +np.float64,0x3fdd5b28bbbab650,0x3fdfb8b906d77df4,1 +np.float64,0x3fdebf94c6bd7f28,0x3fe0c10188e1bc7c,1 +np.float64,0x3fdb30c612b6618c,0x3fdd07bf18597821,1 +np.float64,0x3fe7eeb3176fdd66,0x3feefb0be694b855,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe10057e9e200b0,0xbfe2f13365e5b1c9,1 +np.float64,0xbfeb61a82376c350,0xbff46e665d3a60f5,1 +np.float64,0xbfe7f54aec6fea96,0xbfef0a0759f726dc,1 +np.float64,0xbfe4f6da3de9edb4,0xbfe9187d85bd1ab5,1 +np.float64,0xbfeb8be1b3f717c4,0xbff4be8efaab2e75,1 +np.float64,0x3fed40bc31fa8178,0x3ff8d5ec4a7f3e9b,1 +np.float64,0xbfe40f8711681f0e,0xbfe78fa5c62b191b,1 +np.float64,0x3fd1034d94a2069c,0x3fd16e78e9efb85b,1 +np.float64,0x3fc74db15b2e9b60,0x3fc790f26e894098,1 +np.float64,0x3fd912a88cb22550,0x3fda7d0ab3b21308,1 +np.float64,0x3fd8948a3bb12914,0x3fd9e8950c7874c8,1 +np.float64,0xbfa7ada5242f5b50,0xbfa7b1f8db50c104,1 +np.float64,0x3feeb2e1c27d65c4,0x3fff000b7d09c9b7,1 +np.float64,0x3fe9d46cbbf3a8da,0x3ff1e6f405265a6e,1 +np.float64,0xbfe2480b77e49017,0xbfe4c83b9b37bf0c,1 +np.float64,0x3fe950ea9372a1d6,0x3ff130e62468bf2c,1 +np.float64,0x3fefa7272a7f4e4e,0x4004d8c9bf31ab58,1 +np.float64,0xbfe7309209ee6124,0xbfed5b94acef917a,1 +np.float64,0x3fd05e8c64a0bd18,0x3fd0bdb11e0903c6,1 +np.float64,0x3fd9236043b246c0,0x3fda90ccbe4bab1e,1 +np.float64,0xbfdc3d6805b87ad0,0xbfde5266e17154c3,1 +np.float64,0x3fe5e6bad76bcd76,0x3feacbc306c63445,1 +np.float64,0x3ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfde3d7390bc7ae8,0xbfe06cd480bd0196,1 
+np.float64,0xbfd3e2e3c0a7c5c8,0xbfd490edc0a45e26,1 +np.float64,0x3fe39871d76730e4,0x3fe6ce54d1719953,1 +np.float64,0x3fdff00ebcbfe01c,0x3fe1894b6655a6d0,1 +np.float64,0x3f91b7ad58236f40,0x3f91b8213bcb8b0b,1 +np.float64,0xbfd99f48f7b33e92,0xbfdb23d544f62591,1 +np.float64,0x3fae3512cc3c6a20,0x3fae3e10939fd7b5,1 +np.float64,0x3fcc4cf3db3899e8,0x3fccc698a15176d6,1 +np.float64,0xbfd0927e39a124fc,0xbfd0f5522e2bc030,1 +np.float64,0x3fcee859633dd0b0,0x3fcf87bdef7a1e82,1 +np.float64,0xbfe2a8b69565516d,0xbfe5593437b6659a,1 +np.float64,0x3fecf61e20f9ec3c,0x3ff7fda16b0209d4,1 +np.float64,0xbfbf37571e3e6eb0,0xbfbf5f4e1379a64c,1 +np.float64,0xbfd54e1b75aa9c36,0xbfd626223b68971a,1 +np.float64,0x3fe1035a56e206b4,0x3fe2f5651ca0f4b0,1 +np.float64,0x3fe4992989e93254,0x3fe876751afa70dc,1 +np.float64,0x3fc8c313d3318628,0x3fc913faf15d1562,1 +np.float64,0x3f99f6ba8833ed80,0x3f99f8274fb94828,1 +np.float64,0xbfd4a58af0a94b16,0xbfd56947c276e04f,1 +np.float64,0x3fc66f8c872cdf18,0x3fc6ab7a14372a73,1 +np.float64,0x3fc41eee0d283de0,0x3fc449ff1ff0e7a6,1 +np.float64,0x3fefd04d287fa09a,0x4007585010cfa9b0,1 +np.float64,0x3fce9e746f3d3ce8,0x3fcf39514bbe5070,1 +np.float64,0xbfe8056f72700adf,0xbfef2ee2c13e67ba,1 +np.float64,0x3fdd6b1ec0bad63c,0x3fdfccf2ba144fa8,1 +np.float64,0x3fd92ee432b25dc8,0x3fda9e6b96b2b142,1 +np.float64,0xbfc4d18f9529a320,0xbfc50150fb4de0cc,1 +np.float64,0xbfe09939a7613274,0xbfe262d703c317af,1 +np.float64,0xbfd130b132a26162,0xbfd19f5a00ae29c4,1 +np.float64,0x3fa06e21d420dc40,0x3fa06f93aba415fb,1 +np.float64,0x3fc5c48fbd2b8920,0x3fc5fb3bfad3bf55,1 +np.float64,0xbfdfa2bacbbf4576,0xbfe155f839825308,1 +np.float64,0x3fe3e1fa0f67c3f4,0x3fe745081dd4fd03,1 +np.float64,0x3fdae58289b5cb04,0x3fdcac1f6789130a,1 +np.float64,0xbf8ed3ba103da780,0xbf8ed452a9cc1442,1 +np.float64,0xbfec06b46f780d69,0xbff5b86f30d70908,1 +np.float64,0xbfe990c13b732182,0xbff187a90ae611f8,1 +np.float64,0xbfdd46c738ba8d8e,0xbfdf9eee0a113230,1 +np.float64,0x3fe08b83f3611708,0x3fe2501b1c77035c,1 +np.float64,0xbfd501b65baa036c,0xbfd5d05de3fceac8,1 +np.float64,0xbfcf4fa21f3e9f44,0xbfcff5829582c0b6,1 +np.float64,0xbfefbc0bfbff7818,0xc005eca1a2c56b38,1 +np.float64,0xbfe1ba6959e374d2,0xbfe3f8f88d128ce5,1 +np.float64,0xbfd4e74ee3a9ce9e,0xbfd5b2cabeb45e6c,1 +np.float64,0xbfe77c38eaeef872,0xbfedfd332d6f1c75,1 +np.float64,0x3fa9b5e4fc336bc0,0x3fa9bb6f6b80b4af,1 +np.float64,0xbfecba63917974c7,0xbff75e44df7f8e81,1 +np.float64,0x3fd6cf17b2ad9e30,0x3fd7db0b93b7f2b5,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cbrt.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cbrt.csv new file mode 100644 index 0000000..ad141cb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cbrt.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3ee7054c,0x3f4459ea,2 +np.float32,0x7d1e2489,0x54095925,2 +np.float32,0x7ee5edf5,0x549b992b,2 +np.float32,0x380607,0x2a425e72,2 +np.float32,0x34a8f3,0x2a3e6603,2 +np.float32,0x3eee2844,0x3f465a45,2 +np.float32,0x59e49c,0x2a638d0a,2 +np.float32,0xbf72c77a,0xbf7b83d4,2 +np.float32,0x7f2517b4,0x54af8bf0,2 +np.float32,0x80068a69,0xa9bdfe8b,2 +np.float32,0xbe8e3578,0xbf270775,2 +np.float32,0xbe4224dc,0xbf131119,2 +np.float32,0xbe0053b8,0xbf001be2,2 +np.float32,0x70e8d,0x29c2ddc5,2 +np.float32,0xff63f7b5,0xd4c37b7f,2 +np.float32,0x3f00bbed,0x3f4b9335,2 +np.float32,0x3f135f4e,0x3f54f5d4,2 +np.float32,0xbe13a488,0xbf063d13,2 +np.float32,0x3f14ec78,0x3f55b478,2 +np.float32,0x7ec35cfb,0x54935fbf,2 
+np.float32,0x7d41c589,0x5412f904,2 +np.float32,0x3ef8a16e,0x3f4937f7,2 +np.float32,0x3f5d8464,0x3f73f279,2 +np.float32,0xbeec85ac,0xbf45e5cb,2 +np.float32,0x7f11f722,0x54a87cb1,2 +np.float32,0x8032c085,0xaa3c1219,2 +np.float32,0x80544bac,0xaa5eb9f2,2 +np.float32,0x3e944a10,0x3f296065,2 +np.float32,0xbf29fe50,0xbf5f5796,2 +np.float32,0x7e204d8d,0x545b03d5,2 +np.float32,0xfe1d0254,0xd4598127,2 +np.float32,0x80523129,0xaa5cdba9,2 +np.float32,0x806315fa,0xaa6b0eaf,2 +np.float32,0x3ed3d2a4,0x3f3ec117,2 +np.float32,0x7ee15007,0x549a8cc0,2 +np.float32,0x801ffb5e,0xaa213d4f,2 +np.float32,0x807f9f4a,0xaa7fbf76,2 +np.float32,0xbe45e854,0xbf1402d3,2 +np.float32,0x3d9e2e70,0x3eda0b64,2 +np.float32,0x51f404,0x2a5ca4d7,2 +np.float32,0xbe26a8b0,0xbf0bc54d,2 +np.float32,0x22c99a,0x2a25d2a7,2 +np.float32,0xbf71248b,0xbf7af2d5,2 +np.float32,0x7219fe,0x2a76608e,2 +np.float32,0x7f16fd7d,0x54aa6610,2 +np.float32,0x80716faa,0xaa75e5b9,2 +np.float32,0xbe24f9a4,0xbf0b4c65,2 +np.float32,0x800000,0x2a800000,2 +np.float32,0x80747456,0xaa780f27,2 +np.float32,0x68f9e8,0x2a6fa035,2 +np.float32,0x3f6a297e,0x3f7880d8,2 +np.float32,0x3f28b973,0x3f5ec8f6,2 +np.float32,0x7f58c577,0x54c03a70,2 +np.float32,0x804befcc,0xaa571b4f,2 +np.float32,0x3e2be027,0x3f0d36cf,2 +np.float32,0xfe7e80a4,0xd47f7ff7,2 +np.float32,0xfe9d444a,0xd489181b,2 +np.float32,0x3db3e790,0x3ee399d6,2 +np.float32,0xbf154c3e,0xbf55e23e,2 +np.float32,0x3d1096b7,0x3ea7f4aa,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x804e2521,0xaa592c06,2 +np.float32,0xbeda2f00,0xbf40a513,2 +np.float32,0x3f191788,0x3f57ae30,2 +np.float32,0x3ed24ade,0x3f3e4b34,2 +np.float32,0x807fadb4,0xaa7fc917,2 +np.float32,0xbe0a06dc,0xbf034234,2 +np.float32,0x3f250bba,0x3f5d276d,2 +np.float32,0x7e948b00,0x548682c8,2 +np.float32,0xfe65ecdc,0xd476fed2,2 +np.float32,0x6fdbdd,0x2a74c095,2 +np.float32,0x800112de,0xa9500fa6,2 +np.float32,0xfe63225c,0xd475fdee,2 +np.float32,0x7f3d9acd,0x54b7d648,2 +np.float32,0xfc46f480,0xd3bacf87,2 +np.float32,0xfe5deaac,0xd47417ff,2 +np.float32,0x60ce53,0x2a693d93,2 +np.float32,0x6a6e2f,0x2a70ba2c,2 +np.float32,0x7f43f0f1,0x54b9dcd0,2 +np.float32,0xbf6170c9,0xbf756104,2 +np.float32,0xbe5c9f74,0xbf197852,2 +np.float32,0xff1502b0,0xd4a9a693,2 +np.float32,0x8064f6af,0xaa6c886e,2 +np.float32,0xbf380564,0xbf6552e5,2 +np.float32,0xfeb9b7dc,0xd490e85f,2 +np.float32,0x7f34f941,0x54b5010d,2 +np.float32,0xbe9d4ca0,0xbf2cbd5f,2 +np.float32,0x3f6e43d2,0x3f79f240,2 +np.float32,0xbdad0530,0xbee0a8f2,2 +np.float32,0x3da18459,0x3edb9105,2 +np.float32,0xfd968340,0xd42a3808,2 +np.float32,0x3ea03e64,0x3f2dcf96,2 +np.float32,0x801d2f5b,0xaa1c6525,2 +np.float32,0xbf47d92d,0xbf6bb7e9,2 +np.float32,0x55a6b9,0x2a5fe9fb,2 +np.float32,0x77a7c2,0x2a7a4fb8,2 +np.float32,0xfebbc16e,0xd4916f88,2 +np.float32,0x3f5d3d6e,0x3f73d86a,2 +np.float32,0xfccd2b60,0xd3edcacb,2 +np.float32,0xbd026460,0xbea244b0,2 +np.float32,0x3e55bd,0x2a4968e4,2 +np.float32,0xbe7b5708,0xbf20490d,2 +np.float32,0xfe413cf4,0xd469171f,2 +np.float32,0x7710e3,0x2a79e657,2 +np.float32,0xfc932520,0xd3d4d9ca,2 +np.float32,0xbf764a1b,0xbf7cb8aa,2 +np.float32,0x6b1923,0x2a713aca,2 +np.float32,0xfe4dcd04,0xd46e092d,2 +np.float32,0xff3085ac,0xd4b381f8,2 +np.float32,0x3f72c438,0x3f7b82b4,2 +np.float32,0xbf6f0c6e,0xbf7a3852,2 +np.float32,0x801d2b1b,0xaa1c5d8d,2 +np.float32,0x3e9db91e,0x3f2ce50d,2 +np.float32,0x3f684f9d,0x3f77d8c5,2 +np.float32,0x7dc784,0x2a7e82cc,2 +np.float32,0x7d2c88e9,0x540d64f8,2 +np.float32,0x807fb708,0xaa7fcf51,2 +np.float32,0x8003c49a,0xa99e16e0,2 +np.float32,0x3ee4f5b8,0x3f43c3ff,2 
+np.float32,0xfe992c5e,0xd487e4ec,2 +np.float32,0x4b4dfa,0x2a568216,2 +np.float32,0x3d374c80,0x3eb5c6a8,2 +np.float32,0xbd3a4700,0xbeb6c15c,2 +np.float32,0xbf13cb80,0xbf5529e5,2 +np.float32,0xbe7306d4,0xbf1e7f91,2 +np.float32,0xbf800000,0xbf800000,2 +np.float32,0xbea42efe,0xbf2f394e,2 +np.float32,0x3e1981d0,0x3f07fe2c,2 +np.float32,0x3f17ea1d,0x3f572047,2 +np.float32,0x7dc1e0,0x2a7e7efe,2 +np.float32,0x80169c08,0xaa0fa320,2 +np.float32,0x3f3e1972,0x3f67d248,2 +np.float32,0xfe5d3c88,0xd473d815,2 +np.float32,0xbf677448,0xbf778aac,2 +np.float32,0x7e799b7d,0x547dd9e4,2 +np.float32,0x3f00bb2c,0x3f4b92cf,2 +np.float32,0xbeb29f9c,0xbf343798,2 +np.float32,0xbd6b7830,0xbec59a86,2 +np.float32,0x807a524a,0xaa7c282a,2 +np.float32,0xbe0a7a04,0xbf0366ab,2 +np.float32,0x80237470,0xaa26e061,2 +np.float32,0x3ccbc0f6,0x3e95744f,2 +np.float32,0x3edec6bc,0x3f41fcb6,2 +np.float32,0x3f635198,0x3f760efa,2 +np.float32,0x800eca4f,0xa9f960d8,2 +np.float32,0x3f800000,0x3f800000,2 +np.float32,0xff4eeb9e,0xd4bd456a,2 +np.float32,0x56f4e,0x29b29e70,2 +np.float32,0xff5383a0,0xd4bea95c,2 +np.float32,0x3f4c3a77,0x3f6d6d94,2 +np.float32,0x3f6c324a,0x3f79388c,2 +np.float32,0xbebdc092,0xbf37e27c,2 +np.float32,0xff258956,0xd4afb42e,2 +np.float32,0xdc78c,0x29f39012,2 +np.float32,0xbf2db06a,0xbf60f2f5,2 +np.float32,0xbe3c5808,0xbf119660,2 +np.float32,0xbf1ba866,0xbf58e0f4,2 +np.float32,0x80377640,0xaa41b79d,2 +np.float32,0x4fdc4d,0x2a5abfea,2 +np.float32,0x7f5e7560,0x54c1e516,2 +np.float32,0xfeb4d3f2,0xd48f9fde,2 +np.float32,0x3f12a622,0x3f549c7d,2 +np.float32,0x7f737ed7,0x54c7d2dc,2 +np.float32,0xa0ddc,0x29db456d,2 +np.float32,0xfe006740,0xd44b6689,2 +np.float32,0x3f17dfd4,0x3f571b6c,2 +np.float32,0x67546e,0x2a6e5dd1,2 +np.float32,0xff0d0f11,0xd4a693e2,2 +np.float32,0xbd170090,0xbeaa6738,2 +np.float32,0x5274a0,0x2a5d1806,2 +np.float32,0x3e154fe0,0x3f06be1a,2 +np.float32,0x7ddb302e,0x5440f0a7,2 +np.float32,0x3f579d10,0x3f71c2af,2 +np.float32,0xff2bc5bb,0xd4b1e20c,2 +np.float32,0xfee8fa6a,0xd49c4872,2 +np.float32,0xbea551b0,0xbf2fa07b,2 +np.float32,0xfeabc75c,0xd48d3004,2 +np.float32,0x7f50a5a8,0x54bdcbd1,2 +np.float32,0x50354b,0x2a5b110d,2 +np.float32,0x7d139f13,0x54063b6b,2 +np.float32,0xbeee1b08,0xbf465699,2 +np.float32,0xfe5e1650,0xd47427fe,2 +np.float32,0x7f7fffff,0x54cb2ff5,2 +np.float32,0xbf52ede8,0xbf6fff35,2 +np.float32,0x804bba81,0xaa56e8f1,2 +np.float32,0x6609e2,0x2a6d5e94,2 +np.float32,0x692621,0x2a6fc1d6,2 +np.float32,0xbf288bb6,0xbf5eb4d3,2 +np.float32,0x804f28c4,0xaa5a1b82,2 +np.float32,0xbdaad2a8,0xbedfb46e,2 +np.float32,0x5e04f8,0x2a66fb13,2 +np.float32,0x804c10da,0xaa573a81,2 +np.float32,0xbe412764,0xbf12d0fd,2 +np.float32,0x801c35cc,0xaa1aa250,2 +np.float32,0x6364d4,0x2a6b4cf9,2 +np.float32,0xbf6d3cea,0xbf79962f,2 +np.float32,0x7e5a9935,0x5472defb,2 +np.float32,0xbe73a38c,0xbf1ea19c,2 +np.float32,0xbd35e950,0xbeb550f2,2 +np.float32,0x46cc16,0x2a5223d6,2 +np.float32,0x3f005288,0x3f4b5b97,2 +np.float32,0x8034e8b7,0xaa3eb2be,2 +np.float32,0xbea775fc,0xbf3061cf,2 +np.float32,0xea0e9,0x29f87751,2 +np.float32,0xbf38faaf,0xbf65b89d,2 +np.float32,0xbedf3184,0xbf421bb0,2 +np.float32,0xbe04250c,0xbf015def,2 +np.float32,0x7f56dae8,0x54bfa901,2 +np.float32,0xfebe3e04,0xd492132e,2 +np.float32,0x3e4dc326,0x3f15f19e,2 +np.float32,0x803da197,0xaa48a621,2 +np.float32,0x7eeb35aa,0x549cc7c6,2 +np.float32,0xfebb3eb6,0xd4914dc0,2 +np.float32,0xfed17478,0xd496d5e2,2 +np.float32,0x80243694,0xaa280ed2,2 +np.float32,0x8017e666,0xaa1251d3,2 +np.float32,0xbf07e942,0xbf4f4a3e,2 +np.float32,0xbf578fa6,0xbf71bdab,2 
+np.float32,0x7ed8d80f,0x549896b6,2 +np.float32,0x3f2277ae,0x3f5bff11,2 +np.float32,0x7e6f195b,0x547a3cd4,2 +np.float32,0xbf441559,0xbf6a3a91,2 +np.float32,0x7f1fb427,0x54ad9d8d,2 +np.float32,0x71695f,0x2a75e12d,2 +np.float32,0xbd859588,0xbece19a1,2 +np.float32,0x7f5702fc,0x54bfb4eb,2 +np.float32,0x3f040008,0x3f4d4842,2 +np.float32,0x3de00ca5,0x3ef4df89,2 +np.float32,0x3eeabb03,0x3f45658c,2 +np.float32,0x3dfe5e65,0x3eff7480,2 +np.float32,0x1,0x26a14518,2 +np.float32,0x8065e400,0xaa6d4130,2 +np.float32,0xff50e1bb,0xd4bdde07,2 +np.float32,0xbe88635a,0xbf24b7e9,2 +np.float32,0x3f46bfab,0x3f6b4908,2 +np.float32,0xbd85c3c8,0xbece3168,2 +np.float32,0xbe633f64,0xbf1afdb1,2 +np.float32,0xff2c7706,0xd4b21f2a,2 +np.float32,0xbf02816c,0xbf4c812a,2 +np.float32,0x80653aeb,0xaa6cbdab,2 +np.float32,0x3eef1d10,0x3f469e24,2 +np.float32,0x3d9944bf,0x3ed7c36a,2 +np.float32,0x1b03d4,0x2a186b2b,2 +np.float32,0x3f251b7c,0x3f5d2e76,2 +np.float32,0x3edebab0,0x3f41f937,2 +np.float32,0xfefc2148,0xd4a073ff,2 +np.float32,0x7448ee,0x2a77f051,2 +np.float32,0x3bb8a400,0x3e3637ee,2 +np.float32,0x57df36,0x2a61d527,2 +np.float32,0xfd8b9098,0xd425fccb,2 +np.float32,0x7f67627e,0x54c4744d,2 +np.float32,0x801165d7,0xaa039fba,2 +np.float32,0x53aae5,0x2a5e2bfd,2 +np.float32,0x8014012b,0xaa09e4f1,2 +np.float32,0x3f7a2d53,0x3f7e0b4b,2 +np.float32,0x3f5fb700,0x3f74c052,2 +np.float32,0x7f192a06,0x54ab366c,2 +np.float32,0x3f569611,0x3f71603b,2 +np.float32,0x25e2dc,0x2a2a9b65,2 +np.float32,0x8036465e,0xaa405342,2 +np.float32,0x804118e1,0xaa4c5785,2 +np.float32,0xbef08d3e,0xbf4703e1,2 +np.float32,0x3447e2,0x2a3df0be,2 +np.float32,0xbf2a350b,0xbf5f6f8c,2 +np.float32,0xbec87e3e,0xbf3b4a73,2 +np.float32,0xbe99a4a8,0xbf2b6412,2 +np.float32,0x2ea2ae,0x2a36d77e,2 +np.float32,0xfcb69600,0xd3e4b9e3,2 +np.float32,0x717700,0x2a75eb06,2 +np.float32,0xbf4e81ce,0xbf6e4ecc,2 +np.float32,0xbe2021ac,0xbf09ebee,2 +np.float32,0xfef94eee,0xd49fda31,2 +np.float32,0x8563e,0x29ce0015,2 +np.float32,0x7f5d0ca5,0x54c17c0f,2 +np.float32,0x3f16459a,0x3f56590f,2 +np.float32,0xbe12f7bc,0xbf0608a0,2 +np.float32,0x3f10fd3d,0x3f53ce5f,2 +np.float32,0x3ca5e1b0,0x3e8b8d96,2 +np.float32,0xbe5288e0,0xbf17181f,2 +np.float32,0xbf7360f6,0xbf7bb8c9,2 +np.float32,0x7e989d33,0x5487ba88,2 +np.float32,0x3ea7b5dc,0x3f307839,2 +np.float32,0x7e8da0c9,0x548463f0,2 +np.float32,0xfeaf7888,0xd48e3122,2 +np.float32,0x7d90402d,0x5427d321,2 +np.float32,0x72e309,0x2a76f0ee,2 +np.float32,0xbe1faa34,0xbf09c998,2 +np.float32,0xbf2b1652,0xbf5fd1f4,2 +np.float32,0x8051eb0c,0xaa5c9cca,2 +np.float32,0x7edf02bf,0x549a058e,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x3f67f873,0x3f77b9c1,2 +np.float32,0x3f276b63,0x3f5e358c,2 +np.float32,0x7eeb4bf2,0x549cccb9,2 +np.float32,0x3bfa2c,0x2a46d675,2 +np.float32,0x3e133c50,0x3f061d75,2 +np.float32,0x3ca302c0,0x3e8abe4a,2 +np.float32,0x802e152e,0xaa361dd5,2 +np.float32,0x3f504810,0x3f6efd0a,2 +np.float32,0xbf43e0b5,0xbf6a2599,2 +np.float32,0x80800000,0xaa800000,2 +np.float32,0x3f1c0980,0x3f590e03,2 +np.float32,0xbf0084f6,0xbf4b7638,2 +np.float32,0xfee72d32,0xd49be10d,2 +np.float32,0x3f3c00ed,0x3f66f763,2 +np.float32,0x80511e81,0xaa5be492,2 +np.float32,0xfdd1b8a0,0xd43e1f0d,2 +np.float32,0x7d877474,0x54245785,2 +np.float32,0x7f110bfe,0x54a82207,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x6b6a2,0x29bfa706,2 +np.float32,0xbf5bdfd9,0xbf7357b7,2 +np.float32,0x8025bfa3,0xaa2a6676,2 +np.float32,0x3a3581,0x2a44dd3a,2 +np.float32,0x542c2a,0x2a5e9e2f,2 +np.float32,0xbe1d5650,0xbf091d57,2 +np.float32,0x3e97760d,0x3f2a935e,2 
+np.float32,0x7f5dcde2,0x54c1b460,2 +np.float32,0x800bde1e,0xa9e7bbaf,2 +np.float32,0x3e6b9e61,0x3f1cdf07,2 +np.float32,0x7d46c003,0x54143884,2 +np.float32,0x80073fbb,0xa9c49e67,2 +np.float32,0x503c23,0x2a5b1748,2 +np.float32,0x7eb7b070,0x549060c8,2 +np.float32,0xe9d8f,0x29f86456,2 +np.float32,0xbeedd4f0,0xbf464320,2 +np.float32,0x3f40d5d6,0x3f68eda1,2 +np.float32,0xff201f28,0xd4adc44b,2 +np.float32,0xbdf61e98,0xbefca9c7,2 +np.float32,0x3e8a0dc9,0x3f2562e3,2 +np.float32,0xbc0c0c80,0xbe515f61,2 +np.float32,0x2b3c15,0x2a3248e3,2 +np.float32,0x42a7bb,0x2a4df592,2 +np.float32,0x7f337947,0x54b480af,2 +np.float32,0xfec21db4,0xd4930f4b,2 +np.float32,0x7f4fdbf3,0x54bd8e94,2 +np.float32,0x1e2253,0x2a1e1286,2 +np.float32,0x800c4c80,0xa9ea819e,2 +np.float32,0x7e96f5b7,0x54873c88,2 +np.float32,0x7ce4e131,0x53f69ed4,2 +np.float32,0xbead8372,0xbf327b63,2 +np.float32,0x3e15ca7e,0x3f06e2f3,2 +np.float32,0xbf63e17b,0xbf7642da,2 +np.float32,0xff5bdbdb,0xd4c122f9,2 +np.float32,0x3f44411e,0x3f6a4bfd,2 +np.float32,0xfd007da0,0xd40029d2,2 +np.float32,0xbe940168,0xbf2944b7,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3d28e356,0x3eb0e1b8,2 +np.float32,0x3eb9fcd8,0x3f36a918,2 +np.float32,0x4f6410,0x2a5a51eb,2 +np.float32,0xbdf18e30,0xbefb1775,2 +np.float32,0x32edbd,0x2a3c49e3,2 +np.float32,0x801f70a5,0xaa2052da,2 +np.float32,0x8045a045,0xaa50f98c,2 +np.float32,0xbdd6cb00,0xbef17412,2 +np.float32,0x3f118f2c,0x3f541557,2 +np.float32,0xbe65c378,0xbf1b8f95,2 +np.float32,0xfd9a9060,0xd42bbb8b,2 +np.float32,0x3f04244f,0x3f4d5b0f,2 +np.float32,0xff05214b,0xd4a3656f,2 +np.float32,0xfe342cd0,0xd463b706,2 +np.float32,0x3f3409a8,0x3f63a836,2 +np.float32,0x80205db2,0xaa21e1e5,2 +np.float32,0xbf37c982,0xbf653a03,2 +np.float32,0x3f36ce8f,0x3f64d17e,2 +np.float32,0x36ffda,0x2a412d61,2 +np.float32,0xff569752,0xd4bf94e6,2 +np.float32,0x802fdb0f,0xaa386c3a,2 +np.float32,0x7ec55a87,0x5493df71,2 +np.float32,0x7f2234c7,0x54ae847e,2 +np.float32,0xbf02df76,0xbf4cb23d,2 +np.float32,0x3d68731a,0x3ec4c156,2 +np.float32,0x8146,0x2921cd8e,2 +np.float32,0x80119364,0xaa041235,2 +np.float32,0xfe6c1c00,0xd47930b5,2 +np.float32,0x8070da44,0xaa757996,2 +np.float32,0xfefbf50c,0xd4a06a9d,2 +np.float32,0xbf01b6a8,0xbf4c170a,2 +np.float32,0x110702,0x2a02aedb,2 +np.float32,0xbf063cd4,0xbf4e6f87,2 +np.float32,0x3f1ff178,0x3f5ad9dd,2 +np.float32,0xbf76dcd4,0xbf7cead0,2 +np.float32,0x80527281,0xaa5d1620,2 +np.float32,0xfea96df8,0xd48c8a7f,2 +np.float32,0x68db02,0x2a6f88b0,2 +np.float32,0x62d971,0x2a6adec7,2 +np.float32,0x3e816fe0,0x3f21df04,2 +np.float32,0x3f586379,0x3f720cc0,2 +np.float32,0x804a3718,0xaa5577ff,2 +np.float32,0x2e2506,0x2a3632b2,2 +np.float32,0x3f297d,0x2a4a4bf3,2 +np.float32,0xbe37aba8,0xbf105f88,2 +np.float32,0xbf18b264,0xbf577ea7,2 +np.float32,0x7f50d02d,0x54bdd8b5,2 +np.float32,0xfee296dc,0xd49ad757,2 +np.float32,0x7ec5137e,0x5493cdb1,2 +np.float32,0x3f4811f4,0x3f6bce3a,2 +np.float32,0xfdff32a0,0xd44af991,2 +np.float32,0x3f6ef140,0x3f7a2ed6,2 +np.float32,0x250838,0x2a2950b5,2 +np.float32,0x25c28e,0x2a2a6ada,2 +np.float32,0xbe875e50,0xbf244e90,2 +np.float32,0x3e3bdff8,0x3f11776a,2 +np.float32,0x3e9fe493,0x3f2daf17,2 +np.float32,0x804d8599,0xaa5897d9,2 +np.float32,0x3f0533da,0x3f4de759,2 +np.float32,0xbe63023c,0xbf1aefc8,2 +np.float32,0x80636e5e,0xaa6b547f,2 +np.float32,0xff112958,0xd4a82d5d,2 +np.float32,0x3e924112,0x3f28991f,2 +np.float32,0xbe996ffc,0xbf2b507a,2 +np.float32,0x802a7cda,0xaa314081,2 +np.float32,0x8022b524,0xaa25b21e,2 +np.float32,0x3f0808c8,0x3f4f5a43,2 +np.float32,0xbef0ec2a,0xbf471e0b,2 
+np.float32,0xff4c2345,0xd4bc6b3c,2 +np.float32,0x25ccc8,0x2a2a7a3b,2 +np.float32,0x7f4467d6,0x54ba0260,2 +np.float32,0x7f506539,0x54bdb846,2 +np.float32,0x412ab4,0x2a4c6a2a,2 +np.float32,0x80672c4a,0xaa6e3ef0,2 +np.float32,0xbddfb7f8,0xbef4c0ac,2 +np.float32,0xbf250bb9,0xbf5d276c,2 +np.float32,0x807dca65,0xaa7e84bd,2 +np.float32,0xbf63b8e0,0xbf763438,2 +np.float32,0xbeed1b0c,0xbf460f6b,2 +np.float32,0x8021594f,0xaa238136,2 +np.float32,0xbebc74c8,0xbf377710,2 +np.float32,0x3e9f8e3b,0x3f2d8fce,2 +np.float32,0x7f50ca09,0x54bdd6d8,2 +np.float32,0x805797c1,0xaa6197df,2 +np.float32,0x3de198f9,0x3ef56f98,2 +np.float32,0xf154d,0x29fb0392,2 +np.float32,0xff7fffff,0xd4cb2ff5,2 +np.float32,0xfed22fa8,0xd49702c4,2 +np.float32,0xbf733736,0xbf7baa64,2 +np.float32,0xbf206a8a,0xbf5b1108,2 +np.float32,0xbca49680,0xbe8b3078,2 +np.float32,0xfecba794,0xd4956e1a,2 +np.float32,0x80126582,0xaa061886,2 +np.float32,0xfee5cc82,0xd49b919f,2 +np.float32,0xbf7ad6ae,0xbf7e4491,2 +np.float32,0x7ea88c81,0x548c4c0c,2 +np.float32,0xbf493a0d,0xbf6c4255,2 +np.float32,0xbf06dda0,0xbf4ec1d4,2 +np.float32,0xff3f6e84,0xd4b86cf6,2 +np.float32,0x3e4fe093,0x3f1674b0,2 +np.float32,0x8048ad60,0xaa53fbde,2 +np.float32,0x7ebb7112,0x54915ac5,2 +np.float32,0x5bd191,0x2a652a0d,2 +np.float32,0xfe3121d0,0xd4626cfb,2 +np.float32,0x7e4421c6,0x546a3f83,2 +np.float32,0x19975b,0x2a15b14f,2 +np.float32,0x801c8087,0xaa1b2a64,2 +np.float32,0xfdf6e950,0xd448c0f6,2 +np.float32,0x74e711,0x2a786083,2 +np.float32,0xbf2b2f2e,0xbf5fdccb,2 +np.float32,0x7ed19ece,0x5496e00b,2 +np.float32,0x7f6f8322,0x54c6ba63,2 +np.float32,0x3e90316d,0x3f27cd69,2 +np.float32,0x7ecb42ce,0x54955571,2 +np.float32,0x3f6d49be,0x3f799aaf,2 +np.float32,0x8053d327,0xaa5e4f9a,2 +np.float32,0x7ebd7361,0x5491df3e,2 +np.float32,0xfdb6eed0,0xd435a7aa,2 +np.float32,0x7f3e79f4,0x54b81e4b,2 +np.float32,0xfe83afa6,0xd4813794,2 +np.float32,0x37c443,0x2a421246,2 +np.float32,0xff075a10,0xd4a44cd8,2 +np.float32,0x3ebc5fe0,0x3f377047,2 +np.float32,0x739694,0x2a77714e,2 +np.float32,0xfe832946,0xd4810b91,2 +np.float32,0x7f2638e6,0x54aff235,2 +np.float32,0xfe87f7a6,0xd4829a3f,2 +np.float32,0x3f50f3f8,0x3f6f3eb8,2 +np.float32,0x3eafa3d0,0x3f333548,2 +np.float32,0xbec26ee6,0xbf39626f,2 +np.float32,0x7e6f924f,0x547a66ff,2 +np.float32,0x7f0baa46,0x54a606f8,2 +np.float32,0xbf6dfc49,0xbf79d939,2 +np.float32,0x7f005709,0x54a1699d,2 +np.float32,0x7ee3d7ef,0x549b2057,2 +np.float32,0x803709a4,0xaa4138d7,2 +np.float32,0x3f7bf49a,0x3f7ea509,2 +np.float32,0x509db7,0x2a5b6ff5,2 +np.float32,0x7eb1b0d4,0x548ec9ff,2 +np.float32,0x7eb996ec,0x5490dfce,2 +np.float32,0xbf1fcbaa,0xbf5ac89e,2 +np.float32,0x3e2c9a98,0x3f0d69cc,2 +np.float32,0x3ea77994,0x3f306312,2 +np.float32,0x3f3cbfe4,0x3f67457c,2 +np.float32,0x8422a,0x29cd5a30,2 +np.float32,0xbd974558,0xbed6d264,2 +np.float32,0xfecee77a,0xd496387f,2 +np.float32,0x3f51876b,0x3f6f76f1,2 +np.float32,0x3b1a25,0x2a45ddad,2 +np.float32,0xfe9912f0,0xd487dd67,2 +np.float32,0x3f3ab13d,0x3f666d99,2 +np.float32,0xbf35565a,0xbf64341b,2 +np.float32,0x7d4e84aa,0x54162091,2 +np.float32,0x4c2570,0x2a574dea,2 +np.float32,0x7e82dca6,0x5480f26b,2 +np.float32,0x7f5503e7,0x54bf1c8d,2 +np.float32,0xbeb85034,0xbf361c59,2 +np.float32,0x80460a69,0xaa516387,2 +np.float32,0x805fbbab,0xaa68602c,2 +np.float32,0x7d4b4c1b,0x541557b8,2 +np.float32,0xbefa9a0a,0xbf49bfbc,2 +np.float32,0x3dbd233f,0x3ee76e09,2 +np.float32,0x58b6df,0x2a628d50,2 +np.float32,0xfcdcc180,0xd3f3aad9,2 +np.float32,0x423a37,0x2a4d8487,2 +np.float32,0xbed8b32a,0xbf403507,2 +np.float32,0x3f68e85d,0x3f780f0b,2 
+np.float32,0x7ee13c4b,0x549a883d,2 +np.float32,0xff2ed4c5,0xd4b2eec1,2 +np.float32,0xbf54dadc,0xbf70b99a,2 +np.float32,0x3f78b0af,0x3f7d8a32,2 +np.float32,0x3f377372,0x3f651635,2 +np.float32,0xfdaa6178,0xd43166bc,2 +np.float32,0x8060c337,0xaa6934a6,2 +np.float32,0x7ec752c2,0x54945cf6,2 +np.float32,0xbd01a760,0xbea1f624,2 +np.float32,0x6f6599,0x2a746a35,2 +np.float32,0x3f6315b0,0x3f75f95b,2 +np.float32,0x7f2baf32,0x54b1da44,2 +np.float32,0x3e400353,0x3f1286d8,2 +np.float32,0x40d3bf,0x2a4c0f15,2 +np.float32,0x7f733aca,0x54c7c03d,2 +np.float32,0x7e5c5407,0x5473828b,2 +np.float32,0x80191703,0xaa14b56a,2 +np.float32,0xbf4fc144,0xbf6ec970,2 +np.float32,0xbf1137a7,0xbf53eacd,2 +np.float32,0x80575410,0xaa615db3,2 +np.float32,0xbd0911d0,0xbea4fe07,2 +np.float32,0x3e98534a,0x3f2ae643,2 +np.float32,0x3f3b089a,0x3f669185,2 +np.float32,0x4fc752,0x2a5aacc1,2 +np.float32,0xbef44ddc,0xbf480b6e,2 +np.float32,0x80464217,0xaa519af4,2 +np.float32,0x80445fae,0xaa4fb6de,2 +np.float32,0x80771cf4,0xaa79eec8,2 +np.float32,0xfd9182e8,0xd4284fed,2 +np.float32,0xff0a5d16,0xd4a58288,2 +np.float32,0x3f33e169,0x3f63973e,2 +np.float32,0x8021a247,0xaa23f820,2 +np.float32,0xbf362522,0xbf648ab8,2 +np.float32,0x3f457cd7,0x3f6ac95e,2 +np.float32,0xbcadf400,0xbe8dc7e2,2 +np.float32,0x80237210,0xaa26dca7,2 +np.float32,0xbf1293c9,0xbf54939f,2 +np.float32,0xbc5e73c0,0xbe744a37,2 +np.float32,0x3c03f980,0x3e4d44df,2 +np.float32,0x7da46f,0x2a7e6b20,2 +np.float32,0x5d4570,0x2a665dd0,2 +np.float32,0x3e93fbac,0x3f294287,2 +np.float32,0x7e6808fd,0x5477bfa4,2 +np.float32,0xff5aa9a6,0xd4c0c925,2 +np.float32,0xbf5206ba,0xbf6fa767,2 +np.float32,0xbf6e513e,0xbf79f6f1,2 +np.float32,0x3ed01c0f,0x3f3da20f,2 +np.float32,0xff47d93d,0xd4bb1704,2 +np.float32,0x7f466cfd,0x54baa514,2 +np.float32,0x665e10,0x2a6d9fc8,2 +np.float32,0x804d0629,0xaa5820e8,2 +np.float32,0x7e0beaa0,0x54514e7e,2 +np.float32,0xbf7fcb6c,0xbf7fee78,2 +np.float32,0x3f6c5b03,0x3f7946dd,2 +np.float32,0x3e941504,0x3f294c30,2 +np.float32,0xbf2749ad,0xbf5e26a1,2 +np.float32,0xfec2a00a,0xd493302d,2 +np.float32,0x3f15a358,0x3f560bce,2 +np.float32,0x3f15c4e7,0x3f561bcd,2 +np.float32,0xfedc8692,0xd499728c,2 +np.float32,0x7e8f6902,0x5484f180,2 +np.float32,0x7f663d62,0x54c42136,2 +np.float32,0x8027ea62,0xaa2d99b4,2 +np.float32,0x3f3d093d,0x3f67636d,2 +np.float32,0x7f118c33,0x54a85382,2 +np.float32,0x803e866a,0xaa499d43,2 +np.float32,0x80053632,0xa9b02407,2 +np.float32,0xbf36dd66,0xbf64d7af,2 +np.float32,0xbf560358,0xbf71292b,2 +np.float32,0x139a8,0x29596bc0,2 +np.float32,0xbe04f75c,0xbf01a26c,2 +np.float32,0xfe1c3268,0xd45920fa,2 +np.float32,0x7ec77f72,0x5494680c,2 +np.float32,0xbedde724,0xbf41bbba,2 +np.float32,0x3e81dbe0,0x3f220bfd,2 +np.float32,0x800373ac,0xa99989d4,2 +np.float32,0x3f7f859a,0x3f7fd72d,2 +np.float32,0x3eb9dc7e,0x3f369e80,2 +np.float32,0xff5f8eb7,0xd4c236b1,2 +np.float32,0xff1c03cb,0xd4ac44ac,2 +np.float32,0x18cfe1,0x2a14285b,2 +np.float32,0x7f21b075,0x54ae54fd,2 +np.float32,0xff490bd8,0xd4bb7680,2 +np.float32,0xbf15dc22,0xbf5626de,2 +np.float32,0xfe1d5a10,0xd459a9a3,2 +np.float32,0x750544,0x2a7875e4,2 +np.float32,0x8023d5df,0xaa2778b3,2 +np.float32,0x3e42aa08,0x3f1332b2,2 +np.float32,0x3ecaa751,0x3f3bf60d,2 +np.float32,0x0,0x0,2 +np.float32,0x80416da6,0xaa4cb011,2 +np.float32,0x3f4ea9ae,0x3f6e5e22,2 +np.float32,0x2113f4,0x2a230f8e,2 +np.float32,0x3f35c2e6,0x3f64619a,2 +np.float32,0xbf50db8a,0xbf6f3564,2 +np.float32,0xff4d5cea,0xd4bccb8a,2 +np.float32,0x7ee54420,0x549b72d2,2 +np.float32,0x64ee68,0x2a6c81f7,2 +np.float32,0x5330da,0x2a5dbfc2,2 
+np.float32,0x80047f88,0xa9a7b467,2 +np.float32,0xbda01078,0xbedae800,2 +np.float32,0xfe96d05a,0xd487315f,2 +np.float32,0x8003cc10,0xa99e7ef4,2 +np.float32,0x8007b4ac,0xa9c8aa3d,2 +np.float32,0x5d4bcf,0x2a66630e,2 +np.float32,0xfdd0c0b0,0xd43dd403,2 +np.float32,0xbf7a1d82,0xbf7e05f0,2 +np.float32,0x74ca33,0x2a784c0f,2 +np.float32,0x804f45e5,0xaa5a3640,2 +np.float32,0x7e6d16aa,0x547988c4,2 +np.float32,0x807d5762,0xaa7e3714,2 +np.float32,0xfecf93d0,0xd4966229,2 +np.float32,0xfecbd25c,0xd4957890,2 +np.float32,0xff7db31c,0xd4ca93b0,2 +np.float32,0x3dac9e18,0x3ee07c4a,2 +np.float32,0xbf4b2d28,0xbf6d0509,2 +np.float32,0xbd4f4c50,0xbebd62e0,2 +np.float32,0xbd2eac40,0xbeb2e0ee,2 +np.float32,0x3d01b69b,0x3ea1fc7b,2 +np.float32,0x7ec63902,0x549416ed,2 +np.float32,0xfcc47700,0xd3ea616d,2 +np.float32,0xbf5ddec2,0xbf7413a1,2 +np.float32,0xff6a6110,0xd4c54c52,2 +np.float32,0xfdfae2a0,0xd449d335,2 +np.float32,0x7e54868c,0x547099cd,2 +np.float32,0x802b5b88,0xaa327413,2 +np.float32,0x80440e72,0xaa4f647a,2 +np.float32,0x3e313c94,0x3f0eaad5,2 +np.float32,0x3ebb492a,0x3f3715a2,2 +np.float32,0xbef56286,0xbf4856d5,2 +np.float32,0x3f0154ba,0x3f4be3a0,2 +np.float32,0xff2df86c,0xd4b2a376,2 +np.float32,0x3ef6a850,0x3f48af57,2 +np.float32,0x3d8d33e1,0x3ed1f22d,2 +np.float32,0x4dd9b9,0x2a58e615,2 +np.float32,0x7f1caf83,0x54ac83c9,2 +np.float32,0xbf7286b3,0xbf7b6d73,2 +np.float32,0x80064f88,0xa9bbbd9f,2 +np.float32,0xbf1f55fa,0xbf5a92db,2 +np.float32,0x546a81,0x2a5ed516,2 +np.float32,0xbe912880,0xbf282d0a,2 +np.float32,0x5df587,0x2a66ee6e,2 +np.float32,0x801f706c,0xaa205279,2 +np.float32,0x58cb6d,0x2a629ece,2 +np.float32,0xfe754f8c,0xd47c62da,2 +np.float32,0xbefb6f4c,0xbf49f8e7,2 +np.float32,0x80000001,0xa6a14518,2 +np.float32,0xbf067837,0xbf4e8df4,2 +np.float32,0x3e8e715c,0x3f271ee4,2 +np.float32,0x8009de9b,0xa9d9ebc8,2 +np.float32,0xbf371ff1,0xbf64f36e,2 +np.float32,0x7f5ce661,0x54c170e4,2 +np.float32,0x3f3c47d1,0x3f671467,2 +np.float32,0xfea5e5a6,0xd48b8eb2,2 +np.float32,0xff62b17f,0xd4c31e15,2 +np.float32,0xff315932,0xd4b3c98f,2 +np.float32,0xbf1c3ca8,0xbf5925b9,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0xfdf20868,0xd4476c3b,2 +np.float32,0x5b790e,0x2a64e052,2 +np.float32,0x3f5ddf4e,0x3f7413d4,2 +np.float32,0x7f1a3182,0x54ab9861,2 +np.float32,0x3f4b906e,0x3f6d2b9d,2 +np.float32,0x7ebac760,0x54912edb,2 +np.float32,0x7f626d3f,0x54c30a7e,2 +np.float32,0x3e27b058,0x3f0c0edc,2 +np.float32,0x8041e69c,0xaa4d2de8,2 +np.float32,0x3f42cee0,0x3f69b84a,2 +np.float32,0x7ec5fe83,0x5494085b,2 +np.float32,0x9d3e6,0x29d99cde,2 +np.float32,0x3edc50c0,0x3f41452d,2 +np.float32,0xbf2c463a,0xbf60562c,2 +np.float32,0x800bfa33,0xa9e871e8,2 +np.float32,0x7c9f2c,0x2a7dba4d,2 +np.float32,0x7f2ef9fd,0x54b2fb73,2 +np.float32,0x80741847,0xaa77cdb9,2 +np.float32,0x7e9c462a,0x5488ce1b,2 +np.float32,0x3ea47ec1,0x3f2f55a9,2 +np.float32,0x7f311c43,0x54b3b4f5,2 +np.float32,0x3d8f4c73,0x3ed2facd,2 +np.float32,0x806d7bd2,0xaa7301ef,2 +np.float32,0xbf633d24,0xbf760799,2 +np.float32,0xff4f9a3f,0xd4bd7a99,2 +np.float32,0x3f6021ca,0x3f74e73d,2 +np.float32,0x7e447015,0x546a5eac,2 +np.float32,0x6bff3c,0x2a71e711,2 +np.float32,0xe9c9f,0x29f85f06,2 +np.float32,0x8009fe14,0xa9dad277,2 +np.float32,0x807cf79c,0xaa7df644,2 +np.float32,0xff440e1b,0xd4b9e608,2 +np.float32,0xbddf9a50,0xbef4b5db,2 +np.float32,0x7f3b1c39,0x54b706fc,2 +np.float32,0x3c7471a0,0x3e7c16a7,2 +np.float32,0x8065b02b,0xaa6d18ee,2 +np.float32,0x7f63a3b2,0x54c36379,2 +np.float32,0xbe9c9d92,0xbf2c7d33,2 +np.float32,0x3d93aad3,0x3ed51a2e,2 +np.float32,0xbf41b040,0xbf694571,2 
+np.float32,0x80396b9e,0xaa43f899,2 +np.float64,0x800fa025695f404b,0xaaa4000ff64bb00c,2 +np.float64,0xbfecc00198f98003,0xbfeee0b623fbd94b,2 +np.float64,0x7f9eeb60b03dd6c0,0x55291bf8554bb303,2 +np.float64,0x3fba74485634e890,0x3fde08710bdb148d,2 +np.float64,0xbfdd9a75193b34ea,0xbfe8bf711660a2f5,2 +np.float64,0xbfcf92e17a3f25c4,0xbfe4119eda6f3773,2 +np.float64,0xbfe359e2ba66b3c6,0xbfeb0f7ae97ea142,2 +np.float64,0x20791a5640f24,0x2a9441f13d262bed,2 +np.float64,0x3fe455fbfae8abf8,0x3feb830d63e1022c,2 +np.float64,0xbd112b7b7a226,0x2aa238c097ec269a,2 +np.float64,0x93349ba126694,0x2aa0c363cd74465a,2 +np.float64,0x20300cd440602,0x2a9432b4f4081209,2 +np.float64,0x3fdcfae677b9f5cc,0x3fe892a9ee56fe8d,2 +np.float64,0xbfefaae3f7bf55c8,0xbfefe388066132c4,2 +np.float64,0x1a7d6eb634faf,0x2a92ed9851d29ab5,2 +np.float64,0x7fd5308d39aa6119,0x553be444e30326c6,2 +np.float64,0xff811c7390223900,0xd5205cb404952fa7,2 +np.float64,0x80083d24aff07a4a,0xaaa0285cf764d898,2 +np.float64,0x800633810ccc6703,0xaa9d65341419586b,2 +np.float64,0x800ff456223fe8ac,0xaaa423bbcc24dff1,2 +np.float64,0x7fde5c99aebcb932,0x553f71be7d6d9daa,2 +np.float64,0x3fed961c4b3b2c39,0x3fef2ca146270cac,2 +np.float64,0x7fe744d30c6e89a5,0x554220a4cdc78e62,2 +np.float64,0x3fd8f527c7b1ea50,0x3fe76101085be1cb,2 +np.float64,0xbfc96a14b232d428,0xbfe2ab1a8962606c,2 +np.float64,0xffe85f540cf0bea7,0xd54268dff964519a,2 +np.float64,0x800e3be0fe7c77c2,0xaaa3634efd7f020b,2 +np.float64,0x3feb90d032f721a0,0x3fee72a4579e8b12,2 +np.float64,0xffe05674aaa0ace9,0xd5401c9e3fb4abcf,2 +np.float64,0x3fefc2e32c3f85c6,0x3fefeb940924bf42,2 +np.float64,0xbfecfd89e9f9fb14,0xbfeef6addf73ee49,2 +np.float64,0xf5862717eb0c5,0x2aa3e1428780382d,2 +np.float64,0xffc3003b32260078,0xd53558f92202dcdb,2 +np.float64,0x3feb4c152c36982a,0x3fee5940f7da0825,2 +np.float64,0x3fe7147b002e28f6,0x3fecb2948f46d1e3,2 +np.float64,0x7fe00ad9b4a015b2,0x5540039d15e1da54,2 +np.float64,0x8010000000000000,0xaaa428a2f98d728b,2 +np.float64,0xbfd3a41bfea74838,0xbfe595ab45b1be91,2 +np.float64,0x7fdbfd6e5537fadc,0x553e9a6e1107b8d0,2 +np.float64,0x800151d9d9a2a3b4,0xaa918cd8fb63f40f,2 +np.float64,0x7fe6828401ad0507,0x5541eda05dcd1fcf,2 +np.float64,0x3fdae1e7a1b5c3d0,0x3fe7f711e72ecc35,2 +np.float64,0x7fdf4936133e926b,0x553fc29c8d5edea3,2 +np.float64,0x80079de12d4f3bc3,0xaa9f7b06a9286da4,2 +np.float64,0x3fe1261cade24c39,0x3fe9fe09488e417a,2 +np.float64,0xbfc20dce21241b9c,0xbfe0a842fb207a28,2 +np.float64,0x3fe3285dfa2650bc,0x3feaf85215f59ef9,2 +np.float64,0x7fe42b93aea85726,0x554148c3c3bb35e3,2 +np.float64,0xffe6c74e7f6d8e9c,0xd541ffd13fa36dbd,2 +np.float64,0x3fe73ea139ee7d42,0x3fecc402242ab7d3,2 +np.float64,0xffbd4b46be3a9690,0xd53392de917c72e4,2 +np.float64,0x800caed8df395db2,0xaaa2a811a02e6be4,2 +np.float64,0x800aacdb6c9559b7,0xaaa19d6fbc8feebf,2 +np.float64,0x839fb4eb073f7,0x2aa0264b98327c12,2 +np.float64,0xffd0157ba9a02af8,0xd5397157a11c0d05,2 +np.float64,0x7fddc8ff173b91fd,0x553f3e7663fb2ac7,2 +np.float64,0x67b365facf66d,0x2a9dd4d838b0d853,2 +np.float64,0xffe12e7fc7225cff,0xd5406272a83a8e1b,2 +np.float64,0x7fea5b19a034b632,0x5542e567658b3e36,2 +np.float64,0x124989d824932,0x2a90ba8dc7a39532,2 +np.float64,0xffe12ef098225de0,0xd54062968450a078,2 +np.float64,0x3fea2f44a3f45e8a,0x3fedee3c461f4716,2 +np.float64,0x3fe6b033e66d6068,0x3fec88c8035e06b1,2 +np.float64,0x3fe928a2ccf25146,0x3fed88d4cde7a700,2 +np.float64,0x3feead27e97d5a50,0x3fef8d7537d82e60,2 +np.float64,0x8003ab80b6875702,0xaa98adfedd7715a9,2 +np.float64,0x45a405828b481,0x2a9a1fa99a4eff1e,2 +np.float64,0x8002ddebad85bbd8,0xaa96babfda4e0031,2 
+np.float64,0x3fc278c32824f186,0x3fe0c8e7c979fbd5,2 +np.float64,0x2e10fffc5c221,0x2a96c30a766d06fa,2 +np.float64,0xffd6ba8c2ead7518,0xd53c8d1d92bc2788,2 +np.float64,0xbfeb5ec3a036bd87,0xbfee602bbf0a0d01,2 +np.float64,0x3fed5bd58f7ab7ab,0x3fef181bf591a4a7,2 +np.float64,0x7feb5274a5b6a4e8,0x55431fcf81876218,2 +np.float64,0xaf8fd6cf5f1fb,0x2aa1c6edbb1e2aaf,2 +np.float64,0x7fece718f179ce31,0x55437c74efb90933,2 +np.float64,0xbfa3c42d0c278860,0xbfd5a16407c77e73,2 +np.float64,0x800b5cff0576b9fe,0xaaa1fc4ecb0dec4f,2 +np.float64,0x800be89ae557d136,0xaaa244d115fc0963,2 +np.float64,0x800d2578f5ba4af2,0xaaa2e18a3a3fc134,2 +np.float64,0x80090ff93e321ff3,0xaaa0add578e3cc3c,2 +np.float64,0x28c5a240518c,0x2a81587cccd7e202,2 +np.float64,0x7fec066929780cd1,0x55434971435d1069,2 +np.float64,0x7fc84d4d15309a99,0x55372c204515694f,2 +np.float64,0xffe070a75de0e14e,0xd54025365046dad2,2 +np.float64,0x7fe5b27cc36b64f9,0x5541b5b822f0b6ca,2 +np.float64,0x3fdea35ac8bd46b6,0x3fe9086a0fb792c2,2 +np.float64,0xbfe79996f7af332e,0xbfece9571d37a5b3,2 +np.float64,0xffdfb47f943f6900,0xd53fe6c14c3366db,2 +np.float64,0xc015cf63802ba,0x2aa2517164d075f4,2 +np.float64,0x7feba98948375312,0x5543340b5b1f1181,2 +np.float64,0x8008678e6550cf1d,0xaaa043e7cea90da5,2 +np.float64,0x3fb11b92fa223726,0x3fd9f8b53be4d90b,2 +np.float64,0x7fc9b18cf0336319,0x55379b42da882047,2 +np.float64,0xbfe5043e736a087d,0xbfebd0c67db7a8e3,2 +np.float64,0x7fde88546a3d10a8,0x553f80cfe5bcf5fe,2 +np.float64,0x8006a6c82dcd4d91,0xaa9e171d182ba049,2 +np.float64,0xbfa0f707ac21ee10,0xbfd48e5d3faa1699,2 +np.float64,0xbfe7716bffaee2d8,0xbfecd8e6abfb8964,2 +np.float64,0x9511ccab2a23a,0x2aa0d56d748f0313,2 +np.float64,0x8003ddb9b847bb74,0xaa991ca06fd9d308,2 +np.float64,0x80030710fac60e23,0xaa9725845ac95fe8,2 +np.float64,0xffece5bbaeb9cb76,0xd5437c2670f894f4,2 +np.float64,0x3fd9be5c72b37cb9,0x3fe79f2e932a5708,2 +np.float64,0x1f050cca3e0a3,0x2a93f36499fe5228,2 +np.float64,0x3fd5422becaa8458,0x3fe6295d6150df58,2 +np.float64,0xffd72c050e2e580a,0xd53cbc52d73b495f,2 +np.float64,0xbfe66d5235ecdaa4,0xbfec6ca27e60bf23,2 +np.float64,0x17ac49a42f58a,0x2a923b5b757087a0,2 +np.float64,0xffd39edc40273db8,0xd53b2f7bb99b96bf,2 +np.float64,0x7fde6cf009bcd9df,0x553f77614eb30d75,2 +np.float64,0x80042b4c3fa85699,0xaa99c05fbdd057db,2 +np.float64,0xbfde5547f8bcaa90,0xbfe8f3147d67a940,2 +np.float64,0xbfdd02f9bf3a05f4,0xbfe894f2048aa3fe,2 +np.float64,0xbfa20ec82c241d90,0xbfd4fd02ee55aac7,2 +np.float64,0x8002f670f8c5ece3,0xaa96fad7e53dd479,2 +np.float64,0x80059f24d7eb3e4a,0xaa9c7312dae0d7bc,2 +np.float64,0x7fe6ae7423ad5ce7,0x5541f9430be53062,2 +np.float64,0xe135ea79c26be,0x2aa350d8f8c526e1,2 +np.float64,0x3fec188ce4f8311a,0x3feea44d21c23f68,2 +np.float64,0x800355688286aad2,0xaa97e6ca51eb8357,2 +np.float64,0xa2d6530b45acb,0x2aa15635bbd366e8,2 +np.float64,0x600e0150c01c1,0x2a9d1456ea6c239c,2 +np.float64,0x8009c30863338611,0xaaa118f94b188bcf,2 +np.float64,0x3fe7e4c0dfefc982,0x3fed07e8480b8c07,2 +np.float64,0xbfddac6407bb58c8,0xbfe8c46f63a50225,2 +np.float64,0xbc85e977790bd,0x2aa2344636ed713d,2 +np.float64,0xfff0000000000000,0xfff0000000000000,2 +np.float64,0xffcd1570303a2ae0,0xd5389a27d5148701,2 +np.float64,0xbf937334d026e660,0xbfd113762e4e29a7,2 +np.float64,0x3fdbfdaa9b37fb55,0x3fe84a425fdff7df,2 +np.float64,0xffc10800f5221000,0xd5349535ffe12030,2 +np.float64,0xaf40f3755e81f,0x2aa1c443af16cd27,2 +np.float64,0x800f7da34f7efb47,0xaaa3f14bf25fc89f,2 +np.float64,0xffe4a60125a94c02,0xd5416b764a294128,2 +np.float64,0xbf8e25aa903c4b40,0xbfcf5ebc275b4789,2 +np.float64,0x3fca681bbb34d038,0x3fe2e882bcaee320,2 
+np.float64,0xbfd0f3c9c1a1e794,0xbfe48d0df7b47572,2 +np.float64,0xffeb99b49d373368,0xd5433060dc641910,2 +np.float64,0x3fe554fb916aa9f8,0x3febf437cf30bd67,2 +np.float64,0x80079518d0af2a32,0xaa9f6ee87044745a,2 +np.float64,0x5e01a8a0bc036,0x2a9cdf0badf222c3,2 +np.float64,0xbfea9831b3f53064,0xbfee1601ee953ab3,2 +np.float64,0xbfc369d1a826d3a4,0xbfe110b675c311e0,2 +np.float64,0xa82e640d505cd,0x2aa1863d4e523b9c,2 +np.float64,0x3fe506d70a2a0dae,0x3febd1eba3aa83fa,2 +np.float64,0xcbacba7197598,0x2aa2adeb9927f1f2,2 +np.float64,0xc112d6038225b,0x2aa25978f12038b0,2 +np.float64,0xffa7f5f44c2febf0,0xd52d0ede02d4e18b,2 +np.float64,0x8006f218e34de433,0xaa9e870cf373b4eb,2 +np.float64,0xffe6d9a5d06db34b,0xd54204a4adc608c7,2 +np.float64,0x7fe717210eae2e41,0x554214bf3e2b5228,2 +np.float64,0xbfdd4b45cdba968c,0xbfe8a94c7f225f8e,2 +np.float64,0x883356571066b,0x2aa055ab0b2a8833,2 +np.float64,0x3fe307fc02a60ff8,0x3feae9175053288f,2 +np.float64,0x3fefa985f77f530c,0x3fefe31289446615,2 +np.float64,0x8005698a98aad316,0xaa9c17814ff7d630,2 +np.float64,0x3fea77333c74ee66,0x3fee098ba70e10fd,2 +np.float64,0xbfd1d00b0023a016,0xbfe4e497fd1cbea1,2 +np.float64,0x80009b0c39813619,0xaa8b130a6909cc3f,2 +np.float64,0x3fdbeb896fb7d714,0x3fe84502ba5437f8,2 +np.float64,0x3fb6e7e3562dcfc7,0x3fdca00d35c389ad,2 +np.float64,0xb2d46ebf65a8e,0x2aa1e2fe158d0838,2 +np.float64,0xbfd5453266aa8a64,0xbfe62a6a74c8ef6e,2 +np.float64,0x7fe993aa07732753,0x5542b5438bf31cb7,2 +np.float64,0xbfda5a098cb4b414,0xbfe7ce6d4d606203,2 +np.float64,0xbfe40c3ce068187a,0xbfeb61a32c57a6d0,2 +np.float64,0x3fcf17671d3e2ed0,0x3fe3f753170ab686,2 +np.float64,0xbfe4f814b6e9f02a,0xbfebcb67c60b7b08,2 +np.float64,0x800efedf59fdfdbf,0xaaa3ba4ed44ad45a,2 +np.float64,0x800420b556e8416b,0xaa99aa7fb14edeab,2 +np.float64,0xbf6e4ae6403c9600,0xbfc3cb2b29923989,2 +np.float64,0x3fda5c760a34b8ec,0x3fe7cf2821c52391,2 +np.float64,0x7f898faac0331f55,0x5522b44a01408188,2 +np.float64,0x3fd55af4b7aab5e9,0x3fe631f6d19503b3,2 +np.float64,0xbfa30a255c261450,0xbfd55caf0826361d,2 +np.float64,0x7fdfb801343f7001,0x553fe7ee50b9199a,2 +np.float64,0x7fa89ee91c313dd1,0x552d528ca2a4d659,2 +np.float64,0xffea72921d34e524,0xd542eb01af2e470d,2 +np.float64,0x3feddf0f33fbbe1e,0x3fef462b67fc0a91,2 +np.float64,0x3fe36700b566ce01,0x3feb1596caa8eff7,2 +np.float64,0x7fe6284a25ac5093,0x5541d58be3956601,2 +np.float64,0xffda16f7c8b42df0,0xd53de4f722485205,2 +np.float64,0x7f9355b94026ab72,0x552578cdeb41d2ca,2 +np.float64,0xffd3a9b022275360,0xd53b347b02dcea21,2 +np.float64,0x3fcb7f4f4a36fe9f,0x3fe32a40e9f6c1aa,2 +np.float64,0x7fdb958836372b0f,0x553e746103f92111,2 +np.float64,0x3fd37761c0a6eec4,0x3fe5853c5654027e,2 +np.float64,0x3fe449f1a2e893e4,0x3feb7d9e4eacc356,2 +np.float64,0x80077dfbef0efbf9,0xaa9f4ed788d2fadd,2 +np.float64,0x4823aa7890476,0x2a9a6eb4b653bad5,2 +np.float64,0xbfede01a373bc034,0xbfef468895fbcd29,2 +np.float64,0xbfe2bac5f125758c,0xbfeac4811c4dd66f,2 +np.float64,0x3fec10373af8206e,0x3feea14529e0f178,2 +np.float64,0x3fe305e30ca60bc6,0x3feae81a2f9d0302,2 +np.float64,0xa9668c5f52cd2,0x2aa1910e3a8f2113,2 +np.float64,0xbfd98b1717b3162e,0xbfe78f75995335d2,2 +np.float64,0x800fa649c35f4c94,0xaaa402ae79026a8f,2 +np.float64,0xbfb07dacf620fb58,0xbfd9a7d33d93a30f,2 +np.float64,0x80015812f382b027,0xaa91a843e9c85c0e,2 +np.float64,0x3fc687d96c2d0fb3,0x3fe1ef0ac16319c5,2 +np.float64,0xbfecad2ecd795a5e,0xbfeed9f786697af0,2 +np.float64,0x1608c1242c119,0x2a91cd11e9b4ccd2,2 +np.float64,0x6df775e8dbeef,0x2a9e6ba8c71130eb,2 +np.float64,0xffe96e9332b2dd26,0xd542ac342d06299b,2 
+np.float64,0x7fecb6a3b8396d46,0x5543718af8162472,2 +np.float64,0x800d379f893a6f3f,0xaaa2ea36bbcb9308,2 +np.float64,0x3f924cdb202499b6,0x3fd0bb90af8d1f79,2 +np.float64,0x0,0x0,2 +np.float64,0x7feaf3b365f5e766,0x5543099a160e2427,2 +np.float64,0x3fea169ed0742d3e,0x3fede4d526e404f8,2 +np.float64,0x7feaf5f2f775ebe5,0x55430a2196c5f35a,2 +np.float64,0xbfc80d4429301a88,0xbfe2541f2ddd3334,2 +np.float64,0xffc75203b32ea408,0xd536db2837068689,2 +np.float64,0xffed2850e63a50a1,0xd5438b1217b72b8a,2 +np.float64,0x7fc16b0e7f22d61c,0x5534bcd0bfddb6f0,2 +np.float64,0x7feee8ed09fdd1d9,0x5543ed5b3ca483ab,2 +np.float64,0x7fb6c7ee662d8fdc,0x5531fffb5d46dafb,2 +np.float64,0x3fd77cebf8aef9d8,0x3fe6e9242e2bd29d,2 +np.float64,0x3f81c33f70238680,0x3fca4c7f3c9848f7,2 +np.float64,0x3fd59fea92ab3fd5,0x3fe649c1558cadd5,2 +np.float64,0xffeba82d4bf7505a,0xd54333bad387f7bd,2 +np.float64,0xffd37630e1a6ec62,0xd53b1ca62818c670,2 +np.float64,0xffec2c1e70b8583c,0xd5435213dcd27c22,2 +np.float64,0x7fec206971f840d2,0x55434f6660a8ae41,2 +np.float64,0x3fed2964adba52c9,0x3fef0642fe72e894,2 +np.float64,0xffd08e30d6211c62,0xd539b060e0ae02da,2 +np.float64,0x3e5f976c7cbf4,0x2a992e6ff991a122,2 +np.float64,0xffe6eee761adddce,0xd5420a393c67182f,2 +np.float64,0xbfe8ec9a31f1d934,0xbfed714426f58147,2 +np.float64,0x7fefffffffffffff,0x554428a2f98d728b,2 +np.float64,0x3fb3ae8b2c275d16,0x3fdb36b81b18a546,2 +np.float64,0x800f73df4dfee7bf,0xaaa3ed1a3e2cf49c,2 +np.float64,0xffd0c8873b21910e,0xd539ce6a3eab5dfd,2 +np.float64,0x3facd6c49439ad80,0x3fd8886f46335df1,2 +np.float64,0x3935859c726b2,0x2a98775f6438dbb1,2 +np.float64,0x7feed879fbfdb0f3,0x5543e9d1ac239469,2 +np.float64,0xbfe84dd990f09bb3,0xbfed323af09543b1,2 +np.float64,0xbfe767cc5a6ecf98,0xbfecd4f39aedbacb,2 +np.float64,0xffd8bd91d5b17b24,0xd53d5eb3734a2609,2 +np.float64,0xbfe13edeb2a27dbe,0xbfea0a856f0b9656,2 +np.float64,0xd933dd53b267c,0x2aa3158784e428c9,2 +np.float64,0xbfef6fef987edfdf,0xbfefcfb1c160462b,2 +np.float64,0x8009eeda4893ddb5,0xaaa13268a41045b1,2 +np.float64,0xab48c7a156919,0x2aa1a1a9c124c87d,2 +np.float64,0xa997931d532f3,0x2aa192bfe5b7bbb4,2 +np.float64,0xffe39ce8b1e739d1,0xd5411fa1c5c2cbd8,2 +np.float64,0x7e7ac2f6fcf59,0x2a9fdf6f263a9e9f,2 +np.float64,0xbfee1e35a6fc3c6b,0xbfef5c25d32b4047,2 +np.float64,0xffe5589c626ab138,0xd5419d220cc9a6da,2 +np.float64,0x7fe12509bf224a12,0x55405f7036dc5932,2 +np.float64,0xa6f15ba94de2c,0x2aa17b3367b1fc1b,2 +np.float64,0x3fca8adbfa3515b8,0x3fe2f0ca775749e5,2 +np.float64,0xbfcb03aa21360754,0xbfe30d5b90ca41f7,2 +np.float64,0x3fefafb2da7f5f66,0x3fefe5251aead4e7,2 +np.float64,0xffd90a59d23214b4,0xd53d7cf63a644f0e,2 +np.float64,0x3fba499988349333,0x3fddf84154fab7e5,2 +np.float64,0x800a76a0bc54ed42,0xaaa17f68cf67f2fa,2 +np.float64,0x3fea33d15bb467a3,0x3fedeff7f445b2ff,2 +np.float64,0x8005d9b0726bb362,0xaa9cd48624afeca9,2 +np.float64,0x7febf42e9a77e85c,0x55434541d8073376,2 +np.float64,0xbfedfc4469bbf889,0xbfef505989f7ee7d,2 +np.float64,0x8001211f1422423f,0xaa90a9889d865349,2 +np.float64,0x800e852f7fdd0a5f,0xaaa3845f11917f8e,2 +np.float64,0xffefd613c87fac27,0xd5441fd17ec669b4,2 +np.float64,0x7fed2a74543a54e8,0x55438b8c637da8b8,2 +np.float64,0xb83d50ff707aa,0x2aa210b4fc11e4b2,2 +np.float64,0x10000000000000,0x2aa428a2f98d728b,2 +np.float64,0x474ad9208e97,0x2a84e5a31530368a,2 +np.float64,0xffd0c5498ea18a94,0xd539ccc0e5cb425e,2 +np.float64,0x8001a8e9c82351d4,0xaa92f1aee6ca5b7c,2 +np.float64,0xd28db1e5a51b6,0x2aa2e328c0788f4a,2 +np.float64,0x3bf734ac77ee7,0x2a98da65c014b761,2 +np.float64,0x3fe56e17c96adc30,0x3febff2b6b829b7a,2 
+np.float64,0x7783113eef063,0x2a9f46c3f09eb42c,2 +np.float64,0x3fd69d4e42ad3a9d,0x3fe69f83a21679f4,2 +np.float64,0x3fd34f4841a69e90,0x3fe5766b3c771616,2 +np.float64,0x3febb49895b76931,0x3fee7fcb603416c9,2 +np.float64,0x7fe8d6cb55f1ad96,0x554286c3b3bf4313,2 +np.float64,0xbfe67c6ba36cf8d8,0xbfec730218f2e284,2 +np.float64,0xffef9d97723f3b2e,0xd54413e38b6c29be,2 +np.float64,0x12d8cd2a25b1b,0x2a90e5ccd37b8563,2 +np.float64,0x81fe019103fc0,0x2aa01524155e73c5,2 +np.float64,0x7fe95d546f72baa8,0x5542a7fabfd425ff,2 +np.float64,0x800e742f1f9ce85e,0xaaa37cbe09e1f874,2 +np.float64,0xffd96bd3a732d7a8,0xd53da3086071264a,2 +np.float64,0x4ef2691e9de4e,0x2a9b3d316047fd6d,2 +np.float64,0x1a91684c3522e,0x2a92f25913c213de,2 +np.float64,0x3d5151b87aa2b,0x2a9909dbd9a44a84,2 +np.float64,0x800d9049435b2093,0xaaa31424e32d94a2,2 +np.float64,0xffe5b25fcc2b64bf,0xd541b5b0416b40b5,2 +np.float64,0xffe0eb784c21d6f0,0xd5404d083c3d6bc6,2 +np.float64,0x8007ceefbf0f9de0,0xaa9fbe0d739368b4,2 +np.float64,0xb78529416f0b,0x2a8ca3b29b5b3f18,2 +np.float64,0x7fba61130034c225,0x5532e6d4ca0f2918,2 +np.float64,0x3fba8d67ae351acf,0x3fde11efd6239b09,2 +np.float64,0x3fe7f24c576fe498,0x3fed0d63947a854d,2 +np.float64,0x2bb58dec576b3,0x2a965de7fca12aff,2 +np.float64,0xbfe86ceec4f0d9de,0xbfed3ea7f1d084e2,2 +np.float64,0x7fd1a7f7bca34fee,0x553a3f01b67fad2a,2 +np.float64,0x3fd9a43acfb34874,0x3fe7972dc5d8dfd6,2 +np.float64,0x7fd9861acdb30c35,0x553dad3b1bbb3b4d,2 +np.float64,0xffecc0c388398186,0xd54373d3b903deec,2 +np.float64,0x3fa6f86e9c2df0e0,0x3fd6bdbe40fcf710,2 +np.float64,0x800ddd99815bbb33,0xaaa33820d2f889bb,2 +np.float64,0x7fe087089b610e10,0x55402c868348a6d3,2 +np.float64,0x3fdf43d249be87a5,0x3fe933d29fbf7c23,2 +np.float64,0x7fe4f734c7a9ee69,0x5541822e56c40725,2 +np.float64,0x3feb39a9d3b67354,0x3fee526bf1f69f0e,2 +np.float64,0x3fe61454a0ec28a9,0x3fec46d7c36f7566,2 +np.float64,0xbfeafaa0a375f541,0xbfee3af2e49d457a,2 +np.float64,0x3fda7378e1b4e6f0,0x3fe7d613a3f92c40,2 +np.float64,0xe3e31c5fc7c64,0x2aa3645c12e26171,2 +np.float64,0xbfe97a556df2f4ab,0xbfeda8aa84cf3544,2 +np.float64,0xff612f9c80225f00,0xd514a51e5a2a8a97,2 +np.float64,0x800c51c8a0f8a391,0xaaa279fe7d40b50b,2 +np.float64,0xffd6f9d2312df3a4,0xd53ca783a5f8d110,2 +np.float64,0xbfead48bd7f5a918,0xbfee2cb2f89c5e57,2 +np.float64,0x800f5949e89eb294,0xaaa3e1a67a10cfef,2 +np.float64,0x800faf292b7f5e52,0xaaa40675e0c96cfd,2 +np.float64,0xbfedc238453b8470,0xbfef3c179d2d0209,2 +np.float64,0x3feb0443c5760888,0x3fee3e8bf29089c2,2 +np.float64,0xb26f69e164ded,0x2aa1df9f3dd7d765,2 +np.float64,0x3fcacdc053359b80,0x3fe300a67765b667,2 +np.float64,0x3fe8b274647164e8,0x3fed5a4cd4da8155,2 +np.float64,0x291e6782523ce,0x2a95ea7ac1b13a68,2 +np.float64,0xbfc4fc094e29f814,0xbfe1838671fc8513,2 +np.float64,0x3fbf1301f23e2600,0x3fdfb03a6f13e597,2 +np.float64,0xffeb36554ab66caa,0xd543193d8181e4f9,2 +np.float64,0xbfd969a52db2d34a,0xbfe78528ae61f16d,2 +np.float64,0x800cccd04d3999a1,0xaaa2b6b7a2d2d2d6,2 +np.float64,0x808eb4cb011d7,0x2aa005effecb2b4a,2 +np.float64,0x7fe839b3f9b07367,0x55425f61e344cd6d,2 +np.float64,0xbfeb25b6ed764b6e,0xbfee4b0234fee365,2 +np.float64,0xffefffffffffffff,0xd54428a2f98d728b,2 +np.float64,0xbfe01305da60260c,0xbfe9700b784af7e9,2 +np.float64,0xffcbf36b0a37e6d8,0xd538474b1d74ffe1,2 +np.float64,0xffaeebe3e83dd7c0,0xd52fa2e8dabf7209,2 +np.float64,0xbfd9913bf0b32278,0xbfe7915907aab13c,2 +np.float64,0xbfe7d125d9efa24c,0xbfecfff563177706,2 +np.float64,0xbfee98d23cbd31a4,0xbfef867ae393e446,2 +np.float64,0x3fe30efb67e61df6,0x3feaec6344633d11,2 +np.float64,0x1,0x2990000000000000,2 
+np.float64,0x7fd5524fd3aaa49f,0x553bf30d18ab877e,2 +np.float64,0xc98b403f93168,0x2aa29d2fadb13c07,2 +np.float64,0xffe57080046ae100,0xd541a3b1b687360e,2 +np.float64,0x7fe20bade5e4175b,0x5540a79b94294f40,2 +np.float64,0x3fe155400a22aa80,0x3fea15c45f5b5837,2 +np.float64,0x7fe428dc8f6851b8,0x554147fd2ce93cc1,2 +np.float64,0xffefb77eb67f6efc,0xd544195dcaff4980,2 +np.float64,0x3fe49e733b293ce6,0x3feba394b833452a,2 +np.float64,0x38e01e3e71c05,0x2a986b2c955bad21,2 +np.float64,0x7fe735eb376e6bd5,0x55421cc51290d92d,2 +np.float64,0xbfd81d8644b03b0c,0xbfe71ce6d6fbd51a,2 +np.float64,0x8009a32325134647,0xaaa10645d0e6b0d7,2 +np.float64,0x56031ab8ac064,0x2a9c074be40b1f80,2 +np.float64,0xff8989aa30331340,0xd522b2d319a0ac6e,2 +np.float64,0xbfd6c183082d8306,0xbfe6ab8ffb3a8293,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfe17b68b1e2f6d2,0xbfea28dac8e0c457,2 +np.float64,0x3fbb50e42236a1c8,0x3fde5b090d51e3bd,2 +np.float64,0xffc2bb7cbf2576f8,0xd5353f1b3571c17f,2 +np.float64,0xbfe7576bca6eaed8,0xbfecce388241f47c,2 +np.float64,0x3fe7b52b04ef6a56,0x3fecf495bef99e7e,2 +np.float64,0xffe5511af82aa236,0xd5419b11524e8350,2 +np.float64,0xbfe66d5edf2cdabe,0xbfec6ca7d7b5be8c,2 +np.float64,0xc84a0ba790942,0x2aa29346f16a2cb4,2 +np.float64,0x6db5e7a0db6be,0x2a9e659c0e8244a0,2 +np.float64,0x7fef8f7b647f1ef6,0x554410e67af75d27,2 +np.float64,0xbfe2b4ada7e5695c,0xbfeac1997ec5a064,2 +np.float64,0xbfe99372e03326e6,0xbfedb2662b287543,2 +np.float64,0x3fa45d352428ba6a,0x3fd5d8a895423abb,2 +np.float64,0x3fa029695c2052d3,0x3fd439f858998886,2 +np.float64,0xffe0a9bd3261537a,0xd54037d0cd8bfcda,2 +np.float64,0xbfef83e09a7f07c1,0xbfefd66a4070ce73,2 +np.float64,0x7fee3dcc31fc7b97,0x5543c8503869407e,2 +np.float64,0xffbd16f1603a2de0,0xd533872fa5be978b,2 +np.float64,0xbfe8173141b02e62,0xbfed1c478614c6f4,2 +np.float64,0xbfef57aa277eaf54,0xbfefc77fdab27771,2 +np.float64,0x7fe883a02f31073f,0x554271ff0e3208da,2 +np.float64,0xe3adb63bc75b7,0x2aa362d833d0e41c,2 +np.float64,0x8001c430bac38862,0xaa93575026d26510,2 +np.float64,0x12fb347225f67,0x2a90f00eb9edb3fe,2 +np.float64,0x3fe53f83cbaa7f08,0x3febead40de452c2,2 +np.float64,0xbfe7f67227efece4,0xbfed0f10e32ad220,2 +np.float64,0xb8c5b45d718b7,0x2aa2152912cda86d,2 +np.float64,0x3fd23bb734a4776e,0x3fe50e5d3008c095,2 +np.float64,0x8001fd558ee3faac,0xaa941faa1f7ed450,2 +np.float64,0xffe6bbeda9ed77db,0xd541fcd185a63afa,2 +np.float64,0x4361d79086c3c,0x2a99d692237c30b7,2 +np.float64,0xbfd012f004a025e0,0xbfe43093e290fd0d,2 +np.float64,0xffe1d8850423b10a,0xd54097cf79d8d01e,2 +np.float64,0x3fccf4df7939e9bf,0x3fe37f8cf8be6436,2 +np.float64,0x8000546bc6c0a8d8,0xaa861bb3588556f2,2 +np.float64,0xbfecb4d6ba7969ae,0xbfeedcb6239135fe,2 +np.float64,0xbfaeb425cc3d6850,0xbfd90cfc103bb896,2 +np.float64,0x800ec037ec7d8070,0xaaa39eae8bde9774,2 +np.float64,0xbfeeaf863dfd5f0c,0xbfef8e4514772a8a,2 +np.float64,0xffec67c6c4b8cf8d,0xd5435fad89f900cf,2 +np.float64,0x3fda4498da348932,0x3fe7c7f6b3f84048,2 +np.float64,0xbfd05fd3dea0bfa8,0xbfe4509265a9b65f,2 +np.float64,0x3fe42cc713a8598e,0x3feb706ba9cd533c,2 +np.float64,0xec22d4d7d845b,0x2aa39f8cccb9711c,2 +np.float64,0x7fda30606c3460c0,0x553deea865065196,2 +np.float64,0xbfd58cba8bab1976,0xbfe64327ce32d611,2 +np.float64,0xadd521c75baa4,0x2aa1b7efce201a98,2 +np.float64,0x7fed43c1027a8781,0x55439131832b6429,2 +np.float64,0x800bee278fb7dc4f,0xaaa247a71e776db4,2 +np.float64,0xbfe9be5dd2737cbc,0xbfedc2f9501755b0,2 +np.float64,0x8003f4854447e90b,0xaa994d9b5372b13b,2 +np.float64,0xbfe5d0f867eba1f1,0xbfec29f8dd8b33a4,2 +np.float64,0x3fd79102d5af2206,0x3fe6efaa7a1efddb,2 
+np.float64,0xbfeae783c835cf08,0xbfee33cdb4a44e81,2 +np.float64,0x3fcf1713e83e2e28,0x3fe3f7414753ddfb,2 +np.float64,0xffe5ab3cff2b567a,0xd541b3bf0213274a,2 +np.float64,0x7fe0fc65d8a1f8cb,0x554052761ac96386,2 +np.float64,0x7e81292efd026,0x2a9fdff8c01ae86f,2 +np.float64,0x80091176039222ec,0xaaa0aebf0565dfa6,2 +np.float64,0x800d2bf5ab5a57ec,0xaaa2e4a4c31e7e29,2 +np.float64,0xffd1912ea923225e,0xd53a33b2856726ab,2 +np.float64,0x800869918ed0d323,0xaaa0453408e1295d,2 +np.float64,0xffba0898fa341130,0xd532d19b202a9646,2 +np.float64,0xbfe09fac29613f58,0xbfe9b9687b5811a1,2 +np.float64,0xbfbd4ae82e3a95d0,0xbfdf1220f6f0fdfa,2 +np.float64,0xffea11d27bb423a4,0xd542d3d3e1522474,2 +np.float64,0xbfe6b05705ad60ae,0xbfec88d6bcab2683,2 +np.float64,0x3fe624a3f2ec4948,0x3fec4dcc78ddf871,2 +np.float64,0x53483018a6907,0x2a9bba8f92006b69,2 +np.float64,0xbfec0a6eeb7814de,0xbfee9f2a741248d7,2 +np.float64,0x3fe8c8ce6371919d,0x3fed63250c643482,2 +np.float64,0xbfe26b0ef964d61e,0xbfea9e511db83437,2 +np.float64,0xffa0408784208110,0xd52987f62c369ae9,2 +np.float64,0xffc153abc322a758,0xd534b384b5c5fe63,2 +np.float64,0xbfbdce88a63b9d10,0xbfdf4065ef0b01d4,2 +np.float64,0xffed4a4136fa9482,0xd54392a450f8b0af,2 +np.float64,0x8007aa18748f5432,0xaa9f8bd2226d4299,2 +np.float64,0xbfdab4d3e8b569a8,0xbfe7e9a5402540e5,2 +np.float64,0x7fe68914f92d1229,0x5541ef5e78fa35de,2 +np.float64,0x800a538bb1b4a718,0xaaa16bc487711295,2 +np.float64,0xffe02edbc8605db7,0xd5400f8f713df890,2 +np.float64,0xffe8968053712d00,0xd54276b9cc7f460a,2 +np.float64,0x800a4ce211d499c5,0xaaa1680491deb40c,2 +np.float64,0x3f988080f8310102,0x3fd2713691e99329,2 +np.float64,0xf64e42a7ec9c9,0x2aa3e6a7af780878,2 +np.float64,0xff73cc7100279900,0xd51b4478c3409618,2 +np.float64,0x71e6722ce3ccf,0x2a9ec76ddf296ce0,2 +np.float64,0x8006ca16ab0d942e,0xaa9e4bfd862af570,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0xbfed373e02ba6e7c,0xbfef0b2b7bb767b3,2 +np.float64,0xa6cb0f694d962,0x2aa179dd16b0242b,2 +np.float64,0x7fec14626cf828c4,0x55434ca55b7c85d5,2 +np.float64,0x3fcda404513b4808,0x3fe3a68e8d977752,2 +np.float64,0xbfeb94995f772933,0xbfee74091d288b81,2 +np.float64,0x3fce2299a13c4530,0x3fe3c2603f28d23b,2 +np.float64,0xffd07f4534a0fe8a,0xd539a8a6ebc5a603,2 +np.float64,0x7fdb1c651e3638c9,0x553e478a6385c86b,2 +np.float64,0x3fec758336f8eb06,0x3feec5f3b92c8b28,2 +np.float64,0x796fc87cf2dfa,0x2a9f7184a4ad8c49,2 +np.float64,0x3fef9ba866ff3750,0x3fefde6a446fc2cd,2 +np.float64,0x964d26c72c9a5,0x2aa0e143f1820179,2 +np.float64,0xbfef6af750bed5ef,0xbfefce04870a97bd,2 +np.float64,0x3fe2f3961aa5e72c,0x3feadf769321a3ff,2 +np.float64,0xbfd6b706e9ad6e0e,0xbfe6a8141c5c3b5d,2 +np.float64,0x7fe0ecc40a21d987,0x55404d72c2b46a82,2 +np.float64,0xbfe560d19deac1a3,0xbfebf962681a42a4,2 +np.float64,0xbfea37170ab46e2e,0xbfedf136ee9df02b,2 +np.float64,0xbfebf78947b7ef12,0xbfee9847ef160257,2 +np.float64,0x800551f8312aa3f1,0xaa9bee7d3aa5491b,2 +np.float64,0xffed2513897a4a26,0xd5438a58c4ae28ec,2 +np.float64,0x7fd962d75cb2c5ae,0x553d9f8a0c2016f3,2 +np.float64,0x3fefdd8512bfbb0a,0x3feff47d8da7424d,2 +np.float64,0xbfefa5b43bff4b68,0xbfefe1ca42867af0,2 +np.float64,0xbfc8a2853531450c,0xbfe279bb7b965729,2 +np.float64,0x800c8843bc391088,0xaaa2951344e7b29b,2 +np.float64,0x7fe22587bae44b0e,0x5540af8bb58cfe86,2 +np.float64,0xbfe159fae822b3f6,0xbfea182394eafd8d,2 +np.float64,0xbfe6fdfd50edfbfa,0xbfeca93f2a3597d0,2 +np.float64,0xbfe5cd5afaeb9ab6,0xbfec286a8ce0470f,2 +np.float64,0xbfc84bb97f309774,0xbfe263ef0f8f1f6e,2 +np.float64,0x7fd9c1e548b383ca,0x553dc4556874ecb9,2 
+np.float64,0x7fda43d33bb487a5,0x553df60f61532fc0,2 +np.float64,0xbfe774bd25eee97a,0xbfecda42e8578c1f,2 +np.float64,0x800df1f5ab9be3ec,0xaaa34184712e69db,2 +np.float64,0xbff0000000000000,0xbff0000000000000,2 +np.float64,0x3fe14ec21b629d84,0x3fea128244215713,2 +np.float64,0x7fc1ce7843239cf0,0x5534e3fa8285b7b8,2 +np.float64,0xbfe922b204724564,0xbfed86818687d649,2 +np.float64,0x3fc58924fb2b1248,0x3fe1aa715ff6ebbf,2 +np.float64,0x8008b637e4d16c70,0xaaa0760b53abcf46,2 +np.float64,0xffbf55bd4c3eab78,0xd53404a23091a842,2 +np.float64,0x9f6b4a753ed6a,0x2aa136ef9fef9596,2 +np.float64,0xbfd11da7f8a23b50,0xbfe49deb493710d8,2 +np.float64,0x800a2f07fcd45e10,0xaaa157237c98b4f6,2 +np.float64,0x3fdd4defa4ba9bdf,0x3fe8aa0bcf895f4f,2 +np.float64,0x7fe9b0ab05f36155,0x5542bc5335414473,2 +np.float64,0x3fe89c97de313930,0x3fed51a1189b8982,2 +np.float64,0x3fdd45c8773a8b91,0x3fe8a7c2096fbf5a,2 +np.float64,0xbfeb6f64daf6deca,0xbfee665167ef43ad,2 +np.float64,0xffdf9da1c4bf3b44,0xd53fdf141944a983,2 +np.float64,0x3fde092ed0bc125c,0x3fe8de25bfbfc2db,2 +np.float64,0xbfcb21f96b3643f4,0xbfe3147904c258cf,2 +np.float64,0x800c9c934f993927,0xaaa29f17c43f021b,2 +np.float64,0x9b91814d37230,0x2aa11329e59bf6b0,2 +np.float64,0x3fe28a7e0b6514fc,0x3feaad6d23e2eadd,2 +np.float64,0xffecf38395f9e706,0xd5437f3ee1cd61e4,2 +np.float64,0x3fcade92a935bd25,0x3fe3049f4c1da1d0,2 +np.float64,0x800ab25d95d564bc,0xaaa1a076d7c66e04,2 +np.float64,0xffc0989e1e21313c,0xd53467f3b8158298,2 +np.float64,0x3fd81523eeb02a48,0x3fe71a38d2da8a82,2 +np.float64,0x7fe5b9dd402b73ba,0x5541b7b9b8631010,2 +np.float64,0x2c160d94582c3,0x2a966e51b503a3d1,2 +np.float64,0x2c416ffa5882f,0x2a9675aaef8b29c4,2 +np.float64,0x7fefe2ff01bfc5fd,0x55442289faf22b86,2 +np.float64,0xbfd469bf5d28d37e,0xbfe5dd239ffdc7eb,2 +np.float64,0xbfdd56f3eabaade8,0xbfe8ac93244ca17b,2 +np.float64,0xbfe057b89160af71,0xbfe9941557340bb3,2 +np.float64,0x800c50e140b8a1c3,0xaaa2798ace9097ee,2 +np.float64,0xbfda5a8984b4b514,0xbfe7ce93d65a56b0,2 +np.float64,0xbfcd6458323ac8b0,0xbfe39872514127bf,2 +np.float64,0x3fefb1f5ebff63ec,0x3fefe5e761b49b89,2 +np.float64,0x3fea3abc1df47578,0x3fedf29a1c997863,2 +np.float64,0x7fcb4a528e3694a4,0x553815f169667213,2 +np.float64,0x8c77da7b18efc,0x2aa080e52bdedb54,2 +np.float64,0x800e5dde4c5cbbbd,0xaaa372b16fd8b1ad,2 +np.float64,0x3fd2976038a52ec0,0x3fe5316b4f79fdbc,2 +np.float64,0x69413a0ed2828,0x2a9dfacd9cb44286,2 +np.float64,0xbfebbac0bdb77582,0xbfee820d9288b631,2 +np.float64,0x1a12aa7c34256,0x2a92d407e073bbfe,2 +np.float64,0xbfc41a27c3283450,0xbfe143c8665b0d3c,2 +np.float64,0xffe4faa41369f548,0xd54183230e0ce613,2 +np.float64,0xbfdeae81f23d5d04,0xbfe90b734bf35b68,2 +np.float64,0x3fc984ba58330975,0x3fe2b19e9052008e,2 +np.float64,0x7fe6e51b8d2dca36,0x554207a74ae2bb39,2 +np.float64,0x80081a58a81034b2,0xaaa0117d4aff11c8,2 +np.float64,0x7fde3fddfe3c7fbb,0x553f67d0082acc67,2 +np.float64,0x3fac7c999038f933,0x3fd86ec2f5dc3aa4,2 +np.float64,0x7fa26b4c4c24d698,0x552a9e6ea8545c18,2 +np.float64,0x3fdacd06e6b59a0e,0x3fe7f0dc0e8f9c6d,2 +np.float64,0x80064b62cbec96c6,0xaa9d8ac0506fdd05,2 +np.float64,0xb858116170b1,0x2a8caea703d9ccc8,2 +np.float64,0xbfe8d94ccef1b29a,0xbfed69a8782cbf3d,2 +np.float64,0x8005607d6a6ac0fc,0xaa9c07cf8620b037,2 +np.float64,0xbfe66a52daacd4a6,0xbfec6b5e403e6864,2 +np.float64,0x7fc398c2e0273185,0x5535918245894606,2 +np.float64,0x74b2d7dce965c,0x2a9f077020defdbc,2 +np.float64,0x7fe8f7a4d9b1ef49,0x55428eeae210e8eb,2 +np.float64,0x80027deddc84fbdc,0xaa95b11ff9089745,2 +np.float64,0xffeba2a94e774552,0xd5433273f6568902,2 
+np.float64,0x80002f8259405f05,0xaa8240b68d7b9dc4,2 +np.float64,0xbfdf0d84883e1b0a,0xbfe92532c69c5802,2 +np.float64,0xbfcdfa7b6b3bf4f8,0xbfe3b997a84d0914,2 +np.float64,0x800c18b04e183161,0xaaa25d46d60b15c6,2 +np.float64,0xffeaf1e37c35e3c6,0xd543092cd929ac19,2 +np.float64,0xbfc5aa07752b5410,0xbfe1b36ab5ec741f,2 +np.float64,0x3fe5c491d1eb8924,0x3fec24a1c3f6a178,2 +np.float64,0xbfeb736937f6e6d2,0xbfee67cd296e6fa9,2 +np.float64,0xffec3d5718787aad,0xd5435602e1a2cc43,2 +np.float64,0x7fe71e1da86e3c3a,0x55421691ead882cb,2 +np.float64,0x3fdd6ed0c93adda2,0x3fe8b341d066c43c,2 +np.float64,0x7fbe3d7a203c7af3,0x5533c83e53283430,2 +np.float64,0x3fdc20cb56384197,0x3fe854676360aba9,2 +np.float64,0xb7a1ac636f436,0x2aa20b9d40d66e78,2 +np.float64,0x3fb1491bb8229237,0x3fda0fabad1738ee,2 +np.float64,0xbfdf9c0ce73f381a,0xbfe94b716dbe35ee,2 +np.float64,0xbfbd4f0ad23a9e18,0xbfdf1397329a2dce,2 +np.float64,0xbfe4e0caac69c196,0xbfebc119b8a181cd,2 +np.float64,0x5753641aaea6d,0x2a9c2ba3e92b0cd2,2 +np.float64,0x72bb814ae5771,0x2a9eda92fada66de,2 +np.float64,0x57ed8f5aafdb3,0x2a9c3c2e1d42e609,2 +np.float64,0xffec33359c38666a,0xd54353b2acd0daf1,2 +np.float64,0x3fa5fe6e8c2bfce0,0x3fd66a0b3bf2720a,2 +np.float64,0xffe2dc8d7ca5b91a,0xd540e6ebc097d601,2 +np.float64,0x7fd99d260eb33a4b,0x553db626c9c75f78,2 +np.float64,0xbfe2dd73e425bae8,0xbfead4fc4b93a727,2 +np.float64,0xdcd4a583b9a95,0x2aa33094c9a17ad7,2 +np.float64,0x7fb0af6422215ec7,0x553039a606e8e64f,2 +np.float64,0x7fdfab6227bf56c3,0x553fe3b26164aeda,2 +np.float64,0x1e4d265e3c9a6,0x2a93cba8a1a8ae6d,2 +np.float64,0xbfdc7d097238fa12,0xbfe86ee2f24fd473,2 +np.float64,0x7fe5d35d29eba6b9,0x5541bea5878bce2b,2 +np.float64,0xffcb886a903710d4,0xd53828281710aab5,2 +np.float64,0xffe058c7ffe0b190,0xd5401d61e9a7cbcf,2 +np.float64,0x3ff0000000000000,0x3ff0000000000000,2 +np.float64,0xffd5b1c1132b6382,0xd53c1c839c098340,2 +np.float64,0x3fe2e7956725cf2b,0x3fead9c907b9d041,2 +np.float64,0x800a8ee293951dc6,0xaaa18ce3f079f118,2 +np.float64,0x7febcd3085b79a60,0x55433c47e1f822ad,2 +np.float64,0x3feb0e14cd761c2a,0x3fee423542102546,2 +np.float64,0x3fb45e6d0628bcda,0x3fdb86db67d0c992,2 +np.float64,0x7fa836e740306dce,0x552d2907cb8118b2,2 +np.float64,0x3fd15ba25b22b745,0x3fe4b6b018409d78,2 +np.float64,0xbfb59980ce2b3300,0xbfdc1206274cb51d,2 +np.float64,0x3fdef1b87fbde371,0x3fe91dafc62124a1,2 +np.float64,0x7fed37a4337a6f47,0x55438e7e0b50ae37,2 +np.float64,0xffe6c87633ad90ec,0xd542001f216ab448,2 +np.float64,0x8008d2548ab1a4a9,0xaaa087ad272d8e17,2 +np.float64,0xbfd1d6744da3ace8,0xbfe4e71965adda74,2 +np.float64,0xbfb27f751224fee8,0xbfdaa82132775406,2 +np.float64,0x3fe2b336ae65666d,0x3feac0e6b13ec2d2,2 +np.float64,0xffc6bac2262d7584,0xd536a951a2eecb49,2 +np.float64,0x7fdb661321b6cc25,0x553e62dfd7fcd3f3,2 +np.float64,0xffe83567d5706acf,0xd5425e4bb5027568,2 +np.float64,0xbf7f0693e03e0d00,0xbfc9235314d53f82,2 +np.float64,0x3feb32b218766564,0x3fee4fd5847f3722,2 +np.float64,0x3fec25d33df84ba6,0x3feea91fcd4aebab,2 +np.float64,0x7fe17abecb22f57d,0x55407a8ba661207c,2 +np.float64,0xbfe5674b1eeace96,0xbfebfc351708dc70,2 +np.float64,0xbfe51a2d2f6a345a,0xbfebda702c9d302a,2 +np.float64,0x3fec05584af80ab0,0x3fee9d502a7bf54d,2 +np.float64,0xffda8871dcb510e4,0xd53e10105f0365b5,2 +np.float64,0xbfc279c31824f388,0xbfe0c9354d871484,2 +np.float64,0x1cbed61e397dc,0x2a937364712cd518,2 +np.float64,0x800787d198af0fa4,0xaa9f5c847affa1d2,2 +np.float64,0x80079f6d65af3edc,0xaa9f7d2863368bbd,2 +np.float64,0xb942f1e97285e,0x2aa2193e0c513b7f,2 +np.float64,0x7fe9078263320f04,0x554292d85dee2c18,2 
+np.float64,0xbfe4de0761a9bc0f,0xbfebbfe04116b829,2 +np.float64,0xbfdbe6f3fc37cde8,0xbfe843aea59a0749,2 +np.float64,0xffcb6c0de136d81c,0xd5381fd9c525b813,2 +np.float64,0x9b6bda9336d7c,0x2aa111c924c35386,2 +np.float64,0x3fe17eece422fdda,0x3fea2a9bacd78607,2 +np.float64,0xd8011c49b0024,0x2aa30c87574fc0c6,2 +np.float64,0xbfc0a08b3f214118,0xbfe034d48f0d8dc0,2 +np.float64,0x3fd60adb1eac15b8,0x3fe66e42e4e7e6b5,2 +np.float64,0x80011d68ea023ad3,0xaa909733befbb962,2 +np.float64,0xffb35ac32426b588,0xd5310c4be1c37270,2 +np.float64,0x3fee8b56c9bd16ae,0x3fef81d8d15f6939,2 +np.float64,0x3fdc10a45e382149,0x3fe84fbe4cf11e68,2 +np.float64,0xbfc85dc45e30bb88,0xbfe2687b5518abde,2 +np.float64,0x3fd53b85212a770a,0x3fe6270d6d920d0f,2 +np.float64,0x800fc158927f82b1,0xaaa40e303239586f,2 +np.float64,0x11af5e98235ed,0x2a908b04a790083f,2 +np.float64,0xbfe2a097afe54130,0xbfeab80269eece99,2 +np.float64,0xbfd74ac588ae958c,0xbfe6d8ca3828d0b8,2 +np.float64,0xffea18ab2ef43156,0xd542d579ab31df1e,2 +np.float64,0xbfecda7058f9b4e1,0xbfeeea29c33b7913,2 +np.float64,0x3fc4ac56ed2958b0,0x3fe16d3e2bd7806d,2 +np.float64,0x3feccc898cb99913,0x3feee531f217dcfa,2 +np.float64,0xffeb3a64c5b674c9,0xd5431a30a41f0905,2 +np.float64,0x3fe5a7ee212b4fdc,0x3fec1844af9076fc,2 +np.float64,0x80080fdb52301fb7,0xaaa00a8b4274db67,2 +np.float64,0x800b3e7e47d67cfd,0xaaa1ec2876959852,2 +np.float64,0x80063fb8ee2c7f73,0xaa9d7875c9f20d6f,2 +np.float64,0x7fdacf80d0b59f01,0x553e2acede4c62a8,2 +np.float64,0x401e9b24803d4,0x2a996a0a75d0e093,2 +np.float64,0x3fe6c29505ed852a,0x3fec907a6d8c10af,2 +np.float64,0x8005c04ee2cb809f,0xaa9caa9813faef46,2 +np.float64,0xbfe1360f21e26c1e,0xbfea06155d6985b6,2 +np.float64,0xffc70606682e0c0c,0xd536c239b9d4be0a,2 +np.float64,0x800e639afefcc736,0xaaa37547d0229a26,2 +np.float64,0x3fe5589290aab125,0x3febf5c925c4e6db,2 +np.float64,0x8003b59330276b27,0xaa98c47e44524335,2 +np.float64,0x800d67ec22dacfd8,0xaaa301251b6a730a,2 +np.float64,0x7fdaeb5025b5d69f,0x553e35397dfe87eb,2 +np.float64,0x3fdae32a24b5c654,0x3fe7f771bc108f6c,2 +np.float64,0xffe6c1fc93ad83f8,0xd541fe6a6a716756,2 +np.float64,0xbfd7b9c1d32f7384,0xbfe6fcdae563d638,2 +np.float64,0x800e1bea06fc37d4,0xaaa354c0bf61449c,2 +np.float64,0xbfd78f097aaf1e12,0xbfe6ef068329bdf4,2 +np.float64,0x7fea6a400874d47f,0x5542e905978ad722,2 +np.float64,0x8008b4377cb1686f,0xaaa074c87eee29f9,2 +np.float64,0x8002f3fb8d45e7f8,0xaa96f47ac539b614,2 +np.float64,0xbfcf2b3fd13e5680,0xbfe3fb91c0cc66ad,2 +np.float64,0xffecca2f5279945e,0xd54375f361075927,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0x7f84d5a5a029ab4a,0x552178d1d4e8640e,2 +np.float64,0x3fea8a4b64351497,0x3fee10c332440eb2,2 +np.float64,0x800fe01ac1dfc036,0xaaa41b34d91a4bee,2 +np.float64,0x3fc0b3d8872167b1,0x3fe03b178d354f8d,2 +np.float64,0x5ee8b0acbdd17,0x2a9cf69f2e317729,2 +np.float64,0x8006ef0407adde09,0xaa9e82888f3dd83e,2 +np.float64,0x7fdbb08a07b76113,0x553e7e4e35b938b9,2 +np.float64,0x49663f9c92cc9,0x2a9a95e0affe5108,2 +np.float64,0x7fd9b87e79b370fc,0x553dc0b5cff3dc7d,2 +np.float64,0xbfd86ae657b0d5cc,0xbfe73584d02bdd2b,2 +np.float64,0x3fd4d4a13729a942,0x3fe6030a962aaaf8,2 +np.float64,0x7fcc246bcb3848d7,0x5538557309449bba,2 +np.float64,0xbfdc86a7d5b90d50,0xbfe871a2983c2a29,2 +np.float64,0xd2a6e995a54dd,0x2aa2e3e9c0fdd6c0,2 +np.float64,0x3f92eb447825d680,0x3fd0eb4fd2ba16d2,2 +np.float64,0x800d4001697a8003,0xaaa2ee358661b75c,2 +np.float64,0x3fd3705fd1a6e0c0,0x3fe582a6f321d7d6,2 +np.float64,0xbfcfdf51533fbea4,0xbfe421c3bdd9f2a3,2 +np.float64,0x3fe268e87964d1d1,0x3fea9d47e08aad8a,2 
+np.float64,0x24b8901e49713,0x2a951adeefe7b31b,2 +np.float64,0x3fedb35d687b66bb,0x3fef36e440850bf8,2 +np.float64,0x3fb7ab5cbe2f56c0,0x3fdcf097380721c6,2 +np.float64,0x3f8c4eaa10389d54,0x3fceb7ecb605b73b,2 +np.float64,0xbfed831ed6fb063e,0xbfef25f462a336f1,2 +np.float64,0x7fd8c52112318a41,0x553d61b0ee609f58,2 +np.float64,0xbfe71c4ff76e38a0,0xbfecb5d32e789771,2 +np.float64,0xbfe35fb7b166bf70,0xbfeb12328e75ee6b,2 +np.float64,0x458e1a3a8b1c4,0x2a9a1cebadc81342,2 +np.float64,0x8003c1b3ad478368,0xaa98df5ed060b28c,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x7fe17098c162e131,0x5540775a9a3a104f,2 +np.float64,0xbfd95cb71732b96e,0xbfe7812acf7ea511,2 +np.float64,0x8000000000000001,0xa990000000000000,2 +np.float64,0xbfde0e7d9ebc1cfc,0xbfe8df9ca9e49a5b,2 +np.float64,0xffef4f67143e9ecd,0xd5440348a6a2f231,2 +np.float64,0x7fe37d23c826fa47,0x5541165de17caa03,2 +np.float64,0xbfcc0e5f85381cc0,0xbfe34b44b0deefe9,2 +np.float64,0x3fe858f1c470b1e4,0x3fed36ab90557d89,2 +np.float64,0x800e857278fd0ae5,0xaaa3847d13220545,2 +np.float64,0x3febd31a66f7a635,0x3fee8af90e66b043,2 +np.float64,0x7fd3fde1b127fbc2,0x553b5b186a49b968,2 +np.float64,0x3fd3dabb8b27b577,0x3fe5a99b446bed26,2 +np.float64,0xffeb4500f1768a01,0xd5431cab828e254a,2 +np.float64,0xffccca8fc6399520,0xd53884f8b505e79e,2 +np.float64,0xffeee9406b7dd280,0xd543ed6d27a1a899,2 +np.float64,0xffecdde0f0f9bbc1,0xd5437a6258b14092,2 +np.float64,0xe6b54005cd6a8,0x2aa378c25938dfda,2 +np.float64,0x7fe610f1022c21e1,0x5541cf460b972925,2 +np.float64,0xbfe5a170ec6b42e2,0xbfec1576081e3232,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cos.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cos.csv new file mode 100644 index 0000000..258ae48 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cos.csv @@ -0,0 +1,1375 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x3f800000,2 +np.float32,0x007b2490,0x3f800000,2 +np.float32,0x007c99fa,0x3f800000,2 +np.float32,0x00734a0c,0x3f800000,2 +np.float32,0x0070de24,0x3f800000,2 +np.float32,0x007fffff,0x3f800000,2 +np.float32,0x00000001,0x3f800000,2 +## -ve denormals ## +np.float32,0x80495d65,0x3f800000,2 +np.float32,0x806894f6,0x3f800000,2 +np.float32,0x80555a76,0x3f800000,2 +np.float32,0x804e1fb8,0x3f800000,2 +np.float32,0x80687de9,0x3f800000,2 +np.float32,0x807fffff,0x3f800000,2 +np.float32,0x80000001,0x3f800000,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x3f800000,2 +np.float32,0x80000000,0x3f800000,2 +np.float32,0x00800000,0x3f800000,2 +np.float32,0x80800000,0x3f800000,2 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x3f0a5140,2 +np.float32,0x3f800001,0x3f0a513f,2 +np.float32,0x3f800002,0x3f0a513d,2 +np.float32,0xc090a8b0,0xbe4332ce,2 +np.float32,0x41ce3184,0x3f4d1de1,2 +np.float32,0xc1d85848,0xbeaa8980,2 +np.float32,0x402b8820,0xbf653aa3,2 +np.float32,0x42b4e454,0xbf4a338b,2 +np.float32,0x42a67a60,0x3c58202e,2 +np.float32,0x41d92388,0xbed987c7,2 +np.float32,0x422dd66c,0x3f5dcab3,2 +np.float32,0xc28f5be6,0xbf5688d8,2 +np.float32,0x41ab2674,0xbf53aa3b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0x3f3504f3,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 
+np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x4016cbe4,0xbf3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x407b53d2,0xbf3504f1,2 +np.float32,0xc07b53d2,0xbf3504f1,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x40afede0,0x3f3504f7,2 +np.float32,0xc0afede0,0x3f3504f7,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0x3f3504f3,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x410a3ae7,0xbf3504fb,2 +np.float32,0xc10a3ae7,0xbf3504fb,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x41235ce2,0xbf3504ef,2 +np.float32,0xc1235ce2,0xbf3504ef,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x413c7edd,0x3f3504f4,2 +np.float32,0xc13c7edd,0x3f3504f4,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x4155a0d9,0x3f3504eb,2 +np.float32,0xc155a0d9,0x3f3504eb,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 
+np.float32,0x416ec2d4,0xbf3504f7,2 +np.float32,0xc16ec2d4,0xbf3504f7,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x4183f268,0xbf3504e7,2 +np.float32,0xc183f268,0xbf3504e7,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x41908365,0x3f3504f0,2 +np.float32,0xc1908365,0x3f3504f0,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x419d1463,0x3f3504ef,2 +np.float32,0xc19d1463,0x3f3504ef,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x41a9a561,0xbf3504ff,2 +np.float32,0xc1a9a561,0xbf3504ff,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x41b6365e,0xbf3504f6,2 +np.float32,0xc1b6365e,0xbf3504f6,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x41c2c75c,0x3f3504f8,2 +np.float32,0xc1c2c75c,0x3f3504f8,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x41cf585a,0x3f3504e7,2 +np.float32,0xc1cf585a,0x3f3504e7,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x41dbe958,0xbf350507,2 +np.float32,0xc1dbe958,0xbf350507,2 
+np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x41e87a55,0xbf3504ef,2 +np.float32,0xc1e87a55,0xbf3504ef,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x41f50b53,0x3f3504ff,2 +np.float32,0xc1f50b53,0x3f3504ff,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x4200ce28,0x3f3504f6,2 +np.float32,0xc200ce28,0x3f3504f6,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x420716a7,0xbf3504f8,2 +np.float32,0xc20716a7,0xbf3504f8,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x420d5f26,0xbf3504e7,2 +np.float32,0xc20d5f26,0xbf3504e7,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x4213a7a5,0x3f350507,2 +np.float32,0xc213a7a5,0x3f350507,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4219f024,0x3f3504d8,2 +np.float32,0xc219f024,0x3f3504d8,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x431d1463,0x3f800000,2 +np.float32,0xc31d1463,0x3f800000,2 +np.float32,0x422038a3,0xbf350516,2 +np.float32,0xc22038a3,0xbf350516,2 +np.float32,0x42a038a3,0x36c6cd61,2 +np.float32,0xc2a038a3,0x36c6cd61,2 
+np.float32,0x432038a3,0xbf800000,2 +np.float32,0xc32038a3,0xbf800000,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x43235ce2,0x3f800000,2 +np.float32,0xc3235ce2,0x3f800000,2 +np.float32,0x42268121,0xbf3504f6,2 +np.float32,0xc2268121,0xbf3504f6,2 +np.float32,0x42a68121,0x34e43aac,2 +np.float32,0xc2a68121,0x34e43aac,2 +np.float32,0x43268121,0xbf800000,2 +np.float32,0xc3268121,0xbf800000,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x4329a561,0x3f800000,2 +np.float32,0xc329a561,0x3f800000,2 +np.float32,0x422cc9a0,0x3f3504f8,2 +np.float32,0xc22cc9a0,0x3f3504f8,2 +np.float32,0x42acc9a0,0x35655a50,2 +np.float32,0xc2acc9a0,0x35655a50,2 +np.float32,0x432cc9a0,0xbf800000,2 +np.float32,0xc32cc9a0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x432fede0,0x3f800000,2 +np.float32,0xc32fede0,0x3f800000,2 +np.float32,0x4233121f,0x3f3504e7,2 +np.float32,0xc233121f,0x3f3504e7,2 +np.float32,0x42b3121f,0xb60f347d,2 +np.float32,0xc2b3121f,0xb60f347d,2 +np.float32,0x4333121f,0xbf800000,2 +np.float32,0xc333121f,0xbf800000,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x4336365e,0x3f800000,2 +np.float32,0xc336365e,0x3f800000,2 +np.float32,0x42395a9e,0xbf350507,2 +np.float32,0xc2395a9e,0xbf350507,2 +np.float32,0x42b95a9e,0x36651267,2 +np.float32,0xc2b95a9e,0x36651267,2 +np.float32,0x43395a9e,0xbf800000,2 +np.float32,0xc3395a9e,0xbf800000,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x433c7edd,0x3f800000,2 +np.float32,0xc33c7edd,0x3f800000,2 +np.float32,0x423fa31d,0xbf3504d7,2 +np.float32,0xc23fa31d,0xbf3504d7,2 +np.float32,0x42bfa31d,0xb69d7828,2 +np.float32,0xc2bfa31d,0xb69d7828,2 +np.float32,0x433fa31d,0xbf800000,2 +np.float32,0xc33fa31d,0xbf800000,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x4342c75c,0x3f800000,2 +np.float32,0xc342c75c,0x3f800000,2 +np.float32,0x4245eb9c,0x3f350517,2 +np.float32,0xc245eb9c,0x3f350517,2 +np.float32,0x42c5eb9c,0x36c8671d,2 +np.float32,0xc2c5eb9c,0x36c8671d,2 +np.float32,0x4345eb9c,0xbf800000,2 +np.float32,0xc345eb9c,0xbf800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x43490fdb,0x3f800000,2 +np.float32,0xc3490fdb,0x3f800000,2 +np.float32,0x424c341a,0x3f3504f5,2 +np.float32,0xc24c341a,0x3f3504f5,2 +np.float32,0x42cc341a,0x34ca9ee6,2 +np.float32,0xc2cc341a,0x34ca9ee6,2 +np.float32,0x434c341a,0xbf800000,2 +np.float32,0xc34c341a,0xbf800000,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x434f585a,0x3f800000,2 +np.float32,0xc34f585a,0x3f800000,2 +np.float32,0x42527c99,0xbf3504f9,2 +np.float32,0xc2527c99,0xbf3504f9,2 +np.float32,0x42d27c99,0x35722833,2 +np.float32,0xc2d27c99,0x35722833,2 +np.float32,0x43527c99,0xbf800000,2 +np.float32,0xc3527c99,0xbf800000,2 
+np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x4355a0d9,0x3f800000,2 +np.float32,0xc355a0d9,0x3f800000,2 +np.float32,0x4258c518,0xbf3504e6,2 +np.float32,0xc258c518,0xbf3504e6,2 +np.float32,0x42d8c518,0xb61267f6,2 +np.float32,0xc2d8c518,0xb61267f6,2 +np.float32,0x4358c518,0xbf800000,2 +np.float32,0xc358c518,0xbf800000,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x435be958,0x3f800000,2 +np.float32,0xc35be958,0x3f800000,2 +np.float32,0x425f0d97,0x3f350508,2 +np.float32,0xc25f0d97,0x3f350508,2 +np.float32,0x42df0d97,0x366845e0,2 +np.float32,0xc2df0d97,0x366845e0,2 +np.float32,0x435f0d97,0xbf800000,2 +np.float32,0xc35f0d97,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x436231d6,0x3f800000,2 +np.float32,0xc36231d6,0x3f800000,2 +np.float32,0x42655616,0x3f3504d7,2 +np.float32,0xc2655616,0x3f3504d7,2 +np.float32,0x42e55616,0xb69f11e5,2 +np.float32,0xc2e55616,0xb69f11e5,2 +np.float32,0x43655616,0xbf800000,2 +np.float32,0xc3655616,0xbf800000,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x43687a55,0x3f800000,2 +np.float32,0xc3687a55,0x3f800000,2 +np.float32,0x426b9e95,0xbf350517,2 +np.float32,0xc26b9e95,0xbf350517,2 +np.float32,0x42eb9e95,0x36ca00d9,2 +np.float32,0xc2eb9e95,0x36ca00d9,2 +np.float32,0x436b9e95,0xbf800000,2 +np.float32,0xc36b9e95,0xbf800000,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x436ec2d4,0x3f800000,2 +np.float32,0xc36ec2d4,0x3f800000,2 +np.float32,0x4271e713,0xbf3504f5,2 +np.float32,0xc271e713,0xbf3504f5,2 +np.float32,0x42f1e713,0x34b10321,2 +np.float32,0xc2f1e713,0x34b10321,2 +np.float32,0x4371e713,0xbf800000,2 +np.float32,0xc371e713,0xbf800000,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x43750b53,0x3f800000,2 +np.float32,0xc3750b53,0x3f800000,2 +np.float32,0x42782f92,0x3f3504f9,2 +np.float32,0xc2782f92,0x3f3504f9,2 +np.float32,0x42f82f92,0x357ef616,2 +np.float32,0xc2f82f92,0x357ef616,2 +np.float32,0x43782f92,0xbf800000,2 +np.float32,0xc3782f92,0xbf800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x437b53d2,0x3f800000,2 +np.float32,0xc37b53d2,0x3f800000,2 +np.float32,0x427e7811,0x3f3504e6,2 +np.float32,0xc27e7811,0x3f3504e6,2 +np.float32,0x42fe7811,0xb6159b6f,2 +np.float32,0xc2fe7811,0xb6159b6f,2 +np.float32,0x437e7811,0xbf800000,2 +np.float32,0xc37e7811,0xbf800000,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4380ce28,0x3f800000,2 +np.float32,0xc380ce28,0x3f800000,2 +np.float32,0x42826048,0xbf350508,2 +np.float32,0xc2826048,0xbf350508,2 +np.float32,0x43026048,0x366b7958,2 +np.float32,0xc3026048,0x366b7958,2 +np.float32,0x43826048,0xbf800000,2 +np.float32,0xc3826048,0xbf800000,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 
+np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x4383f268,0x3f800000,2 +np.float32,0xc383f268,0x3f800000,2 +np.float32,0x42858487,0xbf350504,2 +np.float32,0xc2858487,0xbf350504,2 +np.float32,0x43058487,0x363ea8be,2 +np.float32,0xc3058487,0x363ea8be,2 +np.float32,0x43858487,0xbf800000,2 +np.float32,0xc3858487,0xbf800000,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x438716a7,0x3f800000,2 +np.float32,0xc38716a7,0x3f800000,2 +np.float32,0x4288a8c7,0x3f350517,2 +np.float32,0xc288a8c7,0x3f350517,2 +np.float32,0x4308a8c7,0x36cb9a96,2 +np.float32,0xc308a8c7,0x36cb9a96,2 +np.float32,0x4388a8c7,0xbf800000,2 +np.float32,0xc388a8c7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x438a3ae7,0x3f800000,2 +np.float32,0xc38a3ae7,0x3f800000,2 +np.float32,0x428bcd06,0x3f3504f5,2 +np.float32,0xc28bcd06,0x3f3504f5,2 +np.float32,0x430bcd06,0x3497675b,2 +np.float32,0xc30bcd06,0x3497675b,2 +np.float32,0x438bcd06,0xbf800000,2 +np.float32,0xc38bcd06,0xbf800000,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x438d5f26,0x3f800000,2 +np.float32,0xc38d5f26,0x3f800000,2 +np.float32,0x428ef146,0xbf350526,2 +np.float32,0xc28ef146,0xbf350526,2 +np.float32,0x430ef146,0x3710bc40,2 +np.float32,0xc30ef146,0x3710bc40,2 +np.float32,0x438ef146,0xbf800000,2 +np.float32,0xc38ef146,0xbf800000,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x43908365,0x3f800000,2 +np.float32,0xc3908365,0x3f800000,2 +np.float32,0x42921585,0xbf3504e6,2 +np.float32,0xc2921585,0xbf3504e6,2 +np.float32,0x43121585,0xb618cee8,2 +np.float32,0xc3121585,0xb618cee8,2 +np.float32,0x43921585,0xbf800000,2 +np.float32,0xc3921585,0xbf800000,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4393a7a5,0x3f800000,2 +np.float32,0xc393a7a5,0x3f800000,2 +np.float32,0x429539c5,0x3f350536,2 +np.float32,0xc29539c5,0x3f350536,2 +np.float32,0x431539c5,0x373bab34,2 +np.float32,0xc31539c5,0x373bab34,2 +np.float32,0x439539c5,0xbf800000,2 +np.float32,0xc39539c5,0xbf800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4396cbe4,0x3f800000,2 +np.float32,0xc396cbe4,0x3f800000,2 +np.float32,0x42985e04,0x3f3504d7,2 +np.float32,0xc2985e04,0x3f3504d7,2 +np.float32,0x43185e04,0xb6a2455d,2 +np.float32,0xc3185e04,0xb6a2455d,2 +np.float32,0x43985e04,0xbf800000,2 +np.float32,0xc3985e04,0xbf800000,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x4399f024,0x3f800000,2 +np.float32,0xc399f024,0x3f800000,2 +np.float32,0x429b8243,0xbf3504ea,2 +np.float32,0xc29b8243,0xbf3504ea,2 +np.float32,0x431b8243,0xb5cb2eb8,2 +np.float32,0xc31b8243,0xb5cb2eb8,2 +np.float32,0x439b8243,0xbf800000,2 +np.float32,0xc39b8243,0xbf800000,2 +np.float32,0x435b2047,0x3f3504c1,2 +np.float32,0x42a038a2,0xb5e4ca7e,2 +np.float32,0x432038a2,0xbf800000,2 +np.float32,0x4345eb9b,0xbf800000,2 
+np.float32,0x42c5eb9b,0xb5de638c,2 +np.float32,0x42eb9e94,0xb5d7fc9b,2 +np.float32,0x4350ea79,0x3631dadb,2 +np.float32,0x42dbe957,0xbf800000,2 +np.float32,0x425be957,0xb505522a,2 +np.float32,0x435be957,0x3f800000,2 +np.float32,0x46027eb2,0x3e7d94c9,2 +np.float32,0x4477baed,0xbe7f1824,2 +np.float32,0x454b8024,0x3e7f5268,2 +np.float32,0x455d2c09,0x3e7f40cb,2 +np.float32,0x4768d3de,0xba14b4af,2 +np.float32,0x46c1e7cd,0x3e7fb102,2 +np.float32,0x44a52949,0xbe7dc9d5,2 +np.float32,0x4454633a,0x3e7dbc7d,2 +np.float32,0x4689810b,0x3e7eb02b,2 +np.float32,0x473473cd,0xbe7eef6f,2 +np.float32,0x44a5193f,0x3e7e1b1f,2 +np.float32,0x46004b36,0x3e7dac59,2 +np.float32,0x467f604b,0x3d7ffd3a,2 +np.float32,0x45ea1805,0x3dffd2e0,2 +np.float32,0x457b6af3,0x3dff7831,2 +np.float32,0x44996159,0xbe7d85f4,2 +np.float32,0x47883553,0xbb80584e,2 +np.float32,0x44e19f0c,0xbdffcfe6,2 +np.float32,0x472b3bf6,0xbe7f7a82,2 +np.float32,0x4600bb4e,0x3a135e33,2 +np.float32,0x449f4556,0x3e7e42e5,2 +np.float32,0x474e9420,0x3dff77b2,2 +np.float32,0x45cbdb23,0x3dff7240,2 +np.float32,0x44222747,0x3dffb039,2 +np.float32,0x4772e419,0xbdff74b8,2 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x7fefffffffffffff,0xbfefffe62ecfab75,1 +np.float64,0xffefffffffffffff,0xbfefffe62ecfab75,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfc28bd9dd2517b4,0x3fefaa28ba13a702,1 +np.float64,0x3fb673c62e2ce790,0x3fefe083847a717f,1 +np.float64,0xbfe3e1dac7e7c3b6,0x3fea0500ba099f3a,1 +np.float64,0xbfbe462caa3c8c58,0x3fefc6c8b9c1c87c,1 +np.float64,0xbfb9353576326a68,0x3fefd8513e50e6b1,1 +np.float64,0xbfc05e798520bcf4,0x3fefbd1ad81cf089,1 +np.float64,0xbfe3ca3be2e79478,0x3fea12b995ea6574,1 +np.float64,0xbfde875d46bd0eba,0x3fec6d888662a824,1 +np.float64,0x3fafc4e02c3f89c0,0x3feff03c34bffd69,1 +np.float64,0xbf98855848310ac0,0x3feffda6c1588bdb,1 +np.float64,0x3fe66c51186cd8a2,0x3fe875c61c630ecb,1 +np.float64,0xbfedff1c3b7bfe38,0x3fe2f0c8c9e8fa39,1 +np.float64,0x3fd6082267ac1044,0x3fee1f6023695050,1 +np.float64,0xbfe78449b06f0894,0x3fe7bda2b223850e,1 +np.float64,0x3feedb8e63fdb71c,0x3fe23d5dfd2dd33f,1 +np.float64,0xbfc0a9de3d2153bc,0x3fefbaadf5e5285e,1 +np.float64,0x3fc04c67432098d0,0x3fefbdae07b7de8d,1 +np.float64,0xbfeeef84c4fddf0a,0x3fe22cf37f309d88,1 +np.float64,0x3fc04bb025209760,0x3fefbdb3d7d34ecf,1 +np.float64,0x3fd6b84d48ad709c,0x3fee013403da6e2a,1 +np.float64,0x3fec1ae25d7835c4,0x3fe46e62195cf274,1 +np.float64,0xbfdc6fdf9bb8dfc0,0x3fece48dc78bbb2e,1 +np.float64,0x3fb4db2c9229b660,0x3fefe4d42f79bf49,1 +np.float64,0xbfc0ed698521dad4,0x3fefb8785ea658c9,1 +np.float64,0xbfee82772b7d04ee,0x3fe2864a80efe8e9,1 +np.float64,0x3fd575b664aaeb6c,0x3fee37c669a12879,1 +np.float64,0x3fe4afb1c5e95f64,0x3fe98b177194439c,1 +np.float64,0x3fd93962f9b272c4,0x3fed8bef61876294,1 +np.float64,0x3fd97ae025b2f5c0,0x3fed7f4cfbf4d300,1 +np.float64,0xbfd9afdb1bb35fb6,0x3fed74fdc44dabb1,1 +np.float64,0x3f8ae65e3035cc80,0x3fefff4b1a0ea62b,1 +np.float64,0xbfe7e58664efcb0d,0x3fe77c02a1cbb670,1 +np.float64,0x3fe5f68b37ebed16,0x3fe8c10f849a5d4d,1 +np.float64,0x3fd9137d61b226fc,0x3fed9330eb4815a1,1 +np.float64,0x3fc146d019228da0,0x3fefb57e2d4d52f8,1 +np.float64,0xbfda6036edb4c06e,0x3fed521b2b578679,1 +np.float64,0xbfe78ddfb0ef1bc0,0x3fe7b734319a77e4,1 
+np.float64,0x3fe0877823610ef0,0x3febd33a993dd786,1 +np.float64,0x3fbc61af2e38c360,0x3fefcdb4f889756d,1 +np.float64,0x3fd4dcdca4a9b9b8,0x3fee50962ffea5ae,1 +np.float64,0xbfe03cb29f607965,0x3febf7dbf640a75a,1 +np.float64,0xbfc81de407303bc8,0x3fef6f066cef64bc,1 +np.float64,0x3fd8dea42db1bd48,0x3fed9d3e00dbe0b3,1 +np.float64,0x3feac75e94f58ebe,0x3fe56f1f47f97896,1 +np.float64,0x3fb3a1ea6e2743d0,0x3fefe7ec1247cdaa,1 +np.float64,0x3fd695c0f4ad2b80,0x3fee0730bd40883d,1 +np.float64,0xbfd2c631f5a58c64,0x3feea20cbd1105d7,1 +np.float64,0xbfe978a8e1f2f152,0x3fe663014d40ad7a,1 +np.float64,0x3fd8b6b76ab16d70,0x3feda4c879aacc19,1 +np.float64,0x3feaafd30e755fa6,0x3fe5809514c28453,1 +np.float64,0x3fe1e37dc263c6fc,0x3feb20f9ad1f3f5c,1 +np.float64,0x3fd0ec7c24a1d8f8,0x3feee34048f43b75,1 +np.float64,0xbfe3881cbf67103a,0x3fea38d7886e6f53,1 +np.float64,0xbfd7023957ae0472,0x3fedf4471c765a1c,1 +np.float64,0xbfebc51c4ef78a38,0x3fe4b01c424e297b,1 +np.float64,0xbfe20a93eae41528,0x3feb0c2aa321d2e0,1 +np.float64,0x3fef39be867e737e,0x3fe1efaba9164d27,1 +np.float64,0x3fe8ea9576f1d52a,0x3fe6c7a8826ce1be,1 +np.float64,0x3fea921d91f5243c,0x3fe5968c6cf78963,1 +np.float64,0x3fd7ee5d31afdcbc,0x3fedc9f19d43fe61,1 +np.float64,0xbfe3ed581767dab0,0x3fe9fe4ee2f2b1cd,1 +np.float64,0xbfc40923d5281248,0x3fef9bd8ee9f6e68,1 +np.float64,0x3fe411a834682350,0x3fe9e9103854f057,1 +np.float64,0xbfedf6ccdf7bed9a,0x3fe2f77ad6543246,1 +np.float64,0xbfe8788a44f0f114,0x3fe7172f3aa0c742,1 +np.float64,0xbfce728f173ce520,0x3fef1954083bea04,1 +np.float64,0xbfd64dd0acac9ba2,0x3fee138c3293c246,1 +np.float64,0xbfe00669f5600cd4,0x3fec121443945350,1 +np.float64,0xbfe7152ba2ee2a58,0x3fe8079465d09846,1 +np.float64,0x3fe8654d8f70ca9c,0x3fe7247c94f09596,1 +np.float64,0x3fea68045cf4d008,0x3fe5b58cfe81a243,1 +np.float64,0xbfcd4779073a8ef4,0x3fef2a9d78153fa5,1 +np.float64,0xbfdb4456e5b688ae,0x3fed23b11614203f,1 +np.float64,0x3fcb5d59cd36bab0,0x3fef45818216a515,1 +np.float64,0xbfd914ff5ab229fe,0x3fed92e73746fea8,1 +np.float64,0x3fe4d211db69a424,0x3fe97653f433d15f,1 +np.float64,0xbfdbbb9224b77724,0x3fed0adb593dde80,1 +np.float64,0x3fd424ceafa8499c,0x3fee6d9124795d33,1 +np.float64,0x3feb5968f976b2d2,0x3fe501d116efbf54,1 +np.float64,0x3fee7d92a2fcfb26,0x3fe28a479b6a9dcf,1 +np.float64,0x3fc308e9972611d0,0x3fefa595f4df0c89,1 +np.float64,0x3fda79cd77b4f39c,0x3fed4cf8e69ba1f8,1 +np.float64,0x3fcbcf42d5379e88,0x3fef3f6a6a77c187,1 +np.float64,0x3fe13a1da662743c,0x3feb79504faea888,1 +np.float64,0xbfee4435f07c886c,0x3fe2b8ea98d2fc29,1 +np.float64,0x3fd65d68ccacbad0,0x3fee10e1ac7ada89,1 +np.float64,0x3fef2f89bb7e5f14,0x3fe1f81e882cc3f4,1 +np.float64,0xbfef0a7769fe14ef,0x3fe216bf384fc646,1 +np.float64,0x3fc065277320ca50,0x3fefbce44835c193,1 +np.float64,0x3fe9c1a74d73834e,0x3fe62e9ee0c2f2bf,1 +np.float64,0x3fd9d96e5db3b2dc,0x3fed6cd88eb51f6a,1 +np.float64,0x3fe02bf1c56057e4,0x3febfffc24b5a7ba,1 +np.float64,0xbfd6814350ad0286,0x3fee0ab9ad318b84,1 +np.float64,0x3f9fcbec583f97c0,0x3feffc0d0f1d8e75,1 +np.float64,0x3fe23524e5e46a4a,0x3feaf55372949a06,1 +np.float64,0xbfbdc95f6a3b92c0,0x3fefc89c21d44995,1 +np.float64,0x3fe961bb9cf2c378,0x3fe6735d6e1cca58,1 +np.float64,0xbfe8f1c370f1e387,0x3fe6c29d1be8bee9,1 +np.float64,0x3fd880d43ab101a8,0x3fedaee3c7ccfc96,1 +np.float64,0xbfedb37005fb66e0,0x3fe32d91ef2e3bd3,1 +np.float64,0xfdce287bfb9c5,0x3ff0000000000000,1 +np.float64,0x9aa1b9e735437,0x3ff0000000000000,1 +np.float64,0x6beac6e0d7d59,0x3ff0000000000000,1 +np.float64,0x47457aae8e8b0,0x3ff0000000000000,1 +np.float64,0x35ff13b46bfe3,0x3ff0000000000000,1 
+np.float64,0xb9c0c82b73819,0x3ff0000000000000,1 +np.float64,0x1a8dc21a351b9,0x3ff0000000000000,1 +np.float64,0x7e87ef6afd0ff,0x3ff0000000000000,1 +np.float64,0x620a6588c414d,0x3ff0000000000000,1 +np.float64,0x7f366000fe6e,0x3ff0000000000000,1 +np.float64,0x787e39f4f0fc8,0x3ff0000000000000,1 +np.float64,0xf5134f1fea26a,0x3ff0000000000000,1 +np.float64,0xbce700ef79ce0,0x3ff0000000000000,1 +np.float64,0x144d7cc8289b1,0x3ff0000000000000,1 +np.float64,0xb9fbc5b973f79,0x3ff0000000000000,1 +np.float64,0xc3d6292d87ac5,0x3ff0000000000000,1 +np.float64,0xc1084e618210a,0x3ff0000000000000,1 +np.float64,0xb6b9eca56d73e,0x3ff0000000000000,1 +np.float64,0xc7ac4b858f58a,0x3ff0000000000000,1 +np.float64,0x516d75d2a2daf,0x3ff0000000000000,1 +np.float64,0x9dc089d93b811,0x3ff0000000000000,1 +np.float64,0x7b5f2840f6be6,0x3ff0000000000000,1 +np.float64,0x121d3ce8243a9,0x3ff0000000000000,1 +np.float64,0xf0be0337e17c1,0x3ff0000000000000,1 +np.float64,0xff58a5cbfeb15,0x3ff0000000000000,1 +np.float64,0xdaf1d07fb5e3a,0x3ff0000000000000,1 +np.float64,0x61d95382c3b2b,0x3ff0000000000000,1 +np.float64,0xe4df943fc9bf3,0x3ff0000000000000,1 +np.float64,0xf72ac2bdee559,0x3ff0000000000000,1 +np.float64,0x12dafbf625b60,0x3ff0000000000000,1 +np.float64,0xee11d427dc23b,0x3ff0000000000000,1 +np.float64,0xf4f8eb37e9f1e,0x3ff0000000000000,1 +np.float64,0xad7cb5df5af97,0x3ff0000000000000,1 +np.float64,0x59fc9b06b3f94,0x3ff0000000000000,1 +np.float64,0x3c3e65e4787ce,0x3ff0000000000000,1 +np.float64,0xe37bc993c6f79,0x3ff0000000000000,1 +np.float64,0x13bd6330277ad,0x3ff0000000000000,1 +np.float64,0x56cc2800ad986,0x3ff0000000000000,1 +np.float64,0x6203b8fcc4078,0x3ff0000000000000,1 +np.float64,0x75c7c8b8eb8fa,0x3ff0000000000000,1 +np.float64,0x5ebf8e00bd7f2,0x3ff0000000000000,1 +np.float64,0xda81f2f1b503f,0x3ff0000000000000,1 +np.float64,0x6adb17d6d5b64,0x3ff0000000000000,1 +np.float64,0x1ba68eee374d3,0x3ff0000000000000,1 +np.float64,0xeecf6fbbdd9ee,0x3ff0000000000000,1 +np.float64,0x24d6dd8e49add,0x3ff0000000000000,1 +np.float64,0xdf7cb81bbef97,0x3ff0000000000000,1 +np.float64,0xafd7be1b5faf8,0x3ff0000000000000,1 +np.float64,0xdb90ca35b721a,0x3ff0000000000000,1 +np.float64,0xa72903a14e521,0x3ff0000000000000,1 +np.float64,0x14533ee028a7,0x3ff0000000000000,1 +np.float64,0x7951540cf2a2b,0x3ff0000000000000,1 +np.float64,0x22882be045106,0x3ff0000000000000,1 +np.float64,0x136270d626c4f,0x3ff0000000000000,1 +np.float64,0x6a0f5744d41ec,0x3ff0000000000000,1 +np.float64,0x21e0d1aa43c1b,0x3ff0000000000000,1 +np.float64,0xee544155dca88,0x3ff0000000000000,1 +np.float64,0xcbe8aac797d16,0x3ff0000000000000,1 +np.float64,0x6c065e80d80e,0x3ff0000000000000,1 +np.float64,0xe57f0411cafe1,0x3ff0000000000000,1 +np.float64,0xdec3a6bdbd875,0x3ff0000000000000,1 +np.float64,0xf4d23a0fe9a48,0x3ff0000000000000,1 +np.float64,0xda77ef47b4efe,0x3ff0000000000000,1 +np.float64,0x8c405c9b1880c,0x3ff0000000000000,1 +np.float64,0x4eced5149d9db,0x3ff0000000000000,1 +np.float64,0x16b6552c2d6cc,0x3ff0000000000000,1 +np.float64,0x6fbc262cdf785,0x3ff0000000000000,1 +np.float64,0x628c3844c5188,0x3ff0000000000000,1 +np.float64,0x6d827d2cdb050,0x3ff0000000000000,1 +np.float64,0xd1bfdf29a37fc,0x3ff0000000000000,1 +np.float64,0xd85400fdb0a80,0x3ff0000000000000,1 +np.float64,0xcc420b2d98842,0x3ff0000000000000,1 +np.float64,0xac41d21b5883b,0x3ff0000000000000,1 +np.float64,0x432f18d4865e4,0x3ff0000000000000,1 +np.float64,0xe7e89a1bcfd14,0x3ff0000000000000,1 +np.float64,0x9b1141d536228,0x3ff0000000000000,1 +np.float64,0x6805f662d00bf,0x3ff0000000000000,1 
+np.float64,0xc76552358ecab,0x3ff0000000000000,1 +np.float64,0x4ae8ffee95d21,0x3ff0000000000000,1 +np.float64,0x4396c096872d9,0x3ff0000000000000,1 +np.float64,0x6e8e55d4dd1cb,0x3ff0000000000000,1 +np.float64,0x4c2e33dc985c7,0x3ff0000000000000,1 +np.float64,0xbce814a579d03,0x3ff0000000000000,1 +np.float64,0x911681b5222d0,0x3ff0000000000000,1 +np.float64,0x5f90a4b2bf215,0x3ff0000000000000,1 +np.float64,0x26f76be84deee,0x3ff0000000000000,1 +np.float64,0xb2f7536165eeb,0x3ff0000000000000,1 +np.float64,0x4de4e6089bc9d,0x3ff0000000000000,1 +np.float64,0xf2e016afe5c03,0x3ff0000000000000,1 +np.float64,0xb9b7b949736f7,0x3ff0000000000000,1 +np.float64,0x3363ea1866c7e,0x3ff0000000000000,1 +np.float64,0xd1a3bd6ba3478,0x3ff0000000000000,1 +np.float64,0xae89f3595d13f,0x3ff0000000000000,1 +np.float64,0xddbd9601bb7c,0x3ff0000000000000,1 +np.float64,0x5de41a06bbc84,0x3ff0000000000000,1 +np.float64,0xfd58c86dfab19,0x3ff0000000000000,1 +np.float64,0x24922e8c49247,0x3ff0000000000000,1 +np.float64,0xcda040339b408,0x3ff0000000000000,1 +np.float64,0x5fe500b2bfca1,0x3ff0000000000000,1 +np.float64,0x9214abb924296,0x3ff0000000000000,1 +np.float64,0x800609fe0a2c13fd,0x3ff0000000000000,1 +np.float64,0x800c7c6fe518f8e0,0x3ff0000000000000,1 +np.float64,0x800a1a9491b4352a,0x3ff0000000000000,1 +np.float64,0x800b45e0e8968bc2,0x3ff0000000000000,1 +np.float64,0x8008497e57d092fd,0x3ff0000000000000,1 +np.float64,0x800b9c0af0173816,0x3ff0000000000000,1 +np.float64,0x800194cccb43299a,0x3ff0000000000000,1 +np.float64,0x8001c91ef183923f,0x3ff0000000000000,1 +np.float64,0x800f25b5ccde4b6c,0x3ff0000000000000,1 +np.float64,0x800ce63ccc79cc7a,0x3ff0000000000000,1 +np.float64,0x800d8fb2e83b1f66,0x3ff0000000000000,1 +np.float64,0x80083cd06f7079a1,0x3ff0000000000000,1 +np.float64,0x800823598e9046b3,0x3ff0000000000000,1 +np.float64,0x8001c1319de38264,0x3ff0000000000000,1 +np.float64,0x800f2b68543e56d1,0x3ff0000000000000,1 +np.float64,0x80022a4f4364549f,0x3ff0000000000000,1 +np.float64,0x800f51badf7ea376,0x3ff0000000000000,1 +np.float64,0x8003fbf31e27f7e7,0x3ff0000000000000,1 +np.float64,0x800d4c00e2fa9802,0x3ff0000000000000,1 +np.float64,0x800023b974804774,0x3ff0000000000000,1 +np.float64,0x800860778990c0ef,0x3ff0000000000000,1 +np.float64,0x800a15c241542b85,0x3ff0000000000000,1 +np.float64,0x8003097d9dc612fc,0x3ff0000000000000,1 +np.float64,0x800d77d8541aefb1,0x3ff0000000000000,1 +np.float64,0x80093804ab52700a,0x3ff0000000000000,1 +np.float64,0x800d2b3bfd7a5678,0x3ff0000000000000,1 +np.float64,0x800da24bcd5b4498,0x3ff0000000000000,1 +np.float64,0x8006eee1c28dddc4,0x3ff0000000000000,1 +np.float64,0x80005137fa40a271,0x3ff0000000000000,1 +np.float64,0x8007a3fbc22f47f8,0x3ff0000000000000,1 +np.float64,0x800dcd97071b9b2e,0x3ff0000000000000,1 +np.float64,0x80065b36048cb66d,0x3ff0000000000000,1 +np.float64,0x8004206ba72840d8,0x3ff0000000000000,1 +np.float64,0x8007e82b98cfd058,0x3ff0000000000000,1 +np.float64,0x8001a116ed23422f,0x3ff0000000000000,1 +np.float64,0x800c69e9ff18d3d4,0x3ff0000000000000,1 +np.float64,0x8003843688e7086e,0x3ff0000000000000,1 +np.float64,0x800335e3b8866bc8,0x3ff0000000000000,1 +np.float64,0x800e3308f0bc6612,0x3ff0000000000000,1 +np.float64,0x8002a9ec55c553d9,0x3ff0000000000000,1 +np.float64,0x80001c2084e03842,0x3ff0000000000000,1 +np.float64,0x800bc2bbd8d78578,0x3ff0000000000000,1 +np.float64,0x800ae6bcc555cd7a,0x3ff0000000000000,1 +np.float64,0x80083f7a13907ef5,0x3ff0000000000000,1 +np.float64,0x800d83ed76db07db,0x3ff0000000000000,1 +np.float64,0x800a12251974244b,0x3ff0000000000000,1 
+np.float64,0x800a69c95714d393,0x3ff0000000000000,1 +np.float64,0x800cd5a85639ab51,0x3ff0000000000000,1 +np.float64,0x800e0e1837bc1c31,0x3ff0000000000000,1 +np.float64,0x8007b5ca39ef6b95,0x3ff0000000000000,1 +np.float64,0x800cf961cad9f2c4,0x3ff0000000000000,1 +np.float64,0x80066e8fc14cdd20,0x3ff0000000000000,1 +np.float64,0x8001cb8c7b43971a,0x3ff0000000000000,1 +np.float64,0x800002df68a005c0,0x3ff0000000000000,1 +np.float64,0x8003e6681567ccd1,0x3ff0000000000000,1 +np.float64,0x800b039126b60723,0x3ff0000000000000,1 +np.float64,0x800d2e1b663a5c37,0x3ff0000000000000,1 +np.float64,0x800188b3e2a31169,0x3ff0000000000000,1 +np.float64,0x8001f272e943e4e7,0x3ff0000000000000,1 +np.float64,0x800d7f53607afea7,0x3ff0000000000000,1 +np.float64,0x80092cafa4f25960,0x3ff0000000000000,1 +np.float64,0x800fc009f07f8014,0x3ff0000000000000,1 +np.float64,0x8003da896507b514,0x3ff0000000000000,1 +np.float64,0x800d4d1b4c3a9a37,0x3ff0000000000000,1 +np.float64,0x8007a835894f506c,0x3ff0000000000000,1 +np.float64,0x80057ba0522af741,0x3ff0000000000000,1 +np.float64,0x8009b7054b336e0b,0x3ff0000000000000,1 +np.float64,0x800b2c6c125658d9,0x3ff0000000000000,1 +np.float64,0x8008b1840ad16308,0x3ff0000000000000,1 +np.float64,0x8007ea0e3befd41d,0x3ff0000000000000,1 +np.float64,0x800dd658683bacb1,0x3ff0000000000000,1 +np.float64,0x8008cda48fd19b49,0x3ff0000000000000,1 +np.float64,0x8003acca14c75995,0x3ff0000000000000,1 +np.float64,0x8008bd152d717a2b,0x3ff0000000000000,1 +np.float64,0x80010d1ea3621a3e,0x3ff0000000000000,1 +np.float64,0x800130b78b826170,0x3ff0000000000000,1 +np.float64,0x8002cf3a46e59e75,0x3ff0000000000000,1 +np.float64,0x800b76e7fa76edd0,0x3ff0000000000000,1 +np.float64,0x800e065fe1dc0cc0,0x3ff0000000000000,1 +np.float64,0x8000dd527ea1baa6,0x3ff0000000000000,1 +np.float64,0x80032cb234665965,0x3ff0000000000000,1 +np.float64,0x800affc1acb5ff84,0x3ff0000000000000,1 +np.float64,0x80074be23fee97c5,0x3ff0000000000000,1 +np.float64,0x8004f83eafc9f07e,0x3ff0000000000000,1 +np.float64,0x800b02a115560543,0x3ff0000000000000,1 +np.float64,0x800b324a55766495,0x3ff0000000000000,1 +np.float64,0x800ffbcfd69ff7a0,0x3ff0000000000000,1 +np.float64,0x800830bc7b906179,0x3ff0000000000000,1 +np.float64,0x800cbafe383975fd,0x3ff0000000000000,1 +np.float64,0x8001ee42bfe3dc86,0x3ff0000000000000,1 +np.float64,0x8005b00fdc0b6020,0x3ff0000000000000,1 +np.float64,0x8005e7addd0bcf5c,0x3ff0000000000000,1 +np.float64,0x8001ae4cb0635c9a,0x3ff0000000000000,1 +np.float64,0x80098a9941131533,0x3ff0000000000000,1 +np.float64,0x800334c929466993,0x3ff0000000000000,1 +np.float64,0x8009568239d2ad05,0x3ff0000000000000,1 +np.float64,0x800f0639935e0c73,0x3ff0000000000000,1 +np.float64,0x800cebce7499d79d,0x3ff0000000000000,1 +np.float64,0x800482ee4c2905dd,0x3ff0000000000000,1 +np.float64,0x8007b7bd9e2f6f7c,0x3ff0000000000000,1 +np.float64,0x3fe654469f2ca88d,0x3fe8853f6c01ffb3,1 +np.float64,0x3feb4d7297369ae5,0x3fe50ad5bb621408,1 +np.float64,0x3feef53ba43dea77,0x3fe2283f356f8658,1 +np.float64,0x3fddf564eabbeaca,0x3fec8ec0e0dead9c,1 +np.float64,0x3fd3a69078274d21,0x3fee80e05c320000,1 +np.float64,0x3fecdafe5d39b5fd,0x3fe3d91a5d440fd9,1 +np.float64,0x3fd93286bc32650d,0x3fed8d40696cd10e,1 +np.float64,0x3fc0d34eb821a69d,0x3fefb954023d4284,1 +np.float64,0x3fc7b4b9a02f6973,0x3fef73e8739787ce,1 +np.float64,0x3fe08c839a611907,0x3febd0bc6f5641cd,1 +np.float64,0x3fb3d1758627a2eb,0x3fefe776f6183f96,1 +np.float64,0x3fef93c9ff3f2794,0x3fe1a4d2f622627d,1 +np.float64,0x3fea8d0041351a01,0x3fe59a52a1c78c9e,1 +np.float64,0x3fe3e26a30e7c4d4,0x3fea04ad3e0bbf8d,1 
+np.float64,0x3fe5a34c9f6b4699,0x3fe8f57c5ccd1eab,1 +np.float64,0x3fc21ef859243df1,0x3fefae0b68a3a2e7,1 +np.float64,0x3fed7dd585fafbab,0x3fe35860041e5b0d,1 +np.float64,0x3fe5abacf22b575a,0x3fe8f03d8b6ef0f2,1 +np.float64,0x3fe426451f284c8a,0x3fe9dcf21f13205b,1 +np.float64,0x3fc01f6456203ec9,0x3fefbf19e2a8e522,1 +np.float64,0x3fe1cf2772239e4f,0x3feb2bbd645c7697,1 +np.float64,0x3fd18c4ace231896,0x3feecdfdd086c110,1 +np.float64,0x3fe8387d5b7070fb,0x3fe74358f2ec4910,1 +np.float64,0x3fdce51c2239ca38,0x3feccb2ae5459632,1 +np.float64,0x3fe5b0f2e4eb61e6,0x3fe8ecef4dbe4277,1 +np.float64,0x3fe1ceeb08a39dd6,0x3feb2bdd4dcfb3df,1 +np.float64,0x3febc5899d778b13,0x3fe4afc8dd8ad228,1 +np.float64,0x3fe7a47fbe2f48ff,0x3fe7a7fd9b352ea5,1 +np.float64,0x3fe7f74e1fafee9c,0x3fe76feb2755b247,1 +np.float64,0x3fe2bfad04e57f5a,0x3feaa9b46adddaeb,1 +np.float64,0x3fd06a090320d412,0x3feef40c334f8fba,1 +np.float64,0x3fdc97297d392e53,0x3fecdc16a3e22fcb,1 +np.float64,0x3fdc1a3f3838347e,0x3fecf6db2769d404,1 +np.float64,0x3fcca90096395201,0x3fef338156fcd218,1 +np.float64,0x3fed464733fa8c8e,0x3fe38483f0465d91,1 +np.float64,0x3fe7e067d82fc0d0,0x3fe77f7c8c9de896,1 +np.float64,0x3fc014fa0b2029f4,0x3fefbf6d84c933f8,1 +np.float64,0x3fd3bf1524277e2a,0x3fee7d2997b74dec,1 +np.float64,0x3fec153b86782a77,0x3fe472bb5497bb2a,1 +np.float64,0x3fd3e4d9d5a7c9b4,0x3fee776842691902,1 +np.float64,0x3fea6c0e2c74d81c,0x3fe5b2954cb458d9,1 +np.float64,0x3fee8f6a373d1ed4,0x3fe27bb9e348125b,1 +np.float64,0x3fd30c6dd42618dc,0x3fee97d2cab2b0bc,1 +np.float64,0x3fe4f90e6d69f21d,0x3fe95ea3dd4007f2,1 +np.float64,0x3fe271d467e4e3a9,0x3fead470d6d4008b,1 +np.float64,0x3fef2983897e5307,0x3fe1fd1a4debe33b,1 +np.float64,0x3fe980cc83b30199,0x3fe65d2fb8a0eb46,1 +np.float64,0x3fdfdf53db3fbea8,0x3fec1cf95b2a1cc7,1 +np.float64,0x3fe4d5307ba9aa61,0x3fe974701b4156cb,1 +np.float64,0x3fdb4e2345b69c47,0x3fed21aa6c146512,1 +np.float64,0x3fe3f7830327ef06,0x3fe9f85f6c88c2a8,1 +np.float64,0x3fca915fb63522bf,0x3fef502b73a52ecf,1 +np.float64,0x3fe66d3709ecda6e,0x3fe87531d7372d7a,1 +np.float64,0x3fd86000bcb0c001,0x3fedb5018dd684ca,1 +np.float64,0x3fe516e5feea2dcc,0x3fe94c68b111404e,1 +np.float64,0x3fd83c53dd3078a8,0x3fedbb9e5dd9e165,1 +np.float64,0x3fedfeeb673bfdd7,0x3fe2f0f0253c5d5d,1 +np.float64,0x3fe0dc6f9c21b8df,0x3feba8e2452410c2,1 +np.float64,0x3fbe154d643c2a9b,0x3fefc780a9357457,1 +np.float64,0x3fe5f63986abec73,0x3fe8c1434951a40a,1 +np.float64,0x3fbce0e50839c1ca,0x3fefcbeeaa27de75,1 +np.float64,0x3fd7ef5c5c2fdeb9,0x3fedc9c3022495b3,1 +np.float64,0x3fc1073914220e72,0x3fefb79de80fc0fd,1 +np.float64,0x3fe1a93c3d235278,0x3feb3fb21f86ac67,1 +np.float64,0x3fe321ee53e643dd,0x3fea72e2999f1e22,1 +np.float64,0x3fa881578c3102af,0x3feff69e6e51e0d6,1 +np.float64,0x3fd313482a262690,0x3fee96d161199495,1 +np.float64,0x3fe7272cd6ae4e5a,0x3fe7fbacbd0d8f43,1 +np.float64,0x3fd6cf4015ad9e80,0x3fedfd3513d544b8,1 +np.float64,0x3fc67b7e6d2cf6fd,0x3fef81f5c16923a4,1 +np.float64,0x3fa1999c14233338,0x3feffb2913a14184,1 +np.float64,0x3fc74eb8dd2e9d72,0x3fef78909a138e3c,1 +np.float64,0x3fc0b9274921724f,0x3fefba2ebd5f3e1c,1 +np.float64,0x3fd53fa156aa7f43,0x3fee40a18e952e88,1 +np.float64,0x3feaccbca4b59979,0x3fe56b22b33eb713,1 +np.float64,0x3fe6a01e3a2d403c,0x3fe8543fbd820ecc,1 +np.float64,0x3fd392a869a72551,0x3fee83e0ffe0e8de,1 +np.float64,0x3fe44d8928689b12,0x3fe9c5bf3c8fffdb,1 +np.float64,0x3fca3f209f347e41,0x3fef5461b6fa0924,1 +np.float64,0x3fee9e84b07d3d09,0x3fe26f638f733549,1 +np.float64,0x3faf49acb03e9359,0x3feff0b583cd8c48,1 +np.float64,0x3fea874b2af50e96,0x3fe59e882fa6febf,1 
+np.float64,0x3fc50b72772a16e5,0x3fef918777dc41be,1 +np.float64,0x3fe861d1d4f0c3a4,0x3fe726e44d9d42c2,1 +np.float64,0x3fcadd2e2535ba5c,0x3fef4c3e2b56da38,1 +np.float64,0x3fea59c29cb4b385,0x3fe5c0043e586439,1 +np.float64,0x3fc1ffef0d23ffde,0x3fefaf22be452d13,1 +np.float64,0x3fc2d8dbc125b1b8,0x3fefa75b646d8e4e,1 +np.float64,0x3fd66c6471acd8c9,0x3fee0e5038b895c0,1 +np.float64,0x3fd0854adfa10a96,0x3feef0945bcc5c99,1 +np.float64,0x3feaac7076f558e1,0x3fe58316c23a82ad,1 +np.float64,0x3fdda49db3bb493b,0x3feca0e347c0ad6f,1 +np.float64,0x3fe43a539de874a7,0x3fe9d11d722d4822,1 +np.float64,0x3feeee3ebbfddc7d,0x3fe22dffd251e9af,1 +np.float64,0x3f8ee2c5b03dc58b,0x3fefff11855a7b6c,1 +np.float64,0x3fcd7107c63ae210,0x3fef2840bb55ca52,1 +np.float64,0x3f8d950d203b2a1a,0x3fefff253a08e40e,1 +np.float64,0x3fd40a5e57a814bd,0x3fee71a633c761fc,1 +np.float64,0x3fee836ec83d06de,0x3fe28580975be2fd,1 +np.float64,0x3fd7bbe87f2f77d1,0x3fedd31f661890cc,1 +np.float64,0xbfe05bf138a0b7e2,0x3febe8a000d96e47,1 +np.float64,0xbf88bddd90317bc0,0x3fefff66f6e2ff26,1 +np.float64,0xbfdc9cbb12393976,0x3fecdae2982335db,1 +np.float64,0xbfd85b4eccb0b69e,0x3fedb5e0dd87f702,1 +np.float64,0xbfe5c326cb2b864e,0x3fe8e180f525fa12,1 +np.float64,0xbfe381a0e4a70342,0x3fea3c8e5e3ab78e,1 +np.float64,0xbfe58d892c2b1b12,0x3fe9031551617aed,1 +np.float64,0xbfd7f3a52cafe74a,0x3fedc8fa97edd080,1 +np.float64,0xbfef3417bc7e682f,0x3fe1f45989f6a009,1 +np.float64,0xbfddfb8208bbf704,0x3fec8d5fa9970773,1 +np.float64,0xbfdab69bcc356d38,0x3fed40b2f6c347c6,1 +np.float64,0xbfed3f7cf17a7efa,0x3fe389e4ff4d9235,1 +np.float64,0xbfe47675d9a8ecec,0x3fe9ad6829a69e94,1 +np.float64,0xbfd030e2902061c6,0x3feefb3f811e024f,1 +np.float64,0xbfc376ac7226ed58,0x3fefa1798712b37e,1 +np.float64,0xbfdb7e54a0b6fcaa,0x3fed17a974c4bc28,1 +np.float64,0xbfdb7d5d5736faba,0x3fed17dcf31a8d84,1 +np.float64,0xbf876bd6502ed7c0,0x3fefff76dce6232c,1 +np.float64,0xbfd211e6c02423ce,0x3feebba41f0a1764,1 +np.float64,0xbfb443e3962887c8,0x3fefe658953629d4,1 +np.float64,0xbfe81b09e9b03614,0x3fe757882e4fdbae,1 +np.float64,0xbfdcb905d2b9720c,0x3fecd4c22cfe84e5,1 +np.float64,0xbfe3b62d99276c5b,0x3fea1e5520b3098d,1 +np.float64,0xbfbf05b25c3e0b68,0x3fefc3ecc04bca8e,1 +np.float64,0xbfdedc885b3db910,0x3fec59e22feb49f3,1 +np.float64,0xbfe33aa282667545,0x3fea64f2d55ec471,1 +np.float64,0xbfec84745a3908e9,0x3fe41cb3214e7044,1 +np.float64,0xbfddefdff1bbdfc0,0x3fec8fff88d4d0ec,1 +np.float64,0xbfd26ae6aca4d5ce,0x3feeaf208c7fedf6,1 +np.float64,0xbfee010591fc020b,0x3fe2ef3e57211a5e,1 +np.float64,0xbfb8cfddca319fb8,0x3fefd98d8f7918ed,1 +np.float64,0xbfe991648f3322c9,0x3fe6514e54670bae,1 +np.float64,0xbfee63fd087cc7fa,0x3fe29f1bfa3297cc,1 +np.float64,0xbfe1685942a2d0b2,0x3feb617f5f839eee,1 +np.float64,0xbfc6fc2fd62df860,0x3fef7c4698fd58cf,1 +np.float64,0xbfe42723d3a84e48,0x3fe9dc6ef7243e90,1 +np.float64,0xbfc3a7e89d274fd0,0x3fef9f99e3314e77,1 +np.float64,0xbfeb4c9521f6992a,0x3fe50b7c919bc6d8,1 +np.float64,0xbf707b34e020f680,0x3fefffef05e30264,1 +np.float64,0xbfc078478e20f090,0x3fefbc479305d5aa,1 +np.float64,0xbfd494ac4ca92958,0x3fee5c11f1cd8269,1 +np.float64,0xbfdaf888a035f112,0x3fed3346ae600469,1 +np.float64,0xbfa5d8ed502bb1e0,0x3feff88b0f262609,1 +np.float64,0xbfeec0cbfffd8198,0x3fe253543b2371cb,1 +np.float64,0xbfe594b5986b296b,0x3fe8fe9b39fb3940,1 +np.float64,0xbfc8ece7c631d9d0,0x3fef652bd0611ac7,1 +np.float64,0xbfd8ffeca0b1ffda,0x3fed96ebdf9b65cb,1 +np.float64,0xbfba9b221e353648,0x3fefd3cc21e2f15c,1 +np.float64,0xbfca63a52c34c74c,0x3fef52848eb9ed3b,1 +np.float64,0xbfe588e9b06b11d4,0x3fe905f7403e8881,1 
+np.float64,0xbfc76f82db2edf04,0x3fef77138fe9bbc2,1 +np.float64,0xbfeeb3f334bd67e6,0x3fe25ddadb1096d6,1 +np.float64,0xbfbf2b64ce3e56c8,0x3fefc35a9555f6df,1 +np.float64,0xbfe9920e4ff3241c,0x3fe650d4ab8f5c42,1 +np.float64,0xbfb4a54c02294a98,0x3fefe55fc85ae5e9,1 +np.float64,0xbfe353b0c766a762,0x3fea56c02d17e4b7,1 +np.float64,0xbfd99961a4b332c4,0x3fed795fcd00dbf9,1 +np.float64,0xbfef191ddabe323c,0x3fe20aa79524f636,1 +np.float64,0xbfb25d060224ba10,0x3fefeaeee5cc8c0b,1 +np.float64,0xbfe6022428ec0448,0x3fe8b9b46e776194,1 +np.float64,0xbfed1a236cba3447,0x3fe3a76bee0d9861,1 +np.float64,0xbfc59671e72b2ce4,0x3fef8bc4daef6f14,1 +np.float64,0xbfdf2711703e4e22,0x3fec4886a8c9ceb5,1 +np.float64,0xbfeb7e207536fc41,0x3fe4e610c783f168,1 +np.float64,0xbfe6cdf5bcad9bec,0x3fe8365f8a59bc81,1 +np.float64,0xbfe55294adaaa52a,0x3fe927b0af5ccd09,1 +np.float64,0xbfdf4a88913e9512,0x3fec4036df58ba74,1 +np.float64,0xbfebb7efe4376fe0,0x3fe4ba276006992d,1 +np.float64,0xbfe09f29cfa13e54,0x3febc77f4f9c95e7,1 +np.float64,0xbfdf8c75653f18ea,0x3fec30ac924e4f46,1 +np.float64,0xbfefd601c7ffac04,0x3fe16d6f21bcb9c1,1 +np.float64,0xbfeae97ff5f5d300,0x3fe555bb5b87efe9,1 +np.float64,0xbfed427f02fa84fe,0x3fe387830db093bc,1 +np.float64,0xbfa33909cc267210,0x3feffa3a1bcb50dd,1 +np.float64,0xbfe9aa4bf5f35498,0x3fe63f6e98f6aa0f,1 +np.float64,0xbfe2d7349b25ae69,0x3fea9caa7c331e7e,1 +np.float64,0xbfcdbb2a3a3b7654,0x3fef2401c9659e4b,1 +np.float64,0xbfc8a90919315214,0x3fef686fe7fc0513,1 +np.float64,0xbfe62a98df2c5532,0x3fe89ff22a02cc6b,1 +np.float64,0xbfdc0f67b3b81ed0,0x3fecf928b637798f,1 +np.float64,0xbfebb32bf6f76658,0x3fe4bdc893c09698,1 +np.float64,0xbfec067996380cf3,0x3fe47e132741db97,1 +np.float64,0xbfd9774e1d32ee9c,0x3fed7ffe1e87c434,1 +np.float64,0xbfef989890bf3131,0x3fe1a0d025c80cf4,1 +np.float64,0xbfe59887e62b3110,0x3fe8fc382a3d4197,1 +np.float64,0xbfdea0a11e3d4142,0x3fec67b987e236ec,1 +np.float64,0xbfe2ec495825d892,0x3fea90efb231602d,1 +np.float64,0xbfb329c5c2265388,0x3fefe90f1b8209c3,1 +np.float64,0xbfdcd2dcd339a5ba,0x3feccf24c60b1478,1 +np.float64,0xbfe537ea18aa6fd4,0x3fe938237e217fe0,1 +np.float64,0xbfe8675ce170ceba,0x3fe723105925ce3a,1 +np.float64,0xbfd70723acae0e48,0x3fedf369ac070e65,1 +np.float64,0xbfea9d8692b53b0d,0x3fe58e1ee42e3fdb,1 +np.float64,0xbfcfeb96653fd72c,0x3fef029770033bdc,1 +np.float64,0xbfcc06c92d380d94,0x3fef3c69797d9b0a,1 +np.float64,0xbfe16b7c4f62d6f8,0x3feb5fdf9f0a9a07,1 +np.float64,0xbfed4d7a473a9af4,0x3fe37ecee27b1eb7,1 +np.float64,0xbfe6a6f6942d4ded,0x3fe84fccdf762b19,1 +np.float64,0xbfda46d867348db0,0x3fed572d928fa657,1 +np.float64,0xbfdbd9482db7b290,0x3fed049b5f907b52,1 +np.float64,0x7fe992ceb933259c,0xbfeb15af92aad70e,1 +np.float64,0x7fe3069204a60d23,0xbfe5eeff454240e9,1 +np.float64,0x7fe729dbf32e53b7,0xbfefe0528a330e4c,1 +np.float64,0x7fec504fb638a09e,0x3fd288e95dbedf65,1 +np.float64,0x7fe1d30167a3a602,0xbfeffc41f946fd02,1 +np.float64,0x7fed7f8ffd3aff1f,0x3fefe68ec604a19d,1 +np.float64,0x7fd2f23635a5e46b,0x3fea63032efbb447,1 +np.float64,0x7fd4c86db1a990da,0x3fdf6b9f7888db5d,1 +np.float64,0x7fe7554db6eeaa9a,0x3fe1b41476861bb0,1 +np.float64,0x7fe34e823ba69d03,0x3fefc435532e6294,1 +np.float64,0x7fec5c82fef8b905,0x3fef8f0c6473034f,1 +np.float64,0x7feba221bff74442,0xbfea95b81eb19b47,1 +np.float64,0x7fe74808a5ae9010,0xbfd3aa322917c3e5,1 +np.float64,0x7fdf41b7e0be836f,0x3fd14283c7147282,1 +np.float64,0x7fec09892f381311,0x3fe5240376ae484b,1 +np.float64,0x7faaf80bf435f017,0x3fe20227fa811423,1 +np.float64,0x7f8422d8402845b0,0x3fe911714593b8a0,1 +np.float64,0x7fd23a7fada474fe,0x3feff9f40aa37e9c,1 
+np.float64,0x7fef4a4806fe948f,0x3fec6eca89cb4a62,1 +np.float64,0x7fe1e71cf763ce39,0xbfea6ac63f9ba457,1 +np.float64,0x7fe3e555be27caaa,0xbfe75b305d0dbbfd,1 +np.float64,0x7fcb8bac96371758,0xbfe8b126077f9d4c,1 +np.float64,0x7fc98e2c84331c58,0x3fef9092eb0bc85a,1 +np.float64,0x7fe947cf2b728f9d,0xbfebfff2c5b7d198,1 +np.float64,0x7feee8058c3dd00a,0xbfef21ebaae2eb17,1 +np.float64,0x7fef61d8d5bec3b1,0xbfdf1a032fb1c864,1 +np.float64,0x7fcf714b6f3ee296,0x3fe6fc89a8084098,1 +np.float64,0x7fa9a8b44c335168,0xbfeb16c149cea943,1 +np.float64,0x7fd175c482a2eb88,0xbfef64d341e73f88,1 +np.float64,0x7feab8e6a87571cc,0x3feb10069c397464,1 +np.float64,0x7fe3ade72de75bcd,0x3fd1753e333d5790,1 +np.float64,0x7fb26d87d224db0f,0xbfe753d36b18f4ca,1 +np.float64,0x7fdb7ef159b6fde2,0x3fe5c0a6044d3607,1 +np.float64,0x7fd5af86422b5f0c,0x3fe77193c95f6484,1 +np.float64,0x7fee9e00b07d3c00,0x3fe864d494596845,1 +np.float64,0x7fef927a147f24f3,0xbfe673b14715693d,1 +np.float64,0x7fd0aea63c215d4b,0xbfeff435f119fce9,1 +np.float64,0x7fd02e3796a05c6e,0x3fe4f7e3706e9a3d,1 +np.float64,0x7fd3ed61da27dac3,0xbfefef2f057f168c,1 +np.float64,0x7fefaca0d4ff5941,0x3fd3e8ad205cd4ab,1 +np.float64,0x7feb659e06f6cb3b,0x3fd64d803203e027,1 +np.float64,0x7fc94ccfaf32999e,0x3fee04922209369a,1 +np.float64,0x7feb4ec294f69d84,0xbfd102763a056c89,1 +np.float64,0x7fe2ada6ac655b4c,0x3fef4f6792aa6093,1 +np.float64,0x7fe5f40fdc2be81f,0xbfb4a6327186eee8,1 +np.float64,0x7fe7584bc3eeb097,0xbfd685b8ff94651d,1 +np.float64,0x7fe45d276be8ba4e,0x3fee53b13f7e442f,1 +np.float64,0x7fe6449b3d6c8935,0xbfe7e08bafa75251,1 +np.float64,0x7f8d62e6b03ac5cc,0x3fe73d30762f38fd,1 +np.float64,0x7fe3a76f72a74ede,0xbfeb48a28bc60968,1 +np.float64,0x7fd057706920aee0,0x3fdece8fa06f626c,1 +np.float64,0x7fe45ae158e8b5c2,0x3fe7a70f47b4d349,1 +np.float64,0x7fea8a5a983514b4,0x3fefb053d5f9ddd7,1 +np.float64,0x7fdd1e86ab3a3d0c,0x3fe3cded1b93816b,1 +np.float64,0x7fdb456108b68ac1,0xbfe37574c0b9bf8f,1 +np.float64,0x7fe972602432e4bf,0x3fef9a26e65ec01c,1 +np.float64,0x7fdbe2385637c470,0x3fed541df57969e1,1 +np.float64,0x7fe57f03602afe06,0x3fbd90f595cbbd94,1 +np.float64,0x7feb0ceb68f619d6,0xbfeae9cb8ee5261f,1 +np.float64,0x7fe6abfe6c6d57fc,0xbfef40a6edaca26f,1 +np.float64,0x7fe037ea08606fd3,0xbfda817d75858597,1 +np.float64,0x7fdd75a52dbaeb49,0x3feef2a0d91d6aa1,1 +np.float64,0x7fe8f9af66b1f35e,0xbfedfceef2a3bfc9,1 +np.float64,0x7fedf762b53beec4,0x3fd8b4f21ef69ee3,1 +np.float64,0x7fe99295b7f3252a,0x3feffc24d970383e,1 +np.float64,0x7fe797b0172f2f5f,0x3fee089aa56f7ce8,1 +np.float64,0x7fed89dcc97b13b9,0xbfcfa2bb0c3ea41f,1 +np.float64,0x7fae9e8d5c3d3d1a,0xbfe512ffe16c6b08,1 +np.float64,0x7fefaecbe27f5d97,0x3fbfc718a5e972f1,1 +np.float64,0x7fce0236d93c046d,0xbfa9b7cd790db256,1 +np.float64,0x7fa9689aac32d134,0x3feced501946628a,1 +np.float64,0x7feb1469e93628d3,0x3fef2a988e7673ed,1 +np.float64,0x7fdba78344b74f06,0xbfe092e78965b30c,1 +np.float64,0x7fece54c3fb9ca97,0x3fd3cfd184bed2e6,1 +np.float64,0x7fdb84212b370841,0xbfe25ebf2db6ee55,1 +np.float64,0x7fbe3e8bf23c7d17,0x3fe2ee72df573345,1 +np.float64,0x7fe43d9803687b2f,0xbfed2eff6a9e66a0,1 +np.float64,0x7fb0f9c00a21f37f,0x3feff70f3276fdb7,1 +np.float64,0x7fea0c6cbbb418d8,0xbfefa612494798b2,1 +np.float64,0x7fe4b3239e296646,0xbfe74dd959af8cdc,1 +np.float64,0x7fe5c6a773eb8d4e,0xbfd06944048f8d2b,1 +np.float64,0x7fb1c1278223824e,0xbfeb533a34655bde,1 +np.float64,0x7fd21c09ee243813,0xbfe921ccbc9255c3,1 +np.float64,0x7fe051020c20a203,0x3fbd519d700c1f2f,1 +np.float64,0x7fe0c76845e18ed0,0x3fefb9595191a31b,1 +np.float64,0x7fe6b0b57b6d616a,0xbf8c59a8ba5fcd9a,1 
+np.float64,0x7fd386c460270d88,0x3fe8ffea5d1a5c46,1 +np.float64,0x7feeb884713d7108,0x3fee9b2247ef6c0d,1 +np.float64,0x7fd85f71b6b0bee2,0xbfefc30ec3e28f07,1 +np.float64,0x7fc341366426826c,0x3fd4234d35386d3b,1 +np.float64,0x7fe56482dd6ac905,0x3fe7189de6a50668,1 +np.float64,0x7fec67a2e3f8cf45,0xbfef86d0b940f37f,1 +np.float64,0x7fe38b202fe7163f,0x3feb90b75caa2030,1 +np.float64,0x7fdcbc64883978c8,0x3fed4f758fbf64d4,1 +np.float64,0x7fea5f0598f4be0a,0x3fdd503a417b3d4d,1 +np.float64,0x7fda3b6bcf3476d7,0x3fea6e9af3f7f9f5,1 +np.float64,0x7fc7d7896c2faf12,0x3fda2bebc36a2363,1 +np.float64,0x7fe7e8e2626fd1c4,0xbfe7d5e390c4cc3f,1 +np.float64,0x7fde0f3d7abc1e7a,0xbfede7a0ecfa3606,1 +np.float64,0x7fc692b8f52d2571,0x3feff0cd7ab6f61b,1 +np.float64,0xff92d1fce825a400,0xbfc921c36fc014fa,1 +np.float64,0xffdec3af2fbd875e,0xbfed6a77e6a0364e,1 +np.float64,0xffef46e7d9be8dcf,0xbfed7d39476f7e27,1 +np.float64,0xffe2c2ce4525859c,0x3fe1757261316bc9,1 +np.float64,0xffe27c8b5864f916,0xbfefe017c0d43457,1 +np.float64,0xffe184d7442309ae,0x3fa1fb8c49dba596,1 +np.float64,0xffddf5f98d3bebf4,0x3fee4f8eaa5f847e,1 +np.float64,0xffee3ef354fc7de6,0xbfebfd60fa51b2ba,1 +np.float64,0xffdecb3e85bd967e,0x3fbfad2667a8b468,1 +np.float64,0xffe4ee900b29dd20,0xbfdc02dc626f91cd,1 +np.float64,0xffd3179f6da62f3e,0xbfe2cfe442511776,1 +np.float64,0xffe99ef7cef33def,0x3f50994542a7f303,1 +np.float64,0xffe2b66b1ae56cd6,0xbfefe3e066eb6329,1 +np.float64,0xff8f72aff03ee540,0x3fe9c46224cf5003,1 +np.float64,0xffd29beb85a537d8,0x3fefcb0b6166be71,1 +np.float64,0xffaef02d4c3de060,0xbfef5fb71028fc72,1 +np.float64,0xffd39a2a89273456,0x3fe6d4b183205dca,1 +np.float64,0xffef8a9392ff1526,0x3fedb99fbf402468,1 +np.float64,0xffb9b3f31e3367e8,0x3fee1005270fcf80,1 +np.float64,0xffed9d5c693b3ab8,0x3fd110f4b02365d5,1 +np.float64,0xffeaba45f9f5748b,0x3fe499e0a6f4afb2,1 +np.float64,0xffdba3f70d3747ee,0xbfca0c30493ae519,1 +np.float64,0xffa35b985426b730,0xbfdb625df56bcf45,1 +np.float64,0xffccbc9728397930,0x3fc53cbc59020704,1 +np.float64,0xffef73c942bee792,0xbfdc647a7a5e08be,1 +np.float64,0xffcb5acfb236b5a0,0x3feeb4ec038c39fc,1 +np.float64,0xffea116fe2b422df,0x3fefe03b6ae0b435,1 +np.float64,0xffe97de6e7b2fbcd,0xbfd2025698fab9eb,1 +np.float64,0xffdddba314bbb746,0x3fd31f0fdb8f93be,1 +np.float64,0xffd613a24a2c2744,0xbfebbb1efae884b3,1 +np.float64,0xffe3d938aa67b271,0xbfc2099cead3d3be,1 +np.float64,0xffdf08c2e33e1186,0xbfefd236839b900d,1 +np.float64,0xffea6ba8bd34d751,0x3fe8dfc032114719,1 +np.float64,0xffe3202083e64040,0x3fed513b81432a22,1 +np.float64,0xffb2397db62472f8,0xbfee7d7fe1c3f76c,1 +np.float64,0xffd9d0682ab3a0d0,0x3fe0bcf9e531ad79,1 +np.float64,0xffc293df202527c0,0xbfe58d0bdece5e64,1 +np.float64,0xffe1422c7da28458,0xbf81bd72595f2341,1 +np.float64,0xffd64e4ed4ac9c9e,0x3fa4334cc011c703,1 +np.float64,0xffe40a970ae8152e,0x3fead3d258b55b7d,1 +np.float64,0xffc8c2f2223185e4,0xbfef685f07c8b9fd,1 +np.float64,0xffe4b2f7216965ee,0x3fe3861d3d896a83,1 +np.float64,0xffdb531db3b6a63c,0x3fe18cb8332dd59d,1 +np.float64,0xffe8e727a3b1ce4e,0xbfe57b15abb677b9,1 +np.float64,0xffe530c1e12a6184,0xbfb973ea5535e48f,1 +np.float64,0xffe6f7849cedef08,0x3fd39a37ec5af4b6,1 +np.float64,0xffead62a78b5ac54,0x3fe69b3f6c7aa24b,1 +np.float64,0xffeefdd725fdfbad,0xbfc08a456111fdd5,1 +np.float64,0xffe682182fed0430,0x3fecc7c1292761d2,1 +np.float64,0xffee0ca8dcbc1951,0x3fef6cc361ef2c19,1 +np.float64,0xffec9b338f393666,0x3fefa9ab8e0471b5,1 +np.float64,0xffe13c5e29a278bc,0xbfef8da74ad83398,1 +np.float64,0xffd7bd48c62f7a92,0x3fe3468cd4ac9d34,1 +np.float64,0xffedd0ed14bba1d9,0xbfd563a83477077b,1 
+np.float64,0xffe86b83f3f0d707,0x3fe9eb3c658e4b2d,1 +np.float64,0xffd6a4db4bad49b6,0xbfc7e11276166e17,1 +np.float64,0xffc29e8404253d08,0x3fd35971961c789f,1 +np.float64,0xffe27cf3d664f9e7,0xbfeca0f73c72f810,1 +np.float64,0xffc34152352682a4,0x3fef384e564c002c,1 +np.float64,0xffe395728ba72ae4,0x3f8fe18c2de86eba,1 +np.float64,0xffed86c4fbbb0d89,0x3fef709db881c672,1 +np.float64,0xffe8a98d37f1531a,0x3fd4879c8f73c3dc,1 +np.float64,0xffb8ce9fea319d40,0xbfb853c8fe46b08d,1 +np.float64,0xffe7f26db8efe4db,0xbfec1cfd3e5c2ac1,1 +np.float64,0xffd7935b77af26b6,0x3fb7368c89b2a460,1 +np.float64,0xffc5840ed02b081c,0x3fd92220b56631f3,1 +np.float64,0xffc36a873926d510,0x3fa84d61baf61811,1 +np.float64,0xffe06ea583e0dd4a,0x3feb647e348b9e39,1 +np.float64,0xffe6a33031ed4660,0xbfe096b851dc1a0a,1 +np.float64,0xffe001c938e00392,0x3fe4eece77623e7a,1 +np.float64,0xffc1e4f23b23c9e4,0xbfdb9bb1f83f6ac4,1 +np.float64,0xffecd3ecbab9a7d9,0x3fbafb1f800f177d,1 +np.float64,0xffc2d3016825a604,0xbfef650e8b0d6afb,1 +np.float64,0xffe222cb68e44596,0x3fde3690e44de5bd,1 +np.float64,0xffe5bb145e2b7628,0x3fedbb98e23c9dc1,1 +np.float64,0xffe9e5823b73cb04,0xbfee41661016c03c,1 +np.float64,0xffd234a00ba46940,0x3fda0312cda580c2,1 +np.float64,0xffe0913ed6e1227d,0xbfed508bb529bd23,1 +np.float64,0xffe8e3596171c6b2,0xbfdc33e1c1d0310e,1 +np.float64,0xffef9c6835ff38cf,0x3fea8ce6d27dfba3,1 +np.float64,0xffdd3bcf66ba779e,0x3fe50523d2b6470e,1 +np.float64,0xffe57e8cf06afd1a,0xbfee600933347247,1 +np.float64,0xffe0d8c65fa1b18c,0x3fe75091f93d5e4c,1 +np.float64,0xffea7c8c16b4f918,0x3fee681724795198,1 +np.float64,0xffe34f7a05269ef4,0xbfe3c3e179676f13,1 +np.float64,0xffd28894a6a5112a,0xbfe5d1027aee615d,1 +np.float64,0xffc73be6f22e77cc,0x3fe469bbc08b472a,1 +np.float64,0xffe7f71b066fee36,0x3fe7ed136c8fdfaa,1 +np.float64,0xffebc13e29f7827c,0x3fefcdc6e677d314,1 +np.float64,0xffd53e9c942a7d3a,0x3fea5a02c7341749,1 +np.float64,0xffd7191b23ae3236,0x3fea419b66023443,1 +np.float64,0xffe9480325b29006,0xbfefeaff5fa38cd5,1 +np.float64,0xffba46dc0e348db8,0xbfefa54f4de28eba,1 +np.float64,0xffdd4cc31eba9986,0x3fe60bb41fe1c4da,1 +np.float64,0xffe13a70dea274e1,0xbfaa9192f7bd6c9b,1 +np.float64,0xffde25127bbc4a24,0x3f7c75f45e29be7d,1 +np.float64,0xffe4076543a80eca,0x3fea5aad50d2f687,1 +np.float64,0xffe61512acec2a25,0xbfefffeb67401649,1 +np.float64,0xffef812ec1ff025d,0xbfe919c7c073c766,1 +np.float64,0xffd5552aeaaaaa56,0x3fc89d38ab047396,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cosh.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cosh.csv new file mode 100644 index 0000000..af14d84 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-cosh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfe0ac238,0x7f800000,3 +np.float32,0xbf553b86,0x3faf079b,3 +np.float32,0xff4457da,0x7f800000,3 +np.float32,0xff7253f3,0x7f800000,3 +np.float32,0x5a5802,0x3f800000,3 +np.float32,0x3db03413,0x3f80795b,3 +np.float32,0x7f6795c9,0x7f800000,3 +np.float32,0x805b9142,0x3f800000,3 +np.float32,0xfeea581a,0x7f800000,3 +np.float32,0x3f7e2dba,0x3fc472f6,3 +np.float32,0x3d9c4d74,0x3f805f7a,3 +np.float32,0x7f18c665,0x7f800000,3 +np.float32,0x7f003e23,0x7f800000,3 +np.float32,0x3d936fa0,0x3f8054f3,3 +np.float32,0x3f32034f,0x3fa0368e,3 +np.float32,0xff087604,0x7f800000,3 +np.float32,0x380a5,0x3f800000,3 +np.float32,0x3f59694e,0x3fb10077,3 +np.float32,0x3e63e648,0x3f832ee4,3 +np.float32,0x80712f42,0x3f800000,3 +np.float32,0x3e169908,0x3f816302,3 
+np.float32,0x3f2d766e,0x3f9e8692,3 +np.float32,0x3d6412e0,0x3f8032d0,3 +np.float32,0xbde689e8,0x3f80cfd4,3 +np.float32,0x483e2e,0x3f800000,3 +np.float32,0xff1ba2d0,0x7f800000,3 +np.float32,0x80136bff,0x3f800000,3 +np.float32,0x3f72534c,0x3fbdc1d4,3 +np.float32,0x3e9eb381,0x3f8632c6,3 +np.float32,0x3e142892,0x3f815795,3 +np.float32,0x0,0x3f800000,3 +np.float32,0x2f2528,0x3f800000,3 +np.float32,0x7f38be13,0x7f800000,3 +np.float32,0xfeee6896,0x7f800000,3 +np.float32,0x7f09095d,0x7f800000,3 +np.float32,0xbe94d,0x3f800000,3 +np.float32,0xbedcf8d4,0x3f8c1b74,3 +np.float32,0xbf694c02,0x3fb8ef07,3 +np.float32,0x3e2261f8,0x3f819cde,3 +np.float32,0xbf01d3ce,0x3f90d0e0,3 +np.float32,0xbeb7b3a2,0x3f8853de,3 +np.float32,0x8046de7b,0x3f800000,3 +np.float32,0xbcb45ea0,0x3f8007f1,3 +np.float32,0x3eef14af,0x3f8e35dd,3 +np.float32,0xbf047316,0x3f91846e,3 +np.float32,0x801cef45,0x3f800000,3 +np.float32,0x3e9ad891,0x3f85e609,3 +np.float32,0xff20e9cf,0x7f800000,3 +np.float32,0x80068434,0x3f800000,3 +np.float32,0xbe253020,0x3f81ab49,3 +np.float32,0x3f13f4b8,0x3f95fac9,3 +np.float32,0x804accd1,0x3f800000,3 +np.float32,0x3dee3e10,0x3f80ddf7,3 +np.float32,0xbe6c4690,0x3f836c29,3 +np.float32,0xff30d431,0x7f800000,3 +np.float32,0xbec82416,0x3f89e791,3 +np.float32,0x3f30bbcb,0x3f9fbbcc,3 +np.float32,0x3f5620a2,0x3faf72b8,3 +np.float32,0x807a8130,0x3f800000,3 +np.float32,0x3e3cb02d,0x3f822de0,3 +np.float32,0xff4839ac,0x7f800000,3 +np.float32,0x800a3e9c,0x3f800000,3 +np.float32,0x3dffd65b,0x3f810002,3 +np.float32,0xbf2b1492,0x3f9da987,3 +np.float32,0xbf21602c,0x3f9a48fe,3 +np.float32,0x512531,0x3f800000,3 +np.float32,0x24b99a,0x3f800000,3 +np.float32,0xbf53e345,0x3fae67b1,3 +np.float32,0xff2126ec,0x7f800000,3 +np.float32,0x7e79b49d,0x7f800000,3 +np.float32,0x3ea3cf04,0x3f869b6f,3 +np.float32,0x7f270059,0x7f800000,3 +np.float32,0x3f625b2f,0x3fb561e1,3 +np.float32,0xbf59947e,0x3fb11519,3 +np.float32,0xfe0d1c64,0x7f800000,3 +np.float32,0xbf3f3eae,0x3fa568e2,3 +np.float32,0x7c04d1,0x3f800000,3 +np.float32,0x7e66bd,0x3f800000,3 +np.float32,0x8011880d,0x3f800000,3 +np.float32,0x3f302f07,0x3f9f8759,3 +np.float32,0x4e3375,0x3f800000,3 +np.float32,0xfe67a134,0x7f800000,3 +np.float32,0xff670249,0x7f800000,3 +np.float32,0x7e19f27d,0x7f800000,3 +np.float32,0xbf36ce12,0x3fa20b81,3 +np.float32,0xbe6bcfc4,0x3f8368b5,3 +np.float32,0x76fcba,0x3f800000,3 +np.float32,0x7f30abaf,0x7f800000,3 +np.float32,0x3f4c1f6d,0x3faae43c,3 +np.float32,0x7f61f44a,0x7f800000,3 +np.float32,0xbf4bb3c9,0x3faab4af,3 +np.float32,0xbda15ee0,0x3f8065c6,3 +np.float32,0xfbb4e800,0x7f800000,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x80568501,0x3f800000,3 +np.float32,0xfeb285e4,0x7f800000,3 +np.float32,0x804423a7,0x3f800000,3 +np.float32,0x7e6c0f21,0x7f800000,3 +np.float32,0x7f136b3c,0x7f800000,3 +np.float32,0x3f2d08e6,0x3f9e5e9c,3 +np.float32,0xbf6b454e,0x3fb9f7e6,3 +np.float32,0x3e6bceb0,0x3f8368ad,3 +np.float32,0xff1ad16a,0x7f800000,3 +np.float32,0x7cce1a04,0x7f800000,3 +np.float32,0xff7bcf95,0x7f800000,3 +np.float32,0x8049788d,0x3f800000,3 +np.float32,0x7ec45918,0x7f800000,3 +np.float32,0xff7fffff,0x7f800000,3 +np.float32,0x8039a1a0,0x3f800000,3 +np.float32,0x7e90cd72,0x7f800000,3 +np.float32,0xbf7dfd53,0x3fc456cc,3 +np.float32,0x3eeeb664,0x3f8e2a76,3 +np.float32,0x8055ef9b,0x3f800000,3 +np.float32,0x7ee06ddd,0x7f800000,3 +np.float32,0xba2cc000,0x3f800002,3 +np.float32,0x806da632,0x3f800000,3 +np.float32,0x7ecfaaf5,0x7f800000,3 +np.float32,0x3ddd12e6,0x3f80bf19,3 +np.float32,0xbf754394,0x3fbf60b1,3 +np.float32,0x6f3f19,0x3f800000,3 
+np.float32,0x800a9af0,0x3f800000,3 +np.float32,0xfeef13ea,0x7f800000,3 +np.float32,0x7f74841f,0x7f800000,3 +np.float32,0xbeb9a2f0,0x3f888181,3 +np.float32,0x77cbb,0x3f800000,3 +np.float32,0xbf587f84,0x3fb0911b,3 +np.float32,0x210ba5,0x3f800000,3 +np.float32,0x3ee60a28,0x3f8d2367,3 +np.float32,0xbe3731ac,0x3f820dc7,3 +np.float32,0xbee8cfee,0x3f8d765e,3 +np.float32,0x7b2ef179,0x7f800000,3 +np.float32,0xfe81377c,0x7f800000,3 +np.float32,0x6ac98c,0x3f800000,3 +np.float32,0x3f51f144,0x3fad8288,3 +np.float32,0x80785750,0x3f800000,3 +np.float32,0x3f46615a,0x3fa864ff,3 +np.float32,0xbf35ac9e,0x3fa19b8e,3 +np.float32,0x7f0982ac,0x7f800000,3 +np.float32,0x1b2610,0x3f800000,3 +np.float32,0x3ed8bb25,0x3f8ba3df,3 +np.float32,0xbeb41bac,0x3f88006d,3 +np.float32,0xff48e89d,0x7f800000,3 +np.float32,0x3ed0ab8c,0x3f8ac755,3 +np.float32,0xbe64671c,0x3f833282,3 +np.float32,0x64bce4,0x3f800000,3 +np.float32,0x284f79,0x3f800000,3 +np.float32,0x7e09faa7,0x7f800000,3 +np.float32,0x4376c1,0x3f800000,3 +np.float32,0x805ca8c0,0x3f800000,3 +np.float32,0xff0859d5,0x7f800000,3 +np.float32,0xbed2f3b2,0x3f8b04dd,3 +np.float32,0x8045bd0c,0x3f800000,3 +np.float32,0x3f0e6216,0x3f94503f,3 +np.float32,0x3f41e3ae,0x3fa68035,3 +np.float32,0x80088ccc,0x3f800000,3 +np.float32,0x3f37fc19,0x3fa2812f,3 +np.float32,0x71c87d,0x3f800000,3 +np.float32,0x8024f4b2,0x3f800000,3 +np.float32,0xff78dd88,0x7f800000,3 +np.float32,0xbda66c90,0x3f806c40,3 +np.float32,0x7f33ef0d,0x7f800000,3 +np.float32,0x46a343,0x3f800000,3 +np.float32,0xff1dce38,0x7f800000,3 +np.float32,0x1b935d,0x3f800000,3 +np.float32,0x3ebec598,0x3f88fd0e,3 +np.float32,0xff115530,0x7f800000,3 +np.float32,0x803916aa,0x3f800000,3 +np.float32,0xff60a3e2,0x7f800000,3 +np.float32,0x3b8ddd48,0x3f80004f,3 +np.float32,0x3f761b6e,0x3fbfd8ea,3 +np.float32,0xbdf55b88,0x3f80eb70,3 +np.float32,0x37374,0x3f800000,3 +np.float32,0x3de150e0,0x3f80c682,3 +np.float32,0x3f343278,0x3fa10a83,3 +np.float32,0xbe9baefa,0x3f85f68b,3 +np.float32,0x3d8d43,0x3f800000,3 +np.float32,0x3e80994b,0x3f840f0c,3 +np.float32,0xbe573c6c,0x3f82d685,3 +np.float32,0x805b83b4,0x3f800000,3 +np.float32,0x683d88,0x3f800000,3 +np.float32,0x692465,0x3f800000,3 +np.float32,0xbdc345f8,0x3f809511,3 +np.float32,0x3f7c1c5a,0x3fc3406f,3 +np.float32,0xbf40bef3,0x3fa606df,3 +np.float32,0xff1e25b9,0x7f800000,3 +np.float32,0x3e4481e0,0x3f825d37,3 +np.float32,0x75d188,0x3f800000,3 +np.float32,0x3ea53cec,0x3f86b956,3 +np.float32,0xff105a54,0x7f800000,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f11f0b0,0x7f800000,3 +np.float32,0xbf58a57d,0x3fb0a328,3 +np.float32,0xbdd11e38,0x3f80aaf8,3 +np.float32,0xbea94adc,0x3f870fa0,3 +np.float32,0x3e9dd780,0x3f862180,3 +np.float32,0xff1786b9,0x7f800000,3 +np.float32,0xfec46aa2,0x7f800000,3 +np.float32,0x7f4300c1,0x7f800000,3 +np.float32,0x29ba2b,0x3f800000,3 +np.float32,0x3f4112e2,0x3fa62993,3 +np.float32,0xbe6c9224,0x3f836e5d,3 +np.float32,0x7f0e42a3,0x7f800000,3 +np.float32,0xff6390ad,0x7f800000,3 +np.float32,0x3f54e374,0x3faede94,3 +np.float32,0x7f2642a2,0x7f800000,3 +np.float32,0x7f46b2be,0x7f800000,3 +np.float32,0xfe59095c,0x7f800000,3 +np.float32,0x7146a0,0x3f800000,3 +np.float32,0x3f07763d,0x3f925786,3 +np.float32,0x3d172780,0x3f801651,3 +np.float32,0xff66f1c5,0x7f800000,3 +np.float32,0xff025349,0x7f800000,3 +np.float32,0x6ce99d,0x3f800000,3 +np.float32,0xbf7e4f50,0x3fc48685,3 +np.float32,0xbeff8ca2,0x3f904708,3 +np.float32,0x3e6c8,0x3f800000,3 +np.float32,0x7f7153dc,0x7f800000,3 +np.float32,0xbedcf612,0x3f8c1b26,3 +np.float32,0xbbc2f180,0x3f800094,3 
+np.float32,0xbf397399,0x3fa314b8,3 +np.float32,0x6c6e35,0x3f800000,3 +np.float32,0x7f50a88b,0x7f800000,3 +np.float32,0xfe84093e,0x7f800000,3 +np.float32,0x3f737b9d,0x3fbe6478,3 +np.float32,0x7f6a5340,0x7f800000,3 +np.float32,0xbde83c20,0x3f80d2e7,3 +np.float32,0xff769ce9,0x7f800000,3 +np.float32,0xfdd33c30,0x7f800000,3 +np.float32,0xbc95cb60,0x3f80057a,3 +np.float32,0x8007a40d,0x3f800000,3 +np.float32,0x3f55d90c,0x3faf5132,3 +np.float32,0x80282082,0x3f800000,3 +np.float32,0xbf43b1f2,0x3fa7418c,3 +np.float32,0x3f1dc7cb,0x3f991731,3 +np.float32,0xbd4346a0,0x3f80253f,3 +np.float32,0xbf5aa82a,0x3fb19946,3 +np.float32,0x3f4b8c22,0x3faaa333,3 +np.float32,0x3d13468c,0x3f80152f,3 +np.float32,0x7db77097,0x7f800000,3 +np.float32,0x4a00df,0x3f800000,3 +np.float32,0xbedea5e0,0x3f8c4b64,3 +np.float32,0x80482543,0x3f800000,3 +np.float32,0xbef344fe,0x3f8eb8dd,3 +np.float32,0x7ebd4044,0x7f800000,3 +np.float32,0xbf512c0e,0x3fad287e,3 +np.float32,0x3db28cce,0x3f807c9c,3 +np.float32,0xbd0f5ae0,0x3f801412,3 +np.float32,0xfe7ed9ac,0x7f800000,3 +np.float32,0x3eb1aa82,0x3f87c8b4,3 +np.float32,0xfef1679e,0x7f800000,3 +np.float32,0xff3629f2,0x7f800000,3 +np.float32,0xff3562b4,0x7f800000,3 +np.float32,0x3dcafe1d,0x3f80a118,3 +np.float32,0xfedf242a,0x7f800000,3 +np.float32,0xbf43102a,0x3fa6fda4,3 +np.float32,0x8028834e,0x3f800000,3 +np.float32,0x805c8513,0x3f800000,3 +np.float32,0x3f59306a,0x3fb0e550,3 +np.float32,0x3eda2c9c,0x3f8bcc4a,3 +np.float32,0x80023524,0x3f800000,3 +np.float32,0x7ef72879,0x7f800000,3 +np.float32,0x661c8a,0x3f800000,3 +np.float32,0xfec3ba6c,0x7f800000,3 +np.float32,0x805aaca6,0x3f800000,3 +np.float32,0xff5c1f13,0x7f800000,3 +np.float32,0x3f6ab3f4,0x3fb9ab6b,3 +np.float32,0x3f014896,0x3f90ac20,3 +np.float32,0x3f030584,0x3f91222a,3 +np.float32,0xbf74853d,0x3fbef71d,3 +np.float32,0xbf534ee0,0x3fae2323,3 +np.float32,0x2c90c3,0x3f800000,3 +np.float32,0x7f62ad25,0x7f800000,3 +np.float32,0x1c8847,0x3f800000,3 +np.float32,0x7e2a8d43,0x7f800000,3 +np.float32,0x807a09cd,0x3f800000,3 +np.float32,0x413871,0x3f800000,3 +np.float32,0x80063692,0x3f800000,3 +np.float32,0x3edaf29b,0x3f8be211,3 +np.float32,0xbf64a7ab,0x3fb68b2d,3 +np.float32,0xfe56a720,0x7f800000,3 +np.float32,0xbf54a8d4,0x3faec350,3 +np.float32,0x3ecbaef7,0x3f8a4350,3 +np.float32,0x3f413714,0x3fa63890,3 +np.float32,0x7d3aa8,0x3f800000,3 +np.float32,0xbea9a13c,0x3f8716e7,3 +np.float32,0x7ef7553e,0x7f800000,3 +np.float32,0x8056f29f,0x3f800000,3 +np.float32,0xff1f7ffe,0x7f800000,3 +np.float32,0x3f41953b,0x3fa65f9c,3 +np.float32,0x3daa2f,0x3f800000,3 +np.float32,0xff0893e4,0x7f800000,3 +np.float32,0xbefc7ec6,0x3f8fe207,3 +np.float32,0xbb026800,0x3f800011,3 +np.float32,0x341e4f,0x3f800000,3 +np.float32,0x3e7b708a,0x3f83e0d1,3 +np.float32,0xa18cb,0x3f800000,3 +np.float32,0x7e290239,0x7f800000,3 +np.float32,0xbf4254f2,0x3fa6af62,3 +np.float32,0x80000000,0x3f800000,3 +np.float32,0x3f0a6c,0x3f800000,3 +np.float32,0xbec44d28,0x3f898609,3 +np.float32,0xf841f,0x3f800000,3 +np.float32,0x7f01a693,0x7f800000,3 +np.float32,0x8053340b,0x3f800000,3 +np.float32,0xfd4e7990,0x7f800000,3 +np.float32,0xbf782f1f,0x3fc10356,3 +np.float32,0xbe962118,0x3f858acc,3 +np.float32,0xfe8cd702,0x7f800000,3 +np.float32,0x7ecd986f,0x7f800000,3 +np.float32,0x3ebe775f,0x3f88f59b,3 +np.float32,0x8065524f,0x3f800000,3 +np.float32,0x3ede7fc4,0x3f8c471e,3 +np.float32,0x7f5e15ea,0x7f800000,3 +np.float32,0xbe871ada,0x3f847b78,3 +np.float32,0x3f21958b,0x3f9a5af7,3 +np.float32,0x3f64d480,0x3fb6a1fa,3 +np.float32,0xff18b0e9,0x7f800000,3 +np.float32,0xbf0840dd,0x3f928fd9,3 
+np.float32,0x80104f5d,0x3f800000,3 +np.float32,0x643b94,0x3f800000,3 +np.float32,0xbc560a80,0x3f8002cc,3 +np.float32,0x3f5c75d6,0x3fb2786e,3 +np.float32,0x7f365fc9,0x7f800000,3 +np.float32,0x54e965,0x3f800000,3 +np.float32,0x6dcd4d,0x3f800000,3 +np.float32,0x3f2057a0,0x3f99f04d,3 +np.float32,0x272fa3,0x3f800000,3 +np.float32,0xff423dc9,0x7f800000,3 +np.float32,0x80273463,0x3f800000,3 +np.float32,0xfe21cc78,0x7f800000,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x802feb65,0x3f800000,3 +np.float32,0x3dc733d0,0x3f809b21,3 +np.float32,0x65d56b,0x3f800000,3 +np.float32,0x80351d8e,0x3f800000,3 +np.float32,0xbf244247,0x3f9b43dd,3 +np.float32,0x7f328e7e,0x7f800000,3 +np.float32,0x7f4d9712,0x7f800000,3 +np.float32,0x2c505d,0x3f800000,3 +np.float32,0xbf232ebe,0x3f9ae5a0,3 +np.float32,0x804a363a,0x3f800000,3 +np.float32,0x80417102,0x3f800000,3 +np.float32,0xbf48b170,0x3fa963d4,3 +np.float32,0x7ea3e3b6,0x7f800000,3 +np.float32,0xbf41415b,0x3fa63cd2,3 +np.float32,0xfe3af7c8,0x7f800000,3 +np.float32,0x7f478010,0x7f800000,3 +np.float32,0x80143113,0x3f800000,3 +np.float32,0x3f7626a7,0x3fbfdf2e,3 +np.float32,0xfea20b0a,0x7f800000,3 +np.float32,0x80144d64,0x3f800000,3 +np.float32,0x7db9ba47,0x7f800000,3 +np.float32,0x7f7fffff,0x7f800000,3 +np.float32,0xbe410834,0x3f8247ef,3 +np.float32,0x14a7af,0x3f800000,3 +np.float32,0x7eaebf9e,0x7f800000,3 +np.float32,0xff800000,0x7f800000,3 +np.float32,0x3f0a7d8e,0x3f9330fd,3 +np.float32,0x3ef780,0x3f800000,3 +np.float32,0x3f62253e,0x3fb546d1,3 +np.float32,0x3f4cbeac,0x3fab2acc,3 +np.float32,0x25db1,0x3f800000,3 +np.float32,0x65c54a,0x3f800000,3 +np.float32,0x800f0645,0x3f800000,3 +np.float32,0x3ed28c78,0x3f8af9f0,3 +np.float32,0x8040c6ce,0x3f800000,3 +np.float32,0x5e4e9a,0x3f800000,3 +np.float32,0xbd3fd2b0,0x3f8023f1,3 +np.float32,0xbf5d2d3f,0x3fb2d1b6,3 +np.float32,0x7ead999f,0x7f800000,3 +np.float32,0xbf30dc86,0x3f9fc805,3 +np.float32,0xff2b0a62,0x7f800000,3 +np.float32,0x3d5180e9,0x3f802adf,3 +np.float32,0x3f62716f,0x3fb56d0d,3 +np.float32,0x7e82ae9c,0x7f800000,3 +np.float32,0xfe2d4bdc,0x7f800000,3 +np.float32,0x805cc7d4,0x3f800000,3 +np.float32,0xfb50f700,0x7f800000,3 +np.float32,0xff57b684,0x7f800000,3 +np.float32,0x80344f01,0x3f800000,3 +np.float32,0x7f2af372,0x7f800000,3 +np.float32,0xfeab6204,0x7f800000,3 +np.float32,0x30b251,0x3f800000,3 +np.float32,0x3eed8cc4,0x3f8e0698,3 +np.float32,0x7eeb1c6a,0x7f800000,3 +np.float32,0x3f17ece6,0x3f9735b0,3 +np.float32,0x21e985,0x3f800000,3 +np.float32,0x3f3a7df3,0x3fa37e34,3 +np.float32,0x802a14a2,0x3f800000,3 +np.float32,0x807d4d5b,0x3f800000,3 +np.float32,0x7f6093ce,0x7f800000,3 +np.float32,0x3f800000,0x3fc583ab,3 +np.float32,0x3da2c26e,0x3f806789,3 +np.float32,0xfe05f278,0x7f800000,3 +np.float32,0x800000,0x3f800000,3 +np.float32,0xbee63342,0x3f8d282e,3 +np.float32,0xbf225586,0x3f9a9bd4,3 +np.float32,0xbed60e86,0x3f8b59ba,3 +np.float32,0xbec99484,0x3f8a0ca3,3 +np.float32,0x3e967c71,0x3f859199,3 +np.float32,0x7f26ab62,0x7f800000,3 +np.float32,0xca7f4,0x3f800000,3 +np.float32,0xbf543790,0x3fae8ebc,3 +np.float32,0x3e4c1ed9,0x3f828d2d,3 +np.float32,0xbdf37f88,0x3f80e7e1,3 +np.float32,0xff0cc44e,0x7f800000,3 +np.float32,0x5dea48,0x3f800000,3 +np.float32,0x31023c,0x3f800000,3 +np.float32,0x3ea10733,0x3f866208,3 +np.float32,0x3e11e6f2,0x3f814d2e,3 +np.float32,0x80641960,0x3f800000,3 +np.float32,0x3ef779a8,0x3f8f3edb,3 +np.float32,0x3f2a5062,0x3f9d632a,3 +np.float32,0x2b7d34,0x3f800000,3 +np.float32,0x3eeb95c5,0x3f8dca67,3 +np.float32,0x805c1357,0x3f800000,3 +np.float32,0x3db3a79d,0x3f807e29,3 
+np.float32,0xfded1900,0x7f800000,3 +np.float32,0x45f362,0x3f800000,3 +np.float32,0x451f38,0x3f800000,3 +np.float32,0x801d3ae5,0x3f800000,3 +np.float32,0x458d45,0x3f800000,3 +np.float32,0xfda9d298,0x7f800000,3 +np.float32,0x467439,0x3f800000,3 +np.float32,0x7f66554a,0x7f800000,3 +np.float32,0xfef2375a,0x7f800000,3 +np.float32,0xbf33fc47,0x3fa0f5d7,3 +np.float32,0x3f75ba69,0x3fbfa2d0,3 +np.float32,0xfeb625b2,0x7f800000,3 +np.float32,0x8066b371,0x3f800000,3 +np.float32,0x3f5cb4e9,0x3fb29718,3 +np.float32,0x7f3b6a58,0x7f800000,3 +np.float32,0x7f6b35ea,0x7f800000,3 +np.float32,0xbf6ee555,0x3fbbe5be,3 +np.float32,0x3d836e21,0x3f804380,3 +np.float32,0xff43cd0c,0x7f800000,3 +np.float32,0xff55c1fa,0x7f800000,3 +np.float32,0xbf0dfccc,0x3f9432a6,3 +np.float32,0x3ed92121,0x3f8baf00,3 +np.float32,0x80068cc1,0x3f800000,3 +np.float32,0xff0103f9,0x7f800000,3 +np.float32,0x7e51b175,0x7f800000,3 +np.float32,0x8012f214,0x3f800000,3 +np.float32,0x62d298,0x3f800000,3 +np.float32,0xbf3e1525,0x3fa4ef8d,3 +np.float32,0x806b4882,0x3f800000,3 +np.float32,0xbf38c146,0x3fa2ce7c,3 +np.float32,0xbed59c30,0x3f8b4d70,3 +np.float32,0x3d1910c0,0x3f8016e2,3 +np.float32,0x7f33d55b,0x7f800000,3 +np.float32,0x7f5800e3,0x7f800000,3 +np.float32,0x5b2c5d,0x3f800000,3 +np.float32,0x807be750,0x3f800000,3 +np.float32,0x7eb297c1,0x7f800000,3 +np.float32,0x7dafee62,0x7f800000,3 +np.float32,0x7d9e23f0,0x7f800000,3 +np.float32,0x3e580537,0x3f82dbd8,3 +np.float32,0xbf800000,0x3fc583ab,3 +np.float32,0x7f40f880,0x7f800000,3 +np.float32,0x775ad3,0x3f800000,3 +np.float32,0xbedacd36,0x3f8bddf3,3 +np.float32,0x2138f6,0x3f800000,3 +np.float32,0x52c3b7,0x3f800000,3 +np.float32,0x8041cfdd,0x3f800000,3 +np.float32,0x7bf16791,0x7f800000,3 +np.float32,0xbe95869c,0x3f857f55,3 +np.float32,0xbf199796,0x3f97bcaf,3 +np.float32,0x3ef8da38,0x3f8f6b45,3 +np.float32,0x803f3648,0x3f800000,3 +np.float32,0x80026fd2,0x3f800000,3 +np.float32,0x7eb3ac26,0x7f800000,3 +np.float32,0x3e49921b,0x3f827ce8,3 +np.float32,0xbf689aed,0x3fb892de,3 +np.float32,0x3f253509,0x3f9b9779,3 +np.float32,0xff17894a,0x7f800000,3 +np.float32,0x3cd12639,0x3f800aae,3 +np.float32,0x1db14b,0x3f800000,3 +np.float32,0x39a0bf,0x3f800000,3 +np.float32,0xfdfe1d08,0x7f800000,3 +np.float32,0xff416cd2,0x7f800000,3 +np.float32,0x8070d818,0x3f800000,3 +np.float32,0x3e516e12,0x3f82afb8,3 +np.float32,0x80536651,0x3f800000,3 +np.float32,0xbf2903d2,0x3f9cecb7,3 +np.float32,0x3e896ae4,0x3f84a353,3 +np.float32,0xbd6ba2c0,0x3f80363d,3 +np.float32,0x80126d3e,0x3f800000,3 +np.float32,0xfd9d43d0,0x7f800000,3 +np.float32,0x7b56b6,0x3f800000,3 +np.float32,0xff04718e,0x7f800000,3 +np.float32,0x31440f,0x3f800000,3 +np.float32,0xbf7a1313,0x3fc215c9,3 +np.float32,0x7f43d6a0,0x7f800000,3 +np.float32,0x3f566503,0x3faf92cc,3 +np.float32,0xbf39eb0e,0x3fa343f1,3 +np.float32,0xbe35fd70,0x3f8206df,3 +np.float32,0x800c36ac,0x3f800000,3 +np.float32,0x60d061,0x3f800000,3 +np.float32,0x80453e12,0x3f800000,3 +np.float32,0xfe17c36c,0x7f800000,3 +np.float32,0x3d8c72,0x3f800000,3 +np.float32,0xfe8e9134,0x7f800000,3 +np.float32,0xff5d89de,0x7f800000,3 +np.float32,0x7f45020e,0x7f800000,3 +np.float32,0x3f28225e,0x3f9c9d01,3 +np.float32,0xbf3b6900,0x3fa3dbdd,3 +np.float32,0x80349023,0x3f800000,3 +np.float32,0xbf14d780,0x3f964042,3 +np.float32,0x3f56b5d2,0x3fafb8c3,3 +np.float32,0x800c639c,0x3f800000,3 +np.float32,0x7f7a19c8,0x7f800000,3 +np.float32,0xbf7a0815,0x3fc20f86,3 +np.float32,0xbec55926,0x3f89a06e,3 +np.float32,0x4b2cd2,0x3f800000,3 +np.float32,0xbf271eb2,0x3f9c41c8,3 +np.float32,0xff26e168,0x7f800000,3 
+np.float32,0x800166b2,0x3f800000,3 +np.float32,0xbde97e38,0x3f80d532,3 +np.float32,0xbf1f93ec,0x3f99af1a,3 +np.float32,0x7f2896ed,0x7f800000,3 +np.float32,0x3da7d96d,0x3f806e1d,3 +np.float32,0x802b7237,0x3f800000,3 +np.float32,0xfdca6bc0,0x7f800000,3 +np.float32,0xbed2e300,0x3f8b0318,3 +np.float32,0x8079d9e8,0x3f800000,3 +np.float32,0x3f388c81,0x3fa2b9c2,3 +np.float32,0x3ed2607c,0x3f8af54a,3 +np.float32,0xff287de6,0x7f800000,3 +np.float32,0x3f55ed89,0x3faf5ac9,3 +np.float32,0x7f5b6af7,0x7f800000,3 +np.float32,0xbeb24730,0x3f87d698,3 +np.float32,0x1,0x3f800000,3 +np.float32,0x3f3a2350,0x3fa35a3b,3 +np.float32,0x8013b422,0x3f800000,3 +np.float32,0x3e9a6560,0x3f85dd35,3 +np.float32,0x80510631,0x3f800000,3 +np.float32,0xfeae39d6,0x7f800000,3 +np.float32,0x7eb437ad,0x7f800000,3 +np.float32,0x8047545b,0x3f800000,3 +np.float32,0x806a1c71,0x3f800000,3 +np.float32,0xbe5543f0,0x3f82c93b,3 +np.float32,0x40e8d,0x3f800000,3 +np.float32,0x63d18b,0x3f800000,3 +np.float32,0x1fa1ea,0x3f800000,3 +np.float32,0x801944e0,0x3f800000,3 +np.float32,0xbf4c7ac6,0x3fab0cae,3 +np.float32,0x7f2679d4,0x7f800000,3 +np.float32,0x3f0102fc,0x3f9099d0,3 +np.float32,0x7e44bdc1,0x7f800000,3 +np.float32,0xbf2072f6,0x3f99f970,3 +np.float32,0x5c7d38,0x3f800000,3 +np.float32,0x30a2e6,0x3f800000,3 +np.float32,0x805b9ca3,0x3f800000,3 +np.float32,0x7cc24ad5,0x7f800000,3 +np.float32,0x3f4f7920,0x3fac6357,3 +np.float32,0x111d62,0x3f800000,3 +np.float32,0xbf4de40a,0x3fabad77,3 +np.float32,0x805d0354,0x3f800000,3 +np.float32,0xbb3d2b00,0x3f800023,3 +np.float32,0x3ef229e7,0x3f8e960b,3 +np.float32,0x3f15754e,0x3f9670e0,3 +np.float32,0xbf689c6b,0x3fb893a5,3 +np.float32,0xbf3796c6,0x3fa2599b,3 +np.float32,0xbe95303c,0x3f8578f2,3 +np.float32,0xfee330de,0x7f800000,3 +np.float32,0xff0d9705,0x7f800000,3 +np.float32,0xbeb0ebd0,0x3f87b7dd,3 +np.float32,0xbf4d5a13,0x3fab6fe7,3 +np.float32,0x80142f5a,0x3f800000,3 +np.float32,0x7e01a87b,0x7f800000,3 +np.float32,0xbe45e5ec,0x3f8265d7,3 +np.float32,0x7f4ac255,0x7f800000,3 +np.float32,0x3ebf6a60,0x3f890ccb,3 +np.float32,0x7f771e16,0x7f800000,3 +np.float32,0x3f41834e,0x3fa6582b,3 +np.float32,0x3f7f6f98,0x3fc52ef0,3 +np.float32,0x7e4ad775,0x7f800000,3 +np.float32,0x3eb39991,0x3f87f4c4,3 +np.float32,0x1e3f4,0x3f800000,3 +np.float32,0x7e84ba19,0x7f800000,3 +np.float32,0x80640be4,0x3f800000,3 +np.float32,0x3f459fc8,0x3fa81272,3 +np.float32,0x3f554ed0,0x3faf109b,3 +np.float32,0x3c6617,0x3f800000,3 +np.float32,0x7f441158,0x7f800000,3 +np.float32,0x7f66e6d8,0x7f800000,3 +np.float32,0x7f565152,0x7f800000,3 +np.float32,0x7f16d550,0x7f800000,3 +np.float32,0xbd4f1950,0x3f8029e5,3 +np.float32,0xcf722,0x3f800000,3 +np.float32,0x3f37d6fd,0x3fa272ad,3 +np.float32,0xff7324ea,0x7f800000,3 +np.float32,0x804bc246,0x3f800000,3 +np.float32,0x7f099ef8,0x7f800000,3 +np.float32,0x5f838b,0x3f800000,3 +np.float32,0x80523534,0x3f800000,3 +np.float32,0x3f595e84,0x3fb0fb50,3 +np.float32,0xfdef8ac8,0x7f800000,3 +np.float32,0x3d9a07,0x3f800000,3 +np.float32,0x410f61,0x3f800000,3 +np.float32,0xbf715dbb,0x3fbd3bcb,3 +np.float32,0xbedd4734,0x3f8c242f,3 +np.float32,0x7e86739a,0x7f800000,3 +np.float32,0x3e81f144,0x3f8424fe,3 +np.float32,0x7f6342d1,0x7f800000,3 +np.float32,0xff6919a3,0x7f800000,3 +np.float32,0xff051878,0x7f800000,3 +np.float32,0x800ba28f,0x3f800000,3 +np.float32,0xfefab3d8,0x7f800000,3 +np.float32,0xff612a84,0x7f800000,3 +np.float32,0x800cd5ab,0x3f800000,3 +np.float32,0x802a07ae,0x3f800000,3 +np.float32,0xfef6ee3a,0x7f800000,3 +np.float32,0x8037e896,0x3f800000,3 +np.float32,0x3ef2d86f,0x3f8eab7d,3 
+np.float32,0x3eafe53d,0x3f87a0cb,3 +np.float32,0xba591c00,0x3f800003,3 +np.float32,0x3e9ed028,0x3f863508,3 +np.float32,0x4a12a8,0x3f800000,3 +np.float32,0xbee55c84,0x3f8d0f45,3 +np.float32,0x8038a8d3,0x3f800000,3 +np.float32,0xff055243,0x7f800000,3 +np.float32,0xbf659067,0x3fb701ca,3 +np.float32,0xbee36a86,0x3f8cd5e0,3 +np.float32,0x7f1d74c1,0x7f800000,3 +np.float32,0xbf7657df,0x3fbffaad,3 +np.float32,0x7e37ee34,0x7f800000,3 +np.float32,0xff04bc74,0x7f800000,3 +np.float32,0x806d194e,0x3f800000,3 +np.float32,0x7f5596c3,0x7f800000,3 +np.float32,0xbe09d268,0x3f81293e,3 +np.float32,0x79ff75,0x3f800000,3 +np.float32,0xbf55479c,0x3faf0d3e,3 +np.float32,0xbe5428ec,0x3f82c1d4,3 +np.float32,0x3f624134,0x3fb554d7,3 +np.float32,0x2ccb8a,0x3f800000,3 +np.float32,0xfc082040,0x7f800000,3 +np.float32,0xff315467,0x7f800000,3 +np.float32,0x3e6ea2d2,0x3f837dd5,3 +np.float32,0x8020fdd1,0x3f800000,3 +np.float32,0x7f0416a1,0x7f800000,3 +np.float32,0x710a1b,0x3f800000,3 +np.float32,0x3dfcd050,0x3f80f9fc,3 +np.float32,0xfe995e96,0x7f800000,3 +np.float32,0x3f020d00,0x3f90e006,3 +np.float32,0x8064263e,0x3f800000,3 +np.float32,0xfcee4160,0x7f800000,3 +np.float32,0x801b3a18,0x3f800000,3 +np.float32,0x3f62c984,0x3fb59955,3 +np.float32,0x806e8355,0x3f800000,3 +np.float32,0x7e94f65d,0x7f800000,3 +np.float32,0x1173de,0x3f800000,3 +np.float32,0x3e3ff3b7,0x3f824166,3 +np.float32,0x803b4aea,0x3f800000,3 +np.float32,0x804c5bcc,0x3f800000,3 +np.float32,0x509fe5,0x3f800000,3 +np.float32,0xbf33b5ee,0x3fa0db0b,3 +np.float32,0x3f2ac15c,0x3f9d8ba4,3 +np.float32,0x7f2c54f8,0x7f800000,3 +np.float32,0x7f33d933,0x7f800000,3 +np.float32,0xbf09b2b4,0x3f92f795,3 +np.float32,0x805db8d6,0x3f800000,3 +np.float32,0x6d6e66,0x3f800000,3 +np.float32,0x3ddfea92,0x3f80c40c,3 +np.float32,0xfda719b8,0x7f800000,3 +np.float32,0x5d657f,0x3f800000,3 +np.float32,0xbf005ba3,0x3f906df6,3 +np.float32,0xbf45e606,0x3fa8305c,3 +np.float32,0x5e9fd1,0x3f800000,3 +np.float32,0x8079dc45,0x3f800000,3 +np.float32,0x7e9c40e3,0x7f800000,3 +np.float32,0x6bd5f6,0x3f800000,3 +np.float32,0xbea14a0e,0x3f866761,3 +np.float32,0x7e7323f3,0x7f800000,3 +np.float32,0x7f0c0a79,0x7f800000,3 +np.float32,0xbf7d7aeb,0x3fc40b0f,3 +np.float32,0x437588,0x3f800000,3 +np.float32,0xbf356376,0x3fa17f63,3 +np.float32,0x7f129921,0x7f800000,3 +np.float32,0x7f47a52e,0x7f800000,3 +np.float32,0xba8cb400,0x3f800005,3 +np.float32,0x802284e0,0x3f800000,3 +np.float32,0xbe820f56,0x3f8426ec,3 +np.float32,0x7f2ef6cf,0x7f800000,3 +np.float32,0xbf70a090,0x3fbcd501,3 +np.float32,0xbf173fea,0x3f96ff6d,3 +np.float32,0x3e19c489,0x3f817224,3 +np.float32,0x7f429b30,0x7f800000,3 +np.float32,0xbdae4118,0x3f8076af,3 +np.float32,0x3e70ad30,0x3f838d41,3 +np.float32,0x335fed,0x3f800000,3 +np.float32,0xff5359cf,0x7f800000,3 +np.float32,0xbf17e42b,0x3f9732f1,3 +np.float32,0xff3a950b,0x7f800000,3 +np.float32,0xbcca70c0,0x3f800a02,3 +np.float32,0x3f2cda62,0x3f9e4dad,3 +np.float32,0x3f50c185,0x3facf805,3 +np.float32,0x80000001,0x3f800000,3 +np.float32,0x807b86d2,0x3f800000,3 +np.float32,0x8010c2cf,0x3f800000,3 +np.float32,0x3f130fb8,0x3f95b519,3 +np.float32,0x807dc546,0x3f800000,3 +np.float32,0xbee20740,0x3f8cad3f,3 +np.float32,0x80800000,0x3f800000,3 +np.float32,0x3cbd90c0,0x3f8008c6,3 +np.float32,0x3e693488,0x3f835571,3 +np.float32,0xbe70cd44,0x3f838e35,3 +np.float32,0xbe348dc8,0x3f81feb1,3 +np.float32,0x3f31ea90,0x3fa02d3f,3 +np.float32,0xfcd7e180,0x7f800000,3 +np.float32,0xbe30a75c,0x3f81e8d0,3 +np.float32,0x3e552c5a,0x3f82c89d,3 +np.float32,0xff513f74,0x7f800000,3 +np.float32,0xbdb16248,0x3f807afd,3 
+np.float64,0x7fbbf954e437f2a9,0x7ff0000000000000,1 +np.float64,0x581bbf0cb0379,0x3ff0000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xffb959a2a632b348,0x7ff0000000000000,1 +np.float64,0xbfdbd6baebb7ad76,0x3ff189a5ca25a6e1,1 +np.float64,0xbfd094ec9aa129da,0x3ff08a3f6b918065,1 +np.float64,0x3fe236753f646cea,0x3ff2a982660b8b43,1 +np.float64,0xbfe537fadfaa6ff6,0x3ff3a5f1c49c31bf,1 +np.float64,0xbfe31fa7dc663f50,0x3ff2f175374aef0e,1 +np.float64,0x3fc4b6569f296cb0,0x3ff035bde801bb53,1 +np.float64,0x800ce3c00f99c780,0x3ff0000000000000,1 +np.float64,0xbfebcde33e779bc6,0x3ff66de82cd30fc5,1 +np.float64,0x800dc09d3b7b813b,0x3ff0000000000000,1 +np.float64,0x80067d4c450cfa99,0x3ff0000000000000,1 +np.float64,0x1f6ade203ed7,0x3ff0000000000000,1 +np.float64,0xbfd4e311eca9c624,0x3ff0dc1383d6c3db,1 +np.float64,0x800649b3a54c9368,0x3ff0000000000000,1 +np.float64,0xcc14d1ab9829a,0x3ff0000000000000,1 +np.float64,0x3fc290c5bb25218b,0x3ff02b290f46dd6d,1 +np.float64,0x3fe78eb8376f1d70,0x3ff488f3bc259537,1 +np.float64,0xffc60f58e82c1eb0,0x7ff0000000000000,1 +np.float64,0x3fd35666ad26accd,0x3ff0bc6573da6bcd,1 +np.float64,0x7fc20257a62404ae,0x7ff0000000000000,1 +np.float64,0x80076d842e0edb09,0x3ff0000000000000,1 +np.float64,0x3fd8e44b08b1c898,0x3ff139b9a1f8428e,1 +np.float64,0x7fd6f6fc7a2dedf8,0x7ff0000000000000,1 +np.float64,0x3fa01b9f0820373e,0x3ff00206f8ad0f1b,1 +np.float64,0x69ed190ed3da4,0x3ff0000000000000,1 +np.float64,0xbfd997eb34b32fd6,0x3ff14be65a5db4a0,1 +np.float64,0x7feada2d0935b459,0x7ff0000000000000,1 +np.float64,0xbf80987120213100,0x3ff000226d29a9fc,1 +np.float64,0xbfef203e37fe407c,0x3ff82f51f04e8821,1 +np.float64,0xffe3dcf91fa7b9f2,0x7ff0000000000000,1 +np.float64,0x9a367283346cf,0x3ff0000000000000,1 +np.float64,0x800feb09f7bfd614,0x3ff0000000000000,1 +np.float64,0xbfe0319f9520633f,0x3ff217c5205c403f,1 +np.float64,0xbfa91eabd4323d50,0x3ff004ee4347f627,1 +np.float64,0x3fd19cbf7d23397f,0x3ff09c13e8e43571,1 +np.float64,0xffeb8945f0b7128b,0x7ff0000000000000,1 +np.float64,0x800a0eb4f2141d6a,0x3ff0000000000000,1 +np.float64,0xffe83e7312f07ce6,0x7ff0000000000000,1 +np.float64,0xffca53fee834a7fc,0x7ff0000000000000,1 +np.float64,0x800881cbf1710398,0x3ff0000000000000,1 +np.float64,0x80003e6abbe07cd6,0x3ff0000000000000,1 +np.float64,0xbfef6a998afed533,0x3ff859b7852d1b4d,1 +np.float64,0x3fd4eb7577a9d6eb,0x3ff0dcc601261aab,1 +np.float64,0xbfc9c12811338250,0x3ff05331268b05c8,1 +np.float64,0x7fddf84e5e3bf09c,0x7ff0000000000000,1 +np.float64,0xbfd4d6fbbc29adf8,0x3ff0db12db19d187,1 +np.float64,0x80077892bfaef126,0x3ff0000000000000,1 +np.float64,0xffae9d49543d3a90,0x7ff0000000000000,1 +np.float64,0xbfd8bef219317de4,0x3ff136034e5d2f1b,1 +np.float64,0xffe89c74ddb138e9,0x7ff0000000000000,1 +np.float64,0x8003b6bbb7e76d78,0x3ff0000000000000,1 +np.float64,0x315a4e8462b4b,0x3ff0000000000000,1 +np.float64,0x800ee616edddcc2e,0x3ff0000000000000,1 +np.float64,0xdfb27f97bf650,0x3ff0000000000000,1 +np.float64,0x8004723dc328e47c,0x3ff0000000000000,1 +np.float64,0xbfe529500daa52a0,0x3ff3a0b9b33fc84c,1 +np.float64,0xbfe4e46a7ce9c8d5,0x3ff3886ce0f92612,1 +np.float64,0xbf52003680240000,0x3ff00000a203d61a,1 +np.float64,0xffd3400458268008,0x7ff0000000000000,1 +np.float64,0x80076deb444edbd7,0x3ff0000000000000,1 +np.float64,0xa612f6c14c27,0x3ff0000000000000,1 +np.float64,0xbfd41c74c9a838ea,0x3ff0cbe61e16aecf,1 +np.float64,0x43f464a887e8d,0x3ff0000000000000,1 +np.float64,0x800976e748b2edcf,0x3ff0000000000000,1 +np.float64,0xffc79d6ba12f3ad8,0x7ff0000000000000,1 
+np.float64,0xffd6dbcb022db796,0x7ff0000000000000,1 +np.float64,0xffd6a9672a2d52ce,0x7ff0000000000000,1 +np.float64,0x3fe95dcfa632bb9f,0x3ff54bbad2ee919e,1 +np.float64,0x3febadd2e1375ba6,0x3ff65e336c47c018,1 +np.float64,0x7fd47c37d828f86f,0x7ff0000000000000,1 +np.float64,0xbfd4ea59e0a9d4b4,0x3ff0dcae6af3e443,1 +np.float64,0x2c112afc58226,0x3ff0000000000000,1 +np.float64,0x8008122bced02458,0x3ff0000000000000,1 +np.float64,0x7fe7105ab3ee20b4,0x7ff0000000000000,1 +np.float64,0x80089634df312c6a,0x3ff0000000000000,1 +np.float64,0x68e9fbc8d1d40,0x3ff0000000000000,1 +np.float64,0xbfec1e1032f83c20,0x3ff69590b9f18ea8,1 +np.float64,0xbfedf181623be303,0x3ff787ef48935dc6,1 +np.float64,0xffe8600457f0c008,0x7ff0000000000000,1 +np.float64,0x7a841ec6f5084,0x3ff0000000000000,1 +np.float64,0x459a572e8b34c,0x3ff0000000000000,1 +np.float64,0x3fe8a232bef14465,0x3ff4fac1780f731e,1 +np.float64,0x3fcb37597d366eb3,0x3ff05cf08ab14ebd,1 +np.float64,0xbfb0261d00204c38,0x3ff00826fb86ca8a,1 +np.float64,0x3fc6e7a6dd2dcf4e,0x3ff041c1222ffa79,1 +np.float64,0xee65dd03dccbc,0x3ff0000000000000,1 +np.float64,0xffe26fdc23e4dfb8,0x7ff0000000000000,1 +np.float64,0x7fe8d6c8cab1ad91,0x7ff0000000000000,1 +np.float64,0xbfeb64bf2676c97e,0x3ff63abb8607828c,1 +np.float64,0x3fd28417b425082f,0x3ff0ac9eb22a732b,1 +np.float64,0xbfd26835b3a4d06c,0x3ff0aa94c48fb6d2,1 +np.float64,0xffec617a01b8c2f3,0x7ff0000000000000,1 +np.float64,0xe1bfff01c3800,0x3ff0000000000000,1 +np.float64,0x3fd4def913a9bdf4,0x3ff0dbbc7271046f,1 +np.float64,0x94f4c17129e98,0x3ff0000000000000,1 +np.float64,0x8009b2eaa33365d6,0x3ff0000000000000,1 +np.float64,0x3fd9633b41b2c678,0x3ff1468388bdfb65,1 +np.float64,0xffe0ae5c80e15cb8,0x7ff0000000000000,1 +np.float64,0x7fdfc35996bf86b2,0x7ff0000000000000,1 +np.float64,0x3fcfc5bdc23f8b7c,0x3ff07ed5caa4545c,1 +np.float64,0xd48b4907a9169,0x3ff0000000000000,1 +np.float64,0xbfe0a2cc52614598,0x3ff2361665895d95,1 +np.float64,0xbfe9068f90720d1f,0x3ff525b82491a1a5,1 +np.float64,0x4238b9208472,0x3ff0000000000000,1 +np.float64,0x800e6b2bf69cd658,0x3ff0000000000000,1 +np.float64,0x7fb638b6ae2c716c,0x7ff0000000000000,1 +np.float64,0x7fe267641764cec7,0x7ff0000000000000,1 +np.float64,0xffc0933d3521267c,0x7ff0000000000000,1 +np.float64,0x7fddfdfb533bfbf6,0x7ff0000000000000,1 +np.float64,0xced2a8e99da55,0x3ff0000000000000,1 +np.float64,0x2a80d5165501b,0x3ff0000000000000,1 +np.float64,0xbfeead2ab63d5a55,0x3ff7eeb5cbcfdcab,1 +np.float64,0x80097f6f92f2fee0,0x3ff0000000000000,1 +np.float64,0x3fee1f29b77c3e54,0x3ff7a0a58c13df62,1 +np.float64,0x3f9d06b8383a0d70,0x3ff001a54a2d8cf8,1 +np.float64,0xbfc8b41d3f31683c,0x3ff04c85379dd6b0,1 +np.float64,0xffd2a04c1e254098,0x7ff0000000000000,1 +np.float64,0xbfb71c01e02e3800,0x3ff010b34220e838,1 +np.float64,0xbfe69249ef6d2494,0x3ff425e48d1e938b,1 +np.float64,0xffefffffffffffff,0x7ff0000000000000,1 +np.float64,0x3feb1d52fbf63aa6,0x3ff618813ae922d7,1 +np.float64,0x7fb8d1a77e31a34e,0x7ff0000000000000,1 +np.float64,0xffc3cfc4ed279f88,0x7ff0000000000000,1 +np.float64,0x2164b9fc42c98,0x3ff0000000000000,1 +np.float64,0x3fbb868cee370d1a,0x3ff017b31b0d4d27,1 +np.float64,0x3fcd6dea583adbd5,0x3ff06cbd16bf44a0,1 +np.float64,0xbfecd041d479a084,0x3ff6efb25f61012d,1 +np.float64,0xbfb0552e6e20aa60,0x3ff00856ca83834a,1 +np.float64,0xe6293cbfcc528,0x3ff0000000000000,1 +np.float64,0x7fba58394034b072,0x7ff0000000000000,1 +np.float64,0x33bc96d467794,0x3ff0000000000000,1 +np.float64,0xffe90ea86bf21d50,0x7ff0000000000000,1 +np.float64,0xbfc626ea6d2c4dd4,0x3ff03d7e01ec3849,1 +np.float64,0x65b56fe4cb6af,0x3ff0000000000000,1 
+np.float64,0x3fea409fb7f4813f,0x3ff5b171deab0ebd,1 +np.float64,0x3fe849c1df709384,0x3ff4d59063ff98c4,1 +np.float64,0x169073082d20f,0x3ff0000000000000,1 +np.float64,0xcc8b6add9916e,0x3ff0000000000000,1 +np.float64,0xbfef3d78d5fe7af2,0x3ff83fecc26abeea,1 +np.float64,0x3fe8c65a4a718cb4,0x3ff50a23bfeac7df,1 +np.float64,0x3fde9fa5c8bd3f4c,0x3ff1ddeb12b9d623,1 +np.float64,0xffe2af536da55ea6,0x7ff0000000000000,1 +np.float64,0x800186d0b0c30da2,0x3ff0000000000000,1 +np.float64,0x3fe9ba3c1d737478,0x3ff574ab2bf3a560,1 +np.float64,0xbfe1489c46a29138,0x3ff2641d36b30e21,1 +np.float64,0xbfe4b6b7c0e96d70,0x3ff37880ac8b0540,1 +np.float64,0x800e66ad82fccd5b,0x3ff0000000000000,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x7febb0fd477761fa,0x7ff0000000000000,1 +np.float64,0xbfdc433f2eb8867e,0x3ff195ec2a6cce27,1 +np.float64,0x3fe12c5a172258b4,0x3ff25c225b8a34bb,1 +np.float64,0xbfef6f116c3ede23,0x3ff85c47eaed49a0,1 +np.float64,0x800af6f60f35edec,0x3ff0000000000000,1 +np.float64,0xffe567999a2acf32,0x7ff0000000000000,1 +np.float64,0xbfc5ac5ae72b58b4,0x3ff03adb50ec04f3,1 +np.float64,0x3fea1b57e23436b0,0x3ff5a06f98541767,1 +np.float64,0x7fcc3e36fb387c6d,0x7ff0000000000000,1 +np.float64,0x8000c8dc698191ba,0x3ff0000000000000,1 +np.float64,0x3fee5085ed7ca10c,0x3ff7bb92f61245b8,1 +np.float64,0x7fbb9f803a373eff,0x7ff0000000000000,1 +np.float64,0xbfe1e5e806e3cbd0,0x3ff2918f2d773007,1 +np.float64,0x8008f8c3f3b1f188,0x3ff0000000000000,1 +np.float64,0x7fe53df515ea7be9,0x7ff0000000000000,1 +np.float64,0x7fdbb87fb3b770fe,0x7ff0000000000000,1 +np.float64,0x3fefcc0f50ff981f,0x3ff89210a6a04e6b,1 +np.float64,0x3fe33f87d0267f10,0x3ff2fb989ea4f2bc,1 +np.float64,0x1173992022e8,0x3ff0000000000000,1 +np.float64,0x3fef534632bea68c,0x3ff84c5ca9713ff9,1 +np.float64,0x3fc5991d552b3238,0x3ff03a72bfdb6e5f,1 +np.float64,0x3fdad90dc1b5b21c,0x3ff16db868180034,1 +np.float64,0xffe20b8078e41700,0x7ff0000000000000,1 +np.float64,0x7fdf409a82be8134,0x7ff0000000000000,1 +np.float64,0x3fccb7e691396fcd,0x3ff06786b6ccdbcb,1 +np.float64,0xffe416e0b7282dc1,0x7ff0000000000000,1 +np.float64,0xffe3a8a981275152,0x7ff0000000000000,1 +np.float64,0x3fd9c8bd31b3917c,0x3ff150ee6f5f692f,1 +np.float64,0xffeab6fef6356dfd,0x7ff0000000000000,1 +np.float64,0x3fe9c5e3faf38bc8,0x3ff579e18c9bd548,1 +np.float64,0x800b173e44762e7d,0x3ff0000000000000,1 +np.float64,0xffe2719db764e33b,0x7ff0000000000000,1 +np.float64,0x3fd1fcf31223f9e6,0x3ff0a2da7ad99856,1 +np.float64,0x80082c4afcd05896,0x3ff0000000000000,1 +np.float64,0xa56e5e4b4adcc,0x3ff0000000000000,1 +np.float64,0xffbbbddab2377bb8,0x7ff0000000000000,1 +np.float64,0x3b3927c076726,0x3ff0000000000000,1 +np.float64,0x3fec03fd58f807fb,0x3ff6889b8a774728,1 +np.float64,0xbfaa891fb4351240,0x3ff00580987bd914,1 +np.float64,0x7fb4800c4a290018,0x7ff0000000000000,1 +np.float64,0xffbb5d2b6036ba58,0x7ff0000000000000,1 +np.float64,0x7fd6608076acc100,0x7ff0000000000000,1 +np.float64,0x31267e4c624d1,0x3ff0000000000000,1 +np.float64,0x33272266664e5,0x3ff0000000000000,1 +np.float64,0x47bb37f28f768,0x3ff0000000000000,1 +np.float64,0x3fe134bb4ee26977,0x3ff25e7ea647a928,1 +np.float64,0xbfe2b5f42ba56be8,0x3ff2d05cbdc7344b,1 +np.float64,0xbfe0e013fd61c028,0x3ff246dfce572914,1 +np.float64,0x7fecedcda4f9db9a,0x7ff0000000000000,1 +np.float64,0x8001816c2da302d9,0x3ff0000000000000,1 +np.float64,0xffced8b65b3db16c,0x7ff0000000000000,1 +np.float64,0xffdc1d4a0b383a94,0x7ff0000000000000,1 +np.float64,0x7fe94e7339f29ce5,0x7ff0000000000000,1 +np.float64,0x33fb846667f71,0x3ff0000000000000,1 
+np.float64,0x800a1380e9542702,0x3ff0000000000000,1 +np.float64,0x800b74eaa776e9d6,0x3ff0000000000000,1 +np.float64,0x5681784aad030,0x3ff0000000000000,1 +np.float64,0xbfee0eb7917c1d6f,0x3ff797b949f7f6b4,1 +np.float64,0xffe4ec5fd2a9d8bf,0x7ff0000000000000,1 +np.float64,0xbfcd7401dd3ae804,0x3ff06cea52c792c0,1 +np.float64,0x800587563beb0ead,0x3ff0000000000000,1 +np.float64,0x3fc15c6f3322b8de,0x3ff025bbd030166d,1 +np.float64,0x7feb6b4caf76d698,0x7ff0000000000000,1 +np.float64,0x7fe136ef82a26dde,0x7ff0000000000000,1 +np.float64,0xf592dac3eb25c,0x3ff0000000000000,1 +np.float64,0x7fd300baf6a60175,0x7ff0000000000000,1 +np.float64,0x7fc880de9e3101bc,0x7ff0000000000000,1 +np.float64,0x7fe7a1aa5caf4354,0x7ff0000000000000,1 +np.float64,0x2f9b8e0e5f373,0x3ff0000000000000,1 +np.float64,0xffcc9071993920e4,0x7ff0000000000000,1 +np.float64,0x8009e151b313c2a4,0x3ff0000000000000,1 +np.float64,0xbfd46e2d18a8dc5a,0x3ff0d27a7b37c1ae,1 +np.float64,0x3fe65c7961acb8f3,0x3ff4116946062a4c,1 +np.float64,0x7fd31b371626366d,0x7ff0000000000000,1 +np.float64,0x98dc924d31b93,0x3ff0000000000000,1 +np.float64,0x268bef364d17f,0x3ff0000000000000,1 +np.float64,0x7fd883ba56310774,0x7ff0000000000000,1 +np.float64,0x3fc53f01a32a7e03,0x3ff0388dea9cd63e,1 +np.float64,0xffe1ea8c0563d518,0x7ff0000000000000,1 +np.float64,0x3fd0bf0e63a17e1d,0x3ff08d0577f5ffa6,1 +np.float64,0x7fef42418f7e8482,0x7ff0000000000000,1 +np.float64,0x8000bccd38c1799b,0x3ff0000000000000,1 +np.float64,0xbfe6c48766ed890f,0x3ff43936fa4048c8,1 +np.float64,0xbfb2a38f3a254720,0x3ff00adc7f7b2822,1 +np.float64,0x3fd5262b2eaa4c56,0x3ff0e1af492c08f5,1 +np.float64,0x80065b4691ecb68e,0x3ff0000000000000,1 +np.float64,0xfb6b9e9ff6d74,0x3ff0000000000000,1 +np.float64,0x8006c71e6ecd8e3e,0x3ff0000000000000,1 +np.float64,0x3fd0a3e43ca147c8,0x3ff08b3ad7b42485,1 +np.float64,0xbfc82d8607305b0c,0x3ff04949d6733ef6,1 +np.float64,0xde048c61bc092,0x3ff0000000000000,1 +np.float64,0xffcf73e0fa3ee7c0,0x7ff0000000000000,1 +np.float64,0xbfe8639d7830c73b,0x3ff4e05f97948376,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x67f01a2acfe04,0x3ff0000000000000,1 +np.float64,0x3fe222e803e445d0,0x3ff2a3a75e5f29d8,1 +np.float64,0xffef84c6387f098b,0x7ff0000000000000,1 +np.float64,0x3fe5969c1e6b2d38,0x3ff3c80130462bb2,1 +np.float64,0x8009f56953d3ead3,0x3ff0000000000000,1 +np.float64,0x3fe05c9b6360b937,0x3ff2232e1cba5617,1 +np.float64,0x3fd8888d63b1111b,0x3ff130a5b788d52f,1 +np.float64,0xffe3a9e6f26753ce,0x7ff0000000000000,1 +np.float64,0x800e2aaa287c5554,0x3ff0000000000000,1 +np.float64,0x3fea8d6c82351ad9,0x3ff5d4d8cde9a11d,1 +np.float64,0x7feef700723dee00,0x7ff0000000000000,1 +np.float64,0x3fa5cb77242b96e0,0x3ff003b62b3e50f1,1 +np.float64,0x7fb68f0a862d1e14,0x7ff0000000000000,1 +np.float64,0x7fb97ee83432fdcf,0x7ff0000000000000,1 +np.float64,0x7fd74a78632e94f0,0x7ff0000000000000,1 +np.float64,0x7fcfe577713fcaee,0x7ff0000000000000,1 +np.float64,0xffe192ee5ea325dc,0x7ff0000000000000,1 +np.float64,0x477d6ae48efae,0x3ff0000000000000,1 +np.float64,0xffe34d5237669aa4,0x7ff0000000000000,1 +np.float64,0x7fe3ce8395a79d06,0x7ff0000000000000,1 +np.float64,0x80019c01ffa33805,0x3ff0000000000000,1 +np.float64,0x74b5b56ce96b7,0x3ff0000000000000,1 +np.float64,0x7fe05ecdeda0bd9b,0x7ff0000000000000,1 +np.float64,0xffe9693eb232d27d,0x7ff0000000000000,1 +np.float64,0xffd2be2c7da57c58,0x7ff0000000000000,1 +np.float64,0x800dbd5cbc1b7aba,0x3ff0000000000000,1 +np.float64,0xbfa36105d426c210,0x3ff002ef2e3a87f7,1 +np.float64,0x800b2d69fb765ad4,0x3ff0000000000000,1 
+np.float64,0xbfdb81c9a9370394,0x3ff1802d409cbf7a,1 +np.float64,0x7fd481d014a9039f,0x7ff0000000000000,1 +np.float64,0xffe66c3c1fecd878,0x7ff0000000000000,1 +np.float64,0x3fc55865192ab0c8,0x3ff03915b51e8839,1 +np.float64,0xd6a78987ad4f1,0x3ff0000000000000,1 +np.float64,0x800c6cc80d58d990,0x3ff0000000000000,1 +np.float64,0x979435a12f29,0x3ff0000000000000,1 +np.float64,0xbfbd971e7a3b2e40,0x3ff01b647e45f5a6,1 +np.float64,0x80067565bfeceacc,0x3ff0000000000000,1 +np.float64,0x8001ad689ce35ad2,0x3ff0000000000000,1 +np.float64,0x7fa43253dc2864a7,0x7ff0000000000000,1 +np.float64,0xbfe3dda307e7bb46,0x3ff32ef99a2efe1d,1 +np.float64,0x3fe5d7b395ebaf68,0x3ff3dfd33cdc8ef4,1 +np.float64,0xd94cc9c3b2999,0x3ff0000000000000,1 +np.float64,0x3fee5a513fbcb4a2,0x3ff7c0f17b876ce5,1 +np.float64,0xffe27761fa64eec4,0x7ff0000000000000,1 +np.float64,0x3feb788119b6f102,0x3ff64446f67f4efa,1 +np.float64,0xbfed6e10dffadc22,0x3ff741d5ef610ca0,1 +np.float64,0x7fe73cf98b2e79f2,0x7ff0000000000000,1 +np.float64,0x7847d09af08fb,0x3ff0000000000000,1 +np.float64,0x29ded2da53bdb,0x3ff0000000000000,1 +np.float64,0xbfe51c1ec1aa383e,0x3ff39c0b7cf832e2,1 +np.float64,0xbfeafd5e65f5fabd,0x3ff609548a787f57,1 +np.float64,0x3fd872a26fb0e545,0x3ff12e7fbd95505c,1 +np.float64,0x7fed6b7c1b7ad6f7,0x7ff0000000000000,1 +np.float64,0xffe7ba9ec16f753d,0x7ff0000000000000,1 +np.float64,0x7f89b322f0336645,0x7ff0000000000000,1 +np.float64,0xbfad1677383a2cf0,0x3ff0069ca67e7baa,1 +np.float64,0x3fe0906d04a120da,0x3ff2311b04b7bfef,1 +np.float64,0xffe4b3c9d4296793,0x7ff0000000000000,1 +np.float64,0xbfe476bb0ce8ed76,0x3ff36277d2921a74,1 +np.float64,0x7fc35655cf26acab,0x7ff0000000000000,1 +np.float64,0x7fe9980f0373301d,0x7ff0000000000000,1 +np.float64,0x9e6e04cb3cdc1,0x3ff0000000000000,1 +np.float64,0x800b89e0afb713c2,0x3ff0000000000000,1 +np.float64,0x800bd951a3f7b2a4,0x3ff0000000000000,1 +np.float64,0x29644a9e52c8a,0x3ff0000000000000,1 +np.float64,0x3fe1be2843637c51,0x3ff285e90d8387e4,1 +np.float64,0x7fa233cce4246799,0x7ff0000000000000,1 +np.float64,0xbfcfb7bc2d3f6f78,0x3ff07e657de3e2ed,1 +np.float64,0xffd7c953e7af92a8,0x7ff0000000000000,1 +np.float64,0xbfc5bbaf772b7760,0x3ff03b2ee4febb1e,1 +np.float64,0x8007b7315a6f6e63,0x3ff0000000000000,1 +np.float64,0xbfe906d902320db2,0x3ff525d7e16acfe0,1 +np.float64,0x3fde33d8553c67b1,0x3ff1d09faa19aa53,1 +np.float64,0x61fe76a0c3fcf,0x3ff0000000000000,1 +np.float64,0xa75e355b4ebc7,0x3ff0000000000000,1 +np.float64,0x3fc9e6d86033cdb1,0x3ff05426299c7064,1 +np.float64,0x7fd83f489eb07e90,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x80014434ae62886a,0x3ff0000000000000,1 +np.float64,0xbfe21af9686435f3,0x3ff2a149338bdefe,1 +np.float64,0x9354e6cd26a9d,0x3ff0000000000000,1 +np.float64,0xb42b95f768573,0x3ff0000000000000,1 +np.float64,0xbfecb4481bb96890,0x3ff6e15d269dd651,1 +np.float64,0x3f97842ae82f0840,0x3ff0011485156f28,1 +np.float64,0xffdef63d90bdec7c,0x7ff0000000000000,1 +np.float64,0x7fe511a8d36a2351,0x7ff0000000000000,1 +np.float64,0xbf8cb638a0396c80,0x3ff000670c318fb6,1 +np.float64,0x3fe467e1f668cfc4,0x3ff35d65f93ccac6,1 +np.float64,0xbfce7d88f03cfb10,0x3ff074c22475fe5b,1 +np.float64,0x6d0a4994da14a,0x3ff0000000000000,1 +np.float64,0xbfb3072580260e48,0x3ff00b51d3913e9f,1 +np.float64,0x8008fcde36b1f9bd,0x3ff0000000000000,1 +np.float64,0x3fd984df66b309c0,0x3ff149f29125eca4,1 +np.float64,0xffee2a10fe7c5421,0x7ff0000000000000,1 +np.float64,0x80039168ace722d2,0x3ff0000000000000,1 +np.float64,0xffda604379b4c086,0x7ff0000000000000,1 +np.float64,0xffdc6a405bb8d480,0x7ff0000000000000,1 
+np.float64,0x3fe62888b26c5111,0x3ff3fdda754c4372,1 +np.float64,0x8008b452cb5168a6,0x3ff0000000000000,1 +np.float64,0x6165d540c2cbb,0x3ff0000000000000,1 +np.float64,0xbfee0c04d17c180a,0x3ff796431c64bcbe,1 +np.float64,0x800609b8448c1371,0x3ff0000000000000,1 +np.float64,0x800fc3fca59f87f9,0x3ff0000000000000,1 +np.float64,0x77f64848efeca,0x3ff0000000000000,1 +np.float64,0x8007cf522d8f9ea5,0x3ff0000000000000,1 +np.float64,0xbfe9fb0b93f3f617,0x3ff591cb0052e22c,1 +np.float64,0x7fd569d5f0aad3ab,0x7ff0000000000000,1 +np.float64,0x7fe5cf489d6b9e90,0x7ff0000000000000,1 +np.float64,0x7fd6e193e92dc327,0x7ff0000000000000,1 +np.float64,0xf78988a5ef131,0x3ff0000000000000,1 +np.float64,0x3fe8f97562b1f2eb,0x3ff5201080fbc12d,1 +np.float64,0x7febfd69d7b7fad3,0x7ff0000000000000,1 +np.float64,0xffc07b5c1720f6b8,0x7ff0000000000000,1 +np.float64,0xbfd966926832cd24,0x3ff146da9adf492e,1 +np.float64,0x7fef5bd9edfeb7b3,0x7ff0000000000000,1 +np.float64,0xbfd2afbc96255f7a,0x3ff0afd601febf44,1 +np.float64,0x7fdd4ea6293a9d4b,0x7ff0000000000000,1 +np.float64,0xbfe8a1e916b143d2,0x3ff4faa23c2793e5,1 +np.float64,0x800188fcd8c311fa,0x3ff0000000000000,1 +np.float64,0xbfe30803f1661008,0x3ff2e9fc729baaee,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x3fd287bec3250f7e,0x3ff0ace34d3102f6,1 +np.float64,0x1f0ee9443e1de,0x3ff0000000000000,1 +np.float64,0xbfd92f73da325ee8,0x3ff14143e4fa2c5a,1 +np.float64,0x3fed7c9bdffaf938,0x3ff74984168734d3,1 +np.float64,0x8002c4d1696589a4,0x3ff0000000000000,1 +np.float64,0xfe03011bfc060,0x3ff0000000000000,1 +np.float64,0x7f7a391e6034723c,0x7ff0000000000000,1 +np.float64,0xffd6fd46f82dfa8e,0x7ff0000000000000,1 +np.float64,0xbfd7520a742ea414,0x3ff112f1ba5d4f91,1 +np.float64,0x8009389d8812713b,0x3ff0000000000000,1 +np.float64,0x7fefb846aaff708c,0x7ff0000000000000,1 +np.float64,0x3fd98a0983331413,0x3ff14a79efb8adbf,1 +np.float64,0xbfd897158db12e2c,0x3ff132137902cf3e,1 +np.float64,0xffc4048d5928091c,0x7ff0000000000000,1 +np.float64,0x80036ae46046d5ca,0x3ff0000000000000,1 +np.float64,0x7faba7ed3c374fd9,0x7ff0000000000000,1 +np.float64,0xbfec4265e1f884cc,0x3ff6a7b8602422c9,1 +np.float64,0xaa195e0b5432c,0x3ff0000000000000,1 +np.float64,0x3feac15d317582ba,0x3ff5ed115758145f,1 +np.float64,0x6c13a5bcd8275,0x3ff0000000000000,1 +np.float64,0xbfed20b8883a4171,0x3ff7194dbd0dc988,1 +np.float64,0x800cde65c899bccc,0x3ff0000000000000,1 +np.float64,0x7c72912af8e53,0x3ff0000000000000,1 +np.float64,0x3fe49d2bb4e93a57,0x3ff36fab3aba15d4,1 +np.float64,0xbfd598fa02ab31f4,0x3ff0eb72fc472025,1 +np.float64,0x8007a191712f4324,0x3ff0000000000000,1 +np.float64,0xbfdeb14872bd6290,0x3ff1e01ca83f35fd,1 +np.float64,0xbfe1da46b3e3b48e,0x3ff28e23ad2f5615,1 +np.float64,0x800a2f348e745e69,0x3ff0000000000000,1 +np.float64,0xbfee66928afccd25,0x3ff7c7ac7dbb3273,1 +np.float64,0xffd78a0a2b2f1414,0x7ff0000000000000,1 +np.float64,0x7fc5fa80b82bf500,0x7ff0000000000000,1 +np.float64,0x800e6d7260dcdae5,0x3ff0000000000000,1 +np.float64,0xbfd6cff2aaad9fe6,0x3ff106f78ee61642,1 +np.float64,0x7fe1041d1d220839,0x7ff0000000000000,1 +np.float64,0xbfdf75586cbeeab0,0x3ff1f8dbaa7e57f0,1 +np.float64,0xffdcaae410b955c8,0x7ff0000000000000,1 +np.float64,0x800fe5e0d1ffcbc2,0x3ff0000000000000,1 +np.float64,0x800d7999527af333,0x3ff0000000000000,1 +np.float64,0xbfe62c233bac5846,0x3ff3ff34220a204c,1 +np.float64,0x7fe99bbff8f3377f,0x7ff0000000000000,1 +np.float64,0x7feeaf471d3d5e8d,0x7ff0000000000000,1 +np.float64,0xd5904ff5ab20a,0x3ff0000000000000,1 +np.float64,0x3fd07aae3320f55c,0x3ff08888c227c968,1 
+np.float64,0x7fea82b8dff50571,0x7ff0000000000000,1 +np.float64,0xffef2db9057e5b71,0x7ff0000000000000,1 +np.float64,0xbfe2077fef640f00,0x3ff29b7dd0d39d36,1 +np.float64,0xbfe09a4d7c61349b,0x3ff233c7e88881f4,1 +np.float64,0x3fda50c4cbb4a188,0x3ff15f28a71deee7,1 +np.float64,0x7fe7d9ee6b2fb3dc,0x7ff0000000000000,1 +np.float64,0x3febbf6faeb77edf,0x3ff666d13682ea93,1 +np.float64,0xc401a32988035,0x3ff0000000000000,1 +np.float64,0xbfeab30aa8f56615,0x3ff5e65dcc6603f8,1 +np.float64,0x92c8cea32591a,0x3ff0000000000000,1 +np.float64,0xbff0000000000000,0x3ff8b07551d9f550,1 +np.float64,0xbfbddfb4dc3bbf68,0x3ff01bebaec38faa,1 +np.float64,0xbfd8de3e2a31bc7c,0x3ff1391f4830d20b,1 +np.float64,0xffc83a8f8a307520,0x7ff0000000000000,1 +np.float64,0x3fee026ef53c04de,0x3ff7911337085827,1 +np.float64,0x7fbaf380b235e700,0x7ff0000000000000,1 +np.float64,0xffe5b89fa62b713f,0x7ff0000000000000,1 +np.float64,0xbfdc1ff54ab83fea,0x3ff191e8c0b60bb2,1 +np.float64,0x6ae3534cd5c6b,0x3ff0000000000000,1 +np.float64,0xbfea87e558750fcb,0x3ff5d24846013794,1 +np.float64,0xffe0f467bee1e8cf,0x7ff0000000000000,1 +np.float64,0x7fee3b0dc7bc761b,0x7ff0000000000000,1 +np.float64,0x3fed87521afb0ea4,0x3ff74f2f5cd36a5c,1 +np.float64,0x7b3c9882f6794,0x3ff0000000000000,1 +np.float64,0x7fdd1a62243a34c3,0x7ff0000000000000,1 +np.float64,0x800f1dc88d3e3b91,0x3ff0000000000000,1 +np.float64,0x7fc3213cfa264279,0x7ff0000000000000,1 +np.float64,0x3fe40e0f3d681c1e,0x3ff33f135e9d5ded,1 +np.float64,0x7febf14e51f7e29c,0x7ff0000000000000,1 +np.float64,0xffe96c630c72d8c5,0x7ff0000000000000,1 +np.float64,0x7fdd82fbe7bb05f7,0x7ff0000000000000,1 +np.float64,0xbf9a6a0b1034d420,0x3ff0015ce009f7d8,1 +np.float64,0xbfceb4f8153d69f0,0x3ff0766e3ecc77df,1 +np.float64,0x3fd9de31e633bc64,0x3ff15327b794a16e,1 +np.float64,0x3faa902a30352054,0x3ff00583848d1969,1 +np.float64,0x0,0x3ff0000000000000,1 +np.float64,0x3fbe3459c43c68b4,0x3ff01c8af6710ef6,1 +np.float64,0xbfa8df010031be00,0x3ff004d5632dc9f5,1 +np.float64,0x7fbcf6cf2a39ed9d,0x7ff0000000000000,1 +np.float64,0xffe4236202a846c4,0x7ff0000000000000,1 +np.float64,0x3fd35ed52e26bdaa,0x3ff0bd0b231f11f7,1 +np.float64,0x7fe7a2df532f45be,0x7ff0000000000000,1 +np.float64,0xffe32f8315665f06,0x7ff0000000000000,1 +np.float64,0x7fe1a69f03e34d3d,0x7ff0000000000000,1 +np.float64,0x7fa5542b742aa856,0x7ff0000000000000,1 +np.float64,0x3fe84e9f8ef09d3f,0x3ff4d79816359765,1 +np.float64,0x29076fe6520ef,0x3ff0000000000000,1 +np.float64,0xffd70894f7ae112a,0x7ff0000000000000,1 +np.float64,0x800188edcbe311dc,0x3ff0000000000000,1 +np.float64,0x3fe2c7acda258f5a,0x3ff2d5dad4617703,1 +np.float64,0x3f775d41a02ebb00,0x3ff000110f212445,1 +np.float64,0x7fe8a084d1714109,0x7ff0000000000000,1 +np.float64,0x3fe31562d8a62ac6,0x3ff2ee35055741cd,1 +np.float64,0xbfd195d4d1a32baa,0x3ff09b98a50c151b,1 +np.float64,0xffaae9ff0c35d400,0x7ff0000000000000,1 +np.float64,0xff819866502330c0,0x7ff0000000000000,1 +np.float64,0x7fddc64815bb8c8f,0x7ff0000000000000,1 +np.float64,0xbfd442b428288568,0x3ff0cef70aa73ae6,1 +np.float64,0x8002e7625aa5cec5,0x3ff0000000000000,1 +np.float64,0x7fe8d4f70e71a9ed,0x7ff0000000000000,1 +np.float64,0xbfc3bd015f277a04,0x3ff030cbf16f29d9,1 +np.float64,0x3fd315d5baa62bab,0x3ff0b77a551a5335,1 +np.float64,0x7fa638b4642c7168,0x7ff0000000000000,1 +np.float64,0x3fdea8b795bd516f,0x3ff1df0bb70cdb79,1 +np.float64,0xbfd78754762f0ea8,0x3ff117ee0f29abed,1 +np.float64,0x8009f6a37633ed47,0x3ff0000000000000,1 +np.float64,0x3fea1daf75343b5f,0x3ff5a1804789bf13,1 +np.float64,0x3fd044b6c0a0896e,0x3ff0850b7297d02f,1 
+np.float64,0x8003547a9c86a8f6,0x3ff0000000000000,1 +np.float64,0x3fa6c2cd782d859b,0x3ff0040c4ac8f44a,1 +np.float64,0x3fe225baaae44b76,0x3ff2a47f5e1f5e85,1 +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0x3fcb53da8736a7b8,0x3ff05db45af470ac,1 +np.float64,0x80079f8f140f3f1f,0x3ff0000000000000,1 +np.float64,0xbfcd1d7e2b3a3afc,0x3ff06a6b6845d05f,1 +np.float64,0x96df93672dbf3,0x3ff0000000000000,1 +np.float64,0xdef86e43bdf0e,0x3ff0000000000000,1 +np.float64,0xbfec05a09db80b41,0x3ff6896b768eea08,1 +np.float64,0x7fe3ff91d267ff23,0x7ff0000000000000,1 +np.float64,0xffea3eaa07347d53,0x7ff0000000000000,1 +np.float64,0xbfebde1cc1f7bc3a,0x3ff675e34ac2afc2,1 +np.float64,0x629bcde8c537a,0x3ff0000000000000,1 +np.float64,0xbfdde4fcff3bc9fa,0x3ff1c7061d21f0fe,1 +np.float64,0x3fee60fd003cc1fa,0x3ff7c49af3878a51,1 +np.float64,0x3fe5c92ac32b9256,0x3ff3da7a7929588b,1 +np.float64,0xbfe249c78f64938f,0x3ff2af52a06f1a50,1 +np.float64,0xbfc6de9dbe2dbd3c,0x3ff0418d284ee29f,1 +np.float64,0xffc8ef094631de14,0x7ff0000000000000,1 +np.float64,0x3fdef05f423de0bf,0x3ff1e800caba8ab5,1 +np.float64,0xffc1090731221210,0x7ff0000000000000,1 +np.float64,0xbfedec9b5fbbd937,0x3ff7854b6792a24a,1 +np.float64,0xbfb873507630e6a0,0x3ff012b23b3b7a67,1 +np.float64,0xbfe3cd6692679acd,0x3ff3299d6936ec4b,1 +np.float64,0xbfb107c890220f90,0x3ff0091122162472,1 +np.float64,0xbfe4e6ee48e9cddc,0x3ff3894e5a5e70a6,1 +np.float64,0xffe6fa3413edf468,0x7ff0000000000000,1 +np.float64,0x3fe2faf79b65f5ef,0x3ff2e5e11fae8b54,1 +np.float64,0xbfdfeb8df9bfd71c,0x3ff208189691b15f,1 +np.float64,0x75d2d03ceba5b,0x3ff0000000000000,1 +np.float64,0x3feb48c182b69183,0x3ff62d4462eba6cb,1 +np.float64,0xffcda9f7ff3b53f0,0x7ff0000000000000,1 +np.float64,0x7fcafbdcbd35f7b8,0x7ff0000000000000,1 +np.float64,0xbfd1895523a312aa,0x3ff09aba642a78d9,1 +np.float64,0x3fe3129c3f662538,0x3ff2ed546bbfafcf,1 +np.float64,0x3fb444dee02889be,0x3ff00cd86273b964,1 +np.float64,0xbf73b32d7ee77,0x3ff0000000000000,1 +np.float64,0x3fae19904c3c3321,0x3ff00714865c498a,1 +np.float64,0x7fefbfaef5bf7f5d,0x7ff0000000000000,1 +np.float64,0x8000dc3816e1b871,0x3ff0000000000000,1 +np.float64,0x8003f957ba47f2b0,0x3ff0000000000000,1 +np.float64,0xbfe3563c7ea6ac79,0x3ff302dcebc92856,1 +np.float64,0xbfdc80fbae3901f8,0x3ff19cfe73e58092,1 +np.float64,0x8009223b04524476,0x3ff0000000000000,1 +np.float64,0x3fd95f431c32be86,0x3ff1461c21cb03f0,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfe7c12ed3ef825e,0x3ff49d59c265efcd,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0x7fc5e2632f2bc4c5,0x7ff0000000000000,1 +np.float64,0xffd8f6b4c7b1ed6a,0x7ff0000000000000,1 +np.float64,0x80034b93d4069728,0x3ff0000000000000,1 +np.float64,0xffdf5d4c1dbeba98,0x7ff0000000000000,1 +np.float64,0x800bc63d70178c7b,0x3ff0000000000000,1 +np.float64,0xbfeba31ea0f7463d,0x3ff658fa27073d2b,1 +np.float64,0xbfeebeede97d7ddc,0x3ff7f89a8e80dec4,1 +np.float64,0x7feb0f1f91361e3e,0x7ff0000000000000,1 +np.float64,0xffec3158d0b862b1,0x7ff0000000000000,1 +np.float64,0x3fde51cbfbbca398,0x3ff1d44c2ff15b3d,1 +np.float64,0xd58fb2b3ab1f7,0x3ff0000000000000,1 +np.float64,0x80028b9e32e5173d,0x3ff0000000000000,1 +np.float64,0x7fea77a56c74ef4a,0x7ff0000000000000,1 +np.float64,0x3fdaabbd4a35577b,0x3ff168d82edf2fe0,1 +np.float64,0xbfe69c39cc2d3874,0x3ff429b2f4cdb362,1 +np.float64,0x3b78f5d876f20,0x3ff0000000000000,1 +np.float64,0x7fa47d116428fa22,0x7ff0000000000000,1 +np.float64,0xbfe4118b0ce82316,0x3ff3403d989f780f,1 +np.float64,0x800482e793c905d0,0x3ff0000000000000,1 
+np.float64,0xbfe48e5728e91cae,0x3ff36a9020bf9d20,1 +np.float64,0x7fe078ba8860f174,0x7ff0000000000000,1 +np.float64,0x3fd80843e5b01088,0x3ff1242f401e67da,1 +np.float64,0x3feb1f6965f63ed3,0x3ff6197fc590e143,1 +np.float64,0xffa41946d8283290,0x7ff0000000000000,1 +np.float64,0xffe30de129661bc2,0x7ff0000000000000,1 +np.float64,0x3fec9c8e1ab9391c,0x3ff6d542ea2f49b4,1 +np.float64,0x3fdc3e4490387c89,0x3ff1955ae18cac37,1 +np.float64,0xffef49d9c77e93b3,0x7ff0000000000000,1 +np.float64,0xfff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fe0442455608849,0x3ff21cab90067d5c,1 +np.float64,0xbfed86aebd3b0d5e,0x3ff74ed8d4b75f50,1 +np.float64,0xffe4600d2b28c01a,0x7ff0000000000000,1 +np.float64,0x7fc1e8ccff23d199,0x7ff0000000000000,1 +np.float64,0x8008d49b0091a936,0x3ff0000000000000,1 +np.float64,0xbfe4139df028273c,0x3ff340ef3c86227c,1 +np.float64,0xbfe9ab4542b3568a,0x3ff56dfe32061247,1 +np.float64,0xbfd76dd365aedba6,0x3ff11589bab5fe71,1 +np.float64,0x3fd42cf829a859f0,0x3ff0cd3844bb0e11,1 +np.float64,0x7fd077cf2e20ef9d,0x7ff0000000000000,1 +np.float64,0x3fd7505760aea0b0,0x3ff112c937b3f088,1 +np.float64,0x1f93341a3f267,0x3ff0000000000000,1 +np.float64,0x7fe3c3c1b0678782,0x7ff0000000000000,1 +np.float64,0x800f85cec97f0b9e,0x3ff0000000000000,1 +np.float64,0xd93ab121b2756,0x3ff0000000000000,1 +np.float64,0xbfef8066fd7f00ce,0x3ff8663ed7d15189,1 +np.float64,0xffe31dd4af663ba9,0x7ff0000000000000,1 +np.float64,0xbfd7ff05a6affe0c,0x3ff1234c09bb686d,1 +np.float64,0xbfe718c31fee3186,0x3ff45a0c2d0ef7b0,1 +np.float64,0x800484bf33e9097f,0x3ff0000000000000,1 +np.float64,0xffd409dad02813b6,0x7ff0000000000000,1 +np.float64,0x3fe59679896b2cf4,0x3ff3c7f49e4fbbd3,1 +np.float64,0xbfd830c54d30618a,0x3ff1281729861390,1 +np.float64,0x1d4fc81c3a9fa,0x3ff0000000000000,1 +np.float64,0x3fd334e4272669c8,0x3ff0b9d5d82894f0,1 +np.float64,0xffc827e65c304fcc,0x7ff0000000000000,1 +np.float64,0xffe2d1814aa5a302,0x7ff0000000000000,1 +np.float64,0xffd7b5b8d32f6b72,0x7ff0000000000000,1 +np.float64,0xbfdbc9f077b793e0,0x3ff18836b9106ad0,1 +np.float64,0x7fc724c2082e4983,0x7ff0000000000000,1 +np.float64,0x3fa39ed72c273da0,0x3ff00302051ce17e,1 +np.float64,0xbfe3c4c209678984,0x3ff326c4fd16b5cd,1 +np.float64,0x7fe91f6d00f23ed9,0x7ff0000000000000,1 +np.float64,0x8004ee93fea9dd29,0x3ff0000000000000,1 +np.float64,0xbfe7c32d0eaf865a,0x3ff49e290ed2ca0e,1 +np.float64,0x800ea996b29d532d,0x3ff0000000000000,1 +np.float64,0x2df9ec1c5bf3e,0x3ff0000000000000,1 +np.float64,0xabb175df5762f,0x3ff0000000000000,1 +np.float64,0xffe3fc9c8e27f938,0x7ff0000000000000,1 +np.float64,0x7fb358a62826b14b,0x7ff0000000000000,1 +np.float64,0x800aedcccaf5db9a,0x3ff0000000000000,1 +np.float64,0xffca530c5234a618,0x7ff0000000000000,1 +np.float64,0x40f91e9681f24,0x3ff0000000000000,1 +np.float64,0x80098f4572f31e8b,0x3ff0000000000000,1 +np.float64,0xbfdc58c21fb8b184,0x3ff1986115f8fe92,1 +np.float64,0xbfebeafd40b7d5fa,0x3ff67c3cf34036e3,1 +np.float64,0x7fd108861a22110b,0x7ff0000000000000,1 +np.float64,0xff8e499ae03c9340,0x7ff0000000000000,1 +np.float64,0xbfd2f58caa25eb1a,0x3ff0b50b1bffafdf,1 +np.float64,0x3fa040c9bc208193,0x3ff002105e95aefa,1 +np.float64,0xbfd2ebc0a5a5d782,0x3ff0b44ed5a11584,1 +np.float64,0xffe237bc93a46f78,0x7ff0000000000000,1 +np.float64,0x3fd557c5eeaaaf8c,0x3ff0e5e0a575e1ba,1 +np.float64,0x7abb419ef5769,0x3ff0000000000000,1 +np.float64,0xffefa1fe353f43fb,0x7ff0000000000000,1 +np.float64,0x3fa6f80ba02df017,0x3ff0041f51fa0d76,1 +np.float64,0xbfdce79488b9cf2a,0x3ff1a8e32877beb4,1 +np.float64,0x2285f3e4450bf,0x3ff0000000000000,1 
+np.float64,0x3bf7eb7277efe,0x3ff0000000000000,1 +np.float64,0xbfd5925fd3ab24c0,0x3ff0eae1c2ac2e78,1 +np.float64,0xbfed6325227ac64a,0x3ff73c14a2ad5bfe,1 +np.float64,0x8000429c02408539,0x3ff0000000000000,1 +np.float64,0xb67c21e76cf84,0x3ff0000000000000,1 +np.float64,0x3fec3d3462f87a69,0x3ff6a51e4c027eb7,1 +np.float64,0x3feae69cbcf5cd3a,0x3ff5fe9387314afd,1 +np.float64,0x7fd0c9a0ec219341,0x7ff0000000000000,1 +np.float64,0x8004adb7f6295b71,0x3ff0000000000000,1 +np.float64,0xffd61fe8bb2c3fd2,0x7ff0000000000000,1 +np.float64,0xffe7fb3834aff670,0x7ff0000000000000,1 +np.float64,0x7fd1eef163a3dde2,0x7ff0000000000000,1 +np.float64,0x2e84547a5d08b,0x3ff0000000000000,1 +np.float64,0x8002d8875ee5b10f,0x3ff0000000000000,1 +np.float64,0x3fe1d1c5f763a38c,0x3ff28ba524fb6de8,1 +np.float64,0x8001dea0bc43bd42,0x3ff0000000000000,1 +np.float64,0xfecfad91fd9f6,0x3ff0000000000000,1 +np.float64,0xffed7965fa3af2cb,0x7ff0000000000000,1 +np.float64,0xbfe6102ccc2c205a,0x3ff3f4c082506686,1 +np.float64,0x3feff75b777feeb6,0x3ff8ab6222578e0c,1 +np.float64,0x3fb8a97bd43152f8,0x3ff013057f0a9d89,1 +np.float64,0xffe234b5e964696c,0x7ff0000000000000,1 +np.float64,0x984d9137309b2,0x3ff0000000000000,1 +np.float64,0xbfe42e9230e85d24,0x3ff349fb7d1a7560,1 +np.float64,0xbfecc8b249f99165,0x3ff6ebd0fea0ea72,1 +np.float64,0x8000840910410813,0x3ff0000000000000,1 +np.float64,0xbfd81db9e7303b74,0x3ff126402d3539ec,1 +np.float64,0x800548eb7fea91d8,0x3ff0000000000000,1 +np.float64,0xbfe4679ad0e8cf36,0x3ff35d4db89296a3,1 +np.float64,0x3fd4c55b5a298ab7,0x3ff0d99da31081f9,1 +np.float64,0xbfa8f5b38c31eb60,0x3ff004de3a23b32d,1 +np.float64,0x80005d348e80ba6a,0x3ff0000000000000,1 +np.float64,0x800c348d6118691b,0x3ff0000000000000,1 +np.float64,0xffd6b88f84ad7120,0x7ff0000000000000,1 +np.float64,0x3fc1aaaa82235555,0x3ff027136afd08e0,1 +np.float64,0x7fca7d081b34fa0f,0x7ff0000000000000,1 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0xbfdc810d1139021a,0x3ff19d007408cfe3,1 +np.float64,0xbfe5dce05f2bb9c0,0x3ff3e1bb9234617b,1 +np.float64,0xffecfe2c32b9fc58,0x7ff0000000000000,1 +np.float64,0x95b2891b2b651,0x3ff0000000000000,1 +np.float64,0x8000b60c6c616c1a,0x3ff0000000000000,1 +np.float64,0x4944f0889289f,0x3ff0000000000000,1 +np.float64,0x3fe6e508696dca10,0x3ff445d1b94863e9,1 +np.float64,0xbfe63355d0ec66ac,0x3ff401e74f16d16f,1 +np.float64,0xbfe9b9595af372b3,0x3ff57445e1b4d670,1 +np.float64,0x800e16f7313c2dee,0x3ff0000000000000,1 +np.float64,0xffe898f5f0b131eb,0x7ff0000000000000,1 +np.float64,0x3fe91ac651f2358d,0x3ff52e787c21c004,1 +np.float64,0x7fbfaac6783f558c,0x7ff0000000000000,1 +np.float64,0xd8ef3dfbb1de8,0x3ff0000000000000,1 +np.float64,0xbfc58c13a52b1828,0x3ff03a2c19d65019,1 +np.float64,0xbfbde55e8a3bcac0,0x3ff01bf648a3e0a7,1 +np.float64,0xffc3034930260694,0x7ff0000000000000,1 +np.float64,0xea77a64dd4ef5,0x3ff0000000000000,1 +np.float64,0x800cfe7e7739fcfd,0x3ff0000000000000,1 +np.float64,0x4960f31a92c1f,0x3ff0000000000000,1 +np.float64,0x3fd9552c94b2aa58,0x3ff14515a29add09,1 +np.float64,0xffe8b3244c316648,0x7ff0000000000000,1 +np.float64,0x3fe8201e6a70403d,0x3ff4c444fa679cce,1 +np.float64,0xffe9ab7c20f356f8,0x7ff0000000000000,1 +np.float64,0x3fed8bba5f7b1774,0x3ff751853c4c95c5,1 +np.float64,0x8007639cb76ec73a,0x3ff0000000000000,1 +np.float64,0xbfe396db89672db7,0x3ff317bfd1d6fa8c,1 +np.float64,0xbfeb42f888f685f1,0x3ff62a7e0eee56b1,1 +np.float64,0x3fe894827c712904,0x3ff4f4f561d9ea13,1 +np.float64,0xb66b3caf6cd68,0x3ff0000000000000,1 +np.float64,0x800f8907fdbf1210,0x3ff0000000000000,1 +np.float64,0x7fe9b0cddb73619b,0x7ff0000000000000,1 
+np.float64,0xbfda70c0e634e182,0x3ff1628c6fdffc53,1 +np.float64,0x3fe0b5f534a16bea,0x3ff23b4ed4c2b48e,1 +np.float64,0xbfe8eee93671ddd2,0x3ff51b85b3c50ae4,1 +np.float64,0xbfe8c22627f1844c,0x3ff50858787a3bfe,1 +np.float64,0x37bb83c86f771,0x3ff0000000000000,1 +np.float64,0xffb7827ffe2f0500,0x7ff0000000000000,1 +np.float64,0x64317940c864,0x3ff0000000000000,1 +np.float64,0x800430ecee6861db,0x3ff0000000000000,1 +np.float64,0x3fa4291fbc285240,0x3ff0032d0204f6dd,1 +np.float64,0xffec69f76af8d3ee,0x7ff0000000000000,1 +np.float64,0x3ff0000000000000,0x3ff8b07551d9f550,1 +np.float64,0x3fc4cf3c42299e79,0x3ff0363fb1d3c254,1 +np.float64,0x7fe0223a77e04474,0x7ff0000000000000,1 +np.float64,0x800a3d4fa4347aa0,0x3ff0000000000000,1 +np.float64,0x3fdd273f94ba4e7f,0x3ff1b05b686e6879,1 +np.float64,0x3feca79052f94f20,0x3ff6dadedfa283aa,1 +np.float64,0x5e7f6f80bcfef,0x3ff0000000000000,1 +np.float64,0xbfef035892fe06b1,0x3ff81efb39cbeba2,1 +np.float64,0x3fee6c08e07cd812,0x3ff7caad952860a1,1 +np.float64,0xffeda715877b4e2a,0x7ff0000000000000,1 +np.float64,0x800580286b0b0052,0x3ff0000000000000,1 +np.float64,0x800703a73fee074f,0x3ff0000000000000,1 +np.float64,0xbfccf96a6639f2d4,0x3ff0696330a60832,1 +np.float64,0x7feb408442368108,0x7ff0000000000000,1 +np.float64,0x3fedc87a46fb90f5,0x3ff771e3635649a9,1 +np.float64,0x3fd8297b773052f7,0x3ff12762bc0cea76,1 +np.float64,0x3fee41bb03fc8376,0x3ff7b37b2da48ab4,1 +np.float64,0xbfe2b05a226560b4,0x3ff2cea17ae7c528,1 +np.float64,0xbfd2e92cf2a5d25a,0x3ff0b41d605ced61,1 +np.float64,0x4817f03a902ff,0x3ff0000000000000,1 +np.float64,0x8c9d4f0d193aa,0x3ff0000000000000,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-exp.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-exp.csv new file mode 100644 index 0000000..7c5ef3b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-exp.csv @@ -0,0 +1,412 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x3f800000,3 +np.float32,0x007b2490,0x3f800000,3 +np.float32,0x007c99fa,0x3f800000,3 +np.float32,0x00734a0c,0x3f800000,3 +np.float32,0x0070de24,0x3f800000,3 +np.float32,0x00495d65,0x3f800000,3 +np.float32,0x006894f6,0x3f800000,3 +np.float32,0x00555a76,0x3f800000,3 +np.float32,0x004e1fb8,0x3f800000,3 +np.float32,0x00687de9,0x3f800000,3 +## -ve denormals ## +np.float32,0x805b59af,0x3f800000,3 +np.float32,0x807ed8ed,0x3f800000,3 +np.float32,0x807142ad,0x3f800000,3 +np.float32,0x80772002,0x3f800000,3 +np.float32,0x8062abcb,0x3f800000,3 +np.float32,0x8045e31c,0x3f800000,3 +np.float32,0x805f01c2,0x3f800000,3 +np.float32,0x80506432,0x3f800000,3 +np.float32,0x8060089d,0x3f800000,3 +np.float32,0x8071292f,0x3f800000,3 +## floats that output a denormal ## +np.float32,0xc2cf3fc1,0x00000001,3 +np.float32,0xc2c79726,0x00000021,3 +np.float32,0xc2cb295d,0x00000005,3 +np.float32,0xc2b49e6b,0x00068c4c,3 +np.float32,0xc2ca8116,0x00000008,3 +np.float32,0xc2c23f82,0x000001d7,3 +np.float32,0xc2cb69c0,0x00000005,3 +np.float32,0xc2cc1f4d,0x00000003,3 +np.float32,0xc2ae094e,0x00affc4c,3 +np.float32,0xc2c86c44,0x00000015,3 +## random floats between -87.0f and 88.0f ## +np.float32,0x4030d7e0,0x417d9a05,3 +np.float32,0x426f60e8,0x6aa1be2c,3 +np.float32,0x41a1b220,0x4e0efc11,3 +np.float32,0xc20cc722,0x26159da7,3 +np.float32,0x41c492bc,0x512ec79d,3 +np.float32,0x40980210,0x42e73a0e,3 +np.float32,0xbf1f7b80,0x3f094de3,3 +np.float32,0x42a678a4,0x7b87a383,3 +np.float32,0xc20f3cfd,0x25a1c304,3 +np.float32,0x423ff34c,0x6216467f,3 
+np.float32,0x00000000,0x3f800000,3 +## floats that cause an overflow ## +np.float32,0x7f06d8c1,0x7f800000,3 +np.float32,0x7f451912,0x7f800000,3 +np.float32,0x7ecceac3,0x7f800000,3 +np.float32,0x7f643b45,0x7f800000,3 +np.float32,0x7e910ea0,0x7f800000,3 +np.float32,0x7eb4756b,0x7f800000,3 +np.float32,0x7f4ec708,0x7f800000,3 +np.float32,0x7f6b4551,0x7f800000,3 +np.float32,0x7d8edbda,0x7f800000,3 +np.float32,0x7f730718,0x7f800000,3 +np.float32,0x42b17217,0x7f7fff84,3 +np.float32,0x42b17218,0x7f800000,3 +np.float32,0x42b17219,0x7f800000,3 +np.float32,0xfef2b0bc,0x00000000,3 +np.float32,0xff69f83e,0x00000000,3 +np.float32,0xff4ecb12,0x00000000,3 +np.float32,0xfeac6d86,0x00000000,3 +np.float32,0xfde0cdb8,0x00000000,3 +np.float32,0xff26aef4,0x00000000,3 +np.float32,0xff6f9277,0x00000000,3 +np.float32,0xff7adfc4,0x00000000,3 +np.float32,0xff0ad40e,0x00000000,3 +np.float32,0xff6fd8f3,0x00000000,3 +np.float32,0xc2cff1b4,0x00000001,3 +np.float32,0xc2cff1b5,0x00000000,3 +np.float32,0xc2cff1b6,0x00000000,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0xff800000,0x00000000,3 +np.float32,0x4292f27c,0x7480000a,3 +np.float32,0x42a920be,0x7c7fff94,3 +np.float32,0x41c214c9,0x50ffffd9,3 +np.float32,0x41abe686,0x4effffd9,3 +np.float32,0x4287db5a,0x707fffd3,3 +np.float32,0x41902cbb,0x4c800078,3 +np.float32,0x42609466,0x67ffffeb,3 +np.float32,0x41a65af5,0x4e7fffd1,3 +np.float32,0x417f13ff,0x4affffc9,3 +np.float32,0x426d0e6c,0x6a3504f2,3 +np.float32,0x41bc8934,0x507fff51,3 +np.float32,0x42a7bdde,0x7c0000d6,3 +np.float32,0x4120cf66,0x46b504f6,3 +np.float32,0x4244da8f,0x62ffff1a,3 +np.float32,0x41a0cf69,0x4e000034,3 +np.float32,0x41cd2bec,0x52000005,3 +np.float32,0x42893e41,0x7100009e,3 +np.float32,0x41b437e1,0x4fb50502,3 +np.float32,0x41d8430f,0x5300001d,3 +np.float32,0x4244da92,0x62ffffda,3 +np.float32,0x41a0cf63,0x4dffffa9,3 +np.float32,0x3eb17218,0x3fb504f3,3 +np.float32,0x428729e8,0x703504dc,3 +np.float32,0x41a0cf67,0x4e000014,3 +np.float32,0x4252b77d,0x65800011,3 +np.float32,0x41902cb9,0x4c800058,3 +np.float32,0x42a0cf67,0x79800052,3 +np.float32,0x4152b77b,0x48ffffe9,3 +np.float32,0x41265af3,0x46ffffc8,3 +np.float32,0x42187e0b,0x5affff9a,3 +np.float32,0xc0d2b77c,0x3ab504f6,3 +np.float32,0xc283b2ac,0x10000072,3 +np.float32,0xc1cff1b4,0x2cb504f5,3 +np.float32,0xc05dce9e,0x3d000000,3 +np.float32,0xc28ec9d2,0x0bfffea5,3 +np.float32,0xc23c893a,0x1d7fffde,3 +np.float32,0xc2a920c0,0x027fff6c,3 +np.float32,0xc1f9886f,0x2900002b,3 +np.float32,0xc2c42920,0x000000b5,3 +np.float32,0xc2893e41,0x0dfffec5,3 +np.float32,0xc2c4da93,0x00000080,3 +np.float32,0xc17f1401,0x3400000c,3 +np.float32,0xc1902cb6,0x327fffaf,3 +np.float32,0xc27c4e3b,0x11ffffc5,3 +np.float32,0xc268e5c5,0x157ffe9d,3 +np.float32,0xc2b4e953,0x0005a826,3 +np.float32,0xc287db5a,0x0e800016,3 +np.float32,0xc207db5a,0x2700000b,3 +np.float32,0xc2b2d4fe,0x000ffff1,3 +np.float32,0xc268e5c0,0x157fffdd,3 +np.float32,0xc22920bd,0x2100003b,3 +np.float32,0xc2902caf,0x0b80011e,3 +np.float32,0xc1902cba,0x327fff2f,3 +np.float32,0xc2ca6625,0x00000008,3 +np.float32,0xc280ece8,0x10fffeb5,3 +np.float32,0xc2918f94,0x0b0000ea,3 +np.float32,0xc29b43d5,0x077ffffc,3 +np.float32,0xc1e61ff7,0x2ab504f5,3 +np.float32,0xc2867878,0x0effff15,3 +np.float32,0xc2a2324a,0x04fffff4,3 +#float64 +## near zero ## +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x8360000000000000,0x3ff0000000000000,1 +np.float64,0x9a70000000000000,0x3ff0000000000000,1 
+np.float64,0xb9b0000000000000,0x3ff0000000000000,1 +np.float64,0xb810000000000000,0x3ff0000000000000,1 +np.float64,0xbc30000000000000,0x3ff0000000000000,1 +np.float64,0xb6a0000000000000,0x3ff0000000000000,1 +np.float64,0x0000000000000000,0x3ff0000000000000,1 +np.float64,0x0010000000000000,0x3ff0000000000000,1 +np.float64,0x0000000000000001,0x3ff0000000000000,1 +np.float64,0x0360000000000000,0x3ff0000000000000,1 +np.float64,0x1a70000000000000,0x3ff0000000000000,1 +np.float64,0x3c30000000000000,0x3ff0000000000000,1 +np.float64,0x36a0000000000000,0x3ff0000000000000,1 +np.float64,0x39b0000000000000,0x3ff0000000000000,1 +np.float64,0x3810000000000000,0x3ff0000000000000,1 +## underflow ## +np.float64,0xc0c6276800000000,0x0000000000000000,1 +np.float64,0xc0c62d918ce2421d,0x0000000000000000,1 +np.float64,0xc0c62d918ce2421e,0x0000000000000000,1 +np.float64,0xc0c62d91a0000000,0x0000000000000000,1 +np.float64,0xc0c62d9180000000,0x0000000000000000,1 +np.float64,0xc0c62dea45ee3e06,0x0000000000000000,1 +np.float64,0xc0c62dea45ee3e07,0x0000000000000000,1 +np.float64,0xc0c62dea40000000,0x0000000000000000,1 +np.float64,0xc0c62dea60000000,0x0000000000000000,1 +np.float64,0xc0875f1120000000,0x0000000000000000,1 +np.float64,0xc0875f113c30b1c8,0x0000000000000000,1 +np.float64,0xc0875f1140000000,0x0000000000000000,1 +np.float64,0xc093480000000000,0x0000000000000000,1 +np.float64,0xffefffffffffffff,0x0000000000000000,1 +np.float64,0xc7efffffe0000000,0x0000000000000000,1 +## overflow ## +np.float64,0x40862e52fefa39ef,0x7ff0000000000000,1 +np.float64,0x40872e42fefa39ef,0x7ff0000000000000,1 +## +/- INF, +/- NAN ## +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xfff0000000000000,0x0000000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xfff8000000000000,0xfff8000000000000,1 +## output denormal ## +np.float64,0xc087438520000000,0x0000000000000001,1 +np.float64,0xc08743853f2f4461,0x0000000000000001,1 +np.float64,0xc08743853f2f4460,0x0000000000000001,1 +np.float64,0xc087438540000000,0x0000000000000001,1 +## between -745.13321910 and 709.78271289 ## +np.float64,0xbff760cd14774bd9,0x3fcdb14ced00ceb6,1 +np.float64,0xbff760cd20000000,0x3fcdb14cd7993879,1 +np.float64,0xbff760cd00000000,0x3fcdb14d12fbd264,1 +np.float64,0xc07f1cf360000000,0x130c1b369af14fda,1 +np.float64,0xbeb0000000000000,0x3feffffe00001000,1 +np.float64,0xbd70000000000000,0x3fefffffffffe000,1 +np.float64,0xc084fd46e5c84952,0x0360000000000139,1 +np.float64,0xc084fd46e5c84953,0x035ffffffffffe71,1 +np.float64,0xc084fd46e0000000,0x0360000b9096d32c,1 +np.float64,0xc084fd4700000000,0x035fff9721d12104,1 +np.float64,0xc086232bc0000000,0x0010003af5e64635,1 +np.float64,0xc086232bdd7abcd2,0x001000000000007c,1 +np.float64,0xc086232bdd7abcd3,0x000ffffffffffe7c,1 +np.float64,0xc086232be0000000,0x000ffffaf57a6fc9,1 +np.float64,0xc086233920000000,0x000fe590e3b45eb0,1 +np.float64,0xc086233938000000,0x000fe56133493c57,1 +np.float64,0xc086233940000000,0x000fe5514deffbbc,1 +np.float64,0xc086234c98000000,0x000fbf1024c32ccb,1 +np.float64,0xc086234ca0000000,0x000fbf0065bae78d,1 +np.float64,0xc086234c80000000,0x000fbf3f623a7724,1 +np.float64,0xc086234ec0000000,0x000fbad237c846f9,1 +np.float64,0xc086234ec8000000,0x000fbac27cfdec97,1 +np.float64,0xc086234ee0000000,0x000fba934cfd3dc2,1 +np.float64,0xc086234ef0000000,0x000fba73d7f618d9,1 +np.float64,0xc086234f00000000,0x000fba54632dddc0,1 +np.float64,0xc0862356e0000000,0x000faae0945b761a,1 +np.float64,0xc0862356f0000000,0x000faac13eb9a310,1 
+np.float64,0xc086235700000000,0x000faaa1e9567b0a,1 +np.float64,0xc086236020000000,0x000f98cd75c11ed7,1 +np.float64,0xc086236ca0000000,0x000f8081b4d93f89,1 +np.float64,0xc086236cb0000000,0x000f8062b3f4d6c5,1 +np.float64,0xc086236cc0000000,0x000f8043b34e6f8c,1 +np.float64,0xc086238d98000000,0x000f41220d9b0d2c,1 +np.float64,0xc086238da0000000,0x000f4112cc80a01f,1 +np.float64,0xc086238d80000000,0x000f414fd145db5b,1 +np.float64,0xc08624fd00000000,0x000cbfce8ea1e6c4,1 +np.float64,0xc086256080000000,0x000c250747fcd46e,1 +np.float64,0xc08626c480000000,0x000a34f4bd975193,1 +np.float64,0xbf50000000000000,0x3feff800ffeaac00,1 +np.float64,0xbe10000000000000,0x3fefffffff800000,1 +np.float64,0xbcd0000000000000,0x3feffffffffffff8,1 +np.float64,0xc055d589e0000000,0x38100004bf94f63e,1 +np.float64,0xc055d58a00000000,0x380ffff97f292ce8,1 +np.float64,0xbfd962d900000000,0x3fe585a4b00110e1,1 +np.float64,0x3ff4bed280000000,0x400d411e7a58a303,1 +np.float64,0x3fff0b3620000000,0x401bd7737ffffcf3,1 +np.float64,0x3ff0000000000000,0x4005bf0a8b145769,1 +np.float64,0x3eb0000000000000,0x3ff0000100000800,1 +np.float64,0x3d70000000000000,0x3ff0000000001000,1 +np.float64,0x40862e42e0000000,0x7fefff841808287f,1 +np.float64,0x40862e42fefa39ef,0x7fefffffffffff2a,1 +np.float64,0x40862e0000000000,0x7feef85a11e73f2d,1 +np.float64,0x4000000000000000,0x401d8e64b8d4ddae,1 +np.float64,0x4009242920000000,0x40372a52c383a488,1 +np.float64,0x4049000000000000,0x44719103e4080b45,1 +np.float64,0x4008000000000000,0x403415e5bf6fb106,1 +np.float64,0x3f50000000000000,0x3ff00400800aab55,1 +np.float64,0x3e10000000000000,0x3ff0000000400000,1 +np.float64,0x3cd0000000000000,0x3ff0000000000004,1 +np.float64,0x40562e40a0000000,0x47effed088821c3f,1 +np.float64,0x40562e42e0000000,0x47effff082e6c7ff,1 +np.float64,0x40562e4300000000,0x47f00000417184b8,1 +np.float64,0x3fe8000000000000,0x4000ef9db467dcf8,1 +np.float64,0x402b12e8d4f33589,0x412718f68c71a6fe,1 +np.float64,0x402b12e8d4f3358a,0x412718f68c71a70a,1 +np.float64,0x402b12e8c0000000,0x412718f59a7f472e,1 +np.float64,0x402b12e8e0000000,0x412718f70c0eac62,1 +##use 1th entry +np.float64,0x40631659AE147CB4,0x4db3a95025a4890f,1 +np.float64,0xC061B87D2E85A4E2,0x332640c8e2de2c51,1 +np.float64,0x405A4A50BE243AF4,0x496a45e4b7f0339a,1 +np.float64,0xC0839898B98EC5C6,0x0764027828830df4,1 +#use 2th entry +np.float64,0xC072428C44B6537C,0x2596ade838b96f3e,1 +np.float64,0xC053057C5E1AE9BF,0x3912c8fad18fdadf,1 +np.float64,0x407E89C78328BAA3,0x6bfe35d5b9a1a194,1 +np.float64,0x4083501B6DD87112,0x77a855503a38924e,1 +#use 3th entry +np.float64,0x40832C6195F24540,0x7741e73c80e5eb2f,1 +np.float64,0xC083D4CD557C2EC9,0x06b61727c2d2508e,1 +np.float64,0x400C48F5F67C99BD,0x404128820f02b92e,1 +np.float64,0x4056E36D9B2DF26A,0x4830f52ff34a8242,1 +#use 4th entry +np.float64,0x4080FF700D8CBD06,0x70fa70df9bc30f20,1 +np.float64,0x406C276D39E53328,0x543eb8e20a8f4741,1 +np.float64,0xC070D6159BBD8716,0x27a4a0548c904a75,1 +np.float64,0xC052EBCF8ED61F83,0x391c0e92368d15e4,1 +#use 5th entry +np.float64,0xC061F892A8AC5FBE,0x32f807a89efd3869,1 +np.float64,0x4021D885D2DBA085,0x40bd4dc86d3e3270,1 +np.float64,0x40767AEEEE7D4FCF,0x605e22851ee2afb7,1 +np.float64,0xC0757C5D75D08C80,0x20f0751599b992a2,1 +#use 6th entry +np.float64,0x405ACF7A284C4CE3,0x499a4e0b7a27027c,1 +np.float64,0xC085A6C9E80D7AF5,0x0175914009d62ec2,1 +np.float64,0xC07E4C02F86F1DAE,0x1439269b29a9231e,1 +np.float64,0x4080D80F9691CC87,0x7088a6cdafb041de,1 +#use 7th entry +np.float64,0x407FDFD84FBA0AC1,0x6deb1ae6f9bc4767,1 +np.float64,0x40630C06A1A2213D,0x4dac7a9d51a838b7,1 
+np.float64,0x40685FDB30BB8B4F,0x5183f5cc2cac9e79,1 +np.float64,0x408045A2208F77F4,0x6ee299e08e2aa2f0,1 +#use 8th entry +np.float64,0xC08104E391F5078B,0x0ed397b7cbfbd230,1 +np.float64,0xC031501CAEFAE395,0x3e6040fd1ea35085,1 +np.float64,0xC079229124F6247C,0x1babf4f923306b1e,1 +np.float64,0x407FB65F44600435,0x6db03beaf2512b8a,1 +#use 9th entry +np.float64,0xC07EDEE8E8E8A5AC,0x136536cec9cbef48,1 +np.float64,0x4072BB4086099A14,0x5af4d3c3008b56cc,1 +np.float64,0x4050442A2EC42CB4,0x45cd393bd8fad357,1 +np.float64,0xC06AC28FB3D419B4,0x2ca1b9d3437df85f,1 +#use 10th entry +np.float64,0x40567FC6F0A68076,0x480c977fd5f3122e,1 +np.float64,0x40620A2F7EDA59BB,0x4cf278e96f4ce4d7,1 +np.float64,0xC085044707CD557C,0x034aad6c968a045a,1 +np.float64,0xC07374EA5AC516AA,0x23dd6afdc03e83d5,1 +#use 11th entry +np.float64,0x4073CC95332619C1,0x5c804b1498bbaa54,1 +np.float64,0xC0799FEBBE257F31,0x1af6a954c43b87d2,1 +np.float64,0x408159F19EA424F6,0x7200858efcbfc84d,1 +np.float64,0x404A81F6F24C0792,0x44b664a07ce5bbfa,1 +#use 12th entry +np.float64,0x40295FF1EFB9A741,0x4113c0e74c52d7b0,1 +np.float64,0x4073975F4CC411DA,0x5c32be40b4fec2c1,1 +np.float64,0x406E9DE52E82A77E,0x56049c9a3f1ae089,1 +np.float64,0x40748C2F52560ED9,0x5d93bc14fd4cd23b,1 +#use 13th entry +np.float64,0x4062A553CDC4D04C,0x4d6266bfde301318,1 +np.float64,0xC079EC1D63598AB7,0x1a88cb184dab224c,1 +np.float64,0xC0725C1CB3167427,0x25725b46f8a081f6,1 +np.float64,0x407888771D9B45F9,0x6353b1ec6bd7ce80,1 +#use 14th entry +np.float64,0xC082CBA03AA89807,0x09b383723831ce56,1 +np.float64,0xC083A8961BB67DD7,0x0735b118d5275552,1 +np.float64,0xC076BC6ECA12E7E3,0x1f2222679eaef615,1 +np.float64,0xC072752503AA1A5B,0x254eb832242c77e1,1 +#use 15th entry +np.float64,0xC058800792125DEC,0x371882372a0b48d4,1 +np.float64,0x4082909FD863E81C,0x7580d5f386920142,1 +np.float64,0xC071616F8FB534F9,0x26dbe20ef64a412b,1 +np.float64,0x406D1AB571CAA747,0x54ee0d55cb38ac20,1 +#use 16th entry +np.float64,0x406956428B7DAD09,0x52358682c271237f,1 +np.float64,0xC07EFC2D9D17B621,0x133b3e77c27a4d45,1 +np.float64,0xC08469BAC5BA3CCA,0x050863e5f42cc52f,1 +np.float64,0x407189D9626386A5,0x593cb1c0b3b5c1d3,1 +#use 17th entry +np.float64,0x4077E652E3DEB8C6,0x6269a10dcbd3c752,1 +np.float64,0x407674C97DB06878,0x605485dcc2426ec2,1 +np.float64,0xC07CE9969CF4268D,0x16386cf8996669f2,1 +np.float64,0x40780EE32D5847C4,0x62a436bd1abe108d,1 +#use 18th entry +np.float64,0x4076C3AA5E1E8DA1,0x60c62f56a5e72e24,1 +np.float64,0xC0730AFC7239B9BE,0x24758ead095cec1e,1 +np.float64,0xC085CC2B9C420DDB,0x0109cdaa2e5694c1,1 +np.float64,0x406D0765CB6D7AA4,0x54e06f8dd91bd945,1 +#use 19th entry +np.float64,0xC082D011F3B495E7,0x09a6647661d279c2,1 +np.float64,0xC072826AF8F6AFBC,0x253acd3cd224507e,1 +np.float64,0x404EB9C4810CEA09,0x457933dbf07e8133,1 +np.float64,0x408284FBC97C58CE,0x755f6eb234aa4b98,1 +#use 20th entry +np.float64,0x40856008CF6EDC63,0x7d9c0b3c03f4f73c,1 +np.float64,0xC077CB2E9F013B17,0x1d9b3d3a166a55db,1 +np.float64,0xC0479CA3C20AD057,0x3bad40e081555b99,1 +np.float64,0x40844CD31107332A,0x7a821d70aea478e2,1 +#use 21th entry +np.float64,0xC07C8FCC0BFCC844,0x16ba1cc8c539d19b,1 +np.float64,0xC085C4E9A3ABA488,0x011ff675ba1a2217,1 +np.float64,0x4074D538B32966E5,0x5dfd9d78043c6ad9,1 +np.float64,0xC0630CA16902AD46,0x3231a446074cede6,1 +#use 22th entry +np.float64,0xC06C826733D7D0B7,0x2b5f1078314d41e1,1 +np.float64,0xC0520DF55B2B907F,0x396c13a6ce8e833e,1 +np.float64,0xC080712072B0F437,0x107eae02d11d98ea,1 +np.float64,0x40528A6150E19EFB,0x469fdabda02228c5,1 +#use 23th entry +np.float64,0xC07B1D74B6586451,0x18d1253883ae3b48,1 
+np.float64,0x4045AFD7867DAEC0,0x43d7d634fc4c5d98,1 +np.float64,0xC07A08B91F9ED3E2,0x1a60973e6397fc37,1 +np.float64,0x407B3ECF0AE21C8C,0x673e03e9d98d7235,1 +#use 24th entry +np.float64,0xC078AEB6F30CEABF,0x1c530b93ab54a1b3,1 +np.float64,0x4084495006A41672,0x7a775b6dc7e63064,1 +np.float64,0x40830B1C0EBF95DD,0x76e1e6eed77cfb89,1 +np.float64,0x407D93E8F33D8470,0x6a9adbc9e1e4f1e5,1 +#use 25th entry +np.float64,0x4066B11A09EFD9E8,0x504dd528065c28a7,1 +np.float64,0x408545823723AEEB,0x7d504a9b1844f594,1 +np.float64,0xC068C711F2CA3362,0x2e104f3496ea118e,1 +np.float64,0x407F317FCC3CA873,0x6cf0732c9948ebf4,1 +#use 26th entry +np.float64,0x407AFB3EBA2ED50F,0x66dc28a129c868d5,1 +np.float64,0xC075377037708ADE,0x21531a329f3d793e,1 +np.float64,0xC07C30066A1F3246,0x174448baa16ded2b,1 +np.float64,0xC06689A75DE2ABD3,0x2fad70662fae230b,1 +#use 27th entry +np.float64,0x4081514E9FCCF1E0,0x71e673b9efd15f44,1 +np.float64,0xC0762C710AF68460,0x1ff1ed7d8947fe43,1 +np.float64,0xC0468102FF70D9C4,0x3be0c3a8ff3419a3,1 +np.float64,0xC07EA4CEEF02A83E,0x13b908f085102c61,1 +#use 28th entry +np.float64,0xC06290B04AE823C4,0x328a83da3c2e3351,1 +np.float64,0xC0770EB1D1C395FB,0x1eab281c1f1db5fe,1 +np.float64,0xC06F5D4D838A5BAE,0x29500ea32fb474ea,1 +np.float64,0x40723B3133B54C5D,0x5a3c82c7c3a2b848,1 +#use 29th entry +np.float64,0x4085E6454CE3B4AA,0x7f20319b9638d06a,1 +np.float64,0x408389F2A0585D4B,0x7850667c58aab3d0,1 +np.float64,0xC0382798F9C8AE69,0x3dc1c79fe8739d6d,1 +np.float64,0xC08299D827608418,0x0a4335f76cdbaeb5,1 +#use 30th entry +np.float64,0xC06F3DED43301BF1,0x2965670ae46750a8,1 +np.float64,0xC070CAF6BDD577D9,0x27b4aa4ffdd29981,1 +np.float64,0x4078529AD4B2D9F2,0x6305c12755d5e0a6,1 +np.float64,0xC055B14E75A31B96,0x381c2eda6d111e5d,1 +#use 31th entry +np.float64,0x407B13EE414FA931,0x6700772c7544564d,1 +np.float64,0x407EAFDE9DE3EC54,0x6c346a0e49724a3c,1 +np.float64,0xC08362F398B9530D,0x07ffeddbadf980cb,1 +np.float64,0x407E865CDD9EEB86,0x6bf866cac5e0d126,1 +#use 32th entry +np.float64,0x407FB62DBC794C86,0x6db009f708ac62cb,1 +np.float64,0xC063D0BAA68CDDDE,0x31a3b2a51ce50430,1 +np.float64,0xC05E7706A2231394,0x34f24bead6fab5c9,1 +np.float64,0x4083E3A06FDE444E,0x79527b7a386d1937,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-exp2.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-exp2.csv new file mode 100644 index 0000000..4e0a63e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-exp2.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbdfe94b0,0x3f6adda6,2 +np.float32,0x3f20f8f8,0x3fc5ec69,2 +np.float32,0x7040b5,0x3f800000,2 +np.float32,0x30ec5,0x3f800000,2 +np.float32,0x3eb63070,0x3fa3ce29,2 +np.float32,0xff4dda3d,0x0,2 +np.float32,0x805b832f,0x3f800000,2 +np.float32,0x3e883fb7,0x3f99ed8c,2 +np.float32,0x3f14d71f,0x3fbf8708,2 +np.float32,0xff7b1e55,0x0,2 +np.float32,0xbf691ac6,0x3f082fa2,2 +np.float32,0x7ee3e6ab,0x7f800000,2 +np.float32,0xbec6e2b4,0x3f439248,2 +np.float32,0xbf5f5ec2,0x3f0bd2c0,2 +np.float32,0x8025cc2c,0x3f800000,2 +np.float32,0x7e0d7672,0x7f800000,2 +np.float32,0xff4bbc5c,0x0,2 +np.float32,0xbd94fb30,0x3f73696b,2 +np.float32,0x6cc079,0x3f800000,2 +np.float32,0x803cf080,0x3f800000,2 +np.float32,0x71d418,0x3f800000,2 +np.float32,0xbf24a442,0x3f23ec1e,2 +np.float32,0xbe6c9510,0x3f5a1e1d,2 +np.float32,0xbe8fb284,0x3f52be38,2 +np.float32,0x7ea64754,0x7f800000,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x80620cfd,0x3f800000,2 +np.float32,0x3f3e20e8,0x3fd62e72,2 
+np.float32,0x3f384600,0x3fd2d00e,2 +np.float32,0xff362150,0x0,2 +np.float32,0xbf349fa8,0x3f1cfaef,2 +np.float32,0xbf776cf2,0x3f0301a6,2 +np.float32,0x8021fc60,0x3f800000,2 +np.float32,0xbdb75280,0x3f70995c,2 +np.float32,0x7e9363a6,0x7f800000,2 +np.float32,0x7e728422,0x7f800000,2 +np.float32,0xfe91edc2,0x0,2 +np.float32,0x3f5f438c,0x3fea491d,2 +np.float32,0x3f2afae9,0x3fcb5c1f,2 +np.float32,0xbef8e766,0x3f36c448,2 +np.float32,0xba522c00,0x3f7fdb97,2 +np.float32,0xff18ee8c,0x0,2 +np.float32,0xbee8c5f4,0x3f3acd44,2 +np.float32,0x3e790448,0x3f97802c,2 +np.float32,0x3e8c9541,0x3f9ad571,2 +np.float32,0xbf03fa9f,0x3f331460,2 +np.float32,0x801ee053,0x3f800000,2 +np.float32,0xbf773230,0x3f03167f,2 +np.float32,0x356fd9,0x3f800000,2 +np.float32,0x8009cd88,0x3f800000,2 +np.float32,0x7f2bac51,0x7f800000,2 +np.float32,0x4d9eeb,0x3f800000,2 +np.float32,0x3133,0x3f800000,2 +np.float32,0x7f4290e0,0x7f800000,2 +np.float32,0xbf5e6523,0x3f0c3161,2 +np.float32,0x3f19182e,0x3fc1bf10,2 +np.float32,0x7e1248bb,0x7f800000,2 +np.float32,0xff5f7aae,0x0,2 +np.float32,0x7e8557b5,0x7f800000,2 +np.float32,0x26fc7f,0x3f800000,2 +np.float32,0x80397d61,0x3f800000,2 +np.float32,0x3cb1825d,0x3f81efe0,2 +np.float32,0x3ed808d0,0x3fab7c45,2 +np.float32,0xbf6f668a,0x3f05e259,2 +np.float32,0x3e3c7802,0x3f916abd,2 +np.float32,0xbd5ac5a0,0x3f76b21b,2 +np.float32,0x805aa6c9,0x3f800000,2 +np.float32,0xbe4d6f68,0x3f5ec3e1,2 +np.float32,0x3f3108b2,0x3fceb87f,2 +np.float32,0x3ec385cc,0x3fa6c9fb,2 +np.float32,0xbe9fc1ce,0x3f4e35e8,2 +np.float32,0x43b68,0x3f800000,2 +np.float32,0x3ef0cdcc,0x3fb15557,2 +np.float32,0x3e3f729b,0x3f91b5e1,2 +np.float32,0x7f52a4df,0x7f800000,2 +np.float32,0xbf56da96,0x3f0f15b9,2 +np.float32,0xbf161d2b,0x3f2a7faf,2 +np.float32,0x3e8df763,0x3f9b1fbe,2 +np.float32,0xff4f0780,0x0,2 +np.float32,0x8048f594,0x3f800000,2 +np.float32,0x3e62bb1d,0x3f953b7e,2 +np.float32,0xfe58e764,0x0,2 +np.float32,0x3dd2c922,0x3f897718,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xff07b4b2,0x0,2 +np.float32,0x7f6231a0,0x7f800000,2 +np.float32,0xb8d1d,0x3f800000,2 +np.float32,0x3ee01d24,0x3fad5f16,2 +np.float32,0xbf43f59f,0x3f169869,2 +np.float32,0x801f5257,0x3f800000,2 +np.float32,0x803c15d8,0x3f800000,2 +np.float32,0x3f171a08,0x3fc0b42a,2 +np.float32,0x127aef,0x3f800000,2 +np.float32,0xfd1c6,0x3f800000,2 +np.float32,0x3f1ed13e,0x3fc4c59a,2 +np.float32,0x57fd4f,0x3f800000,2 +np.float32,0x6e8c61,0x3f800000,2 +np.float32,0x804019ab,0x3f800000,2 +np.float32,0x3ef4e5c6,0x3fb251a1,2 +np.float32,0x5044c3,0x3f800000,2 +np.float32,0x3f04460f,0x3fb7204b,2 +np.float32,0x7e326b47,0x7f800000,2 +np.float32,0x800a7e4c,0x3f800000,2 +np.float32,0xbf47ec82,0x3f14fccc,2 +np.float32,0xbedb1b3e,0x3f3e4a4d,2 +np.float32,0x3f741d86,0x3ff7e4b0,2 +np.float32,0xbe249d20,0x3f6501a6,2 +np.float32,0xbf2ea152,0x3f1f8c68,2 +np.float32,0x3ec6dbcc,0x3fa78b3f,2 +np.float32,0x7ebd9bb4,0x7f800000,2 +np.float32,0x3f61b574,0x3febd77a,2 +np.float32,0x3f3dfb2b,0x3fd61891,2 +np.float32,0x3c7d95,0x3f800000,2 +np.float32,0x8071e840,0x3f800000,2 +np.float32,0x15c6fe,0x3f800000,2 +np.float32,0xbf096601,0x3f307893,2 +np.float32,0x7f5c2ef9,0x7f800000,2 +np.float32,0xbe79f750,0x3f582689,2 +np.float32,0x1eb692,0x3f800000,2 +np.float32,0xbd8024f0,0x3f75226d,2 +np.float32,0xbf5a8be8,0x3f0da950,2 +np.float32,0xbf4d28f3,0x3f12e3e1,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0xfea8a758,0x0,2 +np.float32,0x8075d2cf,0x3f800000,2 +np.float32,0xfd99af58,0x0,2 +np.float32,0x9e6a,0x3f800000,2 +np.float32,0x2fa19f,0x3f800000,2 +np.float32,0x3e9f4206,0x3f9ecc56,2 
+np.float32,0xbee0b666,0x3f3cd9fc,2 +np.float32,0xbec558c4,0x3f43fab1,2 +np.float32,0x7e9a77df,0x7f800000,2 +np.float32,0xff3a9694,0x0,2 +np.float32,0x3f3b3708,0x3fd47f9a,2 +np.float32,0x807cd6d4,0x3f800000,2 +np.float32,0x804aa422,0x3f800000,2 +np.float32,0xfead7a70,0x0,2 +np.float32,0x3f08c610,0x3fb95efe,2 +np.float32,0xff390126,0x0,2 +np.float32,0x5d2d47,0x3f800000,2 +np.float32,0x8006849c,0x3f800000,2 +np.float32,0x654f6e,0x3f800000,2 +np.float32,0xff478a16,0x0,2 +np.float32,0x3f480b0c,0x3fdc024c,2 +np.float32,0xbc3b96c0,0x3f7df9f4,2 +np.float32,0xbcc96460,0x3f7bacb5,2 +np.float32,0x7f349f30,0x7f800000,2 +np.float32,0xbe08fa98,0x3f6954a1,2 +np.float32,0x4f3a13,0x3f800000,2 +np.float32,0x7f6a5ab4,0x7f800000,2 +np.float32,0x7eb85247,0x7f800000,2 +np.float32,0xbf287246,0x3f223e08,2 +np.float32,0x801584d0,0x3f800000,2 +np.float32,0x7ec25371,0x7f800000,2 +np.float32,0x3f002165,0x3fb51552,2 +np.float32,0x3e1108a8,0x3f8d3429,2 +np.float32,0x4f0f88,0x3f800000,2 +np.float32,0x7f67c1ce,0x7f800000,2 +np.float32,0xbf4348f8,0x3f16dedf,2 +np.float32,0xbe292b64,0x3f644d24,2 +np.float32,0xbf2bfa36,0x3f20b2d6,2 +np.float32,0xbf2a6e58,0x3f215f71,2 +np.float32,0x3e97d5d3,0x3f9d35df,2 +np.float32,0x31f597,0x3f800000,2 +np.float32,0x100544,0x3f800000,2 +np.float32,0x10a197,0x3f800000,2 +np.float32,0x3f44df50,0x3fda20d2,2 +np.float32,0x59916d,0x3f800000,2 +np.float32,0x707472,0x3f800000,2 +np.float32,0x8054194e,0x3f800000,2 +np.float32,0x80627b01,0x3f800000,2 +np.float32,0x7f4d5a5b,0x7f800000,2 +np.float32,0xbcecad00,0x3f7aeca5,2 +np.float32,0xff69c541,0x0,2 +np.float32,0xbe164e20,0x3f673c3a,2 +np.float32,0x3dd321de,0x3f897b39,2 +np.float32,0x3c9c4900,0x3f81b431,2 +np.float32,0x7f0efae3,0x7f800000,2 +np.float32,0xbf1b3ee6,0x3f282567,2 +np.float32,0x3ee858ac,0x3faf5083,2 +np.float32,0x3f0e6a39,0x3fbc3965,2 +np.float32,0x7f0c06d8,0x7f800000,2 +np.float32,0x801dd236,0x3f800000,2 +np.float32,0x564245,0x3f800000,2 +np.float32,0x7e99d3ad,0x7f800000,2 +np.float32,0xff3b0164,0x0,2 +np.float32,0x3f386f18,0x3fd2e785,2 +np.float32,0x7f603c39,0x7f800000,2 +np.float32,0x3cbd9b00,0x3f8211f0,2 +np.float32,0x2178e2,0x3f800000,2 +np.float32,0x5db226,0x3f800000,2 +np.float32,0xfec78d62,0x0,2 +np.float32,0x7f40bc1e,0x7f800000,2 +np.float32,0x80325064,0x3f800000,2 +np.float32,0x3f6068dc,0x3feb0377,2 +np.float32,0xfe8b95c6,0x0,2 +np.float32,0xbe496894,0x3f5f5f87,2 +np.float32,0xbf18722a,0x3f296cf4,2 +np.float32,0x332d0e,0x3f800000,2 +np.float32,0x3f6329dc,0x3fecc5c0,2 +np.float32,0x807d1802,0x3f800000,2 +np.float32,0x3e8afcee,0x3f9a7ff1,2 +np.float32,0x26a0a7,0x3f800000,2 +np.float32,0x7f13085d,0x7f800000,2 +np.float32,0x68d547,0x3f800000,2 +np.float32,0x7e9b04ae,0x7f800000,2 +np.float32,0x3f3ecdfe,0x3fd692ea,2 +np.float32,0x805256f4,0x3f800000,2 +np.float32,0x3f312dc8,0x3fcecd42,2 +np.float32,0x23ca15,0x3f800000,2 +np.float32,0x3f53c455,0x3fe31ad6,2 +np.float32,0xbf21186c,0x3f2580fd,2 +np.float32,0x803b9bb1,0x3f800000,2 +np.float32,0xff6ae1fc,0x0,2 +np.float32,0x2103cf,0x3f800000,2 +np.float32,0xbedcec6c,0x3f3dd29d,2 +np.float32,0x7f520afa,0x7f800000,2 +np.float32,0x7e8b44f2,0x7f800000,2 +np.float32,0xfef7f6ce,0x0,2 +np.float32,0xbd5e7c30,0x3f768a6f,2 +np.float32,0xfeb36848,0x0,2 +np.float32,0xff49effb,0x0,2 +np.float32,0xbec207c0,0x3f44dc74,2 +np.float32,0x3e91147f,0x3f9bc77f,2 +np.float32,0xfe784cd4,0x0,2 +np.float32,0xfd1a7250,0x0,2 +np.float32,0xff3b3f48,0x0,2 +np.float32,0x3f685db5,0x3ff0219f,2 +np.float32,0x3f370976,0x3fd21bae,2 +np.float32,0xfed4cc20,0x0,2 +np.float32,0xbf41e337,0x3f17714a,2 
+np.float32,0xbf4e8638,0x3f12593a,2 +np.float32,0x3edaf0f1,0x3fac295e,2 +np.float32,0x803cbb4f,0x3f800000,2 +np.float32,0x7f492043,0x7f800000,2 +np.float32,0x2cabcf,0x3f800000,2 +np.float32,0x17f8ac,0x3f800000,2 +np.float32,0x3e846478,0x3f99205a,2 +np.float32,0x76948f,0x3f800000,2 +np.float32,0x1,0x3f800000,2 +np.float32,0x7ea6419e,0x7f800000,2 +np.float32,0xa5315,0x3f800000,2 +np.float32,0xff3a8e32,0x0,2 +np.float32,0xbe5714e8,0x3f5d50b7,2 +np.float32,0xfeadf960,0x0,2 +np.float32,0x3ebbd1a8,0x3fa50efc,2 +np.float32,0x7f31dce7,0x7f800000,2 +np.float32,0x80314999,0x3f800000,2 +np.float32,0x8017f41b,0x3f800000,2 +np.float32,0x7ed6d051,0x7f800000,2 +np.float32,0x7f525688,0x7f800000,2 +np.float32,0x7f7fffff,0x7f800000,2 +np.float32,0x3e8b0461,0x3f9a8180,2 +np.float32,0x3d9fe46e,0x3f871e1f,2 +np.float32,0x5e6d8f,0x3f800000,2 +np.float32,0xbf09ae55,0x3f305608,2 +np.float32,0xfe7028c4,0x0,2 +np.float32,0x7f3ade56,0x7f800000,2 +np.float32,0xff4c9ef9,0x0,2 +np.float32,0x7e3199cf,0x7f800000,2 +np.float32,0x8048652f,0x3f800000,2 +np.float32,0x805e1237,0x3f800000,2 +np.float32,0x189ed8,0x3f800000,2 +np.float32,0xbea7c094,0x3f4bfd98,2 +np.float32,0xbf2f109c,0x3f1f5c5c,2 +np.float32,0xbf0e7f4c,0x3f2e0d2c,2 +np.float32,0x8005981f,0x3f800000,2 +np.float32,0xbf762005,0x3f0377f3,2 +np.float32,0xbf0f60ab,0x3f2da317,2 +np.float32,0xbf4aa3e7,0x3f13e54e,2 +np.float32,0xbf348fd2,0x3f1d01aa,2 +np.float32,0x3e530b50,0x3f93a7fb,2 +np.float32,0xbf0b05a4,0x3f2fb26a,2 +np.float32,0x3eea416c,0x3fafc4aa,2 +np.float32,0x805ad04d,0x3f800000,2 +np.float32,0xbf6328d8,0x3f0a655e,2 +np.float32,0x3f7347b9,0x3ff75558,2 +np.float32,0xfda3ca68,0x0,2 +np.float32,0x80497d21,0x3f800000,2 +np.float32,0x3e740452,0x3f96fd22,2 +np.float32,0x3e528e57,0x3f939b7e,2 +np.float32,0x3e9e19fa,0x3f9e8cbd,2 +np.float32,0x8078060b,0x3f800000,2 +np.float32,0x3f3fea7a,0x3fd73872,2 +np.float32,0xfcfa30a0,0x0,2 +np.float32,0x7f4eb4bf,0x7f800000,2 +np.float32,0x3f712618,0x3ff5e900,2 +np.float32,0xbf668f0e,0x3f0920c6,2 +np.float32,0x3f3001e9,0x3fce259d,2 +np.float32,0xbe9b6fac,0x3f4f6b9c,2 +np.float32,0xbf61fcf3,0x3f0ad5ec,2 +np.float32,0xff08a55c,0x0,2 +np.float32,0x3e805014,0x3f984872,2 +np.float32,0x6ce04c,0x3f800000,2 +np.float32,0x7f7cbc07,0x7f800000,2 +np.float32,0x3c87dc,0x3f800000,2 +np.float32,0x3f2ee498,0x3fcd869a,2 +np.float32,0x4b1116,0x3f800000,2 +np.float32,0x3d382d06,0x3f840d5f,2 +np.float32,0xff7de21e,0x0,2 +np.float32,0x3f2f1d6d,0x3fcda63c,2 +np.float32,0xbf1c1618,0x3f27c38a,2 +np.float32,0xff4264b1,0x0,2 +np.float32,0x8026e5e7,0x3f800000,2 +np.float32,0xbe6fa180,0x3f59ab02,2 +np.float32,0xbe923c02,0x3f52053b,2 +np.float32,0xff3aa453,0x0,2 +np.float32,0x3f77a7ac,0x3ffa47d0,2 +np.float32,0xbed15f36,0x3f40d08a,2 +np.float32,0xa62d,0x3f800000,2 +np.float32,0xbf342038,0x3f1d3123,2 +np.float32,0x7f2f7f80,0x7f800000,2 +np.float32,0x7f2b6fc1,0x7f800000,2 +np.float32,0xff323540,0x0,2 +np.float32,0x3f1a2b6e,0x3fc24faa,2 +np.float32,0x800cc1d2,0x3f800000,2 +np.float32,0xff38fa01,0x0,2 +np.float32,0x80800000,0x3f800000,2 +np.float32,0xbf3d22e0,0x3f196745,2 +np.float32,0x7f40fd62,0x7f800000,2 +np.float32,0x7e1785c7,0x7f800000,2 +np.float32,0x807408c4,0x3f800000,2 +np.float32,0xbf300192,0x3f1ef485,2 +np.float32,0x351e3d,0x3f800000,2 +np.float32,0x7f5ab736,0x7f800000,2 +np.float32,0x2f1696,0x3f800000,2 +np.float32,0x806ac5d7,0x3f800000,2 +np.float32,0x42ec59,0x3f800000,2 +np.float32,0x7f79f52d,0x7f800000,2 +np.float32,0x44ad28,0x3f800000,2 +np.float32,0xbf49dc9c,0x3f143532,2 +np.float32,0x3f6c1f1f,0x3ff295e7,2 +np.float32,0x1589b3,0x3f800000,2 
+np.float32,0x3f49b44e,0x3fdd0031,2 +np.float32,0x7f5942c9,0x7f800000,2 +np.float32,0x3f2dab28,0x3fccd877,2 +np.float32,0xff7fffff,0x0,2 +np.float32,0x80578eb2,0x3f800000,2 +np.float32,0x3f39ba67,0x3fd3a50b,2 +np.float32,0x8020340d,0x3f800000,2 +np.float32,0xbf6025b2,0x3f0b8783,2 +np.float32,0x8015ccfe,0x3f800000,2 +np.float32,0x3f6b9762,0x3ff23cd0,2 +np.float32,0xfeeb0c86,0x0,2 +np.float32,0x802779bc,0x3f800000,2 +np.float32,0xbf32bf64,0x3f1dc796,2 +np.float32,0xbf577eb6,0x3f0ed631,2 +np.float32,0x0,0x3f800000,2 +np.float32,0xfe99de6c,0x0,2 +np.float32,0x7a4e53,0x3f800000,2 +np.float32,0x1a15d3,0x3f800000,2 +np.float32,0x8035fe16,0x3f800000,2 +np.float32,0x3e845784,0x3f991dab,2 +np.float32,0x43d688,0x3f800000,2 +np.float32,0xbd447cc0,0x3f77a0b7,2 +np.float32,0x3f83fa,0x3f800000,2 +np.float32,0x3f141df2,0x3fbf2719,2 +np.float32,0x805c586a,0x3f800000,2 +np.float32,0x14c47e,0x3f800000,2 +np.float32,0x3d3bed00,0x3f8422d4,2 +np.float32,0x7f6f4ecd,0x7f800000,2 +np.float32,0x3f0a5e5a,0x3fba2c5c,2 +np.float32,0x523ecf,0x3f800000,2 +np.float32,0xbef4a6e8,0x3f37d262,2 +np.float32,0xff54eb58,0x0,2 +np.float32,0xff3fc875,0x0,2 +np.float32,0x8067c392,0x3f800000,2 +np.float32,0xfedae910,0x0,2 +np.float32,0x80595979,0x3f800000,2 +np.float32,0x3ee87d1d,0x3faf5929,2 +np.float32,0x7f5bad33,0x7f800000,2 +np.float32,0xbf45b868,0x3f15e109,2 +np.float32,0x3ef2277d,0x3fb1a868,2 +np.float32,0x3ca5a950,0x3f81ce8c,2 +np.float32,0x3e70f4e6,0x3f96ad25,2 +np.float32,0xfe3515bc,0x0,2 +np.float32,0xfe4af088,0x0,2 +np.float32,0xff3c78b2,0x0,2 +np.float32,0x7f50f51a,0x7f800000,2 +np.float32,0x3e3a232a,0x3f913009,2 +np.float32,0x7dfec6ff,0x7f800000,2 +np.float32,0x3e1bbaec,0x3f8e3ad6,2 +np.float32,0xbd658fa0,0x3f763ee7,2 +np.float32,0xfe958684,0x0,2 +np.float32,0x503670,0x3f800000,2 +np.float32,0x3f800000,0x40000000,2 +np.float32,0x1bbec6,0x3f800000,2 +np.float32,0xbea7bb7c,0x3f4bff00,2 +np.float32,0xff3a24a2,0x0,2 +np.float32,0xbf416240,0x3f17a635,2 +np.float32,0xbf800000,0x3f000000,2 +np.float32,0xff0c965c,0x0,2 +np.float32,0x80000000,0x3f800000,2 +np.float32,0xbec2c69a,0x3f44a99e,2 +np.float32,0x5b68d4,0x3f800000,2 +np.float32,0xb9a93000,0x3f7ff158,2 +np.float32,0x3d5a0dd8,0x3f84cfbc,2 +np.float32,0xbeaf7a28,0x3f49de4e,2 +np.float32,0x3ee83555,0x3faf4820,2 +np.float32,0xfd320330,0x0,2 +np.float32,0xe1af2,0x3f800000,2 +np.float32,0x7cf28caf,0x7f800000,2 +np.float32,0x80781009,0x3f800000,2 +np.float32,0xbf1e0baf,0x3f26e04d,2 +np.float32,0x7edb05b1,0x7f800000,2 +np.float32,0x3de004,0x3f800000,2 +np.float32,0xff436af6,0x0,2 +np.float32,0x802a9408,0x3f800000,2 +np.float32,0x7ed82205,0x7f800000,2 +np.float32,0x3e3f8212,0x3f91b767,2 +np.float32,0x16a2b2,0x3f800000,2 +np.float32,0xff1e5af3,0x0,2 +np.float32,0xbf1c860c,0x3f2790b7,2 +np.float32,0x3f3bc5da,0x3fd4d1d6,2 +np.float32,0x7f5f7085,0x7f800000,2 +np.float32,0x7f68e409,0x7f800000,2 +np.float32,0x7f4b3388,0x7f800000,2 +np.float32,0x7ecaf440,0x7f800000,2 +np.float32,0x80078785,0x3f800000,2 +np.float32,0x3ebd800d,0x3fa56f45,2 +np.float32,0xbe39a140,0x3f61c58e,2 +np.float32,0x803b587e,0x3f800000,2 +np.float32,0xbeaaa418,0x3f4b31c4,2 +np.float32,0xff7e2b9f,0x0,2 +np.float32,0xff5180a3,0x0,2 +np.float32,0xbf291394,0x3f21f73c,2 +np.float32,0x7f7b9698,0x7f800000,2 +np.float32,0x4218da,0x3f800000,2 +np.float32,0x7f135262,0x7f800000,2 +np.float32,0x804c10e8,0x3f800000,2 +np.float32,0xbf1c2a54,0x3f27ba5a,2 +np.float32,0x7f41fd32,0x7f800000,2 +np.float32,0x3e5cc464,0x3f94a195,2 +np.float32,0xff7a2fa7,0x0,2 +np.float32,0x3e05dc30,0x3f8c23c9,2 +np.float32,0x7f206d99,0x7f800000,2 
+np.float32,0xbe9ae520,0x3f4f9287,2 +np.float32,0xfe4f4d58,0x0,2 +np.float32,0xbf44db42,0x3f163ae3,2 +np.float32,0x3f65ac48,0x3fee6300,2 +np.float32,0x3ebfaf36,0x3fa5ecb0,2 +np.float32,0x3f466719,0x3fdb08b0,2 +np.float32,0x80000001,0x3f800000,2 +np.float32,0xff4b3c7b,0x0,2 +np.float32,0x3df44374,0x3f8b0819,2 +np.float32,0xfea4b540,0x0,2 +np.float32,0x7f358e3d,0x7f800000,2 +np.float32,0x801f5e63,0x3f800000,2 +np.float32,0x804ae77e,0x3f800000,2 +np.float32,0xdbb5,0x3f800000,2 +np.float32,0x7f0a7e3b,0x7f800000,2 +np.float32,0xbe4152e4,0x3f609953,2 +np.float32,0x4b9579,0x3f800000,2 +np.float32,0x3ece0bd4,0x3fa92ea5,2 +np.float32,0x7e499d9a,0x7f800000,2 +np.float32,0x80637d8a,0x3f800000,2 +np.float32,0x3e50a425,0x3f936a8b,2 +np.float32,0xbf0e8cb0,0x3f2e06dd,2 +np.float32,0x802763e2,0x3f800000,2 +np.float32,0xff73041b,0x0,2 +np.float32,0xfea466da,0x0,2 +np.float32,0x80064c73,0x3f800000,2 +np.float32,0xbef29222,0x3f385728,2 +np.float32,0x8029c215,0x3f800000,2 +np.float32,0xbd3994e0,0x3f7815d1,2 +np.float32,0xbe6ac9e4,0x3f5a61f3,2 +np.float32,0x804b58b0,0x3f800000,2 +np.float32,0xbdb83be0,0x3f70865c,2 +np.float32,0x7ee18da2,0x7f800000,2 +np.float32,0xfd4ca010,0x0,2 +np.float32,0x807c668b,0x3f800000,2 +np.float32,0xbd40ed90,0x3f77c6e9,2 +np.float32,0x7efc6881,0x7f800000,2 +np.float32,0xfe633bfc,0x0,2 +np.float32,0x803ce363,0x3f800000,2 +np.float32,0x7ecba81e,0x7f800000,2 +np.float32,0xfdcb2378,0x0,2 +np.float32,0xbebc5524,0x3f4662b2,2 +np.float32,0xfaa30000,0x0,2 +np.float32,0x805d451b,0x3f800000,2 +np.float32,0xbee85600,0x3f3ae996,2 +np.float32,0xfefb0a54,0x0,2 +np.float32,0xbdfc6690,0x3f6b0a08,2 +np.float32,0x58a57,0x3f800000,2 +np.float32,0x3b41b7,0x3f800000,2 +np.float32,0x7c99812d,0x7f800000,2 +np.float32,0xbd3ae740,0x3f78079d,2 +np.float32,0xbf4a48a7,0x3f1409dd,2 +np.float32,0xfdeaad58,0x0,2 +np.float32,0xbe9aa65a,0x3f4fa42c,2 +np.float32,0x3f79d78c,0x3ffbc458,2 +np.float32,0x805e7389,0x3f800000,2 +np.float32,0x7ebb3612,0x7f800000,2 +np.float32,0x2e27dc,0x3f800000,2 +np.float32,0x80726dec,0x3f800000,2 +np.float32,0xfe8fb738,0x0,2 +np.float32,0xff1ff3bd,0x0,2 +np.float32,0x7f5264a2,0x7f800000,2 +np.float32,0x3f5a6893,0x3fe739ca,2 +np.float32,0xbec4029c,0x3f44558d,2 +np.float32,0xbef65cfa,0x3f37657e,2 +np.float32,0x63aba1,0x3f800000,2 +np.float32,0xfbb6e200,0x0,2 +np.float32,0xbf3466fc,0x3f1d1307,2 +np.float32,0x3f258844,0x3fc861d7,2 +np.float32,0xbf5f29a7,0x3f0be6dc,2 +np.float32,0x802b51cd,0x3f800000,2 +np.float32,0xbe9094dc,0x3f527dae,2 +np.float32,0xfec2e68c,0x0,2 +np.float32,0x807b38bd,0x3f800000,2 +np.float32,0xbf594662,0x3f0e2663,2 +np.float32,0x7cbcf747,0x7f800000,2 +np.float32,0xbe4b88f0,0x3f5f0d47,2 +np.float32,0x3c53c4,0x3f800000,2 +np.float32,0xbe883562,0x3f54e3f7,2 +np.float32,0xbf1efaf0,0x3f267456,2 +np.float32,0x3e22cd3e,0x3f8ee98b,2 +np.float32,0x80434875,0x3f800000,2 +np.float32,0xbf000b44,0x3f34ff6e,2 +np.float32,0x7f311c3a,0x7f800000,2 +np.float32,0x802f7f3f,0x3f800000,2 +np.float32,0x805155fe,0x3f800000,2 +np.float32,0x7f5d7485,0x7f800000,2 +np.float32,0x80119197,0x3f800000,2 +np.float32,0x3f445b8b,0x3fd9d30d,2 +np.float32,0xbf638eb3,0x3f0a3f38,2 +np.float32,0x402410,0x3f800000,2 +np.float32,0xbc578a40,0x3f7dad1d,2 +np.float32,0xbeecbf8a,0x3f39cc9e,2 +np.float32,0x7f2935a4,0x7f800000,2 +np.float32,0x3f570fea,0x3fe523e2,2 +np.float32,0xbf06bffa,0x3f31bdb6,2 +np.float32,0xbf2afdfd,0x3f2120ba,2 +np.float32,0x7f76f7ab,0x7f800000,2 +np.float32,0xfee2d1e8,0x0,2 +np.float32,0x800b026d,0x3f800000,2 +np.float32,0xff0eda75,0x0,2 +np.float32,0x3d4c,0x3f800000,2 
+np.float32,0xbed538a2,0x3f3fcffb,2 +np.float32,0x3f73f4f9,0x3ff7c979,2 +np.float32,0x2aa9fc,0x3f800000,2 +np.float32,0x806a45b3,0x3f800000,2 +np.float32,0xff770d35,0x0,2 +np.float32,0x7e999be3,0x7f800000,2 +np.float32,0x80741128,0x3f800000,2 +np.float32,0xff6aac34,0x0,2 +np.float32,0x470f74,0x3f800000,2 +np.float32,0xff423b7b,0x0,2 +np.float32,0x17dfdd,0x3f800000,2 +np.float32,0x7f029e12,0x7f800000,2 +np.float32,0x803fcb9d,0x3f800000,2 +np.float32,0x3f3dc3,0x3f800000,2 +np.float32,0x7f3a27bc,0x7f800000,2 +np.float32,0x3e473108,0x3f9279ec,2 +np.float32,0x7f4add5d,0x7f800000,2 +np.float32,0xfd9736e0,0x0,2 +np.float32,0x805f1df2,0x3f800000,2 +np.float32,0x6c49c1,0x3f800000,2 +np.float32,0x7ec733c7,0x7f800000,2 +np.float32,0x804c1abf,0x3f800000,2 +np.float32,0x3de2e887,0x3f8a37a5,2 +np.float32,0x3f51630a,0x3fe1a561,2 +np.float32,0x3de686a8,0x3f8a62ff,2 +np.float32,0xbedb3538,0x3f3e439c,2 +np.float32,0xbf3aa892,0x3f1a6f9e,2 +np.float32,0x7ee5fb32,0x7f800000,2 +np.float32,0x7e916c9b,0x7f800000,2 +np.float32,0x3f033f1c,0x3fb69e19,2 +np.float32,0x25324b,0x3f800000,2 +np.float32,0x3f348d1d,0x3fd0b2e2,2 +np.float32,0x3f5797e8,0x3fe57851,2 +np.float32,0xbf69c316,0x3f07f1a0,2 +np.float32,0xbe8b7fb0,0x3f53f1bf,2 +np.float32,0xbdbbc190,0x3f703d00,2 +np.float32,0xff6c4fc0,0x0,2 +np.float32,0x7f29fcbe,0x7f800000,2 +np.float32,0x3f678d19,0x3fef9a23,2 +np.float32,0x73d140,0x3f800000,2 +np.float32,0x3e25bdd2,0x3f8f326b,2 +np.float32,0xbeb775ec,0x3f47b2c6,2 +np.float32,0xff451c4d,0x0,2 +np.float32,0x8072c466,0x3f800000,2 +np.float32,0x3f65e836,0x3fee89b2,2 +np.float32,0x52ca7a,0x3f800000,2 +np.float32,0x62cfed,0x3f800000,2 +np.float32,0xbf583dd0,0x3f0e8c5c,2 +np.float32,0xbf683842,0x3f088342,2 +np.float32,0x3f1a7828,0x3fc2780c,2 +np.float32,0x800ea979,0x3f800000,2 +np.float32,0xbeb9133c,0x3f474328,2 +np.float32,0x3ef09fc7,0x3fb14a4b,2 +np.float32,0x7ebbcb75,0x7f800000,2 +np.float32,0xff316c0e,0x0,2 +np.float32,0x805b84e3,0x3f800000,2 +np.float32,0x3d6a55e0,0x3f852d8a,2 +np.float32,0x3e755788,0x3f971fd1,2 +np.float32,0x3ee7aacb,0x3faf2743,2 +np.float32,0x7f714039,0x7f800000,2 +np.float32,0xff70bad8,0x0,2 +np.float32,0xbe0b74c8,0x3f68f08c,2 +np.float32,0xbf6cb170,0x3f06de86,2 +np.float32,0x7ec1fbff,0x7f800000,2 +np.float32,0x8014b1f6,0x3f800000,2 +np.float32,0xfe8b45fe,0x0,2 +np.float32,0x6e2220,0x3f800000,2 +np.float32,0x3ed1777d,0x3fa9f7ab,2 +np.float32,0xff48e467,0x0,2 +np.float32,0xff76c5aa,0x0,2 +np.float32,0x3e9bd330,0x3f9e0fd7,2 +np.float32,0x3f17de4f,0x3fc11aae,2 +np.float32,0x7eeaa2fd,0x7f800000,2 +np.float32,0xbf572746,0x3f0ef806,2 +np.float32,0x7e235554,0x7f800000,2 +np.float32,0xfe24fc1c,0x0,2 +np.float32,0x7daf71ad,0x7f800000,2 +np.float32,0x800d4a6b,0x3f800000,2 +np.float32,0xbf6fc31d,0x3f05c0ce,2 +np.float32,0x1c4d93,0x3f800000,2 +np.float32,0x7ee9200c,0x7f800000,2 +np.float32,0x3f54b4da,0x3fe3aeec,2 +np.float32,0x2b37b1,0x3f800000,2 +np.float32,0x3f7468bd,0x3ff81731,2 +np.float32,0x3f2850ea,0x3fc9e5f4,2 +np.float32,0xbe0d47ac,0x3f68a6f9,2 +np.float32,0x314877,0x3f800000,2 +np.float32,0x802700c3,0x3f800000,2 +np.float32,0x7e2c915f,0x7f800000,2 +np.float32,0x800d0059,0x3f800000,2 +np.float32,0x3f7f3c25,0x3fff7862,2 +np.float32,0xff735d31,0x0,2 +np.float32,0xff7e339e,0x0,2 +np.float32,0xbef96cf0,0x3f36a340,2 +np.float32,0x3db6ea21,0x3f882cb2,2 +np.float32,0x67cb3d,0x3f800000,2 +np.float32,0x801f349d,0x3f800000,2 +np.float32,0x3f1390ec,0x3fbede29,2 +np.float32,0x7f13644a,0x7f800000,2 +np.float32,0x804a369b,0x3f800000,2 +np.float32,0x80262666,0x3f800000,2 +np.float32,0x7e850fbc,0x7f800000,2 
+np.float32,0x18b002,0x3f800000,2 +np.float32,0x8051f1ed,0x3f800000,2 +np.float32,0x3eba48f6,0x3fa4b753,2 +np.float32,0xbf3f4130,0x3f1886a9,2 +np.float32,0xbedac006,0x3f3e61cf,2 +np.float32,0xbf097c70,0x3f306ddc,2 +np.float32,0x4aba6d,0x3f800000,2 +np.float32,0x580078,0x3f800000,2 +np.float32,0x3f64d82e,0x3fedda40,2 +np.float32,0x7f781fd6,0x7f800000,2 +np.float32,0x6aff3d,0x3f800000,2 +np.float32,0xff25e074,0x0,2 +np.float32,0x7ea9ec89,0x7f800000,2 +np.float32,0xbf63b816,0x3f0a2fbb,2 +np.float32,0x133f07,0x3f800000,2 +np.float32,0xff800000,0x0,2 +np.float32,0x8013dde7,0x3f800000,2 +np.float32,0xff770b95,0x0,2 +np.float32,0x806154e8,0x3f800000,2 +np.float32,0x3f1e7bce,0x3fc4981a,2 +np.float32,0xff262c78,0x0,2 +np.float32,0x3f59a652,0x3fe6c04c,2 +np.float32,0x7f220166,0x7f800000,2 +np.float32,0x7eb24939,0x7f800000,2 +np.float32,0xbed58bb0,0x3f3fba6a,2 +np.float32,0x3c2ad000,0x3f80eda7,2 +np.float32,0x2adb2e,0x3f800000,2 +np.float32,0xfe8b213e,0x0,2 +np.float32,0xbf2e0c1e,0x3f1fccea,2 +np.float32,0x7e1716be,0x7f800000,2 +np.float32,0x80184e73,0x3f800000,2 +np.float32,0xbf254743,0x3f23a3d5,2 +np.float32,0x8063a722,0x3f800000,2 +np.float32,0xbe50adf0,0x3f5e46c7,2 +np.float32,0x3f614158,0x3feb8d60,2 +np.float32,0x8014bbc8,0x3f800000,2 +np.float32,0x283bc7,0x3f800000,2 +np.float32,0x3ffb5c,0x3f800000,2 +np.float32,0xfe8de6bc,0x0,2 +np.float32,0xbea6e086,0x3f4c3b82,2 +np.float32,0xfee64b92,0x0,2 +np.float32,0x506c1a,0x3f800000,2 +np.float32,0xff342af8,0x0,2 +np.float32,0x6b6f4c,0x3f800000,2 +np.float32,0xfeb42b1e,0x0,2 +np.float32,0x3e49384a,0x3f92ad71,2 +np.float32,0x152d08,0x3f800000,2 +np.float32,0x804c8f09,0x3f800000,2 +np.float32,0xff5e927d,0x0,2 +np.float32,0x6374da,0x3f800000,2 +np.float32,0x3f48f011,0x3fdc8ae4,2 +np.float32,0xbf446a30,0x3f1668e8,2 +np.float32,0x3ee77073,0x3faf196e,2 +np.float32,0xff4caa40,0x0,2 +np.float32,0x7efc9363,0x7f800000,2 +np.float32,0xbf706dcc,0x3f05830d,2 +np.float32,0xfe29c7e8,0x0,2 +np.float32,0x803cfe58,0x3f800000,2 +np.float32,0x3ec34c7c,0x3fa6bd0a,2 +np.float32,0x3eb85b62,0x3fa44968,2 +np.float32,0xfda1b9d8,0x0,2 +np.float32,0x802932cd,0x3f800000,2 +np.float32,0xbf5cde78,0x3f0cc5fa,2 +np.float32,0x3f31bf44,0x3fcf1ec8,2 +np.float32,0x803a0882,0x3f800000,2 +np.float32,0x800000,0x3f800000,2 +np.float32,0x3f54110e,0x3fe34a08,2 +np.float32,0x80645ea9,0x3f800000,2 +np.float32,0xbd8c1070,0x3f7425c3,2 +np.float32,0x801a006a,0x3f800000,2 +np.float32,0x7f5d161e,0x7f800000,2 +np.float32,0x805b5df3,0x3f800000,2 +np.float32,0xbf71a7c0,0x3f0511be,2 +np.float32,0xbe9a55c0,0x3f4fbad6,2 +np.float64,0xde7e2fd9bcfc6,0x3ff0000000000000,1 +np.float64,0xbfd8cd88eb319b12,0x3fe876349efbfa2b,1 +np.float64,0x3fe4fa13ace9f428,0x3ff933fbb117d196,1 +np.float64,0x475b3d048eb68,0x3ff0000000000000,1 +np.float64,0x7fef39ed07be73d9,0x7ff0000000000000,1 +np.float64,0x80026b84d904d70a,0x3ff0000000000000,1 +np.float64,0xebd60627d7ac1,0x3ff0000000000000,1 +np.float64,0xbfd7cbefdbaf97e0,0x3fe8bad30f6cf8e1,1 +np.float64,0x7fc17c605a22f8c0,0x7ff0000000000000,1 +np.float64,0x8cdac05119b58,0x3ff0000000000000,1 +np.float64,0x3fc45cd60a28b9ac,0x3ff1dd8028ec3f41,1 +np.float64,0x7fef4fce137e9f9b,0x7ff0000000000000,1 +np.float64,0xe5a2b819cb457,0x3ff0000000000000,1 +np.float64,0xe3bcfd4dc77a0,0x3ff0000000000000,1 +np.float64,0x68f0b670d1e17,0x3ff0000000000000,1 +np.float64,0xae69a6455cd35,0x3ff0000000000000,1 +np.float64,0xffe7007a0c6e00f4,0x0,1 +np.float64,0x59fc57a8b3f8c,0x3ff0000000000000,1 +np.float64,0xbfeee429c0bdc854,0x3fe0638fa62bed9f,1 +np.float64,0x80030bb6e206176f,0x3ff0000000000000,1 
+np.float64,0x8006967a36ad2cf5,0x3ff0000000000000,1 +np.float64,0x3fe128176a22502f,0x3ff73393301e5dc8,1 +np.float64,0x218de20c431bd,0x3ff0000000000000,1 +np.float64,0x3fe7dbc48aafb789,0x3ffad38989b5955c,1 +np.float64,0xffda1ef411343de8,0x0,1 +np.float64,0xc6b392838d673,0x3ff0000000000000,1 +np.float64,0x7fe6d080c1ada101,0x7ff0000000000000,1 +np.float64,0xbfed36dd67fa6dbb,0x3fe0fec342c4ee89,1 +np.float64,0x3fee2bb6a3fc576e,0x3ffec1c149f1f092,1 +np.float64,0xbfd1f785eb23ef0c,0x3fea576eb01233cb,1 +np.float64,0x7fdad29a1f35a533,0x7ff0000000000000,1 +np.float64,0xffe8928c4fb12518,0x0,1 +np.float64,0x7fb123160022462b,0x7ff0000000000000,1 +np.float64,0x8007ab56cfaf56ae,0x3ff0000000000000,1 +np.float64,0x7fda342d6634685a,0x7ff0000000000000,1 +np.float64,0xbfe3b7e42c676fc8,0x3fe4e05cf8685b8a,1 +np.float64,0xffa708be7c2e1180,0x0,1 +np.float64,0xbfe8ffbece31ff7e,0x3fe29eb84077a34a,1 +np.float64,0xbf91002008220040,0x3fefa245058f05cb,1 +np.float64,0x8000281f0ee0503f,0x3ff0000000000000,1 +np.float64,0x8005617adc2ac2f6,0x3ff0000000000000,1 +np.float64,0x7fa84fec60309fd8,0x7ff0000000000000,1 +np.float64,0x8d00c0231a018,0x3ff0000000000000,1 +np.float64,0xbfdfe52ca63fca5a,0x3fe6a7324cc00d57,1 +np.float64,0x7fcc81073d39020d,0x7ff0000000000000,1 +np.float64,0x800134ff5a6269ff,0x3ff0000000000000,1 +np.float64,0xffc7fff98d2ffff4,0x0,1 +np.float64,0x8000925ce50124bb,0x3ff0000000000000,1 +np.float64,0xffe2530c66a4a618,0x0,1 +np.float64,0x7fc99070673320e0,0x7ff0000000000000,1 +np.float64,0xbfddd5c1f13bab84,0x3fe72a0c80f8df39,1 +np.float64,0x3fe1c220fee38442,0x3ff7817ec66aa55b,1 +np.float64,0x3fb9a1e1043343c2,0x3ff1265e575e6404,1 +np.float64,0xffef72e0833ee5c0,0x0,1 +np.float64,0x3fe710c0416e2181,0x3ffa5e93588aaa69,1 +np.float64,0xbfd8d23cbab1a47a,0x3fe874f5b9d99885,1 +np.float64,0x7fe9628ebd72c51c,0x7ff0000000000000,1 +np.float64,0xdd5fa611babf5,0x3ff0000000000000,1 +np.float64,0x8002bafac86575f6,0x3ff0000000000000,1 +np.float64,0x68acea44d159e,0x3ff0000000000000,1 +np.float64,0xffd776695eaeecd2,0x0,1 +np.float64,0x80059b59bb4b36b4,0x3ff0000000000000,1 +np.float64,0xbdcdd2af7b9bb,0x3ff0000000000000,1 +np.float64,0x8002b432ee856867,0x3ff0000000000000,1 +np.float64,0xcbc72f09978e6,0x3ff0000000000000,1 +np.float64,0xbfee8f4bf6fd1e98,0x3fe081cc0318b170,1 +np.float64,0xffc6e2892d2dc514,0x0,1 +np.float64,0x7feb682e4db6d05c,0x7ff0000000000000,1 +np.float64,0x8004b70a04296e15,0x3ff0000000000000,1 +np.float64,0x42408a4284812,0x3ff0000000000000,1 +np.float64,0xbfe9b8b197f37163,0x3fe254b4c003ce0a,1 +np.float64,0x3fcaadf5f5355bec,0x3ff27ca7876a8d20,1 +np.float64,0xfff0000000000000,0x0,1 +np.float64,0x7fea8376d33506ed,0x7ff0000000000000,1 +np.float64,0xffef73c2d63ee785,0x0,1 +np.float64,0xffe68b2bae2d1657,0x0,1 +np.float64,0x3fd8339cb2306739,0x3ff4cb774d616f90,1 +np.float64,0xbfc6d1db4d2da3b8,0x3fec47bb873a309c,1 +np.float64,0x7fe858016230b002,0x7ff0000000000000,1 +np.float64,0x7fe74cb99d2e9972,0x7ff0000000000000,1 +np.float64,0xffec2e96dc385d2d,0x0,1 +np.float64,0xb762a9876ec55,0x3ff0000000000000,1 +np.float64,0x3feca230c5794462,0x3ffdbfe62a572f52,1 +np.float64,0xbfb5ebad3a2bd758,0x3fee27eed86dcc39,1 +np.float64,0x471c705a8e38f,0x3ff0000000000000,1 +np.float64,0x7fc79bb5cf2f376b,0x7ff0000000000000,1 +np.float64,0xbfe53d6164ea7ac3,0x3fe4331b3beb73bd,1 +np.float64,0xbfe375a3f766eb48,0x3fe4fe67edb516e6,1 +np.float64,0x3fe1c7686ca38ed1,0x3ff7842f04770ba9,1 +np.float64,0x242e74dc485cf,0x3ff0000000000000,1 +np.float64,0x8009c06ab71380d6,0x3ff0000000000000,1 +np.float64,0x3fd08505efa10a0c,0x3ff3227b735b956d,1 
+np.float64,0xffe3dfcecda7bf9d,0x0,1 +np.float64,0x8001f079bbc3e0f4,0x3ff0000000000000,1 +np.float64,0x3fddc706b6bb8e0c,0x3ff616d927987363,1 +np.float64,0xbfd151373ea2a26e,0x3fea870ba53ec126,1 +np.float64,0x7fe89533bfb12a66,0x7ff0000000000000,1 +np.float64,0xffed302cbc3a6059,0x0,1 +np.float64,0x3fd871cc28b0e398,0x3ff4d97d58c16ae2,1 +np.float64,0x7fbe9239683d2472,0x7ff0000000000000,1 +np.float64,0x848a445909149,0x3ff0000000000000,1 +np.float64,0x8007b104ce2f620a,0x3ff0000000000000,1 +np.float64,0x7fc2cd6259259ac4,0x7ff0000000000000,1 +np.float64,0xbfeadb640df5b6c8,0x3fe1e2b068de10af,1 +np.float64,0x800033b2f1a06767,0x3ff0000000000000,1 +np.float64,0x7fe54e5b7caa9cb6,0x7ff0000000000000,1 +np.float64,0x4f928f209f26,0x3ff0000000000000,1 +np.float64,0x8003c3dc6f2787ba,0x3ff0000000000000,1 +np.float64,0xbfd55a59daaab4b4,0x3fe9649d57b32b5d,1 +np.float64,0xffe3e2968d67c52c,0x0,1 +np.float64,0x80087434d550e86a,0x3ff0000000000000,1 +np.float64,0xffdde800083bd000,0x0,1 +np.float64,0xffe291f0542523e0,0x0,1 +np.float64,0xbfe1419bc3e28338,0x3fe6051d4f95a34a,1 +np.float64,0x3fd9d00ee1b3a01e,0x3ff5292bb8d5f753,1 +np.float64,0x3fdb720b60b6e417,0x3ff589d133625374,1 +np.float64,0xbfe3e21f0967c43e,0x3fe4cd4d02e3ef9a,1 +np.float64,0x7fd7e27f3dafc4fd,0x7ff0000000000000,1 +np.float64,0x3fd1cc2620a3984c,0x3ff366befbc38e3e,1 +np.float64,0x3fe78d05436f1a0b,0x3ffaa5ee4ea54b79,1 +np.float64,0x7e2acc84fc55a,0x3ff0000000000000,1 +np.float64,0x800ffb861c5ff70c,0x3ff0000000000000,1 +np.float64,0xffb2b0db1a2561b8,0x0,1 +np.float64,0xbfe80c2363701847,0x3fe301fdfe789576,1 +np.float64,0x7fe383c1c3e70783,0x7ff0000000000000,1 +np.float64,0xbfeefc02e6fdf806,0x3fe05b1a8528bf6c,1 +np.float64,0xbfe42c9268285925,0x3fe4abdc14793cb8,1 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0xa71c7ce94e390,0x3ff0000000000000,1 +np.float64,0x800ed4e6777da9cd,0x3ff0000000000000,1 +np.float64,0x3fde11b35d3c2367,0x3ff628bdc6dd1b78,1 +np.float64,0x3fef3964dbfe72ca,0x3fff777cae357608,1 +np.float64,0x3fefe369b7ffc6d4,0x3fffec357be508a3,1 +np.float64,0xbfdef1855f3de30a,0x3fe6e348c58e3fed,1 +np.float64,0x3fee0e2bc13c1c58,0x3ffeae1909c1b973,1 +np.float64,0xbfd31554ffa62aaa,0x3fea06628b2f048a,1 +np.float64,0x800dc56bcc7b8ad8,0x3ff0000000000000,1 +np.float64,0x7fbba01b8e374036,0x7ff0000000000000,1 +np.float64,0x7fd9737a92b2e6f4,0x7ff0000000000000,1 +np.float64,0x3feeae0fac3d5c1f,0x3fff1913705f1f07,1 +np.float64,0x3fdcc64fcdb98ca0,0x3ff5d9c3e5862972,1 +np.float64,0x3fdad9f83db5b3f0,0x3ff56674e81c1bd1,1 +np.float64,0x32b8797065710,0x3ff0000000000000,1 +np.float64,0x3fd20deae6241bd6,0x3ff37495bc057394,1 +np.float64,0x7fc899f0763133e0,0x7ff0000000000000,1 +np.float64,0x80045805fc08b00d,0x3ff0000000000000,1 +np.float64,0xbfcd8304cb3b0608,0x3feb4611f1eaa30c,1 +np.float64,0x3fd632a2fcac6544,0x3ff4592e1ea14fb0,1 +np.float64,0xffeeb066007d60cb,0x0,1 +np.float64,0x800bb12a42b76255,0x3ff0000000000000,1 +np.float64,0xbfe060fe1760c1fc,0x3fe6714640ab2574,1 +np.float64,0x80067ed737acfdaf,0x3ff0000000000000,1 +np.float64,0x3fd5ec3211abd864,0x3ff449adea82e73e,1 +np.float64,0x7fc4b2fdc22965fb,0x7ff0000000000000,1 +np.float64,0xff656afd002ad600,0x0,1 +np.float64,0xffeadefcdcb5bdf9,0x0,1 +np.float64,0x80052f18610a5e32,0x3ff0000000000000,1 +np.float64,0xbfd5b75c78ab6eb8,0x3fe94b15e0f39194,1 +np.float64,0xa4d3de2b49a7c,0x3ff0000000000000,1 +np.float64,0xbfe321c93de64392,0x3fe524ac7bbee401,1 +np.float64,0x3feb32f5def665ec,0x3ffcd6e4e5f9c271,1 +np.float64,0x7fe6b07e4ced60fc,0x7ff0000000000000,1 +np.float64,0x3fe013bb2de02776,0x3ff6aa4c32ab5ba4,1 
+np.float64,0xbfeadd81d375bb04,0x3fe1e1de89b4aebf,1 +np.float64,0xffece7678079cece,0x0,1 +np.float64,0x3fe3d87b8467b0f8,0x3ff897cf22505e4d,1 +np.float64,0xffc4e3a05129c740,0x0,1 +np.float64,0xbfddee6b03bbdcd6,0x3fe723dd83ab49bd,1 +np.float64,0x3fcc4e2672389c4d,0x3ff2a680db769116,1 +np.float64,0x3fd8ed221ab1da44,0x3ff4f569aec8b850,1 +np.float64,0x80000a3538a0146b,0x3ff0000000000000,1 +np.float64,0x8004832eb109065e,0x3ff0000000000000,1 +np.float64,0xffdca83c60395078,0x0,1 +np.float64,0xffef551cda3eaa39,0x0,1 +np.float64,0x800fd95dd65fb2bc,0x3ff0000000000000,1 +np.float64,0x3ff0000000000000,0x4000000000000000,1 +np.float64,0xbfc06f5c4f20deb8,0x3fed466c17305ad8,1 +np.float64,0xbfeb01b5f476036c,0x3fe1d3de0f4211f4,1 +np.float64,0xbfdb2b9284365726,0x3fe7d7b02f790b05,1 +np.float64,0xff76ba83202d7500,0x0,1 +np.float64,0x3fd3f1c59ea7e38c,0x3ff3db96b3a0aaad,1 +np.float64,0x8b99ff6d17340,0x3ff0000000000000,1 +np.float64,0xbfeb383aa0f67075,0x3fe1bedcf2531c08,1 +np.float64,0x3fe321e35fa643c7,0x3ff83749a5d686ee,1 +np.float64,0xbfd863eb2130c7d6,0x3fe8923fcc39bac7,1 +np.float64,0x9e71dd333ce3c,0x3ff0000000000000,1 +np.float64,0x9542962b2a853,0x3ff0000000000000,1 +np.float64,0xba2c963b74593,0x3ff0000000000000,1 +np.float64,0x80019f4d0ca33e9b,0x3ff0000000000000,1 +np.float64,0xffde3e39a73c7c74,0x0,1 +np.float64,0x800258ae02c4b15d,0x3ff0000000000000,1 +np.float64,0xbfd99a535a3334a6,0x3fe8402f3a0662a5,1 +np.float64,0xe6c62143cd8c4,0x3ff0000000000000,1 +np.float64,0x7fbcc828f0399051,0x7ff0000000000000,1 +np.float64,0xbfe42e3596285c6b,0x3fe4ab2066d66071,1 +np.float64,0xffe2ee42d365dc85,0x0,1 +np.float64,0x3fe1f98abea3f315,0x3ff79dc68002a80b,1 +np.float64,0x7fd7225891ae44b0,0x7ff0000000000000,1 +np.float64,0x477177408ee30,0x3ff0000000000000,1 +np.float64,0xbfe16a7e2162d4fc,0x3fe5f1a5c745385d,1 +np.float64,0xbf98aaee283155e0,0x3fef785952e9c089,1 +np.float64,0x7fd7c14a8daf8294,0x7ff0000000000000,1 +np.float64,0xf7e7713defcee,0x3ff0000000000000,1 +np.float64,0x800769aa11aed355,0x3ff0000000000000,1 +np.float64,0xbfed30385e3a6071,0x3fe10135a3bd9ae6,1 +np.float64,0x3fe6dd7205edbae4,0x3ffa4155899efd70,1 +np.float64,0x800d705d26bae0ba,0x3ff0000000000000,1 +np.float64,0xa443ac1f48876,0x3ff0000000000000,1 +np.float64,0xbfec8cfec43919fe,0x3fe13dbf966e6633,1 +np.float64,0x7fd246efaa248dde,0x7ff0000000000000,1 +np.float64,0x800f2ad14afe55a3,0x3ff0000000000000,1 +np.float64,0x800487a894c90f52,0x3ff0000000000000,1 +np.float64,0x80014c4f19e2989f,0x3ff0000000000000,1 +np.float64,0x3fc11f265f223e4d,0x3ff18def05c971e5,1 +np.float64,0xffeb6d565776daac,0x0,1 +np.float64,0x7fd5ca5df8ab94bb,0x7ff0000000000000,1 +np.float64,0xbfe33de4fde67bca,0x3fe517d0e212cd1c,1 +np.float64,0xbfd1c738e5a38e72,0x3fea6539e9491693,1 +np.float64,0xbfec1d8c33b83b18,0x3fe16790fbca0c65,1 +np.float64,0xbfeecb464b7d968d,0x3fe06c67e2aefa55,1 +np.float64,0xbfd621dbf1ac43b8,0x3fe92dfa32d93846,1 +np.float64,0x80069a02860d3406,0x3ff0000000000000,1 +np.float64,0xbfe84f650e309eca,0x3fe2e661300f1975,1 +np.float64,0x7fc1d2cec523a59d,0x7ff0000000000000,1 +np.float64,0x3fd7706d79aee0db,0x3ff49fb033353dfe,1 +np.float64,0xffd94ba458329748,0x0,1 +np.float64,0x7fea98ba1a753173,0x7ff0000000000000,1 +np.float64,0xbfe756ba092ead74,0x3fe34d428d1857bc,1 +np.float64,0xffecfbd836b9f7b0,0x0,1 +np.float64,0x3fd211fbe5a423f8,0x3ff375711a3641e0,1 +np.float64,0x7fee24f7793c49ee,0x7ff0000000000000,1 +np.float64,0x7fe6a098886d4130,0x7ff0000000000000,1 +np.float64,0xbfd4ade909a95bd2,0x3fe99436524db1f4,1 +np.float64,0xbfeb704e6476e09d,0x3fe1a95be4a21bc6,1 +np.float64,0xffefc0f6627f81ec,0x0,1 
+np.float64,0x7feff3f896ffe7f0,0x7ff0000000000000,1 +np.float64,0xa3f74edb47eea,0x3ff0000000000000,1 +np.float64,0xbfe0a551cf214aa4,0x3fe65027a7ff42e3,1 +np.float64,0x3fe164b23622c964,0x3ff7521c6225f51d,1 +np.float64,0x7fc258752324b0e9,0x7ff0000000000000,1 +np.float64,0x4739b3348e737,0x3ff0000000000000,1 +np.float64,0xb0392b1d60726,0x3ff0000000000000,1 +np.float64,0x7fe26f42e5e4de85,0x7ff0000000000000,1 +np.float64,0x8004601f87e8c040,0x3ff0000000000000,1 +np.float64,0xffe92ce37b3259c6,0x0,1 +np.float64,0x3fe620da3a6c41b4,0x3ff9d6ee3d005466,1 +np.float64,0x3fd850cfa2b0a1a0,0x3ff4d20bd249d411,1 +np.float64,0xffdcdfdfb5b9bfc0,0x0,1 +np.float64,0x800390297d672054,0x3ff0000000000000,1 +np.float64,0x3fde5864f6bcb0ca,0x3ff639bb9321f5ef,1 +np.float64,0x3fee484cec7c909a,0x3ffed4d2c6274219,1 +np.float64,0x7fe9b9a064b37340,0x7ff0000000000000,1 +np.float64,0xffe50028b8aa0051,0x0,1 +np.float64,0x3fe37774ade6eee9,0x3ff864558498a9a8,1 +np.float64,0x7fef83c724bf078d,0x7ff0000000000000,1 +np.float64,0xbfeb58450fb6b08a,0x3fe1b290556be73d,1 +np.float64,0x7fd7161475ae2c28,0x7ff0000000000000,1 +np.float64,0x3fece09621f9c12c,0x3ffde836a583bbdd,1 +np.float64,0x3fd045790ea08af2,0x3ff31554778fd4e2,1 +np.float64,0xbfe7c7dd6cef8fbb,0x3fe31e2eeda857fc,1 +np.float64,0xffe9632f5372c65e,0x0,1 +np.float64,0x800d4f3a703a9e75,0x3ff0000000000000,1 +np.float64,0xffea880e4df5101c,0x0,1 +np.float64,0xbfeb7edc4ff6fdb8,0x3fe1a3cb5dc33594,1 +np.float64,0xbfcaae4bab355c98,0x3febb1ee65e16b58,1 +np.float64,0xbfde598a19bcb314,0x3fe709145eafaaf8,1 +np.float64,0x3feefb6d78fdf6db,0x3fff4d5c8c68e39a,1 +np.float64,0x13efc75427dfa,0x3ff0000000000000,1 +np.float64,0xffe26f65c064decb,0x0,1 +np.float64,0xbfed5c1addfab836,0x3fe0f1133bd2189a,1 +np.float64,0x7fe7a7cf756f4f9e,0x7ff0000000000000,1 +np.float64,0xffc681702e2d02e0,0x0,1 +np.float64,0x8003d6ab5067ad57,0x3ff0000000000000,1 +np.float64,0xffa695f1342d2be0,0x0,1 +np.float64,0xbfcf8857db3f10b0,0x3feafa14da8c29a4,1 +np.float64,0xbfe8ca06be71940e,0x3fe2b46f6d2c64b4,1 +np.float64,0x3451c74468a3a,0x3ff0000000000000,1 +np.float64,0x3fde47d5f6bc8fac,0x3ff635bf8e024716,1 +np.float64,0xffda159d5db42b3a,0x0,1 +np.float64,0x7fef9fecaa3f3fd8,0x7ff0000000000000,1 +np.float64,0x3fd4e745e3a9ce8c,0x3ff410a9cb6fd8bf,1 +np.float64,0xffef57019b3eae02,0x0,1 +np.float64,0xbfe6604f4f6cc09e,0x3fe3b55de43c626d,1 +np.float64,0xffe066a424a0cd48,0x0,1 +np.float64,0x3fd547de85aa8fbc,0x3ff425b2a7a16675,1 +np.float64,0xffb3c69280278d28,0x0,1 +np.float64,0xffebe0b759f7c16e,0x0,1 +np.float64,0x3fefc84106ff9082,0x3fffd973687337d8,1 +np.float64,0x501c42a4a0389,0x3ff0000000000000,1 +np.float64,0x7feb45d13eb68ba1,0x7ff0000000000000,1 +np.float64,0xbfb16a8c2e22d518,0x3fee86a9c0f9291a,1 +np.float64,0x3be327b877c66,0x3ff0000000000000,1 +np.float64,0x7fe4a58220694b03,0x7ff0000000000000,1 +np.float64,0x3fe0286220a050c4,0x3ff6b472157ab8f2,1 +np.float64,0x3fc9381825327030,0x3ff2575fbea2bf5d,1 +np.float64,0xbfd1af7ee8a35efe,0x3fea6c032cf7e669,1 +np.float64,0xbfea9b0f39b5361e,0x3fe1fbae14b40b4d,1 +np.float64,0x39efe4aa73dfd,0x3ff0000000000000,1 +np.float64,0xffeb06fdc8360dfb,0x0,1 +np.float64,0xbfda481e72b4903c,0x3fe812b4b08d4884,1 +np.float64,0xbfd414ba5ba82974,0x3fe9bec9474bdfe6,1 +np.float64,0x7fe707177b6e0e2e,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0xbfede6a75bbbcd4f,0x3fe0be874cccd399,1 +np.float64,0x8006cdb577cd9b6c,0x3ff0000000000000,1 +np.float64,0x800051374f20a26f,0x3ff0000000000000,1 +np.float64,0x3fe5cba8c96b9752,0x3ff9a76b3adcc122,1 
+np.float64,0xbfee3933487c7267,0x3fe0a0b190f9609a,1 +np.float64,0x3fd574b8d8aae970,0x3ff42f7e83de1af9,1 +np.float64,0xba5db72b74bb7,0x3ff0000000000000,1 +np.float64,0x3fa9bf512c337ea0,0x3ff0914a7f743a94,1 +np.float64,0xffe8cb736c3196e6,0x0,1 +np.float64,0x3761b2f06ec37,0x3ff0000000000000,1 +np.float64,0x8b4d4433169a9,0x3ff0000000000000,1 +np.float64,0x800f0245503e048b,0x3ff0000000000000,1 +np.float64,0x7fb20d54ac241aa8,0x7ff0000000000000,1 +np.float64,0x3fdf26666b3e4ccd,0x3ff66b8995142017,1 +np.float64,0xbfcbf2a83737e550,0x3feb8173a7b9d6b5,1 +np.float64,0x3fd31572a0a62ae5,0x3ff3ac6c94313dcd,1 +np.float64,0x7fb6c2807a2d8500,0x7ff0000000000000,1 +np.float64,0x800799758f2f32ec,0x3ff0000000000000,1 +np.float64,0xe72f1f6bce5e4,0x3ff0000000000000,1 +np.float64,0x3fe0e0f223a1c1e4,0x3ff70fed5b761673,1 +np.float64,0x3fe6d4f133eda9e2,0x3ffa3c8000c169eb,1 +np.float64,0xbfe1ccc3d8639988,0x3fe5c32148bedbda,1 +np.float64,0x3fea71c53574e38a,0x3ffc5f31201fe9be,1 +np.float64,0x9e0323eb3c065,0x3ff0000000000000,1 +np.float64,0x8005cc79a5cb98f4,0x3ff0000000000000,1 +np.float64,0x1dace1f83b59d,0x3ff0000000000000,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0xbfdef50830bdea10,0x3fe6e269fc17ebef,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0xbfdfa82192bf5044,0x3fe6b6313ee0a095,1 +np.float64,0x3fd9398fe2b27320,0x3ff506ca2093c060,1 +np.float64,0x8002721fe664e441,0x3ff0000000000000,1 +np.float64,0x800c04166ad8082d,0x3ff0000000000000,1 +np.float64,0xffec3918b3387230,0x0,1 +np.float64,0x3fec62d5dfb8c5ac,0x3ffd972ea4a54b32,1 +np.float64,0x3fe7e42a0b6fc854,0x3ffad86b0443181d,1 +np.float64,0x3fc0aff5f3215fec,0x3ff1836058d4d210,1 +np.float64,0xbf82ff68a025fec0,0x3fefcb7f06862dce,1 +np.float64,0xae2e35195c5c7,0x3ff0000000000000,1 +np.float64,0x3fece3bddf79c77c,0x3ffdea41fb1ba8fa,1 +np.float64,0xbfa97b947832f730,0x3feeea34ebedbbd2,1 +np.float64,0xbfdfb1b1ce3f6364,0x3fe6b3d72871335c,1 +np.float64,0xbfe61a4f24ac349e,0x3fe3d356bf991b06,1 +np.float64,0x7fe23117a5e4622e,0x7ff0000000000000,1 +np.float64,0x800552a8cccaa552,0x3ff0000000000000,1 +np.float64,0x625b4d0ac4b6a,0x3ff0000000000000,1 +np.float64,0x3f86cf15702d9e00,0x3ff01fbe0381676d,1 +np.float64,0x800d7d1b685afa37,0x3ff0000000000000,1 +np.float64,0x3fe2cb6e40a596dd,0x3ff80a1a562f7fc9,1 +np.float64,0x3fe756eb8e2eadd7,0x3ffa86c638aad07d,1 +np.float64,0x800dc9a5513b934b,0x3ff0000000000000,1 +np.float64,0xbfbbdd118a37ba20,0x3fedacb4624f3cee,1 +np.float64,0x800de01f8efbc03f,0x3ff0000000000000,1 +np.float64,0x800da1a3fe9b4348,0x3ff0000000000000,1 +np.float64,0xbf87d8c7602fb180,0x3fefbe2614998ab6,1 +np.float64,0xbfdfff6141bffec2,0x3fe6a0c54d9f1bc8,1 +np.float64,0xee8fbba5dd1f8,0x3ff0000000000000,1 +np.float64,0x3fe79dc93e6f3b92,0x3ffaaf9d7d955b2c,1 +np.float64,0xffedd4b3d07ba967,0x0,1 +np.float64,0x800905dfc1720bc0,0x3ff0000000000000,1 +np.float64,0x3fd9e483b8b3c907,0x3ff52ddc6c950e7f,1 +np.float64,0xe34ffefdc6a00,0x3ff0000000000000,1 +np.float64,0x2168e62242d1e,0x3ff0000000000000,1 +np.float64,0x800349950e26932b,0x3ff0000000000000,1 +np.float64,0x7fc50da8532a1b50,0x7ff0000000000000,1 +np.float64,0xae1a4d115c34a,0x3ff0000000000000,1 +np.float64,0xa020f0b74041e,0x3ff0000000000000,1 +np.float64,0x3fd2aa2f77a5545f,0x3ff3959f09519a25,1 +np.float64,0x3fbfefc3223fdf86,0x3ff171f3df2d408b,1 +np.float64,0xbfea9fc340b53f86,0x3fe1f9d92b712654,1 +np.float64,0xffe9b920a5337240,0x0,1 +np.float64,0xbfe2eb0265e5d605,0x3fe53dd195782de3,1 +np.float64,0x7fb932c70e32658d,0x7ff0000000000000,1 +np.float64,0x3fda816bfcb502d8,0x3ff551f8d5c84c82,1 
+np.float64,0x3fed68cbe9fad198,0x3ffe40f6692d5693,1 +np.float64,0x32df077665be2,0x3ff0000000000000,1 +np.float64,0x7fdc9c2f3539385d,0x7ff0000000000000,1 +np.float64,0x7fe71091a2ee2122,0x7ff0000000000000,1 +np.float64,0xbfe68106c46d020e,0x3fe3a76b56024c2c,1 +np.float64,0xffcf0572823e0ae4,0x0,1 +np.float64,0xbfeeab341fbd5668,0x3fe077d496941cda,1 +np.float64,0x7fe7ada0d2af5b41,0x7ff0000000000000,1 +np.float64,0xffacdef2a439bde0,0x0,1 +np.float64,0x3fe4200f3128401e,0x3ff8be0ddf30fd1e,1 +np.float64,0xffd9022a69320454,0x0,1 +np.float64,0xbfe8e06914f1c0d2,0x3fe2ab5fe7fffb5a,1 +np.float64,0x3fc4b976602972ed,0x3ff1e6786fa7a890,1 +np.float64,0xbfd784c105af0982,0x3fe8cdeb1cdbd57e,1 +np.float64,0x7feb20a20eb64143,0x7ff0000000000000,1 +np.float64,0xbfc87dd83630fbb0,0x3fec067c1e7e6983,1 +np.float64,0x7fe5400cbe6a8018,0x7ff0000000000000,1 +np.float64,0xbfb4a1f5e22943e8,0x3fee42e6c81559a9,1 +np.float64,0x3fe967c575f2cf8a,0x3ffbbd8bc0d5c50d,1 +np.float64,0xbfeb059cf4760b3a,0x3fe1d25c592c4dab,1 +np.float64,0xbfeef536d5bdea6e,0x3fe05d832c15c64a,1 +np.float64,0x3fa90b3f6432167f,0x3ff08d410dd732cc,1 +np.float64,0xbfeaff265e75fe4d,0x3fe1d4db3fb3208d,1 +np.float64,0x6d93d688db27b,0x3ff0000000000000,1 +np.float64,0x800ab9b4ea55736a,0x3ff0000000000000,1 +np.float64,0x3fd444b39d288967,0x3ff3ed749d48d444,1 +np.float64,0xbfd5f2c0d0abe582,0x3fe93ad6124d88e7,1 +np.float64,0x3fea8fd915f51fb2,0x3ffc71b32cb92d60,1 +np.float64,0xbfd23d6491a47aca,0x3fea43875709b0f0,1 +np.float64,0xffe76f75ce6edeeb,0x0,1 +np.float64,0x1f5670da3eacf,0x3ff0000000000000,1 +np.float64,0x8000d89c9621b13a,0x3ff0000000000000,1 +np.float64,0x3fedb51c52bb6a39,0x3ffe732279c228ff,1 +np.float64,0x7f99215ac83242b5,0x7ff0000000000000,1 +np.float64,0x742a6864e854e,0x3ff0000000000000,1 +np.float64,0xbfe02fb340205f66,0x3fe689495f9164e3,1 +np.float64,0x7fef4c12b0fe9824,0x7ff0000000000000,1 +np.float64,0x3fd40e17c2a81c30,0x3ff3e1aee8ed972f,1 +np.float64,0x7fdcd264e939a4c9,0x7ff0000000000000,1 +np.float64,0x3fdb675838b6ceb0,0x3ff587526241c550,1 +np.float64,0x3fdf1a4081be3480,0x3ff66896a18c2385,1 +np.float64,0xbfea5082b874a106,0x3fe218cf8f11be13,1 +np.float64,0xffe1a0ebf7e341d8,0x0,1 +np.float64,0x3fed0a2222ba1444,0x3ffe032ce928ae7d,1 +np.float64,0xffeae036da75c06d,0x0,1 +np.float64,0x5b05fc8ab60c0,0x3ff0000000000000,1 +np.float64,0x7fd8aae5f03155cb,0x7ff0000000000000,1 +np.float64,0xbfd0b4d9fda169b4,0x3feab41e58b6ccb7,1 +np.float64,0xffdcaffa57395ff4,0x0,1 +np.float64,0xbfcbf1455437e28c,0x3feb81a884182c5d,1 +np.float64,0x3f9d6700b83ace01,0x3ff0525657db35d4,1 +np.float64,0x4fd5b0b29fab7,0x3ff0000000000000,1 +np.float64,0x3fe9af2df5b35e5c,0x3ffbe895684df916,1 +np.float64,0x800dfd41f9dbfa84,0x3ff0000000000000,1 +np.float64,0xbf2a30457e546,0x3ff0000000000000,1 +np.float64,0x7fc6be37182d7c6d,0x7ff0000000000000,1 +np.float64,0x800e0f9788dc1f2f,0x3ff0000000000000,1 +np.float64,0x8006890c704d121a,0x3ff0000000000000,1 +np.float64,0xffecb1a7cbb9634f,0x0,1 +np.float64,0xffb35c330426b868,0x0,1 +np.float64,0x7fe8f2ba8a71e574,0x7ff0000000000000,1 +np.float64,0xf3ccff8fe79a0,0x3ff0000000000000,1 +np.float64,0x3fdf19a84e3e3351,0x3ff66871b17474c1,1 +np.float64,0x80049a662d0934cd,0x3ff0000000000000,1 +np.float64,0xdf5bb4bbbeb77,0x3ff0000000000000,1 +np.float64,0x8005eca030cbd941,0x3ff0000000000000,1 +np.float64,0xffe5f239586be472,0x0,1 +np.float64,0xbfc4526a0728a4d4,0x3fecaa52fbf5345e,1 +np.float64,0xbfe8f1ecda31e3da,0x3fe2a44c080848b3,1 +np.float64,0x3feebd32f4bd7a66,0x3fff234788938c3e,1 +np.float64,0xffd6ca04e9ad940a,0x0,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 
+np.float64,0xbfd4c560a9a98ac2,0x3fe98db6d97442fc,1 +np.float64,0x8005723471cae46a,0x3ff0000000000000,1 +np.float64,0xbfeb278299764f05,0x3fe1c54b48f8ba4b,1 +np.float64,0x8007907b376f20f7,0x3ff0000000000000,1 +np.float64,0x7fe9c2fd01b385f9,0x7ff0000000000000,1 +np.float64,0x7fdaa37368b546e6,0x7ff0000000000000,1 +np.float64,0xbfe6d0f3786da1e7,0x3fe38582271cada7,1 +np.float64,0xbfea9b77823536ef,0x3fe1fb8575cd1b7d,1 +np.float64,0xbfe90ac38bf21587,0x3fe29a471b47a2e8,1 +np.float64,0xbfe9c51844738a30,0x3fe24fc8de03ea84,1 +np.float64,0x3fe45a9013a8b520,0x3ff8dd7c80f1cf75,1 +np.float64,0xbfe5780551eaf00a,0x3fe419832a6a4c56,1 +np.float64,0xffefffffffffffff,0x0,1 +np.float64,0x7fe3778c84a6ef18,0x7ff0000000000000,1 +np.float64,0xbfdc8a60413914c0,0x3fe77dc55b85028f,1 +np.float64,0xef47ae2fde8f6,0x3ff0000000000000,1 +np.float64,0x8001269fa4c24d40,0x3ff0000000000000,1 +np.float64,0x3fe9d2d39e73a5a7,0x3ffbfe2a66c4148e,1 +np.float64,0xffee61f528fcc3e9,0x0,1 +np.float64,0x3fe8a259ab7144b3,0x3ffb47e797a34bd2,1 +np.float64,0x3f906d610820dac0,0x3ff02dccda8e1a75,1 +np.float64,0x3fe70739f32e0e74,0x3ffa59232f4fcd07,1 +np.float64,0x3fe6b7f5e6ad6fec,0x3ffa2c0cc54f2c16,1 +np.float64,0x95a91a792b524,0x3ff0000000000000,1 +np.float64,0xbfedf6fcf57bedfa,0x3fe0b89bb40081cc,1 +np.float64,0xbfa4d2de9c29a5c0,0x3fef1c485678d657,1 +np.float64,0x3fe130470d22608e,0x3ff737b0be409a38,1 +np.float64,0x3fcf8035423f006b,0x3ff2f9d7c3c6a302,1 +np.float64,0xffe5995a3eab32b4,0x0,1 +np.float64,0xffca68c63034d18c,0x0,1 +np.float64,0xff9d53af903aa760,0x0,1 +np.float64,0x800563f1de6ac7e4,0x3ff0000000000000,1 +np.float64,0x7fce284fa63c509e,0x7ff0000000000000,1 +np.float64,0x7fb2a3959a25472a,0x7ff0000000000000,1 +np.float64,0x7fdbe2652f37c4c9,0x7ff0000000000000,1 +np.float64,0x800d705bbc1ae0b8,0x3ff0000000000000,1 +np.float64,0x7fd9bd2347b37a46,0x7ff0000000000000,1 +np.float64,0x3fcac3c0fb358782,0x3ff27ed62d6c8221,1 +np.float64,0x800110691ec220d3,0x3ff0000000000000,1 +np.float64,0x3fef79a8157ef350,0x3fffa368513eb909,1 +np.float64,0x7fe8bd2f0e317a5d,0x7ff0000000000000,1 +np.float64,0x7fd3040e60a6081c,0x7ff0000000000000,1 +np.float64,0xffea50723234a0e4,0x0,1 +np.float64,0xbfe6220054ac4400,0x3fe3d00961238a93,1 +np.float64,0x3f9eddd8c83dbbc0,0x3ff0567b0c73005a,1 +np.float64,0xbfa4a062c42940c0,0x3fef1e68badde324,1 +np.float64,0xbfd077ad4720ef5a,0x3feac5d577581d07,1 +np.float64,0x7fdfd4b025bfa95f,0x7ff0000000000000,1 +np.float64,0xd00d3cf3a01a8,0x3ff0000000000000,1 +np.float64,0x7fe3010427260207,0x7ff0000000000000,1 +np.float64,0x22ea196645d44,0x3ff0000000000000,1 +np.float64,0x7fd747e8cd2e8fd1,0x7ff0000000000000,1 +np.float64,0xd50665e7aa0cd,0x3ff0000000000000,1 +np.float64,0x7fe1da580ae3b4af,0x7ff0000000000000,1 +np.float64,0xffeb218ecfb6431d,0x0,1 +np.float64,0xbf887d0dd030fa00,0x3fefbc6252c8b354,1 +np.float64,0x3fcaa31067354621,0x3ff27b904c07e07f,1 +np.float64,0x7fe698cc4ded3198,0x7ff0000000000000,1 +np.float64,0x1c40191a38804,0x3ff0000000000000,1 +np.float64,0x80086fd20e30dfa4,0x3ff0000000000000,1 +np.float64,0x7fed34d5eaba69ab,0x7ff0000000000000,1 +np.float64,0xffd00b52622016a4,0x0,1 +np.float64,0x3f80abcdb021579b,0x3ff0172d27945851,1 +np.float64,0x3fe614cfd66c29a0,0x3ff9d031e1839191,1 +np.float64,0x80021d71c8843ae4,0x3ff0000000000000,1 +np.float64,0x800bc2adc657855c,0x3ff0000000000000,1 +np.float64,0x6b9fec1cd73fe,0x3ff0000000000000,1 +np.float64,0xffd9093b5f321276,0x0,1 +np.float64,0x800d3c6c77fa78d9,0x3ff0000000000000,1 +np.float64,0xffe80fc1cbf01f83,0x0,1 +np.float64,0xffbffbaf2a3ff760,0x0,1 
+np.float64,0x3fea1ed29eb43da5,0x3ffc2c64ec0e17a3,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x3fd944a052328941,0x3ff5094f4c43ecca,1 +np.float64,0x800b1f9416163f29,0x3ff0000000000000,1 +np.float64,0x800f06bf33de0d7e,0x3ff0000000000000,1 +np.float64,0xbfdbf0d226b7e1a4,0x3fe7a4f73793d95b,1 +np.float64,0xffe7306c30ae60d8,0x0,1 +np.float64,0x7fe991accfb32359,0x7ff0000000000000,1 +np.float64,0x3fcc0040d2380082,0x3ff29ea47e4f07d4,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x0,0x3ff0000000000000,1 +np.float64,0x3fe1423f7be2847e,0x3ff740bc1d3b20f8,1 +np.float64,0xbfeae3a3cab5c748,0x3fe1df7e936f8504,1 +np.float64,0x800b2da7d6165b50,0x3ff0000000000000,1 +np.float64,0x800b2404fcd6480a,0x3ff0000000000000,1 +np.float64,0x6fcbcf88df97b,0x3ff0000000000000,1 +np.float64,0xa248c0e14492,0x3ff0000000000000,1 +np.float64,0xffd255776824aaee,0x0,1 +np.float64,0x80057b3effeaf67f,0x3ff0000000000000,1 +np.float64,0x3feb0b07d7761610,0x3ffcbdfe1be5a594,1 +np.float64,0x924e1019249c2,0x3ff0000000000000,1 +np.float64,0x80074307e80e8611,0x3ff0000000000000,1 +np.float64,0xffb207fa46240ff8,0x0,1 +np.float64,0x95ac388d2b587,0x3ff0000000000000,1 +np.float64,0xbff0000000000000,0x3fe0000000000000,1 +np.float64,0x3fd38b6a492716d5,0x3ff3c59f62b5add5,1 +np.float64,0x7fe49362c3e926c5,0x7ff0000000000000,1 +np.float64,0x7fe842889db08510,0x7ff0000000000000,1 +np.float64,0xbfba6003e834c008,0x3fedcb620a2d9856,1 +np.float64,0xffe7e782bd6fcf05,0x0,1 +np.float64,0x7fd9b93d9433727a,0x7ff0000000000000,1 +np.float64,0x7fc8fcb61d31f96b,0x7ff0000000000000,1 +np.float64,0xbfef9be8db3f37d2,0x3fe022d603b81dc2,1 +np.float64,0x6f4fc766de9fa,0x3ff0000000000000,1 +np.float64,0xbfe93016f132602e,0x3fe28b42d782d949,1 +np.float64,0x3fe10e52b8e21ca5,0x3ff726a38b0bb895,1 +np.float64,0x3fbbba0ae6377416,0x3ff13f56084a9da3,1 +np.float64,0x3fe09e42ece13c86,0x3ff6eeb57e775e24,1 +np.float64,0x800942e39fb285c8,0x3ff0000000000000,1 +np.float64,0xffe5964370eb2c86,0x0,1 +np.float64,0x3fde479f32bc8f3e,0x3ff635b2619ba53a,1 +np.float64,0x3fe826e187f04dc3,0x3ffaff52b79c3a08,1 +np.float64,0x3febcbf1eab797e4,0x3ffd37152e5e2598,1 +np.float64,0x3fa0816a202102d4,0x3ff05c8e6a8b00d5,1 +np.float64,0xbd005ccb7a00c,0x3ff0000000000000,1 +np.float64,0x44c12fdc89827,0x3ff0000000000000,1 +np.float64,0xffc8fdffa431fc00,0x0,1 +np.float64,0xffeb4f5a87b69eb4,0x0,1 +np.float64,0xbfb07e7f8420fd00,0x3fee9a32924fe6a0,1 +np.float64,0xbfbd9d1bb63b3a38,0x3fed88ca81e5771c,1 +np.float64,0x8008682a74f0d055,0x3ff0000000000000,1 +np.float64,0x3fdeedbc7b3ddb79,0x3ff65dcb7c55f4dc,1 +np.float64,0x8009e889c613d114,0x3ff0000000000000,1 +np.float64,0x3faea831f43d5064,0x3ff0ad935e890e49,1 +np.float64,0xf0af1703e15e3,0x3ff0000000000000,1 +np.float64,0xffec06c4a5f80d88,0x0,1 +np.float64,0x53a1cc0ca743a,0x3ff0000000000000,1 +np.float64,0x7fd10c9eea22193d,0x7ff0000000000000,1 +np.float64,0xbfd48a6bf0a914d8,0x3fe99e0d109f2bac,1 +np.float64,0x3fd6dfe931adbfd4,0x3ff47f81c2dfc5d3,1 +np.float64,0x3fed20e86b7a41d0,0x3ffe11fecc7bc686,1 +np.float64,0xbfea586818b4b0d0,0x3fe215b7747d5cb8,1 +np.float64,0xbfd4ad3e20295a7c,0x3fe99465ab8c3275,1 +np.float64,0x3fd6619ee4acc33e,0x3ff4638b7b80c08a,1 +np.float64,0x3fdf6fcb63bedf97,0x3ff67d62fd3d560c,1 +np.float64,0x800a9191e7152324,0x3ff0000000000000,1 +np.float64,0x3fd2ff3c0da5fe78,0x3ff3a7b17e892a28,1 +np.float64,0x8003dbf1f327b7e5,0x3ff0000000000000,1 +np.float64,0xffea6b89a934d712,0x0,1 +np.float64,0x7fcfb879043f70f1,0x7ff0000000000000,1 +np.float64,0xea6a84dbd4d51,0x3ff0000000000000,1 
+np.float64,0x800ec97a815d92f5,0x3ff0000000000000,1 +np.float64,0xffe304c3a8660987,0x0,1 +np.float64,0xbfefe24dd3ffc49c,0x3fe00a4e065be96d,1 +np.float64,0xffd3cc8c00a79918,0x0,1 +np.float64,0x95be8b7b2b7d2,0x3ff0000000000000,1 +np.float64,0x7fe20570cba40ae1,0x7ff0000000000000,1 +np.float64,0x7f97a06da02f40da,0x7ff0000000000000,1 +np.float64,0xffe702b9522e0572,0x0,1 +np.float64,0x3fada2d8543b45b1,0x3ff0a7adc4201e08,1 +np.float64,0x235e6acc46bce,0x3ff0000000000000,1 +np.float64,0x3fea6bc28ef4d786,0x3ffc5b7fc68fddac,1 +np.float64,0xffdbc9f505b793ea,0x0,1 +np.float64,0xffe98b137ff31626,0x0,1 +np.float64,0x800e26c6721c4d8d,0x3ff0000000000000,1 +np.float64,0x80080de445301bc9,0x3ff0000000000000,1 +np.float64,0x37e504a86fca1,0x3ff0000000000000,1 +np.float64,0x8002f5f60325ebed,0x3ff0000000000000,1 +np.float64,0x5c8772feb90ef,0x3ff0000000000000,1 +np.float64,0xbfe021abb4604358,0x3fe69023a51d22b8,1 +np.float64,0x3fde744f8fbce8a0,0x3ff64074dc84edd7,1 +np.float64,0xbfdd92899f3b2514,0x3fe73aefd9701858,1 +np.float64,0x7fc1ad5c51235ab8,0x7ff0000000000000,1 +np.float64,0xaae2f98955c5f,0x3ff0000000000000,1 +np.float64,0x7f9123d5782247aa,0x7ff0000000000000,1 +np.float64,0xbfe3f8e94b67f1d2,0x3fe4c30ab28e9cb7,1 +np.float64,0x7fdaba8b4cb57516,0x7ff0000000000000,1 +np.float64,0x7fefc85cfeff90b9,0x7ff0000000000000,1 +np.float64,0xffb83b4f523076a0,0x0,1 +np.float64,0xbfe888a68c71114d,0x3fe2ceff17c203d1,1 +np.float64,0x800de1dac4bbc3b6,0x3ff0000000000000,1 +np.float64,0xbfe4f27f09e9e4fe,0x3fe453f9af407eac,1 +np.float64,0xffe3d2713467a4e2,0x0,1 +np.float64,0xbfebaab840375570,0x3fe1931131b98842,1 +np.float64,0x93892a1b27126,0x3ff0000000000000,1 +np.float64,0x1e8e7f983d1d1,0x3ff0000000000000,1 +np.float64,0x3fecc950627992a0,0x3ffdd926f036add0,1 +np.float64,0xbfd41dfb1aa83bf6,0x3fe9bc34ece35b94,1 +np.float64,0x800aebfc6555d7f9,0x3ff0000000000000,1 +np.float64,0x7fe33ba52ca67749,0x7ff0000000000000,1 +np.float64,0xffe57c9b3feaf936,0x0,1 +np.float64,0x3fdd12464fba248c,0x3ff5ebc5598e6bd0,1 +np.float64,0xffe06d7f0fe0dafe,0x0,1 +np.float64,0x800e55b7fe9cab70,0x3ff0000000000000,1 +np.float64,0x3fd33803c8267008,0x3ff3b3cb78b2d642,1 +np.float64,0xe9cab8a1d3957,0x3ff0000000000000,1 +np.float64,0x3fb38ac166271580,0x3ff0de906947c0f0,1 +np.float64,0xbfd67aa552acf54a,0x3fe915cf64a389fd,1 +np.float64,0x1db96daa3b72f,0x3ff0000000000000,1 +np.float64,0xbfee9f08f4fd3e12,0x3fe07c2c615add3c,1 +np.float64,0xf14f6d65e29ee,0x3ff0000000000000,1 +np.float64,0x800bce089e179c12,0x3ff0000000000000,1 +np.float64,0xffc42dcc37285b98,0x0,1 +np.float64,0x7fd5f37063abe6e0,0x7ff0000000000000,1 +np.float64,0xbfd943c2cbb28786,0x3fe856f6452ec753,1 +np.float64,0x8ddfbc091bbf8,0x3ff0000000000000,1 +np.float64,0xbfe153491e22a692,0x3fe5fcb075dbbd5d,1 +np.float64,0xffe7933999ef2672,0x0,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0xbfe9154580b22a8b,0x3fe2960bac3a8220,1 +np.float64,0x800dc6dda21b8dbb,0x3ff0000000000000,1 +np.float64,0xbfb26225a824c448,0x3fee7239a457df81,1 +np.float64,0xbfd7b68c83af6d1a,0x3fe8c08e351ab468,1 +np.float64,0xffde01f7213c03ee,0x0,1 +np.float64,0x3fe54cbe0faa997c,0x3ff9614527191d72,1 +np.float64,0xbfd6bec3732d7d86,0x3fe90354909493de,1 +np.float64,0xbfef3c85bd7e790b,0x3fe0444f8c489ca6,1 +np.float64,0x899501b7132a0,0x3ff0000000000000,1 +np.float64,0xbfe17a456462f48b,0x3fe5ea2719a9a84b,1 +np.float64,0xffe34003b8668007,0x0,1 +np.float64,0x7feff6a3633fed46,0x7ff0000000000000,1 +np.float64,0x3fba597ecc34b2fe,0x3ff12ee72e4de474,1 
+np.float64,0x4084c7b68109a,0x3ff0000000000000,1
+np.float64,0x3fad23bf4c3a4780,0x3ff0a4d06193ff6d,1
+np.float64,0xffd0fe2707a1fc4e,0x0,1
+np.float64,0xb96cb43f72d97,0x3ff0000000000000,1
+np.float64,0x7fc4d684d829ad09,0x7ff0000000000000,1
+np.float64,0x7fdc349226b86923,0x7ff0000000000000,1
+np.float64,0x7fd82851cd3050a3,0x7ff0000000000000,1
+np.float64,0x800cde0041b9bc01,0x3ff0000000000000,1
+np.float64,0x4e8caa1e9d196,0x3ff0000000000000,1
+np.float64,0xbfed06a6d2fa0d4e,0x3fe1108c3682b05a,1
+np.float64,0xffe8908122312102,0x0,1
+np.float64,0xffe56ed6d9aaddad,0x0,1
+np.float64,0x3fedd6db00fbadb6,0x3ffe896c68c4b26e,1
+np.float64,0x3fde31f9b4bc63f4,0x3ff6307e08f8b6ba,1
+np.float64,0x6bb963c2d772d,0x3ff0000000000000,1
+np.float64,0x787b7142f0f6f,0x3ff0000000000000,1
+np.float64,0x3fe6e4147c6dc829,0x3ffa451bbdece240,1
+np.float64,0x8003857401470ae9,0x3ff0000000000000,1
+np.float64,0xbfeae82c3c75d058,0x3fe1ddbd66e65aab,1
+np.float64,0x7fe174707c62e8e0,0x7ff0000000000000,1
+np.float64,0x80008d2545e11a4b,0x3ff0000000000000,1
+np.float64,0xbfecc2dce17985ba,0x3fe129ad4325985a,1
+np.float64,0xbfe1fa1daf63f43c,0x3fe5adcb0731a44b,1
+np.float64,0x7fcf2530203e4a5f,0x7ff0000000000000,1
+np.float64,0xbfea5cefe874b9e0,0x3fe213f134b61f4a,1
+np.float64,0x800103729f2206e6,0x3ff0000000000000,1
+np.float64,0xbfe8442ff7708860,0x3fe2eaf850faa169,1
+np.float64,0x8006c78e19ed8f1d,0x3ff0000000000000,1
+np.float64,0x3fc259589c24b2b1,0x3ff1abe6a4d28816,1
+np.float64,0xffed02b7b5ba056e,0x0,1
+np.float64,0xbfce0aa4fe3c1548,0x3feb32115d92103e,1
+np.float64,0x7fec06e78bf80dce,0x7ff0000000000000,1
+np.float64,0xbfe0960bbc612c18,0x3fe6578ab29b70d4,1
+np.float64,0x3fee45841cbc8b08,0x3ffed2f6ca808ad3,1
+np.float64,0xbfeb0f8ebef61f1e,0x3fe1ce86003044cd,1
+np.float64,0x8002c357358586af,0x3ff0000000000000,1
+np.float64,0x3fe9aa10cc735422,0x3ffbe57e294ce68b,1
+np.float64,0x800256c0a544ad82,0x3ff0000000000000,1
+np.float64,0x4de6e1449bcdd,0x3ff0000000000000,1
+np.float64,0x65e9bc9ccbd38,0x3ff0000000000000,1
+np.float64,0xbfe53b0fa9aa7620,0x3fe4341f0aa29bbc,1
+np.float64,0xbfcdd94cd13bb298,0x3feb3956acd2e2dd,1
+np.float64,0x8004a49b65a94938,0x3ff0000000000000,1
+np.float64,0x800d3d05deba7a0c,0x3ff0000000000000,1
+np.float64,0x3fe4e05bce69c0b8,0x3ff925f55602a7e0,1
+np.float64,0xffe391e3256723c6,0x0,1
+np.float64,0xbfe92f0f37b25e1e,0x3fe28bacc76ae753,1
+np.float64,0x3f990238d8320472,0x3ff045edd36e2d62,1
+np.float64,0xffed8d15307b1a2a,0x0,1
+np.float64,0x3fee82e01afd05c0,0x3ffefc09e8b9c2b7,1
+np.float64,0xffb2d94b2225b298,0x0,1
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-expm1.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-expm1.csv
new file mode 100644
index 0000000..dcbc7cd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-expm1.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x80606724,0x80606724,3
+np.float32,0xbf16790f,0xbee38e14,3
+np.float32,0xbf1778a1,0xbee4a97f,3
+np.float32,0x7d4fc610,0x7f800000,3
+np.float32,0xbec30a20,0xbea230d5,3
+np.float32,0x3eae8a36,0x3ecffac5,3
+np.float32,0xbf1f08f1,0xbeece93c,3
+np.float32,0x80374376,0x80374376,3
+np.float32,0x3f2e04ca,0x3f793115,3
+np.float32,0x7e2c7e36,0x7f800000,3
+np.float32,0xbf686cae,0xbf18bcf0,3
+np.float32,0xbf5518cd,0xbf10a3da,3
+np.float32,0x807e233c,0x807e233c,3
+np.float32,0x7f4edd54,0x7f800000,3
+np.float32,0x7ed70088,0x7f800000,3
+np.float32,0x801675da,0x801675da,3
+np.float32,0x806735d5,0x806735d5,3
+np.float32,0xfe635fec,0xbf800000,3 +np.float32,0xfed88a0a,0xbf800000,3 +np.float32,0xff52c052,0xbf800000,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0xff4f65f9,0xbf800000,3 +np.float32,0xfe0f6c20,0xbf800000,3 +np.float32,0x80322b30,0x80322b30,3 +np.float32,0xfb757000,0xbf800000,3 +np.float32,0x3c81e0,0x3c81e0,3 +np.float32,0x79d56a,0x79d56a,3 +np.float32,0x8029d7af,0x8029d7af,3 +np.float32,0x8058a593,0x8058a593,3 +np.float32,0x3f3a13c7,0x3f88c75c,3 +np.float32,0x2a6b05,0x2a6b05,3 +np.float32,0xbd64c960,0xbd5e83ae,3 +np.float32,0x80471052,0x80471052,3 +np.float32,0xbe5dd950,0xbe47766c,3 +np.float32,0xfd8f88f0,0xbf800000,3 +np.float32,0x75a4b7,0x75a4b7,3 +np.float32,0x3f726f2e,0x3fc9fb7d,3 +np.float32,0x3ed6795c,0x3f053115,3 +np.float32,0x17d7f5,0x17d7f5,3 +np.float32,0xbf4cf19b,0xbf0d094f,3 +np.float32,0x3e0ec532,0x3e1933c6,3 +np.float32,0xff084016,0xbf800000,3 +np.float32,0x800829aa,0x800829aa,3 +np.float32,0x806d7302,0x806d7302,3 +np.float32,0x7f59d9da,0x7f800000,3 +np.float32,0x15f8b9,0x15f8b9,3 +np.float32,0x803befb3,0x803befb3,3 +np.float32,0x525043,0x525043,3 +np.float32,0x51a647,0x51a647,3 +np.float32,0xbf1cfce4,0xbeeab3d9,3 +np.float32,0x3f1f27a4,0x3f5cb1d2,3 +np.float32,0xbebc3a04,0xbe9d8142,3 +np.float32,0xbeea548c,0xbebc07e5,3 +np.float32,0x3f47401c,0x3f96c2a3,3 +np.float32,0x806b1ea3,0x806b1ea3,3 +np.float32,0x3ea56bb8,0x3ec3450c,3 +np.float32,0x3f7b4963,0x3fd597b5,3 +np.float32,0x7f051fa0,0x7f800000,3 +np.float32,0x1d411c,0x1d411c,3 +np.float32,0xff0b6a35,0xbf800000,3 +np.float32,0xbead63c0,0xbe9314f7,3 +np.float32,0x3738be,0x3738be,3 +np.float32,0x3f138cc8,0x3f479155,3 +np.float32,0x800a539f,0x800a539f,3 +np.float32,0x801b0ebd,0x801b0ebd,3 +np.float32,0x318fcd,0x318fcd,3 +np.float32,0x3ed67556,0x3f052e06,3 +np.float32,0x702886,0x702886,3 +np.float32,0x80000001,0x80000001,3 +np.float32,0x70a174,0x70a174,3 +np.float32,0x4f9c66,0x4f9c66,3 +np.float32,0x3e3e1927,0x3e50e351,3 +np.float32,0x7eac9a4d,0x7f800000,3 +np.float32,0x4b7407,0x4b7407,3 +np.float32,0x7f5bd2fd,0x7f800000,3 +np.float32,0x3eaafc58,0x3ecaffbd,3 +np.float32,0xbc989360,0xbc9729e2,3 +np.float32,0x3f470e5c,0x3f968c7b,3 +np.float32,0x4c5672,0x4c5672,3 +np.float32,0xff2b2ee2,0xbf800000,3 +np.float32,0xbf28a104,0xbef7079b,3 +np.float32,0x2c6175,0x2c6175,3 +np.float32,0x3d7e4fb0,0x3d832f9f,3 +np.float32,0x763276,0x763276,3 +np.float32,0x3cf364,0x3cf364,3 +np.float32,0xbf7ace75,0xbf1fe48c,3 +np.float32,0xff19e858,0xbf800000,3 +np.float32,0x80504c70,0x80504c70,3 +np.float32,0xff390210,0xbf800000,3 +np.float32,0x8046a743,0x8046a743,3 +np.float32,0x80000000,0x80000000,3 +np.float32,0x806c51da,0x806c51da,3 +np.float32,0x806ab38f,0x806ab38f,3 +np.float32,0x3f3de863,0x3f8cc538,3 +np.float32,0x7f6d45bb,0x7f800000,3 +np.float32,0xfd16ec60,0xbf800000,3 +np.float32,0x80513cba,0x80513cba,3 +np.float32,0xbf68996b,0xbf18cefa,3 +np.float32,0xfe039f2c,0xbf800000,3 +np.float32,0x3f013207,0x3f280c55,3 +np.float32,0x7ef4bc07,0x7f800000,3 +np.float32,0xbe8b65ac,0xbe741069,3 +np.float32,0xbf7a8186,0xbf1fc7a6,3 +np.float32,0x802532e5,0x802532e5,3 +np.float32,0x32c7df,0x32c7df,3 +np.float32,0x3ce4dceb,0x3ce81701,3 +np.float32,0xfe801118,0xbf800000,3 +np.float32,0x3d905f20,0x3d9594fb,3 +np.float32,0xbe11ed28,0xbe080168,3 +np.float32,0x59e773,0x59e773,3 +np.float32,0x3e9a2547,0x3eb3dd57,3 +np.float32,0x7ecb7c67,0x7f800000,3 +np.float32,0x7f69a67e,0x7f800000,3 +np.float32,0xff121e11,0xbf800000,3 +np.float32,0x3f7917cb,0x3fd2ad8c,3 +np.float32,0xbf1a7da8,0xbee7fc0c,3 +np.float32,0x3f077e66,0x3f329c40,3 +np.float32,0x3ce8e040,0x3cec37b3,3 
+np.float32,0xbf3f0b8e,0xbf069f4d,3 +np.float32,0x3f52f194,0x3fa3c9d6,3 +np.float32,0xbf0e7422,0xbeda80f2,3 +np.float32,0xfd67e230,0xbf800000,3 +np.float32,0xff14d9a9,0xbf800000,3 +np.float32,0x3f3546e3,0x3f83dc2b,3 +np.float32,0x3e152e3a,0x3e20983d,3 +np.float32,0x4a89a3,0x4a89a3,3 +np.float32,0x63217,0x63217,3 +np.float32,0xbeb9e2a8,0xbe9be153,3 +np.float32,0x7e9fa049,0x7f800000,3 +np.float32,0x7f58110c,0x7f800000,3 +np.float32,0x3e88290c,0x3e9bfba9,3 +np.float32,0xbf2cb206,0xbefb3494,3 +np.float32,0xff5880c4,0xbf800000,3 +np.float32,0x7ecff3ac,0x7f800000,3 +np.float32,0x3f4b3de6,0x3f9b23fd,3 +np.float32,0xbebd2048,0xbe9e208c,3 +np.float32,0xff08f7a2,0xbf800000,3 +np.float32,0xff473330,0xbf800000,3 +np.float32,0x1,0x1,3 +np.float32,0xbf5dc239,0xbf14584b,3 +np.float32,0x458e3f,0x458e3f,3 +np.float32,0xbdb8a650,0xbdb091f8,3 +np.float32,0xff336ffc,0xbf800000,3 +np.float32,0x3c60bd00,0x3c624966,3 +np.float32,0xbe16a4f8,0xbe0c1664,3 +np.float32,0x3f214246,0x3f60a0f0,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x7e08737e,0x7f800000,3 +np.float32,0x3f70574c,0x3fc74b8e,3 +np.float32,0xbed5745c,0xbeae8c77,3 +np.float32,0x361752,0x361752,3 +np.float32,0x3eb276d6,0x3ed584ea,3 +np.float32,0x3f03fc1e,0x3f2cb1a5,3 +np.float32,0x3fafd1,0x3fafd1,3 +np.float32,0x7e50d74c,0x7f800000,3 +np.float32,0x3eeca5,0x3eeca5,3 +np.float32,0x5dc963,0x5dc963,3 +np.float32,0x7f0e63ae,0x7f800000,3 +np.float32,0x8021745f,0x8021745f,3 +np.float32,0xbf5881a9,0xbf121d07,3 +np.float32,0x7dadc7fd,0x7f800000,3 +np.float32,0xbf2c0798,0xbefa86bb,3 +np.float32,0x3e635f50,0x3e7e97a9,3 +np.float32,0xbf2053fa,0xbeee4c0e,3 +np.float32,0x3e8eee2b,0x3ea4dfcc,3 +np.float32,0xfc8a03c0,0xbf800000,3 +np.float32,0xfd9e4948,0xbf800000,3 +np.float32,0x801e817e,0x801e817e,3 +np.float32,0xbf603a27,0xbf1560c3,3 +np.float32,0x7f729809,0x7f800000,3 +np.float32,0x3f5a1864,0x3fac0e04,3 +np.float32,0x3e7648b8,0x3e8b3677,3 +np.float32,0x3edade24,0x3f088bc1,3 +np.float32,0x65e16e,0x65e16e,3 +np.float32,0x3f24aa50,0x3f671117,3 +np.float32,0x803cb1d0,0x803cb1d0,3 +np.float32,0xbe7b1858,0xbe5eadcc,3 +np.float32,0xbf19bb27,0xbee726fb,3 +np.float32,0xfd1f6e60,0xbf800000,3 +np.float32,0xfeb0de60,0xbf800000,3 +np.float32,0xff511a52,0xbf800000,3 +np.float32,0xff7757f7,0xbf800000,3 +np.float32,0x463ff5,0x463ff5,3 +np.float32,0x3f770d12,0x3fcffcc2,3 +np.float32,0xbf208562,0xbeee80dc,3 +np.float32,0x6df204,0x6df204,3 +np.float32,0xbf62d24f,0xbf1673fb,3 +np.float32,0x3dfcf210,0x3e069d5f,3 +np.float32,0xbef26002,0xbec114d7,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f30fb85,0x7f800000,3 +np.float32,0x7ee5dfef,0x7f800000,3 +np.float32,0x3f317829,0x3f800611,3 +np.float32,0x3f4b0bbd,0x3f9aec88,3 +np.float32,0x7edf708c,0x7f800000,3 +np.float32,0xff071260,0xbf800000,3 +np.float32,0x3e7b8c30,0x3e8e9198,3 +np.float32,0x3f33778b,0x3f82077f,3 +np.float32,0x3e8cd11d,0x3ea215fd,3 +np.float32,0x8004483d,0x8004483d,3 +np.float32,0x801633e3,0x801633e3,3 +np.float32,0x7e76eb15,0x7f800000,3 +np.float32,0x3c1571,0x3c1571,3 +np.float32,0x7de3de52,0x7f800000,3 +np.float32,0x804ae906,0x804ae906,3 +np.float32,0x7f3a2616,0x7f800000,3 +np.float32,0xff7fffff,0xbf800000,3 +np.float32,0xff5d17e4,0xbf800000,3 +np.float32,0xbeaa6704,0xbe90f252,3 +np.float32,0x7e6a43af,0x7f800000,3 +np.float32,0x2a0f35,0x2a0f35,3 +np.float32,0xfd8fece0,0xbf800000,3 +np.float32,0xfeef2e2a,0xbf800000,3 +np.float32,0xff800000,0xbf800000,3 +np.float32,0xbeefcc52,0xbebf78e4,3 +np.float32,0x3db6c490,0x3dbf2bd5,3 +np.float32,0x8290f,0x8290f,3 +np.float32,0xbeace648,0xbe92bb7f,3 
+np.float32,0x801fea79,0x801fea79,3 +np.float32,0x3ea6c230,0x3ec51ebf,3 +np.float32,0x3e5f2ca3,0x3e795c8a,3 +np.float32,0x3eb6f634,0x3edbeb9f,3 +np.float32,0xff790b45,0xbf800000,3 +np.float32,0x3d82e240,0x3d872816,3 +np.float32,0x3f0d6a57,0x3f3cc7db,3 +np.float32,0x7f08531a,0x7f800000,3 +np.float32,0x702b6d,0x702b6d,3 +np.float32,0x7d3a3c38,0x7f800000,3 +np.float32,0x3d0a7fb3,0x3d0cddf3,3 +np.float32,0xff28084c,0xbf800000,3 +np.float32,0xfeee8804,0xbf800000,3 +np.float32,0x804094eb,0x804094eb,3 +np.float32,0x7acb39,0x7acb39,3 +np.float32,0x3f01c07a,0x3f28f88c,3 +np.float32,0x3e05c500,0x3e0ee674,3 +np.float32,0xbe6f7c38,0xbe558ac1,3 +np.float32,0x803b1f4b,0x803b1f4b,3 +np.float32,0xbf76561f,0xbf1e332b,3 +np.float32,0xff30d368,0xbf800000,3 +np.float32,0x7e2e1f38,0x7f800000,3 +np.float32,0x3ee085b8,0x3f0ce7c0,3 +np.float32,0x8064c4a7,0x8064c4a7,3 +np.float32,0xa7c1d,0xa7c1d,3 +np.float32,0x3f27498a,0x3f6c14bc,3 +np.float32,0x137ca,0x137ca,3 +np.float32,0x3d0a5c60,0x3d0cb969,3 +np.float32,0x80765f1f,0x80765f1f,3 +np.float32,0x80230a71,0x80230a71,3 +np.float32,0x3f321ed2,0x3f80acf4,3 +np.float32,0x7d61e7f4,0x7f800000,3 +np.float32,0xbf39f7f2,0xbf0430f7,3 +np.float32,0xbe2503f8,0xbe1867e8,3 +np.float32,0x29333d,0x29333d,3 +np.float32,0x7edc5a0e,0x7f800000,3 +np.float32,0xbe81a8a2,0xbe651663,3 +np.float32,0x7f76ab6d,0x7f800000,3 +np.float32,0x7f46111f,0x7f800000,3 +np.float32,0xff0fc888,0xbf800000,3 +np.float32,0x805ece89,0x805ece89,3 +np.float32,0xc390b,0xc390b,3 +np.float32,0xff64bdee,0xbf800000,3 +np.float32,0x3dd07e4e,0x3ddb79bd,3 +np.float32,0xfecc1f10,0xbf800000,3 +np.float32,0x803f5177,0x803f5177,3 +np.float32,0x802a24d2,0x802a24d2,3 +np.float32,0x7f27d0cc,0x7f800000,3 +np.float32,0x3ef57c98,0x3f1d7e88,3 +np.float32,0x7b848d,0x7b848d,3 +np.float32,0x7f7fffff,0x7f800000,3 +np.float32,0xfe889c46,0xbf800000,3 +np.float32,0xff2d6dc5,0xbf800000,3 +np.float32,0x3f53a186,0x3fa492a6,3 +np.float32,0xbf239c94,0xbef1c90c,3 +np.float32,0xff7c0f4e,0xbf800000,3 +np.float32,0x3e7c69a9,0x3e8f1f3a,3 +np.float32,0xbf47c9e9,0xbf0ab2a9,3 +np.float32,0xbc1eaf00,0xbc1deae9,3 +np.float32,0x3f4a6d39,0x3f9a3d8e,3 +np.float32,0x3f677930,0x3fbc26eb,3 +np.float32,0x3f45eea1,0x3f955418,3 +np.float32,0x7f61a1f8,0x7f800000,3 +np.float32,0xff58c7c6,0xbf800000,3 +np.float32,0x80239801,0x80239801,3 +np.float32,0xff56e616,0xbf800000,3 +np.float32,0xff62052c,0xbf800000,3 +np.float32,0x8009b615,0x8009b615,3 +np.float32,0x293d6b,0x293d6b,3 +np.float32,0xfe9e585c,0xbf800000,3 +np.float32,0x7f58ff4b,0x7f800000,3 +np.float32,0x10937c,0x10937c,3 +np.float32,0x7f5cc13f,0x7f800000,3 +np.float32,0x110c5d,0x110c5d,3 +np.float32,0x805e51fc,0x805e51fc,3 +np.float32,0xbedcf70a,0xbeb3766c,3 +np.float32,0x3f4d5e42,0x3f9d8091,3 +np.float32,0xff5925a0,0xbf800000,3 +np.float32,0x7e87cafa,0x7f800000,3 +np.float32,0xbf6474b2,0xbf171fee,3 +np.float32,0x4b39b2,0x4b39b2,3 +np.float32,0x8020cc28,0x8020cc28,3 +np.float32,0xff004ed8,0xbf800000,3 +np.float32,0xbf204cf5,0xbeee448d,3 +np.float32,0x3e30cf10,0x3e40fdb1,3 +np.float32,0x80202bee,0x80202bee,3 +np.float32,0xbf55a985,0xbf10e2bc,3 +np.float32,0xbe297dd8,0xbe1c351c,3 +np.float32,0x5780d9,0x5780d9,3 +np.float32,0x7ef729fa,0x7f800000,3 +np.float32,0x8039a3b5,0x8039a3b5,3 +np.float32,0x7cdd3f,0x7cdd3f,3 +np.float32,0x7ef0145a,0x7f800000,3 +np.float32,0x807ad7ae,0x807ad7ae,3 +np.float32,0x7f6c2643,0x7f800000,3 +np.float32,0xbec56124,0xbea3c929,3 +np.float32,0x512c3b,0x512c3b,3 +np.float32,0xbed3effe,0xbead8c1e,3 +np.float32,0x7f5e0a4d,0x7f800000,3 +np.float32,0x3f315316,0x3f7fc200,3 
+np.float32,0x7eca5727,0x7f800000,3 +np.float32,0x7f4834f3,0x7f800000,3 +np.float32,0x8004af6d,0x8004af6d,3 +np.float32,0x3f223ca4,0x3f6277e3,3 +np.float32,0x7eea4fdd,0x7f800000,3 +np.float32,0x3e7143e8,0x3e880763,3 +np.float32,0xbf737008,0xbf1d160e,3 +np.float32,0xfc408b00,0xbf800000,3 +np.float32,0x803912ca,0x803912ca,3 +np.float32,0x7db31f4e,0x7f800000,3 +np.float32,0xff578b54,0xbf800000,3 +np.float32,0x3f068ec4,0x3f31062b,3 +np.float32,0x35f64f,0x35f64f,3 +np.float32,0x80437df4,0x80437df4,3 +np.float32,0x568059,0x568059,3 +np.float32,0x8005f8ba,0x8005f8ba,3 +np.float32,0x6824ad,0x6824ad,3 +np.float32,0xff3fdf30,0xbf800000,3 +np.float32,0xbf6f7682,0xbf1b89d6,3 +np.float32,0x3dcea8a0,0x3dd971f5,3 +np.float32,0x3ee32a62,0x3f0ef5a9,3 +np.float32,0xbf735bcd,0xbf1d0e3d,3 +np.float32,0x7e8c7c28,0x7f800000,3 +np.float32,0x3ed552bc,0x3f045161,3 +np.float32,0xfed90a8a,0xbf800000,3 +np.float32,0xbe454368,0xbe336d2a,3 +np.float32,0xbf171d26,0xbee4442d,3 +np.float32,0x80652bf9,0x80652bf9,3 +np.float32,0xbdbaaa20,0xbdb26914,3 +np.float32,0x3f56063d,0x3fa7522e,3 +np.float32,0x3d3d4fd3,0x3d41c13f,3 +np.float32,0x80456040,0x80456040,3 +np.float32,0x3dc15586,0x3dcac0ef,3 +np.float32,0x7f753060,0x7f800000,3 +np.float32,0x7f7d8039,0x7f800000,3 +np.float32,0xfdebf280,0xbf800000,3 +np.float32,0xbf1892c3,0xbee5e116,3 +np.float32,0xbf0f1468,0xbedb3878,3 +np.float32,0x40d85c,0x40d85c,3 +np.float32,0x3f93dd,0x3f93dd,3 +np.float32,0xbf5730fd,0xbf118c24,3 +np.float32,0xfe17aa44,0xbf800000,3 +np.float32,0x3dc0baf4,0x3dca1716,3 +np.float32,0xbf3433d8,0xbf015efb,3 +np.float32,0x1c59f5,0x1c59f5,3 +np.float32,0x802b1540,0x802b1540,3 +np.float32,0xbe47df6c,0xbe35936e,3 +np.float32,0xbe8e7070,0xbe78af32,3 +np.float32,0xfe7057f4,0xbf800000,3 +np.float32,0x80668b69,0x80668b69,3 +np.float32,0xbe677810,0xbe4f2c2d,3 +np.float32,0xbe7a2f1c,0xbe5df733,3 +np.float32,0xfeb79e3c,0xbf800000,3 +np.float32,0xbeb6e320,0xbe99c9e8,3 +np.float32,0xfea188f2,0xbf800000,3 +np.float32,0x7dcaeb15,0x7f800000,3 +np.float32,0x1be567,0x1be567,3 +np.float32,0xbf4041cc,0xbf07320d,3 +np.float32,0x3f721aa7,0x3fc98e9a,3 +np.float32,0x7f5aa835,0x7f800000,3 +np.float32,0x15180e,0x15180e,3 +np.float32,0x3f73d739,0x3fcbccdb,3 +np.float32,0xbeecd380,0xbebd9b36,3 +np.float32,0x3f2caec7,0x3f768fea,3 +np.float32,0xbeaf65f2,0xbe9482bb,3 +np.float32,0xfe6aa384,0xbf800000,3 +np.float32,0xbf4f2c0a,0xbf0e085e,3 +np.float32,0xbf2b5907,0xbef9d431,3 +np.float32,0x3e855e0d,0x3e985960,3 +np.float32,0x8056cc64,0x8056cc64,3 +np.float32,0xff746bb5,0xbf800000,3 +np.float32,0x3e0332f6,0x3e0bf986,3 +np.float32,0xff637720,0xbf800000,3 +np.float32,0xbf330676,0xbf00c990,3 +np.float32,0x3ec449a1,0x3eef3862,3 +np.float32,0x766541,0x766541,3 +np.float32,0xfe2edf6c,0xbf800000,3 +np.float32,0xbebb28ca,0xbe9cc3e2,3 +np.float32,0x3f16c930,0x3f4d5ce4,3 +np.float32,0x7f1a9a4a,0x7f800000,3 +np.float32,0x3e9ba1,0x3e9ba1,3 +np.float32,0xbf73d5f6,0xbf1d3d69,3 +np.float32,0xfdc8a8b0,0xbf800000,3 +np.float32,0x50f051,0x50f051,3 +np.float32,0xff0add02,0xbf800000,3 +np.float32,0x1e50bf,0x1e50bf,3 +np.float32,0x3f04d287,0x3f2e1948,3 +np.float32,0x7f1e50,0x7f1e50,3 +np.float32,0x2affb3,0x2affb3,3 +np.float32,0x80039f07,0x80039f07,3 +np.float32,0x804ba79e,0x804ba79e,3 +np.float32,0x7b5a8eed,0x7f800000,3 +np.float32,0x3e1a8b28,0x3e26d0a7,3 +np.float32,0x3ea95f29,0x3ec8bfa4,3 +np.float32,0x7e09fa55,0x7f800000,3 +np.float32,0x7eacb1b3,0x7f800000,3 +np.float32,0x3e8ad7c0,0x3e9f7dec,3 +np.float32,0x7e0e997c,0x7f800000,3 +np.float32,0x3f4422b4,0x3f936398,3 +np.float32,0x806bd222,0x806bd222,3 
+np.float32,0x677ae6,0x677ae6,3 +np.float32,0x62cf68,0x62cf68,3 +np.float32,0x7e4e594e,0x7f800000,3 +np.float32,0x80445fd1,0x80445fd1,3 +np.float32,0xff3a0d04,0xbf800000,3 +np.float32,0x8052b256,0x8052b256,3 +np.float32,0x3cb34440,0x3cb53e11,3 +np.float32,0xbf0e3865,0xbeda3c6d,3 +np.float32,0x3f49f5df,0x3f99ba17,3 +np.float32,0xbed75a22,0xbeafcc09,3 +np.float32,0xbf7aec64,0xbf1fefc8,3 +np.float32,0x7f35a62d,0x7f800000,3 +np.float32,0xbf787b03,0xbf1f03fc,3 +np.float32,0x8006a62a,0x8006a62a,3 +np.float32,0x3f6419e7,0x3fb803c7,3 +np.float32,0x3ecea2e5,0x3efe8f01,3 +np.float32,0x80603577,0x80603577,3 +np.float32,0xff73198c,0xbf800000,3 +np.float32,0x7def110a,0x7f800000,3 +np.float32,0x544efd,0x544efd,3 +np.float32,0x3f052340,0x3f2ea0fc,3 +np.float32,0xff306666,0xbf800000,3 +np.float32,0xbf800000,0xbf21d2a7,3 +np.float32,0xbed3e150,0xbead826a,3 +np.float32,0x3f430c99,0x3f92390f,3 +np.float32,0xbf4bffa4,0xbf0c9c73,3 +np.float32,0xfd97a710,0xbf800000,3 +np.float32,0x3cadf0fe,0x3cafcd1a,3 +np.float32,0x807af7b4,0x807af7b4,3 +np.float32,0xbc508600,0xbc4f33bc,3 +np.float32,0x7f3e0ec7,0x7f800000,3 +np.float32,0xbe51334c,0xbe3d36f7,3 +np.float32,0xfe7b7fb4,0xbf800000,3 +np.float32,0xfed9c45e,0xbf800000,3 +np.float32,0x3da024eb,0x3da6926a,3 +np.float32,0x7eed9e76,0x7f800000,3 +np.float32,0xbf2b8f1f,0xbefa0b91,3 +np.float32,0x3f2b9286,0x3f746318,3 +np.float32,0xfe8af49c,0xbf800000,3 +np.float32,0x9c4f7,0x9c4f7,3 +np.float32,0x801d7543,0x801d7543,3 +np.float32,0xbf66474a,0xbf17de66,3 +np.float32,0xbf562155,0xbf1116b1,3 +np.float32,0x46a8de,0x46a8de,3 +np.float32,0x8053fe6b,0x8053fe6b,3 +np.float32,0xbf6ee842,0xbf1b51f3,3 +np.float32,0xbf6ad78e,0xbf19b565,3 +np.float32,0xbf012574,0xbecad7ff,3 +np.float32,0x748364,0x748364,3 +np.float32,0x8073f59b,0x8073f59b,3 +np.float32,0xff526825,0xbf800000,3 +np.float32,0xfeb02dc4,0xbf800000,3 +np.float32,0x8033eb1c,0x8033eb1c,3 +np.float32,0x3f3685ea,0x3f8520cc,3 +np.float32,0x7f657902,0x7f800000,3 +np.float32,0xbf75eac4,0xbf1e0a1f,3 +np.float32,0xfe67f384,0xbf800000,3 +np.float32,0x3f56d3cc,0x3fa83faf,3 +np.float32,0x44a4ce,0x44a4ce,3 +np.float32,0x1dc4b3,0x1dc4b3,3 +np.float32,0x4fb3b2,0x4fb3b2,3 +np.float32,0xbea904a4,0xbe8ff3ed,3 +np.float32,0x7e668f16,0x7f800000,3 +np.float32,0x7f538378,0x7f800000,3 +np.float32,0x80541709,0x80541709,3 +np.float32,0x80228040,0x80228040,3 +np.float32,0x7ef9694e,0x7f800000,3 +np.float32,0x3f5fca9b,0x3fb2ce54,3 +np.float32,0xbe9c43c2,0xbe86ab84,3 +np.float32,0xfecee000,0xbf800000,3 +np.float32,0x5a65c2,0x5a65c2,3 +np.float32,0x3f736572,0x3fcb3985,3 +np.float32,0xbf2a03f7,0xbef87600,3 +np.float32,0xfe96b488,0xbf800000,3 +np.float32,0xfedd8800,0xbf800000,3 +np.float32,0x80411804,0x80411804,3 +np.float32,0x7edcb0a6,0x7f800000,3 +np.float32,0x2bb882,0x2bb882,3 +np.float32,0x3f800000,0x3fdbf0a9,3 +np.float32,0x764b27,0x764b27,3 +np.float32,0x7e92035d,0x7f800000,3 +np.float32,0x3e80facb,0x3e92ae1d,3 +np.float32,0x8040b81a,0x8040b81a,3 +np.float32,0x7f487fe4,0x7f800000,3 +np.float32,0xbc641780,0xbc6282ed,3 +np.float32,0x804b0bb9,0x804b0bb9,3 +np.float32,0x7d0b7c39,0x7f800000,3 +np.float32,0xff072080,0xbf800000,3 +np.float32,0xbed7aff8,0xbeb00462,3 +np.float32,0x35e247,0x35e247,3 +np.float32,0xbf7edd19,0xbf216766,3 +np.float32,0x8004a539,0x8004a539,3 +np.float32,0xfdfc1790,0xbf800000,3 +np.float32,0x8037a841,0x8037a841,3 +np.float32,0xfed0a8a8,0xbf800000,3 +np.float32,0x7f1f1697,0x7f800000,3 +np.float32,0x3f2ccc6e,0x3f76ca23,3 +np.float32,0x35eada,0x35eada,3 +np.float32,0xff111f42,0xbf800000,3 +np.float32,0x3ee1ab7f,0x3f0dcbbe,3 
+np.float32,0xbf6e89ee,0xbf1b2cd4,3 +np.float32,0x3f58611c,0x3faa0cdc,3 +np.float32,0x1ac6a6,0x1ac6a6,3 +np.float32,0xbf1286fa,0xbedf2312,3 +np.float32,0x7e451137,0x7f800000,3 +np.float32,0xbe92c326,0xbe7f3405,3 +np.float32,0x3f2fdd16,0x3f7cd87b,3 +np.float32,0xbe5c0ea0,0xbe4604c2,3 +np.float32,0xbdb29968,0xbdab0883,3 +np.float32,0x3964,0x3964,3 +np.float32,0x3f0dc236,0x3f3d60a0,3 +np.float32,0x7c3faf06,0x7f800000,3 +np.float32,0xbef41f7a,0xbec22b16,3 +np.float32,0x3f4c0289,0x3f9bfdcc,3 +np.float32,0x806084e9,0x806084e9,3 +np.float32,0x3ed1d8dd,0x3f01b0c1,3 +np.float32,0x806d8d8b,0x806d8d8b,3 +np.float32,0x3f052180,0x3f2e9e0a,3 +np.float32,0x803d85d5,0x803d85d5,3 +np.float32,0x3e0afd70,0x3e14dd48,3 +np.float32,0x2fbc63,0x2fbc63,3 +np.float32,0x2e436f,0x2e436f,3 +np.float32,0xbf7b19e6,0xbf2000da,3 +np.float32,0x3f34022e,0x3f829362,3 +np.float32,0x3d2b40e0,0x3d2ee246,3 +np.float32,0x3f5298b4,0x3fa3649b,3 +np.float32,0xbdb01328,0xbda8b7de,3 +np.float32,0x7f693c81,0x7f800000,3 +np.float32,0xbeb1abc0,0xbe961edc,3 +np.float32,0x801d9b5d,0x801d9b5d,3 +np.float32,0x80628668,0x80628668,3 +np.float32,0x800f57dd,0x800f57dd,3 +np.float32,0x8017c94f,0x8017c94f,3 +np.float32,0xbf16f5f4,0xbee418b8,3 +np.float32,0x3e686476,0x3e827022,3 +np.float32,0xbf256796,0xbef3abd9,3 +np.float32,0x7f1b4485,0x7f800000,3 +np.float32,0xbea0b3cc,0xbe89ed21,3 +np.float32,0xfee08b2e,0xbf800000,3 +np.float32,0x523cb4,0x523cb4,3 +np.float32,0x3daf2cb2,0x3db6e273,3 +np.float32,0xbd531c40,0xbd4dc323,3 +np.float32,0x80078fe5,0x80078fe5,3 +np.float32,0x80800000,0x80800000,3 +np.float32,0x3f232438,0x3f642d1a,3 +np.float32,0x3ec29446,0x3eecb7c0,3 +np.float32,0x3dbcd2a4,0x3dc5cd1d,3 +np.float32,0x7f045b0d,0x7f800000,3 +np.float32,0x7f22e6d1,0x7f800000,3 +np.float32,0xbf5d3430,0xbf141c80,3 +np.float32,0xbe03ec70,0xbdf78ee6,3 +np.float32,0x3e93ec9a,0x3eab822f,3 +np.float32,0x7f3b9262,0x7f800000,3 +np.float32,0x65ac6a,0x65ac6a,3 +np.float32,0x3db9a8,0x3db9a8,3 +np.float32,0xbf37ab59,0xbf031306,3 +np.float32,0x33c40e,0x33c40e,3 +np.float32,0x7f7a478f,0x7f800000,3 +np.float32,0xbe8532d0,0xbe6a906f,3 +np.float32,0x801c081d,0x801c081d,3 +np.float32,0xbe4212a0,0xbe30ca73,3 +np.float32,0xff0b603e,0xbf800000,3 +np.float32,0x4554dc,0x4554dc,3 +np.float32,0x3dd324be,0x3dde695e,3 +np.float32,0x3f224c44,0x3f629557,3 +np.float32,0x8003cd79,0x8003cd79,3 +np.float32,0xbf31351c,0xbeffc2fd,3 +np.float32,0x8034603a,0x8034603a,3 +np.float32,0xbf6fcb70,0xbf1bab24,3 +np.float32,0x804eb67e,0x804eb67e,3 +np.float32,0xff05c00e,0xbf800000,3 +np.float32,0x3eb5b36f,0x3eda1ec7,3 +np.float32,0x3f1ed7f9,0x3f5c1d90,3 +np.float32,0x3f052d8a,0x3f2eb24b,3 +np.float32,0x5ddf51,0x5ddf51,3 +np.float32,0x7e50c11c,0x7f800000,3 +np.float32,0xff74f55a,0xbf800000,3 +np.float32,0x4322d,0x4322d,3 +np.float32,0x3f16f8a9,0x3f4db27a,3 +np.float32,0x3f4f23d6,0x3f9f7c2c,3 +np.float32,0xbf706c1e,0xbf1bea0a,3 +np.float32,0x3f2cbd52,0x3f76ac77,3 +np.float32,0xf3043,0xf3043,3 +np.float32,0xfee79de0,0xbf800000,3 +np.float32,0x7e942f69,0x7f800000,3 +np.float32,0x180139,0x180139,3 +np.float32,0xff69c678,0xbf800000,3 +np.float32,0x3f46773f,0x3f95e840,3 +np.float32,0x804aae1c,0x804aae1c,3 +np.float32,0x3eb383b4,0x3ed7024c,3 +np.float32,0x8032624e,0x8032624e,3 +np.float32,0xbd0a0f80,0xbd07c27d,3 +np.float32,0xbf1c9b98,0xbeea4a61,3 +np.float32,0x7f370999,0x7f800000,3 +np.float32,0x801931f9,0x801931f9,3 +np.float32,0x3f6f45ce,0x3fc5eea0,3 +np.float32,0xff0ab4cc,0xbf800000,3 +np.float32,0x4c043d,0x4c043d,3 +np.float32,0x8002a599,0x8002a599,3 +np.float32,0xbc4a6080,0xbc4921d7,3 
+np.float32,0x3f008d14,0x3f26fb72,3 +np.float32,0x7f48b3d9,0x7f800000,3 +np.float32,0x7cb2ec7e,0x7f800000,3 +np.float32,0xbf1338bd,0xbedfeb61,3 +np.float32,0x0,0x0,3 +np.float32,0xbf2f5b64,0xbefde71c,3 +np.float32,0xbe422974,0xbe30dd56,3 +np.float32,0x3f776be8,0x3fd07950,3 +np.float32,0xbf3e97a1,0xbf06684a,3 +np.float32,0x7d28cb26,0x7f800000,3 +np.float32,0x801618d2,0x801618d2,3 +np.float32,0x807e4f83,0x807e4f83,3 +np.float32,0x8006b07d,0x8006b07d,3 +np.float32,0xfea1c042,0xbf800000,3 +np.float32,0xff24ef74,0xbf800000,3 +np.float32,0xfef7ab16,0xbf800000,3 +np.float32,0x70b771,0x70b771,3 +np.float32,0x7daeb64e,0x7f800000,3 +np.float32,0xbe66e378,0xbe4eb59c,3 +np.float32,0xbead1534,0xbe92dcf7,3 +np.float32,0x7e6769b8,0x7f800000,3 +np.float32,0x7ecd0890,0x7f800000,3 +np.float32,0xbe7380d8,0xbe58b747,3 +np.float32,0x3efa6f2f,0x3f218265,3 +np.float32,0x3f59dada,0x3fabc5eb,3 +np.float32,0xff0f2d20,0xbf800000,3 +np.float32,0x8060210e,0x8060210e,3 +np.float32,0x3ef681e8,0x3f1e51c8,3 +np.float32,0x77a6dd,0x77a6dd,3 +np.float32,0xbebfdd0e,0xbea00399,3 +np.float32,0xfe889b72,0xbf800000,3 +np.float32,0x8049ed2c,0x8049ed2c,3 +np.float32,0x3b089dc4,0x3b08c23e,3 +np.float32,0xbf13c7c4,0xbee08c28,3 +np.float32,0x3efa13b9,0x3f2137d7,3 +np.float32,0x3e9385dc,0x3eaaf914,3 +np.float32,0x7e0e6a43,0x7f800000,3 +np.float32,0x7df6d63f,0x7f800000,3 +np.float32,0x3f3efead,0x3f8dea03,3 +np.float32,0xff52548c,0xbf800000,3 +np.float32,0x803ff9d8,0x803ff9d8,3 +np.float32,0x3c825823,0x3c836303,3 +np.float32,0xfc9e97a0,0xbf800000,3 +np.float32,0xfe644f48,0xbf800000,3 +np.float32,0x802f5017,0x802f5017,3 +np.float32,0x3d5753b9,0x3d5d1661,3 +np.float32,0x7f2a55d2,0x7f800000,3 +np.float32,0x7f4dabfe,0x7f800000,3 +np.float32,0x3f49492a,0x3f98fc47,3 +np.float32,0x3f4d1589,0x3f9d2f82,3 +np.float32,0xff016208,0xbf800000,3 +np.float32,0xbf571cb7,0xbf118365,3 +np.float32,0xbf1ef297,0xbeecd136,3 +np.float32,0x36266b,0x36266b,3 +np.float32,0xbed07b0e,0xbeab4129,3 +np.float32,0x7f553365,0x7f800000,3 +np.float32,0xfe9bb8c6,0xbf800000,3 +np.float32,0xbeb497d6,0xbe982e19,3 +np.float32,0xbf27af6c,0xbef60d16,3 +np.float32,0x55cf51,0x55cf51,3 +np.float32,0x3eab1db0,0x3ecb2e4f,3 +np.float32,0x3e777603,0x3e8bf62f,3 +np.float32,0x7f10e374,0x7f800000,3 +np.float32,0xbf1f6480,0xbeed4b8d,3 +np.float32,0x40479d,0x40479d,3 +np.float32,0x156259,0x156259,3 +np.float32,0x3d852e30,0x3d899b2d,3 +np.float32,0x80014ff3,0x80014ff3,3 +np.float32,0xbd812fa8,0xbd7a645c,3 +np.float32,0x800ab780,0x800ab780,3 +np.float32,0x3ea02ff4,0x3ebc13bd,3 +np.float32,0x7e858b8e,0x7f800000,3 +np.float32,0x75d63b,0x75d63b,3 +np.float32,0xbeb15c94,0xbe95e6e3,3 +np.float32,0x3da0cee0,0x3da74a39,3 +np.float32,0xff21c01c,0xbf800000,3 +np.float32,0x8049b5eb,0x8049b5eb,3 +np.float32,0x80177ab0,0x80177ab0,3 +np.float32,0xff137a50,0xbf800000,3 +np.float32,0x3f7febba,0x3fdbd51c,3 +np.float32,0x8041e4dd,0x8041e4dd,3 +np.float32,0x99b8c,0x99b8c,3 +np.float32,0x5621ba,0x5621ba,3 +np.float32,0x14b534,0x14b534,3 +np.float32,0xbe2eb3a8,0xbe209c95,3 +np.float32,0x7e510c28,0x7f800000,3 +np.float32,0x804ec2f2,0x804ec2f2,3 +np.float32,0x3f662406,0x3fba82b0,3 +np.float32,0x800000,0x800000,3 +np.float32,0x3f3120d6,0x3f7f5d96,3 +np.float32,0x7f179b8e,0x7f800000,3 +np.float32,0x7f65278e,0x7f800000,3 +np.float32,0xfeb50f52,0xbf800000,3 +np.float32,0x7f051bd1,0x7f800000,3 +np.float32,0x7ea0558d,0x7f800000,3 +np.float32,0xbd0a96c0,0xbd08453f,3 +np.float64,0xee82da5ddd05c,0xee82da5ddd05c,1 +np.float64,0x800c3a22d7f87446,0x800c3a22d7f87446,1 +np.float64,0xbfd34b20eaa69642,0xbfd0a825e7688d3e,1 
+np.float64,0x3fd6a0f2492d41e5,0x3fdb253b906057b3,1 +np.float64,0xbfda13d8783427b0,0xbfd56b1d76684332,1 +np.float64,0xbfe50b5a99ea16b5,0xbfded7dd82c6f746,1 +np.float64,0x3f82468fc0248d20,0x3f825b7fa9378ee9,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x856e50290adca,0x856e50290adca,1 +np.float64,0x7fde55a5fa3cab4b,0x7ff0000000000000,1 +np.float64,0x7fcf2c8dd93e591b,0x7ff0000000000000,1 +np.float64,0x8001b3a0e3236743,0x8001b3a0e3236743,1 +np.float64,0x8000fdb14821fb63,0x8000fdb14821fb63,1 +np.float64,0xbfe3645e08e6c8bc,0xbfdd161362a5e9ef,1 +np.float64,0x7feb34d28b3669a4,0x7ff0000000000000,1 +np.float64,0x80099dd810933bb1,0x80099dd810933bb1,1 +np.float64,0xbfedbcc1097b7982,0xbfe35d86414d53dc,1 +np.float64,0x7fdc406fbdb880de,0x7ff0000000000000,1 +np.float64,0x800c4bf85ab897f1,0x800c4bf85ab897f1,1 +np.float64,0x3fd8f7b0e0b1ef60,0x3fde89b497ae20d8,1 +np.float64,0xffe4fced5c69f9da,0xbff0000000000000,1 +np.float64,0xbfe54d421fea9a84,0xbfdf1be0cbfbfcba,1 +np.float64,0x800af72f3535ee5f,0x800af72f3535ee5f,1 +np.float64,0x3fe24e6570e49ccb,0x3fe8b3a86d970411,1 +np.float64,0xbfdd7b22d0baf646,0xbfd79fac2e4f7558,1 +np.float64,0xbfe6a7654c6d4eca,0xbfe03c1f13f3b409,1 +np.float64,0x3fe2c3eb662587d7,0x3fe98566e625d4f5,1 +np.float64,0x3b1ef71e763e0,0x3b1ef71e763e0,1 +np.float64,0xffed03c6baba078d,0xbff0000000000000,1 +np.float64,0x3febac19d0b75834,0x3ff5fdacc9d51bcd,1 +np.float64,0x800635d6794c6bae,0x800635d6794c6bae,1 +np.float64,0xbfe8cafc827195f9,0xbfe1411438608ae1,1 +np.float64,0x7feeb616a83d6c2c,0x7ff0000000000000,1 +np.float64,0x3fd52d62a2aa5ac5,0x3fd91a07a7f18f44,1 +np.float64,0x80036996b8a6d32e,0x80036996b8a6d32e,1 +np.float64,0x2b1945965632a,0x2b1945965632a,1 +np.float64,0xbfecb5e8c9796bd2,0xbfe2f40fca276aa2,1 +np.float64,0x3fe8669ed4f0cd3e,0x3ff24c89fc9cdbff,1 +np.float64,0x71e9f65ee3d3f,0x71e9f65ee3d3f,1 +np.float64,0xbfd5ab262bab564c,0xbfd261ae108ef79e,1 +np.float64,0xbfe7091342ee1226,0xbfe06bf5622d75f6,1 +np.float64,0x49e888d093d12,0x49e888d093d12,1 +np.float64,0x2272f3dc44e5f,0x2272f3dc44e5f,1 +np.float64,0x7fe98736e0b30e6d,0x7ff0000000000000,1 +np.float64,0x30fa9cde61f54,0x30fa9cde61f54,1 +np.float64,0x7fdc163fc0382c7f,0x7ff0000000000000,1 +np.float64,0xffb40d04ee281a08,0xbff0000000000000,1 +np.float64,0xffe624617f2c48c2,0xbff0000000000000,1 +np.float64,0x3febb582bd376b05,0x3ff608da584d1716,1 +np.float64,0xfc30a5a5f8615,0xfc30a5a5f8615,1 +np.float64,0x3fef202efd7e405e,0x3ffa52009319b069,1 +np.float64,0x8004d0259829a04c,0x8004d0259829a04c,1 +np.float64,0x800622dc71ec45ba,0x800622dc71ec45ba,1 +np.float64,0xffefffffffffffff,0xbff0000000000000,1 +np.float64,0x800e89113c9d1223,0x800e89113c9d1223,1 +np.float64,0x7fba7fde3034ffbb,0x7ff0000000000000,1 +np.float64,0xbfeea31e807d463d,0xbfe3b7369b725915,1 +np.float64,0x3feb7c9589f6f92c,0x3ff5c56cf71b0dff,1 +np.float64,0x3fd52d3b59aa5a77,0x3fd919d0f683fd07,1 +np.float64,0x800de90a43fbd215,0x800de90a43fbd215,1 +np.float64,0x3fe7eb35a9efd66b,0x3ff1c940dbfc6ef9,1 +np.float64,0xbda0adcb7b416,0xbda0adcb7b416,1 +np.float64,0x7fc5753e3a2aea7b,0x7ff0000000000000,1 +np.float64,0xffdd101d103a203a,0xbff0000000000000,1 +np.float64,0x7fcb54f56836a9ea,0x7ff0000000000000,1 +np.float64,0xbfd61c8d6eac391a,0xbfd2b23bc0a2cef4,1 +np.float64,0x3feef55de37deabc,0x3ffa198639a0161d,1 +np.float64,0x7fe4ffbfaea9ff7e,0x7ff0000000000000,1 +np.float64,0x9d1071873a20e,0x9d1071873a20e,1 +np.float64,0x3fef1ecb863e3d97,0x3ffa502a81e09cfc,1 +np.float64,0xad2da12b5a5b4,0xad2da12b5a5b4,1 +np.float64,0xffe614b74c6c296e,0xbff0000000000000,1 
+np.float64,0xffe60d3f286c1a7e,0xbff0000000000000,1 +np.float64,0x7fda7d91f4b4fb23,0x7ff0000000000000,1 +np.float64,0x800023f266a047e6,0x800023f266a047e6,1 +np.float64,0x7fdf5f9ad23ebf35,0x7ff0000000000000,1 +np.float64,0x3fa7459f002e8b3e,0x3fa7cf178dcf0af6,1 +np.float64,0x3fe9938d61f3271b,0x3ff39516a13caec3,1 +np.float64,0xbfd59314c3ab262a,0xbfd250830f73efd2,1 +np.float64,0xbfc7e193f72fc328,0xbfc5c924339dd7a8,1 +np.float64,0x7fec1965f17832cb,0x7ff0000000000000,1 +np.float64,0xbfd932908eb26522,0xbfd4d4312d272580,1 +np.float64,0xbfdf2d08e2be5a12,0xbfd8add1413b0b1b,1 +np.float64,0x7fdcf7cc74b9ef98,0x7ff0000000000000,1 +np.float64,0x7fc79300912f2600,0x7ff0000000000000,1 +np.float64,0xffd4bd8f23297b1e,0xbff0000000000000,1 +np.float64,0x41869ce0830e,0x41869ce0830e,1 +np.float64,0x3fe5dcec91ebb9da,0x3fef5e213598cbd4,1 +np.float64,0x800815d9c2902bb4,0x800815d9c2902bb4,1 +np.float64,0x800ba1a4b877434a,0x800ba1a4b877434a,1 +np.float64,0x80069d7bdc4d3af8,0x80069d7bdc4d3af8,1 +np.float64,0xcf00d4339e01b,0xcf00d4339e01b,1 +np.float64,0x80072b71bd4e56e4,0x80072b71bd4e56e4,1 +np.float64,0x80059ca6fbab394f,0x80059ca6fbab394f,1 +np.float64,0x3fe522fc092a45f8,0x3fedf212682bf894,1 +np.float64,0x7fe17f384ea2fe70,0x7ff0000000000000,1 +np.float64,0x0,0x0,1 +np.float64,0x3f72bb4c20257698,0x3f72c64766b52069,1 +np.float64,0x7fbc97c940392f92,0x7ff0000000000000,1 +np.float64,0xffc5904ebd2b209c,0xbff0000000000000,1 +np.float64,0xbfe34fb55b669f6a,0xbfdcff81dd30a49d,1 +np.float64,0x8007ccda006f99b5,0x8007ccda006f99b5,1 +np.float64,0x3fee50e4c8fca1ca,0x3ff9434c7750ad0f,1 +np.float64,0x7fee7b07c67cf60f,0x7ff0000000000000,1 +np.float64,0x3fdcce4a5a399c95,0x3fe230c83f28218a,1 +np.float64,0x7fee5187b37ca30e,0x7ff0000000000000,1 +np.float64,0x3fc48f6a97291ed8,0x3fc64db6200a9833,1 +np.float64,0xc7fec3498ffd9,0xc7fec3498ffd9,1 +np.float64,0x800769c59d2ed38c,0x800769c59d2ed38c,1 +np.float64,0xffe69ede782d3dbc,0xbff0000000000000,1 +np.float64,0x3fecd9770979b2ee,0x3ff76a1f2f0f08f2,1 +np.float64,0x5aa358a8b546c,0x5aa358a8b546c,1 +np.float64,0xbfe795a0506f2b40,0xbfe0afcc52c0166b,1 +np.float64,0xffd4ada1e8a95b44,0xbff0000000000000,1 +np.float64,0xffcac1dc213583b8,0xbff0000000000000,1 +np.float64,0xffe393c15fa72782,0xbff0000000000000,1 +np.float64,0xbfcd6a3c113ad478,0xbfca47a2157b9cdd,1 +np.float64,0xffedde20647bbc40,0xbff0000000000000,1 +np.float64,0x3fd0d011b1a1a024,0x3fd33a57945559f4,1 +np.float64,0x3fef27e29f7e4fc6,0x3ffa5c314e0e3d69,1 +np.float64,0xffe96ff71f72dfee,0xbff0000000000000,1 +np.float64,0xffe762414f2ec482,0xbff0000000000000,1 +np.float64,0x3fc2dcfd3d25b9fa,0x3fc452f41682a12e,1 +np.float64,0xbfbdb125b63b6248,0xbfbc08e6553296d4,1 +np.float64,0x7b915740f724,0x7b915740f724,1 +np.float64,0x60b502b2c16a1,0x60b502b2c16a1,1 +np.float64,0xbfeb38b0be367162,0xbfe254f6782cfc47,1 +np.float64,0x800dc39a3edb8735,0x800dc39a3edb8735,1 +np.float64,0x3fea4fb433349f68,0x3ff468b97cf699f5,1 +np.float64,0xbfd49967962932d0,0xbfd19ceb41ff4cd0,1 +np.float64,0xbfebf75cd377eeba,0xbfe2a576bdbccccc,1 +np.float64,0xbfb653d65c2ca7b0,0xbfb561ab8fcb3f26,1 +np.float64,0xffe3f34b8727e696,0xbff0000000000000,1 +np.float64,0x3fdd798064baf301,0x3fe2b7c130a6fc63,1 +np.float64,0x3febe027e6b7c050,0x3ff63bac1b22e12d,1 +np.float64,0x7fcaa371af3546e2,0x7ff0000000000000,1 +np.float64,0xbfe6ee980a2ddd30,0xbfe05f0bc5dc80d2,1 +np.float64,0xc559c33f8ab39,0xc559c33f8ab39,1 +np.float64,0x84542c2b08a86,0x84542c2b08a86,1 +np.float64,0xbfe5645e046ac8bc,0xbfdf3398dc3cc1bd,1 +np.float64,0x3fee8c48ae7d1892,0x3ff9902899480526,1 
+np.float64,0x3fb706471c2e0c8e,0x3fb817787aace8db,1 +np.float64,0x7fefe78f91ffcf1e,0x7ff0000000000000,1 +np.float64,0xbfcf6d560b3edaac,0xbfcbddc72a2130df,1 +np.float64,0x7fd282bfd925057f,0x7ff0000000000000,1 +np.float64,0x3fb973dbee32e7b8,0x3fbac2c87cbd0215,1 +np.float64,0x3fd1ce38ff239c72,0x3fd4876de5164420,1 +np.float64,0x8008ac2e3c31585d,0x8008ac2e3c31585d,1 +np.float64,0x3fa05e06dc20bc00,0x3fa0a1b7de904dce,1 +np.float64,0x7fd925f215324be3,0x7ff0000000000000,1 +np.float64,0x3f949d95d0293b2c,0x3f94d31197d51874,1 +np.float64,0xffdded9e67bbdb3c,0xbff0000000000000,1 +np.float64,0x3fed390dcfba721c,0x3ff7e08c7a709240,1 +np.float64,0x7fe6e62300adcc45,0x7ff0000000000000,1 +np.float64,0xbfd779bc312ef378,0xbfd3a6cb64bb0181,1 +np.float64,0x3fe43e9877287d31,0x3fec3e100ef935fd,1 +np.float64,0x210b68e44216e,0x210b68e44216e,1 +np.float64,0x3fcdffc1e73bff84,0x3fd0e729d02ec539,1 +np.float64,0xcea10c0f9d422,0xcea10c0f9d422,1 +np.float64,0x7feb97a82d772f4f,0x7ff0000000000000,1 +np.float64,0x9b4b4d953696a,0x9b4b4d953696a,1 +np.float64,0x3fd1bd8e95237b1d,0x3fd4716dd34cf828,1 +np.float64,0x800fc273841f84e7,0x800fc273841f84e7,1 +np.float64,0xbfd2aef167255de2,0xbfd0340f30d82f18,1 +np.float64,0x800d021a551a0435,0x800d021a551a0435,1 +np.float64,0xffebf934a8b7f268,0xbff0000000000000,1 +np.float64,0x3fd819849fb03308,0x3fdd43bca0aac749,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x27c34b064f86a,0x27c34b064f86a,1 +np.float64,0x7fef4f5a373e9eb3,0x7ff0000000000000,1 +np.float64,0x7fd92fccce325f99,0x7ff0000000000000,1 +np.float64,0x800520869d6a410e,0x800520869d6a410e,1 +np.float64,0x3fccbcaddf397958,0x3fd01bf6b0c4d97f,1 +np.float64,0x80039ebfc4273d80,0x80039ebfc4273d80,1 +np.float64,0xbfed1f0b3c7a3e16,0xbfe31ea6e4c69141,1 +np.float64,0x7fee1bb7c4bc376f,0x7ff0000000000000,1 +np.float64,0xbfa8bee1d8317dc0,0xbfa8283b7dbf95a9,1 +np.float64,0x3fe797db606f2fb6,0x3ff171b1c2bc8fe5,1 +np.float64,0xbfee2ecfdbbc5da0,0xbfe38a3f0a43d14e,1 +np.float64,0x3fe815c7f1302b90,0x3ff1f65165c45d71,1 +np.float64,0xbfbb265c94364cb8,0xbfb9c27ec61a9a1d,1 +np.float64,0x3fcf1cab5d3e3957,0x3fd19c07444642f9,1 +np.float64,0xbfe6ae753f6d5cea,0xbfe03f99666dbe17,1 +np.float64,0xbfd18a2a73a31454,0xbfceaee204aca016,1 +np.float64,0x3fb8a1dffc3143c0,0x3fb9db38341ab1a3,1 +np.float64,0x7fd2a0376025406e,0x7ff0000000000000,1 +np.float64,0x7fe718c0e3ae3181,0x7ff0000000000000,1 +np.float64,0x3fb264d42424c9a8,0x3fb3121f071d4db4,1 +np.float64,0xd27190a7a4e32,0xd27190a7a4e32,1 +np.float64,0xbfe467668c68cecd,0xbfde2c4616738d5e,1 +np.float64,0x800ab9a2b9357346,0x800ab9a2b9357346,1 +np.float64,0x7fcbd108d537a211,0x7ff0000000000000,1 +np.float64,0x3fb79bba6e2f3770,0x3fb8bb2c140d3445,1 +np.float64,0xffefa7165e3f4e2c,0xbff0000000000000,1 +np.float64,0x7fb40185a428030a,0x7ff0000000000000,1 +np.float64,0xbfe9e3d58e73c7ab,0xbfe1c04d51c83d69,1 +np.float64,0x7fef5b97b17eb72e,0x7ff0000000000000,1 +np.float64,0x800a2957683452af,0x800a2957683452af,1 +np.float64,0x800f54f1925ea9e3,0x800f54f1925ea9e3,1 +np.float64,0xeffa4e77dff4a,0xeffa4e77dff4a,1 +np.float64,0xffbe501aa03ca038,0xbff0000000000000,1 +np.float64,0x8006c651bced8ca4,0x8006c651bced8ca4,1 +np.float64,0x3fe159faff22b3f6,0x3fe708f78efbdbed,1 +np.float64,0x800e7d59a31cfab3,0x800e7d59a31cfab3,1 +np.float64,0x3fe6ac2f272d585e,0x3ff07ee5305385c3,1 +np.float64,0x7fd014c054202980,0x7ff0000000000000,1 +np.float64,0xbfe4800b11e90016,0xbfde4648c6f29ce5,1 +np.float64,0xbfe6738470ece709,0xbfe0227b5b42b713,1 +np.float64,0x3fed052add3a0a56,0x3ff7a01819e65c6e,1 +np.float64,0xffe03106f120620e,0xbff0000000000000,1 
+np.float64,0x7fe11df4d4e23be9,0x7ff0000000000000,1 +np.float64,0xbfcea25d7b3d44bc,0xbfcb3e808e7ce852,1 +np.float64,0xd0807b03a1010,0xd0807b03a1010,1 +np.float64,0x8004eda4fec9db4b,0x8004eda4fec9db4b,1 +np.float64,0x3fceb5c98d3d6b90,0x3fd15a894b15dd9f,1 +np.float64,0xbfee27228afc4e45,0xbfe38741702f3c0b,1 +np.float64,0xbfe606278c6c0c4f,0xbfdfd7cb6093652d,1 +np.float64,0xbfd66f59bc2cdeb4,0xbfd2ecb2297f6afc,1 +np.float64,0x4aee390095dc8,0x4aee390095dc8,1 +np.float64,0xbfe391355d67226a,0xbfdd46ddc0997014,1 +np.float64,0xffd27765e7a4eecc,0xbff0000000000000,1 +np.float64,0xbfe795e20a2f2bc4,0xbfe0afebc66c4dbd,1 +np.float64,0x7fc9a62e81334c5c,0x7ff0000000000000,1 +np.float64,0xffe4e57e52a9cafc,0xbff0000000000000,1 +np.float64,0x7fac326c8c3864d8,0x7ff0000000000000,1 +np.float64,0x3fe8675f6370cebf,0x3ff24d5863029c15,1 +np.float64,0x7fcf4745e73e8e8b,0x7ff0000000000000,1 +np.float64,0x7fcc9aec9f3935d8,0x7ff0000000000000,1 +np.float64,0x3fec2e8fcab85d20,0x3ff699ccd0b2fed6,1 +np.float64,0x3fd110a968222153,0x3fd38e81a88c2d13,1 +np.float64,0xffb3a68532274d08,0xbff0000000000000,1 +np.float64,0xf0e562bbe1cad,0xf0e562bbe1cad,1 +np.float64,0xbfe815b9e5f02b74,0xbfe0ec9f5023aebc,1 +np.float64,0xbf5151d88022a400,0xbf514f80c465feea,1 +np.float64,0x2547e3144a8fd,0x2547e3144a8fd,1 +np.float64,0x3fedcc0c28fb9818,0x3ff899612fbeb4c5,1 +np.float64,0x3fdc3d1c0f387a38,0x3fe1bf6e2d39bd75,1 +np.float64,0x7fe544dbe62a89b7,0x7ff0000000000000,1 +np.float64,0x8001500e48e2a01d,0x8001500e48e2a01d,1 +np.float64,0xbfed3b2b09fa7656,0xbfe329f3e7bada64,1 +np.float64,0xbfe76a943aeed528,0xbfe09b24e3aa3f79,1 +np.float64,0x3fe944330e328866,0x3ff33d472dee70c5,1 +np.float64,0x8004bbbd6cc9777c,0x8004bbbd6cc9777c,1 +np.float64,0xbfe28133fb650268,0xbfdc1ac230ac4ef5,1 +np.float64,0xc1370af7826e2,0xc1370af7826e2,1 +np.float64,0x7fcfa47f5f3f48fe,0x7ff0000000000000,1 +np.float64,0xbfa3002a04260050,0xbfa2a703a538b54e,1 +np.float64,0xffef44f3903e89e6,0xbff0000000000000,1 +np.float64,0xc32cce298659a,0xc32cce298659a,1 +np.float64,0x7b477cc2f68f0,0x7b477cc2f68f0,1 +np.float64,0x40a7f4ec814ff,0x40a7f4ec814ff,1 +np.float64,0xffee38edf67c71db,0xbff0000000000000,1 +np.float64,0x3fe23f6f1ce47ede,0x3fe8992b8bb03499,1 +np.float64,0x7fc8edfe7f31dbfc,0x7ff0000000000000,1 +np.float64,0x800bb8e6fb3771ce,0x800bb8e6fb3771ce,1 +np.float64,0xbfe11d364ee23a6c,0xbfda82a0c2ef9e46,1 +np.float64,0xbfeb993cb4b7327a,0xbfe27df565da85dc,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x3fc1f997d723f330,0x3fc34c5cff060af1,1 +np.float64,0x6e326fa0dc64f,0x6e326fa0dc64f,1 +np.float64,0x800fa30c2c5f4618,0x800fa30c2c5f4618,1 +np.float64,0x7fed16ad603a2d5a,0x7ff0000000000000,1 +np.float64,0x9411cf172823a,0x9411cf172823a,1 +np.float64,0xffece51d4cb9ca3a,0xbff0000000000000,1 +np.float64,0x3fdda3d1453b47a3,0x3fe2d954f7849890,1 +np.float64,0xffd58330172b0660,0xbff0000000000000,1 +np.float64,0xbfc6962ae52d2c54,0xbfc4b4bdf0069f17,1 +np.float64,0xbfb4010a8e280218,0xbfb33e1236f7efa0,1 +np.float64,0x7fd0444909208891,0x7ff0000000000000,1 +np.float64,0xbfe027a24de04f44,0xbfd95e9064101e7c,1 +np.float64,0xa6f3f3214de9,0xa6f3f3214de9,1 +np.float64,0xbfe112eb0fe225d6,0xbfda768f7cbdf346,1 +np.float64,0xbfe99e90d4b33d22,0xbfe1a153e45a382a,1 +np.float64,0xffecb34f8e79669e,0xbff0000000000000,1 +np.float64,0xbfdf32c9653e6592,0xbfd8b159caf5633d,1 +np.float64,0x3fe9519829b2a330,0x3ff34c0a8152e20f,1 +np.float64,0xffd08ec8a7a11d92,0xbff0000000000000,1 +np.float64,0xffd19b71b6a336e4,0xbff0000000000000,1 +np.float64,0x7feda6b9377b4d71,0x7ff0000000000000,1 
+np.float64,0x800fda2956bfb453,0x800fda2956bfb453,1 +np.float64,0x3fe54f601bea9ec0,0x3fee483cb03cbde4,1 +np.float64,0xbfe2a8ad5ee5515a,0xbfdc46ee7a10bf0d,1 +np.float64,0xbfd336c8bd266d92,0xbfd09916d432274a,1 +np.float64,0xfff0000000000000,0xbff0000000000000,1 +np.float64,0x3fd9a811a9b35024,0x3fdf8fa68cc048e3,1 +np.float64,0x3fe078c68520f18d,0x3fe58aecc1f9649b,1 +np.float64,0xbfc6d5aa3a2dab54,0xbfc4e9ea84f3d73c,1 +np.float64,0xf9682007f2d04,0xf9682007f2d04,1 +np.float64,0x3fee54523dbca8a4,0x3ff947b826de81f4,1 +np.float64,0x80461e5d008c4,0x80461e5d008c4,1 +np.float64,0x3fdd6d12d5bada26,0x3fe2ade8dee2fa02,1 +np.float64,0x3fcd5f0dfd3abe18,0x3fd081d6cd25731d,1 +np.float64,0x7fa36475c826c8eb,0x7ff0000000000000,1 +np.float64,0xbfdf3ce052be79c0,0xbfd8b78baccfb908,1 +np.float64,0x7fcd890dd13b121b,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x800ec0f4281d81e8,0x800ec0f4281d81e8,1 +np.float64,0xbfba960116352c00,0xbfb94085424496d9,1 +np.float64,0x3fdddedc9bbbbdb8,0x3fe30853fe4ef5ce,1 +np.float64,0x238092a847013,0x238092a847013,1 +np.float64,0xbfe38d4803271a90,0xbfdd429a955c46af,1 +np.float64,0xbfd4c9067329920c,0xbfd1bf6255ed91a4,1 +np.float64,0xbfbee213923dc428,0xbfbd17ce1bda6088,1 +np.float64,0xffd5a2d337ab45a6,0xbff0000000000000,1 +np.float64,0x7fe21bfcf82437f9,0x7ff0000000000000,1 +np.float64,0x3fe2a2714da544e3,0x3fe949594a74ea25,1 +np.float64,0x800e05cf8ebc0b9f,0x800e05cf8ebc0b9f,1 +np.float64,0x559a1526ab343,0x559a1526ab343,1 +np.float64,0xffe6a1b7906d436e,0xbff0000000000000,1 +np.float64,0xffef27d6253e4fab,0xbff0000000000000,1 +np.float64,0xbfe0f90ab0a1f216,0xbfda5828a1edde48,1 +np.float64,0x9675d2ab2cebb,0x9675d2ab2cebb,1 +np.float64,0xffee0f7eecfc1efd,0xbff0000000000000,1 +np.float64,0x2ec005625d801,0x2ec005625d801,1 +np.float64,0x7fde35ff14bc6bfd,0x7ff0000000000000,1 +np.float64,0xffe03f36d9e07e6d,0xbff0000000000000,1 +np.float64,0x7fe09ff7c4213fef,0x7ff0000000000000,1 +np.float64,0xffeac29dd1b5853b,0xbff0000000000000,1 +np.float64,0x3fb63120aa2c6241,0x3fb72ea3de98a853,1 +np.float64,0xffd079eb84a0f3d8,0xbff0000000000000,1 +np.float64,0xbfd3c2cc75a78598,0xbfd1005996880b3f,1 +np.float64,0x7fb80507ee300a0f,0x7ff0000000000000,1 +np.float64,0xffe8006105f000c1,0xbff0000000000000,1 +np.float64,0x8009138b0ab22716,0x8009138b0ab22716,1 +np.float64,0xbfd6dfb40b2dbf68,0xbfd33b8e4008e3b0,1 +np.float64,0xbfe7c2cf9bef859f,0xbfe0c55c807460df,1 +np.float64,0xbfe75fe4da6ebfca,0xbfe09600256d3b81,1 +np.float64,0xffd662fc73acc5f8,0xbff0000000000000,1 +np.float64,0x20b99dbc41735,0x20b99dbc41735,1 +np.float64,0x3fe10b38ade21671,0x3fe68229a9bbeefc,1 +np.float64,0x3743b99c6e878,0x3743b99c6e878,1 +np.float64,0xff9eb5ed903d6be0,0xbff0000000000000,1 +np.float64,0x3ff0000000000000,0x3ffb7e151628aed3,1 +np.float64,0xffb9e0569e33c0b0,0xbff0000000000000,1 +np.float64,0x7fd39c804fa73900,0x7ff0000000000000,1 +np.float64,0x3fe881ef67f103df,0x3ff269dd704b7129,1 +np.float64,0x1b6eb40236dd7,0x1b6eb40236dd7,1 +np.float64,0xbfe734ea432e69d4,0xbfe0813e6355d02f,1 +np.float64,0xffcf48f3743e91e8,0xbff0000000000000,1 +np.float64,0xffed10bcf6fa2179,0xbff0000000000000,1 +np.float64,0x3fef07723b7e0ee4,0x3ffa3156123f3c15,1 +np.float64,0xffe45c704aa8b8e0,0xbff0000000000000,1 +np.float64,0xb7b818d96f703,0xb7b818d96f703,1 +np.float64,0x42fcc04085f99,0x42fcc04085f99,1 +np.float64,0xbfda7ced01b4f9da,0xbfd5b0ce1e5524ae,1 +np.float64,0xbfe1e5963d63cb2c,0xbfdb6a87b6c09185,1 +np.float64,0x7fdfa18003bf42ff,0x7ff0000000000000,1 +np.float64,0xbfe3790a43e6f214,0xbfdd2c9a38b4f089,1 
+np.float64,0xffe0ff5b9ae1feb6,0xbff0000000000000,1 +np.float64,0x80085a7d3110b4fb,0x80085a7d3110b4fb,1 +np.float64,0xffd6bfa6622d7f4c,0xbff0000000000000,1 +np.float64,0xbfef5ddc7cfebbb9,0xbfe3fe170521593e,1 +np.float64,0x3fc21773fa242ee8,0x3fc36ebda1f91a72,1 +np.float64,0x7fc04d98da209b31,0x7ff0000000000000,1 +np.float64,0xbfeba3b535b7476a,0xbfe282602e3c322e,1 +np.float64,0xffd41fb5c1a83f6c,0xbff0000000000000,1 +np.float64,0xf87d206df0fa4,0xf87d206df0fa4,1 +np.float64,0x800060946fc0c12a,0x800060946fc0c12a,1 +np.float64,0x3fe69d5f166d3abe,0x3ff06fdddcf4ca93,1 +np.float64,0x7fe9b5793b336af1,0x7ff0000000000000,1 +np.float64,0x7fe0dd4143e1ba82,0x7ff0000000000000,1 +np.float64,0xbfa8eaea3c31d5d0,0xbfa8522e397da3bd,1 +np.float64,0x119f0078233e1,0x119f0078233e1,1 +np.float64,0xbfd78a207aaf1440,0xbfd3b225bbf2ab4f,1 +np.float64,0xc66a6d4d8cd4e,0xc66a6d4d8cd4e,1 +np.float64,0xe7fc4b57cff8a,0xe7fc4b57cff8a,1 +np.float64,0x800883e8091107d0,0x800883e8091107d0,1 +np.float64,0x3fa6520c842ca419,0x3fa6d06e1041743a,1 +np.float64,0x3fa563182c2ac630,0x3fa5d70e27a84c97,1 +np.float64,0xe6a30b61cd462,0xe6a30b61cd462,1 +np.float64,0x3fee85dac37d0bb6,0x3ff987cfa41a9778,1 +np.float64,0x3fe8f621db71ec44,0x3ff2e7b768a2e9d0,1 +np.float64,0x800f231d861e463b,0x800f231d861e463b,1 +np.float64,0xbfe22eb07c645d61,0xbfdbbdbb853ab4c6,1 +np.float64,0x7fd2dda2dea5bb45,0x7ff0000000000000,1 +np.float64,0xbfd09b79a0a136f4,0xbfcd4147606ffd27,1 +np.float64,0xca039cc394074,0xca039cc394074,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0xcb34575d9668b,0xcb34575d9668b,1 +np.float64,0x3fea62c1f3f4c584,0x3ff47e6dc67ec89f,1 +np.float64,0x7fe544c8606a8990,0x7ff0000000000000,1 +np.float64,0xffe0a980c4615301,0xbff0000000000000,1 +np.float64,0x3fdd67d5f8bacfac,0x3fe2a9c3421830f1,1 +np.float64,0xffe41d3dda283a7b,0xbff0000000000000,1 +np.float64,0xffeed59e5ffdab3c,0xbff0000000000000,1 +np.float64,0xffeeae8326fd5d05,0xbff0000000000000,1 +np.float64,0x800d70b4fa7ae16a,0x800d70b4fa7ae16a,1 +np.float64,0xffec932e6839265c,0xbff0000000000000,1 +np.float64,0xee30b185dc616,0xee30b185dc616,1 +np.float64,0x7fc3cf4397279e86,0x7ff0000000000000,1 +np.float64,0xbfeab34f1875669e,0xbfe21b868229de7d,1 +np.float64,0xf45f5f7de8bec,0xf45f5f7de8bec,1 +np.float64,0x3fad2c4b203a5896,0x3fae0528b568f3cf,1 +np.float64,0xbfe2479543e48f2a,0xbfdbd9e57cf64028,1 +np.float64,0x3fd41a1473283429,0x3fd79df2bc60debb,1 +np.float64,0x3febb5155ef76a2a,0x3ff608585afd698b,1 +np.float64,0xffe21f5303e43ea6,0xbff0000000000000,1 +np.float64,0x7fe9ef390833de71,0x7ff0000000000000,1 +np.float64,0xffe8ee873d71dd0e,0xbff0000000000000,1 +np.float64,0x7fd7cbc55e2f978a,0x7ff0000000000000,1 +np.float64,0x80081f9080d03f21,0x80081f9080d03f21,1 +np.float64,0x7fecbafc8b3975f8,0x7ff0000000000000,1 +np.float64,0x800b6c4b0b16d896,0x800b6c4b0b16d896,1 +np.float64,0xbfaa0fc2d4341f80,0xbfa968cdf32b98ad,1 +np.float64,0x3fec79fe4078f3fc,0x3ff6f5361a4a5d93,1 +np.float64,0xbfb14b79de2296f0,0xbfb0b93b75ecec11,1 +np.float64,0x800009d084c013a2,0x800009d084c013a2,1 +np.float64,0x4a4cdfe29499d,0x4a4cdfe29499d,1 +np.float64,0xbfe721c2d56e4386,0xbfe077f541987d76,1 +np.float64,0x3e5f539e7cbeb,0x3e5f539e7cbeb,1 +np.float64,0x3fd23f044c247e09,0x3fd51ceafcdd64aa,1 +np.float64,0x3fc70785b02e0f0b,0x3fc93b2a37eb342a,1 +np.float64,0xbfe7ab4ec7af569e,0xbfe0ba28eecbf6b0,1 +np.float64,0x800c1d4134583a83,0x800c1d4134583a83,1 +np.float64,0xffd9a73070334e60,0xbff0000000000000,1 +np.float64,0x68a4bf24d1499,0x68a4bf24d1499,1 +np.float64,0x7feba9d9507753b2,0x7ff0000000000000,1 
+np.float64,0xbfe9d747db73ae90,0xbfe1bab53d932010,1 +np.float64,0x800a9a4aed953496,0x800a9a4aed953496,1 +np.float64,0xffcb89b0ad371360,0xbff0000000000000,1 +np.float64,0xbfc62388b82c4710,0xbfc4547be442a38c,1 +np.float64,0x800a006d187400db,0x800a006d187400db,1 +np.float64,0x3fcef2fbd33de5f8,0x3fd18177b2150148,1 +np.float64,0x8000b74e3da16e9d,0x8000b74e3da16e9d,1 +np.float64,0x25be536e4b7cb,0x25be536e4b7cb,1 +np.float64,0x3fa86e189430dc31,0x3fa905b4684c9f01,1 +np.float64,0xa7584b114eb0a,0xa7584b114eb0a,1 +np.float64,0x800331133c866227,0x800331133c866227,1 +np.float64,0x3fb52b48142a5690,0x3fb611a6f6e7c664,1 +np.float64,0x3fe825797cf04af2,0x3ff206fd60e98116,1 +np.float64,0x3fd0bec4e5217d8a,0x3fd323db3ffd59b2,1 +np.float64,0x907b43a120f7,0x907b43a120f7,1 +np.float64,0x3fed31eb1d3a63d6,0x3ff7d7a91c6930a4,1 +np.float64,0x7f97a13d782f427a,0x7ff0000000000000,1 +np.float64,0xffc7121a702e2434,0xbff0000000000000,1 +np.float64,0xbfe8bb4cbbf1769a,0xbfe139d7f46f1fb1,1 +np.float64,0xbfe3593cc5a6b27a,0xbfdd09ec91d6cd48,1 +np.float64,0x7fcff218ff9ff,0x7fcff218ff9ff,1 +np.float64,0x3fe73651d4ae6ca4,0x3ff10c5c1d21d127,1 +np.float64,0x80054e396eaa9c74,0x80054e396eaa9c74,1 +np.float64,0x3fe527d5f9aa4fac,0x3fedfb7743db9b53,1 +np.float64,0x7fec6f28c5f8de51,0x7ff0000000000000,1 +np.float64,0x3fcd2bbff53a5780,0x3fd061987416b49b,1 +np.float64,0xffd1f0046423e008,0xbff0000000000000,1 +np.float64,0x80034d97fac69b31,0x80034d97fac69b31,1 +np.float64,0x3faa803f14350080,0x3fab32e3f8073be4,1 +np.float64,0x3fcf8da0163f1b40,0x3fd1e42ba2354c8e,1 +np.float64,0x3fd573c2632ae785,0x3fd97c37609d18d7,1 +np.float64,0x7f922960482452c0,0x7ff0000000000000,1 +np.float64,0x800ebd0c5d3d7a19,0x800ebd0c5d3d7a19,1 +np.float64,0xbfee63b7807cc76f,0xbfe39ec7981035db,1 +np.float64,0xffdc023f8e380480,0xbff0000000000000,1 +np.float64,0x3fe3ffa02c67ff40,0x3febc7f8b900ceba,1 +np.float64,0x36c508b86d8a2,0x36c508b86d8a2,1 +np.float64,0x3fc9fbb0f133f760,0x3fcccee9f6ba801c,1 +np.float64,0x3fd75c1d5faeb83b,0x3fdc3150f9eff99e,1 +np.float64,0x3fe9a8d907b351b2,0x3ff3accc78a31df8,1 +np.float64,0x3fdd8fdcafbb1fb8,0x3fe2c97c97757994,1 +np.float64,0x3fb10c34ca22186a,0x3fb1a0cc42c76b86,1 +np.float64,0xbff0000000000000,0xbfe43a54e4e98864,1 +np.float64,0xffd046aefda08d5e,0xbff0000000000000,1 +np.float64,0x80067989758cf314,0x80067989758cf314,1 +np.float64,0x3fee9d77763d3aef,0x3ff9a67ff0841ba5,1 +np.float64,0xffe4d3cbf8e9a798,0xbff0000000000000,1 +np.float64,0x800f9cab273f3956,0x800f9cab273f3956,1 +np.float64,0x800a5c84f9f4b90a,0x800a5c84f9f4b90a,1 +np.float64,0x4fd377009fa8,0x4fd377009fa8,1 +np.float64,0xbfe7ba26af6f744e,0xbfe0c13ce45d6f95,1 +np.float64,0x609c8a86c1392,0x609c8a86c1392,1 +np.float64,0x7fe4d0296ea9a052,0x7ff0000000000000,1 +np.float64,0x59847bccb3090,0x59847bccb3090,1 +np.float64,0xbfdf944157bf2882,0xbfd8ed092bacad43,1 +np.float64,0xbfe7560a632eac15,0xbfe091405ec34973,1 +np.float64,0x3fea0699f4340d34,0x3ff415eb72089230,1 +np.float64,0x800a5533f374aa68,0x800a5533f374aa68,1 +np.float64,0xbf8e8cdb103d19c0,0xbf8e52cffcb83774,1 +np.float64,0x3fe87d9e52f0fb3d,0x3ff2653952344b81,1 +np.float64,0x7fca3950f73472a1,0x7ff0000000000000,1 +np.float64,0xffd5d1068aaba20e,0xbff0000000000000,1 +np.float64,0x3fd1a5f169a34be4,0x3fd4524b6ef17f91,1 +np.float64,0x3fdc4b95a8b8972c,0x3fe1caafd8652bf7,1 +np.float64,0x3fe333f65a6667ed,0x3fea502fb1f8a578,1 +np.float64,0xbfc117aaac222f54,0xbfc00018a4b84b6e,1 +np.float64,0x7fecf2efdf39e5df,0x7ff0000000000000,1 +np.float64,0x4e99d83e9d33c,0x4e99d83e9d33c,1 +np.float64,0x800d18937bda3127,0x800d18937bda3127,1 
+np.float64,0x3fd6c67778ad8cef,0x3fdb5aba70a3ea9e,1 +np.float64,0x3fdbb71770b76e2f,0x3fe157ae8da20bc5,1 +np.float64,0xbfe9faf6ebf3f5ee,0xbfe1ca963d83f17f,1 +np.float64,0x80038850ac0710a2,0x80038850ac0710a2,1 +np.float64,0x8006beb72f8d7d6f,0x8006beb72f8d7d6f,1 +np.float64,0x3feead67bffd5acf,0x3ff9bb43e8b15e2f,1 +np.float64,0xbfd1174b89222e98,0xbfcdff9972799907,1 +np.float64,0x7fee2c077cfc580e,0x7ff0000000000000,1 +np.float64,0xbfbdbd904e3b7b20,0xbfbc13f4916ed466,1 +np.float64,0xffee47b8fe3c8f71,0xbff0000000000000,1 +np.float64,0xffd161884222c310,0xbff0000000000000,1 +np.float64,0xbfd42f27c4a85e50,0xbfd14fa8d67ba5ee,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x8008151791b02a30,0x8008151791b02a30,1 +np.float64,0xbfba79029234f208,0xbfb926616cf41755,1 +np.float64,0x8004c486be29890e,0x8004c486be29890e,1 +np.float64,0x7fe5325a252a64b3,0x7ff0000000000000,1 +np.float64,0x5a880f04b5103,0x5a880f04b5103,1 +np.float64,0xbfe6f4b7702de96f,0xbfe06209002dd72c,1 +np.float64,0xbfdf8b3739bf166e,0xbfd8e783efe3c30f,1 +np.float64,0xbfe32571c8e64ae4,0xbfdcd128b9aa49a1,1 +np.float64,0xbfe97c98c172f932,0xbfe1920ac0fc040f,1 +np.float64,0x3fd0b513a2a16a28,0x3fd31744e3a1bf0a,1 +np.float64,0xffe3ab70832756e0,0xbff0000000000000,1 +np.float64,0x80030f055ce61e0b,0x80030f055ce61e0b,1 +np.float64,0xffd5f3b21b2be764,0xbff0000000000000,1 +np.float64,0x800c1f2d6c783e5b,0x800c1f2d6c783e5b,1 +np.float64,0x80075f4f148ebe9f,0x80075f4f148ebe9f,1 +np.float64,0xbfa5a046f42b4090,0xbfa52cfbf8992256,1 +np.float64,0xffd6702583ace04c,0xbff0000000000000,1 +np.float64,0x800dc0a5cf1b814c,0x800dc0a5cf1b814c,1 +np.float64,0x14f2203a29e45,0x14f2203a29e45,1 +np.float64,0x800421a40ee84349,0x800421a40ee84349,1 +np.float64,0xbfea7c279df4f84f,0xbfe2037fff3ed877,1 +np.float64,0xbfe9b41ddcf3683c,0xbfe1aafe18a44bf8,1 +np.float64,0xffe7b037022f606e,0xbff0000000000000,1 +np.float64,0x800bafb648775f6d,0x800bafb648775f6d,1 +np.float64,0x800b81681d5702d1,0x800b81681d5702d1,1 +np.float64,0x3fe29f8dc8653f1c,0x3fe9442da1c32c6b,1 +np.float64,0xffef9a05dc7f340b,0xbff0000000000000,1 +np.float64,0x800c8c65a65918cb,0x800c8c65a65918cb,1 +np.float64,0xffe99df0d5f33be1,0xbff0000000000000,1 +np.float64,0x9afeb22535fd7,0x9afeb22535fd7,1 +np.float64,0x7fc620dd822c41ba,0x7ff0000000000000,1 +np.float64,0x29c2cdf25385b,0x29c2cdf25385b,1 +np.float64,0x2d92284e5b246,0x2d92284e5b246,1 +np.float64,0xffc794aa942f2954,0xbff0000000000000,1 +np.float64,0xbfe7ed907eafdb21,0xbfe0d9a7b1442497,1 +np.float64,0xbfd4e0d4aea9c1aa,0xbfd1d09366dba2a7,1 +np.float64,0xa70412c34e083,0xa70412c34e083,1 +np.float64,0x41dc0ee083b9,0x41dc0ee083b9,1 +np.float64,0x8000ece20da1d9c5,0x8000ece20da1d9c5,1 +np.float64,0x3fdf3dae103e7b5c,0x3fe42314bf826bc5,1 +np.float64,0x3fe972533c72e4a6,0x3ff3703761e70f04,1 +np.float64,0xffba1d2b82343a58,0xbff0000000000000,1 +np.float64,0xe0086c83c010e,0xe0086c83c010e,1 +np.float64,0x3fe6fb0dde6df61c,0x3ff0cf5fae01aa08,1 +np.float64,0x3fcfaf057e3f5e0b,0x3fd1f98c1fd20139,1 +np.float64,0xbfdca19d9239433c,0xbfd7158745192ca9,1 +np.float64,0xffb17f394e22fe70,0xbff0000000000000,1 +np.float64,0x7fe40f05c7681e0b,0x7ff0000000000000,1 +np.float64,0x800b3c575d5678af,0x800b3c575d5678af,1 +np.float64,0x7fa4ab20ac295640,0x7ff0000000000000,1 +np.float64,0xbfd2fff4f6a5ffea,0xbfd07069bb50e1a6,1 +np.float64,0xbfef81b9147f0372,0xbfe40b845a749787,1 +np.float64,0x7fd7400e54ae801c,0x7ff0000000000000,1 +np.float64,0x3fd4401a17a88034,0x3fd7d20fb76a4f3d,1 +np.float64,0xbfd3e907fd27d210,0xbfd11c64b7577fc5,1 +np.float64,0x7fe34bed9ae697da,0x7ff0000000000000,1 
+np.float64,0x80039119c0472234,0x80039119c0472234,1 +np.float64,0xbfe2e36ac565c6d6,0xbfdc88454ee997b3,1 +np.float64,0xbfec57204478ae40,0xbfe2cd3183de1d2d,1 +np.float64,0x7fed7e2a12fafc53,0x7ff0000000000000,1 +np.float64,0x7fd5c5fa7d2b8bf4,0x7ff0000000000000,1 +np.float64,0x3fdcf368d6b9e6d0,0x3fe24decce1ebd35,1 +np.float64,0xbfe0ebfcf2e1d7fa,0xbfda48c9247ae8cf,1 +np.float64,0xbfe10dbea2e21b7e,0xbfda707d68b59674,1 +np.float64,0xbfdf201b6ebe4036,0xbfd8a5df27742fdf,1 +np.float64,0xffe16555be62caab,0xbff0000000000000,1 +np.float64,0xffc23a5db22474bc,0xbff0000000000000,1 +np.float64,0xffe1cbb3f8a39768,0xbff0000000000000,1 +np.float64,0x8007b823be0f7048,0x8007b823be0f7048,1 +np.float64,0xbfa5d1f3042ba3e0,0xbfa55c97cd77bf6e,1 +np.float64,0xbfe316a074662d41,0xbfdcc0da4e7334d0,1 +np.float64,0xbfdfab2bf2bf5658,0xbfd8fb046b88b51f,1 +np.float64,0xfacc9dabf5994,0xfacc9dabf5994,1 +np.float64,0xffe7e420a4efc841,0xbff0000000000000,1 +np.float64,0x800bb986cd57730e,0x800bb986cd57730e,1 +np.float64,0xbfe314fa38e629f4,0xbfdcbf09302c3bf5,1 +np.float64,0x7fc56b17772ad62e,0x7ff0000000000000,1 +np.float64,0x8006a87d54ad50fb,0x8006a87d54ad50fb,1 +np.float64,0xbfe6633e4a6cc67c,0xbfe01a67c3b3ff32,1 +np.float64,0x3fe0ff56eb21feae,0x3fe66df01defb0fb,1 +np.float64,0xffc369cfc126d3a0,0xbff0000000000000,1 +np.float64,0x7fe8775d9a30eeba,0x7ff0000000000000,1 +np.float64,0x3fb53db13e2a7b60,0x3fb625a7279cdac3,1 +np.float64,0xffee76e7e6fcedcf,0xbff0000000000000,1 +np.float64,0xb45595b568ab3,0xb45595b568ab3,1 +np.float64,0xffa09a1d50213440,0xbff0000000000000,1 +np.float64,0x7d11dc16fa23c,0x7d11dc16fa23c,1 +np.float64,0x7fd4cc2928299851,0x7ff0000000000000,1 +np.float64,0x6a30e0ead461d,0x6a30e0ead461d,1 +np.float64,0x7fd3ee735a27dce6,0x7ff0000000000000,1 +np.float64,0x8008d7084b31ae11,0x8008d7084b31ae11,1 +np.float64,0x3fe469353fe8d26a,0x3fec8e7e2df38590,1 +np.float64,0x3fcecef2743d9de5,0x3fd16a888b715dfd,1 +np.float64,0x460130d68c027,0x460130d68c027,1 +np.float64,0xbfd76510c62eca22,0xbfd398766b741d6e,1 +np.float64,0x800ec88c2a5d9118,0x800ec88c2a5d9118,1 +np.float64,0x3fac969c6c392d40,0x3fad66ca6a1e583c,1 +np.float64,0x3fe5c616bf6b8c2e,0x3fef30f931e8dde5,1 +np.float64,0xb4cb6cd56996e,0xb4cb6cd56996e,1 +np.float64,0xffc3eacf8827d5a0,0xbff0000000000000,1 +np.float64,0x3fe1ceaf60e39d5f,0x3fe7d31e0a627cf9,1 +np.float64,0xffea69b42ff4d368,0xbff0000000000000,1 +np.float64,0x800ff8aef99ff15e,0x800ff8aef99ff15e,1 +np.float64,0x6c3953f0d872b,0x6c3953f0d872b,1 +np.float64,0x8007ca5a0d0f94b5,0x8007ca5a0d0f94b5,1 +np.float64,0x800993ce3ad3279d,0x800993ce3ad3279d,1 +np.float64,0x3fe5a4d1516b49a2,0x3feeef67b22ac65b,1 +np.float64,0x8003d7512a67aea3,0x8003d7512a67aea3,1 +np.float64,0x33864430670c9,0x33864430670c9,1 +np.float64,0xbfdbf477e3b7e8f0,0xbfd6a63f1b36f424,1 +np.float64,0x3fb5da92582bb525,0x3fb6d04ef1a1d31a,1 +np.float64,0xe38aae71c7156,0xe38aae71c7156,1 +np.float64,0x3fcaf5590a35eab2,0x3fce01ed6eb6188e,1 +np.float64,0x800deba9b05bd754,0x800deba9b05bd754,1 +np.float64,0x7fee0cde287c19bb,0x7ff0000000000000,1 +np.float64,0xbfe0c2ae70e1855d,0xbfda17fa64d84fcf,1 +np.float64,0x518618faa30c4,0x518618faa30c4,1 +np.float64,0xbfeb4c49b8769894,0xbfe25d52cd7e529f,1 +np.float64,0xbfeb3aa21b367544,0xbfe255cae1df4cfd,1 +np.float64,0xffd23f1c5d247e38,0xbff0000000000000,1 +np.float64,0xff9a75132034ea20,0xbff0000000000000,1 +np.float64,0xbfef9d96307f3b2c,0xbfe415e8b6ce0e50,1 +np.float64,0x8004046f2f0808df,0x8004046f2f0808df,1 +np.float64,0x3fe15871aea2b0e3,0x3fe706532ea5c770,1 +np.float64,0x7fd86b1576b0d62a,0x7ff0000000000000,1 
+np.float64,0xbfc240a5c724814c,0xbfc102c7971ca455,1 +np.float64,0xffd8ea670bb1d4ce,0xbff0000000000000,1 +np.float64,0xbfeb1ddd1ff63bba,0xbfe2497c4e27bb8e,1 +np.float64,0x3fcd47e0a33a8fc1,0x3fd0734444150d83,1 +np.float64,0xe00b6a65c016e,0xe00b6a65c016e,1 +np.float64,0xbfc7d582142fab04,0xbfc5bf1fbe755a4c,1 +np.float64,0x8cc91ca11993,0x8cc91ca11993,1 +np.float64,0x7fdbc530e3b78a61,0x7ff0000000000000,1 +np.float64,0x7fee437522bc86e9,0x7ff0000000000000,1 +np.float64,0xffe9e09ae2b3c135,0xbff0000000000000,1 +np.float64,0x8002841cada5083a,0x8002841cada5083a,1 +np.float64,0x3fd6b485f8ad690c,0x3fdb412135932699,1 +np.float64,0x80070e8d0b0e1d1b,0x80070e8d0b0e1d1b,1 +np.float64,0x7fed5df165babbe2,0x7ff0000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe99d08cd333a11,0x7ff0000000000000,1 +np.float64,0xdfff4201bfff,0xdfff4201bfff,1 +np.float64,0x800ccf7aaf999ef6,0x800ccf7aaf999ef6,1 +np.float64,0x3fddb05aad3b60b5,0x3fe2e34bdd1dd9d5,1 +np.float64,0xbfe5e1c60e6bc38c,0xbfdfb3275cc1675f,1 +np.float64,0x8004fe674269fccf,0x8004fe674269fccf,1 +np.float64,0x7fe9280363325006,0x7ff0000000000000,1 +np.float64,0xf605b9f1ec0b7,0xf605b9f1ec0b7,1 +np.float64,0x800c7c214018f843,0x800c7c214018f843,1 +np.float64,0x7fd97eb6b9b2fd6c,0x7ff0000000000000,1 +np.float64,0x7fd03f8fb6207f1e,0x7ff0000000000000,1 +np.float64,0x7fc526b64d2a4d6c,0x7ff0000000000000,1 +np.float64,0xbfef1a7c42fe34f9,0xbfe3e4b4399e0fcf,1 +np.float64,0xffdde10a2fbbc214,0xbff0000000000000,1 +np.float64,0xbfdd274f72ba4e9e,0xbfd76aa73788863c,1 +np.float64,0xbfecf7f77af9efef,0xbfe30ee2ae03fed1,1 +np.float64,0xffde709322bce126,0xbff0000000000000,1 +np.float64,0x268b5dac4d16d,0x268b5dac4d16d,1 +np.float64,0x8005c099606b8134,0x8005c099606b8134,1 +np.float64,0xffcf54c1593ea984,0xbff0000000000000,1 +np.float64,0xbfee9b8ebabd371d,0xbfe3b44f2663139d,1 +np.float64,0x3faf0330643e0661,0x3faff88fab74b447,1 +np.float64,0x7fe1c6011be38c01,0x7ff0000000000000,1 +np.float64,0xbfe9d58053b3ab01,0xbfe1b9ea12242485,1 +np.float64,0xbfe15a80fee2b502,0xbfdaca2aa7d1231a,1 +np.float64,0x7fe0d766d8a1aecd,0x7ff0000000000000,1 +np.float64,0x800f65e6a21ecbcd,0x800f65e6a21ecbcd,1 +np.float64,0x7fc85e45a530bc8a,0x7ff0000000000000,1 +np.float64,0x3fcc240e5438481d,0x3fcf7954fc080ac3,1 +np.float64,0xffddd49da2bba93c,0xbff0000000000000,1 +np.float64,0x1376f36c26edf,0x1376f36c26edf,1 +np.float64,0x3feffb7af17ff6f6,0x3ffb77f0ead2f881,1 +np.float64,0x3fd9354ea9b26a9d,0x3fdee4e4c8db8239,1 +np.float64,0xffdf7beed4bef7de,0xbff0000000000000,1 +np.float64,0xbfdef256ecbde4ae,0xbfd889b0e213a019,1 +np.float64,0x800d78bd1e7af17a,0x800d78bd1e7af17a,1 +np.float64,0xb66d66276cdad,0xb66d66276cdad,1 +np.float64,0x7fd8f51138b1ea21,0x7ff0000000000000,1 +np.float64,0xffe8c9c302b19385,0xbff0000000000000,1 +np.float64,0x8000be4cf5417c9b,0x8000be4cf5417c9b,1 +np.float64,0xbfe2293a25645274,0xbfdbb78a8c547c68,1 +np.float64,0xce8392c19d08,0xce8392c19d08,1 +np.float64,0xbfe075736b60eae7,0xbfd9bc0f6e34a283,1 +np.float64,0xbfe8d6fe6a71adfd,0xbfe1469ba80b4915,1 +np.float64,0xffe0c7993fa18f32,0xbff0000000000000,1 +np.float64,0x3fce5210fd3ca422,0x3fd11b40a1270a95,1 +np.float64,0x6c0534a8d80a7,0x6c0534a8d80a7,1 +np.float64,0x23c1823647831,0x23c1823647831,1 +np.float64,0x3fc901253732024a,0x3fcb9d264accb07c,1 +np.float64,0x3fe42b8997685714,0x3fec1a39e207b6e4,1 +np.float64,0x3fec4fd00fb89fa0,0x3ff6c1fdd0c262c8,1 +np.float64,0x8007b333caaf6668,0x8007b333caaf6668,1 +np.float64,0x800f9275141f24ea,0x800f9275141f24ea,1 +np.float64,0xffbba361a23746c0,0xbff0000000000000,1 
+np.float64,0xbfee4effa9fc9dff,0xbfe396c11d0cd524,1
+np.float64,0x3e47e84c7c8fe,0x3e47e84c7c8fe,1
+np.float64,0x3fe80eb7b1301d6f,0x3ff1eed318a00153,1
+np.float64,0x7fd3f4c5b4a7e98a,0x7ff0000000000000,1
+np.float64,0x158abab02b158,0x158abab02b158,1
+np.float64,0x1,0x1,1
+np.float64,0x1f1797883e2f4,0x1f1797883e2f4,1
+np.float64,0x3feec055d03d80ac,0x3ff9d3fb0394de33,1
+np.float64,0x8010000000000000,0x8010000000000000,1
+np.float64,0xbfd070860ea0e10c,0xbfccfeec2828efef,1
+np.float64,0x80015c8b3e82b917,0x80015c8b3e82b917,1
+np.float64,0xffef9956d9ff32ad,0xbff0000000000000,1
+np.float64,0x7fe7f087dd2fe10f,0x7ff0000000000000,1
+np.float64,0x8002e7718665cee4,0x8002e7718665cee4,1
+np.float64,0x3fdfb9adb2bf735c,0x3fe4887a86214c1e,1
+np.float64,0xffc7747dfb2ee8fc,0xbff0000000000000,1
+np.float64,0x3fec309bb5386137,0x3ff69c44e1738547,1
+np.float64,0xffdbe2bf9ab7c580,0xbff0000000000000,1
+np.float64,0xbfe6a274daed44ea,0xbfe039aff2be9d48,1
+np.float64,0x7fd5a4e4efab49c9,0x7ff0000000000000,1
+np.float64,0xffbe6aaeb03cd560,0xbff0000000000000,1
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log.csv
new file mode 100644
index 0000000..b8f6b08
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log.csv
@@ -0,0 +1,271 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0xc2afbc1b,4
+np.float32,0x007b2490,0xc2aec01e,4
+np.float32,0x007c99fa,0xc2aeba17,4
+np.float32,0x00734a0c,0xc2aee1dc,4
+np.float32,0x0070de24,0xc2aeecba,4
+np.float32,0x007fffff,0xc2aeac50,4
+np.float32,0x00000001,0xc2ce8ed0,4
+## -ve denormals ##
+np.float32,0x80495d65,0xffc00000,4
+np.float32,0x806894f6,0xffc00000,4
+np.float32,0x80555a76,0xffc00000,4
+np.float32,0x804e1fb8,0xffc00000,4
+np.float32,0x80687de9,0xffc00000,4
+np.float32,0x807fffff,0xffc00000,4
+np.float32,0x80000001,0xffc00000,4
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0xff800000,4
+np.float32,0x80000000,0xff800000,4
+np.float32,0x7f7fffff,0x42b17218,4
+np.float32,0x80800000,0xffc00000,4
+np.float32,0xff7fffff,0xffc00000,4
+## 1.00f + 0x00000001 ##
+np.float32,0x3f800000,0x00000000,4
+np.float32,0x3f800001,0x33ffffff,4
+np.float32,0x3f800002,0x347ffffe,4
+np.float32,0x3f7fffff,0xb3800000,4
+np.float32,0x3f7ffffe,0xb4000000,4
+np.float32,0x3f7ffffd,0xb4400001,4
+np.float32,0x402df853,0x3f7ffffe,4
+np.float32,0x402df854,0x3f7fffff,4
+np.float32,0x402df855,0x3f800000,4
+np.float32,0x402df856,0x3f800001,4
+np.float32,0x3ebc5ab0,0xbf800001,4
+np.float32,0x3ebc5ab1,0xbf800000,4
+np.float32,0x3ebc5ab2,0xbf800000,4
+np.float32,0x3ebc5ab3,0xbf7ffffe,4
+np.float32,0x423ef575,0x407768ab,4
+np.float32,0x427b8c61,0x408485dd,4
+np.float32,0x4211e9ee,0x406630b0,4
+np.float32,0x424d5c41,0x407c0fed,4
+np.float32,0x42be722a,0x4091cc91,4
+np.float32,0x42b73d30,0x4090908b,4
+np.float32,0x427e48e2,0x4084de7f,4
+np.float32,0x428f759b,0x4088bba3,4
+np.float32,0x41629069,0x4029a0cc,4
+np.float32,0x4272c99d,0x40836379,4
+np.float32,0x4d1b7458,0x4197463d,4
+np.float32,0x4f10c594,0x41ace2b2,4
+np.float32,0x4ea397c2,0x41a85171,4
+np.float32,0x4fefa9d1,0x41b6769c,4
+np.float32,0x4ebac6ab,0x41a960dc,4
+np.float32,0x4f6efb42,0x41b0e535,4
+np.float32,0x4e9ab8e7,0x41a7df44,4
+np.float32,0x4e81b5d1,0x41a67625,4
+np.float32,0x5014d9f2,0x41b832bd,4
+np.float32,0x4f02175c,0x41ac07b8,4
+np.float32,0x7f034f89,0x42b01c47,4
+np.float32,0x7f56d00e,0x42b11849,4
+np.float32,0x7f1cd5f6,0x42b0773a,4 +np.float32,0x7e979174,0x42af02d7,4 +np.float32,0x7f23369f,0x42b08ba2,4 +np.float32,0x7f0637ae,0x42b0277d,4 +np.float32,0x7efcb6e8,0x42b00897,4 +np.float32,0x7f7907c8,0x42b163f6,4 +np.float32,0x7e95c4c2,0x42aefcba,4 +np.float32,0x7f4577b2,0x42b0ed2d,4 +np.float32,0x3f49c92e,0xbe73ae84,4 +np.float32,0x3f4a23d1,0xbe71e2f8,4 +np.float32,0x3f4abb67,0xbe6ee430,4 +np.float32,0x3f48169a,0xbe7c5532,4 +np.float32,0x3f47f5fa,0xbe7cfc37,4 +np.float32,0x3f488309,0xbe7a2ad8,4 +np.float32,0x3f479df4,0xbe7ebf5f,4 +np.float32,0x3f47cfff,0xbe7dbec9,4 +np.float32,0x3f496704,0xbe75a125,4 +np.float32,0x3f478ee8,0xbe7f0c92,4 +np.float32,0x3f4a763b,0xbe7041ce,4 +np.float32,0x3f47a108,0xbe7eaf94,4 +np.float32,0x3f48136c,0xbe7c6578,4 +np.float32,0x3f481c17,0xbe7c391c,4 +np.float32,0x3f47cd28,0xbe7dcd56,4 +np.float32,0x3f478be8,0xbe7f1bf7,4 +np.float32,0x3f4c1f8e,0xbe67e367,4 +np.float32,0x3f489b0c,0xbe79b03f,4 +np.float32,0x3f4934cf,0xbe76a08a,4 +np.float32,0x3f4954df,0xbe75fd6a,4 +np.float32,0x3f47a3f5,0xbe7ea093,4 +np.float32,0x3f4ba4fc,0xbe6a4b02,4 +np.float32,0x3f47a0e1,0xbe7eb05c,4 +np.float32,0x3f48c30a,0xbe78e42f,4 +np.float32,0x3f48cab8,0xbe78bd05,4 +np.float32,0x3f4b0569,0xbe6d6ea4,4 +np.float32,0x3f47de32,0xbe7d7607,4 +np.float32,0x3f477328,0xbe7f9b00,4 +np.float32,0x3f496dab,0xbe757f52,4 +np.float32,0x3f47662c,0xbe7fddac,4 +np.float32,0x3f48ddd8,0xbe785b80,4 +np.float32,0x3f481866,0xbe7c4bff,4 +np.float32,0x3f48b119,0xbe793fb6,4 +np.float32,0x3f48c7e8,0xbe78cb5c,4 +np.float32,0x3f4985f6,0xbe7503da,4 +np.float32,0x3f483fdf,0xbe7b8212,4 +np.float32,0x3f4b1c76,0xbe6cfa67,4 +np.float32,0x3f480b2e,0xbe7c8fa8,4 +np.float32,0x3f48745f,0xbe7a75bf,4 +np.float32,0x3f485bda,0xbe7af308,4 +np.float32,0x3f47a660,0xbe7e942c,4 +np.float32,0x3f47d4d5,0xbe7da600,4 +np.float32,0x3f4b0a26,0xbe6d56be,4 +np.float32,0x3f4a4883,0xbe712924,4 +np.float32,0x3f4769e7,0xbe7fca84,4 +np.float32,0x3f499702,0xbe74ad3f,4 +np.float32,0x3f494ab1,0xbe763131,4 +np.float32,0x3f476b69,0xbe7fc2c6,4 +np.float32,0x3f4884e8,0xbe7a214a,4 +np.float32,0x3f486945,0xbe7aae76,4 +#float64 +## +ve denormal ## +np.float64,0x0000000000000001,0xc0874385446d71c3,1 +np.float64,0x0001000000000000,0xc086395a2079b70c,1 +np.float64,0x000fffffffffffff,0xc086232bdd7abcd2,1 +np.float64,0x0007ad63e2168cb6,0xc086290bc0b2980f,1 +## -ve denormal ## +np.float64,0x8000000000000001,0xfff8000000000001,1 +np.float64,0x8001000000000000,0xfff8000000000001,1 +np.float64,0x800fffffffffffff,0xfff8000000000001,1 +np.float64,0x8007ad63e2168cb6,0xfff8000000000001,1 +## +/-0.0f, MAX, MIN## +np.float64,0x0000000000000000,0xfff0000000000000,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,1 +np.float64,0xffefffffffffffff,0xfff8000000000001,1 +## near 1.0f ## +np.float64,0x3ff0000000000000,0x0000000000000000,1 +np.float64,0x3fe8000000000000,0xbfd269621134db92,1 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,1 +np.float64,0x3ff0000020000000,0x3e7fffffe000002b,1 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,1 +np.float64,0x3fefffffe0000000,0xbe70000008000005,1 +np.float64,0x3fefffffffffffff,0xbca0000000000000,1 +## random numbers ## +np.float64,0x02500186f3d9da56,0xc0855b8abf135773,1 +np.float64,0x09200815a3951173,0xc082ff1ad7131bdc,1 +np.float64,0x0da029623b0243d4,0xc0816fc994695bb5,1 +np.float64,0x48703b8ac483a382,0x40579213a313490b,1 +np.float64,0x09207b74c87c9860,0xc082fee20ff349ef,1 +np.float64,0x62c077698e8df947,0x407821c996d110f0,1 +np.float64,0x2350b45e87c3cfb0,0xc073d6b16b51d072,1 
+np.float64,0x3990a23f9ff2b623,0xc051aa60eadd8c61,1 +np.float64,0x0d011386a116c348,0xc081a6cc7ea3b8fb,1 +np.float64,0x1fe0f0303ebe273a,0xc0763870b78a81ca,1 +np.float64,0x0cd1260121d387da,0xc081b7668d61a9d1,1 +np.float64,0x1e6135a8f581d422,0xc077425ac10f08c2,1 +np.float64,0x622168db5fe52d30,0x4077b3c669b9fadb,1 +np.float64,0x69f188e1ec6d1718,0x407d1e2f18c63889,1 +np.float64,0x3aa1bf1d9c4dd1a3,0xc04d682e24bde479,1 +np.float64,0x6c81c4011ce4f683,0x407ee5190e8a8e6a,1 +np.float64,0x2191fa55aa5a5095,0xc0750c0c318b5e2d,1 +np.float64,0x32a1f602a32bf360,0xc06270caa493fc17,1 +np.float64,0x16023c90ba93249b,0xc07d0f88e0801638,1 +np.float64,0x1c525fe6d71fa9ff,0xc078af49c66a5d63,1 +np.float64,0x1a927675815d65b7,0xc079e5bdd7fe376e,1 +np.float64,0x41227b8fe70da028,0x402aa0c9f9a84c71,1 +np.float64,0x4962bb6e853fe87d,0x405a34aa04c83747,1 +np.float64,0x23d2cda00b26b5a4,0xc0737c13a06d00ea,1 +np.float64,0x2d13083fd62987fa,0xc06a25055aeb474e,1 +np.float64,0x10e31e4c9b4579a1,0xc0804e181929418e,1 +np.float64,0x26d3247d556a86a9,0xc0716774171da7e8,1 +np.float64,0x6603379398d0d4ac,0x407a64f51f8a887b,1 +np.float64,0x02d38af17d9442ba,0xc0852d955ac9dd68,1 +np.float64,0x6a2382b4818dd967,0x407d4129d688e5d4,1 +np.float64,0x2ee3c403c79b3934,0xc067a091fefaf8b6,1 +np.float64,0x6493a699acdbf1a4,0x4079663c8602bfc5,1 +np.float64,0x1c8413c4f0de3100,0xc0788c99697059b6,1 +np.float64,0x4573f1ed350d9622,0x404e9bd1e4c08920,1 +np.float64,0x2f34265c9200b69c,0xc067310cfea4e986,1 +np.float64,0x19b43e65fa22029b,0xc07a7f8877de22d6,1 +np.float64,0x0af48ab7925ed6bc,0xc0825c4fbc0e5ade,1 +np.float64,0x4fa49699cad82542,0x4065c76d2a318235,1 +np.float64,0x7204a15e56ade492,0x40815bb87484dffb,1 +np.float64,0x4734aa08a230982d,0x40542a4bf7a361a9,1 +np.float64,0x1ae4ed296c2fd749,0xc079ac4921f20abb,1 +np.float64,0x472514ea4370289c,0x4053ff372bd8f18f,1 +np.float64,0x53a54b3f73820430,0x406b5411fc5f2e33,1 +np.float64,0x64754de5a15684fa,0x407951592e99a5ab,1 +np.float64,0x69358e279868a7c3,0x407c9c671a882c31,1 +np.float64,0x284579ec61215945,0xc0706688e55f0927,1 +np.float64,0x68b5c58806447adc,0x407c43d6f4eff760,1 +np.float64,0x1945a83f98b0e65d,0xc07acc15eeb032cc,1 +np.float64,0x0fc5eb98a16578bf,0xc080b0d02eddca0e,1 +np.float64,0x6a75e208f5784250,0x407d7a7383bf8f05,1 +np.float64,0x0fe63a029c47645d,0xc080a59ca1e98866,1 +np.float64,0x37963ac53f065510,0xc057236281f7bdb6,1 +np.float64,0x135661bb07067ff7,0xc07ee924930c21e4,1 +np.float64,0x4b4699469d458422,0x405f73843756e887,1 +np.float64,0x1a66d73e4bf4881b,0xc07a039ba1c63adf,1 +np.float64,0x12a6b9b119a7da59,0xc07f62e49c6431f3,1 +np.float64,0x24c719aa8fd1bdb5,0xc072d26da4bf84d3,1 +np.float64,0x0fa6ff524ffef314,0xc080bb8514662e77,1 +np.float64,0x1db751d66fdd4a9a,0xc077b77cb50d7c92,1 +np.float64,0x4947374c516da82c,0x4059e9acfc7105bf,1 +np.float64,0x1b1771ab98f3afc8,0xc07989326b8e1f66,1 +np.float64,0x25e78805baac8070,0xc0720a818e6ef080,1 +np.float64,0x4bd7a148225d3687,0x406082d004ea3ee7,1 +np.float64,0x53d7d6b2bbbda00a,0x406b9a398967cbd5,1 +np.float64,0x6997fb9f4e1c685f,0x407ce0a703413eba,1 +np.float64,0x069802c2ff71b951,0xc083df39bf7acddc,1 +np.float64,0x4d683ac9890f66d8,0x4062ae21d8c2acf0,1 +np.float64,0x5a2825863ec14f4c,0x40722d718d549552,1 +np.float64,0x0398799a88f4db80,0xc084e93dab8e2158,1 +np.float64,0x5ed87a8b77e135a5,0x40756d7051777b33,1 +np.float64,0x5828cd6d79b9bede,0x4070cafb22fc6ca1,1 +np.float64,0x7b18ba2a5ec6f068,0x408481386b3ed6fe,1 +np.float64,0x4938fd60922198fe,0x4059c206b762ea7e,1 +np.float64,0x31b8f44fcdd1a46e,0xc063b2faa8b6434e,1 +np.float64,0x5729341c0d918464,0x407019cac0c4a7d7,1 
+np.float64,0x13595e9228ee878e,0xc07ee7235a7d8088,1
+np.float64,0x17698b0dc9dd4135,0xc07c1627e3a5ad5f,1
+np.float64,0x63b977c283abb0cc,0x4078cf1ec6ed65be,1
+np.float64,0x7349cc0d4dc16943,0x4081cc697ce4cb53,1
+np.float64,0x4e49a80b732fb28d,0x4063e67e3c5cbe90,1
+np.float64,0x07ba14b848a8ae02,0xc0837ac032a094e0,1
+np.float64,0x3da9f17b691bfddc,0xc03929c25366acda,1
+np.float64,0x02ea39aa6c3ac007,0xc08525af6f21e1c4,1
+np.float64,0x3a6a42f04ed9563d,0xc04e98e825dca46b,1
+np.float64,0x1afa877cd7900be7,0xc0799d6648cb34a9,1
+np.float64,0x58ea986649e052c6,0x4071512e939ad790,1
+np.float64,0x691abbc04647f536,0x407c89aaae0fcb83,1
+np.float64,0x43aabc5063e6f284,0x4044b45d18106fd2,1
+np.float64,0x488b003c893e0bea,0x4057df012a2dafbe,1
+np.float64,0x77eb076ed67caee5,0x40836720de94769e,1
+np.float64,0x5c1b46974aba46f4,0x40738731ba256007,1
+np.float64,0x1a5b29ecb5d3c261,0xc07a0becc77040d6,1
+np.float64,0x5d8b6ccf868c6032,0x4074865c1865e2db,1
+np.float64,0x4cfb6690b4aaf5af,0x406216cd8c7e8ddb,1
+np.float64,0x76cbd8eb5c5fc39e,0x4083038dc66d682b,1
+np.float64,0x28bbd1fec5012814,0xc07014c2dd1b9711,1
+np.float64,0x33dc1b3a4fd6bf7a,0xc060bd0756e07d8a,1
+np.float64,0x52bbe89b37de99f3,0x406a10041aa7d343,1
+np.float64,0x07bc479d15eb2dd3,0xc0837a1a6e3a3b61,1
+np.float64,0x18fc5275711a901d,0xc07aff3e9d62bc93,1
+np.float64,0x114c9758e247dc71,0xc080299a7cf15b05,1
+np.float64,0x25ac8f6d60755148,0xc07233c4c0c511d4,1
+np.float64,0x260cae2bb9e9fd7e,0xc071f128c7e82eac,1
+np.float64,0x572ccdfe0241de82,0x40701bedc84bb504,1
+np.float64,0x0ddcef6c8d41f5ee,0xc0815a7e16d07084,1
+np.float64,0x6dad1d59c988af68,0x407fb4a0bc0142b1,1
+np.float64,0x025d200580d8b6d1,0xc08556c0bc32b1b2,1
+np.float64,0x7aad344b6aa74c18,0x40845bbc453f22be,1
+np.float64,0x5b5d9d6ad9d14429,0x4073036d2d21f382,1
+np.float64,0x49cd8d8dcdf19954,0x405b5c034f5c7353,1
+np.float64,0x63edb9483335c1e6,0x4078f2dd21378786,1
+np.float64,0x7b1dd64c9d2c26bd,0x408482b922017bc9,1
+np.float64,0x782e13e0b574be5f,0x40837e2a0090a5ad,1
+np.float64,0x592dfe18b9d6db2f,0x40717f777fbcb1ec,1
+np.float64,0x654e3232ac60d72c,0x4079e71a95a70446,1
+np.float64,0x7b8e42ad22091456,0x4084a9a6f1e61722,1
+np.float64,0x570e88dfd5860ae6,0x407006ae6c0d137a,1
+np.float64,0x294e98346cb98ef1,0xc06f5edaac12bd44,1
+np.float64,0x1adeaa4ab792e642,0xc079b1431d5e2633,1
+np.float64,0x7b6ead3377529ac8,0x40849eabc8c7683c,1
+np.float64,0x2b8eedae8a9b2928,0xc06c400054deef11,1
+np.float64,0x65defb45b2dcf660,0x407a4b53f181c05a,1
+np.float64,0x1baf582d475e7701,0xc07920bcad4a502c,1
+np.float64,0x461f39cf05a0f15a,0x405126368f984fa1,1
+np.float64,0x7e5f6f5dcfff005b,0x4085a37d610439b4,1
+np.float64,0x136f66e4d09bd662,0xc07ed8a2719f2511,1
+np.float64,0x65afd8983fb6ca1f,0x407a2a7f48bf7fc1,1
+np.float64,0x572fa7f95ed22319,0x40701d706cf82e6f,1
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log10.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log10.csv
new file mode 100644
index 0000000..c765777
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log10.csv
@@ -0,0 +1,1629 @@
+dtype,input,output,ulperrortol
+np.float32,0x3f6fd5c8,0xbce80e8e,4
+np.float32,0x3ea4ab17,0xbefc3deb,4
+np.float32,0x3e87a133,0xbf13b0b7,4
+np.float32,0x3f0d9069,0xbe83bb19,4
+np.float32,0x3f7b9269,0xbbf84f47,4
+np.float32,0x3f7a9ffa,0xbc16fd97,4
+np.float32,0x7f535d34,0x4219cb66,4
+np.float32,0x3e79ad7c,0xbf1ce857,4
+np.float32,0x7e8bfd3b,0x4217dfe9,4
+np.float32,0x3f2d2ee9,0xbe2dcec6,4
+np.float32,0x572e04,0xc21862e4,4
+np.float32,0x7f36f8,0xc217bad5,4 +np.float32,0x3f7982fb,0xbc36aaed,4 +np.float32,0x45b019,0xc218c67c,4 +np.float32,0x3f521c46,0xbdafb3e3,4 +np.float32,0x80000001,0x7fc00000,4 +np.float32,0x3f336c81,0xbe1e107f,4 +np.float32,0x3eac92d7,0xbef1d0bb,4 +np.float32,0x47bdfc,0xc218b990,4 +np.float32,0x7f2d94c8,0x421973d1,4 +np.float32,0x7d53ff8d,0x4214fbb6,4 +np.float32,0x3f581e4e,0xbd96a079,4 +np.float32,0x7ddaf20d,0x42163e4e,4 +np.float32,0x3f341d3c,0xbe1c5b4c,4 +np.float32,0x7ef04ba9,0x4218d032,4 +np.float32,0x620ed2,0xc2182e99,4 +np.float32,0x507850,0xc2188682,4 +np.float32,0x7d08f9,0xc217c284,4 +np.float32,0x7f0cf2aa,0x42191734,4 +np.float32,0x3f109a17,0xbe7e04fe,4 +np.float32,0x7f426152,0x4219a625,4 +np.float32,0x7f32d5a3,0x42198113,4 +np.float32,0x2e14b2,0xc2197e6f,4 +np.float32,0x3a5acd,0xc219156a,4 +np.float32,0x50a565,0xc2188589,4 +np.float32,0x5b751c,0xc2184d97,4 +np.float32,0x7e4149f6,0x42173b22,4 +np.float32,0x3dc34bf9,0xbf82a42a,4 +np.float32,0x3d12bc28,0xbfb910d6,4 +np.float32,0x7ebd2584,0x421865c1,4 +np.float32,0x7f6b3375,0x4219faeb,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0x3f35fe7d,0xbe17bd33,4 +np.float32,0x7db45c87,0x4215e818,4 +np.float32,0x3efff366,0xbe9a2b8d,4 +np.float32,0x3eb331d0,0xbee971a3,4 +np.float32,0x3f259d5f,0xbe41ae2e,4 +np.float32,0x3eab85ec,0xbef32c4a,4 +np.float32,0x7f194b8a,0x42193c8c,4 +np.float32,0x3f11a614,0xbe7acfc7,4 +np.float32,0x5b17,0xc221f16b,4 +np.float32,0x3f33dadc,0xbe1cff4d,4 +np.float32,0x3cda1506,0xbfc9920f,4 +np.float32,0x3f6856f1,0xbd2c8290,4 +np.float32,0x7f3357fb,0x42198257,4 +np.float32,0x7f56f329,0x4219d2e1,4 +np.float32,0x3ef84108,0xbea0f595,4 +np.float32,0x3f72340f,0xbcc51916,4 +np.float32,0x3daf28,0xc218fcbd,4 +np.float32,0x131035,0xc21b06f4,4 +np.float32,0x3f275c3b,0xbe3d0487,4 +np.float32,0x3ef06130,0xbea82069,4 +np.float32,0x3f57f3b0,0xbd974fef,4 +np.float32,0x7f6c4a78,0x4219fcfa,4 +np.float32,0x7e8421d0,0x4217c639,4 +np.float32,0x3f17a479,0xbe68e08e,4 +np.float32,0x7f03774e,0x4218f83b,4 +np.float32,0x441a33,0xc218d0b8,4 +np.float32,0x539158,0xc21875b6,4 +np.float32,0x3e8fcc75,0xbf0d3018,4 +np.float32,0x7ef74130,0x4218dce4,4 +np.float32,0x3ea6f4fa,0xbef92c38,4 +np.float32,0x7f3948ab,0x421990d5,4 +np.float32,0x7db6f8f5,0x4215ee7c,4 +np.float32,0x3ee44a2f,0xbeb399e5,4 +np.float32,0x156c59,0xc21ad30d,4 +np.float32,0x3f21ee53,0xbe4baf16,4 +np.float32,0x3f2c08f4,0xbe30c424,4 +np.float32,0x3f49885c,0xbdd4c6a9,4 +np.float32,0x3eae0b9c,0xbeefed54,4 +np.float32,0x1b5c1f,0xc21a6646,4 +np.float32,0x3e7330e2,0xbf1fd592,4 +np.float32,0x3ebbeb4c,0xbededf82,4 +np.float32,0x427154,0xc218dbb1,4 +np.float32,0x3f6b8b4b,0xbd142498,4 +np.float32,0x8e769,0xc21c5981,4 +np.float32,0x3e9db557,0xbf02ec1c,4 +np.float32,0x3f001bef,0xbe99f019,4 +np.float32,0x3e58b48c,0xbf2ca77a,4 +np.float32,0x3d46c16b,0xbfa8327c,4 +np.float32,0x7eeeb305,0x4218cd3b,4 +np.float32,0x3e3f163d,0xbf3aa446,4 +np.float32,0x3f66c872,0xbd3877d9,4 +np.float32,0x7f7162f8,0x421a0677,4 +np.float32,0x3edca3bc,0xbebb2e28,4 +np.float32,0x3dc1055b,0xbf834afa,4 +np.float32,0x12b16f,0xc21b0fad,4 +np.float32,0x3f733898,0xbcb62e16,4 +np.float32,0x3e617af8,0xbf283db0,4 +np.float32,0x7e86577a,0x4217cd99,4 +np.float32,0x3f0ba3c7,0xbe86c633,4 +np.float32,0x3f4cad25,0xbdc70247,4 +np.float32,0xb6cdf,0xc21bea9f,4 +np.float32,0x3f42971a,0xbdf3f49e,4 +np.float32,0x3e6ccad2,0xbf22cc78,4 +np.float32,0x7f2121b2,0x421952b8,4 +np.float32,0x3f6d3f55,0xbd075366,4 +np.float32,0x3f524f,0xc218f117,4 +np.float32,0x3e95b5d9,0xbf08b56a,4 +np.float32,0x7f6ae47d,0x4219fa56,4 +np.float32,0x267539,0xc219ceda,4 
+np.float32,0x3ef72f6d,0xbea1eb2e,4 +np.float32,0x2100b2,0xc21a12e2,4 +np.float32,0x3d9777d1,0xbf90c4e7,4 +np.float32,0x44c6f5,0xc218cc56,4 +np.float32,0x7f2a613d,0x42196b8a,4 +np.float32,0x390a25,0xc2191f8d,4 +np.float32,0x3f1de5ad,0xbe56e703,4 +np.float32,0x2f59ce,0xc2197258,4 +np.float32,0x7f3b12a1,0x4219951b,4 +np.float32,0x3ecb66d4,0xbecd44ca,4 +np.float32,0x7e74ff,0xc217bd7d,4 +np.float32,0x7ed83f78,0x4218a14d,4 +np.float32,0x685994,0xc21812f1,4 +np.float32,0xbf800000,0x7fc00000,4 +np.float32,0x736f47,0xc217e60b,4 +np.float32,0x7f09c371,0x42190d0a,4 +np.float32,0x3f7ca51d,0xbbbbbce0,4 +np.float32,0x7f4b4d3b,0x4219ba1a,4 +np.float32,0x3f6c4471,0xbd0eb076,4 +np.float32,0xd944e,0xc21b9dcf,4 +np.float32,0x7cb06ffc,0x421375cd,4 +np.float32,0x586187,0xc2185cce,4 +np.float32,0x3f3cbf5b,0xbe078911,4 +np.float32,0x3f30b504,0xbe24d983,4 +np.float32,0x3f0a16ba,0xbe8941fd,4 +np.float32,0x5c43b0,0xc21849af,4 +np.float32,0x3dad74f6,0xbf893bd5,4 +np.float32,0x3c586958,0xbff087a6,4 +np.float32,0x3e8307a8,0xbf1786ba,4 +np.float32,0x7dcd1776,0x4216213d,4 +np.float32,0x3f44d107,0xbde9d662,4 +np.float32,0x3e2e6823,0xbf44cbec,4 +np.float32,0x3d87ea27,0xbf96caca,4 +np.float32,0x3e0c715b,0xbf5ce07e,4 +np.float32,0x7ec9cd5a,0x4218828e,4 +np.float32,0x3e26c0b4,0xbf49c93e,4 +np.float32,0x75b94e,0xc217dd50,4 +np.float32,0x3df7b9f5,0xbf6ad7f4,4 +np.float32,0x0,0xff800000,4 +np.float32,0x3f284795,0xbe3a94da,4 +np.float32,0x7ee49092,0x4218b9f0,4 +np.float32,0x7f4c20e0,0x4219bbe8,4 +np.float32,0x3efbbce8,0xbe9ddc4b,4 +np.float32,0x12274a,0xc21b1cb4,4 +np.float32,0x5fa1b1,0xc21839be,4 +np.float32,0x7f0b210e,0x4219116d,4 +np.float32,0x3f67092a,0xbd368545,4 +np.float32,0x3d572721,0xbfa3ca5b,4 +np.float32,0x3f7913ce,0xbc431028,4 +np.float32,0x3b0613,0xc2191059,4 +np.float32,0x3e1d16c0,0xbf506c6f,4 +np.float32,0xab130,0xc21c081d,4 +np.float32,0x3e23ac97,0xbf4bdb9d,4 +np.float32,0x7ef52368,0x4218d911,4 +np.float32,0x7f38e686,0x42198fe9,4 +np.float32,0x3f106a21,0xbe7e9897,4 +np.float32,0x3ecef8d5,0xbec96644,4 +np.float32,0x3ec37e02,0xbed61683,4 +np.float32,0x3efbd063,0xbe9dcb17,4 +np.float32,0x3f318fe3,0xbe22b402,4 +np.float32,0x7e5e5228,0x4217795d,4 +np.float32,0x72a046,0xc217e92c,4 +np.float32,0x7f6f970b,0x421a0324,4 +np.float32,0x3ed871b4,0xbebf72fb,4 +np.float32,0x7a2eaa,0xc217ccc8,4 +np.float32,0x3e819655,0xbf18c1d7,4 +np.float32,0x80800000,0x7fc00000,4 +np.float32,0x7eab0719,0x421838f9,4 +np.float32,0x7f0763cb,0x4219054f,4 +np.float32,0x3f191672,0xbe64a8af,4 +np.float32,0x7d4327,0xc217c1b6,4 +np.float32,0x3f724ba6,0xbcc3bea3,4 +np.float32,0x60fe06,0xc2183375,4 +np.float32,0x48cd59,0xc218b30b,4 +np.float32,0x3f7fec2b,0xb909d3f3,4 +np.float32,0x1c7bb9,0xc21a5460,4 +np.float32,0x24d8a8,0xc219e1e4,4 +np.float32,0x3e727c52,0xbf20283c,4 +np.float32,0x4bc460,0xc218a14a,4 +np.float32,0x63e313,0xc2182661,4 +np.float32,0x7f625581,0x4219e9d4,4 +np.float32,0x3eeb3e77,0xbeacedc0,4 +np.float32,0x7ef27a47,0x4218d437,4 +np.float32,0x27105a,0xc219c7e6,4 +np.float32,0x22a10b,0xc219fd7d,4 +np.float32,0x3f41e907,0xbdf711ab,4 +np.float32,0x7c1fbf95,0x4212155b,4 +np.float32,0x7e5acceb,0x42177244,4 +np.float32,0x3e0892fa,0xbf5ffb83,4 +np.float32,0x3ea0e51d,0xbf00b2c0,4 +np.float32,0x3e56fc29,0xbf2d8a51,4 +np.float32,0x7ee724ed,0x4218beed,4 +np.float32,0x7ebf142b,0x42186a46,4 +np.float32,0x7f6cf35c,0x4219fe37,4 +np.float32,0x3f11abf7,0xbe7abdcd,4 +np.float32,0x588d7a,0xc2185bf1,4 +np.float32,0x3f6e81d2,0xbcfbcf97,4 +np.float32,0x3f1b6be8,0xbe5dee2b,4 +np.float32,0x7f3815e0,0x42198df2,4 +np.float32,0x3f5bfc88,0xbd86d93d,4 
+np.float32,0x3f3775d0,0xbe142bbc,4 +np.float32,0x78a958,0xc217d25a,4 +np.float32,0x2ff7c3,0xc2196c96,4 +np.float32,0x4b9c0,0xc21d733c,4 +np.float32,0x3ec025af,0xbed9ecf3,4 +np.float32,0x6443f0,0xc21824b3,4 +np.float32,0x3f754e28,0xbc97d299,4 +np.float32,0x3eaa91d3,0xbef4699d,4 +np.float32,0x3e5f2837,0xbf296478,4 +np.float32,0xe5676,0xc21b85a4,4 +np.float32,0x3f6859f2,0xbd2c6b90,4 +np.float32,0x3f68686b,0xbd2bfcc6,4 +np.float32,0x4b39b8,0xc218a47b,4 +np.float32,0x630ac4,0xc2182a28,4 +np.float32,0x160980,0xc21ac67d,4 +np.float32,0x3ed91c4d,0xbebec3fd,4 +np.float32,0x7ec27b0d,0x4218721f,4 +np.float32,0x3f3c0a5f,0xbe09344b,4 +np.float32,0x3dbff9c1,0xbf839841,4 +np.float32,0x7f0e8ea7,0x42191c40,4 +np.float32,0x3f36b162,0xbe1608e4,4 +np.float32,0x228bb3,0xc219fe90,4 +np.float32,0x2fdd30,0xc2196d8c,4 +np.float32,0x3e8fce8e,0xbf0d2e79,4 +np.float32,0x3f36acc7,0xbe16141a,4 +np.float32,0x7f44b51c,0x4219ab70,4 +np.float32,0x3ec3371c,0xbed66736,4 +np.float32,0x4388a2,0xc218d473,4 +np.float32,0x3f5aa6c3,0xbd8c4344,4 +np.float32,0x7f09fce4,0x42190dc3,4 +np.float32,0x7ed7854a,0x42189fce,4 +np.float32,0x7f4da83a,0x4219bf3a,4 +np.float32,0x3db8da28,0xbf85b25a,4 +np.float32,0x7f449686,0x4219ab2b,4 +np.float32,0x2eb25,0xc21e498c,4 +np.float32,0x3f2bcc08,0xbe3161bd,4 +np.float32,0x36c923,0xc219317b,4 +np.float32,0x3d52a866,0xbfa4f6d2,4 +np.float32,0x3f7d6688,0xbb913e4e,4 +np.float32,0x3f5a6ba4,0xbd8d33e3,4 +np.float32,0x719740,0xc217ed35,4 +np.float32,0x78a472,0xc217d26c,4 +np.float32,0x7ee33d0c,0x4218b759,4 +np.float32,0x7f668c1d,0x4219f208,4 +np.float32,0x3e29c600,0xbf47ca46,4 +np.float32,0x3f3cefc3,0xbe071712,4 +np.float32,0x3e224ebd,0xbf4cca41,4 +np.float32,0x7f1417be,0x42192d31,4 +np.float32,0x7f29d7d5,0x42196a23,4 +np.float32,0x3338ce,0xc2194f65,4 +np.float32,0x2a7897,0xc219a2b6,4 +np.float32,0x3d6bc3d8,0xbf9eb468,4 +np.float32,0x3f6bd7bf,0xbd11e392,4 +np.float32,0x7f6d26bf,0x4219fe98,4 +np.float32,0x3f52d378,0xbdacadb5,4 +np.float32,0x3efac453,0xbe9eb84a,4 +np.float32,0x3f692eb7,0xbd261184,4 +np.float32,0x3f6a0bb5,0xbd1f7ec1,4 +np.float32,0x3f037a49,0xbe942aa8,4 +np.float32,0x3f465bd4,0xbde2e530,4 +np.float32,0x7ef0f47b,0x4218d16a,4 +np.float32,0x637127,0xc218285e,4 +np.float32,0x3f41e511,0xbdf723d7,4 +np.float32,0x7f800000,0x7f800000,4 +np.float32,0x3f3342d5,0xbe1e77d5,4 +np.float32,0x7f57cfe6,0x4219d4a9,4 +np.float32,0x3e4358ed,0xbf3830a7,4 +np.float32,0x3ce25f15,0xbfc77f2b,4 +np.float32,0x7ed057e7,0x421890be,4 +np.float32,0x7ce154d9,0x4213e295,4 +np.float32,0x3ee91984,0xbeaef703,4 +np.float32,0x7e4e919c,0x421758af,4 +np.float32,0x6830e7,0xc218139e,4 +np.float32,0x3f12f08e,0xbe76e328,4 +np.float32,0x7f0a7a32,0x42190f56,4 +np.float32,0x7f38e,0xc21c8bd3,4 +np.float32,0x3e01def9,0xbf6593e3,4 +np.float32,0x3f5c8c6d,0xbd849432,4 +np.float32,0x3eed8747,0xbeaac7a3,4 +np.float32,0x3cadaa0e,0xbfd63b21,4 +np.float32,0x3f7532a9,0xbc996178,4 +np.float32,0x31f3ac,0xc2195a8f,4 +np.float32,0x3f0e0f97,0xbe82f3af,4 +np.float32,0x3f2a1f35,0xbe35bd3f,4 +np.float32,0x3f4547b2,0xbde7bebd,4 +np.float32,0x3f7988a6,0xbc36094c,4 +np.float32,0x74464c,0xc217e2d2,4 +np.float32,0x7f7518be,0x421a0d3f,4 +np.float32,0x7e97fa0a,0x42180473,4 +np.float32,0x584e3a,0xc2185d2f,4 +np.float32,0x3e7291f3,0xbf201e52,4 +np.float32,0xc0a05,0xc21bd359,4 +np.float32,0x3a3177,0xc21916a6,4 +np.float32,0x4f417f,0xc2188d45,4 +np.float32,0x263fce,0xc219d145,4 +np.float32,0x7e1d58,0xc217beb1,4 +np.float32,0x7f056af3,0x4218fec9,4 +np.float32,0x3f21c181,0xbe4c2a3f,4 +np.float32,0x7eca4956,0x4218839f,4 +np.float32,0x3e58afa8,0xbf2ca9fd,4 
+np.float32,0x3f40d583,0xbdfc04ef,4 +np.float32,0x7f432fbb,0x4219a7fc,4 +np.float32,0x43aaa4,0xc218d393,4 +np.float32,0x7f2c9b62,0x42197150,4 +np.float32,0x5c3876,0xc21849e5,4 +np.float32,0x7f2034e8,0x42195029,4 +np.float32,0x7e5be772,0x42177481,4 +np.float32,0x80000000,0xff800000,4 +np.float32,0x3f5be03b,0xbd874bb0,4 +np.float32,0x3e32494f,0xbf4259be,4 +np.float32,0x3e1f4671,0xbf4ee30b,4 +np.float32,0x4606cc,0xc218c454,4 +np.float32,0x425cbc,0xc218dc3b,4 +np.float32,0x7dd9b8bf,0x42163bd0,4 +np.float32,0x3f0465d0,0xbe929db7,4 +np.float32,0x3f735077,0xbcb4d0fa,4 +np.float32,0x4d6a43,0xc21897b8,4 +np.float32,0x3e27d600,0xbf4910f5,4 +np.float32,0x3f06e0cc,0xbe8e7d24,4 +np.float32,0x3f3fd064,0xbe005e45,4 +np.float32,0x176f1,0xc21f7c2d,4 +np.float32,0x3eb64e6f,0xbee59d9c,4 +np.float32,0x7f0f075d,0x42191db8,4 +np.float32,0x3f718cbe,0xbcceb621,4 +np.float32,0x3ead7bda,0xbef0a54a,4 +np.float32,0x7f77c1a8,0x421a120c,4 +np.float32,0x3f6a79c5,0xbd1c3afd,4 +np.float32,0x3e992d1f,0xbf062a02,4 +np.float32,0x3e6f6335,0xbf219639,4 +np.float32,0x7f6d9a3e,0x4219ff70,4 +np.float32,0x557ed1,0xc2186b91,4 +np.float32,0x3f13a456,0xbe74c457,4 +np.float32,0x15c2dc,0xc21acc17,4 +np.float32,0x71f36f,0xc217ebcc,4 +np.float32,0x748dea,0xc217e1c1,4 +np.float32,0x7f0f32e0,0x42191e3f,4 +np.float32,0x5b1da8,0xc2184f41,4 +np.float32,0x3d865d3a,0xbf976e11,4 +np.float32,0x3f800000,0x0,4 +np.float32,0x7f67b56d,0x4219f444,4 +np.float32,0x6266a1,0xc2182d0c,4 +np.float32,0x3ec9c5e4,0xbecf0e6b,4 +np.float32,0x6a6a0e,0xc2180a3b,4 +np.float32,0x7e9db6fd,0x421814ef,4 +np.float32,0x3e7458f7,0xbf1f4e88,4 +np.float32,0x3ead8016,0xbef09fdc,4 +np.float32,0x3e263d1c,0xbf4a211e,4 +np.float32,0x7f6b3329,0x4219faeb,4 +np.float32,0x800000,0xc217b818,4 +np.float32,0x3f0654c7,0xbe8f6471,4 +np.float32,0x3f281b71,0xbe3b0990,4 +np.float32,0x7c4c8e,0xc217c524,4 +np.float32,0x7d113a87,0x4214537d,4 +np.float32,0x734b5f,0xc217e696,4 +np.float32,0x7f079d05,0x4219060b,4 +np.float32,0x3ee830b1,0xbeafd58b,4 +np.float32,0x3f1c3b8b,0xbe5b9d96,4 +np.float32,0x3f2bf0c6,0xbe3102aa,4 +np.float32,0x7ddffe22,0x42164871,4 +np.float32,0x3f1e58b4,0xbe55a37f,4 +np.float32,0x5f3edf,0xc2183b8a,4 +np.float32,0x7f1fb6ec,0x42194eca,4 +np.float32,0x3f78718e,0xbc55311e,4 +np.float32,0x3e574b7d,0xbf2d6152,4 +np.float32,0x7eab27c6,0x4218394e,4 +np.float32,0x7f34603c,0x421984e5,4 +np.float32,0x3f3a8b57,0xbe0cc1ca,4 +np.float32,0x3f744181,0xbca7134e,4 +np.float32,0x3f7e3bc4,0xbb45156b,4 +np.float32,0x93ab4,0xc21c498b,4 +np.float32,0x7ed5541e,0x42189b42,4 +np.float32,0x6bf8ec,0xc21803c4,4 +np.float32,0x757395,0xc217de58,4 +np.float32,0x7f177214,0x42193726,4 +np.float32,0x59935f,0xc21856d6,4 +np.float32,0x2cd9ba,0xc2198a78,4 +np.float32,0x3ef6fd5c,0xbea2183c,4 +np.float32,0x3ebb6c63,0xbedf75e0,4 +np.float32,0x7f43272c,0x4219a7e9,4 +np.float32,0x7f42e67d,0x4219a755,4 +np.float32,0x3f3f744f,0xbe0133f6,4 +np.float32,0x7f5fddaa,0x4219e4f4,4 +np.float32,0x3dc9874f,0xbf80e529,4 +np.float32,0x3f2efe64,0xbe292ec8,4 +np.float32,0x3e0406a6,0xbf63bf7c,4 +np.float32,0x3cdbb0aa,0xbfc92984,4 +np.float32,0x3e6597e7,0xbf263b30,4 +np.float32,0x3f0c1153,0xbe861807,4 +np.float32,0x7fce16,0xc217b8c6,4 +np.float32,0x3f5f4e5f,0xbd730dc6,4 +np.float32,0x3ed41ffa,0xbec3ee69,4 +np.float32,0x3f216c78,0xbe4d1446,4 +np.float32,0x3f123ed7,0xbe78fe4b,4 +np.float32,0x7f7e0ca9,0x421a1d34,4 +np.float32,0x7e318af4,0x42171558,4 +np.float32,0x7f1e1659,0x42194a3d,4 +np.float32,0x34d12a,0xc21941c2,4 +np.float32,0x3d9566ad,0xbf918870,4 +np.float32,0x3e799a47,0xbf1cf0e5,4 +np.float32,0x3e89dd6f,0xbf11df76,4 
+np.float32,0x32f0d3,0xc21951d8,4 +np.float32,0x7e89d17e,0x4217d8f6,4 +np.float32,0x1f3b38,0xc21a2b6b,4 +np.float32,0x7ee9e060,0x4218c427,4 +np.float32,0x31a673,0xc2195d41,4 +np.float32,0x5180f1,0xc21880d5,4 +np.float32,0x3cd36f,0xc21902f8,4 +np.float32,0x3bb63004,0xc01050cb,4 +np.float32,0x3e8ee9d1,0xbf0ddfde,4 +np.float32,0x3d2a7da3,0xbfb0b970,4 +np.float32,0x3ea58107,0xbefb1dc3,4 +np.float32,0x7f6760b0,0x4219f3a2,4 +np.float32,0x7f7f9e08,0x421a1ff0,4 +np.float32,0x37e7f1,0xc219287b,4 +np.float32,0x3ef7eb53,0xbea14267,4 +np.float32,0x3e2eb581,0xbf449aa5,4 +np.float32,0x3da7671c,0xbf8b3568,4 +np.float32,0x7af36f7b,0x420f33ee,4 +np.float32,0x3eb3602c,0xbee93823,4 +np.float32,0x3f68bcff,0xbd2975de,4 +np.float32,0x3ea7cefb,0xbef80a9d,4 +np.float32,0x3f329689,0xbe202414,4 +np.float32,0x7f0c7c80,0x421915be,4 +np.float32,0x7f4739b8,0x4219b118,4 +np.float32,0x73af58,0xc217e515,4 +np.float32,0x7f13eb2a,0x42192cab,4 +np.float32,0x30f2d9,0xc2196395,4 +np.float32,0x7ea7066c,0x42182e71,4 +np.float32,0x669fec,0xc2181a5b,4 +np.float32,0x3f7d6876,0xbb90d1ef,4 +np.float32,0x3f08a4ef,0xbe8b9897,4 +np.float32,0x7f2a906c,0x42196c05,4 +np.float32,0x3ed3ca42,0xbec44856,4 +np.float32,0x9d27,0xc220fee2,4 +np.float32,0x3e4508a1,0xbf373c03,4 +np.float32,0x3e41f8de,0xbf38f9bb,4 +np.float32,0x3e912714,0xbf0c255b,4 +np.float32,0xff800000,0x7fc00000,4 +np.float32,0x7eefd13d,0x4218cf4f,4 +np.float32,0x3f491674,0xbdd6bded,4 +np.float32,0x3ef49512,0xbea445c9,4 +np.float32,0x3f045b79,0xbe92af15,4 +np.float32,0x3ef6c412,0xbea24bd5,4 +np.float32,0x3e6f3c28,0xbf21a85d,4 +np.float32,0x3ef71839,0xbea2000e,4 +np.float32,0x1,0xc23369f4,4 +np.float32,0x3e3fcfe4,0xbf3a3876,4 +np.float32,0x3e9d7a65,0xbf0315b2,4 +np.float32,0x20b7c4,0xc21a16bd,4 +np.float32,0x7f707b10,0x421a04cb,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3f285ebd,0xbe3a57ac,4 +np.float32,0x74c9ea,0xc217e0dc,4 +np.float32,0x3f6501f2,0xbd4634ab,4 +np.float32,0x3f248959,0xbe4495cc,4 +np.float32,0x7e915ff0,0x4217f0b3,4 +np.float32,0x7edbb910,0x4218a864,4 +np.float32,0x3f7042dd,0xbce1bddb,4 +np.float32,0x6f08c9,0xc217f754,4 +np.float32,0x7f423993,0x4219a5ca,4 +np.float32,0x3f125704,0xbe78b4cd,4 +np.float32,0x7ef7f5ae,0x4218de28,4 +np.float32,0x3f2dd940,0xbe2c1a33,4 +np.float32,0x3f1ca78e,0xbe5a6a8b,4 +np.float32,0x244863,0xc219e8be,4 +np.float32,0x3f2614fe,0xbe406d6b,4 +np.float32,0x3e75e7a3,0xbf1e99b5,4 +np.float32,0x2bdd6e,0xc2199459,4 +np.float32,0x7e49e279,0x42174e7b,4 +np.float32,0x3e3bb09a,0xbf3ca2cd,4 +np.float32,0x649f06,0xc2182320,4 +np.float32,0x7f4a44e1,0x4219b7d6,4 +np.float32,0x400473,0xc218ec3a,4 +np.float32,0x3edb19ad,0xbebcbcad,4 +np.float32,0x3d8ee956,0xbf94006c,4 +np.float32,0x7e91c603,0x4217f1eb,4 +np.float32,0x221384,0xc21a04a6,4 +np.float32,0x7f7dd660,0x421a1cd5,4 +np.float32,0x7ef34609,0x4218d5ac,4 +np.float32,0x7f5ed529,0x4219e2e5,4 +np.float32,0x7f1bf685,0x42194438,4 +np.float32,0x3cdd094a,0xbfc8d294,4 +np.float32,0x7e87fc8e,0x4217d303,4 +np.float32,0x7f53d971,0x4219cc6b,4 +np.float32,0xabc8b,0xc21c0646,4 +np.float32,0x7f5011e6,0x4219c46a,4 +np.float32,0x7e460638,0x421745e5,4 +np.float32,0xa8126,0xc21c0ffd,4 +np.float32,0x3eec2a66,0xbeac0f2d,4 +np.float32,0x3f3a1213,0xbe0de340,4 +np.float32,0x7f5908db,0x4219d72c,4 +np.float32,0x7e0ad3c5,0x4216a7f3,4 +np.float32,0x3f2de40e,0xbe2bfe90,4 +np.float32,0x3d0463c5,0xbfbec8e4,4 +np.float32,0x7c7cde0b,0x4212e19a,4 +np.float32,0x74c24f,0xc217e0f9,4 +np.float32,0x3f14b4cb,0xbe71929b,4 +np.float32,0x3e94e192,0xbf09537f,4 +np.float32,0x3eebde71,0xbeac56bd,4 +np.float32,0x3f65e413,0xbd3f5b8a,4 
+np.float32,0x7e109199,0x4216b9f9,4 +np.float32,0x3f22f5d0,0xbe48ddc0,4 +np.float32,0x3e22d3bc,0xbf4c6f4d,4 +np.float32,0x3f7a812f,0xbc1a680b,4 +np.float32,0x3f67f361,0xbd2f7d7c,4 +np.float32,0x3f1caa63,0xbe5a6281,4 +np.float32,0x3f306fde,0xbe2587ab,4 +np.float32,0x3e8df9d3,0xbf0e9b2f,4 +np.float32,0x3eaaccc4,0xbef41cd4,4 +np.float32,0x7f3f65ec,0x42199f45,4 +np.float32,0x3dc706e0,0xbf8196ec,4 +np.float32,0x3e14eaba,0xbf565cf6,4 +np.float32,0xcc60,0xc2208a09,4 +np.float32,0x358447,0xc2193be7,4 +np.float32,0x3dcecade,0xbf7eec70,4 +np.float32,0x3f20b4f8,0xbe4f0ef0,4 +np.float32,0x7e7c979f,0x4217b222,4 +np.float32,0x7f2387b9,0x4219594a,4 +np.float32,0x3f6f6e5c,0xbcee0e05,4 +np.float32,0x7f19ad81,0x42193da8,4 +np.float32,0x5635e1,0xc21867dd,4 +np.float32,0x4c5e97,0xc2189dc4,4 +np.float32,0x7f35f97f,0x421988d1,4 +np.float32,0x7f685224,0x4219f571,4 +np.float32,0x3eca0616,0xbecec7b8,4 +np.float32,0x3f436d0d,0xbdf024ca,4 +np.float32,0x12a97d,0xc21b106a,4 +np.float32,0x7f0fdc93,0x4219204d,4 +np.float32,0x3debfb42,0xbf703e65,4 +np.float32,0x3c6c54d2,0xbfeba291,4 +np.float32,0x7e5d7491,0x421777a1,4 +np.float32,0x3f4bd2f0,0xbdcab87d,4 +np.float32,0x3f7517f4,0xbc9ae510,4 +np.float32,0x3f71a59a,0xbccd480d,4 +np.float32,0x3f514653,0xbdb33f61,4 +np.float32,0x3f4e6ea4,0xbdbf694b,4 +np.float32,0x3eadadec,0xbef06526,4 +np.float32,0x3f3b41c1,0xbe0b0fbf,4 +np.float32,0xc35a,0xc2209e1e,4 +np.float32,0x384982,0xc2192575,4 +np.float32,0x3464c3,0xc2194556,4 +np.float32,0x7f5e20d9,0x4219e17d,4 +np.float32,0x3ea18b62,0xbf004016,4 +np.float32,0x63a02b,0xc218278c,4 +np.float32,0x7ef547ba,0x4218d953,4 +np.float32,0x3f2496fb,0xbe4470f4,4 +np.float32,0x7ea0c8c6,0x42181d81,4 +np.float32,0x3f42ba60,0xbdf35372,4 +np.float32,0x7e40d9,0xc217be34,4 +np.float32,0x3e95883b,0xbf08d750,4 +np.float32,0x3e0cddf3,0xbf5c8aa8,4 +np.float32,0x3f2305d5,0xbe48b20a,4 +np.float32,0x7f0d0941,0x4219177b,4 +np.float32,0x3f7b98d3,0xbbf6e477,4 +np.float32,0x3f687cdc,0xbd2b6057,4 +np.float32,0x3f42ce91,0xbdf2f73d,4 +np.float32,0x3ee00fc0,0xbeb7c217,4 +np.float32,0x7f3d483a,0x42199a53,4 +np.float32,0x3e1e08eb,0xbf4fc18d,4 +np.float32,0x7e202ff5,0x4216e798,4 +np.float32,0x582898,0xc2185ded,4 +np.float32,0x3e3552b1,0xbf40790c,4 +np.float32,0x3d3f7c87,0xbfaa44b6,4 +np.float32,0x669d8e,0xc2181a65,4 +np.float32,0x3f0e21b4,0xbe82d757,4 +np.float32,0x686f95,0xc2181293,4 +np.float32,0x3f48367f,0xbdda9ead,4 +np.float32,0x3dc27802,0xbf82e0a0,4 +np.float32,0x3f6ac40c,0xbd1a07d4,4 +np.float32,0x3bba6d,0xc2190b12,4 +np.float32,0x3ec7b6b0,0xbed15665,4 +np.float32,0x3f1f9ca4,0xbe521955,4 +np.float32,0x3ef2f147,0xbea5c4b8,4 +np.float32,0x7c65f769,0x4212b762,4 +np.float32,0x7e98e162,0x42180716,4 +np.float32,0x3f0f0c09,0xbe8169ea,4 +np.float32,0x3d67f03b,0xbf9f9d48,4 +np.float32,0x7f3751e4,0x42198c18,4 +np.float32,0x7f1fac61,0x42194ead,4 +np.float32,0x3e9b698b,0xbf048d89,4 +np.float32,0x7e66507b,0x42178913,4 +np.float32,0x7f5cb680,0x4219dea5,4 +np.float32,0x234700,0xc219f53e,4 +np.float32,0x3d9984ad,0xbf900591,4 +np.float32,0x3f33a3f2,0xbe1d872a,4 +np.float32,0x3eaf52b6,0xbeee4cf4,4 +np.float32,0x7f078930,0x421905ca,4 +np.float32,0x3f083b39,0xbe8c44df,4 +np.float32,0x3e3823f8,0xbf3ec231,4 +np.float32,0x3eef6f5d,0xbea9008c,4 +np.float32,0x6145e1,0xc218322c,4 +np.float32,0x16d9ae,0xc21ab65f,4 +np.float32,0x7e543376,0x421764a5,4 +np.float32,0x3ef77ccb,0xbea1a5a0,4 +np.float32,0x3f4a443f,0xbdd18af5,4 +np.float32,0x8f209,0xc21c5770,4 +np.float32,0x3ecac126,0xbecdfa33,4 +np.float32,0x3e8662f9,0xbf14b6c7,4 +np.float32,0x23759a,0xc219f2f4,4 +np.float32,0xf256d,0xc21b6d3f,4 
+np.float32,0x3f579f93,0xbd98aaa2,4 +np.float32,0x3ed4cc8e,0xbec339cb,4 +np.float32,0x3ed25400,0xbec5d2a1,4 +np.float32,0x3ed6f8ba,0xbec0f795,4 +np.float32,0x7f36efd9,0x42198b2a,4 +np.float32,0x7f5169dd,0x4219c746,4 +np.float32,0x7de18a20,0x42164b80,4 +np.float32,0x3e8de526,0xbf0eab61,4 +np.float32,0x3de0cbcd,0xbf75a47e,4 +np.float32,0xe265f,0xc21b8b82,4 +np.float32,0x3df3cdbd,0xbf6c9e40,4 +np.float32,0x3f38a25a,0xbe115589,4 +np.float32,0x7f01f2c0,0x4218f311,4 +np.float32,0x3da7d5f4,0xbf8b10a5,4 +np.float32,0x4d4fe8,0xc2189850,4 +np.float32,0x3cc96d9d,0xbfcdfc8d,4 +np.float32,0x259a88,0xc219d8d7,4 +np.float32,0x7f1d5102,0x42194810,4 +np.float32,0x7e17ca91,0x4216cfa7,4 +np.float32,0x3f73d110,0xbcad7a8f,4 +np.float32,0x3f009383,0xbe9920ed,4 +np.float32,0x7e22af,0xc217be9f,4 +np.float32,0x3f7de2ce,0xbb6c0394,4 +np.float32,0x3edd0cd2,0xbebac45a,4 +np.float32,0x3ec9b5c1,0xbecf2035,4 +np.float32,0x3168c5,0xc2195f6b,4 +np.float32,0x3e935522,0xbf0a7d18,4 +np.float32,0x3e494077,0xbf34e120,4 +np.float32,0x3f52ed06,0xbdac41ec,4 +np.float32,0x3f73d51e,0xbcad3f65,4 +np.float32,0x3f03d453,0xbe939295,4 +np.float32,0x7ef4ee68,0x4218d8b1,4 +np.float32,0x3ed0e2,0xc218f4a7,4 +np.float32,0x4efab8,0xc2188ed3,4 +np.float32,0x3dbd5632,0xbf845d3b,4 +np.float32,0x7eecad4f,0x4218c972,4 +np.float32,0x9d636,0xc21c2d32,4 +np.float32,0x3e5f3b6b,0xbf295ae7,4 +np.float32,0x7f4932df,0x4219b57a,4 +np.float32,0x4b59b5,0xc218a3be,4 +np.float32,0x3e5de97f,0xbf2a03b4,4 +np.float32,0x3f1c479d,0xbe5b7b3c,4 +np.float32,0x3f42e7e4,0xbdf283a5,4 +np.float32,0x2445,0xc2238af2,4 +np.float32,0x7aa71b43,0x420e8c9e,4 +np.float32,0x3ede6e4e,0xbeb961e1,4 +np.float32,0x7f05dd3b,0x42190045,4 +np.float32,0x3ef5b55c,0xbea3404b,4 +np.float32,0x7f738624,0x421a0a62,4 +np.float32,0x3e7d50a1,0xbf1b4cb4,4 +np.float32,0x3f44cc4a,0xbde9ebcc,4 +np.float32,0x7e1a7b0b,0x4216d777,4 +np.float32,0x3f1d9868,0xbe57c0da,4 +np.float32,0x1ebee2,0xc21a3263,4 +np.float32,0x31685f,0xc2195f6e,4 +np.float32,0x368a8e,0xc2193379,4 +np.float32,0xa9847,0xc21c0c2e,4 +np.float32,0x3bd3b3,0xc2190a56,4 +np.float32,0x3961e4,0xc2191ce3,4 +np.float32,0x7e13a243,0x4216c34e,4 +np.float32,0x7f7b1790,0x421a17ff,4 +np.float32,0x3e55f020,0xbf2e1545,4 +np.float32,0x3f513861,0xbdb37aa8,4 +np.float32,0x3dd9e754,0xbf791ad2,4 +np.float32,0x5e8d86,0xc2183ec9,4 +np.float32,0x26b796,0xc219cbdd,4 +np.float32,0x429daa,0xc218da89,4 +np.float32,0x3f477caa,0xbdddd9ba,4 +np.float32,0x3f0e5114,0xbe828d45,4 +np.float32,0x3f54f362,0xbda3c286,4 +np.float32,0x6eac1c,0xc217f8c8,4 +np.float32,0x3f04c479,0xbe91fef5,4 +np.float32,0x3e993765,0xbf06228e,4 +np.float32,0x3eafd99f,0xbeeda21b,4 +np.float32,0x3f2a759e,0xbe34db96,4 +np.float32,0x3f05adfb,0xbe907937,4 +np.float32,0x3f6e2dfc,0xbd005980,4 +np.float32,0x3f2f2daa,0xbe28b6b5,4 +np.float32,0x15e746,0xc21ac931,4 +np.float32,0x7d34ca26,0x4214b4e5,4 +np.float32,0x7ebd175c,0x4218659f,4 +np.float32,0x7f1ed26b,0x42194c4c,4 +np.float32,0x2588b,0xc21eaab0,4 +np.float32,0x3f0065e3,0xbe996fe2,4 +np.float32,0x3f610376,0xbd658122,4 +np.float32,0x451995,0xc218ca41,4 +np.float32,0x70e083,0xc217f002,4 +np.float32,0x7e19821a,0x4216d4a8,4 +np.float32,0x3e7cd9a0,0xbf1b80fb,4 +np.float32,0x7f1a8f18,0x42194033,4 +np.float32,0x3f008fee,0xbe99271f,4 +np.float32,0xff7fffff,0x7fc00000,4 +np.float32,0x7f31d826,0x42197e9b,4 +np.float32,0x3f18cf12,0xbe657838,4 +np.float32,0x3e5c1bc7,0xbf2aebf9,4 +np.float32,0x3e3d3993,0xbf3bbaf8,4 +np.float32,0x68457a,0xc2181347,4 +np.float32,0x7ddf7561,0x42164761,4 +np.float32,0x7f47341b,0x4219b10c,4 +np.float32,0x4d3ecd,0xc21898b2,4 
+np.float32,0x7f43dee8,0x4219a98b,4 +np.float32,0x3f0def7c,0xbe8325f5,4 +np.float32,0x3d5a551f,0xbfa2f994,4 +np.float32,0x7ed26602,0x4218951b,4 +np.float32,0x3ee7fa5b,0xbeb0099a,4 +np.float32,0x7ef74ea8,0x4218dcfc,4 +np.float32,0x6a3bb2,0xc2180afd,4 +np.float32,0x7f4c1e6e,0x4219bbe3,4 +np.float32,0x3e26f625,0xbf49a5a2,4 +np.float32,0xb8482,0xc21be70b,4 +np.float32,0x3f32f077,0xbe1f445b,4 +np.float32,0x7dd694b6,0x4216355a,4 +np.float32,0x7f3d62fd,0x42199a92,4 +np.float32,0x3f48e41a,0xbdd79cbf,4 +np.float32,0x338fc3,0xc2194c75,4 +np.float32,0x3e8355f0,0xbf174462,4 +np.float32,0x7f487e83,0x4219b3eb,4 +np.float32,0x2227f7,0xc21a039b,4 +np.float32,0x7e4383dd,0x4217403a,4 +np.float32,0x52d28b,0xc21879b2,4 +np.float32,0x12472c,0xc21b19a9,4 +np.float32,0x353530,0xc2193e7b,4 +np.float32,0x3f4e4728,0xbdc0137a,4 +np.float32,0x3bf169,0xc2190979,4 +np.float32,0x3eb3ee2e,0xbee8885f,4 +np.float32,0x3f03e3c0,0xbe937892,4 +np.float32,0x3c9f8408,0xbfdaf47f,4 +np.float32,0x40e792,0xc218e61b,4 +np.float32,0x5a6b29,0xc21852ab,4 +np.float32,0x7f268b83,0x4219616a,4 +np.float32,0x3ee25997,0xbeb57fa7,4 +np.float32,0x3f175324,0xbe69cf53,4 +np.float32,0x3f781d91,0xbc5e9827,4 +np.float32,0x7dba5210,0x4215f68c,4 +np.float32,0x7f1e66,0xc217bb2b,4 +np.float32,0x7f7fffff,0x421a209b,4 +np.float32,0x3f646202,0xbd4b10b8,4 +np.float32,0x575248,0xc218622b,4 +np.float32,0x7c67faa1,0x4212bb42,4 +np.float32,0x7f1683f2,0x42193469,4 +np.float32,0x1a3864,0xc21a7931,4 +np.float32,0x7f30ad75,0x42197bae,4 +np.float32,0x7f1c9d05,0x42194612,4 +np.float32,0x3e791795,0xbf1d2b2c,4 +np.float32,0x7e9ebc19,0x421817cd,4 +np.float32,0x4999b7,0xc218ae31,4 +np.float32,0x3d130e2c,0xbfb8f1cc,4 +np.float32,0x3f7e436f,0xbb41bb07,4 +np.float32,0x3ee00241,0xbeb7cf7d,4 +np.float32,0x7e496181,0x42174d5f,4 +np.float32,0x7efe58be,0x4218e978,4 +np.float32,0x3f5e5b0c,0xbd7aa43f,4 +np.float32,0x7ee4c6ab,0x4218ba59,4 +np.float32,0x3f6da8c6,0xbd043d7e,4 +np.float32,0x3e3e6e0f,0xbf3b064b,4 +np.float32,0x3f0143b3,0xbe97f10a,4 +np.float32,0x79170f,0xc217d0c6,4 +np.float32,0x517645,0xc218810f,4 +np.float32,0x3f1f9960,0xbe52226e,4 +np.float32,0x2a8df9,0xc219a1d6,4 +np.float32,0x2300a6,0xc219f8b8,4 +np.float32,0x3ee31355,0xbeb4c97a,4 +np.float32,0x3f20b05f,0xbe4f1ba9,4 +np.float32,0x3ee64249,0xbeb1b0ff,4 +np.float32,0x3a94b7,0xc21913b2,4 +np.float32,0x7ef7ef43,0x4218de1d,4 +np.float32,0x3f1abb5d,0xbe5fe872,4 +np.float32,0x7f65360b,0x4219ef72,4 +np.float32,0x3d315d,0xc219004c,4 +np.float32,0x3f26bbc4,0xbe3eafb9,4 +np.float32,0x3ee8c6e9,0xbeaf45de,4 +np.float32,0x7e5f1452,0x42177ae1,4 +np.float32,0x3f32e777,0xbe1f5aba,4 +np.float32,0x4d39a1,0xc21898d0,4 +np.float32,0x3e59ad15,0xbf2c2841,4 +np.float32,0x3f4be746,0xbdca5fc4,4 +np.float32,0x72e4fd,0xc217e821,4 +np.float32,0x1af0b8,0xc21a6d25,4 +np.float32,0x3f311147,0xbe23f18d,4 +np.float32,0x3f1ecebb,0xbe545880,4 +np.float32,0x7e90d293,0x4217ef02,4 +np.float32,0x3e3b366a,0xbf3ceb46,4 +np.float32,0x3f133239,0xbe761c96,4 +np.float32,0x7541ab,0xc217df15,4 +np.float32,0x3d8c8275,0xbf94f1a1,4 +np.float32,0x483b92,0xc218b689,4 +np.float32,0x3eb0dbed,0xbeec5c6b,4 +np.float32,0x3f00c676,0xbe98c8e2,4 +np.float32,0x3f445ac2,0xbdebed7c,4 +np.float32,0x3d2af4,0xc219007a,4 +np.float32,0x7f196ee1,0x42193cf2,4 +np.float32,0x290c94,0xc219b1db,4 +np.float32,0x3f5dbdc9,0xbd7f9019,4 +np.float32,0x3e80c62e,0xbf1974fc,4 +np.float32,0x3ec9ed2c,0xbecee326,4 +np.float32,0x7f469d60,0x4219afbb,4 +np.float32,0x3f698413,0xbd2386ce,4 +np.float32,0x42163f,0xc218de14,4 +np.float32,0x67a554,0xc21815f4,4 +np.float32,0x3f4bff74,0xbdc9f651,4 
+np.float32,0x16a743,0xc21aba39,4 +np.float32,0x2eb8b0,0xc219784b,4 +np.float32,0x3eed9be1,0xbeaab45b,4 +np.float64,0x7fe0d76873e1aed0,0x40733f9d783bad7a,1 +np.float64,0x3fe22626bb244c4d,0xbfcf86a59864eea2,1 +np.float64,0x7f874113d02e8227,0x407324f54c4015b8,1 +np.float64,0x3fe40a46a9e8148d,0xbfca0411f533fcb9,1 +np.float64,0x3fd03932eea07266,0xbfe312bc9cf5649e,1 +np.float64,0x7fee5d2a1b3cba53,0x407343b5f56367a0,1 +np.float64,0x3feb7bda4a76f7b5,0xbfb0ea2c6edc784a,1 +np.float64,0x3fd6cd831a2d9b06,0xbfdcaf2e1a5faf51,1 +np.float64,0x98324e273064a,0xc0733e0e4c6d11c6,1 +np.float64,0x7fe1dd63b363bac6,0x4073400667c405c3,1 +np.float64,0x3fec5971f178b2e4,0xbfaaef32a7d94563,1 +np.float64,0x17abc07e2f579,0xc0734afca4da721e,1 +np.float64,0x3feec6ab5cfd8d57,0xbf9157f3545a8235,1 +np.float64,0x3fe3ae9622a75d2c,0xbfcb04b5ad254581,1 +np.float64,0x7fea73d854b4e7b0,0x407342c0a548f4c5,1 +np.float64,0x7fe29babf4653757,0x4073404eeb5fe714,1 +np.float64,0x7fd3a55d85a74aba,0x40733bde72e86c27,1 +np.float64,0x3fe83ce305f079c6,0xbfbee3511e85e0f1,1 +np.float64,0x3fd72087ea2e4110,0xbfdc4ab30802d7c2,1 +np.float64,0x7feb54ddab76a9ba,0x407342facb6f3ede,1 +np.float64,0xc57e34a18afd,0xc0734f82ec815baa,1 +np.float64,0x7a8cb97ef5198,0xc0733f8fb3777a67,1 +np.float64,0x7fe801032c300205,0x40734213dbe4eda9,1 +np.float64,0x3aefb1f475df7,0xc07344a5f08a0584,1 +np.float64,0x7fee85f1dd3d0be3,0x407343bf4441c2a7,1 +np.float64,0x3fdc7f1055b8fe21,0xbfd67d300630e893,1 +np.float64,0xe8ecddb3d1d9c,0xc0733b194f18f466,1 +np.float64,0x3fdf2b23c73e5648,0xbfd3ff6872c1f887,1 +np.float64,0x3fdba4aef2b7495e,0xbfd7557205e18b7b,1 +np.float64,0x3fe2ac34c6e5586a,0xbfcdf1dac69bfa08,1 +np.float64,0x3fc9852628330a4c,0xbfe66914f0fb9b0a,1 +np.float64,0x7fda211acf344235,0x40733dd9c2177aeb,1 +np.float64,0x3fe9420eb432841d,0xbfba4dd969a32575,1 +np.float64,0xb2f9d1ed65f3a,0xc0733cedfb6527ff,1 +np.float64,0x3fe9768a68f2ed15,0xbfb967c39c35c435,1 +np.float64,0x7fe8268462b04d08,0x4073421eaed32734,1 +np.float64,0x3fcf331f063e663e,0xbfe39e2f4b427ca9,1 +np.float64,0x7fd4eb9e2b29d73b,0x40733c4e4141418d,1 +np.float64,0x7fd2bba658a5774c,0x40733b89cd53d5b1,1 +np.float64,0x3fdfdf04913fbe09,0xbfd360c7fd9d251b,1 +np.float64,0x3fca5bfd0534b7fa,0xbfe5f5f844b2b20c,1 +np.float64,0x3feacd5032f59aa0,0xbfb3b5234ba8bf7b,1 +np.float64,0x7fe9241cec724839,0x4073426631362cec,1 +np.float64,0x3fe57aca20eaf594,0xbfc628e3ac2c6387,1 +np.float64,0x3fec6553ca38caa8,0xbfaa921368d3b222,1 +np.float64,0x3fe1e9676563d2cf,0xbfd020f866ba9b24,1 +np.float64,0x3fd5590667aab20d,0xbfde8458af5a4fd6,1 +np.float64,0x3fdf7528f43eea52,0xbfd3bdb438d6ba5e,1 +np.float64,0xb8dddc5571bbc,0xc0733cb4601e5bb2,1 +np.float64,0xe6d4e1fbcda9c,0xc0733b295ef4a4ba,1 +np.float64,0x3fe7019d962e033b,0xbfc257c0a6e8de16,1 +np.float64,0x3f94ef585029deb1,0xbffb07e5dfb0e936,1 +np.float64,0x7fc863b08030c760,0x4073388e28d7b354,1 +np.float64,0xf684443bed089,0xc0733ab46cfbff9a,1 +np.float64,0x7fe00e901d201d1f,0x40733f489c05a0f0,1 +np.float64,0x9e5c0a273cb82,0xc0733dc7af797e19,1 +np.float64,0x7fe49734f0692e69,0x4073410303680df0,1 +np.float64,0x7fb7b584442f6b08,0x4073338acff72502,1 +np.float64,0x3f99984c30333098,0xbff9a2642a6ed8cc,1 +np.float64,0x7fea2fcda8745f9a,0x407342aeae7f5e64,1 +np.float64,0xe580caadcb01a,0xc0733b33a3639217,1 +np.float64,0x1899ab3831336,0xc0734ab823729417,1 +np.float64,0x39bd4c76737aa,0xc07344ca6fac6d21,1 +np.float64,0xd755b2dbaeab7,0xc0733ba4fe19f2cc,1 +np.float64,0x3f952bebf82a57d8,0xbffaf3e7749c2512,1 +np.float64,0x3fe62ee5d72c5dcc,0xbfc45e3cb5baad08,1 +np.float64,0xb1264a7d624ca,0xc0733d003a1d0a66,1 
+np.float64,0x3fc4bd1bcd297a38,0xbfe94b3058345c46,1 +np.float64,0x7fc5758bb32aeb16,0x407337aa7805497f,1 +np.float64,0x3fb0edcaf421db96,0xbff2dfb09c405294,1 +np.float64,0x3fd240fceaa481fa,0xbfe16f356bb36134,1 +np.float64,0x38c0c62a7181a,0xc07344e916d1e9b7,1 +np.float64,0x3fe98f2b3bf31e56,0xbfb8fc6eb622a820,1 +np.float64,0x3fe2bdf99c257bf3,0xbfcdbd0dbbae4d0b,1 +np.float64,0xce4b390d9c967,0xc0733bf14ada3134,1 +np.float64,0x3fd2ad607ba55ac1,0xbfe11da15167b37b,1 +np.float64,0x3fd8154f11b02a9e,0xbfdb2a6fabb9a026,1 +np.float64,0xf37849fde6f09,0xc0733aca8c64344c,1 +np.float64,0x3fcbae43b2375c87,0xbfe547f267c8e570,1 +np.float64,0x3fcd46fd7d3a8dfb,0xbfe48070f7232929,1 +np.float64,0x7fcdd245273ba489,0x407339f3d907b101,1 +np.float64,0x3fac75cd0838eb9a,0xbff4149d177b057b,1 +np.float64,0x7fe8ff3fd7f1fe7f,0x4073425bf968ba6f,1 +np.float64,0x7febadaa4df75b54,0x407343113a91f0e9,1 +np.float64,0x7fd5e4649c2bc8c8,0x40733c9f0620b065,1 +np.float64,0x903429812069,0xc07351b255e27887,1 +np.float64,0x3fe1d8c51c63b18a,0xbfd03ad448c1f1ee,1 +np.float64,0x3fe573ea646ae7d5,0xbfc63ab0bfd0e601,1 +np.float64,0x3f83b3f3c02767e8,0xc00022677e310649,1 +np.float64,0x7fd15d1582a2ba2a,0x40733b02c469c1d6,1 +np.float64,0x3fe63d3dabec7a7b,0xbfc43a56ee97b27e,1 +np.float64,0x7fe3a452fb2748a5,0x407340af1973c228,1 +np.float64,0x3fafac6b303f58d6,0xbff35651703ae9f2,1 +np.float64,0x513ddd24a27bc,0xc073426af96aaebb,1 +np.float64,0x3fef152246be2a45,0xbf89df79d7719282,1 +np.float64,0x3fe8c923e9f19248,0xbfbc67228e8db5f6,1 +np.float64,0x3fd6e2325fadc465,0xbfdc9602fb0b950f,1 +np.float64,0x3fe9616815f2c2d0,0xbfb9c4311a3b415b,1 +np.float64,0x2fe4e4005fc9d,0xc0734616fe294395,1 +np.float64,0x3fbceb02dc39d606,0xbfee4e68f1c7886f,1 +np.float64,0x7fe35e843d66bd07,0x407340963b066ad6,1 +np.float64,0x7fecd6c648f9ad8c,0x4073435a4c176e94,1 +np.float64,0x7fcbd72bf437ae57,0x4073397994b85665,1 +np.float64,0x3feff6443b3fec88,0xbf40eb380d5318ae,1 +np.float64,0x7fb9373cf6326e79,0x407333f869edef08,1 +np.float64,0x63790d9cc6f22,0xc0734102d4793cda,1 +np.float64,0x3f9de6efe83bcde0,0xbff88db6f0a6b56e,1 +np.float64,0xe00f2dc1c01f,0xc0734ea26ab84ff2,1 +np.float64,0xd7a9aa8baf536,0xc0733ba248fa33ab,1 +np.float64,0x3fee0089ea7c0114,0xbf9cab936ac31c4b,1 +np.float64,0x3fdec0d51cbd81aa,0xbfd45ed8878c5860,1 +np.float64,0x7fe91bf5e9f237eb,0x40734263f005081d,1 +np.float64,0x34ea7d1e69d50,0xc07345659dde7444,1 +np.float64,0x7fe67321a3ace642,0x4073419cc8130d95,1 +np.float64,0x9d1aeb2f3a35e,0xc0733dd5d506425c,1 +np.float64,0x7fbb01df003603bd,0x4073347282f1391d,1 +np.float64,0x42b945b285729,0xc07343c92d1bbef9,1 +np.float64,0x7fc92799b8324f32,0x407338c51e3f0733,1 +np.float64,0x3fe119c19b223383,0xbfd16ab707f65686,1 +np.float64,0x3fc9f9ac5333f359,0xbfe62a2f91ec0dff,1 +np.float64,0x3fd820d5a8b041ab,0xbfdb1d2586fe7b18,1 +np.float64,0x10000000000000,0xc0733a7146f72a42,1 +np.float64,0x3fe7e1543eafc2a8,0xbfc045362889592d,1 +np.float64,0xcbc0e1819783,0xc0734f4b68e05b1c,1 +np.float64,0xeb57e411d6afd,0xc0733b06efec001a,1 +np.float64,0xa9b74b47536ea,0xc0733d4c7bd06ddc,1 +np.float64,0x3fe56d4022eada80,0xbfc64bf8c7e3dd59,1 +np.float64,0x3fd445ca27288b94,0xbfdff40aecd0f882,1 +np.float64,0x3fe5af1cf5ab5e3a,0xbfc5a21d83699a04,1 +np.float64,0x7fed3431eb7a6863,0x40734370aa6131e1,1 +np.float64,0x3fd878dea1b0f1bd,0xbfdab8730dc00517,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3feba9fcc1f753fa,0xbfb03027dcecbf65,1 +np.float64,0x7fca4feed6349fdd,0x4073391526327eb0,1 +np.float64,0x3fe7748ddbaee91c,0xbfc144b438218065,1 +np.float64,0x3fb5fbd94c2bf7b3,0xbff10ee6342c21a0,1 
+np.float64,0x3feb603b97f6c077,0xbfb15a1f99d6d25e,1 +np.float64,0x3fe2e6fc8ce5cdf9,0xbfcd43edd7f3b4e6,1 +np.float64,0x7feb2b31f7765663,0x407342f02b306688,1 +np.float64,0x3fe290e2282521c4,0xbfce436deb8dbcf3,1 +np.float64,0x3fe3d5adf9e7ab5c,0xbfca96b8aa55d942,1 +np.float64,0x691899f2d2314,0xc07340a1026897c8,1 +np.float64,0x7fe468b008e8d15f,0x407340f33eadc628,1 +np.float64,0x3fb3a4c416274988,0xbff1d71da539a56e,1 +np.float64,0x3fe2442b29e48856,0xbfcf2b0037322661,1 +np.float64,0x3f376fbc7e6ef,0xc073442939a84643,1 +np.float64,0x3fe7c78d65ef8f1b,0xbfc08157cff411de,1 +np.float64,0xd4f27acba9e50,0xc0733bb8d38daa50,1 +np.float64,0x5198919ea3313,0xc07342633ba7cbea,1 +np.float64,0x7fd09f66f0a13ecd,0x40733ab5310b4385,1 +np.float64,0x3fdfe5531dbfcaa6,0xbfd35b487c7e739f,1 +np.float64,0x3fc4b0fecc2961fe,0xbfe95350c38c1640,1 +np.float64,0x7fd5ae21962b5c42,0x40733c8db78b7250,1 +np.float64,0x3fa4a8fcd42951fa,0xbff64e62fe602b72,1 +np.float64,0x7fc8e0e25831c1c4,0x407338b179b91223,1 +np.float64,0x7fdde1df6f3bc3be,0x40733ec87f9f027e,1 +np.float64,0x3fd8b9ad86b1735b,0xbfda6f385532c41b,1 +np.float64,0x3fd9f20ee933e41e,0xbfd91872fd858597,1 +np.float64,0x7feb35332df66a65,0x407342f2b9c715f0,1 +np.float64,0x7fe783dc7eaf07b8,0x407341ef41873706,1 +np.float64,0x7fceee929f3ddd24,0x40733a34e3c660fd,1 +np.float64,0x985b58d730b6b,0xc0733e0c6cfbb6f8,1 +np.float64,0x3fef4bb55cfe976b,0xbf83cb246c6f2a78,1 +np.float64,0x3fe218014f243003,0xbfcfb20ac683e1f6,1 +np.float64,0x7fe43b9fbea8773e,0x407340e3d5d5d29e,1 +np.float64,0x7fe148c74c62918e,0x40733fcba4367b8b,1 +np.float64,0x3feea4ad083d495a,0xbf93443917f3c991,1 +np.float64,0x8bcf6311179ed,0xc0733ea54d59dd31,1 +np.float64,0xf4b7a2dbe96f5,0xc0733ac175182401,1 +np.float64,0x543338baa8668,0xc073422b59165fe4,1 +np.float64,0x3fdb467317368ce6,0xbfd7b4d515929635,1 +np.float64,0x7fe3bbbc89e77778,0x407340b75cdf3de7,1 +np.float64,0x7fe693377aad266e,0x407341a6af60a0f1,1 +np.float64,0x3fc66210502cc421,0xbfe83bb940610a24,1 +np.float64,0x7fa75638982eac70,0x40732e9da476b816,1 +np.float64,0x3fe0d72a4761ae55,0xbfd1d7c82c479fab,1 +np.float64,0x97dec0dd2fbd8,0xc0733e121e072804,1 +np.float64,0x3fef33ec8c7e67d9,0xbf86701be6be8df1,1 +np.float64,0x7fcfca9b423f9536,0x40733a65a51efb94,1 +np.float64,0x9f2215633e443,0xc0733dbf043de9ed,1 +np.float64,0x2469373e48d28,0xc07347fe9e904b77,1 +np.float64,0x7fecc2e18cb985c2,0x407343557f58dfa2,1 +np.float64,0x3fde4acbfdbc9598,0xbfd4ca559e575e74,1 +np.float64,0x3fd6b11cf1ad623a,0xbfdcd1e17ef36114,1 +np.float64,0x3fc19ec494233d89,0xbfeb8ef228e8826a,1 +np.float64,0x4c89ee389913e,0xc07342d50c904f61,1 +np.float64,0x88c2046f11841,0xc0733ecc91369431,1 +np.float64,0x7fc88c13fd311827,0x40733899a125b392,1 +np.float64,0x3fcebd893a3d7b12,0xbfe3d2f35ab93765,1 +np.float64,0x3feb582a1476b054,0xbfb17ae8ec6a0465,1 +np.float64,0x7fd4369e5da86d3c,0x40733c1118b8cd67,1 +np.float64,0x3fda013fc1340280,0xbfd90831b85e98b2,1 +np.float64,0x7fed33d73fba67ad,0x4073437094ce1bd9,1 +np.float64,0x3fed3191053a6322,0xbfa468cc26a8f685,1 +np.float64,0x3fc04ed51c209daa,0xbfeca24a6f093bca,1 +np.float64,0x3fee4ac8763c9591,0xbf986458abbb90b5,1 +np.float64,0xa2d39dd145a74,0xc0733d9633651fbc,1 +np.float64,0x3fe7d9f86f2fb3f1,0xbfc0565a0b059f1c,1 +np.float64,0x3fe3250144e64a03,0xbfcc8eb2b9ae494b,1 +np.float64,0x7fe2b29507a56529,0x4073405774492075,1 +np.float64,0x7fdcdfcbe2b9bf97,0x40733e8b736b1bd8,1 +np.float64,0x3fc832730f3064e6,0xbfe7267ac9b2e7c3,1 +np.float64,0x3fc7e912e52fd226,0xbfe750dfc0aeae57,1 +np.float64,0x7fc960472f32c08d,0x407338d4b4cb3957,1 +np.float64,0x3fbdf182ea3be306,0xbfedd27150283ffb,1 
+np.float64,0x3fd1e9359823d26b,0xbfe1b2ac7fd25f8d,1 +np.float64,0x7fbcf75f6039eebe,0x407334ef13eb16f8,1 +np.float64,0x3fe5a3c910eb4792,0xbfc5bf2f57c5d643,1 +np.float64,0x3fcf4f2a6e3e9e55,0xbfe391b6f065c4b8,1 +np.float64,0x3fee067873fc0cf1,0xbf9c53af0373fc0e,1 +np.float64,0xd3f08b85a7e12,0xc0733bc14357e686,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fc8635f6430c6bf,0xbfe70a7dc77749a7,1 +np.float64,0x3fe3ff5c52a7feb9,0xbfca22617c6636d5,1 +np.float64,0x3fbbae91fa375d24,0xbfeee9d4c300543f,1 +np.float64,0xe3f71b59c7ee4,0xc0733b3f99187375,1 +np.float64,0x7fca93d3be3527a6,0x40733926fd48ecd6,1 +np.float64,0x3fcd29f7223a53ee,0xbfe48e3edf32fe57,1 +np.float64,0x7fdc4ef6f8389ded,0x40733e68401cf2a6,1 +np.float64,0xe009bc81c014,0xc0734ea295ee3e5b,1 +np.float64,0x61f56c78c3eae,0xc073411e1dbd7c54,1 +np.float64,0x3fde131928bc2632,0xbfd4fda024f6927c,1 +np.float64,0x3fb21ee530243dca,0xbff266aaf0358129,1 +np.float64,0x7feaac82a4f55904,0x407342cf7809d9f9,1 +np.float64,0x3fe66ab177ecd563,0xbfc3c92d4d522819,1 +np.float64,0xfe9f9c2bfd3f4,0xc0733a7ade3a88a7,1 +np.float64,0x7fd0c5217c218a42,0x40733ac4e4c6dfa5,1 +np.float64,0x430f4ae6861ea,0xc07343c03d8a9442,1 +np.float64,0x494bff2a92981,0xc073432209d2fd16,1 +np.float64,0x3f8860e9d030c1d4,0xbffeca059ebf5e89,1 +np.float64,0x3fe43732dc286e66,0xbfc98800388bad2e,1 +np.float64,0x6443b60ec8877,0xc07340f4bab11827,1 +np.float64,0x3feda9be6d7b537d,0xbfa0dcb9a6914069,1 +np.float64,0x3fc5ceb6772b9d6d,0xbfe89868c881db70,1 +np.float64,0x3fbdf153023be2a6,0xbfedd2878c3b4949,1 +np.float64,0x7fe8f6b8e8f1ed71,0x407342599a30b273,1 +np.float64,0x3fea6fbdb8b4df7b,0xbfb53bf66f71ee96,1 +np.float64,0xc7ac3dbb8f588,0xc0733c2b525b7963,1 +np.float64,0x3fef3a91f77e7524,0xbf85b2bd3adbbe31,1 +np.float64,0x3f887cb97030f973,0xbffec21ccbb5d22a,1 +np.float64,0x8b2f1c9f165e4,0xc0733ead49300951,1 +np.float64,0x2c1cb32058397,0xc07346a951bd8d2b,1 +np.float64,0x3fe057edd620afdc,0xbfd2acf1881b7e99,1 +np.float64,0x7f82e9530025d2a5,0x4073238591dd52ce,1 +np.float64,0x3fe4e03dff69c07c,0xbfc7be96c5c006fc,1 +np.float64,0x52727b4aa4e50,0xc0734250c58ebbc1,1 +np.float64,0x3f99a62160334c43,0xbff99ea3ca09d8f9,1 +np.float64,0x3fd5314b4faa6297,0xbfdeb843daf01e03,1 +np.float64,0x3fefde89e13fbd14,0xbf5d1facb7a1e9de,1 +np.float64,0x7fb460f1a228c1e2,0x4073327d8cbc5f86,1 +np.float64,0xeb93efb3d727e,0xc0733b052a4990e4,1 +np.float64,0x3fe884baecf10976,0xbfbd9ba9cfe23713,1 +np.float64,0x7fefffffffffffff,0x40734413509f79ff,1 +np.float64,0x149dc7c6293ba,0xc0734bf26b1df025,1 +np.float64,0x64188f88c8313,0xc07340f7b8e6f4b5,1 +np.float64,0x3fdfac314abf5863,0xbfd38d3e9dba1b0e,1 +np.float64,0x3fd72052a42e40a5,0xbfdc4af30ee0b245,1 +np.float64,0x7fdd951f743b2a3e,0x40733eb68fafa838,1 +np.float64,0x65a2dd5acb45c,0xc07340dc8ed625e1,1 +np.float64,0x7fe89a79997134f2,0x4073423fbceb1cbe,1 +np.float64,0x3fe70a000d6e1400,0xbfc24381e09d02f7,1 +np.float64,0x3fe2cec160259d83,0xbfcd8b5e92354129,1 +np.float64,0x3feb9ef77a773def,0xbfb05c7b2ee6f388,1 +np.float64,0xe0d66689c1acd,0xc0733b582c779620,1 +np.float64,0x3fee86bd0ffd0d7a,0xbf94f7870502c325,1 +np.float64,0x186afc6230d60,0xc0734ac55fb66d5d,1 +np.float64,0xc0631f4b80c64,0xc0733c6d7149d373,1 +np.float64,0x3fdad1b87735a371,0xbfd82cca73ec663b,1 +np.float64,0x7fe7f6d313efeda5,0x40734210e84576ab,1 +np.float64,0x7fd7b7fce6af6ff9,0x40733d2d92ffdaaf,1 +np.float64,0x3fe6f35a28ade6b4,0xbfc27a4239b540c3,1 +np.float64,0x7fdb0b834eb61706,0x40733e17073a61f3,1 +np.float64,0x82f4661105e8d,0xc0733f19b34adeed,1 +np.float64,0x3fc77230112ee460,0xbfe796a7603c0d16,1 
+np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x7fb8317bc63062f7,0x407333aec761a739,1 +np.float64,0x7fd165609a22cac0,0x40733b061541ff15,1 +np.float64,0x3fed394768fa728f,0xbfa42e1596e1faf6,1 +np.float64,0x7febab693d7756d1,0x40734310a9ac828e,1 +np.float64,0x7fe809a69230134c,0x407342165b9acb69,1 +np.float64,0x3fc091d38f2123a7,0xbfec69a70fc23548,1 +np.float64,0x3fb2a8f5dc2551ec,0xbff2327f2641dd0d,1 +np.float64,0x7fc60b6fe02c16df,0x407337da5adc342c,1 +np.float64,0x3fefa53c3bbf4a78,0xbf73d1be15b73b00,1 +np.float64,0x7fee09c1717c1382,0x407343a2c479e1cb,1 +np.float64,0x8000000000000001,0x7ff8000000000000,1 +np.float64,0x3fede0b2733bc165,0xbf9e848ac2ecf604,1 +np.float64,0x3fee2ac331bc5586,0xbf9a3b699b721c9a,1 +np.float64,0x3fd4db12d829b626,0xbfdf2a413d1e453a,1 +np.float64,0x7fe605230dec0a45,0x4073417a67db06be,1 +np.float64,0x3fe378b2bf26f165,0xbfcb9dbb2b6d6832,1 +np.float64,0xc1d4c1ab83a98,0xc0733c60244cadbf,1 +np.float64,0x3feb15500e762aa0,0xbfb28c071d5efc22,1 +np.float64,0x3fe36225a626c44b,0xbfcbde4259e9047e,1 +np.float64,0x3fe7c586a72f8b0d,0xbfc08614b13ed4b2,1 +np.float64,0x7fb0f2d8cc21e5b1,0x40733135b2c7dd99,1 +np.float64,0x5957f3feb2aff,0xc07341c1df75638c,1 +np.float64,0x3fca4851bd3490a3,0xbfe6005ae5279485,1 +np.float64,0x824217d904843,0xc0733f232fd58f0f,1 +np.float64,0x4f9332269f267,0xc073428fd8e9cb32,1 +np.float64,0x3fea6f087374de11,0xbfb53ef0d03918b2,1 +np.float64,0x3fd9409ab4328135,0xbfd9d9231381e2b8,1 +np.float64,0x3fdba03b00374076,0xbfd759ec94a7ab5b,1 +np.float64,0x3fe0ce3766619c6f,0xbfd1e6912582ccf0,1 +np.float64,0x3fabd45ddc37a8bc,0xbff43c78d3188423,1 +np.float64,0x3fc3cadd592795bb,0xbfe9f1576c9b2c79,1 +np.float64,0x3fe10df049621be1,0xbfd17df2f2c28022,1 +np.float64,0x945b5d1328b6c,0xc0733e3bc06f1e75,1 +np.float64,0x7fc1c3742b2386e7,0x4073365a403d1051,1 +np.float64,0x7fdc957138b92ae1,0x40733e7977717586,1 +np.float64,0x7f943fa1a0287f42,0x407328d01de143f5,1 +np.float64,0x3fec9631c4392c64,0xbfa914b176d8f9d2,1 +np.float64,0x3fd8e7c008b1cf80,0xbfda3b9d9b6da8f4,1 +np.float64,0x7222f9fee4460,0xc073400e371516cc,1 +np.float64,0x3fe890e43eb121c8,0xbfbd64921462e823,1 +np.float64,0x3fcfd7fe2a3faffc,0xbfe3557e2f207800,1 +np.float64,0x3fed5dd1c1babba4,0xbfa318bb20db64e6,1 +np.float64,0x3fe6aa34c66d546a,0xbfc32c8a8991c11e,1 +np.float64,0x8ca79801196,0xc0736522bd5adf6a,1 +np.float64,0x3feb274079364e81,0xbfb2427b24b0ca20,1 +np.float64,0x7fe04927e4a0924f,0x40733f61c96f7f89,1 +np.float64,0x7c05f656f80bf,0xc0733f7a70555b4e,1 +np.float64,0x7fe97819eff2f033,0x4073427d4169b0f8,1 +np.float64,0x9def86e33bdf1,0xc0733dcc740b7175,1 +np.float64,0x7fedd1ef3f3ba3dd,0x40734395ceab8238,1 +np.float64,0x77bed86cef7dc,0xc0733fb8e0e9bf73,1 +np.float64,0x9274b41b24e97,0xc0733e52b16dff71,1 +np.float64,0x8010000000000000,0x7ff8000000000000,1 +np.float64,0x9c977855392ef,0xc0733ddba7d421d9,1 +np.float64,0xfb4560a3f68ac,0xc0733a9271e6a118,1 +np.float64,0xa67d9f394cfb4,0xc0733d6e9d58cc94,1 +np.float64,0x3fbfa766b03f4ecd,0xbfed0cccfecfc900,1 +np.float64,0x3fe177417522ee83,0xbfd0d45803bff01a,1 +np.float64,0x7fe85e077bb0bc0e,0x4073422e957a4aa3,1 +np.float64,0x7feeb0a6883d614c,0x407343c8f6568f7c,1 +np.float64,0xbab82edb75706,0xc0733ca2a2b20094,1 +np.float64,0xfadb44bdf5b69,0xc0733a9561b7ec04,1 +np.float64,0x3fefb9b82b3f7370,0xbf6ea776b2dcc3a9,1 +np.float64,0x7fe080ba8a610174,0x40733f795779b220,1 +np.float64,0x3f87faa1c02ff544,0xbffee76acafc92b7,1 +np.float64,0x7fed474108fa8e81,0x4073437531d4313e,1 +np.float64,0x3fdb7b229336f645,0xbfd77f583a4a067f,1 +np.float64,0x256dbf0c4adb9,0xc07347cd94e6fa81,1 
+np.float64,0x3fd034ae25a0695c,0xbfe3169c15decdac,1 +np.float64,0x3a72177274e44,0xc07344b4cf7d68cd,1 +np.float64,0x7fa2522d5c24a45a,0x40732cef2f793470,1 +np.float64,0x3fb052bdde20a57c,0xbff3207fd413c848,1 +np.float64,0x3fdccfecbbb99fd9,0xbfd62ec04a1a687a,1 +np.float64,0x3fd403ac53280759,0xbfe027a31df2c8cc,1 +np.float64,0x3fab708e4036e11d,0xbff45591df4f2e8b,1 +np.float64,0x7fcfc001993f8002,0x40733a63539acf9d,1 +np.float64,0x3fd2b295dfa5652c,0xbfe119c1b476c536,1 +np.float64,0x7fe8061262b00c24,0x4073421552ae4538,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0x7fed52093ffaa411,0x40734377c072a7e8,1 +np.float64,0xf3df902fe7bf2,0xc0733ac79a75ff7a,1 +np.float64,0x7fe13d382e227a6f,0x40733fc6fd0486bd,1 +np.float64,0x3621d5086c43b,0xc073453d31effbcd,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x3fdaffea27b5ffd4,0xbfd7fd139dc1c2c5,1 +np.float64,0x7fea6536dc34ca6d,0x407342bccc564fdd,1 +np.float64,0x7fd478f00c28f1df,0x40733c27c0072fde,1 +np.float64,0x7fa72ef0502e5de0,0x40732e91e83db75c,1 +np.float64,0x7fd302970626052d,0x40733ba3ec6775f6,1 +np.float64,0x7fbb57ab0036af55,0x407334887348e613,1 +np.float64,0x3fda0ff722b41fee,0xbfd8f87b77930330,1 +np.float64,0x1e983ce23d309,0xc073493438f57e61,1 +np.float64,0x7fc90de97c321bd2,0x407338be01ffd4bd,1 +np.float64,0x7fe074b09c20e960,0x40733f7443f0dbe1,1 +np.float64,0x3fed5dec9fbabbd9,0xbfa317efb1fe8a95,1 +np.float64,0x7fdb877632b70eeb,0x40733e3697c88ba8,1 +np.float64,0x7fe4fb0067e9f600,0x40734124604b99e8,1 +np.float64,0x7fd447dc96288fb8,0x40733c1703ab2cce,1 +np.float64,0x3feb2d1e64f65a3d,0xbfb22a781df61c05,1 +np.float64,0xb6c8e6676d91d,0xc0733cc8859a0b91,1 +np.float64,0x3fdc3c2418387848,0xbfd6bec3a3c3cdb5,1 +np.float64,0x3fdecb9ccdbd973a,0xbfd4551c05721a8e,1 +np.float64,0x3feb1100e7762202,0xbfb29db911fe6768,1 +np.float64,0x3fe0444bc2a08898,0xbfd2ce69582e78c1,1 +np.float64,0x7fda403218b48063,0x40733de201d8340c,1 +np.float64,0x3fdc70421238e084,0xbfd68ba4bd48322b,1 +np.float64,0x3fe06e747c60dce9,0xbfd286bcac34a981,1 +np.float64,0x7fc1931d9623263a,0x407336473da54de4,1 +np.float64,0x229914da45323,0xc073485979ff141c,1 +np.float64,0x3fe142f92da285f2,0xbfd1280909992cb6,1 +np.float64,0xf1d02fa9e3a06,0xc0733ad6b19d71a0,1 +np.float64,0x3fb1fe9b0023fd36,0xbff27317d8252c16,1 +np.float64,0x3fa544b9242a8972,0xbff61ac38569bcfc,1 +np.float64,0x3feeb129d4fd6254,0xbf928f23ad20c1ee,1 +np.float64,0xa2510b7f44a22,0xc0733d9bc81ea0a1,1 +np.float64,0x3fca75694d34ead3,0xbfe5e8975b3646c2,1 +np.float64,0x7fece10621b9c20b,0x4073435cc3dd9a1b,1 +np.float64,0x7fe98a57d3b314af,0x4073428239b6a135,1 +np.float64,0x3fe259c62a64b38c,0xbfcee96682a0f355,1 +np.float64,0x3feaaa9b9d755537,0xbfb445779f3359af,1 +np.float64,0xdaadecfdb55be,0xc0733b899338432a,1 +np.float64,0x3fed00eae4fa01d6,0xbfa5dc8d77be5991,1 +np.float64,0x7fcc96c773392d8e,0x407339a8c5cd786e,1 +np.float64,0x3fef7b8b203ef716,0xbf7cff655ecb6424,1 +np.float64,0x7fd4008113a80101,0x40733bfe6552acb7,1 +np.float64,0x7fe99ff035b33fdf,0x407342881753ee2e,1 +np.float64,0x3ee031e87dc07,0xc0734432d736e492,1 +np.float64,0x3fddfe390f3bfc72,0xbfd510f1d9ec3e36,1 +np.float64,0x3fd9ddce74b3bb9d,0xbfd92e2d75a061bb,1 +np.float64,0x7fe5f742edebee85,0x40734176058e3a77,1 +np.float64,0x3fdb04185b360831,0xbfd7f8c63aa5e1c4,1 +np.float64,0xea2b0f43d4562,0xc0733b0fd77c8118,1 +np.float64,0x7fc3f4973527e92d,0x407337293bbb22c4,1 +np.float64,0x3fb9adfb38335bf6,0xbfeff4f3ea85821a,1 +np.float64,0x87fb98750ff73,0xc0733ed6ad83c269,1 +np.float64,0x3fe005721a200ae4,0xbfd33a9f1ebfb0ac,1 +np.float64,0xd9e04fe7b3c0a,0xc0733b901ee257f3,1 
+np.float64,0x2c39102658723,0xc07346a4db63bf55,1 +np.float64,0x3f7dc28e003b851c,0xc0011c1d1233d948,1 +np.float64,0x3430fd3868620,0xc073457e24e0b70d,1 +np.float64,0xbff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fd23e45e0247c8c,0xbfe17146bcf87b57,1 +np.float64,0x6599df3ecb33d,0xc07340dd2c41644c,1 +np.float64,0x3fdf074f31be0e9e,0xbfd41f6e9dbb68a5,1 +np.float64,0x7fdd6233f3bac467,0x40733eaa8f674b72,1 +np.float64,0x7fe03e8481607d08,0x40733f5d3df3b087,1 +np.float64,0x3fcc3b79f13876f4,0xbfe501bf3b379b77,1 +np.float64,0xe5d97ae3cbb30,0xc0733b30f47cbd12,1 +np.float64,0x8acbc4a115979,0xc0733eb240a4d2c6,1 +np.float64,0x3fedbdbc48bb7b79,0xbfa0470fd70c4359,1 +np.float64,0x3fde1611103c2c22,0xbfd4fae1fa8e7e5e,1 +np.float64,0x3fe09478bd2128f1,0xbfd246b7e85711dc,1 +np.float64,0x3fd6dfe8f3adbfd2,0xbfdc98ca2f32c1ad,1 +np.float64,0x72ccf274e599f,0xc0734003e5b0da63,1 +np.float64,0xe27c7265c4f8f,0xc0733b4b2d808566,1 +np.float64,0x7fee3161703c62c2,0x407343abe90f5649,1 +np.float64,0xf54fb5c1eaa0,0xc0734e01384fcf78,1 +np.float64,0xcde5924d9bcb3,0xc0733bf4b83c66c2,1 +np.float64,0x3fc46fdbe528dfb8,0xbfe97f55ef5e9683,1 +np.float64,0x7fe513528a2a26a4,0x4073412c69baceca,1 +np.float64,0x3fd29eca4aa53d95,0xbfe128801cd33ed0,1 +np.float64,0x7febb21718b7642d,0x4073431256def857,1 +np.float64,0x3fcab536c0356a6e,0xbfe5c73c59f41578,1 +np.float64,0x7fc7e9f0d82fd3e1,0x4073386b213e5dfe,1 +np.float64,0xb5b121276b624,0xc0733cd33083941c,1 +np.float64,0x7e0dd9bcfc1bc,0xc0733f5d8bf35050,1 +np.float64,0x3fd1c75106238ea2,0xbfe1cd11cccda0f4,1 +np.float64,0x9f060e673e0c2,0xc0733dc03da71909,1 +np.float64,0x7fd915a2f3322b45,0x40733d912af07189,1 +np.float64,0x3fd8cbae4431975d,0xbfda5b02ca661139,1 +np.float64,0x3fde8b411f3d1682,0xbfd48f6f710a53b6,1 +np.float64,0x3fc17a780622f4f0,0xbfebabb10c55255f,1 +np.float64,0x3fde5cbe5f3cb97d,0xbfd4b9e2e0101fb1,1 +np.float64,0x7fd859036530b206,0x40733d5c2252ff81,1 +np.float64,0xb0f5040f61ea1,0xc0733d02292f527b,1 +np.float64,0x3fde5c49ae3cb893,0xbfd4ba4db3ce2cf3,1 +np.float64,0x3fecc4518df988a3,0xbfa7af0bfc98bc65,1 +np.float64,0x3feffee03cbffdc0,0xbf0f3ede6ca7d695,1 +np.float64,0xbc5eac9b78bd6,0xc0733c92fb51c8ae,1 +np.float64,0x3fe2bb4ef765769e,0xbfcdc4f70a65dadc,1 +np.float64,0x5089443ca1129,0xc073427a7d0cde4a,1 +np.float64,0x3fd0d6e29121adc5,0xbfe28e28ece1db86,1 +np.float64,0xbe171e397c2e4,0xc0733c82cede5d02,1 +np.float64,0x4ede27be9dbc6,0xc073429fba1a4af1,1 +np.float64,0x3fe2aff3af655fe7,0xbfcde6b52a8ed3c1,1 +np.float64,0x7fd85ca295b0b944,0x40733d5d2adcccf1,1 +np.float64,0x24919bba49234,0xc07347f6ed704a6f,1 +np.float64,0x7fd74bc1eeae9783,0x40733d0d94a89011,1 +np.float64,0x3fc1cd12cb239a26,0xbfeb6a9c25c2a11d,1 +np.float64,0x3fdafbc0ac35f781,0xbfd8015ccf1f1b51,1 +np.float64,0x3fee01327c3c0265,0xbf9ca1d0d762dc18,1 +np.float64,0x3fe65bd7702cb7af,0xbfc3ee0de5c36b8d,1 +np.float64,0x7349c82ee693a,0xc0733ffc5b6eccf2,1 +np.float64,0x3fdc5906f738b20e,0xbfd6a26288eb5933,1 +np.float64,0x1,0xc07434e6420f4374,1 +np.float64,0x3fb966128a32cc25,0xbff00e0aa7273838,1 +np.float64,0x3fd501ff9a2a03ff,0xbfdef69133482121,1 +np.float64,0x194d4f3c329ab,0xc0734a861b44cfbe,1 +np.float64,0x3fec5d34f8f8ba6a,0xbfaad1b31510e70b,1 +np.float64,0x1635e4c22c6be,0xc0734b6dec650943,1 +np.float64,0x3fead2f8edb5a5f2,0xbfb39dac30a962cf,1 +np.float64,0x3f7dfa4ce03bf49a,0xc00115a112141aa7,1 +np.float64,0x3fef6827223ed04e,0xbf80a42c9edebfe9,1 +np.float64,0xe771f303cee3f,0xc0733b24a6269fe4,1 +np.float64,0x1160ccc622c1b,0xc0734d22604eacb9,1 +np.float64,0x3fc485cd08290b9a,0xbfe970723008c8c9,1 +np.float64,0x7fef99c518bf3389,0x407343fcf9ed202f,1 
+np.float64,0x7fd8c1447a318288,0x40733d79a440b44d,1 +np.float64,0xaf219f955e434,0xc0733d149c13f440,1 +np.float64,0xcf45f6239e8bf,0xc0733be8ddda045d,1 +np.float64,0x7599394aeb328,0xc0733fd90fdbb0ea,1 +np.float64,0xc7f6390f8fec7,0xc0733c28bfbc66a3,1 +np.float64,0x3fd39ae96c2735d3,0xbfe0712274a8742b,1 +np.float64,0xa4d6c18f49ad8,0xc0733d805a0528f7,1 +np.float64,0x7fd9ea78d7b3d4f1,0x40733dcb2b74802a,1 +np.float64,0x3fecd251cb39a4a4,0xbfa742ed41d4ae57,1 +np.float64,0x7fed7a07cd7af40f,0x407343813476027e,1 +np.float64,0x3fd328ae7f26515d,0xbfe0c30b56a83c64,1 +np.float64,0x7fc937ff7a326ffe,0x407338c9a45b9140,1 +np.float64,0x3fcf1d31143e3a62,0xbfe3a7f760fbd6a8,1 +np.float64,0x7fb911dcbc3223b8,0x407333ee158cccc7,1 +np.float64,0x3fd352fc83a6a5f9,0xbfe0a47d2f74d283,1 +np.float64,0x7fd310753fa620e9,0x40733ba8fc4300dd,1 +np.float64,0x3febd64b4577ac97,0xbfaefd4a79f95c4b,1 +np.float64,0x6a6961a4d4d2d,0xc073408ae1687943,1 +np.float64,0x3fe4ba73d16974e8,0xbfc8239341b9e457,1 +np.float64,0x3fed8e7cac3b1cf9,0xbfa1a96a0cc5fcdc,1 +np.float64,0x7fd505ec04aa0bd7,0x40733c56f86e3531,1 +np.float64,0x3fdf166e9abe2cdd,0xbfd411e5f8569d70,1 +np.float64,0x7fe1bc6434e378c7,0x40733ff9861bdabb,1 +np.float64,0x3fd3b0b175a76163,0xbfe061ba5703f3c8,1 +np.float64,0x7fed75d7ffbaebaf,0x4073438037ba6f19,1 +np.float64,0x5a9e109cb53c3,0xc07341a8b04819c8,1 +np.float64,0x3fe14786b4e28f0d,0xbfd120b541bb880e,1 +np.float64,0x3fed4948573a9291,0xbfa3b471ff91614b,1 +np.float64,0x66aac5d8cd559,0xc07340ca9b18af46,1 +np.float64,0x3fdb48efd23691e0,0xbfd7b24c5694838b,1 +np.float64,0x7fe6da7d1eadb4f9,0x407341bc7d1fae43,1 +np.float64,0x7feb702cf336e059,0x40734301b96cc3c0,1 +np.float64,0x3fd1e60987a3cc13,0xbfe1b522cfcc3d0e,1 +np.float64,0x3feca57f50794aff,0xbfa89dc90625d39c,1 +np.float64,0x7fdc46dc56b88db8,0x40733e664294a0f9,1 +np.float64,0x8dc8fd811b920,0xc0733e8c5955df06,1 +np.float64,0xf01634abe02c7,0xc0733ae370a76d0c,1 +np.float64,0x3fc6f8d8ab2df1b1,0xbfe7df5093829464,1 +np.float64,0xda3d7597b47af,0xc0733b8d2702727a,1 +np.float64,0x7feefd53227dfaa5,0x407343da3d04db28,1 +np.float64,0x3fe2fbca3525f794,0xbfcd06e134417c08,1 +np.float64,0x7fd36d3ce226da79,0x40733bca7c322df1,1 +np.float64,0x7fec37e00b786fbf,0x4073433397b48a5b,1 +np.float64,0x3fbf133f163e267e,0xbfed4e72f1362a77,1 +np.float64,0x3fc11efbb9223df7,0xbfebf53002a561fe,1 +np.float64,0x3fc89c0e5431381d,0xbfe6ea562364bf81,1 +np.float64,0x3f9cd45da839a8bb,0xbff8ceb14669ee4b,1 +np.float64,0x23dc8fa647b93,0xc0734819aaa9b0ee,1 +np.float64,0x3fe829110d305222,0xbfbf3e60c45e2399,1 +np.float64,0x7fed8144e57b0289,0x40734382e917a02a,1 +np.float64,0x7fe033fbf7a067f7,0x40733f58bb00b20f,1 +np.float64,0xe3807f45c7010,0xc0733b43379415d1,1 +np.float64,0x3fd708fb342e11f6,0xbfdc670ef9793782,1 +np.float64,0x3fe88c924b311925,0xbfbd78210d9e7164,1 +np.float64,0x3fe0a2a7c7614550,0xbfd22efaf0472c4a,1 +np.float64,0x7fe3a37501a746e9,0x407340aecaeade41,1 +np.float64,0x3fd05077ec20a0f0,0xbfe2fedbf07a5302,1 +np.float64,0x7fd33bf61da677eb,0x40733bb8c58912aa,1 +np.float64,0x3feb29bdae76537b,0xbfb2384a8f61b5f9,1 +np.float64,0x3fec0fc14ff81f83,0xbfad3423e7ade174,1 +np.float64,0x3fd0f8b1a1a1f163,0xbfe2725dd4ccea8b,1 +np.float64,0x3fe382d26a6705a5,0xbfcb80dba4218bdf,1 +np.float64,0x3fa873f2cc30e7e6,0xbff522911cb34279,1 +np.float64,0x7fed7fd7377affad,0x4073438292f6829b,1 +np.float64,0x3feeacd8067d59b0,0xbf92cdbeda94b35e,1 +np.float64,0x7fe464d62228c9ab,0x407340f1eee19aa9,1 +np.float64,0xe997648bd32ed,0xc0733b143aa0fad3,1 +np.float64,0x7fea4869f13490d3,0x407342b5333b54f7,1 +np.float64,0x935b871926b71,0xc0733e47c6683319,1 
+np.float64,0x28a9d0c05155,0xc0735a7e3532af83,1 +np.float64,0x79026548f204d,0xc0733fa6339ffa2f,1 +np.float64,0x3fdb1daaabb63b55,0xbfd7de839c240ace,1 +np.float64,0x3fc0db73b421b6e7,0xbfec2c6e36c4f416,1 +np.float64,0xb8b50ac1716b,0xc0734ff9fc60ebce,1 +np.float64,0x7fdf13e0c6be27c1,0x40733f0e44f69437,1 +np.float64,0x3fcd0cb97b3a1973,0xbfe49c34ff531273,1 +np.float64,0x3fcbac034b375807,0xbfe54913d73f180d,1 +np.float64,0x3fe091d2a2e123a5,0xbfd24b290a9218de,1 +np.float64,0xede43627dbc87,0xc0733af3c7c7f716,1 +np.float64,0x7fc037e7ed206fcf,0x407335b85fb0fedb,1 +np.float64,0x3fce7ae4c63cf5ca,0xbfe3f1350fe03f28,1 +np.float64,0x7fcdd862263bb0c3,0x407339f5458bb20e,1 +np.float64,0x4d7adf709af5d,0xc07342bf4edfadb2,1 +np.float64,0xdc6c03f3b8d81,0xc0733b7b74d6a635,1 +np.float64,0x3fe72ae0a4ee55c1,0xbfc1f4665608b21f,1 +np.float64,0xcd62f19d9ac5e,0xc0733bf92235e4d8,1 +np.float64,0xe3a7b8fdc74f7,0xc0733b4204f8e166,1 +np.float64,0x3fdafd35adb5fa6b,0xbfd7ffdca0753b36,1 +np.float64,0x3fa023e8702047d1,0xbff8059150ea1464,1 +np.float64,0x99ff336933fe7,0xc0733df961197517,1 +np.float64,0x7feeb365b9bd66ca,0x407343c995864091,1 +np.float64,0x7fe449b49f689368,0x407340e8aa3369e3,1 +np.float64,0x7faf5843043eb085,0x407330aa700136ca,1 +np.float64,0x3fd47b2922a8f652,0xbfdfab3de86f09ee,1 +np.float64,0x7fd9fc3248b3f864,0x40733dcfea6f9b3e,1 +np.float64,0xe20b0d8dc4162,0xc0733b4ea8fe7b3f,1 +np.float64,0x7feff8e0e23ff1c1,0x40734411c490ed70,1 +np.float64,0x7fa58382d02b0705,0x40732e0cf28e14fe,1 +np.float64,0xb8ad9a1b715b4,0xc0733cb630b8f2d4,1 +np.float64,0xe90abcf1d2158,0xc0733b186b04eeee,1 +np.float64,0x7fd6aa6f32ad54dd,0x40733cdccc636604,1 +np.float64,0x3fd8f84eedb1f09e,0xbfda292909a5298a,1 +np.float64,0x7fecd6b1d9f9ad63,0x4073435a472b05b5,1 +np.float64,0x3fd9f47604b3e8ec,0xbfd915e028cbf4a6,1 +np.float64,0x3fd20d9398241b27,0xbfe19691363dd508,1 +np.float64,0x3fe5ed09bbabda13,0xbfc5043dfc9c8081,1 +np.float64,0x7fbe5265363ca4c9,0x407335406f8e4fac,1 +np.float64,0xac2878af5850f,0xc0733d3311be9786,1 +np.float64,0xac2074555840f,0xc0733d3364970018,1 +np.float64,0x3fcd49b96b3a9373,0xbfe47f24c8181d9c,1 +np.float64,0x3fd10caca6a21959,0xbfe2620ae5594f9a,1 +np.float64,0xec5b87e9d8b71,0xc0733aff499e72ca,1 +np.float64,0x9d5e9fad3abd4,0xc0733dd2d70eeb4a,1 +np.float64,0x7fe3d3a24227a744,0x407340bfc2072fdb,1 +np.float64,0x3fc5f7a77c2bef4f,0xbfe87e69d502d784,1 +np.float64,0x33161a66662c4,0xc07345a436308244,1 +np.float64,0xa27acdc744f5a,0xc0733d99feb3d8ea,1 +np.float64,0x3fe2d9301565b260,0xbfcd6c914e204437,1 +np.float64,0x7fd5d111e12ba223,0x40733c98e14a6fd0,1 +np.float64,0x6c3387bed8672,0xc073406d3648171a,1 +np.float64,0x24d89fe849b15,0xc07347e97bec008c,1 +np.float64,0x3fefd763677faec7,0xbf61ae69caa9cad9,1 +np.float64,0x7fe0a4684ba148d0,0x40733f884d32c464,1 +np.float64,0x3fd5c3c939ab8792,0xbfddfaaefc1c7fca,1 +np.float64,0x3fec9b87a6b9370f,0xbfa8eb34efcc6b9b,1 +np.float64,0x3feb062431f60c48,0xbfb2ca6036698877,1 +np.float64,0x3fef97f6633f2fed,0xbf76bc742860a340,1 +np.float64,0x74477490e88ef,0xc0733fed220986bc,1 +np.float64,0x3fe4bea67ce97d4d,0xbfc818525292b0f6,1 +np.float64,0x3fc6add3a92d5ba7,0xbfe80cfdc9a90bda,1 +np.float64,0x847c9ce308f94,0xc0733f05026f5965,1 +np.float64,0x7fea53fd2eb4a7f9,0x407342b841fc4723,1 +np.float64,0x3fc55a16fc2ab42e,0xbfe8e3849130da34,1 +np.float64,0x3fbdf7d07c3befa1,0xbfedcf84b9c6c161,1 +np.float64,0x3fe5fb25aa6bf64b,0xbfc4e083ff96b116,1 +np.float64,0x61c776a8c38ef,0xc0734121611d84d7,1 +np.float64,0x3fec413164f88263,0xbfabadbd05131546,1 +np.float64,0x9bf06fe137e0e,0xc0733de315469ee0,1 
+np.float64,0x2075eefc40ebf,0xc07348cae84de924,1 +np.float64,0x3fdd42e0143a85c0,0xbfd5c0b6f60b3cea,1 +np.float64,0xdbb1ab45b7636,0xc0733b8157329daf,1 +np.float64,0x3feac6d56bf58dab,0xbfb3d00771b28621,1 +np.float64,0x7fb2dc825025b904,0x407331f3e950751a,1 +np.float64,0x3fecea6efd79d4de,0xbfa689309cc0e3fe,1 +np.float64,0x3fd83abec7b0757e,0xbfdaff5c674a9c59,1 +np.float64,0x3fd396f7c0272df0,0xbfe073ee75c414ba,1 +np.float64,0x3fe10036c162006e,0xbfd1945a38342ae1,1 +np.float64,0x3fd5bbded52b77be,0xbfde04cca40d4156,1 +np.float64,0x3fe870945ab0e129,0xbfbdf72f0e6206fa,1 +np.float64,0x3fef72fddcbee5fc,0xbf7ee2dba88b1bad,1 +np.float64,0x4e111aa09c224,0xc07342b1e2b29643,1 +np.float64,0x3fd926d8b5b24db1,0xbfd9f58b78d6b061,1 +np.float64,0x3fc55679172aacf2,0xbfe8e5df687842e2,1 +np.float64,0x7f5f1749803e2e92,0x40731886e16cfc4d,1 +np.float64,0x7fea082b53b41056,0x407342a42227700e,1 +np.float64,0x3fece1d1d039c3a4,0xbfa6cb780988a469,1 +np.float64,0x3b2721d8764e5,0xc073449f6a5a4832,1 +np.float64,0x365cb7006cba,0xc0735879ba5f0b6e,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe606ce92ac0d9c,0x4073417aeebe97e8,1 +np.float64,0x3fe237b544a46f6b,0xbfcf50f8f76d7df9,1 +np.float64,0x3fe7265e5eee4cbd,0xbfc1ff39089ec8d0,1 +np.float64,0x7fe2bb3c5ea57678,0x4073405aaad81cf2,1 +np.float64,0x3fd811df84b023bf,0xbfdb2e670ea8d8de,1 +np.float64,0x3f6a0efd00341dfa,0xc003fac1ae831241,1 +np.float64,0x3fd0d214afa1a429,0xbfe2922080a91c72,1 +np.float64,0x3feca6a350b94d47,0xbfa894eea3a96809,1 +np.float64,0x7fe23e5c76247cb8,0x4073402bbaaf71c7,1 +np.float64,0x3fe739a1fdae7344,0xbfc1d109f66efb5d,1 +np.float64,0x3fdf4b8e283e971c,0xbfd3e28f46169cc5,1 +np.float64,0x38f2535271e4b,0xc07344e3085219fa,1 +np.float64,0x7fd263a0f9a4c741,0x40733b68d945dae0,1 +np.float64,0x7fdd941863bb2830,0x40733eb651e3dca9,1 +np.float64,0xace7279159ce5,0xc0733d2b63b5947e,1 +np.float64,0x7fe34670b2268ce0,0x4073408d92770cb5,1 +np.float64,0x7fd11fa6dfa23f4d,0x40733aea02e76ea3,1 +np.float64,0x3fe6d9cbca6db398,0xbfc2b84b5c8c7eab,1 +np.float64,0x3fd69a0274ad3405,0xbfdcee3c7e52c463,1 +np.float64,0x3feb5af671f6b5ed,0xbfb16f88d739477f,1 +np.float64,0x3feea400163d4800,0xbf934e071c64fd0b,1 +np.float64,0x3fefd6bcf17fad7a,0xbf61f711c392b119,1 +np.float64,0x3fe148d43da291a8,0xbfd11e9cd3f91cd3,1 +np.float64,0x7fedf1308b7be260,0x4073439d135656da,1 +np.float64,0x3fe614c99c6c2993,0xbfc49fd1984dfd6d,1 +np.float64,0xd6e8d4e5add1b,0xc0733ba88256026e,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fb530b5562a616b,0xbff1504bcc5c8f73,1 +np.float64,0xb7da68396fb4d,0xc0733cbe2790f52e,1 +np.float64,0x7fad78e26c3af1c4,0x4073303cdbfb0a15,1 +np.float64,0x7fee5698447cad30,0x407343b474573a8b,1 +np.float64,0x3fd488325c291065,0xbfdf999296d901e7,1 +np.float64,0x2669283a4cd26,0xc073479f823109a4,1 +np.float64,0x7fef3b090afe7611,0x407343e805a3b264,1 +np.float64,0x7fe8b96ae0f172d5,0x4073424874a342ab,1 +np.float64,0x7fef409f56fe813e,0x407343e943c3cd44,1 +np.float64,0x3fed28073dfa500e,0xbfa4b17e4cd31a3a,1 +np.float64,0x7f87ecc4802fd988,0x40732527e027b24b,1 +np.float64,0x3fdda24da0bb449b,0xbfd566a43ac035af,1 +np.float64,0x179fc9e62f3fa,0xc0734b0028c80fc1,1 +np.float64,0x3fef85b0927f0b61,0xbf7ac27565d5ab4f,1 +np.float64,0x5631501aac62b,0xc0734201be12c5d4,1 +np.float64,0x3fd782e424af05c8,0xbfdbd57544f8a7c3,1 +np.float64,0x3fe603a9a6ac0753,0xbfc4caff04dc3caf,1 +np.float64,0x7fbd5225163aa449,0x40733504b88f0a56,1 +np.float64,0x3fecd27506b9a4ea,0xbfa741dd70e6b08c,1 +np.float64,0x9c99603b3932c,0xc0733ddb922dc5db,1 +np.float64,0x3fbeb57f1a3d6afe,0xbfed789ff217aa08,1 
+np.float64,0x3fef9c0f85bf381f,0xbf75d5c3d6cb281a,1 +np.float64,0x3fde4afb613c95f7,0xbfd4ca2a231c9005,1 +np.float64,0x396233d472c47,0xc07344d56ee70631,1 +np.float64,0x3fb31ea1c6263d44,0xbff207356152138d,1 +np.float64,0x3fe50bdf78aa17bf,0xbfc74ae0cbffb735,1 +np.float64,0xef74c701dee99,0xc0733ae81e4bb443,1 +np.float64,0x9a3e13a1347c3,0xc0733df68b60afc7,1 +np.float64,0x33ba4f886774b,0xc073458e03f0c13e,1 +np.float64,0x3fe8ba0e9931741d,0xbfbcaadf974e8f64,1 +np.float64,0x3fe090a4cd61214a,0xbfd24d236cf365d6,1 +np.float64,0x7fd87d992930fb31,0x40733d668b73b820,1 +np.float64,0x3fe6422b296c8456,0xbfc42e070b695d01,1 +np.float64,0x3febe9334677d267,0xbfae667864606cfe,1 +np.float64,0x771a3ce4ee348,0xc0733fc274d12c97,1 +np.float64,0x3fe0413542e0826b,0xbfd2d3b08fb5b8a6,1 +np.float64,0x3fd00870ea2010e2,0xbfe33cc04cbd42e0,1 +np.float64,0x3fe74fb817ae9f70,0xbfc19c45dbf919e1,1 +np.float64,0x40382fa08071,0xc07357514ced5577,1 +np.float64,0xa14968474292d,0xc0733da71a990f3a,1 +np.float64,0x5487c740a90fa,0xc0734224622d5801,1 +np.float64,0x3fed7d8d14fafb1a,0xbfa228f7ecc2ac03,1 +np.float64,0x3fe39bb485e73769,0xbfcb3a235a722960,1 +np.float64,0x3fd01090b2202121,0xbfe335b752589a22,1 +np.float64,0x3fd21a3e7da4347d,0xbfe18cd435a7c582,1 +np.float64,0x3fe7fa855a2ff50b,0xbfc00ab0665709fe,1 +np.float64,0x3fedc0d4577b81a9,0xbfa02fef3ff553fc,1 +np.float64,0x3fe99d4906333a92,0xbfb8bf18220e5e8e,1 +np.float64,0x3fd944ee3c3289dc,0xbfd9d46071675e73,1 +np.float64,0x3fe3ed8d52e7db1b,0xbfca53f8d4aef484,1 +np.float64,0x7fe748623a6e90c3,0x407341dd97c9dd79,1 +np.float64,0x3fea1b4b98343697,0xbfb6a1560a56927f,1 +np.float64,0xe1215715c242b,0xc0733b55dbf1f0a8,1 +np.float64,0x3fd0d5bccca1ab7a,0xbfe28f1b66d7a470,1 +np.float64,0x881a962710353,0xc0733ed51848a30d,1 +np.float64,0x3fcf022afe3e0456,0xbfe3b40eabf24501,1 +np.float64,0x3fdf1ac6bbbe358d,0xbfd40e03e888288d,1 +np.float64,0x3fa51a5eac2a34bd,0xbff628a7c34d51b3,1 +np.float64,0x3fdbaf408d375e81,0xbfd74ad39d97c92a,1 +np.float64,0x3fcd2418ea3a4832,0xbfe4910b009d8b11,1 +np.float64,0x3fc7b3062a2f660c,0xbfe7706dc47993e1,1 +np.float64,0x7fb8232218304643,0x407333aaa7041a9f,1 +np.float64,0x7fd5f186362be30b,0x40733ca32fdf9cc6,1 +np.float64,0x3fe57ef1d6aafde4,0xbfc61e23d00210c7,1 +np.float64,0x7c6830baf8d07,0xc0733f74f19e9dad,1 +np.float64,0xcacbfd5595980,0xc0733c0fb49edca7,1 +np.float64,0x3fdfdeac873fbd59,0xbfd36114c56bed03,1 +np.float64,0x3fd31f0889263e11,0xbfe0ca0cc1250169,1 +np.float64,0x3fe839fbe47073f8,0xbfbef0a2abc3d63f,1 +np.float64,0x3fc36af57e26d5eb,0xbfea3553f38770b7,1 +np.float64,0x3fe73dbc44ee7b79,0xbfc1c738f8fa6b3d,1 +np.float64,0x3fd3760e4da6ec1d,0xbfe08b5b609d11e5,1 +np.float64,0x3fee1cfa297c39f4,0xbf9b06d081bc9d5b,1 +np.float64,0xdfb01561bf61,0xc0734ea55e559888,1 +np.float64,0x687bd01cd0f7b,0xc07340ab67fe1816,1 +np.float64,0x3fefc88f4cbf911f,0xbf6828c359cf19dc,1 +np.float64,0x8ad34adb15a6a,0xc0733eb1e03811e5,1 +np.float64,0x3fe2b49c12e56938,0xbfcdd8dbdbc0ce59,1 +np.float64,0x6e05037adc0a1,0xc073404f91261635,1 +np.float64,0x3fe2fd737fe5fae7,0xbfcd020407ef4d78,1 +np.float64,0x3fd0f3c0dc21e782,0xbfe2766a1ab02eae,1 +np.float64,0x28564d9850acb,0xc073474875f87c5e,1 +np.float64,0x3fe4758015a8eb00,0xbfc8ddb45134a1bd,1 +np.float64,0x7fe7f19306efe325,0x4073420f626141a7,1 +np.float64,0x7fd27f34c0a4fe69,0x40733b733d2a5b50,1 +np.float64,0x92c2366325847,0xc0733e4f04f8195a,1 +np.float64,0x3fc21f8441243f09,0xbfeb2ad23bc1ab0b,1 +np.float64,0x3fc721d3e42e43a8,0xbfe7c69bb47b40c2,1 +np.float64,0x3fe2f11a1625e234,0xbfcd26363b9c36c3,1 +np.float64,0x3fdcb585acb96b0b,0xbfd648446237cb55,1 
+np.float64,0x3fd4060bf2280c18,0xbfe025fd4c8a658b,1 +np.float64,0x7fb8ae2750315c4e,0x407333d23b025d08,1 +np.float64,0x3fe3a03119a74062,0xbfcb2d6c91b38552,1 +np.float64,0x7fdd2af92bba55f1,0x40733e9d737e16e6,1 +np.float64,0x3fe50b05862a160b,0xbfc74d20815fe36b,1 +np.float64,0x164409f82c882,0xc0734b6980e19c03,1 +np.float64,0x3fe4093712a8126e,0xbfca070367fda5e3,1 +np.float64,0xae3049935c609,0xc0733d1e3608797b,1 +np.float64,0x3fd71df4b4ae3be9,0xbfdc4dcb7637600d,1 +np.float64,0x7fca01e8023403cf,0x407339006c521c49,1 +np.float64,0x3fb0c5c43e218b88,0xbff2f03211c63f25,1 +np.float64,0x3fee757af83ceaf6,0xbf95f33a6e56b454,1 +np.float64,0x3f865f1f402cbe3f,0xbfff62d9c9072bd7,1 +np.float64,0x89864e95130ca,0xc0733ec29f1e32c6,1 +np.float64,0x3fe51482bcea2905,0xbfc73414ddc8f1b7,1 +np.float64,0x7fd802f8fa3005f1,0x40733d43684e460a,1 +np.float64,0x3fbeb86ca63d70d9,0xbfed774ccca9b8f5,1 +np.float64,0x3fb355dcc826abba,0xbff1f33f9339e7a3,1 +np.float64,0x3fe506c61eaa0d8c,0xbfc7585a3f7565a6,1 +np.float64,0x7fe393f25ba727e4,0x407340a94bcea73b,1 +np.float64,0xf66f532decdeb,0xc0733ab5041feb0f,1 +np.float64,0x3fe26e872be4dd0e,0xbfceaaab466f32e0,1 +np.float64,0x3fefd9e290bfb3c5,0xbf60977d24496295,1 +np.float64,0x7fe19c5f692338be,0x40733fecef53ad95,1 +np.float64,0x3fe80365ab3006cb,0xbfbfec4090ef76ec,1 +np.float64,0x3fe88ab39eb11567,0xbfbd8099388d054d,1 +np.float64,0x3fe68fb09fad1f61,0xbfc36db9de38c2c0,1 +np.float64,0x3fe9051883b20a31,0xbfbb5b75b8cb8f24,1 +np.float64,0x3fd4708683a8e10d,0xbfdfb9b085dd8a83,1 +np.float64,0x3fe00ac11a601582,0xbfd3316af3e43500,1 +np.float64,0xd16af30ba2d5f,0xc0733bd68e8252f9,1 +np.float64,0x3fb97d654632facb,0xbff007ac1257f575,1 +np.float64,0x7fd637c10fac6f81,0x40733cb949d76546,1 +np.float64,0x7fed2cab6dba5956,0x4073436edfc3764e,1 +np.float64,0x3fed04afbbba095f,0xbfa5bfaa5074b7f4,1 +np.float64,0x0,0xfff0000000000000,1 +np.float64,0x389a1dc671345,0xc07344edd4206338,1 +np.float64,0x3fbc9ba25a393745,0xbfee74c34f49b921,1 +np.float64,0x3feee749947dce93,0xbf8f032d9cf6b5ae,1 +np.float64,0xedc4cf89db89a,0xc0733af4b2a57920,1 +np.float64,0x3fe41629eba82c54,0xbfc9e321faf79e1c,1 +np.float64,0x3feb0bcbf7b61798,0xbfb2b31e5d952869,1 +np.float64,0xad60654b5ac0d,0xc0733d26860df676,1 +np.float64,0x3fe154e1ff22a9c4,0xbfd10b416e58c867,1 +np.float64,0x7fb20e9c8a241d38,0x407331a66453b8bc,1 +np.float64,0x7fcbbaaf7d37755e,0x4073397274f28008,1 +np.float64,0x187d0fbc30fa3,0xc0734ac03cc98cc9,1 +np.float64,0x7fd153afeaa2a75f,0x40733aff00b4311d,1 +np.float64,0x3fe05310a5e0a621,0xbfd2b5386aeecaac,1 +np.float64,0x7fea863b2b750c75,0x407342c57807f700,1 +np.float64,0x3fed5f0c633abe19,0xbfa30f6cfbc4bf94,1 +np.float64,0xf227c8b3e44f9,0xc0733ad42daaec9f,1 +np.float64,0x3fe956524772aca5,0xbfb9f4cabed7081d,1 +np.float64,0xefd11af7dfa24,0xc0733ae570ed2552,1 +np.float64,0x1690fff02d221,0xc0734b51a56c2980,1 +np.float64,0x7fd2e547a825ca8e,0x40733b992d6d9635,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log1p.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log1p.csv new file mode 100644 index 0000000..094e052 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log1p.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3e10aca8,0x3e075347,2 +np.float32,0x3f776e66,0x3f2d2003,2 +np.float32,0xbf34e8ce,0xbf9cfd5c,2 +np.float32,0xbf0260ee,0xbf363f69,2 +np.float32,0x3ed285e8,0x3eb05870,2 +np.float32,0x262b88,0x262b88,2 +np.float32,0x3eeffd6c,0x3ec4cfdb,2 +np.float32,0x3ee86808,0x3ebf9f54,2 
+np.float32,0x3f36eba8,0x3f0a0524,2 +np.float32,0xbf1c047a,0xbf70afc7,2 +np.float32,0x3ead2916,0x3e952902,2 +np.float32,0x61c9c9,0x61c9c9,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0x7f64ee52,0x42b138e0,2 +np.float32,0x7ed00b1e,0x42afa4ff,2 +np.float32,0x3db53340,0x3dada0b2,2 +np.float32,0x3e6b0a4a,0x3e5397a4,2 +np.float32,0x7ed5d64f,0x42afb310,2 +np.float32,0xbf12bc5f,0xbf59f5ee,2 +np.float32,0xbda12710,0xbda7d8b5,2 +np.float32,0xbe2e89d8,0xbe3f5a9f,2 +np.float32,0x3f5bee75,0x3f1ebea4,2 +np.float32,0x9317a,0x9317a,2 +np.float32,0x7ee00130,0x42afcad8,2 +np.float32,0x7ef0d16d,0x42afefe7,2 +np.float32,0xbec7463a,0xbefc6a44,2 +np.float32,0xbf760ecc,0xc04fe59c,2 +np.float32,0xbecacb3c,0xbf011ae3,2 +np.float32,0x3ead92be,0x3e9577f0,2 +np.float32,0xbf41510d,0xbfb41b3a,2 +np.float32,0x7f71d489,0x42b154f1,2 +np.float32,0x8023bcd5,0x8023bcd5,2 +np.float32,0x801d33d8,0x801d33d8,2 +np.float32,0x3f3f545d,0x3f0ee0d4,2 +np.float32,0xbf700682,0xc0318c25,2 +np.float32,0xbe54e990,0xbe6eb0a3,2 +np.float32,0x7f0289bf,0x42b01941,2 +np.float32,0xbd61ac90,0xbd682113,2 +np.float32,0xbf2ff310,0xbf94cd6f,2 +np.float32,0x7f10064a,0x42b04b98,2 +np.float32,0x804d0d6d,0x804d0d6d,2 +np.float32,0x80317b0a,0x80317b0a,2 +np.float32,0xbddfef18,0xbded2640,2 +np.float32,0x3f00c9ab,0x3ed0a5bd,2 +np.float32,0x7f04b905,0x42b021c1,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x6524c4,0x6524c4,2 +np.float32,0x3da08ae0,0x3d9a8f88,2 +np.float32,0x293ea9,0x293ea9,2 +np.float32,0x71499e,0x71499e,2 +np.float32,0xbf14f54d,0xbf5f38a5,2 +np.float32,0x806e60f5,0x806e60f5,2 +np.float32,0x3f5f34bb,0x3f207fff,2 +np.float32,0x80513427,0x80513427,2 +np.float32,0x7f379670,0x42b0c7dc,2 +np.float32,0x3efba888,0x3eccb20b,2 +np.float32,0x3eeadd1b,0x3ec14f4b,2 +np.float32,0x7ec5a27f,0x42af8ab8,2 +np.float32,0x3f2afe4e,0x3f02f7a2,2 +np.float32,0x5591c8,0x5591c8,2 +np.float32,0x3dbb7240,0x3db35bab,2 +np.float32,0x805b911b,0x805b911b,2 +np.float32,0x800000,0x800000,2 +np.float32,0x7e784c04,0x42ae9cab,2 +np.float32,0x7ebaae14,0x42af6d86,2 +np.float32,0xbec84f7a,0xbefe1d42,2 +np.float32,0x7cea8281,0x42aa56bf,2 +np.float32,0xbf542cf6,0xbfe1eb1b,2 +np.float32,0xbf6bfb13,0xc0231a5b,2 +np.float32,0x7d6eeaef,0x42abc32c,2 +np.float32,0xbf062f6b,0xbf3e2000,2 +np.float32,0x8073d8e9,0x8073d8e9,2 +np.float32,0xbea4db14,0xbec6f485,2 +np.float32,0x7d7e8d62,0x42abe3a0,2 +np.float32,0x7e8fc34e,0x42aee7c6,2 +np.float32,0x7dcbb0c3,0x42acd464,2 +np.float32,0x7e123c,0x7e123c,2 +np.float32,0x3d77af62,0x3d707c34,2 +np.float32,0x498cc8,0x498cc8,2 +np.float32,0x7f4e2206,0x42b1032a,2 +np.float32,0x3f734e0a,0x3f2b04a1,2 +np.float32,0x8053a9d0,0x8053a9d0,2 +np.float32,0xbe8a67e0,0xbea15be9,2 +np.float32,0xbf78e0ea,0xc065409e,2 +np.float32,0x352bdd,0x352bdd,2 +np.float32,0x3ee42be7,0x3ebcb38a,2 +np.float32,0x7f482d10,0x42b0f427,2 +np.float32,0xbf23155e,0xbf81b993,2 +np.float32,0x594920,0x594920,2 +np.float32,0x63f53f,0x63f53f,2 +np.float32,0x363592,0x363592,2 +np.float32,0x7dafbb78,0x42ac88cc,2 +np.float32,0x7f69516c,0x42b14298,2 +np.float32,0x3e1d5be2,0x3e126131,2 +np.float32,0x410c23,0x410c23,2 +np.float32,0x7ec9563c,0x42af9439,2 +np.float32,0xbedd3a0e,0xbf10d705,2 +np.float32,0x7f7c4f1f,0x42b16aa8,2 +np.float32,0xbe99b34e,0xbeb6c2d3,2 +np.float32,0x6cdc84,0x6cdc84,2 +np.float32,0x5b3bbe,0x5b3bbe,2 +np.float32,0x252178,0x252178,2 +np.float32,0x7d531865,0x42ab83c8,2 +np.float32,0xbf565b44,0xbfe873bf,2 +np.float32,0x5977ce,0x5977ce,2 +np.float32,0x588a58,0x588a58,2 +np.float32,0x3eae7054,0x3e961d51,2 +np.float32,0x725049,0x725049,2 +np.float32,0x7f2b9386,0x42b0a538,2 
+np.float32,0xbe674714,0xbe831245,2 +np.float32,0x8044f0d8,0x8044f0d8,2 +np.float32,0x800a3c21,0x800a3c21,2 +np.float32,0x807b275b,0x807b275b,2 +np.float32,0xbf2463b6,0xbf83896e,2 +np.float32,0x801cca42,0x801cca42,2 +np.float32,0xbf28f2d0,0xbf8a121a,2 +np.float32,0x3f4168c2,0x3f1010ce,2 +np.float32,0x6f91a1,0x6f91a1,2 +np.float32,0xbf2b9eeb,0xbf8e0fc5,2 +np.float32,0xbea4c858,0xbec6d8e4,2 +np.float32,0xbf7abba0,0xc0788e88,2 +np.float32,0x802f18f7,0x802f18f7,2 +np.float32,0xbf7f6c75,0xc0c3145c,2 +np.float32,0xbe988210,0xbeb50f5e,2 +np.float32,0xbf219b7e,0xbf7f6a3b,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x7f7fffff,0x42b17218,2 +np.float32,0xbdca8d90,0xbdd5487e,2 +np.float32,0xbef683b0,0xbf2821b0,2 +np.float32,0x8043e648,0x8043e648,2 +np.float32,0xbf4319a4,0xbfb7cd1b,2 +np.float32,0x62c2b2,0x62c2b2,2 +np.float32,0xbf479ccd,0xbfc1a7b1,2 +np.float32,0x806c8a32,0x806c8a32,2 +np.float32,0x7f004447,0x42b01045,2 +np.float32,0x3f737d36,0x3f2b1ccf,2 +np.float32,0x3ee71f24,0x3ebebced,2 +np.float32,0x3ea0b6b4,0x3e8bc606,2 +np.float32,0x358fd7,0x358fd7,2 +np.float32,0xbe69780c,0xbe847d17,2 +np.float32,0x7f6bed18,0x42b14849,2 +np.float32,0xbf6a5113,0xc01dfe1d,2 +np.float32,0xbf255693,0xbf84de88,2 +np.float32,0x7f34acac,0x42b0bfac,2 +np.float32,0xbe8a3b6a,0xbea11efe,2 +np.float32,0x3f470d84,0x3f1342ab,2 +np.float32,0xbf2cbde3,0xbf8fc602,2 +np.float32,0x47c103,0x47c103,2 +np.float32,0xe3c94,0xe3c94,2 +np.float32,0xbec07afa,0xbef1693a,2 +np.float32,0x6a9cfe,0x6a9cfe,2 +np.float32,0xbe4339e0,0xbe5899da,2 +np.float32,0x7ea9bf1e,0x42af3cd6,2 +np.float32,0x3f6378b4,0x3f22c4c4,2 +np.float32,0xbd989ff0,0xbd9e9c77,2 +np.float32,0xbe6f2f50,0xbe88343d,2 +np.float32,0x3f7f2ac5,0x3f310764,2 +np.float32,0x3f256704,0x3eff2fb2,2 +np.float32,0x80786aca,0x80786aca,2 +np.float32,0x65d02f,0x65d02f,2 +np.float32,0x50d1c3,0x50d1c3,2 +np.float32,0x3f4a9d76,0x3f1541b4,2 +np.float32,0x802cf491,0x802cf491,2 +np.float32,0x3e935cec,0x3e81829b,2 +np.float32,0x3e2ad478,0x3e1dfd81,2 +np.float32,0xbf107cbd,0xbf54bef2,2 +np.float32,0xbf58c02e,0xbff007fe,2 +np.float32,0x80090808,0x80090808,2 +np.float32,0x805d1f66,0x805d1f66,2 +np.float32,0x6aec95,0x6aec95,2 +np.float32,0xbee3fc6e,0xbf16dc73,2 +np.float32,0x7f63314b,0x42b134f9,2 +np.float32,0x550443,0x550443,2 +np.float32,0xbefa8174,0xbf2c026e,2 +np.float32,0x3f7fb380,0x3f314bd5,2 +np.float32,0x80171f2c,0x80171f2c,2 +np.float32,0x3f2f56ae,0x3f058f2d,2 +np.float32,0x3eacaecb,0x3e94cd97,2 +np.float32,0xbe0c4f0c,0xbe16e69d,2 +np.float32,0x3f48e4cb,0x3f144b42,2 +np.float32,0x7f03efe2,0x42b01eb7,2 +np.float32,0xbf1019ac,0xbf53dbe9,2 +np.float32,0x3e958524,0x3e832eb5,2 +np.float32,0xbf1b23c6,0xbf6e72f2,2 +np.float32,0x12c554,0x12c554,2 +np.float32,0x7dee588c,0x42ad24d6,2 +np.float32,0xbe8c216c,0xbea3ba70,2 +np.float32,0x804553cb,0x804553cb,2 +np.float32,0xbe446324,0xbe5a0966,2 +np.float32,0xbef7150a,0xbf28adff,2 +np.float32,0xbf087282,0xbf42ec6e,2 +np.float32,0x3eeef15c,0x3ec41937,2 +np.float32,0x61bbd2,0x61bbd2,2 +np.float32,0x3e51b28d,0x3e3ec538,2 +np.float32,0x57e869,0x57e869,2 +np.float32,0x7e5e7711,0x42ae646c,2 +np.float32,0x8050b173,0x8050b173,2 +np.float32,0xbf63c90c,0xc00d2438,2 +np.float32,0xbeba774c,0xbee7dcf8,2 +np.float32,0x8016faac,0x8016faac,2 +np.float32,0xbe8b448c,0xbea28aaf,2 +np.float32,0x3e8cd448,0x3e78d29e,2 +np.float32,0x80484e02,0x80484e02,2 +np.float32,0x3f63ba68,0x3f22e78c,2 +np.float32,0x2e87bb,0x2e87bb,2 +np.float32,0x230496,0x230496,2 +np.float32,0x1327b2,0x1327b2,2 +np.float32,0xbf046c56,0xbf3a72d2,2 +np.float32,0x3ecefe60,0x3eadd69a,2 
+np.float32,0x49c56e,0x49c56e,2 +np.float32,0x3df22d60,0x3de4e550,2 +np.float32,0x3f67c19d,0x3f250707,2 +np.float32,0x3f20eb9c,0x3ef9b624,2 +np.float32,0x3f05ca75,0x3ed742fa,2 +np.float32,0xbe8514f8,0xbe9a1d45,2 +np.float32,0x8070a003,0x8070a003,2 +np.float32,0x7e49650e,0x42ae317a,2 +np.float32,0x3de16ce9,0x3dd5dc3e,2 +np.float32,0xbf4ae952,0xbfc95f1f,2 +np.float32,0xbe44dd84,0xbe5aa0db,2 +np.float32,0x803c3bc0,0x803c3bc0,2 +np.float32,0x3eebb9e8,0x3ec1e692,2 +np.float32,0x80588275,0x80588275,2 +np.float32,0xbea1e69a,0xbec29d86,2 +np.float32,0x3f7b4bf8,0x3f2f154c,2 +np.float32,0x7eb47ecc,0x42af5c46,2 +np.float32,0x3d441e00,0x3d3f911a,2 +np.float32,0x7f54d40e,0x42b11388,2 +np.float32,0xbf47f17e,0xbfc26882,2 +np.float32,0x3ea7da57,0x3e912db4,2 +np.float32,0x3f59cc7b,0x3f1d984e,2 +np.float32,0x570e08,0x570e08,2 +np.float32,0x3e99560c,0x3e8620a2,2 +np.float32,0x3ecfbd14,0x3eae5e55,2 +np.float32,0x7e86be08,0x42aec698,2 +np.float32,0x3f10f28a,0x3ee5b5d3,2 +np.float32,0x7f228722,0x42b0897a,2 +np.float32,0x3f4b979b,0x3f15cd30,2 +np.float32,0xbf134283,0xbf5b30f9,2 +np.float32,0x3f2ae16a,0x3f02e64f,2 +np.float32,0x3e98e158,0x3e85c6cc,2 +np.float32,0x7ec39f27,0x42af857a,2 +np.float32,0x3effedb0,0x3ecf8cea,2 +np.float32,0xbd545620,0xbd5a09c1,2 +np.float32,0x503a28,0x503a28,2 +np.float32,0x3f712744,0x3f29e9a1,2 +np.float32,0x3edc6194,0x3eb748b1,2 +np.float32,0xbf4ec1e5,0xbfd2ff5f,2 +np.float32,0x3f46669e,0x3f12e4b5,2 +np.float32,0xabad3,0xabad3,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x803f2e6d,0x803f2e6d,2 +np.float32,0xbf431542,0xbfb7c3e6,2 +np.float32,0x3f6f2d53,0x3f28e496,2 +np.float32,0x546bd8,0x546bd8,2 +np.float32,0x25c80a,0x25c80a,2 +np.float32,0x3e50883c,0x3e3dcd7e,2 +np.float32,0xbf5fa2ba,0xc0045c14,2 +np.float32,0x80271c07,0x80271c07,2 +np.float32,0x8043755d,0x8043755d,2 +np.float32,0xbf3c5cea,0xbfaa5ee9,2 +np.float32,0x3f2fea38,0x3f05e6af,2 +np.float32,0x6da3dc,0x6da3dc,2 +np.float32,0xbf095945,0xbf44dc70,2 +np.float32,0xbe33d584,0xbe45c1f5,2 +np.float32,0x7eb41b2e,0x42af5b2b,2 +np.float32,0xbf0feb74,0xbf537242,2 +np.float32,0xbe96225a,0xbeb1b0b1,2 +np.float32,0x3f63b95f,0x3f22e700,2 +np.float32,0x0,0x0,2 +np.float32,0x3e20b0cc,0x3e154374,2 +np.float32,0xbf79880c,0xc06b6801,2 +np.float32,0xbea690b6,0xbec97b93,2 +np.float32,0xbf3e11ca,0xbfada449,2 +np.float32,0x7e7e6292,0x42aea912,2 +np.float32,0x3e793350,0x3e5f0b7b,2 +np.float32,0x802e7183,0x802e7183,2 +np.float32,0x3f1b3695,0x3ef2a788,2 +np.float32,0x801efa20,0x801efa20,2 +np.float32,0x3f1ec43a,0x3ef70f42,2 +np.float32,0xbf12c5ed,0xbf5a0c52,2 +np.float32,0x8005e99c,0x8005e99c,2 +np.float32,0xbf79f5e7,0xc06fcca5,2 +np.float32,0x3ecbaf50,0x3eab7a03,2 +np.float32,0x46b0fd,0x46b0fd,2 +np.float32,0x3edb9023,0x3eb6b631,2 +np.float32,0x7f24bc41,0x42b09063,2 +np.float32,0xbd8d9328,0xbd92b4c6,2 +np.float32,0x3f2c5d7f,0x3f03c9d9,2 +np.float32,0x807bebc9,0x807bebc9,2 +np.float32,0x7f797a99,0x42b164e2,2 +np.float32,0x756e3c,0x756e3c,2 +np.float32,0x80416f8a,0x80416f8a,2 +np.float32,0x3e0d512a,0x3e04611a,2 +np.float32,0x3f7be3e6,0x3f2f61ec,2 +np.float32,0x80075c41,0x80075c41,2 +np.float32,0xbe850294,0xbe9a046c,2 +np.float32,0x684679,0x684679,2 +np.float32,0x3eb393c4,0x3e99eed2,2 +np.float32,0x3f4177c6,0x3f10195b,2 +np.float32,0x3dd1f402,0x3dc7dfe5,2 +np.float32,0x3ef484d4,0x3ec7e2e1,2 +np.float32,0x53eb8f,0x53eb8f,2 +np.float32,0x7f072cb6,0x42b02b20,2 +np.float32,0xbf1b6b55,0xbf6f28d4,2 +np.float32,0xbd8a98d8,0xbd8f827d,2 +np.float32,0x3eafb418,0x3e970e96,2 +np.float32,0x6555af,0x6555af,2 +np.float32,0x7dd5118e,0x42aceb6f,2 
+np.float32,0x800a13f7,0x800a13f7,2 +np.float32,0x331a9d,0x331a9d,2 +np.float32,0x8063773f,0x8063773f,2 +np.float32,0x3e95e068,0x3e837553,2 +np.float32,0x80654b32,0x80654b32,2 +np.float32,0x3dabe0e0,0x3da50bb3,2 +np.float32,0xbf6283c3,0xc00a5280,2 +np.float32,0x80751cc5,0x80751cc5,2 +np.float32,0x3f668eb6,0x3f2465c0,2 +np.float32,0x3e13c058,0x3e0a048c,2 +np.float32,0x77780c,0x77780c,2 +np.float32,0x3f7d6e48,0x3f302868,2 +np.float32,0x7e31f9e3,0x42adf22f,2 +np.float32,0x246c7b,0x246c7b,2 +np.float32,0xbe915bf0,0xbeaafa6c,2 +np.float32,0xbf800000,0xff800000,2 +np.float32,0x3f698f42,0x3f25f8e0,2 +np.float32,0x7e698885,0x42ae7d48,2 +np.float32,0x3f5bbd42,0x3f1ea42c,2 +np.float32,0x5b8444,0x5b8444,2 +np.float32,0xbf6065f6,0xc005e2c6,2 +np.float32,0xbeb95036,0xbee60dad,2 +np.float32,0xbf44f846,0xbfbbcade,2 +np.float32,0xc96e5,0xc96e5,2 +np.float32,0xbf213e90,0xbf7e6eae,2 +np.float32,0xbeb309cc,0xbedc4fe6,2 +np.float32,0xbe781cf4,0xbe8e0fe6,2 +np.float32,0x7f0cf0db,0x42b04083,2 +np.float32,0xbf7b6143,0xc08078f9,2 +np.float32,0x80526fc6,0x80526fc6,2 +np.float32,0x3f092bf3,0x3edbaeec,2 +np.float32,0x3ecdf154,0x3ead16df,2 +np.float32,0x2fe85b,0x2fe85b,2 +np.float32,0xbf5100a0,0xbfd8f871,2 +np.float32,0xbec09d40,0xbef1a028,2 +np.float32,0x5e6a85,0x5e6a85,2 +np.float32,0xbec0e2a0,0xbef20f6b,2 +np.float32,0x3f72e788,0x3f2ad00d,2 +np.float32,0x880a6,0x880a6,2 +np.float32,0x3d9e90bf,0x3d98b9fc,2 +np.float32,0x15cf25,0x15cf25,2 +np.float32,0x10171b,0x10171b,2 +np.float32,0x805cf1aa,0x805cf1aa,2 +np.float32,0x3f19bd36,0x3ef0d0d2,2 +np.float32,0x3ebe2bda,0x3ea1b774,2 +np.float32,0xbecd8192,0xbf035c49,2 +np.float32,0x3e2ce508,0x3e1fc21b,2 +np.float32,0x290f,0x290f,2 +np.float32,0x803b679f,0x803b679f,2 +np.float32,0x1,0x1,2 +np.float32,0x807a9c76,0x807a9c76,2 +np.float32,0xbf65fced,0xc01257f8,2 +np.float32,0x3f783414,0x3f2d8475,2 +np.float32,0x3f2d9d92,0x3f0488da,2 +np.float32,0xbddb5798,0xbde80018,2 +np.float32,0x3e91afb8,0x3e8034e7,2 +np.float32,0xbf1b775a,0xbf6f476d,2 +np.float32,0xbf73a32c,0xc041f3ba,2 +np.float32,0xbea39364,0xbec5121b,2 +np.float32,0x80375b94,0x80375b94,2 +np.float32,0x3f331252,0x3f07c3e9,2 +np.float32,0xbf285774,0xbf892e74,2 +np.float32,0x3e699bb8,0x3e526d55,2 +np.float32,0x3f08208a,0x3eda523a,2 +np.float32,0xbf42fb4a,0xbfb78d60,2 +np.float32,0x8029c894,0x8029c894,2 +np.float32,0x3e926c0c,0x3e80c76e,2 +np.float32,0x801e4715,0x801e4715,2 +np.float32,0x3e4b36d8,0x3e395ffd,2 +np.float32,0x8041556b,0x8041556b,2 +np.float32,0xbf2d99ba,0xbf9119bd,2 +np.float32,0x3ed83ea8,0x3eb46250,2 +np.float32,0xbe94a280,0xbeaf92b4,2 +np.float32,0x7f4c7a64,0x42b0ff0a,2 +np.float32,0x806d4022,0x806d4022,2 +np.float32,0xbed382f8,0xbf086d26,2 +np.float32,0x1846fe,0x1846fe,2 +np.float32,0xbe702558,0xbe88d4d8,2 +np.float32,0xbe650ee0,0xbe81a3cc,2 +np.float32,0x3ee9d088,0x3ec0970c,2 +np.float32,0x7f6d4498,0x42b14b30,2 +np.float32,0xbef9f9e6,0xbf2b7ddb,2 +np.float32,0xbf70c384,0xc0349370,2 +np.float32,0xbeff9e9e,0xbf3110c8,2 +np.float32,0xbef06372,0xbf224aa9,2 +np.float32,0xbf15a692,0xbf60e1fa,2 +np.float32,0x8058c117,0x8058c117,2 +np.float32,0xbd9f74b8,0xbda6017b,2 +np.float32,0x801bf130,0x801bf130,2 +np.float32,0x805da84c,0x805da84c,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0xbeb01de2,0xbed7d6d6,2 +np.float32,0x8077de08,0x8077de08,2 +np.float32,0x3e327668,0x3e2482c1,2 +np.float32,0xbe7add88,0xbe8fe1ab,2 +np.float32,0x805a3c2e,0x805a3c2e,2 +np.float32,0x80326a73,0x80326a73,2 +np.float32,0x800b8a34,0x800b8a34,2 +np.float32,0x8048c83a,0x8048c83a,2 +np.float32,0xbf3799d6,0xbfa1a975,2 
+np.float32,0x807649c7,0x807649c7,2 +np.float32,0x3dfdbf90,0x3def3798,2 +np.float32,0xbf1b538a,0xbf6eec4c,2 +np.float32,0xbf1e5989,0xbf76baa0,2 +np.float32,0xc7a80,0xc7a80,2 +np.float32,0x8001be54,0x8001be54,2 +np.float32,0x3f435bbc,0x3f112c6d,2 +np.float32,0xbeabcff8,0xbed151d1,2 +np.float32,0x7de20c78,0x42ad09b7,2 +np.float32,0x3f0e6d2e,0x3ee27b1e,2 +np.float32,0xbf0cb352,0xbf4c3267,2 +np.float32,0x7f6ec06f,0x42b14e61,2 +np.float32,0x7f6fa8ef,0x42b15053,2 +np.float32,0xbf3d2a6a,0xbfabe623,2 +np.float32,0x7f077a4c,0x42b02c46,2 +np.float32,0xbf2a68dc,0xbf8c3cc4,2 +np.float32,0x802a5dbe,0x802a5dbe,2 +np.float32,0x807f631c,0x807f631c,2 +np.float32,0x3dc9b8,0x3dc9b8,2 +np.float32,0x3ebdc1b7,0x3ea16a0a,2 +np.float32,0x7ef29dab,0x42aff3b5,2 +np.float32,0x3e8ab1cc,0x3e757806,2 +np.float32,0x3f27e88e,0x3f011c6d,2 +np.float32,0x3cfd1455,0x3cf93fb5,2 +np.float32,0x7f7eebf5,0x42b16fef,2 +np.float32,0x3c9b2140,0x3c99ade9,2 +np.float32,0x7e928601,0x42aef183,2 +np.float32,0xbd7d2db0,0xbd82abae,2 +np.float32,0x3e6f0df3,0x3e56da20,2 +np.float32,0x7d36a2fc,0x42ab39a3,2 +np.float32,0xbf49d3a2,0xbfc6c859,2 +np.float32,0x7ee541d3,0x42afd6b6,2 +np.float32,0x80753dc0,0x80753dc0,2 +np.float32,0x3f4ce486,0x3f16865d,2 +np.float32,0x39e701,0x39e701,2 +np.float32,0x3f3d9ede,0x3f0de5fa,2 +np.float32,0x7fafb2,0x7fafb2,2 +np.float32,0x3e013fdc,0x3df37090,2 +np.float32,0x807b6a2c,0x807b6a2c,2 +np.float32,0xbe86800a,0xbe9c08c7,2 +np.float32,0x7f40f080,0x42b0e14d,2 +np.float32,0x7eef5afe,0x42afecc8,2 +np.float32,0x7ec30052,0x42af83da,2 +np.float32,0x3eacf768,0x3e9503e1,2 +np.float32,0x7f13ef0e,0x42b0594e,2 +np.float32,0x80419f4a,0x80419f4a,2 +np.float32,0xbf485932,0xbfc3562a,2 +np.float32,0xbe8a24d6,0xbea10011,2 +np.float32,0xbda791c0,0xbdaed2bc,2 +np.float32,0x3e9b5169,0x3e87a67d,2 +np.float32,0x807dd882,0x807dd882,2 +np.float32,0x7f40170e,0x42b0df0a,2 +np.float32,0x7f02f7f9,0x42b01af1,2 +np.float32,0x3ea38bf9,0x3e8decde,2 +np.float32,0x3e2e7ce8,0x3e211ed4,2 +np.float32,0x70a7a6,0x70a7a6,2 +np.float32,0x7d978592,0x42ac3ce7,2 +np.float32,0x804d12d0,0x804d12d0,2 +np.float32,0x80165dc8,0x80165dc8,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0x3e325da0,0x3e246da6,2 +np.float32,0xbe063bb8,0xbe0fe281,2 +np.float32,0x160b8,0x160b8,2 +np.float32,0xbe5687a4,0xbe70bbef,2 +np.float32,0x7f11ab34,0x42b05168,2 +np.float32,0xc955c,0xc955c,2 +np.float32,0xbea0003a,0xbebfd826,2 +np.float32,0x3f7fbdd9,0x3f315102,2 +np.float32,0xbe61aefc,0xbe7ef121,2 +np.float32,0xbf1b9873,0xbf6f9bc3,2 +np.float32,0x3a6d14,0x3a6d14,2 +np.float32,0xbf1ad3b4,0xbf6da808,2 +np.float32,0x3ed2dd24,0x3eb0963d,2 +np.float32,0xbe81a4ca,0xbe957d52,2 +np.float32,0x7f1be3e9,0x42b07421,2 +np.float32,0x7f5ce943,0x42b1269e,2 +np.float32,0x7eebcbdf,0x42afe51d,2 +np.float32,0x807181b5,0x807181b5,2 +np.float32,0xbecb03ba,0xbf0149ad,2 +np.float32,0x42edb8,0x42edb8,2 +np.float32,0xbf3aeec8,0xbfa7b13f,2 +np.float32,0xbd0c4f00,0xbd0ec4a0,2 +np.float32,0x3e48d260,0x3e376070,2 +np.float32,0x1a9731,0x1a9731,2 +np.float32,0x7f323be4,0x42b0b8b5,2 +np.float32,0x1a327f,0x1a327f,2 +np.float32,0x17f1fc,0x17f1fc,2 +np.float32,0xbf2f4f9b,0xbf93c91a,2 +np.float32,0x3ede8934,0x3eb8c9c3,2 +np.float32,0xbf56aaac,0xbfe968bb,2 +np.float32,0x3e22cb5a,0x3e17148c,2 +np.float32,0x7d9def,0x7d9def,2 +np.float32,0x8045b963,0x8045b963,2 +np.float32,0x77404f,0x77404f,2 +np.float32,0x7e2c9efb,0x42ade28b,2 +np.float32,0x8058ad89,0x8058ad89,2 +np.float32,0x7f4139,0x7f4139,2 +np.float32,0x8020e12a,0x8020e12a,2 +np.float32,0x800c9daa,0x800c9daa,2 +np.float32,0x7f2c5ac5,0x42b0a789,2 
+np.float32,0x3f04a47b,0x3ed5c043,2 +np.float32,0x804692d5,0x804692d5,2 +np.float32,0xbf6e7fa4,0xc02bb493,2 +np.float32,0x80330756,0x80330756,2 +np.float32,0x7f3e29ad,0x42b0d9e1,2 +np.float32,0xbebf689a,0xbeefb24d,2 +np.float32,0x3f29a86c,0x3f022a56,2 +np.float32,0x3e3bd1c0,0x3e2c72b3,2 +np.float32,0x3f78f2e8,0x3f2de546,2 +np.float32,0x3f3709be,0x3f0a16af,2 +np.float32,0x3e11f150,0x3e086f97,2 +np.float32,0xbf5867ad,0xbfeee8a0,2 +np.float32,0xbebfb328,0xbef0296c,2 +np.float32,0x2f7f15,0x2f7f15,2 +np.float32,0x805cfe84,0x805cfe84,2 +np.float32,0xbf504e01,0xbfd71589,2 +np.float32,0x3ee0903c,0x3eba330c,2 +np.float32,0xbd838990,0xbd87f399,2 +np.float32,0x3f14444e,0x3ee9ee7d,2 +np.float32,0x7e352583,0x42adfb3a,2 +np.float32,0x7e76f824,0x42ae99ec,2 +np.float32,0x3f772d00,0x3f2cfebf,2 +np.float32,0x801f7763,0x801f7763,2 +np.float32,0x3f760bf5,0x3f2c6b87,2 +np.float32,0xbf0bb696,0xbf4a03a5,2 +np.float32,0x3f175d2c,0x3eedd6d2,2 +np.float32,0xbf5723f8,0xbfeae288,2 +np.float32,0x24de0a,0x24de0a,2 +np.float32,0x3cd73f80,0x3cd47801,2 +np.float32,0x7f013305,0x42b013fa,2 +np.float32,0x3e3ad425,0x3e2b9c50,2 +np.float32,0x7d3d16,0x7d3d16,2 +np.float32,0x3ef49738,0x3ec7ef54,2 +np.float32,0x3f5b8612,0x3f1e8678,2 +np.float32,0x7f0eeb5c,0x42b047a7,2 +np.float32,0x7e9d7cb0,0x42af1675,2 +np.float32,0xbdd1cfb0,0xbddd5aa0,2 +np.float32,0xbf645dba,0xc00e78fe,2 +np.float32,0x3f511174,0x3f18d56c,2 +np.float32,0x3d91ad00,0x3d8cba62,2 +np.float32,0x805298da,0x805298da,2 +np.float32,0xbedb6af4,0xbf0f4090,2 +np.float32,0x3d23b1ba,0x3d208205,2 +np.float32,0xbea5783e,0xbec7dc87,2 +np.float32,0x79d191,0x79d191,2 +np.float32,0x3e894413,0x3e7337da,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0xbf34a8d3,0xbf9c907b,2 +np.float32,0x3bae779a,0x3bae011f,2 +np.float32,0x8049284d,0x8049284d,2 +np.float32,0x3eb42cc4,0x3e9a600b,2 +np.float32,0x3da1e2d0,0x3d9bce5f,2 +np.float32,0x3f364b8a,0x3f09a7af,2 +np.float32,0x3d930b10,0x3d8e0118,2 +np.float32,0x8061f8d7,0x8061f8d7,2 +np.float32,0x3f473213,0x3f13573b,2 +np.float32,0x3f1e2a38,0x3ef65102,2 +np.float32,0x8068f7d9,0x8068f7d9,2 +np.float32,0x3f181ef8,0x3eeeca2c,2 +np.float32,0x3eeb6168,0x3ec1a9f5,2 +np.float32,0xc2db6,0xc2db6,2 +np.float32,0x3ef7b578,0x3eca0a69,2 +np.float32,0xbf5b5a84,0xbff8d075,2 +np.float32,0x7f479d5f,0x42b0f2b7,2 +np.float32,0x3e6f3c24,0x3e56ff92,2 +np.float32,0x3f45543a,0x3f1249f0,2 +np.float32,0xbea7c1fa,0xbecb40d2,2 +np.float32,0x7de082,0x7de082,2 +np.float32,0x383729,0x383729,2 +np.float32,0xbd91cb90,0xbd973eb3,2 +np.float32,0x7f320218,0x42b0b80f,2 +np.float32,0x5547f2,0x5547f2,2 +np.float32,0x291fe4,0x291fe4,2 +np.float32,0xbe078ba0,0xbe11655f,2 +np.float32,0x7e0c0658,0x42ad7764,2 +np.float32,0x7e129a2b,0x42ad8ee5,2 +np.float32,0x3f7c96d4,0x3f2fbc0c,2 +np.float32,0x3f800000,0x3f317218,2 +np.float32,0x7f131754,0x42b05662,2 +np.float32,0x15f833,0x15f833,2 +np.float32,0x80392ced,0x80392ced,2 +np.float32,0x3f7c141a,0x3f2f7a36,2 +np.float32,0xbf71c03f,0xc038dcfd,2 +np.float32,0xbe14fb2c,0xbe20fff3,2 +np.float32,0xbee0bac6,0xbf13f14c,2 +np.float32,0x801a32dd,0x801a32dd,2 +np.float32,0x8e12d,0x8e12d,2 +np.float32,0x3f48c606,0x3f143a04,2 +np.float32,0x7f418af5,0x42b0e2e6,2 +np.float32,0x3f1f2918,0x3ef78bb7,2 +np.float32,0x11141b,0x11141b,2 +np.float32,0x3e9fc9e8,0x3e8b11ad,2 +np.float32,0xbea5447a,0xbec79010,2 +np.float32,0xbe31d904,0xbe4359db,2 +np.float32,0x80184667,0x80184667,2 +np.float32,0xbf00503c,0xbf3212c2,2 +np.float32,0x3e0328cf,0x3df6d425,2 +np.float32,0x7ee8e1b7,0x42afdebe,2 +np.float32,0xbef95e24,0xbf2ae5db,2 +np.float32,0x7f3e4eed,0x42b0da45,2 
+np.float32,0x3f43ee85,0x3f117fa0,2 +np.float32,0xbcfa2ac0,0xbcfe10fe,2 +np.float32,0x80162774,0x80162774,2 +np.float32,0x372e8b,0x372e8b,2 +np.float32,0x3f263802,0x3f0016b0,2 +np.float32,0x8008725f,0x8008725f,2 +np.float32,0x800beb40,0x800beb40,2 +np.float32,0xbe93308e,0xbead8a77,2 +np.float32,0x3d8a4240,0x3d85cab8,2 +np.float32,0x80179de0,0x80179de0,2 +np.float32,0x7f4a98f2,0x42b0fa4f,2 +np.float32,0x3f0d214e,0x3ee0cff1,2 +np.float32,0x80536c2c,0x80536c2c,2 +np.float32,0x7e7038ed,0x42ae8bbe,2 +np.float32,0x7f345af9,0x42b0bec4,2 +np.float32,0xbf243219,0xbf83442f,2 +np.float32,0x7e0d5555,0x42ad7c27,2 +np.float32,0x762e95,0x762e95,2 +np.float32,0x7ebf4548,0x42af79f6,2 +np.float32,0x8079639e,0x8079639e,2 +np.float32,0x3ef925c0,0x3ecb0260,2 +np.float32,0x3f708695,0x3f2996d6,2 +np.float32,0xfca9f,0xfca9f,2 +np.float32,0x8060dbf4,0x8060dbf4,2 +np.float32,0x4c8840,0x4c8840,2 +np.float32,0xbea922ee,0xbecd4ed5,2 +np.float32,0xbf4f28a9,0xbfd40b98,2 +np.float32,0xbe25ad48,0xbe34ba1b,2 +np.float32,0x3f2fb254,0x3f05c58c,2 +np.float32,0x3f73bcc2,0x3f2b3d5f,2 +np.float32,0xbf479a07,0xbfc1a165,2 +np.float32,0xbeb9a808,0xbee69763,2 +np.float32,0x7eb16a65,0x42af5376,2 +np.float32,0xbeb3e442,0xbedda042,2 +np.float32,0x3d8f439c,0x3d8a79ac,2 +np.float32,0x80347516,0x80347516,2 +np.float32,0x3e8a0c5d,0x3e74738c,2 +np.float32,0xbf0383a4,0xbf389289,2 +np.float32,0x806be8f5,0x806be8f5,2 +np.float32,0x8023f0c5,0x8023f0c5,2 +np.float32,0x2060e9,0x2060e9,2 +np.float32,0xbf759eba,0xc04d239f,2 +np.float32,0x3d84cc5a,0x3d80ab96,2 +np.float32,0xbf57746b,0xbfebdf87,2 +np.float32,0x3e418417,0x3e31401f,2 +np.float32,0xaecce,0xaecce,2 +np.float32,0x3cd1766f,0x3cced45c,2 +np.float32,0x53724a,0x53724a,2 +np.float32,0x3f773710,0x3f2d03de,2 +np.float32,0x8013d040,0x8013d040,2 +np.float32,0x4d0eb2,0x4d0eb2,2 +np.float32,0x8014364a,0x8014364a,2 +np.float32,0x7f3c56c9,0x42b0d4f2,2 +np.float32,0x3eee1e1c,0x3ec3891a,2 +np.float32,0xbdda3eb8,0xbde6c5a0,2 +np.float32,0x26ef4a,0x26ef4a,2 +np.float32,0x7ed3370c,0x42afacbf,2 +np.float32,0xbf06e31b,0xbf3f9ab7,2 +np.float32,0xbe3185f0,0xbe42f556,2 +np.float32,0x3dcf9abe,0x3dc5be41,2 +np.float32,0xbf3696d9,0xbf9fe2bd,2 +np.float32,0x3e68ee50,0x3e51e01a,2 +np.float32,0x3f3d4cc2,0x3f0db6ca,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xbf03070c,0xbf3792d0,2 +np.float32,0x3ea79e6c,0x3e910092,2 +np.float32,0xbf1a393a,0xbf6c2251,2 +np.float32,0x3f41eb0e,0x3f105afc,2 +np.float32,0x3ceadb2f,0x3ce78d79,2 +np.float32,0xbf5dc105,0xc000be2c,2 +np.float32,0x7ebb5a0e,0x42af6f5c,2 +np.float32,0xbf7c44eb,0xc0875058,2 +np.float32,0x6aaaf4,0x6aaaf4,2 +np.float32,0x807d8f23,0x807d8f23,2 +np.float32,0xbee6b142,0xbf194fef,2 +np.float32,0xbe83f256,0xbe989526,2 +np.float32,0x7d588e,0x7d588e,2 +np.float32,0x7cc80131,0x42aa0542,2 +np.float32,0x3e0ab198,0x3e02124f,2 +np.float32,0xbf6e64db,0xc02b52eb,2 +np.float32,0x3d238b56,0x3d205d1b,2 +np.float32,0xbeb408e2,0xbeddd8bc,2 +np.float32,0x3f78340d,0x3f2d8471,2 +np.float32,0x806162a3,0x806162a3,2 +np.float32,0x804e484f,0x804e484f,2 +np.float32,0xbeb8c576,0xbee53466,2 +np.float32,0x807aab15,0x807aab15,2 +np.float32,0x3f523e20,0x3f197ab8,2 +np.float32,0xbf009190,0xbf3295de,2 +np.float32,0x3df43da5,0x3de6bd82,2 +np.float32,0x7f639aea,0x42b135e6,2 +np.float32,0x3f1e638a,0x3ef697da,2 +np.float32,0xbf4884de,0xbfc3bac3,2 +np.float32,0xbe9336b6,0xbead931b,2 +np.float32,0x6daf7f,0x6daf7f,2 +np.float32,0xbf1fc152,0xbf7a70b1,2 +np.float32,0x3f103720,0x3ee4c649,2 +np.float32,0x3eeaa227,0x3ec126df,2 +np.float32,0x7f7ea945,0x42b16f69,2 +np.float32,0x3d3cd800,0x3d389ead,2 
+np.float32,0x3f3d7268,0x3f0dcc6e,2 +np.float32,0xbf3c1b41,0xbfa9e2e3,2 +np.float32,0x3ecf3818,0x3eadffb2,2 +np.float32,0x3f1af312,0x3ef25372,2 +np.float32,0x48fae4,0x48fae4,2 +np.float64,0x7fedaa1ee4fb543d,0x40862da7ca7c308e,1 +np.float64,0x8007d2d810efa5b1,0x8007d2d810efa5b1,1 +np.float64,0x3fc385e069270bc0,0x3fc22b8884cf2c3b,1 +np.float64,0x68ed4130d1da9,0x68ed4130d1da9,1 +np.float64,0x8008e93e58d1d27d,0x8008e93e58d1d27d,1 +np.float64,0xbfd3d62852a7ac50,0xbfd7be3a7ad1af02,1 +np.float64,0xbfc1fa0ba923f418,0xbfc35f0f19447df7,1 +np.float64,0xbfe01b8cec20371a,0xbfe6658c7e6c8e50,1 +np.float64,0xbfeda81a147b5034,0xc004e9c94f2b91c1,1 +np.float64,0xbfe1c36a97e386d5,0xbfe9ead4d6beaa92,1 +np.float64,0x3fe50be51f2a17ca,0x3fe02c8067d9e5c5,1 +np.float64,0x3febed4d3337da9a,0x3fe413956466134f,1 +np.float64,0x80068ea59ced1d4c,0x80068ea59ced1d4c,1 +np.float64,0x3febe77d5877cefb,0x3fe4107ac088bc71,1 +np.float64,0x800ae77617d5ceed,0x800ae77617d5ceed,1 +np.float64,0x3fd0546b60a0a8d7,0x3fcd16c2e995ab23,1 +np.float64,0xbfe33e1476667c29,0xbfed6d7faec4db2f,1 +np.float64,0x3fe9d2fd51b3a5fb,0x3fe2eef834310219,1 +np.float64,0x8004249878284932,0x8004249878284932,1 +np.float64,0xbfd5b485c72b690c,0xbfda828ccc6a7a5c,1 +np.float64,0x7fcd6e6b6b3adcd6,0x408622807f04768e,1 +np.float64,0x3fd7f9c32caff386,0x3fd45d024514b8da,1 +np.float64,0x7f87eb9d702fd73a,0x40860aa99fcff27f,1 +np.float64,0xbfc5d1f6fb2ba3ec,0xbfc7ec367cb3fecc,1 +np.float64,0x8008316a44d062d5,0x8008316a44d062d5,1 +np.float64,0xbfd54e4358aa9c86,0xbfd9e889d2998a4a,1 +np.float64,0xda65facdb4cc0,0xda65facdb4cc0,1 +np.float64,0x3fc5b4f6f32b69f0,0x3fc40d13aa8e248b,1 +np.float64,0x3fd825a5d5b04b4c,0x3fd47ce73e04d3ff,1 +np.float64,0x7ac9d56ef593b,0x7ac9d56ef593b,1 +np.float64,0xbfd0a51977214a32,0xbfd34702071428be,1 +np.float64,0x3fd21f620b243ec4,0x3fcfea0c02193640,1 +np.float64,0x3fe6fb3f1b2df67e,0x3fe151ffb18c983b,1 +np.float64,0x700de022e01bd,0x700de022e01bd,1 +np.float64,0xbfbb76b81236ed70,0xbfbd0d31deea1ec7,1 +np.float64,0x3fecfc3856f9f870,0x3fe4a2fcadf221e0,1 +np.float64,0x3fede286517bc50c,0x3fe51af2fbd6ef63,1 +np.float64,0x7fdc8da96c391b52,0x408627ce09cfef2b,1 +np.float64,0x8000edfcfb81dbfb,0x8000edfcfb81dbfb,1 +np.float64,0x8009ebc42af3d789,0x8009ebc42af3d789,1 +np.float64,0x7fd658aaf8acb155,0x408625d80cd1ccc9,1 +np.float64,0x3feea584a37d4b09,0x3fe57f29a73729cd,1 +np.float64,0x4cfe494699fca,0x4cfe494699fca,1 +np.float64,0xbfe9d96460b3b2c9,0xbffa62ecfa026c77,1 +np.float64,0x7fdb3852c3b670a5,0x4086276c191dc9b1,1 +np.float64,0xbfe4d1fc9ee9a3f9,0xbff0d37ce37cf479,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0xbfd1c43d7fa3887a,0xbfd4cfbefb5f2c43,1 +np.float64,0x3fec4a8e0d78951c,0x3fe4453a82ca2570,1 +np.float64,0x7fafed74583fdae8,0x4086181017b8dac9,1 +np.float64,0x80076c4ebcced89e,0x80076c4ebcced89e,1 +np.float64,0x8001a9aa7b235356,0x8001a9aa7b235356,1 +np.float64,0x121260fe2424d,0x121260fe2424d,1 +np.float64,0x3fddd028e3bba052,0x3fd87998c4c43c5b,1 +np.float64,0x800ed1cf4a9da39f,0x800ed1cf4a9da39f,1 +np.float64,0xbfef2e63d7fe5cc8,0xc00d53480b16971b,1 +np.float64,0xbfedde3309fbbc66,0xc005ab55b7a7c127,1 +np.float64,0x3fda3e1e85b47c3d,0x3fd5fddafd8d6729,1 +np.float64,0x8007c6443c6f8c89,0x8007c6443c6f8c89,1 +np.float64,0xbfe101705f2202e0,0xbfe8420817665121,1 +np.float64,0x7fe0bff3c1e17fe7,0x4086291539c56d80,1 +np.float64,0x7fe6001dab6c003a,0x40862b43aa7cb060,1 +np.float64,0x7fbdecf7de3bd9ef,0x40861d170b1c51a5,1 +np.float64,0xbfc0fd508c21faa0,0xbfc23a5876e99fa3,1 +np.float64,0xbfcf6eb14f3edd64,0xbfd208cbf742c8ea,1 
+np.float64,0x3f6d40ea403a81d5,0x3f6d33934ab8e799,1 +np.float64,0x7fc32600b6264c00,0x40861f10302357e0,1 +np.float64,0x3fd05870baa0b0e0,0x3fcd1d2af420fac7,1 +np.float64,0x80051d5120aa3aa3,0x80051d5120aa3aa3,1 +np.float64,0x3fdb783fcfb6f080,0x3fd6db229658c083,1 +np.float64,0x3fe0b61199e16c24,0x3fdae41e277be2eb,1 +np.float64,0x3daf62167b5ed,0x3daf62167b5ed,1 +np.float64,0xbfec3c53b6f878a7,0xc0011f0ce7a78a2a,1 +np.float64,0x800fc905161f920a,0x800fc905161f920a,1 +np.float64,0x3fdc7b9cc138f73a,0x3fd78f9c2360e661,1 +np.float64,0x7fe4079e97a80f3c,0x40862a83795f2443,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x7fe6da5345adb4a6,0x40862b9183c1e4b0,1 +np.float64,0xbfd0a76667214ecc,0xbfd34a1e0c1f6186,1 +np.float64,0x37fb0b906ff62,0x37fb0b906ff62,1 +np.float64,0x7fe170e59fa2e1ca,0x408629680a55e5c5,1 +np.float64,0x3fea900c77752019,0x3fe356eec75aa345,1 +np.float64,0x3fc575c63a2aeb8c,0x3fc3d701167d76b5,1 +np.float64,0x3fe8b45da87168bc,0x3fe24ecbb778fd44,1 +np.float64,0xbfcb990ab5373214,0xbfcf1596c076813c,1 +np.float64,0xf146fdfbe28e0,0xf146fdfbe28e0,1 +np.float64,0x8001fcd474c3f9aa,0x8001fcd474c3f9aa,1 +np.float64,0xbfe9b555eeb36aac,0xbffa0630c3bb485b,1 +np.float64,0x800f950be83f2a18,0x800f950be83f2a18,1 +np.float64,0x7feb0e03ab761c06,0x40862ceb30e36887,1 +np.float64,0x7fca51bd4a34a37a,0x4086219b9dfd35c9,1 +np.float64,0xbfdc27c34cb84f86,0xbfe28ccde8d6bc08,1 +np.float64,0x80009ce1714139c4,0x80009ce1714139c4,1 +np.float64,0x8005290fb1ea5220,0x8005290fb1ea5220,1 +np.float64,0xbfee81e6473d03cd,0xc00885972ca1699b,1 +np.float64,0x7fcfb11a373f6233,0x408623180b8f75d9,1 +np.float64,0xbfcb9c4bfd373898,0xbfcf19bd25881928,1 +np.float64,0x7feaec5885f5d8b0,0x40862ce136050e6c,1 +np.float64,0x8009e17a4a53c2f5,0x8009e17a4a53c2f5,1 +np.float64,0xbfe1cceb9e6399d7,0xbfea0038bd3def20,1 +np.float64,0x8009170bd7122e18,0x8009170bd7122e18,1 +np.float64,0xb2b6f7f1656df,0xb2b6f7f1656df,1 +np.float64,0x3fc75bfd1f2eb7f8,0x3fc574c858332265,1 +np.float64,0x3fa24c06ec249800,0x3fa1fa462ffcb8ec,1 +np.float64,0xaa9a4d2d5534a,0xaa9a4d2d5534a,1 +np.float64,0xbfd7b76208af6ec4,0xbfdda0c3200dcc9f,1 +np.float64,0x7f8cbab73039756d,0x40860c20cba57a94,1 +np.float64,0x3fdbcf9f48b79f3f,0x3fd71827a60e8b6d,1 +np.float64,0xbfdd60f71a3ac1ee,0xbfe3a94bc8cf134d,1 +np.float64,0xb9253589724a7,0xb9253589724a7,1 +np.float64,0xbfcf28e37e3e51c8,0xbfd1da9977b741e3,1 +np.float64,0x80011457f7e228b1,0x80011457f7e228b1,1 +np.float64,0x7fec33df737867be,0x40862d404a897122,1 +np.float64,0xae55f8f95cabf,0xae55f8f95cabf,1 +np.float64,0xbfc1ab9397235728,0xbfc303e5533d4a5f,1 +np.float64,0x7fef0f84b3be1f08,0x40862e05f9ba7118,1 +np.float64,0x7fdc94f328b929e5,0x408627d01449d825,1 +np.float64,0x3fee1b598c7c36b3,0x3fe53847be166834,1 +np.float64,0x3fee8326f37d064e,0x3fe56d96f3fbcf43,1 +np.float64,0x3fe7b18a83ef6316,0x3fe1bb6a6d48c675,1 +np.float64,0x3fe5db969c6bb72e,0x3fe0a8d7d151996c,1 +np.float64,0x3e3391d27c673,0x3e3391d27c673,1 +np.float64,0x3fe79a46d76f348e,0x3fe1ae09a96ea628,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe57d6505aafac9,0x40862b13925547f1,1 +np.float64,0x3fc433371d28666e,0x3fc2c196a764c47b,1 +np.float64,0x8008dbf69cd1b7ee,0x8008dbf69cd1b7ee,1 +np.float64,0xbfe744f459ee89e8,0xbff4c847ad3ee152,1 +np.float64,0x80098aa245331545,0x80098aa245331545,1 +np.float64,0x6747112ece8e3,0x6747112ece8e3,1 +np.float64,0x5d342a40ba69,0x5d342a40ba69,1 +np.float64,0xf7a17739ef42f,0xf7a17739ef42f,1 +np.float64,0x3fe1b34a9d236695,0x3fdc2d7c4e2c347a,1 +np.float64,0x7fb53bf5ec2a77eb,0x40861a585ec8f7ff,1 
+np.float64,0xbfe6256f1cec4ade,0xbff2d89a36be65ae,1 +np.float64,0xb783bc9b6f078,0xb783bc9b6f078,1 +np.float64,0xbfedf74a3bfbee94,0xc0060bb6f2bc11ef,1 +np.float64,0x3fda2a5eccb454be,0x3fd5efd7f18b8e81,1 +np.float64,0xbfb3838ab2270718,0xbfb44c337fbca3c3,1 +np.float64,0x3fb4ac6dc22958e0,0x3fb3e194ca01a502,1 +np.float64,0x76c11aaaed824,0x76c11aaaed824,1 +np.float64,0x80025bb1af04b764,0x80025bb1af04b764,1 +np.float64,0x3fdc02740ab804e8,0x3fd73b8cd6f95f19,1 +np.float64,0x3fe71856f5ee30ae,0x3fe162e9fafb4428,1 +np.float64,0x800236f332646de7,0x800236f332646de7,1 +np.float64,0x7fe13fd9d2e27fb3,0x408629516b42a317,1 +np.float64,0x7fdf6bbd34bed779,0x40862892069d805c,1 +np.float64,0x3fd4727beba8e4f8,0x3fd1be5b48d9e282,1 +np.float64,0x800e0fac9e5c1f59,0x800e0fac9e5c1f59,1 +np.float64,0xfb54423ff6a89,0xfb54423ff6a89,1 +np.float64,0x800fbf7ed47f7efe,0x800fbf7ed47f7efe,1 +np.float64,0x3fe9d41fa2f3a840,0x3fe2ef98dc1fd463,1 +np.float64,0x800d733e805ae67d,0x800d733e805ae67d,1 +np.float64,0x3feebe4c46fd7c98,0x3fe58bcf7f47264e,1 +np.float64,0x7fe1ab77b5e356ee,0x40862982bb3dce34,1 +np.float64,0xbfdddac05abbb580,0xbfe41aa45f72d5a2,1 +np.float64,0x3fe14219dee28434,0x3fdb9b137d1f1220,1 +np.float64,0x3fe25d3d5a24ba7b,0x3fdd06e1cf32d35a,1 +np.float64,0x8000fa4fbe81f4a0,0x8000fa4fbe81f4a0,1 +np.float64,0x3fe303e23e6607c4,0x3fddd94982efa9f1,1 +np.float64,0x3fe89cf5d83139ec,0x3fe24193a2e12f75,1 +np.float64,0x3fe9b36ef87366de,0x3fe2dd7cdc25a4a5,1 +np.float64,0xbfdb8b38f8371672,0xbfe2023ba7e002bb,1 +np.float64,0xafc354955f86b,0xafc354955f86b,1 +np.float64,0xbfe2f3d49e65e7a9,0xbfecb557a94123d3,1 +np.float64,0x800496617c092cc4,0x800496617c092cc4,1 +np.float64,0x32db0cfa65b62,0x32db0cfa65b62,1 +np.float64,0xbfd893bfa2b12780,0xbfdf02a8c1e545aa,1 +np.float64,0x7fd5ac927d2b5924,0x408625997e7c1f9b,1 +np.float64,0x3fde9defb8bd3be0,0x3fd9056190986349,1 +np.float64,0x80030cfeb54619fe,0x80030cfeb54619fe,1 +np.float64,0x3fcba85b273750b8,0x3fc90a5ca976594f,1 +np.float64,0x3fe98f6f5cf31edf,0x3fe2c97fcb4eca25,1 +np.float64,0x3fe33dbf90667b80,0x3fde21b83321b993,1 +np.float64,0x3fe4686636e8d0cc,0x3fdf928cdca751b3,1 +np.float64,0x80018ade6ce315be,0x80018ade6ce315be,1 +np.float64,0x7fa9af70c8335ee1,0x408616528cd5a906,1 +np.float64,0x3fbeb460aa3d68c0,0x3fbcff96b00a2193,1 +np.float64,0x7fa82c869830590c,0x408615d6598d9368,1 +np.float64,0xd08c0e6fa1182,0xd08c0e6fa1182,1 +np.float64,0x3fef4eb750fe9d6f,0x3fe5d522fd4e7f64,1 +np.float64,0xbfc586f5492b0dec,0xbfc791eaae92aad1,1 +np.float64,0x7fede64ac7bbcc95,0x40862db7f444fa7b,1 +np.float64,0x3fe540003d6a8000,0x3fe04bdfc2916a0b,1 +np.float64,0x8009417fe6f28300,0x8009417fe6f28300,1 +np.float64,0x3fe6959cf16d2b3a,0x3fe116a1ce01887b,1 +np.float64,0x3fb0a40036214800,0x3fb01f447778219a,1 +np.float64,0x3feff26e91ffe4dd,0x3fe627798fc859a7,1 +np.float64,0x7fed8e46cd7b1c8d,0x40862da044a1d102,1 +np.float64,0x7fec4eb774f89d6e,0x40862d47e43edb53,1 +np.float64,0x3fe800e5e07001cc,0x3fe1e8e2b9105fc2,1 +np.float64,0x800f4eb2f9be9d66,0x800f4eb2f9be9d66,1 +np.float64,0x800611659bcc22cc,0x800611659bcc22cc,1 +np.float64,0x3fd66e65d2acdccc,0x3fd33ad63a5e1000,1 +np.float64,0x800a9085b7f5210c,0x800a9085b7f5210c,1 +np.float64,0x7fdf933a3fbf2673,0x4086289c0e292f2b,1 +np.float64,0x1cd1ba7a39a38,0x1cd1ba7a39a38,1 +np.float64,0xbfefd0b10fffa162,0xc0149ded900ed851,1 +np.float64,0xbfe8c63485b18c69,0xbff7cf3078b1574f,1 +np.float64,0x3fecde56ca79bcae,0x3fe4934afbd7dda9,1 +np.float64,0x8006cd6888cd9ad2,0x8006cd6888cd9ad2,1 +np.float64,0x3fd7a391c2af4724,0x3fd41e2f74df2329,1 +np.float64,0x3fe6a8ad58ed515a,0x3fe121ccfb28e6f5,1 
+np.float64,0x7fe18a80dd631501,0x40862973c09086b9,1 +np.float64,0xbf74fd6d8029fb00,0xbf750b3e368ebe6b,1 +np.float64,0x3fdd35e93dba6bd4,0x3fd810071faaffad,1 +np.float64,0x3feb0d8f57361b1f,0x3fe39b3abdef8b7a,1 +np.float64,0xbfd5ec7288abd8e6,0xbfdad764df0d2ca1,1 +np.float64,0x7fdc848272b90904,0x408627cb78f3fb9e,1 +np.float64,0x800ed3eda91da7db,0x800ed3eda91da7db,1 +np.float64,0x3fefac64857f58c9,0x3fe60459dbaad1ba,1 +np.float64,0x3fd1df7a5ba3bef4,0x3fcf864a39b926ff,1 +np.float64,0xfe26ca4bfc4da,0xfe26ca4bfc4da,1 +np.float64,0xbfd1099f8da21340,0xbfd3cf6e6efe934b,1 +np.float64,0xbfe15de9a7a2bbd4,0xbfe909cc895f8795,1 +np.float64,0x3fe89714ed712e2a,0x3fe23e40d31242a4,1 +np.float64,0x800387113e470e23,0x800387113e470e23,1 +np.float64,0x3fe4f80730e9f00e,0x3fe0208219314cf1,1 +np.float64,0x2f95a97c5f2b6,0x2f95a97c5f2b6,1 +np.float64,0x800ea7cdd87d4f9c,0x800ea7cdd87d4f9c,1 +np.float64,0xbf64b967c0297300,0xbf64c020a145b7a5,1 +np.float64,0xbfc5a91a342b5234,0xbfc7bafd77a61d81,1 +np.float64,0xbfe2226fe76444e0,0xbfeac33eb1d1b398,1 +np.float64,0x3fc6aaa8d42d5552,0x3fc4de79f5c68cd4,1 +np.float64,0x3fe54fd4c1ea9faa,0x3fe05561a9a5922b,1 +np.float64,0x80029c1f75653840,0x80029c1f75653840,1 +np.float64,0xbfcb4a84a2369508,0xbfceb1a23bac3995,1 +np.float64,0x80010abeff02157f,0x80010abeff02157f,1 +np.float64,0x7f92d12cf825a259,0x40860e49bde3a5b6,1 +np.float64,0x800933e7027267ce,0x800933e7027267ce,1 +np.float64,0x3fc022b12e204562,0x3fbe64acc53ed887,1 +np.float64,0xbfe35f938de6bf27,0xbfedc1f3e443c016,1 +np.float64,0x1f8d9bae3f1b4,0x1f8d9bae3f1b4,1 +np.float64,0x3fe552f22ceaa5e4,0x3fe057404072350f,1 +np.float64,0xbfa73753442e6ea0,0xbfa7c24a100190f1,1 +np.float64,0x7fb3e2982827c52f,0x408619d1efa676b6,1 +np.float64,0xbfd80cb7a5301970,0xbfde28e65f344f33,1 +np.float64,0xbfcde835973bd06c,0xbfd10806fba46c8f,1 +np.float64,0xbfd4e3c749a9c78e,0xbfd949aff65de39c,1 +np.float64,0x3fcb4b9d6f36973b,0x3fc8be02ad6dc0d3,1 +np.float64,0x1a63000034c7,0x1a63000034c7,1 +np.float64,0x7fdc9c751e3938e9,0x408627d22df71959,1 +np.float64,0x3fd74f3f712e9e7f,0x3fd3e07df0c37ec1,1 +np.float64,0xbfceab74d33d56e8,0xbfd187e99bf82903,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfb2cca466259948,0xbfb3868208e8de30,1 +np.float64,0x800204688b8408d2,0x800204688b8408d2,1 +np.float64,0x3e4547407c8aa,0x3e4547407c8aa,1 +np.float64,0xbfe4668846e8cd10,0xbff03c85189f3818,1 +np.float64,0x800dd350245ba6a0,0x800dd350245ba6a0,1 +np.float64,0xbfbc13c160382780,0xbfbdbd56ce996d16,1 +np.float64,0x7fe25a628a24b4c4,0x408629d06eb2d64d,1 +np.float64,0x3fd19dabbc233b57,0x3fcf1f3ed1d34c8c,1 +np.float64,0x547e20faa8fc5,0x547e20faa8fc5,1 +np.float64,0xbfe19392c6232726,0xbfe97ffe4f303335,1 +np.float64,0x3f87f9f6702ff400,0x3f87d64fb471bb04,1 +np.float64,0x9dfc52db3bf8b,0x9dfc52db3bf8b,1 +np.float64,0x800e1f5a9adc3eb5,0x800e1f5a9adc3eb5,1 +np.float64,0xbfddbd09c8bb7a14,0xbfe3fed7d7cffc70,1 +np.float64,0xbfeda71af87b4e36,0xc004e6631c514544,1 +np.float64,0xbfdbfcfe1bb7f9fc,0xbfe266b5d4a56265,1 +np.float64,0x3fe4ee78cd69dcf2,0x3fe01abba4e81fc9,1 +np.float64,0x800f13b820de2770,0x800f13b820de2770,1 +np.float64,0x3f861e09702c3c00,0x3f85ffae83b02c4f,1 +np.float64,0xbfc0972479212e48,0xbfc1c4bf70b30cbc,1 +np.float64,0x7fef057ef57e0afd,0x40862e036479f6a9,1 +np.float64,0x8bdbabe517b76,0x8bdbabe517b76,1 +np.float64,0xbfec495417f892a8,0xc0013ade88746d18,1 +np.float64,0x3fec680ab3f8d015,0x3fe454dd304b560d,1 +np.float64,0xbfae7ce60c3cf9d0,0xbfaf6eef15bbe56b,1 +np.float64,0x3fec314124786282,0x3fe437ca06294f5a,1 +np.float64,0x7fd5ed05b82bda0a,0x408625b125518e58,1 
+np.float64,0x3feac9f02f3593e0,0x3fe3768104dd5cb7,1 +np.float64,0x0,0x0,1 +np.float64,0xbfddd2abd5bba558,0xbfe41312b8ea20de,1 +np.float64,0xbfedf9558c7bf2ab,0xc00613c53e0bb33a,1 +np.float64,0x3fef245ffefe48c0,0x3fe5bfb4dfe3b7a5,1 +np.float64,0x7fe178604922f0c0,0x4086296b77d5eaef,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x7fed026766ba04ce,0x40862d7a0dc45643,1 +np.float64,0xbfde27d8c3bc4fb2,0xbfe46336b6447697,1 +np.float64,0x3fe9485d9cb290bb,0x3fe2a1e4b6419423,1 +np.float64,0xbfe27b8a7464f715,0xbfeb9382f5b16f65,1 +np.float64,0x5c34d274b869b,0x5c34d274b869b,1 +np.float64,0xbfeee0b7453dc16f,0xc00acdb46459b6e6,1 +np.float64,0x7fe3dfb4d4e7bf69,0x40862a73785fdf12,1 +np.float64,0xb4635eef68c6c,0xb4635eef68c6c,1 +np.float64,0xbfe522a2c82a4546,0xbff148912a59a1d6,1 +np.float64,0x8009ba38a9737472,0x8009ba38a9737472,1 +np.float64,0xbfc056ff3820ae00,0xbfc17b2205fa180d,1 +np.float64,0x7fe1c8b8a0239170,0x4086298feeee6133,1 +np.float64,0x3fe2d2c6b9e5a58e,0x3fdd9b907471031b,1 +np.float64,0x3fa0a161bc2142c0,0x3fa05db36f6a073b,1 +np.float64,0x3fdef4268ebde84c,0x3fd93f980794d1e7,1 +np.float64,0x800ecd9fe2fd9b40,0x800ecd9fe2fd9b40,1 +np.float64,0xbfc9fbd45e33f7a8,0xbfcd0afc47c340f6,1 +np.float64,0x3fe8c3035b718606,0x3fe2570eb65551a1,1 +np.float64,0xbfe78c4ad2ef1896,0xbff54d25b3328742,1 +np.float64,0x8006f5dcf8adebbb,0x8006f5dcf8adebbb,1 +np.float64,0x800301dca2a603ba,0x800301dca2a603ba,1 +np.float64,0xad4289e55a851,0xad4289e55a851,1 +np.float64,0x80037764f9e6eecb,0x80037764f9e6eecb,1 +np.float64,0xbfe73575b26e6aec,0xbff4abfb5e985c62,1 +np.float64,0xbfc6cb91652d9724,0xbfc91a8001b33ec2,1 +np.float64,0xbfe3a918ffe75232,0xbfee7e6e4fd34c53,1 +np.float64,0x9bc84e2b3790a,0x9bc84e2b3790a,1 +np.float64,0x7fdeec303cbdd85f,0x408628714a49d996,1 +np.float64,0x3fe1d1dcb763a3ba,0x3fdc54ce060dc7f4,1 +np.float64,0x8008ae6432b15cc9,0x8008ae6432b15cc9,1 +np.float64,0x3fd8022fa2b00460,0x3fd46322bf02a609,1 +np.float64,0xbfc55b64472ab6c8,0xbfc75d9568f462e0,1 +np.float64,0xbfe8b165437162ca,0xbff7a15e2ead645f,1 +np.float64,0x7f759330feeb3,0x7f759330feeb3,1 +np.float64,0xbfd504f68eaa09ee,0xbfd97b06c01d7473,1 +np.float64,0x54702d5aa8e06,0x54702d5aa8e06,1 +np.float64,0xbfed1779337a2ef2,0xc0032f7109ef5a51,1 +np.float64,0xe248bd4dc4918,0xe248bd4dc4918,1 +np.float64,0xbfd8c59150318b22,0xbfdf53bca6ca8b1e,1 +np.float64,0xbfe3b9d942e773b2,0xbfeea9fcad277ba7,1 +np.float64,0x800934ec127269d9,0x800934ec127269d9,1 +np.float64,0xbfbb7f535a36fea8,0xbfbd16d61b6c52b8,1 +np.float64,0xccb185a199631,0xccb185a199631,1 +np.float64,0x3fe3dda76fe7bb4e,0x3fdee83bc6094301,1 +np.float64,0xbfe0c902f5e19206,0xbfe7ca7c0e888006,1 +np.float64,0xbfefeed08cbfdda1,0xc018aadc483c8724,1 +np.float64,0x7fd0c05c52a180b8,0x40862389daf64aac,1 +np.float64,0xbfd28e3323a51c66,0xbfd5e9ba278fb685,1 +np.float64,0xbef4103b7de82,0xbef4103b7de82,1 +np.float64,0x3fe7661fd12ecc40,0x3fe18ff7dfb696e2,1 +np.float64,0x3fddd5f2f0bbabe4,0x3fd87d8bb6719c3b,1 +np.float64,0x800b3914cfd6722a,0x800b3914cfd6722a,1 +np.float64,0xf3f09a97e7e14,0xf3f09a97e7e14,1 +np.float64,0x7f97092b502e1256,0x40860fe8054cf54e,1 +np.float64,0xbfdbec7917b7d8f2,0xbfe2580b4b792c79,1 +np.float64,0x7fe7ff215aaffe42,0x40862bf5887fa062,1 +np.float64,0x80080186e570030e,0x80080186e570030e,1 +np.float64,0xbfc27f05e624fe0c,0xbfc3fa214be4adc4,1 +np.float64,0x3fe4481be1689038,0x3fdf6b11e9c4ca72,1 +np.float64,0x3fd642cc9cac8598,0x3fd31a857fe70227,1 +np.float64,0xbef8782d7df0f,0xbef8782d7df0f,1 +np.float64,0x8003077dc2e60efc,0x8003077dc2e60efc,1 +np.float64,0x80083eb5a2507d6c,0x80083eb5a2507d6c,1 
+np.float64,0x800e8d1eb77d1a3e,0x800e8d1eb77d1a3e,1 +np.float64,0xbfc7737cd22ee6f8,0xbfc9e7716f03f1fc,1 +np.float64,0xbfe9a2b4ddf3456a,0xbff9d71664a8fc78,1 +np.float64,0x7fe67c7d322cf8f9,0x40862b7066465194,1 +np.float64,0x3fec080ce2b8101a,0x3fe421dac225be46,1 +np.float64,0xbfe6d27beb6da4f8,0xbff3fbb1add521f7,1 +np.float64,0x3fdd4f96ceba9f2e,0x3fd821a638986dbe,1 +np.float64,0x3fbd89f1303b13e2,0x3fbbf49223a9d002,1 +np.float64,0xbfe94e2b9d329c57,0xbff907e549c534f5,1 +np.float64,0x3fe2f2cc51e5e599,0x3fddc3d6b4a834a1,1 +np.float64,0xfdcb5b49fb96c,0xfdcb5b49fb96c,1 +np.float64,0xbfea7108fa74e212,0xbffc01b392f4897b,1 +np.float64,0x3fd38baef7a7175c,0x3fd10e7fd3b958dd,1 +np.float64,0x3fa75bf9cc2eb800,0x3fa6d792ecdedb8e,1 +np.float64,0x7fd19fd20aa33fa3,0x408623f1e2cd04c3,1 +np.float64,0x3fd62c708dac58e0,0x3fd309ec7818d16e,1 +np.float64,0x3fdf489047be9120,0x3fd978640617c758,1 +np.float64,0x1,0x1,1 +np.float64,0xbfe21e7c3ea43cf8,0xbfeaba21320697d3,1 +np.float64,0xbfd3649047a6c920,0xbfd71a6f14223744,1 +np.float64,0xbfd68ca68c2d194e,0xbfdbcce6784e5d44,1 +np.float64,0x3fdb26b0ea364d62,0x3fd6a1f86f64ff74,1 +np.float64,0xbfd843821cb08704,0xbfde80e90805ab3f,1 +np.float64,0x3fd508a27aaa1144,0x3fd22fc203a7b9d8,1 +np.float64,0xbfdb951c7eb72a38,0xbfe20aeaec13699b,1 +np.float64,0x3fef556ba57eaad7,0x3fe5d8865cce0a6d,1 +np.float64,0x3fd0d224b3a1a448,0x3fcdde7be5d7e21e,1 +np.float64,0x8007ff272baffe4f,0x8007ff272baffe4f,1 +np.float64,0x3fe1c7bddf638f7c,0x3fdc47cc6cf2f5cd,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x2016d560402f,0x2016d560402f,1 +np.float64,0xbfcca10be9394218,0xbfd033f36b94fc54,1 +np.float64,0xbfdb833628b7066c,0xbfe1fb344b840c70,1 +np.float64,0x3fd8529cb3b0a539,0x3fd49d847fe77218,1 +np.float64,0xbfc0b0ebab2161d8,0xbfc1e260c60ffd1b,1 +np.float64,0xbfea8b9a79f51735,0xbffc4ee6be8a0fa2,1 +np.float64,0x7feca8fab7f951f4,0x40862d613e454646,1 +np.float64,0x7fd8c52d82318a5a,0x408626aaf37423a3,1 +np.float64,0xbfe364ad4526c95a,0xbfedcee39bc93ff5,1 +np.float64,0x800b78161256f02d,0x800b78161256f02d,1 +np.float64,0xbfd55f0153aabe02,0xbfda01a78f72d494,1 +np.float64,0x800315a5f0662b4d,0x800315a5f0662b4d,1 +np.float64,0x7fe4c0dca02981b8,0x40862acc27e4819f,1 +np.float64,0x8009825c703304b9,0x8009825c703304b9,1 +np.float64,0x3fe6e94e1cadd29c,0x3fe1478ccc634f49,1 +np.float64,0x7fe622d8586c45b0,0x40862b504177827e,1 +np.float64,0x3fe4458600688b0c,0x3fdf67e79a84b953,1 +np.float64,0xbfdd75d8a1baebb2,0xbfe3bc9e6ca1bbb5,1 +np.float64,0x3fde789c6bbcf138,0x3fd8ec1d435531b3,1 +np.float64,0x3fe7052b94ee0a58,0x3fe157c5c4418dc1,1 +np.float64,0x7fef31652abe62c9,0x40862e0eaeabcfc0,1 +np.float64,0x3fe279691ee4f2d2,0x3fdd2aa41eb43cd4,1 +np.float64,0xbfd533fa95aa67f6,0xbfd9c12f516d29d7,1 +np.float64,0x3fe6d057f96da0b0,0x3fe138fd96693a6a,1 +np.float64,0x800bad984f775b31,0x800bad984f775b31,1 +np.float64,0x7fdd6fdba4badfb6,0x4086280c73d8ef97,1 +np.float64,0x7fe9b5c0eef36b81,0x40862c82c6f57a53,1 +np.float64,0x8000bc02ece17807,0x8000bc02ece17807,1 +np.float64,0xbff0000000000000,0xfff0000000000000,1 +np.float64,0xbfed430be3fa8618,0xc003aaf338c75b3c,1 +np.float64,0x3fee17b759fc2f6f,0x3fe53668696bf48b,1 +np.float64,0x3f8d4cf9d03a9a00,0x3f8d17d2f532afdc,1 +np.float64,0x8005d6257b8bac4c,0x8005d6257b8bac4c,1 +np.float64,0xbfd17a6df9a2f4dc,0xbfd469e3848adc6e,1 +np.float64,0xb28a293965145,0xb28a293965145,1 +np.float64,0xbfe7d011e42fa024,0xbff5cf818998c8ec,1 +np.float64,0xbfe74f0f136e9e1e,0xbff4dad6ebb0443c,1 +np.float64,0x800f249fc9be4940,0x800f249fc9be4940,1 +np.float64,0x2542f8fe4a860,0x2542f8fe4a860,1 
+np.float64,0xc48d40cd891a8,0xc48d40cd891a8,1 +np.float64,0x3fe4e64bc8e9cc98,0x3fe015c9eb3caa53,1 +np.float64,0x3fd33881eca67104,0x3fd0cea886be2457,1 +np.float64,0xbfd01748fba02e92,0xbfd28875959e6901,1 +np.float64,0x7fb7ab01f22f5603,0x40861b369927bf53,1 +np.float64,0xbfe340274ce6804e,0xbfed72b39f0ebb24,1 +np.float64,0x7fc16c0c3422d817,0x40861e4eaf1a286c,1 +np.float64,0x3fc26944a324d288,0x3fc133a77b356ac4,1 +np.float64,0xa149d7134293b,0xa149d7134293b,1 +np.float64,0x800837382d106e71,0x800837382d106e71,1 +np.float64,0x797d1740f2fa4,0x797d1740f2fa4,1 +np.float64,0xc3f15b7787e2c,0xc3f15b7787e2c,1 +np.float64,0x80cad1b90195a,0x80cad1b90195a,1 +np.float64,0x3fdd8f1142bb1e23,0x3fd84d21490d1ce6,1 +np.float64,0xbfbde6c9123bcd90,0xbfbfcc030a86836a,1 +np.float64,0x8007f77e032feefd,0x8007f77e032feefd,1 +np.float64,0x3fe74fed1c6e9fda,0x3fe18322cf19cb61,1 +np.float64,0xbfd8a40bbcb14818,0xbfdf1d23520ba74b,1 +np.float64,0xbfeb7a0e6076f41d,0xbfff4ddfb926efa5,1 +np.float64,0xbfcb8c5f663718c0,0xbfcf0570f702bda9,1 +np.float64,0xf668cd97ecd1a,0xf668cd97ecd1a,1 +np.float64,0xbfe92accf572559a,0xbff8b4393878ffdb,1 +np.float64,0xbfeaa955567552ab,0xbffca70c7d73eee5,1 +np.float64,0xbfe083a14f610742,0xbfe739d84bc35077,1 +np.float64,0x78290568f0521,0x78290568f0521,1 +np.float64,0x3fe94bae2372975c,0x3fe2a3beac5c9858,1 +np.float64,0x3fca4fbab9349f78,0x3fc7edbca2492acb,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7fb9eb505433d6a0,0x40861bf0adedb74d,1 +np.float64,0x7fdc66f72a38cded,0x408627c32aeecf0f,1 +np.float64,0x2e8e6f445d1cf,0x2e8e6f445d1cf,1 +np.float64,0xbfec43195af88633,0xc0012d7e3f91b7e8,1 +np.float64,0x7fcdb971e93b72e3,0x40862294c9e3a7bc,1 +np.float64,0x800cabc461195789,0x800cabc461195789,1 +np.float64,0x2c79709c58f2f,0x2c79709c58f2f,1 +np.float64,0x8005d772d3cbaee6,0x8005d772d3cbaee6,1 +np.float64,0x3fe84d8c03709b18,0x3fe21490ce3673dd,1 +np.float64,0x7fe5578adc2aaf15,0x40862b056e8437d4,1 +np.float64,0xbf91298c58225320,0xbf914ec86c32d11f,1 +np.float64,0xc7ed2b6d8fda6,0xc7ed2b6d8fda6,1 +np.float64,0x2761404c4ec29,0x2761404c4ec29,1 +np.float64,0x3fbad3c48835a789,0x3fb9833c02385305,1 +np.float64,0x3fa46fee5428dfe0,0x3fa40a357fb24c23,1 +np.float64,0xbfe3900c6fe72019,0xbfee3dba29dd9d43,1 +np.float64,0x3fe7a9e41a6f53c8,0x3fe1b704dfb9884b,1 +np.float64,0xbfe74a7a1eee94f4,0xbff4d269cacb1f29,1 +np.float64,0xbfee609c72fcc139,0xc007da8499d34123,1 +np.float64,0x3fef2d5fc23e5ac0,0x3fe5c44414e59cb4,1 +np.float64,0xbfd7bdc0402f7b80,0xbfddaae1e7bb78fb,1 +np.float64,0xd71ee01dae3dc,0xd71ee01dae3dc,1 +np.float64,0x3fe98cbcdef3197a,0x3fe2c7ffe33c4541,1 +np.float64,0x8000f8dbb3a1f1b8,0x8000f8dbb3a1f1b8,1 +np.float64,0x3fe3e98ad567d316,0x3fdef6e58058313f,1 +np.float64,0x41ad0bfc835a2,0x41ad0bfc835a2,1 +np.float64,0x7fdcc2dc0d3985b7,0x408627dce39f77af,1 +np.float64,0xbfe47b980de8f730,0xbff059acdccd6e2b,1 +np.float64,0xbfef49b6577e936d,0xc00e714f46b2ccc1,1 +np.float64,0x3fac31816c386300,0x3fab71cb92b0db8f,1 +np.float64,0x3fe59097e76b2130,0x3fe07c299fd1127c,1 +np.float64,0xbfecf0df5cf9e1bf,0xc002c7ebdd65039c,1 +np.float64,0x3fd2b7d0b6a56fa1,0x3fd06b638990ae02,1 +np.float64,0xbfeb68deecf6d1be,0xbfff1187e042d3e4,1 +np.float64,0x3fd44a9771a8952f,0x3fd1a01867c5e302,1 +np.float64,0xf79a9dedef354,0xf79a9dedef354,1 +np.float64,0x800c25a170d84b43,0x800c25a170d84b43,1 +np.float64,0x3ff0000000000000,0x3fe62e42fefa39ef,1 +np.float64,0x3fbff4f7623fe9f0,0x3fbe1d3878f4c417,1 +np.float64,0xd284c845a5099,0xd284c845a5099,1 +np.float64,0xbfe3c7815f678f02,0xbfeecdab5ca2e651,1 +np.float64,0x3fc19c934e233927,0x3fc08036104b1f23,1 
+np.float64,0x800b6096de16c12e,0x800b6096de16c12e,1 +np.float64,0xbfe962a67e32c54d,0xbff9392313a112a1,1 +np.float64,0x2b9d0116573a1,0x2b9d0116573a1,1 +np.float64,0x3fcab269ed3564d4,0x3fc83f7e1c3095b7,1 +np.float64,0x3fc8c78d86318f1b,0x3fc6a6cde5696f99,1 +np.float64,0xd5b1e9b5ab63d,0xd5b1e9b5ab63d,1 +np.float64,0xbfed802a47fb0054,0xc00465cad3b5b0ef,1 +np.float64,0xbfd73aaf08ae755e,0xbfdcdbd62b8af271,1 +np.float64,0xbfd4f13c0229e278,0xbfd95dacff79e570,1 +np.float64,0xbfe9622808f2c450,0xbff937f13c397e8d,1 +np.float64,0xbfeddfa62efbbf4c,0xc005b0c835eed829,1 +np.float64,0x3fd65663d4acacc8,0x3fd3290cd0e675dc,1 +np.float64,0x8005e890f1abd123,0x8005e890f1abd123,1 +np.float64,0xbfe924919fb24923,0xbff8a5a827a28756,1 +np.float64,0x3fe8cdf490719be9,0x3fe25d39535e8366,1 +np.float64,0x7fc229e6ff2453cd,0x40861ea40ef87a5a,1 +np.float64,0x3fe5cf53ceeb9ea8,0x3fe0a18e0b65f27e,1 +np.float64,0xa79cf6fb4f39f,0xa79cf6fb4f39f,1 +np.float64,0x7fddbb3c0f3b7677,0x40862820d5edf310,1 +np.float64,0x3e1011de7c203,0x3e1011de7c203,1 +np.float64,0x3fc0b59a83216b38,0x3fbf6916510ff411,1 +np.float64,0x8647f98d0c8ff,0x8647f98d0c8ff,1 +np.float64,0x8005dad33ecbb5a7,0x8005dad33ecbb5a7,1 +np.float64,0x8a80d0631501a,0x8a80d0631501a,1 +np.float64,0xbfe18f7d6ee31efb,0xbfe976f06713afc1,1 +np.float64,0xbfe06eaed560dd5e,0xbfe70eac696933e6,1 +np.float64,0xbfed8ef93c7b1df2,0xc00495bfa3195b53,1 +np.float64,0x3febe9c24677d385,0x3fe411b10db16c42,1 +np.float64,0x7fd5d80c1fabb017,0x408625a97a7787ba,1 +np.float64,0x3fca79b59334f368,0x3fc8108a521341dc,1 +np.float64,0xbfccf8db4339f1b8,0xbfd06c9a5424aadb,1 +np.float64,0xbfea5ac5a574b58b,0xbffbc21d1405d840,1 +np.float64,0x800ce2bf4b19c57f,0x800ce2bf4b19c57f,1 +np.float64,0xbfe8df896d31bf13,0xbff807ab38ac41ab,1 +np.float64,0x3feab83da9f5707c,0x3fe36cdd827c0eff,1 +np.float64,0x3fee717683bce2ed,0x3fe564879171719b,1 +np.float64,0x80025e5577c4bcac,0x80025e5577c4bcac,1 +np.float64,0x3fe3e5378e67ca70,0x3fdef1902c5d1efd,1 +np.float64,0x3fa014bb7c202980,0x3f9faacf9238d499,1 +np.float64,0x3fddbf5e16bb7ebc,0x3fd86e2311cb0f6d,1 +np.float64,0x3fd24e50e6a49ca0,0x3fd0198f04f82186,1 +np.float64,0x656b5214cad6b,0x656b5214cad6b,1 +np.float64,0x8b0a4bfd1614a,0x8b0a4bfd1614a,1 +np.float64,0xbfeeb6bd9e7d6d7b,0xc009b669285e319e,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0xbfe719feceee33fe,0xbff47a4c8cbf0cca,1 +np.float64,0xbfd14fa8c8a29f52,0xbfd42f27b1aced39,1 +np.float64,0x7fec9dcb80f93b96,0x40862d5e1e70bbb9,1 +np.float64,0x7fecacb826f9596f,0x40862d6249746915,1 +np.float64,0x973459f52e68b,0x973459f52e68b,1 +np.float64,0x7f40a59e00214b3b,0x4085f194f45f82b1,1 +np.float64,0x7fc5dbaec32bb75d,0x4086201f3e7065d9,1 +np.float64,0x82d0801305a10,0x82d0801305a10,1 +np.float64,0x7fec81c0f4790381,0x40862d5643c0fc85,1 +np.float64,0xbfe2d81e9ee5b03d,0xbfec71a8e864ea40,1 +np.float64,0x6c545c9ad8a8c,0x6c545c9ad8a8c,1 +np.float64,0x3f9be95a5037d2b5,0x3f9b89b48ac8f5d8,1 +np.float64,0x8000cae9702195d4,0x8000cae9702195d4,1 +np.float64,0xbfd375f45126ebe8,0xbfd733677e54a80d,1 +np.float64,0x3fd29a5b81a534b7,0x3fd05494bf200278,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7fca8fc195351f82,0x408621ae61aa6c13,1 +np.float64,0x1b28e2ae3651d,0x1b28e2ae3651d,1 +np.float64,0x3fe7fdbd14effb7a,0x3fe1e714884b46a8,1 +np.float64,0x3fdf1ce068be39c0,0x3fd95b054e0fad3d,1 +np.float64,0x3fe79f9a636f3f34,0x3fe1b11a40c00b3e,1 +np.float64,0x3fe60eb7036c1d6e,0x3fe0c72a02176874,1 +np.float64,0x229da17e453b5,0x229da17e453b5,1 +np.float64,0x3fc1a921b5235240,0x3fc08b3f35e47fb1,1 
+np.float64,0xbb92d2af7725b,0xbb92d2af7725b,1 +np.float64,0x3fe4110cb1e8221a,0x3fdf2787de6c73f7,1 +np.float64,0xbfbc87771a390ef0,0xbfbe3f6e95622363,1 +np.float64,0xbfe74025dfee804c,0xbff4bf7b1895e697,1 +np.float64,0x964eb6592c9d7,0x964eb6592c9d7,1 +np.float64,0x3f951689b82a2d00,0x3f94dfb38d746fdf,1 +np.float64,0x800356271be6ac4f,0x800356271be6ac4f,1 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,1 +np.float64,0xbfed5ce250fab9c5,0xc003f7ddfeb94345,1 +np.float64,0x3fec3d5dc1387abc,0x3fe43e39c02d86f4,1 +np.float64,0x3999897e73332,0x3999897e73332,1 +np.float64,0xbfdcb57744b96aee,0xbfe30c4b98f3d088,1 +np.float64,0x7f961fb0b82c3f60,0x40860f9549c3a380,1 +np.float64,0x67d6efcacfadf,0x67d6efcacfadf,1 +np.float64,0x8002c9498f859294,0x8002c9498f859294,1 +np.float64,0xbfa3033800260670,0xbfa35fe3bf43e188,1 +np.float64,0xbfeab2fc157565f8,0xbffcc413c486b4eb,1 +np.float64,0x3fe25e62f364bcc6,0x3fdd0856e19e3430,1 +np.float64,0x7fb2f42dda25e85b,0x4086196fb34a65fd,1 +np.float64,0x3fe0f1a5af61e34c,0x3fdb3235a1786efb,1 +np.float64,0x800a340ca1f4681a,0x800a340ca1f4681a,1 +np.float64,0x7c20b9def8418,0x7c20b9def8418,1 +np.float64,0xdf0842a1be109,0xdf0842a1be109,1 +np.float64,0x3fe9f22cc2f3e45a,0x3fe300359b842bf0,1 +np.float64,0x3fe389ed73e713da,0x3fde809780fe4432,1 +np.float64,0x9500fb932a020,0x9500fb932a020,1 +np.float64,0x3fd8a21ffdb14440,0x3fd4d70862345d86,1 +np.float64,0x800d99c15cbb3383,0x800d99c15cbb3383,1 +np.float64,0x3fd96c98c932d932,0x3fd568959c9b028f,1 +np.float64,0x7fc228483a24508f,0x40861ea358420976,1 +np.float64,0x7fc6737bef2ce6f7,0x408620560ffc6a98,1 +np.float64,0xbfb2c27cee2584f8,0xbfb37b8cc7774b5f,1 +np.float64,0xbfd18409f9230814,0xbfd4771d1a9a24fb,1 +np.float64,0x3fb53cb3f42a7968,0x3fb466f06f88044b,1 +np.float64,0x3fef61d0187ec3a0,0x3fe5dec8a9d13dd9,1 +np.float64,0x3fe59a6ffd2b34e0,0x3fe0820a99c6143d,1 +np.float64,0x3fce18aff43c3160,0x3fcb07c7b523f0d1,1 +np.float64,0xbfb1319a62226338,0xbfb1cc62f31b2b40,1 +np.float64,0xa00cce6d4019a,0xa00cce6d4019a,1 +np.float64,0x80068ae8e0ed15d3,0x80068ae8e0ed15d3,1 +np.float64,0x3fecef353239de6a,0x3fe49c280adc607b,1 +np.float64,0x3fdf1a7fb0be34ff,0x3fd9596bafe2d766,1 +np.float64,0x3feb5e12eeb6bc26,0x3fe3c6be3ede8d07,1 +np.float64,0x3fdeff5cd43dfeba,0x3fd947262ec96b05,1 +np.float64,0x3f995e75e832bd00,0x3f990f511f4c7f1c,1 +np.float64,0xbfeb5b3ed0b6b67e,0xbffee24fc0fc2881,1 +np.float64,0x7fb82aad0a305559,0x40861b614d901182,1 +np.float64,0xbfe5c3a4926b8749,0xbff23cd0ad144fe6,1 +np.float64,0x3fef47da373e8fb4,0x3fe5d1aaa4031993,1 +np.float64,0x7fc6a8c3872d5186,0x40862068f5ca84be,1 +np.float64,0x7fc0c2276221844e,0x40861dff2566d001,1 +np.float64,0x7fc9ce7d28339cf9,0x40862173541f84d1,1 +np.float64,0x3fce2c34933c5869,0x3fcb179428ad241d,1 +np.float64,0xbfcf864c293f0c98,0xbfd21872c4821cfc,1 +np.float64,0x3fc51fd1f82a3fa4,0x3fc38d4f1685c166,1 +np.float64,0xbfe2707b70a4e0f7,0xbfeb795fbd5bb444,1 +np.float64,0x46629b568cc54,0x46629b568cc54,1 +np.float64,0x7fe5f821f32bf043,0x40862b40c2cdea3f,1 +np.float64,0x3fedd2c9457ba592,0x3fe512ce92394526,1 +np.float64,0x7fe6dcb8ceadb971,0x40862b925a7dc05d,1 +np.float64,0x3fd1b983b4a37307,0x3fcf4ae2545cf64e,1 +np.float64,0xbfe1c93104639262,0xbfe9f7d28e4c0c82,1 +np.float64,0x995ebc2932bd8,0x995ebc2932bd8,1 +np.float64,0x800a4c3ee614987e,0x800a4c3ee614987e,1 +np.float64,0x3fbb58766e36b0f0,0x3fb9fb3b9810ec16,1 +np.float64,0xbfe36d636666dac7,0xbfede5080f69053c,1 +np.float64,0x3f4feee1003fddc2,0x3f4feae5f05443d1,1 +np.float64,0x3fed0b772ffa16ee,0x3fe4aafb924903c6,1 +np.float64,0x800bb3faef3767f6,0x800bb3faef3767f6,1 
+np.float64,0x3fe285cda5e50b9c,0x3fdd3a58df06c427,1 +np.float64,0x7feb9d560bb73aab,0x40862d152362bb94,1 +np.float64,0x3fecd1f447f9a3e9,0x3fe48cc78288cb3f,1 +np.float64,0x3fca927b0c3524f6,0x3fc8250f49ba28df,1 +np.float64,0x7fcc19944e383328,0x40862221b02fcf43,1 +np.float64,0xbfd8ddf41db1bbe8,0xbfdf7b92073ff2fd,1 +np.float64,0x80006fe736e0dfcf,0x80006fe736e0dfcf,1 +np.float64,0x800bbeb66d577d6d,0x800bbeb66d577d6d,1 +np.float64,0xbfe4329353e86526,0xbfefeaf19ab92b42,1 +np.float64,0x2fad72805f5af,0x2fad72805f5af,1 +np.float64,0x3fe1b827aa637050,0x3fdc33bf46012c0d,1 +np.float64,0x3fc3f3f8e227e7f2,0x3fc28aeb86d65278,1 +np.float64,0x3fec018933780312,0x3fe41e619aa4285c,1 +np.float64,0xbfd92428e0b24852,0xbfdfeecb08d154df,1 +np.float64,0x2d7046845ae0a,0x2d7046845ae0a,1 +np.float64,0x7fde7fd2233cffa3,0x408628550f8a948f,1 +np.float64,0x8000a32cd241465a,0x8000a32cd241465a,1 +np.float64,0x8004267a45084cf5,0x8004267a45084cf5,1 +np.float64,0xbfe6b422556d6844,0xbff3c71f67661e6e,1 +np.float64,0x3fe3a37d922746fb,0x3fdea04e04d6195c,1 +np.float64,0xbfddcc54b53b98aa,0xbfe40d2389cdb848,1 +np.float64,0x3fe18b4b92a31697,0x3fdbf9e68cbf5794,1 +np.float64,0x7fc9c5b2ee338b65,0x408621709a17a47a,1 +np.float64,0x1ebd1ce03d7b,0x1ebd1ce03d7b,1 +np.float64,0x8008a6fc39d14df9,0x8008a6fc39d14df9,1 +np.float64,0x3fec11384c782270,0x3fe426bdaedd2965,1 +np.float64,0x3fefc28344ff8507,0x3fe60f75d34fc3d2,1 +np.float64,0xc35f379786be7,0xc35f379786be7,1 +np.float64,0x3feef51f4a7dea3e,0x3fe5a7b95d7786b5,1 +np.float64,0x3fec9b9f0379373e,0x3fe4702477abbb63,1 +np.float64,0x3fde94f8cdbd29f0,0x3fd8ff50f7df0a6f,1 +np.float64,0xbfed32d1cdfa65a4,0xc0037c1470f6f979,1 +np.float64,0x800d3ba44f5a7749,0x800d3ba44f5a7749,1 +np.float64,0x3fe3c56c8fe78ad9,0x3fdeca4eb9bb8918,1 +np.float64,0xbfe7c97242ef92e4,0xbff5c2950dfd6f69,1 +np.float64,0xbd9440057b288,0xbd9440057b288,1 +np.float64,0x7feb2fc111f65f81,0x40862cf524bd2001,1 +np.float64,0x800a431e2df4863d,0x800a431e2df4863d,1 +np.float64,0x80038a3b79e71478,0x80038a3b79e71478,1 +np.float64,0x80000c93d4601928,0x80000c93d4601928,1 +np.float64,0x7fe9fec022f3fd7f,0x40862c995db8ada0,1 +np.float64,0x3fead0129c35a025,0x3fe379d7a92c8f79,1 +np.float64,0x3fdd8cbaf7bb1974,0x3fd84b87ff0c26c7,1 +np.float64,0x3fe8fb7c60b1f6f9,0x3fe276d5339e7135,1 +np.float64,0x85a255e10b44b,0x85a255e10b44b,1 +np.float64,0xbfe507c23fea0f84,0xbff1212d2260022a,1 +np.float64,0x3fc5487c7b2a90f9,0x3fc3b03222d3d148,1 +np.float64,0x7fec0bdcb8f817b8,0x40862d34e8fd11e7,1 +np.float64,0xbfc5f34b4f2be698,0xbfc8146a899c7a0c,1 +np.float64,0xbfa2a49c14254940,0xbfa2fdab2eae3826,1 +np.float64,0x800ec52f15dd8a5e,0x800ec52f15dd8a5e,1 +np.float64,0xbfe3ba4b12a77496,0xbfeeab256b3e9422,1 +np.float64,0x80034d6c7ba69ada,0x80034d6c7ba69ada,1 +np.float64,0x7fd394d4202729a7,0x408624c98a216742,1 +np.float64,0xbfd4493a38289274,0xbfd865d67af2de91,1 +np.float64,0xe47d6203c8fad,0xe47d6203c8fad,1 +np.float64,0x98eb4e4b31d6a,0x98eb4e4b31d6a,1 +np.float64,0x4507fb128a100,0x4507fb128a100,1 +np.float64,0xbfc77032e42ee064,0xbfc9e36ab747a14d,1 +np.float64,0xa1f8a03b43f14,0xa1f8a03b43f14,1 +np.float64,0xbfc3d4da8527a9b4,0xbfc58c27af2476b0,1 +np.float64,0x3fc0eb7d6921d6fb,0x3fbfc858a077ed61,1 +np.float64,0x7fddb2e9403b65d2,0x4086281e98443709,1 +np.float64,0xbfa7ea62942fd4c0,0xbfa87dfd06b05d2a,1 +np.float64,0xbfe7d5c5426fab8a,0xbff5daa969c6d9e5,1 +np.float64,0x3fbf7cba0c3ef974,0x3fbdb23cd8fe875b,1 +np.float64,0x7fe92021eb324043,0x40862c53aee8b154,1 +np.float64,0x7fefbaa1827f7542,0x40862e3194737072,1 +np.float64,0x3fc6f82c402df059,0x3fc520432cbc533f,1 
+np.float64,0x7fb37679a826ecf2,0x408619a5f857e27f,1 +np.float64,0x79ec1528f3d83,0x79ec1528f3d83,1 +np.float64,0x3fbefe1d0c3dfc3a,0x3fbd41650ba2c893,1 +np.float64,0x3fc3e5e11827cbc2,0x3fc27eb9b47c9c42,1 +np.float64,0x16aed1922d5db,0x16aed1922d5db,1 +np.float64,0x800124f7e58249f1,0x800124f7e58249f1,1 +np.float64,0x8004f7d12489efa3,0x8004f7d12489efa3,1 +np.float64,0x3fef80b8e27f0172,0x3fe5ee5fd43322c6,1 +np.float64,0xbfe7740c88eee819,0xbff51f823c8da14d,1 +np.float64,0xbfe6e1f1f6edc3e4,0xbff416bcb1302e7c,1 +np.float64,0x8001a2c4a7e3458a,0x8001a2c4a7e3458a,1 +np.float64,0x3fe861e155f0c3c2,0x3fe2201d3000c329,1 +np.float64,0x3fd00a101a201420,0x3fcca01087dbd728,1 +np.float64,0x7fdf0eb1133e1d61,0x4086287a327839b8,1 +np.float64,0x95e3ffdb2bc80,0x95e3ffdb2bc80,1 +np.float64,0x3fd87a1e8230f43d,0x3fd4ba1eb9be1270,1 +np.float64,0x3fedc4792afb88f2,0x3fe50b6529080f73,1 +np.float64,0x7fc9e81fa833d03e,0x4086217b428cc6ff,1 +np.float64,0xbfd21f1ba5a43e38,0xbfd54e048b988e09,1 +np.float64,0xbfbf52af5a3ea560,0xbfc0b4ab3b81fafc,1 +np.float64,0x7fe475f8e268ebf1,0x40862aaf14fee029,1 +np.float64,0x3fcf56899f3ead10,0x3fcc081de28ae9cf,1 +np.float64,0x917d407122fa8,0x917d407122fa8,1 +np.float64,0x22e23e3245c49,0x22e23e3245c49,1 +np.float64,0xbfeec2814f3d8503,0xc00a00ecca27b426,1 +np.float64,0xbfd97fee1c32ffdc,0xbfe04351dfe306ec,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log2.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log2.csv new file mode 100644 index 0000000..26921ef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-log2.csv @@ -0,0 +1,1629 @@ +dtype,input,output,ulperrortol +np.float32,0x80000000,0xff800000,3 +np.float32,0x7f12870a,0x42fe63db,3 +np.float32,0x3ef29cf5,0xbf89eb12,3 +np.float32,0x3d6ba8fb,0xc083d26c,3 +np.float32,0x3d9907e8,0xc06f8230,3 +np.float32,0x4ee592,0xc2fd656e,3 +np.float32,0x58d8b1,0xc2fd0db3,3 +np.float32,0x7ba103,0xc2fc19aa,3 +np.float32,0x7f52e90e,0x42ff70e4,3 +np.float32,0x7fcb15,0xc2fc0132,3 +np.float32,0x7cb7129f,0x42f50855,3 +np.float32,0x9faba,0xc301ae59,3 +np.float32,0x7f300a,0xc2fc04b4,3 +np.float32,0x3f0bf047,0xbf5f10cb,3 +np.float32,0x2fb1fb,0xc2fed934,3 +np.float32,0x3eedb0d1,0xbf8db417,3 +np.float32,0x3d7a0b40,0xc0811638,3 +np.float32,0x2e0bac,0xc2fef334,3 +np.float32,0x6278c1,0xc2fcc1b9,3 +np.float32,0x7f61ab2e,0x42ffa2d9,3 +np.float32,0x8fe7c,0xc301d4be,3 +np.float32,0x3f25e6ee,0xbf203536,3 +np.float32,0x7efc78f0,0x42fdf5c0,3 +np.float32,0x6d7304,0xc2fc73a7,3 +np.float32,0x7f1a472a,0x42fe89ed,3 +np.float32,0x7dd029a6,0x42f96734,3 +np.float32,0x3e9b9327,0xbfdbf8f7,3 +np.float32,0x3f4eefc1,0xbe9d2942,3 +np.float32,0x7f5b9b64,0x42ff8ebc,3 +np.float32,0x3e458ee1,0xc017ed6e,3 +np.float32,0x3f7b766b,0xbcd35acf,3 +np.float32,0x3e616070,0xc00bc378,3 +np.float32,0x7f20e633,0x42fea8f8,3 +np.float32,0x3ee3b461,0xbf95a126,3 +np.float32,0x7e7722ba,0x42fbe5f8,3 +np.float32,0x3f0873d7,0xbf6861fa,3 +np.float32,0x7b4cb2,0xc2fc1ba3,3 +np.float32,0x3f0b6b02,0xbf60712e,3 +np.float32,0x9bff4,0xc301b6f2,3 +np.float32,0x3f07be25,0xbf6a4f0c,3 +np.float32,0x3ef10e57,0xbf8b1b75,3 +np.float32,0x46ad75,0xc2fdb6b1,3 +np.float32,0x3f7bc542,0xbcc4e3a9,3 +np.float32,0x3f6673d4,0xbe1b509c,3 +np.float32,0x7f19fe59,0x42fe8890,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f2fe696,0x42feead0,3 +np.float32,0x3dc9432d,0xc0563655,3 +np.float32,0x3ee47623,0xbf950446,3 +np.float32,0x3f1f8817,0xbf2eab51,3 +np.float32,0x7f220ec5,0x42feae44,3 +np.float32,0x2325e3,0xc2ffbab1,3 
+np.float32,0x29dfc8,0xc2ff395a,3 +np.float32,0x7f524950,0x42ff6eb3,3 +np.float32,0x3e2234e0,0xc02a21c8,3 +np.float32,0x7f1c6f5a,0x42fe942f,3 +np.float32,0x3b6a61,0xc2fe36e7,3 +np.float32,0x3f1df90e,0xbf324ba9,3 +np.float32,0xb57f0,0xc3017f07,3 +np.float32,0x7d0eba,0xc2fc112e,3 +np.float32,0x403aa9,0xc2fdfd5c,3 +np.float32,0x3e74ecc7,0xc004155f,3 +np.float32,0x17509c,0xc30074f2,3 +np.float32,0x7f62196b,0x42ffa442,3 +np.float32,0x3ecef9a9,0xbfa7417a,3 +np.float32,0x7f14b158,0x42fe6eb1,3 +np.float32,0x3ede12be,0xbf9a40fe,3 +np.float32,0x42cfaa,0xc2fde03f,3 +np.float32,0x3f407b0f,0xbed2a6f5,3 +np.float32,0x7f7fffff,0x43000000,3 +np.float32,0x5467c6,0xc2fd3394,3 +np.float32,0x7ea6b80f,0x42fcc336,3 +np.float32,0x3f21e7b2,0xbf293704,3 +np.float32,0x3dc7e9eb,0xc056d542,3 +np.float32,0x7f3e6e67,0x42ff2571,3 +np.float32,0x3e3e809d,0xc01b4911,3 +np.float32,0x3f800000,0x0,3 +np.float32,0x3d8fd238,0xc0753d52,3 +np.float32,0x3f74aa65,0xbd85cd0e,3 +np.float32,0x7ec30305,0x42fd36ff,3 +np.float32,0x3e97bb93,0xbfe0971d,3 +np.float32,0x3e109d9c,0xc034bb1b,3 +np.float32,0x3f4a0b67,0xbeaed537,3 +np.float32,0x3f25a7aa,0xbf20c228,3 +np.float32,0x3ebc05eb,0xbfb8fd6b,3 +np.float32,0x3eebe749,0xbf8f18e5,3 +np.float32,0x3e9dc479,0xbfd96356,3 +np.float32,0x7f245200,0x42feb882,3 +np.float32,0x1573a8,0xc30093b5,3 +np.float32,0x3e66c4b9,0xc00994a6,3 +np.float32,0x3e73bffc,0xc0048709,3 +np.float32,0x3dfef8e5,0xc0405f16,3 +np.float32,0x403750,0xc2fdfd83,3 +np.float32,0x3ebedf17,0xbfb636a4,3 +np.float32,0x15cae6,0xc3008de2,3 +np.float32,0x3edf4d4e,0xbf993c24,3 +np.float32,0x3f7cc41e,0xbc963fb3,3 +np.float32,0x3e9e12a4,0xbfd907ee,3 +np.float32,0x7ded7b59,0x42f9c889,3 +np.float32,0x7f034878,0x42fe12b5,3 +np.float32,0x7ddce43f,0x42f9930b,3 +np.float32,0x3d82b257,0xc07e1333,3 +np.float32,0x3dae89c1,0xc0635dd4,3 +np.float32,0x6b1d00,0xc2fc8396,3 +np.float32,0x449a5a,0xc2fdccb3,3 +np.float32,0x4e89d2,0xc2fd68cb,3 +np.float32,0x7e1ae83f,0x42fa8cef,3 +np.float32,0x7e4bb22c,0x42fb572e,3 +np.float32,0x3de308ea,0xc04b1634,3 +np.float32,0x7f238c7a,0x42feb508,3 +np.float32,0x3f6c62a3,0xbdeb86f3,3 +np.float32,0x3e58cba6,0xc00f5908,3 +np.float32,0x7f7dd91f,0x42fff9c4,3 +np.float32,0x3d989376,0xc06fc88d,3 +np.float32,0x3dd013c5,0xc0532339,3 +np.float32,0x4b17e6,0xc2fd89ed,3 +np.float32,0x7f67f287,0x42ffb71e,3 +np.float32,0x3f69365e,0xbe09ba3c,3 +np.float32,0x3e4b8b21,0xc0152bf1,3 +np.float32,0x3a75b,0xc3032171,3 +np.float32,0x7f303676,0x42feec1f,3 +np.float32,0x7f6570e5,0x42ffaf18,3 +np.float32,0x3f5ed61e,0xbe4cf676,3 +np.float32,0x3e9b22f9,0xbfdc7e4f,3 +np.float32,0x2c095e,0xc2ff1428,3 +np.float32,0x3f1b17c1,0xbf391754,3 +np.float32,0x422dc6,0xc2fde746,3 +np.float32,0x3f677c8d,0xbe14b365,3 +np.float32,0x3ef85d0c,0xbf8597a9,3 +np.float32,0x3ecaaa6b,0xbfab2430,3 +np.float32,0x3f0607d1,0xbf6eff3d,3 +np.float32,0x3f011fdb,0xbf7cc50d,3 +np.float32,0x6ed7c1,0xc2fc6a4e,3 +np.float32,0x7ec2d1a2,0x42fd3644,3 +np.float32,0x3f75b7fe,0xbd7238a2,3 +np.float32,0x3ef2d146,0xbf89c344,3 +np.float32,0x7ec2cd27,0x42fd3633,3 +np.float32,0x7ee1e55a,0x42fda397,3 +np.float32,0x7f464d6a,0x42ff435c,3 +np.float32,0x7f469a93,0x42ff447b,3 +np.float32,0x7ece752f,0x42fd6121,3 +np.float32,0x2ed878,0xc2fee67b,3 +np.float32,0x75b23,0xc3021eff,3 +np.float32,0x3e0f4be4,0xc03593b8,3 +np.float32,0x2778e1,0xc2ff64fc,3 +np.float32,0x5fe2b7,0xc2fcd561,3 +np.float32,0x19b8a9,0xc30050ab,3 +np.float32,0x7df303e5,0x42f9d98d,3 +np.float32,0x608b8d,0xc2fcd051,3 +np.float32,0x588f46,0xc2fd1017,3 +np.float32,0x3eec6a11,0xbf8eb2a1,3 +np.float32,0x3f714121,0xbdaf4906,3 
+np.float32,0x7f4f7b9e,0x42ff64c9,3 +np.float32,0x3c271606,0xc0d3b29c,3 +np.float32,0x3f002fe0,0xbf7f75f6,3 +np.float32,0x7efa4798,0x42fdef4f,3 +np.float32,0x3f61a865,0xbe3a601a,3 +np.float32,0x7e8087aa,0x42fc030d,3 +np.float32,0x3f70f0c7,0xbdb321ba,3 +np.float32,0x5db898,0xc2fce63f,3 +np.float32,0x7a965f,0xc2fc1fea,3 +np.float32,0x7f68b112,0x42ffb97c,3 +np.float32,0x7ef0ed3d,0x42fdd32d,3 +np.float32,0x7f3156a1,0x42fef0d3,3 +np.float32,0x3f1d405f,0xbf33fc6e,3 +np.float32,0x3e3494cf,0xc0203945,3 +np.float32,0x6018de,0xc2fcd3c1,3 +np.float32,0x623e49,0xc2fcc370,3 +np.float32,0x3ea29f0f,0xbfd3cad4,3 +np.float32,0xa514,0xc305a20c,3 +np.float32,0x3e1b2ab1,0xc02e3a8f,3 +np.float32,0x3f450b6f,0xbec1578f,3 +np.float32,0x7eb12908,0x42fcf015,3 +np.float32,0x3f10b720,0xbf52ab48,3 +np.float32,0x3e0a93,0xc2fe16f6,3 +np.float32,0x93845,0xc301cb96,3 +np.float32,0x7f4e9ce3,0x42ff61af,3 +np.float32,0x3f6d4296,0xbde09ceb,3 +np.float32,0x6ddede,0xc2fc70d0,3 +np.float32,0x3f4fb6fd,0xbe9a636d,3 +np.float32,0x3f6d08de,0xbde36c0b,3 +np.float32,0x3f56f057,0xbe8122ad,3 +np.float32,0x334e95,0xc2fea349,3 +np.float32,0x7efadbcd,0x42fdf104,3 +np.float32,0x3db02e88,0xc0628046,3 +np.float32,0x3f3309d1,0xbf041066,3 +np.float32,0x2d8722,0xc2fefb8f,3 +np.float32,0x7e926cac,0x42fc6356,3 +np.float32,0x3e3674ab,0xc01f452e,3 +np.float32,0x1b46ce,0xc3003afc,3 +np.float32,0x3f06a338,0xbf6d53fc,3 +np.float32,0x1b1ba7,0xc3003d46,3 +np.float32,0x319dfb,0xc2febc06,3 +np.float32,0x3e2f126a,0xc02315a5,3 +np.float32,0x3f40fe65,0xbed0af9e,3 +np.float32,0x3f1d842f,0xbf335d4b,3 +np.float32,0x3d044e4f,0xc09e78f8,3 +np.float32,0x7f272674,0x42fec51f,3 +np.float32,0x3cda6d8f,0xc0a753db,3 +np.float32,0x3eb92f12,0xbfbbccbb,3 +np.float32,0x7e4318f4,0x42fb3752,3 +np.float32,0x3c5890,0xc2fe2b6d,3 +np.float32,0x3d1993c9,0xc09796f8,3 +np.float32,0x7f18ef24,0x42fe8377,3 +np.float32,0x3e30c3a0,0xc0223244,3 +np.float32,0x3f27cd27,0xbf1c00ef,3 +np.float32,0x3f150957,0xbf47cd6c,3 +np.float32,0x7e7178a3,0x42fbd4d8,3 +np.float32,0x3f298db8,0xbf182ac3,3 +np.float32,0x7cb3be,0xc2fc1348,3 +np.float32,0x3ef64266,0xbf8729de,3 +np.float32,0x3eeb06ce,0xbf8fc8f2,3 +np.float32,0x3f406e36,0xbed2d845,3 +np.float32,0x7f1e1bd3,0x42fe9c0b,3 +np.float32,0x478dcc,0xc2fdad97,3 +np.float32,0x7f7937b5,0x42ffec2b,3 +np.float32,0x3f20f350,0xbf2b6624,3 +np.float32,0x7f13661a,0x42fe683c,3 +np.float32,0x208177,0xc2fff46b,3 +np.float32,0x263cfb,0xc2ff7c72,3 +np.float32,0x7f0bd28c,0x42fe4141,3 +np.float32,0x7230d8,0xc2fc5453,3 +np.float32,0x3f261bbf,0xbf1fbfb4,3 +np.float32,0x737b56,0xc2fc4c05,3 +np.float32,0x3ef88f33,0xbf857263,3 +np.float32,0x7e036464,0x42fa1352,3 +np.float32,0x4b5c4f,0xc2fd874d,3 +np.float32,0x3f77984d,0xbd454596,3 +np.float32,0x3f674202,0xbe162932,3 +np.float32,0x3e7157d9,0xc0057197,3 +np.float32,0x3f3f21da,0xbed7d861,3 +np.float32,0x7f1fb40f,0x42fea375,3 +np.float32,0x7ef0157f,0x42fdd096,3 +np.float32,0x3f71e88d,0xbda74962,3 +np.float32,0x3f174855,0xbf424728,3 +np.float32,0x3f3fdd2c,0xbed505d5,3 +np.float32,0x7b95d1,0xc2fc19ed,3 +np.float32,0x7f23f4e5,0x42feb6df,3 +np.float32,0x7d741925,0x42f7dcd6,3 +np.float32,0x60f81d,0xc2fccd14,3 +np.float32,0x3f17d267,0xbf40f6ae,3 +np.float32,0x3f036fc8,0xbf7636f8,3 +np.float32,0x167653,0xc30082b5,3 +np.float32,0x256d05,0xc2ff8c4f,3 +np.float32,0x3eccc63d,0xbfa93adb,3 +np.float32,0x7f6c91ea,0x42ffc5b2,3 +np.float32,0x2ee52a,0xc2fee5b3,3 +np.float32,0x3dc3579e,0xc058f80d,3 +np.float32,0x4c7170,0xc2fd7cc4,3 +np.float32,0x7f737f20,0x42ffdb03,3 +np.float32,0x3f2f9dbf,0xbf0b3119,3 +np.float32,0x3f4d0c54,0xbea3eec5,3 
+np.float32,0x7e380862,0x42fb0c32,3 +np.float32,0x5d637f,0xc2fce8df,3 +np.float32,0x3f0aa623,0xbf627c27,3 +np.float32,0x3e4d5896,0xc0145b88,3 +np.float32,0x3f6cacdc,0xbde7e7ca,3 +np.float32,0x63a2c3,0xc2fcb90a,3 +np.float32,0x6c138c,0xc2fc7cfa,3 +np.float32,0x2063c,0xc303fb88,3 +np.float32,0x7e9e5a3e,0x42fc9d2f,3 +np.float32,0x56ec64,0xc2fd1ddd,3 +np.float32,0x7f1d6a35,0x42fe98cc,3 +np.float32,0x73dc96,0xc2fc4998,3 +np.float32,0x3e5d74e5,0xc00d6238,3 +np.float32,0x7f033cbb,0x42fe1273,3 +np.float32,0x3f5143fc,0xbe94e4e7,3 +np.float32,0x1d56d9,0xc3002010,3 +np.float32,0x2bf3e4,0xc2ff1591,3 +np.float32,0x3f2a6ef1,0xbf164170,3 +np.float32,0x3f33238b,0xbf03db58,3 +np.float32,0x22780e,0xc2ffc91a,3 +np.float32,0x7f00b873,0x42fe0425,3 +np.float32,0x3f7f6145,0xbb654706,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x63895a,0xc2fcb9c7,3 +np.float32,0x18a1b2,0xc30060a8,3 +np.float32,0x7e43c6a6,0x42fb39e3,3 +np.float32,0x78676e,0xc2fc2d30,3 +np.float32,0x3f16d839,0xbf435940,3 +np.float32,0x7eff78ba,0x42fdfe79,3 +np.float32,0x3f2e152c,0xbf0e6e54,3 +np.float32,0x3db20ced,0xc06186e1,3 +np.float32,0x3f0cd1d8,0xbf5cbf57,3 +np.float32,0x3fd7a8,0xc2fe01d2,3 +np.float32,0x3ebb075e,0xbfb9f816,3 +np.float32,0x7f94ef,0xc2fc026b,3 +np.float32,0x3d80ba0e,0xc07f7a2b,3 +np.float32,0x7f227e15,0x42feb03f,3 +np.float32,0x792264bf,0x42e6afcc,3 +np.float32,0x7f501576,0x42ff66ec,3 +np.float32,0x223629,0xc2ffcea3,3 +np.float32,0x40a79e,0xc2fdf87b,3 +np.float32,0x449483,0xc2fdccf2,3 +np.float32,0x3f4fa978,0xbe9a9382,3 +np.float32,0x7f148c53,0x42fe6df9,3 +np.float32,0x3ec98b3c,0xbfac2a98,3 +np.float32,0x3e4da320,0xc0143a0a,3 +np.float32,0x3d1d94bb,0xc09666d0,3 +np.float32,0x3c8e624e,0xc0bb155b,3 +np.float32,0x66a9af,0xc2fca2ef,3 +np.float32,0x3ec76ed7,0xbfae1c57,3 +np.float32,0x3f4b52f3,0xbeaa2b81,3 +np.float32,0x7e99bbb5,0x42fc8750,3 +np.float32,0x3f69a46b,0xbe0701be,3 +np.float32,0x3f775400,0xbd4ba495,3 +np.float32,0x131e56,0xc300be3c,3 +np.float32,0x3f30abb4,0xbf08fb10,3 +np.float32,0x7f7e528c,0x42fffb25,3 +np.float32,0x3eb89515,0xbfbc668a,3 +np.float32,0x7e9191b6,0x42fc5f02,3 +np.float32,0x7e80c7e9,0x42fc047e,3 +np.float32,0x3f77ef58,0xbd3d2995,3 +np.float32,0x7ddb1f8a,0x42f98d1b,3 +np.float32,0x7ebc6c4f,0x42fd1d9c,3 +np.float32,0x3f6638e0,0xbe1ccab8,3 +np.float32,0x7f4c45,0xc2fc0410,3 +np.float32,0x3e7d8aad,0xc000e414,3 +np.float32,0x3f4d148b,0xbea3d12e,3 +np.float32,0x3e98c45c,0xbfdf55f4,3 +np.float32,0x3d754c78,0xc081f8a9,3 +np.float32,0x17e4cf,0xc3006be3,3 +np.float32,0x7eb65814,0x42fd0563,3 +np.float32,0x3f65e0d8,0xbe1f0008,3 +np.float32,0x3e99541f,0xbfdea87e,3 +np.float32,0x3f3cb80e,0xbee13b27,3 +np.float32,0x3e99f0c0,0xbfddec3b,3 +np.float32,0x3f43903e,0xbec6ea66,3 +np.float32,0x7e211cd4,0x42faa9f2,3 +np.float32,0x824af,0xc301f971,3 +np.float32,0x3e16a56e,0xc030f56c,3 +np.float32,0x542b3b,0xc2fd35a6,3 +np.float32,0x3eeea2d1,0xbf8cf873,3 +np.float32,0x232e93,0xc2ffb9fa,3 +np.float32,0x3e8c52b9,0xbfef06aa,3 +np.float32,0x7f69c7e3,0x42ffbcef,3 +np.float32,0x3f573e43,0xbe801714,3 +np.float32,0x43b009,0xc2fdd69f,3 +np.float32,0x3ee571ab,0xbf943966,3 +np.float32,0x3ee3d5d8,0xbf958604,3 +np.float32,0x338b12,0xc2fe9fe4,3 +np.float32,0x29cb1f,0xc2ff3ac6,3 +np.float32,0x3f0892b4,0xbf680e7a,3 +np.float32,0x3e8c4f7f,0xbfef0ae9,3 +np.float32,0x7c9d3963,0x42f497e6,3 +np.float32,0x3f26ba84,0xbf1e5f59,3 +np.float32,0x3dd0acc0,0xc052df6f,3 +np.float32,0x3e43fbda,0xc018aa8c,3 +np.float32,0x3ec4fd0f,0xbfb0635d,3 +np.float32,0x3f52c8c6,0xbe8f8d85,3 +np.float32,0x3f5fdc5d,0xbe462fdb,3 +np.float32,0x3f461920,0xbebd6743,3 
+np.float32,0x6161ff,0xc2fcc9ef,3 +np.float32,0x7f7ed306,0x42fffc9a,3 +np.float32,0x3d212263,0xc0955f46,3 +np.float32,0x3eca5826,0xbfab6f36,3 +np.float32,0x7d6317ac,0x42f7a77e,3 +np.float32,0x3eb02063,0xbfc50f60,3 +np.float32,0x7f71a6f8,0x42ffd565,3 +np.float32,0x1a3efe,0xc3004935,3 +np.float32,0x3dc599c9,0xc057e856,3 +np.float32,0x3f3e1301,0xbedbf205,3 +np.float32,0xf17d4,0xc301158d,3 +np.float32,0x3f615f84,0xbe3c3d85,3 +np.float32,0x3de63be1,0xc049cb77,3 +np.float32,0x3e8d2f51,0xbfede541,3 +np.float32,0x3a5cdd,0xc2fe441c,3 +np.float32,0x3f443ec0,0xbec4586a,3 +np.float32,0x3eacbd00,0xbfc8a5ad,3 +np.float32,0x3f600f6a,0xbe44df1b,3 +np.float32,0x5f77a6,0xc2fcd89c,3 +np.float32,0x476706,0xc2fdaf28,3 +np.float32,0x2f469,0xc3036fde,3 +np.float32,0x7dc4ba24,0x42f93d77,3 +np.float32,0x3e2d6080,0xc023fb9b,3 +np.float32,0x7e8d7135,0x42fc49c3,3 +np.float32,0x3f589065,0xbe77247b,3 +np.float32,0x3f59e210,0xbe6e2c05,3 +np.float32,0x7f51d388,0x42ff6d15,3 +np.float32,0x7d9a5fda,0x42f88a63,3 +np.float32,0x3e67d5bc,0xc00927ab,3 +np.float32,0x61d72c,0xc2fcc679,3 +np.float32,0x3ef3351d,0xbf897766,3 +np.float32,0x1,0xc3150000,3 +np.float32,0x7f653429,0x42ffae54,3 +np.float32,0x7e1ad3e5,0x42fa8c8e,3 +np.float32,0x3f4ca01d,0xbea57500,3 +np.float32,0x3f7606db,0xbd6ad13e,3 +np.float32,0x7ec4a27d,0x42fd3d1f,3 +np.float32,0x3efe4fd5,0xbf8138c7,3 +np.float32,0x77c2f1,0xc2fc3124,3 +np.float32,0x7e4d3251,0x42fb5c9a,3 +np.float32,0x3f543ac7,0xbe8a8154,3 +np.float32,0x7c3dbe29,0x42f322c4,3 +np.float32,0x408e01,0xc2fdf9a0,3 +np.float32,0x45069b,0xc2fdc829,3 +np.float32,0x3d7ecab7,0xc08037e8,3 +np.float32,0xf8c22,0xc3010a99,3 +np.float32,0x7f69af63,0x42ffbca2,3 +np.float32,0x7ec7d228,0x42fd48fe,3 +np.float32,0xff800000,0xffc00000,3 +np.float32,0xdd7c5,0xc301357c,3 +np.float32,0x143f38,0xc300a90e,3 +np.float32,0x7e65c176,0x42fbb01b,3 +np.float32,0x2c1a9e,0xc2ff1307,3 +np.float32,0x7f6e9224,0x42ffcbeb,3 +np.float32,0x3d32ab39,0xc0909a77,3 +np.float32,0x3e150b42,0xc031f22b,3 +np.float32,0x1f84b4,0xc300059a,3 +np.float32,0x3f71ce21,0xbda88c2a,3 +np.float32,0x2625c4,0xc2ff7e33,3 +np.float32,0x3dd0b293,0xc052dcdc,3 +np.float32,0x625c11,0xc2fcc290,3 +np.float32,0x3f610297,0xbe3e9f24,3 +np.float32,0x7ebdd5e5,0x42fd2320,3 +np.float32,0x3e883458,0xbff486ff,3 +np.float32,0x782313,0xc2fc2ed4,3 +np.float32,0x7f39c843,0x42ff132f,3 +np.float32,0x7f326aa7,0x42fef54d,3 +np.float32,0x4d2c71,0xc2fd75be,3 +np.float32,0x3f55747c,0xbe86409e,3 +np.float32,0x7f7f0867,0x42fffd34,3 +np.float32,0x321316,0xc2feb53f,3 +np.float32,0x3e1b37ed,0xc02e32b0,3 +np.float32,0x80edf,0xc301fd54,3 +np.float32,0x3f0b08ad,0xbf617607,3 +np.float32,0x7f3f4174,0x42ff28a2,3 +np.float32,0x3d79306d,0xc0813eb0,3 +np.float32,0x3f5f657a,0xbe49413d,3 +np.float32,0x3f56c63a,0xbe81b376,3 +np.float32,0x7f667123,0x42ffb24f,3 +np.float32,0x3f71021b,0xbdb24d43,3 +np.float32,0x7f434ab1,0x42ff380f,3 +np.float32,0x3dcae496,0xc055779c,3 +np.float32,0x3f5a7d88,0xbe6a0f5b,3 +np.float32,0x3cdf5c32,0xc0a64bf5,3 +np.float32,0x3e56222c,0xc0107d11,3 +np.float32,0x561a3a,0xc2fd24df,3 +np.float32,0x7ddd953c,0x42f9955a,3 +np.float32,0x7e35d839,0x42fb035c,3 +np.float32,0x3ec1816c,0xbfb3aeb2,3 +np.float32,0x7c87cfcd,0x42f42bc2,3 +np.float32,0xd9cd,0xc3053baf,3 +np.float32,0x3f388234,0xbef1e5b7,3 +np.float32,0x3edfcaca,0xbf98d47b,3 +np.float32,0x3ef28852,0xbf89fac8,3 +np.float32,0x7f7525df,0x42ffe001,3 +np.float32,0x7f6c33ef,0x42ffc48c,3 +np.float32,0x3ea4a881,0xbfd17e61,3 +np.float32,0x3f3e379f,0xbedb63c6,3 +np.float32,0x3f0524c1,0xbf717301,3 +np.float32,0x3db3e7f0,0xc06091d3,3 
+np.float32,0x800000,0xc2fc0000,3 +np.float32,0x3f2f2897,0xbf0c27ce,3 +np.float32,0x7eb1776d,0x42fcf15c,3 +np.float32,0x3f039018,0xbf75dc37,3 +np.float32,0x3c4055,0xc2fe2c96,3 +np.float32,0x3f603653,0xbe43dea5,3 +np.float32,0x7f700d24,0x42ffd07c,3 +np.float32,0x3f4741a3,0xbeb918dc,3 +np.float32,0x3f5fe959,0xbe45da2d,3 +np.float32,0x3f3e4401,0xbedb33b1,3 +np.float32,0x7f0705ff,0x42fe2775,3 +np.float32,0x3ea85662,0xbfcd69b0,3 +np.float32,0x3f15f49f,0xbf458829,3 +np.float32,0x3f17c50e,0xbf411728,3 +np.float32,0x3e483f60,0xc016add2,3 +np.float32,0x3f1ab9e5,0xbf39f71b,3 +np.float32,0x3de0b6fb,0xc04c08fe,3 +np.float32,0x7e671225,0x42fbb452,3 +np.float32,0x80800000,0xffc00000,3 +np.float32,0xe2df3,0xc3012c9d,3 +np.float32,0x3ede1e3c,0xbf9a3770,3 +np.float32,0x3df2ffde,0xc044cfec,3 +np.float32,0x3eed8da5,0xbf8dcf6c,3 +np.float32,0x3ead15c3,0xbfc846e1,3 +np.float32,0x7ef3750a,0x42fddae4,3 +np.float32,0x7e6ab7c0,0x42fbbfe4,3 +np.float32,0x7ea4bbe5,0x42fcba5d,3 +np.float32,0x3f227706,0xbf27f0a1,3 +np.float32,0x3ef39bfd,0xbf89295a,3 +np.float32,0x3f289a20,0xbf1a3edd,3 +np.float32,0x7f225f82,0x42feafb4,3 +np.float32,0x768963,0xc2fc38bc,3 +np.float32,0x3f493c00,0xbeb1ccfc,3 +np.float32,0x3f4e7249,0xbe9ee9a7,3 +np.float32,0x1d0c3a,0xc30023c0,3 +np.float32,0x7f3c5f78,0x42ff1d6a,3 +np.float32,0xff7fffff,0xffc00000,3 +np.float32,0x3ee7896a,0xbf928c2a,3 +np.float32,0x3e788479,0xc002bd2e,3 +np.float32,0x3ee4df17,0xbf94af84,3 +np.float32,0x5e06d7,0xc2fce3d7,3 +np.float32,0x3d7b2776,0xc080e1dc,3 +np.float32,0x3e3d39d3,0xc01be7fd,3 +np.float32,0x7c81dece,0x42f40ab7,3 +np.float32,0x3f7d2085,0xbc856255,3 +np.float32,0x7f7f6627,0x42fffe44,3 +np.float32,0x7f5f2e94,0x42ff9aaa,3 +np.float32,0x7f5835f2,0x42ff8339,3 +np.float32,0x3f6a0e32,0xbe046580,3 +np.float32,0x7e16f586,0x42fa79dd,3 +np.float32,0x3f04a2f2,0xbf72dbc5,3 +np.float32,0x3f35e334,0xbefc7740,3 +np.float32,0x3f0d056e,0xbf5c3824,3 +np.float32,0x7ebeb95e,0x42fd2693,3 +np.float32,0x3c6192,0xc2fe2aff,3 +np.float32,0x3e892b4f,0xbff33958,3 +np.float32,0x3f61d694,0xbe3931df,3 +np.float32,0x29d183,0xc2ff3a56,3 +np.float32,0x7f0b0598,0x42fe3d04,3 +np.float32,0x7f743b28,0x42ffdd3d,3 +np.float32,0x3a2ed6,0xc2fe4663,3 +np.float32,0x3e27403a,0xc0274de8,3 +np.float32,0x3f58ee78,0xbe74a349,3 +np.float32,0x3eaa4b,0xc2fe0f92,3 +np.float32,0x3ecb613b,0xbfaa7de8,3 +np.float32,0x7f637d81,0x42ffa8c9,3 +np.float32,0x3f026e96,0xbf790c73,3 +np.float32,0x386cdf,0xc2fe5d0c,3 +np.float32,0x35abd1,0xc2fe8202,3 +np.float32,0x3eac3cd1,0xbfc92ee8,3 +np.float32,0x3f567869,0xbe82bf47,3 +np.float32,0x3f65c643,0xbe1faae6,3 +np.float32,0x7f5422b9,0x42ff752b,3 +np.float32,0x7c26e9,0xc2fc168c,3 +np.float32,0x7eff5cfd,0x42fdfe29,3 +np.float32,0x3f728e7f,0xbd9f6142,3 +np.float32,0x3f10fd43,0xbf51f874,3 +np.float32,0x7e7ada08,0x42fbf0fe,3 +np.float32,0x3e82a611,0xbffc37be,3 +np.float32,0xbf800000,0xffc00000,3 +np.float32,0x3dbe2e12,0xc05b711c,3 +np.float32,0x7e768fa9,0x42fbe440,3 +np.float32,0x5e44e8,0xc2fce1f0,3 +np.float32,0x7f25071a,0x42febbae,3 +np.float32,0x3f54db5e,0xbe885339,3 +np.float32,0x3f0f2c26,0xbf56a0b8,3 +np.float32,0x22f9a7,0xc2ffbe55,3 +np.float32,0x7ed63dcb,0x42fd7c77,3 +np.float32,0x7ea4fae2,0x42fcbb78,3 +np.float32,0x3f1d7766,0xbf337b47,3 +np.float32,0x7f16d59f,0x42fe7941,3 +np.float32,0x3f3a1bb6,0xbeeb855c,3 +np.float32,0x3ef57128,0xbf87c709,3 +np.float32,0xb24ff,0xc3018591,3 +np.float32,0x3ef99e27,0xbf84a983,3 +np.float32,0x3eac2ccf,0xbfc94013,3 +np.float32,0x3e9d3e1e,0xbfda00dc,3 +np.float32,0x718213,0xc2fc58c1,3 +np.float32,0x7edbf509,0x42fd8fea,3 
+np.float32,0x70c7f1,0xc2fc5d80,3 +np.float32,0x3f7012f5,0xbdbdc6cd,3 +np.float32,0x12cba,0xc304c487,3 +np.float32,0x7f5d445d,0x42ff944c,3 +np.float32,0x7f3e30bd,0x42ff2481,3 +np.float32,0x63b110,0xc2fcb8a0,3 +np.float32,0x3f39f728,0xbeec1680,3 +np.float32,0x3f5bea58,0xbe6074b1,3 +np.float32,0x3f350749,0xbefff679,3 +np.float32,0x3e91ab2c,0xbfe81f3e,3 +np.float32,0x7ec53fe0,0x42fd3f6d,3 +np.float32,0x3f6cbbdc,0xbde72c8e,3 +np.float32,0x3f4df49f,0xbea0abcf,3 +np.float32,0x3e9c9638,0xbfdac674,3 +np.float32,0x7f3b82ec,0x42ff1a07,3 +np.float32,0x7f612a09,0x42ffa132,3 +np.float32,0x7ea26650,0x42fcafd3,3 +np.float32,0x3a615138,0xc122f26d,3 +np.float32,0x3f1108bd,0xbf51db39,3 +np.float32,0x6f80f6,0xc2fc65ea,3 +np.float32,0x3f7cb578,0xbc98ecb1,3 +np.float32,0x7f54d31a,0x42ff7790,3 +np.float32,0x196868,0xc3005532,3 +np.float32,0x3f01ee0a,0xbf7a7925,3 +np.float32,0x3e184013,0xc02ffb11,3 +np.float32,0xadde3,0xc3018ee3,3 +np.float32,0x252a91,0xc2ff9173,3 +np.float32,0x3f0382c2,0xbf7601a9,3 +np.float32,0x6d818c,0xc2fc7345,3 +np.float32,0x3bfbfd,0xc2fe2fdd,3 +np.float32,0x7f3cad19,0x42ff1e9a,3 +np.float32,0x4169a7,0xc2fdefdf,3 +np.float32,0x3f615d96,0xbe3c4a2b,3 +np.float32,0x3f036480,0xbf7656ac,3 +np.float32,0x7f5fbda3,0x42ff9c83,3 +np.float32,0x3d202d,0xc2fe21f1,3 +np.float32,0x3d0f5e5d,0xc09ac3e9,3 +np.float32,0x3f0fff6e,0xbf548142,3 +np.float32,0x7f11ed32,0x42fe60d2,3 +np.float32,0x3e6f856b,0xc00624b6,3 +np.float32,0x7f7c4dd7,0x42fff542,3 +np.float32,0x3e76fb86,0xc0034fa0,3 +np.float32,0x3e8a0d6e,0xbff209e7,3 +np.float32,0x3eacad19,0xbfc8b6ad,3 +np.float32,0xa7776,0xc3019cbe,3 +np.float32,0x3dc84d74,0xc056a754,3 +np.float32,0x3efb8052,0xbf834626,3 +np.float32,0x3f0e55fc,0xbf58cacc,3 +np.float32,0x7e0e71e3,0x42fa4efb,3 +np.float32,0x3ed5a800,0xbfa1639c,3 +np.float32,0x3f33335b,0xbf03babf,3 +np.float32,0x38cad7,0xc2fe5842,3 +np.float32,0x3bc21256,0xc0ecc927,3 +np.float32,0x3f09522d,0xbf660a19,3 +np.float32,0xcbd5d,0xc3015428,3 +np.float32,0x492752,0xc2fd9d42,3 +np.float32,0x3f2b9b32,0xbf13b904,3 +np.float32,0x6544ac,0xc2fcad09,3 +np.float32,0x52eb12,0xc2fd40b5,3 +np.float32,0x3f66a7c0,0xbe1a03e8,3 +np.float32,0x7ab289,0xc2fc1f41,3 +np.float32,0x62af5e,0xc2fcc020,3 +np.float32,0x7f73e9cf,0x42ffdc46,3 +np.float32,0x3e5eca,0xc2fe130e,3 +np.float32,0x3e3a10f4,0xc01d7602,3 +np.float32,0x3f04db46,0xbf723f0d,3 +np.float32,0x18fc4a,0xc3005b63,3 +np.float32,0x525bcb,0xc2fd45b6,3 +np.float32,0x3f6b9108,0xbdf5c769,3 +np.float32,0x3e992e8c,0xbfded5c5,3 +np.float32,0x7efea647,0x42fdfc18,3 +np.float32,0x7e8371db,0x42fc139e,3 +np.float32,0x3f397cfb,0xbeedfc69,3 +np.float32,0x7e46d233,0x42fb454a,3 +np.float32,0x7d5281ad,0x42f76f79,3 +np.float32,0x7f4c1878,0x42ff58a1,3 +np.float32,0x3e96ca5e,0xbfe1bd97,3 +np.float32,0x6a2743,0xc2fc8a3d,3 +np.float32,0x7f688781,0x42ffb8f8,3 +np.float32,0x7814b7,0xc2fc2f2d,3 +np.float32,0x3f2ffdc9,0xbf0a6756,3 +np.float32,0x3f766fa8,0xbd60fe24,3 +np.float32,0x4dc64e,0xc2fd7003,3 +np.float32,0x3a296f,0xc2fe46a8,3 +np.float32,0x3f2af942,0xbf15162e,3 +np.float32,0x7f702c32,0x42ffd0dc,3 +np.float32,0x7e61e318,0x42fba390,3 +np.float32,0x7f7d3bdb,0x42fff7fa,3 +np.float32,0x3ee87f3f,0xbf91c881,3 +np.float32,0x2bbc28,0xc2ff193c,3 +np.float32,0x3e01f918,0xc03e966e,3 +np.float32,0x7f0b39f4,0x42fe3e1a,3 +np.float32,0x3eaa4d64,0xbfcb4516,3 +np.float32,0x3e53901e,0xc0119a88,3 +np.float32,0x603cb,0xc3026957,3 +np.float32,0x7e81f926,0x42fc0b4d,3 +np.float32,0x5dab7c,0xc2fce6a6,3 +np.float32,0x3f46fefd,0xbeba1018,3 +np.float32,0x648448,0xc2fcb28a,3 +np.float32,0x3ec49470,0xbfb0c58b,3 
+np.float32,0x3e8a5393,0xbff1ac2b,3 +np.float32,0x3f27ccfc,0xbf1c014e,3 +np.float32,0x3ed886e6,0xbf9eeca8,3 +np.float32,0x7cfbe06e,0x42f5f401,3 +np.float32,0x3f5aa7ba,0xbe68f229,3 +np.float32,0x9500d,0xc301c7e3,3 +np.float32,0x3f4861,0xc2fe0853,3 +np.float32,0x3e5ae104,0xc00e76f5,3 +np.float32,0x71253a,0xc2fc5b1e,3 +np.float32,0xcf7b8,0xc3014d9c,3 +np.float32,0x7f7edd2d,0x42fffcb7,3 +np.float32,0x3e9039ee,0xbfe9f5ab,3 +np.float32,0x2fd54e,0xc2fed712,3 +np.float32,0x3f600752,0xbe45147a,3 +np.float32,0x3f4da8f6,0xbea1bb5c,3 +np.float32,0x3f2d34a9,0xbf104bd9,3 +np.float32,0x3e1e66dd,0xc02c52d2,3 +np.float32,0x798276,0xc2fc2670,3 +np.float32,0xd55e2,0xc3014347,3 +np.float32,0x80000001,0xffc00000,3 +np.float32,0x3e7a5ead,0xc0020da6,3 +np.float32,0x7ec4c744,0x42fd3da9,3 +np.float32,0x597e00,0xc2fd085a,3 +np.float32,0x3dff6bf4,0xc0403575,3 +np.float32,0x5d6f1a,0xc2fce883,3 +np.float32,0x7e21faff,0x42faadea,3 +np.float32,0x3e570fea,0xc01016c6,3 +np.float32,0x28e6b6,0xc2ff4ab7,3 +np.float32,0x7e77062d,0x42fbe5a3,3 +np.float32,0x74cac4,0xc2fc43b0,3 +np.float32,0x3f707273,0xbdb93078,3 +np.float32,0x228e96,0xc2ffc737,3 +np.float32,0x686ac1,0xc2fc966b,3 +np.float32,0x3d76400d,0xc081cae8,3 +np.float32,0x3e9f502f,0xbfd7966b,3 +np.float32,0x3f6bc656,0xbdf32b1f,3 +np.float32,0x3edb828b,0xbf9c65d4,3 +np.float32,0x6c6e56,0xc2fc7a8e,3 +np.float32,0x3f04552e,0xbf73b48f,3 +np.float32,0x3f39cb69,0xbeecc457,3 +np.float32,0x7f681c44,0x42ffb7a3,3 +np.float32,0x7f5b44ee,0x42ff8d99,3 +np.float32,0x3e71430a,0xc005798d,3 +np.float32,0x3edcfde3,0xbf9b27c6,3 +np.float32,0x3f616a5a,0xbe3bf67f,3 +np.float32,0x3f523936,0xbe918548,3 +np.float32,0x3f39ce3a,0xbeecb925,3 +np.float32,0x3eac589a,0xbfc91120,3 +np.float32,0x7efc8d3d,0x42fdf5fc,3 +np.float32,0x5704b0,0xc2fd1d0f,3 +np.float32,0x7e7972e9,0x42fbecda,3 +np.float32,0x3eb0811c,0xbfc4aa13,3 +np.float32,0x7f1efcbb,0x42fea023,3 +np.float32,0x3e0b9e32,0xc037fa6b,3 +np.float32,0x7eef6a48,0x42fdce87,3 +np.float32,0x3cc0a373,0xc0ad20c0,3 +np.float32,0x3f2a75bb,0xbf1632ba,3 +np.float32,0x0,0xff800000,3 +np.float32,0x7ecdb6f4,0x42fd5e77,3 +np.float32,0x7f2e2dfd,0x42fee38d,3 +np.float32,0x3ee17f6e,0xbf976d8c,3 +np.float32,0x3f51e7ee,0xbe92a319,3 +np.float32,0x3f06942f,0xbf6d7d3c,3 +np.float32,0x3f7ba528,0xbccac6f1,3 +np.float32,0x3f413787,0xbecfd513,3 +np.float32,0x3e085e48,0xc03a2716,3 +np.float32,0x7e4c5e0e,0x42fb599c,3 +np.float32,0x306f76,0xc2fecdd4,3 +np.float32,0x7f5c2203,0x42ff9081,3 +np.float32,0x3d5355b4,0xc088da05,3 +np.float32,0x9a2a,0xc305bb4f,3 +np.float32,0x3db93a1f,0xc05de0db,3 +np.float32,0x4e50c6,0xc2fd6ae4,3 +np.float32,0x7ec4afed,0x42fd3d51,3 +np.float32,0x3a8f27,0xc2fe41a0,3 +np.float32,0x7f213caf,0x42feaa84,3 +np.float32,0x7e7b5f00,0x42fbf286,3 +np.float32,0x7e367194,0x42fb05ca,3 +np.float32,0x7f56e6de,0x42ff7ebd,3 +np.float32,0x3ed7383e,0xbfa00aef,3 +np.float32,0x7e844752,0x42fc184a,3 +np.float32,0x15157,0xc3049a19,3 +np.float32,0x3f78cd92,0xbd28824a,3 +np.float32,0x7ecddb16,0x42fd5ef9,3 +np.float32,0x3e479f16,0xc016f7d8,3 +np.float32,0x3f5cb418,0xbe5b2bd3,3 +np.float32,0x7c0934cb,0x42f2334e,3 +np.float32,0x3ebe5505,0xbfb6bc69,3 +np.float32,0x3eb1335a,0xbfc3eff5,3 +np.float32,0x3f2488a3,0xbf234444,3 +np.float32,0x642906,0xc2fcb52a,3 +np.float32,0x3da635fa,0xc067e15a,3 +np.float32,0x7e0d80db,0x42fa4a15,3 +np.float32,0x4f0b9d,0xc2fd640a,3 +np.float32,0x7e083806,0x42fa2df8,3 +np.float32,0x7f77f8c6,0x42ffe877,3 +np.float32,0x3e7bb46a,0xc0018ff5,3 +np.float32,0x3f06eb2e,0xbf6c8eca,3 +np.float32,0x7eae8f7c,0x42fce52a,3 +np.float32,0x3de481a0,0xc04a7d7f,3 
+np.float32,0x3eed4311,0xbf8e096f,3 +np.float32,0x3f7b0300,0xbce8903d,3 +np.float32,0x3811b,0xc30330dd,3 +np.float32,0x3eb6f8e1,0xbfbe04bc,3 +np.float32,0x3ec35210,0xbfb1f55a,3 +np.float32,0x3d386916,0xc08f24a5,3 +np.float32,0x3f1fa197,0xbf2e704d,3 +np.float32,0x7f2020a5,0x42fea56a,3 +np.float32,0x7e1ea53f,0x42fa9e8c,3 +np.float32,0x3f148903,0xbf490bf9,3 +np.float32,0x3f2f56a0,0xbf0bc6c9,3 +np.float32,0x7da9fc,0xc2fc0d9b,3 +np.float32,0x3d802134,0xc07fe810,3 +np.float32,0x3f6cb927,0xbde74e57,3 +np.float32,0x7e05b125,0x42fa2023,3 +np.float32,0x3f3307f9,0xbf041433,3 +np.float32,0x5666bf,0xc2fd2250,3 +np.float32,0x3f51c93b,0xbe930f28,3 +np.float32,0x3eb5dcfe,0xbfbf241e,3 +np.float32,0xb2773,0xc301853f,3 +np.float32,0x7f4dee96,0x42ff5f3f,3 +np.float32,0x3e3f5c33,0xc01adee1,3 +np.float32,0x3f2ed29a,0xbf0cdd4a,3 +np.float32,0x3e3c01ef,0xc01c80ab,3 +np.float32,0x3ec2236e,0xbfb31458,3 +np.float32,0x7e841dc4,0x42fc1761,3 +np.float32,0x3df2cd8e,0xc044e30c,3 +np.float32,0x3f010901,0xbf7d0670,3 +np.float32,0x3c05ceaa,0xc0ddf39b,3 +np.float32,0x3f517226,0xbe944206,3 +np.float32,0x3f23c83d,0xbf24f522,3 +np.float32,0x7fc9da,0xc2fc0139,3 +np.float32,0x7f1bde53,0x42fe9181,3 +np.float32,0x3ea3786c,0xbfd2d4a5,3 +np.float32,0x3e83a71b,0xbffacdd2,3 +np.float32,0x3f6f0d4f,0xbdca61d5,3 +np.float32,0x7f5ab613,0x42ff8bb7,3 +np.float32,0x3ab1ec,0xc2fe3fea,3 +np.float32,0x4fbf58,0xc2fd5d82,3 +np.float32,0x3dea141b,0xc0484403,3 +np.float32,0x7d86ad3b,0x42f8258f,3 +np.float32,0x7f345315,0x42fefd29,3 +np.float32,0x3f3752fe,0xbef6a780,3 +np.float32,0x64830d,0xc2fcb293,3 +np.float32,0x3d9dc1eb,0xc06cb32a,3 +np.float32,0x3f2f935a,0xbf0b46f6,3 +np.float32,0xb90a4,0xc30177e3,3 +np.float32,0x4111dd,0xc2fdf3c1,3 +np.float32,0x3d4cd078,0xc08a4c68,3 +np.float32,0x3e95c3f1,0xbfe30011,3 +np.float32,0x3ec9f356,0xbfabcb4e,3 +np.float32,0x1b90d5,0xc3003717,3 +np.float32,0xee70f,0xc3011a3e,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x3f74cdb6,0xbd8422af,3 +np.float32,0x3d9b56fe,0xc06e2037,3 +np.float32,0x3f1853df,0xbf3fbc40,3 +np.float32,0x7d86a011,0x42f82547,3 +np.float32,0x3dff9629,0xc0402634,3 +np.float32,0x46f8c9,0xc2fdb39f,3 +np.float32,0x3e9b410b,0xbfdc5a87,3 +np.float32,0x3f5aed42,0xbe671cac,3 +np.float32,0x3b739886,0xc101257f,3 +np.float64,0x3fe2f58d6565eb1b,0xbfe82a641138e19a,1 +np.float64,0x3fee7f0642fcfe0d,0xbfb1c702f6974932,1 +np.float64,0x25b71f244b6e5,0xc090030d3b3c5d2b,1 +np.float64,0x8c9cc8e1193b,0xc0900b752a678fa8,1 +np.float64,0x3fd329b5d326536c,0xbffbd607f6db945c,1 +np.float64,0x3fb5109b3a2a2136,0xc00cd36bd15dfb18,1 +np.float64,0x3fd5393ae12a7276,0xbff97a7e4a157154,1 +np.float64,0x3fd374d1b926e9a3,0xbffb7c3e1a3a7ed3,1 +np.float64,0x3fe2c7f4e2658fea,0xbfe899f15ca78fcb,1 +np.float64,0x7fe3d6b81ee7ad6f,0x408ffa7b63d407ee,1 +np.float64,0x3fe086d097e10da1,0xbfee81456ce8dd03,1 +np.float64,0x7fd374a64ca6e94c,0x408ff241c7306d39,1 +np.float64,0x3fc0709a5b20e135,0xc007afdede31b29c,1 +np.float64,0x3fd4218f4b28431f,0xbffab2c696966e2d,1 +np.float64,0x143134c828628,0xc09006a8372c4d8a,1 +np.float64,0x3f8bd0aa0037a154,0xc018cf0e8b9c3107,1 +np.float64,0x7fe0ce905ee19d20,0x408ff8915e71bd67,1 +np.float64,0x3fda0f5f32b41ebe,0xbff4bd5e0869e820,1 +np.float64,0x7fe9ae63d0b35cc7,0x408ffd760ca4f292,1 +np.float64,0x3fe75abd9eeeb57b,0xbfdd1476fc8b3089,1 +np.float64,0x786c3110f0d87,0xc08ff8b44cedbeea,1 +np.float64,0x22c5fe80458d,0xc09013853591c2f2,1 +np.float64,0x3fdc250797384a0f,0xbff2f6a02c961f0b,1 +np.float64,0x3fa2b367b02566cf,0xc013199238485054,1 +np.float64,0x3fd26a910ca4d522,0xbffcc0e2089b1c0c,1 
+np.float64,0x8068d3b300d1b,0xc08ff7f690210aac,1 +np.float64,0x3fe663bfa9ecc77f,0xbfe07cd95a43a5ce,1 +np.float64,0x3fd0ddb07321bb61,0xbffec886665e895e,1 +np.float64,0x3f91c730b0238e61,0xc0176452badc8d22,1 +np.float64,0x4dd10d309ba22,0xc08ffdbe738b1d8d,1 +np.float64,0x7fe322afa4a6455e,0x408ffa10c038f9de,1 +np.float64,0x7fdf7f7c42befef8,0x408ff7d147ddaad5,1 +np.float64,0x7fd673f386ace7e6,0x408ff3e920d00eef,1 +np.float64,0x3feaebfcadb5d7f9,0xbfcfe8ec27083478,1 +np.float64,0x3fdc6dc23738db84,0xbff2bb46794f07b8,1 +np.float64,0xcd8819599b103,0xc08ff288c5b2cf0f,1 +np.float64,0xfda00e77fb402,0xc08ff01b895d2236,1 +np.float64,0x840b02ff08161,0xc08ff7a41e41114c,1 +np.float64,0x3fbdce3a383b9c74,0xc008d1e61903a289,1 +np.float64,0x3fd24ed3c4a49da8,0xbffce3c12136b6d3,1 +np.float64,0x3fe8d0834131a107,0xbfd77b194e7051d4,1 +np.float64,0x3fdd0cb11aba1962,0xbff23b9dbd554455,1 +np.float64,0x1a32d97e3465c,0xc090052781a37271,1 +np.float64,0x3fdb09d2b1b613a5,0xbff3e396b862bd83,1 +np.float64,0x3fe04c848aa09909,0xbfef2540dd90103a,1 +np.float64,0x3fce0c48613c1891,0xc000b9f76877d744,1 +np.float64,0x3fc37109a226e213,0xc005c05d8b2b9a2f,1 +np.float64,0x81cf3837039e7,0xc08ff7d686517dff,1 +np.float64,0xd9342c29b2686,0xc08ff1e591c9a895,1 +np.float64,0x7fec731b0638e635,0x408ffea4884550a9,1 +np.float64,0x3fba0fc138341f82,0xc00a5e839b085f64,1 +np.float64,0x7fdda893b03b5126,0x408ff71f7c5a2797,1 +np.float64,0xd2a4bb03a5498,0xc08ff2402f7a907c,1 +np.float64,0x3fea61fb0d34c3f6,0xbfd1d293fbe76183,1 +np.float64,0x3fed5cf486fab9e9,0xbfbfc2e01a7ffff1,1 +np.float64,0x3fcbabc2bf375785,0xc001ad7750c9dbdf,1 +np.float64,0x3fdb5fff53b6bfff,0xbff39a7973a0c6a5,1 +np.float64,0x7feef05a00bde0b3,0x408fff9c5cbc8651,1 +np.float64,0xb1cf24f1639e5,0xc08ff434de10fffb,1 +np.float64,0x3fa583989c2b0731,0xc0124a8a3bbf18ce,1 +np.float64,0x7feae90bf9f5d217,0x408ffe002e7bbbea,1 +np.float64,0x3fe9ef41c4b3de84,0xbfd367878ae4528e,1 +np.float64,0x9be24ce337c4a,0xc08ff5b9b1c31cf9,1 +np.float64,0x3fe916894cb22d13,0xbfd677f915d58503,1 +np.float64,0x3fec1bab20f83756,0xbfc7f2777aabe8ee,1 +np.float64,0x3feaabf2873557e5,0xbfd0d11f28341233,1 +np.float64,0x3fd4d3c3b529a787,0xbff9e9e47acc8ca9,1 +np.float64,0x3fe4cfe96c699fd3,0xbfe3dc53fa739169,1 +np.float64,0xccfdb97399fb7,0xc08ff2908d893400,1 +np.float64,0x3fec7598be78eb31,0xbfc5a750f8f3441a,1 +np.float64,0x355be5fc6ab7e,0xc090010ca315b50b,1 +np.float64,0x3fba9f9074353f21,0xc00a1f80eaf5e581,1 +np.float64,0x7fdcaff189395fe2,0x408ff6bd1c5b90d9,1 +np.float64,0x3fd94d3b64b29a77,0xbff56be1b43d25f3,1 +np.float64,0x4e5f29949cbe6,0xc08ffda972da1d73,1 +np.float64,0x3fe654e2d9aca9c6,0xbfe09b88dcd8f15d,1 +np.float64,0x7fdc130190b82602,0x408ff67d496c1a27,1 +np.float64,0x3fbcd4701e39a8e0,0xc009343e36627e80,1 +np.float64,0x7fdaa4d38f3549a6,0x408ff5e2c6d8678f,1 +np.float64,0x3febe95e5237d2bd,0xbfc93e16d453fe3a,1 +np.float64,0x9ef5ca553deba,0xc08ff57ff4f7883d,1 +np.float64,0x7fe878e91170f1d1,0x408ffce795868fc8,1 +np.float64,0x3fe63dff466c7bff,0xbfe0caf2b79c9e5f,1 +np.float64,0x6561446ccac29,0xc08ffab0e383834c,1 +np.float64,0x30c6c2ae618d9,0xc09001914b30381b,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fe5c9daf1ab93b6,0xbfe1be81baf4dbdb,1 +np.float64,0x3fe0a03e24a1407c,0xbfee3a73c4c0e8f8,1 +np.float64,0xff2a2cf3fe546,0xc08ff009a7e6e782,1 +np.float64,0x7fcf0332213e0663,0x408fefa36235e210,1 +np.float64,0x3fb612affc2c2560,0xc00c494be9c8c33b,1 +np.float64,0x3fd2b259702564b3,0xbffc67967f077e75,1 +np.float64,0x7fcb63685d36c6d0,0x408fee343343f913,1 +np.float64,0x3fe369f1d5a6d3e4,0xbfe71251139939ad,1 
+np.float64,0x3fdd17c618ba2f8c,0xbff232d11c986251,1 +np.float64,0x3f92cc8040259901,0xc01711d8e06b52ee,1 +np.float64,0x69a81dc2d3504,0xc08ffa36cdaf1141,1 +np.float64,0x3fea0fad99b41f5b,0xbfd2f4625a652645,1 +np.float64,0xd1cd5799a39ab,0xc08ff24c02b90d26,1 +np.float64,0x324e59ce649cc,0xc0900163ad091c76,1 +np.float64,0x3fc3d460a227a8c1,0xc00585f903dc7a7f,1 +np.float64,0xa7185ec74e30c,0xc08ff4ec7d65ccd9,1 +np.float64,0x3fa254eaac24a9d5,0xc01337053963321a,1 +np.float64,0x3feaeb112435d622,0xbfcfef3be17f81f6,1 +np.float64,0x60144c3ac028a,0xc08ffb4f8eb94595,1 +np.float64,0x7fa4d2ec6829a5d8,0x408fdb0a9670ab83,1 +np.float64,0x3fed1372f97a26e6,0xbfc1b1fe50d48a55,1 +np.float64,0x3fd5ade5972b5bcb,0xbff8fcf28f525031,1 +np.float64,0x7fe72e335bee5c66,0x408ffc4759236437,1 +np.float64,0x7fdfafab143f5f55,0x408ff7e2e22a8129,1 +np.float64,0x3fe90d0db9321a1b,0xbfd69ae5fe10eb9e,1 +np.float64,0x7fe20a59072414b1,0x408ff962a2492484,1 +np.float64,0x3fed853690bb0a6d,0xbfbdc9dc5f199d2b,1 +np.float64,0x3fd709d469ae13a9,0xbff795a218deb700,1 +np.float64,0x3fe21c35f5e4386c,0xbfea47d71789329b,1 +np.float64,0x9ea5ec053d4be,0xc08ff585c2f6b7a3,1 +np.float64,0x3fc0580f9e20b01f,0xc007c1268f49d037,1 +np.float64,0xd99127abb3225,0xc08ff1e0a1ff339d,1 +np.float64,0x3fdc8c9bbfb91937,0xbff2a2478354effb,1 +np.float64,0x3fe15fc6b162bf8d,0xbfec323ac358e008,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0x3fee341afb3c6836,0xbfb556b6faee9a84,1 +np.float64,0x3fe4b64c56296c99,0xbfe4154835ad2afe,1 +np.float64,0x85de22810bbc5,0xc08ff77b914fe5b5,1 +np.float64,0x3fd22c72e3a458e6,0xbffd0f4269d20bb9,1 +np.float64,0xc090e5218123,0xc09009a4a65a8a8f,1 +np.float64,0x7fd9641692b2c82c,0x408ff5547782bdfc,1 +np.float64,0x3fd9b9cb28b37396,0xbff509a8fb59a9f1,1 +np.float64,0x3fcd2726f93a4e4e,0xc001135059a22117,1 +np.float64,0x3fa4b493d4296928,0xc0128323c7a55f4a,1 +np.float64,0x47455e788e8ac,0xc08ffec2101c1e82,1 +np.float64,0x3fe0d7e2e261afc6,0xbfeda0f1e2d0f4bd,1 +np.float64,0x3fe860fc5b70c1f9,0xbfd91dc42eaf72c2,1 +np.float64,0xa5d7805b4baf0,0xc08ff502bc819ff6,1 +np.float64,0xd83395b1b0673,0xc08ff1f33c3f94c2,1 +np.float64,0x3f865972e02cb2e6,0xc01a1243651565c8,1 +np.float64,0x52fc6952a5f8e,0xc08ffd006b158179,1 +np.float64,0x7fecac6c793958d8,0x408ffebbb1c09a70,1 +np.float64,0x7fe621ff606c43fe,0x408ffbbeb2b1473a,1 +np.float64,0x3fdb9f3f9db73e7f,0xbff365610c52bda7,1 +np.float64,0x7feab92992757252,0x408ffdeb92a04813,1 +np.float64,0xcc46c79f988d9,0xc08ff29adf03fb7c,1 +np.float64,0x3fe3156a03262ad4,0xbfe7dd0f598781c7,1 +np.float64,0x3fc00e3a61201c75,0xc007f5c121a87302,1 +np.float64,0x3fdce8e9f739d1d4,0xbff2581d41ef50ef,1 +np.float64,0x0,0xfff0000000000000,1 +np.float64,0x7d373ac4fa6e8,0xc08ff840fa8beaec,1 +np.float64,0x3fee41e0653c83c1,0xbfb4ae786f2a0d54,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x7feca6fff9794dff,0x408ffeb982a70556,1 +np.float64,0x7fc532716d2a64e2,0x408feb3f0f6c095b,1 +np.float64,0x3fe4ec2954a9d853,0xbfe39dd44aa5a040,1 +np.float64,0x7fd3321d52a6643a,0x408ff21a0ab9cd85,1 +np.float64,0x7fd8f1b2dfb1e365,0x408ff52001fa7922,1 +np.float64,0x3fee5e58cabcbcb2,0xbfb3539734a24d8b,1 +np.float64,0x3feebf6e7dfd7edd,0xbfad7c648f025102,1 +np.float64,0x6008026ec0101,0xc08ffb5108b54a93,1 +np.float64,0x3fea06f5e2340dec,0xbfd3134a48283360,1 +np.float64,0x41cad13c8395b,0xc08fffae654b2426,1 +np.float64,0x7fedb5c9353b6b91,0x408fff249f1f32b6,1 +np.float64,0xe00c5af9c018c,0xc08ff189e68c655f,1 +np.float64,0x7feac398ddf58731,0x408ffdf01374de9f,1 +np.float64,0x3fed21127c7a4225,0xbfc15b8cf55628fa,1 +np.float64,0x3fd3446711a688ce,0xbffbb5f7252a9fa3,1 
+np.float64,0x7fe75fa07a6ebf40,0x408ffc5fdb096018,1 +np.float64,0x3feeb1618cbd62c3,0xbfaece3bd0863070,1 +np.float64,0x7f5226e180244dc2,0x408fb174d506e52f,1 +np.float64,0x3fcd67deca3acfbe,0xc000f9cd7a490749,1 +np.float64,0xdc6f30efb8de6,0xc08ff1b9f2a22d2e,1 +np.float64,0x9c14931338293,0xc08ff5b5f975ec5d,1 +np.float64,0x7fe93e802df27cff,0x408ffd4354eba0e0,1 +np.float64,0x3feb92ae5077255d,0xbfcb7f2084e44dbb,1 +np.float64,0xd78dbfddaf1b8,0xc08ff1fc19fa5a13,1 +np.float64,0x7fe14c301fa2985f,0x408ff8e666cb6592,1 +np.float64,0xbda3d8b77b47b,0xc08ff37689f4b2e5,1 +np.float64,0x8a42953b14853,0xc08ff71c2db3b8cf,1 +np.float64,0x7fe4ca7e186994fb,0x408ffb05e94254a7,1 +np.float64,0x7fe92ffc5e325ff8,0x408ffd3cb0265b12,1 +np.float64,0x91b262912364d,0xc08ff681619be214,1 +np.float64,0x33fe2b0667fc6,0xc0900132f3fab55e,1 +np.float64,0x3fde10e9183c21d2,0xbff17060fb4416c7,1 +np.float64,0xb6b811cb6d702,0xc08ff3e46303b541,1 +np.float64,0x3fe4a7bda0a94f7b,0xbfe435c6481cd0e3,1 +np.float64,0x7fd9fe6057b3fcc0,0x408ff599c79a822c,1 +np.float64,0x3fef44bf917e897f,0xbfa11484e351a6e9,1 +np.float64,0x3fe57d701daafae0,0xbfe2618ab40fc01b,1 +np.float64,0x7fe52d2adbaa5a55,0x408ffb3c2fb1c99d,1 +np.float64,0xb432f66d6865f,0xc08ff40d6b4084fe,1 +np.float64,0xbff0000000000000,0x7ff8000000000000,1 +np.float64,0x7fecd2292bf9a451,0x408ffecad860de6f,1 +np.float64,0x3fddd2ae153ba55c,0xbff1a059adaca33e,1 +np.float64,0x3fee55d6e5bcabae,0xbfb3bb1c6179d820,1 +np.float64,0x7fc1d0085623a010,0x408fe93d16ada7a7,1 +np.float64,0x829b000105360,0xc08ff7c47629a68f,1 +np.float64,0x7fe1e0257523c04a,0x408ff94782cf0717,1 +np.float64,0x7fd652f9ad2ca5f2,0x408ff3d820ec892e,1 +np.float64,0x3fef2246203e448c,0xbfa444ab6209d8cd,1 +np.float64,0x3fec6c0ae178d816,0xbfc5e559ebd4e790,1 +np.float64,0x3fe6ddfee92dbbfe,0xbfdf06dd7d3fa7a8,1 +np.float64,0x3fb7fbcbea2ff798,0xc00b5404d859d148,1 +np.float64,0x7feb9a154d37342a,0x408ffe4b26c29e55,1 +np.float64,0x3fe4db717aa9b6e3,0xbfe3c2c6b3ef13bc,1 +np.float64,0x3fbae17dda35c2fc,0xc00a030f7f4b37e7,1 +np.float64,0x7fd632b9082c6571,0x408ff3c76826ef19,1 +np.float64,0x7fc4184a15283093,0x408feaa14adf00be,1 +np.float64,0x3fe052d19920a5a3,0xbfef136b5df81a3e,1 +np.float64,0x7fe38b872b67170d,0x408ffa4f51aafc86,1 +np.float64,0x3fef9842d03f3086,0xbf92d3d2a21d4be2,1 +np.float64,0x9cea662139d4d,0xc08ff5a634810daa,1 +np.float64,0x3fe35f0855e6be11,0xbfe72c4b564e62aa,1 +np.float64,0x3fecee3d3779dc7a,0xbfc29ee942f8729e,1 +np.float64,0x3fe7903fd72f2080,0xbfdc41db9b5f4048,1 +np.float64,0xb958889572b11,0xc08ff3ba366cf84b,1 +np.float64,0x3fcb3a67c53674d0,0xc001dd21081ad1ea,1 +np.float64,0xe3b1b53fc7637,0xc08ff15a3505e1ce,1 +np.float64,0xe5954ae9cb2aa,0xc08ff141cbbf0ae4,1 +np.float64,0x3fe394af74e7295f,0xbfe6ad1d13f206e8,1 +np.float64,0x7fe21dd704643bad,0x408ff96f13f80c1a,1 +np.float64,0x3fd23a7cf02474fa,0xbffcfd7454117a05,1 +np.float64,0x7fe257515e24aea2,0x408ff99378764d52,1 +np.float64,0x7fe4c5d0a6e98ba0,0x408ffb03503cf939,1 +np.float64,0x3fadc2c1603b8583,0xc0106b2c17550e3a,1 +np.float64,0x3fc0f7f02421efe0,0xc007525ac446864c,1 +np.float64,0x3feaf0b27275e165,0xbfcfc8a03eaa32ad,1 +np.float64,0x5ce7503cb9ceb,0xc08ffbb2de365fa8,1 +np.float64,0x2a0014f654003,0xc090026e41761a0d,1 +np.float64,0x7fe2c848a8e59090,0x408ff9d9b723ee89,1 +np.float64,0x7f66f54bc02dea97,0x408fbc2ae0ec5623,1 +np.float64,0xa35a890146b6,0xc0900a97b358ddbd,1 +np.float64,0x7fee267ded7c4cfb,0x408fff501560c9f5,1 +np.float64,0x3fe07c328520f865,0xbfee9ef7c3435b58,1 +np.float64,0x3fe67122cf6ce246,0xbfe06147001932ba,1 +np.float64,0x3fdacc8925359912,0xbff41824cece219e,1 
+np.float64,0xffa3047fff461,0xc08ff00431ec9be3,1 +np.float64,0x3e1af43e7c35f,0xc090002c6573d29b,1 +np.float64,0x86fa94590df53,0xc08ff7632525ed92,1 +np.float64,0x7fec4c76227898eb,0x408ffe94d032c657,1 +np.float64,0x7fe2274ce1e44e99,0x408ff975194cfdff,1 +np.float64,0x7fe670e1b4ace1c2,0x408ffbe78cc451de,1 +np.float64,0x7fe853871db0a70d,0x408ffcd5e6a6ff47,1 +np.float64,0x3fcbf265db37e4cc,0xc0019026336e1176,1 +np.float64,0x3fef033cef3e067a,0xbfa726712eaae7f0,1 +np.float64,0x5d74973abae94,0xc08ffba15e6bb992,1 +np.float64,0x7fdd9c99b6bb3932,0x408ff71ad24a7ae0,1 +np.float64,0xbdc8e09b7b91c,0xc08ff3744939e9a3,1 +np.float64,0xdbfcff71b7fa0,0xc08ff1bfeecc9dfb,1 +np.float64,0xf9b38cf5f3672,0xc08ff0499af34a43,1 +np.float64,0x3fea820aa6b50415,0xbfd162a38e1927b1,1 +np.float64,0x3fe67f59a12cfeb3,0xbfe04412adca49dc,1 +np.float64,0x3feb301d9c76603b,0xbfce17e6edeb92d5,1 +np.float64,0x828ce00b0519c,0xc08ff7c5b5c57cde,1 +np.float64,0x4f935e229f26c,0xc08ffd7c67c1c54f,1 +np.float64,0x7fcd139e023a273b,0x408feee4f12ff11e,1 +np.float64,0x666a9944ccd54,0xc08ffa92d5e5cd64,1 +np.float64,0x3fe792f0fa6f25e2,0xbfdc374fda28f470,1 +np.float64,0xe996029bd32c1,0xc08ff10eb9b47a11,1 +np.float64,0x3fe7b0dd1eef61ba,0xbfdbc2676dc77db0,1 +np.float64,0x7fd3ec0127a7d801,0x408ff287bf47e27d,1 +np.float64,0x3fe793a8ea6f2752,0xbfdc347f7717e48d,1 +np.float64,0x7fdb89d15e3713a2,0x408ff64457a13ea2,1 +np.float64,0x3fe35b3cbbe6b679,0xbfe73557c8321b70,1 +np.float64,0x66573c94ccae8,0xc08ffa9504af7eb5,1 +np.float64,0x3fc620a2302c4144,0xc00442036b944a67,1 +np.float64,0x49b2fe0693660,0xc08ffe5f131c3c7e,1 +np.float64,0x7fda936cdfb526d9,0x408ff5db3ab3f701,1 +np.float64,0xc774ceef8ee9a,0xc08ff2e16d082fa1,1 +np.float64,0x4da9f8a09b55,0xc0900ee2206d0c88,1 +np.float64,0x3fe2ca5d5ae594bb,0xbfe89406611a5f1a,1 +np.float64,0x7fe0832497e10648,0x408ff85d1de6056e,1 +np.float64,0x3fe6a9e3222d53c6,0xbfdfda35a9bc2de1,1 +np.float64,0x3fed3d92c8ba7b26,0xbfc0a73620db8b98,1 +np.float64,0x3fdd2ec093ba5d81,0xbff2209cf78ce3f1,1 +np.float64,0x62fcb968c5f98,0xc08ffaf775a593c7,1 +np.float64,0xfcfb019ff9f60,0xc08ff0230e95bd16,1 +np.float64,0x3fd7a63e8f2f4c7d,0xbff6faf4fff7dbe0,1 +np.float64,0x3fef23b0ec3e4762,0xbfa4230cb176f917,1 +np.float64,0x340d1e6a681a5,0xc09001314b68a0a2,1 +np.float64,0x7fc0b85ba02170b6,0x408fe8821487b802,1 +np.float64,0x7fe9976e84f32edc,0x408ffd6bb6aaf467,1 +np.float64,0x329a0e9e65343,0xc090015b044e3270,1 +np.float64,0x3fea4928d3f49252,0xbfd2299b05546eab,1 +np.float64,0x3f188c70003118e0,0xc02ac3ce23bc5d5a,1 +np.float64,0x3fecce5020b99ca0,0xbfc36b23153d5f50,1 +np.float64,0x3fe203873e24070e,0xbfea86edb3690830,1 +np.float64,0x3fe02d9eaa205b3d,0xbfef7d18c54a76d2,1 +np.float64,0xef7537ebdeea7,0xc08ff0c55e9d89e7,1 +np.float64,0x3fedf7572efbeeae,0xbfb840af357cf07c,1 +np.float64,0xd1a97a61a354,0xc0900926fdfb96cc,1 +np.float64,0x7fe6a0daeced41b5,0x408ffc001edf1407,1 +np.float64,0x3fe5063625aa0c6c,0xbfe3647cfb949d62,1 +np.float64,0x7fe9b28d31736519,0x408ffd77eb4a922b,1 +np.float64,0x7feea90d033d5219,0x408fff81a4bbff62,1 +np.float64,0x3fe9494d17f2929a,0xbfd5bde02eb5287a,1 +np.float64,0x7feee17a8cbdc2f4,0x408fff96cf0dc16a,1 +np.float64,0xb2ad18ef655a3,0xc08ff4267eda8af8,1 +np.float64,0x3fad3b52683a76a5,0xc01085ab75b797ce,1 +np.float64,0x2300a65846016,0xc090037b81ce9500,1 +np.float64,0x3feb1041f9b62084,0xbfcef0c87d8b3249,1 +np.float64,0x3fdd887d3e3b10fa,0xbff1da0e1ede6db2,1 +np.float64,0x3fd3e410eb27c822,0xbffaf9b5fc9cc8cc,1 +np.float64,0x3fe0aa53e3e154a8,0xbfee1e7b5c486578,1 +np.float64,0x7fe33e389aa67c70,0x408ffa214fe50961,1 
+np.float64,0x3fd27e3a43a4fc75,0xbffca84a79e8adeb,1 +np.float64,0x3fb309e0082613c0,0xc00dfe407b77a508,1 +np.float64,0x7feaf2ed8cf5e5da,0x408ffe046a9d1ba9,1 +np.float64,0x1e76167a3cec4,0xc0900448cd35ec67,1 +np.float64,0x3fe0a18e1721431c,0xbfee36cf1165a0d4,1 +np.float64,0x3fa73b78c02e76f2,0xc011d9069823b172,1 +np.float64,0x3fef6d48287eda90,0xbf9ab2d08722c101,1 +np.float64,0x8fdf0da31fbe2,0xc08ff6a6a2accaa1,1 +np.float64,0x3fc3638db826c71b,0xc005c86191688826,1 +np.float64,0xaa9c09c555381,0xc08ff4aefe1d9473,1 +np.float64,0x7fccb0f4523961e8,0x408feebd84773f23,1 +np.float64,0xede75dcfdbcec,0xc08ff0d89ba887d1,1 +np.float64,0x7f8a051520340a29,0x408fcd9cc17f0d95,1 +np.float64,0x3fef5ca2babeb945,0xbf9dc221f3618e6a,1 +np.float64,0x7fea0ff4bcf41fe8,0x408ffda193359f22,1 +np.float64,0x7fe05c53fd20b8a7,0x408ff841dc7123e8,1 +np.float64,0x3fc625664b2c4acd,0xc0043f8749b9a1d8,1 +np.float64,0x7fed58f98f7ab1f2,0x408fff00585f48c2,1 +np.float64,0x3fb3e5e51427cbca,0xc00d7bcb6528cafe,1 +np.float64,0x3fe728bd3d6e517a,0xbfdddafa72bd0f60,1 +np.float64,0x3fe3f005dd27e00c,0xbfe5d7b3ec93bca0,1 +np.float64,0x3fd74fbd1a2e9f7a,0xbff750001b63ce81,1 +np.float64,0x3fd3af6d85a75edb,0xbffb371d678d11b4,1 +np.float64,0x7fa690ad8c2d215a,0x408fdbf7db9c7640,1 +np.float64,0x3fbdfd38e23bfa72,0xc008bfc1c5c9b89e,1 +np.float64,0x3fe2374684a46e8d,0xbfea030c4595dfba,1 +np.float64,0x7fc0806c372100d7,0x408fe85b36fee334,1 +np.float64,0x3fef3ac47b7e7589,0xbfa2007195c5213f,1 +np.float64,0x3fb55473922aa8e7,0xc00cae7af8230e0c,1 +np.float64,0x7fe018dc152031b7,0x408ff811e0d712fa,1 +np.float64,0x3fe3b3fca56767f9,0xbfe6638ae2c99c62,1 +np.float64,0x7fac79818c38f302,0x408fdea720b39c3c,1 +np.float64,0x7fefffffffffffff,0x4090000000000000,1 +np.float64,0xd2b290cba5652,0xc08ff23f6d7152a6,1 +np.float64,0x7fc5848eb52b091c,0x408feb6b6f8b77d0,1 +np.float64,0xf399f62de733f,0xc08ff092ae319ad8,1 +np.float64,0x7fdec56c12bd8ad7,0x408ff78c4ddbc667,1 +np.float64,0x3fca640f1e34c81e,0xc0023969c5cbfa4c,1 +np.float64,0x3fd55225db2aa44c,0xbff95f7442a2189e,1 +np.float64,0x7fefa009a97f4012,0x408fffdd2f42ef9f,1 +np.float64,0x4a3b70609478,0xc0900f24e449bc3d,1 +np.float64,0x7fe3738b1ba6e715,0x408ffa411f2cb5e7,1 +np.float64,0x7fe5e53f0b6bca7d,0x408ffb9ed8d95cea,1 +np.float64,0x3fe274dd24a4e9ba,0xbfe967fb114b2a83,1 +np.float64,0x3fcbc58b8c378b17,0xc001a2bb1e158bcc,1 +np.float64,0x3fefc2c0043f8580,0xbf862c9b464dcf38,1 +np.float64,0xc2c4fafd858a0,0xc08ff327aecc409b,1 +np.float64,0x3fd8bc39a9b17873,0xbff5f1ad46e5a51c,1 +np.float64,0x3fdf341656be682d,0xbff094f41e7cb4c4,1 +np.float64,0x3fef8495c13f092c,0xbf966cf6313bae4c,1 +np.float64,0x3fe14e0f05229c1e,0xbfec6166f26b7161,1 +np.float64,0x3fed42d3b2ba85a7,0xbfc0860b773d35d8,1 +np.float64,0x7fd92bbac5b25775,0x408ff53abcb3fe0c,1 +np.float64,0xb1635b6f62c6c,0xc08ff43bdf47accf,1 +np.float64,0x4a3a2dbc94746,0xc08ffe49fabddb36,1 +np.float64,0x87d831290fb06,0xc08ff750419dc6fb,1 +np.float64,0x3fec4713f7f88e28,0xbfc6d6217c9f5cf9,1 +np.float64,0x7fed43ba2d3a8773,0x408ffef7fa2fc303,1 +np.float64,0x7fd1ec5b56a3d8b6,0x408ff14f62615f1e,1 +np.float64,0x3fee534b6c7ca697,0xbfb3da1951aa3e68,1 +np.float64,0x3febb564c2b76aca,0xbfca9737062e55e7,1 +np.float64,0x943e6b0f287ce,0xc08ff64e2d09335c,1 +np.float64,0xf177d957e2efb,0xc08ff0acab2999fa,1 +np.float64,0x7fb5b881a82b7102,0x408fe3872b4fde5e,1 +np.float64,0x3fdb2b4a97b65695,0xbff3c715c91359bc,1 +np.float64,0x3fac0a17e4381430,0xc010c330967309fb,1 +np.float64,0x7fd8057990b00af2,0x408ff4b0a287a348,1 +np.float64,0x1f9026a23f206,0xc09004144f3a19dd,1 +np.float64,0x3fdb2977243652ee,0xbff3c8a2fd05803d,1 
+np.float64,0x3fe0f6e74b21edcf,0xbfed4c3bb956bae0,1 +np.float64,0xde9cc3bbbd399,0xc08ff19ce5c1e762,1 +np.float64,0x3fe72ce106ae59c2,0xbfddca7ab14ceba2,1 +np.float64,0x3fa8ee14e031dc2a,0xc01170d54ca88e86,1 +np.float64,0x3fe0b09bbb216137,0xbfee0d189a95b877,1 +np.float64,0x7fdfdcb157bfb962,0x408ff7f33cf2afea,1 +np.float64,0x3fef84d5f53f09ac,0xbf966134e2a154f4,1 +np.float64,0x3fea0e0b1bb41c16,0xbfd2fa2d36637d19,1 +np.float64,0x1ab76fd6356ef,0xc090050a9616ffbd,1 +np.float64,0x7fd0ccf79a2199ee,0x408ff09045af2dee,1 +np.float64,0x7fea929345f52526,0x408ffddadc322b07,1 +np.float64,0x3fe9ef629cf3dec5,0xbfd367129c166838,1 +np.float64,0x3feedf0ea2fdbe1d,0xbfaa862afca44c00,1 +np.float64,0x7fce725f723ce4be,0x408fef6cfd2769a8,1 +np.float64,0x7fe4313b3ca86275,0x408ffaaf9557ef8c,1 +np.float64,0xe2d46463c5a8d,0xc08ff165725c6b08,1 +np.float64,0x7fbacb4ace359695,0x408fe5f3647bd0d5,1 +np.float64,0x3fbafd009635fa01,0xc009f745a7a5c5d5,1 +np.float64,0x3fe3cea66ce79d4d,0xbfe6253b895e2838,1 +np.float64,0x7feaa71484354e28,0x408ffde3c0bad2a6,1 +np.float64,0x3fd755b8b42eab71,0xbff74a1444c6e654,1 +np.float64,0x3fc313e2172627c4,0xc005f830e77940c3,1 +np.float64,0x12d699a225ad4,0xc090070ec00f2338,1 +np.float64,0x3fa975fe8432ebfd,0xc01151b3da48b3f9,1 +np.float64,0x7fdce3103b39c61f,0x408ff6d19b3326fa,1 +np.float64,0x7fd341cbba268396,0x408ff2237490fdca,1 +np.float64,0x3fd8405885b080b1,0xbff6666d8802a7d5,1 +np.float64,0x3fe0f0cca3a1e199,0xbfed5cdb3e600791,1 +np.float64,0x7fbd56680c3aaccf,0x408fe6ff55bf378d,1 +np.float64,0x3f939c4f3027389e,0xc016d364dd6313fb,1 +np.float64,0x3fe9e87fac73d0ff,0xbfd37f9a2be4fe38,1 +np.float64,0x7fc93c6a883278d4,0x408fed4260e614f1,1 +np.float64,0x7fa88c0ff031181f,0x408fdcf09a46bd3a,1 +np.float64,0xd5487f99aa910,0xc08ff21b6390ab3b,1 +np.float64,0x3fe34acc96e69599,0xbfe75c9d290428fb,1 +np.float64,0x3fd17f5964a2feb3,0xbffdef50b524137b,1 +np.float64,0xe23dec0dc47be,0xc08ff16d1ce61dcb,1 +np.float64,0x3fec8bd64fb917ad,0xbfc5173941614b8f,1 +np.float64,0x3fc81d97d7303b30,0xc00343ccb791401d,1 +np.float64,0x7fe79ad18e2f35a2,0x408ffc7cf0ab0f2a,1 +np.float64,0x3f96306b402c60d7,0xc0161ce54754cac1,1 +np.float64,0xfb09fc97f6140,0xc08ff039d1d30123,1 +np.float64,0x3fec9c4afa793896,0xbfc4ace43ee46079,1 +np.float64,0x3f9262dac824c5b6,0xc01732a3a7eeb598,1 +np.float64,0x3fa5cd33f42b9a68,0xc01236ed4d315a3a,1 +np.float64,0x3fe7bb336caf7667,0xbfdb9a268a82e267,1 +np.float64,0xc6c338f98d867,0xc08ff2ebb8475bbc,1 +np.float64,0x3fd50714482a0e29,0xbff9b14a9f84f2c2,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fde2cd0f93c59a2,0xbff15afe35a43a37,1 +np.float64,0xf1719cb9e2e34,0xc08ff0acf77b06d3,1 +np.float64,0xfd3caaf9fa796,0xc08ff020101771bd,1 +np.float64,0x7f750d63a02a1ac6,0x408fc32ad0caa362,1 +np.float64,0x7fcc50f4e238a1e9,0x408fee96a5622f1a,1 +np.float64,0x421d1da0843a4,0xc08fff9ffe62d869,1 +np.float64,0x3fd9e17023b3c2e0,0xbff4e631d687ee8e,1 +np.float64,0x3fe4999a09693334,0xbfe4556b3734c215,1 +np.float64,0xd619ef03ac33e,0xc08ff21013c85529,1 +np.float64,0x3fc4da522229b4a4,0xc004f150b2c573aa,1 +np.float64,0x3feb04b053b60961,0xbfcf3fc9e00ebc40,1 +np.float64,0x3fbedec5ea3dbd8c,0xc0086a33dc22fab5,1 +np.float64,0x7fec3b217ab87642,0x408ffe8dbc8ca041,1 +np.float64,0xdb257d33b64b0,0xc08ff1cb42d3c182,1 +np.float64,0x7fa2d92ec025b25d,0x408fd9e414d11cb0,1 +np.float64,0x3fa425c550284b8b,0xc012ab7cbf83be12,1 +np.float64,0x10b4869021692,0xc09007c0487d648a,1 +np.float64,0x7f97918c902f2318,0x408fd47867806574,1 +np.float64,0x3fe4f91238e9f224,0xbfe38160b4e99919,1 +np.float64,0x3fc2b1af6125635f,0xc00634343bc58461,1 
+np.float64,0x3fc2a98071255301,0xc0063942bc8301be,1 +np.float64,0x3fe4cfc585299f8b,0xbfe3dca39f114f34,1 +np.float64,0x3fd1ea75b3a3d4eb,0xbffd63acd02c5406,1 +np.float64,0x3fd6bf48492d7e91,0xbff7e0cd249f80f9,1 +np.float64,0x76643d36ecc88,0xc08ff8e68f13b38c,1 +np.float64,0x7feeabab3e7d5755,0x408fff82a0fd4501,1 +np.float64,0x46c0d4a68d81b,0xc08ffed79abaddc9,1 +np.float64,0x3fd088d57ca111ab,0xbfff3dd0ed7128ea,1 +np.float64,0x3fed25887cba4b11,0xbfc13f47639bd645,1 +np.float64,0x7fd90984b4b21308,0x408ff52b022c7fb4,1 +np.float64,0x3fe6ef31daadde64,0xbfdec185760cbf21,1 +np.float64,0x3fe48dbe83291b7d,0xbfe47005b99920bd,1 +np.float64,0x3fdce8422f39d084,0xbff258a33a96cc8e,1 +np.float64,0xb8ecdef771d9c,0xc08ff3c0eca61b10,1 +np.float64,0x3fe9bbf9a03377f3,0xbfd41ecfdcc336b9,1 +np.float64,0x7fe2565339a4aca5,0x408ff992d8851eaf,1 +np.float64,0x3fe1693e3822d27c,0xbfec1919da2ca697,1 +np.float64,0x3fd3680488a6d009,0xbffb8b7330275947,1 +np.float64,0x7fbe4f3d2c3c9e79,0x408fe75fa3f4e600,1 +np.float64,0x7fd4cfef3ca99fdd,0x408ff308ee3ab50f,1 +np.float64,0x3fd9c9a51cb3934a,0xbff4fb7440055ce6,1 +np.float64,0x3fe08a9640a1152d,0xbfee76bd1bfbf5c2,1 +np.float64,0x3fef012c41fe0259,0xbfa757a2da7f9707,1 +np.float64,0x3fee653fe2fcca80,0xbfb2ffae0c95025c,1 +np.float64,0x7fd0776933a0eed1,0x408ff054e7b43d41,1 +np.float64,0x4c94e5c09929d,0xc08ffdedb7f49e5e,1 +np.float64,0xca3e3d17947c8,0xc08ff2b86dce2f7a,1 +np.float64,0x3fb528e1342a51c2,0xc00cc626c8e2d9ba,1 +np.float64,0xd774df81aee9c,0xc08ff1fd6f0a7548,1 +np.float64,0x3fc47a9b6128f537,0xc00526c577b80849,1 +np.float64,0x3fe29a6f6a6534df,0xbfe90a5f83644911,1 +np.float64,0x3fecda4f59f9b49f,0xbfc31e4a80c4cbb6,1 +np.float64,0x7fe51d44f5aa3a89,0x408ffb3382437426,1 +np.float64,0x3fd677fc412ceff9,0xbff82999086977e7,1 +np.float64,0x3fe2a3c7e7254790,0xbfe8f33415cdba9d,1 +np.float64,0x3fe6d8d1dc6db1a4,0xbfdf1bc61bc24dff,1 +np.float64,0x7febb32d8ef7665a,0x408ffe55a043ded1,1 +np.float64,0x60677860c0d0,0xc0900da2caa7d571,1 +np.float64,0x7390c2e0e7219,0xc08ff92df18bb5d2,1 +np.float64,0x3fca53711b34a6e2,0xc00240b07a9b529b,1 +np.float64,0x7fe7ce6dd8ef9cdb,0x408ffc961164ead9,1 +np.float64,0x7fc0c9de0d2193bb,0x408fe88e245767f6,1 +np.float64,0xc0ee217981dc4,0xc08ff343b77ea770,1 +np.float64,0x72bd4668e57a9,0xc08ff94323fd74fc,1 +np.float64,0x7fd6970e252d2e1b,0x408ff3fb1e2fead2,1 +np.float64,0x7fdcb61040396c20,0x408ff6bf926bc98f,1 +np.float64,0xda4faa25b49f6,0xc08ff1d68b3877f0,1 +np.float64,0x3feb344749f6688f,0xbfcdfba2d66c72c5,1 +np.float64,0x3fe2aa4284e55485,0xbfe8e32ae0683f57,1 +np.float64,0x3f8e8fcfd03d1fa0,0xc01843efb2129908,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x3fd8e01155b1c023,0xbff5d0529dae9515,1 +np.float64,0x3fe8033f3370067e,0xbfda837c80b87e7c,1 +np.float64,0x7fc5bf831e2b7f05,0x408feb8ae3b039a0,1 +np.float64,0x3fd8dcdf5331b9bf,0xbff5d349e1ed422a,1 +np.float64,0x3fe58b4e302b169c,0xbfe243c9cbccde44,1 +np.float64,0x3fea8a2e47b5145d,0xbfd1464e37221894,1 +np.float64,0x75cd1e88eb9a4,0xc08ff8f553ef0475,1 +np.float64,0x7fcfc876e23f90ed,0x408fefebe6cc95e6,1 +np.float64,0x7f51aceb002359d5,0x408fb1263f9003fb,1 +np.float64,0x7fc2a1b877254370,0x408fe9c1ec52f8b9,1 +np.float64,0x7fd495810e292b01,0x408ff2e859414d31,1 +np.float64,0x7fd72048632e4090,0x408ff440690cebdb,1 +np.float64,0x7fd7aafaffaf6,0xc08ff803a390779f,1 +np.float64,0x7fe18067d4a300cf,0x408ff9090a02693f,1 +np.float64,0x3fdc1080f8b82102,0xbff3077bf44a89bd,1 +np.float64,0x3fc34a462f26948c,0xc005d777b3cdf139,1 +np.float64,0x3fe21e4a1fe43c94,0xbfea428acfbc6ea9,1 +np.float64,0x1f0d79083e1b0,0xc090042c65a7abf2,1 
+np.float64,0x3fe8d0d15931a1a3,0xbfd779f6bbd4db78,1 +np.float64,0x3fe74578022e8af0,0xbfdd68b6c15e9f5e,1 +np.float64,0x50995dd0a132c,0xc08ffd56a5c8accf,1 +np.float64,0x3f9a6342b034c685,0xc0151ce1973c62bd,1 +np.float64,0x3f30856a00210ad4,0xc027e852f4d1fcbc,1 +np.float64,0x3febcf7646b79eed,0xbfc9e9cc9d12425c,1 +np.float64,0x8010000000000000,0x7ff8000000000000,1 +np.float64,0x3fdf520c02bea418,0xbff07ed5013f3062,1 +np.float64,0x3fe5433ecbea867e,0xbfe2df38968b6d14,1 +np.float64,0x3fb933a84e326751,0xc00ac1a144ad26c5,1 +np.float64,0x7b6d72c2f6daf,0xc08ff86b7a67f962,1 +np.float64,0xaef5dae75debc,0xc08ff46496bb2932,1 +np.float64,0x522d869aa45b1,0xc08ffd1d55281e98,1 +np.float64,0xa2462b05448c6,0xc08ff542fe0ac5fd,1 +np.float64,0x3fe2b71dd6e56e3c,0xbfe8c3690cf15415,1 +np.float64,0x3fe5778231aaef04,0xbfe26e495d09b783,1 +np.float64,0x3fe9b8d564f371ab,0xbfd42a161132970d,1 +np.float64,0x3f89ebc34033d787,0xc019373f90bfc7f1,1 +np.float64,0x3fe438ddc6e871bc,0xbfe53039341b0a93,1 +np.float64,0x873c75250e78f,0xc08ff75d8478dccd,1 +np.float64,0x807134cb00e27,0xc08ff7f5cf59c57a,1 +np.float64,0x3fac459878388b31,0xc010b6fe803bcdc2,1 +np.float64,0xca9dc7eb953b9,0xc08ff2b2fb480784,1 +np.float64,0x7feb38587bb670b0,0x408ffe21ff6d521e,1 +np.float64,0x7fd70e9b782e1d36,0x408ff437936b393a,1 +np.float64,0x3fa4037bbc2806f7,0xc012b55744c65ab2,1 +np.float64,0x3fd3d4637427a8c7,0xbffb0beebf4311ef,1 +np.float64,0x7fdabbda5db577b4,0x408ff5ecbc0d4428,1 +np.float64,0x7fda9be0a2b537c0,0x408ff5dee5d03d5a,1 +np.float64,0x7fe9c74396338e86,0x408ffd813506a18a,1 +np.float64,0x3fd058243e20b048,0xbfff822ffd8a7f21,1 +np.float64,0x3fe6aa6ca9ed54d9,0xbfdfd805629ff49e,1 +np.float64,0x3fd91431d5322864,0xbff5a025eea8c78b,1 +np.float64,0x7fe4d7f02329afdf,0x408ffb0d5d9b7878,1 +np.float64,0x3fe2954a12252a94,0xbfe917266e3e22d5,1 +np.float64,0x3fb25f7c8224bef9,0xc00e6764c81b3718,1 +np.float64,0x3fda4bddeeb497bc,0xbff4880638908c81,1 +np.float64,0x55dfd12eabbfb,0xc08ffc9b54ff4002,1 +np.float64,0x3fe8f399e031e734,0xbfd6f8e5c4dcd93f,1 +np.float64,0x3fd954a24832a945,0xbff56521f4707a06,1 +np.float64,0x3fdea911f2bd5224,0xbff0fcb2d0c2b2e2,1 +np.float64,0x3fe6b4ff8a2d69ff,0xbfdfacfc85cafeab,1 +np.float64,0x3fc7fa02042ff404,0xc00354e13b0767ad,1 +np.float64,0x3fe955088c72aa11,0xbfd593130f29949e,1 +np.float64,0xd7e74ec1afcea,0xc08ff1f74f61721c,1 +np.float64,0x3fe9d69c1ab3ad38,0xbfd3bf710a337e06,1 +np.float64,0x3fd85669a2b0acd3,0xbff65176143ccc1e,1 +np.float64,0x3fea99b285353365,0xbfd11062744783f2,1 +np.float64,0x3fe2c79f80a58f3f,0xbfe89ac33f990289,1 +np.float64,0x3f8332ba30266574,0xc01af2cb7b635783,1 +np.float64,0x30d0150061a1,0xc090119030f74c5d,1 +np.float64,0x3fdbf4cb06b7e996,0xbff31e5207aaa754,1 +np.float64,0x3fe6b56c216d6ad8,0xbfdfab42fb2941c5,1 +np.float64,0x7fc4dc239829b846,0x408feb0fb0e13fbe,1 +np.float64,0x3fd0ab85ef21570c,0xbfff0d95d6c7a35c,1 +np.float64,0x7fe13d75e5e27aeb,0x408ff8dc8efa476b,1 +np.float64,0x3fece3b832f9c770,0xbfc2e21b165d583f,1 +np.float64,0x3fe3a279c4e744f4,0xbfe68ca4fbb55dbf,1 +np.float64,0x3feb64659ef6c8cb,0xbfccb6204b6bf724,1 +np.float64,0x2279a6bc44f36,0xc0900391eeeb3e7c,1 +np.float64,0xb88046d571009,0xc08ff3c7b5b45300,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x3fe49af059a935e1,0xbfe4526c294f248f,1 +np.float64,0xa3e5508147cc,0xc0900a92ce5924b1,1 +np.float64,0x7fc56def3d2adbdd,0x408feb5f46c360e8,1 +np.float64,0x7fd99f3574333e6a,0x408ff56f3807987c,1 +np.float64,0x3fdc38d56fb871ab,0xbff2e667cad8f36a,1 +np.float64,0xd0b03507a1607,0xc08ff25bbcf8aa9d,1 +np.float64,0xc493f9078927f,0xc08ff30c5fa4e759,1 
+np.float64,0x3fc86ddbcb30dbb8,0xc0031da1fcb56d75,1 +np.float64,0x7fe75dc395aebb86,0x408ffc5eef841491,1 +np.float64,0x1647618a2c8ed,0xc0900616ef9479c1,1 +np.float64,0xdf144763be289,0xc08ff196b527f3c9,1 +np.float64,0x3fe0b29da6a1653b,0xbfee078b5f4d7744,1 +np.float64,0x3feb055852b60ab1,0xbfcf3b4db5779a7a,1 +np.float64,0x3fe8bc1625f1782c,0xbfd7c739ade904bc,1 +np.float64,0x7fd19bfb8ea337f6,0x408ff11b2b55699c,1 +np.float64,0x3fed1d80d1ba3b02,0xbfc1722e8d3ce094,1 +np.float64,0x2d9c65925b38e,0xc09001f46bcd3bc5,1 +np.float64,0x7fed6f4d857ade9a,0x408fff091cf6a3b4,1 +np.float64,0x3fd070cd6ba0e19b,0xbfff5f7609ca29e8,1 +np.float64,0x7fea3508b8f46a10,0x408ffdb1f30bd6be,1 +np.float64,0x508b897ca1172,0xc08ffd58a0eb3583,1 +np.float64,0x7feba367b07746ce,0x408ffe4f0bf4bd4e,1 +np.float64,0x3fefebd5c4bfd7ac,0xbf6d20b4fcf21b69,1 +np.float64,0x3fd8ef07b8b1de0f,0xbff5c2745c0795a5,1 +np.float64,0x3fd38ed518271daa,0xbffb5d75f00f6900,1 +np.float64,0x6de0fecedbc20,0xc08ff9c307bbc647,1 +np.float64,0xafc0ffc35f820,0xc08ff45737e5d6b4,1 +np.float64,0x7fd282097ca50412,0x408ff1ae3b27bf3b,1 +np.float64,0x3fe2f2d50b65e5aa,0xbfe831042e6a1e99,1 +np.float64,0x3faa437bac3486f7,0xc01123d8d962205a,1 +np.float64,0x3feea54434fd4a88,0xbfaff202cc456647,1 +np.float64,0x3fc9e65b8633ccb7,0xc00270e77ffd19da,1 +np.float64,0x7fee15af61fc2b5e,0x408fff49a49154a3,1 +np.float64,0x7fefe670a73fcce0,0x408ffff6c44c1005,1 +np.float64,0x3fc0832d0f21065a,0xc007a2dc2f25384a,1 +np.float64,0x3fecfc96bcb9f92d,0xbfc24367c3912620,1 +np.float64,0x3feb705682b6e0ad,0xbfcc65b1bb16f9c5,1 +np.float64,0x3fe185c4f9630b8a,0xbfebcdb401af67a4,1 +np.float64,0x3fb0a5a9f6214b54,0xc00f8ada2566a047,1 +np.float64,0x7fe2908cdda52119,0x408ff9b744861fb1,1 +np.float64,0x7fee776e183ceedb,0x408fff6ee7c2f86e,1 +np.float64,0x3fce1d608f3c3ac1,0xc000b3685d006474,1 +np.float64,0x7fecf92aa339f254,0x408ffeda6c998267,1 +np.float64,0xce13cb519c27a,0xc08ff280f02882a9,1 +np.float64,0x1,0xc090c80000000000,1 +np.float64,0x3fe485a8afa90b51,0xbfe4823265d5a50a,1 +np.float64,0x3feea60908bd4c12,0xbfafdf7ad7fe203f,1 +np.float64,0x3fd2253033a44a60,0xbffd187d0ec8d5b9,1 +np.float64,0x435338fc86a68,0xc08fff6a591059dd,1 +np.float64,0x7fce8763a73d0ec6,0x408fef74f1e715ff,1 +np.float64,0x3fbe5ddb783cbbb7,0xc0089acc5afa794b,1 +np.float64,0x7fe4cf19ada99e32,0x408ffb0877ca302b,1 +np.float64,0x3fe94c9ea1b2993d,0xbfd5b1c2e867b911,1 +np.float64,0x3fe75541c72eaa84,0xbfdd2a27aa117699,1 +np.float64,0x8000000000000001,0x7ff8000000000000,1 +np.float64,0x7fdbec7f2c37d8fd,0x408ff66d69a7f818,1 +np.float64,0x8ef10d091de22,0xc08ff6b9ca5094f8,1 +np.float64,0x3fea69025b74d205,0xbfd1b9fe2c252c70,1 +np.float64,0x562376d0ac46f,0xc08ffc924111cd31,1 +np.float64,0x8e8097ab1d013,0xc08ff6c2e2706f67,1 +np.float64,0x3fca6803ed34d008,0xc00237aef808825b,1 +np.float64,0x7fe8fe9067b1fd20,0x408ffd25f459a7d1,1 +np.float64,0x3f918e8c7f233,0xc0900009fe011d54,1 +np.float64,0x3fdfe773833fcee7,0xbff011bc1af87bb9,1 +np.float64,0xefffef6fdfffe,0xc08ff0beb0f09eb0,1 +np.float64,0x7fe64610282c8c1f,0x408ffbd17209db18,1 +np.float64,0xe66be8c1ccd7d,0xc08ff13706c056e1,1 +np.float64,0x2837e570506fd,0xc09002ae4dae0c1a,1 +np.float64,0x3febe3a081f7c741,0xbfc964171f2a5a47,1 +np.float64,0x3fe21ed09a243da1,0xbfea41342d29c3ff,1 +np.float64,0x3fe1596c8162b2d9,0xbfec431eee30823a,1 +np.float64,0x8f2b9a131e574,0xc08ff6b51104ed4e,1 +np.float64,0x3fe88ed179711da3,0xbfd870d08a4a4b0c,1 +np.float64,0x34159bc2682b4,0xc09001305a885f94,1 +np.float64,0x1ed31e543da65,0xc0900437481577f8,1 +np.float64,0x3feafbe9de75f7d4,0xbfcf7bcdbacf1c61,1 
+np.float64,0xfb16fb27f62e0,0xc08ff03938e682a2,1 +np.float64,0x3fe5cd5ba7eb9ab7,0xbfe1b7165771af3c,1 +np.float64,0x7fe72905e76e520b,0x408ffc44c4e7e80c,1 +np.float64,0x7fb7136e2e2e26db,0x408fe439fd383fb7,1 +np.float64,0x8fa585e11f4c,0xc0900b55a08a486b,1 +np.float64,0x7fed985ce47b30b9,0x408fff192b596821,1 +np.float64,0x3feaaf0869755e11,0xbfd0c671571b3764,1 +np.float64,0x3fa40fd4ec281faa,0xc012b1c8dc0b9e5f,1 +np.float64,0x7fda2a70993454e0,0x408ff5ad47b0c68a,1 +np.float64,0x3fe5f7e931abefd2,0xbfe15d52b3605abf,1 +np.float64,0x3fe9fc6d3533f8da,0xbfd338b06a790994,1 +np.float64,0x3fe060649420c0c9,0xbfeeed1756111891,1 +np.float64,0x3fce8435e33d086c,0xc0008c41cea9ed40,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x617820aec2f05,0xc08ffb251e9af0f0,1 +np.float64,0x7fcc4ab6ee38956d,0x408fee9419c8f77d,1 +np.float64,0x7fdefda2fc3dfb45,0x408ff7a15063bc05,1 +np.float64,0x7fe5138ccaaa2719,0x408ffb2e30f3a46e,1 +np.float64,0x3fe3817a836702f5,0xbfe6da7c2b25e35a,1 +np.float64,0x3fb8a7dafa314fb6,0xc00b025bc0784ebe,1 +np.float64,0x349dc420693d,0xc09011215825d2c8,1 +np.float64,0x6b0e504ad61cb,0xc08ffa0fee9c5cd6,1 +np.float64,0x273987644e732,0xc09002d34294ed79,1 +np.float64,0x3fc0bd8a6e217b15,0xc0077a5828b4d2f5,1 +np.float64,0x758b48c4eb16a,0xc08ff8fbc8fbe46a,1 +np.float64,0x3fc8a9a52631534a,0xc00301854ec0ef81,1 +np.float64,0x7fe79d29a76f3a52,0x408ffc7e1607a4c1,1 +np.float64,0x3fd7d3ebce2fa7d8,0xbff6ce8a94aebcda,1 +np.float64,0x7fd1cb68a52396d0,0x408ff13a17533b2b,1 +np.float64,0x7fda514a5d34a294,0x408ff5be5e081578,1 +np.float64,0x3fc40b4382281687,0xc0056632c8067228,1 +np.float64,0x7feff1208c3fe240,0x408ffffaa180fa0d,1 +np.float64,0x8f58739f1eb0f,0xc08ff6b17402689d,1 +np.float64,0x1fdbe9a23fb7e,0xc090040685b2d24f,1 +np.float64,0xcb1d0e87963a2,0xc08ff2abbd903b82,1 +np.float64,0x3fc45a6a1a28b4d4,0xc00538f86c4aeaee,1 +np.float64,0x3fe61885b1ac310b,0xbfe118fd2251d2ec,1 +np.float64,0x3fedf584c8fbeb0a,0xbfb8572433ff67a9,1 +np.float64,0x7fb0bddd1a217bb9,0x408fe085e0d621db,1 +np.float64,0x72d8d3e0e5b3,0xc0900ca02f68c7a1,1 +np.float64,0x5cca6ff6b994f,0xc08ffbb6751fda01,1 +np.float64,0x7fe3197839a632ef,0x408ffa0b2fccfb68,1 +np.float64,0x3fcce4d9c139c9b4,0xc0012dae05baa91b,1 +np.float64,0x3fe76d00f62eda02,0xbfdccc5f12799be1,1 +np.float64,0x3fc53c22f72a7846,0xc004bbaa9cbc7958,1 +np.float64,0x7fdda02f1ebb405d,0x408ff71c37c71659,1 +np.float64,0x3fe0844eaba1089d,0xbfee884722762583,1 +np.float64,0x3febb438dc776872,0xbfca9f05e1c691f1,1 +np.float64,0x3fdf4170cdbe82e2,0xbff08b1561c8d848,1 +np.float64,0x3fce1b8d6f3c371b,0xc000b41b69507671,1 +np.float64,0x8370e60706e1d,0xc08ff7b19ea0b4ca,1 +np.float64,0x7fa5bf92382b7f23,0x408fdb8aebb3df87,1 +np.float64,0x7fe4a59979a94b32,0x408ffaf15c1358cd,1 +np.float64,0x3faa66086034cc11,0xc0111c466b7835d6,1 +np.float64,0x7fb7a958262f52af,0x408fe48408b1e093,1 +np.float64,0x3fdaacc5f635598c,0xbff43390d06b5614,1 +np.float64,0x3fd2825b9e2504b7,0xbffca3234264f109,1 +np.float64,0x3fcede160a3dbc2c,0xc0006a759e29060c,1 +np.float64,0x7fd3b19603a7632b,0x408ff265b528371c,1 +np.float64,0x7fcf8a86ea3f150d,0x408fefd552e7f3b2,1 +np.float64,0xedbcc0f7db798,0xc08ff0daad12096b,1 +np.float64,0xf1e1683de3c2d,0xc08ff0a7a0a37e00,1 +np.float64,0xb6ebd9bf6dd7b,0xc08ff3e11e28378d,1 +np.float64,0x3fec8090d6f90122,0xbfc56031b72194cc,1 +np.float64,0x3fd3e10e37a7c21c,0xbffafd34a3ebc933,1 +np.float64,0x7fbb1c96aa36392c,0x408fe616347b3342,1 +np.float64,0x3fe2f3996f25e733,0xbfe82f25bc5d1bbd,1 +np.float64,0x7fe8709da870e13a,0x408ffce3ab6ce59a,1 +np.float64,0x7fea3233d1b46467,0x408ffdb0b3bbc6de,1 
+np.float64,0x65fa4112cbf49,0xc08ffa9f85eb72b9,1 +np.float64,0x3fca2cae9f34595d,0xc00251bb275afb87,1 +np.float64,0x8135fd9f026c0,0xc08ff7e42e14dce7,1 +np.float64,0x7fe0a6f057e14de0,0x408ff876081a4bfe,1 +np.float64,0x10000000000000,0xc08ff00000000000,1 +np.float64,0x3fe1fd506263faa1,0xbfea96dd8c543b72,1 +np.float64,0xa5532c554aa66,0xc08ff50bf5bfc66d,1 +np.float64,0xc239d00b8473a,0xc08ff32ff0ea3f92,1 +np.float64,0x7fdb5314e336a629,0x408ff62d4ff60d82,1 +np.float64,0x3fe5f506e2abea0e,0xbfe16362a4682120,1 +np.float64,0x3fa20c60202418c0,0xc0134e08d82608b6,1 +np.float64,0x7fe03864b22070c8,0x408ff82866d65e9a,1 +np.float64,0x3fe72cf5656e59eb,0xbfddca298969effa,1 +np.float64,0x5c295386b852b,0xc08ffbca90b136c9,1 +np.float64,0x7fd71e5020ae3c9f,0x408ff43f6d58eb7c,1 +np.float64,0x3fd1905a842320b5,0xbffdd8ecd288159c,1 +np.float64,0x3fe6bddb256d7bb6,0xbfdf88fee1a820bb,1 +np.float64,0xe061b967c0c37,0xc08ff18581951561,1 +np.float64,0x3fe534f65cea69ed,0xbfe2fe45fe7d3040,1 +np.float64,0xdc7dae07b8fb6,0xc08ff1b93074ea76,1 +np.float64,0x3fd0425082a084a1,0xbfffa11838b21633,1 +np.float64,0xba723fc974e48,0xc08ff3a8b8d01c58,1 +np.float64,0x3fce42ffc73c8600,0xc000a5062678406e,1 +np.float64,0x3f2e6d3c7e5ce,0xc090001304cfd1c7,1 +np.float64,0x3fd4b2e5f7a965cc,0xbffa0e6e6bae0a68,1 +np.float64,0x3fe6db1d18edb63a,0xbfdf128158ee92d9,1 +np.float64,0x7fe4e5792f29caf1,0x408ffb14d9dbf133,1 +np.float64,0x3fc11cdf992239bf,0xc00739569619cd77,1 +np.float64,0x3fc05ea11220bd42,0xc007bc841b48a890,1 +np.float64,0x4bd592d497ab3,0xc08ffe0ab1c962e2,1 +np.float64,0x280068fc5000e,0xc09002b64955e865,1 +np.float64,0x7fe2f2637065e4c6,0x408ff9f379c1253a,1 +np.float64,0x3fefc38467ff8709,0xbf85e53e64b9a424,1 +np.float64,0x2d78ec5a5af1e,0xc09001f8ea8601e0,1 +np.float64,0x7feeef2b957dde56,0x408fff9bebe995f7,1 +np.float64,0x2639baf44c738,0xc09002f9618d623b,1 +np.float64,0x3fc562964d2ac52d,0xc004a6d76959ef78,1 +np.float64,0x3fe21b071fe4360e,0xbfea4adb2cd96ade,1 +np.float64,0x7fe56aa6802ad54c,0x408ffb5d81d1a898,1 +np.float64,0x4296b452852d7,0xc08fff8ad7fbcbe1,1 +np.float64,0x7fe3fac4ff27f589,0x408ffa9049eec479,1 +np.float64,0x7fe7a83e6caf507c,0x408ffc837f436604,1 +np.float64,0x3fc4ac5b872958b7,0xc0050add72381ac3,1 +np.float64,0x3fd6d697c02dad30,0xbff7c931a3eefb01,1 +np.float64,0x3f61e391c023c724,0xc021ad91e754f94b,1 +np.float64,0x10817f9c21031,0xc09007d20434d7bc,1 +np.float64,0x3fdb9c4c4cb73899,0xbff367d8615c5ece,1 +np.float64,0x3fe26ead6b64dd5b,0xbfe977771def5989,1 +np.float64,0x3fc43ea5c3287d4c,0xc00548c2163ae631,1 +np.float64,0x3fe05bd8bba0b7b1,0xbfeef9ea0db91abc,1 +np.float64,0x3feac78369358f07,0xbfd071e2b0aeab39,1 +np.float64,0x7fe254922ca4a923,0x408ff991bdd4e5d3,1 +np.float64,0x3fe5a2f5842b45eb,0xbfe21135c9a71666,1 +np.float64,0x3fd5daf98c2bb5f3,0xbff8cd24f7c07003,1 +np.float64,0x3fcb2a1384365427,0xc001e40f0d04299a,1 +np.float64,0x3fe073974360e72f,0xbfeeb7183a9930b7,1 +np.float64,0xcf3440819e688,0xc08ff270d3a71001,1 +np.float64,0x3fd35656cda6acae,0xbffba083fba4939d,1 +np.float64,0x7fe6c59b4ded8b36,0x408ffc12ce725425,1 +np.float64,0x3fba896f943512df,0xc00a291cb6947701,1 +np.float64,0x7fe54917e86a922f,0x408ffb4b5e0fb848,1 +np.float64,0x7fed2a3f51ba547e,0x408ffeede945a948,1 +np.float64,0x3fdc72bd5038e57b,0xbff2b73b7e93e209,1 +np.float64,0x7fefdb3f9f3fb67e,0x408ffff2b702a768,1 +np.float64,0x3fb0184430203088,0xc00fee8c1351763c,1 +np.float64,0x7d6c3668fad87,0xc08ff83c195f2cca,1 +np.float64,0x3fd5aa254aab544b,0xbff900f16365991b,1 +np.float64,0x3f963daab02c7b55,0xc0161974495b1b71,1 +np.float64,0x3fa7a9c5982f538b,0xc011bde0f6052a89,1 
+np.float64,0xb3a5a74b674b5,0xc08ff4167bc97c81,1 +np.float64,0x7fad0c14503a1828,0x408fdee1f2d56cd7,1 +np.float64,0x43e0e9d887c1e,0xc08fff522837b13b,1 +np.float64,0x3fe513b20aea2764,0xbfe346ea994100e6,1 +np.float64,0x7fe4e10393e9c206,0x408ffb12630f6a06,1 +np.float64,0x68b286e2d1651,0xc08ffa51c0d795d4,1 +np.float64,0x7fe8de453331bc89,0x408ffd17012b75ac,1 +np.float64,0x1b3d77d4367b0,0xc09004edea60aa36,1 +np.float64,0x3fd351cbc326a398,0xbffba5f0f4d5fdba,1 +np.float64,0x3fd264951b24c92a,0xbffcc8636788b9bf,1 +np.float64,0xd2465761a48cb,0xc08ff2455c9c53e5,1 +np.float64,0x7fe46a0ef028d41d,0x408ffacfe32c6f5d,1 +np.float64,0x3fafd8ac4c3fb159,0xc010071bf33195d0,1 +np.float64,0x902aec5d2055e,0xc08ff6a08e28aabc,1 +np.float64,0x3fcea61bb03d4c37,0xc0007f76e509b657,1 +np.float64,0x7fe8d90f9571b21e,0x408ffd1495f952e7,1 +np.float64,0x7fa650c9442ca192,0x408fdbd6ff22fdd8,1 +np.float64,0x3fe8ecfdf171d9fc,0xbfd7115df40e8580,1 +np.float64,0x7fd4e6fe7f29cdfc,0x408ff315b0dae183,1 +np.float64,0x77df4c52efbea,0xc08ff8c1d5c1df33,1 +np.float64,0xe200b0cfc4016,0xc08ff1703cfb8e79,1 +np.float64,0x3fe230ea7e2461d5,0xbfea132d2385160e,1 +np.float64,0x7fd1f7ced723ef9d,0x408ff156bfbf92a4,1 +np.float64,0x3fea762818f4ec50,0xbfd18c12a88e5f79,1 +np.float64,0x7feea4ba7c7d4974,0x408fff8004164054,1 +np.float64,0x833ec605067d9,0xc08ff7b606383841,1 +np.float64,0x7fd0c2d7fea185af,0x408ff0894f3a0cf4,1 +np.float64,0x3fe1d7d61d23afac,0xbfeaf76fee875d3e,1 +np.float64,0x65adecb0cb5be,0xc08ffaa82cb09d68,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-sin.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-sin.csv new file mode 100644 index 0000000..03e76ff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-sin.csv @@ -0,0 +1,1370 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x004b4716,2 +np.float32,0x007b2490,0x007b2490,2 +np.float32,0x007c99fa,0x007c99fa,2 +np.float32,0x00734a0c,0x00734a0c,2 +np.float32,0x0070de24,0x0070de24,2 +np.float32,0x007fffff,0x007fffff,2 +np.float32,0x00000001,0x00000001,2 +## -ve denormals ## +np.float32,0x80495d65,0x80495d65,2 +np.float32,0x806894f6,0x806894f6,2 +np.float32,0x80555a76,0x80555a76,2 +np.float32,0x804e1fb8,0x804e1fb8,2 +np.float32,0x80687de9,0x80687de9,2 +np.float32,0x807fffff,0x807fffff,2 +np.float32,0x80000001,0x80000001,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x00000000,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x00800000,0x00800000,2 +np.float32,0x80800000,0x80800000,2 +## 1.00f ## +np.float32,0x3f800000,0x3f576aa4,2 +np.float32,0x3f800001,0x3f576aa6,2 +np.float32,0x3f800002,0x3f576aa7,2 +np.float32,0xc090a8b0,0x3f7b4e48,2 +np.float32,0x41ce3184,0x3f192d43,2 +np.float32,0xc1d85848,0xbf7161cb,2 +np.float32,0x402b8820,0x3ee3f29f,2 +np.float32,0x42b4e454,0x3f1d0151,2 +np.float32,0x42a67a60,0x3f7ffa4c,2 +np.float32,0x41d92388,0x3f67beef,2 +np.float32,0x422dd66c,0xbeffb0c1,2 +np.float32,0xc28f5be6,0xbf0bae79,2 +np.float32,0x41ab2674,0x3f0ffe2b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0xbf3504f3,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x4016cbe4,0x3f3504f3,2 
+np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x407b53d2,0xbf3504f5,2 +np.float32,0xc07b53d2,0x3f3504f5,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x40afede0,0xbf3504ef,2 +np.float32,0xc0afede0,0x3f3504ef,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0xbf3504f3,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x410a3ae7,0x3f3504eb,2 +np.float32,0xc10a3ae7,0xbf3504eb,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x41235ce2,0xbf3504f7,2 +np.float32,0xc1235ce2,0x3f3504f7,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x413c7edd,0xbf3504f3,2 +np.float32,0xc13c7edd,0x3f3504f3,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x4155a0d9,0x3f3504fb,2 +np.float32,0xc155a0d9,0xbf3504fb,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x416ec2d4,0x3f3504ef,2 +np.float32,0xc16ec2d4,0xbf3504ef,2 +np.float32,0x41eec2d4,0xbf800000,2 
+np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x4183f268,0xbf3504ff,2 +np.float32,0xc183f268,0x3f3504ff,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x41908365,0xbf3504f6,2 +np.float32,0xc1908365,0x3f3504f6,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x419d1463,0x3f3504f8,2 +np.float32,0xc19d1463,0xbf3504f8,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x41a9a561,0x3f3504e7,2 +np.float32,0xc1a9a561,0xbf3504e7,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x41b6365e,0xbf3504f0,2 +np.float32,0xc1b6365e,0x3f3504f0,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x41c2c75c,0xbf3504ef,2 +np.float32,0xc1c2c75c,0x3f3504ef,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x41cf585a,0x3f3504ff,2 +np.float32,0xc1cf585a,0xbf3504ff,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 +np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x41dbe958,0x3f3504e0,2 +np.float32,0xc1dbe958,0xbf3504e0,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 
+np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x41e87a55,0xbf3504f8,2 +np.float32,0xc1e87a55,0x3f3504f8,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x41f50b53,0xbf3504e7,2 +np.float32,0xc1f50b53,0x3f3504e7,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x4200ce28,0x3f3504f0,2 +np.float32,0xc200ce28,0xbf3504f0,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x420716a7,0x3f3504ee,2 +np.float32,0xc20716a7,0xbf3504ee,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x420d5f26,0xbf350500,2 +np.float32,0xc20d5f26,0x3f350500,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x4213a7a5,0xbf3504df,2 +np.float32,0xc213a7a5,0x3f3504df,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4219f024,0x3f35050f,2 +np.float32,0xc219f024,0xbf35050f,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x431d1463,0x36455799,2 +np.float32,0xc31d1463,0xb6455799,2 +np.float32,0x422038a3,0x3f3504d0,2 +np.float32,0xc22038a3,0xbf3504d0,2 +np.float32,0x42a038a3,0xbf800000,2 +np.float32,0xc2a038a3,0x3f800000,2 +np.float32,0x432038a3,0xb746cd61,2 +np.float32,0xc32038a3,0x3746cd61,2 +np.float32,0x42235ce2,0xb5b889b6,2 
+np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x43235ce2,0x36b889b6,2 +np.float32,0xc3235ce2,0xb6b889b6,2 +np.float32,0x42268121,0xbf3504f1,2 +np.float32,0xc2268121,0x3f3504f1,2 +np.float32,0x42a68121,0x3f800000,2 +np.float32,0xc2a68121,0xbf800000,2 +np.float32,0x43268121,0x35643aac,2 +np.float32,0xc3268121,0xb5643aac,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x4329a561,0x370733d0,2 +np.float32,0xc329a561,0xb70733d0,2 +np.float32,0x422cc9a0,0xbf3504ee,2 +np.float32,0xc22cc9a0,0x3f3504ee,2 +np.float32,0x42acc9a0,0xbf800000,2 +np.float32,0xc2acc9a0,0x3f800000,2 +np.float32,0x432cc9a0,0xb5e55a50,2 +np.float32,0xc32cc9a0,0x35e55a50,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x432fede0,0x373222c4,2 +np.float32,0xc32fede0,0xb73222c4,2 +np.float32,0x4233121f,0x3f350500,2 +np.float32,0xc233121f,0xbf350500,2 +np.float32,0x42b3121f,0x3f800000,2 +np.float32,0xc2b3121f,0xbf800000,2 +np.float32,0x4333121f,0xb68f347d,2 +np.float32,0xc333121f,0x368f347d,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x4336365e,0xb60bb91c,2 +np.float32,0xc336365e,0x360bb91c,2 +np.float32,0x42395a9e,0x3f3504df,2 +np.float32,0xc2395a9e,0xbf3504df,2 +np.float32,0x42b95a9e,0xbf800000,2 +np.float32,0xc2b95a9e,0x3f800000,2 +np.float32,0x43395a9e,0xb6e51267,2 +np.float32,0xc3395a9e,0x36e51267,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x433c7edd,0x35000add,2 +np.float32,0xc33c7edd,0xb5000add,2 +np.float32,0x423fa31d,0xbf35050f,2 +np.float32,0xc23fa31d,0x3f35050f,2 +np.float32,0x42bfa31d,0x3f800000,2 +np.float32,0xc2bfa31d,0xbf800000,2 +np.float32,0x433fa31d,0xb71d7828,2 +np.float32,0xc33fa31d,0x371d7828,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x4342c75c,0x364bbe8a,2 +np.float32,0xc342c75c,0xb64bbe8a,2 +np.float32,0x4245eb9c,0xbf3504d0,2 +np.float32,0xc245eb9c,0x3f3504d0,2 +np.float32,0x42c5eb9c,0xbf800000,2 +np.float32,0xc2c5eb9c,0x3f800000,2 +np.float32,0x4345eb9c,0xb748671d,2 +np.float32,0xc345eb9c,0x3748671d,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x43490fdb,0x36bbbd2e,2 +np.float32,0xc3490fdb,0xb6bbbd2e,2 +np.float32,0x424c341a,0x3f3504f1,2 +np.float32,0xc24c341a,0xbf3504f1,2 +np.float32,0x42cc341a,0x3f800000,2 +np.float32,0xc2cc341a,0xbf800000,2 +np.float32,0x434c341a,0x354a9ee6,2 +np.float32,0xc34c341a,0xb54a9ee6,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 +np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x434f585a,0x3708cd8c,2 +np.float32,0xc34f585a,0xb708cd8c,2 +np.float32,0x42527c99,0x3f3504ee,2 +np.float32,0xc2527c99,0xbf3504ee,2 +np.float32,0x42d27c99,0xbf800000,2 +np.float32,0xc2d27c99,0x3f800000,2 +np.float32,0x43527c99,0xb5f22833,2 +np.float32,0xc3527c99,0x35f22833,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 
+np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x4355a0d9,0x3733bc81,2 +np.float32,0xc355a0d9,0xb733bc81,2 +np.float32,0x4258c518,0xbf350500,2 +np.float32,0xc258c518,0x3f350500,2 +np.float32,0x42d8c518,0x3f800000,2 +np.float32,0xc2d8c518,0xbf800000,2 +np.float32,0x4358c518,0xb69267f6,2 +np.float32,0xc358c518,0x369267f6,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x435be958,0x375eab75,2 +np.float32,0xc35be958,0xb75eab75,2 +np.float32,0x425f0d97,0xbf3504df,2 +np.float32,0xc25f0d97,0x3f3504df,2 +np.float32,0x42df0d97,0xbf800000,2 +np.float32,0xc2df0d97,0x3f800000,2 +np.float32,0x435f0d97,0xb6e845e0,2 +np.float32,0xc35f0d97,0x36e845e0,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x436231d6,0x3519a6a2,2 +np.float32,0xc36231d6,0xb519a6a2,2 +np.float32,0x42655616,0x3f35050f,2 +np.float32,0xc2655616,0xbf35050f,2 +np.float32,0x42e55616,0x3f800000,2 +np.float32,0xc2e55616,0xbf800000,2 +np.float32,0x43655616,0xb71f11e5,2 +np.float32,0xc3655616,0x371f11e5,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x43687a55,0x3652257b,2 +np.float32,0xc3687a55,0xb652257b,2 +np.float32,0x426b9e95,0x3f3504cf,2 +np.float32,0xc26b9e95,0xbf3504cf,2 +np.float32,0x42eb9e95,0xbf800000,2 +np.float32,0xc2eb9e95,0x3f800000,2 +np.float32,0x436b9e95,0xb74a00d9,2 +np.float32,0xc36b9e95,0x374a00d9,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x436ec2d4,0x36bef0a7,2 +np.float32,0xc36ec2d4,0xb6bef0a7,2 +np.float32,0x4271e713,0xbf3504f1,2 +np.float32,0xc271e713,0x3f3504f1,2 +np.float32,0x42f1e713,0x3f800000,2 +np.float32,0xc2f1e713,0xbf800000,2 +np.float32,0x4371e713,0x35310321,2 +np.float32,0xc371e713,0xb5310321,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x43750b53,0x370a6748,2 +np.float32,0xc3750b53,0xb70a6748,2 +np.float32,0x42782f92,0xbf3504ee,2 +np.float32,0xc2782f92,0x3f3504ee,2 +np.float32,0x42f82f92,0xbf800000,2 +np.float32,0xc2f82f92,0x3f800000,2 +np.float32,0x43782f92,0xb5fef616,2 +np.float32,0xc3782f92,0x35fef616,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x437b53d2,0x3735563d,2 +np.float32,0xc37b53d2,0xb735563d,2 +np.float32,0x427e7811,0x3f350500,2 +np.float32,0xc27e7811,0xbf350500,2 +np.float32,0x42fe7811,0x3f800000,2 +np.float32,0xc2fe7811,0xbf800000,2 +np.float32,0x437e7811,0xb6959b6f,2 +np.float32,0xc37e7811,0x36959b6f,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4380ce28,0xb5fdd672,2 +np.float32,0xc380ce28,0x35fdd672,2 +np.float32,0x42826048,0x3f3504de,2 +np.float32,0xc2826048,0xbf3504de,2 +np.float32,0x43026048,0xbf800000,2 +np.float32,0xc3026048,0x3f800000,2 +np.float32,0x43826048,0xb6eb7958,2 +np.float32,0xc3826048,0x36eb7958,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x4383f268,0x37859a13,2 
+np.float32,0xc383f268,0xb7859a13,2 +np.float32,0x42858487,0xbf3504e2,2 +np.float32,0xc2858487,0x3f3504e2,2 +np.float32,0x43058487,0x3f800000,2 +np.float32,0xc3058487,0xbf800000,2 +np.float32,0x43858487,0x36bea8be,2 +np.float32,0xc3858487,0xb6bea8be,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x438716a7,0x36588c6d,2 +np.float32,0xc38716a7,0xb6588c6d,2 +np.float32,0x4288a8c7,0xbf3504cf,2 +np.float32,0xc288a8c7,0x3f3504cf,2 +np.float32,0x4308a8c7,0xbf800000,2 +np.float32,0xc308a8c7,0x3f800000,2 +np.float32,0x4388a8c7,0xb74b9a96,2 +np.float32,0xc388a8c7,0x374b9a96,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x438a3ae7,0x37b08908,2 +np.float32,0xc38a3ae7,0xb7b08908,2 +np.float32,0x428bcd06,0x3f3504f2,2 +np.float32,0xc28bcd06,0xbf3504f2,2 +np.float32,0x430bcd06,0x3f800000,2 +np.float32,0xc30bcd06,0xbf800000,2 +np.float32,0x438bcd06,0x3517675b,2 +np.float32,0xc38bcd06,0xb517675b,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x438d5f26,0x370c0105,2 +np.float32,0xc38d5f26,0xb70c0105,2 +np.float32,0x428ef146,0x3f3504c0,2 +np.float32,0xc28ef146,0xbf3504c0,2 +np.float32,0x430ef146,0xbf800000,2 +np.float32,0xc30ef146,0x3f800000,2 +np.float32,0x438ef146,0xb790bc40,2 +np.float32,0xc38ef146,0x3790bc40,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x43908365,0xb692200d,2 +np.float32,0xc3908365,0x3692200d,2 +np.float32,0x42921585,0xbf350501,2 +np.float32,0xc2921585,0x3f350501,2 +np.float32,0x43121585,0x3f800000,2 +np.float32,0xc3121585,0xbf800000,2 +np.float32,0x43921585,0xb698cee8,2 +np.float32,0xc3921585,0x3698cee8,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4393a7a5,0x3761deee,2 +np.float32,0xc393a7a5,0xb761deee,2 +np.float32,0x429539c5,0xbf3504b1,2 +np.float32,0xc29539c5,0x3f3504b1,2 +np.float32,0x431539c5,0xbf800000,2 +np.float32,0xc31539c5,0x3f800000,2 +np.float32,0x439539c5,0xb7bbab34,2 +np.float32,0xc39539c5,0x37bbab34,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4396cbe4,0x354cde2e,2 +np.float32,0xc396cbe4,0xb54cde2e,2 +np.float32,0x42985e04,0x3f350510,2 +np.float32,0xc2985e04,0xbf350510,2 +np.float32,0x43185e04,0x3f800000,2 +np.float32,0xc3185e04,0xbf800000,2 +np.float32,0x43985e04,0xb722455d,2 +np.float32,0xc3985e04,0x3722455d,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x4399f024,0x379bde6c,2 +np.float32,0xc399f024,0xb79bde6c,2 +np.float32,0x429b8243,0x3f3504fc,2 +np.float32,0xc29b8243,0xbf3504fc,2 +np.float32,0x431b8243,0xbf800000,2 +np.float32,0xc31b8243,0x3f800000,2 +np.float32,0x439b8243,0x364b2eb8,2 +np.float32,0xc39b8243,0xb64b2eb8,2 +np.float32,0x435b2047,0xbf350525,2 +np.float32,0x42a038a2,0xbf800000,2 +np.float32,0x432038a2,0x3664ca7e,2 +np.float32,0x4345eb9b,0x365e638c,2 +np.float32,0x42c5eb9b,0xbf800000,2 +np.float32,0x42eb9e94,0xbf800000,2 +np.float32,0x4350ea79,0x3f800000,2 
+np.float32,0x42dbe957,0x3585522a,2 +np.float32,0x425be957,0xbf800000,2 +np.float32,0x435be957,0xb605522a,2 +np.float32,0x476362a2,0xbd7ff911,2 +np.float32,0x464c99a4,0x3e7f4d41,2 +np.float32,0x4471f73d,0x3e7fe1b0,2 +np.float32,0x445a6752,0x3e7ef367,2 +np.float32,0x474fa400,0x3e7f9fcd,2 +np.float32,0x45c1e72f,0xbe7fc7af,2 +np.float32,0x4558c91d,0x3e7e9f31,2 +np.float32,0x43784f94,0xbdff6654,2 +np.float32,0x466e8500,0xbe7ea0a3,2 +np.float32,0x468e1c25,0x3e7e22fb,2 +np.float32,0x44ea6cfc,0x3dff70c3,2 +np.float32,0x4605126c,0x3e7f89ef,2 +np.float32,0x4788b3c6,0xbb87d853,2 +np.float32,0x4531b042,0x3dffd163,2 +np.float32,0x43f1f71d,0x3dfff387,2 +np.float32,0x462c3fa5,0xbd7fe13d,2 +np.float32,0x441c5354,0xbdff76b4,2 +np.float32,0x44908b69,0x3e7dcf0d,2 +np.float32,0x478813ad,0xbe7e9d80,2 +np.float32,0x441c4351,0x3dff937b,2 +np.float64,0x1,0x1,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x7fefffffffffffff,0x3f7452fc98b34e97,1 +np.float64,0xffefffffffffffff,0xbf7452fc98b34e97,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfda51b226b4a364,0xbfd9956328ff876c,1 +np.float64,0xbfb4a65aee294cb8,0xbfb4a09fd744f8a5,1 +np.float64,0xbfd73b914fae7722,0xbfd6b9cce55af379,1 +np.float64,0xbfd90c12b4b21826,0xbfd869a3867b51c2,1 +np.float64,0x3fe649bb3d6c9376,0x3fe48778d9b48a21,1 +np.float64,0xbfd5944532ab288a,0xbfd52c30e1951b42,1 +np.float64,0x3fb150c45222a190,0x3fb14d633eb8275d,1 +np.float64,0x3fe4a6ffa9e94e00,0x3fe33f8a95c33299,1 +np.float64,0x3fe8d2157171a42a,0x3fe667d904ac95a6,1 +np.float64,0xbfa889f52c3113f0,0xbfa8878d90a23fa5,1 +np.float64,0x3feb3234bef6646a,0x3fe809d541d9017a,1 +np.float64,0x3fc6de266f2dbc50,0x3fc6bf0ee80a0d86,1 +np.float64,0x3fe8455368f08aa6,0x3fe6028254338ed5,1 +np.float64,0xbfe5576079eaaec1,0xbfe3cb4a8f6bc3f5,1 +np.float64,0xbfe9f822ff73f046,0xbfe7360d7d5cb887,1 +np.float64,0xbfb1960e7e232c20,0xbfb1928438258602,1 +np.float64,0xbfca75938d34eb28,0xbfca4570979bf2fa,1 +np.float64,0x3fd767dd15aecfbc,0x3fd6e33039018bab,1 +np.float64,0xbfe987750ef30eea,0xbfe6e7ed30ce77f0,1 +np.float64,0xbfe87f95a1f0ff2b,0xbfe62ca7e928bb2a,1 +np.float64,0xbfd2465301a48ca6,0xbfd2070245775d76,1 +np.float64,0xbfb1306ed22260e0,0xbfb12d2088eaa4f9,1 +np.float64,0xbfd8089010b01120,0xbfd778f9db77f2f3,1 +np.float64,0x3fbf9cf4ee3f39f0,0x3fbf88674fde1ca2,1 +np.float64,0x3fe6d8468a6db08e,0x3fe4f403f38b7bec,1 +np.float64,0xbfd9e5deefb3cbbe,0xbfd932692c722351,1 +np.float64,0x3fd1584d55a2b09c,0x3fd122253eeecc2e,1 +np.float64,0x3fe857979cf0af30,0x3fe60fc12b5ba8db,1 +np.float64,0x3fe3644149e6c882,0x3fe239f47013cfe6,1 +np.float64,0xbfe22ea62be45d4c,0xbfe13834c17d56fe,1 +np.float64,0xbfe8d93e1df1b27c,0xbfe66cf4ee467fd2,1 +np.float64,0xbfe9c497c9f38930,0xbfe7127417da4204,1 +np.float64,0x3fd6791cecacf238,0x3fd6039ccb5a7fde,1 +np.float64,0xbfc1dc1b1523b838,0xbfc1cd48edd9ae19,1 +np.float64,0xbfc92a8491325508,0xbfc901176e0158a5,1 +np.float64,0x3fa8649b3430c940,0x3fa8623e82d9504f,1 +np.float64,0x3fe0bed6a1617dae,0x3fdffbb307fb1abe,1 +np.float64,0x3febdf7765f7beee,0x3fe87ad01a89b74a,1 +np.float64,0xbfd3a56d46a74ada,0xbfd356cf41bf83cd,1 +np.float64,0x3fd321d824a643b0,0x3fd2d93846a224b3,1 +np.float64,0xbfc6a49fb52d4940,0xbfc686704906e7d3,1 +np.float64,0xbfdd4103c9ba8208,0xbfdc3ef0c03615b4,1 +np.float64,0xbfe0b78a51e16f14,0xbfdfef0d9ffc38b5,1 
+np.float64,0xbfdac7a908b58f52,0xbfda0158956ceecf,1 +np.float64,0xbfbfbf12f23f7e28,0xbfbfaa428989258c,1 +np.float64,0xbfd55f5aa2aabeb6,0xbfd4fa39de65f33a,1 +np.float64,0x3fe06969abe0d2d4,0x3fdf6744fafdd9cf,1 +np.float64,0x3fe56ab8be6ad572,0x3fe3da7a1986d543,1 +np.float64,0xbfeefbbec67df77e,0xbfea5d426132f4aa,1 +np.float64,0x3fe6e1f49cedc3ea,0x3fe4fb53f3d8e3d5,1 +np.float64,0x3feceb231c79d646,0x3fe923d3efa55414,1 +np.float64,0xbfd03dd08ea07ba2,0xbfd011549aa1998a,1 +np.float64,0xbfd688327aad1064,0xbfd611c61b56adbe,1 +np.float64,0xbfde3249d8bc6494,0xbfdd16a7237a39d5,1 +np.float64,0x3febd4b65677a96c,0x3fe873e1a401ef03,1 +np.float64,0xbfe46bd2b368d7a6,0xbfe31023c2467749,1 +np.float64,0x3fbf9f5cde3f3ec0,0x3fbf8aca8ec53c45,1 +np.float64,0x3fc20374032406e8,0x3fc1f43f1f2f4d5e,1 +np.float64,0xbfec143b16f82876,0xbfe89caa42582381,1 +np.float64,0xbfd14fa635a29f4c,0xbfd119ced11da669,1 +np.float64,0x3fe25236d4e4a46e,0x3fe156242d644b7a,1 +np.float64,0xbfe4ed793469daf2,0xbfe377a88928fd77,1 +np.float64,0xbfb363572626c6b0,0xbfb35e98d8fe87ae,1 +np.float64,0xbfb389d5aa2713a8,0xbfb384fae55565a7,1 +np.float64,0x3fca6e001934dc00,0x3fca3e0661eaca84,1 +np.float64,0x3fe748f3f76e91e8,0x3fe548ab2168aea6,1 +np.float64,0x3fef150efdfe2a1e,0x3fea6b92d74f60d3,1 +np.float64,0xbfd14b52b1a296a6,0xbfd115a387c0fa93,1 +np.float64,0x3fe3286b5ce650d6,0x3fe208a6469a7527,1 +np.float64,0xbfd57b4f4baaf69e,0xbfd514a12a9f7ab0,1 +np.float64,0xbfef14bd467e297b,0xbfea6b64bbfd42ce,1 +np.float64,0xbfe280bc90650179,0xbfe17d2c49955dba,1 +np.float64,0x3fca8759d7350eb0,0x3fca56d5c17bbc14,1 +np.float64,0xbfdf988f30bf311e,0xbfde53f96f69b05f,1 +np.float64,0x3f6b6eeb4036de00,0x3f6b6ee7e3f86f9a,1 +np.float64,0xbfed560be8faac18,0xbfe9656c5cf973d8,1 +np.float64,0x3fc6102c592c2058,0x3fc5f43efad5396d,1 +np.float64,0xbfdef64ed2bdec9e,0xbfddc4b7fbd45aea,1 +np.float64,0x3fe814acd570295a,0x3fe5df183d543bfe,1 +np.float64,0x3fca21313f344260,0x3fc9f2d47f64fbe2,1 +np.float64,0xbfe89932cc713266,0xbfe63f186a2f60ce,1 +np.float64,0x3fe4ffcff169ffa0,0x3fe386336115ee21,1 +np.float64,0x3fee6964087cd2c8,0x3fea093d31e2c2c5,1 +np.float64,0xbfbeea604e3dd4c0,0xbfbed72734852669,1 +np.float64,0xbfea1954fb7432aa,0xbfe74cdad8720032,1 +np.float64,0x3fea3e1a5ef47c34,0x3fe765ffba65a11d,1 +np.float64,0x3fcedb850b3db708,0x3fce8f39d92f00ba,1 +np.float64,0x3fd3b52d41a76a5c,0x3fd365d22b0003f9,1 +np.float64,0xbfa4108a0c282110,0xbfa40f397fcd844f,1 +np.float64,0x3fd7454c57ae8a98,0x3fd6c2e5542c6c83,1 +np.float64,0xbfeecd3c7a7d9a79,0xbfea42ca943a1695,1 +np.float64,0xbfdddda397bbbb48,0xbfdccb27283d4c4c,1 +np.float64,0x3fe6b52cf76d6a5a,0x3fe4d96ff32925ff,1 +np.float64,0xbfa39a75ec2734f0,0xbfa3993c0da84f87,1 +np.float64,0x3fdd3fe6fdba7fcc,0x3fdc3df12fe9e525,1 +np.float64,0xbfb57a98162af530,0xbfb5742525d5fbe2,1 +np.float64,0xbfd3e166cfa7c2ce,0xbfd38ff2891be9b0,1 +np.float64,0x3fdb6a04f9b6d408,0x3fda955e5018e9dc,1 +np.float64,0x3fe4ab03a4e95608,0x3fe342bfa76e1aa8,1 +np.float64,0xbfe6c8480b6d9090,0xbfe4e7eaa935b3f5,1 +np.float64,0xbdd6b5a17bae,0xbdd6b5a17bae,1 +np.float64,0xd6591979acb23,0xd6591979acb23,1 +np.float64,0x5adbed90b5b7e,0x5adbed90b5b7e,1 +np.float64,0xa664c5314cc99,0xa664c5314cc99,1 +np.float64,0x1727fb162e500,0x1727fb162e500,1 +np.float64,0xdb49a93db6935,0xdb49a93db6935,1 +np.float64,0xb10c958d62193,0xb10c958d62193,1 +np.float64,0xad38276f5a705,0xad38276f5a705,1 +np.float64,0x1d5d0b983aba2,0x1d5d0b983aba2,1 +np.float64,0x915f48e122be9,0x915f48e122be9,1 +np.float64,0x475958ae8eb2c,0x475958ae8eb2c,1 +np.float64,0x3af8406675f09,0x3af8406675f09,1 
+np.float64,0x655e88a4cabd2,0x655e88a4cabd2,1 +np.float64,0x40fee8ce81fde,0x40fee8ce81fde,1 +np.float64,0xab83103f57062,0xab83103f57062,1 +np.float64,0x7cf934b8f9f27,0x7cf934b8f9f27,1 +np.float64,0x29f7524853eeb,0x29f7524853eeb,1 +np.float64,0x4a5e954894bd3,0x4a5e954894bd3,1 +np.float64,0x24638f3a48c73,0x24638f3a48c73,1 +np.float64,0xa4f32fc749e66,0xa4f32fc749e66,1 +np.float64,0xf8e92df7f1d26,0xf8e92df7f1d26,1 +np.float64,0x292e9d50525d4,0x292e9d50525d4,1 +np.float64,0xe937e897d26fd,0xe937e897d26fd,1 +np.float64,0xd3bde1d5a77bc,0xd3bde1d5a77bc,1 +np.float64,0xa447ffd548900,0xa447ffd548900,1 +np.float64,0xa3b7b691476f7,0xa3b7b691476f7,1 +np.float64,0x490095c892013,0x490095c892013,1 +np.float64,0xfc853235f90a7,0xfc853235f90a7,1 +np.float64,0x5a8bc082b5179,0x5a8bc082b5179,1 +np.float64,0x1baca45a37595,0x1baca45a37595,1 +np.float64,0x2164120842c83,0x2164120842c83,1 +np.float64,0x66692bdeccd26,0x66692bdeccd26,1 +np.float64,0xf205bdd3e40b8,0xf205bdd3e40b8,1 +np.float64,0x7c3fff98f8801,0x7c3fff98f8801,1 +np.float64,0xccdf10e199bf,0xccdf10e199bf,1 +np.float64,0x92db8e8125b8,0x92db8e8125b8,1 +np.float64,0x5789a8d6af136,0x5789a8d6af136,1 +np.float64,0xbdda869d7bb51,0xbdda869d7bb51,1 +np.float64,0xb665e0596ccbc,0xb665e0596ccbc,1 +np.float64,0x74e6b46ee9cd7,0x74e6b46ee9cd7,1 +np.float64,0x4f39cf7c9e73b,0x4f39cf7c9e73b,1 +np.float64,0xfdbf3907fb7e7,0xfdbf3907fb7e7,1 +np.float64,0xafdef4d55fbdf,0xafdef4d55fbdf,1 +np.float64,0xb49858236930b,0xb49858236930b,1 +np.float64,0x3ebe21d47d7c5,0x3ebe21d47d7c5,1 +np.float64,0x5b620512b6c41,0x5b620512b6c41,1 +np.float64,0x31918cda63232,0x31918cda63232,1 +np.float64,0x68b5741ed16af,0x68b5741ed16af,1 +np.float64,0xa5c09a5b4b814,0xa5c09a5b4b814,1 +np.float64,0x55f51c14abea4,0x55f51c14abea4,1 +np.float64,0xda8a3e41b515,0xda8a3e41b515,1 +np.float64,0x9ea9c8513d539,0x9ea9c8513d539,1 +np.float64,0x7f23b964fe478,0x7f23b964fe478,1 +np.float64,0xf6e08c7bedc12,0xf6e08c7bedc12,1 +np.float64,0x7267aa24e4cf6,0x7267aa24e4cf6,1 +np.float64,0x236bb93a46d78,0x236bb93a46d78,1 +np.float64,0x9a98430b35309,0x9a98430b35309,1 +np.float64,0xbb683fef76d08,0xbb683fef76d08,1 +np.float64,0x1ff0eb6e3fe1e,0x1ff0eb6e3fe1e,1 +np.float64,0xf524038fea481,0xf524038fea481,1 +np.float64,0xd714e449ae29d,0xd714e449ae29d,1 +np.float64,0x4154fd7682aa0,0x4154fd7682aa0,1 +np.float64,0x5b8d2f6cb71a7,0x5b8d2f6cb71a7,1 +np.float64,0xc91aa21d92355,0xc91aa21d92355,1 +np.float64,0xbd94fd117b2a0,0xbd94fd117b2a0,1 +np.float64,0x685b207ad0b65,0x685b207ad0b65,1 +np.float64,0xd2485b05a490c,0xd2485b05a490c,1 +np.float64,0x151ea5e62a3d6,0x151ea5e62a3d6,1 +np.float64,0x2635a7164c6b6,0x2635a7164c6b6,1 +np.float64,0x88ae3b5d115c8,0x88ae3b5d115c8,1 +np.float64,0x8a055a55140ac,0x8a055a55140ac,1 +np.float64,0x756f7694eadef,0x756f7694eadef,1 +np.float64,0x866d74630cdaf,0x866d74630cdaf,1 +np.float64,0x39e44f2873c8b,0x39e44f2873c8b,1 +np.float64,0x2a07ceb6540fb,0x2a07ceb6540fb,1 +np.float64,0xc52b96398a573,0xc52b96398a573,1 +np.float64,0x9546543b2a8cb,0x9546543b2a8cb,1 +np.float64,0x5b995b90b732c,0x5b995b90b732c,1 +np.float64,0x2de10a565bc22,0x2de10a565bc22,1 +np.float64,0x3b06ee94760df,0x3b06ee94760df,1 +np.float64,0xb18e77a5631cf,0xb18e77a5631cf,1 +np.float64,0x3b89ae3a77137,0x3b89ae3a77137,1 +np.float64,0xd9b0b6e5b3617,0xd9b0b6e5b3617,1 +np.float64,0x30b2310861647,0x30b2310861647,1 +np.float64,0x326a3ab464d48,0x326a3ab464d48,1 +np.float64,0x4c18610a9830d,0x4c18610a9830d,1 +np.float64,0x541dea42a83be,0x541dea42a83be,1 +np.float64,0xcd027dbf9a050,0xcd027dbf9a050,1 +np.float64,0x780a0f80f015,0x780a0f80f015,1 
+np.float64,0x740ed5b2e81db,0x740ed5b2e81db,1 +np.float64,0xc226814d844d0,0xc226814d844d0,1 +np.float64,0xde958541bd2b1,0xde958541bd2b1,1 +np.float64,0xb563d3296ac7b,0xb563d3296ac7b,1 +np.float64,0x1db3b0b83b677,0x1db3b0b83b677,1 +np.float64,0xa7b0275d4f605,0xa7b0275d4f605,1 +np.float64,0x72f8d038e5f1b,0x72f8d038e5f1b,1 +np.float64,0x860ed1350c1da,0x860ed1350c1da,1 +np.float64,0x79f88262f3f11,0x79f88262f3f11,1 +np.float64,0x8817761f102ef,0x8817761f102ef,1 +np.float64,0xac44784b5888f,0xac44784b5888f,1 +np.float64,0x800fd594241fab28,0x800fd594241fab28,1 +np.float64,0x800ede32f8ddbc66,0x800ede32f8ddbc66,1 +np.float64,0x800de4c1121bc982,0x800de4c1121bc982,1 +np.float64,0x80076ebcddcedd7a,0x80076ebcddcedd7a,1 +np.float64,0x800b3fee06567fdc,0x800b3fee06567fdc,1 +np.float64,0x800b444426b68889,0x800b444426b68889,1 +np.float64,0x800b1c037a563807,0x800b1c037a563807,1 +np.float64,0x8001eb88c2a3d712,0x8001eb88c2a3d712,1 +np.float64,0x80058aae6dab155e,0x80058aae6dab155e,1 +np.float64,0x80083df2d4f07be6,0x80083df2d4f07be6,1 +np.float64,0x800e3b19d97c7634,0x800e3b19d97c7634,1 +np.float64,0x800a71c6f374e38e,0x800a71c6f374e38e,1 +np.float64,0x80048557f1490ab1,0x80048557f1490ab1,1 +np.float64,0x8000a00e6b01401e,0x8000a00e6b01401e,1 +np.float64,0x800766a3e2cecd49,0x800766a3e2cecd49,1 +np.float64,0x80015eb44602bd69,0x80015eb44602bd69,1 +np.float64,0x800bde885a77bd11,0x800bde885a77bd11,1 +np.float64,0x800224c53ea4498b,0x800224c53ea4498b,1 +np.float64,0x80048e8c6a291d1a,0x80048e8c6a291d1a,1 +np.float64,0x800b667e4af6ccfd,0x800b667e4af6ccfd,1 +np.float64,0x800ae3d7e395c7b0,0x800ae3d7e395c7b0,1 +np.float64,0x80086c245550d849,0x80086c245550d849,1 +np.float64,0x800d7d25f6fafa4c,0x800d7d25f6fafa4c,1 +np.float64,0x800f8d9ab0ff1b35,0x800f8d9ab0ff1b35,1 +np.float64,0x800690e949cd21d3,0x800690e949cd21d3,1 +np.float64,0x8003022381060448,0x8003022381060448,1 +np.float64,0x80085e0dad70bc1c,0x80085e0dad70bc1c,1 +np.float64,0x800e2ffc369c5ff9,0x800e2ffc369c5ff9,1 +np.float64,0x800b629b5af6c537,0x800b629b5af6c537,1 +np.float64,0x800fdc964b7fb92d,0x800fdc964b7fb92d,1 +np.float64,0x80036bb4b1c6d76a,0x80036bb4b1c6d76a,1 +np.float64,0x800b382f7f16705f,0x800b382f7f16705f,1 +np.float64,0x800ebac9445d7593,0x800ebac9445d7593,1 +np.float64,0x80015075c3e2a0ec,0x80015075c3e2a0ec,1 +np.float64,0x8002a6ec5ce54dd9,0x8002a6ec5ce54dd9,1 +np.float64,0x8009fab74a93f56f,0x8009fab74a93f56f,1 +np.float64,0x800c94b9ea992974,0x800c94b9ea992974,1 +np.float64,0x800dc2efd75b85e0,0x800dc2efd75b85e0,1 +np.float64,0x800be6400d57cc80,0x800be6400d57cc80,1 +np.float64,0x80021f6858443ed1,0x80021f6858443ed1,1 +np.float64,0x800600e2ac4c01c6,0x800600e2ac4c01c6,1 +np.float64,0x800a2159e6b442b4,0x800a2159e6b442b4,1 +np.float64,0x800c912f4bb9225f,0x800c912f4bb9225f,1 +np.float64,0x800a863a9db50c76,0x800a863a9db50c76,1 +np.float64,0x800ac16851d582d1,0x800ac16851d582d1,1 +np.float64,0x8003f7d32e87efa7,0x8003f7d32e87efa7,1 +np.float64,0x800be4eee3d7c9de,0x800be4eee3d7c9de,1 +np.float64,0x80069ff0ac4d3fe2,0x80069ff0ac4d3fe2,1 +np.float64,0x80061c986d4c3932,0x80061c986d4c3932,1 +np.float64,0x8000737b4de0e6f7,0x8000737b4de0e6f7,1 +np.float64,0x8002066ef7440cdf,0x8002066ef7440cdf,1 +np.float64,0x8001007050c200e1,0x8001007050c200e1,1 +np.float64,0x8008df9fa351bf40,0x8008df9fa351bf40,1 +np.float64,0x800f8394ee5f072a,0x800f8394ee5f072a,1 +np.float64,0x80008e0b01c11c17,0x80008e0b01c11c17,1 +np.float64,0x800f7088ed3ee112,0x800f7088ed3ee112,1 +np.float64,0x800285b86f650b72,0x800285b86f650b72,1 +np.float64,0x8008ec18af51d832,0x8008ec18af51d832,1 
+np.float64,0x800da08523bb410a,0x800da08523bb410a,1 +np.float64,0x800de853ca7bd0a8,0x800de853ca7bd0a8,1 +np.float64,0x8008c8aefad1915e,0x8008c8aefad1915e,1 +np.float64,0x80010c39d5821874,0x80010c39d5821874,1 +np.float64,0x8009208349724107,0x8009208349724107,1 +np.float64,0x800783783f0f06f1,0x800783783f0f06f1,1 +np.float64,0x80025caf9984b960,0x80025caf9984b960,1 +np.float64,0x800bc76fa6778ee0,0x800bc76fa6778ee0,1 +np.float64,0x80017e2f89a2fc60,0x80017e2f89a2fc60,1 +np.float64,0x800ef169843de2d3,0x800ef169843de2d3,1 +np.float64,0x80098a5f7db314bf,0x80098a5f7db314bf,1 +np.float64,0x800d646f971ac8df,0x800d646f971ac8df,1 +np.float64,0x800110d1dc6221a4,0x800110d1dc6221a4,1 +np.float64,0x800f8b422a1f1684,0x800f8b422a1f1684,1 +np.float64,0x800785c97dcf0b94,0x800785c97dcf0b94,1 +np.float64,0x800da201283b4403,0x800da201283b4403,1 +np.float64,0x800a117cc7b422fa,0x800a117cc7b422fa,1 +np.float64,0x80024731cfa48e64,0x80024731cfa48e64,1 +np.float64,0x800199d456c333a9,0x800199d456c333a9,1 +np.float64,0x8005f66bab8becd8,0x8005f66bab8becd8,1 +np.float64,0x8008e7227c11ce45,0x8008e7227c11ce45,1 +np.float64,0x8007b66cc42f6cda,0x8007b66cc42f6cda,1 +np.float64,0x800669e6f98cd3cf,0x800669e6f98cd3cf,1 +np.float64,0x800aed917375db23,0x800aed917375db23,1 +np.float64,0x8008b6dd15116dbb,0x8008b6dd15116dbb,1 +np.float64,0x800f49869cfe930d,0x800f49869cfe930d,1 +np.float64,0x800a712661b4e24d,0x800a712661b4e24d,1 +np.float64,0x800944e816f289d1,0x800944e816f289d1,1 +np.float64,0x800eba0f8a1d741f,0x800eba0f8a1d741f,1 +np.float64,0x800cf6ded139edbe,0x800cf6ded139edbe,1 +np.float64,0x80023100c6246202,0x80023100c6246202,1 +np.float64,0x800c5a94add8b52a,0x800c5a94add8b52a,1 +np.float64,0x800adf329b95be66,0x800adf329b95be66,1 +np.float64,0x800af9afc115f360,0x800af9afc115f360,1 +np.float64,0x800d66ce837acd9d,0x800d66ce837acd9d,1 +np.float64,0x8003ffb5e507ff6d,0x8003ffb5e507ff6d,1 +np.float64,0x80027d280024fa51,0x80027d280024fa51,1 +np.float64,0x800fc37e1d1f86fc,0x800fc37e1d1f86fc,1 +np.float64,0x800fc7258b9f8e4b,0x800fc7258b9f8e4b,1 +np.float64,0x8003fb5789e7f6b0,0x8003fb5789e7f6b0,1 +np.float64,0x800eb4e7a13d69cf,0x800eb4e7a13d69cf,1 +np.float64,0x800951850952a30a,0x800951850952a30a,1 +np.float64,0x3fed4071be3a80e3,0x3fe95842074431df,1 +np.float64,0x3f8d2341203a4682,0x3f8d2300b453bd9f,1 +np.float64,0x3fdc8ce332b919c6,0x3fdb9cdf1440c28f,1 +np.float64,0x3fdc69bd84b8d37b,0x3fdb7d25c8166b7b,1 +np.float64,0x3fc4c22ad0298456,0x3fc4aae73e231b4f,1 +np.float64,0x3fea237809f446f0,0x3fe753cc6ca96193,1 +np.float64,0x3fd34cf6462699ed,0x3fd30268909bb47e,1 +np.float64,0x3fafce20643f9c41,0x3fafc8e41a240e35,1 +np.float64,0x3fdc6d416538da83,0x3fdb805262292863,1 +np.float64,0x3fe7d8362aefb06c,0x3fe5b2ce659db7fd,1 +np.float64,0x3fe290087de52011,0x3fe189f9a3eb123d,1 +np.float64,0x3fa62d2bf82c5a58,0x3fa62b65958ca2b8,1 +np.float64,0x3fafd134403fa269,0x3fafcbf670f8a6f3,1 +np.float64,0x3fa224e53c2449ca,0x3fa223ec5de1631b,1 +np.float64,0x3fb67e2c2c2cfc58,0x3fb676c445fb70a0,1 +np.float64,0x3fda358d01346b1a,0x3fd97b9441666eb2,1 +np.float64,0x3fdd30fc4bba61f9,0x3fdc308da423778d,1 +np.float64,0x3fc56e99c52add34,0x3fc5550004492621,1 +np.float64,0x3fe32d08de265a12,0x3fe20c761a73cec2,1 +np.float64,0x3fd46cf932a8d9f2,0x3fd414a7f3db03df,1 +np.float64,0x3fd94cfa2b3299f4,0x3fd8a5961b3e4bdd,1 +np.float64,0x3fed6ea3a6fadd47,0x3fe9745b2f6c9204,1 +np.float64,0x3fe4431d1768863a,0x3fe2ef61d0481de0,1 +np.float64,0x3fe1d8e00ea3b1c0,0x3fe0efab5050ee78,1 +np.float64,0x3fe56f37dcaade70,0x3fe3de00b0f392e0,1 +np.float64,0x3fde919a2dbd2334,0x3fdd6b6d2dcf2396,1 
+np.float64,0x3fe251e3d4a4a3c8,0x3fe155de69605d60,1 +np.float64,0x3fe5e0ecc5abc1da,0x3fe436a5de5516cf,1 +np.float64,0x3fcd48780c3a90f0,0x3fcd073fa907ba9b,1 +np.float64,0x3fe4e8149229d029,0x3fe37360801d5b66,1 +np.float64,0x3fb9ef159633de2b,0x3fb9e3bc05a15d1d,1 +np.float64,0x3fc24a3f0424947e,0x3fc23a5432ca0e7c,1 +np.float64,0x3fe55ca196aab943,0x3fe3cf6b3143435a,1 +np.float64,0x3fe184544c2308a9,0x3fe0a7b49fa80aec,1 +np.float64,0x3fe2c76e83658edd,0x3fe1b8355c1ea771,1 +np.float64,0x3fea8d2c4ab51a59,0x3fe79ba85aabc099,1 +np.float64,0x3fd74f98abae9f31,0x3fd6cc85005d0593,1 +np.float64,0x3fec6de9a678dbd3,0x3fe8d59a1d23cdd1,1 +np.float64,0x3fec8a0e50f9141d,0x3fe8e7500f6f6a00,1 +np.float64,0x3fe9de6d08b3bcda,0x3fe7245319508767,1 +np.float64,0x3fe4461fd1688c40,0x3fe2f1cf0b93aba6,1 +np.float64,0x3fde342d9d3c685b,0x3fdd185609d5719d,1 +np.float64,0x3feb413fc8368280,0x3fe813c091d2519a,1 +np.float64,0x3fe64333156c8666,0x3fe48275b9a6a358,1 +np.float64,0x3fe03c65226078ca,0x3fdf18b26786be35,1 +np.float64,0x3fee11054dbc220b,0x3fe9d579a1cfa7ad,1 +np.float64,0x3fbaefccae35df99,0x3fbae314fef7c7ea,1 +np.float64,0x3feed4e3487da9c7,0x3fea4729241c8811,1 +np.float64,0x3fbb655df836cabc,0x3fbb57fcf9a097be,1 +np.float64,0x3fe68b0273ed1605,0x3fe4b96109afdf76,1 +np.float64,0x3fd216bfc3242d80,0x3fd1d957363f6a43,1 +np.float64,0x3fe01328d4a02652,0x3fded083bbf94aba,1 +np.float64,0x3fe3f9a61ae7f34c,0x3fe2b3f701b79028,1 +np.float64,0x3fed4e7cf8fa9cfa,0x3fe960d27084fb40,1 +np.float64,0x3faec08e343d811c,0x3faebbd2aa07ac1f,1 +np.float64,0x3fd2d1bbeea5a378,0x3fd28c9aefcf48ad,1 +np.float64,0x3fd92e941fb25d28,0x3fd889857f88410d,1 +np.float64,0x3fe43decb7e87bd9,0x3fe2eb32b4ee4667,1 +np.float64,0x3fef49cabcfe9395,0x3fea892f9a233f76,1 +np.float64,0x3fe3e96812e7d2d0,0x3fe2a6c6b45dd6ee,1 +np.float64,0x3fd24c0293a49805,0x3fd20c76d54473cb,1 +np.float64,0x3fb43d6b7e287ad7,0x3fb438060772795a,1 +np.float64,0x3fe87bf7d3f0f7f0,0x3fe62a0c47411c62,1 +np.float64,0x3fee82a2e07d0546,0x3fea17e27e752b7b,1 +np.float64,0x3fe40c01bbe81803,0x3fe2c2d9483f44d8,1 +np.float64,0x3fd686ccae2d0d99,0x3fd610763fb61097,1 +np.float64,0x3fe90fcf2af21f9e,0x3fe693c12df59ba9,1 +np.float64,0x3fefb3ce11ff679c,0x3feac3dd4787529d,1 +np.float64,0x3fcec53ff63d8a80,0x3fce79992af00c58,1 +np.float64,0x3fe599dd7bab33bb,0x3fe3ff5da7575d85,1 +np.float64,0x3fe9923b1a732476,0x3fe6ef71d13db456,1 +np.float64,0x3febf76fcef7eee0,0x3fe88a3952e11373,1 +np.float64,0x3fc2cfd128259fa2,0x3fc2be7fd47fd811,1 +np.float64,0x3fe4d37ae269a6f6,0x3fe36300d45e3745,1 +np.float64,0x3fe23aa2e4247546,0x3fe1424e172f756f,1 +np.float64,0x3fe4f0596ca9e0b3,0x3fe379f0c49de7ef,1 +np.float64,0x3fe2e4802fe5c900,0x3fe1d062a8812601,1 +np.float64,0x3fe5989c79eb3139,0x3fe3fe6308552dec,1 +np.float64,0x3fe3c53cb4e78a79,0x3fe28956e573aca4,1 +np.float64,0x3fe6512beeeca258,0x3fe48d2d5ece979f,1 +np.float64,0x3fd8473ddb308e7c,0x3fd7b33e38adc6ad,1 +np.float64,0x3fecd09c9679a139,0x3fe91361fa0c5bcb,1 +np.float64,0x3fc991530e3322a6,0x3fc965e2c514a9e9,1 +np.float64,0x3f6d4508403a8a11,0x3f6d45042b68acc5,1 +np.float64,0x3fea1f198f743e33,0x3fe750ce918d9330,1 +np.float64,0x3fd0a0bb4da14177,0x3fd07100f9c71e1c,1 +np.float64,0x3fd30c45ffa6188c,0x3fd2c499f9961f66,1 +np.float64,0x3fcad98e7c35b31d,0x3fcaa74293cbc52e,1 +np.float64,0x3fec8e4a5eb91c95,0x3fe8e9f898d118db,1 +np.float64,0x3fd19fdb79233fb7,0x3fd1670c00febd24,1 +np.float64,0x3fea9fcbb1f53f97,0x3fe7a836b29c4075,1 +np.float64,0x3fc6d12ea12da25d,0x3fc6b24bd2f89f59,1 +np.float64,0x3fd6af3658ad5e6d,0x3fd636613e08df3f,1 +np.float64,0x3fe31bc385a63787,0x3fe1fe3081621213,1 
+np.float64,0x3fc0dbba2221b774,0x3fc0cf42c9313dba,1 +np.float64,0x3fef639ce87ec73a,0x3fea9795454f1036,1 +np.float64,0x3fee5f29dcbcbe54,0x3fea0349b288f355,1 +np.float64,0x3fed46bdb37a8d7b,0x3fe95c199f5aa569,1 +np.float64,0x3fef176afa3e2ed6,0x3fea6ce78b2aa3aa,1 +np.float64,0x3fc841e7683083cf,0x3fc81cccb84848cc,1 +np.float64,0xbfda3ec9a2347d94,0xbfd9840d180e9de3,1 +np.float64,0xbfcd5967ae3ab2d0,0xbfcd17be13142bb9,1 +np.float64,0xbfedf816573bf02d,0xbfe9c6bb06476c60,1 +np.float64,0xbfd0d6e10e21adc2,0xbfd0a54f99d2f3dc,1 +np.float64,0xbfe282df096505be,0xbfe17ef5e2e80760,1 +np.float64,0xbfd77ae6e62ef5ce,0xbfd6f4f6b603ad8a,1 +np.float64,0xbfe37b171aa6f62e,0xbfe24cb4b2d0ade4,1 +np.float64,0xbfef9e5ed9bf3cbe,0xbfeab817b41000bd,1 +np.float64,0xbfe624d6f96c49ae,0xbfe46b1e9c9aff86,1 +np.float64,0xbfefb5da65ff6bb5,0xbfeac4fc9c982772,1 +np.float64,0xbfd29a65d52534cc,0xbfd2579df8ff87b9,1 +np.float64,0xbfd40270172804e0,0xbfd3af6471104aef,1 +np.float64,0xbfb729ee7a2e53e0,0xbfb721d7dbd2705e,1 +np.float64,0xbfb746f1382e8de0,0xbfb73ebc1207f8e3,1 +np.float64,0xbfd3c7e606a78fcc,0xbfd377a8aa1b0dd9,1 +np.float64,0xbfd18c4880231892,0xbfd1543506584ad5,1 +np.float64,0xbfea988080753101,0xbfe7a34cba0d0fa1,1 +np.float64,0xbf877400e02ee800,0xbf8773df47fa7e35,1 +np.float64,0xbfb07e050820fc08,0xbfb07b198d4a52c9,1 +np.float64,0xbfee0a3621fc146c,0xbfe9d1745a05ba77,1 +np.float64,0xbfe78de246ef1bc4,0xbfe57bf2baab91c8,1 +np.float64,0xbfcdbfd3bd3b7fa8,0xbfcd7b728a955a06,1 +np.float64,0xbfe855ea79b0abd5,0xbfe60e8a4a17b921,1 +np.float64,0xbfd86c8e3530d91c,0xbfd7d5e36c918dc1,1 +np.float64,0xbfe4543169e8a863,0xbfe2fd23d42f552e,1 +np.float64,0xbfe41efbf1283df8,0xbfe2d235a2faed1a,1 +np.float64,0xbfd9a55464b34aa8,0xbfd8f7083f7281e5,1 +np.float64,0xbfe5f5078d6bea0f,0xbfe44637d910c270,1 +np.float64,0xbfe6d83e3dedb07c,0xbfe4f3fdadd10552,1 +np.float64,0xbfdb767e70b6ecfc,0xbfdaa0b6c17f3fb1,1 +np.float64,0xbfdfc91b663f9236,0xbfde7eb0dfbeaa26,1 +np.float64,0xbfbfbd18783f7a30,0xbfbfa84bf2fa1c8d,1 +np.float64,0xbfe51199242a2332,0xbfe39447dbe066ae,1 +np.float64,0xbfdbb94814b77290,0xbfdadd63bd796972,1 +np.float64,0xbfd8c6272cb18c4e,0xbfd828f2d9e8607e,1 +np.float64,0xbfce51e0b63ca3c0,0xbfce097ee908083a,1 +np.float64,0xbfe99a177d73342f,0xbfe6f4ec776a57ae,1 +np.float64,0xbfefde2ab0ffbc55,0xbfeadafdcbf54733,1 +np.float64,0xbfcccb5c1c3996b8,0xbfcc8d586a73d126,1 +np.float64,0xbfdf7ddcedbefbba,0xbfde3c749a906de7,1 +np.float64,0xbfef940516ff280a,0xbfeab26429e89f4b,1 +np.float64,0xbfe08009f1e10014,0xbfdf8eab352997eb,1 +np.float64,0xbfe9c02682b3804d,0xbfe70f5fd05f79ee,1 +np.float64,0xbfb3ca1732279430,0xbfb3c50bec5b453a,1 +np.float64,0xbfe368e81926d1d0,0xbfe23dc704d0887c,1 +np.float64,0xbfbd20cc2e3a4198,0xbfbd10b7e6d81c6c,1 +np.float64,0xbfd67ece4d2cfd9c,0xbfd608f527dcc5e7,1 +np.float64,0xbfdc02d1333805a2,0xbfdb20104454b79f,1 +np.float64,0xbfc007a626200f4c,0xbfbff9dc9dc70193,1 +np.float64,0xbfda9e4f8fb53ca0,0xbfd9db8af35dc630,1 +np.float64,0xbfd8173d77302e7a,0xbfd786a0cf3e2914,1 +np.float64,0xbfeb8fcbd0b71f98,0xbfe84734debc10fb,1 +np.float64,0xbfe4bf1cb7697e3a,0xbfe352c891113f29,1 +np.float64,0xbfc18624d5230c48,0xbfc178248e863b64,1 +np.float64,0xbfcf184bac3e3098,0xbfceca3b19be1ebe,1 +np.float64,0xbfd2269c42a44d38,0xbfd1e8920d72b694,1 +np.float64,0xbfe8808526b1010a,0xbfe62d5497292495,1 +np.float64,0xbfe498bd1da9317a,0xbfe334245eadea93,1 +np.float64,0xbfef0855aebe10ab,0xbfea6462f29aeaf9,1 +np.float64,0xbfdeb186c93d630e,0xbfdd87c37943c602,1 +np.float64,0xbfb29fe2ae253fc8,0xbfb29bae3c87efe4,1 +np.float64,0xbfddd9c6c3bbb38e,0xbfdcc7b400bf384b,1 
+np.float64,0xbfe3506673e6a0cd,0xbfe2299f26295553,1 +np.float64,0xbfe765957a2ecb2b,0xbfe55e03cf22edab,1 +np.float64,0xbfecc9876c79930f,0xbfe90efaf15b6207,1 +np.float64,0xbfefb37a0a7f66f4,0xbfeac3af3898e7c2,1 +np.float64,0xbfeefa0da7bdf41b,0xbfea5c4cde53c1c3,1 +np.float64,0xbfe6639ee9ecc73e,0xbfe49b4e28a72482,1 +np.float64,0xbfef91a4bb7f2349,0xbfeab114ac9e25dd,1 +np.float64,0xbfc8b392bb316724,0xbfc88c657f4441a3,1 +np.float64,0xbfc88a358231146c,0xbfc863cb900970fe,1 +np.float64,0xbfef25a9d23e4b54,0xbfea74eda432aabe,1 +np.float64,0xbfe6aceea0ed59de,0xbfe4d32e54a3fd01,1 +np.float64,0xbfefe2b3e37fc568,0xbfeadd74f4605835,1 +np.float64,0xbfa9eecb8833dd90,0xbfa9ebf4f4cb2591,1 +np.float64,0xbfd42bad7428575a,0xbfd3d69de8e52d0a,1 +np.float64,0xbfbc366b4a386cd8,0xbfbc27ceee8f3019,1 +np.float64,0xbfd9bca7be337950,0xbfd90c80e6204e57,1 +np.float64,0xbfe8173f53f02e7f,0xbfe5e0f8d8ed329c,1 +np.float64,0xbfce22dbcb3c45b8,0xbfcddbc8159b63af,1 +np.float64,0xbfea2d7ba7345af7,0xbfe75aa62ad5b80a,1 +np.float64,0xbfc08b783e2116f0,0xbfc07faf8d501558,1 +np.float64,0xbfb8c4161c318830,0xbfb8ba33950748ec,1 +np.float64,0xbfddd930bcbbb262,0xbfdcc72dffdf51bb,1 +np.float64,0xbfd108ce8a22119e,0xbfd0d5801e7698bd,1 +np.float64,0xbfd5bd2b5dab7a56,0xbfd552c52c468c76,1 +np.float64,0xbfe7ffe67fefffcd,0xbfe5cfe96e35e6e5,1 +np.float64,0xbfa04ec6bc209d90,0xbfa04e120a2c25cc,1 +np.float64,0xbfef7752cc7eeea6,0xbfeaa28715addc4f,1 +np.float64,0xbfe7083c2eae1078,0xbfe5182bf8ddfc8e,1 +np.float64,0xbfe05dafd0a0bb60,0xbfdf52d397cfe5f6,1 +np.float64,0xbfacb4f2243969e0,0xbfacb118991ea235,1 +np.float64,0xbfc7d47e422fa8fc,0xbfc7b1504714a4fd,1 +np.float64,0xbfbd70b2243ae168,0xbfbd60182efb61de,1 +np.float64,0xbfe930e49cb261c9,0xbfe6ab272b3f9cfc,1 +np.float64,0xbfb5f537e62bea70,0xbfb5ee540dcdc635,1 +np.float64,0xbfbb0c8278361908,0xbfbaffa1f7642a87,1 +np.float64,0xbfe82af2447055e4,0xbfe5ef54ca8db9e8,1 +np.float64,0xbfe92245e6f2448c,0xbfe6a0d32168040b,1 +np.float64,0xbfb799a8522f3350,0xbfb7911a7ada3640,1 +np.float64,0x7faa8290c8350521,0x3fe5916f67209cd6,1 +np.float64,0x7f976597082ecb2d,0x3fcf94dce396bd37,1 +np.float64,0x7fede721237bce41,0x3fe3e7b1575b005f,1 +np.float64,0x7fd5f674d72bece9,0x3fe3210628eba199,1 +np.float64,0x7f9b0f1aa0361e34,0x3feffd34d15d1da7,1 +np.float64,0x7fec48346ab89068,0x3fe93dd84253d9a2,1 +np.float64,0x7f9cac76283958eb,0xbfec4cd999653868,1 +np.float64,0x7fed51ab6bbaa356,0x3fecc27fb5f37bca,1 +np.float64,0x7fded3c116bda781,0xbfda473efee47cf1,1 +np.float64,0x7fd19c48baa33890,0xbfe25700cbfc0326,1 +np.float64,0x7fe5c8f478ab91e8,0xbfee4ab6d84806be,1 +np.float64,0x7fe53c64e46a78c9,0x3fee19c3f227f4e1,1 +np.float64,0x7fc2ad1936255a31,0xbfe56db9b877f807,1 +np.float64,0x7fe2b071b52560e2,0xbfce3990a8d390a9,1 +np.float64,0x7fc93f3217327e63,0xbfd1f6d7ef838d2b,1 +np.float64,0x7fec26df08784dbd,0x3fd5397be41c93d9,1 +np.float64,0x7fcf4770183e8edf,0x3fe6354f5a785016,1 +np.float64,0x7fdc9fcc0bb93f97,0xbfeeeae952e8267d,1 +np.float64,0x7feb21f29c7643e4,0x3fec20122e33f1bf,1 +np.float64,0x7fd0b51273216a24,0x3fefb09f8daba00b,1 +np.float64,0x7fe747a9d76e8f53,0x3feb46a3232842a4,1 +np.float64,0x7fd58885972b110a,0xbfce5ea57c186221,1 +np.float64,0x7fca3ce85c3479d0,0x3fef93a24548e8ca,1 +np.float64,0x7fe1528a46a2a514,0xbfb54bb578d9da91,1 +np.float64,0x7fcc58b21b38b163,0x3feffb5b741ffc2d,1 +np.float64,0x7fdabcaaf5357955,0x3fecbf855db524d1,1 +np.float64,0x7fdd27c6933a4f8c,0xbfef2f41bb80144b,1 +np.float64,0x7fbda4e1be3b49c2,0x3fdb9b33f84f5381,1 +np.float64,0x7fe53363362a66c5,0x3fe4daff3a6a4ed0,1 +np.float64,0x7fe5719d62eae33a,0xbfef761d98f625d5,1 
+np.float64,0x7f982ce5a83059ca,0x3fd0b27c3365f0a8,1 +np.float64,0x7fe6db8c42edb718,0x3fe786f4b1fe11a6,1 +np.float64,0x7fe62cca1b2c5993,0x3fd425b6c4c9714a,1 +np.float64,0x7feea88850bd5110,0xbfd7bbb432017175,1 +np.float64,0x7fad6c6ae43ad8d5,0x3fe82e49098bc6de,1 +np.float64,0x7fe70542f02e0a85,0x3fec3017960b4822,1 +np.float64,0x7feaf0bcbb35e178,0xbfc3aac74dd322d5,1 +np.float64,0x7fb5e152fe2bc2a5,0x3fd4b27a4720614c,1 +np.float64,0x7fe456ee5be8addc,0xbfe9e15ab5cff229,1 +np.float64,0x7fd4b53a8d296a74,0xbfefff450f503326,1 +np.float64,0x7fd7149d7a2e293a,0x3fef4ef0a9009096,1 +np.float64,0x7fd43fc5a8a87f8a,0x3fe0c929fee9dce7,1 +np.float64,0x7fef97022aff2e03,0x3fd4ea52a813da20,1 +np.float64,0x7fe035950ae06b29,0x3fef4e125394fb05,1 +np.float64,0x7fecd0548979a0a8,0x3fe89d226244037b,1 +np.float64,0x7fc79b3ac22f3675,0xbfee9c9cf78c8270,1 +np.float64,0x7fd8b8e8263171cf,0x3fe8e24437961db0,1 +np.float64,0x7fc288c23e251183,0xbfbaf8eca50986ca,1 +np.float64,0x7fe436b4b6686d68,0xbfecd661741931c4,1 +np.float64,0x7fcdf99abe3bf334,0x3feaa75c90830b92,1 +np.float64,0x7fd9f9739233f2e6,0xbfebbfcb301b0da5,1 +np.float64,0x7fd6fcbd1b2df979,0xbfccf2c77cb65f56,1 +np.float64,0x7fe242a97b248552,0xbfe5b0f13bcbabc8,1 +np.float64,0x7fe38bf3e06717e7,0x3fbc8fa9004d2668,1 +np.float64,0x7fecd0e8d479a1d1,0xbfe886a6b4f73a4a,1 +np.float64,0x7fe958d60232b1ab,0xbfeb7c4cf0cee2dd,1 +np.float64,0x7f9d492b583a9256,0xbfebe975d00221cb,1 +np.float64,0x7fd6c9983bad932f,0xbfefe817621a31f6,1 +np.float64,0x7fed0d7239fa1ae3,0x3feac7e1b6455b4b,1 +np.float64,0x7fe61dac90ec3b58,0x3fef845b9efe8421,1 +np.float64,0x7f9acd3010359a5f,0xbfe460d376200130,1 +np.float64,0x7fedced9673b9db2,0xbfeeaf23445e1944,1 +np.float64,0x7fd9f271a733e4e2,0xbfd41544535ecb78,1 +np.float64,0x7fe703339bee0666,0x3fef93334626b56c,1 +np.float64,0x7fec7761b7b8eec2,0xbfe6da9179e8e714,1 +np.float64,0x7fdd9fff043b3ffd,0xbfc0761dfb8d94f9,1 +np.float64,0x7fdc10ed17b821d9,0x3fe1481e2a26c77f,1 +np.float64,0x7fe7681e72aed03c,0x3fefff94a6d47c84,1 +np.float64,0x7fe18c29e1e31853,0x3fe86ebd2fd89456,1 +np.float64,0x7fb2fb273c25f64d,0xbfefc136f57e06de,1 +np.float64,0x7fac2bbb90385776,0x3fe25d8e3cdae7e3,1 +np.float64,0x7fed16789efa2cf0,0x3fe94555091fdfd9,1 +np.float64,0x7fd8fe8f7831fd1e,0xbfed58d520361902,1 +np.float64,0x7fa59bde3c2b37bb,0x3fef585391c077ff,1 +np.float64,0x7fda981b53353036,0x3fde02ca08737b5f,1 +np.float64,0x7fd29f388aa53e70,0xbfe04f5499246df2,1 +np.float64,0x7fcd0232513a0464,0xbfd9737f2f565829,1 +np.float64,0x7fe9a881bcf35102,0xbfe079cf285b35dd,1 +np.float64,0x7fdbe399a9b7c732,0x3fe965bc4220f340,1 +np.float64,0x7feb77414af6ee82,0xbfb7df2fcd491f55,1 +np.float64,0x7fa26e86c424dd0d,0xbfea474c3d65b9be,1 +np.float64,0x7feaee869e35dd0c,0xbfd7b333a888cd14,1 +np.float64,0x7fcbd67f6137acfe,0xbfe15a7a15dfcee6,1 +np.float64,0x7fe36991e766d323,0xbfeb288077c4ed9f,1 +np.float64,0x7fdcf4f4fcb9e9e9,0xbfea331ef7a75e7b,1 +np.float64,0x7fbe3445643c688a,0x3fedf21b94ae8e37,1 +np.float64,0x7fd984cfd2b3099f,0x3fc0d3ade71c395e,1 +np.float64,0x7fdec987b23d930e,0x3fe4af5e48f6c26e,1 +np.float64,0x7fde56a9953cad52,0x3fc8e7762cefb8b0,1 +np.float64,0x7fd39fb446273f68,0xbfe6c3443208f44d,1 +np.float64,0x7fc609c1a72c1382,0x3fe884e639571baa,1 +np.float64,0x7fe001be4b20037c,0xbfed0d90cbcb6010,1 +np.float64,0x7fce7ace283cf59b,0xbfd0303792e51f49,1 +np.float64,0x7fe27ba93da4f751,0x3fe548b5ce740d71,1 +np.float64,0x7fcc13c79b38278e,0xbfe2e14f5b64a1e9,1 +np.float64,0x7fc058550620b0a9,0x3fe44bb55ebd0590,1 +np.float64,0x7fa4ba8bf8297517,0x3fee59b39f9d08c4,1 +np.float64,0x7fe50d6872ea1ad0,0xbfea1eaa2d059e13,1 
+np.float64,0x7feb7e33b476fc66,0xbfeff28a4424dd3e,1 +np.float64,0x7fe2d7d2a165afa4,0xbfdbaff0ba1ea460,1 +np.float64,0xffd126654b224cca,0xbfef0cd3031fb97c,1 +np.float64,0xffb5f884942bf108,0x3fe0de589bea2e4c,1 +np.float64,0xffe011b4bfe02369,0xbfe805a0edf1e1f2,1 +np.float64,0xffec13eae9b827d5,0x3fb5f30347d78447,1 +np.float64,0xffa6552ae82caa50,0x3fb1ecee60135f2f,1 +np.float64,0xffb62d38b02c5a70,0x3fbd35903148fd12,1 +np.float64,0xffe2c44ea425889d,0xbfd7616547f99a7d,1 +np.float64,0xffea24c61a74498c,0x3fef4a1b15ae9005,1 +np.float64,0xffd23a4ab2a47496,0x3fe933bfaa569ae9,1 +np.float64,0xffc34a073d269410,0xbfeec0f510bb7474,1 +np.float64,0xffeead84cfbd5b09,0x3feb2d635e5a78bd,1 +np.float64,0xffcfd8f3b43fb1e8,0xbfdd59625801771b,1 +np.float64,0xffd3c7f662a78fec,0x3f9cf3209edfbc4e,1 +np.float64,0xffe7b7e4f72f6fca,0xbfefdcff4925632c,1 +np.float64,0xffe48cab05e91956,0x3fe6b41217948423,1 +np.float64,0xffeb6980b336d301,0xbfca5de148f69324,1 +np.float64,0xffe3f15c4aa7e2b8,0xbfeb18efae892081,1 +np.float64,0xffcf290c713e5218,0x3fefe6f1a513ed26,1 +np.float64,0xffd80979b43012f4,0xbfde6c8df91af976,1 +np.float64,0xffc3181e0026303c,0x3fe7448f681def38,1 +np.float64,0xffedfa68f97bf4d1,0xbfeca6efb802d109,1 +np.float64,0xffca0931c0341264,0x3fe31b9f073b08cd,1 +np.float64,0xffe4c44934e98892,0x3feda393a2e8a0f7,1 +np.float64,0xffe65bb56f2cb76a,0xbfeffaf638a4b73e,1 +np.float64,0xffe406a332a80d46,0x3fe8151dadb853c1,1 +np.float64,0xffdb7eae9c36fd5e,0xbfeff89abf5ab16e,1 +np.float64,0xffe245a02da48b40,0x3fef1fb43e85f4b8,1 +np.float64,0xffe2bafa732575f4,0x3fcbab115c6fd86e,1 +np.float64,0xffe8b1eedb7163dd,0x3feff263df6f6b12,1 +np.float64,0xffe6c76c796d8ed8,0xbfe61a8668511293,1 +np.float64,0xffefe327d1ffc64f,0xbfd9b92887a84827,1 +np.float64,0xffa452180c28a430,0xbfa9b9e578a4e52f,1 +np.float64,0xffe9867d0bf30cf9,0xbfca577867588408,1 +np.float64,0xffdfe9b923bfd372,0x3fdab5c15f085c2d,1 +np.float64,0xffed590c6abab218,0xbfd7e7b6c5a120e6,1 +np.float64,0xffeaebcfbab5d79f,0x3fed58be8a9e2c3b,1 +np.float64,0xffe2ba83a8257507,0x3fe6c42a4ac1d4d9,1 +np.float64,0xffe01d5b0ee03ab6,0xbfe5dad6c9247db7,1 +np.float64,0xffe51095d52a212b,0x3fef822cebc32d8e,1 +np.float64,0xffebd7a901b7af51,0xbfe5e63f3e3b1185,1 +np.float64,0xffe4efdcde29dfb9,0xbfe811294dfa758f,1 +np.float64,0xffe3be1aa4a77c35,0x3fdd8dcfcd409bb1,1 +np.float64,0xffbe6f2f763cde60,0x3fd13766e43bd622,1 +np.float64,0xffeed3d80fbda7af,0x3fec10a23c1b7a4a,1 +np.float64,0xffd6ebff37add7fe,0xbfe6177411607c86,1 +np.float64,0xffe85a90f4b0b521,0x3fc09fdd66c8fde9,1 +np.float64,0xffea3d58c2b47ab1,0x3feb5bd4a04b3562,1 +np.float64,0xffef675be6beceb7,0x3fecd840683d1044,1 +np.float64,0xff726a088024d400,0x3feff2b4f47b5214,1 +np.float64,0xffc90856733210ac,0xbfe3c6ffbf6840a5,1 +np.float64,0xffc0b58d9a216b1c,0xbfe10314267d0611,1 +np.float64,0xffee1f3d0abc3e79,0xbfd12ea7efea9067,1 +np.float64,0xffd988c41a331188,0x3febe83802d8a32e,1 +np.float64,0xffe8f1ac9bb1e358,0xbfdbf5fa7e84f2f2,1 +np.float64,0xffe47af279e8f5e4,0x3fef11e339e5fa78,1 +np.float64,0xff9960a7f832c140,0xbfa150363f8ec5b2,1 +np.float64,0xffcac40fa7358820,0xbfec3d5847a3df1d,1 +np.float64,0xffcb024a9d360494,0xbfd060fa31fd6b6a,1 +np.float64,0xffe385ffb3270bff,0xbfee6859e8dcd9e8,1 +np.float64,0xffef62f2c53ec5e5,0x3fe0a71ffddfc718,1 +np.float64,0xffed87ff20fb0ffd,0xbfe661db7c4098e3,1 +np.float64,0xffe369278526d24e,0x3fd64d89a41822fc,1 +np.float64,0xff950288c02a0520,0x3fe1df91d1ad7d5c,1 +np.float64,0xffe70e7c2cee1cf8,0x3fc9fece08df2fd8,1 +np.float64,0xffbaf020b635e040,0xbfc68c43ff9911a7,1 +np.float64,0xffee0120b0fc0240,0x3f9f792e17b490b0,1 
+np.float64,0xffe1fa4be7a3f498,0xbfef4b18ab4b319e,1 +np.float64,0xffe61887bf2c310f,0x3fe846714826cb32,1 +np.float64,0xffdc3cf77f3879ee,0x3fe033b948a36125,1 +np.float64,0xffcc2b86f238570c,0xbfefdcceac3f220f,1 +np.float64,0xffe1f030c0a3e061,0x3fef502a808c359a,1 +np.float64,0xffb872c4ee30e588,0x3fef66ed8d3e6175,1 +np.float64,0xffeac8fc617591f8,0xbfe5d8448602aac9,1 +np.float64,0xffe5be16afab7c2d,0x3fee75ccde3cd14d,1 +np.float64,0xffae230ad83c4610,0xbfe49bbe6074d459,1 +np.float64,0xffc8fbeff531f7e0,0x3f77201e0c927f97,1 +np.float64,0xffdc314f48b8629e,0x3fef810dfc5db118,1 +np.float64,0xffec1f8970783f12,0x3fe15567102e042a,1 +np.float64,0xffc6995f902d32c0,0xbfecd5d2eedf342c,1 +np.float64,0xffdc7af76b38f5ee,0xbfd6e754476ab320,1 +np.float64,0xffb30cf8682619f0,0x3fd5ac3dfc4048d0,1 +np.float64,0xffd3a77695a74eee,0xbfefb5d6889e36e9,1 +np.float64,0xffd8b971803172e4,0xbfeb7f62f0b6c70b,1 +np.float64,0xffde4c0234bc9804,0xbfed50ba9e16d5e0,1 +np.float64,0xffb62b3f342c5680,0xbfeabc0de4069b84,1 +np.float64,0xff9af5674035eac0,0xbfed6c198b6b1bd8,1 +np.float64,0xffdfe20cb43fc41a,0x3fb11f8238f66306,1 +np.float64,0xffd2ecd7a0a5d9b0,0xbfec17ef1a62b1e3,1 +np.float64,0xffce60f7863cc1f0,0x3fe6dbcad3e3a006,1 +np.float64,0xffbbb8306a377060,0xbfbfd0fbef485c4c,1 +np.float64,0xffd1b2bd2b23657a,0xbfda3e046d987b99,1 +np.float64,0xffc480f4092901e8,0xbfeeff0427f6897b,1 +np.float64,0xffe6e02d926dc05a,0xbfcd59552778890b,1 +np.float64,0xffd302e5b7a605cc,0xbfee7c08641366b0,1 +np.float64,0xffec2eb92f785d72,0xbfef5c9c7f771050,1 +np.float64,0xffea3e31a9747c62,0xbfc49cd54755faf0,1 +np.float64,0xffce0a4e333c149c,0x3feeb9a6d0db4aee,1 +np.float64,0xffdc520a2db8a414,0x3fefc7b72613dcd0,1 +np.float64,0xffe056b968a0ad72,0xbfe47a9fe1f827fb,1 +np.float64,0xffe5a10f4cab421e,0x3fec2b1f74b73dec,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-sinh.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-sinh.csv new file mode 100644 index 0000000..1ef7b6e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-sinh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfee27582,0xff800000,2 +np.float32,0xff19f092,0xff800000,2 +np.float32,0xbf393576,0xbf49cb31,2 +np.float32,0x8020fdea,0x8020fdea,2 +np.float32,0x455f4e,0x455f4e,2 +np.float32,0xff718c35,0xff800000,2 +np.float32,0x3f3215e3,0x3f40cce5,2 +np.float32,0x19e833,0x19e833,2 +np.float32,0xff2dcd49,0xff800000,2 +np.float32,0x7e8f6c95,0x7f800000,2 +np.float32,0xbf159dac,0xbf1e47a5,2 +np.float32,0x100d3d,0x100d3d,2 +np.float32,0xff673441,0xff800000,2 +np.float32,0x80275355,0x80275355,2 +np.float32,0x4812d0,0x4812d0,2 +np.float32,0x8072b956,0x8072b956,2 +np.float32,0xff3bb918,0xff800000,2 +np.float32,0x0,0x0,2 +np.float32,0xfe327798,0xff800000,2 +np.float32,0x41d4e2,0x41d4e2,2 +np.float32,0xfe34b1b8,0xff800000,2 +np.float32,0x80199f72,0x80199f72,2 +np.float32,0x807242ce,0x807242ce,2 +np.float32,0x3ef4202d,0x3efd7b48,2 +np.float32,0x763529,0x763529,2 +np.float32,0x4f6662,0x4f6662,2 +np.float32,0x3f18efe9,0x3f2232b5,2 +np.float32,0x80701846,0x80701846,2 +np.float32,0x3f599948,0x3f74c393,2 +np.float32,0x5a3d69,0x5a3d69,2 +np.float32,0xbf4a7e65,0xbf6047a3,2 +np.float32,0xff0d4c82,0xff800000,2 +np.float32,0x7a74db,0x7a74db,2 +np.float32,0x803388e6,0x803388e6,2 +np.float32,0x7f4430bb,0x7f800000,2 +np.float32,0x14c5b1,0x14c5b1,2 +np.float32,0xfa113400,0xff800000,2 +np.float32,0x7f4b3209,0x7f800000,2 +np.float32,0x8038d88c,0x8038d88c,2 +np.float32,0xbef2f9de,0xbefc330b,2 
+np.float32,0xbe147b38,0xbe15008f,2 +np.float32,0x2b61e6,0x2b61e6,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0x8060456c,0x8060456c,2 +np.float32,0x3f30fa82,0x3f3f6a99,2 +np.float32,0xfd1f0220,0xff800000,2 +np.float32,0xbf2b7555,0xbf389151,2 +np.float32,0xff100b7a,0xff800000,2 +np.float32,0x70d3cd,0x70d3cd,2 +np.float32,0x2a8d4a,0x2a8d4a,2 +np.float32,0xbf7b733f,0xbf92f05f,2 +np.float32,0x3f7106dc,0x3f8b1fc6,2 +np.float32,0x3f39da7a,0x3f4a9d79,2 +np.float32,0x3f5dd73f,0x3f7aaab5,2 +np.float32,0xbe8c8754,0xbe8e4cba,2 +np.float32,0xbf6c74c9,0xbf87c556,2 +np.float32,0x800efbbb,0x800efbbb,2 +np.float32,0xff054ab5,0xff800000,2 +np.float32,0x800b4b46,0x800b4b46,2 +np.float32,0xff77fd74,0xff800000,2 +np.float32,0x257d0,0x257d0,2 +np.float32,0x7caa0c,0x7caa0c,2 +np.float32,0x8025d24d,0x8025d24d,2 +np.float32,0x3d9f1b60,0x3d9f445c,2 +np.float32,0xbe3bf6e8,0xbe3d0595,2 +np.float32,0x54bb93,0x54bb93,2 +np.float32,0xbf3e6a45,0xbf507716,2 +np.float32,0x3f4bb26e,0x3f61e1cd,2 +np.float32,0x3f698edc,0x3f85aac5,2 +np.float32,0xff7bd0ef,0xff800000,2 +np.float32,0xbed07b68,0xbed64a8e,2 +np.float32,0xbf237c72,0xbf2ed3d2,2 +np.float32,0x27b0fa,0x27b0fa,2 +np.float32,0x3f7606d1,0x3f8ed7d6,2 +np.float32,0x790dc0,0x790dc0,2 +np.float32,0x7f68f3ac,0x7f800000,2 +np.float32,0xbed39288,0xbed9a52f,2 +np.float32,0x3f6f8266,0x3f8a0187,2 +np.float32,0x3fbdca,0x3fbdca,2 +np.float32,0xbf7c3e5d,0xbf938b2c,2 +np.float32,0x802321a8,0x802321a8,2 +np.float32,0x3eecab66,0x3ef53031,2 +np.float32,0x62b324,0x62b324,2 +np.float32,0x3f13afac,0x3f1c03fe,2 +np.float32,0xff315ad7,0xff800000,2 +np.float32,0xbf1fac0d,0xbf2a3a63,2 +np.float32,0xbf543984,0xbf6d61d6,2 +np.float32,0x71a212,0x71a212,2 +np.float32,0x114fbe,0x114fbe,2 +np.float32,0x3f5b6ff2,0x3f77505f,2 +np.float32,0xff6ff89e,0xff800000,2 +np.float32,0xff4527a1,0xff800000,2 +np.float32,0x22cb3,0x22cb3,2 +np.float32,0x7f53bb6b,0x7f800000,2 +np.float32,0xff3d2dea,0xff800000,2 +np.float32,0xfd21dac0,0xff800000,2 +np.float32,0xfc486140,0xff800000,2 +np.float32,0x7e2b693a,0x7f800000,2 +np.float32,0x8022a9fb,0x8022a9fb,2 +np.float32,0x80765de0,0x80765de0,2 +np.float32,0x13d299,0x13d299,2 +np.float32,0x7ee53713,0x7f800000,2 +np.float32,0xbde1c770,0xbde23c96,2 +np.float32,0xbd473fc0,0xbd4753de,2 +np.float32,0x3f1cb455,0x3f26acf3,2 +np.float32,0x683e49,0x683e49,2 +np.float32,0x3ed5a9fc,0x3edbeb79,2 +np.float32,0x3f4fe3f6,0x3f67814f,2 +np.float32,0x802a2bce,0x802a2bce,2 +np.float32,0x7e951b4c,0x7f800000,2 +np.float32,0xbe6eb260,0xbe70dd44,2 +np.float32,0xbe3daca8,0xbe3ec2cb,2 +np.float32,0xbe9c38b2,0xbe9ea822,2 +np.float32,0xff2e29dc,0xff800000,2 +np.float32,0x7f62c7cc,0x7f800000,2 +np.float32,0xbf6799a4,0xbf84416c,2 +np.float32,0xbe30a7f0,0xbe318898,2 +np.float32,0xc83d9,0xc83d9,2 +np.float32,0x3f05abf4,0x3f0bd447,2 +np.float32,0x7e9b018a,0x7f800000,2 +np.float32,0xbf0ed72e,0xbf165e5b,2 +np.float32,0x8011ac8c,0x8011ac8c,2 +np.float32,0xbeb7c706,0xbebbbfcb,2 +np.float32,0x803637f9,0x803637f9,2 +np.float32,0xfe787cc8,0xff800000,2 +np.float32,0x3f533d4b,0x3f6c0a50,2 +np.float32,0x3f5c0f1c,0x3f782dde,2 +np.float32,0x3f301f36,0x3f3e590d,2 +np.float32,0x2dc929,0x2dc929,2 +np.float32,0xff15018a,0xff800000,2 +np.float32,0x3f4d0c56,0x3f63afeb,2 +np.float32,0xbf7a2ae3,0xbf91f6e4,2 +np.float32,0xbe771b84,0xbe798346,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x7f5689ba,0x7f800000,2 +np.float32,0x3f1c3177,0x3f2610df,2 +np.float32,0x3f1b9664,0x3f255825,2 +np.float32,0x3f7e5066,0x3f9520d4,2 +np.float32,0xbf1935f8,0xbf2285ab,2 +np.float32,0x3f096cc7,0x3f101ef9,2 
+np.float32,0x8030c180,0x8030c180,2 +np.float32,0x6627ed,0x6627ed,2 +np.float32,0x454595,0x454595,2 +np.float32,0x7de66a33,0x7f800000,2 +np.float32,0xbf800000,0xbf966cfe,2 +np.float32,0xbf35c0a8,0xbf456939,2 +np.float32,0x3f6a6266,0x3f8643e0,2 +np.float32,0x3f0cbcee,0x3f13ef6a,2 +np.float32,0x7efd1e58,0x7f800000,2 +np.float32,0xfe9a74c6,0xff800000,2 +np.float32,0x807ebe6c,0x807ebe6c,2 +np.float32,0x80656736,0x80656736,2 +np.float32,0x800e0608,0x800e0608,2 +np.float32,0xbf30e39a,0xbf3f4e00,2 +np.float32,0x802015fd,0x802015fd,2 +np.float32,0x3e3ce26d,0x3e3df519,2 +np.float32,0x7ec142ac,0x7f800000,2 +np.float32,0xbf68c9ce,0xbf851c78,2 +np.float32,0xfede8356,0xff800000,2 +np.float32,0xbf1507ce,0xbf1d978d,2 +np.float32,0x3e53914c,0x3e551374,2 +np.float32,0x7f3e1c14,0x7f800000,2 +np.float32,0x8070d2ba,0x8070d2ba,2 +np.float32,0xbf4eb793,0xbf65ecee,2 +np.float32,0x7365a6,0x7365a6,2 +np.float32,0x8045cba2,0x8045cba2,2 +np.float32,0x7e4af521,0x7f800000,2 +np.float32,0xbf228625,0xbf2da9e1,2 +np.float32,0x7ee0536c,0x7f800000,2 +np.float32,0x3e126607,0x3e12e5d5,2 +np.float32,0x80311d92,0x80311d92,2 +np.float32,0xbf386b8b,0xbf48ca54,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x8049ec7a,0x8049ec7a,2 +np.float32,0xbf1dfde4,0xbf2836be,2 +np.float32,0x7e719a8c,0x7f800000,2 +np.float32,0x3eb9c856,0x3ebde2e6,2 +np.float32,0xfe3efda8,0xff800000,2 +np.float32,0xbe89d60c,0xbe8b81d1,2 +np.float32,0x3eaad338,0x3eae0317,2 +np.float32,0x7f4e5217,0x7f800000,2 +np.float32,0x3e9d0f40,0x3e9f88ce,2 +np.float32,0xbe026708,0xbe02c155,2 +np.float32,0x5fc22f,0x5fc22f,2 +np.float32,0x1c4572,0x1c4572,2 +np.float32,0xbed89d96,0xbedf22c5,2 +np.float32,0xbf3debee,0xbf4fd441,2 +np.float32,0xbf465520,0xbf5ac6e5,2 +np.float32,0x3f797081,0x3f9169b3,2 +np.float32,0xbf250734,0xbf30b2aa,2 +np.float32,0x7f5068e9,0x7f800000,2 +np.float32,0x3f1b814e,0x3f253f0c,2 +np.float32,0xbf27c5d3,0xbf340b05,2 +np.float32,0x3f1b78ae,0x3f2534c8,2 +np.float32,0x8059b51a,0x8059b51a,2 +np.float32,0x8059f182,0x8059f182,2 +np.float32,0xbf1bb36e,0xbf257ab8,2 +np.float32,0x41ac35,0x41ac35,2 +np.float32,0x68f41f,0x68f41f,2 +np.float32,0xbea504dc,0xbea7e40f,2 +np.float32,0x1,0x1,2 +np.float32,0x3e96b5b0,0x3e98e542,2 +np.float32,0x7f7fffff,0x7f800000,2 +np.float32,0x3c557a80,0x3c557c0c,2 +np.float32,0x800ca3ec,0x800ca3ec,2 +np.float32,0x8077d4aa,0x8077d4aa,2 +np.float32,0x3f000af0,0x3f0572d6,2 +np.float32,0x3e0434dd,0x3e0492f8,2 +np.float32,0x7d1a710a,0x7f800000,2 +np.float32,0x3f70f996,0x3f8b15f8,2 +np.float32,0x8033391d,0x8033391d,2 +np.float32,0x11927c,0x11927c,2 +np.float32,0x7f7784be,0x7f800000,2 +np.float32,0x7acb22af,0x7f800000,2 +np.float32,0x7e8b153c,0x7f800000,2 +np.float32,0x66d402,0x66d402,2 +np.float32,0xfed6e7b0,0xff800000,2 +np.float32,0x7f6872d3,0x7f800000,2 +np.float32,0x1bd49c,0x1bd49c,2 +np.float32,0xfdc4f1b8,0xff800000,2 +np.float32,0xbed8a466,0xbedf2a33,2 +np.float32,0x7ee789,0x7ee789,2 +np.float32,0xbece94b4,0xbed43b52,2 +np.float32,0x3cf3f734,0x3cf4006f,2 +np.float32,0x7e44aa00,0x7f800000,2 +np.float32,0x7f19e99c,0x7f800000,2 +np.float32,0x806ff1bc,0x806ff1bc,2 +np.float32,0x80296934,0x80296934,2 +np.float32,0x7f463363,0x7f800000,2 +np.float32,0xbf212ac3,0xbf2c06bb,2 +np.float32,0x3dc63778,0x3dc686ba,2 +np.float32,0x7f1b4328,0x7f800000,2 +np.float32,0x6311f6,0x6311f6,2 +np.float32,0xbf6b6fb6,0xbf870751,2 +np.float32,0xbf2c44cf,0xbf399155,2 +np.float32,0x3e7a67bc,0x3e7ce887,2 +np.float32,0x7f57c5f7,0x7f800000,2 +np.float32,0x7f2bb4ff,0x7f800000,2 +np.float32,0xbe9d448e,0xbe9fc0a4,2 +np.float32,0xbf4840f0,0xbf5d4f6b,2 
+np.float32,0x7f1e1176,0x7f800000,2 +np.float32,0xff76638e,0xff800000,2 +np.float32,0xff055555,0xff800000,2 +np.float32,0x3f32b82b,0x3f419834,2 +np.float32,0xff363aa8,0xff800000,2 +np.float32,0x7f737fd0,0x7f800000,2 +np.float32,0x3da5d798,0x3da60602,2 +np.float32,0x3f1cc126,0x3f26bc3e,2 +np.float32,0x7eb07541,0x7f800000,2 +np.float32,0x3f7b2ff2,0x3f92bd2a,2 +np.float32,0x474f7,0x474f7,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0xff2b0a4e,0xff800000,2 +np.float32,0xfeb24f16,0xff800000,2 +np.float32,0x2cb9fc,0x2cb9fc,2 +np.float32,0x67189d,0x67189d,2 +np.float32,0x8033d854,0x8033d854,2 +np.float32,0xbe85e94c,0xbe87717a,2 +np.float32,0x80767c6c,0x80767c6c,2 +np.float32,0x7ea84d65,0x7f800000,2 +np.float32,0x3f024bc7,0x3f07fead,2 +np.float32,0xbdcb0100,0xbdcb5625,2 +np.float32,0x3f160a9e,0x3f1ec7c9,2 +np.float32,0xff1734c8,0xff800000,2 +np.float32,0x7f424d5e,0x7f800000,2 +np.float32,0xbf75b215,0xbf8e9862,2 +np.float32,0x3f262a42,0x3f3214c4,2 +np.float32,0xbf4cfb53,0xbf639927,2 +np.float32,0x3f4ac8b8,0x3f60aa7c,2 +np.float32,0x3e90e593,0x3e92d6b3,2 +np.float32,0xbf66bccf,0xbf83a2d8,2 +np.float32,0x7d3d851a,0x7f800000,2 +np.float32,0x7bac783c,0x7f800000,2 +np.float32,0x8001c626,0x8001c626,2 +np.float32,0xbdffd480,0xbe003f7b,2 +np.float32,0x7f6680bf,0x7f800000,2 +np.float32,0xbecf448e,0xbed4f9bb,2 +np.float32,0x584c7,0x584c7,2 +np.float32,0x3f3e8ea0,0x3f50a5fb,2 +np.float32,0xbf5a5f04,0xbf75d56e,2 +np.float32,0x8065ae47,0x8065ae47,2 +np.float32,0xbf48dce3,0xbf5e1dba,2 +np.float32,0xbe8dae2e,0xbe8f7ed8,2 +np.float32,0x3f7ca6ab,0x3f93dace,2 +np.float32,0x4c3e81,0x4c3e81,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3ee1f7d9,0x3ee96033,2 +np.float32,0x80588c6f,0x80588c6f,2 +np.float32,0x5ba34e,0x5ba34e,2 +np.float32,0x80095d28,0x80095d28,2 +np.float32,0xbe7ba198,0xbe7e2bdd,2 +np.float32,0xbe0bdcb4,0xbe0c4c22,2 +np.float32,0x1776f7,0x1776f7,2 +np.float32,0x80328b2a,0x80328b2a,2 +np.float32,0x3e978d37,0x3e99c63e,2 +np.float32,0x7ed50906,0x7f800000,2 +np.float32,0x3f776a54,0x3f8fe2bd,2 +np.float32,0xbed624c4,0xbedc7120,2 +np.float32,0x7f0b6a31,0x7f800000,2 +np.float32,0x7eb13913,0x7f800000,2 +np.float32,0xbe733684,0xbe758190,2 +np.float32,0x80016474,0x80016474,2 +np.float32,0x7a51ee,0x7a51ee,2 +np.float32,0x3f6cb91e,0x3f87f729,2 +np.float32,0xbd99b050,0xbd99d540,2 +np.float32,0x7c6e3cba,0x7f800000,2 +np.float32,0xbf00179a,0xbf05811e,2 +np.float32,0x3e609b29,0x3e626954,2 +np.float32,0xff3fd71a,0xff800000,2 +np.float32,0x5d8c2,0x5d8c2,2 +np.float32,0x7ee93662,0x7f800000,2 +np.float32,0x4b0b31,0x4b0b31,2 +np.float32,0x3ec243b7,0x3ec6f594,2 +np.float32,0x804d60f1,0x804d60f1,2 +np.float32,0xbf0cb784,0xbf13e929,2 +np.float32,0x3f13b74d,0x3f1c0cee,2 +np.float32,0xfe37cb64,0xff800000,2 +np.float32,0x1a88,0x1a88,2 +np.float32,0x3e22a472,0x3e2353ba,2 +np.float32,0x7f07d6a0,0x7f800000,2 +np.float32,0x3f78f435,0x3f910bb5,2 +np.float32,0x555a4a,0x555a4a,2 +np.float32,0x3e306c1f,0x3e314be3,2 +np.float32,0x8005877c,0x8005877c,2 +np.float32,0x4df389,0x4df389,2 +np.float32,0x8069ffc7,0x8069ffc7,2 +np.float32,0x3f328f24,0x3f4164c6,2 +np.float32,0x53a31b,0x53a31b,2 +np.float32,0xbe4d6768,0xbe4ec8be,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x3f484c1b,0x3f5d5e2f,2 +np.float32,0x8038be05,0x8038be05,2 +np.float32,0x58ac0f,0x58ac0f,2 +np.float32,0x7ed7fb72,0x7f800000,2 +np.float32,0x5a22e1,0x5a22e1,2 +np.float32,0xbebb7394,0xbebfaad6,2 +np.float32,0xbda98160,0xbda9b2ef,2 +np.float32,0x7f3e5c42,0x7f800000,2 +np.float32,0xfed204ae,0xff800000,2 +np.float32,0xbf5ef782,0xbf7c3ec5,2 
+np.float32,0xbef7a0a8,0xbf00b292,2 +np.float32,0xfee6e176,0xff800000,2 +np.float32,0xfe121140,0xff800000,2 +np.float32,0xfe9e13be,0xff800000,2 +np.float32,0xbf3c98b1,0xbf4e2003,2 +np.float32,0x77520d,0x77520d,2 +np.float32,0xf17b2,0xf17b2,2 +np.float32,0x724d2f,0x724d2f,2 +np.float32,0x7eb326f5,0x7f800000,2 +np.float32,0x3edd6bf2,0x3ee4636e,2 +np.float32,0x350f57,0x350f57,2 +np.float32,0xff7d4435,0xff800000,2 +np.float32,0x802b2b9d,0x802b2b9d,2 +np.float32,0xbf7fbeee,0xbf963acf,2 +np.float32,0x804f3100,0x804f3100,2 +np.float32,0x7c594a71,0x7f800000,2 +np.float32,0x3ef49340,0x3efdfbb6,2 +np.float32,0x2e0659,0x2e0659,2 +np.float32,0x8006d5fe,0x8006d5fe,2 +np.float32,0xfd2a00b0,0xff800000,2 +np.float32,0xbee1c016,0xbee922ed,2 +np.float32,0x3e3b7de8,0x3e3c8a8b,2 +np.float32,0x805e6bba,0x805e6bba,2 +np.float32,0x1a7da2,0x1a7da2,2 +np.float32,0x6caba4,0x6caba4,2 +np.float32,0x802f7eab,0x802f7eab,2 +np.float32,0xff68b16b,0xff800000,2 +np.float32,0x8064f5e5,0x8064f5e5,2 +np.float32,0x2e39b4,0x2e39b4,2 +np.float32,0x800000,0x800000,2 +np.float32,0xfd0334c0,0xff800000,2 +np.float32,0x3e952fc4,0x3e974e7e,2 +np.float32,0x80057d33,0x80057d33,2 +np.float32,0x3ed3ddc4,0x3ed9f6f1,2 +np.float32,0x3f74ce18,0x3f8dedf4,2 +np.float32,0xff6bb7c0,0xff800000,2 +np.float32,0xff43bc21,0xff800000,2 +np.float32,0x80207570,0x80207570,2 +np.float32,0x7e1dda75,0x7f800000,2 +np.float32,0x3efe335c,0x3f0462ff,2 +np.float32,0xbf252c0c,0xbf30df70,2 +np.float32,0x3ef4b8e3,0x3efe25ba,2 +np.float32,0x7c33938d,0x7f800000,2 +np.float32,0x3eb1593c,0x3eb4ea95,2 +np.float32,0xfe1d0068,0xff800000,2 +np.float32,0xbf10da9b,0xbf18b551,2 +np.float32,0xfeb65748,0xff800000,2 +np.float32,0xfe8c6014,0xff800000,2 +np.float32,0x3f0503e2,0x3f0b14e3,2 +np.float32,0xfe5e5248,0xff800000,2 +np.float32,0xbd10afa0,0xbd10b754,2 +np.float32,0xff64b609,0xff800000,2 +np.float32,0xbf674a96,0xbf84089c,2 +np.float32,0x7f5d200d,0x7f800000,2 +np.float32,0x3cf44900,0x3cf45245,2 +np.float32,0x8044445a,0x8044445a,2 +np.float32,0xff35b676,0xff800000,2 +np.float32,0x806452cd,0x806452cd,2 +np.float32,0xbf2930fb,0xbf35c7b4,2 +np.float32,0x7e500617,0x7f800000,2 +np.float32,0x543719,0x543719,2 +np.float32,0x3ed11068,0x3ed6ec1d,2 +np.float32,0xbd8db068,0xbd8dcd59,2 +np.float32,0x3ede62c8,0x3ee571d0,2 +np.float32,0xbf00a410,0xbf061f9c,2 +np.float32,0xbf44fa39,0xbf58ff5b,2 +np.float32,0x3f1c3114,0x3f261069,2 +np.float32,0xbdea6210,0xbdeae521,2 +np.float32,0x80059f6d,0x80059f6d,2 +np.float32,0xbdba15f8,0xbdba578c,2 +np.float32,0x6d8a61,0x6d8a61,2 +np.float32,0x6f5428,0x6f5428,2 +np.float32,0x18d0e,0x18d0e,2 +np.float32,0x50e131,0x50e131,2 +np.float32,0x3f2f52be,0x3f3d5a7e,2 +np.float32,0x7399d8,0x7399d8,2 +np.float32,0x106524,0x106524,2 +np.float32,0x7ebf1c53,0x7f800000,2 +np.float32,0x80276458,0x80276458,2 +np.float32,0x3ebbde67,0x3ec01ceb,2 +np.float32,0x80144d9d,0x80144d9d,2 +np.float32,0x8017ea6b,0x8017ea6b,2 +np.float32,0xff38f201,0xff800000,2 +np.float32,0x7f2daa82,0x7f800000,2 +np.float32,0x3f3cb7c7,0x3f4e47ed,2 +np.float32,0x7f08c779,0x7f800000,2 +np.float32,0xbecc907a,0xbed20cec,2 +np.float32,0x7d440002,0x7f800000,2 +np.float32,0xbd410d80,0xbd411fcd,2 +np.float32,0x3d63ae07,0x3d63cc0c,2 +np.float32,0x805a9c13,0x805a9c13,2 +np.float32,0x803bdcdc,0x803bdcdc,2 +np.float32,0xbe88b354,0xbe8a5497,2 +np.float32,0x3f4eaf43,0x3f65e1c2,2 +np.float32,0x3f15e5b8,0x3f1e9c60,2 +np.float32,0x3e8a870c,0x3e8c394e,2 +np.float32,0x7e113de9,0x7f800000,2 +np.float32,0x7ee5ba41,0x7f800000,2 +np.float32,0xbe73d178,0xbe7620eb,2 +np.float32,0xfe972e6a,0xff800000,2 
+np.float32,0xbf65567d,0xbf82a25a,2 +np.float32,0x3f38247e,0x3f487010,2 +np.float32,0xbece1c62,0xbed3b918,2 +np.float32,0x442c8d,0x442c8d,2 +np.float32,0x2dc52,0x2dc52,2 +np.float32,0x802ed923,0x802ed923,2 +np.float32,0x788cf8,0x788cf8,2 +np.float32,0x8024888e,0x8024888e,2 +np.float32,0x3f789bde,0x3f90c8fc,2 +np.float32,0x3f5de620,0x3f7abf88,2 +np.float32,0x3f0ffc45,0x3f17b2a7,2 +np.float32,0xbf709678,0xbf8accd4,2 +np.float32,0x12181f,0x12181f,2 +np.float32,0xfe54bbe4,0xff800000,2 +np.float32,0x7f1daba0,0x7f800000,2 +np.float32,0xbf6226df,0xbf805e3c,2 +np.float32,0xbd120610,0xbd120dfb,2 +np.float32,0x7f75e951,0x7f800000,2 +np.float32,0x80068048,0x80068048,2 +np.float32,0x45f04a,0x45f04a,2 +np.float32,0xff4c4f58,0xff800000,2 +np.float32,0x311604,0x311604,2 +np.float32,0x805e809c,0x805e809c,2 +np.float32,0x3d1d62c0,0x3d1d6caa,2 +np.float32,0x7f14ccf9,0x7f800000,2 +np.float32,0xff10017c,0xff800000,2 +np.float32,0xbf43ec48,0xbf579df4,2 +np.float32,0xff64da57,0xff800000,2 +np.float32,0x7f0622c5,0x7f800000,2 +np.float32,0x7f5460cd,0x7f800000,2 +np.float32,0xff0ef1c6,0xff800000,2 +np.float32,0xbece1146,0xbed3ad13,2 +np.float32,0x3f4d457f,0x3f63fc70,2 +np.float32,0xbdc1da28,0xbdc2244b,2 +np.float32,0xbe46d3f4,0xbe481463,2 +np.float32,0xff36b3d6,0xff800000,2 +np.float32,0xbec2e76c,0xbec7a540,2 +np.float32,0x8078fb81,0x8078fb81,2 +np.float32,0x7ec819cb,0x7f800000,2 +np.float32,0x39c4d,0x39c4d,2 +np.float32,0xbe8cddc2,0xbe8ea670,2 +np.float32,0xbf36dffb,0xbf46d48b,2 +np.float32,0xbf2302a3,0xbf2e4065,2 +np.float32,0x3e7b34a2,0x3e7dbb9a,2 +np.float32,0x3e3d87e1,0x3e3e9d62,2 +np.float32,0x7f3c94b1,0x7f800000,2 +np.float32,0x80455a85,0x80455a85,2 +np.float32,0xfd875568,0xff800000,2 +np.float32,0xbf618103,0xbf7fd1c8,2 +np.float32,0xbe332e3c,0xbe3418ac,2 +np.float32,0x80736b79,0x80736b79,2 +np.float32,0x3f705d9a,0x3f8aa2e6,2 +np.float32,0xbf3a36d2,0xbf4b134b,2 +np.float32,0xfddc55c0,0xff800000,2 +np.float32,0x805606fd,0x805606fd,2 +np.float32,0x3f4f0bc4,0x3f665e25,2 +np.float32,0xfebe7494,0xff800000,2 +np.float32,0xff0c541b,0xff800000,2 +np.float32,0xff0b8e7f,0xff800000,2 +np.float32,0xbcc51640,0xbcc51b1e,2 +np.float32,0x7ec1c4d0,0x7f800000,2 +np.float32,0xfc5c8e00,0xff800000,2 +np.float32,0x7f48d682,0x7f800000,2 +np.float32,0x7d5c7d8d,0x7f800000,2 +np.float32,0x8052ed03,0x8052ed03,2 +np.float32,0x7d4db058,0x7f800000,2 +np.float32,0xff3a65ee,0xff800000,2 +np.float32,0x806eeb93,0x806eeb93,2 +np.float32,0x803f9733,0x803f9733,2 +np.float32,0xbf2d1388,0xbf3a90e3,2 +np.float32,0x68e260,0x68e260,2 +np.float32,0x3e47a69f,0x3e48eb0e,2 +np.float32,0x3f0c4623,0x3f136646,2 +np.float32,0x3f37a831,0x3f47d249,2 +np.float32,0xff153a0c,0xff800000,2 +np.float32,0x2e8086,0x2e8086,2 +np.float32,0xc3f5e,0xc3f5e,2 +np.float32,0x7f31dc14,0x7f800000,2 +np.float32,0xfee37d68,0xff800000,2 +np.float32,0x711d4,0x711d4,2 +np.float32,0x7ede2ce4,0x7f800000,2 +np.float32,0xbf5d76d0,0xbf7a23d0,2 +np.float32,0xbe2b9eb4,0xbe2c6cac,2 +np.float32,0x2b14d7,0x2b14d7,2 +np.float32,0x3ea1db72,0x3ea4910e,2 +np.float32,0x7f3f03f7,0x7f800000,2 +np.float32,0x92de5,0x92de5,2 +np.float32,0x80322e1b,0x80322e1b,2 +np.float32,0xbf5eb214,0xbf7bdd55,2 +np.float32,0xbf21bf87,0xbf2cba14,2 +np.float32,0xbf5d4b78,0xbf79e73a,2 +np.float32,0xbc302840,0xbc30291e,2 +np.float32,0xfee567c6,0xff800000,2 +np.float32,0x7f70ee14,0x7f800000,2 +np.float32,0x7e5c4b33,0x7f800000,2 +np.float32,0x3f1e7b64,0x3f28ccfd,2 +np.float32,0xbf6309f7,0xbf80ff3e,2 +np.float32,0x1c2fe3,0x1c2fe3,2 +np.float32,0x8e78d,0x8e78d,2 +np.float32,0x7f2fce73,0x7f800000,2 
+np.float32,0x7f25f690,0x7f800000,2 +np.float32,0x8074cba5,0x8074cba5,2 +np.float32,0x16975f,0x16975f,2 +np.float32,0x8012cf5c,0x8012cf5c,2 +np.float32,0x7da72138,0x7f800000,2 +np.float32,0xbf563f35,0xbf7025be,2 +np.float32,0x3f69d3f5,0x3f85dcbe,2 +np.float32,0xbf15c148,0xbf1e7184,2 +np.float32,0xbe7a077c,0xbe7c8564,2 +np.float32,0x3ebb6ef1,0x3ebfa5e3,2 +np.float32,0xbe41fde4,0xbe43277b,2 +np.float32,0x7f10b479,0x7f800000,2 +np.float32,0x3e021ace,0x3e02747d,2 +np.float32,0x3e93d984,0x3e95e9be,2 +np.float32,0xfe17e924,0xff800000,2 +np.float32,0xfe21a7cc,0xff800000,2 +np.float32,0x8019b660,0x8019b660,2 +np.float32,0x7e954631,0x7f800000,2 +np.float32,0x7e7330d1,0x7f800000,2 +np.float32,0xbe007d98,0xbe00d3fb,2 +np.float32,0x3ef3870e,0x3efcd077,2 +np.float32,0x7f5bbde8,0x7f800000,2 +np.float32,0x14a5b3,0x14a5b3,2 +np.float32,0x3e84d23f,0x3e8650e8,2 +np.float32,0x80763017,0x80763017,2 +np.float32,0xfe871f36,0xff800000,2 +np.float32,0x7ed43150,0x7f800000,2 +np.float32,0x3cc44547,0x3cc44a16,2 +np.float32,0x3ef0c0fa,0x3ef9b97d,2 +np.float32,0xbede9944,0xbee5ad86,2 +np.float32,0xbf10f0b2,0xbf18cf0a,2 +np.float32,0x3ecdaa78,0x3ed33dd9,2 +np.float32,0x3f7cc058,0x3f93ee6b,2 +np.float32,0x2d952f,0x2d952f,2 +np.float32,0x3f2cf2de,0x3f3a687a,2 +np.float32,0x8029b33c,0x8029b33c,2 +np.float32,0xbf22c737,0xbf2df888,2 +np.float32,0xff53c84a,0xff800000,2 +np.float32,0x40a509,0x40a509,2 +np.float32,0x56abce,0x56abce,2 +np.float32,0xff7fffff,0xff800000,2 +np.float32,0xbf3e67f6,0xbf50741c,2 +np.float32,0xfde67580,0xff800000,2 +np.float32,0x3f103e9b,0x3f17ffc7,2 +np.float32,0x3f3f7232,0x3f51cbe2,2 +np.float32,0x803e6d78,0x803e6d78,2 +np.float32,0x3a61da,0x3a61da,2 +np.float32,0xbc04de80,0xbc04dedf,2 +np.float32,0x7f1e7c52,0x7f800000,2 +np.float32,0x8058ee88,0x8058ee88,2 +np.float32,0x806dd660,0x806dd660,2 +np.float32,0x7e4af9,0x7e4af9,2 +np.float32,0x80702d27,0x80702d27,2 +np.float32,0x802cdad1,0x802cdad1,2 +np.float32,0x3e9b5c23,0x3e9dc149,2 +np.float32,0x7f076e89,0x7f800000,2 +np.float32,0x7f129d68,0x7f800000,2 +np.float32,0x7f6f0b0a,0x7f800000,2 +np.float32,0x7eafafb5,0x7f800000,2 +np.float32,0xbf2ef2ca,0xbf3ce332,2 +np.float32,0xff34c000,0xff800000,2 +np.float32,0x7f559274,0x7f800000,2 +np.float32,0xfed08556,0xff800000,2 +np.float32,0xbf014621,0xbf06d6ad,2 +np.float32,0xff23086a,0xff800000,2 +np.float32,0x6cb33f,0x6cb33f,2 +np.float32,0xfe6e3ffc,0xff800000,2 +np.float32,0x3e6bbec0,0x3e6dd546,2 +np.float32,0x8036afa6,0x8036afa6,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x3e0ed05c,0x3e0f46ff,2 +np.float32,0x3ec9215c,0x3ece57e6,2 +np.float32,0xbf449fa4,0xbf5888aa,2 +np.float32,0xff2c6640,0xff800000,2 +np.float32,0x7f08f4a7,0x7f800000,2 +np.float32,0xbf4f63e5,0xbf66d4c1,2 +np.float32,0x3f800000,0x3f966cfe,2 +np.float32,0xfe86c7d2,0xff800000,2 +np.float32,0x3f63f969,0x3f81a970,2 +np.float32,0xbd7022d0,0xbd704609,2 +np.float32,0xbead906c,0xbeb0e853,2 +np.float32,0x7ef149ee,0x7f800000,2 +np.float32,0xff0b9ff7,0xff800000,2 +np.float32,0x3f38380d,0x3f4888e7,2 +np.float32,0x3ef3a3e2,0x3efcf09e,2 +np.float32,0xff616477,0xff800000,2 +np.float32,0x3f3f83e4,0x3f51e2c3,2 +np.float32,0xbf79963c,0xbf918642,2 +np.float32,0x801416f4,0x801416f4,2 +np.float32,0xff75ce6d,0xff800000,2 +np.float32,0xbdbf3588,0xbdbf7cad,2 +np.float32,0xbe6ea938,0xbe70d3dc,2 +np.float32,0x8066f977,0x8066f977,2 +np.float32,0x3f5b5362,0x3f7728aa,2 +np.float32,0xbf72052c,0xbf8bdbd8,2 +np.float32,0xbe21ed74,0xbe229a6f,2 +np.float32,0x8062d19c,0x8062d19c,2 +np.float32,0x3ed8d01f,0x3edf59e6,2 +np.float32,0x803ed42b,0x803ed42b,2 
+np.float32,0xbe099a64,0xbe0a0481,2 +np.float32,0xbe173eb4,0xbe17cba2,2 +np.float32,0xbebdcf02,0xbec22faf,2 +np.float32,0x7e3ff29e,0x7f800000,2 +np.float32,0x367c92,0x367c92,2 +np.float32,0xbf5c9db8,0xbf78f4a4,2 +np.float32,0xff0b49ea,0xff800000,2 +np.float32,0x3f4f9bc4,0x3f672001,2 +np.float32,0x85d4a,0x85d4a,2 +np.float32,0x80643e33,0x80643e33,2 +np.float32,0x8013aabd,0x8013aabd,2 +np.float32,0xff6997c3,0xff800000,2 +np.float32,0x3f4dd43c,0x3f64bbb6,2 +np.float32,0xff13bbb9,0xff800000,2 +np.float32,0x3f34efa2,0x3f446187,2 +np.float32,0x3e4b2f10,0x3e4c850d,2 +np.float32,0xfef695c6,0xff800000,2 +np.float32,0x7f7e0057,0x7f800000,2 +np.float32,0x3f6e1b9c,0x3f88fa40,2 +np.float32,0x806e46cf,0x806e46cf,2 +np.float32,0x3f15a88a,0x3f1e546c,2 +np.float32,0xbd2de7d0,0xbd2df530,2 +np.float32,0xbf63cae0,0xbf818854,2 +np.float32,0xbdc3e1a0,0xbdc42e1e,2 +np.float32,0xbf11a038,0xbf199b98,2 +np.float32,0xbec13706,0xbec5d56b,2 +np.float32,0x3f1c5f54,0x3f26478d,2 +np.float32,0x3e9ea97e,0x3ea136b4,2 +np.float32,0xfeb5a508,0xff800000,2 +np.float32,0x7f4698f4,0x7f800000,2 +np.float32,0xff51ee2c,0xff800000,2 +np.float32,0xff5994df,0xff800000,2 +np.float32,0x4b9fb9,0x4b9fb9,2 +np.float32,0xfda10d98,0xff800000,2 +np.float32,0x525555,0x525555,2 +np.float32,0x7ed571ef,0x7f800000,2 +np.float32,0xbf600d18,0xbf7dc50c,2 +np.float32,0x3ec674ca,0x3ecb768b,2 +np.float32,0x3cb69115,0x3cb694f3,2 +np.float32,0x7eac75f2,0x7f800000,2 +np.float32,0x804d4d75,0x804d4d75,2 +np.float32,0xfed5292e,0xff800000,2 +np.float32,0x800ed06a,0x800ed06a,2 +np.float32,0xfec37584,0xff800000,2 +np.float32,0x3ef96ac7,0x3f01b326,2 +np.float32,0x42f743,0x42f743,2 +np.float32,0x3f56f442,0x3f711e39,2 +np.float32,0xbf7ea726,0xbf956375,2 +np.float32,0x806c7202,0x806c7202,2 +np.float32,0xbd8ee980,0xbd8f0733,2 +np.float32,0xbdf2e930,0xbdf37b18,2 +np.float32,0x3f103910,0x3f17f955,2 +np.float32,0xff123e8f,0xff800000,2 +np.float32,0x806e4b5d,0x806e4b5d,2 +np.float32,0xbf4f3bfc,0xbf669f07,2 +np.float32,0xbf070c16,0xbf0d6609,2 +np.float32,0xff00e0ba,0xff800000,2 +np.float32,0xff49d828,0xff800000,2 +np.float32,0x7e47f04a,0x7f800000,2 +np.float32,0x7e984dac,0x7f800000,2 +np.float32,0x3f77473c,0x3f8fc858,2 +np.float32,0x3f017439,0x3f070ac8,2 +np.float32,0x118417,0x118417,2 +np.float32,0xbcf7a2c0,0xbcf7ac68,2 +np.float32,0xfee46fee,0xff800000,2 +np.float32,0x3e42a648,0x3e43d2e9,2 +np.float32,0x80131916,0x80131916,2 +np.float32,0x806209d3,0x806209d3,2 +np.float32,0x807c1f12,0x807c1f12,2 +np.float32,0x2f3696,0x2f3696,2 +np.float32,0xff28722b,0xff800000,2 +np.float32,0x7f1416a1,0x7f800000,2 +np.float32,0x8054e7a1,0x8054e7a1,2 +np.float32,0xbddc39a0,0xbddca656,2 +np.float32,0x7dc60175,0x7f800000,2 +np.float64,0x7fd0ae584da15cb0,0x7ff0000000000000,1 +np.float64,0x7fd41d68e5283ad1,0x7ff0000000000000,1 +np.float64,0x7fe93073bb7260e6,0x7ff0000000000000,1 +np.float64,0x3fb4fd19d229fa34,0x3fb5031f57dbac0f,1 +np.float64,0x85609ce10ac2,0x85609ce10ac2,1 +np.float64,0xbfd7aa12ccaf5426,0xbfd8351003a320e2,1 +np.float64,0x8004487c9b4890fa,0x8004487c9b4890fa,1 +np.float64,0x7fe7584cfd2eb099,0x7ff0000000000000,1 +np.float64,0x800ea8edc6dd51dc,0x800ea8edc6dd51dc,1 +np.float64,0x3fe0924aa5a12495,0x3fe15276e271c6dc,1 +np.float64,0x3feb1abf6d36357f,0x3fee76b4d3d06964,1 +np.float64,0x3fa8c14534318280,0x3fa8c3bd5ce5923c,1 +np.float64,0x800b9f5915d73eb3,0x800b9f5915d73eb3,1 +np.float64,0xffc05aaa7820b554,0xfff0000000000000,1 +np.float64,0x800157eda8c2afdc,0x800157eda8c2afdc,1 +np.float64,0xffe8d90042b1b200,0xfff0000000000000,1 +np.float64,0x3feda02ea93b405d,0x3ff1057e61d08d59,1 
+np.float64,0xffd03b7361a076e6,0xfff0000000000000,1 +np.float64,0x3fe1a8ecd7e351da,0x3fe291eda9080847,1 +np.float64,0xffc5bfdff82b7fc0,0xfff0000000000000,1 +np.float64,0xbfe6fb3d386df67a,0xbfe9022c05df0565,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x7fa10c340c221867,0x7ff0000000000000,1 +np.float64,0x3fe55cbf1daab97e,0x3fe6fc1648258b75,1 +np.float64,0xbfddeb5f60bbd6be,0xbfdf056d4fb5825f,1 +np.float64,0xffddb1a8213b6350,0xfff0000000000000,1 +np.float64,0xbfb20545e4240a88,0xbfb2091579375176,1 +np.float64,0x3f735ded2026bbda,0x3f735df1dad4ee3a,1 +np.float64,0xbfd1eb91efa3d724,0xbfd227c044dead61,1 +np.float64,0xffd737c588ae6f8c,0xfff0000000000000,1 +np.float64,0x3fc46818ec28d032,0x3fc47e416c4237a6,1 +np.float64,0x0,0x0,1 +np.float64,0xffb632097a2c6410,0xfff0000000000000,1 +np.float64,0xbfcb5ae84b36b5d0,0xbfcb905613af55b8,1 +np.float64,0xbfe7b926402f724c,0xbfe9f4f0be6aacc3,1 +np.float64,0x80081840b3f03082,0x80081840b3f03082,1 +np.float64,0x3fe767a656eecf4d,0x3fe98c53b4779de7,1 +np.float64,0x8005834c088b0699,0x8005834c088b0699,1 +np.float64,0x80074e92658e9d26,0x80074e92658e9d26,1 +np.float64,0x80045d60c268bac2,0x80045d60c268bac2,1 +np.float64,0xffb9aecfe8335da0,0xfff0000000000000,1 +np.float64,0x7fcad3e1cd35a7c3,0x7ff0000000000000,1 +np.float64,0xbf881853d03030c0,0xbf8818783e28fc87,1 +np.float64,0xe18c6d23c318e,0xe18c6d23c318e,1 +np.float64,0x7fcb367b8f366cf6,0x7ff0000000000000,1 +np.float64,0x5c13436cb8269,0x5c13436cb8269,1 +np.float64,0xffe5399938aa7332,0xfff0000000000000,1 +np.float64,0xbfdc45dbc3b88bb8,0xbfdd33958222c27e,1 +np.float64,0xbfd714691bae28d2,0xbfd7954edbef810b,1 +np.float64,0xbfdf18b02b3e3160,0xbfe02ad13634c651,1 +np.float64,0x8003e6f276e7cde6,0x8003e6f276e7cde6,1 +np.float64,0x3febb6b412776d68,0x3fef4f753def31f9,1 +np.float64,0x7fe016a3b4a02d46,0x7ff0000000000000,1 +np.float64,0x3fdc899ac7b91336,0x3fdd7e1cee1cdfc8,1 +np.float64,0x800219271e24324f,0x800219271e24324f,1 +np.float64,0x1529d93e2a53c,0x1529d93e2a53c,1 +np.float64,0x800d5bc827fab790,0x800d5bc827fab790,1 +np.float64,0x3e1495107c293,0x3e1495107c293,1 +np.float64,0x3fe89da0f2b13b42,0x3feb1dc1f3015ad7,1 +np.float64,0x800ba8c17b975183,0x800ba8c17b975183,1 +np.float64,0x8002dacf0265b59f,0x8002dacf0265b59f,1 +np.float64,0xffe6d0a4cc2da149,0xfff0000000000000,1 +np.float64,0x3fdf23fe82be47fc,0x3fe03126d8e2b309,1 +np.float64,0xffe41b1f1c28363e,0xfff0000000000000,1 +np.float64,0xbfd635c634ac6b8c,0xbfd6a8966da6adaa,1 +np.float64,0x800755bc08eeab79,0x800755bc08eeab79,1 +np.float64,0x800ba4c47c374989,0x800ba4c47c374989,1 +np.float64,0x7fec9f7649793eec,0x7ff0000000000000,1 +np.float64,0x7fdbf45738b7e8ad,0x7ff0000000000000,1 +np.float64,0x3f5597f07eab4,0x3f5597f07eab4,1 +np.float64,0xbfbf4599183e8b30,0xbfbf5985d8c65097,1 +np.float64,0xbf5b200580364000,0xbf5b2006501b21ae,1 +np.float64,0x7f91868370230d06,0x7ff0000000000000,1 +np.float64,0x3838e2a67071d,0x3838e2a67071d,1 +np.float64,0xffefe3ff5d3fc7fe,0xfff0000000000000,1 +np.float64,0xffe66b26d06cd64d,0xfff0000000000000,1 +np.float64,0xbfd830a571b0614a,0xbfd8c526927c742c,1 +np.float64,0x7fe8442122f08841,0x7ff0000000000000,1 +np.float64,0x800efa8c637df519,0x800efa8c637df519,1 +np.float64,0xf0026835e004d,0xf0026835e004d,1 +np.float64,0xffb11beefe2237e0,0xfff0000000000000,1 +np.float64,0x3fef9bbb327f3776,0x3ff2809f10641c32,1 +np.float64,0x350595306a0b3,0x350595306a0b3,1 +np.float64,0xf7f6538befecb,0xf7f6538befecb,1 +np.float64,0xffe36379c4a6c6f3,0xfff0000000000000,1 +np.float64,0x28b1d82e5163c,0x28b1d82e5163c,1 +np.float64,0x70a3d804e147c,0x70a3d804e147c,1 
+np.float64,0xffd96c1bc9b2d838,0xfff0000000000000,1 +np.float64,0xffce8e00893d1c00,0xfff0000000000000,1 +np.float64,0x800f2bdcb25e57b9,0x800f2bdcb25e57b9,1 +np.float64,0xbfe0d9c63361b38c,0xbfe1a3eb02192b76,1 +np.float64,0xbfdc7b8711b8f70e,0xbfdd6e9db3a01e51,1 +np.float64,0x99e22ec133c46,0x99e22ec133c46,1 +np.float64,0xffeaef6ddab5dedb,0xfff0000000000000,1 +np.float64,0x7fe89c22c0f13845,0x7ff0000000000000,1 +np.float64,0x8002d5207de5aa42,0x8002d5207de5aa42,1 +np.float64,0x3fd1b13353236267,0x3fd1eb1b9345dfca,1 +np.float64,0x800ccae0a41995c1,0x800ccae0a41995c1,1 +np.float64,0x3fdbdaba38b7b574,0x3fdcbdfcbca37ce6,1 +np.float64,0x5b06d12cb60db,0x5b06d12cb60db,1 +np.float64,0xffd52262752a44c4,0xfff0000000000000,1 +np.float64,0x5a17f050b42ff,0x5a17f050b42ff,1 +np.float64,0x3d24205e7a485,0x3d24205e7a485,1 +np.float64,0x7fbed4dec63da9bd,0x7ff0000000000000,1 +np.float64,0xbfe56e9776aadd2f,0xbfe71212863c284f,1 +np.float64,0x7fea0bc952341792,0x7ff0000000000000,1 +np.float64,0x800f692d139ed25a,0x800f692d139ed25a,1 +np.float64,0xffdb63feab36c7fe,0xfff0000000000000,1 +np.float64,0x3fe1c2297fe38452,0x3fe2af21293c9571,1 +np.float64,0x7fede384747bc708,0x7ff0000000000000,1 +np.float64,0x800440169288802e,0x800440169288802e,1 +np.float64,0xffe3241eeb26483e,0xfff0000000000000,1 +np.float64,0xffe28f3879651e70,0xfff0000000000000,1 +np.float64,0xa435cbc1486d,0xa435cbc1486d,1 +np.float64,0x7fe55e08db6abc11,0x7ff0000000000000,1 +np.float64,0x1405e624280be,0x1405e624280be,1 +np.float64,0x3fd861bdf0b0c37c,0x3fd8f9d2e33e45e5,1 +np.float64,0x3feeb67cdc3d6cfa,0x3ff1d337d81d1c14,1 +np.float64,0x3fd159a10e22b342,0x3fd1903be7c2ea0c,1 +np.float64,0x3fd84626bc308c4d,0x3fd8dc373645e65b,1 +np.float64,0xffd3da81d9a7b504,0xfff0000000000000,1 +np.float64,0xbfd4a768b8294ed2,0xbfd503aa7c240051,1 +np.float64,0x3fe3059f2a660b3e,0x3fe42983e0c6bb2e,1 +np.float64,0x3fe3b8353827706a,0x3fe4fdd635c7269b,1 +np.float64,0xbfe4af0399695e07,0xbfe6277d9002b46c,1 +np.float64,0xbfd7e18a92afc316,0xbfd87066b54c4fe6,1 +np.float64,0x800432bcab48657a,0x800432bcab48657a,1 +np.float64,0x80033d609d267ac2,0x80033d609d267ac2,1 +np.float64,0x7fef5f758e7ebeea,0x7ff0000000000000,1 +np.float64,0xbfed7833dbfaf068,0xbff0e85bf45a5ebc,1 +np.float64,0x3fe2283985a45073,0x3fe325b0a9099c74,1 +np.float64,0xe820b4b3d0417,0xe820b4b3d0417,1 +np.float64,0x8003ecb72aa7d96f,0x8003ecb72aa7d96f,1 +np.float64,0xbfeab2c755b5658f,0xbfede7c83e92a625,1 +np.float64,0xbfc7b287f72f6510,0xbfc7d53ef2ffe9dc,1 +np.float64,0xffd9a41d0f33483a,0xfff0000000000000,1 +np.float64,0x3fd3a5b6e3a74b6c,0x3fd3f516f39a4725,1 +np.float64,0x800bc72091578e42,0x800bc72091578e42,1 +np.float64,0x800ff405ce9fe80c,0x800ff405ce9fe80c,1 +np.float64,0x57918600af24,0x57918600af24,1 +np.float64,0x2a5be7fa54b7e,0x2a5be7fa54b7e,1 +np.float64,0xbfdca7886bb94f10,0xbfdd9f142b5b43e4,1 +np.float64,0xbfe216993ee42d32,0xbfe3112936590995,1 +np.float64,0xbfe06bd9cf20d7b4,0xbfe126cd353ab42f,1 +np.float64,0x8003e6c31827cd87,0x8003e6c31827cd87,1 +np.float64,0x8005f37d810be6fc,0x8005f37d810be6fc,1 +np.float64,0x800715b081ae2b62,0x800715b081ae2b62,1 +np.float64,0x3fef94c35bff2986,0x3ff27b4bed2f4051,1 +np.float64,0x6f5798e0deb0,0x6f5798e0deb0,1 +np.float64,0x3fcef1f05c3de3e1,0x3fcf3f557550598f,1 +np.float64,0xbf9a91c400352380,0xbf9a92876273b85c,1 +np.float64,0x3fc9143f7f322880,0x3fc93d678c05d26b,1 +np.float64,0x78ad847af15b1,0x78ad847af15b1,1 +np.float64,0x8000fdc088c1fb82,0x8000fdc088c1fb82,1 +np.float64,0x800200fd304401fb,0x800200fd304401fb,1 +np.float64,0x7fb8ab09dc315613,0x7ff0000000000000,1 
+np.float64,0x3fe949771b7292ee,0x3fec00891c3fc5a2,1 +np.float64,0xbfc54cae0e2a995c,0xbfc565e0f3d0e3af,1 +np.float64,0xffd546161e2a8c2c,0xfff0000000000000,1 +np.float64,0x800fe1d1279fc3a2,0x800fe1d1279fc3a2,1 +np.float64,0x3fd9c45301b388a8,0x3fda77fa1f4c79bf,1 +np.float64,0x7fe10ff238221fe3,0x7ff0000000000000,1 +np.float64,0xbfbc2181ae384300,0xbfbc3002229155c4,1 +np.float64,0xbfe7bbfae4ef77f6,0xbfe9f895e91f468d,1 +np.float64,0x800d3d994f7a7b33,0x800d3d994f7a7b33,1 +np.float64,0xffe6e15a896dc2b4,0xfff0000000000000,1 +np.float64,0x800e6b6c8abcd6d9,0x800e6b6c8abcd6d9,1 +np.float64,0xbfd862c938b0c592,0xbfd8faf1cdcb09db,1 +np.float64,0xffe2411f8464823e,0xfff0000000000000,1 +np.float64,0xffd0b32efaa1665e,0xfff0000000000000,1 +np.float64,0x3ac4ace475896,0x3ac4ace475896,1 +np.float64,0xf9c3a7ebf3875,0xf9c3a7ebf3875,1 +np.float64,0xdb998ba5b7332,0xdb998ba5b7332,1 +np.float64,0xbfe438a14fe87142,0xbfe5981751e4c5cd,1 +np.float64,0xbfbcf48cbc39e918,0xbfbd045d60e65d3a,1 +np.float64,0x7fde499615bc932b,0x7ff0000000000000,1 +np.float64,0x800bba269057744e,0x800bba269057744e,1 +np.float64,0x3fc9bb1ba3337638,0x3fc9e78fdb6799c1,1 +np.float64,0xffd9f974fbb3f2ea,0xfff0000000000000,1 +np.float64,0x7fcf1ad1693e35a2,0x7ff0000000000000,1 +np.float64,0x7fe5dcedd32bb9db,0x7ff0000000000000,1 +np.float64,0xeb06500bd60ca,0xeb06500bd60ca,1 +np.float64,0x7fd73e7b592e7cf6,0x7ff0000000000000,1 +np.float64,0xbfe9d91ae873b236,0xbfecc08482849bcd,1 +np.float64,0xffc85338b730a670,0xfff0000000000000,1 +np.float64,0x7fbba41eee37483d,0x7ff0000000000000,1 +np.float64,0x3fed5624fb7aac4a,0x3ff0cf9f0de1fd54,1 +np.float64,0xffe566d80d6acdb0,0xfff0000000000000,1 +np.float64,0x3fd4477884a88ef1,0x3fd49ec7acdd25a0,1 +np.float64,0x3fcb98c5fd37318c,0x3fcbcfa20e2c2712,1 +np.float64,0xffdeba71d5bd74e4,0xfff0000000000000,1 +np.float64,0x8001edc59dc3db8c,0x8001edc59dc3db8c,1 +np.float64,0x3fe6b09e896d613e,0x3fe8a3bb541ec0e3,1 +np.float64,0x3fe8694b4970d296,0x3fead94d271d05cf,1 +np.float64,0xb52c27bf6a585,0xb52c27bf6a585,1 +np.float64,0x7fcb0a21d9361443,0x7ff0000000000000,1 +np.float64,0xbfd9efc68cb3df8e,0xbfdaa7058c0ccbd1,1 +np.float64,0x8007cd170fef9a2f,0x8007cd170fef9a2f,1 +np.float64,0x3fe83325e770664c,0x3fea92c55c9d567e,1 +np.float64,0x800bd0085537a011,0x800bd0085537a011,1 +np.float64,0xffe05b9e7820b73c,0xfff0000000000000,1 +np.float64,0x3fea4ce4347499c8,0x3fed5cea9fdc541b,1 +np.float64,0x7fe08aae1921155b,0x7ff0000000000000,1 +np.float64,0x3fe7a5e7deef4bd0,0x3fe9dc2e20cfb61c,1 +np.float64,0xbfe0ccc8e6e19992,0xbfe195175f32ee3f,1 +np.float64,0xbfe8649717f0c92e,0xbfead3298974dcf0,1 +np.float64,0x7fed6c5308bad8a5,0x7ff0000000000000,1 +np.float64,0xffdbd8c7af37b190,0xfff0000000000000,1 +np.float64,0xbfb2bc4d06257898,0xbfb2c09569912839,1 +np.float64,0x3fc62eca512c5d95,0x3fc64b4251bce8f9,1 +np.float64,0xbfcae2ddbd35c5bc,0xbfcb15971fc61312,1 +np.float64,0x18d26ce831a4f,0x18d26ce831a4f,1 +np.float64,0x7fe38b279267164e,0x7ff0000000000000,1 +np.float64,0x97e1d9ab2fc3b,0x97e1d9ab2fc3b,1 +np.float64,0xbfee8e4785fd1c8f,0xbff1b52d16807627,1 +np.float64,0xbfb189b4a6231368,0xbfb18d37e83860ee,1 +np.float64,0xffd435761ea86aec,0xfff0000000000000,1 +np.float64,0x3fe6c48ebced891e,0x3fe8bcea189c3867,1 +np.float64,0x7fdadd3678b5ba6c,0x7ff0000000000000,1 +np.float64,0x7fea8f15b7b51e2a,0x7ff0000000000000,1 +np.float64,0xbff0000000000000,0xbff2cd9fc44eb982,1 +np.float64,0x80004c071120980f,0x80004c071120980f,1 +np.float64,0x8005367adfea6cf6,0x8005367adfea6cf6,1 +np.float64,0x3fbdc9139a3b9220,0x3fbdda4aba667ce5,1 +np.float64,0x7fed5ee3ad7abdc6,0x7ff0000000000000,1 
+np.float64,0x51563fb2a2ac9,0x51563fb2a2ac9,1 +np.float64,0xbfba7d26ce34fa50,0xbfba894229c50ea1,1 +np.float64,0x6c10db36d821c,0x6c10db36d821c,1 +np.float64,0xbfbdaec0d03b5d80,0xbfbdbfca6ede64f4,1 +np.float64,0x800a1cbe7414397d,0x800a1cbe7414397d,1 +np.float64,0x800ae6e7f2d5cdd0,0x800ae6e7f2d5cdd0,1 +np.float64,0x3fea63d3fef4c7a8,0x3fed7c1356688ddc,1 +np.float64,0xbfde1e3a88bc3c76,0xbfdf3dfb09cc2260,1 +np.float64,0xbfd082d75a2105ae,0xbfd0b1e28c84877b,1 +np.float64,0x7fea1e5e85f43cbc,0x7ff0000000000000,1 +np.float64,0xffe2237a1a6446f4,0xfff0000000000000,1 +np.float64,0x3fd1e2be8523c57d,0x3fd21e93dfd1bbc4,1 +np.float64,0x3fd1acd428a359a8,0x3fd1e6916a42bc3a,1 +np.float64,0x61a152f0c342b,0x61a152f0c342b,1 +np.float64,0xbfc61a6b902c34d8,0xbfc6369557690ba0,1 +np.float64,0x7fd1a84b1f235095,0x7ff0000000000000,1 +np.float64,0x1c5cc7e638b9a,0x1c5cc7e638b9a,1 +np.float64,0x8008039755f0072f,0x8008039755f0072f,1 +np.float64,0x80097532d6f2ea66,0x80097532d6f2ea66,1 +np.float64,0xbfc6d979a12db2f4,0xbfc6f89777c53f8f,1 +np.float64,0x8004293ab1085276,0x8004293ab1085276,1 +np.float64,0x3fc2af5c21255eb8,0x3fc2c05dc0652554,1 +np.float64,0xbfd9a5ab87b34b58,0xbfda56d1076abc98,1 +np.float64,0xbfebd360ba77a6c2,0xbfef779fd6595f9b,1 +np.float64,0xffd5313c43aa6278,0xfff0000000000000,1 +np.float64,0xbfe994a262b32945,0xbfec64b969852ed5,1 +np.float64,0x3fce01a52e3c034a,0x3fce48324eb29c31,1 +np.float64,0x56bd74b2ad7af,0x56bd74b2ad7af,1 +np.float64,0xb84093ff70813,0xb84093ff70813,1 +np.float64,0x7fe776df946eedbe,0x7ff0000000000000,1 +np.float64,0xbfe294ac2e652958,0xbfe3a480938afa26,1 +np.float64,0x7fe741b4d0ee8369,0x7ff0000000000000,1 +np.float64,0x800b7e8a1056fd15,0x800b7e8a1056fd15,1 +np.float64,0x7fd28f1269251e24,0x7ff0000000000000,1 +np.float64,0x8009d4492e73a893,0x8009d4492e73a893,1 +np.float64,0x3fe3f27fca67e500,0x3fe543aff825e244,1 +np.float64,0x3fd12447e5a24890,0x3fd158efe43c0452,1 +np.float64,0xbfd58df0f2ab1be2,0xbfd5f6d908e3ebce,1 +np.float64,0xffc0a8e4642151c8,0xfff0000000000000,1 +np.float64,0xbfedb197787b632f,0xbff112367ec9d3e7,1 +np.float64,0xffdde07a7f3bc0f4,0xfff0000000000000,1 +np.float64,0x3fe91f3e5b723e7d,0x3febc886a1d48364,1 +np.float64,0x3fe50415236a082a,0x3fe68f43a5468d8c,1 +np.float64,0xd9a0c875b3419,0xd9a0c875b3419,1 +np.float64,0xbfee04ccf4bc099a,0xbff14f4740a114cf,1 +np.float64,0xbfd2bcc6a125798e,0xbfd30198b1e7d7ed,1 +np.float64,0xbfeb3c16f8f6782e,0xbfeea4ce47d09f58,1 +np.float64,0xffd3ba19e4a77434,0xfff0000000000000,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x3fdef0a642bde14d,0x3fe0146677b3a488,1 +np.float64,0x3fdc3dd0a2b87ba0,0x3fdd2abe65651487,1 +np.float64,0x3fdbb1fd47b763fb,0x3fdc915a2fd19f4b,1 +np.float64,0x7fbaa375e63546eb,0x7ff0000000000000,1 +np.float64,0x433ef8ee867e0,0x433ef8ee867e0,1 +np.float64,0xf5345475ea68b,0xf5345475ea68b,1 +np.float64,0xa126419b424c8,0xa126419b424c8,1 +np.float64,0x3fe0057248200ae5,0x3fe0b2f488339709,1 +np.float64,0xffc5e3b82f2bc770,0xfff0000000000000,1 +np.float64,0xffb215c910242b90,0xfff0000000000000,1 +np.float64,0xbfeba4ae0837495c,0xbfef3642e4b54aac,1 +np.float64,0xffbb187ebe363100,0xfff0000000000000,1 +np.float64,0x3fe4c6a496a98d49,0x3fe64440cdf06aab,1 +np.float64,0x800767a28f6ecf46,0x800767a28f6ecf46,1 +np.float64,0x3fdbed63b1b7dac8,0x3fdcd27318c0b683,1 +np.float64,0x80006d8339e0db07,0x80006d8339e0db07,1 +np.float64,0x8000b504f0416a0b,0x8000b504f0416a0b,1 +np.float64,0xbfe88055bfb100ac,0xbfeaf767bd2767b9,1 +np.float64,0x3fefe503317fca06,0x3ff2b8d4057240c8,1 +np.float64,0x7fe307538b660ea6,0x7ff0000000000000,1 
+np.float64,0x944963c12892d,0x944963c12892d,1 +np.float64,0xbfd2c20b38a58416,0xbfd30717900f8233,1 +np.float64,0x7feed04e3e3da09b,0x7ff0000000000000,1 +np.float64,0x3fe639619cac72c3,0x3fe80de7b8560a8d,1 +np.float64,0x3fde066c66bc0cd9,0x3fdf237fb759a652,1 +np.float64,0xbfc56b22b52ad644,0xbfc584c267a47ebd,1 +np.float64,0x3fc710d5b12e21ab,0x3fc730d817ba0d0c,1 +np.float64,0x3fee1dfc347c3bf8,0x3ff161d9c3e15f68,1 +np.float64,0x3fde400954bc8013,0x3fdf639e5cc9e7a9,1 +np.float64,0x56e701f8adce1,0x56e701f8adce1,1 +np.float64,0xbfe33bbc89e67779,0xbfe46996b39381fe,1 +np.float64,0x7fec89e2f87913c5,0x7ff0000000000000,1 +np.float64,0xbfdad58b40b5ab16,0xbfdba098cc0ad5d3,1 +np.float64,0x3fe99c76a13338ed,0x3fec6f31bae613e7,1 +np.float64,0x3fe4242a29a84854,0x3fe57f6b45e5c0ef,1 +np.float64,0xbfe79d3199ef3a63,0xbfe9d0fb96c846ba,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbfeb35a6cf766b4e,0xbfee9be4e7e943f7,1 +np.float64,0x3e047f267c091,0x3e047f267c091,1 +np.float64,0x4bf1376a97e28,0x4bf1376a97e28,1 +np.float64,0x800ef419685de833,0x800ef419685de833,1 +np.float64,0x3fe0efa61a21df4c,0x3fe1bce98baf2f0f,1 +np.float64,0x3fcc13c4d738278a,0x3fcc4d8c778bcaf7,1 +np.float64,0x800f1d291afe3a52,0x800f1d291afe3a52,1 +np.float64,0x3fd3f10e6da7e21d,0x3fd444106761ea1d,1 +np.float64,0x800706d6d76e0dae,0x800706d6d76e0dae,1 +np.float64,0xffa1ffbc9023ff80,0xfff0000000000000,1 +np.float64,0xbfe098f26d6131e5,0xbfe15a08a5f3eac0,1 +np.float64,0x3fe984f9cc7309f4,0x3fec4fcdbdb1cb9b,1 +np.float64,0x7fd7c2f1eaaf85e3,0x7ff0000000000000,1 +np.float64,0x800a8adb64f515b7,0x800a8adb64f515b7,1 +np.float64,0x80060d3ffc8c1a81,0x80060d3ffc8c1a81,1 +np.float64,0xbfec37e4aef86fc9,0xbff0029a6a1d61e2,1 +np.float64,0x800b21bcfcf6437a,0x800b21bcfcf6437a,1 +np.float64,0xbfc08facc1211f58,0xbfc09b8380ea8032,1 +np.float64,0xffebb4b52577696a,0xfff0000000000000,1 +np.float64,0x800b08096df61013,0x800b08096df61013,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0xffd2f0c9c8a5e194,0xfff0000000000000,1 +np.float64,0xffe78b2299af1644,0xfff0000000000000,1 +np.float64,0x7fd0444794a0888e,0x7ff0000000000000,1 +np.float64,0x307c47b460f8a,0x307c47b460f8a,1 +np.float64,0xffe6b4c851ad6990,0xfff0000000000000,1 +np.float64,0xffe1877224a30ee4,0xfff0000000000000,1 +np.float64,0x48d7b5c091af7,0x48d7b5c091af7,1 +np.float64,0xbfa1dc6b1c23b8d0,0xbfa1dd5889e1b7da,1 +np.float64,0x3fe5004737ea008e,0x3fe68a9c310b08c1,1 +np.float64,0x7fec5f0742b8be0e,0x7ff0000000000000,1 +np.float64,0x3fd0a86285a150c5,0x3fd0d8b238d557fa,1 +np.float64,0x7fed60380efac06f,0x7ff0000000000000,1 +np.float64,0xeeca74dfdd94f,0xeeca74dfdd94f,1 +np.float64,0x3fda05aaa8b40b54,0x3fdabebdbf405e84,1 +np.float64,0x800e530ceb1ca61a,0x800e530ceb1ca61a,1 +np.float64,0x800b3866379670cd,0x800b3866379670cd,1 +np.float64,0xffedb3e7fa3b67cf,0xfff0000000000000,1 +np.float64,0xffdfa4c0713f4980,0xfff0000000000000,1 +np.float64,0x7fe4679e0728cf3b,0x7ff0000000000000,1 +np.float64,0xffe978611ef2f0c2,0xfff0000000000000,1 +np.float64,0x7fc9f4601f33e8bf,0x7ff0000000000000,1 +np.float64,0x3fd4942de6a9285c,0x3fd4ef6e089357dd,1 +np.float64,0x3faafe064435fc00,0x3fab0139cd6564dc,1 +np.float64,0x800d145a519a28b5,0x800d145a519a28b5,1 +np.float64,0xbfd82636f2304c6e,0xbfd8b9f75ddd2f02,1 +np.float64,0xbfdf2e975e3e5d2e,0xbfe037174280788c,1 +np.float64,0x7fd7051d7c2e0a3a,0x7ff0000000000000,1 +np.float64,0x8007933d452f267b,0x8007933d452f267b,1 +np.float64,0xb2043beb64088,0xb2043beb64088,1 +np.float64,0x3febfd9708f7fb2e,0x3fefb2ef090f18d2,1 +np.float64,0xffd9bc6bc83378d8,0xfff0000000000000,1 
+np.float64,0xc10f9fd3821f4,0xc10f9fd3821f4,1 +np.float64,0x3fe3c83413a79068,0x3fe510fa1dd8edf7,1 +np.float64,0x3fbe26ccda3c4da0,0x3fbe38a892279975,1 +np.float64,0x3fcc1873103830e6,0x3fcc5257a6ae168d,1 +np.float64,0xe7e000e9cfc00,0xe7e000e9cfc00,1 +np.float64,0xffda73852bb4e70a,0xfff0000000000000,1 +np.float64,0xbfe831be19f0637c,0xbfea90f1b34da3e5,1 +np.float64,0xbfeb568f3076ad1e,0xbfeec97eebfde862,1 +np.float64,0x510a6ad0a214e,0x510a6ad0a214e,1 +np.float64,0x3fe6ba7e35ed74fc,0x3fe8b032a9a28c6a,1 +np.float64,0xffeb5cdcff76b9b9,0xfff0000000000000,1 +np.float64,0x4f0a23e89e145,0x4f0a23e89e145,1 +np.float64,0x446ec20288dd9,0x446ec20288dd9,1 +np.float64,0x7fe2521b02e4a435,0x7ff0000000000000,1 +np.float64,0x8001cd2969e39a54,0x8001cd2969e39a54,1 +np.float64,0x3fdfe90600bfd20c,0x3fe09fdcca10001c,1 +np.float64,0x7fd660c5762cc18a,0x7ff0000000000000,1 +np.float64,0xbfb11b23aa223648,0xbfb11e661949b377,1 +np.float64,0x800e025285fc04a5,0x800e025285fc04a5,1 +np.float64,0xffb180bb18230178,0xfff0000000000000,1 +np.float64,0xaaf590df55eb2,0xaaf590df55eb2,1 +np.float64,0xbfe8637d9df0c6fb,0xbfead1ba429462ec,1 +np.float64,0x7fd2577866a4aef0,0x7ff0000000000000,1 +np.float64,0xbfcfb2ab5a3f6558,0xbfd002ee87f272b9,1 +np.float64,0x7fdd64ae2f3ac95b,0x7ff0000000000000,1 +np.float64,0xffd1a502c9234a06,0xfff0000000000000,1 +np.float64,0x7fc4be4b60297c96,0x7ff0000000000000,1 +np.float64,0xbfb46b712a28d6e0,0xbfb470fca9919172,1 +np.float64,0xffdef913033df226,0xfff0000000000000,1 +np.float64,0x3fd94a3545b2946b,0x3fd9f40431ce9f9c,1 +np.float64,0x7fef88a0b6ff1140,0x7ff0000000000000,1 +np.float64,0xbfbcc81876399030,0xbfbcd7a0ab6cb388,1 +np.float64,0x800a4acfdd9495a0,0x800a4acfdd9495a0,1 +np.float64,0xffe270b3d5e4e167,0xfff0000000000000,1 +np.float64,0xbfd23f601e247ec0,0xbfd27eeca50a49eb,1 +np.float64,0x7fec6e796a78dcf2,0x7ff0000000000000,1 +np.float64,0x3fb85e0c9630bc19,0x3fb867791ccd6c72,1 +np.float64,0x7fe49fc424a93f87,0x7ff0000000000000,1 +np.float64,0xbfe75a99fbaeb534,0xbfe97ba37663de4c,1 +np.float64,0xffe85011b630a023,0xfff0000000000000,1 +np.float64,0xffe5962e492b2c5c,0xfff0000000000000,1 +np.float64,0x6f36ed4cde6de,0x6f36ed4cde6de,1 +np.float64,0x3feb72170af6e42e,0x3feeefbe6f1a2084,1 +np.float64,0x80014d8d60629b1c,0x80014d8d60629b1c,1 +np.float64,0xbfe0eb40d321d682,0xbfe1b7e31f252bf1,1 +np.float64,0x31fe305663fc7,0x31fe305663fc7,1 +np.float64,0x3fd2cd6381a59ac7,0x3fd312edc9868a4d,1 +np.float64,0xffcf0720793e0e40,0xfff0000000000000,1 +np.float64,0xbfeef1ef133de3de,0xbff1ffd5e1a3b648,1 +np.float64,0xbfd01c787aa038f0,0xbfd0482be3158a01,1 +np.float64,0x3fda3607c5b46c10,0x3fdaf3301e217301,1 +np.float64,0xffda9a9911b53532,0xfff0000000000000,1 +np.float64,0x3fc0b37c392166f8,0x3fc0bfa076f3c43e,1 +np.float64,0xbfe06591c760cb24,0xbfe11fad179ea12c,1 +np.float64,0x8006e369c20dc6d4,0x8006e369c20dc6d4,1 +np.float64,0x3fdf2912a8be5224,0x3fe033ff74b92f4d,1 +np.float64,0xffc0feb07821fd60,0xfff0000000000000,1 +np.float64,0xa4b938c949727,0xa4b938c949727,1 +np.float64,0x8008fe676571fccf,0x8008fe676571fccf,1 +np.float64,0xbfdda68459bb4d08,0xbfdeb8faab34fcbc,1 +np.float64,0xbfda18b419343168,0xbfdad360ca52ec7c,1 +np.float64,0x3febcbae35b7975c,0x3fef6cd51c9ebc15,1 +np.float64,0x3fbec615f63d8c30,0x3fbed912ba729926,1 +np.float64,0x7f99a831c8335063,0x7ff0000000000000,1 +np.float64,0x3fe663e8826cc7d1,0x3fe84330bd9aada8,1 +np.float64,0x70a9f9e6e1540,0x70a9f9e6e1540,1 +np.float64,0x8a13a5db14275,0x8a13a5db14275,1 +np.float64,0x7fc4330a3b286613,0x7ff0000000000000,1 +np.float64,0xbfe580c6136b018c,0xbfe728806cc7a99a,1 
+np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0xffec079d5df80f3a,0xfff0000000000000,1 +np.float64,0x8e1173c31c22f,0x8e1173c31c22f,1 +np.float64,0x3fe088456d21108b,0x3fe14712ca414103,1 +np.float64,0x3fe1b76f73636edf,0x3fe2a2b658557112,1 +np.float64,0xbfd4a1dd162943ba,0xbfd4fdd45cae8fb8,1 +np.float64,0x7fd60b46c8ac168d,0x7ff0000000000000,1 +np.float64,0xffe36cc3b166d987,0xfff0000000000000,1 +np.float64,0x3fdc2ae0cfb855c0,0x3fdd15f026773151,1 +np.float64,0xbfc41aa203283544,0xbfc42fd1b145fdd5,1 +np.float64,0xffed90c55fbb218a,0xfff0000000000000,1 +np.float64,0x3fe67e3a9aecfc75,0x3fe86440db65b4f6,1 +np.float64,0x7fd12dbeaba25b7c,0x7ff0000000000000,1 +np.float64,0xbfe1267c0de24cf8,0xbfe1fbb611bdf1e9,1 +np.float64,0x22e5619645cad,0x22e5619645cad,1 +np.float64,0x7fe327c72ea64f8d,0x7ff0000000000000,1 +np.float64,0x7fd2c3f545a587ea,0x7ff0000000000000,1 +np.float64,0x7fc7b689372f6d11,0x7ff0000000000000,1 +np.float64,0xc5e140bd8bc28,0xc5e140bd8bc28,1 +np.float64,0x3fccb3627a3966c5,0x3fccf11b44fa4102,1 +np.float64,0xbfd2cf725c259ee4,0xbfd315138d0e5dca,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbfd3dfa8b627bf52,0xbfd431d17b235477,1 +np.float64,0xbfb82124e6304248,0xbfb82a4b6d9c2663,1 +np.float64,0x3fdcd590d9b9ab22,0x3fddd1d548806347,1 +np.float64,0x7fdee0cd1b3dc199,0x7ff0000000000000,1 +np.float64,0x8004ebfc60a9d7fa,0x8004ebfc60a9d7fa,1 +np.float64,0x3fe8eb818b71d704,0x3feb842679806108,1 +np.float64,0xffdd5e8fe63abd20,0xfff0000000000000,1 +np.float64,0xbfe3efcbd9e7df98,0xbfe54071436645ee,1 +np.float64,0x3fd5102557aa204b,0x3fd57203d31a05b8,1 +np.float64,0x3fe6318af7ec6316,0x3fe8041a177cbf96,1 +np.float64,0x3fdf3cecdabe79da,0x3fe03f2084ffbc78,1 +np.float64,0x7fe0ab6673a156cc,0x7ff0000000000000,1 +np.float64,0x800037d5c6c06fac,0x800037d5c6c06fac,1 +np.float64,0xffce58b86a3cb170,0xfff0000000000000,1 +np.float64,0xbfe3455d6ce68abb,0xbfe475034cecb2b8,1 +np.float64,0x991b663d3236d,0x991b663d3236d,1 +np.float64,0x3fda82d37c3505a7,0x3fdb46973da05c12,1 +np.float64,0x3f9b736fa036e6df,0x3f9b74471c234411,1 +np.float64,0x8001c96525e392cb,0x8001c96525e392cb,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfaf59122c3eb220,0xbfaf5e15f8b272b0,1 +np.float64,0xbf9aa7d288354fa0,0xbf9aa897d2a40cb5,1 +np.float64,0x8004a43428694869,0x8004a43428694869,1 +np.float64,0x7feead476dbd5a8e,0x7ff0000000000000,1 +np.float64,0xffca150f81342a20,0xfff0000000000000,1 +np.float64,0x80047ec3bc88fd88,0x80047ec3bc88fd88,1 +np.float64,0xbfee3e5b123c7cb6,0xbff179c8b8334278,1 +np.float64,0x3fd172359f22e46b,0x3fd1a9ba6b1420a1,1 +np.float64,0x3fe8e5e242f1cbc5,0x3feb7cbcaefc4d5c,1 +np.float64,0x8007fb059a6ff60c,0x8007fb059a6ff60c,1 +np.float64,0xe3899e71c7134,0xe3899e71c7134,1 +np.float64,0x7fe3b98326a77305,0x7ff0000000000000,1 +np.float64,0x7fec4e206cb89c40,0x7ff0000000000000,1 +np.float64,0xbfa3b012c4276020,0xbfa3b150c13b3cc5,1 +np.float64,0xffefffffffffffff,0xfff0000000000000,1 +np.float64,0xffe28a5b9aa514b6,0xfff0000000000000,1 +np.float64,0xbfd76a6cc2aed4da,0xbfd7f10f4d04e7f6,1 +np.float64,0xbc2b1c0178564,0xbc2b1c0178564,1 +np.float64,0x6d9d444adb3a9,0x6d9d444adb3a9,1 +np.float64,0xbfdcadd368395ba6,0xbfdda6037b5c429c,1 +np.float64,0x3fe11891fde23124,0x3fe1ebc1c204b14b,1 +np.float64,0x3fdd66c3eebacd88,0x3fde72526b5304c4,1 +np.float64,0xbfe79d85612f3b0b,0xbfe9d1673bd1f6d6,1 +np.float64,0x3fed60abdabac158,0x3ff0d7426b3800a2,1 +np.float64,0xbfb0ffa54021ff48,0xbfb102d81073a9f0,1 +np.float64,0xd2452af5a48a6,0xd2452af5a48a6,1 +np.float64,0xf4b835c1e971,0xf4b835c1e971,1 
+np.float64,0x7e269cdafc4d4,0x7e269cdafc4d4,1 +np.float64,0x800097a21d812f45,0x800097a21d812f45,1 +np.float64,0x3fdfcc85e8bf990c,0x3fe08fcf770fd456,1 +np.float64,0xd8d53155b1aa6,0xd8d53155b1aa6,1 +np.float64,0x7fb8ed658831daca,0x7ff0000000000000,1 +np.float64,0xbfec865415b90ca8,0xbff03a4584d719f9,1 +np.float64,0xffd8cda62a319b4c,0xfff0000000000000,1 +np.float64,0x273598d84e6b4,0x273598d84e6b4,1 +np.float64,0x7fd566b5c32acd6b,0x7ff0000000000000,1 +np.float64,0xff61d9d48023b400,0xfff0000000000000,1 +np.float64,0xbfec5c3bf4f8b878,0xbff01c594243337c,1 +np.float64,0x7fd1be0561a37c0a,0x7ff0000000000000,1 +np.float64,0xffeaee3271b5dc64,0xfff0000000000000,1 +np.float64,0x800c0e1931b81c33,0x800c0e1931b81c33,1 +np.float64,0xbfad1171583a22e0,0xbfad1570e5c466d2,1 +np.float64,0x7fd783b0fe2f0761,0x7ff0000000000000,1 +np.float64,0x7fc39903e6273207,0x7ff0000000000000,1 +np.float64,0xffe00003c5600007,0xfff0000000000000,1 +np.float64,0x35a7b9c06b50,0x35a7b9c06b50,1 +np.float64,0x7fee441a22bc8833,0x7ff0000000000000,1 +np.float64,0xff6e47fbc03c9000,0xfff0000000000000,1 +np.float64,0xbfd3c3c9c8a78794,0xbfd41499b1912534,1 +np.float64,0x82c9c87f05939,0x82c9c87f05939,1 +np.float64,0xbfedeb0fe4fbd620,0xbff13c573ce9d3d0,1 +np.float64,0x2b79298656f26,0x2b79298656f26,1 +np.float64,0xbf5ee44f003dc800,0xbf5ee4503353c0ba,1 +np.float64,0xbfe1dd264e63ba4c,0xbfe2ce68116c7bf6,1 +np.float64,0x3fece10b7579c217,0x3ff07b21b11799c6,1 +np.float64,0x3fba47143a348e28,0x3fba52e601adf24c,1 +np.float64,0xffe9816e7a7302dc,0xfff0000000000000,1 +np.float64,0x8009a8047fd35009,0x8009a8047fd35009,1 +np.float64,0x800ac28e4e95851d,0x800ac28e4e95851d,1 +np.float64,0x80093facf4f27f5a,0x80093facf4f27f5a,1 +np.float64,0x3ff0000000000000,0x3ff2cd9fc44eb982,1 +np.float64,0x3fe76a9857eed530,0x3fe99018a5895a4f,1 +np.float64,0xbfd13c59a3a278b4,0xbfd171e133df0b16,1 +np.float64,0x7feb43bc83368778,0x7ff0000000000000,1 +np.float64,0xbfe2970c5fa52e18,0xbfe3a74a434c6efe,1 +np.float64,0xffd091c380212388,0xfff0000000000000,1 +np.float64,0x3febb3b9d2f76774,0x3fef4b4af2bd8580,1 +np.float64,0x7fec66787ef8ccf0,0x7ff0000000000000,1 +np.float64,0xbf935e185826bc40,0xbf935e640557a354,1 +np.float64,0x979df1552f3be,0x979df1552f3be,1 +np.float64,0x7fc096ee73212ddc,0x7ff0000000000000,1 +np.float64,0xbfe9de88faf3bd12,0xbfecc7d1ae691d1b,1 +np.float64,0x7fdc733f06b8e67d,0x7ff0000000000000,1 +np.float64,0xffd71be1a0ae37c4,0xfff0000000000000,1 +np.float64,0xb50dabd36a1b6,0xb50dabd36a1b6,1 +np.float64,0x7fce3d94d63c7b29,0x7ff0000000000000,1 +np.float64,0x7fbaf95e4435f2bc,0x7ff0000000000000,1 +np.float64,0x81a32a6f03466,0x81a32a6f03466,1 +np.float64,0xa99b5b4d5336c,0xa99b5b4d5336c,1 +np.float64,0x7f97c1eeb82f83dc,0x7ff0000000000000,1 +np.float64,0x3fe761636d6ec2c6,0x3fe98451160d2ffb,1 +np.float64,0xbfe3224ef5e6449e,0xbfe44b73eeadac52,1 +np.float64,0x7fde6feb0dbcdfd5,0x7ff0000000000000,1 +np.float64,0xbfee87f9ca7d0ff4,0xbff1b079e9d7f706,1 +np.float64,0x3fe46f4c9828de99,0x3fe5da2ab9609ea5,1 +np.float64,0xffb92fe882325fd0,0xfff0000000000000,1 +np.float64,0x80054bc63cea978d,0x80054bc63cea978d,1 +np.float64,0x3d988bea7b312,0x3d988bea7b312,1 +np.float64,0x3fe6468e1d6c8d1c,0x3fe81e64d37d39a8,1 +np.float64,0x3fd68eefc22d1de0,0x3fd7074264faeead,1 +np.float64,0xffb218a074243140,0xfff0000000000000,1 +np.float64,0x3fdbcb3b6cb79678,0x3fdcad011de40b7d,1 +np.float64,0x7fe3c161772782c2,0x7ff0000000000000,1 +np.float64,0x25575c904aaec,0x25575c904aaec,1 +np.float64,0x800fa43a8f5f4875,0x800fa43a8f5f4875,1 +np.float64,0x3fe41fc9e1e83f94,0x3fe57a25dd1a37f1,1 
+np.float64,0x3fd895f4a7b12be9,0x3fd931e7b721a08a,1 +np.float64,0xce31469f9c629,0xce31469f9c629,1 +np.float64,0xffea0f55ca341eab,0xfff0000000000000,1 +np.float64,0xffe831c9ba306393,0xfff0000000000000,1 +np.float64,0x7fe2056f03a40add,0x7ff0000000000000,1 +np.float64,0x7fd6b075e02d60eb,0x7ff0000000000000,1 +np.float64,0x3fdfbef4273f7de8,0x3fe0882c1f59efc0,1 +np.float64,0x8005b9e094ab73c2,0x8005b9e094ab73c2,1 +np.float64,0x3fea881ac6351036,0x3fedad7a319b887c,1 +np.float64,0xbfe2c61c7ee58c39,0xbfe3de9a99d8a9c6,1 +np.float64,0x30b0d3786161b,0x30b0d3786161b,1 +np.float64,0x3fa51d56a02a3aad,0x3fa51edee2d2ecef,1 +np.float64,0x79745732f2e8c,0x79745732f2e8c,1 +np.float64,0x800d55b4907aab69,0x800d55b4907aab69,1 +np.float64,0xbfbe8fcf0a3d1fa0,0xbfbea267fbb5bfdf,1 +np.float64,0xbfd04e2756a09c4e,0xbfd07b74d079f9a2,1 +np.float64,0x3fc65170552ca2e1,0x3fc66e6eb00c82ed,1 +np.float64,0xbfb0674b8020ce98,0xbfb06a2b4771b64c,1 +np.float64,0x2059975840b34,0x2059975840b34,1 +np.float64,0x33d1385467a28,0x33d1385467a28,1 +np.float64,0x3fea41b74ff4836f,0x3fed4dc1a09e53cc,1 +np.float64,0xbfe8e08c9d71c119,0xbfeb75b4c59a6bec,1 +np.float64,0x7fdbbf14d6377e29,0x7ff0000000000000,1 +np.float64,0x3fcd8b71513b16e0,0x3fcdcec80174f9ad,1 +np.float64,0x5c50bc94b8a18,0x5c50bc94b8a18,1 +np.float64,0x969a18f52d343,0x969a18f52d343,1 +np.float64,0x3fd7ae44462f5c89,0x3fd8398bc34e395c,1 +np.float64,0xffdd0f8617ba1f0c,0xfff0000000000000,1 +np.float64,0xfff0000000000000,0xfff0000000000000,1 +np.float64,0xbfe2f9badb65f376,0xbfe41b771320ece8,1 +np.float64,0x3fd140bc7fa29,0x3fd140bc7fa29,1 +np.float64,0xbfe14523b5628a48,0xbfe21ee850972043,1 +np.float64,0x3feedd0336bdba06,0x3ff1f01afc1f3a06,1 +np.float64,0x800de423ad7bc848,0x800de423ad7bc848,1 +np.float64,0x4cef857c99df1,0x4cef857c99df1,1 +np.float64,0xbfea55e0e374abc2,0xbfed691e41d648dd,1 +np.float64,0x3fe70d7a18ae1af4,0x3fe91955a34d8094,1 +np.float64,0xbfc62fc3032c5f88,0xbfc64c3ec25decb8,1 +np.float64,0x3fc915abb5322b58,0x3fc93edac5cc73fe,1 +np.float64,0x69aaff66d3561,0x69aaff66d3561,1 +np.float64,0x5c6a90f2b8d53,0x5c6a90f2b8d53,1 +np.float64,0x3fefe30dc1bfc61c,0x3ff2b752257bdacd,1 +np.float64,0x3fef15db15fe2bb6,0x3ff21aea05601396,1 +np.float64,0xbfe353e5ac66a7cc,0xbfe48644e6553d1a,1 +np.float64,0x3fe6d30cffada61a,0x3fe8cf3e4c61ddac,1 +np.float64,0x7fb7857eb62f0afc,0x7ff0000000000000,1 +np.float64,0xbfdd9b53d23b36a8,0xbfdeac91a7af1340,1 +np.float64,0x3fd1456357228ac7,0x3fd17b3f7d39b27a,1 +np.float64,0x3fb57d10ae2afa21,0x3fb5838702b806f4,1 +np.float64,0x800c59c96c98b393,0x800c59c96c98b393,1 +np.float64,0x7fc1f2413823e481,0x7ff0000000000000,1 +np.float64,0xbfa3983624273070,0xbfa3996fa26c419a,1 +np.float64,0x7fb28874ae2510e8,0x7ff0000000000000,1 +np.float64,0x3fe826d02a304da0,0x3fea82bec50bc0b6,1 +np.float64,0x8008d6f0d3d1ade2,0x8008d6f0d3d1ade2,1 +np.float64,0xffe7c970ca2f92e1,0xfff0000000000000,1 +np.float64,0x7fcf42bcaa3e8578,0x7ff0000000000000,1 +np.float64,0x7fda1ab517343569,0x7ff0000000000000,1 +np.float64,0xbfe7926a65ef24d5,0xbfe9c323dd890d5b,1 +np.float64,0xbfcaf6282d35ec50,0xbfcb294f36a0a33d,1 +np.float64,0x800ca49df8d9493c,0x800ca49df8d9493c,1 +np.float64,0xffea18d26af431a4,0xfff0000000000000,1 +np.float64,0x3fb72f276e2e5e50,0x3fb7374539fd1221,1 +np.float64,0xffa6b613842d6c20,0xfff0000000000000,1 +np.float64,0xbfeb3c7263f678e5,0xbfeea54cdb60b54c,1 +np.float64,0x3fc976d2ba32eda5,0x3fc9a1e83a058de4,1 +np.float64,0xbfe4acd4b0e959aa,0xbfe624d5d4f9b9a6,1 +np.float64,0x7fca410a0f348213,0x7ff0000000000000,1 +np.float64,0xbfde368f77bc6d1e,0xbfdf5910c8c8bcb0,1 
+np.float64,0xbfed7412937ae825,0xbff0e55afc428453,1 +np.float64,0xffef6b7b607ed6f6,0xfff0000000000000,1 +np.float64,0xbfb936f17e326de0,0xbfb941629a53c694,1 +np.float64,0x800dbb0c469b7619,0x800dbb0c469b7619,1 +np.float64,0x800f68b0581ed161,0x800f68b0581ed161,1 +np.float64,0x3fe25b2aad64b656,0x3fe361266fa9c5eb,1 +np.float64,0xbfb87e445a30fc88,0xbfb887d676910c3f,1 +np.float64,0x6e6ba9b6dcd76,0x6e6ba9b6dcd76,1 +np.float64,0x3fad27ce583a4f9d,0x3fad2bd72782ffdb,1 +np.float64,0xbfec0bc5d638178c,0xbfefc6e8c8f9095f,1 +np.float64,0x7fcba4a296374944,0x7ff0000000000000,1 +np.float64,0x8004ca237cc99448,0x8004ca237cc99448,1 +np.float64,0xffe85b8c3270b718,0xfff0000000000000,1 +np.float64,0x7fe7ee3eddafdc7d,0x7ff0000000000000,1 +np.float64,0xffd275967ca4eb2c,0xfff0000000000000,1 +np.float64,0xbfa95bc3a032b780,0xbfa95e6b288ecf43,1 +np.float64,0x3fc9e3214b33c643,0x3fca10667e7e7ff4,1 +np.float64,0x8001b89c5d837139,0x8001b89c5d837139,1 +np.float64,0xbf8807dfc0300fc0,0xbf880803e3badfbd,1 +np.float64,0x800aca94b895952a,0x800aca94b895952a,1 +np.float64,0x7fd79534a02f2a68,0x7ff0000000000000,1 +np.float64,0x3fe1b81179e37023,0x3fe2a371d8cc26f0,1 +np.float64,0x800699539d6d32a8,0x800699539d6d32a8,1 +np.float64,0xffe51dfbb3aa3bf7,0xfff0000000000000,1 +np.float64,0xbfdfb775abbf6eec,0xbfe083f48be2f98f,1 +np.float64,0x3fe87979d7b0f2f4,0x3feaee701d959079,1 +np.float64,0x3fd8e4e6a731c9cd,0x3fd986d29f25f982,1 +np.float64,0x3fe3dadaaf67b5b6,0x3fe527520fb02920,1 +np.float64,0x8003c2262bc7844d,0x8003c2262bc7844d,1 +np.float64,0x800c930add392616,0x800c930add392616,1 +np.float64,0xffb7a152a22f42a8,0xfff0000000000000,1 +np.float64,0x80028fe03dc51fc1,0x80028fe03dc51fc1,1 +np.float64,0xffe32ae60c6655cc,0xfff0000000000000,1 +np.float64,0x3fea3527e4746a50,0x3fed3cbbf47f18eb,1 +np.float64,0x800a53059e14a60c,0x800a53059e14a60c,1 +np.float64,0xbfd79e3b202f3c76,0xbfd828672381207b,1 +np.float64,0xffeed7e2eb7dafc5,0xfff0000000000000,1 +np.float64,0x3fec51ed6778a3db,0x3ff01509e34df61d,1 +np.float64,0xbfd84bc577b0978a,0xbfd8e23ec55e42e8,1 +np.float64,0x2483aff849077,0x2483aff849077,1 +np.float64,0x6f57883adeaf2,0x6f57883adeaf2,1 +np.float64,0xffd3fd74d927faea,0xfff0000000000000,1 +np.float64,0x7fca49ec773493d8,0x7ff0000000000000,1 +np.float64,0x7fd08fe2e8211fc5,0x7ff0000000000000,1 +np.float64,0x800852086db0a411,0x800852086db0a411,1 +np.float64,0x3fe5b1f2c9eb63e6,0x3fe7654f511bafc6,1 +np.float64,0xbfe01e2a58e03c54,0xbfe0cedb68f021e6,1 +np.float64,0x800988421d331085,0x800988421d331085,1 +np.float64,0xffd5038b18aa0716,0xfff0000000000000,1 +np.float64,0x8002c9264c85924d,0x8002c9264c85924d,1 +np.float64,0x3fd21ca302243946,0x3fd25ac653a71aab,1 +np.float64,0xbfea60d6e6f4c1ae,0xbfed78031d9dfa2b,1 +np.float64,0xffef97b6263f2f6b,0xfff0000000000000,1 +np.float64,0xbfd524732faa48e6,0xbfd5876ecc415dcc,1 +np.float64,0x660387e8cc072,0x660387e8cc072,1 +np.float64,0x7fcfc108a33f8210,0x7ff0000000000000,1 +np.float64,0x7febe5b0f877cb61,0x7ff0000000000000,1 +np.float64,0xbfa55fdfac2abfc0,0xbfa56176991851a8,1 +np.float64,0x25250f4c4a4a3,0x25250f4c4a4a3,1 +np.float64,0xffe2f6a2f2a5ed46,0xfff0000000000000,1 +np.float64,0x7fa754fcc02ea9f9,0x7ff0000000000000,1 +np.float64,0x3febd19dea37a33c,0x3fef75279f75d3b8,1 +np.float64,0xc5ed55218bdab,0xc5ed55218bdab,1 +np.float64,0x3fe72ff6b3ee5fed,0x3fe945388b979882,1 +np.float64,0xbfe16b854e22d70a,0xbfe24b10fc0dff14,1 +np.float64,0xffb22cbe10245980,0xfff0000000000000,1 +np.float64,0xa54246b54a849,0xa54246b54a849,1 +np.float64,0x3fe7f4cda76fe99c,0x3fea41edc74888b6,1 +np.float64,0x1,0x1,1 
+np.float64,0x800d84acce9b095a,0x800d84acce9b095a,1 +np.float64,0xb0eef04761dde,0xb0eef04761dde,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xffecaf1dbb795e3b,0xfff0000000000000,1 +np.float64,0x90dbab8d21b76,0x90dbab8d21b76,1 +np.float64,0x3fe79584a9ef2b09,0x3fe9c71fa9e40eb5,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-tan.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-tan.csv new file mode 100644 index 0000000..ac97624 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-tan.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfd97ece0,0xc11186e9,4 +np.float32,0x8013bb34,0x8013bb34,4 +np.float32,0x316389,0x316389,4 +np.float32,0x7f7fffff,0xbf1c9eca,4 +np.float32,0x3f7674bb,0x3fb7e450,4 +np.float32,0x80800000,0x80800000,4 +np.float32,0x7f5995e8,0xbf94106c,4 +np.float32,0x74527,0x74527,4 +np.float32,0x7f08caea,0xbeceddb6,4 +np.float32,0x2d49b2,0x2d49b2,4 +np.float32,0x3f74e5e4,0x3fb58695,4 +np.float32,0x3f3fcd51,0x3f6e1e81,4 +np.float32,0xbf4f3608,0xbf864d3d,4 +np.float32,0xbed974a0,0xbee78c70,4 +np.float32,0xff5f483c,0x3ecf3cb2,4 +np.float32,0x7f4532f4,0xc0b96f7b,4 +np.float32,0x3f0a4f7c,0x3f198cc0,4 +np.float32,0x210193,0x210193,4 +np.float32,0xfeebad7a,0xbf92eba8,4 +np.float32,0xfed29f74,0xc134cab6,4 +np.float32,0x803433a0,0x803433a0,4 +np.float32,0x64eb46,0x64eb46,4 +np.float32,0xbf54ef22,0xbf8c757b,4 +np.float32,0x3f3d5fdd,0x3f69a17b,4 +np.float32,0x80000001,0x80000001,4 +np.float32,0x800a837a,0x800a837a,4 +np.float32,0x6ff0be,0x6ff0be,4 +np.float32,0xfe8f1186,0x3f518820,4 +np.float32,0x804963e5,0x804963e5,4 +np.float32,0xfebaa59a,0x3fa1dbb0,4 +np.float32,0x637970,0x637970,4 +np.float32,0x3e722a6b,0x3e76c89a,4 +np.float32,0xff2b0478,0xbddccb5f,4 +np.float32,0xbf7bd85b,0xbfc06821,4 +np.float32,0x3ec33600,0x3ecd4126,4 +np.float32,0x3e0a43b9,0x3e0b1c69,4 +np.float32,0x7f7511b6,0xbe427083,4 +np.float32,0x3f28c114,0x3f465a73,4 +np.float32,0x3f179e1c,0x3f2c3e7c,4 +np.float32,0x7b2963,0x7b2963,4 +np.float32,0x3f423d06,0x3f72b442,4 +np.float32,0x3f5a24c6,0x3f925508,4 +np.float32,0xff18c834,0xbf79b5c8,4 +np.float32,0x3f401ece,0x3f6eb6ac,4 +np.float32,0x7b8a3013,0xbffab968,4 +np.float32,0x80091ff0,0x80091ff0,4 +np.float32,0x3f389c51,0x3f610b47,4 +np.float32,0x5ea174,0x5ea174,4 +np.float32,0x807a9eb2,0x807a9eb2,4 +np.float32,0x806ce61e,0x806ce61e,4 +np.float32,0xbe956acc,0xbe99cefc,4 +np.float32,0x7e60e247,0xbf5e64a5,4 +np.float32,0x7f398e24,0x404d12ed,4 +np.float32,0x3d9049f8,0x3d908735,4 +np.float32,0x7db17ffc,0xbf5b3d87,4 +np.float32,0xff453f78,0xc0239c9f,4 +np.float32,0x3f024aac,0x3f0ed802,4 +np.float32,0xbe781c30,0xbe7d1508,4 +np.float32,0x3f77962a,0x3fb9a28e,4 +np.float32,0xff7fffff,0x3f1c9eca,4 +np.float32,0x3f7152e3,0x3fb03f9d,4 +np.float32,0xff7cb167,0x3f9ce831,4 +np.float32,0x3e763e30,0x3e7b1a10,4 +np.float32,0xbf126527,0xbf24c253,4 +np.float32,0x803f6660,0x803f6660,4 +np.float32,0xbf79de38,0xbfbd38b1,4 +np.float32,0x8046c2f0,0x8046c2f0,4 +np.float32,0x6dc74e,0x6dc74e,4 +np.float32,0xbec9c45e,0xbed4e768,4 +np.float32,0x3f0eedb6,0x3f1fe610,4 +np.float32,0x7e031999,0xbcc13026,4 +np.float32,0x7efc2fd7,0x41e4b284,4 +np.float32,0xbeab7454,0xbeb22a1b,4 +np.float32,0x805ee67b,0x805ee67b,4 +np.float32,0x7f76e58e,0xc2436659,4 +np.float32,0xbe62b024,0xbe667718,4 +np.float32,0x3eea0808,0x3efbd182,4 +np.float32,0xbf7fd00c,0xbfc70719,4 +np.float32,0x7f27b640,0xbf0d97e0,4 +np.float32,0x3f1b58a4,0x3f31b6f4,4 
+np.float32,0x252a9f,0x252a9f,4 +np.float32,0x7f65f95a,0xbead5de3,4 +np.float32,0xfc6ea780,0x42d15801,4 +np.float32,0x7eac4c52,0xc0682424,4 +np.float32,0xbe8a3f5a,0xbe8db54d,4 +np.float32,0xbf1644e2,0xbf2a4abd,4 +np.float32,0x3fc96a,0x3fc96a,4 +np.float32,0x7f38c0e4,0x3cc04af8,4 +np.float32,0x3f623d75,0x3f9c065d,4 +np.float32,0x3ee6a51a,0x3ef7a058,4 +np.float32,0x3dd11020,0x3dd1cacf,4 +np.float32,0xb6918,0xb6918,4 +np.float32,0xfdd7a540,0x3f22f081,4 +np.float32,0x80798563,0x80798563,4 +np.float32,0x3e9a8b7a,0x3e9f6a7e,4 +np.float32,0xbea515d4,0xbeab0df5,4 +np.float32,0xbea9b9f4,0xbeb03abe,4 +np.float32,0xbf11a5fa,0xbf23b478,4 +np.float32,0xfd6cadf0,0xbfa2a878,4 +np.float32,0xbf6edd07,0xbfacbb78,4 +np.float32,0xff5c5328,0x3e2d1552,4 +np.float32,0xbea2f788,0xbea8b3f5,4 +np.float32,0x802efaeb,0x802efaeb,4 +np.float32,0xff1c85e5,0x41f8560e,4 +np.float32,0x3f53b123,0x3f8b18e1,4 +np.float32,0xff798c4a,0x4092e66f,4 +np.float32,0x7f2e6fe7,0xbdcbd58f,4 +np.float32,0xfe8a8196,0x3fd7fc56,4 +np.float32,0x5e7ad4,0x5e7ad4,4 +np.float32,0xbf23a02d,0xbf3e4533,4 +np.float32,0x3f31c55c,0x3f5531bf,4 +np.float32,0x80331be3,0x80331be3,4 +np.float32,0x8056960a,0x8056960a,4 +np.float32,0xff1c06ae,0xbfd26992,4 +np.float32,0xbe0cc4b0,0xbe0da96c,4 +np.float32,0x7e925ad5,0xbf8dba54,4 +np.float32,0x2c8cec,0x2c8cec,4 +np.float32,0x8011951e,0x8011951e,4 +np.float32,0x3f2caf84,0x3f4cb89f,4 +np.float32,0xbd32c220,0xbd32df33,4 +np.float32,0xbec358d6,0xbecd6996,4 +np.float32,0x3f6e4930,0x3fabeb92,4 +np.float32,0xbf6a3afd,0xbfa65a3a,4 +np.float32,0x80067764,0x80067764,4 +np.float32,0x3d8df1,0x3d8df1,4 +np.float32,0x7ee51cf2,0x409e4061,4 +np.float32,0x435f5d,0x435f5d,4 +np.float32,0xbf5b17f7,0xbf936ebe,4 +np.float32,0x3ecaacb5,0x3ed5f81f,4 +np.float32,0x807b0aa5,0x807b0aa5,4 +np.float32,0x52b40b,0x52b40b,4 +np.float32,0x146a97,0x146a97,4 +np.float32,0x7f42b952,0xbfdcb413,4 +np.float32,0xbf1a1af2,0xbf2fe1bb,4 +np.float32,0x3f312034,0x3f541aa2,4 +np.float32,0x3f281d60,0x3f4554f9,4 +np.float32,0x50e451,0x50e451,4 +np.float32,0xbe45838c,0xbe480016,4 +np.float32,0xff7d0aeb,0x3eb0746e,4 +np.float32,0x7f32a489,0xbf96af6d,4 +np.float32,0xbf1b4e27,0xbf31a769,4 +np.float32,0x3f242936,0x3f3f1a44,4 +np.float32,0xbf7482ff,0xbfb4f201,4 +np.float32,0x4bda38,0x4bda38,4 +np.float32,0xbf022208,0xbf0ea2bb,4 +np.float32,0x7d08ca95,0xbe904602,4 +np.float32,0x7ed2f356,0xc02b55ad,4 +np.float32,0xbf131204,0xbf25b734,4 +np.float32,0xff3464b4,0x3fb23706,4 +np.float32,0x5a97cf,0x5a97cf,4 +np.float32,0xbe52db70,0xbe55e388,4 +np.float32,0x3f52934f,0x3f89e2aa,4 +np.float32,0xfeea866a,0x40a2b33f,4 +np.float32,0x80333925,0x80333925,4 +np.float32,0xfef5d13e,0xc00139ec,4 +np.float32,0x3f4750ab,0x3f7c87ad,4 +np.float32,0x3e41bfdd,0x3e44185a,4 +np.float32,0xbf5b0572,0xbf935935,4 +np.float32,0xbe93c9da,0xbe9808d8,4 +np.float32,0x7f501f33,0xc0f9973c,4 +np.float32,0x800af035,0x800af035,4 +np.float32,0x3f29faf8,0x3f4852a8,4 +np.float32,0xbe1e4c20,0xbe1f920c,4 +np.float32,0xbf7e8616,0xbfc4d79d,4 +np.float32,0x43ffbf,0x43ffbf,4 +np.float32,0x7f28e8a9,0xbfa1ac24,4 +np.float32,0xbf1f9f92,0xbf3820bc,4 +np.float32,0x3f07e004,0x3f1641c4,4 +np.float32,0x3ef7ea7f,0x3f06a64a,4 +np.float32,0x7e013101,0x3f6080e6,4 +np.float32,0x7f122a4f,0xbf0a796f,4 +np.float32,0xfe096960,0x3ed7273a,4 +np.float32,0x3f06abf1,0x3f14a4b2,4 +np.float32,0x3e50ded3,0x3e53d0f1,4 +np.float32,0x7f50b346,0x3eabb536,4 +np.float32,0xff5adb0f,0xbd441972,4 +np.float32,0xbecefe46,0xbedb0f66,4 +np.float32,0x7da70bd4,0xbec66273,4 +np.float32,0x169811,0x169811,4 +np.float32,0xbee4dfee,0xbef5721a,4 
+np.float32,0x3efbeae3,0x3f0936e6,4 +np.float32,0x8031bd61,0x8031bd61,4 +np.float32,0x8048e443,0x8048e443,4 +np.float32,0xff209aa6,0xbeb364cb,4 +np.float32,0xff477499,0x3c1b0041,4 +np.float32,0x803fe929,0x803fe929,4 +np.float32,0x3f70158b,0x3fae7725,4 +np.float32,0x7f795723,0x3e8e850a,4 +np.float32,0x3cba99,0x3cba99,4 +np.float32,0x80588d2a,0x80588d2a,4 +np.float32,0x805d1f05,0x805d1f05,4 +np.float32,0xff4ac09a,0xbefe614d,4 +np.float32,0x804af084,0x804af084,4 +np.float32,0x7c64ae63,0xc1a8b563,4 +np.float32,0x8078d793,0x8078d793,4 +np.float32,0x7f3e2436,0xbf8bf9d3,4 +np.float32,0x7ccec1,0x7ccec1,4 +np.float32,0xbf6462c7,0xbf9eb830,4 +np.float32,0x3f1002ca,0x3f216843,4 +np.float32,0xfe878ca6,0x409e73a5,4 +np.float32,0x3bd841d9,0x3bd842a7,4 +np.float32,0x7d406f41,0xbd9dcfa3,4 +np.float32,0x7c6d6,0x7c6d6,4 +np.float32,0x3f4ef360,0x3f86074b,4 +np.float32,0x805f534a,0x805f534a,4 +np.float32,0x1,0x1,4 +np.float32,0x3f739ee2,0x3fb39db2,4 +np.float32,0x3d0c2352,0x3d0c3153,4 +np.float32,0xfe8a4f2c,0x3edd8add,4 +np.float32,0x3e52eaa0,0x3e55f362,4 +np.float32,0x7bde9758,0xbf5ba5cf,4 +np.float32,0xff422654,0xbf41e487,4 +np.float32,0x385e5b,0x385e5b,4 +np.float32,0x5751dd,0x5751dd,4 +np.float32,0xff6c671c,0xc03e2d6d,4 +np.float32,0x1458be,0x1458be,4 +np.float32,0x80153d4d,0x80153d4d,4 +np.float32,0x7efd2adb,0x3e25458f,4 +np.float32,0xbe161880,0xbe172e12,4 +np.float32,0x7ecea1aa,0x40a66d79,4 +np.float32,0xbf5b02a2,0xbf9355f0,4 +np.float32,0x15d9ab,0x15d9ab,4 +np.float32,0x2dc7c7,0x2dc7c7,4 +np.float32,0xfebbf81a,0x4193f6e6,4 +np.float32,0xfe8e3594,0xc00a6695,4 +np.float32,0x185aa8,0x185aa8,4 +np.float32,0x3daea156,0x3daf0e00,4 +np.float32,0x3e071688,0x3e07e08e,4 +np.float32,0x802db9e6,0x802db9e6,4 +np.float32,0x7f7be2c4,0x3f1363dd,4 +np.float32,0x7eba3f5e,0xc13eb497,4 +np.float32,0x3de04a00,0x3de130a9,4 +np.float32,0xbf1022bc,0xbf2194eb,4 +np.float32,0xbf5b547e,0xbf93b53b,4 +np.float32,0x3e867bd6,0x3e89aa10,4 +np.float32,0xbea5eb5c,0xbeabfb73,4 +np.float32,0x7f1efae9,0x3ffca038,4 +np.float32,0xff5d0344,0xbe55dbbb,4 +np.float32,0x805167e7,0x805167e7,4 +np.float32,0xbdb3a020,0xbdb41667,4 +np.float32,0xbedea6b4,0xbeedd5fd,4 +np.float32,0x8053b45c,0x8053b45c,4 +np.float32,0x7ed370e9,0x3d90eba5,4 +np.float32,0xbefcd7da,0xbf09cf91,4 +np.float32,0x78b9ac,0x78b9ac,4 +np.float32,0xbf2f6dc0,0xbf5141ef,4 +np.float32,0x802d3a7b,0x802d3a7b,4 +np.float32,0xfd45d120,0x3fec31cc,4 +np.float32,0xbf7e7020,0xbfc4b2af,4 +np.float32,0xf04da,0xf04da,4 +np.float32,0xbe9819d4,0xbe9cbd35,4 +np.float32,0x8075ab35,0x8075ab35,4 +np.float32,0xbf052fdc,0xbf12aa2c,4 +np.float32,0x3f1530d0,0x3f28bd9f,4 +np.float32,0x80791881,0x80791881,4 +np.float32,0x67f309,0x67f309,4 +np.float32,0x3f12f16a,0x3f2588f5,4 +np.float32,0x3ecdac47,0x3ed97ff8,4 +np.float32,0xbf297fb7,0xbf478c39,4 +np.float32,0x8069fa80,0x8069fa80,4 +np.float32,0x807f940e,0x807f940e,4 +np.float32,0xbf648dc8,0xbf9eeecb,4 +np.float32,0x3de873b0,0x3de9748d,4 +np.float32,0x3f1aa645,0x3f30af1f,4 +np.float32,0xff227a62,0x3d8283cc,4 +np.float32,0xbf37187d,0xbf5e5f4c,4 +np.float32,0x803b1b1f,0x803b1b1f,4 +np.float32,0x3f58142a,0x3f8ff8da,4 +np.float32,0x8004339e,0x8004339e,4 +np.float32,0xbf0f5654,0xbf2077a4,4 +np.float32,0x3f17e509,0x3f2ca598,4 +np.float32,0x3f800000,0x3fc75923,4 +np.float32,0xfdf79980,0x42f13047,4 +np.float32,0x7f111381,0x3f13c4c9,4 +np.float32,0xbea40c70,0xbea9e724,4 +np.float32,0x110520,0x110520,4 +np.float32,0x60490d,0x60490d,4 +np.float32,0x3f6703ec,0x3fa21951,4 +np.float32,0xbf098256,0xbf187652,4 +np.float32,0x658951,0x658951,4 
+np.float32,0x3f53bf16,0x3f8b2818,4 +np.float32,0xff451811,0xc0026068,4 +np.float32,0x80777ee0,0x80777ee0,4 +np.float32,0x3e4fcc19,0x3e52b286,4 +np.float32,0x7f387ee0,0x3ce93eb6,4 +np.float32,0xff51181f,0xbfca3ee4,4 +np.float32,0xbf5655ae,0xbf8e0304,4 +np.float32,0xff2f1dcd,0x40025471,4 +np.float32,0x7f6e58e5,0xbe9930d5,4 +np.float32,0x7adf11,0x7adf11,4 +np.float32,0xbe9a2bc2,0xbe9f0185,4 +np.float32,0x8065d3a0,0x8065d3a0,4 +np.float32,0x3ed6e826,0x3ee47c45,4 +np.float32,0x80598ea0,0x80598ea0,4 +np.float32,0x7f10b90a,0x40437bd0,4 +np.float32,0x27b447,0x27b447,4 +np.float32,0x7ecd861c,0x3fce250f,4 +np.float32,0x0,0x0,4 +np.float32,0xbeba82d6,0xbec3394c,4 +np.float32,0xbf4958b0,0xbf8048ea,4 +np.float32,0x7c643e,0x7c643e,4 +np.float32,0x580770,0x580770,4 +np.float32,0x805bf54a,0x805bf54a,4 +np.float32,0x7f1f3cee,0xbe1a54d6,4 +np.float32,0xfefefdea,0x3fa84576,4 +np.float32,0x7f007b7a,0x3e8a6d25,4 +np.float32,0xbf177959,0xbf2c0919,4 +np.float32,0xbf30fda0,0xbf53e058,4 +np.float32,0x3f0576be,0x3f130861,4 +np.float32,0x3f49380e,0x3f80283a,4 +np.float32,0xebc56,0xebc56,4 +np.float32,0x654e3b,0x654e3b,4 +np.float32,0x14a4d8,0x14a4d8,4 +np.float32,0xff69b3cb,0xbf822a88,4 +np.float32,0xbe9b6c1c,0xbea06109,4 +np.float32,0xbefddd7e,0xbf0a787b,4 +np.float32,0x4c4ebb,0x4c4ebb,4 +np.float32,0x7d0a74,0x7d0a74,4 +np.float32,0xbebb5f80,0xbec43635,4 +np.float32,0x7ee79723,0xc1c7f3f3,4 +np.float32,0x7f2be4c7,0xbfa6c693,4 +np.float32,0x805bc7d5,0x805bc7d5,4 +np.float32,0x8042f12c,0x8042f12c,4 +np.float32,0x3ef91be8,0x3f07697b,4 +np.float32,0x3cf37ac0,0x3cf38d1c,4 +np.float32,0x800000,0x800000,4 +np.float32,0xbe1ebf4c,0xbe200806,4 +np.float32,0x7f380862,0xbeb512e8,4 +np.float32,0xbe320064,0xbe33d0fc,4 +np.float32,0xff300b0c,0xbfadb805,4 +np.float32,0x308a06,0x308a06,4 +np.float32,0xbf084f6e,0xbf16d7b6,4 +np.float32,0xff47cab6,0x3f892b65,4 +np.float32,0xbed99f4a,0xbee7bfd5,4 +np.float32,0xff7d74c0,0x3ee88c9a,4 +np.float32,0x3c3d23,0x3c3d23,4 +np.float32,0x8074bde8,0x8074bde8,4 +np.float32,0x80042164,0x80042164,4 +np.float32,0x3e97c92a,0x3e9c6500,4 +np.float32,0x3b80e0,0x3b80e0,4 +np.float32,0xbf16646a,0xbf2a783d,4 +np.float32,0x7f3b4cb1,0xc01339be,4 +np.float32,0xbf31f36e,0xbf557fd0,4 +np.float32,0x7f540618,0xbe5f6fc1,4 +np.float32,0x7eee47d0,0x40a27e94,4 +np.float32,0x7f12f389,0xbebed654,4 +np.float32,0x56cff5,0x56cff5,4 +np.float32,0x8056032b,0x8056032b,4 +np.float32,0x3ed34e40,0x3ee02e38,4 +np.float32,0x7d51a908,0xbf19a90e,4 +np.float32,0x80000000,0x80000000,4 +np.float32,0xfdf73fd0,0xbf0f8cad,4 +np.float32,0x7ee4fe6d,0xbf1ea7e4,4 +np.float32,0x1f15ba,0x1f15ba,4 +np.float32,0xd18c3,0xd18c3,4 +np.float32,0x80797705,0x80797705,4 +np.float32,0x7ef07091,0x3f2f3b9a,4 +np.float32,0x7f552f41,0x3faf608c,4 +np.float32,0x3f779977,0x3fb9a7ad,4 +np.float32,0xfe1a7a50,0xbdadc4d1,4 +np.float32,0xbf449cf0,0xbf7740db,4 +np.float32,0xbe44e620,0xbe475cad,4 +np.float32,0x3f63a098,0x3f9dc2b5,4 +np.float32,0xfed40a12,0x4164533a,4 +np.float32,0x7a2bbb,0x7a2bbb,4 +np.float32,0xff7f7b9e,0xbeee8740,4 +np.float32,0x7ee27f8b,0x4233f53b,4 +np.float32,0xbf044c06,0xbf117c28,4 +np.float32,0xbeffde54,0xbf0bc49f,4 +np.float32,0xfeaef2e8,0x3ff258fe,4 +np.float32,0x527451,0x527451,4 +np.float32,0xbcef8d00,0xbcef9e7c,4 +np.float32,0xbf0e20c0,0xbf1ec9b2,4 +np.float32,0x8024afda,0x8024afda,4 +np.float32,0x7ef6cb3e,0x422cad0b,4 +np.float32,0x3c120,0x3c120,4 +np.float32,0xbf125c8f,0xbf24b62c,4 +np.float32,0x7e770a93,0x402c9d86,4 +np.float32,0xbd30a4e0,0xbd30c0ee,4 +np.float32,0xbf4d3388,0xbf843530,4 +np.float32,0x3f529072,0x3f89df92,4 
+np.float32,0xff0270b1,0xbf81be9a,4 +np.float32,0x5e07e7,0x5e07e7,4 +np.float32,0x7bec32,0x7bec32,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3e3ba5e0,0x3e3dc6e9,4 +np.float32,0x3ecb62d4,0x3ed6ce2c,4 +np.float32,0x3eb3dde8,0x3ebba68f,4 +np.float32,0x8063f952,0x8063f952,4 +np.float32,0x7f204aeb,0x3e88614e,4 +np.float32,0xbeae1ddc,0xbeb5278e,4 +np.float32,0x6829e9,0x6829e9,4 +np.float32,0xbf361a99,0xbf5ca354,4 +np.float32,0xbf24fbe6,0xbf406326,4 +np.float32,0x3f329d41,0x3f56a061,4 +np.float32,0xfed6d666,0x3e8f71a5,4 +np.float32,0x337f92,0x337f92,4 +np.float32,0xbe1c4970,0xbe1d8305,4 +np.float32,0xbe6b7e18,0xbe6fbbde,4 +np.float32,0x3f2267b9,0x3f3c61da,4 +np.float32,0xbee1ee94,0xbef1d628,4 +np.float32,0x7ecffc1a,0x3f02987e,4 +np.float32,0xbe9b1306,0xbe9fff3b,4 +np.float32,0xbeffacae,0xbf0ba468,4 +np.float32,0x7f800000,0xffc00000,4 +np.float32,0xfefc9aa8,0xc19de2a3,4 +np.float32,0x7d7185bb,0xbf9090ec,4 +np.float32,0x7edfbafd,0x3fe9352f,4 +np.float32,0x4ef2ec,0x4ef2ec,4 +np.float32,0x7f4cab2e,0xbff4e5dd,4 +np.float32,0xff3b1788,0x3e3c22e9,4 +np.float32,0x4e15ee,0x4e15ee,4 +np.float32,0xbf5451e6,0xbf8bc8a7,4 +np.float32,0x3f7f6d2e,0x3fc65e8b,4 +np.float32,0xbf1d9184,0xbf35071b,4 +np.float32,0xbf3a81cf,0xbf646d9b,4 +np.float32,0xbe71acc4,0xbe7643ab,4 +np.float32,0x528b7d,0x528b7d,4 +np.float32,0x2cb1d0,0x2cb1d0,4 +np.float32,0x3f324bf8,0x3f56161a,4 +np.float32,0x80709a21,0x80709a21,4 +np.float32,0x4bc448,0x4bc448,4 +np.float32,0x3e8bd600,0x3e8f6b7a,4 +np.float32,0xbeb97d30,0xbec20dd6,4 +np.float32,0x2a5669,0x2a5669,4 +np.float32,0x805f2689,0x805f2689,4 +np.float32,0xfe569f50,0x3fc51952,4 +np.float32,0x1de44c,0x1de44c,4 +np.float32,0x3ec7036c,0x3ed1ae67,4 +np.float32,0x8052b8e5,0x8052b8e5,4 +np.float32,0xff740a6b,0x3f4981a8,4 +np.float32,0xfee9bb70,0xc05e23be,4 +np.float32,0xff4e12c9,0x4002b4ad,4 +np.float32,0x803de0c2,0x803de0c2,4 +np.float32,0xbf433a07,0xbf74966f,4 +np.float32,0x803e60ca,0x803e60ca,4 +np.float32,0xbf19ee98,0xbf2fa07a,4 +np.float32,0x92929,0x92929,4 +np.float32,0x7f709c27,0x4257ba2d,4 +np.float32,0x803167c6,0x803167c6,4 +np.float32,0xbf095ead,0xbf184607,4 +np.float32,0x617060,0x617060,4 +np.float32,0x2d85b3,0x2d85b3,4 +np.float32,0x53d20b,0x53d20b,4 +np.float32,0x3e046838,0x3e052666,4 +np.float32,0xbe7c5fdc,0xbe80ce4b,4 +np.float32,0x3d18d060,0x3d18e289,4 +np.float32,0x804dc031,0x804dc031,4 +np.float32,0x3f224166,0x3f3c26cd,4 +np.float32,0x7d683e3c,0xbea24f25,4 +np.float32,0xbf3a92aa,0xbf648be4,4 +np.float32,0x8072670b,0x8072670b,4 +np.float32,0xbe281aec,0xbe29a1bc,4 +np.float32,0x7f09d918,0xc0942490,4 +np.float32,0x7ca9fd07,0x4018b990,4 +np.float32,0x7d36ac5d,0x3cf57184,4 +np.float32,0x8039b62f,0x8039b62f,4 +np.float32,0x6cad7b,0x6cad7b,4 +np.float32,0x3c0fd9ab,0x3c0fda9d,4 +np.float32,0x80299883,0x80299883,4 +np.float32,0x3c2d0e3e,0x3c2d0fe4,4 +np.float32,0x8002cf62,0x8002cf62,4 +np.float32,0x801dde97,0x801dde97,4 +np.float32,0x80411856,0x80411856,4 +np.float32,0x6ebce8,0x6ebce8,4 +np.float32,0x7b7d1a,0x7b7d1a,4 +np.float32,0x8031d3de,0x8031d3de,4 +np.float32,0x8005c4ab,0x8005c4ab,4 +np.float32,0xbf7dd803,0xbfc3b3ef,4 +np.float32,0x8017ae60,0x8017ae60,4 +np.float32,0xfe9316ce,0xbfe0544a,4 +np.float32,0x3f136bfe,0x3f2636ff,4 +np.float32,0x3df87b80,0x3df9b57d,4 +np.float32,0xff44c356,0xbf11c7ad,4 +np.float32,0x4914ae,0x4914ae,4 +np.float32,0x80524c21,0x80524c21,4 +np.float32,0x805c7dc8,0x805c7dc8,4 +np.float32,0xfed3c0aa,0xbff0c0ab,4 +np.float32,0x7eb2bfbb,0xbf4600bc,4 +np.float32,0xfec8df84,0x3f5bd350,4 +np.float32,0x3e5431a4,0x3e5748c3,4 
+np.float32,0xbee6a3a0,0xbef79e86,4 +np.float32,0xbf6cc9b2,0xbfa9d61a,4 +np.float32,0x3f132bd5,0x3f25dbd9,4 +np.float32,0x7e6d2e48,0x3f9d025b,4 +np.float32,0x3edf430c,0x3eee942d,4 +np.float32,0x3f0d1b8a,0x3f1d60e1,4 +np.float32,0xbdf2f688,0xbdf41bfb,4 +np.float32,0xbe47a284,0xbe4a33ff,4 +np.float32,0x3eaa9fbc,0x3eb13be7,4 +np.float32,0xfe98d45e,0x3eb84517,4 +np.float32,0x7efc23b3,0x3dcc1c99,4 +np.float32,0x3ca36242,0x3ca367ce,4 +np.float32,0x3f76a944,0x3fb834e3,4 +np.float32,0xbf45207c,0xbf783f9b,4 +np.float32,0x3e7c1220,0x3e80a4f8,4 +np.float32,0x3f018200,0x3f0dd14e,4 +np.float32,0x3f53cdde,0x3f8b3839,4 +np.float32,0xbdbacb58,0xbdbb5063,4 +np.float32,0x804af68d,0x804af68d,4 +np.float32,0x3e2c12fc,0x3e2db65b,4 +np.float32,0x3f039433,0x3f10895a,4 +np.float32,0x7ef5193d,0x3f4115f7,4 +np.float32,0x8030afbe,0x8030afbe,4 +np.float32,0x3f06fa2a,0x3f150d5d,4 +np.float32,0x3f124442,0x3f2493d2,4 +np.float32,0xbeb5b792,0xbebdc090,4 +np.float32,0xbedc90a4,0xbeeb4de9,4 +np.float32,0x3f3ff8,0x3f3ff8,4 +np.float32,0x3ee75bc5,0x3ef881e4,4 +np.float32,0xfe80e3de,0xbf5cd535,4 +np.float32,0xf52eb,0xf52eb,4 +np.float32,0x80660ee8,0x80660ee8,4 +np.float32,0x3e173a58,0x3e185648,4 +np.float32,0xfe49520c,0xbf728d7c,4 +np.float32,0xbecbb8ec,0xbed73373,4 +np.float32,0xbf027ae0,0xbf0f173e,4 +np.float32,0xbcab6740,0xbcab6da8,4 +np.float32,0xbf2a15e2,0xbf487e11,4 +np.float32,0x3b781b,0x3b781b,4 +np.float32,0x44f559,0x44f559,4 +np.float32,0xff6a0ca6,0xc174d7c3,4 +np.float32,0x6460ef,0x6460ef,4 +np.float32,0xfe58009c,0x3ee2bb30,4 +np.float32,0xfec3c038,0x3e30d617,4 +np.float32,0x7f0687c0,0xbf62c820,4 +np.float32,0xbf44655e,0xbf76d589,4 +np.float32,0xbf42968c,0xbf735e78,4 +np.float32,0x80385503,0x80385503,4 +np.float32,0xbea7e3a2,0xbeae2d59,4 +np.float32,0x3dd0b770,0x3dd17131,4 +np.float32,0xbf4bc185,0xbf82b907,4 +np.float32,0xfefd7d64,0xbee05650,4 +np.float32,0xfaac3c00,0xbff23bc9,4 +np.float32,0xbf562f0d,0xbf8dd7f4,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0x3e01bdb8,0x3e027098,4 +np.float32,0x3e2868ab,0x3e29f19e,4 +np.float32,0xfec55f2e,0x3f39f304,4 +np.float32,0xed4e,0xed4e,4 +np.float32,0x3e2b7330,0x3e2d11fa,4 +np.float32,0x7f738542,0x40cbbe16,4 +np.float32,0x3f123521,0x3f247e71,4 +np.float32,0x73572c,0x73572c,4 +np.float32,0x804936c8,0x804936c8,4 +np.float32,0x803b80d8,0x803b80d8,4 +np.float32,0x7f566c57,0xbee2855a,4 +np.float32,0xff0e3bd8,0xbff0543f,4 +np.float32,0x7d2b2fe7,0xbf94ba4c,4 +np.float32,0xbf0da470,0xbf1e1dc2,4 +np.float32,0xbd276500,0xbd277ce0,4 +np.float32,0xfcd15dc0,0x403ccc2a,4 +np.float32,0x80071e59,0x80071e59,4 +np.float32,0xbe9b0c34,0xbe9ff7be,4 +np.float32,0x3f4f9069,0x3f86ac50,4 +np.float32,0x80042a95,0x80042a95,4 +np.float32,0x7de28e39,0x3bc9b7f4,4 +np.float32,0xbf641935,0xbf9e5af8,4 +np.float32,0x8034f068,0x8034f068,4 +np.float32,0xff33a3d2,0xbf408e75,4 +np.float32,0xbcc51540,0xbcc51efc,4 +np.float32,0xff6d1ddf,0x3ef58f0e,4 +np.float32,0xbf64dfc4,0xbf9f5725,4 +np.float32,0xff068a06,0x3eea8987,4 +np.float32,0xff01c0af,0x3f24cdfe,4 +np.float32,0x3f4def7e,0x3f84f802,4 +np.float32,0xbf1b4ae7,0xbf31a299,4 +np.float32,0x8077df2d,0x8077df2d,4 +np.float32,0x3f0155c5,0x3f0d9785,4 +np.float32,0x5a54b2,0x5a54b2,4 +np.float32,0x7f271f9e,0x3efb2ef3,4 +np.float32,0xbf0ff2ec,0xbf215217,4 +np.float32,0x7f500130,0xbf8a7fdd,4 +np.float32,0xfed9891c,0xbf65c872,4 +np.float32,0xfecbfaae,0x403bdbc2,4 +np.float32,0x3f3a5aba,0x3f642772,4 +np.float32,0x7ebc681e,0xbd8df059,4 +np.float32,0xfe05e400,0xbfe35d74,4 +np.float32,0xbf295ace,0xbf4750ea,4 +np.float32,0x7ea055b2,0x3f62d6be,4 
+np.float32,0xbd00b520,0xbd00bff9,4 +np.float32,0xbf7677aa,0xbfb7e8cf,4 +np.float32,0x3e83f788,0x3e86f816,4 +np.float32,0x801f6710,0x801f6710,4 +np.float32,0x801133cc,0x801133cc,4 +np.float32,0x41da2a,0x41da2a,4 +np.float32,0xff1622fd,0x3f023650,4 +np.float32,0x806c7a72,0x806c7a72,4 +np.float32,0x3f10779c,0x3f220bb4,4 +np.float32,0xbf08cf94,0xbf17848d,4 +np.float32,0xbecb55b4,0xbed6bebd,4 +np.float32,0xbf0a1528,0xbf193d7b,4 +np.float32,0x806a16bd,0x806a16bd,4 +np.float32,0xc222a,0xc222a,4 +np.float32,0x3930de,0x3930de,4 +np.float32,0x3f5c3588,0x3f94bca2,4 +np.float32,0x1215ad,0x1215ad,4 +np.float32,0x3ed15030,0x3eddcf67,4 +np.float32,0x7da83b2e,0x3fce0d39,4 +np.float32,0x32b0a8,0x32b0a8,4 +np.float32,0x805aed6b,0x805aed6b,4 +np.float32,0x3ef8e02f,0x3f074346,4 +np.float32,0xbdeb6780,0xbdec7250,4 +np.float32,0x3f6e3cec,0x3fabda61,4 +np.float32,0xfefd467a,0x3ef7821a,4 +np.float32,0xfef090fe,0x3bb752a2,4 +np.float32,0x8019c538,0x8019c538,4 +np.float32,0x3e8cf284,0x3e909e81,4 +np.float32,0xbe6c6618,0xbe70b0a2,4 +np.float32,0x7f50a539,0x3f367be1,4 +np.float32,0x8019fe2f,0x8019fe2f,4 +np.float32,0x800c3f48,0x800c3f48,4 +np.float32,0xfd054cc0,0xc0f52802,4 +np.float32,0x3d0cca20,0x3d0cd853,4 +np.float32,0xbf4a7c44,0xbf816e74,4 +np.float32,0x3f46fc40,0x3f7be153,4 +np.float32,0x807c5849,0x807c5849,4 +np.float32,0xd7e41,0xd7e41,4 +np.float32,0x70589b,0x70589b,4 +np.float32,0x80357b95,0x80357b95,4 +np.float32,0x3de239f0,0x3de326a5,4 +np.float32,0x800b08e3,0x800b08e3,4 +np.float32,0x807ec946,0x807ec946,4 +np.float32,0x3e2e4b83,0x3e2fff76,4 +np.float32,0x3f198e0f,0x3f2f12a6,4 +np.float32,0xbecb1aca,0xbed67979,4 +np.float32,0x80134082,0x80134082,4 +np.float32,0x3f3a269f,0x3f63ca05,4 +np.float32,0x3f1381e4,0x3f265622,4 +np.float32,0xff293080,0xbf10be6f,4 +np.float32,0xff800000,0xffc00000,4 +np.float32,0x37d196,0x37d196,4 +np.float32,0x7e57eea7,0x3e7d8138,4 +np.float32,0x804b1dae,0x804b1dae,4 +np.float32,0x7d9508f9,0xc1075b35,4 +np.float32,0x3f7bf468,0x3fc095e0,4 +np.float32,0x55472c,0x55472c,4 +np.float32,0x3ecdcd86,0x3ed9a738,4 +np.float32,0x3ed9be0f,0x3ee7e4e9,4 +np.float32,0x3e7e0ddb,0x3e81b2fe,4 +np.float32,0x7ee6c1d3,0x3f850634,4 +np.float32,0x800f6fad,0x800f6fad,4 +np.float32,0xfefb3bd6,0xbff68ecc,4 +np.float32,0x8013d6e2,0x8013d6e2,4 +np.float32,0x3f3a2cb6,0x3f63d4ee,4 +np.float32,0xff383c84,0x3e7854bb,4 +np.float32,0x3f21946e,0x3f3b1cea,4 +np.float32,0xff322ea2,0x3fb22f31,4 +np.float32,0x8065a024,0x8065a024,4 +np.float32,0x7f395e30,0xbefe0de1,4 +np.float32,0x5b52db,0x5b52db,4 +np.float32,0x7f7caea7,0x3dac8ded,4 +np.float32,0xbf0431f8,0xbf1159b2,4 +np.float32,0x7f15b25b,0xc02a3833,4 +np.float32,0x80131abc,0x80131abc,4 +np.float32,0x7e829d81,0xbeb2e93d,4 +np.float32,0x3f2c64d7,0x3f4c3e4d,4 +np.float32,0x7f228d48,0xc1518c74,4 +np.float32,0xfc3c6f40,0xbf00d585,4 +np.float32,0x7f754f0f,0x3e2152f5,4 +np.float32,0xff65d32b,0xbe8bd56c,4 +np.float32,0xfea6b8c0,0x41608655,4 +np.float32,0x3f7d4b05,0x3fc2c96a,4 +np.float32,0x3f463230,0x3f7a54da,4 +np.float32,0x805117bb,0x805117bb,4 +np.float32,0xbf2ad4f7,0xbf49b30e,4 +np.float32,0x3eaa01ff,0x3eb08b56,4 +np.float32,0xff7a02bb,0x3f095f73,4 +np.float32,0x759176,0x759176,4 +np.float32,0x803c18d5,0x803c18d5,4 +np.float32,0xbe0722d8,0xbe07ed16,4 +np.float32,0x3f4b4a99,0x3f823fc6,4 +np.float32,0x3f7d0451,0x3fc25463,4 +np.float32,0xfee31e40,0xbfb41091,4 +np.float32,0xbf733d2c,0xbfb30cf1,4 +np.float32,0x7ed81015,0x417c380c,4 +np.float32,0x7daafc3e,0xbe2a37ed,4 +np.float32,0x3e44f82b,0x3e476f67,4 +np.float32,0x7c8d99,0x7c8d99,4 +np.float32,0x3f7aec5a,0x3fbee991,4 
+np.float32,0xff09fd55,0x3e0709d3,4 +np.float32,0xff4ba4df,0x4173c01f,4 +np.float32,0x3f43d944,0x3f75c7bd,4 +np.float32,0xff6a9106,0x40a10eff,4 +np.float32,0x3bc8341c,0x3bc834bf,4 +np.float32,0x3eea82,0x3eea82,4 +np.float32,0xfea36a3c,0x435729b2,4 +np.float32,0x7dcc1fb0,0x3e330053,4 +np.float32,0x3f616ae6,0x3f9b01ae,4 +np.float32,0x8030963f,0x8030963f,4 +np.float32,0x10d1e2,0x10d1e2,4 +np.float32,0xfeb9a8a6,0x40e6daac,4 +np.float32,0xbe1aba00,0xbe1bea3a,4 +np.float32,0x3cb6b4ea,0x3cb6bcac,4 +np.float32,0x3d8b0b64,0x3d8b422f,4 +np.float32,0x7b6894,0x7b6894,4 +np.float32,0x3e89dcde,0x3e8d4b4b,4 +np.float32,0x3f12b952,0x3f253974,4 +np.float32,0x1c316c,0x1c316c,4 +np.float32,0x7e2da535,0x3f95fe6b,4 +np.float32,0x3ae9a494,0x3ae9a4a4,4 +np.float32,0xbc5f5500,0xbc5f588b,4 +np.float32,0x3e7850fc,0x3e7d4d0e,4 +np.float32,0xbf800000,0xbfc75923,4 +np.float32,0x3e652d69,0x3e691502,4 +np.float32,0xbf6bdd26,0xbfa89129,4 +np.float32,0x3f441cfc,0x3f764a02,4 +np.float32,0x7f5445ff,0xc0906191,4 +np.float32,0x807b2ee3,0x807b2ee3,4 +np.float32,0xbeb6cab8,0xbebef9c0,4 +np.float32,0xff737277,0xbf327011,4 +np.float32,0xfc832aa0,0x402fd52e,4 +np.float32,0xbf0c7538,0xbf1c7c0f,4 +np.float32,0x7e1301c7,0xbf0ee63e,4 +np.float64,0xbfe0ef7df7a1defc,0xbfe2b76a8d8aeb35,1 +np.float64,0x7fdd9c2eae3b385c,0xbfc00d6885485039,1 +np.float64,0xbfb484c710290990,0xbfb4900e0a527555,1 +np.float64,0x7fe73e5d6cee7cba,0x3fefbf70a56b60d3,1 +np.float64,0x800a110aa8d42216,0x800a110aa8d42216,1 +np.float64,0xffedd4f3f3bba9e7,0xbff076f8c4124919,1 +np.float64,0x800093407f812682,0x800093407f812682,1 +np.float64,0x800a23150e54462a,0x800a23150e54462a,1 +np.float64,0xbfb1076864220ed0,0xbfb10dd95a74b733,1 +np.float64,0x3fed1f8b37fa3f16,0x3ff496100985211f,1 +np.float64,0x3fdf762f84beec5f,0x3fe1223eb04a17e0,1 +np.float64,0x53fd4e0aa7faa,0x53fd4e0aa7faa,1 +np.float64,0x3fdbd283bdb7a507,0x3fddb7ec9856a546,1 +np.float64,0xbfe43f449d687e89,0xbfe77724a0d3072b,1 +np.float64,0x618b73bcc316f,0x618b73bcc316f,1 +np.float64,0x67759424ceeb3,0x67759424ceeb3,1 +np.float64,0xbfe4b6f7d9a96df0,0xbfe831371f3bd7a8,1 +np.float64,0x800a531b8b74a637,0x800a531b8b74a637,1 +np.float64,0xffeeffd5c37dffab,0x3fea140cbc2c3726,1 +np.float64,0x3fe648e2002c91c4,0x3feac1b8816f972a,1 +np.float64,0x800f16242a1e2c48,0x800f16242a1e2c48,1 +np.float64,0xffeeff8e1dbdff1b,0xc000b555f117dce7,1 +np.float64,0x3fdf1cf73fbe39f0,0x3fe0e9032401135b,1 +np.float64,0x7fe19c388b633870,0x3fd5271b69317d5b,1 +np.float64,0x918f226d231e5,0x918f226d231e5,1 +np.float64,0x4cc19ab499834,0x4cc19ab499834,1 +np.float64,0xbd3121d57a624,0xbd3121d57a624,1 +np.float64,0xbfd145d334a28ba6,0xbfd1b468866124d6,1 +np.float64,0x8bdbf41517b7f,0x8bdbf41517b7f,1 +np.float64,0x3fd1b8cb3ea37198,0x3fd2306b13396cae,1 +np.float64,0xbfd632a959ac6552,0xbfd7220fcfb5ef78,1 +np.float64,0x1cdaafc639b57,0x1cdaafc639b57,1 +np.float64,0x3febdcce1577b99c,0x3ff2fe076195a2bc,1 +np.float64,0x7fca6e945934dd28,0x3ff43040df7024e8,1 +np.float64,0x3fbe08e78e3c11cf,0x3fbe2c60e6b48f75,1 +np.float64,0x7fc1ed0d0523da19,0x3ff55f8dcad9440f,1 +np.float64,0xbfdc729b8cb8e538,0xbfde7b6e15dd60c4,1 +np.float64,0x3fd219404f243281,0x3fd298d7b3546531,1 +np.float64,0x3fe715c3f56e2b88,0x3fec255b5a59456e,1 +np.float64,0x7fe8b88e74b1711c,0x3ff60efd2c81d13d,1 +np.float64,0xa1d2b9fd43a57,0xa1d2b9fd43a57,1 +np.float64,0xffc1818223230304,0xbfb85c6c1e8018e7,1 +np.float64,0x3fde38ac8b3c7159,0x3fe0580c7e228576,1 +np.float64,0x8008faf7b491f5f0,0x8008faf7b491f5f0,1 +np.float64,0xffe7a1d751af43ae,0xbf7114cd7bbcd981,1 +np.float64,0xffec2db1b4b85b62,0xbff5cae759667f83,1 
+np.float64,0x7fefce1ae27f9c35,0x3ff4b8b88f4876cf,1 +np.float64,0x7fd1ff56a523feac,0xbff342ce192f14dd,1 +np.float64,0x80026b3e3f84d67d,0x80026b3e3f84d67d,1 +np.float64,0xffedee5879bbdcb0,0xc02fae11508b2be0,1 +np.float64,0x8003c0dc822781ba,0x8003c0dc822781ba,1 +np.float64,0xffe38a79eca714f4,0xc008aa23b7a63980,1 +np.float64,0xbfda70411eb4e082,0xbfdc0d7e29c89010,1 +np.float64,0x800a5e34f574bc6a,0x800a5e34f574bc6a,1 +np.float64,0x3fc19fac6e233f59,0x3fc1bc66ac0d73d4,1 +np.float64,0x3a8a61ea7514d,0x3a8a61ea7514d,1 +np.float64,0x3fb57b536e2af6a0,0x3fb588451f72f44c,1 +np.float64,0x7fd68c6d082d18d9,0xc032ac926b665c9a,1 +np.float64,0xd5b87cfdab710,0xd5b87cfdab710,1 +np.float64,0xfe80b20bfd017,0xfe80b20bfd017,1 +np.float64,0x3fef8781e37f0f04,0x3ff8215fe2c1315a,1 +np.float64,0xffedddbb9c3bbb76,0x3fd959b82258a32a,1 +np.float64,0x3fc7d41f382fa83e,0x3fc81b94c3a091ba,1 +np.float64,0xffc3275dcf264ebc,0x3fb2b3d4985c6078,1 +np.float64,0x7fe34d2b7ba69a56,0x40001f3618e3c7c9,1 +np.float64,0x3fd64ae35fac95c7,0x3fd73d77e0b730f8,1 +np.float64,0x800e53bf6b3ca77f,0x800e53bf6b3ca77f,1 +np.float64,0xbfddf7c9083bef92,0xbfe02f392744d2d1,1 +np.float64,0x1c237cc038471,0x1c237cc038471,1 +np.float64,0x3fe4172beea82e58,0x3fe739b4bf16bc7e,1 +np.float64,0xfa950523f52a1,0xfa950523f52a1,1 +np.float64,0xffc839a2c5307344,0xbff70ff8a3c9247f,1 +np.float64,0x264f828c4c9f1,0x264f828c4c9f1,1 +np.float64,0x148a650a2914e,0x148a650a2914e,1 +np.float64,0x3fe8d255c0b1a4ac,0x3fef623c3ea8d6e3,1 +np.float64,0x800f4fbb28be9f76,0x800f4fbb28be9f76,1 +np.float64,0x7fdca57bcfb94af7,0x3ff51207563fb6cb,1 +np.float64,0x3fe4944107692882,0x3fe7fad593235364,1 +np.float64,0x800119b4f1a2336b,0x800119b4f1a2336b,1 +np.float64,0xbfe734075e6e680e,0xbfec5b35381069f2,1 +np.float64,0xffeb3c00db767801,0xbfbbd7d22df7b4b3,1 +np.float64,0xbfe95c658cb2b8cb,0xbff03ad5e0bc888a,1 +np.float64,0xffeefeb58fbdfd6a,0xbfd5c9264deb0e11,1 +np.float64,0x7fccc80fde39901f,0xc012c60f914f3ca2,1 +np.float64,0x3fe5da289c2bb451,0x3fea07ad00a0ca63,1 +np.float64,0x800e364b0a5c6c96,0x800e364b0a5c6c96,1 +np.float64,0x3fcf9ea7d23f3d50,0x3fd023b72e8c9dcf,1 +np.float64,0x800a475cfc948eba,0x800a475cfc948eba,1 +np.float64,0xffd4e0d757a9c1ae,0xbfa89d573352e011,1 +np.float64,0xbfd4dbec8229b7da,0xbfd5a165f12c7c40,1 +np.float64,0xffe307ab51260f56,0x3fe6b1639da58c3f,1 +np.float64,0xbfe6955a546d2ab4,0xbfeb44ae2183fee9,1 +np.float64,0xbfca1f18f5343e30,0xbfca7d804ccccdf4,1 +np.float64,0xe9f4dfebd3e9c,0xe9f4dfebd3e9c,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x8008e69c0fb1cd38,0x8008e69c0fb1cd38,1 +np.float64,0xbfead1ccf975a39a,0xbff1c84b3db8ca93,1 +np.float64,0x25a982424b531,0x25a982424b531,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x80056204ea0ac40b,0x80056204ea0ac40b,1 +np.float64,0x800d1442d07a2886,0x800d1442d07a2886,1 +np.float64,0xbfaef3dadc3de7b0,0xbfaefd85ae6205f0,1 +np.float64,0x7fe969ce4b32d39c,0xbff3c4364fc6778f,1 +np.float64,0x7fe418bac0a83175,0x402167d16b1efe0b,1 +np.float64,0x3fd7c82a25af9054,0x3fd8f0c701315672,1 +np.float64,0x80013782a7826f06,0x80013782a7826f06,1 +np.float64,0x7fc031c7ee20638f,0x400747ab705e6904,1 +np.float64,0x3fe8cf327ff19e65,0x3fef5c14f8aafa89,1 +np.float64,0xbfe331a416a66348,0xbfe5e2290a098dd4,1 +np.float64,0x800607b2116c0f65,0x800607b2116c0f65,1 +np.float64,0x7fb40448f0280891,0xbfd43d4f0ffa1d64,1 +np.float64,0x7fefffffffffffff,0xbf74530cfe729484,1 +np.float64,0x3fe39b5444a736a9,0x3fe67eaa0b6acf27,1 +np.float64,0x3fee4733c4fc8e68,0x3ff631eabeef9696,1 +np.float64,0xbfec840f3b79081e,0xbff3cc8563ab2e74,1 
+np.float64,0xbfc8f6854c31ed0c,0xbfc948caacb3bba0,1 +np.float64,0xffbcf754a639eea8,0xbfc88d17cad3992b,1 +np.float64,0x8000bd3163417a64,0x8000bd3163417a64,1 +np.float64,0x3fe766d0eaeecda2,0x3fecb660882f7024,1 +np.float64,0xb6cc30156d986,0xb6cc30156d986,1 +np.float64,0xffc0161f9f202c40,0x3fe19bdefe5cf8b1,1 +np.float64,0xffe1e462caa3c8c5,0x3fe392c47feea17b,1 +np.float64,0x30a36a566146e,0x30a36a566146e,1 +np.float64,0x3fa996f580332deb,0x3fa99c6b4f2abebe,1 +np.float64,0x3fba71716e34e2e0,0x3fba899f35edba1d,1 +np.float64,0xbfe8f7e5e971efcc,0xbfefac431a0e3d55,1 +np.float64,0xf48f1803e91e3,0xf48f1803e91e3,1 +np.float64,0x7fe3edc0a127db80,0xc03d1a579a5d74a8,1 +np.float64,0xffeba82056375040,0x3fdfd701308700db,1 +np.float64,0xbfeb5a924cf6b524,0xbff2640de7cd107f,1 +np.float64,0xfa4cd1a9f499a,0xfa4cd1a9f499a,1 +np.float64,0x800de1be7b9bc37d,0x800de1be7b9bc37d,1 +np.float64,0xffd44e56ad289cae,0x3fdf4b8085db9b67,1 +np.float64,0xbfe4fb3aea69f676,0xbfe89d2cc46fcc50,1 +np.float64,0xbfe596495d6b2c92,0xbfe997a589a1f632,1 +np.float64,0x6f55a2b8deab5,0x6f55a2b8deab5,1 +np.float64,0x7fe72dc4712e5b88,0x4039c4586b28c2bc,1 +np.float64,0x89348bd712692,0x89348bd712692,1 +np.float64,0xffe062156120c42a,0x4005f0580973bc77,1 +np.float64,0xbfeabc714d7578e2,0xbff1b07e2fa57dc0,1 +np.float64,0x8003a56b3e874ad7,0x8003a56b3e874ad7,1 +np.float64,0x800eeadfb85dd5c0,0x800eeadfb85dd5c0,1 +np.float64,0x46d77a4c8daf0,0x46d77a4c8daf0,1 +np.float64,0x8000c06e7dc180de,0x8000c06e7dc180de,1 +np.float64,0x3fe428d211e851a4,0x3fe754b1c00a89bc,1 +np.float64,0xc5be11818b7c2,0xc5be11818b7c2,1 +np.float64,0x7fefc244893f8488,0x401133dc54f52de5,1 +np.float64,0x3fde30eee93c61de,0x3fe0532b827543a6,1 +np.float64,0xbfd447f48b288fea,0xbfd4fd0654f90718,1 +np.float64,0xbfde98dc7b3d31b8,0xbfe094df12f84a06,1 +np.float64,0x3fed2c1a1dfa5834,0x3ff4a6c4f3470a65,1 +np.float64,0xbfe992165073242d,0xbff071ab039c9177,1 +np.float64,0x3fd0145d1b2028ba,0x3fd06d3867b703dc,1 +np.float64,0x3fe179457362f28b,0x3fe3722f1d045fda,1 +np.float64,0x800e28964fbc512d,0x800e28964fbc512d,1 +np.float64,0x8004a5d785294bb0,0x8004a5d785294bb0,1 +np.float64,0xbfd652f2272ca5e4,0xbfd7469713125120,1 +np.float64,0x7fe61f49036c3e91,0xbf9b6ccdf2d87e70,1 +np.float64,0xffb7d47dd02fa8f8,0xc004449a82320b13,1 +np.float64,0x3feb82f996b705f3,0x3ff29336c738a4c5,1 +np.float64,0x3fbb7fceea36ffa0,0x3fbb9b02c8ad7f93,1 +np.float64,0x80004519fb208a35,0x80004519fb208a35,1 +np.float64,0xbfe0539114e0a722,0xbfe1e86dc5aa039c,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe99d1125f33a22,0xbff07cf8ec04300f,1 +np.float64,0xffd4fbeecc29f7de,0x3ffab76775a8455f,1 +np.float64,0xbfbf1c618e3e38c0,0xbfbf43d2764a8333,1 +np.float64,0x800cae02a9d95c06,0x800cae02a9d95c06,1 +np.float64,0x3febc47d3bf788fa,0x3ff2e0d7cf8ef509,1 +np.float64,0x3fef838f767f071f,0x3ff81aeac309bca0,1 +np.float64,0xbfd5e70716abce0e,0xbfd6ccb033ef7a35,1 +np.float64,0x3f9116fa60222df5,0x3f9117625f008e0b,1 +np.float64,0xffe02b1e5f20563c,0xbfe6b2ec293520b7,1 +np.float64,0xbf9b5aec3036b5e0,0xbf9b5c96c4c7f951,1 +np.float64,0xfdb0169bfb603,0xfdb0169bfb603,1 +np.float64,0x7fcdd1d51c3ba3a9,0x401f0e12fa0b7570,1 +np.float64,0xbfd088103fa11020,0xbfd0e8c4a333ffb2,1 +np.float64,0x3fe22df82ee45bf0,0x3fe46d03a7c14de2,1 +np.float64,0xbfd57b0c28aaf618,0xbfd65349a6191de5,1 +np.float64,0x3fe0a42f50a1485f,0x3fe252e26775d9a4,1 +np.float64,0x800fab4e363f569c,0x800fab4e363f569c,1 +np.float64,0xffe9f0ed63f3e1da,0xbfe278c341b171d5,1 +np.float64,0x7fe26c244664d848,0xbfb325269dad1996,1 +np.float64,0xffe830410bf06081,0xc00181a39f606e96,1 
+np.float64,0x800c548a0c78a914,0x800c548a0c78a914,1 +np.float64,0x800f94761ebf28ec,0x800f94761ebf28ec,1 +np.float64,0x3fe5984845eb3091,0x3fe99aeb653c666d,1 +np.float64,0x7fe93e5bf8f27cb7,0xc010d159fa27396a,1 +np.float64,0xffefffffffffffff,0x3f74530cfe729484,1 +np.float64,0x4c83f1269907f,0x4c83f1269907f,1 +np.float64,0x3fde0065a8bc00cc,0x3fe034a1cdf026d4,1 +np.float64,0x800743810d6e8703,0x800743810d6e8703,1 +np.float64,0x80040662d5280cc6,0x80040662d5280cc6,1 +np.float64,0x3fed20b2c5ba4166,0x3ff497988519d7aa,1 +np.float64,0xffe8fa15e5f1f42b,0x3fff82ca76d797b4,1 +np.float64,0xbb72e22f76e5d,0xbb72e22f76e5d,1 +np.float64,0x7fc18ffa7c231ff4,0xbff4b8b4c3315026,1 +np.float64,0xbfe8d1ac44f1a358,0xbfef60efc4f821e3,1 +np.float64,0x3fd38c1fe8271840,0x3fd42dc37ff7262b,1 +np.float64,0xe577bee5caef8,0xe577bee5caef8,1 +np.float64,0xbff0000000000000,0xbff8eb245cbee3a6,1 +np.float64,0xffcb3a9dd436753c,0x3fcd1a3aff1c3fc7,1 +np.float64,0x7fe44bf2172897e3,0x3ff60bfe82a379f4,1 +np.float64,0x8009203823924071,0x8009203823924071,1 +np.float64,0x7fef8e0abc7f1c14,0x3fe90e4962d47ce5,1 +np.float64,0xffda50004434a000,0x3fb50dee03e1418b,1 +np.float64,0x7fe2ff276ea5fe4e,0xc0355b7d2a0a8d9d,1 +np.float64,0x3fd0711ba5a0e238,0x3fd0d03823d2d259,1 +np.float64,0xe7625b03cec4c,0xe7625b03cec4c,1 +np.float64,0xbfd492c8d7a92592,0xbfd55006cde8d300,1 +np.float64,0x8001fee99f23fdd4,0x8001fee99f23fdd4,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xfa15df97f42bc,0xfa15df97f42bc,1 +np.float64,0xbfec3fdca9787fb9,0xbff377164b13c7a9,1 +np.float64,0xbcec10e579d82,0xbcec10e579d82,1 +np.float64,0xbfc3b4e2132769c4,0xbfc3dd1fcc7150a6,1 +np.float64,0x80045b149ee8b62a,0x80045b149ee8b62a,1 +np.float64,0xffe044554c2088aa,0xbff741436d558785,1 +np.float64,0xffcc65f09f38cbe0,0xc0172b4adc2d317d,1 +np.float64,0xf68b2d3bed166,0xf68b2d3bed166,1 +np.float64,0x7fc7f44c572fe898,0x3fec69f3b1eca790,1 +np.float64,0x3fac51f61438a3ec,0x3fac595d34156002,1 +np.float64,0xbfeaa9f256f553e5,0xbff19bfdf5984326,1 +np.float64,0x800e4742149c8e84,0x800e4742149c8e84,1 +np.float64,0xbfc493df132927c0,0xbfc4c1ba4268ead9,1 +np.float64,0xbfbf0c56383e18b0,0xbfbf3389fcf50c72,1 +np.float64,0xbf978a0e082f1420,0xbf978b1dd1da3d3c,1 +np.float64,0xbfe04375356086ea,0xbfe1d34c57314dd1,1 +np.float64,0x3feaeeb29b75dd65,0x3ff1e8b772374979,1 +np.float64,0xbfe15e42c3a2bc86,0xbfe34d45d56c5c15,1 +np.float64,0x3fe507429a6a0e85,0x3fe8b058176b3225,1 +np.float64,0x3feee2b26c3dc565,0x3ff71b73203de921,1 +np.float64,0xbfd496577aa92cae,0xbfd553fa7fe15a5f,1 +np.float64,0x7fe2c10953e58212,0x3fc8ead6a0d14bbf,1 +np.float64,0x800035b77aa06b70,0x800035b77aa06b70,1 +np.float64,0x2329201e46525,0x2329201e46525,1 +np.float64,0xbfe6225c9a6c44b9,0xbfea80861590fa02,1 +np.float64,0xbfd6925030ad24a0,0xbfd78e70b1c2215d,1 +np.float64,0xbfd82225c4b0444c,0xbfd958a60f845b39,1 +np.float64,0xbb03d8a17609,0xbb03d8a17609,1 +np.float64,0x7fc33967b12672ce,0x40001e00c9af4002,1 +np.float64,0xff9373c6d026e780,0xbff308654a459d3d,1 +np.float64,0x3feab1f9c5f563f4,0x3ff1a4e0fd2f093d,1 +np.float64,0xbf993ef768327de0,0xbf994046b64e308b,1 +np.float64,0xffb87382fc30e708,0xbfde0accb83c891b,1 +np.float64,0x800bb3a118176743,0x800bb3a118176743,1 +np.float64,0x800c810250d90205,0x800c810250d90205,1 +np.float64,0xbfd2c4eb9ba589d8,0xbfd3539508b4a4a8,1 +np.float64,0xbee1f5437dc3f,0xbee1f5437dc3f,1 +np.float64,0x3fc07aeab520f5d8,0x3fc0926272f9d8e2,1 +np.float64,0xbfe23747a3246e90,0xbfe47a20a6e98687,1 +np.float64,0x3fde1296debc252c,0x3fe0401143ff6b5c,1 +np.float64,0xbfcec8c2f73d9184,0xbfcf644e25ed3b74,1 
+np.float64,0xff9314f2c82629e0,0x40559a0f9099dfd1,1 +np.float64,0xbfe27487afa4e910,0xbfe4d0e01200bde6,1 +np.float64,0xffb3d6637627acc8,0x3fe326d4b1e1834f,1 +np.float64,0xffe6f84d642df09a,0x3fc73fa9f57c3acb,1 +np.float64,0xffe67cf76fecf9ee,0xc01cf48c97937ef9,1 +np.float64,0x7fdc73fc12b8e7f7,0xbfcfcecde9331104,1 +np.float64,0xffdcf8789239f0f2,0x3fe345e3b8e28776,1 +np.float64,0x800a70af5314e15f,0x800a70af5314e15f,1 +np.float64,0xffc862300730c460,0x3fc4e9ea813beca7,1 +np.float64,0xbfcc6961bd38d2c4,0xbfcce33bfa6c6bd1,1 +np.float64,0xbfc9b76bbf336ed8,0xbfca117456ac37e5,1 +np.float64,0x7fb86e829430dd04,0x400a5bd7a18e302d,1 +np.float64,0x7fb9813ef833027d,0xbfe5a6494f143625,1 +np.float64,0x8005085e2c2a10bd,0x8005085e2c2a10bd,1 +np.float64,0xffe5af099d6b5e12,0x40369bbe31e03e06,1 +np.float64,0xffde03b1fd3c0764,0x3ff061120aa1f52a,1 +np.float64,0x7fa4eb6cdc29d6d9,0x3fe9defbe9010322,1 +np.float64,0x800803f4b11007ea,0x800803f4b11007ea,1 +np.float64,0x7febd50f6df7aa1e,0xbffcf540ccf220dd,1 +np.float64,0x7fed454f08fa8a9d,0xbffc2a8b81079403,1 +np.float64,0xbfed7e8c69bafd19,0xbff5161e51ba6634,1 +np.float64,0xffef92e78eff25ce,0xbffefeecddae0ad3,1 +np.float64,0x7fe5b9b413ab7367,0xbfc681ba29704176,1 +np.float64,0x29284e805252,0x29284e805252,1 +np.float64,0xffed3955bcfa72ab,0xbfc695acb5f468de,1 +np.float64,0x3fe464ee1ca8c9dc,0x3fe7b140ce50fdca,1 +np.float64,0xffe522ae4bea455c,0x3feb957c146e66ef,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fd0c353a2a186a8,0x3fd1283aaa43a411,1 +np.float64,0x3fdb30a749b6614f,0x3fdcf40df006ed10,1 +np.float64,0x800109213cc21243,0x800109213cc21243,1 +np.float64,0xbfe72aa0c5ee5542,0xbfec4a713f513bc5,1 +np.float64,0x800865344ad0ca69,0x800865344ad0ca69,1 +np.float64,0x7feb7df60eb6fbeb,0x3fb1df06a67aa22f,1 +np.float64,0x3fe83a5dd93074bc,0x3fee3d63cda72636,1 +np.float64,0xbfde70e548bce1ca,0xbfe07b8e19c9dac6,1 +np.float64,0xbfeea38d537d471b,0xbff6bb18c230c0be,1 +np.float64,0x3fefeebbc47fdd78,0x3ff8cdaa53b7c7b4,1 +np.float64,0x7fe6512e20eca25b,0xbff623cee44a22b5,1 +np.float64,0xf8fa5ca3f1f4c,0xf8fa5ca3f1f4c,1 +np.float64,0x7fd12d00ed225a01,0xbfe90d518ea61faf,1 +np.float64,0x80027db43504fb69,0x80027db43504fb69,1 +np.float64,0xffc10a01aa221404,0x3fcc2065b3d0157b,1 +np.float64,0xbfef8286e87f050e,0xbff8193a54449b59,1 +np.float64,0xbfc73178092e62f0,0xbfc7735072ba4593,1 +np.float64,0x3fc859d70630b3ae,0x3fc8a626522af1c0,1 +np.float64,0x3fe4654c4268ca99,0x3fe7b1d2913eda1a,1 +np.float64,0xbfce93cd843d279c,0xbfcf2c2ef16a0957,1 +np.float64,0xffbcaa16d4395430,0xbfd511ced032d784,1 +np.float64,0xbfe91f980e723f30,0xbfeffb39cf8c7746,1 +np.float64,0x800556fb6f0aadf8,0x800556fb6f0aadf8,1 +np.float64,0xffd009cde520139c,0x3fe4fa83b1e93d28,1 +np.float64,0x7febc0675e3780ce,0x3feb53930c004dae,1 +np.float64,0xbfe7f975bdeff2ec,0xbfedc36e6729b010,1 +np.float64,0x45aff57c8b5ff,0x45aff57c8b5ff,1 +np.float64,0xbfec7ebd0138fd7a,0xbff3c5cab680aae0,1 +np.float64,0x8009448003b28900,0x8009448003b28900,1 +np.float64,0x3fca4b992d349732,0x3fcaabebcc86aa9c,1 +np.float64,0x3fca069161340d20,0x3fca63ecc742ff3a,1 +np.float64,0x80063bc80bec7791,0x80063bc80bec7791,1 +np.float64,0xbfe1764bffe2ec98,0xbfe36e1cb30cec94,1 +np.float64,0xffd0dba72f21b74e,0x3fb1834964d57ef6,1 +np.float64,0xbfe31848fc263092,0xbfe5bd066445cbc3,1 +np.float64,0xbfd1fb227323f644,0xbfd278334e27f02d,1 +np.float64,0xffdc59069fb8b20e,0xbfdfc363f559ea2c,1 +np.float64,0x3fdea52a52bd4a55,0x3fe09cada4e5344c,1 +np.float64,0x3f715e55a022bd00,0x3f715e5c72a2809e,1 +np.float64,0x1d1ac6023a35a,0x1d1ac6023a35a,1 
+np.float64,0x7feacc71627598e2,0x400486b82121da19,1 +np.float64,0xa0287fa340510,0xa0287fa340510,1 +np.float64,0xffe352c5abe6a58b,0xc002623346060543,1 +np.float64,0x7fed577a23baaef3,0x3fda19bc8fa3b21f,1 +np.float64,0x3fde8dd5263d1baa,0x3fe08de0fedf7029,1 +np.float64,0x3feddd3be2bbba78,0x3ff599b2f3e018cc,1 +np.float64,0xc7a009f58f401,0xc7a009f58f401,1 +np.float64,0xbfef03d5a4fe07ab,0xbff74ee08681f47b,1 +np.float64,0x7fe2cf60eea59ec1,0x3fe905fb44f8cc60,1 +np.float64,0xbfe498fcab6931fa,0xbfe8023a6ff8becf,1 +np.float64,0xbfef7142acfee285,0xbff7fd196133a595,1 +np.float64,0xd214ffdba42a0,0xd214ffdba42a0,1 +np.float64,0x8006de7d78cdbcfc,0x8006de7d78cdbcfc,1 +np.float64,0xb247d34f648fb,0xb247d34f648fb,1 +np.float64,0xbfdd5bece6bab7da,0xbfdf9ba63ca2c5b2,1 +np.float64,0x7fe874650af0e8c9,0x3fe74204e122c10f,1 +np.float64,0x800768c49baed18a,0x800768c49baed18a,1 +np.float64,0x3fb4c0a192298140,0x3fb4cc4c8aa43300,1 +np.float64,0xbfa740531c2e80a0,0xbfa7446b7c74ae8e,1 +np.float64,0x7fe10d6edf221add,0x3fedbcd2eae26657,1 +np.float64,0xbfe9175d0f722eba,0xbfefeaca7f32c6e3,1 +np.float64,0x953e11d32a7c2,0x953e11d32a7c2,1 +np.float64,0x80032df90c465bf3,0x80032df90c465bf3,1 +np.float64,0xffec5b799638b6f2,0xbfe95cd2c69be12c,1 +np.float64,0xffe0c3cfa9a1879f,0x3fe20b99b0c108ce,1 +np.float64,0x3fb610d8e22c21b2,0x3fb61ee0d6c16df8,1 +np.float64,0xffe16bb39962d766,0xc016d370381b6b42,1 +np.float64,0xbfdc72edb238e5dc,0xbfde7bd2de10717a,1 +np.float64,0xffed52dee3baa5bd,0xc01994c08899129a,1 +np.float64,0xffa92aab08325550,0xbff2b881ce363cbd,1 +np.float64,0x7fe028282de0504f,0xc0157ff96c69a9c7,1 +np.float64,0xbfdb2151bf3642a4,0xbfdce196fcc35857,1 +np.float64,0x3fcffbd13c3ff7a2,0x3fd0554b5f0371ac,1 +np.float64,0x800d206bff1a40d8,0x800d206bff1a40d8,1 +np.float64,0x458f818c8b1f1,0x458f818c8b1f1,1 +np.float64,0x800a7b56a234f6ae,0x800a7b56a234f6ae,1 +np.float64,0xffe3d86161e7b0c2,0xbff58d0dbde9f188,1 +np.float64,0xe8ed82e3d1db1,0xe8ed82e3d1db1,1 +np.float64,0x3fe234e0176469c0,0x3fe476bd36b96a75,1 +np.float64,0xbfc7cb9c132f9738,0xbfc812c46e185e0b,1 +np.float64,0xbfeba116c1f7422e,0xbff2b6b7563ad854,1 +np.float64,0x7fe7041de62e083b,0x3f5d2b42aca47274,1 +np.float64,0xbfcf60f4ff3ec1e8,0xbfd002eb83406436,1 +np.float64,0xbfc06067a520c0d0,0xbfc0776e5839ecda,1 +np.float64,0x4384965a87093,0x4384965a87093,1 +np.float64,0xd2ed9d01a5db4,0xd2ed9d01a5db4,1 +np.float64,0x3fbea88cb63d5119,0x3fbece49cc34a379,1 +np.float64,0x3fe7e982ebefd306,0x3feda5bd4c435d43,1 +np.float64,0xffdb60a3e036c148,0xbfcb7ed21e7a8f49,1 +np.float64,0x7fdba9231eb75245,0xbfd750cab1536398,1 +np.float64,0x800d593534dab26b,0x800d593534dab26b,1 +np.float64,0xffdf15fb683e2bf6,0x3fb3aaea23357f06,1 +np.float64,0xbfd6f8a2e5adf146,0xbfd802e509d67c67,1 +np.float64,0x3feeaa31513d5463,0x3ff6c52147dc053c,1 +np.float64,0xf2f6dfd3e5edc,0xf2f6dfd3e5edc,1 +np.float64,0x7fd58d8279ab1b04,0x403243f23d02af2a,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fdffb8e0ebff71c,0x3fe1786cb0a6b0f3,1 +np.float64,0xc999826b93331,0xc999826b93331,1 +np.float64,0xffc4966f19292ce0,0x3ff0836c75c56cc7,1 +np.float64,0x7fef95a4b2ff2b48,0xbfbbe2c27c78154f,1 +np.float64,0xb8f1307f71e26,0xb8f1307f71e26,1 +np.float64,0x3fe807bc7eb00f79,0x3fedde19f2d3c42d,1 +np.float64,0x5e4b6580bc98,0x5e4b6580bc98,1 +np.float64,0xffe19353576326a6,0xc0278c51fee07d36,1 +np.float64,0xbfb0ca6f3e2194e0,0xbfb0d09be673fa72,1 +np.float64,0x3fea724211b4e484,0x3ff15ee06f0a0a13,1 +np.float64,0xbfda21e1c4b443c4,0xbfdbb041f3c86832,1 +np.float64,0x8008082b24901057,0x8008082b24901057,1 
+np.float64,0xbfd031aa4ea06354,0xbfd08c77729634bb,1 +np.float64,0xbfc407e153280fc4,0xbfc432275711df5f,1 +np.float64,0xbb4fa4b5769f5,0xbb4fa4b5769f5,1 +np.float64,0x7fed6d1daffada3a,0xc037a14bc7b41fab,1 +np.float64,0xffeee589943dcb12,0x3ff2abfe47037778,1 +np.float64,0x301379d260270,0x301379d260270,1 +np.float64,0xbfec2fefc2b85fe0,0xbff36362c0363e06,1 +np.float64,0xbfe0b1c82e216390,0xbfe264f503f7c22c,1 +np.float64,0xbfea2bce78f4579d,0xbff112d6f07935ea,1 +np.float64,0x18508ef230a13,0x18508ef230a13,1 +np.float64,0x800667a74d6ccf4f,0x800667a74d6ccf4f,1 +np.float64,0x79ce5c8cf39cc,0x79ce5c8cf39cc,1 +np.float64,0x3feda61c8efb4c39,0x3ff54c9ade076f54,1 +np.float64,0x3fe27e06b0e4fc0d,0x3fe4de665c1dc3ca,1 +np.float64,0xbfd15fea2722bfd4,0xbfd1d081c55813b0,1 +np.float64,0xbfe5222c4cea4458,0xbfe8db62deb7d2ad,1 +np.float64,0xbfe8a16c33b142d8,0xbfef02d5831592a8,1 +np.float64,0x3fdb60e7c4b6c1d0,0x3fdd2e4265c4c3b6,1 +np.float64,0x800076d62b60edad,0x800076d62b60edad,1 +np.float64,0xbfec8f1527791e2a,0xbff3da7ed3641e8d,1 +np.float64,0x2af03bfe55e08,0x2af03bfe55e08,1 +np.float64,0xa862ee0950c5e,0xa862ee0950c5e,1 +np.float64,0x7fea5a7c1eb4b4f7,0xbffa6f07d28ef211,1 +np.float64,0x90e118fb21c23,0x90e118fb21c23,1 +np.float64,0xbfead0721bf5a0e4,0xbff1c6c7a771a128,1 +np.float64,0x3f63f4a4c027e94a,0x3f63f4a75665da67,1 +np.float64,0x3fece0efa579c1e0,0x3ff443bec52f021e,1 +np.float64,0xbfdbe743b737ce88,0xbfddd129bff89c15,1 +np.float64,0x3fd48c9b8fa91938,0x3fd5492a630a8cb5,1 +np.float64,0x3ff0000000000000,0x3ff8eb245cbee3a6,1 +np.float64,0xbfd51ea33baa3d46,0xbfd5ebd5dc710204,1 +np.float64,0x3fcfbab0183f7560,0x3fd032a054580b00,1 +np.float64,0x8007abce13cf579d,0x8007abce13cf579d,1 +np.float64,0xbfef0f4723be1e8e,0xbff760c7008e8913,1 +np.float64,0x8006340f524c681f,0x8006340f524c681f,1 +np.float64,0x87b7d7010f71,0x87b7d7010f71,1 +np.float64,0x3fe9422da9b2845b,0x3ff02052e6148c45,1 +np.float64,0x7fddd259b93ba4b2,0xc000731aa33d84b6,1 +np.float64,0x3fe0156d12202ada,0x3fe1972ba309cb29,1 +np.float64,0x8004f1264b89e24d,0x8004f1264b89e24d,1 +np.float64,0x3fececdcacb9d9b9,0x3ff4534d5861f731,1 +np.float64,0x3fd1790ab822f215,0x3fd1eb97b1bb6fb4,1 +np.float64,0xffce5d11863cba24,0xbfcb4f38c17210da,1 +np.float64,0x800a30c32a546187,0x800a30c32a546187,1 +np.float64,0x3fa58cc61c2b198c,0x3fa59008add7233e,1 +np.float64,0xbfe0ac77d62158f0,0xbfe25de3dba0bc4a,1 +np.float64,0xeb8c5753d718b,0xeb8c5753d718b,1 +np.float64,0x3fee5438dafca872,0x3ff644fef7e7adb5,1 +np.float64,0x3faad1eb2c35a3e0,0x3faad83499f94057,1 +np.float64,0x3fe39152c46722a6,0x3fe66fba0b96ab6e,1 +np.float64,0xffd6fd17712dfa2e,0xc010d697d1ab8731,1 +np.float64,0x5214a888a4296,0x5214a888a4296,1 +np.float64,0x8000127a5da024f5,0x8000127a5da024f5,1 +np.float64,0x7feb3a366cb6746c,0x3fbe49bd8d5f213a,1 +np.float64,0xca479501948f3,0xca479501948f3,1 +np.float64,0x7fe7c799ce6f8f33,0xbfd796cd98dc620c,1 +np.float64,0xffe20bcf30a4179e,0xbff8ca5453fa088f,1 +np.float64,0x3fe624638a6c48c7,0x3fea83f123832c3c,1 +np.float64,0xbfe5f1377c6be26f,0xbfea2e143a2d522c,1 +np.float64,0x7fd193f9f8a327f3,0xbfb04ee2602574d4,1 +np.float64,0xbfe7419d2fee833a,0xbfec737f140d363d,1 +np.float64,0x1,0x1,1 +np.float64,0x7fe2ac246c655848,0x3fd14fee3237727a,1 +np.float64,0xa459b42948b37,0xa459b42948b37,1 +np.float64,0x3fb26155ae24c2ab,0x3fb2696fc446d4c6,1 +np.float64,0xbfdd7b332e3af666,0xbfdfc296c21f1aa8,1 +np.float64,0xbfe00dbda4a01b7c,0xbfe18d2b060f0506,1 +np.float64,0x8003bb22d3e77646,0x8003bb22d3e77646,1 +np.float64,0x3fee21b0a57c4361,0x3ff5fb6a21dc911c,1 +np.float64,0x80ca69270194d,0x80ca69270194d,1 
+np.float64,0xbfd6d80350adb006,0xbfd7ddb501edbde0,1 +np.float64,0xd2f8b801a5f2,0xd2f8b801a5f2,1 +np.float64,0xbfe856b3f170ad68,0xbfee7334fdc49296,1 +np.float64,0x3fed5c1b20bab836,0x3ff4e73ee5d5c7f3,1 +np.float64,0xbfd58085a5ab010c,0xbfd6596ddc381ffa,1 +np.float64,0x3fe4f0134b29e027,0x3fe88b70602fbd21,1 +np.float64,0xffc9098fdc321320,0x4011c334a74a92cf,1 +np.float64,0x794749bef28ea,0x794749bef28ea,1 +np.float64,0xbfc86b547f30d6a8,0xbfc8b84a4fafe0af,1 +np.float64,0x7fe1356b9da26ad6,0x3fd270bca208d899,1 +np.float64,0x7fca0ef1aa341de2,0xbff851044c0734fa,1 +np.float64,0x80064cb8b62c9972,0x80064cb8b62c9972,1 +np.float64,0xffd3a09a83a74136,0x3ffb66dae0accdf5,1 +np.float64,0x800e301aa15c6035,0x800e301aa15c6035,1 +np.float64,0x800e51f323bca3e6,0x800e51f323bca3e6,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0x800c4278c87884f2,0x800c4278c87884f2,1 +np.float64,0xbfe8481649f0902c,0xbfee576772695096,1 +np.float64,0xffe2344e3fa4689c,0x3fb10442ec0888de,1 +np.float64,0xbfeada313d75b462,0xbff1d1aee3fab3a9,1 +np.float64,0x8009ddfb1333bbf7,0x8009ddfb1333bbf7,1 +np.float64,0x7fed3314c93a6629,0x3ff7a9b12dc1cd37,1 +np.float64,0x3fd55c26da2ab84e,0x3fd630a7b8aac78a,1 +np.float64,0x800cdb5203f9b6a4,0x800cdb5203f9b6a4,1 +np.float64,0xffd04a875da0950e,0x4009a13810ab121d,1 +np.float64,0x800f1acb527e3597,0x800f1acb527e3597,1 +np.float64,0xbf9519bf282a3380,0xbf951a82e9b955ff,1 +np.float64,0x3fcd7a42fa3af486,0x3fce028f3c51072d,1 +np.float64,0xbfdd3e21b73a7c44,0xbfdf769f2ff2480b,1 +np.float64,0xffd4361e2aa86c3c,0xbfc211ce8e9f792c,1 +np.float64,0x7fccf97f6939f2fe,0xbff8464bad830f06,1 +np.float64,0x800ce47fb939c900,0x800ce47fb939c900,1 +np.float64,0xffe9e51df173ca3b,0xbfceaf990d652c4e,1 +np.float64,0x3fe05bba5b20b775,0x3fe1f326e4455442,1 +np.float64,0x800a29b4b134536a,0x800a29b4b134536a,1 +np.float64,0xe6f794b7cdef3,0xe6f794b7cdef3,1 +np.float64,0xffb5b688ce2b6d10,0x3ff924bb97ae2f6d,1 +np.float64,0x7fa74105d82e820b,0x3fd49643aaa9eee4,1 +np.float64,0x80020d15f7a41a2d,0x80020d15f7a41a2d,1 +np.float64,0x3fd6a983d5ad5308,0x3fd7a8cc8835b5b8,1 +np.float64,0x7fcd9798f03b2f31,0x3fc534c2f7bf4721,1 +np.float64,0xffdd31873a3a630e,0xbfe3171fcdffb3f7,1 +np.float64,0x80075183234ea307,0x80075183234ea307,1 +np.float64,0x82f3132505e63,0x82f3132505e63,1 +np.float64,0x3febfd9cb837fb39,0x3ff325bbf812515d,1 +np.float64,0xbfb4630fda28c620,0xbfb46e1f802ec278,1 +np.float64,0x3feeed7c89fddafa,0x3ff72c20ce5a9ee4,1 +np.float64,0x7fd3dcb3c127b967,0x40123d27ec9ec31d,1 +np.float64,0xbfe923450c72468a,0xbff00149c5742725,1 +np.float64,0x7fdef7f91abdeff1,0xbfe02ceb21f7923d,1 +np.float64,0x7fdd70d28fbae1a4,0xbfefcc5c9d10cdfd,1 +np.float64,0x800ca445a8d9488c,0x800ca445a8d9488c,1 +np.float64,0x7fec2754e1f84ea9,0x40173f6c1c97f825,1 +np.float64,0x7fcbca31f7379463,0x401e26bd2667075b,1 +np.float64,0x8003fa1d0847f43b,0x8003fa1d0847f43b,1 +np.float64,0xffe95cf85932b9f0,0xc01308e60278aa11,1 +np.float64,0x8009c53948f38a73,0x8009c53948f38a73,1 +np.float64,0x3fdcca9226b99524,0x3fdee7a008f75d41,1 +np.float64,0xbfe9ee241f33dc48,0xbff0d16bfff6c8e9,1 +np.float64,0xbfb3365058266ca0,0xbfb33f9176ebb51d,1 +np.float64,0x7fa98e10f4331c21,0x3fdee04ffd31314e,1 +np.float64,0xbfe1a11aea634236,0xbfe3a8e3d84fda38,1 +np.float64,0xbfd8df051131be0a,0xbfda342805d1948b,1 +np.float64,0x3d49a2407a935,0x3d49a2407a935,1 +np.float64,0xfc51eefff8a3e,0xfc51eefff8a3e,1 +np.float64,0xda63950bb4c73,0xda63950bb4c73,1 +np.float64,0x80050f3d4fea1e7b,0x80050f3d4fea1e7b,1 +np.float64,0x3fcdbd6e453b7ae0,0x3fce497478c28e77,1 +np.float64,0x7ebd4932fd7aa,0x7ebd4932fd7aa,1 
+np.float64,0x7fa3904eac27209c,0xc0015f3125efc151,1 +np.float64,0x7fc59f956b2b3f2a,0xc00c012e7a2c281f,1 +np.float64,0xbfd436d716a86dae,0xbfd4ea13533a942b,1 +np.float64,0x9347ae3d268f6,0x9347ae3d268f6,1 +np.float64,0xffd001764d2002ec,0xbffab3462e515623,1 +np.float64,0x3fe6f406662de80d,0x3febe9bac3954999,1 +np.float64,0x3f943ecaf8287d96,0x3f943f77dee5e77f,1 +np.float64,0x3fd6250efcac4a1c,0x3fd712afa947d56f,1 +np.float64,0xbfe849ff777093ff,0xbfee5b089d03391f,1 +np.float64,0xffd3b8ef8f2771e0,0x4000463ff7f29214,1 +np.float64,0xbfc3bae9252775d4,0xbfc3e34c133f1933,1 +np.float64,0xbfea93943df52728,0xbff18355e4fc341d,1 +np.float64,0x3fc4d922ad29b245,0x3fc508d66869ef29,1 +np.float64,0x4329694a8652e,0x4329694a8652e,1 +np.float64,0x8834f1a71069e,0x8834f1a71069e,1 +np.float64,0xe0e5be8dc1cb8,0xe0e5be8dc1cb8,1 +np.float64,0x7fef4d103afe9a1f,0xc0047b88b94554fe,1 +np.float64,0x3fe9b57af4f36af6,0x3ff0963831d51c3f,1 +np.float64,0x3fe081e2fa6103c6,0x3fe22572e41be655,1 +np.float64,0x3fd78cf7b42f19ef,0x3fd8acafa1ad776a,1 +np.float64,0x7fbffd58d43ffab1,0x3fb16092c7de6036,1 +np.float64,0xbfe1e8bfae23d180,0xbfe40c1c6277dd52,1 +np.float64,0x800a9f59fb153eb4,0x800a9f59fb153eb4,1 +np.float64,0xffebe14e33b7c29c,0x3fe0ec532f4deedd,1 +np.float64,0xffc36ca00426d940,0xc000806a712d6e83,1 +np.float64,0xbfcc2be82d3857d0,0xbfcca2a7d372ec64,1 +np.float64,0x800c03b908780772,0x800c03b908780772,1 +np.float64,0xf315a64be62b5,0xf315a64be62b5,1 +np.float64,0xbfe644043cec8808,0xbfeab974d3dc6d80,1 +np.float64,0x3fedb7de3cbb6fbc,0x3ff56549a5acd324,1 +np.float64,0xbfb1a875522350e8,0xbfb1afa41dee338d,1 +np.float64,0xffee8d4a407d1a94,0x3fead1749a636ff6,1 +np.float64,0x8004061c13080c39,0x8004061c13080c39,1 +np.float64,0x3fe650ae7feca15c,0x3feacefb8bc25f64,1 +np.float64,0x3fda8340e6b50682,0x3fdc24275cab1df8,1 +np.float64,0x8009084344321087,0x8009084344321087,1 +np.float64,0x7fdd19cb823a3396,0xbfd1d8fb35d89e3f,1 +np.float64,0xbfe893172571262e,0xbfeee716b592b93c,1 +np.float64,0x8ff5acc11fec,0x8ff5acc11fec,1 +np.float64,0xbfdca0c57cb9418a,0xbfdeb42465a1b59e,1 +np.float64,0xffd77bd2a3aef7a6,0x4012cd69e85b82d8,1 +np.float64,0xbfe6ea78982dd4f1,0xbfebd8ec61fb9e1f,1 +np.float64,0x7fe14b1d80a2963a,0xc02241642102cf71,1 +np.float64,0x3fe712bf286e257e,0x3fec20012329a7fb,1 +np.float64,0x7fcb6fa4d636df49,0x400b899d14a886b3,1 +np.float64,0x3fb82cb39a305960,0x3fb83f29c5f0822e,1 +np.float64,0x7fed694c8b3ad298,0xbfe2724373c69808,1 +np.float64,0xbfcd21229f3a4244,0xbfcda497fc3e1245,1 +np.float64,0x564d3770ac9a8,0x564d3770ac9a8,1 +np.float64,0xf4409e13e8814,0xf4409e13e8814,1 +np.float64,0x80068dca9a8d1b96,0x80068dca9a8d1b96,1 +np.float64,0xbfe13f82afe27f06,0xbfe3236ddded353f,1 +np.float64,0x80023f8114647f03,0x80023f8114647f03,1 +np.float64,0xeafba7dfd5f75,0xeafba7dfd5f75,1 +np.float64,0x3feca74ddeb94e9c,0x3ff3f95dcce5a227,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbfebdb4141f7b682,0xbff2fc29823ac64a,1 +np.float64,0xbfcd75ee2f3aebdc,0xbfcdfdfd87cc6a29,1 +np.float64,0x7fc010cda420219a,0x3fae4ca2cf1f2657,1 +np.float64,0x1a90209e35205,0x1a90209e35205,1 +np.float64,0x8008057d01900afa,0x8008057d01900afa,1 +np.float64,0x3f9cb5f280396be5,0x3f9cb7dfb4e4be4e,1 +np.float64,0xffe1bbb60b63776c,0xc00011b1ffcb2561,1 +np.float64,0xffda883f6fb5107e,0x4044238ef4e2a198,1 +np.float64,0x3fc07c0b4a20f817,0x3fc09387de9eebcf,1 +np.float64,0x8003a9ebc0c753d8,0x8003a9ebc0c753d8,1 +np.float64,0x1d7fd5923affc,0x1d7fd5923affc,1 +np.float64,0xbfe9cd8cf9b39b1a,0xbff0af43e567ba4a,1 +np.float64,0x11285cb42250c,0x11285cb42250c,1 
+np.float64,0xffe81ae1ccb035c3,0xbfe038be7eb563a6,1 +np.float64,0xbfe56473b1eac8e8,0xbfe94654d8ab9e75,1 +np.float64,0x3fee904619fd208c,0x3ff69e198152fe17,1 +np.float64,0xbfeeb9a2cbfd7346,0xbff6dc8d96da78cd,1 +np.float64,0x8006cdfa59ed9bf5,0x8006cdfa59ed9bf5,1 +np.float64,0x8008f2366d31e46d,0x8008f2366d31e46d,1 +np.float64,0x8008d5f91e31abf3,0x8008d5f91e31abf3,1 +np.float64,0x3fe85886f8b0b10e,0x3fee76af16f5a126,1 +np.float64,0x3fefb9b2b73f7365,0x3ff8745128fa3e3b,1 +np.float64,0x7fdf3e721f3e7ce3,0xbfb19381541ca2a8,1 +np.float64,0x3fd2768c41a4ed18,0x3fd2fe2f85a3f3a6,1 +np.float64,0xbfcabe3c6a357c78,0xbfcb239fb88bc260,1 +np.float64,0xffdffb6a3dbff6d4,0xbff7af4759fd557c,1 +np.float64,0x800817f75f302fef,0x800817f75f302fef,1 +np.float64,0xbfe6a1d1762d43a3,0xbfeb5a399a095ef3,1 +np.float64,0x7fd6f32f912de65e,0x40016dedc51aabd0,1 +np.float64,0x3fc6cb26652d964d,0x3fc7099f047d924a,1 +np.float64,0x3fe8b975d67172ec,0x3fef31946123c0e7,1 +np.float64,0xffe44a09d1e89413,0x3fdee9e5eac6e540,1 +np.float64,0xbfece76d4cb9cedb,0xbff44c34849d07ba,1 +np.float64,0x7feb76027036ec04,0x3fe08595a5e263ac,1 +np.float64,0xffe194f591a329ea,0x3fbe5bd626400a70,1 +np.float64,0xbfc170698122e0d4,0xbfc18c3de8b63565,1 +np.float64,0x3fc82b2c0f305658,0x3fc875c3b5fbcd08,1 +np.float64,0x3fd5015634aa02ac,0x3fd5cb1df07213c3,1 +np.float64,0x7fe640884b6c8110,0xbff66255a420abb5,1 +np.float64,0x5a245206b448b,0x5a245206b448b,1 +np.float64,0xffe9d9fa2f73b3f4,0xc0272b0dd34ab9bf,1 +np.float64,0x3fd990e8aab321d0,0x3fdb04cd3a29bcc3,1 +np.float64,0xde9dda8bbd3bc,0xde9dda8bbd3bc,1 +np.float64,0xbfe81b32b4703666,0xbfee029937fa9f5a,1 +np.float64,0xbfe68116886d022d,0xbfeb21c62081cb73,1 +np.float64,0x3fb8da191231b432,0x3fb8ee28c71507d3,1 +np.float64,0x3fb111395a222273,0x3fb117b57de3dea4,1 +np.float64,0xffbafadc6a35f5b8,0x3ffcc6d2370297b9,1 +np.float64,0x8002ca475b05948f,0x8002ca475b05948f,1 +np.float64,0xbfeafef57875fdeb,0xbff1fb1315676f24,1 +np.float64,0x7fcda427d73b484f,0xbff9f70212694d17,1 +np.float64,0xffe2517b3ba4a2f6,0xc029ca6707305bf4,1 +np.float64,0x7fc5ee156b2bdc2a,0xbff8384b59e9056e,1 +np.float64,0xbfec22af3278455e,0xbff3530fe25816b4,1 +np.float64,0x6b5a8c2cd6b52,0x6b5a8c2cd6b52,1 +np.float64,0xffdaf6c4b935ed8a,0x4002f00ce58affcf,1 +np.float64,0x800a41813c748303,0x800a41813c748303,1 +np.float64,0xbfd09a1269213424,0xbfd0fc0a0c5de8eb,1 +np.float64,0x7fa2cb74d42596e9,0x3fc3d40e000fa69d,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3fbfbf8ed63f7f1e,0x3fbfe97bcad9f53a,1 +np.float64,0x7fe0ebba65a1d774,0x401b0f17b28618df,1 +np.float64,0x3fd02c3a25a05874,0x3fd086aa55b19c9c,1 +np.float64,0xec628f95d8c52,0xec628f95d8c52,1 +np.float64,0x3fd319329fa63264,0x3fd3afb04e0dec63,1 +np.float64,0x180e0ade301c2,0x180e0ade301c2,1 +np.float64,0xbfe8d78324f1af06,0xbfef6c66153064ee,1 +np.float64,0xffb89fa200313f48,0xbfeb96ff2d9358dc,1 +np.float64,0x7fe6abcf86ed579e,0xc0269f4de86365ec,1 +np.float64,0x7fdff8cd65bff19a,0xbfd0f7c6b9052c9a,1 +np.float64,0xbfd2e3a53d25c74a,0xbfd37520cda5f6b2,1 +np.float64,0x7fe844b096708960,0x3ff696a6182e5a7a,1 +np.float64,0x7fdce0c7a3b9c18e,0x3fd42875d69ed379,1 +np.float64,0xffba5a91cc34b520,0x4001b571e8991951,1 +np.float64,0xffe78fe4a6ef1fc9,0x3ff4507b31f5b3bc,1 +np.float64,0xbfd7047493ae08ea,0xbfd810618a53fffb,1 +np.float64,0xc6559def8cab4,0xc6559def8cab4,1 +np.float64,0x3fe75d67a76ebacf,0x3feca56817de65e4,1 +np.float64,0xffd24adbd6a495b8,0xc012c491addf2df5,1 +np.float64,0x7fed35e28dba6bc4,0x403a0fa555ff7ec6,1 +np.float64,0x80078c4afa0f1897,0x80078c4afa0f1897,1 +np.float64,0xa6ec39114dd87,0xa6ec39114dd87,1 
+np.float64,0x7fb1bd33ba237a66,0x4010092bb6810fd4,1 +np.float64,0x800ecf215edd9e43,0x800ecf215edd9e43,1 +np.float64,0x3fb7c169242f82d2,0x3fb7d2ed30c462e6,1 +np.float64,0xbf71b46d60236900,0xbf71b4749a10c112,1 +np.float64,0x800d7851787af0a3,0x800d7851787af0a3,1 +np.float64,0x3fcb4a45e7369488,0x3fcbb61701a1bcec,1 +np.float64,0x3fd4e3682429c6d0,0x3fd5a9bcb916eb94,1 +np.float64,0x800497564c292ead,0x800497564c292ead,1 +np.float64,0xbfca3737a1346e70,0xbfca96a86ae5d687,1 +np.float64,0x19aa87e03356,0x19aa87e03356,1 +np.float64,0xffb2593fe624b280,0xc05fedb99b467ced,1 +np.float64,0xbfdd8748fbbb0e92,0xbfdfd1a7df17252c,1 +np.float64,0x8004c7afc7098f60,0x8004c7afc7098f60,1 +np.float64,0x7fde48b2bf3c9164,0xbfe36ef1158ed420,1 +np.float64,0xbfec8e0eb0f91c1d,0xbff3d9319705a602,1 +np.float64,0xffea1be204f437c3,0xc0144f67298c3e6f,1 +np.float64,0x7fdb906b593720d6,0xbfce99233396eda7,1 +np.float64,0x3fef0f114ffe1e22,0x3ff76072a258a51b,1 +np.float64,0x3fe3e284c8e7c50a,0x3fe6e9b05e17c999,1 +np.float64,0xbfbda9eef23b53e0,0xbfbdcc1abb443597,1 +np.float64,0x3feb6454d4f6c8aa,0x3ff26f65a85baba4,1 +np.float64,0x3fea317439f462e8,0x3ff118e2187ef33f,1 +np.float64,0x376ad0646ed5b,0x376ad0646ed5b,1 +np.float64,0x7fdd461a1c3a8c33,0x3f7ba20fb79e785f,1 +np.float64,0xebc520a3d78a4,0xebc520a3d78a4,1 +np.float64,0x3fca90fe53352200,0x3fcaf45c7fae234d,1 +np.float64,0xbfe80dd1de701ba4,0xbfede97e12cde9de,1 +np.float64,0x3fd242b00ea48560,0x3fd2c5cf9bf69a31,1 +np.float64,0x7fe46c057828d80a,0xbfe2f76837488f94,1 +np.float64,0x3fc162bea322c580,0x3fc17e517c958867,1 +np.float64,0xffebf0452ff7e08a,0x3ffc3fd95c257b54,1 +np.float64,0xffd88043c6310088,0x4008b05598d0d95f,1 +np.float64,0x800d8c49da5b1894,0x800d8c49da5b1894,1 +np.float64,0xbfed33b487ba6769,0xbff4b0ea941f8a6a,1 +np.float64,0x16b881e22d711,0x16b881e22d711,1 +np.float64,0x288bae0051177,0x288bae0051177,1 +np.float64,0xffc83a0fe8307420,0x4006eff03da17f86,1 +np.float64,0x3fc7868b252f0d18,0x3fc7cb4954290324,1 +np.float64,0xbfe195514b232aa2,0xbfe398aae6c8ed76,1 +np.float64,0x800c001ae7f80036,0x800c001ae7f80036,1 +np.float64,0x7feb82abe7370557,0xbff1e13fe6fad23c,1 +np.float64,0xffecf609cdf9ec13,0xc0112aa1805ae59e,1 +np.float64,0xffddd654f63bacaa,0x3fe46cce899f710d,1 +np.float64,0x3fe2163138642c62,0x3fe44b9c760acd4c,1 +np.float64,0x4e570dc09cae2,0x4e570dc09cae2,1 +np.float64,0x7fe9e8d091f3d1a0,0xc000fe20f8e9a4b5,1 +np.float64,0x7fe60042952c0084,0x3fd0aa740f394c2a,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-tanh.csv b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-tanh.csv new file mode 100644 index 0000000..9e3ddc6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/data/umath-validation-set-tanh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbe26ebb0,0xbe25752f,2 +np.float32,0xbe22ecc0,0xbe219054,2 +np.float32,0x8010a6b3,0x8010a6b3,2 +np.float32,0x3135da,0x3135da,2 +np.float32,0xbe982afc,0xbe93d727,2 +np.float32,0x16a51f,0x16a51f,2 +np.float32,0x491e56,0x491e56,2 +np.float32,0x4bf7ca,0x4bf7ca,2 +np.float32,0x3eebc21c,0x3edc65b2,2 +np.float32,0x80155c94,0x80155c94,2 +np.float32,0x3e14f626,0x3e13eb6a,2 +np.float32,0x801a238f,0x801a238f,2 +np.float32,0xbde33a80,0xbde24cf9,2 +np.float32,0xbef8439c,0xbee67a51,2 +np.float32,0x7f60d0a5,0x3f800000,2 +np.float32,0x190ee3,0x190ee3,2 +np.float32,0x80759113,0x80759113,2 +np.float32,0x800afa9f,0x800afa9f,2 +np.float32,0x7110cf,0x7110cf,2 +np.float32,0x3cf709f0,0x3cf6f6c6,2 +np.float32,0x3ef58da4,0x3ee44fa7,2 
+np.float32,0xbf220ff2,0xbf0f662c,2 +np.float32,0xfd888078,0xbf800000,2 +np.float32,0xbe324734,0xbe307f9b,2 +np.float32,0x3eb5cb4f,0x3eae8560,2 +np.float32,0xbf7e7d02,0xbf425493,2 +np.float32,0x3ddcdcf0,0x3ddc02c2,2 +np.float32,0x8026d27a,0x8026d27a,2 +np.float32,0x3d4c0fb1,0x3d4be484,2 +np.float32,0xbf27d2c9,0xbf134d7c,2 +np.float32,0x8029ff80,0x8029ff80,2 +np.float32,0x7f046d2c,0x3f800000,2 +np.float32,0x13f94b,0x13f94b,2 +np.float32,0x7f4ff922,0x3f800000,2 +np.float32,0x3f4ea2ed,0x3f2b03e4,2 +np.float32,0x3e7211f0,0x3e6da8cf,2 +np.float32,0x7f39d0cf,0x3f800000,2 +np.float32,0xfee57fc6,0xbf800000,2 +np.float32,0xff6fb326,0xbf800000,2 +np.float32,0xff800000,0xbf800000,2 +np.float32,0x3f0437a4,0x3ef32fcd,2 +np.float32,0xff546d1e,0xbf800000,2 +np.float32,0x3eb5645b,0x3eae2a5c,2 +np.float32,0x3f08a6e5,0x3ef9ff8f,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x7f3413da,0x3f800000,2 +np.float32,0xfd760140,0xbf800000,2 +np.float32,0x7f3ad24a,0x3f800000,2 +np.float32,0xbf56e812,0xbf2f7f14,2 +np.float32,0xbece0338,0xbec3920a,2 +np.float32,0xbeede54a,0xbede22ae,2 +np.float32,0x7eaeb215,0x3f800000,2 +np.float32,0x3c213c00,0x3c213aab,2 +np.float32,0x7eaac217,0x3f800000,2 +np.float32,0xbf2f740e,0xbf1851a6,2 +np.float32,0x7f6ca5b8,0x3f800000,2 +np.float32,0xff42ce95,0xbf800000,2 +np.float32,0x802e4189,0x802e4189,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xbf31f298,0xbf19ebbe,2 +np.float32,0x3dcb0e6c,0x3dca64c1,2 +np.float32,0xbf29599c,0xbf145204,2 +np.float32,0x2e33f2,0x2e33f2,2 +np.float32,0x1c11e7,0x1c11e7,2 +np.float32,0x3f3b188d,0x3f1fa302,2 +np.float32,0x113300,0x113300,2 +np.float32,0x8054589e,0x8054589e,2 +np.float32,0x2a9e69,0x2a9e69,2 +np.float32,0xff513af7,0xbf800000,2 +np.float32,0x7f2e987a,0x3f800000,2 +np.float32,0x807cd426,0x807cd426,2 +np.float32,0x7f0dc4e4,0x3f800000,2 +np.float32,0x7e7c0d56,0x3f800000,2 +np.float32,0x5cb076,0x5cb076,2 +np.float32,0x80576426,0x80576426,2 +np.float32,0xff616222,0xbf800000,2 +np.float32,0xbf7accb5,0xbf40c005,2 +np.float32,0xfe4118c8,0xbf800000,2 +np.float32,0x804b9327,0x804b9327,2 +np.float32,0x3ed2b428,0x3ec79026,2 +np.float32,0x3f4a048f,0x3f286d41,2 +np.float32,0x800000,0x800000,2 +np.float32,0x7efceb9f,0x3f800000,2 +np.float32,0xbf5fe2d3,0xbf34246f,2 +np.float32,0x807e086a,0x807e086a,2 +np.float32,0x7ef5e856,0x3f800000,2 +np.float32,0xfc546f00,0xbf800000,2 +np.float32,0x3a65b890,0x3a65b88c,2 +np.float32,0x800cfa70,0x800cfa70,2 +np.float32,0x80672ea7,0x80672ea7,2 +np.float32,0x3f2bf3f2,0x3f160a12,2 +np.float32,0xbf0ab67e,0xbefd2004,2 +np.float32,0x3f2a0bb4,0x3f14c824,2 +np.float32,0xbeff5374,0xbeec12d7,2 +np.float32,0xbf221b58,0xbf0f6dff,2 +np.float32,0x7cc1f3,0x7cc1f3,2 +np.float32,0x7f234e3c,0x3f800000,2 +np.float32,0x3f60ff10,0x3f34b37d,2 +np.float32,0xbdd957f0,0xbdd887fe,2 +np.float32,0x801ce048,0x801ce048,2 +np.float32,0x7f3a8f76,0x3f800000,2 +np.float32,0xfdd13d08,0xbf800000,2 +np.float32,0x3e9af4a4,0x3e966445,2 +np.float32,0x1e55f3,0x1e55f3,2 +np.float32,0x327905,0x327905,2 +np.float32,0xbf03cf0b,0xbef28dad,2 +np.float32,0x3f0223d3,0x3eeff4f4,2 +np.float32,0xfdd96ff8,0xbf800000,2 +np.float32,0x428db8,0x428db8,2 +np.float32,0xbd74a200,0xbd7457a5,2 +np.float32,0x2a63a3,0x2a63a3,2 +np.float32,0x7e8aa9d7,0x3f800000,2 +np.float32,0x7f50b810,0x3f800000,2 +np.float32,0xbce5ec80,0xbce5dd0d,2 +np.float32,0x54711,0x54711,2 +np.float32,0x8074212a,0x8074212a,2 +np.float32,0xbf13d0ec,0xbf0551b5,2 +np.float32,0x80217f89,0x80217f89,2 +np.float32,0x3f300824,0x3f18b12f,2 +np.float32,0x7d252462,0x3f800000,2 +np.float32,0x807a154c,0x807a154c,2 
+np.float32,0x8064d4b9,0x8064d4b9,2 +np.float32,0x804543b4,0x804543b4,2 +np.float32,0x4c269e,0x4c269e,2 +np.float32,0xff39823b,0xbf800000,2 +np.float32,0x3f5040b1,0x3f2be80b,2 +np.float32,0xbf7028c1,0xbf3bfee5,2 +np.float32,0x3e94eb78,0x3e90db93,2 +np.float32,0x3ccc1b40,0x3ccc1071,2 +np.float32,0xbe8796f0,0xbe8481a1,2 +np.float32,0xfc767bc0,0xbf800000,2 +np.float32,0xbdd81ed0,0xbdd75259,2 +np.float32,0xbed31bfc,0xbec7e82d,2 +np.float32,0xbf350a9e,0xbf1be1c6,2 +np.float32,0x33d41f,0x33d41f,2 +np.float32,0x3f73e076,0x3f3db0b5,2 +np.float32,0x3f800000,0x3f42f7d6,2 +np.float32,0xfee27c14,0xbf800000,2 +np.float32,0x7f6e4388,0x3f800000,2 +np.float32,0x4ea19b,0x4ea19b,2 +np.float32,0xff2d75f2,0xbf800000,2 +np.float32,0x7ee225ca,0x3f800000,2 +np.float32,0x3f31cb4b,0x3f19d2a4,2 +np.float32,0x80554a9d,0x80554a9d,2 +np.float32,0x3f4d57fa,0x3f2a4c03,2 +np.float32,0x3eac6a88,0x3ea62e72,2 +np.float32,0x773520,0x773520,2 +np.float32,0x8079c20a,0x8079c20a,2 +np.float32,0xfeb1eb94,0xbf800000,2 +np.float32,0xfe8d81c0,0xbf800000,2 +np.float32,0xfeed6902,0xbf800000,2 +np.float32,0x8066bb65,0x8066bb65,2 +np.float32,0x7f800000,0x3f800000,2 +np.float32,0x1,0x1,2 +np.float32,0x3f2c66a4,0x3f16554a,2 +np.float32,0x3cd231,0x3cd231,2 +np.float32,0x3e932a64,0x3e8f3e0c,2 +np.float32,0xbf3ab1c3,0xbf1f6420,2 +np.float32,0xbc902b20,0xbc902751,2 +np.float32,0x7dac0a5b,0x3f800000,2 +np.float32,0x3f2b7e06,0x3f15bc93,2 +np.float32,0x75de0,0x75de0,2 +np.float32,0x8020b7bc,0x8020b7bc,2 +np.float32,0x3f257cda,0x3f11bb6b,2 +np.float32,0x807480e5,0x807480e5,2 +np.float32,0xfe00d758,0xbf800000,2 +np.float32,0xbd9b54e0,0xbd9b08cd,2 +np.float32,0x4dfbe3,0x4dfbe3,2 +np.float32,0xff645788,0xbf800000,2 +np.float32,0xbe92c80a,0xbe8ee360,2 +np.float32,0x3eb9b400,0x3eb1f77c,2 +np.float32,0xff20b69c,0xbf800000,2 +np.float32,0x623c28,0x623c28,2 +np.float32,0xff235748,0xbf800000,2 +np.float32,0xbf3bbc56,0xbf2006f3,2 +np.float32,0x7e6f78b1,0x3f800000,2 +np.float32,0x7e1584e9,0x3f800000,2 +np.float32,0xff463423,0xbf800000,2 +np.float32,0x8002861e,0x8002861e,2 +np.float32,0xbf0491d8,0xbef3bb6a,2 +np.float32,0x7ea3bc17,0x3f800000,2 +np.float32,0xbedde7ea,0xbed0fb49,2 +np.float32,0xbf4bac48,0xbf295c8b,2 +np.float32,0xff28e276,0xbf800000,2 +np.float32,0x7e8f3bf5,0x3f800000,2 +np.float32,0xbf0a4a73,0xbefc7c9d,2 +np.float32,0x7ec5bd96,0x3f800000,2 +np.float32,0xbf4c22e8,0xbf299f2c,2 +np.float32,0x3e3970a0,0x3e377064,2 +np.float32,0x3ecb1118,0x3ec10c88,2 +np.float32,0xff548a7a,0xbf800000,2 +np.float32,0xfe8ec550,0xbf800000,2 +np.float32,0x3e158985,0x3e147bb2,2 +np.float32,0x7eb79ad7,0x3f800000,2 +np.float32,0xbe811384,0xbe7cd1ab,2 +np.float32,0xbdc4b9e8,0xbdc41f94,2 +np.float32,0xe0fd5,0xe0fd5,2 +np.float32,0x3f2485f2,0x3f11142b,2 +np.float32,0xfdd3c3d8,0xbf800000,2 +np.float32,0xfe8458e6,0xbf800000,2 +np.float32,0x3f06e398,0x3ef74dd8,2 +np.float32,0xff4752cf,0xbf800000,2 +np.float32,0x6998e3,0x6998e3,2 +np.float32,0x626751,0x626751,2 +np.float32,0x806631d6,0x806631d6,2 +np.float32,0xbf0c3cf4,0xbeff6c54,2 +np.float32,0x802860f8,0x802860f8,2 +np.float32,0xff2952cb,0xbf800000,2 +np.float32,0xff31d40b,0xbf800000,2 +np.float32,0x7c389473,0x3f800000,2 +np.float32,0x3dcd2f1b,0x3dcc8010,2 +np.float32,0x3d70c29f,0x3d707bbc,2 +np.float32,0x3f6bd386,0x3f39f979,2 +np.float32,0x1efec9,0x1efec9,2 +np.float32,0x3f675518,0x3f37d338,2 +np.float32,0x5fdbe3,0x5fdbe3,2 +np.float32,0x5d684e,0x5d684e,2 +np.float32,0xbedfe748,0xbed2a4c7,2 +np.float32,0x3f0cb07a,0x3f000cdc,2 +np.float32,0xbf77151e,0xbf3f1f5d,2 +np.float32,0x7f038ea0,0x3f800000,2 
+np.float32,0x3ea91be9,0x3ea3376f,2 +np.float32,0xbdf20738,0xbdf0e861,2 +np.float32,0x807ea380,0x807ea380,2 +np.float32,0x2760ca,0x2760ca,2 +np.float32,0x7f20a544,0x3f800000,2 +np.float32,0x76ed83,0x76ed83,2 +np.float32,0x15a441,0x15a441,2 +np.float32,0x74c76d,0x74c76d,2 +np.float32,0xff3d5c2a,0xbf800000,2 +np.float32,0x7f6a76a6,0x3f800000,2 +np.float32,0x3eb87067,0x3eb0dabe,2 +np.float32,0xbf515cfa,0xbf2c83af,2 +np.float32,0xbdececc0,0xbdebdf9d,2 +np.float32,0x7f51b7c2,0x3f800000,2 +np.float32,0x3eb867ac,0x3eb0d30d,2 +np.float32,0xff50fd84,0xbf800000,2 +np.float32,0x806945e9,0x806945e9,2 +np.float32,0x298eed,0x298eed,2 +np.float32,0x441f53,0x441f53,2 +np.float32,0x8066d4b0,0x8066d4b0,2 +np.float32,0x3f6a479c,0x3f393dae,2 +np.float32,0xbf6ce2a7,0xbf3a7921,2 +np.float32,0x8064c3cf,0x8064c3cf,2 +np.float32,0xbf2d8146,0xbf170dfd,2 +np.float32,0x3b0e82,0x3b0e82,2 +np.float32,0xbea97574,0xbea387dc,2 +np.float32,0x67ad15,0x67ad15,2 +np.float32,0xbf68478f,0xbf38485a,2 +np.float32,0xff6f593b,0xbf800000,2 +np.float32,0xbeda26f2,0xbecdd806,2 +np.float32,0xbd216d50,0xbd2157ee,2 +np.float32,0x7a8544db,0x3f800000,2 +np.float32,0x801df20b,0x801df20b,2 +np.float32,0xbe14ba24,0xbe13b0a8,2 +np.float32,0xfdc6d8a8,0xbf800000,2 +np.float32,0x1d6b49,0x1d6b49,2 +np.float32,0x7f5ff1b8,0x3f800000,2 +np.float32,0x3f75e032,0x3f3e9625,2 +np.float32,0x7f2c5687,0x3f800000,2 +np.float32,0x3d95fb6c,0x3d95b6ee,2 +np.float32,0xbea515e4,0xbe9f97c8,2 +np.float32,0x7f2b2cd7,0x3f800000,2 +np.float32,0x3f076f7a,0x3ef8241e,2 +np.float32,0x5178ca,0x5178ca,2 +np.float32,0xbeb5976a,0xbeae5781,2 +np.float32,0x3e3c3563,0x3e3a1e13,2 +np.float32,0xbd208530,0xbd20702a,2 +np.float32,0x3eb03b04,0x3ea995ef,2 +np.float32,0x17fb9c,0x17fb9c,2 +np.float32,0xfca68e40,0xbf800000,2 +np.float32,0xbf5e7433,0xbf336a9f,2 +np.float32,0xff5b8d3d,0xbf800000,2 +np.float32,0x8003121d,0x8003121d,2 +np.float32,0xbe6dd344,0xbe69a3b0,2 +np.float32,0x67cc4,0x67cc4,2 +np.float32,0x9b01d,0x9b01d,2 +np.float32,0x127c13,0x127c13,2 +np.float32,0xfea5e3d6,0xbf800000,2 +np.float32,0xbdf5c610,0xbdf499c1,2 +np.float32,0x3aff4c00,0x3aff4beb,2 +np.float32,0x3b00afd0,0x3b00afc5,2 +np.float32,0x479618,0x479618,2 +np.float32,0x801cbd05,0x801cbd05,2 +np.float32,0x3ec9249f,0x3ebf6579,2 +np.float32,0x3535c4,0x3535c4,2 +np.float32,0xbeb4f662,0xbeadc915,2 +np.float32,0x8006fda6,0x8006fda6,2 +np.float32,0xbf4f3097,0xbf2b5239,2 +np.float32,0xbf3cb9a8,0xbf20a0e9,2 +np.float32,0x32ced0,0x32ced0,2 +np.float32,0x7ea34e76,0x3f800000,2 +np.float32,0x80063046,0x80063046,2 +np.float32,0x80727e8b,0x80727e8b,2 +np.float32,0xfd6b5780,0xbf800000,2 +np.float32,0x80109815,0x80109815,2 +np.float32,0xfdcc8a78,0xbf800000,2 +np.float32,0x81562,0x81562,2 +np.float32,0x803dfacc,0x803dfacc,2 +np.float32,0xbe204318,0xbe1ef75f,2 +np.float32,0xbf745d34,0xbf3de8e2,2 +np.float32,0xff13fdcc,0xbf800000,2 +np.float32,0x7f75ba8c,0x3f800000,2 +np.float32,0x806c04b4,0x806c04b4,2 +np.float32,0x3ec61ca6,0x3ebcc877,2 +np.float32,0xbeaea984,0xbea8301f,2 +np.float32,0xbf4dcd0e,0xbf2a8d34,2 +np.float32,0x802a01d3,0x802a01d3,2 +np.float32,0xbf747be5,0xbf3df6ad,2 +np.float32,0xbf75cbd2,0xbf3e8d0f,2 +np.float32,0x7db86576,0x3f800000,2 +np.float32,0xff49a2c3,0xbf800000,2 +np.float32,0xbedc5314,0xbecfa978,2 +np.float32,0x8078877b,0x8078877b,2 +np.float32,0xbead4824,0xbea6f499,2 +np.float32,0xbf3926e3,0xbf1e716c,2 +np.float32,0x807f4a1c,0x807f4a1c,2 +np.float32,0x7f2cd8fd,0x3f800000,2 +np.float32,0x806cfcca,0x806cfcca,2 +np.float32,0xff1aa048,0xbf800000,2 +np.float32,0x7eb9ea08,0x3f800000,2 
+np.float32,0xbf1034bc,0xbf02ab3a,2 +np.float32,0xbd087830,0xbd086b44,2 +np.float32,0x7e071034,0x3f800000,2 +np.float32,0xbefcc9de,0xbeea122f,2 +np.float32,0x80796d7a,0x80796d7a,2 +np.float32,0x33ce46,0x33ce46,2 +np.float32,0x8074a783,0x8074a783,2 +np.float32,0xbe95a56a,0xbe918691,2 +np.float32,0xbf2ff3f4,0xbf18a42d,2 +np.float32,0x1633e9,0x1633e9,2 +np.float32,0x7f0f104b,0x3f800000,2 +np.float32,0xbf800000,0xbf42f7d6,2 +np.float32,0x3d2cd6,0x3d2cd6,2 +np.float32,0xfed43e16,0xbf800000,2 +np.float32,0x3ee6faec,0x3ed87d2c,2 +np.float32,0x3f2c32d0,0x3f163352,2 +np.float32,0xff4290c0,0xbf800000,2 +np.float32,0xbf66500e,0xbf37546a,2 +np.float32,0x7dfb8fe3,0x3f800000,2 +np.float32,0x3f20ba5d,0x3f0e7b16,2 +np.float32,0xff30c7ae,0xbf800000,2 +np.float32,0x1728a4,0x1728a4,2 +np.float32,0x340d82,0x340d82,2 +np.float32,0xff7870b7,0xbf800000,2 +np.float32,0xbeac6ac4,0xbea62ea7,2 +np.float32,0xbef936fc,0xbee73c36,2 +np.float32,0x3ec7e12c,0x3ebe4ef8,2 +np.float32,0x80673488,0x80673488,2 +np.float32,0xfdf14c90,0xbf800000,2 +np.float32,0x3f182568,0x3f08726e,2 +np.float32,0x7ed7dcd0,0x3f800000,2 +np.float32,0x3de4da34,0x3de3e790,2 +np.float32,0xff7fffff,0xbf800000,2 +np.float32,0x4ff90c,0x4ff90c,2 +np.float32,0x3efb0d1c,0x3ee8b1d6,2 +np.float32,0xbf66e952,0xbf379ef4,2 +np.float32,0xba9dc,0xba9dc,2 +np.float32,0xff67c766,0xbf800000,2 +np.float32,0x7f1ffc29,0x3f800000,2 +np.float32,0x3f51c906,0x3f2cbe99,2 +np.float32,0x3f2e5792,0x3f179968,2 +np.float32,0x3ecb9750,0x3ec17fa0,2 +np.float32,0x7f3fcefc,0x3f800000,2 +np.float32,0xbe4e30fc,0xbe4b72f9,2 +np.float32,0x7e9bc4ce,0x3f800000,2 +np.float32,0x7e70aa1f,0x3f800000,2 +np.float32,0x14c6e9,0x14c6e9,2 +np.float32,0xbcf327c0,0xbcf3157a,2 +np.float32,0xff1fd204,0xbf800000,2 +np.float32,0x7d934a03,0x3f800000,2 +np.float32,0x8028bf1e,0x8028bf1e,2 +np.float32,0x7f0800b7,0x3f800000,2 +np.float32,0xfe04825c,0xbf800000,2 +np.float32,0x807210ac,0x807210ac,2 +np.float32,0x3f7faf7c,0x3f42d5fd,2 +np.float32,0x3e04a543,0x3e03e899,2 +np.float32,0x3e98ea15,0x3e94863e,2 +np.float32,0x3d2a2e48,0x3d2a153b,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x20a488,0x20a488,2 +np.float32,0x3f6ba86a,0x3f39e51a,2 +np.float32,0x0,0x0,2 +np.float32,0x3e892ddd,0x3e85fcfe,2 +np.float32,0x3e2da627,0x3e2c00e0,2 +np.float32,0xff000a50,0xbf800000,2 +np.float32,0x3eb749f4,0x3eafd739,2 +np.float32,0x8024c0ae,0x8024c0ae,2 +np.float32,0xfc8f3b40,0xbf800000,2 +np.float32,0xbf685fc7,0xbf385405,2 +np.float32,0x3f1510e6,0x3f063a4f,2 +np.float32,0x3f68e8ad,0x3f3895d8,2 +np.float32,0x3dba8608,0x3dba0271,2 +np.float32,0xbf16ea10,0xbf079017,2 +np.float32,0xb3928,0xb3928,2 +np.float32,0xfe447c00,0xbf800000,2 +np.float32,0x3db9cd57,0x3db94b45,2 +np.float32,0x803b66b0,0x803b66b0,2 +np.float32,0x805b5e02,0x805b5e02,2 +np.float32,0x7ec93f61,0x3f800000,2 +np.float32,0x8005a126,0x8005a126,2 +np.float32,0x6d8888,0x6d8888,2 +np.float32,0x3e21b7de,0x3e206314,2 +np.float32,0xbec9c31e,0xbebfedc2,2 +np.float32,0xbea88aa8,0xbea2b4e5,2 +np.float32,0x3d8fc310,0x3d8f86bb,2 +np.float32,0xbf3cc68a,0xbf20a8b8,2 +np.float32,0x432690,0x432690,2 +np.float32,0xbe51d514,0xbe4ef1a3,2 +np.float32,0xbcda6d20,0xbcda5fe1,2 +np.float32,0xfe24e458,0xbf800000,2 +np.float32,0xfedc8c14,0xbf800000,2 +np.float32,0x7f7e9bd4,0x3f800000,2 +np.float32,0x3ebcc880,0x3eb4ab44,2 +np.float32,0xbe0aa490,0xbe09cd44,2 +np.float32,0x3dc9158c,0x3dc870c3,2 +np.float32,0x3e5c319e,0x3e58dc90,2 +np.float32,0x1d4527,0x1d4527,2 +np.float32,0x2dbf5,0x2dbf5,2 +np.float32,0xbf1f121f,0xbf0d5534,2 +np.float32,0x7e3e9ab5,0x3f800000,2 
+np.float32,0x7f74b5c1,0x3f800000,2 +np.float32,0xbf6321ba,0xbf35c42b,2 +np.float32,0xbe5c7488,0xbe591c79,2 +np.float32,0x7e7b02cd,0x3f800000,2 +np.float32,0xfe7cbfa4,0xbf800000,2 +np.float32,0xbeace360,0xbea69a86,2 +np.float32,0x7e149b00,0x3f800000,2 +np.float32,0xbf61a700,0xbf35079a,2 +np.float32,0x7eb592a7,0x3f800000,2 +np.float32,0x3f2105e6,0x3f0eaf30,2 +np.float32,0xfd997a88,0xbf800000,2 +np.float32,0xff5d093b,0xbf800000,2 +np.float32,0x63aede,0x63aede,2 +np.float32,0x6907ee,0x6907ee,2 +np.float32,0xbf7578ee,0xbf3e680f,2 +np.float32,0xfea971e8,0xbf800000,2 +np.float32,0x3f21d0f5,0x3f0f3aed,2 +np.float32,0x3a50e2,0x3a50e2,2 +np.float32,0x7f0f5b1e,0x3f800000,2 +np.float32,0x805b9765,0x805b9765,2 +np.float32,0xbe764ab8,0xbe71a664,2 +np.float32,0x3eafac7f,0x3ea91701,2 +np.float32,0x807f4130,0x807f4130,2 +np.float32,0x7c5f31,0x7c5f31,2 +np.float32,0xbdbe0e30,0xbdbd8300,2 +np.float32,0x7ecfe4e0,0x3f800000,2 +np.float32,0xff7cb628,0xbf800000,2 +np.float32,0xff1842bc,0xbf800000,2 +np.float32,0xfd4163c0,0xbf800000,2 +np.float32,0x800e11f7,0x800e11f7,2 +np.float32,0x7f3adec8,0x3f800000,2 +np.float32,0x7f597514,0x3f800000,2 +np.float32,0xbe986e14,0xbe9414a4,2 +np.float32,0x800fa9d7,0x800fa9d7,2 +np.float32,0xff5b79c4,0xbf800000,2 +np.float32,0x80070565,0x80070565,2 +np.float32,0xbee5628e,0xbed72d60,2 +np.float32,0x3f438ef2,0x3f24b3ca,2 +np.float32,0xcda91,0xcda91,2 +np.float32,0x7e64151a,0x3f800000,2 +np.float32,0xbe95d584,0xbe91b2c7,2 +np.float32,0x8022c2a1,0x8022c2a1,2 +np.float32,0x7e7097bf,0x3f800000,2 +np.float32,0x80139035,0x80139035,2 +np.float32,0x804de2cb,0x804de2cb,2 +np.float32,0xfde5d178,0xbf800000,2 +np.float32,0x6d238,0x6d238,2 +np.float32,0x807abedc,0x807abedc,2 +np.float32,0x3f450a12,0x3f259129,2 +np.float32,0x3ef1c120,0x3ee141f2,2 +np.float32,0xfeb64dae,0xbf800000,2 +np.float32,0x8001732c,0x8001732c,2 +np.float32,0x3f76062e,0x3f3ea711,2 +np.float32,0x3eddd550,0x3ed0ebc8,2 +np.float32,0xff5ca1d4,0xbf800000,2 +np.float32,0xbf49dc5e,0xbf285673,2 +np.float32,0x7e9e5438,0x3f800000,2 +np.float32,0x7e83625e,0x3f800000,2 +np.float32,0x3f5dc41c,0x3f3310da,2 +np.float32,0x3f583efa,0x3f30342f,2 +np.float32,0xbe26bf88,0xbe254a2d,2 +np.float32,0xff1e0beb,0xbf800000,2 +np.float32,0xbe2244c8,0xbe20ec86,2 +np.float32,0xff0b1630,0xbf800000,2 +np.float32,0xff338dd6,0xbf800000,2 +np.float32,0x3eafc22c,0x3ea92a51,2 +np.float32,0x800ea07f,0x800ea07f,2 +np.float32,0x3f46f006,0x3f26aa7e,2 +np.float32,0x3e5f57cd,0x3e5bde16,2 +np.float32,0xbf1b2d8e,0xbf0a9a93,2 +np.float32,0xfeacdbe0,0xbf800000,2 +np.float32,0x7e5ea4bc,0x3f800000,2 +np.float32,0xbf51cbe2,0xbf2cc027,2 +np.float32,0x8073644c,0x8073644c,2 +np.float32,0xff2d6bfe,0xbf800000,2 +np.float32,0x3f65f0f6,0x3f37260a,2 +np.float32,0xff4b37a6,0xbf800000,2 +np.float32,0x712df7,0x712df7,2 +np.float32,0x7f71ef17,0x3f800000,2 +np.float32,0x8042245c,0x8042245c,2 +np.float32,0x3e5dde7b,0x3e5a760d,2 +np.float32,0x8069317d,0x8069317d,2 +np.float32,0x807932dd,0x807932dd,2 +np.float32,0x802f847e,0x802f847e,2 +np.float32,0x7e9300,0x7e9300,2 +np.float32,0x8040b4ab,0x8040b4ab,2 +np.float32,0xff76ef8e,0xbf800000,2 +np.float32,0x4aae3a,0x4aae3a,2 +np.float32,0x8058de73,0x8058de73,2 +np.float32,0x7e4d58c0,0x3f800000,2 +np.float32,0x3d811b30,0x3d80ef79,2 +np.float32,0x7ec952cc,0x3f800000,2 +np.float32,0xfe162b1c,0xbf800000,2 +np.float32,0x3f0f1187,0x3f01d367,2 +np.float32,0xbf2f3458,0xbf182878,2 +np.float32,0x5ceb14,0x5ceb14,2 +np.float32,0xbec29476,0xbeb9b939,2 +np.float32,0x3e71f943,0x3e6d9176,2 +np.float32,0x3ededefc,0x3ed1c909,2 
+np.float32,0x805df6ac,0x805df6ac,2 +np.float32,0x3e5ae2c8,0x3e579ca8,2 +np.float32,0x3f6ad2c3,0x3f397fdf,2 +np.float32,0x7d5f94d3,0x3f800000,2 +np.float32,0xbeec7fe4,0xbedd0037,2 +np.float32,0x3f645304,0x3f365b0d,2 +np.float32,0xbf69a087,0xbf38edef,2 +np.float32,0x8025102e,0x8025102e,2 +np.float32,0x800db486,0x800db486,2 +np.float32,0x4df6c7,0x4df6c7,2 +np.float32,0x806d8cdd,0x806d8cdd,2 +np.float32,0x7f0c78cc,0x3f800000,2 +np.float32,0x7e1cf70b,0x3f800000,2 +np.float32,0x3e0ae570,0x3e0a0cf7,2 +np.float32,0x80176ef8,0x80176ef8,2 +np.float32,0x3f38b60c,0x3f1e2bbb,2 +np.float32,0x3d3071e0,0x3d3055f5,2 +np.float32,0x3ebfcfdd,0x3eb750a9,2 +np.float32,0xfe2cdec0,0xbf800000,2 +np.float32,0x7eeb2eed,0x3f800000,2 +np.float32,0x8026c904,0x8026c904,2 +np.float32,0xbec79bde,0xbebe133a,2 +np.float32,0xbf7dfab6,0xbf421d47,2 +np.float32,0x805b3cfd,0x805b3cfd,2 +np.float32,0xfdfcfb68,0xbf800000,2 +np.float32,0xbd537ec0,0xbd534eaf,2 +np.float32,0x52ce73,0x52ce73,2 +np.float32,0xfeac6ea6,0xbf800000,2 +np.float32,0x3f2c2990,0x3f162d41,2 +np.float32,0x3e3354e0,0x3e318539,2 +np.float32,0x802db22b,0x802db22b,2 +np.float32,0x7f0faa83,0x3f800000,2 +np.float32,0x7f10e161,0x3f800000,2 +np.float32,0x7f165c60,0x3f800000,2 +np.float32,0xbf5a756f,0xbf315c82,2 +np.float32,0x7f5a4b68,0x3f800000,2 +np.float32,0xbd77fbf0,0xbd77ae7c,2 +np.float32,0x65d83c,0x65d83c,2 +np.float32,0x3e5f28,0x3e5f28,2 +np.float32,0x8040ec92,0x8040ec92,2 +np.float32,0xbf2b41a6,0xbf1594d5,2 +np.float32,0x7f2f88f1,0x3f800000,2 +np.float32,0xfdb64ab8,0xbf800000,2 +np.float32,0xbf7a3ff1,0xbf4082f5,2 +np.float32,0x1948fc,0x1948fc,2 +np.float32,0x802c1039,0x802c1039,2 +np.float32,0x80119274,0x80119274,2 +np.float32,0x7e885d7b,0x3f800000,2 +np.float32,0xfaf6a,0xfaf6a,2 +np.float32,0x3eba28c4,0x3eb25e1d,2 +np.float32,0x3e4df370,0x3e4b37da,2 +np.float32,0xbf19eff6,0xbf09b97d,2 +np.float32,0xbeddd3c6,0xbed0ea7f,2 +np.float32,0xff6fc971,0xbf800000,2 +np.float32,0x7e93de29,0x3f800000,2 +np.float32,0x3eb12332,0x3eaa6485,2 +np.float32,0x3eb7c6e4,0x3eb04563,2 +np.float32,0x4a67ee,0x4a67ee,2 +np.float32,0xff1cafde,0xbf800000,2 +np.float32,0x3f5e2812,0x3f3343da,2 +np.float32,0x3f060e04,0x3ef605d4,2 +np.float32,0x3e9027d8,0x3e8c76a6,2 +np.float32,0xe2d33,0xe2d33,2 +np.float32,0xff4c94fc,0xbf800000,2 +np.float32,0xbf574908,0xbf2fb26b,2 +np.float32,0xbf786c08,0xbf3fb68e,2 +np.float32,0x8011ecab,0x8011ecab,2 +np.float32,0xbf061c6a,0xbef61bfa,2 +np.float32,0x7eea5f9d,0x3f800000,2 +np.float32,0x3ea2e19c,0x3e9d99a5,2 +np.float32,0x8071550c,0x8071550c,2 +np.float32,0x41c70b,0x41c70b,2 +np.float32,0x80291fc8,0x80291fc8,2 +np.float32,0x43b1ec,0x43b1ec,2 +np.float32,0x32f5a,0x32f5a,2 +np.float32,0xbe9310ec,0xbe8f2692,2 +np.float32,0x7f75f6bf,0x3f800000,2 +np.float32,0x3e6642a6,0x3e6274d2,2 +np.float32,0x3ecb88e0,0x3ec1733f,2 +np.float32,0x804011b6,0x804011b6,2 +np.float32,0x80629cca,0x80629cca,2 +np.float32,0x8016b914,0x8016b914,2 +np.float32,0xbdd05fc0,0xbdcfa870,2 +np.float32,0x807b824d,0x807b824d,2 +np.float32,0xfeec2576,0xbf800000,2 +np.float32,0xbf54bf22,0xbf2e584c,2 +np.float32,0xbf185eb0,0xbf089b6b,2 +np.float32,0xfbc09480,0xbf800000,2 +np.float32,0x3f413054,0x3f234e25,2 +np.float32,0x7e9e32b8,0x3f800000,2 +np.float32,0x266296,0x266296,2 +np.float32,0x460284,0x460284,2 +np.float32,0x3eb0b056,0x3ea9fe5a,2 +np.float32,0x1a7be5,0x1a7be5,2 +np.float32,0x7f099895,0x3f800000,2 +np.float32,0x3f3614f0,0x3f1c88ef,2 +np.float32,0x7e757dc2,0x3f800000,2 +np.float32,0x801fc91e,0x801fc91e,2 +np.float32,0x3f5ce37d,0x3f329ddb,2 +np.float32,0x3e664d70,0x3e627f15,2 
+np.float32,0xbf38ed78,0xbf1e4dfa,2 +np.float32,0xbf5c563d,0xbf325543,2 +np.float32,0xbe91cc54,0xbe8dfb24,2 +np.float32,0x3d767fbe,0x3d7633ac,2 +np.float32,0xbf6aeb40,0xbf398b7f,2 +np.float32,0x7f40508b,0x3f800000,2 +np.float32,0x2650df,0x2650df,2 +np.float32,0xbe8cea3c,0xbe897628,2 +np.float32,0x80515af8,0x80515af8,2 +np.float32,0x7f423986,0x3f800000,2 +np.float32,0xbdf250e8,0xbdf1310c,2 +np.float32,0xfe89288a,0xbf800000,2 +np.float32,0x397b3b,0x397b3b,2 +np.float32,0x7e5e91b0,0x3f800000,2 +np.float32,0x6866e2,0x6866e2,2 +np.float32,0x7f4d8877,0x3f800000,2 +np.float32,0x3e6c4a21,0x3e682ee3,2 +np.float32,0xfc3d5980,0xbf800000,2 +np.float32,0x7eae2cd0,0x3f800000,2 +np.float32,0xbf241222,0xbf10c579,2 +np.float32,0xfebc02de,0xbf800000,2 +np.float32,0xff6e0645,0xbf800000,2 +np.float32,0x802030b6,0x802030b6,2 +np.float32,0x7ef9a441,0x3f800000,2 +np.float32,0x3fcf9f,0x3fcf9f,2 +np.float32,0xbf0ccf13,0xbf0023cc,2 +np.float32,0xfefee688,0xbf800000,2 +np.float32,0xbf6c8e0c,0xbf3a5160,2 +np.float32,0xfe749c28,0xbf800000,2 +np.float32,0x7f7fffff,0x3f800000,2 +np.float32,0x58c1a0,0x58c1a0,2 +np.float32,0x3f2de0a1,0x3f174c17,2 +np.float32,0xbf5f7138,0xbf33eb03,2 +np.float32,0x3da15270,0x3da0fd3c,2 +np.float32,0x3da66560,0x3da607e4,2 +np.float32,0xbf306f9a,0xbf18f3c6,2 +np.float32,0x3e81a4de,0x3e7de293,2 +np.float32,0xbebb5fb8,0xbeb36f1a,2 +np.float32,0x14bf64,0x14bf64,2 +np.float32,0xbeac46c6,0xbea60e73,2 +np.float32,0xbdcdf210,0xbdcd4111,2 +np.float32,0x3f7e3cd9,0x3f42395e,2 +np.float32,0xbc4be640,0xbc4be38e,2 +np.float32,0xff5f53b4,0xbf800000,2 +np.float32,0xbf1315ae,0xbf04c90b,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0xbf6a4149,0xbf393aaa,2 +np.float32,0x3f66b8ee,0x3f378772,2 +np.float32,0xff29293e,0xbf800000,2 +np.float32,0xbcc989c0,0xbcc97f58,2 +np.float32,0xbd9a1b70,0xbd99d125,2 +np.float32,0xfef353cc,0xbf800000,2 +np.float32,0xbdc30cf0,0xbdc27683,2 +np.float32,0xfdfd6768,0xbf800000,2 +np.float32,0x7ebac44c,0x3f800000,2 +np.float32,0xff453cd6,0xbf800000,2 +np.float32,0x3ef07720,0x3ee03787,2 +np.float32,0x80219c14,0x80219c14,2 +np.float32,0x805553a8,0x805553a8,2 +np.float32,0x80703928,0x80703928,2 +np.float32,0xff16d3a7,0xbf800000,2 +np.float32,0x3f1472bc,0x3f05c77b,2 +np.float32,0x3eeea37a,0x3edebcf9,2 +np.float32,0x3db801e6,0x3db7838d,2 +np.float32,0x800870d2,0x800870d2,2 +np.float32,0xbea1172c,0xbe9bfa32,2 +np.float32,0x3f1f5e7c,0x3f0d8a42,2 +np.float32,0x123cdb,0x123cdb,2 +np.float32,0x7f6e6b06,0x3f800000,2 +np.float32,0x3ed80573,0x3ecc0def,2 +np.float32,0xfea31b82,0xbf800000,2 +np.float32,0x6744e0,0x6744e0,2 +np.float32,0x695e8b,0x695e8b,2 +np.float32,0xbee3888a,0xbed5a67d,2 +np.float32,0x7f64bc2a,0x3f800000,2 +np.float32,0x7f204244,0x3f800000,2 +np.float32,0x7f647102,0x3f800000,2 +np.float32,0x3dd8ebc0,0x3dd81d03,2 +np.float32,0x801e7ab1,0x801e7ab1,2 +np.float32,0x7d034b56,0x3f800000,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x80194193,0x80194193,2 +np.float32,0xfe31c8d4,0xbf800000,2 +np.float32,0x7fc0c4,0x7fc0c4,2 +np.float32,0xd95bf,0xd95bf,2 +np.float32,0x7e4f991d,0x3f800000,2 +np.float32,0x7fc563,0x7fc563,2 +np.float32,0xbe3fcccc,0xbe3d968a,2 +np.float32,0xfdaaa1c8,0xbf800000,2 +np.float32,0xbf48e449,0xbf27c949,2 +np.float32,0x3eb6c584,0x3eaf625e,2 +np.float32,0xbea35a74,0xbe9e0702,2 +np.float32,0x3eeab47a,0x3edb89d5,2 +np.float32,0xbed99556,0xbecd5de5,2 +np.float64,0xbfb94a81e0329500,0xbfb935867ba761fe,2 +np.float64,0xbfec132f1678265e,0xbfe6900eb097abc3,2 +np.float64,0x5685ea72ad0be,0x5685ea72ad0be,2 +np.float64,0xbfd74d3169ae9a62,0xbfd652e09b9daf32,2 
+np.float64,0xbfe28df53d651bea,0xbfe0b8a7f50ab433,2 +np.float64,0x0,0x0,2 +np.float64,0xbfed912738bb224e,0xbfe749e3732831ae,2 +np.float64,0x7fcc6faed838df5d,0x3ff0000000000000,2 +np.float64,0xbfe95fe9a432bfd3,0xbfe51f6349919910,2 +np.float64,0xbfc4d5900b29ab20,0xbfc4a6f496179b8b,2 +np.float64,0xbfcd6025033ac04c,0xbfccded7b34b49b0,2 +np.float64,0xbfdfa655b43f4cac,0xbfdd4ca1e5bb9db8,2 +np.float64,0xe7ea5c7fcfd4c,0xe7ea5c7fcfd4c,2 +np.float64,0xffa5449ca42a8940,0xbff0000000000000,2 +np.float64,0xffe63294c1ac6529,0xbff0000000000000,2 +np.float64,0x7feb9cbae7f73975,0x3ff0000000000000,2 +np.float64,0x800eb07c3e3d60f9,0x800eb07c3e3d60f9,2 +np.float64,0x3fc95777e932aef0,0x3fc9040391e20c00,2 +np.float64,0x800736052dee6c0b,0x800736052dee6c0b,2 +np.float64,0x3fe9ae4afd335c96,0x3fe54b569bab45c7,2 +np.float64,0x7fee4c94217c9927,0x3ff0000000000000,2 +np.float64,0x80094b594bd296b3,0x80094b594bd296b3,2 +np.float64,0xffe5adbcee6b5b7a,0xbff0000000000000,2 +np.float64,0x3fecb8eab47971d5,0x3fe6e236be6f27e9,2 +np.float64,0x44956914892ae,0x44956914892ae,2 +np.float64,0xbfe3bd18ef677a32,0xbfe190bf1e07200c,2 +np.float64,0x800104e5b46209cc,0x800104e5b46209cc,2 +np.float64,0x8008fbcecf71f79e,0x8008fbcecf71f79e,2 +np.float64,0x800f0a46a0be148d,0x800f0a46a0be148d,2 +np.float64,0x7fe657a0702caf40,0x3ff0000000000000,2 +np.float64,0xffd3ff1a9027fe36,0xbff0000000000000,2 +np.float64,0x3fe78bc87bef1790,0x3fe40d2e63aaf029,2 +np.float64,0x7feeabdc4c7d57b8,0x3ff0000000000000,2 +np.float64,0xbfabd28d8437a520,0xbfabcb8ce03a0e56,2 +np.float64,0xbfddc3a133bb8742,0xbfdbc9fdb2594451,2 +np.float64,0x7fec911565b9222a,0x3ff0000000000000,2 +np.float64,0x71302604e2605,0x71302604e2605,2 +np.float64,0xee919d2bdd234,0xee919d2bdd234,2 +np.float64,0xbfc04fcff3209fa0,0xbfc0395a739a2ce4,2 +np.float64,0xffe4668a36e8cd14,0xbff0000000000000,2 +np.float64,0xbfeeafeebefd5fde,0xbfe7cd5f3d61a3ec,2 +np.float64,0x7fddb34219bb6683,0x3ff0000000000000,2 +np.float64,0xbfd2cac6cba5958e,0xbfd24520abb2ff36,2 +np.float64,0xbfb857e49630afc8,0xbfb8452d5064dec2,2 +np.float64,0x3fd2dbf90b25b7f2,0x3fd254eaf48484c2,2 +np.float64,0x800af65c94f5ecba,0x800af65c94f5ecba,2 +np.float64,0xa0eef4bf41ddf,0xa0eef4bf41ddf,2 +np.float64,0xffd8e0a4adb1c14a,0xbff0000000000000,2 +np.float64,0xffe858f6e870b1ed,0xbff0000000000000,2 +np.float64,0x3f94c2c308298580,0x3f94c208a4bb006d,2 +np.float64,0xffb45f0d7428be18,0xbff0000000000000,2 +np.float64,0x800ed4f43dbda9e9,0x800ed4f43dbda9e9,2 +np.float64,0x8002dd697e85bad4,0x8002dd697e85bad4,2 +np.float64,0x787ceab2f0f9e,0x787ceab2f0f9e,2 +np.float64,0xbfdff5fcc2bfebfa,0xbfdd8b736b128589,2 +np.float64,0x7fdb2b4294365684,0x3ff0000000000000,2 +np.float64,0xffe711e5e92e23cc,0xbff0000000000000,2 +np.float64,0x800b1c93f1163928,0x800b1c93f1163928,2 +np.float64,0x7fc524d2f22a49a5,0x3ff0000000000000,2 +np.float64,0x7fc88013b5310026,0x3ff0000000000000,2 +np.float64,0x3fe1a910c5e35222,0x3fe00fd779ebaa2a,2 +np.float64,0xbfb57ec9ca2afd90,0xbfb571e47ecb9335,2 +np.float64,0x7fd7594b20aeb295,0x3ff0000000000000,2 +np.float64,0x7fba4641ca348c83,0x3ff0000000000000,2 +np.float64,0xffe61393706c2726,0xbff0000000000000,2 +np.float64,0x7fd54f3c7baa9e78,0x3ff0000000000000,2 +np.float64,0xffe65ffb12ecbff6,0xbff0000000000000,2 +np.float64,0xbfba3b0376347608,0xbfba239cbbbd1b11,2 +np.float64,0x800200886d640112,0x800200886d640112,2 +np.float64,0xbfecf0ba4679e174,0xbfe6fd59de44a3ec,2 +np.float64,0xffe5c57e122b8afc,0xbff0000000000000,2 +np.float64,0x7fdaad0143355a02,0x3ff0000000000000,2 +np.float64,0x46ab32c08d567,0x46ab32c08d567,2 
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfda7980fdb4f302,0xbfd90fa9c8066109,2 +np.float64,0x3fe237703c646ee0,0x3fe07969f8d8805a,2 +np.float64,0x8000e9fcfc21d3fb,0x8000e9fcfc21d3fb,2 +np.float64,0xbfdfe6e958bfcdd2,0xbfdd7f952fe87770,2 +np.float64,0xbd7baf217af8,0xbd7baf217af8,2 +np.float64,0xbfceba9e4b3d753c,0xbfce26e54359869a,2 +np.float64,0xb95a2caf72b46,0xb95a2caf72b46,2 +np.float64,0x3fb407e25a280fc5,0x3fb3fd71e457b628,2 +np.float64,0xa1da09d943b41,0xa1da09d943b41,2 +np.float64,0xbfe9c7271cf38e4e,0xbfe559296b471738,2 +np.float64,0x3fefae6170ff5cc3,0x3fe83c70ba82f0e1,2 +np.float64,0x7fe7375348ae6ea6,0x3ff0000000000000,2 +np.float64,0xffe18c9cc6e31939,0xbff0000000000000,2 +np.float64,0x800483d13a6907a3,0x800483d13a6907a3,2 +np.float64,0x7fe772a18caee542,0x3ff0000000000000,2 +np.float64,0xffefff64e7bffec9,0xbff0000000000000,2 +np.float64,0x7fcffc31113ff861,0x3ff0000000000000,2 +np.float64,0x3fd91e067e323c0d,0x3fd7e70bf365a7b3,2 +np.float64,0xb0a6673d614cd,0xb0a6673d614cd,2 +np.float64,0xffef9a297e3f3452,0xbff0000000000000,2 +np.float64,0xffe87cc15e70f982,0xbff0000000000000,2 +np.float64,0xffefd6ad8e7fad5a,0xbff0000000000000,2 +np.float64,0x7fe3aaa3a8a75546,0x3ff0000000000000,2 +np.float64,0xddab0341bb561,0xddab0341bb561,2 +np.float64,0x3fe996d6d7332dae,0x3fe53e3ed5be2922,2 +np.float64,0x3fdbe66a18b7ccd4,0x3fda41e6053c1512,2 +np.float64,0x8914775d1228f,0x8914775d1228f,2 +np.float64,0x3fe44621d4688c44,0x3fe1ef9c7225f8bd,2 +np.float64,0xffab29a2a4365340,0xbff0000000000000,2 +np.float64,0xffc8d4a0c431a940,0xbff0000000000000,2 +np.float64,0xbfd426e085284dc2,0xbfd382e2a9617b87,2 +np.float64,0xbfd3b0a525a7614a,0xbfd3176856faccf1,2 +np.float64,0x80036dedcb06dbdc,0x80036dedcb06dbdc,2 +np.float64,0x3feb13823b762704,0x3fe60ca3facdb696,2 +np.float64,0x3fd7246b7bae48d8,0x3fd62f08afded155,2 +np.float64,0x1,0x1,2 +np.float64,0x3fe8ade4b9715bc9,0x3fe4b97cc1387d27,2 +np.float64,0x3fdf2dbec53e5b7e,0x3fdcecfeee33de95,2 +np.float64,0x3fe4292bf9685258,0x3fe1dbb5a6704090,2 +np.float64,0xbfd21acbb8243598,0xbfd1a2ff42174cae,2 +np.float64,0xdd0d2d01ba1a6,0xdd0d2d01ba1a6,2 +np.float64,0x3fa3f3d2f427e7a0,0x3fa3f13d6f101555,2 +np.float64,0x7fdabf4aceb57e95,0x3ff0000000000000,2 +np.float64,0xd4d9e39ba9b3d,0xd4d9e39ba9b3d,2 +np.float64,0xffec773396f8ee66,0xbff0000000000000,2 +np.float64,0x3fa88cc79031198f,0x3fa887f7ade722ba,2 +np.float64,0xffe63a92066c7524,0xbff0000000000000,2 +np.float64,0xbfcf514e2e3ea29c,0xbfceb510e99aaa19,2 +np.float64,0x9d78c19d3af18,0x9d78c19d3af18,2 +np.float64,0x7fdd748bfbbae917,0x3ff0000000000000,2 +np.float64,0xffb3594c4626b298,0xbff0000000000000,2 +np.float64,0x80068ce5b32d19cc,0x80068ce5b32d19cc,2 +np.float64,0x3fec63d60e78c7ac,0x3fe6b85536e44217,2 +np.float64,0x80080bad4dd0175b,0x80080bad4dd0175b,2 +np.float64,0xbfec6807baf8d010,0xbfe6ba69740f9687,2 +np.float64,0x7fedbae0bbfb75c0,0x3ff0000000000000,2 +np.float64,0x8001cb7aa3c396f6,0x8001cb7aa3c396f6,2 +np.float64,0x7fe1f1f03563e3df,0x3ff0000000000000,2 +np.float64,0x7fd83d3978307a72,0x3ff0000000000000,2 +np.float64,0xbfc05ffe9d20bffc,0xbfc049464e3f0af2,2 +np.float64,0xfe6e053ffcdc1,0xfe6e053ffcdc1,2 +np.float64,0xbfd3bdf39d277be8,0xbfd32386edf12726,2 +np.float64,0x800f41b27bde8365,0x800f41b27bde8365,2 +np.float64,0xbfe2c98390e59307,0xbfe0e3c9260fe798,2 +np.float64,0xffdd6206bcbac40e,0xbff0000000000000,2 +np.float64,0x67f35ef4cfe6c,0x67f35ef4cfe6c,2 +np.float64,0x800337e02ae66fc1,0x800337e02ae66fc1,2 +np.float64,0x3fe0ff70afe1fee1,0x3fdf1f46434330df,2 +np.float64,0x3fd7e0a1df2fc144,0x3fd6d3f82c8031e4,2 
+np.float64,0x8008da5cd1b1b4ba,0x8008da5cd1b1b4ba,2 +np.float64,0x80065ec9e4ccbd95,0x80065ec9e4ccbd95,2 +np.float64,0x3fe1d1e559a3a3cb,0x3fe02e4f146aa1ab,2 +np.float64,0x7feb7d2f0836fa5d,0x3ff0000000000000,2 +np.float64,0xbfcb33ce9736679c,0xbfcaccd431b205bb,2 +np.float64,0x800e6d0adf5cda16,0x800e6d0adf5cda16,2 +np.float64,0x7fe46f272ca8de4d,0x3ff0000000000000,2 +np.float64,0x4fdfc73e9fbfa,0x4fdfc73e9fbfa,2 +np.float64,0x800958a13112b143,0x800958a13112b143,2 +np.float64,0xbfea01f877f403f1,0xbfe579a541594247,2 +np.float64,0xeefaf599ddf5f,0xeefaf599ddf5f,2 +np.float64,0x80038766c5e70ece,0x80038766c5e70ece,2 +np.float64,0x7fd31bc28ba63784,0x3ff0000000000000,2 +np.float64,0xbfe4df77eee9bef0,0xbfe257abe7083b77,2 +np.float64,0x7fe6790c78acf218,0x3ff0000000000000,2 +np.float64,0xffe7c66884af8cd0,0xbff0000000000000,2 +np.float64,0x800115e36f422bc8,0x800115e36f422bc8,2 +np.float64,0x3fc601945d2c0329,0x3fc5cab917bb20bc,2 +np.float64,0x3fd6ac9546ad592b,0x3fd5c55437ec3508,2 +np.float64,0xa7bd59294f7ab,0xa7bd59294f7ab,2 +np.float64,0x8005c26c8b8b84da,0x8005c26c8b8b84da,2 +np.float64,0x8257501704aea,0x8257501704aea,2 +np.float64,0x5b12aae0b6256,0x5b12aae0b6256,2 +np.float64,0x800232fe02c465fd,0x800232fe02c465fd,2 +np.float64,0x800dae28f85b5c52,0x800dae28f85b5c52,2 +np.float64,0x3fdade1ac135bc36,0x3fd964a2000ace25,2 +np.float64,0x3fed72ca04fae594,0x3fe73b9170d809f9,2 +np.float64,0x7fc6397e2b2c72fb,0x3ff0000000000000,2 +np.float64,0x3fe1f5296d23ea53,0x3fe048802d17621e,2 +np.float64,0xffe05544b920aa89,0xbff0000000000000,2 +np.float64,0xbfdb2e1588365c2c,0xbfd9a7e4113c713e,2 +np.float64,0xbfed6a06fa3ad40e,0xbfe7376be60535f8,2 +np.float64,0xbfe31dcaf5e63b96,0xbfe120417c46cac1,2 +np.float64,0xbfb7ed67ae2fdad0,0xbfb7dba14af33b00,2 +np.float64,0xffd32bb7eb265770,0xbff0000000000000,2 +np.float64,0x80039877b04730f0,0x80039877b04730f0,2 +np.float64,0x3f832e5630265cac,0x3f832e316f47f218,2 +np.float64,0xffe7fa7f732ff4fe,0xbff0000000000000,2 +np.float64,0x9649b87f2c937,0x9649b87f2c937,2 +np.float64,0xffaee447183dc890,0xbff0000000000000,2 +np.float64,0x7fe4e02dd869c05b,0x3ff0000000000000,2 +np.float64,0x3fe1d35e7463a6bd,0x3fe02f67bd21e86e,2 +np.float64,0xffe57f40fe2afe82,0xbff0000000000000,2 +np.float64,0xbfea1362b93426c6,0xbfe5833421dba8fc,2 +np.float64,0xffe9c689fe338d13,0xbff0000000000000,2 +np.float64,0xffc592dd102b25bc,0xbff0000000000000,2 +np.float64,0x3fd283c7aba5078f,0x3fd203d61d1398c3,2 +np.float64,0x8001d6820243ad05,0x8001d6820243ad05,2 +np.float64,0x3fe0ad5991e15ab4,0x3fdea14ef0d47fbd,2 +np.float64,0x3fe3916f2ee722de,0x3fe1722684a9ffb1,2 +np.float64,0xffef9e54e03f3ca9,0xbff0000000000000,2 +np.float64,0x7fe864faebb0c9f5,0x3ff0000000000000,2 +np.float64,0xbfed3587c3fa6b10,0xbfe71e7112df8a68,2 +np.float64,0xbfdd9efc643b3df8,0xbfdbac3a16caf208,2 +np.float64,0xbfd5ac08feab5812,0xbfd4e14575a6e41b,2 +np.float64,0xffda90fae6b521f6,0xbff0000000000000,2 +np.float64,0x8001380ecf22701e,0x8001380ecf22701e,2 +np.float64,0x7fed266fa5fa4cde,0x3ff0000000000000,2 +np.float64,0xffec6c0ac3b8d815,0xbff0000000000000,2 +np.float64,0x3fe7de43c32fbc88,0x3fe43ef62821a5a6,2 +np.float64,0x800bf4ffc357ea00,0x800bf4ffc357ea00,2 +np.float64,0x3fe125c975624b93,0x3fdf59b2de3eff5d,2 +np.float64,0x8004714c1028e299,0x8004714c1028e299,2 +np.float64,0x3fef1bfbf5fe37f8,0x3fe7fd2ba1b63c8a,2 +np.float64,0x800cae15c3195c2c,0x800cae15c3195c2c,2 +np.float64,0x7fde708e083ce11b,0x3ff0000000000000,2 +np.float64,0x7fbcee5df639dcbb,0x3ff0000000000000,2 +np.float64,0x800b1467141628cf,0x800b1467141628cf,2 +np.float64,0x3fe525e0d36a4bc2,0x3fe286b6e59e30f5,2 
+np.float64,0xffe987f8b8330ff1,0xbff0000000000000,2 +np.float64,0x7e0a8284fc151,0x7e0a8284fc151,2 +np.float64,0x8006f982442df305,0x8006f982442df305,2 +np.float64,0xbfd75a3cb62eb47a,0xbfd65e54cee981c9,2 +np.float64,0x258e91104b1d3,0x258e91104b1d3,2 +np.float64,0xbfecd0056779a00b,0xbfe6ed7ae97fff1b,2 +np.float64,0x7fc3a4f9122749f1,0x3ff0000000000000,2 +np.float64,0x6e2b1024dc563,0x6e2b1024dc563,2 +np.float64,0x800d575ad4daaeb6,0x800d575ad4daaeb6,2 +np.float64,0xbfceafb1073d5f64,0xbfce1c93023d8414,2 +np.float64,0xffe895cb5f312b96,0xbff0000000000000,2 +np.float64,0x7fe7811ed4ef023d,0x3ff0000000000000,2 +np.float64,0xbfd93f952f327f2a,0xbfd803e6b5576b99,2 +np.float64,0xffdd883a3fbb1074,0xbff0000000000000,2 +np.float64,0x7fee5624eefcac49,0x3ff0000000000000,2 +np.float64,0xbfe264bb2624c976,0xbfe09a9b7cc896e7,2 +np.float64,0xffef14b417be2967,0xbff0000000000000,2 +np.float64,0xbfecbd0d94397a1b,0xbfe6e43bef852d9f,2 +np.float64,0xbfe20d9e4ba41b3c,0xbfe05a98e05846d9,2 +np.float64,0x10000000000000,0x10000000000000,2 +np.float64,0x7fefde93f7bfbd27,0x3ff0000000000000,2 +np.float64,0x80076b9e232ed73d,0x80076b9e232ed73d,2 +np.float64,0xbfe80df52c701bea,0xbfe45b754b433792,2 +np.float64,0x7fe3b5a637676b4b,0x3ff0000000000000,2 +np.float64,0x2c81d14c5903b,0x2c81d14c5903b,2 +np.float64,0x80038945c767128c,0x80038945c767128c,2 +np.float64,0xffeebaf544bd75ea,0xbff0000000000000,2 +np.float64,0xffdb1867d2b630d0,0xbff0000000000000,2 +np.float64,0x3fe3376eaee66ede,0x3fe13285579763d8,2 +np.float64,0xffddf65ca43becba,0xbff0000000000000,2 +np.float64,0xffec8e3e04791c7b,0xbff0000000000000,2 +np.float64,0x80064f4bde2c9e98,0x80064f4bde2c9e98,2 +np.float64,0x7fe534a085ea6940,0x3ff0000000000000,2 +np.float64,0xbfcbabe31d3757c8,0xbfcb3f8e70adf7e7,2 +np.float64,0xbfe45ca11e28b942,0xbfe1ff04515ef809,2 +np.float64,0x65f4df02cbe9d,0x65f4df02cbe9d,2 +np.float64,0xb08b0cbb61162,0xb08b0cbb61162,2 +np.float64,0x3feae2e8b975c5d1,0x3fe5f302b5e8eda2,2 +np.float64,0x7fcf277ff93e4eff,0x3ff0000000000000,2 +np.float64,0x80010999c4821334,0x80010999c4821334,2 +np.float64,0xbfd7f65911afecb2,0xbfd6e6e9cd098f8b,2 +np.float64,0x800e0560ec3c0ac2,0x800e0560ec3c0ac2,2 +np.float64,0x7fec4152ba3882a4,0x3ff0000000000000,2 +np.float64,0xbfb5c77cd42b8ef8,0xbfb5ba1336084908,2 +np.float64,0x457ff1b68afff,0x457ff1b68afff,2 +np.float64,0x5323ec56a647e,0x5323ec56a647e,2 +np.float64,0xbfeed16cf8bda2da,0xbfe7dc49fc9ae549,2 +np.float64,0xffe8446106b088c1,0xbff0000000000000,2 +np.float64,0xffb93cd13c3279a0,0xbff0000000000000,2 +np.float64,0x7fe515c2aeea2b84,0x3ff0000000000000,2 +np.float64,0x80099df83f933bf1,0x80099df83f933bf1,2 +np.float64,0x7fb3a375562746ea,0x3ff0000000000000,2 +np.float64,0x7fcd7efa243afdf3,0x3ff0000000000000,2 +np.float64,0xffe40cddb12819bb,0xbff0000000000000,2 +np.float64,0x8008b68eecd16d1e,0x8008b68eecd16d1e,2 +np.float64,0x2aec688055d8e,0x2aec688055d8e,2 +np.float64,0xffe23750bc646ea1,0xbff0000000000000,2 +np.float64,0x5adacf60b5b7,0x5adacf60b5b7,2 +np.float64,0x7fefb29b1cbf6535,0x3ff0000000000000,2 +np.float64,0xbfeadbf90175b7f2,0xbfe5ef55e2194794,2 +np.float64,0xeaad2885d55a5,0xeaad2885d55a5,2 +np.float64,0xffd7939fba2f2740,0xbff0000000000000,2 +np.float64,0x3fd187ea3aa30fd4,0x3fd11af023472386,2 +np.float64,0xbf6eb579c03d6b00,0xbf6eb57052f47019,2 +np.float64,0x3fefb67b3bff6cf6,0x3fe83fe4499969ac,2 +np.float64,0xbfe5183aacea3076,0xbfe27da1aa0b61a0,2 +np.float64,0xbfb83e47a2307c90,0xbfb82bcb0e12db42,2 +np.float64,0x80088849b1b11094,0x80088849b1b11094,2 +np.float64,0x800ceeed7399dddb,0x800ceeed7399dddb,2 
+np.float64,0x80097cd90892f9b2,0x80097cd90892f9b2,2 +np.float64,0x7ec73feefd8e9,0x7ec73feefd8e9,2 +np.float64,0x7fe3291de5a6523b,0x3ff0000000000000,2 +np.float64,0xbfd537086daa6e10,0xbfd4787af5f60653,2 +np.float64,0x800e8ed4455d1da9,0x800e8ed4455d1da9,2 +np.float64,0x800ef8d19cbdf1a3,0x800ef8d19cbdf1a3,2 +np.float64,0x800dc4fa3a5b89f5,0x800dc4fa3a5b89f5,2 +np.float64,0xaa8b85cd55171,0xaa8b85cd55171,2 +np.float64,0xffd67a5f40acf4be,0xbff0000000000000,2 +np.float64,0xbfb7496db22e92d8,0xbfb7390a48130861,2 +np.float64,0x3fd86a8e7ab0d51d,0x3fd74bfba0f72616,2 +np.float64,0xffb7f5b7fc2feb70,0xbff0000000000000,2 +np.float64,0xbfea0960a7f412c1,0xbfe57db6d0ff4191,2 +np.float64,0x375f4fc26ebeb,0x375f4fc26ebeb,2 +np.float64,0x800c537e70b8a6fd,0x800c537e70b8a6fd,2 +np.float64,0x800b3f4506d67e8a,0x800b3f4506d67e8a,2 +np.float64,0x7fe61f2d592c3e5a,0x3ff0000000000000,2 +np.float64,0xffefffffffffffff,0xbff0000000000000,2 +np.float64,0x8005d0bb84eba178,0x8005d0bb84eba178,2 +np.float64,0x800c78b0ec18f162,0x800c78b0ec18f162,2 +np.float64,0xbfc42cccfb285998,0xbfc4027392f66b0d,2 +np.float64,0x3fd8fdc73fb1fb8e,0x3fd7cb46f928153f,2 +np.float64,0x800c71754298e2eb,0x800c71754298e2eb,2 +np.float64,0x3fe4aa7a96a954f5,0x3fe233f5d3bc1352,2 +np.float64,0x7fd53841f6aa7083,0x3ff0000000000000,2 +np.float64,0x3fd0a887b8a15110,0x3fd04ac3b9c0d1ca,2 +np.float64,0x8007b8e164cf71c4,0x8007b8e164cf71c4,2 +np.float64,0xbfddc35c66bb86b8,0xbfdbc9c5dddfb014,2 +np.float64,0x6a3756fed46eb,0x6a3756fed46eb,2 +np.float64,0xffd3dcd05527b9a0,0xbff0000000000000,2 +np.float64,0xbfd7dc75632fb8ea,0xbfd6d0538b340a98,2 +np.float64,0x17501f822ea05,0x17501f822ea05,2 +np.float64,0xbfe1f98b99a3f317,0xbfe04bbf8f8b6cb3,2 +np.float64,0x66ea65d2cdd4d,0x66ea65d2cdd4d,2 +np.float64,0xbfd12241e2224484,0xbfd0bc62f46ea5e1,2 +np.float64,0x3fed6e6fb3fadcdf,0x3fe7398249097285,2 +np.float64,0x3fe0b5ebeba16bd8,0x3fdeae84b3000a47,2 +np.float64,0x66d1bce8cda38,0x66d1bce8cda38,2 +np.float64,0x3fdd728db3bae51b,0x3fdb880f28c52713,2 +np.float64,0xffb45dbe5228bb80,0xbff0000000000000,2 +np.float64,0x1ff8990c3ff14,0x1ff8990c3ff14,2 +np.float64,0x800a68e8f294d1d2,0x800a68e8f294d1d2,2 +np.float64,0xbfe4d08b84a9a117,0xbfe24da40bff6be7,2 +np.float64,0x3fe0177f0ee02efe,0x3fddb83c5971df51,2 +np.float64,0xffc56893692ad128,0xbff0000000000000,2 +np.float64,0x51b44f6aa368b,0x51b44f6aa368b,2 +np.float64,0x2258ff4e44b21,0x2258ff4e44b21,2 +np.float64,0x3fe913649e7226c9,0x3fe4f3f119530f53,2 +np.float64,0xffe3767df766ecfc,0xbff0000000000000,2 +np.float64,0xbfe62ae12fec55c2,0xbfe33108f1f22a94,2 +np.float64,0x7fb6a6308e2d4c60,0x3ff0000000000000,2 +np.float64,0xbfe00f2085e01e41,0xbfddab19b6fc77d1,2 +np.float64,0x3fb66447dc2cc890,0x3fb655b4f46844f0,2 +np.float64,0x3fd80238f6b00470,0x3fd6f143be1617d6,2 +np.float64,0xbfd05bfeb3a0b7fe,0xbfd0031ab3455e15,2 +np.float64,0xffc3a50351274a08,0xbff0000000000000,2 +np.float64,0xffd8f4241cb1e848,0xbff0000000000000,2 +np.float64,0xbfca72a88c34e550,0xbfca13ebe85f2aca,2 +np.float64,0x3fd47d683ba8fad0,0x3fd3d13f1176ed8c,2 +np.float64,0x3fb6418e642c831d,0x3fb6333ebe479ff2,2 +np.float64,0x800fde8e023fbd1c,0x800fde8e023fbd1c,2 +np.float64,0x8001fb01e323f605,0x8001fb01e323f605,2 +np.float64,0x3febb21ff9f76440,0x3fe65ed788d52fee,2 +np.float64,0x3fe47553ffe8eaa8,0x3fe20fe01f853603,2 +np.float64,0x7fca20b3f9344167,0x3ff0000000000000,2 +np.float64,0x3fe704f4ec6e09ea,0x3fe3ba7277201805,2 +np.float64,0xf864359df0c87,0xf864359df0c87,2 +np.float64,0x4d96b01c9b2d7,0x4d96b01c9b2d7,2 +np.float64,0x3fe8a09fe9f14140,0x3fe4b1c6a2d2e095,2 
+np.float64,0xffc46c61b228d8c4,0xbff0000000000000,2 +np.float64,0x3fe680a837ed0150,0x3fe3679d6eeb6485,2 +np.float64,0xbfecedc20f39db84,0xbfe6fbe9ee978bf6,2 +np.float64,0x3fb2314eae24629d,0x3fb2297ba6d55d2d,2 +np.float64,0x3fe9f0b8e7b3e172,0x3fe57026eae36db3,2 +np.float64,0x80097a132ed2f427,0x80097a132ed2f427,2 +np.float64,0x800ae5a41955cb49,0x800ae5a41955cb49,2 +np.float64,0xbfd7527279aea4e4,0xbfd6577de356e1bd,2 +np.float64,0x3fe27d3e01e4fa7c,0x3fe0ac7dd96f9179,2 +np.float64,0x7fedd8cb01bbb195,0x3ff0000000000000,2 +np.float64,0x78f8695af1f0e,0x78f8695af1f0e,2 +np.float64,0x800d2d0e927a5a1d,0x800d2d0e927a5a1d,2 +np.float64,0xffe74b46fb2e968e,0xbff0000000000000,2 +np.float64,0xbfdd12d4c8ba25aa,0xbfdb39dae49e1c10,2 +np.float64,0xbfd6c14710ad828e,0xbfd5d79ef5a8d921,2 +np.float64,0x921f4e55243ea,0x921f4e55243ea,2 +np.float64,0x800b4e4c80969c99,0x800b4e4c80969c99,2 +np.float64,0x7fe08c6ab7e118d4,0x3ff0000000000000,2 +np.float64,0xbfed290014fa5200,0xbfe71871f7e859ed,2 +np.float64,0x8008c1d5c59183ac,0x8008c1d5c59183ac,2 +np.float64,0x3fd339e68c2673cd,0x3fd2aaff3f165a9d,2 +np.float64,0xbfdd20d8113a41b0,0xbfdb4553ea2cb2fb,2 +np.float64,0x3fe52a25deea544c,0x3fe2898d5bf4442c,2 +np.float64,0x498602d4930c1,0x498602d4930c1,2 +np.float64,0x3fd8c450113188a0,0x3fd799b0b2a6c43c,2 +np.float64,0xbfd72bc2f2ae5786,0xbfd6357e15ba7f70,2 +np.float64,0xbfd076188ea0ec32,0xbfd01b8fce44d1af,2 +np.float64,0x9aace1713559c,0x9aace1713559c,2 +np.float64,0x8008a730e8914e62,0x8008a730e8914e62,2 +np.float64,0x7fe9e9a3d833d347,0x3ff0000000000000,2 +np.float64,0x800d3a0d69da741b,0x800d3a0d69da741b,2 +np.float64,0xbfe3e28a29e7c514,0xbfe1aad7643a2d19,2 +np.float64,0x7fe9894c71331298,0x3ff0000000000000,2 +np.float64,0xbfe7c6acb5ef8d5a,0xbfe430c9e258ce62,2 +np.float64,0xffb5a520a62b4a40,0xbff0000000000000,2 +np.float64,0x7fc02109ae204212,0x3ff0000000000000,2 +np.float64,0xb5c58f196b8b2,0xb5c58f196b8b2,2 +np.float64,0x3feb4ee82e769dd0,0x3fe62bae9a39d8b1,2 +np.float64,0x3fec5c3cf278b87a,0x3fe6b49000f12441,2 +np.float64,0x81f64b8103eca,0x81f64b8103eca,2 +np.float64,0xbfeab00d73f5601b,0xbfe5d7f755ab73d9,2 +np.float64,0x3fd016bf28a02d7e,0x3fcf843ea23bcd3c,2 +np.float64,0xbfa1db617423b6c0,0xbfa1d9872ddeb5a8,2 +np.float64,0x3fe83c879d70790f,0x3fe4771502d8f012,2 +np.float64,0x6b267586d64cf,0x6b267586d64cf,2 +np.float64,0x3fc91b6d3f3236d8,0x3fc8ca3eb4da25a9,2 +np.float64,0x7fd4e3f8f3a9c7f1,0x3ff0000000000000,2 +np.float64,0x800a75899214eb14,0x800a75899214eb14,2 +np.float64,0x7fdb1f2e07b63e5b,0x3ff0000000000000,2 +np.float64,0xffe7805a11ef00b4,0xbff0000000000000,2 +np.float64,0x3fc8e1b88a31c371,0x3fc892af45330818,2 +np.float64,0xbfe809fe447013fc,0xbfe45918f07da4d9,2 +np.float64,0xbfeb9d7f2ab73afe,0xbfe65446bfddc792,2 +np.float64,0x3fb47f0a5c28fe15,0x3fb473db9113e880,2 +np.float64,0x800a17ae3cb42f5d,0x800a17ae3cb42f5d,2 +np.float64,0xf5540945eaa81,0xf5540945eaa81,2 +np.float64,0xbfe577fc26aaeff8,0xbfe2bcfbf2cf69ff,2 +np.float64,0xbfb99b3e06333680,0xbfb98577b88e0515,2 +np.float64,0x7fd9290391b25206,0x3ff0000000000000,2 +np.float64,0x7fe1aa62ffa354c5,0x3ff0000000000000,2 +np.float64,0x7b0189a0f604,0x7b0189a0f604,2 +np.float64,0x3f9000ed602001db,0x3f900097fe168105,2 +np.float64,0x3fd576128d2aec25,0x3fd4b1002c92286f,2 +np.float64,0xffecc98ece79931d,0xbff0000000000000,2 +np.float64,0x800a1736c7f42e6e,0x800a1736c7f42e6e,2 +np.float64,0xbfed947548bb28eb,0xbfe74b71479ae739,2 +np.float64,0xa45c032148b9,0xa45c032148b9,2 +np.float64,0xbfc13d011c227a04,0xbfc1228447de5e9f,2 +np.float64,0xffed8baa6ebb1754,0xbff0000000000000,2 
+np.float64,0x800ea2de243d45bc,0x800ea2de243d45bc,2 +np.float64,0x8001396be52272d9,0x8001396be52272d9,2 +np.float64,0xd018d1cda031a,0xd018d1cda031a,2 +np.float64,0x7fe1fece1fe3fd9b,0x3ff0000000000000,2 +np.float64,0x8009ac484c135891,0x8009ac484c135891,2 +np.float64,0x3fc560ad132ac15a,0x3fc52e5a9479f08e,2 +np.float64,0x3fd6f80ebe2df01d,0x3fd607f70ce8e3f4,2 +np.float64,0xbfd3e69e82a7cd3e,0xbfd34887c2a40699,2 +np.float64,0x3fe232d9baa465b3,0x3fe0760a822ada0c,2 +np.float64,0x3fe769bbc6eed378,0x3fe3f872680f6631,2 +np.float64,0xffe63dbd952c7b7a,0xbff0000000000000,2 +np.float64,0x4e0c00da9c181,0x4e0c00da9c181,2 +np.float64,0xffeae4d89735c9b0,0xbff0000000000000,2 +np.float64,0x3fe030bcbb606179,0x3fdddfc66660bfce,2 +np.float64,0x7fe35ca40d66b947,0x3ff0000000000000,2 +np.float64,0xbfd45bd66628b7ac,0xbfd3b2e04bfe7866,2 +np.float64,0x3fd1f0be2323e17c,0x3fd17c1c340d7a48,2 +np.float64,0x3fd7123b6cae2478,0x3fd61f0675aa9ae1,2 +np.float64,0xbfe918a377723147,0xbfe4f6efe66f5714,2 +np.float64,0x7fc400356f28006a,0x3ff0000000000000,2 +np.float64,0x7fd2dead70a5bd5a,0x3ff0000000000000,2 +np.float64,0xffe9c28f81f3851e,0xbff0000000000000,2 +np.float64,0x3fd09b1ec7a1363e,0x3fd03e3894320140,2 +np.float64,0x7fe6e80c646dd018,0x3ff0000000000000,2 +np.float64,0x7fec3760a4786ec0,0x3ff0000000000000,2 +np.float64,0x309eb6ee613d8,0x309eb6ee613d8,2 +np.float64,0x800731cb0ece6397,0x800731cb0ece6397,2 +np.float64,0xbfdb0c553db618aa,0xbfd98b8a4680ee60,2 +np.float64,0x3fd603a52eac074c,0x3fd52f6b53de7455,2 +np.float64,0x9ecb821b3d971,0x9ecb821b3d971,2 +np.float64,0x3feb7d64dc36faca,0x3fe643c2754bb7f4,2 +np.float64,0xffeb94825ef72904,0xbff0000000000000,2 +np.float64,0x24267418484cf,0x24267418484cf,2 +np.float64,0xbfa6b2fbac2d65f0,0xbfa6af2dca5bfa6f,2 +np.float64,0x8010000000000000,0x8010000000000000,2 +np.float64,0xffe6873978ed0e72,0xbff0000000000000,2 +np.float64,0x800447934ba88f27,0x800447934ba88f27,2 +np.float64,0x3fef305f09fe60be,0x3fe806156b8ca47c,2 +np.float64,0xffd441c697a8838e,0xbff0000000000000,2 +np.float64,0xbfa7684f6c2ed0a0,0xbfa764238d34830c,2 +np.float64,0xffb2c976142592f0,0xbff0000000000000,2 +np.float64,0xbfcc9d1585393a2c,0xbfcc25756bcbca1f,2 +np.float64,0xbfd477bb1ba8ef76,0xbfd3cc1d2114e77e,2 +np.float64,0xbfed1559983a2ab3,0xbfe70f03afd994ee,2 +np.float64,0xbfeb51139036a227,0xbfe62ccf56bc7fff,2 +np.float64,0x7d802890fb006,0x7d802890fb006,2 +np.float64,0x800e00af777c015f,0x800e00af777c015f,2 +np.float64,0x800647ce128c8f9d,0x800647ce128c8f9d,2 +np.float64,0x800a26da91d44db6,0x800a26da91d44db6,2 +np.float64,0x3fdc727eddb8e4fe,0x3fdab5fd9db630b3,2 +np.float64,0x7fd06def2ba0dbdd,0x3ff0000000000000,2 +np.float64,0xffe23678c4a46cf1,0xbff0000000000000,2 +np.float64,0xbfe7198e42ee331c,0xbfe3c7326c9c7553,2 +np.float64,0xffae465f3c3c8cc0,0xbff0000000000000,2 +np.float64,0xff9aea7c5035d500,0xbff0000000000000,2 +np.float64,0xbfeae49c0f35c938,0xbfe5f3e9326cb08b,2 +np.float64,0x3f9a16f300342de6,0x3f9a1581212be50f,2 +np.float64,0x8d99e2c31b33d,0x8d99e2c31b33d,2 +np.float64,0xffd58af253ab15e4,0xbff0000000000000,2 +np.float64,0xbfd205cd25a40b9a,0xbfd18f97155f8b25,2 +np.float64,0xbfebe839bbf7d074,0xbfe67a6024e8fefe,2 +np.float64,0xbfe4fb3595a9f66b,0xbfe26a42f99819ea,2 +np.float64,0x800e867c739d0cf9,0x800e867c739d0cf9,2 +np.float64,0x8bc4274f17885,0x8bc4274f17885,2 +np.float64,0xaec8914b5d912,0xaec8914b5d912,2 +np.float64,0x7fd1d64473a3ac88,0x3ff0000000000000,2 +np.float64,0xbfe6d6f69cedaded,0xbfe39dd61bc7e23e,2 +np.float64,0x7fed05039d7a0a06,0x3ff0000000000000,2 +np.float64,0xbfc40eab0f281d58,0xbfc3e50d14b79265,2 
+np.float64,0x45179aec8a2f4,0x45179aec8a2f4,2 +np.float64,0xbfe717e362ee2fc7,0xbfe3c62a95b07d13,2 +np.float64,0xbfe5b8df0d6b71be,0xbfe2e76c7ec5013d,2 +np.float64,0x5c67ba6eb8cf8,0x5c67ba6eb8cf8,2 +np.float64,0xbfda72ce4cb4e59c,0xbfd909fdc7ecfe20,2 +np.float64,0x7fdf59a1e2beb343,0x3ff0000000000000,2 +np.float64,0xc4f7897f89ef1,0xc4f7897f89ef1,2 +np.float64,0x8fcd0a351f9a2,0x8fcd0a351f9a2,2 +np.float64,0x3fb161761022c2ec,0x3fb15aa31c464de2,2 +np.float64,0x8008a985be71530c,0x8008a985be71530c,2 +np.float64,0x3fca4ddb5e349bb7,0x3fc9f0a3b60e49c6,2 +np.float64,0x7fcc10a2d9382145,0x3ff0000000000000,2 +np.float64,0x78902b3af1206,0x78902b3af1206,2 +np.float64,0x7fe1e2765f23c4ec,0x3ff0000000000000,2 +np.float64,0xc1d288cf83a51,0xc1d288cf83a51,2 +np.float64,0x7fe8af692bb15ed1,0x3ff0000000000000,2 +np.float64,0x80057d90fb8afb23,0x80057d90fb8afb23,2 +np.float64,0x3fdc136b8fb826d8,0x3fda6749582b2115,2 +np.float64,0x800ec8ea477d91d5,0x800ec8ea477d91d5,2 +np.float64,0x4c0f4796981ea,0x4c0f4796981ea,2 +np.float64,0xec34c4a5d8699,0xec34c4a5d8699,2 +np.float64,0x7fce343dfb3c687b,0x3ff0000000000000,2 +np.float64,0xbfc95a98a332b530,0xbfc90705b2cc2fec,2 +np.float64,0x800d118e1dba231c,0x800d118e1dba231c,2 +np.float64,0x3fd354f310a6a9e8,0x3fd2c3bb90054154,2 +np.float64,0xbfdac0d4fab581aa,0xbfd94bf37424928e,2 +np.float64,0x3fe7f5391fefea72,0x3fe44cb49d51985b,2 +np.float64,0xd4c3c329a9879,0xd4c3c329a9879,2 +np.float64,0x3fc53977692a72f0,0x3fc50835d85c9ed1,2 +np.float64,0xbfd6989538ad312a,0xbfd5b3a2c08511fe,2 +np.float64,0xbfe329f2906653e5,0xbfe128ec1525a1c0,2 +np.float64,0x7ff0000000000000,0x3ff0000000000000,2 +np.float64,0xbfea57c90974af92,0xbfe5a87b04aa3116,2 +np.float64,0x7fdfba94043f7527,0x3ff0000000000000,2 +np.float64,0x3feedabddafdb57c,0x3fe7e06c0661978d,2 +np.float64,0x4bd9f3b697b3f,0x4bd9f3b697b3f,2 +np.float64,0x3fdd15bbfc3a2b78,0x3fdb3c3b8d070f7e,2 +np.float64,0x3fbd89ccd23b13a0,0x3fbd686b825cff80,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x3f9baa8928375512,0x3f9ba8d01ddd5300,2 +np.float64,0x4a3ebdf2947d8,0x4a3ebdf2947d8,2 +np.float64,0x3fe698d5c06d31ac,0x3fe376dff48312c8,2 +np.float64,0xffd5323df12a647c,0xbff0000000000000,2 +np.float64,0xffea7f111174fe22,0xbff0000000000000,2 +np.float64,0x3feb4656a9b68cad,0x3fe627392eb2156f,2 +np.float64,0x7fc1260e9c224c1c,0x3ff0000000000000,2 +np.float64,0x80056e45e5eadc8d,0x80056e45e5eadc8d,2 +np.float64,0x7fd0958ef6a12b1d,0x3ff0000000000000,2 +np.float64,0x8001f85664e3f0ae,0x8001f85664e3f0ae,2 +np.float64,0x3fe553853beaa70a,0x3fe2a4f5e7c83558,2 +np.float64,0xbfeb33ce6276679d,0xbfe61d8ec9e5ff8c,2 +np.float64,0xbfd1b24e21a3649c,0xbfd14245df6065e9,2 +np.float64,0x3fe286fc40650df9,0x3fe0b395c8059429,2 +np.float64,0xffed378058fa6f00,0xbff0000000000000,2 +np.float64,0xbfd0c4a2d7a18946,0xbfd06509a434d6a0,2 +np.float64,0xbfea31d581f463ab,0xbfe593d976139f94,2 +np.float64,0xbfe0705c85e0e0b9,0xbfde42efa978eb0c,2 +np.float64,0xe4c4c339c9899,0xe4c4c339c9899,2 +np.float64,0x3fd68befa9ad17df,0x3fd5a870b3f1f83e,2 +np.float64,0x8000000000000001,0x8000000000000001,2 +np.float64,0x3fe294256965284b,0x3fe0bd271e22d86b,2 +np.float64,0x8005327a862a64f6,0x8005327a862a64f6,2 +np.float64,0xbfdb8155ce3702ac,0xbfd9ed9ef97920f8,2 +np.float64,0xbff0000000000000,0xbfe85efab514f394,2 +np.float64,0xffe66988f1ecd312,0xbff0000000000000,2 +np.float64,0x3fb178a85e22f150,0x3fb171b9fbf95f1d,2 +np.float64,0x7f829b900025371f,0x3ff0000000000000,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0x8006cb77f60d96f1,0x8006cb77f60d96f1,2 
+np.float64,0x3fe0c5d53aa18baa,0x3fdec7012ab92b42,2 +np.float64,0x77266426ee4cd,0x77266426ee4cd,2 +np.float64,0xbfec95f468392be9,0xbfe6d11428f60136,2 +np.float64,0x3fedbf532dfb7ea6,0x3fe75f8436dd1d58,2 +np.float64,0x8002fadd3f85f5bb,0x8002fadd3f85f5bb,2 +np.float64,0xbfefebaa8d3fd755,0xbfe8566c6aa90fba,2 +np.float64,0xffc7dd2b712fba58,0xbff0000000000000,2 +np.float64,0x7fe5d3a6e8aba74d,0x3ff0000000000000,2 +np.float64,0x2da061525b40d,0x2da061525b40d,2 +np.float64,0x7fcb9b9953373732,0x3ff0000000000000,2 +np.float64,0x2ca2f6fc59460,0x2ca2f6fc59460,2 +np.float64,0xffeb84b05af70960,0xbff0000000000000,2 +np.float64,0xffe551e86c6aa3d0,0xbff0000000000000,2 +np.float64,0xbfdb311311366226,0xbfd9aa6688faafb9,2 +np.float64,0xbfd4f3875629e70e,0xbfd43bcd73534c66,2 +np.float64,0x7fe95666f932accd,0x3ff0000000000000,2 +np.float64,0x3fc73dfb482e7bf7,0x3fc6fd70c20ebf60,2 +np.float64,0x800cd9e40939b3c8,0x800cd9e40939b3c8,2 +np.float64,0x3fb0c9fa422193f0,0x3fb0c3d38879a2ac,2 +np.float64,0xffd59a38372b3470,0xbff0000000000000,2 +np.float64,0x3fa8320ef4306420,0x3fa82d739e937d35,2 +np.float64,0x3fd517f16caa2fe4,0x3fd45c8de1e93b37,2 +np.float64,0xaed921655db24,0xaed921655db24,2 +np.float64,0x93478fb9268f2,0x93478fb9268f2,2 +np.float64,0x1615e28a2c2bd,0x1615e28a2c2bd,2 +np.float64,0xbfead23010f5a460,0xbfe5ea24d5d8f820,2 +np.float64,0x774a6070ee94d,0x774a6070ee94d,2 +np.float64,0x3fdf5874bd3eb0e9,0x3fdd0ef121dd915c,2 +np.float64,0x8004b25f53a964bf,0x8004b25f53a964bf,2 +np.float64,0xbfddacdd2ebb59ba,0xbfdbb78198fab36b,2 +np.float64,0x8008a3acf271475a,0x8008a3acf271475a,2 +np.float64,0xbfdb537c8736a6fa,0xbfd9c741038bb8f0,2 +np.float64,0xbfe56a133f6ad426,0xbfe2b3d5b8d259a1,2 +np.float64,0xffda1db531343b6a,0xbff0000000000000,2 +np.float64,0x3fcbe05f3a37c0be,0x3fcb71a54a64ddfb,2 +np.float64,0x7fe1ccaa7da39954,0x3ff0000000000000,2 +np.float64,0x3faeadd8343d5bb0,0x3faea475608860e6,2 +np.float64,0x3fe662ba1c2cc574,0x3fe354a6176e90df,2 +np.float64,0xffe4d49f4e69a93e,0xbff0000000000000,2 +np.float64,0xbfeadbc424f5b788,0xbfe5ef39dbe66343,2 +np.float64,0x99cf66f1339ed,0x99cf66f1339ed,2 +np.float64,0x33af77a2675f0,0x33af77a2675f0,2 +np.float64,0x7fec7b32ecf8f665,0x3ff0000000000000,2 +np.float64,0xffef3e44993e7c88,0xbff0000000000000,2 +np.float64,0xffe8f8ceac31f19c,0xbff0000000000000,2 +np.float64,0x7fe0d15b6da1a2b6,0x3ff0000000000000,2 +np.float64,0x4ba795c2974f3,0x4ba795c2974f3,2 +np.float64,0x3fe361aa37a6c354,0x3fe15079021d6b15,2 +np.float64,0xffe709714f6e12e2,0xbff0000000000000,2 +np.float64,0xffe7ea6a872fd4d4,0xbff0000000000000,2 +np.float64,0xffdb9441c8b72884,0xbff0000000000000,2 +np.float64,0xffd5e11ae9abc236,0xbff0000000000000,2 +np.float64,0xffe092a08b612540,0xbff0000000000000,2 +np.float64,0x3fe1f27e1ca3e4fc,0x3fe04685b5131207,2 +np.float64,0xbfe71ce1bdee39c4,0xbfe3c940809a7081,2 +np.float64,0xffe8c3aa68318754,0xbff0000000000000,2 +np.float64,0x800d4e2919da9c52,0x800d4e2919da9c52,2 +np.float64,0x7fe6c8bca76d9178,0x3ff0000000000000,2 +np.float64,0x7fced8751e3db0e9,0x3ff0000000000000,2 +np.float64,0xd61d0c8bac3a2,0xd61d0c8bac3a2,2 +np.float64,0x3fec57732938aee6,0x3fe6b22f15f38352,2 +np.float64,0xff9251cc7024a3a0,0xbff0000000000000,2 +np.float64,0xf4a68cb9e94d2,0xf4a68cb9e94d2,2 +np.float64,0x3feed76703bdaece,0x3fe7def0fc9a080c,2 +np.float64,0xbfe8971ff7712e40,0xbfe4ac3eb8ebff07,2 +np.float64,0x3fe4825f682904bf,0x3fe218c1952fe67d,2 +np.float64,0xbfd60f7698ac1eee,0xbfd539f0979b4b0c,2 +np.float64,0x3fcf0845993e1088,0x3fce7032f7180144,2 +np.float64,0x7fc83443f3306887,0x3ff0000000000000,2 
+np.float64,0x3fe93123ae726247,0x3fe504e4fc437e89,2 +np.float64,0x3fbf9eb8363f3d70,0x3fbf75cdfa6828d5,2 +np.float64,0xbf8b45e5d0368bc0,0xbf8b457c29dfe1a9,2 +np.float64,0x8006c2853d0d850b,0x8006c2853d0d850b,2 +np.float64,0xffef26e25ffe4dc4,0xbff0000000000000,2 +np.float64,0x7fefffffffffffff,0x3ff0000000000000,2 +np.float64,0xbfde98f2c2bd31e6,0xbfdc761bfab1c4cb,2 +np.float64,0xffb725e6222e4bd0,0xbff0000000000000,2 +np.float64,0x800c63ead5d8c7d6,0x800c63ead5d8c7d6,2 +np.float64,0x3fea087e95f410fd,0x3fe57d3ab440706c,2 +np.float64,0xbfdf9f8a603f3f14,0xbfdd4742d77dfa57,2 +np.float64,0xfff0000000000000,0xbff0000000000000,2 +np.float64,0xbfcdc0841d3b8108,0xbfcd3a401debba9a,2 +np.float64,0x800f0c8f4f7e191f,0x800f0c8f4f7e191f,2 +np.float64,0x800ba6e75fd74dcf,0x800ba6e75fd74dcf,2 +np.float64,0x7fee4927e8bc924f,0x3ff0000000000000,2 +np.float64,0x3fadf141903be283,0x3fade8878d9d3551,2 +np.float64,0x3efb1a267df64,0x3efb1a267df64,2 +np.float64,0xffebf55f22b7eabe,0xbff0000000000000,2 +np.float64,0x7fbe8045663d008a,0x3ff0000000000000,2 +np.float64,0x3fefc0129f7f8026,0x3fe843f8b7d6cf38,2 +np.float64,0xbfe846b420f08d68,0xbfe47d1709e43937,2 +np.float64,0x7fe8e87043f1d0e0,0x3ff0000000000000,2 +np.float64,0x3fcfb718453f6e31,0x3fcf14ecee7b32b4,2 +np.float64,0x7fe4306b71a860d6,0x3ff0000000000000,2 +np.float64,0x7fee08459f7c108a,0x3ff0000000000000,2 +np.float64,0x3fed705165fae0a3,0x3fe73a66369c5700,2 +np.float64,0x7fd0e63f4da1cc7e,0x3ff0000000000000,2 +np.float64,0xffd1a40c2ea34818,0xbff0000000000000,2 +np.float64,0xbfa369795c26d2f0,0xbfa36718218d46b3,2 +np.float64,0xef70b9f5dee17,0xef70b9f5dee17,2 +np.float64,0x3fb50a0a6e2a1410,0x3fb4fdf27724560a,2 +np.float64,0x7fe30a0f6166141e,0x3ff0000000000000,2 +np.float64,0xbfd7b3ca7daf6794,0xbfd6accb81032b2d,2 +np.float64,0x3fc21dceb3243b9d,0x3fc1ff15d5d277a3,2 +np.float64,0x3fe483e445a907c9,0x3fe219ca0e269552,2 +np.float64,0x3fb2b1e2a22563c0,0x3fb2a96554900eaf,2 +np.float64,0x4b1ff6409641,0x4b1ff6409641,2 +np.float64,0xbfd92eabc9b25d58,0xbfd7f55d7776d64e,2 +np.float64,0x8003b8604c8770c1,0x8003b8604c8770c1,2 +np.float64,0x800d20a9df1a4154,0x800d20a9df1a4154,2 +np.float64,0xecf8a535d9f15,0xecf8a535d9f15,2 +np.float64,0x3fe92d15bab25a2b,0x3fe50296aa15ae85,2 +np.float64,0x800239c205a47385,0x800239c205a47385,2 +np.float64,0x3fc48664a9290cc8,0x3fc459d126320ef6,2 +np.float64,0x3fe7620625eec40c,0x3fe3f3bcbee3e8c6,2 +np.float64,0x3fd242ff4ca48600,0x3fd1c81ed7a971c8,2 +np.float64,0xbfe39bafcfa73760,0xbfe17959c7a279db,2 +np.float64,0x7fdcd2567239a4ac,0x3ff0000000000000,2 +np.float64,0x3fe5f2f292ebe5e6,0x3fe30d12f05e2752,2 +np.float64,0x7fda3819d1347033,0x3ff0000000000000,2 +np.float64,0xffca5b4d4334b69c,0xbff0000000000000,2 +np.float64,0xb8a2b7cd71457,0xb8a2b7cd71457,2 +np.float64,0x3fee689603fcd12c,0x3fe7ad4ace26d6dd,2 +np.float64,0x7fe26541a564ca82,0x3ff0000000000000,2 +np.float64,0x3fe6912ee66d225e,0x3fe3720d242c4d82,2 +np.float64,0xffe6580c75ecb018,0xbff0000000000000,2 +np.float64,0x7fe01a3370603466,0x3ff0000000000000,2 +np.float64,0xffe84e3f84b09c7e,0xbff0000000000000,2 +np.float64,0x3ff0000000000000,0x3fe85efab514f394,2 +np.float64,0x3fe214d4266429a8,0x3fe05fec03a3c247,2 +np.float64,0x3fd00aec5da015d8,0x3fcf6e070ad4ad62,2 +np.float64,0x800aac8631f5590d,0x800aac8631f5590d,2 +np.float64,0xbfe7c4f5f76f89ec,0xbfe42fc1c57b4a13,2 +np.float64,0xaf146c7d5e28e,0xaf146c7d5e28e,2 +np.float64,0xbfe57188b66ae312,0xbfe2b8be4615ef75,2 +np.float64,0xffef8cb8e1ff1971,0xbff0000000000000,2 +np.float64,0x8001daf8aa63b5f2,0x8001daf8aa63b5f2,2 +np.float64,0x3fdddcc339bbb986,0x3fdbde5f3783538b,2 
+np.float64,0xdd8c92c3bb193,0xdd8c92c3bb193,2 +np.float64,0xbfe861a148f0c342,0xbfe48cf1d228a336,2 +np.float64,0xffe260a32e24c146,0xbff0000000000000,2 +np.float64,0x1f7474b43ee8f,0x1f7474b43ee8f,2 +np.float64,0x3fe81dbd89703b7c,0x3fe464d78df92b7b,2 +np.float64,0x7fed0101177a0201,0x3ff0000000000000,2 +np.float64,0x7fd8b419a8316832,0x3ff0000000000000,2 +np.float64,0x3fe93debccf27bd8,0x3fe50c27727917f0,2 +np.float64,0xe5ead05bcbd5a,0xe5ead05bcbd5a,2 +np.float64,0xbfebbbc4cff7778a,0xbfe663c4ca003bbf,2 +np.float64,0xbfea343eb474687e,0xbfe59529f73ea151,2 +np.float64,0x3fbe74a5963ce94b,0x3fbe50123ed05d8d,2 +np.float64,0x3fd31d3a5d263a75,0x3fd290c026cb38a5,2 +np.float64,0xbfd79908acaf3212,0xbfd695620e31c3c6,2 +np.float64,0xbfc26a350324d46c,0xbfc249f335f3e465,2 +np.float64,0xbfac38d5583871b0,0xbfac31866d12a45e,2 +np.float64,0x3fe40cea672819d5,0x3fe1c83754e72c92,2 +np.float64,0xbfa74770642e8ee0,0xbfa74355fcf67332,2 +np.float64,0x7fc60942d32c1285,0x3ff0000000000000,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc new file mode 100644 index 0000000..c26db55 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/checks.pyx b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/checks.pyx new file mode 100644 index 0000000..57df05c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/checks.pyx @@ -0,0 +1,373 @@ +#cython: language_level=3 + +""" +Functions in this module give python-space wrappers for cython functions +exposed in numpy/__init__.pxd, so they can be tested in test_cython.py +""" +cimport numpy as cnp +cnp.import_array() + + +def is_td64(obj): + return cnp.is_timedelta64_object(obj) + + +def is_dt64(obj): + return cnp.is_datetime64_object(obj) + + +def get_dt64_value(obj): + return cnp.get_datetime64_value(obj) + + +def get_td64_value(obj): + return cnp.get_timedelta64_value(obj) + + +def get_dt64_unit(obj): + return cnp.get_datetime64_unit(obj) + + +def is_integer(obj): + return isinstance(obj, (cnp.integer, int)) + + +def get_datetime_iso_8601_strlen(): + return cnp.get_datetime_iso_8601_strlen(0, cnp.NPY_FR_ns) + + +def convert_datetime64_to_datetimestruct(): + cdef: + cnp.npy_datetimestruct dts + cnp.PyArray_DatetimeMetaData meta + cnp.int64_t value = 1647374515260292 + # i.e. 
(time.time() * 10**6) at 2022-03-15 20:01:55.260292 UTC + + meta.base = cnp.NPY_FR_us + meta.num = 1 + cnp.convert_datetime64_to_datetimestruct(&meta, value, &dts) + return dts + + +def make_iso_8601_datetime(dt: "datetime"): + cdef: + cnp.npy_datetimestruct dts + char result[36] # 36 corresponds to NPY_FR_s passed below + int local = 0 + int utc = 0 + int tzoffset = 0 + + dts.year = dt.year + dts.month = dt.month + dts.day = dt.day + dts.hour = dt.hour + dts.min = dt.minute + dts.sec = dt.second + dts.us = dt.microsecond + dts.ps = dts.as = 0 + + cnp.make_iso_8601_datetime( + &dts, + result, + sizeof(result), + local, + utc, + cnp.NPY_FR_s, + tzoffset, + cnp.NPY_NO_CASTING, + ) + return result + + +cdef cnp.broadcast multiiter_from_broadcast_obj(object bcast): + cdef dict iter_map = { + 1: cnp.PyArray_MultiIterNew1, + 2: cnp.PyArray_MultiIterNew2, + 3: cnp.PyArray_MultiIterNew3, + 4: cnp.PyArray_MultiIterNew4, + 5: cnp.PyArray_MultiIterNew5, + } + arrays = [x.base for x in bcast.iters] + cdef cnp.broadcast result = iter_map[len(arrays)](*arrays) + return result + + +def get_multiiter_size(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.size + + +def get_multiiter_number_of_dims(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.nd + + +def get_multiiter_current_index(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.index + + +def get_multiiter_num_of_iterators(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.numiter + + +def get_multiiter_shape(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return tuple([multi.dimensions[i] for i in range(bcast.nd)]) + + +def get_multiiter_iters(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return tuple([multi.iters[i] for i in range(bcast.numiter)]) + + +def get_default_integer(): + if cnp.NPY_DEFAULT_INT == cnp.NPY_LONG: + return cnp.dtype("long") + if cnp.NPY_DEFAULT_INT == cnp.NPY_INTP: + return cnp.dtype("intp") + return None + +def get_ravel_axis(): + return cnp.NPY_RAVEL_AXIS + + +def conv_intp(cnp.intp_t val): + return val + + +def get_dtype_flags(cnp.dtype dtype): + return dtype.flags + + +cdef cnp.NpyIter* npyiter_from_nditer_obj(object it): + """A function to create a NpyIter struct from a nditer object. 
+
+    This function is only meant for testing purposes and only extracts the
+    necessary info from nditer to test the functionality of NpyIter methods
+    """
+    cdef:
+        cnp.NpyIter* cit
+        cnp.PyArray_Descr* op_dtypes[3]
+        cnp.npy_uint32 op_flags[3]
+        cnp.PyArrayObject* ops[3]
+        cnp.npy_uint32 flags = 0
+
+    if it.has_index:
+        flags |= cnp.NPY_ITER_C_INDEX
+    if it.has_delayed_bufalloc:
+        flags |= cnp.NPY_ITER_BUFFERED | cnp.NPY_ITER_DELAY_BUFALLOC
+    if it.has_multi_index:
+        flags |= cnp.NPY_ITER_MULTI_INDEX
+
+    # one of READWRITE, READONLY and WRITEONLY at the minimum must be specified for op_flags
+    for i in range(it.nop):
+        op_flags[i] = cnp.NPY_ITER_READONLY
+
+    for i in range(it.nop):
+        op_dtypes[i] = cnp.PyArray_DESCR(it.operands[i])
+        ops[i] = it.operands[i]
+
+    cit = cnp.NpyIter_MultiNew(it.nop, &ops[0], flags, cnp.NPY_KEEPORDER,
+                               cnp.NPY_NO_CASTING, &op_flags[0],
+                               NULL)
+    return cit
+
+
+def get_npyiter_size(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_GetIterSize(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def get_npyiter_ndim(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_GetNDim(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def get_npyiter_nop(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_GetNOp(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def get_npyiter_operands(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    try:
+        arr = cnp.NpyIter_GetOperandArray(cit)
+        return tuple([arr[i] for i in range(it.nop)])
+    finally:
+        cnp.NpyIter_Deallocate(cit)
+
+
+def get_npyiter_itviews(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = tuple([cnp.NpyIter_GetIterView(cit, i) for i in range(it.nop)])
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def get_npyiter_dtypes(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    try:
+        arr = cnp.NpyIter_GetDescrArray(cit)
+        return tuple([arr[i] for i in range(it.nop)])
+    finally:
+        cnp.NpyIter_Deallocate(cit)
+
+
+def npyiter_has_delayed_bufalloc(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_HasDelayedBufAlloc(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def npyiter_has_index(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_HasIndex(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def npyiter_has_multi_index(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_HasMultiIndex(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    cdef cnp.NpyIter_GetMultiIndexFunc get_multi_index = \
+        cnp.NpyIter_GetGetMultiIndex(cit, NULL)
+    cdef cnp.NpyIter_IterNextFunc iternext = \
+        cnp.NpyIter_GetIterNext(cit, NULL)
+    return 1
+
+
+def npyiter_has_finished(it: "nditer"):
+    cdef cnp.NpyIter* cit
+    try:
+        cit = npyiter_from_nditer_obj(it)
+        cnp.NpyIter_GotoIterIndex(cit, it.index)
+        return not (cnp.NpyIter_GetIterIndex(cit) < cnp.NpyIter_GetIterSize(cit))
+    finally:
+        cnp.NpyIter_Deallocate(cit)
+
+def compile_fillwithbyte():
+    # Regression test for gh-25878, mostly checks it compiles.
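The NpyIter accessors above mirror attributes that `np.nditer` already exposes at the Python level, so a natural way to exercise them is to compare both sides. A minimal sketch, assuming the `checks` extension built from this file is importable:

```python
import numpy as np
import checks  # assumed: the extension module compiled from checks.pyx

it = np.nditer([np.arange(6).reshape(2, 3), np.arange(3)])
# Each wrapper rebuilds an NpyIter from the nditer and queries the C side.
assert checks.get_npyiter_size(it) == it.itersize
assert checks.get_npyiter_ndim(it) == it.ndim
assert checks.get_npyiter_nop(it) == it.nop
# has_index only holds when the nditer was created with an index flag
assert checks.npyiter_has_index(np.nditer(np.arange(3), flags=["c_index"]))
```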
+ cdef cnp.npy_intp dims[2] + dims = (1, 2) + pos = cnp.PyArray_ZEROS(2, dims, cnp.NPY_UINT8, 0) + cnp.PyArray_FILLWBYTE(pos, 1) + return pos + +def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr): + # This works since we compile in C mode, it will fail in cpp mode + arr[1].real += 1 + arr[1].imag += 1 + # This works in both modes + arr[1].real = arr[1].real + 1 + arr[1].imag = arr[1].imag + 1 + + +def npystring_pack(arr): + cdef char *string = "Hello world" + cdef size_t size = 11 + + allocator = cnp.NpyString_acquire_allocator( + cnp.PyArray_DESCR(arr) + ) + + # copy string->packed_string, the pointer to the underlying array buffer + ret = cnp.NpyString_pack( + allocator, cnp.PyArray_DATA(arr), string, size, + ) + + cnp.NpyString_release_allocator(allocator) + return ret + + +def npystring_load(arr): + allocator = cnp.NpyString_acquire_allocator( + cnp.PyArray_DESCR(arr) + ) + + cdef cnp.npy_static_string sdata + sdata.size = 0 + sdata.buf = NULL + + cdef cnp.npy_packed_static_string *packed_string = cnp.PyArray_DATA(arr) + cdef int is_null = cnp.NpyString_load(allocator, packed_string, &sdata) + cnp.NpyString_release_allocator(allocator) + if is_null == -1: + raise ValueError("String unpacking failed.") + elif is_null == 1: + # String in the array buffer is the null string + return "" + else: + # Cython syntax for copying a c string to python bytestring: + # slice the char * by the length of the string + return sdata.buf[:sdata.size].decode('utf-8') + + +def npystring_pack_multiple(arr1, arr2): + cdef cnp.npy_string_allocator *allocators[2] + cdef cnp.PyArray_Descr *descrs[2] + descrs[0] = cnp.PyArray_DESCR(arr1) + descrs[1] = cnp.PyArray_DESCR(arr2) + + cnp.NpyString_acquire_allocators(2, descrs, allocators) + + # Write into the first element of each array + cdef int ret1 = cnp.NpyString_pack( + allocators[0], cnp.PyArray_DATA(arr1), "Hello world", 11, + ) + cdef int ret2 = cnp.NpyString_pack( + allocators[1], cnp.PyArray_DATA(arr2), "test this", 9, + ) + + # Write a null string into the last element + cdef cnp.npy_intp elsize = cnp.PyArray_ITEMSIZE(arr1) + cdef int ret3 = cnp.NpyString_pack_null( + allocators[0], + (cnp.PyArray_DATA(arr1) + 2*elsize), + ) + + cnp.NpyString_release_allocators(2, allocators) + if ret1 == -1 or ret2 == -1 or ret3 == -1: + return -1 + + return 0 + + +def npystring_allocators_other_types(arr1, arr2): + cdef cnp.npy_string_allocator *allocators[2] + cdef cnp.PyArray_Descr *descrs[2] + descrs[0] = cnp.PyArray_DESCR(arr1) + descrs[1] = cnp.PyArray_DESCR(arr2) + + cnp.NpyString_acquire_allocators(2, descrs, allocators) + + # None of the dtypes here are StringDType, so every allocator + # should be NULL upon acquisition. + cdef int ret = 0 + for allocator in allocators: + if allocator != NULL: + ret = -1 + break + + cnp.NpyString_release_allocators(2, allocators) + return ret + + +def check_npy_uintp_type_enum(): + # Regression test for gh-27890: cnp.NPY_UINTP was not defined. + # Cython would fail to compile this before gh-27890 was fixed. 
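`npystring_pack` above writes a fixed literal into the first element of a StringDType array and `npystring_load` reads it back through the unpacking API. A rough round-trip sketch, assuming NumPy 2.x (where `np.dtypes.StringDType` exists) and the built `checks` module:

```python
import numpy as np
import checks  # assumed: the extension module compiled from checks.pyx

arr = np.empty(1, dtype=np.dtypes.StringDType())
assert checks.npystring_pack(arr) == 0  # NpyString_pack returns 0 on success
assert checks.npystring_load(arr) == "Hello world"
```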
+    return cnp.NPY_UINTP > 0
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/meson.build b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/meson.build
new file mode 100644
index 0000000..8362c33
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/meson.build
@@ -0,0 +1,43 @@
+project('checks', 'c', 'cython')
+
+py = import('python').find_installation(pure: false)
+
+cc = meson.get_compiler('c')
+cy = meson.get_compiler('cython')
+
+# Keep synced with pyproject.toml
+if not cy.version().version_compare('>=3.0.6')
+  error('tests require Cython >= 3.0.6')
+endif
+
+cython_args = []
+if cy.version().version_compare('>=3.1.0')
+  cython_args += ['-Xfreethreading_compatible=True']
+endif
+
+npy_include_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))'
+  ], check: true).stdout().strip()
+
+npy_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))'
+  ], check: true).stdout().strip()
+
+# TODO: This is a hack due to gh-25135, where cython may not find the right
+# __init__.pxd file.
+add_project_arguments('-I', npy_path, language : 'cython')
+
+py.extension_module(
+    'checks',
+    'checks.pyx',
+    install: false,
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=0', # Cython still uses old NumPy C API
+        # Require 1.25+ to test datetime additions
+        '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION',
+    ],
+    include_directories: [npy_include_path],
+    cython_args: cython_args,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/setup.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/setup.py
new file mode 100644
index 0000000..eb57477
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/cython/setup.py
@@ -0,0 +1,39 @@
+"""
+Provide python-space access to the functions exposed in numpy/__init__.pxd
+for testing.
+"""
+
+import os
+from distutils.core import setup
+
+import Cython
+from Cython.Build import cythonize
+from setuptools.extension import Extension
+
+import numpy as np
+from numpy._utils import _pep440
+
+macros = [
+    ("NPY_NO_DEPRECATED_API", 0),
+    # Require 1.25+ to test datetime additions
+    ("NPY_TARGET_VERSION", "NPY_2_0_API_VERSION"),
+]
+
+checks = Extension(
+    "checks",
+    sources=[os.path.join('.', "checks.pyx")],
+    include_dirs=[np.get_include()],
+    define_macros=macros,
+)
+
+extensions = [checks]
+
+compiler_directives = {}
+if _pep440.parse(Cython.__version__) >= _pep440.parse("3.1.0a0"):
+    compiler_directives['freethreading_compatible'] = True
+
+setup(
+    ext_modules=cythonize(
+        extensions,
+        compiler_directives=compiler_directives)
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc
new file mode 100644
index 0000000..0783909
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api1.c b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api1.c
new file mode 100644
index 0000000..3dbf569
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api1.c
@@ -0,0 +1,17 @@
+#define Py_LIMITED_API 0x03060000
+
+#include <Python.h>
+#include <numpy/arrayobject.h>
+#include <numpy/ufuncobject.h>
+
+static PyModuleDef moduledef = {
+    .m_base = PyModuleDef_HEAD_INIT,
+    .m_name = "limited_api1"
+};
+
+PyMODINIT_FUNC PyInit_limited_api1(void)
+{
+    import_array();
+    import_umath();
+    return PyModule_Create(&moduledef);
+}
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx
new file mode 100644
index 0000000..327d5b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx
@@ -0,0 +1,11 @@
+#cython: language_level=3
+
+"""
+Make sure cython can compile in limited API mode (see meson.build)
+"""
+
+cdef extern from "numpy/arrayobject.h":
+    pass
+cdef extern from "numpy/arrayscalars.h":
+    pass
+
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c
new file mode 100644
index 0000000..13668f2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c
@@ -0,0 +1,19 @@
+#if Py_LIMITED_API != (PY_VERSION_HEX & 0xffff0000)
+    # error "Py_LIMITED_API not defined to Python major+minor version"
+#endif
+
+#include <Python.h>
+#include <numpy/arrayobject.h>
+#include <numpy/ufuncobject.h>
+
+static PyModuleDef moduledef = {
+    .m_base = PyModuleDef_HEAD_INIT,
+    .m_name = "limited_api_latest"
+};
+
+PyMODINIT_FUNC PyInit_limited_api_latest(void)
+{
+    import_array();
+    import_umath();
+    return PyModule_Create(&moduledef);
+}
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/meson.build b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/meson.build
new file mode 100644
index 0000000..65287d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/meson.build
@@ -0,0 +1,59 @@
+project('checks', 'c', 'cython')
+
+py = import('python').find_installation(pure: false)
+
+cc = meson.get_compiler('c')
+cy = meson.get_compiler('cython')
+
+# Keep synced with pyproject.toml
+if not cy.version().version_compare('>=3.0.6')
+  error('tests require Cython >= 3.0.6')
+endif
+
+npy_include_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))'
+  ], check: true).stdout().strip()
+
+npy_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))'
+  ], check: true).stdout().strip()
+
+# TODO: This is a hack due to https://github.com/cython/cython/issues/5820,
+# where cython may not find the right __init__.pxd file.
+add_project_arguments('-I', npy_path, language : 'cython')
+
+py.extension_module(
+    'limited_api1',
+    'limited_api1.c',
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION',
+    ],
+    include_directories: [npy_include_path],
+    limited_api: '3.6',
+)
+
+py.extension_module(
+    'limited_api_latest',
+    'limited_api_latest.c',
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION',
+    ],
+    include_directories: [npy_include_path],
+    limited_api: py.language_version(),
+)
+
+py.extension_module(
+    'limited_api2',
+    'limited_api2.pyx',
+    install: false,
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=0',
+        # Require 1.25+ to test datetime additions
+        '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION',
+        '-DCYTHON_LIMITED_API=1',
+    ],
+    include_directories: [npy_include_path],
+    limited_api: '3.7',
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/setup.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/setup.py
new file mode 100644
index 0000000..16adcd1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/examples/limited_api/setup.py
@@ -0,0 +1,24 @@
+"""
+Build an example package using the limited Python C API.
+"""
+
+import os
+
+from setuptools import Extension, setup
+
+import numpy as np
+
+macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")]
+
+limited_api = Extension(
+    "limited_api",
+    sources=[os.path.join('.', "limited_api.c")],
+    include_dirs=[np.get_include()],
+    define_macros=macros,
+)
+
+extensions = [limited_api]
+
+setup(
+    ext_modules=extensions
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test__exceptions.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test__exceptions.py
new file mode 100644
index 0000000..35782e7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test__exceptions.py
@@ -0,0 +1,90 @@
+"""
+Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
+""" + +import pickle + +import pytest + +import numpy as np +from numpy.exceptions import AxisError + +_ArrayMemoryError = np._core._exceptions._ArrayMemoryError +_UFuncNoLoopError = np._core._exceptions._UFuncNoLoopError + +class TestArrayMemoryError: + def test_pickling(self): + """ Test that _ArrayMemoryError can be pickled """ + error = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + res = pickle.loads(pickle.dumps(error)) + assert res._total_size == error._total_size + + def test_str(self): + e = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + str(e) # not crashing is enough + + # testing these properties is easier than testing the full string repr + def test__size_to_string(self): + """ Test e._size_to_string """ + f = _ArrayMemoryError._size_to_string + Ki = 1024 + assert f(0) == '0 bytes' + assert f(1) == '1 bytes' + assert f(1023) == '1023 bytes' + assert f(Ki) == '1.00 KiB' + assert f(Ki + 1) == '1.00 KiB' + assert f(10 * Ki) == '10.0 KiB' + assert f(int(999.4 * Ki)) == '999. KiB' + assert f(int(1023.4 * Ki)) == '1023. KiB' + assert f(int(1023.5 * Ki)) == '1.00 MiB' + assert f(Ki * Ki) == '1.00 MiB' + + # 1023.9999 Mib should round to 1 GiB + assert f(int(Ki * Ki * Ki * 0.9999)) == '1.00 GiB' + assert f(Ki * Ki * Ki * Ki * Ki * Ki) == '1.00 EiB' + # larger than sys.maxsize, adding larger prefixes isn't going to help + # anyway. + assert f(Ki * Ki * Ki * Ki * Ki * Ki * 123456) == '123456. EiB' + + def test__total_size(self): + """ Test e._total_size """ + e = _ArrayMemoryError((1,), np.dtype(np.uint8)) + assert e._total_size == 1 + + e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16))) + assert e._total_size == 1024 + + +class TestUFuncNoLoopError: + def test_pickling(self): + """ Test that _UFuncNoLoopError can be pickled """ + assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes) + + +@pytest.mark.parametrize("args", [ + (2, 1, None), + (2, 1, "test_prefix"), + ("test message",), +]) +class TestAxisError: + def test_attr(self, args): + """Validate attribute types.""" + exc = AxisError(*args) + if len(args) == 1: + assert exc.axis is None + assert exc.ndim is None + else: + axis, ndim, *_ = args + assert exc.axis == axis + assert exc.ndim == ndim + + def test_pickling(self, args): + """Test that `AxisError` can be pickled.""" + exc = AxisError(*args) + exc2 = pickle.loads(pickle.dumps(exc)) + + assert type(exc) is type(exc2) + for name in ("axis", "ndim", "args"): + attr1 = getattr(exc, name) + attr2 = getattr(exc2, name) + assert attr1 == attr2, name diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_abc.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_abc.py new file mode 100644 index 0000000..aee1904 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_abc.py @@ -0,0 +1,54 @@ +import numbers + +import numpy as np +from numpy._core.numerictypes import sctypes +from numpy.testing import assert_ + + +class TestABC: + def test_abstract(self): + assert_(issubclass(np.number, numbers.Number)) + + assert_(issubclass(np.inexact, numbers.Complex)) + assert_(issubclass(np.complexfloating, numbers.Complex)) + assert_(issubclass(np.floating, numbers.Real)) + + assert_(issubclass(np.integer, numbers.Integral)) + assert_(issubclass(np.signedinteger, numbers.Integral)) + assert_(issubclass(np.unsignedinteger, numbers.Integral)) + + def test_floats(self): + for t in sctypes['float']: + assert_(isinstance(t(), numbers.Real), + f"{t.__name__} is not instance of Real") + assert_(issubclass(t, numbers.Real), + f"{t.__name__} is 
not subclass of Real") + assert_(not isinstance(t(), numbers.Rational), + f"{t.__name__} is instance of Rational") + assert_(not issubclass(t, numbers.Rational), + f"{t.__name__} is subclass of Rational") + + def test_complex(self): + for t in sctypes['complex']: + assert_(isinstance(t(), numbers.Complex), + f"{t.__name__} is not instance of Complex") + assert_(issubclass(t, numbers.Complex), + f"{t.__name__} is not subclass of Complex") + assert_(not isinstance(t(), numbers.Real), + f"{t.__name__} is instance of Real") + assert_(not issubclass(t, numbers.Real), + f"{t.__name__} is subclass of Real") + + def test_int(self): + for t in sctypes['int']: + assert_(isinstance(t(), numbers.Integral), + f"{t.__name__} is not instance of Integral") + assert_(issubclass(t, numbers.Integral), + f"{t.__name__} is not subclass of Integral") + + def test_uint(self): + for t in sctypes['uint']: + assert_(isinstance(t(), numbers.Integral), + f"{t.__name__} is not instance of Integral") + assert_(issubclass(t, numbers.Integral), + f"{t.__name__} is not subclass of Integral") diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_api.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_api.py new file mode 100644 index 0000000..2599053 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_api.py @@ -0,0 +1,621 @@ +import sys + +import pytest +from numpy._core._rational_tests import rational + +import numpy as np +import numpy._core.umath as ncu +from numpy.testing import ( + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_warns, +) + + +def test_array_array(): + tobj = type(object) + ones11 = np.ones((1, 1), np.float64) + tndarray = type(ones11) + # Test is_ndarray + assert_equal(np.array(ones11, dtype=np.float64), ones11) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tndarray) + np.array(ones11) + assert_equal(old_refcount, sys.getrefcount(tndarray)) + + # test None + assert_equal(np.array(None, dtype=np.float64), + np.array(np.nan, dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tobj) + np.array(None, dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(tobj)) + + # test scalar + assert_equal(np.array(1.0, dtype=np.float64), + np.ones((), dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(np.float64) + np.array(np.array(1.0, dtype=np.float64), dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(np.float64)) + + # test string + S2 = np.dtype((bytes, 2)) + S3 = np.dtype((bytes, 3)) + S5 = np.dtype((bytes, 5)) + assert_equal(np.array(b"1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array(b"1.0").dtype, S3) + assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3) + assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1.")) + assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5)) + + # test string + U2 = np.dtype((str, 2)) + U3 = np.dtype((str, 3)) + U5 = np.dtype((str, 5)) + assert_equal(np.array("1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array("1.0").dtype, U3) + assert_equal(np.array("1.0", dtype=str).dtype, U3) + assert_equal(np.array("1.0", dtype=U2), np.array("1.")) + assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5)) + + builtins = getattr(__builtins__, '__dict__', __builtins__) + assert_(hasattr(builtins, 'get')) + + # test memoryview + dat = np.array(memoryview(b'1.0'), dtype=np.float64) + assert_equal(dat, [49.0, 46.0, 48.0]) + assert_(dat.dtype.type is np.float64) 
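The expected values in the memoryview checks above are simply the byte values of `b'1.0'`: constructing an array from a raw buffer interprets one byte per element, which the requested dtype then converts. For reference:

```python
# ASCII codes of '1', '.' and '0', the three values asserted above
assert list(b'1.0') == [49, 46, 48]
```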
+ + dat = np.array(memoryview(b'1.0')) + assert_equal(dat, [49, 46, 48]) + assert_(dat.dtype.type is np.uint8) + + # test array interface + a = np.array(100.0, dtype=np.float64) + o = type("o", (object,), + {"__array_interface__": a.__array_interface__}) + assert_equal(np.array(o, dtype=np.float64), a) + + # test array_struct interface + a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], + dtype=[('f0', int), ('f1', float), ('f2', str)]) + o = type("o", (object,), + {"__array_struct__": a.__array_struct__}) + # wasn't what I expected... is np.array(o) supposed to equal a ? + # instead we get a array([...], dtype=">V18") + assert_equal(bytes(np.array(o).data), bytes(a.data)) + + # test array + def custom__array__(self, dtype=None, copy=None): + return np.array(100.0, dtype=dtype, copy=copy) + + o = type("o", (object,), {"__array__": custom__array__})() + assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) + + # test recursion + nested = 1.5 + for i in range(ncu.MAXDIMS): + nested = [nested] + + # no error + np.array(nested) + + # Exceeds recursion limit + assert_raises(ValueError, np.array, [nested], dtype=np.float64) + + # Try with lists... + # float32 + assert_equal(np.array([None] * 10, dtype=np.float32), + np.full((10,), np.nan, dtype=np.float32)) + assert_equal(np.array([[None]] * 10, dtype=np.float32), + np.full((10, 1), np.nan, dtype=np.float32)) + assert_equal(np.array([[None] * 10], dtype=np.float32), + np.full((1, 10), np.nan, dtype=np.float32)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float32), + np.full((10, 10), np.nan, dtype=np.float32)) + # float64 + assert_equal(np.array([None] * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([[None]] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array([1.0] * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([[1.0]] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + # Try with tuples + assert_equal(np.array((None,) * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,)] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array((1.0,) * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + +@pytest.mark.parametrize("array", [True, False]) +def test_array_impossible_casts(array): + # All builtin types can be forcibly cast, at least theoretically, + # but user dtypes cannot necessarily. 
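The comment above contrasts builtin and user dtypes: every builtin type has some (possibly unsafe) cast registered, while a user dtype such as `rational` may have none. A small illustration of the builtin side, for comparison with the failing cast below:

```python
import numpy as np

# Builtin types can always be forcibly cast, e.g. int64 -> datetime64:
np.array(5).astype("M8[s]")  # works: 1970-01-01T00:00:05
# The user-defined `rational` dtype below has no cast to "M8" registered,
# so the equivalent conversion raises TypeError.
```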
+ rt = rational(1, 2) + if array: + rt = np.array(rt) + with assert_raises(TypeError): + np.array(rt, dtype="M8") + + +def test_array_astype(): + a = np.arange(6, dtype='f4').reshape(2, 3) + # Default behavior: allows unsafe casts, keeps memory layout, + # always copies. + b = a.astype('i4') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.strides, b.strides) + b = a.T.astype('i4') + assert_equal(a.T, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.T.strides, b.strides) + b = a.astype('f4') + assert_equal(a, b) + assert_(not (a is b)) + + # copy=False parameter skips a copy + b = a.astype('f4', copy=False) + assert_(a is b) + + # order parameter allows overriding of the memory layout, + # forcing a copy if the layout is wrong + b = a.astype('f4', order='F', copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(b.flags.f_contiguous) + + b = a.astype('f4', order='C', copy=False) + assert_equal(a, b) + assert_(a is b) + assert_(b.flags.c_contiguous) + + # casting parameter allows catching bad casts + b = a.astype('c8', casting='safe') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('c8')) + + assert_raises(TypeError, a.astype, 'i4', casting='safe') + + # subok=False passes through a non-subclassed array + b = a.astype('f4', subok=0, copy=False) + assert_(a is b) + + class MyNDArray(np.ndarray): + pass + + a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) + + # subok=True passes through a subclass + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), MyNDArray) + + # subok=False never returns a subclass + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not MyNDArray) + + # Make sure converting from string object to fixed length string + # does not truncate. 
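+    # (the fixed-length itemsize is discovered from the longest element,
+    # e.g. S100/U100 below)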
+ a = np.array([b'a' * 100], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S100')) + a = np.array(['a' * 100], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U100')) + + # Same test as above but for strings shorter than 64 characters + a = np.array([b'a' * 10], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S10')) + a = np.array(['a' * 10], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U10')) + + a = np.array(123456789012345678901234567890, dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='O').astype('U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array([123456789012345678901234567890], dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array([123456789012345678901234567890], dtype='O').astype('U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array(123456789012345678901234567890, dtype='S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array('a\u0140', dtype='U') + b = np.ndarray(buffer=a, dtype='uint32', shape=2) + assert_(b.size == 2) + + a = np.array([1000], dtype='i4') + assert_raises(TypeError, a.astype, 'S1', casting='safe') + + a = np.array(1000, dtype='i4') + assert_raises(TypeError, a.astype, 'U1', casting='safe') + + # gh-24023 + assert_raises(TypeError, a.astype) + +@pytest.mark.parametrize("dt", ["S", "U"]) +def test_array_astype_to_string_discovery_empty(dt): + # See also gh-19085 + arr = np.array([""], dtype=object) + # Note, the itemsize is the `0 -> 1` logic, which should change. + # The important part the test is rather that it does not error. + assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize + + # check the same thing for `np.can_cast` (since it accepts arrays) + assert np.can_cast(arr, dt, casting="unsafe") + assert not np.can_cast(arr, dt, casting="same_kind") + # as well as for the object as a descriptor: + assert np.can_cast("O", dt, casting="unsafe") + +@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"]) +def test_array_astype_to_void(dt): + dt = np.dtype(dt) + arr = np.array([], dtype=dt) + assert arr.astype("V").dtype.itemsize == dt.itemsize + +def test_object_array_astype_to_void(): + # This is different to `test_array_astype_to_void` as object arrays + # are inspected. 
The default void is "V8" (8 is the length of double) + arr = np.array([], dtype="O").astype("V") + assert arr.dtype == "V8" + +@pytest.mark.parametrize("t", + np._core.sctypes['uint'] + + np._core.sctypes['int'] + + np._core.sctypes['float'] +) +def test_array_astype_warning(t): + # test ComplexWarning when casting from complex to float or int + a = np.array(10, dtype=np.complex128) + assert_warns(np.exceptions.ComplexWarning, a.astype, t) + +@pytest.mark.parametrize(["dtype", "out_dtype"], + [(np.bytes_, np.bool), + (np.str_, np.bool), + (np.dtype("S10,S9"), np.dtype("?,?")), + # The following also checks unaligned unicode access: + (np.dtype("S7,U9"), np.dtype("?,?"))]) +def test_string_to_boolean_cast(dtype, out_dtype): + # Only the last two (empty) strings are falsy (the `\0` is stripped): + arr = np.array( + ["10", "10\0\0\0", "0\0\0", "0", "False", " ", "", "\0"], + dtype=dtype) + expected = np.array( + [True, True, True, True, True, True, False, False], + dtype=out_dtype) + assert_array_equal(arr.astype(out_dtype), expected) + # As it's similar, check that nonzero behaves the same (structs are + # nonzero if all entries are) + assert_array_equal(np.nonzero(arr), np.nonzero(expected)) + +@pytest.mark.parametrize("str_type", [str, bytes, np.str_]) +@pytest.mark.parametrize("scalar_type", + [np.complex64, np.complex128, np.clongdouble]) +def test_string_to_complex_cast(str_type, scalar_type): + value = scalar_type(b"1+3j") + assert scalar_type(value) == 1 + 3j + assert np.array([value], dtype=object).astype(scalar_type)[()] == 1 + 3j + assert np.array(value).astype(scalar_type)[()] == 1 + 3j + arr = np.zeros(1, dtype=scalar_type) + arr[0] = value + assert arr[0] == 1 + 3j + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_none_to_nan_cast(dtype): + # Note that at the time of writing this test, the scalar constructors + # reject None + arr = np.zeros(1, dtype=dtype) + arr[0] = None + assert np.isnan(arr)[0] + assert np.isnan(np.array(None, dtype=dtype))[()] + assert np.isnan(np.array([None], dtype=dtype))[0] + assert np.isnan(np.array(None).astype(dtype))[()] + +def test_copyto_fromscalar(): + a = np.arange(6, dtype='f4').reshape(2, 3) + + # Simple copy + np.copyto(a, 1.5) + assert_equal(a, 1.5) + np.copyto(a.T, 2.5) + assert_equal(a, 2.5) + + # Where-masked copy + mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') + np.copyto(a, 3.5, where=mask) + assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) + mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') + np.copyto(a.T, 4.5, where=mask) + assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) + +def test_copyto(): + a = np.arange(6, dtype='i4').reshape(2, 3) + + # Simple copy + np.copyto(a, [[3, 1, 5], [6, 2, 1]]) + assert_equal(a, [[3, 1, 5], [6, 2, 1]]) + + # Overlapping copy should work + np.copyto(a[:, :2], a[::-1, 1::-1]) + assert_equal(a, [[2, 6, 5], [1, 3, 1]]) + + # Defaults to 'same_kind' casting + assert_raises(TypeError, np.copyto, a, 1.5) + + # Force a copy with 'unsafe' casting, truncating 1.5 to 1 + np.copyto(a, 1.5, casting='unsafe') + assert_equal(a, 1) + + # Copying with a mask + np.copyto(a, 3, where=[True, False, True]) + assert_equal(a, [[3, 1, 3], [3, 1, 3]]) + + # Casting rule still applies with a mask + assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) + + # Lists of integer 0's and 1's is ok too + np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) + assert_equal(a, [[3, 4, 4], [4, 1, 3]]) + + # Overlapping copy with mask should work + np.copyto(a[:, :2], 
a[::-1, 1::-1], where=[[0, 1], [1, 1]]) + assert_equal(a, [[3, 4, 4], [4, 3, 3]]) + + # 'dst' must be an array + assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) + + +def test_copyto_cast_safety(): + with pytest.raises(TypeError): + np.copyto(np.arange(3), 3., casting="safe") + + # Can put integer and float scalars safely (and equiv): + np.copyto(np.arange(3), 3, casting="equiv") + np.copyto(np.arange(3.), 3., casting="equiv") + # And also with less precision safely: + np.copyto(np.arange(3, dtype="uint8"), 3, casting="safe") + np.copyto(np.arange(3., dtype="float32"), 3., casting="safe") + + # But not equiv: + with pytest.raises(TypeError): + np.copyto(np.arange(3, dtype="uint8"), 3, casting="equiv") + + with pytest.raises(TypeError): + np.copyto(np.arange(3., dtype="float32"), 3., casting="equiv") + + # As a special thing, object is equiv currently: + np.copyto(np.arange(3, dtype=object), 3, casting="equiv") + + # The following raises an overflow error/gives a warning but not + # type error (due to casting), though: + with pytest.raises(OverflowError): + np.copyto(np.arange(3), 2**80, casting="safe") + + with pytest.warns(RuntimeWarning): + np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe") + + +def test_copyto_permut(): + # test explicit overflow case + pad = 500 + l = [True] * pad + [True, True, True, True] + r = np.zeros(len(l) - pad) + d = np.ones(len(l) - pad) + mask = np.array(l)[pad:] + np.copyto(r, d, where=mask[::-1]) + + # test all permutation of possible masks, 9 should be sufficient for + # current 4 byte unrolled code + power = 9 + d = np.ones(power) + for i in range(2**power): + r = np.zeros(power) + l = [(i & x) != 0 for x in range(power)] + mask = np.array(l) + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=mask[::-1]) + assert_array_equal(r == 1, l[::-1]) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::2]) + assert_array_equal(r[::2] == 1, l[::2]) + assert_equal(r[::2].sum(), sum(l[::2])) + + r = np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::-2]) + assert_array_equal(r[::2] == 1, l[::-2]) + assert_equal(r[::2].sum(), sum(l[::-2])) + + for c in [0xFF, 0x7F, 0x02, 0x10]: + r = np.zeros(power) + mask = np.array(l) + imask = np.array(l).view(np.uint8) + imask[mask != 0] = c + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=True) + assert_equal(r.sum(), r.size) + r = np.ones(power) + d = np.zeros(power) + np.copyto(r, d, where=False) + assert_equal(r.sum(), r.size) + +def test_copy_order(): + a = np.arange(24).reshape(2, 1, 3, 4) + b = a.copy(order='F') + c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) + + def check_copy_result(x, y, ccontig, fcontig, strides=False): + assert_(not (x is y)) + assert_equal(x, y) + assert_equal(res.flags.c_contiguous, ccontig) + assert_equal(res.flags.f_contiguous, fcontig) + + # Validate the initial state of a, b, and c + assert_(a.flags.c_contiguous) + assert_(not a.flags.f_contiguous) + assert_(not b.flags.c_contiguous) + assert_(b.flags.f_contiguous) + assert_(not c.flags.c_contiguous) + assert_(not c.flags.f_contiguous) + + # Copy with order='C' + res = a.copy(order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = c.copy(order='C') + 
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + res = np.copy(a, order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = np.copy(c, order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + + # Copy with order='F' + res = a.copy(order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = b.copy(order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + res = np.copy(a, order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = np.copy(b, order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + + # Copy with order='K' + res = a.copy(order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + res = np.copy(a, order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + +def test_contiguous_flags(): + a = np.ones((4, 4, 1))[::2, :, :] + a.strides = a.strides[:2] + (-123,) + b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) + + def check_contig(a, ccontig, fcontig): + assert_(a.flags.c_contiguous == ccontig) + assert_(a.flags.f_contiguous == fcontig) + + # Check if new arrays are correct: + check_contig(a, False, False) + check_contig(b, False, False) + check_contig(np.empty((2, 2, 0, 2, 2)), True, True) + check_contig(np.array([[[1], [2]]], order='F'), True, True) + check_contig(np.empty((2, 2)), True, False) + check_contig(np.empty((2, 2), order='F'), False, True) + + # Check that np.array creates correct contiguous flags: + check_contig(np.array(a, copy=None), False, False) + check_contig(np.array(a, copy=None, order='C'), True, False) + check_contig(np.array(a, ndmin=4, copy=None, order='F'), False, True) + + # Check slicing update of flags and : + check_contig(a[0], True, True) + check_contig(a[None, ::4, ..., None], True, True) + check_contig(b[0, 0, ...], False, True) + check_contig(b[:, :, 0:0, :, :], True, True) + + # Test ravel and squeeze. 
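+    # (both yield 1-D results, which are trivially C- and F-contiguous)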
+ check_contig(a.ravel(), True, True) + check_contig(np.ones((1, 3, 1)).squeeze(), True, True) + +def test_broadcast_arrays(): + # Test user defined dtypes + a = np.array([(1, 2, 3)], dtype='u4,u4,u4') + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + result = np.broadcast_arrays(a, b) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) + +@pytest.mark.parametrize(["shape", "fill_value", "expected_output"], + [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), + ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))]) +def test_full_from_list(shape, fill_value, expected_output): + output = np.full(shape, fill_value) + assert_equal(output, expected_output) + +def test_astype_copyflag(): + # test the various copyflag options + arr = np.arange(10, dtype=np.intp) + + res_true = arr.astype(np.intp, copy=True) + assert not np.shares_memory(arr, res_true) + + res_false = arr.astype(np.intp, copy=False) + assert np.shares_memory(arr, res_false) + + res_false_float = arr.astype(np.float64, copy=False) + assert not np.shares_memory(arr, res_false_float) + + # _CopyMode enum isn't allowed + assert_raises(ValueError, arr.astype, np.float64, + copy=np._CopyMode.NEVER) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_argparse.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_argparse.py new file mode 100644 index 0000000..7f949c1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_argparse.py @@ -0,0 +1,92 @@ +""" +Tests for the private NumPy argument parsing functionality. +They mainly exists to ensure good test coverage without having to try the +weirder cases on actual numpy functions but test them in one place. + +The test function is defined in C to be equivalent to (errors may not always +match exactly, and could be adjusted): + + def func(arg1, /, arg2, *, arg3): + i = integer(arg1) # reproducing the 'i' parsing in Python. + return None +""" + +import threading + +import pytest +from numpy._core._multiarray_tests import ( + argparse_example_function as func, +) +from numpy._core._multiarray_tests import ( + threaded_argparse_example_function as thread_func, +) + +import numpy as np +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +def test_thread_safe_argparse_cache(): + b = threading.Barrier(8) + + def call_thread_func(): + b.wait() + thread_func(arg1=3, arg2=None) + + tasks = [threading.Thread(target=call_thread_func) for _ in range(8)] + [t.start() for t in tasks] + [t.join() for t in tasks] + + +def test_invalid_integers(): + with pytest.raises(TypeError, + match="integer argument expected, got float"): + func(1.) + with pytest.raises(OverflowError): + func(2**100) + + +def test_missing_arguments(): + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func() + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func(arg2=1, arg3=4) + with pytest.raises(TypeError, + match=r"missing required argument \'arg2\' \(pos 1\)"): + func(1, arg3=5) + + +def test_too_many_positional(): + # the second argument is positional but can be passed as keyword. 
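+    # Four positional arguments therefore exceed what the parser accepts: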
+ with pytest.raises(TypeError, + match="takes from 2 to 3 positional arguments but 4 were given"): + func(1, 2, 3, 4) + + +def test_multiple_values(): + with pytest.raises(TypeError, + match=r"given by name \('arg2'\) and position \(position 1\)"): + func(1, 2, arg2=3) + + +def test_string_fallbacks(): + # We can (currently?) use numpy strings to test the "slow" fallbacks + # that should normally not be taken due to string interning. + arg2 = np.str_("arg2") + missing_arg = np.str_("missing_arg") + func(1, **{arg2: 3}) + with pytest.raises(TypeError, + match="got an unexpected keyword argument 'missing_arg'"): + func(2, **{missing_arg: 3}) + + +def test_too_many_arguments_method_forwarding(): + # Not directly related to the standard argument parsing, but we sometimes + # forward methods to Python: arr.mean() calls np._core._methods._mean() + # This adds code coverage for this `npy_forward_method`. + arr = np.arange(3) + args = range(1000) + with pytest.raises(TypeError): + arr.mean(*args) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_api_info.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_api_info.py new file mode 100644 index 0000000..4842dbf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_api_info.py @@ -0,0 +1,113 @@ +import pytest + +import numpy as np + +info = np.__array_namespace_info__() + + +def test_capabilities(): + caps = info.capabilities() + assert caps["boolean indexing"] is True + assert caps["data-dependent shapes"] is True + + # This will be added in the 2024.12 release of the array API standard. + + # assert caps["max rank"] == 64 + # np.zeros((1,)*64) + # with pytest.raises(ValueError): + # np.zeros((1,)*65) + + +def test_default_device(): + assert info.default_device() == "cpu" == np.asarray(0).device + + +def test_default_dtypes(): + dtypes = info.default_dtypes() + assert dtypes["real floating"] == np.float64 == np.asarray(0.0).dtype + assert dtypes["complex floating"] == np.complex128 == \ + np.asarray(0.0j).dtype + assert dtypes["integral"] == np.intp == np.asarray(0).dtype + assert dtypes["indexing"] == np.intp == np.argmax(np.zeros(10)).dtype + + with pytest.raises(ValueError, match="Device not understood"): + info.default_dtypes(device="gpu") + + +def test_dtypes_all(): + dtypes = info.dtypes() + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + "float32": np.float32, + "float64": np.float64, + "complex64": np.complex64, + "complex128": np.complex128, + } + + +dtype_categories = { + "bool": {"bool": np.bool_}, + "signed integer": { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + }, + "unsigned integer": { + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }, + "integral": ("signed integer", "unsigned integer"), + "real floating": {"float32": np.float32, "float64": np.float64}, + "complex floating": {"complex64": np.complex64, "complex128": + np.complex128}, + "numeric": ("integral", "real floating", "complex floating"), +} + + +@pytest.mark.parametrize("kind", dtype_categories) +def test_dtypes_kind(kind): + expected = dtype_categories[kind] + if isinstance(expected, tuple): + assert info.dtypes(kind=kind) == info.dtypes(kind=expected) + else: + assert info.dtypes(kind=kind) == expected + + +def test_dtypes_tuple(): + dtypes = 
info.dtypes(kind=("bool", "integral")) + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + } + + +def test_dtypes_invalid_kind(): + with pytest.raises(ValueError, match="unsupported kind"): + info.dtypes(kind="invalid") + + +def test_dtypes_invalid_device(): + with pytest.raises(ValueError, match="Device not understood"): + info.dtypes(device="gpu") + + +def test_devices(): + assert info.devices() == ["cpu"] diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_coercion.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_coercion.py new file mode 100644 index 0000000..883aee6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_coercion.py @@ -0,0 +1,911 @@ +""" +Tests for array coercion, mainly through testing `np.array` results directly. +Note that other such tests exist, e.g., in `test_api.py` and many corner-cases +are tested (sometimes indirectly) elsewhere. +""" + +from itertools import permutations, product + +import numpy._core._multiarray_umath as ncu +import pytest +from numpy._core._rational_tests import rational +from pytest import param + +import numpy as np +from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal + + +def arraylikes(): + """ + Generator for functions converting an array into various array-likes. + If full is True (default) it includes array-likes not capable of handling + all dtypes. + """ + # base array: + def ndarray(a): + return a + + yield param(ndarray, id="ndarray") + + # subclass: + class MyArr(np.ndarray): + pass + + def subclass(a): + return a.view(MyArr) + + yield subclass + + class _SequenceLike: + # Older NumPy versions, sometimes cared whether a protocol array was + # also _SequenceLike. This shouldn't matter, but keep it for now + # for __array__ and not the others. + def __len__(self): + raise TypeError + + def __getitem__(self, _, /): + raise TypeError + + # Array-interface + class ArrayDunder(_SequenceLike): + def __init__(self, a): + self.a = a + + def __array__(self, dtype=None, copy=None): + if dtype is None: + return self.a + return self.a.astype(dtype) + + yield param(ArrayDunder, id="__array__") + + # memory-view + yield param(memoryview, id="memoryview") + + # Array-interface + class ArrayInterface: + def __init__(self, a): + self.a = a # need to hold on to keep interface valid + self.__array_interface__ = a.__array_interface__ + + yield param(ArrayInterface, id="__array_interface__") + + # Array-Struct + class ArrayStruct: + def __init__(self, a): + self.a = a # need to hold on to keep struct valid + self.__array_struct__ = a.__array_struct__ + + yield param(ArrayStruct, id="__array_struct__") + + +def scalar_instances(times=True, extended_precision=True, user_dtype=True): + # Hard-coded list of scalar instances. 
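+    # Each entry is wrapped in pytest.param so parametrized tests get
+    # readable ids; direct callers unwrap them via `.values[0]`.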
+ # Floats: + yield param(np.sqrt(np.float16(5)), id="float16") + yield param(np.sqrt(np.float32(5)), id="float32") + yield param(np.sqrt(np.float64(5)), id="float64") + if extended_precision: + yield param(np.sqrt(np.longdouble(5)), id="longdouble") + + # Complex: + yield param(np.sqrt(np.complex64(2 + 3j)), id="complex64") + yield param(np.sqrt(np.complex128(2 + 3j)), id="complex128") + if extended_precision: + yield param(np.sqrt(np.clongdouble(2 + 3j)), id="clongdouble") + + # Bool: + # XFAIL: Bool should be added, but has some bad properties when it + # comes to strings, see also gh-9875 + # yield param(np.bool(0), id="bool") + + # Integers: + yield param(np.int8(2), id="int8") + yield param(np.int16(2), id="int16") + yield param(np.int32(2), id="int32") + yield param(np.int64(2), id="int64") + + yield param(np.uint8(2), id="uint8") + yield param(np.uint16(2), id="uint16") + yield param(np.uint32(2), id="uint32") + yield param(np.uint64(2), id="uint64") + + # Rational: + if user_dtype: + yield param(rational(1, 2), id="rational") + + # Cannot create a structured void scalar directly: + structured = np.array([(1, 3)], "i,i")[0] + assert isinstance(structured, np.void) + assert structured.dtype == np.dtype("i,i") + yield param(structured, id="structured") + + if times: + # Datetimes and timedelta + yield param(np.timedelta64(2), id="timedelta64[generic]") + yield param(np.timedelta64(23, "s"), id="timedelta64[s]") + yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)") + + yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)") + yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]") + + # Strings and unstructured void: + yield param(np.bytes_(b"1234"), id="bytes") + yield param(np.str_("2345"), id="unicode") + yield param(np.void(b"4321"), id="unstructured_void") + + +def is_parametric_dtype(dtype): + """Returns True if the dtype is a parametric legacy dtype (itemsize + is 0, or a datetime without units) + """ + if dtype.itemsize == 0: + return True + if issubclass(dtype.type, (np.datetime64, np.timedelta64)): + if dtype.name.endswith("64"): + # Generic time units + return True + return False + + +class TestStringDiscovery: + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_basic_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + + assert np.array(obj, dtype="S").dtype == expected + assert np.array([obj], dtype="S").dtype == expected + + # A nested array is also discovered correctly + arr = np.array(obj, dtype="O") + assert np.array(arr, dtype="S").dtype == expected + # Also if we use the dtype class + assert np.array(arr, dtype=type(expected)).dtype == expected + # Check that .astype() behaves identical + assert arr.astype("S").dtype == expected + # The DType class is accepted by `.astype()` + assert arr.astype(type(np.dtype("S"))).dtype == expected + + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_nested_arrays_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + arr = np.array(obj, dtype="O") + assert np.array([arr, arr], dtype="S").dtype == expected + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_unpack_first_level(self, arraylike): + # We unpack exactly one level of array likes + obj = np.array([None]) + obj[0] = np.array(1.2) + # the length of the included item, not of 
the float dtype + length = len(str(obj[0])) + expected = np.dtype(f"S{length}") + + obj = arraylike(obj) + # casting to string usually calls str(obj) + arr = np.array([obj], dtype="S") + assert arr.shape == (1, 1) + assert arr.dtype == expected + + +class TestScalarDiscovery: + def test_void_special_case(self): + # Void dtypes with structures discover tuples as elements + arr = np.array((1, 2, 3), dtype="i,i,i") + assert arr.shape == () + arr = np.array([(1, 2, 3)], dtype="i,i,i") + assert arr.shape == (1,) + + def test_char_special_case(self): + arr = np.array("string", dtype="c") + assert arr.shape == (6,) + assert arr.dtype.char == "c" + arr = np.array(["string"], dtype="c") + assert arr.shape == (1, 6) + assert arr.dtype.char == "c" + + def test_char_special_case_deep(self): + # Check that the character special case errors correctly if the + # array is too deep: + nested = ["string"] # 2 dimensions (due to string being sequence) + for i in range(ncu.MAXDIMS - 2): + nested = [nested] + + arr = np.array(nested, dtype='c') + assert arr.shape == (1,) * (ncu.MAXDIMS - 1) + (6,) + with pytest.raises(ValueError): + np.array([nested], dtype="c") + + def test_unknown_object(self): + arr = np.array(object()) + assert arr.shape == () + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar(self, scalar): + arr = np.array(scalar) + assert arr.shape == () + assert arr.dtype == scalar.dtype + + arr = np.array([[scalar, scalar]]) + assert arr.shape == (1, 2) + assert arr.dtype == scalar.dtype + + # Additionally to string this test also runs into a corner case + # with datetime promotion (the difference is the promotion order). + @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning") + def test_scalar_promotion(self): + for sc1, sc2 in product(scalar_instances(), scalar_instances()): + sc1, sc2 = sc1.values[0], sc2.values[0] + # test all combinations: + try: + arr = np.array([sc1, sc2]) + except (TypeError, ValueError): + # The promotion between two times can fail + # XFAIL (ValueError): Some object casts are currently undefined + continue + assert arr.shape == (2,) + try: + dt1, dt2 = sc1.dtype, sc2.dtype + expected_dtype = np.promote_types(dt1, dt2) + assert arr.dtype == expected_dtype + except TypeError as e: + # Will currently always go to object dtype + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar_coercion(self, scalar): + # This tests various scalar coercion paths, mainly for the numerical + # types. It includes some paths not directly related to `np.array`. + if isinstance(scalar, np.inexact): + # Ensure we have a full-precision number if available + scalar = type(scalar)((scalar * 2)**0.5) + + if type(scalar) is rational: + # Rational generally fails due to a missing cast. In the future + # object casts should automatically be defined based on `setitem`. 
+ pytest.xfail("Rational to object cast is undefined currently.") + + # Use casting from object: + arr = np.array(scalar, dtype=object).astype(scalar.dtype) + + # Test various ways to create an array containing this scalar: + arr1 = np.array(scalar).reshape(1) + arr2 = np.array([scalar]) + arr3 = np.empty(1, dtype=scalar.dtype) + arr3[0] = scalar + arr4 = np.empty(1, dtype=scalar.dtype) + arr4[:] = [scalar] + # All of these methods should yield the same results + assert_array_equal(arr, arr1) + assert_array_equal(arr, arr2) + assert_array_equal(arr, arr3) + assert_array_equal(arr, arr4) + + @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy") + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.parametrize("cast_to", scalar_instances()) + def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): + """ + Test that in most cases: + * `np.array(scalar, dtype=dtype)` + * `np.empty((), dtype=dtype)[()] = scalar` + * `np.array(scalar).astype(dtype)` + should behave the same. The only exceptions are parametric dtypes + (mainly datetime/timedelta without unit) and void without fields. + """ + dtype = cast_to.dtype # use to parametrize only the target dtype + + for scalar in scalar_instances(times=False): + scalar = scalar.values[0] + + if dtype.type == np.void: + if scalar.dtype.fields is not None and dtype.fields is None: + # Here, coercion to "V6" works, but the cast fails. + # Since the types are identical, SETITEM takes care of + # this, but has different rules than the cast. + with pytest.raises(TypeError): + np.array(scalar).astype(dtype) + np.array(scalar, dtype=dtype) + np.array([scalar], dtype=dtype) + continue + + # The main test, we first try to use casting and if it succeeds + # continue below testing that things are the same, otherwise + # test that the alternative paths at least also fail. + try: + cast = np.array(scalar).astype(dtype) + except (TypeError, ValueError, RuntimeError): + # coercion should also raise (error type may change) + with pytest.raises(Exception): # noqa: B017 + np.array(scalar, dtype=dtype) + + if (isinstance(scalar, rational) and + np.issubdtype(dtype, np.signedinteger)): + return + + with pytest.raises(Exception): # noqa: B017 + np.array([scalar], dtype=dtype) + # assignment should also raise + res = np.zeros((), dtype=dtype) + with pytest.raises(Exception): # noqa: B017 + res[()] = scalar + + return + + # Non error path: + arr = np.array(scalar, dtype=dtype) + assert_array_equal(arr, cast) + # assignment behaves the same + ass = np.zeros((), dtype=dtype) + ass[()] = scalar + assert_array_equal(ass, cast) + + @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100]) + def test_pyscalar_subclasses(self, pyscalar): + """NumPy arrays are read/write which means that anything but invariant + behaviour is on thin ice. However, we currently are happy to discover + subclasses of Python float, int, complex the same as the base classes. + This should potentially be deprecated. + """ + class MyScalar(type(pyscalar)): + pass + + res = np.array(MyScalar(pyscalar)) + expected = np.array(pyscalar) + assert_array_equal(res, expected) + + @pytest.mark.parametrize("dtype_char", np.typecodes["All"]) + def test_default_dtype_instance(self, dtype_char): + if dtype_char in "SU": + dtype = np.dtype(dtype_char + "1") + elif dtype_char == "V": + # Legacy behaviour was to use V8. The reason was float64 being the + # default dtype and that having 8 bytes. 
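+            # (np.dtype("V8").itemsize == np.dtype(np.float64).itemsize == 8)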
+ dtype = np.dtype("V8") + else: + dtype = np.dtype(dtype_char) + + discovered_dtype, _ = ncu._discover_array_parameters([], type(dtype)) + + assert discovered_dtype == dtype + assert discovered_dtype.itemsize == dtype.itemsize + + @pytest.mark.parametrize("dtype", np.typecodes["Integer"]) + @pytest.mark.parametrize(["scalar", "error"], + [(np.float64(np.nan), ValueError), + (np.array(-1).astype(np.ulonglong)[()], OverflowError)]) + def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error): + """ + Signed integers are currently different in that they do not cast other + NumPy scalar, but instead use scalar.__int__(). The hardcoded + exception to this rule is `np.array(scalar, dtype=integer)`. + """ + dtype = np.dtype(dtype) + + # This is a special case using casting logic. It warns for the NaN + # but allows the cast (giving undefined behaviour). + with np.errstate(invalid="ignore"): + coerced = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + assert_array_equal(coerced, cast) + + # However these fail: + with pytest.raises(error): + np.array([scalar], dtype=dtype) + with pytest.raises(error): + cast[()] = scalar + + +class TestTimeScalars: + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("scalar", + [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"), + param(np.timedelta64(123, "s"), id="timedelta64[s]"), + param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"), + param(np.datetime64(1, "D"), id="datetime64[D]")],) + def test_coercion_basic(self, dtype, scalar): + # Note the `[scalar]` is there because np.array(scalar) uses stricter + # `scalar.__int__()` rules for backward compatibility right now. + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + assert_array_equal(arr, cast) + + ass = np.ones((), dtype=dtype) + if issubclass(dtype, np.integer): + with pytest.raises(TypeError): + # raises, as would np.array([scalar], dtype=dtype), this is + # conversion from times, but behaviour of integers. + ass[()] = scalar + else: + ass[()] = scalar + assert_array_equal(ass, cast) + + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("scalar", + [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"), + param(np.timedelta64(12, "generic"), id="timedelta64[generic]")]) + def test_coercion_timedelta_convert_to_number(self, dtype, scalar): + # Only "ns" and "generic" timedeltas can be converted to numbers + # so these are slightly special. + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + ass = np.ones((), dtype=dtype) + ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype) + + assert_array_equal(arr, cast) + assert_array_equal(cast, cast) + + @pytest.mark.parametrize("dtype", ["S6", "U6"]) + @pytest.mark.parametrize(["val", "unit"], + [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) + def test_coercion_assignment_datetime(self, val, unit, dtype): + # String from datetime64 assignment is currently special cased to + # never use casting. This is because casting will error in this + # case, and traditionally in most cases the behaviour is maintained + # like this. (`np.array(scalar, dtype="U6")` would have failed before) + # TODO: This discrepancy _should_ be resolved, either by relaxing the + # cast, or by deprecating the first part. 
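+        # For example, str(np.datetime64(123, "s")) == "1970-01-01T00:02:03",
+        # so the S6/U6 assignment below must store only "1970-0".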
+ scalar = np.datetime64(val, unit) + dtype = np.dtype(dtype) + cut_string = dtype.type(str(scalar)[:6]) + + arr = np.array(scalar, dtype=dtype) + assert arr[()] == cut_string + ass = np.ones((), dtype=dtype) + ass[()] = scalar + assert ass[()] == cut_string + + with pytest.raises(RuntimeError): + # However, unlike the above assignment using `str(scalar)[:6]` + # due to being handled by the string DType and not be casting + # the explicit cast fails: + np.array(scalar).astype(dtype) + + @pytest.mark.parametrize(["val", "unit"], + [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) + def test_coercion_assignment_timedelta(self, val, unit): + scalar = np.timedelta64(val, unit) + + # Unlike datetime64, timedelta allows the unsafe cast: + np.array(scalar, dtype="S6") + cast = np.array(scalar).astype("S6") + ass = np.ones((), dtype="S6") + ass[()] = scalar + expected = scalar.astype("S")[:6] + assert cast[()] == expected + assert ass[()] == expected + +class TestNested: + def test_nested_simple(self): + initial = [1.2] + nested = initial + for i in range(ncu.MAXDIMS - 1): + nested = [nested] + + arr = np.array(nested, dtype="float64") + assert arr.shape == (1,) * ncu.MAXDIMS + with pytest.raises(ValueError): + np.array([nested], dtype="float64") + + with pytest.raises(ValueError, match=".*would exceed the maximum"): + np.array([nested]) # user must ask for `object` explicitly + + arr = np.array([nested], dtype=object) + assert arr.dtype == np.dtype("O") + assert arr.shape == (1,) * ncu.MAXDIMS + assert arr.item() is initial + + def test_pathological_self_containing(self): + # Test that this also works for two nested sequences + l = [] + l.append(l) + arr = np.array([l, l, l], dtype=object) + assert arr.shape == (3,) + (1,) * (ncu.MAXDIMS - 1) + + # Also check a ragged case: + arr = np.array([l, [None], l], dtype=object) + assert arr.shape == (3, 1) + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_nested_arraylikes(self, arraylike): + # We try storing an array like into an array, but the array-like + # will have too many dimensions. This means the shape discovery + # decides that the array-like must be treated as an object (a special + # case of ragged discovery). The result will be an array with one + # dimension less than the maximum dimensions, and the array being + # assigned to it (which does work for object or if `float(arraylike)` + # works). + initial = arraylike(np.ones((1, 1))) + + nested = initial + for i in range(ncu.MAXDIMS - 1): + nested = [nested] + + with pytest.raises(ValueError, match=".*would exceed the maximum"): + # It will refuse to assign the array into + np.array(nested, dtype="float64") + + # If this is object, we end up assigning a (1, 1) array into (1,) + # (due to running out of dimensions), this is currently supported but + # a special case which is not ideal. + arr = np.array(nested, dtype=object) + assert arr.shape == (1,) * ncu.MAXDIMS + assert arr.item() == np.array(initial).item() + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_uneven_depth_ragged(self, arraylike): + arr = np.arange(4).reshape((2, 2)) + arr = arraylike(arr) + + # Array is ragged in the second dimension already: + out = np.array([arr, [arr]], dtype=object) + assert out.shape == (2,) + assert out[0] is arr + assert type(out[1]) is list + + # Array is ragged in the third dimension: + with pytest.raises(ValueError): + # This is a broadcast error during assignment, because + # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails. 
+ np.array([arr, [arr, arr]], dtype=object) + + def test_empty_sequence(self): + arr = np.array([[], [1], [[1]]], dtype=object) + assert arr.shape == (3,) + + # The empty sequence stops further dimension discovery, so the + # result shape will be (0,) which leads to an error during: + with pytest.raises(ValueError): + np.array([[], np.empty((0, 1))], dtype=object) + + def test_array_of_different_depths(self): + # When multiple arrays (or array-likes) are included in a + # sequences and have different depth, we currently discover + # as many dimensions as they share. (see also gh-17224) + arr = np.zeros((3, 2)) + mismatch_first_dim = np.zeros((1, 2)) + mismatch_second_dim = np.zeros((3, 3)) + + dtype, shape = ncu._discover_array_parameters( + [arr, mismatch_second_dim], dtype=np.dtype("O")) + assert shape == (2, 3) + + dtype, shape = ncu._discover_array_parameters( + [arr, mismatch_first_dim], dtype=np.dtype("O")) + assert shape == (2,) + # The second case is currently supported because the arrays + # can be stored as objects: + res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O")) + assert res[0] is arr + assert res[1] is mismatch_first_dim + + +class TestBadSequences: + # These are tests for bad objects passed into `np.array`, in general + # these have undefined behaviour. In the old code they partially worked + # when now they will fail. We could (and maybe should) create a copy + # of all sequences to be safe against bad-actors. + + def test_growing_list(self): + # List to coerce, `mylist` will append to it during coercion + obj = [] + + class mylist(list): + def __len__(self): + obj.append([1, 2]) + return super().__len__() + + obj.append(mylist([1, 2])) + + with pytest.raises(RuntimeError): + np.array(obj) + + # Note: We do not test a shrinking list. These do very evil things + # and the only way to fix them would be to copy all sequences. + # (which may be a real option in the future). + + def test_mutated_list(self): + # List to coerce, `mylist` will mutate the first element + obj = [] + + class mylist(list): + def __len__(self): + obj[0] = [2, 3] # replace with a different list. + return super().__len__() + + obj.append([2, 3]) + obj.append(mylist([1, 2])) + # Does not crash: + np.array(obj) + + def test_replace_0d_array(self): + # List to coerce, `mylist` will mutate the first element + obj = [] + + class baditem: + def __len__(self): + obj[0][0] = 2 # replace with a different list. + raise ValueError("not actually a sequence!") + + def __getitem__(self, _, /): + pass + + # Runs into a corner case in the new code, the `array(2)` is cached + # so replacing it invalidates the cache. + obj.append([np.array(2), baditem()]) + with pytest.raises(RuntimeError): + np.array(obj) + + +class TestArrayLikes: + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_0d_object_special_case(self, arraylike): + arr = np.array(0.) + obj = arraylike(arr) + # A single array-like is always converted: + res = np.array(obj, dtype=object) + assert_array_equal(arr, res) + + # But a single 0-D nested array-like never: + res = np.array([obj], dtype=object) + assert res[0] is obj + + @pytest.mark.parametrize("arraylike", arraylikes()) + @pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)]) + def test_object_assignment_special_case(self, arraylike, arr): + obj = arraylike(arr) + empty = np.arange(1, dtype=object) + empty[:] = [obj] + assert empty[0] is obj + + def test_0d_generic_special_case(self): + class ArraySubclass(np.ndarray): + def __float__(self): + raise TypeError("e.g. 
quantities raise on this") + + arr = np.array(0.) + obj = arr.view(ArraySubclass) + res = np.array(obj) + # The subclass is simply cast: + assert_array_equal(arr, res) + + # If the 0-D array-like is included, __float__ is currently + # guaranteed to be used. We may want to change that, quantities + # and masked arrays half make use of this. + with pytest.raises(TypeError): + np.array([obj]) + + # The same holds for memoryview: + obj = memoryview(arr) + res = np.array(obj) + assert_array_equal(arr, res) + with pytest.raises(ValueError): + # The error type does not matter much here. + np.array([obj]) + + def test_arraylike_classes(self): + # The classes of array-likes should generally be acceptable to be + # stored inside a numpy (object) array. This tests all of the + # special attributes (since all are checked during coercion). + arr = np.array(np.int64) + assert arr[()] is np.int64 + arr = np.array([np.int64]) + assert arr[0] is np.int64 + + # This also works for properties/unbound methods: + class ArrayLike: + @property + def __array_interface__(self): + pass + + @property + def __array_struct__(self): + pass + + def __array__(self, dtype=None, copy=None): + pass + + arr = np.array(ArrayLike) + assert arr[()] is ArrayLike + arr = np.array([ArrayLike]) + assert arr[0] is ArrayLike + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + def test_too_large_array_error_paths(self): + """Test the error paths, including for memory leaks""" + arr = np.array(0, dtype="uint8") + # Guarantees that a contiguous copy won't work: + arr = np.broadcast_to(arr, 2**62) + + for i in range(5): + # repeat, to ensure caching cannot have an effect: + with pytest.raises(MemoryError): + np.array(arr) + with pytest.raises(MemoryError): + np.array([arr]) + + @pytest.mark.parametrize("attribute", + ["__array_interface__", "__array__", "__array_struct__"]) + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_attributes(self, attribute, error): + # RecursionError and MemoryError are considered fatal. All errors + # (except AttributeError) should probably be raised in the future, + # but shapely made use of it, so it will require a deprecation. + + class BadInterface: + def __getattr__(self, attr): + if attr == attribute: + raise error + super().__getattr__(attr) + + with pytest.raises(error): + np.array(BadInterface()) + + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_bad_length(self, error): + # RecursionError and MemoryError are considered "critical" in + # sequences. We could expand this more generally though. (NumPy 1.20) + class BadSequence: + def __len__(self): + raise error + + def __getitem__(self, _, /): + # must have getitem to be a Sequence + return 1 + + with pytest.raises(error): + np.array(BadSequence()) + + def test_array_interface_descr_optional(self): + # The descr should be optional regression test for gh-27249 + arr = np.ones(10, dtype="V10") + iface = arr.__array_interface__ + iface.pop("descr") + + class MyClass: + __array_interface__ = iface + + assert_array_equal(np.asarray(MyClass), arr) + + +class TestAsArray: + """Test expected behaviors of ``asarray``.""" + + def test_dtype_identity(self): + """Confirm the intended behavior for *dtype* kwarg. + + The result of ``asarray()`` should have the dtype provided through the + keyword argument, when used. 
This forces unique array handles to be + produced for unique np.dtype objects, but (for equivalent dtypes), the + underlying data (the base object) is shared with the original array + object. + + Ref https://github.com/numpy/numpy/issues/1468 + """ + int_array = np.array([1, 2, 3], dtype='i') + assert np.asarray(int_array) is int_array + + # The character code resolves to the singleton dtype object provided + # by the numpy package. + assert np.asarray(int_array, dtype='i') is int_array + + # Derive a dtype from n.dtype('i'), but add a metadata object to force + # the dtype to be distinct. + unequal_type = np.dtype('i', metadata={'spam': True}) + annotated_int_array = np.asarray(int_array, dtype=unequal_type) + assert annotated_int_array is not int_array + assert annotated_int_array.base is int_array + # Create an equivalent descriptor with a new and distinct dtype + # instance. + equivalent_requirement = np.dtype('i', metadata={'spam': True}) + annotated_int_array_alt = np.asarray(annotated_int_array, + dtype=equivalent_requirement) + assert unequal_type == equivalent_requirement + assert unequal_type is not equivalent_requirement + assert annotated_int_array_alt is not annotated_int_array + assert annotated_int_array_alt.dtype is equivalent_requirement + + # Check the same logic for a pair of C types whose equivalence may vary + # between computing environments. + # Find an equivalent pair. + integer_type_codes = ('i', 'l', 'q') + integer_dtypes = [np.dtype(code) for code in integer_type_codes] + typeA = None + typeB = None + for typeA, typeB in permutations(integer_dtypes, r=2): + if typeA == typeB: + assert typeA is not typeB + break + assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype) + + # These ``asarray()`` calls may produce a new view or a copy, + # but never the same object. 
+ long_int_array = np.asarray(int_array, dtype='l') + long_long_int_array = np.asarray(int_array, dtype='q') + assert long_int_array is not int_array + assert long_long_int_array is not int_array + assert np.asarray(long_int_array, dtype='q') is not long_int_array + array_a = np.asarray(int_array, dtype=typeA) + assert typeA == typeB + assert typeA is not typeB + assert array_a.dtype is typeA + assert array_a is not np.asarray(array_a, dtype=typeB) + assert np.asarray(array_a, dtype=typeB).dtype is typeB + assert array_a is np.asarray(array_a, dtype=typeB).base + + +class TestSpecialAttributeLookupFailure: + # An exception was raised while fetching the attribute + + class WeirdArrayLike: + @property + def __array__(self, dtype=None, copy=None): # noqa: PLR0206 + raise RuntimeError("oops!") + + class WeirdArrayInterface: + @property + def __array_interface__(self): + raise RuntimeError("oops!") + + def test_deprecated(self): + with pytest.raises(RuntimeError): + np.array(self.WeirdArrayLike()) + with pytest.raises(RuntimeError): + np.array(self.WeirdArrayInterface()) + + +def test_subarray_from_array_construction(): + # Arrays are more complex, since they "broadcast" on success: + arr = np.array([1, 2]) + + res = arr.astype("2i") + assert_array_equal(res, [[1, 1], [2, 2]]) + + res = np.array(arr, dtype="(2,)i") + + assert_array_equal(res, [[1, 1], [2, 2]]) + + res = np.array([[(1,), (2,)], arr], dtype="2i") + assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 1], [2, 2]]]) + + # Also try a multi-dimensional example: + arr = np.arange(5 * 2).reshape(5, 2) + expected = np.broadcast_to(arr[:, :, np.newaxis, np.newaxis], (5, 2, 2, 2)) + + res = arr.astype("(2,2)f") + assert_array_equal(res, expected) + + res = np.array(arr, dtype="(2,2)f") + assert_array_equal(res, expected) + + +def test_empty_string(): + # Empty strings are unfortunately often converted to S1 and we need to + # make sure we are filling the S1 and not the (possibly) detected S0 + # result. This should likely just return S0 and if not maybe the decision + # to return S1 should be moved. + res = np.array([""] * 10, dtype="S") + assert_array_equal(res, np.array("\0", "S1")) + assert res.dtype == "S1" + + arr = np.array([""] * 10, dtype=object) + + res = arr.astype("S") + assert_array_equal(res, b"") + assert res.dtype == "S1" + + res = np.array(arr, dtype="S") + assert_array_equal(res, b"") + # TODO: This is arguably weird/wrong, but seems old: + assert res.dtype == f"S{np.dtype('O').itemsize}" + + res = np.array([[""] * 10, arr], dtype="S") + assert_array_equal(res, b"") + assert res.shape == (2, 10) + assert res.dtype == "S1" diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_interface.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_interface.py new file mode 100644 index 0000000..afb19f4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_array_interface.py @@ -0,0 +1,222 @@ +import sys +import sysconfig + +import pytest + +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild + + +@pytest.fixture +def get_module(tmp_path): + """ Some codes to generate data and manage temporary buffers use when + sharing with numpy via the array interface protocol. 
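+    (The helper extension is compiled on demand via numpy.testing.extbuild.)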
+    """
+    if sys.platform.startswith('cygwin'):
+        pytest.skip('link fails on cygwin')
+    if IS_WASM:
+        pytest.skip("Can't build module inside Wasm")
+    if IS_EDITABLE:
+        pytest.skip("Can't build module for editable install")
+
+    prologue = '''
+    #include <Python.h>
+    #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+    #include <numpy/arrayobject.h>
+    #include <stdio.h>
+    #include <math.h>
+
+    NPY_NO_EXPORT
+    void delete_array_struct(PyObject *cap) {
+
+        /* get the array interface structure */
+        PyArrayInterface *inter = (PyArrayInterface*)
+            PyCapsule_GetPointer(cap, NULL);
+
+        /* get the buffer by which data was shared */
+        double *ptr = (double*)PyCapsule_GetContext(cap);
+
+        /* for the purposes of the regression test set the elements
+           to nan */
+        for (npy_intp i = 0; i < inter->shape[0]; ++i)
+            ptr[i] = nan("");
+
+        /* free the shared buffer */
+        free(ptr);
+
+        /* free the array interface structure */
+        free(inter->shape);
+        free(inter);
+
+        fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
+                " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
+    }
+    '''
+
+    functions = [
+        ("new_array_struct", "METH_VARARGS", """
+
+        long long n_elem = 0;
+        double value = 0.0;
+
+        if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
+            Py_RETURN_NONE;
+        }
+
+        /* allocate and initialize the data to share with numpy */
+        long long n_bytes = n_elem*sizeof(double);
+        double *data = (double*)malloc(n_bytes);
+
+        if (!data) {
+            PyErr_Format(PyExc_MemoryError,
+                "Failed to malloc %lld bytes", n_bytes);
+
+            Py_RETURN_NONE;
+        }
+
+        for (long long i = 0; i < n_elem; ++i) {
+            data[i] = value;
+        }
+
+        /* calculate the shape and stride */
+        int nd = 1;
+
+        npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
+        npy_intp *shape = ss;
+        npy_intp *stride = ss + nd;
+
+        shape[0] = n_elem;
+        stride[0] = sizeof(double);
+
+        /* construct the array interface */
+        PyArrayInterface *inter = (PyArrayInterface*)
+            malloc(sizeof(PyArrayInterface));
+
+        memset(inter, 0, sizeof(PyArrayInterface));
+
+        inter->two = 2;
+        inter->nd = nd;
+        inter->typekind = 'f';
+        inter->itemsize = sizeof(double);
+        inter->shape = shape;
+        inter->strides = stride;
+        inter->data = data;
+        inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
+                       NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
+
+        /* package into a capsule */
+        PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
+
+        /* save the pointer to the data */
+        PyCapsule_SetContext(cap, data);
+
+        fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
+                " ptr = %ld\\n", (long)cap, (long)inter, (long)data);
+
+        return cap;
+        """)
+    ]
+
+    more_init = "import_array();"
+
+    try:
+        import array_interface_testing
+        return array_interface_testing
+    except ImportError:
+        pass
+
+    # if it does not exist, build and load it
+    if sysconfig.get_platform() == "win-arm64":
+        pytest.skip("Meson unable to find MSVC linker on win-arm64")
+    return extbuild.build_and_import_extension('array_interface_testing',
+                                               functions,
+                                               prologue=prologue,
+                                               include_dirs=[np.get_include()],
+                                               build_dir=tmp_path,
+                                               more_init=more_init)
+
+
+@pytest.mark.slow
+def test_cstruct(get_module):
+
+    class data_source:
+        """
+        This class is for testing the timing of the PyCapsule destructor
+        invoked when numpy releases its reference to the shared data as part
+        of the numpy array interface protocol. If the PyCapsule destructor
+        is called early the shared data is freed and invalid memory accesses
+        will occur.
+        """
+
+        def __init__(self, size, value):
+            self.size = size
+            self.value = value
+
+        @property
+        def __array_struct__(self):
+            return get_module.new_array_struct(self.size, self.value)
+
+    # write to the same stream as the C code
+    stderr = sys.__stderr__
+
+    # used to validate the shared data.
+    expected_value = -3.1415
+    multiplier = -10000.0
+
+    # create some data to share with numpy via the array interface
+    # assign the data an expected value.
+    stderr.write(' ---- create an object to share data ---- \n')
+    buf = data_source(256, expected_value)
+    stderr.write(' ---- OK!\n\n')
+
+    # share the data
+    stderr.write(' ---- share data via the array interface protocol ---- \n')
+    arr = np.array(buf, copy=False)
+    stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n')
+    stderr.write(f'arr.base = {str(arr.base)}\n')
+    stderr.write(' ---- OK!\n\n')
+
+    # release the source of the shared data. this will not release the data
+    # that was shared with numpy, that is done in the PyCapsule destructor.
+    stderr.write(' ---- destroy the object that shared data ---- \n')
+    buf = None
+    stderr.write(' ---- OK!\n\n')
+
+    # check that we got the expected data. If the PyCapsule destructor we
+    # defined was prematurely called then this test will fail because our
+    # destructor sets the elements of the array to NaN before free'ing the
+    # buffer. Reading the values here may also cause a SEGV
+    assert np.allclose(arr, expected_value)
+
+    # read the data. If the PyCapsule destructor we defined was prematurely
+    # called then reading the values here may cause a SEGV and will be
+    # reported as invalid reads by valgrind
+    stderr.write(' ---- read shared data ---- \n')
+    stderr.write(f'arr = {str(arr)}\n')
+    stderr.write(' ---- OK!\n\n')
+
+    # write to the shared buffer. If the shared data was prematurely deleted
+    # this may cause a SEGV and valgrind will report invalid writes
+    stderr.write(' ---- modify shared data ---- \n')
+    arr *= multiplier
+    expected_value *= multiplier
+    stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n')
+    stderr.write(f'arr.base = {str(arr.base)}\n')
+    stderr.write(' ---- OK!\n\n')
+
+    # read the data. If the shared data was prematurely deleted this
+    # may cause a SEGV and valgrind will report invalid reads
+    stderr.write(' ---- read modified shared data ---- \n')
+    stderr.write(f'arr = {str(arr)}\n')
+    stderr.write(' ---- OK!\n\n')
+
+    # check that we got the expected data. If the PyCapsule destructor we
+    # defined was prematurely called then this test will fail because our
+    # destructor sets the elements of the array to NaN before free'ing the
+    # buffer. Reading the values here may also cause a SEGV
+    assert np.allclose(arr, expected_value)
+
+    # free the shared data, the PyCapsule destructor should run here
+    stderr.write(' ---- free shared data ---- \n')
+    arr = None
+    stderr.write(' ---- OK!\n\n')
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arraymethod.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arraymethod.py
new file mode 100644
index 0000000..d8baef7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arraymethod.py
@@ -0,0 +1,84 @@
+"""
+This file tests the generic aspects of ArrayMethod. At the time of writing
+this is private API; once it is made public, public API tests may be added here.
+""" + +import types +from typing import Any + +import pytest +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl + +import numpy as np + + +class TestResolveDescriptors: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize("args", [ + (True,), # Not a tuple. + ((None,)), # Too few elements + ((None, None, None),), # Too many + ((None, None),), # Input dtype is None, which is invalid. + ((np.dtype("d"), True),), # Output dtype is not a dtype + ((np.dtype("f"), None),), # Input dtype does not match method + ]) + def test_invalid_arguments(self, args): + with pytest.raises(TypeError): + self.method._resolve_descriptors(*args) + + +class TestSimpleStridedCall: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize(["args", "error"], [ + ((True,), TypeError), # Not a tuple + (((None,),), TypeError), # Too few elements + ((None, None), TypeError), # Inputs are not arrays. + (((None, None, None),), TypeError), # Too many + (((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes + (((np.ones(3, dtype=">d"), np.ones(3, dtype=" None: + """Test `ndarray.__class_getitem__`.""" + alias = cls[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len in (1, 2): + assert cls[arg_tup] + else: + match = f"Too {'few' if arg_len == 0 else 'many'} arguments" + with pytest.raises(TypeError, match=match): + cls[arg_tup] diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arrayobject.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arrayobject.py new file mode 100644 index 0000000..ffa1ba0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arrayobject.py @@ -0,0 +1,75 @@ +import pytest + +import numpy as np +from numpy.testing import assert_array_equal + + +def test_matrix_transpose_raises_error_for_1d(): + msg = "matrix transpose with ndim < 2 is undefined" + arr = np.arange(48) + with pytest.raises(ValueError, match=msg): + arr.mT + + +def test_matrix_transpose_equals_transpose_2d(): + arr = np.arange(48).reshape((6, 8)) + assert_array_equal(arr.T, arr.mT) + + +ARRAY_SHAPES_TO_TEST = ( + (5, 2), + (5, 2, 3), + (5, 2, 3, 4), +) + + +@pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST) +def test_matrix_transpose_equals_swapaxes(shape): + num_of_axes = len(shape) + vec = np.arange(shape[-1]) + arr = np.broadcast_to(vec, shape) + tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) + mT = arr.mT + assert_array_equal(tgt, mT) + + +class MyArr(np.ndarray): + def __array_wrap__(self, arr, context=None, return_scalar=None): + return super().__array_wrap__(arr, context, return_scalar) + + +class MyArrNoWrap(np.ndarray): + pass + + +@pytest.mark.parametrize("subclass_self", [np.ndarray, MyArr, MyArrNoWrap]) +@pytest.mark.parametrize("subclass_arr", [np.ndarray, MyArr, MyArrNoWrap]) +def test_array_wrap(subclass_self, subclass_arr): + # 
+    # is designed in a way that:
+    #
+    # * Subclasses never return scalars by default (to preserve their
+    #   information). They can choose to if they wish.
+    # * NumPy returns scalars, if `return_scalar` is passed as True to allow
+    #   manual calls to `arr.__array_wrap__` to do the right thing.
+    # * The type of the input should be ignored (it should be a base-class
+    #   array, but I am not sure this is guaranteed).
+
+    arr = np.arange(3).view(subclass_self)
+
+    arr0d = np.array(3, dtype=np.int8).view(subclass_arr)
+    # With third argument True, ndarray allows "decay" to scalar.
+    # (I don't think NumPy would pass `None`, but it seems clear to support)
+    if subclass_self is np.ndarray:
+        assert type(arr.__array_wrap__(arr0d, None, True)) is np.int8
+    else:
+        assert type(arr.__array_wrap__(arr0d, None, True)) is type(arr)
+
+    # Otherwise, result should be viewed as the subclass
+    assert type(arr.__array_wrap__(arr0d)) is type(arr)
+    assert type(arr.__array_wrap__(arr0d, None, None)) is type(arr)
+    assert type(arr.__array_wrap__(arr0d, None, False)) is type(arr)
+
+    # Non 0-D array can't be converted to scalar, so we ignore that
+    arr1d = np.array([3], dtype=np.int8).view(subclass_arr)
+    assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arrayprint.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arrayprint.py
new file mode 100644
index 0000000..1fd4ac2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_arrayprint.py
@@ -0,0 +1,1328 @@
+import gc
+import sys
+import textwrap
+
+import pytest
+from hypothesis import given
+from hypothesis.extra import numpy as hynp
+
+import numpy as np
+from numpy._core.arrayprint import _typelessdata
+from numpy.testing import (
+    HAS_REFCOUNT,
+    IS_WASM,
+    assert_,
+    assert_equal,
+    assert_raises,
+    assert_raises_regex,
+    assert_warns,
+)
+from numpy.testing._private.utils import run_threaded
+
+
+class TestArrayRepr:
+    def test_nan_inf(self):
+        x = np.array([np.nan, np.inf])
+        assert_equal(repr(x), 'array([nan, inf])')
+
+    def test_subclass(self):
+        class sub(np.ndarray):
+            pass
+
+        # one dimensional
+        x1d = np.array([1, 2]).view(sub)
+        assert_equal(repr(x1d), 'sub([1, 2])')
+
+        # two dimensional
+        x2d = np.array([[1, 2], [3, 4]]).view(sub)
+        assert_equal(repr(x2d),
+                     'sub([[1, 2],\n'
+                     '     [3, 4]])')
+
+        # two dimensional with flexible dtype
+        xstruct = np.ones((2, 2), dtype=[('a', '<i4')]).view(sub)
+        assert_equal(repr(xstruct),
+                     "sub([[(1,), (1,)],\n"
+                     "     [(1,), (1,)]], dtype=[('a', '<i4')])")
+
+    @pytest.mark.xfail(reason="See gh-10544")
+    def test_object_subclass(self):
+        class sub(np.ndarray):
+            def __new__(cls, inp):
+                obj = np.asarray(inp).view(cls)
+                return obj
+
+            def __getitem__(self, ind):
+                ret = super().__getitem__(ind)
+                return sub(ret)
+
+        # test that object + subclass is OK:
+        x = sub([None, None])
+        assert_equal(repr(x), 'sub([None, None], dtype=object)')
+        assert_equal(str(x), '[None None]')
+
+        x = sub([None, sub([None, None])])
+        assert_equal(repr(x),
+            'sub([None, sub([None, None], dtype=object)], dtype=object)')
+        assert_equal(str(x), '[None sub([None, None], dtype=object)]')
+
+    def test_0d_object_subclass(self):
+        # make sure that subclasses which return 0ds instead
+        # of scalars don't cause infinite recursion in str
+        class sub(np.ndarray):
+            def __new__(cls, inp):
+                obj = np.asarray(inp).view(cls)
+                return obj
+
+            def __getitem__(self, ind):
+                ret = super().__getitem__(ind)
+                return sub(ret)
+
+        x = sub(1)
+        assert_equal(repr(x), 'sub(1)')
+        assert_equal(str(x), '1')
+
+        x = sub([1, 1])
+        assert_equal(repr(x), 'sub([1, 1])')
+        assert_equal(str(x), '[1 1]')
+
+        # check it works properly with object arrays too
+        x = sub(None)
+        assert_equal(repr(x), 'sub(None, dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # plus recursive object arrays (even depth > 1)
+        y = sub(None)
+        x[()] = y
+        y[()] = x
+        assert_equal(repr(x),
+            'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
+        assert_equal(str(x), '...')
+        x[()] = 0  # resolve circular references for garbage collector
+
+        # nested 0d-subclass-object
+        x = sub(None)
+        x[()] = sub(None)
+        assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # gh-10663
+        class DuckCounter(np.ndarray):
+            def __getitem__(self, item):
+                result = super().__getitem__(item)
+                if not isinstance(result, DuckCounter):
+                    result = result[...].view(DuckCounter)
+                return result
+
+            def to_string(self):
+                return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
+
+            def __str__(self):
+                if self.shape == ():
+                    return self.to_string()
+                else:
+                    fmt = {'all': lambda x: x.to_string()}
+                    return np.array2string(self, formatter=fmt)
+
+        dc = np.arange(5).view(DuckCounter)
+        assert_equal(str(dc), "[zero one two many many]")
+        assert_equal(str(dc[0]), "zero")
+
+    def
test_self_containing(self): + arr0d = np.array(None) + arr0d[()] = arr0d + assert_equal(repr(arr0d), + 'array(array(..., dtype=object), dtype=object)') + arr0d[()] = 0 # resolve recursion for garbage collector + + arr1d = np.array([None, None]) + arr1d[1] = arr1d + assert_equal(repr(arr1d), + 'array([None, array(..., dtype=object)], dtype=object)') + arr1d[1] = 0 # resolve recursion for garbage collector + + first = np.array(None) + second = np.array(None) + first[()] = second + second[()] = first + assert_equal(repr(first), + 'array(array(array(..., dtype=object), dtype=object), dtype=object)') + first[()] = 0 # resolve circular references for garbage collector + + def test_containing_list(self): + # printing square brackets directly would be ambiguous + arr1d = np.array([None, None]) + arr1d[0] = [1, 2] + arr1d[1] = [3] + assert_equal(repr(arr1d), + 'array([list([1, 2]), list([3])], dtype=object)') + + def test_void_scalar_recursion(self): + # gh-9345 + repr(np.void(b'test')) # RecursionError ? + + def test_fieldless_structured(self): + # gh-10366 + no_fields = np.dtype([]) + arr_no_fields = np.empty(4, dtype=no_fields) + assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') + + +class TestComplexArray: + def test_str(self): + rvals = [0, 1, -1, np.inf, -np.inf, np.nan] + cvals = [complex(rp, ip) for rp in rvals for ip in rvals] + dtypes = [np.complex64, np.cdouble, np.clongdouble] + actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] + wanted = [ + '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', + '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', + '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', + '[0.+infj]', '[0.+infj]', '[0.+infj]', + '[0.-infj]', '[0.-infj]', '[0.-infj]', + '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', + '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', + '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', + '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', + '[1.+infj]', '[1.+infj]', '[1.+infj]', + '[1.-infj]', '[1.-infj]', '[1.-infj]', + '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', + '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', + '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', + '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', + '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', + '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', + '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', + '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', + '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', + '[inf-1.j]', '[inf-1.j]', '[inf-1.j]', + '[inf+infj]', '[inf+infj]', '[inf+infj]', + '[inf-infj]', '[inf-infj]', '[inf-infj]', + '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', + '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', + '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', + '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', + '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', + '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', + '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', + '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', + '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', + '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', + '[nan+infj]', '[nan+infj]', '[nan+infj]', + '[nan-infj]', '[nan-infj]', '[nan-infj]', + '[nan+nanj]', '[nan+nanj]', '[nan+nanj]'] + + for res, val in zip(actual, wanted): + assert_equal(res, val) + +class TestArray2String: + def test_basic(self): + """Basic test of array2string.""" + a = np.arange(3) + assert_(np.array2string(a) == '[0 1 2]') + assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') + + def test_unexpected_kwarg(self): + # ensure than an appropriate TypeError + # is raised when array2string receives + # an unexpected kwarg + + with 
assert_raises_regex(TypeError, 'nonsense'): + np.array2string(np.array([1, 2, 3]), + nonsense=None) + + def test_format_function(self): + """Test custom format function for each element in array.""" + def _format_function(x): + if np.abs(x) < 1: + return '.' + elif np.abs(x) < 2: + return 'o' + else: + return 'O' + + x = np.arange(3) + x_hex = "[0x0 0x1 0x2]" + x_oct = "[0o0 0o1 0o2]" + assert_(np.array2string(x, formatter={'all': _format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'int_kind': _format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'all': lambda x: f"{x:.4f}"}) == + "[0.0000 1.0000 2.0000]") + assert_equal(np.array2string(x, formatter={'int': hex}), + x_hex) + assert_equal(np.array2string(x, formatter={'int': oct}), + x_oct) + + x = np.arange(3.) + assert_(np.array2string(x, formatter={'float_kind': lambda x: f"{x:.2f}"}) == + "[0.00 1.00 2.00]") + assert_(np.array2string(x, formatter={'float': lambda x: f"{x:.2f}"}) == + "[0.00 1.00 2.00]") + + s = np.array(['abc', 'def']) + assert_(np.array2string(s, formatter={'numpystr': lambda s: s * 2}) == + '[abcabc defdef]') + + def test_structure_format_mixed(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_equal(np.array2string(x), + "[('Sarah', [8., 7.]) ('John', [6., 7.])]") + + np.set_printoptions(legacy='1.13') + try: + # for issue #5692 + A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + ('NaT',) ('NaT',) ('NaT',)]""") + ) + finally: + np.set_printoptions(legacy=False) + + # same again, but with non-legacy behavior + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',)]""") + ) + + # and again, with timedeltas + A = np.full(10, 123456, dtype=[("A", "m8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") + ) + + def test_structure_format_int(self): + # See #8160 + struct_int = np.array([([1, -1],), ([123, 1],)], + dtype=[('B', 'i4', 2)]) + assert_equal(np.array2string(struct_int), + "[([ 1, -1],) ([123, 1],)]") + struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], + dtype=[('B', 'i4', (2, 2))]) + assert_equal(np.array2string(struct_2dint), + "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") + + def test_structure_format_float(self): + # See #8172 + array_scalar = np.array( + (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) + assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") + + def test_unstructured_void_repr(self): + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, 27, 91, 51, 49, 109, 82, 101, 100], + dtype='u1').view('V8') + assert_equal(repr(a[0]), + r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") + assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") + assert_equal(repr(a), + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," + "\n" + r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") + + assert_equal(eval(repr(a), vars(np)), a) + 
assert_equal(eval(repr(a[0]), {'np': np}), a[0])
+
+    def test_edgeitems_kwarg(self):
+        # previously the global print options would be taken over the kwarg
+        arr = np.zeros(3, int)
+        assert_equal(
+            np.array2string(arr, edgeitems=1, threshold=0),
+            "[0 ... 0]"
+        )
+
+    def test_summarize_1d(self):
+        A = np.arange(1001)
+        strA = '[ 0 1 2 ... 998 999 1000]'
+        assert_equal(str(A), strA)
+
+        reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
+        try:
+            np.set_printoptions(legacy='2.1')
+            assert_equal(repr(A), reprA)
+        finally:
+            np.set_printoptions(legacy=False)
+
+        assert_equal(repr(A), reprA.replace(')', ', shape=(1001,))'))
+
+    def test_summarize_2d(self):
+        A = np.arange(1002).reshape(2, 501)
+        strA = '[[ 0 1 2 ... 498 499 500]\n' \
+               ' [ 501 502 503 ... 999 1000 1001]]'
+        assert_equal(str(A), strA)
+
+        reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
+                ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
+        try:
+            np.set_printoptions(legacy='2.1')
+            assert_equal(repr(A), reprA)
+        finally:
+            np.set_printoptions(legacy=False)
+
+        assert_equal(repr(A), reprA.replace(')', ', shape=(2, 501))'))
+
+    def test_summarize_2d_dtype(self):
+        A = np.arange(1002, dtype='i2').reshape(2, 501)
+        strA = '[[ 0 1 2 ... 498 499 500]\n' \
+               ' [ 501 502 503 ... 999 1000 1001]]'
+        assert_equal(str(A), strA)
+
+        reprA = ('array([[ 0, 1, 2, ..., 498, 499, 500],\n'
+                 ' [ 501, 502, 503, ..., 999, 1000, 1001]],\n'
+                 ' shape=(2, 501), dtype=int16)')
+        assert_equal(repr(A), reprA)
+
+    def test_summarize_structure(self):
+        A = (np.arange(2002, dtype="<i8").reshape(2, 1001)
+             .view([('i', "<i8", (1001,))]))
+        strA = ("[[([ 0, 1, 2, ..., 998, 999, 1000],)]\n"
+                " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]]")
+        assert_equal(str(A), strA)
+
+        reprA = ("array([[([ 0, 1, 2, ..., 998, 999, 1000],)],\n"
+                 " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]],\n"
+                 " dtype=[('i', '<i8', (1001,))])")
+        assert_equal(repr(A), reprA)
+
+        B = np.ones(1, dtype=[('i', ">i8", (2, 1001))])
+        strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]"
+        assert_equal(str(B), strB)
+
+        reprB = (
+            "array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n"
+            " dtype=[('i', '>i8', (2, 1001))])"
+        )
+        assert_equal(repr(B), reprB)
+
+        C = (np.arange(22, dtype="<i8").reshape(2, 11)
+             .view([('i1', '<i8'), ('i10', '<i8', (10,))]))
+        strC = "[[( 0, [ 1, ..., 10])]\n" \
+               " [(11, [12, ..., 21])]]"
+        assert_equal(np.array2string(C, threshold=1, edgeitems=1), strC)
+
+    @pytest.mark.parametrize("dtype", "bhilqBHILQefdgFDG")
+    def test_dtype_endianness_repr(self, dtype):
+        native_dtype = np.dtype(dtype)
+        non_native_dtype = native_dtype.newbyteorder()
+        non_native_repr = repr(np.array([1], non_native_dtype))
+        native_repr = repr(np.array([1], native_dtype))
+        # preserve the sensible default of only showing dtype if nonstandard
+        assert ('dtype' in native_repr) ^ (native_dtype in _typelessdata), \
+            "an array repr should show dtype iff the type is non-standard"
+        if non_native_dtype.itemsize > 1:
+            # if the type is >1 byte, the non-native endian version
+            # must show endianness.
+ assert non_native_repr != native_repr + assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr + + def test_linewidth_repr(self): + a = np.full(7, fill_value=2) + np.set_printoptions(linewidth=17) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2])""") + ) + np.set_printoptions(linewidth=17, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, 2])""") + ) + + a = np.full(8, fill_value=2) + + np.set_printoptions(linewidth=18, legacy=False) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2, 2])""") + ) + + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, 2, + 2, 2, 2, 2])""") + ) + + def test_linewidth_str(self): + a = np.full(18, fill_value=2) + np.set_printoptions(linewidth=18) + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 + 2 2]""") + ) + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 2]""") + ) + + def test_edgeitems(self): + np.set_printoptions(edgeitems=1, threshold=1) + a = np.arange(27).reshape((3, 3, 3)) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + + [[18, ..., 20], + ..., + [24, ..., 26]]], shape=(3, 3, 3))""") + ) + + b = np.zeros((3, 3, 1, 1)) + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[0.]], + + ..., + + [[0.]]], + + + ..., + + + [[[0.]], + + ..., + + [[0.]]]], shape=(3, 3, 1, 1))""") + ) + + # 1.13 had extra trailing spaces, and was missing newlines + try: + np.set_printoptions(legacy='1.13') + assert_equal(repr(a), ( + "array([[[ 0, ..., 2],\n" + " ..., \n" + " [ 6, ..., 8]],\n" + "\n" + " ..., \n" + " [[18, ..., 20],\n" + " ..., \n" + " [24, ..., 26]]])") + ) + assert_equal(repr(b), ( + "array([[[[ 0.]],\n" + "\n" + " ..., \n" + " [[ 0.]]],\n" + "\n" + "\n" + " ..., \n" + " [[[ 0.]],\n" + "\n" + " ..., \n" + " [[ 0.]]]])") + ) + finally: + np.set_printoptions(legacy=False) + + def test_edgeitems_structured(self): + np.set_printoptions(edgeitems=1, threshold=1) + A = np.arange(5 * 2 * 3, dtype="f4')])"), + (np.void(b'a'), r"void(b'\x61')", r"np.void(b'\x61')"), + ]) +def test_scalar_repr_special(scalar, legacy_repr, representation): + # Test NEP 51 scalar repr (and legacy option) for numeric types + assert repr(scalar) == representation + + with np.printoptions(legacy="1.25"): + assert repr(scalar) == legacy_repr + +def test_scalar_void_float_str(): + # Note that based on this currently we do not print the same as a tuple + # would, since the tuple would include the repr() inside for floats, but + # we do not do that. 
+    scalar = np.void((1.0, 2.0), dtype=[('f0', '<f8'), ('f1', '>f4')])
+    assert str(scalar) == "(1.0, 2.0)"
+
+
+@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio")
+@pytest.mark.skipif(sys.version_info < (3, 11),
+                    reason="asyncio.barrier was added in Python 3.11")
+def test_printoptions_asyncio_safe():
+    asyncio = pytest.importorskip("asyncio")
+
+    b = asyncio.Barrier(2)
+
+    async def legacy_113():
+        np.set_printoptions(legacy='1.13', precision=12)
+        await b.wait()
+        po = np.get_printoptions()
+        assert po['legacy'] == '1.13'
+        assert po['precision'] == 12
+        orig_linewidth = po['linewidth']
+        with np.printoptions(linewidth=34, legacy='1.21'):
+            po = np.get_printoptions()
+            assert po['legacy'] == '1.21'
+            assert po['precision'] == 12
+            assert po['linewidth'] == 34
+        po = np.get_printoptions()
+        assert po['linewidth'] == orig_linewidth
+        assert po['legacy'] == '1.13'
+        assert po['precision'] == 12
+
+    async def legacy_125():
+        np.set_printoptions(legacy='1.25', precision=7)
+        await b.wait()
+        po = np.get_printoptions()
+        assert po['legacy'] == '1.25'
+        assert po['precision'] == 7
+        orig_linewidth = po['linewidth']
+        with np.printoptions(linewidth=6, legacy='1.13'):
+            po = np.get_printoptions()
+            assert po['legacy'] == '1.13'
+            assert po['precision'] == 7
+            assert po['linewidth'] == 6
+        po = np.get_printoptions()
+        assert po['linewidth'] == orig_linewidth
+        assert po['legacy'] == '1.25'
+        assert po['precision'] == 7
+
+    async def main():
+        await asyncio.gather(legacy_113(), legacy_125())
+
+    loop = asyncio.new_event_loop()
+    asyncio.run(main())
+    loop.close()
+
+
+@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads")
+def test_multithreaded_array_printing():
+    # the dragon4 implementation uses a static scratch space for performance
+    # reasons; this test makes sure it is set up in a thread-safe manner
+
+    run_threaded(TestPrintOptions().test_floatmode, 500)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_casting_floatingpoint_errors.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_casting_floatingpoint_errors.py
new file mode 100644
index 0000000..2f9c01f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_casting_floatingpoint_errors.py
@@ -0,0 +1,154 @@
+import pytest
+from pytest import param
+
+import numpy as np
+from numpy.testing import IS_WASM
+
+
+def values_and_dtypes():
+    """
+    Generate value+dtype pairs that generate floating point errors during
+    casts. The invalid casts to integers will generate "invalid" value
+    warnings, the float casts all generate "overflow".
+
+    (The Python int/float paths don't need to get tested in all the same
+    situations, but it does not hurt.)
+    """
+    # Casting to float16:
+    yield param(70000, "float16", id="int-to-f2")
+    yield param("70000", "float16", id="str-to-f2")
+    yield param(70000.0, "float16", id="float-to-f2")
+    yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2")
+    yield param(np.float64(70000.), "float16", id="double-to-f2")
+    yield param(np.float32(70000.), "float16", id="float-to-f2")
+    # Casting to float32:
+    yield param(10**100, "float32", id="int-to-f4")
+    yield param(1e100, "float32", id="float-to-f2")
+    yield param(np.longdouble(1e300), "float32", id="longdouble-to-f2")
+    yield param(np.float64(1e300), "float32", id="double-to-f2")
+    # Casting to float64:
+    # If longdouble is double-double, its max can be rounded down to the double
+    # max. So we correct the double spacing (a bit weird, admittedly):
+    max_ld = np.finfo(np.longdouble).max
+    spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0))
+    if max_ld - spacing > np.finfo("f8").max:
+        yield param(np.finfo(np.longdouble).max, "float64",
+                    id="longdouble-to-f8")
+
+    # Casting to complex64:
+    yield param(2e300, "complex64", id="float-to-c8")
+    yield param(2e300 + 0j, "complex64", id="complex-to-c8")
+    yield param(2e300j, "complex64", id="complex-to-c8")
+    yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8")
+
+    # Invalid float to integer casts:
+    with np.errstate(over="ignore"):
+        for to_dt in np.typecodes["AllInteger"]:
+            for value in [np.inf, np.nan]:
+                for from_dt in np.typecodes["AllFloat"]:
+                    from_dt = np.dtype(from_dt)
+                    from_val = from_dt.type(value)
+
+                    yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}")
+
+
+def check_operations(dtype, value):
+    """
+    There are many dedicated paths in NumPy which cast and which should check
+    for floating point errors that occur during those casts.
+    """
+    if dtype.kind != 'i':
+        # These assignments use the stricter setitem logic:
+        def assignment():
+            arr = np.empty(3, dtype=dtype)
+            arr[0] = value
+
+        yield assignment
+
+        def fill():
+            arr = np.empty(3, dtype=dtype)
+            arr.fill(value)
+
+        yield fill
+
+    def copyto_scalar():
+        arr = np.empty(3, dtype=dtype)
+        np.copyto(arr, value, casting="unsafe")
+
+    yield copyto_scalar
+
+    def copyto():
+        arr = np.empty(3, dtype=dtype)
+        np.copyto(arr, np.array([value, value, value]), casting="unsafe")
+
+    yield copyto
+
+    def copyto_scalar_masked():
+        arr = np.empty(3, dtype=dtype)
+        np.copyto(arr, value, casting="unsafe",
+                  where=[True, False, True])
+
+    yield copyto_scalar_masked
+
+    def copyto_masked():
+        arr = np.empty(3, dtype=dtype)
+        np.copyto(arr, np.array([value, value, value]), casting="unsafe",
+                  where=[True, False, True])
+
+    yield copyto_masked
+
+    def direct_cast():
+        np.array([value, value, value]).astype(dtype)
+
+    yield direct_cast
+
+    def direct_cast_nd_strided():
+        arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :]
+        arr.astype(dtype)
+
+    yield direct_cast_nd_strided
+
+    def boolean_array_assignment():
+        arr = np.empty(3, dtype=dtype)
+        arr[[True, False, True]] = np.array([value, value])
+
+    yield boolean_array_assignment
+
+    def integer_array_assignment():
+        arr = np.empty(3, dtype=dtype)
+        values = np.array([value, value])
+
+        arr[[0, 1]] = values
+
+    yield integer_array_assignment
+
+    def integer_array_assignment_with_subspace():
+        arr = np.empty((5, 3), dtype=dtype)
+        values = np.array([value, value, value])
+
+        arr[[0, 2]] = values
+
+    yield integer_array_assignment_with_subspace
+
+    def flat_assignment():
+        arr = np.empty((3,), dtype=dtype)
+        values = np.array([value, value, value])
+        arr.flat[:] = values
+
+    yield flat_assignment
+
+
+@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes())
+@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
+def test_floatingpoint_errors_casting(dtype, value):
+    dtype = np.dtype(dtype)
+    for operation in check_operations(dtype, value):
+        dtype = np.dtype(dtype)
+
+        match = "invalid" if dtype.kind in 'iu' else "overflow"
+        with pytest.warns(RuntimeWarning, match=match):
+            operation()
+
+        with np.errstate(all="raise"):
+            with pytest.raises(FloatingPointError, match=match):
+                operation()
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_casting_unittests.py
b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_casting_unittests.py new file mode 100644 index 0000000..f8441ea --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_casting_unittests.py @@ -0,0 +1,817 @@ +""" +The tests exercise the casting machinery in a more low-level manner. +The reason is mostly to test a new implementation of the casting machinery. + +Unlike most tests in NumPy, these are closer to unit-tests rather +than integration tests. +""" + +import ctypes +import enum +import random +import textwrap + +import pytest +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl + +import numpy as np +from numpy.lib.stride_tricks import as_strided +from numpy.testing import assert_array_equal + +# Simple skips object, parametric and long double (unsupported by struct) +simple_dtypes = "?bhilqBHILQefdFD" +if np.dtype("l").itemsize != np.dtype("q").itemsize: + # Remove l and L, the table was generated with 64bit linux in mind. + simple_dtypes = simple_dtypes.replace("l", "").replace("L", "") +simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes] + + +def simple_dtype_instances(): + for dtype_class in simple_dtypes: + dt = dtype_class() + yield pytest.param(dt, id=str(dt)) + if dt.byteorder != "|": + dt = dt.newbyteorder() + yield pytest.param(dt, id=str(dt)) + + +def get_expected_stringlength(dtype): + """Returns the string length when casting the basic dtypes to strings. + """ + if dtype == np.bool: + return 5 + if dtype.kind in "iu": + if dtype.itemsize == 1: + length = 3 + elif dtype.itemsize == 2: + length = 5 + elif dtype.itemsize == 4: + length = 10 + elif dtype.itemsize == 8: + length = 20 + else: + raise AssertionError(f"did not find expected length for {dtype}") + + if dtype.kind == "i": + length += 1 # adds one character for the sign + + return length + + # Note: Can't do dtype comparison for longdouble on windows + if dtype.char == "g": + return 48 + elif dtype.char == "G": + return 48 * 2 + elif dtype.kind == "f": + return 32 # also for half apparently. + elif dtype.kind == "c": + return 32 * 2 + + raise AssertionError(f"did not find expected length for {dtype}") + + +class Casting(enum.IntEnum): + no = 0 + equiv = 1 + safe = 2 + same_kind = 3 + unsafe = 4 + + +def _get_cancast_table(): + table = textwrap.dedent(""" + X ? b h i l q B H I L Q e f d g F D G S U V O M m + ? # = = = = = = = = = = = = = = = = = = = = = . = + b . # = = = = . . . . . = = = = = = = = = = = . = + h . ~ # = = = . . . . . ~ = = = = = = = = = = . = + i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . = + l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = + q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = + B . ~ = = = = # = = = = = = = = = = = = = = = . = + H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . = + I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . = + L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ + Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ + e . . . . . . . . . . . # = = = = = = = = = = . . + f . . . . . . . . . . . ~ # = = = = = = = = = . . + d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . . + g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . . + F . . . . . . . . . . . . . . . # = = = = = = . . + D . . . . . . . . . . . . . . . ~ # = = = = = . . + G . . . . . . . . . . . . . . . ~ ~ # = = = = . . + S . . . . . . . . . . . . . . . . . . # = = = . . + U . . . . . . . . . . . . . . . . . . . # = = . . + V . . . . . . . . . . . . . . . . . . . . # = . . + O . . . . . . . . . . . . . . . . . . . . = # . . + M . . . . . . . . . 
. . . . . . . . . . . = = # .
+ m . . . . . . . . . . . . . . . . . . . . = = . #
+    """).strip().split("\n")
+    dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
+
+    convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
+                    "=": Casting.safe, "#": Casting.equiv,
+                    " ": -1}
+
+    cancast = {}
+    for from_dt, row in zip(dtypes, table[1:]):
+        cancast[from_dt] = {}
+        for to_dt, c in zip(dtypes, row[2::2]):
+            cancast[from_dt][to_dt] = convert_cast[c]
+
+    return cancast
+
+
+CAST_TABLE = _get_cancast_table()
+
+
+class TestChanges:
+    """
+    These test cases exercise some behaviour changes
+    """
+    @pytest.mark.parametrize("string", ["S", "U"])
+    @pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
+    def test_float_to_string(self, floating, string):
+        assert np.can_cast(floating, string)
+        # 100 is long enough to hold any formatted floating
+        assert np.can_cast(floating, f"{string}100")
+
+    def test_to_void(self):
+        # But in general, we do consider these safe:
+        assert np.can_cast("d", "V")
+        assert np.can_cast("S20", "V")
+
+        # Do not consider it a safe cast if the void is too small:
+        assert not np.can_cast("d", "V1")
+        assert not np.can_cast("S20", "V1")
+        assert not np.can_cast("U1", "V1")
+        # Structured to unstructured is just like any other:
+        assert np.can_cast("d,i", "V", casting="same_kind")
+        # Unstructured void to unstructured is actually no cast at all:
+        assert np.can_cast("V3", "V", casting="no")
+        assert np.can_cast("V0", "V", casting="no")
+
+
+class TestCasting:
+    size = 1500  # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
+
+    def get_data(self, dtype1, dtype2):
+        if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
+            length = self.size // dtype1.itemsize
+        else:
+            length = self.size // dtype2.itemsize
+
+        # Assume that the base array is well enough aligned for all inputs.
+        arr1 = np.empty(length, dtype=dtype1)
+        assert arr1.flags.c_contiguous
+        assert arr1.flags.aligned
+
+        values = [random.randrange(-128, 128) for _ in range(length)]
+
+        for i, value in enumerate(values):
+            # Use item assignment to ensure this is not using casting:
+            if value < 0 and dtype1.kind == "u":
+                # Manually rollover unsigned integers (-1 -> int.max)
+                value = value + np.iinfo(dtype1).max + 1
+            arr1[i] = value
+
+        if dtype2 is None:
+            if dtype1.char == "?":
+                values = [bool(v) for v in values]
+            return arr1, values
+
+        if dtype2.char == "?":
+            values = [bool(v) for v in values]
+
+        arr2 = np.empty(length, dtype=dtype2)
+        assert arr2.flags.c_contiguous
+        assert arr2.flags.aligned
+
+        for i, value in enumerate(values):
+            # Use item assignment to ensure this is not using casting:
+            if value < 0 and dtype2.kind == "u":
+                # Manually rollover unsigned integers (-1 -> int.max)
+                value = value + np.iinfo(dtype2).max + 1
+            arr2[i] = value
+
+        return arr1, arr2, values
+
+    def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
+        """
+        Returns a copy of arr1 that may be non-contiguous or unaligned, and a
+        matching array for arr2 (although not a copy).
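+
+        As a rough illustration of the technique used here (assumed values;
+        a stride wider than the itemsize makes the view non-contiguous)::
+
+            from numpy.lib.stride_tricks import as_strided
+            base = np.zeros(32, dtype=np.uint8)
+            view = as_strided(base.view(np.int64), (2,), (16,))
+            assert not view.flags.c_contiguous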
+ """ + if contig: + stride1 = arr1.dtype.itemsize + stride2 = arr2.dtype.itemsize + elif aligned: + stride1 = 2 * arr1.dtype.itemsize + stride2 = 2 * arr2.dtype.itemsize + else: + stride1 = arr1.dtype.itemsize + 1 + stride2 = arr2.dtype.itemsize + 1 + + max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1 + max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1 + from_bytes = np.zeros(max_size1, dtype=np.uint8) + to_bytes = np.zeros(max_size2, dtype=np.uint8) + + # Sanity check that the above is large enough: + assert stride1 * len(arr1) <= from_bytes.nbytes + assert stride2 * len(arr2) <= to_bytes.nbytes + + if aligned: + new1 = as_strided(from_bytes[:-1].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[:-1].view(arr2.dtype), + arr2.shape, (stride2,)) + else: + new1 = as_strided(from_bytes[1:].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[1:].view(arr2.dtype), + arr2.shape, (stride2,)) + + new1[...] = arr1 + + if not contig: + # Ensure we did not overwrite bytes that should not be written: + offset = arr1.dtype.itemsize if aligned else 0 + buf = from_bytes[offset::stride1].tobytes() + assert buf.count(b"\0") == len(buf) + + if contig: + assert new1.flags.c_contiguous + assert new2.flags.c_contiguous + else: + assert not new1.flags.c_contiguous + assert not new2.flags.c_contiguous + + if aligned: + assert new1.flags.aligned + assert new2.flags.aligned + else: + assert not new1.flags.aligned or new1.dtype.alignment == 1 + assert not new2.flags.aligned or new2.dtype.alignment == 1 + + return new1, new2 + + @pytest.mark.parametrize("from_Dt", simple_dtypes) + def test_simple_cancast(self, from_Dt): + for to_Dt in simple_dtypes: + cast = get_castingimpl(from_Dt, to_Dt) + + for from_dt in [from_Dt(), from_Dt().newbyteorder()]: + default = cast._resolve_descriptors((from_dt, None))[1][1] + assert default == to_Dt() + del default + + for to_dt in [to_Dt(), to_Dt().newbyteorder()]: + casting, (from_res, to_res), view_off = ( + cast._resolve_descriptors((from_dt, to_dt))) + assert type(from_res) == from_Dt + assert type(to_res) == to_Dt + if view_off is not None: + # If a view is acceptable, this is "no" casting + # and byte order must be matching. + assert casting == Casting.no + # The above table lists this as "equivalent" + assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt] + # Note that to_res may not be the same as from_dt + assert from_res.isnative == to_res.isnative + else: + if from_Dt == to_Dt: + # Note that to_res may not be the same as from_dt + assert from_res.isnative != to_res.isnative + assert casting == CAST_TABLE[from_Dt][to_Dt] + + if from_Dt is to_Dt: + assert from_dt is from_res + assert to_dt is to_res + + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.parametrize("from_dt", simple_dtype_instances()) + def test_simple_direct_casts(self, from_dt): + """ + This test checks numeric direct casts for dtypes supported also by the + struct module (plus complex). It tries to be test a wide range of + inputs, but skips over possibly undefined behaviour (e.g. int rollover). + Longdouble and CLongdouble are tested, but only using double precision. + + If this test creates issues, it should possibly just be simplified + or even removed (checking whether unaligned/non-contiguous casts give + the same results is useful, though). 
+ """ + for to_dt in simple_dtype_instances(): + to_dt = to_dt.values[0] + cast = get_castingimpl(type(from_dt), type(to_dt)) + + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, to_dt)) + + if from_res is not from_dt or to_res is not to_dt: + # Do not test this case, it is handled in multiple steps, + # each of which should is tested individually. + return + + safe = casting <= Casting.safe + del from_res, to_res, casting + + arr1, arr2, values = self.get_data(from_dt, to_dt) + + cast._simple_strided_call((arr1, arr2)) + + # Check via python list + assert arr2.tolist() == values + + # Check that the same results are achieved for strided loops + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + # Check if alignment makes a difference, but only if supported + # and only if the alignment can be wrong + if ((from_dt.alignment == 1 and to_dt.alignment == 1) or + not cast._supports_unaligned): + return + + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + del arr1_o, arr2_o, cast + + @pytest.mark.parametrize("from_Dt", simple_dtypes) + def test_numeric_to_times(self, from_Dt): + # We currently only implement contiguous loops, so only need to + # test those. + from_dt = from_Dt() + + time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"), + np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")] + for time_dt in time_dtypes: + cast = get_castingimpl(type(from_dt), type(time_dt)) + + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, time_dt)) + + assert from_res is from_dt + assert to_res is time_dt + del from_res, to_res + + assert casting & CAST_TABLE[from_Dt][type(time_dt)] + assert view_off is None + + int64_dt = np.dtype(np.int64) + arr1, arr2, values = self.get_data(from_dt, int64_dt) + arr2 = arr2.view(time_dt) + arr2[...] = np.datetime64("NaT") + + if time_dt == np.dtype("M8"): + # This is a bit of a strange path, and could probably be removed + arr1[-1] = 0 # ensure at least one value is not NaT + + # The cast currently succeeds, but the values are invalid: + cast._simple_strided_call((arr1, arr2)) + with pytest.raises(ValueError): + str(arr2[-1]) # e.g. 
+                return
+
+            cast._simple_strided_call((arr1, arr2))
+
+            assert [int(v) for v in arr2.tolist()] == values
+
+            # Check that the same results are achieved for strided loops
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+    @pytest.mark.parametrize(
+        ["from_dt", "to_dt", "expected_casting", "expected_view_off",
+         "nom", "denom"],
+        [("M8[ns]", None, Casting.no, 0, 1, 1),
+         (str(np.dtype("M8[ns]").newbyteorder()), None,
+          Casting.equiv, None, 1, 1),
+         ("M8", "M8[ms]", Casting.safe, 0, 1, 1),
+         # should be invalid cast:
+         ("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
+         ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
+         ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
+         ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
+         ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
+         ("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
+          # give full values based on NumPy 1.19.x
+          [-2**63, 0, -1, 1314, -1315, 564442610]),
+         ("m8[ns]", None, Casting.no, 0, 1, 1),
+         (str(np.dtype("m8[ns]").newbyteorder()), None,
+          Casting.equiv, None, 1, 1),
+         ("m8", "m8[ms]", Casting.safe, 0, 1, 1),
+         # should be invalid cast:
+         ("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
+         ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
+         ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
+         ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
+         ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
+         ("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
+          # give full values based on NumPy 1.19.x
+          [-2**63, 0, 0, 1314, -1315, 564442610])])
+    def test_time_to_time(self, from_dt, to_dt,
+                          expected_casting, expected_view_off,
+                          nom, denom):
+        from_dt = np.dtype(from_dt)
+        if to_dt is not None:
+            to_dt = np.dtype(to_dt)
+
+        # Test a few values for casting (results generated with NumPy 1.19)
+        values = np.array([-2**63, 1, 2**63 - 1, 10000, -10000, 2**32])
+        values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
+        assert values.dtype.byteorder == from_dt.byteorder
+        assert np.isnat(values.view(from_dt)[0])
+
+        DType = type(from_dt)
+        cast = get_castingimpl(DType, DType)
+        casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+            (from_dt, to_dt))
+        assert from_res is from_dt
+        assert to_res is to_dt or to_dt is None
+        assert casting == expected_casting
+        assert view_off == expected_view_off
+
+        if nom is not None:
+            expected_out = (values * nom // denom).view(to_res)
+            expected_out[0] = "NaT"
+        else:
+            expected_out = np.empty_like(values)
+            expected_out[...] = denom
+            expected_out = expected_out.view(to_dt)
+
+        orig_arr = values.view(from_dt)
+        orig_out = np.empty_like(expected_out)
+
+        if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):  # noqa: PLR1714
+            # Casting from non-generic to generic units is an error and should
+            # probably be reported as an invalid cast earlier.
+            with pytest.raises(ValueError):
+                cast._simple_strided_call((orig_arr, orig_out))
+            return
+
+        for aligned in [True, False]:
+            for contig in [True, False]:
+                arr, out = self.get_data_variation(
+                    orig_arr, orig_out, aligned, contig)
+                out[...]
= 0 + cast._simple_strided_call((arr, out)) + assert_array_equal(out.view("int64"), expected_out.view("int64")) + + def string_with_modified_length(self, dtype, change_length): + fact = 1 if dtype.char == "S" else 4 + length = dtype.itemsize // fact + change_length + return np.dtype(f"{dtype.byteorder}{dtype.char}{length}") + + @pytest.mark.parametrize("other_DT", simple_dtypes) + @pytest.mark.parametrize("string_char", ["S", "U"]) + def test_string_cancast(self, other_DT, string_char): + fact = 1 if string_char == "S" else 4 + + string_DT = type(np.dtype(string_char)) + cast = get_castingimpl(other_DT, string_DT) + + other_dt = other_DT() + expected_length = get_expected_stringlength(other_dt) + string_dt = np.dtype(f"{string_char}{expected_length}") + + safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors( + (other_dt, None)) + assert res_dt.itemsize == expected_length * fact + assert safety == Casting.safe # we consider to string casts "safe" + assert view_off is None + assert isinstance(res_dt, string_DT) + + # These casts currently implement changing the string length, so + # check the cast-safety for too long/fixed string lengths: + for change_length in [-1, 0, 1]: + if change_length >= 0: + expected_safety = Casting.safe + else: + expected_safety = Casting.same_kind + + to_dt = self.string_with_modified_length(string_dt, change_length) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (other_dt, to_dt)) + assert res_dt is to_dt + assert safety == expected_safety + assert view_off is None + + # The opposite direction is always considered unsafe: + cast = get_castingimpl(string_DT, other_DT) + + safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt)) + assert safety == Casting.unsafe + assert view_off is None + + cast = get_castingimpl(string_DT, other_DT) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (string_dt, None)) + assert safety == Casting.unsafe + assert view_off is None + assert other_dt is res_dt # returns the singleton for simple dtypes + + @pytest.mark.parametrize("string_char", ["S", "U"]) + @pytest.mark.parametrize("other_dt", simple_dtype_instances()) + def test_simple_string_casts_roundtrip(self, other_dt, string_char): + """ + Tests casts from and to string by checking the roundtripping property. + + The test also covers some string to string casts (but not all). + + If this test creates issues, it should possibly just be simplified + or even removed (checking whether unaligned/non-contiguous casts give + the same results is useful, though). 
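+
+        A public-API sketch of the roundtrip property checked here (assuming
+        the values fit within the string length, numeric -> string -> numeric
+        is lossless)::
+
+            x = np.array([5, -3], dtype="i8")
+            assert (x.astype("S21").astype("i8") == x).all()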
+ """ + string_DT = type(np.dtype(string_char)) + + cast = get_castingimpl(type(other_dt), string_DT) + cast_back = get_castingimpl(string_DT, type(other_dt)) + _, (res_other_dt, string_dt), _ = cast._resolve_descriptors( + (other_dt, None)) + + if res_other_dt is not other_dt: + # do not support non-native byteorder, skip test in that case + assert other_dt.byteorder != res_other_dt.byteorder + return + + orig_arr, values = self.get_data(other_dt, None) + str_arr = np.zeros(len(orig_arr), dtype=string_dt) + string_dt_short = self.string_with_modified_length(string_dt, -1) + str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short) + string_dt_long = self.string_with_modified_length(string_dt, 1) + str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long) + + assert not cast._supports_unaligned # if support is added, should test + assert not cast_back._supports_unaligned + + for contig in [True, False]: + other_arr, str_arr = self.get_data_variation( + orig_arr, str_arr, True, contig) + _, str_arr_short = self.get_data_variation( + orig_arr, str_arr_short.copy(), True, contig) + _, str_arr_long = self.get_data_variation( + orig_arr, str_arr_long, True, contig) + + cast._simple_strided_call((other_arr, str_arr)) + + cast._simple_strided_call((other_arr, str_arr_short)) + assert_array_equal(str_arr.astype(string_dt_short), str_arr_short) + + cast._simple_strided_call((other_arr, str_arr_long)) + assert_array_equal(str_arr, str_arr_long) + + if other_dt.kind == "b": + # Booleans do not roundtrip + continue + + other_arr[...] = 0 + cast_back._simple_strided_call((str_arr, other_arr)) + assert_array_equal(orig_arr, other_arr) + + other_arr[...] = 0 + cast_back._simple_strided_call((str_arr_long, other_arr)) + assert_array_equal(orig_arr, other_arr) + + @pytest.mark.parametrize("other_dt", ["S8", "U8"]) + @pytest.mark.parametrize("string_char", ["S", "U"]) + def test_string_to_string_cancast(self, other_dt, string_char): + other_dt = np.dtype(other_dt) + + fact = 1 if string_char == "S" else 4 + div = 1 if other_dt.char == "S" else 4 + + string_DT = type(np.dtype(string_char)) + cast = get_castingimpl(type(other_dt), string_DT) + + expected_length = other_dt.itemsize // div + string_dt = np.dtype(f"{string_char}{expected_length}") + + safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors( + (other_dt, None)) + assert res_dt.itemsize == expected_length * fact + assert isinstance(res_dt, string_DT) + + expected_view_off = None + if other_dt.char == string_char: + if other_dt.isnative: + expected_safety = Casting.no + expected_view_off = 0 + else: + expected_safety = Casting.equiv + elif string_char == "U": + expected_safety = Casting.safe + else: + expected_safety = Casting.unsafe + + assert view_off == expected_view_off + assert expected_safety == safety + + for change_length in [-1, 0, 1]: + to_dt = self.string_with_modified_length(string_dt, change_length) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (other_dt, to_dt)) + + assert res_dt is to_dt + if change_length <= 0: + assert view_off == expected_view_off + else: + assert view_off is None + if expected_safety == Casting.unsafe: + assert safety == expected_safety + elif change_length < 0: + assert safety == Casting.same_kind + elif change_length == 0: + assert safety == expected_safety + elif change_length > 0: + assert safety == Casting.safe + + @pytest.mark.parametrize("order1", [">", "<"]) + @pytest.mark.parametrize("order2", [">", "<"]) + def test_unicode_byteswapped_cast(self, order1, order2): + # 
Very specific tests (not using the castingimpl directly)
+        # that test unicode byteswaps including for unaligned array data.
+        dtype1 = np.dtype(f"{order1}U30")
+        dtype2 = np.dtype(f"{order2}U30")
+        data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
+        data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
+        if dtype1.alignment != 1:
+            # alignment should always be >1, but skip the check if not
+            assert not data1.flags.aligned
+            assert not data2.flags.aligned
+
+        element = "this is a ünicode string‽"
+        data1[()] = element
+        # Test both `data1` and `data1.copy()` (which should be aligned)
+        for data in [data1, data1.copy()]:
+            data2[...] = data1
+            assert data2[()] == element
+            assert data2.copy()[()] == element
+
+    def test_void_to_string_special_case(self):
+        # Cover a small special case in void to string casting that could
+        # probably just as well be turned into an error (compare
+        # `test_object_to_parametric_internal_error` below).
+        assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
+        assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
+
+    def test_object_to_parametric_internal_error(self):
+        # We reject casting from object to a parametric type, without
+        # figuring out the correct instance first.
+        object_dtype = type(np.dtype(object))
+        other_dtype = type(np.dtype(str))
+        cast = get_castingimpl(object_dtype, other_dtype)
+        with pytest.raises(TypeError,
+                           match="casting from object to the parametric DType"):
+            cast._resolve_descriptors((np.dtype("O"), None))
+
+    @pytest.mark.parametrize("dtype", simple_dtype_instances())
+    def test_object_and_simple_resolution(self, dtype):
+        # Simple test to exercise the cast when no instance is specified
+        object_dtype = type(np.dtype(object))
+        cast = get_castingimpl(object_dtype, type(dtype))
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (np.dtype("O"), dtype))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert res_dt is dtype
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (np.dtype("O"), None))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert res_dt == dtype.newbyteorder("=")
+
+    @pytest.mark.parametrize("dtype", simple_dtype_instances())
+    def test_simple_to_object_resolution(self, dtype):
+        # Simple test to exercise the cast when no instance is specified
+        object_dtype = type(np.dtype(object))
+        cast = get_castingimpl(type(dtype), object_dtype)
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (dtype, None))
+        assert safety == Casting.safe
+        assert view_off is None
+        assert res_dt is np.dtype("O")
+
+    @pytest.mark.parametrize("casting", ["no", "unsafe"])
+    def test_void_and_structured_with_subarray(self, casting):
+        # test case corresponding to gh-19325
+        dtype = np.dtype([("foo", "<i8", (1, 1))])
+        expected = casting == "unsafe"
+        assert np.can_cast("V4", dtype, casting=casting) == expected
+        assert np.can_cast(dtype, "V4", casting=casting) == expected
+
+    @pytest.mark.parametrize("dtype", np.typecodes["All"])
+    def test_object_casts_NULL_None_equivalence(self, dtype):
+        # None to <other> casts may succeed or fail, but a NULL'ed array must
+        # behave the same as one filled with None's.
+ arr_normal = np.array([None] * 5) + arr_NULLs = np.empty_like(arr_normal) + ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes) + # If the check fails (maybe it should) the test would lose its purpose: + assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes + + try: + expected = arr_normal.astype(dtype) + except TypeError: + with pytest.raises(TypeError): + arr_NULLs.astype(dtype) + else: + assert_array_equal(expected, arr_NULLs.astype(dtype)) + + @pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + def test_nonstandard_bool_to_other(self, dtype): + # simple test for casting bool_ to numeric types, which should not + # expose the detail that NumPy bools can sometimes take values other + # than 0 and 1. See also gh-19514. + nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool) + res = nonstandard_bools.astype(dtype) + expected = [0, 1, 1] + assert_array_equal(res, expected) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_conversion_utils.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_conversion_utils.py new file mode 100644 index 0000000..03ba339 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_conversion_utils.py @@ -0,0 +1,206 @@ +""" +Tests for numpy/_core/src/multiarray/conversion_utils.c +""" +import re + +import numpy._core._multiarray_tests as mt +import pytest + +from numpy._core.multiarray import CLIP, RAISE, WRAP +from numpy.testing import assert_raises + + +class StringConverterTestCase: + allow_bytes = True + case_insensitive = True + exact_match = False + warn = True + + def _check_value_error(self, val): + pattern = fr'\(got {re.escape(repr(val))}\)' + with pytest.raises(ValueError, match=pattern) as exc: + self.conv(val) + + def _check_conv_assert_warn(self, val, expected): + if self.warn: + with assert_raises(ValueError) as exc: + assert self.conv(val) == expected + else: + assert self.conv(val) == expected + + def _check(self, val, expected): + """Takes valid non-deprecated inputs for converters, + runs converters on inputs, checks correctness of outputs, + warnings and errors""" + assert self.conv(val) == expected + + if self.allow_bytes: + assert self.conv(val.encode('ascii')) == expected + else: + with pytest.raises(TypeError): + self.conv(val.encode('ascii')) + + if len(val) != 1: + if self.exact_match: + self._check_value_error(val[:1]) + self._check_value_error(val + '\0') + else: + self._check_conv_assert_warn(val[:1], expected) + + if self.case_insensitive: + if val != val.lower(): + self._check_conv_assert_warn(val.lower(), expected) + if val != val.upper(): + self._check_conv_assert_warn(val.upper(), expected) + else: + if val != val.lower(): + self._check_value_error(val.lower()) + if val != val.upper(): + self._check_value_error(val.upper()) + + def test_wrong_type(self): + # common cases which apply to all the below + with pytest.raises(TypeError): + self.conv({}) + with pytest.raises(TypeError): + self.conv([]) + + def test_wrong_value(self): + # nonsense strings + self._check_value_error('') + self._check_value_error('\N{greek small letter pi}') + + if self.allow_bytes: + self._check_value_error(b'') + # bytes which can't be converted to strings via utf8 + self._check_value_error(b"\xFF") + if self.exact_match: + self._check_value_error("there's no way this is supported") + + +class TestByteorderConverter(StringConverterTestCase): + """ Tests of PyArray_ByteorderConverter """ + conv = mt.run_byteorder_converter + warn = False + + 
def test_valid(self): + for s in ['big', '>']: + self._check(s, 'NPY_BIG') + for s in ['little', '<']: + self._check(s, 'NPY_LITTLE') + for s in ['native', '=']: + self._check(s, 'NPY_NATIVE') + for s in ['ignore', '|']: + self._check(s, 'NPY_IGNORE') + for s in ['swap']: + self._check(s, 'NPY_SWAP') + + +class TestSortkindConverter(StringConverterTestCase): + """ Tests of PyArray_SortkindConverter """ + conv = mt.run_sortkind_converter + warn = False + + def test_valid(self): + self._check('quicksort', 'NPY_QUICKSORT') + self._check('heapsort', 'NPY_HEAPSORT') + self._check('mergesort', 'NPY_STABLESORT') # alias + self._check('stable', 'NPY_STABLESORT') + + +class TestSelectkindConverter(StringConverterTestCase): + """ Tests of PyArray_SelectkindConverter """ + conv = mt.run_selectkind_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check('introselect', 'NPY_INTROSELECT') + + +class TestSearchsideConverter(StringConverterTestCase): + """ Tests of PyArray_SearchsideConverter """ + conv = mt.run_searchside_converter + + def test_valid(self): + self._check('left', 'NPY_SEARCHLEFT') + self._check('right', 'NPY_SEARCHRIGHT') + + +class TestOrderConverter(StringConverterTestCase): + """ Tests of PyArray_OrderConverter """ + conv = mt.run_order_converter + warn = False + + def test_valid(self): + self._check('c', 'NPY_CORDER') + self._check('f', 'NPY_FORTRANORDER') + self._check('a', 'NPY_ANYORDER') + self._check('k', 'NPY_KEEPORDER') + + def test_flatten_invalid_order(self): + # invalid after gh-14596 + with pytest.raises(ValueError): + self.conv('Z') + for order in [False, True, 0, 8]: + with pytest.raises(TypeError): + self.conv(order) + + +class TestClipmodeConverter(StringConverterTestCase): + """ Tests of PyArray_ClipmodeConverter """ + conv = mt.run_clipmode_converter + + def test_valid(self): + self._check('clip', 'NPY_CLIP') + self._check('wrap', 'NPY_WRAP') + self._check('raise', 'NPY_RAISE') + + # integer values allowed here + assert self.conv(CLIP) == 'NPY_CLIP' + assert self.conv(WRAP) == 'NPY_WRAP' + assert self.conv(RAISE) == 'NPY_RAISE' + + +class TestCastingConverter(StringConverterTestCase): + """ Tests of PyArray_CastingConverter """ + conv = mt.run_casting_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check("no", "NPY_NO_CASTING") + self._check("equiv", "NPY_EQUIV_CASTING") + self._check("safe", "NPY_SAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + self._check("unsafe", "NPY_UNSAFE_CASTING") + + +class TestIntpConverter: + """ Tests of PyArray_IntpConverter """ + conv = mt.run_intp_converter + + def test_basic(self): + assert self.conv(1) == (1,) + assert self.conv((1, 2)) == (1, 2) + assert self.conv([1, 2]) == (1, 2) + assert self.conv(()) == () + + def test_none(self): + with pytest.raises(TypeError): + assert self.conv(None) == () + + def test_float(self): + with pytest.raises(TypeError): + self.conv(1.0) + with pytest.raises(TypeError): + self.conv([1, 1.0]) + + def test_too_large(self): + with pytest.raises(ValueError): + self.conv(2**64) + + def test_too_many_dims(self): + assert self.conv([1] * 64) == (1,) * 64 + with pytest.raises(ValueError): + self.conv([1] * 65) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cpu_dispatcher.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cpu_dispatcher.py new file mode 100644 index 0000000..0a47685 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cpu_dispatcher.py @@ -0,0 +1,49 @@ +from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, +) + +from numpy._core import _umath_tests +from numpy.testing import assert_equal + + +def test_dispatcher(): + """ + Testing the utilities of the CPU dispatcher + """ + targets = ( + "SSE2", "SSE41", "AVX2", + "VSX", "VSX2", "VSX3", + "NEON", "ASIMD", "ASIMDHP", + "VX", "VXE", "LSX" + ) + highest_sfx = "" # no suffix for the baseline + all_sfx = [] + for feature in reversed(targets): + # skip baseline features, by the default `CCompilerOpt` do not generate separated objects + # for the baseline, just one object combined all of them via 'baseline' option + # within the configuration statements. + if feature in __cpu_baseline__: + continue + # check compiler and running machine support + if feature not in __cpu_dispatch__ or not __cpu_features__[feature]: + continue + + if not highest_sfx: + highest_sfx = "_" + feature + all_sfx.append("func" + "_" + feature) + + test = _umath_tests.test_dispatch() + assert_equal(test["func"], "func" + highest_sfx) + assert_equal(test["var"], "var" + highest_sfx) + + if highest_sfx: + assert_equal(test["func_xb"], "func" + highest_sfx) + assert_equal(test["var_xb"], "var" + highest_sfx) + else: + assert_equal(test["func_xb"], "nobase") + assert_equal(test["var_xb"], "nobase") + + all_sfx.append("func") # add the baseline + assert_equal(test["all"], all_sfx) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cpu_features.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cpu_features.py new file mode 100644 index 0000000..d1e3dc6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cpu_features.py @@ -0,0 +1,432 @@ +import os +import pathlib +import platform +import re +import subprocess +import sys + +import pytest +from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, +) + + +def assert_features_equal(actual, desired, fname): + __tracebackhide__ = True # Hide traceback for py.test + actual, desired = str(actual), str(desired) + if actual == desired: + return + detected = str(__cpu_features__).replace("'", "") + try: + with open("/proc/cpuinfo") as fd: + cpuinfo = fd.read(2048) + except Exception as err: + cpuinfo = str(err) + + try: + import subprocess + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) + auxv = auxv.decode() + except Exception as err: + auxv = str(err) + + import textwrap + error_report = textwrap.indent( +f""" +########################################### +### Extra debugging information +########################################### +------------------------------------------- +--- NumPy Detections +------------------------------------------- +{detected} +------------------------------------------- +--- SYS / CPUINFO +------------------------------------------- +{cpuinfo}.... 
+------------------------------------------- +--- SYS / AUXV +------------------------------------------- +{auxv} +""", prefix='\r') + + raise AssertionError(( + "Failure Detection\n" + " NAME: '%s'\n" + " ACTUAL: %s\n" + " DESIRED: %s\n" + "%s" + ) % (fname, actual, desired, error_report)) + +def _text_to_list(txt): + out = txt.strip("][\n").replace("'", "").split(', ') + return None if out[0] == "" else out + +class AbstractTest: + features = [] + features_groups = {} + features_map = {} + features_flags = set() + + def load_flags(self): + # a hook + pass + + def test_features(self): + self.load_flags() + for gname, features in self.features_groups.items(): + test_features = [self.cpu_have(f) for f in features] + assert_features_equal(__cpu_features__.get(gname), all(test_features), gname) + + for feature_name in self.features: + cpu_have = self.cpu_have(feature_name) + npy_have = __cpu_features__.get(feature_name) + assert_features_equal(npy_have, cpu_have, feature_name) + + def cpu_have(self, feature_name): + map_names = self.features_map.get(feature_name, feature_name) + if isinstance(map_names, str): + return map_names in self.features_flags + return any(f in self.features_flags for f in map_names) + + def load_flags_cpuinfo(self, magic_key): + self.features_flags = self.get_cpuinfo_item(magic_key) + + def get_cpuinfo_item(self, magic_key): + values = set() + with open('/proc/cpuinfo') as fd: + for line in fd: + if not line.startswith(magic_key): + continue + flags_value = [s.strip() for s in line.split(':', 1)] + if len(flags_value) == 2: + values = values.union(flags_value[1].upper().split()) + return values + + def load_flags_auxv(self): + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) + for at in auxv.split(b'\n'): + if not at.startswith(b"AT_HWCAP"): + continue + hwcap_value = [s.strip() for s in at.split(b':', 1)] + if len(hwcap_value) == 2: + self.features_flags = self.features_flags.union( + hwcap_value[1].upper().decode().split() + ) + +@pytest.mark.skipif( + sys.platform == 'emscripten', + reason=( + "The subprocess module is not available on WASM platforms and" + " therefore this test class cannot be properly executed." 
+ ), +) +class TestEnvPrivation: + cwd = pathlib.Path(__file__).parent.resolve() + env = os.environ.copy() + _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None) + _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None) + SUBPROCESS_ARGS = {"cwd": cwd, "capture_output": True, "text": True, "check": True} + unavailable_feats = [ + feat for feat in __cpu_dispatch__ if not __cpu_features__[feat] + ] + UNAVAILABLE_FEAT = ( + None if len(unavailable_feats) == 0 + else unavailable_feats[0] + ) + BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0] + SCRIPT = """ +def main(): + from numpy._core._multiarray_umath import ( + __cpu_features__, + __cpu_dispatch__ + ) + + detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]] + print(detected) + +if __name__ == "__main__": + main() + """ + + @pytest.fixture(autouse=True) + def setup_class(self, tmp_path_factory): + file = tmp_path_factory.mktemp("runtime_test_script") + file /= "_runtime_detect.py" + file.write_text(self.SCRIPT) + self.file = file + + def _run(self): + return subprocess.run( + [sys.executable, self.file], + env=self.env, + **self.SUBPROCESS_ARGS, + ) + + # Helper function mimicking pytest.raises for subprocess call + def _expect_error( + self, + msg, + err_type, + no_error_msg="Failed to generate error" + ): + try: + self._run() + except subprocess.CalledProcessError as e: + assertion_message = f"Expected: {msg}\nGot: {e.stderr}" + assert re.search(msg, e.stderr), assertion_message + + assertion_message = ( + f"Expected error of type: {err_type}; see full " + f"error:\n{e.stderr}" + ) + assert re.search(err_type, e.stderr), assertion_message + else: + assert False, no_error_msg + + def setup_method(self): + """Ensure that the environment is reset""" + self.env = os.environ.copy() + + def test_runtime_feature_selection(self): + """ + Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the + features exactly specified are dispatched. + """ + + # Capture runtime-enabled features + out = self._run() + non_baseline_features = _text_to_list(out.stdout) + + if non_baseline_features is None: + pytest.skip( + "No dispatchable features outside of baseline detected." 
+ ) + feature = non_baseline_features[0] + + # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is + # specified + self.env['NPY_ENABLE_CPU_FEATURES'] = feature + out = self._run() + enabled_features = _text_to_list(out.stdout) + + # Ensure that only one feature is enabled, and it is exactly the one + # specified by `NPY_ENABLE_CPU_FEATURES` + assert set(enabled_features) == {feature} + + if len(non_baseline_features) < 2: + pytest.skip("Only one non-baseline feature detected.") + # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is + # specified + self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features) + out = self._run() + enabled_features = _text_to_list(out.stdout) + + # Ensure that both features are enabled, and they are exactly the ones + # specified by `NPY_ENABLE_CPU_FEATURES` + assert set(enabled_features) == set(non_baseline_features) + + @pytest.mark.parametrize("enabled, disabled", + [ + ("feature", "feature"), + ("feature", "same"), + ]) + def test_both_enable_disable_set(self, enabled, disabled): + """ + Ensure that when both environment variables are set then an + ImportError is thrown + """ + self.env['NPY_ENABLE_CPU_FEATURES'] = enabled + self.env['NPY_DISABLE_CPU_FEATURES'] = disabled + msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES" + err_type = "ImportError" + self._expect_error(msg, err_type) + + @pytest.mark.skipif( + not __cpu_dispatch__, + reason=( + "NPY_*_CPU_FEATURES only parsed if " + "`__cpu_dispatch__` is non-empty" + ) + ) + @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"]) + def test_variable_too_long(self, action): + """ + Test that an error is thrown if the environment variables are too long + to be processed. Current limit is 1024, but this may change later. + """ + MAX_VAR_LENGTH = 1024 + # Actual length is MAX_VAR_LENGTH + 1 due to null-termination + self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH + msg = ( + f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is " + f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted" + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + @pytest.mark.skipif( + not __cpu_dispatch__, + reason=( + "NPY_*_CPU_FEATURES only parsed if " + "`__cpu_dispatch__` is non-empty" + ) + ) + def test_impossible_feature_disable(self): + """ + Test that a RuntimeError is thrown if an impossible feature-disabling + request is made. This includes disabling a baseline feature. + """ + + if self.BASELINE_FEAT is None: + pytest.skip("There are no unavailable features to test with") + bad_feature = self.BASELINE_FEAT + self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature + msg = ( + f"You cannot disable CPU feature '{bad_feature}', since it is " + "part of the baseline optimizations" + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + def test_impossible_feature_enable(self): + """ + Test that a RuntimeError is thrown if an impossible feature-enabling + request is made. This includes enabling a feature not supported by the + machine, or disabling a baseline optimization. + """ + + if self.UNAVAILABLE_FEAT is None: + pytest.skip("There are no unavailable features to test with") + bad_feature = self.UNAVAILABLE_FEAT + self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." 
+ ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + # Ensure that it fails even when providing garbage in addition + feats = f"{bad_feature}, Foobar" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since they " + "are not supported by your machine." + ) + self._expect_error(msg, err_type) + + if self.BASELINE_FEAT is not None: + # Ensure that only the bad feature gets reported + feats = f"{bad_feature}, {self.BASELINE_FEAT}" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." + ) + self._expect_error(msg, err_type) + + +is_linux = sys.platform.startswith('linux') +is_cygwin = sys.platform.startswith('cygwin') +machine = platform.machine() +is_x86 = re.match(r"^(amd64|x86|i386|i686)", machine, re.IGNORECASE) +@pytest.mark.skipif( + not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" +) +class Test_X86_Features(AbstractTest): + features = [ + "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42", + "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD", + "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ", + "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA", + "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16", + ] + features_groups = { + "AVX512_KNL": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"], + "AVX512_KNM": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS", + "AVX5124VNNIW", "AVX512VPOPCNTDQ"], + "AVX512_SKX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"], + "AVX512_CLX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"], + "AVX512_CNL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", + "AVX512VBMI"], + "AVX512_ICL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", + "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"], + "AVX512_SPR": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", + "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "AVX512FP16"], + } + features_map = { + "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", + "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", + "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", + "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", + "AVX512FP16": "AVX512_FP16", + } + + def load_flags(self): + self.load_flags_cpuinfo("flags") + + +is_power = re.match(r"^(powerpc|ppc)64", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") +class Test_POWER_Features(AbstractTest): + features = ["VSX", "VSX2", "VSX3", "VSX4"] + features_map = {"VSX2": "ARCH_2_07", "VSX3": "ARCH_3_00", "VSX4": "ARCH_3_1"} + + def load_flags(self): + self.load_flags_auxv() + + +is_zarch = re.match(r"^(s390x)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_zarch, + reason="Only for Linux and IBM Z") +class Test_ZARCH_Features(AbstractTest): + features = ["VX", "VXE", "VXE2"] + + def load_flags(self): + self.load_flags_auxv() + + +is_arm = re.match(r"^(arm|aarch64)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM") +class Test_ARM_Features(AbstractTest): + features = [ + "SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", 
"ASIMDDP", "ASIMDFHM" + ] + features_groups = { + "NEON_FP16": ["NEON", "HALF"], + "NEON_VFPV4": ["NEON", "VFPV4"], + } + + def load_flags(self): + self.load_flags_cpuinfo("Features") + arch = self.get_cpuinfo_item("CPU architecture") + # in case of mounting virtual filesystem of aarch64 kernel without linux32 + is_rootfs_v8 = ( + not re.match(r"^armv[0-9]+l$", machine) and + (int('0' + next(iter(arch))) > 7 if arch else 0) + ) + if re.match(r"^(aarch64|AARCH64)", machine) or is_rootfs_v8: + self.features_map = { + "NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD" + } + else: + self.features_map = { + # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) + # doesn't provide information about ASIMD, so we assume that ASIMD is supported + # if the kernel reports any one of the following ARM8 features. + "ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32") + } + + +is_loongarch = re.match(r"^(loongarch)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_loongarch, reason="Only for Linux and LoongArch") +class Test_LOONGARCH_Features(AbstractTest): + features = ["LSX"] + + def load_flags(self): + self.load_flags_cpuinfo("Features") diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_custom_dtypes.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_custom_dtypes.py new file mode 100644 index 0000000..66e6de3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_custom_dtypes.py @@ -0,0 +1,315 @@ +from tempfile import NamedTemporaryFile + +import pytest +from numpy._core._multiarray_umath import ( + _discover_array_parameters as discover_array_params, +) +from numpy._core._multiarray_umath import _get_sfloat_dtype + +import numpy as np +from numpy.testing import assert_array_equal + +SF = _get_sfloat_dtype() + + +class TestSFloat: + def _get_array(self, scaling, aligned=True): + if not aligned: + a = np.empty(3 * 8 + 1, dtype=np.uint8)[1:] + a = a.view(np.float64) + a[:] = [1., 2., 3.] + else: + a = np.array([1., 2., 3.]) + + a *= 1. / scaling # the casting code also uses the reciprocal. + return a.view(SF(scaling)) + + def test_sfloat_rescaled(self): + sf = SF(1.) + sf2 = sf.scaled_by(2.) + assert sf2.get_scaling() == 2. + sf6 = sf2.scaled_by(3.) + assert sf6.get_scaling() == 6. + + def test_class_discovery(self): + # This does not test much, since we always discover the scaling as 1. + # But most of NumPy (when writing) does not understand DType classes + dt, _ = discover_array_params([1., 2., 3.], dtype=SF) + assert dt == SF(1.) 
+ + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_scaled_float_from_floats(self, scaling): + a = np.array([1., 2., 3.], dtype=SF(scaling)) + + assert a.dtype.get_scaling() == scaling + assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.]) + + def test_repr(self): + # Check the repr, mainly to cover the code paths: + assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)" + + def test_dtype_str(self): + assert SF(1.).str == "_ScaledFloatTestDType(scaling=1.0)" + + def test_dtype_name(self): + assert SF(1.).name == "_ScaledFloatTestDType64" + + def test_sfloat_structured_dtype_printing(self): + dt = np.dtype([("id", int), ("value", SF(0.5))]) + # repr of structured dtypes need special handling because the + # implementation bypasses the object repr + assert "('value', '_ScaledFloatTestDType64')" in repr(dt) + + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_sfloat_from_float(self, scaling): + a = np.array([1., 2., 3.]).astype(dtype=SF(scaling)) + + assert a.dtype.get_scaling() == scaling + assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.]) + + @pytest.mark.parametrize("aligned", [True, False]) + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_sfloat_getitem(self, aligned, scaling): + a = self._get_array(1., aligned) + assert a.tolist() == [1., 2., 3.] + + @pytest.mark.parametrize("aligned", [True, False]) + def test_sfloat_casts(self, aligned): + a = self._get_array(1., aligned) + + assert np.can_cast(a, SF(-1.), casting="equiv") + assert not np.can_cast(a, SF(-1.), casting="no") + na = a.astype(SF(-1.)) + assert_array_equal(-1 * na.view(np.float64), a.view(np.float64)) + + assert np.can_cast(a, SF(2.), casting="same_kind") + assert not np.can_cast(a, SF(2.), casting="safe") + a2 = a.astype(SF(2.)) + assert_array_equal(2 * a2.view(np.float64), a.view(np.float64)) + + @pytest.mark.parametrize("aligned", [True, False]) + def test_sfloat_cast_internal_errors(self, aligned): + a = self._get_array(2e300, aligned) + + with pytest.raises(TypeError, + match="error raised inside the core-loop: non-finite factor!"): + a.astype(SF(2e-300)) + + def test_sfloat_promotion(self): + assert np.result_type(SF(2.), SF(3.)) == SF(3.) + assert np.result_type(SF(3.), SF(2.)) == SF(3.) + # Float64 -> SF(1.) and then promotes normally, so both of this work: + assert np.result_type(SF(3.), np.float64) == SF(3.) + assert np.result_type(np.float64, SF(0.5)) == SF(1.) + + # Test an undefined promotion: + with pytest.raises(TypeError): + np.result_type(SF(1.), np.int64) + + def test_basic_multiply(self): + a = self._get_array(2.) + b = self._get_array(4.) + + res = a * b + # multiplies dtype scaling and content separately: + assert res.dtype.get_scaling() == 8. + expected_view = a.view(np.float64) * b.view(np.float64) + assert_array_equal(res.view(np.float64), expected_view) + + def test_possible_and_impossible_reduce(self): + # For reductions to work, the first and last operand must have the + # same dtype. For this parametric DType that is not necessarily true. + a = self._get_array(2.) + # Addition reduction works (as of writing requires to pass initial + # because setting a scaled-float from the default `0` fails). + res = np.add.reduce(a, initial=0.) + assert res == a.astype(np.float64).sum() + + # But each multiplication changes the factor, so a reduction is not + # possible (the relaxed version of the old refusal to handle any + # flexible dtype). 
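+        # Concretely, multiply resolves SF(2.) * SF(2.) to SF(4.), so after a
+        # single step the accumulator dtype no longer matches the remaining
+        # operands; no single loop dtype can carry the whole reduction.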
+ with pytest.raises(TypeError, + match="the resolved dtypes are not compatible"): + np.multiply.reduce(a) + + def test_basic_ufunc_at(self): + float_a = np.array([1., 2., 3.]) + b = self._get_array(2.) + + float_b = b.view(np.float64).copy() + np.multiply.at(float_b, [1, 1, 1], float_a) + np.multiply.at(b, [1, 1, 1], float_a) + + assert_array_equal(b.view(np.float64), float_b) + + def test_basic_multiply_promotion(self): + float_a = np.array([1., 2., 3.]) + b = self._get_array(2.) + + res1 = float_a * b + res2 = b * float_a + + # one factor is one, so we get the factor of b: + assert res1.dtype == res2.dtype == b.dtype + expected_view = float_a * b.view(np.float64) + assert_array_equal(res1.view(np.float64), expected_view) + assert_array_equal(res2.view(np.float64), expected_view) + + # Check that promotion works when `out` is used: + np.multiply(b, float_a, out=res2) + with pytest.raises(TypeError): + # The promoter accepts this (maybe it should not), but the SFloat + # result cannot be cast to integer: + np.multiply(b, float_a, out=np.arange(3)) + + def test_basic_addition(self): + a = self._get_array(2.) + b = self._get_array(4.) + + res = a + b + # addition uses the type promotion rules for the result: + assert res.dtype == np.result_type(a.dtype, b.dtype) + expected_view = (a.astype(res.dtype).view(np.float64) + + b.astype(res.dtype).view(np.float64)) + assert_array_equal(res.view(np.float64), expected_view) + + def test_addition_cast_safety(self): + """The addition method is special for the scaled float, because it + includes the "cast" between different factors, thus cast-safety + is influenced by the implementation. + """ + a = self._get_array(2.) + b = self._get_array(-2.) + c = self._get_array(3.) + + # sign change is "equiv": + np.add(a, b, casting="equiv") + with pytest.raises(TypeError): + np.add(a, b, casting="no") + + # Different factor is "same_kind" (default) so check that "safe" fails + with pytest.raises(TypeError): + np.add(a, c, casting="safe") + + # Check that casting the output fails also (done by the ufunc here) + with pytest.raises(TypeError): + np.add(a, a, out=c, casting="safe") + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_casts_to_bool(self, ufunc): + a = self._get_array(2.) + a[0] = 0. # make sure first element is considered False. + + float_equiv = a.astype(float) + expected = ufunc(float_equiv, float_equiv) + res = ufunc(a, a) + assert_array_equal(res, expected) + + # also check that the same works for reductions: + expected = ufunc.reduce(float_equiv) + res = ufunc.reduce(a) + assert_array_equal(res, expected) + + # The output casting does not match the bool, bool -> bool loop: + with pytest.raises(TypeError): + ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv") + + def test_wrapped_and_wrapped_reductions(self): + a = self._get_array(2.) + float_equiv = a.astype(float) + + expected = np.hypot(float_equiv, float_equiv) + res = np.hypot(a, a) + assert res.dtype == a.dtype + res_float = res.view(np.float64) * 2 + assert_array_equal(res_float, expected) + + # Also check reduction (keepdims, due to incorrect getitem) + res = np.hypot.reduce(a, keepdims=True) + assert res.dtype == a.dtype + expected = np.hypot.reduce(float_equiv, keepdims=True) + assert res.view(np.float64) * 2 == expected + + def test_astype_class(self): + # Very simple test that we accept `.astype()` also on the class. 
+ # ScaledFloat always returns the default descriptor, but it does + # check the relevant code paths. + arr = np.array([1., 2., 3.], dtype=object) + + res = arr.astype(SF) # passing the class class + expected = arr.astype(SF(1.)) # above will have discovered 1. scaling + assert_array_equal(res.view(np.float64), expected.view(np.float64)) + + def test_creation_class(self): + # passing in a dtype class should return + # the default descriptor + arr1 = np.array([1., 2., 3.], dtype=SF) + assert arr1.dtype == SF(1.) + arr2 = np.array([1., 2., 3.], dtype=SF(1.)) + assert_array_equal(arr1.view(np.float64), arr2.view(np.float64)) + assert arr1.dtype == arr2.dtype + + assert np.empty(3, dtype=SF).dtype == SF(1.) + assert np.empty_like(arr1, dtype=SF).dtype == SF(1.) + assert np.zeros(3, dtype=SF).dtype == SF(1.) + assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.) + + def test_np_save_load(self): + # this monkeypatch is needed because pickle + # uses the repr of a type to reconstruct it + np._ScaledFloatTestDType = SF + + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + + # adapted from RoundtripTest.roundtrip in np.save tests + with NamedTemporaryFile("wb", delete=False, suffix=".npz") as f: + with pytest.warns(UserWarning) as record: + np.savez(f.name, arr) + + assert len(record) == 1 + + with np.load(f.name, allow_pickle=True) as data: + larr = data["arr_0"] + assert_array_equal(arr.view(np.float64), larr.view(np.float64)) + assert larr.dtype == arr.dtype == SF(1.0) + + del np._ScaledFloatTestDType + + def test_flatiter(self): + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + + for i, val in enumerate(arr.flat): + assert arr[i] == val + + @pytest.mark.parametrize( + "index", [ + [1, 2], ..., slice(None, 2, None), + np.array([True, True, False]), np.array([0, 1]) + ], ids=["int_list", "ellipsis", "slice", "bool_array", "int_array"]) + def test_flatiter_index(self, index): + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + np.testing.assert_array_equal( + arr[index].view(np.float64), arr.flat[index].view(np.float64)) + + arr2 = arr.copy() + arr[index] = 5.0 + arr2.flat[index] = 5.0 + np.testing.assert_array_equal( + arr.view(np.float64), arr2.view(np.float64)) + +def test_type_pickle(): + # can't actually unpickle, but we can pickle (if in namespace) + import pickle + + np._ScaledFloatTestDType = SF + + s = pickle.dumps(SF) + res = pickle.loads(s) + assert res is SF + + del np._ScaledFloatTestDType + + +def test_is_numeric(): + assert SF._is_numeric diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cython.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cython.py new file mode 100644 index 0000000..fb3839f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_cython.py @@ -0,0 +1,351 @@ +import os +import subprocess +import sys +import sysconfig +from datetime import datetime + +import pytest + +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM, assert_array_equal + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + + # Note: keep in sync with the one in pyproject.toml + required_version = "3.0.6" + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +if IS_EDITABLE: + pytest.skip( + "Editable 
install doesn't support tests with a compile step", + allow_module_level=True + ) + + +@pytest.fixture(scope='module') +def install_temp(tmpdir_factory): + # Based in part on test_cython from random.tests.test_extending + if IS_WASM: + pytest.skip("No subprocess") + + srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython') + build_dir = tmpdir_factory.mktemp("cython_test") / "build" + os.makedirs(build_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(srcdir)], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(srcdir)], + cwd=build_dir + ) + try: + subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + except subprocess.CalledProcessError: + print("----------------") + print("meson build failed when doing") + print(f"'meson setup --native-file {native_file} {srcdir}'") + print("'meson compile -vv'") + print(f"in {build_dir}") + print("----------------") + raise + + sys.path.append(str(build_dir)) + + +def test_is_timedelta64_object(install_temp): + import checks + + assert checks.is_td64(np.timedelta64(1234)) + assert checks.is_td64(np.timedelta64(1234, "ns")) + assert checks.is_td64(np.timedelta64("NaT", "ns")) + + assert not checks.is_td64(1) + assert not checks.is_td64(None) + assert not checks.is_td64("foo") + assert not checks.is_td64(np.datetime64("now", "s")) + + +def test_is_datetime64_object(install_temp): + import checks + + assert checks.is_dt64(np.datetime64(1234, "ns")) + assert checks.is_dt64(np.datetime64("NaT", "ns")) + + assert not checks.is_dt64(1) + assert not checks.is_dt64(None) + assert not checks.is_dt64("foo") + assert not checks.is_dt64(np.timedelta64(1234)) + + +def test_get_datetime64_value(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + + result = checks.get_dt64_value(dt64) + expected = dt64.view("i8") + + assert result == expected + + +def test_get_timedelta64_value(install_temp): + import checks + + td64 = np.timedelta64(12345, "h") + + result = checks.get_td64_value(td64) + expected = td64.view("i8") + + assert result == expected + + +def test_get_datetime64_unit(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + result = checks.get_dt64_unit(dt64) + expected = 10 + assert result == expected + + td64 = np.timedelta64(12345, "h") + result = checks.get_dt64_unit(td64) + expected = 5 + assert result == expected + + +def test_abstract_scalars(install_temp): + import checks + + assert checks.is_integer(1) + assert checks.is_integer(np.int8(1)) + assert checks.is_integer(np.uint64(1)) + +def test_default_int(install_temp): + import checks + + assert checks.get_default_integer() is np.dtype(int) + + +def test_ravel_axis(install_temp): + import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + +def 
test_convert_datetime64_to_datetimestruct(install_temp): + # GH#21199 + import checks + + res = checks.convert_datetime64_to_datetimestruct() + + exp = { + "year": 2022, + "month": 3, + "day": 15, + "hour": 20, + "min": 1, + "sec": 55, + "us": 260292, + "ps": 0, + "as": 0, + } + + assert res == exp + + +class TestDatetimeStrings: + def test_make_iso_8601_datetime(self, install_temp): + # GH#21199 + import checks + dt = datetime(2016, 6, 2, 10, 45, 19) + # uses NPY_FR_s + result = checks.make_iso_8601_datetime(dt) + assert result == b"2016-06-02T10:45:19" + + def test_get_datetime_iso_8601_strlen(self, install_temp): + # GH#21199 + import checks + # uses NPY_FR_ns + res = checks.get_datetime_iso_8601_strlen() + assert res == 48 + + +@pytest.mark.parametrize( + "arrays", + [ + [np.random.rand(2)], + [np.random.rand(2), np.random.rand(3, 1)], + [np.random.rand(2), np.random.rand(2, 3, 2), np.random.rand(1, 3, 2)], + [np.random.rand(2, 1)] * 4 + [np.random.rand(1, 1, 1)], + ] +) +def test_multiiter_fields(install_temp, arrays): + import checks + bcast = np.broadcast(*arrays) + + assert bcast.ndim == checks.get_multiiter_number_of_dims(bcast) + assert bcast.size == checks.get_multiiter_size(bcast) + assert bcast.numiter == checks.get_multiiter_num_of_iterators(bcast) + assert bcast.shape == checks.get_multiiter_shape(bcast) + assert bcast.index == checks.get_multiiter_current_index(bcast) + assert all( + x.base is y.base + for x, y in zip(bcast.iters, checks.get_multiiter_iters(bcast)) + ) + + +def test_dtype_flags(install_temp): + import checks + dtype = np.dtype("i,O") # dtype with somewhat interesting flags + assert dtype.flags == checks.get_dtype_flags(dtype) + + +def test_conv_intp(install_temp): + import checks + + class myint: + def __int__(self): + return 3 + + # These conversion passes via `__int__`, not `__index__`: + assert checks.conv_intp(3.) 
== 3 + assert checks.conv_intp(myint()) == 3 + + +def test_npyiter_api(install_temp): + import checks + arr = np.random.rand(3, 2) + + it = np.nditer(arr) + assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) + assert checks.get_npyiter_ndim(it) == it.ndim == 1 + assert checks.npyiter_has_index(it) == it.has_index == False + + it = np.nditer(arr, flags=["c_index"]) + assert checks.npyiter_has_index(it) == it.has_index == True + assert ( + checks.npyiter_has_delayed_bufalloc(it) + == it.has_delayed_bufalloc + == False + ) + + it = np.nditer(arr, flags=["buffered", "delay_bufalloc"]) + assert ( + checks.npyiter_has_delayed_bufalloc(it) + == it.has_delayed_bufalloc + == True + ) + + it = np.nditer(arr, flags=["multi_index"]) + assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) + assert checks.npyiter_has_multi_index(it) == it.has_multi_index == True + assert checks.get_npyiter_ndim(it) == it.ndim == 2 + assert checks.test_get_multi_index_iter_next(it, arr) + + arr2 = np.random.rand(2, 1, 2) + it = np.nditer([arr, arr2]) + assert checks.get_npyiter_nop(it) == it.nop == 2 + assert checks.get_npyiter_size(it) == it.itersize == 12 + assert checks.get_npyiter_ndim(it) == it.ndim == 3 + assert all( + x is y for x, y in zip(checks.get_npyiter_operands(it), it.operands) + ) + assert all( + np.allclose(x, y) + for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) + ) + + +def test_fillwithbytes(install_temp): + import checks + + arr = checks.compile_fillwithbyte() + assert_array_equal(arr, np.ones((1, 2))) + + +def test_complex(install_temp): + from checks import inc2_cfloat_struct + + arr = np.array([0, 10 + 10j], dtype="F") + inc2_cfloat_struct(arr) + assert arr[1] == (12 + 12j) + + +def test_npystring_pack(install_temp): + """Check that the cython API can write to a vstring array.""" + import checks + + arr = np.array(['a', 'b', 'c'], dtype='T') + assert checks.npystring_pack(arr) == 0 + + # checks.npystring_pack writes to the beginning of the array + assert arr[0] == "Hello world" + +def test_npystring_load(install_temp): + """Check that the cython API can load strings from a vstring array.""" + import checks + + arr = np.array(['abcd', 'b', 'c'], dtype='T') + result = checks.npystring_load(arr) + assert result == 'abcd' + + +def test_npystring_multiple_allocators(install_temp): + """Check that the cython API can acquire/release multiple vstring allocators.""" + import checks + + dt = np.dtypes.StringDType(na_object=None) + arr1 = np.array(['abcd', 'b', 'c'], dtype=dt) + arr2 = np.array(['a', 'b', 'c'], dtype=dt) + + assert checks.npystring_pack_multiple(arr1, arr2) == 0 + assert arr1[0] == "Hello world" + assert arr1[-1] is None + assert arr2[0] == "test this" + + +def test_npystring_allocators_other_dtype(install_temp): + """Check that allocators for non-StringDType arrays is NULL.""" + import checks + + arr1 = np.array([1, 2, 3], dtype='i') + arr2 = np.array([4, 5, 6], dtype='i') + + assert checks.npystring_allocators_other_types(arr1, arr2) == 0 + + +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') +def test_npy_uintp_type_enum(): + import checks + assert checks.check_npy_uintp_type_enum() diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_datetime.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_datetime.py new file mode 100644 index 0000000..1cbacb8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_datetime.py @@ -0,0 +1,2710 @@ 
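+# Orientation for the datetime64/timedelta64 semantics exercised in this
+# file: values are an int64 count of a time unit relative to the Unix epoch,
+# with NaT as the sentinel. Illustrative doctest (added commentary, not part
+# of the upstream test module):
+#
+# >>> import numpy as np
+# >>> int(np.datetime64('1970-01-01', 'D').astype('i8'))   # offset from epoch
+# 0
+# >>> bool(np.datetime64('NaT') == np.datetime64('NaT'))   # NaT never equal
+# False
+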
+import datetime
+import pickle
+
+import pytest
+
+import numpy
+import numpy as np
+from numpy.testing import (
+    IS_WASM,
+    assert_,
+    assert_array_equal,
+    assert_equal,
+    assert_raises,
+    assert_raises_regex,
+    assert_warns,
+    suppress_warnings,
+)
+
+# Use pytz to test out various time zones if available
+try:
+    from pytz import timezone as tz
+    _has_pytz = True
+except ImportError:
+    _has_pytz = False
+
+try:
+    RecursionError
+except NameError:
+    RecursionError = RuntimeError  # python < 3.5
+
+
+def _assert_equal_hash(v1, v2):
+    assert v1 == v2
+    assert hash(v1) == hash(v2)
+    assert v2 in {v1}
+
+
+class TestDateTime:
+
+    def test_string(self):
+        msg = "no explicit representation of timezones available for " \
+              "np.datetime64"
+        with pytest.warns(UserWarning, match=msg):
+            np.datetime64('2000-01-01T00+01')
+
+    def test_datetime(self):
+        msg = "no explicit representation of timezones available for " \
+              "np.datetime64"
+        with pytest.warns(UserWarning, match=msg):
+            t0 = np.datetime64('2023-06-09T12:18:40Z', 'ns')
+
+        t0 = np.datetime64('2023-06-09T12:18:40', 'ns')
+
+    def test_datetime_dtype_creation(self):
+        for unit in ['Y', 'M', 'W', 'D',
+                     'h', 'm', 's', 'ms', 'us',
+                     'μs',  # alias for us
+                     'ns', 'ps', 'fs', 'as']:
+            dt1 = np.dtype(f'M8[750{unit}]')
+            assert_(dt1 == np.dtype(f'datetime64[750{unit}]'))
+            dt2 = np.dtype(f'm8[{unit}]')
+            assert_(dt2 == np.dtype(f'timedelta64[{unit}]'))
+
+        # Generic units shouldn't add [] to the end
+        assert_equal(str(np.dtype("M8")), "datetime64")
+
+        # Should be possible to specify the endianness
+        assert_equal(np.dtype("=M8"), np.dtype("M8"))
+        assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
+        assert_(np.dtype(">M8") == np.dtype("M8") or
+                np.dtype("<M8") == np.dtype("M8"))
+        assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
+                np.dtype("<M8[D]") == np.dtype("M8[D]"))
+        assert_(np.dtype(">M8") != np.dtype("<M8"))
+        assert_(np.dtype(">m8") == np.dtype("m8") or
+                np.dtype("<m8") == np.dtype("m8"))
+        assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
+                np.dtype("<m8[D]") == np.dtype("m8[D]"))
+        assert_(np.dtype(">m8") != np.dtype("<m8"))
+
+    def test_datetime_nat_casting(self):
+        a = np.array('NaT', dtype='M8[D]')
+        b = np.datetime64('NaT', '[D]')
+
+        # Scalars -> Scalars
+        assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
+        assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
+        assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
+        assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
+        assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
+
+        # Arrays -> Scalars
+        assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
+        assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
+        assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
+        assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
+        assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
+
+        # NaN -> NaT
+        nan = np.array([np.nan] * 8 + [0])
+        fnan = nan.astype('f')
+        lnan = nan.astype('g')
+        cnan = nan.astype('D')
+        cfnan = nan.astype('F')
+        clnan = nan.astype('G')
+        hnan = nan.astype(np.half)
+
+        nat = np.array([np.datetime64('NaT')] * 8 + [np.datetime64(0, 'D')])
+        assert_equal(nan.astype('M8[ns]'), nat)
+        assert_equal(fnan.astype('M8[ns]'), nat)
+        assert_equal(lnan.astype('M8[ns]'), nat)
+        assert_equal(cnan.astype('M8[ns]'), nat)
+        assert_equal(cfnan.astype('M8[ns]'), nat)
+        assert_equal(clnan.astype('M8[ns]'), nat)
+        assert_equal(hnan.astype('M8[ns]'), nat)
+
+        nat = np.array([np.timedelta64('NaT')] * 8 + [np.timedelta64(0)])
+        assert_equal(nan.astype('timedelta64[ns]'), nat)
+        assert_equal(fnan.astype('timedelta64[ns]'), nat)
+        assert_equal(lnan.astype('timedelta64[ns]'), nat)
+        assert_equal(cnan.astype('timedelta64[ns]'), nat)
+        assert_equal(cfnan.astype('timedelta64[ns]'), nat)
+        
assert_equal(clnan.astype('timedelta64[ns]'), nat) + assert_equal(hnan.astype('timedelta64[ns]'), nat) + + def test_days_creation(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 - 365) + assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3) + assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 + 366) + assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), + (1900 - 1970) * 365 - (1970 - 1900) // 4) + assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), + (1900 - 1970) * 365 - (1970 - 1900) // 4 + 365) + assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3 * 365 - 1) + assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2 * 365 - 1) + assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1 * 365) + assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0 * 365) + assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1 * 365) + assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2 * 365) + assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3 * 365 + 1) + assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4 * 365 + 1) + assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4) + assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366) + assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3) + assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3 + 366) + + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 28) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 29) + assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 28) + assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 29) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366 + 31 + 28 + 21) + + def test_days_to_pydate(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('O'), + datetime.date(1599, 1, 1)) + assert_equal(np.array('1600', dtype='M8[D]').astype('O'), + datetime.date(1600, 1, 1)) + assert_equal(np.array('1601', dtype='M8[D]').astype('O'), + datetime.date(1601, 1, 1)) + assert_equal(np.array('1900', dtype='M8[D]').astype('O'), + datetime.date(1900, 1, 1)) + assert_equal(np.array('1901', dtype='M8[D]').astype('O'), + datetime.date(1901, 1, 1)) + assert_equal(np.array('2000', dtype='M8[D]').astype('O'), + datetime.date(2000, 1, 1)) + assert_equal(np.array('2001', dtype='M8[D]').astype('O'), + datetime.date(2001, 1, 1)) + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), + datetime.date(1600, 2, 29)) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), + datetime.date(1600, 3, 1)) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), + datetime.date(2001, 3, 22)) + + def test_dtype_comparison(self): + assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) + assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) + assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) + assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) + + def 
test_pydatetime_creation(self): + a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') + assert_equal(a[0], a[1]) + # Will fail if the date changes during the exact right moment + a = np.array(['today', datetime.date.today()], dtype='M8[D]') + assert_equal(a[0], a[1]) + # datetime.datetime.now() returns local time, not UTC + #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') + #assert_equal(a[0], a[1]) + + # we can give a datetime.date time units + assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), + np.array(np.datetime64('1960-03-12T00:00:00'))) + + def test_datetime_string_conversion(self): + a = ['2011-03-16', '1920-01-01', '2013-05-19'] + str_a = np.array(a, dtype='S') + uni_a = np.array(a, dtype='U') + dt_a = np.array(a, dtype='M') + + # String to datetime + assert_equal(dt_a, str_a.astype('M')) + assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = str_a + assert_equal(dt_a, dt_b) + + # Datetime to string + assert_equal(str_a, dt_a.astype('S0')) + str_b = np.empty_like(str_a) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + # Unicode to datetime + assert_equal(dt_a, uni_a.astype('M')) + assert_equal(dt_a.dtype, uni_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = uni_a + assert_equal(dt_a, dt_b) + + # Datetime to unicode + assert_equal(uni_a, dt_a.astype('U')) + uni_b = np.empty_like(uni_a) + uni_b[...] = dt_a + assert_equal(uni_a, uni_b) + + # Datetime to long string - gh-9712 + assert_equal(str_a, dt_a.astype((np.bytes_, 128))) + str_b = np.empty(str_a.shape, dtype=(np.bytes_, 128)) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) + def test_time_byteswapping(self, time_dtype): + times = np.array(["2017", "NaT"], dtype=time_dtype) + times_swapped = times.astype(times.dtype.newbyteorder()) + assert_array_equal(times, times_swapped) + + unswapped = times_swapped.view(np.dtype("int64").newbyteorder()) + assert_array_equal(unswapped, times.view(np.int64)) + + @pytest.mark.parametrize(["time1", "time2"], + [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")]) + def test_time_byteswapped_cast(self, time1, time2): + dtype1 = np.dtype(time1) + dtype2 = np.dtype(time2) + times = np.array(["2017", "NaT"], dtype=dtype1) + expected = times.astype(dtype2) + + # Test that every byte-swapping combination also returns the same + # results (previous tests check that this comparison works fine). 
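+        # (dtype.newbyteorder() describes the same moments with the opposite
+        # byte layout, so every swapped combination must cast identically.)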
+        res = times.astype(dtype1.newbyteorder()).astype(dtype2)
+        assert_array_equal(res, expected)
+        res = times.astype(dtype2.newbyteorder())
+        assert_array_equal(res, expected)
+        res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder())
+        assert_array_equal(res, expected)
+
+    @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
+    @pytest.mark.parametrize("str_dtype", ["U", "S"])
+    def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
+        times = np.array(["2017", "NaT"], dtype=time_dtype)
+        # Unfortunately, timedelta does not roundtrip:
+        from_strings = np.array(["2017", "NaT"], dtype=str_dtype)
+        to_strings = times.astype(str_dtype)  # assume this is correct
+
+        # Check that conversion from times to string works if src is swapped:
+        times_swapped = times.astype(times.dtype.newbyteorder())
+        res = times_swapped.astype(str_dtype)
+        assert_array_equal(res, to_strings)
+        # And also if both are swapped:
+        res = times_swapped.astype(to_strings.dtype.newbyteorder())
+        assert_array_equal(res, to_strings)
+        # only destination is swapped:
+        res = times.astype(to_strings.dtype.newbyteorder())
+        assert_array_equal(res, to_strings)
+
+        # Check that conversion from string to times works if src is swapped:
+        from_strings_swapped = from_strings.astype(
+            from_strings.dtype.newbyteorder())
+        res = from_strings_swapped.astype(time_dtype)
+        assert_array_equal(res, times)
+        # And if both are swapped:
+        res = from_strings_swapped.astype(times.dtype.newbyteorder())
+        assert_array_equal(res, times)
+        # Only destination is swapped:
+        res = from_strings.astype(times.dtype.newbyteorder())
+        assert_array_equal(res, times)
+
+    def test_datetime_array_str(self):
+        a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
+        assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
+
+        a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
+        assert_equal(np.array2string(a, separator=', ',
+                     formatter={'datetime': lambda x:
+                                f"'{np.datetime_as_string(x, timezone='UTC')}'"}),
+                     "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
+
+        # Check that one NaT doesn't corrupt subsequent entries
+        a = np.array(['2010', 'NaT', '2030']).astype('M')
+        assert_equal(str(a), "['2010' 'NaT' '2030']")
+
+    def test_timedelta_array_str(self):
+        a = np.array([-1, 0, 100], dtype='m')
+        assert_equal(str(a), "[ -1 0 100]")
+        a = np.array(['NaT', 'NaT'], dtype='m')
+        assert_equal(str(a), "['NaT' 'NaT']")
+        # Check right-alignment with NaTs
+        a = np.array([-1, 'NaT', 0], dtype='m')
+        assert_equal(str(a), "[ -1 'NaT' 0]")
+        a = np.array([-1, 'NaT', 1234567], dtype='m')
+        assert_equal(str(a), "[ -1 'NaT' 1234567]")
+
+        # Test with other byteorder:
+        a = np.array([-1, 'NaT', 1234567], dtype='>m')
+        assert_equal(str(a), "[ -1 'NaT' 1234567]")
+        a = np.array([-1, 'NaT', 1234567], dtype='<m')
+        assert_equal(str(a), "[ -1 'NaT' 1234567]")
+
+    def test_pickle(self):
+        # Check that loading pickles from 1.6 works
+        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" \
+              b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" \
+              b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
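+        # The byte string is a protocol-0 pickle of a NumPy 1.6-era dtype: it
+        # rebuilds np.dtype('M8') and applies a state tuple carrying the '>'
+        # byte order and ('us', 1, 1, 1) datetime metadata, hence the
+        # big-endian datetime64[us] expected below (a reading of the payload,
+        # not a documented format).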
+        assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
+
+    def test_setstate(self):
+        "Verify that datetime dtype __setstate__ can handle bad arguments"
+        dt = np.dtype('>M8[us]')
+        assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
+        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+        assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
+        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+
+    def test_dtype_promotion(self):
+        # datetime <op> datetime computes the metadata gcd
+        # timedelta <op> timedelta computes the metadata gcd
+        for mM in ['m', 'M']:
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[2Y]'), np.dtype(mM + '8[2Y]')),
+                np.dtype(mM + '8[2Y]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[12Y]'), np.dtype(mM + '8[15Y]')),
+                np.dtype(mM + '8[3Y]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[62M]'), np.dtype(mM + '8[24M]')),
+                np.dtype(mM + '8[2M]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[1W]'), np.dtype(mM + '8[2D]')),
+                np.dtype(mM + '8[1D]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[W]'), np.dtype(mM + '8[13s]')),
+                np.dtype(mM + '8[s]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[13W]'), np.dtype(mM + '8[49s]')),
+                np.dtype(mM + '8[7s]'))
+        # timedelta <op> timedelta raises when there is no reasonable gcd
+        assert_raises(TypeError, np.promote_types,
+                      np.dtype('m8[Y]'), np.dtype('m8[D]'))
+        assert_raises(TypeError, np.promote_types,
+                      np.dtype('m8[M]'), np.dtype('m8[W]'))
+        # timedelta and float cannot be safely cast with each other
+        assert_raises(TypeError, np.promote_types, "float32", "m8")
+        assert_raises(TypeError, np.promote_types, "m8", "float32")
+        assert_raises(TypeError, np.promote_types, "uint64", "m8")
+        assert_raises(TypeError, np.promote_types, "m8", "uint64")
+
+        # timedelta <op> timedelta may overflow with big unit ranges
+        assert_raises(OverflowError, np.promote_types,
+                      np.dtype('m8[W]'), np.dtype('m8[fs]'))
+        assert_raises(OverflowError, np.promote_types,
+                      np.dtype('m8[s]'), np.dtype('m8[as]'))
+
+    def test_cast_overflow(self):
+        # gh-4486
+        def cast():
+            numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
+        assert_raises(OverflowError, cast)
+
+        def cast2():
+            numpy.datetime64("2014").astype("<M8[fs]")
+        assert_raises(OverflowError, cast2)
+
+    @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+                                      'us', 'ns', 'ps', 'fs', 'as'])
+    def test_isnat(self, unit):
+        res = np.array([False, False, True])
+        arr = np.array([123, -321, "NaT"], dtype=f'<datetime64[{unit}]')
+        assert_equal(np.isnat(arr), res)
+        arr = np.array([123, -321, "NaT"], dtype=f'>datetime64[{unit}]')
+        assert_equal(np.isnat(arr), res)
+        arr = np.array([123, -321, "NaT"], dtype=f'<timedelta64[{unit}]')
+        assert_equal(np.isnat(arr), res)
+        arr = np.array([123, -321, "NaT"], dtype=f'>timedelta64[{unit}]')
+        assert_equal(np.isnat(arr), res)
+
+    def test_isnat_error(self):
+        # Test that only datetime dtype arrays are accepted
+        for t in np.typecodes["All"]:
+            if t in np.typecodes["Datetime"]:
+                continue
+            assert_raises(TypeError, np.isnat, np.zeros(10, t))
+
+    def test_isfinite_scalar(self):
+        assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
+        assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
+        assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
+
+        assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
+        assert_(np.isfinite(np.timedelta64(34, "ms")))
+
+    @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+                                      'us', 'ns', 'ps', 'fs', 'as'])
+    @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
+                                      '<timedelta64[%s]', '>timedelta64[%s]'])
+    def test_isfinite_isinf_isnan_units(self, unit, dstr):
+        '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
+        '''
+        arr_val = [123, -321, "NaT"]
+        arr = np.array(arr_val, dtype=(dstr % unit))
+        pos = np.array([True, True, False])
+        neg = np.array([False, False, True])
+        false = np.array([False, False, False])
+        assert_equal(np.isfinite(arr), pos)
+        assert_equal(np.isinf(arr), false)
+        assert_equal(np.isnan(arr), neg)
+
+    def
test_assert_equal(self): + assert_raises(AssertionError, assert_equal, + np.datetime64('nat'), np.timedelta64('nat')) + + def test_corecursive_input(self): + # construct a co-recursive list + a, b = [], [] + a.append(b) + b.append(a) + obj_arr = np.array([None]) + obj_arr[0] = a + + # At some point this caused a stack overflow (gh-11154). Now raises + # ValueError since the nested list cannot be converted to a datetime. + assert_raises(ValueError, obj_arr.astype, 'M8') + assert_raises(ValueError, obj_arr.astype, 'm8') + + @pytest.mark.parametrize("shape", [(), (1,)]) + def test_discovery_from_object_array(self, shape): + arr = np.array("2020-10-10", dtype=object).reshape(shape) + res = np.array("2020-10-10", dtype="M8").reshape(shape) + assert res.dtype == np.dtype("M8[D]") + assert_equal(arr.astype("M8"), res) + arr[...] = np.bytes_("2020-10-10") # try a numpy string type + assert_equal(arr.astype("M8"), res) + arr = arr.astype("S") + assert_equal(arr.astype("S").astype("M8"), res) + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", + # compound units + "10D", "2M", + ]) + def test_limit_symmetry(self, time_unit): + """ + Dates should have symmetric limits around the unix epoch at +/-np.int64 + """ + epoch = np.datetime64(0, time_unit) + latest = np.datetime64(np.iinfo(np.int64).max, time_unit) + earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) + + # above should not have overflowed + assert earliest < epoch < latest + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", + pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), + "D", "h", "m", + "s", "ms", "us", "ns", "ps", "fs", "as", + pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")), + ]) + @pytest.mark.parametrize("sign", [-1, 1]) + def test_limit_str_roundtrip(self, time_unit, sign): + """ + Limits should roundtrip when converted to strings. + + This tests the conversion to and from npy_datetimestruct. + """ + # TODO: add absolute (gold standard) time span limit strings + limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) + + # Convert to string and back. Explicit unit needed since the day and + # week reprs are not distinguishable. 
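+        # For instance, one week and seven days render identically:
+        # >>> str(np.datetime64(1, 'W')), str(np.datetime64(7, 'D'))
+        # ('1970-01-08', '1970-01-08')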
+ limit_via_str = np.datetime64(str(limit), time_unit) + assert limit_via_str == limit + + def test_datetime_hash_nat(self): + nat1 = np.datetime64() + nat2 = np.datetime64() + assert nat1 is not nat2 + assert nat1 != nat2 + assert hash(nat1) != hash(nat2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('unit', ('h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks_vs_pydatetime(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + pydt = dt2.astype(datetime.datetime) + assert isinstance(pydt, datetime.datetime) + _assert_equal_hash(pydt, dt2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_negative(self, unit): + dt = np.datetime64(-102894, 'W') # -002-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + # can only go down to "fs" before integer overflow + @pytest.mark.parametrize('unit', ('m', 's', 'ms', 'us', 'ns', 'ps', 'fs')) + def test_datetime_hash_minutes(self, unit): + dt = np.datetime64(3, 'm') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + @pytest.mark.parametrize('unit', ('ns', 'ps', 'fs', 'as')) + def test_datetime_hash_ns(self, unit): + dt = np.datetime64(3, 'ns') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) # 11552-09-04 + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_positive(self, wk, unit): + dt = np.datetime64(wk, 'W') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + def test_timedelta_hash_generic(self): + assert_raises(ValueError, hash, np.timedelta64(123)) # generic + + @pytest.mark.parametrize('unit', ('Y', 'M')) + def test_timedelta_hash_year_month(self, unit): + td = np.timedelta64(45, 'Y') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks_vs_pydelta(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + pytd = td2.astype(datetime.timedelta) + assert isinstance(pytd, datetime.timedelta) + _assert_equal_hash(pytd, td2) + + @pytest.mark.parametrize('unit', ('ms', 'us', 'ns', 'ps', 'fs', 'as')) + def test_timedelta_hash_ms(self, unit): + td = np.timedelta64(3, 'ms') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_big_positive(self, wk, unit): + td = np.timedelta64(wk, 'W') + td2 = np.timedelta64(td, unit) + 
_assert_equal_hash(td, td2) + + +class TestDateTimeData: + + def test_basic(self): + a = np.array(['1980-03-23'], dtype=np.datetime64) + assert_equal(np.datetime_data(a.dtype), ('D', 1)) + + def test_bytes(self): + # byte units are converted to unicode + dt = np.datetime64('2000', (b'ms', 5)) + assert np.datetime_data(dt.dtype) == ('ms', 5) + + dt = np.datetime64('2000', b'5ms') + assert np.datetime_data(dt.dtype) == ('ms', 5) + + def test_non_ascii(self): + # μs is normalized to μ + dt = np.datetime64('2000', ('μs', 5)) + assert np.datetime_data(dt.dtype) == ('us', 5) + + dt = np.datetime64('2000', '5μs') + assert np.datetime_data(dt.dtype) == ('us', 5) + + +def test_comparisons_return_not_implemented(): + # GH#17017 + + class custom: + __array_priority__ = 10000 + + obj = custom() + + dt = np.datetime64('2000', 'ns') + td = dt - dt + + for item in [dt, td]: + assert item.__eq__(obj) is NotImplemented + assert item.__ne__(obj) is NotImplemented + assert item.__le__(obj) is NotImplemented + assert item.__lt__(obj) is NotImplemented + assert item.__ge__(obj) is NotImplemented + assert item.__gt__(obj) is NotImplemented diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_defchararray.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_defchararray.py new file mode 100644 index 0000000..2607953 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_defchararray.py @@ -0,0 +1,825 @@ +import pytest + +import numpy as np +from numpy._core.multiarray import _vec_string +from numpy.testing import ( + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + +kw_unicode_true = {'unicode': True} # make 2to3 work properly +kw_unicode_false = {'unicode': False} + +class TestBasic: + def test_from_object_array(self): + A = np.array([['abc', 2], + ['long ', '0123456789']], dtype='O') + B = np.char.array(A) + assert_equal(B.dtype.itemsize, 10) + assert_array_equal(B, [[b'abc', b'2'], + [b'long', b'0123456789']]) + + def test_from_object_array_unicode(self): + A = np.array([['abc', 'Sigma \u03a3'], + ['long ', '0123456789']], dtype='O') + assert_raises(ValueError, np.char.array, (A,)) + B = np.char.array(A, **kw_unicode_true) + assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) + assert_array_equal(B, [['abc', 'Sigma \u03a3'], + ['long', '0123456789']]) + + def test_from_string_array(self): + A = np.array([[b'abc', b'foo'], + [b'long ', b'0123456789']]) + assert_equal(A.dtype.type, np.bytes_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B[0, 0] = 'changed' + assert_(B[0, 0] != A[0, 0]) + C = np.char.asarray(A) + assert_array_equal(C, A) + assert_equal(C.dtype, A.dtype) + C[0, 0] = 'changed again' + assert_(C[0, 0] != B[0, 0]) + assert_(C[0, 0] == A[0, 0]) + + def test_from_unicode_array(self): + A = np.array([['abc', 'Sigma \u03a3'], + ['long ', '0123456789']]) + assert_equal(A.dtype.type, np.str_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B = np.char.array(A, **kw_unicode_true) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + + def fail(): + np.char.array(A, **kw_unicode_false) + + assert_raises(UnicodeEncodeError, fail) + + def test_unicode_upconvert(self): + A = np.char.array(['abc']) + B = np.char.array(['\u03a3']) + assert_(issubclass((A + B).dtype.type, np.str_)) + + def test_from_string(self): + A = 
np.char.array(b'abc') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 3) + assert_(issubclass(A.dtype.type, np.bytes_)) + + def test_from_unicode(self): + A = np.char.array('\u03a3') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 1) + assert_equal(A.itemsize, 4) + assert_(issubclass(A.dtype.type, np.str_)) + +class TestVecString: + def test_non_existent_method(self): + + def fail(): + _vec_string('a', np.bytes_, 'bogus') + + assert_raises(AttributeError, fail) + + def test_non_string_array(self): + + def fail(): + _vec_string(1, np.bytes_, 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_args_tuple(self): + + def fail(): + _vec_string(['a'], np.bytes_, 'strip', 1) + + assert_raises(TypeError, fail) + + def test_invalid_type_descr(self): + + def fail(): + _vec_string(['a'], 'BOGUS', 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_function_args(self): + + def fail(): + _vec_string(['a'], np.bytes_, 'strip', (1,)) + + assert_raises(TypeError, fail) + + def test_invalid_result_type(self): + + def fail(): + _vec_string(['a'], np.int_, 'strip') + + assert_raises(TypeError, fail) + + def test_broadcast_error(self): + + def fail(): + _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],)) + + assert_raises(ValueError, fail) + + +class TestWhitespace: + def setup_method(self): + self.A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.char.chararray) + self.B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + + def test1(self): + assert_(np.all(self.A == self.B)) + assert_(np.all(self.A >= self.B)) + assert_(np.all(self.A <= self.B)) + assert_(not np.any(self.A > self.B)) + assert_(not np.any(self.A < self.B)) + assert_(not np.any(self.A != self.B)) + +class TestChar: + def setup_method(self): + self.A = np.array('abc1', dtype='c').view(np.char.chararray) + + def test_it(self): + assert_equal(self.A.shape, (4,)) + assert_equal(self.A.upper()[:2].tobytes(), b'AB') + +class TestComparisons: + def setup_method(self): + self.A = np.array([['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']]).view(np.char.chararray) + self.B = np.array([['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']]).view(np.char.chararray) + + def test_not_equal(self): + assert_array_equal((self.A != self.B), + [[True, True, False], [True, True, True]]) + + def test_equal(self): + assert_array_equal((self.A == self.B), + [[False, False, True], [False, False, False]]) + + def test_greater_equal(self): + assert_array_equal((self.A >= self.B), + [[False, False, True], [True, False, True]]) + + def test_less_equal(self): + assert_array_equal((self.A <= self.B), + [[True, True, True], [False, True, False]]) + + def test_greater(self): + assert_array_equal((self.A > self.B), + [[False, False, False], [True, False, True]]) + + def test_less(self): + assert_array_equal((self.A < self.B), + [[True, True, False], [False, True, False]]) + + def test_type(self): + out1 = np.char.equal(self.A, self.B) + out2 = np.char.equal('a', 'a') + assert_(isinstance(out1, np.ndarray)) + assert_(isinstance(out2, np.ndarray)) + +class TestComparisonsMixed1(TestComparisons): + """Ticket #1276""" + + def setup_method(self): + TestComparisons.setup_method(self) + self.B = np.array( + [['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) + +class TestComparisonsMixed2(TestComparisons): + """Ticket #1276""" + + def setup_method(self): + TestComparisons.setup_method(self) + self.A = np.array( + [['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']], 
np.str_).view(np.char.chararray) + +class TestInformation: + def setup_method(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + self.B = np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + # Array with longer strings, > MEMCHR_CUT_OFF in code. + self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', + '01234567890123456789012345']) + .view(np.char.chararray)) + + def test_len(self): + assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + + def test_count(self): + assert_(issubclass(self.A.count('').dtype.type, np.integer)) + assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + # Python doesn't seem to like counting NULL characters + # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) + # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + + def test_endswith(self): + assert_(issubclass(self.A.endswith('').dtype.type, np.bool)) + assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.endswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + @pytest.mark.parametrize( + "dtype, encode", + [("U", str), + ("S", lambda x: x.encode('ascii')), + ]) + def test_find(self, dtype, encode): + A = self.A.astype(dtype) + assert_(issubclass(A.find(encode('a')).dtype.type, np.integer)) + assert_array_equal(A.find(encode('a')), + [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.find(encode('3')), + [[-1, -1], [2, -1], [2, -1]]) + assert_array_equal(A.find(encode('a'), 0, 2), + [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.find([encode('1'), encode('P')]), + [[-1, -1], [0, -1], [0, 1]]) + C = self.C.astype(dtype) + assert_array_equal(C.find(encode('M')), [12, -1]) + + def test_index(self): + + def fail(): + self.A.index('a') + + assert_raises(ValueError, fail) + assert_(np.char.index('abcba', 'b') == 1) + assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) + + def test_isalnum(self): + assert_(issubclass(self.A.isalnum().dtype.type, np.bool)) + assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + + def test_isalpha(self): + assert_(issubclass(self.A.isalpha().dtype.type, np.bool)) + assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + + def test_isdigit(self): + assert_(issubclass(self.A.isdigit().dtype.type, np.bool)) + assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + + def test_islower(self): + assert_(issubclass(self.A.islower().dtype.type, np.bool)) + assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + + def test_isspace(self): + assert_(issubclass(self.A.isspace().dtype.type, np.bool)) + assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + + def test_istitle(self): + assert_(issubclass(self.A.istitle().dtype.type, np.bool)) + assert_array_equal(self.A.istitle(), [[False, 
False], [False, False], [False, False]]) + + def test_isupper(self): + assert_(issubclass(self.A.isupper().dtype.type, np.bool)) + assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + + def test_rfind(self): + assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) + assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + + def test_rindex(self): + + def fail(): + self.A.rindex('a') + + assert_raises(ValueError, fail) + assert_(np.char.rindex('abcba', 'b') == 3) + assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) + + def test_startswith(self): + assert_(issubclass(self.A.startswith('').dtype.type, np.bool)) + assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.startswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + +class TestMethods: + def setup_method(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.char.chararray) + self.B = np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]).view( + np.char.chararray) + + def test_capitalize(self): + tgt = [[b' abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(self.A.capitalize(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(self.B.capitalize().dtype.type, np.str_)) + assert_array_equal(self.B.capitalize(), tgt) + + def test_center(self): + assert_(issubclass(self.A.center(10).dtype.type, np.bytes_)) + C = self.A.center([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.center(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.center(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO ', b' FOO '], + [b' FOO ', b' FOO ']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_decode(self): + A = np.char.array([b'\\u03a3']) + assert_(A.decode('unicode-escape')[0] == '\u03a3') + + def test_encode(self): + B = self.B.encode('unicode_escape') + assert_(B[0][0] == ' \\u03a3 '.encode('latin1')) + + def test_expandtabs(self): + T = self.A.expandtabs() + assert_(T[2, 0] == b'123 345 \0') + + def test_join(self): + # NOTE: list(b'123') == [49, 50, 51] + # so that b','.join(b'123') results to an error on Py3 + A0 = self.A.decode('ascii') + + A = np.char.join([',', '#'], A0) + assert_(issubclass(A.dtype.type, np.str_)) + tgt = np.array([[' ,a,b,c, ', ''], + ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], + ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) + assert_array_equal(np.char.join([',', '#'], A0), tgt) + + def test_ljust(self): + assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_)) + + C = self.A.ljust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.ljust(20, b'#') + assert_array_equal(C.startswith(b'#'), [ + [False, True], [False, False], [False, False]]) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.ljust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b'FOO ', b'FOO '], + [b'FOO ', b'FOO ']] + 
assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_lower(self): + tgt = [[b' abc ', b''], + [b'12345', b'mixedcase'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.lower().dtype.type, np.bytes_)) + assert_array_equal(self.A.lower(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'mixedcase'], + ['123 \t 345 \0 ', 'upper']] + assert_(issubclass(self.B.lower().dtype.type, np.str_)) + assert_array_equal(self.B.lower(), tgt) + + def test_lstrip(self): + tgt = [[b'abc ', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(self.A.lstrip(), tgt) + + tgt = [[b' abc', b''], + [b'2345', b'ixedCase'], + [b'23 \t 345 \x00', b'UPPER']] + assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + + tgt = [['\u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(self.B.lstrip().dtype.type, np.str_)) + assert_array_equal(self.B.lstrip(), tgt) + + def test_partition(self): + P = self.A.partition([b'3', b'M']) + tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] + assert_(issubclass(P.dtype.type, np.bytes_)) + assert_array_equal(P, tgt) + + def test_replace(self): + R = self.A.replace([b'3', b'a'], + [b'##########', b'@']) + tgt = [[b' abc ', b''], + [b'12##########45', b'MixedC@se'], + [b'12########## \t ##########45 \x00 ', b'UPPER']] + assert_(issubclass(R.dtype.type, np.bytes_)) + assert_array_equal(R, tgt) + # Test special cases that should just return the input array, + # since replacements are not possible or do nothing. + S1 = self.A.replace(b'A very long byte string, longer than A', b'') + assert_array_equal(S1, self.A) + S2 = self.A.replace(b'', b'') + assert_array_equal(S2, self.A) + S3 = self.A.replace(b'3', b'3') + assert_array_equal(S3, self.A) + S4 = self.A.replace(b'3', b'', count=0) + assert_array_equal(S4, self.A) + + def test_replace_count_and_size(self): + a = np.array(['0123456789' * i for i in range(4)] + ).view(np.char.chararray) + r1 = a.replace('5', 'ABCDE') + assert r1.dtype.itemsize == (3 * 10 + 3 * 4) * 4 + assert_array_equal(r1, np.array(['01234ABCDE6789' * i + for i in range(4)])) + r2 = a.replace('5', 'ABCDE', count=1) + assert r2.dtype.itemsize == (3 * 10 + 4) * 4 + r3 = a.replace('5', 'ABCDE', count=0) + assert r3.dtype.itemsize == a.dtype.itemsize + assert_array_equal(r3, a) + # Negative values mean to replace all. + r4 = a.replace('5', 'ABCDE', count=-1) + assert r4.dtype.itemsize == (3 * 10 + 3 * 4) * 4 + assert_array_equal(r4, r1) + # We can do count on an element-by-element basis. 
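+        # Editorial sketch (not part of upstream NumPy; output abridged):
+        # `count` broadcasts like any other operand, so each element can get
+        # its own replacement limit, e.g.:
+        #   >>> a2 = np.char.array(["aaa", "aaa"])
+        #   >>> a2.replace("a", "b", count=[1, 2])
+        #   chararray(['baa', 'bba'], ...)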
+ r5 = a.replace('5', 'ABCDE', count=[-1, -1, -1, 1]) + assert r5.dtype.itemsize == (3 * 10 + 4) * 4 + assert_array_equal(r5, np.array( + ['01234ABCDE6789' * i for i in range(3)] + + ['01234ABCDE6789' + '0123456789' * 2])) + + def test_replace_broadcasting(self): + a = np.array('0,0,0').view(np.char.chararray) + r1 = a.replace('0', '1', count=np.arange(3)) + assert r1.dtype == a.dtype + assert_array_equal(r1, np.array(['0,0,0', '1,0,0', '1,1,0'])) + r2 = a.replace('0', [['1'], ['2']], count=np.arange(1, 4)) + assert_array_equal(r2, np.array([['1,0,0', '1,1,0', '1,1,1'], + ['2,0,0', '2,2,0', '2,2,2']])) + r3 = a.replace(['0', '0,0', '0,0,0'], 'X') + assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X'])) + + def test_rjust(self): + assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_)) + + C = self.A.rjust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.rjust(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_array_equal(C.endswith(b'#'), + [[False, True], [False, False], [False, False]]) + + C = np.char.rjust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO', b' FOO'], + [b' FOO', b' FOO']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_rpartition(self): + P = self.A.rpartition([b'3', b'M']) + tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] + assert_(issubclass(P.dtype.type, np.bytes_)) + assert_array_equal(P, tgt) + + def test_rsplit(self): + A = self.A.rsplit(b'3') + tgt = [[[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_rstrip(self): + assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_)) + + tgt = [[b' abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_array_equal(self.A.rstrip(), tgt) + + tgt = [[b' abc ', b''], + [b'1234', b'MixedCase'], + [b'123 \t 345 \x00', b'UPP'] + ] + assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + + tgt = [[' \u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.rstrip().dtype.type, np.str_)) + assert_array_equal(self.B.rstrip(), tgt) + + def test_strip(self): + tgt = [[b'abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_(issubclass(self.A.strip().dtype.type, np.bytes_)) + assert_array_equal(self.A.strip(), tgt) + + tgt = [[b' abc ', b''], + [b'234', b'ixedCas'], + [b'23 \t 345 \x00', b'UPP']] + assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + + tgt = [['\u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.strip().dtype.type, np.str_)) + assert_array_equal(self.B.strip(), tgt) + + def test_split(self): + A = self.A.split(b'3') + tgt = [ + [[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_splitlines(self): + A = np.char.array(['abc\nfds\nwer']).splitlines() + assert_(issubclass(A.dtype.type, np.object_)) + assert_(A.shape == (1,)) + assert_(len(A[0]) == 3) + + def test_swapcase(self): + tgt = [[b' ABC ', b''], + [b'12345', b'mIXEDcASE'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(self.A.swapcase(), tgt) + + tgt = [[' \u03c3 ', ''], + 
['12345', 'mIXEDcASE'], + ['123 \t 345 \0 ', 'upper']] + assert_(issubclass(self.B.swapcase().dtype.type, np.str_)) + assert_array_equal(self.B.swapcase(), tgt) + + def test_title(self): + tgt = [[b' Abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.title().dtype.type, np.bytes_)) + assert_array_equal(self.A.title(), tgt) + + tgt = [[' \u03a3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(self.B.title().dtype.type, np.str_)) + assert_array_equal(self.B.title(), tgt) + + def test_upper(self): + tgt = [[b' ABC ', b''], + [b'12345', b'MIXEDCASE'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.upper().dtype.type, np.bytes_)) + assert_array_equal(self.A.upper(), tgt) + + tgt = [[' \u03a3 ', ''], + ['12345', 'MIXEDCASE'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(self.B.upper().dtype.type, np.str_)) + assert_array_equal(self.B.upper(), tgt) + + def test_isnumeric(self): + + def fail(): + self.A.isnumeric() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isnumeric().dtype.type, np.bool)) + assert_array_equal(self.B.isnumeric(), [ + [False, False], [True, False], [False, False]]) + + def test_isdecimal(self): + + def fail(): + self.A.isdecimal() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isdecimal().dtype.type, np.bool)) + assert_array_equal(self.B.isdecimal(), [ + [False, False], [True, False], [False, False]]) + + +class TestOperations: + def setup_method(self): + self.A = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + self.B = np.array([['efg', '456'], + ['051', 'tuv']]).view(np.char.chararray) + + def test_add(self): + AB = np.array([['abcefg', '123456'], + ['789051', 'xyztuv']]).view(np.char.chararray) + assert_array_equal(AB, (self.A + self.B)) + assert_(len((self.A + self.B)[0][0]) == 6) + + def test_radd(self): + QA = np.array([['qabc', 'q123'], + ['q789', 'qxyz']]).view(np.char.chararray) + assert_array_equal(QA, ('q' + self.A)) + + def test_mul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) + + assert_array_equal(Ar, (self.A * r)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + A * ob + + def test_rmul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) + assert_array_equal(Ar, (r * self.A)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + ob * A + + def test_mod(self): + """Ticket #856""" + F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.char.chararray) + C = np.array([[3, 7], [19, 1]], dtype=np.int64) + FC = np.array([['3', '7.000000'], + ['19', 'np.int64(1)']]).view(np.char.chararray) + assert_array_equal(FC, F % C) + + A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.char.chararray) + A1 = np.array([['1.000', '1'], + ['1', repr(np.array(1)[()])]]).view(np.char.chararray) + assert_array_equal(A1, (A % 1)) + + A2 = np.array([['1.000', '2'], + ['3', repr(np.array(4)[()])]]).view(np.char.chararray) + assert_array_equal(A2, (A % [[1, 2], [3, 4]])) + + def test_rmod(self): + assert_(f"{self.A}" == str(self.A)) + assert_(f"{self.A!r}" == repr(self.A)) + + for ob in [42, object()]: + with assert_raises_regex( + TypeError, "unsupported operand type.* and 'chararray'"): + ob % self.A + + def 
test_slice(self): + """Regression test for https://github.com/numpy/numpy/issues/5982""" + + arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']], + dtype='S4').view(np.char.chararray) + sl1 = arr[:] + assert_array_equal(sl1, arr) + assert_(sl1.base is arr) + assert_(sl1.base.base is arr.base) + + sl2 = arr[:, :] + assert_array_equal(sl2, arr) + assert_(sl2.base is arr) + assert_(sl2.base.base is arr.base) + + assert_(arr[0, 0] == b'abc') + + @pytest.mark.parametrize('data', [['plate', ' ', 'shrimp'], + [b'retro', b' ', b'encabulator']]) + def test_getitem_length_zero_item(self, data): + # Regression test for gh-26375. + a = np.char.array(data) + # a.dtype.type() will be an empty string or bytes instance. + # The equality test will fail if a[1] has the wrong type + # or does not have length 0. + assert_equal(a[1], a.dtype.type()) + + +class TestMethodsEmptyArray: + def setup_method(self): + self.U = np.array([], dtype='U') + self.S = np.array([], dtype='S') + + def test_encode(self): + res = np.char.encode(self.U) + assert_array_equal(res, []) + assert_(res.dtype.char == 'S') + + def test_decode(self): + res = np.char.decode(self.S) + assert_array_equal(res, []) + assert_(res.dtype.char == 'U') + + def test_decode_with_reshape(self): + res = np.char.decode(self.S.reshape((1, 0, 1))) + assert_(res.shape == (1, 0, 1)) + + +class TestMethodsScalarValues: + def test_mod(self): + A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], dtype='S') + tgt = [[b'123 abc ', b'123'], + [b'12312345', b'123MixedCase'], + [b'123123 \t 345 \0 ', b'123UPPER']] + assert_array_equal(np.char.mod(b"123%s", A), tgt) + + def test_decode(self): + bytestring = b'\x81\xc1\x81\xc1\x81\xc1' + assert_equal(np.char.decode(bytestring, encoding='cp037'), + 'aAaAaA') + + def test_encode(self): + unicode = 'aAaAaA' + assert_equal(np.char.encode(unicode, encoding='cp037'), + b'\x81\xc1\x81\xc1\x81\xc1') + + def test_expandtabs(self): + s = "\tone level of indentation\n\t\ttwo levels of indentation" + assert_equal( + np.char.expandtabs(s, tabsize=2), + " one level of indentation\n two levels of indentation" + ) + + def test_join(self): + seps = np.array(['-', '_']) + assert_array_equal(np.char.join(seps, 'hello'), + ['h-e-l-l-o', 'h_e_l_l_o']) + + def test_partition(self): + assert_equal(np.char.partition('This string', ' '), + ['This', ' ', 'string']) + + def test_rpartition(self): + assert_equal(np.char.rpartition('This string here', ' '), + ['This string', ' ', 'here']) + + def test_replace(self): + assert_equal(np.char.replace('Python is good', 'good', 'great'), + 'Python is great') + + +def test_empty_indexing(): + """Regression test for ticket 1948.""" + # Check that indexing a chararray with an empty list/array returns an + # empty chararray instead of a chararray with a single empty string in it. + s = np.char.chararray((4,)) + assert_(s[[]].size == 0) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_deprecations.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_deprecations.py new file mode 100644 index 0000000..d90c155 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_deprecations.py @@ -0,0 +1,454 @@ +""" +Tests related to deprecation warnings. Also a convenient place +to document how deprecations should eventually be turned into errors. 
+ +""" +import contextlib +import warnings + +import numpy._core._struct_ufunc_tests as struct_ufunc +import pytest +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 + +import numpy as np +from numpy.testing import assert_raises, temppath + +try: + import pytz # noqa: F401 + _has_pytz = True +except ImportError: + _has_pytz = False + + +class _DeprecationTestCase: + # Just as warning: warnings uses re.match, so the start of this message + # must match. + message = '' + warning_cls = DeprecationWarning + + def setup_method(self): + self.warn_ctx = warnings.catch_warnings(record=True) + self.log = self.warn_ctx.__enter__() + + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. + # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + + def teardown_method(self): + self.warn_ctx.__exit__() + + def assert_deprecated(self, function, num=1, ignore_others=False, + function_fails=False, + exceptions=np._NoValue, + args=(), kwargs={}): + """Test if DeprecationWarnings are given and raised. + + This first checks if the function when called gives `num` + DeprecationWarnings, after that it tries to raise these + DeprecationWarnings and compares them with `exceptions`. + The exceptions can be different for cases where this code path + is simply not anticipated and the exception is replaced. + + Parameters + ---------- + function : callable + The function to test + num : int + Number of DeprecationWarnings to expect. This should normally be 1. + ignore_others : bool + Whether warnings of the wrong type should be ignored (note that + the message is not checked) + function_fails : bool + If the function would normally fail, setting this will check for + warnings inside a try/except block. + exceptions : Exception or tuple of Exceptions + Exception to expect when turning the warnings into an error. + The default checks for DeprecationWarnings. If exceptions is + empty the function is expected to run successfully. + args : tuple + Arguments for `function` + kwargs : dict + Keyword arguments for `function` + """ + __tracebackhide__ = True # Hide traceback for py.test + + # reset the log + self.log[:] = [] + + if exceptions is np._NoValue: + exceptions = (self.warning_cls,) + + if function_fails: + context_manager = contextlib.suppress(Exception) + else: + context_manager = contextlib.nullcontext() + with context_manager: + function(*args, **kwargs) + + # just in case, clear the registry + num_found = 0 + for warning in self.log: + if warning.category is self.warning_cls: + num_found += 1 + elif not ignore_others: + raise AssertionError( + "expected %s but got: %s" % + (self.warning_cls.__name__, warning.category)) + if num is not None and num_found != num: + msg = f"{len(self.log)} warnings found but {num} expected." 
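+            # (editorial note) append the collected warning messages so a
+            # count mismatch reports *which* warnings fired, not just how
+            # many were seen.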
+ lst = [str(w) for w in self.log] + raise AssertionError("\n".join([msg] + lst)) + + with warnings.catch_warnings(): + warnings.filterwarnings("error", message=self.message, + category=self.warning_cls) + try: + function(*args, **kwargs) + if exceptions != (): + raise AssertionError( + "No error raised during function call") + except exceptions: + if exceptions == (): + raise AssertionError( + "Error raised during function call") + + def assert_not_deprecated(self, function, args=(), kwargs={}): + """Test that warnings are not raised. + + This is just a shorthand for: + + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=tuple(), args=args, kwargs=kwargs) + """ + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=(), args=args, kwargs=kwargs) + + +class _VisibleDeprecationTestCase(_DeprecationTestCase): + warning_cls = np.exceptions.VisibleDeprecationWarning + + +class TestTestDeprecated: + def test_assert_deprecated(self): + test_case_instance = _DeprecationTestCase() + test_case_instance.setup_method() + assert_raises(AssertionError, + test_case_instance.assert_deprecated, + lambda: None) + + def foo(): + warnings.warn("foo", category=DeprecationWarning, stacklevel=2) + + test_case_instance.assert_deprecated(foo) + test_case_instance.teardown_method() + + +class TestBincount(_DeprecationTestCase): + # 2024-07-29, 2.1.0 + @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], + ['0', '1', '1']]) + def test_bincount_bad_list(self, badlist): + self.assert_deprecated(lambda: np.bincount(badlist)) + + +class TestGeneratorSum(_DeprecationTestCase): + # 2018-02-25, 1.15.0 + def test_generator_sum(self): + self.assert_deprecated(np.sum, args=((i for i in range(5)),)) + + +class BuiltInRoundComplexDType(_DeprecationTestCase): + # 2020-03-31 1.19.0 + deprecated_types = [np.csingle, np.cdouble, np.clongdouble] + not_deprecated_types = [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ] + + def test_deprecated(self): + for scalar_type in self.deprecated_types: + scalar = scalar_type(0) + self.assert_deprecated(round, args=(scalar,)) + self.assert_deprecated(round, args=(scalar, 0)) + self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + def test_not_deprecated(self): + for scalar_type in self.not_deprecated_types: + scalar = scalar_type(0) + self.assert_not_deprecated(round, args=(scalar,)) + self.assert_not_deprecated(round, args=(scalar, 0)) + self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + +class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): + # NumPy 1.20, 2020-09-03 + message = "concatenate with `axis=None` will use same-kind casting" + + def test_deprecated(self): + self.assert_deprecated(np.concatenate, + args=(([0.], [1.]),), + kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64)}) + + def test_not_deprecated(self): + self.assert_not_deprecated(np.concatenate, + args=(([0.], [1.]),), + kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64), + 'casting': "unsafe"}) + + with assert_raises(TypeError): + # Tests should notice if the deprecation warning is given first... 
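+            # (editorial note) i.e. the float -> int64 cast under
+            # "same_kind" must fail with TypeError up front; a deprecation
+            # warning emitted first would mean the old code path still runs.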
+ np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64), + casting="same_kind") + + +class TestCtypesGetter(_DeprecationTestCase): + # Deprecated 2021-05-18, Numpy 1.21.0 + warning_cls = DeprecationWarning + ctypes = np.array([1]).ctypes + + @pytest.mark.parametrize( + "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] + ) + def test_deprecated(self, name: str) -> None: + func = getattr(self.ctypes, name) + self.assert_deprecated(func) + + @pytest.mark.parametrize( + "name", ["data", "shape", "strides", "_as_parameter_"] + ) + def test_not_deprecated(self, name: str) -> None: + self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) + + +class TestMachAr(_DeprecationTestCase): + # Deprecated 2022-11-22, NumPy 1.25 + warning_cls = DeprecationWarning + + def test_deprecated_module(self): + self.assert_deprecated(lambda: np._core.MachAr) + + +class TestQuantileInterpolationDeprecation(_DeprecationTestCase): + # Deprecated 2021-11-08, NumPy 1.22 + @pytest.mark.parametrize("func", + [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) + def test_deprecated(self, func): + self.assert_deprecated( + lambda: func([0., 1.], 0., interpolation="linear")) + self.assert_deprecated( + lambda: func([0., 1.], 0., interpolation="nearest")) + + @pytest.mark.parametrize("func", + [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) + def test_both_passed(self, func): + with warnings.catch_warnings(): + # catch the DeprecationWarning so that it does not raise: + warnings.simplefilter("always", DeprecationWarning) + with pytest.raises(TypeError): + func([0., 1.], 0., interpolation="nearest", method="nearest") + + +class TestScalarConversion(_DeprecationTestCase): + # 2023-01-02, 1.25.0 + def test_float_conversion(self): + self.assert_deprecated(float, args=(np.array([3.14]),)) + + def test_behaviour(self): + b = np.array([[3.14]]) + c = np.zeros(5) + with pytest.warns(DeprecationWarning): + c[0] = b + + +class TestPyIntConversion(_DeprecationTestCase): + message = r".*stop allowing conversion of out-of-bound.*" + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_deprecated_scalar(self, dtype): + dtype = np.dtype(dtype) + info = np.iinfo(dtype) + + # Cover the most common creation paths (all end up in the + # same place): + def scalar(value, dtype): + dtype.type(value) + + def assign(value, dtype): + arr = np.array([0, 0, 0], dtype=dtype) + arr[2] = value + + def create(value, dtype): + np.array([value], dtype=dtype) + + for creation_func in [scalar, assign, create]: + try: + self.assert_deprecated( + lambda: creation_func(info.min - 1, dtype)) + except OverflowError: + pass # OverflowErrors always happened also before and are OK. + + try: + self.assert_deprecated( + lambda: creation_func(info.max + 1, dtype)) + except OverflowError: + pass # OverflowErrors always happened also before and are OK. + + +@pytest.mark.parametrize("name", ["str", "bytes", "object"]) +def test_future_scalar_attributes(name): + # FutureWarning added 2022-11-17, NumPy 1.24, + assert name not in dir(np) # we may want to not add them + with pytest.warns(FutureWarning, + match=f"In the future .*{name}"): + assert not hasattr(np, name) + + # Unfortunately, they are currently still valid via `np.dtype()` + np.dtype(name) + name in np._core.sctypeDict + + +# Ignore the above future attribute warning for this test. 
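+# Editorial sketch (indicative only, message abridged): the guard tested
+# below turns a removed alias lookup into a descriptive error, e.g.:
+#   >>> np.float
+#   AttributeError: ... `np.float` was a deprecated alias for the builtin
+#   `float` ...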
+@pytest.mark.filterwarnings("ignore:In the future:FutureWarning") +class TestRemovedGlobals: + # Removed 2023-01-12, NumPy 1.24.0 + # Not a deprecation, but the large error was added to aid those who missed + # the previous deprecation, and should be removed similarly to one + # (or faster). + @pytest.mark.parametrize("name", + ["object", "float", "complex", "str", "int"]) + def test_attributeerror_includes_info(self, name): + msg = f".*\n`np.{name}` was a deprecated alias for the builtin" + with pytest.raises(AttributeError, match=msg): + getattr(np, name) + + +class TestDeprecatedFinfo(_DeprecationTestCase): + # Deprecated in NumPy 1.25, 2023-01-16 + def test_deprecated_none(self): + self.assert_deprecated(np.finfo, args=(None,)) + + +class TestMathAlias(_DeprecationTestCase): + def test_deprecated_np_lib_math(self): + self.assert_deprecated(lambda: np.lib.math) + + +class TestLibImports(_DeprecationTestCase): + # Deprecated in Numpy 1.26.0, 2023-09 + def test_lib_functions_deprecation_call(self): + from numpy import in1d, row_stack, trapz + from numpy._core.numerictypes import maximum_sctype + from numpy.lib._function_base_impl import disp + from numpy.lib._npyio_impl import recfromcsv, recfromtxt + from numpy.lib._shape_base_impl import get_array_wrap + from numpy.lib._utils_impl import safe_eval + from numpy.lib.tests.test_io import TextIO + + self.assert_deprecated(lambda: safe_eval("None")) + + data_gen = lambda: TextIO('A,B\n0,1\n2,3') + kwargs = {'delimiter': ",", 'missing_values': "N/A", 'names': True} + self.assert_deprecated(lambda: recfromcsv(data_gen())) + self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) + + self.assert_deprecated(lambda: disp("test")) + self.assert_deprecated(get_array_wrap) + self.assert_deprecated(lambda: maximum_sctype(int)) + + self.assert_deprecated(lambda: in1d([1], [1])) + self.assert_deprecated(lambda: row_stack([[]])) + self.assert_deprecated(lambda: trapz([1], [1])) + self.assert_deprecated(lambda: np.chararray) + + +class TestDeprecatedDTypeAliases(_DeprecationTestCase): + + def _check_for_warning(self, func): + with warnings.catch_warnings(record=True) as caught_warnings: + func() + assert len(caught_warnings) == 1 + w = caught_warnings[0] + assert w.category is DeprecationWarning + assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) + + def test_a_dtype_alias(self): + for dtype in ["a", "a10"]: + f = lambda: np.dtype(dtype) + self._check_for_warning(f) + self.assert_deprecated(f) + f = lambda: np.array(["hello", "world"]).astype("a10") + self._check_for_warning(f) + self.assert_deprecated(f) + + +class TestDeprecatedArrayWrap(_DeprecationTestCase): + message = "__array_wrap__.*" + + def test_deprecated(self): + class Test1: + def __array__(self, dtype=None, copy=None): + return np.arange(4) + + def __array_wrap__(self, arr, context=None): + self.called = True + return 'pass context' + + class Test2(Test1): + def __array_wrap__(self, arr): + self.called = True + return 'pass' + + test1 = Test1() + test2 = Test2() + self.assert_deprecated(lambda: np.negative(test1)) + assert test1.called + self.assert_deprecated(lambda: np.negative(test2)) + assert test2.called + + +class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): + message = "Passing in a parenthesized single number" + + @pytest.mark.parametrize("string", ["(2)i,", "(3)3S,", "f,(2)f"]) + def test_parenthesized_repeat_count(self, string): + self.assert_deprecated(np.dtype, args=(string,)) + + +class 
TestDeprecatedSaveFixImports(_DeprecationTestCase): + # Deprecated in Numpy 2.1, 2024-05 + message = "The 'fix_imports' flag is deprecated and has no effect." + + def test_deprecated(self): + with temppath(suffix='.npy') as path: + sample_args = (path, np.array(np.zeros((1024, 10)))) + self.assert_not_deprecated(np.save, args=sample_args) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'fix_imports': False}) + for allow_pickle in [True, False]: + self.assert_not_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle, + 'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle, + 'fix_imports': False}) + + +class TestAddNewdocUFunc(_DeprecationTestCase): + # Deprecated in Numpy 2.2, 2024-11 + def test_deprecated(self): + self.assert_deprecated( + lambda: np._core.umath._add_newdoc_ufunc( + struct_ufunc.add_triplet, "new docs" + ) + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_dlpack.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_dlpack.py new file mode 100644 index 0000000..89c2403 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_dlpack.py @@ -0,0 +1,190 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import IS_PYPY, assert_array_equal + + +def new_and_old_dlpack(): + yield np.arange(5) + + class OldDLPack(np.ndarray): + # Support only the "old" version + def __dlpack__(self, stream=None): + return super().__dlpack__(stream=None) + + yield np.arange(5).view(OldDLPack) + + +class TestDLPack: + @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") + @pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)]) + def test_dunder_dlpack_refcount(self, max_version): + x = np.arange(5) + y = x.__dlpack__(max_version=max_version) + startcount = sys.getrefcount(x) + del y + assert startcount - sys.getrefcount(x) == 1 + + def test_dunder_dlpack_stream(self): + x = np.arange(5) + x.__dlpack__(stream=None) + + with pytest.raises(RuntimeError): + x.__dlpack__(stream=1) + + def test_dunder_dlpack_copy(self): + # Checks the argument parsing of __dlpack__ explicitly. + # Honoring the flag is tested in the from_dlpack round-tripping test. + x = np.arange(5) + x.__dlpack__(copy=True) + x.__dlpack__(copy=None) + x.__dlpack__(copy=False) + + with pytest.raises(ValueError): + # NOTE: The copy converter should be stricter, but not just here. 
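+            # (editorial note) an ndarray is not a valid value for the
+            # tri-state copy flag; only True, False, or None is meaningful,
+            # hence the ValueError.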
+ x.__dlpack__(copy=np.array([1, 2, 3])) + + def test_strides_not_multiple_of_itemsize(self): + dt = np.dtype([('int', np.int32), ('char', np.int8)]) + y = np.zeros((5,), dtype=dt) + z = y['int'] + + with pytest.raises(BufferError): + np.from_dlpack(z) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_from_dlpack_refcount(self, arr): + arr = arr.copy() + y = np.from_dlpack(arr) + startcount = sys.getrefcount(arr) + del y + assert startcount - sys.getrefcount(arr) == 1 + + @pytest.mark.parametrize("dtype", [ + np.bool, + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.complex64, np.complex128 + ]) + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_dtype_passthrough(self, arr, dtype): + x = arr.astype(dtype) + y = np.from_dlpack(x) + + assert y.dtype == x.dtype + assert_array_equal(x, y) + + def test_invalid_dtype(self): + x = np.asarray(np.datetime64('2021-05-27')) + + with pytest.raises(BufferError): + np.from_dlpack(x) + + def test_invalid_byte_swapping(self): + dt = np.dtype('=i8').newbyteorder() + x = np.arange(5, dtype=dt) + + with pytest.raises(BufferError): + np.from_dlpack(x) + + def test_non_contiguous(self): + x = np.arange(25).reshape((5, 5)) + + y1 = x[0] + assert_array_equal(y1, np.from_dlpack(y1)) + + y2 = x[:, 0] + assert_array_equal(y2, np.from_dlpack(y2)) + + y3 = x[1, :] + assert_array_equal(y3, np.from_dlpack(y3)) + + y4 = x[1] + assert_array_equal(y4, np.from_dlpack(y4)) + + y5 = np.diagonal(x).copy() + assert_array_equal(y5, np.from_dlpack(y5)) + + @pytest.mark.parametrize("ndim", range(33)) + def test_higher_dims(self, ndim): + shape = (1,) * ndim + x = np.zeros(shape, dtype=np.float64) + + assert shape == np.from_dlpack(x).shape + + def test_dlpack_device(self): + x = np.arange(5) + assert x.__dlpack_device__() == (1, 0) + y = np.from_dlpack(x) + assert y.__dlpack_device__() == (1, 0) + z = y[::2] + assert z.__dlpack_device__() == (1, 0) + + def dlpack_deleter_exception(self, max_version): + x = np.arange(5) + _ = x.__dlpack__(max_version=max_version) + raise RuntimeError + + @pytest.mark.parametrize("max_version", [None, (1, 0)]) + def test_dlpack_destructor_exception(self, max_version): + with pytest.raises(RuntimeError): + self.dlpack_deleter_exception(max_version=max_version) + + def test_readonly(self): + x = np.arange(5) + x.flags.writeable = False + # Raises without max_version + with pytest.raises(BufferError): + x.__dlpack__() + + # But works fine if we try with version + y = np.from_dlpack(x) + assert not y.flags.writeable + + def test_writeable(self): + x_new, x_old = new_and_old_dlpack() + + # new dlpacks respect writeability + y = np.from_dlpack(x_new) + assert y.flags.writeable + + # old dlpacks are not writeable for backwards compatibility + y = np.from_dlpack(x_old) + assert not y.flags.writeable + + def test_ndim0(self): + x = np.array(1.0) + y = np.from_dlpack(x) + assert_array_equal(x, y) + + def test_size1dims_arrays(self): + x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4), + buffer=np.ones(1000, dtype=np.uint8), order='F') + y = np.from_dlpack(x) + assert_array_equal(x, y) + + def test_copy(self): + x = np.arange(5) + + y = np.from_dlpack(x) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=False) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=True) + assert not np.may_share_memory(x, y) + + def test_device(self): + x = 
np.arange(5) + # requesting (1, 0), i.e. CPU device works in both calls: + x.__dlpack__(dl_device=(1, 0)) + np.from_dlpack(x, device="cpu") + np.from_dlpack(x, device=None) + + with pytest.raises(ValueError): + x.__dlpack__(dl_device=(10, 0)) + with pytest.raises(ValueError): + np.from_dlpack(x, device="gpu") diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_dtype.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_dtype.py new file mode 100644 index 0000000..684672a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_dtype.py @@ -0,0 +1,1995 @@ +import ctypes +import gc +import operator +import pickle +import random +import sys +import types +from itertools import permutations +from typing import Any + +import hypothesis +import pytest +from hypothesis.extra import numpy as hynp +from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational + +import numpy as np +import numpy.dtypes +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYSTON, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) + + +def assert_dtype_equal(a, b): + assert_equal(a, b) + assert_equal(hash(a), hash(b), + "two equivalent types do not hash to the same value !") + +def assert_dtype_not_equal(a, b): + assert_(a != b) + assert_(hash(a) != hash(b), + "two different types hash to the same value !") + +class TestBuiltin: + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object]) + def test_run(self, t): + """Only test hash runs at all.""" + dt = np.dtype(t) + hash(dt) + + @pytest.mark.parametrize('t', [int, float]) + def test_dtype(self, t): + # Make sure equivalent byte order char hash the same (e.g. < and = on + # little endian) + dt = np.dtype(t) + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + if dt == dt2: + assert_(dt.byteorder != dt2.byteorder, "bogus test") + assert_dtype_equal(dt, dt2) + else: + assert_(dt.byteorder != dt3.byteorder, "bogus test") + assert_dtype_equal(dt, dt3) + + def test_equivalent_dtype_hashing(self): + # Make sure equivalent dtypes with different type num hash equal + uintp = np.dtype(np.uintp) + if uintp.itemsize == 4: + left = uintp + right = np.dtype(np.uint32) + else: + left = uintp + right = np.dtype(np.ulonglong) + assert_(left == right) + assert_(hash(left) == hash(right)) + + def test_invalid_types(self): + # Make sure invalid type strings raise an error + + assert_raises(TypeError, np.dtype, 'O3') + assert_raises(TypeError, np.dtype, 'O5') + assert_raises(TypeError, np.dtype, 'O7') + assert_raises(TypeError, np.dtype, 'b3') + assert_raises(TypeError, np.dtype, 'h4') + assert_raises(TypeError, np.dtype, 'I5') + assert_raises(TypeError, np.dtype, 'e3') + assert_raises(TypeError, np.dtype, 'f5') + + if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: + assert_raises(TypeError, np.dtype, 'g12') + elif np.dtype('g').itemsize == 12: + assert_raises(TypeError, np.dtype, 'g16') + + if np.dtype('l').itemsize == 8: + assert_raises(TypeError, np.dtype, 'l4') + assert_raises(TypeError, np.dtype, 'L4') + else: + assert_raises(TypeError, np.dtype, 'l8') + assert_raises(TypeError, np.dtype, 'L8') + + if np.dtype('q').itemsize == 8: + assert_raises(TypeError, np.dtype, 'q4') + assert_raises(TypeError, np.dtype, 'Q4') + else: + assert_raises(TypeError, np.dtype, 'q8') + assert_raises(TypeError, np.dtype, 'Q8') + + # Make sure negative-sized dtype raises an error + assert_raises(TypeError, np.dtype, 'S-1') + 
assert_raises(TypeError, np.dtype, 'U-1') + assert_raises(TypeError, np.dtype, 'V-1') + + def test_richcompare_invalid_dtype_equality(self): + # Make sure objects that cannot be converted to valid + # dtypes results in False/True when compared to valid dtypes. + # Here 7 cannot be converted to dtype. No exceptions should be raised + + assert not np.dtype(np.int32) == 7, "dtype richcompare failed for ==" + assert np.dtype(np.int32) != 7, "dtype richcompare failed for !=" + + @pytest.mark.parametrize( + 'operation', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_richcompare_invalid_dtype_comparison(self, operation): + # Make sure TypeError is raised for comparison operators + # for invalid dtypes. Here 7 is an invalid dtype. + + with pytest.raises(TypeError): + operation(np.dtype(np.int32), 7) + + @pytest.mark.parametrize("dtype", + ['Bool', 'Bytes0', 'Complex32', 'Complex64', + 'Datetime64', 'Float16', 'Float32', 'Float64', + 'Int8', 'Int16', 'Int32', 'Int64', + 'Object0', 'Str0', 'Timedelta64', + 'UInt8', 'UInt16', 'Uint32', 'UInt32', + 'Uint64', 'UInt64', 'Void0', + "Float128", "Complex128"]) + def test_numeric_style_types_are_invalid(self, dtype): + with assert_raises(TypeError): + np.dtype(dtype) + + def test_expired_dtypes_with_bad_bytesize(self): + match: str = r".*removed in NumPy 2.0.*" + with pytest.raises(TypeError, match=match): + np.dtype("int0") + with pytest.raises(TypeError, match=match): + np.dtype("uint0") + with pytest.raises(TypeError, match=match): + np.dtype("bool8") + with pytest.raises(TypeError, match=match): + np.dtype("bytes0") + with pytest.raises(TypeError, match=match): + np.dtype("str0") + with pytest.raises(TypeError, match=match): + np.dtype("object0") + with pytest.raises(TypeError, match=match): + np.dtype("void0") + + @pytest.mark.parametrize( + 'value', + ['m8', 'M8', 'datetime64', 'timedelta64', + 'i4, (2,3)f8, f4', 'S3, 3u8, (3,4)S10', + '>f', '= (3, 12), + reason="Python 3.12 has immortal refcounts, this test will no longer " + "work. See gh-23986" +) +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestStructuredObjectRefcounting: + """These tests cover various uses of complicated structured types which + include objects and thus require reference counting. + """ + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + @pytest.mark.parametrize(["creation_func", "creation_obj"], [ + pytest.param(np.empty, None, + # None is probably used for too many things + marks=pytest.mark.skip("unreliable due to python's behaviour")), + (np.ones, 1), + (np.zeros, 0)]) + def test_structured_object_create_delete(self, dt, pat, count, singleton, + creation_func, creation_obj): + """Structured object reference counting in creation and deletion""" + # The test assumes that 0, 1, and None are singletons. 
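+        # Editorial sketch (not part of the upstream test): the accounting
+        # below leans on CPython caching small ints, roughly:
+        #   base = sys.getrefcount(1)
+        #   arr = np.ones(3, dt)          # every object field references 1
+        #   assert sys.getrefcount(1) - base == count * 3
+        #   del arr                       # refs are released again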
+ gc.collect() + before = sys.getrefcount(creation_obj) + arr = creation_func(3, dt) + + now = sys.getrefcount(creation_obj) + assert now - before == count * 3 + del arr + now = sys.getrefcount(creation_obj) + assert now == before + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + def test_structured_object_item_setting(self, dt, pat, count, singleton): + """Structured object reference counting for simple item setting""" + one = 1 + + gc.collect() + before = sys.getrefcount(singleton) + arr = np.array([pat] * 3, dt) + assert sys.getrefcount(singleton) - before == count * 3 + # Fill with `1` and check that it was replaced correctly: + before2 = sys.getrefcount(one) + arr[...] = one + after2 = sys.getrefcount(one) + assert after2 - before2 == count * 3 + del arr + gc.collect() + assert sys.getrefcount(one) == before2 + assert sys.getrefcount(singleton) == before + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + @pytest.mark.parametrize( + ['shape', 'index', 'items_changed'], + [((3,), ([0, 2],), 2), + ((3, 2), ([0, 2], slice(None)), 4), + ((3, 2), ([0, 2], [1]), 2), + ((3,), ([True, False, True]), 2)]) + def test_structured_object_indexing(self, shape, index, items_changed, + dt, pat, count, singleton): + """Structured object reference counting for advanced indexing.""" + # Use two small negative values (should be singletons, but less likely + # to run into race-conditions). This failed in some threaded envs + # When using 0 and 1. If it fails again, should remove all explicit + # checks, and rely on `pytest-leaks` reference count checker only. + val0 = -4 + val1 = -5 + + arr = np.full(shape, val0, dt) + + gc.collect() + before_val0 = sys.getrefcount(val0) + before_val1 = sys.getrefcount(val1) + # Test item getting: + part = arr[index] + after_val0 = sys.getrefcount(val0) + assert after_val0 - before_val0 == count * items_changed + del part + # Test item setting: + arr[index] = val1 + gc.collect() + after_val0 = sys.getrefcount(val0) + after_val1 = sys.getrefcount(val1) + assert before_val0 - after_val0 == count * items_changed + assert after_val1 - before_val1 == count * items_changed + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + def test_structured_object_take_and_repeat(self, dt, pat, count, singleton): + """Structured object reference counting for specialized functions. + The older functions such as take and repeat use different code paths + then item setting (when writing this). + """ + indices = [0, 1] + + arr = np.array([pat] * 3, dt) + gc.collect() + before = sys.getrefcount(singleton) + res = arr.take(indices) + after = sys.getrefcount(singleton) + assert after - before == count * 2 + new = res.repeat(10) + gc.collect() + after_repeat = sys.getrefcount(singleton) + assert after_repeat - after == count * 2 * 10 + + +class TestStructuredDtypeSparseFields: + """Tests subarray fields which contain sparse dtypes so that + not all memory is used by the dtype work. Such dtype's should + leave the underlying memory unchanged. + """ + dtype = np.dtype([('a', {'names': ['aa', 'ab'], 'formats': ['f', 'f'], + 'offsets': [0, 4]}, (2, 3))]) + sparse_dtype = np.dtype([('a', {'names': ['ab'], 'formats': ['f'], + 'offsets': [4]}, (2, 3))]) + + def test_sparse_field_assignment(self): + arr = np.zeros(3, self.dtype) + sparse_arr = arr.view(self.sparse_dtype) + + sparse_arr[...] 
= np.finfo(np.float32).max + # dtype is reduced when accessing the field, so shape is (3, 2, 3): + assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3))) + + def test_sparse_field_assignment_fancy(self): + # Fancy assignment goes to the copyswap function for complex types: + arr = np.zeros(3, self.dtype) + sparse_arr = arr.view(self.sparse_dtype) + + sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max + # dtype is reduced when accessing the field, so shape is (3, 2, 3): + assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3))) + + +class TestMonsterType: + """Test deeply nested subtypes.""" + + def test1(self): + simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], + 'titles': ['Red pixel', 'Blue pixel']}) + a = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((int, (3, 2))))]) + b = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((int, (3, 2))))]) + assert_dtype_equal(a, b) + + c = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((a, (3, 2))))]) + d = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((a, (3, 2))))]) + assert_dtype_equal(c, d) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_list_recursion(self): + l = [] + l.append(('f', l)) + with pytest.raises(RecursionError): + np.dtype(l) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_tuple_recursion(self): + d = np.int32 + for i in range(100000): + d = (d, (1,)) + with pytest.raises(RecursionError): + np.dtype(d) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_dict_recursion(self): + d = {"names": ['self'], "formats": [None], "offsets": [0]} + d['formats'][0] = d + with pytest.raises(RecursionError): + np.dtype(d) + + +class TestMetadata: + def test_no_metadata(self): + d = np.dtype(int) + assert_(d.metadata is None) + + def test_metadata_takes_dict(self): + d = np.dtype(int, metadata={'datum': 1}) + assert_(d.metadata == {'datum': 1}) + + def test_metadata_rejects_nondict(self): + assert_raises(TypeError, np.dtype, int, metadata='datum') + assert_raises(TypeError, np.dtype, int, metadata=1) + assert_raises(TypeError, np.dtype, int, metadata=None) + + def test_nested_metadata(self): + d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))]) + assert_(d['a'].metadata == {'datum': 1}) + + def test_base_metadata_copied(self): + d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1}))) + assert_(d.metadata == {'datum': 1}) + +class TestString: + def test_complex_dtype_str(self): + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(str(dt), + "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])]") + + # If the sticky aligned flag is set to True, it makes the + # str() function use a dict representation with an 'aligned' flag + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], + (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])], + align=True) + assert_equal(str(dt), + "{'names': ['top', 
'bottom']," + " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]]," + " 'offsets': [0, 76800]," + " 'itemsize': 80000," + " 'aligned': True}") + with np.printoptions(legacy='1.21'): + assert_equal(str(dt), + "{'names':['top','bottom'], " + "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,))," + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]], " + "'offsets':[0,76800], " + "'itemsize':80000, " + "'aligned':True}") + assert_equal(np.dtype(eval(str(dt))), dt) + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) + assert_equal(str(dt), + "[(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')]") + + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(repr(dt), + "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])])") + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, + align=True) + assert_equal(repr(dt), + "dtype([(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')], align=True)") + + def test_repr_structured_not_packed(self): + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['i4") + assert np.result_type(dt).isnative + assert np.result_type(dt).num == dt.num + + # dtype with empty space: + struct_dt = np.dtype(">i4,i1,f4', (2, 1)), ('b', 'u4')]) + self.check(BigEndStruct, expected) + + def test_little_endian_structure_packed(self): + class LittleEndStruct(ctypes.LittleEndianStructure): + _fields_ = [ + ('one', ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + _pack_ = 1 + expected = np.dtype([('one', 'u1'), ('two', 'B'), + ('b', '>H') + ], align=True) + self.check(PaddedStruct, expected) + + def test_simple_endian_types(self): + self.check(ctypes.c_uint16.__ctype_le__, np.dtype('u2')) + self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1')) + self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1')) + + all_types = set(np.typecodes['All']) + all_pairs = permutations(all_types, 2) + + @pytest.mark.parametrize("pair", all_pairs) + def test_pairs(self, pair): + """ + Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')] + Example: np.dtype('d,I') -> dtype([('f0', ' None: + alias = np.dtype[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.dtype + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_dtype_subclass(self, code: str) -> None: + cls = type(np.dtype(code)) + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.dtype[arg_tup] + else: + with pytest.raises(TypeError): + np.dtype[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.dtype[Any] + + +def test_result_type_integers_and_unitless_timedelta64(): + # Regression test for gh-20077. 
The following call of `result_type` + # would cause a seg. fault. + td = np.timedelta64(4) + result = np.result_type(0, td) + assert_dtype_equal(result, td.dtype) + + +def test_creating_dtype_with_dtype_class_errors(): + # Regression test for #25031, calling `np.dtype` with itself segfaulted. + with pytest.raises(TypeError, match="Cannot convert np.dtype into a"): + np.array(np.ones(10), dtype=np.dtype) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_einsum.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_einsum.py new file mode 100644 index 0000000..0bd180b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_einsum.py @@ -0,0 +1,1317 @@ +import itertools + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) + +# Setup for optimize einsum +chars = 'abcdefghij' +sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) +global_size_dict = dict(zip(chars, sizes)) + + +class TestEinsum: + @pytest.mark.parametrize("do_opt", [True, False]) + @pytest.mark.parametrize("einsum_fn", [np.einsum, np.einsum_path]) + def test_einsum_errors(self, do_opt, einsum_fn): + # Need enough arguments + assert_raises(ValueError, einsum_fn, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "", optimize=do_opt) + + # subscripts must be a string + assert_raises(TypeError, einsum_fn, 0, 0, optimize=do_opt) + + # issue 4528 revealed a segfault with this call + assert_raises(TypeError, einsum_fn, *(None,) * 63, optimize=do_opt) + + # number of operands must match count in subscripts string + assert_raises(ValueError, einsum_fn, "", 0, 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, ",", 0, [0], [0], + optimize=do_opt) + assert_raises(ValueError, einsum_fn, ",", [0], optimize=do_opt) + + # can't have more subscripts than dimensions in the operand + assert_raises(ValueError, einsum_fn, "i", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ij", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "...i", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i...j", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i...", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ij...", [0, 0], optimize=do_opt) + + # invalid ellipsis + assert_raises(ValueError, einsum_fn, "i..", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, ".i...", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "j->..j", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "j->.j...", [0, 0], + optimize=do_opt) + + # invalid subscript character + assert_raises(ValueError, einsum_fn, "i%...", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "...j$", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i->&", [0, 0], optimize=do_opt) + + # output subscripts must appear in input + assert_raises(ValueError, einsum_fn, "i->ij", [0, 0], optimize=do_opt) + + # output subscripts may only be specified once + assert_raises(ValueError, einsum_fn, "ij->jij", [[0, 0], [0, 0]], + optimize=do_opt) + + # dimensions must match when being collapsed + assert_raises(ValueError, einsum_fn, "ii", + np.arange(6).reshape(2, 3), optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ii->i", + np.arange(6).reshape(2, 3), optimize=do_opt) + + with assert_raises_regex(ValueError, "'b'"): + # gh-11221 - 'c' erroneously appeared 
in the error message
+            a = np.ones((3, 3, 4, 5, 6))
+            b = np.ones((3, 4, 5))
+            einsum_fn('aabcb,abc', a, b)
+
+    def test_einsum_sorting_behavior(self):
+        # Case 1: 26 dimensions (all indices map to uppercase letters)
+        n1 = 26
+        x1 = np.random.random((1,) * n1)
+        path1 = np.einsum_path(x1, range(n1))[1]  # Get einsum path details
+        output_indices1 = path1.split("->")[-1].strip()  # Extract output indices
+        # Assert indices are only uppercase letters and sorted correctly
+        assert all(c.isupper() for c in output_indices1), (
+            "Output indices for n=26 should use uppercase letters only: "
+            f"{output_indices1}"
+        )
+        assert_equal(
+            output_indices1,
+            ''.join(sorted(output_indices1)),
+            err_msg=(
+                "Output indices for n=26 are not lexicographically sorted: "
+                f"{output_indices1}"
+            )
+        )
+
+        # Case 2: 27 dimensions (adds a lowercase index)
+        n2 = 27
+        x2 = np.random.random((1,) * n2)
+        path2 = np.einsum_path(x2, range(n2))[1]
+        output_indices2 = path2.split("->")[-1].strip()
+        # Assert indices include both uppercase and lowercase letters
+        assert any(c.islower() for c in output_indices2), (
+            "Output indices for n=27 should include lowercase letters: "
+            f"{output_indices2}"
+        )
+        # Assert output indices are sorted uppercase before lowercase
+        assert_equal(
+            output_indices2,
+            ''.join(sorted(output_indices2)),
+            err_msg=(
+                "Output indices for n=27 are not lexicographically sorted: "
+                f"{output_indices2}"
+            )
+        )
+
+        # Additional check: ensure dimensions correspond correctly to indices
+        # Generate expected mapping of dimensions to indices
+        expected_indices = [
+            chr(i + ord('A')) if i < 26 else chr(i - 26 + ord('a'))
+            for i in range(n2)
+        ]
+        assert_equal(
+            output_indices2,
+            ''.join(expected_indices),
+            err_msg=(
+                "Output indices do not map to the correct dimensions. 
Expected: " + f"{''.join(expected_indices)}, Got: {output_indices2}" + ) + ) + + @pytest.mark.parametrize("do_opt", [True, False]) + def test_einsum_specific_errors(self, do_opt): + # out parameter must be an array + assert_raises(TypeError, np.einsum, "", 0, out='test', + optimize=do_opt) + + # order parameter must be a valid order + assert_raises(ValueError, np.einsum, "", 0, order='W', + optimize=do_opt) + + # casting parameter must be a valid casting + assert_raises(ValueError, np.einsum, "", 0, casting='blah', + optimize=do_opt) + + # dtype parameter must be a valid dtype + assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type', + optimize=do_opt) + + # other keyword arguments are rejected + assert_raises(TypeError, np.einsum, "", 0, bad_arg=0, optimize=do_opt) + + # broadcasting to new dimensions must be enabled explicitly + assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3), + optimize=do_opt) + assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], + out=np.arange(4).reshape(2, 2), optimize=do_opt) + + # Check order kwarg, asanyarray allows 1d to pass through + assert_raises(ValueError, np.einsum, "i->i", + np.arange(6).reshape(-1, 1), optimize=do_opt, order='d') + + def test_einsum_object_errors(self): + # Exceptions created by object arithmetic should + # successfully propagate + + class CustomException(Exception): + pass + + class DestructoBox: + + def __init__(self, value, destruct): + self._val = value + self._destruct = destruct + + def __add__(self, other): + tmp = self._val + other._val + if tmp >= self._destruct: + raise CustomException + else: + self._val = tmp + return self + + def __radd__(self, other): + if other == 0: + return self + else: + return self.__add__(other) + + def __mul__(self, other): + tmp = self._val * other._val + if tmp >= self._destruct: + raise CustomException + else: + self._val = tmp + return self + + def __rmul__(self, other): + if other == 0: + return self + else: + return self.__mul__(other) + + a = np.array([DestructoBox(i, 5) for i in range(1, 10)], + dtype='object').reshape(3, 3) + + # raised from unbuffered_loop_nop1_ndim2 + assert_raises(CustomException, np.einsum, "ij->i", a) + + # raised from unbuffered_loop_nop1_ndim3 + b = np.array([DestructoBox(i, 100) for i in range(27)], + dtype='object').reshape(3, 3, 3) + assert_raises(CustomException, np.einsum, "i...k->...", b) + + # raised from unbuffered_loop_nop2_ndim2 + b = np.array([DestructoBox(i, 55) for i in range(1, 4)], + dtype='object') + assert_raises(CustomException, np.einsum, "ij, j", a, b) + + # raised from unbuffered_loop_nop2_ndim3 + assert_raises(CustomException, np.einsum, "ij, jh", a, a) + + # raised from PyArray_EinsteinSum + assert_raises(CustomException, np.einsum, "ij->", a) + + def test_einsum_views(self): + # pass-through + for do_opt in [True, False]: + a = np.arange(6) + a.shape = (2, 3) + + b = np.einsum("...", a, optimize=do_opt) + assert_(b.base is a) + + b = np.einsum(a, [Ellipsis], optimize=do_opt) + assert_(b.base is a) + + b = np.einsum("ij", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a) + + b = np.einsum(a, [0, 1], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a) + + # output is writeable whenever input is writeable + b = np.einsum("...", a, optimize=do_opt) + assert_(b.flags['WRITEABLE']) + a.flags['WRITEABLE'] = False + b = np.einsum("...", a, optimize=do_opt) + assert_(not b.flags['WRITEABLE']) + + # transpose + a = np.arange(6) + a.shape = (2, 3) + + b = np.einsum("ji", a, optimize=do_opt) 
+ assert_(b.base is a) + assert_equal(b, a.T) + + b = np.einsum(a, [1, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.T) + + # diagonal + a = np.arange(9) + a.shape = (3, 3) + + b = np.einsum("ii->i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + # diagonal with various ways of broadcasting an additional dimension + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("...ii->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum("ii...->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum("...ii->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("jii->ij", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("ii...->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + # triple diagonal + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("iii->i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + # swap axes + a = np.arange(24) + a.shape = (2, 3, 4) + + b = np.einsum("ijk->jik", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + def check_einsum_sums(self, dtype, do_opt=False): + dtype = np.dtype(dtype) + # Check various sums. Does many sizes to exercise unrolled loops. 
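+        # Editor's note (illustrative): every identity below is exercised in
+        # both the subscript-string and the operand-list calling convention,
+        # which are equivalent ways to spell the same contraction, e.g.:
+        #     np.einsum("i->", np.arange(3))      # full reduction -> 3
+        #     np.einsum(np.arange(3), [0], [])    # same thing -> 3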
+ + # sum(a, axis=-1) + for n in range(1, 17): + a = np.arange(n, dtype=dtype) + b = np.sum(a, axis=-1) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i->", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0], [], optimize=do_opt), b) + + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.sum(a, axis=-1) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("...i->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), b) + + # sum(a, axis=0) + for n in range(1, 17): + a = np.arange(2 * n, dtype=dtype).reshape(2, n) + b = np.sum(a, axis=0) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) + + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.sum(a, axis=0) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) + + # trace(a) + for n in range(1, 17): + a = np.arange(n * n, dtype=dtype).reshape(n, n) + b = np.trace(a) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("ii", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, 0], optimize=do_opt), b) + + # gh-15961: should accept numpy int64 type in subscript list + np_array = np.asarray([0, 0]) + assert_equal(np.einsum(a, np_array, optimize=do_opt), b) + assert_equal(np.einsum(a, list(np_array), optimize=do_opt), b) + + # multiply(a, b) + assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case + for n in range(1, 17): + a = np.arange(3 * n, dtype=dtype).reshape(3, n) + b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), + np.multiply(a, b)) + assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), + np.multiply(a, b)) + + # inner(a,b) + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) + assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), + np.inner(a, b)) + + for n in range(1, 11): + a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), + np.inner(a.T, b.T).T) + assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), + np.inner(a.T, b.T).T) + + # outer(a,b) + for n in range(1, 17): + a = np.arange(3, dtype=dtype) + 1 + b = np.arange(n, dtype=dtype) + 1 + assert_equal(np.einsum("i,j", a, b, optimize=do_opt), + np.outer(a, b)) + assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), + np.outer(a, b)) + + # Suppress the complex warnings for the 'as f8' tests + with suppress_warnings() as sup: + sup.filter(np.exceptions.ComplexWarning) + + # matvec(a,b) / a.dot(b) where a is matrix, b is vector + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), + np.dot(a, b)) + + c = np.arange(4, dtype=dtype) + np.einsum("ij,j", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + 
b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), + np.dot(b.T, a.T)) + assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), + np.dot(b.T, a.T)) + + c = np.arange(4, dtype=dtype) + np.einsum("ji,j", a.T, b.T, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a.T, [1, 0], b.T, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + + # matmat(a,b) / a.dot(b) where a is matrix, b is matrix + for n in range(1, 17): + if n < 8 or dtype != 'f2': + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) + assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), + np.dot(a, b)) + + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) + c = np.arange(24, dtype=dtype).reshape(4, 6) + np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', + optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + # matrix triple product (note this is not currently an efficient + # way to multiply 3 matrices) + a = np.arange(12, dtype=dtype).reshape(3, 4) + b = np.arange(20, dtype=dtype).reshape(4, 5) + c = np.arange(30, dtype=dtype).reshape(5, 6) + if dtype != 'f2': + assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), + a.dot(b).dot(c)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], + optimize=do_opt), a.dot(b).dot(c)) + + d = np.arange(18, dtype=dtype).reshape(3, 6) + np.einsum("ij,jk,kl", a, b, c, out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + d[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + # tensordot(a, b) + if np.dtype(dtype) != np.dtype('f2'): + a = np.arange(60, dtype=dtype).reshape(3, 4, 5) + b = np.arange(24, dtype=dtype).reshape(4, 3, 2) + assert_equal(np.einsum("ijk, jil -> kl", a, b), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + + c = np.arange(10, dtype=dtype).reshape(5, 2) + np.einsum("ijk,jil->kl", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + + # logical_and(logical_and(a!=0, b!=0), c!=0) + neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1 + a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype) + b = np.array([0, 3.5, 0., neg_val, 0, 1, 3, 12], dtype=dtype) + c = np.array([True, True, False, True, True, False, True, True]) + + assert_equal(np.einsum("i,i,i->i", a, b, c, + dtype='?', casting='unsafe', optimize=do_opt), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], + dtype='?', casting='unsafe'), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + + a = np.arange(9, dtype=dtype) + assert_equal(np.einsum(",i->", 3, a), 3 * np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3 * np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3 * np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3 * np.sum(a)) + + # Various stride0, contiguous, and SSE aligned variants + for n in range(1, 25): + a = np.arange(n, dtype=dtype) + if np.dtype(dtype).itemsize > 1: + assert_equal(np.einsum("...,...", a, a, optimize=do_opt), + np.multiply(a, a)) + assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) + assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2 * a) + assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2 * a) + assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2 * np.sum(a)) + assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2 * np.sum(a)) + + assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), + np.multiply(a[1:], a[:-1])) + assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), + np.dot(a[1:], a[:-1])) + assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2 * a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2 * a[1:]) + assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), + 2 * np.sum(a[1:])) + assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), + 2 * np.sum(a[1:])) + + # An object array, summed as the data type + a = np.arange(9, dtype=object) + + b = np.einsum("i->", a, dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + if hasattr(b, "dtype"): + # Can be a python object when dtype is object + assert_equal(b.dtype, np.dtype(dtype)) + + b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + if hasattr(b, "dtype"): + # Can be a python object when dtype is object + assert_equal(b.dtype, np.dtype(dtype)) + + # A case which was failing (ticket #1885) + p = np.arange(2) + 1 + q = np.arange(4).reshape(2, 2) + 3 + r = np.arange(4).reshape(2, 2) + 7 + assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) + + # singleton dimensions broadcast (gh-10343) + p = np.ones((10, 2)) + q = np.ones((1, 2)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + np.einsum('ij,ij->j', p, q, optimize=False)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + [10.] * 2) + + # a blas-compatible contraction broadcasting case which was failing + # for optimize=True (ticket #10930) + x = np.array([2., 3.]) + y = np.array([4.]) + assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) + assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) 
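+        # Editor's note (illustrative): y has shape (1,), so it broadcasts
+        # against x and "i, i" becomes multiply-then-sum:
+        # (2. + 3.) * 4. == 20., not a length-mismatched BLAS dot call.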
+ + # all-ones array was bypassing bug (ticket #10930) + p = np.ones((1, 5)) / 2 + q = np.ones((5, 5)) / 2 + for optimize in (True, False): + assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, + optimize=optimize), + np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize)) + assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize), + np.full((1, 5), 1.25)) + + # Cases which were failing (gh-10899) + x = np.eye(2, dtype=dtype) + y = np.ones(2, dtype=dtype) + assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), + [2.]) # contig_contig_outstride0_two + assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), + [2.]) # stride0_contig_outstride0_two + assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), + [2.]) # contig_stride0_outstride0_two + + def test_einsum_sums_int8(self): + self.check_einsum_sums('i1') + + def test_einsum_sums_uint8(self): + self.check_einsum_sums('u1') + + def test_einsum_sums_int16(self): + self.check_einsum_sums('i2') + + def test_einsum_sums_uint16(self): + self.check_einsum_sums('u2') + + def test_einsum_sums_int32(self): + self.check_einsum_sums('i4') + self.check_einsum_sums('i4', True) + + def test_einsum_sums_uint32(self): + self.check_einsum_sums('u4') + self.check_einsum_sums('u4', True) + + def test_einsum_sums_int64(self): + self.check_einsum_sums('i8') + + def test_einsum_sums_uint64(self): + self.check_einsum_sums('u8') + + def test_einsum_sums_float16(self): + self.check_einsum_sums('f2') + + def test_einsum_sums_float32(self): + self.check_einsum_sums('f4') + + def test_einsum_sums_float64(self): + self.check_einsum_sums('f8') + self.check_einsum_sums('f8', True) + + def test_einsum_sums_longdouble(self): + self.check_einsum_sums(np.longdouble) + + def test_einsum_sums_cfloat64(self): + self.check_einsum_sums('c8') + self.check_einsum_sums('c8', True) + + def test_einsum_sums_cfloat128(self): + self.check_einsum_sums('c16') + + def test_einsum_sums_clongdouble(self): + self.check_einsum_sums(np.clongdouble) + + def test_einsum_sums_object(self): + self.check_einsum_sums('object') + self.check_einsum_sums('object', True) + + def test_einsum_misc(self): + # This call used to crash because of a bug in + # PyArray_AssignZero + a = np.ones((1, 2)) + b = np.ones((2, 2, 1)) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) + + # Regression test for issue #10369 (test unicode inputs with Python 2) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], + optimize='greedy'), 20) + + # The iterator had an issue with buffering this reduction + a = np.ones((5, 12, 4, 2, 3), np.int64) + b = np.ones((5, 12, 11), np.int64) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), + np.einsum('ijklm,ijn->', a, b)) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), + np.einsum('ijklm,ijn->', a, b, optimize=True)) + + # Issue #2027, was a problem in the contiguous 3-argument + # inner loop implementation + a = np.arange(1, 3) + b = np.arange(1, 5).reshape(2, 2) + c = np.arange(1, 9).reshape(4, 2) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 
64]]]) + + # Ensure explicitly setting out=None does not cause an error + # see issue gh-15776 and issue gh-15256 + assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]]) + + def test_object_loop(self): + + class Mult: + def __mul__(self, other): + return 42 + + objMult = np.array([Mult()]) + objNULL = np.ndarray(buffer=b'\0' * np.intp(0).itemsize, shape=1, dtype=object) + + with pytest.raises(TypeError): + np.einsum("i,j", [1], objNULL) + with pytest.raises(TypeError): + np.einsum("i,j", objNULL, [1]) + assert np.einsum("i,j", objMult, objMult) == 42 + + def test_subscript_range(self): + # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used + # when creating a subscript from arrays + a = np.ones((2, 3)) + b = np.ones((3, 4)) + np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) + np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) + np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) + assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) + assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + + def test_einsum_broadcast(self): + # Issue #2455 change in handling ellipsis + # remove the 'middle broadcast' error + # only use the 'RIGHT' iteration in prepare_op_axes + # adds auto broadcast on left where it belongs + # broadcast on right has to be explicit + # We need to test the optimized parsing as well + + A = np.arange(2 * 3 * 4).reshape(2, 3, 4) + B = np.arange(3) + ref = np.einsum('ijk,j->ijk', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + + A = np.arange(12).reshape((4, 3)) + B = np.arange(6).reshape((3, 2)) + ref = np.einsum('ik,kj->ij', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error + + dims = [2, 3, 4, 5] + a = np.arange(np.prod(dims)).reshape(dims) + v = np.arange(dims[2]) + ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) + + J, K, M = 160, 160, 120 + A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) + B = np.arange(J * K * M * 3).reshape(J, K, M, 3) + ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('...lmn,lmno->...o', A, B, + optimize=opt), ref) # used to raise error + + def test_einsum_fixedstridebug(self): + # Issue #4485 obscure einsum bug + # This case revealed a bug in nditer where it reported a stride + # as 'fixed' (0) when it was in fact not fixed during processing + # (0 or 4). 
The reason for the bug was that the check for a fixed + # stride was using the information from the 2D inner loop reuse + # to restrict the iteration dimensions it had to validate to be + # the same, but that 2D inner loop reuse logic is only triggered + # during the buffer copying step, and hence it was invalid to + # rely on those values. The fix is to check all the dimensions + # of the stride in question, which in the test case reveals that + # the stride is not fixed. + # + # NOTE: This test is triggered by the fact that the default buffersize, + # used by einsum, is 8192, and 3*2731 = 8193, is larger than that + # and results in a mismatch between the buffering and the + # striding for operand A. + A = np.arange(2 * 3).reshape(2, 3).astype(np.float32) + B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16) + es = np.einsum('cl, cpx->lpx', A, B) + tp = np.tensordot(A, B, axes=(0, 0)) + assert_equal(es, tp) + # The following is the original test case from the bug report, + # made repeatable by changing random arrays to aranges. + A = np.arange(3 * 3).reshape(3, 3).astype(np.float64) + B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32) + es = np.einsum('cl, cpxy->lpxy', A, B) + tp = np.tensordot(A, B, axes=(0, 0)) + assert_equal(es, tp) + + def test_einsum_fixed_collapsingbug(self): + # Issue #5147. + # The bug only occurred when output argument of einssum was used. + x = np.random.normal(0, 1, (5, 5, 5, 5)) + y1 = np.zeros((5, 5)) + np.einsum('aabb->ab', x, out=y1) + idx = np.arange(5) + y2 = x[idx[:, None], idx[:, None], idx, idx] + assert_equal(y1, y2) + + def test_einsum_failed_on_p9_and_s390x(self): + # Issues gh-14692 and gh-12689 + # Bug with signed vs unsigned char errored on power9 and s390x Linux + tensor = np.random.random_sample((10, 10, 10, 10)) + x = np.einsum('ijij->', tensor) + y = tensor.trace(axis1=0, axis2=2).trace() + assert_allclose(x, y) + + def test_einsum_all_contig_non_contig_output(self): + # Issue gh-5907, tests that the all contiguous special case + # actually checks the contiguity of the output + x = np.ones((5, 5)) + out = np.ones(10)[::2] + correct_base = np.ones(10) + correct_base[::2] = 5 + # Always worked (inner iteration is done with 0-stride): + np.einsum('mi,mi,mi->m', x, x, x, out=out) + assert_array_equal(out.base, correct_base) + # Example 1: + out = np.ones(10)[::2] + np.einsum('im,im,im->m', x, x, x, out=out) + assert_array_equal(out.base, correct_base) + # Example 2, buffering causes x to be contiguous but + # special cases do not catch the operation before: + out = np.ones((2, 2, 2))[..., 0] + correct_base = np.ones((2, 2, 2)) + correct_base[..., 0] = 2 + x = np.ones((2, 2), np.float32) + np.einsum('ij,jk->ik', x, x, out=out) + assert_array_equal(out.base, correct_base) + + @pytest.mark.parametrize("dtype", + np.typecodes["AllFloat"] + np.typecodes["AllInteger"]) + def test_different_paths(self, dtype): + # Test originally added to cover broken float16 path: gh-20305 + # Likely most are covered elsewhere, at least partially. + dtype = np.dtype(dtype) + # Simple test, designed to exercise most specialized code paths, + # note the +0.5 for floats. This makes sure we use a float value + # where the results must be exact. 
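+        # Editor's note (illustrative): values like 0.5, 1.5, ..., 6.5 are
+        # exactly representable even in float16, so the checks below can use
+        # exact equality instead of allclose.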
+ arr = (np.arange(7) + 0.5).astype(dtype) + scalar = np.array(2, dtype=dtype) + + # contig -> scalar: + res = np.einsum('i->', arr) + assert res == arr.sum() + # contig, contig -> contig: + res = np.einsum('i,i->i', arr, arr) + assert_array_equal(res, arr * arr) + # noncontig, noncontig -> contig: + res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2]) + assert_array_equal(res, arr * arr) + # contig + contig -> scalar + assert np.einsum('i,i->', arr, arr) == (arr * arr).sum() + # contig + scalar -> contig (with out) + out = np.ones(7, dtype=dtype) + res = np.einsum('i,->i', arr, dtype.type(2), out=out) + assert_array_equal(res, arr * dtype.type(2)) + # scalar + contig -> contig (with out) + res = np.einsum(',i->i', scalar, arr) + assert_array_equal(res, arr * dtype.type(2)) + # scalar + contig -> scalar + res = np.einsum(',i->', scalar, arr) + # Use einsum to compare to not have difference due to sum round-offs: + assert res == np.einsum('i->', scalar * arr) + # contig + scalar -> scalar + res = np.einsum('i,->', arr, scalar) + # Use einsum to compare to not have difference due to sum round-offs: + assert res == np.einsum('i->', scalar * arr) + # contig + contig + contig -> scalar + arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype) + res = np.einsum('i,i,i->', arr, arr, arr) + assert_array_equal(res, (arr * arr * arr).sum()) + # four arrays: + res = np.einsum('i,i,i,i->', arr, arr, arr, arr) + assert_array_equal(res, (arr * arr * arr * arr).sum()) + + def test_small_boolean_arrays(self): + # See gh-5946. + # Use array of True embedded in False. + a = np.zeros((16, 1, 1), dtype=np.bool)[:2] + a[...] = True + out = np.zeros((16, 1, 1), dtype=np.bool)[:2] + tgt = np.ones((2, 1, 1), dtype=np.bool) + res = np.einsum('...ij,...jk->...ik', a, a, out=out) + assert_equal(res, tgt) + + def test_out_is_res(self): + a = np.arange(9).reshape(3, 3) + res = np.einsum('...ij,...jk->...ik', a, a, out=a) + assert res is a + + def optimize_compare(self, subscripts, operands=None): + # Tests all paths of the optimization function against + # conventional einsum + if operands is None: + args = [subscripts] + terms = subscripts.split('->')[0].split(',') + for term in terms: + dims = [global_size_dict[x] for x in term] + args.append(np.random.rand(*dims)) + else: + args = [subscripts] + operands + + noopt = np.einsum(*args, optimize=False) + opt = np.einsum(*args, optimize='greedy') + assert_almost_equal(opt, noopt) + opt = np.einsum(*args, optimize='optimal') + assert_almost_equal(opt, noopt) + + def test_hadamard_like_products(self): + # Hadamard outer products + self.optimize_compare('a,ab,abc->abc') + self.optimize_compare('a,b,ab->ab') + + def test_index_transformations(self): + # Simple index transformation cases + self.optimize_compare('ea,fb,gc,hd,abcd->efgh') + self.optimize_compare('ea,fb,abcd,gc,hd->efgh') + self.optimize_compare('abcd,ea,fb,gc,hd->efgh') + + def test_complex(self): + # Long test cases + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') + self.optimize_compare('abhe,hidj,jgba,hiab,gab') + self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') + self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') + self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') + self.optimize_compare('bdhe,acad,hiab,agac,hibd') + + def test_collapse(self): + # Inner products + self.optimize_compare('ab,ab,c->') + self.optimize_compare('ab,ab,c->c') + 
self.optimize_compare('ab,ab,cd,cd->') + self.optimize_compare('ab,ab,cd,cd->ac') + self.optimize_compare('ab,ab,cd,cd->cd') + self.optimize_compare('ab,ab,cd,cd,ef,ef->') + + def test_expand(self): + # Outer products + self.optimize_compare('ab,cd,ef->abcdef') + self.optimize_compare('ab,cd,ef->acdf') + self.optimize_compare('ab,cd,de->abcde') + self.optimize_compare('ab,cd,de->be') + self.optimize_compare('ab,bcd,cd->abcd') + self.optimize_compare('ab,bcd,cd->abd') + + def test_edge_cases(self): + # Difficult edge cases for optimization + self.optimize_compare('eb,cb,fb->cef') + self.optimize_compare('dd,fb,be,cdb->cef') + self.optimize_compare('bca,cdb,dbf,afc->') + self.optimize_compare('dcc,fce,ea,dbf->ab') + self.optimize_compare('fdf,cdd,ccd,afe->ae') + self.optimize_compare('abcd,ad') + self.optimize_compare('ed,fcd,ff,bcf->be') + self.optimize_compare('baa,dcf,af,cde->be') + self.optimize_compare('bd,db,eac->ace') + self.optimize_compare('fff,fae,bef,def->abd') + self.optimize_compare('efc,dbc,acf,fd->abe') + self.optimize_compare('ba,ac,da->bcd') + + def test_inner_product(self): + # Inner products + self.optimize_compare('ab,ab') + self.optimize_compare('ab,ba') + self.optimize_compare('abc,abc') + self.optimize_compare('abc,bac') + self.optimize_compare('abc,cba') + + def test_random_cases(self): + # Randomly built test cases + self.optimize_compare('aab,fa,df,ecc->bde') + self.optimize_compare('ecb,fef,bad,ed->ac') + self.optimize_compare('bcf,bbb,fbf,fc->') + self.optimize_compare('bb,ff,be->e') + self.optimize_compare('bcb,bb,fc,fff->') + self.optimize_compare('fbb,dfd,fc,fc->') + self.optimize_compare('afd,ba,cc,dc->bf') + self.optimize_compare('adb,bc,fa,cfc->d') + self.optimize_compare('bbd,bda,fc,db->acf') + self.optimize_compare('dba,ead,cad->bce') + self.optimize_compare('aef,fbc,dca->bde') + + def test_combined_views_mapping(self): + # gh-10792 + a = np.arange(9).reshape(1, 1, 3, 1, 3) + b = np.einsum('bbcdc->d', a) + assert_equal(b, [12]) + + def test_broadcasting_dot_cases(self): + # Ensures broadcasting cases are not mistaken for GEMM + + a = np.random.rand(1, 5, 4) + b = np.random.rand(4, 6) + c = np.random.rand(5, 6) + d = np.random.rand(10) + + self.optimize_compare('ijk,kl,jl', operands=[a, b, c]) + self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d]) + + e = np.random.rand(1, 1, 5, 4) + f = np.random.rand(7, 7) + self.optimize_compare('abjk,kl,jl', operands=[e, b, c]) + self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f]) + + # Edge case found in gh-11308 + g = np.arange(64).reshape(2, 4, 8) + self.optimize_compare('obk,ijk->ioj', operands=[g, g]) + + def test_output_order(self): + # Ensure output order is respected for optimize cases, the below + # contraction should yield a reshaped tensor view + # gh-16415 + + a = np.ones((2, 3, 5), order='F') + b = np.ones((4, 3), order='F') + + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt) + assert_(tmp.flags.f_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt) + assert_(tmp.flags.f_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt) + assert_(tmp.flags.c_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt) + assert_(tmp.flags.c_contiguous is False) + assert_(tmp.flags.f_contiguous is False) + + tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt) + assert_(tmp.flags.c_contiguous is False) + assert_(tmp.flags.f_contiguous is False) + + c = np.ones((4, 3), 
order='C') + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt) + assert_(tmp.flags.c_contiguous) + + d = np.ones((2, 3, 5), order='C') + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt) + assert_(tmp.flags.c_contiguous) + +class TestEinsumPath: + def build_operands(self, string, size_dict=global_size_dict): + + # Builds views based off initial operands + operands = [string] + terms = string.split('->')[0].split(',') + for term in terms: + dims = [size_dict[x] for x in term] + operands.append(np.random.rand(*dims)) + + return operands + + def assert_path_equal(self, comp, benchmark): + # Checks if list of tuples are equivalent + ret = (len(comp) == len(benchmark)) + assert_(ret) + for pos in range(len(comp) - 1): + ret &= isinstance(comp[pos + 1], tuple) + ret &= (comp[pos + 1] == benchmark[pos + 1]) + assert_(ret) + + def test_memory_contraints(self): + # Ensure memory constraints are satisfied + + outer_test = self.build_operands('a,b,c->abc') + + path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) + + path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) + + long_test = self.build_operands('acdf,jbje,gihb,hfac') + path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + def test_long_paths(self): + # Long complex cases + + # Long test 1 + long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + path, path_str = np.einsum_path(*long_test1, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', + (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*long_test1, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', + (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) + + # Long test 2 + long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb') + path, path_str = np.einsum_path(*long_test2, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', + (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)]) + + path, path_str = np.einsum_path(*long_test2, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', + (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)]) + + def test_edge_paths(self): + # Difficult edge cases + + # Edge test1 + edge_test1 = self.build_operands('eb,cb,fb->cef') + path, path_str = np.einsum_path(*edge_test1, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test1, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) + + # Edge test2 + edge_test2 = self.build_operands('dd,fb,be,cdb->cef') + path, path_str = np.einsum_path(*edge_test2, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test2, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) + + # Edge test3 + edge_test3 = self.build_operands('bca,cdb,dbf,afc->') + path, path_str = np.einsum_path(*edge_test3, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test3, optimize='optimal') 
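+        # Editor's note (illustrative): each tuple in the returned path
+        # gives the positions of the operands contracted at that step (the
+        # operand list shrinks and re-indexes after every contraction), so
+        # (1, 2) here means 'cdb' and 'dbf' are combined first.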
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + # Edge test4 + edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab') + path, path_str = np.einsum_path(*edge_test4, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test4, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + # Edge test5 + edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->', + size_dict={"a": 20, "b": 20, "c": 20, "d": 20}) + path, path_str = np.einsum_path(*edge_test4, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) + + path, path_str = np.einsum_path(*edge_test4, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) + + def test_path_type_input(self): + # Test explicit path handling + path_test = self.build_operands('dcc,fce,ea,dbf->ab') + + path, path_str = np.einsum_path(*path_test, optimize=False) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + path, path_str = np.einsum_path(*path_test, optimize=True) + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) + + exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)] + path, path_str = np.einsum_path(*path_test, optimize=exp_path) + self.assert_path_equal(path, exp_path) + + # Double check einsum works on the input path + noopt = np.einsum(*path_test, optimize=False) + opt = np.einsum(*path_test, optimize=exp_path) + assert_almost_equal(noopt, opt) + + def test_path_type_input_internal_trace(self): + # gh-20962 + path_test = self.build_operands('cab,cdd->ab') + exp_path = ['einsum_path', (1,), (0, 1)] + + path, path_str = np.einsum_path(*path_test, optimize=exp_path) + self.assert_path_equal(path, exp_path) + + # Double check einsum works on the input path + noopt = np.einsum(*path_test, optimize=False) + opt = np.einsum(*path_test, optimize=exp_path) + assert_almost_equal(noopt, opt) + + def test_path_type_input_invalid(self): + path_test = self.build_operands('ab,bc,cd,de->ae') + exp_path = ['einsum_path', (2, 3), (0, 1)] + assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path) + assert_raises( + RuntimeError, np.einsum_path, *path_test, optimize=exp_path) + + path_test = self.build_operands('a,a,a->a') + exp_path = ['einsum_path', (1,), (0, 1)] + assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path) + assert_raises( + RuntimeError, np.einsum_path, *path_test, optimize=exp_path) + + def test_spaces(self): + # gh-10794 + arr = np.array([[1]]) + for sp in itertools.product(['', ' '], repeat=4): + # no error for any spacing + np.einsum('{}...a{}->{}...a{}'.format(*sp), arr) + +def test_overlap(): + a = np.arange(9, dtype=int).reshape(3, 3) + b = np.arange(9, dtype=int).reshape(3, 3) + d = np.dot(a, b) + # sanity check + c = np.einsum('ij,jk->ik', a, b) + assert_equal(c, d) + # gh-10080, out overlaps one of the operands + c = np.einsum('ij,jk->ik', a, b, out=b) + assert_equal(c, d) + +def test_einsum_chunking_precision(): + """Most einsum operations are reductions and until NumPy 2.3 reductions + never (or almost never?) used the `GROWINNER` mechanism to increase the + inner loop size when no buffers are needed. 
+    Because einsum reductions work roughly:
+
+        def inner(*inputs, out):
+            accumulate = 0
+            for vals in zip(*inputs):
+                accumulate += prod(vals)
+            out[0] += accumulate
+
+    Calling the inner-loop more often actually improves accuracy slightly
+    (same effect as pairwise summation but much less).
+    Without adding pairwise summation to the inner-loop it seems best to
+    just not use GROWINNER; a quick test suggests that this is maybe a 1%
+    slowdown for the simplest `einsum("i,i->i", x, x)` case.
+
+    (It is not clear that we should guarantee precision to this extent.)
+    """
+    num = 1_000_000
+    value = 1. + np.finfo(np.float64).eps * 8196
+    res = np.einsum("i->", np.broadcast_to(np.array(value), num)) / num
+
+    # With GROWINNER only 11 decimals would succeed (larger sizes will be less)
+    assert_almost_equal(res, value, decimal=15)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_errstate.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_errstate.py
new file mode 100644
index 0000000..b72fb65
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_errstate.py
@@ -0,0 +1,131 @@
+import sysconfig
+
+import pytest
+
+import numpy as np
+from numpy.testing import IS_WASM, assert_raises
+
+# The floating point emulation on ARM EABI systems lacking a hardware FPU is
+# known to be buggy. This is an attempt to identify these hosts. It may not
+# catch all possible cases, but it catches the known cases of gh-413 and
+# gh-15562.
+hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
+arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi')
+
+class TestErrstate:
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-413,-15562)')
+    def test_invalid(self):
+        with np.errstate(all='raise', under='ignore'):
+            a = -np.arange(3)
+            # This should work
+            with np.errstate(invalid='ignore'):
+                np.sqrt(a)
+            # While this should fail!
+            with assert_raises(FloatingPointError):
+                np.sqrt(a)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-15562)')
+    def test_divide(self):
+        with np.errstate(all='raise', under='ignore'):
+            a = -np.arange(3)
+            # This should work
+            with np.errstate(divide='ignore'):
+                a // 0
+            # While this should fail!
+ with assert_raises(FloatingPointError): + a // 0 + # As should this, see gh-15562 + with assert_raises(FloatingPointError): + a // a + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif(arm_softfloat, + reason='platform/cpu issue with FPU (gh-15562)') + def test_errcall(self): + count = 0 + + def foo(*args): + nonlocal count + count += 1 + + olderrcall = np.geterrcall() + with np.errstate(call=foo): + assert np.geterrcall() is foo + with np.errstate(call=None): + assert np.geterrcall() is None + assert np.geterrcall() is olderrcall + assert count == 0 + + with np.errstate(call=foo, invalid="call"): + np.array(np.inf) - np.array(np.inf) + + assert count == 1 + + def test_errstate_decorator(self): + @np.errstate(all='ignore') + def foo(): + a = -np.arange(3) + a // 0 + + foo() + + def test_errstate_enter_once(self): + errstate = np.errstate(invalid="warn") + with errstate: + pass + + # The errstate context cannot be entered twice as that would not be + # thread-safe + with pytest.raises(TypeError, + match="Cannot enter `np.errstate` twice"): + with errstate: + pass + + @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") + def test_asyncio_safe(self): + # asyncio may not always work, lets assume its fine if missing + # Pyodide/wasm doesn't support it. If this test makes problems, + # it should just be skipped liberally (or run differently). + asyncio = pytest.importorskip("asyncio") + + @np.errstate(invalid="ignore") + def decorated(): + # Decorated non-async function (it is not safe to decorate an + # async one) + assert np.geterr()["invalid"] == "ignore" + + async def func1(): + decorated() + await asyncio.sleep(0.1) + decorated() + + async def func2(): + with np.errstate(invalid="raise"): + assert np.geterr()["invalid"] == "raise" + await asyncio.sleep(0.125) + assert np.geterr()["invalid"] == "raise" + + # for good sport, a third one with yet another state: + async def func3(): + with np.errstate(invalid="print"): + assert np.geterr()["invalid"] == "print" + await asyncio.sleep(0.11) + assert np.geterr()["invalid"] == "print" + + async def main(): + # simply run all three function multiple times: + await asyncio.gather( + func1(), func2(), func3(), func1(), func2(), func3(), + func1(), func2(), func3(), func1(), func2(), func3()) + + loop = asyncio.new_event_loop() + with np.errstate(invalid="warn"): + asyncio.run(main()) + assert np.geterr()["invalid"] == "warn" + + assert np.geterr()["invalid"] == "warn" # the default + loop.close() diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_extint128.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_extint128.py new file mode 100644 index 0000000..1a05151 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_extint128.py @@ -0,0 +1,217 @@ +import contextlib +import itertools +import operator + +import numpy._core._multiarray_tests as mt +import pytest + +import numpy as np +from numpy.testing import assert_equal, assert_raises + +INT64_MAX = np.iinfo(np.int64).max +INT64_MIN = np.iinfo(np.int64).min +INT64_MID = 2**32 + +# int128 is not two's complement, the sign bit is separate +INT128_MAX = 2**128 - 1 +INT128_MIN = -INT128_MAX +INT128_MID = 2**64 + +INT64_VALUES = ( + [INT64_MIN + j for j in range(20)] + + [INT64_MAX - j for j in range(20)] + + [INT64_MID + j for j in range(-20, 20)] + + [2 * INT64_MID + j for j in range(-20, 20)] + + [INT64_MID // 2 + j for j in range(-20, 20)] + + list(range(-70, 70)) +) + +INT128_VALUES = ( + 
+    [INT128_MIN + j for j in range(20)] +
+    [INT128_MAX - j for j in range(20)] +
+    [INT128_MID + j for j in range(-20, 20)] +
+    [2 * INT128_MID + j for j in range(-20, 20)] +
+    [INT128_MID // 2 + j for j in range(-20, 20)] +
+    list(range(-70, 70)) +
+    [False]  # negative zero
+)
+
+INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
+
+
+@contextlib.contextmanager
+def exc_iter(*args):
+    """
+    Iterate over Cartesian product of *args, and if an exception is raised,
+    add information about the current iterate to the message.
+    """
+
+    value = [None]
+
+    def iterate():
+        for v in itertools.product(*args):
+            value[0] = v
+            yield v
+
+    try:
+        yield iterate()
+    except Exception:
+        import traceback
+        msg = f"At: {value[0]!r}\n{traceback.format_exc()}"
+        raise AssertionError(msg)
+
+
+def test_safe_binop():
+    # Test checked arithmetic routines
+
+    ops = [
+        (operator.add, 1),
+        (operator.sub, 2),
+        (operator.mul, 3)
+    ]
+
+    with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
+        for xop, a, b in it:
+            pyop, op = xop
+            c = pyop(a, b)
+
+            if not (INT64_MIN <= c <= INT64_MAX):
+                assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
+            else:
+                d = mt.extint_safe_binop(a, b, op)
+                if c != d:
+                    # assert_equal is slow
+                    assert_equal(d, c)
+
+
+def test_to_128():
+    with exc_iter(INT64_VALUES) as it:
+        for a, in it:
+            b = mt.extint_to_128(a)
+            if a != b:
+                assert_equal(b, a)
+
+
+def test_to_64():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if not (INT64_MIN <= a <= INT64_MAX):
+                assert_raises(OverflowError, mt.extint_to_64, a)
+            else:
+                b = mt.extint_to_64(a)
+                if a != b:
+                    assert_equal(b, a)
+
+
+def test_mul_64_64():
+    with exc_iter(INT64_VALUES, INT64_VALUES) as it:
+        for a, b in it:
+            c = a * b
+            d = mt.extint_mul_64_64(a, b)
+            if c != d:
+                assert_equal(d, c)
+
+
+def test_add_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a + b
+            if not (INT128_MIN <= c <= INT128_MAX):
+                assert_raises(OverflowError, mt.extint_add_128, a, b)
+            else:
+                d = mt.extint_add_128(a, b)
+                if c != d:
+                    assert_equal(d, c)
+
+
+def test_sub_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a - b
+            if not (INT128_MIN <= c <= INT128_MAX):
+                assert_raises(OverflowError, mt.extint_sub_128, a, b)
+            else:
+                d = mt.extint_sub_128(a, b)
+                if c != d:
+                    assert_equal(d, c)
+
+
+def test_neg_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            b = -a
+            c = mt.extint_neg_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_shl_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if a < 0:
+                b = -(((-a) << 1) & (2**128 - 1))
+            else:
+                b = (a << 1) & (2**128 - 1)
+            c = mt.extint_shl_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_shr_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if a < 0:
+                b = -((-a) >> 1)
+            else:
+                b = a >> 1
+            c = mt.extint_shr_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_gt_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a > b
+            d = mt.extint_gt_128(a, b)
+            if c != d:
+                assert_equal(d, c)
+
+
+@pytest.mark.slow
+def test_divmod_128_64():
+    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+        for a, b in it:
+            if a >= 0:
+                c, cr = divmod(a, b)
+            else:
+                c, cr = divmod(-a, b)
+                c = -c
+                cr = -cr
+
+            d, dr = mt.extint_divmod_128_64(a, b)
+
+            if c != d or cr != dr or b * d + dr != a:
+                assert_equal(d, c)
+                assert_equal(dr, cr)
+                assert_equal(b * d + dr, a)
+
+
+def test_floordiv_128_64():
+    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+        for a, b in it:
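+            # Editor's note (illustrative): Python's // floors toward
+            # negative infinity (e.g. -7 // 2 == -4), so the C helper is
+            # expected to match floor semantics rather than C's truncation.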
+ c = a // b + d = mt.extint_floordiv_128_64(a, b) + + if c != d: + assert_equal(d, c) + + +def test_ceildiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = (a + b - 1) // b + d = mt.extint_ceildiv_128_64(a, b) + + if c != d: + assert_equal(d, c) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_function_base.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_function_base.py new file mode 100644 index 0000000..c925cf1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_function_base.py @@ -0,0 +1,503 @@ +import platform +import sys + +import pytest + +import numpy as np +from numpy import ( + arange, + array, + dtype, + errstate, + geomspace, + isnan, + linspace, + logspace, + ndarray, + nextafter, + sqrt, + stack, +) +from numpy._core import sctypes +from numpy._core.function_base import add_newdoc +from numpy.testing import ( + IS_PYPY, + assert_, + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) + + +def _is_armhf(): + # Check if the current platform is ARMHF (32-bit ARM architecture) + return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' + +class PhysicalQuantity(float): + def __new__(cls, value): + return float.__new__(cls, value) + + def __add__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) + float(self)) + __radd__ = __add__ + + def __sub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(self) - float(x)) + + def __rsub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) - float(self)) + + def __mul__(self, x): + return PhysicalQuantity(float(x) * float(self)) + __rmul__ = __mul__ + + def __truediv__(self, x): + return PhysicalQuantity(float(self) / float(x)) + + def __rtruediv__(self, x): + return PhysicalQuantity(float(x) / float(self)) + + +class PhysicalQuantity2(ndarray): + __array_priority__ = 10 + + +class TestLogspace: + + def test_basic(self): + y = logspace(0, 6) + assert_(len(y) == 50) + y = logspace(0, 6, num=100) + assert_(y[-1] == 10 ** 6) + y = logspace(0, 6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = logspace(0, 6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + def test_start_stop_array(self): + start = array([0., 1.]) + stop = array([6., 7.]) + t1 = logspace(start, stop, 6) + t2 = stack([logspace(_start, _stop, 6) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = logspace(start, stop[0], 6) + t4 = stack([logspace(_start, stop[0], 6) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = logspace(start, stop, 6, axis=-1) + assert_equal(t5, t2.T) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_base_array(self, axis: int): + start = 1 + stop = 2 + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, stop, num=num, base=_base) for _base in base], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_stop_base_array(self, axis: int): + start = 1 + stop = array([2, 3]) + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, _stop, num=num, base=_base) + for _stop, _base in zip(stop, base)], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + def test_dtype(self): + y = logspace(0, 6, dtype='float32') + 
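+        # The dtype is applied to the final result, so an integer dtype
+        # truncates the computed powers; e.g. logspace(0, 2, num=3,
+        # dtype='int32') yields [1, 10, 100] exactly.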
assert_equal(y.dtype, dtype('float32')) + y = logspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = logspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(logspace(a, b), logspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + ls = logspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0)) + ls = logspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0, 1)) + + +class TestGeomspace: + + def test_basic(self): + y = geomspace(1, 1e6) + assert_(len(y) == 50) + y = geomspace(1, 1e6, num=100) + assert_(y[-1] == 10 ** 6) + y = geomspace(1, 1e6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = geomspace(1, 1e6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + y = geomspace(8, 2, num=3) + assert_allclose(y, [8, 4, 2]) + assert_array_equal(y.imag, 0) + + y = geomspace(-1, -100, num=3) + assert_array_equal(y, [-1, -10, -100]) + assert_array_equal(y.imag, 0) + + y = geomspace(-100, -1, num=3) + assert_array_equal(y, [-100, -10, -1]) + assert_array_equal(y.imag, 0) + + def test_boundaries_match_start_and_stop_exactly(self): + # make sure that the boundaries of the returned array exactly + # equal 'start' and 'stop' - this isn't obvious because + # np.exp(np.log(x)) isn't necessarily exactly equal to x + start = 0.3 + stop = 20.3 + + y = geomspace(start, stop, num=1) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=1, endpoint=False) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=3) + assert_equal(y[0], start) + assert_equal(y[-1], stop) + + y = geomspace(start, stop, num=3, endpoint=False) + assert_equal(y[0], start) + + def test_nan_interior(self): + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:-1]).all()) + assert_equal(y[3], 3.0) + + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4, endpoint=False) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:]).all()) + + def test_complex(self): + # Purely imaginary + y = geomspace(1j, 16j, num=5) + assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) + assert_array_equal(y.real, 0) + + y = geomspace(-4j, -324j, num=5) + assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) + assert_array_equal(y.real, 0) + + y = geomspace(1 + 1j, 1000 + 1000j, num=4) + assert_allclose(y, [1 + 1j, 10 + 10j, 100 + 100j, 1000 + 1000j]) + + y = geomspace(-1 + 1j, -1000 + 1000j, num=4) + assert_allclose(y, [-1 + 1j, -10 + 10j, -100 + 100j, -1000 + 1000j]) + + # Logarithmic spirals + y = geomspace(-1, 1, num=3, dtype=complex) + assert_allclose(y, [-1, 1j, +1]) + + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(0 + 3j, 3 + 0j, 3) + assert_allclose(y, [0 + 3j, 3 / sqrt(2) + 3j / sqrt(2), 3 + 0j]) + y = geomspace(-3 + 0j, 0 - 3j, 3) + assert_allclose(y, [-3 + 0j, -3 / sqrt(2) - 3j / sqrt(2), 0 - 3j]) + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(-2 - 3j, 5 + 7j, 7) + assert_allclose(y, [-2 - 3j, -0.29058977 - 4.15771027j, + 2.08885354 - 4.34146838j, 4.58345529 - 3.16355218j, + 6.41401745 - 0.55233457j, 6.75707386 + 3.11795092j, + 5 + 7j]) + + # Type promotion should prevent the -5 from becoming a NaN + y = geomspace(3j, -5, 2) + 
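+        # With num=2 the result is just the two endpoints, so this only
+        # exercises the promotion to a complex dtype, not the spiral path.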
assert_allclose(y, [3j, -5]) + y = geomspace(-5, 3j, 2) + assert_allclose(y, [-5, 3j]) + + def test_complex_shortest_path(self): + # test the shortest logarithmic spiral is used, see gh-25644 + x = 1.2 + 3.4j + y = np.exp(1j * (np.pi - .1)) * x + z = np.geomspace(x, y, 5) + expected = np.array([1.2 + 3.4j, -1.47384 + 3.2905616j, + -3.33577588 + 1.36842949j, -3.36011056 - 1.30753855j, + -1.53343861 - 3.26321406j]) + np.testing.assert_array_almost_equal(z, expected) + + def test_dtype(self): + y = geomspace(1, 1e6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = geomspace(1, 1e6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + # Native types + y = geomspace(1, 1e6, dtype=float) + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype=complex) + assert_equal(y.dtype, dtype('complex128')) + + def test_start_stop_array_scalar(self): + lim1 = array([120, 100], dtype="int8") + lim2 = array([-120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = geomspace(lim1[0], lim1[1], 5) + t2 = geomspace(lim2[0], lim2[1], 5) + t3 = geomspace(lim3[0], lim3[1], 5) + t4 = geomspace(120.0, 100.0, 5) + t5 = geomspace(-120.0, -100.0, 5) + t6 = geomspace(1200.0, 1000.0, 5) + + # t3 uses float32, t6 uses float64 + assert_allclose(t1, t4, rtol=1e-2) + assert_allclose(t2, t5, rtol=1e-2) + assert_allclose(t3, t6, rtol=1e-5) + + def test_start_stop_array(self): + # Try to use all special cases. + start = array([1.e0, 32., 1j, -4j, 1 + 1j, -1]) + stop = array([1.e4, 2., 16j, -324j, 10000 + 10000j, 1]) + t1 = geomspace(start, stop, 5) + t2 = stack([geomspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = geomspace(start, stop[0], 5) + t4 = stack([geomspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = geomspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + gs = geomspace(a, b) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0)) + gs = geomspace(a, b, 1) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0, 1)) + + def test_bounds(self): + assert_raises(ValueError, geomspace, 0, 10) + assert_raises(ValueError, geomspace, 10, 0) + assert_raises(ValueError, geomspace, 0, 0) + + +class TestLinspace: + + def test_basic(self): + y = linspace(0, 10) + assert_(len(y) == 50) + y = linspace(2, 10, num=100) + assert_(y[-1] == 10) + y = linspace(2, 10, endpoint=False) + assert_(y[-1] < 10) + assert_raises(ValueError, linspace, 0, 10, num=-1) + + def test_corner(self): + y = list(linspace(0, 1, 1)) + assert_(y == [0.0], y) + assert_raises(TypeError, linspace, 0, 1, num=2.5) + + def test_type(self): + t1 = linspace(0, 1, 0).dtype + t2 = linspace(0, 1, 1).dtype + t3 = linspace(0, 1, 2).dtype + assert_equal(t1, t2) + assert_equal(t2, t3) + + def test_dtype(self): + y = linspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = linspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = linspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_start_stop_array_scalar(self): + lim1 = array([-120, 100], dtype="int8") + lim2 = array([120, 
-100], dtype="int8")
+        lim3 = array([1200, 1000], dtype="uint16")
+        t1 = linspace(lim1[0], lim1[1], 5)
+        t2 = linspace(lim2[0], lim2[1], 5)
+        t3 = linspace(lim3[0], lim3[1], 5)
+        t4 = linspace(-120.0, 100.0, 5)
+        t5 = linspace(120.0, -100.0, 5)
+        t6 = linspace(1200.0, 1000.0, 5)
+        assert_equal(t1, t4)
+        assert_equal(t2, t5)
+        assert_equal(t3, t6)
+
+    def test_start_stop_array(self):
+        start = array([-120, 120], dtype="int8")
+        stop = array([100, -100], dtype="int8")
+        t1 = linspace(start, stop, 5)
+        t2 = stack([linspace(_start, _stop, 5)
+                    for _start, _stop in zip(start, stop)], axis=1)
+        assert_equal(t1, t2)
+        t3 = linspace(start, stop[0], 5)
+        t4 = stack([linspace(_start, stop[0], 5)
+                    for _start in start], axis=1)
+        assert_equal(t3, t4)
+        t5 = linspace(start, stop, 5, axis=-1)
+        assert_equal(t5, t2.T)
+
+    def test_complex(self):
+        lim1 = linspace(1 + 2j, 3 + 4j, 5)
+        t1 = array([1.0 + 2.j, 1.5 + 2.5j, 2.0 + 3j, 2.5 + 3.5j, 3.0 + 4j])
+        lim2 = linspace(1j, 10, 5)
+        t2 = array([0.0 + 1.j, 2.5 + 0.75j, 5.0 + 0.5j, 7.5 + 0.25j, 10.0 + 0j])
+        assert_equal(lim1, t1)
+        assert_equal(lim2, t2)
+
+    def test_physical_quantities(self):
+        a = PhysicalQuantity(0.0)
+        b = PhysicalQuantity(1.0)
+        assert_equal(linspace(a, b), linspace(0.0, 1.0))
+
+    def test_subclass(self):
+        a = array(0).view(PhysicalQuantity2)
+        b = array(1).view(PhysicalQuantity2)
+        ls = linspace(a, b)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0))
+        ls = linspace(a, b, 1)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0, 1))
+
+    def test_array_interface(self):
+        # Regression test for https://github.com/numpy/numpy/pull/6659
+        # Ensure that start/stop can be objects that implement
+        # __array_interface__ and are convertible to numeric scalars
+
+        class Arrayish:
+            """
+            A generic object that supports the __array_interface__ and hence
+            can in principle be converted to a numeric scalar, but is not
+            otherwise recognized as numeric, but also happens to support
+            multiplication by floats.
+
+            Data should be an object that implements the buffer interface,
+            and contains at least 4 bytes.
+            """
+
+            def __init__(self, data):
+                self._data = data
+
+            @property
+            def __array_interface__(self):
+                return {'shape': (), 'typestr': '<i4', 'version': 3,
+                        'data': self._data}
+
+            def __mul__(self, other):
+                # any multiplication acts as an identity here, which is
+                # all linspace needs from this object
+                return self
+
+        one = Arrayish(np.array(1, dtype='<i4'))
+        five = Arrayish(np.array(5, dtype='<i4'))
+
+        assert_equal(np.linspace(one, five), np.linspace(1, 5))
+
+
+class TestAdd_newdoc:
+
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+    @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
+    def test_add_doc(self):
+        # test that np.add_newdoc did attach a docstring successfully:
+        tgt = "Current flat index into the array."
+        assert_equal(np._core.flatiter.index.__doc__[:len(tgt)], tgt)
+        assert_(len(np._core.umath.add.__doc__) > 201)
+        assert_(len(np.lib._index_tricks_impl.mgrid.__doc__) > 300)
+
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+    def test_errors_are_ignored(self):
+        prev_doc = np._core.flatiter.index.__doc__
+        # nothing changed, but error ignored, this should probably
+        # give a warning (or even error) in the future.
+        add_newdoc("numpy._core", "flatiter", ("index", "bad docstring"))
+        assert prev_doc == np._core.flatiter.index.__doc__
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_getlimits.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_getlimits.py
new file mode 100644
index 0000000..721c6ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_getlimits.py
@@ -0,0 +1,205 @@
+""" Test functions for limits module.
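+Covers the finfo/iinfo singleton caches, attribute agreement across dtype
+aliases, repr formatting, and longdouble parameter discovery.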
+ +""" +import types +import warnings + +import pytest + +import numpy as np +from numpy import double, half, longdouble, single +from numpy._core import finfo, iinfo +from numpy._core.getlimits import _discovered_machar, _float_ma +from numpy.testing import assert_, assert_equal, assert_raises + +################################################## + +class TestPythonFloat: + def test_singleton(self): + ftype = finfo(float) + ftype2 = finfo(float) + assert_equal(id(ftype), id(ftype2)) + +class TestHalf: + def test_singleton(self): + ftype = finfo(half) + ftype2 = finfo(half) + assert_equal(id(ftype), id(ftype2)) + +class TestSingle: + def test_singleton(self): + ftype = finfo(single) + ftype2 = finfo(single) + assert_equal(id(ftype), id(ftype2)) + +class TestDouble: + def test_singleton(self): + ftype = finfo(double) + ftype2 = finfo(double) + assert_equal(id(ftype), id(ftype2)) + +class TestLongdouble: + def test_singleton(self): + ftype = finfo(longdouble) + ftype2 = finfo(longdouble) + assert_equal(id(ftype), id(ftype2)) + +def assert_finfo_equal(f1, f2): + # assert two finfo instances have the same attributes + for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep', + 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp', + 'nmant', 'precision', 'resolution', 'tiny', + 'smallest_normal', 'smallest_subnormal'): + assert_equal(getattr(f1, attr), getattr(f2, attr), + f'finfo instances {f1} and {f2} differ on {attr}') + +def assert_iinfo_equal(i1, i2): + # assert two iinfo instances have the same attributes + for attr in ('bits', 'min', 'max'): + assert_equal(getattr(i1, attr), getattr(i2, attr), + f'iinfo instances {i1} and {i2} differ on {attr}') + +class TestFinfo: + def test_basic(self): + dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'], + [np.float16, np.float32, np.float64, np.complex64, + np.complex128])) + for dt1, dt2 in dts: + assert_finfo_equal(finfo(dt1), finfo(dt2)) + + assert_raises(ValueError, finfo, 'i4') + + def test_regression_gh23108(self): + # np.float32(1.0) and np.float64(1.0) have the same hash and are + # equal under the == operator + f1 = np.finfo(np.float32(1.0)) + f2 = np.finfo(np.float64(1.0)) + assert f1 != f2 + + def test_regression_gh23867(self): + class NonHashableWithDtype: + __hash__ = None + dtype = np.dtype('float32') + + x = NonHashableWithDtype() + assert np.finfo(x) == np.finfo(x.dtype) + + +class TestIinfo: + def test_basic(self): + dts = list(zip(['i1', 'i2', 'i4', 'i8', + 'u1', 'u2', 'u4', 'u8'], + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64])) + for dt1, dt2 in dts: + assert_iinfo_equal(iinfo(dt1), iinfo(dt2)) + + assert_raises(ValueError, iinfo, 'f4') + + def test_unsigned_max(self): + types = np._core.sctypes['uint'] + for T in types: + with np.errstate(over="ignore"): + max_calculated = T(0) - T(1) + assert_equal(iinfo(T).max, max_calculated) + +class TestRepr: + def test_iinfo_repr(self): + expected = "iinfo(min=-32768, max=32767, dtype=int16)" + assert_equal(repr(np.iinfo(np.int16)), expected) + + def test_finfo_repr(self): + expected = "finfo(resolution=1e-06, min=-3.4028235e+38,"\ + " max=3.4028235e+38, dtype=float32)" + assert_equal(repr(np.finfo(np.float32)), expected) + + +def test_instances(): + # Test the finfo and iinfo results on numeric instances agree with + # the results on the corresponding types + + for c in [int, np.int16, np.int32, np.int64]: + class_iinfo = iinfo(c) + instance_iinfo = iinfo(c(12)) + + assert_iinfo_equal(class_iinfo, instance_iinfo) + + for c in [float, np.float16, np.float32, 
np.float64]:
+        class_finfo = finfo(c)
+        instance_finfo = finfo(c(1.2))
+        assert_finfo_equal(class_finfo, instance_finfo)
+
+    with pytest.raises(ValueError):
+        iinfo(10.)
+
+    with pytest.raises(ValueError):
+        iinfo('hi')
+
+    with pytest.raises(ValueError):
+        finfo(np.int64(1))
+
+
+def assert_ma_equal(discovered, ma_like):
+    # Check MachAr-like objects same as calculated MachAr instances
+    for key, value in discovered.__dict__.items():
+        assert_equal(value, getattr(ma_like, key))
+        if hasattr(value, 'shape'):
+            assert_equal(value.shape, getattr(ma_like, key).shape)
+            assert_equal(value.dtype, getattr(ma_like, key).dtype)
+
+
+def test_known_types():
+    # Test we are correctly compiling parameters for known types
+    for ftype, ma_like in ((np.float16, _float_ma[16]),
+                           (np.float32, _float_ma[32]),
+                           (np.float64, _float_ma[64])):
+        assert_ma_equal(_discovered_machar(ftype), ma_like)
+    # Suppress warning for broken discovery of double double on PPC
+    with np.errstate(all='ignore'):
+        ld_ma = _discovered_machar(np.longdouble)
+    bytes = np.dtype(np.longdouble).itemsize
+    if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
+        # 80-bit extended precision
+        assert_ma_equal(ld_ma, _float_ma[80])
+    elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
+        # IEEE 754 128-bit
+        assert_ma_equal(ld_ma, _float_ma[128])
+
+
+def test_subnormal_warning():
+    """Test that the subnormal is zero warning is not being raised."""
+    with np.errstate(all='ignore'):
+        ld_ma = _discovered_machar(np.longdouble)
+    bytes = np.dtype(np.longdouble).itemsize
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
+            # 80-bit extended precision
+            ld_ma.smallest_subnormal
+            assert len(w) == 0
+        elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
+            # IEEE 754 128-bit
+            ld_ma.smallest_subnormal
+            assert len(w) == 0
+        else:
+            # Double double
+            ld_ma.smallest_subnormal
+            # This test may fail on some platforms
+            assert len(w) == 0
+
+
+def test_plausible_finfo():
+    # Assert that finfo returns reasonable results for all types
+    for ftype in np._core.sctypes['float'] + np._core.sctypes['complex']:
+        info = np.finfo(ftype)
+        assert_(info.nmant > 1)
+        assert_(info.minexp < -1)
+        assert_(info.maxexp > 1)
+
+
+class TestRuntimeSubscriptable:
+    def test_finfo_generic(self):
+        assert isinstance(np.finfo[np.float64], types.GenericAlias)
+
+    def test_iinfo_generic(self):
+        assert isinstance(np.iinfo[np.int_], types.GenericAlias)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_half.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_half.py
new file mode 100644
index 0000000..68f17b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_half.py
@@ -0,0 +1,568 @@
+import platform
+
+import pytest
+
+import numpy as np
+from numpy import float16, float32, float64, uint16
+from numpy.testing import IS_WASM, assert_, assert_equal
+
+
+def assert_raises_fpe(strmatch, callable, *args, **kwargs):
+    try:
+        callable(*args, **kwargs)
+    except FloatingPointError as exc:
+        assert_(str(exc).find(strmatch) >= 0,
+                f"Did not raise floating point {strmatch} error")
+    else:
+        assert_(False,
+                f"Did not raise floating point {strmatch} error")
+
+class TestHalf:
+    def setup_method(self):
+        # An array of all possible float16 values
+        self.all_f16 = np.arange(0x10000, dtype=uint16)
+        self.all_f16.dtype = float16
+
+        # NaN value can cause an invalid FP exception if HW is being used
+
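+        # (some of the 0x7c01..0x7fff / 0xfc01..0xffff bit patterns are
+        # signalling NaNs, and casting those can raise FE_INVALID)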
with np.errstate(invalid='ignore'): + self.all_f32 = np.array(self.all_f16, dtype=float32) + self.all_f64 = np.array(self.all_f16, dtype=float64) + + # An array of all non-NaN float16 values, in sorted order + self.nonan_f16 = np.concatenate( + (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), + np.arange(0x0000, 0x7c01, 1, dtype=uint16))) + self.nonan_f16.dtype = float16 + self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) + self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) + + # An array of all finite float16 values, in sorted order + self.finite_f16 = self.nonan_f16[1:-1] + self.finite_f32 = self.nonan_f32[1:-1] + self.finite_f64 = self.nonan_f64[1:-1] + + def test_half_conversions(self): + """Checks that all 16-bit values survive conversion + to/from 32-bit and 64-bit float""" + # Because the underlying routines preserve the NaN bits, every + # value is preserved when converting to/from other floats. + + # Convert from float32 back to float16 + with np.errstate(invalid='ignore'): + b = np.array(self.all_f32, dtype=float16) + # avoid testing NaNs due to differing bit patterns in Q/S NaNs + b_nn = b == b + assert_equal(self.all_f16[b_nn].view(dtype=uint16), + b[b_nn].view(dtype=uint16)) + + # Convert from float64 back to float16 + with np.errstate(invalid='ignore'): + b = np.array(self.all_f64, dtype=float16) + b_nn = b == b + assert_equal(self.all_f16[b_nn].view(dtype=uint16), + b[b_nn].view(dtype=uint16)) + + # Convert float16 to longdouble and back + # This doesn't necessarily preserve the extra NaN bits, + # so exclude NaNs. + a_ld = np.array(self.nonan_f16, dtype=np.longdouble) + b = np.array(a_ld, dtype=float16) + assert_equal(self.nonan_f16.view(dtype=uint16), + b.view(dtype=uint16)) + + # Check the range for which all integers can be represented + i_int = np.arange(-2048, 2049) + i_f16 = np.array(i_int, dtype=float16) + j = np.array(i_f16, dtype=int) + assert_equal(i_int, j) + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_to_string(self, string_dt): + # Currently uses S/U32 (which is sufficient for float32) + expected_dt = np.dtype(f"{string_dt}32") + assert np.promote_types(np.float16, string_dt) == expected_dt + assert np.promote_types(string_dt, np.float16) == expected_dt + + arr = np.ones(3, dtype=np.float16).astype(string_dt) + assert arr.dtype == expected_dt + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_from_string(self, string_dt): + string = np.array("3.1416", dtype=string_dt) + assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16) + + @pytest.mark.parametrize("offset", [None, "up", "down"]) + @pytest.mark.parametrize("shift", [None, "up", "down"]) + @pytest.mark.parametrize("float_t", [np.float32, np.float64]) + def test_half_conversion_rounding(self, float_t, shift, offset): + # Assumes that round to even is used during casting. 
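+        # Ties go to the neighbour with an even significand; e.g. the
+        # float16 spacing above 2048 is 2, so 2049.0 rounds down to 2048.0
+        # while 2051.0 rounds up to 2052.0.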
+ max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) + + # Test all (positive) finite numbers, denormals are most interesting + # however: + f16s_patterns = np.arange(0, max_pattern + 1, dtype=np.uint16) + f16s_float = f16s_patterns.view(np.float16).astype(float_t) + + # Shift the values by half a bit up or a down (or do not shift), + if shift == "up": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:] + elif shift == "down": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1] + else: + f16s_float = f16s_float[1:-1] + + # Increase the float by a minimal value: + if offset == "up": + f16s_float = np.nextafter(f16s_float, float_t(np.inf)) + elif offset == "down": + f16s_float = np.nextafter(f16s_float, float_t(-np.inf)) + + # Convert back to float16 and its bit pattern: + res_patterns = f16s_float.astype(np.float16).view(np.uint16) + + # The above calculation tries the original values, or the exact + # midpoints between the float16 values. It then further offsets them + # by as little as possible. If no offset occurs, "round to even" + # logic will be necessary, an arbitrarily small offset should cause + # normal up/down rounding always. + + # Calculate the expected pattern: + cmp_patterns = f16s_patterns[1:-1].copy() + + if shift == "down" and offset != "up": + shift_pattern = -1 + elif shift == "up" and offset != "down": + shift_pattern = 1 + else: + # There cannot be a shift, either shift is None, so all rounding + # will go back to original, or shift is reduced by offset too much. + shift_pattern = 0 + + # If rounding occurs, is it normal rounding or round to even? + if offset is None: + # Round to even occurs, modify only non-even, cast to allow + (-1) + cmp_patterns[0::2].view(np.int16)[...] += shift_pattern + else: + cmp_patterns.view(np.int16)[...] += shift_pattern + + assert_equal(res_patterns, cmp_patterns) + + @pytest.mark.parametrize(["float_t", "uint_t", "bits"], + [(np.float32, np.uint32, 23), + (np.float64, np.uint64, 52)]) + def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): + # Test specifically that all bits are considered when deciding + # whether round to even should occur (i.e. no bits are lost at the + # end. Compare also gh-12721. 
The most bits can get lost for the + # smallest denormal: + smallest_value = np.uint16(1).view(np.float16).astype(float_t) + assert smallest_value == 2**-24 + + # Will be rounded to zero based on round to even rule: + rounded_to_zero = smallest_value / float_t(2) + assert rounded_to_zero.astype(np.float16) == 0 + + # The significand will be all 0 for the float_t, test that we do not + # lose the lower ones of these: + for i in range(bits): + # slightly increasing the value should make it round up: + larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i) + larger_value = larger_pattern.view(float_t) + assert larger_value.astype(np.float16) == smallest_value + + def test_nans_infs(self): + with np.errstate(all='ignore'): + # Check some of the ufuncs + assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) + assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) + assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) + assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.spacing(float16(65504)), np.inf) + + # Check comparisons of all values with NaN + nan = float16(np.nan) + + assert_(not (self.all_f16 == nan).any()) + assert_(not (nan == self.all_f16).any()) + + assert_((self.all_f16 != nan).all()) + assert_((nan != self.all_f16).all()) + + assert_(not (self.all_f16 < nan).any()) + assert_(not (nan < self.all_f16).any()) + + assert_(not (self.all_f16 <= nan).any()) + assert_(not (nan <= self.all_f16).any()) + + assert_(not (self.all_f16 > nan).any()) + assert_(not (nan > self.all_f16).any()) + + assert_(not (self.all_f16 >= nan).any()) + assert_(not (nan >= self.all_f16).any()) + + def test_half_values(self): + """Confirms a small number of known half values""" + a = np.array([1.0, -1.0, + 2.0, -2.0, + 0.0999755859375, 0.333251953125, # 1/10, 1/3 + 65504, -65504, # Maximum magnitude + 2.0**(-14), -2.0**(-14), # Minimum normal + 2.0**(-24), -2.0**(-24), # Minimum subnormal + 0, -1 / 1e1000, # Signed zeros + np.inf, -np.inf]) + b = np.array([0x3c00, 0xbc00, + 0x4000, 0xc000, + 0x2e66, 0x3555, + 0x7bff, 0xfbff, + 0x0400, 0x8400, + 0x0001, 0x8001, + 0x0000, 0x8000, + 0x7c00, 0xfc00], dtype=uint16) + b.dtype = float16 + assert_equal(a, b) + + def test_half_rounding(self): + """Checks that rounding when converting to half is correct""" + a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal + 2.0**-25, # Underflows to zero (nearest even mode) + 2.0**-26, # Underflows to zero + 1.0 + 2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) + 1.0 + 2.0**-11, # rounds to 1.0 (nearest even mode) + 1.0 + 2.0**-12, # rounds to 1.0 + 65519, # rounds to 65504 + 65520], # rounds to inf + dtype=float64) + rounded = [2.0**-24, + 0.0, + 0.0, + 1.0 + 2.0**(-10), + 1.0, + 1.0, + 65504, + np.inf] + + # Check float64->float16 rounding + with np.errstate(over="ignore"): + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + # Check float32->float16 rounding + a = np.array(a, dtype=float32) + with np.errstate(over="ignore"): + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + def test_half_correctness(self): + """Take every finite float16, and check the casting functions with + a manual conversion.""" + + # Create an array of all finite float16s + a_bits = self.finite_f16.view(dtype=uint16) + + # Convert to 64-bit float manually + a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) + a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15 + a_man = (a_bits & 0x03ff) * 2.0**(-10) + # Implicit bit of normalized floats + a_man[a_exp != 
-15] += 1 + # Denormalized exponent is -14 + a_exp[a_exp == -15] = -14 + + a_manual = a_sgn * a_man * 2.0**a_exp + + a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + if len(a32_fail) != 0: + bad_index = a32_fail[0] + assert_equal(self.finite_f32, a_manual, + "First non-equal is half value 0x%x -> %g != %g" % + (a_bits[bad_index], + self.finite_f32[bad_index], + a_manual[bad_index])) + + a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + if len(a64_fail) != 0: + bad_index = a64_fail[0] + assert_equal(self.finite_f64, a_manual, + "First non-equal is half value 0x%x -> %g != %g" % + (a_bits[bad_index], + self.finite_f64[bad_index], + a_manual[bad_index])) + + def test_half_ordering(self): + """Make sure comparisons are working right""" + + # All non-NaN float16 values in reverse order + a = self.nonan_f16[::-1].copy() + + # 32-bit float copy + b = np.array(a, dtype=float32) + + # Should sort the same + a.sort() + b.sort() + assert_equal(a, b) + + # Comparisons should work + assert_((a[:-1] <= a[1:]).all()) + assert_(not (a[:-1] > a[1:]).any()) + assert_((a[1:] >= a[:-1]).all()) + assert_(not (a[1:] < a[:-1]).any()) + # All != except for +/-0 + assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size - 2) + assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size - 2) + + def test_half_funcs(self): + """Test the various ArrFuncs""" + + # fill + assert_equal(np.arange(10, dtype=float16), + np.arange(10, dtype=float32)) + + # fillwithscalar + a = np.zeros((5,), dtype=float16) + a.fill(1) + assert_equal(a, np.ones((5,), dtype=float16)) + + # nonzero and copyswap + a = np.array([0, 0, -1, -1 / 1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + a = a.byteswap() + a = a.view(a.dtype.newbyteorder()) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + + # dot + a = np.arange(0, 10, 0.5, dtype=float16) + b = np.ones((20,), dtype=float16) + assert_equal(np.dot(a, b), + 95) + + # argmax + a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 4) + a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 5) + + # getitem + a = np.arange(10, dtype=float16) + for i in range(10): + assert_equal(a.item(i), i) + + def test_spacing_nextafter(self): + """Test np.spacing and np.nextafter""" + # All non-negative finite #'s + a = np.arange(0x7c00, dtype=uint16) + hinf = np.array((np.inf,), dtype=float16) + hnan = np.array((np.nan,), dtype=float16) + a_f16 = a.view(dtype=float16) + + assert_equal(np.spacing(a_f16[:-1]), a_f16[1:] - a_f16[:-1]) + + assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) + assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) + + assert_equal(np.nextafter(hinf, a_f16), a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1]) + + assert_equal(np.nextafter(hinf, hinf), hinf) + assert_equal(np.nextafter(hinf, -hinf), a_f16[-1]) + assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, -hinf), -hinf) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + assert_equal(np.nextafter(hnan, hnan), hnan) + assert_equal(np.nextafter(hinf, hnan), hnan) + assert_equal(np.nextafter(hnan, hinf), hnan) + + # switch to negatives + a |= 0x8000 + + assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) + assert_equal(np.spacing(a_f16[1:]), a_f16[:-1] - a_f16[1:]) + + assert_equal(np.nextafter(a_f16[0], hinf), 
-a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) + assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) + + assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1]) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + def test_half_ufuncs(self): + """Test the various ufuncs""" + + a = np.array([0, 1, 2, 4, 2], dtype=float16) + b = np.array([-2, 5, 1, 4, 3], dtype=float16) + c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) + + assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) + assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) + assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) + assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) + + assert_equal(np.equal(a, b), [False, False, False, True, False]) + assert_equal(np.not_equal(a, b), [True, True, True, False, True]) + assert_equal(np.less(a, b), [False, True, False, False, True]) + assert_equal(np.less_equal(a, b), [False, True, False, True, True]) + assert_equal(np.greater(a, b), [True, False, True, False, False]) + assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) + assert_equal(np.logical_and(a, b), [False, True, True, True, True]) + assert_equal(np.logical_or(a, b), [True, True, True, True, True]) + assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) + assert_equal(np.logical_not(a), [True, False, False, False, False]) + + assert_equal(np.isnan(c), [False, False, False, True, False]) + assert_equal(np.isinf(c), [False, False, True, False, False]) + assert_equal(np.isfinite(c), [True, True, False, False, True]) + assert_equal(np.signbit(b), [True, False, False, False, False]) + + assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) + + assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) + + x = np.maximum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [0, 5, 1, 0, 6]) + + assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) + + x = np.minimum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [-2, -1, -np.inf, 0, 3]) + + assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) + assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) + assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) + assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) + + assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) + assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) + assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) + assert_equal(np.square(b), [4, 25, 1, 16, 9]) + assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) + assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) + assert_equal(np.conjugate(b), b) + assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) + assert_equal(np.negative(b), [2, -5, -1, -4, -3]) + assert_equal(np.positive(b), b) + assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) + assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) + assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) + assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) + + def test_half_coercion(self): + """Test that half gets coerced properly with the other types""" + a16 = np.array((1,), dtype=float16) + a32 = np.array((1,), dtype=float32) + b16 = float16(1) + b32 = float32(1) + + assert np.power(a16, 2).dtype == float16 + assert np.power(a16, 2.0).dtype == float16 + assert np.power(a16, b16).dtype == float16 + assert np.power(a16, b32).dtype == float32 + assert np.power(a16, a16).dtype == float16 + assert np.power(a16, a32).dtype == float32 + + 
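+        # A Python int/float exponent must not upcast the float16 operand,
+        # while any float32 operand (scalar or 0-d array) promotes the
+        # result to float32.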
assert np.power(b16, 2).dtype == float16
+        assert np.power(b16, 2.0).dtype == float16
+        assert np.power(b16, b16).dtype == float16
+        assert np.power(b16, b32).dtype == float32
+        assert np.power(b16, a16).dtype == float16
+        assert np.power(b16, a32).dtype == float32
+
+        assert np.power(a32, a16).dtype == float32
+        assert np.power(a32, b16).dtype == float32
+        assert np.power(b32, a16).dtype == float32
+        assert np.power(b32, b16).dtype == float32
+
+    @pytest.mark.skipif(platform.machine() == "armv5tel",
+                        reason="See gh-413.")
+    @pytest.mark.skipif(IS_WASM,
+                        reason="fp exceptions don't work in wasm.")
+    def test_half_fpe(self):
+        with np.errstate(all='raise'):
+            sx16 = np.array((1e-4,), dtype=float16)
+            bx16 = np.array((1e4,), dtype=float16)
+            sy16 = float16(1e-4)
+            by16 = float16(1e4)
+
+            # Underflow errors
+            assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sx16)
+            assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sy16)
+            assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sx16)
+            assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sy16)
+            assert_raises_fpe('underflow', lambda a, b: a / b, sx16, bx16)
+            assert_raises_fpe('underflow', lambda a, b: a / b, sx16, by16)
+            assert_raises_fpe('underflow', lambda a, b: a / b, sy16, bx16)
+            assert_raises_fpe('underflow', lambda a, b: a / b, sy16, by16)
+            assert_raises_fpe('underflow', lambda a, b: a / b,
+                              float16(2.**-14), float16(2**11))
+            assert_raises_fpe('underflow', lambda a, b: a / b,
+                              float16(-2.**-14), float16(2**11))
+            assert_raises_fpe('underflow', lambda a, b: a / b,
+                              float16(2.**-14 + 2**-24), float16(2))
+            assert_raises_fpe('underflow', lambda a, b: a / b,
+                              float16(-2.**-14 - 2**-24), float16(2))
+            assert_raises_fpe('underflow', lambda a, b: a / b,
+                              float16(2.**-14 + 2**-23), float16(4))
+
+            # Overflow errors
+            assert_raises_fpe('overflow', lambda a, b: a * b, bx16, bx16)
+            assert_raises_fpe('overflow', lambda a, b: a * b, bx16, by16)
+            assert_raises_fpe('overflow', lambda a, b: a * b, by16, bx16)
+            assert_raises_fpe('overflow', lambda a, b: a * b, by16, by16)
+            assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sx16)
+            assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sy16)
+            assert_raises_fpe('overflow', lambda a, b: a / b, by16, sx16)
+            assert_raises_fpe('overflow', lambda a, b: a / b, by16, sy16)
+            assert_raises_fpe('overflow', lambda a, b: a + b,
+                              float16(65504), float16(17))
+            assert_raises_fpe('overflow', lambda a, b: a - b,
+                              float16(-65504), float16(17))
+            assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
+            assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
+            assert_raises_fpe('overflow', np.spacing, float16(65504))
+
+            # Invalid value errors
+            assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
+            assert_raises_fpe('invalid', np.spacing, float16(np.inf))
+            assert_raises_fpe('invalid', np.spacing, float16(np.nan))
+
+            # These should not raise
+            float16(65472) + float16(32)
+            float16(2**-13) / float16(2)
+            float16(2**-14) / float16(2**10)
+            np.spacing(float16(-65504))
+            np.nextafter(float16(65504), float16(-np.inf))
+            np.nextafter(float16(-65504), float16(np.inf))
+            np.nextafter(float16(np.inf), float16(0))
+            np.nextafter(float16(-np.inf), float16(0))
+            np.nextafter(float16(0), float16(np.nan))
+            np.nextafter(float16(np.nan), float16(0))
+            float16(2**-14) / float16(2**10)
+            float16(-2**-14) / float16(2**10)
+            float16(2**-14 + 2**-23) / float16(2)
+            float16(-2**-14 - 2**-23) / float16(2)
+
+    def test_half_array_interface(self):
+        """Test that half is compatible with __array_interface__"""
+        class Dummy:
+            pass
+
+        a = np.ones((1,), dtype=float16)
+        b = Dummy()
+        b.__array_interface__ = a.__array_interface__
+        c = np.array(b)
+        assert_(c.dtype == float16)
+        assert_equal(a, c)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_hashtable.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_hashtable.py
new file mode 100644
index 0000000..74be521
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_hashtable.py
@@ -0,0 +1,35 @@
+import random
+
+import pytest
+from numpy._core._multiarray_tests import identityhash_tester
+
+
+@pytest.mark.parametrize("key_length", [1, 3, 6])
+@pytest.mark.parametrize("length", [1, 16, 2000])
+def test_identity_hashtable(key_length, length):
+    # use a 20 object pool for everything (duplicates will happen)
+    pool = [object() for i in range(20)]
+    keys_vals = []
+    for i in range(length):
+        keys = tuple(random.choices(pool, k=key_length))
+        keys_vals.append((keys, random.choice(pool)))
+
+    dictionary = dict(keys_vals)
+
+    # add a random item at the end:
+    keys_vals.append(random.choice(keys_vals))
+    # the expected one could be different with duplicates:
+    expected = dictionary[keys_vals[-1][0]]
+
+    res = identityhash_tester(key_length, keys_vals, replace=True)
+    assert res is expected
+
+    if length == 1:
+        return
+
+    # add a new item with a key that is already used and a new value, this
+    # should error if replace is False, see gh-26690
+    new_key = (keys_vals[1][0], object())
+    keys_vals[0] = new_key
+    with pytest.raises(RuntimeError):
+        identityhash_tester(key_length, keys_vals)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_indexerrors.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_indexerrors.py
new file mode 100644
index 0000000..02110c2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_indexerrors.py
@@ -0,0 +1,125 @@
+import numpy as np
+from numpy.testing import (
+    assert_raises,
+    assert_raises_regex,
+)
+
+
+class TestIndexErrors:
+    '''Tests to exercise indexerrors not covered by other tests.'''
+
+    def test_arraytypes_fasttake(self):
+        'take from a 0-length dimension'
+        x = np.empty((2, 3, 0, 4))
+        assert_raises(IndexError, x.take, [0], axis=2)
+        assert_raises(IndexError, x.take, [1], axis=2)
+        assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
+        assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
+
+    def test_take_from_object(self):
+        # Check exception taking from object array
+        d = np.zeros(5, dtype=object)
+        assert_raises(IndexError, d.take, [6])
+
+        # Check exception taking from array with a 0-sized dimension
+        d = np.zeros((5, 0), dtype=object)
+        assert_raises(IndexError, d.take, [1], axis=1)
+        assert_raises(IndexError, d.take, [0], axis=1)
+        assert_raises(IndexError, d.take, [0])
+        assert_raises(IndexError, d.take, [0], mode='wrap')
+        assert_raises(IndexError, d.take, [0], mode='clip')
+
+    def test_multiindex_exceptions(self):
+        a = np.empty(5, dtype=object)
+        assert_raises(IndexError, a.item, 20)
+        a = np.empty((5, 0), dtype=object)
+        assert_raises(IndexError, a.item, (0, 0))
+
+    def test_put_exceptions(self):
+        a = np.zeros((5, 5))
+        assert_raises(IndexError, a.put, 100, 0)
+        a = np.zeros((5, 5), dtype=object)
+        assert_raises(IndexError, a.put, 100, 0)
+        a = np.zeros((5, 5, 0))
+        assert_raises(IndexError, a.put, 100, 0)
+        a = np.zeros((5, 5, 0), dtype=object)
+        assert_raises(IndexError, a.put, 100, 0)
+
+    def 
test_iterators_exceptions(self): + "cases in iterators.c" + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a[0, 5, None, 2]) + assert_raises(IndexError, lambda: a[0, 5, 0, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) + assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) + + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a[0, 0, None, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + + def test_mapping(self): + "cases from mapping.c" + + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros((0, 10)) + assert_raises(IndexError, lambda: a[12]) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(10, 20)]) + assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, 0)]) + assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) + + a = np.zeros((10,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + a = np.zeros((0,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(1, [1, 20])]) + assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, [0, 1])]) + assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) + + def test_mapping_error_message(self): + a = np.zeros((3, 5)) + index = (1, 2, 3, 4, 5) + assert_raises_regex( + IndexError, + "too many indices for array: " + "array is 2-dimensional, but 5 were indexed", + lambda: a[index]) + + def test_methods(self): + "cases from methods.c" + + a = np.zeros((3, 3)) + assert_raises(IndexError, lambda: a.item(100)) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_indexing.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_indexing.py new file mode 100644 index 0000000..e722d0c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_indexing.py @@ -0,0 +1,1455 @@ +import functools +import operator +import sys +import warnings +from itertools import product + +import pytest +from numpy._core._multiarray_tests import array_indexing + +import numpy as np +from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning +from numpy.testing import ( + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) + + +class TestIndexing: + def test_index_no_floats(self): + a = np.array([[[5]]]) + + assert_raises(IndexError, lambda: a[0.0]) + assert_raises(IndexError, lambda: a[0, 0.0]) + 
assert_raises(IndexError, lambda: a[0.0, 0]) + assert_raises(IndexError, lambda: a[0.0, :]) + assert_raises(IndexError, lambda: a[:, 0.0]) + assert_raises(IndexError, lambda: a[:, 0.0, :]) + assert_raises(IndexError, lambda: a[0.0, :, :]) + assert_raises(IndexError, lambda: a[0, 0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0, 0]) + assert_raises(IndexError, lambda: a[0, 0.0, 0]) + assert_raises(IndexError, lambda: a[-1.4]) + assert_raises(IndexError, lambda: a[0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0]) + assert_raises(IndexError, lambda: a[-1.4, :]) + assert_raises(IndexError, lambda: a[:, -1.4]) + assert_raises(IndexError, lambda: a[:, -1.4, :]) + assert_raises(IndexError, lambda: a[-1.4, :, :]) + assert_raises(IndexError, lambda: a[0, 0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0, 0]) + assert_raises(IndexError, lambda: a[0, -1.4, 0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0, :]) + + def test_slicing_no_floats(self): + a = np.array([[5]]) + + # start as float. + assert_raises(TypeError, lambda: a[0.0:]) + assert_raises(TypeError, lambda: a[0:, 0.0:2]) + assert_raises(TypeError, lambda: a[0.0::2, :0]) + assert_raises(TypeError, lambda: a[0.0:1:2, :]) + assert_raises(TypeError, lambda: a[:, 0.0:]) + # stop as float. + assert_raises(TypeError, lambda: a[:0.0]) + assert_raises(TypeError, lambda: a[:0, 1:2.0]) + assert_raises(TypeError, lambda: a[:0.0:2, :0]) + assert_raises(TypeError, lambda: a[:0.0, :]) + assert_raises(TypeError, lambda: a[:, 0:4.0:2]) + # step as float. + assert_raises(TypeError, lambda: a[::1.0]) + assert_raises(TypeError, lambda: a[0:, :2:2.0]) + assert_raises(TypeError, lambda: a[1::4.0, :0]) + assert_raises(TypeError, lambda: a[::5.0, :]) + assert_raises(TypeError, lambda: a[:, 0:4:2.0]) + # mixed. + assert_raises(TypeError, lambda: a[1.0:2:2.0]) + assert_raises(TypeError, lambda: a[1.0::2.0]) + assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) + assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) + assert_raises(TypeError, lambda: a[1.0:5.0:5.0, :]) + assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) + # should still get the DeprecationWarning if step = 0. + assert_raises(TypeError, lambda: a[::0.0]) + + def test_index_no_array_to_index(self): + # No non-scalar arrays. + a = np.array([[[1]]]) + + assert_raises(TypeError, lambda: a[a:a:a]) + + def test_none_index(self): + # `None` index adds newaxis + a = np.array([1, 2, 3]) + assert_equal(a[None], a[np.newaxis]) + assert_equal(a[None].ndim, a.ndim + 1) + + def test_empty_tuple_index(self): + # Empty tuple index creates a view + a = np.array([1, 2, 3]) + assert_equal(a[()], a) + assert_(a[()].base is a) + a = np.array(0) + assert_(isinstance(a[()], np.int_)) + + def test_void_scalar_empty_tuple(self): + s = np.zeros((), dtype='V4') + assert_equal(s[()].dtype, s.dtype) + assert_equal(s[()], s) + assert_equal(type(s[...]), np.ndarray) + + def test_same_kind_index_casting(self): + # Indexes should be cast with same-kind and not safe, even if that + # is somewhat unsafe. So test various different code paths. 
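+        # (uintp -> intp is only a 'same_kind' cast, since very large
+        # unsigned values would overflow, yet indexing must accept it)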
+        index = np.arange(5)
+        u_index = index.astype(np.uintp)
+        arr = np.arange(10)
+
+        assert_array_equal(arr[index], arr[u_index])
+        arr[u_index] = np.arange(5)
+        assert_array_equal(arr, np.arange(10))
+
+        arr = np.arange(10).reshape(5, 2)
+        assert_array_equal(arr[index], arr[u_index])
+
+        arr[u_index] = np.arange(5)[:, None]
+        assert_array_equal(arr, np.arange(5)[:, None].repeat(2, axis=1))
+
+        arr = np.arange(25).reshape(5, 5)
+        assert_array_equal(arr[u_index, u_index], arr[index, index])
+
+    def test_empty_fancy_index(self):
+        # Empty list index creates an empty array
+        # with the same dtype (but with weird shape)
+        a = np.array([1, 2, 3])
+        assert_equal(a[[]], [])
+        assert_equal(a[[]].dtype, a.dtype)
+
+        b = np.array([], dtype=np.intp)
+        assert_equal(a[b], [])
+        assert_equal(a[b].dtype, a.dtype)
+
+        b = np.array([])
+        assert_raises(IndexError, a.__getitem__, b)
+
+    def test_gh_26542(self):
+        a = np.array([0, 1, 2])
+        idx = np.array([2, 1, 0])
+        a[idx] = a
+        expected = np.array([2, 1, 0])
+        assert_equal(a, expected)
+
+    def test_gh_26542_2d(self):
+        a = np.array([[0, 1, 2]])
+        idx_row = np.zeros(3, dtype=int)
+        idx_col = np.array([2, 1, 0])
+        a[idx_row, idx_col] = a
+        expected = np.array([[2, 1, 0]])
+        assert_equal(a, expected)
+
+    def test_gh_26542_index_overlap(self):
+        arr = np.arange(100)
+        expected_vals = np.copy(arr[:-10])
+        arr[10:] = arr[:-10]
+        actual_vals = arr[10:]
+        assert_equal(actual_vals, expected_vals)
+
+    def test_gh_26844(self):
+        expected = [0, 1, 3, 3, 3]
+        a = np.arange(5)
+        a[2:][a[:-2]] = 3
+        assert_equal(a, expected)
+
+    def test_gh_26844_segfault(self):
+        # check for absence of segfault for:
+        # https://github.com/numpy/numpy/pull/26958/files#r1854589178
+        a = np.arange(5)
+        expected = [0, 1, 3, 3, 3]
+        a[2:][None, a[:-2]] = 3
+        assert_equal(a, expected)
+
+    def test_ellipsis_index(self):
+        a = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]])
+        assert_(a[...] is not a)
+        assert_equal(a[...], a)
+        # `a[...]` was `a` in numpy <1.9.
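+        # Since then it is a proper view, so `.base` must point back at `a`.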
+ assert_(a[...].base is a) + + # Slicing with ellipsis can skip an + # arbitrary number of dimensions + assert_equal(a[0, ...], a[0]) + assert_equal(a[0, ...], a[0, :]) + assert_equal(a[..., 0], a[:, 0]) + + # Slicing with ellipsis always results + # in an array, not a scalar + assert_equal(a[0, ..., 1], np.array(2)) + + # Assignment with `(Ellipsis,)` on 0-d arrays + b = np.array(1) + b[(Ellipsis,)] = 2 + assert_equal(b, 2) + + def test_single_int_index(self): + # Single integer index selects one row + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[0], [1, 2, 3]) + assert_equal(a[-1], [7, 8, 9]) + + # Index out of bounds produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 30) + # Index overflow produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 64) + + def test_single_bool_index(self): + # Single boolean index + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[np.array(True)], a[None]) + assert_equal(a[np.array(False)], a[None][0:0]) + + def test_boolean_shape_mismatch(self): + arr = np.ones((5, 4, 3)) + + index = np.array([True]) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.array([False] * 6) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.zeros((4, 4), dtype=bool) + assert_raises(IndexError, arr.__getitem__, index) + + assert_raises(IndexError, arr.__getitem__, (slice(None), index)) + + def test_boolean_indexing_onedim(self): + # Indexing a 2-dimensional array with + # boolean array of length one + a = np.array([[0., 0., 0.]]) + b = np.array([True], dtype=bool) + assert_equal(a[b], a) + # boolean assignment + a[b] = 1. + assert_equal(a, [[1., 1., 1.]]) + + def test_boolean_assignment_value_mismatch(self): + # A boolean assignment should fail when the shape of the values + # cannot be broadcast to the subscription. (see also gh-3458) + a = np.arange(4) + + def f(a, v): + a[a > -1] = v + + assert_raises(ValueError, f, a, []) + assert_raises(ValueError, f, a, [1, 2, 3]) + assert_raises(ValueError, f, a[:1], [1, 2, 3]) + + def test_boolean_assignment_needs_api(self): + # See also gh-7666 + # This caused a segfault on Python 2 due to the GIL not being + # held when the iterator does not need it, but the transfer function + # does + arr = np.zeros(1000) + indx = np.zeros(1000, dtype=bool) + indx[:100] = True + arr[indx] = np.ones(100, dtype=object) + + expected = np.zeros(1000) + expected[:100] = 1 + assert_array_equal(arr, expected) + + def test_boolean_indexing_twodim(self): + # Indexing a 2-dimensional array with + # 2-dimensional boolean array + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) + assert_equal(a[b], [1, 3, 5, 7, 9]) + assert_equal(a[b[1]], [[4, 5, 6]]) + assert_equal(a[b[0]], a[b[2]]) + + # boolean assignment + a[b] = 0 + assert_equal(a, [[0, 2, 0], + [4, 0, 6], + [0, 8, 0]]) + + def test_boolean_indexing_list(self): + # Regression test for #13715. It's a use-after-free bug which the + # test won't directly catch, but it will show up in valgrind. + a = np.array([1, 2, 3]) + b = [True, False, True] + # Two variants of the test because the first takes a fast path + assert_equal(a[b], [1, 3]) + assert_equal(a[None, b], [[1, 3]]) + + def test_reverse_strides_and_subspace_bufferinit(self): + # This tests that the strides are not reversed for simple and + # subspace fancy indexing. 
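+        # All entries of b are 0, so a[0] must receive the *last* element
+        # of c (which is 0) if the assignment iterates in index order.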
+ a = np.ones(5) + b = np.zeros(5, dtype=np.intp)[::-1] + c = np.arange(5)[::-1] + + a[b] = c + # If the strides are not reversed, the 0 in the arange comes last. + assert_equal(a[0], 0) + + # This also tests that the subspace buffer is initialized: + a = np.ones((5, 2)) + c = np.arange(10).reshape(5, 2)[::-1] + a[b, :] = c + assert_equal(a[0], [0, 1]) + + def test_reversed_strides_result_allocation(self): + # Test a bug when calculating the output strides for a result array + # when the subspace size was 1 (and test other cases as well) + a = np.arange(10)[:, None] + i = np.arange(10)[::-1] + assert_array_equal(a[i], a[i.copy('C')]) + + a = np.arange(20).reshape(-1, 2) + + def test_uncontiguous_subspace_assignment(self): + # During development there was a bug activating a skip logic + # based on ndim instead of size. + a = np.full((3, 4, 2), -1) + b = np.full((3, 4, 2), -1) + + a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T + b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() + + assert_equal(a, b) + + def test_too_many_fancy_indices_special_case(self): + # Just documents behaviour, this is a small limitation. + a = np.ones((1,) * 64) # 64 is NPY_MAXDIMS + assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 64) + + def test_scalar_array_bool(self): + # NumPy bools can be used as boolean index (python ones as of yet not) + a = np.array(1) + assert_equal(a[np.bool(True)], a[np.array(True)]) + assert_equal(a[np.bool(False)], a[np.array(False)]) + + # After deprecating bools as integers: + #a = np.array([0,1,2]) + #assert_equal(a[True, :], a[None, :]) + #assert_equal(a[:, True], a[:, None]) + # + #assert_(not np.may_share_memory(a, a[True, :])) + + def test_everything_returns_views(self): + # Before `...` would return a itself. + a = np.arange(5) + + assert_(a is not a[()]) + assert_(a is not a[...]) + assert_(a is not a[:]) + + def test_broaderrors_indexing(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) + assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) + + def test_trivial_fancy_out_of_bounds(self): + a = np.zeros(5) + ind = np.ones(20, dtype=np.intp) + ind[-1] = 10 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + ind = np.ones(20, dtype=np.intp) + ind[0] = 11 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + + def test_trivial_fancy_not_possible(self): + # Test that the fast path for trivial assignment is not incorrectly + # used when the index is not contiguous or 1D, see also gh-11467. + a = np.arange(6) + idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0] + assert_array_equal(a[idx], idx) + + # this case must not go into the fast path, note that idx is + # a non-contiguous none 1D array here. + a[idx] = -1 + res = np.arange(6) + res[0] = -1 + res[3] = -1 + assert_array_equal(a, res) + + def test_nonbaseclass_values(self): + class SubClass(np.ndarray): + def __array_finalize__(self, old): + # Have array finalize do funny things + self.fill(99) + + a = np.zeros((5, 5)) + s = a.copy().view(type=SubClass) + s.fill(1) + + a[[0, 1, 2, 3, 4], :] = s + assert_((a == 1).all()) + + # Subspace is last, so transposing might want to finalize + a[:, [0, 1, 2, 3, 4]] = s + assert_((a == 1).all()) + + a.fill(0) + a[...] 
= s + assert_((a == 1).all()) + + def test_array_like_values(self): + # Similar to the above test, but use a memoryview instead + a = np.zeros((5, 5)) + s = np.arange(25, dtype=np.float64).reshape(5, 5) + + a[[0, 1, 2, 3, 4], :] = memoryview(s) + assert_array_equal(a, s) + + a[:, [0, 1, 2, 3, 4]] = memoryview(s) + assert_array_equal(a, s) + + a[...] = memoryview(s) + assert_array_equal(a, s) + + @pytest.mark.parametrize("writeable", [True, False]) + def test_subclass_writeable(self, writeable): + d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], + dtype=[('target', 'S20'), ('V_mag', '>f4')]) + d.flags.writeable = writeable + # Advanced indexing results are always writeable: + ind = np.array([False, True, True], dtype=bool) + assert d[ind].flags.writeable + ind = np.array([0, 1]) + assert d[ind].flags.writeable + # Views should be writeable if the original array is: + assert d[...].flags.writeable == writeable + assert d[0].flags.writeable == writeable + + def test_memory_order(self): + # This is not necessary to preserve. Memory layouts for + # more complex indices are not as simple. + a = np.arange(10) + b = np.arange(10).reshape(5, 2).T + assert_(a[b].flags.f_contiguous) + + # Takes a different implementation branch: + a = a.reshape(-1, 1) + assert_(a[b, 0].flags.f_contiguous) + + def test_scalar_return_type(self): + # Full scalar indices should return scalars and object + # arrays should not call PyArray_Return on their items + class Zero: + # The most basic valid indexing + def __index__(self): + return 0 + + z = Zero() + + class ArrayLike: + # Simple array, should behave like the array + def __array__(self, dtype=None, copy=None): + return np.array(0) + + a = np.zeros(()) + assert_(isinstance(a[()], np.float64)) + a = np.zeros(1) + assert_(isinstance(a[z], np.float64)) + a = np.zeros((1, 1)) + assert_(isinstance(a[z, np.array(0)], np.float64)) + assert_(isinstance(a[z, ArrayLike()], np.float64)) + + # And object arrays do not call it too often: + b = np.array(0) + a = np.array(0, dtype=object) + a[()] = b + assert_(isinstance(a[()], np.ndarray)) + a = np.array([b, None]) + assert_(isinstance(a[z], np.ndarray)) + a = np.array([[b, None]]) + assert_(isinstance(a[z, np.array(0)], np.ndarray)) + assert_(isinstance(a[z, ArrayLike()], np.ndarray)) + + def test_small_regressions(self): + # Reference count of intp for index checks + a = np.array([0]) + if HAS_REFCOUNT: + refcount = sys.getrefcount(np.dtype(np.intp)) + # item setting always checks indices in separate function: + a[np.array([0], dtype=np.intp)] = 1 + a[np.array([0], dtype=np.uint8)] = 1 + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.intp), 1) + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.uint8), 1) + + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) + + def test_unaligned(self): + v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] + d = v.view(np.dtype("S8")) + # unaligned source + x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] + x = x.view(np.dtype("S8")) + x[...] = np.array("b" * 8, dtype="S") + b = np.arange(d.size) + # trivial + assert_equal(d[b], d) + d[b] = x + # nontrivial + # unaligned index array + b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] + b = b.view(np.intp)[:d.size] + b[...] 
= np.arange(d.size) + assert_equal(d[b.astype(np.int16)], d) + d[b.astype(np.int16)] = x + # boolean + d[b % 2 == 0] + d[b % 2 == 0] = x[::2] + + def test_tuple_subclass(self): + arr = np.ones((5, 5)) + + # A tuple subclass should also be an nd-index + class TupleSubclass(tuple): + pass + index = ([1], [1]) + index = TupleSubclass(index) + assert_(arr[index].shape == (1,)) + # Unlike the non nd-index: + assert_(arr[index,].shape != (1,)) + + def test_broken_sequence_not_nd_index(self): + # See gh-5063: + # If we have an object which claims to be a sequence, but fails + # on item getting, this should not be converted to an nd-index (tuple) + # If this object happens to be a valid index otherwise, it should work + # This object here is very dubious and probably bad though: + class SequenceLike: + def __index__(self): + return 0 + + def __len__(self): + return 1 + + def __getitem__(self, item): + raise IndexError('Not possible') + + arr = np.arange(10) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + # also test that field indexing does not segfault + # for a similar reason, by indexing a structured array + arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + def test_indexing_array_weird_strides(self): + # See also gh-6221 + # the shapes used here come from the issue and create the correct + # size for the iterator buffering size. + x = np.ones(10) + x2 = np.ones((10, 2)) + ind = np.arange(10)[:, None, None, None] + ind = np.broadcast_to(ind, (10, 55, 4, 4)) + + # single advanced index case + assert_array_equal(x[ind], x[ind.copy()]) + # higher dimensional advanced index + zind = np.zeros(4, dtype=np.intp) + assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) + + def test_indexing_array_negative_strides(self): + # From gh-8264, + # core dumps if negative strides are used in iteration + arro = np.zeros((4, 4)) + arr = arro[::-1, ::-1] + + slices = (slice(None), [0, 1, 2, 3]) + arr[slices] = 10 + assert_array_equal(arr, 10.) + + def test_character_assignment(self): + # This is an example a function going through CopyObject which + # used to have an untested special path for scalars + # (the character special dtype case, should be deprecated probably) + arr = np.zeros((1, 5), dtype="c") + arr[0] = np.str_("asdfg") # must assign as a sequence + assert_array_equal(arr[0], np.array("asdfg", dtype="c")) + assert arr[0, 1] == b"s" # make sure not all were set to "a" for both + + @pytest.mark.parametrize("index", + [True, False, np.array([0])]) + @pytest.mark.parametrize("num", [64, 80]) + @pytest.mark.parametrize("original_ndim", [1, 64]) + def test_too_many_advanced_indices(self, index, num, original_ndim): + # These are limitations based on the number of arguments we can process. + # For `num=32` (and all boolean cases), the result is actually define; + # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons. + arr = np.ones((1,) * original_ndim) + with pytest.raises(IndexError): + arr[(index,) * num] + with pytest.raises(IndexError): + arr[(index,) * num] = 1. 
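+ # (Added note.) Below the limit the same pattern is well defined; for
+ # example, with arr3 = np.ones((1, 1, 1)) the expression
+ # arr3[(np.array([0]),) * 3] broadcasts the three index arrays and
+ # returns array([1.]).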
+ + def test_nontuple_ndindex(self): + a = np.arange(25).reshape((5, 5)) + assert_equal(a[[0, 1]], np.array([a[0], a[1]])) + assert_equal(a[[0, 1], [0, 1]], np.array([0, 6])) + assert_raises(IndexError, a.__getitem__, [slice(None)]) + + def test_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([0, 5, 6]) + assert_equal(a.flat[b.flat], np.array([0, 5, 6])) + + def test_empty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([], dtype="S") + assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, match="unsupported iterator index"): + a.flat[b.flat] + + +class TestFieldIndexing: + def test_scalar_return_type(self): + # Field access on an array should return an array, even if it + # is 0-d. + a = np.zeros((), [('a', 'f8')]) + assert_(isinstance(a['a'], np.ndarray)) + assert_(isinstance(a[['a']], np.ndarray)) + + +class TestBroadcastedAssignments: + def assign(self, a, ind, val): + a[ind] = val + return a + + def test_prepending_ones(self): + a = np.zeros((3, 2)) + + a[...] = np.ones((1, 3, 2)) + # Fancy with subspace with and without transpose + a[[0, 1, 2], :] = np.ones((1, 3, 2)) + a[:, [0, 1]] = np.ones((1, 3, 2)) + # Fancy without subspace (with broadcasting) + a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) + + def test_prepend_not_one(self): + assign = self.assign + s_ = np.s_ + a = np.zeros(5) + + # Too large and not only ones. + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2, 2, 1))) + + def test_simple_broadcasting_errors(self): + assign = self.assign + s_ = np.s_ + a = np.zeros((5, 1)) + + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) + + @pytest.mark.parametrize("index", [ + (..., [1, 2], slice(None)), + ([0, 1], ..., 0), + (..., [1, 2], [1, 2])]) + def test_broadcast_error_reports_correct_shape(self, index): + values = np.zeros((100, 100)) # will never broadcast below + + arr = np.zeros((3, 4, 5, 6, 7)) + # We currently report without any spaces (could be changed) + shape_str = str(arr[index].shape).replace(" ", "") + + with pytest.raises(ValueError) as e: + arr[index] = values + + assert str(e.value).endswith(shape_str) + + def test_index_is_larger(self): + # Simple case of fancy index broadcasting of the index. + a = np.zeros((5, 5)) + a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] + + assert_((a[:3, :3] == [2, 3, 4]).all()) + + def test_broadcast_subspace(self): + a = np.zeros((100, 100)) + v = np.arange(100)[:, None] + b = np.arange(100)[::-1] + a[b] = v + assert_((a[::-1] == v).all()) + + +class TestSubclasses: + def test_basic(self): + # Test that indexing in various ways produces SubClass instances, + # and that the base is set up correctly: the original subclass + # instance for views, and a new ndarray for advanced/boolean indexing + # where a copy was made (latter a regression test for gh-11983). 
+ class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s_slice = s[:3] + assert_(type(s_slice) is SubClass) + assert_(s_slice.base is s) + assert_array_equal(s_slice, a[:3]) + + s_fancy = s[[0, 1, 2]] + assert_(type(s_fancy) is SubClass) + assert_(s_fancy.base is not s) + assert_(type(s_fancy.base) is np.ndarray) + assert_array_equal(s_fancy, a[[0, 1, 2]]) + assert_array_equal(s_fancy.base, a[[0, 1, 2]]) + + s_bool = s[s > 0] + assert_(type(s_bool) is SubClass) + assert_(s_bool.base is not s) + assert_(type(s_bool.base) is np.ndarray) + assert_array_equal(s_bool, a[a > 0]) + assert_array_equal(s_bool.base, a[a > 0]) + + def test_fancy_on_read_only(self): + # Test that fancy indexing on read-only SubClass does not make a + # read-only copy (gh-14132) + class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s.flags.writeable = False + s_fancy = s[[0, 1, 2]] + assert_(s_fancy.flags.writeable) + + def test_finalize_gets_full_info(self): + # Array finalize should be called on the filled array. + class SubClass(np.ndarray): + def __array_finalize__(self, old): + self.finalize_status = np.array(self) + self.old = old + + s = np.arange(10).view(SubClass) + new_s = s[:3] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[[0, 1, 2, 3]] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[s > 0] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + +class TestFancyIndexingCast: + def test_boolean_index_cast_assign(self): + # Setup the boolean index and float arrays. + shape = (8, 63) + bool_index = np.zeros(shape).astype(bool) + bool_index[0, 1] = True + zero_array = np.zeros(shape) + + # Assigning float is fine. + zero_array[bool_index] = np.array([1]) + assert_equal(zero_array[0, 1], 1) + + # Fancy indexing works, although we get a cast warning. + assert_warns(ComplexWarning, + zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) + assert_equal(zero_array[0, 1], 2) # No complex part + + # Cast complex to float, throwing away the imaginary portion. + assert_warns(ComplexWarning, + zero_array.__setitem__, bool_index, np.array([1j])) + assert_equal(zero_array[0, 1], 0) + +class TestFancyIndexingEquivalence: + def test_object_assign(self): + # Check that the field and object special case using copyto is active. + # The right hand side cannot be converted to an array here. + a = np.arange(5, dtype=object) + b = a.copy() + a[:3] = [1, (1, 2), 3] + b[[0, 1, 2]] = [1, (1, 2), 3] + assert_array_equal(a, b) + + # test same for subspace fancy indexing + b = np.arange(5, dtype=object)[None, :] + b[[0], :3] = [[1, (1, 2), 3]] + assert_array_equal(a, b[0]) + + # Check that swapping of axes works. + # There was a bug that made the later assignment throw a ValueError + # do to an incorrectly transposed temporary right hand side (gh-5714) + b = b.T + b[:3, [0]] = [[1], [(1, 2)], [3]] + assert_array_equal(a, b[:, 0]) + + # Another test for the memory order of the subspace + arr = np.ones((3, 4, 5), dtype=object) + # Equivalent slicing assignment for comparison + cmp_arr = arr.copy() + cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + arr = arr.copy('F') + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + + def test_cast_equivalence(self): + # Yes, normal slicing uses unsafe casting. 
+ a = np.arange(5) + b = a.copy() + + a[:3] = np.array(['2', '-3', '-1']) + b[[0, 2, 1]] = np.array(['2', '-1', '-3']) + assert_array_equal(a, b) + + # test the same for subspace fancy indexing + b = np.arange(5)[None, :] + b[[0], :3] = np.array([['2', '-3', '-1']]) + assert_array_equal(a, b[0]) + + +class TestMultiIndexingAutomated: + """ + These tests use code to mimic the C-Code indexing for selection. + + NOTE: + + * This still lacks tests for complex item setting. + * If you change behavior of indexing, you might want to modify + these tests to try more combinations. + * Behavior was written to match numpy version 1.8. (though a + first version matched 1.7.) + * Only tuple indices are supported by the mimicking code. + (and tested as of writing this) + * Error types should match most of the time as long as there + is only one error. For multiple errors, what gets raised + will usually not be the same one. They are *not* tested. + + Update 2016-11-30: It is probably not worth maintaining this test + indefinitely and it can be dropped if maintenance becomes a burden. + + """ + + def setup_method(self): + self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + self.b = np.empty((3, 0, 5, 6)) + self.complex_indices = ['skip', Ellipsis, + 0, + # Boolean indices, up to 3-d for some special cases of eating up + # dimensions, also need to test all False + np.array([True, False, False]), + np.array([[True, False], [False, True]]), + np.array([[[False, False], [False, False]]]), + # Some slices: + slice(-5, 5, 2), + slice(1, 1, 100), + slice(4, -1, -2), + slice(None, None, -3), + # Some Fancy indexes: + np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast + np.array([0, 1, -2]), + np.array([[2], [0], [1]]), + np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), + np.array([2, -1], dtype=np.int8), + np.zeros([1] * 31, dtype=int), # trigger too large array. + np.array([0., 1.])] # invalid datatype + # Some simpler indices that still cover a bit more + self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), + 'skip'] + # Very simple ones to fill the rest: + self.fill_indices = [slice(None, None), 0] + + def _get_multi_index(self, arr, indices): + """Mimic multi dimensional indexing. + + Parameters + ---------- + arr : ndarray + Array to be indexed. + indices : tuple of index objects + + Returns + ------- + out : ndarray + An array equivalent to the indexing operation (but always a copy). + `arr[indices]` should be identical. + no_copy : bool + Whether the indexing operation requires a copy. If this is `True`, + `np.may_share_memory(arr, arr[indices])` should be `True` (with + some exceptions for scalars and possibly 0-d arrays). + + Notes + ----- + While the function may mostly match the errors of normal indexing this + is generally not the case. + """ + in_indices = list(indices) + indices = [] + # if False, this is a fancy or boolean index + no_copy = True + # number of fancy/scalar indexes that are not consecutive + num_fancy = 0 + # number of dimensions indexed by a "fancy" index + fancy_dim = 0 + # NOTE: This is a funny twist (and probably OK to change). + # The boolean array has illegal indexes, but this is + # allowed if the broadcast fancy-indices are 0-sized. + # This variable is to catch that case. + error_unless_broadcast_to_empty = False + + # We need to handle Ellipsis and make arrays from indices, also + # check if this is fancy indexing (set no_copy). + ndim = 0 + ellipsis_pos = None # define here mostly to replace all but first. 
+ for i, indx in enumerate(in_indices): + if indx is None: + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + no_copy = False + if indx.ndim == 0: + raise IndexError + # boolean indices can have higher dimensions + ndim += indx.ndim + fancy_dim += indx.ndim + continue + if indx is Ellipsis: + if ellipsis_pos is None: + ellipsis_pos = i + continue # do not increment ndim counter + raise IndexError + if isinstance(indx, slice): + ndim += 1 + continue + if not isinstance(indx, np.ndarray): + # This could be open for changes in numpy. + # numpy should maybe raise an error if casting to intp + # is not safe. It rejects np.array([1., 2.]) but not + # [1., 2.] as index (same for ie. np.take). + # (Note the importance of empty lists if changing this here) + try: + indx = np.array(indx, dtype=np.intp) + except ValueError: + raise IndexError + in_indices[i] = indx + elif indx.dtype.kind not in 'bi': + raise IndexError('arrays used as indices must be of ' + 'integer (or boolean) type') + if indx.ndim != 0: + no_copy = False + ndim += 1 + fancy_dim += 1 + + if arr.ndim - ndim < 0: + # we can't take more dimensions then we have, not even for 0-d + # arrays. since a[()] makes sense, but not a[(),]. We will + # raise an error later on, unless a broadcasting error occurs + # first. + raise IndexError + + if ndim == 0 and None not in in_indices: + # Well we have no indexes or one Ellipsis. This is legal. + return arr.copy(), no_copy + + if ellipsis_pos is not None: + in_indices[ellipsis_pos:ellipsis_pos + 1] = ([slice(None, None)] * + (arr.ndim - ndim)) + + for ax, indx in enumerate(in_indices): + if isinstance(indx, slice): + # convert to an index array + indx = np.arange(*indx.indices(arr.shape[ax])) + indices.append(['s', indx]) + continue + elif indx is None: + # this is like taking a slice with one element from a new axis: + indices.append(['n', np.array([0], dtype=np.intp)]) + arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:]) + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + if indx.shape != arr.shape[ax:ax + indx.ndim]: + raise IndexError + + try: + flat_indx = np.ravel_multi_index(np.nonzero(indx), + arr.shape[ax:ax + indx.ndim], mode='raise') + except Exception: + error_unless_broadcast_to_empty = True + # fill with 0s instead, and raise error later + flat_indx = np.array([0] * indx.sum(), dtype=np.intp) + # concatenate axis into a single one: + if indx.ndim != 0: + arr = arr.reshape(arr.shape[:ax] + + (np.prod(arr.shape[ax:ax + indx.ndim]),) + + arr.shape[ax + indx.ndim:]) + indx = flat_indx + else: + # This could be changed, a 0-d boolean index can + # make sense (even outside the 0-d indexed array case) + # Note that originally this is could be interpreted as + # integer in the full integer special case. + raise IndexError + # If the index is a singleton, the bounds check is done + # before the broadcasting. This used to be different in <1.9 + elif indx.ndim == 0 and not ( + -arr.shape[ax] <= indx < arr.shape[ax] + ): + raise IndexError + if indx.ndim == 0: + # The index is a scalar. This used to be two fold, but if + # fancy indexing was active, the check was done later, + # possibly after broadcasting it away (1.7. or earlier). + # Now it is always done. + if indx >= arr.shape[ax] or indx < - arr.shape[ax]: + raise IndexError + if (len(indices) > 0 and + indices[-1][0] == 'f' and + ax != ellipsis_pos): + # NOTE: There could still have been a 0-sized Ellipsis + # between them. Checked that with ellipsis_pos. 
+ indices[-1].append(indx) + else: + # We have a fancy index that is not after an existing one. + # NOTE: A 0-d array triggers this as well, while one may + # expect it to not trigger it, since a scalar would not be + # considered fancy indexing. + num_fancy += 1 + indices.append(['f', indx]) + + if num_fancy > 1 and not no_copy: + # We have to flush the fancy indexes left + new_indices = indices[:] + axes = list(range(arr.ndim)) + fancy_axes = [] + new_indices.insert(0, ['f']) + ni = 0 + ai = 0 + for indx in indices: + ni += 1 + if indx[0] == 'f': + new_indices[0].extend(indx[1:]) + del new_indices[ni] + ni -= 1 + for ax in range(ai, ai + len(indx[1:])): + fancy_axes.append(ax) + axes.remove(ax) + ai += len(indx) - 1 # axis we are at + indices = new_indices + # and now we need to transpose arr: + arr = arr.transpose(*(fancy_axes + axes)) + + # We only have one 'f' index now and arr is transposed accordingly. + # Now handle newaxis by reshaping... + ax = 0 + for indx in indices: + if indx[0] == 'f': + if len(indx) == 1: + continue + # First of all, reshape arr to combine fancy axes into one: + orig_shape = arr.shape + orig_slice = orig_shape[ax:ax + len(indx[1:])] + arr = arr.reshape(arr.shape[:ax] + + (np.prod(orig_slice).astype(int),) + + arr.shape[ax + len(indx[1:]):]) + + # Check if broadcasting works + res = np.broadcast(*indx[1:]) + # unfortunately the indices might be out of bounds. So check + # that first, and use mode='wrap' then. However only if + # there are any indices... + if res.size != 0: + if error_unless_broadcast_to_empty: + raise IndexError + for _indx, _size in zip(indx[1:], orig_slice): + if _indx.size == 0: + continue + if np.any(_indx >= _size) or np.any(_indx < -_size): + raise IndexError + if len(indx[1:]) == len(orig_slice): + if np.prod(orig_slice) == 0: + # Work around for a crash or IndexError with 'wrap' + # in some 0-sized cases. + try: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='raise') + except Exception: + # This happens with 0-sized orig_slice (sometimes?) + # here it is a ValueError, but indexing gives a: + raise IndexError('invalid index into 0-sized') + else: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='wrap') + else: + # Maybe never happens... + raise ValueError + arr = arr.take(mi.ravel(), axis=ax) + try: + arr = arr.reshape(arr.shape[:ax] + + mi.shape + + arr.shape[ax + 1:]) + except ValueError: + # too many dimensions, probably + raise IndexError + ax += mi.ndim + continue + + # If we are here, we have a 1D array for take: + arr = arr.take(indx[1], axis=ax) + ax += 1 + + return arr, no_copy + + def _check_multi_index(self, arr, index): + """Check a multi index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be a reshaped arange. + index : tuple of indexing objects + Index being tested. + """ + # Test item getting + try: + mimic_get, no_copy = self._get_multi_index(arr, index) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _check_single_index(self, arr, index): + """Check a single index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be an arange. + index : indexing object + Index being tested. 
Must be a single index and not a tuple + of indexing objects (see also `_check_multi_index`). + """ + try: + mimic_get, no_copy = self._get_multi_index(arr, (index,)) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _compare_index_result(self, arr, index, mimic_get, no_copy): + """Compare mimicked result to indexing result. + """ + arr = arr.copy() + if HAS_REFCOUNT: + startcount = sys.getrefcount(arr) + indexed_arr = arr[index] + assert_array_equal(indexed_arr, mimic_get) + # Check if we got a view, unless its a 0-sized or 0-d array. + # (then its not a view, and that does not matter) + if indexed_arr.size != 0 and indexed_arr.ndim != 0: + assert_(np.may_share_memory(indexed_arr, arr) == no_copy) + # Check reference count of the original array + if HAS_REFCOUNT: + if no_copy: + # refcount increases by one: + assert_equal(sys.getrefcount(arr), startcount + 1) + else: + assert_equal(sys.getrefcount(arr), startcount) + + # Test non-broadcast setitem: + b = arr.copy() + b[index] = mimic_get + 1000 + if b.size == 0: + return # nothing to compare here... + if no_copy and indexed_arr.ndim != 0: + # change indexed_arr in-place to manipulate original: + indexed_arr += 1000 + assert_array_equal(arr, b) + return + # Use the fact that the array is originally an arange: + arr.flat[indexed_arr.ravel()] += 1000 + assert_array_equal(arr, b) + + def test_boolean(self): + a = np.array(5) + assert_equal(a[np.array(True)], 5) + a[np.array(True)] = 1 + assert_equal(a, 1) + # NOTE: This is different from normal broadcasting, as + # arr[boolean_array] works like in a multi index. Which means + # it is aligned to the left. This is probably correct for + # consistency with arr[boolean_array,] also no broadcasting + # is done at all + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool),)) + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + + def test_multidim(self): + # Automatically test combinations with complex indexes on 2nd (or 1st) + # spot and the simple ones in one other spot. + with warnings.catch_warnings(): + # This is so that np.array(True) is not accepted in a full integer + # index, when running the file separately. 
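+ # (Added overview.) Each iteration below places one "complex" index
+ # in position 1, one "simple" index in position 0, 2 or 3, and
+ # trivial fill indices elsewhere; itertools.product then enumerates
+ # every combination, with 'skip' entries dropped before indexing.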
+ warnings.filterwarnings('error', '', DeprecationWarning) + warnings.filterwarnings('error', '', VisibleDeprecationWarning) + + def isskip(idx): + return isinstance(idx, str) and idx == "skip" + + for simple_pos in [0, 2, 3]: + tocheck = [self.fill_indices, self.complex_indices, + self.fill_indices, self.fill_indices] + tocheck[simple_pos] = self.simple_indices + for index in product(*tocheck): + index = tuple(i for i in index if not isskip(i)) + self._check_multi_index(self.a, index) + self._check_multi_index(self.b, index) + + # Check very simple item getting: + self._check_multi_index(self.a, (0, 0, 0, 0)) + self._check_multi_index(self.b, (0, 0, 0, 0)) + # Also check (simple cases of) too many indices: + assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + + def test_1d(self): + a = np.arange(10) + for index in self.complex_indices: + self._check_single_index(a, index) + +class TestFloatNonIntegerArgument: + """ + These test that ``TypeError`` is raised when you try to use + non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` + and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. + + """ + def test_valid_indexing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[np.array([0])] + a[[0, 0]] + a[:, [0, 0]] + a[:, 0, :] + a[:, :, :] + + def test_valid_slicing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[::] + a[0:] + a[:2] + a[0:2] + a[::2] + a[1::2] + a[:2:2] + a[1:2:2] + + def test_non_integer_argument_errors(self): + a = np.array([[5]]) + + assert_raises(TypeError, np.reshape, a, (1., 1., -1)) + assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) + assert_raises(TypeError, np.take, a, [0], 1.) + assert_raises(TypeError, np.take, a, [0], np.float64(1.)) + + def test_non_integer_sequence_multiplication(self): + # NumPy scalar sequence multiply should not work with non-integers + def mult(a, b): + return a * b + + assert_raises(TypeError, mult, [1], np.float64(3)) + # following should be OK + mult([1], np.int_(3)) + + def test_reduce_axis_float_index(self): + d = np.zeros((3, 3, 3)) + assert_raises(TypeError, np.min, d, 0.5) + assert_raises(TypeError, np.min, d, (0.5, 1)) + assert_raises(TypeError, np.min, d, (1, 2.2)) + assert_raises(TypeError, np.min, d, (.2, 1.2)) + + +class TestBooleanIndexing: + # Using a boolean as integer argument/indexing is an error. + def test_bool_as_int_argument_errors(self): + a = np.array([[[1]]]) + + assert_raises(TypeError, np.reshape, a, (True, -1)) + assert_raises(TypeError, np.reshape, a, (np.bool(True), -1)) + # Note that operator.index(np.array(True)) does not work, a boolean + # array is thus also deprecated, but not with the same message: + assert_raises(TypeError, operator.index, np.array(True)) + assert_raises(TypeError, operator.index, np.True_) + assert_raises(TypeError, np.take, args=(a, [0], False)) + + def test_boolean_indexing_weirdness(self): + # Weird boolean indexing things + a = np.ones((2, 3, 4)) + assert a[False, True, ...].shape == (0, 2, 3, 4) + assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2) + assert_raises(IndexError, lambda: a[False, [0, 1], ...]) + + def test_boolean_indexing_fast_path(self): + # These used to either give the wrong error, or incorrectly give no + # error. 
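+ # (Added background.) The fast path must still verify that every
+ # boolean axis matches the corresponding array axis: a (1, 9) mask
+ # over a (3, 3) array is a size mismatch on axis 0 and has to raise
+ # IndexError instead of quietly returning an empty result.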
+ a = np.ones((3, 3)) + + # This used to incorrectly work (and give an array of shape (0,)) + idx1 = np.array([[False] * 9]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along axis 0; " + "size of axis is 3 but size of corresponding boolean axis is 1", + lambda: a[idx1]) + + # This used to incorrectly give a ValueError: operands could not be broadcast together + idx2 = np.array([[False] * 8 + [True]]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along axis 0; " + "size of axis is 3 but size of corresponding boolean axis is 1", + lambda: a[idx2]) + + # This is the same as it used to be. The above two should work like this. + idx3 = np.array([[False] * 10]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along axis 0; " + "size of axis is 3 but size of corresponding boolean axis is 1", + lambda: a[idx3]) + + # This used to give ValueError: non-broadcastable operand + a = np.ones((1, 1, 2)) + idx = np.array([[[True], [False]]]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along axis 1; " + "size of axis is 1 but size of corresponding boolean axis is 2", + lambda: a[idx]) + + +class TestArrayToIndexDeprecation: + """Creating an index from array not 0-D is an error. + + """ + def test_array_to_index_error(self): + # so no exception is expected. The raising is effectively tested above. + a = np.array([[[1]]]) + + assert_raises(TypeError, operator.index, np.array([1])) + assert_raises(TypeError, np.reshape, a, (a, -1)) + assert_raises(TypeError, np.take, a, [0], a) + + +class TestNonIntegerArrayLike: + """Tests that array_likes only valid if can safely cast to integer. + + For instance, lists give IndexError when they cannot be safely cast to + an integer. + + """ + def test_basic(self): + a = np.arange(10) + + assert_raises(IndexError, a.__getitem__, [0.5, 1.5]) + assert_raises(IndexError, a.__getitem__, (['1', '2'],)) + + # The following is valid + a.__getitem__([]) + + +class TestMultipleEllipsisError: + """An index can only have a single ellipsis. 
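+ For example, ``a[..., ...]`` raises ``IndexError`` rather than treating
+ the second ellipsis as a full slice (sentence added for clarity).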
+ + """ + def test_basic(self): + a = np.arange(10) + assert_raises(IndexError, lambda: a[..., ...]) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,)) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) + + +class TestCApiAccess: + def test_getitem(self): + subscript = functools.partial(array_indexing, 0) + + # 0-d arrays don't work: + assert_raises(IndexError, subscript, np.ones(()), 0) + # Out of bound values: + assert_raises(IndexError, subscript, np.ones(10), 11) + assert_raises(IndexError, subscript, np.ones(10), -11) + assert_raises(IndexError, subscript, np.ones((10, 10)), 11) + assert_raises(IndexError, subscript, np.ones((10, 10)), -11) + + a = np.arange(10) + assert_array_equal(a[4], subscript(a, 4)) + a = a.reshape(5, 2) + assert_array_equal(a[-4], subscript(a, -4)) + + def test_setitem(self): + assign = functools.partial(array_indexing, 1) + + # Deletion is impossible: + assert_raises(ValueError, assign, np.ones(10), 0) + # 0-d arrays don't work: + assert_raises(IndexError, assign, np.ones(()), 0, 0) + # Out of bound values: + assert_raises(IndexError, assign, np.ones(10), 11, 0) + assert_raises(IndexError, assign, np.ones(10), -11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) + + a = np.arange(10) + assign(a, 4, 10) + assert_(a[4] == 10) + + a = a.reshape(5, 2) + assign(a, 4, 10) + assert_array_equal(a[-1], [10, 10]) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_item_selection.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_item_selection.py new file mode 100644 index 0000000..79fb82d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_item_selection.py @@ -0,0 +1,167 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import HAS_REFCOUNT, assert_, assert_array_equal, assert_raises + + +class TestTake: + def test_simple(self): + a = [[1, 2], [3, 4]] + a_str = [[b'1', b'2'], [b'3', b'4']] + modes = ['raise', 'wrap', 'clip'] + indices = [-1, 4] + index_arrays = [np.empty(0, dtype=np.intp), + np.empty((), dtype=np.intp), + np.empty((1, 1), dtype=np.intp)] + real_indices = {'raise': {-1: 1, 4: IndexError}, + 'wrap': {-1: 1, 4: 0}, + 'clip': {-1: 0, 4: 1}} + # Currently all types but object, use the same function generation. + # So it should not be necessary to test all. However test also a non + # refcounted struct on top of object, which has a size that hits the + # default (non-specialized) path. 
+ types = int, object, np.dtype([('', 'i2', 3)]) + for t in types: + # ta works, even if the array may be odd if buffer interface is used + ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) + tresult = list(ta.T.copy()) + for index_array in index_arrays: + if index_array.size != 0: + tresult[0].shape = (2,) + index_array.shape + tresult[1].shape = (2,) + index_array.shape + for mode in modes: + for index in indices: + real_index = real_indices[mode][index] + if real_index is IndexError and index_array.size != 0: + index_array.put(0, index) + assert_raises(IndexError, ta.take, index_array, + mode=mode, axis=1) + elif index_array.size != 0: + index_array.put(0, index) + res = ta.take(index_array, mode=mode, axis=1) + assert_array_equal(res, tresult[real_index]) + else: + res = ta.take(index_array, mode=mode, axis=1) + assert_(res.shape == (2,) + index_array.shape) + + def test_refcounting(self): + objects = [object() for i in range(10)] + if HAS_REFCOUNT: + orig_rcs = [sys.getrefcount(o) for o in objects] + for mode in ('raise', 'clip', 'wrap'): + a = np.array(objects) + b = np.array([2, 2, 4, 5, 3, 5]) + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) + # not contiguous, example: + a = np.array(objects * 2)[::2] + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) + + def test_unicode_mode(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.take, 5, mode=k) + + def test_empty_partition(self): + # In reference to github issue #6530 + a_original = np.array([0, 2, 4, 6, 8, 10]) + a = a_original.copy() + + # An empty partition should be a successful no-op + a.partition(np.array([], dtype=np.int16)) + + assert_array_equal(a, a_original) + + def test_empty_argpartition(self): + # In reference to github issue #6530 + a = np.array([0, 2, 4, 6, 8, 10]) + a = a.argpartition(np.array([], dtype=np.int16)) + + b = np.array([0, 1, 2, 3, 4, 5]) + assert_array_equal(a, b) + + +class TestPutMask: + @pytest.mark.parametrize("dtype", list(np.typecodes["All"]) + ["i,O"]) + def test_simple(self, dtype): + if dtype.lower() == "m": + dtype += "8[ns]" + + # putmask is weird and doesn't care about value length (even shorter) + vals = np.arange(1001).astype(dtype=dtype) + + mask = np.random.randint(2, size=1000).astype(bool) + # Use vals.dtype in case of flexible dtype (i.e. string) + arr = np.zeros(1000, dtype=vals.dtype) + zeros = arr.copy() + + np.putmask(arr, mask, vals) + assert_array_equal(arr[mask], vals[:len(mask)][mask]) + assert_array_equal(arr[~mask], zeros[~mask]) + + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_empty(self, dtype, mode): + arr = np.zeros(1000, dtype=dtype) + arr_copy = arr.copy() + mask = np.random.randint(2, size=1000).astype(bool) + + # Allowing empty values like this is weird... + np.put(arr, mask, []) + assert_array_equal(arr, arr_copy) + + +class TestPut: + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_simple(self, dtype, mode): + if dtype.lower() == "m": + dtype += "8[ns]" + + # put is weird and doesn't care about value length (even shorter) + vals = np.arange(1001).astype(dtype=dtype) + + # Use vals.dtype in case of flexible dtype (i.e. 
string) + arr = np.zeros(1000, dtype=vals.dtype) + zeros = arr.copy() + + if mode == "clip": + # Special because 0 and -1 value are "reserved" for clip test + indx = np.random.permutation(len(arr) - 2)[:-500] + 1 + + indx[-1] = 0 + indx[-2] = len(arr) - 1 + indx_put = indx.copy() + indx_put[-1] = -1389 + indx_put[-2] = 1321 + else: + # Avoid duplicates (for simplicity) and fill half only + indx = np.random.permutation(len(arr) - 3)[:-500] + indx_put = indx + if mode == "wrap": + indx_put = indx_put + len(arr) + + np.put(arr, indx_put, vals, mode=mode) + assert_array_equal(arr[indx], vals[:len(indx)]) + untouched = np.ones(len(arr), dtype=bool) + untouched[indx] = False + assert_array_equal(arr[untouched], zeros[:untouched.sum()]) + + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_empty(self, dtype, mode): + arr = np.zeros(1000, dtype=dtype) + arr_copy = arr.copy() + + # Allowing empty values like this is weird... + np.put(arr, [1, 2, 3], []) + assert_array_equal(arr, arr_copy) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_limited_api.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_limited_api.py new file mode 100644 index 0000000..984210e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_limited_api.py @@ -0,0 +1,102 @@ +import os +import subprocess +import sys +import sysconfig + +import pytest + +from numpy.testing import IS_EDITABLE, IS_PYPY, IS_WASM, NOGIL_BUILD + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + + # Note: keep in sync with the one in pyproject.toml + required_version = "3.0.6" + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) + + +@pytest.fixture(scope='module') +def install_temp(tmpdir_factory): + # Based in part on test_cython from random.tests.test_extending + if IS_WASM: + pytest.skip("No subprocess") + + srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'limited_api') + build_dir = tmpdir_factory.mktemp("limited_api") / "build" + os.makedirs(build_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--werror", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(srcdir)], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", "--werror", + "--native-file", native_file, str(srcdir)], + cwd=build_dir + ) + try: + subprocess.check_call( + ["meson", "compile", "-vv"], cwd=build_dir) + 
except subprocess.CalledProcessError as p: + print(f"{p.stdout=}") + print(f"{p.stderr=}") + raise + + sys.path.append(str(build_dir)) + + +@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") +@pytest.mark.xfail( + sysconfig.get_config_var("Py_DEBUG"), + reason=( + "Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, " + "and Py_REF_DEBUG" + ), +) +@pytest.mark.xfail( + NOGIL_BUILD, + reason="Py_GIL_DISABLED builds do not currently support the limited API", +) +@pytest.mark.skipif(IS_PYPY, reason="no support for limited API in PyPy") +def test_limited_api(install_temp): + """Test building a third-party C extension with the limited API + and building a cython extension with the limited API + """ + + import limited_api1 # Earliest (3.6) # noqa: F401 + import limited_api2 # cython # noqa: F401 + import limited_api_latest # Latest version (current Python) # noqa: F401 diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_longdouble.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_longdouble.py new file mode 100644 index 0000000..f7edd97 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_longdouble.py @@ -0,0 +1,369 @@ +import platform +import warnings + +import pytest + +import numpy as np +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.testing import ( + IS_MUSL, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) + +LD_INFO = np.finfo(np.longdouble) +longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) + + +_o = 1 + LD_INFO.eps +string_to_longdouble_inaccurate = (_o != np.longdouble(str(_o))) +del _o + + +def test_scalar_extraction(): + """Confirm that extracting a value doesn't convert to python float""" + o = 1 + LD_INFO.eps + a = np.array([o, o, o]) + assert_equal(a[1], o) + + +# Conversions string -> long double + +# 0.1 not exactly representable in base 2 floating point. +repr_precision = len(repr(np.longdouble(0.1))) +# +2 from macro block starting around line 842 in scalartypes.c.src. + + +@pytest.mark.skipif(IS_MUSL, + reason="test flaky on musllinux") +@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, + reason="repr precision not enough to show eps") +def test_str_roundtrip(): + # We will only see eps in repr if within printing precision. + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(str(o)), o, f"str was {str(o)}") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_str_roundtrip_bytes(): + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(str(o).encode("ascii")), o) + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes)) +def test_array_and_stringlike_roundtrip(strtype): + """ + Test that string representations of long-double roundtrip both + for array casting and scalar coercion, see also gh-15608. 
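+ In short, ``np.longdouble`` applied to the string form, and
+ ``astype(np.longdouble)`` on a string array, should both reproduce the
+ original value exactly (sentence added for clarity).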
+ """ + o = 1 + LD_INFO.eps + + if strtype in (np.bytes_, bytes): + o_str = strtype(str(o).encode("ascii")) + else: + o_str = strtype(str(o)) + + # Test that `o` is correctly coerced from the string-like + assert o == np.longdouble(o_str) + + # Test that arrays also roundtrip correctly: + o_strarr = np.asarray([o] * 3, dtype=strtype) + assert (o == o_strarr.astype(np.longdouble)).all() + + # And array coercion and casting to string give the same as scalar repr: + assert (o_strarr == o_str).all() + assert (np.asarray([o] * 3).astype(strtype) == o_str).all() + + +def test_bogus_string(): + assert_raises(ValueError, np.longdouble, "spam") + assert_raises(ValueError, np.longdouble, "1.0 flub") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_fromstring(): + o = 1 + LD_INFO.eps + s = (" " + str(o)) * 5 + a = np.array([o] * 5) + assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, + err_msg=f"reading '{s}'") + + +def test_fromstring_complex(): + for ctype in ["complex", "cdouble"]: + # Check spacing between separator + assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype), + np.array([1., 2., 3., 4.])) + # Real component not specified + assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype), + np.array([1.j, -2.j, 3.j, 40.j])) + # Both components specified + assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), + np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) + # Spaces at wrong places + with assert_raises(ValueError): + np.fromstring("1+2 j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+ 2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1 +2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+j", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1j+1", dtype=ctype, sep=",") + + +def test_fromstring_bogus(): + with assert_raises(ValueError): + np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" ") + + +def test_fromstring_empty(): + with assert_raises(ValueError): + np.fromstring("xxxxx", sep="x") + + +def test_fromstring_missing(): + with assert_raises(ValueError): + np.fromstring("1xx3x4x5x6", sep="x") + + +class TestFileBased: + + ldbl = 1 + LD_INFO.eps + tgt = np.array([ldbl] * 5) + out = ''.join([str(t) + '\n' for t in tgt]) + + def test_fromfile_bogus(self): + with temppath() as path: + with open(path, 'w') as f: + f.write("1. 2. 3. flop 4.\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=float, sep=" ") + + def test_fromfile_complex(self): + for ctype in ["complex", "cdouble"]: + # Check spacing between separator and only real component specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1, 2 , 3 ,4\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1., 2., 3., 4.])) + + # Real component not specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1j, -2j, 3j, 4e1j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j])) + + # Both components specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1+1j,2-2j, -3+3j, -4e1+4j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. 
+ 4j])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+2 j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+ 2j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1 +2j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1+j\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1+\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1j+1\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_fromfile(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.fromfile(path, dtype=np.longdouble, sep="\n") + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_genfromtxt(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.genfromtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_loadtxt(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.loadtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_tofile_roundtrip(self): + with temppath() as path: + self.tgt.tofile(path, sep=" ") + res = np.fromfile(path, dtype=np.longdouble, sep=" ") + assert_equal(res, self.tgt) + + +# Conversions long double -> string + + +def test_str_exact(): + o = 1 + LD_INFO.eps + assert_(str(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_format(): + assert_(f"{1 + LD_INFO.eps:.40g}" != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_percent(): + o = 1 + LD_INFO.eps + assert_(f"{o:.40g}" != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, + reason="array repr problem") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_array_repr(): + o = 1 + LD_INFO.eps + a = np.array([o]) + b = np.array([1], dtype=np.longdouble) + if not np.all(a != b): + raise ValueError("precision loss creating arrays") + assert_(repr(a) != repr(b)) + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_str_roundtrip_foreign(self): + o = 1.5 + assert_equal(o, np.longdouble(str(o))) + + def test_fromstring_foreign_repr(self): + f = 1.234 + a = np.fromstring(repr(f), dtype=float, sep=" ") + assert_equal(a[0], f) + + def test_fromstring_foreign(self): + s = "1.234" + a = np.fromstring(s, 
dtype=np.longdouble, sep=" ") + assert_equal(a[0], np.longdouble(s)) + + def test_fromstring_foreign_sep(self): + a = np.array([1, 2, 3, 4]) + b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") + assert_array_equal(a, b) + + def test_fromstring_foreign_value(self): + with assert_raises(ValueError): + np.fromstring("1,234", dtype=np.longdouble, sep=" ") + + +@pytest.mark.parametrize("int_val", [ + # cases discussed in gh-10723 + # and gh-9968 + 2 ** 1024, 0]) +def test_longdouble_from_int(int_val): + # for issue gh-9968 + str_val = str(int_val) + # we'll expect a RuntimeWarning on platforms + # with np.longdouble equivalent to np.double + # for large integer input + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + # can be inf==inf on some platforms + assert np.longdouble(int_val) == np.longdouble(str_val) + # we can't directly compare the int and + # max longdouble value on all platforms + if np.allclose(np.finfo(np.longdouble).max, + np.finfo(np.double).max) and w: + assert w[0].category is RuntimeWarning + +@pytest.mark.parametrize("bool_val", [ + True, False]) +def test_longdouble_from_bool(bool_val): + assert np.longdouble(bool_val) == np.longdouble(int(bool_val)) + + +@pytest.mark.skipif( + not (IS_MUSL and platform.machine() == "x86_64"), + reason="only need to run on musllinux_x86_64" +) +def test_musllinux_x86_64_signature(): + # this test may fail if you're emulating musllinux_x86_64 on a different + # architecture, but should pass natively. + known_sigs = [b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf'] + sig = (np.longdouble(-1.0) / np.longdouble(10.0)) + sig = sig.view(sig.dtype.newbyteorder('<')).tobytes()[:10] + assert sig in known_sigs + + +def test_eps_positive(): + # np.finfo('g').eps should be positive on all platforms. If this isn't true + # then something may have gone wrong with the MachArLike, e.g. if + # np._core.getlimits._discovered_machar didn't work properly + assert np.finfo(np.longdouble).eps > 0. diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_machar.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_machar.py new file mode 100644 index 0000000..2d772dd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_machar.py @@ -0,0 +1,30 @@ +""" +Test machar. Given recent changes to hardcode type data, we might want to get +rid of both MachAr and this test at some point. + +""" +import numpy._core.numerictypes as ntypes +from numpy import array, errstate +from numpy._core._machar import MachAr + + +class TestMachAr: + def _run_machar_highprec(self): + # Instantiate MachAr instance with high enough precision to cause + # underflow + try: + hiprec = ntypes.float96 + MachAr(lambda v: array(v, hiprec)) + except AttributeError: + # Fixme, this needs to raise a 'skip' exception. + "Skipping test: no ntypes.float96 available on this platform." + + def test_underlow(self): + # Regression test for #759: + # instantiating MachAr for dtype = np.float96 raises spurious warning. + with errstate(all='raise'): + try: + self._run_machar_highprec() + except FloatingPointError as e: + msg = f"Caught {e} exception, should not have been raised." 
+ raise AssertionError(msg) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_mem_overlap.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_mem_overlap.py new file mode 100644 index 0000000..d173567 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_mem_overlap.py @@ -0,0 +1,930 @@ +import itertools + +import pytest +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine + +import numpy as np +from numpy._core import _umath_tests +from numpy.lib.stride_tricks import as_strided +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises + +ndims = 2 +size = 10 +shape = tuple([size] * ndims) + +MAY_SHARE_BOUNDS = 0 +MAY_SHARE_EXACT = -1 + + +def _indices_for_nelems(nelems): + """Returns slices of length nelems, from start onwards, in direction sign.""" + + if nelems == 0: + return [size // 2] # int index + + res = [] + for step in (1, 2): + for sign in (-1, 1): + start = size // 2 - nelems * step * sign // 2 + stop = start + nelems * step * sign + res.append(slice(start, stop, step * sign)) + + return res + + +def _indices_for_axis(): + """Returns (src, dst) pairs of indices.""" + + res = [] + for nelems in (0, 2, 3): + ind = _indices_for_nelems(nelems) + res.extend(itertools.product(ind, ind)) # all assignments of size "nelems" + + return res + + +def _indices(ndims): + """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs.""" + + ind = _indices_for_axis() + return itertools.product(ind, repeat=ndims) + + +def _check_assignment(srcidx, dstidx): + """Check assignment arr[dstidx] = arr[srcidx] works.""" + + arr = np.arange(np.prod(shape)).reshape(shape) + + cpy = arr.copy() + + cpy[dstidx] = arr[srcidx] + arr[dstidx] = arr[srcidx] + + assert_(np.all(arr == cpy), + f'assigning arr[{dstidx}] = arr[{srcidx}]') + + +def test_overlapping_assignments(): + # Test automatically generated assignments which overlap in memory. 
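+ # (Added note.) _check_assignment compares each overlapping in-place
+ # assignment against the same assignment performed into an untouched
+ # copy, so memcpy-style behaviour where memmove-style buffering is
+ # required shows up as a mismatch.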
+ + inds = _indices(ndims) + + for ind in inds: + srcidx = tuple(a[0] for a in ind) + dstidx = tuple(a[1] for a in ind) + + _check_assignment(srcidx, dstidx) + + +@pytest.mark.slow +def test_diophantine_fuzz(): + # Fuzz test the diophantine solver + rng = np.random.RandomState(1234) + + max_int = np.iinfo(np.intp).max + + for ndim in range(10): + feasible_count = 0 + infeasible_count = 0 + + min_count = 500 // (ndim + 1) + + while min(feasible_count, infeasible_count) < min_count: + # Ensure big and small integer problems + A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 + U_max = rng.randint(0, 11, dtype=np.intp)**6 + + A_max = min(max_int, A_max) + U_max = min(max_int - 1, U_max) + + A = tuple(int(rng.randint(1, A_max + 1, dtype=np.intp)) + for j in range(ndim)) + U = tuple(int(rng.randint(0, U_max + 2, dtype=np.intp)) + for j in range(ndim)) + + b_ub = min(max_int - 2, sum(a * ub for a, ub in zip(A, U))) + b = int(rng.randint(-1, b_ub + 2, dtype=np.intp)) + + if ndim == 0 and feasible_count < min_count: + b = 0 + + X = solve_diophantine(A, U, b) + + if X is None: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is None, (A, U, b, X_simplified)) + + # Check no solution exists (provided the problem is + # small enough so that brute force checking doesn't + # take too long) + ranges = tuple(range(0, a * ub + 1, a) for a, ub in zip(A, U)) + + size = 1 + for r in ranges: + size *= len(r) + if size < 100000: + assert_(not any(sum(w) == b for w in itertools.product(*ranges))) + infeasible_count += 1 + else: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is not None, (A, U, b, X_simplified)) + + # Check validity + assert_(sum(a * x for a, x in zip(A, X)) == b) + assert_(all(0 <= x <= ub for x, ub in zip(X, U))) + feasible_count += 1 + + +def test_diophantine_overflow(): + # Smoke test integer overflow detection + max_intp = np.iinfo(np.intp).max + max_int64 = np.iinfo(np.int64).max + + if max_int64 <= max_intp: + # Check that the algorithm works internally in 128-bit; + # solving this problem requires large intermediate numbers + A = (max_int64 // 2, max_int64 // 2 - 10) + U = (max_int64 // 2, max_int64 // 2 - 10) + b = 2 * (max_int64 // 2) - 10 + + assert_equal(solve_diophantine(A, U, b), (1, 1)) + + +def check_may_share_memory_exact(a, b): + got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + assert_equal(np.may_share_memory(a, b), + np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS)) + + a.fill(0) + b.fill(0) + a.fill(1) + exact = b.any() + + err_msg = "" + if got != exact: + err_msg = " " + "\n ".join([ + f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", + f"shape_a = {a.shape!r}", + f"shape_b = {b.shape!r}", + f"strides_a = {a.strides!r}", + f"strides_b = {b.strides!r}", + f"size_a = {a.size!r}", + f"size_b = {b.size!r}" + ]) + + assert_equal(got, exact, err_msg=err_msg) + + +def test_may_share_memory_manual(): + # Manual test cases for may_share_memory + + # Base arrays + xs0 = [ + np.zeros([13, 21, 23, 22], dtype=np.int8), + np.zeros([13, 21, 23 * 2, 22], dtype=np.int8)[:, :, ::2, :] + ] + + # Generate all negative stride combinations + xs = [] + for x in xs0: + for ss in itertools.product(*(([slice(None), slice(None, None, -1)],) * 4)): + xp = x[ss] + xs.append(xp) + + for x in xs: + # The default is a simple extent check + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, 
:])) + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :], max_work=None)) + + # Exact checks + check_may_share_memory_exact(x[:, 0, :], x[:, 1, :]) + check_may_share_memory_exact(x[:, ::7], x[:, 3::3]) + + try: + xp = x.ravel() + if xp.flags.owndata: + continue + xp = xp.view(np.int16) + except ValueError: + continue + + # 0-size arrays cannot overlap + check_may_share_memory_exact(x.ravel()[6:6], + xp.reshape(13, 21, 23, 11)[:, ::7]) + + # Test itemsize is dealt with + check_may_share_memory_exact(x[:, ::7], + xp.reshape(13, 21, 23, 11)) + check_may_share_memory_exact(x[:, ::7], + xp.reshape(13, 21, 23, 11)[:, 3::3]) + check_may_share_memory_exact(x.ravel()[6:7], + xp.reshape(13, 21, 23, 11)[:, ::7]) + + # Check unit size + x = np.zeros([1], dtype=np.int8) + check_may_share_memory_exact(x, x) + check_may_share_memory_exact(x, x.copy()) + + +def iter_random_view_pairs(x, same_steps=True, equal_size=False): + rng = np.random.RandomState(1234) + + if equal_size and same_steps: + raise ValueError + + def random_slice(n, step): + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + def random_slice_fixed_size(n, step, size): + start = rng.randint(0, n + 1 - size * step) + stop = start + (size - 1) * step + 1 + if rng.randint(0, 2) == 0: + stop, start = start - 1, stop - 1 + if stop < 0: + stop = None + step *= -1 + return slice(start, stop, step) + + # First a few regular views + yield x, x + for j in range(1, 7, 3): + yield x[j:], x[:-j] + yield x[..., j:], x[..., :-j] + + # An array with zero stride internal overlap + strides = list(x.strides) + strides[0] = 0 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # An array with non-zero stride internal overlap + strides = list(x.strides) + if strides[0] > 1: + strides[0] = 1 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # Then discontiguous views + while True: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + + t1 = np.arange(x.ndim) + rng.shuffle(t1) + + if equal_size: + t2 = t1 + else: + t2 = np.arange(x.ndim) + rng.shuffle(t2) + + a = x[s1] + + if equal_size: + if a.size == 0: + continue + + steps2 = tuple(rng.randint(1, max(2, p // (1 + pa))) + if rng.randint(0, 5) == 0 else 1 + for p, s, pa in zip(x.shape, s1, a.shape)) + s2 = tuple(random_slice_fixed_size(p, s, pa) + for p, s, pa in zip(x.shape, steps2, a.shape)) + elif same_steps: + steps2 = steps + else: + steps2 = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + + if not equal_size: + s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) + + a = a.transpose(t1) + b = x[s2].transpose(t2) + + yield a, b + + +def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): + # Check that overlap problems with common strides are solved with + # little work. 
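# The distinction exercised by these checks, in miniature (a sketch
# with illustrative *_demo names): the default np.may_share_memory()
# only compares memory extents, while np.shares_memory() answers the
# exact element-overlap question that the fuzzers below stress.
import numpy as np
x_demo = np.arange(10)
a_demo, b_demo = x_demo[::2], x_demo[1::2]    # interleaved views
assert np.may_share_memory(a_demo, b_demo)    # extents overlap: True
assert not np.shares_memory(a_demo, b_demo)   # but no element is shared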
+ x = np.zeros([17, 34, 71, 97], dtype=np.int16) + + feasible = 0 + infeasible = 0 + + pair_iter = iter_random_view_pairs(x, same_steps) + + while min(feasible, infeasible) < min_count: + a, b = next(pair_iter) + + bounds_overlap = np.may_share_memory(a, b) + may_share_answer = np.may_share_memory(a, b) + easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b)) + exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + if easy_answer != exact_answer: + # assert_equal is slow... + assert_equal(easy_answer, exact_answer) + + if may_share_answer != bounds_overlap: + assert_equal(may_share_answer, bounds_overlap) + + if bounds_overlap: + if exact_answer: + feasible += 1 + else: + infeasible += 1 + + +@pytest.mark.slow +def test_may_share_memory_easy_fuzz(): + # Check that overlap problems with common strides are always + # solved with little work. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, + same_steps=True, + min_count=2000) + + +@pytest.mark.slow +def test_may_share_memory_harder_fuzz(): + # Overlap problems with not necessarily common strides take more + # work. + # + # The work bound below can't be reduced much. Harder problems can + # also exist but not be detected here, as the set of problems + # comes from RNG. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size) // 2, + same_steps=False, + min_count=2000) + + +def test_shares_memory_api(): + x = np.zeros([4, 5, 6], dtype=np.int8) + + assert_equal(np.shares_memory(x, x), True) + assert_equal(np.shares_memory(x, x.copy()), False) + + a = x[:, ::2, ::3] + b = x[:, ::3, ::2] + assert_equal(np.shares_memory(a, b), True) + assert_equal(np.shares_memory(a, b, max_work=None), True) + assert_raises( + np.exceptions.TooHardError, np.shares_memory, a, b, max_work=1 + ) + + +def test_may_share_memory_bad_max_work(): + x = np.zeros([1]) + assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) + assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) + + +def test_internal_overlap_diophantine(): + def check(A, U, exists=None): + X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) + + if exists is None: + exists = (X is not None) + + if X is not None: + assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) + assert_(all(0 <= x <= u for x, u in zip(X, U))) + assert_(any(x != u // 2 for x, u in zip(X, U))) + + if exists: + assert_(X is not None, repr(X)) + else: + assert_(X is None, repr(X)) + + # Smoke tests + check((3, 2), (2 * 2, 3 * 2), exists=True) + check((3 * 2, 2), (15 * 2, (3 - 1) * 2), exists=False) + + +def test_internal_overlap_slices(): + # Slicing an array never generates internal overlap + + x = np.zeros([17, 34, 71, 97], dtype=np.int16) + + rng = np.random.RandomState(1234) + + def random_slice(n, step): + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + cases = 0 + min_count = 5000 + + while cases < min_count: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + t1 = np.arange(x.ndim) + rng.shuffle(t1) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + a = x[s1].transpose(t1) + + assert_(not internal_overlap(a)) + cases += 1 + + +def check_internal_overlap(a, manual_expected=None): + got = internal_overlap(a) + + # Brute-force check + m 
= set() + ranges = tuple(range(n) for n in a.shape) + for v in itertools.product(*ranges): + offset = sum(s * w for s, w in zip(a.strides, v)) + if offset in m: + expected = True + break + else: + m.add(offset) + else: + expected = False + + # Compare + if got != expected: + assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) + if manual_expected is not None and expected != manual_expected: + assert_equal(expected, manual_expected) + return got + + +def test_internal_overlap_manual(): + # Stride tricks can construct arrays with internal overlap + + # We don't care about memory bounds, the array is not + # read/write accessed + x = np.arange(1).astype(np.int8) + + # Check low-dimensional special cases + + check_internal_overlap(x, False) # 1-dim + check_internal_overlap(x.reshape([]), False) # 0-dim + + a = as_strided(x, strides=(3, 4), shape=(4, 4)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(3, 4), shape=(5, 4)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0,), shape=(0,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(1,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(2,)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(87, 22)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(1, 22)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0, -9993), shape=(0, 22)) + check_internal_overlap(a, False) + + +def test_internal_overlap_fuzz(): + # Fuzz check; the brute-force check is fairly slow + + x = np.arange(1).astype(np.int8) + + overlap = 0 + no_overlap = 0 + min_count = 100 + + rng = np.random.RandomState(1234) + + while min(overlap, no_overlap) < min_count: + ndim = rng.randint(1, 4, dtype=np.intp) + + strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) + for j in range(ndim)) + shape = tuple(rng.randint(1, 30, dtype=np.intp) + for j in range(ndim)) + + a = as_strided(x, strides=strides, shape=shape) + result = check_internal_overlap(a) + + if result: + overlap += 1 + else: + no_overlap += 1 + + +def test_non_ndarray_inputs(): + # Regression check for gh-5604 + + class MyArray: + def __init__(self, data): + self.data = data + + @property + def __array_interface__(self): + return self.data.__array_interface__ + + class MyArray2: + def __init__(self, data): + self.data = data + + def __array__(self, dtype=None, copy=None): + return self.data + + for cls in [MyArray, MyArray2]: + x = np.arange(5) + + assert_(np.may_share_memory(cls(x[::2]), x[1::2])) + assert_(not np.shares_memory(cls(x[::2]), x[1::2])) + + assert_(np.shares_memory(cls(x[1::3]), x[::2])) + assert_(np.may_share_memory(cls(x[1::3]), x[::2])) + + +def view_element_first_byte(x): + """Construct an array viewing the first byte of each element of `x`""" + from numpy.lib._stride_tricks_impl import DummyArray + interface = dict(x.__array_interface__) + interface['typestr'] = '|b1' + interface['descr'] = [('', '|b1')] + return np.asarray(DummyArray(interface, x)) + + +def assert_copy_equivalent(operation, args, out, **kwargs): + """ + Check that operation(*args, out=out) produces results + equivalent to out[...] = operation(*args, out=out.copy()) + """ + + kwargs['out'] = out + kwargs2 = dict(kwargs) + kwargs2['out'] = out.copy() + + out_orig = out.copy() + out[...] = operation(*args, **kwargs2) + expected = out.copy() + out[...] 
= out_orig + + got = operation(*args, **kwargs).copy() + + if (got != expected).any(): + assert_equal(got, expected) + + +class TestUFunc: + """ + Test ufunc call memory overlap handling + """ + + def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, + count=5000): + shapes = [7, 13, 8, 21, 29, 32] + + rng = np.random.RandomState(1234) + + for ndim in range(1, 6): + x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = count // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + a_orig = a.copy() + b_orig = b.copy() + + if get_out_axis_size is None: + assert_copy_equivalent(operation, [a], out=b) + + if np.shares_memory(a, b): + overlapping += 1 + else: + for axis in itertools.chain(range(ndim), [None]): + a[...] = a_orig + b[...] = b_orig + + # Determine size for reduction axis (None if scalar) + outsize, scalarize = get_out_axis_size(a, b, axis) + if outsize == 'skip': + continue + + # Slice b to get an output array of the correct size + sl = [slice(None)] * ndim + if axis is None: + if outsize is None: + sl = [slice(0, 1)] + [0] * (ndim - 1) + else: + sl = [slice(0, outsize)] + [0] * (ndim - 1) + elif outsize is None: + k = b.shape[axis] // 2 + if ndim == 1: + sl[axis] = slice(k, k + 1) + else: + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) + b_out = b[tuple(sl)] + + if scalarize: + b_out = b_out.reshape([]) + + if np.shares_memory(a, b_out): + overlapping += 1 + + # Check result + assert_copy_equivalent(operation, [a], out=b_out, axis=axis) + + @pytest.mark.slow + def test_unary_ufunc_call_fuzz(self): + self.check_unary_fuzz(np.invert, None, np.int16) + + @pytest.mark.slow + def test_unary_ufunc_call_complex_fuzz(self): + # Complex typically has a smaller alignment than itemsize + self.check_unary_fuzz(np.negative, None, np.complex128, count=500) + + def test_binary_ufunc_accumulate_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # accumulate doesn't support this + else: + return a.shape[axis], False + + self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduce_fuzz(self): + def get_out_axis_size(a, b, axis): + return None, (axis is None or a.ndim == 1) + + self.check_unary_fuzz(np.add.reduce, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # reduceat doesn't support this + else: + return a.shape[axis], False + + def do_reduceat(a, out, axis): + if axis is None: + size = len(a) + step = size // len(out) + else: + size = a.shape[axis] + step = a.shape[axis] // out.shape[axis] + idx = np.arange(0, size, step) + return np.add.reduceat(a, idx, out=out, axis=axis) + + self.check_unary_fuzz(do_reduceat, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_manual(self): + def check(ufunc, a, ind, out): + c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) + c2 = ufunc.reduceat(a, ind, out=out) + assert_array_equal(c1, c2) + + # Exactly same input/output arrays + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1].copy(), a) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1], a) + + 
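# A concrete instance of the copy-equivalence property the fuzz and
# manual checks above assert, here for reduceat with an output that
# aliases its input (sketch; *_demo names are illustrative, and the
# overlap machinery is expected to buffer internally):
import numpy as np
a_demo = np.arange(16, dtype=np.int16)
idx_demo = np.arange(0, 16, 4)
expected_demo = np.add.reduceat(a_demo.copy(), idx_demo)  # no aliasing
np.add.reduceat(a_demo, idx_demo, out=a_demo[:4])         # out overlaps input
assert (a_demo[:4] == expected_demo).all()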
@pytest.mark.slow + def test_unary_gufunc_fuzz(self): + shapes = [7, 13, 8, 21, 29, 32] + gufunc = _umath_tests.euclidean_pdist + + rng = np.random.RandomState(1234) + + for ndim in range(2, 6): + x = rng.rand(*shapes[:ndim]) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = 500 // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: + continue + + # Ensure the shapes are so that euclidean_pdist is happy + if b.shape[-1] > b.shape[-2]: + b = b[..., 0, :] + else: + b = b[..., :, 0] + + n = a.shape[-2] + p = n * (n - 1) // 2 + if p <= b.shape[-1] and p > 0: + b = b[..., :p] + else: + n = max(2, int(np.sqrt(b.shape[-1])) // 2) + p = n * (n - 1) // 2 + a = a[..., :n, :] + b = b[..., :p] + + # Call + if np.shares_memory(a, b): + overlapping += 1 + + with np.errstate(over='ignore', invalid='ignore'): + assert_copy_equivalent(gufunc, [a], out=b) + + def test_ufunc_at_manual(self): + def check(ufunc, a, ind, b=None): + a0 = a.copy() + if b is None: + ufunc.at(a0, ind.copy()) + c1 = a0.copy() + ufunc.at(a, ind) + c2 = a.copy() + else: + ufunc.at(a0, ind.copy(), b.copy()) + c1 = a0.copy() + ufunc.at(a, ind, b) + c2 = a.copy() + assert_array_equal(c1, c2) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.invert, a[::-1], a) + + # Overlap with second data array + a = np.arange(100, dtype=np.int16) + ind = np.arange(0, 100, 2, dtype=np.int16) + check(np.add, a, ind, a[25:75]) + + def test_unary_ufunc_1d_manual(self): + # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`) + + def check(a, b): + a_orig = a.copy() + b_orig = b.copy() + + b0 = b.copy() + c1 = ufunc(a, out=b0) + c2 = ufunc(a, out=b) + assert_array_equal(c1, c2) + + # Trigger "fancy ufunc loop" code path + mask = view_element_first_byte(b).view(np.bool) + + a[...] = a_orig + b[...] = b_orig + c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() + + a[...] = a_orig + b[...] = b_orig + c2 = ufunc(a, out=b, where=mask.copy()).copy() + + # Also, mask overlapping with output + a[...] = a_orig + b[...] 
= b_orig + c3 = ufunc(a, out=b, where=mask).copy() + + assert_array_equal(c1, c2) + assert_array_equal(c1, c3) + + dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, np.complex64, np.complex128] + dtypes = [np.dtype(x) for x in dtypes] + + for dtype in dtypes: + if np.issubdtype(dtype, np.integer): + ufunc = np.invert + else: + ufunc = np.reciprocal + + n = 1000 + k = 10 + indices = [ + np.index_exp[:n], + np.index_exp[k:k + n], + np.index_exp[n - 1::-1], + np.index_exp[k + n - 1:k - 1:-1], + np.index_exp[:2 * n:2], + np.index_exp[k:k + 2 * n:2], + np.index_exp[2 * n - 1::-2], + np.index_exp[k + 2 * n - 1:k - 1:-2], + ] + + for xi, yi in itertools.product(indices, indices): + v = np.arange(1, 1 + n * 2 + k, dtype=dtype) + x = v[xi] + y = v[yi] + + with np.errstate(all='ignore'): + check(x, y) + + # Scalar cases + check(x[:1], y) + check(x[-1:], y) + check(x[:1].reshape([]), y) + check(x[-1:].reshape([]), y) + + def test_unary_ufunc_where_same(self): + # Check behavior at wheremask overlap + ufunc = np.invert + + def check(a, out, mask): + c1 = ufunc(a, out=out.copy(), where=mask.copy()) + c2 = ufunc(a, out=out, where=mask) + assert_array_equal(c1, c2) + + # Check behavior with same input and output arrays + x = np.arange(100).astype(np.bool) + check(x, x, x) + check(x, x.copy(), x) + check(x, x, x.copy()) + + @pytest.mark.slow + def test_binary_ufunc_1d_manual(self): + ufunc = np.add + + def check(a, b, c): + c0 = c.copy() + c1 = ufunc(a, b, out=c0) + c2 = ufunc(a, b, out=c) + assert_array_equal(c1, c2) + + for dtype in [np.int8, np.int16, np.int32, np.int64, + np.float32, np.float64, np.complex64, np.complex128]: + # Check different data dependency orders + + n = 1000 + k = 10 + + indices = [] + for p in [1, 2]: + indices.extend([ + np.index_exp[:p * n:p], + np.index_exp[k:k + p * n:p], + np.index_exp[p * n - 1::-p], + np.index_exp[k + p * n - 1:k - 1:-p], + ]) + + for x, y, z in itertools.product(indices, indices, indices): + v = np.arange(6 * n).astype(dtype) + x = v[x] + y = v[y] + z = v[z] + + check(x, y, z) + + # Scalar cases + check(x[:1], y, z) + check(x[-1:], y, z) + check(x[:1].reshape([]), y, z) + check(x[-1:].reshape([]), y, z) + check(x, y[:1], z) + check(x, y[-1:], z) + check(x, y[:1].reshape([]), z) + check(x, y[-1:].reshape([]), z) + + def test_inplace_op_simple_manual(self): + rng = np.random.RandomState(1234) + x = rng.rand(200, 200) # bigger than bufsize + + x += x.T + assert_array_equal(x - x.T, 0) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_mem_policy.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_mem_policy.py new file mode 100644 index 0000000..b9f971e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_mem_policy.py @@ -0,0 +1,452 @@ +import asyncio +import gc +import os +import sys +import sysconfig +import threading + +import pytest + +import numpy as np +from numpy._core.multiarray import get_handler_name +from numpy.testing import IS_EDITABLE, IS_WASM, assert_warns, extbuild + + +@pytest.fixture +def get_module(tmp_path): + """ Add a memory policy that returns a false pointer 64 bytes into the + actual allocation, and fill the prefix with some text. Then check at each + memory manipulation that the prefix exists, to make sure all alloc/realloc/ + free/calloc go via the functions here. 
+ """ + if sys.platform.startswith('cygwin'): + pytest.skip('link fails on cygwin') + if IS_WASM: + pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") + + functions = [ + ("get_default_policy", "METH_NOARGS", """ + Py_INCREF(PyDataMem_DefaultHandler); + return PyDataMem_DefaultHandler; + """), + ("set_secret_data_policy", "METH_NOARGS", """ + PyObject *secret_data = + PyCapsule_New(&secret_data_handler, "mem_handler", NULL); + if (secret_data == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(secret_data); + Py_DECREF(secret_data); + return old; + """), + ("set_wrong_capsule_name_data_policy", "METH_NOARGS", """ + PyObject *wrong_name_capsule = + PyCapsule_New(&secret_data_handler, "not_mem_handler", NULL); + if (wrong_name_capsule == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(wrong_name_capsule); + Py_DECREF(wrong_name_capsule); + return old; + """), + ("set_old_policy", "METH_O", """ + PyObject *old; + if (args != NULL && PyCapsule_CheckExact(args)) { + old = PyDataMem_SetHandler(args); + } + else { + old = PyDataMem_SetHandler(NULL); + } + return old; + """), + ("get_array", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL, + buf, NPY_ARRAY_WRITEABLE, NULL); + """), + ("set_own", "METH_O", """ + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_ValueError, + "need an ndarray"); + return NULL; + } + PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA); + // Maybe try this too? + // PyArray_BASE(PyArrayObject *)args) = NULL; + Py_RETURN_NONE; + """), + ("get_array_with_base", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, + NULL, buf, + NPY_ARRAY_WRITEABLE, NULL); + if (arr == NULL) return NULL; + PyObject *obj = PyCapsule_New(buf, "buf capsule", + (PyCapsule_Destructor)&warn_on_free); + if (obj == NULL) { + Py_DECREF(arr); + return NULL; + } + if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) { + Py_DECREF(arr); + Py_DECREF(obj); + return NULL; + } + return arr; + + """), + ] + prologue = ''' + #define NPY_TARGET_VERSION NPY_1_22_API_VERSION + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + /* + * This struct allows the dynamic configuration of the allocator funcs + * of the `secret_data_allocator`. It is provided here for + * demonstration purposes, as a valid `ctx` use-case scenario. 
+ */ + typedef struct { + void *(*malloc)(size_t); + void *(*calloc)(size_t, size_t); + void *(*realloc)(void *, size_t); + void (*free)(void *); + } SecretDataAllocatorFuncs; + + NPY_NO_EXPORT void * + shift_alloc(void *ctx, size_t sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->malloc(sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld", (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void * + shift_zero(void *ctx, size_t sz, size_t cnt) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->calloc(sz + 64, cnt); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld via zero", + (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void + shift_free(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p == NULL) { + return ; + } + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_free, " + "no appropriate prefix\\n"); + /* Make C runtime crash by calling free on the wrong address */ + funcs->free((char *)p + 10); + /* funcs->free(real); */ + } + else { + npy_uintp i = (npy_uintp)atoi(real +20); + if (i != sz) { + fprintf(stderr, "uh-oh, unmatched shift_free" + "(ptr, %ld) but allocated %ld\\n", sz, i); + /* This happens in some places, only print */ + funcs->free(real); + } + else { + funcs->free(real); + } + } + } + NPY_NO_EXPORT void * + shift_realloc(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p != NULL) { + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_realloc\\n"); + return realloc(p, sz); + } + return (void *)((char *)funcs->realloc(real, sz + 64) + 64); + } + else { + char *real = (char *)funcs->realloc(p, sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated " + "%ld via realloc", (unsigned long)sz); + return (void *)(real + 64); + } + } + /* As an example, we use the standard {m|c|re}alloc/free funcs. 
*/ + static SecretDataAllocatorFuncs secret_data_handler_ctx = { + malloc, + calloc, + realloc, + free + }; + static PyDataMem_Handler secret_data_handler = { + "secret_data_allocator", + 1, + { + &secret_data_handler_ctx, /* ctx */ + shift_alloc, /* malloc */ + shift_zero, /* calloc */ + shift_realloc, /* realloc */ + shift_free /* free */ + } + }; + void warn_on_free(void *capsule) { + PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1); + void * obj = PyCapsule_GetPointer(capsule, + PyCapsule_GetName(capsule)); + free(obj); + }; + ''' + more_init = "import_array();" + try: + import mem_policy + return mem_policy + except ImportError: + pass + # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + return extbuild.build_and_import_extension('mem_policy', + functions, + prologue=prologue, + include_dirs=[np.get_include()], + build_dir=tmp_path, + more_init=more_init) + + +def test_set_policy(get_module): + + get_handler_name = np._core.multiarray.get_handler_name + get_handler_version = np._core.multiarray.get_handler_version + orig_policy_name = get_handler_name() + + a = np.arange(10).reshape((2, 5)) # a doesn't own its own data + assert get_handler_name(a) is None + assert get_handler_version(a) is None + assert get_handler_name(a.base) == orig_policy_name + assert get_handler_version(a.base) == 1 + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10).reshape((2, 5)) # b doesn't own its own data + assert get_handler_name(b) is None + assert get_handler_version(b) is None + assert get_handler_name(b.base) == 'secret_data_allocator' + assert get_handler_version(b.base) == 1 + + if orig_policy_name == 'default_allocator': + get_module.set_old_policy(None) # tests PyDataMem_SetHandler(NULL) + assert get_handler_name() == 'default_allocator' + else: + get_module.set_old_policy(orig_policy) + assert get_handler_name() == orig_policy_name + + with pytest.raises(ValueError, + match="Capsule must be named 'mem_handler'"): + get_module.set_wrong_capsule_name_data_policy() + + +def test_default_policy_singleton(get_module): + get_handler_name = np._core.multiarray.get_handler_name + + # set the policy to default + orig_policy = get_module.set_old_policy(None) + + assert get_handler_name() == 'default_allocator' + + # re-set the policy to default + def_policy_1 = get_module.set_old_policy(None) + + assert get_handler_name() == 'default_allocator' + + # set the policy to original + def_policy_2 = get_module.set_old_policy(orig_policy) + + # since default policy is a singleton, + # these should be the same object + assert def_policy_1 is def_policy_2 is get_module.get_default_policy() + + +def test_policy_propagation(get_module): + # The memory policy goes hand-in-hand with flags.owndata + + class MyArr(np.ndarray): + pass + + get_handler_name = np._core.multiarray.get_handler_name + orig_policy_name = get_handler_name() + a = np.arange(10).view(MyArr).reshape((2, 5)) + assert get_handler_name(a) is None + assert a.flags.owndata is False + + assert get_handler_name(a.base) is None + assert a.base.flags.owndata is False + + assert get_handler_name(a.base.base) == orig_policy_name + assert a.base.base.flags.owndata is True + + +async def concurrent_context1(get_module, orig_policy_name, event): + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert 
get_handler_name() == 'default_allocator' + event.set() + + +async def concurrent_context2(get_module, orig_policy_name, event): + await event.wait() + # the policy is not affected by changes in parallel contexts + assert get_handler_name() == orig_policy_name + # change policy in the child context + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert get_handler_name() == 'default_allocator' + + +async def async_test_context_locality(get_module): + orig_policy_name = np._core.multiarray.get_handler_name() + + event = asyncio.Event() + # the child contexts inherit the parent policy + concurrent_task1 = asyncio.create_task( + concurrent_context1(get_module, orig_policy_name, event)) + concurrent_task2 = asyncio.create_task( + concurrent_context2(get_module, orig_policy_name, event)) + await concurrent_task1 + await concurrent_task2 + + # the parent context is not affected by child policy changes + assert np._core.multiarray.get_handler_name() == orig_policy_name + + +def test_context_locality(get_module): + if (sys.implementation.name == 'pypy' + and sys.pypy_version_info[:3] < (7, 3, 6)): + pytest.skip('no context-locality support in PyPy < 7.3.6') + asyncio.run(async_test_context_locality(get_module)) + + +def concurrent_thread1(get_module, event): + get_module.set_secret_data_policy() + assert np._core.multiarray.get_handler_name() == 'secret_data_allocator' + event.set() + + +def concurrent_thread2(get_module, event): + event.wait() + # the policy is not affected by changes in parallel threads + assert np._core.multiarray.get_handler_name() == 'default_allocator' + # change policy in the child thread + get_module.set_secret_data_policy() + + +def test_thread_locality(get_module): + orig_policy_name = np._core.multiarray.get_handler_name() + + event = threading.Event() + # the child threads do not inherit the parent policy + concurrent_task1 = threading.Thread(target=concurrent_thread1, + args=(get_module, event)) + concurrent_task2 = threading.Thread(target=concurrent_thread2, + args=(get_module, event)) + concurrent_task1.start() + concurrent_task2.start() + concurrent_task1.join() + concurrent_task2.join() + + # the parent thread is not affected by child policy changes + assert np._core.multiarray.get_handler_name() == orig_policy_name + + +@pytest.mark.skip(reason="too slow, see gh-23975") +def test_new_policy(get_module): + a = np.arange(10) + orig_policy_name = np._core.multiarray.get_handler_name(a) + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10) + assert np._core.multiarray.get_handler_name(b) == 'secret_data_allocator' + + # test array manipulation. 
This is slow + if orig_policy_name == 'default_allocator': + # when the np._core.test tests recurse into this test, the + # policy will be set so this "if" will be false, preventing + # infinite recursion + # + # if needed, debug this by + # - running tests with -- -s (to not capture stdout/stderr + # - setting verbose=2 + # - setting extra_argv=['-vv'] here + assert np._core.test('full', verbose=1, extra_argv=[]) + # also try the ma tests, the pickling test is quite tricky + assert np.ma.test('full', verbose=1, extra_argv=[]) + + get_module.set_old_policy(orig_policy) + + c = np.arange(10) + assert np._core.multiarray.get_handler_name(c) == orig_policy_name + + +@pytest.mark.xfail(sys.implementation.name == "pypy", + reason=("bad interaction between getenv and " + "os.environ inside pytest")) +@pytest.mark.parametrize("policy", ["0", "1", None]) +def test_switch_owner(get_module, policy): + a = get_module.get_array() + assert np._core.multiarray.get_handler_name(a) is None + get_module.set_own(a) + + if policy is None: + # See what we expect to be set based on the env variable + policy = os.getenv("NUMPY_WARN_IF_NO_MEM_POLICY", "0") == "1" + oldval = None + else: + policy = policy == "1" + oldval = np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy( + policy) + try: + # The policy should be NULL, so we have to assume we can call + # "free". A warning is given if the policy == "1" + if policy: + with assert_warns(RuntimeWarning) as w: + del a + gc.collect() + else: + del a + gc.collect() + + finally: + if oldval is not None: + np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval) + + +def test_owner_is_base(get_module): + a = get_module.get_array_with_base() + with pytest.warns(UserWarning, match='warn_on_free'): + del a + gc.collect() + gc.collect() diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_memmap.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_memmap.py new file mode 100644 index 0000000..cbd8252 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_memmap.py @@ -0,0 +1,246 @@ +import mmap +import os +import sys +from pathlib import Path +from tempfile import NamedTemporaryFile, TemporaryFile + +import pytest + +from numpy import ( + add, + allclose, + arange, + asarray, + average, + isscalar, + memmap, + multiply, + ndarray, + prod, + subtract, + sum, +) +from numpy.testing import ( + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + break_cycles, + suppress_warnings, +) + + +class TestMemmap: + def setup_method(self): + self.tmpfp = NamedTemporaryFile(prefix='mmap') + self.shape = (3, 4) + self.dtype = 'float32' + self.data = arange(12, dtype=self.dtype) + self.data.resize(self.shape) + + def teardown_method(self): + self.tmpfp.close() + self.data = None + if IS_PYPY: + break_cycles() + break_cycles() + + def test_roundtrip(self): + # Write data to file + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp # Test __del__ machinery, which handles cleanup + + # Read data back from file + newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', + shape=self.shape) + assert_(allclose(self.data, newfp)) + assert_array_equal(self.data, newfp) + assert_equal(newfp.flags.writeable, False) + + def test_open_with_filename(self, tmp_path): + tmpname = tmp_path / 'mmap' + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp + + def test_unnamed_file(self): + with TemporaryFile() as f: + fp = 
memmap(f, dtype=self.dtype, shape=self.shape) + del fp + + def test_attributes(self): + offset = 1 + mode = "w+" + fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, + shape=self.shape, offset=offset) + assert_equal(offset, fp.offset) + assert_equal(mode, fp.mode) + del fp + + def test_filename(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + abspath = Path(os.path.abspath(tmpname)) + fp[:] = self.data[:] + assert_equal(abspath, fp.filename) + b = fp[:1] + assert_equal(abspath, b.filename) + del b + del fp + + def test_path(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', + shape=self.shape) + # os.path.realpath does not resolve symlinks on Windows + # see: https://bugs.python.org/issue9949 + # use Path.resolve, just as memmap class does internally + abspath = str(Path(tmpname).resolve()) + fp[:] = self.data[:] + assert_equal(abspath, str(fp.filename.resolve())) + b = fp[:1] + assert_equal(abspath, str(b.filename.resolve())) + del b + del fp + + def test_filename_fileobj(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", + shape=self.shape) + assert_equal(fp.filename, self.tmpfp.name) + + @pytest.mark.skipif(sys.platform == 'gnu0', + reason="Known to fail on hurd") + def test_flush(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + assert_equal(fp[0], self.data[0]) + fp.flush() + + def test_del(self): + # Make sure a view does not delete the underlying mmap + fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp_base[0] = 5 + fp_view = fp_base[0:1] + assert_equal(fp_view[0], 5) + del fp_view + # Should still be able to access and assign values after + # deleting the view + assert_equal(fp_base[0], 5) + fp_base[0] = 6 + assert_equal(fp_base[0], 6) + + def test_arithmetic_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = (fp + 10) + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_indexing_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = fp[(1, 2), (2, 3)] + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_slicing_keeps_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + assert_(fp[:2, :2]._mmap is fp._mmap) + + def test_view(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + new1 = fp.view() + new2 = new1.view() + assert_(new1.base is fp) + assert_(new2.base is fp) + new_array = asarray(fp) + assert_(new_array.base is fp) + + def test_ufunc_return_ndarray(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + with suppress_warnings() as sup: + sup.filter(FutureWarning, "np.average currently does not preserve") + for unary_op in [sum, average, prod]: + result = unary_op(fp) + assert_(isscalar(result)) + assert_(result.__class__ is self.data[0, 0].__class__) + + assert_(unary_op(fp, axis=0).__class__ is ndarray) + assert_(unary_op(fp, axis=1).__class__ is ndarray) + + for binary_op in [add, subtract, multiply]: + assert_(binary_op(fp, self.data).__class__ is ndarray) + assert_(binary_op(self.data, fp).__class__ is ndarray) + assert_(binary_op(fp, fp).__class__ is ndarray) + + fp += 1 + assert fp.__class__ is memmap + add(fp, 1, out=fp) + assert fp.__class__ is memmap + + def test_getitem(self): + fp = 
memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + assert_(fp[1:, :-1].__class__ is memmap) + # Fancy indexing returns a copy that is not memmapped + assert_(fp[[0, 1]].__class__ is ndarray) + + def test_memmap_subclass(self): + class MemmapSubClass(memmap): + pass + + fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + # We keep previous behavior for subclasses of memmap, i.e. the + # ufunc and __getitem__ output is never turned into a ndarray + assert_(sum(fp, axis=0).__class__ is MemmapSubClass) + assert_(sum(fp).__class__ is MemmapSubClass) + assert_(fp[1:, :-1].__class__ is MemmapSubClass) + assert fp[[0, 1]].__class__ is MemmapSubClass + + def test_mmap_offset_greater_than_allocation_granularity(self): + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_(fp.offset == offset) + + def test_empty_array_with_offset_multiple_of_allocation_granularity(self): + self.tmpfp.write(b'a' * mmap.ALLOCATIONGRANULARITY) + size = 0 + offset = mmap.ALLOCATIONGRANULARITY + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_equal(fp.offset, offset) + + def test_no_shape(self): + self.tmpfp.write(b'a' * 16) + mm = memmap(self.tmpfp, dtype='float64') + assert_equal(mm.shape, (2,)) + + def test_empty_array(self): + # gh-12653 + with pytest.raises(ValueError, match='empty file'): + memmap(self.tmpfp, shape=(0, 4), mode='r') + + # gh-27723 + # empty memmap works with mode in ('w+','r+') + memmap(self.tmpfp, shape=(0, 4), mode='w+') + + # ok now the file is not empty + memmap(self.tmpfp, shape=(0, 4), mode='w+') + + def test_shape_type(self): + memmap(self.tmpfp, shape=3, mode='w+') + memmap(self.tmpfp, shape=self.shape, mode='w+') + memmap(self.tmpfp, shape=list(self.shape), mode='w+') + memmap(self.tmpfp, shape=asarray(self.shape), mode='w+') diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_multiarray.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_multiarray.py new file mode 100644 index 0000000..26587b6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_multiarray.py @@ -0,0 +1,10563 @@ +import builtins +import collections.abc +import ctypes +import functools +import gc +import io +import itertools +import mmap +import operator +import os +import pathlib +import pickle +import re +import sys +import tempfile +import warnings +import weakref +from contextlib import contextmanager + +# Need to test an object that does not fully implement math interface +from datetime import datetime, timedelta +from decimal import Decimal + +import numpy._core._multiarray_tests as _multiarray_tests +import pytest +from numpy._core._rational_tests import rational + +import numpy as np +from numpy._core.multiarray import _get_ndarray_c_version, dot +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib.recfunctions import repack_fields +from numpy.testing import ( + BLAS_SUPPORTS_FPE, + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_PYSTON, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, + break_cycles, + check_support_sve, + runstring, + suppress_warnings, + temppath, +) +from numpy.testing._private.utils import 
_no_tracing, requires_memory + + +def assert_arg_sorted(arr, arg): + # resulting array should be sorted and arg values should be unique + assert_equal(arr[arg], np.sort(arr)) + assert_equal(np.sort(arg), np.arange(len(arg))) + + +def assert_arr_partitioned(kth, k, arr_part): + assert_equal(arr_part[k], kth) + assert_array_compare(operator.__le__, arr_part[:k], kth) + assert_array_compare(operator.__ge__, arr_part[k:], kth) + + +def _aligned_zeros(shape, dtype=float, order="C", align=None): + """ + Allocate a new ndarray with aligned memory. + + The ndarray is guaranteed *not* aligned to twice the requested alignment. + Eg, if align=4, guarantees it is not aligned to 8. If align=None uses + dtype.alignment.""" + dtype = np.dtype(dtype) + if dtype == np.dtype(object): + # Can't do this, fall back to standard allocation (which + # should always be sufficiently aligned) + if align is not None: + raise ValueError("object array alignment not supported") + return np.zeros(shape, dtype=dtype, order=order) + if align is None: + align = dtype.alignment + if not hasattr(shape, '__len__'): + shape = (shape,) + size = functools.reduce(operator.mul, shape) * dtype.itemsize + buf = np.empty(size + 2 * align + 1, np.uint8) + + ptr = buf.__array_interface__['data'][0] + offset = ptr % align + if offset != 0: + offset = align - offset + if (ptr % (2 * align)) == 0: + offset += align + + # Note: slices producing 0-size arrays do not necessarily change + # data pointer --- so we use and allocate size+1 + buf = buf[offset:offset + size + 1][:-1] + buf.fill(0) + data = np.ndarray(shape, dtype, buf, order=order) + return data + + +class TestFlags: + def setup_method(self): + self.a = np.arange(10) + + def test_writeable(self): + mydict = locals() + self.a.flags.writeable = False + assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) + self.a.flags.writeable = True + self.a[0] = 5 + self.a[0] = 0 + + def test_writeable_any_base(self): + # Ensure that any base being writeable is sufficient to change flag; + # this is especially interesting for arrays from an array interface. + arr = np.arange(10) + + class subclass(np.ndarray): + pass + + # Create subclass so base will not be collapsed, this is OK to change + view1 = arr.view(subclass) + view2 = view1[...] + arr.flags.writeable = False + view2.flags.writeable = False + view2.flags.writeable = True # Can be set to True again. + + arr = np.arange(10) + + class frominterface: + def __init__(self, arr): + self.arr = arr + self.__array_interface__ = arr.__array_interface__ + + view1 = np.asarray(frominterface) + view2 = view1[...] 
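# The rule this test exercises, distilled (sketch with illustrative
# *_demo names): WRITEABLE can be re-enabled on a view only while the
# base it was taken from still permits writes.
import numpy as np
base_demo = np.arange(3)
view_demo = base_demo[...]
view_demo.flags.writeable = False
view_demo.flags.writeable = True       # OK: base is still writeable
base_demo.flags.writeable = False
view_demo.flags.writeable = False
try:
    view_demo.flags.writeable = True   # rejected: base is now read-only
except ValueError:
    pass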
+ view2.flags.writeable = False + view2.flags.writeable = True + + view1.flags.writeable = False + view2.flags.writeable = False + with assert_raises(ValueError): + # Must assume not writeable, since only base is not: + view2.flags.writeable = True + + def test_writeable_from_readonly(self): + # gh-9440 - make sure fromstring, from buffer on readonly buffers + # set writeable False + data = b'\x00' * 100 + vals = np.frombuffer(data, 'B') + assert_raises(ValueError, vals.setflags, write=True) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) + values = np._core.records.fromstring(data, types) + vals = values['vals'] + assert_raises(ValueError, vals.setflags, write=True) + + def test_writeable_from_buffer(self): + data = bytearray(b'\x00' * 100) + vals = np.frombuffer(data, 'B') + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) + values = np._core.records.fromstring(data, types) + vals = values['vals'] + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") + def test_writeable_pickle(self): + import pickle + # Small arrays will be copied without setting base. + # See condition for using PyArray_SetBaseObject in + # array_setstate. + a = np.arange(1000) + for v in range(pickle.HIGHEST_PROTOCOL): + vals = pickle.loads(pickle.dumps(a, v)) + assert_(vals.flags.writeable) + assert_(isinstance(vals.base, bytes)) + + def test_writeable_from_c_data(self): + # Test that the writeable flag can be changed for an array wrapping + # low level C-data, but not owning its data. + # Also see that this is deprecated to change from python. + from numpy._core._multiarray_tests import get_c_wrapping_array + + arr_writeable = get_c_wrapping_array(True) + assert not arr_writeable.flags.owndata + assert arr_writeable.flags.writeable + view = arr_writeable[...] + + # Toggling the writeable flag works on the view: + view.flags.writeable = False + assert not view.flags.writeable + view.flags.writeable = True + assert view.flags.writeable + # Flag can be unset on the arr_writeable: + arr_writeable.flags.writeable = False + + arr_readonly = get_c_wrapping_array(False) + assert not arr_readonly.flags.owndata + assert not arr_readonly.flags.writeable + + for arr in [arr_writeable, arr_readonly]: + view = arr[...] 
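# The frombuffer behaviour relied on by the tests above, in short
# (sketch with illustrative *_demo names): an immutable buffer yields
# an array whose WRITEABLE flag can never be set, while a mutable
# buffer allows the flag to be toggled freely.
import numpy as np
ro_demo = np.frombuffer(b'\x00' * 4, dtype=np.uint8)
try:
    ro_demo.setflags(write=True)   # rejected: bytes is immutable
except ValueError:
    pass
rw_demo = np.frombuffer(bytearray(4), dtype=np.uint8)
rw_demo.setflags(write=False)
rw_demo.setflags(write=True)       # allowed: bytearray is mutable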
+ view.flags.writeable = False # make sure it is readonly + arr.flags.writeable = False + assert not arr.flags.writeable + + with assert_raises(ValueError): + view.flags.writeable = True + + with assert_raises(ValueError): + arr.flags.writeable = True + + def test_warnonwrite(self): + a = np.arange(10) + a.flags._warn_on_write = True + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always') + a[1] = 10 + a[2] = 10 + # only warn once + assert_(len(w) == 1) + + @pytest.mark.parametrize(["flag", "flag_value", "writeable"], + [("writeable", True, True), + # Delete _warn_on_write after deprecation and simplify + # the parameterization: + ("_warn_on_write", True, False), + ("writeable", False, False)]) + def test_readonly_flag_protocols(self, flag, flag_value, writeable): + a = np.arange(10) + setattr(a.flags, flag, flag_value) + + class MyArr: + __array_struct__ = a.__array_struct__ + + assert memoryview(a).readonly is not writeable + assert a.__array_interface__['data'][1] is not writeable + assert np.asarray(MyArr()).flags.writeable is writeable + + def test_otherflags(self): + assert_equal(self.a.flags.carray, True) + assert_equal(self.a.flags['C'], True) + assert_equal(self.a.flags.farray, False) + assert_equal(self.a.flags.behaved, True) + assert_equal(self.a.flags.fnc, False) + assert_equal(self.a.flags.forc, True) + assert_equal(self.a.flags.owndata, True) + assert_equal(self.a.flags.writeable, True) + assert_equal(self.a.flags.aligned, True) + assert_equal(self.a.flags.writebackifcopy, False) + assert_equal(self.a.flags['X'], False) + assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + + def test_string_align(self): + a = np.zeros(4, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + # not power of two are accessed byte-wise and thus considered aligned + a = np.zeros(5, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + + def test_void_align(self): + a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) + assert_(a.flags.aligned) + + @pytest.mark.parametrize("row_size", [5, 1 << 16]) + @pytest.mark.parametrize("row_count", [1, 5]) + @pytest.mark.parametrize("ndmin", [0, 1, 2]) + def test_xcontiguous_load_txt(self, row_size, row_count, ndmin): + s = io.StringIO('\n'.join(['1.0 ' * row_size] * row_count)) + a = np.loadtxt(s, ndmin=ndmin) + + assert a.flags.c_contiguous + x = [i for i in a.shape if i != 1] + assert a.flags.f_contiguous == (len(x) <= 1) + + +class TestHash: + # see #3793 + def test_int(self): + for st, ut, s in [(np.int8, np.uint8, 8), + (np.int16, np.uint16, 16), + (np.int32, np.uint32, 32), + (np.int64, np.uint64, 64)]: + for i in range(1, s): + assert_equal(hash(st(-2**i)), hash(-2**i), + err_msg="%r: -2**%d" % (st, i)) + assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (st, i - 1)) + assert_equal(hash(st(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (st, i)) + + i = max(i - 1, 1) + assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (ut, i - 1)) + assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (ut, i)) + + +class TestAttributes: + def setup_method(self): + self.one = np.arange(10) + self.two = np.arange(20).reshape(4, 5) + self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + + def test_attributes(self): + assert_equal(self.one.shape, (10,)) + assert_equal(self.two.shape, (4, 5)) + assert_equal(self.three.shape, (2, 5, 6)) + self.three.shape = (10, 3, 2) + assert_equal(self.three.shape, (10, 3, 2)) + self.three.shape = (2, 5, 6) + 
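# The strides identity asserted next, spelled out (sketch with an
# illustrative *_demo name): for a C-contiguous array,
# strides[k] = itemsize * prod(shape[k+1:]).
import numpy as np
t_demo = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
assert t_demo.strides == (5 * 6 * 8, 6 * 8, 8)   # float64 itemsize is 8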
assert_equal(self.one.strides, (self.one.itemsize,)) + num = self.two.itemsize + assert_equal(self.two.strides, (5 * num, num)) + num = self.three.itemsize + assert_equal(self.three.strides, (30 * num, 6 * num, num)) + assert_equal(self.one.ndim, 1) + assert_equal(self.two.ndim, 2) + assert_equal(self.three.ndim, 3) + num = self.two.itemsize + assert_equal(self.two.size, 20) + assert_equal(self.two.nbytes, 20 * num) + assert_equal(self.two.itemsize, self.two.dtype.itemsize) + assert_equal(self.two.base, np.arange(20)) + + def test_dtypeattr(self): + assert_equal(self.one.dtype, np.dtype(np.int_)) + assert_equal(self.three.dtype, np.dtype(np.float64)) + assert_equal(self.one.dtype.char, np.dtype(int).char) + assert self.one.dtype.char in "lq" + assert_equal(self.three.dtype.char, 'd') + assert_(self.three.dtype.str[0] in '<>') + assert_equal(self.one.dtype.str[1], 'i') + assert_equal(self.three.dtype.str[1], 'f') + + def test_int_subclassing(self): + # Regression test for https://github.com/numpy/numpy/pull/3526 + + numpy_int = np.int_(0) + + # int_ doesn't inherit from Python int, because it's not fixed-width + assert_(not isinstance(numpy_int, int)) + + def test_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + return np.ndarray(size, buffer=x, dtype=int, + offset=offset * x.itemsize, + strides=strides * x.itemsize) + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(ValueError, make_array, 8, 3, 1) + assert_equal(make_array(8, 3, 0), np.array([3] * 8)) + # Check behavior reported in gh-2503: + assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) + make_array(0, 0, 10) + + def test_set_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + try: + r = np.ndarray([size], dtype=int, buffer=x, + offset=offset * x.itemsize) + except Exception as e: + raise RuntimeError(e) + r.strides = strides = strides * x.itemsize + return r + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(RuntimeError, make_array, 8, 3, 1) + # Check that the true extent of the array is used. + # Test relies on as_strided base not exposing a buffer. + x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + + def set_strides(arr, strides): + arr.strides = strides + + assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) + + # Test for offset calculations: + x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + shape=(10,), strides=(-1,)) + assert_raises(ValueError, set_strides, x[::-1], -1) + a = x[::-1] + a.strides = 1 + a[::2].strides = 2 + + # test 0d + arr_0d = np.array(0) + arr_0d.strides = () + assert_raises(TypeError, set_strides, arr_0d, None) + + def test_fill(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = np.empty((3, 2, 1), t) + y = np.empty((3, 2, 1), t) + x.fill(1) + y[...] = 1 + assert_equal(x, y) + + def test_fill_max_uint64(self): + x = np.empty((3, 2, 1), dtype=np.uint64) + y = np.empty((3, 2, 1), dtype=np.uint64) + value = 2**64 - 1 + y[...] 
= value + x.fill(value) + assert_array_equal(x, y) + + def test_fill_struct_array(self): + # Filling from a scalar + x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') + x.fill(x[0]) + assert_equal(x['f1'][1], x['f1'][0]) + # Filling from a tuple that can be converted + # to a scalar + x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) + x.fill((3.5, -2)) + assert_array_equal(x['a'], [3.5, 3.5]) + assert_array_equal(x['b'], [-2, -2]) + + def test_fill_readonly(self): + # gh-22922 + a = np.zeros(11) + a.setflags(write=False) + with pytest.raises(ValueError, match=".*read-only"): + a.fill(0) + + def test_fill_subarrays(self): + # NOTE: + # This is also a regression test for a crash with PYTHONMALLOC=debug + + dtype = np.dtype("2i4')) + assert_(np.dtype([('a', 'i4')])) + + def test_structured_non_void(self): + fields = [('a', 'i8'), ('b', 'f8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>f8'), ('b', 'i8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + # Including with embedded subarray dtype (although subarray comparison + # itself may still be a bit weird and compare the raw data) + a = np.array([(5, 42), (10, 1)], dtype=[('a', '10>f8'), ('b', '5i8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + @pytest.mark.parametrize("op", [ + operator.eq, lambda x, y: operator.eq(y, x), + operator.ne, lambda x, y: operator.ne(y, x)]) + def test_void_comparison_failures(self, op): + # In principle, one could decide to return an array of False for some + # if comparisons are impossible. But right now we return TypeError + # when "void" dtype are involved. + x = np.zeros(3, dtype=[('a', 'i1')]) + y = np.zeros(3) + # Cannot compare non-structured to structured: + with pytest.raises(TypeError): + op(x, y) + + # Added title prevents promotion, but casts are OK: + y = np.zeros(3, dtype=[(('title', 'a'), 'i1')]) + assert np.can_cast(y.dtype, x.dtype) + with pytest.raises(TypeError): + op(x, y) + + x = np.zeros(3, dtype="V7") + y = np.zeros(3, dtype="V8") + with pytest.raises(TypeError): + op(x, y) + + def test_casting(self): + # Check that casting a structured array to change its byte order + # works + a = np.array([(1,)], dtype=[('a', 'i4')], casting='unsafe')) + b = a.astype([('a', '>i4')]) + a_tmp = a.byteswap() + a_tmp = a_tmp.view(a_tmp.dtype.newbyteorder()) + assert_equal(b, a_tmp) + assert_equal(a['a'][0], b['a'][0]) + + # Check that equality comparison works on structured arrays if + # they are 'equiv'-castable + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', 'f8')]) + assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) + assert_equal(a == b, [True, True]) + + # Check that 'equiv' casting can change byte order + assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) + c = a.astype(b.dtype, casting='equiv') + assert_equal(a == c, [True, True]) + + # Check that 'safe' casting can change byte order and up-cast + # fields + t = [('a', 'f8')] + assert_(np.can_cast(a.dtype, t, casting='safe')) + c = a.astype(t, casting='safe') + assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), + [True, True]) + + # Check that 'same_kind' casting can change byte order and + # change field widths within a "kind" + t = [('a', 'f4')] + assert_(np.can_cast(a.dtype, t, casting='same_kind')) + c = a.astype(t, casting='same_kind') + assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), + [True, True]) + + # Check that casting fails if the casting 
rule should fail on + # any of the fields + t = [('a', '>i8'), ('b', 'i2'), ('b', 'i8'), ('b', 'i4')] + assert_(not np.can_cast(a.dtype, t, casting=casting)) + t = [('a', '>i4'), ('b', 'i8") + ab = np.array([(1, 2)], dtype=[A, B]) + ba = np.array([(1, 2)], dtype=[B, A]) + assert_raises(TypeError, np.concatenate, ab, ba) + assert_raises(TypeError, np.result_type, ab.dtype, ba.dtype) + assert_raises(TypeError, np.promote_types, ab.dtype, ba.dtype) + + # dtypes with same field names/order but different memory offsets + # and byte-order are promotable to packed nbo. + assert_equal(np.promote_types(ab.dtype, ba[['a', 'b']].dtype), + repack_fields(ab.dtype.newbyteorder('N'))) + + # gh-13667 + # dtypes with different fieldnames but castable field types are castable + assert_equal(np.can_cast(ab.dtype, ba.dtype), True) + assert_equal(ab.astype(ba.dtype).dtype, ba.dtype) + assert_equal(np.can_cast('f8,i8', [('f0', 'f8'), ('f1', 'i8')]), True) + assert_equal(np.can_cast('f8,i8', [('f1', 'f8'), ('f0', 'i8')]), True) + assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')]), False) + assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')], + casting='unsafe'), True) + + ab[:] = ba # make sure assignment still works + + # tests of type-promotion of corresponding fields + dt1 = np.dtype([("", "i4")]) + dt2 = np.dtype([("", "i8")]) + assert_equal(np.promote_types(dt1, dt2), np.dtype([('f0', 'i8')])) + assert_equal(np.promote_types(dt2, dt1), np.dtype([('f0', 'i8')])) + assert_raises(TypeError, np.promote_types, dt1, np.dtype([("", "V3")])) + assert_equal(np.promote_types('i4,f8', 'i8,f4'), + np.dtype([('f0', 'i8'), ('f1', 'f8')])) + # test nested case + dt1nest = np.dtype([("", dt1)]) + dt2nest = np.dtype([("", dt2)]) + assert_equal(np.promote_types(dt1nest, dt2nest), + np.dtype([('f0', np.dtype([('f0', 'i8')]))])) + + # note that offsets are lost when promoting: + dt = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [8]}) + a = np.ones(3, dtype=dt) + assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')])) + + @pytest.mark.parametrize("dtype_dict", [ + {"names": ["a", "b"], "formats": ["i4", "f"], "itemsize": 100}, + {"names": ["a", "b"], "formats": ["i4", "f"], + "offsets": [0, 12]}]) + @pytest.mark.parametrize("align", [True, False]) + def test_structured_promotion_packs(self, dtype_dict, align): + # Structured dtypes are packed when promoted (we consider the packed + # form to be "canonical"), so tere is no extra padding. + dtype = np.dtype(dtype_dict, align=align) + # Remove non "canonical" dtype options: + dtype_dict.pop("itemsize", None) + dtype_dict.pop("offsets", None) + expected = np.dtype(dtype_dict, align=align) + + res = np.promote_types(dtype, dtype) + assert res.itemsize == expected.itemsize + assert res.fields == expected.fields + + # But the "expected" one, should just be returned unchanged: + res = np.promote_types(expected, expected) + assert res is expected + + def test_structured_asarray_is_view(self): + # A scalar viewing an array preserves its view even when creating a + # new array. This test documents behaviour, it may not be the best + # desired behaviour. + arr = np.array([1], dtype="i,i") + scalar = arr[0] + assert not scalar.flags.owndata # view into the array + assert np.asarray(scalar).base is scalar + # But never when a dtype is passed in: + assert np.asarray(scalar, dtype=scalar.dtype).base is None + # A scalar which owns its data does not have this property. 
+ # It is not easy to create one, one method is to use pickle: + scalar = pickle.loads(pickle.dumps(scalar)) + assert scalar.flags.owndata + assert np.asarray(scalar).base is None + +class TestBool: + def test_test_interning(self): + a0 = np.bool(0) + b0 = np.bool(False) + assert_(a0 is b0) + a1 = np.bool(1) + b1 = np.bool(True) + assert_(a1 is b1) + assert_(np.array([True])[0] is a1) + assert_(np.array(True)[()] is a1) + + def test_sum(self): + d = np.ones(101, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + d = np.frombuffer(b'\xff\xff' * 100, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + def check_count_nonzero(self, power, length): + powers = [2 ** i for i in range(length)] + for i in range(2**power): + l = [(i & x) != 0 for x in powers] + a = np.array(l, dtype=bool) + c = builtins.sum(l) + assert_equal(np.count_nonzero(a), c) + av = a.view(np.uint8) + av *= 3 + assert_equal(np.count_nonzero(a), c) + av *= 4 + assert_equal(np.count_nonzero(a), c) + av[av != 0] = 0xFF + assert_equal(np.count_nonzero(a), c) + + def test_count_nonzero(self): + # check all 12 bit combinations in a length 17 array + # covers most cases of the 16 byte unrolled code + self.check_count_nonzero(12, 17) + + @pytest.mark.slow + def test_count_nonzero_all(self): + # check all combinations in a length 17 array + # covers all cases of the 16 byte unrolled code + self.check_count_nonzero(17, 17) + + def test_count_nonzero_unaligned(self): + # prevent mistakes as e.g. gh-4060 + for o in range(7): + a = np.zeros((18,), dtype=bool)[o + 1:] + a[:o] = True + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + a = np.ones((18,), dtype=bool)[o + 1:] + a[:o] = False + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + + def _test_cast_from_flexible(self, dtype): + # empty string -> false + for n in range(3): + v = np.array(b'', (dtype, n)) + assert_equal(bool(v), False) + assert_equal(bool(v[()]), False) + assert_equal(v.astype(bool), False) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.False_) + + # anything else -> true + for n in range(1, 4): + for val in [b'a', b'0', b' ']: + v = np.array(val, (dtype, n)) + assert_equal(bool(v), True) + assert_equal(bool(v[()]), True) + assert_equal(v.astype(bool), True) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.True_) + + def test_cast_from_void(self): + self._test_cast_from_flexible(np.void) + + @pytest.mark.xfail(reason="See gh-9847") + def test_cast_from_unicode(self): + self._test_cast_from_flexible(np.str_) + + @pytest.mark.xfail(reason="See gh-9847") + def test_cast_from_bytes(self): + self._test_cast_from_flexible(np.bytes_) + + +class TestZeroSizeFlexible: + @staticmethod + def _zeros(shape, dtype=str): + dtype = np.dtype(dtype) + if dtype == np.void: + return np.zeros(shape, dtype=(dtype, 0)) + + # not constructable directly + dtype = np.dtype([('x', dtype, 0)]) + return np.zeros(shape, dtype=dtype)['x'] + + def test_create(self): + zs = self._zeros(10, bytes) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, np.void) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, str) + assert_equal(zs.itemsize, 0) + + def _test_sort_partition(self, name, kinds, **kwargs): + # Previously, these would all hang + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + sort_method = getattr(zs, name) + 
sort_func = getattr(np, name) + for kind in kinds: + sort_method(kind=kind, **kwargs) + sort_func(zs, kind=kind, **kwargs) + + def test_sort(self): + self._test_sort_partition('sort', kinds='qhs') + + def test_argsort(self): + self._test_sort_partition('argsort', kinds='qhs') + + def test_partition(self): + self._test_sort_partition('partition', kinds=['introselect'], kth=2) + + def test_argpartition(self): + self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) + + def test_resize(self): + # previously an error + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + zs.resize(25) + zs.resize((10, 10)) + + def test_view(self): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + + # viewing as itself should be allowed + assert_equal(zs.view(dt).dtype, np.dtype(dt)) + + # viewing as any non-empty type gives an empty result + assert_equal(zs.view((dt, 1)).shape, (0,)) + + def test_dumps(self): + zs = self._zeros(10, int) + assert_equal(zs, pickle.loads(zs.dumps())) + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + p = pickle.dumps(zs, protocol=proto) + zs2 = pickle.loads(p) + + assert_equal(zs.dtype, zs2.dtype) + + def test_pickle_empty(self): + """Checking if an empty array pickled and un-pickled will not cause a + segmentation fault""" + arr = np.array([]).reshape(999999, 0) + pk_dmp = pickle.dumps(arr) + pk_load = pickle.loads(pk_dmp) + + assert pk_load.size == 0 + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_pickle_with_buffercallback(self): + array = np.arange(10) + buffers = [] + bytes_string = pickle.dumps(array, buffer_callback=buffers.append, + protocol=5) + array_from_buffer = pickle.loads(bytes_string, buffers=buffers) + # when using pickle protocol 5 with buffer callbacks, + # array_from_buffer is reconstructed from a buffer holding a view + # to the initial array's data, so modifying an element in array + # should modify it in array_from_buffer too. 
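+        # Hedged editorial sketch (names `payload`, `bufs`, `clone` are
+        # illustrative, not part of the original test): the out-of-band
+        # buffer is a view rather than a copy, which np.shares_memory can
+        # confirm directly.
+        payload = np.arange(10)
+        bufs = []
+        data = pickle.dumps(payload, protocol=5, buffer_callback=bufs.append)
+        clone = pickle.loads(data, buffers=bufs)
+        assert np.shares_memory(payload, clone)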
+ array[0] = -1 + assert array_from_buffer[0] == -1, array_from_buffer[0] + + +class TestMethods: + + sort_kinds = ['quicksort', 'heapsort', 'stable'] + + def test_all_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[True, False, True], + [False, False, False], + [True, False, True]]) + wh_lower = np.array([[False], + [False], + [True]]) + for _ax in [0, None]: + assert_equal(a.all(axis=_ax, where=wh_lower), + np.all(a[wh_lower[:, 0], :], axis=_ax)) + assert_equal(np.all(a, axis=_ax, where=wh_lower), + a[wh_lower[:, 0], :].all(axis=_ax)) + + assert_equal(a.all(where=wh_full), True) + assert_equal(np.all(a, where=wh_full), True) + assert_equal(a.all(where=False), True) + assert_equal(np.all(a, where=False), True) + + def test_any_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[False, True, False], + [True, True, True], + [False, False, False]]) + wh_middle = np.array([[False], + [True], + [False]]) + for _ax in [0, None]: + assert_equal(a.any(axis=_ax, where=wh_middle), + np.any(a[wh_middle[:, 0], :], axis=_ax)) + assert_equal(np.any(a, axis=_ax, where=wh_middle), + a[wh_middle[:, 0], :].any(axis=_ax)) + assert_equal(a.any(where=wh_full), False) + assert_equal(np.any(a, where=wh_full), False) + assert_equal(a.any(where=False), False) + assert_equal(np.any(a, where=False), False) + + @pytest.mark.parametrize("dtype", + ["i8", "U10", "object", "datetime64[ms]"]) + def test_any_and_all_result_dtype(self, dtype): + arr = np.ones(3, dtype=dtype) + assert arr.any().dtype == np.bool + assert arr.all().dtype == np.bool + + def test_any_and_all_object_dtype(self): + # (seberg) Not sure we should even allow dtype here, but it is. + arr = np.ones(3, dtype=object) + # keepdims to prevent getting a scalar. 
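+        # Note on the where= reductions above (hedged editorial aside): the
+        # mask replaces excluded elements with the reduction identity, e.g.
+        #     np.all(np.array([False, True]), where=[False, True])  # -> True
+        #     np.any(np.array([True, False]), where=[False, True])  # -> False
+        # which is why all(where=False) is True and any(where=False) is False.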
+ assert arr.any(dtype=object, keepdims=True).dtype == object + assert arr.all(dtype=object, keepdims=True).dtype == object + + def test_compress(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = arr.compress([0, 1, 0, 1, 0], axis=1) + assert_equal(out, tgt) + + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=1) + assert_equal(out, tgt) + + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1]) + assert_equal(out, 1) + + def test_choose(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = np.array([0, 0, 1]) + + A = ind.choose((x, y)) + assert_equal(A, [2, 2, 3]) + + A = ind.choose((x2, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + A = ind.choose((x, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + oned = np.ones(1) + # gh-12031, caused SEGFAULT + assert_raises(TypeError, oned.choose, np.void(0), [oned]) + + out = np.array(0) + ret = np.choose(np.array(1), [10, 20, 30], out=out) + assert out is ret + assert_equal(out[()], 20) + + # gh-6272 check overlap on out + x = np.arange(5) + y = np.choose([0, 0, 0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') + assert_equal(y, np.array([0, 1, 2])) + + # gh_28206 check fail when out not writeable + x = np.arange(3) + out = np.zeros(3) + out.setflags(write=False) + assert_raises(ValueError, np.choose, [0, 1, 2], [x, x, x], out=out) + + def test_prod(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + assert_raises(ArithmeticError, a.prod) + assert_raises(ArithmeticError, a2.prod, axis=1) + else: + assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(a2.prod(axis=-1), + np.array([24, 1890, 600], ctype)) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_repeat(self, dtype): + m = np.array([1, 2, 3, 4, 5, 6], dtype=dtype) + m_rect = m.reshape((2, 3)) + + A = m.repeat([1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + A = m.repeat(2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + A = m_rect.repeat([2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = m_rect.repeat([1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + A = m_rect.repeat(2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = m_rect.repeat(2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + def test_reshape(self): + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(arr.reshape(2, 6), tgt) + + tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + assert_equal(arr.reshape(3, 4), tgt) + + tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] + assert_equal(arr.reshape((3, 4), order='F'), tgt) + + tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + assert_equal(arr.T.reshape((3, 4), order='C'), tgt) + + def test_round(self): + def check_round(arr, expected, *round_args): + assert_equal(arr.round(*round_args), expected) + # With output array + out = 
np.zeros_like(arr) + res = arr.round(*round_args, out=out) + assert_equal(out, expected) + assert out is res + + check_round(np.array([1.2, 1.5]), [1, 2]) + check_round(np.array(1.5), 2) + check_round(np.array([12.2, 15.5]), [10, 20], -1) + check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) + # Complex rounding + check_round(np.array([4.5 + 1.5j]), [4 + 2j]) + check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + + def test_squeeze(self): + a = np.array([[[1], [2], [3]]]) + assert_equal(a.squeeze(), [1, 2, 3]) + assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) + assert_raises(ValueError, a.squeeze, axis=(1,)) + assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) + + def test_transpose(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(a.transpose(), [[1, 3], [2, 4]]) + assert_raises(ValueError, lambda: a.transpose(0)) + assert_raises(ValueError, lambda: a.transpose(0, 0)) + assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) + + def test_sort(self): + # test ordering for floats and complex containing nans. It is only + # necessary to check the less-than comparison, so sorts that + # only follow the insertion sort path are sufficient. We only + # test doubles and complex doubles as the logic is the same. + + # check doubles + msg = "Test real sort order with nans" + a = np.array([np.nan, 1, 0]) + b = np.sort(a) + assert_equal(b, a[::-1], msg) + # check complex + msg = "Test complex sort order with nans" + a = np.zeros(9, dtype=np.complex128) + a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] + a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] + b = np.sort(a) + assert_equal(b, a[::-1], msg) + + with assert_raises_regex( + ValueError, + "kind` and `stable` parameters can't be provided at the same time" + ): + np.sort(a, kind="stable", stable=True) + + # all c scalar sorts use the same code with different types + # so it suffices to run a quick check with one type. The number + # of sorted items must be greater than ~50 to check the actual + # algorithm because quick and merge sort fall over to insertion + # sort for small arrays. + + @pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.longdouble]) + def test_sort_unsigned(self, dtype): + a = np.arange(101, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"scalar sort, kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', + [np.int8, np.int16, np.int32, np.int64, np.float16, + np.float32, np.float64, np.longdouble]) + def test_sort_signed(self, dtype): + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"scalar sort, kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble]) + @pytest.mark.parametrize('part', ['real', 'imag']) + def test_sort_complex(self, part, dtype): + # test complex sorts. These use the same code as the scalars + # but the compare function differs. 
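+        # Hedged aside: complex values sort lexicographically on
+        # (real, imag), with nans ordered to the end, e.g.
+        #     np.sort(np.array([1 + 2j, 1 + 1j, 0 + 9j]))
+        #     # -> array([0.+9.j, 1.+1.j, 1.+2.j])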
+ cdtype = { + np.single: np.csingle, + np.double: np.cdouble, + np.longdouble: np.clongdouble, + }[dtype] + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + ai = (a * (1 + 1j)).astype(cdtype) + bi = (b * (1 + 1j)).astype(cdtype) + setattr(ai, part, 1) + setattr(bi, part, 1) + for kind in self.sort_kinds: + msg = f"complex sort, {part} part == 1, kind={kind}" + c = ai.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + + def test_sort_complex_byte_swapping(self): + # test sorting of complex arrays requiring byte-swapping, gh-5441 + for endianness in '<>': + for dt in np.typecodes['Complex']: + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) + c = arr.copy() + c.sort() + msg = f'byte-swapped complex sort, dtype={dt}' + assert_equal(c, arr, msg) + + @pytest.mark.parametrize('dtype', [np.bytes_, np.str_]) + def test_sort_string(self, dtype): + # np.array will perform the encoding to bytes for us in the bytes test + a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_object(self): + # test object array sorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize("dt", [ + np.dtype([('f', float), ('i', int)]), + np.dtype([('f', float), ('i', object)])]) + @pytest.mark.parametrize("step", [1, 2]) + def test_sort_structured(self, dt, step): + # test record array sorts. + a = np.array([(i, i) for i in range(101 * step)], dtype=dt) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy()[::step] + indx = c.argsort(kind=kind) + c.sort(kind=kind) + assert_equal(c, a[::step], msg) + assert_equal(a[::step][indx], a[::step], msg) + c = b.copy()[::step] + indx = c.argsort(kind=kind) + c.sort(kind=kind) + assert_equal(c, a[step - 1::step], msg) + assert_equal(b[::step][indx], a[step - 1::step], msg) + + @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]']) + def test_sort_time(self, dtype): + # test datetime64 and timedelta64 sorts. + a = np.arange(0, 101, dtype=dtype) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_axis(self): + # check axis handling. 
This should be the same for all type + # specific sorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 0], [3, 2]]) + c = np.array([[2, 3], [0, 1]]) + d = a.copy() + d.sort(axis=0) + assert_equal(d, b, "test sort with axis=0") + d = a.copy() + d.sort(axis=1) + assert_equal(d, c, "test sort with axis=1") + d = a.copy() + d.sort() + assert_equal(d, c, "test sort with default axis") + + def test_sort_size_0(self): + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = f'test empty array sort with axis={axis}' + assert_equal(np.sort(a, axis=axis), a, msg) + msg = 'test empty array sort with axis=None' + assert_equal(np.sort(a, axis=None), a.ravel(), msg) + + def test_sort_bad_ordering(self): + # test generic class with bogus ordering, + # should not segfault. + class Boom: + def __lt__(self, other): + return True + + a = np.array([Boom()] * 100, dtype=object) + for kind in self.sort_kinds: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_void_sort(self): + # gh-8210 - previously segfaulted + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view('V4') + arr[::-1].sort() + + dt = np.dtype([('val', 'i4', (1,))]) + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view(dt) + arr[::-1].sort() + + def test_sort_raises(self): + # gh-9404 + arr = np.array([0, datetime.now(), 1], dtype=object) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + # gh-3879 + + class Raiser: + def raises_anything(*args, **kwargs): + raise TypeError("SOMETHING ERRORED") + __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything + arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) + np.random.shuffle(arr) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + + def test_sort_degraded(self): + # test degraded dataset would take minutes to run with normal qsort + d = np.arange(1000000) + do = d.copy() + x = d + # create a median of 3 killer where each median is the sorted second + # last element of the quicksort partition + while x.size > 3: + mid = x.size // 2 + x[mid], x[-2] = x[-2], x[mid] + x = x[:-2] + + assert_equal(np.sort(d), do) + assert_equal(d[np.argsort(d)], do) + + def test_copy(self): + def assert_fortran(arr): + assert_(arr.flags.fortran) + assert_(arr.flags.f_contiguous) + assert_(not arr.flags.c_contiguous) + + def assert_c(arr): + assert_(not arr.flags.fortran) + assert_(not arr.flags.f_contiguous) + assert_(arr.flags.c_contiguous) + + a = np.empty((2, 2), order='F') + # Test copying a Fortran array + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_fortran(a.copy('A')) + + # Now test starting with a C array. 
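+        # Hedged aside: order='A' means "match the source layout": it yields
+        # an F-ordered copy for F-contiguous input and a C-ordered copy
+        # otherwise, which is exactly the asymmetry these two blocks assert.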
+ a = np.empty((2, 2), order='C') + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_c(a.copy('A')) + + @pytest.mark.parametrize("dtype", ['O', np.int32, 'i,O']) + def test__deepcopy__(self, dtype): + # Force the entry of NULLs into array + a = np.empty(4, dtype=dtype) + ctypes.memset(a.ctypes.data, 0, a.nbytes) + + # Ensure no error is raised, see gh-21833 + b = a.__deepcopy__({}) + + a[0] = 42 + with pytest.raises(AssertionError): + assert_array_equal(a, b) + + def test__deepcopy__catches_failure(self): + class MyObj: + def __deepcopy__(self, *args, **kwargs): + raise RuntimeError + + arr = np.array([1, MyObj(), 3], dtype='O') + with pytest.raises(RuntimeError): + arr.__deepcopy__({}) + + def test_sort_order(self): + # Test sorting an array with fields + x1 = np.array([21, 32, 14]) + x2 = np.array(['my', 'first', 'name']) + x3 = np.array([3.1, 4.5, 6.2]) + r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') + + r.sort(order=['id']) + assert_equal(r.id, np.array([14, 21, 32])) + assert_equal(r.word, np.array(['name', 'my', 'first'])) + assert_equal(r.number, np.array([6.2, 3.1, 4.5])) + + r.sort(order=['word']) + assert_equal(r.id, np.array([32, 21, 14])) + assert_equal(r.word, np.array(['first', 'my', 'name'])) + assert_equal(r.number, np.array([4.5, 3.1, 6.2])) + + r.sort(order=['number']) + assert_equal(r.id, np.array([21, 32, 14])) + assert_equal(r.word, np.array(['my', 'first', 'name'])) + assert_equal(r.number, np.array([3.1, 4.5, 6.2])) + + assert_raises_regex(ValueError, 'duplicate', + lambda: r.sort(order=['id', 'id'])) + + if sys.byteorder == 'little': + strtype = '>i2' + else: + strtype = '': + for dt in np.typecodes['Complex']: + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) + msg = f'byte-swapped complex argsort, dtype={dt}' + assert_equal(arr.argsort(), + np.arange(len(arr), dtype=np.intp), msg) + + # test string argsorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)]) + b = a[::-1].copy() + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = f"string argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test unicode argsorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)], dtype=np.str_) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = f"unicode argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test object array argsorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = f"object argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test structured array argsorts. + dt = np.dtype([('f', float), ('i', int)]) + a = np.array([(i, i) for i in range(101)], dtype=dt) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = f"structured array argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test datetime64 argsorts. 
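+        # Hedged aside on sort(order=...): the named field is the primary
+        # key and the remaining fields act as tie-breakers in dtype order,
+        # e.g. (illustrative record array, not from the test)
+        #     r = np.array([(2, 'b'), (1, 'a')],
+        #                  dtype=[('id', 'i4'), ('w', 'U1')])
+        #     np.sort(r, order='id')['id']  # -> array([1, 2], dtype=int32)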
+ a = np.arange(0, 101, dtype='datetime64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = f"datetime64 argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test timedelta64 argsorts. + a = np.arange(0, 101, dtype='timedelta64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = f"timedelta64 argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # check axis handling. This should be the same for all type + # specific argsorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 1], [0, 0]]) + c = np.array([[1, 0], [1, 0]]) + assert_equal(a.copy().argsort(axis=0), b) + assert_equal(a.copy().argsort(axis=1), c) + assert_equal(a.copy().argsort(), c) + + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = f'test empty array argsort with axis={axis}' + assert_equal(np.argsort(a, axis=axis), + np.zeros_like(a, dtype=np.intp), msg) + msg = 'test empty array argsort with axis=None' + assert_equal(np.argsort(a, axis=None), + np.zeros_like(a.ravel(), dtype=np.intp), msg) + + # check that stable argsorts are stable + r = np.arange(100) + # scalars + a = np.zeros(100) + assert_equal(a.argsort(kind='m'), r) + # complex + a = np.zeros(100, dtype=complex) + assert_equal(a.argsort(kind='m'), r) + # string + a = np.array(['aaaaaaaaa' for i in range(100)]) + assert_equal(a.argsort(kind='m'), r) + # unicode + a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.str_) + assert_equal(a.argsort(kind='m'), r) + + with assert_raises_regex( + ValueError, + "kind` and `stable` parameters can't be provided at the same time" + ): + np.argsort(a, kind="stable", stable=True) + + def test_sort_unicode_kind(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.sort, kind=k) + assert_raises(ValueError, d.argsort, kind=k) + + @pytest.mark.parametrize('a', [ + np.array([0, 1, np.nan], dtype=np.float16), + np.array([0, 1, np.nan], dtype=np.float32), + np.array([0, 1, np.nan]), + ]) + def test_searchsorted_floats(self, a): + # test for floats arrays containing nans. Explicitly test + # half, single, and double precision floats to verify that + # the NaN-handling is correct. + msg = f"Test real ({a.dtype}) searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(3), msg) + msg = f"Test real ({a.dtype}) searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 4), msg) + # check keyword arguments + a.searchsorted(v=1) + x = np.array([0, 1, np.nan], dtype='float32') + y = np.searchsorted(x, x[-1]) + assert_equal(y, 2) + + def test_searchsorted_complex(self): + # test for complex arrays containing nans. + # The search sorted routines use the compare functions for the + # array type, so this checks if that is consistent with the sort + # order. 
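+        # Hedged aside: side='left' returns the first index at which the
+        # value could be inserted while keeping order, side='right' the
+        # last, e.g.
+        #     a = np.array([0, 1, 1, 2])
+        #     a.searchsorted(1, side='left')   # -> 1
+        #     a.searchsorted(1, side='right')  # -> 3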
+ # check double complex + a = np.zeros(9, dtype=np.complex128) + a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] + a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] + msg = "Test complex searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(9), msg) + msg = "Test complex searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 10), msg) + msg = "Test searchsorted with little endian, side='l'" + a = np.array([0, 128], dtype=' p[:, i]).all(), + msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) + for row in range(p.shape[0]): + self.assert_partitioned(p[row], [i]) + self.assert_partitioned(parg[row], [i]) + + p = np.partition(d0, i, axis=0, kind=k) + parg = d0[np.argpartition(d0, i, axis=0, kind=k), + np.arange(d0.shape[1])[None, :]] + aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) + # array_less does not seem to work right + at((p[:i, :] <= p[i, :]).all(), + msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + at((p[i + 1:, :] > p[i, :]).all(), + msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + for col in range(p.shape[1]): + self.assert_partitioned(p[:, col], [i]) + self.assert_partitioned(parg[:, col], [i]) + + # check inplace + dc = d.copy() + dc.partition(i, kind=k) + assert_equal(dc, np.partition(d, i, kind=k)) + dc = d0.copy() + dc.partition(i, axis=0, kind=k) + assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) + dc = d1.copy() + dc.partition(i, axis=1, kind=k) + assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) + + def assert_partitioned(self, d, kth): + prev = 0 + for k in np.sort(kth): + assert_array_compare(operator.__le__, d[prev:k], d[k], + err_msg='kth %d' % k) + assert_((d[k:] >= d[k]).all(), + msg="kth %d, %r not greater equal %r" % (k, d[k:], d[k])) + prev = k + 1 + + def test_partition_iterative(self): + d = np.arange(17) + kth = (0, 1, 2, 429, 231) + assert_raises(ValueError, d.partition, kth) + assert_raises(ValueError, d.argpartition, kth) + d = np.arange(10).reshape((2, 5)) + assert_raises(ValueError, d.partition, kth, axis=0) + assert_raises(ValueError, d.partition, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=None) + + d = np.array([3, 4, 2, 1]) + p = np.partition(d, (0, 3)) + self.assert_partitioned(p, (0, 3)) + self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + assert_array_equal(p, np.partition(d, (-3, -1))) + assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) + + d = np.arange(17) + np.random.shuffle(d) + d.partition(range(d.size)) + assert_array_equal(np.arange(17), d) + np.random.shuffle(d) + assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) + + # test unsorted kth + d = np.arange(17) + np.random.shuffle(d) + keys = np.array([1, 3, 8, -2]) + np.random.shuffle(d) + p = np.partition(d, keys) + self.assert_partitioned(p, keys) + p = d[np.argpartition(d, keys)] + self.assert_partitioned(p, keys) + np.random.shuffle(keys) + assert_array_equal(np.partition(d, keys), p) + assert_array_equal(d[np.argpartition(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + self.assert_partitioned(np.partition(d, [5] * 4), [5]) + self.assert_partitioned(np.partition(d, [5] * 4 + [6, 13]), + [5] * 4 + [6, 13]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4)], [5]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4 + [6, 13])], + [5] * 4 + [6, 13]) + + d = np.arange(12) + np.random.shuffle(d) + d1 = np.tile(np.arange(12), (4, 1)) + 
map(np.random.shuffle, d1) + d0 = np.transpose(d1) + + kth = (1, 6, 7, -1) + p = np.partition(d1, kth, axis=1) + pa = d1[np.arange(d1.shape[0])[:, None], + d1.argpartition(kth, axis=1)] + assert_array_equal(p, pa) + for i in range(d1.shape[0]): + self.assert_partitioned(p[i, :], kth) + p = np.partition(d0, kth, axis=0) + pa = d0[np.argpartition(d0, kth, axis=0), + np.arange(d0.shape[1])[None, :]] + assert_array_equal(p, pa) + for i in range(d0.shape[1]): + self.assert_partitioned(p[:, i], kth) + + def test_partition_cdtype(self): + d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.9, 38)], + dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype) + ops = { + 'add': (np.add, True, float), + 'sub': (np.subtract, True, float), + 'mul': (np.multiply, True, float), + 'truediv': (np.true_divide, True, float), + 'floordiv': (np.floor_divide, True, float), + 'mod': (np.remainder, True, float), + 'divmod': (np.divmod, False, float), + 'pow': (np.power, True, int), + 'lshift': (np.left_shift, True, int), + 'rshift': (np.right_shift, True, int), + 'and': (np.bitwise_and, True, int), + 'xor': (np.bitwise_xor, True, int), + 'or': (np.bitwise_or, True, int), + 'matmul': (np.matmul, True, float), + # 'ge': (np.less_equal, False), + # 'gt': (np.less, False), + # 'le': (np.greater_equal, False), + # 'lt': (np.greater, False), + # 'eq': (np.equal, False), + # 'ne': (np.not_equal, False), + } + + class Coerced(Exception): + pass + + def array_impl(self): + raise Coerced + + def op_impl(self, other): + return "forward" + + def rop_impl(self, other): + return "reverse" + + def iop_impl(self, other): + return "in-place" + + def array_ufunc_impl(self, ufunc, method, *args, **kwargs): + return ("__array_ufunc__", ufunc, method, args, kwargs) + + # Create an object with the given base, in the given module, with a + # bunch of placeholder __op__ methods, and optionally a + # __array_ufunc__ and __array_priority__. + def make_obj(base, array_priority=False, array_ufunc=False, + alleged_module="__main__"): + class_namespace = {"__array__": array_impl} + if array_priority is not False: + class_namespace["__array_priority__"] = array_priority + for op in ops: + class_namespace[f"__{op}__"] = op_impl + class_namespace[f"__r{op}__"] = rop_impl + class_namespace[f"__i{op}__"] = iop_impl + if array_ufunc is not False: + class_namespace["__array_ufunc__"] = array_ufunc + eval_namespace = {"base": base, + "class_namespace": class_namespace, + "__name__": alleged_module, + } + MyType = eval("type('MyType', (base,), class_namespace)", + eval_namespace) + if issubclass(MyType, np.ndarray): + # Use this range to avoid special case weirdnesses around + # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. 
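+            # Hedged aside: the 3..6 range sidesteps 0 (division/mod), 1
+            # (trivial powers), and keeps x ** y well inside integer range.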
+ return np.arange(3, 7).reshape(2, 2).view(MyType) + else: + return MyType() + + def check(obj, binop_override_expected, ufunc_override_expected, + inplace_override_expected, check_scalar=True): + for op, (ufunc, has_inplace, dtype) in ops.items(): + err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' + % (op, ufunc, has_inplace, dtype)) + check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] + if check_scalar: + check_objs.append(check_objs[0][0]) + for arr in check_objs: + arr_method = getattr(arr, f"__{op}__") + + def first_out_arg(result): + if op == "divmod": + assert_(isinstance(result, tuple)) + return result[0] + else: + return result + + # arr __op__ obj + if binop_override_expected: + assert_equal(arr_method(obj), NotImplemented, err_msg) + elif ufunc_override_expected: + assert_equal(arr_method(obj)[0], "__array_ufunc__", + err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_method, obj, err_msg=err_msg) + # obj __op__ arr + arr_rmethod = getattr(arr, f"__r{op}__") + if ufunc_override_expected: + res = arr_rmethod(obj) + assert_equal(res[0], "__array_ufunc__", + err_msg=err_msg) + assert_equal(res[1], ufunc, err_msg=err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, err_msg=err_msg) + + # arr __iop__ obj + # array scalars don't have in-place operators + if has_inplace and isinstance(arr, np.ndarray): + arr_imethod = getattr(arr, f"__i{op}__") + if inplace_override_expected: + assert_equal(arr_method(obj), NotImplemented, + err_msg=err_msg) + elif ufunc_override_expected: + res = arr_imethod(obj) + assert_equal(res[0], "__array_ufunc__", err_msg) + assert_equal(res[1], ufunc, err_msg) + assert_(type(res[-1]["out"]) is tuple, err_msg) + assert_(res[-1]["out"][0] is arr, err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + assert_(arr_imethod(obj) is arr, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_imethod, obj, + err_msg=err_msg) + + op_fn = getattr(operator, op, None) + if op_fn is None: + op_fn = getattr(operator, op + "_", None) + if op_fn is None: + op_fn = getattr(builtins, op) + assert_equal(op_fn(obj, arr), "forward", err_msg) + if not isinstance(obj, np.ndarray): + if binop_override_expected: + assert_equal(op_fn(arr, obj), "reverse", err_msg) + elif ufunc_override_expected: + assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", + err_msg) + if ufunc_override_expected: + assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", + err_msg) + + # No array priority, no array_ufunc -> nothing called + check(make_obj(object), False, False, False) + # Negative array priority, no array_ufunc -> nothing called + # (has to be very negative, because scalar priority is -1000000.0) + check(make_obj(object, array_priority=-2**30), False, False, False) + # Positive array priority, no array_ufunc -> binops and iops only + check(make_obj(object, array_priority=1), True, False, True) + # ndarray ignores array_priority for ndarray subclasses + 
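+        # Hedged aside: for a non-ndarray operand, a positive
+        # __array_priority__ makes ndarray binary ops return NotImplemented
+        # so Python falls back to the operand's reflected method; setting
+        # __array_ufunc__ = None defers in the same way.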
check(make_obj(np.ndarray, array_priority=1), False, False, False, + check_scalar=False) + # Positive array_priority and array_ufunc -> array_ufunc only + check(make_obj(object, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + check(make_obj(np.ndarray, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + # array_ufunc set to None -> defer binops only + check(make_obj(object, array_ufunc=None), True, False, False) + check(make_obj(np.ndarray, array_ufunc=None), True, False, False, + check_scalar=False) + + @pytest.mark.parametrize("priority", [None, "runtime error"]) + def test_ufunc_binop_bad_array_priority(self, priority): + # Mainly checks that this does not crash. The second array has a lower + # priority than -1 ("error value"). If the __radd__ actually exists, + # bad things can happen (I think via the scalar paths). + # In principle both of these can probably just be errors in the future. + class BadPriority: + @property + def __array_priority__(self): + if priority == "runtime error": + raise RuntimeError("RuntimeError in __array_priority__!") + return priority + + def __radd__(self, other): + return "result" + + class LowPriority(np.ndarray): + __array_priority__ = -1000 + + # Priority failure uses the same as scalars (smaller -1000). So the + # LowPriority wins with 'result' for each element (inner operation). + res = np.arange(3).view(LowPriority) + BadPriority() + assert res.shape == (3,) + assert res[0] == 'result' + + @pytest.mark.parametrize("scalar", [ + np.longdouble(1), np.timedelta64(120, 'm')]) + @pytest.mark.parametrize("op", [operator.add, operator.xor]) + def test_scalar_binop_guarantees_ufunc(self, scalar, op): + # Test that __array_ufunc__ will always cause ufunc use even when + # we have to protect some other calls from recursing (see gh-26904). + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return "result" + + assert SomeClass() + scalar == "result" + assert scalar + SomeClass() == "result" + + def test_ufunc_override_normalize_signature(self): + # gh-5674 + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return kw + + a = SomeClass() + kw = np.add(a, [1]) + assert_('sig' not in kw and 'signature' not in kw) + kw = np.add(a, [1], sig='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + kw = np.add(a, [1], signature='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + + def test_array_ufunc_index(self): + # Check that index is set appropriately, also if only an output + # is passed on (latter is another regression tests for github bug 4753) + # This also checks implicitly that 'out' is always a tuple. + class CheckIndex: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + for i, a in enumerate(inputs): + if a is self: + return i + # calls below mean we must be in an output. + for j, a in enumerate(kw['out']): + if a is self: + return (j,) + + a = CheckIndex() + dummy = np.arange(2.) 
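+        # Hedged aside: positional outputs are normalized before dispatch,
+        # so np.sin(dummy, a) and np.sin(dummy, out=(a,)) both reach
+        # __array_ufunc__ with kw['out'] == (a,); CheckIndex reports an
+        # output's position as a 1-tuple, hence the (0,) results below.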
+ # 1 input, 1 output + assert_equal(np.sin(a), 0) + assert_equal(np.sin(dummy, a), (0,)) + assert_equal(np.sin(dummy, out=a), (0,)) + assert_equal(np.sin(dummy, out=(a,)), (0,)) + assert_equal(np.sin(a, a), 0) + assert_equal(np.sin(a, out=a), 0) + assert_equal(np.sin(a, out=(a,)), 0) + # 1 input, 2 outputs + assert_equal(np.modf(dummy, a), (0,)) + assert_equal(np.modf(dummy, None, a), (1,)) + assert_equal(np.modf(dummy, dummy, a), (1,)) + assert_equal(np.modf(dummy, out=(a, None)), (0,)) + assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) + assert_equal(np.modf(dummy, out=(None, a)), (1,)) + assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) + assert_equal(np.modf(a, out=(dummy, a)), 0) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs + np.modf(dummy, out=a) + + assert_raises(ValueError, np.modf, dummy, out=(a,)) + + # 2 inputs, 1 output + assert_equal(np.add(a, dummy), 0) + assert_equal(np.add(dummy, a), 1) + assert_equal(np.add(dummy, dummy, a), (0,)) + assert_equal(np.add(dummy, a, a), 1) + assert_equal(np.add(dummy, dummy, out=a), (0,)) + assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) + assert_equal(np.add(a, dummy, out=a), 0) + + def test_out_override(self): + # regression test for github bug 4753 + class OutClass(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if 'out' in kw: + tmp_kw = kw.copy() + tmp_kw.pop('out') + func = getattr(ufunc, method) + kw['out'][0][...] = func(*inputs, **tmp_kw) + + A = np.array([0]).view(OutClass) + B = np.array([5]) + C = np.array([6]) + np.multiply(C, B, A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + A[0] = 0 + np.multiply(C, B, out=A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + + def test_pow_array_object_dtype(self): + # test pow on arrays of object dtype + class SomeClass: + def __init__(self, num=None): + self.num = num + + # want to ensure a fast pow path is not taken + def __mul__(self, other): + raise AssertionError('__mul__ should not be called') + + def __truediv__(self, other): + raise AssertionError('__truediv__ should not be called') + + def __pow__(self, exp): + return SomeClass(num=self.num ** exp) + + def __eq__(self, other): + if isinstance(other, SomeClass): + return self.num == other.num + + __rpow__ = __pow__ + + def pow_for(exp, arr): + return np.array([x ** exp for x in arr]) + + obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) + + assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) + assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) + assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) + assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) + assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + + def test_pos_array_ufunc_override(self): + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*[i.view(np.ndarray) for + i in inputs], **kwargs) + tst = np.array('foo').view(A) + with assert_raises(TypeError): + +tst + + +class TestTemporaryElide: + # elision is only triggered on relatively large arrays + + def test_extension_incref_elide(self): + # test extension (e.g. 
cython) calling PyNumber_* slots without + # increasing the reference counts + # + # def incref_elide(a): + # d = input.copy() # refcount 1 + # return d, d + d # PyNumber_Add without increasing refcount + from numpy._core._multiarray_tests import incref_elide + d = np.ones(100000) + orig, res = incref_elide(d) + d + d + # the return original should not be changed to an inplace operation + assert_array_equal(orig, d) + assert_array_equal(res, d + d) + + def test_extension_incref_elide_stack(self): + # scanning if the refcount == 1 object is on the python stack to check + # that we are called directly from python is flawed as object may still + # be above the stack pointer and we have no access to the top of it + # + # def incref_elide_l(d): + # return l[4] + l[4] # PyNumber_Add without increasing refcount + from numpy._core._multiarray_tests import incref_elide_l + # padding with 1 makes sure the object on the stack is not overwritten + l = [1, 1, 1, 1, np.ones(100000)] + res = incref_elide_l(l) + # the return original should not be changed to an inplace operation + assert_array_equal(l[4], np.ones(100000)) + assert_array_equal(res, l[4] + l[4]) + + def test_temporary_with_cast(self): + # check that we don't elide into a temporary which would need casting + d = np.ones(200000, dtype=np.int64) + r = ((d + d) + np.array(2**222, dtype='O')) + assert_equal(r.dtype, np.dtype('O')) + + r = ((d + d) / 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = np.true_divide((d + d), 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) / 2.) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) // 2) + assert_equal(r.dtype, np.dtype(np.int64)) + + # commutative elision into the astype result + f = np.ones(100000, dtype=np.float32) + assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) + + # no elision into lower type + d = f.astype(np.float64) + assert_equal(((f + f) + d).dtype, d.dtype) + l = np.ones(100000, dtype=np.longdouble) + assert_equal(((d + d) + l).dtype, l.dtype) + + # test unary abs with different output dtype + for dt in (np.complex64, np.complex128, np.clongdouble): + c = np.ones(100000, dtype=dt) + r = abs(c * 2.0) + assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) + + def test_elide_broadcast(self): + # test no elision on broadcast to higher dimension + # only triggers elision code path in debug mode as triggering it in + # normal mode needs 256kb large matching dimension, so a lot of memory + d = np.ones((2000, 1), dtype=int) + b = np.ones((2000), dtype=bool) + r = (1 - d) + b + assert_equal(r, 1) + assert_equal(r.shape, (2000, 2000)) + + def test_elide_scalar(self): + # check inplace op does not create ndarray from scalars + a = np.bool() + assert_(type(~(a & a)) is np.bool) + + def test_elide_scalar_readonly(self): + # The imaginary part of a real array is readonly. This needs to go + # through fast_scalar_power which is only called for powers of + # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for + # elision which can be gotten for the imaginary part of a real + # array. Should not error. 
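+        # Hedged aside: "eliding" a temporary means reusing its buffer
+        # in-place for the next operation once the refcount shows no other
+        # owner, e.g. roughly evaluating (b + b) * 2. inside the sum's own
+        # buffer; it must never target read-only memory such as the `.imag`
+        # view of a real array, which is what this test exercises.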
+ a = np.empty(100000, dtype=np.float64) + a.imag ** 2 + + def test_elide_readonly(self): + # don't try to elide readonly temporaries + r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 + assert_equal(r, 0) + + def test_elide_updateifcopy(self): + a = np.ones(2**20)[::2] + b = a.flat.__array__() + 1 + del b + assert_equal(a, 1) + + +class TestCAPI: + def test_IsPythonScalar(self): + from numpy._core._multiarray_tests import IsPythonScalar + assert_(IsPythonScalar(b'foobar')) + assert_(IsPythonScalar(1)) + assert_(IsPythonScalar(2**80)) + assert_(IsPythonScalar(2.)) + assert_(IsPythonScalar("a")) + + @pytest.mark.parametrize("converter", + [_multiarray_tests.run_scalar_intp_converter, + _multiarray_tests.run_scalar_intp_from_sequence]) + def test_intp_sequence_converters(self, converter): + # Test simple values (-1 is special for error return paths) + assert converter(10) == (10,) + assert converter(-1) == (-1,) + # A 0-D array looks a bit like a sequence but must take the integer + # path: + assert converter(np.array(123)) == (123,) + # Test simple sequences (intp_from_sequence only supports length 1): + assert converter((10,)) == (10,) + assert converter(np.array([11])) == (11,) + + @pytest.mark.parametrize("converter", + [_multiarray_tests.run_scalar_intp_converter, + _multiarray_tests.run_scalar_intp_from_sequence]) + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_intp_sequence_converters_errors(self, converter): + with pytest.raises(TypeError, + match="expected a sequence of integers or a single integer, "): + converter(object()) + with pytest.raises(TypeError, + match="expected a sequence of integers or a single integer, " + "got '32.0'"): + converter(32.) 
+ with pytest.raises(TypeError, + match="'float' object cannot be interpreted as an integer"): + converter([32.]) + with pytest.raises(ValueError, + match="Maximum allowed dimension"): + # These converters currently convert overflows to a ValueError + converter(2**64) + + +class TestSubscripting: + def test_test_zero_rank(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x[0], np.int_)) + assert_(type(x[0, ...]) is np.ndarray) + + +class TestPickling: + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, + reason=('this tests the error messages when trying to' + 'protocol 5 although it is not available')) + def test_correct_protocol5_error_message(self): + array = np.arange(10) + + def test_record_array_with_object_dtype(self): + my_object = object() + + arr_with_object = np.array( + [(my_object, 1, 2.0)], + dtype=[('a', object), ('b', int), ('c', float)]) + arr_without_object = np.array( + [('xxx', 1, 2.0)], + dtype=[('a', str), ('b', int), ('c', float)]) + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_arr_with_object = pickle.loads( + pickle.dumps(arr_with_object, protocol=proto)) + depickled_arr_without_object = pickle.loads( + pickle.dumps(arr_without_object, protocol=proto)) + + assert_equal(arr_with_object.dtype, + depickled_arr_with_object.dtype) + assert_equal(arr_without_object.dtype, + depickled_arr_without_object.dtype) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_f_contiguous_array(self): + f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') + buffers = [] + + # When using pickle protocol 5, Fortran-contiguous arrays can be + # serialized using out-of-band buffers + bytes_string = pickle.dumps(f_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_f_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(f_contiguous_array, depickled_f_contiguous_array) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + @pytest.mark.parametrize('transposed_contiguous_array', + [np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), + np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2))] + + [np.random.default_rng(42).random(np.arange(2, 7)).transpose(np.random.permutation(5)) for _ in range(3)]) + def test_transposed_contiguous_array(self, transposed_contiguous_array): + buffers = [] + # When using pickle protocol 5, arrays which can be transposed to c_contiguous + # can be serialized using out-of-band buffers + bytes_string = pickle.dumps(transposed_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_transposed_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(transposed_contiguous_array, depickled_transposed_contiguous_array) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + def test_load_legacy_pkl_protocol5(self): + # legacy byte strs are dumped in 2.2.1 + c_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01C\x94t\x94R\x94.' 
# noqa: E501 + f_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01F\x94t\x94R\x94.' # noqa: E501 + transposed_contiguous_dumped = b'\x80\x05\x95\xa5\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x04K\x03K\x02\x87\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x18\x00\x01\x08\t\x10\x11\x02\x03\n\x0b\x12\x13\x04\x05\x0c\r\x14\x15\x06\x07\x0e\x0f\x16\x17\x94t\x94b.' # noqa: E501 + no_contiguous_dumped = b'\x80\x05\x95\x91\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x03K\x02\x86\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x06\x00\x01\x04\x05\x08\t\x94t\x94b.' # noqa: E501 + x = np.arange(24, dtype='uint8').reshape(3, 4, 2) + assert_equal(x, pickle.loads(c_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2, order='F') + assert_equal(x, pickle.loads(f_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2).transpose((1, 0, 2)) + assert_equal(x, pickle.loads(transposed_contiguous_dumped)) + x = np.arange(12, dtype='uint8').reshape(3, 4)[:, :2] + assert_equal(x, pickle.loads(no_contiguous_dumped)) + + def test_non_contiguous_array(self): + non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] + assert not non_contiguous_array.flags.c_contiguous + assert not non_contiguous_array.flags.f_contiguous + + # make sure non-contiguous arrays can be pickled-depickled + # using any protocol + buffers = [] + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_non_contiguous_array = pickle.loads( + pickle.dumps(non_contiguous_array, protocol=proto, + buffer_callback=buffers.append if proto >= 5 else None)) + + assert_equal(len(buffers), 0) + assert_equal(non_contiguous_array, depickled_non_contiguous_array) + + def test_roundtrip(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + carray = np.array([[2, 9], [7, 0], [3, 8]]) + DATA = [ + carray, + np.transpose(carray), + np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), + ('c', float)]) + ] + + refs = [weakref.ref(a) for a in DATA] + for a in DATA: + assert_equal( + a, pickle.loads(pickle.dumps(a, protocol=proto)), + err_msg=f"{a!r}") + del a, DATA, carray + break_cycles() + # check for reference leaks (gh-12793) + for ref in refs: + assert ref() is None + + def _loads(self, obj): + return pickle.loads(obj, encoding='latin1') + + # version 0 pickles, using protocol=2 to pickle + # version 0 doesn't have a version field + def test_version0_int8(self): + s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." 
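+    # Hedged aside on _loads above: encoding='latin1' is the conventional
+    # way to load Python 2 era pickles containing byte strings, since
+    # latin-1 maps bytes 0..255 one-to-one onto the first 256 code points.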
+ a = np.array([1, 2, 3, 4], dtype=np.int8) + p = self._loads(s) + assert_equal(a, p) + + def test_version0_float32(self): + s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + def test_mixed(self): + g1 = np.array(["spam", "spa", "spammer", "and eggs"]) + g2 = "spam" + assert_array_equal(g1 == g2, [x == g2 for x in g1]) + assert_array_equal(g1 != g2, [x != g2 for x in g1]) + assert_array_equal(g1 < g2, [x < g2 for x in g1]) + assert_array_equal(g1 > g2, [x > g2 for x in g1]) + assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) + assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + + def test_unicode(self): + g1 = np.array(["This", "is", "example"]) + g2 = np.array(["This", "was", "example"]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + +class TestArgmaxArgminCommon: + + sizes = [(), (3,), (3, 2), (2, 3), + (3, 3), (2, 3, 4), (4, 3, 2), + (1, 2, 3, 4), (2, 3, 4, 1), + (3, 4, 1, 2), (4, 1, 2, 3), + (64,), (128,), (256,)] + + @pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis) + for axis in list(range(-len(size), len(size))) + [None]] + for size in sizes])) + @pytest.mark.parametrize('method', [np.argmax, np.argmin]) + def test_np_argmin_argmax_keepdims(self, size, axis, method): + + arr = np.random.normal(size=size) + + # contiguous arrays + if axis is None: + new_shape = [1 for _ in range(len(size))] + else: + new_shape = list(size) + new_shape[axis] = 1 + new_shape = tuple(new_shape) + + _res_orig = method(arr, axis=axis) + res_orig = _res_orig.reshape(new_shape) + res = method(arr, axis=axis, keepdims=True) + assert_equal(res, res_orig) + assert_(res.shape == new_shape) + outarray = np.empty(res.shape, dtype=res.dtype) + res1 = method(arr, axis=axis, out=outarray, + keepdims=True) + assert_(res1 is outarray) + assert_equal(res, outarray) + + if len(size) > 0: + wrong_shape = list(new_shape) + if axis is not None: + wrong_shape[axis] = 2 + else: + wrong_shape[0] = 2 + wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) + with pytest.raises(ValueError): + method(arr.T, axis=axis, + out=wrong_outarray, keepdims=True) + + # non-contiguous arrays + if axis is None: + new_shape = [1 for _ in range(len(size))] + else: + new_shape = list(size)[::-1] + new_shape[axis] = 1 + new_shape = tuple(new_shape) + + _res_orig = method(arr.T, axis=axis) + res_orig = _res_orig.reshape(new_shape) + res = method(arr.T, axis=axis, keepdims=True) + assert_equal(res, res_orig) + assert_(res.shape == new_shape) + outarray = np.empty(new_shape[::-1], dtype=res.dtype) + outarray = outarray.T + res1 = method(arr.T, axis=axis, out=outarray, + keepdims=True) + assert_(res1 is outarray) + assert_equal(res, outarray) + + if len(size) > 0: + # one dimension lesser for non-zero sized + # array should raise an error + with pytest.raises(ValueError): + method(arr[0], axis=axis, + out=outarray, keepdims=True) + + if len(size) > 0: + wrong_shape = 
list(new_shape) + if axis is not None: + wrong_shape[axis] = 2 + else: + wrong_shape[0] = 2 + wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) + with pytest.raises(ValueError): + method(arr.T, axis=axis, + out=wrong_outarray, keepdims=True) + + @pytest.mark.parametrize('method', ['max', 'min']) + def test_all(self, method): + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + arg_method = getattr(a, 'arg' + method) + val_method = getattr(a, method) + for i in range(a.ndim): + a_maxmin = val_method(i) + aarg_maxmin = arg_method(i) + axes = list(range(a.ndim)) + axes.remove(i) + assert_(np.all(a_maxmin == aarg_maxmin.choose( + *a.transpose(i, *axes)))) + + @pytest.mark.parametrize('method', ['argmax', 'argmin']) + def test_output_shape(self, method): + # see also gh-616 + a = np.ones((10, 5)) + arg_method = getattr(a, method) + # Check some simple shape mismatches + out = np.ones(11, dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + out = np.ones((2, 5), dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + # these could be relaxed possibly (used to allow even the previous) + out = np.ones((1, 10), dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + out = np.ones(10, dtype=np.int_) + arg_method(-1, out=out) + assert_equal(out, arg_method(-1)) + + @pytest.mark.parametrize('ndim', [0, 1]) + @pytest.mark.parametrize('method', ['argmax', 'argmin']) + def test_ret_is_out(self, ndim, method): + a = np.ones((4,) + (256,) * ndim) + arg_method = getattr(a, method) + out = np.empty((256,) * ndim, dtype=np.intp) + ret = arg_method(axis=0, out=out) + assert ret is out + + @pytest.mark.parametrize('np_array, method, idx, val', + [(np.zeros, 'argmax', 5942, "as"), + (np.ones, 'argmin', 6001, "0")]) + def test_unicode(self, np_array, method, idx, val): + d = np_array(6031, dtype='= cmin)) + assert_(np.all(x <= cmax)) + + def _clip_type(self, type_group, array_max, + clip_min, clip_max, inplace=False, + expected_min=None, expected_max=None): + if expected_min is None: + expected_min = clip_min + if expected_max is None: + expected_max = clip_max + + for T in np._core.sctypes[type_group]: + if sys.byteorder == 'little': + byte_orders = ['=', '>'] + else: + byte_orders = ['<', '='] + + for byteorder in byte_orders: + dtype = np.dtype(T).newbyteorder(byteorder) + + x = (np.random.random(1000) * array_max).astype(dtype) + if inplace: + # The tests that call us pass clip_min and clip_max that + # might not fit in the destination dtype. They were written + # assuming the previous unsafe casting, which now must be + # passed explicitly to avoid a warning. + x.clip(clip_min, clip_max, x, casting='unsafe') + else: + x = x.clip(clip_min, clip_max) + byteorder = '=' + + if x.dtype.byteorder == '|': + byteorder = '|' + assert_equal(x.dtype.byteorder, byteorder) + self._check_range(x, expected_min, expected_max) + return x + + def test_basic(self): + for inplace in [False, True]: + self._clip_type( + 'float', 1024, -12.8, 100.2, inplace=inplace) + self._clip_type( + 'float', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'int', 1024, -120, 100, inplace=inplace) + self._clip_type( + 'int', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'uint', 1024, 0, 0, inplace=inplace) + self._clip_type( + 'uint', 1024, 10, 100, inplace=inplace) + + @pytest.mark.parametrize("inplace", [False, True]) + def test_int_out_of_range(self, inplace): + # Simple check for out-of-bound integers, also testing the in-place + # path. 
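# [Editor's aside -- illustrative sketch, not part of the patch] The
# casting='unsafe' requirement mentioned in _clip_type above comes from dtype
# promotion: float clip bounds promote an integer result, so writing it back
# in place needs an explicit unsafe cast. A minimal sketch, assuming NumPy
# 2.x promotion rules:
import numpy as np

x = np.arange(5, dtype=np.int32)
y = x.clip(-12.8, 2.5)                   # float bounds -> float64 result
assert y.dtype == np.float64
x.clip(-12.8, 2.5, x, casting='unsafe')  # in-place into int32 needs unsafe cast
assert x.tolist() == [0, 1, 2, 2, 2]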
+ x = (np.random.random(1000) * 255).astype("uint8") + out = np.empty_like(x) + res = x.clip(-1, 300, out=out if inplace else None) + assert res is out or not inplace + assert (res == x).all() + + res = x.clip(-1, 50, out=out if inplace else None) + assert res is out or not inplace + assert (res <= 50).all() + assert (res[x <= 50] == x[x <= 50]).all() + + res = x.clip(100, 1000, out=out if inplace else None) + assert res is out or not inplace + assert (res >= 100).all() + assert (res[x >= 100] == x[x >= 100]).all() + + def test_record_array(self): + rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], + dtype=[('x', '= 3)) + x = val.clip(min=3) + assert_(np.all(x >= 3)) + x = val.clip(max=4) + assert_(np.all(x <= 4)) + + def test_nan(self): + input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) + result = input_arr.clip(-1, 1) + expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) + assert_array_equal(result, expected) + + +class TestCompress: + def test_axis(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = np.compress([0, 1, 0, 1, 0], arr, axis=1) + assert_equal(out, tgt) + + def test_truncate(self): + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=1) + assert_equal(out, tgt) + + def test_flatten(self): + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr) + assert_equal(out, 1) + + +class TestPutmask: + def tst_basic(self, x, T, mask, val): + np.putmask(x, mask, val) + assert_equal(x[mask], np.array(val, T)) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(1000) * 100 + mask = x < 40 + + for val in [-100, 0, 15]: + for types in np._core.sctypes.values(): + for T in types: + if T not in unchecked_types: + if val < 0 and np.dtype(T).kind == "u": + val = np.iinfo(T).max - 99 + self.tst_basic(x.copy().astype(T), T, mask, val) + + # Also test string of a length which uses an untypical length + dt = np.dtype("S3") + self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3]) + + def test_mask_size(self): + assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) + + @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', '= 2, 3) + + def test_kwargs(self): + x = np.array([0, 0]) + np.putmask(x, [0, 1], [-1, -2]) + assert_array_equal(x, [0, -2]) + + x = np.array([0, 0]) + np.putmask(x, mask=[0, 1], values=[-1, -2]) + assert_array_equal(x, [0, -2]) + + x = np.array([0, 0]) + np.putmask(x, values=[-1, -2], mask=[0, 1]) + assert_array_equal(x, [0, -2]) + + with pytest.raises(TypeError): + np.putmask(a=x, values=[-1, -2], mask=[0, 1]) + + +class TestTake: + def tst_basic(self, x): + ind = list(range(x.shape[0])) + assert_array_equal(x.take(ind, axis=0), x) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(24) * 100 + x.shape = 2, 3, 4 + for types in np._core.sctypes.values(): + for T in types: + if T not in unchecked_types: + self.tst_basic(x.copy().astype(T)) + + # Also test string of a length which uses an untypical length + self.tst_basic(x.astype("S3")) + + def test_raise(self): + x = np.random.random(24) * 100 + x.shape = 2, 3, 4 + assert_raises(IndexError, x.take, [0, 1, 2], axis=0) + assert_raises(IndexError, x.take, [-3], axis=0) + assert_array_equal(x.take([-1], axis=0)[0], x[1]) + + def test_clip(self): + x = np.random.random(24) * 100 + x.shape = 2, 3, 4 + assert_array_equal(x.take([-1], axis=0, 
mode='clip')[0], x[0]) + assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) + + def test_wrap(self): + x = np.random.random(24) * 100 + x.shape = 2, 3, 4 + assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) + assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) + assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) + + @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', ' 16MB + d = np.zeros(4 * 1024 ** 2) + d.tofile(tmp_filename) + assert_equal(os.path.getsize(tmp_filename), d.nbytes) + assert_array_equal(d, np.fromfile(tmp_filename)) + # check offset + with open(tmp_filename, "r+b") as f: + f.seek(d.nbytes) + d.tofile(f) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) + # check append mode (gh-8329) + open(tmp_filename, "w").close() # delete file contents + with open(tmp_filename, "ab") as f: + d.tofile(f) + assert_array_equal(d, np.fromfile(tmp_filename)) + with open(tmp_filename, "ab") as f: + d.tofile(f) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) + + def test_io_open_buffered_fromfile(self, x, tmp_filename): + # gh-6632 + x.tofile(tmp_filename) + with open(tmp_filename, 'rb', buffering=-1) as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_file_position_after_fromfile(self, tmp_filename): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE // 8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE * 8] + + for size in sizes: + with open(tmp_filename, 'wb') as f: + f.seek(size - 1) + f.write(b'\0') + + for mode in ['rb', 'r+b']: + err_msg = "%d %s" % (size, mode) + + with open(tmp_filename, mode) as f: + f.read(2) + np.fromfile(f, dtype=np.float64, count=1) + pos = f.tell() + assert_equal(pos, 10, err_msg=err_msg) + + def test_file_position_after_tofile(self, tmp_filename): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE // 8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE * 8] + + for size in sizes: + err_msg = "%d" % (size,) + + with open(tmp_filename, 'wb') as f: + f.seek(size - 1) + f.write(b'\0') + f.seek(10) + f.write(b'12') + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) + + with open(tmp_filename, 'r+b') as f: + f.read(2) + f.seek(0, 1) # seek between read&write required by ANSI C + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + assert_equal(pos, 10, err_msg=err_msg) + + def test_load_object_array_fromfile(self, tmp_filename): + # gh-12300 + with open(tmp_filename, 'w') as f: + # Ensure we have a file with consistent contents + pass + + with open(tmp_filename, 'rb') as f: + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, f, dtype=object) + + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, tmp_filename, dtype=object) + + def test_fromfile_offset(self, x, tmp_filename): + with open(tmp_filename, 'wb') as f: + x.tofile(f) + + with open(tmp_filename, 'rb') as f: + y = np.fromfile(f, dtype=x.dtype, offset=0) + assert_array_equal(y, x.flat) + + with open(tmp_filename, 'rb') as f: + count_items = len(x.flat) // 8 + offset_items = len(x.flat) // 4 + offset_bytes = x.dtype.itemsize * offset_items + y = np.fromfile( + f, dtype=x.dtype, count=count_items, offset=offset_bytes + ) + assert_array_equal( + y, x.flat[offset_items:offset_items + count_items] + ) + + # subsequent seeks should stack + offset_bytes = x.dtype.itemsize + z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes) + assert_array_equal(z, x.flat[offset_items + count_items + 1:]) + + 
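# [Editor's aside -- illustrative sketch, not part of the patch] The offset
# arithmetic exercised by test_fromfile_offset, in one self-contained
# example: offset counts bytes, count counts items, and both apply only to
# binary files (the temporary file name below is arbitrary):
import os
import tempfile

import numpy as np

data = np.arange(10, dtype=np.float64)
with tempfile.NamedTemporaryFile(suffix='.bin', delete=False) as f:
    fname = f.name
data.tofile(fname)
# skip two items (2 * itemsize bytes), then read three items: data[2:5]
got = np.fromfile(fname, dtype=np.float64, offset=2 * data.itemsize, count=3)
assert (got == data[2:5]).all()
os.remove(fname)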
with open(tmp_filename, 'wb') as f: + x.tofile(f, sep=",") + + with open(tmp_filename, 'rb') as f: + assert_raises_regex( + TypeError, + "'offset' argument only permitted for binary files", + np.fromfile, tmp_filename, dtype=x.dtype, + sep=",", offset=1) + + @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") + def test_fromfile_bad_dup(self, x, tmp_filename): + def dup_str(fd): + return 'abc' + + def dup_bigint(fd): + return 2**68 + + old_dup = os.dup + try: + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + os.dup = dup + assert_raises(exc, np.fromfile, f) + finally: + os.dup = old_dup + + def _check_from(self, s, value, filename, **kw): + if 'sep' not in kw: + y = np.frombuffer(s, **kw) + else: + y = np.fromstring(s, **kw) + assert_array_equal(y, value) + + with open(filename, 'wb') as f: + f.write(s) + y = np.fromfile(filename, **kw) + assert_array_equal(y, value) + + @pytest.fixture(params=["period", "comma"]) + def decimal_sep_localization(self, request): + """ + Including this fixture in a test will automatically + execute it with both types of decimal separator. + + So:: + + def test_decimal(decimal_sep_localization): + pass + + is equivalent to the following two tests:: + + def test_decimal_period_separator(): + pass + + def test_decimal_comma_separator(): + with CommaDecimalPointLocale(): + pass + """ + if request.param == "period": + yield + elif request.param == "comma": + with CommaDecimalPointLocale(): + yield + else: + assert False, request.param + + def test_nan(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + tmp_filename, + sep=' ') + + def test_inf(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"inf +inf -inf infinity -Infinity iNfInItY -inF", + [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], + tmp_filename, + sep=' ') + + def test_numbers(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"1.234 -1.234 .3 .3e55 -123133.1231e+133", + [1.234, -1.234, .3, .3e55, -123133.1231e+133], + tmp_filename, + sep=' ') + + def test_binary(self, tmp_filename): + self._check_from( + b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', + np.array([1, 2, 3, 4]), + tmp_filename, + dtype='']) + @pytest.mark.parametrize('dtype', [float, int, complex]) + def test_basic(self, byteorder, dtype): + dt = np.dtype(dtype).newbyteorder(byteorder) + x = (np.random.random((4, 7)) * 5).astype(dt) + buf = x.tobytes() + assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat) + + @pytest.mark.parametrize("obj", [np.arange(10), b"12345678"]) + def test_array_base(self, obj): + # Objects (including NumPy arrays), which do not use the + # `release_buffer` slot should be directly used as a base object. + # See also gh-21612 + new = np.frombuffer(obj) + assert new.base is obj + + def test_empty(self): + assert_array_equal(np.frombuffer(b''), np.array([])) + + @pytest.mark.skipif(IS_PYPY, + reason="PyPy's memoryview currently does not track exports. See: " + "https://foss.heptapod.net/pypy/pypy/-/issues/3724") + def test_mmap_close(self): + # The old buffer protocol was not safe for some things that the new + # one is. But `frombuffer` always used the old one for a long time. 
+ # Checks that it is safe with the new one (using memoryviews) + with tempfile.TemporaryFile(mode='wb') as tmp: + tmp.write(b"asdf") + tmp.flush() + mm = mmap.mmap(tmp.fileno(), 0) + arr = np.frombuffer(mm, dtype=np.uint8) + with pytest.raises(BufferError): + mm.close() # cannot close while array uses the buffer + del arr + mm.close() + +class TestFlat: + def setup_method(self): + a0 = np.arange(20.0) + a = a0.reshape(4, 5) + a0.shape = (4, 5) + a.flags.writeable = False + self.a = a + self.b = a[::2, ::2] + self.a0 = a0 + self.b0 = a0[::2, ::2] + + def test_contiguous(self): + testpassed = False + try: + self.a.flat[12] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.a.flat[12] == 12.0) + + def test_discontiguous(self): + testpassed = False + try: + self.b.flat[4] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.b.flat[4] == 12.0) + + def test___array__(self): + c = self.a.flat.__array__() + d = self.b.flat.__array__() + e = self.a0.flat.__array__() + f = self.b0.flat.__array__() + + assert_(c.flags.writeable is False) + assert_(d.flags.writeable is False) + assert_(e.flags.writeable is True) + assert_(f.flags.writeable is False) + assert_(c.flags.writebackifcopy is False) + assert_(d.flags.writebackifcopy is False) + assert_(e.flags.writebackifcopy is False) + assert_(f.flags.writebackifcopy is False) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount(self): + # includes regression test for reference count error gh-13165 + inds = [np.intp(0), np.array([True] * self.a.size), np.array([0]), None] + indtype = np.dtype(np.intp) + rc_indtype = sys.getrefcount(indtype) + for ind in inds: + rc_ind = sys.getrefcount(ind) + for _ in range(100): + try: + self.a.flat[ind] + except IndexError: + pass + assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) + assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) + + def test_index_getset(self): + it = np.arange(10).reshape(2, 1, 5).flat + with pytest.raises(AttributeError): + it.index = 10 + + for _ in it: + pass + # Check the value of `.index` is updated correctly (see also gh-19153) + # If the type was incorrect, this would show up on big-endian machines + assert it.index == it.base.size + + def test_maxdims(self): + # The flat iterator and thus attribute is currently unfortunately + # limited to only 32 dimensions (after bumping it to 64 for 2.0) + a = np.ones((1,) * 64) + + with pytest.raises(RuntimeError, + match=".*32 dimensions but the array has 64"): + a.flat + + +class TestResize: + + @_no_tracing + def test_basic(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + if IS_PYPY: + x.resize((5, 5), refcheck=False) + else: + x.resize((5, 5)) + assert_array_equal(x.flat[:9], + np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) + assert_array_equal(x[9:].flat, 0) + + def test_check_reference(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + y = x + assert_raises(ValueError, x.resize, (5, 1)) + + @_no_tracing + def test_int_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, refcheck=False) + else: + x.resize(3) + assert_array_equal(x, np.eye(3)[0, :]) + + def test_none_shape(self): + x = np.eye(3) + x.resize(None) + assert_array_equal(x, np.eye(3)) + x.resize() + assert_array_equal(x, np.eye(3)) + + def test_0d_shape(self): + # to it multiple times to test it does not break alloc cache gh-9216 + for i in range(10): + x = np.empty((1,)) + x.resize(()) + assert_equal(x.shape, ()) + assert_equal(x.size, 1) + x = 
np.empty(()) + x.resize((1,)) + assert_equal(x.shape, (1,)) + assert_equal(x.size, 1) + + def test_invalid_arguments(self): + assert_raises(TypeError, np.eye(3).resize, 'hi') + assert_raises(ValueError, np.eye(3).resize, -1) + assert_raises(TypeError, np.eye(3).resize, order=1) + assert_raises(TypeError, np.eye(3).resize, refcheck='hi') + + @_no_tracing + def test_freeform_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, 2, 1, refcheck=False) + else: + x.resize(3, 2, 1) + assert_(x.shape == (3, 2, 1)) + + @_no_tracing + def test_zeros_appended(self): + x = np.eye(3) + if IS_PYPY: + x.resize(2, 3, 3, refcheck=False) + else: + x.resize(2, 3, 3) + assert_array_equal(x[0], np.eye(3)) + assert_array_equal(x[1], np.zeros((3, 3))) + + @_no_tracing + def test_obj_obj(self): + # check memory is initialized on resize, gh-4857 + a = np.ones(10, dtype=[('k', object, 2)]) + if IS_PYPY: + a.resize(15, refcheck=False) + else: + a.resize(15,) + assert_equal(a.shape, (15,)) + assert_array_equal(a['k'][-5:], 0) + assert_array_equal(a['k'][:-5], 1) + + def test_empty_view(self): + # check that sizes containing a zero don't trigger a reallocate for + # already empty arrays + x = np.zeros((10, 0), int) + x_view = x[...] + x_view.resize((0, 10)) + x_view.resize((0, 100)) + + def test_check_weakref(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + xref = weakref.ref(x) + assert_raises(ValueError, x.resize, (5, 1)) + + +class TestRecord: + def test_field_rename(self): + dt = np.dtype([('f', float), ('i', int)]) + dt.names = ['p', 'q'] + assert_equal(dt.names, ['p', 'q']) + + def test_multiple_field_name_occurrence(self): + def test_dtype_init(): + np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) + + # Error raised when multiple fields have the same name + assert_raises(ValueError, test_dtype_init) + + def test_bytes_fields(self): + # Bytes are not allowed in field names and not recognized in titles + # on Py3 + assert_raises(TypeError, np.dtype, [(b'a', int)]) + assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) + + dt = np.dtype([((b'a', 'b'), int)]) + assert_raises(TypeError, dt.__getitem__, b'a') + + x = np.array([(1,), (2,), (3,)], dtype=dt) + assert_raises(IndexError, x.__getitem__, b'a') + + y = x[0] + assert_raises(IndexError, y.__getitem__, b'a') + + def test_multiple_field_name_unicode(self): + def test_dtype_unicode(): + np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) + + # Error raised when multiple fields have the same name(unicode included) + assert_raises(ValueError, test_dtype_unicode) + + def test_fromarrays_unicode(self): + # A single name string provided to fromarrays() is allowed to be unicode + x = np._core.records.fromarrays( + [[0], [1]], names='a,b', formats='i4,i4') + assert_equal(x['a'][0], 0) + assert_equal(x['b'][0], 1) + + def test_unicode_order(self): + # Test that we can sort with order as a unicode field name + name = 'b' + x = np.array([1, 3, 2], dtype=[(name, int)]) + x.sort(order=name) + assert_equal(x['b'], np.array([1, 2, 3])) + + def test_field_names(self): + # Test unicode and 8-bit / byte strings can be used + a = np.zeros((1,), dtype=[('f1', 'i4'), + ('f2', 'i4'), + ('f3', [('sf1', 'i4')])]) + # byte string indexing fails gracefully + assert_raises(IndexError, a.__setitem__, b'f1', 1) + assert_raises(IndexError, a.__getitem__, b'f1') + assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) + assert_raises(IndexError, a['f1'].__getitem__, b'sf1') + b = a.copy() + fn1 = 'f1' + b[fn1] = 1 + assert_equal(b[fn1], 1) + fnn = 'not at all' + 
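# [Editor's aside -- illustrative sketch, not part of the patch] The field
# name rules asserted in test_field_names, reduced to the essentials: str
# names index structured fields, while bytes keys raise IndexError:
import numpy as np

a = np.zeros(1, dtype=[('f1', 'i4'), ('f2', 'f8')])
a['f1'] = 7          # str field names work
try:
    a[b'f1']         # bytes keys are rejected
except IndexError:
    pass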
assert_raises(ValueError, b.__setitem__, fnn, 1) + assert_raises(ValueError, b.__getitem__, fnn) + b[0][fn1] = 2 + assert_equal(b[fn1], 2) + # Subfield + assert_raises(ValueError, b[0].__setitem__, fnn, 1) + assert_raises(ValueError, b[0].__getitem__, fnn) + # Subfield + fn3 = 'f3' + sfn1 = 'sf1' + b[fn3][sfn1] = 1 + assert_equal(b[fn3][sfn1], 1) + assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) + assert_raises(ValueError, b[fn3].__getitem__, fnn) + # multiple subfields + fn2 = 'f2' + b[fn2] = 3 + + assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) + assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) + + # non-ascii unicode field indexing is well behaved + assert_raises(ValueError, a.__setitem__, '\u03e0', 1) + assert_raises(ValueError, a.__getitem__, '\u03e0') + + def test_record_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + a.flags.writeable = False + b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) + b.flags.writeable = False + c = np.array([(1, 2), (3, 4)], dtype='i1,i2') + c.flags.writeable = False + assert_(hash(a[0]) == hash(a[1])) + assert_(hash(a[0]) == hash(b[0])) + assert_(hash(a[0]) != hash(b[1])) + assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) + + def test_record_no_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + assert_raises(TypeError, hash, a[0]) + + def test_empty_structure_creation(self): + # make sure these do not raise errors (gh-5631) + np.array([()], dtype={'names': [], 'formats': [], + 'offsets': [], 'itemsize': 12}) + np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], + 'offsets': [], 'itemsize': 12}) + + def test_multifield_indexing_view(self): + a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')]) + v = a[['a', 'c']] + assert_(v.base is a) + assert_(v.dtype == np.dtype({'names': ['a', 'c'], + 'formats': ['i4', 'u4'], + 'offsets': [0, 8]})) + v[:] = (4, 5) + assert_equal(a[0].item(), (4, 1, 5)) + +class TestView: + def test_basic(self): + x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], + dtype=[('r', np.int8), ('g', np.int8), + ('b', np.int8), ('a', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype=' 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + A = np.zeros((0, 3)) + for f in self.funcs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(A, axis=axis)).all()) + assert_(len(w) > 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(A, axis=axis), np.zeros([])) + + def test_mean_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * mat.shape[axis] + assert_almost_equal(res, tgt) + for axis in [None]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * np.prod(mat.shape) + assert_almost_equal(res, tgt) + + def test_mean_float16(self): + # This fail if the sum inside mean is done in float16 instead + # of float32. + assert_(_mean(np.ones(100000, dtype='float16')) == 1) + + def test_mean_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. 
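# [Editor's aside -- illustrative sketch, not part of the patch] The point of
# the gh-15817 checks here and in the var/std variants below: an out-of-range
# axis raises np.exceptions.AxisError, which subclasses both ValueError and
# IndexError, so either style of except clause still catches it:
import numpy as np

try:
    np.arange(10).mean(axis=2)
except np.exceptions.AxisError as exc:
    assert isinstance(exc, ValueError) and isinstance(exc, IndexError)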
+ with assert_raises(np.exceptions.AxisError): + np.arange(10).mean(axis=2) + + def test_mean_where(self): + a = np.arange(16).reshape((4, 4)) + wh_full = np.array([[False, True, False, True], + [True, False, True, False], + [True, True, False, False], + [False, False, True, True]]) + wh_partial = np.array([[False], + [True], + [True], + [False]]) + _cases = [(1, True, [1.5, 5.5, 9.5, 13.5]), + (0, wh_full, [6., 5., 10., 9.]), + (1, wh_full, [2., 5., 8.5, 14.5]), + (0, wh_partial, [6., 7., 8., 9.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.mean(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.mean(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[1.5, 5.5], [9.5, 13.5]] + assert_allclose(a3d.mean(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + with pytest.warns(RuntimeWarning) as w: + assert_allclose(a.mean(axis=1, where=wh_partial), + np.array([np.nan, 5.5, 9.5, np.nan])) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.mean(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.mean(a, where=False), np.nan) + + def test_var_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize(('complex_dtype', 'ndec'), ( + ('complex64', 6), + ('complex128', 7), + ('clongdouble', 7), + )) + def test_var_complex_values(self, complex_dtype, ndec): + # Test fast-paths for every builtin complex type + for axis in [0, 1, None]: + mat = self.cmat.copy().astype(complex_dtype) + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt, decimal=ndec) + + def test_var_dimensions(self): + # _var paths for complex number introduce additions on views that + # increase dimensions. Ensure this generalizes to higher dims + mat = np.stack([self.cmat] * 3) + for axis in [0, 1, 2, -1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_var_complex_byteorder(self): + # Test that var fast-path does not cause failures for complex arrays + # with non-native byteorder + cmat = self.cmat.copy().astype('complex128') + cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) + assert_almost_equal(cmat.var(), cmat_swapped.var()) + + def test_var_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. 
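# [Editor's aside -- illustrative sketch, not part of the patch] The where=
# semantics driving test_mean_where/test_var_where: elements whose mask entry
# is False are excluded from the reduction, so a masked mean equals the mean
# of the boolean-indexed selection (and an all-False mask yields NaN plus a
# RuntimeWarning, as asserted above):
import numpy as np

a = np.arange(6.0).reshape(2, 3)
m = np.array([[True, False, True],
              [False, True, True]])
assert np.mean(a, where=m) == a[m].mean()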
+ with assert_raises(np.exceptions.AxisError): + np.arange(10).var(axis=2) + + def test_var_where(self): + a = np.arange(25).reshape((5, 5)) + wh_full = np.array([[False, True, False, True, True], + [True, False, True, True, False], + [True, True, False, False, True], + [False, True, True, False, True], + [True, False, True, True, False]]) + wh_partial = np.array([[False], + [True], + [True], + [False], + [True]]) + _cases = [(0, True, [50., 50., 50., 50., 50.]), + (1, True, [2., 2., 2., 2., 2.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.var(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.var(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.25, 0.25], [0.25, 0.25]] + assert_allclose(a3d.var(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(np.var(a, axis=1, where=wh_full), + np.var(a[wh_full].reshape((5, 3)), axis=1)) + assert_allclose(np.var(a, axis=0, where=wh_partial), + np.var(a[wh_partial[:, 0]], axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.var(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.var(a, where=False), np.nan) + + def test_std_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + tgt = np.sqrt(_var(mat, axis=axis)) + res = _std(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_std_where(self): + a = np.arange(25).reshape((5, 5))[::-1] + whf = np.array([[False, True, False, True, True], + [True, False, True, False, True], + [True, True, False, True, False], + [True, False, True, True, False], + [False, True, False, True, True]]) + whp = np.array([[False], + [False], + [True], + [True], + [False]]) + _cases = [ + (0, True, 7.07106781 * np.ones(5)), + (1, True, 1.41421356 * np.ones(5)), + (0, whf, + np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])), + (0, whp, 2.5 * np.ones(5)) + ] + for _ax, _wh, _res in _cases: + assert_allclose(a.std(axis=_ax, where=_wh), _res) + assert_allclose(np.std(a, axis=_ax, where=_wh), _res) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.5, 0.5], [0.5, 0.5]] + assert_allclose(a3d.std(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.std(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(a.std(axis=1, where=whf), + np.std(a[whf].reshape((5, 3)), axis=1)) + assert_allclose(np.std(a, axis=1, where=whf), + (a[whf].reshape((5, 3))).std(axis=1)) + assert_allclose(a.std(axis=0, where=whp), + np.std(a[whp[:, 0]], axis=0)) + assert_allclose(np.std(a, axis=0, where=whp), + (a[whp[:, 0]]).std(axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.std(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.std(a, where=False), np.nan) + + def test_subclass(self): + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + + def __array_finalize__(self, obj): + self.info = getattr(obj, "info", '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + res = dat.mean(1) + assert_(res.info == dat.info) + res = dat.std(1) + assert_(res.info == dat.info) + res = dat.var(1) + assert_(res.info == dat.info) + + +class TestVdot: + def test_basic(self): + dt_numeric = np.typecodes['AllFloat'] + 
np.typecodes['AllInteger'] + dt_complex = np.typecodes['Complex'] + + # test real + a = np.eye(3) + for dt in dt_numeric + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test complex + a = np.eye(3) * 1j + for dt in dt_complex + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test boolean + b = np.eye(3, dtype=bool) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), True) + + def test_vdot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.vdot(a, a) + + # integer arrays are exact + assert_equal(np.vdot(a, b), res) + assert_equal(np.vdot(b, a), res) + assert_equal(np.vdot(b, b), res) + + def test_vdot_uncontiguous(self): + for size in [2, 1000]: + # Different sizes match different branches in vdot. + a = np.zeros((size, 2, 2)) + b = np.zeros((size, 2, 2)) + a[:, 0, 0] = np.arange(size) + b[:, 0, 0] = np.arange(size) + 1 + # Make a and b uncontiguous: + a = a[..., 0] + b = b[..., 0] + + assert_equal(np.vdot(a, b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy()), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy(), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy('F'), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy('F')), + np.vdot(a.flatten(), b.flatten())) + + +class TestDot: + def setup_method(self): + np.random.seed(128) + self.A = np.random.rand(4, 2) + self.b1 = np.random.rand(2, 1) + self.b2 = np.random.rand(2) + self.b3 = np.random.rand(1, 2) + self.b4 = np.random.rand(4) + self.N = 7 + + def test_dotmatmat(self): + A = self.A + res = np.dot(A.transpose(), A) + tgt = np.array([[1.45046013, 0.86323640], + [0.86323640, 0.84934569]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec(self): + A, b1 = self.A, self.b1 + res = np.dot(A, b1) + tgt = np.array([[0.32114320], [0.04889721], + [0.15696029], [0.33612621]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec2(self): + A, b2 = self.A, self.b2 + res = np.dot(A, b2) + tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat(self): + A, b4 = self.A, self.b4 + res = np.dot(b4, A) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat2(self): + b3, A = self.b3, self.A + res = np.dot(b3, A.transpose()) + tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat3(self): + A, b4 = self.A, self.b4 + res = np.dot(A.transpose(), b4) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecouter(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b1, b3) + tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecinner(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b3, b1) + tgt = np.array([[0.23129668]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect1(self): + b1 = np.ones((3, 1)) + b2 = [5.3] + res = np.dot(b1, b2) + tgt = np.array([5.3, 5.3, 5.3]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect2(self): + b1 = np.ones((3, 1)).transpose() + b2 = [6.2] + res = np.dot(b2, b1) + tgt = 
np.array([6.2, 6.2, 6.2]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar(self): + np.random.seed(100) + b1 = np.random.rand(1, 1) + b2 = np.random.rand(1, 4) + res = np.dot(b1, b2) + tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar2(self): + np.random.seed(100) + b1 = np.random.rand(4, 1) + b2 = np.random.rand(1, 1) + res = np.dot(b1, b2) + tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_all(self): + dims = [(), (1,), (1, 1)] + dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] + for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): + b1 = np.zeros(dim1) + b2 = np.zeros(dim2) + res = np.dot(b1, b2) + tgt = np.zeros(dim) + assert_(res.shape == tgt.shape) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_vecobject(self): + class Vec: + def __init__(self, sequence=None): + if sequence is None: + sequence = [] + self.array = np.array(sequence) + + def __add__(self, other): + out = Vec() + out.array = self.array + other.array + return out + + def __sub__(self, other): + out = Vec() + out.array = self.array - other.array + return out + + def __mul__(self, other): # with scalar + out = Vec(self.array.copy()) + out.array *= other + return out + + def __rmul__(self, other): + return self * other + + U_non_cont = np.transpose([[1., 1.], [1., 2.]]) + U_cont = np.ascontiguousarray(U_non_cont) + x = np.array([Vec([1., 0.]), Vec([0., 1.])]) + zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) + zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) + assert_equal(zeros[0].array, zeros_test[0].array) + assert_equal(zeros[1].array, zeros_test[1].array) + + def test_dot_2args(self): + + a = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([[1, 0], [1, 1]], dtype=float) + c = np.array([[3, 2], [7, 4]], dtype=float) + + d = dot(a, b) + assert_allclose(c, d) + + def test_dot_3args(self): + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 32)) + if HAS_REFCOUNT: + orig_refcount = sys.getrefcount(r) + for i in range(12): + dot(f, v, r) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(r), orig_refcount) + r2 = dot(f, v, out=None) + assert_array_equal(r2, r) + assert_(r is dot(f, v, out=r)) + + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = dot(f, v) + assert_(r is dot(f, v, r)) + assert_array_equal(r2, r) + + def test_dot_3args_errors(self): + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 31)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32, 1024)) + assert_raises(ValueError, dot, f, v, r) + assert_raises(ValueError, dot, f, v, r.T) + + r = np.empty((1024, 64)) + assert_raises(ValueError, dot, f, v, r[:, ::2]) + assert_raises(ValueError, dot, f, v, r[:, :32]) + + r = np.empty((1024, 32), dtype=np.float32) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024, 32), dtype=int) + assert_raises(ValueError, dot, f, v, r) + + def test_dot_out_result(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + assert np.array_equal(res, y) + 
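# [Editor's aside -- illustrative sketch, not part of the patch] The out=
# contract these dot tests rely on: the product is written into out and the
# very same array is returned, so res and z above refer to one buffer:
import numpy as np

lhs = np.ones((4, 3))
rhs = np.ones((3, 2))
out = np.empty((4, 2))
res = np.dot(lhs, rhs, out=out)
assert res is out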
assert np.array_equal(z, y) + + def test_dot_out_aliasing(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + z[0] = 2 + assert np.array_equal(res, z) + + def test_dot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.dot(a, a) + + # integer arrays are exact + assert_equal(np.dot(a, b), res) + assert_equal(np.dot(b, a), res) + assert_equal(np.dot(b, b), res) + + def test_accelerate_framework_sgemv_fix(self): + + def aligned_array(shape, align, dtype, order='C'): + d = dtype(0) + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset + N * d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) + + m = aligned_array(100, 15, np.float32) + s = aligned_array((100, 100), 15, np.float32) + np.dot(s, m) # this will always segfault if the bug is present + + testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F')) + for align, m, n, a_order in testdata: + # Calculation in double precision + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + X_f = as_aligned(X_d, align, np.float32) + assert_dot_close(A_f, X_f, desired) + # Strided A rows + A_d_2 = A_d[::2] + desired = np.dot(A_d_2, X_d) + A_f_2 = A_f[::2] + assert_dot_close(A_f_2, X_f, desired) + # Strided A columns, strided X vector + A_d_22 = A_d_2[:, ::2] + X_d_2 = X_d[::2] + desired = np.dot(A_d_22, X_d_2) + A_f_22 = A_f_2[:, ::2] + X_f_2 = X_f[::2] + assert_dot_close(A_f_22, X_f_2, desired) + # Check the strides are as expected + if a_order == 'F': + assert_equal(A_f_22.strides, (8, 8 * m)) + else: + assert_equal(A_f_22.strides, (8 * n, 8)) + assert_equal(X_f_2.strides, (8,)) + # Strides in A rows + cols only + X_f_2c = as_aligned(X_f_2, align, np.float32) + assert_dot_close(A_f_22, X_f_2c, desired) + # Strides just in A cols + A_d_12 = A_d[:, ::2] + desired = np.dot(A_d_12, X_d_2) + A_f_12 = A_f[:, ::2] + assert_dot_close(A_f_12, X_f_2c, desired) + # Strides in A cols and X + assert_dot_close(A_f_12, X_f_2, desired) + + @pytest.mark.slow + @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) + @requires_memory(free_bytes=18e9) # complex case needs 18GiB+ + def test_huge_vectordot(self, dtype): + # Large vector multiplications are chunked with 32bit BLAS + # Test that the chunking does the right thing, see also gh-22262 + data = np.ones(2**30 + 100, dtype=dtype) + res = np.dot(data, data) + assert res == 2**30 + 100 + + def test_dtype_discovery_fails(self): + # See gh-14247, error checking was missing for failed dtype discovery + class BadObject: + def __array__(self, dtype=None, copy=None): + raise TypeError("just this tiny mint leaf") + + with pytest.raises(TypeError): + np.dot(BadObject(), BadObject()) + + with pytest.raises(TypeError): + np.dot(3.0, BadObject()) + + +class MatmulCommon: + """Common tests for '@' operator and numpy.matmul. + + """ + # Should work with these types. 
Will want to add + # "O" at some point + types = "?bhilqBHILQefdgFDGO" + + def test_exceptions(self): + dims = [ + ((1,), (2,)), # mismatched vector vector + ((2, 1,), (2,)), # mismatched matrix vector + ((2,), (1, 2)), # mismatched vector matrix + ((1, 2), (3, 1)), # mismatched matrix matrix + ((1,), ()), # vector scalar + ((), (1)), # scalar vector + ((1, 1), ()), # matrix scalar + ((), (1, 1)), # scalar matrix + ((2, 2, 1), (3, 1, 2)), # cannot broadcast + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + assert_raises(ValueError, self.matmul, a, b) + + def test_shapes(self): + dims = [ + ((1, 1), (2, 1, 1)), # broadcast first argument + ((2, 1, 1), (1, 1)), # broadcast second argument + ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + res = self.matmul(a, b) + assert_(res.shape == (2, 1, 1)) + + # vector vector returns scalars. + for dt in self.types: + a = np.ones((2,), dtype=dt) + b = np.ones((2,), dtype=dt) + c = self.matmul(a, b) + assert_(np.array(c).shape == ()) + + def test_result_types(self): + mat = np.ones((1, 1)) + vec = np.ones((1,)) + for dt in self.types: + m = mat.astype(dt) + v = vec.astype(dt) + for arg in [(m, v), (v, m), (m, m)]: + res = self.matmul(*arg) + assert_(res.dtype == dt) + + # vector vector returns scalars + if dt != "O": + res = self.matmul(v, v) + assert_(type(res) is np.dtype(dt).type) + + def test_scalar_output(self): + vec1 = np.array([2]) + vec2 = np.array([3, 4]).reshape(1, -1) + tgt = np.array([6, 8]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt) + res = self.matmul(v2.T, v1) + assert_equal(res, tgt) + + # boolean type + vec = np.array([True, True], dtype='?').reshape(1, -1) + res = self.matmul(vec[:, 0], vec) + assert_equal(res, True) + + def test_vector_vector_values(self): + vec1 = np.array([1, 2]) + vec2 = np.array([3, 4]).reshape(-1, 1) + tgt1 = np.array([11]) + tgt2 = np.array([[3, 6], [4, 8]]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt1) + # no broadcast, we must make v1 into a 2d ndarray + res = self.matmul(v2, v1.reshape(1, -1)) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, True], dtype='?') + res = self.matmul(vec, vec) + assert_equal(res, True) + + def test_vector_matrix_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([7, 10]) + tgt2 = np.stack([tgt1] * 2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(v, m1) + assert_equal(res, tgt1) + res = self.matmul(v, m2) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1] * 2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_vector_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([5, 11]) + tgt2 = np.stack([tgt1] * 2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = 
mat2.astype(dt) + res = self.matmul(m1, v) + assert_equal(res, tgt1) + res = self.matmul(m2, v) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1] * 2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_matrix_values(self): + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.array([[1, 0], [1, 1]]) + mat12 = np.stack([mat1, mat2], axis=0) + mat21 = np.stack([mat2, mat1], axis=0) + tgt11 = np.array([[7, 10], [15, 22]]) + tgt12 = np.array([[3, 2], [7, 4]]) + tgt21 = np.array([[1, 2], [4, 6]]) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + for dt in self.types[1:]: + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + m12 = mat12.astype(dt) + m21 = mat21.astype(dt) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + # boolean type + m1 = np.array([[1, 1], [0, 0]], dtype=np.bool) + m2 = np.array([[1, 0], [1, 1]], dtype=np.bool) + m12 = np.stack([m1, m2], axis=0) + m21 = np.stack([m2, m1], axis=0) + tgt11 = m1 + tgt12 = m1 + tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + +class TestMatmul(MatmulCommon): + matmul = np.matmul + + def test_out_arg(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + tgt = np.dot(a, b) + + # test as positional argument + msg = "out positional argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out) + assert_array_equal(out, tgt, err_msg=msg) + + # test as keyword argument + msg = "out keyword argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out=out) + assert_array_equal(out, tgt, err_msg=msg) + + # test out with not allowed type cast (safe casting) + msg = "Cannot cast ufunc .* output" + out = np.zeros((5, 2), dtype=np.int32) + assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) + + # test out with type upcast to complex + out = np.zeros((5, 2), dtype=np.complex128) + c = self.matmul(a, b, out=out) + assert_(c is out) + with suppress_warnings() as sup: + sup.filter(ComplexWarning, '') + c = c.astype(tgt.dtype) + assert_array_equal(c, tgt) + + def test_empty_out(self): + # Check that the output cannot be broadcast, so that it cannot be + # size zero when the outer dimensions (iterator size) has size zero. 
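# [Editor's aside -- illustrative sketch, not part of the patch] The rule
# that test_empty_out states in its comment, in isolation: matmul never
# broadcasts the out= array, so an out whose outer dimension differs from the
# result is rejected even though ordinary broadcasting could match the shapes:
import numpy as np

a = np.ones((0, 1, 1))
assert np.matmul(a, a).shape == (0, 1, 1)
try:
    np.matmul(a, a, out=np.ones((1, 1, 1)))
except ValueError:
    pass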
+ arr = np.ones((0, 1, 1)) + out = np.ones((1, 1, 1)) + assert self.matmul(arr, arr).shape == (0, 1, 1) + + with pytest.raises(ValueError, match=r"non-broadcastable"): + self.matmul(arr, arr, out=out) + + def test_out_contiguous(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + v = np.array([1, 3], dtype=float) + tgt = np.dot(a, b) + tgt_mv = np.dot(a, v) + + # test out non-contiguous + out = np.ones((5, 2, 2), dtype=float) + c = self.matmul(a, b, out=out[..., 0]) + assert c.base is out + assert_array_equal(c, tgt) + c = self.matmul(a, v, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + c = self.matmul(v, a.T, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + + # test out contiguous in only last dim + out = np.ones((10, 2), dtype=float) + c = self.matmul(a, b, out=out[::2, :]) + assert_array_equal(c, tgt) + + # test transposes of out, args + out = np.ones((5, 2), dtype=float) + c = self.matmul(b.T, a.T, out=out.T) + assert_array_equal(out, tgt) + + m1 = np.arange(15.).reshape(5, 3) + m2 = np.arange(21.).reshape(3, 7) + m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous + vc = np.arange(10.) + vr = np.arange(6.) + m0 = np.zeros((3, 0)) + + @pytest.mark.parametrize('args', ( + # matrix-matrix + (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), + # matrix-matrix-transpose, contiguous and non + (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), + (m3, m3.T), (m3.T, m3), + # matrix-matrix non-contiguous + (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), + # vector-matrix, matrix-vector, contiguous + (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), + # vector-matrix, matrix-vector, vector non-contiguous + (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), + # vector-matrix, matrix-vector, matrix non-contiguous + (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), + # vector-matrix, matrix-vector, both non-contiguous + (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), + # size == 0 + (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), + )) + def test_dot_equivalent(self, args): + r1 = np.matmul(*args) + r2 = np.dot(*args) + assert_equal(r1, r2) + + r3 = np.matmul(args[0].copy(), args[1].copy()) + assert_equal(r1, r3) + + # matrix matrix, issue 29164 + if [len(args[0].shape), len(args[1].shape)] == [2, 2]: + out_f = np.zeros((r2.shape[0] * 2, r2.shape[1] * 2), order='F') + r4 = np.matmul(*args, out=out_f[::2, ::2]) + assert_equal(r2, r4) + + def test_matmul_object(self): + import fractions + + f = np.vectorize(fractions.Fraction) + + def random_ints(): + return np.random.randint(1, 1000, size=(10, 3, 3)) + M1 = f(random_ints(), random_ints()) + M2 = f(random_ints(), random_ints()) + + M3 = self.matmul(M1, M2) + + [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]] + + assert_allclose(N3, self.matmul(N1, N2)) + + def test_matmul_object_type_scalar(self): + from fractions import Fraction as F + v = np.array([F(2, 3), F(5, 7)]) + res = self.matmul(v, v) + assert_(type(res) is F) + + def test_matmul_empty(self): + a = np.empty((3, 0), dtype=object) + b = np.empty((0, 3), dtype=object) + c = np.zeros((3, 3)) + assert_array_equal(np.matmul(a, b), c) + + def test_matmul_exception_multiply(self): + # test that matmul fails if `__mul__` is missing + class add_not_multiply: + def __add__(self, other): + return self + a = np.full((3, 3), add_not_multiply()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_exception_add(self): + # test that matmul fails if `__add__` is missing + 
class multiply_not_add: + def __mul__(self, other): + return self + a = np.full((3, 3), multiply_not_add()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_bool(self): + # gh-14439 + a = np.array([[1, 0], [1, 1]], dtype=bool) + assert np.max(a.view(np.uint8)) == 1 + b = np.matmul(a, a) + # matmul with boolean output should always be 0, 1 + assert np.max(b.view(np.uint8)) == 1 + + rg = np.random.default_rng(np.random.PCG64(43)) + d = rg.integers(2, size=4 * 5, dtype=np.int8) + d = d.reshape(4, 5) > 0 + out1 = np.matmul(d, d.reshape(5, 4)) + out2 = np.dot(d, d.reshape(5, 4)) + assert_equal(out1, out2) + + c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) + assert not np.any(c) + + +class TestMatmulOperator(MatmulCommon): + import operator + matmul = operator.matmul + + def test_array_priority_override(self): + + class A: + __array_priority__ = 1000 + + def __matmul__(self, other): + return "A" + + def __rmatmul__(self, other): + return "A" + + a = A() + b = np.ones(2) + assert_equal(self.matmul(a, b), "A") + assert_equal(self.matmul(b, a), "A") + + def test_matmul_raises(self): + assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) + assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) + assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc')) + + +class TestMatmulInplace: + DTYPES = {} + for i in MatmulCommon.types: + for j in MatmulCommon.types: + if np.can_cast(j, i): + DTYPES[f"{i}-{j}"] = (np.dtype(i), np.dtype(j)) + + @pytest.mark.parametrize("dtype1,dtype2", DTYPES.values(), ids=DTYPES) + def test_basic(self, dtype1: np.dtype, dtype2: np.dtype) -> None: + a = np.arange(10).reshape(5, 2).astype(dtype1) + a_id = id(a) + b = np.ones((2, 2), dtype=dtype2) + + ref = a @ b + a @= b + + assert id(a) == a_id + assert a.dtype == dtype1 + assert a.shape == (5, 2) + if dtype1.kind in "fc": + np.testing.assert_allclose(a, ref) + else: + np.testing.assert_array_equal(a, ref) + + SHAPES = { + "2d_large": ((10**5, 10), (10, 10)), + "3d_large": ((10**4, 10, 10), (1, 10, 10)), + "1d": ((3,), (3,)), + "2d_1d": ((3, 3), (3,)), + "1d_2d": ((3,), (3, 3)), + "2d_broadcast": ((3, 3), (3, 1)), + "2d_broadcast_reverse": ((1, 3), (3, 3)), + "3d_broadcast1": ((3, 3, 3), (1, 3, 1)), + "3d_broadcast2": ((3, 3, 3), (1, 3, 3)), + "3d_broadcast3": ((3, 3, 3), (3, 3, 1)), + "3d_broadcast_reverse1": ((1, 3, 3), (3, 3, 3)), + "3d_broadcast_reverse2": ((3, 1, 3), (3, 3, 3)), + "3d_broadcast_reverse3": ((1, 1, 3), (3, 3, 3)), + } + + @pytest.mark.parametrize("a_shape,b_shape", SHAPES.values(), ids=SHAPES) + def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]): + a_size = np.prod(a_shape) + a = np.arange(a_size).reshape(a_shape).astype(np.float64) + a_id = id(a) + + b_size = np.prod(b_shape) + b = np.arange(b_size).reshape(b_shape) + + ref = a @ b + if ref.shape != a_shape: + with pytest.raises(ValueError): + a @= b + return + else: + a @= b + + assert id(a) == a_id + assert a.dtype.type == np.float64 + assert a.shape == a_shape + np.testing.assert_allclose(a, ref) + + +def test_matmul_axes(): + a = np.arange(3 * 4 * 5).reshape(3, 4, 5) + c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) + assert c.shape == (3, 4, 4) + d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) + assert d.shape == (4, 4, 3) + e = np.swapaxes(d, 0, 2) + assert_array_equal(e, c) + f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) + assert f.shape == (4, 5) + + +class TestInner: + + def test_inner_type_mismatch(self): + c = 1. 
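# [Editor's aside -- illustrative sketch, not part of the patch] np.inner, as
# exercised by the TestInner cases below: for 1-D inputs it is the ordinary
# dot product, and for higher-dimensional inputs it contracts the last axis
# of each operand:
import numpy as np

assert np.inner(np.array([1, 2, 3]), np.array([0, 1, 0])) == 2
A = np.arange(6).reshape(2, 3)
assert np.inner(A, np.array([1, 1, 1])).shape == (2,)  # contracts last axes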
+ A = np.array((1, 1), dtype='i,i') + + assert_raises(TypeError, np.inner, c, A) + assert_raises(TypeError, np.inner, A, c) + + def test_inner_scalar_and_vector(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + vec = np.array([1, 2], dtype=dt) + desired = np.array([3, 6], dtype=dt) + assert_equal(np.inner(vec, sca), desired) + assert_equal(np.inner(sca, vec), desired) + + def test_vecself(self): + # Ticket 844. + # Inner product of a vector with itself segfaults or give + # meaningless result + a = np.zeros(shape=(1, 80), dtype=np.float64) + p = np.inner(a, a) + assert_almost_equal(p, 0, decimal=14) + + def test_inner_product_with_various_contiguities(self): + # github issue 6532 + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + # check an inner product involving a matrix transpose + A = np.array([[1, 2], [3, 4]], dtype=dt) + B = np.array([[1, 3], [2, 4]], dtype=dt) + C = np.array([1, 1], dtype=dt) + desired = np.array([4, 6], dtype=dt) + assert_equal(np.inner(A.T, C), desired) + assert_equal(np.inner(C, A.T), desired) + assert_equal(np.inner(B, C), desired) + assert_equal(np.inner(C, B), desired) + # check a matrix product + desired = np.array([[7, 10], [15, 22]], dtype=dt) + assert_equal(np.inner(A, B), desired) + # check the syrk vs. gemm paths + desired = np.array([[5, 11], [11, 25]], dtype=dt) + assert_equal(np.inner(A, A), desired) + assert_equal(np.inner(A, A.copy()), desired) + # check an inner product involving an aliased and reversed view + a = np.arange(5).astype(dt) + b = a[::-1] + desired = np.array(10, dtype=dt).item() + assert_equal(np.inner(b, a), desired) + + def test_3d_tensor(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + a = np.arange(24).reshape(2, 3, 4).astype(dt) + b = np.arange(24, 48).reshape(2, 3, 4).astype(dt) + desired = np.array( + [[[[ 158, 182, 206], + [ 230, 254, 278]], + + [[ 566, 654, 742], + [ 830, 918, 1006]], + + [[ 974, 1126, 1278], + [1430, 1582, 1734]]], + + [[[1382, 1598, 1814], + [2030, 2246, 2462]], + + [[1790, 2070, 2350], + [2630, 2910, 3190]], + + [[2198, 2542, 2886], + [3230, 3574, 3918]]]] + ).astype(dt) + assert_equal(np.inner(a, b), desired) + assert_equal(np.inner(b, a).transpose(2, 3, 0, 1), desired) + + +class TestChoose: + def setup_method(self): + self.x = 2 * np.ones((3,), dtype=int) + self.y = 3 * np.ones((3,), dtype=int) + self.x2 = 2 * np.ones((2, 3), dtype=int) + self.y2 = 3 * np.ones((2, 3), dtype=int) + self.ind = [0, 0, 1] + + def test_basic(self): + A = np.choose(self.ind, (self.x, self.y)) + assert_equal(A, [2, 2, 3]) + + def test_broadcast1(self): + A = np.choose(self.ind, (self.x2, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + def test_broadcast2(self): + A = np.choose(self.ind, (self.x, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + @pytest.mark.parametrize("ops", + [(1000, np.array([1], dtype=np.uint8)), + (-1, np.array([1], dtype=np.uint8)), + (1., np.float32(3)), + (1., np.array([3], dtype=np.float32))],) + def test_output_dtype(self, ops): + expected_dt = np.result_type(*ops) + assert np.choose([0], ops).dtype == expected_dt + + def test_dimension_and_args_limit(self): + # Maxdims for the legacy iterator is 32, but the maximum number + # of arguments is actually larger (a itself also counts here) + a = np.ones((1,) * 32, dtype=np.intp) + res = a.choose([0, a] + [2] * 61) + with pytest.raises(ValueError, + match="Need at least 0 and at most 64 array objects"): + a.choose([0, a] 
+ [2] * 62) + + assert_array_equal(res, a) + # Choose is unfortunately limited to 32 dims as of NumPy 2.0 + a = np.ones((1,) * 60, dtype=np.intp) + with pytest.raises(RuntimeError, + match=".*32 dimensions but the array has 60"): + a.choose([a, a]) + + +class TestRepeat: + def setup_method(self): + self.m = np.array([1, 2, 3, 4, 5, 6]) + self.m_rect = self.m.reshape((2, 3)) + + def test_basic(self): + A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + def test_broadcast1(self): + A = np.repeat(self.m, 2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + def test_axis_spec(self): + A = np.repeat(self.m_rect, [2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + def test_broadcast2(self): + A = np.repeat(self.m_rect, 2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, 2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + +# TODO: test for multidimensional +NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} + + +@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) +class TestNeighborhoodIter: + # Simple, 2d tests + def test_simple2d(self, dt): + # Test zero and one padding for simple data type + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), + np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), + np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), + np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), + np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), + np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), + np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test with start in the middle + r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2) + assert_array_equal(l, r) + + def test_mirror2d(self, dt): + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), + np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Simple, 1d tests + def test_simple(self, dt): + # Test padding with constant values + x = np.linspace(1, 5, 5).astype(dt) + r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['one']) + 
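+        # Aside (not part of the upstream suite): the NEIGH_MODE padding used
+        # by the C-level iterator can be mimicked with public API.  Minimal
+        # sketch of the 'zero' mode for a [-1, 1] neighbourhood (illustrative
+        # helper, never called by the tests):
+        def _zero_pad_neighbourhoods():
+            import numpy as np
+            x = np.linspace(1, 5, 5)
+            padded = np.pad(x, 1, mode='constant', constant_values=0)
+            # One window of width 3 centred on each element of x:
+            windows = [padded[i:i + 3] for i in range(len(x))]
+            assert list(windows[0]) == [0.0, 1.0, 2.0]   # left edge padded
+            assert list(windows[-1]) == [4.0, 5.0, 0.0]  # right edge padded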
assert_array_equal(l, r) + + r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[4], NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test mirror modes + def test_mirror(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[1], NEIGH_MODE['mirror']) + assert_([i.dtype == dt for i in l]) + assert_array_equal(l, r) + + # Circular mode + def test_circular(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + +# Test stacking neighborhood iterators +class TestStackedNeighborhoodIter: + # Simple, 1d test: stacking 2 constant-padded neigh iterators + def test_simple_const(self): + dt = np.float64 + # Test zero and one padding for simple data type + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0], dtype=dt), + np.array([0], dtype=dt), + np.array([1], dtype=dt), + np.array([2], dtype=dt), + np.array([3], dtype=dt), + np.array([0], dtype=dt), + np.array([0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([1, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) + assert_array_equal(l, r) + + # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # mirror padding + def test_simple_mirror(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 1], dtype=dt), + np.array([1, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 3], dtype=dt), + np.array([3, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], 
NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # circular padding + def test_simple_circular(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 3, 1], dtype=dt), + np.array([3, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 1], dtype=dt), + np.array([3, 1, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator + # being strictly within the array + def test_simple_strict_within(self): + dt = np.float64 + # Stacking zero on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + +class TestWarnings: + + def test_complex_warning(self): + x = np.array([1, 2]) + y = np.array([1 - 2j, 1 + 2j]) + + with warnings.catch_warnings(): + warnings.simplefilter("error", ComplexWarning) + assert_raises(ComplexWarning, x.__setitem__, slice(None), y) + assert_equal(x, [1, 2]) + + +class TestMinScalarType: + + def test_usigned_shortshort(self): + dt = np.min_scalar_type(2**8 - 1) + wanted = np.dtype('uint8') + assert_equal(wanted, dt) + + def test_usigned_short(self): + dt = np.min_scalar_type(2**16 - 1) + wanted = np.dtype('uint16') + assert_equal(wanted, dt) + + def test_usigned_int(self): + dt = np.min_scalar_type(2**32 
- 1) + wanted = np.dtype('uint32') + assert_equal(wanted, dt) + + def test_usigned_longlong(self): + dt = np.min_scalar_type(2**63 - 1) + wanted = np.dtype('uint64') + assert_equal(wanted, dt) + + def test_object(self): + dt = np.min_scalar_type(2**64) + wanted = np.dtype('O') + assert_equal(wanted, dt) + + +from numpy._core._internal import _dtype_from_pep3118 + + +class TestPEP3118Dtype: + def _check(self, spec, wanted): + dt = np.dtype(wanted) + actual = _dtype_from_pep3118(spec) + assert_equal(actual, dt, + err_msg=f"spec {spec!r} != dtype {wanted!r}") + + def test_native_padding(self): + align = np.dtype('i').alignment + for j in range(8): + if j == 0: + s = 'bi' + else: + s = 'b%dxi' % j + self._check('@' + s, {'f0': ('i1', 0), + 'f1': ('i', align * (1 + j // align))}) + self._check('=' + s, {'f0': ('i1', 0), + 'f1': ('i', 1 + j)}) + + def test_native_padding_2(self): + # Native padding should work also for structs and sub-arrays + self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) + self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) + + def test_trailing_padding(self): + # Trailing padding should be included, *and*, the item size + # should match the alignment if in aligned mode + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return align * (1 + (n - 1) // align) + + base = {"formats": ['i'], "names": ['f0']} + + self._check('ix', dict(itemsize=aligned(size + 1), **base)) + self._check('ixx', dict(itemsize=aligned(size + 2), **base)) + self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) + self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) + self._check('i7x', dict(itemsize=aligned(size + 7), **base)) + + self._check('^ix', dict(itemsize=size + 1, **base)) + self._check('^ixx', dict(itemsize=size + 2, **base)) + self._check('^ixxx', dict(itemsize=size + 3, **base)) + self._check('^ixxxx', dict(itemsize=size + 4, **base)) + self._check('^i7x', dict(itemsize=size + 7, **base)) + + def test_native_padding_3(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), + ('sub', np.dtype('b,i')), ('c', 'i')], + align=True) + self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) + + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), + ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) + self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) + + def test_padding_with_array_inside_struct(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), + ('d', 'i')], + align=True) + self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) + + def test_byteorder_inside_struct(self): + # The byte order after @T{=i} should be '=', not '@'. + # Check this by noting the absence of native alignment. 
+        self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
+                                 'f1': ('i', 5)})
+
+    def test_intra_padding(self):
+        # Natively aligned sub-arrays may require some internal padding
+        align = np.dtype('i').alignment
+        size = np.dtype('i').itemsize
+
+        def aligned(n):
+            return (align * (1 + (n - 1) // align))
+
+        self._check('(3)T{ix}', ({
+            "names": ['f0'],
+            "formats": ['i'],
+            "offsets": [0],
+            "itemsize": aligned(size + 1)
+        }, (3,)))
+
+    def test_char_vs_string(self):
+        dt = np.dtype('c')
+        self._check('c', dt)
+
+        dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
+        self._check('4c4s', dt)
+
+    def test_field_order(self):
+        # gh-9053 - previously, we relied on dictionary key order
+        self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
+        self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
+
+    def test_unnamed_fields(self):
+        self._check('ii', [('f0', 'i'), ('f1', 'i')])
+        self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
+
+        self._check('i', 'i')
+        self._check('i:f0:', [('f0', 'i')])
+
+
+class TestNewBufferProtocol:
+    """ Test PEP3118 buffers """
+
+    def _check_roundtrip(self, obj):
+        obj = np.asarray(obj)
+        x = memoryview(obj)
+        y = np.asarray(x)
+        y2 = np.array(x)
+        assert_(not y.flags.owndata)
+        assert_(y2.flags.owndata)
+
+        assert_equal(y.dtype, obj.dtype)
+        assert_equal(y.shape, obj.shape)
+        assert_array_equal(obj, y)
+
+        assert_equal(y2.dtype, obj.dtype)
+        assert_equal(y2.shape, obj.shape)
+        assert_array_equal(obj, y2)
+
+    def test_roundtrip(self):
+        x = np.array([1, 2, 3, 4, 5], dtype='i4')
+        self._check_roundtrip(x)
+
+        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+        self._check_roundtrip(x)
+
+        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :]
+        self._check_roundtrip(x)
+
+        dt = [('a', 'b'),
+              ('b', 'h'),
+              ('c', 'i'),
+              ('d', 'l'),
+              ('dx', 'q'),
+              ('e', 'B'),
+              ('f', 'H'),
+              ('g', 'I'),
+              ('h', 'L'),
+              ('hx', 'Q'),
+              ('i', np.single),
+              ('j', np.double),
+              ('k', np.longdouble),
+              ('ix', np.csingle),
+              ('jx', np.cdouble),
+              ('kx', np.clongdouble),
+              ('l', 'S4'),
+              ('m', 'U4'),
+              ('n', 'V3'),
+              ('o', '?'),
+              ('p', np.half),
+              ]
+        x = np.array(
+                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                  b'aaaa', 'bbbb', b'xxx', True, 1.0)],
+                dtype=dt)
+        self._check_roundtrip(x)
+
+        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
+        self._check_roundtrip(x)
+
+        x = np.array([1, 2, 3], dtype='>i2')
+        self._check_roundtrip(x)
+
+        x = np.array([1, 2, 3], dtype='<i2')
+        self._check_roundtrip(x)
+
+        x = np.zeros(4, dtype=dt)
+        self._check_roundtrip(x)
+
+    def test_roundtrip_scalar(self):
+        # Issue #4015.
+        self._check_roundtrip(0)
+
+    def test_invalid_buffer_format(self):
+        # datetime64 cannot be used fully in a buffer yet
+        # Should be fixed in the next Numpy major release
+        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+        a = np.empty(3, dt)
+        assert_raises((ValueError, BufferError), memoryview, a)
+        assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
+
+    def test_export_simple_1d(self):
+        x = np.array([1, 2, 3, 4, 5], dtype='i')
+        y = memoryview(x)
+        assert_equal(y.format, 'i')
+        assert_equal(y.shape, (5,))
+        assert_equal(y.ndim, 1)
+        assert_equal(y.strides, (4,))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 4)
+
+    def test_export_simple_nd(self):
+        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+        y = memoryview(x)
+        assert_equal(y.format, 'd')
+        assert_equal(y.shape, (2, 2))
+        assert_equal(y.ndim, 2)
+        assert_equal(y.strides, (16, 8))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 8)
+
+    def test_export_discontiguous(self):
+        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :]
+        y = memoryview(x)
+        assert_equal(y.format, 'f')
+        assert_equal(y.shape, (3, 3))
+        assert_equal(y.ndim, 2)
+        assert_equal(y.strides, (36, 4))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 4)
+
+    def test_export_record(self):
+        dt = [('a', 'b'),
+              ('b', 'h'),
+              ('c', 'i'),
+              ('d', 'l'),
+              ('dx', 'q'),
+              ('e', 'B'),
+              ('f', 'H'),
+              ('g', 'I'),
+              ('h', 'L'),
+              ('hx', 'Q'),
+              ('i', np.single),
+              ('j', np.double),
+              ('k', np.longdouble),
+              ('ix', np.csingle),
+              ('jx', np.cdouble),
+              ('kx', np.clongdouble),
+              ('l', 'S4'),
+              ('m', 'U4'),
+              ('n', 'V3'),
+              ('o', '?'),
+              ('p', np.half),
+              ]
+        x = np.array(
+                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                  b'aaaa', 'bbbb', b'   ', True, 1.0)],
+                dtype=dt)
+        y = memoryview(x)
+        assert_equal(y.shape, (1,))
+        assert_equal(y.ndim, 1)
+        assert_equal(y.suboffsets, ())
+
+        sz = sum(np.dtype(b).itemsize for a, b in dt)
+        if np.dtype('l').itemsize == 4:
+            assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+        else:
+            assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+        assert_equal(y.strides, (sz,))
+        assert_equal(y.itemsize, sz)
+
+    def test_export_subarray(self):
+        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
+        y = memoryview(x)
+        assert_equal(y.format, 'T{(2,2)i:a:}')
+        assert_equal(y.shape, ())
+        assert_equal(y.ndim, 0)
+        assert_equal(y.strides, ())
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 16)
+
+    def test_export_endian(self):
+        x = np.array([1, 2, 3], dtype='>i')
+        y = memoryview(x)
+        if sys.byteorder == 'little':
+            assert_equal(y.format, '>i')
+        else:
+            assert_equal(y.format, 'i')
+
+        x = np.array([1, 2, 3], dtype='<i')
+        y = memoryview(x)
+        if sys.byteorder == 'little':
+            assert_equal(y.format, 'i')
+        else:
+            assert_equal(y.format, '<i')
+
+
+class TestConversion:
+    def test_array_scalar_relational_operation(self):
+        # All integer
+        for dt1 in np.typecodes['AllInteger']:
+            assert_(1 > np.array(0, dtype=dt1), f"type {dt1} failed")
+            assert_(not 1 < np.array(0, dtype=dt1), f"type {dt1} failed")
+
+            for dt2 in np.typecodes['AllInteger']:
+                assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
+                        f"type {dt1} and {dt2} failed")
+                assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
+                        f"type {dt1} and {dt2} failed")
+
+        # Unsigned integers
+        for dt1 in 'BHILQP':
+            assert_(-1 < np.array(1, dtype=dt1), f"type {dt1} failed")
+            assert_(not -1 > np.array(1, dtype=dt1), f"type {dt1} failed")
+            assert_(-1 != np.array(1, dtype=dt1), f"type {dt1} failed")
+
+            # Unsigned vs signed
+            for dt2 in 'bhilqp':
+                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
+                        f"type {dt1} and
{dt2} failed") + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + + # Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(-1 == np.array(-1, dtype=dt1), f"type {dt1} failed") + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + + def test_to_bool_scalar(self): + assert_equal(bool(np.array([False])), False) + assert_equal(bool(np.array([True])), True) + assert_equal(bool(np.array([[42]])), True) + + def test_to_bool_scalar_not_convertible(self): + + class NotConvertible: + def __bool__(self): + raise NotImplementedError + + assert_raises(NotImplementedError, bool, np.array(NotConvertible())) + assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("Pyodide/WASM has limited stack size") + + self_containing = np.array([None]) + self_containing[0] = self_containing + + Error = RecursionError + + assert_raises(Error, bool, self_containing) # previously stack overflow + self_containing[0] = None # resolve circular reference + + def test_to_bool_scalar_size_errors(self): + with pytest.raises(ValueError, match=".*one element is ambiguous"): + bool(np.array([1, 2])) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((3, 0))) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((0,))) + + def test_to_int_scalar(self): + # gh-9972 means that these aren't always the same + int_funcs = (int, lambda x: x.__int__()) + for int_func in int_funcs: + assert_equal(int_func(np.array(0)), 0) + with assert_warns(DeprecationWarning): + assert_equal(int_func(np.array([1])), 1) + with assert_warns(DeprecationWarning): + assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1, 2])) + + # gh-9972 + assert_equal(4, int_func(np.array('4'))) + assert_equal(5, int_func(np.bytes_(b'5'))) + assert_equal(6, int_func(np.str_('6'))) + + class NotConvertible: + def __int__(self): + raise NotImplementedError + assert_raises(NotImplementedError, + int_func, np.array(NotConvertible())) + with assert_warns(DeprecationWarning): + assert_raises(NotImplementedError, + int_func, np.array([NotConvertible()])) + + +class TestWhere: + def test_basic(self): + dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, + np.longdouble, np.clongdouble] + for dt in dts: + c = np.ones(53, dtype=bool) + assert_equal(np.where( c, dt(0), dt(1)), dt(0)) + assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) + assert_equal(np.where(True, dt(0), dt(1)), dt(0)) + assert_equal(np.where(False, dt(0), dt(1)), dt(1)) + d = np.ones_like(c).astype(dt) + e = np.zeros_like(d) + r = d.astype(dt) + c[7] = False + r[7] = e[7] + assert_equal(np.where(c, e, e), e) + assert_equal(np.where(c, d, e), r) + assert_equal(np.where(c, d, e[0]), r) + assert_equal(np.where(c, d[0], e), r) + assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) + 
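+            # Aside (not part of the upstream suite): np.where(cond, x, y)
+            # broadcasts all three operands, so the strided views exercised
+            # above and below behave exactly like contiguous inputs.  Sketch
+            # (helper, never called):
+            def _where_sketch():
+                import numpy as np
+                cond = np.array([True, False, True])
+                # Scalar x/y broadcast against cond:
+                assert list(np.where(cond, 1, 0)) == [1, 0, 1]
+                # A strided view of cond works the same way:
+                assert list(np.where(cond[::2], 1, 0)) == [1, 1]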
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) + assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) + assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) + assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) + assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) + assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_exotic(self): + # object + assert_array_equal(np.where(True, None, None), np.array(None)) + # zero sized + m = np.array([], dtype=bool).reshape(0, 3) + b = np.array([], dtype=np.float64).reshape(0, 3) + assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) + + # object cast + d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, + 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, + 1.267, 0.229, -1.39, 0.487]) + nan = float('NaN') + e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, + 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], + dtype=object) + m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, + 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) + + r = e[:] + r[np.where(m)] = d[np.where(m)] + assert_array_equal(np.where(m, d, e), r) + + r = e[:] + r[np.where(~m)] = d[np.where(~m)] + assert_array_equal(np.where(m, e, d), r) + + assert_array_equal(np.where(m, e, e), e) + + # minimal dtype result with NaN scalar (e.g required by pandas) + d = np.array([1., 2.], dtype=np.float32) + e = float('NaN') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('-Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + # With NEP 50 adopted, the float will overflow here: + e = 1e150 + with pytest.warns(RuntimeWarning, match="overflow"): + res = np.where(True, d, e) + assert res.dtype == np.float32 + + def test_ndim(self): + c = [True, False] + a = np.zeros((2, 25)) + b = np.ones((2, 25)) + r = np.where(np.array(c)[:, np.newaxis], a, b) + assert_array_equal(r[0], a[0]) + assert_array_equal(r[1], b[0]) + + a = a.T + b = b.T + r = np.where(c, a, b) + assert_array_equal(r[:, 0], a[:, 0]) + assert_array_equal(r[:, 1], b[:, 0]) + + def test_dtype_mix(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + a = np.uint32(1) + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + a = a.astype(np.float32) + b = b.astype(np.int64) + assert_equal(np.where(c, a, b), r) + + # non bool mask + c = c.astype(int) + c[c != 0] = 34242324 + assert_equal(np.where(c, a, b), r) + # invert + tmpmask = c != 0 + c[c == 0] = 41247212 + c[tmpmask] = 0 + assert_equal(np.where(c, b, a), r) + + def test_foreign(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + a = np.ones(1, dtype='>i4') + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + b = b.astype('>f8') + assert_equal(np.where(c, a, b), r) + + a = a.astype('i4') + assert_equal(np.where(c, a, b), r) + + def test_error(self): + c = [True, True] + a = np.ones((4, 5)) + b = np.ones((5, 5)) + assert_raises(ValueError, np.where, c, a, a) + 
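+        # Aside (not part of the upstream suite): both failures in this test
+        # are broadcasting errors; c has shape (2,), which cannot broadcast
+        # against the length-5 trailing axis of a and b.  Sketch (not called):
+        def _where_broadcast_error_sketch():
+            import numpy as np
+            cond = np.array([True, True])      # shape (2,)
+            x = np.ones((4, 5))
+            try:
+                np.where(cond, x, x)           # (2,) vs (4, 5): no common shape
+            except ValueError:
+                pass
+            # A condition matching the trailing axis broadcasts fine:
+            assert np.where(np.ones(5, dtype=bool), x, 0.0).shape == (4, 5)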
assert_raises(ValueError, np.where, c[0], a, b) + + def test_string(self): + # gh-4778 check strings are properly filled with nulls + a = np.array("abc") + b = np.array("x" * 753) + assert_equal(np.where(True, a, b), "abc") + assert_equal(np.where(False, b, a), "abc") + + # check native datatype sized strings + a = np.array("abcd") + b = np.array("x" * 8) + assert_equal(np.where(True, a, b), "abcd") + assert_equal(np.where(False, b, a), "abcd") + + def test_empty_result(self): + # pass empty where result through an assignment which reads the data of + # empty arrays, error detectable with valgrind, see gh-8922 + x = np.zeros((1, 1)) + ibad = np.vstack(np.where(x == 99.)) + assert_array_equal(ibad, + np.atleast_2d(np.array([[], []], dtype=np.intp))) + + def test_largedim(self): + # invalid read regression gh-9304 + shape = [10, 2, 3, 4, 5, 6] + np.random.seed(2) + array = np.random.rand(*shape) + + for i in range(10): + benchmark = array.nonzero() + result = array.nonzero() + assert_array_equal(benchmark, result) + + def test_kwargs(self): + a = np.zeros(1) + with assert_raises(TypeError): + np.where(a, x=a, y=a) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) + + def check_array(self, dtype): + elem_size = dtype(0).itemsize + + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) + + def test_array_int32(self): + self.check_array(np.int32) + + def test_array_int64(self): + self.check_array(np.int64) + + def test_array_float32(self): + self.check_array(np.float32) + + def test_array_float64(self): + self.check_array(np.float64) + + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + + @_no_tracing + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) + + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestHashing: + + def test_arrays_not_hashable(self): + x = np.ones(3) + assert_raises(TypeError, hash, x) + + def test_collections_hashable(self): + x = np.array([]) + assert_(not isinstance(x, collections.abc.Hashable)) + + +class TestArrayPriority: + # This will go away when __array_priority__ is settled, meanwhile + # it serves to check unintended changes. + op = operator + binary_ops = [ + op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, + op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, + op.ge, op.lt, op.le, op.ne, op.eq + ] + + class Foo(np.ndarray): + __array_priority__ = 100. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Bar(np.ndarray): + __array_priority__ = 101. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Other: + __array_priority__ = 1000. 
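+        # Aside (not part of the upstream suite): __array_priority__ is what
+        # makes ndarray binary ops defer to Other; a minimal sketch of the
+        # rule the tests below verify (sketch helper, never called):
+        @staticmethod
+        def _priority_sketch():
+            import numpy as np
+
+            class Wins:
+                __array_priority__ = 1000.
+
+                def __radd__(self, other):
+                    return "Wins"
+
+            # ndarray notices the higher-priority right operand, returns
+            # NotImplemented, and Python dispatches to Wins.__radd__:
+            assert np.ones(2) + Wins() == "Wins"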
+ + def _all(self, other): + return self.__class__() + + __add__ = __radd__ = _all + __sub__ = __rsub__ = _all + __mul__ = __rmul__ = _all + __pow__ = __rpow__ = _all + __mod__ = __rmod__ = _all + __truediv__ = __rtruediv__ = _all + __floordiv__ = __rfloordiv__ = _all + __and__ = __rand__ = _all + __xor__ = __rxor__ = _all + __or__ = __ror__ = _all + __lshift__ = __rlshift__ = _all + __rshift__ = __rrshift__ = _all + __eq__ = _all + __ne__ = _all + __gt__ = _all + __ge__ = _all + __lt__ = _all + __le__ = _all + + def test_ndarray_subclass(self): + a = np.array([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_ndarray_other(self): + a = np.array([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + def test_subclass_subclass(self): + a = self.Foo([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_subclass_other(self): + a = self.Foo([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + +class TestBytestringArrayNonzero: + + def test_empty_bstring_array_is_falsey(self): + assert_(not np.array([''], dtype=str)) + + def test_whitespace_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0\0' + assert_(a) + + def test_all_null_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0 \0' + assert_(a) + + +class TestUnicodeEncoding: + """ + Tests for encoding related bugs, such as UCS2 vs UCS4, round-tripping + issues, etc + """ + def test_round_trip(self): + """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """ + # gh-15363 + arr = np.zeros(shape=(), dtype="U1") + for i in range(1, sys.maxunicode + 1): + expected = chr(i) + arr[()] = expected + assert arr[()] == expected + assert arr.item() == expected + + def test_assign_scalar(self): + # gh-3258 + l = np.array(['aa', 'bb']) + l[:] = np.str_('cc') + assert_equal(l, ['cc', 'cc']) + + def test_fill_scalar(self): + # gh-7227 + l = np.array(['aa', 'bb']) + l.fill(np.str_('cc')) + assert_equal(l, ['cc', 'cc']) + + +class TestUnicodeArrayNonzero: + + def test_empty_ustring_array_is_falsey(self): + assert_(not np.array([''], dtype=np.str_)) + + def test_whitespace_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = ' \0\0' + assert_(a) + + def test_all_null_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = ' \0 \0' + assert_(a) + + +class TestFormat: + + def test_0d(self): + a = np.array(np.pi) + assert_equal(f'{a:0.3g}', '3.14') + assert_equal(f'{a[()]:0.3g}', '3.14') + + def test_1d_no_format(self): + a = np.array([np.pi]) + assert_equal(f'{a}', str(a)) + + def test_1d_format(self): + # until gh-5543, ensure that the behaviour matches what it used to be + a = np.array([np.pi]) + assert_raises(TypeError, '{:30}'.format, a) + + +from numpy.testing import IS_PYPY + + +class TestCTypes: + + def 
test_ctypes_is_available(self):
+        test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+        assert_equal(ctypes, test_arr.ctypes._ctypes)
+        assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+
+    def test_ctypes_is_not_available(self):
+        from numpy._core import _internal
+        _internal.ctypes = None
+        try:
+            test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+            assert_(isinstance(test_arr.ctypes._ctypes,
+                               _internal._missing_ctypes))
+            assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+        finally:
+            _internal.ctypes = ctypes
+
+    def _make_readonly(x):
+        x.flags.writeable = False
+        return x
+
+    @pytest.mark.parametrize('arr', [
+        np.array([1, 2, 3]),
+        np.array([['one', 'two'], ['three', 'four']]),
+        np.array((1, 2), dtype='i4,i4'),
+        np.zeros((2,), dtype=np.dtype({
+            "formats": ['<i4', '<i4'],
+            "names": ['a', 'b'],
+            "offsets": [0, 2],
+            "itemsize": 6
+        })),
+    ])
+    def test_ctypes_data_as_holds_reference(self, arr):
+        ...
+
+
+class TestWritebackIfCopy:
+    # all these tests use the WRITEBACKIFCOPY mechanism
+    def test_insert_noncontiguous(self):
+        a = np.arange(6).reshape(2, 3).T  # force non-c-contiguous
+        # uses arr_insert
+        np.place(a, a > 2, [44, 55])
+        assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+        # hit one of the failing paths
+        assert_raises(ValueError, np.place, a, a > 20, [])
+
+    def test_put_noncontiguous(self):
+        a = np.arange(6).reshape(2, 3).T  # force non-c-contiguous
+        np.put(a, [0, 2], [44, 55])
+        assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
+
+    def test_putmask_noncontiguous(self):
+        a = np.arange(6).reshape(2, 3).T  # force non-c-contiguous
+        # uses arr_putmask
+        np.putmask(a, a > 2, a**2)
+        assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
+
+    def test_take_mode_raise(self):
+        a = np.arange(6, dtype='int')
+        out = np.empty(2, dtype='int')
+        np.take(a, [0, 2], out=out, mode='raise')
+        assert_equal(out, np.array([0, 2]))
+
+    def test_choose_mod_raise(self):
+        a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
+        out = np.empty((3, 3), dtype='int')
+        choices = [-10, 10]
+        np.choose(a, choices, out=out, mode='raise')
+        assert_equal(out, np.array([[ 10, -10,  10],
+                                    [-10,  10, -10],
+                                    [ 10, -10,  10]]))
+
+    def test_flatiter__array__(self):
+        a = np.arange(9).reshape(3, 3)
+        b = a.T.flat
+        c = b.__array__()
+        # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
+        del c
+
+    def test_dot_out(self):
+        # if HAVE_CBLAS, will use WRITEBACKIFCOPY
+        a = np.arange(9, dtype=float).reshape(3, 3)
+        b = np.dot(a, a, out=a)
+        assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
+
+    def test_view_assign(self):
+        from numpy._core._multiarray_tests import (
+            npy_create_writebackifcopy,
+            npy_resolve,
+        )
+
+        arr = np.arange(9).reshape(3, 3).T
+        arr_wb = npy_create_writebackifcopy(arr)
+        assert_(arr_wb.flags.writebackifcopy)
+        assert_(arr_wb.base is arr)
+        arr_wb[...] = -100
+        npy_resolve(arr_wb)
+        # arr changes after resolve, even though we assigned to arr_wb
+        assert_equal(arr, -100)
+        # after resolve, the two arrays no longer reference each other
+        assert_(arr_wb.ctypes.data != 0)
+        assert_equal(arr_wb.base, None)
+        # assigning to arr_wb does not get transferred to arr
+        arr_wb[...] = 100
+        assert_equal(arr, -100)
+
+    @pytest.mark.leaks_references(
+        reason="increments self in dealloc; ignore since deprecated path.")
+    def test_dealloc_warning(self):
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning)
+            arr = np.arange(9).reshape(3, 3)
+            v = arr.T
+            _multiarray_tests.npy_abuse_writebackifcopy(v)
+            assert len(sup.log) == 1
+
+    def test_view_discard_refcount(self):
+        from numpy._core._multiarray_tests import (
+            npy_create_writebackifcopy,
+            npy_discard,
+        )
+
+        arr = np.arange(9).reshape(3, 3).T
+        orig = arr.copy()
+        if HAS_REFCOUNT:
+            arr_cnt = sys.getrefcount(arr)
+        arr_wb = npy_create_writebackifcopy(arr)
+        assert_(arr_wb.flags.writebackifcopy)
+        assert_(arr_wb.base is arr)
+        arr_wb[...]
= -100
+        npy_discard(arr_wb)
+        # arr remains unchanged after discard
+        assert_equal(arr, orig)
+        # after discard, the two arrays no longer reference each other
+        assert_(arr_wb.ctypes.data != 0)
+        assert_equal(arr_wb.base, None)
+        if HAS_REFCOUNT:
+            assert_equal(arr_cnt, sys.getrefcount(arr))
+        # assigning to arr_wb does not get transferred to arr
+        arr_wb[...] = 100
+        assert_equal(arr, orig)
+
+
+class TestArange:
+    def test_infinite(self):
+        assert_raises_regex(
+            ValueError, "size exceeded",
+            np.arange, 0, np.inf
+        )
+
+    def test_nan_step(self):
+        assert_raises_regex(
+            ValueError, "cannot compute length",
+            np.arange, 0, 1, np.nan
+        )
+
+    def test_zero_step(self):
+        assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
+        assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
+
+        # empty range
+        assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
+        assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
+
+    def test_require_range(self):
+        assert_raises(TypeError, np.arange)
+        assert_raises(TypeError, np.arange, step=3)
+        assert_raises(TypeError, np.arange, dtype='int64')
+        assert_raises(TypeError, np.arange, start=4)
+
+    def test_start_stop_kwarg(self):
+        keyword_stop = np.arange(stop=3)
+        keyword_zerotostop = np.arange(0, stop=3)
+        keyword_start_stop = np.arange(start=3, stop=9)
+
+        assert len(keyword_stop) == 3
+        assert len(keyword_zerotostop) == 3
+        assert len(keyword_start_stop) == 6
+        assert_array_equal(keyword_stop, keyword_zerotostop)
+
+    def test_arange_booleans(self):
+        # Arange makes some sense for booleans and works up to length 2.
+        # But it is weird since `arange(2, 4, dtype=bool)` works.
+        # Arguably, much or all of this could be deprecated/removed.
+        res = np.arange(False, dtype=bool)
+        assert_array_equal(res, np.array([], dtype="bool"))
+
+        res = np.arange(True, dtype="bool")
+        assert_array_equal(res, [False])
+
+        res = np.arange(2, dtype="bool")
+        assert_array_equal(res, [False, True])
+
+        # This case is especially weird, but drops out without special case:
+        res = np.arange(6, 8, dtype="bool")
+        assert_array_equal(res, [True, True])
+
+        with pytest.raises(TypeError):
+            np.arange(3, dtype="bool")
+
+    @pytest.mark.parametrize("dtype", ["S3", "U", "5i"])
+    def test_rejects_bad_dtypes(self, dtype):
+        dtype = np.dtype(dtype)
+        DType_name = re.escape(str(type(dtype)))
+        with pytest.raises(TypeError,
+                match=rf"arange\(\) not supported for inputs .* {DType_name}"):
+            np.arange(2, dtype=dtype)
+
+    def test_rejects_strings(self):
+        # Explicitly test error for strings which may call "b" - "a":
+        DType_name = re.escape(str(type(np.array("a").dtype)))
+        with pytest.raises(TypeError,
+                match=rf"arange\(\) not supported for inputs .* {DType_name}"):
+            np.arange("a", "b")
+
+    def test_byteswapped(self):
+        res_be = np.arange(1, 1000, dtype=">i4")
+        res_le = np.arange(1, 1000, dtype="<i4")
+        assert res_be.dtype == ">i4"
+        assert res_le.dtype == "<i4"
+        assert_array_equal(res_le, res_be)
+
+
+@pytest.mark.parametrize("op", [
+    operator.eq, operator.ne, operator.le, operator.lt, operator.ge,
+    operator.gt])
+def test_comparisons_forwards_error(op):
+    class NotArray:
+        def __array__(self, dtype=None, copy=None):
+            raise TypeError("run you fools")
+
+    with pytest.raises(TypeError, match="run you fools"):
+        op(np.arange(2), NotArray())
+
+    with pytest.raises(TypeError, match="run you fools"):
+        op(NotArray(), np.arange(2))
+
+
+def test_richcompare_scalar_boolean_singleton_return():
+    # These are currently guaranteed to be the boolean numpy singletons
+    assert (np.array(0) == "a") is np.bool_(False)
+    assert (np.array(0) != "a") is np.bool_(True)
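+    # Aside (not part of the upstream suite): the TestArange error cases
+    # above follow from the length computation ceil((stop - start) / step);
+    # an infinite or nan length is why np.arange raises there.  Sketch
+    # (helper, never called):
+    def _arange_length_sketch():
+        import math
+        import numpy as np
+        start, stop, step = 0.0, 10.0, 3.0
+        length = math.ceil((stop - start) / step)
+        assert len(np.arange(start, stop, step)) == length == 4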
assert (np.int16(0) == "a") is np.bool_(False) + assert (np.int16(0) != "a") is np.bool_(True) + + +@pytest.mark.parametrize("op", [ + operator.eq, operator.ne, operator.le, operator.lt, operator.ge, + operator.gt]) +def test_ragged_comparison_fails(op): + # This needs to convert the internal array to True/False, which fails: + a = np.array([1, np.array([1, 2, 3])], dtype=object) + b = np.array([1, np.array([1, 2, 3])], dtype=object) + + with pytest.raises(ValueError, match="The truth value.*ambiguous"): + op(a, b) + + +@pytest.mark.parametrize( + ["fun", "npfun"], + [ + (_multiarray_tests.npy_cabs, np.absolute), + (_multiarray_tests.npy_carg, np.angle) + ] +) +@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__()) +def test_npymath_complex(fun, npfun, x, y, test_dtype): + # Smoketest npymath functions + z = test_dtype(complex(x, y)) + with np.errstate(invalid='ignore'): + # Fallback implementations may emit a warning for +-inf (see gh-24876): + # RuntimeWarning: invalid value encountered in absolute + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + + +def test_npymath_real(): + # Smoketest npymath functions + from numpy._core._multiarray_tests import ( + npy_cosh, + npy_log10, + npy_sinh, + npy_tan, + npy_tanh, + ) + + funcs = {npy_log10: np.log10, + npy_cosh: np.cosh, + npy_sinh: np.sinh, + npy_tan: np.tan, + npy_tanh: np.tanh} + vals = (1, np.inf, -np.inf, np.nan) + types = (np.float32, np.float64, np.longdouble) + + with np.errstate(all='ignore'): + for fun, npfun in funcs.items(): + for x, t in itertools.product(vals, types): + z = t(x) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + +def test_uintalignment_and_alignment(): + # alignment code needs to satisfy these requirements: + # 1. numpy structs match C struct layout + # 2. ufuncs/casting is safe wrt to aligned access + # 3. copy code is safe wrt to "uint alidned" access + # + # Complex types are the main problem, whose alignment may not be the same + # as their "uint alignment". + # + # This test might only fail on certain platforms, where uint64 alignment is + # not equal to complex64 alignment. The second 2 tests will only fail + # for DEBUG=1. + + d1 = np.dtype('u1,c8', align=True) + d2 = np.dtype('u4,c8', align=True) + d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True) + + assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False) + + # check that C struct matches numpy struct size + s = _multiarray_tests.get_struct_alignments() + for d, (alignment, size) in zip([d1, d2, d3], s): + assert_equal(d.alignment, alignment) + assert_equal(d.itemsize, size) + + # check that ufuncs don't complain in debug mode + # (this is probably OK if the aligned flag is true above) + src = np.zeros((2, 2), dtype=d1)['f1'] # 4-byte aligned, often + np.exp(src) # assert fails? + + # check that copy code doesn't complain in debug mode + dst = np.zeros((2, 2), dtype='c8') + dst[:, 1] = src[:, 1] # assert in lowlevel_strided_loops fails? + +class TestAlignment: + # adapted from scipy._lib.tests.test__util.test__aligned_zeros + # Checks that unusual memory alignments don't trip up numpy. 
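+    # Aside (not part of the upstream suite): a rough sketch of how a helper
+    # like _aligned_zeros can place data on a chosen alignment, by taking an
+    # offset view into an oversized byte buffer (hypothetical helper, not
+    # the real implementation):
+    def _aligned_zeros_sketch(self, n=3, align=32):
+        import numpy as np
+        dtype = np.dtype(np.float64)
+        buf = np.zeros(n * dtype.itemsize + align, dtype=np.uint8)
+        addr = buf.__array_interface__['data'][0]
+        offset = (-addr) % align
+        x = buf[offset:offset + n * dtype.itemsize].view(dtype)
+        assert x.__array_interface__['data'][0] % align == 0
+        return x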
+ + def check(self, shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError + + def test_various_alignments(self): + for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: + if dtype == 'O': + # object dtype can't be misaligned + continue + for shape in [n, (1, 2, 3, n)]: + self.check(shape, np.dtype(dtype), order, align) + + def test_strided_loop_alignments(self): + # particularly test that complex64 and float128 use right alignment + # code-paths, since these are particularly problematic. It is useful to + # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run. + for align in [1, 2, 4, 8, 12, 16, None]: + xf64 = _aligned_zeros(3, np.float64) + + xc64 = _aligned_zeros(3, np.complex64, align=align) + xf128 = _aligned_zeros(3, np.longdouble, align=align) + + # test casting, both to and from misaligned + with suppress_warnings() as sup: + sup.filter(ComplexWarning, "Casting complex values") + xc64.astype('f8') + xf64.astype(np.complex64) + test = xc64 + xf64 + + xf128.astype('f8') + xf64.astype(np.longdouble) + test = xf128 + xf64 + + test = xf128 + xc64 + + # test copy, both to and from misaligned + # contig copy + xf64[:] = xf64.copy() + xc64[:] = xc64.copy() + xf128[:] = xf128.copy() + # strided copy + xf64[::2] = xf64[::2].copy() + xc64[::2] = xc64[::2].copy() + xf128[::2] = xf128[::2].copy() + +def test_getfield(): + a = np.arange(32, dtype='uint16') + if sys.byteorder == 'little': + i = 0 + j = 1 + else: + i = 1 + j = 0 + b = a.getfield('int8', i) + assert_equal(b, a) + b = a.getfield('int8', j) + assert_equal(b, 0) + pytest.raises(ValueError, a.getfield, 'uint8', -1) + pytest.raises(ValueError, a.getfield, 'uint8', 16) + pytest.raises(ValueError, a.getfield, 'uint64', 0) + + +class TestViewDtype: + """ + Verify that making a view of a non-contiguous array works as expected. 
+ """ + def test_smaller_dtype_multiple(self): + # x is non-contiguous + x = np.arange(10, dtype=' rc_a) + assert_(sys.getrefcount(dt) > rc_dt) + # del 'it' + it = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + # With a copy + a = arange(6, dtype='f4') + dt = np.dtype('f4') + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + it = nditer(a, [], + [['readwrite']], + op_dtypes=[dt]) + rc2_a = sys.getrefcount(a) + rc2_dt = sys.getrefcount(dt) + it2 = it.copy() + assert_(sys.getrefcount(a) > rc2_a) + if sys.version_info < (3, 13): + # np.dtype('f4') is immortal after Python 3.13 + assert_(sys.getrefcount(dt) > rc2_dt) + it = None + assert_equal(sys.getrefcount(a), rc2_a) + assert_equal(sys.getrefcount(dt), rc2_dt) + it2 = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + +def test_iter_best_order(): + # The iterator should always find the iteration order + # with increasing memory addresses + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, [], [['readonly']]) + assert_equal(list(i), a) + # Fortran-order + i = nditer(aview.T, [], [['readonly']]) + assert_equal(list(i), a) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) + assert_equal(list(i), a) + +def test_iter_c_order(): + # Test forcing C order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='C') + assert_equal(list(i), aview.ravel(order='C')) + # Fortran-order + i = nditer(aview.T, order='C') + assert_equal(list(i), aview.T.ravel(order='C')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='C') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='C')) + +def test_iter_f_order(): + # Test forcing F order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='F') + assert_equal(list(i), aview.ravel(order='F')) + # Fortran-order + i = nditer(aview.T, order='F') + assert_equal(list(i), aview.T.ravel(order='F')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='F') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='F')) + +def test_iter_c_or_f_order(): + # Test forcing any contiguous (C or F) order + + # Test the ordering for 1-D to 5-D 
shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='A') + assert_equal(list(i), aview.ravel(order='A')) + # Fortran-order + i = nditer(aview.T, order='A') + assert_equal(list(i), aview.T.ravel(order='A')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='A') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='A')) + +def test_nditer_multi_index_set(): + # Test the multi_index set + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + + # Removes the iteration on two first elements of a[0] + it.multi_index = (0, 2,) + + assert_equal(list(it), [2, 3, 4, 5]) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_nditer_multi_index_set_refcount(): + # Test if the reference count on index variable is decreased + + index = 0 + i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index']) + + start_count = sys.getrefcount(index) + i.multi_index = (index,) + end_count = sys.getrefcount(index) + + assert_equal(start_count, end_count) + +def test_iter_best_order_multi_index_1d(): + # The multi-indices should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) + # 1D reversed order + i = nditer(a[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) + +def test_iter_best_order_multi_index_2d(): + # The multi-indices should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) + +def test_iter_best_order_multi_index_3d(): + # The multi-indices should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['multi_index'], 
[['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), + (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), + (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), + (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), + (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), + (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), + (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + +def test_iter_best_order_c_index_1d(): + # The C index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_c_index_2d(): + # The C index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + 
['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) + +def test_iter_best_order_c_index_3d(): + # The C index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + +def test_iter_best_order_f_index_1d(): + # The Fortran index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_f_index_2d(): + # The Fortran index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + +def test_iter_best_order_f_index_3d(): + # The Fortran index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D reversed C-order + i = 
nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + +def test_iter_no_inner_full_coalesce(): + # Check no_inner iterators which coalesce into a single inner loop + + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + size = np.prod(shape) + a = arange(size) + # Test each combination of forward and backwards indexing + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Fortran-order + i = nditer(aview.T, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), + ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + +def test_iter_no_inner_dim_coalescing(): + # Check no_inner iterators whose dimensions may not coalesce completely + + # Skipping the last element in a dimension prevents coalescing + # with the next-bigger dimension + a = arange(24).reshape(2, 3, 4)[:, :, :-1] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (3,)) + a = arange(24).reshape(2, 3, 4)[:, :-1, :] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (8,)) + a = arange(24).reshape(2, 3, 4)[:-1, :, :] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (12,)) + + # Even with lots of 1-sized dimensions, should still coalesce + a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (24,)) + +def test_iter_dim_coalescing(): + # Check that the correct number of dimensions are coalesced + + # Tracking a multi-index disables coalescing + a = arange(24).reshape(2, 3, 4) + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # A tracked index can allow coalescing if it's compatible with the array + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['f_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = 
nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # When C or F order is forced, coalescing may still occur + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, order='C') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='C') + assert_equal(i.ndim, 3) + i = nditer(a3d, order='F') + assert_equal(i.ndim, 3) + i = nditer(a3d.T, order='F') + assert_equal(i.ndim, 1) + i = nditer(a3d, order='A') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='A') + assert_equal(i.ndim, 1) + +def test_iter_broadcasting(): + # Standard NumPy broadcasting rules + + # 1D with scalar + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (6,)) + + # 2D with scalar + i = nditer([arange(6).reshape(2, 3), np.int32(2)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 1D + i = nditer([arange(6).reshape(2, 3), arange(3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 2D + i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + + # 3D with scalar + i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 1D + i = nditer([arange(3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 2D + i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 3D + i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), + arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']] * 3) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + +def test_iter_itershape(): + # Check that allocated outputs work with a specified shape + a = np.arange(6, dtype='i2').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (2, 3, 4)) + assert_equal(i.operands[1].strides, (24, 8, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + 
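+ # A minimal sketch of the itershape mechanism exercised here, assuming the
+ # module's "import numpy as np" (the helper name is illustrative only):
+ # a -1 entry lets the iterator infer that dimension from the inputs, while
+ # a concrete entry sizes the extra axis that op_axes maps to no input axis.
+ def _sketch_itershape_allocation():
+     a = np.arange(6, dtype='i2').reshape(2, 3)
+     it = np.nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+                    op_axes=[[0, 1, None], None], itershape=(-1, -1, 4))
+     # the output picks up (2, 3) from 'a' plus the explicit length-4 axis
+     assert it.operands[1].shape == (2, 3, 4)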
assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (8, 24, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + order='F', + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (2, 6, 12)) + + # If we specify 1 in the itershape, it shouldn't allow broadcasting + # of that dimension to a bigger value + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, 1, 4)) + # Test bug that for no op_axes but itershape, they are NULLed correctly + i = np.nditer([np.ones(2), None, None], itershape=(2,)) + +def test_iter_broadcasting_errors(): + # Check that errors are thrown for bad broadcasting shapes + + # 1D with 1D + assert_raises(ValueError, nditer, [arange(2), arange(3)], + [], [['readonly']] * 2) + # 2D with 1D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(2)], + [], [['readonly']] * 2) + # 2D with 2D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], + [], [['readonly']] * 2) + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], + [], [['readonly']] * 2) + # 3D with 3D + assert_raises(ValueError, nditer, + [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], + [], [['readonly']] * 2) + assert_raises(ValueError, nditer, + [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], + [], [['readonly']] * 2) + + # Verify that the error message mentions the right shapes + try: + nditer([arange(2).reshape(1, 2, 1), + arange(3).reshape(1, 3), + arange(6).reshape(2, 3)], + [], + [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the 3rd operand + assert_(msg.find('(2,3)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,3)') + # The message should contain the broadcast shape + assert_(msg.find('(1,2,3)') >= 0, + f'Message "{msg}" doesn\'t contain broadcast shape (1,2,3)') + + try: + nditer([arange(6).reshape(2, 3), arange(2)], + [], + [['readonly'], ['readonly']], + op_axes=[[0, 1], [0, np.newaxis]], + itershape=(4, 3)) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain "shape->remappedshape" for each operand + assert_(msg.find('(2,3)->(2,3)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,3)->(2,3)') + assert_(msg.find('(2,)->(2,newaxis)') >= 0, + ('Message "%s" doesn\'t contain remapped operand shape' + '(2,)->(2,newaxis)') % msg) + # The message should contain the itershape parameter + assert_(msg.find('(4,3)') >= 0, + f'Message "{msg}" doesn\'t contain itershape parameter (4,3)') + + try: + nditer([np.zeros((2, 1, 1)), np.zeros((2,))], + [], + [['writeonly', 'no_broadcast'], ['readonly']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the bad operand + assert_(msg.find('(2,1,1)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,1,1)') + # The message should contain the broadcast shape + assert_(msg.find('(2,1,2)') >= 0, + f'Message "{msg}" doesn\'t contain the broadcast shape (2,1,2)') + +def test_iter_flags_errors(): + # Check that bad combinations of flags produce errors + + a = 
arange(6) + + # Not enough operands + assert_raises(ValueError, nditer, [], [], []) + # Bad global flag + assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) + # Bad op flag + assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) + # Bad order parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') + # Bad casting parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') + # op_flags must match ops + assert_raises(ValueError, nditer, [a] * 3, [], [['readonly']] * 2) + # Cannot track both a C and an F index + assert_raises(ValueError, nditer, a, + ['c_index', 'f_index'], [['readonly']]) + # Inner iteration and multi-indices/indices are incompatible + assert_raises(ValueError, nditer, a, + ['external_loop', 'multi_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'c_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'f_index'], [['readonly']]) + # Must specify exactly one of readwrite/readonly/writeonly per operand + assert_raises(ValueError, nditer, a, [], [[]]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, + [], [['readonly', 'writeonly', 'readwrite']]) + # Python scalars are always readonly + assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) + assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) + # Array scalars are always readonly + assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) + assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) + # Check readonly array + a.flags.writeable = False + assert_raises(ValueError, nditer, a, [], [['writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readwrite']]) + a.flags.writeable = True + # Multi-indices available only with the multi_index flag + i = nditer(arange(6), [], [['readonly']]) + assert_raises(ValueError, lambda i: i.multi_index, i) + # Index available only with an index flag + assert_raises(ValueError, lambda i: i.index, i) + # GotoCoords and GotoIndex incompatible with buffering or no_inner + + def assign_multi_index(i): + i.multi_index = (0,) + + def assign_index(i): + i.index = 0 + + def assign_iterindex(i): + i.iterindex = 0 + + def assign_iterrange(i): + i.iterrange = (0, 1) + i = nditer(arange(6), ['external_loop']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterindex, i) + assert_raises(ValueError, assign_iterrange, i) + i = nditer(arange(6), ['buffered']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterrange, i) + # Can't iterate if size is zero + assert_raises(ValueError, nditer, np.array([])) + +def test_iter_slice(): + a, b, c = np.arange(3), np.arange(3), np.arange(3.) + i = nditer([a, b, c], [], ['readwrite']) + with i: + i[0:2] = (3, 3) + assert_equal(a, [3, 1, 2]) + assert_equal(b, [3, 1, 2]) + assert_equal(c, [0, 1, 2]) + i[1] = 12 + assert_equal(i[0:2], [3, 12]) + +def test_iter_assign_mapping(): + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + it.operands[0][...] = 3 + it.operands[0][...] 
= 14 + assert_equal(a, 14) + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + x = it.operands[0][-1:1] + x[...] = 14 + it.operands[0][...] = -1234 + assert_equal(a, -1234) + # check for no warnings on dealloc + x = None + it = None + +def test_iter_nbo_align_contig(): + # Check that byte order, alignment, and contig changes work + + # Byte order change by requesting a specific dtype + a = np.arange(6, dtype='f4') + au = a.byteswap() + au = au.view(au.dtype.newbyteorder()) + assert_(a.dtype.byteorder != au.dtype.byteorder) + i = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + with i: + # context manager triggers WRITEBACKIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 2 + assert_equal(au, [2] * 6) + del i # should not raise a warning + # Byte order change by requesting NBO + a = np.arange(6, dtype='f4') + au = a.byteswap() + au = au.view(au.dtype.newbyteorder()) + assert_(a.dtype.byteorder != au.dtype.byteorder) + with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], + casting='equiv') as i: + # context manager triggers WRITEBACKIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 12345 + i.operands[0][:] = 2 + assert_equal(au, [2] * 6) + + # Unaligned input + a = np.zeros((6 * 4 + 1,), dtype='i1')[1:] + a.dtype = 'f4' + a[:] = np.arange(6, dtype='f4') + assert_(not a.flags.aligned) + # Without 'aligned', shouldn't copy + i = nditer(a, [], [['readonly']]) + assert_(not i.operands[0].flags.aligned) + assert_equal(i.operands[0], a) + # With 'aligned', should make a copy + with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: + assert_(i.operands[0].flags.aligned) + # context manager triggers WRITEBACKIFCOPY on i at exit + assert_equal(i.operands[0], a) + i.operands[0][:] = 3 + assert_equal(a, [3] * 6) + + # Discontiguous input + a = arange(12) + # If it is contiguous, shouldn't copy + i = nditer(a[:6], [], [['readonly']]) + assert_(i.operands[0].flags.contiguous) + assert_equal(i.operands[0], a[:6]) + # If it isn't contiguous, should buffer + i = nditer(a[::2], ['buffered', 'external_loop'], + [['readonly', 'contig']], + buffersize=10) + assert_(i[0].flags.contiguous) + assert_equal(i[0], a[::2]) + +def test_iter_array_cast(): + # Check that arrays are cast as requested + + # No cast 'f4' -> 'f4' + a = np.arange(6, dtype='f4').reshape(2, 3) + i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) + with i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + + # Byte-order cast '<f4' -> '>f4' + a = np.arange(6, dtype='<f4').reshape(2, 3) + with nditer(a, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('>f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('>f4')) + + # Safe case 'f4' -> 'f8' + a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + # The memory layout of the temporary should match a (a is (48,4,16)) + # except negative strides get flipped to positive strides.
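+ # A short standalone sketch of the 'readonly' + 'copy' cast behaviour the
+ # checks below rely on (helper name illustrative only): requesting a larger
+ # dtype with casting='safe' makes the iterator expose a cast temporary, and
+ # that temporary keeps the axis ordering of the original operand.
+ def _sketch_cast_copy_layout():
+     a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
+     it = np.nditer(a, [], [['readonly', 'copy']], casting='safe',
+                    op_dtypes=[np.dtype('f8')])
+     temp = it.operands[0]
+     assert temp.dtype == np.dtype('f8')
+     # same axis order as 'a', with 8-byte items: strides (96, 8, 32)
+     assert temp.strides == (96, 8, 32)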
+ assert_equal(i.operands[0].strides, (96, 8, 32)) + a = a[::-1, :, ::-1] + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + assert_equal(i.operands[0].strides, (96, 8, 32)) + + # Same-kind cast 'f8' -> 'f4' -> 'f8' + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + assert_equal(i.operands[0].strides, (4, 16, 48)) + # Check that WRITEBACKIFCOPY is activated at exit + i.operands[0][2, 1, 1] = -12.5 + assert_(a[2, 1, 1] != -12.5) + # (the write-back to 'a' happens when the 'with' block above exits) + assert_equal(a[2, 1, 1], -12.5) + + a = np.arange(6, dtype='i4')[::-2] + with nditer(a, [], + [['writeonly', 'updateifcopy']], + casting='unsafe', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0].dtype, np.dtype('f4')) + # Even though the stride was negative in 'a', it + # becomes positive in the temporary + assert_equal(i.operands[0].strides, (4,)) + i.operands[0][:] = [1, 2, 3] + assert_equal(a, [1, 2, 3]) + +def test_iter_array_cast_errors(): + # Check that invalid casts are caught + + # Need to enable copying for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly']], op_dtypes=[np.dtype('f8')]) + # Also need to allow casting for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='equiv', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='no', + op_dtypes=[np.dtype('f4')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + # '<f4' -> '>f4' should not work with casting='no' + assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('>f4')]) + # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], + [['writeonly', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + +def test_iter_scalar_cast(): + # Check that scalars are cast as requested + + # No cast 'f4' -> 'f4' + i = nditer(np.float32(2.5), [], [['readonly']], + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Safe cast 'f4' -> 'f8' + i = nditer(np.float32(2.5), [], + [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.value.dtype, np.dtype('f8')) + assert_equal(i.value, 2.5) + # Same-kind cast 'f8' -> 'f4' + i = nditer(np.float64(2.5), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + 
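+ # An illustrative aside (helper name not part of the suite; assumes the
+ # module's numpy import): readonly scalars are special-cased, so a cast
+ # can be requested without 'copy' or 'buffered', and the iterator already
+ # exposes the cast result, as the checks further below also verify.
+ def _sketch_readonly_scalar_cast():
+     it = np.nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
+     # the Python int 3 is exposed as a float64 value
+     assert it[0].dtype == np.dtype('f8')
+     assert it[0] == 3.0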
assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Unsafe cast 'f8' -> 'i4' + i = nditer(np.float64(3.0), [], + [['readonly', 'copy']], + casting='unsafe', + op_dtypes=[np.dtype('i4')]) + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.value.dtype, np.dtype('i4')) + assert_equal(i.value, 3) + # Readonly scalars may be cast even without setting COPY or BUFFERED + i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) + assert_equal(i[0].dtype, np.dtype('f8')) + assert_equal(i[0], 3.) + +def test_iter_scalar_cast_errors(): + # Check that invalid casts are caught + + # Need to allow copying/buffering for write casts of scalars to occur + assert_raises(TypeError, nditer, np.float32(2), [], + [['readwrite']], op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, 2.5, [], + [['readwrite']], op_dtypes=[np.dtype('f4')]) + # 'f8' -> 'f4' isn't a safe cast if the value would overflow + assert_raises(TypeError, nditer, np.float64(1e60), [], + [['readonly']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, np.float32(2), [], + [['readonly']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + +def test_iter_object_arrays_basic(): + # Check that object arrays work + + obj = {'a': 3, 'b': 'd'} + a = np.array([[1, 2, 3], None, obj, None], dtype='O') + if HAS_REFCOUNT: + rc = sys.getrefcount(obj) + + # Need to allow references for object arrays + assert_raises(TypeError, nditer, a) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a, ['refs_ok'], ['readonly']) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readonly'], order='C') + assert_(i.iterationneedsapi) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readwrite'], order='C') + with i: + for x in i: + x[...] = None + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_(sys.getrefcount(obj) == rc - 1) + assert_equal(a, np.array([None] * 4, dtype='O')) + +def test_iter_object_arrays_conversions(): + # Conversions to/from objects + a = np.arange(6, dtype='O') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + a = np.arange(6, dtype='i4') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + # Non-contiguous object array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) + a = a['a'] + a[:] = np.arange(6) + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + # Non-contiguous value array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) + a = a['a'] + a[:] = np.arange(6) + 98172488 + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + ob = i[0][()] + if HAS_REFCOUNT: + rc = sys.getrefcount(ob) + for x in i: + x[...] 
+= 1 + if HAS_REFCOUNT: + newrc = sys.getrefcount(ob) + assert_(newrc == rc - 1) + assert_equal(a, np.arange(6) + 98172489) + +def test_iter_common_dtype(): + # Check that the iterator finds a common data type correctly + # (some checks are somewhat duplicate after adopting NEP 50) + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='same_kind') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), + array([2j], dtype='c8'), array([9], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 4, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + assert_equal(i.dtypes[3], np.dtype('c16')) + assert_equal(i.value, (3, -12, 2j, 9)) + + # When allocating outputs, other outputs aren't factored in + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.dtypes[1], np.dtype('i4')) + assert_equal(i.dtypes[2], np.dtype('c16')) + # But, if common data types are requested, they are + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], + ['common_dtype'], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + +def test_iter_copy_if_overlap(): + # Ensure the iterator makes copies on read/write overlap, if requested + + # Copy not needed, 1 op + for flag in ['readonly', 'writeonly', 'readwrite']: + a = arange(10) + i = nditer([a], ['copy_if_overlap'], [[flag]]) + with i: + assert_(i.operands[0] is a) + + # Copy needed, 2 ops, read-write overlap + x = arange(10) + a = x[1:] + b = x[:-1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy not needed with elementwise, 2 ops, exactly same arrays + x = arange(10) + a = x + b = x + i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], + ['readwrite', 'overlap_assume_elementwise']]) + with i: + assert_(i.operands[0] is a and i.operands[1] is b) + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) + + # Copy not needed, 2 ops, no overlap + x = arange(10) + a = x[::2] + b = 
x[1::2] + i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) + assert_(i.operands[0] is a and i.operands[1] is b) + + # Copy needed, 2 ops, read-write overlap + x = arange(4, dtype=np.int8) + a = x[3:] + b = x.view(np.int32)[:1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy needed, 3 ops, read-write overlap + for flag in ['writeonly', 'readwrite']: + x = np.ones([10, 10]) + a = x + b = x.T + c = x + with nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], [flag]]) as i: + a2, b2, c2 = i.operands + assert_(not np.shares_memory(a2, c2)) + assert_(not np.shares_memory(b2, c2)) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = x.T + c = x + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = np.ones([10, 10]) + c = x.T + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, write-only overlap + x = np.arange(7) + a = x[:3] + b = x[3:6] + c = x[4:7] + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['writeonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + +def test_iter_op_axes(): + # Check that custom axes work + + # Reverse the axes + a = arange(6).reshape(2, 3) + i = nditer([a, a.T], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 0]]) + assert_(all([x == y for (x, y) in i])) + a = arange(24).reshape(2, 3, 4) + i = nditer([a.T, a], [], [['readonly']] * 2, op_axes=[[2, 1, 0], None]) + assert_(all([x == y for (x, y) in i])) + + # Broadcast 1D to any dimension + a = arange(1, 31).reshape(2, 3, 5) + b = arange(1, 3) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [0, -1, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(2, 1, 1)).ravel()) + b = arange(1, 4) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 3, 1)).ravel()) + b = arange(1, 6) + i = nditer([a, b], [], [['readonly']] * 2, + op_axes=[None, [np.newaxis, np.newaxis, 0]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 1, 5)).ravel()) + + # Inner product-style broadcasting + a = arange(24).reshape(2, 3, 4) + b = arange(40).reshape(5, 2, 4) + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, + op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) + assert_equal(i.shape, (2, 3, 5, 2)) + + # Matrix product-style broadcasting + a = arange(12).reshape(3, 4) + b = arange(20).reshape(4, 5) + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, + op_axes=[[0, -1], [-1, 1]]) + assert_equal(i.shape, (3, 5)) + +def test_iter_op_axes_errors(): + # Check that custom axes throws errors for bad inputs + + # Wrong number of items in op_axes + a = arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0], [1], [0]]) + # Out of bounds items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[2, 1], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [2, -1]]) + # Duplicate items in op_axes + assert_raises(ValueError, nditer, [a, a], [], 
[['readonly']] * 2, + op_axes=[[0, 0], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [1, 1]]) + + # Different sized arrays in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [0, 1, 0]]) + + # Non-broadcastable dimensions in the result + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [1, 0]]) + +def test_iter_copy(): + # Check that copying the iterator works correctly + a = arange(24).reshape(2, 3, 4) + + # Simple iterator + i = nditer(a) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Buffered iterator + i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (3, 9) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (2, 18) + next(i) + next(i) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Casting iterator + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + + a = arange(24, dtype=' unstructured (any to object), and many other + # casts, which cause this to require all steps in the casting machinery + # one level down as well as the iterator copy (which uses NpyAuxData clone) + in_dtype = np.dtype([("a", np.dtype("i,")), + ("b", np.dtype(">i,<i,>d,S17,>d,3f,O,i1"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype(">i,>i,S17,>d,>U3,3d,i1,O"))]) + arr = np.ones(1000, dtype=in_dtype) + + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + expected = arr["a"].astype(out_dtype["a"]) + assert_array_equal(res1["a"], expected) + assert_array_equal(res2["a"], expected) + + for field in in_dtype["b"].names: + # Note that the .base avoids the subarray field + expected = arr["b"][field].astype(out_dtype["b"][field].base) + assert_array_equal(res1["b"][field], expected) + assert_array_equal(res2["b"][field], expected) + + +def test_iter_copy_casts_structured2(): + # Similar to the above, this is a fairly arcane test to cover internals + in_dtype = np.dtype([("a", np.dtype("O,O")), + ("b", np.dtype("5O,3O,(1,)O,(1,)i,(1,)O"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype("O,3i,4O,4O,4i"))]) + + arr = np.ones(1, dtype=in_dtype) + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + # Array of two structured scalars: + for res in res1, res2: + # Cast to tuple by getitem, which may be weird and changeable?: + assert isinstance(res["a"][0], tuple) + assert res["a"][0] == (1, 1) + + for res in res1, res2: + assert_array_equal(res["b"]["f0"][0], np.ones(5, dtype=object)) + assert_array_equal(res["b"]["f1"], np.ones((1, 3), dtype="i")) + assert res["b"]["f2"].shape == (1, 4) + assert_array_equal(res["b"]["f2"][0], np.ones(4, dtype=object)) + assert_array_equal(res["b"]["f3"][0], np.ones(4, dtype=object)) + assert_array_equal(res["b"]["f4"][0], np.ones(4, dtype="i")) + + +def 
test_iter_allocate_output_simple(): + # Check that the iterator will properly allocate outputs + + # Simple case + a = arange(6) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_buffered_readwrite(): + # Allocated output with buffering + delay_bufalloc + + a = arange(6) + i = nditer([a, None], ['buffered', 'delay_bufalloc'], + [['readonly'], ['allocate', 'readwrite']]) + with i: + i.operands[1][:] = 1 + i.reset() + for x in i: + x[1][...] += x[0][...] + assert_equal(i.operands[1], a + 1) + +def test_iter_allocate_output_itorder(): + # The allocated output should match the iteration order + + # C-order input, best iteration order + a = arange(6, dtype='i4').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # F-order input, best iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).T + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # Non-contiguous input, C iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']], + order='C', + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, (32, 16, 4)) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_opaxes(): + # Specifying op_axes should work + + a = arange(24, dtype='i4').reshape(2, 3, 4) + i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']], + op_dtypes=[np.dtype('u4'), None], + op_axes=[[1, 2, 0], None]) + assert_equal(i.operands[0].shape, (4, 2, 3)) + assert_equal(i.operands[0].strides, (4, 48, 16)) + assert_equal(i.operands[0].dtype, np.dtype('u4')) + +def test_iter_allocate_output_types_promotion(): + # Check type promotion of automatic outputs (this was more interesting + # before NEP 50...) 
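+ # A compact sketch of the rule checked below, assuming NEP 50 style
+ # promotion (the helper name is illustrative only): an allocated output
+ # receives the common dtype of the array inputs, i.e. what np.result_type
+ # would report for them.
+ def _sketch_allocated_output_promotion():
+     it = np.nditer([np.array([3], dtype='i4'), np.array([0], dtype='f4'), None],
+                    [], [['readonly']] * 2 + [['writeonly', 'allocate']])
+     assert it.dtypes[2] == np.result_type('i4', 'f4')
+     assert it.dtypes[2] == np.dtype('f8')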
+ + i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('i8')) + +def test_iter_allocate_output_types_byte_order(): + # Verify the rules for byte order changes + + # When there's just one input, the output type exactly matches + a = array([3], dtype='u4') + a = a.view(a.dtype.newbyteorder()) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']]) + assert_equal(i.dtypes[0], i.dtypes[1]) + # With two or more inputs, the output type is in native byte order + i = nditer([a, a, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(i.dtypes[0] != i.dtypes[2]) + assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2]) + +def test_iter_allocate_output_types_scalar(): + # If the inputs are all scalars, the output should be a scalar + + i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], + [['writeonly', 'allocate']] + [['readonly']] * 4) + assert_equal(i.operands[0].dtype, np.dtype('complex128')) + assert_equal(i.operands[0].ndim, 0) + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + class MyNDArray(np.ndarray): + __array_priority__ = 15 + + # subclass vs ndarray + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + b = np.arange(4).reshape(2, 2).T + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_equal(type(a), type(i.operands[2])) + assert_(type(b) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + + # If subtypes are disabled, we should get back an ndarray. + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_equal(type(b), type(i.operands[2])) + assert_(type(a) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + +def test_iter_allocate_output_errors(): + # Check that the iterator will throw errors for bad output allocations + + # Need an input if no output data type is specified + a = arange(6) + assert_raises(TypeError, nditer, [a, None], [], + [['writeonly'], ['writeonly', 'allocate']]) + # Allocated output should be flagged for writing + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['allocate', 'readonly']]) + # Allocated output can't have buffering without delayed bufalloc + assert_raises(ValueError, nditer, [a, None], ['buffered'], + ['allocate', 'readwrite']) + # Must specify dtype if there are no inputs (cannot promote existing ones; + # maybe this should use the 'f4' here, but it does not historically.) 
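+ # A minimal sketch of the requirement stated above (illustrative only,
+ # assuming the module's numpy import): with no array input to infer from,
+ # an allocated operand must be given an explicit op_dtype, otherwise
+ # constructing the iterator fails.
+ def _sketch_allocate_needs_dtype():
+     ok = np.nditer([None], [], [['writeonly', 'allocate']],
+                    op_dtypes=[np.dtype('f4')], itershape=(3,))
+     assert ok.operands[0].shape == (3,)
+     try:
+         np.nditer([None], [], [['writeonly', 'allocate']], itershape=(3,))
+         raise AssertionError('expected a TypeError')
+     except TypeError:
+         pass  # no dtype given and no input to promote from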
+ assert_raises(TypeError, nditer, [None, None], [], + [['writeonly', 'allocate'], + ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + # If using op_axes, must specify all the axes + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 1]]) + # If using op_axes, the axes must be within bounds + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 3, 1]]) + # If using op_axes, there can't be duplicates + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 2, 1, 0]]) + # Not all axes may be specified if a reduction. If there is a hole + # in op_axes, this is an error. + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], ["reduce_ok"], + [['readonly'], ['readwrite', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 2]]) + +def test_all_allocated(): + # When no output and no shape is given, `()` is used as shape. + i = np.nditer([None], op_dtypes=["int64"]) + assert i.operands[0].shape == () + assert i.dtypes == (np.dtype("int64"),) + + i = np.nditer([None], op_dtypes=["int64"], itershape=(2, 3, 4)) + assert i.operands[0].shape == (2, 3, 4) + +def test_iter_remove_axis(): + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + i.remove_axis(1) + assert_equal(list(i), a[:, 0, :].ravel()) + + a = a[::-1, :, :] + i = nditer(a, ['multi_index']) + i.remove_axis(0) + assert_equal(list(i), a[0, :, :].ravel()) + +def test_iter_remove_multi_index_inner_loop(): + # Check that removing multi-index support works + + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + assert_equal(i.ndim, 3) + assert_equal(i.shape, (2, 3, 4)) + assert_equal(i.itviews[0].shape, (2, 3, 4)) + + # Removing the multi-index tracking causes all dimensions to coalesce + before = list(i) + i.remove_multi_index() + after = list(i) + + assert_equal(before, after) + assert_equal(i.ndim, 1) + assert_raises(ValueError, lambda i: i.shape, i) + assert_equal(i.itviews[0].shape, (24,)) + + # Removing the inner loop means there's just one iteration + i.reset() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, ()) + i.enable_external_loop() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, (24,)) + assert_equal(i.value, arange(24)) + +def test_iter_iterindex(): + # Make sure iterindex works + + buffersize = 5 + a = arange(24).reshape(4, 3, 2) + for flags in ([], ['buffered']): + i = nditer(a, flags, buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + + i = nditer(a, flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 5 + assert_equal(iter_iterindices(i), list(range(5, 24))) + + i = nditer(a[::-1], flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 9 + assert_equal(iter_iterindices(i), list(range(9, 24))) + + i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 13 + assert_equal(iter_iterindices(i), list(range(13, 24))) + + i = nditer(a[::1, ::-1], flags, 
buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 23 + assert_equal(iter_iterindices(i), list(range(23, 24))) + i.reset() + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + +def test_iter_iterrange(): + # Make sure getting and resetting the iterrange works + + buffersize = 5 + a = arange(24, dtype='i4').reshape(4, 3, 2) + a_fort = a.ravel(order='F') + + i = nditer(a, ['ranged'], ['readonly'], order='F', + buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + def get_array(i): + val = np.array([], dtype='f8') + for x in i: + val = np.concatenate((val, x)) + return val + + i = nditer(a, ['ranged', 'buffered', 'external_loop'], + ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal(get_array(i), a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal(get_array(i), a_fort[r[0]:r[1]]) + +def test_iter_buffering(): + # Test buffering with several buffer sizes and types + arrays = [] + # F-order swapped array + _tmp = np.arange(24, dtype='c16').reshape(2, 3, 4).T + _tmp = _tmp.view(_tmp.dtype.newbyteorder()).byteswap() + arrays.append(_tmp) + # Contiguous 1-dimensional array + arrays.append(np.arange(10, dtype='f4')) + # Unaligned array + a = np.zeros((4 * 16 + 1,), dtype='i1')[1:] + a.dtype = 'i4' + a[:] = np.arange(16, dtype='i4') + arrays.append(a) + # 4-D F-order array + arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T) + for a in arrays: + for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024): + vals = [] + i = nditer(a, ['buffered', 'external_loop'], + [['readonly', 'nbo', 'aligned']], + order='C', + casting='equiv', + buffersize=buffersize) + while not i.finished: + assert_(i[0].size <= buffersize) + vals.append(i[0].copy()) + i.iternext() + assert_equal(np.concatenate(vals), a.ravel(order='C')) + +def test_iter_write_buffering(): + # Test that buffering of writes is working + + # F-order swapped array + a = np.arange(24).reshape(2, 3, 4).T + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered'], + [['readwrite', 'nbo', 'aligned']], + casting='equiv', + order='C', + buffersize=16) + x = 0 + with i: + while not i.finished: + i[0] = x + x += 1 + i.iternext() + assert_equal(a.ravel(order='C'), np.arange(24)) + +def test_iter_buffering_delayed_alloc(): + # Test that delaying buffer allocation works + + a = np.arange(6) + b = np.arange(1, dtype='f4') + i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'], + ['readwrite'], + casting='unsafe', + op_dtypes='f4') + assert_(i.has_delayed_bufalloc) + assert_raises(ValueError, lambda i: i.multi_index, i) + assert_raises(ValueError, lambda i: i[0], i) + assert_raises(ValueError, lambda i: i[0:2], i) + + def assign_iter(i): + i[0] = 0 + assert_raises(ValueError, assign_iter, i) + + i.reset() + assert_(not 
i.has_delayed_bufalloc) + assert_equal(i.multi_index, (0,)) + with i: + assert_equal(i[0], 0) + i[1] = 1 + assert_equal(i[0:2], [0, 1]) + assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1] * 6))) + +def test_iter_buffered_cast_simple(): + # Test that buffering can handle a simple cast + + a = np.arange(10, dtype='f4') + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f4')) + +def test_iter_buffered_cast_byteswapped(): + # Test that buffering can handle a cast which requires swap->cast->swap + + a = np.arange(10, dtype='f4') + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f4')) + + with suppress_warnings() as sup: + sup.filter(np.exceptions.ComplexWarning) + + a = np.arange(10, dtype='f8') + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='unsafe', + op_dtypes=[np.dtype('c8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f8')) + +def test_iter_buffered_cast_byteswapped_complex(): + # Test that buffering can handle a cast which requires swap->cast->copy + + a = np.arange(10, dtype='c8') + a = a.view(a.dtype.newbyteorder()).byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype='c8') + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype=np.clongdouble) + a = a.view(a.dtype.newbyteorder()).byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype=np.clongdouble) + 4j) + + a = np.arange(10, dtype=np.longdouble) + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f4')], + buffersize=7) + with i: + for v in i: + v[...] 
*= 2 + assert_equal(a, 2 * np.arange(10, dtype=np.longdouble)) + +def test_iter_buffered_cast_structured_type(): + # Tests buffering of structured types + + # simple -> struct type (duplicates the value) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.arange(3, dtype='f4') + 0.5 + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [np.array(x) for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + + # object -> struct type + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.zeros((3,), dtype='O') + a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) + a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) + a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) + if HAS_REFCOUNT: + rc = sys.getrefcount(a[0]) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [x.copy() for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(a[0]), rc) + + # single-field struct type -> simple + sdt = [('a', 'f4')] + a = np.array([(5.5,), (8,)], dtype=sdt) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4') + assert_equal([x_[()] for x_ in i], [5, 8]) + + # make sure multi-field struct type -> simple doesn't work + sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + assert_raises(TypeError, lambda: ( + nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4'))) + + # struct type -> struct type (field-wise copy) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + assert_equal([np.array(x_) for x_ in i], + [np.array((1, 2, 3), dtype=sdt2), + np.array((4, 5, 6), dtype=sdt2)]) + + +def test_iter_buffered_cast_structured_type_failure_with_cleanup(): + # make sure struct type -> struct type with different + # number of fields fails + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + + for intent in ["readwrite", "readonly", "writeonly"]: + # This test was initially designed to test an error at a different + # place, but will now raise earlier due to the cast not being possible: + # `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails. + # Without a faulty DType, there is probably no reliable + # way to get the initial tested behaviour.
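+        # Illustrative sketch (not part of the upstream test): the claim in
+        # the comment above can be checked directly, since it relies only on
+        # `np.can_cast` and the dtypes already defined here.
+        assert not np.can_cast(a.dtype, np.dtype(sdt2), casting="unsafe")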
+ simple_arr = np.array([1, 2], dtype="i,i") # requires clean up + with pytest.raises(TypeError): + nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent], + casting='unsafe', op_dtypes=["f,f", sdt2]) + + +def test_buffered_cast_error_paths(): + with pytest.raises(ValueError): + # Buffering casts the `S1` input to the int buffer, which fails + # for the value "a" + np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"], + casting="unsafe", flags=["buffered"]) + + # On write-back, the `S1` buffer is cast into the `i` output + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + with pytest.raises(ValueError): + with it: + buf = next(it) + buf[...] = "a" # cannot be converted to int. + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") +def test_buffered_cast_error_paths_unraisable(): + # The following gives an unraisable error. Pytest sometimes captures that + # (depending on Python and/or pytest version). So with Python>=3.8 this can + # probably be cleaned out in the future to check for + # pytest.PytestUnraisableExceptionWarning: + code = textwrap.dedent(""" + import numpy as np + + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + buf = next(it) + buf[...] = "a" + del buf, it # Flushing only happens during deallocate right now. + """) + res = subprocess.check_output([sys.executable, "-c", code], + stderr=subprocess.STDOUT, text=True) + assert "ValueError" in res + + +def test_iter_buffered_cast_subarray(): + # Tests buffering of subarrays + + # one element -> many (copies it to all) + sdt1 = [('a', 'f4')] + sdt2 = [('a', 'f8', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + for x, count in zip(i, list(range(6))): + assert_(np.all(x['a'] == count)) + + # one element -> many -> back (copies it to all) + sdt1 = [('a', 'O', (1, 1))] + sdt2 = [('a', 'O', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_(np.all(x['a'] == count)) + x['a'][0] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1) + 2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + x['a'] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1) * np.ones((1, 3, 2, 2)) + 2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'f8', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> one element (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:,
0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> matching shape (straightforward copy) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 3 * 2 * 2).reshape(6, 3, 2, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a']) + count += 1 + + # vector -> smaller vector (truncates) + sdt1 = [('a', 'f8', (6,))] + sdt2 = [('a', 'f4', (2,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 6).reshape(6, 6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a'][:2]) + count += 1 + + # vector -> bigger vector (pads with zeros) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (6,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2], a[count]['a']) + assert_equal(x['a'][2:], [0, 0, 0, 0]) + count += 1 + + # vector -> matrix (broadcasts) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][0], a[count]['a']) + assert_equal(x['a'][1], a[count]['a']) + count += 1 + + # vector -> matrix (broadcasts and zero-pads) + sdt1 = [('a', 'f8', (2, 1))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2, 1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) + assert_equal(x['a'][2, :], [0, 0]) + count += 1 + + # matrix -> matrix (truncates and zero-pads) + sdt1 = [('a', 'f8', (2, 3))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2 * 3).reshape(6, 2, 3) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) + assert_equal(x['a'][2, :], [0, 0]) + count += 1 + +def test_iter_buffering_badwriteback(): + # Writing back from a buffer cannot combine elements + + # a needs write buffering, but had a broadcast dimension + a = np.arange(6).reshape(2, 3, 1) + b = np.arange(12).reshape(2, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + + # But if a is readonly, it's fine + nditer([a, b], ['buffered', 'external_loop'], + [['readonly'], ['writeonly']], + order='C') + + # If a has just one element, it's fine too (constant 0 stride, a reduction) + a = np.arange(1).reshape(1, 1, 1) + nditer([a, b], ['buffered', 
'external_loop', 'reduce_ok'], + [['readwrite'], ['writeonly']], + order='C') + + # check that it fails on other dimensions too + a = np.arange(6).reshape(1, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + a = np.arange(4).reshape(2, 1, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + +def test_iter_buffering_string(): + # Safe casting disallows shrinking strings + a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) + assert_equal(a.dtype, np.dtype('S4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='S2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') + assert_equal(i[0], b'abc') + assert_equal(i[0].dtype, np.dtype('S6')) + + a = np.array(['abc', 'a', 'abcd'], dtype=np.str_) + assert_equal(a.dtype, np.dtype('U4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='U2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') + assert_equal(i[0], 'abc') + assert_equal(i[0].dtype, np.dtype('U6')) + +def test_iter_buffering_growinner(): + # Test that the inner loop grows when no buffering is needed + a = np.arange(30) + i = nditer(a, ['buffered', 'growinner', 'external_loop'], + buffersize=5) + # Should end up with just one inner loop here + assert_equal(i[0].size, a.size) + + +@pytest.mark.parametrize("read_or_readwrite", ["readonly", "readwrite"]) +def test_iter_contig_flag_reduce_error(read_or_readwrite): + # Test that a non-contiguous operand is rejected without buffering. + # NOTE: This is true even for a reduction, where we return a 0-stride + # below! + with pytest.raises(TypeError, match="Iterator operand required buffering"): + it = np.nditer( + (np.zeros(()),), flags=["external_loop", "reduce_ok"], + op_flags=[(read_or_readwrite, "contig"),], itershape=(10,)) + + +@pytest.mark.parametrize("arr", [ + lambda: np.zeros(()), + lambda: np.zeros((20, 1))[::20], + lambda: np.zeros((1, 20))[:, ::20] + ]) +def test_iter_contig_flag_single_operand_strides(arr): + """ + Tests the strides with the contig flag for both broadcast and non-broadcast + operands in 3 cases where the logic is needed: + 1. When everything has a zero stride, the broadcast op needs to be repeated. + 2. When the reduce axis is the last axis (first to iterate). + 3. When the reduce axis is the first axis (last to iterate). + + NOTE: The semantics of the contig flag are not clearly defined when + it comes to reduction. It is unclear that there are any users. + """ + first_op = np.ones((10, 10)) + broadcast_op = arr() + red_op = arr() + # Add a first operand to ensure no axis-reordering and the result shape.
+ iterator = np.nditer( + (first_op, broadcast_op, red_op), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2 + [("readwrite", "contig")]) + + with iterator: + iterator.reset() + for f, b, r in iterator: + # The first operand is contiguous, we should have a view + assert np.shares_memory(f, first_op) + # Although broadcast, the second op always has a contiguous stride + assert b.strides[0] == 8 + assert not np.shares_memory(b, broadcast_op) + # The reduction has a contiguous stride or a 0 stride + if red_op.ndim == 0 or red_op.shape[-1] == 1: + assert r.strides[0] == 0 + else: + # The stride is 8, although it was not originally: + assert r.strides[0] == 8 + # If the reduce stride is 0, buffering makes no difference, but we + # do it anyway right now: + assert not np.shares_memory(r, red_op) + + +@pytest.mark.xfail(reason="The contig flag was always buggy.") +def test_iter_contig_flag_incorrect(): + # This case does the wrong thing... + iterator = np.nditer( + (np.ones((10, 10)).T, np.ones((1, 10))), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2) + + with iterator: + iterator.reset() + for a, b in iterator: + # Remove a and b from locals (pytest may want to format them) + a, b = a.strides, b.strides + assert a == 8 + assert b == 8 # should be 8 but is 0 due to axis reorder + + +@pytest.mark.slow +def test_iter_buffered_reduce_reuse(): + # large enough array for all views, including negative strides. + a = np.arange(2 * 3**5)[3**5:3**5 + 1] + flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] + op_flags = [('readonly',), ('readwrite', 'allocate')] + op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] + # wrong dtype to force buffering + op_dtypes = [float, a.dtype] + + def get_params(): + for xs in range(-3**2, 3**2 + 1): + for ys in range(xs, 3**2 + 1): + for op_axes in op_axes_list: + # last stride is reduced and because of that not + # important for this test, as it is the inner stride. + strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) + arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) + + for skip in [0, 1]: + yield arr, op_axes, skip + + for arr, op_axes, skip in get_params(): + nditer2 = np.nditer([arr.copy(), None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + op_dtypes=op_dtypes) + with nditer2: + nditer2.operands[-1][...] = 0 + nditer2.reset() + nditer2.iterindex = skip + + for (a2_in, b2_in) in nditer2: + b2_in += a2_in.astype(np.int_) + + comp_res = nditer2.operands[-1] + + for bufsize in range(3**3): + nditer1 = np.nditer([arr, None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + buffersize=bufsize, op_dtypes=op_dtypes) + with nditer1: + nditer1.operands[-1][...] = 0 + nditer1.reset() + nditer1.iterindex = skip + + for (a1_in, b1_in) in nditer1: + b1_in += a1_in.astype(np.int_) + + res = nditer1.operands[-1] + assert_array_equal(res, comp_res) + + +def test_iter_buffered_reduce_reuse_core(): + # NumPy re-uses buffers for broadcast operands (as of writing, when reading). + # Test this even if the offset is manually set at some point during + # the iteration. (not a particularly tricky path) + arr = np.empty((1, 6, 4, 1)).reshape(1, 6, 4, 1)[:, ::3, ::2, :] + arr[...] = np.arange(arr.size).reshape(arr.shape) + # First and last dimension are broadcast dimensions.
+ arr = np.broadcast_to(arr, (100, 2, 2, 2)) + + flags = ['buffered', 'reduce_ok', 'refs_ok', 'multi_index'] + op_flags = [('readonly',)] + + buffersize = 100 # small enough to not fit the whole array + it = np.nditer(arr, flags=flags, op_flags=op_flags, buffersize=100) + + # Iterate a bit (this will cause buffering internally) + expected = [next(it) for i in range(11)] + # Now, manually advance to inside the core (the +1) + it.iterindex = 10 * (2 * 2 * 2) + 1 + result = [next(it) for i in range(10)] + + assert expected[1:] == result + + +def test_iter_no_broadcast(): + # Test that the no_broadcast flag works + a = np.arange(24).reshape(2, 3, 4) + b = np.arange(6).reshape(2, 3, 1) + c = np.arange(12).reshape(3, 4) + + nditer([a, b, c], [], + [['readonly', 'no_broadcast'], + ['readonly'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) + + +class TestIterNested: + + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_reorder(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [list(j) for 
_ in i] + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + + def test_broadcast(self): + # Test nested iteration with broadcasting + a = arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) + + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + + def test_dtype_copy(self): + # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # writebackifcopy - using context manager + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + # writebackifcopy - using close() + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i.close() + j.close() + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_0d(self): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append(list(k)) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_iter_nested_iters_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] 
+= 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + +def test_iter_reduction_error(): + + a = np.arange(6) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + + a = np.arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, None], ['external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + +def test_iter_reduction(): + # Test doing reductions with the iterator + + a = np.arange(6) + i = nditer([a, None], ['reduce_ok'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + a = np.arange(6).reshape(2, 3) + i = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Reduction shape/strides for the output + assert_equal(i[1].shape, (6,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + # This is a tricky reduction case for the buffering double loop + # to handle + a = np.ones((2, 3, 5)) + it1 = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]]) + it2 = nditer([a, None], ['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]], buffersize=10) + with it1, it2: + it1.operands[1].fill(0) + it2.operands[1].fill(0) + it2.reset() + for x in it1: + x[1][...] += x[0] + for x in it2: + x[1][...] += x[0] + assert_equal(it1.operands[1], it2.operands[1]) + assert_equal(it2.operands[1].sum(), a.size) + +def test_iter_buffering_reduction(): + # Test doing buffered reductions with the iterator + + a = np.arange(6) + b = np.array(0., dtype='f8').byteswap() + b = b.view(b.dtype.newbyteorder()) + i = nditer([a, b], ['reduce_ok', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0], [-1]]) + with i: + assert_equal(i[1].dtype, np.dtype('f8')) + assert_(i[1].dtype != b.dtype) + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(b, np.sum(a)) + + a = np.arange(6).reshape(2, 3) + b = np.array([0, 0], dtype='f8').byteswap() + b = b.view(b.dtype.newbyteorder()) + i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0, 1], [0, -1]]) + # Reduction shape/strides for the output + with i: + assert_equal(i[1].shape, (3,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] 
= y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + assert_equal(b, np.sum(a, axis=1)) + + # Iterator inner double loop was wrong on this one + p = np.arange(2) + 1 + it = np.nditer([p, None], + ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[-1, 0], [-1, -1]], + itershape=(2, 2)) + with it: + it.operands[1].fill(0) + it.reset() + assert_equal(it[0], [1, 2, 1, 2]) + + # Iterator inner loop should take argument contiguity into account + x = np.ones((7, 13, 8), np.int8)[4:6, 1:11:6, 1:5].transpose(1, 2, 0) + x[...] = np.arange(x.size).reshape(x.shape) + y_base = np.arange(4 * 4, dtype=np.int8).reshape(4, 4) + y_base_copy = y_base.copy() + y = y_base[::2, :, None] + + it = np.nditer([y, x], + ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['readonly']]) + with it: + for a, b in it: + a.fill(2) + + assert_equal(y_base[1::2], y_base_copy[1::2]) + assert_equal(y_base[::2], 2) + +def test_iter_buffering_reduction_reuse_reduce_loops(): + # There was a bug triggering reuse of the reduce loop inappropriately, + # which caused processing to happen in unnecessarily small chunks + # and overran the buffer. + + a = np.zeros((2, 7)) + b = np.zeros((1, 7)) + it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], + op_flags=[['readonly'], ['readwrite']], + buffersize=5) + + with it: + bufsizes = [x.shape[0] for x, y in it] + assert_equal(bufsizes, [5, 2, 5, 2]) + assert_equal(sum(bufsizes), a.size) + +def test_iter_writemasked_badinput(): + a = np.zeros((2, 3)) + b = np.zeros((3,)) + m = np.array([[True, True, False], [False, True, False]]) + m2 = np.array([True, True, False]) + m3 = np.array([0, 1, 1], dtype='u1') + mbad1 = np.array([0, 1, 1], dtype='i1') + mbad2 = np.array([0, 1, 1], dtype='f4') + + # Need an 'arraymask' if any operand is 'writemasked' + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite', 'writemasked'], ['readonly']]) + + # A 'writemasked' operand must not be readonly + assert_raises(ValueError, nditer, [a, m], [], + [['readonly', 'writemasked'], ['readonly', 'arraymask']]) + + # 'writemasked' and 'arraymask' may not be used together + assert_raises(ValueError, nditer, [a, m], [], + [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) + + # 'arraymask' may only be specified once + assert_raises(ValueError, nditer, [a, m, m2], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask'], + ['readonly', 'arraymask']]) + + # An 'arraymask' with nothing 'writemasked' also doesn't make sense + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite'], ['readonly', 'arraymask']]) + + # A writemasked reduction requires a similarly smaller mask + assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # But this should work with a smaller/equal mask to the reduction operand + np.nditer([a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # The arraymask itself cannot be a reduction + assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readwrite', 'arraymask']]) + + # A uint8 mask is ok too + np.nditer([a, m3], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # An int8 mask isn't ok + assert_raises(TypeError, np.nditer, 
[a, mbad1], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # A float32 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + + +def _is_buffered(iterator): + try: + iterator.itviews + except ValueError: + return True + return False + +@pytest.mark.parametrize("a", + [np.zeros((3,), dtype='f8'), + np.zeros((9876, 3 * 5), dtype='f8')[::2, :], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], + # Also test with the last dimension strided (so it does not fit if + # there is repeated access) + np.zeros((9,), dtype='f8')[::3], + np.zeros((9876, 3 * 10), dtype='f8')[::2, ::5], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) +def test_iter_writemasked(a): + # Note, the slicing above is to ensure that nditer cannot combine multiple + # axes into one. The repetition is just to make things a bit more + # interesting. + shape = a.shape + reps = shape[-1] // 3 + msk = np.empty(shape, dtype=bool) + msk[...] = [True, True, False] * reps + + # When buffering is unused, 'writemasked' effectively does nothing. + # It's up to the user of the iterator to obey the requested semantics. + it = np.nditer([a, msk], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + with it: + for x, m in it: + x[...] = 1 + # Because we violated the semantics, all the values became 1 + assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape)) + + # Even if buffering is enabled, we still may be accessing the array + # directly. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # @seberg: I honestly don't currently understand why a "buffered" iterator + # would end up not using a buffer for the small array here at least when + # "writemasked" is used, that seems confusing... Check by testing for + # actual memory overlap! + is_buffered = True + with it: + for x, m in it: + x[...] = 2.5 + if np.may_share_memory(x, a): + is_buffered = False + + if not is_buffered: + # Because we violated the semantics, all the values became 2.5 + assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape)) + else: + # For large sizes, the iterator may be buffered: + assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape)) + a[...] = 2.5 + + # If buffering is definitely happening, for instance because of + # a cast, only the items selected by the mask will be copied back from + # the buffer. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['i8', None], + casting='unsafe') + with it: + for x, m in it: + x[...] = 3 + # Even though we violated the semantics, only the selected values + # were copied back + assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape)) + + +@pytest.mark.parametrize(["mask", "mask_axes"], [ + # Allocated operand (only broadcasts with -1) + (None, [-1, 0]), + # Reduction along the first dimension (with and without op_axes) + (np.zeros((1, 4), dtype="bool"), [0, 1]), + (np.zeros((1, 4), dtype="bool"), None), + # Test 0-D and -1 op_axes + (np.zeros(4, dtype="bool"), [-1, 0]), + (np.zeros((), dtype="bool"), [-1, -1]), + (np.zeros((), dtype="bool"), None)]) +def test_iter_writemasked_broadcast_error(mask, mask_axes): + # This assumes that a readwrite mask makes sense. This is likely not the + # case and should simply be deprecated.
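+    # Illustrative sketch (not part of the upstream test) of the simplest
+    # case parametrized above: a (1, 4) mask gating a (3, 4) writemasked
+    # operand, where one mask element would have to gate three distinct
+    # writes, so construction must fail:
+    #
+    #     np.nditer((np.zeros((1, 4), dtype=bool), np.zeros((3, 4))),
+    #               flags=["reduce_ok"],
+    #               op_flags=[["arraymask", "readwrite", "allocate"],
+    #                         ["writeonly", "writemasked"]])  # -> ValueError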
+ arr = np.zeros((3, 4)) + itflags = ["reduce_ok"] + mask_flags = ["arraymask", "readwrite", "allocate"] + a_flags = ["writeonly", "writemasked"] + if mask_axes is None: + op_axes = None + else: + op_axes = [mask_axes, [0, 1]] + + with assert_raises(ValueError): + np.nditer((mask, arr), flags=itflags, op_flags=[mask_flags, a_flags], + op_axes=op_axes) + + +def test_iter_writemasked_decref(): + # force casting (to make it interesting) by using a structured dtype. + arr = np.arange(10000).astype(">i,O") + original = arr.copy() + mask = np.random.randint(0, 2, size=10000).astype(bool) + + it = np.nditer([arr, mask], ['buffered', "refs_ok"], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=[" string -> longdouble` for the + # conversion. But Python may refuse `str(int)` for huge ints. + # In that case, RuntimeWarning would be correct, but conversion + # fails earlier (seems to happen on 32bit linux, possibly only debug). + if dtype in "gG": + try: + str(too_big_int) + except ValueError: + pytest.skip("`huge_int -> string -> longdouble` failed") + + # Otherwise, we overflow to infinity: + with pytest.warns(RuntimeWarning): + res = scalar_type(1) + too_big_int + assert res.dtype == dtype + assert res == np.inf + + with pytest.warns(RuntimeWarning): + # We force the dtype here, since windows may otherwise pick the + # double instead of the longdouble loop. That leads to slightly + # different results (conversion of the int fails as above). + res = np.add(np.array(1, dtype=dtype), too_big_int, dtype=dtype) + assert res.dtype == dtype + assert res == np.inf + + +@pytest.mark.parametrize("op", [operator.add, operator.pow]) +def test_weak_promotion_scalar_path(op): + # Some additional paths exercising the weak scalars. + + # Integer path: + res = op(np.uint8(3), 5) + assert res == op(3, 5) + assert res.dtype == np.uint8 or res.dtype == bool # noqa: PLR1714 + + with pytest.raises(OverflowError): + op(np.uint8(3), 1000) + + # Float path: + res = op(np.float32(3), 5.) + assert res == op(3., 5.) + assert res.dtype == np.float32 or res.dtype == bool # noqa: PLR1714 + + +def test_nep50_complex_promotion(): + with pytest.warns(RuntimeWarning, match=".*overflow"): + res = np.complex64(3) + complex(2**300) + + assert type(res) == np.complex64 + + +def test_nep50_integer_conversion_errors(): + # Implementation for error paths is mostly missing (as of writing) + with pytest.raises(OverflowError, match=".*uint8"): + np.array([1], np.uint8) + 300 + + with pytest.raises(OverflowError, match=".*uint8"): + np.uint8(1) + 300 + + # Error message depends on platform (maybe unsigned int or unsigned long) + with pytest.raises(OverflowError, + match="Python integer -1 out of bounds for uint8"): + np.uint8(1) + -1 + + +def test_nep50_with_axisconcatenator(): + # Concatenate/r_ does not promote, so this has to error: + with pytest.raises(OverflowError): + np.r_[np.arange(5, dtype=np.int8), 255] + + +@pytest.mark.parametrize("ufunc", [np.add, np.power]) +def test_nep50_huge_integers(ufunc): + # Very large integers are complicated, because they go to uint64 or + # object dtype. This test covers a few possible paths.
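+    # Illustrative sketch (not part of the upstream test; restates the
+    # assertions below): a huge Python int is accepted only when the other
+    # operand's dtype can hold it, e.g.:
+    #
+    #     np.uint64(1) + 2**63   # fits in uint64 -> uint64 result
+    #     np.int64(0) + 2**63    # does not fit int64 -> OverflowError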
+ with pytest.raises(OverflowError): + ufunc(np.int64(0), 2**63) # 2**63 too large for int64 + + with pytest.raises(OverflowError): + ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 + + # However, 2**63 can be represented by the uint64 (and that is used): + res = ufunc(np.uint64(1), 2**63) + + assert res.dtype == np.uint64 + assert res == ufunc(1, 2**63, dtype=object) + + # The following paths fail to warn correctly about the change: + with pytest.raises(OverflowError): + ufunc(np.int64(1), 2**63) # np.array(2**63) would go to uint + + with pytest.raises(OverflowError): + ufunc(np.int64(1), 2**100) # np.array(2**100) would go to object + + # This would go to object and thus a Python float, not a NumPy one: + res = ufunc(1.0, 2**100) + assert isinstance(res, np.float64) + + +def test_nep50_in_concat_and_choose(): + res = np.concatenate([np.float32(1), 1.], axis=None) + assert res.dtype == "float32" + + res = np.choose(1, [np.float32(1), 1.]) + assert res.dtype == "float32" + + +@pytest.mark.parametrize("expected,dtypes,optional_dtypes", [ + (np.float32, [np.float32], + [np.float16, 0.0, np.uint16, np.int16, np.int8, 0]), + (np.complex64, [np.float32, 0j], + [np.float16, 0.0, np.uint16, np.int16, np.int8, 0]), + (np.float32, [np.int16, np.uint16, np.float16], + [np.int8, np.uint8, np.float32, 0., 0]), + (np.int32, [np.int16, np.uint16], + [np.int8, np.uint8, 0, np.bool]), + ]) +@hypothesis.given(data=strategies.data()) +def test_expected_promotion(expected, dtypes, optional_dtypes, data): + # Sample randomly while ensuring "dtypes" is always present: + optional = data.draw(strategies.lists( + strategies.sampled_from(dtypes + optional_dtypes))) + all_dtypes = dtypes + optional + dtypes_sample = data.draw(strategies.permutations(all_dtypes)) + + res = np.result_type(*dtypes_sample) + assert res == expected + + +@pytest.mark.parametrize("sctype", + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) +@pytest.mark.parametrize("other_val", + [-2 * 100, -1, 0, 9, 10, 11, 2**63, 2 * 100]) +@pytest.mark.parametrize("comp", + [operator.eq, operator.ne, operator.le, operator.lt, + operator.ge, operator.gt]) +def test_integer_comparison(sctype, other_val, comp): + # Test that comparisons with integers (especially out-of-bound ones) + # work correctly. + val_obj = 10 + val = sctype(val_obj) + # Check that the scalar behaves the same as the python int: + assert comp(10, other_val) == comp(val, other_val) + assert comp(val, other_val) == comp(10, other_val) + # Except for the result type: + assert type(comp(val, other_val)) is np.bool + + # Check that the integer array and object array behave the same: + val_obj = np.array([10, 10], dtype=object) + val = val_obj.astype(sctype) + assert_array_equal(comp(val_obj, other_val), comp(val, other_val)) + assert_array_equal(comp(other_val, val_obj), comp(other_val, val)) + + +@pytest.mark.parametrize("arr", [ + np.ones((100, 100), dtype=np.uint8)[::2], # not trivially iterable + np.ones(20000, dtype=">u4"), # cast and >buffersize + np.ones(100, dtype=">u4"), # fast path compatible with cast +]) +def test_integer_comparison_with_cast(arr): + # Similar to above, but mainly tests a few cases that cover the slow path; + # the test is limited to unsigned ints and -1 for simplicity.
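+    # Illustrative note (not part of the upstream test): since -1 cannot be
+    # cast to any unsigned dtype, the comparison has to be resolved without
+    # casting the scalar, e.g.:
+    #
+    #     np.ones(3, dtype=np.uint8) >= -1   # -> array([ True,  True,  True])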
+ res = arr >= -1 + assert_array_equal(res, np.ones_like(arr, dtype=bool)) + res = arr < -1 + assert_array_equal(res, np.zeros_like(arr, dtype=bool)) + + +@pytest.mark.parametrize("comp", + [np.equal, np.not_equal, np.less_equal, np.less, + np.greater_equal, np.greater]) +def test_integer_integer_comparison(comp): + # Test that the NumPy comparison ufuncs work with large Python integers + assert comp(2**200, -2**200) == comp(2**200, -2**200, dtype=object) + + +def create_with_scalar(sctype, value): + return sctype(value) + + +def create_with_array(sctype, value): + return np.array([value], dtype=sctype) + + +@pytest.mark.parametrize("sctype", + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) +@pytest.mark.parametrize("create", [create_with_scalar, create_with_array]) +def test_oob_creation(sctype, create): + iinfo = np.iinfo(sctype) + + with pytest.raises(OverflowError): + create(sctype, iinfo.min - 1) + + with pytest.raises(OverflowError): + create(sctype, iinfo.max + 1) + + with pytest.raises(OverflowError): + create(sctype, str(iinfo.min - 1)) + + with pytest.raises(OverflowError): + create(sctype, str(iinfo.max + 1)) + + assert create(sctype, iinfo.min) == iinfo.min + assert create(sctype, iinfo.max) == iinfo.max diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_numeric.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_numeric.py new file mode 100644 index 0000000..5b58b34 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_numeric.py @@ -0,0 +1,4247 @@ +import itertools +import math +import platform +import sys +import warnings +from decimal import Decimal + +import pytest +from hypothesis import given +from hypothesis import strategies as st +from hypothesis.extra import numpy as hynp +from numpy._core._rational_tests import rational + +import numpy as np +from numpy import ma +from numpy._core import sctypes +from numpy._core.numerictypes import obj2sctype +from numpy.exceptions import AxisError +from numpy.random import rand, randint, randn +from numpy.testing import ( + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestResize: + def test_copies(self): + A = np.array([[1, 2], [3, 4]]) + Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + + def test_repeats(self): + A = np.array([1, 2, 3]) + Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + + def test_zeroresize(self): + A = np.array([[1, 2], [3, 4]]) + Ar = np.resize(A, (0,)) + assert_array_equal(Ar, np.array([])) + assert_equal(A.dtype, Ar.dtype) + + Ar = np.resize(A, (0, 2)) + assert_equal(Ar.shape, (0, 2)) + + Ar = np.resize(A, (2, 0)) + assert_equal(Ar.shape, (2, 0)) + + def test_reshape_from_zero(self): + # See also gh-6740 + A = np.zeros(0, dtype=[('a', np.float32)]) + Ar = np.resize(A, (2, 1)) + assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) + assert_equal(A.dtype, Ar.dtype) + + def 
test_negative_resize(self): + A = np.arange(0, 10, dtype=np.float32) + new_shape = (-10, -1) + with pytest.raises(ValueError, match=r"negative"): + np.resize(A, new_shape=new_shape) + + def test_unsigned_resize(self): + # ensure unsigned integer sizes don't lead to underflows + for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]: + arr = np.array([[23, 95], [66, 37]]) + assert_array_equal(np.resize(arr, dt_pair[0](1)), + np.resize(arr, dt_pair[1](1))) + + def test_subclass(self): + class MyArray(np.ndarray): + __array_priority__ = 1. + + my_arr = np.array([1]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + assert type(np.resize(my_arr, 0)) is MyArray + + my_arr = np.array([]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + + +class TestNonarrayArgs: + # check that non-array arguments to functions wrap them in arrays + def test_choose(self): + choices = [[0, 1, 2], + [3, 4, 5], + [5, 6, 7]] + tgt = [5, 1, 5] + a = [2, 0, 1] + + out = np.choose(a, choices) + assert_equal(out, tgt) + + def test_clip(self): + arr = [-1, 5, 2, 3, 10, -4, -9] + out = np.clip(arr, 2, 7) + tgt = [2, 5, 2, 3, 7, 2, 2] + assert_equal(out, tgt) + + def test_compress(self): + arr = [[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]] + tgt = [[5, 6, 7, 8, 9]] + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + def test_count_nonzero(self): + arr = [[0, 1, 7, 0, 0], + [3, 0, 0, 2, 19]] + tgt = np.array([2, 3]) + out = np.count_nonzero(arr, axis=1) + assert_equal(out, tgt) + + def test_diagonal(self): + a = [[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]] + out = np.diagonal(a) + tgt = [0, 5, 10] + + assert_equal(out, tgt) + + def test_mean(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.mean(A) == 3.5) + assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))) + assert_(np.all(np.mean(A, 1) == np.array([2., 5.]))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.mean([]))) + assert_(w[0].category is RuntimeWarning) + + def test_ptp(self): + a = [3, 4, 5, 10, -3, -5, 6.0] + assert_equal(np.ptp(a, axis=0), 15.0) + + def test_prod(self): + arr = [[1, 2, 3, 4], + [5, 6, 7, 9], + [10, 3, 4, 5]] + tgt = [24, 1890, 600] + + assert_equal(np.prod(arr, axis=-1), tgt) + + def test_ravel(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + assert_equal(np.ravel(a), tgt) + + def test_repeat(self): + a = [1, 2, 3] + tgt = [1, 1, 2, 2, 3, 3] + + out = np.repeat(a, 2) + assert_equal(out, tgt) + + def test_reshape(self): + arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(np.reshape(arr, (2, 6)), tgt) + + def test_reshape_shape_arg(self): + arr = np.arange(12) + shape = (3, 4) + expected = arr.reshape(shape) + + with pytest.raises( + TypeError, + match="You cannot specify 'newshape' and 'shape' " + "arguments at the same time." 
+ ): + np.reshape(arr, shape=shape, newshape=shape) + with pytest.raises( + TypeError, + match=r"reshape\(\) missing 1 required positional " + "argument: 'shape'" + ): + np.reshape(arr) + + assert_equal(np.reshape(arr, shape), expected) + assert_equal(np.reshape(arr, shape, order="C"), expected) + assert_equal(np.reshape(arr, shape, "C"), expected) + assert_equal(np.reshape(arr, shape=shape), expected) + assert_equal(np.reshape(arr, shape=shape, order="C"), expected) + with pytest.warns(DeprecationWarning): + actual = np.reshape(arr, newshape=shape) + assert_equal(actual, expected) + + def test_reshape_copy_arg(self): + arr = np.arange(24).reshape(2, 3, 4) + arr_f_ord = np.array(arr, order="F") + shape = (12, 2) + + assert np.shares_memory(np.reshape(arr, shape), arr) + assert np.shares_memory(np.reshape(arr, shape, order="C"), arr) + assert np.shares_memory( + np.reshape(arr_f_ord, shape, order="F"), arr_f_ord) + assert np.shares_memory(np.reshape(arr, shape, copy=None), arr) + assert np.shares_memory(np.reshape(arr, shape, copy=False), arr) + assert np.shares_memory(arr.reshape(shape, copy=False), arr) + assert not np.shares_memory(np.reshape(arr, shape, copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="C", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=None), arr) + + err_msg = "Unable to avoid creating a copy while reshaping." + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr, shape, order="F", copy=False) + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr_f_ord, shape, order="C", copy=False) + + def test_round(self): + arr = [1.56, 72.54, 6.35, 3.25] + tgt = [1.6, 72.5, 6.4, 3.2] + assert_equal(np.around(arr, decimals=1), tgt) + s = np.float64(1.) + assert_(isinstance(s.round(), np.float64)) + assert_equal(s.round(), 1.) 
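+
+    # Illustrative note (not part of the upstream test) on test_round above:
+    # np.around rounds exact half-way cases to the nearest even value, and
+    # binary floats make some inputs not exactly half-way, which is why
+    # 3.25 rounds down to 3.2:
+    #
+    #     np.around([0.5, 1.5, 2.5])   # -> array([0., 2., 2.])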
+ + @pytest.mark.parametrize('dtype', [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ]) + def test_dunder_round(self, dtype): + s = dtype(1) + assert_(isinstance(round(s), int)) + assert_(isinstance(round(s, None), int)) + assert_(isinstance(round(s, ndigits=None), int)) + assert_equal(round(s), 1) + assert_equal(round(s, None), 1) + assert_equal(round(s, ndigits=None), 1) + + @pytest.mark.parametrize('val, ndigits', [ + pytest.param(2**31 - 1, -1, + marks=pytest.mark.skip(reason="Out of range of int32") + ), + (2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))), + (2**31 - 1, -math.ceil(math.log10(2**31 - 1))) + ]) + def test_dunder_round_edgecases(self, val, ndigits): + assert_equal(round(val, ndigits), round(np.int32(val), ndigits)) + + def test_dunder_round_accuracy(self): + f = np.float64(5.1 * 10**73) + assert_(isinstance(round(f, -73), np.float64)) + assert_array_max_ulp(round(f, -73), 5.0 * 10**73) + assert_(isinstance(round(f, ndigits=-73), np.float64)) + assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73) + + i = np.int64(501) + assert_(isinstance(round(i, -2), np.int64)) + assert_array_max_ulp(round(i, -2), 500) + assert_(isinstance(round(i, ndigits=-2), np.int64)) + assert_array_max_ulp(round(i, ndigits=-2), 500) + + @pytest.mark.xfail(raises=AssertionError, reason="gh-15896") + def test_round_py_consistency(self): + f = 5.1 * 10**73 + assert_equal(round(np.float64(f), -73), round(f, -73)) + + def test_searchsorted(self): + arr = [-8, -5, -1, 3, 6, 10] + out = np.searchsorted(arr, 0) + assert_equal(out, 3) + + def test_size(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.size(A) == 6) + assert_(np.size(A, 0) == 2) + assert_(np.size(A, 1) == 3) + + def test_squeeze(self): + A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] + assert_equal(np.squeeze(A).shape, (3, 3)) + assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3)) + assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3)) + + def test_std(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.std(A), 1.707825127659933) + assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5])) + assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.std([]))) + assert_(w[0].category is RuntimeWarning) + + def test_swapaxes(self): + tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]] + a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] + out = np.swapaxes(a, 0, 2) + assert_equal(out, tgt) + + def test_sum(self): + m = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + tgt = [[6], [15], [24]] + out = np.sum(m, axis=1, keepdims=True) + + assert_equal(tgt, out) + + def test_take(self): + tgt = [2, 3, 5] + indices = [1, 2, 4] + a = [1, 2, 3, 4, 5] + + out = np.take(a, indices) + assert_equal(out, tgt) + + pairs = [ + (np.int32, np.int32), (np.int32, np.int64), + (np.int64, np.int32), (np.int64, np.int64) + ] + for array_type, indices_type in pairs: + x = np.array([1, 2, 3, 4, 5], dtype=array_type) + ind = np.array([0, 2, 2, 3], 
dtype=indices_type) + tgt = np.array([1, 3, 3, 4], dtype=array_type) + out = np.take(x, ind) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) + + def test_trace(self): + c = [[1, 2], [3, 4], [5, 6]] + assert_equal(np.trace(c), 5) + + def test_transpose(self): + arr = [[1, 2], [3, 4], [5, 6]] + tgt = [[1, 3, 5], [2, 4, 6]] + assert_equal(np.transpose(arr, (1, 0)), tgt) + assert_equal(np.transpose(arr, (-1, -2)), tgt) + assert_equal(np.matrix_transpose(arr), tgt) + + def test_var(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.var(A), 2.9166666666666665) + assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25])) + assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.var([]))) + assert_(w[0].category is RuntimeWarning) + + B = np.array([None, 0]) + B[0] = 1j + assert_almost_equal(np.var(B), 0.25) + + def test_std_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean_out = np.zeros((10, 1, 5)) + std_out = np.zeros((10, 1, 5)) + + mean = np.mean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the object specified during calling + assert mean_out is mean + + std = np.std(A, + out=std_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the object specified during calling + assert std_out is std + + # Shape of returned mean and std should be same + assert std.shape == mean.shape + assert std.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, keepdims=True) + + assert std_old.shape == mean.shape + assert_almost_equal(std, std_old) + + def test_var_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean_out = np.zeros((10, 1, 5)) + var_out = np.zeros((10, 1, 5)) + + mean = np.mean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the object specified during calling + assert mean_out is mean + + var = np.var(A, + out=var_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the object specified during calling + assert var_out is var + + # Shape of returned mean and var should be same + assert var.shape == mean.shape + assert var.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, keepdims=True) + + assert var_old.shape == mean.shape + assert_almost_equal(var, var_old) + + def test_std_with_mean_keyword_keepdims_false(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True) + + std = np.std(A, + axis=1, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == (10, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, keepdims=False) + mean_old = np.mean(A, axis=1, keepdims=False) + + assert std_old.shape == mean_old.shape + assert_equal(std, std_old) + + def test_var_with_mean_keyword_keepdims_false(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True) + + var = np.var(A, + axis=1, + keepdims=False, + mean=mean) + + # Shape of returned mean and var should 
be same + assert var.shape == (10, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, keepdims=False) + mean_old = np.mean(A, axis=1, keepdims=False) + + assert var_old.shape == mean_old.shape + assert_equal(var, var_old) + + def test_std_with_mean_keyword_where_nontrivial(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + where = A > 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True, + where=where) + + std = np.std(A, + axis=1, + keepdims=False, + mean=mean, + where=where) + + # Shape of returned mean and std should be same + assert std.shape == (10, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, where=where) + mean_old = np.mean(A, axis=1, where=where) + + assert std_old.shape == mean_old.shape + assert_equal(std, std_old) + + def test_var_with_mean_keyword_where_nontrivial(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + where = A > 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True, + where=where) + + var = np.var(A, + axis=1, + keepdims=False, + mean=mean, + where=where) + + # Shape of returned mean and var should be same + assert var.shape == (10, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, where=where) + mean_old = np.mean(A, axis=1, where=where) + + assert var_old.shape == mean_old.shape + assert_equal(var, var_old) + + def test_std_with_mean_keyword_multiple_axis(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + axis = (0, 2) + + mean = np.mean(A, + out=None, + axis=axis, + keepdims=True) + + std = np.std(A, + out=None, + axis=axis, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == (20,) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=False) + + assert_almost_equal(std, std_old) + + def test_std_with_mean_keyword_axis_None(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + axis = None + + mean = np.mean(A, + out=None, + axis=axis, + keepdims=True) + + std = np.std(A, + out=None, + axis=axis, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == () + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=False) + + assert_almost_equal(std, std_old) + + def test_std_with_mean_keyword_keepdims_true_masked(self): + + A = ma.array([[2., 3., 4., 5.], + [1., 2., 3., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + B = ma.array([[100., 3., 104., 5.], + [101., 2., 103., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + mean_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + std_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + + axis = 0 + + mean = np.mean(A, out=mean_out, + axis=axis, keepdims=True) + + std = np.std(A, out=std_out, + axis=axis, keepdims=True, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == mean.shape + assert std.shape == (1, 4) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=True) + mean_old = np.mean(A, axis=axis, keepdims=True) + + assert std_old.shape == mean_old.shape + 
assert_almost_equal(std, std_old) + assert_almost_equal(mean, mean_old) + + assert mean_out is mean + assert std_out is std + + # masked elements should be ignored + mean_b = np.mean(B, axis=axis, keepdims=True) + std_b = np.std(B, axis=axis, keepdims=True, mean=mean_b) + assert_almost_equal(std, std_b) + assert_almost_equal(mean, mean_b) + + def test_var_with_mean_keyword_keepdims_true_masked(self): + + A = ma.array([[2., 3., 4., 5.], + [1., 2., 3., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + B = ma.array([[100., 3., 104., 5.], + [101., 2., 103., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + mean_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + var_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + + axis = 0 + + mean = np.mean(A, out=mean_out, + axis=axis, keepdims=True) + + var = np.var(A, out=var_out, + axis=axis, keepdims=True, + mean=mean) + + # Shape of returned mean and var should be same + assert var.shape == mean.shape + assert var.shape == (1, 4) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=axis, keepdims=True) + mean_old = np.mean(A, axis=axis, keepdims=True) + + assert var_old.shape == mean_old.shape + assert_almost_equal(var, var_old) + assert_almost_equal(mean, mean_old) + + assert mean_out is mean + assert var_out is var + + # masked elements should be ignored + mean_b = np.mean(B, axis=axis, keepdims=True) + var_b = np.var(B, axis=axis, keepdims=True, mean=mean_b) + assert_almost_equal(var, var_b) + assert_almost_equal(mean, mean_b) + + +class TestIsscalar: + def test_isscalar(self): + assert_(np.isscalar(3.1)) + assert_(np.isscalar(np.int16(12345))) + assert_(np.isscalar(False)) + assert_(np.isscalar('numpy')) + assert_(not np.isscalar([3.1])) + assert_(not np.isscalar(None)) + + # PEP 3141 + from fractions import Fraction + assert_(np.isscalar(Fraction(5, 17))) + from numbers import Number + assert_(np.isscalar(Number())) + + +class TestBoolScalar: + def test_logical(self): + f = np.False_ + t = np.True_ + s = "xyz" + assert_((t and s) is s) + assert_((f and s) is f) + + def test_bitwise_or(self): + f = np.False_ + t = np.True_ + assert_((t | t) is t) + assert_((f | t) is t) + assert_((t | f) is t) + assert_((f | f) is f) + + def test_bitwise_and(self): + f = np.False_ + t = np.True_ + assert_((t & t) is t) + assert_((f & t) is f) + assert_((t & f) is f) + assert_((f & f) is f) + + def test_bitwise_xor(self): + f = np.False_ + t = np.True_ + assert_((t ^ t) is f) + assert_((f ^ t) is t) + assert_((t ^ f) is t) + assert_((f ^ f) is f) + + +class TestBoolArray: + def setup_method(self): + # offset for simd tests + self.t = np.array([True] * 41, dtype=bool)[1::] + self.f = np.array([False] * 41, dtype=bool)[1::] + self.o = np.array([False] * 42, dtype=bool)[2::] + self.nm = self.f.copy() + self.im = self.t.copy() + self.nm[3] = True + self.nm[-2] = True + self.im[3] = False + self.im[-2] = False + + def test_all_any(self): + assert_(self.t.all()) + assert_(self.t.any()) + assert_(not self.f.all()) + assert_(not self.f.any()) + assert_(self.nm.any()) + assert_(self.im.any()) + assert_(not self.nm.all()) + assert_(not self.im.all()) + # check bad element in all positions + for i in range(256 - 7): + d = np.array([False] * 256, dtype=bool)[7::] + d[i] = True + assert_(np.any(d)) + e = np.array([True] * 256, dtype=bool)[7::] + e[i] = False + assert_(not np.all(e)) + assert_array_equal(e, ~d) + # big array test for 
blocked libc loops + for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: + d = np.array([False] * 100043, dtype=bool) + d[i] = True + assert_(np.any(d), msg=f"{i!r}") + e = np.array([True] * 100043, dtype=bool) + e[i] = False + assert_(not np.all(e), msg=f"{i!r}") + + def test_logical_not_abs(self): + assert_array_equal(~self.t, self.f) + assert_array_equal(np.abs(~self.t), self.f) + assert_array_equal(np.abs(~self.f), self.t) + assert_array_equal(np.abs(self.f), self.f) + assert_array_equal(~np.abs(self.f), self.t) + assert_array_equal(~np.abs(self.t), self.f) + assert_array_equal(np.abs(~self.nm), self.im) + np.logical_not(self.t, out=self.o) + assert_array_equal(self.o, self.f) + np.abs(self.t, out=self.o) + assert_array_equal(self.o, self.t) + + def test_logical_and_or_xor(self): + assert_array_equal(self.t | self.t, self.t) + assert_array_equal(self.f | self.f, self.f) + assert_array_equal(self.t | self.f, self.t) + assert_array_equal(self.f | self.t, self.t) + np.logical_or(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t & self.t, self.t) + assert_array_equal(self.f & self.f, self.f) + assert_array_equal(self.t & self.f, self.f) + assert_array_equal(self.f & self.t, self.f) + np.logical_and(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t ^ self.t, self.f) + assert_array_equal(self.f ^ self.f, self.f) + assert_array_equal(self.t ^ self.f, self.t) + assert_array_equal(self.f ^ self.t, self.t) + np.logical_xor(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.f) + + assert_array_equal(self.nm & self.t, self.nm) + assert_array_equal(self.im & self.f, False) + assert_array_equal(self.nm & True, self.nm) + assert_array_equal(self.im & False, self.f) + assert_array_equal(self.nm | self.t, self.t) + assert_array_equal(self.im | self.f, self.im) + assert_array_equal(self.nm | True, self.t) + assert_array_equal(self.im | False, self.im) + assert_array_equal(self.nm ^ self.t, self.im) + assert_array_equal(self.im ^ self.f, self.im) + assert_array_equal(self.nm ^ True, self.im) + assert_array_equal(self.im ^ False, self.im) + + +class TestBoolCmp: + def setup_method(self): + self.f = np.ones(256, dtype=np.float32) + self.ef = np.ones(self.f.size, dtype=bool) + self.d = np.ones(128, dtype=np.float64) + self.ed = np.ones(self.d.size, dtype=bool) + # generate values for all permutation of 256bit simd vectors + s = 0 + for i in range(32): + self.f[s:s + 8] = [i & 2**x for x in range(8)] + self.ef[s:s + 8] = [(i & 2**x) != 0 for x in range(8)] + s += 8 + s = 0 + for i in range(16): + self.d[s:s + 4] = [i & 2**x for x in range(4)] + self.ed[s:s + 4] = [(i & 2**x) != 0 for x in range(4)] + s += 4 + + self.nf = self.f.copy() + self.nd = self.d.copy() + self.nf[self.ef] = np.nan + self.nd[self.ed] = np.nan + + self.inff = self.f.copy() + self.infd = self.d.copy() + self.inff[::3][self.ef[::3]] = np.inf + self.infd[::3][self.ed[::3]] = np.inf + self.inff[1::3][self.ef[1::3]] = -np.inf + self.infd[1::3][self.ed[1::3]] = -np.inf + self.inff[2::3][self.ef[2::3]] = np.nan + self.infd[2::3][self.ed[2::3]] = np.nan + self.efnonan = self.ef.copy() + self.efnonan[2::3] = False + self.ednonan = self.ed.copy() + self.ednonan[2::3] = False + + self.signf = self.f.copy() + self.signd = self.d.copy() + self.signf[self.ef] *= -1. + self.signd[self.ed] *= -1. 
+ self.signf[1::6][self.ef[1::6]] = -np.inf + self.signd[1::6][self.ed[1::6]] = -np.inf + # On RISC-V, many operations that produce NaNs, such as converting + # a -NaN from f64 to f32, return a canonical NaN. The canonical + # NaNs are always positive. See section 11.3 NaN Generation and + # Propagation of the RISC-V Unprivileged ISA for more details. + # We disable the float32 sign test on riscv64 for -np.nan as the sign + # of the NaN will be lost when it's converted to a float32. + if platform.machine() != 'riscv64': + self.signf[3::6][self.ef[3::6]] = -np.nan + self.signd[3::6][self.ed[3::6]] = -np.nan + self.signf[4::6][self.ef[4::6]] = -0. + self.signd[4::6][self.ed[4::6]] = -0. + + def test_float(self): + # offset for alignment test + for i in range(4): + assert_array_equal(self.f[i:] > 0, self.ef[i:]) + assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) + assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) + assert_array_equal(-self.f[i:] < 0, self.ef[i:]) + assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) + r = self.f[i:] != 0 + assert_array_equal(r, self.ef[i:]) + r2 = self.f[i:] != np.zeros_like(self.f[i:]) + r3 = 0 != self.f[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) + assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) + assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) + assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) + assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + + def test_double(self): + # offset for alignment test + for i in range(2): + assert_array_equal(self.d[i:] > 0, self.ed[i:]) + assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) + assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) + assert_array_equal(-self.d[i:] < 0, self.ed[i:]) + assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) + r = self.d[i:] != 0 + assert_array_equal(r, self.ed[i:]) + r2 = self.d[i:] != np.zeros_like(self.d[i:]) + r3 = 0 != self.d[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) + assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) + assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) + assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) + assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + + +class TestSeterr: + def test_default(self): + err = np.geterr() + assert_equal(err, + {'divide': 'warn', + 'invalid': 'warn', + 'over': 'warn', + 'under': 'ignore'} + ) + + def test_set(self): + with np.errstate(): + err = np.seterr() + old = np.seterr(divide='print') + assert_(err == old) + new = np.seterr() + assert_(new['divide'] == 'print') + np.seterr(over='raise') + assert_(np.geterr()['over'] == 'raise') + assert_(new['divide'] == 'print') + np.seterr(**old) + assert_(np.geterr() == old) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_divide_err(self): + with 
np.errstate(divide='raise'): + with assert_raises(FloatingPointError): + np.array([1.]) / np.array([0.]) + + np.seterr(divide='ignore') + np.array([1.]) / np.array([0.]) + + +class TestFloatExceptions: + def assert_raises_fpe(self, fpeerr, flop, x, y): + ftype = type(x) + try: + flop(x, y) + assert_(False, + f"Type {ftype} did not raise fpe error '{fpeerr}'.") + except FloatingPointError as exc: + assert_(str(exc).find(fpeerr) >= 0, + f"Type {ftype} raised wrong fpe error '{exc}'.") + + def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): + # Check that fpe exception is raised. + # + # Given a floating operation `flop` and two scalar values, check that + # the operation raises the floating point exception specified by + # `fpeerr`. Tests all variants with 0-d array scalars as well. + + self.assert_raises_fpe(fpeerr, flop, sc1, sc2) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) + self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) + + # Test for all real and complex float types + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + @pytest.mark.parametrize("typecode", np.typecodes["AllFloat"]) + def test_floating_exceptions(self, typecode): + if 'bsd' in sys.platform and typecode in 'gG': + pytest.skip(reason="Fallback impl for (c)longdouble may not raise " + "FPE errors as expected on BSD OSes, " + "see gh-24876, gh-23379") + + # Test basic arithmetic function errors + with np.errstate(all='raise'): + ftype = obj2sctype(typecode) + if np.dtype(ftype).kind == 'f': + # Get some extreme values for the type + fi = np.finfo(ftype) + ft_tiny = fi._machar.tiny + ft_max = fi.max + ft_eps = fi.eps + underflow = 'underflow' + divbyzero = 'divide by zero' + else: + # 'c', complex, corresponding real dtype + rtype = type(ftype(0).real) + fi = np.finfo(rtype) + ft_tiny = ftype(fi._machar.tiny) + ft_max = ftype(fi.max) + ft_eps = ftype(fi.eps) + # The complex types raise different exceptions + underflow = '' + divbyzero = '' + overflow = 'overflow' + invalid = 'invalid' + + # The value of tiny for double double is NaN, so we need to + # pass the assert + if not np.isnan(ft_tiny): + self.assert_raises_fpe(underflow, + lambda a, b: a / b, ft_tiny, ft_max) + self.assert_raises_fpe(underflow, + lambda a, b: a * b, ft_tiny, ft_tiny) + self.assert_raises_fpe(overflow, + lambda a, b: a * b, ft_max, ftype(2)) + self.assert_raises_fpe(overflow, + lambda a, b: a / b, ft_max, ftype(0.5)) + self.assert_raises_fpe(overflow, + lambda a, b: a + b, ft_max, ft_max * ft_eps) + self.assert_raises_fpe(overflow, + lambda a, b: a - b, -ft_max, ft_max * ft_eps) + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) + self.assert_raises_fpe(divbyzero, + lambda a, b: a / b, ftype(1), ftype(0)) + self.assert_raises_fpe( + invalid, lambda a, b: a / b, ftype(np.inf), ftype(np.inf) + ) + self.assert_raises_fpe(invalid, + lambda a, b: a / b, ftype(0), ftype(0)) + self.assert_raises_fpe( + invalid, lambda a, b: a - b, ftype(np.inf), ftype(np.inf) + ) + self.assert_raises_fpe( + invalid, lambda a, b: a + b, ftype(np.inf), ftype(-np.inf) + ) + self.assert_raises_fpe(invalid, + lambda a, b: a * b, ftype(0), ftype(np.inf)) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_warnings(self): + # test warning code path + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with np.errstate(all="warn"): + np.divide(1, 0.) 
+ assert_equal(len(w), 1) + assert_("divide by zero" in str(w[0].message)) + np.array(1e300) * np.array(1e300) + assert_equal(len(w), 2) + assert_("overflow" in str(w[-1].message)) + np.array(np.inf) - np.array(np.inf) + assert_equal(len(w), 3) + assert_("invalid value" in str(w[-1].message)) + np.array(1e-300) * np.array(1e-300) + assert_equal(len(w), 4) + assert_("underflow" in str(w[-1].message)) + + +class TestTypes: + def check_promotion_cases(self, promote_func): + # tests that the scalars get coerced correctly. + b = np.bool(0) + i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) + u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) + f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) + c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) + + # coercion within the same kind + assert_equal(promote_func(i8, i16), np.dtype(np.int16)) + assert_equal(promote_func(i32, i8), np.dtype(np.int32)) + assert_equal(promote_func(i16, i64), np.dtype(np.int64)) + assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) + assert_equal(promote_func(f32, f64), np.dtype(np.float64)) + assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) + assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) + assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) + assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) + + # coercion between kinds + assert_equal(promote_func(b, i32), np.dtype(np.int32)) + assert_equal(promote_func(b, u8), np.dtype(np.uint8)) + assert_equal(promote_func(i8, u8), np.dtype(np.int16)) + assert_equal(promote_func(u8, i32), np.dtype(np.int32)) + assert_equal(promote_func(i64, u32), np.dtype(np.int64)) + assert_equal(promote_func(u64, i32), np.dtype(np.float64)) + assert_equal(promote_func(i32, f32), np.dtype(np.float64)) + assert_equal(promote_func(i64, f32), np.dtype(np.float64)) + assert_equal(promote_func(f32, i16), np.dtype(np.float32)) + assert_equal(promote_func(f32, u32), np.dtype(np.float64)) + assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) + assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) + assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) + + # coercion between scalars and 1-D arrays + assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) + assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) + assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) + assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int64)) + # unsigned and signed unfortunately tend to promote to float64: + assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.float64)) + assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.int64)) + assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.int32(-1), np.array([u64])), + np.dtype(np.float64)) + assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float64)) + assert_equal(promote_func(fld, np.array([f32])), + np.dtype(np.longdouble)) + assert_equal(promote_func(np.array([f64]), fld), + np.dtype(np.longdouble)) + assert_equal(promote_func(fld, np.array([c64])), + np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, np.array([f64])), + np.dtype(np.complex128)) + assert_equal(promote_func(np.complex64(3j), np.array([f64])), + np.dtype(np.complex128)) + 
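# ---- editor's note: illustrative aside, not part of the vendored suite ---- + # The kind-crossing promotions checked here can be reproduced by hand; + # a minimal sketch, assuming only that NumPy is importable as np: + # >>> np.promote_types(np.int8, np.uint8) # mixed sign -> wider signed type + # dtype('int16') + # >>> np.result_type(np.int64, np.float32) # int64 does not fit in float32 + # dtype('float64') + # ---- end editor's note ---- +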
assert_equal(promote_func(np.array([f32]), c128), + np.dtype(np.complex128)) + + # coercion between scalars and 1-D arrays, where + # the scalar has greater kind than the array + assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) + assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) + assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) + + def test_coercion(self): + def res_type(a, b): + return np.add(a, b).dtype + + self.check_promotion_cases(res_type) + + # Use-case: float/complex scalar * bool/int8 array + # shouldn't narrow the float/complex type + for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: + b = 1.234 * a + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") + b = np.longdouble(1.234) * a + assert_equal(b.dtype, np.dtype(np.longdouble), + f"array type {a.dtype}") + b = np.float64(1.234) * a + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") + b = np.float32(1.234) * a + assert_equal(b.dtype, np.dtype('f4'), f"array type {a.dtype}") + b = np.float16(1.234) * a + assert_equal(b.dtype, np.dtype('f2'), f"array type {a.dtype}") + + b = 1.234j * a + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") + b = np.clongdouble(1.234j) * a + assert_equal(b.dtype, np.dtype(np.clongdouble), + f"array type {a.dtype}") + b = np.complex128(1.234j) * a + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") + b = np.complex64(1.234j) * a + assert_equal(b.dtype, np.dtype('c8'), f"array type {a.dtype}") + + # The following use-case is problematic, and to resolve its + # tricky side-effects requires more changes. + # + # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is + # a float32, shouldn't promote to float64 + # + # a = np.array([1.0, 1.5], dtype=np.float32) + # t = np.array([True, False]) + # b = t*a + # assert_equal(b, [1.0, 0.0]) + # assert_equal(b.dtype, np.dtype('f4')) + # b = (1-t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + # + # Probably ~t (bitwise negation) is more proper to use here, + # but this is arguably less intuitive to understand at a glance, and + # would fail if 't' is actually an integer array instead of boolean: + # + # b = (~t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + + def test_result_type(self): + self.check_promotion_cases(np.result_type) + assert_(np.result_type(None) == np.dtype(None)) + + def test_promote_types_endian(self): + # promote_types should always return native-endian types + assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8')) + assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8')) + + assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) + assert_equal(np.promote_types('<U16', '>i8'), np.dtype('U21')) + assert_equal(np.promote_types('<S5', '>U8'), np.dtype('U8')) + assert_equal(np.promote_types('<U8', '>S5'), np.dtype('U8')) + assert_equal(np.promote_types('<U5', '>U8'), np.dtype('U8')) + assert_equal(np.promote_types('<U8', '>U5'), np.dtype('U8')) + + assert_equal(np.promote_types('<M8', '>M8'), np.dtype('M8')) + assert_equal(np.promote_types('<m8', '>m8'), np.dtype('m8')) + + def test_can_cast_and_promote_usertypes(self): + # The rational type defines safe casting for signed integers, + # boolean. Rational itself *does* cast safely to double. + # (rational does not actually cast to all signed integers, e.g.
+ # int64 can be both long and longlong and it registers only the first) + valid_types = ["int8", "int16", "int32", "int64", "bool"] + invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V" + + rational_dt = np.dtype(rational) + for numpy_dtype in valid_types: + numpy_dtype = np.dtype(numpy_dtype) + assert np.can_cast(numpy_dtype, rational_dt) + assert np.promote_types(numpy_dtype, rational_dt) is rational_dt + + for numpy_dtype in invalid_types: + numpy_dtype = np.dtype(numpy_dtype) + assert not np.can_cast(numpy_dtype, rational_dt) + with pytest.raises(TypeError): + np.promote_types(numpy_dtype, rational_dt) + + double_dt = np.dtype("double") + assert np.can_cast(rational_dt, double_dt) + assert np.promote_types(double_dt, rational_dt) is double_dt + + @pytest.mark.parametrize("swap", ["", "swap"]) + @pytest.mark.parametrize("string_dtype", ["U", "S"]) + def test_promote_types_strings(self, swap, string_dtype): + if swap == "swap": + promote_types = lambda a, b: np.promote_types(b, a) + else: + promote_types = np.promote_types + + S = string_dtype + + # Promote numeric with unsized string: + assert_equal(promote_types('bool', S), np.dtype(S + '5')) + assert_equal(promote_types('b', S), np.dtype(S + '4')) + assert_equal(promote_types('u1', S), np.dtype(S + '3')) + assert_equal(promote_types('u2', S), np.dtype(S + '5')) + assert_equal(promote_types('u4', S), np.dtype(S + '10')) + assert_equal(promote_types('u8', S), np.dtype(S + '20')) + assert_equal(promote_types('i1', S), np.dtype(S + '4')) + assert_equal(promote_types('i2', S), np.dtype(S + '6')) + assert_equal(promote_types('i4', S), np.dtype(S + '11')) + assert_equal(promote_types('i8', S), np.dtype(S + '21')) + # Promote numeric with sized string: + assert_equal(promote_types('bool', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('bool', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('b', S + '1'), np.dtype(S + '4')) + assert_equal(promote_types('b', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u1', S + '1'), np.dtype(S + '3')) + assert_equal(promote_types('u1', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u2', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('u2', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u4', S + '1'), np.dtype(S + '10')) + assert_equal(promote_types('u4', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u8', S + '1'), np.dtype(S + '20')) + assert_equal(promote_types('u8', S + '30'), np.dtype(S + '30')) + # Promote with object: + assert_equal(promote_types('O', S + '30'), np.dtype('O')) + + @pytest.mark.parametrize(["dtype1", "dtype2"], + [[np.dtype("V6"), np.dtype("V10")], # mismatch shape + # Mismatching names: + [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])], + ]) + def test_invalid_void_promotion(self, dtype1, dtype2): + with pytest.raises(TypeError): + np.promote_types(dtype1, dtype2) + + @pytest.mark.parametrize(["dtype1", "dtype2"], + [[np.dtype("V10"), np.dtype("V10")], + [np.dtype([("name1", "i8")]), + np.dtype([("name1", np.dtype("i8").newbyteorder())])], + [np.dtype("i8,i8"), np.dtype("i8,>i8")], + [np.dtype("i8,i8"), np.dtype("i4,i4")], + ]) + def test_valid_void_promotion(self, dtype1, dtype2): + assert np.promote_types(dtype1, dtype2) == dtype1 + + @pytest.mark.parametrize("dtype", + list(np.typecodes["All"]) + + ["i,i", "10i", "S3", "S100", "U3", "U100", rational]) + def test_promote_identical_types_metadata(self, dtype): + # The same type passed in twice to promote types always + # preserves 
metadata + metadata = {1: 1} + dtype = np.dtype(dtype, metadata=metadata) + + res = np.promote_types(dtype, dtype) + assert res.metadata == dtype.metadata + + # byte-swapping preserves and makes the dtype native: + dtype = dtype.newbyteorder() + if dtype.isnative: + # The type does not have byte swapping + return + + res = np.promote_types(dtype, dtype) + + # Metadata is (currently) generally lost on byte-swapping (except for + # unicode). + if dtype.char != "U": + assert res.metadata is None + else: + assert res.metadata == metadata + assert res.isnative + + @pytest.mark.slow + @pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning') + @pytest.mark.parametrize(["dtype1", "dtype2"], + itertools.product( + list(np.typecodes["All"]) + + ["i,i", "S3", "S100", "U3", "U100", rational], + repeat=2)) + def test_promote_types_metadata(self, dtype1, dtype2): + """Metadata handling in promotion does not appear formalized + right now in NumPy. This test should thus be considered to + document behaviour, rather than test the correct definition of it. + + This test is very ugly; it was useful for rewriting part of the + promotion, but probably should eventually be replaced/deleted + (i.e. when metadata handling in promotion is better defined). + """ + metadata1 = {1: 1} + metadata2 = {2: 2} + dtype1 = np.dtype(dtype1, metadata=metadata1) + dtype2 = np.dtype(dtype2, metadata=metadata2) + + try: + res = np.promote_types(dtype1, dtype2) + except TypeError: + # Promotion failed, this test only checks metadata + return + + if res.char not in "USV" or res.names is not None or res.shape != (): + # All except string dtypes (and unstructured void) lose metadata + # on promotion (unless both dtypes are identical). + # At some point structured ones did not, but were restrictive. + assert res.metadata is None + elif res == dtype1: + # If one of the inputs equals the result, it is usually returned + # unchanged: + assert res is dtype1 + elif res == dtype2: + # dtype1 may have been cast to the same type/kind as dtype2.
+ # If the resulting dtype is identical we currently pick the cast + # version of dtype1, which lost the metadata: + if np.promote_types(dtype1, dtype2.kind) == dtype2: + assert res.metadata is None + else: + assert res.metadata == metadata2 + else: + assert res.metadata is None + + # Try again for byteswapped version + dtype1 = dtype1.newbyteorder() + assert dtype1.metadata == metadata1 + res_bs = np.promote_types(dtype1, dtype2) + assert res_bs == res + assert res_bs.metadata == res.metadata + + def test_can_cast(self): + assert_(np.can_cast(np.int32, np.int64)) + assert_(np.can_cast(np.float64, complex)) + assert_(not np.can_cast(complex, float)) + + assert_(np.can_cast('i8', 'f8')) + assert_(not np.can_cast('i8', 'f4')) + assert_(np.can_cast('i4', 'S11')) + + assert_(np.can_cast('i8', 'i8', 'no')) + assert_(not np.can_cast('<i8', '>i8', 'no')) + + assert_(np.can_cast('<i8', '>i8', 'equiv')) + assert_(not np.can_cast('<i4', '>i8', 'equiv')) + + assert_(np.can_cast('<i4', '>i8', 'safe')) + assert_(not np.can_cast('<i8', '>i4', 'safe')) + + assert_(np.can_cast('<i8', '>i4', 'same_kind')) + assert_(not np.can_cast('<i8', '>u4', 'same_kind')) + + assert_(np.can_cast('<i8', '>u4', 'unsafe')) + + assert_(np.can_cast('bool', 'S5')) + assert_(not np.can_cast('bool', 'S4')) + + assert_(np.can_cast('b', 'S4')) + assert_(not np.can_cast('b', 'S3')) + + assert_(np.can_cast('u1', 'S3')) + assert_(not np.can_cast('u1', 'S2')) + assert_(np.can_cast('u2', 'S5')) + assert_(not np.can_cast('u2', 'S4')) + assert_(np.can_cast('u4', 'S10')) + assert_(not np.can_cast('u4', 'S9')) + assert_(np.can_cast('u8', 'S20')) + assert_(not np.can_cast('u8', 'S19')) + + assert_(np.can_cast('i1', 'S4')) + assert_(not np.can_cast('i1', 'S3')) + assert_(np.can_cast('i2', 'S6')) + assert_(not np.can_cast('i2', 'S5')) + assert_(np.can_cast('i4', 'S11')) + assert_(not np.can_cast('i4', 'S10')) + assert_(np.can_cast('i8', 'S21')) + assert_(not np.can_cast('i8', 'S20')) + + assert_(np.can_cast('bool', 'S5')) + assert_(not np.can_cast('bool', 'S4')) + + assert_(np.can_cast('b', 'U4')) + assert_(not np.can_cast('b', 'U3')) + + assert_(np.can_cast('u1', 'U3')) + assert_(not np.can_cast('u1', 'U2')) + assert_(np.can_cast('u2', 'U5')) + assert_(not np.can_cast('u2', 'U4')) + assert_(np.can_cast('u4', 'U10')) + assert_(not np.can_cast('u4', 'U9')) + assert_(np.can_cast('u8', 'U20')) + assert_(not np.can_cast('u8', 'U19')) + + assert_(np.can_cast('i1', 'U4')) + assert_(not np.can_cast('i1', 'U3')) + assert_(np.can_cast('i2', 'U6')) + assert_(not np.can_cast('i2', 'U5')) + assert_(np.can_cast('i4', 'U11')) + assert_(not np.can_cast('i4', 'U10')) + assert_(np.can_cast('i8', 'U21')) + assert_(not np.can_cast('i8', 'U20')) + + assert_raises(TypeError, np.can_cast, 'i4', None) + assert_raises(TypeError, np.can_cast, None, 'i4') + + # Also test keyword arguments + assert_(np.can_cast(from_=np.int32, to=np.int64)) + + def test_can_cast_simple_to_structured(self): + # Non-structured can only be cast to structured in 'unsafe' mode. + assert_(not np.can_cast('i4', 'i4,i4')) + assert_(not np.can_cast('i4', 'i4,i2')) + assert_(np.can_cast('i4', 'i4,i4', casting='unsafe')) + assert_(np.can_cast('i4', 'i4,i2', casting='unsafe')) + # Even if there is just a single field which is OK. + assert_(not np.can_cast('i2', [('f1', 'i4')])) + assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind')) + assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe')) + # It should be the same for recursive structured or subarrays.
+ assert_(not np.can_cast('i2', [('f1', 'i4,i4')])) + assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe')) + assert_(not np.can_cast('i2', [('f1', '(2,3)i4')])) + assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe')) + + def test_can_cast_structured_to_simple(self): + # Need unsafe casting for structured to simple. + assert_(not np.can_cast([('f1', 'i4')], 'i4')) + assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe')) + assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe')) + # Since it is unclear what is being cast, multiple fields to + # single should not work even for unsafe casting. + assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe')) + # But a single field inside a single field is OK. + assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4')) + assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe')) + # And a subarray is fine too - it will just take the first element + # (arguably not very consistently; might also take the first field). + assert_(not np.can_cast([('f0', '(3,)i4')], 'i4')) + assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe')) + # But a structured subarray with multiple fields should fail. + assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', + casting='unsafe')) + + def test_can_cast_values(self): + # With NumPy 2 and NEP 50, can_cast errors on Python scalars. We could + # define this as (usually safe) at some point, and already do so + # in `copyto` and ufuncs (but there an error is raised if the integer + # is out of bounds and a warning for out-of-bound floats). + # Raises even for unsafe, previously checked within range (for floats + # that was approximately whether it would overflow to inf). + with pytest.raises(TypeError): + np.can_cast(4, "int8", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4.0, "float64", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4j, "complex128", casting="unsafe") + + @pytest.mark.parametrize("dtype", + list("?bhilqBHILQefdgFDG") + [rational]) + def test_can_cast_scalars(self, dtype): + # Basic test to ensure that scalars are supported in can-cast + # (does not check behavior exhaustively). + dtype = np.dtype(dtype) + scalar = dtype.type(0) + + assert np.can_cast(scalar, "int64") == np.can_cast(dtype, "int64") + assert np.can_cast(scalar, "float32", casting="unsafe") + + +# Custom exception class to test exception propagation in fromiter +class NIterError(Exception): + pass + + +class TestFromiter: + def makegen(self): + return (x**2 for x in range(24)) + + def test_types(self): + ai32 = np.fromiter(self.makegen(), np.int32) + ai64 = np.fromiter(self.makegen(), np.int64) + af = np.fromiter(self.makegen(), float) + assert_(ai32.dtype == np.dtype(np.int32)) + assert_(ai64.dtype == np.dtype(np.int64)) + assert_(af.dtype == np.dtype(float)) + + def test_lengths(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(len(a) == len(expected)) + assert_(len(a20) == 20) + assert_raises(ValueError, np.fromiter, + self.makegen(), int, len(expected) + 10) + + def test_values(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(np.all(a == expected, axis=0)) + assert_(np.all(a20 == expected[:20], axis=0)) + + def load_data(self, n, eindex): + # Utility method for the issue 2592 tests. + # Raise an exception at the desired index in the iterator. 
+ for e in range(n): + if e == eindex: + raise NIterError(f'error at index {eindex}') + yield e + + @pytest.mark.parametrize("dtype", [int, object]) + @pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)]) + def test_2592(self, count, error_index, dtype): + # Test iteration exceptions are correctly raised. The data/generator + # has `count` elements but errors at `error_index` + iterable = self.load_data(count, error_index) + with pytest.raises(NIterError): + np.fromiter(iterable, dtype=dtype, count=count) + + @pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"]) + def test_empty_not_structured(self, dtype): + # Note, "S0" could be allowed at some point, so long "S" (without + # any length) is rejected. + with pytest.raises(ValueError, match="Must specify length"): + np.fromiter([], dtype=dtype) + + @pytest.mark.parametrize(["dtype", "data"], + [("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]), + # subarray dtypes (important because their dimensions end up + # in the result arrays dimension: + ("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]), + (np.dtype(("O", (2, 3))), + [((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])]) + @pytest.mark.parametrize("length_hint", [0, 1]) + def test_growth_and_complicated_dtypes(self, dtype, data, length_hint): + dtype = np.dtype(dtype) + + data = data * 100 # make sure we realloc a bit + + class MyIter: + # Class/example from gh-15789 + def __length_hint__(self): + # only required to be an estimate, this is legal + return length_hint # 0 or 1 + + def __iter__(self): + return iter(data) + + res = np.fromiter(MyIter(), dtype=dtype) + expected = np.array(data, dtype=dtype) + + assert_array_equal(res, expected) + + def test_empty_result(self): + class MyIter: + def __length_hint__(self): + return 10 + + def __iter__(self): + return iter([]) # actual iterator is empty. + + res = np.fromiter(MyIter(), dtype="d") + assert res.shape == (0,) + assert res.dtype == "d" + + def test_too_few_items(self): + msg = "iterator too short: Expected 10 but iterator had only 3 items." 
+ with pytest.raises(ValueError, match=msg): + np.fromiter([1, 2, 3], count=10, dtype=int) + + def test_failed_itemsetting(self): + with pytest.raises(TypeError): + np.fromiter([1, None, 3], dtype=int) + + # The following manages to hit somewhat trickier code paths: + iterable = ((2, 3, 4) for i in range(5)) + with pytest.raises(ValueError): + np.fromiter(iterable, dtype=np.dtype((int, 2))) + +class TestNonzero: + def test_nonzero_trivial(self): + assert_equal(np.count_nonzero(np.array([])), 0) + assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) + assert_equal(np.nonzero(np.array([])), ([],)) + + assert_equal(np.count_nonzero(np.array([0])), 0) + assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0) + assert_equal(np.nonzero(np.array([0])), ([],)) + + assert_equal(np.count_nonzero(np.array([1])), 1) + assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1) + assert_equal(np.nonzero(np.array([1])), ([0],)) + + def test_nonzero_zerodim(self): + err_msg = "Calling nonzero on 0d arrays is not allowed" + with assert_raises_regex(ValueError, err_msg): + np.nonzero(np.array(0)) + with assert_raises_regex(ValueError, err_msg): + np.array(1).nonzero() + + def test_nonzero_onedim(self): + x = np.array([1, 0, 2, -1, 0, 0, 8]) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) + + # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + # dtype=[('a', 'i4'), ('b', 'i2')]) + x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + assert_equal(np.count_nonzero(x['a']), 3) + assert_equal(np.count_nonzero(x['b']), 4) + assert_equal(np.count_nonzero(x['c']), 3) + assert_equal(np.count_nonzero(x['d']), 4) + assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) + assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) + + def test_nonzero_twodim(self): + x = np.array([[0, 1, 0], [2, 0, 3]]) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) + + x = np.eye(3) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) + + x = np.array([[(0, 1), (0, 0), (1, 11)], + [(1, 1), (1, 0), (0, 0)], + [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) + assert_equal(np.count_nonzero(x['a']), 4) + assert_equal(np.count_nonzero(x['b']), 5) + assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) + assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) + + assert_(not x['a'].T.flags.aligned) + assert_equal(np.count_nonzero(x['a'].T), 4) + assert_equal(np.count_nonzero(x['b'].T), 5) + assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) + assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) + + def test_sparse(self): + # test special sparse condition boolean code path + for i in range(20): + c = np.zeros(200, dtype=bool) + c[i::20] = True + assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) + + c = np.zeros(400, dtype=bool) + c[10 + i:20 + i] = True + c[20 + i * 2] = True + assert_equal(np.nonzero(c)[0], + np.concatenate((np.arange(10 + i, 20 + i), [20 + i * 2]))) + + 
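# ---- editor's note: illustrative aside, not part of the vendored suite ---- + # np.nonzero returns a tuple holding one index array per dimension; the + # sparse checks above use np.nonzero(c)[0], the first element of that + # tuple. A tiny sketch, assuming only that NumPy is importable as np: + # >>> np.nonzero(np.array([[3, 0], [0, 7]])) + # (array([0, 1]), array([0, 1])) + # ---- end editor's note ---- + +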
@pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_nonzero_float_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = ((2**33) * rng.normal(size=100)).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + + @pytest.mark.parametrize('dtype', [bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) + def test_nonzero_integer_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = rng.integers(0, 255, size=100).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + + def test_return_type(self): + class C(np.ndarray): + pass + + for view in (C, np.ndarray): + for nd in range(1, 4): + shape = tuple(range(2, 2 + nd)) + x = np.arange(np.prod(shape)).reshape(shape).view(view) + for nzx in (np.nonzero(x), x.nonzero()): + for nzx_i in nzx: + assert_(type(nzx_i) is np.ndarray) + assert_(nzx_i.flags.writeable) + + def test_count_nonzero_axis(self): + # Basic check of functionality + m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]) + + expected = np.array([1, 1, 1, 1, 1]) + assert_equal(np.count_nonzero(m, axis=0), expected) + + expected = np.array([2, 3]) + assert_equal(np.count_nonzero(m, axis=1), expected) + + assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1)) + assert_raises(TypeError, np.count_nonzero, m, axis='foo') + assert_raises(AxisError, np.count_nonzero, m, axis=3) + assert_raises(TypeError, np.count_nonzero, + m, axis=np.array([[1], [2]])) + + def test_count_nonzero_axis_all_dtypes(self): + # More thorough test that the axis argument is respected + # for all dtypes and responds correctly when presented with + # either integer or tuple arguments for axis + msg = "Mismatch for dtype: %s" + + def assert_equal_w_dt(a, b, err_msg): + assert_equal(a.dtype, b.dtype, err_msg=err_msg) + assert_equal(a, b, err_msg=err_msg) + + for dt in np.typecodes['All']: + err_msg = msg % (np.dtype(dt).name,) + + if dt != 'V': + if dt != 'M': + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + + else: # np.zeros doesn't work for np.datetime64 + m = np.array(['1970-01-01'] * 9) + m = m.reshape((3, 3)) + + m[0, 0] = '1970-01-12' + m[1, 0] = '1970-01-12' + m = m.astype(dt) + + expected = np.array([2, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([1, 1, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(2) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + if dt == 'V': + # There are no 'nonzero' objects for np.void, so the testing + # setup is slightly different for this dtype + m = np.array([np.void(1)] * 6).reshape((2, 3)) + + expected = np.array([0, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(0) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, 
err_msg=err_msg) + + def test_count_nonzero_axis_consistent(self): + # Check that the axis behaviour for valid axes in + # non-special cases is consistent (and therefore + # correct) by checking it against an integer array + # that is then casted to the generic object dtype + from itertools import combinations, permutations + + axis = (0, 1, 2, 3) + size = (5, 5, 5, 5) + msg = "Mismatch for axis: %s" + + rng = np.random.RandomState(1234) + m = rng.randint(-100, 100, size=size) + n = m.astype(object) + + for length in range(len(axis)): + for combo in combinations(axis, length): + for perm in permutations(combo): + assert_equal( + np.count_nonzero(m, axis=perm), + np.count_nonzero(n, axis=perm), + err_msg=msg % (perm,)) + + def test_countnonzero_axis_empty(self): + a = np.array([[0, 0, 1], [1, 0, 1]]) + assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) + + def test_countnonzero_keepdims(self): + a = np.array([[0, 0, 1, 0], + [0, 3, 5, 0], + [7, 9, 2, 0]]) + assert_equal(np.count_nonzero(a, axis=0, keepdims=True), + [[1, 2, 3, 0]]) + assert_equal(np.count_nonzero(a, axis=1, keepdims=True), + [[1], [2], [3]]) + assert_equal(np.count_nonzero(a, keepdims=True), + [[6]]) + + def test_array_method(self): + # Tests that the array method + # call to nonzero works + m = np.array([[1, 0, 0], [4, 0, 6]]) + tgt = [[0, 1, 1], [0, 0, 2]] + + assert_equal(m.nonzero(), tgt) + + def test_nonzero_invalid_object(self): + # gh-9295 + a = np.array([np.array([1, 2]), 3], dtype=object) + assert_raises(ValueError, np.nonzero, a) + + class BoolErrors: + def __bool__(self): + raise ValueError("Not allowed") + + assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) + + def test_nonzero_sideeffect_safety(self): + # gh-13631 + class FalseThenTrue: + _val = False + + def __bool__(self): + try: + return self._val + finally: + self._val = True + + class TrueThenFalse: + _val = True + + def __bool__(self): + try: + return self._val + finally: + self._val = False + + # result grows on the second pass + a = np.array([True, FalseThenTrue()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[True], [FalseThenTrue()]]) + assert_raises(RuntimeError, np.nonzero, a) + + # result shrinks on the second pass + a = np.array([False, TrueThenFalse()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[False], [TrueThenFalse()]]) + assert_raises(RuntimeError, np.nonzero, a) + + def test_nonzero_sideffects_structured_void(self): + # Checks that structured void does not mutate alignment flag of + # original array. + arr = np.zeros(5, dtype="i1,i8,i8") # `ones` may short-circuit + assert arr.flags.aligned # structs are considered "aligned" + assert not arr["f2"].flags.aligned + # make sure that nonzero/count_nonzero do not flip the flag: + np.nonzero(arr) + assert arr.flags.aligned + np.count_nonzero(arr) + assert arr.flags.aligned + + def test_nonzero_exception_safe(self): + # gh-13930 + + class ThrowsAfter: + def __init__(self, iters): + self.iters_left = iters + + def __bool__(self): + if self.iters_left == 0: + raise ValueError("called `iters` times") + + self.iters_left -= 1 + return True + + """ + Test that a ValueError is raised instead of a SystemError + + If the __bool__ function is called after the error state is set, + Python (cpython) will raise a SystemError. 
+ """ + + # assert that an exception in first pass is handled correctly + a = np.array([ThrowsAfter(5)] * 10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for 1-dimensional loop + a = np.array([ThrowsAfter(15)] * 10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for n-dimensional loop + a = np.array([[ThrowsAfter(15)]] * 10) + assert_raises(ValueError, np.nonzero, a) + + def test_nonzero_byteorder(self): + values = [0., -0., 1, float('nan'), 0, 1, + np.float16(0), np.float16(12.3)] + expected_values = [0, 0, 1, 1, 0, 1, 0, 1] + + for value, expected in zip(values, expected_values): + A = np.array([value]) + A_byteswapped = (A.view(A.dtype.newbyteorder()).byteswap()).copy() + + assert np.count_nonzero(A) == expected + assert np.count_nonzero(A_byteswapped) == expected + + def test_count_nonzero_non_aligned_array(self): + # gh-27523 + b = np.zeros(64 + 1, dtype=np.int8)[1:] + b = b.view(int) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 + + b = np.zeros(64 + 1, dtype=np.float16)[1:] + b = b.view(float) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 + + +class TestIndex: + def test_boolean(self): + a = rand(3, 5, 8) + V = rand(5, 8) + g1 = randint(0, 5, size=15) + g2 = randint(0, 8, size=15) + V[g1, g2] = -V[g1, g2] + assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + + def test_boolean_edgecase(self): + a = np.array([], dtype='int32') + b = np.array([], dtype='bool') + c = a[b] + assert_equal(c, []) + assert_equal(c.dtype, np.dtype('int32')) + + +class TestBinaryRepr: + def test_zero(self): + assert_equal(np.binary_repr(0), '0') + + def test_positive(self): + assert_equal(np.binary_repr(10), '1010') + assert_equal(np.binary_repr(12522), + '11000011101010') + assert_equal(np.binary_repr(10736848), + '101000111101010011010000') + + def test_negative(self): + assert_equal(np.binary_repr(-1), '-1') + assert_equal(np.binary_repr(-10), '-1010') + assert_equal(np.binary_repr(-12522), + '-11000011101010') + assert_equal(np.binary_repr(-10736848), + '-101000111101010011010000') + + def test_sufficient_width(self): + assert_equal(np.binary_repr(0, width=5), '00000') + assert_equal(np.binary_repr(10, width=7), '0001010') + assert_equal(np.binary_repr(-5, width=7), '1111011') + + def test_neg_width_boundaries(self): + # see gh-8670 + + # Ensure that the example in the issue does not + # break before proceeding to a more thorough test. + assert_equal(np.binary_repr(-128, width=8), '10000000') + + for width in range(1, 11): + num = -2**(width - 1) + exp = '1' + (width - 1) * '0' + assert_equal(np.binary_repr(num, width=width), exp) + + def test_large_neg_int64(self): + # See gh-14289. 
+ assert_equal(np.binary_repr(np.int64(-2**62), width=64), + '11' + '0' * 62) + + +class TestBaseRepr: + def test_base3(self): + assert_equal(np.base_repr(3**5, 3), '100000') + + def test_positive(self): + assert_equal(np.base_repr(12, 10), '12') + assert_equal(np.base_repr(12, 10, 4), '000012') + assert_equal(np.base_repr(12, 4), '30') + assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW') + + def test_negative(self): + assert_equal(np.base_repr(-12, 10), '-12') + assert_equal(np.base_repr(-12, 10, 4), '-000012') + assert_equal(np.base_repr(-12, 4), '-30') + + def test_base_range(self): + with assert_raises(ValueError): + np.base_repr(1, 1) + with assert_raises(ValueError): + np.base_repr(1, 37) + + def test_minimal_signed_int(self): + assert_equal(np.base_repr(np.int8(-128)), '-10000000') + + +def _test_array_equal_parametrizations(): + """ + We pre-create arrays, as we sometimes want to pass the same instance + and sometimes not. Passing the same instance may not mean the arrays are + equal, especially when they contain NaNs. + """ + # these are 0-d arrays; it used to be a special case + # where (e0 == e0).all() would raise + e0 = np.array(0, dtype="int") + e1 = np.array(1, dtype="float") + # x, y, nan_equal, expected_result + yield (e0, e0.copy(), None, True) + yield (e0, e0.copy(), False, True) + yield (e0, e0.copy(), True, True) + + # + yield (e1, e1.copy(), None, True) + yield (e1, e1.copy(), False, True) + yield (e1, e1.copy(), True, True) + + # Non-nanable - those cannot hold NaNs + a12 = np.array([1, 2]) + a12b = a12.copy() + a123 = np.array([1, 2, 3]) + a13 = np.array([1, 3]) + a34 = np.array([3, 4]) + + aS1 = np.array(["a"], dtype="S1") + aS1b = aS1.copy() + aS1u4 = np.array([("a", 1)], dtype="S1,u4") + aS1u4b = aS1u4.copy() + + yield (a12, a12b, None, True) + yield (a12, a12, None, True) + yield (a12, a123, None, False) + yield (a12, a34, None, False) + yield (a12, a13, None, False) + yield (aS1, aS1b, None, True) + yield (aS1, aS1, None, True) + + # Non-float dtype - equal_nan should have no effect + yield (a123, a123, None, True) + yield (a123, a123, False, True) + yield (a123, a123, True, True) + yield (a123, a123.copy(), None, True) + yield (a123, a123.copy(), False, True) + yield (a123, a123.copy(), True, True) + yield (a123.astype("float"), a123.astype("float"), None, True) + yield (a123.astype("float"), a123.astype("float"), False, True) + yield (a123.astype("float"), a123.astype("float"), True, True) + + # these can hold NaNs + b1 = np.array([1, 2, np.nan]) + b2 = np.array([1, np.nan, 2]) + b3 = np.array([1, 2, np.inf]) + b4 = np.array(np.nan) + + # instances are the same + yield (b1, b1, None, False) + yield (b1, b1, False, False) + yield (b1, b1, True, True) + + # equal but not same instance + yield (b1, b1.copy(), None, False) + yield (b1, b1.copy(), False, False) + yield (b1, b1.copy(), True, True) + + # same once stripped of NaN + yield (b1, b2, None, False) + yield (b1, b2, False, False) + yield (b1, b2, True, False) + + # NaNs not conflated with infs + yield (b1, b3, None, False) + yield (b1, b3, False, False) + yield (b1, b3, True, False) + + # all NaN + yield (b4, b4, None, False) + yield (b4, b4, False, False) + yield (b4, b4, True, True) + yield (b4, b4.copy(), None, False) + yield (b4, b4.copy(), False, False) + yield (b4, b4.copy(), True, True) + + t1 = b1.astype("timedelta64") + t2 = b2.astype("timedelta64") + + # Timedeltas are particular: the NaNs become NaT, which only compares + # equal to itself when equal_nan=True + yield (t1, t1, None, False) + yield (t1, t1, False, False) + yield (t1, t1, True, True) + + yield (t1,
t1.copy(), None, False) + yield (t1, t1.copy(), False, False) + yield (t1, t1.copy(), True, True) + + yield (t1, t2, None, False) + yield (t1, t2, False, False) + yield (t1, t2, True, False) + + # Multi-dimensional array + md1 = np.array([[0, 1], [np.nan, 1]]) + + yield (md1, md1, None, False) + yield (md1, md1, False, False) + yield (md1, md1, True, True) + yield (md1, md1.copy(), None, False) + yield (md1, md1.copy(), False, False) + yield (md1, md1.copy(), True, True) + # both complexes are nan+nan.j but the same instance + cplx1, cplx2 = [np.array([np.nan + np.nan * 1j])] * 2 + + # only real or img are nan. + cplx3, cplx4 = np.complex64(1, np.nan), np.complex64(np.nan, 1) + + # Complex values + yield (cplx1, cplx2, None, False) + yield (cplx1, cplx2, False, False) + yield (cplx1, cplx2, True, True) + + # Complex values, 1+nan, nan+1j + yield (cplx3, cplx4, None, False) + yield (cplx3, cplx4, False, False) + yield (cplx3, cplx4, True, True) + + +class TestArrayComparisons: + @pytest.mark.parametrize( + "bx,by,equal_nan,expected", _test_array_equal_parametrizations() + ) + def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): + """ + This test array_equal for a few combinations: + + - are the two inputs the same object or not (same object may not + be equal if contains NaNs) + - Whether we should consider or not, NaNs, being equal. + + """ + if equal_nan is None: + res = np.array_equal(bx, by) + else: + res = np.array_equal(bx, by, equal_nan=equal_nan) + assert_(res is expected) + assert_(type(res) is bool) + + def test_array_equal_different_scalar_types(self): + # https://github.com/numpy/numpy/issues/27271 + a = np.array("foo") + b = np.array(1) + assert not np.array_equal(a, b) + assert not np.array_equiv(a, b) + + def test_none_compares_elementwise(self): + a = np.array([None, 1, None], dtype=object) + assert_equal(a == None, [True, False, True]) # noqa: E711 + assert_equal(a != None, [False, True, False]) # noqa: E711 + + a = np.ones(3) + assert_equal(a == None, [False, False, False]) # noqa: E711 + assert_equal(a != None, [True, True, True]) # noqa: E711 + + def test_array_equiv(self): + res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + + res = np.array_equiv(np.array([1, 1]), np.array([1])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([2])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + assert_(not res) + assert_(type(res) is bool) + + @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"]) + def test_compare_unstructured_voids(self, dtype): + zeros = np.zeros(3, dtype=dtype) + + assert_array_equal(zeros, zeros) + assert not (zeros != zeros).any() + + if dtype == "V0": + # Can't test != of actually different data + return + + nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype) + + assert not (zeros == nonzeros).any() + assert (zeros != 
nonzeros).all() + + +def assert_array_strict_equal(x, y): + assert_array_equal(x, y) + # Check flags, 32 bit arches typically don't provide 16 byte alignment + if ((x.dtype.alignment <= 8 or + np.intp().dtype.itemsize != 4) and + sys.platform != 'win32'): + assert_(x.flags == y.flags) + else: + assert_(x.flags.owndata == y.flags.owndata) + assert_(x.flags.writeable == y.flags.writeable) + assert_(x.flags.c_contiguous == y.flags.c_contiguous) + assert_(x.flags.f_contiguous == y.flags.f_contiguous) + assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) + # check endianness + assert_(x.dtype.isnative == y.dtype.isnative) + + +class TestClip: + def setup_method(self): + self.nr = 5 + self.nc = 3 + + def fastclip(self, a, m, M, out=None, **kwargs): + return a.clip(m, M, out=out, **kwargs) + + def clip(self, a, m, M, out=None): + # use a.choose to verify fastclip result + selector = np.less(a, m) + 2 * np.greater(a, M) + return selector.choose((a, m, M), out=out) + + # Handy functions + def _generate_data(self, n, m): + return randn(n, m) + + def _generate_data_complex(self, n, m): + return randn(n, m) + 1.j * rand(n, m) + + def _generate_flt_data(self, n, m): + return (randn(n, m)).astype(np.float32) + + def _neg_byteorder(self, a): + a = np.asarray(a) + if sys.byteorder == 'little': + a = a.astype(a.dtype.newbyteorder('>')) + else: + a = a.astype(a.dtype.newbyteorder('<')) + return a + + def _generate_non_native_data(self, n, m): + data = randn(n, m) + data = self._neg_byteorder(data) + assert_(not data.dtype.isnative) + return data + + def _generate_int_data(self, n, m): + return (10 * rand(n, m)).astype(np.int64) + + def _generate_int32_data(self, n, m): + return (10 * rand(n, m)).astype(np.int32) + + # Now the real test cases + + @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO') + def test_ones_pathological(self, dtype): + # for preservation of behavior described in + # gh-12519; amin > amax behavior may still change + # in the future + arr = np.ones(10, dtype=dtype) + expected = np.zeros(10, dtype=dtype) + actual = np.clip(arr, 1, 0) + if dtype == 'O': + assert actual.tolist() == expected.tolist() + else: + assert_equal(actual, expected) + + def test_simple_double(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.1 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_int(self): + # Test native int input with scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(int) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_array_double(self): + # Test native double input with array min/max. + a = self._generate_data(self.nr, self.nc) + m = np.zeros(a.shape) + M = m + 0.5 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_nonnative(self): + # Test non native double input with scalar min/max. + # Test native double input with non native double scalar min/max. + a = self._generate_non_native_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + # Test native double input with non native double scalar min/max. 
+ a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = self._neg_byteorder(0.6) + assert_(not M.dtype.isnative) + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + def test_simple_complex(self): + # Test native complex input with native double scalar min/max. + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data_complex(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data(self.nr, self.nc) + m = -0.5 + 1.j + M = 1. + 2.j + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_clip_complex(self): + # Address Issue gh-5354 for clipping complex arrays + # Test native complex input without explicit min/max + # ie, either min=None or max=None + a = np.ones(10, dtype=complex) + m = a.min() + M = a.max() + am = self.fastclip(a, m, None) + aM = self.fastclip(a, None, M) + assert_array_strict_equal(am, a) + assert_array_strict_equal(aM, a) + + def test_clip_non_contig(self): + # Test clip for non contiguous native input and native scalar min/max. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = self.fastclip(a, -1.6, 1.7) + act = self.clip(a, -1.6, 1.7) + assert_array_strict_equal(ac, act) + + def test_simple_out(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + @pytest.mark.parametrize("casting", [None, "unsafe"]) + def test_simple_int32_inout(self, casting): + # Test native int32 input with double min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + if casting is None: + with pytest.raises(TypeError): + self.fastclip(a, m, M, ac, casting=casting) + else: + # explicitly passing "unsafe" will silence warning + self.fastclip(a, m, M, ac, casting=casting) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_out(self): + # Test native int32 input with int32 scalar min/max and int64 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_inout(self): + # Test native int32 input with double array min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int32_out(self): + # Test native double input with scalar min/max and int out. + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_inplace_01(self): + # Test native double input with array min/max in-place. 
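+ # In-place behaviour in a nutshell (a hedged sketch, not upstream code): + # passing the input itself as ``out`` overwrites it and returns it: + # >>> a = np.array([-1.0, 0.5, 2.0]) + # >>> a.clip(0.0, 1.0, out=a) is a + # True + # >>> a + # array([0. , 0.5, 1. ])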
+ a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_simple_inplace_02(self): + # Test native double input with scalar min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_noncontig_inplace(self): + # Test non contiguous double input with double scalar min/max in-place. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_equal(a, ac) + + def test_type_cast_01(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_02(self): + # Test native int32 input with int32 scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(np.int32) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_03(self): + # Test native int32 input with float64 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = -2 + M = 4 + ac = self.fastclip(a, np.float64(m), np.float64(M)) + act = self.clip(a, np.float64(m), np.float64(M)) + assert_array_strict_equal(ac, act) + + def test_type_cast_04(self): + # Test native int32 input with float32 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float32(-2) + M = np.float32(4) + act = self.fastclip(a, m, M) + ac = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_05(self): + # Test native int32 with double arrays min/max. + a = self._generate_int_data(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m * np.zeros(a.shape), M) + act = self.clip(a, m * np.zeros(a.shape), M) + assert_array_strict_equal(ac, act) + + def test_type_cast_06(self): + # Test native with NON native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.5 + m_s = self._neg_byteorder(m) + M = 1. + act = self.clip(a, m_s, M) + ac = self.fastclip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_07(self): + # Test NON native with native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + act = a_s.clip(m, M) + ac = self.fastclip(a_s, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_08(self): + # Test NON native with native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + ac = self.fastclip(a_s, m, M) + act = a_s.clip(m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_09(self): + # Test native with NON native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + m_s = self._neg_byteorder(m) + assert_(not m_s.dtype.isnative) + ac = self.fastclip(a, m_s, M) + act = self.clip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_10(self): + # Test native int32 with float min/max and float out for output argument. 
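+ # Casting context (illustration only, not upstream code): the ``out`` + # argument follows ufunc casting rules, defaulting to "same_kind", so the + # float result computed below may be written into a float32 ``out``, + # whereas earlier tests needed casting="unsafe" to land in an int32 out: + # >>> np.clip(np.arange(3), 0.5, 1.5, out=np.empty(3, np.float32)) + # array([0.5, 1. , 1.5], dtype=float32)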
+ a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.float32(-0.5) + M = np.float32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_type_cast_11(self): + # Test non native with native scalar, min/max, out non native + a = self._generate_non_native_data(self.nr, self.nc) + b = a.copy() + b = b.astype(b.dtype.newbyteorder('>')) + bt = b.copy() + m = -0.5 + M = 1. + self.fastclip(a, m, M, out=b) + self.clip(a, m, M, out=bt) + assert_array_strict_equal(b, bt) + + def test_type_cast_12(self): + # Test native int32 input and min/max and float out + a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.int32(0) + M = np.int32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple2(self): + # Test native int32 input with double min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple_int32(self): + # Test native int32 input with int32 scalar min/max and int64 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_int32(self): + # Test native int32 input with double array min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_outint32(self): + # Test native double input with scalar min/max and int out + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_transposed(self): + # Test that the out argument works when transposed + a = np.arange(16).reshape(4, 4) + out = np.empty_like(a).T + a.clip(4, 10, out=out) + expected = self.clip(a, 4, 10) + assert_array_equal(out, expected) + + def test_clip_with_out_memory_overlap(self): + # Test that the out argument works when it has memory overlap + a = np.arange(16).reshape(4, 4) + ac = a.copy() + a[:-1].clip(4, 10, out=a[1:]) + expected = self.clip(ac[:-1], 4, 10) + assert_array_equal(a[1:], expected) + + def test_clip_inplace_array(self): + # Test native double input with array min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_inplace_simple(self): + # Test native double input with scalar min/max + a = 
self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_func_takes_out(self): + # Ensure that the clip() function takes an out=argument. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + a2 = np.clip(a, m, M, out=a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a2, ac) + assert_(a2 is a) + + def test_clip_nan(self): + d = np.arange(7.) + assert_equal(d.clip(min=np.nan), np.nan) + assert_equal(d.clip(max=np.nan), np.nan) + assert_equal(d.clip(min=np.nan, max=np.nan), np.nan) + assert_equal(d.clip(min=-2, max=np.nan), np.nan) + assert_equal(d.clip(min=np.nan, max=10), np.nan) + + def test_object_clip(self): + a = np.arange(10, dtype=object) + actual = np.clip(a, 1, 5) + expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5]) + assert actual.tolist() == expected.tolist() + + def test_clip_all_none(self): + arr = np.arange(10, dtype=object) + assert_equal(np.clip(arr, None, None), arr) + assert_equal(np.clip(arr), arr) + + def test_clip_invalid_casting(self): + a = np.arange(10, dtype=object) + with assert_raises_regex(ValueError, + 'casting must be one of'): + self.fastclip(a, 1, 8, casting="garbage") + + @pytest.mark.parametrize("amin, amax", [ + # two scalars + (1, 0), + # mix scalar and array + (1, np.zeros(10)), + # two arrays + (np.ones(10), np.zeros(10)), + ]) + def test_clip_value_min_max_flip(self, amin, amax): + a = np.arange(10, dtype=np.int64) + # requirement from ufunc_docstrings.py + expected = np.minimum(np.maximum(a, amin), amax) + actual = np.clip(a, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.parametrize("arr, amin, amax, exp", [ + # for a bug in npy_ObjectClip, based on a + # case produced by hypothesis + (np.zeros(10, dtype=object), + 0, + -2**64 + 1, + np.full(10, -2**64 + 1, dtype=object)), + # for bugs in NPY_TIMEDELTA_MAX, based on a case + # produced by hypothesis + (np.zeros(10, dtype='m8') - 1, + 0, + 0, + np.zeros(10, dtype='m8')), + ]) + def test_clip_problem_cases(self, arr, amin, amax, exp): + actual = np.clip(arr, amin, amax) + assert_equal(actual, exp) + + @pytest.mark.parametrize("arr, amin, amax", [ + # problematic scalar nan case from hypothesis + (np.zeros(10, dtype=np.int64), + np.array(np.nan), + np.zeros(10, dtype=np.int32)), + ]) + def test_clip_scalar_nan_propagation(self, arr, amin, amax): + # enforcement of scalar nan propagation for comparisons + # called through clip() + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.xfail(reason="propagation doesn't match spec") + @pytest.mark.parametrize("arr, amin, amax", [ + (np.array([1] * 10, dtype='m8'), + np.timedelta64('NaT'), + np.zeros(10, dtype=np.int32)), + ]) + @pytest.mark.filterwarnings("ignore::DeprecationWarning") + def test_NaT_propagation(self, arr, amin, amax): + # NOTE: the expected function spec doesn't + # propagate NaT, but clip() now does + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @given( + data=st.data(), + arr=hynp.arrays( + dtype=hynp.integer_dtypes() | hynp.floating_dtypes(), + shape=hynp.array_shapes() + ) + ) + def test_clip_property(self, data, arr): + """A property-based test using Hypothesis. 
+ + This aims for maximum generality: it could in principle generate *any* + valid inputs to np.clip, and in practice generates much more varied + inputs than human testers come up with. + + Because many of the inputs have tricky dependencies - compatible dtypes + and mutually-broadcastable shapes - we use the `st.data()` strategy to + draw values *inside* the test function, from strategies we construct + based on previous values. An alternative would be to define a custom + strategy with `@st.composite`, but keeping it inline is fine until the + code needs to be reused. + + That accounts for most of the function; the actual test is just three + lines to calculate and compare actual vs expected results! + """ + numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes() + # Generate shapes for the bounds which can be broadcast with each other + # and with the base shape. Below, we might decide to use scalar bounds, + # but it's clearer to generate these shapes unconditionally in advance. + in_shapes, result_shape = data.draw( + hynp.mutually_broadcastable_shapes( + num_shapes=2, base_shape=arr.shape + ) + ) + # Scalar `nan` is deprecated due to the differing behaviour it shows. + s = numeric_dtypes.flatmap( + lambda x: hynp.from_dtype(x, allow_nan=False)) + amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes, + shape=in_shapes[0], elements={"allow_nan": False})) + amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes, + shape=in_shapes[1], elements={"allow_nan": False})) + + # Then calculate our result and expected result and check that they're + # equal! See gh-12519 and gh-19457 for discussion deciding on this + # property and the result_type argument. + result = np.clip(arr, amin, amax) + t = np.result_type(arr, amin, amax) + expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t) + assert result.dtype == t + assert_array_equal(result, expected) + + def test_clip_min_max_args(self): + arr = np.arange(5) + + assert_array_equal(np.clip(arr), arr) + assert_array_equal(np.clip(arr, min=2, max=3), np.clip(arr, 2, 3)) + assert_array_equal(np.clip(arr, min=None, max=2), + np.clip(arr, None, 2)) + + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_max'"): + np.clip(arr, 2) + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_min'"): + np.clip(arr, a_max=2) + msg = ("Passing `min` or `max` keyword argument when `a_min` and " + "`a_max` are provided is forbidden.") + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, max=3) + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, min=2) + + @pytest.mark.parametrize("dtype,min,max", [ + # two scalars + ("int32", -2**32 - 1, 2**32), + ("int32", -2**320, None), + ("int32", None, 2**300), + ("int32", -1000, 2**32), + ("int32", -2**32 - 1, 1000), + ("uint8", -1, 129), + ]) + def test_out_of_bound_pyints(self, dtype, min, max): + a = np.arange(10000).astype(dtype) + # Check that whichever bounds were given are respected + c = np.clip(a, min=min, max=max) + assert not np.may_share_memory(a, c) + assert c.dtype == a.dtype + if min is not None: + assert (c >= min).all() + if max is not None: + assert (c <= max).all() + +class TestAllclose: + rtol = 1e-5 + atol = 1e-8 + + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def tst_allclose(self, x, y): + assert_(np.allclose(x, y), f"{x} and {y} not close") + + def tst_not_allclose(self, x, y): + assert_(not np.allclose(x, y), f"{x} and {y} shouldn't be close") + + def test_ip_allclose(self): + #
Parametric test factory. + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol * 2), + (aran, aran + aran * rtol), + (np.inf, np.inf), + (np.inf, [np.inf])] + + for (x, y) in data: + self.tst_allclose(x, y) + + def test_ip_not_allclose(self): + # Parametric test factory. + aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([np.inf, 0], [1, np.inf]), + ([np.inf, 0], [1, 0]), + ([np.inf, np.inf], [1, np.inf]), + ([np.inf, np.inf], [1, 0]), + ([-np.inf, 0], [np.inf, 0]), + ([np.nan, 0], [np.nan, 0]), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + aran * atol + atol * 2), + (np.array([np.inf, 1]), np.array([0, np.inf]))] + + for (x, y) in data: + self.tst_not_allclose(x, y) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.allclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_min_int(self): + # Could make problems because of abs(min_int) == min_int + min_int = np.iinfo(np.int_).min + a = np.array([min_int], dtype=np.int_) + assert_(np.allclose(a, a)) + + def test_equalnan(self): + x = np.array([1.0, np.nan]) + assert_(np.allclose(x, x, equal_nan=True)) + + def test_return_class_is_ndarray(self): + # Issue gh-6475 + # Check that allclose does not preserve subtypes + class Foo(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + a = Foo([1]) + assert_(type(np.allclose(a, a)) is bool) + + +class TestIsclose: + rtol = 1e-5 + atol = 1e-8 + + def _setup(self): + atol = self.atol + rtol = self.rtol + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + self.all_close_tests = [ + ([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol), + (aran, aran + aran * rtol), + (np.inf, np.inf), + (np.inf, [np.inf]), + ([np.inf, -np.inf], [np.inf, -np.inf]), + ] + self.none_close_tests = [ + ([np.inf, 0], [1, np.inf]), + ([np.inf, -np.inf], [1, 0]), + ([np.inf, np.inf], [1, -np.inf]), + ([np.inf, np.inf], [1, 0]), + ([np.nan, 0], [np.nan, -np.inf]), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + rtol * 1.1 * aran + atol * 1.1), + (np.array([np.inf, 1]), np.array([0, np.inf])), + ] + self.some_close_tests = [ + ([np.inf, 0], [np.inf, atol * 2]), + ([atol, 1, 1e6 * (1 + 2 * rtol) + atol], [0, np.nan, 1e6]), + (np.arange(3), [0, 1, 2.1]), + (np.nan, [np.nan, np.nan, np.nan]), + ([0], [atol, np.inf, -np.inf, np.nan]), + (0, [atol, np.inf, -np.inf, np.nan]), + ] + self.some_close_results = [ + [True, False], + [True, False, False], + [True, True, False], + [False, False, False], + [True, False, False, False], + [True, False, False, False], + ] + + def test_ip_isclose(self): + self._setup() + tests = self.some_close_tests + results = self.some_close_results + for (x, y), result in zip(tests, results): + assert_array_equal(np.isclose(x, y), result) + + x = np.array([2.1, 2.1, 2.1, 2.1, 5, np.nan]) + y = np.array([2, 2, 2, 2, np.nan, 5]) + atol = [0.11, 0.09, 1e-8, 1e-8, 1, 1] + rtol = [1e-8, 1e-8, 0.06, 0.04, 1, 1] + expected = np.array([True, False, True, False, False, False]) + assert_array_equal(np.isclose(x, y, rtol=rtol, atol=atol), expected) + + message = "operands could not be 
broadcast together..." + atol = np.array([1e-8, 1e-8]) + with assert_raises(ValueError, msg=message): + np.isclose(x, y, atol=atol) + + rtol = np.array([1e-5, 1e-5]) + with assert_raises(ValueError, msg=message): + np.isclose(x, y, rtol=rtol) + + def test_nep50_isclose(self): + below_one = float(1. - np.finfo('f8').eps) + f32 = np.array(below_one, 'f4') # This is just 1 at float32 precision + assert f32 > np.array(below_one) + # NEP 50 broadcasting of python scalars + assert f32 == below_one + # Test that it works for isclose arguments too (and that those fail if + # one uses a numpy float64). + assert np.isclose(f32, below_one, atol=0, rtol=0) + assert np.isclose(f32, np.float32(0), atol=below_one) + assert np.isclose(f32, 2, atol=0, rtol=below_one / 2) + assert not np.isclose(f32, np.float64(below_one), atol=0, rtol=0) + assert not np.isclose(f32, np.float32(0), atol=np.float64(below_one)) + assert not np.isclose(f32, 2, atol=0, rtol=np.float64(below_one / 2)) + + def tst_all_isclose(self, x, y): + assert_(np.all(np.isclose(x, y)), f"{x} and {y} not close") + + def tst_none_isclose(self, x, y): + msg = "%s and %s shouldn't be close" + assert_(not np.any(np.isclose(x, y)), msg % (x, y)) + + def tst_isclose_allclose(self, x, y): + msg = "isclose.all() and allclose aren't same for %s and %s" + msg2 = "isclose and allclose aren't same for %s and %s" + if np.isscalar(x) and np.isscalar(y): + assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) + else: + assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + + def test_ip_all_isclose(self): + self._setup() + for (x, y) in self.all_close_tests: + self.tst_all_isclose(x, y) + + x = np.array([2.3, 3.6, 4.4, np.nan]) + y = np.array([2, 3, 4, np.nan]) + atol = [0.31, 0, 0, 1] + rtol = [0, 0.21, 0.11, 1] + assert np.allclose(x, y, atol=atol, rtol=rtol, equal_nan=True) + assert not np.allclose(x, y, atol=0.1, rtol=0.1, equal_nan=True) + + # Show that gh-14330 is resolved + assert np.allclose([1, 2, float('nan')], [1, 2, float('nan')], + atol=[1, 1, 1], equal_nan=True) + + def test_ip_none_isclose(self): + self._setup() + for (x, y) in self.none_close_tests: + self.tst_none_isclose(x, y) + + def test_ip_isclose_allclose(self): + self._setup() + tests = (self.all_close_tests + self.none_close_tests + + self.some_close_tests) + for (x, y) in tests: + self.tst_isclose_allclose(x, y) + + def test_equal_nan(self): + assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) + arr = np.array([1.0, np.nan]) + assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True]) + + def test_masked_arrays(self): + # Make sure to test the output type when arguments are interchanged. + + x = np.ma.masked_where([True, True, False], np.arange(3)) + assert_(type(x) is type(np.isclose(2, x))) + assert_(type(x) is type(np.isclose(x, 2))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan]) + assert_(type(x) is type(np.isclose(np.inf, x))) + assert_(type(x) is type(np.isclose(x, np.inf))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(np.nan, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + y = np.isclose(x, np.nan, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... 
+ assert_array_equal([True, True, False], y.mask) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(x, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + def test_scalar_return(self): + assert_(np.isscalar(np.isclose(1, 1))) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.isclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_non_finite_scalar(self): + # GH7014, when two scalars are compared the output should also be a + # scalar + assert_(np.isclose(np.inf, -np.inf) is np.False_) + assert_(np.isclose(0, np.inf) is np.False_) + assert_(type(np.isclose(0, np.inf)) is np.bool) + + def test_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert np.isclose(a, a, atol=0, equal_nan=True).all() + assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all() + assert np.allclose(a, a, atol=0, equal_nan=True) + assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + + def test_tol_warnings(self): + a = np.array([1, 2, 3]) + b = np.array([np.inf, np.nan, 1]) + + for i in b: + for j in b: + # Making sure that i and j are not both numbers, because that won't create a warning + if (i == 1) and (j == 1): + continue + + with warnings.catch_warnings(record=True) as w: + + warnings.simplefilter("always") + c = np.isclose(a, a, atol=i, rtol=j) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + assert f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" in str(w[-1].message) + + +class TestStdVar: + def setup_method(self): + self.A = np.array([1, -1, 1, -1]) + self.real_var = 1 + + def test_basic(self): + assert_almost_equal(np.var(self.A), self.real_var) + assert_almost_equal(np.std(self.A)**2, self.real_var) + + def test_scalars(self): + assert_equal(np.var(1), 0) + assert_equal(np.std(1), 0) + + def test_ddof1(self): + assert_almost_equal(np.var(self.A, ddof=1), + self.real_var * len(self.A) / (len(self.A) - 1)) + assert_almost_equal(np.std(self.A, ddof=1)**2, + self.real_var * len(self.A) / (len(self.A) - 1)) + + def test_ddof2(self): + assert_almost_equal(np.var(self.A, ddof=2), + self.real_var * len(self.A) / (len(self.A) - 2)) + assert_almost_equal(np.std(self.A, ddof=2)**2, + self.real_var * len(self.A) / (len(self.A) - 2)) + + def test_correction(self): + assert_almost_equal( + np.var(self.A, correction=1), np.var(self.A, ddof=1) + ) + assert_almost_equal( + np.std(self.A, correction=1), np.std(self.A, ddof=1) + ) + + err_msg = "ddof and correction can't be provided simultaneously." + + with assert_raises_regex(ValueError, err_msg): + np.var(self.A, ddof=1, correction=0) + + with assert_raises_regex(ValueError, err_msg): + np.std(self.A, ddof=1, correction=1) + + def test_out_scalar(self): + d = np.arange(10) + out = np.array(0.) 
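+ # The assertions below rely on reductions accepting a 0-d ``out`` array + # and returning that very object; a minimal sketch (not upstream code): + # >>> o = np.array(0.) + # >>> np.mean(np.arange(4.), out=o) is o + # True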
+ r = np.std(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.var(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.mean(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + + +class TestStdVarComplex: + def test_basic(self): + A = np.array([1, 1.j, -1, -1.j]) + real_var = 1 + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) + + def test_scalars(self): + assert_equal(np.var(1j), 0) + assert_equal(np.std(1j), 0) + + +class TestCreationFuncs: + # Test ones, zeros, empty and full. + + def setup_method(self): + dtypes = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} + # void, bytes, str + variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + keyfunc = lambda dtype: dtype.str + self.dtypes = sorted(dtypes - variable_sized | + {np.dtype(tp.str.replace("0", str(i))) + for tp in variable_sized for i in range(1, 10)}, + key=keyfunc) + self.dtypes += [type(dt) for dt in sorted(dtypes, key=keyfunc)] + self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + self.ndims = 10 + + def check_function(self, func, fill_value=None): + par = ((0, 1, 2), + range(self.ndims), + self.orders, + self.dtypes) + fill_kwarg = {} + if fill_value is not None: + fill_kwarg = {'fill_value': fill_value} + + for size, ndims, order, dtype in itertools.product(*par): + shape = ndims * [size] + + is_void = dtype is np.dtypes.VoidDType or ( + isinstance(dtype, np.dtype) and dtype.str.startswith('|V')) + + # do not fill void type + if fill_kwarg and is_void: + continue + + arr = func(shape, order=order, dtype=dtype, + **fill_kwarg) + + if isinstance(dtype, np.dtype): + assert_equal(arr.dtype, dtype) + elif isinstance(dtype, type(np.dtype)): + if dtype in (np.dtypes.StrDType, np.dtypes.BytesDType): + dtype_str = np.dtype(dtype.type).str.replace('0', '1') + assert_equal(arr.dtype, np.dtype(dtype_str)) + else: + assert_equal(arr.dtype, np.dtype(dtype.type)) + assert_(getattr(arr.flags, self.orders[order])) + + if fill_value is not None: + if arr.dtype.str.startswith('|S'): + val = str(fill_value) + else: + val = fill_value + assert_equal(arr, dtype.type(val)) + + def test_zeros(self): + self.check_function(np.zeros) + + def test_ones(self): + self.check_function(np.ones) + + def test_empty(self): + self.check_function(np.empty) + + def test_full(self): + self.check_function(np.full, 0) + self.check_function(np.full, 1) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_for_reference_leak(self): + # Make sure we have an object for reference + dim = 1 + beg = sys.getrefcount(dim) + np.zeros([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.ones([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.empty([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.full([dim] * 10, 0) + assert_(sys.getrefcount(dim) == beg) + + +class TestLikeFuncs: + '''Test ones_like, zeros_like, empty_like and full_like''' + + def setup_method(self): + self.data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), 
None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + self.shapes = [(), (5,), (5, 6,), (5, 6, 7,)] + + def compare_array_value(self, dz, value, fill_value): + if value is not None: + if fill_value: + # Conversion is close to what np.full_like uses + # but we may want to convert directly in the future + # which may result in errors (where this does not). + z = np.array(value).astype(dz.dtype) + assert_(np.all(dz == z)) + else: + assert_(np.all(dz == value)) + + def check_like_function(self, like_function, value, fill_value=False): + if fill_value: + fill_kwarg = {'fill_value': value} + else: + fill_kwarg = {} + for d, dtype in self.data: + # default (K) order, dtype + dz = like_function(d, dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_equal(np.array(dz.strides) * d.dtype.itemsize, + np.array(d.strides) * dz.dtype.itemsize) + assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) + assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # C order, default dtype + dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # F order, default dtype + dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # A order + dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + if d.flags.f_contiguous: + assert_(dz.flags.f_contiguous) + else: + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # Test the 'shape' parameter + for s in self.shapes: + for o in 'CFA': + sz = like_function(d, dtype=dtype, shape=s, order=o, + **fill_kwarg) + assert_equal(sz.shape, s) + if dtype is None: + assert_equal(sz.dtype, d.dtype) + else: + assert_equal(sz.dtype, np.dtype(dtype)) + if o == 'C' or (o == 'A' and d.flags.c_contiguous): + assert_(sz.flags.c_contiguous) + elif o == 'F' or (o == 'A' and d.flags.f_contiguous): + assert_(sz.flags.f_contiguous) + self.compare_array_value(sz, value, fill_value) + + if (d.ndim != len(s)): + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(np.empty(s, dtype=dtype, + order='C').strides)) + else: + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(d.strides)) + + # Test the 'subok' parameter + class MyNDArray(np.ndarray): + pass + + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + + b = like_function(a, **fill_kwarg) + assert_(type(b) is MyNDArray) + + b = like_function(a, subok=False, **fill_kwarg) + assert_(type(b) is not MyNDArray) + + # Test invalid dtype + with assert_raises(TypeError): + a = np.array(b"abc") + like_function(a, dtype="S-1", **fill_kwarg) + + def test_ones_like(self): + 
self.check_like_function(np.ones_like, 1) + + def test_zeros_like(self): + self.check_like_function(np.zeros_like, 0) + + def test_empty_like(self): + self.check_like_function(np.empty_like, None) + + def test_filled_like(self): + self.check_like_function(np.full_like, 0, True) + self.check_like_function(np.full_like, 1, True) + # Large integers may overflow, but using int64 is OK (casts) + # see also gh-27075 + with pytest.raises(OverflowError): + np.full_like(np.ones(3, dtype=np.int8), 1000) + self.check_like_function(np.full_like, np.int64(1000), True) + self.check_like_function(np.full_like, 123.456, True) + # Inf to integer casts cause invalid-value errors: ignore them. + with np.errstate(invalid="ignore"): + self.check_like_function(np.full_like, np.inf, True) + + @pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like, + np.zeros_like, np.ones_like]) + @pytest.mark.parametrize('dtype', [str, bytes]) + def test_dtype_str_bytes(self, likefunc, dtype): + # Regression test for gh-19860 + a = np.arange(16).reshape(2, 8) + b = a[:, ::2] # Ensure b is not contiguous. + kwargs = {'fill_value': ''} if likefunc == np.full_like else {} + result = likefunc(b, dtype=dtype, **kwargs) + if dtype == str: + assert result.strides == (16, 4) + else: + # dtype is bytes + assert result.strides == (4, 1) + + +class TestCorrelate: + def _setup(self, dt): + self.x = np.array([1, 2, 3, 4, 5], dtype=dt) + self.xs = np.arange(1, 20)[::3] + self.y = np.array([-1, -2, -3], dtype=dt) + self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt) + self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) + self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) + self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) + self.zs = np.array([-3., -14., -30., -48., -66., -84., + -102., -54., -19.], dtype=dt) + + def test_float(self): + self._setup(float) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.x, self.y[:-1], 'full') + assert_array_almost_equal(z, self.z1_4) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + z = np.correlate(self.x[::-1], self.y, 'full') + assert_array_almost_equal(z, self.z1r) + z = np.correlate(self.y, self.x[::-1], 'full') + assert_array_almost_equal(z, self.z2r) + z = np.correlate(self.xs, self.y, 'full') + assert_array_almost_equal(z, self.zs) + + def test_object(self): + self._setup(Decimal) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.correlate(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_complex(self): + x = np.array([1, 2, 3, 4 + 1j], dtype=complex) + y = np.array([-1, -2j, 3 + 1j], dtype=complex) + r_z = np.array([3 - 1j, 6, 8 + 1j, 11 + 5j, -5 + 8j, -4 - 1j], dtype=complex) + r_z = r_z[::-1].conjugate() + z = np.correlate(y, x, mode='full') + assert_array_almost_equal(z, r_z) + + def test_zero_size(self): + with pytest.raises(ValueError): + np.correlate(np.array([]), np.ones(1000), mode='full') + with pytest.raises(ValueError): + np.correlate(np.ones(1000), np.array([]), mode='full') + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.correlate(d, k, mode='valid') + with 
assert_raises(ValueError): + np.correlate(d, k, mode='v') + # integer mode + with assert_raises(ValueError): + np.correlate(d, k, mode=-1) + assert_array_equal(np.correlate(d, k, mode=0), default_mode) + # illegal arguments + with assert_raises(TypeError): + np.correlate(d, k, mode=None) + + +class TestConvolve: + def test_object(self): + d = [1.] * 100 + k = [1.] * 3 + assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3)) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.convolve(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.convolve(d, k, mode='full') + with assert_raises(ValueError): + np.convolve(d, k, mode='f') + # integer mode + with assert_raises(ValueError): + np.convolve(d, k, mode=-1) + assert_array_equal(np.convolve(d, k, mode=2), default_mode) + # illegal arguments + with assert_raises(TypeError): + np.convolve(d, k, mode=None) + + +class TestArgwhere: + + @pytest.mark.parametrize('nd', [0, 1, 2]) + def test_nd(self, nd): + # get an nd array with multiple elements in every dimension + x = np.empty((2,) * nd, bool) + + # none + x[...] = False + assert_equal(np.argwhere(x).shape, (0, nd)) + + # only one + x[...] = False + x.flat[0] = True + assert_equal(np.argwhere(x).shape, (1, nd)) + + # all but one + x[...] = True + x.flat[0] = False + assert_equal(np.argwhere(x).shape, (x.size - 1, nd)) + + # all + x[...] = True + assert_equal(np.argwhere(x).shape, (x.size, nd)) + + def test_2D(self): + x = np.arange(6).reshape((2, 3)) + assert_array_equal(np.argwhere(x > 1), + [[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + def test_list(self): + assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) + + +class TestRoll: + def test_roll1d(self): + x = np.arange(10) + xr = np.roll(x, 2) + assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) + + def test_roll2d(self): + x2 = np.reshape(np.arange(10), (2, 5)) + x2r = np.roll(x2, 1) + assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) + + x2r = np.roll(x2, 1, axis=0) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, 1, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + # Roll multiple axes at once. + x2r = np.roll(x2, 1, axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (-1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (0, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + x2r = np.roll(x2, (0, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]])) + + x2r = np.roll(x2, (1, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (-1, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]])) + + # Roll the same axis multiple times. + x2r = np.roll(x2, 1, axis=(0, 0)) + assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])) + + x2r = np.roll(x2, 1, axis=(1, 1)) + assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]])) + + # Roll more than one turn in either direction.
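+ # As a hedged aside (not part of the upstream suite): shifts are taken + # modulo the axis length, so on this 5-column array rolling by 6 or by + # -4 is the same as rolling by 1: + # >>> np.roll(np.arange(5), 6) + # array([4, 0, 1, 2, 3])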
+ x2r = np.roll(x2, 6, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + x2r = np.roll(x2, -4, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + def test_roll_empty(self): + x = np.array([]) + assert_equal(np.roll(x, 1), np.array([])) + + def test_roll_unsigned_shift(self): + x = np.arange(4) + shift = np.uint16(2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + shift = np.uint64(2**63 + 2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + def test_roll_big_int(self): + x = np.arange(4) + assert_equal(np.roll(x, 2**100), x) + + +class TestRollaxis: + + # expected shape indexed by (axis, start) for array of + # shape (1, 2, 3, 4) + tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4), + (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4), + (0, 4): (2, 3, 4, 1), + (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4), + (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4), + (1, 4): (1, 3, 4, 2), + (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4), + (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4), + (2, 4): (1, 2, 4, 3), + (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3), + (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4), + (3, 4): (1, 2, 3, 4)} + + def test_exceptions(self): + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4) + assert_raises(AxisError, np.rollaxis, a, -5, 0) + assert_raises(AxisError, np.rollaxis, a, 0, -5) + assert_raises(AxisError, np.rollaxis, a, 4, 0) + assert_raises(AxisError, np.rollaxis, a, 0, 5) + + def test_results(self): + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() + aind = np.indices(a.shape) + assert_(a.flags['OWNDATA']) + for (i, j) in self.tgtshape: + # positive axis, positive start + res = np.rollaxis(a, axis=i, start=j) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(i, j)], str((i, j))) + assert_(not res.flags['OWNDATA']) + + # negative axis, positive start + ip = i + 1 + res = np.rollaxis(a, axis=-ip, start=j) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(4 - ip, j)]) + assert_(not res.flags['OWNDATA']) + + # positive axis, negative start + jp = j + 1 if j < 4 else j + res = np.rollaxis(a, axis=i, start=-jp) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(i, 4 - jp)]) + assert_(not res.flags['OWNDATA']) + + # negative axis, negative start + ip = i + 1 + jp = j + 1 if j < 4 else j + res = np.rollaxis(a, axis=-ip, start=-jp) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)]) + assert_(not res.flags['OWNDATA']) + + +class TestMoveaxis: + def test_move_to_end(self): + x = np.random.randn(5, 6, 7) + for source, expected in [(0, (6, 7, 5)), + (1, (5, 7, 6)), + (2, (5, 6, 7)), + (-1, (5, 6, 7))]: + actual = np.moveaxis(x, source, -1).shape + assert_equal(actual, expected) + + def test_move_new_position(self): + x = np.random.randn(1, 2, 3, 4) + for source, destination, expected in [ + (0, 1, (2, 1, 3, 4)), + (1, 2, (1, 3, 2, 4)), + (1, -1, (1, 3, 4, 2)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_equal(actual, expected) + + def test_preserve_order(self): + x = np.zeros((1, 2, 3, 4)) + for source, destination in [ + (0, 0), + (3, -1), + (-1, 3), + ([0, -1], [0, -1]), + ([2, 0], [2, 0]), + (range(4), range(4)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_equal(actual, (1, 2, 3, 4)) + + def test_move_multiples(self): + x = np.zeros((0, 1, 2, 3)) + for source, destination, expected in [ + ([0, 1], [2, 3], (2, 3, 0, 1)), + ([2, 3], [0, 1], (2, 3, 0, 1)), + ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)), + ([3, 0], [1, 0], (0, 3, 1, 2)), + ([0, 3], [0, 1], (0, 3, 1, 2)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_equal(actual, expected) + + def test_errors(self): + x = np.random.randn(1, 2, 3) + assert_raises_regex(AxisError, 'source.*out of bounds', + np.moveaxis, x, 3, 0) + assert_raises_regex(AxisError, 'source.*out of bounds', + np.moveaxis, x, -4, 0) + assert_raises_regex(AxisError, 'destination.*out of bounds', + np.moveaxis, x, 0, 5) + assert_raises_regex(ValueError, 'repeated axis in `source`', + np.moveaxis, x, [0, 0], [0, 1]) + assert_raises_regex(ValueError, 'repeated axis in `destination`', + np.moveaxis, x, [0, 1], [1, 1]) + assert_raises_regex(ValueError, 'must have the same number', + np.moveaxis, x, 0, [0, 1]) + assert_raises_regex(ValueError, 'must have the same number', + np.moveaxis, x, [0, 1], [0]) + + def test_array_likes(self): + x = np.ma.zeros((1, 2, 3)) + result = np.moveaxis(x, 0, 0) + assert_equal(x.shape, result.shape) + assert_(isinstance(result, np.ma.MaskedArray)) + + x = [1, 2, 3] + result = np.moveaxis(x, 0, 0) + assert_equal(x, list(result)) + assert_(isinstance(result, np.ndarray)) + + +class TestCross: + @pytest.mark.filterwarnings( + "ignore:.*2-dimensional vectors.*:DeprecationWarning" + ) + def test_2x2(self): + u = [1, 2] + v = [3, 4] + z = -2 + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + @pytest.mark.filterwarnings( + "ignore:.*2-dimensional vectors.*:DeprecationWarning" + ) + def test_2x3(self): + u = [1, 2] + v = [3, 4, 5] + z = np.array([10, -5, -2]) + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + def test_3x3(self): + u = [1, 2, 3] + v = [4, 5, 6] + z = np.array([-3, 6, -3]) + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + @pytest.mark.filterwarnings( + "ignore:.*2-dimensional vectors.*:DeprecationWarning" + ) + def test_broadcasting(self): + # Ticket #2624 (Trac #2032) + u = np.tile([1, 2], (11, 1)) + v = np.tile([3, 4], (11, 1)) + z = -2 + assert_equal(np.cross(u, v), z) + assert_equal(np.cross(v, u), -z) + assert_equal(np.cross(u, u), 0) + + u = np.tile([1, 2], (11, 1)).T + v = np.tile([3, 4, 5], (11, 1)) + z = np.tile([10, -5, -2], (11, 1)) + assert_equal(np.cross(u, v, axisa=0), z) + assert_equal(np.cross(v, u.T), -z) + assert_equal(np.cross(v, v), 0) + + u = np.tile([1, 2, 3], (11, 1)).T + v = np.tile([3, 4], (11, 1)).T + z = np.tile([-12, 9, -2], (11, 1)) + assert_equal(np.cross(u, v, axisa=0, axisb=0), z) + assert_equal(np.cross(v.T, u.T), -z) + assert_equal(np.cross(u.T, u.T), 0) + + u = np.tile([1, 2, 3], (5, 1)) + v = np.tile([4, 5, 6], (5, 1)).T + z = np.tile([-3, 6, -3], (5, 1)) + assert_equal(np.cross(u, v, axisb=0), z) + assert_equal(np.cross(v.T, u), -z) + assert_equal(np.cross(u, u), 0) + + @pytest.mark.filterwarnings( + "ignore:.*2-dimensional vectors.*:DeprecationWarning" + ) + def test_broadcasting_shapes(self): + u = np.ones((2, 1, 3)) + v = np.ones((5, 3)) + assert_equal(np.cross(u, v).shape, (2, 5, 3)) + u = np.ones((10, 3, 5)) + v = np.ones((2, 5)) + assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) + assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=2) + assert_raises(AxisError, np.cross, u, v, axisa=3, axisb=0) + u =
np.ones((10, 3, 5, 7)) + v = np.ones((5, 7, 2)) + assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) + assert_raises(AxisError, np.cross, u, v, axisa=-5, axisb=2) + assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=-4) + # gh-5885 + u = np.ones((3, 4, 2)) + for axisc in range(-2, 2): + assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) + + def test_uint8_int32_mixed_dtypes(self): + # regression test for gh-19138 + u = np.array([[195, 8, 9]], np.uint8) + v = np.array([250, 166, 68], np.int32) + z = np.array([[950, 11010, -30370]], dtype=np.int32) + assert_equal(np.cross(v, u), z) + assert_equal(np.cross(u, v), -z) + + @pytest.mark.parametrize("a, b", [(0, [1, 2]), ([1, 2], 3)]) + def test_zero_dimension(self, a, b): + with pytest.raises(ValueError) as exc: + np.cross(a, b) + assert "At least one array has zero dimension" in str(exc.value) + + +def test_outer_out_param(): + arr1 = np.ones((5,)) + arr2 = np.ones((2,)) + arr3 = np.linspace(-2, 2, 5) + out1 = np.ndarray(shape=(5, 5)) + out2 = np.ndarray(shape=(2, 5)) + res1 = np.outer(arr1, arr3, out1) + assert_equal(res1, out1) + assert_equal(np.outer(arr2, arr3, out2), out2) + + +class TestIndices: + + def test_simple(self): + [x, y] = np.indices((4, 3)) + assert_array_equal(x, np.array([[0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [3, 3, 3]])) + assert_array_equal(y, np.array([[0, 1, 2], + [0, 1, 2], + [0, 1, 2], + [0, 1, 2]])) + + def test_single_input(self): + [x] = np.indices((4,)) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + [x] = np.indices((4,), sparse=True) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + def test_scalar_input(self): + assert_array_equal([], np.indices(())) + assert_array_equal([], np.indices((), sparse=True)) + assert_array_equal([[]], np.indices((0,))) + assert_array_equal([[]], np.indices((0,), sparse=True)) + + def test_sparse(self): + [x, y] = np.indices((4, 3), sparse=True) + assert_array_equal(x, np.array([[0], [1], [2], [3]])) + assert_array_equal(y, np.array([[0, 1, 2]])) + + @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) + @pytest.mark.parametrize("dims", [(), (0,), (4, 3)]) + def test_return_type(self, dtype, dims): + inds = np.indices(dims, dtype=dtype) + assert_(inds.dtype == dtype) + + for arr in np.indices(dims, dtype=dtype, sparse=True): + assert_(arr.dtype == dtype) + + +class TestRequire: + flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS', + 'F', 'F_CONTIGUOUS', 'FORTRAN', + 'A', 'ALIGNED', + 'W', 'WRITEABLE', + 'O', 'OWNDATA'] + + def generate_all_false(self, dtype): + arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)]) + arr.setflags(write=False) + a = arr['a'] + assert_(not a.flags['C']) + assert_(not a.flags['F']) + assert_(not a.flags['O']) + assert_(not a.flags['W']) + assert_(not a.flags['A']) + return a + + def set_and_check_flag(self, flag, dtype, arr): + if dtype is None: + dtype = arr.dtype + b = np.require(arr, dtype, [flag]) + assert_(b.flags[flag]) + assert_(b.dtype == dtype) + + # a further call to np.require ought to return the same array + # unless OWNDATA is specified. 
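+ # A minimal sketch of that idempotence (illustration, not upstream code): + # >>> x = np.zeros(3) + # >>> np.require(x, None, ['C']) is x + # True + # Requesting 'O' (OWNDATA) on a view, by contrast, forces a copy.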
+ c = np.require(b, None, [flag]) + if flag[0] != 'O': + assert_(c is b) + else: + assert_(c.flags[flag]) + + def test_require_each(self): + + id = ['f8', 'i4'] + fd = [None, 'f8', 'c16'] + for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): + a = self.generate_all_false(idtype) + self.set_and_check_flag(flag, fdtype, a) + + def test_unknown_requirement(self): + a = self.generate_all_false('f8') + assert_raises(KeyError, np.require, a, None, 'Q') + + def test_non_array_input(self): + a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O']) + assert_(a.flags['O']) + assert_(a.flags['C']) + assert_(a.flags['A']) + assert_(a.dtype == 'i4') + assert_equal(a, [1, 2, 3, 4]) + + def test_C_and_F_simul(self): + a = self.generate_all_false('f8') + assert_raises(ValueError, np.require, a, None, ['C', 'F']) + + def test_ensure_array(self): + class ArraySubclass(np.ndarray): + pass + + a = ArraySubclass((2, 2)) + b = np.require(a, None, ['E']) + assert_(type(b) is np.ndarray) + + def test_preserve_subtype(self): + class ArraySubclass(np.ndarray): + pass + + for flag in self.flag_names: + a = ArraySubclass((2, 2)) + self.set_and_check_flag(flag, None, a) + + +class TestBroadcast: + def test_broadcast_in_args(self): + # gh-5881 + arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), + np.empty((5, 1, 7))] + mits = [np.broadcast(*arrs), + np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])), + np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])), + np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), + np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] + for mit in mits: + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 4) + for a, ia in zip(arrs, mit.iters): + assert_(a is ia.base) + + def test_broadcast_single_arg(self): + # gh-6899 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 1) + assert_(arrs[0] is mit.iters[0].base) + + def test_number_of_arguments(self): + arr = np.empty((5,)) + for j in range(70): + arrs = [arr] * j + if j > 64: + assert_raises(ValueError, np.broadcast, *arrs) + else: + mit = np.broadcast(*arrs) + assert_equal(mit.numiter, j) + + def test_broadcast_error_kwargs(self): + # gh-13455 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + mit2 = np.broadcast(*arrs, **{}) # noqa: PIE804 + assert_equal(mit.shape, mit2.shape) + assert_equal(mit.ndim, mit2.ndim) + assert_equal(mit.nd, mit2.nd) + assert_equal(mit.numiter, mit2.numiter) + assert_(mit.iters[0].base is mit2.iters[0].base) + + assert_raises(ValueError, np.broadcast, 1, x=1) + + def test_shape_mismatch_error_message(self): + with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and " + r"arg 2 with shape \(2,\)"): + np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + + +class TestKeepdims: + + class sub_array(np.ndarray): + def sum(self, axis=None, dtype=None, out=None): + return np.ndarray.sum(self, axis, dtype, out, keepdims=True) + + def test_raise(self): + sub_class = self.sub_array + x = np.arange(30).view(sub_class) + assert_raises(TypeError, np.sum, x, keepdims=True) + + +class TestTensordot: + + def test_zero_dimension(self): + # Test resolution to issue #5663 + a = np.ndarray((3, 0)) + b = np.ndarray((0, 4)) + td = np.tensordot(a, b, (1, 0)) + assert_array_equal(td, np.dot(a, b)) + assert_array_equal(td, np.einsum('ij,jk', a, b)) + + def 
test_zero_dimensional(self): + # gh-12130 + arr_0d = np.array(1) + ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined + assert_array_equal(ret, arr_0d) + + +class TestAsType: + + def test_astype(self): + data = [[1, 2], [3, 4]] + actual = np.astype( + np.array(data, dtype=np.int64), np.uint32 + ) + expected = np.array(data, dtype=np.uint32) + + assert_array_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + assert np.shares_memory( + actual, np.astype(actual, actual.dtype, copy=False) + ) + + actual = np.astype(np.int64(10), np.float64) + expected = np.float64(10) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + with pytest.raises(TypeError, match="Input should be a NumPy array"): + np.astype(data, np.float64) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_numerictypes.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_numerictypes.py new file mode 100644 index 0000000..c9a2ac0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_numerictypes.py @@ -0,0 +1,622 @@ +import itertools +import sys + +import pytest + +import numpy as np +import numpy._core.numerictypes as nt +from numpy._core.numerictypes import issctype, maximum_sctype, sctype2char, sctypes +from numpy.testing import ( + IS_PYPY, + assert_, + assert_equal, + assert_raises, + assert_raises_regex, +) + +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), + b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), + b'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + + +byteorder = {'little': '<', 'big': '>'}[sys.byteorder] + +def normalize_descr(descr): + "Normalize a description adding the platform byteorder." 
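+ # For example (a hedged sketch, not upstream code), on a little-endian + # machine 'i4' is normalized to '<i4' while one-byte types such as 'u1' + # get the order-agnostic '|' prefix, matching what dtype.descr reports: + # >>> np.dtype('i4').descr + # [('', '<i4')]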
+ + out = [] + for item in descr: + dtype = item[1] + if isinstance(dtype, str): + if dtype[0] not in ['|', '<', '>']: + onebyte = dtype[1:] == "1" + if onebyte or dtype[0] in ['S', 'V', 'b']: + dtype = "|" + dtype + else: + dtype = byteorder + dtype + if len(item) > 2 and np.prod(item[2]) > 1: + nitem = (item[0], dtype, item[2]) + else: + nitem = (item[0], dtype) + out.append(nitem) + elif isinstance(dtype, list): + l = normalize_descr(dtype) + out.append((item[0], l)) + else: + raise ValueError(f"Expected a str or list and got {type(item)}") + return out + + +############################################################ +# Creation tests +############################################################ + +class CreateZeros: + """Check the creation of heterogeneous arrays zero-valued""" + + def test_zeros0D(self): + """Check creation of 0-dimensional objects""" + h = np.zeros((), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype.fields['x'][0].name[:4] == 'void') + assert_(h.dtype.fields['x'][0].char == 'V') + assert_(h.dtype.fields['x'][0].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((), dtype='u1')) + + def test_zerosSD(self): + """Check creation of single-dimensional objects""" + h = np.zeros((2,), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['y'].name[:4] == 'void') + assert_(h.dtype['y'].char == 'V') + assert_(h.dtype['y'].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2,), dtype='u1')) + + def test_zerosMD(self): + """Check creation of multi-dimensional objects""" + h = np.zeros((2, 3), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['z'].name == 'uint8') + assert_(h.dtype['z'].char == 'B') + assert_(h.dtype['z'].type == np.uint8) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) + + +class TestCreateZerosPlain(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (plain)""" + _descr = Pdescr + +class TestCreateZerosNested(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (nested)""" + _descr = Ndescr + + +class CreateValues: + """Check the creation of heterogeneous arrays with values""" + + def test_tuple(self): + """Check creation from tuples""" + h = np.array(self._buffer, dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (2,)) + else: + assert_(h.shape == ()) + + def test_list_of_tuple(self): + """Check creation from list of tuples""" + h = np.array([self._buffer], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 2)) + else: + assert_(h.shape == (1,)) + + def test_list_of_list_of_tuple(self): + """Check creation from list of list of tuples""" + h = np.array([[self._buffer]], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 1, 2)) + else: + assert_(h.shape == (1, 1)) + + +class TestCreateValuesPlainSingle(CreateValues): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestCreateValuesPlainMultiple(CreateValues): + """Check the creation of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class 
TestCreateValuesNestedSingle(CreateValues): + """Check the creation of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = 0 + _buffer = NbufferT[0] + +class TestCreateValuesNestedMultiple(CreateValues): + """Check the creation of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = 1 + _buffer = NbufferT + + +############################################################ +# Reading tests +############################################################ + +class ReadValuesPlain: + """Check the reading of values in heterogeneous arrays (plain)""" + + def test_access_fields(self): + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][1], + self._buffer[1][1]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][2], + self._buffer[1][2]], dtype='u1')) + + +class TestReadValuesPlainSingle(ReadValuesPlain): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestReadValuesPlainMultiple(ReadValuesPlain): + """Check the values of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class ReadValuesNested: + """Check the reading of values in heterogeneous arrays (nested)""" + + def test_access_top_fields(self): + """Check reading the top fields of a nested array""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[4], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][4], + self._buffer[1][4]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][5], + self._buffer[1][5]], dtype='u1')) + + def test_nested1_acessors(self): + """Check reading the nested fields of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['value'], + np.array(self._buffer[1][0], dtype='c16')) + assert_equal(h['Info']['y2'], + np.array(self._buffer[1][1], dtype='f8')) + assert_equal(h['info']['Name'], + np.array(self._buffer[3][0], dtype='U2')) + assert_equal(h['info']['Value'], + np.array(self._buffer[3][1], dtype='c16')) + else: + assert_equal(h['Info']['value'], + np.array([self._buffer[0][1][0], + self._buffer[1][1][0]], + dtype='c16')) + assert_equal(h['Info']['y2'], + np.array([self._buffer[0][1][1], + self._buffer[1][1][1]], + dtype='f8')) + assert_equal(h['info']['Name'], + np.array([self._buffer[0][3][0], + self._buffer[1][3][0]], + dtype='U2')) + assert_equal(h['info']['Value'], + np.array([self._buffer[0][3][1], + self._buffer[1][3][1]], + dtype='c16')) + + def test_nested2_acessors(self): + """Check reading the nested fields of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['Info2']['value'], + 
np.array(self._buffer[1][2][1], dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array(self._buffer[1][2][3], dtype='u4')) + else: + assert_equal(h['Info']['Info2']['value'], + np.array([self._buffer[0][1][2][1], + self._buffer[1][1][2][1]], + dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array([self._buffer[0][1][2][3], + self._buffer[1][1][2][3]], + dtype='u4')) + + def test_nested1_descriptor(self): + """Check access nested descriptors of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['value'].name == 'complex128') + assert_(h.dtype['Info']['y2'].name == 'float64') + assert_(h.dtype['info']['Name'].name == 'str256') + assert_(h.dtype['info']['Value'].name == 'complex128') + + def test_nested2_descriptor(self): + """Check access nested descriptors of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['Info2']['value'].name == 'void256') + assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') + + +class TestReadValuesNestedSingle(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = False + _buffer = NbufferT[0] + +class TestReadValuesNestedMultiple(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = True + _buffer = NbufferT + +class TestEmptyField: + def test_assign(self): + a = np.arange(10, dtype=np.float32) + a.dtype = [("int", "<0i4"), ("float", "<2f4")] + assert_(a['int'].shape == (5, 0)) + assert_(a['float'].shape == (5, 2)) + + +class TestMultipleFields: + def setup_method(self): + self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + + def _bad_call(self): + return self.ary['f0', 'f1'] + + def test_no_tuple(self): + assert_raises(IndexError, self._bad_call) + + def test_return(self): + res = self.ary[['f0', 'f2']].tolist() + assert_(res == [(1, 3), (5, 7)]) + + +class TestIsSubDType: + # scalar types can be promoted into dtypes + wrappers = [np.dtype, lambda x: x] + + def test_both_abstract(self): + assert_(np.issubdtype(np.floating, np.inexact)) + assert_(not np.issubdtype(np.inexact, np.floating)) + + def test_same(self): + for cls in (np.float32, np.int32): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(np.issubdtype(w1(cls), w2(cls))) + + def test_subclass(self): + # note we cannot promote floating to a dtype, as it would turn into a + # concrete type + for w in self.wrappers: + assert_(np.issubdtype(w(np.float32), np.floating)) + assert_(np.issubdtype(w(np.float64), np.floating)) + + def test_subclass_backwards(self): + for w in self.wrappers: + assert_(not np.issubdtype(np.floating, w(np.float32))) + assert_(not np.issubdtype(np.floating, w(np.float64))) + + def test_sibling_class(self): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) + assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) + + def test_nondtype_nonscalartype(self): + # See gh-14619 and gh-9505 which introduced the deprecation to fix + # this. 
These tests are directly taken from gh-9505 + assert not np.issubdtype(np.float32, 'float64') + assert not np.issubdtype(np.float32, 'f8') + assert not np.issubdtype(np.int32, str) + assert not np.issubdtype(np.int32, 'int64') + assert not np.issubdtype(np.str_, 'void') + # for the following the correct spellings are + # np.integer, np.floating, or np.complexfloating respectively: + assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_ + assert not np.issubdtype(np.float32, float) + assert not np.issubdtype(np.complex64, complex) + assert not np.issubdtype(np.float32, "float") + assert not np.issubdtype(np.float64, "f") + + # Test the same for the correct first datatype and abstract one + # in the case of int, float, complex: + assert np.issubdtype(np.float64, 'float64') + assert np.issubdtype(np.float64, 'f8') + assert np.issubdtype(np.str_, str) + assert np.issubdtype(np.int64, 'int64') + assert np.issubdtype(np.void, 'void') + assert np.issubdtype(np.int8, np.integer) + assert np.issubdtype(np.float32, np.floating) + assert np.issubdtype(np.complex64, np.complexfloating) + assert np.issubdtype(np.float64, "float") + assert np.issubdtype(np.float32, "f") + + +class TestIsDType: + """ + Check correctness of `np.isdtype`. The test considers different argument + configurations: `np.isdtype(dtype, k1)` and `np.isdtype(dtype, (k1, k2))` + with concrete dtypes and dtype groups. + """ + dtype_group_dict = { + "signed integer": sctypes["int"], + "unsigned integer": sctypes["uint"], + "integral": sctypes["int"] + sctypes["uint"], + "real floating": sctypes["float"], + "complex floating": sctypes["complex"], + "numeric": ( + sctypes["int"] + sctypes["uint"] + sctypes["float"] + + sctypes["complex"] + ) + } + + @pytest.mark.parametrize( + "dtype,close_dtype", + [ + (np.int64, np.int32), (np.uint64, np.uint32), + (np.float64, np.float32), (np.complex128, np.complex64) + ] + ) + @pytest.mark.parametrize( + "dtype_group", + [ + None, "signed integer", "unsigned integer", "integral", + "real floating", "complex floating", "numeric" + ] + ) + def test_isdtype(self, dtype, close_dtype, dtype_group): + # First check if same dtypes return `true` and different ones + # give `false` (even if they're close in the dtype hierarchy!) + if dtype_group is None: + assert np.isdtype(dtype, dtype) + assert not np.isdtype(dtype, close_dtype) + assert np.isdtype(dtype, (dtype, close_dtype)) + + # Check that dtype and a dtype group that it belongs to + # return `true`, and `false` otherwise. 
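+        # e.g. np.isdtype(np.int64, "signed integer") is True, while
+        # np.isdtype(np.float32, "signed integer") is False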
+ elif dtype in self.dtype_group_dict[dtype_group]: + assert np.isdtype(dtype, dtype_group) + assert np.isdtype(dtype, (close_dtype, dtype_group)) + else: + assert not np.isdtype(dtype, dtype_group) + + def test_isdtype_invalid_args(self): + with assert_raises_regex(TypeError, r".*must be a NumPy dtype.*"): + np.isdtype("int64", np.int64) + with assert_raises_regex(TypeError, r".*kind argument must.*"): + np.isdtype(np.int64, 1) + with assert_raises_regex(ValueError, r".*not a known kind name.*"): + np.isdtype(np.int64, "int64") + + def test_sctypes_complete(self): + # issue 26439: int32/intc were masking each other on 32-bit builds + assert np.int32 in sctypes['int'] + assert np.intc in sctypes['int'] + assert np.int64 in sctypes['int'] + assert np.uint32 in sctypes['uint'] + assert np.uintc in sctypes['uint'] + assert np.uint64 in sctypes['uint'] + +class TestSctypeDict: + def test_longdouble(self): + assert_(np._core.sctypeDict['float64'] is not np.longdouble) + assert_(np._core.sctypeDict['complex128'] is not np.clongdouble) + + def test_ulong(self): + assert np._core.sctypeDict['ulong'] is np.ulong + assert np.dtype(np.ulong) is np.dtype("ulong") + assert np.dtype(np.ulong).itemsize == np.dtype(np.long).itemsize + + +@pytest.mark.filterwarnings("ignore:.*maximum_sctype.*:DeprecationWarning") +class TestMaximumSctype: + + # note that parametrizing with sctype['int'] and similar would skip types + # with the same size (gh-11923) + + @pytest.mark.parametrize( + 't', [np.byte, np.short, np.intc, np.long, np.longlong] + ) + def test_int(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['int'][-1]) + + @pytest.mark.parametrize( + 't', [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong] + ) + def test_uint(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['uint'][-1]) + + @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) + def test_float(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['float'][-1]) + + @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) + def test_complex(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['complex'][-1]) + + @pytest.mark.parametrize('t', [np.bool, np.object_, np.str_, np.bytes_, + np.void]) + def test_other(self, t): + assert_equal(maximum_sctype(t), t) + + +class Test_sctype2char: + # This function is old enough that we're really just documenting the quirks + # at this point. 
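+    # For instance, sctype2char(np.double) returns the dtype character 'd',
+    # while abstract types such as np.floating raise KeyError (see below).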
+ + def test_scalar_type(self): + assert_equal(sctype2char(np.double), 'd') + assert_equal(sctype2char(np.long), 'l') + assert_equal(sctype2char(np.int_), np.array(0).dtype.char) + assert_equal(sctype2char(np.str_), 'U') + assert_equal(sctype2char(np.bytes_), 'S') + + def test_other_type(self): + assert_equal(sctype2char(float), 'd') + assert_equal(sctype2char(list), 'O') + assert_equal(sctype2char(np.ndarray), 'O') + + def test_third_party_scalar_type(self): + from numpy._core._rational_tests import rational + assert_raises(KeyError, sctype2char, rational) + assert_raises(KeyError, sctype2char, rational(1)) + + def test_array_instance(self): + assert_equal(sctype2char(np.array([1.0, 2.0])), 'd') + + def test_abstract_type(self): + assert_raises(KeyError, sctype2char, np.floating) + + def test_non_type(self): + assert_raises(ValueError, sctype2char, 1) + +@pytest.mark.parametrize("rep, expected", [ + (np.int32, True), + (list, False), + (1.1, False), + (str, True), + (np.dtype(np.float64), True), + (np.dtype((np.int16, (3, 4))), True), + (np.dtype([('a', np.int8)]), True), + ]) +def test_issctype(rep, expected): + # ensure proper identification of scalar + # data-types by issctype() + actual = issctype(rep) + assert type(actual) is bool + assert_equal(actual, expected) + + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") +@pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") +class TestDocStrings: + def test_platform_dependent_aliases(self): + if np.int64 is np.int_: + assert_('int64' in np.int_.__doc__) + elif np.int64 is np.longlong: + assert_('int64' in np.longlong.__doc__) + + +class TestScalarTypeNames: + # gh-9799 + + numeric_types = [ + np.byte, np.short, np.intc, np.long, np.longlong, + np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong, + np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble, + ] + + def test_names_are_unique(self): + # none of the above may be aliases for each other + assert len(set(self.numeric_types)) == len(self.numeric_types) + + # names must be unique + names = [t.__name__ for t in self.numeric_types] + assert len(set(names)) == len(names) + + @pytest.mark.parametrize('t', numeric_types) + def test_names_reflect_attributes(self, t): + """ Test that names correspond to where the type is under ``np.`` """ + assert getattr(np, t.__name__) is t + + @pytest.mark.parametrize('t', numeric_types) + def test_names_are_undersood_by_dtype(self, t): + """ Test the dtype constructor maps names back to the type """ + assert np.dtype(t.__name__).type is t + + +class TestBoolDefinition: + def test_bool_definition(self): + assert nt.bool is np.bool diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_overrides.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_overrides.py new file mode 100644 index 0000000..b0d7337 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_overrides.py @@ -0,0 +1,791 @@ +import inspect +import os +import pickle +import sys +import tempfile +from io import StringIO +from unittest import mock + +import pytest + +import numpy as np +from numpy._core.overrides import ( + _get_implementing_args, + array_function_dispatch, + verify_matching_signatures, +) +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex +from numpy.testing.overrides import get_overridable_numpy_array_functions + + +def _return_not_implemented(self, 
*args, **kwargs): + return NotImplemented + + +# need to define this at the top level to test pickling +@array_function_dispatch(lambda array: (array,)) +def dispatched_one_arg(array): + """Docstring.""" + return 'original' + + +@array_function_dispatch(lambda array1, array2: (array1, array2)) +def dispatched_two_arg(array1, array2): + """Docstring.""" + return 'original' + + +class TestGetImplementingArgs: + + def test_ndarray(self): + array = np.array(1) + + args = _get_implementing_args([array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, 1]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([1, array]) + assert_equal(list(args), [array]) + + def test_ndarray_subclasses(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + array = np.array(1).view(np.ndarray) + override_sub = np.array(1).view(OverrideSub) + no_override_sub = np.array(1).view(NoOverrideSub) + + args = _get_implementing_args([array, override_sub]) + assert_equal(list(args), [override_sub, array]) + + args = _get_implementing_args([array, no_override_sub]) + assert_equal(list(args), [no_override_sub, array]) + + args = _get_implementing_args( + [override_sub, no_override_sub]) + assert_equal(list(args), [override_sub, no_override_sub]) + + def test_ndarray_and_duck_array(self): + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + other = Other() + + args = _get_implementing_args([other, array]) + assert_equal(list(args), [other, array]) + + args = _get_implementing_args([array, other]) + assert_equal(list(args), [array, other]) + + def test_ndarray_subclass_and_duck_array(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + subarray = np.array(1).view(OverrideSub) + other = Other() + + assert_equal(_get_implementing_args([array, subarray, other]), + [subarray, array, other]) + assert_equal(_get_implementing_args([array, other, subarray]), + [subarray, array, other]) + + def test_many_duck_arrays(self): + + class A: + __array_function__ = _return_not_implemented + + class B(A): + __array_function__ = _return_not_implemented + + class C(A): + __array_function__ = _return_not_implemented + + class D: + __array_function__ = _return_not_implemented + + a = A() + b = B() + c = C() + d = D() + + assert_equal(_get_implementing_args([1]), []) + assert_equal(_get_implementing_args([a]), [a]) + assert_equal(_get_implementing_args([a, 1]), [a]) + assert_equal(_get_implementing_args([a, a, a]), [a]) + assert_equal(_get_implementing_args([a, d, a]), [a, d]) + assert_equal(_get_implementing_args([a, b]), [b, a]) + assert_equal(_get_implementing_args([b, a]), [b, a]) + assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) + assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) + + def test_too_many_duck_arrays(self): + namespace = {'__array_function__': _return_not_implemented} + types = [type('A' + str(i), (object,), namespace) for i in range(65)] + relevant_args = [t() for t in types] + + actual = _get_implementing_args(relevant_args[:64]) + assert_equal(actual, relevant_args[:64]) + + with assert_raises_regex(TypeError, 'distinct argument types'): + _get_implementing_args(relevant_args) + + +class TestNDArrayArrayFunction: + + def 
test_method(self): + + class Other: + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + array = np.array([1]) + other = Other() + no_override_sub = array.view(NoOverrideSub) + override_sub = array.view(OverrideSub) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray,), + args=(array, 1.), kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, Other), + args=(array, other), kwargs={}) + assert_(result is NotImplemented) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, NoOverrideSub), + args=(array, no_override_sub), + kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, OverrideSub), + args=(array, override_sub), + kwargs={}) + assert_equal(result, 'original') + + with assert_raises_regex(TypeError, 'no implementation found'): + np.concatenate((array, other)) + + expected = np.concatenate((array, array)) + result = np.concatenate((array, no_override_sub)) + assert_equal(result, expected.view(NoOverrideSub)) + result = np.concatenate((array, override_sub)) + assert_equal(result, expected.view(OverrideSub)) + + def test_no_wrapper(self): + # Regular numpy functions have wrappers, but do not presume + # all functions do (array creation ones do not): check that + # we just call the function in that case. + array = np.array(1) + func = lambda x: x * 2 + result = array.__array_function__(func=func, types=(np.ndarray,), + args=(array,), kwargs={}) + assert_equal(result, array * 2) + + def test_wrong_arguments(self): + # Check our implementation guards against wrong arguments. 
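+        # the protocol call is a.__array_function__(func, types, args, kwargs)
+        # with args a tuple and kwargs a dict; anything else must raise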
+ a = np.array([1, 2]) + with pytest.raises(TypeError, match="args must be a tuple"): + a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) + with pytest.raises(TypeError, match="kwargs must be a dict"): + a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) + + +class TestArrayFunctionDispatch: + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + roundtripped = pickle.loads( + pickle.dumps(dispatched_one_arg, protocol=proto)) + assert_(roundtripped is dispatched_one_arg) + + def test_name_and_docstring(self): + assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg') + if sys.flags.optimize < 2: + assert_equal(dispatched_one_arg.__doc__, 'Docstring.') + + def test_interface(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return (self, func, types, args, kwargs) + + original = MyArray() + (obj, func, types, args, kwargs) = dispatched_one_arg(original) + assert_(obj is original) + assert_(func is dispatched_one_arg) + assert_equal(set(types), {MyArray}) + # assert_equal uses the overloaded np.iscomplexobj() internally + assert_(args == (original,)) + assert_equal(kwargs, {}) + + def test_not_implemented(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return NotImplemented + + array = MyArray() + with assert_raises_regex(TypeError, 'no implementation found'): + dispatched_one_arg(array) + + def test_where_dispatch(self): + + class DuckArray: + def __array_function__(self, ufunc, method, *inputs, **kwargs): + return "overridden" + + array = np.array(1) + duck_array = DuckArray() + + result = np.std(array, where=duck_array) + + assert_equal(result, "overridden") + + +class TestVerifyMatchingSignatures: + + def test_verify_matching_signatures(self): + + verify_matching_signatures(lambda x: 0, lambda x: 0) + verify_matching_signatures(lambda x=None: 0, lambda x=None: 0) + verify_matching_signatures(lambda x=1: 0, lambda x=None: 0) + + with assert_raises(RuntimeError): + verify_matching_signatures(lambda a: 0, lambda b: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x: 0, lambda x=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=None: 0, lambda y=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=1: 0, lambda y=1: 0) + + def test_array_function_dispatch(self): + + with assert_raises(RuntimeError): + @array_function_dispatch(lambda x: (x,)) + def f(y): + pass + + # should not raise + @array_function_dispatch(lambda x: (x,), verify=False) + def f(y): + pass + + +def _new_duck_type_and_implements(): + """Create a duck array type and implements functions.""" + HANDLED_FUNCTIONS = {} + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + if func not in HANDLED_FUNCTIONS: + return NotImplemented + if not all(issubclass(t, MyArray) for t in types): + return NotImplemented + return HANDLED_FUNCTIONS[func](*args, **kwargs) + + def implements(numpy_function): + """Register an __array_function__ implementations.""" + def decorator(func): + HANDLED_FUNCTIONS[numpy_function] = func + return func + return decorator + + return (MyArray, implements) + + +class TestArrayFunctionImplementation: + + def test_one_arg(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(dispatched_one_arg) + def _(array): + return 'myarray' + + assert_equal(dispatched_one_arg(1), 'original') + assert_equal(dispatched_one_arg(MyArray()), 'myarray') + + def 
test_optional_args(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array, option=None: (array,)) + def func_with_option(array, option='default'): + return option + + @implements(func_with_option) + def my_array_func_with_option(array, new_option='myarray'): + return new_option + + # we don't need to implement every option on __array_function__ + # implementations + assert_equal(func_with_option(1), 'default') + assert_equal(func_with_option(1, option='extra'), 'extra') + assert_equal(func_with_option(MyArray()), 'myarray') + with assert_raises(TypeError): + func_with_option(MyArray(), option='extra') + + # but new options on implementations can't be used + result = my_array_func_with_option(MyArray(), new_option='yes') + assert_equal(result, 'yes') + with assert_raises(TypeError): + func_with_option(MyArray(), new_option='no') + + def test_not_implemented(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array: (array,), module='my') + def func(array): + return array + + array = np.array(1) + assert_(func(array) is array) + assert_equal(func.__module__, 'my') + + with assert_raises_regex( + TypeError, "no implementation found for 'my.func'"): + func(MyArray()) + + @pytest.mark.parametrize("name", ["concatenate", "mean", "asarray"]) + def test_signature_error_message_simple(self, name): + func = getattr(np, name) + try: + # all of these functions need an argument: + func() + except TypeError as e: + exc = e + + assert exc.args[0].startswith(f"{name}()") + + def test_signature_error_message(self): + # The lambda function will be named "", but the TypeError + # should show the name as "func" + def _dispatcher(): + return () + + @array_function_dispatch(_dispatcher) + def func(): + pass + + try: + func._implementation(bad_arg=3) + except TypeError as e: + expected_exception = e + + try: + func(bad_arg=3) + raise AssertionError("must fail") + except TypeError as exc: + if exc.args[0].startswith("_dispatcher"): + # We replace the qualname currently, but it used `__name__` + # (relevant functions have the same name and qualname anyway) + pytest.skip("Python version is not using __qualname__ for " + "TypeError formatting.") + + assert exc.args == expected_exception.args + + @pytest.mark.parametrize("value", [234, "this func is not replaced"]) + def test_dispatcher_error(self, value): + # If the dispatcher raises an error, we must not attempt to mutate it + error = TypeError(value) + + def dispatcher(): + raise error + + @array_function_dispatch(dispatcher) + def func(): + return 3 + + try: + func() + raise AssertionError("must fail") + except TypeError as exc: + assert exc is error # unmodified exception + + def test_properties(self): + # Check that str and repr are sensible + func = dispatched_two_arg + assert str(func) == str(func._implementation) + repr_no_id = repr(func).split("at ")[0] + repr_no_id_impl = repr(func._implementation).split("at ")[0] + assert repr_no_id == repr_no_id_impl + + @pytest.mark.parametrize("func", [ + lambda x, y: 0, # no like argument + lambda like=None: 0, # not keyword only + lambda *, like=None, a=3: 0, # not last (not that it matters) + ]) + def test_bad_like_sig(self, func): + # We sanity check the signature, and these should fail. 
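+        # none of these signatures carries `like` in the form the decorator's
+        # sanity check accepts, so each must raise RuntimeError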
+ with pytest.raises(RuntimeError): + array_function_dispatch()(func) + + def test_bad_like_passing(self): + # Cover internal sanity check for passing like as first positional arg + def func(*, like=None): + pass + + func_with_like = array_function_dispatch()(func) + with pytest.raises(TypeError): + func_with_like() + with pytest.raises(TypeError): + func_with_like(like=234) + + def test_too_many_args(self): + # Mainly a unit-test to increase coverage + objs = [] + for i in range(80): + class MyArr: + def __array_function__(self, *args, **kwargs): + return NotImplemented + + objs.append(MyArr()) + + def _dispatch(*args): + return args + + @array_function_dispatch(_dispatch) + def func(*args): + pass + + with pytest.raises(TypeError, match="maximum number"): + func(*objs) + + +class TestNDArrayMethods: + + def test_repr(self): + # gh-12162: should still be defined even if __array_function__ doesn't + # implement np.array_repr() + + class MyArray(np.ndarray): + def __array_function__(*args, **kwargs): + return NotImplemented + + array = np.array(1).view(MyArray) + assert_equal(repr(array), 'MyArray(1)') + assert_equal(str(array), '1') + + +class TestNumPyFunctions: + + def test_set_module(self): + assert_equal(np.sum.__module__, 'numpy') + assert_equal(np.char.equal.__module__, 'numpy.char') + assert_equal(np.fft.fft.__module__, 'numpy.fft') + assert_equal(np.linalg.solve.__module__, 'numpy.linalg') + + def test_inspect_sum(self): + signature = inspect.signature(np.sum) + assert_('axis' in signature.parameters) + + def test_override_sum(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(np.sum) + def _(array): + return 'yes' + + assert_equal(np.sum(MyArray()), 'yes') + + def test_sum_on_mock_array(self): + + # We need a proxy for mocks because __array_function__ is only looked + # up in the class dict + class ArrayProxy: + def __init__(self, value): + self.value = value + + def __array_function__(self, *args, **kwargs): + return self.value.__array_function__(*args, **kwargs) + + def __array__(self, *args, **kwargs): + return self.value.__array__(*args, **kwargs) + + proxy = ArrayProxy(mock.Mock(spec=ArrayProxy)) + proxy.value.__array_function__.return_value = 1 + result = np.sum(proxy) + assert_equal(result, 1) + proxy.value.__array_function__.assert_called_once_with( + np.sum, (ArrayProxy,), (proxy,), {}) + proxy.value.__array__.assert_not_called() + + def test_sum_forwarding_implementation(self): + + class MyArray(np.ndarray): + + def sum(self, axis, out): + return 'summed' + + def __array_function__(self, func, types, args, kwargs): + return super().__array_function__(func, types, args, kwargs) + + # note: the internal implementation of np.sum() calls the .sum() method + array = np.array(1).view(MyArray) + assert_equal(np.sum(array), 'summed') + + +class TestArrayLike: + def setup_method(self): + class MyArray: + def __init__(self, function=None): + self.function = function + + def __array_function__(self, func, types, args, kwargs): + assert func is getattr(np, func.__name__) + try: + my_func = getattr(self, func.__name__) + except AttributeError: + return NotImplemented + return my_func(*args, **kwargs) + + self.MyArray = MyArray + + class MyNoArrayFunctionArray: + def __init__(self, function=None): + self.function = function + + self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + + class MySubclass(np.ndarray): + def __array_function__(self, func, types, args, kwargs): + result = super().__array_function__(func, types, args, kwargs) + return 
result.view(self.__class__) + + self.MySubclass = MySubclass + + def add_method(self, name, arr_class, enable_value_error=False): + def _definition(*args, **kwargs): + # Check that `like=` isn't propagated downstream + assert 'like' not in kwargs + + if enable_value_error and 'value_error' in kwargs: + raise ValueError + + return arr_class(getattr(arr_class, name)) + setattr(arr_class, name, _definition) + + def func_args(*args, **kwargs): + return args, kwargs + + def test_array_like_not_implemented(self): + self.add_method('array', self.MyArray) + + ref = self.MyArray.array() + + with assert_raises_regex(TypeError, 'no implementation found'): + array_like = np.asarray(1, like=ref) + + _array_tests = [ + ('array', *func_args((1,))), + ('asarray', *func_args((1,))), + ('asanyarray', *func_args((1,))), + ('ascontiguousarray', *func_args((2, 3))), + ('asfortranarray', *func_args((2, 3))), + ('require', *func_args((np.arange(6).reshape(2, 3),), + requirements=['A', 'F'])), + ('empty', *func_args((1,))), + ('full', *func_args((1,), 2)), + ('ones', *func_args((1,))), + ('zeros', *func_args((1,))), + ('arange', *func_args(3)), + ('frombuffer', *func_args(b'\x00' * 8, dtype=int)), + ('fromiter', *func_args(range(3), dtype=int)), + ('fromstring', *func_args('1,2', dtype=int, sep=',')), + ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))), + ('genfromtxt', *func_args(lambda: StringIO('1,2.1'), + dtype=[('int', 'i8'), ('float', 'f8')], + delimiter=',')), + ] + + def test_nep35_functions_as_array_functions(self,): + all_array_functions = get_overridable_numpy_array_functions() + like_array_functions_subset = { + getattr(np, func_name) for func_name, *_ in self.__class__._array_tests + } + assert like_array_functions_subset.issubset(all_array_functions) + + nep35_python_functions = { + np.eye, np.fromfunction, np.full, np.genfromtxt, + np.identity, np.loadtxt, np.ones, np.require, np.tri, + } + assert nep35_python_functions.issubset(all_array_functions) + + nep35_C_functions = { + np.arange, np.array, np.asanyarray, np.asarray, + np.ascontiguousarray, np.asfortranarray, np.empty, + np.frombuffer, np.fromfile, np.fromiter, np.fromstring, + np.zeros, + } + assert nep35_C_functions.issubset(all_array_functions) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like(self, function, args, kwargs, numpy_ref): + self.add_method('array', self.MyArray) + self.add_method(function, self.MyArray) + np_func = getattr(np, function) + my_func = getattr(self.MyArray, function) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = self.MyArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + + if numpy_ref is True: + assert type(array_like) is np.ndarray + + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + + # Special-case np.empty to ensure values match + if function == "empty": + np_arr.fill(1) + array_like.fill(1) + + assert_equal(array_like, np_arr) + else: + assert type(array_like) is self.MyArray + assert array_like.function is my_func + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) + def test_no_array_function_like(self, function, args, kwargs, ref): + self.add_method('array', self.MyNoArrayFunctionArray) + self.add_method(function, self.MyNoArrayFunctionArray) + np_func = getattr(np, function) + 
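+        # `like=` requires a reference that implements __array_function__;
+        # plain ints, lists, and MyNoArrayFunctionArray instances lack it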
+ # Instantiate ref if it's the MyNoArrayFunctionArray class + if ref == "MyNoArrayFunctionArray": + ref = self.MyNoArrayFunctionArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + + with assert_raises_regex(TypeError, + 'The `like` argument must be an array-like that implements'): + np_func(*like_args, **kwargs, like=ref) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_subclass(self, function, args, kwargs): + ref = np.array(1).view(self.MySubclass) + np_func = getattr(np, function) + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + assert type(array_like) is self.MySubclass + if np_func is np.empty: + return + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + assert_equal(array_like.view(np.ndarray), np_arr) + + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like_fromfile(self, numpy_ref): + self.add_method('array', self.MyArray) + self.add_method("fromfile", self.MyArray) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = self.MyArray.array() + + data = np.random.random(5) + + with tempfile.TemporaryDirectory() as tmpdir: + fname = os.path.join(tmpdir, "testfile") + data.tofile(fname) + + array_like = np.fromfile(fname, like=ref) + if numpy_ref is True: + assert type(array_like) is np.ndarray + np_res = np.fromfile(fname, like=ref) + assert_equal(np_res, data) + assert_equal(array_like, np_res) + else: + assert type(array_like) is self.MyArray + assert array_like.function is self.MyArray.fromfile + + def test_exception_handling(self): + self.add_method('array', self.MyArray, enable_value_error=True) + + ref = self.MyArray.array() + + with assert_raises(TypeError): + # Raises the error about `value_error` being invalid first + np.array(1, value_error=True, like=ref) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_like_as_none(self, function, args, kwargs): + self.add_method('array', self.MyArray) + self.add_method(function, self.MyArray) + np_func = getattr(np, function) + + like_args = tuple(a() if callable(a) else a for a in args) + # required for loadtxt and genfromtxt to init w/o error. 
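+        # (each callable entry in `args` wraps a fresh StringIO; it must be
+        # called again here, since a stream consumed by the first np_func
+        # call could not be re-read by the second)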
+ like_args_exp = tuple(a() if callable(a) else a for a in args) + + array_like = np_func(*like_args, **kwargs, like=None) + expected = np_func(*like_args_exp, **kwargs) + # Special-case np.empty to ensure values match + if function == "empty": + array_like.fill(1) + expected.fill(1) + assert_equal(array_like, expected) + + +def test_function_like(): + # We provide a `__get__` implementation, make sure it works + assert type(np.mean) is np._core._multiarray_umath._ArrayFunctionDispatcher + + class MyClass: + def __array__(self, dtype=None, copy=None): + # valid argument to mean: + return np.arange(3) + + func1 = staticmethod(np.mean) + func2 = np.mean + func3 = classmethod(np.mean) + + m = MyClass() + assert m.func1([10]) == 10 + assert m.func2() == 1 # mean of the arange + with pytest.raises(TypeError, match="unsupported operand type"): + # Tries to operate on the class + m.func3() + + # Manual binding also works (the above may shortcut): + bound = np.mean.__get__(m, MyClass) + assert bound() == 1 + + bound = np.mean.__get__(None, MyClass) # unbound actually + assert bound([10]) == 10 + + bound = np.mean.__get__(MyClass) # classmethod + with pytest.raises(TypeError, match="unsupported operand type"): + bound() diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_print.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_print.py new file mode 100644 index 0000000..d99b279 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_print.py @@ -0,0 +1,200 @@ +import sys +from io import StringIO + +import pytest + +import numpy as np +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.testing import IS_MUSL, assert_, assert_equal + +_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_types(tp): + """ Check formatting. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(float(x)), + err_msg=f'Failed str formatting for type {tp}') + + if tp(1e16).itemsize > 4: + assert_equal(str(tp(1e16)), str(float('1e16')), + err_msg=f'Failed str formatting for type {tp}') + else: + ref = '1e+16' + assert_equal(str(tp(1e16)), ref, + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_nan_inf_float(tp): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_types(tp): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. 
+ + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(complex(x)), + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x * 1j)), str(complex(x * 1j)), + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x + x * 1j)), str(complex(x + x * 1j)), + err_msg=f'Failed str formatting for type {tp}') + + if tp(1e16).itemsize > 8: + assert_equal(str(tp(1e16)), str(complex(1e16)), + err_msg=f'Failed str formatting for type {tp}') + else: + ref = '(1e+16+0j)' + assert_equal(str(tp(1e16)), ref, + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_inf_nan(dtype): + """Check inf/nan formatting of complex types.""" + TESTS = { + complex(np.inf, 0): "(inf+0j)", + complex(0, np.inf): "infj", + complex(-np.inf, 0): "(-inf+0j)", + complex(0, -np.inf): "-infj", + complex(np.inf, 1): "(inf+1j)", + complex(1, np.inf): "(1+infj)", + complex(-np.inf, 1): "(-inf+1j)", + complex(1, -np.inf): "(1-infj)", + complex(np.nan, 0): "(nan+0j)", + complex(0, np.nan): "nanj", + complex(-np.nan, 0): "(nan+0j)", + complex(0, -np.nan): "nanj", + complex(np.nan, 1): "(nan+1j)", + complex(1, np.nan): "(1+nanj)", + complex(-np.nan, 1): "(nan+1j)", + complex(1, -np.nan): "(1+nanj)", + } + for c, s in TESTS.items(): + assert_equal(str(dtype(c)), s) + + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print(tp(x)) + sys.stdout = file + if ref: + print(ref) + else: + print(x) + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg=f'print failed for type{tp}') + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_type_print(tp): + """Check formatting when using print """ + for x in [0, 1, -1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e16).itemsize > 4: + _test_redirected_print(1e16, tp) + else: + ref = '1e+16' + _test_redirected_print(1e16, tp, ref) + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_type_print(tp): + """Check formatting when using print """ + # We do not create complex with inf/nan directly because the feature is + # missing in python < 2.6 + for x in [0, 1, -1, 1e20]: + _test_redirected_print(complex(x), tp) + + if tp(1e16).itemsize > 8: + _test_redirected_print(complex(1e16), tp) + else: + ref = '(1e+16+0j)' + _test_redirected_print(complex(1e16), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + + +def test_scalar_format(): + """Test the str.format method with NumPy scalar types""" + tests = [('{0}', True, np.bool), + ('{0}', False, np.bool), + ('{0:d}', 130, np.uint8), + ('{0:d}', 50000, np.uint16), + ('{0:d}', 3000000000, np.uint32), + ('{0:d}', 15000000000000000000, np.uint64), + ('{0:d}', -120, np.int8), + ('{0:d}', -30000, np.int16), + ('{0:d}', -2000000000, np.int32), + ('{0:d}', -7000000000000000000, np.int64), + ('{0:g}', 1.5, np.float16), + ('{0:g}', 1.5, np.float32), + ('{0:g}', 1.5, np.float64), + ('{0:g}', 1.5, np.longdouble), + ('{0:g}', 1.5 + 0.5j, np.complex64), + ('{0:g}', 1.5 + 0.5j, np.complex128), + ('{0:g}', 1.5 + 0.5j, np.clongdouble)] + + for (fmat, val, valtype) in 
tests:
+        try:
+            assert_equal(fmat.format(val), fmat.format(valtype(val)),
+                         f"failed with val {val}, type {valtype}")
+        except ValueError as e:
+            assert_(False,
+                    "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
+                    (fmat, repr(val), repr(valtype), str(e)))
+
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+    def test_locale_single(self):
+        assert_equal(str(np.float32(1.2)), str(1.2))
+
+    def test_locale_double(self):
+        assert_equal(str(np.double(1.2)), str(1.2))
+
+    @pytest.mark.skipif(IS_MUSL,
+                        reason="test flaky on musllinux")
+    def test_locale_longdouble(self):
+        assert_equal(str(np.longdouble('1.2')), str(1.2))
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_protocols.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_protocols.py
new file mode 100644
index 0000000..96bb600
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_protocols.py
@@ -0,0 +1,46 @@
+import warnings
+
+import pytest
+
+import numpy as np
+
+
+@pytest.mark.filterwarnings("error")
+def test_getattr_warning():
+    # issue gh-14735: make sure we clear only getattr errors, and let warnings
+    # through
+    class Wrapper:
+        def __init__(self, array):
+            self.array = array
+
+        def __len__(self):
+            return len(self.array)
+
+        def __getitem__(self, item):
+            return type(self)(self.array[item])
+
+        def __getattr__(self, name):
+            if name.startswith("__array_"):
+                warnings.warn("object got converted", UserWarning, stacklevel=1)
+
+            return getattr(self.array, name)
+
+        def __repr__(self):
+            return f"<Wrapper({self.array})>"
+
+    array = Wrapper(np.arange(10))
+    with pytest.raises(UserWarning, match="object got converted"):
+        np.asarray(array)
+
+
+def test_array_called():
+    class Wrapper:
+        val = '0' * 100
+
+        def __array__(self, dtype=None, copy=None):
+            return np.array([self.val], dtype=dtype, copy=copy)
+
+    wrapped = Wrapper()
+    arr = np.array(wrapped, dtype=str)
+    assert arr.dtype == 'U100'
+    assert arr[0] == Wrapper.val
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_records.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_records.py
new file mode 100644
index 0000000..b4b93ae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_records.py
@@ -0,0 +1,544 @@
+import collections.abc
+import pickle
+import textwrap
+from io import BytesIO
+from os import path
+from pathlib import Path
+
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_equal,
+    assert_raises,
+    temppath,
+)
+
+
+class TestFromrecords:
+    def test_fromrecords(self):
+        r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
+                               names='col1,col2,col3')
+        assert_equal(r[0].item(), (456, 'dbe', 1.2))
+        assert_equal(r['col1'].dtype.kind, 'i')
+        assert_equal(r['col2'].dtype.kind, 'U')
+        assert_equal(r['col2'].dtype.itemsize, 12)
+        assert_equal(r['col3'].dtype.kind, 'f')
+
+    def test_fromrecords_0len(self):
+        """ Verify fromrecords works with a 0-length input """
+        dtype = [('a', float), ('b', float)]
+        r = np.rec.fromrecords([], dtype=dtype)
+        assert_equal(r.shape, (0,))
+
+    def test_fromrecords_2d(self):
+        data = [
+            [(1, 2), (3, 4), (5, 6)],
+            [(6, 5), (4, 3), (2, 1)]
+        ]
+        expected_a = [[1, 3, 5], [6, 4, 2]]
+        expected_b = [[2, 4, 6], [5, 3, 1]]
+
+        # try with dtype
+        r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
+        assert_equal(r1['a'], expected_a)
+
assert_equal(r1['b'], expected_b) + + # try with names + r2 = np.rec.fromrecords(data, names=['a', 'b']) + assert_equal(r2['a'], expected_a) + assert_equal(r2['b'], expected_b) + + assert_equal(r1, r2) + + def test_method_array(self): + r = np.rec.array( + b'abcdefg' * 100, formats='i2,S3,i4', shape=3, byteorder='big' + ) + assert_equal(r[1].item(), (25444, b'efg', 1633837924)) + + def test_method_array2(self): + r = np.rec.array( + [ + (1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), + (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g') + ], + formats='u1,f4,S1' + ) + assert_equal(r[1].item(), (2, 22.0, b'b')) + + def test_recarray_slices(self): + r = np.rec.array( + [ + (1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), + (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g') + ], + formats='u1,f4,S1' + ) + assert_equal(r[1::2][1].item(), (4, 44.0, b'd')) + + def test_recarray_fromarrays(self): + x1 = np.array([1, 2, 3, 4]) + x2 = np.array(['a', 'dd', 'xyz', '12']) + x3 = np.array([1.1, 2, 3, 4]) + r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') + assert_equal(r[1].item(), (2, 'dd', 2.0)) + x1[1] = 34 + assert_equal(r.a, np.array([1, 2, 3, 4])) + + def test_recarray_fromfile(self): + data_dir = path.join(path.dirname(__file__), 'data') + filename = path.join(data_dir, 'recarray_from_file.fits') + fd = open(filename, 'rb') + fd.seek(2880 * 2) + r1 = np.rec.fromfile(fd, formats='f8,i4,S5', shape=3, byteorder='big') + fd.seek(2880 * 2) + r2 = np.rec.array(fd, formats='f8,i4,S5', shape=3, byteorder='big') + fd.seek(2880 * 2) + bytes_array = BytesIO() + bytes_array.write(fd.read()) + bytes_array.seek(0) + r3 = np.rec.fromfile( + bytes_array, formats='f8,i4,S5', shape=3, byteorder='big' + ) + fd.close() + assert_equal(r1, r2) + assert_equal(r2, r3) + + def test_recarray_from_obj(self): + count = 10 + a = np.zeros(count, dtype='O') + b = np.zeros(count, dtype='f8') + c = np.zeros(count, dtype='f8') + for i in range(len(a)): + a[i] = list(range(1, 10)) + + mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') + for i in range(len(a)): + assert_(mine.date[i] == list(range(1, 10))) + assert_(mine.data1[i] == 0.0) + assert_(mine.data2[i] == 0.0) + + def test_recarray_repr(self): + a = np.array([(1, 0.1), (2, 0.2)], + dtype=[('foo', ' 2) & (a < 6)) + xb = np.where((b > 2) & (b < 6)) + ya = ((a > 2) & (a < 6)) + yb = ((b > 2) & (b < 6)) + assert_array_almost_equal(xa, ya.nonzero()) + assert_array_almost_equal(xb, yb.nonzero()) + assert_(np.all(a[ya] > 0.5)) + assert_(np.all(b[yb] > 0.5)) + + def test_endian_where(self): + # GitHub issue #369 + net = np.zeros(3, dtype='>f4') + net[1] = 0.00458849 + net[2] = 0.605202 + max_net = net.max() + test = np.where(net <= 0., max_net, net) + correct = np.array([0.60520202, 0.00458849, 0.60520202]) + assert_array_almost_equal(test, correct) + + def test_endian_recarray(self): + # Ticket #2185 + dt = np.dtype([ + ('head', '>u4'), + ('data', '>u4', 2), + ]) + buf = np.recarray(1, dtype=dt) + buf[0]['head'] = 1 + buf[0]['data'][:] = [1, 1] + + h = buf[0]['head'] + d = buf[0]['data'][0] + buf[0]['head'] = h + buf[0]['data'][0] = d + assert_(buf[0]['head'] == 1) + + def test_mem_dot(self): + # Ticket #106 + x = np.random.randn(0, 1) + y = np.random.randn(10, 1) + # Dummy array to detect bad memory access: + _z = np.ones(10) + _dummy = np.empty((0, 10)) + z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) + np.dot(x, np.transpose(y), out=z) + assert_equal(_z, np.ones(10)) + # Do the same for the built-in dot: + np._core.multiarray.dot(x, 
np.transpose(y), out=z) + assert_equal(_z, np.ones(10)) + + def test_arange_endian(self): + # Ticket #111 + ref = np.arange(10) + x = np.arange(10, dtype=' 1 and x['two'] > 2) + + def test_method_args(self): + # Make sure methods and functions have same default axis + # keyword and arguments + funcs1 = ['argmax', 'argmin', 'sum', 'any', 'all', 'cumsum', + 'cumprod', 'prod', 'std', 'var', 'mean', + 'round', 'min', 'max', 'argsort', 'sort'] + funcs2 = ['compress', 'take', 'repeat'] + + for func in funcs1: + arr = np.random.rand(8, 7) + arr2 = arr.copy() + res1 = getattr(arr, func)() + res2 = getattr(np, func)(arr2) + if res1 is None: + res1 = arr + + if res1.dtype.kind in 'uib': + assert_((res1 == res2).all(), func) + else: + assert_(abs(res1 - res2).max() < 1e-8, func) + + for func in funcs2: + arr1 = np.random.rand(8, 7) + arr2 = np.random.rand(8, 7) + res1 = None + if func == 'compress': + arr1 = arr1.ravel() + res1 = getattr(arr2, func)(arr1) + else: + arr2 = (15 * arr2).astype(int).ravel() + if res1 is None: + res1 = getattr(arr1, func)(arr2) + res2 = getattr(np, func)(arr1, arr2) + assert_(abs(res1 - res2).max() < 1e-8, func) + + def test_mem_lexsort_strings(self): + # Ticket #298 + lst = ['abc', 'cde', 'fgh'] + np.lexsort((lst,)) + + def test_fancy_index(self): + # Ticket #302 + x = np.array([1, 2])[np.array([0])] + assert_equal(x.shape, (1,)) + + def test_recarray_copy(self): + # Ticket #312 + dt = [('x', np.int16), ('y', np.float64)] + ra = np.array([(1, 2.3)], dtype=dt) + rb = np.rec.array(ra, dtype=dt) + rb['x'] = 2. + assert_(ra['x'] != rb['x']) + + def test_rec_fromarray(self): + # Ticket #322 + x1 = np.array([[1, 2], [3, 4], [5, 6]]) + x2 = np.array(['a', 'dd', 'xyz']) + x3 = np.array([1.1, 2, 3]) + np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,S3,f8") + + def test_object_array_assign(self): + x = np.empty((2, 2), object) + x.flat[2] = (1, 2, 3) + assert_equal(x.flat[2], (1, 2, 3)) + + def test_ndmin_float64(self): + # Ticket #324 + x = np.array([1, 2, 3], dtype=np.float64) + assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) + assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) + + def test_ndmin_order(self): + # Issue #465 and related checks + assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) + + def test_mem_axis_minimization(self): + # Ticket #327 + data = np.arange(5) + data = np.add.outer(data, data) + + def test_mem_float_imag(self): + # Ticket #330 + np.float64(1.0).imag + + def test_dtype_tuple(self): + # Ticket #334 + assert_(np.dtype('i4') == np.dtype(('i4', ()))) + + def test_dtype_posttuple(self): + # Ticket #335 + np.dtype([('col1', '()i4')]) + + def test_numeric_carray_compare(self): + # Ticket #341 + assert_equal(np.array(['X'], 'c'), b'X') + + def test_string_array_size(self): + # Ticket #342 + assert_raises(ValueError, + np.array, [['X'], ['X', 'X', 'X']], '|S1') + + def test_dtype_repr(self): + # Ticket #344 + dt1 = np.dtype(('uint32', 2)) + dt2 = np.dtype(('uint32', (2,))) + assert_equal(dt1.__repr__(), dt2.__repr__()) + + def test_reshape_order(self): + # Make sure reshape order works. 
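+        # order='F' fills the first axis fastest, e.g.
+        # np.arange(6).reshape(2, 3, order='F') -> [[0, 2, 4], [1, 3, 5]]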
+ a = np.arange(6).reshape(2, 3, order='F') + assert_equal(a, [[0, 2, 4], [1, 3, 5]]) + a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + b = a[:, 1] + assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) + + def test_reshape_zero_strides(self): + # Issue #380, test reshaping of zero strided arrays + a = np.ones(1) + a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + assert_(a.reshape(5, 1).strides[0] == 0) + + def test_reshape_zero_size(self): + # GitHub Issue #2700, setting shape failed for 0-sized arrays + a = np.ones((0, 2)) + a.shape = (-1, 2) + + def test_reshape_trailing_ones_strides(self): + # GitHub issue gh-2949, bad strides for trailing ones of new shape + a = np.zeros(12, dtype=np.int32)[::2] # not contiguous + strides_c = (16, 8, 8, 8) + strides_f = (8, 24, 48, 48) + assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) + assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) + assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) + + def test_repeat_discont(self): + # Ticket #352 + a = np.arange(12).reshape(4, 3)[:, 2] + assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) + + def test_array_index(self): + # Make sure optimization is not called in this case. + a = np.array([1, 2, 3]) + a2 = np.array([[1, 2, 3]]) + assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) + + def test_object_argmax(self): + a = np.array([1, 2, 3], dtype=object) + assert_(a.argmax() == 2) + + def test_recarray_fields(self): + # Ticket #372 + dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) + dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) + for a in [np.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)]), + np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), + np.rec.fromarrays([(1, 2), (3, 4)])]: + assert_(a.dtype in [dt0, dt1]) + + def test_random_shuffle(self): + # Ticket #374 + a = np.arange(5).reshape((5, 1)) + b = a.copy() + np.random.shuffle(b) + assert_equal(np.sort(b, axis=0), a) + + def test_refcount_vdot(self): + # Changeset #3443 + _assert_valid_refcount(np.vdot) + + def test_startswith(self): + ca = np.char.array(['Hi', 'There']) + assert_equal(ca.startswith('H'), [True, False]) + + def test_noncommutative_reduce_accumulate(self): + # Ticket #413 + tosubtract = np.arange(5) + todivide = np.array([2.0, 0.5, 0.25]) + assert_equal(np.subtract.reduce(tosubtract), -10) + assert_equal(np.divide.reduce(todivide), 16.0) + assert_array_equal(np.subtract.accumulate(tosubtract), + np.array([0, -1, -3, -6, -10])) + assert_array_equal(np.divide.accumulate(todivide), + np.array([2., 4., 16.])) + + def test_convolve_empty(self): + # Convolve should raise an error for empty input array. + assert_raises(ValueError, np.convolve, [], [1]) + assert_raises(ValueError, np.convolve, [1], []) + + def test_multidim_byteswap(self): + # Ticket #449 + r = np.array([(1, (0, 1, 2))], dtype="i2,3i2") + assert_array_equal(r.byteswap(), + np.array([(256, (0, 256, 512))], r.dtype)) + + def test_string_NULL(self): + # Changeset 3557 + assert_equal(np.array("a\x00\x0b\x0c\x00").item(), + 'a\x00\x0b\x0c') + + def test_junk_in_string_fields_of_recarray(self): + # Ticket #483 + r = np.array([[b'abc']], dtype=[('var1', '|S20')]) + assert_(asbytes(r['var1'][0][0]) == b'abc') + + def test_take_output(self): + # Ensure that 'take' honours output parameter. 
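+ # (When out= is passed, np.take must write its result into that array in place.)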
+ x = np.arange(12).reshape((3, 4)) + a = np.take(x, [0, 2], axis=1) + b = np.zeros_like(a) + np.take(x, [0, 2], axis=1, out=b) + assert_array_equal(a, b) + + def test_take_object_fail(self): + # Issue gh-3001 + d = 123. + a = np.array([d, 1], dtype=object) + if HAS_REFCOUNT: + ref_d = sys.getrefcount(d) + try: + a.take([0, 100]) + except IndexError: + pass + if HAS_REFCOUNT: + assert_(ref_d == sys.getrefcount(d)) + + def test_array_str_64bit(self): + # Ticket #501 + s = np.array([1, np.nan], dtype=np.float64) + with np.errstate(all='raise'): + np.array_str(s) # Should succeed + + def test_frompyfunc_endian(self): + # Ticket #503 + from math import radians + uradians = np.frompyfunc(radians, 1, 1) + big_endian = np.array([83.4, 83.5], dtype='>f8') + little_endian = np.array([83.4, 83.5], dtype=' object + # casting succeeds + def rs(): + x = np.ones([484, 286]) + y = np.zeros([484, 286]) + x |= y + + assert_raises(TypeError, rs) + + def test_unicode_scalar(self): + # Ticket #600 + x = np.array(["DROND", "DROND1"], dtype="U6") + el = x[1] + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + new = pickle.loads(pickle.dumps(el, protocol=proto)) + assert_equal(new, el) + + def test_arange_non_native_dtype(self): + # Ticket #616 + for T in ('>f4', ' 0)] = v + + assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float)) + assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) + + # Old special case (different code path): + assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) + assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) + + def test_mem_scalar_indexing(self): + # Ticket #603 + x = np.array([0], dtype=float) + index = np.array(0, dtype=np.int32) + x[index] + + def test_binary_repr_0_width(self): + assert_equal(np.binary_repr(0, width=3), '000') + + def test_fromstring(self): + assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), + [12, 9, 9]) + + def test_searchsorted_variable_length(self): + x = np.array(['a', 'aa', 'b']) + y = np.array(['d', 'e']) + assert_equal(x.searchsorted(y), [3, 3]) + + def test_string_argsort_with_zeros(self): + # Check argsort for strings containing zeros. + x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") + assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) + assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) + + def test_string_sort_with_zeros(self): + # Check sort for strings containing zeros. + x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") + y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2") + assert_array_equal(np.sort(x, kind="q"), y) + + def test_copy_detection_zero_dim(self): + # Ticket #658 + np.indices((0, 3, 4)).T.reshape(-1, 3) + + def test_flat_byteorder(self): + # Ticket #657 + x = np.arange(10) + assert_array_equal(x.astype('>i4'), x.astype('i4').flat[:], x.astype('i4')): + x = np.array([-1, 0, 1], dtype=dt) + assert_equal(x.flat[0].dtype, x[0].dtype) + + def test_copy_detection_corner_case(self): + # Ticket #658 + np.indices((0, 3, 4)).T.reshape(-1, 3) + + def test_object_array_refcounting(self): + # Ticket #633 + if not hasattr(sys, 'getrefcount'): + return + + # NB. 
this is probably CPython-specific + + cnt = sys.getrefcount + + a = object() + b = object() + c = object() + + cnt0_a = cnt(a) + cnt0_b = cnt(b) + cnt0_c = cnt(c) + + # -- 0d -> 1-d broadcast slice assignment + + arr = np.zeros(5, dtype=np.object_) + + arr[:] = a + assert_equal(cnt(a), cnt0_a + 5) + + arr[:] = b + assert_equal(cnt(a), cnt0_a) + assert_equal(cnt(b), cnt0_b + 5) + + arr[:2] = c + assert_equal(cnt(b), cnt0_b + 3) + assert_equal(cnt(c), cnt0_c + 2) + + del arr + + # -- 1-d -> 2-d broadcast slice assignment + + arr = np.zeros((5, 2), dtype=np.object_) + arr0 = np.zeros(2, dtype=np.object_) + + arr0[0] = a + assert_(cnt(a) == cnt0_a + 1) + arr0[1] = b + assert_(cnt(b) == cnt0_b + 1) + + arr[:, :] = arr0 + assert_(cnt(a) == cnt0_a + 6) + assert_(cnt(b) == cnt0_b + 6) + + arr[:, 0] = None + assert_(cnt(a) == cnt0_a + 1) + + del arr, arr0 + + # -- 2-d copying + flattening + + arr = np.zeros((5, 2), dtype=np.object_) + + arr[:, 0] = a + arr[:, 1] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + arr2 = arr[:, 0].copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.flatten() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + del arr, arr2 + + # -- concatenate, repeat, take, choose + + arr1 = np.zeros((5, 1), dtype=np.object_) + arr2 = np.zeros((5, 1), dtype=np.object_) + + arr1[...] = a + arr2[...] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + tmp = np.concatenate((arr1, arr2)) + assert_(cnt(a) == cnt0_a + 5 + 5) + assert_(cnt(b) == cnt0_b + 5 + 5) + + tmp = arr1.repeat(3, axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3 * 5) + + tmp = arr1.take([1, 2, 3], axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3) + + x = np.array([[0], [1], [0], [1], [1]], int) + tmp = x.choose(arr1, arr2) + assert_(cnt(a) == cnt0_a + 5 + 2) + assert_(cnt(b) == cnt0_b + 5 + 3) + + def test_mem_custom_float_to_array(self): + # Ticket 702 + class MyFloat: + def __float__(self): + return 1.0 + + tmp = np.atleast_1d([MyFloat()]) + tmp.astype(float) # Should succeed + + def test_object_array_refcount_self_assign(self): + # Ticket #711 + class VictimObject: + deleted = False + + def __del__(self): + self.deleted = True + + d = VictimObject() + arr = np.zeros(5, dtype=np.object_) + arr[:] = d + del d + arr[:] = arr # refcount of 'd' might hit zero here + assert_(not arr[0].deleted) + arr[:] = arr # trying to induce a segfault by doing it again... + assert_(not arr[0].deleted) + + def test_mem_fromiter_invalid_dtype_string(self): + x = [1, 2, 3] + assert_raises(ValueError, + np.fromiter, list(x), dtype='S') + + def test_reduce_big_object_array(self): + # Ticket #713 + oldsize = np.setbufsize(10 * 16) + a = np.array([None] * 161, object) + assert_(not np.any(a)) + np.setbufsize(oldsize) + + def test_mem_0d_array_index(self): + # Ticket #714 + np.zeros(10)[np.array(0)] + + def test_nonnative_endian_fill(self): + # Non-native endian arrays were incorrectly filled with scalars + # before r5034. 
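+ # (Filling must store the scalar in the array's own, non-native byte order.)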
+ if sys.byteorder == 'little': + dtype = np.dtype('>i4') + else: + dtype = np.dtype('data contains non-zero floats + x = np.array([123456789e199], dtype=np.float64) + if IS_PYPY: + x.resize((m, 0), refcheck=False) + else: + x.resize((m, 0)) + y = np.array([123456789e199], dtype=np.float64) + if IS_PYPY: + y.resize((0, n), refcheck=False) + else: + y.resize((0, n)) + + # `dot` should just return zero (m, n) matrix + z = np.dot(x, y) + assert_(np.all(z == 0)) + assert_(z.shape == (m, n)) + + def test_zeros(self): + # Regression test for #1061. + # Set a size which cannot fit into a 64 bits signed integer + sz = 2 ** 64 + with assert_raises_regex(ValueError, + 'Maximum allowed dimension exceeded'): + np.empty(sz) + + def test_huge_arange(self): + # Regression test for #1062. + # Set a size which cannot fit into a 64 bits signed integer + sz = 2 ** 64 + with assert_raises_regex(ValueError, + 'Maximum allowed size exceeded'): + np.arange(sz) + assert_(np.size == sz) + + def test_fromiter_bytes(self): + # Ticket #1058 + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_array_from_sequence_scalar_array(self): + # Ticket #1078: segfaults when creating an array with a sequence of + # 0d arrays. + a = np.array((np.ones(2), np.array(2)), dtype=object) + assert_equal(a.shape, (2,)) + assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], np.ones(2)) + assert_equal(a[1], np.array(2)) + + a = np.array(((1,), np.array(1)), dtype=object) + assert_equal(a.shape, (2,)) + assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], (1,)) + assert_equal(a[1], np.array(1)) + + def test_array_from_sequence_scalar_array2(self): + # Ticket #1081: weird array with strange input... + t = np.array([np.array([]), np.array(0, object)], dtype=object) + assert_equal(t.shape, (2,)) + assert_equal(t.dtype, np.dtype(object)) + + def test_array_too_big(self): + # Ticket #1080. + assert_raises(ValueError, np.zeros, [975] * 7, np.int8) + assert_raises(ValueError, np.zeros, [26244] * 5, np.int8) + + def test_dtype_keyerrors_(self): + # Ticket #1106. + dt = np.dtype([('f1', np.uint)]) + assert_raises(KeyError, dt.__getitem__, "f2") + assert_raises(IndexError, dt.__getitem__, 1) + assert_raises(TypeError, dt.__getitem__, 0.0) + + def test_lexsort_buffer_length(self): + # Ticket #1217, don't segfault. + a = np.ones(100, dtype=np.int8) + b = np.ones(100, dtype=np.int32) + i = np.lexsort((a[::-1], b)) + assert_equal(i, np.arange(100, dtype=int)) + + def test_object_array_to_fixed_string(self): + # Ticket #1235. + a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) + b = np.array(a, dtype=(np.str_, 8)) + assert_equal(a, b) + c = np.array(a, dtype=(np.str_, 5)) + assert_equal(c, np.array(['abcde', 'ijklm'])) + d = np.array(a, dtype=(np.str_, 12)) + assert_equal(a, d) + e = np.empty((2, ), dtype=(np.str_, 8)) + e[:] = a[:] + assert_equal(a, e) + + def test_unicode_to_string_cast(self): + # Ticket #1240. 
+ a = np.array([['abc', '\u03a3'], + ['asdf', 'erw']], + dtype='U') + assert_raises(UnicodeEncodeError, np.array, a, 'S4') + + def test_unicode_to_string_cast_error(self): + # gh-15790 + a = np.array(['\x80'] * 129, dtype='U3') + assert_raises(UnicodeEncodeError, np.array, a, 'S') + b = a.reshape(3, 43)[:-1, :-1] + assert_raises(UnicodeEncodeError, np.array, b, 'S') + + def test_mixed_string_byte_array_creation(self): + a = np.array(['1234', b'123']) + assert_(a.itemsize == 16) + a = np.array([b'123', '1234']) + assert_(a.itemsize == 16) + a = np.array(['1234', b'123', '12345']) + assert_(a.itemsize == 20) + a = np.array([b'123', '1234', b'12345']) + assert_(a.itemsize == 20) + a = np.array([b'123', '1234', b'1234']) + assert_(a.itemsize == 16) + + def test_misaligned_objects_segfault(self): + # Ticket #1198 and #1267 + a1 = np.zeros((10,), dtype='O,c') + a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') + a1['f0'] = a2 + repr(a1) + np.argmax(a1['f0']) + a1['f0'][1] = "FOO" + a1['f0'] = "FOO" + np.array(a1['f0'], dtype='S') + np.nonzero(a1['f0']) + a1.sort() + copy.deepcopy(a1) + + def test_misaligned_scalars_segfault(self): + # Ticket #1267 + s1 = np.array(('a', 'Foo'), dtype='c,O') + s2 = np.array(('b', 'Bar'), dtype='c,O') + s1['f1'] = s2['f1'] + s1['f1'] = 'Baz' + + def test_misaligned_dot_product_objects(self): + # Ticket #1267 + # This didn't require a fix, but it's worth testing anyway, because + # it may fail if .dot stops enforcing the arrays to be BEHAVED + a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') + b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') + np.dot(a['f0'], b['f0']) + + def test_byteswap_complex_scalar(self): + # Ticket #1259 and gh-441 + for dtype in [np.dtype('<' + t) for t in np.typecodes['Complex']]: + z = np.array([2.2 - 1.1j], dtype) + x = z[0] # always native-endian + y = x.byteswap() + if x.dtype.byteorder == z.dtype.byteorder: + # little-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder())) + else: + # big-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype)) + # double check real and imaginary parts: + assert_equal(x.real, y.real.byteswap()) + assert_equal(x.imag, y.imag.byteswap()) + + def test_structured_arrays_with_objects1(self): + # Ticket #1299 + stra = 'aaaa' + strb = 'bbbb' + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(x[0, 1] == x[0, 0]) + + @pytest.mark.skipif( + sys.version_info >= (3, 12), + reason="Python 3.12 has immortal refcounts, this test no longer works." + ) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_structured_arrays_with_objects2(self): + # Ticket #1299 second test + stra = 'aaaa' + strb = 'bbbb' + numb = sys.getrefcount(strb) + numa = sys.getrefcount(stra) + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(sys.getrefcount(strb) == numb) + assert_(sys.getrefcount(stra) == numa + 2) + + def test_duplicate_title_and_name(self): + # Ticket #1254 + dtspec = [(('a', 'a'), 'i'), ('b', 'i')] + assert_raises(ValueError, np.dtype, dtspec) + + def test_signed_integer_division_overflow(self): + # Ticket #1317. 
+ def test_type(t): + min = np.array([np.iinfo(t).min]) + min //= -1 + + with np.errstate(over="ignore"): + for t in (np.int8, np.int16, np.int32, np.int64, int): + test_type(t) + + def test_buffer_hashlib(self): + from hashlib import sha256 + + x = np.array([1, 2, 3], dtype=np.dtype('c') + + def test_log1p_compiler_shenanigans(self): + # Check if log1p is behaving on 32 bit intel systems. + assert_(np.isfinite(np.log1p(np.exp2(-53)))) + + def test_fromiter_comparison(self): + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_fromstring_crash(self): + with assert_raises(ValueError): + np.fromstring(b'aa, aa, 1.0', sep=',') + + def test_ticket_1539(self): + dtypes = [x for x in np._core.sctypeDict.values() + if (issubclass(x, np.number) + and not issubclass(x, np.timedelta64))] + a = np.array([], np.bool) # not x[0] because it is unordered + failures = [] + + for x in dtypes: + b = a.astype(x) + for y in dtypes: + c = a.astype(y) + try: + d = np.dot(b, c) + except TypeError: + failures.append((x, y)) + else: + if d != 0: + failures.append((x, y)) + if failures: + raise AssertionError(f"Failures: {failures!r}") + + def test_ticket_1538(self): + x = np.finfo(np.float32) + for name in ('eps', 'epsneg', 'max', 'min', 'resolution', 'tiny'): + assert_equal(type(getattr(x, name)), np.float32, + err_msg=name) + + def test_ticket_1434(self): + # Check that the out= argument in var and std has an effect + data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) + out = np.zeros((3,)) + + ret = data.var(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.var(axis=1)) + + ret = data.std(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.std(axis=1)) + + def test_complex_nan_maximum(self): + cnan = complex(0, np.nan) + assert_equal(np.maximum(1, cnan), cnan) + + def test_subclass_int_tuple_assignment(self): + # ticket #1563 + class Subclass(np.ndarray): + def __new__(cls, i): + return np.ones((i,)).view(cls) + + x = Subclass(5) + x[(0,)] = 2 # shouldn't raise an exception + assert_equal(x[0], 2) + + def test_ufunc_no_unnecessary_views(self): + # ticket #1548 + class Subclass(np.ndarray): + pass + x = np.array([1, 2, 3]).view(Subclass) + y = np.add(x, x, x) + assert_equal(id(x), id(y)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_take_refcount(self): + # ticket #939 + a = np.arange(16, dtype=float) + a.shape = (4, 4) + lut = np.ones((5 + 3, 4), float) + rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) + c1 = sys.getrefcount(rgba) + try: + lut.take(a, axis=0, mode='clip', out=rgba) + except TypeError: + pass + c2 = sys.getrefcount(rgba) + assert_equal(c1, c2) + + def test_fromfile_tofile_seeks(self): + # tofile/fromfile used to get (#1610) the Python file handle out of sync + with tempfile.NamedTemporaryFile() as f: + f.write(np.arange(255, dtype='u1').tobytes()) + + f.seek(20) + ret = np.fromfile(f, count=4, dtype='u1') + assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) + assert_equal(f.tell(), 24) + + f.seek(40) + np.array([1, 2, 3], dtype='u1').tofile(f) + assert_equal(f.tell(), 43) + + f.seek(40) + data = f.read(3) + assert_equal(data, b"\x01\x02\x03") + + f.seek(80) + f.read(4) + data = np.fromfile(f, dtype='u1', count=4) + assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) + + def test_complex_scalar_warning(self): + for tp in 
[np.csingle, np.cdouble, np.clongdouble]: + x = tp(1 + 2j) + assert_warns(ComplexWarning, float, x) + with suppress_warnings() as sup: + sup.filter(ComplexWarning) + assert_equal(float(x), float(x.real)) + + def test_complex_scalar_complex_cast(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1 + 2j) + assert_equal(complex(x), 1 + 2j) + + def test_complex_boolean_cast(self): + # Ticket #2218 + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = np.array([0, 0 + 0.5j, 0.5 + 0j], dtype=tp) + assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) + assert_(np.any(x)) + assert_(np.all(x[1:])) + + def test_uint_int_conversion(self): + x = 2**64 - 1 + assert_equal(int(np.uint64(x)), x) + + def test_duplicate_field_names_assign(self): + ra = np.fromiter(((i * 3, i * 2) for i in range(10)), dtype='i8,f8') + ra.dtype.names = ('f1', 'f2') + repr(ra) # should not cause a segmentation fault + assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) + + def test_eq_string_and_object_array(self): + # From e-mail thread "__eq__ with str and object" (Keith Goodman) + a1 = np.array(['a', 'b'], dtype=object) + a2 = np.array(['a', 'c']) + assert_array_equal(a1 == a2, [True, False]) + assert_array_equal(a2 == a1, [True, False]) + + def test_nonzero_byteswap(self): + a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) + a.dtype = np.float32 + assert_equal(a.nonzero()[0], [1]) + a = a.byteswap() + a = a.view(a.dtype.newbyteorder()) + assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap + + def test_empty_mul(self): + a = np.array([1.]) + a[1:1] *= 2 + assert_equal(a, [1.]) + + def test_array_side_effect(self): + # The second use of itemsize was throwing an exception because in + # ctors.c, discover_itemsize was calling PyObject_Length without + # checking the return code. This failed to get the length of the + # number 2, and the exception hung around until something checked + # PyErr_Occurred() and returned an error. 
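+ # (np.dtype('S10').itemsize must therefore still be 10 after the array creation below.)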
+ assert_equal(np.dtype('S10').itemsize, 10) + np.array([['abc', 2], ['long ', '0123456789']], dtype=np.bytes_) + assert_equal(np.dtype('S10').itemsize, 10) + + def test_any_float(self): + # all and any for floats + a = np.array([0.1, 0.9]) + assert_(np.any(a)) + assert_(np.all(a)) + + def test_large_float_sum(self): + a = np.arange(10000, dtype='f') + assert_equal(a.sum(dtype='d'), a.astype('d').sum()) + + def test_ufunc_casting_out(self): + a = np.array(1.0, dtype=np.float32) + b = np.array(1.0, dtype=np.float64) + c = np.array(1.0, dtype=np.float32) + np.add(a, b, out=c) + assert_equal(c, 2.0) + + def test_array_scalar_contiguous(self): + # Array scalars are both C and Fortran contiguous + assert_(np.array(1.0).flags.c_contiguous) + assert_(np.array(1.0).flags.f_contiguous) + assert_(np.array(np.float32(1.0)).flags.c_contiguous) + assert_(np.array(np.float32(1.0)).flags.f_contiguous) + + def test_squeeze_contiguous(self): + # Similar to GitHub issue #387 + a = np.zeros((1, 2)).squeeze() + b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze() + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.f_contiguous) + + def test_squeeze_axis_handling(self): + # Issue #10779 + # Ensure proper handling of objects + # that don't support axis specification + # when squeezing + + class OldSqueeze(np.ndarray): + + def __new__(cls, + input_array): + obj = np.asarray(input_array).view(cls) + return obj + + # it is perfectly reasonable that prior + # to numpy version 1.7.0 a subclass of ndarray + # might have been created that did not expect + # squeeze to have an axis argument + # NOTE: this example is somewhat artificial; + # it is designed to simulate an old API + # expectation to guard against regression + def squeeze(self): + return super().squeeze() + + oldsqueeze = OldSqueeze(np.array([[1], [2], [3]])) + + # if no axis argument is specified the old API + # expectation should give the correct result + assert_equal(np.squeeze(oldsqueeze), + np.array([1, 2, 3])) + + # likewise, axis=None should work perfectly well + # with the old API expectation + assert_equal(np.squeeze(oldsqueeze, axis=None), + np.array([1, 2, 3])) + + # however, specification of any particular axis + # should raise a TypeError in the context of the + # old API specification, even when using a valid + # axis specification like 1 for this array + with assert_raises(TypeError): + # this would silently succeed for array + # subclasses / objects that did not support + # squeeze axis argument handling before fixing + # Issue #10779 + np.squeeze(oldsqueeze, axis=1) + + # check for the same behavior when using an invalid + # axis specification -- in this case axis=0 does not + # have size 1, but the priority should be to raise + # a TypeError for the axis argument and NOT a + # ValueError for squeezing a non-empty dimension + with assert_raises(TypeError): + np.squeeze(oldsqueeze, axis=0) + + # the new API knows how to handle the axis + # argument and will return a ValueError if + # attempting to squeeze an axis that is not + # of length 1 + with assert_raises(ValueError): + np.squeeze(np.array([[1], [2], [3]]), axis=0) + + def test_reduce_contiguous(self): + # GitHub issue #387 + a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) + b = np.add.reduce(np.zeros((2, 1, 2)), 1) + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.c_contiguous) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited 
stack size") + def test_object_array_self_reference(self): + # Object arrays with references to themselves can cause problems + a = np.array(0, dtype=object) + a[()] = a + assert_raises(RecursionError, int, a) + assert_raises(RecursionError, float, a) + a[()] = None + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_object_array_circular_reference(self): + # Test the same for a circular reference. + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + b[()] = a + assert_raises(RecursionError, int, a) + # NumPy has no tp_traverse currently, so circular references + # cannot be detected. So resolve it: + a[()] = None + + # This was causing a to become like the above + a = np.array(0, dtype=object) + a[...] += 1 + assert_equal(a, 1) + + def test_object_array_nested(self): + # but is fine with a reference to a different array + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + assert_equal(int(a), int(0)) # noqa: UP018 + assert_equal(float(a), float(0)) + + def test_object_array_self_copy(self): + # An object array being copied into itself DECREF'ed before INCREF'ing + # causing segmentation faults (gh-3787) + a = np.array(object(), dtype=object) + np.copyto(a, a) + if HAS_REFCOUNT: + assert_(sys.getrefcount(a[()]) == 2) + a[()].__class__ # will segfault if object was deleted + + def test_zerosize_accumulate(self): + "Ticket #1733" + x = np.array([[42, 0]], dtype=np.uint32) + assert_equal(np.add.accumulate(x[:-1, 0]), []) + + def test_objectarray_setfield(self): + # Setfield should not overwrite Object fields with non-Object data + x = np.array([1, 2, 3], dtype=object) + assert_raises(TypeError, x.setfield, 4, np.int32, 0) + + def test_setting_rank0_string(self): + "Ticket #1736" + s1 = b"hello1" + s2 = b"hello2" + a = np.zeros((), dtype="S10") + a[()] = s1 + assert_equal(a, np.array(s1)) + a[()] = np.array(s2) + assert_equal(a, np.array(s2)) + + a = np.zeros((), dtype='f4') + a[()] = 3 + assert_equal(a, np.array(3)) + a[()] = np.array(4) + assert_equal(a, np.array(4)) + + def test_string_astype(self): + "Ticket #1748" + s1 = b'black' + s2 = b'white' + s3 = b'other' + a = np.array([[s1], [s2], [s3]]) + assert_equal(a.dtype, np.dtype('S5')) + b = a.astype(np.dtype('S0')) + assert_equal(b.dtype, np.dtype('S5')) + + def test_ticket_1756(self): + # Ticket #1756 + s = b'0123456789abcdef' + a = np.array([s] * 5) + for i in range(1, 17): + a1 = np.array(a, "|S%d" % i) + a2 = np.array([s[:i]] * 5) + assert_equal(a1, a2) + + def test_fields_strides(self): + "gh-2355" + r = np.frombuffer(b'abcdefghijklmnop' * 4 * 3, dtype='i4,(2,3)u2') + assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) + assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) + assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) + assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) + + def test_alignment_update(self): + # Check that alignment flag is updated on stride setting + a = np.arange(10) + assert_(a.flags.aligned) + a.strides = 3 + assert_(not a.flags.aligned) + + def test_ticket_1770(self): + "Should not segfault on python 3k" + import numpy as np + try: + a = np.zeros((1,), dtype=[('f1', 'f')]) + a['f1'] = 1 + a['f2'] = 1 + except ValueError: + pass + except Exception: + raise AssertionError + + def test_ticket_1608(self): + "x.flat shouldn't modify data" + x = np.array([[1, 2], [3, 4]]).T + np.array(x.flat) + assert_equal(x, [[1, 3], [2, 4]]) + + def 
test_pickle_string_overwrite(self): + import re + + data = np.array([1], dtype='b') + blob = pickle.dumps(data, protocol=1) + data = pickle.loads(blob) + + # Check that loads does not clobber interned strings + s = re.sub(r"a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + data[0] = 0x6a + s = re.sub(r"a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + + def test_pickle_bytes_overwrite(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + data = np.array([1], dtype='b') + data = pickle.loads(pickle.dumps(data, protocol=proto)) + data[0] = 0x7d + bytestring = "\x01 ".encode('ascii') + assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + + def test_pickle_py2_array_latin1_hack(self): + # Check that unpickling hacks in Py3 that support + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) + data = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\np13\ntp14\nb." + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129]).astype('b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + + def test_pickle_py2_scalar_latin1_hack(self): + # Check that scalar unpickling hack in Py3 that supports + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(...) + datas = [ + # (original, python2_pickle, koi8r_validity) + (np.str_('\u6bd2'), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\ntp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n.", + 'invalid'), + + (np.float64(9e123), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\nbS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n.", + 'invalid'), + + # different 8-bit code point in KOI8-R vs latin1 + (np.bytes_(b'\x9c'), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\ntp8\nRp9\n.", + 'different'), + ] + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under non-latin1 encoding (e.g.) KOI8-R can + # produce bad results, but should not segfault. + if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting to silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so results + # to an encoding exception + assert_raises( + ValueError, pickle.loads, data, encoding='koi8-r' + ) + else: + raise ValueError(koi8r_validity) + + def test_structured_type_to_object(self): + a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') + a_obj = np.empty((2,), dtype=object) + a_obj[0] = (0, 1) + a_obj[1] = (3, 2) + # astype records -> object + assert_equal(a_rec.astype(object), a_obj) + # '=' records -> object + b = np.empty_like(a_obj) + b[...] = a_rec + assert_equal(b, a_obj) + # '=' object -> records + b = np.empty_like(a_rec) + b[...] 
= a_obj + assert_equal(b, a_rec) + + def test_assign_obj_listoflists(self): + # Ticket # 1870 + # The inner list should get assigned to the object elements + a = np.zeros(4, dtype=object) + b = a.copy() + a[0] = [1] + a[1] = [2] + a[2] = [3] + a[3] = [4] + b[...] = [[1], [2], [3], [4]] + assert_equal(a, b) + # The first dimension should get broadcast + a = np.zeros((2, 2), dtype=object) + a[...] = [[1, 2]] + assert_equal(a, [[1, 2], [1, 2]]) + + @pytest.mark.slow_pypy + def test_memoryleak(self): + # Ticket #1917 - ensure that array data doesn't leak + for i in range(1000): + # 100MB times 1000 would give 100GB of memory usage if it leaks + a = np.empty((100000000,), dtype='i1') + del a + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_ufunc_reduce_memoryleak(self): + a = np.arange(6) + acnt = sys.getrefcount(a) + np.add.reduce(a) + assert_equal(sys.getrefcount(a), acnt) + + def test_search_sorted_invalid_arguments(self): + # Ticket #2021, should not segfault. + x = np.arange(0, 4, dtype='datetime64[D]') + assert_raises(TypeError, x.searchsorted, 1) + + def test_string_truncation(self): + # Ticket #1990 - Data can be truncated in creation of an array from a + # mixed sequence of numeric values and strings (gh-2583) + for val in [True, 1234, 123.4, complex(1, 234)]: + for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]: + b = np.array([val, tostr('xx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + # test also with longer strings + b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + def test_string_truncation_ucs2(self): + # Ticket #2081. Python compiled with two byte unicode + # can lead to truncation if itemsize is not properly + # adjusted for NumPy's four byte unicode. + a = np.array(['abcd']) + assert_equal(a.dtype.itemsize, 16) + + def test_unique_stable(self): + # Ticket #2063 must always choose stable sort for argsort to + # get consistent results + v = np.array(([0] * 5 + [1] * 6 + [2] * 6) * 4) + res = np.unique(v, return_index=True) + tgt = (np.array([0, 1, 2]), np.array([0, 5, 11])) + assert_equal(res, tgt) + + def test_unicode_alloc_dealloc_match(self): + # Ticket #1578, the mismatch only showed up when running + # python-debug for python versions >= 2.7, and then as + # a core dump and error message. + a = np.array(['abc'], dtype=np.str_)[0] + del a + + def test_refcount_error_in_clip(self): + # Ticket #1588 + a = np.zeros((2,), dtype='>i2').clip(min=0) + x = a + a + # This used to segfault: + y = str(x) + # Check the final string: + assert_(y == "[0 0]") + + def test_searchsorted_wrong_dtype(self): + # Ticket #2189, it used to segfault, so we check that it raises the + # proper exception. + a = np.array([('a', 1)], dtype='S1, int') + assert_raises(TypeError, np.searchsorted, a, 1.2) + # Ticket #2066, similar problem: + dtype = np.rec.format_parser(['i4', 'i4'], [], []) + a = np.recarray((2,), dtype) + a[...] 
= [(1, 2), (3, 4)] + assert_raises(TypeError, np.searchsorted, a, 1) + + def test_complex64_alignment(self): + # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment + dtt = np.complex64 + arr = np.arange(10, dtype=dtt) + # 2D array + arr2 = np.reshape(arr, (2, 5)) + # Fortran write followed by (C or F) read caused bus error + data_str = arr2.tobytes('F') + data_back = np.ndarray(arr2.shape, + arr2.dtype, + buffer=data_str, + order='F') + assert_array_equal(arr2, data_back) + + def test_structured_count_nonzero(self): + arr = np.array([0, 1]).astype('i4, 2i4')[:1] + count = np.count_nonzero(arr) + assert_equal(count, 0) + + def test_copymodule_preserves_f_contiguity(self): + a = np.empty((2, 2), order='F') + b = copy.copy(a) + c = copy.deepcopy(a) + assert_(b.flags.fortran) + assert_(b.flags.f_contiguous) + assert_(c.flags.fortran) + assert_(c.flags.f_contiguous) + + def test_fortran_order_buffer(self): + import numpy as np + a = np.array([['Hello', 'Foob']], dtype='U5', order='F') + arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) + arr2 = np.array([[['H', 'e', 'l', 'l', 'o'], + ['F', 'o', 'o', 'b', '']]]) + assert_array_equal(arr, arr2) + + def test_assign_from_sequence_error(self): + # Ticket #4024. + arr = np.array([1, 2, 3]) + assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) + arr.__setitem__(slice(None), [9]) + assert_equal(arr, [9, 9, 9]) + + def test_format_on_flex_array_element(self): + # Ticket #4369. + dt = np.dtype([('date', ' 0: + # unpickling ndarray goes through _frombuffer for protocol 5 + assert b'numpy._core.numeric' in s + else: + assert b'numpy._core.multiarray' in s + + def test_object_casting_errors(self): + # gh-11993 update to ValueError (see gh-16909), since strings can in + # principle be converted to complex, but this string cannot. + arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object) + assert_raises(ValueError, arr.astype, 'c8') + + def test_eff1d_casting(self): + # gh-12711 + x = np.array([1, 2, 4, 7, 0], dtype=np.int16) + res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + assert_equal(res, [-99, 1, 2, 3, -7, 88, 99]) + + # The use of safe casting means, that 1<<20 is cast unsafely, an + # error may be better, but currently there is no mechanism for it. + res = np.ediff1d(x, to_begin=(1 << 20), to_end=(1 << 20)) + assert_equal(res, [0, 1, 2, 3, -7, 0]) + + def test_pickle_datetime64_array(self): + # gh-12745 (would fail with pickle5 installed) + d = np.datetime64('2015-07-04 12:59:59.50', 'ns') + arr = np.array([d]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + dumped = pickle.dumps(arr, protocol=proto) + assert_equal(pickle.loads(dumped), arr) + + def test_bad_array_interface(self): + class T: + __array_interface__ = {} + + with assert_raises(ValueError): + np.array([T()]) + + def test_2d__array__shape(self): + class T: + def __array__(self, dtype=None, copy=None): + return np.ndarray(shape=(0, 0)) + + # Make sure __array__ is used instead of Sequence methods. 
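+ # (The sequence protocol below is deliberately unusable, so creation must go through __array__.)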
+ def __iter__(self): + return iter([]) + + def __getitem__(self, idx): + raise AssertionError("__getitem__ was called") + + def __len__(self): + return 0 + + t = T() + # gh-13659, would raise in broadcasting [x=t for x in result] + arr = np.array([t]) + assert arr.shape == (1, 0, 0) + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') + def test_to_ctypes(self): + # gh-14214 + arr = np.zeros((2 ** 31 + 1,), 'b') + assert arr.size * arr.itemsize > 2 ** 31 + c_arr = np.ctypeslib.as_ctypes(arr) + assert_equal(c_arr._length_, arr.size) + + def test_complex_conversion_error(self): + # gh-17068 + with pytest.raises(TypeError, match=r"Unable to convert dtype.*"): + complex(np.array("now", np.datetime64)) + + def test__array_interface__descr(self): + # gh-17068 + dt = np.dtype({'names': ['a', 'b'], + 'offsets': [0, 0], + 'formats': [np.int64, np.int64]}) + descr = np.array((1, 1), dtype=dt).__array_interface__['descr'] + assert descr == [('', '|V8')] # instead of [(b'', '|V8')] + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') + @requires_memory(free_bytes=9e9) + def test_dot_big_stride(self): + # gh-17111 + # blas stride = stride//itemsize > int32 max + int32_max = np.iinfo(np.int32).max + n = int32_max + 3 + a = np.empty([n], dtype=np.float32) + b = a[::n - 1] + b[...] = 1 + assert b.strides[0] > int32_max * b.dtype.itemsize + assert np.dot(b, b) == 2.0 + + def test_frompyfunc_name(self): + # name conversion was failing for python 3 strings + # resulting in the default '?' name. Also test utf-8 + # encoding using non-ascii name. + def cassé(x): + return x + + f = np.frompyfunc(cassé, 1, 1) + assert str(f) == "<ufunc 'cassé (vectorized)'>" + + @pytest.mark.parametrize("operation", [ + 'add', 'subtract', 'multiply', 'floor_divide', + 'conjugate', 'fmod', 'square', 'reciprocal', + 'power', 'absolute', 'negative', 'positive', + 'greater', 'greater_equal', 'less', + 'less_equal', 'equal', 'not_equal', 'logical_and', + 'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or', + 'bitwise_xor', 'invert', 'left_shift', 'right_shift', + 'gcd', 'lcm' + ] + ) + @pytest.mark.parametrize("order", [ + ('b->', 'B->'), + ('h->', 'H->'), + ('i->', 'I->'), + ('l->', 'L->'), + ('q->', 'Q->'), + ] + ) + def test_ufunc_order(self, operation, order): + # gh-18075 + # Ensure signed types before unsigned + def get_idx(string, str_lst): + for i, s in enumerate(str_lst): + if string in s: + return i + raise ValueError(f"{string} not in list") + types = getattr(np, operation).types + assert get_idx(order[0], types) < get_idx(order[1], types), ( + f"Unexpected types order of ufunc in {operation} " + f"for {order}. Possible fix: Use signed before unsigned " + "in generate_umath.py") + + def test_nonbool_logical(self): + # gh-22845 + # create two arrays with bit patterns that do not overlap.
+ # needs to be large enough to test both SIMD and scalar paths + size = 100 + a = np.frombuffer(b'\x01' * size, dtype=np.bool) + b = np.frombuffer(b'\x80' * size, dtype=np.bool) + expected = np.ones(size, dtype=np.bool) + assert_array_equal(np.logical_and(a, b), expected) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy issue 2742") + def test_gh_23737(self): + with pytest.raises(TypeError, match="not an acceptable base type"): + class Y(np.flexible): + pass + + with pytest.raises(TypeError, match="not an acceptable base type"): + class X(np.flexible, np.ma.core.MaskedArray): + pass + + def test_load_ufunc_pickle(self): + # ufuncs are pickled with a semi-private path in + # numpy.core._multiarray_umath and must be loadable without warning + # despite np.core being deprecated. + test_data = b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core._multiarray_umath\x94\x8c\x03add\x94\x93\x94.' + result = pickle.loads(test_data, encoding='bytes') + assert result is np.add + + def test__array_namespace__(self): + arr = np.arange(2) + + xp = arr.__array_namespace__() + assert xp is np + xp = arr.__array_namespace__(api_version="2021.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2022.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2023.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2024.12") + assert xp is np + xp = arr.__array_namespace__(api_version=None) + assert xp is np + + with pytest.raises( + ValueError, + match="Version \"2025.12\" of the Array API Standard " + "is not supported." + ): + arr.__array_namespace__(api_version="2025.12") + + with pytest.raises( + ValueError, + match="Only None and strings are allowed as the Array API version" + ): + arr.__array_namespace__(api_version=2024) + + def test_isin_refcnt_bug(self): + # gh-25295 + for _ in range(1000): + np.isclose(np.int64(2), np.int64(2), atol=1e-15, rtol=1e-300) + + def test_replace_regression(self): + # gh-25513 segfault + carr = np.char.chararray((2,), itemsize=25) + test_strings = [b' 4.52173913043478315E+00', + b' 4.95652173913043548E+00'] + carr[:] = test_strings + out = carr.replace(b"E", b"D") + expected = np.char.chararray((2,), itemsize=25) + expected[:] = [s.replace(b"E", b"D") for s in test_strings] + assert_array_equal(out, expected) + + def test_logspace_base_does_not_determine_dtype(self): + # gh-24957 and cupy/cupy/issues/7946 + start = np.array([0, 2], dtype=np.float16) + stop = np.array([2, 0], dtype=np.float16) + out = np.logspace(start, stop, num=5, axis=1, dtype=np.float32) + expected = np.array([[1., 3.1621094, 10., 31.625, 100.], + [100., 31.625, 10., 3.1621094, 1.]], + dtype=np.float32) + assert_almost_equal(out, expected) + # Check test fails if the calculation is done in float64, as happened + # before when a python float base incorrectly influenced the dtype. 
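+ # (A float64 computation rounds differently, so this comparison is expected to fail.)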
+ out2 = np.logspace(start, stop, num=5, axis=1, dtype=np.float32, + base=np.array([10.0])) + with pytest.raises(AssertionError, match="not almost equal"): + assert_almost_equal(out2, expected) + + def test_vectorize_fixed_width_string(self): + arr = np.array(["SOme wOrd DŽ ß ᾛ ΣΣ ffi⁵Å Ç Ⅰ"]).astype(np.str_) + f = str.casefold + res = np.vectorize(f, otypes=[arr.dtype])(arr) + assert res.dtype == "U30" + + def test_repeated_square_consistency(self): + # gh-26940 + buf = np.array([-5.171866611150749e-07 + 2.5618634555957426e-07j, + 0, 0, 0, 0, 0]) + # Test buffer with regular and reverse strides + for in_vec in [buf[:3], buf[:3][::-1]]: + expected_res = np.square(in_vec) + # Output vector immediately follows input vector + # to reproduce off-by-one in nomemoverlap check. + for res in [buf[3:], buf[3:][::-1]]: + res = buf[3:] + np.square(in_vec, out=res) + assert_equal(res, expected_res) + + def test_sort_unique_crash(self): + # gh-27037 + for _ in range(4): + vals = np.linspace(0, 1, num=128) + data = np.broadcast_to(vals, (128, 128, 128)) + data = data.transpose(0, 2, 1).copy() + np.unique(data) + + def test_sort_overlap(self): + # gh-27273 + size = 100 + inp = np.linspace(0, size, num=size, dtype=np.intc) + out = np.sort(inp) + assert_equal(inp, out) + + def test_searchsorted_structured(self): + # gh-28190 + x = np.array([(0, 1.)], dtype=[('time', ' None: + cls = np.dtype(code).type + value = cls(str_value) + assert not value.is_integer() + + @pytest.mark.parametrize( + "code", np.typecodes["Float"] + np.typecodes["AllInteger"] + ) + def test_true(self, code: str) -> None: + float_array = np.arange(-5, 5).astype(code) + for value in float_array: + assert value.is_integer() + + @pytest.mark.parametrize("code", np.typecodes["Float"]) + def test_false(self, code: str) -> None: + float_array = np.arange(-5, 5).astype(code) + float_array *= 1.1 + for value in float_array: + if value == 0: + continue + assert not value.is_integer() + + +class TestClassGetItem: + @pytest.mark.parametrize("cls", [ + np.number, + np.integer, + np.inexact, + np.unsignedinteger, + np.signedinteger, + np.floating, + ]) + def test_abc(self, cls: type[np.number]) -> None: + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + def test_abc_complexfloating(self) -> None: + alias = np.complexfloating[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.complexfloating + + @pytest.mark.parametrize("arg_len", range(4)) + def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len in (1, 2): + assert np.complexfloating[arg_tup] + else: + match = f"Too {'few' if arg_len == 0 else 'many'} arguments" + with pytest.raises(TypeError, match=match): + np.complexfloating[arg_tup] + + @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character]) + def test_abc_non_numeric(self, cls: type[np.generic]) -> None: + with pytest.raises(TypeError): + cls[Any] + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_concrete(self, code: str) -> None: + cls = np.dtype(code).type + with pytest.raises(TypeError): + cls[Any] + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.number[arg_tup] + else: + with pytest.raises(TypeError): + np.number[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.number[Any] + + +class TestBitCount: + # derived in part from the 
cpython test "test_bit_count" + + @pytest.mark.parametrize("itype", sctypes['int'] + sctypes['uint']) + def test_small(self, itype): + for a in range(max(np.iinfo(itype).min, 0), 128): + msg = f"Smoke test for {itype}({a}).bit_count()" + assert itype(a).bit_count() == a.bit_count(), msg + + def test_bit_count(self): + for exp in [10, 17, 63]: + a = 2**exp + assert np.uint64(a).bit_count() == 1 + assert np.uint64(a - 1).bit_count() == exp + assert np.uint64(a ^ 63).bit_count() == 7 + assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8 + + +class TestDevice: + """ + Test scalar.device attribute and scalar.to_device() method. + """ + scalars = [np.bool(True), np.int64(1), np.uint64(1), np.float64(1.0), + np.complex128(1 + 1j)] + + @pytest.mark.parametrize("scalar", scalars) + def test_device(self, scalar): + assert scalar.device == "cpu" + + @pytest.mark.parametrize("scalar", scalars) + def test_to_device(self, scalar): + assert scalar.to_device("cpu") is scalar + + @pytest.mark.parametrize("scalar", scalars) + def test___array_namespace__(self, scalar): + assert scalar.__array_namespace__() is np + + +@pytest.mark.parametrize("scalar", [np.bool(True), np.int8(1), np.float64(1)]) +def test_array_wrap(scalar): + # Test scalars array wrap as long as it exists. NumPy itself should + # probably not use it, so it may not be necessary to keep it around. + + arr0d = np.array(3, dtype=np.int8) + # Third argument not passed, None, or True "decays" to scalar. + # (I don't think NumPy would pass `None`, but it seems clear to support) + assert type(scalar.__array_wrap__(arr0d)) is np.int8 + assert type(scalar.__array_wrap__(arr0d, None, None)) is np.int8 + assert type(scalar.__array_wrap__(arr0d, None, True)) is np.int8 + + # Otherwise, result should be the input + assert scalar.__array_wrap__(arr0d, None, False) is arr0d + + # An old bug. 
A non 0-d array cannot be converted to scalar: + arr1d = np.array([3], dtype=np.int8) + assert scalar.__array_wrap__(arr1d) is arr1d + assert scalar.__array_wrap__(arr1d, None, True) is arr1d diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarbuffer.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarbuffer.py new file mode 100644 index 0000000..4d6b5bd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarbuffer.py @@ -0,0 +1,153 @@ +""" +Test scalar buffer interface adheres to PEP 3118 +""" +import pytest +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + +# PEP3118 format strings for native (standard alignment and byteorder) types +scalars_and_codes = [ + (np.bool, '?'), + (np.byte, 'b'), + (np.short, 'h'), + (np.intc, 'i'), + (np.long, 'l'), + (np.longlong, 'q'), + (np.ubyte, 'B'), + (np.ushort, 'H'), + (np.uintc, 'I'), + (np.ulong, 'L'), + (np.ulonglong, 'Q'), + (np.half, 'e'), + (np.single, 'f'), + (np.double, 'd'), + (np.longdouble, 'g'), + (np.csingle, 'Zf'), + (np.cdouble, 'Zd'), + (np.clongdouble, 'Zg'), +] +scalars_only, codes_only = zip(*scalars_and_codes) + + +class TestScalarPEP3118: + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_match_array(self, scalar): + x = scalar() + a = np.array([], dtype=np.dtype(scalar)) + mv_x = memoryview(x) + mv_a = memoryview(a) + assert_equal(mv_x.format, mv_a.format) + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_dim(self, scalar): + x = scalar() + mv_x = memoryview(x) + assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) + def test_scalar_code_and_properties(self, scalar, code): + x = scalar() + expected = {'strides': (), 'itemsize': x.dtype.itemsize, 'ndim': 0, + 'shape': (), 'format': code, 'readonly': True} + + mv_x = memoryview(x) + assert self._as_dict(mv_x) == expected + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_buffers_readonly(self, scalar): + x = scalar() + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + + def test_void_scalar_structured_data(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert_(isinstance(x, np.void)) + mv_x = memoryview(x) + expected_size = 16 * np.dtype((np.str_, 1)).itemsize + expected_size += 2 * np.dtype(np.float64).itemsize + assert_equal(mv_x.itemsize, expected_size) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + # check scalar format string against ndarray format string + a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_(isinstance(a, np.ndarray)) + mv_a = memoryview(a) + assert_equal(mv_x.itemsize, mv_a.itemsize) + assert_equal(mv_x.format, mv_a.format) + + # Check that we do not allow writeable buffer export (technically + # we could allow it sometimes here...) 
+ with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + + def _as_dict(self, m): + return {'strides': m.strides, 'shape': m.shape, 'itemsize': m.itemsize, + 'ndim': m.ndim, 'format': m.format, 'readonly': m.readonly} + + def test_datetime_memoryview(self): + # gh-11656 + # Values verified with v1.13.3, shape is not () as in test_scalar_dim + + dt1 = np.datetime64('2016-01-01') + dt2 = np.datetime64('2017-01-01') + expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, 'shape': (8,), + 'format': 'B', 'readonly': True} + v = memoryview(dt1) + assert self._as_dict(v) == expected + + v = memoryview(dt2 - dt1) + assert self._as_dict(v) == expected + + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(1, dt) + # Fails to create a PEP 3118 valid buffer + assert_raises((ValueError, BufferError), memoryview, a[0]) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(dt1, ["WRITABLE"]) + + @pytest.mark.parametrize('s', [ + pytest.param("\x32\x32", id="ascii"), + pytest.param("\uFE0F\uFE0F", id="basic multilingual"), + pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"), + ]) + def test_str_ucs4(self, s): + s = np.str_(s) # only our subclass implements the buffer protocol + + # all the same, characters always encode as ucs4 + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), 'format': '2w', + 'readonly': True} + + v = memoryview(s) + assert self._as_dict(v) == expected + + # integers of the platform-appropriate endianness + code_points = np.frombuffer(v, dtype='i4') + + assert_equal(code_points, [ord(c) for c in s]) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(s, ["WRITABLE"]) + + def test_user_scalar_fails_buffer(self): + r = rational(1) + with assert_raises(TypeError): + memoryview(r) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(r, ["WRITABLE"]) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarinherit.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarinherit.py new file mode 100644 index 0000000..746a157 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarinherit.py @@ -0,0 +1,105 @@ +""" Test printing of scalar types. + +""" +import pytest + +import numpy as np +from numpy.testing import assert_, assert_raises + + +class A: + pass +class B(A, np.float64): + pass + +class C(B): + pass +class D(C, B): + pass + +class B0(np.float64, A): + pass +class C0(B0): + pass + +class HasNew: + def __new__(cls, *args, **kwargs): + return cls, args, kwargs + +class B1(np.float64, HasNew): + pass + + +class TestInherit: + def test_init(self): + x = B(1.0) + assert_(str(x) == '1.0') + y = C(2.0) + assert_(str(y) == '2.0') + z = D(3.0) + assert_(str(z) == '3.0') + + def test_init2(self): + x = B0(1.0) + assert_(str(x) == '1.0') + y = C0(2.0) + assert_(str(y) == '2.0') + + def test_gh_15395(self): + # HasNew is the second base, so `np.float64` should have priority + x = B1(1.0) + assert_(str(x) == '1.0') + + # previously caused RecursionError!?
+ with pytest.raises(TypeError): + B1(1.0, 2.0) + + def test_int_repr(self): + # Test that integer repr works correctly for subclasses (gh-27106) + class my_int16(np.int16): + pass + + s = repr(my_int16(3)) + assert s == "my_int16(3)" + +class TestCharacter: + def test_char_radd(self): + # GH issue 9620, reached gentype_add and raise TypeError + np_s = np.bytes_('abc') + np_u = np.str_('abc') + s = b'def' + u = 'def' + assert_(np_s.__radd__(np_s) is NotImplemented) + assert_(np_s.__radd__(np_u) is NotImplemented) + assert_(np_s.__radd__(s) is NotImplemented) + assert_(np_s.__radd__(u) is NotImplemented) + assert_(np_u.__radd__(np_s) is NotImplemented) + assert_(np_u.__radd__(np_u) is NotImplemented) + assert_(np_u.__radd__(s) is NotImplemented) + assert_(np_u.__radd__(u) is NotImplemented) + assert_(s + np_s == b'defabc') + assert_(u + np_u == 'defabc') + + class MyStr(str, np.generic): + # would segfault + pass + + with assert_raises(TypeError): + # Previously worked, but gave completely wrong result + ret = s + MyStr('abc') + + class MyBytes(bytes, np.generic): + # would segfault + pass + + ret = s + MyBytes(b'abc') + assert type(ret) is type(s) + assert ret == b"defabc" + + def test_char_repeat(self): + np_s = np.bytes_('abc') + np_u = np.str_('abc') + res_s = b'abc' * 5 + res_u = 'abc' * 5 + assert_(np_s * 5 == res_s) + assert_(np_u * 5 == res_u) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarmath.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarmath.py new file mode 100644 index 0000000..fc37897 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarmath.py @@ -0,0 +1,1176 @@ +import contextlib +import itertools +import operator +import platform +import sys +import warnings + +import pytest +from hypothesis import given, settings +from hypothesis.extra import numpy as hynp +from hypothesis.strategies import sampled_from +from numpy._core._rational_tests import rational + +import numpy as np +from numpy._utils import _pep440 +from numpy.exceptions import ComplexWarning +from numpy.testing import ( + IS_PYPY, + _gen_alignment_data, + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + check_support_sve, + suppress_warnings, +) + +types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, + np.int_, np.uint, np.longlong, np.ulonglong, + np.single, np.double, np.longdouble, np.csingle, + np.cdouble, np.clongdouble] + +floating_types = np.floating.__subclasses__() +complex_floating_types = np.complexfloating.__subclasses__() + +objecty_things = [object(), None, np.array(None, dtype=object)] + +binary_operators_for_scalars = [ + operator.lt, operator.le, operator.eq, operator.ne, operator.ge, + operator.gt, operator.add, operator.floordiv, operator.mod, + operator.mul, operator.pow, operator.sub, operator.truediv +] +binary_operators_for_scalar_ints = binary_operators_for_scalars + [ + operator.xor, operator.or_, operator.and_ +] + + +# This compares scalarmath against ufuncs. 
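+ # (e.g. np.int8(3) + np.int8(1) should match np.add on the corresponding 0-d arrays.)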
+
+class TestTypes:
+    def test_types(self):
+        for atype in types:
+            a = atype(1)
+            assert_(a == 1, f"error with {atype!r}: got {a!r}")
+
+    def test_type_add(self):
+        # list of types
+        for k, atype in enumerate(types):
+            a_scalar = atype(3)
+            a_array = np.array([3], dtype=atype)
+            for l, btype in enumerate(types):
+                b_scalar = btype(1)
+                b_array = np.array([1], dtype=btype)
+                c_scalar = a_scalar + b_scalar
+                c_array = a_array + b_array
+                # It was comparing the type numbers, but the new ufunc
+                # function-finding mechanism finds the lowest function
+                # to which both inputs can be cast - which produces 'l'
+                # when you do 'q' + 'b'.  The old function finding mechanism
+                # skipped ahead based on the first argument, but that
+                # does not produce properly symmetric results...
+                assert_equal(c_scalar.dtype, c_array.dtype,
+                             "error with types (%d/'%c' + %d/'%c')" %
+                             (k, np.dtype(atype).char, l, np.dtype(btype).char))
+
+    def test_type_create(self):
+        for atype in types:
+            a = np.array([1, 2, 3], atype)
+            b = atype([1, 2, 3])
+            assert_equal(a, b)
+
+    def test_leak(self):
+        # test leak of scalar objects
+        # a leak would show up in valgrind as ~2.6MB of still-reachable memory
+        for i in range(200000):
+            np.add(1, 1)
+
+
+def check_ufunc_scalar_equivalence(op, arr1, arr2):
+    scalar1 = arr1[()]
+    scalar2 = arr2[()]
+    assert isinstance(scalar1, np.generic)
+    assert isinstance(scalar2, np.generic)
+
+    if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
+        comp_ops = {operator.ge, operator.gt, operator.le, operator.lt}
+        if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)):
+            pytest.xfail("complex comp ufuncs use sort-order, scalars do not.")
+    if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]:
+        # array**scalar special case can have different result dtype
+        # (Other powers may have issues also, but are not hit here.)
+        # TODO: It would be nice to resolve this issue.
+        pytest.skip("array**2 can have incorrect/weird result dtype")
+
+    # ignore fpe's since they may just mismatch for integers anyway.
+    with warnings.catch_warnings(), np.errstate(all="ignore"):
+        # Comparisons DeprecationWarnings replacing errors (2022-03):
+        warnings.simplefilter("error", DeprecationWarning)
+        try:
+            res = op(arr1, arr2)
+        except Exception as e:
+            with pytest.raises(type(e)):
+                op(scalar1, scalar2)
+        else:
+            scalar_res = op(scalar1, scalar2)
+            assert_array_equal(scalar_res, res, strict=True)
+
+
+@pytest.mark.slow
+@settings(max_examples=10000, deadline=2000)
+@given(sampled_from(binary_operators_for_scalars),
+       hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
+       hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
+def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
+    """
+    This is a thorough test attempting to cover important promotion paths
+    and ensuring that arrays and scalars stay as aligned as possible.
+    However, if it creates trouble, it should maybe just be removed.
+    """
+    check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.slow
+@given(sampled_from(binary_operators_for_scalars),
+       hynp.scalar_dtypes(), hynp.scalar_dtypes())
+def test_array_scalar_ufunc_dtypes(op, dt1, dt2):
+    # Same as above, but don't worry about sampling weird values so that we
+    # do not have to sample as much
+    arr1 = np.array(2, dtype=dt1)
+    arr2 = np.array(3, dtype=dt2)  # some powers do weird things.
+
+    check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.parametrize("fscalar", [np.float16, np.float32])
+def test_int_float_promotion_truediv(fscalar):
+    # Promotion for mixed int and float32/float16 must not go to float64
+    i = np.int8(1)
+    f = fscalar(1)
+    expected = np.result_type(i, f)
+    assert (i / f).dtype == expected
+    assert (f / i).dtype == expected
+    # But normal int / int true division goes to float64:
+    assert (i / i).dtype == np.dtype("float64")
+    # For int16, result has to be at least float32 (takes ufunc path):
+    assert (np.int16(1) / f).dtype == np.dtype("float32")
+
+
+class TestBaseMath:
+    @pytest.mark.xfail(check_support_sve(), reason="gh-22982")
+    def test_blocked(self):
+        # test alignment offsets for simd instructions
+        # alignments for vz + 2 * (vs - 1) + 1
+        for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
+            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
+                                                            type='binary',
+                                                            max_size=sz):
+                exp1 = np.ones_like(inp1)
+                inp1[...] = np.ones_like(inp1)
+                inp2[...] = np.zeros_like(inp2)
+                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
+                assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
+                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
+
+                np.add(inp1, inp2, out=out)
+                assert_almost_equal(out, exp1, err_msg=msg)
+
+                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
+                assert_almost_equal(np.square(inp2),
+                                    np.multiply(inp2, inp2), err_msg=msg)
+                # skip true divide for ints
+                if dt != np.int32:
+                    assert_almost_equal(np.reciprocal(inp2),
+                                        np.divide(1, inp2), err_msg=msg)
+
+                inp1[...] = np.ones_like(inp1)
+                np.add(inp1, 2, out=out)
+                assert_almost_equal(out, exp1 + 2, err_msg=msg)
+                inp2[...] = np.ones_like(inp2)
+                np.add(2, inp2, out=out)
+                assert_almost_equal(out, exp1 + 2, err_msg=msg)
+
+    def test_lower_align(self):
+        # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        assert_almost_equal(d + d, d * 2)
+        np.add(d, d, out=o)
+        np.add(np.ones_like(d), d, out=o)
+        np.add(d, np.ones_like(d), out=o)
+        np.add(np.ones_like(d), d)
+        np.add(d, np.ones_like(d))
+
+
+class TestPower:
+    def test_small_types(self):
+        for t in [np.int8, np.int16, np.float16]:
+            a = t(3)
+            b = a ** 4
+            assert_(b == 81, f"error with {t!r}: got {b!r}")
+
+    def test_large_types(self):
+        for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
+            a = t(51)
+            b = a ** 4
+            msg = f"error with {t!r}: got {b!r}"
+            if np.issubdtype(t, np.integer):
+                assert_(b == 6765201, msg)
+            else:
+                assert_almost_equal(b, 6765201, err_msg=msg)
+
+    def test_integers_to_negative_integer_power(self):
+        # Note that the combination of uint64 with a signed integer
+        # has common type np.float64. The other combinations should all
+        # raise a ValueError for integer ** negative integer.
+        exp = [np.array(-1, dt)[()] for dt in 'bhilq']
+
+        # 1 ** -1 possible special case
+        base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
+        for i1, i2 in itertools.product(base, exp):
+            if i1.dtype != np.uint64:
+                assert_raises(ValueError, operator.pow, i1, i2)
+            else:
+                res = operator.pow(i1, i2)
+                assert_(res.dtype.type is np.float64)
+                assert_almost_equal(res, 1.)
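+                # (Illustration) the uint64 branch yields float64 because
+                # np.result_type(np.uint64, np.int64) is float64 -- no
+                # integer dtype can hold both ranges:
+                #     np.uint64(1) ** np.int64(-1)  ->  np.float64(1.0)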
+
+        # -1 ** -1 possible special case
+        base = [np.array(-1, dt)[()] for dt in 'bhilq']
+        for i1, i2 in itertools.product(base, exp):
+            if i1.dtype != np.uint64:
+                assert_raises(ValueError, operator.pow, i1, i2)
+            else:
+                res = operator.pow(i1, i2)
+                assert_(res.dtype.type is np.float64)
+                assert_almost_equal(res, -1.)
+
+        # 2 ** -1 perhaps generic
+        base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
+        for i1, i2 in itertools.product(base, exp):
+            if i1.dtype != np.uint64:
+                assert_raises(ValueError, operator.pow, i1, i2)
+            else:
+                res = operator.pow(i1, i2)
+                assert_(res.dtype.type is np.float64)
+                assert_almost_equal(res, .5)
+
+    def test_mixed_types(self):
+        typelist = [np.int8, np.int16, np.float16,
+                    np.float32, np.float64, np.int8,
+                    np.int16, np.int32, np.int64]
+        for t1 in typelist:
+            for t2 in typelist:
+                a = t1(3)
+                b = t2(2)
+                result = a**b
+                msg = f"error with {t1!r} and {t2!r}: got {result!r}, expected {9!r}"
+                if np.issubdtype(np.dtype(result), np.integer):
+                    assert_(result == 9, msg)
+                else:
+                    assert_almost_equal(result, 9, err_msg=msg)
+
+    def test_modular_power(self):
+        # modular power is not implemented, so ensure it errors
+        a = 5
+        b = 4
+        c = 10
+        expected = pow(a, b, c)  # noqa: F841
+        for t in (np.int32, np.float32, np.complex64):
+            # note that 3-operand power only dispatches on the first argument
+            assert_raises(TypeError, operator.pow, t(a), b, c)
+            assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
+
+
+def floordiv_and_mod(x, y):
+    return (x // y, x % y)
+
+
+def _signs(dt):
+    if dt in np.typecodes['UnsignedInteger']:
+        return (+1,)
+    else:
+        return (+1, -1)
+
+
+class TestModulus:
+
+    def test_modulus_basic(self):
+        dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+        for op in [floordiv_and_mod, divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1 * 71, dtype=dt1)[()]
+                    b = np.array(sg2 * 19, dtype=dt2)[()]
+                    div, rem = op(a, b)
+                    assert_equal(div * b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
+
+    def test_float_modulus_exact(self):
+        # test that float results are exact for small integers. This also
+        # holds for the same integers scaled by powers of two.
+        nlst = list(range(-127, 0))
+        plst = list(range(1, 128))
+        dividend = nlst + [0] + plst
+        divisor = nlst + plst
+        arg = list(itertools.product(dividend, divisor))
+        tgt = [divmod(*t) for t in arg]
+
+        a, b = np.array(arg, dtype=int).T
+        # convert exact integer results from Python to float so that
+        # signed zeros can be used; they are checked below.
+        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+        for op in [floordiv_and_mod, divmod]:
+            for dt in np.typecodes['Float']:
+                msg = f'op: {op.__name__}, dtype: {dt}'
+                fa = a.astype(dt)
+                fb = b.astype(dt)
+                # use list comprehension so a_ and b_ are scalars
+                div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
+                assert_equal(div, tgtdiv, err_msg=msg)
+                assert_equal(rem, tgtrem, err_msg=msg)
+
+    def test_float_modulus_roundoff(self):
+        # gh-6127
+        dt = np.typecodes['Float']
+        for op in [floordiv_and_mod, divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1 * 78 * 6e-8, dtype=dt1)[()]
+                    b = np.array(sg2 * 6e-8, dtype=dt2)[()]
+                    div, rem = op(a, b)
+                    # Equal assertion should hold when fmod is used
+                    assert_equal(div * b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
+
+    def test_float_modulus_corner_cases(self):
+        # Check remainder magnitude.
+        for dt in np.typecodes['Float']:
+            b = np.array(1.0, dtype=dt)
+            a = np.nextafter(np.array(0.0, dtype=dt), -b)
+            rem = operator.mod(a, b)
+            assert_(rem <= b, f'dt: {dt}')
+            rem = operator.mod(-a, -b)
+            assert_(rem >= -b, f'dt: {dt}')
+
+        # Check nans, inf
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+            sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
+            sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
+            sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+            sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+            for dt in np.typecodes['Float']:
+                fone = np.array(1.0, dtype=dt)
+                fzer = np.array(0.0, dtype=dt)
+                finf = np.array(np.inf, dtype=dt)
+                fnan = np.array(np.nan, dtype=dt)
+                rem = operator.mod(fone, fzer)
+                assert_(np.isnan(rem), f'dt: {dt}')
+                # MSVC 2008 returns NaN here, so disable the check.
+                #rem = operator.mod(fone, finf)
+                #assert_(rem == fone, 'dt: %s' % dt)
+                rem = operator.mod(fone, fnan)
+                assert_(np.isnan(rem), f'dt: {dt}')
+                rem = operator.mod(finf, fone)
+                assert_(np.isnan(rem), f'dt: {dt}')
+                for op in [floordiv_and_mod, divmod]:
+                    div, mod = op(fone, fzer)
+                    # two separate assertions; chaining them with `and` would
+                    # short-circuit and never evaluate the second check
+                    assert_(np.isinf(div))
+                    assert_(np.isnan(mod))
+
+    def test_inplace_floordiv_handling(self):
+        # issue gh-12927
+        # this only applies to in-place floordiv //=, because the output type
+        # promotes to float which does not fit
+        a = np.array([1, 2], np.int64)
+        b = np.array([1, 2], np.uint64)
+        with pytest.raises(TypeError,
+                match=r"Cannot cast ufunc 'floor_divide' output from"):
+            a //= b
+
+class TestComparison:
+    def test_comparison_different_types(self):
+        x = np.array(1)
+        y = np.array('s')
+        eq = x == y
+        neq = x != y
+        assert eq is np.bool_(False)
+        assert neq is np.bool_(True)
+
+
+class TestComplexDivision:
+    def test_zero_division(self):
+        with np.errstate(all="ignore"):
+            for t in [np.complex64, np.complex128]:
+                a = t(0.0)
+                b = t(1.0)
+                assert_(np.isinf(b / a))
+                b = t(complex(np.inf, np.inf))
+                assert_(np.isinf(b / a))
+                b = t(complex(np.inf, np.nan))
+                assert_(np.isinf(b / a))
+                b = t(complex(np.nan, np.inf))
+                assert_(np.isinf(b / a))
+                b = t(complex(np.nan, np.nan))
+                assert_(np.isnan(b / a))
+                b = t(0.)
+ assert_(np.isnan(b / a)) + + def test_signed_zeros(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = ( + (( 0.0, -1.0), ( 0.0, 1.0), (-1.0, -0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + (( 0.0, -1.0), (-0.0, -1.0), ( 1.0, 0.0)), + (( 0.0, -1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0, -1.0), (-1.0, 0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, 1.0), ( 0.0, -1.0), (-1.0, -0.0)) + ) + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + def test_branches(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = [] + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by else condition as neither are == 0 + data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by if condition as both are == 0 + # is performed in test_zero_division(), so this is skipped + + # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) + data.append(((1.0, 2.0), (1.0, 2.0), (1.0, 0.0))) + + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + +class TestConversion: + def test_int_from_long(self): + l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] + li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] + for T in [None, np.float64, np.int64]: + a = np.array(l, dtype=T) + assert_equal([int(_m) for _m in a], li) + + a = np.array(l[:3], dtype=np.uint64) + assert_equal([int(_m) for _m in a], li[:3]) + + def test_iinfo_long_values(self): + for code in 'bBhH': + with pytest.raises(OverflowError): + np.array(np.iinfo(code).max + 1, dtype=code) + + for code in np.typecodes['AllInteger']: + res = np.array(np.iinfo(code).max, dtype=code) + tgt = np.iinfo(code).max + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.dtype(code).type(np.iinfo(code).max) + tgt = np.iinfo(code).max + assert_(res == tgt) + + def test_int_raise_behaviour(self): + def overflow_error_func(dtype): + dtype(np.iinfo(dtype).max + 1) + + for code in [np.int_, np.uint, np.longlong, np.ulonglong]: + assert_raises(OverflowError, overflow_error_func, code) + + def test_int_from_infinite_longdouble(self): + # gh-627 + x = np.longdouble(np.inf) + assert_raises(OverflowError, int, x) + with suppress_warnings() as sup: + sup.record(ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, int, x) + assert_equal(len(sup.log), 1) + + @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") + def test_int_from_infinite_longdouble___int__(self): + x = np.longdouble(np.inf) + assert_raises(OverflowError, x.__int__) + with suppress_warnings() as sup: + sup.record(ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, 
x.__int__)
+            assert_equal(len(sup.log), 1)
+
+    @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+                        reason="long double is same as double")
+    @pytest.mark.skipif(platform.machine().startswith("ppc"),
+                        reason="IBM double double")
+    def test_int_from_huge_longdouble(self):
+        # Produce a longdouble that would overflow a double,
+        # use exponent that avoids bug in Darwin pow function.
+        exp = np.finfo(np.double).maxexp - 1
+        huge_ld = 2 * 1234 * np.longdouble(2) ** exp
+        huge_i = 2 * 1234 * 2 ** exp
+        assert_(huge_ld != np.inf)
+        assert_equal(int(huge_ld), huge_i)
+
+    def test_int_from_longdouble(self):
+        x = np.longdouble(1.5)
+        assert_equal(int(x), 1)
+        x = np.longdouble(-10.5)
+        assert_equal(int(x), -10)
+
+    def test_numpy_scalar_relational_operators(self):
+        # All integer
+        for dt1 in np.typecodes['AllInteger']:
+            assert_(1 > np.array(0, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(not 1 < np.array(0, dtype=dt1)[()], f"type {dt1} failed")
+
+            for dt2 in np.typecodes['AllInteger']:
+                assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+
+        # Unsigned integers
+        for dt1 in 'BHILQP':
+            assert_(-1 < np.array(1, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(not -1 > np.array(1, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(-1 != np.array(1, dtype=dt1)[()], f"type {dt1} failed")
+
+            # unsigned vs signed
+            for dt2 in 'bhilqp':
+                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+
+        # Signed integers and floats
+        for dt1 in 'bhlqp' + np.typecodes['Float']:
+            assert_(1 > np.array(-1, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(not 1 < np.array(-1, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(-1 == np.array(-1, dtype=dt1)[()], f"type {dt1} failed")
+
+            for dt2 in 'bhlqp' + np.typecodes['Float']:
+                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+
+    def test_scalar_comparison_to_none(self):
+        # Scalars should just return False and not give a warning.
+        # The comparisons are flagged by pep8, ignore that.
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', FutureWarning)
+            assert_(not np.float32(1) == None)  # noqa: E711
+            assert_(not np.str_('test') == None)  # noqa: E711
+            # This is dubious (see below):
+            assert_(not np.datetime64('NaT') == None)  # noqa: E711
+
+            assert_(np.float32(1) != None)  # noqa: E711
+            assert_(np.str_('test') != None)  # noqa: E711
+            # This is dubious (see below):
+            assert_(np.datetime64('NaT') != None)  # noqa: E711
+        assert_(len(w) == 0)
+
+        # For documentation purposes, this is why the datetime is dubious.
+        # At the time of deprecation this was no behaviour change, but
+        # it has to be considered when the deprecations are done.
+ assert_(np.equal(np.datetime64('NaT'), None)) + + +#class TestRepr: +# def test_repr(self): +# for t in types: +# val = t(1197346475.0137341) +# val_repr = repr(val) +# val2 = eval(val_repr) +# assert_equal( val, val2 ) + + +class TestRepr: + def _test_type_repr(self, t): + finfo = np.finfo(t) + last_fraction_bit_idx = finfo.nexp + finfo.nmant + last_exponent_bit_idx = finfo.nexp + storage_bytes = np.dtype(t).itemsize * 8 + # could add some more types to the list below + for which in ['small denorm', 'small norm']: + # Values from https://en.wikipedia.org/wiki/IEEE_754 + constr = np.array([0x00] * storage_bytes, dtype=np.uint8) + if which == 'small denorm': + byte = last_fraction_bit_idx // 8 + bytebit = 7 - (last_fraction_bit_idx % 8) + constr[byte] = 1 << bytebit + elif which == 'small norm': + byte = last_exponent_bit_idx // 8 + bytebit = 7 - (last_exponent_bit_idx % 8) + constr[byte] = 1 << bytebit + else: + raise ValueError('hmm') + val = constr.view(t)[0] + val_repr = repr(val) + val2 = t(eval(val_repr)) + if not (val2 == 0 and val < 1e-100): + assert_equal(val, val2) + + def test_float_repr(self): + # long double test cannot work, because eval goes through a python + # float + for t in [np.float32, np.float64]: + self._test_type_repr(t) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) + + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestMultiply: + def test_seq_repeat(self): + # Test that basic sequences get repeated when multiplied with + # numpy integers. And errors are raised when multiplied with others. + # Some of this behaviour may be controversial and could be open for + # change. + accepted_types = set(np.typecodes["AllInteger"]) + deprecated_types = {'?'} + forbidden_types = ( + set(np.typecodes["All"]) - accepted_types - deprecated_types) + forbidden_types -= {'V'} # can't default-construct void scalars + + for seq_type in (list, tuple): + seq = seq_type([1, 2, 3]) + for numpy_type in accepted_types: + i = np.dtype(numpy_type).type(2) + assert_equal(seq * i, seq * int(i)) + assert_equal(i * seq, int(i) * seq) + + for numpy_type in deprecated_types: + i = np.dtype(numpy_type).type() + with assert_raises(TypeError): + operator.mul(seq, i) + + for numpy_type in forbidden_types: + i = np.dtype(numpy_type).type() + assert_raises(TypeError, operator.mul, seq, i) + assert_raises(TypeError, operator.mul, i, seq) + + def test_no_seq_repeat_basic_array_like(self): + # Test that an array-like which does not know how to be multiplied + # does not attempt sequence repeat (raise TypeError). + # See also gh-7428. + class ArrayLike: + def __init__(self, arr): + self.arr = arr + + def __array__(self, dtype=None, copy=None): + return self.arr + + # Test for simple ArrayLike above and memoryviews (original report) + for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))): + assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.)) + assert_array_equal(np.float32(3.) 
* arr_like, np.full(3, 3.)) + assert_array_equal(arr_like * np.int_(3), np.full(3, 3)) + assert_array_equal(np.int_(3) * arr_like, np.full(3, 3)) + + +class TestNegative: + def test_exceptions(self): + a = np.ones((), dtype=np.bool)[()] + assert_raises(TypeError, operator.neg, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + if dt in np.typecodes['UnsignedInteger']: + st = np.dtype(dt).type + max = st(np.iinfo(dt).max) + assert_equal(operator.neg(a), max) + else: + assert_equal(operator.neg(a) + a, 0) + +class TestSubtract: + def test_exceptions(self): + a = np.ones((), dtype=np.bool)[()] + assert_raises(TypeError, operator.sub, a, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + assert_equal(operator.sub(a, a), 0) + + +class TestAbs: + def _test_abs_func(self, absfunc, test_dtype): + x = test_dtype(-1.5) + assert_equal(absfunc(x), 1.5) + x = test_dtype(0.0) + res = absfunc(x) + # assert_equal() checks zero signedness + assert_equal(res, 0.0) + x = test_dtype(-0.0) + res = absfunc(x) + assert_equal(res, 0.0) + + x = test_dtype(np.finfo(test_dtype).max) + assert_equal(absfunc(x), x.real) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + x = test_dtype(np.finfo(test_dtype).tiny) + assert_equal(absfunc(x), x.real) + + x = test_dtype(np.finfo(test_dtype).min) + assert_equal(absfunc(x), -x.real) + + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) + def test_builtin_abs(self, dtype): + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _pep440.parse(platform.release().split("-")[0]) + < _pep440.Version("3.3.0") + ) + ): + pytest.xfail( + reason="absl is computed in double precision on cygwin < 3.3" + ) + self._test_abs_func(abs, dtype) + + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) + def test_numpy_abs(self, dtype): + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _pep440.parse(platform.release().split("-")[0]) + < _pep440.Version("3.3.0") + ) + ): + pytest.xfail( + reason="absl is computed in double precision on cygwin < 3.3" + ) + self._test_abs_func(np.abs, dtype) + +class TestBitShifts: + + @pytest.mark.parametrize('type_code', np.typecodes['AllInteger']) + @pytest.mark.parametrize('op', + [operator.rshift, operator.lshift], ids=['>>', '<<']) + def test_shift_all_bits(self, type_code, op): + """Shifts where the shift amount is the width of the type or wider """ + # gh-2449 + dt = np.dtype(type_code) + nbits = dt.itemsize * 8 + for val in [5, -5]: + for shift in [nbits, nbits + 4]: + val_scl = np.array(val).astype(dt)[()] + shift_scl = dt.type(shift) + res_scl = op(val_scl, shift_scl) + if val_scl < 0 and op is operator.rshift: + # sign bit is preserved + assert_equal(res_scl, -1) + else: + assert_equal(res_scl, 0) + + # Result on scalars should be the same as on arrays + val_arr = np.array([val_scl] * 32, dtype=dt) + shift_arr = np.array([shift] * 32, dtype=dt) + res_arr = op(val_arr, shift_arr) + assert_equal(res_arr, res_scl) + + +class TestHash: + @pytest.mark.parametrize("type_code", np.typecodes['AllInteger']) + def test_integer_hashes(self, type_code): + scalar = np.dtype(type_code).type + for i in range(128): + assert hash(i) == hash(scalar(i)) + + 
@pytest.mark.parametrize("type_code", np.typecodes['AllFloat']) + def test_float_and_complex_hashes(self, type_code): + scalar = np.dtype(type_code).type + for val in [np.pi, np.inf, 3, 6.]: + numpy_val = scalar(val) + # Cast back to Python, in case the NumPy scalar has less precision + if numpy_val.dtype.kind == 'c': + val = complex(numpy_val) + else: + val = float(numpy_val) + assert val == numpy_val + assert hash(val) == hash(numpy_val) + + if hash(float(np.nan)) != hash(float(np.nan)): + # If Python distinguishes different NaNs we do so too (gh-18833) + assert hash(scalar(np.nan)) != hash(scalar(np.nan)) + + @pytest.mark.parametrize("type_code", np.typecodes['Complex']) + def test_complex_hashes(self, type_code): + # Test some complex valued hashes specifically: + scalar = np.dtype(type_code).type + for val in [np.pi + 1j, np.inf - 3j, 3j, 6. + 1j]: + numpy_val = scalar(val) + assert hash(complex(numpy_val)) == hash(numpy_val) + + +@contextlib.contextmanager +def recursionlimit(n): + o = sys.getrecursionlimit() + try: + sys.setrecursionlimit(n) + yield + finally: + sys.setrecursionlimit(o) + + +@given(sampled_from(objecty_things), + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) +def test_operator_object_left(o, op, type_): + try: + with recursionlimit(200): + op(o, type_(1)) + except TypeError: + pass + + +@given(sampled_from(objecty_things), + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) +def test_operator_object_right(o, op, type_): + try: + with recursionlimit(200): + op(type_(1), o) + except TypeError: + pass + + +@given(sampled_from(binary_operators_for_scalars), + sampled_from(types), + sampled_from(types)) +def test_operator_scalars(op, type1, type2): + try: + op(type1(1), type2(1)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", binary_operators_for_scalars) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_operators_with_obj(sctype, op): + # This is/used to be tricky, because NumPy generally falls back to + # using the ufunc via `np.asarray()`, this effectively might do: + # longdouble + None + # -> asarray(longdouble) + np.array(None, dtype=object) + # -> asarray(longdouble).astype(object) + np.array(None, dtype=object) + # And after getting the scalars in the inner loop: + # -> longdouble + None + # + # That would recurse infinitely. Other scalars return the python object + # on cast, so this type of things works OK. + # + # As of NumPy 2.1, this has been consolidated into the np.generic binops + # and now checks `.item()`. That also allows the below path to work now. + try: + op(sctype(3), None) + except TypeError: + pass + try: + op(None, sctype(3)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.sub]) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_with_arrlike(sctype, op): + # As of NumPy 2.1, longdouble behaves like other types and can coerce + # e.g. lists. (Not necessarily better, but consistent.) 
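+    # (Illustration) e.g. np.longdouble(3) * [1, 2] now coerces the list to
+    # a longdouble array, matching np.multiply(np.longdouble(3), [1, 2]).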
+    assert_array_equal(op(sctype(3), [1, 2]), op(3, np.array([1, 2])))
+    assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3))
+
+
+@pytest.mark.parametrize("op", binary_operators_for_scalars)
+@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
+@np.errstate(all="ignore")
+def test_longdouble_operators_with_large_int(sctype, op):
+    # (See `test_longdouble_operators_with_obj` for why longdouble is special)
+    # NEP 50 means that the result is clearly a (c)longdouble here:
+    if sctype == np.clongdouble and op in [operator.mod, operator.floordiv]:
+        # The above operators are not supported for complex though...
+        with pytest.raises(TypeError):
+            op(sctype(3), 2**64)
+        with pytest.raises(TypeError):
+            op(2**64, sctype(3))
+    else:
+        assert op(sctype(3), -2**64) == op(sctype(3), sctype(-2**64))
+        assert op(2**64, sctype(3)) == op(sctype(2**64), sctype(3))
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.parametrize("operation", [
+    lambda min, max: max + max,
+    lambda min, max: min - max,
+    lambda min, max: max * max], ids=["+", "-", "*"])
+def test_scalar_integer_operation_overflow(dtype, operation):
+    st = np.dtype(dtype).type
+    min = st(np.iinfo(dtype).min)
+    max = st(np.iinfo(dtype).max)
+
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        operation(min, max)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+@pytest.mark.parametrize("operation", [
+    lambda min, neg_1: -min,
+    lambda min, neg_1: abs(min),
+    lambda min, neg_1: min * neg_1,
+    pytest.param(lambda min, neg_1: min // neg_1,
+                 marks=pytest.mark.skip(reason="broken on some platforms"))],
+    ids=["neg", "abs", "*", "//"])
+def test_scalar_signed_integer_overflow(dtype, operation):
+    # The minimum signed integer can "overflow" for some additional operations
+    st = np.dtype(dtype).type
+    min = st(np.iinfo(dtype).min)
+    neg_1 = st(-1)
+
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        operation(min, neg_1)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"])
+def test_scalar_unsigned_integer_overflow(dtype):
+    val = np.dtype(dtype).type(8)
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        -val
+
+    zero = np.dtype(dtype).type(0)
+    -zero  # does not warn
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.parametrize("operation", [
+    lambda val, zero: val // zero,
+    lambda val, zero: val % zero, ], ids=["//", "%"])
+def test_scalar_integer_operation_divbyzero(dtype, operation):
+    st = np.dtype(dtype).type
+    val = st(100)
+    zero = st(0)
+
+    with pytest.warns(RuntimeWarning, match="divide by zero"):
+        operation(val, zero)
+
+
+ops_with_names = [
+    ("__lt__", "__gt__", operator.lt, True),
+    ("__le__", "__ge__", operator.le, True),
+    ("__eq__", "__eq__", operator.eq, True),
+    # Note __op__ and __rop__ may be identical here:
+    ("__ne__", "__ne__", operator.ne, True),
+    ("__gt__", "__lt__", operator.gt, True),
+    ("__ge__", "__le__", operator.ge, True),
+    ("__floordiv__", "__rfloordiv__", operator.floordiv, False),
+    ("__truediv__", "__rtruediv__", operator.truediv, False),
+    ("__add__", "__radd__", operator.add, False),
+    ("__mod__", "__rmod__", operator.mod, False),
+    ("__mul__", "__rmul__", operator.mul, False),
+    ("__pow__", "__rpow__", operator.pow, False),
+    ("__sub__", "__rsub__", operator.sub, False),
+]
+
+
+@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
+@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble])
+def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
+    """
+    This test covers scalar subclass deferral. Note that this is exceedingly
+    complicated, especially since it tends to fall back to the array paths and
+    these additionally add the "array priority" mechanism.
+
+    The behaviour was modified subtly in 1.22 (to make it closer to how Python
+    scalars work). Due to its complexity, and the fact that subclassing NumPy
+    scalars is probably a bad idea to begin with, there is probably room
+    for adjustments here.
+    """
+    class myf_simple1(sctype):
+        pass
+
+    class myf_simple2(sctype):
+        pass
+
+    def op_func(self, other):
+        return __op__
+
+    def rop_func(self, other):
+        return __rop__
+
+    myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func})
+
+    # inheritance has to override, or this is correctly lost:
+    res = op(myf_simple1(1), myf_simple2(2))
+    assert type(res) == sctype or type(res) == np.bool
+    assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2)  # inherited
+
+    # Two independent subclasses do not really define an order.  This could
+    # be attempted, but we do not since Python's `int` does neither:
+    assert op(myf_op(1), myf_simple1(2)) == __op__
+    assert op(myf_simple1(1), myf_op(2)) == op(1, 2)  # inherited
+
+
+def test_longdouble_complex():
+    # Simple test to check longdouble and complex combinations, since these
+    # need to go through promotion, which longdouble needs to be careful about.
+    x = np.longdouble(1)
+    assert x + 1j == 1 + 1j
+    assert 1j + x == 1 + 1j
+
+
+@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
+@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
+def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
+    # This tests that python scalar subclasses behave like a float64 (if they
+    # don't override it).
+    # In an earlier version of NEP 50, they behaved like the Python builtins.
+    def op_func(self, other):
+        return __op__
+
+    def rop_func(self, other):
+        return __rop__
+
+    # Check that deferring is indicated using `__array_ufunc__`:
+    myt = type("myt", (subtype,),
+               {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
+
+    # Just like normally, we should never presume we can modify the float.
+    assert op(myt(1), np.float64(2)) == __op__
+    assert op(np.float64(1), myt(2)) == __rop__
+
+    if op in {operator.mod, operator.floordiv} and subtype == complex:
+        return  # mod is not supported for complex. Do not test.
+
+    if __rop__ == __op__:
+        return
+
+    # When no deferring is indicated, subclasses are handled normally.
+    myt = type("myt", (subtype,), {__rop__: rop_func})
+    behaves_like = lambda x: np.array(subtype(x))[()]
+
+    # Check for float32, as a float subclass float64 may behave differently
+    res = op(myt(1), np.float16(2))
+    expected = op(behaves_like(1), np.float16(2))
+    assert res == expected
+    assert type(res) == type(expected)
+    res = op(np.float32(2), myt(1))
+    expected = op(np.float32(2), behaves_like(1))
+    assert res == expected
+    assert type(res) == type(expected)
+
+    # Same check for longdouble (compare via dtype to accept float64 when
+    # longdouble has the identical size), which is currently not perfectly
+    # consistent.
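+    # (Background note, platform-dependent) np.longdouble is 80-bit extended
+    # precision on typical x86-64 Linux builds but aliases float64 on e.g.
+    # Windows; comparing via np.dtype(...) accepts both layouts.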
+ res = op(myt(1), np.longdouble(2)) + expected = op(behaves_like(1), np.longdouble(2)) + assert res == expected + assert np.dtype(type(res)) == np.dtype(type(expected)) + res = op(np.float32(2), myt(1)) + expected = op(np.float32(2), behaves_like(1)) + assert res == expected + assert np.dtype(type(res)) == np.dtype(type(expected)) + + +def test_truediv_int(): + # This should work, as the result is float: + assert np.uint8(3) / 123454 == np.float64(3) / 123454 + + +@pytest.mark.slow +@pytest.mark.parametrize("op", + # TODO: Power is a bit special, but here mostly bools seem to behave oddly + [op for op in binary_operators_for_scalars if op is not operator.pow]) +@pytest.mark.parametrize("sctype", types) +@pytest.mark.parametrize("other_type", [float, int, complex]) +@pytest.mark.parametrize("rop", [True, False]) +def test_scalar_matches_array_op_with_pyscalar(op, sctype, other_type, rop): + # Check that the ufunc path matches by coercing to an array explicitly + val1 = sctype(2) + val2 = other_type(2) + + if rop: + _op = op + op = lambda x, y: _op(y, x) + + try: + res = op(val1, val2) + except TypeError: + try: + expected = op(np.asarray(val1), val2) + raise AssertionError("ufunc didn't raise.") + except TypeError: + return + else: + expected = op(np.asarray(val1), val2) + + # Note that we only check dtype equivalency, as ufuncs may pick the lower + # dtype if they are equivalent. + assert res == expected + if isinstance(val1, float) and other_type is complex and rop: + # Python complex accepts float subclasses, so we don't get a chance + # and the result may be a Python complex (thus, the `np.array()``) + assert np.array(res).dtype == expected.dtype + else: + assert res.dtype == expected.dtype diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarprint.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarprint.py new file mode 100644 index 0000000..38ed778 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_scalarprint.py @@ -0,0 +1,403 @@ +""" Test printing of scalar types. + +""" +import platform + +import pytest + +import numpy as np +from numpy.testing import IS_MUSL, assert_, assert_equal, assert_raises + + +class TestRealScalars: + def test_str(self): + svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] + styps = [np.float16, np.float32, np.float64, np.longdouble] + wanted = [ + ['0.0', '0.0', '0.0', '0.0' ], # noqa: E202 + ['-0.0', '-0.0', '-0.0', '-0.0'], + ['1.0', '1.0', '1.0', '1.0' ], # noqa: E202 + ['-1.0', '-1.0', '-1.0', '-1.0'], + ['inf', 'inf', 'inf', 'inf' ], # noqa: E202 + ['-inf', '-inf', '-inf', '-inf'], + ['nan', 'nan', 'nan', 'nan' ]] # noqa: E202 + + for wants, val in zip(wanted, svals): + for want, styp in zip(wants, styps): + msg = f'for str({np.dtype(styp).name}({val!r}))' + assert_equal(str(styp(val)), want, err_msg=msg) + + def test_scalar_cutoffs(self): + # test that both the str and repr of np.float64 behaves + # like python floats in python3. 
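+        # e.g. str(np.float64(0.1)) == str(0.1) == '0.1': both use the
+        # shortest decimal string that round-trips to the same double.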
+ def check(v): + assert_equal(str(np.float64(v)), str(v)) + assert_equal(str(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), f"np.float64({v!r})") + assert_equal(repr(np.float64(v)), f"np.float64({v})") + + # check we use the same number of significant digits + check(1.12345678901234567890) + check(0.0112345678901234567890) + + # check switch from scientific output to positional and back + check(1e-5) + check(1e-4) + check(1e15) + check(1e16) + + test_cases_gh_28679 = [ + (np.half, -0.000099, "-9.9e-05"), + (np.half, 0.0001, "0.0001"), + (np.half, 999, "999.0"), + (np.half, -1000, "-1e+03"), + (np.single, 0.000099, "9.9e-05"), + (np.single, -0.000100001, "-0.000100001"), + (np.single, 999999, "999999.0"), + (np.single, -1000000, "-1e+06") + ] + + @pytest.mark.parametrize("dtype, input_val, expected_str", test_cases_gh_28679) + def test_gh_28679(self, dtype, input_val, expected_str): + # test cutoff to exponent notation for half and single + assert_equal(str(dtype(input_val)), expected_str) + + test_cases_legacy_2_2 = [ + (np.half(65504), "65500.0"), + (np.single(1.e15), "1000000000000000.0"), + (np.single(1.e16), "1e+16"), + ] + + @pytest.mark.parametrize("input_val, expected_str", test_cases_legacy_2_2) + def test_legacy_2_2_mode(self, input_val, expected_str): + # test legacy cutoff to exponent notation for half and single + with np.printoptions(legacy='2.2'): + assert_equal(str(input_val), expected_str) + + def test_dragon4(self): + # these tests are adapted from Ryan Juckett's dragon4 implementation, + # see dragon4.c for details. + + fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) + fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) + fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) + fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) + + preckwd = lambda prec: {'unique': False, 'precision': prec} + + assert_equal(fpos32('1.0'), "1.") + assert_equal(fsci32('1.0'), "1.e+00") + assert_equal(fpos32('10.234'), "10.234") + assert_equal(fpos32('-10.234'), "-10.234") + assert_equal(fsci32('10.234'), "1.0234e+01") + assert_equal(fsci32('-10.234'), "-1.0234e+01") + assert_equal(fpos32('1000.0'), "1000.") + assert_equal(fpos32('1.0', precision=0), "1.") + assert_equal(fsci32('1.0', precision=0), "1.e+00") + assert_equal(fpos32('10.234', precision=0), "10.") + assert_equal(fpos32('-10.234', precision=0), "-10.") + assert_equal(fsci32('10.234', precision=0), "1.e+01") + assert_equal(fsci32('-10.234', precision=0), "-1.e+01") + assert_equal(fpos32('10.234', precision=2), "10.23") + assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") + assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), + '9.9999999999999995e-08') + assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), + '9.8813129168249309e-324') + assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), + '9.9999999999999694e-311') + + # test rounding + # 3.1415927410 is closest float32 to np.pi + assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), + "3.1415927410") + assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), + "3.1415927410e+00") + assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), + "3.1415926536") + assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), + "3.1415926536e+00") + # 299792448 is closest float32 to 299792458 + assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") + assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") + 
assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") + assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") + + assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), + "3.1415927410125732421875000") + assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), + "3.14159265358979311599796346854418516159057617187500") + assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") + + # smallest numbers + assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), + "0.00000000000000000000000000000000000000000000140129846432" + "4817070923729583289916131280261941876515771757068283889791" + "08268586060148663818836212158203125") + + assert_equal(fpos64(5e-324, unique=False, precision=1074), + "0.00000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000049406564584124654417656" + "8792868221372365059802614324764425585682500675507270208751" + "8652998363616359923797965646954457177309266567103559397963" + "9877479601078187812630071319031140452784581716784898210368" + "8718636056998730723050006387409153564984387312473397273169" + "6151400317153853980741262385655911710266585566867681870395" + "6031062493194527159149245532930545654440112748012970999954" + "1931989409080416563324524757147869014726780159355238611550" + "1348035264934720193790268107107491703332226844753335720832" + "4319360923828934583680601060115061698097530783422773183292" + "4790498252473077637592724787465608477820373446969953364701" + "7972677717585125660551199131504891101451037862738167250955" + "8373897335989936648099411642057026370902792427675445652290" + "87538682506419718265533447265625") + + # largest numbers + f32x = np.finfo(np.float32).max + assert_equal(fpos32(f32x, **preckwd(0)), + "340282346638528859811704183484516925440.") + assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), + "1797693134862315708145274237317043567980705675258449965989" + "1747680315726078002853876058955863276687817154045895351438" + "2464234321326889464182768467546703537516986049910576551282" + "0762454900903893289440758685084551339423045832369032229481" + "6580855933212334827479782620414472316873817718091929988125" + "0404026184124858368.") + # Warning: In unique mode only the integer digits necessary for + # uniqueness are computed, the rest are 0. + assert_equal(fpos32(f32x), + "340282350000000000000000000000000000000.") + + # Further tests of zero-padding vs rounding in different combinations + # of unique, fractional, precision, min_digits + # precision can only reduce digits, not add them. + # min_digits can only extend digits, not reduce them. 
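+        # (Clarifying note) with unique=True the shortest round-tripping
+        # digit string is the baseline: precision may only trim it, while
+        # min_digits forces additional exact digits to be generated.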
+ assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0), + "340282346638528859811704183484516925440.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4), + "340282346638528859811704183484516925440.0000") + assert_equal(fpos32(f32x, unique=True, fractional=True, + min_digits=4, precision=4), + "340282346638528859811704183484516925440.0000") + assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False, + precision=0) + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=20), + "340282346638528859810000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=15), + "340282346638529000000000000000000000000.") + assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + # test that unique rounding is preserved when precision is supplied + # but no extra digits need to be printed (gh-18609) + a = np.float64.fromhex('-1p-97') + assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=False, precision=15), + '-6.310887241768094e-30') + assert_equal(fsci64(a, unique=True, precision=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, precision=15, min_digits=15), + '-6.310887241768095e-30') + # adds/remove digits in unique mode with unbiased rnding + assert_equal(fsci64(a, unique=True, precision=14), + '-6.31088724176809e-30') + assert_equal(fsci64(a, unique=True, min_digits=16), + '-6.3108872417680944e-30') + assert_equal(fsci64(a, unique=True, precision=16), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=14), + '-6.310887241768095e-30') + # test min_digits in unique mode with different rounding cases + assert_equal(fsci64('1e120', min_digits=3), '1.000e+120') + assert_equal(fsci64('1e100', min_digits=3), '1.000e+100') + + # test trailing zeros + assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") + assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") + assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fpos32('1.5', unique=False, precision=3), "1.500") + assert_equal(fpos64('1.5', unique=False, precision=3), "1.500") + assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") + assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") + # gh-10713 + assert_equal(fpos64('324', unique=False, precision=5, + fractional=False), "324.00") + + available_float_dtypes = [np.float16, np.float32, np.float64, np.float128]\ + if hasattr(np, 'float128') else [np.float16, np.float32, np.float64] + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL 
and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # test padding + assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface_trim(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") + assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") + + @pytest.mark.parametrize("tp", available_float_dtypes) + @pytest.mark.parametrize("pad_val", [10**5, np.iinfo("int32").max]) + def test_dragon4_positional_interface_overflow(self, tp, pad_val): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # gh-28068 + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), unique=False, precision=pad_val) + + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), precision=2, pad_left=pad_val) + + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), precision=2, pad_right=pad_val) + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_scientific_interface(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fsci = np.format_float_scientific + + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") + + @pytest.mark.skipif(not platform.machine().startswith("ppc64"), + reason="only applies to ppc float128 values") + def test_ppc64_ibm_double_double128(self): + # check that the precision decreases once we get into the subnormal + # range. Unlike float64, this starts around 1e-292 instead of 1e-308, + # which happens when the first double is normal and the second is + # subnormal. 
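+        # (Background) IBM double-double represents a value as an unevaluated
+        # sum of two float64s, giving ~106 bits of precision while both halves
+        # are normal; once the low half goes subnormal the available precision
+        # shrinks gradually, which the tapering digit counts below illustrate.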
+ x = np.float128('2.123123123123123123123123123123123e-286') + got = [str(x / np.float128('2e' + str(i))) for i in range(40)] + expected = [ + "1.06156156156156156156156156156157e-286", + "1.06156156156156156156156156156158e-287", + "1.06156156156156156156156156156159e-288", + "1.0615615615615615615615615615616e-289", + "1.06156156156156156156156156156157e-290", + "1.06156156156156156156156156156156e-291", + "1.0615615615615615615615615615616e-292", + "1.0615615615615615615615615615615e-293", + "1.061561561561561561561561561562e-294", + "1.06156156156156156156156156155e-295", + "1.0615615615615615615615615616e-296", + "1.06156156156156156156156156e-297", + "1.06156156156156156156156157e-298", + "1.0615615615615615615615616e-299", + "1.06156156156156156156156e-300", + "1.06156156156156156156155e-301", + "1.0615615615615615615616e-302", + "1.061561561561561561562e-303", + "1.06156156156156156156e-304", + "1.0615615615615615618e-305", + "1.06156156156156156e-306", + "1.06156156156156157e-307", + "1.0615615615615616e-308", + "1.06156156156156e-309", + "1.06156156156157e-310", + "1.0615615615616e-311", + "1.06156156156e-312", + "1.06156156154e-313", + "1.0615615616e-314", + "1.06156156e-315", + "1.06156155e-316", + "1.061562e-317", + "1.06156e-318", + "1.06155e-319", + "1.0617e-320", + "1.06e-321", + "1.04e-322", + "1e-323", + "0.0", + "0.0"] + assert_equal(got, expected) + + # Note: we follow glibc behavior, but it (or gcc) might not be right. + # In particular we can get two values that print the same but are not + # equal: + a = np.float128('2') / np.float128('3') + b = np.float128(str(a)) + assert_equal(str(a), str(b)) + assert_(a != b) + + def float32_roundtrip(self): + # gh-9360 + x = np.float32(1024 - 2**-14) + y = np.float32(1024 - 2**-13) + assert_(repr(x) != repr(y)) + assert_equal(np.float32(repr(x)), x) + assert_equal(np.float32(repr(y)), y) + + def float64_vs_python(self): + # gh-2643, gh-6136, gh-6908 + assert_equal(repr(np.float64(0.1)), repr(0.1)) + assert_(repr(np.float64(0.20000000000000004)) != repr(0.2)) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_shape_base.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_shape_base.py new file mode 100644 index 0000000..8de2427 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_shape_base.py @@ -0,0 +1,891 @@ +import sys + +import pytest + +import numpy as np +from numpy._core import ( + arange, + array, + atleast_1d, + atleast_2d, + atleast_3d, + block, + concatenate, + hstack, + newaxis, + stack, + vstack, +) +from numpy._core.shape_base import ( + _block_concatenate, + _block_dispatcher, + _block_setup, + _block_slicing, +) +from numpy.exceptions import AxisError +from numpy.testing import ( + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) +from numpy.testing._private.utils import requires_memory + + +class TestAtleast1d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_1d(a), atleast_1d(b)] + desired = [array([1]), array([2])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [array([1, 2]), array([2, 3])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = 
array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = [atleast_1d(a), atleast_1d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_r1array(self):
+        """ Test that it is equivalent to Travis O's r1array function.
+        """
+        assert_(atleast_1d(3).shape == (1,))
+        assert_(atleast_1d(3j).shape == (1,))
+        assert_(atleast_1d(3.0).shape == (1,))
+        assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
+
+
+class TestAtleast2d:
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [array([[1]]), array([[2]])]
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1, 2])
+        b = array([2, 3])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [array([[1, 2]]), array([[2, 3]])]
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_3D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_r2array(self):
+        """ Test that it is equivalent to Travis O's r2array function.
+        """
+        assert_(atleast_2d(3).shape == (1, 1))
+        assert_(atleast_2d([3j, 1]).shape == (1, 2))
+        assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
+
+
+class TestAtleast3d:
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [array([[[1]]]), array([[[2]]])]
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1, 2])
+        b = array([2, 3])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [array([[[1], [2]]]), array([[[2], [3]]])]
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [a[:, :, newaxis], b[:, :, newaxis]]
+        assert_array_equal(res, desired)
+
+    def test_3D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+
+class TestHstack:
+    def test_non_iterable(self):
+        assert_raises(TypeError, hstack, 1)
+
+    def test_empty_input(self):
+        assert_raises(ValueError, hstack, ())
+
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = hstack([a, b])
+        desired = array([1, 2])
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1])
+        b = array([2])
+        res = hstack([a, b])
+        desired = array([1, 2])
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1], [2]])
+        b = array([[1], [2]])
+        res = hstack([a, b])
+        desired = array([[1, 1], [2, 2]])
+        assert_array_equal(res, desired)
+
+    def test_generator(self):
+        with pytest.raises(TypeError, match="arrays to stack must be"):
+            hstack(np.arange(3) for _ in range(2))
+        with pytest.raises(TypeError, match="arrays to stack must be"):
+            hstack(x for x in np.ones((3, 2)))
+
+    def test_casting_and_dtype(self):
+        a = np.array([1, 2, 3])
+        b = np.array([2.5, 3.5, 4.5])
+        res = np.hstack((a, b), casting="unsafe", dtype=np.int64)
+        expected_res = np.array([1, 2, 3, 2, 3, 4])
+        assert_array_equal(res, expected_res)
+
+    def test_casting_and_dtype_type_error(self):
+        a = np.array([1, 2, 3])
+        b = np.array([2.5, 3.5, 4.5])
+        with
pytest.raises(TypeError): + hstack((a, b), casting="safe", dtype=np.int64) + + +class TestVstack: + def test_non_iterable(self): + assert_raises(TypeError, vstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, vstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = vstack([a, b]) + desired = array([[1], [2], [1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = array([1, 2]) + b = array([1, 2]) + res = vstack([a, b]) + desired = array([[1, 2], [1, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + vstack(np.arange(3) for _ in range(2)) + + def test_casting_and_dtype(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.vstack((a, b), casting="unsafe", dtype=np.int64) + expected_res = np.array([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(res, expected_res) + + def test_casting_and_dtype_type_error(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + with pytest.raises(TypeError): + vstack((a, b), casting="safe", dtype=np.int64) + + +class TestConcatenate: + def test_returns_copy(self): + a = np.eye(3) + b = np.concatenate([a]) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_exceptions(self): + # test axis must be in bounds + for ndim in [1, 2, 3]: + a = np.ones((1,) * ndim) + np.concatenate((a, a), axis=0) # OK + assert_raises(AxisError, np.concatenate, (a, a), axis=ndim) + assert_raises(AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) + + # Scalars cannot be concatenated + assert_raises(ValueError, concatenate, (0,)) + assert_raises(ValueError, concatenate, (np.array(0),)) + + # dimensionality must match + assert_raises_regex( + ValueError, + r"all the input arrays must have same number of dimensions, but " + r"the array at index 0 has 1 dimension\(s\) and the array at " + r"index 1 has 2 dimension\(s\)", + np.concatenate, (np.zeros(1), np.zeros((1, 1)))) + + # test shapes must match except for concatenation axis + a = np.ones((1, 2, 3)) + b = np.ones((2, 2, 3)) + axis = list(range(3)) + for i in range(3): + np.concatenate((a, b), axis=axis[0]) # OK + assert_raises_regex( + ValueError, + "all the input array dimensions except for the concatenation axis " + f"must match exactly, but along dimension {i}, the array at " + "index 0 has size 1 and the array at index 1 has size 2", + np.concatenate, (a, b), axis=axis[1]) + assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) + a = np.moveaxis(a, -1, 0) + b = np.moveaxis(b, -1, 0) + axis.append(axis.pop(0)) + + # No arrays to concatenate raises ValueError + assert_raises(ValueError, concatenate, ()) + + @pytest.mark.slow + @pytest.mark.skipif(sys.maxsize < 2**32, reason="only problematic on 64bit platforms") + @requires_memory(2 * np.iinfo(np.intc).max) + def test_huge_list_error(self): + a = np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg = fr"concatenate\(\) only supports up to {max_int} arrays but got {max_int + 1}." 
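+        # the limit is the largest value of a C `int`, presumably because the
+        # core counts input arrays with one; exceeding it must raise cleanly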
+        with pytest.raises(ValueError, match=msg):
+            np.concatenate(arrs)
+
+    def test_concatenate_axis_None(self):
+        a = np.arange(4, dtype=np.float64).reshape((2, 2))
+        b = list(range(3))
+        c = ['x']
+        r = np.concatenate((a, a), axis=None)
+        assert_equal(r.dtype, a.dtype)
+        assert_equal(r.ndim, 1)
+        r = np.concatenate((a, b), axis=None)
+        assert_equal(r.size, a.size + len(b))
+        assert_equal(r.dtype, a.dtype)
+        r = np.concatenate((a, b, c), axis=None, dtype="U")
+        d = array(['0.0', '1.0', '2.0', '3.0',
+                   '0', '1', '2', 'x'])
+        assert_array_equal(r, d)
+
+        out = np.zeros(a.size + len(b))
+        r = np.concatenate((a, b), axis=None)
+        rout = np.concatenate((a, b), axis=None, out=out)
+        assert_(out is rout)
+        assert_equal(r, rout)
+
+    def test_large_concatenate_axis_None(self):
+        # When no axis is given, concatenate uses flattened versions.
+        # This also had a bug with many arrays (see gh-5979).
+        x = np.arange(1, 100)
+        r = np.concatenate(x, None)
+        assert_array_equal(x, r)
+
+        # Once upon a time, this was the same as `axis=None`; now it fails
+        # (with an unspecified error, as multiple things are wrong here)
+        with pytest.raises(ValueError):
+            np.concatenate(x, 100)
+
+    def test_concatenate(self):
+        # Test concatenate function
+        # One sequence returns unmodified (but as array)
+        r4 = list(range(4))
+        assert_array_equal(concatenate((r4,)), r4)
+        # Any sequence
+        assert_array_equal(concatenate((tuple(r4),)), r4)
+        assert_array_equal(concatenate((array(r4),)), r4)
+        # 1D default concatenation
+        r3 = list(range(3))
+        assert_array_equal(concatenate((r4, r3)), r4 + r3)
+        # Mixed sequence types
+        assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
+        assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
+        # Explicit axis specification
+        assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
+        # Including negative
+        assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
+        # 2D
+        a23 = array([[10, 11, 12], [13, 14, 15]])
+        a13 = array([[0, 1, 2]])
+        res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
+        assert_array_equal(concatenate((a23, a13)), res)
+        assert_array_equal(concatenate((a23, a13), 0), res)
+        assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
+        assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
+        # Arrays must match shape
+        assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
+        # 3D
+        res = arange(2 * 3 * 7).reshape((2, 3, 7))
+        a0 = res[..., :4]
+        a1 = res[..., 4:6]
+        a2 = res[..., 6:]
+        assert_array_equal(concatenate((a0, a1, a2), 2), res)
+        assert_array_equal(concatenate((a0, a1, a2), -1), res)
+        assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+
+        out = res.copy()
+        rout = concatenate((a0, a1, a2), 2, out=out)
+        assert_(out is rout)
+        assert_equal(res, rout)
+
+    @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
+    def test_operator_concat(self):
+        import operator
+        a = array([1, 2])
+        b = array([3, 4])
+        n = [1, 2]
+        res = array([1, 2, 3, 4])
+        assert_raises(TypeError, operator.concat, a, b)
+        assert_raises(TypeError, operator.concat, a, n)
+        assert_raises(TypeError, operator.concat, n, a)
+        assert_raises(TypeError, operator.concat, a, 1)
+        assert_raises(TypeError, operator.concat, 1, a)
+
+    def test_bad_out_shape(self):
+        a = array([1, 2])
+        b = array([3, 4])
+
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty((4, 1)))
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty((1, 4)))
+        concatenate((a, b), out=np.empty(4))
+
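+    # The sketch below is an editor's illustration, not part of the upstream
+    # suite: `dtype=` produces the same result as writing through an `out=`
+    # array of that dtype, and passing both together is a TypeError. The
+    # method name is hypothetical.
+    def test_dtype_matches_out_sketch(self):
+        a = np.array([1, 2])
+        b = np.array([3.5, 4.5])
+        out = np.empty(4, dtype=np.float32)
+        # writing into a float32 out casts the float64 result (same_kind)
+        res_out = np.concatenate((a, b), out=out)
+        # requesting dtype=float32 directly must give the identical result
+        res_dtype = np.concatenate((a, b), dtype=np.float32,
+                                   casting="same_kind")
+        assert_array_equal(res_out, res_dtype)
+        # requesting both at once is rejected
+        with pytest.raises(TypeError):
+            np.concatenate((a, b), out=out, dtype=np.float32)
+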
@pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"]) + @pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) + def test_out_and_dtype(self, axis, out_dtype, casting): + # Compare usage of `out=out` with `dtype=out.dtype` + out = np.empty(4, dtype=out_dtype) + to_concat = (array([1.1, 2.2]), array([3.3, 4.4])) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + concatenate(to_concat, out=out, axis=axis, casting=casting) + with assert_raises(TypeError): + concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + else: + res_out = concatenate(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with assert_raises(TypeError): + concatenate(to_concat, out=out, dtype=out_dtype, axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"]) + @pytest.mark.parametrize("arrs", + [([0.],), ([0.], [1]), ([0], ["string"], [1.])]) + def test_dtype_with_promotion(self, arrs, string_dt, axis): + # Note that U0 and S0 should be deprecated eventually and changed to + # actually give the empty string result (together with `np.array`) + res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe") + # The actual dtype should be identical to a cast (of a double array): + assert res.dtype == np.array(1.).astype(string_dt).dtype + + @pytest.mark.parametrize("axis", [None, 0]) + def test_string_dtype_does_not_inspect(self, axis): + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="S", axis=axis) + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="U", axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + def test_subarray_error(self, axis): + with pytest.raises(TypeError, match=".*subarray dtype"): + np.concatenate(([1], [1]), dtype="(2,)i", axis=axis) + + +def test_stack(): + # non-iterable input + assert_raises(TypeError, stack, 1) + + # 0d input + for input_ in [(1, 2, 3), + [np.int32(1), np.int32(2), np.int32(3)], + [np.array(1), np.array(2), np.array(3)]]: + assert_array_equal(stack(input_), [1, 2, 3]) + # 1d input examples + a = np.array([1, 2, 3]) + b = np.array([4, 5, 6]) + r1 = array([[1, 2, 3], [4, 5, 6]]) + assert_array_equal(np.stack((a, b)), r1) + assert_array_equal(np.stack((a, b), axis=1), r1.T) + # all input types + assert_array_equal(np.stack([a, b]), r1) + assert_array_equal(np.stack(array([a, b])), r1) + # all shapes for 1d input + arrays = [np.random.randn(3) for _ in range(10)] + axes = [0, 1, -1, -2] + expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + assert_raises_regex(AxisError, 'out of bounds', stack, arrays, axis=2) + assert_raises_regex(AxisError, 'out of bounds', stack, arrays, axis=-3) + # all shapes for 2d input + arrays = [np.random.randn(3, 4) for _ in range(10)] + axes = [0, 1, 2, -1, -2, -3] + expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10), + (3, 4, 10), (3, 10, 4), (10, 3, 4)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + # empty arrays + assert_(stack([[], [], []]).shape == (3, 0)) + assert_(stack([[], [], []], 
axis=1).shape == (0, 3)) + # out + out = np.zeros_like(r1) + np.stack((a, b), out=out) + assert_array_equal(out, r1) + # edge cases + assert_raises_regex(ValueError, 'need at least one array', stack, []) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [1, np.arange(3)]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(2), np.arange(3)]) + + # do not accept generators + with pytest.raises(TypeError, match="arrays to stack must be"): + stack(x for x in range(3)) + + # casting and dtype test + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64) + expected_res = np.array([[1, 2], [2, 3], [3, 4]]) + assert_array_equal(res, expected_res) + # casting and dtype with TypeError + with assert_raises(TypeError): + stack((a, b), dtype=np.int64, axis=1, casting="safe") + + +def test_unstack(): + a = np.arange(24).reshape((2, 3, 4)) + + for stacks in [np.unstack(a), + np.unstack(a, axis=0), + np.unstack(a, axis=-3)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 2 + assert_array_equal(stacks[0], a[0]) + assert_array_equal(stacks[1], a[1]) + + for stacks in [np.unstack(a, axis=1), + np.unstack(a, axis=-2)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 3 + assert_array_equal(stacks[0], a[:, 0]) + assert_array_equal(stacks[1], a[:, 1]) + assert_array_equal(stacks[2], a[:, 2]) + + for stacks in [np.unstack(a, axis=2), + np.unstack(a, axis=-1)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 4 + assert_array_equal(stacks[0], a[:, :, 0]) + assert_array_equal(stacks[1], a[:, :, 1]) + assert_array_equal(stacks[2], a[:, :, 2]) + assert_array_equal(stacks[3], a[:, :, 3]) + + assert_raises(ValueError, np.unstack, a, axis=3) + assert_raises(ValueError, np.unstack, a, axis=-4) + assert_raises(ValueError, np.unstack, np.array(0), axis=0) + + +@pytest.mark.parametrize("axis", [0]) +@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"]) +@pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) +def test_stack_out_and_dtype(axis, out_dtype, casting): + to_concat = (array([1, 2]), array([3, 4])) + res = array([[1, 2], [3, 4]]) + out = np.zeros_like(res) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + stack(to_concat, dtype=out_dtype, + axis=axis, casting=casting) + else: + res_out = stack(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = stack(to_concat, dtype=out_dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with assert_raises(TypeError): + stack(to_concat, out=out, dtype=out_dtype, axis=axis) + + +class TestBlock: + @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) + def block(self, request): + # blocking small arrays and large arrays go through different paths. + # the algorithm is triggered depending on the number of element + # copies required. + # We define a test fixture that forces most tests to go through + # both code paths. 
+ # Ultimately, this should be removed if a single algorithm is found + # to be faster for both small and large arrays. + def _block_force_concatenate(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_concatenate(arrays, list_ndim, result_ndim) + + def _block_force_slicing(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_slicing(arrays, list_ndim, result_ndim) + + if request.param == 'force_concatenate': + return _block_force_concatenate + elif request.param == 'force_slicing': + return _block_force_slicing + elif request.param == 'block': + return block + else: + raise ValueError('Unknown blocking request. There is a typo in the tests.') + + def test_returns_copy(self, block): + a = np.eye(3) + b = block(a) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_block_total_size_estimate(self, block): + _, _, _, total_size = _block_setup([1]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1]]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1, 1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1], [1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) + assert total_size == 4 + + def test_block_simple_row_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + desired = np.array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + result = block([a_2d, b_2d]) + assert_equal(desired, result) + + def test_block_simple_column_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + expected = np.array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + result = block([[a_2d], [b_2d]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_row_wise(self, block): + # # # 1-D vectors are treated as row arrays + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([1, 2, 3, 2, 3, 4]) + result = block([a, b]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_multiple_rows(self, block): + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3, 2, 3, 4], + [1, 2, 3, 2, 3, 4]]) + result = block([[a, b], [a, b]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_column_wise(self, block): + # # # 1-D vectors are treated as row arrays + a_1d = np.array([1, 2, 3]) + b_1d = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3], + [2, 3, 4]]) + result = block([[a_1d], [b_1d]]) + assert_equal(expected, result) + + def test_block_mixed_1d_and_2d(self, block): + a_2d = np.ones((2, 2)) + b_1d = np.array([2, 2]) + result = block([[a_2d], [b_1d]]) + expected = np.array([[1, 1], + [1, 1], + [2, 2]]) + assert_equal(expected, result) + + def test_block_complicated(self, block): + # a bit more complicated + one_2d = np.array([[1, 1, 1]]) + two_2d = np.array([[2, 2, 2]]) + three_2d = np.array([[3, 3, 3, 3, 3, 3]]) + four_1d = np.array([4, 4, 4, 4, 4, 4]) + five_0d = np.array(5) + six_1d = np.array([6, 6, 6, 6, 6]) + zero_2d = np.zeros((2, 6)) + + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 3, 3, 3], + [4, 4, 4, 4, 4, 4], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + result = block([[one_2d, two_2d], + [three_2d], + [four_1d], + [five_0d, six_1d], + [zero_2d]]) + assert_equal(result, expected) + + def test_nested(self, block): + one = np.array([1, 1, 1]) + two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) + three = np.array([3, 3, 3]) + four = np.array([4, 4, 4]) + five = np.array(5) + six = np.array([6, 6, 6, 6, 6]) + zero = np.zeros((2, 6)) + + result 
= block([ + [ + block([ + [one], + [three], + [four] + ]), + two + ], + [five, six], + [zero] + ]) + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 2, 2, 2], + [4, 4, 4, 2, 2, 2], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + assert_equal(result, expected) + + def test_3d(self, block): + a000 = np.ones((2, 2, 2), int) * 1 + + a100 = np.ones((3, 2, 2), int) * 2 + a010 = np.ones((2, 3, 2), int) * 3 + a001 = np.ones((2, 2, 3), int) * 4 + + a011 = np.ones((2, 3, 3), int) * 5 + a101 = np.ones((3, 2, 3), int) * 6 + a110 = np.ones((3, 3, 2), int) * 7 + + a111 = np.ones((3, 3, 3), int) * 8 + + result = block([ + [ + [a000, a001], + [a010, a011], + ], + [ + [a100, a101], + [a110, a111], + ] + ]) + expected = array([[[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]]]) + + assert_array_equal(result, expected) + + def test_block_with_mismatched_shape(self, block): + a = np.array([0, 0]) + b = np.eye(2) + assert_raises(ValueError, block, [a, b]) + assert_raises(ValueError, block, [b, a]) + + to_block = [[np.ones((2, 3)), np.ones((2, 2))], + [np.ones((2, 2)), np.ones((2, 2))]] + assert_raises(ValueError, block, to_block) + + def test_no_lists(self, block): + assert_equal(block(1), np.array(1)) + assert_equal(block(np.eye(3)), np.eye(3)) + + def test_invalid_nesting(self, block): + msg = 'depths are mismatched' + assert_raises_regex(ValueError, msg, block, [1, [2]]) + assert_raises_regex(ValueError, msg, block, [1, []]) + assert_raises_regex(ValueError, msg, block, [[1], 2]) + assert_raises_regex(ValueError, msg, block, [[], 2]) + assert_raises_regex(ValueError, msg, block, [ + [[1], [2]], + [[3, 4]], + [5] # missing brackets + ]) + + def test_empty_lists(self, block): + assert_raises_regex(ValueError, 'empty', block, []) + assert_raises_regex(ValueError, 'empty', block, [[]]) + assert_raises_regex(ValueError, 'empty', block, [[1], []]) + + def test_tuple(self, block): + assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) + assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) + + def test_different_ndims(self, block): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 1, 3)) + + result = block([a, b, c]) + expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_different_ndims_depths(self, block): + a = 1. 
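+        # the 0-d scalar is promoted to the result's ndim and concatenated
+        # with the higher-dimensional blocks that follow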
+ b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 2, 3)) + + result = block([[a, b], [c]]) + expected = np.array([[[1., 2., 2.], + [3., 3., 3.], + [3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_block_memory_order(self, block): + # 3D + arr_c = np.zeros((3,) * 3, order='C') + arr_f = np.zeros((3,) * 3, order='F') + + b_c = [[[arr_c, arr_c], + [arr_c, arr_c]], + [[arr_c, arr_c], + [arr_c, arr_c]]] + + b_f = [[[arr_f, arr_f], + [arr_f, arr_f]], + [[arr_f, arr_f], + [arr_f, arr_f]]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + arr_c = np.zeros((3, 3), order='C') + arr_f = np.zeros((3, 3), order='F') + # 2D + b_c = [[arr_c, arr_c], + [arr_c, arr_c]] + + b_f = [[arr_f, arr_f], + [arr_f, arr_f]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + +def test_block_dispatcher(): + class ArrayLike: + pass + a = ArrayLike() + b = ArrayLike() + c = ArrayLike() + assert_equal(list(_block_dispatcher(a)), [a]) + assert_equal(list(_block_dispatcher([a])), [a]) + assert_equal(list(_block_dispatcher([a, b])), [a, b]) + assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c]) + # don't recurse into non-lists + assert_equal(list(_block_dispatcher((a, b))), [(a, b)]) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_simd.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_simd.py new file mode 100644 index 0000000..697d89b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_simd.py @@ -0,0 +1,1341 @@ +# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics +# may be involved in their functionality. +import itertools +import math +import operator +import re + +import pytest +from numpy._core._multiarray_umath import __cpu_baseline__ + +from numpy._core._simd import clear_floatstatus, get_floatstatus, targets + + +def check_floatstatus(divbyzero=False, overflow=False, + underflow=False, invalid=False, + all=False): + #define NPY_FPE_DIVIDEBYZERO 1 + #define NPY_FPE_OVERFLOW 2 + #define NPY_FPE_UNDERFLOW 4 + #define NPY_FPE_INVALID 8 + err = get_floatstatus() + ret = (all or divbyzero) and (err & 1) != 0 + ret |= (all or overflow) and (err & 2) != 0 + ret |= (all or underflow) and (err & 4) != 0 + ret |= (all or invalid) and (err & 8) != 0 + return ret + +class _Test_Utility: + # submodule of the desired SIMD extension, e.g. targets["AVX512F"] + npyv = None + # the current data type suffix e.g. 's8' + sfx = None + # target name can be 'baseline' or one or more of CPU features + target_name = None + + def __getattr__(self, attr): + """ + To call NPV intrinsics without the attribute 'npyv' and + auto suffixing intrinsics according to class attribute 'sfx' + """ + return getattr(self.npyv, attr + "_" + self.sfx) + + def _x2(self, intrin_name): + return getattr(self.npyv, f"{intrin_name}_{self.sfx}x2") + + def _data(self, start=None, count=None, reverse=False): + """ + Create list of consecutive numbers according to number of vector's lanes. 
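+        Values are converted to floats when the suffix is a floating-point type.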
+ """ + if start is None: + start = 1 + if count is None: + count = self.nlanes + rng = range(start, start + count) + if reverse: + rng = reversed(rng) + if self._is_fp(): + return [x / 1.0 for x in rng] + return list(rng) + + def _is_unsigned(self): + return self.sfx[0] == 'u' + + def _is_signed(self): + return self.sfx[0] == 's' + + def _is_fp(self): + return self.sfx[0] == 'f' + + def _scalar_size(self): + return int(self.sfx[1:]) + + def _int_clip(self, seq): + if self._is_fp(): + return seq + max_int = self._int_max() + min_int = self._int_min() + return [min(max(v, min_int), max_int) for v in seq] + + def _int_max(self): + if self._is_fp(): + return None + max_u = self._to_unsigned(self.setall(-1))[0] + if self._is_signed(): + return max_u // 2 + return max_u + + def _int_min(self): + if self._is_fp(): + return None + if self._is_unsigned(): + return 0 + return -(self._int_max() + 1) + + def _true_mask(self): + max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1) + return max_unsig[0] + + def _to_unsigned(self, vector): + if isinstance(vector, (list, tuple)): + return getattr(self.npyv, "load_u" + self.sfx[1:])(vector) + else: + sfx = vector.__name__.replace("npyv_", "") + if sfx[0] == "b": + cvt_intrin = "cvt_u{0}_b{0}" + else: + cvt_intrin = "reinterpret_u{0}_{1}" + return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector) + + def _pinfinity(self): + return float("inf") + + def _ninfinity(self): + return -float("inf") + + def _nan(self): + return float("nan") + + def _cpu_features(self): + target = self.target_name + if target == "baseline": + target = __cpu_baseline__ + else: + target = target.split('__') # multi-target separator + return ' '.join(target) + +class _SIMD_BOOL(_Test_Utility): + """ + To test all boolean vector types at once + """ + def _nlanes(self): + return getattr(self.npyv, "nlanes_u" + self.sfx[1:]) + + def _data(self, start=None, count=None, reverse=False): + true_mask = self._true_mask() + rng = range(self._nlanes()) + if reverse: + rng = reversed(rng) + return [true_mask if x % 2 else 0 for x in rng] + + def _load_b(self, data): + len_str = self.sfx[1:] + load = getattr(self.npyv, "load_u" + len_str) + cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}") + return cvt(load(data)) + + def test_operators_logical(self): + """ + Logical operations for boolean types. 
+        Test intrinsics:
+            npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX,
+            npyv_andc_b8, npyv_orc_b8, npyv_xnor_b8
+        """
+        data_a = self._data()
+        data_b = self._data(reverse=True)
+        vdata_a = self._load_b(data_a)
+        vdata_b = self._load_b(data_b)
+
+        data_and = [a & b for a, b in zip(data_a, data_b)]
+        vand = getattr(self, "and")(vdata_a, vdata_b)
+        assert vand == data_and
+
+        data_or = [a | b for a, b in zip(data_a, data_b)]
+        vor = getattr(self, "or")(vdata_a, vdata_b)
+        assert vor == data_or
+
+        data_xor = [a ^ b for a, b in zip(data_a, data_b)]
+        vxor = self.xor(vdata_a, vdata_b)
+        assert vxor == data_xor
+
+        vnot = getattr(self, "not")(vdata_a)
+        assert vnot == data_b
+
+        # among the boolean types, andc, orc and xnor only support b8
+        if self.sfx not in ("b8",):
+            return
+
+        data_andc = [(a & ~b) & 0xFF for a, b in zip(data_a, data_b)]
+        vandc = self.andc(vdata_a, vdata_b)
+        assert data_andc == vandc
+
+        data_orc = [(a | ~b) & 0xFF for a, b in zip(data_a, data_b)]
+        vorc = self.orc(vdata_a, vdata_b)
+        assert data_orc == vorc
+
+        data_xnor = [~(a ^ b) & 0xFF for a, b in zip(data_a, data_b)]
+        vxnor = self.xnor(vdata_a, vdata_b)
+        assert data_xnor == vxnor
+
+    def test_tobits(self):
+        data2bits = lambda data: sum(int(x != 0) << i for i, x in enumerate(data))
+        for data in (self._data(), self._data(reverse=True)):
+            vdata = self._load_b(data)
+            data_bits = data2bits(data)
+            tobits = self.tobits(vdata)
+            bin_tobits = bin(tobits)
+            assert bin_tobits == bin(data_bits)
+
+    def test_pack(self):
+        """
+        Pack multiple vectors into one.
+        Test intrinsics:
+            npyv_pack_b8_b16
+            npyv_pack_b8_b32
+            npyv_pack_b8_b64
+        """
+        if self.sfx not in ("b16", "b32", "b64"):
+            return
+        # create the vectors
+        data = self._data()
+        rdata = self._data(reverse=True)
+        vdata = self._load_b(data)
+        vrdata = self._load_b(rdata)
+        pack_simd = getattr(self.npyv, f"pack_b8_{self.sfx}")
+        # for scalar execution, concatenate the elements of the input lists
+        # into a single list (spack), then mask each element of the created
+        # list to keep only its first byte.
+ if self.sfx == "b16": + spack = [(i & 0xFF) for i in (list(rdata) + list(data))] + vpack = pack_simd(vrdata, vdata) + elif self.sfx == "b32": + spack = [(i & 0xFF) for i in (2 * list(rdata) + 2 * list(data))] + vpack = pack_simd(vrdata, vrdata, vdata, vdata) + elif self.sfx == "b64": + spack = [(i & 0xFF) for i in (4 * list(rdata) + 4 * list(data))] + vpack = pack_simd(vrdata, vrdata, vrdata, vrdata, + vdata, vdata, vdata, vdata) + assert vpack == spack + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [-1, 0], + [0, -1], + [-1], + [0] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self._load_b(data * self._nlanes()) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + +class _SIMD_INT(_Test_Utility): + """ + To test all integer vector types at once + """ + def test_operators_shift(self): + if self.sfx in ("u8", "s8"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + for count in range(self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift + shl = self.shl(vdata_a, count) + assert shl == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift + shr = self.shr(vdata_a, count) + assert shr == data_shr_a + + # shift by zero or max or out-range immediate constant is not applicable and illogical + for count in range(1, self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift by an immediate constant + shli = self.shli(vdata_a, count) + assert shli == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift by an immediate constant + shri = self.shri(vdata_a, count) + assert shri == data_shr_a + + def test_arithmetic_subadd_saturated(self): + if self.sfx in ("u32", "s32", "u64", "s64"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)]) + adds = self.adds(vdata_a, vdata_b) + assert adds == data_adds + + data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)]) + subs = self.subs(vdata_a, vdata_b) + assert subs == data_subs + + def test_math_max_min(self): + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_max = [max(a, b) for a, b in zip(data_a, data_b)] + simd_max = self.max(vdata_a, vdata_b) + assert simd_max == data_max + + data_min = [min(a, b) for a, b in zip(data_a, data_b)] + simd_min = self.min(vdata_a, vdata_b) + assert simd_min == data_min + + @pytest.mark.parametrize("start", [-100, -10000, 0, 100, 10000]) + def test_reduce_max_min(self, start): + """ + Test intrinsics: + npyv_reduce_max_##sfx + npyv_reduce_min_##sfx + """ + vdata_a = self.load(self._data(start)) + assert self.reduce_max(vdata_a) == max(vdata_a) + assert self.reduce_min(vdata_a) == min(vdata_a) + + +class _SIMD_FP32(_Test_Utility): + """ + To only test single precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. 
+ Test intrinsics: + npyv_round_s32_##SFX + """ + features = self._cpu_features() + if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features): + # very costly to emulate nearest even on Armv7 + # instead we round halves to up. e.g. 0.5 -> 1, -0.5 -> -1 + _round = lambda v: int(v + (0.5 if v >= 0 else -0.5)) + else: + _round = round + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + data_round = [_round(x) for x in vdata_a] + vround = self.round_s32(vdata_a) + assert vround == data_round + +class _SIMD_FP64(_Test_Utility): + """ + To only test double precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinsics: + npyv_round_s32_##SFX + """ + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + vdata_b = self.mul(vdata_a, self.setall(-1.5)) + data_round = [round(x) for x in list(vdata_a) + list(vdata_b)] + vround = self.round_s32(vdata_a, vdata_b) + assert vround == data_round + +class _SIMD_FP(_Test_Utility): + """ + To test all float vector types at once + """ + def test_arithmetic_fused(self): + vdata_a, vdata_b, vdata_c = [self.load(self._data())] * 3 + vdata_cx2 = self.add(vdata_c, vdata_c) + # multiply and add, a*b + c + data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)]) + fma = self.muladd(vdata_a, vdata_b, vdata_c) + assert fma == data_fma + # multiply and subtract, a*b - c + fms = self.mulsub(vdata_a, vdata_b, vdata_c) + data_fms = self.sub(data_fma, vdata_cx2) + assert fms == data_fms + # negate multiply and add, -(a*b) + c + nfma = self.nmuladd(vdata_a, vdata_b, vdata_c) + data_nfma = self.sub(vdata_cx2, data_fma) + assert nfma == data_nfma + # negate multiply and subtract, -(a*b) - c + nfms = self.nmulsub(vdata_a, vdata_b, vdata_c) + data_nfms = self.mul(data_fma, self.setall(-1)) + assert nfms == data_nfms + # multiply, add for odd elements and subtract even elements. 
+ # (a * b) -+ c + fmas = list(self.muladdsub(vdata_a, vdata_b, vdata_c)) + assert fmas[0::2] == list(data_fms)[0::2] + assert fmas[1::2] == list(data_fma)[1::2] + + def test_abs(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan)) + for case, desired in abs_cases: + data_abs = [desired] * self.nlanes + vabs = self.abs(self.setall(case)) + assert vabs == pytest.approx(data_abs, nan_ok=True) + + vabs = self.abs(self.mul(vdata, self.setall(-1))) + assert vabs == data + + def test_sqrt(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf)) + for case, desired in sqrt_cases: + data_sqrt = [desired] * self.nlanes + sqrt = self.sqrt(self.setall(case)) + assert sqrt == pytest.approx(data_sqrt, nan_ok=True) + + data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + sqrt = self.sqrt(vdata) + assert sqrt == data_sqrt + + def test_square(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + # square + square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf)) + for case, desired in square_cases: + data_square = [desired] * self.nlanes + square = self.square(self.setall(case)) + assert square == pytest.approx(data_square, nan_ok=True) + + data_square = [x * x for x in data] + square = self.square(vdata) + assert square == data_square + + @pytest.mark.parametrize("intrin, func", [("ceil", math.ceil), + ("trunc", math.trunc), ("floor", math.floor), ("rint", round)]) + def test_rounding(self, intrin, func): + """ + Test intrinsics: + npyv_rint_##SFX + npyv_ceil_##SFX + npyv_trunc_##SFX + npyv_floor##SFX + """ + intrin_name = intrin + intrin = getattr(self, intrin) + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + # special cases + round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf)) + for case, desired in round_cases: + data_round = [desired] * self.nlanes + _round = intrin(self.setall(case)) + assert _round == pytest.approx(data_round, nan_ok=True) + + for x in range(0, 2**20, 256**2): + for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15): + data = self.load([(x + a) * w for a in range(self.nlanes)]) + data_round = [func(x) for x in data] + _round = intrin(data) + assert _round == data_round + + # test large numbers + for i in ( + 1.1529215045988576e+18, 4.6116860183954304e+18, + 5.902958103546122e+20, 2.3611832414184488e+21 + ): + x = self.setall(i) + y = intrin(x) + data_round = [func(n) for n in x] + assert y == data_round + + # signed zero + if intrin_name == "floor": + data_szero = (-0.0,) + else: + data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5) + + for w in data_szero: + _round = self._to_unsigned(intrin(self.setall(w))) + data_round = self._to_unsigned(self.setall(-0.0)) + assert _round == data_round + + @pytest.mark.parametrize("intrin", [ + "max", "maxp", "maxn", "min", "minp", "minn" + ]) + def test_max_min(self, intrin): + """ + Test intrinsics: + npyv_max_##sfx + npyv_maxp_##sfx + npyv_maxn_##sfx + npyv_min_##sfx + npyv_minp_##sfx + npyv_minn_##sfx + npyv_reduce_max_##sfx + npyv_reduce_maxp_##sfx + npyv_reduce_maxn_##sfx + npyv_reduce_min_##sfx + npyv_reduce_minp_##sfx + npyv_reduce_minn_##sfx + """ + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), 
self._nan() + chk_nan = {"xp": 1, "np": 1, "nn": 2, "xn": 2}.get(intrin[-2:], 0) + func = eval(intrin[:3]) + reduce_intrin = getattr(self, "reduce_" + intrin) + intrin = getattr(self, intrin) + hf_nlanes = self.nlanes // 2 + + cases = ( + ([0.0, -0.0], [-0.0, 0.0]), + ([10, -10], [10, -10]), + ([pinf, 10], [10, ninf]), + ([10, pinf], [ninf, 10]), + ([10, -10], [10, -10]), + ([-10, 10], [-10, 10]) + ) + for op1, op2 in cases: + vdata_a = self.load(op1 * hf_nlanes) + vdata_b = self.load(op2 * hf_nlanes) + data = func(vdata_a, vdata_b) + simd = intrin(vdata_a, vdata_b) + assert simd == data + data = func(vdata_a) + simd = reduce_intrin(vdata_a) + assert simd == data + + if not chk_nan: + return + if chk_nan == 1: + test_nan = lambda a, b: ( + b if math.isnan(a) else a if math.isnan(b) else b + ) + else: + test_nan = lambda a, b: ( + nan if math.isnan(a) or math.isnan(b) else b + ) + cases = ( + (nan, 10), + (10, nan), + (nan, pinf), + (pinf, nan), + (nan, nan) + ) + for op1, op2 in cases: + vdata_ab = self.load([op1, op2] * hf_nlanes) + data = test_nan(op1, op2) + simd = reduce_intrin(vdata_ab) + assert simd == pytest.approx(data, nan_ok=True) + vdata_a = self.setall(op1) + vdata_b = self.setall(op2) + data = [data] * self.nlanes + simd = intrin(vdata_a, vdata_b) + assert simd == pytest.approx(data, nan_ok=True) + + def test_reciprocal(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf)) + for case, desired in recip_cases: + data_recip = [desired] * self.nlanes + recip = self.recip(self.setall(case)) + assert recip == pytest.approx(data_recip, nan_ok=True) + + data_recip = self.load([1 / x for x in data]) # load to truncate precision + recip = self.recip(vdata) + assert recip == data_recip + + def test_special_cases(self): + """ + Compare Not NaN. 
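+        npyv_notnan yields an all-zero lane wherever the element is NaN.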
Test intrinsics: + npyv_notnan_##SFX + """ + nnan = self.notnan(self.setall(self._nan())) + assert nnan == [0] * self.nlanes + + @pytest.mark.parametrize("intrin_name", [ + "rint", "trunc", "ceil", "floor" + ]) + def test_unary_invalid_fpexception(self, intrin_name): + intrin = getattr(self, intrin_name) + for d in [float("nan"), float("inf"), -float("inf")]: + v = self.setall(d) + clear_floatstatus() + intrin(v) + assert check_floatstatus(invalid=True) is False + + @pytest.mark.parametrize('py_comp,np_comp', [ + (operator.lt, "cmplt"), + (operator.le, "cmple"), + (operator.gt, "cmpgt"), + (operator.ge, "cmpge"), + (operator.eq, "cmpeq"), + (operator.ne, "cmpneq") + ]) + def test_comparison_with_nan(self, py_comp, np_comp): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + mask_true = self._true_mask() + + def to_bool(vector): + return [lane == mask_true for lane in vector] + + intrin = getattr(self, np_comp) + cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan), + (ninf, nan), (-0.0, +0.0)) + for case_operand1, case_operand2 in cmp_cases: + data_a = [case_operand1] * self.nlanes + data_b = [case_operand2] * self.nlanes + vdata_a = self.setall(case_operand1) + vdata_b = self.setall(case_operand2) + vcmp = to_bool(intrin(vdata_a, vdata_b)) + data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)] + assert vcmp == data_cmp + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [float("nan"), 0], + [0, float("nan")], + [float("nan"), 1], + [1, float("nan")], + [float("nan"), float("nan")], + [0.0, -0.0], + [-0.0, 0.0], + [1.0, -0.0] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self.load(data * self.nlanes) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + +class _SIMD_ALL(_Test_Utility): + """ + To test all vector types at once + """ + def test_memory_load(self): + data = self._data() + # unaligned load + load_data = self.load(data) + assert load_data == data + # aligned load + loada_data = self.loada(data) + assert loada_data == data + # stream load + loads_data = self.loads(data) + assert loads_data == data + # load lower part + loadl = self.loadl(data) + loadl_half = list(loadl)[:self.nlanes // 2] + data_half = data[:self.nlanes // 2] + assert loadl_half == data_half + assert loadl != data # detect overflow + + def test_memory_store(self): + data = self._data() + vdata = self.load(data) + # unaligned store + store = [0] * self.nlanes + self.store(store, vdata) + assert store == data + # aligned store + store_a = [0] * self.nlanes + self.storea(store_a, vdata) + assert store_a == data + # stream store + store_s = [0] * self.nlanes + self.stores(store_s, vdata) + assert store_s == data + # store lower part + store_l = [0] * self.nlanes + self.storel(store_l, vdata) + assert store_l[:self.nlanes // 2] == data[:self.nlanes // 2] + assert store_l != vdata # detect overflow + # store higher part + store_h = [0] * self.nlanes + self.storeh(store_h, vdata) + assert store_h[:self.nlanes // 2] == data[self.nlanes // 2:] + assert store_h != vdata # detect overflow + + @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ + ("self.load_tillz, self.load_till", (32, 64), 1, [0xffff]), + ("self.load2_tillz, self.load2_till", (32, 64), 2, [0xffff, 0x7fff]), + ]) + def test_memory_partial_load(self, intrin, elsizes, scale, fill): + if self._scalar_size() not 
in elsizes: + return + npyv_load_tillz, npyv_load_till = eval(intrin) + data = self._data() + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] # test out of range + for n in lanes: + load_till = npyv_load_till(data, n, *fill) + load_tillz = npyv_load_tillz(data, n) + n *= scale + data_till = data[:n] + fill * ((self.nlanes - n) // scale) + assert load_till == data_till + data_tillz = data[:n] + [0] * (self.nlanes - n) + assert load_tillz == data_tillz + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.store_till", (32, 64), 1), + ("self.store2_till", (32, 64), 2), + ]) + def test_memory_partial_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_store_till = eval(intrin) + data = self._data() + data_rev = self._data(reverse=True) + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for n in lanes: + data_till = data_rev.copy() + data_till[:n * scale] = data[:n * scale] + store_till = self._data(reverse=True) + npyv_store_till(store_till, n, vdata) + assert store_till == data_till + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.loadn", (32, 64), 1), + ("self.loadn2", (32, 64), 2), + ]) + def test_memory_noncont_load(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_loadn = eval(intrin) + for stride in range(-64, 64): + if stride < 0: + data = self._data(stride, -stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) + )) + elif stride == 0: + data = self._data() + data_stride = data[0:scale] * (self.nlanes // scale) + else: + data = self._data(count=stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[i::stride] for i in range(scale)])) + ) + data_stride = self.load(data_stride) # cast unsigned + loadn = npyv_loadn(data, stride) + assert loadn == data_stride + + @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ + ("self.loadn_tillz, self.loadn_till", (32, 64), 1, [0xffff]), + ("self.loadn2_tillz, self.loadn2_till", (32, 64), 2, [0xffff, 0x7fff]), + ]) + def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill): + if self._scalar_size() not in elsizes: + return + npyv_loadn_tillz, npyv_loadn_till = eval(intrin) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for stride in range(-64, 64): + if stride < 0: + data = self._data(stride, -stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) + )) + elif stride == 0: + data = self._data() + data_stride = data[0:scale] * (self.nlanes // scale) + else: + data = self._data(count=stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[i::stride] for i in range(scale)]) + )) + data_stride = list(self.load(data_stride)) # cast unsigned + for n in lanes: + nscale = n * scale + llanes = self.nlanes - nscale + data_stride_till = ( + data_stride[:nscale] + fill * (llanes // scale) + ) + loadn_till = npyv_loadn_till(data, stride, n, *fill) + assert loadn_till == data_stride_till + data_stride_tillz = data_stride[:nscale] + [0] * llanes + loadn_tillz = npyv_loadn_tillz(data, stride, n) + assert loadn_tillz == data_stride_tillz + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.storen", (32, 64), 1), + ("self.storen2", (32, 64), 2), + ]) + def test_memory_noncont_store(self, intrin, elsizes, scale): + if 
self._scalar_size() not in elsizes: + return + npyv_storen = eval(intrin) + data = self._data() + vdata = self.load(data) + hlanes = self.nlanes // scale + for stride in range(1, 64): + data_storen = [0xff] * stride * self.nlanes + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s:s + scale] = data[i:i + scale] + storen = [0xff] * stride * self.nlanes + storen += [0x7f] * 64 + npyv_storen(storen, stride, vdata) + assert storen[:-64] == data_storen + assert storen[-64:] == [0x7f] * 64 # detect overflow + + for stride in range(-64, 0): + data_storen = [0xff] * -stride * self.nlanes + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s - scale:s or None] = data[i:i + scale] + storen = [0x7f] * 64 + storen += [0xff] * -stride * self.nlanes + npyv_storen(storen, stride, vdata) + assert storen[64:] == data_storen + assert storen[:64] == [0x7f] * 64 # detect overflow + # stride 0 + data_storen = [0x7f] * self.nlanes + storen = data_storen.copy() + data_storen[0:scale] = data[-scale:] + npyv_storen(storen, 0, vdata) + assert storen == data_storen + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.storen_till", (32, 64), 1), + ("self.storen2_till", (32, 64), 2), + ]) + def test_memory_noncont_partial_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_storen_till = eval(intrin) + data = self._data() + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + hlanes = self.nlanes // scale + for stride in range(1, 64): + for n in lanes: + data_till = [0xff] * stride * self.nlanes + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s:s + scale] = tdata[i:i + scale] + storen_till = [0xff] * stride * self.nlanes + storen_till += [0x7f] * 64 + npyv_storen_till(storen_till, stride, n, vdata) + assert storen_till[:-64] == data_till + assert storen_till[-64:] == [0x7f] * 64 # detect overflow + + for stride in range(-64, 0): + for n in lanes: + data_till = [0xff] * -stride * self.nlanes + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s - scale:s or None] = tdata[i:i + scale] + storen_till = [0x7f] * 64 + storen_till += [0xff] * -stride * self.nlanes + npyv_storen_till(storen_till, stride, n, vdata) + assert storen_till[64:] == data_till + assert storen_till[:64] == [0x7f] * 64 # detect overflow + + # stride 0 + for n in lanes: + data_till = [0x7f] * self.nlanes + storen_till = data_till.copy() + data_till[0:scale] = data[:n * scale][-scale:] + npyv_storen_till(storen_till, 0, n, vdata) + assert storen_till == data_till + + @pytest.mark.parametrize("intrin, table_size, elsize", [ + ("self.lut32", 32, 32), + ("self.lut16", 16, 64) + ]) + def test_lut(self, intrin, table_size, elsize): + """ + Test lookup table intrinsics: + npyv_lut32_##sfx + npyv_lut16_##sfx + """ + if elsize != self._scalar_size(): + return + intrin = eval(intrin) + idx_itrin = getattr(self.npyv, f"setall_u{elsize}") + table = range(table_size) + for i in table: + broadi = self.setall(i) + idx = idx_itrin(i) + lut = intrin(table, idx) + assert lut == broadi + + def test_misc(self): + broadcast_zero = self.zero() + assert broadcast_zero == [0] * self.nlanes + for i in range(1, 10): + broadcasti = self.setall(i) + assert broadcasti == [i] * self.nlanes + + 
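+        # the remaining checks exercise set/setf, reinterpret casts,
+        # select/extract and cleanup on real data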
data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # py level of npyv_set_* don't support ignoring the extra specified lanes or + # fill non-specified lanes with zero. + vset = self.set(*data_a) + assert vset == data_a + # py level of npyv_setf_* don't support ignoring the extra specified lanes or + # fill non-specified lanes with the specified scalar. + vsetf = self.setf(10, *data_a) + assert vsetf == data_a + + # We're testing the sanity of _simd's type-vector, + # reinterpret* intrinsics itself are tested via compiler + # during the build of _simd module + sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64"] + if self.npyv.simd_f64: + sfxes.append("f64") + if self.npyv.simd_f32: + sfxes.append("f32") + for sfx in sfxes: + vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__ + assert vec_name == "npyv_" + sfx + + # select & mask operations + select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b) + assert select_a == data_a + select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b) + assert select_b == data_b + + # test extract elements + assert self.extract0(vdata_b) == vdata_b[0] + + # cleanup intrinsic is only used with AVX for + # zeroing registers to avoid the AVX-SSE transition penalty, + # so nothing to test here + self.npyv.cleanup() + + def test_reorder(self): + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + # lower half part + data_a_lo = data_a[:self.nlanes // 2] + data_b_lo = data_b[:self.nlanes // 2] + # higher half part + data_a_hi = data_a[self.nlanes // 2:] + data_b_hi = data_b[self.nlanes // 2:] + # combine two lower parts + combinel = self.combinel(vdata_a, vdata_b) + assert combinel == data_a_lo + data_b_lo + # combine two higher parts + combineh = self.combineh(vdata_a, vdata_b) + assert combineh == data_a_hi + data_b_hi + # combine x2 + combine = self.combine(vdata_a, vdata_b) + assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi) + + # zip(interleave) + data_zipl = self.load([ + v for p in zip(data_a_lo, data_b_lo) for v in p + ]) + data_ziph = self.load([ + v for p in zip(data_a_hi, data_b_hi) for v in p + ]) + vzip = self.zip(vdata_a, vdata_b) + assert vzip == (data_zipl, data_ziph) + vzip = [0] * self.nlanes * 2 + self._x2("store")(vzip, (vdata_a, vdata_b)) + assert vzip == list(data_zipl) + list(data_ziph) + + # unzip(deinterleave) + unzip = self.unzip(data_zipl, data_ziph) + assert unzip == (data_a, data_b) + unzip = self._x2("load")(list(data_zipl) + list(data_ziph)) + assert unzip == (data_a, data_b) + + def test_reorder_rev64(self): + # Reverse elements of each 64-bit lane + ssize = self._scalar_size() + if ssize == 64: + return + data_rev64 = [ + y for x in range(0, self.nlanes, 64 // ssize) + for y in reversed(range(x, x + 64 // ssize)) + ] + rev64 = self.rev64(self.load(range(self.nlanes))) + assert rev64 == data_rev64 + + def test_reorder_permi128(self): + """ + Test permuting elements for each 128-bit lane. 
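+        Test intrinsics: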
+ npyv_permi128_##sfx + """ + ssize = self._scalar_size() + if ssize < 32: + return + data = self.load(self._data()) + permn = 128 // ssize + permd = permn - 1 + nlane128 = self.nlanes // permn + shfl = [0, 1] if ssize == 64 else [0, 2, 4, 6] + for i in range(permn): + indices = [(i >> shf) & permd for shf in shfl] + vperm = self.permi128(data, *indices) + data_vperm = [ + data[j + (e & -permn)] + for e, j in enumerate(indices * nlane128) + ] + assert vperm == data_vperm + + @pytest.mark.parametrize('func, intrin', [ + (operator.lt, "cmplt"), + (operator.le, "cmple"), + (operator.gt, "cmpgt"), + (operator.ge, "cmpge"), + (operator.eq, "cmpeq") + ]) + def test_operators_comparison(self, func, intrin): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + intrin = getattr(self, intrin) + + mask_true = self._true_mask() + + def to_bool(vector): + return [lane == mask_true for lane in vector] + + data_cmp = [func(a, b) for a, b in zip(data_a, data_b)] + cmp = to_bool(intrin(vdata_a, vdata_b)) + assert cmp == data_cmp + + def test_operators_logical(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + if self._is_fp(): + data_cast_a = self._to_unsigned(vdata_a) + data_cast_b = self._to_unsigned(vdata_b) + cast, cast_data = self._to_unsigned, self._to_unsigned + else: + data_cast_a, data_cast_b = data_a, data_b + cast, cast_data = lambda a: a, self.load + + data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)]) + vxor = cast(self.xor(vdata_a, vdata_b)) + assert vxor == data_xor + + data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)]) + vor = cast(getattr(self, "or")(vdata_a, vdata_b)) + assert vor == data_or + + data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)]) + vand = cast(getattr(self, "and")(vdata_a, vdata_b)) + assert vand == data_and + + data_not = cast_data([~a for a in data_cast_a]) + vnot = cast(getattr(self, "not")(vdata_a)) + assert vnot == data_not + + if self.sfx not in ("u8"): + return + data_andc = [a & ~b for a, b in zip(data_cast_a, data_cast_b)] + vandc = cast(self.andc(vdata_a, vdata_b)) + assert vandc == data_andc + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [1, 2, 3, 4], + [-1, -2, -3, -4], + [0, 1, 2, 3, 4], + [0x7f, 0x7fff, 0x7fffffff, 0x7fffffffffffffff], + [0, -1, -2, -3, 4], + [0], + [1], + [-1] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self.load(data * self.nlanes) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + + def test_conversion_boolean(self): + bsfx = "b" + self.sfx[1:] + to_boolean = getattr(self.npyv, f"cvt_{bsfx}_{self.sfx}") + from_boolean = getattr(self.npyv, f"cvt_{self.sfx}_{bsfx}") + + false_vb = to_boolean(self.setall(0)) + true_vb = self.cmpeq(self.setall(0), self.setall(0)) + assert false_vb != true_vb + + false_vsfx = from_boolean(false_vb) + true_vsfx = from_boolean(true_vb) + assert false_vsfx != true_vsfx + + def test_conversion_expand(self): + """ + Test expand intrinsics: + npyv_expand_u16_u8 + npyv_expand_u32_u16 + """ + if self.sfx 
not in ("u8", "u16"): + return + totype = self.sfx[0] + str(int(self.sfx[1:]) * 2) + expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}") + # close enough from the edge to detect any deviation + data = self._data(self._int_max() - self.nlanes) + vdata = self.load(data) + edata = expand(vdata) + # lower half part + data_lo = data[:self.nlanes // 2] + # higher half part + data_hi = data[self.nlanes // 2:] + assert edata == (data_lo, data_hi) + + def test_arithmetic_subadd(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # non-saturated + data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast + add = self.add(vdata_a, vdata_b) + assert add == data_add + data_sub = self.load([a - b for a, b in zip(data_a, data_b)]) + sub = self.sub(vdata_a, vdata_b) + assert sub == data_sub + + def test_arithmetic_mul(self): + if self.sfx in ("u64", "s64"): + return + + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_mul = self.load([a * b for a, b in zip(data_a, data_b)]) + mul = self.mul(vdata_a, vdata_b) + assert mul == data_mul + + def test_arithmetic_div(self): + if not self._is_fp(): + return + + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # load to truncate f64 to precision of f32 + data_div = self.load([a / b for a, b in zip(data_a, data_b)]) + div = self.div(vdata_a, vdata_b) + assert div == data_div + + def test_arithmetic_intdiv(self): + """ + Test integer division intrinsics: + npyv_divisor_##sfx + npyv_divc_##sfx + """ + if self._is_fp(): + return + + int_min = self._int_min() + + def trunc_div(a, d): + """ + Divide towards zero works with large integers > 2^53, + and wrap around overflow similar to what C does. 
+ """ + if d == -1 and a == int_min: + return a + sign_a, sign_d = a < 0, d < 0 + if a == 0 or sign_a == sign_d: + return a // d + return (a + sign_d - sign_a) // d + 1 + + data = [1, -int_min] # to test overflow + data += range(0, 2**8, 2**5) + data += range(0, 2**8, 2**5 - 1) + bsize = self._scalar_size() + if bsize > 8: + data += range(2**8, 2**16, 2**13) + data += range(2**8, 2**16, 2**13 - 1) + if bsize > 16: + data += range(2**16, 2**32, 2**29) + data += range(2**16, 2**32, 2**29 - 1) + if bsize > 32: + data += range(2**32, 2**64, 2**61) + data += range(2**32, 2**64, 2**61 - 1) + # negate + data += [-x for x in data] + for dividend, divisor in itertools.product(data, data): + divisor = self.setall(divisor)[0] # cast + if divisor == 0: + continue + dividend = self.load(self._data(dividend)) + data_divc = [trunc_div(a, divisor) for a in dividend] + divisor_parms = self.divisor(divisor) + divc = self.divc(dividend, divisor_parms) + assert divc == data_divc + + def test_arithmetic_reduce_sum(self): + """ + Test reduce sum intrinsics: + npyv_sum_##sfx + """ + if self.sfx not in ("u32", "u64", "f32", "f64"): + return + # reduce sum + data = self._data() + vdata = self.load(data) + + data_sum = sum(data) + vsum = self.sum(vdata) + assert vsum == data_sum + + def test_arithmetic_reduce_sumup(self): + """ + Test extend reduce sum intrinsics: + npyv_sumup_##sfx + """ + if self.sfx not in ("u8", "u16"): + return + rdata = (0, self.nlanes, self._int_min(), self._int_max() - self.nlanes) + for r in rdata: + data = self._data(r) + vdata = self.load(data) + data_sum = sum(data) + vsum = self.sumup(vdata) + assert vsum == data_sum + + def test_mask_conditional(self): + """ + Conditional addition and subtraction for all supported data types. + Test intrinsics: + npyv_ifadd_##SFX, npyv_ifsub_##SFX + """ + vdata_a = self.load(self._data()) + vdata_b = self.load(self._data(reverse=True)) + true_mask = self.cmpeq(self.zero(), self.zero()) + false_mask = self.cmpneq(self.zero(), self.zero()) + + data_sub = self.sub(vdata_b, vdata_a) + ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b) + assert ifsub == data_sub + ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b) + assert ifsub == vdata_b + + data_add = self.add(vdata_b, vdata_a) + ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b) + assert ifadd == data_add + ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b) + assert ifadd == vdata_b + + if not self._is_fp(): + return + data_div = self.div(vdata_b, vdata_a) + ifdiv = self.ifdiv(true_mask, vdata_b, vdata_a, vdata_b) + assert ifdiv == data_div + ifdivz = self.ifdivz(true_mask, vdata_b, vdata_a) + assert ifdivz == data_div + ifdiv = self.ifdiv(false_mask, vdata_a, vdata_b, vdata_b) + assert ifdiv == vdata_b + ifdivz = self.ifdivz(false_mask, vdata_a, vdata_b) + assert ifdivz == self.zero() + + +bool_sfx = ("b8", "b16", "b32", "b64") +int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64") +fp_sfx = ("f32", "f64") +all_sfx = int_sfx + fp_sfx +tests_registry = { + bool_sfx: _SIMD_BOOL, + int_sfx: _SIMD_INT, + fp_sfx: _SIMD_FP, + ("f32",): _SIMD_FP32, + ("f64",): _SIMD_FP64, + all_sfx: _SIMD_ALL +} +for target_name, npyv in targets.items(): + simd_width = npyv.simd if npyv else '' + pretty_name = target_name.split('__') # multi-target separator + if len(pretty_name) > 1: + # multi-target + pretty_name = f"({' '.join(pretty_name)})" + else: + pretty_name = pretty_name[0] + + skip = "" + skip_sfx = {} + if not npyv: + skip = f"target '{pretty_name}' isn't supported by current 
machine" + elif not npyv.simd: + skip = f"target '{pretty_name}' isn't supported by NPYV" + else: + if not npyv.simd_f32: + skip_sfx["f32"] = f"target '{pretty_name}' "\ + "doesn't support single-precision" + if not npyv.simd_f64: + skip_sfx["f64"] = f"target '{pretty_name}' doesn't"\ + "support double-precision" + + for sfxes, cls in tests_registry.items(): + for sfx in sfxes: + skip_m = skip_sfx.get(sfx, skip) + inhr = (cls,) + attr = {"npyv": targets[target_name], "sfx": sfx, "target_name": target_name} + tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) + if skip_m: + pytest.mark.skip(reason=skip_m)(tcls) + globals()[tcls.__name__] = tcls diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_simd_module.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_simd_module.py new file mode 100644 index 0000000..dca83fd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_simd_module.py @@ -0,0 +1,103 @@ +import pytest + +from numpy._core._simd import targets + +""" +This testing unit only for checking the sanity of common functionality, +therefore all we need is just to take one submodule that represents any +of enabled SIMD extensions to run the test on it and the second submodule +required to run only one check related to the possibility of mixing +the data types among each submodule. +""" +npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd] +npyv, npyv2 = (npyvs + [None, None])[:2] + +unsigned_sfx = ["u8", "u16", "u32", "u64"] +signed_sfx = ["s8", "s16", "s32", "s64"] +fp_sfx = [] +if npyv and npyv.simd_f32: + fp_sfx.append("f32") +if npyv and npyv.simd_f64: + fp_sfx.append("f64") + +int_sfx = unsigned_sfx + signed_sfx +all_sfx = unsigned_sfx + int_sfx + +@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") +class Test_SIMD_MODULE: + + @pytest.mark.parametrize('sfx', all_sfx) + def test_num_lanes(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + vector = getattr(npyv, "setall_" + sfx)(1) + assert len(vector) == nlanes + + @pytest.mark.parametrize('sfx', all_sfx) + def test_type_name(self, sfx): + vector = getattr(npyv, "setall_" + sfx)(1) + assert vector.__name__ == "npyv_" + sfx + + def test_raises(self): + a, b = [npyv.setall_u32(1)] * 2 + for sfx in all_sfx: + vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}") + pytest.raises(TypeError, vcb("add"), a) + pytest.raises(TypeError, vcb("add"), a, b, a) + pytest.raises(TypeError, vcb("setall")) + pytest.raises(TypeError, vcb("setall"), [1]) + pytest.raises(TypeError, vcb("load"), 1) + pytest.raises(ValueError, vcb("load"), [1]) + pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + + @pytest.mark.skipif(not npyv2, reason=( + "could not find a second SIMD extension with NPYV support" + )) + def test_nomix(self): + # mix among submodules isn't allowed + a = npyv.setall_u32(1) + a2 = npyv2.setall_u32(1) + pytest.raises(TypeError, npyv.add_u32, a2, a2) + pytest.raises(TypeError, npyv2.add_u32, a, a) + + @pytest.mark.parametrize('sfx', unsigned_sfx) + def test_unsigned_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxu = (1 << int(sfx[1:])) - 1 + maxu_72 = (1 << 72) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0] + assert lane == maxu + lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes) + assert lanes == [maxu] * nlanes + lane = getattr(npyv, "setall_" + sfx)(-1)[0] + assert lane == maxu + lanes = getattr(npyv, "load_" 
+ sfx)([-1] * nlanes) + assert lanes == [maxu] * nlanes + + @pytest.mark.parametrize('sfx', signed_sfx) + def test_signed_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxs_72 = (1 << 71) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0] + assert lane == -1 + lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes) + assert lanes == [-1] * nlanes + mins_72 = -1 << 71 + lane = getattr(npyv, "setall_" + sfx)(mins_72)[0] + assert lane == 0 + lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes) + assert lanes == [0] * nlanes + + def test_truncate_f32(self): + if not npyv.simd_f32: + pytest.skip("F32 isn't support by the SIMD extension") + f32 = npyv.setall_f32(0.1)[0] + assert f32 != 0.1 + assert round(f32, 1) == 0.1 + + def test_compare(self): + data_range = range(npyv.nlanes_u32) + vdata = npyv.load_u32(data_range) + assert vdata == list(data_range) + assert vdata == tuple(data_range) + for i in data_range: + assert vdata[i] == data_range[i] diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_stringdtype.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_stringdtype.py new file mode 100644 index 0000000..e39d746 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_stringdtype.py @@ -0,0 +1,1807 @@ +import copy +import itertools +import os +import pickle +import sys +import tempfile + +import pytest + +import numpy as np +from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype +from numpy._core.tests._natype import pd_NA +from numpy.dtypes import StringDType +from numpy.testing import IS_PYPY, assert_array_equal + + +@pytest.fixture +def string_list(): + return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"] + + +# second copy for cast tests to do a cartesian product over dtypes +@pytest.fixture(params=[True, False]) +def coerce2(request): + return request.param + + +@pytest.fixture( + params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], + ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], +) +def na_object2(request): + return request.param + + +@pytest.fixture() +def dtype2(na_object2, coerce2): + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object2 is pd_NA or na_object2 != "unset": + return StringDType(na_object=na_object2, coerce=coerce2) + else: + return StringDType(coerce=coerce2) + + +def test_dtype_creation(): + hashes = set() + dt = StringDType() + assert not hasattr(dt, "na_object") and dt.coerce is True + hashes.add(hash(dt)) + + dt = StringDType(na_object=None) + assert dt.na_object is None and dt.coerce is True + hashes.add(hash(dt)) + + dt = StringDType(coerce=False) + assert not hasattr(dt, "na_object") and dt.coerce is False + hashes.add(hash(dt)) + + dt = StringDType(na_object=None, coerce=False) + assert dt.na_object is None and dt.coerce is False + hashes.add(hash(dt)) + + assert len(hashes) == 4 + + dt = np.dtype("T") + assert dt == StringDType() + assert dt.kind == "T" + assert dt.char == "T" + + hashes.add(hash(dt)) + assert len(hashes) == 4 + + +def test_dtype_equality(dtype): + assert dtype == dtype + for ch in "SU": + assert dtype != np.dtype(ch) + assert dtype != np.dtype(f"{ch}8") + + +def test_dtype_repr(dtype): + if not hasattr(dtype, "na_object") and dtype.coerce: + assert repr(dtype) == "StringDType()" + elif dtype.coerce: + assert repr(dtype) == f"StringDType(na_object={dtype.na_object!r})" + elif not hasattr(dtype, "na_object"): + assert repr(dtype) == "StringDType(coerce=False)" 
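+ # For reference, the four spellings distinguished by this test, shown
+ # doctest-style (a sketch, assuming a NumPy build that ships StringDType):
+ #   >>> from numpy.dtypes import StringDType
+ #   >>> StringDType()
+ #   StringDType()
+ #   >>> StringDType(na_object=None, coerce=False)
+ #   StringDType(na_object=None, coerce=False)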
+ else: + assert ( + repr(dtype) + == f"StringDType(na_object={dtype.na_object!r}, coerce=False)" + ) + + +def test_create_with_na(dtype): + if not hasattr(dtype, "na_object"): + pytest.skip("does not have an na object") + na_val = dtype.na_object + string_list = ["hello", na_val, "world"] + arr = np.array(string_list, dtype=dtype) + assert str(arr) == "[" + " ".join([repr(s) for s in string_list]) + "]" + assert arr[1] is dtype.na_object + + +@pytest.mark.parametrize("i", list(range(5))) +def test_set_replace_na(i): + # Test strings of various lengths can be set to NaN and then replaced. + s_empty = "" + s_short = "0123456789" + s_medium = "abcdefghijklmnopqrstuvwxyz" + s_long = "-=+" * 100 + strings = [s_medium, s_empty, s_short, s_medium, s_long] + a = np.array(strings, StringDType(na_object=np.nan)) + for s in [a[i], s_medium + s_short, s_short, s_empty, s_long]: + a[i] = np.nan + assert np.isnan(a[i]) + a[i] = s + assert a[i] == s + assert_array_equal(a, strings[:i] + [s] + strings[i + 1:]) + + +def test_null_roundtripping(): + data = ["hello\0world", "ABC\0DEF\0\0"] + arr = np.array(data, dtype="T") + assert data[0] == arr[0] + assert data[1] == arr[1] + + +def test_string_too_large_error(): + arr = np.array(["a", "b", "c"], dtype=StringDType()) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) + + +@pytest.mark.parametrize( + "data", + [ + ["abc", "def", "ghi"], + ["🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ], +) +def test_array_creation_utf8(dtype, data): + arr = np.array(data, dtype=dtype) + assert str(arr) == "[" + " ".join(["'" + str(d) + "'" for d in data]) + "]" + assert arr.dtype == dtype + + +@pytest.mark.parametrize( + "data", + [ + [1, 2, 3], + [b"abc", b"def", b"ghi"], + [object, object, object], + ], +) +def test_scalars_string_conversion(data, dtype): + try: + str_vals = [str(d.decode('utf-8')) for d in data] + except AttributeError: + str_vals = [str(d) for d in data] + if dtype.coerce: + assert_array_equal( + np.array(data, dtype=dtype), + np.array(str_vals, dtype=dtype), + ) + else: + with pytest.raises(ValueError): + np.array(data, dtype=dtype) + + +@pytest.mark.parametrize( + ("strings"), + [ + ["this", "is", "an", "array"], + ["€", "", "😊"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_self_casts(dtype, dtype2, strings): + if hasattr(dtype, "na_object"): + strings = strings + [dtype.na_object] + elif hasattr(dtype2, "na_object"): + strings = strings + [""] + arr = np.array(strings, dtype=dtype) + newarr = arr.astype(dtype2) + + if hasattr(dtype, "na_object") and not hasattr(dtype2, "na_object"): + assert newarr[-1] == str(dtype.na_object) + with pytest.raises(TypeError): + arr.astype(dtype2, casting="safe") + elif hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): + assert newarr[-1] is dtype2.na_object + arr.astype(dtype2, casting="safe") + elif hasattr(dtype2, "na_object"): + assert newarr[-1] == "" + arr.astype(dtype2, casting="safe") + else: + arr.astype(dtype2, casting="safe") + + if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): + na1 = dtype.na_object + na2 = dtype2.na_object + if (na1 is not na2 and + # check for pd_NA first because bool(pd_NA) is an error + ((na1 is pd_NA or na2 is pd_NA) or + # the second check is a NaN check, spelled this way + # to avoid errors from math.isnan and np.isnan + (na1 != na2 and not (na1 != na1 and na2 != na2)))): + with pytest.raises(TypeError): + arr[:-1] == newarr[:-1] + return + assert_array_equal(arr[:-1], newarr[:-1]) + + +@pytest.mark.parametrize( + 
("strings"), + [ + ["this", "is", "an", "array"], + ["€", "", "😊"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +class TestStringLikeCasts: + def test_unicode_casts(self, dtype, strings): + arr = np.array(strings, dtype=np.str_).astype(dtype) + expected = np.array(strings, dtype=dtype) + assert_array_equal(arr, expected) + + arr_as_U8 = expected.astype("U8") + assert_array_equal(arr_as_U8, np.array(strings, dtype="U8")) + assert_array_equal(arr_as_U8.astype(dtype), arr) + arr_as_U3 = expected.astype("U3") + assert_array_equal(arr_as_U3, np.array(strings, dtype="U3")) + assert_array_equal( + arr_as_U3.astype(dtype), + np.array([s[:3] for s in strings], dtype=dtype), + ) + + def test_void_casts(self, dtype, strings): + sarr = np.array(strings, dtype=dtype) + utf8_bytes = [s.encode("utf-8") for s in strings] + void_dtype = f"V{max(len(s) for s in utf8_bytes)}" + varr = np.array(utf8_bytes, dtype=void_dtype) + assert_array_equal(varr, sarr.astype(void_dtype)) + assert_array_equal(varr.astype(dtype), sarr) + + def test_bytes_casts(self, dtype, strings): + sarr = np.array(strings, dtype=dtype) + try: + utf8_bytes = [s.encode("ascii") for s in strings] + bytes_dtype = f"S{max(len(s) for s in utf8_bytes)}" + barr = np.array(utf8_bytes, dtype=bytes_dtype) + assert_array_equal(barr, sarr.astype(bytes_dtype)) + assert_array_equal(barr.astype(dtype), sarr) + if dtype.coerce: + barr = np.array(utf8_bytes, dtype=dtype) + assert_array_equal(barr, sarr) + barr = np.array(utf8_bytes, dtype="O") + assert_array_equal(barr.astype(dtype), sarr) + else: + with pytest.raises(ValueError): + np.array(utf8_bytes, dtype=dtype) + except UnicodeEncodeError: + with pytest.raises(UnicodeEncodeError): + sarr.astype("S20") + + +def test_additional_unicode_cast(random_string_list, dtype): + arr = np.array(random_string_list, dtype=dtype) + # test that this short-circuits correctly + assert_array_equal(arr, arr.astype(arr.dtype)) + # tests the casts via the comparison promoter + assert_array_equal(arr, arr.astype(random_string_list.dtype)) + + +def test_insert_scalar(dtype, string_list): + """Test that inserting a scalar works.""" + arr = np.array(string_list, dtype=dtype) + scalar_instance = "what" + arr[1] = scalar_instance + assert_array_equal( + arr, + np.array(string_list[:1] + ["what"] + string_list[2:], dtype=dtype), + ) + + +comparison_operators = [ + np.equal, + np.not_equal, + np.greater, + np.greater_equal, + np.less, + np.less_equal, +] + + +@pytest.mark.parametrize("op", comparison_operators) +@pytest.mark.parametrize("o_dtype", [np.str_, object, StringDType()]) +def test_comparisons(string_list, dtype, op, o_dtype): + sarr = np.array(string_list, dtype=dtype) + oarr = np.array(string_list, dtype=o_dtype) + + # test that comparison operators work + res = op(sarr, sarr) + ores = op(oarr, oarr) + # test that promotion works as well + orres = op(sarr, oarr) + olres = op(oarr, sarr) + + assert_array_equal(res, ores) + assert_array_equal(res, orres) + assert_array_equal(res, olres) + + # test we get the correct answer for unequal length strings + sarr2 = np.array([s + "2" for s in string_list], dtype=dtype) + oarr2 = np.array([s + "2" for s in string_list], dtype=o_dtype) + + res = op(sarr, sarr2) + ores = op(oarr, oarr2) + olres = op(oarr, sarr2) + orres = op(sarr, oarr2) + + assert_array_equal(res, ores) + assert_array_equal(res, olres) + assert_array_equal(res, orres) + + res = op(sarr2, sarr) + ores = op(oarr2, oarr) + olres = op(oarr2, sarr) + orres = op(sarr2, oarr) + + assert_array_equal(res, ores) + 
assert_array_equal(res, olres) + assert_array_equal(res, orres) + + +def test_isnan(dtype, string_list): + if not hasattr(dtype, "na_object"): + pytest.skip("no na support") + sarr = np.array(string_list + [dtype.na_object], dtype=dtype) + is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object) + bool_errors = 0 + try: + bool(dtype.na_object) + except TypeError: + bool_errors = 1 + if is_nan or bool_errors: + # isnan is only true when na_object is a NaN + assert_array_equal( + np.isnan(sarr), + np.array([0] * len(string_list) + [1], dtype=np.bool), + ) + else: + assert not np.any(np.isnan(sarr)) + + +def test_pickle(dtype, string_list): + arr = np.array(string_list, dtype=dtype) + + with tempfile.NamedTemporaryFile("wb", delete=False) as f: + pickle.dump([arr, dtype], f) + + with open(f.name, "rb") as f: + res = pickle.load(f) + + assert_array_equal(res[0], arr) + assert res[1] == dtype + + os.remove(f.name) + + +def test_stdlib_copy(dtype, string_list): + arr = np.array(string_list, dtype=dtype) + + assert_array_equal(copy.copy(arr), arr) + assert_array_equal(copy.deepcopy(arr), arr) + + +@pytest.mark.parametrize( + "strings", + [ + ["left", "right", "leftovers", "righty", "up", "down"], + [ + "left" * 10, + "right" * 10, + "leftovers" * 10, + "righty" * 10, + "up" * 10, + ], + ["🤣🤣", "🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_sort(dtype, strings): + """Test that sorting matches python's internal sorting.""" + + def test_sort(strings, arr_sorted): + arr = np.array(strings, dtype=dtype) + na_object = getattr(arr.dtype, "na_object", "") + if na_object is None and None in strings: + with pytest.raises( + ValueError, + match="Cannot compare null that is not a nan-like value", + ): + np.argsort(arr) + argsorted = None + elif na_object is pd_NA or na_object != '': + argsorted = None + else: + argsorted = np.argsort(arr) + np.random.default_rng().shuffle(arr) + if na_object is None and None in strings: + with pytest.raises( + ValueError, + match="Cannot compare null that is not a nan-like value", + ): + arr.sort() + else: + arr.sort() + assert np.array_equal(arr, arr_sorted, equal_nan=True) + if argsorted is not None: + assert np.array_equal(argsorted, np.argsort(strings)) + + # make a copy so we don't mutate the lists in the fixture + strings = strings.copy() + arr_sorted = np.array(sorted(strings), dtype=dtype) + test_sort(strings, arr_sorted) + + if not hasattr(dtype, "na_object"): + return + + # make sure NAs get sorted to the end of the array and string NAs get + # sorted like normal strings + strings.insert(0, dtype.na_object) + strings.insert(2, dtype.na_object) + # can't use append because doing that with NA converts + # the result to object dtype + if not isinstance(dtype.na_object, str): + arr_sorted = np.array( + arr_sorted.tolist() + [dtype.na_object, dtype.na_object], + dtype=dtype, + ) + else: + arr_sorted = np.array(sorted(strings), dtype=dtype) + + test_sort(strings, arr_sorted) + + +@pytest.mark.parametrize( + "strings", + [ + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ["A¢☃€ 😊", "", " ", " "], + ["", "a", "😸", "ááðfáíóåéë"], + ], +) +def test_nonzero(strings, na_object): + dtype = get_dtype(na_object) + arr = np.array(strings, dtype=dtype) + is_nonzero = np.array( + [i for i, item in enumerate(strings) if len(item) != 0]) + assert_array_equal(arr.nonzero()[0], is_nonzero) + + if na_object is not pd_NA and na_object == 'unset': + return + + strings_with_na = np.array(strings + 
[na_object], dtype=dtype) + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + + if is_nan: + assert strings_with_na.nonzero()[0][-1] == 4 + else: + assert strings_with_na.nonzero()[0][-1] == 3 + + # check that the casting to bool and nonzero give consistent results + assert_array_equal(strings_with_na[strings_with_na.nonzero()], + strings_with_na[strings_with_na.astype(bool)]) + + +def test_where(string_list, na_object): + dtype = get_dtype(na_object) + a = np.array(string_list, dtype=dtype) + b = a[::-1] + res = np.where([True, False, True, False, True, False], a, b) + assert_array_equal(res, [a[0], b[1], a[2], b[3], a[4], b[5]]) + + +def test_fancy_indexing(string_list): + sarr = np.array(string_list, dtype="T") + assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) + + inds = [ + [True, True], + [0, 1], + ..., + np.array([0, 1], dtype='uint8'), + ] + + lops = [ + ['a' * 25, 'b' * 25], + ['', ''], + ['hello', 'world'], + ['hello', 'world' * 25], + ] + + # see gh-27003 and gh-27053 + for ind in inds: + for lop in lops: + a = np.array(lop, dtype="T") + assert_array_equal(a[ind], a) + rop = ['d' * 25, 'e' * 25] + for b in [rop, np.array(rop, dtype="T")]: + a[ind] = b + assert_array_equal(a, b) + assert a[0] == 'd' * 25 + + # see gh-29279 + data = [ + ["AAAAAAAAAAAAAAAAA"], + ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"], + ["CCCCCCCCCCCCCCCCC"], + ["DDDDDDDDDDDDDDDDD"], + ] + sarr = np.array(data, dtype=np.dtypes.StringDType()) + uarr = np.array(data, dtype="U30") + for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 3]], [[1, 1]]]: + assert_array_equal(sarr[ind], uarr[ind]) + + +def test_creation_functions(): + assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) + assert_array_equal(np.empty(3, dtype="T"), ["", "", ""]) + + assert np.zeros(3, dtype="T")[0] == "" + assert np.empty(3, dtype="T")[0] == "" + + +def test_concatenate(string_list): + sarr = np.array(string_list, dtype="T") + sarr_cat = np.array(string_list + string_list, dtype="T") + + assert_array_equal(np.concatenate([sarr], axis=0), sarr) + + +def test_resize_method(string_list): + sarr = np.array(string_list, dtype="T") + if IS_PYPY: + sarr.resize(len(string_list) + 3, refcheck=False) + else: + sarr.resize(len(string_list) + 3) + assert_array_equal(sarr, np.array(string_list + [''] * 3, dtype="T")) + + +def test_create_with_copy_none(string_list): + arr = np.array(string_list, dtype=StringDType()) + # create another stringdtype array with an arena that has a different + # in-memory layout than the first array + arr_rev = np.array(string_list[::-1], dtype=StringDType()) + + # this should create a copy and the resulting array + # shouldn't share an allocator or arena with arr_rev, despite + # explicitly passing arr_rev.dtype + arr_copy = np.array(arr, copy=None, dtype=arr_rev.dtype) + np.testing.assert_array_equal(arr, arr_copy) + assert arr_copy.base is None + + with pytest.raises(ValueError, match="Unable to avoid copy"): + np.array(arr, copy=False, dtype=arr_rev.dtype) + + # because we're using arr's dtype instance, the view is safe + arr_view = np.array(arr, copy=None, dtype=arr.dtype) + np.testing.assert_array_equal(arr, arr) + np.testing.assert_array_equal(arr_view[::-1], arr_rev) + assert arr_view is arr + + +def test_astype_copy_false(): + orig_dt = StringDType() + arr = np.array(["hello", "world"], dtype=StringDType()) + assert not arr.astype(StringDType(coerce=False), copy=False).dtype.coerce + + assert arr.astype(orig_dt, copy=False).dtype is orig_dt + +@pytest.mark.parametrize( + "strings", + [ + ["left", 
"right", "leftovers", "righty", "up", "down"], + ["🤣🤣", "🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_argmax(strings): + """Test that argmax/argmin matches what python calculates.""" + arr = np.array(strings, dtype="T") + assert np.argmax(arr) == strings.index(max(strings)) + assert np.argmin(arr) == strings.index(min(strings)) + + +@pytest.mark.parametrize( + "arrfunc,expected", + [ + [np.sort, None], + [np.nonzero, (np.array([], dtype=np.int_),)], + [np.argmax, 0], + [np.argmin, 0], + ], +) +def test_arrfuncs_zeros(arrfunc, expected): + arr = np.zeros(10, dtype="T") + result = arrfunc(arr) + if expected is None: + expected = arr + assert_array_equal(result, expected, strict=True) + + +@pytest.mark.parametrize( + ("strings", "cast_answer", "any_answer", "all_answer"), + [ + [["hello", "world"], [True, True], True, True], + [["", ""], [False, False], False, False], + [["hello", ""], [True, False], True, False], + [["", "world"], [False, True], True, False], + ], +) +def test_cast_to_bool(strings, cast_answer, any_answer, all_answer): + sarr = np.array(strings, dtype="T") + assert_array_equal(sarr.astype("bool"), cast_answer) + + assert np.any(sarr) == any_answer + assert np.all(sarr) == all_answer + + +@pytest.mark.parametrize( + ("strings", "cast_answer"), + [ + [[True, True], ["True", "True"]], + [[False, False], ["False", "False"]], + [[True, False], ["True", "False"]], + [[False, True], ["False", "True"]], + ], +) +def test_cast_from_bool(strings, cast_answer): + barr = np.array(strings, dtype=bool) + assert_array_equal(barr.astype("T"), np.array(cast_answer, dtype="T")) + + +@pytest.mark.parametrize("bitsize", [8, 16, 32, 64]) +@pytest.mark.parametrize("signed", [True, False]) +def test_sized_integer_casts(bitsize, signed): + idtype = f"int{bitsize}" + if signed: + inp = [-(2**p - 1) for p in reversed(range(bitsize - 1))] + inp += [2**p - 1 for p in range(1, bitsize - 1)] + else: + idtype = "u" + idtype + inp = [2**p - 1 for p in range(bitsize)] + ainp = np.array(inp, dtype=idtype) + assert_array_equal(ainp, ainp.astype("T").astype(idtype)) + + # safe casting works + ainp.astype("T", casting="safe") + + with pytest.raises(TypeError): + ainp.astype("T").astype(idtype, casting="safe") + + oob = [str(2**bitsize), str(-(2**bitsize))] + with pytest.raises(OverflowError): + np.array(oob, dtype="T").astype(idtype) + + with pytest.raises(ValueError): + np.array(["1", np.nan, "3"], + dtype=StringDType(na_object=np.nan)).astype(idtype) + + +@pytest.mark.parametrize("typename", ["byte", "short", "int", "longlong"]) +@pytest.mark.parametrize("signed", ["", "u"]) +def test_unsized_integer_casts(typename, signed): + idtype = f"{signed}{typename}" + + inp = [1, 2, 3, 4] + ainp = np.array(inp, dtype=idtype) + assert_array_equal(ainp, ainp.astype("T").astype(idtype)) + + +@pytest.mark.parametrize( + "typename", + [ + pytest.param( + "longdouble", + marks=pytest.mark.xfail( + np.dtypes.LongDoubleDType() != np.dtypes.Float64DType(), + reason="numpy lacks an ld2a implementation", + strict=True, + ), + ), + "float64", + "float32", + "float16", + ], +) +def test_float_casts(typename): + inp = [1.1, 2.8, -3.2, 2.7e4] + ainp = np.array(inp, dtype=typename) + assert_array_equal(ainp, ainp.astype("T").astype(typename)) + + inp = [0.1] + sres = np.array(inp, dtype=typename).astype("T") + res = sres.astype(typename) + assert_array_equal(np.array(inp, dtype=typename), res) + assert sres[0] == "0.1" + + if typename == "longdouble": + # let's not 
worry about platform-dependent rounding of longdouble + return + + fi = np.finfo(typename) + + inp = [1e-324, fi.smallest_subnormal, -1e-324, -fi.smallest_subnormal] + eres = [0, fi.smallest_subnormal, -0, -fi.smallest_subnormal] + res = np.array(inp, dtype=typename).astype("T").astype(typename) + assert_array_equal(eres, res) + + inp = [2e308, fi.max, -2e308, fi.min] + eres = [np.inf, fi.max, -np.inf, fi.min] + res = np.array(inp, dtype=typename).astype("T").astype(typename) + assert_array_equal(eres, res) + + +def test_float_nan_cast_na_object(): + # gh-28157 + dt = np.dtypes.StringDType(na_object=np.nan) + arr1 = np.full((1,), fill_value=np.nan, dtype=dt) + arr2 = np.full_like(arr1, fill_value=np.nan) + + assert arr1.item() is np.nan + assert arr2.item() is np.nan + + inp = [1.2, 2.3, np.nan] + arr = np.array(inp).astype(dt) + assert arr[2] is np.nan + assert arr[0] == '1.2' + + +@pytest.mark.parametrize( + "typename", + [ + "csingle", + "cdouble", + pytest.param( + "clongdouble", + marks=pytest.mark.xfail( + np.dtypes.CLongDoubleDType() != np.dtypes.Complex128DType(), + reason="numpy lacks an ld2a implementation", + strict=True, + ), + ), + ], +) +def test_cfloat_casts(typename): + inp = [1.1 + 1.1j, 2.8 + 2.8j, -3.2 - 3.2j, 2.7e4 + 2.7e4j] + ainp = np.array(inp, dtype=typename) + assert_array_equal(ainp, ainp.astype("T").astype(typename)) + + inp = [0.1 + 0.1j] + sres = np.array(inp, dtype=typename).astype("T") + res = sres.astype(typename) + assert_array_equal(np.array(inp, dtype=typename), res) + assert sres[0] == "(0.1+0.1j)" + + +def test_take(string_list): + sarr = np.array(string_list, dtype="T") + res = sarr.take(np.arange(len(string_list))) + assert_array_equal(sarr, res) + + # make sure it also works for out + out = np.empty(len(string_list), dtype="T") + out[0] = "hello" + res = sarr.take(np.arange(len(string_list)), out=out) + assert res is out + assert_array_equal(sarr, res) + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize( + "ufunc_name,func", + [ + ("min", min), + ("max", max), + ], +) +def test_ufuncs_minmax(string_list, ufunc_name, func, use_out): + """Test that the min/max ufuncs match Python builtin min/max behavior.""" + arr = np.array(string_list, dtype="T") + uarr = np.array(string_list, dtype=str) + res = np.array(func(string_list), dtype="T") + assert_array_equal(getattr(arr, ufunc_name)(), res) + + ufunc = getattr(np, ufunc_name + "imum") + + if use_out: + res = ufunc(arr, arr, out=arr) + else: + res = ufunc(arr, arr) + + assert_array_equal(uarr, res) + assert_array_equal(getattr(arr, ufunc_name)(), func(string_list)) + + +def test_max_regression(): + arr = np.array(['y', 'y', 'z'], dtype="T") + assert arr.max() == 'z' + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize( + "other_strings", + [ + ["abc", "def" * 500, "ghi" * 16, "🤣" * 100, "📵", "😰"], + ["🚜", "🙃", "😾", "😹", "🚠", "🚌"], + ["🥦", "¨", "⨯", "∰ ", "⨌ ", "⎶ "], + ], +) +def test_ufunc_add(dtype, string_list, other_strings, use_out): + arr1 = np.array(string_list, dtype=dtype) + arr2 = np.array(other_strings, dtype=dtype) + result = np.array([a + b for a, b in zip(arr1, arr2)], dtype=dtype) + + if use_out: + res = np.add(arr1, arr2, out=arr1) + else: + res = np.add(arr1, arr2) + + assert_array_equal(res, result) + + if not hasattr(dtype, "na_object"): + return + + is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object) + is_str = isinstance(dtype.na_object, str) + bool_errors = 0 + try: + bool(dtype.na_object) + except 
TypeError: + bool_errors = 1 + + arr1 = np.array([dtype.na_object] + string_list, dtype=dtype) + arr2 = np.array(other_strings + [dtype.na_object], dtype=dtype) + + if is_nan or bool_errors or is_str: + res = np.add(arr1, arr2) + assert_array_equal(res[1:-1], arr1[1:-1] + arr2[1:-1]) + if not is_str: + assert res[0] is dtype.na_object and res[-1] is dtype.na_object + else: + assert res[0] == dtype.na_object + arr2[0] + assert res[-1] == arr1[-1] + dtype.na_object + else: + with pytest.raises(ValueError): + np.add(arr1, arr2) + + +def test_ufunc_add_reduce(dtype): + values = ["a", "this is a long string", "c"] + arr = np.array(values, dtype=dtype) + out = np.empty((), dtype=dtype) + + expected = np.array("".join(values), dtype=dtype) + assert_array_equal(np.add.reduce(arr), expected) + + np.add.reduce(arr, out=out) + assert_array_equal(out, expected) + + +def test_add_promoter(string_list): + arr = np.array(string_list, dtype=StringDType()) + lresult = np.array(["hello" + s for s in string_list], dtype=StringDType()) + rresult = np.array([s + "hello" for s in string_list], dtype=StringDType()) + + for op in ["hello", np.str_("hello"), np.array(["hello"])]: + assert_array_equal(op + arr, lresult) + assert_array_equal(arr + op, rresult) + + # The promoter should be able to handle things if users pass `dtype=` + res = np.add("hello", string_list, dtype=StringDType) + assert res.dtype == StringDType() + + # The promoter should not kick in if users override the input, + # which means arr is cast, this fails because of the unknown length. + with pytest.raises(TypeError, match="cannot cast dtype"): + np.add(arr, "add", signature=("U", "U", None), casting="unsafe") + + # But it must simply reject the following: + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add(arr, "add", signature=(None, "U", None)) + + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("a", "b", signature=("U", "U", StringDType)) + + +def test_add_no_legacy_promote_with_signature(): + # Possibly misplaced, but useful to test with string DType. We check that + # if there is clearly no loop found, a stray `dtype=` doesn't break things + # Regression test for the bad error in gh-26735 + # (If legacy promotion is gone, this can be deleted...) + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("3", 6, dtype=StringDType) + + +def test_add_promoter_reduce(): + # Exact TypeError could change, but ensure StringDtype doesn't match + with pytest.raises(TypeError, match="the resolved dtypes are not"): + np.add.reduce(np.array(["a", "b"], dtype="U")) + + # On the other hand, using `dtype=T` in the *ufunc* should work. + np.add.reduce(np.array(["a", "b"], dtype="U"), dtype=np.dtypes.StringDType) + + +def test_multiply_reduce(): + # At the time of writing (NumPy 2.0) this is very limited (and rather + # ridiculous anyway). But it works and actually makes some sense... 
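+ # Concretely, with repeats = [2, 3, 4] the reduction computes
+ # ((initial * 2) * 3) * 4, i.e. the initial string repeated
+ # np.prod(repeats) == 24 times. A sketch of the call under test:
+ #   >>> np.multiply.reduce(np.array([2, 3, 4]), initial="school-🚌",
+ #   ...                    dtype=np.dtypes.StringDType)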
+ # (NumPy does not allow non-scalar initial values) + repeats = np.array([2, 3, 4]) + val = "school-🚌" + res = np.multiply.reduce(repeats, initial=val, dtype=np.dtypes.StringDType) + assert res == val * np.prod(repeats) + + +def test_multiply_two_string_raises(): + arr = np.array(["hello", "world"], dtype="T") + with pytest.raises(np._core._exceptions._UFuncNoLoopError): + np.multiply(arr, arr) + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]]) +@pytest.mark.parametrize( + "other_dtype", + [ + None, + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + "short", + "int", + "intp", + "long", + "longlong", + "ushort", + "uint", + "uintp", + "ulong", + "ulonglong", + ], +) +def test_ufunc_multiply(dtype, string_list, other, other_dtype, use_out): + """Test the two-argument ufuncs match python builtin behavior.""" + arr = np.array(string_list, dtype=dtype) + if other_dtype is not None: + other_dtype = np.dtype(other_dtype) + try: + len(other) + result = [s * o for s, o in zip(string_list, other)] + other = np.array(other) + if other_dtype is not None: + other = other.astype(other_dtype) + except TypeError: + if other_dtype is not None: + other = other_dtype.type(other) + result = [s * other for s in string_list] + + if use_out: + arr_cache = arr.copy() + lres = np.multiply(arr, other, out=arr) + assert_array_equal(lres, result) + arr[:] = arr_cache + assert lres is arr + arr *= other + assert_array_equal(arr, result) + arr[:] = arr_cache + rres = np.multiply(other, arr, out=arr) + assert rres is arr + assert_array_equal(rres, result) + else: + lres = arr * other + assert_array_equal(lres, result) + rres = other * arr + assert_array_equal(rres, result) + + if not hasattr(dtype, "na_object"): + return + + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + bool_errors = 0 + try: + bool(dtype.na_object) + except TypeError: + bool_errors = 1 + + arr = np.array(string_list + [dtype.na_object], dtype=dtype) + + try: + len(other) + other = np.append(other, 3) + if other_dtype is not None: + other = other.astype(other_dtype) + except TypeError: + pass + + if is_nan or bool_errors or is_str: + for res in [arr * other, other * arr]: + assert_array_equal(res[:-1], result) + if not is_str: + assert res[-1] is dtype.na_object + else: + try: + assert res[-1] == dtype.na_object * other[-1] + except (IndexError, TypeError): + assert res[-1] == dtype.na_object * other + else: + with pytest.raises(TypeError): + arr * other + with pytest.raises(TypeError): + other * arr + + +def test_findlike_promoters(): + r = "Wally" + l = "Where's Wally?" + s = np.int32(3) + e = np.int8(13) + for dtypes in [("T", "U"), ("U", "T")]: + for function, answer in [ + (np.strings.index, 8), + (np.strings.endswith, True), + ]: + assert answer == function( + np.array(l, dtype=dtypes[0]), np.array(r, dtype=dtypes[1]), s, e + ) + + +def test_strip_promoter(): + arg = ["Hello!!!!", "Hello??!!"] + strip_char = "!" 
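+ # The promoter under test resolves any mix of "T" and "U" operands to a
+ # StringDType ("T") result, so both dtype orders below must come back as
+ # "T". A minimal sketch of that property:
+ #   >>> np.strings.strip(np.array(["Hi!!"], dtype="T"), "!").dtype.char
+ #   'T'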
+ answer = ["Hello", "Hello??"] + for dtypes in [("T", "U"), ("U", "T")]: + result = np.strings.strip( + np.array(arg, dtype=dtypes[0]), + np.array(strip_char, dtype=dtypes[1]) + ) + assert_array_equal(result, answer) + assert result.dtype.char == "T" + + +def test_replace_promoter(): + arg = ["Hello, planet!", "planet, Hello!"] + old = "planet" + new = "world" + answer = ["Hello, world!", "world, Hello!"] + for dtypes in itertools.product("TU", repeat=3): + if dtypes == ("U", "U", "U"): + continue + answer_arr = np.strings.replace( + np.array(arg, dtype=dtypes[0]), + np.array(old, dtype=dtypes[1]), + np.array(new, dtype=dtypes[2]), + ) + assert_array_equal(answer_arr, answer) + assert answer_arr.dtype.char == "T" + + +def test_center_promoter(): + arg = ["Hello", "planet!"] + fillchar = "/" + for dtypes in [("T", "U"), ("U", "T")]: + answer = np.strings.center( + np.array(arg, dtype=dtypes[0]), 9, np.array(fillchar, dtype=dtypes[1]) + ) + assert_array_equal(answer, ["//Hello//", "/planet!/"]) + assert answer.dtype.char == "T" + + +DATETIME_INPUT = [ + np.datetime64("1923-04-14T12:43:12"), + np.datetime64("1994-06-21T14:43:15"), + np.datetime64("2001-10-15T04:10:32"), + np.datetime64("NaT"), + np.datetime64("1995-11-25T16:02:16"), + np.datetime64("2005-01-04T03:14:12"), + np.datetime64("2041-12-03T14:05:03"), +] + + +TIMEDELTA_INPUT = [ + np.timedelta64(12358, "s"), + np.timedelta64(23, "s"), + np.timedelta64(74, "s"), + np.timedelta64("NaT"), + np.timedelta64(23, "s"), + np.timedelta64(73, "s"), + np.timedelta64(7, "s"), +] + + +@pytest.mark.parametrize( + "input_data, input_dtype", + [ + (DATETIME_INPUT, "M8[s]"), + (TIMEDELTA_INPUT, "m8[s]") + ] +) +def test_datetime_timedelta_cast(dtype, input_data, input_dtype): + + a = np.array(input_data, dtype=input_dtype) + + has_na = hasattr(dtype, "na_object") + is_str = isinstance(getattr(dtype, "na_object", None), str) + + if not has_na or is_str: + a = np.delete(a, 3) + + sa = a.astype(dtype) + ra = sa.astype(a.dtype) + + if has_na and not is_str: + assert sa[3] is dtype.na_object + assert np.isnat(ra[3]) + + assert_array_equal(a, ra) + + if has_na and not is_str: + # don't worry about comparing how NaT is converted + sa = np.delete(sa, 3) + a = np.delete(a, 3) + + if input_dtype.startswith("M"): + assert_array_equal(sa, a.astype("U")) + else: + # The timedelta to unicode cast produces strings + # that aren't round-trippable and we don't want to + # reproduce that behavior in stringdtype + assert_array_equal(sa, a.astype("int64").astype("U")) + + +def test_nat_casts(): + s = 'nat' + all_nats = itertools.product(*zip(s.upper(), s.lower())) + all_nats = list(map(''.join, all_nats)) + NaT_dt = np.datetime64('NaT') + NaT_td = np.timedelta64('NaT') + for na_object in [np._NoValue, None, np.nan, 'nat', '']: + # numpy treats empty string and all case combinations of 'nat' as NaT + dtype = StringDType(na_object=na_object) + arr = np.array([''] + all_nats, dtype=dtype) + dt_array = arr.astype('M8[s]') + td_array = arr.astype('m8[s]') + assert_array_equal(dt_array, NaT_dt) + assert_array_equal(td_array, NaT_td) + + if na_object is np._NoValue: + output_object = 'NaT' + else: + output_object = na_object + + for arr in [dt_array, td_array]: + assert_array_equal( + arr.astype(dtype), + np.array([output_object] * arr.size, dtype=dtype)) + + +def test_nat_conversion(): + for nat in [np.datetime64("NaT", "s"), np.timedelta64("NaT", "s")]: + with pytest.raises(ValueError, match="string coercion is disabled"): + np.array(["a", nat], 
dtype=StringDType(coerce=False)) + + +def test_growing_strings(dtype): + # growing a string leads to a heap allocation, this tests to make sure + # we do that bookkeeping correctly for all possible starting cases + data = [ + "hello", # a short string + "abcdefghijklmnopqestuvwxyz", # a medium heap-allocated string + "hello" * 200, # a long heap-allocated string + ] + + arr = np.array(data, dtype=dtype) + uarr = np.array(data, dtype=str) + + for _ in range(5): + arr = arr + arr + uarr = uarr + uarr + + assert_array_equal(arr, uarr) + + +def test_assign_medium_strings(): + # see gh-29261 + N = 9 + src = np.array( + ( + ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255] + + ['0' * 256] * 2 + ['0' * 255] + ), dtype='T') + dst = np.array( + ( + ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256] + + ['0' * 255] + [''] * 5 + ), dtype='T') + + dst[1:N + 1] = src + assert_array_equal(dst[1:N + 1], src) + + +UFUNC_TEST_DATA = [ + "hello" * 10, + "Ae¢☃€ 😊" * 20, + "entry\nwith\nnewlines", + "entry\twith\ttabs", +] + + +@pytest.fixture +def string_array(dtype): + return np.array(UFUNC_TEST_DATA, dtype=dtype) + + +@pytest.fixture +def unicode_array(): + return np.array(UFUNC_TEST_DATA, dtype=np.str_) + + +NAN_PRESERVING_FUNCTIONS = [ + "capitalize", + "expandtabs", + "lower", + "lstrip", + "rstrip", + "splitlines", + "strip", + "swapcase", + "title", + "upper", +] + +BOOL_OUTPUT_FUNCTIONS = [ + "isalnum", + "isalpha", + "isdigit", + "islower", + "isspace", + "istitle", + "isupper", + "isnumeric", + "isdecimal", +] + +UNARY_FUNCTIONS = [ + "str_len", + "capitalize", + "expandtabs", + "isalnum", + "isalpha", + "isdigit", + "islower", + "isspace", + "istitle", + "isupper", + "lower", + "lstrip", + "rstrip", + "splitlines", + "strip", + "swapcase", + "title", + "upper", + "isnumeric", + "isdecimal", + "isalnum", + "islower", + "istitle", + "isupper", +] + +UNIMPLEMENTED_VEC_STRING_FUNCTIONS = [ + "capitalize", + "expandtabs", + "lower", + "splitlines", + "swapcase", + "title", + "upper", +] + +ONLY_IN_NP_CHAR = [ + "join", + "split", + "rsplit", + "splitlines" +] + + +@pytest.mark.parametrize("function_name", UNARY_FUNCTIONS) +def test_unary(string_array, unicode_array, function_name): + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) + dtype = string_array.dtype + sres = func(string_array) + ures = func(unicode_array) + if sres.dtype == StringDType(): + ures = ures.astype(StringDType()) + assert_array_equal(sres, ures) + + if not hasattr(dtype, "na_object"): + return + + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + na_arr = np.insert(string_array, 0, dtype.na_object) + + if function_name in UNIMPLEMENTED_VEC_STRING_FUNCTIONS: + if not is_str: + # to avoid these errors we'd need to add NA support to _vec_string + with pytest.raises((ValueError, TypeError)): + func(na_arr) + elif function_name == "splitlines": + assert func(na_arr)[0] == func(dtype.na_object)[()] + else: + assert func(na_arr)[0] == func(dtype.na_object) + return + if function_name == "str_len" and not is_str: + # str_len always errors for any non-string null, even NA ones because + # it has an integer result + with pytest.raises(ValueError): + func(na_arr) + return + if function_name in BOOL_OUTPUT_FUNCTIONS: + if is_nan: + assert func(na_arr)[0] is np.False_ + elif is_str: + assert func(na_arr)[0] == func(dtype.na_object) + else: + with pytest.raises(ValueError): + func(na_arr) + 
return + if not (is_nan or is_str): + with pytest.raises(ValueError): + func(na_arr) + return + res = func(na_arr) + if is_nan and function_name in NAN_PRESERVING_FUNCTIONS: + assert res[0] is dtype.na_object + elif is_str: + assert res[0] == func(dtype.na_object) + + +unicode_bug_fail = pytest.mark.xfail( + reason="unicode output width is buggy", strict=True +) + +# None means that the argument is a string array +BINARY_FUNCTIONS = [ + ("add", (None, None)), + ("multiply", (None, 2)), + ("mod", ("format: %s", None)), + ("center", (None, 25)), + ("count", (None, "A")), + ("encode", (None, "UTF-8")), + ("endswith", (None, "lo")), + ("find", (None, "A")), + ("index", (None, "e")), + ("join", ("-", None)), + ("ljust", (None, 12)), + ("lstrip", (None, "A")), + ("partition", (None, "A")), + ("replace", (None, "A", "B")), + ("rfind", (None, "A")), + ("rindex", (None, "e")), + ("rjust", (None, 12)), + ("rsplit", (None, "A")), + ("rstrip", (None, "A")), + ("rpartition", (None, "A")), + ("split", (None, "A")), + ("strip", (None, "A")), + ("startswith", (None, "A")), + ("zfill", (None, 12)), +] + +PASSES_THROUGH_NAN_NULLS = [ + "add", + "center", + "ljust", + "multiply", + "replace", + "rjust", + "strip", + "lstrip", + "rstrip", + "replace", + "zfill", +] + +NULLS_ARE_FALSEY = [ + "startswith", + "endswith", +] + +NULLS_ALWAYS_ERROR = [ + "count", + "find", + "rfind", +] + +SUPPORTS_NULLS = ( + PASSES_THROUGH_NAN_NULLS + + NULLS_ARE_FALSEY + + NULLS_ALWAYS_ERROR +) + + +def call_func(func, args, array, sanitize=True): + if args == (None, None): + return func(array, array) + if args[0] is None: + if sanitize: + san_args = tuple( + np.array(arg, dtype=array.dtype) if isinstance(arg, str) else + arg for arg in args[1:] + ) + else: + san_args = args[1:] + return func(array, *san_args) + if args[1] is None: + return func(args[0], array) + # shouldn't ever happen + assert 0 + + +@pytest.mark.parametrize("function_name, args", BINARY_FUNCTIONS) +def test_binary(string_array, unicode_array, function_name, args): + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) + sres = call_func(func, args, string_array) + ures = call_func(func, args, unicode_array, sanitize=False) + if not isinstance(sres, tuple) and sres.dtype == StringDType(): + ures = ures.astype(StringDType()) + assert_array_equal(sres, ures) + + dtype = string_array.dtype + if function_name not in SUPPORTS_NULLS or not hasattr(dtype, "na_object"): + return + + na_arr = np.insert(string_array, 0, dtype.na_object) + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + should_error = not (is_nan or is_str) + + if ( + (function_name in NULLS_ALWAYS_ERROR and not is_str) + or (function_name in PASSES_THROUGH_NAN_NULLS and should_error) + or (function_name in NULLS_ARE_FALSEY and should_error) + ): + with pytest.raises((ValueError, TypeError)): + call_func(func, args, na_arr) + return + + res = call_func(func, args, na_arr) + + if is_str: + assert res[0] == call_func(func, args, na_arr[:1]) + elif function_name in NULLS_ARE_FALSEY: + assert res[0] is np.False_ + elif function_name in PASSES_THROUGH_NAN_NULLS: + assert res[0] is dtype.na_object + else: + # shouldn't ever get here + assert 0 + + +@pytest.mark.parametrize("function, expected", [ + (np.strings.find, [[2, -1], [1, -1]]), + (np.strings.startswith, [[False, False], [True, False]])]) +@pytest.mark.parametrize("start, stop", [ + (1, 4), + (np.int8(1), np.int8(4)),
+ (np.array([1, 1], dtype='u2'), np.array([4, 4], dtype='u2'))]) +def test_non_default_start_stop(function, start, stop, expected): + a = np.array([["--🐍--", "--🦜--"], + ["-🐍---", "-🦜---"]], "T") + indx = function(a, "🐍", start, stop) + assert_array_equal(indx, expected) + + +@pytest.mark.parametrize("count", [2, np.int8(2), np.array([2, 2], 'u2')]) +def test_replace_non_default_repeat(count): + a = np.array(["🐍--", "🦜-🦜-"], "T") + result = np.strings.replace(a, "🦜-", "🦜†", count) + assert_array_equal(result, np.array(["🐍--", "🦜†🦜†"], "T")) + + +def test_strip_ljust_rjust_consistency(string_array, unicode_array): + rjs = np.char.rjust(string_array, 1000) + rju = np.char.rjust(unicode_array, 1000) + + ljs = np.char.ljust(string_array, 1000) + lju = np.char.ljust(unicode_array, 1000) + + assert_array_equal( + np.char.lstrip(rjs), + np.char.lstrip(rju).astype(StringDType()), + ) + + assert_array_equal( + np.char.rstrip(ljs), + np.char.rstrip(lju).astype(StringDType()), + ) + + assert_array_equal( + np.char.strip(ljs), + np.char.strip(lju).astype(StringDType()), + ) + + assert_array_equal( + np.char.strip(rjs), + np.char.strip(rju).astype(StringDType()), + ) + + +def test_unset_na_coercion(): + # a dtype instance with an unset na object is compatible + # with a dtype that has one set + + # this test uses the "add" and "equal" ufunc but all ufuncs that + # accept more than one string argument and produce a string should + # behave this way + # TODO: generalize to more ufuncs + inp = ["hello", "world"] + arr = np.array(inp, dtype=StringDType(na_object=None)) + for op_dtype in [None, StringDType(), StringDType(coerce=False), + StringDType(na_object=None)]: + if op_dtype is None: + op = "2" + else: + op = np.array("2", dtype=op_dtype) + res = arr + op + assert_array_equal(res, ["hello2", "world2"]) + + # dtype instances with distinct explicitly set NA objects are incompatible + for op_dtype in [StringDType(na_object=pd_NA), StringDType(na_object="")]: + op = np.array("2", dtype=op_dtype) + with pytest.raises(TypeError): + arr + op + + # comparisons only consider the na_object + for op_dtype in [None, StringDType(), StringDType(coerce=True), + StringDType(na_object=None)]: + if op_dtype is None: + op = inp + else: + op = np.array(inp, dtype=op_dtype) + assert_array_equal(arr, op) + + for op_dtype in [StringDType(na_object=pd_NA), + StringDType(na_object=np.nan)]: + op = np.array(inp, dtype=op_dtype) + with pytest.raises(TypeError): + arr == op + + +def test_repeat(string_array): + res = string_array.repeat(1000) + # Create an empty array with expanded dimension, and fill it. Then, + # reshape it to the expected result. + expected = np.empty_like(string_array, shape=string_array.shape + (1000,)) + expected[...] = string_array[:, np.newaxis] + expected = expected.reshape(-1) + + assert_array_equal(res, expected, strict=True) + + +@pytest.mark.parametrize("tile", [1, 6, (2, 5)]) +def test_accumulation(string_array, tile): + """Accumulation is odd for StringDType but tests dtypes with references. + """ + # Fill with mostly empty strings to not create absurdly big strings + arr = np.zeros_like(string_array, shape=(100,)) + arr[:len(string_array)] = string_array + arr[-len(string_array):] = string_array + + # Bloat size a bit (get above thresholds and test >1 ndim). 
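+ # (For reference: np.add.accumulate on strings yields running
+ # concatenations, e.g. ["a", "b", "c"] -> ["a", "ab", "abc"]; the
+ # object-dtype accumulate below serves as the reference behaviour.)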
+ arr = np.tile(string_array, tile) + + res = np.add.accumulate(arr, axis=0) + res_obj = np.add.accumulate(arr.astype(object), axis=0) + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + if arr.ndim > 1: + res = np.add.accumulate(arr, axis=-1) + res_obj = np.add.accumulate(arr.astype(object), axis=-1) + + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + +class TestImplementation: + """Check that strings are stored in the arena when possible. + + This tests implementation details, so should be adjusted if + the implementation changes. + """ + + @classmethod + def setup_class(self): + self.MISSING = 0x80 + self.INITIALIZED = 0x40 + self.OUTSIDE_ARENA = 0x20 + self.LONG = 0x10 + self.dtype = StringDType(na_object=np.nan) + self.sizeofstr = self.dtype.itemsize + sp = self.dtype.itemsize // 2 # pointer size = sizeof(size_t) + # Below, size is not strictly correct, since it really uses + # 7 (or 3) bytes, but good enough for the tests here. + self.view_dtype = np.dtype([ + ('offset', f'u{sp}'), + ('size', f'u{sp // 2}'), + ('xsiz', f'V{sp // 2 - 1}'), + ('size_and_flags', 'u1'), + ] if sys.byteorder == 'little' else [ + ('size_and_flags', 'u1'), + ('xsiz', f'V{sp // 2 - 1}'), + ('size', f'u{sp // 2}'), + ('offset', f'u{sp}'), + ]) + self.s_empty = "" + self.s_short = "01234" + self.s_medium = "abcdefghijklmnopqrstuvwxyz" + self.s_long = "-=+" * 100 + self.a = np.array( + [self.s_empty, self.s_short, self.s_medium, self.s_long], + self.dtype) + + def get_view(self, a): + # Cannot view a StringDType as anything else directly, since + # it has references. So, we use a stride trick hack. + from numpy.lib._stride_tricks_impl import DummyArray + interface = dict(a.__array_interface__) + interface['descr'] = self.view_dtype.descr + interface['typestr'] = self.view_dtype.str + return np.asarray(DummyArray(interface, base=a)) + + def get_flags(self, a): + return self.get_view(a)['size_and_flags'] & 0xf0 + + def is_short(self, a): + return self.get_flags(a) == self.INITIALIZED | self.OUTSIDE_ARENA + + def is_on_heap(self, a): + return self.get_flags(a) == (self.INITIALIZED + | self.OUTSIDE_ARENA + | self.LONG) + + def is_missing(self, a): + return self.get_flags(a) & self.MISSING == self.MISSING + + def in_arena(self, a): + return (self.get_flags(a) & (self.INITIALIZED | self.OUTSIDE_ARENA) + == self.INITIALIZED) + + def test_setup(self): + is_short = self.is_short(self.a) + length = np.strings.str_len(self.a) + assert_array_equal(is_short, (length > 0) & (length <= 15)) + assert_array_equal(self.in_arena(self.a), [False, False, True, True]) + assert_array_equal(self.is_on_heap(self.a), False) + assert_array_equal(self.is_missing(self.a), False) + view = self.get_view(self.a) + sizes = np.where(is_short, view['size_and_flags'] & 0xf, + view['size']) + assert_array_equal(sizes, np.strings.str_len(self.a)) + assert_array_equal(view['xsiz'][2:], + np.void(b'\x00' * (self.sizeofstr // 4 - 1))) + # Check that the medium string uses only 1 byte for its length + # in the arena, while the long string takes 8 (or 4). 
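+ # Layout recap in terms of the setup_class constants: strings of 1 to 15
+ # bytes are stored inline (INITIALIZED | OUTSIDE_ARENA, the "short"
+ # form); longer ones live in the arena (INITIALIZED only) behind a size
+ # field, which is why the offset arithmetic below expects one size byte
+ # for the medium string and sizeofstr // 2 bytes for the long one.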
+ offsets = view['offset'] + assert offsets[2] == 1 + assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 + + def test_empty(self): + e = np.empty((3,), self.dtype) + assert_array_equal(self.get_flags(e), 0) + assert_array_equal(e, "") + + def test_zeros(self): + z = np.zeros((2,), self.dtype) + assert_array_equal(self.get_flags(z), 0) + assert_array_equal(z, "") + + def test_copy(self): + for c in [self.a.copy(), copy.copy(self.a), copy.deepcopy(self.a)]: + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + offsets = self.get_view(c)['offset'] + assert offsets[2] == 1 + assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 + + def test_arena_use_with_setting(self): + c = np.zeros_like(self.a) + assert_array_equal(self.get_flags(c), 0) + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_with_setting(self): + c = self.a.copy() + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_after_missing(self): + c = self.a.copy() + c[:] = np.nan + assert np.all(self.is_missing(c)) + # Replacing with the original strings, the arena should be reused. + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_after_empty(self): + c = self.a.copy() + c[:] = "" + assert_array_equal(c, "") + # Replacing with the original strings, the arena should be reused. + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_for_shorter(self): + c = self.a.copy() + # A string slightly shorter than the shortest in the arena + # should be used for all strings in the arena. + c[:] = self.s_medium[:-1] + assert_array_equal(c, self.s_medium[:-1]) + # first empty string in original was never initialized, so + # filling it in now leaves it initialized inside the arena. + # second string started as a short string so it can never live + # in the arena. + in_arena = np.array([True, False, True, True]) + assert_array_equal(self.in_arena(c), in_arena) + # But when a short string is replaced, it will go on the heap. + assert_array_equal(self.is_short(c), False) + assert_array_equal(self.is_on_heap(c), ~in_arena) + # We can put the originals back, and they'll still fit, + # and short strings are back as short strings + c[:] = self.a + assert_array_equal(c, self.a) + assert_array_equal(self.in_arena(c), in_arena) + assert_array_equal(self.is_short(c), self.is_short(self.a)) + assert_array_equal(self.is_on_heap(c), False) + + def test_arena_reuse_if_possible(self): + c = self.a.copy() + # A slightly longer string will not fit in the arena for + # the medium string, but will fit for the longer one. + c[:] = self.s_medium + "±" + assert_array_equal(c, self.s_medium + "±") + in_arena_exp = np.strings.str_len(self.a) >= len(self.s_medium) + 1 + # first entry started uninitialized and empty, so filling it leaves + # it in the arena + in_arena_exp[0] = True + assert not np.all(in_arena_exp == self.in_arena(self.a)) + assert_array_equal(self.in_arena(c), in_arena_exp) + assert_array_equal(self.is_short(c), False) + assert_array_equal(self.is_on_heap(c), ~in_arena_exp) + # And once outside arena, it stays outside, since offset is lost. + # But short strings are used again. 
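+ # In other words, an entry that lost its arena offset can never move
+ # back into the arena: short strings return to the inline form and
+ # everything else stays heap-allocated, the exact split the final
+ # assertions encode as ~in_arena_exp & ~is_short_exp.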
+        c[:] = self.a
+        is_short_exp = self.is_short(self.a)
+        assert_array_equal(c, self.a)
+        assert_array_equal(self.in_arena(c), in_arena_exp)
+        assert_array_equal(self.is_short(c), is_short_exp)
+        assert_array_equal(self.is_on_heap(c), ~in_arena_exp & ~is_short_exp)
+
+    def test_arena_no_reuse_after_short(self):
+        c = self.a.copy()
+        # If we replace a string with a short string, it cannot go back
+        # into the arena afterwards, because the offset is lost.
+        c[:] = self.s_short
+        assert_array_equal(c, self.s_short)
+        assert_array_equal(self.in_arena(c), False)
+        c[:] = self.a
+        assert_array_equal(c, self.a)
+        assert_array_equal(self.in_arena(c), False)
+        assert_array_equal(self.is_on_heap(c), self.in_arena(self.a))
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_strings.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_strings.py
new file mode 100644
index 0000000..e29151a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_strings.py
@@ -0,0 +1,1454 @@
+import operator
+import sys
+
+import pytest
+
+import numpy as np
+from numpy.testing import IS_PYPY, assert_array_equal, assert_raises
+from numpy.testing._private.utils import requires_memory
+
+COMPARISONS = [
+    (operator.eq, np.equal, "=="),
+    (operator.ne, np.not_equal, "!="),
+    (operator.lt, np.less, "<"),
+    (operator.le, np.less_equal, "<="),
+    (operator.gt, np.greater, ">"),
+    (operator.ge, np.greater_equal, ">="),
+]
+
+MAX = np.iinfo(np.int64).max
+
+IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16)
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym):
+    arr_string = np.array(["a", "b"], dtype="S")
+    arr_unicode = np.array(["a", "c"], dtype="U")
+
+    with pytest.raises(TypeError, match="did not contain a loop"):
+        ufunc(arr_string, arr_unicode)
+
+    with pytest.raises(TypeError, match="did not contain a loop"):
+        ufunc(arr_unicode, arr_string)
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+def test_mixed_string_comparisons_ufuncs_with_cast(op, ufunc, sym):
+    arr_string = np.array(["a", "b"], dtype="S")
+    arr_unicode = np.array(["a", "c"], dtype="U")
+
+    # While there is no loop, manual casting is acceptable:
+    res1 = ufunc(arr_string, arr_unicode, signature="UU->?", casting="unsafe")
+    res2 = ufunc(arr_string, arr_unicode, signature="SS->?", casting="unsafe")
+
+    expected = op(arr_string.astype("U"), arr_unicode)
+    assert_array_equal(res1, expected)
+    assert_array_equal(res2, expected)
+
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+@pytest.mark.parametrize("dtypes", [
+    ("S2", "S2"), ("S2", "S10"),
+    ("<U1", "<U1"), ("<U1", ">U1"), (">U1", ">U1"),
+    ("<U1", "<U10"), ("<U10", ">U10")])
+@pytest.mark.parametrize("aligned", [True, False])
+def test_string_comparisons(op, ufunc, sym, dtypes, aligned):
+    # ensure native byte-order for the first view to stay within unicode range
+    native_dt = np.dtype(dtypes[0]).newbyteorder("=")
+    arr = np.arange(2**15).view(native_dt).astype(dtypes[0])
+    if not aligned:
+        # Make `arr` unaligned:
+        new = np.zeros(arr.nbytes + 1, dtype=np.uint8)[1:].view(dtypes[0])
+        new[...] = arr
+        arr = new
+
+    arr2 = arr.astype(dtypes[1], copy=True)
+    np.random.shuffle(arr2)
+    arr[0] = arr2[0]  # make sure one matches
+
+    expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())]
+    assert_array_equal(op(arr, arr2), expected)
+    assert_array_equal(ufunc(arr, arr2), expected)
+    assert_array_equal(
+        np.char.compare_chararrays(arr, arr2, sym, False), expected
+    )
+
+    expected = [op(d2, d1) for d1, d2 in zip(arr.tolist(), arr2.tolist())]
+    assert_array_equal(op(arr2, arr), expected)
+    assert_array_equal(ufunc(arr2, arr), expected)
+    assert_array_equal(
+        np.char.compare_chararrays(arr2, arr, sym, False), expected
+    )
+
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+@pytest.mark.parametrize("dtypes", [
+    ("S2", "S2"), ("S2", "S10"), ("<U1", "<U1"), ("<U1", ">U10")])
+def test_string_comparisons_empty(op, ufunc, sym, dtypes):
+    arr = np.empty((1, 0, 1, 5), dtype=dtypes[0])
+    arr2 = np.empty((100, 1, 0, 1), dtype=dtypes[1])
+
+    expected = np.empty(np.broadcast_shapes(arr.shape, arr2.shape), dtype=bool)
+    assert_array_equal(op(arr, arr2), expected)
+    assert_array_equal(ufunc(arr, arr2), expected)
+    assert_array_equal(
+        np.char.compare_chararrays(arr, arr2, sym, False), expected
+    )
+
+
+@pytest.mark.parametrize("str_dt", ["S", "U"])
+@pytest.mark.parametrize("float_dt", np.typecodes["AllFloat"])
+def test_float_to_string_cast(str_dt, float_dt):
+    float_dt = np.dtype(float_dt)
+    fi = np.finfo(float_dt)
+    arr = np.array([np.nan, np.inf, -np.inf, fi.max, fi.min], dtype=float_dt)
+    expected = ["nan", "inf", "-inf", str(fi.max), str(fi.min)]
+    if float_dt.kind == "c":
+        expected = [f"({r}+0j)" for r in expected]
+
+    res = arr.astype(str_dt)
+    assert_array_equal(res, np.array(expected, dtype=str_dt))
+
+
+@pytest.mark.parametrize("str_dt", "US")
+@pytest.mark.parametrize("size", [-1, np.iinfo(np.intc).max])
+def test_string_size_dtype_errors(str_dt, size):
+    if size > 0:
+        size = size // np.dtype(f"{str_dt}1").itemsize + 1
+
+    with pytest.raises(ValueError):
+        np.dtype((str_dt, size))
+    with pytest.raises(TypeError):
+        np.dtype(f"{str_dt}{size}")
+
+
+@pytest.mark.parametrize("str_dt", "US")
+def test_string_size_dtype_large_repr(str_dt):
+    size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize
+    size_str = str(size)
+
+    dtype = np.dtype((str_dt, size))
+    assert size_str in dtype.str
+    assert size_str in str(dtype)
+    assert size_str in repr(dtype)
+
+
+@pytest.mark.slow
+@requires_memory(2 * np.iinfo(np.intc).max)
+@pytest.mark.parametrize("str_dt", "US")
+def test_large_string_coercion_error(str_dt):
+    very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize
+    try:
+        large_string = "A" * (very_large + 1)
+    except Exception:
+        # We may not be able to create this Python string on 32bit.
+        pytest.skip("python failed to create huge string")
+
+    class MyStr:
+        def __str__(self):
+            return large_string
+
+    try:
+        # TypeError from NumPy, or OverflowError from 32bit Python.
+        with pytest.raises((TypeError, OverflowError)):
+            np.array([large_string], dtype=str_dt)
+
+        # Same as above, but input has to be converted to a string.
+        with pytest.raises((TypeError, OverflowError)):
+            np.array([MyStr()], dtype=str_dt)
+    except MemoryError:
+        # Catch memory errors, because `requires_memory` would do so.
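+        # Reaching this point means a huge temporary was actually
+        # allocated, which is exactly what this test forbids, so turn
+        # the MemoryError into a hard failure.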
+        raise AssertionError("Ops should raise before any large allocation.")
+
+@pytest.mark.slow
+@requires_memory(2 * np.iinfo(np.intc).max)
+@pytest.mark.parametrize("str_dt", "US")
+def test_large_string_addition_error(str_dt):
+    very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize
+
+    a = np.array(["A" * very_large], dtype=str_dt)
+    b = np.array("B", dtype=str_dt)
+    try:
+        with pytest.raises(TypeError):
+            np.add(a, b)
+        with pytest.raises(TypeError):
+            np.add(a, a)
+    except MemoryError:
+        # Catch memory errors, because `requires_memory` would do so.
+        raise AssertionError("Ops should raise before any large allocation.")
+
+
+def test_large_string_cast():
+    very_large = np.iinfo(np.intc).max // 4
+    # Could be nice to test very large path, but it makes too many huge
+    # allocations right now (need non-legacy cast loops for this).
+    # a = np.array([], dtype=np.dtype(("S", very_large)))
+    # assert a.astype("U").dtype.itemsize == very_large * 4
+
+    a = np.array([], dtype=np.dtype(("S", very_large + 1)))
+    # It is not perfect but OK if this raises a MemoryError during setup
+    # (this happens due to clunky code and/or buffer setup.)
+    with pytest.raises((TypeError, MemoryError)):
+        a.astype("U")
+
+
+@pytest.mark.parametrize("dt", ["S", "U", "T"])
+class TestMethods:
+
+    @pytest.mark.parametrize("in1,in2,out", [
+        ("", "", ""),
+        ("abc", "abc", "abcabc"),
+        ("12345", "12345", "1234512345"),
+        ("MixedCase", "MixedCase", "MixedCaseMixedCase"),
+        ("12345 \0 ", "12345 \0 ", "12345 \0 12345 \0 "),
+        ("UPPER", "UPPER", "UPPERUPPER"),
+        (["abc", "def"], ["hello", "world"], ["abchello", "defworld"]),
+    ])
+    def test_add(self, in1, in2, out, dt):
+        in1 = np.array(in1, dtype=dt)
+        in2 = np.array(in2, dtype=dt)
+        out = np.array(out, dtype=dt)
+        assert_array_equal(np.strings.add(in1, in2), out)
+
+    @pytest.mark.parametrize("in1,in2,out", [
+        ("abc", 3, "abcabcabc"),
+        ("abc", 0, ""),
+        ("abc", -1, ""),
+        (["abc", "def"], [1, 4], ["abc", "defdefdefdef"]),
+    ])
+    def test_multiply(self, in1, in2, out, dt):
+        in1 = np.array(in1, dtype=dt)
+        out = np.array(out, dtype=dt)
+        assert_array_equal(np.strings.multiply(in1, in2), out)
+
+    def test_multiply_raises(self, dt):
+        with pytest.raises(TypeError, match="unsupported type"):
+            np.strings.multiply(np.array("abc", dtype=dt), 3.14)
+
+        with pytest.raises(OverflowError):
+            np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize)
+
+    def test_inplace_multiply(self, dt):
+        arr = np.array(['foo ', 'bar'], dtype=dt)
+        arr *= 2
+        if dt != "T":
+            assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt))
+        else:
+            assert_array_equal(arr, ['foo foo ', 'barbar'])
+
+        with pytest.raises(OverflowError):
+            arr *= sys.maxsize
+
+    @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32,
+                                      np.int64, np.int_])
+    def test_multiply_integer_dtypes(self, i_dt, dt):
+        a = np.array("abc", dtype=dt)
+        i = np.array(3, dtype=i_dt)
+        res = np.array("abcabcabc", dtype=dt)
+        assert_array_equal(np.strings.multiply(a, i), res)
+
+    @pytest.mark.parametrize("in_,out", [
+        ("", False),
+        ("a", True),
+        ("A", True),
+        ("\n", False),
+        ("abc", True),
+        ("aBc123", False),
+        ("abc\n", False),
+        (["abc", "aBc123"], [True, False]),
+    ])
+    def test_isalpha(self, in_, out, dt):
+        in_ = np.array(in_, dtype=dt)
+        assert_array_equal(np.strings.isalpha(in_), out)
+
+    @pytest.mark.parametrize("in_,out", [
+        ('', False),
+        ('a', True),
+        ('A', True),
+        ('\n', False),
+        ('123abc456', True),
+        ('a1b3c', True),
+        ('aBc000 ', False),
+        ('abc\n', False),
+    ])
+    def test_isalnum(self, in_,
out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isalnum(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("012345", True), + ("012345a", False), + (["a", "012345"], [False, True]), + ]) + def test_isdigit(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isdigit(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("1", False), + (" ", True), + ("\t", True), + ("\r", True), + ("\n", True), + (" \t\r \n", True), + (" \t\r\na", False), + (["\t1", " \t\r \n"], [False, True]) + ]) + def test_isspace(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isspace(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', True), + ('A', False), + ('\n', False), + ('abc', True), + ('aBc', False), + ('abc\n', True), + ]) + def test_islower(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.islower(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', False), + ('A', True), + ('\n', False), + ('ABC', True), + ('AbC', False), + ('ABC\n', True), + ]) + def test_isupper(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isupper(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', False), + ('A', True), + ('\n', False), + ('A Titlecased Line', True), + ('A\nTitlecased Line', True), + ('A Titlecased, Line', True), + ('Not a capitalized String', False), + ('Not\ta Titlecase String', False), + ('Not--a Titlecase String', False), + ('NOT', False), + ]) + def test_istitle(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.istitle(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", 0), + ("abc", 3), + ("12345", 5), + ("MixedCase", 9), + ("12345 \x00 ", 8), + ("UPPER", 5), + (["abc", "12345 \x00 "], [3, 8]), + ]) + def test_str_len(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.str_len(in_), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("abcdefghiabc", "abc", 0, None, 0), + ("abcdefghiabc", "abc", 1, None, 9), + ("abcdefghiabc", "def", 4, None, -1), + ("abc", "", 0, None, 0), + ("abc", "", 3, None, 3), + ("abc", "", 4, None, -1), + ("rrarrrrrrrrra", "a", 0, None, 2), + ("rrarrrrrrrrra", "a", 4, None, 12), + ("rrarrrrrrrrra", "a", 4, 6, -1), + ("", "", 0, None, 0), + ("", "", 1, 1, -1), + ("", "", MAX, 0, -1), + ("", "xx", 0, None, -1), + ("", "xx", 1, 1, -1), + ("", "xx", MAX, 0, -1), + pytest.param(99 * "a" + "b", "b", 0, None, 99, + id="99*a+b-b-0-None-99"), + pytest.param(98 * "a" + "ba", "ba", 0, None, 98, + id="98*a+ba-ba-0-None-98"), + pytest.param(100 * "a", "b", 0, None, -1, + id="100*a-b-0-None--1"), + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 30000, + id="30000*a+100*b-100*b-0-None-30000"), + pytest.param(30000 * "a", 100 * "b", 0, None, -1, + id="30000*a-100*b-0-None--1"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 15000, + id="15000*a+15000*b-15000*b-0-None-15000"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, -1, + id="15000*a+15000*b-15000*c-0-None--1"), + (["abcdefghiabc", "rrarrrrrrrrra"], ["def", "arr"], [0, 3], + None, [3, -1]), + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), + ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + pytest.param("A" * (2 ** 17), r"[\w]+\Z", 0, None, -1, + id=r"A*2**17-[\w]+\Z-0-None--1"), + ]) + def test_find(self, a, sub, start, end, out, 
dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.find(a, sub, start, end), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("abcdefghiabc", "abc", 0, None, 9), + ("abcdefghiabc", "", 0, None, 12), + ("abcdefghiabc", "abcd", 0, None, 0), + ("abcdefghiabc", "abcz", 0, None, -1), + ("abc", "", 0, None, 3), + ("abc", "", 3, None, 3), + ("abc", "", 4, None, -1), + ("rrarrrrrrrrra", "a", 0, None, 12), + ("rrarrrrrrrrra", "a", 4, None, 12), + ("rrarrrrrrrrra", "a", 4, 6, -1), + (["abcdefghiabc", "rrarrrrrrrrra"], ["abc", "a"], [0, 0], + None, [9, 12]), + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 13), + ("Ae¢☃€ 😊" * 2, "😊", 0, 7, 6), + ]) + def test_rfind(self, a, sub, start, end, out, dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.rfind(a, sub, start, end), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("aaa", "a", 0, None, 3), + ("aaa", "b", 0, None, 0), + ("aaa", "a", 1, None, 2), + ("aaa", "a", 10, None, 0), + ("aaa", "a", -1, None, 1), + ("aaa", "a", -10, None, 3), + ("aaa", "a", 0, 1, 1), + ("aaa", "a", 0, 10, 3), + ("aaa", "a", 0, -1, 2), + ("aaa", "a", 0, -10, 0), + ("aaa", "", 1, None, 3), + ("aaa", "", 3, None, 1), + ("aaa", "", 10, None, 0), + ("aaa", "", -1, None, 2), + ("aaa", "", -10, None, 4), + ("aaa", "aaaa", 0, None, 0), + pytest.param(98 * "a" + "ba", "ba", 0, None, 1, + id="98*a+ba-ba-0-None-1"), + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 1, + id="30000*a+100*b-100*b-0-None-1"), + pytest.param(30000 * "a", 100 * "b", 0, None, 0, + id="30000*a-100*b-0-None-0"), + pytest.param(30000 * "a" + 100 * "ab", "ab", 0, None, 100, + id="30000*a+100*ab-ab-0-None-100"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 1, + id="15000*a+15000*b-15000*b-0-None-1"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, 0, + id="15000*a+15000*b-15000*c-0-None-0"), + ("", "", 0, None, 1), + ("", "", 1, 1, 0), + ("", "", MAX, 0, 0), + ("", "xx", 0, None, 0), + ("", "xx", 1, 1, 0), + ("", "xx", MAX, 0, 0), + (["aaa", ""], ["a", ""], [0, 0], None, [3, 1]), + ("Ae¢☃€ 😊" * 100, "😊", 0, None, 100), + ]) + def test_count(self, a, sub, start, end, out, dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.count(a, sub, start, end), out) + + @pytest.mark.parametrize("a,prefix,start,end,out", [ + ("hello", "he", 0, None, True), + ("hello", "hello", 0, None, True), + ("hello", "hello world", 0, None, False), + ("hello", "", 0, None, True), + ("hello", "ello", 0, None, False), + ("hello", "ello", 1, None, True), + ("hello", "o", 4, None, True), + ("hello", "o", 5, None, False), + ("hello", "", 5, None, True), + ("hello", "lo", 6, None, False), + ("helloworld", "lowo", 3, None, True), + ("helloworld", "lowo", 3, 7, True), + ("helloworld", "lowo", 3, 6, False), + ("", "", 0, 1, True), + ("", "", 0, 0, True), + ("", "", 1, 0, False), + ("hello", "he", 0, -1, True), + ("hello", "he", -53, -1, True), + ("hello", "hello", 0, -1, False), + ("hello", "hello world", -1, -10, False), + ("hello", "ello", -5, None, False), + ("hello", "ello", -4, None, True), + ("hello", "o", -2, None, False), + ("hello", "o", -1, None, True), + ("hello", "", -3, -3, True), 
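+        # (The negative start/end offsets here behave like slice bounds,
+        #  clamped to the string, matching str.startswith.)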
+ ("hello", "lo", -9, None, False), + (["hello", ""], ["he", ""], [0, 0], None, [True, True]), + ]) + def test_startswith(self, a, prefix, start, end, out, dt): + a = np.array(a, dtype=dt) + prefix = np.array(prefix, dtype=dt) + assert_array_equal(np.strings.startswith(a, prefix, start, end), out) + + @pytest.mark.parametrize("a,suffix,start,end,out", [ + ("hello", "lo", 0, None, True), + ("hello", "he", 0, None, False), + ("hello", "", 0, None, True), + ("hello", "hello world", 0, None, False), + ("helloworld", "worl", 0, None, False), + ("helloworld", "worl", 3, 9, True), + ("helloworld", "world", 3, 12, True), + ("helloworld", "lowo", 1, 7, True), + ("helloworld", "lowo", 2, 7, True), + ("helloworld", "lowo", 3, 7, True), + ("helloworld", "lowo", 4, 7, False), + ("helloworld", "lowo", 3, 8, False), + ("ab", "ab", 0, 1, False), + ("ab", "ab", 0, 0, False), + ("", "", 0, 1, True), + ("", "", 0, 0, True), + ("", "", 1, 0, False), + ("hello", "lo", -2, None, True), + ("hello", "he", -2, None, False), + ("hello", "", -3, -3, True), + ("hello", "hello world", -10, -2, False), + ("helloworld", "worl", -6, None, False), + ("helloworld", "worl", -5, -1, True), + ("helloworld", "worl", -5, 9, True), + ("helloworld", "world", -7, 12, True), + ("helloworld", "lowo", -99, -3, True), + ("helloworld", "lowo", -8, -3, True), + ("helloworld", "lowo", -7, -3, True), + ("helloworld", "lowo", 3, -4, False), + ("helloworld", "lowo", -8, -2, False), + (["hello", "helloworld"], ["lo", "worl"], [0, -6], None, + [True, False]), + ]) + def test_endswith(self, a, suffix, start, end, out, dt): + a = np.array(a, dtype=dt) + suffix = np.array(suffix, dtype=dt) + assert_array_equal(np.strings.endswith(a, suffix, start, end), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, ""), + (" hello ", None, "hello "), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, "abc \t\n\r\f\v"), + ([" hello ", "hello"], None, ["hello ", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + ("xyzzyhelloxyzzy", "xyz", "helloxyzzy"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + ("xyxzx", "x", "yxzx"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["helloxyzzy", "hello"]), + (["ba", "ac", "baa", "bba"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_lstrip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + assert_array_equal(np.strings.lstrip(a, chars), out) + else: + assert_array_equal(np.strings.lstrip(a), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, ""), + (" hello ", None, " hello"), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, " \t\n\r\f\vabc"), + ([" hello ", "hello"], None, [" hello", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + (["hello ", "abcdefghijklmnop"], None, + ["hello", "abcdefghijklmnop"]), + ("xyzzyhelloxyzzy", "xyz", "xyzzyhello"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + (" ", None, ""), + ("xyxzx", "x", "xyxz"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["xyzzyhello", "hello"]), + (["ab", "ac", "aab", "abb"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_rstrip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + assert_array_equal(np.strings.rstrip(a, chars), out) + else: + assert_array_equal(np.strings.rstrip(a), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, 
""), + (" hello ", None, "hello"), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, "abc"), + ([" hello ", "hello"], None, ["hello", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + ("xyzzyhelloxyzzy", "xyz", "hello"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + ("xyxzx", "x", "yxz"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["hello", "hello"]), + (["bab", "ac", "baab", "bbabb"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_strip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + out = np.array(out, dtype=dt) + assert_array_equal(np.strings.strip(a, chars), out) + + @pytest.mark.parametrize("buf,old,new,count,res", [ + ("", "", "", -1, ""), + ("", "", "A", -1, "A"), + ("", "A", "", -1, ""), + ("", "A", "A", -1, ""), + ("", "", "", 100, ""), + ("", "", "A", 100, "A"), + ("A", "", "", -1, "A"), + ("A", "", "*", -1, "*A*"), + ("A", "", "*1", -1, "*1A*1"), + ("A", "", "*-#", -1, "*-#A*-#"), + ("AA", "", "*-", -1, "*-A*-A*-"), + ("AA", "", "*-", -1, "*-A*-A*-"), + ("AA", "", "*-", 4, "*-A*-A*-"), + ("AA", "", "*-", 3, "*-A*-A*-"), + ("AA", "", "*-", 2, "*-A*-A"), + ("AA", "", "*-", 1, "*-AA"), + ("AA", "", "*-", 0, "AA"), + ("A", "A", "", -1, ""), + ("AAA", "A", "", -1, ""), + ("AAA", "A", "", -1, ""), + ("AAA", "A", "", 4, ""), + ("AAA", "A", "", 3, ""), + ("AAA", "A", "", 2, "A"), + ("AAA", "A", "", 1, "AA"), + ("AAA", "A", "", 0, "AAA"), + ("AAAAAAAAAA", "A", "", -1, ""), + ("ABACADA", "A", "", -1, "BCD"), + ("ABACADA", "A", "", -1, "BCD"), + ("ABACADA", "A", "", 5, "BCD"), + ("ABACADA", "A", "", 4, "BCD"), + ("ABACADA", "A", "", 3, "BCDA"), + ("ABACADA", "A", "", 2, "BCADA"), + ("ABACADA", "A", "", 1, "BACADA"), + ("ABACADA", "A", "", 0, "ABACADA"), + ("ABCAD", "A", "", -1, "BCD"), + ("ABCADAA", "A", "", -1, "BCD"), + ("BCD", "A", "", -1, "BCD"), + ("*************", "A", "", -1, "*************"), + ("^" + "A" * 1000 + "^", "A", "", 999, "^A^"), + ("the", "the", "", -1, ""), + ("theater", "the", "", -1, "ater"), + ("thethe", "the", "", -1, ""), + ("thethethethe", "the", "", -1, ""), + ("theatheatheathea", "the", "", -1, "aaaa"), + ("that", "the", "", -1, "that"), + ("thaet", "the", "", -1, "thaet"), + ("here and there", "the", "", -1, "here and re"), + ("here and there and there", "the", "", -1, "here and re and re"), + ("here and there and there", "the", "", 3, "here and re and re"), + ("here and there and there", "the", "", 2, "here and re and re"), + ("here and there and there", "the", "", 1, "here and re and there"), + ("here and there and there", "the", "", 0, "here and there and there"), + ("here and there and there", "the", "", -1, "here and re and re"), + ("abc", "the", "", -1, "abc"), + ("abcdefg", "the", "", -1, "abcdefg"), + ("bbobob", "bob", "", -1, "bob"), + ("bbobobXbbobob", "bob", "", -1, "bobXbob"), + ("aaaaaaabob", "bob", "", -1, "aaaaaaa"), + ("aaaaaaa", "bob", "", -1, "aaaaaaa"), + ("Who goes there?", "o", "o", -1, "Who goes there?"), + ("Who goes there?", "o", "O", -1, "WhO gOes there?"), + ("Who goes there?", "o", "O", -1, "WhO gOes there?"), + ("Who goes there?", "o", "O", 3, "WhO gOes there?"), + ("Who goes there?", "o", "O", 2, "WhO gOes there?"), + ("Who goes there?", "o", "O", 1, "WhO goes there?"), + ("Who goes there?", "o", "O", 0, "Who goes there?"), + ("Who goes there?", "a", "q", -1, "Who goes there?"), + ("Who goes there?", "W", "w", -1, "who goes there?"), + ("WWho goes there?WW", "W", "w", -1, "wwho goes there?ww"), + ("Who goes 
there?", "?", "!", -1, "Who goes there!"), + ("Who goes there??", "?", "!", -1, "Who goes there!!"), + ("Who goes there?", ".", "!", -1, "Who goes there?"), + ("This is a tissue", "is", "**", -1, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", -1, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 4, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 3, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 2, "Th** ** a tissue"), + ("This is a tissue", "is", "**", 1, "Th** is a tissue"), + ("This is a tissue", "is", "**", 0, "This is a tissue"), + ("bobob", "bob", "cob", -1, "cobob"), + ("bobobXbobobob", "bob", "cob", -1, "cobobXcobocob"), + ("bobob", "bot", "bot", -1, "bobob"), + ("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", 2, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", 1, "ReyKKjavik"), + ("Reykjavik", "k", "KK", 0, "Reykjavik"), + ("A.B.C.", ".", "----", -1, "A----B----C----"), + ("Reykjavik", "q", "KK", -1, "Reykjavik"), + ("spam, spam, eggs and spam", "spam", "ham", -1, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", -1, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 4, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 3, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 2, + "ham, ham, eggs and spam"), + ("spam, spam, eggs and spam", "spam", "ham", 1, + "ham, spam, eggs and spam"), + ("spam, spam, eggs and spam", "spam", "ham", 0, + "spam, spam, eggs and spam"), + ("bobobob", "bobob", "bob", -1, "bobob"), + ("bobobobXbobobob", "bobob", "bob", -1, "bobobXbobob"), + ("BOBOBOB", "bob", "bobby", -1, "BOBOBOB"), + ("one!two!three!", "!", "@", 1, "one@two!three!"), + ("one!two!three!", "!", "", -1, "onetwothree"), + ("one!two!three!", "!", "@", 2, "one@two@three!"), + ("one!two!three!", "!", "@", 3, "one@two@three@"), + ("one!two!three!", "!", "@", 4, "one@two@three@"), + ("one!two!three!", "!", "@", 0, "one!two!three!"), + ("one!two!three!", "!", "@", -1, "one@two@three@"), + ("one!two!three!", "x", "@", -1, "one!two!three!"), + ("one!two!three!", "x", "@", 2, "one!two!three!"), + ("abc", "", "-", -1, "-a-b-c-"), + ("abc", "", "-", 3, "-a-b-c"), + ("abc", "", "-", 0, "abc"), + ("abc", "ab", "--", 0, "abc"), + ("abc", "xy", "--", -1, "abc"), + (["abbc", "abbd"], "b", "z", [1, 2], ["azbc", "azzd"]), + ]) + def test_replace(self, buf, old, new, count, res, dt): + if "😊" in buf and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + buf = np.array(buf, dtype=dt) + old = np.array(old, dtype=dt) + new = np.array(new, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.replace(buf, old, new, count), res) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("abcdefghiabc", "", 0, None, 0), + ("abcdefghiabc", "def", 0, None, 3), + ("abcdefghiabc", "abc", 0, None, 0), + ("abcdefghiabc", "abc", 1, None, 9), + ]) + def test_index(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.index(buf, sub, start, end), res) + + @pytest.mark.parametrize("buf,sub,start,end", [ + ("abcdefghiabc", "hib", 0, None), + ("abcdefghiab", "abc", 1, None), + ("abcdefghi", "ghi", 8, None), + ("abcdefghi", "ghi", -1, None), + ("rrarrrrrrrrra", "a", 4, 6), + ]) + def test_index_raises(self, buf, sub, start, end, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + with 
pytest.raises(ValueError, match="substring not found"): + np.strings.index(buf, sub, start, end) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("abcdefghiabc", "", 0, None, 12), + ("abcdefghiabc", "def", 0, None, 3), + ("abcdefghiabc", "abc", 0, None, 9), + ("abcdefghiabc", "abc", 0, -1, 0), + ]) + def test_rindex(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.rindex(buf, sub, start, end), res) + + @pytest.mark.parametrize("buf,sub,start,end", [ + ("abcdefghiabc", "hib", 0, None), + ("defghiabc", "def", 1, None), + ("defghiabc", "abc", 0, -1), + ("abcdefghi", "ghi", 0, 8), + ("abcdefghi", "ghi", 0, -1), + ("rrarrrrrrrrra", "a", 4, 6), + ]) + def test_rindex_raises(self, buf, sub, start, end, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + with pytest.raises(ValueError, match="substring not found"): + np.strings.rindex(buf, sub, start, end) + + @pytest.mark.parametrize("buf,tabsize,res", [ + ("abc\rab\tdef\ng\thi", 8, "abc\rab def\ng hi"), + ("abc\rab\tdef\ng\thi", 4, "abc\rab def\ng hi"), + ("abc\r\nab\tdef\ng\thi", 8, "abc\r\nab def\ng hi"), + ("abc\r\nab\tdef\ng\thi", 4, "abc\r\nab def\ng hi"), + ("abc\r\nab\r\ndef\ng\r\nhi", 4, "abc\r\nab\r\ndef\ng\r\nhi"), + (" \ta\n\tb", 1, " a\n b"), + ]) + def test_expandtabs(self, buf, tabsize, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.expandtabs(buf, tabsize), res) + + def test_expandtabs_raises_overflow(self, dt): + with pytest.raises(OverflowError, match="new string is too long"): + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) + + FILL_ERROR = "The fill character must be exactly one character long" + + def test_center_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.center(buf, 10, fill) + + def test_ljust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.ljust(buf, 10, fill) + + def test_rjust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.rjust(buf, 10, fill) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', ' abc '), + ('abc', 6, ' ', ' abc '), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), + ('abc', 10, '*', '***abc****'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', 'abc '), + ('abc', 6, ' ', 'abc '), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), + ('abc', 10, '*', 'abc*******'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', ' abc'), + ('abc', 6, ' ', ' abc'), + ('abc', 3, ' ', 'abc'), 
+ ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), + ('abc', 10, '*', '*******abc'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,res", [ + ('123', 2, '123'), + ('123', 3, '123'), + ('0123', 4, '0123'), + ('+123', 3, '+123'), + ('+123', 4, '+123'), + ('+123', 5, '+0123'), + ('+0123', 5, '+0123'), + ('-123', 3, '-123'), + ('-123', 4, '-123'), + ('-0123', 5, '-0123'), + ('000', 3, '000'), + ('34', 1, '34'), + ('34', -1, '34'), + ('0034', 4, '0034'), + ]) + def test_zfill(self, buf, width, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.zfill(buf, width), res) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("this is the partition method", "ti", "this is the par", + "ti", "tion method"), + ("http://www.python.org", "://", "http", "://", "www.python.org"), + ("http://www.python.org", "?", "http://www.python.org", "", ""), + ("http://www.python.org", "http://", "", "http://", "www.python.org"), + ("http://www.python.org", "org", "http://www.python.", "org", ""), + ("http://www.python.org", ["://", "?", "http://", "org"], + ["http", "http://www.python.org", "", "http://www.python."], + ["://", "", "http://", "org"], + ["www.python.org", "", "www.python.org", ""]), + ("mississippi", "ss", "mi", "ss", "issippi"), + ("mississippi", "i", "m", "i", "ssissippi"), + ("mississippi", "w", "mississippi", "", ""), + ]) + def test_partition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.partition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("this is the partition method", "ti", "this is the parti", + "ti", "on method"), + ("http://www.python.org", "://", "http", "://", "www.python.org"), + ("http://www.python.org", "?", "", "", "http://www.python.org"), + ("http://www.python.org", "http://", "", "http://", "www.python.org"), + ("http://www.python.org", "org", "http://www.python.", "org", ""), + ("http://www.python.org", ["://", "?", "http://", "org"], + ["http", "", "", "http://www.python."], + ["://", "", "http://", "org"], + ["www.python.org", "http://www.python.org", "www.python.org", ""]), + ("mississippi", "ss", "missi", "ss", "ippi"), + ("mississippi", "i", "mississipp", "i", ""), + ("mississippi", "w", "", "", "mississippi"), + ]) + def test_rpartition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.rpartition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("args", [ + (None,), + (0,), + (1,), + (3,), + (5,), + (6,), # test index past the end + (-1,), + (-3,), + ([3, 4],), + ([2, 4],), + ([-3, 5],), + ([0, -5],), + (1, 4), + (-3, 5), + (None, -1), + (0, [4, 2]), + ([1, 2], [-1, -2]), + (1, 5, 2), + (None, None, -1), + ([0, 6], [-1, 0], 
[2, -1]), + ]) + def test_slice(self, args, dt): + buf = np.array(["hello", "world"], dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + + def test_slice_unsupported(self, dt): + with pytest.raises(TypeError, match="did not contain a loop"): + np.strings.slice(np.array([1, 2, 3]), 4) + + with pytest.raises(TypeError, match=r"Cannot cast ufunc '_slice' input .* from .* to dtype\('int(64|32)'\)"): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), np.array(['foo', 'bar'], dtype=dt)) + + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) + def test_slice_int_type_promotion(self, int_dt, dt): + buf = np.array(["hello", "world"], dtype=dt) + + assert_array_equal(np.strings.slice(buf, int_dt(4)), np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np.strings.slice(buf, np.array([4, 4], dtype=int_dt)), np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np.strings.slice(buf, int_dt(2), int_dt(4)), np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np.strings.slice(buf, np.array([2, 2], dtype=int_dt), np.array([4, 4], dtype=int_dt)), np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np.strings.slice(buf, int_dt(0), int_dt(4), int_dt(2)), np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np.strings.slice(buf, np.array([0, 0], dtype=int_dt), np.array([4, 4], dtype=int_dt), np.array([2, 2], dtype=int_dt)), np.array(["hl", "wr"], dtype=dt)) + +@pytest.mark.parametrize("dt", ["U", "T"]) +class TestMethodsWithUnicode: + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("\u2460", False), # CIRCLED DIGIT 1 + ("\xbc", False), # VULGAR FRACTION ONE QUARTER + ("\u0660", True), # ARABIC_INDIC DIGIT ZERO + ("012345", True), + ("012345a", False), + (["0", "a"], [True, False]), + ]) + def test_isdecimal_unicode(self, in_, out, dt): + buf = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isdecimal(buf), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("\u2460", True), # CIRCLED DIGIT 1 + ("\xbc", True), # VULGAR FRACTION ONE QUARTER + ("\u0660", True), # ARABIC_INDIC DIGIT ZERO + ("012345", True), + ("012345a", False), + (["0", "a"], [True, False]), + ]) + def test_isnumeric_unicode(self, in_, out, dt): + buf = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isnumeric(buf), out) + + @pytest.mark.parametrize("buf,old,new,count,res", [ + ("...\u043c......<", "<", "<", -1, "...\u043c......<"), + ("Ae¢☃€ 😊" * 2, "A", "B", -1, "Be¢☃€ 😊Be¢☃€ 😊"), + ("Ae¢☃€ 😊" * 2, "😊", "B", -1, "Ae¢☃€ BAe¢☃€ B"), + ]) + def test_replace_unicode(self, buf, old, new, count, res, dt): + buf = np.array(buf, dtype=dt) + old = np.array(old, dtype=dt) + new = np.array(new, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.replace(buf, old, new, count), res) + + @pytest.mark.parametrize("in_", [ + '\U00010401', + '\U00010427', + '\U00010429', + '\U0001044E', + '\U0001D7F6', + '\U00011066', + '\U000104A0', + pytest.param('\U0001F107', marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISALNUM", + strict=True)), + ]) + def test_isalnum_unicode(self, in_, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isalnum(in_), True) + + 
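+    # The case-predicate tests below use non-BMP code points (e.g. the
+    # Deseret letters \U00010401 and \U00010429); PyPy versions before
+    # 7.3.16 on Windows get some of these wrong, hence the xfail marks.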
@pytest.mark.parametrize("in_,out", [ + ('\u1FFc', False), + ('\u2167', False), + ('\U00010401', False), + ('\U00010427', False), + ('\U0001F40D', False), + ('\U0001F46F', False), + ('\u2177', True), + pytest.param('\U00010429', True, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISLOWER", + strict=True)), + ('\U0001044E', True), + ]) + def test_islower_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.islower(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('\u1FFc', False), + ('\u2167', True), + ('\U00010401', True), + ('\U00010427', True), + ('\U0001F40D', False), + ('\U0001F46F', False), + ('\u2177', False), + pytest.param('\U00010429', False, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISUPPER", + strict=True)), + ('\U0001044E', False), + ]) + def test_isupper_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isupper(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('\u1FFc', True), + ('Greek \u1FFcitlecases ...', True), + pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISISTITLE", + strict=True)), + ('\U00010427\U0001044E', True), + pytest.param('\U00010429', False, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISISTITLE", + strict=True)), + ('\U0001044E', False), + ('\U0001F40D', False), + ('\U0001F46F', False), + ]) + def test_istitle_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.istitle(in_), out) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), + ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + ]) + def test_index_unicode(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.index(buf, sub, start, end), res) + + def test_index_raises_unicode(self, dt): + with pytest.raises(ValueError, match="substring not found"): + np.strings.index("Ae¢☃€ 😊", "😀") + + @pytest.mark.parametrize("buf,res", [ + ("Ae¢☃€ \t 😊", "Ae¢☃€ 😊"), + ("\t\U0001044E", " \U0001044E"), + ]) + def test_expandtabs(self, buf, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.expandtabs(buf), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', '\U0001044Ex\U0001044E'), + ('x', 4, '\U0001044E', '\U0001044Ex\U0001044E\U0001044E'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', 'x\U0001044E\U0001044E'), + ('x', 4, '\U0001044E', 'x\U0001044E\U0001044E\U0001044E'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', '\U0001044Ex'), + ('x', 3, '\U0001044E', '\U0001044E\U0001044Ex'), + ('x', 4, 
'\U0001044E', '\U0001044E\U0001044E\U0001044Ex'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("āāāāĀĀĀĀ", "Ă", "āāāāĀĀĀĀ", "", ""), + ("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"), + ("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"), + ("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "", ""), + ("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ]) + def test_partition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.partition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("āāāāĀĀĀĀ", "Ă", "", "", "āāāāĀĀĀĀ"), + ("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"), + ("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"), + ("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "", "", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ]) + def test_rpartition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.rpartition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("method", ["strip", "lstrip", "rstrip"]) + @pytest.mark.parametrize( + "source,strip", + [ + ("λμ", "μ"), + ("λμ", "λ"), + ("λ" * 5 + "μ" * 2, "μ"), + ("λ" * 5 + "μ" * 2, "λ"), + ("λ" * 5 + "A" + "μ" * 2, "μλ"), + ("λμ" * 5, "μ"), + ("λμ" * 5, "λ"), + ]) + def test_strip_functions_unicode(self, source, strip, method, dt): + src_array = np.array([source], dtype=dt) + + npy_func = getattr(np.strings, method) + py_func = getattr(str, method) + + expected = np.array([py_func(source, strip)], dtype=dt) + actual = npy_func(src_array, strip) + + assert_array_equal(actual, expected) + + @pytest.mark.parametrize("args", [ + (None,), + (0,), + (1,), + (5,), + (15,), + (22,), + (-1,), + (-3,), + ([3, 4],), + ([-5, 5],), + ([0, -8],), + (1, 12), + (-12, 15), + (None, -1), + (0, [17, 6]), + ([1, 2], [-1, -2]), + (1, 11, 2), + (None, None, -1), + ([0, 10], [-1, 0], [2, -1]), + ]) + def test_slice(self, args, dt): + buf = np.array(["Приве́т नमस्ते שָׁלוֹם", "😀😃😄😁😆😅🤣😂🙂🙃"], + dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + + +class TestMixedTypeMethods: + def test_center(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("*😊*", dtype="U") + assert_array_equal(np.strings.center(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("*s*", dtype="S") + assert_array_equal(np.strings.center(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") 
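+            # The unicode fill character is cast to the bytes dtype of
+            # `buf`; a non-ASCII fill cannot be encoded, hence the error.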
+ fill = np.array("😊", dtype="U") + np.strings.center(buf, 3, fill) + + def test_ljust(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("😊**", dtype="U") + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("s**", dtype="S") + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.ljust(buf, 3, fill) + + def test_rjust(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("**😊", dtype="U") + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("**s", dtype="S") + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.rjust(buf, 3, fill) + + +class TestUnicodeOnlyMethodsRaiseWithBytes: + def test_isdecimal_raises(self): + in_ = np.array(b"1") + with assert_raises(TypeError): + np.strings.isdecimal(in_) + + def test_isnumeric_bytes(self): + in_ = np.array(b"1") + with assert_raises(TypeError): + np.strings.isnumeric(in_) + + +def check_itemsize(n_elem, dt): + if dt == "T": + return np.dtype(dt).itemsize + if dt == "S": + return n_elem + if dt == "U": + return n_elem * 4 + +@pytest.mark.parametrize("dt", ["S", "U", "T"]) +class TestReplaceOnArrays: + + def test_replace_count_and_size(self, dt): + a = np.array(["0123456789" * i for i in range(4)], dtype=dt) + r1 = np.strings.replace(a, "5", "ABCDE") + assert r1.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) + r1_res = np.array(["01234ABCDE6789" * i for i in range(4)], dtype=dt) + assert_array_equal(r1, r1_res) + r2 = np.strings.replace(a, "5", "ABCDE", 1) + assert r2.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) + r3 = np.strings.replace(a, "5", "ABCDE", 0) + assert r3.dtype.itemsize == a.dtype.itemsize + assert_array_equal(r3, a) + # Negative values mean to replace all. + r4 = np.strings.replace(a, "5", "ABCDE", -1) + assert r4.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) + assert_array_equal(r4, r1) + # We can do count on an element-by-element basis. 
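+        # (`count` broadcasts like the other operands, so an array gives
+        #  a per-element limit on the number of replacements.)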
+ r5 = np.strings.replace(a, "5", "ABCDE", [-1, -1, -1, 1]) + assert r5.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) + assert_array_equal(r5, np.array( + ["01234ABCDE6789" * i for i in range(3)] + + ["01234ABCDE6789" + "0123456789" * 2], dtype=dt)) + + def test_replace_broadcasting(self, dt): + a = np.array("0,0,0", dtype=dt) + r1 = np.strings.replace(a, "0", "1", np.arange(3)) + assert r1.dtype == a.dtype + assert_array_equal(r1, np.array(["0,0,0", "1,0,0", "1,1,0"], dtype=dt)) + r2 = np.strings.replace(a, "0", [["1"], ["2"]], np.arange(1, 4)) + assert_array_equal(r2, np.array([["1,0,0", "1,1,0", "1,1,1"], + ["2,0,0", "2,2,0", "2,2,2"]], + dtype=dt)) + r3 = np.strings.replace(a, ["0", "0,0", "0,0,0"], "X") + assert_array_equal(r3, np.array(["X,X,X", "X,0", "X"], dtype=dt)) + + +class TestOverride: + @classmethod + def setup_class(cls): + class Override: + + def __array_function__(self, *args, **kwargs): + return "function" + + def __array_ufunc__(self, *args, **kwargs): + return "ufunc" + + cls.override = Override() + + @pytest.mark.parametrize("func, kwargs", [ + (np.strings.center, dict(width=10)), + (np.strings.capitalize, {}), + (np.strings.decode, {}), + (np.strings.encode, {}), + (np.strings.expandtabs, {}), + (np.strings.ljust, dict(width=10)), + (np.strings.lower, {}), + (np.strings.mod, dict(values=2)), + (np.strings.multiply, dict(i=2)), + (np.strings.partition, dict(sep="foo")), + (np.strings.rjust, dict(width=10)), + (np.strings.rpartition, dict(sep="foo")), + (np.strings.swapcase, {}), + (np.strings.title, {}), + (np.strings.translate, dict(table=None)), + (np.strings.upper, {}), + (np.strings.zfill, dict(width=10)), + ]) + def test_override_function(self, func, kwargs): + assert func(self.override, **kwargs) == "function" + + @pytest.mark.parametrize("func, args, kwargs", [ + (np.strings.add, (None, ), {}), + (np.strings.lstrip, (), {}), + (np.strings.rstrip, (), {}), + (np.strings.strip, (), {}), + (np.strings.equal, (None, ), {}), + (np.strings.not_equal, (None, ), {}), + (np.strings.greater_equal, (None, ), {}), + (np.strings.less_equal, (None, ), {}), + (np.strings.greater, (None, ), {}), + (np.strings.less, (None, ), {}), + (np.strings.count, ("foo", ), {}), + (np.strings.endswith, ("foo", ), {}), + (np.strings.find, ("foo", ), {}), + (np.strings.index, ("foo", ), {}), + (np.strings.isalnum, (), {}), + (np.strings.isalpha, (), {}), + (np.strings.isdecimal, (), {}), + (np.strings.isdigit, (), {}), + (np.strings.islower, (), {}), + (np.strings.isnumeric, (), {}), + (np.strings.isspace, (), {}), + (np.strings.istitle, (), {}), + (np.strings.isupper, (), {}), + (np.strings.rfind, ("foo", ), {}), + (np.strings.rindex, ("foo", ), {}), + (np.strings.startswith, ("foo", ), {}), + (np.strings.str_len, (), {}), + ]) + def test_override_ufunc(self, func, args, kwargs): + assert func(self.override, *args, **kwargs) == "ufunc" diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_ufunc.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_ufunc.py new file mode 100644 index 0000000..af22dce --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_ufunc.py @@ -0,0 +1,3313 @@ +import ctypes as ct +import itertools +import pickle +import sys +import warnings + +import numpy._core._operand_flag_tests as opflag_tests +import numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt +import pytest +from pytest import param + +import numpy as np +import numpy._core.umath as ncu +import 
numpy.linalg._umath_linalg as uml +from numpy.exceptions import AxisError +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + suppress_warnings, +) +from numpy.testing._private.utils import requires_memory + +UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] + +# Remove functions that do not support `floats` +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) + + +class TestUfuncKwargs: + def test_kwarg_exact(self): + assert_raises(TypeError, np.add, 1, 2, castingx='safe') + assert_raises(TypeError, np.add, 1, 2, dtypex=int) + assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) + assert_raises(TypeError, np.add, 1, 2, outx=None) + assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') + assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i') + assert_raises(TypeError, np.add, 1, 2, subokx=False) + assert_raises(TypeError, np.add, 1, 2, wherex=[True]) + + def test_sig_signature(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + signature='ii->i') + + def test_sig_dtype(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + dtype=int) + assert_raises(TypeError, np.add, 1, 2, signature='ii->i', + dtype=int) + + def test_extobj_removed(self): + assert_raises(TypeError, np.add, 1, 2, extobj=[4096]) + + +class TestUfuncGenericLoops: + """Test generic loops. + + The loops to be tested are: + + PyUFunc_ff_f_As_dd_d + PyUFunc_ff_f + PyUFunc_dd_d + PyUFunc_gg_g + PyUFunc_FF_F_As_DD_D + PyUFunc_DD_D + PyUFunc_FF_F + PyUFunc_GG_G + PyUFunc_OO_O + PyUFunc_OO_O_method + PyUFunc_f_f_As_d_d + PyUFunc_d_d + PyUFunc_f_f + PyUFunc_g_g + PyUFunc_F_F_As_D_D + PyUFunc_F_F + PyUFunc_D_D + PyUFunc_G_G + PyUFunc_O_O + PyUFunc_O_O_method + PyUFunc_On_Om + + Where: + + f -- float + d -- double + g -- long double + F -- complex float + D -- complex double + G -- complex long double + O -- python object + + It is difficult to assure that each of these loops is entered from the + Python level as the special cased loops are a moving target and the + corresponding types are architecture dependent. We probably need to + define C level testing ufuncs to get at them. For the time being, I've + just looked at the signatures registered in the build directory to find + relevant functions. 
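+
+    (The "_As_" names denote loops computed via another type's kernel,
+    e.g. PyUFunc_ff_f_As_dd_d evaluates the single-precision loop by
+    casting through the double implementation.)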
+ + """ + np_dtypes = [ + (np.single, np.single), (np.single, np.double), + (np.csingle, np.csingle), (np.csingle, np.cdouble), + (np.double, np.double), (np.longdouble, np.longdouble), + (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)] + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + def f2(x, y): + return x**y + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs, xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + # class to use in testing object method loops + class foo: + def conjugate(self): + return np.bool(1) + + def logical_xor(self, obj): + return np.bool(1) + + def test_unary_PyUFunc_O_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.abs(x) == 1)) + + def test_unary_PyUFunc_O_O_method_simple(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.conjugate(x) == True)) + + def test_binary_PyUFunc_OO_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.add(x, x) == 2)) + + def test_binary_PyUFunc_OO_O_method(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_binary_PyUFunc_On_Om_method(self, foo=foo): + x = np.full((10, 2, 3), foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_python_complex_conjugate(self): + # The conjugate ufunc should fall back to calling the method: + arr = np.array([1 + 2j, 3 - 4j], dtype="O") + assert isinstance(arr[0], complex) + res = np.conjugate(arr) + assert res.dtype == np.dtype("O") + assert_array_equal(res, np.array([1 - 2j, 3 + 4j], dtype="O")) + + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_unary_PyUFunc_O_O_method_full(self, ufunc): + """Compare the result of the object loop with non-object one""" + val = np.float64(np.pi / 4) + + class MyFloat(np.float64): + def __getattr__(self, attr): + try: + return super().__getattr__(attr) + except AttributeError: + return lambda: getattr(np._core.umath, attr)(val) + + # Use 0-D arrays, to ensure the same element call + num_arr = np.array(val, dtype=np.float64) + obj_arr = np.array(MyFloat(val), dtype="O") + + with np.errstate(all="raise"): + try: + res_num = ufunc(num_arr) + except Exception as exc: + with assert_raises(type(exc)): + ufunc(obj_arr) + else: + res_obj = ufunc(obj_arr) + assert_array_almost_equal(res_num.astype("O"), res_obj) + + +def _pickleable_module_global(): + pass + + +class TestUfunc: + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_(pickle.loads(pickle.dumps(np.sin, + protocol=proto)) is np.sin) + + # Check that ufunc not defined in the top level numpy namespace + # such as numpy._core._rational_tests.test_add can also be pickled + res = pickle.loads(pickle.dumps(_rational_tests.test_add, + protocol=proto)) + assert_(res is _rational_tests.test_add) + + def test_pickle_withstring(self): + astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n" + b"(S'numpy._core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") + assert_(pickle.loads(astring) is np.cos) + + @pytest.mark.skipif(IS_PYPY, reason="'is' check does not work on PyPy") + def test_pickle_name_is_qualname(self): + # This tests that a 
simplification of our ufunc pickle code will + # lead to allowing qualnames as names. Future ufuncs should + # possible add a specific qualname, or a hook into pickling instead + # (dask+numba may benefit). + _pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc + + obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc)) + assert obj is umt._pickleable_module_global_ufunc + + def test_reduceat_shifting_sum(self): + L = 6 + x = np.arange(L) + idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() + assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) + + def test_all_ufunc(self): + """Try to check presence and results of all ufuncs. + + The list of ufuncs comes from generate_umath.py and is as follows: + + ===== ==== ============= =============== ======================== + done args function types notes + ===== ==== ============= =============== ======================== + n 1 conjugate nums + O + n 1 absolute nums + O complex -> real + n 1 negative nums + O + n 1 sign nums + O -> int + n 1 invert bool + ints + O flts raise an error + n 1 degrees real + M cmplx raise an error + n 1 radians real + M cmplx raise an error + n 1 arccos flts + M + n 1 arccosh flts + M + n 1 arcsin flts + M + n 1 arcsinh flts + M + n 1 arctan flts + M + n 1 arctanh flts + M + n 1 cos flts + M + n 1 sin flts + M + n 1 tan flts + M + n 1 cosh flts + M + n 1 sinh flts + M + n 1 tanh flts + M + n 1 exp flts + M + n 1 expm1 flts + M + n 1 log flts + M + n 1 log10 flts + M + n 1 log1p flts + M + n 1 sqrt flts + M real x < 0 raises error + n 1 ceil real + M + n 1 trunc real + M + n 1 floor real + M + n 1 fabs real + M + n 1 rint flts + M + n 1 isnan flts -> bool + n 1 isinf flts -> bool + n 1 isfinite flts -> bool + n 1 signbit real -> bool + n 1 modf real -> (frac, int) + n 1 logical_not bool + nums + M -> bool + n 2 left_shift ints + O flts raise an error + n 2 right_shift ints + O flts raise an error + n 2 add bool + nums + O boolean + is || + n 2 subtract bool + nums + O boolean - is ^ + n 2 multiply bool + nums + O boolean * is & + n 2 divide nums + O + n 2 floor_divide nums + O + n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d + n 2 fmod nums + M + n 2 power nums + O + n 2 greater bool + nums + O -> bool + n 2 greater_equal bool + nums + O -> bool + n 2 less bool + nums + O -> bool + n 2 less_equal bool + nums + O -> bool + n 2 equal bool + nums + O -> bool + n 2 not_equal bool + nums + O -> bool + n 2 logical_and bool + nums + M -> bool + n 2 logical_or bool + nums + M -> bool + n 2 logical_xor bool + nums + M -> bool + n 2 maximum bool + nums + O + n 2 minimum bool + nums + O + n 2 bitwise_and bool + ints + O flts raise an error + n 2 bitwise_or bool + ints + O flts raise an error + n 2 bitwise_xor bool + ints + O flts raise an error + n 2 arctan2 real + M + n 2 remainder ints + real + O + n 2 hypot real + M + ===== ==== ============= =============== ======================== + + Types other than those listed will be accepted, but they are cast to + the smallest compatible type for which the function is defined. 
The + casting rules are: + + bool -> int8 -> float32 + ints -> double + + """ + pass + + # from include/numpy/ufuncobject.h + size_inferred = 2 + can_ignore = 4 + + def test_signature0(self): + # the arguments to test_signature are: nin, nout, core_signature + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i),(i)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 1, 0)) + assert_equal(ixs, (0, 0)) + assert_equal(flags, (self.size_inferred,)) + assert_equal(sizes, (-1,)) + + def test_signature1(self): + # empty core signature; treat as plain ufunc (with trivial core) + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(),()->()") + assert_equal(enabled, 0) + assert_equal(num_dims, (0, 0, 0)) + assert_equal(ixs, ()) + assert_equal(flags, ()) + assert_equal(sizes, ()) + + def test_signature2(self): + # more complicated names for variables + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1,i2),(J_1)->(_kAB)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 1)) + assert_equal(ixs, (0, 1, 2, 3)) + assert_equal(flags, (self.size_inferred,) * 4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature3(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1, i12), (J_1)->(i12, i2)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 2)) + assert_equal(ixs, (0, 1, 2, 1, 3)) + assert_equal(flags, (self.size_inferred,) * 4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature4(self): + # matrix_multiply signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n,k),(k,m)->(n,m)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred,) * 3) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature5(self): + # matmul signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n?,k),(k,m?)->(n?,m?)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred | self.can_ignore, + self.size_inferred, + self.size_inferred | self.can_ignore)) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature6(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "(3)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature7(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3),(03,3),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (0, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature8(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3?),(3?,3?),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature9(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "( 3) -> ( )") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature10(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "( 3? ) , (3? , 3?) 
,(n )-> ( 9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature_failure_extra_parenthesis(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "((i)),(i)->()") + + def test_signature_failure_mismatching_parenthesis(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "(i),)i(->()") + + def test_signature_failure_signature_missing_input_arg(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "(i),->()") + + def test_signature_failure_signature_missing_output_arg(self): + with assert_raises(ValueError): + umt.test_signature(2, 2, "(i),(i)->()") + + def test_get_signature(self): + assert_equal(np.vecdot.signature, "(n),(n)->()") + + def test_forced_sig(self): + a = 0.5 * np.arange(3, dtype='f8') + assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) + with assert_raises(TypeError): + np.add(a, 0.5, sig='i', casting='unsafe') + assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) + with assert_raises(TypeError): + np.add(a, 0.5, sig=('i4',), casting='unsafe') + assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), + casting='unsafe'), [0, 0, 1]) + + b = np.zeros((3,), dtype='f8') + np.add(a, 0.5, out=b) + assert_equal(b, [0.5, 1, 1.5]) + b[:] = 0 + with assert_raises(TypeError): + np.add(a, 0.5, sig='i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 0]) + np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + with assert_raises(TypeError): + np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') + assert_equal(b, [0, 0, 0]) + np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + + def test_signature_all_None(self): + # signature all None, is an acceptable alternative (since 1.21) + # to not providing a signature. + res1 = np.add([3], [4], sig=(None, None, None)) + res2 = np.add([3], [4]) + assert_array_equal(res1, res2) + res1 = np.maximum([3], [4], sig=(None, None, None)) + res2 = np.maximum([3], [4]) + assert_array_equal(res1, res2) + + with pytest.raises(TypeError): + # special case, that would be deprecated anyway, so errors: + np.add(3, 4, signature=(None,)) + + def test_signature_dtype_type(self): + # Since that will be the normal behaviour (past NumPy 1.21) + # we do support the types already: + float_dtype = type(np.dtype(np.float64)) + np.add(3, 4, signature=(float_dtype, float_dtype, None)) + + @pytest.mark.parametrize("get_kwarg", [ + param(lambda dt: {"dtype": dt}, id="dtype"), + param(lambda dt: {"signature": (dt, None, None)}, id="signature")]) + def test_signature_dtype_instances_allowed(self, get_kwarg): + # We allow certain dtype instances when there is a clear singleton + # and the given one is equivalent; mainly for backcompat. + int64 = np.dtype("int64") + int64_2 = pickle.loads(pickle.dumps(int64)) + # Relies on pickling behavior, if assert fails just remove test... 
+ assert int64 is not int64_2 + + assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64 + td = np.timedelta64(2, "s") + assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]" + + msg = "The `dtype` and `signature` arguments to ufuncs" + + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg(np.dtype("int64").newbyteorder())) + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg(np.dtype("m8[ns]"))) + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg("m8[ns]")) + + @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"]) + def test_partial_signature_mismatch(self, casting): + # If the second argument matches already, no need to specify it: + res = np.ldexp(np.float32(1.), np.int_(2), dtype="d") + assert res.dtype == "d" + res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d")) + assert res.dtype == "d" + + # ldexp only has a loop for long input as second argument, overriding + # the output cannot help with that (no matter the casting) + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), dtype="d") + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), signature=(None, None, "d")) + + def test_partial_signature_mismatch_with_cache(self): + with pytest.raises(TypeError): + np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) + # Ensure e,d->None is in the dispatching cache (double loop) + np.add(np.float16(1), np.float64(2)) + # The error must still be raised: + with pytest.raises(TypeError): + np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) + + def test_use_output_signature_for_all_arguments(self): + # Test that providing only `dtype=` or `signature=(None, None, dtype)` + # is sufficient if falling back to a homogeneous signature works. + # In this case, the `intp, intp -> intp` loop is chosen. + res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe") + assert res == 1 # the cast happens first. + res = np.power(1.5, 2.8, signature=(None, None, np.intp), + casting="unsafe") + assert res == 1 + with pytest.raises(TypeError): + # the unsafe casting would normally cause errors though: + np.power(1.5, 2.8, dtype=np.intp) + + def test_signature_errors(self): + with pytest.raises(TypeError, + match="the signature object to ufunc must be a string or"): + np.add(3, 4, signature=123.) # neither a string nor a tuple + + with pytest.raises(ValueError): + # bad symbols that do not translate to dtypes + np.add(3, 4, signature="%^->#") + + with pytest.raises(ValueError): + np.add(3, 4, signature=b"ii-i") # incomplete and byte string + + with pytest.raises(ValueError): + np.add(3, 4, signature="ii>i") # incomplete string + + with pytest.raises(ValueError): + np.add(3, 4, signature=(None, "f8")) # bad length + + with pytest.raises(UnicodeDecodeError): + np.add(3, 4, signature=b"\xff\xff->i") + + def test_forced_dtype_times(self): + # Signatures only set the type numbers (not the actual loop dtypes) + # so using `M` in a signature/dtype should generally work: + a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]') + np.maximum(a, a, dtype="M") + np.maximum.reduce(a, dtype="M") + + arr = np.arange(10, dtype="m8[s]") + np.add(arr, arr, dtype="m") + np.maximum(arr, arr, dtype="m") + + @pytest.mark.parametrize("ufunc", [np.add, np.sqrt]) + def test_cast_safety(self, ufunc): + """Basic test for the safest casts, because ufuncs inner loops can + indicate a cast-safety as well (which is normally always "no"). 
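+
+        (As a reminder of the hierarchy exercised here: "no" < "equiv" <
+        "safe" < "same_kind" < "unsafe".  A pure byte-order change needs
+        at least "equiv", while widening float32 to float64 additionally
+        needs "safe".)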
+ """ + def call_ufunc(arr, **kwargs): + return ufunc(*(arr,) * ufunc.nin, **kwargs) + + arr = np.array([1., 2., 3.], dtype=np.float32) + arr_bs = arr.astype(arr.dtype.newbyteorder()) + expected = call_ufunc(arr) + # Normally, a "no" cast: + res = call_ufunc(arr, casting="no") + assert_array_equal(expected, res) + # Byte-swapping is not allowed with "no" though: + with pytest.raises(TypeError): + call_ufunc(arr_bs, casting="no") + + # But is allowed with "equiv": + res = call_ufunc(arr_bs, casting="equiv") + assert_array_equal(expected, res) + + # Casting to float64 is safe, but not equiv: + with pytest.raises(TypeError): + call_ufunc(arr_bs, dtype=np.float64, casting="equiv") + + # but it is safe cast: + res = call_ufunc(arr_bs, dtype=np.float64, casting="safe") + expected = call_ufunc(arr.astype(np.float64)) # upcast + assert_array_equal(expected, res) + + @pytest.mark.parametrize("ufunc", [np.add, np.equal]) + def test_cast_safety_scalar(self, ufunc): + # We test add and equal, because equal has special scalar handling + # Note that the "equiv" casting behavior should maybe be considered + # a current implementation detail. + with pytest.raises(TypeError): + # this picks an integer loop, which is not safe + ufunc(3., 4., dtype=int, casting="safe") + + with pytest.raises(TypeError): + # We accept python float as float64 but not float32 for equiv. + ufunc(3., 4., dtype="float32", casting="equiv") + + # Special case for object and equal (note that equiv implies safe) + ufunc(3, 4, dtype=object, casting="equiv") + # Picks a double loop for both, first is equiv, second safe: + ufunc(np.array([3.]), 3., casting="equiv") + ufunc(np.array([3.]), 3, casting="safe") + ufunc(np.array([3]), 3, casting="equiv") + + def test_cast_safety_scalar_special(self): + # We allow this (and it succeeds) via object, although the equiv + # part may not be important. + np.equal(np.array([3]), 2**300, casting="equiv") + + def test_true_divide(self): + a = np.array(10) + b = np.array(20) + tgt = np.array(0.5) + + for tc in 'bhilqBHILQefdgFDG': + dt = np.dtype(tc) + aa = a.astype(dt) + bb = b.astype(dt) + + # Check result value and dtype. + for x, y in itertools.product([aa, -aa], [bb, -bb]): + + # Check with no output type specified + if tc in 'FDG': + tgt = complex(x) / complex(y) + else: + tgt = float(x) / float(y) + + res = np.true_divide(x, y) + rtol = max(np.finfo(res).resolution, 1e-15) + assert_allclose(res, tgt, rtol=rtol) + + if tc in 'bhilqBHILQ': + assert_(res.dtype.name == 'float64') + else: + assert_(res.dtype.name == dt.name) + + # Check with output type specified. This also checks for the + # incorrect casts in issue gh-3484 because the unary '-' does + # not change types, even for unsigned types, Hence casts in the + # ufunc from signed to unsigned and vice versa will lead to + # errors in the values. + for tcout in 'bhilqBHILQ': + dtout = np.dtype(tcout) + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + + for tcout in 'efdg': + dtout = np.dtype(tcout) + if tc in 'FDG': + # Casting complex to float is not allowed + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + else: + tgt = float(x) / float(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + # The value of tiny for double double is NaN + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(dtout).tiny): + atol = max(np.finfo(dtout).tiny, 3e-308) + else: + atol = 3e-308 + # Some test values result in invalid for float16 + # and the cast to it may overflow to inf. 
+ with np.errstate(invalid='ignore', over='ignore'): + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res) and tcout == 'e': + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + for tcout in 'FDG': + dtout = np.dtype(tcout) + tgt = complex(x) / complex(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + # The value of tiny for double double is NaN + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(dtout).tiny): + atol = max(np.finfo(dtout).tiny, 3e-308) + else: + atol = 3e-308 + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res): + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + # Check booleans + a = np.ones((), dtype=np.bool) + res = np.true_divide(a, a) + assert_(res == 1.0) + assert_(res.dtype.name == 'float64') + res = np.true_divide(~a, a) + assert_(res == 0.0) + assert_(res.dtype.name == 'float64') + + def test_sum_stability(self): + a = np.ones(500, dtype=np.float32) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) + + a = np.ones(500, dtype=np.float64) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_sum(self): + for dt in (int, np.float16, np.float32, np.float64, np.longdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + # warning if sum overflows, which it does in float16 + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + + tgt = dt(v * (v + 1) / 2) + overflow = not np.isfinite(tgt) + assert_equal(len(w), 1 * overflow) + + d = np.arange(1, v + 1, dtype=dt) + + assert_almost_equal(np.sum(d), tgt) + assert_equal(len(w), 2 * overflow) + + assert_almost_equal(np.sum(d[::-1]), tgt) + assert_equal(len(w), 3 * overflow) + + d = np.ones(500, dtype=dt) + assert_almost_equal(np.sum(d[::2]), 250.) + assert_almost_equal(np.sum(d[1::2]), 250.) + assert_almost_equal(np.sum(d[::3]), 167.) + assert_almost_equal(np.sum(d[1::3]), 167.) + assert_almost_equal(np.sum(d[::-2]), 250.) + assert_almost_equal(np.sum(d[-1::-2]), 250.) + assert_almost_equal(np.sum(d[::-3]), 167.) + assert_almost_equal(np.sum(d[-1::-3]), 167.) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + d += d + assert_almost_equal(d, 2.) + + def test_sum_complex(self): + for dt in (np.complex64, np.complex128, np.clongdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j) + d = np.empty(v, dtype=dt) + d.real = np.arange(1, v + 1) + d.imag = -np.arange(1, v + 1) + assert_almost_equal(np.sum(d), tgt) + assert_almost_equal(np.sum(d[::-1]), tgt) + + d = np.ones(500, dtype=dt) + 1j + assert_almost_equal(np.sum(d[::2]), 250. + 250j) + assert_almost_equal(np.sum(d[1::2]), 250. + 250j) + assert_almost_equal(np.sum(d[::3]), 167. + 167j) + assert_almost_equal(np.sum(d[1::3]), 167. + 167j) + assert_almost_equal(np.sum(d[::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[::-3]), 167. + 167j) + assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + 1j + d += d + assert_almost_equal(d, 2. 
+ 2j) + + def test_sum_initial(self): + # Integer, single axis + assert_equal(np.sum([3], initial=2), 5) + + # Floating point + assert_almost_equal(np.sum([0.2], initial=0.1), 0.3) + + # Multiple non-adjacent axes + assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2), + [12, 12, 12]) + + def test_sum_where(self): + # More extensive tests done in test_reduction_with_where. + assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.) + assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5., + where=[True, False]), [9., 5.]) + + def test_vecdot(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.vecdot(arr1, arr2) + expected = np.array([5, 14]) + + assert_array_equal(actual, expected) + + actual2 = np.vecdot(arr1.T, arr2.T, axis=-2) + assert_array_equal(actual2, expected) + + actual3 = np.vecdot(arr1.astype("object"), arr2) + assert_array_equal(actual3, expected.astype("object")) + + def test_matvec(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.matvec(arr1, arr2) + expected = np.array([[5, 14]]) + + assert_array_equal(actual, expected) + + actual2 = np.matvec(arr1.T, arr2.T, axes=[(-1, -2), -2, -1]) + assert_array_equal(actual2, expected) + + actual3 = np.matvec(arr1.astype("object"), arr2) + assert_array_equal(actual3, expected.astype("object")) + + @pytest.mark.parametrize("vec", [ + np.array([[1., 2., 3.], [4., 5., 6.]]), + np.array([[1., 2j, 3.], [4., 5., 6j]]), + np.array([[1., 2., 3.], [4., 5., 6.]], dtype=object), + np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)]) + @pytest.mark.parametrize("matrix", [ + None, + np.array([[1. + 1j, 0.5, -0.5j], + [0.25, 2j, 0.], + [4., 0., -1j]])]) + def test_vecmatvec_identity(self, matrix, vec): + """Check that (x†A)x equals x†(Ax).""" + mat = matrix if matrix is not None else np.eye(3) + matvec = np.matvec(mat, vec) # Ax + vecmat = np.vecmat(vec, mat) # x†A + if matrix is None: + assert_array_equal(matvec, vec) + assert_array_equal(vecmat.conj(), vec) + assert_array_equal(matvec, (mat @ vec[..., np.newaxis]).squeeze(-1)) + assert_array_equal(vecmat, (vec[..., np.newaxis].mT.conj() + @ mat).squeeze(-2)) + expected = np.einsum('...i,ij,...j', vec.conj(), mat, vec) + vec_matvec = (vec.conj() * matvec).sum(-1) + vecmat_vec = (vecmat * vec).sum(-1) + assert_array_equal(vec_matvec, expected) + assert_array_equal(vecmat_vec, expected) + + @pytest.mark.parametrize("ufunc, shape1, shape2, conj", [ + (np.vecdot, (3,), (3,), True), + (np.vecmat, (3,), (3, 1), True), + (np.matvec, (1, 3), (3,), False), + (np.matmul, (1, 3), (3, 1), False), + ]) + def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): + arr1 = np.array([1, 2j, 3]) + arr2 = np.array([1, 2, 3]) + + actual1 = ufunc(arr1.reshape(shape1), arr2.reshape(shape2)) + expected1 = np.array(((arr1.conj() if conj else arr1) * arr2).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual1, expected1) + # This would fail for conj=True, since matmul omits the conjugate. 
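+        # (np.matmul computes a plain sum of products with no conjugation,
+        # whereas vecdot and vecmat conjugate their vector argument, which
+        # is the `conj` column in the parametrization above.)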
+ if not conj: + assert_array_equal(arr1.reshape(shape1) @ arr2.reshape(shape2), + expected1) + + actual2 = ufunc(arr2.reshape(shape1), arr1.reshape(shape2)) + expected2 = np.array(((arr2.conj() if conj else arr2) * arr1).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual2, expected2) + + actual3 = ufunc(arr1.reshape(shape1).astype("object"), + arr2.reshape(shape2).astype("object")) + expected3 = expected1.astype(object) + assert_array_equal(actual3, expected3) + + def test_vecdot_subclass(self): + class MySubclass(np.ndarray): + pass + + arr1 = np.arange(6).reshape((2, 3)).view(MySubclass) + arr2 = np.arange(3).reshape((1, 3)).view(MySubclass) + result = np.vecdot(arr1, arr2) + assert isinstance(result, MySubclass) + + def test_vecdot_object_no_conjugate(self): + arr = np.array(["1", "2"], dtype=object) + with pytest.raises(AttributeError, match="conjugate"): + np.vecdot(arr, arr) + + def test_vecdot_object_breaks_outer_loop_on_error(self): + arr1 = np.ones((3, 3)).astype(object) + arr2 = arr1.copy() + arr2[1, 1] = None + out = np.zeros(3).astype(object) + with pytest.raises(TypeError, match=r"\*: 'float' and 'NoneType'"): + np.vecdot(arr1, arr2, out=out) + assert out[0] == 3 + assert out[1] == out[2] == 0 + + def test_broadcast(self): + msg = "broadcast" + a = np.arange(4).reshape((2, 1, 2)) + b = np.arange(4).reshape((1, 2, 2)) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) + msg = "extend & broadcast loop dimensions" + b = np.arange(4).reshape((2, 2)) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) + # Broadcast in core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.arange(4).reshape((4, 1)) + assert_raises(ValueError, np.vecdot, a, b) + # Extend core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.array(7) + assert_raises(ValueError, np.vecdot, a, b) + # Broadcast should fail + a = np.arange(2).reshape((2, 1, 1)) + b = np.arange(3).reshape((3, 1, 1)) + assert_raises(ValueError, np.vecdot, a, b) + + # Writing to a broadcasted array with overlap should warn, gh-2705 + a = np.arange(2) + b = np.arange(4).reshape((2, 2)) + u, v = np.broadcast_arrays(a, b) + assert_equal(u.strides[0], 0) + x = u + v + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + u += v + assert_equal(len(w), 1) + assert_(x[0, 0] != u[0, 0]) + + # Output reduction should not be allowed. + # See gh-15139 + a = np.arange(6).reshape(3, 2) + b = np.ones(2) + out = np.empty(()) + assert_raises(ValueError, np.vecdot, a, b, out) + out2 = np.empty(3) + c = np.vecdot(a, b, out2) + assert_(c is out2) + + def test_out_broadcasts(self): + # For ufuncs and gufuncs (not for reductions), we currently allow + # the output to cause broadcasting of the input arrays. + # both along dimensions with shape 1 and dimensions which do not + # exist at all in the inputs. 
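+        # (Concretely: below, the (1, 3) input behaves as if broadcast to
+        # the full (5, 4, 3) shape of `out` before the inner loop runs.)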
+        arr = np.arange(3).reshape(1, 3)
+        out = np.empty((5, 4, 3))
+        np.add(arr, arr, out=out)
+        assert (out == np.arange(3) * 2).all()
+
+        # The same holds for gufuncs (gh-16484)
+        np.vecdot(arr, arr, out=out)
+        # the result would be just a scalar `5`, but is broadcast fully:
+        assert (out == 5).all()
+
+    @pytest.mark.parametrize(["arr", "out"], [
+        ([2], np.empty(())),
+        ([1, 2], np.empty(1)),
+        (np.ones((4, 3)), np.empty((4, 1)))],
+        ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"])
+    def test_out_broadcast_errors(self, arr, out):
+        # Output is (currently) allowed to broadcast inputs, but it cannot be
+        # smaller than the actual result.
+        with pytest.raises(ValueError, match="non-broadcastable"):
+            np.positive(arr, out=out)
+
+        with pytest.raises(ValueError, match="non-broadcastable"):
+            np.add(np.ones(()), arr, out=out)
+
+    def test_type_cast(self):
+        msg = "type cast"
+        a = np.arange(6, dtype='short').reshape((2, 3))
+        assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1),
+                           err_msg=msg)
+        msg = "type cast on one argument"
+        a = np.arange(6).reshape((2, 3))
+        b = a + 0.1
+        assert_array_almost_equal(np.vecdot(a, b), np.sum(a * b, axis=-1),
+                                  err_msg=msg)
+
+    def test_endian(self):
+        msg = "big endian"
+        a = np.arange(6, dtype='>i4').reshape((2, 3))
+        assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1),
+                           err_msg=msg)
+        msg = "little endian"
+        a = np.arange(6, dtype='<i4').reshape((2, 3))
+        assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1),
+                           err_msg=msg)
+
+    def test_axes_argument(self):
+        # vecdot signature: '(n),(n)->()'
+        a = np.arange(27.).reshape((3, 3, 3))
+        b = np.arange(10., 19.).reshape((3, 1, 3))
+        # basic tests on inputs (outputs tested below with matrix_multiply).
+        c = np.vecdot(a, b)
+        assert_array_equal(c, (a * b).sum(-1))
+        # default
+        c = np.vecdot(a, b, axes=[(-1,), (-1,), ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # integers ok for single axis.
+        c = np.vecdot(a, b, axes=[-1, -1, ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # mix fine
+        c = np.vecdot(a, b, axes=[(-1,), -1, ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # can omit last axis.
+        c = np.vecdot(a, b, axes=[-1, -1])
+        assert_array_equal(c, (a * b).sum(-1))
+        # can pass in other types of integer (with __index__ protocol)
+        c = np.vecdot(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+        assert_array_equal(c, (a * b).sum(-1))
+        # swap some axes
+        c = np.vecdot(a, b, axes=[0, 0])
+        assert_array_equal(c, (a * b).sum(0))
+        c = np.vecdot(a, b, axes=[0, 2])
+        assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+        # Check errors for improperly constructed axes arguments.
+        # should have list.
+        assert_raises(TypeError, np.vecdot, a, b, axes=-1)
+        # needs enough elements
+        assert_raises(ValueError, np.vecdot, a, b, axes=[-1])
+        # should pass in indices.
+        assert_raises(TypeError, np.vecdot, a, b, axes=[-1.0, -1.0])
+        assert_raises(TypeError, np.vecdot, a, b, axes=[(-1.0,), -1])
+        assert_raises(TypeError, np.vecdot, a, b, axes=[None, 1])
+        # cannot pass an index unless there is only one dimension
+        # (output is wrong in this case)
+        assert_raises(AxisError, np.vecdot, a, b, axes=[-1, -1, -1])
+        # or pass in generally the wrong number of axes
+        assert_raises(AxisError, np.vecdot, a, b, axes=[-1, -1, (-1,)])
+        assert_raises(AxisError, np.vecdot, a, b, axes=[-1, (-2, -1), ()])
+        # axes need to have same length.
+        assert_raises(ValueError, np.vecdot, a, b, axes=[0, 1])
+
+        # matrix_multiply signature: '(m,n),(n,p)->(m,p)'
+        mm = umt.matrix_multiply
+        a = np.arange(12).reshape((2, 3, 2))
+        b = np.arange(8).reshape((2, 2, 2, 1)) + 1
+        # Sanity check.
+        c = mm(a, b)
+        assert_array_equal(c, np.matmul(a, b))
+        # Default axes.
+ c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]) + assert_array_equal(c, np.matmul(a, b)) + # Default with explicit axes. + c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)]) + assert_array_equal(c, np.matmul(a, b)) + # swap some axes. + c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)]) + assert_array_equal(c, np.matmul(a.transpose(1, 0, 2), + b.transpose(0, 3, 1, 2))) + # Default with output array. + c = np.empty((2, 2, 3, 1)) + d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b)) + # Transposed output array + c = np.empty((1, 2, 2, 3)) + d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2)) + # Check errors for improperly constructed axes arguments. + # wrong argument + assert_raises(TypeError, mm, a, b, axis=1) + # axes should be list + assert_raises(TypeError, mm, a, b, axes=1) + assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1))) + # list needs to have right length + assert_raises(ValueError, mm, a, b, axes=[]) + assert_raises(ValueError, mm, a, b, axes=[(-2, -1)]) + # list should not contain None, or lists + assert_raises(TypeError, mm, a, b, axes=[None, None, None]) + assert_raises(TypeError, + mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]]) + assert_raises(TypeError, + mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]]) + assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None]) + # single integers are AxisErrors if more are required + assert_raises(AxisError, mm, a, b, axes=[-1, -1, -1]) + assert_raises(AxisError, mm, a, b, axes=[(-2, -1), (-2, -1), -1]) + # tuples should not have duplicated values + assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)]) + # arrays should have enough axes. + z = np.zeros((2, 2)) + assert_raises(ValueError, mm, z, z[0]) + assert_raises(ValueError, mm, z, z, out=z[:, 0]) + assert_raises(ValueError, mm, z[1], z, axes=[0, 1]) + assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1]) + # Regular ufuncs should not accept axes. + assert_raises(TypeError, np.add, 1., 1., axes=[0]) + # should be able to deal with bad unrelated kwargs. + assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True) + + def test_axis_argument(self): + # vecdot signature: '(n),(n)->()' + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = np.vecdot(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, axis=-1) + assert_array_equal(c, (a * b).sum(-1)) + out = np.zeros_like(c) + d = np.vecdot(a, b, axis=-1, out=out) + assert_(d is out) + assert_array_equal(d, c) + c = np.vecdot(a, b, axis=0) + assert_array_equal(c, (a * b).sum(0)) + # Sanity checks on innerwt and cumsum. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, axis=0), + np.sum(a * b * w, axis=0)) + assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0)) + assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1)) + out = np.empty_like(a) + b = umt.cumsum(a, out=out, axis=0) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=0)) + b = umt.cumsum(a, out=out, axis=1) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=-1)) + # Check errors. + # Cannot pass in both axis and axes. + assert_raises(TypeError, np.vecdot, a, b, axis=0, axes=[0, 0]) + # Not an integer. + assert_raises(TypeError, np.vecdot, a, b, axis=[0]) + # more than 1 core dimensions. 
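+        # (axis= is only shorthand for gufuncs whose cores all share a
+        # single 1-d dimension; matrix_multiply's cores in
+        # '(m,n),(n,p)->(m,p)' are 2-d, so axes= must be used instead.)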
+ mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, axis=1) + # Output wrong size in axis. + out = np.empty((1, 2, 3), dtype=a.dtype) + assert_raises(ValueError, umt.cumsum, a, out=out, axis=0) + # Regular ufuncs should not accept axis. + assert_raises(TypeError, np.add, 1., 1., axis=0) + + def test_keepdims_argument(self): + # vecdot signature: '(n),(n)->()' + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = np.vecdot(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + out = np.zeros_like(c) + d = np.vecdot(a, b, keepdims=True, out=out) + assert_(d is out) + assert_array_equal(d, c) + # Now combined with axis and axes. + c = np.vecdot(a, b, axis=-1, keepdims=False) + assert_array_equal(c, (a * b).sum(-1, keepdims=False)) + c = np.vecdot(a, b, axis=-1, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = np.vecdot(a, b, axis=0, keepdims=False) + assert_array_equal(c, (a * b).sum(0, keepdims=False)) + c = np.vecdot(a, b, axis=0, keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = np.vecdot(a, b, axes=[(-1,), (-1,), ()], keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = np.vecdot(a, b, axes=[0, 0], keepdims=False) + assert_array_equal(c, (a * b).sum(0)) + c = np.vecdot(a, b, axes=[0, 0, 0], keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2], keepdims=False) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + c = np.vecdot(a, b, axes=[0, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2, 0], keepdims=True) + assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True)) + # Hardly useful, but should work. + c = np.vecdot(a, b, axes=[0, 2, 1], keepdims=True) + assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1)) + .sum(1, keepdims=True)) + # Check with two core dimensions. + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected = uml.det(a) + c = uml.det(a, keepdims=False) + assert_array_equal(c, expected) + c = uml.det(a, keepdims=True) + assert_array_equal(c, expected[:, np.newaxis, np.newaxis]) + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected_s, expected_l = uml.slogdet(a) + cs, cl = uml.slogdet(a, keepdims=False) + assert_array_equal(cs, expected_s) + assert_array_equal(cl, expected_l) + cs, cl = uml.slogdet(a, keepdims=True) + assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis]) + assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis]) + # Sanity check on innerwt. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, keepdims=True), + np.sum(a * b * w, axis=-1, keepdims=True)) + assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True), + np.sum(a * b * w, axis=0, keepdims=True)) + # Check errors. + # Not a boolean + assert_raises(TypeError, np.vecdot, a, b, keepdims='true') + # More than 1 core dimension, and core output dimensions. 
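+        # (keepdims is only meaningful for signatures like '(n),(n)->()',
+        # where the single reduced core axis can be kept as a size-1 axis;
+        # with 2-d cores or core output dims there is nothing to keep.)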
+ mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, keepdims=True) + assert_raises(TypeError, mm, a, b, keepdims=False) + # Regular ufuncs should not accept keepdims. + assert_raises(TypeError, np.add, 1., 1., keepdims=False) + + def test_innerwt(self): + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + a = np.arange(100, 124).reshape((2, 3, 4)) + b = np.arange(200, 224).reshape((2, 3, 4)) + w = np.arange(300, 324).reshape((2, 3, 4)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + + def test_innerwt_empty(self): + """Test generalized ufunc with zero-sized operands""" + a = np.array([], dtype='f8') + b = np.array([], dtype='f8') + w = np.array([], dtype='f8') + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + + def test_cross1d(self): + """Test with fixed-sized signature.""" + a = np.eye(3) + assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3))) + out = np.zeros((3, 3)) + result = umt.cross1d(a[0], a, out) + assert_(result is out) + assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1]))) + assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4)) + assert_raises(ValueError, umt.cross1d, a, np.arange(4.)) + # Wrong output core dimension. + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4))) + # Wrong output broadcast dimension (see gh-15139). + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3)) + + def test_can_ignore_signature(self): + # Comparing the effects of ? in signature: + # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there. + # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p. + mat = np.arange(12).reshape((2, 3, 2)) + single_vec = np.arange(2) + col_vec = single_vec[:, np.newaxis] + col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # matrix @ single column vector with proper dimension + mm_col_vec = umt.matrix_multiply(mat, col_vec) + # matmul does the same thing + matmul_col_vec = umt.matmul(mat, col_vec) + assert_array_equal(matmul_col_vec, mm_col_vec) + # matrix @ vector without dimension making it a column vector. + # matrix multiply fails -> missing core dim. + assert_raises(ValueError, umt.matrix_multiply, mat, single_vec) + # matmul mimicker passes, and returns a vector. + matmul_col = umt.matmul(mat, single_vec) + assert_array_equal(matmul_col, mm_col_vec.squeeze()) + # Now with a column array: same as for column vector, + # broadcasting sensibly. 
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array) + matmul_col_vec = umt.matmul(mat, col_vec_array) + assert_array_equal(matmul_col_vec, mm_col_vec) + # As above, but for row vector + single_vec = np.arange(3) + row_vec = single_vec[np.newaxis, :] + row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1 + # row vector @ matrix + mm_row_vec = umt.matrix_multiply(row_vec, mat) + matmul_row_vec = umt.matmul(row_vec, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # single row vector @ matrix + assert_raises(ValueError, umt.matrix_multiply, single_vec, mat) + matmul_row = umt.matmul(single_vec, mat) + assert_array_equal(matmul_row, mm_row_vec.squeeze()) + # row vector array @ matrix + mm_row_vec = umt.matrix_multiply(row_vec_array, mat) + matmul_row_vec = umt.matmul(row_vec_array, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # Now for vector combinations + # row vector @ column vector + col_vec = row_vec.T + col_vec_array = row_vec_array.swapaxes(-2, -1) + mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec) + matmul_row_col_vec = umt.matmul(row_vec, col_vec) + assert_array_equal(matmul_row_col_vec, mm_row_col_vec) + # single row vector @ single col vector + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec) + matmul_row_col = umt.matmul(single_vec, single_vec) + assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze()) + # row vector array @ matrix + mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array) + matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array) + assert_array_equal(matmul_row_col_array, mm_row_col_array) + # Finally, check that things are *not* squeezed if one gives an + # output. + out = np.zeros_like(mm_row_col_array) + out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + out[:] = 0 + out = umt.matmul(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + # And check one cannot put missing dimensions back. + out = np.zeros_like(mm_row_col_vec) + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec, + out) + # But fine for matmul, since it is just a broadcast. 
+ out = umt.matmul(single_vec, single_vec, out) + assert_array_equal(out, mm_row_col_vec.squeeze()) + + def test_matrix_multiply(self): + self.compare_matrix_multiply_results(np.int64) + self.compare_matrix_multiply_results(np.double) + + def test_matrix_multiply_umath_empty(self): + res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0))) + assert_array_equal(res, np.zeros((0, 0))) + res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10))) + assert_array_equal(res, np.zeros((10, 10))) + + def compare_matrix_multiply_results(self, tp): + d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) + d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) + msg = f"matrix multiply on type {d1.dtype.name}" + + def permute_n(n): + if n == 1: + return ([0],) + ret = () + base = permute_n(n - 1) + for perm in base: + for i in range(n): + new = perm + [n - 1] + new[n - 1] = new[i] + new[i] = n - 1 + ret += (new,) + return ret + + def slice_n(n): + if n == 0: + return ((),) + ret = () + base = slice_n(n - 1) + for sl in base: + ret += (sl + (slice(None),),) + ret += (sl + (slice(0, 1),),) + return ret + + def broadcastable(s1, s2): + return s1 == s2 or 1 in {s1, s2} + + permute_3 = permute_n(3) + slice_3 = slice_n(3) + ((slice(None, None, -1),) * 3,) + + ref = True + for p1 in permute_3: + for p2 in permute_3: + for s1 in slice_3: + for s2 in slice_3: + a1 = d1.transpose(p1)[s1] + a2 = d2.transpose(p2)[s2] + ref = ref and a1.base is not None + ref = ref and a2.base is not None + if (a1.shape[-1] == a2.shape[-2] and + broadcastable(a1.shape[0], a2.shape[0])): + assert_array_almost_equal( + umt.matrix_multiply(a1, a2), + np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * + a1[..., np.newaxis, :], axis=-1), + err_msg=msg + f' {str(a1.shape)} {str(a2.shape)}') + + assert_equal(ref, True, err_msg="reference check") + + def test_euclidean_pdist(self): + a = np.arange(12, dtype=float).reshape(4, 3) + out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) + umt.euclidean_pdist(a, out) + b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) + b = b[~np.tri(a.shape[0], dtype=bool)] + assert_almost_equal(out, b) + # An output array is required to determine p with signature (n,d)->(p) + assert_raises(ValueError, umt.euclidean_pdist, a) + + def test_cumsum(self): + a = np.arange(10) + result = umt.cumsum(a) + assert_array_equal(result, a.cumsum()) + + def test_object_logical(self): + a = np.array([3, None, True, False, "test", ""], dtype=object) + assert_equal(np.logical_or(a, None), + np.array([x or None for x in a], dtype=object)) + assert_equal(np.logical_or(a, True), + np.array([x or True for x in a], dtype=object)) + assert_equal(np.logical_or(a, 12), + np.array([x or 12 for x in a], dtype=object)) + assert_equal(np.logical_or(a, "blah"), + np.array([x or "blah" for x in a], dtype=object)) + + assert_equal(np.logical_and(a, None), + np.array([x and None for x in a], dtype=object)) + assert_equal(np.logical_and(a, True), + np.array([x and True for x in a], dtype=object)) + assert_equal(np.logical_and(a, 12), + np.array([x and 12 for x in a], dtype=object)) + assert_equal(np.logical_and(a, "blah"), + np.array([x and "blah" for x in a], dtype=object)) + + assert_equal(np.logical_not(a), + np.array([not x for x in a], dtype=object)) + + assert_equal(np.logical_or.reduce(a), 3) + assert_equal(np.logical_and.reduce(a), None) + + def test_object_comparison(self): + class HasComparisons: + def __eq__(self, other): + return '==' + + arr0d = np.array(HasComparisons()) + assert_equal(arr0d == arr0d, True) + 
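+        # (What presumably happens: the object loop calls __eq__, which
+        # returns the truthy string '==', and casting that result to bool
+        # gives True; dtype=object below keeps the raw '==' instead.)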
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast + + arr1d = np.array([HasComparisons()]) + assert_equal(arr1d == arr1d, np.array([True])) + assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) + + def test_object_array_reduction(self): + # Reductions on object arrays + a = np.array(['a', 'b', 'c'], dtype=object) + assert_equal(np.sum(a), 'abc') + assert_equal(np.max(a), 'c') + assert_equal(np.min(a), 'a') + a = np.array([True, False, True], dtype=object) + assert_equal(np.sum(a), 2) + assert_equal(np.prod(a), 0) + assert_equal(np.any(a), True) + assert_equal(np.all(a), False) + assert_equal(np.max(a), True) + assert_equal(np.min(a), False) + assert_equal(np.array([[1]], dtype=object).sum(), 1) + assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2]) + assert_equal(np.array([1], dtype=object).sum(initial=1), 2) + assert_equal(np.array([[1], [2, 3]], dtype=object) + .sum(initial=[0], where=[False, True]), [0, 2, 3]) + + def test_object_array_accumulate_inplace(self): + # Checks that in-place accumulates work, see also gh-7402 + arr = np.ones(4, dtype=object) + arr[:] = [[1] for i in range(4)] + # Twice reproduced also for tuples: + np.add.accumulate(arr, out=arr) + np.add.accumulate(arr, out=arr) + assert_array_equal(arr, + np.array([[1] * i for i in [1, 3, 6, 10]], dtype=object), + ) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + np.add.accumulate(arr, out=arr, axis=-1) + np.add.accumulate(arr, out=arr, axis=-1) + assert_array_equal(arr[0, :], + np.array([[2] * i for i in [1, 3, 6, 10]], dtype=object), + ) + + def test_object_array_accumulate_failure(self): + # Typical accumulation on object works as expected: + res = np.add.accumulate(np.array([1, 0, 2], dtype=object)) + assert_array_equal(res, np.array([1, 1, 3], dtype=object)) + # But errors are propagated from the inner-loop if they occur: + with pytest.raises(TypeError): + np.add.accumulate([1, None, 2]) + + def test_object_array_reduceat_inplace(self): + # Checks that in-place reduceats work, see also gh-7465 + arr = np.empty(4, dtype=object) + arr[:] = [[1] for i in range(4)] + out = np.empty(4, dtype=object) + out[:] = [[1] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr) + np.add.reduceat(arr, np.arange(4), out=arr) + assert_array_equal(arr, out) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + out = np.ones((2, 4), dtype=object) + out[0, :] = [[2] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + assert_array_equal(arr, out) + + def test_object_array_reduceat_failure(self): + # Reduceat works as expected when no invalid operation occurs (None is + # not involved in an operation here) + res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2]) + assert_array_equal(res, np.array([None, 2], dtype=object)) + # But errors when None would be involved in an operation: + with pytest.raises(TypeError): + np.add.reduceat([1, None, 2], [0, 2]) + + def test_zerosize_reduction(self): + # Test with default dtype and object dtype + for a in [[], np.array([], dtype=object)]: + assert_equal(np.sum(a), 0) + assert_equal(np.prod(a), 1) + assert_equal(np.any(a), False) + assert_equal(np.all(a), True) + assert_raises(ValueError, np.max, a) + 
assert_raises(ValueError, np.min, a) + + def test_axis_out_of_bounds(self): + a = np.array([False, False]) + assert_raises(AxisError, a.all, axis=1) + a = np.array([False, False]) + assert_raises(AxisError, a.all, axis=-2) + + a = np.array([False, False]) + assert_raises(AxisError, a.any, axis=1) + a = np.array([False, False]) + assert_raises(AxisError, a.any, axis=-2) + + def test_scalar_reduction(self): + # The functions 'sum', 'prod', etc allow specifying axis=0 + # even for scalars + assert_equal(np.sum(3, axis=0), 3) + assert_equal(np.prod(3.5, axis=0), 3.5) + assert_equal(np.any(True, axis=0), True) + assert_equal(np.all(False, axis=0), False) + assert_equal(np.max(3, axis=0), 3) + assert_equal(np.min(2.5, axis=0), 2.5) + + # Check scalar behaviour for ufuncs without an identity + assert_equal(np.power.reduce(3), 3) + + # Make sure that scalars are coming out from this operation + assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) + + # check if scalars/0-d arrays get cast + assert_(type(np.any(0, axis=0)) is np.bool) + + # assert that 0-d arrays get wrapped + class MyArray(np.ndarray): + pass + a = np.array(1).view(MyArray) + assert_(type(np.any(a)) is MyArray) + + def test_casting_out_param(self): + # Test that it's possible to do casts on output + a = np.ones((200, 100), np.int64) + b = np.ones((200, 100), np.int64) + c = np.ones((200, 100), np.float64) + np.add(a, b, out=c) + assert_equal(c, 2) + + a = np.zeros(65536) + b = np.zeros(65536, dtype=np.float32) + np.subtract(a, 0, out=b) + assert_equal(b, 0) + + def test_where_param(self): + # Test that the where= ufunc parameter works with regular arrays + a = np.arange(7) + b = np.ones(7) + c = np.zeros(7) + np.add(a, b, out=c, where=(a % 2 == 1)) + assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) + + a = np.arange(4).reshape(2, 2) + 2 + np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) + assert_equal(a, [[2, 27], [16, 5]]) + # Broadcasting the where= parameter + np.subtract(a, 2, out=a, where=[True, False]) + assert_equal(a, [[0, 27], [14, 5]]) + + def test_where_param_buffer_output(self): + # This test is temporarily skipped because it requires + # adding masking features to the nditer to work properly + + # With casting on output + a = np.ones(10, np.int64) + b = np.ones(10, np.int64) + c = 1.5 * np.ones(10, np.float64) + np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) + assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) + + def test_where_param_alloc(self): + # With casting and allocated output + a = np.array([1], dtype=np.int64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + # No casting and allocated output + a = np.array([1], dtype=np.float64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + def test_where_with_broadcasting(self): + # See gh-17198 + a = np.random.random((5000, 4)) + b = np.random.random((5000, 1)) + + where = a > 0.3 + out = np.full_like(a, 0) + np.less(a, b, where=where, out=out) + b_where = np.broadcast_to(b, a.shape)[where] + assert_array_equal((a[where] < b_where), out[where].astype(bool)) + assert not out[~where].any() # outside mask, out remains all 0 + + @staticmethod + def identityless_reduce_arrs(): + yield np.empty((2, 3, 4), order='C') + yield np.empty((2, 3, 4), order='F') + # Mixed order (reduce order differs outer) + yield 
np.empty((2, 4, 3), order='C').swapaxes(1, 2) + # Reversed order + yield np.empty((2, 3, 4), order='C')[::-1, ::-1, ::-1] + # Not contiguous + yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:] + # Not contiguous and not aligned + a = np.empty((3 * 4 * 5 * 8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + yield a + + @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) + def test_identityless_reduction(self, a, pos): + # np.minimum.reduce is an identityless reduction + a[...] = 1 + a[pos] = 0 + + for axis in [None, (0, 1), (0, 2), (1, 2), 0, 1, 2, ()]: + if axis is None: + axes = np.array([], dtype=np.intp) + else: + axes = np.delete(np.arange(a.ndim), axis) + + expected_pos = tuple(np.array(pos)[axes]) + expected = np.ones(np.array(a.shape)[axes]) + expected[expected_pos] = 0 + + res = np.minimum.reduce(a, axis=axis) + assert_equal(res, expected, strict=True) + + res = np.full_like(res, np.nan) + np.minimum.reduce(a, axis=axis, out=res) + assert_equal(res, expected, strict=True) + + @requires_memory(6 * 1024**3) + @pytest.mark.skipif(sys.maxsize < 2**32, + reason="test array too large for 32bit platform") + def test_identityless_reduction_huge_array(self): + # Regression test for gh-20921 (copying identity incorrectly failed) + arr = np.zeros((2, 2**31), 'uint8') + arr[:, 0] = [1, 3] + arr[:, -1] = [4, 1] + res = np.maximum.reduce(arr, axis=0) + del arr + assert res[0] == 3 + assert res[-1] == 4 + + def test_reduce_identity_depends_on_loop(self): + """ + The type of the result should always depend on the selected loop, not + necessarily the output (only relevant for object arrays). + """ + # For an object loop, the default value 0 with type int is used: + assert type(np.add.reduce([], dtype=object)) is int + out = np.array(None, dtype=object) + # When the loop is float64 but `out` is object this does not happen, + # the result is float64 cast to object (which gives Python `float`). + np.add.reduce([], out=out, dtype=np.float64) + assert type(out[()]) is float + + def test_initial_reduction(self): + # np.minimum.reduce is an identityless reduction + + # For cases like np.maximum(np.abs(...), initial=0) + # More generally, a supremum over non-negative numbers. + assert_equal(np.maximum.reduce([], initial=0), 0) + + # For cases like reduction of an empty array over the reals. + assert_equal(np.minimum.reduce([], initial=np.inf), np.inf) + assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf) + + # Random tests + assert_equal(np.minimum.reduce([5], initial=4), 4) + assert_equal(np.maximum.reduce([4], initial=5), 5) + assert_equal(np.maximum.reduce([5], initial=4), 5) + assert_equal(np.minimum.reduce([4], initial=5), 4) + + # Check initial=None raises ValueError for both types of ufunc reductions + assert_raises(ValueError, np.minimum.reduce, [], initial=None) + assert_raises(ValueError, np.add.reduce, [], initial=None) + # Also in the somewhat special object case: + with pytest.raises(ValueError): + np.add.reduce([], initial=None, dtype=object) + + # Check that np._NoValue gives default behavior. 
+        assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+        # Check that initial kwarg behaves as intended for dtype=object
+        a = np.array([10], dtype=object)
+        res = np.add.reduce(a, initial=5)
+        assert_equal(res, 15)
+
+    def test_empty_reduction_and_identity(self):
+        arr = np.zeros((0, 5))
+        # OK, since the reduction itself is *not* empty, the result is
+        assert np.true_divide.reduce(arr, axis=1).shape == (0,)
+        # Not OK, the reduction itself is empty and we have no identity
+        with pytest.raises(ValueError):
+            np.true_divide.reduce(arr, axis=0)
+
+        # Test that an empty reduction fails also if the result is empty
+        arr = np.zeros((0, 0, 5))
+        with pytest.raises(ValueError):
+            np.true_divide.reduce(arr, axis=1)
+
+        # Division reduction makes sense with `initial=1` (empty or not):
+        res = np.true_divide.reduce(arr, axis=1, initial=1)
+        assert_array_equal(res, np.ones((0, 5)))
+
+    @pytest.mark.parametrize('axis', (0, 1, None))
+    @pytest.mark.parametrize('where', (np.array([False, True, True]),
+                                       np.array([[True], [False], [True]]),
+                                       np.array([[True, False, False],
+                                                 [False, True, False],
+                                                 [False, True, True]])))
+    def test_reduction_with_where(self, axis, where):
+        a = np.arange(9.).reshape(3, 3)
+        a_copy = a.copy()
+        a_check = np.zeros_like(a)
+        np.positive(a, out=a_check, where=where)
+
+        res = np.add.reduce(a, axis=axis, where=where)
+        check = a_check.sum(axis)
+        assert_equal(res, check)
+        # Check we do not overwrite elements of a internally.
+        assert_array_equal(a, a_copy)
+
+    @pytest.mark.parametrize(('axis', 'where'),
+                             ((0, np.array([True, False, True])),
+                              (1, [True, True, False]),
+                              (None, True)))
+    @pytest.mark.parametrize('initial', (-np.inf, 5.))
+    def test_reduction_with_where_and_initial(self, axis, where, initial):
+        a = np.arange(9.).reshape(3, 3)
+        a_copy = a.copy()
+        a_check = np.full(a.shape, -np.inf)
+        np.positive(a, out=a_check, where=where)
+
+        res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
+        check = a_check.max(axis, initial=initial)
+        assert_equal(res, check)
+
+    def test_reduction_where_initial_needed(self):
+        a = np.arange(9.).reshape(3, 3)
+        m = [False, True, False]
+        assert_raises(ValueError, np.maximum.reduce, a, where=m)
+
+    def test_identityless_reduction_nonreorderable(self):
+        a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
+
+        res = np.divide.reduce(a, axis=0)
+        assert_equal(res, [8.0, 4.0, 8.0])
+
+        res = np.divide.reduce(a, axis=1)
+        assert_equal(res, [2.0, 8.0])
+
+        res = np.divide.reduce(a, axis=())
+        assert_equal(res, a)
+
+        assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
+
+    def test_reduce_zero_axis(self):
+        # If we have an n x m array and do a reduction with axis=1, then we
+        # are doing n reductions, and each reduction takes an m-element
+        # array. For a reduction operation without an identity, then:
+        # n > 0, m > 0: fine
+        # n = 0, m > 0: fine, doing 0 reductions of m-element arrays
+        # n > 0, m = 0: can't reduce a 0-element array, ValueError
+        # n = 0, m = 0: can't reduce a 0-element array, ValueError (for
+        # consistency with the above case)
+        # This test doesn't actually look at return values, it just checks
+        # that we get an error in exactly those cases where we expect one,
+        # and assumes the calculations themselves are done correctly.
+
+        def ok(f, *args, **kwargs):
+            f(*args, **kwargs)
+
+        def err(f, *args, **kwargs):
+            assert_raises(ValueError, f, *args, **kwargs)
+
+        def t(expect, func, n, m):
+            expect(func, np.zeros((n, m)), axis=1)
+            expect(func, np.zeros((m, n)), axis=0)
+            expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
+            expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
+            expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
+            expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
+            expect(func, np.zeros((m // 3, m // 3, m // 3,
+                                   n // 2, n // 2)),
+                   axis=(0, 1, 2))
+            # Check what happens if the inner (resp. outer) dimensions are a
+            # mix of zero and non-zero:
+            expect(func, np.zeros((10, m, n)), axis=(0, 1))
+            expect(func, np.zeros((10, n, m)), axis=(0, 2))
+            expect(func, np.zeros((m, 10, n)), axis=0)
+            expect(func, np.zeros((10, m, n)), axis=1)
+            expect(func, np.zeros((10, n, m)), axis=2)
+
+        # np.maximum is just an arbitrary ufunc with no reduction identity
+        assert_equal(np.maximum.identity, None)
+        t(ok, np.maximum.reduce, 30, 30)
+        t(ok, np.maximum.reduce, 0, 30)
+        t(err, np.maximum.reduce, 30, 0)
+        t(err, np.maximum.reduce, 0, 0)
+        err(np.maximum.reduce, [])
+        np.maximum.reduce(np.zeros((0, 0)), axis=())
+
+        # all of the combinations are fine for a reduction that has an
+        # identity
+        t(ok, np.add.reduce, 30, 30)
+        t(ok, np.add.reduce, 0, 30)
+        t(ok, np.add.reduce, 30, 0)
+        t(ok, np.add.reduce, 0, 0)
+        np.add.reduce([])
+        np.add.reduce(np.zeros((0, 0)), axis=())
+
+        # OTOH, accumulate always makes sense for any combination of n and m,
+        # because it maps an m-element array to an m-element array. These
+        # tests are simpler because accumulate doesn't accept multiple axes.
+        for uf in (np.maximum, np.add):
+            uf.accumulate(np.zeros((30, 0)), axis=0)
+            uf.accumulate(np.zeros((0, 30)), axis=0)
+            uf.accumulate(np.zeros((30, 30)), axis=0)
+            uf.accumulate(np.zeros((0, 0)), axis=0)
+
+    def test_safe_casting(self):
+        # In old versions of numpy, in-place operations used the 'unsafe'
+        # casting rules. In versions >= 1.10, 'same_kind' is the default,
+        # and an exception (rather than a warning) is raised when
+        # 'same_kind' is not satisfied.
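+        # For intuition, the 'same_kind' rule rejects float -> int results
+        # in place unless the cast is explicitly forced (an illustrative
+        # sketch of the assertions below):
+        #     a = np.array([1, 2, 3]); np.add(a, 1.1, out=a)  # TypeError
+        #     np.add(a, 1.1, out=a, casting="unsafe")         # a == [2, 3, 4]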
+        a = np.array([1, 2, 3], dtype=int)
+        # Non-in-place addition is fine
+        assert_array_equal(assert_no_warnings(np.add, a, 1.1),
+                           [2.1, 3.1, 4.1])
+        assert_raises(TypeError, np.add, a, 1.1, out=a)
+
+        def add_inplace(a, b):
+            a += b
+
+        assert_raises(TypeError, add_inplace, a, 1.1)
+        # Make sure that explicitly overriding the exception is allowed:
+        assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
+        assert_array_equal(a, [2, 3, 4])
+
+    def test_ufunc_custom_out(self):
+        # Test ufunc with built-in input types and custom output type
+
+        a = np.array([0, 1, 2], dtype='i8')
+        b = np.array([0, 1, 2], dtype='i8')
+        c = np.empty(3, dtype=_rational_tests.rational)
+
+        # Output must be specified so numpy knows what
+        # ufunc signature to look for
+        result = _rational_tests.test_add(a, b, c)
+        target = np.array([0, 2, 4], dtype=_rational_tests.rational)
+        assert_equal(result, target)
+
+        # The new resolution means that we can (usually) find custom loops
+        # as long as they match exactly:
+        result = _rational_tests.test_add(a, b)
+        assert_equal(result, target)
+
+        # This works even more generally, so long as the default
+        # common-dtype promoter works out:
+        result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
+        assert_equal(result, target)
+
+        # This scalar path used to go into legacy promotion, but doesn't now:
+        result = _rational_tests.test_add(a, np.uint16(2))
+        target = np.array([2, 3, 4], dtype=_rational_tests.rational)
+        assert_equal(result, target)
+
+    def test_operand_flags(self):
+        a = np.arange(16, dtype=int).reshape(4, 4)
+        b = np.arange(9, dtype=int).reshape(3, 3)
+        opflag_tests.inplace_add(a[:-1, :-1], b)
+        assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
+                                  [14, 16, 18, 11], [12, 13, 14, 15]]))
+
+        a = np.array(0)
+        opflag_tests.inplace_add(a, 3)
+        assert_equal(a, 3)
+        opflag_tests.inplace_add(a, [3, 4])
+        assert_equal(a, 10)
+
+    def test_struct_ufunc(self):
+        import numpy._core._struct_ufunc_tests as struct_ufunc
+
+        a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
+        b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
+
+        result = struct_ufunc.add_triplet(a, b)
+        assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
+        assert_raises(RuntimeError, struct_ufunc.register_fail)
+
+    def test_custom_ufunc(self):
+        a = np.array(
+            [_rational_tests.rational(1, 2),
+             _rational_tests.rational(1, 3),
+             _rational_tests.rational(1, 4)],
+            dtype=_rational_tests.rational)
+        b = np.array(
+            [_rational_tests.rational(1, 2),
+             _rational_tests.rational(1, 3),
+             _rational_tests.rational(1, 4)],
+            dtype=_rational_tests.rational)
+
+        result = _rational_tests.test_add_rationals(a, b)
+        expected = np.array(
+            [_rational_tests.rational(1),
+             _rational_tests.rational(2, 3),
+             _rational_tests.rational(1, 2)],
+            dtype=_rational_tests.rational)
+        assert_equal(result, expected)
+
+    def test_custom_ufunc_forced_sig(self):
+        # gh-9351 - looking for a non-first userloop would previously hang
+        with assert_raises(TypeError):
+            np.multiply(_rational_tests.rational(1), 1,
+                        signature=(_rational_tests.rational, int, None))
+
+    def test_custom_array_like(self):
+
+        class MyThing:
+            __array_priority__ = 1000
+
+            rmul_count = 0
+            getitem_count = 0
+
+            def __init__(self, shape):
+                self.shape = shape
+
+            def __len__(self):
+                return self.shape[0]
+
+            def __getitem__(self, i):
+                MyThing.getitem_count += 1
+                if not isinstance(i, tuple):
+                    i = (i,)
+                if len(i) > self.ndim:
+                    raise IndexError("boo")
+
+                return MyThing(self.shape[len(i):])
+
+            def __rmul__(self, other):
+                MyThing.rmul_count += 1
+                return self
+
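+        # Note (illustrative): the high __array_priority__ makes the
+        # multiplication below defer to MyThing.__rmul__ instead of fully
+        # coercing MyThing via __len__/__getitem__, so rmul_count reaches 1
+        # and only a couple of __getitem__ probes are expected.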
+ np.float64(5) * MyThing((3, 3)) + assert_(MyThing.rmul_count == 1, MyThing.rmul_count) + assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + + def test_array_wrap_array_priority(self): + class ArrayPriorityBase(np.ndarray): + @classmethod + def __array_wrap__(cls, array, context=None, return_scalar=False): + return cls + + class ArrayPriorityMinus0(ArrayPriorityBase): + __array_priority__ = 0 + + class ArrayPriorityMinus1000(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus1000b(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus2000(ArrayPriorityBase): + __array_priority__ = -2000 + + x = np.ones(2).view(ArrayPriorityMinus1000) + xb = np.ones(2).view(ArrayPriorityMinus1000b) + y = np.ones(2).view(ArrayPriorityMinus2000) + + assert np.add(x, y) is ArrayPriorityMinus1000 + assert np.add(y, x) is ArrayPriorityMinus1000 + assert np.add(x, xb) is ArrayPriorityMinus1000 + assert np.add(xb, x) is ArrayPriorityMinus1000b + y_minus0 = np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 + assert type(np.add(xb, x, np.zeros(2))) is np.ndarray + + @pytest.mark.parametrize("a", ( + np.arange(10, dtype=int), + np.arange(10, dtype=_rational_tests.rational), + )) + def test_ufunc_at_basic(self, a): + + aa = a.copy() + np.add.at(aa, [2, 5, 2], 1) + assert_equal(aa, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) + + with pytest.raises(ValueError): + # missing second operand + np.add.at(aa, [2, 5, 3]) + + aa = a.copy() + np.negative.at(aa, [2, 5, 3]) + assert_equal(aa, [0, 1, -2, -3, 4, -5, 6, 7, 8, 9]) + + aa = a.copy() + b = np.array([100, 100, 100]) + np.add.at(aa, [2, 5, 2], b) + assert_equal(aa, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) + + with pytest.raises(ValueError): + # extraneous second operand + np.negative.at(a, [2, 5, 3], [1, 2, 3]) + + with pytest.raises(ValueError): + # second operand cannot be converted to an array + np.add.at(a, [2, 5, 3], [[1, 2], 1]) + + # ufuncs with indexed loops for performance in ufunc.at + indexed_ufuncs = [np.add, np.subtract, np.multiply, np.floor_divide, + np.maximum, np.minimum, np.fmax, np.fmin] + + @pytest.mark.parametrize( + "typecode", np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", indexed_ufuncs) + def test_ufunc_at_inner_loops(self, typecode, ufunc): + if ufunc is np.divide and typecode in np.typecodes['AllInteger']: + # Avoid divide-by-zero and inf for integer divide + a = np.ones(100, dtype=typecode) + indx = np.random.randint(100, size=30, dtype=np.intp) + vals = np.arange(1, 31, dtype=typecode) + else: + a = np.ones(1000, dtype=typecode) + indx = np.random.randint(1000, size=3000, dtype=np.intp) + vals = np.arange(3000, dtype=typecode) + atag = a.copy() + # Do the calculation twice and compare the answers + with warnings.catch_warnings(record=True) as w_at: + warnings.simplefilter('always') + ufunc.at(a, indx, vals) + with warnings.catch_warnings(record=True) as w_loop: + warnings.simplefilter('always') + for i, v in zip(indx, vals): + # Make sure all the work happens inside the ufunc + # in order to duplicate error/warning handling + ufunc(atag[i], v, out=atag[i:i + 1], casting="unsafe") + assert_equal(atag, a) + # If w_loop warned, make sure w_at warned as well + if len(w_loop) > 0: + # + assert len(w_at) > 0 + assert w_at[0].category == w_loop[0].category + assert str(w_at[0].message)[:10] == str(w_loop[0].message)[:10] + + @pytest.mark.parametrize("typecode", np.typecodes['Complex']) + 
@pytest.mark.parametrize("ufunc", [np.add, np.subtract, np.multiply]) + def test_ufunc_at_inner_loops_complex(self, typecode, ufunc): + a = np.ones(10, dtype=typecode) + indx = np.concatenate([np.ones(6, dtype=np.intp), + np.full(18, 4, dtype=np.intp)]) + value = a.dtype.type(1j) + ufunc.at(a, indx, value) + expected = np.ones_like(a) + if ufunc is np.multiply: + expected[1] = expected[4] = -1 + else: + expected[1] += 6 * (value if ufunc is np.add else -value) + expected[4] += 18 * (value if ufunc is np.add else -value) + + assert_array_equal(a, expected) + + def test_ufunc_at_ellipsis(self): + # Make sure the indexed loop check does not choke on iters + # with subspaces + arr = np.zeros(5) + np.add.at(arr, slice(None), np.ones(5)) + assert_array_equal(arr, np.ones(5)) + + def test_ufunc_at_negative(self): + arr = np.ones(5, dtype=np.int32) + indx = np.arange(5) + umt.indexed_negative.at(arr, indx) + # If it is [-1, -1, -1, -100, 0] then the regular strided loop was used + assert np.all(arr == [-1, -1, -1, -200, -1]) + + def test_ufunc_at_large(self): + # issue gh-23457 + indices = np.zeros(8195, dtype=np.int16) + b = np.zeros(8195, dtype=float) + b[0] = 10 + b[1] = 5 + b[8192:] = 100 + a = np.zeros(1, dtype=float) + np.add.at(a, indices, b) + assert a[0] == b.sum() + + def test_cast_index_fastpath(self): + arr = np.zeros(10) + values = np.ones(100000) + # index must be cast, which may be buffered in chunks: + index = np.zeros(len(values), dtype=np.uint8) + np.add.at(arr, index, values) + assert arr[0] == len(values) + + @pytest.mark.parametrize("value", [ + np.ones(1), np.ones(()), np.float64(1.), 1.]) + def test_ufunc_at_scalar_value_fastpath(self, value): + arr = np.zeros(1000) + # index must be cast, which may be buffered in chunks: + index = np.repeat(np.arange(1000), 2) + np.add.at(arr, index, value) + assert_array_equal(arr, np.full_like(arr, 2 * value)) + + def test_ufunc_at_multiD(self): + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, (slice(None), [1, 2, 1]), b) + assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) + assert_equal(a, + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, ([1, 2, 1], slice(None)), b) + assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11], + [212, 413, 614], + [115, 216, 317]], + + [[18, 19, 20], + [221, 422, 623], + [124, 225, 326]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (0, [1, 2, 1]), b) + assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, ([1, 2, 1], 0, slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [3, 4, 5], + [6, 7, 8]], + + [[209, 410, 611], + [12, 13, 14], + [15, 16, 17]], + + [[118, 219, 320], + [21, 22, 23], + [24, 25, 26]]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), 
slice(None)), b) + assert_equal(a, + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]]) + + def test_ufunc_at_0D(self): + a = np.array(0) + np.add.at(a, (), 1) + assert_equal(a, 1) + + assert_raises(IndexError, np.add.at, a, 0, 1) + assert_raises(IndexError, np.add.at, a, [], 1) + + def test_ufunc_at_dtypes(self): + # Test mixed dtypes + a = np.arange(10) + np.power.at(a, [1, 2, 3, 2], 3.5) + assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) + + def test_ufunc_at_boolean(self): + # Test boolean indexing and boolean ufuncs + a = np.arange(10) + index = a % 2 == 0 + np.equal.at(a, index, [0, 2, 4, 6, 8]) + assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) + + # Test unary operator + a = np.arange(10, dtype='u4') + np.invert.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) + + def test_ufunc_at_advanced(self): + # Test empty subspace + orig = np.arange(4) + a = orig[:, None][:, 0:0] + np.add.at(a, [0, 1], 3) + assert_array_equal(orig, np.arange(4)) + + # Test with swapped byte order + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) + np.add.at(values, index, 3) + assert_array_equal(values, [1, 8, 6, 4]) + + # Test exception thrown + values = np.array(['a', 1], dtype=object) + assert_raises(TypeError, np.add.at, values, [0, 1], 1) + assert_array_equal(values, np.array(['a', 1], dtype=object)) + + # Test multiple output ufuncs raise error, gh-5665 + assert_raises(ValueError, np.modf.at, np.arange(10), [1]) + + # Test maximum + a = np.array([1, 2, 3]) + np.maximum.at(a, [0], 0) + assert_equal(a, np.array([1, 2, 3])) + + @pytest.mark.parametrize("dtype", + np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", + [np.add, np.subtract, np.divide, np.minimum, np.maximum]) + def test_at_negative_indexes(self, dtype, ufunc): + a = np.arange(0, 10).astype(dtype) + indxs = np.array([-1, 1, -1, 2]).astype(np.intp) + vals = np.array([1, 5, 2, 10], dtype=a.dtype) + + expected = a.copy() + for i, v in zip(indxs, vals): + expected[i] = ufunc(expected[i], v) + + ufunc.at(a, indxs, vals) + assert_array_equal(a, expected) + assert np.all(indxs == [-1, 1, -1, 2]) + + def test_at_not_none_signature(self): + # Test ufuncs with non-trivial signature raise a TypeError + a = np.ones((2, 2, 2)) + b = np.ones((1, 2, 2)) + assert_raises(TypeError, np.matmul.at, a, [0], b) + + a = np.array([[[1, 2], [3, 4]]]) + assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0]) + + def test_at_no_loop_for_op(self): + # str dtype does not have a ufunc loop for np.add + arr = np.ones(10, dtype=str) + with pytest.raises(np._core._exceptions._UFuncNoLoopError): + np.add.at(arr, [0, 1], [0, 1]) + + def test_at_output_casting(self): + arr = np.array([-1]) + np.equal.at(arr, [0], [0]) + assert arr[0] == 0 + + def test_at_broadcast_failure(self): + arr = np.arange(5) + with pytest.raises(ValueError): + np.add.at(arr, [0, 1], [1, 2, 3]) + + def test_reduce_arguments(self): + f = np.add.reduce + d = np.ones((5, 2), dtype=int) + o = np.ones((2,), dtype=d.dtype) + r = o * 5 + assert_equal(f(d), r) + # a, axis=0, dtype=None, out=None, keepdims=False + assert_equal(f(d, axis=0), r) + assert_equal(f(d, 0), r) + assert_equal(f(d, 0, dtype=None), r) + assert_equal(f(d, 0, dtype='i'), r) + assert_equal(f(d, 0, 'i'), r) + assert_equal(f(d, 0, None), r) + 
assert_equal(f(d, 0, None, out=None), r) + assert_equal(f(d, 0, None, out=o), r) + assert_equal(f(d, 0, None, o), r) + assert_equal(f(d, 0, None, None), r) + assert_equal(f(d, 0, None, None, keepdims=False), r) + assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) + assert_equal(f(d, 0, None, None, False, 0), r) + assert_equal(f(d, 0, None, None, False, initial=0), r) + assert_equal(f(d, 0, None, None, False, 0, True), r) + assert_equal(f(d, 0, None, None, False, 0, where=True), r) + # multiple keywords + assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0, + where=True), r) + + # too little + assert_raises(TypeError, f) + # too much + assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1) + # invalid axis + assert_raises(TypeError, f, d, "invalid") + assert_raises(TypeError, f, d, axis="invalid") + assert_raises(TypeError, f, d, axis="invalid", dtype=None, + keepdims=True) + # invalid dtype + assert_raises(TypeError, f, d, 0, "invalid") + assert_raises(TypeError, f, d, dtype="invalid") + assert_raises(TypeError, f, d, dtype="invalid", out=None) + # invalid out + assert_raises(TypeError, f, d, 0, None, "invalid") + assert_raises(TypeError, f, d, out="invalid") + assert_raises(TypeError, f, d, out="invalid", dtype=None) + # keepdims boolean, no invalid value + # assert_raises(TypeError, f, d, 0, None, None, "invalid") + # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) + # invalid mix + assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", + out=None) + + # invalid keyword + assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) + assert_raises(TypeError, f, d, invalid=0) + assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", + out=None) + assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, + out=None, invalid=0) + + def test_structured_equal(self): + # https://github.com/numpy/numpy/issues/4855 + + class MyA(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*(input.view(np.ndarray) + for input in inputs), **kwargs) + a = np.arange(12.).reshape(4, 3) + ra = a.view(dtype=('f8,f8,f8')).squeeze() + mra = ra.view(MyA) + + target = np.array([True, False, False, False], dtype=bool) + assert_equal(np.all(target == (mra == ra[0])), True) + + def test_scalar_equal(self): + # Scalar comparisons should always work, without deprecation warnings. + # even when the ufunc fails. + a = np.array(0.) + b = np.array('a') + assert_(a != b) + assert_(b != a) + assert_(not (a == b)) + assert_(not (b == a)) + + def test_NotImplemented_not_returned(self): + # See gh-5964 and gh-2091. Some of these functions are not operator + # related and were fixed for other reasons in the past. 
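+        # For intuition: a ufunc call has no second operand for Python to
+        # retry with, so returning NotImplemented would leak it as a plain
+        # value; e.g. (sketching the loop below)
+        #     np.add(np.array('1'), 1)   # must raise TypeError,
+        #                                # not evaluate to NotImplemented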
+        binary_funcs = [
+            np.power, np.add, np.subtract, np.multiply, np.divide,
+            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
+            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
+            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
+            np.maximum, np.minimum, np.mod,
+            np.greater, np.greater_equal, np.less, np.less_equal,
+            np.equal, np.not_equal]
+
+        a = np.array('1')
+        b = 1
+        c = np.array([1., 2.])
+        for f in binary_funcs:
+            assert_raises(TypeError, f, a, b)
+            assert_raises(TypeError, f, c, a)
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or])  # logical_xor object loop is bad
+    @pytest.mark.parametrize("signature",
+            [(None, None, object), (object, None, None),
+             (None, object, None)])
+    def test_logical_ufuncs_object_signatures(self, ufunc, signature):
+        a = np.array([True, None, False], dtype=object)
+        res = ufunc(a, a, signature=signature)
+        assert res.dtype == object
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    @pytest.mark.parametrize("signature",
+            [(bool, None, object), (object, None, bool),
+             (None, object, bool)])
+    def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
+        # Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
+        a = np.array([True, None, False])
+        with pytest.raises(TypeError):
+            ufunc(a, a, signature=signature)
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    def test_logical_ufuncs_support_anything(self, ufunc):
+        # The logical ufuncs support even input that can't be promoted:
+        a = np.array(b'1', dtype="V3")
+        c = np.array([1., 2.])
+        assert_array_equal(ufunc(a, c), ufunc([True, True], True))
+        assert ufunc.reduce(a) == True
+        # check that the output has no effect:
+        out = np.zeros(2, dtype=np.int32)
+        expected = ufunc([True, True], True).astype(out.dtype)
+        assert_array_equal(ufunc(a, c, out=out), expected)
+        out = np.zeros((), dtype=np.int32)
+        assert ufunc.reduce(a, out=out) == True
+        # Last check, test reduction when out and a match (the complexity
+        # here is that the "i,i->?" may seem right, but should not match).
+        a = np.array([3], dtype="i")
+        out = np.zeros((), dtype=a.dtype)
+        assert ufunc.reduce(a, out=out) == 1
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    @pytest.mark.parametrize("dtype", ["S", "U"])
+    @pytest.mark.parametrize("values", [["1", "hi", "0"], ["", ""]])
+    def test_logical_ufuncs_supports_string(self, ufunc, dtype, values):
+        # note that values are either all true or all false
+        arr = np.array(values, dtype=dtype)
+        obj_arr = np.array(values, dtype=object)
+        res = ufunc(arr, arr)
+        expected = ufunc(obj_arr, obj_arr, dtype=bool)
+
+        assert_array_equal(res, expected)
+
+        res = ufunc.reduce(arr)
+        expected = ufunc.reduce(obj_arr, dtype=bool)
+        assert_array_equal(res, expected)
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    def test_logical_ufuncs_out_cast_check(self, ufunc):
+        a = np.array('1')
+        c = np.array([1., 2.])
+        out = a.copy()
+        with pytest.raises(TypeError):
+            # It would be safe, but not equiv casting:
+            ufunc(a, c, out=out, casting="equiv")
+
+    def test_reducelike_byteorder_resolution(self):
+        # See gh-20699, byte-order changes need some extra care in the type
+        # resolution to make the following succeed:
+        arr_be = np.arange(10, dtype=">i8")
+        arr_le = np.arange(10, dtype="<i8")
+
+        assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
+
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+                                   if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_types(ufunc):
+    '''
+    Check that ufunc loops return the output dtypes advertised in
+    ufunc.types
+    '''
+    for typ in ufunc.types:
+        # types is a list of strings like ii->i
+        if 'O' in typ or '?'
in typ: + continue + inp, out = typ.split('->') + args = [np.ones((3, 3), t) for t in inp] + with warnings.catch_warnings(record=True): + warnings.filterwarnings("always") + res = ufunc(*args) + if isinstance(res, tuple): + outs = tuple(out) + assert len(res) == len(outs) + for r, t in zip(res, outs): + assert r.dtype == np.dtype(t) + else: + assert res.dtype == np.dtype(out) + +@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) + if isinstance(getattr(np, x), np.ufunc)]) +def test_ufunc_noncontiguous(ufunc): + ''' + Check that contiguous and non-contiguous calls to ufuncs + have the same results for values in range(9) + ''' + for typ in ufunc.types: + # types is a list of strings like ii->i + if any(set('O?mM') & set(typ)): + # bool, object, datetime are too irregular for this simple test + continue + inp, out = typ.split('->') + args_c = [np.empty((6, 6), t) for t in inp] + # non contiguous (2, 3 step on the two dimensions) + args_n = [np.empty((12, 18), t)[::2, ::3] for t in inp] + # alignment != itemsize is possible. So create an array with such + # an odd step manually. + args_o = [] + for t in inp: + orig_dt = np.dtype(t) + off_dt = f"S{orig_dt.alignment}" # offset by alignment + dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) + args_o.append(np.empty((6, 6), dtype=dtype)["t"]) + for a in args_c + args_n + args_o: + a.flat = range(1, 37) + + with warnings.catch_warnings(record=True): + warnings.filterwarnings("always") + res_c = ufunc(*args_c) + res_n = ufunc(*args_n) + res_o = ufunc(*args_o) + if len(out) == 1: + res_c = (res_c,) + res_n = (res_n,) + res_o = (res_o,) + for c_ar, n_ar, o_ar in zip(res_c, res_n, res_o): + dt = c_ar.dtype + if np.issubdtype(dt, np.floating): + # for floating point results allow a small fuss in comparisons + # since different algorithms (libm vs. intrinsics) can be used + # for different input strides + res_eps = np.finfo(dt).eps + tol = 3 * res_eps + assert_allclose(res_c, res_n, atol=tol, rtol=tol) + assert_allclose(res_c, res_o, atol=tol, rtol=tol) + else: + assert_equal(c_ar, n_ar) + assert_equal(c_ar, o_ar) + + +@pytest.mark.parametrize('ufunc', [np.sign, np.equal]) +def test_ufunc_warn_with_nan(ufunc): + # issue gh-15127 + # test that calling certain ufuncs with a non-standard `nan` value does not + # emit a warning + # `b` holds a 64 bit signaling nan: the most significant bit of the + # significand is zero. + b = np.array([0x7ff0000000000001], 'i8').view('f8') + assert np.isnan(b) + if ufunc.nin == 1: + ufunc(b) + elif ufunc.nin == 2: + ufunc(b, b.copy()) + else: + raise ValueError('ufunc with more than 2 inputs') + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_ufunc_out_casterrors(): + # Tests that casting errors are correctly reported and buffers are + # cleared. 
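+    # An illustrative reduction of the setup below: an object array holding
+    # a str makes the object add loop succeed ("x" + "x" == "xx"), and only
+    # the buffered cast of that result to intp fails:
+    #     bad = np.array([1, "x", 2], dtype=object)
+    #     np.add(bad, bad, out=np.empty(3, np.intp), casting="unsafe")
+    # raises ValueError part-way through; the refcount checks verify the
+    # partially filled buffers are still cleaned up.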
+ # The following array can be added to itself as an object array, but + # the result cannot be cast to an integer output: + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * int(ncu.BUFSIZE * 1.5) + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + out = np.ones(len(arr), dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError): + # Output casting failure: + np.add(arr, arr, out=out, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + with pytest.raises(ValueError): + # Input casting failure: + np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + +@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)]) +def test_ufunc_input_casterrors(bad_offset): + value = 123 + arr = np.array([value] * bad_offset + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + with pytest.raises(ValueError): + # Force cast inputs, but the buffered cast of `arr` to intp fails: + np.add(arr, arr, dtype=np.intp, casting="unsafe") + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)]) +def test_ufunc_input_floatingpoint_error(bad_offset): + value = 123 + arr = np.array([value] * bad_offset + + [np.nan] + + [value] * int(1.5 * ncu.BUFSIZE)) + with np.errstate(invalid="raise"), pytest.raises(FloatingPointError): + # Force cast inputs, but the buffered cast of `arr` to intp fails: + np.add(arr, arr, dtype=np.intp, casting="unsafe") + + +def test_trivial_loop_invalid_cast(): + # This tests the fast-path "invalid cast", see gh-19904. + with pytest.raises(TypeError, + match="cast ufunc 'add' input 0"): + # the void dtype definitely cannot cast to double: + np.add(np.array(1, "i,i"), 3, signature="dd->d") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.parametrize("offset", + [0, ncu.BUFSIZE // 2, int(1.5 * ncu.BUFSIZE)]) +def test_reduce_casterrors(offset): + # Test reporting of casting errors in reductions, we test various + # offsets to where the casting error will occur, since these may occur + # at different places during the reduction procedure. For example + # the first item may be special. + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * offset + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + out = np.array(-1, dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError, match="invalid literal"): + # This is an unsafe cast, but we currently always allow that. + # Note that the double loop is picked, but the cast fails. + # `initial=None` disables the use of an identity here to test failures + # while copying the first values path (not used when identity exists). + np.add.reduce(arr, dtype=np.intp, out=out, initial=None) + assert count == sys.getrefcount(value) + # If an error occurred during casting, the operation is done at most until + # the error occurs (the result of which would be `value * offset`) and -1 + # if the error happened immediately. 
+    # This does not define behaviour; the output is invalid and thus
+    # undefined.
+    assert out[()] < value * offset
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_reduction_no_reference_leak():
+    # Test that the generic reduction does not leak references.
+    # gh-29358
+    arr = np.array([1, 2, 3], dtype=np.int32)
+    count = sys.getrefcount(arr)
+
+    np.add.reduce(arr, dtype=np.int32, initial=0)
+    assert count == sys.getrefcount(arr)
+
+    np.add.accumulate(arr, dtype=np.int32)
+    assert count == sys.getrefcount(arr)
+
+    np.add.reduceat(arr, [0, 1], dtype=np.int32)
+    assert count == sys.getrefcount(arr)
+
+    # with `out=` the reference count is not changed
+    out = np.empty((), dtype=np.int32)
+    out_count = sys.getrefcount(out)
+
+    np.add.reduce(arr, dtype=np.int32, out=out, initial=0)
+    assert count == sys.getrefcount(arr)
+    assert out_count == sys.getrefcount(out)
+
+    out = np.empty(arr.shape, dtype=np.int32)
+    out_count = sys.getrefcount(out)
+
+    np.add.accumulate(arr, dtype=np.int32, out=out)
+    assert count == sys.getrefcount(arr)
+    assert out_count == sys.getrefcount(out)
+
+    out = np.empty((2,), dtype=np.int32)
+    out_count = sys.getrefcount(out)
+
+    np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out)
+    assert count == sys.getrefcount(arr)
+    assert out_count == sys.getrefcount(out)
+
+
+def test_object_reduce_cleanup_on_failure():
+    # Test cleanup, including of the initial value (manually provided or not)
+    with pytest.raises(TypeError):
+        np.add.reduce([1, 2, None], initial=4)
+
+    with pytest.raises(TypeError):
+        np.add.reduce([1, 2, None])
+
+
+@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+@pytest.mark.parametrize("method",
+        [np.add.accumulate, np.add.reduce,
+         pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"),
+         pytest.param(lambda x: np.log.at(x, [2]), id="at")])
+def test_ufunc_methods_floaterrors(method):
+    # adding inf and -inf (or taking log(-inf)) creates an invalid float
+    # and warns
+    arr = np.array([np.inf, 0, -np.inf])
+    with np.errstate(all="warn"):
+        with pytest.warns(RuntimeWarning, match="invalid value"):
+            method(arr)
+
+    arr = np.array([np.inf, 0, -np.inf])
+    with np.errstate(all="raise"):
+        with pytest.raises(FloatingPointError):
+            method(arr)
+
+
+def _check_neg_zero(value):
+    if value != 0.0:
+        return False
+    if not np.signbit(value.real):
+        return False
+    if value.dtype.kind == "c":
+        return np.signbit(value.imag)
+    return True
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+def test_addition_negative_zero(dtype):
+    dtype = np.dtype(dtype)
+    if dtype.kind == "c":
+        neg_zero = dtype.type(complex(-0.0, -0.0))
+    else:
+        neg_zero = dtype.type(-0.0)
+
+    arr = np.array(neg_zero)
+    arr2 = np.array(neg_zero)
+
+    assert _check_neg_zero(arr + arr2)
+    # In-place ops may end up on a different path (reduce path); see gh-21211
+    arr += arr2
+    assert _check_neg_zero(arr)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+@pytest.mark.parametrize("use_initial", [True, False])
+def test_addition_reduce_negative_zero(dtype, use_initial):
+    dtype = np.dtype(dtype)
+    if dtype.kind == "c":
+        neg_zero = dtype.type(complex(-0.0, -0.0))
+    else:
+        neg_zero = dtype.type(-0.0)
+
+    kwargs = {}
+    if use_initial:
+        kwargs["initial"] = neg_zero
+    else:
+        pytest.xfail("-0. propagation in sum currently requires initial")
+
+    # Test various lengths, in case SIMD paths or chunking play a role.
+    # 150 extends beyond the pairwise blocksize; probably not important.
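+    # The IEEE-754 rule being exercised (illustrative): -0.0 + -0.0 == -0.0,
+    # but adding +0.0 loses the sign, so a sum seeded with +0.0 can never
+    # produce -0.0; an explicit initial keeps it, e.g.
+    #     np.sum(np.array([-0.0] * 5), initial=-0.0)   # -> -0.0
+    # and _check_neg_zero/np.signbit distinguish the two zeros.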
+ for i in range(150): + arr = np.array([neg_zero] * i, dtype=dtype) + res = np.sum(arr, **kwargs) + if i > 0 or use_initial: + assert _check_neg_zero(res) + else: + # `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])` + assert not np.signbit(res.real) + assert not np.signbit(res.imag) + + +@pytest.mark.parametrize(["dt1", "dt2"], + [("S", "U"), ("U", "S"), ("S", "d"), ("S", "V"), ("U", "l")]) +def test_addition_string_types(dt1, dt2): + arr1 = np.array([1234234], dtype=dt1) + arr2 = np.array([b"423"], dtype=dt2) + with pytest.raises(np._core._exceptions.UFuncTypeError) as exc: + np.add(arr1, arr2) + + +@pytest.mark.parametrize("order1,order2", + [(">", ">"), ("<", "<"), (">", "<"), ("<", ">")]) +def test_addition_unicode_inverse_byte_order(order1, order2): + element = 'abcd' + arr1 = np.array([element], dtype=f"{order1}U4") + arr2 = np.array([element], dtype=f"{order2}U4") + result = arr1 + arr2 + assert result == 2 * element + + +@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64]) +def test_find_non_long_args(dtype): + element = 'abcd' + start = dtype(0) + end = dtype(len(element)) + arr = np.array([element]) + result = np._core.umath.find(arr, "a", start, end) + assert result.dtype == np.dtype("intp") + assert result == 0 + + +def test_find_access_past_buffer(): + # This checks that no read past the string buffer occurs in + # string_fastsearch.h. The buffer class makes sure this is checked. + # To see it in action, you can remove the checks in the buffer and + # this test will produce an 'Invalid read' if run under valgrind. + arr = np.array([b'abcd', b'ebcd']) + result = np._core.umath.find(arr, b'cde', 0, np.iinfo(np.int64).max) + assert np.all(result == -1) + + +class TestLowlevelAPIAccess: + def test_resolve_dtypes_basic(self): + # Basic test for dtype resolution: + i4 = np.dtype("i4") + f4 = np.dtype("f4") + f8 = np.dtype("f8") + + r = np.add.resolve_dtypes((i4, f4, None)) + assert r == (f8, f8, f8) + + # Signature uses the same logic to parse as ufunc (less strict) + # the following is "same-kind" casting so works: + r = np.add.resolve_dtypes(( + i4, i4, None), signature=(None, None, "f4")) + assert r == (f4, f4, f4) + + # Check NEP 50 "weak" promotion also: + r = np.add.resolve_dtypes((f4, int, None)) + assert r == (f4, f4, f4) + + with pytest.raises(TypeError): + np.add.resolve_dtypes((i4, f4, None), casting="no") + + def test_resolve_dtypes_comparison(self): + i4 = np.dtype("i4") + i8 = np.dtype("i8") + b = np.dtype("?") + r = np.equal.resolve_dtypes((i4, i8, None)) + assert r == (i8, i8, b) + + def test_weird_dtypes(self): + S0 = np.dtype("S0") + # S0 is often converted by NumPy to S1, but not here: + r = np.equal.resolve_dtypes((S0, S0, None)) + assert r == (S0, S0, np.dtype(bool)) + + # Subarray dtypes are weird and may not work fully, we preserve them + # leading to a TypeError (currently no equal loop for void/structured) + dts = np.dtype("10i") + with pytest.raises(TypeError): + np.equal.resolve_dtypes((dts, dts, None)) + + def test_resolve_dtypes_reduction(self): + i2 = np.dtype("i2") + default_int_ = np.dtype(np.int_) + # Check special addition resolution: + res = np.add.resolve_dtypes((None, i2, None), reduction=True) + assert res == (default_int_, default_int_, default_int_) + + def test_resolve_dtypes_reduction_no_output(self): + i4 = np.dtype("i4") + with pytest.raises(TypeError): + # May be allowable at some point? 
+ np.add.resolve_dtypes((i4, i4, i4), reduction=True) + + @pytest.mark.parametrize("dtypes", [ + (np.dtype("i"), np.dtype("i")), + (None, np.dtype("i"), np.dtype("f")), + (np.dtype("i"), None, np.dtype("f")), + ("i4", "i4", None)]) + def test_resolve_dtypes_errors(self, dtypes): + with pytest.raises(TypeError): + np.add.resolve_dtypes(dtypes) + + def test_resolve_dtypes_reduction_errors(self): + i2 = np.dtype("i2") + + with pytest.raises(TypeError): + np.add.resolve_dtypes((None, i2, i2)) + + with pytest.raises(TypeError): + np.add.signature((None, None, "i4")) + + @pytest.mark.skipif(not hasattr(ct, "pythonapi"), + reason="`ctypes.pythonapi` required for capsule unpacking.") + def test_loop_access(self): + # This is a basic test for the full strided loop access + data_t = ct.c_char_p * 2 + dim_t = ct.c_ssize_t * 1 + strides_t = ct.c_ssize_t * 2 + strided_loop_t = ct.CFUNCTYPE( + ct.c_int, ct.c_void_p, data_t, dim_t, strides_t, ct.c_void_p) + + class call_info_t(ct.Structure): + _fields_ = [ + ("strided_loop", strided_loop_t), + ("context", ct.c_void_p), + ("auxdata", ct.c_void_p), + ("requires_pyapi", ct.c_byte), + ("no_floatingpoint_errors", ct.c_byte), + ] + + i4 = np.dtype("i4") + dt, call_info_obj = np.negative._resolve_dtypes_and_context((i4, i4)) + assert dt == (i4, i4) # can be used without casting + + # Fill in the rest of the information: + np.negative._get_strided_loop(call_info_obj) + + ct.pythonapi.PyCapsule_GetPointer.restype = ct.c_void_p + call_info = ct.pythonapi.PyCapsule_GetPointer( + ct.py_object(call_info_obj), + ct.c_char_p(b"numpy_1.24_ufunc_call_info")) + + call_info = ct.cast(call_info, ct.POINTER(call_info_t)).contents + + arr = np.arange(10, dtype=i4) + call_info.strided_loop( + call_info.context, + data_t(arr.ctypes.data, arr.ctypes.data), + arr.ctypes.shape, # is a C-array with 10 here + strides_t(arr.ctypes.strides[0], arr.ctypes.strides[0]), + call_info.auxdata) + + # We just directly called the negative inner-loop in-place: + assert_array_equal(arr, -np.arange(10, dtype=i4)) + + @pytest.mark.parametrize("strides", [1, (1, 2, 3), (1, "2")]) + def test__get_strided_loop_errors_bad_strides(self, strides): + i4 = np.dtype("i4") + dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4)) + + with pytest.raises(TypeError, match="fixed_strides.*tuple.*or None"): + np.negative._get_strided_loop(call_info, fixed_strides=strides) + + def test__get_strided_loop_errors_bad_call_info(self): + i4 = np.dtype("i4") + dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4)) + + with pytest.raises(ValueError, match="PyCapsule"): + np.negative._get_strided_loop("not the capsule!") + + with pytest.raises(TypeError, match=".*incompatible context"): + np.add._get_strided_loop(call_info) + + np.negative._get_strided_loop(call_info) + with pytest.raises(TypeError): + # cannot call it a second time: + np.negative._get_strided_loop(call_info) + + def test_long_arrays(self): + t = np.zeros((1029, 917), dtype=np.single) + t[0][0] = 1 + t[28][414] = 1 + tc = np.cos(t) + assert_equal(tc[0][0], tc[28][414]) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath.py new file mode 100644 index 0000000..13e139d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath.py @@ -0,0 +1,4916 @@ +import fnmatch +import itertools +import operator +import platform +import sys +import warnings +from collections import namedtuple +from fractions import Fraction +from 
functools import reduce + +import pytest + +import numpy as np +import numpy._core.umath as ncu +from numpy._core import _umath_tests as ncu_tests +from numpy._core import sctypes +from numpy.testing import ( + HAS_REFCOUNT, + IS_MUSL, + IS_PYPY, + IS_WASM, + _gen_alignment_data, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_no_warnings, + assert_raises, + assert_raises_regex, + suppress_warnings, +) +from numpy.testing._private.utils import _glibc_older_than + +UFUNCS = [obj for obj in np._core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] + +UFUNCS_UNARY = [ + uf for uf in UFUNCS if uf.nin == 1 +] +UFUNCS_UNARY_FP = [ + uf for uf in UFUNCS_UNARY if 'f->f' in uf.types +] + +UFUNCS_BINARY = [ + uf for uf in UFUNCS if uf.nin == 2 +] +UFUNCS_BINARY_ACC = [ + uf for uf in UFUNCS_BINARY if hasattr(uf, "accumulate") and uf.nout == 1 +] + +def interesting_binop_operands(val1, val2, dtype): + """ + Helper to create "interesting" operands to cover common code paths: + * scalar inputs + * only first "values" is an array (e.g. scalar division fast-paths) + * Longer array (SIMD) placing the value of interest at different positions + * Oddly strided arrays which may not be SIMD compatible + + It does not attempt to cover unaligned access or mixed dtypes. + These are normally handled by the casting/buffering machinery. + + This is not a fixture (currently), since I believe a fixture normally + only yields once? + """ + fill_value = 1 # could be a parameter, but maybe not an optional one? + + arr1 = np.full(10003, dtype=dtype, fill_value=fill_value) + arr2 = np.full(10003, dtype=dtype, fill_value=fill_value) + + arr1[0] = val1 + arr2[0] = val2 + + extractor = lambda res: res + yield arr1[0], arr2[0], extractor, "scalars" + + extractor = lambda res: res + yield arr1[0, ...], arr2[0, ...], extractor, "scalar-arrays" + + # reset array values to fill_value: + arr1[0] = fill_value + arr2[0] = fill_value + + for pos in [0, 1, 2, 3, 4, 5, -1, -2, -3, -4]: + arr1[pos] = val1 + arr2[pos] = val2 + + extractor = lambda res: res[pos] + yield arr1, arr2, extractor, f"off-{pos}" + yield arr1, arr2[pos], extractor, f"off-{pos}-with-scalar" + + arr1[pos] = fill_value + arr2[pos] = fill_value + + for stride in [-1, 113]: + op1 = arr1[::stride] + op2 = arr2[::stride] + op1[10] = val1 + op2[10] = val2 + + extractor = lambda res: res[10] + yield op1, op2, extractor, f"stride-{stride}" + + op1[10] = fill_value + op2[10] = fill_value + + +def on_powerpc(): + """ True if we are running on a Power PC platform.""" + return platform.processor() == 'powerpc' or \ + platform.machine().startswith('ppc') + + +def bad_arcsinh(): + """The blocklisted trig functions are not accurate on aarch64/PPC for + complex256. Rather than dig through the actual problem skip the + test. 
This should be fixed when we can move past glibc 2.17,
+    which is the version in manylinux2014.
+    """
+    if platform.machine() == 'aarch64':
+        x = 1.78e-10
+    elif on_powerpc():
+        x = 2.16e-10
+    else:
+        return False
+    v1 = np.arcsinh(np.float128(x))
+    v2 = np.arcsinh(np.complex256(x)).real
+    # The eps for float128 is 1e-33, so this is way bigger
+    return abs((v1 / v2) - 1.0) > 1e-23
+
+
+class _FilterInvalids:
+    def setup_method(self):
+        self.olderr = np.seterr(invalid='ignore')
+
+    def teardown_method(self):
+        np.seterr(**self.olderr)
+
+
+class TestConstants:
+    def test_pi(self):
+        assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
+
+    def test_e(self):
+        assert_allclose(ncu.e, 2.718281828459045, 1e-15)
+
+    def test_euler_gamma(self):
+        assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
+
+
+class TestOut:
+    def test_out_subok(self):
+        for subok in (True, False):
+            a = np.array(0.5)
+            o = np.empty(())
+
+            r = np.add(a, 2, o, subok=subok)
+            assert_(r is o)
+            r = np.add(a, 2, out=o, subok=subok)
+            assert_(r is o)
+            r = np.add(a, 2, out=(o,), subok=subok)
+            assert_(r is o)
+
+            d = np.array(5.7)
+            o1 = np.empty(())
+            o2 = np.empty((), dtype=np.int32)
+
+            r1, r2 = np.frexp(d, o1, None, subok=subok)
+            assert_(r1 is o1)
+            r1, r2 = np.frexp(d, None, o2, subok=subok)
+            assert_(r2 is o2)
+            r1, r2 = np.frexp(d, o1, o2, subok=subok)
+            assert_(r1 is o1)
+            assert_(r2 is o2)
+
+            r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
+            assert_(r1 is o1)
+            r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+            assert_(r2 is o2)
+            r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
+            assert_(r1 is o1)
+            assert_(r2 is o2)
+
+            with assert_raises(TypeError):
+                # Out argument must be tuple, since there are multiple outputs.
+                r1, r2 = np.frexp(d, out=o1, subok=subok)
+
+            assert_raises(TypeError, np.add, a, 2, o, o, subok=subok)
+            assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok)
+            assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok)
+            assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
+            assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
+            assert_raises(TypeError, np.add, a, 2, [], subok=subok)
+            assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
+            assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
+            o.flags.writeable = False
+            assert_raises(ValueError, np.add, a, 2, o, subok=subok)
+            assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
+            assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
+
+    def test_out_wrap_subok(self):
+        class ArrayWrap(np.ndarray):
+            __array_priority__ = 10
+
+            def __new__(cls, arr):
+                return np.asarray(arr).view(cls).copy()
+
+            def __array_wrap__(self, arr, context=None, return_scalar=False):
+                return arr.view(type(self))
+
+        for subok in (True, False):
+            a = ArrayWrap([0.5])
+
+            r = np.add(a, 2, subok=subok)
+            if subok:
+                assert_(isinstance(r, ArrayWrap))
+            else:
+                assert_(type(r) == np.ndarray)
+
+            r = np.add(a, 2, None, subok=subok)
+            if subok:
+                assert_(isinstance(r, ArrayWrap))
+            else:
+                assert_(type(r) == np.ndarray)
+
+            r = np.add(a, 2, out=None, subok=subok)
+            if subok:
+                assert_(isinstance(r, ArrayWrap))
+            else:
+                assert_(type(r) == np.ndarray)
+
+            r = np.add(a, 2, out=(None,), subok=subok)
+            if subok:
+                assert_(isinstance(r, ArrayWrap))
+            else:
+                assert_(type(r) == np.ndarray)
+
+            d = ArrayWrap([5.7])
+            o1 = np.empty((1,))
+            o2 = np.empty((1,), dtype=np.int32)
+
+            r1, r2 = np.frexp(d, o1, subok=subok)
+            if subok:
+                assert_(isinstance(r2, ArrayWrap))
+            else:
+                assert_(type(r2) == np.ndarray)
+
+            r1,
r2 = np.frexp(d, o1, None, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, None, o2, subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + r1, r2 = np.frexp(d, out=o1, subok=subok) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_out_wrap_no_leak(self): + # Regression test for gh-26545 + class ArrSubclass(np.ndarray): + pass + + arr = np.arange(10).view(ArrSubclass) + orig_refcount = sys.getrefcount(arr) + arr *= 1 + assert sys.getrefcount(arr) == orig_refcount + + +class TestComparisons: + import operator + + @pytest.mark.parametrize('dtype', sctypes['uint'] + sctypes['int'] + + sctypes['float'] + [np.bool]) + @pytest.mark.parametrize('py_comp,np_comp', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + def test_comparison_functions(self, dtype, py_comp, np_comp): + # Initialize input arrays + if dtype == np.bool: + a = np.random.choice(a=[False, True], size=1000) + b = np.random.choice(a=[False, True], size=1000) + scalar = True + else: + a = np.random.randint(low=1, high=10, size=1000).astype(dtype) + b = np.random.randint(low=1, high=10, size=1000).astype(dtype) + scalar = 5 + np_scalar = np.dtype(dtype).type(scalar) + a_lst = a.tolist() + b_lst = b.tolist() + + # (Binary) Comparison (x1=array, x2=array) + comp_b = np_comp(a, b).view(np.uint8) + comp_b_list = [int(py_comp(x, y)) for x, y in zip(a_lst, b_lst)] + + # (Scalar1) Comparison (x1=scalar, x2=array) + comp_s1 = np_comp(np_scalar, b).view(np.uint8) + comp_s1_list = [int(py_comp(scalar, x)) for x in b_lst] + + # (Scalar2) Comparison (x1=array, x2=scalar) + comp_s2 = np_comp(a, np_scalar).view(np.uint8) + comp_s2_list = [int(py_comp(x, scalar)) for x in a_lst] + + # Sequence: Binary, Scalar1 and Scalar2 + assert_(comp_b.tolist() == comp_b_list, + f"Failed comparison ({py_comp.__name__})") + assert_(comp_s1.tolist() == comp_s1_list, + f"Failed comparison ({py_comp.__name__})") + assert_(comp_s2.tolist() == comp_s2_list, + f"Failed comparison ({py_comp.__name__})") + + def test_ignore_object_identity_in_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType: + def __eq__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.equal(a, a), [False]) + + def test_ignore_object_identity_in_not_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. 
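+        # Note (illustrative): identity is deliberately not a shortcut,
+        # the comparison runs elementwise even for `x is x`; e.g. an object
+        # array holding np.nan yields np.not_equal(a, a) == [True] because
+        # nan != nan, as asserted at the end of this test.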
+        a = np.array([np.array([1, 2, 3]), None], dtype=object)
+        assert_raises(ValueError, np.not_equal, a, a)
+
+        # Check error raised when comparing identical non-comparable objects.
+        class FunkyType:
+            def __ne__(self, other):
+                raise TypeError("I won't compare")
+
+        a = np.array([FunkyType()])
+        assert_raises(TypeError, np.not_equal, a, a)
+
+        # Check identity doesn't override comparison mismatch.
+        a = np.array([np.nan], dtype=object)
+        assert_equal(np.not_equal(a, a), [True])
+
+    def test_error_in_equal_reduce(self):
+        # gh-20929
+        # make sure np.equal.reduce raises a TypeError if an array is passed
+        # without specifying the dtype
+        a = np.array([0, 0])
+        assert_equal(np.equal.reduce(a, dtype=bool), True)
+        assert_raises(TypeError, np.equal.reduce, a)
+
+    def test_object_dtype(self):
+        assert np.equal(1, [1], dtype=object).dtype == object
+        assert np.equal(1, [1], signature=(None, None, "O")).dtype == object
+
+    def test_object_nonbool_dtype_error(self):
+        # bool output dtype is fine of course:
+        assert np.equal(1, [1], dtype=bool).dtype == bool
+
+        # but the following examples do not have a loop:
+        with pytest.raises(TypeError, match="No loop matching"):
+            np.equal(1, 1, dtype=np.int64)
+
+        with pytest.raises(TypeError, match="No loop matching"):
+            np.equal(1, 1, sig=(None, None, "l"))
+
+    @pytest.mark.parametrize("dtypes", ["qQ", "Qq"])
+    @pytest.mark.parametrize('py_comp, np_comp', [
+        (operator.lt, np.less),
+        (operator.le, np.less_equal),
+        (operator.gt, np.greater),
+        (operator.ge, np.greater_equal),
+        (operator.eq, np.equal),
+        (operator.ne, np.not_equal)
+    ])
+    @pytest.mark.parametrize("vals", [(2**60, 2**60 + 1), (2**60 + 1, 2**60)])
+    def test_large_integer_direct_comparison(
+            self, dtypes, py_comp, np_comp, vals):
+        # Note that float(2**60) + 1 == float(2**60).
+        a1 = np.array([2**60], dtype=dtypes[0])
+        a2 = np.array([2**60 + 1], dtype=dtypes[1])
+        expected = py_comp(2**60, 2**60 + 1)
+
+        assert py_comp(a1, a2) == expected
+        assert np_comp(a1, a2) == expected
+        # Also check the scalars:
+        s1 = a1[0]
+        s2 = a2[0]
+        assert isinstance(s1, np.integer)
+        assert isinstance(s2, np.integer)
+        # The Python operator here is mainly interesting:
+        assert py_comp(s1, s2) == expected
+        assert np_comp(s1, s2) == expected
+
+    @pytest.mark.parametrize("dtype", np.typecodes['UnsignedInteger'])
+    @pytest.mark.parametrize('py_comp_func, np_comp_func', [
+        (operator.lt, np.less),
+        (operator.le, np.less_equal),
+        (operator.gt, np.greater),
+        (operator.ge, np.greater_equal),
+        (operator.eq, np.equal),
+        (operator.ne, np.not_equal)
+    ])
+    @pytest.mark.parametrize("flip", [True, False])
+    def test_unsigned_signed_direct_comparison(
+            self, dtype, py_comp_func, np_comp_func, flip):
+        if flip:
+            py_comp = lambda x, y: py_comp_func(y, x)
+            np_comp = lambda x, y: np_comp_func(y, x)
+        else:
+            py_comp = py_comp_func
+            np_comp = np_comp_func
+
+        arr = np.array([np.iinfo(dtype).max], dtype=dtype)
+        expected = py_comp(int(arr[0]), -1)
+
+        assert py_comp(arr, -1) == expected
+        assert np_comp(arr, -1) == expected
+
+        scalar = arr[0]
+        assert isinstance(scalar, np.integer)
+        # The Python operator here is mainly interesting:
+        assert py_comp(scalar, -1) == expected
+        assert np_comp(scalar, -1) == expected
+
+
+class TestAdd:
+    def test_reduce_alignment(self):
+        # gh-9876
+        # make sure arrays with weird strides work with the optimizations in
+        # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at
+        # a 4 byte offset, even though its itemsize is 8.
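+        # For intuition: with the packed fields below, 'a' occupies bytes
+        # 0-3 and 'b' bytes 4-11 of each 12-byte item, so a['b'] is an
+        # 8-byte float view aligned to only 4 bytes; the pairwise-sum fast
+        # path must still handle it, which summing to 0 confirms.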
+ a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)]) + a['a'] = -1 + assert_equal(a['b'].sum(), 0) + + +class TestDivision: + def test_division_int(self): + # int division should follow Python + x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) + if 5 / 10 == 0.5: + assert_equal(x / 100, [0.05, 0.1, 0.9, 1, + -0.05, -0.1, -0.9, -1, -1.2]) + else: + assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + sctypes['int'] + sctypes['uint'], ( + ( + # dividend + "np.array(range(fo.max-lsize, fo.max)).astype(dtype)," + # divisors + "np.arange(lsize).astype(dtype)," + # scalar divisors + "range(15)" + ), + ( + # dividend + "np.arange(fo.min, fo.min+lsize).astype(dtype)," + # divisors + "np.arange(lsize//-2, lsize//2).astype(dtype)," + # scalar divisors + "range(fo.min, fo.min + 15)" + ), ( + # dividend + "np.array(range(fo.max-lsize, fo.max)).astype(dtype)," + # divisors + "np.arange(lsize).astype(dtype)," + # scalar divisors + "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]" + ) + ) + )) + def test_division_int_boundary(self, dtype, ex_val): + fo = np.iinfo(dtype) + neg = -1 if fo.min < 0 else 1 + # Large enough to test SIMD loops and remainder elements + lsize = 512 + 7 + a, b, divisors = eval(ex_val) + a_lst, b_lst = a.tolist(), b.tolist() + + c_div = lambda n, d: ( + 0 if d == 0 else ( + fo.min if (n and n == fo.min and d == -1) else n // d + ) + ) + with np.errstate(divide='ignore'): + ac = a.copy() + ac //= b + div_ab = a // b + div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)] + + msg = "Integer arrays floor division check (//)" + assert all(div_ab == div_lst), msg + msg_eq = "Integer arrays floor division check (//=)" + assert all(ac == div_lst), msg_eq + + for divisor in divisors: + ac = a.copy() + with np.errstate(divide='ignore', over='ignore'): + div_a = a // divisor + ac //= divisor + div_lst = [c_div(i, divisor) for i in a_lst] + + assert all(div_a == div_lst), msg + assert all(ac == div_lst), msg_eq + + with np.errstate(divide='raise', over='raise'): + if 0 in b: + # Verify overflow case + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + a // b + else: + a // b + if fo.min and fo.min in a: + with pytest.raises(FloatingPointError, + match='overflow encountered in floor_divide'): + a // -1 + elif fo.min: + a // -1 + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + a // 0 + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + ac = a.copy() + ac //= 0 + + np.array([], dtype=dtype) // 0 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + sctypes['int'] + sctypes['uint'], ( + "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)", + "np.array([fo.min, 1, -2, 1, 1, 2, -3]).astype(dtype)", + "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)", + "np.array(range(fo.max-(100*7), fo.max, 7)).astype(dtype)", + ) + )) + def test_division_int_reduce(self, dtype, ex_val): + fo = np.iinfo(dtype) + a = eval(ex_val) + lst = a.tolist() + c_div = lambda n, d: ( + 0 if d == 0 or (n and n == fo.min and d == -1) else n // d + ) + + with np.errstate(divide='ignore'): + div_a = np.floor_divide.reduce(a) + 
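+        # Note (illustrative): functools.reduce(c_div, lst) left-folds the
+        # same C-style division over the Python list, mirroring how
+        # np.floor_divide.reduce folds over the array, so the two results
+        # must agree.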
div_lst = reduce(c_div, lst) + msg = "Reduce floor integer division check" + assert div_a == div_lst, msg + + with np.errstate(divide='raise', over='raise'): + with pytest.raises(FloatingPointError, + match="divide by zero encountered in reduce"): + np.floor_divide.reduce(np.arange(-100, 100).astype(dtype)) + if fo.min: + with pytest.raises(FloatingPointError, + match='overflow encountered in reduce'): + np.floor_divide.reduce( + np.array([fo.min, 1, -1], dtype=dtype) + ) + + @pytest.mark.parametrize( + "dividend,divisor,quotient", + [(np.timedelta64(2, 'Y'), np.timedelta64(2, 'M'), 12), + (np.timedelta64(2, 'Y'), np.timedelta64(-2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(-2, 'M'), 12), + (np.timedelta64(2, 'M'), np.timedelta64(-2, 'Y'), -1), + (np.timedelta64(2, 'Y'), np.timedelta64(0, 'M'), 0), + (np.timedelta64(2, 'Y'), 2, np.timedelta64(1, 'Y')), + (np.timedelta64(2, 'Y'), -2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), 2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -3, np.timedelta64(0, 'Y')), + (np.timedelta64(-2, 'Y'), 0, np.timedelta64('Nat', 'Y')), + ]) + def test_division_int_timedelta(self, dividend, divisor, quotient): + # If either divisor is 0 or quotient is Nat, check for division by 0 + if divisor and (isinstance(quotient, int) or not np.isnat(quotient)): + msg = "Timedelta floor division check" + assert dividend // divisor == quotient, msg + + # Test for arrays as well + msg = "Timedelta arrays floor division check" + dividend_array = np.array([dividend] * 5) + quotient_array = np.array([quotient] * 5) + assert all(dividend_array // divisor == quotient_array), msg + else: + if IS_WASM: + pytest.skip("fp errors don't work in wasm") + with np.errstate(divide='raise', invalid='raise'): + with pytest.raises(FloatingPointError): + dividend // divisor + + def test_division_complex(self): + # check that implementation is correct + msg = "Complex division implementation check" + x = np.array([1. + 1. * 1j, 1. + .5 * 1j, 1. + 2. * 1j], dtype=np.complex128) + assert_almost_equal(x**2 / x, x, err_msg=msg) + # check overflow, underflow + msg = "Complex division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = x**2 / x + assert_almost_equal(y / x, [1, 1], err_msg=msg) + + def test_zero_division_complex(self): + with np.errstate(invalid="ignore", divide="ignore"): + x = np.array([0.0], dtype=np.complex128) + y = 1.0 / x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.nan) / x + assert_(np.isinf(y)[0]) + y = complex(np.nan, np.inf) / x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.inf) / x + assert_(np.isinf(y)[0]) + y = 0.0 / x + assert_(np.isnan(y)[0]) + + def test_floor_division_complex(self): + # check that floor division, divmod and remainder raises type errors + x = np.array([.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], dtype=np.complex128) + with pytest.raises(TypeError): + x // 7 + with pytest.raises(TypeError): + np.divmod(x, 7) + with pytest.raises(TypeError): + np.remainder(x, 7) + + def test_floor_division_signed_zero(self): + # Check that the sign bit is correctly set when dividing positive and + # negative zero by one. 
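+        # For intuition: dividing a signed zero by one keeps the zero's
+        # sign, 0.0 // 1.0 is +0.0 while (-0.0) // 1.0 is -0.0; since
+        # -0.0 == 0.0 compares equal, np.signbit is the reliable check
+        # used below.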
+ x = np.zeros(10) + assert_equal(np.signbit(x // 1), 0) + assert_equal(np.signbit((-x) // 1), 1) + + @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"), + reason="gh-22982") + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_floor_division_errors(self, dtype): + fnan = np.array(np.nan, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + fzer = np.array(0.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + # divide by zero error check + with np.errstate(divide='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.floor_divide, fone, fzer) + with np.errstate(divide='ignore', invalid='raise'): + np.floor_divide(fone, fzer) + + # The following already contain a NaN and should not warn + with np.errstate(all='raise'): + np.floor_divide(fnan, fone) + np.floor_divide(fone, fnan) + np.floor_divide(fnan, fzer) + np.floor_divide(fzer, fnan) + + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_floor_division_corner_cases(self, dtype): + # test corner cases like 1.0//0.0 for errors and return vals + x = np.zeros(10, dtype=dtype) + y = np.ones(10, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + fzer = np.array(0.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") + div = np.floor_divide(fnan, fone) + assert np.isnan(div), f"div: {div}" + div = np.floor_divide(fone, fnan) + assert np.isnan(div), f"div: {div}" + div = np.floor_divide(fnan, fzer) + assert np.isnan(div), f"div: {div}" + # verify 1.0//0.0 computations return inf + with np.errstate(divide='ignore'): + z = np.floor_divide(y, x) + assert_(np.isinf(z).all()) + +def floor_divide_and_remainder(x, y): + return (np.floor_divide(x, y), np.remainder(x, y)) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestRemainder: + + def test_remainder_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1 * 71, dtype=dt1) + b = np.array(sg2 * 19, dtype=dt2) + div, rem = op(a, b) + assert_equal(div * b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_remainder_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = [divmod(*t) for t in arg] + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. 
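+ # (illustrative: divmod(0, -5) == (0, 0) for Python ints, but the
+ # float quotient must be -0.0 because the operand signs differ; the
+ # np.where calls below patch those signed zeros into the targets)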
+ tgtdiv, tgtrem = np.array(tgt, dtype=float).T + tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) + tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) + + for op in [floor_divide_and_remainder, np.divmod]: + for dt in np.typecodes['Float']: + msg = f'op: {op.__name__}, dtype: {dt}' + fa = a.astype(dt) + fb = b.astype(dt) + div, rem = op(fa, fb) + assert_equal(div, tgtdiv, err_msg=msg) + assert_equal(rem, tgtrem, err_msg=msg) + + def test_float_remainder_roundoff(self): + # gh-6127 + dt = np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1 * 78 * 6e-8, dtype=dt1) + b = np.array(sg2 * 6e-8, dtype=dt2) + div, rem = op(a, b) + # Equal assertion should hold when fmod is used + assert_equal(div * b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail(sys.platform.startswith("darwin"), + reason="MacOS seems to not give the correct 'invalid' warning for " + "`fmod`. Hopefully, others always do.") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_float_divmod_errors(self, dtype): + # Check valid errors raised for divmod and remainder + fzero = np.array(0.0, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + # since divmod is combination of both remainder and divide + # ops it will set both dividebyzero and invalid flags + with np.errstate(divide='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.divmod, fone, fzero) + with np.errstate(divide='ignore', invalid='raise'): + assert_raises(FloatingPointError, np.divmod, fone, fzero) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.divmod, fzero, fzero) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.divmod, finf, finf) + with np.errstate(divide='ignore', invalid='raise'): + assert_raises(FloatingPointError, np.divmod, finf, fzero) + with np.errstate(divide='raise', invalid='ignore'): + # inf / 0 does not set any flags, only the modulo creates a NaN + np.divmod(finf, fzero) + + @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"), + reason="gh-22982") + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail(sys.platform.startswith("darwin"), + reason="MacOS seems to not give the correct 'invalid' warning for " + "`fmod`. Hopefully, others always do.") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + @pytest.mark.parametrize('fn', [np.fmod, np.remainder]) + def test_float_remainder_errors(self, dtype, fn): + fzero = np.array(0.0, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + + # The following already contain a NaN and should not warn. 
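+ # (note: the first call below, fn(fone, fzero), is the exception --
+ # finite % 0 sets the 'invalid' flag, which the pytest.raises block
+ # asserts; only the NaN-containing calls after it must stay silent)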
+ with np.errstate(all='raise'):
+ with pytest.raises(FloatingPointError,
+ match="invalid value"):
+ fn(fone, fzero)
+ fn(fnan, fzero)
+ fn(fzero, fnan)
+ fn(fone, fnan)
+ fn(fnan, fone)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_float_remainder_overflow(self):
+ a = np.finfo(np.float64).tiny
+ with np.errstate(over='ignore', invalid='ignore'):
+ div, mod = np.divmod(4, a)
+ assert_(np.isinf(div))
+ assert_(mod == 0)
+ with np.errstate(over='raise', invalid='ignore'):
+ assert_raises(FloatingPointError, np.divmod, 4, a)
+ with np.errstate(invalid='raise', over='ignore'):
+ assert_raises(FloatingPointError, np.divmod, 4, a)
+
+ def test_float_divmod_corner_cases(self):
+ # check nan cases
+ for dt in np.typecodes['Float']:
+ fnan = np.array(np.nan, dtype=dt)
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ finf = np.array(np.inf, dtype=dt)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+ sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+ div, rem = np.divmod(fone, fzer)
+ assert np.isinf(div), f'dt: {dt}, div: {div}'
+ assert np.isnan(rem), f'dt: {dt}, rem: {rem}'
+ div, rem = np.divmod(fzer, fzer)
+ assert np.isnan(rem), f'dt: {dt}, rem: {rem}'
+ assert np.isnan(div), f'dt: {dt}, div: {div}'
+ div, rem = np.divmod(finf, finf)
+ assert np.isnan(div), f'dt: {dt}, div: {div}'
+ assert np.isnan(rem), f'dt: {dt}, rem: {rem}'
+ div, rem = np.divmod(finf, fzer)
+ assert np.isinf(div), f'dt: {dt}, div: {div}'
+ assert np.isnan(rem), f'dt: {dt}, rem: {rem}'
+ div, rem = np.divmod(fnan, fone)
+ assert np.isnan(rem), f"dt: {dt}, rem: {rem}"
+ assert np.isnan(div), f"dt: {dt}, div: {div}"
+ div, rem = np.divmod(fone, fnan)
+ assert np.isnan(rem), f"dt: {dt}, rem: {rem}"
+ assert np.isnan(div), f"dt: {dt}, div: {div}"
+ div, rem = np.divmod(fnan, fzer)
+ assert np.isnan(rem), f"dt: {dt}, rem: {rem}"
+ assert np.isnan(div), f"dt: {dt}, div: {div}"
+
+ def test_float_remainder_corner_cases(self):
+ # Check remainder magnitude.
+ for dt in np.typecodes['Float']:
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ fnan = np.array(np.nan, dtype=dt)
+ b = np.array(1.0, dtype=dt)
+ a = np.nextafter(np.array(0.0, dtype=dt), -b)
+ rem = np.remainder(a, b)
+ assert_(rem <= b, f'dt: {dt}')
+ rem = np.remainder(-a, -b)
+ assert_(rem >= -b, f'dt: {dt}')
+
+ # Check nans, inf
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+ sup.filter(RuntimeWarning, "invalid value encountered in fmod")
+ for dt in np.typecodes['Float']:
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ finf = np.array(np.inf, dtype=dt)
+ fnan = np.array(np.nan, dtype=dt)
+ rem = np.remainder(fone, fzer)
+ assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+ # MSVC 2008 returns NaN here, so disable the check.
+ #rem = np.remainder(fone, finf)
+ #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
+ rem = np.remainder(finf, fone)
+ fmod = np.fmod(finf, fone)
+ assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+ assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+ rem = np.remainder(finf, finf)
+ fmod = np.fmod(finf, fone)
+ assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+ assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+ rem = np.remainder(finf, fzer)
+ fmod = np.fmod(finf, fzer)
+ assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+ assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+ rem = np.remainder(fone, fnan)
+ fmod = np.fmod(fone, fnan)
+ assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+ assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+ rem = np.remainder(fnan, fzer)
+ fmod = np.fmod(fnan, fzer)
+ assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+ assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+ rem = np.remainder(fnan, fone)
+ fmod = np.fmod(fnan, fone)
+ assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+ assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+
+
+ class TestDivisionIntegerOverflowsAndDivideByZero:
+ result_type = namedtuple('result_type',
+ ['nocast', 'casted'])
+ helper_lambdas = {
+ 'zero': lambda dtype: 0,
+ 'min': lambda dtype: np.iinfo(dtype).min,
+ 'neg_min': lambda dtype: -np.iinfo(dtype).min,
+ 'min-zero': lambda dtype: (np.iinfo(dtype).min, 0),
+ 'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0),
+ }
+ overflow_results = {
+ np.remainder: result_type(
+ helper_lambdas['zero'], helper_lambdas['zero']),
+ np.fmod: result_type(
+ helper_lambdas['zero'], helper_lambdas['zero']),
+ operator.mod: result_type(
+ helper_lambdas['zero'], helper_lambdas['zero']),
+ operator.floordiv: result_type(
+ helper_lambdas['min'], helper_lambdas['neg_min']),
+ np.floor_divide: result_type(
+ helper_lambdas['min'], helper_lambdas['neg_min']),
+ np.divmod: result_type(
+ helper_lambdas['min-zero'], helper_lambdas['neg_min-zero'])
+ }
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+ def test_signed_division_overflow(self, dtype):
+ to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype)
+ for op1, op2, extractor, operand_identifier in to_check:
+ with pytest.warns(RuntimeWarning, match="overflow encountered"):
+ res = op1 // op2
+
+ assert res.dtype == op1.dtype
+ assert extractor(res) == np.iinfo(op1.dtype).min
+
+ # Remainder is well defined though, and does not warn:
+ res = op1 % op2
+ assert res.dtype == op1.dtype
+ assert extractor(res) == 0
+ # Check fmod as well:
+ res = np.fmod(op1, op2)
+ assert extractor(res) == 0
+
+ # Divmod warns for the division part:
+ with pytest.warns(RuntimeWarning, match="overflow encountered"):
+ res1, res2 = np.divmod(op1, op2)
+
+ assert res1.dtype == res2.dtype == op1.dtype
+ assert extractor(res1) == np.iinfo(op1.dtype).min
+ assert extractor(res2) == 0
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+ def test_divide_by_zero(self, dtype):
+ # Note that the return value cannot be well defined here, but NumPy
+ # currently uses 0 consistently. This could be changed.
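+ # (illustrative of the current behaviour, not a guarantee:
+ # with np.errstate(divide='ignore'):
+ # np.array([1], dtype=np.int8) // 0 -> array([0], dtype=int8))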
+ to_check = interesting_binop_operands(1, 0, dtype) + for op1, op2, extractor, operand_identifier in to_check: + with pytest.warns(RuntimeWarning, match="divide by zero"): + res = op1 // op2 + + assert res.dtype == op1.dtype + assert extractor(res) == 0 + + with pytest.warns(RuntimeWarning, match="divide by zero"): + res1, res2 = np.divmod(op1, op2) + + assert res1.dtype == res2.dtype == op1.dtype + assert extractor(res1) == 0 + assert extractor(res2) == 0 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dividend_dtype", sctypes['int']) + @pytest.mark.parametrize("divisor_dtype", sctypes['int']) + @pytest.mark.parametrize("operation", + [np.remainder, np.fmod, np.divmod, np.floor_divide, + operator.mod, operator.floordiv]) + @np.errstate(divide='warn', over='warn') + def test_overflows(self, dividend_dtype, divisor_dtype, operation): + # SIMD tries to perform the operation on as many elements as possible + # that is a multiple of the register's size. We resort to the + # default implementation for the leftover elements. + # We try to cover all paths here. + arrays = [np.array([np.iinfo(dividend_dtype).min] * i, + dtype=dividend_dtype) for i in range(1, 129)] + divisor = np.array([-1], dtype=divisor_dtype) + # If dividend is a larger type than the divisor (`else` case), + # then, result will be a larger type than dividend and will not + # result in an overflow for `divmod` and `floor_divide`. + if np.dtype(dividend_dtype).itemsize >= np.dtype( + divisor_dtype).itemsize and operation in ( + np.divmod, np.floor_divide, operator.floordiv): + with pytest.warns( + RuntimeWarning, + match="overflow encountered in"): + result = operation( + dividend_dtype(np.iinfo(dividend_dtype).min), + divisor_dtype(-1) + ) + assert result == self.overflow_results[operation].nocast( + dividend_dtype) + + # Arrays + for a in arrays: + # In case of divmod, we need to flatten the result + # column first as we get a column vector of quotient and + # remainder and a normal flatten of the expected result. + with pytest.warns( + RuntimeWarning, + match="overflow encountered in"): + result = np.array(operation(a, divisor)).flatten('f') + expected_array = np.array( + [self.overflow_results[operation].nocast( + dividend_dtype)] * len(a)).flatten() + assert_array_equal(result, expected_array) + else: + # Scalars + result = operation( + dividend_dtype(np.iinfo(dividend_dtype).min), + divisor_dtype(-1) + ) + assert result == self.overflow_results[operation].casted( + dividend_dtype) + + # Arrays + for a in arrays: + # See above comment on flatten + result = np.array(operation(a, divisor)).flatten('f') + expected_array = np.array( + [self.overflow_results[operation].casted( + dividend_dtype)] * len(a)).flatten() + assert_array_equal(result, expected_array) + + +class TestCbrt: + def test_cbrt_scalar(self): + assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) + + def test_cbrt(self): + x = np.array([1., 2., -3., np.inf, -np.inf]) + assert_almost_equal(np.cbrt(x**3), x) + + assert_(np.isnan(np.cbrt(np.nan))) + assert_equal(np.cbrt(np.inf), np.inf) + assert_equal(np.cbrt(-np.inf), -np.inf) + + +class TestPower: + def test_power_float(self): + x = np.array([1., 2., 3.]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_equal(x**2, [1., 4., 9.]) + y = x.copy() + y **= 2 + assert_equal(y, [1., 4., 9.]) + assert_almost_equal(x**(-1), [1., 0.5, 1. 
/ 3]) + assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) + + for out, inp, msg in _gen_alignment_data(dtype=np.float32, + type='unary', + max_size=11): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + for out, inp, msg in _gen_alignment_data(dtype=np.float64, + type='unary', + max_size=7): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + def test_power_complex(self): + x = np.array([1 + 2j, 2 + 3j, 3 + 4j]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_almost_equal(x**2, [-3 + 4j, -5 + 12j, -7 + 24j]) + assert_almost_equal(x**3, [(1 + 2j)**3, (2 + 3j)**3, (3 + 4j)**3]) + assert_almost_equal(x**4, [(1 + 2j)**4, (2 + 3j)**4, (3 + 4j)**4]) + assert_almost_equal(x**(-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)]) + assert_almost_equal(x**(-2), [1 / (1 + 2j)**2, 1 / (2 + 3j)**2, 1 / (3 + 4j)**2]) + assert_almost_equal(x**(-3), [(-11 + 2j) / 125, (-46 - 9j) / 2197, + (-117 - 44j) / 15625]) + assert_almost_equal(x**(0.5), [ncu.sqrt(1 + 2j), ncu.sqrt(2 + 3j), + ncu.sqrt(3 + 4j)]) + norm = 1. / ((x**14)[0]) + assert_almost_equal(x**14 * norm, + [i * norm for i in [-76443 + 16124j, 23161315 + 58317492j, + 5583548873 + 2465133864j]]) + + # Ticket #836 + def assert_complex_equal(x, y): + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + for z in [complex(0, np.inf), complex(1, np.inf)]: + z = np.array([z], dtype=np.complex128) + with np.errstate(invalid="ignore"): + assert_complex_equal(z**1, z) + assert_complex_equal(z**2, z * z) + assert_complex_equal(z**3, z * z * z) + + def test_power_zero(self): + # ticket #1271 + zero = np.array([0j]) + one = np.array([1 + 0j]) + cnan = np.array([complex(np.nan, np.nan)]) + # FIXME cinf not tested. 
+ #cinf = np.array([complex(np.inf, 0)])
+
+ def assert_complex_equal(x, y):
+ x, y = np.asarray(x), np.asarray(y)
+ assert_array_equal(x.real, y.real)
+ assert_array_equal(x.imag, y.imag)
+
+ # positive powers
+ for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+ assert_complex_equal(np.power(zero, p), zero)
+
+ # zero power
+ assert_complex_equal(np.power(zero, 0), one)
+ with np.errstate(invalid="ignore"):
+ assert_complex_equal(np.power(zero, 0 + 1j), cnan)
+
+ # negative power
+ for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+ assert_complex_equal(np.power(zero, -p), cnan)
+ assert_complex_equal(np.power(zero, -1 + 0.2j), cnan)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_zero_power_nonzero(self):
+ # Testing 0^{Non-zero} issue 18378
+ zero = np.array([0.0 + 0.0j])
+ cnan = np.array([complex(np.nan, np.nan)])
+
+ def assert_complex_equal(x, y):
+ assert_array_equal(x.real, y.real)
+ assert_array_equal(x.imag, y.imag)
+
+ # Complex powers with positive real part will not generate a warning
+ assert_complex_equal(np.power(zero, 1 + 4j), zero)
+ assert_complex_equal(np.power(zero, 2 - 3j), zero)
+ # Testing zero values when real part is greater than zero
+ assert_complex_equal(np.power(zero, 1 + 1j), zero)
+ assert_complex_equal(np.power(zero, 1 + 0j), zero)
+ assert_complex_equal(np.power(zero, 1 - 1j), zero)
+ # Complex powers with a negative real part, or a zero real part with
+ # a nonzero imaginary part, generate NaN and hence a RuntimeWarning
+ with pytest.warns(expected_warning=RuntimeWarning) as r:
+ assert_complex_equal(np.power(zero, -1 + 1j), cnan)
+ assert_complex_equal(np.power(zero, -2 - 3j), cnan)
+ assert_complex_equal(np.power(zero, -7 + 0j), cnan)
+ assert_complex_equal(np.power(zero, 0 + 1j), cnan)
+ assert_complex_equal(np.power(zero, 0 - 1j), cnan)
+ assert len(r) == 5
+
+ def test_fast_power(self):
+ x = np.array([1, 2, 3], np.int16)
+ res = x**2.0
+ assert_((x**2.00001).dtype is res.dtype)
+ assert_array_equal(res, [1, 4, 9])
+ # check the inplace operation on the casted copy doesn't mess with x
+ assert_(not np.may_share_memory(res, x))
+ assert_array_equal(x, [1, 2, 3])
+
+ # Check that the fast path ignores 1-element not 0-d arrays
+ res = x ** np.array([[[2]]])
+ assert_equal(res.shape, (1, 1, 3))
+
+ def test_integer_power(self):
+ a = np.array([15, 15], 'i8')
+ b = np.power(a, a)
+ assert_equal(b, [437893890380859375, 437893890380859375])
+
+ def test_integer_power_with_integer_zero_exponent(self):
+ dtypes = np.typecodes['Integer']
+ for dt in dtypes:
+ arr = np.arange(-10, 10, dtype=dt)
+ assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+ dtypes = np.typecodes['UnsignedInteger']
+ for dt in dtypes:
+ arr = np.arange(10, dtype=dt)
+ assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+ def test_integer_power_of_1(self):
+ dtypes = np.typecodes['AllInteger']
+ for dt in dtypes:
+ arr = np.arange(10, dtype=dt)
+ assert_equal(np.power(1, arr), np.ones_like(arr))
+
+ def test_integer_power_of_zero(self):
+ dtypes = np.typecodes['AllInteger']
+ for dt in dtypes:
+ arr = np.arange(1, 10, dtype=dt)
+ assert_equal(np.power(0, arr), np.zeros_like(arr))
+
+ def test_integer_to_negative_power(self):
+ dtypes = np.typecodes['Integer']
+ for dt in dtypes:
+ a = np.array([0, 1, 2, 3], dtype=dt)
+ b = np.array([0, 1, 2, -3], dtype=dt)
+ one = np.array(1, dtype=dt)
+ minusone = np.array(-1, dtype=dt)
+ assert_raises(ValueError, np.power, a, b)
+ assert_raises(ValueError, np.power, a, minusone)
+ assert_raises(ValueError, np.power, one, b)
+ assert_raises(ValueError, np.power, one, minusone) + + def test_float_to_inf_power(self): + for dt in [np.float32, np.float64]: + a = np.array([1, 1, 2, 2, -2, -2, np.inf, -np.inf], dt) + b = np.array([np.inf, -np.inf, np.inf, -np.inf, + np.inf, -np.inf, np.inf, -np.inf], dt) + r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt) + assert_equal(np.power(a, b), r) + + def test_power_fast_paths(self): + # gh-26055 + for dt in [np.float32, np.float64]: + a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt) + expected = np.array([0.0, 1.21, 4., 1.44e+26, 100, np.inf, np.inf]) + result = np.power(a, 2.) + assert_array_max_ulp(result, expected.astype(dt), maxulp=1) + + a = np.array([0, 1.1, 2, 12e12], dt) + expected = np.sqrt(a).astype(dt) + result = np.power(a, 0.5) + assert_array_max_ulp(result, expected, maxulp=1) + + +class TestFloat_power: + def test_type_conversion(self): + arg_type = '?bhilBHILefdgFDG' + res_type = 'ddddddddddddgDDG' + for dtin, dtout in zip(arg_type, res_type): + msg = f"dtin: {dtin}, dtout: {dtout}" + arg = np.ones(1, dtype=dtin) + res = np.float_power(arg, arg) + assert_(res.dtype.name == np.dtype(dtout).name, msg) + + +class TestLog2: + @pytest.mark.parametrize('dt', ['f', 'd', 'g']) + def test_log2_values(self, dt): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.log2(xf), yf) + + @pytest.mark.parametrize("i", range(1, 65)) + def test_log2_ints(self, i): + # a good log2 implementation should provide this, + # might fail on OS with bad libm + v = np.log2(2.**i) + assert_equal(v, float(i), err_msg='at exponent %d' % i) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_log2_special(self): + assert_equal(np.log2(1.), 0.) 
+ assert_equal(np.log2(np.inf), np.inf) + assert_(np.isnan(np.log2(np.nan))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.log2(-1.))) + assert_(np.isnan(np.log2(-np.inf))) + assert_equal(np.log2(0.), -np.inf) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + assert_(w[2].category is RuntimeWarning) + + +class TestExp2: + def test_exp2_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.exp2(yf), xf) + + +class TestLogAddExp2(_FilterInvalids): + # Need test for intermediate precisions + def test_logaddexp2_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_) + + def test_logaddexp2_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, 0))) + assert_(np.isnan(np.logaddexp2(0, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp2.identity, -np.inf) + assert_equal(np.logaddexp2.reduce([]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0) + + +class TestLog: + def test_log_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) * log2_ + assert_almost_equal(np.log(xf), yf) + + # test aliasing(issue #17761) + x = np.array([2, 0.937500, 3, 0.947500, 1.054697]) + xf = np.log(x) + assert_almost_equal(np.log(x, out=x), xf) + + def test_log_values_maxofdtype(self): + # test log() of max for dtype does not raise + dtypes = [np.float32, np.float64] + # This is failing at least on linux aarch64 (see gh-25460), and on most + # other non x86-64 platforms checking `longdouble` isn't too useful as + # it's an alias for float64. 
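+ # (illustrative: np.log(np.finfo(np.float64).max) is about 709.78,
+ # nowhere near overflow, so a correct implementation sets no FP flags)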
+ if platform.machine() == 'x86_64': + dtypes += [np.longdouble] + + for dt in dtypes: + with np.errstate(all='raise'): + x = np.finfo(dt).max + np.log(x) + + def test_log_strides(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0, size=ii)) + x_special = x_f64.copy() + x_special[3:-1:4] = 1.0 + y_true = np.log(x_f64) + y_special = np.log(x_special) + for jj in strides: + assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(1 + 1e-12j, 5e-25 + 1e-12j), + (1.000000000000001 + 3e-08j, + 1.5602230246251546e-15 + 2.999999999999996e-08j), + (0.9999995000000417 + 0.0009999998333333417j, + 7.831475869017683e-18 + 0.001j), + (0.9999999999999996 + 2.999999999999999e-08j, + 5.9107901499372034e-18 + 3e-08j), + (0.99995000042 - 0.009999833j, + -7.015159763822903e-15 - 0.009999999665816696j)], + ) + def test_log_precision_float64(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-15) + + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12 + 3e-06j)), + (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)), + (np.complex64(0.9999999 + 1e-06j), + np.complex64(-1.192088e-07 + 1.0000001e-06j))], + ) + def test_log_precision_float32(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-6) + + +class TestExp: + def test_exp_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) * log2_ + assert_almost_equal(np.exp(yf), xf) + + def test_exp_strides(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1, size=ii)) + y_true = np.exp(x_f64) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) + +class TestSpecialFloats: + def test_exp_values(self): + with np.errstate(under='raise', over='raise'): + x = [np.nan, np.nan, np.inf, 0.] 
+ y = [np.nan, -np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.exp(yf), xf) + + # See: https://github.com/numpy/numpy/issues/19192 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) + def test_exp_exceptions(self): + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.exp, np.float16(11.0899)) + assert_raises(FloatingPointError, np.exp, np.float32(100.)) + assert_raises(FloatingPointError, np.exp, np.float32(1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(800.)) + assert_raises(FloatingPointError, np.exp, np.float64(1E19)) + + with np.errstate(under='raise'): + assert_raises(FloatingPointError, np.exp, np.float16(-17.5)) + assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_log_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0] + y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + yf1p = np.array(y1p, dtype=dt) + assert_equal(np.log(yf), xf) + assert_equal(np.log2(yf), xf) + assert_equal(np.log10(yf), xf) + assert_equal(np.log1p(yf1p), xf) + + with np.errstate(divide='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.log, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-1.0, dtype=dt)) + + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.log, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-2.0, dtype=dt)) + + # See https://github.com/numpy/numpy/issues/18005 + with assert_no_warnings(): + a = np.array(1e9, dtype='float32') + np.log(a) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('dtype', ['e', 'f', 'd', 'g']) + def test_sincos_values(self, dtype): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.nan, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf] + xf = np.array(x, dtype=dtype) + yf = np.array(y, dtype=dtype) + assert_equal(np.sin(yf), xf) + assert_equal(np.cos(yf), xf) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail( + sys.platform.startswith("darwin"), + reason="underflow is triggered for scalar 'sin'" + ) + def test_sincos_underflow(self): + with np.errstate(under='raise'): + underflow_trigger = np.array( + float.fromhex("0x1.f37f47a03f82ap-511"), + dtype=np.float64 + ) + 
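+ # (illustrative reasoning: the value is roughly 2**-510, so a cubic
+ # term x**3 in a sin/cos polynomial lands far below the smallest
+ # subnormal and can raise 'underflow' even though sin(x) == x is
+ # perfectly representable)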
np.sin(underflow_trigger) + np.cos(underflow_trigger) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('callable', [np.sin, np.cos]) + @pytest.mark.parametrize('dtype', ['e', 'f', 'd']) + @pytest.mark.parametrize('value', [np.inf, -np.inf]) + def test_sincos_errors(self, callable, dtype, value): + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, callable, + np.array([value], dtype=dtype)) + + @pytest.mark.parametrize('callable', [np.sin, np.cos]) + @pytest.mark.parametrize('dtype', ['f', 'd']) + @pytest.mark.parametrize('stride', [-1, 1, 2, 4, 5]) + def test_sincos_overlaps(self, callable, dtype, stride): + N = 100 + M = N // abs(stride) + rng = np.random.default_rng(42) + x = rng.standard_normal(N, dtype) + y = callable(x[::stride]) + callable(x[::stride], out=x[:M]) + assert_equal(x[:M], y) + + @pytest.mark.parametrize('dt', ['e', 'f', 'd', 'g']) + def test_sqrt_values(self, dt): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, 0.] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.] + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.sqrt(yf), xf) + + # with np.errstate(invalid='raise'): + # assert_raises( + # FloatingPointError, np.sqrt, np.array(-100., dtype=dt) + # ) + + def test_abs_values(self): + x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.abs(yf), xf) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_square_values(self): + x = [np.nan, np.nan, np.inf, np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf] + with np.errstate(all='ignore'): + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.square(yf), xf) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.square, + np.array(1E3, dtype='e')) + assert_raises(FloatingPointError, np.square, + np.array(1E32, dtype='f')) + assert_raises(FloatingPointError, np.square, + np.array(1E200, dtype='d')) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_reciprocal_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.] 
+ for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.reciprocal(yf), xf) + + with np.errstate(divide='raise'): + for dt in ['e', 'f', 'd', 'g']: + assert_raises(FloatingPointError, np.reciprocal, + np.array(-0.0, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_tan(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf] + out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.tan(in_arr), out_arr) + + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.tan, + np.array(np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.tan, + np.array(-np.inf, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arcsincos(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arcsin(in_arr), out_arr) + assert_equal(np.arccos(in_arr), out_arr) + + for callable in [np.arcsin, np.arccos]: + for value in [np.inf, -np.inf, 2.0, -2.0]: + for dt in ['e', 'f', 'd']: + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, callable, + np.array(value, dtype=dt)) + + def test_arctan(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan] + out = [np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arctan(in_arr), out_arr) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_sinh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.sinh(in_arr), out_arr) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.sinh, + np.array(12.0, dtype='e')) + assert_raises(FloatingPointError, np.sinh, + np.array(120.0, dtype='f')) + assert_raises(FloatingPointError, np.sinh, + np.array(1200.0, dtype='d')) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif('bsd' in sys.platform, + reason="fallback implementation may not raise, see gh-2487") + def test_cosh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.cosh(in_arr), out_arr) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.cosh, + np.array(12.0, dtype='e')) + assert_raises(FloatingPointError, np.cosh, + np.array(120.0, dtype='f')) + assert_raises(FloatingPointError, np.cosh, + np.array(1200.0, dtype='d')) + + def test_tanh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, 1.0, -1.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_array_max_ulp(np.tanh(in_arr), out_arr, 3) + + def test_arcsinh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arcsinh(in_arr), out_arr) + + 
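+ # (note for the next test: arccosh is real-valued only for x >= 1.0,
+ # hence the NaN expectations below for inputs such as 0.0 and -inf)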
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arccosh(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0] + out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arccosh(in_arr), out_arr) + + for value in [0.0, -np.inf]: + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.arccosh, + np.array(value, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arctanh(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0] + out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arctanh(in_arr), out_arr) + + for value in [1.01, np.inf, -np.inf, 1.0, -1.0]: + with np.errstate(invalid='raise', divide='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.arctanh, + np.array(value, dtype=dt)) + + # Make sure glibc < 2.18 atanh is not used, issue 25087 + assert np.signbit(np.arctanh(-1j).real) + + # See: https://github.com/numpy/numpy/issues/20448 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) + def test_exp2(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, 0.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.exp2(in_arr), out_arr) + + for value in [2000.0, -2000.0]: + with np.errstate(over='raise', under='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.exp2, + np.array(value, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_expm1(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -1.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.expm1(in_arr), out_arr) + + for value in [200.0, 2000.0]: + with np.errstate(over='raise'): + for dt in ['e', 'f']: + assert_raises(FloatingPointError, np.expm1, + np.array(value, dtype=dt)) + + # test to ensure no spurious FP exceptions are raised due to SIMD + INF_INVALID_ERR = [ + np.cos, np.sin, np.tan, np.arccos, np.arcsin, np.spacing, np.arctanh + ] + NEG_INVALID_ERR = [ + np.log, np.log2, np.log10, np.log1p, np.sqrt, np.arccosh, + np.arctanh + ] + ONE_INVALID_ERR = [ + np.arctanh, + ] + LTONE_INVALID_ERR = [ + np.arccosh, + ] + BYZERO_ERR = [ + np.log, np.log2, np.log10, np.reciprocal, np.arccosh + ] + + @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP) + @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) + @pytest.mark.parametrize("data, escape", ( + ([0.03], LTONE_INVALID_ERR), + ([0.03] * 32, LTONE_INVALID_ERR), + # neg + ([-1.0], NEG_INVALID_ERR), + ([-1.0] * 32, NEG_INVALID_ERR), + # flat + ([1.0], ONE_INVALID_ERR), + ([1.0] * 32, ONE_INVALID_ERR), + # zero + ([0.0], BYZERO_ERR), + ([0.0] * 32, BYZERO_ERR), + ([-0.0], BYZERO_ERR), + ([-0.0] * 32, BYZERO_ERR), + # nan + ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.nan] * 32, LTONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0] * 32, ONE_INVALID_ERR), + 
([np.nan], []), + ([np.nan] * 32, []), + # inf + ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.inf] * 32, INF_INVALID_ERR + LTONE_INVALID_ERR), + ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR), + ([np.inf, 1.0, 1.0, 1.0] * 32, INF_INVALID_ERR), + ([np.inf], INF_INVALID_ERR), + ([np.inf] * 32, INF_INVALID_ERR), + # ninf + ([0.5, 0.5, 0.5, -np.inf], + NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, -np.inf] * 32, + NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), + ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf, 1.0, 1.0, 1.0] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), + )) + def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): + if escape and ufunc in escape: + return + # FIXME: NAN raises FP invalid exception: + # - ceil/float16 on MSVC:32-bit + # - spacing/float16 on almost all platforms + # - spacing all floats on MSVC vs2022 + if ufunc == np.spacing: + return + if ufunc == np.ceil and dtype == 'e': + return + array = np.array(data, dtype=dtype) + with assert_no_warnings(): + ufunc(array) + + @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) + def test_divide_spurious_fpexception(self, dtype): + dt = np.dtype(dtype) + dt_info = np.finfo(dt) + subnorm = dt_info.smallest_subnormal + # Verify a bug fix caused due to filling the remaining lanes of the + # partially loaded dividend SIMD vector with ones, which leads to + # raising an overflow warning when the divisor is denormal. + # see https://github.com/numpy/numpy/issues/25097 + with assert_no_warnings(): + np.zeros(128 + 1, dtype=dt) / subnorm + +class TestFPClass: + @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1, + 2, 4, 5, 6, 7, 8, 9, 10]) + def test_fpclass(self, stride): + arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d') + arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f') + nan = np.array([True, True, False, False, False, False, False, False, False, False]) # noqa: E221 + inf = np.array([False, False, True, True, False, False, False, False, False, False]) # noqa: E221 + sign = np.array([False, True, False, True, True, False, True, False, False, True]) # noqa: E221 + finite = np.array([False, False, False, False, True, True, True, True, True, True]) # noqa: E221 + assert_equal(np.isnan(arr_f32[::stride]), nan[::stride]) + assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) + assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) + assert_equal(np.isinf(arr_f64[::stride]), inf[::stride]) + if platform.machine() == 'riscv64': + # On RISC-V, many operations that produce NaNs, such as converting + # a -NaN from f64 to f32, return a canonical NaN. The canonical + # NaNs are always positive. See section 11.3 NaN Generation and + # Propagation of the RISC-V Unprivileged ISA for more details. + # We disable the sign test on riscv64 for -np.nan as we + # cannot assume that its sign will be honoured in these tests. 
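+ # (illustrative: on such hardware even a plain f64 -> f32 cast of
+ # -np.nan may come back as the positive canonical NaN, so the sign
+ # bit of a NaN result is not portable)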
+ arr_f64_rv = np.copy(arr_f64) + arr_f32_rv = np.copy(arr_f32) + arr_f64_rv[1] = -1.0 + arr_f32_rv[1] = -1.0 + assert_equal(np.signbit(arr_f32_rv[::stride]), sign[::stride]) + assert_equal(np.signbit(arr_f64_rv[::stride]), sign[::stride]) + else: + assert_equal(np.signbit(arr_f32[::stride]), sign[::stride]) + assert_equal(np.signbit(arr_f64[::stride]), sign[::stride]) + assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride]) + assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride]) + + @pytest.mark.parametrize("dtype", ['d', 'f']) + def test_fp_noncontiguous(self, dtype): + data = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, + 1.0, -0.0, 0.0, 2.2251e-308, + -2.2251e-308], dtype=dtype) + nan = np.array([True, True, False, False, False, False, + False, False, False, False]) + inf = np.array([False, False, True, True, False, False, + False, False, False, False]) + sign = np.array([False, True, False, True, True, False, + True, False, False, True]) + finite = np.array([False, False, False, False, True, True, + True, True, True, True]) + out = np.ndarray(data.shape, dtype='bool') + ncontig_in = data[1::3] + ncontig_out = out[1::3] + contig_in = np.array(ncontig_in) + + if platform.machine() == 'riscv64': + # Disable the -np.nan signbit tests on riscv64. See comments in + # test_fpclass for more details. + data_rv = np.copy(data) + data_rv[1] = -1.0 + ncontig_sign_in = data_rv[1::3] + contig_sign_in = np.array(ncontig_sign_in) + else: + ncontig_sign_in = ncontig_in + contig_sign_in = contig_in + + assert_equal(ncontig_in.flags.c_contiguous, False) + assert_equal(ncontig_out.flags.c_contiguous, False) + assert_equal(contig_in.flags.c_contiguous, True) + assert_equal(ncontig_sign_in.flags.c_contiguous, False) + assert_equal(contig_sign_in.flags.c_contiguous, True) + # ncontig in, ncontig out + assert_equal(np.isnan(ncontig_in, out=ncontig_out), nan[1::3]) + assert_equal(np.isinf(ncontig_in, out=ncontig_out), inf[1::3]) + assert_equal(np.signbit(ncontig_sign_in, out=ncontig_out), sign[1::3]) + assert_equal(np.isfinite(ncontig_in, out=ncontig_out), finite[1::3]) + # contig in, ncontig out + assert_equal(np.isnan(contig_in, out=ncontig_out), nan[1::3]) + assert_equal(np.isinf(contig_in, out=ncontig_out), inf[1::3]) + assert_equal(np.signbit(contig_sign_in, out=ncontig_out), sign[1::3]) + assert_equal(np.isfinite(contig_in, out=ncontig_out), finite[1::3]) + # ncontig in, contig out + assert_equal(np.isnan(ncontig_in), nan[1::3]) + assert_equal(np.isinf(ncontig_in), inf[1::3]) + assert_equal(np.signbit(ncontig_sign_in), sign[1::3]) + assert_equal(np.isfinite(ncontig_in), finite[1::3]) + # contig in, contig out, nd stride + data_split = np.array(np.array_split(data, 2)) + nan_split = np.array(np.array_split(nan, 2)) + inf_split = np.array(np.array_split(inf, 2)) + sign_split = np.array(np.array_split(sign, 2)) + finite_split = np.array(np.array_split(finite, 2)) + assert_equal(np.isnan(data_split), nan_split) + assert_equal(np.isinf(data_split), inf_split) + if platform.machine() == 'riscv64': + data_split_rv = np.array(np.array_split(data_rv, 2)) + assert_equal(np.signbit(data_split_rv), sign_split) + else: + assert_equal(np.signbit(data_split), sign_split) + assert_equal(np.isfinite(data_split), finite_split) + +class TestLDExp: + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + def test_ldexp(self, dtype, stride): + mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) + exp = np.array([3, 2, 1, 0, 0, -1, 
-2, -3], dtype='i') + out = np.zeros(8, dtype=dtype) + assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) + assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) + +class TestFRExp: + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + @pytest.mark.skipif(not sys.platform.startswith('linux'), + reason="np.frexp gives different answers for NAN/INF on windows and linux") + @pytest.mark.xfail(IS_MUSL, reason="gh23049") + def test_frexp(self, dtype, stride): + arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) + mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype) + exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') + out_mant = np.ones(8, dtype=dtype) + out_exp = 2 * np.ones(8, dtype='i') + mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride])) + assert_equal(mant_true[::stride], mant) + assert_equal(exp_true[::stride], exp) + assert_equal(out_mant[::stride], mant_true[::stride]) + assert_equal(out_exp[::stride], exp_true[::stride]) + + +# func : [maxulperror, low, high] +avx_ufuncs = {'sqrt' : [1, 0., 100.], # noqa: E203 + 'absolute' : [0, -100., 100.], # noqa: E203 + 'reciprocal' : [1, 1., 100.], # noqa: E203 + 'square' : [1, -100., 100.], # noqa: E203 + 'rint' : [0, -100., 100.], # noqa: E203 + 'floor' : [0, -100., 100.], # noqa: E203 + 'ceil' : [0, -100., 100.], # noqa: E203 + 'trunc' : [0, -100., 100.]} # noqa: E203 + +class TestAVXUfuncs: + def test_avx_based_ufunc(self): + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + np.random.seed(42) + for func, prop in avx_ufuncs.items(): + maxulperr = prop[0] + minval = prop[1] + maxval = prop[2] + # various array sizes to ensure masking in AVX is tested + for size in range(1, 32): + myfunc = getattr(np, func) + x_f32 = np.random.uniform(low=minval, high=maxval, + size=size).astype(np.float32) + x_f64 = x_f32.astype(np.float64) + x_f128 = x_f32.astype(np.longdouble) + y_true128 = myfunc(x_f128) + if maxulperr == 0: + assert_equal(myfunc(x_f32), y_true128.astype(np.float32)) + assert_equal(myfunc(x_f64), y_true128.astype(np.float64)) + else: + assert_array_max_ulp(myfunc(x_f32), + y_true128.astype(np.float32), + maxulp=maxulperr) + assert_array_max_ulp(myfunc(x_f64), + y_true128.astype(np.float64), + maxulp=maxulperr) + # various strides to test gather instruction + if size > 1: + y_true32 = myfunc(x_f32) + y_true64 = myfunc(x_f64) + for jj in strides: + assert_equal(myfunc(x_f64[::jj]), y_true64[::jj]) + assert_equal(myfunc(x_f32[::jj]), y_true32[::jj]) + +class TestAVXFloat32Transcendental: + def test_exp_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0, high=88.1, size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) + + def test_log_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0, high=1000, size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) + + def test_sincos_float32(self): + np.random.seed(42) + N = 1000000 + M = np.int_(N / 20) + index = np.random.randint(low=0, high=N, size=M) + x_f32 = np.float32(np.random.uniform(low=-100., high=100., size=N)) + if not _glibc_older_than("2.17"): + # test coverage for elements > 117435.992f for which glibc is used + # this is known to be problematic on old glibc, so skip it there + 
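+ # (note: 117435.992f is the cutoff quoted above; beyond it the SIMD
+ # sin/cos kernels defer argument reduction to libm, and the 10E+10
+ # scale values below are there to exercise exactly that path)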
x_f32[index] = np.float32(10E+10 * np.random.rand(M)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) + # test aliasing(issue #17761) + tx_f32 = x_f32.copy() + assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2) + + def test_strided_float32(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f32 = np.float32(np.random.uniform(low=0.01, high=88.1, size=ii)) + x_f32_large = x_f32.copy() + x_f32_large[3:-1:4] = 120000.0 + exp_true = np.exp(x_f32) + log_true = np.log(x_f32) + sin_true = np.sin(x_f32_large) + cos_true = np.cos(x_f32_large) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2) + +class TestLogAddExp(_FilterInvalids): + def test_logaddexp_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_) + + def test_logaddexp_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, 0))) + assert_(np.isnan(np.logaddexp(0, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp.identity, -np.inf) + assert_equal(np.logaddexp.reduce([]), -np.inf) + + +class TestLog1p: + def test_log1p(self): + assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1 + 1e-6)) + + def test_special(self): + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(ncu.log1p(np.nan), np.nan) + assert_equal(ncu.log1p(np.inf), np.inf) + assert_equal(ncu.log1p(-1.), -np.inf) + assert_equal(ncu.log1p(-2.), np.nan) + assert_equal(ncu.log1p(-np.inf), np.nan) + + +class TestExpm1: + def test_expm1(self): + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2) - 1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6) - 1) + + def test_special(self): + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(0.), 0.) + assert_equal(ncu.expm1(-0.), -0.) 
+ assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(-np.inf), -1.) + + def test_complex(self): + x = np.asarray(1e-12) + assert_allclose(x, ncu.expm1(x)) + x = x.astype(np.complex128) + assert_allclose(x, ncu.expm1(x)) + + +class TestHypot: + def test_simple(self): + assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) + assert_almost_equal(ncu.hypot(0, 0), 0) + + def test_reduce(self): + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0) + assert_equal(ncu.hypot.reduce([]), 0.0) + + +def assert_hypot_isnan(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isnan(ncu.hypot(x, y)), + f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not nan") + + +def assert_hypot_isinf(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isinf(ncu.hypot(x, y)), + f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not inf") + + +class TestHypotSpecialValues: + def test_nan_outputs(self): + assert_hypot_isnan(np.nan, np.nan) + assert_hypot_isnan(np.nan, 1) + + def test_nan_outputs2(self): + assert_hypot_isinf(np.nan, np.inf) + assert_hypot_isinf(np.inf, np.nan) + assert_hypot_isinf(np.inf, 0) + assert_hypot_isinf(0, np.inf) + assert_hypot_isinf(np.inf, np.inf) + assert_hypot_isinf(np.inf, 23.0) + + def test_no_fpe(self): + assert_no_warnings(ncu.hypot, np.inf, 0) + + +def assert_arctan2_isnan(x, y): + assert_(np.isnan(ncu.arctan2(x, y)), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan") + + +def assert_arctan2_ispinf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf") + + +def assert_arctan2_isninf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf") + + +def assert_arctan2_ispzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0") + + +def assert_arctan2_isnzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0") + + +class TestArctan2SpecialValues: + def test_one_one(self): + # atan2(1, 1) returns pi/4. + assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) + assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) + + def test_zero_nzero(self): + # atan2(+-0, -0) returns +-pi. + assert_almost_equal(ncu.arctan2(ncu.PZERO, ncu.NZERO), np.pi) + assert_almost_equal(ncu.arctan2(ncu.NZERO, ncu.NZERO), -np.pi) + + def test_zero_pzero(self): + # atan2(+-0, +0) returns +-0. + assert_arctan2_ispzero(ncu.PZERO, ncu.PZERO) + assert_arctan2_isnzero(ncu.NZERO, ncu.PZERO) + + def test_zero_negative(self): + # atan2(+-0, x) returns +-pi for x < 0. + assert_almost_equal(ncu.arctan2(ncu.PZERO, -1), np.pi) + assert_almost_equal(ncu.arctan2(ncu.NZERO, -1), -np.pi) + + def test_zero_positive(self): + # atan2(+-0, x) returns +-0 for x > 0. + assert_arctan2_ispzero(ncu.PZERO, 1) + assert_arctan2_isnzero(ncu.NZERO, 1) + + def test_positive_zero(self): + # atan2(y, +-0) returns +pi/2 for y > 0. + assert_almost_equal(ncu.arctan2(1, ncu.PZERO), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(1, ncu.NZERO), 0.5 * np.pi) + + def test_negative_zero(self): + # atan2(y, +-0) returns -pi/2 for y < 0. 
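+ # (note: these special-value tests mirror the C99 Annex F rules for
+ # atan2, which distinguish +0.0 from -0.0 in the second argument)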
+ assert_almost_equal(ncu.arctan2(-1, ncu.PZERO), -0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-1, ncu.NZERO), -0.5 * np.pi) + + def test_any_ninf(self): + # atan2(+-y, -infinity) returns +-pi for finite y > 0. + assert_almost_equal(ncu.arctan2(1, -np.inf), np.pi) + assert_almost_equal(ncu.arctan2(-1, -np.inf), -np.pi) + + def test_any_pinf(self): + # atan2(+-y, +infinity) returns +-0 for finite y > 0. + assert_arctan2_ispzero(1, np.inf) + assert_arctan2_isnzero(-1, np.inf) + + def test_inf_any(self): + # atan2(+-infinity, x) returns +-pi/2 for finite x. + assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) + + def test_inf_ninf(self): + # atan2(+-infinity, -infinity) returns +-3*pi/4. + assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) + + def test_inf_pinf(self): + # atan2(+-infinity, +infinity) returns +-pi/4. + assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) + + def test_nan_any(self): + # atan2(nan, x) returns nan for any x, including inf + assert_arctan2_isnan(np.nan, np.inf) + assert_arctan2_isnan(np.inf, np.nan) + assert_arctan2_isnan(np.nan, np.nan) + + +class TestLdexp: + def _check_ldexp(self, tp): + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), + np.array(3, tp)), 16.) + + def test_ldexp(self): + # The default Python int type should work + assert_almost_equal(ncu.ldexp(2., 3), 16.) + # The following int types should all be accepted + self._check_ldexp(np.int8) + self._check_ldexp(np.int16) + self._check_ldexp(np.int32) + self._check_ldexp('i') + self._check_ldexp('l') + + def test_ldexp_overflow(self): + # silence warning emitted on overflow + with np.errstate(over="ignore"): + imax = np.iinfo(np.dtype('l')).max + imin = np.iinfo(np.dtype('l')).min + assert_equal(ncu.ldexp(2., imax), np.inf) + assert_equal(ncu.ldexp(2., imin), 0) + + +class TestMaximum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.maximum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.maximum.reduce([1, 2j]), 1) + assert_equal(np.maximum.reduce([1 + 3j, 2j]), 1 + 3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.maximum(x, y) == 1.0) + assert_(np.maximum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.maximum(arg1, arg2), arg2) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) # noqa: E221 + maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) + out = np.ones(8) + out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.maximum(arr1, arr2), maxtrue) + assert_equal(np.maximum(arr1[::2], arr2[::2]), maxtrue[::2]) + assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) + assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) + assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) + assert_equal(out, out_maxtrue) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, -np.inf, dtmin), + (dtmax, -np.inf, dtmax), + (d1, d1_next, d1_next), + (dtmax, np.nan, np.nan), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.maximum([v1], [v2]), [expected]) + assert_equal(np.maximum.reduce([v1, v2]), expected) + + +class TestMinimum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.minimum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.minimum.reduce([1, 2j]), 2j) + assert_equal(np.minimum.reduce([1 + 3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.minimum(x, y) == 1.0) + assert_(np.minimum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.minimum(arg1, arg2), arg1) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf]) + out = np.ones(8) + out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.minimum(arr1, arr2), mintrue) + assert_equal(np.minimum(arr1[::2], arr2[::2]), mintrue[::2]) + assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) + assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) + assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan])) + assert_equal(out, out_mintrue) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, np.inf, dtmin), + (dtmax, np.inf, dtmax), + (d1, d1_next, d1), + (dtmin, np.nan, np.nan), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.minimum([v1], [v2]), [expected]) + assert_equal(np.minimum.reduce([v1, v2]), expected) + + +class TestFmax(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmax.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 9) + assert_equal(func(tmp2), 9) + + def test_reduce_complex(self): + assert_equal(np.fmax.reduce([1, 2j]), 1) + assert_equal(np.fmax.reduce([1 + 3j, 2j]), 1 + 3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmax(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmax(arg1, arg2), out) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, -np.inf, dtmin), + (dtmax, -np.inf, dtmax), + (d1, d1_next, d1_next), + (dtmax, np.nan, dtmax), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.fmax([v1], [v2]), [expected]) + assert_equal(np.fmax.reduce([v1, v2]), 
expected) + + +class TestFmin(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmin.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 1) + assert_equal(func(tmp2), 1) + + def test_reduce_complex(self): + assert_equal(np.fmin.reduce([1, 2j]), 2j) + assert_equal(np.fmin.reduce([1 + 3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmin(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmin(arg1, arg2), out) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, np.inf, dtmin), + (dtmax, np.inf, dtmax), + (d1, d1_next, d1), + (dtmin, np.nan, dtmin), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.fmin([v1], [v2]), [expected]) + assert_equal(np.fmin.reduce([v1, v2]), expected) + + +class TestBool: + def test_exceptions(self): + a = np.ones(1, dtype=np.bool) + assert_raises(TypeError, np.negative, a) + assert_raises(TypeError, np.positive, a) + assert_raises(TypeError, np.subtract, a, a) + + def test_truth_table_logical(self): + # 2, 3 and 4 serves as true values + input1 = [0, 0, 3, 2] + input2 = [0, 4, 0, 2] + + typecodes = (np.typecodes['AllFloat'] + + np.typecodes['AllInteger'] + + '?') # boolean + for dtype in map(np.dtype, typecodes): + arg1 = np.asarray(input1, dtype=dtype) + arg2 = np.asarray(input2, dtype=dtype) + + # OR + out = [False, True, True, True] + for func in (np.logical_or, np.maximum): + assert_equal(func(arg1, arg2).astype(bool), out) + # AND + out = [False, False, False, True] + for func in (np.logical_and, np.minimum): + assert_equal(func(arg1, arg2).astype(bool), out) + # XOR + out = [False, True, True, False] + for func in (np.logical_xor, np.not_equal): + assert_equal(func(arg1, arg2).astype(bool), out) + + def test_truth_table_bitwise(self): + arg1 = [False, False, True, True] + arg2 = [False, True, False, True] + + out = [False, True, True, True] + assert_equal(np.bitwise_or(arg1, arg2), out) + + out = [False, False, False, True] + assert_equal(np.bitwise_and(arg1, arg2), out) + + out = [False, True, True, False] + assert_equal(np.bitwise_xor(arg1, arg2), out) + + def test_reduce(self): + none = np.array([0, 0, 0, 0], bool) + some = np.array([1, 0, 1, 1], bool) + every = np.array([1, 1, 1, 1], bool) + empty = np.array([], bool) + + arrs = [none, some, every, empty] + + for arr in arrs: + assert_equal(np.logical_and.reduce(arr), all(arr)) + + for arr in arrs: + assert_equal(np.logical_or.reduce(arr), any(arr)) + + for arr in arrs: + assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1) + + +class TestBitwiseUFuncs: + + _all_ints_bits = [ + np.dtype(c).itemsize * 8 for c in 
np.typecodes["AllInteger"]] + bitwise_types = [ + np.dtype(c) for c in '?' + np.typecodes["AllInteger"] + 'O'] + bitwise_bits = [ + 2, # boolean type + *_all_ints_bits, # All integers + max(_all_ints_bits) + 1, # Object_ type + ] + + def test_values(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + msg = f"dt = '{dt.char}'" + + assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) + assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg) + + assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg) + + def test_types(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + msg = f"dt = '{dt.char}'" + + assert_(np.bitwise_not(zeros).dtype == dt, msg) + assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) + + def test_identity(self): + assert_(np.bitwise_or.identity == 0, 'bitwise_or') + assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') + assert_(np.bitwise_and.identity == -1, 'bitwise_and') + + def test_reduction(self): + binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and) + + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + for f in binary_funcs: + msg = f"dt: '{dt}', f: '{f}'" + assert_equal(f.reduce(zeros), zeros, err_msg=msg) + assert_equal(f.reduce(ones), ones, err_msg=msg) + + # Test empty reduction, no object dtype + for dt in self.bitwise_types[:-1]: + # No object array types + empty = np.array([], dtype=dt) + for f in binary_funcs: + msg = f"dt: '{dt}', f: '{f}'" + tgt = np.array(f.identity).astype(dt) + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + assert_(res.dtype == tgt.dtype, msg) + + # Empty object arrays use the identity. Note that the types may + # differ, the actual type used is determined by the assign_identity + # function and is not the same as the type returned by the identity + # method. 
+ for f in binary_funcs: + msg = f"dt: '{f}'" + empty = np.array([], dtype=object) + tgt = f.identity + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + + # Non-empty object arrays do not use the identity + for f in binary_funcs: + msg = f"dt: '{f}'" + btype = np.array([True], dtype=object) + assert_(type(f.reduce(btype)) is bool, msg) + + @pytest.mark.parametrize("input_dtype_obj, bitsize", + zip(bitwise_types, bitwise_bits)) + def test_bitwise_count(self, input_dtype_obj, bitsize): + input_dtype = input_dtype_obj.type + + for i in range(1, bitsize): + num = 2**i - 1 + msg = f"bitwise_count for {num}" + assert i == np.bitwise_count(input_dtype(num)), msg + if np.issubdtype( + input_dtype, np.signedinteger) or input_dtype == np.object_: + assert i == np.bitwise_count(input_dtype(-num)), msg + + a = np.array([2**i - 1 for i in range(1, bitsize)], dtype=input_dtype) + bitwise_count_a = np.bitwise_count(a) + expected = np.arange(1, bitsize, dtype=input_dtype) + + msg = f"array bitwise_count for {input_dtype}" + assert all(bitwise_count_a == expected), msg + + +class TestInt: + def test_logical_not(self): + x = np.ones(10, dtype=np.int16) + o = np.ones(10 * 2, dtype=bool) + tgt = o.copy() + tgt[::2] = False + os = o[::2] + assert_array_equal(np.logical_not(x, out=os), False) + assert_array_equal(o, tgt) + + +class TestFloatingPoint: + def test_floating_point(self): + assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) + + +class TestDegrees: + def test_degrees(self): + assert_almost_equal(ncu.degrees(np.pi), 180.0) + assert_almost_equal(ncu.degrees(-0.5 * np.pi), -90.0) + + +class TestRadians: + def test_radians(self): + assert_almost_equal(ncu.radians(180.0), np.pi) + assert_almost_equal(ncu.radians(-90.0), -0.5 * np.pi) + + +class TestHeavside: + def test_heaviside(self): + x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]]) + expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]]) + expected1 = expectedhalf.copy() + expected1[0, 2] = 1 + + h = ncu.heaviside(x, 0.5) + assert_equal(h, expectedhalf) + + h = ncu.heaviside(x, 1.0) + assert_equal(h, expected1) + + x = x.astype(np.float32) + + h = ncu.heaviside(x, np.float32(0.5)) + assert_equal(h, expectedhalf.astype(np.float32)) + + h = ncu.heaviside(x, np.float32(1.0)) + assert_equal(h, expected1.astype(np.float32)) + + +class TestSign: + def test_sign(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape) + tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) + + with np.errstate(invalid='ignore'): + res = ncu.sign(a) + assert_equal(res, tgt) + res = ncu.sign(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + def test_sign_complex(self): + a = np.array([ + np.inf, -np.inf, complex(0, np.inf), complex(0, -np.inf), + complex(np.inf, np.inf), complex(np.inf, -np.inf), # nan + np.nan, complex(0, np.nan), complex(np.nan, np.nan), # nan + 0.0, # 0. 
+ 3.0, -3.0, -2j, 3.0 + 4.0j, -8.0 + 6.0j + ]) + out = np.zeros(a.shape, a.dtype) + tgt = np.array([ + 1., -1., 1j, -1j, + ] + [complex(np.nan, np.nan)] * 5 + [ + 0.0, + 1.0, -1.0, -1j, 0.6 + 0.8j, -0.8 + 0.6j]) + + with np.errstate(invalid='ignore'): + res = ncu.sign(a) + assert_equal(res, tgt) + res = ncu.sign(a, out) + assert_(res is out) + assert_equal(res, tgt) + + def test_sign_dtype_object(self): + # In reference to github issue #6229 + + foo = np.array([-.1, 0, .1]) + a = np.sign(foo.astype(object)) + b = np.sign(foo) + + assert_array_equal(a, b) + + def test_sign_dtype_nan_object(self): + # In reference to github issue #6229 + def test_nan(): + foo = np.array([np.nan]) + # FIXME: a not used + a = np.sign(foo.astype(object)) + + assert_raises(TypeError, test_nan) + +class TestMinMax: + def test_minmax_blocked(self): + # simd tests on max/min, test all alignments, slow but important + # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once) + for dt, sz in [(np.float32, 15), (np.float64, 7)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + for i in range(inp.size): + inp[:] = np.arange(inp.size, dtype=dt) + inp[i] = np.nan + emsg = lambda: f'{inp!r}\n{msg}' + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + assert_(np.isnan(inp.max()), msg=emsg) + assert_(np.isnan(inp.min()), msg=emsg) + + inp[i] = 1e10 + assert_equal(inp.max(), 1e10, err_msg=msg) + inp[i] = -1e10 + assert_equal(inp.min(), -1e10, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(d.max(), d[0]) + assert_equal(d.min(), d[0]) + + def test_reduce_reorder(self): + # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus + # and put it before the call to an intrinsic function that causes + # invalid status to be set. 
Also make sure warnings are not emitted + for n in (2, 4, 8, 16, 32): + for dt in (np.float32, np.float16, np.complex64): + for r in np.diagflat(np.array([np.nan] * n, dtype=dt)): + assert_equal(np.min(r), np.nan) + + def test_minimize_no_warns(self): + a = np.minimum(np.nan, 1) + assert_equal(a, np.nan) + + +class TestAbsoluteNegative: + def test_abs_neg_blocked(self): + # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 5)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + tgt = [ncu.absolute(i) for i in inp] + np.absolute(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + assert_((out >= 0).all()) + + tgt = [-1 * (i) for i in inp] + np.negative(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + + for v in [np.nan, -np.inf, np.inf]: + for i in range(inp.size): + d = np.arange(inp.size, dtype=dt) + inp[:] = -d + inp[i] = v + d[i] = -v if v == -np.inf else v + assert_array_equal(np.abs(inp), d, err_msg=msg) + np.abs(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + assert_array_equal(-inp, -1 * inp, err_msg=msg) + d = -1 * inp + np.negative(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(np.abs(d), d) + assert_equal(np.negative(d), -d) + np.negative(d, out=d) + np.negative(np.ones_like(d), out=d) + np.abs(d, out=d) + np.abs(np.ones_like(d), out=d) + + @pytest.mark.parametrize("dtype", ['d', 'f', 'int32', 'int64']) + @pytest.mark.parametrize("big", [True, False]) + def test_noncontiguous(self, dtype, big): + data = np.array([-1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.5, 2.5, -6, + 6, -2.2251e-308, -8, 10], dtype=dtype) + expect = np.array([1.0, -1.0, 0.0, -0.0, -2.2251e-308, 2.5, -2.5, 6, + -6, 2.2251e-308, 8, -10], dtype=dtype) + if big: + data = np.repeat(data, 10) + expect = np.repeat(expect, 10) + out = np.ndarray(data.shape, dtype=dtype) + ncontig_in = data[1::2] + ncontig_out = out[1::2] + contig_in = np.array(ncontig_in) + # contig in, contig out + assert_array_equal(np.negative(contig_in), expect[1::2]) + # contig in, ncontig out + assert_array_equal(np.negative(contig_in, out=ncontig_out), + expect[1::2]) + # ncontig in, contig out + assert_array_equal(np.negative(ncontig_in), expect[1::2]) + # ncontig in, ncontig out + assert_array_equal(np.negative(ncontig_in, out=ncontig_out), + expect[1::2]) + # contig in, contig out, nd stride + data_split = np.array(np.array_split(data, 2)) + expect_split = np.array(np.array_split(expect, 2)) + assert_equal(np.negative(data_split), expect_split) + + +class TestPositive: + def test_valid(self): + valid_dtypes = [int, float, complex, object] + for dtype in valid_dtypes: + x = np.arange(5, dtype=dtype) + result = np.positive(x) + assert_equal(x, result, err_msg=str(dtype)) + + def test_invalid(self): + with assert_raises(TypeError): + np.positive(True) + with assert_raises(TypeError): + np.positive(np.datetime64('2000-01-01')) + with assert_raises(TypeError): + np.positive(np.array(['foo'], dtype=str)) + with assert_raises(TypeError): + np.positive(np.array(['bar'], dtype=object)) + + +class TestSpecialMethods: + def test_wrap(self): + + class with_wrap: + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context, return_scalar): + r = with_wrap() + r.arr = arr + r.context = 
context + return r + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x.arr, np.zeros(1)) + func, args, i = x.context + assert_(func is ncu.minimum) + assert_equal(len(args), 2) + assert_equal(args[0], a) + assert_equal(args[1], a) + assert_equal(i, 0) + + def test_wrap_out(self): + # Calling convention for out should not affect how special methods are + # called + + class StoreArrayPrepareWrap(np.ndarray): + _wrap_args = None + _prepare_args = None + + def __new__(cls): + return np.zeros(()).view(cls) + + def __array_wrap__(self, obj, context, return_scalar): + self._wrap_args = context[1] + return obj + + @property + def args(self): + # We need to ensure these are fetched at the same time, before + # any other ufuncs are called by the assertions + return self._wrap_args + + def __repr__(self): + return "a" # for short test output + + def do_test(f_call, f_expected): + a = StoreArrayPrepareWrap() + + f_call(a) + + w = a.args + expected = f_expected(a) + try: + assert w == expected + except AssertionError as e: + # assert_equal produces truly useless error messages + raise AssertionError("\n".join([ + "Bad arguments passed in ufunc call", + f" expected: {expected}", + f" __array_wrap__ got: {w}" + ])) + + # method not on the out argument + do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) + + # method on the out argument + do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) + + # Also check the where mask handling: + do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0)) + do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) + + def test_wrap_with_iterable(self): + # test fix for bug #1026: + + class with_wrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1).view(cls).copy() + + def __array_wrap__(self, arr, context, return_scalar): + return arr.view(type(self)) + + a = with_wrap() + x = ncu.multiply(a, (1, 2, 3)) + assert_(isinstance(x, with_wrap)) + assert_array_equal(x, np.array((1, 2, 3))) + + def test_priority_with_scalar(self): + # test fix for bug #826: + + class A(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1.0, 'float64').view(cls).copy() + + a = A() + x = np.float64(1) * a + assert_(isinstance(x, A)) + assert_array_equal(x, np.array(1)) + + def test_priority(self): + + class A: + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context, return_scalar): + r = type(self)() + r.arr = arr + r.context = context + return r + + class B(A): + __array_priority__ = 20. + + class C(A): + __array_priority__ = 40. 
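+
+        # Illustrative sketch (not from the upstream suite): among
+        # competing __array_wrap__ implementations, the operand with the
+        # highest __array_priority__ supplies the output type, so C (40.)
+        # beats B (20.), which beats plain A, whatever the operand order;
+        # the assertions below exercise exactly that ordering.
+        assert C.__array_priority__ > B.__array_priority__  # 40. > 20.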
+ + x = np.zeros(1) + a = A() + b = B() + c = C() + f = ncu.minimum + assert_(type(f(x, x)) is np.ndarray) + assert_(type(f(x, a)) is A) + assert_(type(f(x, b)) is B) + assert_(type(f(x, c)) is C) + assert_(type(f(a, x)) is A) + assert_(type(f(b, x)) is B) + assert_(type(f(c, x)) is C) + + assert_(type(f(a, a)) is A) + assert_(type(f(a, b)) is B) + assert_(type(f(b, a)) is B) + assert_(type(f(b, b)) is B) + assert_(type(f(b, c)) is C) + assert_(type(f(c, b)) is C) + assert_(type(f(c, c)) is C) + + assert_(type(ncu.exp(a) is A)) + assert_(type(ncu.exp(b) is B)) + assert_(type(ncu.exp(c) is C)) + + def test_failing_wrap(self): + + class A: + def __array__(self, dtype=None, copy=None): + return np.zeros(2) + + def __array_wrap__(self, arr, context, return_scalar): + raise RuntimeError + + a = A() + assert_raises(RuntimeError, ncu.maximum, a, a) + assert_raises(RuntimeError, ncu.maximum.reduce, a) + + def test_failing_out_wrap(self): + + singleton = np.array([1.0]) + + class Ok(np.ndarray): + def __array_wrap__(self, obj, context, return_scalar): + return singleton + + class Bad(np.ndarray): + def __array_wrap__(self, obj, context, return_scalar): + raise RuntimeError + + ok = np.empty(1).view(Ok) + bad = np.empty(1).view(Bad) + # double-free (segfault) of "ok" if "bad" raises an exception + for i in range(10): + assert_raises(RuntimeError, ncu.frexp, 1, ok, bad) + + def test_none_wrap(self): + # Tests that issue #8507 is resolved. Previously, this would segfault + + class A: + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context=None, return_scalar=False): + return None + + a = A() + assert_equal(ncu.maximum(a, a), None) + + def test_default_prepare(self): + + class with_wrap: + __array_priority__ = 10 + + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context, return_scalar): + return arr + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x, np.zeros(1)) + assert_equal(type(x), np.ndarray) + + def test_array_too_many_args(self): + + class A: + def __array__(self, dtype, context, copy=None): + return np.zeros(1) + + a = A() + assert_raises_regex(TypeError, '2 required positional', np.sum, a) + + def test_ufunc_override(self): + # check override works even with instance with high priority. + class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return self, func, method, inputs, kwargs + + class MyNDArray(np.ndarray): + __array_priority__ = 100 + + a = A() + b = np.array([1]).view(MyNDArray) + res0 = np.multiply(a, b) + res1 = np.multiply(b, b, out=a) + + # self + assert_equal(res0[0], a) + assert_equal(res1[0], a) + assert_equal(res0[1], np.multiply) + assert_equal(res1[1], np.multiply) + assert_equal(res0[2], '__call__') + assert_equal(res1[2], '__call__') + assert_equal(res0[3], (a, b)) + assert_equal(res1[3], (b, b)) + assert_equal(res0[4], {}) + assert_equal(res1[4], {'out': (a,)}) + + def test_ufunc_override_mro(self): + + # Some multi arg functions for testing. + def tres_mul(a, b, c): + return a * b * c + + def quatro_mul(a, b, c, d): + return a * b * c * d + + # Make these into ufuncs. 
+ three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) + four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) + + class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "A" + + class ASub(A): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "ASub" + + class B: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "B" + + class C: + def __init__(self): + self.count = 0 + + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + class CSub(C): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + a = A() + a_sub = ASub() + b = B() + c = C() + + # Standard + res = np.multiply(a, a_sub) + assert_equal(res, "ASub") + res = np.multiply(a_sub, b) + assert_equal(res, "ASub") + + # With 1 NotImplemented + res = np.multiply(c, a) + assert_equal(res, "A") + assert_equal(c.count, 1) + # Check our counter works, so we can trust tests below. + res = np.multiply(c, a) + assert_equal(c.count, 2) + + # Both NotImplemented. + c = C() + c_sub = CSub() + assert_raises(TypeError, np.multiply, c, c_sub) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = c_sub.count = 0 + assert_raises(TypeError, np.multiply, c_sub, c) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, c, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, 2, c) + assert_equal(c.count, 1) + + # Ternary testing. + assert_equal(three_mul_ufunc(a, 1, 2), "A") + assert_equal(three_mul_ufunc(1, a, 2), "A") + assert_equal(three_mul_ufunc(1, 2, a), "A") + + assert_equal(three_mul_ufunc(a, a, 6), "A") + assert_equal(three_mul_ufunc(a, 2, a), "A") + assert_equal(three_mul_ufunc(a, 2, b), "A") + assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") + assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") + c.count = 0 + assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") + assert_equal(c.count, 1) + c.count = 0 + assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") + assert_equal(c.count, 0) + + c.count = 0 + assert_equal(three_mul_ufunc(a, b, c), "A") + assert_equal(c.count, 0) + c_sub.count = 0 + assert_equal(three_mul_ufunc(a, b, c_sub), "A") + assert_equal(c_sub.count, 0) + assert_equal(three_mul_ufunc(1, 2, b), "B") + + assert_raises(TypeError, three_mul_ufunc, 1, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) + + # Quaternary testing. 
+ assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, 3), "A") + assert_equal(four_mul_ufunc(1, 1, a, 3), "A") + assert_equal(four_mul_ufunc(1, 1, 2, a), "A") + + assert_equal(four_mul_ufunc(a, b, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, b), "A") + assert_equal(four_mul_ufunc(b, 1, a, 3), "B") + assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") + assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") + + c = C() + c_sub = CSub() + assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + c2 = C() + c.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + assert_equal(c2.count, 0) + c.count = c2.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 0) + assert_equal(c2.count, 1) + + def test_ufunc_override_methods(self): + + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + # __call__ + a = A() + with assert_raises(TypeError): + np.multiply.__call__(1, a, foo='bar', answer=42) + res = np.multiply.__call__(1, a, subok='bar', where=42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, a)) + assert_equal(res[4], {'subok': 'bar', 'where': 42}) + + # __call__, wrong args + assert_raises(TypeError, np.multiply, a) + assert_raises(TypeError, np.multiply, a, a, a, a) + assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a') + assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0]) + + # reduce, positional args + res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0'}) + + # reduce, kwargs + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', + keepdims='keep0', initial='init0', + where='where0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0', + 'where': 'where0'}) + + # reduce, output equal to None removed, but not other explicit ones, + # even if they are at their default value. + res = np.multiply.reduce(a, 0, None, None, False) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) + res = np.multiply.reduce(a, out=None, axis=0, keepdims=True) + assert_equal(res[4], {'axis': 0, 'keepdims': True}) + res = np.multiply.reduce(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + res = np.multiply.reduce(a, 0, None, None, False, 2, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': 2, 'where': True}) + # np._NoValue ignored for initial + res = np.multiply.reduce(a, 0, None, None, False, + np._NoValue, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'where': True}) + # None kept for initial, True for where. 
+ res = np.multiply.reduce(a, 0, None, None, False, None, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': None, 'where': True}) + + # reduce, wrong args + assert_raises(ValueError, np.multiply.reduce, a, out=()) + assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') + + # accumulate, pos args + res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, kwargs + res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, output equal to None removed. + res = np.multiply.accumulate(a, 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1') + assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'}) + res = np.multiply.accumulate(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # accumulate, wrong args + assert_raises(ValueError, np.multiply.accumulate, a, out=()) + assert_raises(ValueError, np.multiply.accumulate, a, + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.accumulate, a, + 'axis0', axis='axis0') + + # reduceat, pos args + res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, kwargs + res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, output equal to None removed. 
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt') + assert_equal(res[4], {'axis': None, 'dtype': 'dt'}) + res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,)) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # reduceat, wrong args + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=()) + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, [4, 2], + 'axis0', axis='axis0') + + # outer + res = np.multiply.outer(a, 42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'outer') + assert_equal(res[3], (a, 42)) + assert_equal(res[4], {}) + + # outer, wrong args + assert_raises(TypeError, np.multiply.outer, a) + assert_raises(TypeError, np.multiply.outer, a, a, a, a) + assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a') + + # at + res = np.multiply.at(a, [4, 2], 'b0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'at') + assert_equal(res[3], (a, [4, 2], 'b0')) + + # at, wrong args + assert_raises(TypeError, np.multiply.at, a) + assert_raises(TypeError, np.multiply.at, a, a, a, a) + + def test_ufunc_override_out(self): + + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return kwargs + + class B: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return kwargs + + a = A() + b = B() + res0 = np.multiply(a, b, 'out_arg') + res1 = np.multiply(a, b, out='out_arg') + res2 = np.multiply(2, b, 'out_arg') + res3 = np.multiply(3, b, out='out_arg') + res4 = np.multiply(a, 4, 'out_arg') + res5 = np.multiply(a, 5, out='out_arg') + + assert_equal(res0['out'][0], 'out_arg') + assert_equal(res1['out'][0], 'out_arg') + assert_equal(res2['out'][0], 'out_arg') + assert_equal(res3['out'][0], 'out_arg') + assert_equal(res4['out'][0], 'out_arg') + assert_equal(res5['out'][0], 'out_arg') + + # ufuncs with multiple output modf and frexp. + res6 = np.modf(a, 'out0', 'out1') + res7 = np.frexp(a, 'out0', 'out1') + assert_equal(res6['out'][0], 'out0') + assert_equal(res6['out'][1], 'out1') + assert_equal(res7['out'][0], 'out0') + assert_equal(res7['out'][1], 'out1') + + # While we're at it, check that default output is never passed on. + assert_(np.sin(a, None) == {}) + assert_(np.sin(a, out=None) == {}) + assert_(np.sin(a, out=(None,)) == {}) + assert_(np.modf(a, None) == {}) + assert_(np.modf(a, None, None) == {}) + assert_(np.modf(a, out=(None, None)) == {}) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + np.modf(a, out=None) + + # don't give positional and output argument, or too many arguments. + # wrong number of arguments in the tuple is an error too. 
+        assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
+        assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
+        assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+        assert_raises(TypeError, np.multiply, a, out=())
+        assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
+        assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
+        assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+        assert_raises(ValueError, np.modf, a, out=('one',))
+
+    def test_ufunc_override_where(self):
+
+        class OverriddenArrayOld(np.ndarray):
+
+            def _unwrap(self, objs):
+                cls = type(self)
+                result = []
+                for obj in objs:
+                    if isinstance(obj, cls):
+                        obj = np.array(obj)
+                    elif type(obj) != np.ndarray:
+                        return NotImplemented
+                    result.append(obj)
+                return result
+
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+                inputs = self._unwrap(inputs)
+                if inputs is NotImplemented:
+                    return NotImplemented
+
+                kwargs = kwargs.copy()
+                if "out" in kwargs:
+                    kwargs["out"] = self._unwrap(kwargs["out"])
+                    if kwargs["out"] is NotImplemented:
+                        return NotImplemented
+
+                r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
+                if r is not NotImplemented:
+                    r = r.view(type(self))
+
+                return r
+
+        class OverriddenArrayNew(OverriddenArrayOld):
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+                kwargs = kwargs.copy()
+                if "where" in kwargs:
+                    kwargs["where"] = self._unwrap((kwargs["where"], ))
+                    if kwargs["where"] is NotImplemented:
+                        return NotImplemented
+                    else:
+                        kwargs["where"] = kwargs["where"][0]
+
+                r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
+                if r is not NotImplemented:
+                    r = r.view(type(self))
+
+                return r
+
+        ufunc = np.negative
+
+        array = np.array([1, 2, 3])
+        where = np.array([True, False, True])
+        expected = ufunc(array, where=where)
+
+        with pytest.raises(TypeError):
+            ufunc(array, where=where.view(OverriddenArrayOld))
+
+        result_1 = ufunc(
+            array,
+            where=where.view(OverriddenArrayNew)
+        )
+        assert isinstance(result_1, OverriddenArrayNew)
+        assert np.all(np.array(result_1) == expected, where=where)
+
+        result_2 = ufunc(
+            array.view(OverriddenArrayNew),
+            where=where.view(OverriddenArrayNew)
+        )
+        assert isinstance(result_2, OverriddenArrayNew)
+        assert np.all(np.array(result_2) == expected, where=where)
+
+    def test_ufunc_override_exception(self):
+
+        class A:
+            def __array_ufunc__(self, *a, **kwargs):
+                raise ValueError("oops")
+
+        a = A()
+        assert_raises(ValueError, np.negative, 1, out=a)
+        assert_raises(ValueError, np.negative, a)
+        assert_raises(ValueError, np.divide, 1., a)
+
+    def test_ufunc_override_not_implemented(self):
+
+        class A:
+            def __array_ufunc__(self, *args, **kwargs):
+                return NotImplemented
+
+        msg = ("operand type(s) all returned NotImplemented from "
+               "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
+        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+            np.negative(A())
+
+        msg = ("operand type(s) all returned NotImplemented from "
+               "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
+               "out=(1,)): 'A', 'object', 'int'")
+        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+            np.add(A(), object(), out=1)
+
+    def test_ufunc_override_disabled(self):
+
+        class OptOut:
+            __array_ufunc__ = None
+
+        opt_out = OptOut()
+
+        # ufuncs always raise
+        msg = "operand 'OptOut' does not support ufuncs"
+        with assert_raises_regex(TypeError, msg):
+            np.add(opt_out, 1)
+        with assert_raises_regex(TypeError, msg):
+            np.add(1, opt_out)
+        with assert_raises_regex(TypeError, msg):
+            np.negative(opt_out)
+
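+        # Illustrative sketch (not from the upstream suite; _RAddOnly is a
+        # hypothetical class): __array_ufunc__ = None makes ndarray's
+        # binary operators return NotImplemented instead of dispatching a
+        # ufunc, so Python's reflected-operand protocol can still hand
+        # control to the opted-out object:
+        class _RAddOnly:
+            __array_ufunc__ = None
+
+            def __radd__(self, other):
+                return "radd"
+
+        assert np.arange(2) + _RAddOnly() == "radd"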
+ # opt-outs still hold even when other arguments have pathological + # __array_ufunc__ implementations + + class GreedyArray: + def __array_ufunc__(self, *args, **kwargs): + return self + + greedy = GreedyArray() + assert_(np.negative(greedy) is greedy) + with assert_raises_regex(TypeError, msg): + np.add(greedy, opt_out) + with assert_raises_regex(TypeError, msg): + np.add(greedy, 1, out=opt_out) + + def test_gufunc_override(self): + # gufunc are just ufunc instances, but follow a different path, + # so check __array_ufunc__ overrides them properly. + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + inner1d = ncu_tests.inner1d + a = A() + res = inner1d(a, a) + assert_equal(res[0], a) + assert_equal(res[1], inner1d) + assert_equal(res[2], '__call__') + assert_equal(res[3], (a, a)) + assert_equal(res[4], {}) + + res = inner1d(1, 1, out=a) + assert_equal(res[0], a) + assert_equal(res[1], inner1d) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, 1)) + assert_equal(res[4], {'out': (a,)}) + + # wrong number of arguments in the tuple is an error too. + assert_raises(TypeError, inner1d, a, out='two') + assert_raises(TypeError, inner1d, a, a, 'one', out='two') + assert_raises(TypeError, inner1d, a, a, 'one', 'two') + assert_raises(ValueError, inner1d, a, a, out=('one', 'two')) + assert_raises(ValueError, inner1d, a, a, out=()) + + def test_ufunc_override_with_super(self): + # NOTE: this class is used in doc/source/user/basics.subclassing.rst + # if you make any changes here, do update it there too. + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs): + args = [] + in_no = [] + for i, input_ in enumerate(inputs): + if isinstance(input_, A): + in_no.append(i) + args.append(input_.view(np.ndarray)) + else: + args.append(input_) + + outputs = out + out_no = [] + if outputs: + out_args = [] + for j, output in enumerate(outputs): + if isinstance(output, A): + out_no.append(j) + out_args.append(output.view(np.ndarray)) + else: + out_args.append(output) + kwargs['out'] = tuple(out_args) + else: + outputs = (None,) * ufunc.nout + + info = {} + if in_no: + info['inputs'] = in_no + if out_no: + info['outputs'] = out_no + + results = super().__array_ufunc__(ufunc, method, + *args, **kwargs) + if results is NotImplemented: + return NotImplemented + + if method == 'at': + if isinstance(inputs[0], A): + inputs[0].info = info + return + + if ufunc.nout == 1: + results = (results,) + + results = tuple((np.asarray(result).view(A) + if output is None else output) + for result, output in zip(results, outputs)) + if results and isinstance(results[0], A): + results[0].info = info + + return results[0] if len(results) == 1 else results + + class B: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if any(isinstance(input_, A) for input_ in inputs): + return "A!" + else: + return NotImplemented + + d = np.arange(5.) 
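+        # Illustrative sketch (not from the upstream suite): A above is the
+        # cooperative-override pattern, which unwraps A operands to plain
+        # ndarrays, delegates to super().__array_ufunc__, re-wraps the
+        # results, and records the A argument positions in .info. Here d is
+        # the plain-ndarray reference that every check below compares with.
+        assert isinstance(np.arange(5.).view(A), A) and not isinstance(d, A)
+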
+ # 1 input, 1 output + a = np.arange(5.).view(A) + b = np.sin(a) + check = np.sin(d) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0]}) + b = np.sin(d, out=(a,)) + assert_(np.all(check == b)) + assert_equal(b.info, {'outputs': [0]}) + assert_(b is a) + a = np.arange(5.).view(A) + b = np.sin(a, out=a) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0], 'outputs': [0]}) + + # 1 input, 2 outputs + a = np.arange(5.).view(A) + b1, b2 = np.modf(a) + assert_equal(b1.info, {'inputs': [0]}) + b1, b2 = np.modf(d, out=(None, a)) + assert_(b2 is a) + assert_equal(b1.info, {'outputs': [1]}) + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c1, c2 = np.modf(a, out=(a, b)) + assert_(c1 is a) + assert_(c2 is b) + assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]}) + + # 2 input, 1 output + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c = np.add(a, b, out=a) + assert_(c is a) + assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]}) + # some tests with a non-ndarray subclass + a = np.arange(5.) + b = B() + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_raises(TypeError, np.add, a, b) + a = a.view(A) + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!") + assert_(np.add(a, b) == "A!") + # regression check for gh-9102 -- tests ufunc.reduce implicitly. + d = np.array([[1, 2, 3], [1, 2, 3]]) + a = d.view(A) + c = a.any() + check = d.any() + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + c = a.max() + check = d.max() + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.array(0).view(A) + c = a.max(out=b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + check = a.max(axis=0) + b = np.zeros_like(check).view(A) + c = a.max(axis=0, out=b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + # simple explicit tests of reduce, accumulate, reduceat + check = np.add.reduce(d, axis=1) + c = np.add.reduce(a, axis=1) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.reduce(a, 1, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + check = np.add.accumulate(d, axis=0) + c = np.add.accumulate(a, axis=0) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.accumulate(a, 0, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + indices = [0, 2, 1] + check = np.add.reduceat(d, indices, axis=1) + c = np.add.reduceat(a, indices, axis=1) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.reduceat(a, indices, 1, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + # and a few tests for at + d = np.array([[1, 2, 3], [1, 2, 3]]) + check = d.copy() + a = d.copy().view(A) + np.add.at(check, ([0, 1], [0, 2]), 1.) + np.add.at(a, ([0, 1], [0, 2]), 1.) 
+ assert_equal(a, check) + assert_(a.info, {'inputs': [0]}) + b = np.array(1.).view(A) + a = d.copy().view(A) + np.add.at(a, ([0, 1], [0, 2]), b) + assert_equal(a, check) + assert_(a.info, {'inputs': [0, 2]}) + + def test_array_ufunc_direct_call(self): + # This is mainly a regression test for gh-24023 (shouldn't segfault) + a = np.array(1) + with pytest.raises(TypeError): + a.__array_ufunc__() + + # No kwargs means kwargs may be NULL on the C-level + with pytest.raises(TypeError): + a.__array_ufunc__(1, 2) + + # And the same with a valid call: + res = a.__array_ufunc__(np.add, "__call__", a, a) + assert_array_equal(res, a + a) + + def test_ufunc_docstring(self): + original_doc = np.add.__doc__ + new_doc = "new docs" + expected_dict = ( + {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} + ) + + np.add.__doc__ = new_doc + assert np.add.__doc__ == new_doc + assert np.add.__dict__["__doc__"] == new_doc + + del np.add.__doc__ + assert np.add.__doc__ == original_doc + assert np.add.__dict__ == expected_dict + + np.add.__dict__["other"] = 1 + np.add.__dict__["__doc__"] = new_doc + assert np.add.__doc__ == new_doc + + del np.add.__dict__["__doc__"] + assert np.add.__doc__ == original_doc + del np.add.__dict__["other"] + assert np.add.__dict__ == expected_dict + + +class TestChoose: + def test_mixed(self): + c = np.array([True, True]) + a = np.array([True, True]) + assert_equal(np.choose(c, (a, 1)), np.array([1, 1])) + + +class TestRationalFunctions: + def test_lcm(self): + self._test_lcm_inner(np.int16) + self._test_lcm_inner(np.uint16) + + def test_lcm_object(self): + self._test_lcm_inner(np.object_) + + def test_gcd(self): + self._test_gcd_inner(np.int16) + self._test_lcm_inner(np.uint16) + + def test_gcd_object(self): + self._test_gcd_inner(np.object_) + + def _test_lcm_inner(self, dtype): + # basic use + a = np.array([12, 120], dtype=dtype) + b = np.array([20, 200], dtype=dtype) + assert_equal(np.lcm(a, b), [60, 600]) + + if not issubclass(dtype, np.unsignedinteger): + # negatives are ignored + a = np.array([12, -12, 12, -12], dtype=dtype) + b = np.array([20, 20, -20, -20], dtype=dtype) + assert_equal(np.lcm(a, b), [60] * 4) + + # reduce + a = np.array([3, 12, 20], dtype=dtype) + assert_equal(np.lcm.reduce([3, 12, 20]), 60) + + # broadcasting, and a test including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20]) + + def _test_gcd_inner(self, dtype): + # basic use + a = np.array([12, 120], dtype=dtype) + b = np.array([20, 200], dtype=dtype) + assert_equal(np.gcd(a, b), [4, 40]) + + if not issubclass(dtype, np.unsignedinteger): + # negatives are ignored + a = np.array([12, -12, 12, -12], dtype=dtype) + b = np.array([20, 20, -20, -20], dtype=dtype) + assert_equal(np.gcd(a, b), [4] * 4) + + # reduce + a = np.array([15, 25, 35], dtype=dtype) + assert_equal(np.gcd.reduce(a), 5) + + # broadcasting, and a test including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5]) + + def test_lcm_overflow(self): + # verify that we don't overflow when a*b does overflow + big = np.int32(np.iinfo(np.int32).max // 11) + a = 2 * big + b = 5 * big + assert_equal(np.lcm(a, b), 10 * big) + + def test_gcd_overflow(self): + for dtype in (np.int32, np.int64): + # verify that we don't overflow when taking abs(x) + # not relevant for lcm, where the result is unrepresentable anyway + a = dtype(np.iinfo(dtype).min) # negative power of two + q = -(a // 4) + assert_equal(np.gcd(a, q * 3), q) + assert_equal(np.gcd(a, -q * 
3), q) + + def test_decimal(self): + from decimal import Decimal + a = np.array([1, 1, -1, -1]) * Decimal('0.20') + b = np.array([1, -1, 1, -1]) * Decimal('0.12') + + assert_equal(np.gcd(a, b), 4 * [Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4 * [Decimal('0.60')]) + + def test_float(self): + # not well-defined on float due to rounding errors + assert_raises(TypeError, np.gcd, 0.3, 0.4) + assert_raises(TypeError, np.lcm, 0.3, 0.4) + + def test_huge_integers(self): + # Converting to an array first is a bit different as it means we + # have an explicit object dtype: + assert_equal(np.array(2**200), 2**200) + # Special promotion rules should ensure that this also works for + # two Python integers (even if slow). + # (We do this for comparisons, as the result is always bool and + # we also special case array comparisons with Python integers) + np.equal(2**200, 2**200) + + # But, we cannot do this when it would affect the result dtype: + with pytest.raises(OverflowError): + np.gcd(2**100, 3**100) + + # Asking for `object` explicitly is fine, though: + assert np.gcd(2**100, 3**100, dtype=object) == 1 + + # As of now, the below work, because it is using arrays (which + # will be object arrays) + a = np.array(2**100 * 3**5) + b = np.array([2**100 * 5**7, 2**50 * 3**10]) + assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) + assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + + def test_inf_and_nan(self): + inf = np.array([np.inf], dtype=np.object_) + assert_raises(ValueError, np.gcd, inf, 1) + assert_raises(ValueError, np.gcd, 1, inf) + assert_raises(ValueError, np.gcd, np.nan, inf) + assert_raises(TypeError, np.gcd, 4, float(np.inf)) + + +class TestRoundingFunctions: + + def test_object_direct(self): + """ test direct implementation of these magic methods """ + class C: + def __floor__(self): + return 1 + + def __ceil__(self): + return 2 + + def __trunc__(self): + return 3 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [1, 1]) + assert_equal(np.ceil(arr), [2, 2]) + assert_equal(np.trunc(arr), [3, 3]) + + def test_object_indirect(self): + """ test implementations via __float__ """ + class C: + def __float__(self): + return -2.5 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [-3, -3]) + assert_equal(np.ceil(arr), [-2, -2]) + with pytest.raises(TypeError): + np.trunc(arr) # consistent with math.trunc + + def test_fraction(self): + f = Fraction(-4, 3) + assert_equal(np.floor(f), -2) + assert_equal(np.ceil(f), -1) + assert_equal(np.trunc(f), -1) + + @pytest.mark.parametrize('func', [np.floor, np.ceil, np.trunc]) + @pytest.mark.parametrize('dtype', [np.bool, np.float64, np.float32, + np.int64, np.uint32]) + def test_output_dtype(self, func, dtype): + arr = np.array([-2, 0, 4, 8]).astype(dtype) + result = func(arr) + assert_equal(arr, result) + assert result.dtype == dtype + + +class TestComplexFunctions: + funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, + np.arctanh, np.sin, np.cos, np.tan, np.exp, + np.exp2, np.log, np.sqrt, np.log10, np.log2, + np.log1p] + + def test_it(self): + for f in self.funcs: + if f is np.arccosh: + x = 1.5 + else: + x = .5 + fr = f(x) + fz = f(complex(x)) + assert_almost_equal(fz.real, fr, err_msg=f'real part {f}') + assert_almost_equal(fz.imag, 0., err_msg=f'imag part {f}') + + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_precisions_consistent(self): + z = 1 + 1j + for f in self.funcs: + fcf = f(np.csingle(z)) + fcd = f(np.cdouble(z)) + fcl = f(np.clongdouble(z)) + assert_almost_equal(fcf, 
fcd, decimal=6, err_msg=f'fcf-fcd {f}')
+            assert_almost_equal(fcl, fcd, decimal=15, err_msg=f'fcl-fcd {f}')
+
+    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+    def test_branch_cuts(self):
+        # check branch cuts and continuity on them
+        _check_branch_cut(np.log, -0.5, 1j, 1, -1, True)  # noqa: E221
+        _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)  # noqa: E221
+        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
+        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
+        _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)  # noqa: E221
+
+        _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)
+        _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)
+        _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True)
+
+        _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True)
+        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)
+        _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)
+
+        # check against bogus branch cuts: assert continuity between quadrants
+        _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1)
+        _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1)
+        _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)
+
+        _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)
+        _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1)
+        _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1)
+
+    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+    def test_branch_cuts_complex64(self):
+        # check branch cuts and continuity on them
+        _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)  # noqa: E221
+        _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)  # noqa: E221
+        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
+        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
+        _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)  # noqa: E221
+
+        _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+        _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+        _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64)
+
+        _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64)
+        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)
+        _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+
+        # check against bogus branch cuts: assert continuity between quadrants
+        _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
+        _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
+        _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)
+
+        _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
+        _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)
+        _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)
+
+    def test_against_cmath(self):
+        import cmath
+
+        points = [-1 - 1j, -1 + 1j, +1 - 1j, +1 + 1j]
+        name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
+                    'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
+        atol = 4 * np.finfo(complex).eps
+        for func in self.funcs:
+            fname = func.__name__.split('.')[-1]
+            cname = name_map.get(fname, fname)
+            try:
+                cfunc = getattr(cmath, cname)
+            except AttributeError:
+                continue
+            for p in points:
+                a = complex(func(np.complex128(p)))
+                b = cfunc(p)
+                assert_(
+                    abs(a - b) < atol,
+                    f"{fname} {p}: {a}; cmath: {b}"
+                )
+
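As a standalone illustration of the cross-check above: a minimal sketch,
assuming only NumPy and the standard library, with the probe points and the
4 * eps tolerance copied from the test and arcsin standing in for any of the
mapped functions:

    import cmath
    import numpy as np

    atol = 4 * np.finfo(complex).eps              # same tolerance as the test
    for p in (-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j):
        a = complex(np.arcsin(np.complex128(p)))  # NumPy's complex arcsin
        b = cmath.asin(p)                         # C-library reference
        assert abs(a - b) < atol, f"arcsin {p}: {a}; cmath: {b}"

+    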
@pytest.mark.xfail( + # manylinux2014 uses glibc2.17 + _glibc_older_than("2.18"), + reason="Older glibc versions are imprecise (maybe passes with SIMD?)" + ) + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + @pytest.mark.parametrize('dtype', [ + np.complex64, np.complex128, np.clongdouble + ]) + def test_loss_of_precision(self, dtype): + """Check loss of precision in complex arc* functions""" + if dtype is np.clongdouble and platform.machine() != 'x86_64': + # Failures on musllinux, aarch64, s390x, ppc64le (see gh-17554) + pytest.skip('Only works reliably for x86-64 and recent glibc') + + # Check against known-good functions + + info = np.finfo(dtype) + real_dtype = dtype(0.).real.dtype + eps = info.eps + + def check(x, rtol): + x = x.astype(real_dtype) + + z = x.astype(dtype) + d = np.absolute(np.arcsinh(x) / np.arcsinh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsinh')) + + z = (1j * x).astype(dtype) + d = np.absolute(np.arcsinh(x) / np.arcsin(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsin')) + + z = x.astype(dtype) + d = np.absolute(np.arctanh(x) / np.arctanh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctanh')) + + z = (1j * x).astype(dtype) + d = np.absolute(np.arctanh(x) / np.arctan(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctan')) + + # The switchover was chosen as 1e-3; hence there can be up to + # ~eps/1e-3 of relative cancellation error before it + + x_series = np.logspace(-20, -3.001, 200) + x_basic = np.logspace(-2.999, 0, 10, endpoint=False) + + if dtype is np.clongdouble: + if bad_arcsinh(): + pytest.skip("Trig functions of np.clongdouble values known " + "to be inaccurate on aarch64 and PPC for some " + "compilation configurations.") + # It's not guaranteed that the system-provided arc functions + # are accurate down to a few epsilons. (Eg. 
on Linux 64-bit)
+            # So, give more leeway for long complex tests here:
+            check(x_series, 50.0 * eps)
+        else:
+            check(x_series, 2.1 * eps)
+        check(x_basic, 2.0 * eps / 1e-3)
+
+        # Check a few points
+
+        z = np.array([1e-5 * (1 + 1j)], dtype=dtype)
+        p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
+        d = np.absolute(1 - np.arctanh(z) / p)
+        assert_(np.all(d < 1e-15))
+
+        p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
+        d = np.absolute(1 - np.arcsinh(z) / p)
+        assert_(np.all(d < 1e-15))
+
+        p = 9.999999999333333333e-6j + 1.000000000066666666e-5
+        d = np.absolute(1 - np.arctan(z) / p)
+        assert_(np.all(d < 1e-15))
+
+        p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
+        d = np.absolute(1 - np.arcsin(z) / p)
+        assert_(np.all(d < 1e-15))
+
+        # Check continuity across switchover points
+
+        def check(func, z0, d=1):
+            z0 = np.asarray(z0, dtype=dtype)
+            zp = z0 + abs(z0) * d * eps * 2
+            zm = z0 - abs(z0) * d * eps * 2
+            assert_(np.all(zp != zm), (zp, zm))
+
+            # NB: the cancellation error at the switchover is at least eps
+            good = (abs(func(zp) - func(zm)) < 2 * eps)
+            assert_(np.all(good), (func, z0[~good]))
+
+        for func in (np.arcsinh, np.arcsin, np.arctanh, np.arctan):
+            pts = [rp + 1j * ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3)
+                   if rp != 0 or ip != 0]
+            check(func, pts, 1)
+            check(func, pts, 1j)
+            check(func, pts, 1 + 1j)
+
+    @np.errstate(all="ignore")
+    def test_promotion_corner_cases(self):
+        for func in self.funcs:
+            assert func(np.float16(1)).dtype == np.float16
+            # Integer to low precision float promotion is a dubious choice:
+            assert func(np.uint8(1)).dtype == np.float16
+            assert func(np.int16(1)).dtype == np.float32
+
+
+class TestAttributes:
+    def test_attributes(self):
+        add = ncu.add
+        assert_equal(add.__name__, 'add')
+        assert_(add.ntypes >= 18)  # don't fail if types added
+        assert_('ii->i' in add.types)
+        assert_equal(add.nin, 2)
+        assert_equal(add.nout, 1)
+        assert_equal(add.identity, 0)
+
+    def test_doc(self):
+        # don't bother checking the long list of kwargs, which are likely to
+        # change
+        assert_(ncu.add.__doc__.startswith(
+            "add(x1, x2, /, out=None, *, where=True"))
+        assert_(ncu.frexp.__doc__.startswith(
+            "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
+
+
+class TestSubclass:
+
+    def test_subclass_op(self):
+
+        class simple(np.ndarray):
+            def __new__(subtype, shape):
+                self = np.ndarray.__new__(subtype, shape, dtype=object)
+                self.fill(0)
+                return self
+
+        a = simple((3, 4))
+        assert_equal(a + a, a)
+
+
+class TestFrompyfunc:
+
+    def test_identity(self):
+        def mul(a, b):
+            return a * b
+
+        # with identity=value
+        mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1)
+        assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+        assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
+        assert_equal(mul_ufunc.reduce([]), 1)
+
+        # with identity=None (reorderable)
+        mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None)
+        assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+        assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
+        assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
+
+        # with no identity (not reorderable)
+        mul_ufunc = np.frompyfunc(mul, nin=2, nout=1)
+        assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+        assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)))
+        assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
+
+
+def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
+                      dtype=complex):
+    """
+    Check for a branch cut in a function. 
+ + Assert that `x0` lies on a branch cut of function `f` and `f` is + continuous from the direction `dx`. + + Parameters + ---------- + f : func + Function to check + x0 : array-like + Point on branch cut + dx : array-like + Direction to check continuity in + re_sign, im_sign : {1, -1} + Change of sign of the real or imaginary part expected + sig_zero_ok : bool + Whether to check if the branch cut respects signed zero (if applicable) + dtype : dtype + Dtype to check (should be complex) + + """ + x0 = np.atleast_1d(x0).astype(dtype) + dx = np.atleast_1d(dx).astype(dtype) + + if np.dtype(dtype).char == 'F': + scale = np.finfo(dtype).eps * 1e2 + atol = np.float32(1e-2) + else: + scale = np.finfo(dtype).eps * 1e3 + atol = 1e-4 + + y0 = f(x0) + yp = f(x0 + dx * scale * np.absolute(x0) / np.absolute(dx)) + ym = f(x0 - dx * scale * np.absolute(x0) / np.absolute(dx)) + + assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.real - ym.real * re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag * im_sign) < atol), (y0, ym)) + + if sig_zero_ok: + # check that signed zeros also work as a displacement + jr = (x0.real == 0) & (dx.real != 0) + ji = (x0.imag == 0) & (dx.imag != 0) + if np.any(jr): + x = x0[jr] + x.real = ncu.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[jr].real - ym.real * re_sign) < atol), (y0[jr], ym)) + assert_(np.all(np.absolute(y0[jr].imag - ym.imag * im_sign) < atol), (y0[jr], ym)) + + if np.any(ji): + x = x0[ji] + x.imag = ncu.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), (y0[ji], ym)) + assert_(np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), (y0[ji], ym)) + +def test_copysign(): + assert_(np.copysign(1, -1) == -1) + with np.errstate(divide="ignore"): + assert_(1 / np.copysign(0, -1) < 0) + assert_(1 / np.copysign(0, 1) > 0) + assert_(np.signbit(np.copysign(np.nan, -1))) + assert_(not np.signbit(np.copysign(np.nan, 1))) + +def _test_nextafter(t): + one = t(1) + two = t(2) + zero = t(0) + eps = np.finfo(t).eps + assert_(np.nextafter(one, two) - one == eps) + assert_(np.nextafter(one, zero) - one < 0) + assert_(np.isnan(np.nextafter(np.nan, one))) + assert_(np.isnan(np.nextafter(one, np.nan))) + assert_(np.nextafter(one, one) == one) + +def test_nextafter(): + return _test_nextafter(np.float64) + + +def test_nextafterf(): + return _test_nextafter(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_nextafterl(): + return _test_nextafter(np.longdouble) + + +def test_nextafter_0(): + for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): + # The value of tiny for double double is NaN, so we need to pass the + # assert + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(t).tiny): + tiny = np.finfo(t).tiny + assert_( + 0. 
< direction * np.nextafter(t(0), t(direction)) < tiny) + assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0) + +def _test_spacing(t): + one = t(1) + eps = np.finfo(t).eps + nan = t(np.nan) + inf = t(np.inf) + with np.errstate(invalid='ignore'): + assert_equal(np.spacing(one), eps) + assert_(np.isnan(np.spacing(nan))) + assert_(np.isnan(np.spacing(inf))) + assert_(np.isnan(np.spacing(-inf))) + assert_(np.spacing(t(1e30)) != 0) + +def test_spacing(): + return _test_spacing(np.float64) + +def test_spacingf(): + return _test_spacing(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_spacingl(): + return _test_spacing(np.longdouble) + +def test_spacing_gfortran(): + # Reference from this fortran file, built with gfortran 4.3.3 on linux + # 32bits: + # PROGRAM test_spacing + # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) + # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) + # + # WRITE(*,*) spacing(0.00001_DBL) + # WRITE(*,*) spacing(1.0_DBL) + # WRITE(*,*) spacing(1000._DBL) + # WRITE(*,*) spacing(10500._DBL) + # + # WRITE(*,*) spacing(0.00001_SGL) + # WRITE(*,*) spacing(1.0_SGL) + # WRITE(*,*) spacing(1000._SGL) + # WRITE(*,*) spacing(10500._SGL) + # END PROGRAM + ref = {np.float64: [1.69406589450860068E-021, + 2.22044604925031308E-016, + 1.13686837721616030E-013, + 1.81898940354585648E-012], + np.float32: [9.09494702E-13, + 1.19209290E-07, + 6.10351563E-05, + 9.76562500E-04]} + + for dt, dec_ in zip([np.float32, np.float64], (10, 20)): + x = np.array([1e-5, 1, 1000, 10500], dtype=dt) + assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_) + +def test_nextafter_vs_spacing(): + # XXX: spacing does not handle long double yet + for t in [np.float32, np.float64]: + for _f in [1, 1e-5, 1000]: + f = t(_f) + f1 = t(_f + 1) + assert_(np.nextafter(f, f1) - f == np.spacing(f)) + +def test_pos_nan(): + """Check np.nan is a positive nan.""" + assert_(np.signbit(np.nan) == 0) + +def test_reduceat(): + """Test bug in reduceat when structured arrays are not copied.""" + db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) + a = np.empty([100], dtype=db) + a['name'] = 'Simple' + a['time'] = 10 + a['value'] = 100 + indx = [0, 7, 15, 25] + + h2 = [] + val1 = indx[0] + for val2 in indx[1:]: + h2.append(np.add.reduce(a['value'][val1:val2])) + val1 = val2 + h2.append(np.add.reduce(a['value'][val1:])) + h2 = np.array(h2) + + # test buffered -- this should work + h1 = np.add.reduceat(a['value'], indx) + assert_array_almost_equal(h1, h2) + + # This is when the error occurs. 
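+    # Shrinking the ufunc buffer below forces reduceat onto its unbuffered
+    # path over the non-contiguous 'value' field view, which is presumably
+    # where the original uncopied-structured-array bug lived.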
+ # test no buffer + np.setbufsize(32) + h1 = np.add.reduceat(a['value'], indx) + np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT) + assert_array_almost_equal(h1, h2) + +def test_reduceat_empty(): + """Reduceat should work with empty arrays""" + indices = np.array([], 'i4') + x = np.array([], 'f8') + result = np.add.reduceat(x, indices) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0,)) + # Another case with a slightly different zero-sized shape + x = np.ones((5, 2)) + result = np.add.reduceat(x, [], axis=0) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0, 2)) + result = np.add.reduceat(x, [], axis=1) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (5, 0)) + +def test_complex_nan_comparisons(): + nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] + fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), + complex(1, 1), complex(-1, -1), complex(0, 0)] + + with np.errstate(invalid='ignore'): + for x in nans + fins: + x = np.array([x]) + for y in nans + fins: + y = np.array([y]) + + if np.isfinite(x) and np.isfinite(y): + continue + + assert_equal(x < y, False, err_msg=f"{x!r} < {y!r}") + assert_equal(x > y, False, err_msg=f"{x!r} > {y!r}") + assert_equal(x <= y, False, err_msg=f"{x!r} <= {y!r}") + assert_equal(x >= y, False, err_msg=f"{x!r} >= {y!r}") + assert_equal(x == y, False, err_msg=f"{x!r} == {y!r}") + + +def test_rint_big_int(): + # np.rint bug for large integer values on Windows 32-bit and MKL + # https://github.com/numpy/numpy/issues/6685 + val = 4607998452777363968 + # This is exactly representable in floating point + assert_equal(val, int(float(val))) + # Rint should not change the value + assert_equal(val, np.rint(val)) + + +@pytest.mark.parametrize('ftype', [np.float32, np.float64]) +def test_memoverlap_accumulate(ftype): + # Reproduces bug https://github.com/numpy/numpy/issues/15597 + arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype) + out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype) + out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype) + assert_equal(np.maximum.accumulate(arr), out_max) + assert_equal(np.minimum.accumulate(arr), out_min) + +@pytest.mark.parametrize("ufunc, dtype", [ + (ufunc, t[0]) + for ufunc in UFUNCS_BINARY_ACC + for t in ufunc.types + if t[-1] == '?' and t[0] not in 'DFGMmO' +]) +def test_memoverlap_accumulate_cmp(ufunc, dtype): + if ufunc.signature: + pytest.skip('For generic signatures only') + for size in (2, 8, 32, 64, 128, 256): + arr = np.array([0, 1, 1] * size, dtype=dtype) + acc = ufunc.accumulate(arr, dtype='?') + acc_u8 = acc.view(np.uint8) + exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8) + assert_equal(exp, acc_u8) + +@pytest.mark.parametrize("ufunc, dtype", [ + (ufunc, t[0]) + for ufunc in UFUNCS_BINARY_ACC + for t in ufunc.types + if t[0] == t[1] and t[0] == t[-1] and t[0] not in 'DFGMmO?' 
+]) +def test_memoverlap_accumulate_symmetric(ufunc, dtype): + if ufunc.signature: + pytest.skip('For generic signatures only') + with np.errstate(all='ignore'): + for size in (2, 8, 32, 64, 128, 256): + arr = np.array([0, 1, 2] * size).astype(dtype) + acc = ufunc.accumulate(arr, dtype=dtype) + exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype) + assert_equal(exp, acc) + +def test_signaling_nan_exceptions(): + with assert_no_warnings(): + a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff') + np.isnan(a) + +@pytest.mark.parametrize("arr", [ + np.arange(2), + np.matrix([0, 1]), + np.matrix([[0, 1], [2, 5]]), + ]) +def test_outer_subclass_preserve(arr): + # for gh-8661 + class foo(np.ndarray): + pass + actual = np.multiply.outer(arr.view(foo), arr.view(foo)) + assert actual.__class__.__name__ == 'foo' + +def test_outer_bad_subclass(): + class BadArr1(np.ndarray): + def __array_finalize__(self, obj): + # The outer call reshapes to 3 dims, try to do a bad reshape. + if self.ndim == 3: + self.shape = self.shape + (1,) + + class BadArr2(np.ndarray): + def __array_finalize__(self, obj): + if isinstance(obj, BadArr2): + # outer inserts 1-sized dims. In that case disturb them. + if self.shape[-1] == 1: + self.shape = self.shape[::-1] + + for cls in [BadArr1, BadArr2]: + arr = np.ones((2, 3)).view(cls) + with assert_raises(TypeError) as a: + # The first array gets reshaped (not the second one) + np.add.outer(arr, [1, 2]) + + # This actually works, since we only see the reshaping error: + arr = np.ones((2, 3)).view(cls) + assert type(np.add.outer([1, 2], arr)) is cls + +def test_outer_exceeds_maxdims(): + deep = np.ones((1,) * 33) + with assert_raises(ValueError): + np.add.outer(deep, deep) + +def test_bad_legacy_ufunc_silent_errors(): + # legacy ufuncs can't report errors and NumPy can't check if the GIL + # is released. So NumPy has to check after the GIL is released just to + # cover all bases. `np.power` uses/used to use this. + arr = np.arange(3).astype(np.float64) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + # not contiguous means the fast-path cannot be taken + non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2] + ncu_tests.always_error(non_contig, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.outer(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduce(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduceat(arr, [0, 1]) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.accumulate(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.at(arr, [0, 1, 2], arr) + + +@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) +def test_bad_legacy_gufunc_silent_errors(x1): + # Verify that an exception raised in a gufunc loop propagates correctly. + # The signature of always_error_gufunc is '(i),()->()'. 
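+    # Reading that signature: the first operand carries a 1-d core dimension
+    # of length i, while the second operand and the output are scalars for
+    # each outer-loop element.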
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_gufunc(x1, 0.0) + + +class TestAddDocstring: + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_add_same_docstring(self): + # test for attributes (which are C-level defined) + ncu.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) + + # And typical functions: + def func(): + """docstring""" + return + + ncu.add_docstring(func, func.__doc__) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_different_docstring_fails(self): + # test for attributes (which are C-level defined) + with assert_raises(RuntimeError): + ncu.add_docstring(np.ndarray.flat, "different docstring") + + # And typical functions: + def func(): + """docstring""" + return + + with assert_raises(RuntimeError): + ncu.add_docstring(func, "different docstring") + + +class TestAdd_newdoc_ufunc: + @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") + def test_ufunc_arg(self): + assert_raises(TypeError, ncu._add_newdoc_ufunc, 2, "blah") + assert_raises(ValueError, ncu._add_newdoc_ufunc, np.add, "blah") + + @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") + def test_string_arg(self): + assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath_accuracy.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath_accuracy.py new file mode 100644 index 0000000..5707e92 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath_accuracy.py @@ -0,0 +1,124 @@ +import os +import sys +from ctypes import POINTER, c_double, c_float, c_int, c_longlong, cast, pointer +from os import path + +import pytest +from numpy._core._multiarray_umath import __cpu_features__ + +import numpy as np +from numpy.testing import assert_array_max_ulp +from numpy.testing._private.utils import _glibc_older_than + +UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if + isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] + +# Remove functions that do not support `floats` +UNARY_OBJECT_UFUNCS.remove(np.invert) +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) + +IS_AVX = __cpu_features__.get('AVX512F', False) or \ + (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) + +IS_AVX512FP16 = __cpu_features__.get('AVX512FP16', False) + +# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448). 
+runtest = (sys.platform.startswith('linux') + and IS_AVX and not _glibc_older_than("2.17")) +platform_skip = pytest.mark.skipif(not runtest, + reason="avoid testing inconsistent platform " + "library implementations") + +# convert string to hex function taken from: +# https://stackoverflow.com/questions/1592158/convert-hex-to-float # +def convert(s, datatype="np.float32"): + i = int(s, 16) # convert from hex to a Python int + if (datatype == "np.float64"): + cp = pointer(c_longlong(i)) # make this into a c long long integer + fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer + else: + cp = pointer(c_int(i)) # make this into a c integer + fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer + + return fp.contents.value # dereference the pointer, get the float + + +str_to_float = np.vectorize(convert) + +class TestAccuracy: + @platform_skip + def test_validate_transcendentals(self): + with np.errstate(all='ignore'): + data_dir = path.join(path.dirname(__file__), 'data') + files = os.listdir(data_dir) + files = list(filter(lambda f: f.endswith('.csv'), files)) + for filename in files: + filepath = path.join(data_dir, filename) + with open(filepath) as fid: + file_without_comments = ( + r for r in fid if r[0] not in ('$', '#') + ) + data = np.genfromtxt(file_without_comments, + dtype=('|S39', '|S39', '|S39', int), + names=('type', 'input', 'output', 'ulperr'), + delimiter=',', + skip_header=1) + npname = path.splitext(filename)[0].split('-')[3] + npfunc = getattr(np, npname) + for datatype in np.unique(data['type']): + data_subset = data[data['type'] == datatype] + inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + perm = np.random.permutation(len(inval)) + inval = inval[perm] + outval = outval[perm] + maxulperr = data_subset['ulperr'].max() + assert_array_max_ulp(npfunc(inval), outval, maxulperr) + + @pytest.mark.skipif(IS_AVX512FP16, + reason="SVML FP16 have slightly higher ULP errors") + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_validate_fp16_transcendentals(self, ufunc): + with np.errstate(all='ignore'): + arr = np.arange(65536, dtype=np.int16) + datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) + datafp32 = datafp16.astype(np.float32) + assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), + maxulp=1, dtype=np.float16) + + @pytest.mark.skipif(not IS_AVX512FP16, + reason="lower ULP only apply for SVML FP16") + def test_validate_svml_fp16(self): + max_ulp_err = { + "arccos": 2.54, + "arccosh": 2.09, + "arcsin": 3.06, + "arcsinh": 1.51, + "arctan": 2.61, + "arctanh": 1.88, + "cbrt": 1.57, + "cos": 1.43, + "cosh": 1.33, + "exp2": 1.33, + "exp": 1.27, + "expm1": 0.53, + "log": 1.80, + "log10": 1.27, + "log1p": 1.88, + "log2": 1.80, + "sin": 1.88, + "sinh": 2.05, + "tan": 2.26, + "tanh": 3.00, + } + + with np.errstate(all='ignore'): + arr = np.arange(65536, dtype=np.int16) + datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) + datafp32 = datafp16.astype(np.float32) + for func in max_ulp_err: + ufunc = getattr(np, func) + ulp = np.ceil(max_ulp_err[func]) + assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), + maxulp=ulp, dtype=np.float16) diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath_complex.py b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath_complex.py new file mode 
100644 index 0000000..a97af47 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_core/tests/test_umath_complex.py @@ -0,0 +1,626 @@ +import platform +import sys + +# import the c-extension module directly since _arg is not exported via umath +import numpy._core._multiarray_umath as ncu +import pytest + +import numpy as np +from numpy.testing import ( + assert_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) + +# TODO: branch cuts (use Pauli code) +# TODO: conj 'symmetry' +# TODO: FPU exceptions + +# At least on Windows the results of many complex functions are not conforming +# to the C99 standard. See ticket 1574. +# Ditto for Solaris (ticket 1642) and OS X on PowerPC. +# FIXME: this will probably change when we require full C99 compatibility +with np.errstate(all='ignore'): + functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) + or (np.log(complex(ncu.NZERO, 0)).imag != np.pi)) +# TODO: replace with a check on whether platform-provided C99 funcs are used +xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) + +# TODO This can be xfail when the generator functions are got rid of. +platform_skip = pytest.mark.skipif(xfail_complex_tests, + reason="Inadequate C99 complex support") + + +class TestCexp: + def test_simple(self): + check = check_complex_value + f = np.exp + + check(f, 1, 0, np.exp(1), 0, False) + check(f, 0, 1, np.cos(1), np.sin(1), False) + + ref = np.exp(1) * complex(np.cos(1), np.sin(1)) + check(f, 1, 1, ref.real, ref.imag, False) + + @platform_skip + def test_special_values(self): + # C99: Section G 6.3.1 + + check = check_complex_value + f = np.exp + + # cexp(+-0 + 0i) is 1 + 0i + check(f, ncu.PZERO, 0, 1, 0, False) + check(f, ncu.NZERO, 0, 1, 0, False) + + # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU + # exception + check(f, 1, np.inf, np.nan, np.nan) + check(f, -1, np.inf, np.nan, np.nan) + check(f, 0, np.inf, np.nan, np.nan) + + # cexp(inf + 0i) is inf + 0i + check(f, np.inf, 0, np.inf, 0) + + # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y + check(f, -np.inf, 1, ncu.PZERO, ncu.PZERO) + check(f, -np.inf, 0.75 * np.pi, ncu.NZERO, ncu.PZERO) + + # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y + check(f, np.inf, 1, np.inf, np.inf) + check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) + + # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) + def _check_ninf_inf(dummy): + msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(-np.inf, np.inf))) + if z.real != 0 or z.imag != 0: + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_inf(None) + + # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
+        def _check_inf_inf(dummy):
+            msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(np.inf, np.inf)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_inf_inf(None)
+
+        # cexp(-inf + nan i) is +-0 +- 0i
+        def _check_ninf_nan(dummy):
+            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(-np.inf, np.nan)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_nan(None)
+
+        # cexp(inf + nan i) is +-inf + nan
+        def _check_inf_nan(dummy):
+            msgform = "cexp(inf, nan) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(np.inf, np.nan)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_inf_nan(None)
+
+        # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
+        # ex)
+        check(f, np.nan, 1, np.nan, np.nan)
+        check(f, np.nan, -1, np.nan, np.nan)
+
+        check(f, np.nan, np.inf, np.nan, np.nan)
+        check(f, np.nan, -np.inf, np.nan, np.nan)
+
+        # cexp(nan + nani) is nan + nani
+        check(f, np.nan, np.nan, np.nan, np.nan)
+
+    # TODO This can be xfail when the generator functions are got rid of.
+    @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
+    def test_special_values2(self):
+        # XXX: most implementations get it wrong here (including glibc <= 2.10)
+        # cexp(nan + 0i) is nan + 0i
+        check = check_complex_value
+        f = np.exp
+
+        check(f, np.nan, 0, np.nan, 0)
+
+class TestClog:
+    def test_simple(self):
+        x = np.array([1 + 0j, 1 + 2j])
+        y_r = np.log(np.abs(x)) + 1j * np.angle(x)
+        y = np.log(x)
+        assert_almost_equal(y, y_r)
+
+    @platform_skip
+    @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+    def test_special_values(self):
+        xl = []
+        yl = []
+
+        # From C99 std (Sec 6.3.2)
+        # XXX: check exceptions raised
+        # --- raise for invalid fails.
+
+        # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
+        # floating-point exception.
+        with np.errstate(divide='raise'):
+            x = np.array([ncu.NZERO], dtype=complex)
+            y = complex(-np.inf, np.pi)
+            assert_raises(FloatingPointError, np.log, x)
+            with np.errstate(divide='ignore'):
+                assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
+        # floating-point exception.
+        with np.errstate(divide='raise'):
+            x = np.array([0], dtype=complex)
+            y = complex(-np.inf, 0)
+            assert_raises(FloatingPointError, np.log, x)
+            with np.errstate(divide='ignore'):
+                assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(x + i inf) returns +inf + i pi /2, for finite x.
+        x = np.array([complex(1, np.inf)], dtype=complex)
+        y = complex(np.inf, 0.5 * np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        x = np.array([complex(-1, np.inf)], dtype=complex)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(x + iNaN) returns NaN + iNaN and optionally raises the
+        # 'invalid' floating-point exception, for finite x. 
+ with np.errstate(invalid='raise'): + x = np.array([complex(1., np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + with np.errstate(invalid='raise'): + x = np.array([np.inf + 1j * np.nan], dtype=complex) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. + x = np.array([-np.inf + 1j], dtype=complex) + y = complex(np.inf, np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. + x = np.array([np.inf + 1j], dtype=complex) + y = complex(np.inf, 0) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(- inf + i inf) returns +inf + i3pi /4. + x = np.array([complex(-np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.75 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + i inf) returns +inf + ipi /4. + x = np.array([complex(np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.25 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+/- inf + iNaN) returns +inf + iNaN. + x = np.array([complex(np.inf, np.nan)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-np.inf, np.nan)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iy) returns NaN + iNaN and optionally raises the + # 'invalid' floating-point exception, for finite y. + x = np.array([complex(np.nan, 1)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + i inf) returns +inf + iNaN. + x = np.array([complex(np.nan, np.inf)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iNaN) returns NaN + iNaN. + x = np.array([complex(np.nan, np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(conj(z)) = conj(clog(z)). 
+ xa = np.array(xl, dtype=complex) + ya = np.array(yl, dtype=complex) + with np.errstate(divide='ignore'): + for i in range(len(xa)): + assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) + + +class TestCsqrt: + + def test_simple(self): + # sqrt(1) + check_complex_value(np.sqrt, 1, 0, 1, 0) + + # sqrt(1i) + rres = 0.5 * np.sqrt(2) + ires = rres + check_complex_value(np.sqrt, 0, 1, rres, ires, False) + + # sqrt(-1) + check_complex_value(np.sqrt, -1, 0, 0, 1) + + def test_simple_conjugate(self): + ref = np.conj(np.sqrt(complex(1, 1))) + + def f(z): + return np.sqrt(np.conj(z)) + + check_complex_value(f, 1, 1, ref.real, ref.imag, False) + + #def test_branch_cut(self): + # _check_branch_cut(f, -1, 0, 1, -1) + + @platform_skip + def test_special_values(self): + # C99: Sec G 6.4.2 + + check = check_complex_value + f = np.sqrt + + # csqrt(+-0 + 0i) is 0 + 0i + check(f, ncu.PZERO, 0, 0, 0) + check(f, ncu.NZERO, 0, 0, 0) + + # csqrt(x + infi) is inf + infi for any x (including NaN) + check(f, 1, np.inf, np.inf, np.inf) + check(f, -1, np.inf, np.inf, np.inf) + + check(f, ncu.PZERO, np.inf, np.inf, np.inf) + check(f, ncu.NZERO, np.inf, np.inf, np.inf) + check(f, np.inf, np.inf, np.inf, np.inf) + check(f, -np.inf, np.inf, np.inf, np.inf) # noqa: E221 + check(f, -np.nan, np.inf, np.inf, np.inf) # noqa: E221 + + # csqrt(x + nani) is nan + nani for any finite x + check(f, 1, np.nan, np.nan, np.nan) + check(f, -1, np.nan, np.nan, np.nan) + check(f, 0, np.nan, np.nan, np.nan) + + # csqrt(-inf + yi) is +0 + infi for any finite y > 0 + check(f, -np.inf, 1, ncu.PZERO, np.inf) + + # csqrt(inf + yi) is +inf + 0i for any finite y > 0 + check(f, np.inf, 1, np.inf, ncu.PZERO) + + # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) + def _check_ninf_nan(dummy): + msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" + z = np.sqrt(np.array(complex(-np.inf, np.nan))) + # FIXME: ugly workaround for isinf bug. 
+ with np.errstate(invalid='ignore'): + if not (np.isnan(z.real) and np.isinf(z.imag)): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_nan(None) + + # csqrt(+inf + nani) is inf + nani + check(f, np.inf, np.nan, np.inf, np.nan) + + # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x + # + nani) + check(f, np.nan, 0, np.nan, np.nan) + check(f, np.nan, 1, np.nan, np.nan) + check(f, np.nan, np.nan, np.nan, np.nan) + + # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch + # cuts first) + +class TestCpow: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) + y_r = x ** 2 + y = np.power(x, 2) + assert_almost_equal(y, y_r) + + def test_scalar(self): + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) + lx = list(range(len(x))) + + # Hardcode the expected `builtins.complex` values, + # as complex exponentiation is broken as of bpo-44698 + p_r = [ + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, + complex(np.inf, np.nan), + complex(np.nan, np.nan), + ] + + n_r = [x[i] ** y[i] for i in lx] + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + + def test_array(self): + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) + lx = list(range(len(x))) + + # Hardcode the expected `builtins.complex` values, + # as complex exponentiation is broken as of bpo-44698 + p_r = [ + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, + complex(np.inf, np.nan), + complex(np.nan, np.nan), + ] + + n_r = x ** y + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + +class TestCabs: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) + y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) + y = np.abs(x) + assert_almost_equal(y, y_r) + + def test_fabs(self): + # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) + x = np.array([1 + 0j], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(1, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.inf, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.nan, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + def test_cabs_inf_nan(self): + x, y = [], [] + + # cabs(+-nan + nani) returns nan + x.append(np.nan) + y.append(np.nan) + check_real_value(np.abs, np.nan, np.nan, np.nan) + + x.append(np.nan) + y.append(-np.nan) + check_real_value(np.abs, -np.nan, np.nan, np.nan) + + # According to C99 standard, if exactly one of the real/part is inf and + # the other nan, then cabs should return inf + x.append(np.inf) + y.append(np.nan) + check_real_value(np.abs, np.inf, np.nan, np.inf) + + x.append(-np.inf) + y.append(np.nan) + check_real_value(np.abs, -np.inf, np.nan, np.inf) + + # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) + def f(a): + return np.abs(np.conj(a)) + + def g(a, b): + return np.abs(complex(a, b)) + + xa = 
np.array(x, dtype=complex)
+        assert len(xa) == len(x) == len(y)
+        for xi, yi in zip(x, y):
+            ref = g(xi, yi)
+            check_real_value(f, xi, yi, ref)
+
+class TestCarg:
+    def test_simple(self):
+        check_real_value(ncu._arg, 1, 0, 0, False)
+        check_real_value(ncu._arg, 0, 1, 0.5 * np.pi, False)
+
+        check_real_value(ncu._arg, 1, 1, 0.25 * np.pi, False)
+        check_real_value(ncu._arg, ncu.PZERO, ncu.PZERO, ncu.PZERO)
+
+    # TODO This can be xfail when the generator functions are got rid of.
+    @pytest.mark.skip(
+        reason="Complex arithmetic with signed zero fails on most platforms")
+    def test_zero(self):
+        # carg(-0 +- 0i) returns +- pi
+        check_real_value(ncu._arg, ncu.NZERO, ncu.PZERO, np.pi, False)
+        check_real_value(ncu._arg, ncu.NZERO, ncu.NZERO, -np.pi, False)
+
+        # carg(+0 +- 0i) returns +- 0
+        check_real_value(ncu._arg, ncu.PZERO, ncu.PZERO, ncu.PZERO)
+        check_real_value(ncu._arg, ncu.PZERO, ncu.NZERO, ncu.NZERO)
+
+        # carg(x +- 0i) returns +- 0 for x > 0
+        check_real_value(ncu._arg, 1, ncu.PZERO, ncu.PZERO, False)
+        check_real_value(ncu._arg, 1, ncu.NZERO, ncu.NZERO, False)
+
+        # carg(x +- 0i) returns +- pi for x < 0
+        check_real_value(ncu._arg, -1, ncu.PZERO, np.pi, False)
+        check_real_value(ncu._arg, -1, ncu.NZERO, -np.pi, False)
+
+        # carg(+- 0 + yi) returns pi/2 for y > 0
+        check_real_value(ncu._arg, ncu.PZERO, 1, 0.5 * np.pi, False)
+        check_real_value(ncu._arg, ncu.NZERO, 1, 0.5 * np.pi, False)
+
+        # carg(+- 0 + yi) returns -pi/2 for y < 0
+        check_real_value(ncu._arg, ncu.PZERO, -1, -0.5 * np.pi, False)
+        check_real_value(ncu._arg, ncu.NZERO, -1, -0.5 * np.pi, False)
+
+    #def test_branch_cuts(self):
+    #    _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
+
+    def test_special_values(self):
+        # carg(-np.inf +- yi) returns +-pi for finite y > 0
+        check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
+        check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
+
+        # carg(np.inf +- yi) returns +-0 for finite y > 0
+        check_real_value(ncu._arg, np.inf, 1, ncu.PZERO, False)
+        check_real_value(ncu._arg, np.inf, -1, ncu.NZERO, False)
+
+        # carg(x +- np.infi) returns +-pi/2 for finite x
+        check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
+        check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
+
+        # carg(-np.inf +- np.infi) returns +-3pi/4
+        check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
+        check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
+
+        # carg(np.inf +- np.infi) returns +-pi/4
+        check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
+        check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
+
+        # carg(x + yi) returns np.nan if x or y is nan
+        check_real_value(ncu._arg, np.nan, 0, np.nan, False)
+        check_real_value(ncu._arg, 0, np.nan, np.nan, False)
+
+        check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
+        check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
+
+
+def check_real_value(f, x1, y1, x, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    if exact:
+        assert_equal(f(z1), x)
+    else:
+        assert_almost_equal(f(z1), x)
+
+
+def check_complex_value(f, x1, y1, x2, y2, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    z2 = complex(x2, y2)
+    with np.errstate(invalid='ignore'):
+        if exact:
+            assert_equal(f(z1), z2)
+        else:
+            assert_almost_equal(f(z1), z2)
+
+class TestSpecialComplexAVX:
+    @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4])
+    @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+    def test_array(self, stride, astype):
+        arr = np.array([complex(np.nan, np.nan),
+                        complex(np.nan, np.inf),
+                        complex(np.inf, 
np.nan), + complex(np.inf, np.inf), + complex(0., np.inf), + complex(np.inf, 0.), + complex(0., 0.), + complex(0., np.nan), + complex(np.nan, 0.)], dtype=astype) + abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) + sq_true = np.array([complex(np.nan, np.nan), + complex(np.nan, np.nan), + complex(np.nan, np.nan), + complex(np.nan, np.inf), + complex(-np.inf, np.nan), + complex(np.inf, np.nan), + complex(0., 0.), + complex(np.nan, np.nan), + complex(np.nan, np.nan)], dtype=astype) + with np.errstate(invalid='ignore'): + assert_equal(np.abs(arr[::stride]), abs_true[::stride]) + assert_equal(np.square(arr[::stride]), sq_true[::stride]) + +class TestComplexAbsoluteAVX: + @pytest.mark.parametrize("arraysize", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + # test to ensure masking and strides work as intended in the AVX implementation + def test_array(self, arraysize, stride, astype): + arr = np.ones(arraysize, dtype=astype) + abs_true = np.ones(arraysize, dtype=arr.real.dtype) + assert_equal(np.abs(arr[::stride]), abs_true[::stride]) + +# Testcase taken as is from https://github.com/numpy/numpy/issues/16660 +class TestComplexAbsoluteMixedDTypes: + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate']) + def test_array(self, stride, astype, func): + dtype = [('template_id', 'U') + uni_arr2 = str_arr.astype(').itemsize` instead.", + "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`", + "compare_chararrays": + "It's still available as `np.char.compare_chararrays`.", + "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", +} diff --git a/.venv/lib/python3.12/site-packages/numpy/_expired_attrs_2_0.pyi b/.venv/lib/python3.12/site-packages/numpy/_expired_attrs_2_0.pyi new file mode 100644 index 0000000..1452468 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_expired_attrs_2_0.pyi @@ -0,0 +1,62 @@ +from typing import Final, TypedDict, final, type_check_only + +@final +@type_check_only +class _ExpiredAttributesType(TypedDict): + geterrobj: str + seterrobj: str + cast: str + source: str + lookfor: str + who: str + fastCopyAndTranspose: str + set_numeric_ops: str + NINF: str + PINF: str + NZERO: str + PZERO: str + add_newdoc: str + add_docstring: str + add_newdoc_ufunc: str + safe_eval: str + float_: str + complex_: str + longfloat: str + singlecomplex: str + cfloat: str + longcomplex: str + clongfloat: str + string_: str + unicode_: str + Inf: str + Infinity: str + NaN: str + infty: str + issctype: str + maximum_sctype: str + obj2sctype: str + sctype2char: str + sctypes: str + issubsctype: str + set_string_function: str + asfarray: str + issubclass_: str + tracemalloc_domain: str + mat: str + recfromcsv: str + recfromtxt: str + deprecate: str + deprecate_with_doc: str + disp: str + find_common_type: str + round_: str + get_array_wrap: str + DataSource: str + nbytes: str + byte_bounds: str + compare_chararrays: str + format_parser: str + alltrue: str + sometrue: str + +__expired_attributes__: Final[_ExpiredAttributesType] = ... 
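The mapping above is only data; the enforcement lives in NumPy's package
__init__, via a PEP 562 module-level __getattr__. A simplified, illustrative
sketch of that hook (the real one in numpy/__init__.py handles additional
cases beyond expired attributes):

    from numpy._expired_attrs_2_0 import __expired_attributes__

    def __getattr__(attr):
        # Only consulted when normal module attribute lookup fails (PEP 562).
        if attr in __expired_attributes__:
            raise AttributeError(
                f"`np.{attr}` was removed in the NumPy 2.0 release. "
                f"{__expired_attributes__[attr]}"
            )
        raise AttributeError(f"module 'numpy' has no attribute {attr!r}")
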
diff --git a/.venv/lib/python3.12/site-packages/numpy/_globals.py b/.venv/lib/python3.12/site-packages/numpy/_globals.py
new file mode 100644
index 0000000..5f838ba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_globals.py
@@ -0,0 +1,96 @@
+"""
+Module defining global singleton classes.
+
+This module raises a RuntimeError if an attempt to reload it is made. In that
+way the identities of the classes defined here are fixed and will remain so
+even if numpy itself is reloaded. In particular, a function like the following
+will still work correctly after numpy is reloaded::
+
+    def foo(arg=np._NoValue):
+        if arg is np._NoValue:
+            ...
+
+That was not the case when the singleton classes were defined in the numpy
+``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
+motivated this module.
+
+"""
+import enum
+
+from ._utils import set_module as _set_module
+
+__all__ = ['_NoValue', '_CopyMode']
+
+
+# Disallow reloading this module so as to preserve the identities of the
+# classes defined here.
+if '_is_loaded' in globals():
+    raise RuntimeError('Reloading numpy._globals is not allowed')
+_is_loaded = True
+
+
+class _NoValueType:
+    """Special keyword value.
+
+    The instance of this class may be used as the default value assigned to a
+    keyword if no other obvious default (e.g., `None`) is suitable.
+
+    Common reasons for using this keyword are:
+
+    - A new keyword is added to a function, and that function forwards its
+      inputs to another function or method which can be defined outside of
+      NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims``
+      keyword was added that could only be forwarded if the user explicitly
+      specified ``keepdims``; downstream array libraries may not have added
+      the same keyword, so adding ``x.std(..., keepdims=keepdims)``
+      unconditionally could have broken previously working code.
+    - A keyword is being deprecated, and a deprecation warning must only be
+      emitted when the keyword is used.
+
+    """
+    __instance = None
+
+    def __new__(cls):
+        # ensure that only one instance exists
+        if not cls.__instance:
+            cls.__instance = super().__new__(cls)
+        return cls.__instance
+
+    def __repr__(self):
+        return "<no value>"
+
+
+_NoValue = _NoValueType()
+
+
+@_set_module("numpy")
+class _CopyMode(enum.Enum):
+    """
+    An enumeration for the copy modes supported
+    by numpy.copy() and numpy.array(). The following three modes are supported:
+
+    - ALWAYS: This means that a deep copy of the input
+      array will always be taken.
+    - IF_NEEDED: This means that a deep copy of the input
+      array will be taken only if necessary.
+    - NEVER: This means that the deep copy will never be taken.
+      If a copy cannot be avoided then a `ValueError` will be
+      raised.
+
+    Note that the buffer-protocol could in theory do copies. NumPy currently
+    assumes an object exporting the buffer protocol will never do this. 
+ """ + + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self): + # For backwards compatibility + if self == _CopyMode.ALWAYS: + return True + + if self == _CopyMode.NEVER: + return False + + raise ValueError(f"{self} is neither True nor False.") diff --git a/.venv/lib/python3.12/site-packages/numpy/_globals.pyi b/.venv/lib/python3.12/site-packages/numpy/_globals.pyi new file mode 100644 index 0000000..b2231a9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_globals.pyi @@ -0,0 +1,17 @@ +__all__ = ["_CopyMode", "_NoValue"] + +import enum +from typing import Final, final + +@final +class _CopyMode(enum.Enum): + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self, /) -> bool: ... + +@final +class _NoValueType: ... + +_NoValue: Final[_NoValueType] = ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__init__.py b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..56c4a87 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc new file mode 100644 index 0000000..2a1fa0b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/hook-numpy.py b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/hook-numpy.py new file mode 100644 index 0000000..61c224b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/hook-numpy.py @@ -0,0 +1,36 @@ +"""This hook should collect all binary files and any hidden modules that numpy +needs. + +Our (some-what inadequate) docs for writing PyInstaller hooks are kept here: +https://pyinstaller.readthedocs.io/en/stable/hooks.html + +""" +from PyInstaller.compat import is_pure_conda +from PyInstaller.utils.hooks import collect_dynamic_libs + +# Collect all DLLs inside numpy's installation folder, dump them into built +# app's root. +binaries = collect_dynamic_libs("numpy", ".") + +# If using Conda without any non-conda virtual environment manager: +if is_pure_conda: + # Assume running the NumPy from Conda-forge and collect it's DLLs from the + # communal Conda bin directory. DLLs from NumPy's dependencies must also be + # collected to capture MKL, OpenBlas, OpenMP, etc. + from PyInstaller.utils.hooks import conda_support + datas = conda_support.collect_dynamic_libs("numpy", dependencies=True) + +# Submodules PyInstaller cannot detect. `_dtype_ctypes` is only imported +# from C and `_multiarray_tests` is used in tests (which are not packed). +hiddenimports = ['numpy._core._dtype_ctypes', 'numpy._core._multiarray_tests'] + +# Remove testing and building code and packages that are referenced throughout +# NumPy but are not really dependencies. 
+excludedimports = [ + "scipy", + "pytest", + "f2py", + "setuptools", + "distutils", + "numpy.distutils", +] diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/hook-numpy.pyi b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/hook-numpy.pyi new file mode 100644 index 0000000..2642996 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/hook-numpy.pyi @@ -0,0 +1,13 @@ +from typing import Final + +# from `PyInstaller.compat` +is_conda: Final[bool] +is_pure_conda: Final[bool] + +# from `PyInstaller.utils.hooks` +def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... + +binaries: Final[list[tuple[str, str]]] + +hiddenimports: Final[list[str]] +excludedimports: Final[list[str]] diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__init__.py new file mode 100644 index 0000000..4ed8fdd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__init__.py @@ -0,0 +1,16 @@ +import pytest + +from numpy.testing import IS_EDITABLE, IS_WASM + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..d3f52af Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc new file mode 100644 index 0000000..95efbcb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc new file mode 100644 index 0000000..3e13f55 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py new file mode 100644 index 0000000..eb28070 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py @@ -0,0 +1,32 @@ +"""A crude *bit of everything* smoke test to verify PyInstaller compatibility. + +PyInstaller typically goes wrong by forgetting to package modules, extension +modules or shared libraries. This script should aim to touch as many of those +as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure +due to an uncollected resource. Missing resources are unlikely to lead to +arithmetic errors so there's generally no need to verify any calculation's +output - merely that it made it to the end OK. 
This script should not +explicitly import any of numpy's submodules, as that gives PyInstaller undue +hints that those submodules exist and should be collected (accessing implicitly +loaded submodules is OK). + +""" +import numpy as np + +a = np.arange(1., 10.).reshape((3, 3)) % 5 +np.linalg.det(a) +a @ a +a @ a.T +np.linalg.inv(a) +np.sin(np.exp(a)) +np.linalg.svd(a) +np.linalg.eigh(a) + +np.unique(np.random.randint(0, 10, 100)) +np.sort(np.random.uniform(0, 10, 100)) + +np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) +np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum() +np.polynomial.Legendre([7, 8, 9]).roots() + +print("I made it!") diff --git a/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/test_pyinstaller.py b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/test_pyinstaller.py new file mode 100644 index 0000000..a9061da --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_pyinstaller/tests/test_pyinstaller.py @@ -0,0 +1,35 @@ +import subprocess +from pathlib import Path + +import pytest + + +# PyInstaller has been very slow about replacing 'imp' with 'importlib'. +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +# It also leaks io.BytesIO()s. +@pytest.mark.filterwarnings('ignore::ResourceWarning') +@pytest.mark.parametrize("mode", ["--onedir", "--onefile"]) +@pytest.mark.slow +def test_pyinstaller(mode, tmp_path): + """Compile and run pyinstaller-smoke.py using PyInstaller.""" + + pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run + + source = Path(__file__).with_name("pyinstaller-smoke.py").resolve() + args = [ + # Place all generated files in ``tmp_path``. + '--workpath', str(tmp_path / "build"), + '--distpath', str(tmp_path / "dist"), + '--specpath', str(tmp_path), + mode, + str(source), + ] + pyinstaller_cli(args) + + if mode == "--onefile": + exe = tmp_path / "dist" / source.stem + else: + exe = tmp_path / "dist" / source.stem / source.stem + + p = subprocess.run([str(exe)], check=True, stdout=subprocess.PIPE) + assert p.stdout.strip() == b"I made it!" diff --git a/.venv/lib/python3.12/site-packages/numpy/_pytesttester.py b/.venv/lib/python3.12/site-packages/numpy/_pytesttester.py new file mode 100644 index 0000000..77342e4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_pytesttester.py @@ -0,0 +1,201 @@ +""" +Pytest test running. + +This module implements the ``test()`` function for NumPy modules. The usual +boilerplate for doing that is to put the following in the module +``__init__.py`` file:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + +Warnings filtering and other runtime settings should be dealt with in the +``pytest.ini`` file in the numpy repo root. The behavior of the test depends on +whether or not that file is found, as follows: + +* ``pytest.ini`` is present (develop mode) + All warnings except those explicitly filtered out are raised as errors. +* ``pytest.ini`` is absent (release mode) + DeprecationWarnings and PendingDeprecationWarnings are ignored, other + warnings are passed through. + +In practice, tests run from the numpy repo are run in development mode with +``spin``, through the standard ``spin test`` invocation or from an in-place +build with ``pytest numpy``. + +This module is imported by every numpy subpackage, so it lies at the top level +to avoid circular import problems. For the same reason, it contains no numpy +imports at module scope, instead importing numpy within function calls.
+""" +import os +import sys + +__all__ = ['PytestTester'] + + +def _show_numpy_info(): + import numpy as np + + print(f"NumPy version {np.__version__}") + info = np.lib._utils_impl._opt_info() + print("NumPy CPU features: ", (info or 'nothing enabled')) + + +class PytestTester: + """ + Pytest test runner. + + A test function is typically added to a package's __init__.py like so:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__).test + del PytestTester + + Calling this test function finds and runs all tests associated with the + module and all its sub-modules. + + Attributes + ---------- + module_name : str + Full path to the package to test. + + Parameters + ---------- + module_name : module name + The name of the module to test. + + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + + """ + def __init__(self, module_name): + self.module_name = module_name + self.__module__ = module_name + + def __call__(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, durations=-1, tests=None): + """ + Run tests for module using pytest. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Identifies the tests to run. When set to 'fast', tests decorated + with `pytest.mark.slow` are skipped, when 'full', the slow marker + is ignored. + verbose : int, optional + Verbosity value for test outputs, in the range 1-3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytests. + doctests : bool, optional + .. note:: Not supported + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + Requires installation of (pip) pytest-cov. + durations : int, optional + If < 0, do nothing, If 0, report time of all tests, if > 0, + report the time of the slowest `timer` tests. Default is -1. + tests : test or list of tests + Tests to be executed with pytest '--pyargs' + + Returns + ------- + result : bool + Return True on success, false otherwise. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for + it. For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + ... + 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds + >>> result + True + + """ + import warnings + + import pytest + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + # setup the pytest arguments + pytest_args = ["-l"] + + # offset verbosity. The "-q" cancels a "-v". + pytest_args += ["-q"] + + if sys.version_info < (3, 12): + with warnings.catch_warnings(): + warnings.simplefilter("always") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + from numpy.distutils import cpuinfo # noqa: F401 + + # Filter out annoying import messages. Want these in both develop and + # release mode. 
+ pytest_args += [ + "-W ignore:Not importing directory", + "-W ignore:numpy.dtype size changed", + "-W ignore:numpy.ufunc size changed", + "-W ignore::UserWarning:cpuinfo", + ] + + # When testing matrices, ignore their PendingDeprecationWarnings + pytest_args += [ + "-W ignore:the matrix subclass is not", + "-W ignore:Importing from numpy.matlib is", + ] + + if doctests: + pytest_args += ["--doctest-modules"] + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose > 1: + pytest_args += ["-" + "v" * (verbose - 1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + # not importing at the top level to avoid circular import of module + from numpy.testing import IS_PYPY + if IS_PYPY: + pytest_args += ["-m", "not slow and not slow_pypy"] + else: + pytest_args += ["-m", "not slow"] + + elif label != "full": + pytest_args += ["-m", label] + + if durations >= 0: + pytest_args += [f"--durations={durations}"] + + if tests is None: + tests = [self.module_name] + + pytest_args += ["--pyargs"] + list(tests) + + # run tests. + _show_numpy_info() + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return code == 0 diff --git a/.venv/lib/python3.12/site-packages/numpy/_pytesttester.pyi b/.venv/lib/python3.12/site-packages/numpy/_pytesttester.pyi new file mode 100644 index 0000000..a12abb1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_pytesttester.pyi @@ -0,0 +1,18 @@ +from collections.abc import Iterable +from typing import Literal as L + +__all__ = ["PytestTester"] + +class PytestTester: + module_name: str + def __init__(self, module_name: str) -> None: ... + def __call__( + self, + label: L["fast", "full"] = ..., + verbose: int = ..., + extra_argv: Iterable[str] | None = ..., + doctests: L[False] = ..., + coverage: bool = ..., + durations: int = ..., + tests: Iterable[str] | None = ..., + ) -> bool: ... 
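For orientation, here is a minimal editor's sketch (not part of the vendored sources) of how the ``PytestTester`` above is typically wired up and what a call expands to, mirroring the boilerplate quoted in its module docstring:

# In a subpackage's __init__.py, per the boilerplate documented above:
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester

# Calling e.g. numpy.lib.test() then builds a pytest invocation roughly like
#   pytest -l -q -W "ignore:..." -m "not slow" --pyargs numpy.lib
# and the call returns True exactly when pytest exits with code 0.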
diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__init__.py b/.venv/lib/python3.12/site-packages/numpy/_typing/__init__.py new file mode 100644 index 0000000..16a7eee --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/__init__.py @@ -0,0 +1,148 @@ +"""Private counterpart of ``numpy.typing``.""" + +from ._array_like import ArrayLike as ArrayLike +from ._array_like import NDArray as NDArray +from ._array_like import _ArrayLike as _ArrayLike +from ._array_like import _ArrayLikeAnyString_co as _ArrayLikeAnyString_co +from ._array_like import _ArrayLikeBool_co as _ArrayLikeBool_co +from ._array_like import _ArrayLikeBytes_co as _ArrayLikeBytes_co +from ._array_like import _ArrayLikeComplex128_co as _ArrayLikeComplex128_co +from ._array_like import _ArrayLikeComplex_co as _ArrayLikeComplex_co +from ._array_like import _ArrayLikeDT64_co as _ArrayLikeDT64_co +from ._array_like import _ArrayLikeFloat64_co as _ArrayLikeFloat64_co +from ._array_like import _ArrayLikeFloat_co as _ArrayLikeFloat_co +from ._array_like import _ArrayLikeInt as _ArrayLikeInt +from ._array_like import _ArrayLikeInt_co as _ArrayLikeInt_co +from ._array_like import _ArrayLikeNumber_co as _ArrayLikeNumber_co +from ._array_like import _ArrayLikeObject_co as _ArrayLikeObject_co +from ._array_like import _ArrayLikeStr_co as _ArrayLikeStr_co +from ._array_like import _ArrayLikeString_co as _ArrayLikeString_co +from ._array_like import _ArrayLikeTD64_co as _ArrayLikeTD64_co +from ._array_like import _ArrayLikeUInt_co as _ArrayLikeUInt_co +from ._array_like import _ArrayLikeVoid_co as _ArrayLikeVoid_co +from ._array_like import _FiniteNestedSequence as _FiniteNestedSequence +from ._array_like import _SupportsArray as _SupportsArray +from ._array_like import _SupportsArrayFunc as _SupportsArrayFunc + +# +from ._char_codes import _BoolCodes as _BoolCodes +from ._char_codes import _ByteCodes as _ByteCodes +from ._char_codes import _BytesCodes as _BytesCodes +from ._char_codes import _CDoubleCodes as _CDoubleCodes +from ._char_codes import _CharacterCodes as _CharacterCodes +from ._char_codes import _CLongDoubleCodes as _CLongDoubleCodes +from ._char_codes import _Complex64Codes as _Complex64Codes +from ._char_codes import _Complex128Codes as _Complex128Codes +from ._char_codes import _ComplexFloatingCodes as _ComplexFloatingCodes +from ._char_codes import _CSingleCodes as _CSingleCodes +from ._char_codes import _DoubleCodes as _DoubleCodes +from ._char_codes import _DT64Codes as _DT64Codes +from ._char_codes import _FlexibleCodes as _FlexibleCodes +from ._char_codes import _Float16Codes as _Float16Codes +from ._char_codes import _Float32Codes as _Float32Codes +from ._char_codes import _Float64Codes as _Float64Codes +from ._char_codes import _FloatingCodes as _FloatingCodes +from ._char_codes import _GenericCodes as _GenericCodes +from ._char_codes import _HalfCodes as _HalfCodes +from ._char_codes import _InexactCodes as _InexactCodes +from ._char_codes import _Int8Codes as _Int8Codes +from ._char_codes import _Int16Codes as _Int16Codes +from ._char_codes import _Int32Codes as _Int32Codes +from ._char_codes import _Int64Codes as _Int64Codes +from ._char_codes import _IntCCodes as _IntCCodes +from ._char_codes import _IntCodes as _IntCodes +from ._char_codes import _IntegerCodes as _IntegerCodes +from ._char_codes import _IntPCodes as _IntPCodes +from ._char_codes import _LongCodes as _LongCodes +from ._char_codes import _LongDoubleCodes as _LongDoubleCodes +from ._char_codes import _LongLongCodes as 
_LongLongCodes +from ._char_codes import _NumberCodes as _NumberCodes +from ._char_codes import _ObjectCodes as _ObjectCodes +from ._char_codes import _ShortCodes as _ShortCodes +from ._char_codes import _SignedIntegerCodes as _SignedIntegerCodes +from ._char_codes import _SingleCodes as _SingleCodes +from ._char_codes import _StrCodes as _StrCodes +from ._char_codes import _StringCodes as _StringCodes +from ._char_codes import _TD64Codes as _TD64Codes +from ._char_codes import _UByteCodes as _UByteCodes +from ._char_codes import _UInt8Codes as _UInt8Codes +from ._char_codes import _UInt16Codes as _UInt16Codes +from ._char_codes import _UInt32Codes as _UInt32Codes +from ._char_codes import _UInt64Codes as _UInt64Codes +from ._char_codes import _UIntCCodes as _UIntCCodes +from ._char_codes import _UIntCodes as _UIntCodes +from ._char_codes import _UIntPCodes as _UIntPCodes +from ._char_codes import _ULongCodes as _ULongCodes +from ._char_codes import _ULongLongCodes as _ULongLongCodes +from ._char_codes import _UnsignedIntegerCodes as _UnsignedIntegerCodes +from ._char_codes import _UShortCodes as _UShortCodes +from ._char_codes import _VoidCodes as _VoidCodes + +# +from ._dtype_like import DTypeLike as DTypeLike +from ._dtype_like import _DTypeLike as _DTypeLike +from ._dtype_like import _DTypeLikeBool as _DTypeLikeBool +from ._dtype_like import _DTypeLikeBytes as _DTypeLikeBytes +from ._dtype_like import _DTypeLikeComplex as _DTypeLikeComplex +from ._dtype_like import _DTypeLikeComplex_co as _DTypeLikeComplex_co +from ._dtype_like import _DTypeLikeDT64 as _DTypeLikeDT64 +from ._dtype_like import _DTypeLikeFloat as _DTypeLikeFloat +from ._dtype_like import _DTypeLikeInt as _DTypeLikeInt +from ._dtype_like import _DTypeLikeObject as _DTypeLikeObject +from ._dtype_like import _DTypeLikeStr as _DTypeLikeStr +from ._dtype_like import _DTypeLikeTD64 as _DTypeLikeTD64 +from ._dtype_like import _DTypeLikeUInt as _DTypeLikeUInt +from ._dtype_like import _DTypeLikeVoid as _DTypeLikeVoid +from ._dtype_like import _SupportsDType as _SupportsDType +from ._dtype_like import _VoidDTypeLike as _VoidDTypeLike + +# +from ._nbit import _NBitByte as _NBitByte +from ._nbit import _NBitDouble as _NBitDouble +from ._nbit import _NBitHalf as _NBitHalf +from ._nbit import _NBitInt as _NBitInt +from ._nbit import _NBitIntC as _NBitIntC +from ._nbit import _NBitIntP as _NBitIntP +from ._nbit import _NBitLong as _NBitLong +from ._nbit import _NBitLongDouble as _NBitLongDouble +from ._nbit import _NBitLongLong as _NBitLongLong +from ._nbit import _NBitShort as _NBitShort +from ._nbit import _NBitSingle as _NBitSingle + +# +from ._nbit_base import ( + NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +) +from ._nbit_base import _8Bit as _8Bit +from ._nbit_base import _16Bit as _16Bit +from ._nbit_base import _32Bit as _32Bit +from ._nbit_base import _64Bit as _64Bit +from ._nbit_base import _96Bit as _96Bit +from ._nbit_base import _128Bit as _128Bit + +# +from ._nested_sequence import _NestedSequence as _NestedSequence + +# +from ._scalars import _BoolLike_co as _BoolLike_co +from ._scalars import _CharLike_co as _CharLike_co +from ._scalars import _ComplexLike_co as _ComplexLike_co +from ._scalars import _FloatLike_co as _FloatLike_co +from ._scalars import _IntLike_co as _IntLike_co +from ._scalars import _NumberLike_co as _NumberLike_co +from ._scalars import _ScalarLike_co as _ScalarLike_co +from ._scalars import _TD64Like_co as _TD64Like_co +from ._scalars import _UIntLike_co 
as _UIntLike_co +from ._scalars import _VoidLike_co as _VoidLike_co + +# +from ._shape import _AnyShape as _AnyShape +from ._shape import _Shape as _Shape +from ._shape import _ShapeLike as _ShapeLike + +# +from ._ufunc import _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1 +from ._ufunc import _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1 +from ._ufunc import _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2 +from ._ufunc import _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1 +from ._ufunc import _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2 diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..67d4164 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-312.pyc new file mode 100644 index 0000000..e44c917 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_array_like.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_array_like.cpython-312.pyc new file mode 100644 index 0000000..b2f82de Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_array_like.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-312.pyc new file mode 100644 index 0000000..afdba3e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-312.pyc new file mode 100644 index 0000000..9253e6b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-312.pyc new file mode 100644 index 0000000..2c06b47 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nbit.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nbit.cpython-312.pyc new file mode 100644 index 0000000..2ece775 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nbit.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nbit_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nbit_base.cpython-312.pyc new file mode 100644 index 0000000..3794b7a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nbit_base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-312.pyc new file mode 100644 index 0000000..67f2737 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_scalars.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_scalars.cpython-312.pyc new file mode 100644 index 0000000..85f31c2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_scalars.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_shape.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_shape.cpython-312.pyc new file mode 100644 index 0000000..b439f4b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_shape.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_ufunc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_ufunc.cpython-312.pyc new file mode 100644 index 0000000..c625672 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_typing/__pycache__/_ufunc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_add_docstring.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_add_docstring.py new file mode 100644 index 0000000..5330a6b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_add_docstring.py @@ -0,0 +1,153 @@ +"""A module for creating docstrings for sphinx ``data`` domains.""" + +import re +import textwrap + +from ._array_like import NDArray + +_docstrings_list = [] + + +def add_newdoc(name: str, value: str, doc: str) -> None: + """Append ``_docstrings_list`` with a docstring for `name`. + + Parameters + ---------- + name : str + The name of the object. + value : str + A string-representation of the object. + doc : str + The docstring of the object. + + """ + _docstrings_list.append((name, value, doc)) + + +def _parse_docstrings() -> str: + """Convert all docstrings in ``_docstrings_list`` into a single + sphinx-legible text block. + + """ + type_list_ret = [] + for name, value, doc in _docstrings_list: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + indent = "" + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + if prev == "Examples": + indent = "" + new_lines.append(f'{m.group(1)}.. rubric:: {prev}') + else: + indent = 4 * " " + new_lines.append(f'{m.group(1)}.. admonition:: {prev}') + new_lines.append("") + else: + new_lines.append(f"{indent}{line}") + + s = "\n".join(new_lines) + s_block = f""".. data:: {name}\n :value: {value}\n {s}""" + type_list_ret.append(s_block) + return "\n".join(type_list_ret) + + +add_newdoc('ArrayLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into an `~numpy.ndarray`. + + Among others this includes the likes of: + + * Scalars. + * (Nested) sequences. + * Objects implementing the `~class.__array__` protocol. + + .. versionadded:: 1.20 + + See Also + -------- + :term:`array_like`: + Any scalar or sequence that can be interpreted as an ndarray. + + Examples + -------- + .. 
code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_array(a: npt.ArrayLike) -> np.ndarray: + ... return np.array(a) + + """) + +add_newdoc('DTypeLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into a `~numpy.dtype`. + + Among others this includes the likes of: + + * :class:`type` objects. + * Character codes or the names of :class:`type` objects. + * Objects with the ``.dtype`` attribute. + + .. versionadded:: 1.20 + + See Also + -------- + :ref:`Specifying and constructing data types <arrays.dtypes.constructing>` + A comprehensive overview of all objects that can be coerced + into data types. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_dtype(d: npt.DTypeLike) -> np.dtype: + ... return np.dtype(d) + + """) + +add_newdoc('NDArray', repr(NDArray), + """ + A `np.ndarray[tuple[Any, ...], np.dtype[ScalarT]] <numpy.ndarray>` + type alias :term:`generic <generic type>` w.r.t. its + `dtype.type <numpy.dtype.type>`. + + Can be used during runtime for typing arrays with a given dtype + and unspecified shape. + + .. versionadded:: 1.21 + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> print(npt.NDArray) + numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]] + + >>> print(npt.NDArray[np.float64]) + numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]] + + >>> NDArrayInt = npt.NDArray[np.int_] + >>> a: NDArrayInt = np.arange(10) + + >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: + ... return np.array(a) + + """) + +_docstrings = _parse_docstrings() diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_array_like.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_array_like.py new file mode 100644 index 0000000..6b071f4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_array_like.py @@ -0,0 +1,106 @@ +import sys +from collections.abc import Callable, Collection, Sequence +from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable + +import numpy as np +from numpy import dtype + +from ._nbit_base import _32Bit, _64Bit +from ._nested_sequence import _NestedSequence +from ._shape import _AnyShape + +if TYPE_CHECKING: + StringDType = np.dtypes.StringDType +else: + # at runtime outside of type checking importing this from numpy.dtypes + # would lead to a circular import + from numpy._core.multiarray import StringDType + +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) +_DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) + +NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]] + +# The `_SupportsArray` protocol only cares about the default dtype +# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned +# array. +# Concrete implementations of the protocol are responsible for adding +# any and all remaining overloads +@runtime_checkable +class _SupportsArray(Protocol[_DTypeT_co]): + def __array__(self) -> np.ndarray[Any, _DTypeT_co]: ... + + +@runtime_checkable +class _SupportsArrayFunc(Protocol): + """A protocol class representing `~class.__array_function__`.""" + def __array_function__( + self, + func: Callable[..., Any], + types: Collection[type[Any]], + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> object: ...
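As a quick illustration of the ``_SupportsArray`` protocol defined above (an editor's sketch, not vendored code; the class name ``BoxedData`` is invented, and only an installed NumPy providing this ``numpy._typing._array_like`` module is assumed):

import numpy as np
from numpy._typing._array_like import _SupportsArray

class BoxedData:
    """Hypothetical container that opts in to array coercion."""
    def __init__(self, data):
        self._data = data

    def __array__(self):
        # The protocol only requires the default-dtype conversion.
        return np.asarray(self._data)

box = BoxedData([1.0, 2.0])
print(isinstance(box, _SupportsArray))  # True; the protocol is @runtime_checkable
print(np.asarray(box).dtype)            # float64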
+ + +# TODO: Wait until mypy supports recursive objects in combination with typevars +_FiniteNestedSequence: TypeAlias = ( + _T + | Sequence[_T] + | Sequence[Sequence[_T]] + | Sequence[Sequence[Sequence[_T]]] + | Sequence[Sequence[Sequence[Sequence[_T]]]] +) + +# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` +_ArrayLike: TypeAlias = ( + _SupportsArray[dtype[_ScalarT]] + | _NestedSequence[_SupportsArray[dtype[_ScalarT]]] +) + +# A union representing array-like objects; consists of two typevars: +# One representing types that can be parametrized w.r.t. `np.dtype` +# and another one for the rest +_DualArrayLike: TypeAlias = ( + _SupportsArray[_DTypeT] + | _NestedSequence[_SupportsArray[_DTypeT]] + | _T + | _NestedSequence[_T] +) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _Buffer +else: + @runtime_checkable + class _Buffer(Protocol): + def __buffer__(self, flags: int, /) -> memoryview: ... + +ArrayLike: TypeAlias = _Buffer | _DualArrayLike[dtype[Any], complex | bytes | str] + +# `ArrayLike<X>_co`: array-like objects that can be coerced into `X` +# given the casting rules `same_kind` +_ArrayLikeBool_co: TypeAlias = _DualArrayLike[dtype[np.bool], bool] +_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.unsignedinteger], bool] +_ArrayLikeInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer], int] +_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.floating], float] +_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.number], complex] +_ArrayLikeNumber_co: TypeAlias = _ArrayLikeComplex_co +_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.timedelta64], int] +_ArrayLikeDT64_co: TypeAlias = _ArrayLike[np.datetime64] +_ArrayLikeObject_co: TypeAlias = _ArrayLike[np.object_] + +_ArrayLikeVoid_co: TypeAlias = _ArrayLike[np.void] +_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[dtype[np.bytes_], bytes] +_ArrayLikeStr_co: TypeAlias = _DualArrayLike[dtype[np.str_], str] +_ArrayLikeString_co: TypeAlias = _DualArrayLike[StringDType, str] +_ArrayLikeAnyString_co: TypeAlias = _DualArrayLike[dtype[np.character] | StringDType, bytes | str] + +__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float] +_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex] + +# NOTE: This includes `builtins.bool`, but not `numpy.bool`. +_ArrayLikeInt: TypeAlias = _DualArrayLike[dtype[np.integer], int] diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_callable.pyi b/.venv/lib/python3.12/site-packages/numpy/_typing/_callable.pyi new file mode 100644 index 0000000..21df1d9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_callable.pyi @@ -0,0 +1,366 @@ +""" +A module with various ``typing.Protocol`` subclasses that implement +the ``__call__`` magic method. + +See the `Mypy documentation`_ on protocols for more details. + +..
_`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols + +""" + +from typing import ( + Any, + NoReturn, + Protocol, + TypeAlias, + TypeVar, + final, + overload, + type_check_only, +) + +import numpy as np +from numpy import ( + complex128, + complexfloating, + float64, + floating, + generic, + int8, + int_, + integer, + number, + signedinteger, + unsignedinteger, +) + +from . import NBitBase +from ._array_like import NDArray +from ._nbit import _NBitInt +from ._nested_sequence import _NestedSequence +from ._scalars import ( + _BoolLike_co, + _IntLike_co, + _NumberLike_co, +) + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T1_contra = TypeVar("_T1_contra", contravariant=True) +_T2_contra = TypeVar("_T2_contra", contravariant=True) + +_2Tuple: TypeAlias = tuple[_T1, _T1] + +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) + +_IntType = TypeVar("_IntType", bound=integer) +_FloatType = TypeVar("_FloatType", bound=floating) +_NumberType = TypeVar("_NumberType", bound=number) +_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) +_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) + +@type_check_only +class _BoolOp(Protocol[_GenericType_co]): + @overload + def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +@type_check_only +class _BoolBitOp(Protocol[_GenericType_co]): + @overload + def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: _IntType, /) -> _IntType: ... + +@type_check_only +class _BoolSub(Protocol): + # Note that `other: bool` is absent here + @overload + def __call__(self, other: bool, /) -> NoReturn: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +@type_check_only +class _BoolTrueDiv(Protocol): + @overload + def __call__(self, other: float | _IntLike_co, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +@type_check_only +class _BoolMod(Protocol): + @overload + def __call__(self, other: _BoolLike_co, /) -> int8: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: _IntType, /) -> _IntType: ... + @overload + def __call__(self, other: _FloatType, /) -> _FloatType: ... + +@type_check_only +class _BoolDivMod(Protocol): + @overload + def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... + @overload # platform dependent + def __call__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[np.float64]: ... + @overload + def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... + @overload + def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... 
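The ``_Bool*`` protocols above encode, as overload return types, how NumPy promotes a boolean scalar operand. A short runtime sketch of the behavior they describe (an editor's illustration, not vendored code; it assumes NumPy 2.x scalar names such as ``np.True_``):

import numpy as np

x = np.True_                    # a NumPy boolean scalar
print(type(x / 1).__name__)     # float64 (_BoolTrueDiv: int -> float64)
print(type(x % True).__name__)  # int8    (_BoolMod: bool -> int8)
q, r = divmod(x, np.int16(3))
print(type(q).__name__, type(r).__name__)  # int16 int16 (_BoolDivMod: _IntType -> _2Tuple[_IntType])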
+ +@type_check_only +class _IntTrueDiv(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: complex, / + ) -> complexfloating[_NBit1, _NBit1] | complex128: ... + @overload + def __call__( + self, other: integer[_NBit2], / + ) -> floating[_NBit1] | floating[_NBit2]: ... + +@type_check_only +class _UnsignedIntOp(Protocol[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + @overload + def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... + @overload + def __call__(self, other: signedinteger, /) -> Any: ... + +@type_check_only +class _UnsignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger: ... + @overload + def __call__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... + +@type_check_only +class _UnsignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, other: int | signedinteger, /) -> Any: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... + +@type_check_only +class _UnsignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... + @overload + def __call__(self, other: int | signedinteger, /) -> _2Tuple[Any]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... + +@type_check_only +class _SignedIntOp(Protocol[_NBit1]): + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... + +@type_check_only +class _SignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], / + ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... + +@type_check_only +class _SignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... 
+ @overload + def __call__( + self, other: signedinteger[_NBit2], / + ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... + +@type_check_only +class _SignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... + @overload + def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[int_]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], / + ) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[signedinteger[_NBit2]]: ... + +@type_check_only +class _FloatOp(Protocol[_NBit1]): + @overload + def __call__(self, other: int, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: complex, / + ) -> complexfloating[_NBit1, _NBit1] | complex128: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> floating[_NBit1] | floating[_NBit2]: ... + +@type_check_only +class _FloatMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> floating[_NBit1] | floating[_NBit2]: ... + +class _FloatDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... + @overload + def __call__( + self, other: int, / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBitInt]]: ... + @overload + def __call__( + self, other: float, / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... + +@type_check_only +class _NumberOp(Protocol): + def __call__(self, other: _NumberLike_co, /) -> Any: ... + +@final +@type_check_only +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGT, /) -> np.bool: ... + +@final +@type_check_only +class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGE], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGE, /) -> np.bool: ... + +@final +@type_check_only +class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... 
+ @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsLT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsLT, /) -> np.bool: ... + +@final +@type_check_only +class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGT, /) -> np.bool: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_char_codes.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_char_codes.py new file mode 100644 index 0000000..7b6fad2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_char_codes.py @@ -0,0 +1,213 @@ +from typing import Literal + +_BoolCodes = Literal[ + "bool", "bool_", + "?", "|?", "=?", "<?", ">?", + "b1", "|b1", "=b1", "<b1", ">b1", +] # fmt: skip + +_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "<u1", ">u1"] +_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "<u2", ">u2"] +_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "<u4", ">u4"] +_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "<u8", ">u8"] + +_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "<i1", ">i1"] +_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "<i2", ">i2"] +_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "<i4", ">i4"] +_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "<i8", ">i8"] + +_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "<f2", ">f2"] +_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "<f4", ">f4"] +_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "<f8", ">f8"] + +_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "<c8", ">c8"] +_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "<c16", ">c16"] + +_ByteCodes = Literal["byte", "b", "|b", "=b", "<b", ">b"] +_ShortCodes = Literal["short", "h", "|h", "=h", "<h", ">h"] +_IntCCodes = Literal["intc", "i", "|i", "=i", "<i", ">i"] +_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "<n", ">n"] +_LongCodes = Literal["long", "l", "|l", "=l", "<l", ">l"] +_IntCodes = _IntPCodes +_LongLongCodes = Literal["longlong", "q", "|q", "=q", "<q", ">q"] + +_UByteCodes = Literal["ubyte", "B", "|B", "=B", "<B", ">B"] +_UShortCodes = Literal["ushort", "H", "|H", "=H", "<H", ">H"] +_UIntCCodes = Literal["uintc", "I", "|I", "=I", "<I", ">I"] +_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "<N", ">N"] +_ULongCodes = Literal["ulong", "L", "|L", "=L", "<L", ">L"] +_UIntCodes = _UIntPCodes +_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "<Q", ">Q"] + +_HalfCodes = Literal["half", "e", "|e", "=e", "<e", ">e"] +_SingleCodes = Literal["single", "f", "|f", "=f", "<f", ">f"] +_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "<d", ">d"] +_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "<g", ">g"] + +_CSingleCodes = Literal["csingle", "F", "|F", "=F", "<F", ">F"] +_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "<D", ">D"] +_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "<G", ">G"] + +_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "<U", ">U"] +_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "<S", ">S"] +_VoidCodes = Literal["void", "V", "|V", "=V", "<V", ">V"] +_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "<O", ">O"] + +_DT64Codes = Literal[ + "datetime64", "|datetime64", "=datetime64", + "<datetime64", ">datetime64", + "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]", + "<datetime64[Y]", ">datetime64[Y]", + "datetime64[M]", "|datetime64[M]", "=datetime64[M]", + "<datetime64[M]", ">datetime64[M]", + "datetime64[W]", "|datetime64[W]", "=datetime64[W]", + "<datetime64[W]", ">datetime64[W]", + "datetime64[D]", "|datetime64[D]", "=datetime64[D]", + "<datetime64[D]", ">datetime64[D]", + "datetime64[h]", "|datetime64[h]", "=datetime64[h]", + "<datetime64[h]", ">datetime64[h]", + "datetime64[m]", "|datetime64[m]", "=datetime64[m]", + "<datetime64[m]", ">datetime64[m]", + "datetime64[s]", "|datetime64[s]", "=datetime64[s]", + "<datetime64[s]", ">datetime64[s]", + "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]", + "<datetime64[ms]", ">datetime64[ms]", + "datetime64[us]", "|datetime64[us]", "=datetime64[us]", + "<datetime64[us]", ">datetime64[us]", + "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]", + "<datetime64[ns]", ">datetime64[ns]", + "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]", + "<datetime64[ps]", ">datetime64[ps]", + "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]", + "<datetime64[fs]", ">datetime64[fs]", + "datetime64[as]", "|datetime64[as]", "=datetime64[as]", + "<datetime64[as]", ">datetime64[as]", + "M", "|M", "=M", "<M", ">M", + "M8", "|M8", "=M8", "<M8", ">M8", + "M8[Y]", "|M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]", + "M8[M]", "|M8[M]", "=M8[M]", "<M8[M]", ">M8[M]", + "M8[W]", "|M8[W]", "=M8[W]", "<M8[W]", ">M8[W]", + "M8[D]", "|M8[D]", "=M8[D]", "<M8[D]", ">M8[D]", + "M8[h]", "|M8[h]", "=M8[h]", "<M8[h]", ">M8[h]", + "M8[m]", "|M8[m]", "=M8[m]", "<M8[m]", ">M8[m]", + "M8[s]", "|M8[s]", "=M8[s]", "<M8[s]", ">M8[s]", + "M8[ms]", "|M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]", + "M8[us]", "|M8[us]", "=M8[us]", "<M8[us]", ">M8[us]", + "M8[ns]", "|M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]", + "M8[ps]", "|M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]", + "M8[fs]", "|M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]", + "M8[as]", "|M8[as]", "=M8[as]", "<M8[as]", ">M8[as]", +] +_TD64Codes = Literal[ + "timedelta64", "|timedelta64", "=timedelta64", + "<timedelta64", ">timedelta64", + "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]", + "<timedelta64[Y]", ">timedelta64[Y]", + "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]", + "<timedelta64[M]", ">timedelta64[M]", + "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]", + "<timedelta64[W]", ">timedelta64[W]", + "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]", + "<timedelta64[D]", ">timedelta64[D]", + "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]", + "<timedelta64[h]", ">timedelta64[h]", + "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]", + "<timedelta64[m]", ">timedelta64[m]", + "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]", + "<timedelta64[s]", ">timedelta64[s]", + "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]", + "<timedelta64[ms]", ">timedelta64[ms]", + "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]", + "<timedelta64[us]", ">timedelta64[us]", + "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]", + "<timedelta64[ns]", ">timedelta64[ns]", + "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]", + "<timedelta64[ps]", ">timedelta64[ps]", + "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]", + "<timedelta64[fs]", ">timedelta64[fs]", + "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]", + "<timedelta64[as]", ">timedelta64[as]", + "m", "|m", "=m", "<m", ">m", + "m8", "|m8", "=m8", "<m8", ">m8", + "m8[Y]", "|m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]", + "m8[M]", "|m8[M]", "=m8[M]", "<m8[M]", ">m8[M]", + "m8[W]", "|m8[W]", "=m8[W]", "<m8[W]", ">m8[W]", + "m8[D]", "|m8[D]", "=m8[D]", "<m8[D]", ">m8[D]", + "m8[h]", "|m8[h]", "=m8[h]", "<m8[h]", ">m8[h]", + "m8[m]", "|m8[m]", "=m8[m]", "<m8[m]", ">m8[m]", + "m8[s]", "|m8[s]", "=m8[s]", "<m8[s]", ">m8[s]", + "m8[ms]", "|m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]", + "m8[us]", "|m8[us]", "=m8[us]", "<m8[us]", ">m8[us]", + "m8[ns]", "|m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]", + "m8[ps]", "|m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]", + "m8[fs]", "|m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]", + "m8[as]", "|m8[as]", "=m8[as]", "<m8[as]", ">m8[as]", +] + +# NOTE: `StringDType` has no scalar type, and therefore has no name that can +# be passed to the `dtype` constructor +_StringCodes = Literal["T", "|T", "=T", "<T", ">T"] + +# NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't +# the case for a `Union` of `Literal`s. +# So even though they're equivalent when type-checking, they differ at runtime.
+# Another advantage of nesting is that they always have a "flat" +# `Literal.__args__`, which is a tuple of *literally* all its literal values. + +_UnsignedIntegerCodes = Literal[ + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, +] +_SignedIntegerCodes = Literal[ + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _LongCodes, + _LongLongCodes, +] +_FloatingCodes = Literal[ + _Float16Codes, + _Float32Codes, + _Float64Codes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, +] +_ComplexFloatingCodes = Literal[ + _Complex64Codes, + _Complex128Codes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, +] +_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] +_InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] +_NumberCodes = Literal[_IntegerCodes, _InexactCodes] + +_CharacterCodes = Literal[_StrCodes, _BytesCodes] +_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] + +_GenericCodes = Literal[ + _BoolCodes, + _NumberCodes, + _FlexibleCodes, + _DT64Codes, + _TD64Codes, + _ObjectCodes, + # TODO: add `_StringCodes` once it has a scalar type + # _StringCodes, +] diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_dtype_like.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_dtype_like.py new file mode 100644 index 0000000..c406b30 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_dtype_like.py @@ -0,0 +1,114 @@ +from collections.abc import Sequence # noqa: F811 +from typing import ( + Any, + Protocol, + TypeAlias, + TypedDict, + TypeVar, + runtime_checkable, +) + +import numpy as np + +from ._char_codes import ( + _BoolCodes, + _BytesCodes, + _ComplexFloatingCodes, + _DT64Codes, + _FloatingCodes, + _NumberCodes, + _ObjectCodes, + _SignedIntegerCodes, + _StrCodes, + _TD64Codes, + _UnsignedIntegerCodes, + _VoidCodes, +) + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) + +_DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types + + +# Mandatory keys +class _DTypeDictBase(TypedDict): + names: Sequence[str] + formats: Sequence[_DTypeLikeNested] + + +# Mandatory + optional keys +class _DTypeDict(_DTypeDictBase, total=False): + # Only `str` elements are usable as indexing aliases, + # but `titles` can in principle accept any object + offsets: Sequence[int] + titles: Sequence[Any] + itemsize: int + aligned: bool + + +# A protocol for anything with the dtype attribute +@runtime_checkable +class _SupportsDType(Protocol[_DTypeT_co]): + @property + def dtype(self) -> _DTypeT_co: ... + + +# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] + + +# Would create a dtype[np.void] +_VoidDTypeLike: TypeAlias = ( + # If a tuple, then it can be either: + # - (flexible_dtype, itemsize) + # - (fixed_dtype, shape) + # - (base_dtype, new_dtype) + # But because `_DTypeLikeNested = Any`, the first two cases are redundant + + # tuple[_DTypeLikeNested, int] | tuple[_DTypeLikeNested, _ShapeLike] | + tuple[_DTypeLikeNested, _DTypeLikeNested] + + # [(field_name, field_dtype, field_shape), ...] + # The type here is quite broad because NumPy accepts quite a wide + # range of inputs inside the list; see the tests for some examples.
+ | list[Any] + + # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., 'itemsize': ...} + | _DTypeDict +) + +# Aliases for commonly used dtype-like objects. +# Note that the precision of `np.number` subclasses is ignored herein. +_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes +_DTypeLikeInt: TypeAlias = ( + type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes +) +_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes +_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes +_DTypeLikeComplex: TypeAlias = ( + type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes +) +_DTypeLikeComplex_co: TypeAlias = ( + type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes +) +_DTypeLikeDT64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes +_DTypeLikeTD64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes +_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes +_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes +_DTypeLikeVoid: TypeAlias = ( + type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes +) +_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes + + +# Anything that can be coerced into numpy.dtype. +# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html +DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str | None + +# NOTE: while it is possible to provide the dtype as a dict of +# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), +# this syntax is officially discouraged and +# therefore not included in the type-union defining `DTypeLike`. +# +# See https://github.com/numpy/numpy/issues/16891 for more details. diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_extended_precision.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_extended_precision.py new file mode 100644 index 0000000..c707e72 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_extended_precision.py @@ -0,0 +1,15 @@ +"""A module with platform-specific extended precision +`numpy.number` subclasses. + +The subclasses are defined here (instead of ``__init__.pyi``) such +that they can be imported conditionally via numpy's mypy plugin. +""" + +import numpy as np + +from .
import _96Bit, _128Bit + +float96 = np.floating[_96Bit] +float128 = np.floating[_128Bit] +complex192 = np.complexfloating[_96Bit, _96Bit] +complex256 = np.complexfloating[_128Bit, _128Bit] diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit.py new file mode 100644 index 0000000..60bce32 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit.py @@ -0,0 +1,19 @@ +"""A module with the precisions of platform-specific `~numpy.number`s.""" + +from typing import TypeAlias + +from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit + +# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin +_NBitByte: TypeAlias = _8Bit +_NBitShort: TypeAlias = _16Bit +_NBitIntC: TypeAlias = _32Bit +_NBitIntP: TypeAlias = _32Bit | _64Bit +_NBitInt: TypeAlias = _NBitIntP +_NBitLong: TypeAlias = _32Bit | _64Bit +_NBitLongLong: TypeAlias = _64Bit + +_NBitHalf: TypeAlias = _16Bit +_NBitSingle: TypeAlias = _32Bit +_NBitDouble: TypeAlias = _64Bit +_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit_base.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit_base.py new file mode 100644 index 0000000..28d3e63 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit_base.py @@ -0,0 +1,94 @@ +"""A module with the precisions of generic `~numpy.number` types.""" +from typing import final + +from numpy._utils import set_module + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +@set_module("numpy.typing") +class NBitBase: + """ + A type representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose of static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + .. versionadded:: 1.20 + + .. deprecated:: 2.3 + Use ``@typing.overload`` or a ``TypeVar`` with a scalar-type as upper + bound, instead. + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating + a function that takes a float and integer of arbitrary precision + as arguments and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. code-block:: python + + >>> from typing import TypeVar, TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> S = TypeVar("S", bound=npt.NBitBase) + >>> T = TypeVar("T", bound=npt.NBitBase) + + >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... 
# note: out: numpy.floating[numpy.typing._64Bit*] + + """ + # Deprecated in NumPy 2.3, 2025-05-01 + + def __init_subclass__(cls) -> None: + allowed_names = { + "NBitBase", "_128Bit", "_96Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit" + } + if cls.__name__ not in allowed_names: + raise TypeError('cannot inherit from final class "NBitBase"') + super().__init_subclass__() + +@final +@set_module("numpy._typing") +# Silence errors about subclassing a `@final`-decorated class +class _128Bit(NBitBase): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _96Bit(_128Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _64Bit(_96Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _32Bit(_64Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _16Bit(_32Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _8Bit(_16Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit_base.pyi b/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit_base.pyi new file mode 100644 index 0000000..ccf8f5c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_nbit_base.pyi @@ -0,0 +1,40 @@ +# pyright: reportDeprecated=false +# pyright: reportGeneralTypeIssues=false +# mypy: disable-error-code=misc + +from typing import final + +from typing_extensions import deprecated + +# Deprecated in NumPy 2.3, 2025-05-01 +@deprecated( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", +) +@final +class NBitBase: ... + +@final +class _256Bit(NBitBase): ... + +@final +class _128Bit(_256Bit): ... + +@final +class _96Bit(_128Bit): ... + +@final +class _80Bit(_96Bit): ... + +@final +class _64Bit(_80Bit): ... + +@final +class _32Bit(_64Bit): ... + +@final +class _16Bit(_32Bit): ... + +@final +class _8Bit(_16Bit): ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_nested_sequence.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_nested_sequence.py new file mode 100644 index 0000000..e3362a9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_nested_sequence.py @@ -0,0 +1,79 @@ +"""A module containing the `_NestedSequence` protocol.""" + +from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable + +if TYPE_CHECKING: + from collections.abc import Iterator + +__all__ = ["_NestedSequence"] + +_T_co = TypeVar("_T_co", covariant=True) + + +@runtime_checkable +class _NestedSequence(Protocol[_T_co]): + """A protocol for representing nested sequences. + + Warning + ------- + `_NestedSequence` currently does not work in combination with typevars, + *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``. + + See Also + -------- + collections.abc.Sequence + ABCs for read-only and mutable :term:`sequences`. + + Examples + -------- + .. code-block:: python + + >>> from typing import TYPE_CHECKING + >>> import numpy as np + >>> from numpy._typing import _NestedSequence + + >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]: + ...
return np.asarray(seq).dtype + + >>> a = get_dtype([1.0]) + >>> b = get_dtype([[1.0]]) + >>> c = get_dtype([[[1.0]]]) + >>> d = get_dtype([[[[1.0]]]]) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + + """ + + def __len__(self, /) -> int: + """Implement ``len(self)``.""" + raise NotImplementedError + + def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]": + """Implement ``self[x]``.""" + raise NotImplementedError + + def __contains__(self, x: object, /) -> bool: + """Implement ``x in self``.""" + raise NotImplementedError + + def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": + """Implement ``iter(self)``.""" + raise NotImplementedError + + def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": + """Implement ``reversed(self)``.""" + raise NotImplementedError + + def count(self, value: Any, /) -> int: + """Return the number of occurrences of `value`.""" + raise NotImplementedError + + def index(self, value: Any, /) -> int: + """Return the first index of `value`.""" + raise NotImplementedError diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_scalars.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_scalars.py new file mode 100644 index 0000000..b0de66d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_scalars.py @@ -0,0 +1,20 @@ +from typing import Any, TypeAlias + +import numpy as np + +# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and +# `np.bytes_` are already subclasses of their builtin counterpart +_CharLike_co: TypeAlias = str | bytes + +# The `Like_co` type-aliases below represent all scalars that can be +# coerced into `` (with the casting rule `same_kind`) +_BoolLike_co: TypeAlias = bool | np.bool +_UIntLike_co: TypeAlias = bool | np.unsignedinteger | np.bool +_IntLike_co: TypeAlias = int | np.integer | np.bool +_FloatLike_co: TypeAlias = float | np.floating | np.integer | np.bool +_ComplexLike_co: TypeAlias = complex | np.number | np.bool +_NumberLike_co: TypeAlias = _ComplexLike_co +_TD64Like_co: TypeAlias = int | np.timedelta64 | np.integer | np.bool +# `_VoidLike_co` is technically not a scalar, but it's close enough +_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void +_ScalarLike_co: TypeAlias = complex | str | bytes | np.generic diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_shape.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_shape.py new file mode 100644 index 0000000..e297aef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_shape.py @@ -0,0 +1,8 @@ +from collections.abc import Sequence +from typing import Any, SupportsIndex, TypeAlias + +_Shape: TypeAlias = tuple[int, ...] +_AnyShape: TypeAlias = tuple[Any, ...] 
+ +# Anything that can be coerced to a shape tuple +_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_ufunc.py b/.venv/lib/python3.12/site-packages/numpy/_typing/_ufunc.py new file mode 100644 index 0000000..db52a1f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_ufunc.py @@ -0,0 +1,7 @@ +from numpy import ufunc + +_UFunc_Nin1_Nout1 = ufunc +_UFunc_Nin2_Nout1 = ufunc +_UFunc_Nin1_Nout2 = ufunc +_UFunc_Nin2_Nout2 = ufunc +_GUFunc_Nin2_Nout1 = ufunc diff --git a/.venv/lib/python3.12/site-packages/numpy/_typing/_ufunc.pyi b/.venv/lib/python3.12/site-packages/numpy/_typing/_ufunc.pyi new file mode 100644 index 0000000..766cde1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_typing/_ufunc.pyi @@ -0,0 +1,941 @@ +"""A module with private type-check-only `numpy.ufunc` subclasses. + +The signatures of the ufuncs are too varied to reasonably type +with a single class. So instead, `ufunc` has been expanded into +four private subclasses, one for each combination of +`~ufunc.nin` and `~ufunc.nout`. +""" + +from typing import ( + Any, + Generic, + Literal, + LiteralString, + NoReturn, + Protocol, + SupportsIndex, + TypeAlias, + TypedDict, + TypeVar, + Unpack, + overload, + type_check_only, +) + +import numpy as np +from numpy import _CastingKind, _OrderKACF, ufunc +from numpy.typing import NDArray + +from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._dtype_like import DTypeLike +from ._scalars import _ScalarLike_co +from ._shape import _ShapeLike + +_T = TypeVar("_T") +_2Tuple: TypeAlias = tuple[_T, _T] +_3Tuple: TypeAlias = tuple[_T, _T, _T] +_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] + +_2PTuple: TypeAlias = tuple[_T, _T, *tuple[_T, ...]] +_3PTuple: TypeAlias = tuple[_T, _T, _T, *tuple[_T, ...]] +_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, *tuple[_T, ...]] + +_NTypes = TypeVar("_NTypes", bound=int, covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) + +_NIn = TypeVar("_NIn", bound=int, covariant=True) +_NOut = TypeVar("_NOut", bound=int, covariant=True) +_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) + +@type_check_only +class _SupportsArrayUFunc(Protocol): + def __array_ufunc__( + self, + ufunc: ufunc, + method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + +@type_check_only +class _UFunc3Kwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + subok: bool + signature: _3Tuple[str | None] | str | None + +# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for +# ufuncs that don't accept two input arguments and return one output argument. +# In such cases the respective methods return `NoReturn` + +# NOTE: Similarly, `at` won't be defined for ufuncs that return +# multiple outputs; in such cases `at` is typed to return `NoReturn` + +# NOTE: If 2 output types are returned then `out` must be a +# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable + +# pyright: reportIncompatibleMethodOverride=false + +@type_check_only +class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... 
+ @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + out: None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> NDArray[Any]: ... + @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> Any: ... + + def at( + self, + a: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + /, + ) -> None: ... + + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload # (scalar, scalar) -> scalar + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Any: ... + @overload # (array-like, array) -> array + def __call__( + self, + x1: ArrayLike, + x2: NDArray[np.generic], + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def __call__( + self, + x1: NDArray[np.generic], + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... 
+ + def at( + self, + a: NDArray[Any], + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... + + def reduce( + self, + array: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: NDArray[Any] | None = ..., + keepdims: bool = ..., + initial: Any = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: NDArray[Any] | None = ..., + ) -> NDArray[Any]: ... + + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: NDArray[Any] | None = ..., + ) -> NDArray[Any]: ... + + @overload # (scalar, scalar) -> scalar + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Any: ... + @overload # (array-like, array) -> array + def outer( + self, + A: ArrayLike, + B: NDArray[np.generic], + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def outer( + self, + A: NDArray[np.generic], + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... + +@type_check_only +class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __out1: NDArray[Any] | None = ..., + __out2: NDArray[Any] | None = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Any]]: ... 
+ @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + __out1: NDArray[Any] | None = ..., + __out2: NDArray[Any] | None = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Any]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[4]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + __out1: NDArray[Any] | None = ..., + __out2: NDArray[Any] | None = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> _Signature: ... + + # Scalar for 1D array-likes; ndarray otherwise + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: None = ..., + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]], + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> NDArray[Any]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... 
+ def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3P(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _3PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _4PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ArrayT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> Any: ... + + def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, /) -> None: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + +@type_check_only +class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayT: ... 
+ @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + x2: _SupportsArrayUFunc | ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: _SupportsArrayUFunc, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + + def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, b: ArrayLike, /) -> None: ... + + @overload + def reduce( + self, + array: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + /, + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ArrayT: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT | tuple[_ArrayT], + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ArrayT: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + *, + keepdims: Literal[True], + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> NDArray[np.object_]: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ReturnType_co | NDArray[np.object_]: ... + + @overload + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, + /, + ) -> _ArrayT: ... + @overload + def reduceat( + self, + /, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT | tuple[_ArrayT], + ) -> _ArrayT: ... + @overload + def reduceat( + self, + /, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[np.object_]: ... + @overload + def reduceat( + self, + /, + array: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + ) -> Any: ... + + @overload + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, + /, + ) -> _ArrayT: ... + @overload + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT | tuple[_ArrayT], + ) -> _ArrayT: ... + @overload + def accumulate( + self, + /, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[np.object_]: ... + + @overload + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: _ArrayT, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayT: ... + @overload + def outer( + self, + A: _SupportsArrayUFunc, + B: _SupportsArrayUFunc | ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... 
+ @overload + def outer( + self, + A: _ScalarLike_co, + B: _SupportsArrayUFunc | ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + +@type_check_only +class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> Literal[1]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + x3: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ArrayT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + x2: _SupportsArrayUFunc | ArrayLike, + x3: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> Any: ... + + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + +@type_check_only +class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> _NOut: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: _2PTuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ArrayT]: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: _2PTuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> Any: ... + + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... 
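The practical effect of these nin/nout-specialized stubs is that the reduction-style methods are fully typed only where they can actually work; everywhere else they are typed `NoReturn`, mirroring the `ValueError` raised at runtime. A minimal sketch with real ufuncs (plain NumPy calls; nothing below is specific to this stub file):

    import numpy as np

    x = np.arange(5)

    # np.add is a binary, single-output ufunc (_UFunc_Nin2_Nout1), so its
    # reduce/accumulate/reduceat/outer methods are fully typed and usable.
    total = np.add.reduce(x)    # 10
    table = np.add.outer(x, x)  # 5x5 array of pairwise sums

    # np.sin is unary (_UFunc_Nin1_Nout1): its reduce is typed as NoReturn,
    # and the call fails at runtime as well.
    try:
        np.sin.reduce(x)
    except ValueError as exc:
        print(exc)  # e.g. "reduce only supported for binary functions"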
diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/__init__.py b/.venv/lib/python3.12/site-packages/numpy/_utils/__init__.py new file mode 100644 index 0000000..84ee99d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_utils/__init__.py @@ -0,0 +1,95 @@ +""" +This is a module for defining private helpers which do not depend on the +rest of NumPy. + +Everything in here must be self-contained so that it can be +imported anywhere else without creating circular imports. +If a utility requires the import of NumPy, it probably belongs +in ``numpy._core``. +""" + +import functools +import warnings + +from ._convertions import asbytes, asunicode + + +def set_module(module): + """Private decorator for overriding __module__ on a function or class. + + Example usage:: + + @set_module('numpy') + def example(): + pass + + assert example.__module__ == 'numpy' + """ + def decorator(func): + if module is not None: + if isinstance(func, type): + try: + func._module_source = func.__module__ + except (AttributeError): + pass + + func.__module__ = module + return func + return decorator + + +def _rename_parameter(old_names, new_names, dep_version=None): + """ + Generate decorator for backward-compatible keyword renaming. + + Apply the decorator generated by `_rename_parameter` to functions with a + renamed parameter to maintain backward-compatibility. + + After decoration, the function behaves as follows: + If only the new parameter is passed into the function, behave as usual. + If only the old parameter is passed into the function (as a keyword), raise + a DeprecationWarning if `dep_version` is provided, and behave as usual + otherwise. + If both old and new parameters are passed into the function, raise a + DeprecationWarning if `dep_version` is provided, and raise the appropriate + TypeError (function got multiple values for argument). + + Parameters + ---------- + old_names : list of str + Old names of parameters + new_name : list of str + New names of parameters + dep_version : str, optional + Version of NumPy in which old parameter was deprecated in the format + 'X.Y.Z'. If supplied, the deprecation message will indicate that + support for the old parameter will be removed in version 'X.Y+2.Z' + + Notes + ----- + Untested with functions that accept *args. Probably won't work as written. + + """ + def decorator(fun): + @functools.wraps(fun) + def wrapper(*args, **kwargs): + __tracebackhide__ = True # Hide traceback for py.test + for old_name, new_name in zip(old_names, new_names): + if old_name in kwargs: + if dep_version: + end_version = dep_version.split('.') + end_version[1] = str(int(end_version[1]) + 2) + end_version = '.'.join(end_version) + msg = (f"Use of keyword argument `{old_name}` is " + f"deprecated and replaced by `{new_name}`. 
" + f"Support for `{old_name}` will be removed " + f"in NumPy {end_version}.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + if new_name in kwargs: + msg = (f"{fun.__name__}() got multiple values for " + f"argument now known as `{new_name}`") + raise TypeError(msg) + kwargs[new_name] = kwargs.pop(old_name) + return fun(*args, **kwargs) + return wrapper + return decorator diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/_utils/__init__.pyi new file mode 100644 index 0000000..f3472df --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_utils/__init__.pyi @@ -0,0 +1,30 @@ +from collections.abc import Callable, Iterable +from typing import Protocol, TypeVar, overload, type_check_only + +from _typeshed import IdentityFunction + +from ._convertions import asbytes as asbytes +from ._convertions import asunicode as asunicode + +### + +_T = TypeVar("_T") +_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) + +@type_check_only +class _HasModule(Protocol): + __module__: str + +### + +@overload +def set_module(module: None) -> IdentityFunction: ... +@overload +def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... + +# +def _rename_parameter( + old_names: Iterable[str], + new_names: Iterable[str], + dep_version: str | None = None, +) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..8abeda7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_convertions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_convertions.cpython-312.pyc new file mode 100644 index 0000000..282ceac Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_convertions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_inspect.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_inspect.cpython-312.pyc new file mode 100644 index 0000000..3d69f9b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_inspect.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_pep440.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_pep440.cpython-312.pyc new file mode 100644 index 0000000..96f0304 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_utils/__pycache__/_pep440.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/_convertions.py b/.venv/lib/python3.12/site-packages/numpy/_utils/_convertions.py new file mode 100644 index 0000000..ab15a8b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_utils/_convertions.py @@ -0,0 +1,18 @@ +""" +A set of methods retained from np.compat module that +are still used across codebase. 
+""" + +__all__ = ["asunicode", "asbytes"] + + +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + + +def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/_convertions.pyi b/.venv/lib/python3.12/site-packages/numpy/_utils/_convertions.pyi new file mode 100644 index 0000000..6cc599a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_utils/_convertions.pyi @@ -0,0 +1,4 @@ +__all__ = ["asbytes", "asunicode"] + +def asunicode(s: bytes | str) -> str: ... +def asbytes(s: bytes | str) -> str: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/_inspect.py b/.venv/lib/python3.12/site-packages/numpy/_utils/_inspect.py new file mode 100644 index 0000000..b499f58 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_utils/_inspect.py @@ -0,0 +1,192 @@ +"""Subset of inspect module from upstream python + +We use this instead of upstream because upstream inspect is slow to import, and +significantly contributes to numpy import times. Importing this copy has almost +no overhead. + +""" +import types + +__all__ = ['getargspec', 'formatargspec'] + +# ----------------------------------------------------------- type-checking +def ismethod(object): + """Return true if the object is an instance method. + + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + im_class class object in which this method belongs + im_func function object containing implementation of method + im_self instance to which this method is bound, or None + + """ + return isinstance(object, types.MethodType) + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + func_code code object containing compiled function bytecode + func_defaults tuple of any default values for arguments + func_doc (same as __doc__) + func_globals global namespace in which this function was defined + func_name (same as __name__) + + """ + return isinstance(object, types.FunctionType) + +def iscode(object): + """Return true if the object is a code object. + + Code objects provide these attributes: + co_argcount number of arguments (not including * or ** args) + co_code string of raw compiled bytecode + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names of local variables + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variables + + """ + return isinstance(object, types.CodeType) + + +# ------------------------------------------------ argument list extraction +# These constants are from Python's compile.h. +CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 + +def getargs(co): + """Get information about the arguments accepted by a code object. 
+ + Three things are returned: (args, varargs, varkw), where 'args' is + a list of argument names (possibly containing nested lists), and + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + + """ + + if not iscode(co): + raise TypeError('arg is not a code object') + + nargs = co.co_argcount + names = co.co_varnames + args = list(names[:nargs]) + + # The following acrobatics are for anonymous (tuple) arguments. + # Which we do not need to support, so remove to avoid importing + # the dis module. + for i in range(nargs): + if args[i][:1] in ['', '.']: + raise TypeError("tuple function arguments are not supported") + varargs = None + if co.co_flags & CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + return args, varargs, varkw + +def getargspec(func): + """Get the names and default values of a function's arguments. + + A tuple of four things is returned: (args, varargs, varkw, defaults). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'defaults' is an n-tuple of the default values of the last n arguments. + + """ + + if ismethod(func): + func = func.__func__ + if not isfunction(func): + raise TypeError('arg is not a Python function') + args, varargs, varkw = getargs(func.__code__) + return args, varargs, varkw, func.__defaults__ + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame. + + """ + args, varargs, varkw = getargs(frame.f_code) + return args, varargs, varkw, frame.f_locals + +def joinseq(seq): + if len(seq) == 1: + return '(' + seq[0] + ',)' + else: + return '(' + ', '.join(seq) + ')' + +def strseq(object, convert, join=joinseq): + """Recursively walk a sequence, stringifying each element. + + """ + if type(object) in [list, tuple]: + return join([strseq(_o, convert, join) for _o in object]) + else: + return convert(object) + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargspec. + + The first four arguments are (args, varargs, varkw, defaults). The + other four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. 
+ + """ + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i in range(len(args)): + spec = strseq(args[i], formatarg, join) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(varargs)) + if varkw is not None: + specs.append(formatvarkw(varkw)) + return '(' + ', '.join(specs) + ')' + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [strseq(arg, convert, join) for arg in args] + + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/_inspect.pyi b/.venv/lib/python3.12/site-packages/numpy/_utils/_inspect.pyi new file mode 100644 index 0000000..d53c3c4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_utils/_inspect.pyi @@ -0,0 +1,71 @@ +import types +from collections.abc import Callable, Mapping +from typing import Any, Final, TypeAlias, TypeVar, overload + +from _typeshed import SupportsLenAndGetItem +from typing_extensions import TypeIs + +__all__ = ["formatargspec", "getargspec"] + +### + +_T = TypeVar("_T") +_RT = TypeVar("_RT") + +_StrSeq: TypeAlias = SupportsLenAndGetItem[str] +_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] + +_JoinFunc: TypeAlias = Callable[[list[_T]], _T] +_FormatFunc: TypeAlias = Callable[[_T], str] + +### + +CO_OPTIMIZED: Final = 1 +CO_NEWLOCALS: Final = 2 +CO_VARARGS: Final = 4 +CO_VARKEYWORDS: Final = 8 + +### + +def ismethod(object: object) -> TypeIs[types.MethodType]: ... +def isfunction(object: object) -> TypeIs[types.FunctionType]: ... +def iscode(object: object) -> TypeIs[types.CodeType]: ... + +### + +def getargs(co: types.CodeType) -> tuple[list[str], str | None, str | None]: ... +def getargspec(func: types.MethodType | types.FunctionType) -> tuple[list[str], str | None, str | None, tuple[Any, ...]]: ... +def getargvalues(frame: types.FrameType) -> tuple[list[str], str | None, str | None, dict[str, Any]]: ... + +# +def joinseq(seq: _StrSeq) -> str: ... + +# +@overload +def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... +@overload +def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... + +# +def formatargspec( + args: _StrSeq, + varargs: str | None = None, + varkw: str | None = None, + defaults: SupportsLenAndGetItem[object] | None = None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... 
+def formatargvalues( + args: _StrSeq, + varargs: str | None, + varkw: str | None, + locals: Mapping[str, object] | None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/_pep440.py b/.venv/lib/python3.12/site-packages/numpy/_utils/_pep440.py new file mode 100644 index 0000000..035a069 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/_utils/_pep440.py @@ -0,0 +1,486 @@ +"""Utility to compare pep440 compatible version strings. + +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. +""" + +# Copyright (c) Donald Stufft and individual contributors. +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +import collections +import itertools +import re + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", +] + + +# BEGIN packaging/_structures.py + + +class Infinity: + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity: + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +# BEGIN packaging/version.py + + +NegativeInfinity = NegativeInfinity() + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. 
+ """ + + +class _BaseVersion: + + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return f"" + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + def is_postrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, +) + +_legacy_version_replacement_map = { + "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # its adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                          # pre-release
+            [-_\.]?
+            (?P(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+        (?P                                         # post release
+            (?:-(?P[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?Ppost|rev|r)
+                [-_\.]?
+                (?P[0-9]+)?
+            )
+        )?
+        (?P                                          # dev release
+            [-_\.]?
+            (?Pdev)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+    )
+    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return f""
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append(f"{self._version.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(f".post{self._version.post[1]}")
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(f".dev{self._version.dev[1]}")
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                f"+{'.'.join(str(x) for x in self._version.local)}"
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append(f"{self._version.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
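+# A worked example of the public surface (illustrative values only):
+#
+#     >>> v = Version("1.2.3rc4+ubuntu.1")
+#     >>> str(v), v.public, v.base_version
+#     ('1.2.3rc4+ubuntu.1', '1.2.3rc4', '1.2.3')
+#     >>> v.local, v.is_prerelease, v.is_postrelease
+#     ('ubuntu.1', True, False)
+
+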
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed, so that e.g. 1.0.0 compares equal to 1.0. We
+    # reverse the list, drop the now-leading zeros until we hit something
+    # non-zero, re-reverse it back into the correct order, and use that tuple
+    # as our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre-segment, but we _only_ want to do this
+    # if there is neither a pre- nor a post-segment. If we have one of those,
+    # then the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post-segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
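
Taken together, `_parse_letter_version`, `_parse_local_version`, and `_cmpkey`
implement PEP 440 ordering. A minimal sketch of the resulting behavior,
assuming this vendored module is importable as `numpy._utils._pep440` (the
path this diff adds):

    from numpy._utils import _pep440

    # Spelling normalization: "alpha" -> "a"; "1.0-1" is the implicit
    # post-release syntax handled by _parse_letter_version.
    assert _pep440.Version("1.0alpha1") == _pep440.Version("1.0a1")
    assert _pep440.Version("1.0-1") == _pep440.Version("1.0.post1")

    # _cmpkey's Infinity sentinels produce the PEP 440 total order:
    # dev releases first, then pre-releases, final, and post-releases.
    ordered = sorted(["1.0.post1", "1.0", "1.0a1", "1.0.dev0"],
                     key=_pep440.Version)
    assert ordered == ["1.0.dev0", "1.0a1", "1.0", "1.0.post1"]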
diff --git a/.venv/lib/python3.12/site-packages/numpy/_utils/_pep440.pyi b/.venv/lib/python3.12/site-packages/numpy/_utils/_pep440.pyi
new file mode 100644
index 0000000..29dd4c9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_utils/_pep440.pyi
@@ -0,0 +1,121 @@
+import re
+from collections.abc import Callable
+from typing import (
+    Any,
+    ClassVar,
+    Final,
+    Generic,
+    NamedTuple,
+    TypeVar,
+    final,
+    type_check_only,
+)
+from typing import (
+    Literal as L,
+)
+
+from typing_extensions import TypeIs
+
+__all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"]
+
+###
+
+_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...])
+_CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True)
+
+###
+
+VERSION_PATTERN: Final[str] = ...
+
+class InvalidVersion(ValueError): ...
+
+@type_check_only
+@final
+class _InfinityType:
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: object, /) -> TypeIs[_InfinityType]: ...
+    def __ne__(self, other: object, /) -> bool: ...
+    def __lt__(self, other: object, /) -> L[False]: ...
+    def __le__(self, other: object, /) -> L[False]: ...
+    def __gt__(self, other: object, /) -> L[True]: ...
+    def __ge__(self, other: object, /) -> L[True]: ...
+    def __neg__(self) -> _NegativeInfinityType: ...
+
+Infinity: Final[_InfinityType] = ...
+
+@type_check_only
+@final
+class _NegativeInfinityType:
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: object, /) -> TypeIs[_NegativeInfinityType]: ...
+    def __ne__(self, other: object, /) -> bool: ...
+    def __lt__(self, other: object, /) -> L[True]: ...
+    def __le__(self, other: object, /) -> L[True]: ...
+    def __gt__(self, other: object, /) -> L[False]: ...
+    def __ge__(self, other: object, /) -> L[False]: ...
+    def __neg__(self) -> _InfinityType: ...
+
+NegativeInfinity: Final[_NegativeInfinityType] = ...
+
+class _Version(NamedTuple):
+    epoch: int
+    release: tuple[int, ...]
+    dev: tuple[str, int] | None
+    pre: tuple[str, int] | None
+    post: tuple[str, int] | None
+    local: tuple[str | int, ...] | None
+
+class _BaseVersion(Generic[_CmpKeyT_co]):
+    _key: _CmpKeyT_co
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: _BaseVersion, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __ne__(self, other: _BaseVersion, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __lt__(self, other: _BaseVersion, /) -> bool: ...
+    def __le__(self, other: _BaseVersion, /) -> bool: ...
+    def __ge__(self, other: _BaseVersion, /) -> bool: ...
+    def __gt__(self, other: _BaseVersion, /) -> bool: ...
+    def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ...
+
+class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]):
+    _version: Final[str]
+    def __init__(self, /, version: str) -> None: ...
+    @property
+    def public(self) -> str: ...
+    @property
+    def base_version(self) -> str: ...
+    @property
+    def local(self) -> None: ...
+    @property
+    def is_prerelease(self) -> L[False]: ...
+    @property
+    def is_postrelease(self) -> L[False]: ...
+
+class Version(
+    _BaseVersion[
+        tuple[
+            int,  # epoch
+            tuple[int, ...],  # release
+            tuple[str, int] | _InfinityType | _NegativeInfinityType,  # pre
+            tuple[str, int] | _NegativeInfinityType,  # post
+            tuple[str, int] | _InfinityType,  # dev
+            tuple[tuple[int, L[""]] | tuple[_NegativeInfinityType, str], ...] | _NegativeInfinityType,  # local
+        ],
+    ],
+):
+    _regex: ClassVar[re.Pattern[str]] = ...
+    _version: Final[str]
+
+    def __init__(self, /, version: str) -> None: ...
+    @property
+    def public(self) -> str: ...
+    @property
+    def base_version(self) -> str: ...
+    @property
+    def local(self) -> str | None: ...
+    @property
+    def is_prerelease(self) -> bool: ...
+    @property
+    def is_postrelease(self) -> bool: ...
+
+#
+def parse(version: str) -> Version | LegacyVersion: ...
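
As the stub's return annotation for `parse` indicates, strings that do not
match `VERSION_PATTERN` fall back to `LegacyVersion` instead of raising. A
small sketch, using the same assumed import path as above:

    from numpy._utils._pep440 import LegacyVersion, Version, parse

    assert isinstance(parse("1.26.4"), Version)
    # Unparseable input becomes a LegacyVersion, which the stub types as
    # never being a pre- or post-release.
    legacy = parse("not.a.pep440.version")
    assert isinstance(legacy, LegacyVersion)
    assert legacy.is_prerelease is False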
diff --git a/.venv/lib/python3.12/site-packages/numpy/char/__init__.py b/.venv/lib/python3.12/site-packages/numpy/char/__init__.py
new file mode 100644
index 0000000..d98d38c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/char/__init__.py
@@ -0,0 +1,2 @@
+from numpy._core.defchararray import *
+from numpy._core.defchararray import __all__, __doc__
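
This two-line shim means every `numpy.char` name resolves to
`numpy._core.defchararray`. A quick sketch of the public behavior:

    import numpy as np

    a = np.array(["hello", "world"])
    # Both resolve through numpy._core.defchararray via the star import.
    assert np.char.upper(a).tolist() == ["HELLO", "WORLD"]
    assert np.char.str_len(a).tolist() == [5, 5]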
diff --git a/.venv/lib/python3.12/site-packages/numpy/char/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/char/__init__.pyi
new file mode 100644
index 0000000..e151f20
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/char/__init__.pyi
@@ -0,0 +1,111 @@
+from numpy._core.defchararray import (
+    add,
+    array,
+    asarray,
+    capitalize,
+    center,
+    chararray,
+    compare_chararrays,
+    count,
+    decode,
+    encode,
+    endswith,
+    equal,
+    expandtabs,
+    find,
+    greater,
+    greater_equal,
+    index,
+    isalnum,
+    isalpha,
+    isdecimal,
+    isdigit,
+    islower,
+    isnumeric,
+    isspace,
+    istitle,
+    isupper,
+    join,
+    less,
+    less_equal,
+    ljust,
+    lower,
+    lstrip,
+    mod,
+    multiply,
+    not_equal,
+    partition,
+    replace,
+    rfind,
+    rindex,
+    rjust,
+    rpartition,
+    rsplit,
+    rstrip,
+    split,
+    splitlines,
+    startswith,
+    str_len,
+    strip,
+    swapcase,
+    title,
+    translate,
+    upper,
+    zfill,
+)
+
+__all__ = [
+    "equal",
+    "not_equal",
+    "greater_equal",
+    "less_equal",
+    "greater",
+    "less",
+    "str_len",
+    "add",
+    "multiply",
+    "mod",
+    "capitalize",
+    "center",
+    "count",
+    "decode",
+    "encode",
+    "endswith",
+    "expandtabs",
+    "find",
+    "index",
+    "isalnum",
+    "isalpha",
+    "isdigit",
+    "islower",
+    "isspace",
+    "istitle",
+    "isupper",
+    "join",
+    "ljust",
+    "lower",
+    "lstrip",
+    "partition",
+    "replace",
+    "rfind",
+    "rindex",
+    "rjust",
+    "rpartition",
+    "rsplit",
+    "rstrip",
+    "split",
+    "splitlines",
+    "startswith",
+    "strip",
+    "swapcase",
+    "title",
+    "translate",
+    "upper",
+    "zfill",
+    "isnumeric",
+    "isdecimal",
+    "array",
+    "asarray",
+    "compare_chararrays",
+    "chararray",
+]
diff --git a/.venv/lib/python3.12/site-packages/numpy/char/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/char/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..2846b9f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/char/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/conftest.py b/.venv/lib/python3.12/site-packages/numpy/conftest.py
new file mode 100644
index 0000000..fde4def
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/conftest.py
@@ -0,0 +1,258 @@
+"""
+Pytest configuration and fixtures for the NumPy test suite.
+"""
+import os
+import string
+import sys
+import tempfile
+import warnings
+from contextlib import contextmanager
+
+import hypothesis
+import pytest
+
+import numpy
+import numpy as np
+from numpy._core._multiarray_tests import get_fpu_mode
+from numpy._core.tests._natype import get_stringdtype_dtype, pd_NA
+from numpy.testing._private.utils import NOGIL_BUILD
+
+try:
+    from scipy_doctest.conftest import dt_config
+    HAVE_SCPDT = True
+except ModuleNotFoundError:
+    HAVE_SCPDT = False
+
+
+_old_fpu_mode = None
+_collect_results = {}
+
+# Use a known and persistent tmpdir for hypothesis' caches, which
+# can be automatically cleared by the OS or user.
+hypothesis.configuration.set_hypothesis_home_dir(
+    os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for NumPy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the second also
+# forces determinism and is designed for use via np.test().
+hypothesis.settings.register_profile(
+    name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+    name="np.test() profile",
+    deadline=None, print_blob=True, database=None, derandomize=True,
+    suppress_health_check=list(hypothesis.HealthCheck),
+)
+# Note that the default profile is chosen based on the presence
+# of pytest.ini, but can be overridden by passing the
+# --hypothesis-profile=NAME argument to pytest.
+_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
+hypothesis.settings.load_profile(
+    "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
+)
+
+# The experimental API is used in _umath_tests
+os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1"
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "valgrind_error: Tests that are known to error under valgrind.")
+    config.addinivalue_line("markers",
+        "leaks_references: Tests that are known to leak references.")
+    config.addinivalue_line("markers",
+        "slow: Tests that are very slow.")
+    config.addinivalue_line("markers",
+        "slow_pypy: Tests that are very slow on pypy.")
+
+
+def pytest_addoption(parser):
+    parser.addoption("--available-memory", action="store", default=None,
+                     help=("Set amount of memory available for running the "
+                           "test suite. This can result in tests requiring "
+                           "especially large amounts of memory to be skipped. "
+                           "Equivalent to setting environment variable "
+                           "NPY_AVAILABLE_MEM. Default: determined "
+                           "automatically."))
+
+
+gil_enabled_at_start = True
+if NOGIL_BUILD:
+    gil_enabled_at_start = sys._is_gil_enabled()
+
+
+def pytest_sessionstart(session):
+    available_mem = session.config.getoption('available_memory')
+    if available_mem is not None:
+        os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled():
+        tr = terminalreporter
+        tr.ensure_newline()
+        tr.section("GIL re-enabled", sep="=", red=True, bold=True)
+        tr.line("The GIL was re-enabled at runtime during the tests.")
+        tr.line("This can happen with no test failures if the RuntimeWarning")
+        tr.line("raised by Python when this happens is filtered by a test.")
+        tr.line("")
+        tr.line("Please ensure all new C modules declare support for running")
+        tr.line("without the GIL. Any new tests that intentionally import ")
+        tr.line("code that re-enables the GIL should do so in a subprocess.")
+        pytest.exit("GIL re-enabled during tests", returncode=1)
+
+# FIXME when yield tests are gone.
+@pytest.hookimpl()
+def pytest_itemcollected(item):
+    """
+    Check FPU precision mode was not changed during test collection.
+
+    The clumsy way we do it here is mainly necessary because numpy
+    still uses yield tests, which can execute code at test collection
+    time.
+    """
+    global _old_fpu_mode
+
+    mode = get_fpu_mode()
+
+    if _old_fpu_mode is None:
+        _old_fpu_mode = mode
+    elif mode != _old_fpu_mode:
+        _collect_results[item] = (_old_fpu_mode, mode)
+        _old_fpu_mode = mode
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+    """
+    Check FPU precision mode was not changed during the test.
+    """
+    old_mode = get_fpu_mode()
+    yield
+    new_mode = get_fpu_mode()
+
+    if old_mode != new_mode:
+        raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to "
+                             f"{new_mode:#x} during the test")
+
+    collect_result = _collect_results.get(request.node)
+    if collect_result is not None:
+        old_mode, new_mode = collect_result
+        raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to "
+                             f"{new_mode:#x} when collecting the test")
+
+
+@pytest.fixture(autouse=True)
+def add_np(doctest_namespace):
+    doctest_namespace['np'] = numpy
+
+@pytest.fixture(autouse=True)
+def env_setup(monkeypatch):
+    monkeypatch.setenv('PYTHONHASHSEED', '0')
+
+
+if HAVE_SCPDT:
+
+    @contextmanager
+    def warnings_errors_and_rng(test=None):
+        """Filter out the wall of DeprecationWarnings.
+        """
+        msgs = ["The numpy.linalg.linalg",
+                "The numpy.fft.helper",
+                "dep_util",
+                "pkg_resources",
+                "numpy.core.umath",
+                "msvccompiler",
+                "Deprecated call",
+                "numpy.core",
+                "Importing from numpy.matlib",
+                "This function is deprecated.",    # random_integers
+                "Data type alias 'a'",     # numpy.rec.fromfile
+                "Arrays of 2-dimensional vectors",   # matlib.cross
+                "`in1d` is deprecated", ]
+        msg = "|".join(msgs)
+
+        msgs_r = [
+            "invalid value encountered",
+            "divide by zero encountered"
+        ]
+        msg_r = "|".join(msgs_r)
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                'ignore', category=DeprecationWarning, message=msg
+            )
+            warnings.filterwarnings(
+                'ignore', category=RuntimeWarning, message=msg_r
+            )
+            yield
+
+    # find and check doctests under this context manager
+    dt_config.user_context_mgr = warnings_errors_and_rng
+
+    # numpy specific tweaks from refguide-check
+    dt_config.rndm_markers.add('#uninitialized')
+    dt_config.rndm_markers.add('# uninitialized')
+
+    # make the checker pick on mismatched dtypes
+    dt_config.strict_check = True
+
+    import doctest
+    dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+    # recognize the StringDType repr
+    dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType
+
+    # temporary skips
+    dt_config.skiplist = {
+        'numpy.savez',    # unclosed file
+        'numpy.matlib.savez',
+        'numpy.__array_namespace_info__',
+        'numpy.matlib.__array_namespace_info__',
+    }
+
+    # xfail problematic tutorials
+    dt_config.pytest_extra_xfail = {
+        'how-to-verify-bug.rst': '',
+        'c-info.ufunc-tutorial.rst': '',
+        'basics.interoperability.rst': 'needs pandas',
+        'basics.dispatch.rst': 'errors out in /testing/overrides.py',
+        'basics.subclassing.rst': '.. testcode:: admonitions not understood',
+        'misc.rst': 'manipulates warnings',
+    }
+
+    # ignores are for things that fail doctest collection (optional deps, etc.)
+    dt_config.pytest_extra_ignore = [
+        'numpy/distutils',
+        'numpy/_core/cversions.py',
+        'numpy/_pyinstaller',
+        'numpy/random/_examples',
+        'numpy/f2py/_backends/_distutils.py',
+    ]
+
+
+@pytest.fixture
+def random_string_list():
+    chars = list(string.ascii_letters + string.digits)
+    chars = np.array(chars, dtype="U1")
+    ret = np.random.choice(chars, size=100 * 10, replace=True)
+    return ret.view("U100")
+
+
+@pytest.fixture(params=[True, False])
+def coerce(request):
+    return request.param
+
+
+@pytest.fixture(
+    params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
+    ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
+)
+def na_object(request):
+    return request.param
+
+
+@pytest.fixture()
+def dtype(na_object, coerce):
+    return get_stringdtype_dtype(na_object, coerce)
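
The profile handling near the top of this conftest uses Hypothesis' standard
register/load API. A self-contained sketch of the same pattern (the profile
name "demo" is illustrative, not one the conftest registers):

    import hypothesis

    # Register a deterministic profile, then activate it; this mirrors
    # what the conftest does for "np.test() profile".
    hypothesis.settings.register_profile("demo", deadline=None,
                                         derandomize=True)
    hypothesis.settings.load_profile("demo")
    assert hypothesis.settings.default.derandomize is True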
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__init__.py b/.venv/lib/python3.12/site-packages/numpy/core/__init__.py
new file mode 100644
index 0000000..cfd96ed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/__init__.py
@@ -0,0 +1,33 @@
+"""
+The `numpy.core` submodule exists solely for backward compatibility
+purposes. The original `core` was renamed to `_core` and made private.
+`numpy.core` will be removed in the future.
+"""
+from numpy import _core
+
+from ._utils import _raise_warning
+
+
+# We used to use `np.core._ufunc_reconstruct` to unpickle.
+# This is unnecessary, but old pickles saved before 1.20 will be using it,
+# and there is no reason to break loading them.
+def _ufunc_reconstruct(module, name):
+    # The `fromlist` kwarg is required to ensure that `mod` points to the
+    # inner-most module rather than the parent package when module name is
+    # nested. This makes it possible to pickle non-toplevel ufuncs such as
+    # scipy.special.expit for instance.
+    mod = __import__(module, fromlist=[name])
+    return getattr(mod, name)
+
+
+# force lazy-loading of submodules to ensure a warning is printed
+
+__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype",  # noqa: F822
+           "einsumfunc", "fromnumeric", "function_base", "getlimits",
+           "_internal", "multiarray", "_multiarray_umath", "numeric",
+           "numerictypes", "overrides", "records", "shape_base", "umath"]
+
+def __getattr__(attr_name):
+    attr = getattr(_core, attr_name)
+    _raise_warning(attr_name)
+    return attr
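
Because `__getattr__` forwards to `numpy._core` and warns on every lookup,
even submodules are reached lazily through the deprecation machinery. A
sketch of what a caller observes (assuming a fresh session, before the
`numpy.core.numeric` shim module itself has been imported):

    import warnings

    import numpy.core

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # Not bound in numpy.core's namespace, so the module-level
        # __getattr__ runs, forwards to numpy._core.numeric, and warns.
        numeric = numpy.core.numeric
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)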
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/core/__init__.pyi
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..b1cafa9
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_dtype.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_dtype.cpython-312.pyc
new file mode 100644
index 0000000..63fe96e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_dtype.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc
new file mode 100644
index 0000000..00608ea
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_internal.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_internal.cpython-312.pyc
new file mode 100644
index 0000000..c6b02f4
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_internal.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc
new file mode 100644
index 0000000..d5fddef
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_utils.cpython-312.pyc
new file mode 100644
index 0000000..89d33af
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/_utils.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/arrayprint.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/arrayprint.cpython-312.pyc
new file mode 100644
index 0000000..37e6ea5
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/arrayprint.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/defchararray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/defchararray.cpython-312.pyc
new file mode 100644
index 0000000..d7a00f9
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/defchararray.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/einsumfunc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/einsumfunc.cpython-312.pyc
new file mode 100644
index 0000000..b51b0b4
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/einsumfunc.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/fromnumeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/fromnumeric.cpython-312.pyc
new file mode 100644
index 0000000..78c87ac
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/fromnumeric.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/function_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/function_base.cpython-312.pyc
new file mode 100644
index 0000000..71d8965
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/function_base.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/getlimits.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/getlimits.cpython-312.pyc
new file mode 100644
index 0000000..b9dba58
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/getlimits.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/multiarray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/multiarray.cpython-312.pyc
new file mode 100644
index 0000000..c88aa9f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/multiarray.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/numeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/numeric.cpython-312.pyc
new file mode 100644
index 0000000..f3785d1
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/numeric.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/numerictypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/numerictypes.cpython-312.pyc
new file mode 100644
index 0000000..83d421e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/numerictypes.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/overrides.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/overrides.cpython-312.pyc
new file mode 100644
index 0000000..681b72f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/overrides.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/records.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/records.cpython-312.pyc
new file mode 100644
index 0000000..ffb8960
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/records.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/shape_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/shape_base.cpython-312.pyc
new file mode 100644
index 0000000..9c55718
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/shape_base.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/umath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/umath.cpython-312.pyc
new file mode 100644
index 0000000..03306e3
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/core/__pycache__/umath.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_dtype.py b/.venv/lib/python3.12/site-packages/numpy/core/_dtype.py
new file mode 100644
index 0000000..5446079
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/_dtype.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import _dtype
+
+    from ._utils import _raise_warning
+    ret = getattr(_dtype, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._dtype' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_dtype")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_dtype.pyi b/.venv/lib/python3.12/site-packages/numpy/core/_dtype.pyi
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.py b/.venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.py
new file mode 100644
index 0000000..10cfba2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import _dtype_ctypes
+
+    from ._utils import _raise_warning
+    ret = getattr(_dtype_ctypes, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._dtype_ctypes' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_dtype_ctypes")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.pyi b/.venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.pyi
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_internal.py b/.venv/lib/python3.12/site-packages/numpy/core/_internal.py
new file mode 100644
index 0000000..63a6ccc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/_internal.py
@@ -0,0 +1,27 @@
+from numpy._core import _internal
+
+
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibility.
+def _reconstruct(subtype, shape, dtype):
+    from numpy import ndarray
+    return ndarray.__new__(subtype, shape, dtype)
+
+
+# Pybind11 (in versions <= 2.11.1) imports _dtype_from_pep3118 from the
+# _internal submodule, therefore it must be importable without a warning.
+_dtype_from_pep3118 = _internal._dtype_from_pep3118
+
+def __getattr__(attr_name):
+    from numpy._core import _internal
+
+    from ._utils import _raise_warning
+    ret = getattr(_internal, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._internal' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_internal")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_umath.py b/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_umath.py
new file mode 100644
index 0000000..c1e6b4e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_umath.py
@@ -0,0 +1,57 @@
+from numpy import ufunc
+from numpy._core import _multiarray_umath
+
+for item in _multiarray_umath.__dir__():
+    # ufuncs appear in pickles with a path in numpy.core._multiarray_umath
+    # and so must import from this namespace without warning or error
+    attr = getattr(_multiarray_umath, item)
+    if isinstance(attr, ufunc):
+        globals()[item] = attr
+
+
+def __getattr__(attr_name):
+    from numpy._core import _multiarray_umath
+
+    from ._utils import _raise_warning
+
+    if attr_name in {"_ARRAY_API", "_UFUNC_API"}:
+        import sys
+        import textwrap
+        import traceback
+
+        from numpy.version import short_version
+
+        msg = textwrap.dedent(f"""
+            A module that was compiled using NumPy 1.x cannot be run in
+            NumPy {short_version} as it may crash. To support both 1.x and 2.x
+            versions of NumPy, modules must be compiled with NumPy 2.0.
+            Some modules may need to be rebuilt instead, e.g. with 'pybind11>=2.12'.
+
+            If you are a user of the module, the easiest solution will be to
+            downgrade to 'numpy<2' or try to upgrade the affected module.
+            We expect that some modules will need time to support NumPy 2.
+
+            """)
+        tb_msg = "Traceback (most recent call last):"
+        for line in traceback.format_stack()[:-1]:
+            if "frozen importlib" in line:
+                continue
+            tb_msg += line
+
+        # Also print the message (with traceback).  This is because old versions
+        # of NumPy unfortunately set up the import to replace (and hide) the
+        # error.  The traceback shouldn't be needed, but e.g. pytest plugins
+        # seem to swallow it and we should be failing anyway...
+        sys.stderr.write(msg + tb_msg)
+        raise ImportError(msg)
+
+    ret = getattr(_multiarray_umath, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            "module 'numpy.core._multiarray_umath' has no attribute "
+            f"{attr_name}")
+    _raise_warning(attr_name, "_multiarray_umath")
+    return ret
+
+
+del _multiarray_umath, ufunc
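
The loop at the top re-exports every ufunc precisely so that pickles which
reference the old `numpy.core._multiarray_umath` path keep loading. A quick
round-trip sketch:

    import pickle

    import numpy as np

    # Ufuncs pickle by qualified name; the re-export keeps the old path
    # resolvable, so the round trip returns the identical object.
    assert pickle.loads(pickle.dumps(np.add)) is np.add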
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_utils.py b/.venv/lib/python3.12/site-packages/numpy/core/_utils.py
new file mode 100644
index 0000000..5f47f4b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/_utils.py
@@ -0,0 +1,21 @@
+import warnings
+
+
+def _raise_warning(attr: str, submodule: str | None = None) -> None:
+    new_module = "numpy._core"
+    old_module = "numpy.core"
+    if submodule is not None:
+        new_module = f"{new_module}.{submodule}"
+        old_module = f"{old_module}.{submodule}"
+    warnings.warn(
+        f"{old_module} is deprecated and has been renamed to {new_module}. "
+        "The numpy._core namespace contains private NumPy internals and its "
+        "use is discouraged, as NumPy internals can change without warning in "
+        "any release. In practice, most real-world usage of numpy.core is to "
+        "access functionality in the public NumPy API. If that is the case, "
+        "use the public NumPy API. If not, you are using NumPy internals. "
+        "If you would still like to access an internal attribute, "
+        f"use {new_module}.{attr}.",
+        DeprecationWarning,
+        stacklevel=3
+    )
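
Every shim in this compatibility package funnels through `_raise_warning`.
A sketch of catching it from user code:

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # The from-import does a getattr on the numpy.core package, which
        # goes through its __getattr__ and emits the warning built here.
        from numpy.core import umath  # noqa: F401
    assert "renamed to numpy._core" in str(caught[-1].message)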
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.py b/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.py
new file mode 100644
index 0000000..8be5c5c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import arrayprint
+
+    from ._utils import _raise_warning
+    ret = getattr(arrayprint, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.arrayprint' has no attribute {attr_name}")
+    _raise_warning(attr_name, "arrayprint")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/defchararray.py b/.venv/lib/python3.12/site-packages/numpy/core/defchararray.py
new file mode 100644
index 0000000..1c87068
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/defchararray.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import defchararray
+
+    from ._utils import _raise_warning
+    ret = getattr(defchararray, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.defchararray' has no attribute {attr_name}")
+    _raise_warning(attr_name, "defchararray")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.py b/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.py
new file mode 100644
index 0000000..fe5aa39
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import einsumfunc
+
+    from ._utils import _raise_warning
+    ret = getattr(einsumfunc, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.einsumfunc' has no attribute {attr_name}")
+    _raise_warning(attr_name, "einsumfunc")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.py b/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.py
new file mode 100644
index 0000000..fae7a03
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import fromnumeric
+
+    from ._utils import _raise_warning
+    ret = getattr(fromnumeric, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.fromnumeric' has no attribute {attr_name}")
+    _raise_warning(attr_name, "fromnumeric")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/function_base.py b/.venv/lib/python3.12/site-packages/numpy/core/function_base.py
new file mode 100644
index 0000000..e15c971
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/function_base.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import function_base
+
+    from ._utils import _raise_warning
+    ret = getattr(function_base, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.function_base' has no attribute {attr_name}")
+    _raise_warning(attr_name, "function_base")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/getlimits.py b/.venv/lib/python3.12/site-packages/numpy/core/getlimits.py
new file mode 100644
index 0000000..dc009cb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/getlimits.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import getlimits
+
+    from ._utils import _raise_warning
+    ret = getattr(getlimits, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.getlimits' has no attribute {attr_name}")
+    _raise_warning(attr_name, "getlimits")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/multiarray.py b/.venv/lib/python3.12/site-packages/numpy/core/multiarray.py
new file mode 100644
index 0000000..b226709
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/multiarray.py
@@ -0,0 +1,25 @@
+from numpy._core import multiarray
+
+# these must import without warning or error from numpy.core.multiarray to
+# support old pickle files
+for item in ["_reconstruct", "scalar"]:
+    globals()[item] = getattr(multiarray, item)
+
+# Pybind11 (in versions <= 2.11.1) imports _ARRAY_API from the multiarray
+# submodule as a part of NumPy initialization, therefore it must be importable
+# without a warning.
+_ARRAY_API = multiarray._ARRAY_API
+
+def __getattr__(attr_name):
+    from numpy._core import multiarray
+
+    from ._utils import _raise_warning
+    ret = getattr(multiarray, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.multiarray' has no attribute {attr_name}")
+    _raise_warning(attr_name, "multiarray")
+    return ret
+
+
+del multiarray
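
`_reconstruct` and `scalar` are copied into this module's globals up front
precisely so `__getattr__` (and its warning) never fires for them; old array
pickles resolve these names silently. A sketch:

    import warnings

    with warnings.catch_warnings():
        warnings.simplefilter("error")  # any DeprecationWarning would raise
        # Pre-populated above, so these imports bypass __getattr__.
        from numpy.core.multiarray import _reconstruct, scalar  # noqa: F401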
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/numeric.py b/.venv/lib/python3.12/site-packages/numpy/core/numeric.py
new file mode 100644
index 0000000..ddd70b3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/numeric.py
@@ -0,0 +1,12 @@
+def __getattr__(attr_name):
+    from numpy._core import numeric
+
+    from ._utils import _raise_warning
+
+    sentinel = object()
+    ret = getattr(numeric, attr_name, sentinel)
+    if ret is sentinel:
+        raise AttributeError(
+            f"module 'numpy.core.numeric' has no attribute {attr_name}")
+    _raise_warning(attr_name, "numeric")
+    return ret
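
Unlike the sibling shims, this one uses a sentinel rather than a
`getattr(..., None)` default, because `numpy._core.numeric` legitimately
contains attributes whose value is `None` (notably `newaxis`). A sketch,
with the deprecation warning suppressed:

    import warnings

    import numpy.core.numeric as nc

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        # newaxis is literally None; the sentinel lets it resolve instead
        # of being mistaken for a missing attribute.
        assert nc.newaxis is None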
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.py b/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.py
new file mode 100644
index 0000000..cf2ad99
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import numerictypes
+
+    from ._utils import _raise_warning
+    ret = getattr(numerictypes, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.numerictypes' has no attribute {attr_name}")
+    _raise_warning(attr_name, "numerictypes")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/overrides.py b/.venv/lib/python3.12/site-packages/numpy/core/overrides.py
new file mode 100644
index 0000000..17830ed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/overrides.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import overrides
+
+    from ._utils import _raise_warning
+    ret = getattr(overrides, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.overrides' has no attribute {attr_name}")
+    _raise_warning(attr_name, "overrides")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/overrides.pyi b/.venv/lib/python3.12/site-packages/numpy/core/overrides.pyi
new file mode 100644
index 0000000..fab3512
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/overrides.pyi
@@ -0,0 +1,7 @@
+# NOTE: At runtime, this submodule dynamically re-exports any `numpy._core.overrides`
+# member, and issues a `DeprecationWarning` when accessed. But since there is no
+# `__dir__` or `__all__` present, these annotations would be unverifiable. Because
+# this module is also deprecated in favor of `numpy._core`, and therefore not part of
+# the public API, we omit the "re-exports", which in practice would require literal
+# duplication of the stubs in order for the `@deprecated` decorator to be understood
+# by type-checkers.
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/records.py b/.venv/lib/python3.12/site-packages/numpy/core/records.py
new file mode 100644
index 0000000..0cc4503
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/records.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import records
+
+    from ._utils import _raise_warning
+    ret = getattr(records, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.records' has no attribute {attr_name}")
+    _raise_warning(attr_name, "records")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/shape_base.py b/.venv/lib/python3.12/site-packages/numpy/core/shape_base.py
new file mode 100644
index 0000000..9cffce7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/shape_base.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import shape_base
+
+    from ._utils import _raise_warning
+    ret = getattr(shape_base, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.shape_base' has no attribute {attr_name}")
+    _raise_warning(attr_name, "shape_base")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/umath.py b/.venv/lib/python3.12/site-packages/numpy/core/umath.py
new file mode 100644
index 0000000..25a60cc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/core/umath.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import umath
+
+    from ._utils import _raise_warning
+    ret = getattr(umath, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.umath' has no attribute {attr_name}")
+    _raise_warning(attr_name, "umath")
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__init__.py b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__init__.py
new file mode 100644
index 0000000..fd3c773
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__init__.py
@@ -0,0 +1,13 @@
+from ._ctypeslib import (
+    __all__,
+    __doc__,
+    _concrete_ndptr,
+    _ndptr,
+    as_array,
+    as_ctypes,
+    as_ctypes_type,
+    c_intp,
+    ctypes,
+    load_library,
+    ndpointer,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__init__.pyi
new file mode 100644
index 0000000..adc51da
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__init__.pyi
@@ -0,0 +1,33 @@
+import ctypes
+from ctypes import c_int64 as _c_intp
+
+from ._ctypeslib import (
+    __all__ as __all__,
+    __doc__ as __doc__,
+    _concrete_ndptr as _concrete_ndptr,
+    _ndptr as _ndptr,
+    as_array as as_array,
+    as_ctypes as as_ctypes,
+    as_ctypes_type as as_ctypes_type,
+    c_intp as c_intp,
+    load_library as load_library,
+    ndpointer as ndpointer,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..0ba9501
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc
new file mode 100644
index 0000000..0fb927e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/ctypeslib/_ctypeslib.py b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/_ctypeslib.py
new file mode 100644
index 0000000..9255603
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/_ctypeslib.py
@@ -0,0 +1,603 @@
+"""
+============================
+``ctypes`` Utility Functions
+============================
+
+See Also
+--------
+load_library : Load a C library.
+ndpointer : Array restype/argtype with verification.
+as_ctypes : Create a ctypes array from an ndarray.
+as_array : Create an ndarray from a ctypes array.
+
+References
+----------
+.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html
+
+Examples
+--------
+Load the C library:
+
+>>> _lib = np.ctypeslib.load_library('libmystuff', '.')     #doctest: +SKIP
+
+Our result type: an ndarray that must be of type double, 1-dimensional,
+and C-contiguous in memory:
+
+>>> array_1d_double = np.ctypeslib.ndpointer(
+...                          dtype=np.double,
+...                          ndim=1, flags='CONTIGUOUS')    #doctest: +SKIP
+
+Our C-function typically takes an array and updates its values
+in-place.  For example::
+
+    void foo_func(double* x, int length)
+    {
+        int i;
+        for (i = 0; i < length; i++) {
+            x[i] = i*i;
+        }
+    }
+
+We wrap it using:
+
+>>> _lib.foo_func.restype = None                      #doctest: +SKIP
+>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
+
+Then, we're ready to call ``foo_func``:
+
+>>> out = np.empty(15, dtype=np.double)
+>>> _lib.foo_func(out, len(out))                #doctest: +SKIP
+
+"""
+__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
+           'as_ctypes_type']
+
+import os
+
+import numpy as np
+import numpy._core.multiarray as mu
+from numpy._utils import set_module
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+if ctypes is None:
+    @set_module("numpy.ctypeslib")
+    def _dummy(*args, **kwds):
+        """
+        Dummy object that raises an ImportError if ctypes is not available.
+
+        Raises
+        ------
+        ImportError
+            If ctypes is not available.
+
+        """
+        raise ImportError("ctypes is not available.")
+    load_library = _dummy
+    as_ctypes = _dummy
+    as_ctypes_type = _dummy
+    as_array = _dummy
+    ndpointer = _dummy
+    from numpy import intp as c_intp
+    _ndptr_base = object
+else:
+    import numpy._core._internal as nic
+    c_intp = nic._getintp_ctype()
+    del nic
+    _ndptr_base = ctypes.c_void_p
+
+    # Adapted from Albert Strasheim
+    @set_module("numpy.ctypeslib")
+    def load_library(libname, loader_path):
+        """
+        It is possible to load a library using
+
+        >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
+
+        But there are cross-platform considerations, such as library file
+        extensions, plus the fact that Windows will just load the first
+        library it finds with that name.  NumPy supplies the load_library
+        function as a convenience.
+
+        .. versionchanged:: 1.20.0
+            Allow libname and loader_path to take any
+            :term:`python:path-like object`.
+
+        Parameters
+        ----------
+        libname : path-like
+            Name of the library, which can have 'lib' as a prefix,
+            but without an extension.
+        loader_path : path-like
+            Where the library can be found.
+
+        Returns
+        -------
+        ctypes.cdll[libpath] : library object
+           A ctypes library object
+
+        Raises
+        ------
+        OSError
+            If there is no library with the expected extension, or the
+            library is defective and cannot be loaded.
+        """
+        # Convert path-like objects into strings
+        libname = os.fsdecode(libname)
+        loader_path = os.fsdecode(loader_path)
+
+        ext = os.path.splitext(libname)[1]
+        if not ext:
+            import sys
+            import sysconfig
+            # Try to load library with platform-specific name, otherwise
+            # default to libname.[so|dll|dylib].  Sometimes, these files are
+            # built erroneously on non-linux platforms.
+            base_ext = ".so"
+            if sys.platform.startswith("darwin"):
+                base_ext = ".dylib"
+            elif sys.platform.startswith("win"):
+                base_ext = ".dll"
+            libname_ext = [libname + base_ext]
+            so_ext = sysconfig.get_config_var("EXT_SUFFIX")
+            if so_ext != base_ext:
+                libname_ext.insert(0, libname + so_ext)
+        else:
+            libname_ext = [libname]
+
+        loader_path = os.path.abspath(loader_path)
+        if not os.path.isdir(loader_path):
+            libdir = os.path.dirname(loader_path)
+        else:
+            libdir = loader_path
+
+        for ln in libname_ext:
+            libpath = os.path.join(libdir, ln)
+            if os.path.exists(libpath):
+                try:
+                    return ctypes.cdll[libpath]
+                except OSError:
+                    # defective lib file
+                    raise
+        # if no successful return in the libname_ext loop:
+        raise OSError("no file with expected extension")
+
+
+def _num_fromflags(flaglist):
+    num = 0
+    for val in flaglist:
+        num += mu._flagdict[val]
+    return num
+
+
+_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
+              'OWNDATA', 'WRITEBACKIFCOPY']
+def _flags_fromnum(num):
+    res = []
+    for key in _flagnames:
+        value = mu._flagdict[key]
+        if (num & value):
+            res.append(key)
+    return res
+
+
+class _ndptr(_ndptr_base):
+    @classmethod
+    def from_param(cls, obj):
+        if not isinstance(obj, np.ndarray):
+            raise TypeError("argument must be an ndarray")
+        if cls._dtype_ is not None \
+               and obj.dtype != cls._dtype_:
+            raise TypeError(f"array must have data type {cls._dtype_}")
+        if cls._ndim_ is not None \
+               and obj.ndim != cls._ndim_:
+            raise TypeError("array must have %d dimension(s)" % cls._ndim_)
+        if cls._shape_ is not None \
+               and obj.shape != cls._shape_:
+            raise TypeError(f"array must have shape {str(cls._shape_)}")
+        if cls._flags_ is not None \
+               and ((obj.flags.num & cls._flags_) != cls._flags_):
+            raise TypeError(f"array must have flags {_flags_fromnum(cls._flags_)}")
+        return obj.ctypes
+
+
+class _concrete_ndptr(_ndptr):
+    """
+    Like _ndptr, but with `_shape_` and `_dtype_` specified.
+
+    Notably, this means the pointer has enough information to reconstruct
+    the array, which is not generally true.
+    """
+    def _check_retval_(self):
+        """
+        This method is called when this class is used as the .restype
+        attribute for a shared-library function, to automatically wrap the
+        pointer into an array.
+        """
+        return self.contents
+
+    @property
+    def contents(self):
+        """
+        Get an ndarray viewing the data pointed to by this pointer.
+
+        This mirrors the `contents` attribute of a normal ctypes pointer.
+        """
+        full_dtype = np.dtype((self._dtype_, self._shape_))
+        full_ctype = ctypes.c_char * full_dtype.itemsize
+        buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
+        return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
+
+
+# Factory for an array-checking class with from_param defined for
+# use with ctypes argtypes mechanism
+_pointer_type_cache = {}
+
+@set_module("numpy.ctypeslib")
+def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
+    """
+    Array-checking restype/argtypes.
+
+    An ndpointer instance is used to describe an ndarray in restypes
+    and argtypes specifications.  This approach is more flexible than
+    using, for example, ``POINTER(c_double)``, since several restrictions
+    can be specified, which are verified upon calling the ctypes function.
+    These include data type, number of dimensions, shape and flags.  If a
+    given array does not satisfy the specified restrictions,
+    a ``TypeError`` is raised.
+
+    Parameters
+    ----------
+    dtype : data-type, optional
+        Array data-type.
+    ndim : int, optional
+        Number of array dimensions.
+    shape : tuple of ints, optional
+        Array shape.
+    flags : str or tuple of str
+        Array flags; may be one or more of:
+
+        - C_CONTIGUOUS / C / CONTIGUOUS
+        - F_CONTIGUOUS / F / FORTRAN
+        - OWNDATA / O
+        - WRITEABLE / W
+        - ALIGNED / A
+        - WRITEBACKIFCOPY / X
+
+    Returns
+    -------
+    klass : ndpointer type object
+        A type object, which is an ``_ndptr`` subclass containing
+        dtype, ndim, shape and flags information.
+
+    Raises
+    ------
+    TypeError
+        If a given array does not satisfy the specified restrictions.
+
+    Examples
+    --------
+    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
+    ...                                                  ndim=1,
+    ...                                                  flags='C_CONTIGUOUS')]
+    ... #doctest: +SKIP
+    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
+    ... #doctest: +SKIP
+
+    """
+
+    # normalize dtype to dtype | None
+    if dtype is not None:
+        dtype = np.dtype(dtype)
+
+    # normalize flags to int | None
+    num = None
+    if flags is not None:
+        if isinstance(flags, str):
+            flags = flags.split(',')
+        elif isinstance(flags, (int, np.integer)):
+            num = flags
+            flags = _flags_fromnum(num)
+        elif isinstance(flags, mu.flagsobj):
+            num = flags.num
+            flags = _flags_fromnum(num)
+        if num is None:
+            try:
+                flags = [x.strip().upper() for x in flags]
+            except Exception as e:
+                raise TypeError("invalid flags specification") from e
+            num = _num_fromflags(flags)
+
+    # normalize shape to tuple | None
+    if shape is not None:
+        try:
+            shape = tuple(shape)
+        except TypeError:
+            # single integer -> 1-tuple
+            shape = (shape,)
+
+    cache_key = (dtype, ndim, shape, num)
+
+    try:
+        return _pointer_type_cache[cache_key]
+    except KeyError:
+        pass
+
+    # produce a name for the new type
+    if dtype is None:
+        name = 'any'
+    elif dtype.names is not None:
+        name = str(id(dtype))
+    else:
+        name = dtype.str
+    if ndim is not None:
+        name += "_%dd" % ndim
+    if shape is not None:
+        name += "_" + "x".join(str(x) for x in shape)
+    if flags is not None:
+        name += "_" + "_".join(flags)
+
+    if dtype is not None and shape is not None:
+        base = _concrete_ndptr
+    else:
+        base = _ndptr
+
+    klass = type(f"ndpointer_{name}", (base,),
+                 {"_dtype_": dtype,
+                  "_shape_": shape,
+                  "_ndim_": ndim,
+                  "_flags_": num})
+    _pointer_type_cache[cache_key] = klass
+    return klass
+
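+# Editor's illustrative sketch (not upstream NumPy code): thanks to the cache
+# above, repeated identical calls return the same class, and from_param
+# enforces the declared restrictions:
+#
+#     >>> p = np.ctypeslib.ndpointer(dtype=np.float64, ndim=1)
+#     >>> p is np.ctypeslib.ndpointer(dtype=np.float64, ndim=1)
+#     True
+#     >>> p.from_param(np.zeros((2, 2)))
+#     Traceback (most recent call last):
+#         ...
+#     TypeError: array must have 1 dimension(s)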
+
+if ctypes is not None:
+    def _ctype_ndarray(element_type, shape):
+        """ Create an ndarray of the given element type and shape """
+        for dim in shape[::-1]:
+            element_type = dim * element_type
+            # prevent the type name from including np.ctypeslib
+            element_type.__module__ = None
+        return element_type
+
+    def _get_scalar_type_map():
+        """
+        Return a dictionary mapping native endian scalar dtype to ctypes types
+        """
+        ct = ctypes
+        simple_types = [
+            ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+            ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+            ct.c_float, ct.c_double,
+            ct.c_bool,
+        ]
+        return {np.dtype(ctype): ctype for ctype in simple_types}
+
+    _scalar_type_map = _get_scalar_type_map()
+
+    def _ctype_from_dtype_scalar(dtype):
+        # swapping twice ensures that `=` is promoted to <, >, or |
+        dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
+        dtype_native = dtype.newbyteorder('=')
+        try:
+            ctype = _scalar_type_map[dtype_native]
+        except KeyError:
+            raise NotImplementedError(
+                f"Converting {dtype!r} to a ctypes type"
+            ) from None
+
+        if dtype_with_endian.byteorder == '>':
+            ctype = ctype.__ctype_be__
+        elif dtype_with_endian.byteorder == '<':
+            ctype = ctype.__ctype_le__
+
+        return ctype
+
+    def _ctype_from_dtype_subarray(dtype):
+        element_dtype, shape = dtype.subdtype
+        ctype = _ctype_from_dtype(element_dtype)
+        return _ctype_ndarray(ctype, shape)
+
+    def _ctype_from_dtype_structured(dtype):
+        # extract offsets of each field
+        field_data = []
+        for name in dtype.names:
+            field_dtype, offset = dtype.fields[name][:2]
+            field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
+
+        # ctypes doesn't care about field order
+        field_data = sorted(field_data, key=lambda f: f[0])
+
+        if len(field_data) > 1 and all(offset == 0 for offset, _, _ in field_data):
+            # union, if multiple fields all at address 0
+            size = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                _fields_.append((name, ctype))
+                size = max(size, ctypes.sizeof(ctype))
+
+            # pad to the right size
+            if dtype.itemsize != size:
+                _fields_.append(('', ctypes.c_char * dtype.itemsize))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('union', (ctypes.Union,), {
+                '_fields_': _fields_,
+                '_pack_': 1,
+                '__module__': None,
+            })
+        else:
+            last_offset = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                padding = offset - last_offset
+                if padding < 0:
+                    raise NotImplementedError("Overlapping fields")
+                if padding > 0:
+                    _fields_.append(('', ctypes.c_char * padding))
+
+                _fields_.append((name, ctype))
+                last_offset = offset + ctypes.sizeof(ctype)
+
+            padding = dtype.itemsize - last_offset
+            if padding > 0:
+                _fields_.append(('', ctypes.c_char * padding))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('struct', (ctypes.Structure,), {
+                '_fields_': _fields_,
+                '_pack_': 1,
+                '__module__': None,
+            })
+
+    def _ctype_from_dtype(dtype):
+        if dtype.fields is not None:
+            return _ctype_from_dtype_structured(dtype)
+        elif dtype.subdtype is not None:
+            return _ctype_from_dtype_subarray(dtype)
+        else:
+            return _ctype_from_dtype_scalar(dtype)
+
+    @set_module("numpy.ctypeslib")
+    def as_ctypes_type(dtype):
+        r"""
+        Convert a dtype into a ctypes type.
+
+        Parameters
+        ----------
+        dtype : dtype
+            The dtype to convert
+
+        Returns
+        -------
+        ctype
+            A ctype scalar, union, array, or struct
+
+        Raises
+        ------
+        NotImplementedError
+            If the conversion is not possible
+
+        Notes
+        -----
+        This function does not losslessly round-trip in either direction.
+
+        ``np.dtype(as_ctypes_type(dt))`` will:
+
+        - insert padding fields
+        - reorder fields to be sorted by offset
+        - discard field titles
+
+        ``as_ctypes_type(np.dtype(ctype))`` will:
+
+        - discard the class names of `ctypes.Structure`\ s and
+          `ctypes.Union`\ s
+        - convert single-element `ctypes.Union`\ s into single-element
+          `ctypes.Structure`\ s
+        - insert padding fields
+
+        Examples
+        --------
+        Converting a simple dtype:
+
+        >>> dt = np.dtype('int8')
+        >>> ctype = np.ctypeslib.as_ctypes_type(dt)
+        >>> ctype
+        <class 'ctypes.c_byte'>
+
+        Converting a structured dtype:
+
+        >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')])
+        >>> ctype = np.ctypeslib.as_ctypes_type(dt)
+        >>> ctype
+        <class 'struct'>
+
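+        Converting a dtype whose fields all sit at offset 0 produces a
+        union (an illustrative sketch):
+
+        >>> dt = np.dtype({'names': ['a', 'b'], 'formats': ['i2', 'i4'],
+        ...                'offsets': [0, 0]})
+        >>> np.ctypeslib.as_ctypes_type(dt).__bases__
+        (<class 'ctypes.Union'>,)
+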
+        """
+        return _ctype_from_dtype(np.dtype(dtype))
+
+    @set_module("numpy.ctypeslib")
+    def as_array(obj, shape=None):
+        """
+        Create a numpy array from a ctypes array or POINTER.
+
+        The numpy array shares the memory with the ctypes object.
+
+        The shape parameter must be given if converting from a ctypes POINTER.
+        It is ignored if converting from a ctypes array.
+
+        Examples
+        --------
+        Converting a ctypes integer array:
+
+        >>> import ctypes
+        >>> ctypes_array = (ctypes.c_int * 5)(0, 1, 2, 3, 4)
+        >>> np_array = np.ctypeslib.as_array(ctypes_array)
+        >>> np_array
+        array([0, 1, 2, 3, 4], dtype=int32)
+
+        Converting a ctypes POINTER:
+
+        >>> import ctypes
+        >>> buffer = (ctypes.c_int * 5)(0, 1, 2, 3, 4)
+        >>> pointer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_int))
+        >>> np_array = np.ctypeslib.as_array(pointer, (5,))
+        >>> np_array
+        array([0, 1, 2, 3, 4], dtype=int32)
+
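+        A multidimensional view over the same pointer works the same way
+        (an illustrative sketch):
+
+        >>> np_2d = np.ctypeslib.as_array(pointer, (1, 5))
+        >>> np_2d.shape
+        (1, 5)
+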
+        """
+        if isinstance(obj, ctypes._Pointer):
+            # convert pointers to an array of the desired shape
+            if shape is None:
+                raise TypeError(
+                    'as_array() requires a shape argument when called on a '
+                    'pointer')
+            p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
+            obj = ctypes.cast(obj, p_arr_type).contents
+
+        return np.asarray(obj)
+
+    @set_module("numpy.ctypeslib")
+    def as_ctypes(obj):
+        """
+        Create and return a ctypes object from a numpy array.  Actually,
+        anything that exposes the ``__array_interface__`` is accepted.
+
+        Examples
+        --------
+        Create ctypes object from inferred int ``np.array``:
+
+        >>> inferred_int_array = np.array([1, 2, 3])
+        >>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array)
+        >>> type(c_int_array)
+        <class 'c_long_Array_3'>
+        >>> c_int_array[:]
+        [1, 2, 3]
+
+        Create ctypes object from an explicit 8-bit unsigned int ``np.array``:
+
+        >>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8)
+        >>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array)
+        >>> type(c_int_array)
+        <class 'c_ubyte_Array_3'>
+        >>> c_int_array[:]
+        [1, 2, 3]
+
+        """
+        ai = obj.__array_interface__
+        if ai["strides"]:
+            raise TypeError("strided arrays not supported")
+        if ai["version"] != 3:
+            raise TypeError("only __array_interface__ version 3 supported")
+        addr, readonly = ai["data"]
+        if readonly:
+            raise TypeError("readonly arrays unsupported")
+
+        # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
+        # dtype.itemsize (gh-14214)
+        ctype_scalar = as_ctypes_type(ai["typestr"])
+        result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
+        result = result_type.from_address(addr)
+        result.__keep = obj
+        return result
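+
+    # Round-trip sketch (illustrative): `as_ctypes` shares memory with the
+    # source array, so writes through the ctypes view are visible in the
+    # original:
+    #
+    #     >>> arr = np.zeros(3, dtype=np.int32)
+    #     >>> view = as_ctypes(arr)
+    #     >>> view[0] = 7
+    #     >>> arr
+    #     array([7, 0, 0], dtype=int32)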
diff --git a/.venv/lib/python3.12/site-packages/numpy/ctypeslib/_ctypeslib.pyi b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/_ctypeslib.pyi
new file mode 100644
index 0000000..e26d605
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ctypeslib/_ctypeslib.pyi
@@ -0,0 +1,245 @@
+# NOTE: Numpy's mypy plugin is used for importing the correct
+# platform-specific `ctypes._SimpleCData[int]` sub-type
+import ctypes
+from collections.abc import Iterable, Sequence
+from ctypes import c_int64 as _c_intp
+from typing import (
+    Any,
+    ClassVar,
+    Generic,
+    TypeAlias,
+    TypeVar,
+    overload,
+)
+from typing import Literal as L
+
+from _typeshed import StrOrBytesPath
+
+import numpy as np
+from numpy import (
+    byte,
+    double,
+    dtype,
+    generic,
+    intc,
+    long,
+    longdouble,
+    longlong,
+    ndarray,
+    short,
+    single,
+    ubyte,
+    uintc,
+    ulong,
+    ulonglong,
+    ushort,
+    void,
+)
+from numpy._core._internal import _ctypes
+from numpy._core.multiarray import flagsobj
+from numpy._typing import (
+    DTypeLike,
+    NDArray,
+    _AnyShape,
+    _ArrayLike,
+    _BoolCodes,
+    _ByteCodes,
+    _DoubleCodes,
+    _DTypeLike,
+    _IntCCodes,
+    _LongCodes,
+    _LongDoubleCodes,
+    _LongLongCodes,
+    _ShapeLike,
+    _ShortCodes,
+    _SingleCodes,
+    _UByteCodes,
+    _UIntCCodes,
+    _ULongCodes,
+    _ULongLongCodes,
+    _UShortCodes,
+    _VoidDTypeLike,
+)
+
+__all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"]
+
+# TODO: Add a proper `_Shape` bound once we've got variadic typevars
+_DTypeT = TypeVar("_DTypeT", bound=dtype)
+_DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None)
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+
+_FlagsKind: TypeAlias = L[
+    'C_CONTIGUOUS', 'CONTIGUOUS', 'C',
+    'F_CONTIGUOUS', 'FORTRAN', 'F',
+    'ALIGNED', 'A',
+    'WRITEABLE', 'W',
+    'OWNDATA', 'O',
+    'WRITEBACKIFCOPY', 'X',
+]
+
+# TODO: Add a shape typevar once we have variadic typevars (PEP 646)
+class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]):
+    # In practice these 4 classvars are defined in the dynamic class
+    # returned by `ndpointer`
+    _dtype_: ClassVar[_DTypeOptionalT]
+    _shape_: ClassVar[None]
+    _ndim_: ClassVar[int | None]
+    _flags_: ClassVar[list[_FlagsKind] | None]
+
+    @overload
+    @classmethod
+    def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ...
+    @overload
+    @classmethod
+    def from_param(cls: type[_ndptr[_DTypeT]], obj: ndarray[Any, _DTypeT]) -> _ctypes[Any]: ...
+
+class _concrete_ndptr(_ndptr[_DTypeT]):
+    _dtype_: ClassVar[_DTypeT]
+    _shape_: ClassVar[_AnyShape]
+    @property
+    def contents(self) -> ndarray[_AnyShape, _DTypeT]: ...
+
+def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ...
+
+c_intp = _c_intp
+
+@overload
+def ndpointer(
+    dtype: None = ...,
+    ndim: int = ...,
+    shape: _ShapeLike | None = ...,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_ndptr[None]]: ...
+@overload
+def ndpointer(
+    dtype: _DTypeLike[_ScalarT],
+    ndim: int = ...,
+    *,
+    shape: _ShapeLike,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ...
+@overload
+def ndpointer(
+    dtype: DTypeLike,
+    ndim: int = ...,
+    *,
+    shape: _ShapeLike,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_concrete_ndptr[dtype]]: ...
+@overload
+def ndpointer(
+    dtype: _DTypeLike[_ScalarT],
+    ndim: int = ...,
+    shape: None = ...,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_ndptr[dtype[_ScalarT]]]: ...
+@overload
+def ndpointer(
+    dtype: DTypeLike,
+    ndim: int = ...,
+    shape: None = ...,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_ndptr[dtype]]: ...
+
+@overload
+def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ...
+@overload
+def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ...
+@overload
+def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ...
+@overload
+def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ...
+@overload
+def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ...
+@overload
+def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ...
+@overload
+def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ...
+@overload
+def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ...
+@overload
+def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ...
+@overload
+def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ...
+@overload
+def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ...
+@overload
+def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ...
+@overload
+def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ...  # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes_type(dtype: str) -> type[Any]: ...
+
+@overload
+def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ...
+@overload
+def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ...
+@overload
+def as_array(obj: object, shape: _ShapeLike | None = ...) -> NDArray[Any]: ...
+
+@overload
+def as_ctypes(obj: np.bool) -> ctypes.c_bool: ...
+@overload
+def as_ctypes(obj: byte) -> ctypes.c_byte: ...
+@overload
+def as_ctypes(obj: short) -> ctypes.c_short: ...
+@overload
+def as_ctypes(obj: intc) -> ctypes.c_int: ...
+@overload
+def as_ctypes(obj: long) -> ctypes.c_long: ...
+@overload
+def as_ctypes(obj: longlong) -> ctypes.c_longlong: ...
+@overload
+def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ...
+@overload
+def as_ctypes(obj: ushort) -> ctypes.c_ushort: ...
+@overload
+def as_ctypes(obj: uintc) -> ctypes.c_uint: ...
+@overload
+def as_ctypes(obj: ulong) -> ctypes.c_ulong: ...
+@overload
+def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ...
+@overload
+def as_ctypes(obj: single) -> ctypes.c_float: ...
+@overload
+def as_ctypes(obj: double) -> ctypes.c_double: ...
+@overload
+def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ...
+@overload
+def as_ctypes(obj: void) -> Any: ...  # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ...
+@overload
+def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ...
+@overload
+def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ...
+@overload
+def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ...
+@overload
+def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ...
+@overload
+def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ...
+@overload
+def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ...
+@overload
+def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ...
+@overload
+def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ...
+@overload
+def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ...
+@overload
+def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ...
+@overload
+def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ...  # `ctypes.Union` or `ctypes.Structure`
diff --git a/.venv/lib/python3.12/site-packages/numpy/doc/__pycache__/ufuncs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/doc/__pycache__/ufuncs.cpython-312.pyc
new file mode 100644
index 0000000..54b9714
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/doc/__pycache__/ufuncs.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/doc/ufuncs.py b/.venv/lib/python3.12/site-packages/numpy/doc/ufuncs.py
new file mode 100644
index 0000000..7324168
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/doc/ufuncs.py
@@ -0,0 +1,138 @@
+"""
+===================
+Universal Functions
+===================
+
+Ufuncs are, generally speaking, mathematical functions or operations that are
+applied element-by-element to the contents of an array. That is, the result
+in each output array element only depends on the value in the corresponding
+input array (or arrays) and on no other array elements. NumPy comes with a
+large suite of ufuncs, and scipy extends that suite substantially. The simplest
+example is the addition operator: ::
+
+ >>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
+ array([1, 3, 2, 6])
+
+The ufunc module lists all the available ufuncs in numpy. Documentation on
+the specific ufuncs may be found in those modules. This documentation is
+intended to address the more general aspects of ufuncs common to most of
+them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
+have equivalent functions defined (e.g., add() for +).
+
+Type coercion
+=============
+
+What happens when a binary operator (e.g., +, -, \\*, /, etc.) deals with arrays of
+two different types? What is the type of the result? Typically, the result is
+the higher of the two types. For example: ::
+
+ float32 + float64 -> float64
+ int8 + int32 -> int32
+ int16 + float32 -> float32
+ float32 + complex64 -> complex64
+
+There are some less obvious cases, generally involving mixes of types
+(e.g., uints, ints and floats), where a type of equal bit size cannot
+hold all the information of the other; some examples are int32 vs.
+float32 or uint32 vs. int32. Generally, the result is a higher type of
+larger size than both (if available). So: ::
+
+ int32 + float32 -> float64
+ uint32 + int32 -> int64
+
+Finally, the type coercion behavior when expressions involve Python
+scalars is different than that seen for arrays. Since Python has a
+limited number of types, combining a Python int with a dtype=np.int8
+array does not coerce to the higher type but instead, the type of the
+array prevails. So the rule for Python scalars combined with arrays is
+that the result will be that of the array equivalent of the Python scalar
+if the Python scalar is of a higher 'kind' than the array (e.g., float
+vs. int); otherwise the resultant type will be that of the array.
+For example: ::
+
+  Python int + int8 -> int8
+  Python float + int8 -> float64
+
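+This can be checked interactively; the scalar rules above give, for
+example (a quick sketch): ::
+
+ >>> (1 + np.arange(3, dtype=np.int8)).dtype
+ dtype('int8')
+ >>> (1.0 + np.arange(3, dtype=np.int8)).dtype
+ dtype('float64')
+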
+ufunc methods
+=============
+
+Binary ufuncs support 4 methods.
+
+**.reduce(arr)** applies the binary operator to elements of the array in
+  sequence. For example: ::
+
+ >>> np.add.reduce(np.arange(10))  # adds all elements of array
+ 45
+
+For multidimensional arrays, the first dimension is reduced by default: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5))
+ array([ 5,  7,  9, 11, 13])
+
+The axis keyword can be used to specify different axes to reduce: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
+ array([10, 35])
+
+**.accumulate(arr)** applies the binary operator and generates an
+equivalently shaped array that includes the accumulated amount for each
+element of the array. A couple examples: ::
+
+ >>> np.add.accumulate(np.arange(10))
+ array([ 0,  1,  3,  6, 10, 15, 21, 28, 36, 45])
+ >>> np.multiply.accumulate(np.arange(1,9))
+ array([    1,     2,     6,    24,   120,   720,  5040, 40320])
+
+The behavior for multidimensional arrays is the same as for .reduce(),
+as is the use of the axis keyword.
+
+**.reduceat(arr,indices)** allows one to apply reduce to selected parts
+  of an array. It is a difficult method to understand; see the
+  ``np.ufunc.reduceat`` documentation and the short example below.
+
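+For instance, with start indices ``[0, 4, 6]`` the slices ``0:4``, ``4:6``
+and ``6:`` are each reduced (a short illustrative example): ::
+
+ >>> np.add.reduceat(np.arange(8), [0, 4, 6])
+ array([ 6,  9, 13])
+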
+**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
+  arr2. It will work on multidimensional arrays (the shape of the result is
+  the concatenation of the two input shapes): ::
+
+ >>> np.multiply.outer(np.arange(3),np.arange(4))
+ array([[0, 0, 0, 0],
+        [0, 1, 2, 3],
+        [0, 2, 4, 6]])
+
+Output arguments
+================
+
+All ufuncs accept an optional output array. The array must be of the expected
+output shape. Beware that if the output array is of a different (and lower)
+type than the result, the results may be silently truncated or otherwise
+corrupted in the downcast to the lower type. This usage is useful when one
+wants to avoid creating large temporary arrays; it allows one to reuse the
+same array memory repeatedly (at the expense of not being able to use more
+convenient operator notation in expressions). Note that when the output
+argument is used, the ufunc still returns a reference to the result: ::
+
+ >>> x = np.arange(2)
+ >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x,
+ ...        casting='unsafe')
+ array([0, 2])
+ >>> x
+ array([0, 2])
+
+and & or as ufuncs
+==================
+
+Invariably people try to use the Python 'and' and 'or' as logical operators
+(and quite understandably). But these operators do not behave as normal
+operators since Python treats these quite differently. They cannot be
+overloaded with array equivalents. Thus using 'and' or 'or' with an array
+results in an error. There are two alternatives:
+
+ 1) use the ufunc functions logical_and() and logical_or().
+ 2) use the bitwise operators & and \\|. The drawback of these is that if
+    the arguments to these operators are not boolean arrays, the result is
+    likely incorrect. On the other hand, most usages of logical_and and
+    logical_or are with boolean arrays. As long as one is careful, this is
+    a convenient way to apply these operators.
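+
+For example (illustrative): ::
+
+ >>> a = np.arange(5)
+ >>> np.logical_and(a > 1, a < 4)
+ array([False, False,  True,  True, False])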
+
+"""
diff --git a/.venv/lib/python3.12/site-packages/numpy/dtypes.py b/.venv/lib/python3.12/site-packages/numpy/dtypes.py
new file mode 100644
index 0000000..550a29e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/dtypes.py
@@ -0,0 +1,41 @@
+"""
+This module is home to dtype-specific functionality and the dtype classes.
+For more general information about dtypes, also see `numpy.dtype` and
+:ref:`arrays.dtypes`.
+
+Similar to the builtin ``types`` module, this submodule defines types (classes)
+that are not widely used directly.
+
+.. versionadded:: NumPy 1.25
+
+    The dtypes module is new in NumPy 1.25.  Previously DType classes were
+    only accessible indirectly.
+
+
+DType classes
+-------------
+
+The following are the classes of the corresponding NumPy dtype instances and
+NumPy scalar types.  The classes can be used in ``isinstance`` checks and can
+also be instantiated or used directly.  Direct use of these classes is not
+typical, since their scalar counterparts (e.g. ``np.float64``) or strings
+like ``"float64"`` can be used.
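+
+For example (an illustrative check)::
+
+    >>> import numpy as np
+    >>> isinstance(np.dtype("float64"), np.dtypes.Float64DType)
+    True
+    >>> np.dtypes.Float64DType() == np.dtype("float64")
+    True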
+"""
+
+# See doc/source/reference/routines.dtypes.rst for module-level docs
+
+__all__ = []
+
+
+def _add_dtype_helper(DType, alias):
+    # Function to add DTypes a bit more conveniently without channeling them
+    # through `numpy._core._multiarray_umath` namespace or similar.
+    from numpy import dtypes
+
+    setattr(dtypes, DType.__name__, DType)
+    __all__.append(DType.__name__)
+
+    if alias:
+        alias = alias.removeprefix("numpy.dtypes.")
+        setattr(dtypes, alias, DType)
+        __all__.append(alias)
diff --git a/.venv/lib/python3.12/site-packages/numpy/dtypes.pyi b/.venv/lib/python3.12/site-packages/numpy/dtypes.pyi
new file mode 100644
index 0000000..007dc64
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/dtypes.pyi
@@ -0,0 +1,631 @@
+# ruff: noqa: ANN401
+from typing import (
+    Any,
+    Generic,
+    LiteralString,
+    Never,
+    NoReturn,
+    Self,
+    TypeAlias,
+    final,
+    overload,
+    type_check_only,
+)
+from typing import Literal as L
+
+from typing_extensions import TypeVar
+
+import numpy as np
+
+__all__ = [  # noqa: RUF022
+    'BoolDType',
+    'Int8DType',
+    'ByteDType',
+    'UInt8DType',
+    'UByteDType',
+    'Int16DType',
+    'ShortDType',
+    'UInt16DType',
+    'UShortDType',
+    'Int32DType',
+    'IntDType',
+    'UInt32DType',
+    'UIntDType',
+    'Int64DType',
+    'LongDType',
+    'UInt64DType',
+    'ULongDType',
+    'LongLongDType',
+    'ULongLongDType',
+    'Float16DType',
+    'Float32DType',
+    'Float64DType',
+    'LongDoubleDType',
+    'Complex64DType',
+    'Complex128DType',
+    'CLongDoubleDType',
+    'ObjectDType',
+    'BytesDType',
+    'StrDType',
+    'VoidDType',
+    'DateTime64DType',
+    'TimeDelta64DType',
+    'StringDType',
+]
+
+# Helper base classes (typing-only)
+
+_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)
+
+@type_check_only
+class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]):  # type: ignore[misc]  # pyright: ignore[reportGeneralTypeIssues]
+    names: None  # pyright: ignore[reportIncompatibleVariableOverride]
+    def __new__(cls, /) -> Self: ...
+    def __getitem__(self, key: Any, /) -> NoReturn: ...
+    @property
+    def base(self) -> np.dtype[_ScalarT_co]: ...
+    @property
+    def fields(self) -> None: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+    @property
+    def subdtype(self) -> None: ...
+
+@type_check_only
+class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]):  # type: ignore[misc]
+    @property
+    def flags(self) -> L[0]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+
+# Helper mixins (typing-only):
+
+_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True)
+_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True)
+_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True)
+
+@type_check_only
+class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]):
+    @final
+    @property
+    def kind(self) -> _KindT_co: ...
+    @final
+    @property
+    def char(self) -> _CharT_co: ...
+    @final
+    @property
+    def num(self) -> _NumT_co: ...
+
+@type_check_only
+class _NoOrder:
+    @final
+    @property
+    def byteorder(self) -> L["|"]: ...
+
+@type_check_only
+class _NativeOrder:
+    @final
+    @property
+    def byteorder(self) -> L["="]: ...
+
+_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True)
+_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int)
+
+@type_check_only
+class _NBit(Generic[_DataSize_co, _ItemSize_co]):
+    @final
+    @property
+    def alignment(self) -> _DataSize_co: ...
+    @final
+    @property
+    def itemsize(self) -> _ItemSize_co: ...
+
+@type_check_only
+class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ...
+
+# Boolean:
+
+@final
+class BoolDType(  # type: ignore[misc]
+    _TypeCodes[L["b"], L["?"], L[0]],
+    _8Bit,
+    _LiteralDType[np.bool],
+):
+    @property
+    def name(self) -> L["bool"]: ...
+    @property
+    def str(self) -> L["|b1"]: ...
+
+# Sized integers:
+
+@final
+class Int8DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["b"], L[1]],
+    _8Bit,
+    _LiteralDType[np.int8],
+):
+    @property
+    def name(self) -> L["int8"]: ...
+    @property
+    def str(self) -> L["|i1"]: ...
+
+@final
+class UInt8DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["B"], L[2]],
+    _8Bit,
+    _LiteralDType[np.uint8],
+):
+    @property
+    def name(self) -> L["uint8"]: ...
+    @property
+    def str(self) -> L["|u1"]: ...
+
+@final
+class Int16DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["h"], L[3]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.int16],
+):
+    @property
+    def name(self) -> L["int16"]: ...
+    @property
+    def str(self) -> L["i2"]: ...
+
+@final
+class UInt16DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["H"], L[4]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.uint16],
+):
+    @property
+    def name(self) -> L["uint16"]: ...
+    @property
+    def str(self) -> L["u2"]: ...
+
+@final
+class Int32DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["i", "l"], L[5, 7]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.int32],
+):
+    @property
+    def name(self) -> L["int32"]: ...
+    @property
+    def str(self) -> L["i4"]: ...
+
+@final
+class UInt32DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["I", "L"], L[6, 8]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.uint32],
+):
+    @property
+    def name(self) -> L["uint32"]: ...
+    @property
+    def str(self) -> L["u4"]: ...
+
+@final
+class Int64DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["l", "q"], L[7, 9]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.int64],
+):
+    @property
+    def name(self) -> L["int64"]: ...
+    @property
+    def str(self) -> L["i8"]: ...
+
+@final
+class UInt64DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["L", "Q"], L[8, 10]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.uint64],
+):
+    @property
+    def name(self) -> L["uint64"]: ...
+    @property
+    def str(self) -> L["u8"]: ...
+
+# Standard C-named version/alias:
+# NOTE: Don't make these `Final`: it will break stubtest
+ByteDType = Int8DType
+UByteDType = UInt8DType
+ShortDType = Int16DType
+UShortDType = UInt16DType
+
+@final
+class IntDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["i"], L[5]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.intc],
+):
+    @property
+    def name(self) -> L["int32"]: ...
+    @property
+    def str(self) -> L["i4"]: ...
+
+@final
+class UIntDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["I"], L[6]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.uintc],
+):
+    @property
+    def name(self) -> L["uint32"]: ...
+    @property
+    def str(self) -> L["u4"]: ...
+
+@final
+class LongDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["l"], L[7]],
+    _NativeOrder,
+    _NBit[L[4, 8], L[4, 8]],
+    _LiteralDType[np.long],
+):
+    @property
+    def name(self) -> L["int32", "int64"]: ...
+    @property
+    def str(self) -> L["i4", "i8"]: ...
+
+@final
+class ULongDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["L"], L[8]],
+    _NativeOrder,
+    _NBit[L[4, 8], L[4, 8]],
+    _LiteralDType[np.ulong],
+):
+    @property
+    def name(self) -> L["uint32", "uint64"]: ...
+    @property
+    def str(self) -> L["u4", "u8"]: ...
+
+@final
+class LongLongDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["q"], L[9]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.longlong],
+):
+    @property
+    def name(self) -> L["int64"]: ...
+    @property
+    def str(self) -> L["i8"]: ...
+
+@final
+class ULongLongDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["Q"], L[10]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.ulonglong],
+):
+    @property
+    def name(self) -> L["uint64"]: ...
+    @property
+    def str(self) -> L["u8"]: ...
+
+# Floats:
+
+@final
+class Float16DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["e"], L[23]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.float16],
+):
+    @property
+    def name(self) -> L["float16"]: ...
+    @property
+    def str(self) -> L["f2"]: ...
+
+@final
+class Float32DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["f"], L[11]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.float32],
+):
+    @property
+    def name(self) -> L["float32"]: ...
+    @property
+    def str(self) -> L["f4"]: ...
+
+@final
+class Float64DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["d"], L[12]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.float64],
+):
+    @property
+    def name(self) -> L["float64"]: ...
+    @property
+    def str(self) -> L["f8"]: ...
+
+@final
+class LongDoubleDType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["g"], L[13]],
+    _NativeOrder,
+    _NBit[L[8, 12, 16], L[8, 12, 16]],
+    _LiteralDType[np.longdouble],
+):
+    @property
+    def name(self) -> L["float64", "float96", "float128"]: ...
+    @property
+    def str(self) -> L["f8", "f12", "f16"]: ...
+
+# Complex:
+
+@final
+class Complex64DType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["F"], L[14]],
+    _NativeOrder,
+    _NBit[L[4], L[8]],
+    _LiteralDType[np.complex64],
+):
+    @property
+    def name(self) -> L["complex64"]: ...
+    @property
+    def str(self) -> L["c8"]: ...
+
+@final
+class Complex128DType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["D"], L[15]],
+    _NativeOrder,
+    _NBit[L[8], L[16]],
+    _LiteralDType[np.complex128],
+):
+    @property
+    def name(self) -> L["complex128"]: ...
+    @property
+    def str(self) -> L["c16"]: ...
+
+@final
+class CLongDoubleDType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["G"], L[16]],
+    _NativeOrder,
+    _NBit[L[8, 12, 16], L[16, 24, 32]],
+    _LiteralDType[np.clongdouble],
+):
+    @property
+    def name(self) -> L["complex128", "complex192", "complex256"]: ...
+    @property
+    def str(self) -> L["c16", "c24", "c32"]: ...
+
+# Python objects:
+
+@final
+class ObjectDType(  # type: ignore[misc]
+    _TypeCodes[L["O"], L["O"], L[17]],
+    _NoOrder,
+    _NBit[L[8], L[8]],
+    _SimpleDType[np.object_],
+):
+    @property
+    def hasobject(self) -> L[True]: ...
+    @property
+    def name(self) -> L["object"]: ...
+    @property
+    def str(self) -> L["|O"]: ...
+
+# Flexible:
+
+@final
+class BytesDType(  # type: ignore[misc]
+    _TypeCodes[L["S"], L["S"], L[18]],
+    _NoOrder,
+    _NBit[L[1], _ItemSize_co],
+    _SimpleDType[np.bytes_],
+    Generic[_ItemSize_co],
+):
+    def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+@final
+class StrDType(  # type: ignore[misc]
+    _TypeCodes[L["U"], L["U"], L[19]],
+    _NativeOrder,
+    _NBit[L[4], _ItemSize_co],
+    _SimpleDType[np.str_],
+    Generic[_ItemSize_co],
+):
+    def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+@final
+class VoidDType(  # type: ignore[misc]
+    _TypeCodes[L["V"], L["V"], L[20]],
+    _NoOrder,
+    _NBit[L[1], _ItemSize_co],
+    np.dtype[np.void],  # pyright: ignore[reportGeneralTypeIssues]
+    Generic[_ItemSize_co],
+):
+    # NOTE: `VoidDType(...)` raises a `TypeError` at the moment
+    def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ...
+    @property
+    def base(self) -> Self: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+    @property
+    def subdtype(self) -> None: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+# Other:
+
+_DateUnit: TypeAlias = L["Y", "M", "W", "D"]
+_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"]
+_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit
+
+@final
+class DateTime64DType(  # type: ignore[misc]
+    _TypeCodes[L["M"], L["M"], L[21]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.datetime64],
+):
+    # NOTE: `DateTime64DType(...)` raises a `TypeError` at the moment
+    # TODO: Once implemented, don't forget the `unit: L["μs"]` overload.
+    def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ...
+    @property
+    def name(self) -> L[
+        "datetime64",
+        "datetime64[Y]",
+        "datetime64[M]",
+        "datetime64[W]",
+        "datetime64[D]",
+        "datetime64[h]",
+        "datetime64[m]",
+        "datetime64[s]",
+        "datetime64[ms]",
+        "datetime64[us]",
+        "datetime64[ns]",
+        "datetime64[ps]",
+        "datetime64[fs]",
+        "datetime64[as]",
+    ]: ...
+    @property
+    def str(self) -> L[
+        "M8",
+        "M8[Y]",
+        "M8[M]",
+        "M8[W]",
+        "M8[D]",
+        "M8[h]",
+        "M8[m]",
+        "M8[s]",
+        "M8[ms]",
+        "M8[us]",
+        "M8[ns]",
+        "M8[ps]",
+        "M8[fs]",
+        "M8[as]",
+    ]: ...
+
+@final
+class TimeDelta64DType(  # type: ignore[misc]
+    _TypeCodes[L["m"], L["m"], L[22]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.timedelta64],
+):
+    # NOTE: `TimeDelta64DType(...)` raises a `TypeError` at the moment
+    # TODO: Once implemented, don't forget to overload on `unit: L["μs"]`.
+    def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ...
+    @property
+    def name(self) -> L[
+        "timedelta64",
+        "timedelta64[Y]",
+        "timedelta64[M]",
+        "timedelta64[W]",
+        "timedelta64[D]",
+        "timedelta64[h]",
+        "timedelta64[m]",
+        "timedelta64[s]",
+        "timedelta64[ms]",
+        "timedelta64[us]",
+        "timedelta64[ns]",
+        "timedelta64[ps]",
+        "timedelta64[fs]",
+        "timedelta64[as]",
+    ]: ...
+    @property
+    def str(self) -> L[
+        "m8",
+        "m8[Y]",
+        "m8[M]",
+        "m8[W]",
+        "m8[D]",
+        "m8[h]",
+        "m8[m]",
+        "m8[s]",
+        "m8[ms]",
+        "m8[us]",
+        "m8[ns]",
+        "m8[ps]",
+        "m8[fs]",
+        "m8[as]",
+    ]: ...
+
+_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True)
+
+@final
+class StringDType(  # type: ignore[misc]
+    _TypeCodes[L["T"], L["T"], L[2056]],
+    _NativeOrder,
+    _NBit[L[8], L[16]],
+    # TODO(jorenham): change once we have a string scalar type:
+    # https://github.com/numpy/numpy/issues/28165
+    np.dtype[str],  # type: ignore[type-var]  # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments]
+    Generic[_NaObjectT_co],
+):
+    @property
+    def na_object(self) -> _NaObjectT_co: ...
+    @property
+    def coerce(self) -> L[True]: ...
+
+    #
+    @overload
+    def __new__(cls, /, *, coerce: bool = True) -> Self: ...
+    @overload
+    def __new__(cls, /, *, na_object: _NaObjectT_co, coerce: bool = True) -> Self: ...
+
+    #
+    def __getitem__(self, key: Never, /) -> NoReturn: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    @property
+    def fields(self) -> None: ...
+    @property
+    def base(self) -> Self: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+
+    #
+    @property
+    def name(self) -> L["StringDType64", "StringDType128"]: ...
+    @property
+    def subdtype(self) -> None: ...
+    @property
+    def type(self) -> type[str]: ...
+    @property
+    def str(self) -> L["|T8", "|T16"]: ...
+
+    #
+    @property
+    def hasobject(self) -> L[True]: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/exceptions.py b/.venv/lib/python3.12/site-packages/numpy/exceptions.py
new file mode 100644
index 0000000..0e8688a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/exceptions.py
@@ -0,0 +1,247 @@
+"""
+Exceptions and Warnings
+=======================
+
+General exceptions used by NumPy.  Note that some exceptions may be module
+specific, such as linear algebra errors.
+
+.. versionadded:: NumPy 1.25
+
+    The exceptions module is new in NumPy 1.25.  Older exceptions remain
+    available through the main NumPy namespace for compatibility.
+
+.. currentmodule:: numpy.exceptions
+
+Warnings
+--------
+.. autosummary::
+   :toctree: generated/
+
+   ComplexWarning             Given when converting complex to real.
+   VisibleDeprecationWarning  Same as a DeprecationWarning, but more visible.
+   RankWarning                Issued when the design matrix is rank deficient.
+
+Exceptions
+----------
+.. autosummary::
+   :toctree: generated/
+
+   AxisError             Given when an axis was invalid.
+   DTypePromotionError   Given when no common dtype could be found.
+   TooHardError          Error specific to `numpy.shares_memory`.
+
+"""
+
+
+__all__ = [
+    "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning",
+    "TooHardError", "AxisError", "DTypePromotionError"]
+
+
+# Disallow reloading this module so as to preserve the identities of the
+# classes defined here.
+if '_is_loaded' in globals():
+    raise RuntimeError('Reloading numpy.exceptions is not allowed')
+_is_loaded = True
+
+
+class ComplexWarning(RuntimeWarning):
+    """
+    The warning raised when casting a complex dtype to a real dtype.
+
+    As implemented, casting a complex number to a real discards its imaginary
+    part, but this behavior may not be what the user actually wants.
+
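+    One way to observe the warning (an illustrative sketch)::
+
+        >>> import warnings
+        >>> import numpy as np
+        >>> with warnings.catch_warnings(record=True) as w:
+        ...     warnings.simplefilter("always")
+        ...     _ = np.array([1 + 2j]).astype(np.float64)
+        >>> w[0].category.__name__
+        'ComplexWarning'
+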
+    """
+    pass
+
+
+class ModuleDeprecationWarning(DeprecationWarning):
+    """Module deprecation warning.
+
+    .. warning::
+
+        This warning should not be used, since nose testing is not relevant
+        anymore.
+
+    The nose tester turns ordinary Deprecation warnings into test failures.
+    That makes it hard to deprecate whole modules, because they get
+    imported by default. So this is a special Deprecation warning that the
+    nose tester will let pass without making tests fail.
+
+    """
+    pass
+
+
+class VisibleDeprecationWarning(UserWarning):
+    """Visible deprecation warning.
+
+    By default, Python will not show deprecation warnings, so this class
+    can be used when a very visible warning is helpful, for example because
+    the usage is most likely a user bug.
+
+    """
+    pass
+
+
+class RankWarning(RuntimeWarning):
+    """Matrix rank warning.
+
+    Issued by polynomial functions when the design matrix is rank deficient.
+
+    """
+    pass
+
+
+# Exception used in shares_memory()
+class TooHardError(RuntimeError):
+    """``max_work`` was exceeded.
+
+    This is raised whenever the maximum number of candidate solutions
+    to consider specified by the ``max_work`` parameter is exceeded.
+    Assigning a finite number to ``max_work`` may have caused the operation
+    to fail.
+
+    """
+    pass
+
+
+class AxisError(ValueError, IndexError):
+    """Axis supplied was invalid.
+
+    This is raised whenever an ``axis`` parameter is specified that is larger
+    than the number of array dimensions.
+    For compatibility with code written against older numpy versions, which
+    raised a mixture of :exc:`ValueError` and :exc:`IndexError` for this
+    situation, this exception subclasses both to ensure that
+    ``except ValueError`` and ``except IndexError`` statements continue
+    to catch ``AxisError``.
+
+    Parameters
+    ----------
+    axis : int or str
+        The out of bounds axis or a custom exception message.
+        If an axis is provided, then `ndim` should be specified as well.
+    ndim : int, optional
+        The number of array dimensions.
+    msg_prefix : str, optional
+        A prefix for the exception message.
+
+    Attributes
+    ----------
+    axis : int, optional
+        The out of bounds axis or ``None`` if a custom exception
+        message was provided. This should be the axis as passed by
+        the user, before any normalization to resolve negative indices.
+
+        .. versionadded:: 1.22
+    ndim : int, optional
+        The number of array dimensions or ``None`` if a custom exception
+        message was provided.
+
+        .. versionadded:: 1.22
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> array_1d = np.arange(10)
+    >>> np.cumsum(array_1d, axis=1)
+    Traceback (most recent call last):
+      ...
+    numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1
+
+    Negative axes are preserved:
+
+    >>> np.cumsum(array_1d, axis=-2)
+    Traceback (most recent call last):
+      ...
+    numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1
+
+    The class constructor generally takes the axis and the array's
+    dimensionality as arguments:
+
+    >>> print(np.exceptions.AxisError(2, 1, msg_prefix='error'))
+    error: axis 2 is out of bounds for array of dimension 1
+
+    Alternatively, a custom exception message can be passed:
+
+    >>> print(np.exceptions.AxisError('Custom error message'))
+    Custom error message
+
+    """
+
+    __slots__ = ("_msg", "axis", "ndim")
+
+    def __init__(self, axis, ndim=None, msg_prefix=None):
+        if ndim is msg_prefix is None:
+            # single-argument form: directly set the error message
+            self._msg = axis
+            self.axis = None
+            self.ndim = None
+        else:
+            self._msg = msg_prefix
+            self.axis = axis
+            self.ndim = ndim
+
+    def __str__(self):
+        axis = self.axis
+        ndim = self.ndim
+
+        if axis is ndim is None:
+            return self._msg
+        else:
+            msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
+            if self._msg is not None:
+                msg = f"{self._msg}: {msg}"
+            return msg
+
+
+class DTypePromotionError(TypeError):
+    """Multiple DTypes could not be converted to a common one.
+
+    This exception derives from ``TypeError`` and is raised whenever dtypes
+    cannot be converted to a single common one.  This can be because they
+    are of a different category/class or incompatible instances of the same
+    one (see Examples).
+
+    Notes
+    -----
+    Many functions will use promotion to find the correct result and
+    implementation.  For these functions the error will typically be chained
+    with a more specific error indicating that no implementation was found
+    for the input dtypes.
+
+    Typically promotion should be considered "invalid" between the dtypes of
+    two arrays when `arr1 == arr2` can safely return all ``False`` because the
+    dtypes are fundamentally different.
+
+    Examples
+    --------
+    Datetimes and complex numbers are incompatible classes and cannot be
+    promoted:
+
+    >>> import numpy as np
+    >>> np.result_type(np.dtype("M8[s]"), np.complex128)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+     ...
+    DTypePromotionError: The DType <class 'numpy.dtype[datetime64]'> could not
+    be promoted by <class 'numpy.dtype[complex128]'>. This means that no common
+    DType exists for the given inputs. For example they cannot be stored in a
+    single array unless the dtype is `object`. The full list of DTypes is:
+    (<class 'numpy.dtype[datetime64]'>, <class 'numpy.dtype[complex128]'>)
+
+    Structured dtypes can also mismatch; the same ``DTypePromotionError``
+    is raised when, for example, two structured dtypes differ in their
+    number of fields:
+
+    >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)])
+    >>> dtype2 = np.dtype([("field1", np.float64)])
+    >>> np.promote_types(dtype1, dtype2)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+     ...
+    DTypePromotionError: field names `('field1', 'field2')` and `('field1',)`
+    mismatch.
+
+    """  # noqa: E501
+    pass
diff --git a/.venv/lib/python3.12/site-packages/numpy/exceptions.pyi b/.venv/lib/python3.12/site-packages/numpy/exceptions.pyi
new file mode 100644
index 0000000..9ed5092
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/exceptions.pyi
@@ -0,0 +1,25 @@
+from typing import overload
+
+__all__ = [
+    "ComplexWarning",
+    "VisibleDeprecationWarning",
+    "ModuleDeprecationWarning",
+    "TooHardError",
+    "AxisError",
+    "DTypePromotionError",
+]
+
+class ComplexWarning(RuntimeWarning): ...
+class ModuleDeprecationWarning(DeprecationWarning): ...
+class VisibleDeprecationWarning(UserWarning): ...
+class RankWarning(RuntimeWarning): ...
+class TooHardError(RuntimeError): ...
+class DTypePromotionError(TypeError): ...
+
+class AxisError(ValueError, IndexError):
+    axis: int | None
+    ndim: int | None
+    @overload
+    def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ...
+    @overload
+    def __init__(self, axis: int, ndim: int, msg_prefix: str | None = ...) -> None: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__init__.py b/.venv/lib/python3.12/site-packages/numpy/f2py/__init__.py
new file mode 100644
index 0000000..e34dd99
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/__init__.py
@@ -0,0 +1,86 @@
+"""Fortran to Python Interface Generator.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the terms
+of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
+"""
+__all__ = ['run_main', 'get_include']
+
+import os
+import subprocess
+import sys
+import warnings
+
+from numpy.exceptions import VisibleDeprecationWarning
+
+from . import diagnose, f2py2e
+
+run_main = f2py2e.run_main
+main = f2py2e.main
+
+
+def get_include():
+    """
+    Return the directory that contains the ``fortranobject.c`` and ``.h`` files.
+
+    .. note::
+
+        This function is not needed when building an extension with
+        `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files
+        in one go.
+
+    Python extension modules built with f2py-generated code need to use
+    ``fortranobject.c`` as a source file, and include the ``fortranobject.h``
+    header. This function can be used to obtain the directory containing
+    both of these files.
+
+    Returns
+    -------
+    include_path : str
+        Absolute path to the directory containing ``fortranobject.c`` and
+        ``fortranobject.h``.
+
+    Notes
+    -----
+    .. versionadded:: 1.21.1
+
+    Unless the build system you are using has specific support for f2py,
+    building a Python extension using a ``.pyf`` signature file is a two-step
+    process. For a module ``mymod``:
+
+    * Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This
+      generates ``mymodmodule.c`` and (if needed)
+      ``mymod-f2pywrappers.f`` files next to ``mymod.pyf``.
+    * Step 2: build your Python extension module. This requires the
+      following source files:
+
+      * ``mymodmodule.c``
+      * ``mymod-f2pywrappers.f`` (if it was generated in Step 1)
+      * ``fortranobject.c``
+
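+    For example, the returned path can be handed to a compiler by hand (a
+    sketch; compiler names and flags vary by platform): ::
+
+        >>> import numpy.f2py
+        >>> incdir = numpy.f2py.get_include()  # contains fortranobject.c/.h
+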
+    See Also
+    --------
+    numpy.get_include : function that returns the numpy include directory
+
+    """
+    return os.path.join(os.path.dirname(__file__), 'src')
+
+
+def __getattr__(attr):
+
+    # Avoid importing things that aren't needed for building
+    # which might import the main numpy module
+    if attr == "test":
+        from numpy._pytesttester import PytestTester
+        test = PytestTester(__name__)
+        return test
+
+    else:
+        raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
+
+
+def __dir__():
+    return list(globals().keys() | {"test"})
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/__init__.pyi
new file mode 100644
index 0000000..d12f47e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/__init__.pyi
@@ -0,0 +1,6 @@
+from .f2py2e import main as main
+from .f2py2e import run_main
+
+__all__ = ["get_include", "run_main"]
+
+def get_include() -> str: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__main__.py b/.venv/lib/python3.12/site-packages/numpy/f2py/__main__.py
new file mode 100644
index 0000000..936a753
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/__main__.py
@@ -0,0 +1,5 @@
+# See:
+# https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e
+from numpy.f2py.f2py2e import main
+
+main()
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..ab6f51d
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__main__.cpython-312.pyc
new file mode 100644
index 0000000..2026457
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__main__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__version__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__version__.cpython-312.pyc
new file mode 100644
index 0000000..048faed
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/__version__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/_isocbind.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/_isocbind.cpython-312.pyc
new file mode 100644
index 0000000..9f07cc4
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/_isocbind.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc
new file mode 100644
index 0000000..9a8f2e8
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc
new file mode 100644
index 0000000..9adc23b
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-312.pyc
new file mode 100644
index 0000000..b6cd36e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-312.pyc
new file mode 100644
index 0000000..b593078
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-312.pyc
new file mode 100644
index 0000000..2dc1138
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/common_rules.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/common_rules.cpython-312.pyc
new file mode 100644
index 0000000..8807e6a
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/common_rules.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-312.pyc
new file mode 100644
index 0000000..7c0fb64
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/diagnose.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/diagnose.cpython-312.pyc
new file mode 100644
index 0000000..e7e280b
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/diagnose.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-312.pyc
new file mode 100644
index 0000000..6e1525a
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc
new file mode 100644
index 0000000..932155e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/func2subr.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/func2subr.cpython-312.pyc
new file mode 100644
index 0000000..45d0037
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/func2subr.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/rules.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/rules.cpython-312.pyc
new file mode 100644
index 0000000..8af7f08
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/rules.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/symbolic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/symbolic.cpython-312.pyc
new file mode 100644
index 0000000..eaae474
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/symbolic.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/use_rules.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/use_rules.cpython-312.pyc
new file mode 100644
index 0000000..727e167
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/__pycache__/use_rules.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__version__.py b/.venv/lib/python3.12/site-packages/numpy/f2py/__version__.py
new file mode 100644
index 0000000..8d12d95
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/__version__.py
@@ -0,0 +1 @@
+from numpy.version import version  # noqa: F401
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/__version__.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/__version__.pyi
new file mode 100644
index 0000000..85b4225
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/__version__.pyi
@@ -0,0 +1 @@
+from numpy.version import version as version
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__init__.py b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__init__.py
new file mode 100644
index 0000000..e91393c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__init__.py
@@ -0,0 +1,9 @@
+def f2py_build_generator(name):
+    if name == "meson":
+        from ._meson import MesonBackend
+        return MesonBackend
+    elif name == "distutils":
+        from ._distutils import DistutilsBackend
+        return DistutilsBackend
+    else:
+        raise ValueError(f"Unknown backend: {name}")
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__init__.pyi
new file mode 100644
index 0000000..43625c6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__init__.pyi
@@ -0,0 +1,5 @@
+from typing import Literal as L
+
+from ._backend import Backend
+
+def f2py_build_generator(name: L["distutils", "meson"]) -> Backend: ...
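A note on the factory above: `f2py_build_generator` returns a backend *class*, not an instance; f2py itself instantiates it with the full set of build parameters. A minimal sketch of how it is consumed (the backend name strings come from the code above; everything else here is illustrative):

```python
# Sketch only: f2py_build_generator dispatches on a backend name and
# returns a Backend subclass; unsupported names raise ValueError.
from numpy.f2py._backends import f2py_build_generator

MesonBackend = f2py_build_generator("meson")
print(MesonBackend.__name__)       # MesonBackend

try:
    f2py_build_generator("ninja")  # not a supported backend name
except ValueError as exc:
    print(exc)                     # Unknown backend: ninja
```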
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..2133a79
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc
new file mode 100644
index 0000000..bd84bd4
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc
new file mode 100644
index 0000000..9d9594c
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc
new file mode 100644
index 0000000..e1a5d84
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_backend.py b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_backend.py
new file mode 100644
index 0000000..5dda400
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_backend.py
@@ -0,0 +1,44 @@
+from abc import ABC, abstractmethod
+
+
+class Backend(ABC):
+    def __init__(
+        self,
+        modulename,
+        sources,
+        extra_objects,
+        build_dir,
+        include_dirs,
+        library_dirs,
+        libraries,
+        define_macros,
+        undef_macros,
+        f2py_flags,
+        sysinfo_flags,
+        fc_flags,
+        flib_flags,
+        setup_flags,
+        remove_build_dir,
+        extra_dat,
+    ):
+        self.modulename = modulename
+        self.sources = sources
+        self.extra_objects = extra_objects
+        self.build_dir = build_dir
+        self.include_dirs = include_dirs
+        self.library_dirs = library_dirs
+        self.libraries = libraries
+        self.define_macros = define_macros
+        self.undef_macros = undef_macros
+        self.f2py_flags = f2py_flags
+        self.sysinfo_flags = sysinfo_flags
+        self.fc_flags = fc_flags
+        self.flib_flags = flib_flags
+        self.setup_flags = setup_flags
+        self.remove_build_dir = remove_build_dir
+        self.extra_dat = extra_dat
+
+    @abstractmethod
+    def compile(self) -> None:
+        """Compile the wrapper."""
+        pass
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_backend.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_backend.pyi
new file mode 100644
index 0000000..ed24519
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_backend.pyi
@@ -0,0 +1,46 @@
+import abc
+from pathlib import Path
+from typing import Any, Final
+
+class Backend(abc.ABC):
+    modulename: Final[str]
+    sources: Final[list[str | Path]]
+    extra_objects: Final[list[str]]
+    build_dir: Final[str | Path]
+    include_dirs: Final[list[str | Path]]
+    library_dirs: Final[list[str | Path]]
+    libraries: Final[list[str]]
+    define_macros: Final[list[tuple[str, str | None]]]
+    undef_macros: Final[list[str]]
+    f2py_flags: Final[list[str]]
+    sysinfo_flags: Final[list[str]]
+    fc_flags: Final[list[str]]
+    flib_flags: Final[list[str]]
+    setup_flags: Final[list[str]]
+    remove_build_dir: Final[bool]
+    extra_dat: Final[dict[str, Any]]
+
+    def __init__(
+        self,
+        /,
+        modulename: str,
+        sources: list[str | Path],
+        extra_objects: list[str],
+        build_dir: str | Path,
+        include_dirs: list[str | Path],
+        library_dirs: list[str | Path],
+        libraries: list[str],
+        define_macros: list[tuple[str, str | None]],
+        undef_macros: list[str],
+        f2py_flags: list[str],
+        sysinfo_flags: list[str],
+        fc_flags: list[str],
+        flib_flags: list[str],
+        setup_flags: list[str],
+        remove_build_dir: bool,
+        extra_dat: dict[str, Any],
+    ) -> None: ...
+
+    #
+    @abc.abstractmethod
+    def compile(self) -> None: ...
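The stub mirrors the runtime ABC one-for-one: sixteen stored build parameters and a single abstract method. A hypothetical subclass (not part of numpy) illustrating the contract:

```python
# Hypothetical subclass, for illustration only: the ABC stores the
# sixteen build parameters verbatim and defers all work to compile().
from numpy.f2py._backends._backend import Backend

class EchoBackend(Backend):
    def compile(self) -> None:
        # A real backend would drive a build system here.
        print(f"would build {self.modulename} from {self.sources}")

echo = EchoBackend(
    "demo", ["demo.f90"], [], "build",  # modulename, sources, extra_objects, build_dir
    [], [], [], [], [],                 # include_dirs, library_dirs, libraries, define_macros, undef_macros
    [], [], [], [], [],                 # f2py_flags, sysinfo_flags, fc_flags, flib_flags, setup_flags
    False, {},                          # remove_build_dir, extra_dat
)
echo.compile()  # prints: would build demo from ['demo.f90']
```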
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_distutils.py b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_distutils.py
new file mode 100644
index 0000000..5c8f109
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_distutils.py
@@ -0,0 +1,76 @@
+import os
+import shutil
+import sys
+import warnings
+
+from numpy.distutils.core import Extension, setup
+from numpy.distutils.misc_util import dict_append
+from numpy.distutils.system_info import get_info
+from numpy.exceptions import VisibleDeprecationWarning
+
+from ._backend import Backend
+
+
+class DistutilsBackend(Backend):
+    def __init__(sef, *args, **kwargs):
+        warnings.warn(
+            "\ndistutils has been deprecated since NumPy 1.26.x\n"
+            "Use the Meson backend instead, or generate wrappers"
+            " without -c and use a custom build script",
+            VisibleDeprecationWarning,
+            stacklevel=2,
+        )
+        super().__init__(*args, **kwargs)
+
+    def compile(self):
+        num_info = {}
+        if num_info:
+            self.include_dirs.extend(num_info.get("include_dirs", []))
+        ext_args = {
+            "name": self.modulename,
+            "sources": self.sources,
+            "include_dirs": self.include_dirs,
+            "library_dirs": self.library_dirs,
+            "libraries": self.libraries,
+            "define_macros": self.define_macros,
+            "undef_macros": self.undef_macros,
+            "extra_objects": self.extra_objects,
+            "f2py_options": self.f2py_flags,
+        }
+
+        if self.sysinfo_flags:
+            for n in self.sysinfo_flags:
+                i = get_info(n)
+                if not i:
+                    print(
+                        f"No {n!r} resources found"
+                        "in system (try `f2py --help-link`)"
+                    )
+                dict_append(ext_args, **i)
+
+        ext = Extension(**ext_args)
+
+        sys.argv = [sys.argv[0]] + self.setup_flags
+        sys.argv.extend(
+            [
+                "build",
+                "--build-temp",
+                self.build_dir,
+                "--build-base",
+                self.build_dir,
+                "--build-platlib",
+                ".",
+                "--disable-optimization",
+            ]
+        )
+
+        if self.fc_flags:
+            sys.argv.extend(["config_fc"] + self.fc_flags)
+        if self.flib_flags:
+            sys.argv.extend(["build_ext"] + self.flib_flags)
+
+        setup(ext_modules=[ext])
+
+        if self.remove_build_dir and os.path.exists(self.build_dir):
+            print(f"Removing build directory {self.build_dir}")
+            shutil.rmtree(self.build_dir)
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_distutils.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_distutils.pyi
new file mode 100644
index 0000000..56bbf7e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_distutils.pyi
@@ -0,0 +1,13 @@
+from typing_extensions import deprecated, override
+
+from ._backend import Backend
+
+class DistutilsBackend(Backend):
+    @deprecated(
+        "distutils has been deprecated since NumPy 1.26.x. Use the Meson backend instead, or generate wrappers without -c and "
+        "use a custom build script"
+    )
+    # NOTE: the `sef` typo matches runtime
+    def __init__(sef, *args: object, **kwargs: object) -> None: ...
+    @override
+    def compile(self) -> None: ...
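A sketch of the deprecation behavior declared above. This assumes an environment where `numpy.distutils` is still importable (Python < 3.12); the argument values are placeholders as in the `Backend` example:

```python
# Sketch: constructing DistutilsBackend emits the
# VisibleDeprecationWarning seen in _distutils.py. Only works where
# numpy.distutils is available (it is absent on Python 3.12+).
import warnings
from numpy.exceptions import VisibleDeprecationWarning
from numpy.f2py._backends._distutils import DistutilsBackend

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    DistutilsBackend(
        "demo", ["demo.f"], [], "build",
        [], [], [], [], [],
        [], [], [], [], [],
        False, {},
    )
assert any(issubclass(w.category, VisibleDeprecationWarning) for w in caught)
```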
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_meson.py b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_meson.py
new file mode 100644
index 0000000..cbd9b0e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_meson.py
@@ -0,0 +1,231 @@
+import errno
+import os
+import re
+import shutil
+import subprocess
+import sys
+from itertools import chain
+from pathlib import Path
+from string import Template
+
+from ._backend import Backend
+
+
+class MesonTemplate:
+    """Template meson build file generation class."""
+
+    def __init__(
+        self,
+        modulename: str,
+        sources: list[Path],
+        deps: list[str],
+        libraries: list[str],
+        library_dirs: list[Path],
+        include_dirs: list[Path],
+        object_files: list[Path],
+        linker_args: list[str],
+        fortran_args: list[str],
+        build_type: str,
+        python_exe: str,
+    ):
+        self.modulename = modulename
+        self.build_template_path = (
+            Path(__file__).parent.absolute() / "meson.build.template"
+        )
+        self.sources = sources
+        self.deps = deps
+        self.libraries = libraries
+        self.library_dirs = library_dirs
+        if include_dirs is not None:
+            self.include_dirs = include_dirs
+        else:
+            self.include_dirs = []
+        self.substitutions = {}
+        self.objects = object_files
+        # Convert args to '' wrapped variant for meson
+        self.fortran_args = [
+            f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x
+            for x in fortran_args
+        ]
+        self.pipeline = [
+            self.initialize_template,
+            self.sources_substitution,
+            self.deps_substitution,
+            self.include_substitution,
+            self.libraries_substitution,
+            self.fortran_args_substitution,
+        ]
+        self.build_type = build_type
+        self.python_exe = python_exe
+        self.indent = " " * 21
+
+    def meson_build_template(self) -> str:
+        if not self.build_template_path.is_file():
+            raise FileNotFoundError(
+                errno.ENOENT,
+                "Meson build template"
+                f" {self.build_template_path.absolute()}"
+                " does not exist.",
+            )
+        return self.build_template_path.read_text()
+
+    def initialize_template(self) -> None:
+        self.substitutions["modulename"] = self.modulename
+        self.substitutions["buildtype"] = self.build_type
+        self.substitutions["python"] = self.python_exe
+
+    def sources_substitution(self) -> None:
+        self.substitutions["source_list"] = ",\n".join(
+            [f"{self.indent}'''{source}'''," for source in self.sources]
+        )
+
+    def deps_substitution(self) -> None:
+        self.substitutions["dep_list"] = f",\n{self.indent}".join(
+            [f"{self.indent}dependency('{dep}')," for dep in self.deps]
+        )
+
+    def libraries_substitution(self) -> None:
+        self.substitutions["lib_dir_declarations"] = "\n".join(
+            [
+                f"lib_dir_{i} = declare_dependency(link_args : ['''-L{lib_dir}'''])"
+                for i, lib_dir in enumerate(self.library_dirs)
+            ]
+        )
+
+        self.substitutions["lib_declarations"] = "\n".join(
+            [
+                f"{lib.replace('.', '_')} = declare_dependency(link_args : ['-l{lib}'])"
+                for lib in self.libraries
+            ]
+        )
+
+        self.substitutions["lib_list"] = f"\n{self.indent}".join(
+            [f"{self.indent}{lib.replace('.', '_')}," for lib in self.libraries]
+        )
+        self.substitutions["lib_dir_list"] = f"\n{self.indent}".join(
+            [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))]
+        )
+
+    def include_substitution(self) -> None:
+        self.substitutions["inc_list"] = f",\n{self.indent}".join(
+            [f"{self.indent}'''{inc}'''," for inc in self.include_dirs]
+        )
+
+    def fortran_args_substitution(self) -> None:
+        if self.fortran_args:
+            self.substitutions["fortran_args"] = (
+                f"{self.indent}fortran_args: [{', '.join(list(self.fortran_args))}],"
+            )
+        else:
+            self.substitutions["fortran_args"] = ""
+
+    def generate_meson_build(self):
+        for node in self.pipeline:
+            node()
+        template = Template(self.meson_build_template())
+        meson_build = template.substitute(self.substitutions)
+        meson_build = meson_build.replace(",,", ",")
+        return meson_build
+
+
+class MesonBackend(Backend):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.dependencies = self.extra_dat.get("dependencies", [])
+        self.meson_build_dir = "bbdir"
+        self.build_type = (
+            "debug" if any("debug" in flag for flag in self.fc_flags) else "release"
+        )
+        self.fc_flags = _get_flags(self.fc_flags)
+
+    def _move_exec_to_root(self, build_dir: Path):
+        walk_dir = Path(build_dir) / self.meson_build_dir
+        path_objects = chain(
+            walk_dir.glob(f"{self.modulename}*.so"),
+            walk_dir.glob(f"{self.modulename}*.pyd"),
+            walk_dir.glob(f"{self.modulename}*.dll"),
+        )
+        # Same behavior as distutils
+        # https://github.com/numpy/numpy/issues/24874#issuecomment-1835632293
+        for path_object in path_objects:
+            dest_path = Path.cwd() / path_object.name
+            if dest_path.exists():
+                dest_path.unlink()
+            shutil.copy2(path_object, dest_path)
+            os.remove(path_object)
+
+    def write_meson_build(self, build_dir: Path) -> None:
+        """Writes the meson build file at specified location"""
+        meson_template = MesonTemplate(
+            self.modulename,
+            self.sources,
+            self.dependencies,
+            self.libraries,
+            self.library_dirs,
+            self.include_dirs,
+            self.extra_objects,
+            self.flib_flags,
+            self.fc_flags,
+            self.build_type,
+            sys.executable,
+        )
+        src = meson_template.generate_meson_build()
+        Path(build_dir).mkdir(parents=True, exist_ok=True)
+        meson_build_file = Path(build_dir) / "meson.build"
+        meson_build_file.write_text(src)
+        return meson_build_file
+
+    def _run_subprocess_command(self, command, cwd):
+        subprocess.run(command, cwd=cwd, check=True)
+
+    def run_meson(self, build_dir: Path):
+        setup_command = ["meson", "setup", self.meson_build_dir]
+        self._run_subprocess_command(setup_command, build_dir)
+        compile_command = ["meson", "compile", "-C", self.meson_build_dir]
+        self._run_subprocess_command(compile_command, build_dir)
+
+    def compile(self) -> None:
+        self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir)
+        self.write_meson_build(self.build_dir)
+        self.run_meson(self.build_dir)
+        self._move_exec_to_root(self.build_dir)
+
+
+def _prepare_sources(mname, sources, bdir):
+    extended_sources = sources.copy()
+    Path(bdir).mkdir(parents=True, exist_ok=True)
+    # Copy sources
+    for source in sources:
+        if Path(source).exists() and Path(source).is_file():
+            shutil.copy(source, bdir)
+    generated_sources = [
+        Path(f"{mname}module.c"),
+        Path(f"{mname}-f2pywrappers2.f90"),
+        Path(f"{mname}-f2pywrappers.f"),
+    ]
+    bdir = Path(bdir)
+    for generated_source in generated_sources:
+        if generated_source.exists():
+            shutil.copy(generated_source, bdir / generated_source.name)
+            extended_sources.append(generated_source.name)
+            generated_source.unlink()
+    extended_sources = [
+        Path(source).name
+        for source in extended_sources
+        if not Path(source).suffix == ".pyf"
+    ]
+    return extended_sources
+
+
+def _get_flags(fc_flags):
+    flag_values = []
+    flag_pattern = re.compile(r"--f(77|90)flags=(.*)")
+    for flag in fc_flags:
+        match_result = flag_pattern.match(flag)
+        if match_result:
+            values = match_result.group(2).strip().split()
+            values = [val.strip("'\"") for val in values]
+            flag_values.extend(values)
+    # Hacky way to preserve order of flags
+    unique_flags = list(dict.fromkeys(flag_values))
+    return unique_flags
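To make the `_get_flags` behavior above concrete: it only harvests values from `--f77flags=`/`--f90flags=` options, strips quoting, and de-duplicates while preserving order; anything else is dropped. A small sketch using the private helper directly:

```python
# Demonstrates the flag harvesting done by _get_flags above.
from numpy.f2py._backends._meson import _get_flags

flags = _get_flags([
    "--f90flags='-O3 -funroll-loops'",
    "--f77flags=-O3",
    "--debug",  # no f77/f90 prefix: ignored by this helper
])
print(flags)  # ['-O3', '-funroll-loops']
```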
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_meson.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_meson.pyi
new file mode 100644
index 0000000..b9f9595
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/_meson.pyi
@@ -0,0 +1,63 @@
+from collections.abc import Callable
+from pathlib import Path
+from typing import Final
+from typing import Literal as L
+
+from typing_extensions import override
+
+from ._backend import Backend
+
+class MesonTemplate:
+    modulename: Final[str]
+    build_template_path: Final[Path]
+    sources: Final[list[str | Path]]
+    deps: Final[list[str]]
+    libraries: Final[list[str]]
+    library_dirs: Final[list[str | Path]]
+    include_dirs: Final[list[str | Path]]
+    substitutions: Final[dict[str, str]]
+    objects: Final[list[str | Path]]
+    fortran_args: Final[list[str]]
+    pipeline: Final[list[Callable[[], None]]]
+    build_type: Final[str]
+    python_exe: Final[str]
+    indent: Final[str]
+
+    def __init__(
+        self,
+        /,
+        modulename: str,
+        sources: list[Path],
+        deps: list[str],
+        libraries: list[str],
+        library_dirs: list[str | Path],
+        include_dirs: list[str | Path],
+        object_files: list[str | Path],
+        linker_args: list[str],
+        fortran_args: list[str],
+        build_type: str,
+        python_exe: str,
+    ) -> None: ...
+
+    #
+    def initialize_template(self) -> None: ...
+    def sources_substitution(self) -> None: ...
+    def deps_substitution(self) -> None: ...
+    def libraries_substitution(self) -> None: ...
+    def include_substitution(self) -> None: ...
+    def fortran_args_substitution(self) -> None: ...
+
+    #
+    def meson_build_template(self) -> str: ...
+    def generate_meson_build(self) -> str: ...
+
+class MesonBackend(Backend):
+    dependencies: list[str]
+    meson_build_dir: L["bbdir"]
+    build_type: L["debug", "release"]
+
+    def __init__(self, /, *args: object, **kwargs: object) -> None: ...
+    def write_meson_build(self, /, build_dir: Path) -> None: ...
+    def run_meson(self, /, build_dir: Path) -> None: ...
+    @override
+    def compile(self) -> None: ...
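How the pieces typed above fit together: `MesonTemplate` runs its substitution pipeline and then fills `meson.build.template`. A sketch with illustrative values (it needs the template file that ships alongside `_meson.py`, and f2py would normally supply real generated sources):

```python
# Sketch: rendering a meson.build string by hand with MesonTemplate.
import sys
from numpy.f2py._backends._meson import MesonTemplate

template = MesonTemplate(
    modulename="fib",
    sources=["fibmodule.c", "fib.f90"],  # placeholder source names
    deps=[],
    libraries=[],
    library_dirs=[],
    include_dirs=[],
    object_files=[],
    linker_args=[],
    fortran_args=["-O3"],
    build_type="release",
    python_exe=sys.executable,
)
print(template.generate_meson_build())  # complete meson.build text
```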
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/meson.build.template b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/meson.build.template
new file mode 100644
index 0000000..fdcc1b1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_backends/meson.build.template
@@ -0,0 +1,55 @@
+project('${modulename}',
+        ['c', 'fortran'],
+        version : '0.1',
+        meson_version: '>= 1.1.0',
+        default_options : [
+                            'warning_level=1',
+                            'buildtype=${buildtype}'
+                          ])
+fc = meson.get_compiler('fortran')
+
+py = import('python').find_installation('''${python}''', pure: false)
+py_dep = py.dependency()
+
+incdir_numpy = run_command(py,
+  ['-c', 'import os; os.chdir(".."); import numpy; print(numpy.get_include())'],
+  check : true
+).stdout().strip()
+
+incdir_f2py = run_command(py,
+    ['-c', 'import os; os.chdir(".."); import numpy.f2py; print(numpy.f2py.get_include())'],
+    check : true
+).stdout().strip()
+
+inc_np = include_directories(incdir_numpy)
+np_dep = declare_dependency(include_directories: inc_np)
+
+incdir_f2py = incdir_numpy / '..' / '..' / 'f2py' / 'src'
+inc_f2py = include_directories(incdir_f2py)
+fortranobject_c = incdir_f2py / 'fortranobject.c'
+
+inc_np = include_directories(incdir_numpy, incdir_f2py)
+# gh-25000
+quadmath_dep = fc.find_library('quadmath', required: false)
+
+${lib_declarations}
+${lib_dir_declarations}
+
+py.extension_module('${modulename}',
+                     [
+${source_list},
+                     fortranobject_c
+                     ],
+                     include_directories: [
+                     inc_np,
+${inc_list}
+                     ],
+                     dependencies : [
+                     py_dep,
+                     quadmath_dep,
+${dep_list}
+${lib_list}
+${lib_dir_list}
+                     ],
+${fortran_args}
+                     install : true)
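The `${...}` fields in the template above are standard-library `string.Template` placeholders, filled from `MesonTemplate.substitutions`. A standalone illustration of the mechanism:

```python
# Standalone illustration of the substitution used on meson.build.template.
from string import Template

t = Template("project('${modulename}', default_options : ['buildtype=${buildtype}'])")
print(t.substitute({"modulename": "fib", "buildtype": "release"}))
# project('fib', default_options : ['buildtype=release'])
```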
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_isocbind.py b/.venv/lib/python3.12/site-packages/numpy/f2py/_isocbind.py
new file mode 100644
index 0000000..3043c5d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_isocbind.py
@@ -0,0 +1,62 @@
+"""
+ISO_C_BINDING maps for f2py2e.
+Only required declarations/macros/functions will be used.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
+"""
+# These map to keys in c2py_map, via forced casting for now, see gh-25229
+iso_c_binding_map = {
+    'integer': {
+        'c_int': 'int',
+        'c_short': 'short',  # 'short' <=> 'int' for now
+        'c_long': 'long',  # 'long' <=> 'int' for now
+        'c_long_long': 'long_long',
+        'c_signed_char': 'signed_char',
+        'c_size_t': 'unsigned',  # size_t <=> 'unsigned' for now
+        'c_int8_t': 'signed_char',  # int8_t <=> 'signed_char' for now
+        'c_int16_t': 'short',  # int16_t <=> 'short' for now
+        'c_int32_t': 'int',  # int32_t <=> 'int' for now
+        'c_int64_t': 'long_long',
+        'c_int_least8_t': 'signed_char',  # int_least8_t <=> 'signed_char' for now
+        'c_int_least16_t': 'short',  # int_least16_t <=> 'short' for now
+        'c_int_least32_t': 'int',  # int_least32_t <=> 'int' for now
+        'c_int_least64_t': 'long_long',
+        'c_int_fast8_t': 'signed_char',  # int_fast8_t <=> 'signed_char' for now
+        'c_int_fast16_t': 'short',  # int_fast16_t <=> 'short' for now
+        'c_int_fast32_t': 'int',  # int_fast32_t <=> 'int' for now
+        'c_int_fast64_t': 'long_long',
+        'c_intmax_t': 'long_long',  # intmax_t <=> 'long_long' for now
+        'c_intptr_t': 'long',  # intptr_t <=> 'long' for now
+        'c_ptrdiff_t': 'long',  # ptrdiff_t <=> 'long' for now
+    },
+    'real': {
+        'c_float': 'float',
+        'c_double': 'double',
+        'c_long_double': 'long_double'
+    },
+    'complex': {
+        'c_float_complex': 'complex_float',
+        'c_double_complex': 'complex_double',
+        'c_long_double_complex': 'complex_long_double'
+    },
+    'logical': {
+        'c_bool': 'unsigned_char'  # _Bool <=> 'unsigned_char' for now
+    },
+    'character': {
+        'c_char': 'char'
+    }
+}
+
+# TODO: See gh-25229
+isoc_c2pycode_map = {}
+iso_c2py_map = {}
+
+isoc_kindmap = {}
+for fortran_type, c_type_dict in iso_c_binding_map.items():
+    for c_type in c_type_dict.keys():
+        isoc_kindmap[c_type] = fortran_type
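A quick check of the tables defined above: `iso_c_binding_map` maps Fortran type category -> ISO kind name -> C type, and the loop at the end of the file inverts it into `isoc_kindmap` (ISO kind name -> Fortran type category):

```python
# Verifies the forward map and the inverted lookup built above.
from numpy.f2py._isocbind import iso_c_binding_map, isoc_kindmap

assert iso_c_binding_map['integer']['c_int'] == 'int'
assert isoc_kindmap['c_int'] == 'integer'
assert isoc_kindmap['c_double_complex'] == 'complex'
```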
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_isocbind.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/_isocbind.pyi
new file mode 100644
index 0000000..b972f56
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_isocbind.pyi
@@ -0,0 +1,13 @@
+from typing import Any, Final
+
+iso_c_binding_map: Final[dict[str, dict[str, str]]] = ...
+
+isoc_c2pycode_map: Final[dict[str, Any]] = {}  # not implemented
+iso_c2py_map: Final[dict[str, Any]] = {}  # not implemented
+
+isoc_kindmap: Final[dict[str, str]] = ...
+
+# namespace pollution
+c_type: str
+c_type_dict: dict[str, str]
+fortran_type: str
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_src_pyf.py b/.venv/lib/python3.12/site-packages/numpy/f2py/_src_pyf.py
new file mode 100644
index 0000000..b5c424f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_src_pyf.py
@@ -0,0 +1,247 @@
+import os
+import re
+
+# START OF CODE VENDORED FROM `numpy.distutils.from_template`
+#############################################################
+"""
+process_file(filename)
+
+  takes templated file .xxx.src and produces .xxx file where .xxx
+  is .pyf .f90 or .f using the following template rules:
+
+  '<..>' denotes a template.
+
+  All function and subroutine blocks in a source file with names that
+  contain '<..>' will be replicated according to the rules in '<..>'.
+
+  The number of comma-separated words in '<..>' will determine the number of
+  replicates.
+
+  '<..>' may have two different forms, named and short. For example,
+
+  named:
+    where anywhere inside a block '

' will be replaced with + 'd', 's', 'z', and 'c' for each replicate of the block. + + <_c> is already defined: <_c=s,d,c,z> + <_t> is already defined: <_t=real,double precision,complex,double complex> + + short: + , a short form of the named, useful when no

appears inside + a block. + + In general, '<..>' contains a comma separated list of arbitrary + expressions. If these expression must contain a comma|leftarrow|rightarrow, + then prepend the comma|leftarrow|rightarrow with a backslash. + + If an expression matches '\\' then it will be replaced + by -th expression. + + Note that all '<..>' forms in a block must have the same number of + comma-separated entries. + + Predefined named template rules: + + + + + +""" + +routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) +routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) +function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) + +def parse_structure(astr): + """ Return a list of tuples for each function or subroutine each + tuple is the start and end of a subroutine or function to be + expanded. + """ + + spanlist = [] + ind = 0 + while True: + m = routine_start_re.search(astr, ind) + if m is None: + break + start = m.start() + if function_start_re.match(astr, start, m.end()): + while True: + i = astr.rfind('\n', ind, start) + if i == -1: + break + start = i + if astr[i:i + 7] != '\n $': + break + start += 1 + m = routine_end_re.search(astr, m.end()) + ind = end = (m and m.end() - 1) or len(astr) + spanlist.append((start, end)) + return spanlist + + +template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") +named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") +list_re = re.compile(r"<\s*((.*?))\s*>") + +def find_repl_patterns(astr): + reps = named_re.findall(astr) + names = {} + for rep in reps: + name = rep[0].strip() or unique_key(names) + repl = rep[1].replace(r'\,', '@comma@') + thelist = conv(repl) + names[name] = thelist + return names + +def find_and_remove_repl_patterns(astr): + names = find_repl_patterns(astr) + astr = re.subn(named_re, '', astr)[0] + return astr, names + + +item_re = re.compile(r"\A\\(?P\d+)\Z") +def conv(astr): + b = astr.split(',') + l = [x.strip() for x in b] + for i in range(len(l)): + m = item_re.match(l[i]) + if m: + j = int(m.group('index')) + l[i] = l[j] + return ','.join(l) + +def unique_key(adict): + """ Obtain a unique key given a dictionary.""" + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = f'__l{n}' + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') +def expand_sub(substr, names): + substr = substr.replace(r'\>', '@rightarrow@') + substr = substr.replace(r'\<', '@leftarrow@') + lnames = find_repl_patterns(substr) + substr = named_re.sub(r"<\1>", substr) # get rid of definition templates + + def listrepl(mobj): + thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) + if template_name_re.match(thelist): + return f"<{thelist}>" + name = None + for key in lnames.keys(): # see if list is already in dictionary + if lnames[key] == thelist: + name = key + if name is None: # this list is not in the dictionary yet + name = unique_key(lnames) + lnames[name] = thelist + return f"<{name}>" + + # convert all lists to named templates + # new names are constructed as needed + substr = list_re.sub(listrepl, substr) + + numsubs = None + base_rule = None + rules = {} + for r in template_re.findall(substr): + if r not in rules: + thelist = lnames.get(r, names.get(r, None)) + if thelist is None: + raise ValueError(f'No replicates found for <{r}>') + if r not in names and not thelist.startswith('_'): + names[r] = thelist + rule = [i.replace('@comma@', ',') for i in thelist.split(',')] + 
num = len(rule) + + if numsubs is None: + numsubs = num + rules[r] = rule + base_rule = r + elif num == numsubs: + rules[r] = rule + else: + rules_base_rule = ','.join(rules[base_rule]) + print("Mismatch in number of replacements " + f"(base <{base_rule}={rules_base_rule}>) " + f"for <{r}={thelist}>. Ignoring.") + if not rules: + return substr + + def namerepl(mobj): + name = mobj.group(1) + return rules.get(name, (k + 1) * [name])[k] + + newstr = '' + for k in range(numsubs): + newstr += template_re.sub(namerepl, substr) + '\n\n' + + newstr = newstr.replace('@rightarrow@', '>') + newstr = newstr.replace('@leftarrow@', '<') + return newstr + +def process_str(allstr): + newstr = allstr + writestr = '' + + struct = parse_structure(newstr) + + oldend = 0 + names = {} + names.update(_special_names) + for sub in struct: + cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) + writestr += cleanedstr + names.update(defs) + writestr += expand_sub(newstr[sub[0]:sub[1]], names) + oldend = sub[1] + writestr += newstr[oldend:] + + return writestr + + +include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + with open(source) as fid: + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn = os.path.join(d, fn) + if os.path.isfile(fn): + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + return lines + +def process_file(source): + lines = resolve_includes(source) + return process_str(''.join(lines)) + + +_special_names = find_repl_patterns(''' +<_c=s,d,c,z> +<_t=real,double precision,complex,double complex> + + + + + +''') + +# END OF CODE VENDORED FROM `numpy.distutils.from_template` +########################################################### diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/_src_pyf.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/_src_pyf.pyi new file mode 100644 index 0000000..f5aecbf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/_src_pyf.pyi @@ -0,0 +1,29 @@ +import re +from collections.abc import Mapping +from typing import Final + +from _typeshed import StrOrBytesPath + +routine_start_re: Final[re.Pattern[str]] = ... +routine_end_re: Final[re.Pattern[str]] = ... +function_start_re: Final[re.Pattern[str]] = ... +template_re: Final[re.Pattern[str]] = ... +named_re: Final[re.Pattern[str]] = ... +list_re: Final[re.Pattern[str]] = ... +item_re: Final[re.Pattern[str]] = ... +template_name_re: Final[re.Pattern[str]] = ... +include_src_re: Final[re.Pattern[str]] = ... + +def parse_structure(astr: str) -> list[tuple[int, int]]: ... +def find_repl_patterns(astr: str) -> dict[str, str]: ... +def find_and_remove_repl_patterns(astr: str) -> tuple[str, dict[str, str]]: ... +def conv(astr: str) -> str: ... + +# +def unique_key(adict: Mapping[str, object]) -> str: ... +def expand_sub(substr: str, names: dict[str, str]) -> str: ... +def process_str(allstr: str) -> str: ... + +# +def resolve_includes(source: StrOrBytesPath) -> list[str]: ... +def process_file(source: StrOrBytesPath) -> str: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/auxfuncs.py b/.venv/lib/python3.12/site-packages/numpy/f2py/auxfuncs.py new file mode 100644 index 0000000..a5af31d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/auxfuncs.py @@ -0,0 +1,1004 @@ +""" +Auxiliary functions for f2py2e. 
+ +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) LICENSE. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import pprint +import re +import sys +import types +from functools import reduce + +from . import __version__, cfuncs +from .cfuncs import errmess + +__all__ = [ + 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', + 'getargs2', 'getcallprotoargument', 'getcallstatement', + 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', + 'getusercode1', 'getdimension', 'hasbody', 'hascallstatement', 'hascommon', + 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', + 'isallocatable', 'isarray', 'isarrayofstrings', + 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', + 'iscomplex', 'iscstyledirective', + 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', + 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', + 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', + 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', + 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', + 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', + 'islogicalfunction', 'islong_complex', 'islong_double', + 'islong_doublefunction', 'islong_long', 'islong_longfunction', + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable', + 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', + 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', + 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', + 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', + 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' +] + + +f2py_version = __version__.version + + +show = pprint.pprint + +options = {} +debugoptions = [] +wrapfuncs = 1 + + +def outmess(t): + if options.get('verbose', 1): + sys.stdout.write(t) + + +def debugcapi(var): + return 'capi' in debugoptions + + +def _ischaracter(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def _isstring(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def ischaracter_or_characterarray(var): + return _ischaracter(var) and 'charselector' not in var + + +def ischaracter(var): + return ischaracter_or_characterarray(var) and not isarray(var) + + +def ischaracterarray(var): + return ischaracter_or_characterarray(var) and isarray(var) + + +def isstring_or_stringarray(var): + return _ischaracter(var) and 'charselector' in var + + +def isstring(var): + return isstring_or_stringarray(var) and not isarray(var) + + +def isstringarray(var): + return isstring_or_stringarray(var) and isarray(var) + + +def isarrayofstrings(var): # obsolete? + # leaving out '*' for now so that `character*(*) a(m)` and `character + # a(m,*)` are treated differently. Luckily `character**` is illegal. 
+ return isstringarray(var) and var['dimension'][-1] == '(*)' + + +def isarray(var): + return 'dimension' in var and not isexternal(var) + + +def isscalar(var): + return not (isarray(var) or isstring(var) or isexternal(var)) + + +def iscomplex(var): + return isscalar(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def islogical(var): + return isscalar(var) and var.get('typespec') == 'logical' + + +def isinteger(var): + return isscalar(var) and var.get('typespec') == 'integer' + + +def isreal(var): + return isscalar(var) and var.get('typespec') == 'real' + + +def get_kind(var): + try: + return var['kindselector']['*'] + except KeyError: + try: + return var['kindselector']['kind'] + except KeyError: + pass + + +def isint1(var): + return var.get('typespec') == 'integer' \ + and get_kind(var) == '1' and not isarray(var) + + +def islong_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') not in ['integer', 'logical']: + return 0 + return get_kind(var) == '8' + + +def isunsigned_char(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-1' + + +def isunsigned_short(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-2' + + +def isunsigned(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-4' + + +def isunsigned_long_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-8' + + +def isdouble(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '8' + + +def islong_double(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '16' + + +def islong_complex(var): + if not iscomplex(var): + return 0 + return get_kind(var) == '32' + + +def iscomplexarray(var): + return isarray(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def isint1array(var): + return isarray(var) and var.get('typespec') == 'integer' \ + and get_kind(var) == '1' + + +def isunsigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-1' + + +def isunsigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-2' + + +def isunsignedarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-4' + + +def isunsigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-8' + + +def issigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '1' + + +def issigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '2' + + +def issigned_array(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '4' + + +def issigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '8' + + +def isallocatable(var): + return 'attrspec' in var and 'allocatable' in var['attrspec'] + + +def ismutable(var): + return not ('dimension' not in var or isstring(var)) + + +def ismoduleroutine(rout): + return 'modulename' in rout + + +def 
ismodule(rout): + return 'block' in rout and 'module' == rout['block'] + + +def isfunction(rout): + return 'block' in rout and 'function' == rout['block'] + + +def isfunction_wrap(rout): + if isintent_c(rout): + return 0 + return wrapfuncs and isfunction(rout) and (not isexternal(rout)) + + +def issubroutine(rout): + return 'block' in rout and 'subroutine' == rout['block'] + + +def issubroutine_wrap(rout): + if isintent_c(rout): + return 0 + return issubroutine(rout) and hasassumedshape(rout) + +def isattr_value(var): + return 'value' in var.get('attrspec', []) + + +def hasassumedshape(rout): + if rout.get('hasassumedshape'): + return True + for a in rout['args']: + for d in rout['vars'].get(a, {}).get('dimension', []): + if d == ':': + rout['hasassumedshape'] = True + return True + return False + + +def requiresf90wrapper(rout): + return ismoduleroutine(rout) or hasassumedshape(rout) + + +def isroutine(rout): + return isfunction(rout) or issubroutine(rout) + + +def islogicalfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islogical(rout['vars'][a]) + return 0 + + +def islong_longfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_long(rout['vars'][a]) + return 0 + + +def islong_doublefunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_double(rout['vars'][a]) + return 0 + + +def iscomplexfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return iscomplex(rout['vars'][a]) + return 0 + + +def iscomplexfunction_warn(rout): + if iscomplexfunction(rout): + outmess("""\ + ************************************************************** + Warning: code with a function returning complex value + may not work correctly with your Fortran compiler. 
+ When using GNU gcc/g77 compilers, codes should work + correctly for callbacks with: + f2py -c -DF2PY_CB_RETURNCOMPLEX + **************************************************************\n""") + return 1 + return 0 + + +def isstringfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return isstring(rout['vars'][a]) + return 0 + + +def hasexternals(rout): + return 'externals' in rout and rout['externals'] + + +def isthreadsafe(rout): + return 'f2pyenhancements' in rout and \ + 'threadsafe' in rout['f2pyenhancements'] + + +def hasvariables(rout): + return 'vars' in rout and rout['vars'] + + +def isoptional(var): + return ('attrspec' in var and 'optional' in var['attrspec'] and + 'required' not in var['attrspec']) and isintent_nothide(var) + + +def isexternal(var): + return 'attrspec' in var and 'external' in var['attrspec'] + + +def getdimension(var): + dimpattern = r"\((.*?)\)" + if 'attrspec' in var.keys(): + if any('dimension' in s for s in var['attrspec']): + return next(re.findall(dimpattern, v) for v in var['attrspec']) + + +def isrequired(var): + return not isoptional(var) and isintent_nothide(var) + + +def iscstyledirective(f2py_line): + directives = {"callstatement", "callprotoargument", "pymethoddef"} + return any(directive in f2py_line.lower() for directive in directives) + + +def isintent_in(var): + if 'intent' not in var: + return 1 + if 'hide' in var['intent']: + return 0 + if 'inplace' in var['intent']: + return 0 + if 'in' in var['intent']: + return 1 + if 'out' in var['intent']: + return 0 + if 'inout' in var['intent']: + return 0 + if 'outin' in var['intent']: + return 0 + return 1 + + +def isintent_inout(var): + return ('intent' in var and ('inout' in var['intent'] or + 'outin' in var['intent']) and 'in' not in var['intent'] and + 'hide' not in var['intent'] and 'inplace' not in var['intent']) + + +def isintent_out(var): + return 'out' in var.get('intent', []) + + +def isintent_hide(var): + return ('intent' in var and ('hide' in var['intent'] or + ('out' in var['intent'] and 'in' not in var['intent'] and + (not l_or(isintent_inout, isintent_inplace)(var))))) + + +def isintent_nothide(var): + return not isintent_hide(var) + + +def isintent_c(var): + return 'c' in var.get('intent', []) + + +def isintent_cache(var): + return 'cache' in var.get('intent', []) + + +def isintent_copy(var): + return 'copy' in var.get('intent', []) + + +def isintent_overwrite(var): + return 'overwrite' in var.get('intent', []) + + +def isintent_callback(var): + return 'callback' in var.get('intent', []) + + +def isintent_inplace(var): + return 'inplace' in var.get('intent', []) + + +def isintent_aux(var): + return 'aux' in var.get('intent', []) + + +def isintent_aligned4(var): + return 'aligned4' in var.get('intent', []) + + +def isintent_aligned8(var): + return 'aligned8' in var.get('intent', []) + + +def isintent_aligned16(var): + return 'aligned16' in var.get('intent', []) + + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', + } + + +def isprivate(var): + return 'attrspec' in var and 'private' in var['attrspec'] + + +def isvariable(var): + # heuristic to find public/private 
declarations of filtered subroutines + if len(var) == 1 and 'attrspec' in var and \ + var['attrspec'][0] in ('public', 'private'): + is_var = False + else: + is_var = True + return is_var + +def hasinitvalue(var): + return '=' in var + + +def hasinitvalueasstring(var): + if not hasinitvalue(var): + return 0 + return var['='][0] in ['"', "'"] + + +def hasnote(var): + return 'note' in var + + +def hasresultnote(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return hasnote(rout['vars'][a]) + return 0 + + +def hascommon(rout): + return 'common' in rout + + +def containscommon(rout): + if hascommon(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if containscommon(b): + return 1 + return 0 + + +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + +def containsmodule(block): + if ismodule(block): + return 1 + if not hasbody(block): + return 0 + for b in block['body']: + if containsmodule(b): + return 1 + return 0 + + +def hasbody(rout): + return 'body' in rout + + +def hascallstatement(rout): + return getcallstatement(rout) is not None + + +def istrue(var): + return 1 + + +def isfalse(var): + return 0 + + +class F2PYError(Exception): + pass + + +class throw_error: + + def __init__(self, mess): + self.mess = mess + + def __call__(self, var): + mess = f'\n\n var = {var}\n Message: {self.mess}\n' + raise F2PYError(mess) + + +def l_and(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval(f"{l1}:{' and '.join(l2)}") + + +def l_or(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval(f"{l1}:{' or '.join(l2)}") + + +def l_not(f): + return eval('lambda v,f=f:not f(v)') + + +def isdummyroutine(rout): + try: + return rout['f2pyenhancements']['fortranname'] == '' + except KeyError: + return 0 + + +def getfortranname(rout): + try: + name = rout['f2pyenhancements']['fortranname'] + if name == '': + raise KeyError + if not name: + errmess(f"Failed to use fortranname from {rout['f2pyenhancements']}\n") + raise KeyError + except KeyError: + name = rout['name'] + return name + + +def getmultilineblock(rout, blockname, comment=1, counter=0): + try: + r = rout['f2pyenhancements'].get(blockname) + except KeyError: + return + if not r: + return + if counter > 0 and isinstance(r, str): + return + if isinstance(r, list): + if counter >= len(r): + return + r = r[counter] + if r[:3] == "'''": + if comment: + r = '\t/* start ' + blockname + \ + ' multiline (' + repr(counter) + ') */\n' + r[3:] + else: + r = r[3:] + if r[-3:] == "'''": + if comment: + r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' + else: + r = r[:-3] + else: + errmess(f"{blockname} multiline block should end with `'''`: {repr(r)}\n") + return r + + +def getcallstatement(rout): + return getmultilineblock(rout, 'callstatement') + + +def getcallprotoargument(rout, cb_map={}): + r = getmultilineblock(rout, 'callprotoargument', comment=0) + if r: + return r + if hascallstatement(rout): + outmess( + 'warning: callstatement is defined without callprotoargument\n') + return + from .capi_maps import getctype + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, 
l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) + for n in rout['args']: + var = rout['vars'][n] + if isintent_callback(var): + continue + if n in cb_map: + ctype = cb_map[n] + '_typedef' + else: + ctype = getctype(var) + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): + pass + elif isstring(var): + pass + elif not isattr_value(var): + ctype = ctype + '*' + if (isstring(var) + or isarrayofstrings(var) # obsolete? + or isstringarray(var)): + arg_types2.append('size_t') + arg_types.append(ctype) + + proto_args = ','.join(arg_types + arg_types2) + if not proto_args: + proto_args = 'void' + return proto_args + + +def getusercode(rout): + return getmultilineblock(rout, 'usercode') + + +def getusercode1(rout): + return getmultilineblock(rout, 'usercode', counter=1) + + +def getpymethoddef(rout): + return getmultilineblock(rout, 'pymethoddef') + + +def getargs(rout): + sortargs, args = [], [] + if 'args' in rout: + args = rout['args'] + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = rout['args'] + return args, sortargs + + +def getargs2(rout): + sortargs, args = [], rout.get('args', []) + auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) + and a not in args] + args = auxvars + args + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = auxvars + rout['args'] + return args, sortargs + + +def getrestdoc(rout): + if 'f2pymultilines' not in rout: + return None + k = None + if rout['block'] == 'python module': + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) + + +def gentitle(name): + ln = (80 - len(name) - 6) // 2 + return f"/*{ln * '*'} {name} {ln * '*'}*/" + + +def flatlist(lst): + if isinstance(lst, list): + return reduce(lambda x, y, f=flatlist: x + f(y), lst, []) + return [lst] + + +def stripcomma(s): + if s and s[-1] == ',': + return s[:-1] + return s + + +def replace(str, d, defaultsep=''): + if isinstance(d, list): + return [replace(str, _m, defaultsep) for _m in d] + if isinstance(str, list): + return [replace(_m, d, defaultsep) for _m in str] + for k in 2 * list(d.keys()): + if k == 'separatorsfor': + continue + if 'separatorsfor' in d and k in d['separatorsfor']: + sep = d['separatorsfor'][k] + else: + sep = defaultsep + if isinstance(d[k], list): + str = str.replace(f'#{k}#', sep.join(flatlist(d[k]))) + else: + str = str.replace(f'#{k}#', d[k]) + return str + + +def dictappend(rd, ar): + if isinstance(ar, list): + for a in ar: + rd = dictappend(rd, a) + return rd + for k in ar.keys(): + if k[0] == '_': + continue + if k in rd: + if isinstance(rd[k], str): + rd[k] = [rd[k]] + if isinstance(rd[k], list): + if isinstance(ar[k], list): + rd[k] = rd[k] + ar[k] + else: + rd[k].append(ar[k]) + elif isinstance(rd[k], dict): + if isinstance(ar[k], dict): + if k == 'separatorsfor': + for k1 in ar[k].keys(): + if k1 not in rd[k]: + rd[k][k1] = ar[k][k1] + else: + rd[k] = dictappend(rd[k], ar[k]) + else: + rd[k] = ar[k] + return rd + + +def applyrules(rules, d, var={}): + ret = {} + if isinstance(rules, list): + for r in rules: + rr = applyrules(r, d, var) + ret = dictappend(ret, rr) + if '_break' in rr: + break + return ret + if '_check' in rules and (not rules['_check'](var)): + return ret + if 'need' in rules: + res = applyrules({'needs': rules['need']}, d, var) + if 'needs' 
in res: + cfuncs.append_needs(res['needs']) + + for k in rules.keys(): + if k == 'separatorsfor': + ret[k] = rules[k] + continue + if isinstance(rules[k], str): + ret[k] = replace(rules[k], d) + elif isinstance(rules[k], list): + ret[k] = [] + for i in rules[k]: + ar = applyrules({k: i}, d, var) + if k in ar: + ret[k].append(ar[k]) + elif k[0] == '_': + continue + elif isinstance(rules[k], dict): + ret[k] = [] + for k1 in rules[k].keys(): + if isinstance(k1, types.FunctionType) and k1(var): + if isinstance(rules[k][k1], list): + for i in rules[k][k1]: + if isinstance(i, dict): + res = applyrules({'supertext': i}, d, var) + i = res.get('supertext', '') + ret[k].append(replace(i, d)) + else: + i = rules[k][k1] + if isinstance(i, dict): + res = applyrules({'supertext': i}, d) + i = res.get('supertext', '') + ret[k].append(replace(i, d)) + else: + errmess(f'applyrules: ignoring rule {repr(rules[k])}.\n') + if isinstance(ret[k], list): + if len(ret[k]) == 1: + ret[k] = ret[k][0] + if ret[k] == []: + del ret[k] + return ret + + +_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', + re.I).match +_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' + r'__user__[\w_]*)', re.I).match + +def get_f2py_modulename(source): + name = None + with open(source) as f: + for line in f: + m = _f2py_module_name_match(line) + if m: + if _f2py_user_module_name_match(line): # skip *__user__* names + continue + name = m.group('name') + break + return name + +def getuseblocks(pymod): + all_uses = [] + for inner in pymod['body']: + for modblock in inner['body']: + if modblock.get('use'): + all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x]) + return all_uses + +def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): + """ + Update the Fortran-to-C type mapping dictionary with new mappings and + return a list of successfully mapped C types. + + This function integrates a new mapping dictionary into an existing + Fortran-to-C type mapping dictionary. It ensures that all keys are in + lowercase and validates new entries against a given C-to-Python mapping + dictionary. Redefinitions and invalid entries are reported with a warning. + + Parameters + ---------- + f2cmap_all : dict + The existing Fortran-to-C type mapping dictionary that will be updated. + It should be a dictionary of dictionaries where the main keys represent + Fortran types and the nested dictionaries map Fortran type specifiers + to corresponding C types. + + new_map : dict + A dictionary containing new type mappings to be added to `f2cmap_all`. + The structure should be similar to `f2cmap_all`, with keys representing + Fortran types and values being dictionaries of type specifiers and their + C type equivalents. + + c2py_map : dict + A dictionary used for validating the C types in `new_map`. It maps C + types to corresponding Python types and is used to ensure that the C + types specified in `new_map` are valid. + + verbose : boolean + A flag used to provide information about the types mapped + + Returns + ------- + tuple of (dict, list) + The updated Fortran-to-C type mapping dictionary and a list of + successfully mapped C types. 
+ """ + f2cmap_mapped = [] + + new_map_lower = {} + for k, d1 in new_map.items(): + d1_lower = {k1.lower(): v1 for k1, v1 in d1.items()} + new_map_lower[k.lower()] = d1_lower + + for k, d1 in new_map_lower.items(): + if k not in f2cmap_all: + f2cmap_all[k] = {} + + for k1, v1 in d1.items(): + if v1 in c2py_map: + if k1 in f2cmap_all[k]: + outmess( + "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" + % (k, k1, f2cmap_all[k][k1], v1) + ) + f2cmap_all[k][k1] = v1 + if verbose: + outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n') + f2cmap_mapped.append(v1) + elif verbose: + errmess( + "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" + % (k, k1, v1, v1, list(c2py_map.keys())) + ) + + return f2cmap_all, f2cmap_mapped diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/auxfuncs.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/auxfuncs.pyi new file mode 100644 index 0000000..f2ff09f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/auxfuncs.pyi @@ -0,0 +1,264 @@ +from collections.abc import Callable, Mapping +from pprint import pprint as show +from typing import Any, Final, Never, TypeAlias, TypeVar, overload +from typing import Literal as L + +from _typeshed import FileDescriptorOrPath + +from .cfuncs import errmess + +__all__ = [ + "applyrules", + "containscommon", + "containsderivedtypes", + "debugcapi", + "dictappend", + "errmess", + "gentitle", + "get_f2py_modulename", + "getargs2", + "getcallprotoargument", + "getcallstatement", + "getdimension", + "getfortranname", + "getpymethoddef", + "getrestdoc", + "getuseblocks", + "getusercode", + "getusercode1", + "hasbody", + "hascallstatement", + "hascommon", + "hasexternals", + "hasinitvalue", + "hasnote", + "hasresultnote", + "isallocatable", + "isarray", + "isarrayofstrings", + "isattr_value", + "ischaracter", + "ischaracter_or_characterarray", + "ischaracterarray", + "iscomplex", + "iscomplexarray", + "iscomplexfunction", + "iscomplexfunction_warn", + "iscstyledirective", + "isdouble", + "isdummyroutine", + "isexternal", + "isfunction", + "isfunction_wrap", + "isint1", + "isint1array", + "isinteger", + "isintent_aux", + "isintent_c", + "isintent_callback", + "isintent_copy", + "isintent_dict", + "isintent_hide", + "isintent_in", + "isintent_inout", + "isintent_inplace", + "isintent_nothide", + "isintent_out", + "isintent_overwrite", + "islogical", + "islogicalfunction", + "islong_complex", + "islong_double", + "islong_doublefunction", + "islong_long", + "islong_longfunction", + "ismodule", + "ismoduleroutine", + "isoptional", + "isprivate", + "isrequired", + "isroutine", + "isscalar", + "issigned_long_longarray", + "isstring", + "isstring_or_stringarray", + "isstringarray", + "isstringfunction", + "issubroutine", + "issubroutine_wrap", + "isthreadsafe", + "isunsigned", + "isunsigned_char", + "isunsigned_chararray", + "isunsigned_long_long", + "isunsigned_long_longarray", + "isunsigned_short", + "isunsigned_shortarray", + "isvariable", + "l_and", + "l_not", + "l_or", + "outmess", + "process_f2cmap_dict", + "replace", + "show", + "stripcomma", + "throw_error", +] + +### + +_VT = TypeVar("_VT") +_RT = TypeVar("_RT") + +_Var: TypeAlias = Mapping[str, list[str]] +_ROut: TypeAlias = Mapping[str, str] +_F2CMap: TypeAlias = Mapping[str, Mapping[str, str]] + +_Bool: TypeAlias = bool | L[0, 1] +_Intent: TypeAlias = L[ + "INTENT_IN", + "INTENT_OUT", + "INTENT_INOUT", + "INTENT_C", + "INTENT_CACHE", + "INTENT_HIDE", + "INTENT_INPLACE", + "INTENT_ALIGNED4", + "INTENT_ALIGNED8", + "INTENT_ALIGNED16", + "OPTIONAL", 
+] + +### + +isintent_dict: dict[Callable[[_Var], _Bool], _Intent] + +class F2PYError(Exception): ... + +class throw_error: + mess: Final[str] + def __init__(self, /, mess: str) -> None: ... + def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError + +# +def l_and(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_or(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_not(f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... + +# +def outmess(t: str) -> None: ... +def debugcapi(var: _Var) -> bool: ... + +# +def hasinitvalue(var: _Var | str) -> bool: ... +def hasnote(var: _Var | str) -> bool: ... +def ischaracter(var: _Var) -> bool: ... +def ischaracterarray(var: _Var) -> bool: ... +def ischaracter_or_characterarray(var: _Var) -> bool: ... +def isstring(var: _Var) -> bool: ... +def isstringarray(var: _Var) -> bool: ... +def isstring_or_stringarray(var: _Var) -> bool: ... +def isarray(var: _Var) -> bool: ... +def isarrayofstrings(var: _Var) -> bool: ... +def isscalar(var: _Var) -> bool: ... +def iscomplex(var: _Var) -> bool: ... +def islogical(var: _Var) -> bool: ... +def isinteger(var: _Var) -> bool: ... +def isint1(var: _Var) -> bool: ... +def isint1array(var: _Var) -> bool: ... +def islong_long(var: _Var) -> _Bool: ... +def isunsigned(var: _Var) -> _Bool: ... +def isunsigned_char(var: _Var) -> _Bool: ... +def isunsigned_chararray(var: _Var) -> bool: ... +def isunsigned_short(var: _Var) -> _Bool: ... +def isunsigned_shortarray(var: _Var) -> bool: ... +def isunsigned_long_long(var: _Var) -> _Bool: ... +def isunsigned_long_longarray(var: _Var) -> bool: ... +def issigned_long_longarray(var: _Var) -> bool: ... +def isdouble(var: _Var) -> _Bool: ... +def islong_double(var: _Var) -> _Bool: ... +def islong_complex(var: _Var) -> _Bool: ... +def iscomplexarray(var: _Var) -> bool: ... +def isallocatable(var: _Var) -> bool: ... +def isattr_value(var: _Var) -> bool: ... +def isoptional(var: _Var) -> bool: ... +def isexternal(var: _Var) -> bool: ... +def isrequired(var: _Var) -> bool: ... +def isprivate(var: _Var) -> bool: ... +def isvariable(var: _Var) -> bool: ... +def isintent_in(var: _Var) -> _Bool: ... +def isintent_inout(var: _Var) -> bool: ... +def isintent_out(var: _Var) -> bool: ... +def isintent_hide(var: _Var) -> bool: ... +def isintent_nothide(var: _Var) -> bool: ... +def isintent_c(var: _Var) -> bool: ... +def isintent_cache(var: _Var) -> bool: ... +def isintent_copy(var: _Var) -> bool: ... +def isintent_overwrite(var: _Var) -> bool: ... +def isintent_callback(var: _Var) -> bool: ... +def isintent_inplace(var: _Var) -> bool: ... +def isintent_aux(var: _Var) -> bool: ... + +# +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... +def containscommon(rout: _ROut) -> _Bool: ... +def hasexternals(rout: _ROut) -> bool: ... +def hasresultnote(rout: _ROut) -> _Bool: ... +def hasbody(rout: _ROut) -> _Bool: ... +def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... +def hascallstatement(rout: _ROut) -> bool: ... +def isroutine(rout: _ROut) -> bool: ... +def ismodule(rout: _ROut) -> bool: ... +def ismoduleroutine(rout: _ROut) -> bool: ... +def issubroutine(rout: _ROut) -> bool: ... +def issubroutine_wrap(rout: _ROut) -> _Bool: ... +def isfunction(rout: _ROut) -> bool: ... +def isfunction_wrap(rout: _ROut) -> _Bool: ... +def islogicalfunction(rout: _ROut) -> _Bool: ... +def islong_longfunction(rout: _ROut) -> _Bool: ... +def islong_doublefunction(rout: _ROut) -> _Bool: ... 
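The `l_and`/`l_or`/`l_not` stubs above are the predicate combinators that the rule tables elsewhere in this patch compose into checks such as `l_and(isintent_c, l_or(isscalar, iscomplex))(var)`. A minimal sketch of the idea, using simplified stand-in predicates rather than the string-building implementations in auxfuncs.py:

# Sketch only: the real combinators in auxfuncs.py assemble their lambdas
# from source strings; these simplified versions just compose callables.
def l_and(*fs):
    return lambda var: all(f(var) for f in fs)

def l_or(*fs):
    return lambda var: any(f(var) for f in fs)

def l_not(f):
    return lambda var: not f(var)

# Hypothetical stand-ins for the real predicates over f2py 'var' dicts:
isscalar = lambda var: 'dimension' not in var
iscomplex = lambda var: var.get('typespec') == 'complex'
isintent_c = lambda var: 'c' in var.get('intent', [])

check = l_and(isintent_c, l_or(isscalar, iscomplex))
print(check({'typespec': 'complex', 'intent': ['c']}))                # True
print(check({'typespec': 'real', 'intent': [], 'dimension': ['3']}))  # False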
+def iscomplexfunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction_warn(rout: _ROut) -> _Bool: ... +def isstringfunction(rout: _ROut) -> _Bool: ... +def isthreadsafe(rout: _ROut) -> bool: ... +def isdummyroutine(rout: _ROut) -> _Bool: ... +def iscstyledirective(f2py_line: str) -> bool: ... + +# . +def getdimension(var: _Var) -> list[Any] | None: ... +def getfortranname(rout: _ROut) -> str: ... +def getmultilineblock(rout: _ROut, blockname: str, comment: _Bool = 1, counter: int = 0) -> str | None: ... +def getcallstatement(rout: _ROut) -> str | None: ... +def getcallprotoargument(rout: _ROut, cb_map: dict[str, str] = {}) -> str: ... +def getusercode(rout: _ROut) -> str | None: ... +def getusercode1(rout: _ROut) -> str | None: ... +def getpymethoddef(rout: _ROut) -> str | None: ... +def getargs(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getargs2(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getrestdoc(rout: _ROut) -> str | None: ... + +# +def gentitle(name: str) -> str: ... +def stripcomma(s: str) -> str: ... +@overload +def replace(str: str, d: list[str], defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: list[str], d: str, defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: str, d: str, defaultsep: str = "") -> str: ... + +# +def dictappend(rd: Mapping[str, object], ar: Mapping[str, object] | list[Mapping[str, object]]) -> dict[str, Any]: ... +def applyrules(rules: Mapping[str, object], d: Mapping[str, object], var: _Var = {}) -> dict[str, Any]: ... + +# +def get_f2py_modulename(source: FileDescriptorOrPath) -> str: ... +def getuseblocks(pymod: Mapping[str, Mapping[str, Mapping[str, str]]]) -> list[str]: ... +def process_f2cmap_dict( + f2cmap_all: _F2CMap, + new_map: _F2CMap, + c2py_map: _F2CMap, + verbose: bool = False, +) -> tuple[dict[str, dict[str, str]], list[str]]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/capi_maps.py b/.venv/lib/python3.12/site-packages/numpy/f2py/capi_maps.py new file mode 100644 index 0000000..290ac2f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/capi_maps.py @@ -0,0 +1,811 @@ +""" +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ + +f2py_version = __version__.version + +import copy +import os +import re + +from . import cb_rules +from ._isocbind import iso_c2py_map, iso_c_binding_map, isoc_c2pycode_map + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * +from .crackfortran import markoutercomma + +__all__ = [ + 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', + 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', + 'cb_sign2map', 'cb_routsign2map', 'common_sign2map', 'process_f2cmap_dict' +] + + +depargs = [] +lcb_map = {} +lcb2_map = {} +# forced casting: mainly caused by the fact that Python or Numeric +# C/APIs do not support the corresponding C types. 
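A short sketch of how the tables below chain together; the lookups copy illustrative entries from `f2cmap_all` and `c2py_map`, while the real resolution lives in `getctype` further down in this file, which additionally handles kind selectors and user overrides:

# Illustrative subset of the tables defined below:
f2cmap = {'integer': {'': 'int', '8': 'long_long'}}
c2py = {'int': 'int', 'long_long': 'long'}

ctype = f2cmap['integer']['8']   # Fortran integer*8 -> C 'long_long'
pytype = c2py[ctype]             # -> 'long', the Python-level type name
print(ctype, pytype)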
+c2py_map = {'double': 'float', + 'float': 'float', # forced casting + 'long_double': 'float', # forced casting + 'char': 'int', # forced casting + 'signed_char': 'int', # forced casting + 'unsigned_char': 'int', # forced casting + 'short': 'int', # forced casting + 'unsigned_short': 'int', # forced casting + 'int': 'int', # forced casting + 'long': 'int', + 'long_long': 'long', + 'unsigned': 'int', # forced casting + 'complex_float': 'complex', # forced casting + 'complex_double': 'complex', + 'complex_long_double': 'complex', # forced casting + 'string': 'string', + 'character': 'bytes', + } + +c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string': 'NPY_STRING', + 'character': 'NPY_STRING'} + +c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'g', + 'char': 'b', + 'unsigned_char': 'B', + 'signed_char': 'b', + 'short': 'h', + 'unsigned_short': 'H', + 'int': 'i', + 'unsigned': 'I', + 'long': 'l', + 'unsigned_long': 'L', + 'long_long': 'q', + 'unsigned_long_long': 'Q', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'G', + 'string': 'S', + 'character': 'c'} + +# https://docs.python.org/3/c-api/arg.html#building-values +c2buildvalue_map = {'double': 'd', + 'float': 'f', + 'char': 'b', + 'signed_char': 'b', + 'short': 'h', + 'int': 'i', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'N', + 'complex_double': 'N', + 'complex_long_double': 'N', + 'string': 'y', + 'character': 'c'} + +f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', + '12': 'long_double', '16': 'long_double'}, + 'integer': {'': 'int', '1': 'signed_char', '2': 'short', + '4': 'int', '8': 'long_long', + '-1': 'unsigned_char', '-2': 'unsigned_short', + '-4': 'unsigned', '-8': 'unsigned_long_long'}, + 'complex': {'': 'complex_float', '8': 'complex_float', + '16': 'complex_double', '24': 'complex_long_double', + '32': 'complex_long_double'}, + 'complexkind': {'': 'complex_float', '4': 'complex_float', + '8': 'complex_double', '12': 'complex_long_double', + '16': 'complex_long_double'}, + 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', + '8': 'long_long'}, + 'double complex': {'': 'complex_double'}, + 'double precision': {'': 'double'}, + 'byte': {'': 'char'}, + } + +# Add ISO_C handling +c2pycode_map.update(isoc_c2pycode_map) +c2py_map.update(iso_c2py_map) +f2cmap_all, _ = process_f2cmap_dict(f2cmap_all, iso_c_binding_map, c2py_map) +# End ISO_C handling +f2cmap_default = copy.deepcopy(f2cmap_all) + +f2cmap_mapped = [] + +def load_f2cmap_file(f2cmap_file): + global f2cmap_all, f2cmap_mapped + + f2cmap_all = copy.deepcopy(f2cmap_default) + + if f2cmap_file is None: + # Default value + f2cmap_file = '.f2py_f2cmap' + if not os.path.isfile(f2cmap_file): + return + + # User defined additions to f2cmap_all. + # f2cmap_file must contain a dictionary of dictionaries, only. For + # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is + # interpreted as C 'float'. This feature is useful for F90/95 users if + # they use PARAMETERS in type specifications. 
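As a concrete illustration of the format described in the comment above, a hypothetical `.f2py_f2cmap` for a project that declares `real(low)` and `real(high)` kind parameters would contain a single dict literal; the file is passed to eval, so nothing else may appear in it:

# .f2py_f2cmap  (hypothetical example)
{'real': {'low': 'float', 'high': 'double'}}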
+ try: + outmess(f'Reading f2cmap from {f2cmap_file!r} ...\n') + with open(f2cmap_file) as f: + d = eval(f.read().lower(), {}, {}) + f2cmap_all, f2cmap_mapped = process_f2cmap_dict(f2cmap_all, d, c2py_map, True) + outmess('Successfully applied user defined f2cmap changes\n') + except Exception as msg: + errmess(f'Failed to apply user defined f2cmap changes: {msg}. Skipping.\n') + + +cformat_map = {'double': '%g', + 'float': '%g', + 'long_double': '%Lg', + 'char': '%d', + 'signed_char': '%d', + 'unsigned_char': '%hhu', + 'short': '%hd', + 'unsigned_short': '%hu', + 'int': '%d', + 'unsigned': '%u', + 'long': '%ld', + 'unsigned_long': '%lu', + 'long_long': '%ld', + 'complex_float': '(%g,%g)', + 'complex_double': '(%g,%g)', + 'complex_long_double': '(%Lg,%Lg)', + 'string': '\\"%s\\"', + 'character': "'%c'", + } + +# Auxiliary functions + + +def getctype(var): + """ + Determines C type + """ + ctype = 'void' + if isfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getctype(var['vars'][a]) + else: + errmess(f'getctype: function {a} has no return value?!\n') + elif issubroutine(var): + return ctype + elif ischaracter_or_characterarray(var): + return 'character' + elif isstring_or_stringarray(var): + return 'string' + elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: + typespec = var['typespec'].lower() + f2cmap = f2cmap_all[typespec] + ctype = f2cmap[''] # default type + if 'kindselector' in var: + if '*' in var['kindselector']: + try: + ctype = f2cmap[var['kindselector']['*']] + except KeyError: + errmess('getctype: "%s %s %s" not supported.\n' % + (var['typespec'], '*', var['kindselector']['*'])) + elif 'kind' in var['kindselector']: + if typespec + 'kind' in f2cmap_all: + f2cmap = f2cmap_all[typespec + 'kind'] + try: + ctype = f2cmap[var['kindselector']['kind']] + except KeyError: + if typespec in f2cmap_all: + f2cmap = f2cmap_all[typespec] + try: + ctype = f2cmap[str(var['kindselector']['kind'])] + except KeyError: + errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' + % (typespec, var['kindselector']['kind'], ctype, + typespec, var['kindselector']['kind'], os.getcwd())) + elif not isexternal(var): + errmess(f'getctype: No C-type found in "{var}", assuming void.\n') + return ctype + + +def f2cexpr(expr): + """Rewrite Fortran expression as f2py supported C expression. + + Due to the lack of a proper expression parser in f2py, this + function uses a heuristic approach that assumes that Fortran + arithmetic expressions are valid C arithmetic expressions when + mapping Fortran function calls to the corresponding C function/CPP + macros calls. 
+ + """ + # TODO: support Fortran `len` function with optional kind parameter + expr = re.sub(r'\blen\b', 'f2py_slen', expr) + return expr + + +def getstrlength(var): + if isstringfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getstrlength(var['vars'][a]) + else: + errmess(f'getstrlength: function {a} has no return value?!\n') + if not isstring(var): + errmess( + f'getstrlength: expected a signature of a string but got: {repr(var)}\n') + len = '1' + if 'charselector' in var: + a = var['charselector'] + if '*' in a: + len = a['*'] + elif 'len' in a: + len = f2cexpr(a['len']) + if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): + if isintent_hide(var): + errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( + repr(var))) + len = '-1' + return len + + +def getarrdims(a, var, verbose=0): + ret = {} + if isstring(var) and not isarray(var): + ret['size'] = getstrlength(var) + ret['rank'] = '0' + ret['dims'] = '' + elif isscalar(var): + ret['size'] = '1' + ret['rank'] = '0' + ret['dims'] = '' + elif isarray(var): + dim = copy.copy(var['dimension']) + ret['size'] = '*'.join(dim) + try: + ret['size'] = repr(eval(ret['size'])) + except Exception: + pass + ret['dims'] = ','.join(dim) + ret['rank'] = repr(len(dim)) + ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] + for i in range(len(dim)): # solve dim for dependencies + v = [] + if dim[i] in depargs: + v = [dim[i]] + else: + for va in depargs: + if re.match(r'.*?\b%s\b.*' % va, dim[i]): + v.append(va) + for va in v: + if depargs.index(va) > depargs.index(a): + dim[i] = '*' + break + ret['setdims'], i = '', -1 + for d in dim: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['setdims'], i, d) + if ret['setdims']: + ret['setdims'] = ret['setdims'][:-1] + ret['cbsetdims'], i = '', -1 + for d in var['dimension']: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, d) + elif isintent_in(var): + outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' + % (d)) + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, 0) + elif verbose: + errmess( + f'getarrdims: If in call-back function: array argument {repr(a)} must have bounded dimensions: got {repr(d)}\n') + if ret['cbsetdims']: + ret['cbsetdims'] = ret['cbsetdims'][:-1] +# if not isintent_c(var): +# var['dimension'].reverse() + return ret + + +def getpydocsign(a, var): + global lcb_map + if isfunction(var): + if 'result' in var: + af = var['result'] + else: + af = var['name'] + if af in var['vars']: + return getpydocsign(af, var['vars'][af]) + else: + errmess(f'getctype: function {af} has no return value?!\n') + return '', '' + sig, sigout = a, a + opt = '' + if isintent_in(var): + opt = 'input' + elif isintent_inout(var): + opt = 'in/output' + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + init = '' + ctype = getctype(var) + + if hasinitvalue(var): + init, showinit = getinit(a, var) + init = f', optional\\n Default: {showinit}' + if isscalar(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], + c2pycode_map[ctype], init) + else: + sig = f'{a} : {opt} {c2py_map[ctype]}{init}' + sigout = f'{out_a} : {c2py_map[ctype]}' + elif isstring(var): + if isintent_inout(var): + sig = '%s : %s rank-0 
array(string(len=%s),\'c\')%s' % ( + a, opt, getstrlength(var), init) + else: + sig = f'{a} : {opt} string(len={getstrlength(var)}){init}' + sigout = f'{out_a} : string(len={getstrlength(var)})' + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, + c2pycode_map[ + ctype], + ','.join(dim), init) + if a == out_a: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ + % (a, rank, c2pycode_map[ctype], ','.join(dim)) + else: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ + % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + elif isexternal(var): + ua = '' + if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: + ua = lcb2_map[lcb_map[a]]['argname'] + if not ua == a: + ua = f' => {ua}' + else: + ua = '' + sig = f'{a} : call-back function{ua}' + sigout = sig + else: + errmess( + f'getpydocsign: Could not resolve docsignature for "{a}".\n') + return sig, sigout + + +def getarrdocsign(a, var): + ctype = getctype(var) + if isstring(var) and (not isarray(var)): + sig = f'{a} : rank-0 array(string(len={getstrlength(var)}),\'c\')' + elif isscalar(var): + sig = f'{a} : rank-0 array({c2py_map[ctype]},\'{c2pycode_map[ctype]}\')' + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, + c2pycode_map[ + ctype], + ','.join(dim)) + return sig + + +def getinit(a, var): + if isstring(var): + init, showinit = '""', "''" + else: + init, showinit = '', '' + if hasinitvalue(var): + init = var['='] + showinit = init + if iscomplex(var) or iscomplexarray(var): + ret = {} + + try: + v = var["="] + if ',' in v: + ret['init.r'], ret['init.i'] = markoutercomma( + v[1:-1]).split('@,@') + else: + v = eval(v, {}, {}) + ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) + except Exception: + raise ValueError( + f'getinit: expected complex number `(r,i)\' but got `{init}\' as initial value of {a!r}.') + if isarray(var): + init = f"(capi_c.r={ret['init.r']},capi_c.i={ret['init.i']},capi_c)" + elif isstring(var): + if not init: + init, showinit = '""', "''" + if init[0] == "'": + init = '"%s"' % (init[1:-1].replace('"', '\\"')) + if init[0] == '"': + showinit = f"'{init[1:-1]}'" + return init, showinit + + +def get_elsize(var): + if isstring(var) or isstringarray(var): + elsize = getstrlength(var) + # override with user-specified length when available: + elsize = var['charselector'].get('f2py_len', elsize) + return elsize + if ischaracter(var) or ischaracterarray(var): + return '1' + # for numerical types, PyArray_New* functions ignore specified + # elsize, so we just return 1 and let elsize be determined at + # runtime, see fortranobject.c + return '1' + + +def sign2map(a, var): + """ + varname,ctype,atype + init,init.r,init.i,pytype + vardebuginfo,vardebugshowvalue,varshowvalue + varrformat + + intent + """ + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} + intent_flags = [] + for f, s in isintent_dict.items(): + if f(var): + intent_flags.append(f'F2PY_{s}') + if intent_flags: + # TODO: Evaluate intent_flags here. 
+ ret['intent'] = '|'.join(intent_flags) + else: + ret['intent'] = 'F2PY_INTENT_IN' + if isarray(var): + ret['varrformat'] = 'N' + elif ret['ctype'] in c2buildvalue_map: + ret['varrformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['varrformat'] = 'O' + ret['init'], ret['showinit'] = getinit(a, var) + if hasinitvalue(var) and iscomplex(var) and not isarray(var): + ret['init.r'], ret['init.i'] = markoutercomma( + ret['init'][1:-1]).split('@,@') + if isexternal(var): + ret['cbnamekey'] = a + if a in lcb_map: + ret['cbname'] = lcb_map[a] + ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] + ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] + ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] + ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] + else: + ret['cbname'] = a + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( + a, list(lcb_map.keys()))) + if isstring(var): + ret['length'] = getstrlength(var) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + dim = copy.copy(var['dimension']) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + # Debug info + if debugcapi(var): + il = [isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', + ] + rl = [] + for i in range(0, len(il), 2): + if il[i](var): + rl.append(il[i + 1]) + if isstring(var): + rl.append(f"slen({a})={ret['length']}") + if isarray(var): + ddim = ','.join( + map(lambda x, y: f'{x}|{y}', var['dimension'], dim)) + rl.append(f'dims({ddim})') + if isexternal(var): + ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{','.join(rl)}" + else: + ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( + ret['ctype'], a, ret['showinit'], ','.join(rl)) + if isscalar(var): + if ret['ctype'] in cformat_map: + ret['vardebugshowvalue'] = f"debug-capi:{a}={cformat_map[ret['ctype']]}" + if isstring(var): + ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isexternal(var): + ret['vardebugshowvalue'] = f'debug-capi:{a}=%p' + if ret['ctype'] in cformat_map: + ret['varshowvalue'] = f"#name#:{a}={cformat_map[ret['ctype']]}" + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isstring(var): + ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + return ret + + +def routsign2map(rout): + """ + name,NAME,begintitle,endtitle + rname,ctype,rformat + routdebugshowvalue + """ + global lcb_map + name = rout['name'] + fname = getfortranname(rout) + ret = {'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle(f'end of {name}'), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', + } + if '_' in fname: + ret['F_FUNC'] = 'F_FUNC_US' + else: + ret['F_FUNC'] = 'F_FUNC' + if '_' in name: + 
ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' + else: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' + lcb_map = {} + if 'use' in rout: + for u in rout['use'].keys(): + if u in cb_rules.cb_map: + for un in cb_rules.cb_map[u]: + ln = un[0] + if 'map' in rout['use'][u]: + for k in rout['use'][u]['map'].keys(): + if rout['use'][u]['map'][k] == un[0]: + ln = k + break + lcb_map[ln] = un[1] + elif rout.get('externals'): + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( + ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + ret['ctype'] = getctype(rout['vars'][a]) + if hasresultnote(rout): + ret['resultnote'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + if ret['ctype'] in c2buildvalue_map: + ret['rformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['rformat'] = 'O' + errmess('routsign2map: no c2buildvalue key for type %s\n' % + (repr(ret['ctype']))) + if debugcapi(rout): + if ret['ctype'] in cformat_map: + ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isstringfunction(rout): + ret['rlength'] = getstrlength(rout['vars'][a]) + if ret['rlength'] == '-1': + errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( + repr(rout['name']))) + ret['rlength'] = '10' + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def modsign2map(m): + """ + modulename + """ + if ismodule(m): + ret = {'f90modulename': m['name'], + 'F90MODULENAME': m['name'].upper(), + 'texf90modulename': m['name'].replace('_', '\\_')} + else: + ret = {'modulename': m['name'], + 'MODULENAME': m['name'].upper(), + 'texmodulename': m['name'].replace('_', '\\_')} + ret['restdoc'] = getrestdoc(m) or [] + if hasnote(m): + ret['note'] = m['note'] + ret['usercode'] = getusercode(m) or '' + ret['usercode1'] = getusercode1(m) or '' + if m['body']: + ret['interface_usercode'] = getusercode(m['body'][0]) or '' + else: + ret['interface_usercode'] = '' + ret['pymethoddef'] = getpymethoddef(m) or '' + if 'gil_used' in m: + ret['gil_used'] = m['gil_used'] + if 'coutput' in m: + ret['coutput'] = m['coutput'] + if 'f2py_wrapper_output' in m: + ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] + return ret + + +def cb_sign2map(a, var, index=None): + ret = {'varname': a} + ret['varname_i'] = ret['varname'] + ret['ctype'] = getctype(var) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + return ret + + +def cb_routsign2map(rout, um): + """ + name,begintitle,endtitle,argname + ctype,rctype,maxnofargs,nofoptargs,returncptr + """ + ret = {'name': f"cb_{rout['name']}_in_{um}", + 'returncptr': ''} + if isintent_callback(rout): + if '_' in rout['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + ret['callbackname'] = 
f"{F_FUNC}({rout['name'].lower()},{rout['name'].upper()})" + ret['static'] = 'extern' + else: + ret['callbackname'] = ret['name'] + ret['static'] = 'static' + ret['argname'] = rout['name'] + ret['begintitle'] = gentitle(ret['name']) + ret['endtitle'] = gentitle(f"end of {ret['name']}") + ret['ctype'] = getctype(rout) + ret['rctype'] = 'void' + if ret['ctype'] == 'string': + ret['rctype'] = 'void' + else: + ret['rctype'] = ret['ctype'] + if ret['rctype'] != 'void': + if iscomplexfunction(rout): + ret['returncptr'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +return_value= +#endif +""" + else: + ret['returncptr'] = 'return_value=' + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isstringfunction(rout): + ret['strlength'] = getstrlength(rout) + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if hasnote(rout['vars'][a]): + ret['note'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + if iscomplexfunction(rout): + ret['rctype'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +#ctype# +#else +void +#endif +""" + elif hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + nofargs = 0 + nofoptargs = 0 + if 'args' in rout and 'vars' in rout: + for a in rout['args']: + var = rout['vars'][a] + if l_or(isintent_in, isintent_inout)(var): + nofargs = nofargs + 1 + if isoptional(var): + nofoptargs = nofoptargs + 1 + ret['maxnofargs'] = repr(nofargs) + ret['nofoptargs'] = repr(nofoptargs) + if hasnote(rout) and isfunction(rout) and 'result' in rout: + ret['routnote'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def common_sign2map(a, var): # obsolete + ret = {'varname': a, 'ctype': getctype(var)} + if isstringarray(var): + ret['ctype'] = 'char' + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + elif isstring(var): + ret['size'] = getstrlength(var) + ret['rank'] = '1' + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + # for strings this returns 0-rank but actually is 1-rank + ret['arrdocstr'] = getarrdocsign(a, var) + return ret diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/capi_maps.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/capi_maps.pyi new file mode 100644 index 0000000..9266003 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/capi_maps.pyi @@ -0,0 +1,33 @@ +from .auxfuncs import _ROut, _Var, process_f2cmap_dict + +__all__ = [ + "cb_routsign2map", + "cb_sign2map", + "common_sign2map", + "getarrdims", + "getarrdocsign", + "getctype", + "getinit", + "getpydocsign", + "getstrlength", + "modsign2map", + "process_f2cmap_dict", + "routsign2map", + "sign2map", +] + +### + +def getctype(var: _Var) -> str: ... +def f2cexpr(expr: str) -> str: ... +def getstrlength(var: _Var) -> str: ... +def getarrdims(a: str, var: _Var, verbose: int = 0) -> dict[str, str]: ... +def getpydocsign(a: str, var: _Var) -> tuple[str, str]: ... +def getarrdocsign(a: str, var: _Var) -> str: ... +def getinit(a: str, var: _Var) -> tuple[str, str]: ... +def sign2map(a: str, var: _Var) -> dict[str, str]: ... +def routsign2map(rout: _ROut) -> dict[str, str]: ... 
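The dicts these signature-mapping helpers return feed the `#key#` placeholder substitution implemented by `replace` in auxfuncs.py, which the rule templates in cb_rules.py below rely on. A minimal usage sketch, assuming a NumPy installation where `numpy.f2py.auxfuncs.replace` is importable as its stub declares:

from numpy.f2py.auxfuncs import replace

# '#ctype#' and '#name#' are placeholder keys looked up in the dict:
tmpl = 'static #ctype# #name#_return;'
print(replace(tmpl, {'ctype': 'double', 'name': 'example'}))
# -> static double example_return;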
+def modsign2map(m: _ROut) -> dict[str, str]: ... +def cb_sign2map(a: str, var: _Var, index: object | None = None) -> dict[str, str]: ... +def cb_routsign2map(rout: _ROut, um: str) -> dict[str, str]: ... +def common_sign2map(a: str, var: _Var) -> dict[str, str]: ... # obsolete diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/cb_rules.py b/.venv/lib/python3.12/site-packages/numpy/f2py/cb_rules.py new file mode 100644 index 0000000..238d473 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/cb_rules.py @@ -0,0 +1,665 @@ +""" +Build call-back mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__, cfuncs +from .auxfuncs import ( + applyrules, + debugcapi, + dictappend, + errmess, + getargs, + hasnote, + isarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + isfunction, + isintent_c, + isintent_hide, + isintent_in, + isintent_inout, + isintent_nothide, + isintent_out, + isoptional, + isrequired, + isscalar, + isstring, + isstringfunction, + issubroutine, + l_and, + l_not, + l_or, + outmess, + replace, + stripcomma, + throw_error, +) + +f2py_version = __version__.version + + +################## Rules for callback function ############## + +cb_routine_rules = { + 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', + 'body': """ +#begintitle# +typedef struct { + PyObject *capi; + PyTupleObject *args_capi; + int nofargs; + jmp_buf jmpbuf; +} #name#_t; + +#if defined(F2PY_THREAD_LOCAL_DECL) && !defined(F2PY_USE_PYTHON_TLS) + +static F2PY_THREAD_LOCAL_DECL #name#_t *_active_#name# = NULL; + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + #name#_t *prev = _active_#name#; + _active_#name# = ptr; + return prev; +} + +static #name#_t *get_active_#name#(void) { + return _active_#name#; +} + +#else + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PySwapThreadLocalCallbackPtr(key, ptr); +} + +static #name#_t *get_active_#name#(void) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PyGetThreadLocalCallbackPtr(key); +} + +#endif + +/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ +#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { + #name#_t cb_local = { NULL, NULL, 0 }; + #name#_t *cb = NULL; + PyTupleObject *capi_arglist = NULL; + PyObject *capi_return = NULL; + PyObject *capi_tmp = NULL; + PyObject *capi_arglist_list = NULL; + int capi_j,capi_i = 0; + int capi_longjmp_ok = 1; +#decl# +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_clock(); +#endif + cb = get_active_#name#(); + if (cb == NULL) { + capi_longjmp_ok = 0; + cb = &cb_local; + } + capi_arglist = cb->args_capi; + CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + if (cb->capi==NULL) { + capi_longjmp_ok = 0; + cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + } + if (cb->capi==NULL) { + PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); + goto capi_fail; + } + if (F2PyCapsule_Check(cb->capi)) { + #name#_typedef #name#_cptr; + #name#_cptr = 
F2PyCapsule_AsVoidPtr(cb->capi); + #returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); + #return# + } + if (capi_arglist==NULL) { + capi_longjmp_ok = 0; + capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); + if (capi_tmp) { + capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + if (capi_arglist==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); + goto capi_fail; + } + } else { + PyErr_Clear(); + capi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); + } + } + if (capi_arglist == NULL) { + PyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); + goto capi_fail; + } +#setdims# +#ifdef PYPY_VERSION +#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) + capi_arglist_list = PySequence_List((PyObject *)capi_arglist); + if (capi_arglist_list == NULL) goto capi_fail; +#else +#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) +#endif +#pyobjfrom# +#undef CAPI_ARGLIST_SETITEM +#ifdef PYPY_VERSION + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); +#else + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +#endif + CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_call_clock(); +#endif +#ifdef PYPY_VERSION + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); + Py_DECREF(capi_arglist_list); + capi_arglist_list = NULL; +#else + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); +#endif +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_call_clock(); +#endif + CFUNCSMESSPY(\"cb:capi_return=\",capi_return); + if (capi_return == NULL) { + fprintf(stderr,\"capi_return is NULL\\n\"); + goto capi_fail; + } + if (capi_return == Py_None) { + Py_DECREF(capi_return); + capi_return = Py_BuildValue(\"()\"); + } + else if (!PyTuple_Check(capi_return)) { + capi_return = Py_BuildValue(\"(N)\",capi_return); + } + capi_j = PyTuple_Size(capi_return); + capi_i = 0; +#frompyobj# + CFUNCSMESS(\"cb:#name#:successful\\n\"); + Py_DECREF(capi_return); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_clock(); +#endif + goto capi_return_pt; +capi_fail: + fprintf(stderr,\"Call-back #name# failed.\\n\"); + Py_XDECREF(capi_return); + Py_XDECREF(capi_arglist_list); + if (capi_longjmp_ok) { + longjmp(cb->jmpbuf,-1); + } +capi_return_pt: + ; +#return# +} +#endtitle# +""", + 'need': ['setjmp.h', 'CFUNCSMESS', 'F2PY_THREAD_LOCAL_DECL'], + 'maxnofargs': '#maxnofargs#', + 'nofoptargs': '#nofoptargs#', + 'docstr': """\ + def #argname#(#docsignature#): return #docreturn#\\n\\ +#docstrsigns#""", + 'latexdocstr': """ +{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} +#routnote# + +#latexdocstrsigns#""", + 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' +} +cb_rout_rules = [ + { # Init + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 
'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': ' Required arguments:', + 'docstropt': ' Optional arguments:', + 'docstrout': ' Return objects:', + 'docstrcbs': ' Call-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { # Function + 'decl': ' #ctype# return_value = 0;', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + '''\ + if (capi_j>capi_i) { + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + "#ctype#_from_pyobj failed in converting return_value of" + " call-back function #name# to C #ctype#\\n"); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }''', + {debugcapi: + ' fprintf(stderr,"#showvalueformat#.\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], + 'return': ' return return_value;', + '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) + }, + { # String function + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, + 'args': '#ctype# return_value,int return_value_len', + 'args_nm': 'return_value,&return_value_len', + 'args_td': '#ctype# ,int', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->\\"");'}, + """\ + if (capi_j>capi_i) { + GETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], + 'return': 'return;', + '_check': isstringfunction + }, + { # Complex function + 'optargs': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# *return_value +#endif +""", + 'optargs_nm': """ +#ifndef F2PY_CB_RETURNCOMPLEX +return_value +#endif +""", + 'optargs_td': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# * +#endif +""", + 'decl': """ +#ifdef F2PY_CB_RETURNCOMPLEX + #ctype# return_value = {0, 0}; +#endif +""", + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + """\ + if (capi_j>capi_i) { +#ifdef F2PY_CB_RETURNCOMPLEX + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#else + GETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#endif + } else { + fprintf(stderr, + \"Warning: call-back function #name# did not provide\" + \" return value (index=%d, type=#ctype#)\\n\",capi_i); + }""", + {debugcapi: """\ +#ifdef F2PY_CB_RETURNCOMPLEX + fprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); +#else + fprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); +#endif +"""} + ], + 'return': """ +#ifdef F2PY_CB_RETURNCOMPLEX + return return_value; +#else + 
return; +#endif +""", + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], + '_check': iscomplexfunction + }, + {'docstrout': ' #pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasnote: '--- #note#'}], + 'docreturn': '#rname#,', + '_check': isfunction}, + {'_check': issubroutine, 'return': 'return;'} +] + +cb_arg_rules = [ + { # Doc + 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'}, + 'docstrout': {isintent_out: ' #pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, + 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, + 'depend': '' + }, + { + 'args': { + l_and(isscalar, isintent_c): '#ctype# #varname_i#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', + isarray: '#ctype# *#varname_i#', + isstring: '#ctype# #varname_i#' + }, + 'args_nm': { + l_and(isscalar, isintent_c): '#varname_i#', + l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', + isarray: '#varname_i#', + isstring: '#varname_i#' + }, + 'args_td': { + l_and(isscalar, isintent_c): '#ctype#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *', + isarray: '#ctype# *', + isstring: '#ctype#' + }, + 'need': {l_or(isscalar, isarray, isstring): '#ctype#'}, + # untested with multiple args + 'strarglens': {isstring: ',int #varname_i#_cb_len'}, + 'strarglens_td': {isstring: ',int'}, # untested with multiple args + # untested with multiple args + 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, + }, + { # Scalars + 'decl': {l_not(isintent_c): ' #ctype# #varname_i#=(*#varname_i#_cb_capi);'}, + 'error': {l_and(isintent_c, isintent_out, + throw_error('intent(c,out) is forbidden for callback scalar arguments')): + ''}, + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + {isintent_out: + ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + ], + 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, + {debugcapi: 'CFUNCSMESS'}], + '_check': isscalar + }, { + 'pyobjfrom': [{isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) + goto 
capi_fail;"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}, + {iscomplex: '#ctype#'}], + '_check': l_and(isscalar, isintent_nothide), + '_optional': '' + }, { # String + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->\\"");'}, + """ if (capi_j>capi_i) + GETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, + ], + 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi: 'CFUNCSMESS'}, 'string.h'], + '_check': l_and(isstring, isintent_out) + }, { + 'pyobjfrom': [ + {debugcapi: + (' fprintf(stderr,"debug-capi:cb:#varname#=#showvalueformat#:' + '%d:\\n",#varname_i#,#varname_i#_cb_len);')}, + {isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) { + int #varname_i#_cb_dims[] = {#varname_i#_cb_len}; + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) + goto capi_fail; + }"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}], + '_check': l_and(isstring, isintent_nothide), + '_optional': '' + }, + # Array ... + { + 'decl': ' npy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', + 'setdims': ' #cbsetdims#;', + '_check': isarray, + '_depend': '' + }, + { + 'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_CARRAY,NULL); +""", + l_not(isintent_c): """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. 
*/ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_FARRAY,NULL); +""", + }, + """ + if (tmp_arr==NULL) + goto capi_fail; + if (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) + goto capi_fail; +}"""], + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + """ if (capi_j>capi_i) { + PyArrayObject *rv_cb_arr = NULL; + if ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; + rv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", + {isintent_c: '|F2PY_INTENT_C'}, + """,capi_tmp); + if (rv_cb_arr == NULL) { + fprintf(stderr,\"rv_cb_arr is NULL\\n\"); + goto capi_fail; + } + MEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); + if (capi_tmp != (PyObject *)rv_cb_arr) { + Py_DECREF(rv_cb_arr); + } + }""", + {debugcapi: ' fprintf(stderr,"<-.\\n");'}, + ], + 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], + '_check': l_and(isarray, isintent_out) + }, { + 'docreturn': '#varname#,', + '_check': isintent_out + } +] + +################## Build call-back module ############# +cb_map = {} + + +def buildcallbacks(m): + cb_map[m['name']] = [] + for bi in m['body']: + if bi['block'] == 'interface': + for b in bi['body']: + if b: + buildcallback(b, m['name']) + else: + errmess(f"warning: empty body for {m['name']}\n") + + +def buildcallback(rout, um): + from . import capi_maps + + outmess(f" Constructing call-back function \"cb_{rout['name']}_in_{um}\"\n") + args, depargs = getargs(rout) + capi_maps.depargs = depargs + var = rout['vars'] + vrd = capi_maps.cb_routsign2map(rout, um) + rd = dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) + for r in cb_rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + savevrd = {} + for i, a in enumerate(args): + vrd = capi_maps.cb_sign2map(a, var[a], index=i) + savevrd[a] = vrd + for r in cb_arg_rules: + if '_depend' in r: + continue + if '_optional' in r and isoptional(var[a]): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in args: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' in r: + continue + if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' not in r: + continue + if '_optional' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'args' in rd and 'optargs' in rd: + if isinstance(rd['optargs'], list): + rd['optargs'] = rd['optargs'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_nm'] = rd['optargs_nm'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_td'] = rd['optargs_td'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + optargs = stripcomma(replace('#docsignopt#', + 
{'docsignopt': rd['docsignopt']} + )) + if optargs == '': + rd['docsignature'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignature'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + if 'args' not in rd: + rd['args'] = '' + rd['args_td'] = '' + rd['args_nm'] = '' + if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): + rd['noargs'] = 'void' + + ar = applyrules(cb_routine_rules, rd) + cfuncs.callbacks[rd['name']] = ar['body'] + if isinstance(ar['need'], str): + ar['need'] = [ar['need']] + + if 'need' in rd: + for t in cfuncs.typedefs.keys(): + if t in rd['need']: + ar['need'].append(t) + + cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] + ar['need'].append(rd['name'] + '_typedef') + cfuncs.needs[rd['name']] = ar['need'] + + capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], + 'nofoptargs': ar['nofoptargs'], + 'docstr': ar['docstr'], + 'latexdocstr': ar['latexdocstr'], + 'argname': rd['argname'] + } + outmess(f" {ar['docstrshort']}\n") +################## Build call-back function ############# diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/cb_rules.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/cb_rules.pyi new file mode 100644 index 0000000..b22f544 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/cb_rules.pyi @@ -0,0 +1,17 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +## + +f2py_version: Final = version + +cb_routine_rules: Final[dict[str, str | list[str]]] = ... +cb_rout_rules: Final[list[dict[str, str | Any]]] = ... +cb_arg_rules: Final[list[dict[str, str | Any]]] = ... + +cb_map: Final[dict[str, list[list[str]]]] = ... + +def buildcallbacks(m: Mapping[str, object]) -> None: ... +def buildcallback(rout: Mapping[str, object], um: Mapping[str, object]) -> None: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/cfuncs.py b/.venv/lib/python3.12/site-packages/numpy/f2py/cfuncs.py new file mode 100644 index 0000000..b2b1cad --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/cfuncs.py @@ -0,0 +1,1563 @@ +""" +C declarations, CPP macros, and C functions for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy +import sys + +from . import __version__ + +f2py_version = __version__.version + + +def errmess(s: str) -> None: + """ + Write an error message to stderr. + + This indirection is needed because sys.stderr might not always be available (see #26862). 
+    """
+    if sys.stderr is not None:
+        sys.stderr.write(s)
+
+##################### Definitions ##################
+
+
+outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [],
+            'userincludes': [],
+            'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [],
+            'commonhooks': []}
+needs = {}
+includes0 = {'includes0': '/*need_includes0*/'}
+includes = {'includes': '/*need_includes*/'}
+userincludes = {'userincludes': '/*need_userincludes*/'}
+typedefs = {'typedefs': '/*need_typedefs*/'}
+typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'}
+cppmacros = {'cppmacros': '/*need_cppmacros*/'}
+cfuncs = {'cfuncs': '/*need_cfuncs*/'}
+callbacks = {'callbacks': '/*need_callbacks*/'}
+f90modhooks = {'f90modhooks': '/*need_f90modhooks*/',
+               'initf90modhooksstatic': '/*initf90modhooksstatic*/',
+               'initf90modhooksdynamic': '/*initf90modhooksdynamic*/',
+               }
+commonhooks = {'commonhooks': '/*need_commonhooks*/',
+               'initcommonhooks': '/*need_initcommonhooks*/',
+               }
+
+############ Includes ###################
+
+includes0['math.h'] = '#include <math.h>'
+includes0['string.h'] = '#include <string.h>'
+includes0['setjmp.h'] = '#include <setjmp.h>'
+
+includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
+#include "arrayobject.h"'''
+includes['npy_math.h'] = '#include "numpy/npy_math.h"'
+
+includes['arrayobject.h'] = '#include "fortranobject.h"'
+includes['stdarg.h'] = '#include <stdarg.h>'
+
+############# Type definitions ###############
+
+typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;'
+typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;'
+typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;'
+typedefs['signed_char'] = 'typedef signed char signed_char;'
+typedefs['long_long'] = """
+#if defined(NPY_OS_WIN32)
+typedef __int64 long_long;
+#else
+typedef long long long_long;
+typedef unsigned long long unsigned_long_long;
+#endif
+"""
+typedefs['unsigned_long_long'] = """
+#if defined(NPY_OS_WIN32)
+typedef __uint64 long_long;
+#else
+typedef unsigned long long unsigned_long_long;
+#endif
+"""
+typedefs['long_double'] = """
+#ifndef _LONG_DOUBLE
+typedef long double long_double;
+#endif
+"""
+typedefs[
+    'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;'
+typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;'
+typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;'
+typedefs['string'] = """typedef char * string;"""
+typedefs['character'] = """typedef char character;"""
+
+
+############### CPP macros ####################
+cppmacros['CFUNCSMESS'] = """
+#ifdef DEBUGCFUNCS
+#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
+#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
+        PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+        fprintf(stderr,\"\\n\");
+#else
+#define CFUNCSMESS(mess)
+#define CFUNCSMESSPY(mess,obj)
+#endif
+"""
+cppmacros['F_FUNC'] = """
+#if defined(PREPEND_FORTRAN)
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) _##F
+#else
+#define F_FUNC(f,F) _##f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) _##F##_
+#else
+#define F_FUNC(f,F) _##f##_
+#endif
+#endif
+#else
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) F
+#else
+#define F_FUNC(f,F) f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) F##_
+#else
+#define F_FUNC(f,F) f##_
+#endif
+#endif
+#endif
+#if defined(UNDERSCORE_G77)
+#define F_FUNC_US(f,F)
F_FUNC(f##_,F##_) +#else +#define F_FUNC_US(f,F) F_FUNC(f,F) +#endif +""" +cppmacros['F_WRAPPEDFUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) +#else +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) +#endif +""" +cppmacros['F_MODFUNC'] = """ +#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f +#else +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f +#else +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) f ## .in. ## m +#else +#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ +#endif +#endif +/* +#if defined(UPPERCASE_FORTRAN) +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) +#else +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) +#endif +*/ + +#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) +""" +cppmacros['SWAPUNSAFE'] = """ +#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) +""" +cppmacros['SWAP'] = """ +#define SWAP(a,b,t) {\\ + t *c;\\ + c = a;\\ + a = b;\\ + b = c;} +""" +# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & +# NPY_ARRAY_C_CONTIGUOUS)' +cppmacros['PRINTPYOBJERR'] = """ +#define PRINTPYOBJERR(obj)\\ + fprintf(stderr,\"#modulename#.error is related to \");\\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +""" +cppmacros['MINMAX'] = """ +#ifndef max +#define max(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef min +#define min(a,b) ((a < b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) ((a < b) ? (a) : (b)) +#endif +""" +cppmacros['len..'] = """ +/* See fortranobject.h for definitions. The macros here are provided for BC. 
*/ +#define rank f2py_rank +#define shape f2py_shape +#define fshape f2py_shape +#define len f2py_len +#define flen f2py_flen +#define slen f2py_slen +#define size f2py_size +""" +cppmacros['pyobj_from_char1'] = r""" +#define pyobj_from_char1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_short1'] = r""" +#define pyobj_from_short1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_int1'] = ['signed_char'] +cppmacros['pyobj_from_int1'] = r""" +#define pyobj_from_int1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_long1'] = r""" +#define pyobj_from_long1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_long_long1'] = ['long_long'] +cppmacros['pyobj_from_long_long1'] = """ +#ifdef HAVE_LONG_LONG +#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) +#else +#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. +#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) +#endif +""" +needs['pyobj_from_long_double1'] = ['long_double'] +cppmacros['pyobj_from_long_double1'] = """ +#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_double1'] = """ +#define pyobj_from_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_float1'] = """ +#define pyobj_from_float1(v) (PyFloat_FromDouble(v))""" +needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +cppmacros['pyobj_from_complex_long_double1'] = """ +#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_double1'] = ['complex_double'] +cppmacros['pyobj_from_complex_double1'] = """ +#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_float1'] = ['complex_float'] +cppmacros['pyobj_from_complex_float1'] = """ +#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_string1'] = ['string'] +cppmacros['pyobj_from_string1'] = """ +#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))""" +needs['pyobj_from_string1size'] = ['string'] +cppmacros['pyobj_from_string1size'] = """ +#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))""" +needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYPYARRAYTEMPLATE'] = """ +/* New SciPy */ +#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; + +#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_SHORT: *(npy_short 
*)(PyArray_DATA(arr))=*v; break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ + default: return -2;\\ + };\\ + return 1 +""" + +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """ +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {\\ + *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + return 1;\\ + }\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\ + break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\ + break;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\ + break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ + default: return -2;\\ + };\\ + return -1; +""" +# cppmacros['NUMFROMARROBJ']=""" +# define NUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ +# XXX: Note 
that CNUMFROMARROBJ is identical with NUMFROMARROBJ +# cppmacros['CNUMFROMARROBJ']=""" +# define CNUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ + + +needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] +cppmacros['GETSTRFROMPYTUPLE'] = """ +#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ + PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ + if (rv_cb_str == NULL)\\ + goto capi_fail;\\ + if (PyBytes_Check(rv_cb_str)) {\\ + str[len-1]='\\0';\\ + STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\ + } else {\\ + PRINTPYOBJERR(rv_cb_str);\\ + PyErr_SetString(#modulename#_error,\"string object expected\");\\ + goto capi_fail;\\ + }\\ + } +""" +cppmacros['GETSCALARFROMPYTUPLE'] = """ +#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ + if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ + if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ + goto capi_fail;\\ + } +""" + +cppmacros['FAILNULL'] = """\ +#define FAILNULL(p) do { \\ + if ((p) == NULL) { \\ + PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ + goto capi_fail; \\ + } \\ +} while (0) +""" +needs['MEMCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['MEMCOPY'] = """ +#define MEMCOPY(to,from,n)\\ + do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) +""" +cppmacros['STRINGMALLOC'] = """ +#define STRINGMALLOC(str,len)\\ + if ((str = (string)malloc(len+1)) == NULL) {\\ + PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ + goto capi_fail;\\ + } else {\\ + (str)[len] = '\\0';\\ + } +""" +cppmacros['STRINGFREE'] = """ +#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) +""" +needs['STRINGPADN'] = ['string.h'] +cppmacros['STRINGPADN'] = """ +/* +STRINGPADN replaces null values with padding values from the right. + +`to` must have size of at least N bytes. + +If the `to[N-1]` has null value, then replace it and all the +preceding, nulls with the given padding. + +STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation. +*/ +#define STRINGPADN(to, N, NULLVALUE, PADDING) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) { \\ + _to[_m] = PADDING; \\ + } \\ + } while (0) +""" +needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPYN'] = """ +/* +STRINGCOPYN copies N bytes. + +`to` and `from` buffers must have sizes of at least N bytes. 
+*/ +#define STRINGCOPYN(to,from,N) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + char *_from = (from); \\ + FAILNULL(_to); FAILNULL(_from); \\ + (void)strncpy(_to, _from, _m); \\ + } while (0) +""" +needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPY'] = """ +#define STRINGCOPY(to,from)\\ + do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) +""" +cppmacros['CHECKGENERIC'] = """ +#define CHECKGENERIC(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKARRAY'] = """ +#define CHECKARRAY(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSTRING'] = """ +#define CHECKSTRING(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + PyErr_SetString(#modulename#_error, errstring);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSCALAR'] = """ +#define CHECKSCALAR(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + PyErr_SetString(#modulename#_error,errstring);\\ + /*goto capi_fail;*/\\ + } else """ +# cppmacros['CHECKDIMS']=""" +# define CHECKDIMS(dims,rank) \\ +# for (int i=0;i<(rank);i++)\\ +# if (dims[i]<0) {\\ +# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ +# goto capi_fail;\\ +# } +# """ +cppmacros[ + 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' +cppmacros['OLDPYNUM'] = """ +#ifdef OLDPYNUM +#error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html +#endif +""" + +# Defining the correct value to indicate thread-local storage in C without +# running a compile-time check (which we have no control over in generated +# code used outside of NumPy) is hard. Therefore we support overriding this +# via an external define - the f2py-using package can then use the same +# compile-time checks as we use for `NPY_TLS` when building NumPy (see +# scipy#21860 for an example of that). +# +# __STDC_NO_THREADS__ should not be coupled to the availability of _Thread_local. +# In case we get a bug report, guard it with __STDC_NO_THREADS__ after all. +# +# `thread_local` has become a keyword in C23, but don't try to use that yet +# (too new, doing so while C23 support is preliminary will likely cause more +# problems than it solves). +# +# Note: do not try to use `threads.h`, its availability is very low +# *and* threads.h isn't actually used where `F2PY_THREAD_LOCAL_DECL` is +# in the generated code. See gh-27718 for more details. 
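+#
+# For illustration only (this note is not from the upstream file): generated
+# C code uses the macro defined below roughly as
+#     static F2PY_THREAD_LOCAL_DECL int counter = 0;
+# which expands to _Thread_local, __thread or __declspec(thread) depending on
+# the compiler, and stays undefined (for the build system to supply) when
+# none of the checks below match.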
+cppmacros["F2PY_THREAD_LOCAL_DECL"] = """ +#ifndef F2PY_THREAD_LOCAL_DECL +#if defined(_MSC_VER) +#define F2PY_THREAD_LOCAL_DECL __declspec(thread) +#elif defined(NPY_OS_MINGW) +#define F2PY_THREAD_LOCAL_DECL __thread +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) +#define F2PY_THREAD_LOCAL_DECL _Thread_local +#elif defined(__GNUC__) \\ + && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4))) +#define F2PY_THREAD_LOCAL_DECL __thread +#endif +#endif +""" +################# C functions ############### + +cfuncs['calcarrindex'] = """ +static int calcarrindex(int *i,PyArrayObject *arr) { + int k,ii = i[0]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['calcarrindextr'] = """ +static int calcarrindextr(int *i,PyArrayObject *arr) { + int k,ii = i[PyArray_NDIM(arr)-1]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['forcomb'] = """ +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { + int k; + if (dims==NULL) return 0; + if (nd<0) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; + } + cache->i[0] = cache->i_tr[nd-1] = -1; + return 1; +} +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; + int j,*i,*i_tr,k; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; + i[0]++; + if (i[0]==cache->d[0]) { + j=1; + while ((jd[j]-1)) j++; + if (j==nd) { + free(i); + free(i_tr); + return NULL; + } + for (k=0;ktr) return i_tr; + return i; +}""" +needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] +cfuncs['try_pyarr_from_string'] = """ +/* + try_pyarr_from_string copies str[:len(obj)] to the data of an `ndarray`. + + If obj is an `ndarray`, it is assumed to be contiguous. + + If the specified len==-1, str must be null-terminated. +*/ +static int try_pyarr_from_string(PyObject *obj, + const string str, const int len) { +#ifdef DEBUGCFUNCS +fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n", + (char*)str,len, obj); +#endif + if (!obj) return -2; /* Object missing */ + if (obj == Py_None) return -1; /* None */ + if (!PyArray_Check(obj)) goto capi_fail; /* not an ndarray */ + if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + assert(ISCONTIGUOUS(arr)); + string buf = PyArray_DATA(arr); + npy_intp n = len; + if (n == -1) { + /* Assuming null-terminated str. */ + n = strlen(str); + } + if (n > PyArray_NBYTES(arr)) { + n = PyArray_NBYTES(arr); + } + STRINGCOPYN(buf, str, n); + return 1; + } +capi_fail: + PRINTPYOBJERR(obj); + PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\"); + return 0; +} +""" +needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN'] +cfuncs['string_from_pyobj'] = """ +/* + Create a new string buffer `str` of at most length `len` from a + Python string-like object `obj`. + + The string buffer has given size (len) or the size of inistr when len==-1. 
+ + The string buffer is padded with blanks: in Fortran, trailing blanks + are insignificant contrary to C nulls. + */ +static int +string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj, + const char *errmess) +{ + PyObject *tmp = NULL; + string buf = NULL; + npy_intp n = -1; +#ifdef DEBUGCFUNCS +fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\", + (char*)str, *len, (char *)inistr, obj); +#endif + if (obj == Py_None) { + n = strlen(inistr); + buf = inistr; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (!ISCONTIGUOUS(arr)) { + PyErr_SetString(PyExc_ValueError, + \"array object is non-contiguous.\"); + goto capi_fail; + } + n = PyArray_NBYTES(arr); + buf = PyArray_DATA(arr); + n = strnlen(buf, n); + } + else { + if (PyBytes_Check(obj)) { + tmp = obj; + Py_INCREF(tmp); + } + else if (PyUnicode_Check(obj)) { + tmp = PyUnicode_AsASCIIString(obj); + } + else { + PyObject *tmp2; + tmp2 = PyObject_Str(obj); + if (tmp2) { + tmp = PyUnicode_AsASCIIString(tmp2); + Py_DECREF(tmp2); + } + else { + tmp = NULL; + } + } + if (tmp == NULL) goto capi_fail; + n = PyBytes_GET_SIZE(tmp); + buf = PyBytes_AS_STRING(tmp); + } + if (*len == -1) { + /* TODO: change the type of `len` so that we can remove this */ + if (n > NPY_MAX_INT) { + PyErr_SetString(PyExc_OverflowError, + "object too large for a 32-bit int"); + goto capi_fail; + } + *len = n; + } + else if (*len < n) { + /* discard the last (len-n) bytes of input buf */ + n = *len; + } + if (n < 0 || *len < 0 || buf == NULL) { + goto capi_fail; + } + STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1) + if (n < *len) { + /* + Pad fixed-width string with nulls. The caller will replace + nulls with blanks when the corresponding argument is not + intent(c). + */ + memset(*str + n, '\\0', *len - n); + } + STRINGCOPYN(*str, buf, n); + Py_XDECREF(tmp); + return 1; +capi_fail: + Py_XDECREF(tmp); + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + +cfuncs['character_from_pyobj'] = """ +static int +character_from_pyobj(character* v, PyObject *obj, const char *errmess) { + if (PyBytes_Check(obj)) { + /* empty bytes has trailing null, so dereferencing is always safe */ + *v = PyBytes_AS_STRING(obj)[0]; + return 1; + } else if (PyUnicode_Check(obj)) { + PyObject* tmp = PyUnicode_AsASCIIString(obj); + if (tmp != NULL) { + *v = PyBytes_AS_STRING(tmp)[0]; + Py_DECREF(tmp); + return 1; + } + } else if (PyArray_Check(obj)) { + PyArrayObject* arr = (PyArrayObject*)obj; + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *v = PyArray_BYTES(arr)[0]; + return 1; + } else if (F2PY_IS_UNICODE_ARRAY(arr)) { + // TODO: update when numpy will support 1-byte and + // 2-byte unicode dtypes + PyObject* tmp = PyUnicode_FromKindAndData( + PyUnicode_4BYTE_KIND, + PyArray_BYTES(arr), + (PyArray_NBYTES(arr)>0?1:0)); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + } else if (PySequence_Check(obj)) { + PyObject* tmp = PySequence_GetItem(obj,0); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + /* TODO: This error (and most other) error handling needs cleaning. 
*/ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + strcpy(mess, errmess); + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_TypeError; + Py_INCREF(err); + } + else { + Py_INCREF(err); + PyErr_Clear(); + } + sprintf(mess + strlen(mess), + " -- expected str|bytes|sequence-of-str-or-bytes, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + Py_DECREF(err); + } + return 0; +} +""" + +# TODO: These should be dynamically generated, too many mapped to int things, +# see note in _isocbind.py +needs['char_from_pyobj'] = ['int_from_pyobj'] +cfuncs['char_from_pyobj'] = """ +static int +char_from_pyobj(char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (char)i; + return 1; + } + return 0; +} +""" + + +needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] +cfuncs['signed_char_from_pyobj'] = """ +static int +signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (signed_char)i; + return 1; + } + return 0; +} +""" + + +needs['short_from_pyobj'] = ['int_from_pyobj'] +cfuncs['short_from_pyobj'] = """ +static int +short_from_pyobj(short* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (short)i; + return 1; + } + return 0; +} +""" + + +cfuncs['int_from_pyobj'] = """ +static int +int_from_pyobj(int* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = Npy__PyLong_AsInt(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = Npy__PyLong_AsInt(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (int_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +cfuncs['long_from_pyobj'] = """ +static int +long_from_pyobj(long* v, PyObject *obj, const char *errmess) { + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = PyLong_AsLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +needs['long_long_from_pyobj'] = ['long_long'] +cfuncs['long_long_from_pyobj'] = """ +static int +long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLongLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = 
PyLong_AsLongLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] +cfuncs['long_double_from_pyobj'] = """ +static int +long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess) +{ + double d=0; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, LongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } + } + } + if (double_from_pyobj(&d, obj, errmess)) { + *v = (long_double)d; + return 1; + } + return 0; +} +""" + + +cfuncs['double_from_pyobj'] = """ +static int +double_from_pyobj(double* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + if (PyFloat_Check(obj)) { + *v = PyFloat_AsDouble(obj); + return !(*v == -1.0 && PyErr_Occurred()); + } + + tmp = PyNumber_Float(obj); + if (tmp) { + *v = PyFloat_AsDouble(tmp); + Py_DECREF(tmp); + return !(*v == -1.0 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['float_from_pyobj'] = ['double_from_pyobj'] +cfuncs['float_from_pyobj'] = """ +static int +float_from_pyobj(float* v, PyObject *obj, const char *errmess) +{ + double d=0.0; + if (double_from_pyobj(&d,obj,errmess)) { + *v = (float)d; + return 1; + } + return 0; +} +""" + + +needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', + 'complex_double_from_pyobj', 'npy_math.h'] +cfuncs['complex_long_double_from_pyobj'] = """ +static int +complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) +{ + complex_double cd = {0.0,0.0}; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, CLongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } + } + } + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (long_double)cd.r; + (*v).i = (long_double)cd.i; + return 1; + } + return 0; +} +""" + + +needs['complex_double_from_pyobj'] = ['complex_double', 'npy_math.h'] +cfuncs['complex_double_from_pyobj'] = """ +static int +complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) { + Py_complex c; + if 
(PyComplex_Check(obj)) { + c = PyComplex_AsCComplex(obj); + (*v).r = c.real; + (*v).i = c.imag; + return 1; + } + if (PyArray_IsScalar(obj, ComplexFloating)) { + if (PyArray_IsScalar(obj, CFloat)) { + npy_cfloat new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_crealf(new); + (*v).i = (double)npy_cimagf(new); + } + else if (PyArray_IsScalar(obj, CLongDouble)) { + npy_clongdouble new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_creall(new); + (*v).i = (double)npy_cimagl(new); + } + else { /* if (PyArray_IsScalar(obj, CDouble)) */ + PyArray_ScalarAsCtype(obj, v); + } + return 1; + } + if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ + PyArrayObject *arr; + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); + } + else { + arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); + } + if (arr == NULL) { + return 0; + } + (*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr)))); + Py_DECREF(arr); + return 1; + } + /* Python does not provide PyNumber_Complex function :-( */ + (*v).i = 0.0; + if (PyFloat_Check(obj)) { + (*v).r = PyFloat_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PyLong_Check(obj)) { + (*v).r = PyLong_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { + PyObject *tmp = PySequence_GetItem(obj,0); + if (tmp) { + if (complex_double_from_pyobj(v,tmp,errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) + err = PyExc_TypeError; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['complex_float_from_pyobj'] = [ + 'complex_float', 'complex_double_from_pyobj'] +cfuncs['complex_float_from_pyobj'] = """ +static int +complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) +{ + complex_double cd={0.0,0.0}; + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (float)cd.r; + (*v).i = (float)cd.i; + return 1; + } + return 0; +} +""" + + +cfuncs['try_pyarr_from_character'] = """ +static int try_pyarr_from_character(PyObject* obj, character* v) { + PyArrayObject *arr = (PyArrayObject*)obj; + if (!obj) return -2; + if (PyArray_Check(obj)) { + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *(character *)(PyArray_DATA(arr)) = *v; + return 1; + } + } + { + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_ValueError; + strcpy(mess, "try_pyarr_from_character failed" + " -- expected bytes array-scalar|array, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + } + } + return 0; +} +""" + +needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] +cfuncs[ + 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] +cfuncs[ + 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' 
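+
+# --- Illustrative sketch (not part of upstream cfuncs.py) -------------------
+# Every *_from_pyobj helper above follows the same coercion ladder: accept the
+# value if it already has the right type, else try Python's number protocol,
+# else fall back to obj.real for complex input or obj[0] for sequence input.
+# The same ladder, modelled in plain Python (hypothetical helper):
+def _int_from_pyobj_model(obj):
+    """Python model of the C int_from_pyobj fallback chain."""
+    if isinstance(obj, int):
+        return obj
+    if not isinstance(obj, (str, bytes)):   # strings are rejected, as in C
+        try:
+            return int(obj)                 # PyNumber_Long(obj)
+        except (TypeError, ValueError):
+            pass
+        if isinstance(obj, complex):        # retry with the real part
+            return _int_from_pyobj_model(obj.real)
+        if isinstance(obj, (list, tuple)):  # retry with the first element
+            return _int_from_pyobj_model(obj[0])
+    raise TypeError(f"cannot coerce {obj!r} to a Fortran integer")
+# e.g. _int_from_pyobj_model([7.5]) == 7, matching how the generated wrappers
+# coerce a one-element sequence passed for a scalar argument.
+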
+needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' +needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' +needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' +needs['try_pyarr_from_long_long'] = [ + 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] +cfuncs[ + 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' +needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' +needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' +needs['try_pyarr_from_complex_float'] = [ + 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] +cfuncs[ + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' +needs['try_pyarr_from_complex_double'] = [ + 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] +cfuncs[ + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + + +needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] +# create the list of arguments to be used when calling back to python +cfuncs['create_cb_arglist'] = """ +static int +create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs, + const int nofoptargs, int *nofargs, PyTupleObject **args, + const char *errmess) +{ + PyObject *tmp = NULL; + PyObject *tmp_fun = NULL; + Py_ssize_t tot, opt, ext, siz, i, di = 0; + CFUNCSMESS(\"create_cb_arglist\\n\"); + tot=opt=ext=siz=0; + /* Get the total number of arguments */ + if (PyFunction_Check(fun)) { + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else { + di = 1; + if (PyObject_HasAttrString(fun,\"im_func\")) { + tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); + } + else if (PyObject_HasAttrString(fun,\"__call__\")) { + tmp = PyObject_GetAttrString(fun,\"__call__\"); + if (PyObject_HasAttrString(tmp,\"im_func\")) + tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); + else { + tmp_fun = fun; /* built-in function */ + Py_INCREF(tmp_fun); + tot = maxnofargs; + if (PyCFunction_Check(fun)) { + /* In case the function has a co_argcount (like on PyPy) */ + di = 0; + } + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + } + Py_XDECREF(tmp); + } + else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else if (F2PyCapsule_Check(fun)) { + tot = maxnofargs; + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + if(ext>0) { + fprintf(stderr,\"extra arguments tuple cannot be used with PyCapsule call-back\\n\"); + goto capi_fail; + } 
+        tmp_fun = fun;
+        Py_INCREF(tmp_fun);
+    }
+    }
+
+    if (tmp_fun == NULL) {
+        fprintf(stderr,
+                \"Call-back argument must be function|instance|instance.__call__|f2py-function \"
+                \"but got %s.\\n\",
+                ((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name));
+        goto capi_fail;
+    }
+
+    if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
+        if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
+            PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
+            Py_DECREF(tmp);
+            if (tmp_argcount == NULL) {
+                goto capi_fail;
+            }
+            tot = PyLong_AsSsize_t(tmp_argcount) - di;
+            Py_DECREF(tmp_argcount);
+        }
+    }
+    /* Get the number of optional arguments */
+    if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
+        if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
+            opt = PyTuple_Size(tmp);
+        Py_XDECREF(tmp);
+    }
+    /* Get the number of extra arguments */
+    if (xa != NULL)
+        ext = PyTuple_Size((PyObject *)xa);
+    /* Calculate the size of call-backs argument list */
+    siz = MIN(maxnofargs+ext,tot);
+    *nofargs = MAX(0,siz-ext);
+
+#ifdef DEBUGCFUNCS
+    fprintf(stderr,
+            \"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\"
+            \"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\",
+            maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs);
+#endif
+
+    if (siz < tot-opt) {
+        fprintf(stderr,
+                \"create_cb_arglist: Failed to build argument list \"
+                \"(siz) with enough arguments (tot-opt) required by \"
+                \"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\",
+                siz, tot, opt);
+        goto capi_fail;
+    }
+
+    /* Initialize argument list */
+    *args = (PyTupleObject *)PyTuple_New(siz);
+    for (i=0;i<*nofargs;i++) {
+        Py_INCREF(Py_None);
+        PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
+    }
+    if (xa != NULL)
+        for (i=(*nofargs);i<siz;i++) {
+            Py_INCREF(PyTuple_GET_ITEM((PyObject *)xa,i-(*nofargs)));
+            PyTuple_SET_ITEM(*args,i,PyTuple_GET_ITEM((PyObject *)xa,i-(*nofargs)));
+        }
+    CFUNCSMESS(\"create_cb_arglist-end\\n\");
+    Py_DECREF(tmp_fun);
+    return 1;
+
+capi_fail:
+    if (PyErr_Occurred() == NULL)
+        PyErr_SetString(#modulename#_error, errmess);
+    Py_XDECREF(tmp_fun);
+    return 0;
+}
+"""
+
+
+def buildcfuncs():
+    from .capi_maps import c2capi_map
+    for k in c2capi_map.keys():
+        m = f'pyarr_from_p_{k}1'
+        cppmacros[
+            m] = f'#define {m}(v) (PyArray_SimpleNewFromData(0,NULL,{c2capi_map[k]},(char *)v))'
+    k = 'string'
+    m = f'pyarr_from_p_{k}1'
+    # NPY_CHAR like NPY_STRING with a charselector appended
+    cppmacros[
+        m] = f'#define {m}(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))'
+
+
+############ Auxiliary functions for sorting needs ###################
+
+def append_needs(need, flag=1):
+    # This function modifies the contents of the global `outneeds` dict.
+    if isinstance(need, list):
+        for n in need:
+            append_needs(n, flag)
+    elif isinstance(need, str):
+        if not need:
+            return
+        if need in includes0:
+            n = 'includes0'
+        elif need in includes:
+            n = 'includes'
+        elif need in typedefs:
+            n = 'typedefs'
+        elif need in typedefs_generated:
+            n = 'typedefs_generated'
+        elif need in cppmacros:
+            n = 'cppmacros'
+        elif need in cfuncs:
+            n = 'cfuncs'
+        elif need in callbacks:
+            n = 'callbacks'
+        elif need in f90modhooks:
+            n = 'f90modhooks'
+        elif need in commonhooks:
+            n = 'commonhooks'
+        else:
+            errmess(f'append_needs: unknown need {need!r}\n')
+            return
+        if need in outneeds[n]:
+            return
+        if flag:
+            tmp = {}
+            if need in needs:
+                for nn in needs[need]:
+                    t = append_needs(nn, 0)
+                    if isinstance(t, dict):
+                        for nnn in t.keys():
+                            if nnn in tmp:
+                                tmp[nnn] = tmp[nnn] + t[nnn]
+                            else:
+                                tmp[nnn] = t[nnn]
+            for nn in tmp.keys():
+                for nnn in tmp[nn]:
+                    if nnn not in outneeds[nn]:
+                        outneeds[nn] = [nnn] + outneeds[nn]
+            outneeds[n].append(need)
+        else:
+            tmp = {}
+            if need in needs:
+                for nn in needs[need]:
+                    t = append_needs(nn, flag)
+                    if isinstance(t, dict):
+                        for nnn in t.keys():
+                            if nnn in tmp:
+                                tmp[nnn] = t[nnn] + tmp[nnn]
+                            else:
+                                tmp[nnn] = t[nnn]
+            if n not in tmp:
+                tmp[n] = []
+            tmp[n].append(need)
+            return tmp
+    else:
+        errmess(f'append_needs: expected list or string but got :{need!r}\n')
+
+
+def get_needs():
+    # This function modifies the contents of the global `outneeds` dict.
+    res = {}
+    for n in outneeds.keys():
+        out = []
+        saveout = copy.copy(outneeds[n])
+        while len(outneeds[n]) > 0:
+            if outneeds[n][0] not in needs:
+                out.append(outneeds[n][0])
+                del outneeds[n][0]
+            else:
+                flag = 0
+                for k in outneeds[n][1:]:
+                    if k in needs[outneeds[n][0]]:
+                        flag = 1
+                        break
+                if flag:
+                    outneeds[n] = outneeds[n][1:] + [outneeds[n][0]]
+                else:
+                    out.append(outneeds[n][0])
+                    del outneeds[n][0]
+            if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \
+                    and outneeds[n] != []:
+                print(n, saveout)
+                errmess(
+                    'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
+                out = out + saveout
+                break
+            saveout = copy.copy(outneeds[n])
+        if out == []:
+            out = [n]
+        res[n] = out
+    return res
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/cfuncs.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/cfuncs.pyi
new file mode 100644
index 0000000..5887177
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/cfuncs.pyi
@@ -0,0 +1,31 @@
+from typing import Final, TypeAlias
+
+from .__version__ import version
+
+###
+
+_NeedListDict: TypeAlias = dict[str, list[str]]
+_NeedDict: TypeAlias = dict[str, str]
+
+###
+
+f2py_version: Final = version
+
+outneeds: Final[_NeedListDict] = ...
+needs: Final[_NeedListDict] = ...
+
+includes0: Final[_NeedDict] = ...
+includes: Final[_NeedDict] = ...
+userincludes: Final[_NeedDict] = ...
+typedefs: Final[_NeedDict] = ...
+typedefs_generated: Final[_NeedDict] = ...
+cppmacros: Final[_NeedDict] = ...
+cfuncs: Final[_NeedDict] = ...
+callbacks: Final[_NeedDict] = ...
+f90modhooks: Final[_NeedDict] = ...
+commonhooks: Final[_NeedDict] = ...
+
+def errmess(s: str) -> None: ...
+def buildcfuncs() -> None: ...
+def get_needs() -> _NeedListDict: ...
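+# Note (illustrative, not upstream): append_needs() records a requirement
+# together with its transitive prerequisites, and get_needs() orders each
+# outneeds[...] list so that prerequisites come first, e.g. 'string.h' and
+# 'FAILNULL' before 'MEMCOPY'.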
+def append_needs(need: str | list[str], flag: int = 1) -> _NeedListDict: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/common_rules.py b/.venv/lib/python3.12/site-packages/numpy/f2py/common_rules.py new file mode 100644 index 0000000..cef757b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/common_rules.py @@ -0,0 +1,143 @@ +""" +Build common block mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ + +f2py_version = __version__.version + +from . import capi_maps, func2subr +from .auxfuncs import getuseblocks, hasbody, hascommon, hasnote, isintent_hide, outmess +from .crackfortran import rmbadname + + +def findcommonblocks(block, top=1): + ret = [] + if hascommon(block): + for key, value in block['common'].items(): + vars_ = {v: block['vars'][v] for v in value} + ret.append((key, value, vars_)) + elif hasbody(block): + for b in block['body']: + ret = ret + findcommonblocks(b, 0) + if top: + tret = [] + names = [] + for t in ret: + if t[0] not in names: + names.append(t[0]) + tret.append(t) + return tret + return ret + + +def buildhooks(m): + ret = {'commonhooks': [], 'initcommonhooks': [], + 'docs': ['"COMMON blocks:\\n"']} + fwrap = [''] + + def fadd(line, s=fwrap): + s[0] = f'{s[0]}\n {line}' + chooks = [''] + + def cadd(line, s=chooks): + s[0] = f'{s[0]}\n{line}' + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = f'{s[0]}\n{line}' + doc = [''] + + def dadd(line, s=doc): + s[0] = f'{s[0]}\n{line}' + for (name, vnames, vars) in findcommonblocks(m): + lower_name = name.lower() + hnames, inames = [], [] + for n in vnames: + if isintent_hide(vars[n]): + hnames.append(n) + else: + inames.append(n) + if hnames: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( + name, ','.join(inames), ','.join(hnames))) + else: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( + name, ','.join(inames))) + fadd(f'subroutine f2pyinit{name}(setupfunc)') + for usename in getuseblocks(m): + fadd(f'use {usename}') + fadd('external setupfunc') + for n in vnames: + fadd(func2subr.var2fixfortran(vars, n)) + if name == '_BLNK_': + fadd(f"common {','.join(vnames)}") + else: + fadd(f"common /{name}/ {','.join(vnames)}") + fadd(f"call setupfunc({','.join(inames)})") + fadd('end\n') + cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + idims = [] + for n in inames: + ct = capi_maps.getctype(vars[n]) + elsize = capi_maps.get_elsize(vars[n]) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, vars[n]) + if dm['dims']: + idims.append(f"({dm['dims']})") + else: + idims.append('') + dms = dm['dims'].strip() + if not dms: + dms = '-1' + cadd('\t{\"%s\",%s,{{%s}},%s, %s},' + % (n, dm['rank'], dms, at, elsize)) + cadd('\t{NULL}\n};') + inames1 = rmbadname(inames) + inames1_tps = ','.join(['char *' + s for s in inames1]) + cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd('\tint i_f2py=0;') + for n in inames1: + cadd(f'\tf2py_{name}_def[i_f2py++].data = {n};') + cadd('}') + if '_' in lower_name: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' + % (F_FUNC, lower_name, name.upper(), + ','.join(['char*'] * len(inames1)))) + cadd('static void 
f2py_init_%s(void) {' % name)
+        cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'
+             % (F_FUNC, lower_name, name.upper(), name))
+        cadd('}\n')
+        iadd(f'\ttmp = PyFortranObject_New(f2py_{name}_def,f2py_init_{name});')
+        iadd('\tif (tmp == NULL) return NULL;')
+        iadd(f'\tif (F2PyDict_SetItemString(d, "{name}", tmp) == -1) return NULL;')
+        iadd('\tPy_DECREF(tmp);')
+        tname = name.replace('_', '\\_')
+        dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname))
+        dadd('\\begin{description}')
+        for n in inames:
+            dadd('\\item[]{{}\\verb@%s@{}}' %
+                 (capi_maps.getarrdocsign(n, vars[n])))
+            if hasnote(vars[n]):
+                note = vars[n]['note']
+                if isinstance(note, list):
+                    note = '\n'.join(note)
+                dadd(f'--- {note}')
+        dadd('\\end{description}')
+        ret['docs'].append(
+            f"\"\t/{name}/ {','.join(map(lambda v, d: v + d, inames, idims))}\\n\"")
+    ret['commonhooks'] = chooks
+    ret['initcommonhooks'] = ihooks
+    ret['latexdoc'] = doc[0]
+    if len(ret['docs']) <= 1:
+        ret['docs'] = ''
+    return ret, fwrap[0]
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/common_rules.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/common_rules.pyi
new file mode 100644
index 0000000..d840de0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/common_rules.pyi
@@ -0,0 +1,9 @@
+from collections.abc import Mapping
+from typing import Any, Final
+
+from .__version__ import version
+
+f2py_version: Final = version
+
+def findcommonblocks(block: Mapping[str, object], top: int = 1) -> list[tuple[str, list[str], dict[str, Any]]]: ...
+def buildhooks(m: Mapping[str, object]) -> tuple[dict[str, Any], str]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/crackfortran.py b/.venv/lib/python3.12/site-packages/numpy/f2py/crackfortran.py
new file mode 100644
index 0000000..22d8043
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/crackfortran.py
@@ -0,0 +1,3725 @@
+"""
+crackfortran --- read fortran (77,90) code and extract declaration information.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+
+Usage of crackfortran:
+======================
+Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
+                   -m <module name for f77 routines>,--ignore-contains
+Functions: crackfortran, crack2fortran
+The following Fortran statements/constructions are supported
+(or will be if needed):
+    block data,byte,call,character,common,complex,contains,data,
+    dimension,double complex,double precision,end,external,function,
+    implicit,integer,intent,interface,intrinsic,
+    logical,module,optional,parameter,private,public,
+    program,real,(sequence?),subroutine,type,use,virtual,
+    include,pythonmodule
+Note: 'virtual' is mapped to 'dimension'.
+Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug).
+Note: code after 'contains' will be ignored until its scope ends.
+Note: 'common' statement is extended: dimensions are moved to variable definitions
+Note: f2py directive: <commentchar>f2py<line> is read as <line>
+Note: pythonmodule is introduced to represent Python module
+
+Usage:
+  `postlist=crackfortran(files)`
+  `postlist` contains declaration information read from the list of files `files`.
+  `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
+
+  `postlist` has the following structure:
+ *** it is a list of dictionaries containing `blocks':
+     B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
+          'implicit','externals','interfaced','common','sortvars',
+          'commonvars','note']}
+     B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
+                  'program' | 'block data' | 'type' | 'pythonmodule' |
+                  'abstract interface'
+     B['body'] --- list containing `subblocks' with the same structure as `blocks'
+     B['parent_block'] --- dictionary of a parent block:
+                             C['body'][<index>]['parent_block'] is C
+     B['vars'] --- dictionary of variable definitions
+     B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
+     B['name'] --- name of the block (not if B['block']=='interface')
+     B['prefix'] --- prefix string (only if B['block']=='function')
+     B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
+     B['result'] --- name of the return value (only if B['block']=='function')
+     B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
+     B['externals'] --- list of variables being external
+     B['interfaced'] --- list of variables being external and defined
+     B['common'] --- dictionary of common blocks (list of objects)
+     B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
+     B['from'] --- string showing the 'parents' of the current block
+     B['use'] --- dictionary of modules used in current block:
+         {<modulename>:{['only':<0|1>],['map':{<local_name>:<use_name>,...}]}}
+     B['note'] --- list of LaTeX comments on the block
+     B['f2pyenhancements'] --- optional dictionary
+          {'threadsafe':'','fortranname':<name>,
+          'callstatement':<C-expr>|<multi-line block>,
+          'callprotoargument':<C-expr-list>,
+          'usercode':<multi-line block>|<list of multi-line blocks>,
+          'pymethoddef:<multi-line block>'
+          }
+     B['entry'] --- dictionary {entryname:argslist,..}
+     B['varnames'] --- list of variable names given in the order of reading the
+                       Fortran code, useful for derived types.
+     B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
+ *** Variable definition is a dictionary
+     D = B['vars'][<name>] =
+         {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
+     D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
+                     'double precision' | 'integer' | 'logical' | 'real' | 'type'
+     D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
+                       'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
+                       'optional','required', etc)
+     K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
+         'complex' | 'integer' | 'logical' | 'real' )
+     C = D['charselector'] = {['*','len','kind','f2py_len']}
+                             (only if D['typespec']=='character')
+     D['='] --- initialization expression string
+     D['typename'] --- name of the type if D['typespec']=='type'
+     D['dimension'] --- list of dimension bounds
+     D['intent'] --- list of intent specifications
+     D['depend'] --- list of variable names on which current variable depends on
+     D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
+     D['note'] --- list of LaTeX comments on the variable
+ *** Meaning of kind/char selectors (few examples):
+     D['typespec']*K['*']
+     D['typespec'](kind=K['kind'])
+     character*C['*']
+     character(len=C['len'],kind=C['kind'], f2py_len=C['f2py_len'])
+     (see also fortran type declaration statement formats below)
+
+Fortran 90 type declaration statement format (F77 is subset of F90)
+====================================================================
+(Main source: IBM XL Fortran 5.1 Language Reference Manual)
+type declaration = <typespec> [[<attrspec>]::] <entitydecl>
+<typespec> = byte                          |
+             character[<charselector>]     |
+             complex[<kindselector>]       |
+             double complex                |
+             double precision              |
+             integer[<kindselector>]       |
+             logical[<kindselector>]       |
+             real[<kindselector>]          |
+             type(<typename>)
+<charselector> = * <charlen>               |
+                 ([len=]<len>[,[kind=]<kind>]) |
+                 (kind=<kind>[,len=<len>])
+<kindselector> = * <intlen>                |
+                 ([kind=]<kind>)
+<attrspec> = comma separated list of attributes.
+             Only the following attributes are used in
+             building up the interface:
+                external
+                (parameter --- affects '=' key)
+                optional
+                intent
+             Other attributes are ignored.
+<intentspec> = in | out | inout
+<arrayspec> = comma separated list of dimension bounds.
+<entitydecl> = <name> [[*][<charlen>] | [(<arrayspec>)] |
+               [*<charlen>] | [(<arrayspec>)]*<charlen>]
+               [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
+
+In addition, the following attributes are used: check,depend,note
+
+TODO:
+    * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
+      -> 'real x(2)')
+      The above may be solved by creating appropriate preprocessor program, for example.
+
+"""
+import codecs
+import copy
+import fileinput
+import os
+import platform
+import re
+import string
+import sys
+from pathlib import Path
+
+try:
+    import charset_normalizer
+except ImportError:
+    charset_normalizer = None
+
+from . import __version__, symbolic
+
+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import *
+
+f2py_version = __version__.version
+
+# Global flags:
+strictf77 = 1           # Ignore `!' comments unless line[0]=='!'
+sourcecodeform = 'fix'  # 'fix','free'
+quiet = 0               # Be verbose if 0 (Obsolete: not used any more)
+verbose = 1             # Be quiet if 0, extra verbose if > 1.
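+# A worked example for the type declaration grammar in the module docstring
+# above (illustrative, not part of upstream crackfortran.py):
+#     integer(kind=8), optional, intent(in) :: n(10)
+# has <typespec> = 'integer(kind=8)', <attrspec> = 'optional, intent(in)' and
+# <entitydecl> = 'n(10)'; crackfortran stores it roughly as
+#     {'typespec': 'integer', 'kindselector': {'kind': '8'},
+#      'attrspec': ['optional'], 'intent': ['in'], 'dimension': ['10']}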
+tabchar = 4 * ' ' +pyffilename = '' +f77modulename = '' +skipemptyends = 0 # for old F77 programs without 'program' statement +ignorecontains = 1 +dolowercase = 1 +debug = [] + +# Global variables +beginpattern = '' +currentfilename = '' +expectbegin = 1 +f90modulevars = {} +filepositiontext = '' +gotnextfile = 1 +groupcache = None +groupcounter = 0 +grouplist = {groupcounter: []} +groupname = '' +include_paths = [] +neededmodule = -1 +onlyfuncs = [] +previous_context = None +skipblocksuntil = -1 +skipfuncs = [] +skipfunctions = [] +usermodules = [] + + +def reset_global_f2py_vars(): + global groupcounter, grouplist, neededmodule, expectbegin + global skipblocksuntil, usermodules, f90modulevars, gotnextfile + global filepositiontext, currentfilename, skipfunctions, skipfuncs + global onlyfuncs, include_paths, previous_context + global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename + global f77modulename, skipemptyends, ignorecontains, dolowercase, debug + + # flags + strictf77 = 1 + sourcecodeform = 'fix' + quiet = 0 + verbose = 1 + tabchar = 4 * ' ' + pyffilename = '' + f77modulename = '' + skipemptyends = 0 + ignorecontains = 1 + dolowercase = 1 + debug = [] + # variables + groupcounter = 0 + grouplist = {groupcounter: []} + neededmodule = -1 + expectbegin = 1 + skipblocksuntil = -1 + usermodules = [] + f90modulevars = {} + gotnextfile = 1 + filepositiontext = '' + currentfilename = '' + skipfunctions = [] + skipfuncs = [] + onlyfuncs = [] + include_paths = [] + previous_context = None + + +def outmess(line, flag=1): + global filepositiontext + + if not verbose: + return + if not quiet: + if flag: + sys.stdout.write(filepositiontext) + sys.stdout.write(line) + + +re._MAXCACHE = 50 +defaultimplicitrules = {} +for c in "abcdefghopqrstuvwxyz$_": + defaultimplicitrules[c] = {'typespec': 'real'} +for c in "ijklmn": + defaultimplicitrules[c] = {'typespec': 'integer'} +badnames = {} +invbadnames = {} +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', + 'max', 'min', + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: + badnames[n] = n + '_bn' + invbadnames[n + '_bn'] = n + + +def rmbadname1(name): + if name in badnames: + errmess(f'rmbadname1: Replacing "{name}" with "{badnames[name]}".\n') + return badnames[name] + return name + + +def rmbadname(names): + return [rmbadname1(_m) for _m in names] + + +def undo_rmbadname1(name): + if name in invbadnames: + errmess(f'undo_rmbadname1: Replacing "{name}" with "{invbadnames[name]}".\n') + return invbadnames[name] + return name + + +def undo_rmbadname(names): + return [undo_rmbadname1(_m) for _m in names] + + +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match + +# Extensions +COMMON_FREE_EXTENSIONS = ['.f90', '.f95', '.f03', '.f08'] +COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] + + +def openhook(filename, mode): + """Ensures that filename is opened with correct encoding parameter. 
+ + This function uses charset_normalizer package, when available, for + determining the encoding of the file to be opened. When charset_normalizer + is not available, the function detects only UTF encodings, otherwise, ASCII + encoding is used as fallback. + """ + # Reads in the entire file. Robust detection of encoding. + # Correctly handles comments or late stage unicode characters + # gh-22871 + if charset_normalizer is not None: + encoding = charset_normalizer.from_path(filename).best().encoding + else: + # hint: install charset_normalizer for correct encoding handling + # No need to read the whole file for trying with startswith + nbytes = min(32, os.path.getsize(filename)) + with open(filename, 'rb') as fhandle: + raw = fhandle.read(nbytes) + if raw.startswith(codecs.BOM_UTF8): + encoding = 'UTF-8-SIG' + elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): + encoding = 'UTF-32' + elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)): + encoding = 'UTF-16' + else: + # Fallback, without charset_normalizer + encoding = 'ascii' + return open(filename, mode, encoding=encoding) + + +def is_free_format(fname): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. + result = False + if Path(fname).suffix.lower() in COMMON_FREE_EXTENSIONS: + result = True + with openhook(fname, 'r') as fhandle: + line = fhandle.readline() + n = 15 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = True + while n > 0 and line: + if line[0] != '!' and line.strip(): + n -= 1 + if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': + result = True + break + line = fhandle.readline() + return result + + +# Read fortran (77,90) code +def readfortrancode(ffile, dowithline=show, istop=1): + """ + Read fortran codes from files and + 1) Get rid of comments, line continuations, and empty lines; lower cases. + 2) Call dowithline(line) on every line. + 3) Recursively call itself when statement \"include ''\" is met. 
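+     (Include files are resolved first against the directory of the current
+     source file and then against the include_paths list; see below.)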
+    """
+    global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
+    global beginpattern, quiet, verbose, dolowercase, include_paths
+
+    if not istop:
+        saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+            beginpattern, quiet, verbose, dolowercase
+    if ffile == []:
+        return
+    localdolowercase = dolowercase
+    # cont: set to True when the content of the last line read
+    # indicates statement continuation
+    cont = False
+    finalline = ''
+    ll = ''
+    includeline = re.compile(
+        r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
+    cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
+    cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
+    mline_mark = re.compile(r".*?'''")
+    if istop:
+        dowithline('', -1)
+    ll, l1 = '', ''
+    spacedigits = [' '] + [str(_m) for _m in range(10)]
+    filepositiontext = ''
+    fin = fileinput.FileInput(ffile, openhook=openhook)
+    while True:
+        try:
+            l = fin.readline()
+        except UnicodeDecodeError as msg:
+            raise Exception(
+                f'readfortrancode: reading {fin.filename()}#{fin.lineno()}'
+                f' failed with\n{msg}.\nIt is likely that installing the'
+                ' charset_normalizer package will help f2py determine the'
+                ' input file encoding correctly.')
+        if not l:
+            break
+        if fin.isfirstline():
+            filepositiontext = ''
+            currentfilename = fin.filename()
+            gotnextfile = 1
+            l1 = l
+            strictf77 = 0
+            sourcecodeform = 'fix'
+            ext = os.path.splitext(currentfilename)[1]
+            if Path(currentfilename).suffix.lower() in COMMON_FIXED_EXTENSIONS and \
+               not (_has_f90_header(l) or _has_fix_header(l)):
+                strictf77 = 1
+            elif is_free_format(currentfilename) and not _has_fix_header(l):
+                sourcecodeform = 'free'
+            if strictf77:
+                beginpattern = beginpattern77
+            else:
+                beginpattern = beginpattern90
+            outmess('\tReading file %s (format:%s%s)\n'
+                    % (repr(currentfilename), sourcecodeform,
+                       (strictf77 and ',strict') or ''))
+
+        l = l.expandtabs().replace('\xa0', ' ')
+        # Get rid of newline characters
+        while not l == '':
+            if l[-1] not in "\n\r\f":
+                break
+            l = l[:-1]
+        # Do not lower for directives, gh-2547, gh-27697, gh-26681
+        is_f2py_directive = False
+        # Unconditionally remove comments
+        (l, rl) = split_by_unquoted(l, '!')
+        l += ' '
+        if rl[:5].lower() == '!f2py':  # f2py directive
+            l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
+            is_f2py_directive = True
+        if l.strip() == '':  # Skip empty line
+            if sourcecodeform == 'free':
+                # In free form, a statement continues in the next line
+                # that is not a comment line [3.3.2.4^1], lines with
+                # blanks are comment lines [3.3.2.3^1]. Hence, the
+                # line continuation flag must retain its state.
+                pass
+            else:
+                # In fixed form, statement continuation is determined
+                # by a non-blank character at the 6-th position. An empty
+                # line indicates the start of a new statement
+                # [3.3.3.3^1]. Hence, the line continuation flag must
+                # be reset.
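+                # Fixed-form sketch of the rule above: a non-blank character
+                # in column 6 continues the previous statement, e.g.
+                #       X = 1.0
+                #      &    + 2.0
+                # while an empty line in between starts a new statement.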
+ cont = False + continue + if sourcecodeform == 'fix': + if l[0] in ['*', 'c', '!', 'C', '#']: + if l[1:5].lower() == 'f2py': # f2py directive + l = ' ' + l[5:] + is_f2py_directive = True + else: # Skip comment line + cont = False + is_f2py_directive = False + continue + elif strictf77: + if len(l) > 72: + l = l[:72] + if l[0] not in spacedigits: + raise Exception('readfortrancode: Found non-(space,digit) char ' + 'in the first column.\n\tAre you sure that ' + 'this code is in fix form?\n\tline=%s' % repr(l)) + + if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): + # Continuation of a previous line + ll = ll + l[6:] + finalline = '' + origfinalline = '' + else: + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + # f2py directives are already stripped by this point + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + + elif sourcecodeform == 'free': + if not cont and ext == '.pyf' and mline_mark.match(l): + l = l + '\n' + while True: + lc = fin.readline() + if not lc: + errmess( + 'Unexpected end of file when reading multiline\n') + break + l = l + lc + if mline_mark.match(lc): + break + l = l.rstrip() + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + if localdolowercase: + # only skip lowering for C style constructs + # gh-2547, gh-27697, gh-26681, gh-28014 + finalline = ll.lower() if not (is_f2py_directive and iscstyledirective(ll)) else ll + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + raise ValueError( + f"Flag sourcecodeform must be either 'fix' or 'free': {repr(sourcecodeform)}") + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [ + os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + l1 = ll + # Last line should never have an f2py directive anyway + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % (
+                    repr(fn), os.pathsep.join(include_dirs)))
+    else:
+        dowithline(finalline)
+    filepositiontext = ''
+    fin.close()
+    if istop:
+        dowithline('', 1)
+    else:
+        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+            beginpattern, quiet, verbose, dolowercase = saveglobals
+
+
+# Crack line
+beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'\
+                  r'\s*(?P<this>(\b(%s)\b))'\
+                  r'\s*(?P<after>%s)\s*\Z'
+##
+fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
+typespattern = re.compile(
+    beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
+typespattern4implicit = re.compile(beforethisafter % (
+    '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
+#
+functionpattern = re.compile(beforethisafter % (
+    r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
+subroutinepattern = re.compile(beforethisafter % (
+    r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
+# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
+#
+groupbegins77 = r'program|block\s*data'
+beginpattern77 = re.compile(
+    beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
+groupbegins90 = groupbegins77 + \
+    r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|'\
+    r'type(?!\s*\()'
+beginpattern90 = re.compile(
+    beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
+groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
+             r'endinterface|endsubroutine|endfunction')
+endpattern = re.compile(
+    beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end'
+# block, the Fortran 2008 construct, needs special handling in the rest of the file
+endifs = r'end\s*(if|do|where|select|while|forall|associate|'\
+         r'critical|enum|team)'
+endifpattern = re.compile(
+    beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif'
+#
+moduleprocedures = r'module\s*procedure'
+moduleprocedurepattern = re.compile(
+    beforethisafter % ('', moduleprocedures, moduleprocedures, '.*'), re.I), \
+    'moduleprocedure'
+implicitpattern = re.compile(
+    beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
+dimensionpattern = re.compile(beforethisafter % (
+    '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
+externalpattern = re.compile(
+    beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
+optionalpattern = re.compile(
+    beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
+requiredpattern = re.compile(
+    beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
+publicpattern = re.compile(
+    beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
+privatepattern = re.compile(
+    beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
+intrinsicpattern = re.compile(
+    beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
+intentpattern = re.compile(beforethisafter % (
+    '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
+parameterpattern = re.compile(
+    beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
+datapattern = re.compile(
+    beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
+callpattern = re.compile(
+    beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
+entrypattern = re.compile(
+    beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
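+# 'callfun' is not a Fortran keyword: crackline() below synthesizes
+# "callfun f(...)" lines for calls to known externals so they match here.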
+callfunpattern = re.compile(
+    beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
+commonpattern = re.compile(
+    beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
+usepattern = re.compile(
+    beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
+containspattern = re.compile(
+    beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
+formatpattern = re.compile(
+    beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
+# Non-fortran and f2py-specific statements
+f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
+                                                        'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
+multilinepattern = re.compile(
+    r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
+##
+
+def split_by_unquoted(line, characters):
+    """
+    Splits the line into (line[:i], line[i:]),
+    where i is the index of the first occurrence of one of the characters
+    not within quotes, or len(line) if no such index exists
+    """
+    assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
+    r = re.compile(
+        r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
+        r"(?P<after>{char}.*)\Z".format(
+            not_quoted=f"[^\"'{re.escape(characters)}]",
+            char=f"[{re.escape(characters)}]",
+            single_quoted=r"('([^'\\]|(\\.))*')",
+            double_quoted=r'("([^"\\]|(\\.))*")'))
+    m = r.match(line)
+    if m:
+        d = m.groupdict()
+        return (d["before"], d["after"])
+    return (line, "")
+
+def _simplifyargs(argsline):
+    a = []
+    for n in markoutercomma(argsline).split('@,@'):
+        for r in '(),':
+            n = n.replace(r, '_')
+        a.append(n)
+    return ','.join(a)
+
+
+crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+\w*\b)\s*=.*', re.I)
+crackline_bind_1 = re.compile(r'\s*(?P<bind>\b[a-z]+\w*\b)\s*=.*', re.I)
+crackline_bindlang = re.compile(r'\s*bind\(\s*(?P<lang>[^,]+)\s*,\s*name\s*=\s*"(?P<lang_name>[^"]+)"\s*\)', re.I)
+
+def crackline(line, reset=0):
+    """
+    reset=-1  --- initialize
+    reset=0   --- crack the line
+    reset=1   --- final check if mismatch of blocks occurred
+
+    Cracked data is saved in grouplist[0].
+    """
+    global beginpattern, groupcounter, groupname, groupcache, grouplist
+    global filepositiontext, currentfilename, neededmodule, expectbegin
+    global skipblocksuntil, skipemptyends, previous_context, gotnextfile
+
+    _, has_semicolon = split_by_unquoted(line, ";")
+    if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
+                              multilinepattern[0].match(line)):
+        # XXX: non-zero reset values need testing
+        assert reset == 0, repr(reset)
+        # split line on unquoted semicolons
+        line, semicolon_line = split_by_unquoted(line, ";")
+        while semicolon_line:
+            crackline(line, reset)
+            line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
+        crackline(line, reset)
+        return
+    if reset < 0:
+        groupcounter = 0
+        groupname = {groupcounter: ''}
+        groupcache = {groupcounter: {}}
+        grouplist = {groupcounter: []}
+        groupcache[groupcounter]['body'] = []
+        groupcache[groupcounter]['vars'] = {}
+        groupcache[groupcounter]['block'] = ''
+        groupcache[groupcounter]['name'] = ''
+        neededmodule = -1
+        skipblocksuntil = -1
+        return
+    if reset > 0:
+        fl = 0
+        if f77modulename and neededmodule == groupcounter:
+            fl = 2
+        while groupcounter > fl:
+            outmess('crackline: groupcounter=%s groupname=%s\n' %
+                    (repr(groupcounter), repr(groupname)))
+            outmess(
+                'crackline: Mismatch of blocks encountered. '
+                'Trying to fix it by assuming an "end" statement.\n')
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1
+        if f77modulename and neededmodule == groupcounter:
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1  # end interface
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1  # end module
+            neededmodule = -1
+        return
+    if line == '':
+        return
+    flag = 0
+    for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
+                requiredpattern,
+                parameterpattern, datapattern, publicpattern, privatepattern,
+                intrinsicpattern,
+                endifpattern, endpattern,
+                formatpattern,
+                beginpattern, functionpattern, subroutinepattern,
+                implicitpattern, typespattern, commonpattern,
+                callpattern, usepattern, containspattern,
+                entrypattern,
+                f2pyenhancementspattern,
+                multilinepattern,
+                moduleprocedurepattern
+                ]:
+        m = pat[0].match(line)
+        if m:
+            break
+        flag = flag + 1
+    if not m:
+        re_1 = crackline_re_1
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        if 'externals' in groupcache[groupcounter]:
+            for name in groupcache[groupcounter]['externals']:
+                if name in invbadnames:
+                    name = invbadnames[name]
+                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
+                    continue
+                m1 = re.match(
+                    r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
+                if m1:
+                    m2 = re_1.match(m1.group('before'))
+                    a = _simplifyargs(m1.group('args'))
+                    if m2:
+                        line = f"callfun {name}({a}) result ({m2.group('result')})"
+                    else:
+                        line = f'callfun {name}({a})'
+                    m = callfunpattern[0].match(line)
+                    if not m:
+                        outmess(
+                            f'crackline: could not resolve function call for line={repr(line)}.\n')
+                        return
+                    analyzeline(m, 'callfun', line)
+                    return
+        if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
+            previous_context = None
+            outmess('crackline:%d: No pattern for line\n' % (groupcounter))
+        return
+    elif pat[1] == 'end':
+        if 0 <= skipblocksuntil < groupcounter:
+            groupcounter = groupcounter - 1
+            if skipblocksuntil <= groupcounter:
+                return
+        if groupcounter <= 0:
+            raise Exception('crackline: groupcounter(=%s) is nonpositive. '
+                            'Check the blocks.'
+                            % (groupcounter))
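+        # The block name on an 'end' line must match the innermost open
+        # group; a mismatch here means the block bookkeeping went wrong.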
+        m1 = beginpattern[0].match(line)
+        if (m1) and (not m1.group('this') == groupname[groupcounter]):
+            raise Exception('crackline: End group %s does not match with '
+                            'previous Begin group %s\n\t%s' %
+                            (repr(m1.group('this')), repr(groupname[groupcounter]),
+                             filepositiontext)
+                            )
+        if skipblocksuntil == groupcounter:
+            skipblocksuntil = -1
+        grouplist[groupcounter - 1].append(groupcache[groupcounter])
+        grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+        del grouplist[groupcounter]
+        groupcounter = groupcounter - 1
+        if not skipemptyends:
+            expectbegin = 1
+    elif pat[1] == 'begin':
+        if 0 <= skipblocksuntil <= groupcounter:
+            groupcounter = groupcounter + 1
+            return
+        gotnextfile = 0
+        analyzeline(m, pat[1], line)
+        expectbegin = 0
+    elif pat[1] == 'endif':
+        pass
+    elif pat[1] == 'moduleprocedure':
+        analyzeline(m, pat[1], line)
+    elif pat[1] == 'contains':
+        if ignorecontains:
+            return
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        skipblocksuntil = groupcounter
+    else:
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        analyzeline(m, pat[1], line)
+
+
+def markouterparen(line):
+    l = ''
+    f = 0
+    for c in line:
+        if c == '(':
+            f = f + 1
+            if f == 1:
+                l = l + '@(@'
+                continue
+        elif c == ')':
+            f = f - 1
+            if f == 0:
+                l = l + '@)@'
+                continue
+        l = l + c
+    return l
+
+
+def markoutercomma(line, comma=','):
+    l = ''
+    f = 0
+    before, after = split_by_unquoted(line, comma + '()')
+    l += before
+    while after:
+        if (after[0] == comma) and (f == 0):
+            l += '@' + comma + '@'
+        else:
+            l += after[0]
+            if after[0] == '(':
+                f += 1
+            elif after[0] == ')':
+                f -= 1
+        before, after = split_by_unquoted(after[1:], comma + '()')
+        l += before
+    assert not f, repr((f, line, l))
+    return l
+
+def unmarkouterparen(line):
+    r = line.replace('@(@', '(').replace('@)@', ')')
+    return r
+
+
+def appenddecl(decl, decl2, force=1):
+    if not decl:
+        decl = {}
+    if not decl2:
+        return decl
+    if decl is decl2:
+        return decl
+    for k in list(decl2.keys()):
+        if k == 'typespec':
+            if force or k not in decl:
+                decl[k] = decl2[k]
+        elif k == 'attrspec':
+            for l in decl2[k]:
+                decl = setattrspec(decl, l, force)
+        elif k == 'kindselector':
+            decl = setkindselector(decl, decl2[k], force)
+        elif k == 'charselector':
+            decl = setcharselector(decl, decl2[k], force)
+        elif k in ['=', 'typename']:
+            if force or k not in decl:
+                decl[k] = decl2[k]
+        elif k == 'note':
+            pass
+        elif k in ['intent', 'check', 'dimension', 'optional',
+                   'required', 'depend']:
+            errmess(f'appenddecl: "{k}" not implemented.\n')
+        else:
+            raise Exception('appenddecl: Unknown variable definition key: ' +
+                            str(k))
+    return decl
+
+
+selectpattern = re.compile(
+    r'\s*(?P<this>(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
+typedefpattern = re.compile(
+    r'(?:,(?P<attributes>[\w(),]+))?(::)?(?P<name>\b[a-z$_][\w$]*\b)'
+    r'(?:\((?P<params>[\w,]*)\))?\Z', re.I)
+nameargspattern = re.compile(
+    r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>(?:(?!@\)@).)*)\s*@\)@))*\s*\Z', re.I)
+operatorpattern = re.compile(
+    r'\s*(?P<scheme>(operator|assignment))'
+    r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I)
+callnameargspattern = re.compile(
+    r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
+real16pattern = re.compile(
+    r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
+real8pattern = re.compile(
+    r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
+
+_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
+
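+# Illustrative sketch (assumed input, not executed): after markouterparen(),
+# a header like "foo @(@a, b@)@ result @(@r@)@" matches nameargspattern with
+# groups name='foo', args='a, b', result='r'.
+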
+def _is_intent_callback(vdecl):
+    for a in vdecl.get('attrspec', []):
+        if _intentcallbackpattern.match(a):
+            return 1
+    return 0
+
+
+def _resolvetypedefpattern(line):
+    line = ''.join(line.split())  # removes whitespace
+    m1 = typedefpattern.match(line)
+    if m1:
+        attrs = m1.group('attributes')
+        attrs = [a.lower() for a in attrs.split(',')] if attrs else []
+        return m1.group('name'), attrs, m1.group('params')
+    return None, [], None
+
+def parse_name_for_bind(line):
+    pattern = re.compile(r'bind\(\s*(?P<lang>[^,]+)(?:\s*,\s*name\s*=\s*["\'](?P<name>[^"\']+)["\']\s*)?\)', re.I)
+    match = pattern.search(line)
+    bind_statement = None
+    if match:
+        bind_statement = match.group(0)
+        # Remove the 'bind' construct from the line.
+        line = line[:match.start()] + line[match.end():]
+    return line, bind_statement
+
+def _resolvenameargspattern(line):
+    line, bind_cname = parse_name_for_bind(line)
+    line = markouterparen(line)
+    m1 = nameargspattern.match(line)
+    if m1:
+        return m1.group('name'), m1.group('args'), m1.group('result'), bind_cname
+    m1 = operatorpattern.match(line)
+    if m1:
+        name = m1.group('scheme') + '(' + m1.group('name') + ')'
+        return name, [], None, None
+    m1 = callnameargspattern.match(line)
+    if m1:
+        return m1.group('name'), m1.group('args'), None, None
+    return None, [], None, None
+
+
+def analyzeline(m, case, line):
+    """
+    Reads each line in the input file in sequence and updates global vars.
+
+    Effectively reads and collects information from the input file to the
+    global variable groupcache, a dictionary containing info about each part
+    of the fortran module.
+
+    At the end of analyzeline, information is filtered into the correct dict
+    keys, but parameter values and dimensions are not yet interpreted.
+    """
+    global groupcounter, groupname, groupcache, grouplist, filepositiontext
+    global currentfilename, f77modulename, neededinterface, neededmodule
+    global expectbegin, gotnextfile, previous_context
+
+    block = m.group('this')
+    if case != 'multiline':
+        previous_context = None
+    if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
+       and not skipemptyends and groupcounter < 1:
+        newname = os.path.basename(currentfilename).split('.')[0]
+        outmess(
+            f'analyzeline: no group yet. 
Creating program group with name "{newname}".\n') + gotnextfile = 0 + groupcounter = groupcounter + 1 + groupname[groupcounter] = 'program' + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = 'program' + groupcache[groupcounter]['name'] = newname + groupcache[groupcounter]['from'] = 'fromsky' + expectbegin = 0 + if case in ['begin', 'call', 'callfun']: + # Crack line => block,name,args,result + block = block.lower() + if re.match(r'block\s*data', block, re.I): + block = 'block data' + elif re.match(r'python\s*module', block, re.I): + block = 'python module' + elif re.match(r'abstract\s*interface', block, re.I): + block = 'abstract interface' + if block == 'type': + name, attrs, _ = _resolvetypedefpattern(m.group('after')) + groupcache[groupcounter]['vars'][name] = {'attrspec': attrs} + args = [] + result = None + else: + name, args, result, bindcline = _resolvenameargspattern(m.group('after')) + if name is None: + if block == 'block data': + name = '_BLOCK_DATA_' + else: + name = '' + if block not in ['interface', 'block data', 'abstract interface']: + outmess('analyzeline: No name/args pattern found for line.\n') + + previous_context = (block, name, groupcounter) + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + if '' in args: + while '' in args: + args.remove('') + outmess( + 'analyzeline: argument list is malformed (missing argument).\n') + + # end of crack line => block,name,args,result + needmodule = 0 + needinterface = 0 + + if case in ['call', 'callfun']: + needinterface = 1 + if 'args' not in groupcache[groupcounter]: + return + if name not in groupcache[groupcounter]['args']: + return + for it in grouplist[groupcounter]: + if it['name'] == name: + return + if name in groupcache[groupcounter]['interfaced']: + return + block = {'call': 'subroutine', 'callfun': 'function'}[case] + if f77modulename and neededmodule == -1 and groupcounter <= 1: + neededmodule = groupcounter + 2 + needmodule = 1 + if block not in ['interface', 'abstract interface']: + needinterface = 1 + # Create new block(s) + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needmodule: + if verbose > 1: + outmess('analyzeline: Creating module block %s\n' % + repr(f77modulename), 0) + groupname[groupcounter] = 'module' + groupcache[groupcounter]['block'] = 'python module' + groupcache[groupcounter]['name'] = f77modulename + groupcache[groupcounter]['from'] = '' + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needinterface: + if verbose > 1: + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( + groupcounter), 0) + groupname[groupcounter] = 'interface' + groupcache[groupcounter]['block'] = 'interface' + groupcache[groupcounter]['name'] = 'unknown_interface' + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + 
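+        # groupcounter behaves like a stack depth here: each new block gets a
+        # fresh groupcache/grouplist slot, popped back by the matching 'end'.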
grouplist[groupcounter] = [] + groupname[groupcounter] = block + groupcache[groupcounter]['block'] = block + if not name: + name = 'unknown_' + block.replace(' ', '_') + groupcache[groupcounter]['prefix'] = m.group('before') + groupcache[groupcounter]['name'] = rmbadname1(name) + groupcache[groupcounter]['result'] = result + if groupcounter == 1: + groupcache[groupcounter]['from'] = currentfilename + elif f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) + else: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + for k in list(groupcache[groupcounter].keys()): + if not groupcache[groupcounter][k]: + del groupcache[groupcounter][k] + + groupcache[groupcounter]['args'] = args + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['entry'] = {} + # end of creation + if block == 'type': + groupcache[groupcounter]['varnames'] = [] + + if case in ['call', 'callfun']: # set parents variables + if name not in groupcache[groupcounter - 2]['externals']: + groupcache[groupcounter - 2]['externals'].append(name) + groupcache[groupcounter]['vars'] = copy.deepcopy( + groupcache[groupcounter - 2]['vars']) + try: + del groupcache[groupcounter]['vars'][name][ + groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] + except Exception: + pass + if block in ['function', 'subroutine']: # set global attributes + # name is fortran name + if bindcline: + bindcdat = re.search(crackline_bindlang, bindcline) + if bindcdat: + groupcache[groupcounter]['bindlang'] = {name: {}} + groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') + if bindcdat.group('lang_name'): + groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') + try: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) + except Exception: + pass + if case == 'callfun': # return type + if result and result in groupcache[groupcounter]['vars']: + if not name == result: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) + # if groupcounter>1: # name is interfaced + try: + groupcache[groupcounter - 2]['interfaced'].append(name) + except Exception: + pass + if block == 'function': + t = typespattern[0].match(m.group('before') + ' ' + name) + if t: + typespec, selector, attr, edecl = cracktypespec0( + t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) + + if case in ['call', 'callfun']: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end routine + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + + elif case == 'entry': + name, args, result, _ = _resolvenameargspattern(m.group('after')) + if name is not None: + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + assert result is None, repr(result) + 
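+            # e.g. an "entry second(b)" line inside a subroutine records
+            # {'second': ['b']}, from which a separate wrapper can be built.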
groupcache[groupcounter]['entry'][name] = args + previous_context = ('entry', name, groupcounter) + elif case == 'type': + typespec, selector, attr, edecl = cracktypespec0( + block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']: + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip() + i = ll.find('::') + if i < 0 and case == 'intent': + i = markouterparen(ll).find('@)@') - 2 + ll = ll[:i + 1] + '::' + ll[i + 1:] + i = ll.find('::') + if ll[i:] == '::' and 'args' in groupcache[groupcounter]: + outmess('All arguments will have attribute %s%s\n' % + (m.group('this'), ll[:i])) + ll = ll + ','.join(groupcache[groupcounter]['args']) + if i < 0: + i = 0 + pl = '' + else: + pl = ll[:i].strip() + ll = ll[i + 2:] + ch = markoutercomma(pl).split('@,@') + if len(ch) > 1: + pl = ch[0] + outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( + ','.join(ch[1:]))) + last_name = None + + for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: + m1 = namepattern.match(e) + if not m1: + if case in ['public', 'private']: + k = '' + else: + print(m.groupdict()) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( + case, repr(e))) + continue + else: + k = rmbadname1(m1.group('name')) + if case in ['public', 'private'] and k in {'operator', 'assignment'}: + k += m1.group('after') + if k not in edecl: + edecl[k] = {} + if case == 'dimension': + ap = case + m1.group('after') + if case == 'intent': + ap = m.group('this') + pl + if _intentcallbackpattern.match(ap): + if k not in groupcache[groupcounter]['args']: + if groupcounter > 1: + if '__user__' not in groupcache[groupcounter - 2]['name']: + outmess( + 'analyzeline: missing __user__ module (could be nothing)\n') + # fixes ticket 1693 + if k != groupcache[groupcounter]['name']: + outmess('analyzeline: appending intent(callback) %s' + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + groupcache[groupcounter]['args'].append(k) + else: + errmess( + f'analyzeline: intent(callback) {k} is ignored\n') + else: + errmess('analyzeline: intent(callback) %s is already' + ' in argument list\n' % (k)) + if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: + ap = case + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append(ap) + else: + edecl[k]['attrspec'] = [ap] + if case == 'external': + if groupcache[groupcounter]['block'] == 'program': + outmess('analyzeline: ignoring program arguments\n') + continue + if k not in groupcache[groupcounter]['args']: + continue + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(k) + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'moduleprocedure': + groupcache[groupcounter]['implementedby'] = \ + [x.strip() for x in m.group('after').split(',')] + elif case == 'parameter': + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip()[1:-1] + last_name = None + for e in markoutercomma(ll).split('@,@'): + try: + k, initexpr = [x.strip() for x in e.split('=')] + except Exception: + outmess( + f'analyzeline: could not extract name,expr in parameter 
statement "{e}" of "{ll}\"\n') + continue + params = get_parameters(edecl) + k = rmbadname1(k) + if k not in edecl: + edecl[k] = {} + if '=' in edecl[k] and (not edecl[k]['='] == initexpr): + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( + k, edecl[k]['='], initexpr)) + t = determineexprtype(initexpr, params) + if t: + if t.get('typespec') == 'real': + tt = list(initexpr) + for m in real16pattern.finditer(initexpr): + tt[m.start():m.end()] = list( + initexpr[m.start():m.end()].lower().replace('d', 'e')) + initexpr = ''.join(tt) + elif t.get('typespec') == 'complex': + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') + try: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: + errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' + % (initexpr, msg)) + continue + edecl[k]['='] = repr(v) + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append('parameter') + else: + edecl[k]['attrspec'] = ['parameter'] + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'implicit': + if m.group('after').strip().lower() == 'none': + groupcache[groupcounter]['implicit'] = None + elif m.group('after'): + impl = groupcache[groupcounter].get('implicit', {}) + if impl is None: + outmess( + 'analyzeline: Overwriting earlier "implicit none" statement.\n') + impl = {} + for e in markoutercomma(m.group('after')).split('@,@'): + decl = {} + m1 = re.match( + r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) + if not m1: + outmess( + f'analyzeline: could not extract info of implicit statement part "{e}\"\n') + continue + m2 = typespattern4implicit.match(m1.group('this')) + if not m2: + outmess( + f'analyzeline: could not extract types pattern of implicit statement part "{e}\"\n') + continue + typespec, selector, attr, edecl = cracktypespec0( + m2.group('this'), m2.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + decl['typespec'] = typespec + decl['kindselector'] = kindselect + decl['charselector'] = charselect + decl['typename'] = typename + for k in list(decl.keys()): + if not decl[k]: + del decl[k] + for r in markoutercomma(m1.group('after')).split('@,@'): + if '-' in r: + try: + begc, endc = [x.strip() for x in r.split('-')] + except Exception: + outmess( + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement\n') + continue + else: + begc = endc = r.strip() + if not len(begc) == len(endc) == 1: + outmess( + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement (2)\n') + continue + for o in range(ord(begc), ord(endc) + 1): + impl[chr(o)] = decl + groupcache[groupcounter]['implicit'] = impl + elif case == 'data': + ll = [] + dl = '' + il = '' + f = 0 + fc = 1 + inp = 0 + for c in m.group('after'): + if not inp: + if c == "'": + fc = not fc + if c == '/' and fc: + f = f + 1 + continue + if c == '(': + inp = inp + 1 + elif c == ')': + inp = inp - 1 + if f == 0: + dl = dl + c + elif f == 1: + il = il + c + elif f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + dl = c + il = '' + f = 0 + if f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + vars = groupcache[groupcounter].get('vars', {}) + last_name = None + for l in ll: + l[0], l[1] = l[0].strip().removeprefix(','), l[1].strip() + if l[0].startswith('('): + 
outmess(f'analyzeline: implied-DO list "{l[0]}" is not supported. Skipping.\n')
+                continue
+            for idx, v in enumerate(rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')])):
+                if v.startswith('('):
+                    outmess(f'analyzeline: implied-DO list "{v}" is not supported. Skipping.\n')
+                    # XXX: subsequent init expressions may get wrong values.
+                    # Ignoring since data statements are irrelevant for
+                    # wrapping.
+                    continue
+                if '!' in l[1]:
+                    # Fixes gh-24746 pyf generation
+                    # XXX: This essentially ignores the value for generating the pyf which is fine:
+                    # integer dimension(3) :: mytab
+                    # common /mycom/ mytab
+                    # Since in any case it is initialized in the Fortran code
+                    outmess(f'Comment line in declaration "{l[1]}" is not supported. Skipping.\n')
+                    continue
+                vars.setdefault(v, {})
+                vtype = vars[v].get('typespec')
+                vdim = getdimension(vars[v])
+                matches = re.findall(r"\(.*?\)", l[1]) if vtype == 'complex' else l[1].split(',')
+                try:
+                    new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx]
+                except IndexError:
+                    # gh-24746
+                    # Runs only if the above code fails. Fixes the line
+                    # DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /4*0,0.0D0/
+                    # by expanding to ['0', '0', '0', '0', '0.0d0']
+                    if any("*" in m for m in matches):
+                        expanded_list = []
+                        for match in matches:
+                            if "*" in match:
+                                try:
+                                    multiplier, value = match.split("*")
+                                    expanded_list.extend([value.strip()] * int(multiplier))
+                                except ValueError:  # if int(multiplier) fails
+                                    expanded_list.append(match.strip())
+                            else:
+                                expanded_list.append(match.strip())
+                        matches = expanded_list
+                    new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx]
+                current_val = vars[v].get('=')
+                if current_val and (current_val != new_val):
+                    outmess(f'analyzeline: changing init expression of "{v}" ("{current_val}") to "{new_val}\"\n')
+                vars[v]['='] = new_val
+                last_name = v
+        groupcache[groupcounter]['vars'] = vars
+        if last_name:
+            previous_context = ('variable', last_name, groupcounter)
+    elif case == 'common':
+        line = m.group('after').strip()
+        if not line[0] == '/':
+            line = '//' + line
+
+        cl = []
+        [_, bn, ol] = re.split('/', line, maxsplit=2)  # noqa: RUF039
+        bn = bn.strip()
+        if not bn:
+            bn = '_BLNK_'
+        cl.append([bn, ol])
+        commonkey = {}
+        if 'common' in groupcache[groupcounter]:
+            commonkey = groupcache[groupcounter]['common']
+        for c in cl:
+            if c[0] not in commonkey:
+                commonkey[c[0]] = []
+            for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
+                if i:
+                    commonkey[c[0]].append(i)
+        groupcache[groupcounter]['common'] = commonkey
+        previous_context = ('common', bn, groupcounter)
+    elif case == 'use':
+        m1 = re.match(
+            r'\A\s*(?P<name>\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
+        if m1:
+            mm = m1.groupdict()
+            if 'use' not in groupcache[groupcounter]:
+                groupcache[groupcounter]['use'] = {}
+            name = m1.group('name')
+            groupcache[groupcounter]['use'][name] = {}
+            isonly = 0
+            if 'list' in mm and mm['list'] is not None:
+                if 'notonly' in mm and mm['notonly'] is None:
+                    isonly = 1
+                groupcache[groupcounter]['use'][name]['only'] = isonly
+                ll = [x.strip() for x in mm['list'].split(',')]
+                rl = {}
+                for l in ll:
+                    if '=' in l:
+                        m2 = re.match(
+                            r'\A\s*(?P<local>\b\w+\b)\s*=\s*>\s*(?P<use>\b\w+\b)\s*\Z', l, re.I)
+                        if m2:
+                            rl[m2.group('local').strip()] = m2.group(
+                                'use').strip()
+                        else:
+                            outmess(
+                                f'analyzeline: No local=>use pattern found in {repr(l)}\n')
+                    else:
+                        rl[l] = l
+                groupcache[groupcounter]['use'][name]['map'] = rl
+        else:
+            print(m.groupdict())
+            outmess('analyzeline: Could not crack the use statement.\n')
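+    # f2py-specific enhancement statements (threadsafe, fortranname,
+    # callstatement, callprotoargument, usercode, pymethoddef) are collected
+    # verbatim below; 'usercode' may repeat, so it accumulates into a list.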
+    elif case in ['f2pyenhancements']:
+        if 'f2pyenhancements' not in groupcache[groupcounter]:
+            groupcache[groupcounter]['f2pyenhancements'] = {}
+        d = groupcache[groupcounter]['f2pyenhancements']
+        if m.group('this') == 'usercode' and 'usercode' in d:
+            if isinstance(d['usercode'], str):
+                d['usercode'] = [d['usercode']]
+            d['usercode'].append(m.group('after'))
+        else:
+            d[m.group('this')] = m.group('after')
+    elif case == 'multiline':
+        if previous_context is None:
+            if verbose:
+                outmess('analyzeline: No context for multiline block.\n')
+            return
+        gc = groupcounter
+        appendmultiline(groupcache[gc],
+                        previous_context[:2],
+                        m.group('this'))
+    elif verbose > 1:
+        print(m.groupdict())
+        outmess('analyzeline: No code implemented for line.\n')
+
+
+def appendmultiline(group, context_name, ml):
+    if 'f2pymultilines' not in group:
+        group['f2pymultilines'] = {}
+    d = group['f2pymultilines']
+    if context_name not in d:
+        d[context_name] = []
+    d[context_name].append(ml)
+
+
+def cracktypespec0(typespec, ll):
+    selector = None
+    attr = None
+    if re.match(r'double\s*complex', typespec, re.I):
+        typespec = 'double complex'
+    elif re.match(r'double\s*precision', typespec, re.I):
+        typespec = 'double precision'
+    else:
+        typespec = typespec.strip().lower()
+    m1 = selectpattern.match(markouterparen(ll))
+    if not m1:
+        outmess(
+            'cracktypespec0: no kind/char_selector pattern found for line.\n')
+        return
+    d = m1.groupdict()
+    for k in list(d.keys()):
+        d[k] = unmarkouterparen(d[k])
+    if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
+        selector = d['this']
+        ll = d['after']
+    i = ll.find('::')
+    if i >= 0:
+        attr = ll[:i].strip()
+        ll = ll[i + 2:]
+    return typespec, selector, attr, ll
+
+
+#####
+namepattern = re.compile(r'\s*(?P<name>\b\w+\b)\s*(?P<after>.*)\s*\Z', re.I)
+kindselector = re.compile(
+    r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|\*\s*(?P<kind2>.*?))\s*\Z', re.I)
+charselector = re.compile(
+    r'\s*(\((?P<lenkind>.*)\)|\*\s*(?P<charlen>.*))\s*\Z', re.I)
+lenkindpattern = re.compile(
+    r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)'
+    r'|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)'
+    r'|(f2py_len\s*=\s*(?P<f2py_len>.*))|))\s*\Z', re.I)
+lenarraypattern = re.compile(
+    r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*\*\s*(?P<len>.*?)|(\*\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
+
+
+def removespaces(expr):
+    expr = expr.strip()
+    if len(expr) <= 1:
+        return expr
+    expr2 = expr[0]
+    for i in range(1, len(expr) - 1):
+        if (expr[i] == ' ' and
+                ((expr[i + 1] in "()[]{}=+-/* ") or
+                 (expr[i - 1] in "()[]{}=+-/* "))):
+            continue
+        expr2 = expr2 + expr[i]
+    expr2 = expr2 + expr[-1]
+    return expr2
+
+
+def markinnerspaces(line):
+    """
+    The function replaces all spaces in the input variable line which are
+    surrounded with quotation marks, with the triplet "@_@".
+ + For instance, for the input "a 'b c'" the function returns "a 'b@_@c'" + + Parameters + ---------- + line : str + + Returns + ------- + str + + """ + fragment = '' + inside = False + current_quote = None + escaped = '' + for c in line: + if escaped == '\\' and c in ['\\', '\'', '"']: + fragment += c + escaped = c + continue + if not inside and c in ['\'', '"']: + current_quote = c + if c == current_quote: + inside = not inside + elif c == ' ' and inside: + fragment += '@_@' + continue + fragment += c + escaped = c # reset to non-backslash + return fragment + + +def updatevars(typespec, selector, attrspec, entitydecl): + """ + Returns last_name, the variable name without special chars, parenthesis + or dimension specifiers. + + Alters groupcache to add the name, typespec, attrspec (and possibly value) + of current variable. + """ + global groupcache, groupcounter + + last_name = None + kindselect, charselect, typename = cracktypespec(typespec, selector) + # Clean up outer commas, whitespace and undesired chars from attrspec + if attrspec: + attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] + l = [] + c = re.compile(r'(?P[a-zA-Z]+)') + for a in attrspec: + if not a: + continue + m = c.match(a) + if m: + s = m.group('start').lower() + a = s + a[len(s):] + l.append(a) + attrspec = l + el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] + el1 = [] + for e in el: + for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: + if e1: + el1.append(e1.replace('@_@', ' ')) + for e in el1: + m = namepattern.match(e) + if not m: + outmess( + f'updatevars: no name pattern found for entity={repr(e)}. Skipping.\n') + continue + ename = rmbadname1(m.group('name')) + edecl = {} + if ename in groupcache[groupcounter]['vars']: + edecl = groupcache[groupcounter]['vars'][ename].copy() + not_has_typespec = 'typespec' not in edecl + if not_has_typespec: + edecl['typespec'] = typespec + elif typespec and (not typespec == edecl['typespec']): + outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['typespec'], typespec)) + if 'kindselector' not in edecl: + edecl['kindselector'] = copy.copy(kindselect) + elif kindselect: + for k in list(kindselect.keys()): + if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): + outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['kindselector'][k], kindselect[k])) + else: + edecl['kindselector'][k] = copy.copy(kindselect[k]) + if 'charselector' not in edecl and charselect: + if not_has_typespec: + edecl['charselector'] = charselect + else: + errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' + % (ename, charselect)) + elif charselect: + for k in list(charselect.keys()): + if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): + outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['charselector'][k], charselect[k])) + else: + edecl['charselector'][k] = copy.copy(charselect[k]) + if 'typename' not in edecl: + edecl['typename'] = typename + elif typename and (not edecl['typename'] == typename): + outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % ( + ename, edecl['typename'], typename)) + if 'attrspec' not in edecl: + edecl['attrspec'] = copy.copy(attrspec) + elif attrspec: + for a in attrspec: + if a not in edecl['attrspec']: + edecl['attrspec'].append(a) + else: + edecl['typespec'] = copy.copy(typespec) + edecl['kindselector'] = copy.copy(kindselect) + edecl['charselector'] = copy.copy(charselect) + edecl['typename'] = typename + edecl['attrspec'] = copy.copy(attrspec) + if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']: + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(e) + if m.group('after'): + m1 = lenarraypattern.match(markouterparen(m.group('after'))) + if m1: + d1 = m1.groupdict() + for lk in ['len', 'array', 'init']: + if d1[lk + '2'] is not None: + d1[lk] = d1[lk + '2'] + del d1[lk + '2'] + for k in list(d1.keys()): + if d1[k] is not None: + d1[k] = unmarkouterparen(d1[k]) + else: + del d1[k] + + if 'len' in d1 and 'array' in d1: + if d1['len'] == '': + d1['len'] = d1['array'] + del d1['array'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + else: + d1['array'] = d1['array'] + ',' + d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( + typespec, e, typespec, ename, d1['array'])) + + if 'len' in d1: + if typespec in ['complex', 'integer', 'logical', 'real']: + if ('kindselector' not in edecl) or (not edecl['kindselector']): + edecl['kindselector'] = {} + edecl['kindselector']['*'] = d1['len'] + del d1['len'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + + if 'init' in d1: + if '=' in edecl and (not edecl['='] == d1['init']): + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['='], d1['init'])) + else: + edecl['='] = d1['init'] + + if 'array' in d1: + dm = f"dimension({d1['array']})" + if 'attrspec' not in edecl or (not edecl['attrspec']): + edecl['attrspec'] = [dm] + else: + edecl['attrspec'].append(dm) + for dm1 in edecl['attrspec']: + if dm1[:9] == 'dimension' and dm1 != dm: + del edecl['attrspec'][-1] + errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' + % (ename, dm1, dm)) + break + + else: + outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n' % ( + ename + m.group('after'))) + for k in list(edecl.keys()): + if not edecl[k]: + del edecl[k] + groupcache[groupcounter]['vars'][ename] = edecl + if 'varnames' in groupcache[groupcounter]: + groupcache[groupcounter]['varnames'].append(ename) + last_name = ename + return last_name + + +def cracktypespec(typespec, selector): + kindselect = None + charselect = None + typename = None + if selector: + if typespec in ['complex', 'integer', 'logical', 'real']: + kindselect = kindselector.match(selector) + if not kindselect: + outmess( + f'cracktypespec: no kindselector pattern found for {repr(selector)}\n') + return + kindselect = kindselect.groupdict() + kindselect['*'] = kindselect['kind2'] + del kindselect['kind2'] + for k in list(kindselect.keys()): + if not kindselect[k]: + del kindselect[k] + for k, i in list(kindselect.items()): + kindselect[k] = rmbadname1(i) + elif typespec == 'character': + charselect = charselector.match(selector) + if not charselect: + outmess( + f'cracktypespec: no charselector pattern found for {repr(selector)}\n') + return + charselect = charselect.groupdict() + charselect['*'] = charselect['charlen'] + del charselect['charlen'] + if charselect['lenkind']: + lenkind = lenkindpattern.match( + markoutercomma(charselect['lenkind'])) + lenkind = lenkind.groupdict() + for lk in ['len', 'kind']: + if lenkind[lk + '2']: + lenkind[lk] = lenkind[lk + '2'] + charselect[lk] = lenkind[lk] + del lenkind[lk + '2'] + if lenkind['f2py_len'] is not None: + # used to specify the length of assumed length strings + charselect['f2py_len'] = lenkind['f2py_len'] + del charselect['lenkind'] + for k in list(charselect.keys()): + if not charselect[k]: + del charselect[k] + for k, i in list(charselect.items()): + charselect[k] = rmbadname1(i) + elif typespec == 'type': + typename = re.match(r'\s*\(\s*(?P\w+)\s*\)', selector, re.I) + if typename: + typename = typename.group('name') + else: + outmess('cracktypespec: no typename found in %s\n' % + (repr(typespec + selector))) + else: + outmess(f'cracktypespec: no selector used for {repr(selector)}\n') + return kindselect, charselect, typename +###### + + +def setattrspec(decl, attr, force=0): + if not decl: + decl = {} + if not attr: + return decl + if 'attrspec' not in decl: + decl['attrspec'] = [attr] + return decl + if force: + decl['attrspec'].append(attr) + if attr in decl['attrspec']: + return decl + if attr == 'static' and 'automatic' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'automatic' and 'static' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'public': + if 'private' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'private': + if 'public' not in decl['attrspec']: + decl['attrspec'].append(attr) + else: + decl['attrspec'].append(attr) + return decl + + +def setkindselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'kindselector' not in decl: + decl['kindselector'] = sel + return decl + for k in list(sel.keys()): + if force or k not in decl['kindselector']: + decl['kindselector'][k] = sel[k] + return decl + + +def setcharselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'charselector' not in decl: + decl['charselector'] = sel + return decl + + for k in list(sel.keys()): + if force or k not in decl['charselector']: + decl['charselector'][k] = sel[k] + return decl + + +def getblockname(block, unknown='unknown'): + if 'name' in block: + return block['name'] + return 
unknown + +# post processing + + +def setmesstext(block): + global filepositiontext + + try: + filepositiontext = f"In: {block['from']}:{block['name']}\n" + except Exception: + pass + + +def get_usedict(block): + usedict = {} + if 'parent_block' in block: + usedict = get_usedict(block['parent_block']) + if 'use' in block: + usedict.update(block['use']) + return usedict + + +def get_useparameters(block, param_map=None): + global f90modulevars + + if param_map is None: + param_map = {} + usedict = get_usedict(block) + if not usedict: + return param_map + for usename, mapping in list(usedict.items()): + usename = usename.lower() + if usename not in f90modulevars: + outmess('get_useparameters: no module %s info used by %s\n' % + (usename, block.get('name'))) + continue + mvars = f90modulevars[usename] + params = get_parameters(mvars) + if not params: + continue + # XXX: apply mapping + if mapping: + errmess(f'get_useparameters: mapping for {mapping} not impl.\n') + for k, v in list(params.items()): + if k in param_map: + outmess('get_useparameters: overriding parameter %s with' + ' value from module %s\n' % (repr(k), repr(usename))) + param_map[k] = v + + return param_map + + +def postcrack2(block, tab='', param_map=None): + global f90modulevars + + if not f90modulevars: + return block + if isinstance(block, list): + ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) + for g in block] + return ret + setmesstext(block) + outmess(f"{tab}Block: {block['name']}\n", 0) + + if param_map is None: + param_map = get_useparameters(block) + + if param_map is not None and 'vars' in block: + vars = block['vars'] + for n in list(vars.keys()): + var = vars[n] + if 'kindselector' in var: + kind = var['kindselector'] + if 'kind' in kind: + val = kind['kind'] + if val in param_map: + kind['kind'] = param_map[val] + new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) + for b in block['body']] + block['body'] = new_body + + return block + + +def postcrack(block, args=None, tab=''): + """ + TODO: + function return values + determine expression types if in argument list + """ + global usermodules, onlyfunctions + + if isinstance(block, list): + gret = [] + uret = [] + for g in block: + setmesstext(g) + g = postcrack(g, tab=tab + '\t') + # sort user routines to appear first + if 'name' in g and '__user__' in g['name']: + uret.append(g) + else: + gret.append(g) + return uret + gret + setmesstext(block) + if not isinstance(block, dict) and 'block' not in block: + raise Exception('postcrack: Expected block dictionary instead of ' + + str(block)) + if 'name' in block and not block['name'] == 'unknown_interface': + outmess(f"{tab}Block: {block['name']}\n", 0) + block = analyzeargs(block) + block = analyzecommon(block) + block['vars'] = analyzevars(block) + block['sortvars'] = sortvarnames(block['vars']) + if block.get('args'): + args = block['args'] + block['body'] = analyzebody(block, args, tab=tab) + + userisdefined = [] + if 'use' in block: + useblock = block['use'] + for k in list(useblock.keys()): + if '__user__' in k: + userisdefined.append(k) + else: + useblock = {} + name = '' + if 'name' in block: + name = block['name'] + # and not userisdefined: # Build a __user__ module + if block.get('externals'): + interfaced = [] + if 'interfaced' in block: + interfaced = block['interfaced'] + mvars = copy.copy(block['vars']) + if name: + mname = name + '__user__routines' + else: + mname = 'unknown__user__routines' + if mname in userisdefined: + i = 1 + while f"{mname}_{i}" in userisdefined: + i = i + 1 + 
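+            # e.g. if 'foo__user__routines' is taken (for a hypothetical name
+            # 'foo'), fall back to 'foo__user__routines_1', '..._2', and so on.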
mname = f"{mname}_{i}" + interface = {'block': 'interface', 'body': [], + 'vars': {}, 'name': name + '_user_interface'} + for e in block['externals']: + if e in interfaced: + edef = [] + j = -1 + for b in block['body']: + j = j + 1 + if b['block'] == 'interface': + i = -1 + for bb in b['body']: + i = i + 1 + if 'name' in bb and bb['name'] == e: + edef = copy.copy(bb) + del b['body'][i] + break + if edef: + if not b['body']: + del block['body'][j] + del interfaced[interfaced.index(e)] + break + interface['body'].append(edef) + elif e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] + if interface['vars'] or interface['body']: + block['interfaced'] = interfaced + mblock = {'block': 'python module', 'body': [ + interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} + useblock[mname] = {} + usermodules.append(mblock) + if useblock: + block['use'] = useblock + return block + + +def sortvarnames(vars): + indep = [] + dep = [] + for v in list(vars.keys()): + if 'depend' in vars[v] and vars[v]['depend']: + dep.append(v) + else: + indep.append(v) + n = len(dep) + i = 0 + while dep: # XXX: How to catch dependence cycles correctly? + v = dep[0] + fl = 0 + for w in dep[1:]: + if w in vars[v]['depend']: + fl = 1 + break + if fl: + dep = dep[1:] + [v] + i = i + 1 + if i > n: + errmess('sortvarnames: failed to compute dependencies because' + ' of cyclic dependencies between ' + + ', '.join(dep) + '\n') + indep = indep + dep + break + else: + indep.append(v) + dep = dep[1:] + n = len(dep) + i = 0 + return indep + + +def analyzecommon(block): + if not hascommon(block): + return block + commonvars = [] + for k in list(block['common'].keys()): + comvars = [] + for e in block['common'][k]: + m = re.match( + r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z', e, re.I) + if m: + dims = [] + if m.group('dims'): + dims = [x.strip() + for x in markoutercomma(m.group('dims')).split('@,@')] + n = rmbadname1(m.group('name').strip()) + if n in block['vars']: + if 'attrspec' in block['vars'][n]: + block['vars'][n]['attrspec'].append( + f"dimension({','.join(dims)})") + else: + block['vars'][n]['attrspec'] = [ + f"dimension({','.join(dims)})"] + elif dims: + block['vars'][n] = { + 'attrspec': [f"dimension({','.join(dims)})"]} + else: + block['vars'][n] = {} + if n not in commonvars: + commonvars.append(n) + else: + n = e + errmess( + f'analyzecommon: failed to extract "[()]" from "{e}" in common /{k}/.\n') + comvars.append(n) + block['common'][k] = comvars + if 'commonvars' not in block: + block['commonvars'] = commonvars + else: + block['commonvars'] = block['commonvars'] + commonvars + return block + + +def analyzebody(block, args, tab=''): + global usermodules, skipfuncs, onlyfuncs, f90modulevars + + setmesstext(block) + + maybe_private = { + key: value + for key, value in block['vars'].items() + if 'attrspec' not in value or 'public' not in value['attrspec'] + } + + body = [] + for b in block['body']: + b['parent_block'] = block + if b['block'] in ['function', 'subroutine']: + if args is not None and b['name'] not in args: + continue + else: + as_ = b['args'] + # Add private members to skipfuncs for gh-23879 + if b['name'] in maybe_private.keys(): + skipfuncs.append(b['name']) + if b['name'] in skipfuncs: + continue + if onlyfuncs and b['name'] not in onlyfuncs: + continue + b['saved_interface'] = crack2fortrangen( + b, '\n' + ' ' * 6, as_interface=True) + + else: + as_ = args + b = postcrack(b, as_, tab=tab + '\t') + if b['block'] in ['interface', 'abstract interface'] and \ + not 
b['body'] and not b.get('implementedby'): + if 'f2pyenhancements' not in b: + continue + if b['block'].replace(' ', '') == 'pythonmodule': + usermodules.append(b) + else: + if b['block'] == 'module': + f90modulevars[b['name']] = b['vars'] + body.append(b) + return body + + +def buildimplicitrules(block): + setmesstext(block) + implicitrules = defaultimplicitrules + attrrules = {} + if 'implicit' in block: + if block['implicit'] is None: + implicitrules = None + if verbose > 1: + outmess( + f"buildimplicitrules: no implicit rules for routine {repr(block['name'])}.\n") + else: + for k in list(block['implicit'].keys()): + if block['implicit'][k].get('typespec') not in ['static', 'automatic']: + implicitrules[k] = block['implicit'][k] + else: + attrrules[k] = block['implicit'][k]['typespec'] + return implicitrules, attrrules + + +def myeval(e, g=None, l=None): + """ Like `eval` but returns only integers and floats """ + r = eval(e, g, l) + if type(r) in [int, float]: + return r + raise ValueError(f'r={r!r}') + + +getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) + + +def getlincoef(e, xset): # e = a*x+b ; x in xset + """ + Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in + xset. + + >>> getlincoef('2*x + 1', {'x'}) + (2, 1, 'x') + >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) + (5, 3, 'x') + >>> getlincoef('0', {'x'}) + (0, 0, None) + >>> getlincoef('0*x', {'x'}) + (0, 0, 'x') + >>> getlincoef('x*x', {'x'}) + (None, None, None) + + This can be tricked by sufficiently complex expressions + + >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) + (2.0, 3.0, 'x') + """ + try: + c = int(myeval(e, {}, {})) + return 0, c, None + except Exception: + pass + if getlincoef_re_1.match(e): + return 1, 0, e + len_e = len(e) + for x in xset: + if len(x) > len_e: + continue + if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): + # skip function calls having x as an argument, e.g max(1, x) + continue + re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I) + m = re_1.match(e) + if m: + try: + m1 = re_1.match(e) + while m1: + ee = f"{m1.group('before')}({0}){m1.group('after')}" + m1 = re_1.match(ee) + b = myeval(ee, {}, {}) + m1 = re_1.match(e) + while m1: + ee = f"{m1.group('before')}({1}){m1.group('after')}" + m1 = re_1.match(ee) + a = myeval(ee, {}, {}) - b + m1 = re_1.match(e) + while m1: + ee = f"{m1.group('before')}({0.5}){m1.group('after')}" + m1 = re_1.match(ee) + c = myeval(ee, {}, {}) + # computing another point to be sure that expression is linear + m1 = re_1.match(e) + while m1: + ee = f"{m1.group('before')}({1.5}){m1.group('after')}" + m1 = re_1.match(ee) + c2 = myeval(ee, {}, {}) + if (a * 0.5 + b == c and a * 1.5 + b == c2): + return a, b, x + except Exception: + pass + break + return None, None, None + + +word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) + + +def _get_depend_dict(name, vars, deps): + if name in vars: + words = vars[name].get('depend', []) + + if '=' in vars[name] and not isstring(vars[name]): + for word in word_pattern.findall(vars[name]['=']): + # The word_pattern may return values that are not + # only variables, they can be string content for instance + if word not in words and word in vars and word != name: + words.append(word) + for word in words[:]: + for w in deps.get(word, []) \ + or _get_depend_dict(word, vars, deps): + if w not in words: + words.append(w) + else: + outmess(f'_get_depend_dict: no dependence info for {repr(name)}\n') + words = [] + deps[name] = words + return words + + +def _calc_depend_dict(vars): + names =
list(vars.keys()) + depend_dict = {} + for n in names: + _get_depend_dict(n, vars, depend_dict) + return depend_dict + + +def get_sorted_names(vars): + depend_dict = _calc_depend_dict(vars) + names = [] + for name in list(depend_dict.keys()): + if not depend_dict[name]: + names.append(name) + del depend_dict[name] + while depend_dict: + for name, lst in list(depend_dict.items()): + new_lst = [n for n in lst if n in depend_dict] + if not new_lst: + names.append(name) + del depend_dict[name] + else: + depend_dict[name] = new_lst + return [name for name in names if name in vars] + + +def _kind_func(string): + # XXX: return something sensible. + if string[0] in "'\"": + string = string[1:-1] + if real16pattern.match(string): + return 8 + elif real8pattern.match(string): + return 4 + return 'kind(' + string + ')' + + +def _selected_int_kind_func(r): + # XXX: This should be processor dependent + m = 10 ** r + if m <= 2 ** 8: + return 1 + if m <= 2 ** 16: + return 2 + if m <= 2 ** 32: + return 4 + if m <= 2 ** 63: + return 8 + if m <= 2 ** 128: + return 16 + return -1 + + +def _selected_real_kind_func(p, r=0, radix=0): + # XXX: This should be processor dependent + # This is only verified for 0 <= p <= 20, possibly good for p <= 33 and above + if p < 7: + return 4 + if p < 16: + return 8 + machine = platform.machine().lower() + if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')): + if p <= 33: + return 16 + elif p < 19: + return 10 + elif p <= 33: + return 16 + return -1 + + +def get_parameters(vars, global_params={}): + params = copy.copy(global_params) + g_params = copy.copy(global_params) + for name, func in [('kind', _kind_func), + ('selected_int_kind', _selected_int_kind_func), + ('selected_real_kind', _selected_real_kind_func), ]: + if name not in g_params: + g_params[name] = func + param_names = [] + for n in get_sorted_names(vars): + if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: + param_names.append(n) + kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I) + selected_int_kind_re = re.compile( + r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) + selected_kind_re = re.compile( + r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) + for n in param_names: + if '=' in vars[n]: + v = vars[n]['='] + if islogical(vars[n]): + v = v.lower() + for repl in [ + ('.false.', 'False'), + ('.true.', 'True'), + # TODO: test .eq., .neq., etc replacements. + ]: + v = v.replace(*repl) + + v = kind_re.sub(r'kind("\1")', v) + v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) + + # We need to act according to the data. + # The easy case is if the data has a kind-specifier, + # then we may easily remove those specifiers. + # However, it may be that the user uses other specifiers...(!) + is_replaced = False + + if 'kindselector' in vars[n]: + # Remove kind specifier (including those defined + # by parameters) + if 'kind' in vars[n]['kindselector']: + orig_v_len = len(v) + v = v.replace('_' + vars[n]['kindselector']['kind'], '') + # Again, this will be true if even a single specifier + # has been replaced, see comment above. + is_replaced = len(v) < orig_v_len + + if not is_replaced: + if not selected_kind_re.match(v): + v_ = v.split('_') + # In case there are additive parameters + if len(v_) > 1: + v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') + + # Currently this will not work for complex numbers.
+ # There is missing code for extracting a complex number, + # which may be defined in either of these: + # a) (Re, Im) + # b) cmplx(Re, Im) + # c) dcmplx(Re, Im) + # d) cmplx(Re, Im, <prec>) + + if isdouble(vars[n]): + tt = list(v) + for m in real16pattern.finditer(v): + tt[m.start():m.end()] = list( + v[m.start():m.end()].lower().replace('d', 'e')) + v = ''.join(tt) + + elif iscomplex(vars[n]): + outmess(f'get_parameters[TODO]: ' + f'implement evaluation of complex expression {v}\n') + + dimspec = ([s.removeprefix('dimension').strip() + for s in vars[n]['attrspec'] + if s.startswith('dimension')] or [None])[0] + + # Handle _dp for gh-6624 + # Also fixes gh-20460 + if real16pattern.search(v): + v = 8 + elif real8pattern.search(v): + v = 4 + try: + params[n] = param_eval(v, g_params, params, dimspec=dimspec) + except Exception as msg: + params[n] = v + outmess(f'get_parameters: got "{msg}" on {n!r}\n') + + if isstring(vars[n]) and isinstance(params[n], int): + params[n] = chr(params[n]) + nl = n.lower() + if nl != n: + params[nl] = params[n] + else: + print(vars[n]) + outmess(f'get_parameters:parameter {n!r} does not have value?!\n') + return params + + +def _eval_length(length, params): + if length in ['(:)', '(*)', '*']: + return '(*)' + return _eval_scalar(length, params) + + +_is_kind_number = re.compile(r'\d+_').match + + +def _eval_scalar(value, params): + if _is_kind_number(value): + value = value.split('_')[0] + try: + # TODO: use symbolic from PR #19805 + value = eval(value, {}, params) + value = (repr if isinstance(value, str) else str)(value) + except (NameError, SyntaxError, TypeError): + return value + except Exception as msg: + errmess('"%s" in evaluating %r ' + '(available names: %s)\n' + % (msg, value, list(params.keys()))) + return value + + +def analyzevars(block): + """ + Sets correct dimension information for each variable/parameter + """ + + global f90modulevars + + setmesstext(block) + implicitrules, attrrules = buildimplicitrules(block) + vars = copy.copy(block['vars']) + if block['block'] == 'function' and block['name'] not in vars: + vars[block['name']] = {} + if '' in block['vars']: + del vars[''] + if 'attrspec' in block['vars']['']: + gen = block['vars']['']['attrspec'] + for n in set(vars) | {b['name'] for b in block['body']}: + for k in ['public', 'private']: + if k in gen: + vars[n] = setattrspec(vars.get(n, {}), k) + svars = [] + args = block['args'] + for a in args: + try: + vars[a] + svars.append(a) + except KeyError: + pass + for n in list(vars.keys()): + if n not in args: + svars.append(n) + + params = get_parameters(vars, get_useparameters(block)) + # At this point, params are read and interpreted, but + # the params used to define vars are not yet parsed + dep_matches = {} + name_match = re.compile(r'[A-Za-z][\w$]*').match + for v in list(vars.keys()): + m = name_match(v) + if m: + n = v[m.start():m.end()] + try: + dep_matches[n] + except KeyError: + dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match + for n in svars: + if n[0] in list(attrrules.keys()): + vars[n] = setattrspec(vars[n], attrrules[n[0]]) + if 'typespec' not in vars[n]: + if not ('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if implicitrules: + ln0 = n[0].lower() + for k in list(implicitrules[ln0].keys()): + if k == 'typespec' and implicitrules[ln0][k] == 'undefined': + continue + if k not in vars[n]: + vars[n][k] = implicitrules[ln0][k] + elif k == 'attrspec': + for l in implicitrules[ln0][k]: + vars[n] = setattrspec(vars[n], l) + elif n in block['args']: +
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( + repr(n), block['name'])) + if 'charselector' in vars[n]: + if 'len' in vars[n]['charselector']: + l = vars[n]['charselector']['len'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['charselector']['len'] = l + + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + l = vars[n]['kindselector']['kind'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['kindselector']['kind'] = l + + dimension_exprs = {} + if 'attrspec' in vars[n]: + attr = vars[n]['attrspec'] + attr.reverse() + vars[n]['attrspec'] = [] + dim, intent, depend, check, note = None, None, None, None, None + for a in attr: + if a[:9] == 'dimension': + dim = (a[9:].strip())[1:-1] + elif a[:6] == 'intent': + intent = (a[6:].strip())[1:-1] + elif a[:6] == 'depend': + depend = (a[6:].strip())[1:-1] + elif a[:5] == 'check': + check = (a[5:].strip())[1:-1] + elif a[:4] == 'note': + note = (a[4:].strip())[1:-1] + else: + vars[n] = setattrspec(vars[n], a) + if intent: + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: + # Remove spaces so that 'in out' becomes 'inout' + tmp = c.replace(' ', '') + if tmp not in vars[n]['intent']: + vars[n]['intent'].append(tmp) + intent = None + if note: + note = note.replace('\\n\\n', '\n\n') + note = note.replace('\\n ', '\n') + if 'note' not in vars[n]: + vars[n]['note'] = [note] + else: + vars[n]['note'].append(note) + note = None + if depend is not None: + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): + if c not in vars[n]['depend']: + vars[n]['depend'].append(c) + depend = None + if check is not None: + if 'check' not in vars[n]: + vars[n]['check'] = [] + for c in [x.strip() for x in markoutercomma(check).split('@,@')]: + if c not in vars[n]['check']: + vars[n]['check'].append(c) + check = None + if dim and 'dimension' not in vars[n]: + vars[n]['dimension'] = [] + for d in rmbadname( + [x.strip() for x in markoutercomma(dim).split('@,@')] + ): + # d is the expression inside the dimension declaration + # Evaluate `d` with respect to params + try: + # the dimension for this variable depends on a + # previously defined parameter + d = param_parse(d, params) + except (ValueError, IndexError, KeyError): + outmess( + 'analyzevars: could not parse dimension for ' + f'variable {d!r}\n' + ) + + dim_char = ':' if d == ':' else '*' + if d == dim_char: + dl = [dim_char] + else: + dl = markoutercomma(d, ':').split('@:@') + if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) + dl = ['*'] + d = '*' + if len(dl) == 1 and dl[0] != dim_char: + dl = ['1', dl[0]] + if len(dl) == 2: + d1, d2 = map(symbolic.Expr.parse, dl) + dsize = d2 - d1 + 1 + d = dsize.tostring(language=symbolic.Language.C) + # find variables v that define d as a linear + # function, `d == a * v + b`, and store + # coefficients a and b for further analysis. + solver_and_deps = {} + for v in block['vars']: + s = symbolic.as_symbol(v) + if dsize.contains(s): + try: + a, b = dsize.linear_solve(s) + + def solve_v(s, a=a, b=b): + return (s - b) / a + + all_symbols = set(a.symbols()) + all_symbols.update(b.symbols()) + except RuntimeError as msg: + # d is not a linear function of v, + # however, if v can be determined + # from d using other means, + # implement the corresponding + # solve_v function here. 
+ solve_v = None + all_symbols = set(dsize.symbols()) + v_deps = { + s.data for s in all_symbols + if s.data in vars} + solver_and_deps[v] = solve_v, list(v_deps) + # Note that dsize may contain symbols that are + # not defined in block['vars']. Here we assume + # these correspond to Fortran/C intrinsic + # functions or that are defined by other + # means. We'll let the compiler validate the + # definiteness of such symbols. + dimension_exprs[d] = solver_and_deps + vars[n]['dimension'].append(d) + + if 'check' not in vars[n] and 'args' in block and n in block['args']: + # n is an argument that has no checks defined. Here we + # generate some consistency checks for n, and when n is an + # array, generate checks for its dimensions and construct + # initialization expressions. + n_deps = vars[n].get('depend', []) + n_checks = [] + n_is_input = l_or(isintent_in, isintent_inout, + isintent_inplace)(vars[n]) + if isarray(vars[n]): # n is array + for i, d in enumerate(vars[n]['dimension']): + coeffs_and_deps = dimension_exprs.get(d) + if coeffs_and_deps is None: + # d is `:` or `*` or a constant expression + pass + elif n_is_input: + # n is an input array argument and its shape + # may define variables used in dimension + # specifications. + for v, (solver, deps) in coeffs_and_deps.items(): + def compute_deps(v, deps): + for v1 in coeffs_and_deps.get(v, [None, []])[1]: + if v1 not in deps: + deps.add(v1) + compute_deps(v1, deps) + all_deps = set() + compute_deps(v, all_deps) + if (v in n_deps + or '=' in vars[v] + or 'depend' in vars[v]): + # Skip a variable that + # - n depends on + # - has user-defined initialization expression + # - has user-defined dependencies + continue + if solver is not None and v not in all_deps: + # v can be solved from d, hence, we + # make it an optional argument with + # initialization expression: + is_required = False + init = solver(symbolic.as_symbol( + f'shape({n}, {i})')) + init = init.tostring( + language=symbolic.Language.C) + vars[v]['='] = init + # n needs to be initialized before v. So, + # making v dependent on n and on any + # variables in solver or d. 
+ vars[v]['depend'] = [n] + deps + if 'check' not in vars[v]: + # add check only when no + # user-specified checks exist + vars[v]['check'] = [ + f'shape({n}, {i}) == {d}'] + else: + # d is a non-linear function on v, + # hence, v must be a required input + # argument that n will depend on + is_required = True + if 'intent' not in vars[v]: + vars[v]['intent'] = [] + if 'in' not in vars[v]['intent']: + vars[v]['intent'].append('in') + # v needs to be initialized before n + n_deps.append(v) + n_checks.append( + f'shape({n}, {i}) == {d}') + v_attr = vars[v].get('attrspec', []) + if not ('optional' in v_attr + or 'required' in v_attr): + v_attr.append( + 'required' if is_required else 'optional') + if v_attr: + vars[v]['attrspec'] = v_attr + if coeffs_and_deps is not None: + # extend v dependencies with ones specified in attrspec + for v, (solver, deps) in coeffs_and_deps.items(): + v_deps = vars[v].get('depend', []) + for aa in vars[v].get('attrspec', []): + if aa.startswith('depend'): + aa = ''.join(aa.split()) + v_deps.extend(aa[7:-1].split(',')) + if v_deps: + vars[v]['depend'] = list(set(v_deps)) + if n not in v_deps: + n_deps.append(v) + elif isstring(vars[n]): + if 'charselector' in vars[n]: + if '*' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['*'], + params) + vars[n]['charselector']['*'] = length + elif 'len' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['len'], + params) + del vars[n]['charselector']['len'] + vars[n]['charselector']['*'] = length + if n_checks: + vars[n]['check'] = n_checks + if n_deps: + vars[n]['depend'] = list(set(n_deps)) + + if '=' in vars[n]: + if 'attrspec' not in vars[n]: + vars[n]['attrspec'] = [] + if ('optional' not in vars[n]['attrspec']) and \ + ('required' not in vars[n]['attrspec']): + vars[n]['attrspec'].append('optional') + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for v, m in list(dep_matches.items()): + if m(vars[n]['=']): + vars[n]['depend'].append(v) + if not vars[n]['depend']: + del vars[n]['depend'] + if isscalar(vars[n]): + vars[n]['='] = _eval_scalar(vars[n]['='], params) + + for n in list(vars.keys()): + if n == block['name']: # n is block name + if 'note' in vars[n]: + block['note'] = vars[n]['note'] + if block['block'] == 'function': + if 'result' in block and block['result'] in vars: + vars[n] = appenddecl(vars[n], vars[block['result']]) + if 'prefix' in block: + pr = block['prefix'] + pr1 = pr.replace('pure', '') + ispure = (not pr == pr1) + pr = pr1.replace('recursive', '') + isrec = (not pr == pr1) + m = typespattern[0].match(pr) + if m: + typespec, selector, attr, edecl = cracktypespec0( + m.group('this'), m.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + vars[n]['typespec'] = typespec + try: + if block['result']: + vars[block['result']]['typespec'] = typespec + except Exception: + pass + if kindselect: + if 'kind' in kindselect: + try: + kindselect['kind'] = eval( + kindselect['kind'], {}, params) + except Exception: + pass + vars[n]['kindselector'] = kindselect + if charselect: + vars[n]['charselector'] = charselect + if typename: + vars[n]['typename'] = typename + if ispure: + vars[n] = setattrspec(vars[n], 'pure') + if isrec: + vars[n] = setattrspec(vars[n], 'recursive') + else: + outmess( + f"analyzevars: prefix ({repr(block['prefix'])}) were not used\n") + if block['block'] not in ['module', 'pythonmodule', 'python module', 'block data']: + if 'commonvars' in block: + neededvars = copy.copy(block['args'] + 
block['commonvars']) + else: + neededvars = copy.copy(block['args']) + for n in list(vars.keys()): + if l_or(isintent_callback, isintent_aux)(vars[n]): + neededvars.append(n) + if 'entry' in block: + neededvars.extend(list(block['entry'].keys())) + for k in list(block['entry'].keys()): + for n in block['entry'][k]: + if n not in neededvars: + neededvars.append(n) + if block['block'] == 'function': + if 'result' in block: + neededvars.append(block['result']) + else: + neededvars.append(block['name']) + if block['block'] in ['subroutine', 'function']: + name = block['name'] + if name in vars and 'intent' in vars[name]: + block['intent'] = vars[name]['intent'] + if block['block'] == 'type': + neededvars.extend(list(vars.keys())) + for n in list(vars.keys()): + if n not in neededvars: + del vars[n] + return vars + + +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) + + +def param_eval(v, g_params, params, dimspec=None): + """ + Creates a dictionary of indices and values for each parameter in a + parameter array to be evaluated later. + + WARNING: It is not possible to initialize multidimensional array + parameters e.g. dimension(-3:1, 4, 3:5) at this point. This is because in + Fortran initialization through array constructor requires the RESHAPE + intrinsic function. Since the right-hand side of the parameter declaration + is not executed in f2py, but rather at the compiled c/fortran extension, + later, it is not possible to execute a reshape of a parameter array. + One issue remains: if the user wants to access the array parameter from + python, we should either + 1) allow them to access the parameter array using python standard indexing + (which is often incompatible with the original fortran indexing) + 2) allow the parameter array to be accessed in python as a dictionary with + fortran indices as keys + We are choosing 2 for now. + """ + if dimspec is None: + try: + p = eval(v, g_params, params) + except Exception as msg: + p = v + outmess(f'param_eval: got "{msg}" on {v!r}\n') + return p + + # This is an array parameter. + # First, we parse the dimension information + if len(dimspec) < 2 or dimspec[::len(dimspec) - 1] != "()": + raise ValueError(f'param_eval: dimension {dimspec} can\'t be parsed') + dimrange = dimspec[1:-1].split(',') + if len(dimrange) == 1: + # e.g. dimension(2) or dimension(-1:1) + dimrange = dimrange[0].split(':') + # now, dimrange is a list of 1 or 2 elements + if len(dimrange) == 1: + bound = param_parse(dimrange[0], params) + dimrange = range(1, int(bound) + 1) + else: + lbound = param_parse(dimrange[0], params) + ubound = param_parse(dimrange[1], params) + dimrange = range(int(lbound), int(ubound) + 1) + else: + raise ValueError('param_eval: multidimensional array parameters ' + f'{dimspec} not supported') + + # Parse parameter value + v = (v[2:-2] if v.startswith('(/') else v).split(',') + v_eval = [] + for item in v: + try: + item = eval(item, g_params, params) + except Exception as msg: + outmess(f'param_eval: got "{msg}" on {item!r}\n') + v_eval.append(item) + + p = dict(zip(dimrange, v_eval)) + + return p + + +def param_parse(d, params): + """Recursively parse array dimensions. + + Parses the declaration of an array variable or parameter + `dimension` keyword, and is called recursively if the + dimension for this array is a previously defined parameter + (found in `params`). + + Parameters + ---------- + d : str + Fortran expression describing the dimension of an array. 
+ params : dict + Previously parsed parameters declared in the Fortran source file. + + Returns + ------- + out : str + Parsed dimension expression. + + Examples + -------- + + * If the line being analyzed is + + `integer, parameter, dimension(2) :: pa = (/ 3, 5 /)` + + then `d = 2` and we return immediately, with + + >>> d = '2' + >>> param_parse(d, params) + 2 + + * If the line being analyzed is + + `integer, parameter, dimension(pa) :: pb = (/1, 2, 3/)` + + then `d = 'pa'`; since `pa` is a previously parsed parameter, + and `pa = 3`, we call `param_parse` recursively, to obtain + + >>> d = 'pa' + >>> params = {'pa': 3} + >>> param_parse(d, params) + 3 + + * If the line being analyzed is + + `integer, parameter, dimension(pa(1)) :: pb = (/1, 2, 3/)` + + then `d = 'pa(1)'`; since `pa` is a previously parsed parameter, + and `pa(1) = 3`, we call `param_parse` recursively, to obtain + + >>> d = 'pa(1)' + >>> params = dict(pa={1: 3, 2: 5}) + >>> param_parse(d, params) + 3 + """ + if "(" in d: + # this dimension expression is an array + dname = d[:d.find("(")] + ddims = d[d.find("(") + 1:d.rfind(")")] + # this dimension expression is also a parameter; + # parse it recursively + index = int(param_parse(ddims, params)) + return str(params[dname][index]) + elif d in params: + return str(params[d]) + else: + for p in params: + re_1 = re.compile( + r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I + ) + m = re_1.match(d) + while m: + d = m.group('before') + \ + str(params[p]) + m.group('after') + m = re_1.match(d) + return d + + +def expr2name(a, block, args=[]): + orig_a = a + a_is_expr = not analyzeargs_re_1.match(a) + if a_is_expr: # `a` is an expression + implicitrules, attrrules = buildimplicitrules(block) + at = determineexprtype(a, block['vars'], implicitrules) + na = 'e_' + for c in a: + c = c.lower() + if c not in string.ascii_lowercase + string.digits: + c = '_' + na = na + c + if na[-1] == '_': + na = na + 'e' + else: + na = na + '_e' + a = na + while a in block['vars'] or a in block['args']: + a = a + 'r' + if a in args: + k = 1 + while a + str(k) in args: + k = k + 1 + a = a + str(k) + if a_is_expr: + block['vars'][a] = at + else: + if a not in block['vars']: + block['vars'][a] = block['vars'].get(orig_a, {}) + if 'externals' in block and orig_a in block['externals'] + block['interfaced']: + block['vars'][a] = setattrspec(block['vars'][a], 'external') + return a + + +def analyzeargs(block): + setmesstext(block) + implicitrules, _ = buildimplicitrules(block) + if 'args' not in block: + block['args'] = [] + args = [] + for a in block['args']: + a = expr2name(a, block, args) + args.append(a) + block['args'] = args + if 'entry' in block: + for k, args1 in list(block['entry'].items()): + for a in args1: + if a not in block['vars']: + block['vars'][a] = {} + + for b in block['body']: + if b['name'] in args: + if 'externals' not in block: + block['externals'] = [] + if b['name'] not in block['externals']: + block['externals'].append(b['name']) + if 'result' in block and block['result'] not in block['vars']: + block['vars'][block['result']] = {} + return block + + +determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>\w+)|)\Z', re.I) +determineexprtype_re_3 = re.compile( + r'\A[+-]?[\d.]+[-\d+de.]*(_(?P<name>\w+)|)\Z', re.I) +determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) +determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I) + + +def _ensure_exprdict(r): + if isinstance(r, int): + return {'typespec': 'integer'} + if
isinstance(r, float): + return {'typespec': 'real'} + if isinstance(r, complex): + return {'typespec': 'complex'} + if isinstance(r, dict): + return r + raise AssertionError(repr(r)) + + +def determineexprtype(expr, vars, rules={}): + if expr in vars: + return _ensure_exprdict(vars[expr]) + expr = expr.strip() + if determineexprtype_re_1.match(expr): + return {'typespec': 'complex'} + m = determineexprtype_re_2.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') + return {'typespec': 'integer'} + m = determineexprtype_re_3.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') + return {'typespec': 'real'} + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: + if e in vars: + return _ensure_exprdict(vars[e]) + t = {} + if determineexprtype_re_4.match(expr): # in parenthesis + t = determineexprtype(expr[1:-1], vars, rules) + else: + m = determineexprtype_re_5.match(expr) + if m: + rn = m.group('name') + t = determineexprtype(m.group('name'), vars, rules) + if t and 'attrspec' in t: + del t['attrspec'] + if not t: + if rn[0] in rules: + return _ensure_exprdict(rules[rn[0]]) + if expr[0] in '\'"': + return {'typespec': 'character', 'charselector': {'*': '*'}} + if not t: + outmess( + f'determineexprtype: could not determine expressions ({repr(expr)}) type.\n') + return t + +###### + + +def crack2fortrangen(block, tab='\n', as_interface=False): + global skipfuncs, onlyfuncs + + setmesstext(block) + ret = '' + if isinstance(block, list): + for g in block: + if g and g['block'] in ['function', 'subroutine']: + if g['name'] in skipfuncs: + continue + if onlyfuncs and g['name'] not in onlyfuncs: + continue + ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) + return ret + prefix = '' + name = '' + args = '' + blocktype = block['block'] + if blocktype == 'program': + return '' + argsl = [] + if 'name' in block: + name = block['name'] + if 'args' in block: + vars = block['vars'] + for a in block['args']: + a = expr2name(a, block, argsl) + if not isintent_callback(vars[a]): + argsl.append(a) + if block['block'] == 'function' or argsl: + args = f"({','.join(argsl)})" + f2pyenhancements = '' + if 'f2pyenhancements' in block: + for k in list(block['f2pyenhancements'].keys()): + f2pyenhancements = '%s%s%s %s' % ( + f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] + if blocktype == 'function' and 'callback' in intent_lst: + intent_lst.remove('callback') + if intent_lst: + f2pyenhancements = '%s%sintent(%s) %s' %\ + (f2pyenhancements, tab + tabchar, + ','.join(intent_lst), name) + use = '' + if 'use' in block: + use = use2fortran(block['use'], tab + tabchar) + common = '' + if 'common' in block: + common = common2fortran(block['common'], tab + tabchar) + if name == 'unknown_interface': + name = '' + result = '' + if 'result' in block: + result = f" result ({block['result']})" + if block['result'] not in argsl: + argsl.append(block['result']) + body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) + vars = vars2fortran( + block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) + mess = '' + if 'from' in block and not as_interface: + mess = f"! 
in {block['from']}" + if 'entry' in block: + entry_stmts = '' + for k, i in list(block['entry'].items()): + entry_stmts = f"{entry_stmts}{tab + tabchar}entry {k}({','.join(i)})" + body = body + entry_stmts + if blocktype == 'block data' and name == '_BLOCK_DATA_': + name = '' + ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( + tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + return ret + + +def common2fortran(common, tab=''): + ret = '' + for k in list(common.keys()): + if k == '_BLNK_': + ret = f"{ret}{tab}common {','.join(common[k])}" + else: + ret = f"{ret}{tab}common /{k}/ {','.join(common[k])}" + return ret + + +def use2fortran(use, tab=''): + ret = '' + for m in list(use.keys()): + ret = f'{ret}{tab}use {m},' + if use[m] == {}: + if ret and ret[-1] == ',': + ret = ret[:-1] + continue + if 'only' in use[m] and use[m]['only']: + ret = f'{ret} only:' + if 'map' in use[m] and use[m]['map']: + c = ' ' + for k in list(use[m]['map'].keys()): + if k == use[m]['map'][k]: + ret = f'{ret}{c}{k}' + c = ',' + else: + ret = f"{ret}{c}{k}=>{use[m]['map'][k]}" + c = ',' + if ret and ret[-1] == ',': + ret = ret[:-1] + return ret + + +def true_intent_list(var): + lst = var['intent'] + ret = [] + for intent in lst: + try: + f = globals()[f'isintent_{intent}'] + except KeyError: + pass + else: + if f(var): + ret.append(intent) + return ret + + +def vars2fortran(block, vars, args, tab='', as_interface=False): + setmesstext(block) + ret = '' + nout = [] + for a in args: + if a in block['vars']: + nout.append(a) + if 'commonvars' in block: + for a in block['commonvars']: + if a in vars: + if a not in nout: + nout.append(a) + else: + errmess( + f'vars2fortran: Confused?!: "{a}" is not defined in vars.\n') + if 'varnames' in block: + nout.extend(block['varnames']) + if not as_interface: + for a in list(vars.keys()): + if a not in nout: + nout.append(a) + for a in nout: + if 'depend' in vars[a]: + for d in vars[a]['depend']: + if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: + errmess( + f'vars2fortran: Warning: cross-dependence between variables "{a}" and "{d}\"\n') + if 'externals' in block and a in block['externals']: + if isintent_callback(vars[a]): + ret = f'{ret}{tab}intent(callback) {a}' + ret = f'{ret}{tab}external {a}' + if isoptional(vars[a]): + ret = f'{ret}{tab}optional {a}' + if a in vars and 'typespec' not in vars[a]: + continue + cont = 1 + for b in block['body']: + if a == b['name'] and b['block'] == 'function': + cont = 0 + break + if cont: + continue + if a not in vars: + show(vars) + outmess(f'vars2fortran: No definition for argument "{a}".\n') + continue + if a == block['name']: + if block['block'] != 'function' or block.get('result'): + # 1) skip declaring a variable that name matches with + # subroutine name + # 2) skip declaring function when its type is + # declared via `result` construction + continue + if 'typespec' not in vars[a]: + if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: + if a in args: + ret = f'{ret}{tab}external {a}' + continue + show(vars[a]) + outmess(f'vars2fortran: No typespec for argument "{a}".\n') + continue + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = f"{vardef}({vars[a]['typename']})" + selector = {} + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + if '*' in selector: + if selector['*'] in ['*', ':']: + vardef = 
f"{vardef}*({selector['*']})" + else: + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" + c = ' ' + if 'attrspec' in vars[a]: + attr = [l for l in vars[a]['attrspec'] + if l not in ['external']] + if as_interface and 'intent(in)' in attr and 'intent(out)' in attr: + # In Fortran, intent(in, out) are conflicting while + # intent(in, out) can be specified only via + # `!f2py intent(out) ..`. + # So, for the Fortran interface, we'll drop + # intent(out) to resolve the conflict. + attr.remove('intent(out)') + if attr: + vardef = f"{vardef}, {','.join(attr)}" + c = ',' + if 'dimension' in vars[a]: + vardef = f"{vardef}{c}dimension({','.join(vars[a]['dimension'])})" + c = ',' + if 'intent' in vars[a]: + lst = true_intent_list(vars[a]) + if lst: + vardef = f"{vardef}{c}intent({','.join(lst)})" + c = ',' + if 'check' in vars[a]: + vardef = f"{vardef}{c}check({','.join(vars[a]['check'])})" + c = ',' + if 'depend' in vars[a]: + vardef = f"{vardef}{c}depend({','.join(vars[a]['depend'])})" + c = ',' + if '=' in vars[a]: + v = vars[a]['='] + if vars[a]['typespec'] in ['complex', 'double complex']: + try: + v = eval(v) + v = f'({v.real},{v.imag})' + except Exception: + pass + vardef = f'{vardef} :: {a}={v}' + else: + vardef = f'{vardef} :: {a}' + ret = f'{ret}{tab}{vardef}' + return ret +###### + + +# We expose post_processing_hooks as global variable so that +# user-libraries could register their own hooks to f2py. +post_processing_hooks = [] + + +def crackfortran(files): + global usermodules, post_processing_hooks + + outmess('Reading fortran codes...\n', 0) + readfortrancode(files, crackline) + outmess('Post-processing...\n', 0) + usermodules = [] + postlist = postcrack(grouplist[0]) + outmess('Applying post-processing hooks...\n', 0) + for hook in post_processing_hooks: + outmess(f' {hook.__name__}\n', 0) + postlist = traverse(postlist, hook) + outmess('Post-processing (stage 2)...\n', 0) + postlist = postcrack2(postlist) + return usermodules + postlist + + +def crack2fortran(block): + global f2py_version + + pyf = crack2fortrangen(block) + '\n' + header = """! -*- f90 -*- +! Note: the context of this file is case sensitive. +""" + footer = """ +! This file was auto-generated with f2py (version:%s). +! See: +! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e +""" % (f2py_version) + return header + pyf + footer + + +def _is_visit_pair(obj): + return (isinstance(obj, tuple) + and len(obj) == 2 + and isinstance(obj[0], (int, str))) + + +def traverse(obj, visit, parents=[], result=None, *args, **kwargs): + '''Traverse f2py data structure with the following visit function: + + def visit(item, parents, result, *args, **kwargs): + """ + + parents is a list of key-"f2py data structure" pairs from which + items are taken from. + + result is a f2py data structure that is filled with the + return value of the visit function. + + item is 2-tuple (index, value) if parents[-1][1] is a list + item is 2-tuple (key, value) if parents[-1][1] is a dict + + The return value of visit must be None, or of the same kind as + item, that is, if parents[-1] is a list, the return value must + be 2-tuple (new_index, new_value), or if parents[-1] is a + dict, the return value must be 2-tuple (new_key, new_value). 
+ + If new_index or new_value is None, the return value of visit + is ignored, that is, it will not be added to the result. + + If the return value is None, the content of obj will be + traversed, otherwise not. + """ + ''' + + if _is_visit_pair(obj): + if obj[0] == 'parent_block': + # avoid infinite recursion + return obj + new_result = visit(obj, parents, result, *args, **kwargs) + if new_result is not None: + assert _is_visit_pair(new_result) + return new_result + parent = obj + result_key, obj = obj + else: + parent = (None, obj) + result_key = None + + if isinstance(obj, list): + new_result = [] + for index, value in enumerate(obj): + new_index, new_item = traverse((index, value), visit, + parents + [parent], result, + *args, **kwargs) + if new_index is not None: + new_result.append(new_item) + elif isinstance(obj, dict): + new_result = {} + for key, value in obj.items(): + new_key, new_value = traverse((key, value), visit, + parents + [parent], result, + *args, **kwargs) + if new_key is not None: + new_result[new_key] = new_value + else: + new_result = obj + + if result_key is None: + return new_result + return result_key, new_result + + +def character_backward_compatibility_hook(item, parents, result, + *args, **kwargs): + """Previously, Fortran character was incorrectly treated as + character*1. This hook fixes the usage of the corresponding + variables in `check`, `dimension`, `=`, and `callstatement` + expressions. + + The usage of `char*` in `callprotoargument` expression can be left + unchanged because C `character` is C typedef of `char`, although, + new implementations should use `character*` in the corresponding + expressions. + + See https://github.com/numpy/numpy/pull/19388 for more information. + + """ + parent_key, parent_value = parents[-1] + key, value = item + + def fix_usage(varname, value): + value = re.sub(r'[*]\s*\b' + varname + r'\b', varname, value) + value = re.sub(r'\b' + varname + r'\b\s*[\[]\s*0\s*[\]]', + varname, value) + return value + + if parent_key in ['dimension', 'check']: + assert parents[-3][0] == 'vars' + vars_dict = parents[-3][1] + elif key == '=': + assert parents[-2][0] == 'vars' + vars_dict = parents[-2][1] + else: + vars_dict = None + + new_value = None + if vars_dict is not None: + new_value = value + for varname, vd in vars_dict.items(): + if ischaracter(vd): + new_value = fix_usage(varname, new_value) + elif key == 'callstatement': + vars_dict = parents[-2][1]['vars'] + new_value = value + for varname, vd in vars_dict.items(): + if ischaracter(vd): + # replace all occurrences of `<varname>` with + # `&<varname>` in argument passing + new_value = re.sub( + r'(?<![&])\b' + varname + r'\b', '&' + varname, new_value) + + if new_value is not None: + if new_value != value: + # We report the replacements here so that downstream + # software could update their source codes + # accordingly. However, such updates are recommended only + # when BC with numpy 1.21 or older is not required. + outmess(f'character_bc_hook[{parent_key}.{key}]:' + f' replaced `{value}` ->
`{new_value}`\n', 1) + return (key, new_value) + + +post_processing_hooks.append(character_backward_compatibility_hook) + + +if __name__ == "__main__": + files = [] + funcs = [] + f = 1 + f2 = 0 + f3 = 0 + showblocklist = 0 + for l in sys.argv[1:]: + if l == '': + pass + elif l[0] == ':': + f = 0 + elif l == '-quiet': + quiet = 1 + verbose = 0 + elif l == '-verbose': + verbose = 2 + quiet = 0 + elif l == '-fix': + if strictf77: + outmess( + 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) + skipemptyends = 1 + sourcecodeform = 'fix' + elif l == '-skipemptyends': + skipemptyends = 1 + elif l == '--ignore-contains': + ignorecontains = 1 + elif l == '-f77': + strictf77 = 1 + sourcecodeform = 'fix' + elif l == '-f90': + strictf77 = 0 + sourcecodeform = 'free' + skipemptyends = 1 + elif l == '-h': + f2 = 1 + elif l == '-show': + showblocklist = 1 + elif l == '-m': + f3 = 1 + elif l[0] == '-': + errmess(f'Unknown option {repr(l)}\n') + elif f2: + f2 = 0 + pyffilename = l + elif f3: + f3 = 0 + f77modulename = l + elif f: + try: + open(l).close() + files.append(l) + except OSError as detail: + errmess(f'OSError: {detail!s}\n') + else: + funcs.append(l) + if not strictf77 and f77modulename and not skipemptyends: + outmess("""\ + Warning: You have specified module name for non Fortran 77 code that + should not need one (expect if you are scanning F90 code for non + module blocks but then you should use flag -skipemptyends and also + be sure that the files do not contain programs without program + statement). +""", 0) + + postlist = crackfortran(files) + if pyffilename: + outmess(f'Writing fortran code to file {repr(pyffilename)}\n', 0) + pyf = crack2fortran(postlist) + with open(pyffilename, 'w') as f: + f.write(pyf) + if showblocklist: + show(postlist) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/crackfortran.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/crackfortran.pyi new file mode 100644 index 0000000..6b08f87 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/crackfortran.pyi @@ -0,0 +1,258 @@ +import re +from collections.abc import Callable, Iterable, Mapping +from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload +from typing import Literal as L + +from _typeshed import StrOrBytesPath, StrPath + +from .__version__ import version +from .auxfuncs import isintent_dict as isintent_dict + +### + +_Tss = ParamSpec("_Tss") + +_VisitResult: TypeAlias = list[Any] | dict[str, Any] | None +_VisitItem: TypeAlias = tuple[str | None, _VisitResult] +_VisitFunc: TypeAlias = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, _Tss], _VisitItem | None] + +### + +COMMON_FREE_EXTENSIONS: Final[list[str]] = ... +COMMON_FIXED_EXTENSIONS: Final[list[str]] = ... 
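The typed globals that follow mirror the command-line switches handled in the `__main__` block of crackfortran.py above (`-f77`/`-f90` toggle `strictf77` and `sourcecodeform`, `-quiet`/`-verbose` set `verbose`, and so on). As a minimal sketch of that correspondence, the parser can also be driven programmatically by assigning these globals before calling `crackfortran()`; the source file name here is hypothetical:

    from numpy.f2py import crackfortran

    # Equivalent of the -f90 switch: free-form source, F90-style ends.
    crackfortran.strictf77 = 0
    crackfortran.sourcecodeform = 'free'
    crackfortran.skipemptyends = 1
    # Equivalent of -quiet.
    crackfortran.verbose = 0
    # Parse and post-process; returns a list of block dictionaries.
    blocks = crackfortran.crackfortran(['fib.f90'])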
+ +f2py_version: Final = version +tabchar: Final[str] = " " + +f77modulename: str +pyffilename: str +sourcecodeform: L["fix", "free"] +strictf77: L[0, 1] +quiet: L[0, 1] +verbose: L[0, 1, 2] +skipemptyends: L[0, 1] +ignorecontains: L[1] +dolowercase: L[1] + +beginpattern: str | re.Pattern[str] +currentfilename: str +filepositiontext: str +expectbegin: L[0, 1] +gotnextfile: L[0, 1] +neededmodule: int +skipblocksuntil: int +groupcounter: int +groupname: dict[int, str] | str +groupcache: dict[int, dict[str, Any]] | None +grouplist: dict[int, list[dict[str, Any]]] | None +previous_context: tuple[str, str, int] | None + +f90modulevars: dict[str, dict[str, Any]] = {} +debug: list[Never] = [] +include_paths: list[str] = [] +onlyfuncs: list[str] = [] +skipfuncs: list[str] = [] +skipfunctions: Final[list[str]] = [] +usermodules: Final[list[dict[str, Any]]] = [] + +defaultimplicitrules: Final[dict[str, dict[str, str]]] = {} +badnames: Final[dict[str, str]] = {} +invbadnames: Final[dict[str, str]] = {} + +beforethisafter: Final[str] = ... +fortrantypes: Final[str] = ... +groupbegins77: Final[str] = ... +groupbegins90: Final[str] = ... +groupends: Final[str] = ... +endifs: Final[str] = ... +moduleprocedures: Final[str] = ... + +beginpattern77: Final[tuple[re.Pattern[str], L["begin"]]] = ... +beginpattern90: Final[tuple[re.Pattern[str], L["begin"]]] = ... +callpattern: Final[tuple[re.Pattern[str], L["call"]]] = ... +callfunpattern: Final[tuple[re.Pattern[str], L["callfun"]]] = ... +commonpattern: Final[tuple[re.Pattern[str], L["common"]]] = ... +containspattern: Final[tuple[re.Pattern[str], L["contains"]]] = ... +datapattern: Final[tuple[re.Pattern[str], L["data"]]] = ... +dimensionpattern: Final[tuple[re.Pattern[str], L["dimension"]]] = ... +endifpattern: Final[tuple[re.Pattern[str], L["endif"]]] = ... +endpattern: Final[tuple[re.Pattern[str], L["end"]]] = ... +entrypattern: Final[tuple[re.Pattern[str], L["entry"]]] = ... +externalpattern: Final[tuple[re.Pattern[str], L["external"]]] = ... +f2pyenhancementspattern: Final[tuple[re.Pattern[str], L["f2pyenhancements"]]] = ... +formatpattern: Final[tuple[re.Pattern[str], L["format"]]] = ... +functionpattern: Final[tuple[re.Pattern[str], L["begin"]]] = ... +implicitpattern: Final[tuple[re.Pattern[str], L["implicit"]]] = ... +intentpattern: Final[tuple[re.Pattern[str], L["intent"]]] = ... +intrinsicpattern: Final[tuple[re.Pattern[str], L["intrinsic"]]] = ... +optionalpattern: Final[tuple[re.Pattern[str], L["optional"]]] = ... +moduleprocedurepattern: Final[tuple[re.Pattern[str], L["moduleprocedure"]]] = ... +multilinepattern: Final[tuple[re.Pattern[str], L["multiline"]]] = ... +parameterpattern: Final[tuple[re.Pattern[str], L["parameter"]]] = ... +privatepattern: Final[tuple[re.Pattern[str], L["private"]]] = ... +publicpattern: Final[tuple[re.Pattern[str], L["public"]]] = ... +requiredpattern: Final[tuple[re.Pattern[str], L["required"]]] = ... +subroutinepattern: Final[tuple[re.Pattern[str], L["begin"]]] = ... +typespattern: Final[tuple[re.Pattern[str], L["type"]]] = ... +usepattern: Final[tuple[re.Pattern[str], L["use"]]] = ... + +analyzeargs_re_1: Final[re.Pattern[str]] = ... +callnameargspattern: Final[re.Pattern[str]] = ... +charselector: Final[re.Pattern[str]] = ... +crackline_bind_1: Final[re.Pattern[str]] = ... +crackline_bindlang: Final[re.Pattern[str]] = ... +crackline_re_1: Final[re.Pattern[str]] = ... +determineexprtype_re_1: Final[re.Pattern[str]] = ... +determineexprtype_re_2: Final[re.Pattern[str]] = ...
+determineexprtype_re_3: Final[re.Pattern[str]] = ... +determineexprtype_re_4: Final[re.Pattern[str]] = ... +determineexprtype_re_5: Final[re.Pattern[str]] = ... +getlincoef_re_1: Final[re.Pattern[str]] = ... +kindselector: Final[re.Pattern[str]] = ... +lenarraypattern: Final[re.Pattern[str]] = ... +lenkindpattern: Final[re.Pattern[str]] = ... +namepattern: Final[re.Pattern[str]] = ... +nameargspattern: Final[re.Pattern[str]] = ... +operatorpattern: Final[re.Pattern[str]] = ... +real16pattern: Final[re.Pattern[str]] = ... +real8pattern: Final[re.Pattern[str]] = ... +selectpattern: Final[re.Pattern[str]] = ... +typedefpattern: Final[re.Pattern[str]] = ... +typespattern4implicit: Final[re.Pattern[str]] = ... +word_pattern: Final[re.Pattern[str]] = ... + +post_processing_hooks: Final[list[_VisitFunc[...]]] = [] + +# +def outmess(line: str, flag: int = 1) -> None: ... +def reset_global_f2py_vars() -> None: ... + +# +def rmbadname1(name: str) -> str: ... +def undo_rmbadname1(name: str) -> str: ... +def rmbadname(names: Iterable[str]) -> list[str]: ... +def undo_rmbadname(names: Iterable[str]) -> list[str]: ... + +# +def openhook(filename: StrPath, mode: str) -> IO[Any]: ... +def is_free_format(fname: StrPath) -> bool: ... +def readfortrancode( + ffile: StrOrBytesPath | Iterable[StrOrBytesPath], + dowithline: Callable[[str, int], object] = ..., + istop: int = 1, +) -> None: ... + +# +def split_by_unquoted(line: str, characters: str) -> tuple[str, str]: ... + +# +def crackline(line: str, reset: int = 0) -> None: ... +def markouterparen(line: str) -> str: ... +def markoutercomma(line: str, comma: str = ",") -> str: ... +def unmarkouterparen(line: str) -> str: ... +def appenddecl(decl: Mapping[str, object] | None, decl2: Mapping[str, object] | None, force: int = 1) -> dict[str, Any]: ... + +# +def parse_name_for_bind(line: str) -> tuple[str, str | None]: ... +def analyzeline(m: re.Match[str], case: str, line: str) -> None: ... +def appendmultiline(group: dict[str, Any], context_name: str, ml: str) -> None: ... +def cracktypespec0(typespec: str, ll: str | None) -> tuple[str, str | None, str | None, str | None]: ... + +# +def removespaces(expr: str) -> str: ... +def markinnerspaces(line: str) -> str: ... +def updatevars(typespec: str, selector: str | None, attrspec: str, entitydecl: str) -> str: ... +def cracktypespec(typespec: str, selector: str | None) -> tuple[dict[str, str] | None, dict[str, str] | None, str | None]: ... + +# +def setattrspec(decl: dict[str, list[str]], attr: str | None, force: int = 0) -> dict[str, list[str]]: ... +def setkindselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def setcharselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def getblockname(block: Mapping[str, object], unknown: str = "unknown") -> str: ... +def setmesstext(block: Mapping[str, object]) -> None: ... +def get_usedict(block: Mapping[str, object]) -> dict[str, str]: ... +def get_useparameters(block: Mapping[str, object], param_map: Mapping[str, str] | None = None) -> dict[str, str]: ... + +# +@overload +def postcrack2( + block: dict[str, Any], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> dict[str, str | Any]: ... +@overload +def postcrack2( + block: list[dict[str, Any]], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> list[dict[str, str | Any]]: ... 
+ +# +@overload +def postcrack(block: dict[str, Any], args: Mapping[str, str] | None = None, tab: str = "") -> dict[str, Any]: ... +@overload +def postcrack(block: list[dict[str, str]], args: Mapping[str, str] | None = None, tab: str = "") -> list[dict[str, Any]]: ... + +# +def sortvarnames(vars: Mapping[str, object]) -> list[str]: ... +def analyzecommon(block: Mapping[str, object]) -> dict[str, Any]: ... +def analyzebody(block: Mapping[str, object], args: Mapping[str, str], tab: str = "") -> list[dict[str, Any]]: ... +def buildimplicitrules(block: Mapping[str, object]) -> tuple[dict[str, dict[str, str]], dict[str, str]]: ... +def myeval(e: str, g: object | None = None, l: object | None = None) -> float: ... + +# +def getlincoef(e: str, xset: set[str]) -> tuple[float | None, float | None, str | None]: ... + +# +def get_sorted_names(vars: Mapping[str, Mapping[str, str]]) -> list[str]: ... +def get_parameters(vars: Mapping[str, Mapping[str, str]], global_params: dict[str, str] = {}) -> dict[str, str]: ... + +# +def analyzevars(block: Mapping[str, Any]) -> dict[str, dict[str, str]]: ... + +# +def param_eval(v: str, g_params: dict[str, Any], params: Mapping[str, object], dimspec: str | None = None) -> dict[str, Any]: ... +def param_parse(d: str, params: Mapping[str, str]) -> str: ... +def expr2name(a: str, block: Mapping[str, object], args: list[str] = []) -> str: ... +def analyzeargs(block: Mapping[str, object]) -> dict[str, Any]: ... + +# +def determineexprtype(expr: str, vars: Mapping[str, object], rules: dict[str, Any] = {}) -> dict[str, Any]: ... +def crack2fortrangen(block: Mapping[str, object], tab: str = "\n", as_interface: bool = False) -> str: ... +def common2fortran(common: Mapping[str, object], tab: str = "") -> str: ... +def use2fortran(use: Mapping[str, object], tab: str = "") -> str: ... +def true_intent_list(var: dict[str, list[str]]) -> list[str]: ... +def vars2fortran( + block: Mapping[str, Mapping[str, object]], + vars: Mapping[str, object], + args: Mapping[str, str], + tab: str = "", + as_interface: bool = False, +) -> str: ... + +# +def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[str, Any]]: ... +def crack2fortran(block: Mapping[str, Any]) -> str: ... + +# +def traverse( + obj: tuple[str | None, _VisitResult], + visit: _VisitFunc[_Tss], + parents: list[tuple[str | None, _VisitResult]] = [], + result: list[Any] | dict[str, Any] | None = None, + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> _VisitItem | _VisitResult: ... + +# +def character_backward_compatibility_hook( + item: _VisitItem, + parents: list[_VisitItem], + result: object, # ignored + *args: object, # ignored + **kwargs: object, # ignored +) -> _VisitItem | None: ... 
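Given the hook machinery stubbed above, a user hook is any callable matching `_VisitFunc`: it receives a `(key, value)` item plus the traversal state, and returns either a replacement pair or `None` to let `traverse` keep descending, exactly as described in the `traverse` docstring earlier. A minimal sketch of registering one (the hook name and its action are illustrative, not part of numpy):

    from numpy.f2py import crackfortran

    def report_names_hook(item, parents, result, *args, **kwargs):
        # item is a (key, value) pair from the f2py block data structure.
        key, value = item
        if key == 'name' and isinstance(value, str):
            crackfortran.outmess(f'crackfortran visited name: {value}\n', 0)
        # Returning None keeps the default traversal of value.
        return None

    crackfortran.post_processing_hooks.append(report_names_hook)

Registered hooks run once per visited item inside `crackfortran()`, after stage-1 post-processing and before `postcrack2`.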
+ +# namespace pollution +c: str +n: str diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/diagnose.py b/.venv/lib/python3.12/site-packages/numpy/f2py/diagnose.py new file mode 100644 index 0000000..7eb1697 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/diagnose.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python3 +import os +import sys +import tempfile + + +def run(): + _path = os.getcwd() + os.chdir(tempfile.gettempdir()) + print('------') + print(f'os.name={os.name!r}') + print('------') + print(f'sys.platform={sys.platform!r}') + print('------') + print('sys.version:') + print(sys.version) + print('------') + print('sys.prefix:') + print(sys.prefix) + print('------') + print(f"sys.path={':'.join(sys.path)!r}") + print('------') + + try: + import numpy + has_newnumpy = 1 + except ImportError as e: + print('Failed to import new numpy:', e) + has_newnumpy = 0 + + try: + from numpy.f2py import f2py2e + has_f2py2e = 1 + except ImportError as e: + print('Failed to import f2py2e:', e) + has_f2py2e = 0 + + try: + import numpy.distutils + has_numpy_distutils = 2 + except ImportError: + try: + import numpy_distutils + has_numpy_distutils = 1 + except ImportError as e: + print('Failed to import numpy_distutils:', e) + has_numpy_distutils = 0 + + if has_newnumpy: + try: + print(f'Found new numpy version {numpy.__version__!r} in {numpy.__file__}') + except Exception as msg: + print('error:', msg) + print('------') + + if has_f2py2e: + try: + print('Found f2py2e version %r in %s' % + (f2py2e.__version__.version, f2py2e.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_numpy_distutils: + try: + if has_numpy_distutils == 2: + print('Found numpy.distutils version %r in %r' % ( + numpy.distutils.__version__, + numpy.distutils.__file__)) + else: + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 1: + print( + 'Importing numpy_distutils.command.build_flib ...', end=' ') + import numpy_distutils.command.build_flib as build_flib + print('ok') + print('------') + try: + print( + 'Checking availability of supported Fortran compilers:') + for compiler_class in build_flib.all_compilers: + compiler_class(verbose=1).is_available() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print( + 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.fcompiler ...', end=' ') + import numpy.distutils.fcompiler as fcompiler + else: + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + fcompiler.show_fcompilers() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.cpuinfo ...', end=' ') + from numpy.distutils.cpuinfo import cpuinfo + print('ok') + print('------') + else: + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + 
+                except Exception as msg:
+                    print('error:', msg, '(ignore it)')
+                    print('Importing numpy_distutils.cpuinfo ...', end=' ')
+                    from numpy_distutils.cpuinfo import cpuinfo
+                    print('ok')
+                    print('------')
+            cpu = cpuinfo()
+            print('CPU information:', end=' ')
+            for name in dir(cpuinfo):
+                if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])():
+                    print(name[1:], end=' ')
+            print('------')
+    except Exception as msg:
+        print('error:', msg)
+        print('------')
+    os.chdir(_path)
+
+
+if __name__ == "__main__":
+    run()
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/diagnose.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/diagnose.pyi
new file mode 100644
index 0000000..b88194a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/diagnose.pyi
@@ -0,0 +1 @@
+def run() -> None: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/f2py2e.py b/.venv/lib/python3.12/site-packages/numpy/f2py/f2py2e.py
new file mode 100644
index 0000000..459299f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/f2py2e.py
@@ -0,0 +1,786 @@
+"""
+
+f2py2e - Fortran to Python C/API generator. 2nd Edition.
+         See __usage__ below.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+"""
+import argparse
+import os
+import pprint
+import re
+import sys
+
+from numpy.f2py._backends import f2py_build_generator
+
+from . import (
+    __version__,
+    auxfuncs,
+    capi_maps,
+    cb_rules,
+    cfuncs,
+    crackfortran,
+    f90mod_rules,
+    rules,
+)
+from .cfuncs import errmess
+
+f2py_version = __version__.version
+numpy_version = __version__.version
+
+# outmess=sys.stdout.write
+show = pprint.pprint
+outmess = auxfuncs.outmess
+MESON_ONLY_VER = (sys.version_info >= (3, 12))
+
+__usage__ =\
+f"""Usage:
+
+1) To construct extension module sources:
+
+      f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
+                                        <fortran functions> ] \\
+                                       [: <fortran files> ...]
+
+2) To compile fortran files and build extension modules:
+
+      f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
+
+3) To generate signature files:
+
+      f2py -h <filename.pyf> ...< same options as in (1) >
+
+Description: This program generates a Python C/API file (<modulename>module.c)
+             that contains wrappers for given fortran functions so that they
+             can be called from Python. With the -c option the corresponding
+             extension modules are built.
+
+Options:
+
+  -h <filename>    Write signatures of the fortran routines to file <filename>
+                   and exit. You can then edit <filename> and use it instead
+                   of <fortran files>. If <filename>==stdout then the
+                   signatures are printed to stdout.
+  <fortran functions>  Names of fortran routines for which Python C/API
+                   functions will be generated. Default is all that are found
+                   in <fortran files>.
+  <fortran files>  Paths to fortran/signature files that will be scanned for
+                   <fortran functions> in order to determine their signatures.
+  skip:            Ignore fortran functions that follow until `:'.
+  only:            Use only fortran functions that follow until `:'.
+  :                Get back to <fortran files> mode.
+
+  -m <modulename>  Name of the module; f2py generates a Python/C API
+                   file <modulename>module.c or extension module <modulename>.
+                   Default is 'untitled'.
+
+  '-include<header>'  Writes additional headers in the C wrapper, can be passed
+                   multiple times, generates #include <header> each time.
+
+  --[no-]lower     Do [not] lower the cases in <fortran files>. By default,
+                   --lower is assumed with -h key, and --no-lower without -h key.
+
+  --build-dir <dirname>  All f2py generated files are created in <dirname>.
+                   Default is tempfile.mkdtemp().
+
+  --overwrite-signature  Overwrite existing signature file.
+
+  --[no-]latex-doc Create (or not) <modulename>module.tex.
+                   Default is --no-latex-doc.
+  --short-latex    Create 'incomplete' LaTeX document (without commands
+                   \\documentclass, \\tableofcontents, and \\begin{{document}},
+                   \\end{{document}}).
+
+  --[no-]rest-doc Create (or not) <modulename>module.rst.
+                   Default is --no-rest-doc.
+
+  --debug-capi     Create C/API code that reports the state of the wrappers
+                   during runtime. Useful for debugging.
+
+  --[no-]wrap-functions    Create Fortran subroutine wrappers to Fortran 77
+                   functions. --wrap-functions is default because it ensures
+                   maximum portability/compiler independence.
+
+  --[no-]freethreading-compatible    Create a module that declares it does or
+                   doesn't require the GIL. The default is
+                   --freethreading-compatible for backward
+                   compatibility. Inspect the Fortran code you are wrapping for
+                   thread safety issues before passing
+                   --no-freethreading-compatible, as f2py does not analyze
+                   fortran code for thread safety issues.
+
+  --include-paths <path1>:<path2>:...   Search include files from the given
+                   directories.
+
+  --help-link [..] List system resources found by system_info.py. See also
+                   --link-<resource> switch below. [..] is optional list
+                   of resources names. E.g. try 'f2py --help-link lapack_opt'.
+
+  --f2cmap <filename>  Load Fortran-to-Python KIND specification from the given
+                   file. Default: .f2py_f2cmap in current directory.
+
+  --quiet          Run quietly.
+  --verbose        Run with extra verbosity.
+  --skip-empty-wrappers   Only generate wrapper files when needed.
+  -v               Print f2py version ID and exit.
+
+
+build backend options (only effective with -c)
+[NO_MESON] is used to indicate an option not meant to be used
+with the meson backend or above Python 3.12:
+
+  --fcompiler=         Specify Fortran compiler type by vendor [NO_MESON]
+  --compiler=          Specify distutils C compiler type [NO_MESON]
+
+  --help-fcompiler     List available Fortran compilers and exit [NO_MESON]
+  --f77exec=           Specify the path to F77 compiler [NO_MESON]
+  --f90exec=           Specify the path to F90 compiler [NO_MESON]
+  --f77flags=          Specify F77 compiler flags
+  --f90flags=          Specify F90 compiler flags
+  --opt=               Specify optimization flags [NO_MESON]
+  --arch=              Specify architecture specific optimization flags [NO_MESON]
+  --noopt              Compile without optimization [NO_MESON]
+  --noarch             Compile without arch-dependent optimization [NO_MESON]
+  --debug              Compile with debugging information
+
+  --dep                <dependency>
+                       Specify a meson dependency for the module. This may
+                       be passed multiple times for multiple dependencies.
+                       Dependencies are stored in a list for further processing.
+
+                       Example: --dep lapack --dep scalapack
+                       This will identify "lapack" and "scalapack" as dependencies
+                       and remove them from argv, leaving a dependencies list
+                       containing ["lapack", "scalapack"].
+
+  --backend            <backend_type>
+                       Specify the build backend for the compilation process.
+                       The supported backends are 'meson' and 'distutils'.
+                       If not specified, defaults to 'distutils'. On
+                       Python 3.12 or higher, the default is 'meson'.
+
+Extra options (only effective with -c):
+
+  --link-<resource>    Link extension module with <resource> as defined
+                       by numpy.distutils/system_info.py. E.g. to link
+                       with optimized LAPACK libraries (vecLib on MacOSX,
+                       ATLAS elsewhere), use --link-lapack_opt.
+                       See also --help-link switch. [NO_MESON]
+
+  -L/path/to/lib/ -l<libname>
+  -D<define> -U<name>
+  -I/path/to/include/
+  <filename>.o <filename>.so <filename>.a
+
+  Using the following macros may be required with non-gcc Fortran
+  compilers:
+    -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
+
+  When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
+  interface is printed out at exit (platforms: Linux).
+
+  When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
+  sent to stderr whenever F2PY interface makes a copy of an
+  array. Integer <int> sets the threshold for array sizes when
+  a message should be shown.
+
+Version:       {f2py_version}
+numpy Version: {numpy_version}
+License:       NumPy license (see LICENSE.txt in the NumPy source code)
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+https://numpy.org/doc/stable/f2py/index.html\n"""
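Editor's note, not part of the patch: the CLI documented in __usage__ above is
also reachable from Python through run_main(), defined later in this file. A
minimal sketch ("example.pyf" is a hypothetical signature file):

    from numpy.f2py import f2py2e

    # Same as the shell command `f2py -m example example.pyf`; writes the
    # C/API sources and returns a dict describing them (no -c, no compiling).
    # f2py2e.run_main(['-m', 'example', 'example.pyf'])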
+
+
+def scaninputline(inputline):
+    files, skipfuncs, onlyfuncs, debug = [], [], [], []
+    f, f2, f3, f5, f6, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0
+    verbose = 1
+    emptygen = True
+    dolc = -1
+    dolatexdoc = 0
+    dorestdoc = 0
+    wrapfuncs = 1
+    buildpath = '.'
+    include_paths, freethreading_compatible, inputline = get_newer_options(inputline)
+    signsfile, modulename = None, None
+    options = {'buildpath': buildpath,
+               'coutput': None,
+               'f2py_wrapper_output': None}
+    for l in inputline:
+        if l == '':
+            pass
+        elif l == 'only:':
+            f = 0
+        elif l == 'skip:':
+            f = -1
+        elif l == ':':
+            f = 1
+        elif l[:8] == '--debug-':
+            debug.append(l[8:])
+        elif l == '--lower':
+            dolc = 1
+        elif l == '--build-dir':
+            f6 = 1
+        elif l == '--no-lower':
+            dolc = 0
+        elif l == '--quiet':
+            verbose = 0
+        elif l == '--verbose':
+            verbose += 1
+        elif l == '--latex-doc':
+            dolatexdoc = 1
+        elif l == '--no-latex-doc':
+            dolatexdoc = 0
+        elif l == '--rest-doc':
+            dorestdoc = 1
+        elif l == '--no-rest-doc':
+            dorestdoc = 0
+        elif l == '--wrap-functions':
+            wrapfuncs = 1
+        elif l == '--no-wrap-functions':
+            wrapfuncs = 0
+        elif l == '--short-latex':
+            options['shortlatex'] = 1
+        elif l == '--coutput':
+            f8 = 1
+        elif l == '--f2py-wrapper-output':
+            f9 = 1
+        elif l == '--f2cmap':
+            f10 = 1
+        elif l == '--overwrite-signature':
+            options['h-overwrite'] = 1
+        elif l == '-h':
+            f2 = 1
+        elif l == '-m':
+            f3 = 1
+        elif l[:2] == '-v':
+            print(f2py_version)
+            sys.exit()
+        elif l == '--show-compilers':
+            f5 = 1
+        elif l[:8] == '-include':
+            cfuncs.outneeds['userincludes'].append(l[9:-1])
+            cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
+        elif l == '--skip-empty-wrappers':
+            emptygen = False
+        elif l[0] == '-':
+            errmess(f'Unknown option {repr(l)}\n')
+            sys.exit()
+        elif f2:
+            f2 = 0
+            signsfile = l
+        elif f3:
+            f3 = 0
+            modulename = l
+        elif f6:
+            f6 = 0
+            buildpath = l
+        elif f8:
+            f8 = 0
+            options["coutput"] = l
+        elif f9:
+            f9 = 0
+            options["f2py_wrapper_output"] = l
+        elif f10:
+            f10 = 0
+            options["f2cmap_file"] = l
+        elif f == 1:
+            try:
+                with open(l):
+                    pass
+                files.append(l)
+            except OSError as detail:
+                errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n')
+        elif f == -1:
+            skipfuncs.append(l)
+        elif f == 0:
+            onlyfuncs.append(l)
+    if not f5 and not files and not modulename:
+        print(__usage__)
+        sys.exit()
+    if not os.path.isdir(buildpath):
+        if not verbose:
+            outmess(f'Creating build directory {buildpath}\n')
+        os.mkdir(buildpath)
+    if signsfile:
+        signsfile = os.path.join(buildpath, signsfile)
+    if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
+        errmess(
+            f'Signature file "{signsfile}" exists!!! Use --overwrite-signature to overwrite.\n')
+        sys.exit()
+
+    options['emptygen'] = emptygen
+    options['debug'] = debug
+    options['verbose'] = verbose
+    if dolc == -1 and not signsfile:
+        options['do-lower'] = 0
+    else:
+        options['do-lower'] = dolc
+    if modulename:
+        options['module'] = modulename
+    if signsfile:
+        options['signsfile'] = signsfile
+    if onlyfuncs:
+        options['onlyfuncs'] = onlyfuncs
+    if skipfuncs:
+        options['skipfuncs'] = skipfuncs
+    options['dolatexdoc'] = dolatexdoc
+    options['dorestdoc'] = dorestdoc
+    options['wrapfuncs'] = wrapfuncs
+    options['buildpath'] = buildpath
+    options['include_paths'] = include_paths
+    options['requires_gil'] = not freethreading_compatible
+    options.setdefault('f2cmap_file', None)
+    return files, options
+
+
+def callcrackfortran(files, options):
+    rules.options = options
+    crackfortran.debug = options['debug']
+    crackfortran.verbose = options['verbose']
+    if 'module' in options:
+        crackfortran.f77modulename = options['module']
+    if 'skipfuncs' in options:
+        crackfortran.skipfuncs = options['skipfuncs']
+    if 'onlyfuncs' in options:
+        crackfortran.onlyfuncs = options['onlyfuncs']
+    crackfortran.include_paths[:] = options['include_paths']
+    crackfortran.dolowercase = options['do-lower']
+    postlist = crackfortran.crackfortran(files)
+    if 'signsfile' in options:
+        outmess(f"Saving signatures to file \"{options['signsfile']}\"\n")
+        pyf = crackfortran.crack2fortran(postlist)
+        if options['signsfile'][-6:] == 'stdout':
+            sys.stdout.write(pyf)
+        else:
+            with open(options['signsfile'], 'w') as f:
+                f.write(pyf)
+    if options["coutput"] is None:
+        for mod in postlist:
+            mod["coutput"] = f"{mod['name']}module.c"
+    else:
+        for mod in postlist:
+            mod["coutput"] = options["coutput"]
+    if options["f2py_wrapper_output"] is None:
+        for mod in postlist:
+            mod["f2py_wrapper_output"] = f"{mod['name']}-f2pywrappers.f"
+    else:
+        for mod in postlist:
+            mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
+    for mod in postlist:
+        if options["requires_gil"]:
+            mod['gil_used'] = 'Py_MOD_GIL_USED'
+        else:
+            mod['gil_used'] = 'Py_MOD_GIL_NOT_USED'
+    return postlist
+
+
+def buildmodules(lst):
+    cfuncs.buildcfuncs()
+    outmess('Building modules...\n')
+    modules, mnames, isusedby = [], [], {}
+    for item in lst:
+        if '__user__' in item['name']:
+            cb_rules.buildcallbacks(item)
+        else:
+            if 'use' in item:
+                for u in item['use'].keys():
+                    if u not in isusedby:
+                        isusedby[u] = []
+                    isusedby[u].append(item['name'])
+            modules.append(item)
+            mnames.append(item['name'])
+    ret = {}
+    for module, name in zip(modules, mnames):
+        if name in isusedby:
+            outmess('\tSkipping module "%s" which is used by %s.\n' % (
+                name, ','.join('"%s"' % s for s in isusedby[name])))
+        else:
+            um = []
+            if 'use' in module:
+                for u in module['use'].keys():
+                    if u in isusedby and u in mnames:
+                        um.append(modules[mnames.index(u)])
+                    else:
+                        outmess(
+                            f'\tModule "{name}" uses nonexisting "{u}" '
+                            'which will be ignored.\n')
+            ret[name] = {}
+            dict_append(ret[name], rules.buildmodule(module, um))
+    return ret
+
+
+def dict_append(d_out, d_in):
+    for (k, v) in d_in.items():
+        if k not in d_out:
+            d_out[k] = []
+        if isinstance(v, list):
+            d_out[k] = d_out[k] + v
+        else:
+            d_out[k].append(v)
+
+
+def run_main(comline_list):
+    """
+    Equivalent to running::
+
+        f2py <args>
+
+    where ``<args>=string.join(<list>,' ')``, but in Python. Unless
+    ``-h`` is used, this function returns a dictionary containing
+    information on generated modules and their dependencies on source
+    files.
+ + You cannot build extension modules with this function, that is, + using ``-c`` is not allowed. Use the ``compile`` command instead. + + Examples + -------- + The command ``f2py -m scalar scalar.f`` can be executed from Python as + follows. + + .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat + :language: python + + """ + crackfortran.reset_global_f2py_vars() + f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + # gh-22819 -- begin + parser = make_f2py_compile_parser() + args, comline_list = parser.parse_known_args(comline_list) + pyf_files, _ = filter_files("", "[.]pyf([.]src|)", comline_list) + # Checks that no existing modulename is defined in a pyf file + # TODO: Remove all this when scaninputline is replaced + if args.module_name: + if "-h" in comline_list: + modname = ( + args.module_name + ) # Directly use from args when -h is present + else: + modname = validate_modulename( + pyf_files, args.module_name + ) # Validate modname when -h is not present + comline_list += ['-m', modname] # needed for the rest of scaninputline + # gh-22819 -- end + files, options = scaninputline(comline_list) + auxfuncs.options = options + capi_maps.load_f2cmap_file(options['f2cmap_file']) + postlist = callcrackfortran(files, options) + isusedby = {} + for plist in postlist: + if 'use' in plist: + for u in plist['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(plist['name']) + for plist in postlist: + module_name = plist['name'] + if plist['block'] == 'python module' and '__user__' in module_name: + if module_name in isusedby: + # if not quiet: + usedby = ','.join(f'"{s}"' for s in isusedby[module_name]) + outmess( + f'Skipping Makefile build for module "{module_name}" ' + f'which is used by {usedby}\n') + if 'signsfile' in options: + if options['verbose'] > 1: + outmess( + 'Stopping. Edit the signature file and then run f2py on the signature file: ') + outmess(f"{os.path.basename(sys.argv[0])} {options['signsfile']}\n") + return + for plist in postlist: + if plist['block'] != 'python module': + if 'python module' not in options: + errmess( + 'Tip: If your original code is Fortran source then you must use -m option.\n') + raise TypeError('All blocks must be python module blocks but got %s' % ( + repr(plist['block']))) + auxfuncs.debugoptions = options['debug'] + f90mod_rules.options = options + auxfuncs.wrapfuncs = options['wrapfuncs'] + + ret = buildmodules(postlist) + + for mn in ret.keys(): + dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) + return ret + + +def filter_files(prefix, suffix, files, remove_prefix=None): + """ + Filter files by prefix and suffix. 
+ """ + filtered, rest = [], [] + match = re.compile(prefix + r'.*' + suffix + r'\Z').match + if remove_prefix: + ind = len(prefix) + else: + ind = 0 + for file in [x.strip() for x in files]: + if match(file): + filtered.append(file[ind:]) + else: + rest.append(file) + return filtered, rest + + +def get_prefix(module): + p = os.path.dirname(os.path.dirname(module.__file__)) + return p + + +class CombineIncludePaths(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + include_paths_set = set(getattr(namespace, 'include_paths', []) or []) + if option_string == "--include_paths": + outmess("Use --include-paths or -I instead of --include_paths which will be removed") + if option_string in {"--include-paths", "--include_paths"}: + include_paths_set.update(values.split(':')) + else: + include_paths_set.add(values) + namespace.include_paths = list(include_paths_set) + +def f2py_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("-I", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include-paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include_paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--freethreading-compatible", dest="ftcompat", action=argparse.BooleanOptionalAction) + return parser + +def get_newer_options(iline): + iline = (' '.join(iline)).split() + parser = f2py_parser() + args, remain = parser.parse_known_args(iline) + ipaths = args.include_paths + if args.include_paths is None: + ipaths = [] + return ipaths, args.ftcompat, remain + +def make_f2py_compile_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--dep", action="append", dest="dependencies") + parser.add_argument("--backend", choices=['meson', 'distutils'], default='distutils') + parser.add_argument("-m", dest="module_name") + return parser + +def preparse_sysargv(): + # To keep backwards bug compatibility, newer flags are handled by argparse, + # and `sys.argv` is passed to the rest of `f2py` as is. + parser = make_f2py_compile_parser() + + args, remaining_argv = parser.parse_known_args() + sys.argv = [sys.argv[0]] + remaining_argv + + backend_key = args.backend + if MESON_ONLY_VER and backend_key == 'distutils': + outmess("Cannot use distutils backend with Python>=3.12," + " using meson backend instead.\n") + backend_key = "meson" + + return { + "dependencies": args.dependencies or [], + "backend": backend_key, + "modulename": args.module_name, + } + +def run_compile(): + """ + Do it all in one call! 
+ """ + import tempfile + + # Collect dependency flags, preprocess sys.argv + argy = preparse_sysargv() + modulename = argy["modulename"] + if modulename is None: + modulename = 'untitled' + dependencies = argy["dependencies"] + backend_key = argy["backend"] + build_backend = f2py_build_generator(backend_key) + + i = sys.argv.index('-c') + del sys.argv[i] + + remove_build_dir = 0 + try: + i = sys.argv.index('--build-dir') + except ValueError: + i = None + if i is not None: + build_dir = sys.argv[i + 1] + del sys.argv[i + 1] + del sys.argv[i] + else: + remove_build_dir = 1 + build_dir = tempfile.mkdtemp() + + _reg1 = re.compile(r'--link-') + sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] + if sysinfo_flags: + sysinfo_flags = [f[7:] for f in sysinfo_flags] + + _reg2 = re.compile( + r'--((no-|)(wrap-functions|lower|freethreading-compatible)|debug-capi|quiet|skip-empty-wrappers)|-include') + f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] + f2py_flags2 = [] + fl = 0 + for a in sys.argv[1:]: + if a in ['only:', 'skip:']: + fl = 1 + elif a == ':': + fl = 0 + if fl or a == ':': + f2py_flags2.append(a) + if f2py_flags2 and f2py_flags2[-1] != ':': + f2py_flags2.append(':') + f2py_flags.extend(f2py_flags2) + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] + _reg3 = re.compile( + r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') + flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + # TODO: Once distutils is dropped completely, i.e. min_ver >= 3.12, unify into --fflags + reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') + reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + if not (MESON_ONLY_VER or backend_key == 'meson'): + fc_flags.extend(distutils_flags) + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] + + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)] == v: + if MESON_ONLY_VER or backend_key == 'meson': + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) + else: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print(f'Unknown vendor: "{s[len(v):]}"') + nv = ov + i = flib_flags.index(s) + flib_flags[i] = '--fcompiler=' + nv # noqa: B909 + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags) <= 2, repr(flib_flags) + + _reg5 = re.compile(r'--(verbose)') + setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in setup_flags] + + if '--quiet' in f2py_flags: + setup_flags.append('--quiet') + + # Ugly filter to remove everything but sources + sources = sys.argv[1:] + f2cmapopt = '--f2cmap' + if f2cmapopt in sys.argv: + i = sys.argv.index(f2cmapopt) + f2py_flags.extend(sys.argv[i:i + 2]) + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + + pyf_files, _sources = filter_files("", "[.]pyf([.]src|)", sources) 
+ sources = pyf_files + _sources + modulename = validate_modulename(pyf_files, modulename) + extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) + for i in range(len(define_macros)): + name_value = define_macros[i].split('=', 1) + if len(name_value) == 1: + name_value.append(None) + if len(name_value) == 2: + define_macros[i] = tuple(name_value) + else: + print('Invalid use of -D:', name_value) + + # Construct wrappers / signatures / things + if backend_key == 'meson': + if not pyf_files: + outmess('Using meson backend\nWill pass --lower to f2py\nSee https://numpy.org/doc/stable/f2py/buildtools/meson.html\n') + f2py_flags.append('--lower') + run_main(f" {' '.join(f2py_flags)} -m {modulename} {' '.join(sources)}".split()) + else: + run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) + + # Order matters here, includes are needed for run_main above + include_dirs, _, sources = get_newer_options(sources) + # Now use the builder + builder = build_backend( + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + setup_flags, + remove_build_dir, + {"dependencies": dependencies}, + ) + + builder.compile() + + +def validate_modulename(pyf_files, modulename='untitled'): + if len(pyf_files) > 1: + raise ValueError("Only one .pyf file per call") + if pyf_files: + pyff = pyf_files[0] + pyf_modname = auxfuncs.get_f2py_modulename(pyff) + if modulename != pyf_modname: + outmess( + f"Ignoring -m {modulename}.\n" + f"{pyff} defines {pyf_modname} to be the modulename.\n" + ) + modulename = pyf_modname + return modulename + +def main(): + if '--help-link' in sys.argv[1:]: + sys.argv.remove('--help-link') + if MESON_ONLY_VER: + outmess("Use --dep for meson builds\n") + else: + from numpy.distutils.system_info import show_all + show_all() + return + + if '-c' in sys.argv[1:]: + run_compile() + else: + run_main(sys.argv[1:]) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/f2py2e.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/f2py2e.pyi new file mode 100644 index 0000000..dd1d0c3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/f2py2e.pyi @@ -0,0 +1,76 @@ +import argparse +import pprint +from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence +from types import ModuleType +from typing import Any, Final, NotRequired, TypedDict, type_check_only + +from typing_extensions import TypeVar, override + +from .__version__ import version +from .auxfuncs import _Bool +from .auxfuncs import outmess as outmess + +### + +_KT = TypeVar("_KT", bound=Hashable) +_VT = TypeVar("_VT") + +@type_check_only +class _F2PyDict(TypedDict): + csrc: list[str] + h: list[str] + fsrc: NotRequired[list[str]] + ltx: NotRequired[list[str]] + +@type_check_only +class _PreparseResult(TypedDict): + dependencies: list[str] + backend: str + modulename: str + +### + +MESON_ONLY_VER: Final[bool] +f2py_version: Final = version +numpy_version: Final = version +__usage__: Final[str] + +show = pprint.pprint + +class CombineIncludePaths(argparse.Action): + @override + def __call__( + self, + /, + parser: argparse.ArgumentParser, + namespace: 
argparse.Namespace, + values: str | Sequence[str] | None, + option_string: str | None = None, + ) -> None: ... + +# +def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... +def run_compile() -> None: ... +def main() -> None: ... + +# +def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... +def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... +def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... +def dict_append(d_out: MutableMapping[_KT, _VT], d_in: Mapping[_KT, _VT]) -> None: ... +def filter_files( + prefix: str, + suffix: str, + files: Iterable[str], + remove_prefix: _Bool | None = None, +) -> tuple[list[str], list[str]]: ... +def get_prefix(module: ModuleType) -> str: ... +def get_newer_options(iline: Iterable[str]) -> tuple[list[str], Any, list[str]]: ... + +# +def f2py_parser() -> argparse.ArgumentParser: ... +def make_f2py_compile_parser() -> argparse.ArgumentParser: ... + +# +def preparse_sysargv() -> _PreparseResult: ... +def validate_modulename(pyf_files: Sequence[str], modulename: str = "untitled") -> str: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/f90mod_rules.py b/.venv/lib/python3.12/site-packages/numpy/f2py/f90mod_rules.py new file mode 100644 index 0000000..d13a42a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/f90mod_rules.py @@ -0,0 +1,269 @@ +""" +Build F90 module support for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.27 $"[10:-1] + +f2py_version = 'See `f2py -v`' + +import numpy as np + +from . import capi_maps, func2subr + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * +from .crackfortran import undo_rmbadname, undo_rmbadname1 + +options = {} + + +def findf90modules(m): + if ismodule(m): + return [m] + if not hasbody(m): + return [] + ret = [] + for b in m['body']: + if ismodule(b): + ret.append(b) + else: + ret = ret + findf90modules(b) + return ret + + +fgetdims1 = """\ + external f2pysetdata + logical ns + integer r,i + integer(%d) s(*) + ns = .FALSE. + if (allocated(d)) then + do i=1,r + if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then + ns = .TRUE. + end if + end do + if (ns) then + deallocate(d) + end if + end if + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + +fgetdims2 = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + end if + flag = 1 + call f2pysetdata(d,allocated(d))""" + +fgetdims2_sa = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + !s(r) must be equal to len(d(1)) + end if + flag = 2 + call f2pysetdata(d,allocated(d))""" + + +def buildhooks(pymod): + from . 
import rules + ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], + 'need': ['F_FUNC', 'arrayobject.h'], + 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, + 'docs': ['"Fortran 90/95 modules:\\n"'], + 'latexdoc': []} + fhooks = [''] + + def fadd(line, s=fhooks): + s[0] = f'{s[0]}\n {line}' + doc = [''] + + def dadd(line, s=doc): + s[0] = f'{s[0]}\n{line}' + + usenames = getuseblocks(pymod) + for m in findf90modules(pymod): + sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ + m['name']], [] + sargsp = [] + ifargs = [] + mfargs = [] + if hasbody(m): + for b in m['body']: + notvars.append(b['name']) + for n in m['vars'].keys(): + var = m['vars'][n] + + if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): + onlyvars.append(n) + mfargs.append(n) + outmess(f"\t\tConstructing F90 module support for \"{m['name']}\"...\n") + if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: + outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") + continue + + # gh-25186 + if m['name'] in usenames and containscommon(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") + continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue + if onlyvars: + outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") + chooks = [''] + + def cadd(line, s=chooks): + s[0] = f'{s[0]}\n{line}' + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = f'{s[0]}\n{line}' + + vrd = capi_maps.modsign2map(m) + cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) + dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + if hasnote(m): + note = m['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(note) + if onlyvars: + dadd('\\begin{description}') + for n in onlyvars: + var = m['vars'][n] + modobjs.append(n) + ct = capi_maps.getctype(var) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() + if not dms: + dms = '-1' + use_fgetdims2 = fgetdims2 + cadd('\t{"%s",%s,{{%s}},%s, %s},' % + (undo_rmbadname1(n), dm['rank'], dms, at, + capi_maps.get_elsize(var))) + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, var))) + if hasnote(var): + note = var['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(f'--- {note}') + if isallocatable(var): + fargs.append(f"f2py_{m['name']}_getdims_{n}") + efargs.append(fargs[-1]) + sargs.append( + f'void (*{n})(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') + sargsp.append('void (*)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].func = {n};") + fadd(f'subroutine {fargs[-1]}(r,s,f2pysetdata,flag)') + fadd(f"use {m['name']}, only: d => {undo_rmbadname1(n)}\n") + fadd('integer flag\n') + fhooks[0] = fhooks[0] + fgetdims1 + dms = range(1, int(dm['rank']) + 1) + fadd(' allocate(d(%s))\n' % + (','.join(['s(%s)' % i for i in dms]))) + fhooks[0] = fhooks[0] + use_fgetdims2 + fadd(f'end subroutine {fargs[-1]}') + else: + fargs.append(n) + sargs.append(f'char *{n}') + sargsp.append('char*') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {n};") + if onlyvars: + dadd('\\end{description}') + if hasbody(m): + for b in m['body']: + if not isroutine(b): + outmess("f90mod_rules.buildhooks:" + f" 
skipping {b['block']} {b['name']}\n") + continue + modobjs.append(f"{b['name']}()") + b['modulename'] = m['name'] + api, wrap = rules.buildapi(b) + if isfunction(b): + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + elif wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) + else: + fargs.append(b['name']) + mfargs.append(fargs[-1]) + api['externroutines'] = [] + ar = applyrules(api, vrd) + ar['docs'] = [] + ar['docshort'] = [] + ret = dictappend(ret, ar) + cadd(('\t{"%s",-1,{{-1}},0,0,NULL,(void *)' + 'f2py_rout_#modulename#_%s_%s,' + 'doc_f2py_rout_#modulename#_%s_%s},') + % (b['name'], m['name'], b['name'], m['name'], b['name'])) + sargs.append(f"char *{b['name']}") + sargsp.append('char *') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {b['name']};") + cadd('\t{NULL}\n};\n') + iadd('}') + ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( + m['name'], ','.join(sargs), ihooks[0]) + if '_' in m['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' + % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) + iadd('static void f2py_init_%s(void) {' % (m['name'])) + iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, m['name'], m['name'].upper(), m['name'])) + iadd('}\n') + ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks + ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + fadd('') + fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") + if mfargs: + for a in undo_rmbadname(mfargs): + fadd(f"use {m['name']}, only : {a}") + if ifargs: + fadd(' '.join(['interface'] + ifargs)) + fadd('end interface') + fadd('external f2pysetupfunc') + if efargs: + for a in undo_rmbadname(efargs): + fadd(f'external {a}') + fadd(f"call f2pysetupfunc({','.join(undo_rmbadname(fargs))})") + fadd(f"end subroutine f2pyinit{m['name']}\n") + + dadd('\n'.join(ret['latexdoc']).replace( + r'\subsection{', r'\subsubsection{')) + + ret['latexdoc'] = [] + ret['docs'].append(f"\"\t{m['name']} --- {','.join(undo_rmbadname(modobjs))}\"") + + ret['routine_defs'] = '' + ret['doc'] = [] + ret['docshort'] = [] + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fhooks[0] diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/f90mod_rules.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/f90mod_rules.pyi new file mode 100644 index 0000000..4df004e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/f90mod_rules.pyi @@ -0,0 +1,16 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .auxfuncs import isintent_dict as isintent_dict + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" + +options: Final[dict[str, bool]] + +fgetdims1: Final[str] = ... +fgetdims2: Final[str] = ... +fgetdims2_sa: Final[str] = ... + +def findf90modules(m: Mapping[str, object]) -> list[dict[str, Any]]: ... +def buildhooks(pymod: Mapping[str, object]) -> dict[str, Any]: ... 
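Editor's sketch, not part of the patch: how the two entry points stubbed above
are typically combined. crackfortran() parses Fortran sources into block
dicts, findf90modules() picks out the Fortran 90 module blocks, and
buildhooks() generates the wrapper support for each. The file name below is
hypothetical.

    from numpy.f2py import crackfortran, f90mod_rules

    # blocks = crackfortran.crackfortran(['modlib.f90'])
    # for top in blocks:
    #     for mod in f90mod_rules.findf90modules(top):
    #         print(mod['name'])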
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/func2subr.py b/.venv/lib/python3.12/site-packages/numpy/f2py/func2subr.py new file mode 100644 index 0000000..0a87500 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/func2subr.py @@ -0,0 +1,329 @@ +""" + +Rules for building C/API module with f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy + +from ._isocbind import isoc_kindmap +from .auxfuncs import ( + getfortranname, + isexternal, + isfunction, + isfunction_wrap, + isintent_in, + isintent_out, + islogicalfunction, + ismoduleroutine, + isscalar, + issubroutine, + issubroutine_wrap, + outmess, + show, +) + + +def var2fixfortran(vars, a, fa=None, f90mode=None): + if fa is None: + fa = a + if a not in vars: + show(vars) + outmess(f'var2fixfortran: No definition for argument "{a}".\n') + return '' + if 'typespec' not in vars[a]: + show(vars[a]) + outmess(f'var2fixfortran: No typespec for argument "{a}".\n') + return '' + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = f"{vardef}({vars[a]['typename']})" + selector = {} + lk = '' + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + lk = 'kind' + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + lk = 'len' + if '*' in selector: + if f90mode: + if selector['*'] in ['*', ':', '(*)']: + vardef = f'{vardef}(len=*)' + else: + vardef = f"{vardef}({lk}={selector['*']})" + elif selector['*'] in ['*', ':']: + vardef = f"{vardef}*({selector['*']})" + else: + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" + + vardef = f'{vardef} {fa}' + if 'dimension' in vars[a]: + vardef = f"{vardef}({','.join(vars[a]['dimension'])})" + return vardef + +def useiso_c_binding(rout): + useisoc = False + for key, value in rout['vars'].items(): + kind_value = value.get('kindselector', {}).get('kind') + if kind_value in isoc_kindmap: + return True + return useisoc + +def createfuncwrapper(rout, signature=0): + assert isfunction(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = f'{ret[0]}\n {line}' + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + newname = f'{name}f2pywrap' + + if newname not in vars: + vars[newname] = vars[name] + args = [newname] + rout['args'][1:] + else: + args = [newname] + rout['args'] + + l_tmpl = var2fixfortran(vars, name, '@@@NAME@@@', f90mode) + if l_tmpl[:13] == 'character*(*)': + if f90mode: + l_tmpl = 'character(len=10)' + l_tmpl[13:] + else: + l_tmpl = 'character*10' + l_tmpl[13:] + charselect = vars[name]['charselector'] + if charselect.get('*', '') == '(*)': + charselect['*'] = '10' + + l1 = 
l_tmpl.replace('@@@NAME@@@', newname) + rl = None + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + # gh-23598 fix warning + # Essentially, this gets called again with modules where the name of the + # function is added to the arguments, which is not required, and removed + sargs = sargs.replace(f"{name}, ", '') + args = [arg for arg in args if arg != name] + rout['args'] = args + add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") + if not signature: + add(f"use {rout['modulename']}, only : {fortranname}") + if useisoc: + add('use iso_c_binding') + else: + add(f'subroutine f2pywrap{name} ({sargs})') + if useisoc: + add('use iso_c_binding') + if not need_interface: + add(f'external {fortranname}') + rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + args = args[1:] + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add(f'external {a}') + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isintent_in(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + add(l1) + if rl is not None: + add(rl) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + if islogicalfunction(rout): + add(f'{newname} = .not.(.not.{fortranname}({sargs}))') + else: + add(f'{newname} = {fortranname}({sargs})') + if f90mode: + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") + else: + add('end') + return ret[0] + + +def createsubrwrapper(rout, signature=0): + assert issubroutine(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = f'{ret[0]}\n {line}' + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + + args = rout['args'] + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") + if useisoc: + add('use iso_c_binding') + if not signature: + add(f"use {rout['modulename']}, only : {fortranname}") + else: + add(f'subroutine f2pywrap{name} ({sargs})') + if useisoc: + add('use iso_c_binding') + if not need_interface: + add(f'external {fortranname}') + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add(f'external {a}') + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: 
+ if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' in line: + continue + add(line) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + add(f'call {fortranname}({sargs})') + if f90mode: + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") + else: + add('end') + return ret[0] + + +def assubr(rout): + if isfunction_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + fname = name + rname = fname + if 'result' in rout: + rname = rout['result'] + rout['vars'][fname] = rout['vars'][rname] + fvar = rout['vars'][fname] + if not isintent_out(fvar): + if 'intent' not in fvar: + fvar['intent'] = [] + fvar['intent'].append('out') + flag = 1 + for i in fvar['intent']: + if i.startswith('out='): + flag = 0 + break + if flag: + fvar['intent'].append(f'out={rname}') + rout['args'][:] = [fname] + rout['args'] + return rout, createfuncwrapper(rout) + if issubroutine_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' + % (name, fortranname)) + rout = copy.copy(rout) + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/func2subr.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/func2subr.pyi new file mode 100644 index 0000000..8d2b3db --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/func2subr.pyi @@ -0,0 +1,7 @@ +from .auxfuncs import _Bool, _ROut, _Var + +def var2fixfortran(vars: _Var, a: str, fa: str | None = None, f90mode: _Bool | None = None) -> str: ... +def useiso_c_binding(rout: _ROut) -> bool: ... +def createfuncwrapper(rout: _ROut, signature: int = 0) -> str: ... +def createsubrwrapper(rout: _ROut, signature: int = 0) -> str: ... +def assubr(rout: _ROut) -> tuple[dict[str, str], str]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/rules.py b/.venv/lib/python3.12/site-packages/numpy/f2py/rules.py new file mode 100644 index 0000000..667ef28 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/rules.py @@ -0,0 +1,1629 @@ +""" + +Rules for building C/API module with f2py2e. + +Here is a skeleton of a new wrapper function (13Dec2001): + +wrapper_function(args) + declarations + get_python_arguments, say, `a' and `b' + + get_a_from_python + if (successful) { + + get_b_from_python + if (successful) { + + callfortran + if (successful) { + + put_a_to_python + if (successful) { + + put_b_to_python + if (successful) { + + buildvalue = ... + + } + + } + + } + + } + cleanup_b + + } + cleanup_a + + return buildvalue + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy +import os +import sys +import time +from pathlib import Path + +# __version__.version is now the same as the NumPy version +from . 
import (
+    __version__,
+    capi_maps,
+    cfuncs,
+    common_rules,
+    f90mod_rules,
+    func2subr,
+    use_rules,
+)
+from .auxfuncs import (
+    applyrules,
+    debugcapi,
+    dictappend,
+    errmess,
+    gentitle,
+    getargs2,
+    hascallstatement,
+    hasexternals,
+    hasinitvalue,
+    hasnote,
+    hasresultnote,
+    isarray,
+    isarrayofstrings,
+    isattr_value,
+    ischaracter,
+    ischaracter_or_characterarray,
+    ischaracterarray,
+    iscomplex,
+    iscomplexarray,
+    iscomplexfunction,
+    iscomplexfunction_warn,
+    isdummyroutine,
+    isexternal,
+    isfunction,
+    isfunction_wrap,
+    isint1,
+    isint1array,
+    isintent_aux,
+    isintent_c,
+    isintent_callback,
+    isintent_copy,
+    isintent_hide,
+    isintent_inout,
+    isintent_nothide,
+    isintent_out,
+    isintent_overwrite,
+    islogical,
+    islong_complex,
+    islong_double,
+    islong_doublefunction,
+    islong_long,
+    islong_longfunction,
+    ismoduleroutine,
+    isoptional,
+    isrequired,
+    isscalar,
+    issigned_long_longarray,
+    isstring,
+    isstringarray,
+    isstringfunction,
+    issubroutine,
+    issubroutine_wrap,
+    isthreadsafe,
+    isunsigned,
+    isunsigned_char,
+    isunsigned_chararray,
+    isunsigned_long_long,
+    isunsigned_long_longarray,
+    isunsigned_short,
+    isunsigned_shortarray,
+    l_and,
+    l_not,
+    l_or,
+    outmess,
+    replace,
+    requiresf90wrapper,
+    stripcomma,
+)
+
+f2py_version = __version__.version
+numpy_version = __version__.version
+
+options = {}
+sepdict = {}
+# for k in ['need_cfuncs']: sepdict[k]=','
+for k in ['decl',
+          'frompyobj',
+          'cleanupfrompyobj',
+          'topyarr', 'method',
+          'pyobjfrom', 'closepyobjfrom',
+          'freemem',
+          'userincludes',
+          'includes0', 'includes', 'typedefs', 'typedefs_generated',
+          'cppmacros', 'cfuncs', 'callbacks',
+          'latexdoc',
+          'restdoc',
+          'routine_defs', 'externroutines',
+          'initf2pywraphooks',
+          'commonhooks', 'initcommonhooks',
+          'f90modhooks', 'initf90modhooks']:
+    sepdict[k] = '\n'
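Editor's note, not part of the patch: the rule templates that follow are plain
strings with #name#-style holes; the substitution itself is done by the
applyrules/replace helpers imported from auxfuncs above. A minimal sketch of
the idea, with hypothetical values:

    # Illustrative only -- the real machinery is auxfuncs.applyrules/replace.
    template = '/* File: #modulename#module.c (f2py #f2py_version#) */'
    filled = (template.replace('#modulename#', 'example')
                      .replace('#f2py_version#', '2.0.0'))
    # -> '/* File: examplemodule.c (f2py 2.0.0) */'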
+
+#################### Rules for C/API module #################
+
+generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
+module_rules = {
+    'modulebody': """\
+/* File: #modulename#module.c
+ * This file is auto-generated with f2py (version:#f2py_version#).
+ * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
+ * written by Pearu Peterson <pearu@cens.ioc.ee>.
+ * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """
+ * Do not edit this file directly unless you know what you are doing!!!
+ */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#ifndef PY_SSIZE_T_CLEAN
+#define PY_SSIZE_T_CLEAN
+#endif /* PY_SSIZE_T_CLEAN */
+
+/* Unconditionally included */
+#include <Python.h>
+#include <numpy/npy_os.h>
+
+""" + gentitle("See f2py2e/cfuncs.py: includes") + """
+#includes#
+#includes0#
+
+""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """
+static PyObject *#modulename#_error;
+static PyObject *#modulename#_module;
+
+""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """
+#typedefs#
+
+""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """
+#typedefs_generated#
+
+""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """
+#cppmacros#
+
+""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """
+#cfuncs#
+
+""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """
+#userincludes#
+
+""" + gentitle("See f2py2e/capi_rules.py: usercode") + """
+#usercode#
+
+/* See f2py2e/rules.py */
+#externroutines#
+
+""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """
+#usercode1#
+
+""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """
+#callbacks#
+
+""" + gentitle("See f2py2e/rules.py: buildapi") + """
+#body#
+
+""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """
+#f90modhooks#
+
+""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """
+
+""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """
+#commonhooks#
+
+""" + gentitle("See f2py2e/rules.py") + """
+
+static FortranDataDef f2py_routine_defs[] = {
+#routine_defs#
+    {NULL}
+};
+
+static PyMethodDef f2py_module_methods[] = {
+#pymethoddef#
+    {NULL,NULL}
+};
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "#modulename#",
+    NULL,
+    -1,
+    f2py_module_methods,
+    NULL,
+    NULL,
+    NULL,
+    NULL
+};
+
+PyMODINIT_FUNC PyInit_#modulename#(void) {
+    int i;
+    PyObject *m,*d, *s, *tmp;
+    m = #modulename#_module = PyModule_Create(&moduledef);
+    Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
+    import_array();
+    if (PyErr_Occurred())
+        {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
+    d = PyModule_GetDict(m);
+    s = PyUnicode_FromString(\"#f2py_version#\");
+    PyDict_SetItemString(d, \"__version__\", s);
+    Py_DECREF(s);
+    s = PyUnicode_FromString(
+        \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
+    PyDict_SetItemString(d, \"__doc__\", s);
+    Py_DECREF(s);
+    s = PyUnicode_FromString(\"""" + numpy_version + """\");
+    PyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
+    Py_DECREF(s);
+    #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
+    /*
+     * Store the error object inside the dict, so that it could get deallocated.
+     * (in practice, this is a module, so it likely will not and cannot.)
+     */
+    PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
+    Py_DECREF(#modulename#_error);
+    for(i=0;f2py_routine_defs[i].name!=NULL;i++) {
+        tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
+        PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
+        Py_DECREF(tmp);
+    }
+#initf2pywraphooks#
+#initf90modhooks#
+#initcommonhooks#
+#interface_usercode#
+
+#if Py_GIL_DISABLED
+    // signal whether this module supports running with the GIL disabled
+    PyUnstable_Module_SetGIL(m , #gil_used#);
+#endif
+
+#ifdef F2PY_REPORT_ATEXIT
+    if (!
PyErr_Occurred()) + on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); +#endif + + if (PyType_Ready(&PyFortran_Type) < 0) { + return NULL; + } + + return m; +} +#ifdef __cplusplus +} +#endif +""", + 'separatorsfor': {'latexdoc': '\n\n', + 'restdoc': '\n\n'}, + 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', + '#modnote#\n', + '#latexdoc#'], + 'restdoc': ['Module #modulename#\n' + '=' * 80, + '\n#restdoc#'] +} + +defmod_rules = [ + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, + } +] + +routine_rules = { + 'separatorsfor': sepdict, + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; +/* #declfortranroutine# */ +static PyObject *#apiname#(const PyObject *capi_self, + PyObject *capi_args, + PyObject *capi_keywds, + #functype# (*f2py_func)(#callprotoargument#)) { + PyObject * volatile capi_buildvalue = NULL; + volatile int f2py_success = 1; +#decl# + static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; +#usercode# +#routdebugenter# +#ifdef F2PY_REPORT_ATEXIT +f2py_start_clock(); +#endif + if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ + \"#argformat#|#keyformat##xaformat#:#pyname#\",\\ + capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL; +#frompyobj# +/*end of frompyobj*/ +#ifdef F2PY_REPORT_ATEXIT +f2py_start_call_clock(); +#endif +#callfortranroutine# +if (PyErr_Occurred()) + f2py_success = 0; +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_call_clock(); +#endif +/*end of callfortranroutine*/ + if (f2py_success) { +#pyobjfrom# +/*end of pyobjfrom*/ + CFUNCSMESS(\"Building return value.\\n\"); + capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); +/*closepyobjfrom*/ +#closepyobjfrom# + } /*if (f2py_success) after callfortranroutine*/ +/*cleanupfrompyobj*/ +#cleanupfrompyobj# + if (capi_buildvalue == NULL) { +#routdebugfailure# + } else { +#routdebugleave# + } + CFUNCSMESS(\"Freeing memory.\\n\"); +#freemem# +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_clock(); +#endif + return capi_buildvalue; +} +#endtitle# +""", + 'routine_defs': '#routine_def#', + 'initf2pywraphooks': '#initf2pywraphook#', + 'externroutines': '#declfortranroutine#', + 'doc': '#docreturn##name#(#docsignature#)', + 'docshort': '#docreturn##name#(#docsignatureshort#)', + 'docs': '" #docreturn##name#(#docsignature#)\\n"\n', + 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], + 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, + 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', + """ +\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} +#routnote# + +#latexdocstrsigns# +"""], + 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, + + ] +} + +################## Rules for C/API function ############## + +rout_rules = [ + { # Init + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': 
'\n', + }, + 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + # this list will be reversed + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], + 'pyobjfrom': '/*pyobjfrom*/', + # this list will be reversed + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { + 'apiname': 'f2py_rout_#modulename#_#name#', + 'pyname': '#modulename#.#name#', + 'decl': '', + '_check': l_not(ismoduleroutine) + }, { + 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', + 'pyname': '#modulename#.#f90modulename#.#name#', + 'decl': '', + '_check': ismoduleroutine + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', + ismoduleroutine: '', + isdummyroutine: '' + }, + 'routine_def': { + l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; + /*(*f2py_func)(#callfortran#);*/'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: """ }"""} + ], + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 
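Editor's note, not part of the patch: in these rule entries a dict value maps
predicates over the routine block (built from the l_and/l_not/l_or combinators
imported above) to template fragments; applyrules emits only the fragments
whose predicate holds for the routine, and the '_check' key decides whether
the whole rule entry applies at all.

    # Illustrative only: {isdummyroutine: 'A', l_not(isdummyroutine): 'B'}
    # contributes 'A' for dummy routines and 'B' for everything else.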
'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern void #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': issubroutine_wrap, + }, { # Function + 'functype': '#ctype#', + 'docreturn': {l_not(isintent_hide): '#rname#,'}, + 'docstrout': '#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasresultnote: '--- #resultnote#'}], + 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ +#ifdef USESCOMPAQFORTRAN + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); +#else + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +#endif +"""}, + {l_and(debugcapi, l_not(isstringfunction)): """\ + 
fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +"""} + ], + '_check': l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', + isdummyroutine: '' + }, + 'routine_def': { + l_and(l_not(l_or(ismoduleroutine, isintent_c)), + l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + '(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'}, + {iscomplexfunction: + ' PyObject *#name#_return_value_capi = Py_None;'} + ], + 'callfortranroutine': [ + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; +/* #name#_return_value = (*f2py_func)(#callfortran#);*/ +'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' #name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {l_and(debugcapi, iscomplexfunction) + : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + 'need': [{l_not(isdummyroutine): 'F_FUNC'}, + {iscomplexfunction: 'pyobj_from_#ctype#1'}, + {islong_longfunction: 'long_long'}, + {islong_doublefunction: 'long_double'}], + 'returnformat': {l_not(isintent_hide): '#rformat#'}, + 'return': {iscomplexfunction: ',#name#_return_value_capi', + l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, + '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap + 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + }, + 'decl': [' #ctype# #name#_return_value = NULL;', + ' int #name#_return_value_len = 0;'], + 'callfortran': '#name#_return_value,#name#_return_value_len,', + 'callfortranroutine': [' #name#_return_value_len = #rlength#;', + ' if ((#name#_return_value = (string)malloc(#name#_return_value_len+1) == NULL) {', + ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', + ' f2py_success = 0;', + ' } else {', + " (#name#_return_value)[#name#_return_value_len] = '\\0';", + ' }', + ' if (f2py_success) {', + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' 
Py_BEGIN_ALLOW_THREADS'}, + """\ +#ifdef USESCOMPAQFORTRAN + (*f2py_func)(#callcompaqfortran#); +#else + (*f2py_func)(#callfortran#); +#endif +""", + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {debugcapi: + ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + ' } /* if (f2py_success) after (string)malloc */', + ], + 'returnformat': '#rformat#', + 'return': ',#name#_return_value', + 'freemem': ' STRINGFREE(#name#_return_value);', + 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check': l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + }, + { # Debugging + 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + '_check': debugcapi + } +] + +################ Rules for arguments ################## + +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', + isint1: 'signed_char', + ischaracter_or_characterarray: 'character', + } + +aux_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing auxiliary variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + 'need': typedef_need_dict, + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'need': {hasinitvalue: 'math.h'}, + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, + { + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': iscomplex + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ], + 'need': ['len..'], + '_check': isstring + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ], + 'need': ['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': l_or(isunsigned_chararray, isunsigned_char), + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # 
Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +arg_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, + }, + # Doc signatures + { + 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, + 'docstrout': {isintent_out: '#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'depend': '' + }, + # Required/Optional arguments + { + 'kwlist': '"#varname#",', + 'docsign': '#varname#,', + '_check': l_and(isintent_nothide, l_not(isoptional)) + }, + { + 'kwlistopt': '"#varname#",', + 'docsignopt': '#varname#=#showinit#,', + 'docsignoptshort': '#varname#,', + '_check': l_and(isintent_nothide, isoptional) + }, + # Docstring/BuildValue + { + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': isintent_out + }, + # Externals (call-back functions) + { # Common + 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, + 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, + 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, + 'docstrcbs': '#cbdocstr#', + 'latexdocstrcbs': '\\item[] #cblatexdocstr#', + 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, + 'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };', + ' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;', + ' PyTupleObject *#varname#_xa_capi = NULL;', + {l_not(isintent_callback): + ' #cbname#_typedef #varname#_cptr;'} + ], + 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'xaformat': {isintent_nothide: 'O!'}, + 'args_capi': {isrequired: ',&#varname#_cb.capi'}, + 'keys_capi': {isoptional: ',&#varname#_cb.capi'}, + 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', + 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))', + 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, + 'need': ['#cbname#', 'setjmp.h'], + '_check': isexternal + }, + { + 'frompyobj': [{l_not(isintent_callback): """\ +if(F2PyCapsule_Check(#varname#_cb.capi)) { + #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi); +} else { + #varname#_cptr = #cbname#; +} +"""}, {isintent_callback: """\ +if (#varname#_cb.capi==Py_None) { + #varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); + if (#varname#_cb.capi) { + if (#varname#_xa_capi==NULL) { + if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { + PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); + if (capi_tmp) { + #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + } + else { + #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + } 
+ if (#varname#_xa_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); + return NULL; + } + } + } + } + if (#varname#_cb.capi==NULL) { + PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); + return NULL; + } +} +"""}, + """\ + if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) { +""", + {debugcapi: ["""\ + fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs); + CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""", + {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, + """\ + CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""", + ], + 'cleanupfrompyobj': + """\ + CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr); + Py_DECREF(#varname#_cb.args_capi); + }""", + 'need': ['SWAP', 'create_cb_arglist'], + '_check': isexternal, + '_depend': '' + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'callfortran': {l_or(isintent_c, isattr_value): '#varname#,', l_not(l_or(isintent_c, isattr_value)): '&#varname#,'}, + 'return': {isintent_out: ',#varname#'}, + '_check': l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue: 'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isscalar, l_not(iscomplex), l_not(isstring), + isintent_nothide) + }, { + 'frompyobj': [ + # hasinitvalue... + # if pyobj is None: + # varname = init + # else + # from_pyobj(varname) + # + # isoptional and noinitvalue... + # if pyobj is not None: + # from_pyobj(varname) + # else: + # varname is uninitialized + # + # ... 
+ # from_pyobj(varname) + # + {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else', + '_depend': ''}, + {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)', + '_depend': ''}, + {l_not(islogical): '''\ + f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); + if (f2py_success) {'''}, + {islogical: '''\ + #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); + f2py_success = 1; + if (f2py_success) {'''}, + ], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/', + 'need': {l_not(islogical): '#ctype#_from_pyobj'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), + '_depend': '' + }, { # Hidden + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + 'need': typedef_need_dict, + '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), + '_depend': '' + }, { # Common + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + '_check': l_and(isscalar, l_not(iscomplex)), + '_depend': '' + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'return': {isintent_out: ',#varname#_capi'}, + '_check': iscomplex + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + '_check': l_and(iscomplex, isintent_nothide) + }, { + 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + {l_and(isoptional, l_not(hasinitvalue)) + : ' if (#varname#_capi != Py_None)'}, + ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {'], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', + 'need': ['#ctype#_from_pyobj'], + '_check': l_and(iscomplex, isintent_nothide), + '_depend': '' + }, { # Hidden + 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'}, + '_check': l_and(iscomplex, isintent_hide) + }, { + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': l_and(iscomplex, isintent_hide), + '_depend': '' + }, { # Common + 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'need': ['pyobj_from_#ctype#1'], + '_check': iscomplex + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + '_check': iscomplex, + '_depend': '' + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ' PyObject *#varname#_capi = Py_None;'], + 'callfortran': '#varname#,', + 'callfortranappend': 'slen(#varname#),', + 'pyobjfrom': [ + {debugcapi: + ' fprintf(stderr,' + '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + # The trailing null value for Fortran is blank. 
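+            # (Assuming STRINGPADN(buf, n, FROM, TO) rewrites trailing FROM
+            #  bytes as TO: blanks become NULs here on the way out to Python,
+            #  and the inverse swap in 'frompyobj' below restores blank
+            #  padding for Fortran -- see the STRINGPADN macro in cfuncs.py.)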
+ {l_and(isintent_out, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + ], + 'return': {isintent_out: ',#varname#'}, + 'need': ['len..', + {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}], + '_check': isstring + }, { # Common + 'frompyobj': [ + """\ + slen(#varname#) = #elsize#; + f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" +"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" +"""`#varname#\' of #pyname# to C #ctype#\"); + if (f2py_success) {""", + # The trailing null value for Fortran is blank. + {l_not(isintent_c): + " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, + ], + 'cleanupfrompyobj': """\ + STRINGFREE(#varname#); + } /*if (f2py_success) of #varname#*/""", + 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE', + {l_not(isintent_c): 'STRINGPADN'}], + '_check': isstring, + '_depend': '' + }, { # Not hidden + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': [ + {l_and(isintent_inout, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + {isintent_inout: '''\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#, + slen(#varname#)); + if (f2py_success) {'''}], + 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#', + l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'}, + '_check': l_and(isstring, isintent_nothide) + }, { # Hidden + '_check': l_and(isstring, isintent_hide) + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + '_check': isstring, + '_depend': '' + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ' PyArrayObject *capi_#varname#_as_array = NULL;', + ' int capi_#varname#_intent = 0;', + {isstringarray: ' int slen(#varname#) = 0;'}, + ], + 'callfortran': '#varname#,', + 'callfortranappend': {isstringarray: 'slen(#varname#),'}, + 'return': {isintent_out: ',capi_#varname#_as_array'}, + 'need': 'len..', + '_check': isarray + }, { # intent(overwrite) array + 'decl': ' int capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', + }, + { # intent(copy) array + 'decl': ' int capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray, + '_depend': '' + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi 
= Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + '_check': l_and(isarray, isintent_nothide) + }, { + 'frompyobj': [ + ' #setdims#;', + ' capi_#varname#_intent |= #intent#;', + (' const char capi_errmess[] = "#modulename#.#pyname#:' + ' failed to create array from the #nth# `#varname#`";'), + {isintent_hide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,Py_None,capi_errmess);'}, + {isintent_nothide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,#varname#_capi,capi_errmess);'}, + """\ + if (capi_#varname#_as_array == NULL) { + PyObject* capi_err = PyErr_Occurred(); + if (capi_err == NULL) { + capi_err = #modulename#_error; + PyErr_SetString(capi_err, capi_errmess); + } + } else { + #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_as_array)); +""", + {isstringarray: + ' slen(#varname#) = f2py_itemsize(#varname#);'}, + {hasinitvalue: [ + {isintent_nothide: + ' if (#varname#_capi == Py_None) {'}, + {isintent_hide: ' {'}, + {iscomplexarray: ' #ctype# capi_c;'}, + """\ + int *_i,capi_i=0; + CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), + PyArray_NDIM(capi_#varname#_as_array),1)) { + while ((_i = nextforcomb(&cache))) + #varname#[capi_i++] = #init#; /* fortran way */ + } else { + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + PyErr_SetString(exc ? exc : #modulename#_error, + \"Initialization of #nth# #varname# failed (initforcomb).\"); + npy_PyErr_ChainExceptionsCause(exc, val, tb); + f2py_success = 0; + } + } + if (f2py_success) {"""]}, + ], + 'cleanupfrompyobj': [ # note that this list will be reversed + ' } ' + '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */', + {l_not(l_or(isintent_out, isintent_hide)): """\ + if((PyObject *)capi_#varname#_as_array!=#varname#_capi) { + Py_XDECREF(capi_#varname#_as_array); }"""}, + {l_and(isintent_hide, l_not(isintent_out)) + : """ Py_XDECREF(capi_#varname#_as_array);"""}, + {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'}, + ], + '_check': isarray, + '_depend': '' + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Character + { + 'need': 'string', + '_check': ischaracter, + }, + # Character array + { + 'need': 'string', + '_check': ischaracterarray, + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +################# Rules for checking ############### + +check_rules = [ + { + 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'need': 'len..' 
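+        # Each concrete rule below opens a CHECK*(...) { guard in 'frompyobj'
+        # and emits the matching closing brace from 'cleanupfrompyobj', so a
+        # failing runtime check skips the Fortran call and falls straight
+        # through to cleanup (the CHECK* macros are defined in cfuncs.py).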
+ }, { + 'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/', + 'need': 'CHECKSCALAR', + '_check': l_and(isscalar, l_not(iscomplex)), + '_break': '' + }, { + 'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/', + 'need': 'CHECKSTRING', + '_check': isstring, + '_break': '' + }, { + 'need': 'CHECKARRAY', + 'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/', + '_check': isarray, + '_break': '' + }, { + 'need': 'CHECKGENERIC', + 'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/', + } +] + +########## Applying the rules. No need to modify what follows ############# + +#################### Build C/API module ####################### + + +def buildmodule(m, um): + """ + Return + """ + outmess(f" Building module \"{m['name']}\"...\n") + ret = {} + mod_rules = defmod_rules[:] + vrd = capi_maps.modsign2map(m) + rd = dictappend({'f2py_version': f2py_version}, vrd) + funcwrappers = [] + funcwrappers2 = [] # F90 codes + for n in m['interfaced']: + nb = None + for bi in m['body']: + if bi['block'] not in ['interface', 'abstract interface']: + errmess('buildmodule: Expected interface block. Skipping.\n') + continue + for b in bi['body']: + if b['name'] == n: + nb = b + break + + if not nb: + print( + f'buildmodule: Could not find the body of interfaced routine "{n}". Skipping.\n', file=sys.stderr) + continue + nb_list = [nb] + if 'entry' in nb: + for k, a in nb['entry'].items(): + nb1 = copy.deepcopy(nb) + del nb1['entry'] + nb1['name'] = k + nb1['args'] = a + nb_list.append(nb1) + for nb in nb_list: + # requiresf90wrapper must be called before buildapi as it + # rewrites assumed shape arrays as automatic arrays. 
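+            # (Illustration only: an assumed-shape dummy such as `real a(:)`
+            #  is re-declared in the generated F90 wrapper with an explicit
+            #  size argument, so the C wrapper can pass a plain pointer.)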
+ isf90 = requiresf90wrapper(nb) + # options is in scope here + if options['emptygen']: + b_path = options['buildpath'] + m_name = vrd['modulename'] + outmess(' Generating possibly empty wrappers"\n') + Path(f"{b_path}/{vrd['coutput']}").touch() + if isf90: + # f77 + f90 wrappers + outmess(f' Maybe empty "{m_name}-f2pywrappers2.f90"\n') + Path(f'{b_path}/{m_name}-f2pywrappers2.f90').touch() + outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n') + Path(f'{b_path}/{m_name}-f2pywrappers.f').touch() + else: + # only f77 wrappers + outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n') + Path(f'{b_path}/{m_name}-f2pywrappers.f').touch() + api, wrap = buildapi(nb) + if wrap: + if isf90: + funcwrappers2.append(wrap) + else: + funcwrappers.append(wrap) + ar = applyrules(api, vrd) + rd = dictappend(rd, ar) + + # Construct COMMON block support + cr, wrap = common_rules.buildhooks(m) + if wrap: + funcwrappers.append(wrap) + ar = applyrules(cr, vrd) + rd = dictappend(rd, ar) + + # Construct F90 module support + mr, wrap = f90mod_rules.buildhooks(m) + if wrap: + funcwrappers2.append(wrap) + ar = applyrules(mr, vrd) + rd = dictappend(rd, ar) + + for u in um: + ar = use_rules.buildusevars(u, m['use'][u['name']]) + rd = dictappend(rd, ar) + + needs = cfuncs.get_needs() + # Add mapped definitions + needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped # + if cvar in typedef_need_dict.values()] + code = {} + for n in needs.keys(): + code[n] = [] + for k in needs[n]: + c = '' + if k in cfuncs.includes0: + c = cfuncs.includes0[k] + elif k in cfuncs.includes: + c = cfuncs.includes[k] + elif k in cfuncs.userincludes: + c = cfuncs.userincludes[k] + elif k in cfuncs.typedefs: + c = cfuncs.typedefs[k] + elif k in cfuncs.typedefs_generated: + c = cfuncs.typedefs_generated[k] + elif k in cfuncs.cppmacros: + c = cfuncs.cppmacros[k] + elif k in cfuncs.cfuncs: + c = cfuncs.cfuncs[k] + elif k in cfuncs.callbacks: + c = cfuncs.callbacks[k] + elif k in cfuncs.f90modhooks: + c = cfuncs.f90modhooks[k] + elif k in cfuncs.commonhooks: + c = cfuncs.commonhooks[k] + else: + errmess(f'buildmodule: unknown need {repr(k)}.\n') + continue + code[n].append(c) + mod_rules.append(code) + for r in mod_rules: + if ('_check' in r and r['_check'](m)) or ('_check' not in r): + ar = applyrules(r, vrd, m) + rd = dictappend(rd, ar) + ar = applyrules(module_rules, rd) + + fn = os.path.join(options['buildpath'], vrd['coutput']) + ret['csrc'] = fn + with open(fn, 'w') as f: + f.write(ar['modulebody'].replace('\t', 2 * ' ')) + outmess(f" Wrote C/API module \"{m['name']}\" to file \"{fn}\"\n") + + if options['dorestdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.rest') + with open(fn, 'w') as f: + f.write('.. 
-*- rest -*-\n') + f.write('\n'.join(ar['restdoc'])) + outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' % + (options['buildpath'], vrd['modulename'])) + if options['dolatexdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.tex') + ret['ltx'] = fn + with open(fn, 'w') as f: + f.write( + f'% This file is auto-generated with f2py (version:{f2py_version})\n') + if 'shortlatex' not in options: + f.write( + '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') + f.write('\n'.join(ar['latexdoc'])) + if 'shortlatex' not in options: + f.write('\\end{document}') + outmess(' Documentation is saved to file "%s/%smodule.tex"\n' % + (options['buildpath'], vrd['modulename'])) + if funcwrappers: + wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('C -*- fortran -*-\n') + f.write( + f'C This file is autogenerated with f2py (version:{f2py_version})\n') + f.write( + 'C It contains Fortran 77 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): + if 0 <= l.find('!') < 66: + # don't split comment lines + lines.append(l + '\n') + elif l and l[0] == ' ': + while len(l) >= 66: + lines.append(l[:66] + '\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess(f' Fortran 77 wrappers are saved to "{wn}\"\n') + if funcwrappers2: + wn = os.path.join( + options['buildpath'], f"{vrd['modulename']}-f2pywrappers2.f90") + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('! -*- f90 -*-\n') + f.write( + f'! This file is autogenerated with f2py (version:{f2py_version})\n') + f.write( + '! 
It contains Fortran 90 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): + if 0 <= l.find('!') < 72: + # don't split comment lines + lines.append(l + '\n') + elif len(l) > 72 and l[0] == ' ': + lines.append(l[:72] + '&\n &') + l = l[72:] + while len(l) > 66: + lines.append(l[:66] + '&\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess(f' Fortran 90 wrappers are saved to "{wn}\"\n') + return ret + +################## Build C/API function ############# + + +stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', + 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} + + +def buildapi(rout): + rout, wrap = func2subr.assubr(rout) + args, depargs = getargs2(rout) + capi_maps.depargs = depargs + var = rout['vars'] + + if ismoduleroutine(rout): + outmess(' Constructing wrapper function "%s.%s"...\n' % + (rout['modulename'], rout['name'])) + else: + outmess(f" Constructing wrapper function \"{rout['name']}\"...\n") + # Routine + vrd = capi_maps.routsign2map(rout) + rd = dictappend({}, vrd) + for r in rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + + # Args + nth, nthk = 0, 0 + savevrd = {} + for a in args: + vrd = capi_maps.sign2map(a, var[a]) + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + if not isintent_hide(var[a]): + if not isoptional(var[a]): + nth = nth + 1 + vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument' + else: + nthk = nthk + 1 + vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword' + else: + vrd['nth'] = 'hidden' + savevrd[a] = vrd + for r in _rules: + if '_depend' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + vrd = savevrd[a] + for r in _rules: + if '_depend' not in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'check' in var[a]: + for c in var[a]['check']: + vrd['check'] = c + ar = applyrules(check_rules, vrd, var[a]) + rd = dictappend(rd, ar) + if isinstance(rd['cleanupfrompyobj'], list): + rd['cleanupfrompyobj'].reverse() + if isinstance(rd['closepyobjfrom'], list): + rd['closepyobjfrom'].reverse() + rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#', + {'docsign': rd['docsign'], + 'docsignopt': rd['docsignopt'], + 'docsignxa': rd['docsignxa']})) + optargs = stripcomma(replace('#docsignopt##docsignxa#', + {'docsignxa': rd['docsignxashort'], + 'docsignopt': rd['docsignoptshort']} + )) + if optargs == '': + rd['docsignatureshort'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_') + rd['latexdocsignatureshort'] = rd[ + 'latexdocsignatureshort'].replace(',', ', ') + cfs = stripcomma(replace('#callfortran##callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + if len(rd['callfortranappend']) > 1: + rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 
0,#callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + else: + rd['callcompaqfortran'] = cfs + rd['callfortran'] = cfs + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + + ar = applyrules(routine_rules, rd) + if ismoduleroutine(rout): + outmess(f" {ar['docshort']}\n") + else: + outmess(f" {ar['docshort']}\n") + return ar, wrap + + +#################### EOF rules.py ####################### diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/rules.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/rules.pyi new file mode 100644 index 0000000..aa91e94 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/rules.pyi @@ -0,0 +1,43 @@ +from collections.abc import Callable, Iterable, Mapping +from typing import Any, Final, TypeAlias +from typing import Literal as L + +from typing_extensions import TypeVar + +from .__version__ import version +from .auxfuncs import _Bool, _Var + +### + +_VT = TypeVar("_VT", default=str) + +_Predicate: TypeAlias = Callable[[_Var], _Bool] +_RuleDict: TypeAlias = dict[str, _VT] +_DefDict: TypeAlias = dict[_Predicate, _VT] + +### + +f2py_version: Final = version +numpy_version: Final = version + +options: Final[dict[str, bool]] = ... +sepdict: Final[dict[str, str]] = ... + +generationtime: Final[int] = ... +typedef_need_dict: Final[_DefDict[str]] = ... + +module_rules: Final[_RuleDict[str | list[str] | _RuleDict]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict | _RuleDict]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict]]] = ... +rout_rules: Final[list[_RuleDict[str | Any]]] = ... +aux_rules: Final[list[_RuleDict[str | Any]]] = ... +arg_rules: Final[list[_RuleDict[str | Any]]] = ... +check_rules: Final[list[_RuleDict[str | Any]]] = ... + +stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... + +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict: ... +def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict, str]: ... 
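+# Rough sketch (simplified; not the actual implementation) of the '#key#'
+# template scheme the *_rules tables stubbed above are written against.
+# The real substitution is done by the replace()/applyrules() helpers that
+# rules.py imports from auxfuncs.py:
+#
+#     replace('#docreturn##name#(#docsignature#)',
+#             {'docreturn': 'y = ', 'name': 'foo', 'docsignature': 'a, b'})
+#     # -> 'y = foo(a, b)'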
+
+# namespace pollution
+k: str
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/setup.cfg b/.venv/lib/python3.12/site-packages/numpy/f2py/setup.cfg
new file mode 100644
index 0000000..1466954
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/setup.cfg
@@ -0,0 +1,3 @@
+[bdist_rpm]
+doc_files = docs/
+ tests/
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/src/fortranobject.c b/.venv/lib/python3.12/site-packages/numpy/f2py/src/fortranobject.c
new file mode 100644
index 0000000..5c2b4bd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/src/fortranobject.c
@@ -0,0 +1,1436 @@
+#define FORTRANOBJECT_C
+#include "fortranobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h>
+#include <string.h>
+#include <assert.h>
+
+/*
+  This file implements: FortranObject, array_from_pyobj, copy_ND_array
+
+  Author: Pearu Peterson <pearu@cens.ioc.ee>
+  $Revision: 1.52 $
+  $Date: 2005/07/11 07:44:20 $
+*/
+
+int
+F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj)
+{
+    if (obj == NULL) {
+        fprintf(stderr, "Error loading %s\n", name);
+        if (PyErr_Occurred()) {
+            PyErr_Print();
+            PyErr_Clear();
+        }
+        return -1;
+    }
+    return PyDict_SetItemString(dict, name, obj);
+}
+
+/*
+ * Python-only fallback for thread-local callback pointers
+ */
+void *
+F2PySwapThreadLocalCallbackPtr(char *key, void *ptr)
+{
+    PyObject *local_dict, *value;
+    void *prev;
+
+    local_dict = PyThreadState_GetDict();
+    if (local_dict == NULL) {
+        Py_FatalError(
+                "F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict "
+                "failed");
+    }
+
+    value = PyDict_GetItemString(local_dict, key);
+    if (value != NULL) {
+        prev = PyLong_AsVoidPtr(value);
+        if (PyErr_Occurred()) {
+            Py_FatalError(
+                    "F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+        }
+    }
+    else {
+        prev = NULL;
+    }
+
+    value = PyLong_FromVoidPtr((void *)ptr);
+    if (value == NULL) {
+        Py_FatalError(
+                "F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed");
+    }
+
+    if (PyDict_SetItemString(local_dict, key, value) != 0) {
+        Py_FatalError(
+                "F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed");
+    }
+
+    Py_DECREF(value);
+
+    return prev;
+}
+
+void *
+F2PyGetThreadLocalCallbackPtr(char *key)
+{
+    PyObject *local_dict, *value;
+    void *prev;
+
+    local_dict = PyThreadState_GetDict();
+    if (local_dict == NULL) {
+        Py_FatalError(
+                "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed");
+    }
+
+    value = PyDict_GetItemString(local_dict, key);
+    if (value != NULL) {
+        prev = PyLong_AsVoidPtr(value);
+        if (PyErr_Occurred()) {
+            Py_FatalError(
+                    "F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+        }
+    }
+    else {
+        prev = NULL;
+    }
+
+    return prev;
+}
+
+static PyArray_Descr *
+get_descr_from_type_and_elsize(const int type_num, const int elsize) {
+  PyArray_Descr * descr = PyArray_DescrFromType(type_num);
+  if (type_num == NPY_STRING) {
+    // PyArray_DescrFromType returns descr with elsize = 0.
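+    // A flexible string dtype carries no fixed width, so replace the
+    // descriptor with a private copy below and stamp the caller-supplied
+    // elsize onto it.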
+ PyArray_DESCR_REPLACE(descr); + if (descr == NULL) { + return NULL; + } + PyDataType_SET_ELSIZE(descr, elsize); + } + return descr; +} + +/************************* FortranObject *******************************/ + +typedef PyObject *(*fortranfunc)(PyObject *, PyObject *, PyObject *, void *); + +PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init) +{ + int i; + PyFortranObject *fp = NULL; + PyObject *v = NULL; + if (init != NULL) { /* Initialize F90 module objects */ + (*(init))(); + } + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) { + return NULL; + } + if ((fp->dict = PyDict_New()) == NULL) { + Py_DECREF(fp); + return NULL; + } + fp->len = 0; + while (defs[fp->len].name != NULL) { + fp->len++; + } + if (fp->len == 0) { + goto fail; + } + fp->defs = defs; + for (i = 0; i < fp->len; i++) { + if (fp->defs[i].rank == -1) { /* Is Fortran routine */ + v = PyFortranObject_NewAsAttr(&(fp->defs[i])); + if (v == NULL) { + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + else if ((fp->defs[i].data) != + NULL) { /* Is Fortran variable or array (not allocatable) */ + PyArray_Descr * + descr = get_descr_from_type_and_elsize(fp->defs[i].type, + fp->defs[i].elsize); + if (descr == NULL) { + goto fail; + } + v = PyArray_NewFromDescr(&PyArray_Type, descr, fp->defs[i].rank, + fp->defs[i].dims.d, NULL, fp->defs[i].data, + NPY_ARRAY_FARRAY, NULL); + if (v == NULL) { + Py_DECREF(descr); + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + } + return (PyObject *)fp; +fail: + Py_XDECREF(fp); + return NULL; +} + +PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs) +{ /* used for calling F90 module routines */ + PyFortranObject *fp = NULL; + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) + return NULL; + if ((fp->dict = PyDict_New()) == NULL) { + PyObject_Del(fp); + return NULL; + } + fp->len = 1; + fp->defs = defs; + if (defs->rank == -1) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name)); + } else if (defs->rank == 0) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name)); + } else { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name)); + } + return (PyObject *)fp; +} + +/* Fortran methods */ + +static void +fortran_dealloc(PyFortranObject *fp) +{ + Py_XDECREF(fp->dict); + PyObject_Del(fp); +} + +/* Returns number of bytes consumed from buf, or -1 on error. 
*/ +static Py_ssize_t +format_def(char *buf, Py_ssize_t size, FortranDataDef def) +{ + char *p = buf; + int i; + npy_intp n; + + n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + + for (i = 1; i < def.rank; i++) { + n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + } + + if (size <= 0) { + return -1; + } + + *p++ = ')'; + size--; + + if (def.data == NULL) { + static const char notalloc[] = ", not allocated"; + if ((size_t)size < sizeof(notalloc)) { + return -1; + } + memcpy(p, notalloc, sizeof(notalloc)); + p += sizeof(notalloc); + size -= sizeof(notalloc); + } + + return p - buf; +} + +static PyObject * +fortran_doc(FortranDataDef def) +{ + char *buf, *p; + PyObject *s = NULL; + Py_ssize_t n, origsize, size = 100; + + if (def.doc != NULL) { + size += strlen(def.doc); + } + origsize = size; + buf = p = (char *)PyMem_Malloc(size); + if (buf == NULL) { + return PyErr_NoMemory(); + } + + if (def.rank == -1) { + if (def.doc) { + n = strlen(def.doc); + if (n > size) { + goto fail; + } + memcpy(p, def.doc, n); + p += n; + size -= n; + } + else { + n = PyOS_snprintf(p, size, "%s - no docs available", def.name); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + } + } + else { + PyArray_Descr *d = PyArray_DescrFromType(def.type); + n = PyOS_snprintf(p, size, "%s : '%c'-", def.name, d->type); + Py_DECREF(d); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + + if (def.data == NULL) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else if (def.rank > 0) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else { + n = strlen("scalar"); + if (size < n) { + goto fail; + } + memcpy(p, "scalar", n); + p += n; + size -= n; + } + } + if (size <= 1) { + goto fail; + } + *p++ = '\n'; + size--; + + /* p now points one beyond the last character of the string in buf */ + s = PyUnicode_FromStringAndSize(buf, p - buf); + + PyMem_Free(buf); + return s; + +fail: + fprintf(stderr, + "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" + " too long docstring required, increase size\n", + p - buf, origsize); + PyMem_Free(buf); + return NULL; +} + +static FortranDataDef *save_def; /* save pointer of an allocatable array */ +static void +set_data(char *d, npy_intp *f) +{ /* callback from Fortran */ + if (*f) /* In fortran f=allocated(d) */ + save_def->data = d; + else + save_def->data = NULL; + /* printf("set_data: d=%p,f=%d\n",d,*f); */ +} + +static PyObject * +fortran_getattr(PyFortranObject *fp, char *name) +{ + int i, j, k, flag; + if (fp->dict != NULL) { + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + if (v == NULL && PyErr_Occurred()) { + return NULL; + } + else if (v != NULL) { + Py_INCREF(v); + return v; + } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + + } + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) + if (fp->defs[i].rank != -1) { /* F90 allocatable array */ + if (fp->defs[i].func == NULL) + return NULL; + for (k = 0; k < fp->defs[i].rank; ++k) fp->defs[i].dims.d[k] = -1; + save_def = &fp->defs[i]; + (*(fp->defs[i].func))(&fp->defs[i].rank, 
fp->defs[i].dims.d, + set_data, &flag); + if (flag == 2) + k = fp->defs[i].rank + 1; + else + k = fp->defs[i].rank; + if (fp->defs[i].data != NULL) { /* array is allocated */ + PyObject *v = PyArray_New( + &PyArray_Type, k, fp->defs[i].dims.d, fp->defs[i].type, + NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL); + if (v == NULL) + return NULL; + /* Py_INCREF(v); */ + return v; + } + else { /* array is not allocated */ + Py_RETURN_NONE; + } + } + if (strcmp(name, "__dict__") == 0) { + Py_INCREF(fp->dict); + return fp->dict; + } + if (strcmp(name, "__doc__") == 0) { + PyObject *s = PyUnicode_FromString(""), *s2, *s3; + for (i = 0; i < fp->len; i++) { + s2 = fortran_doc(fp->defs[i]); + s3 = PyUnicode_Concat(s, s2); + Py_DECREF(s2); + Py_DECREF(s); + s = s3; + } + if (PyDict_SetItemString(fp->dict, name, s)) + return NULL; + return s; + } + if ((strcmp(name, "_cpointer") == 0) && (fp->len == 1)) { + PyObject *cobj = + F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data), NULL); + if (PyDict_SetItemString(fp->dict, name, cobj)) + return NULL; + return cobj; + } + PyObject *str, *ret; + str = PyUnicode_FromString(name); + ret = PyObject_GenericGetAttr((PyObject *)fp, str); + Py_DECREF(str); + return ret; +} + +static int +fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) +{ + int i, j, flag; + PyArrayObject *arr = NULL; + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) { + if (fp->defs[i].rank == -1) { + PyErr_SetString(PyExc_AttributeError, + "over-writing fortran routine"); + return -1; + } + if (fp->defs[i].func != NULL) { /* is allocatable array */ + npy_intp dims[F2PY_MAX_DIMS]; + int k; + save_def = &fp->defs[i]; + if (v != Py_None) { /* set new value (reallocate if needed -- + see f2py generated code for more + details ) */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + if ((arr = array_from_pyobj(fp->defs[i].type, dims, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + (*(fp->defs[i].func))(&fp->defs[i].rank, PyArray_DIMS(arr), + set_data, &flag); + } + else { /* deallocate */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = 0; + (*(fp->defs[i].func))(&fp->defs[i].rank, dims, set_data, + &flag); + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + } + memcpy(fp->defs[i].dims.d, dims, + fp->defs[i].rank * sizeof(npy_intp)); + } + else { /* not allocatable array */ + if ((arr = array_from_pyobj(fp->defs[i].type, fp->defs[i].dims.d, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + } + if (fp->defs[i].data != + NULL) { /* copy Python object to Fortran array */ + npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d, + PyArray_NDIM(arr)); + if (s == -1) + s = PyArray_MultiplyList(PyArray_DIMS(arr), PyArray_NDIM(arr)); + if (s < 0 || (memcpy(fp->defs[i].data, PyArray_DATA(arr), + s * PyArray_ITEMSIZE(arr))) == NULL) { + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + return -1; + } + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + } + else + return (fp->defs[i].func == NULL ? 
-1 : 0);
+        return 0; /* successful */
+    }
+    if (fp->dict == NULL) {
+        fp->dict = PyDict_New();
+        if (fp->dict == NULL)
+            return -1;
+    }
+    if (v == NULL) {
+        int rv = PyDict_DelItemString(fp->dict, name);
+        if (rv < 0)
+            PyErr_SetString(PyExc_AttributeError,
+                            "delete non-existing fortran attribute");
+        return rv;
+    }
+    else
+        return PyDict_SetItemString(fp->dict, name, v);
+}
+
+static PyObject *
+fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw)
+{
+    int i = 0;
+    /*  printf("fortran call
+        name=%s,func=%p,data=%p,%p\n",fp->defs[i].name,
+        fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */
+    if (fp->defs[i].rank == -1) { /* is Fortran routine */
+        if (fp->defs[i].func == NULL) {
+            PyErr_Format(PyExc_RuntimeError, "no function to call");
+            return NULL;
+        }
+        else if (fp->defs[i].data == NULL)
+            /* dummy routine */
+            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp, arg,
+                                                        kw, NULL);
+        else
+            return (*((fortranfunc)(fp->defs[i].func)))(
+                    (PyObject *)fp, arg, kw, (void *)fp->defs[i].data);
+    }
+    PyErr_Format(PyExc_TypeError, "this fortran object is not callable");
+    return NULL;
+}
+
+static PyObject *
+fortran_repr(PyFortranObject *fp)
+{
+    PyObject *name = NULL, *repr = NULL;
+    name = PyObject_GetAttrString((PyObject *)fp, "__name__");
+    PyErr_Clear();
+    if (name != NULL && PyUnicode_Check(name)) {
+        repr = PyUnicode_FromFormat("<fortran %U>", name);
+    }
+    else {
+        repr = PyUnicode_FromString("<fortran object>");
+    }
+    Py_XDECREF(name);
+    return repr;
+}
+
+PyTypeObject PyFortran_Type = {
+    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "fortran",
+    .tp_basicsize = sizeof(PyFortranObject),
+    .tp_dealloc = (destructor)fortran_dealloc,
+    .tp_getattr = (getattrfunc)fortran_getattr,
+    .tp_setattr = (setattrfunc)fortran_setattr,
+    .tp_repr = (reprfunc)fortran_repr,
+    .tp_call = (ternaryfunc)fortran_call,
+};
+
+/************************* f2py_report_atexit *******************************/
+
+#ifdef F2PY_REPORT_ATEXIT
+static int passed_time = 0;
+static int passed_counter = 0;
+static int passed_call_time = 0;
+static struct timeb start_time;
+static struct timeb stop_time;
+static struct timeb start_call_time;
+static struct timeb stop_call_time;
+static int cb_passed_time = 0;
+static int cb_passed_counter = 0;
+static int cb_passed_call_time = 0;
+static struct timeb cb_start_time;
+static struct timeb cb_stop_time;
+static struct timeb cb_start_call_time;
+static struct timeb cb_stop_call_time;
+
+extern void
+f2py_start_clock(void)
+{
+    ftime(&start_time);
+}
+extern void
+f2py_start_call_clock(void)
+{
+    f2py_stop_clock();
+    ftime(&start_call_time);
+}
+extern void
+f2py_stop_clock(void)
+{
+    ftime(&stop_time);
+    passed_time += 1000 * (stop_time.time - start_time.time);
+    passed_time += stop_time.millitm - start_time.millitm;
+}
+extern void
+f2py_stop_call_clock(void)
+{
+    ftime(&stop_call_time);
+    passed_call_time += 1000 * (stop_call_time.time - start_call_time.time);
+    passed_call_time += stop_call_time.millitm - start_call_time.millitm;
+    passed_counter += 1;
+    f2py_start_clock();
+}
+
+extern void
+f2py_cb_start_clock(void)
+{
+    ftime(&cb_start_time);
+}
+extern void
+f2py_cb_start_call_clock(void)
+{
+    f2py_cb_stop_clock();
+    ftime(&cb_start_call_time);
+}
+extern void
+f2py_cb_stop_clock(void)
+{
+    ftime(&cb_stop_time);
+    cb_passed_time += 1000 * (cb_stop_time.time - cb_start_time.time);
+    cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm;
+}
+extern void
+f2py_cb_stop_call_clock(void)
+{
+    ftime(&cb_stop_call_time);
+    cb_passed_call_time +=
+            1000 * (cb_stop_call_time.time
- cb_start_call_time.time); + cb_passed_call_time += + cb_stop_call_time.millitm - cb_start_call_time.millitm; + cb_passed_counter += 1; + f2py_cb_start_clock(); +} + +static int f2py_report_on_exit_been_here = 0; +extern void +f2py_report_on_exit(int exit_flag, void *name) +{ + if (f2py_report_on_exit_been_here) { + fprintf(stderr, " %s\n", (char *)name); + return; + } + f2py_report_on_exit_been_here = 1; + fprintf(stderr, " /-----------------------\\\n"); + fprintf(stderr, " < F2PY performance report >\n"); + fprintf(stderr, " \\-----------------------/\n"); + fprintf(stderr, "Overall time spent in ...\n"); + fprintf(stderr, "(a) wrapped (Fortran/C) functions : %8d msec\n", + passed_call_time); + fprintf(stderr, "(b) f2py interface, %6d calls : %8d msec\n", + passed_counter, passed_time); + fprintf(stderr, "(c) call-back (Python) functions : %8d msec\n", + cb_passed_call_time); + fprintf(stderr, "(d) f2py call-back interface, %6d calls : %8d msec\n", + cb_passed_counter, cb_passed_time); + + fprintf(stderr, + "(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", + passed_call_time - cb_passed_call_time - cb_passed_time); + fprintf(stderr, + "Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); + fprintf(stderr, "Exit status: %d\n", exit_flag); + fprintf(stderr, "Modules : %s\n", (char *)name); +} +#endif + +/********************** report on array copy ****************************/ + +#ifdef F2PY_REPORT_ON_ARRAY_COPY +static void +f2py_report_on_array_copy(PyArrayObject *arr) +{ + const npy_intp arr_size = PyArray_Size((PyObject *)arr); + if (arr_size > F2PY_REPORT_ON_ARRAY_COPY) { + fprintf(stderr, + "copied an array: size=%ld, elsize=%" NPY_INTP_FMT "\n", + arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); + } +} +static void +f2py_report_on_array_copy_fromany(void) +{ + fprintf(stderr, "created an array from object\n"); +} + +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR \ + f2py_report_on_array_copy((PyArrayObject *)arr) +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() +#else +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY +#endif + +/************************* array_from_obj *******************************/ + +/* + * File: array_from_pyobj.c + * + * Description: + * ------------ + * Provides array_from_pyobj function that returns a contiguous array + * object with the given dimensions and required storage order, either + * in row-major (C) or column-major (Fortran) order. The function + * array_from_pyobj is very flexible about its Python object argument + * that can be any number, list, tuple, or array. + * + * array_from_pyobj is used in f2py generated Python extension + * modules. 
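+ *
+ * Illustration (hypothetical wrapped routine `foo(a)` with a rank-1
+ * real*8 argument): calling `m.foo([1., 2., 3.])` from Python routes the
+ * list through array_from_pyobj, which builds a Fortran-contiguous
+ * float64 array, while a suitably typed and ordered ndarray is
+ * typically passed through without a copy.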
+ * + * Author: Pearu Peterson + * Created: 13-16 January 2002 + * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $ + */ + +static int check_and_fix_dimensions(const PyArrayObject* arr, + const int rank, + npy_intp *dims, + const char *errmess); + +static int +find_first_negative_dimension(const int rank, const npy_intp *dims) +{ + int i; + for (i = 0; i < rank; ++i) { + if (dims[i] < 0) { + return i; + } + } + return -1; +} + +#ifdef DEBUG_COPY_ND_ARRAY +void +dump_dims(int rank, npy_intp const *dims) +{ + int i; + printf("["); + for (i = 0; i < rank; ++i) { + printf("%3" NPY_INTP_FMT, dims[i]); + } + printf("]\n"); +} +void +dump_attrs(const PyArrayObject *obj) +{ + const PyArrayObject_fields *arr = (const PyArrayObject_fields *)obj; + int rank = PyArray_NDIM(arr); + npy_intp size = PyArray_Size((PyObject *)arr); + printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", rank, + arr->flags, size); + printf("\tstrides = "); + dump_dims(rank, arr->strides); + printf("\tdimensions = "); + dump_dims(rank, arr->dimensions); +} +#endif + +#define SWAPTYPE(a, b, t) \ + { \ + t c; \ + c = (a); \ + (a) = (b); \ + (b) = c; \ + } + +static int +swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2) +{ + PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1, + *arr2 = (PyArrayObject_fields *)obj2; + SWAPTYPE(arr1->data, arr2->data, char *); + SWAPTYPE(arr1->nd, arr2->nd, int); + SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *); + SWAPTYPE(arr1->strides, arr2->strides, npy_intp *); + SWAPTYPE(arr1->base, arr2->base, PyObject *); + SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *); + SWAPTYPE(arr1->flags, arr2->flags, int); + /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ + return 0; +} + +#define ARRAY_ISCOMPATIBLE(arr,type_num) \ + ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \ + (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \ + (PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) || \ + (PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) || \ + (PyArray_ISSTRING(arr) && PyTypeNum_ISSTRING(type_num))) + +static int +get_elsize(PyObject *obj) { + /* + get_elsize determines array itemsize from a Python object. Returns + elsize if successful, -1 otherwise. + + Supported types of the input are: numpy.ndarray, bytes, str, tuple, + list. + */ + + if (PyArray_Check(obj)) { + return PyArray_ITEMSIZE((PyArrayObject *)obj); + } else if (PyBytes_Check(obj)) { + return PyBytes_GET_SIZE(obj); + } else if (PyUnicode_Check(obj)) { + return PyUnicode_GET_LENGTH(obj); + } else if (PySequence_Check(obj)) { + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); + if (fast != NULL) { + Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); + int sz, elsize = 0; + for (i=0; i<n; i++) { + sz = get_elsize(PySequence_Fast_GET_ITEM(fast, i) /* borrowed */ ); + if (sz > elsize) { + elsize = sz; + } + } + Py_DECREF(fast); + return elsize; + } + } + return -1; +} + +extern PyArrayObject * +ndarray_from_pyobj(const int type_num, + const int elsize_, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj, + const char *errmess) { + /* + * Return an array with given element type and shape from a Python + * object while taking into account the usage intent of the array. + * + * - element type is defined by type_num and elsize + * - shape is defined by dims and rank + * + * ndarray_from_pyobj is used to convert Python object arguments + * to numpy ndarrays with given type and shape that data is passed + * to interfaced Fortran or C functions.
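For orientation before the details below: in the plain intent(in) case the conversion implemented here behaves much like numpy.require does from Python, i.e. coerce dtype and storage order, copying only when necessary. A rough sketch of that analogy (not the actual code path, which is the C that follows):

    import numpy as np

    def as_fortran_input(obj, dtype):
        # Analogue of the intent(in) path: return `obj` itself when it
        # already matches dtype and Fortran order, otherwise a copy.
        return np.require(obj, dtype=dtype, requirements=["F"])

    a = as_fortran_input([[1, 2], [3, 4]], np.float64)
    assert a.flags["F_CONTIGUOUS"]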
+ * + * errmess (if not NULL), contains a prefix of an error message + * for an exception to be triggered within this function. + * + * Negative elsize value means that elsize is to be determined + * from the Python object in runtime. + * + * Note on strings + * --------------- + * + * String type (type_num == NPY_STRING) does not have fixed + * element size and, by default, the type object sets it to + * 0. Therefore, for string types, one has to use elsize + * argument. For other types, elsize value is ignored. + * + * NumPy defines the type of a fixed-width string as + * dtype('S'). In addition, there is also dtype('c'), that + * appears as dtype('S1') (these have the same type_num value), + * but is actually different (.char attribute is either 'S' or + * 'c', respectively). + * + * In Fortran, character arrays and strings are different + * concepts. The relation between Fortran types, NumPy dtypes, + * and type_num-elsize pairs, is defined as follows: + * + * character*5 foo | dtype('S5') | elsize=5, shape=() + * character(5) foo | dtype('S1') | elsize=1, shape=(5) + * character*5 foo(n) | dtype('S5') | elsize=5, shape=(n,) + * character(5) foo(n) | dtype('S1') | elsize=1, shape=(5, n) + * character*(*) foo | dtype('S') | elsize=-1, shape=() + * + * Note about reference counting + * ----------------------------- + * + * If the caller returns the array to Python, it must be done with + * Py_BuildValue("N",arr). Otherwise, if obj!=arr then the caller + * must call Py_DECREF(arr). + * + * Note on intent(cache,out,..) + * ---------------------------- + * Don't expect correct data when returning intent(cache) array. + * + */ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + PyArrayObject *arr = NULL; + int elsize = (elsize_ < 0 ? get_elsize(obj) : elsize_); + if (elsize < 0) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- failed to determine element size from %s", + Py_TYPE(obj)->tp_name); + PyErr_SetString(PyExc_SystemError, mess); + return NULL; + } + PyArray_Descr * descr = get_descr_from_type_and_elsize(type_num, elsize); // new reference + if (descr == NULL) { + return NULL; + } + elsize = PyDataType_ELSIZE(descr); + if ((intent & F2PY_INTENT_HIDE) + || ((intent & F2PY_INTENT_CACHE) && (obj == Py_None)) + || ((intent & F2PY_OPTIONAL) && (obj == Py_None)) + ) { + /* intent(cache), optional, intent(hide) */ + int ineg = find_first_negative_dimension(rank, dims); + if (ineg >= 0) { + int i; + strcpy(mess, "failed to create intent(cache|hide)|optional array" + "-- must have defined dimensions but got ("); + for(i = 0; i < rank; ++i) + sprintf(mess + strlen(mess), "%" NPY_INTP_FMT ",", dims[i]); + strcat(mess, ")"); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + arr = (PyArrayObject *) \ + PyArray_NewFromDescr(&PyArray_Type, descr, rank, dims, + NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + if (arr == NULL) { + Py_DECREF(descr); + return NULL; + } + if (PyArray_ITEMSIZE(arr) != elsize) { + strcpy(mess, "failed to create intent(cache|hide)|optional array"); + sprintf(mess+strlen(mess)," -- expected elsize=%d got %" NPY_INTP_FMT, elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError,mess); + Py_DECREF(arr); + return NULL; + } + if (!(intent & F2PY_INTENT_CACHE)) { + PyArray_FILLWBYTE(arr, 0); + } + return arr; + } + + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)obj; + if (intent & F2PY_INTENT_CACHE) { + /* intent(cache) */ + if (PyArray_ISONESEGMENT(arr) + && PyArray_ITEMSIZE(arr) >= 
elsize) { + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + Py_DECREF(descr); + return arr; + } + strcpy(mess, "failed to initialize intent(cache) array"); + if (!PyArray_ISONESEGMENT(arr)) + strcat(mess, " -- input must be in one segment"); + if (PyArray_ITEMSIZE(arr) < elsize) + sprintf(mess + strlen(mess), + " -- expected at least elsize=%d but got " + "%" NPY_INTP_FMT, + elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inout) or intent(inplace) + */ + + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + /* + printf("intent alignment=%d\n", F2PY_GET_ALIGNMENT(intent)); + printf("alignment check=%d\n", F2PY_CHECK_ALIGNMENT(arr, intent)); + int i; + for (i=1;i<=16;i++) + printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i)); + */ + if ((! (intent & F2PY_INTENT_COPY)) && + PyArray_ITEMSIZE(arr) == elsize && + ARRAY_ISCOMPATIBLE(arr,type_num) && + F2PY_CHECK_ALIGNMENT(arr, intent)) { + if ((intent & F2PY_INTENT_INOUT || intent & F2PY_INTENT_INPLACE) + ? ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY(arr) : PyArray_ISFARRAY(arr)) + : ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY_RO(arr) : PyArray_ISFARRAY_RO(arr))) { + if ((intent & F2PY_INTENT_OUT)) { + Py_INCREF(arr); + } + /* Returning input array */ + Py_DECREF(descr); + return arr; + } + } + if (intent & F2PY_INTENT_INOUT) { + strcpy(mess, "failed to initialize intent(inout) array"); + /* Must use PyArray_IS*ARRAY because intent(inout) requires + * writable input */ + if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) + strcat(mess, " -- input not contiguous"); + if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) + strcat(mess, " -- input not fortran contiguous"); + if (PyArray_ITEMSIZE(arr) != elsize) + sprintf(mess + strlen(mess), + " -- expected elsize=%d but got %" NPY_INTP_FMT, + elsize, + (npy_intp)PyArray_ITEMSIZE(arr) + ); + if (!(ARRAY_ISCOMPATIBLE(arr, type_num))) { + sprintf(mess + strlen(mess), + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, descr->type); + } + if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) + sprintf(mess + strlen(mess), " -- input not %d-aligned", + F2PY_GET_ALIGNMENT(intent)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inplace) */ + + { + PyArrayObject * retarr = (PyArrayObject *) \ + PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr), + NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + if (retarr==NULL) { + Py_DECREF(descr); + return NULL; + } + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + if (PyArray_CopyInto(retarr, arr)) { + Py_DECREF(retarr); + return NULL; + } + if (intent & F2PY_INTENT_INPLACE) { + if (swap_arrays(arr,retarr)) { + Py_DECREF(retarr); + return NULL; /* XXX: set exception */ + } + Py_XDECREF(retarr); + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + } else { + arr = retarr; + } + } + return arr; + } + + if ((intent & F2PY_INTENT_INOUT) || (intent & F2PY_INTENT_INPLACE) || + (intent & F2PY_INTENT_CACHE)) { + PyErr_Format(PyExc_TypeError, + "failed to initialize intent(inout|inplace|cache) " + "array, input '%s' object is not an array", + Py_TYPE(obj)->tp_name); + Py_DECREF(descr); + return NULL; + } + + { + F2PY_REPORT_ON_ARRAY_COPY_FROMANY; + arr = (PyArrayObject *)PyArray_FromAny( + 
obj, descr, 0, 0, + ((intent & F2PY_INTENT_C) ? NPY_ARRAY_CARRAY + : NPY_ARRAY_FARRAY) | + NPY_ARRAY_FORCECAST, + NULL); + // Warning: in the case of NPY_STRING, PyArray_FromAny may + // reset descr->elsize, e.g. dtype('S0') becomes dtype('S1'). + if (arr == NULL) { + Py_DECREF(descr); + return NULL; + } + if (type_num != NPY_STRING && PyArray_ITEMSIZE(arr) != elsize) { + // This is internal sanity tests: elsize has been set to + // descr->elsize in the beginning of this function. + strcpy(mess, "failed to initialize intent(in) array"); + sprintf(mess + strlen(mess), + " -- expected elsize=%d got %" NPY_INTP_FMT, elsize, + (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(arr); + return NULL; + } + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(arr); + return NULL; + } + return arr; + } +} + +extern PyArrayObject * +array_from_pyobj(const int type_num, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj) { + /* + Same as ndarray_from_pyobj but with elsize determined from type, + if possible. Provided for backward compatibility. + */ + PyArray_Descr* descr = PyArray_DescrFromType(type_num); + int elsize = PyDataType_ELSIZE(descr); + Py_DECREF(descr); + return ndarray_from_pyobj(type_num, elsize, dims, rank, intent, obj, NULL); +} + +/*****************************************/ +/* Helper functions for array_from_pyobj */ +/*****************************************/ + +static int +check_and_fix_dimensions(const PyArrayObject* arr, const int rank, + npy_intp *dims, const char *errmess) +{ + /* + * This function fills in blanks (that are -1's) in dims list using + * the dimensions from arr. It also checks that non-blank dims will + * match with the corresponding values in arr dimensions. + * + * Returns 0 if the function is successful. + * + * If an error condition is detected, an exception is set and 1 is + * returned. + */ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + const npy_intp arr_size = + (PyArray_NDIM(arr)) ? PyArray_Size((PyObject *)arr) : 1; +#ifdef DEBUG_COPY_ND_ARRAY + dump_attrs(arr); + printf("check_and_fix_dimensions:init: dims="); + dump_dims(rank, dims); +#endif + if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ + npy_intp new_size = 1; + int free_axe = -1; + int i; + npy_intp d; + /* Fill dims where -1 or 0; check dimensions; calc new_size; */ + for (i = 0; i < PyArray_NDIM(arr); ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && dims[i] != d) { + PyErr_Format( + PyExc_ValueError, + "%d-th dimension must be fixed to %" NPY_INTP_FMT + " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else { + dims[i] = d ? 
d : 1; + } + new_size *= dims[i]; + } + for (i = PyArray_NDIM(arr); i < rank; ++i) + if (dims[i] > 1) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be %" NPY_INTP_FMT + " but got 0 (not defined).\n", + i, dims[i]); + return 1; + } + else if (free_axe < 0) + free_axe = i; + else + dims[i] = 1; + if (free_axe >= 0) { + dims[free_axe] = arr_size / new_size; + new_size *= dims[free_axe]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT + " (maybe too many free indices)\n", + new_size, arr_size); + return 1; + } + } + else if (rank == PyArray_NDIM(arr)) { + npy_intp new_size = 1; + int i; + npy_intp d; + for (i = 0; i < rank; ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT, + i, dims[i], d); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + new_size *= dims[i]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT "\n", + new_size, arr_size); + return 1; + } + } + else { /* [[1,2]] -> [[1],[2]] */ + int i, j; + npy_intp d; + int effrank; + npy_intp size; + for (i = 0, effrank = 0; i < PyArray_NDIM(arr); ++i) + if (PyArray_DIM(arr, i) > 1) + ++effrank; + if (dims[rank - 1] >= 0) + if (effrank > rank) { + PyErr_Format(PyExc_ValueError, + "too many axes: %d (effrank=%d), " + "expected rank=%d\n", + PyArray_NDIM(arr), effrank, rank); + return 1; + } + + for (i = 0, j = 0; i < rank; ++i) { + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT + " (real index=%d)\n", + i, dims[i], d, j-1); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + } + + for (i = rank; i < PyArray_NDIM(arr); + ++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */ + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + dims[rank - 1] *= d; + } + for (i = 0, size = 1; i < rank; ++i) size *= dims[i]; + if (size != arr_size) { + char msg[200]; + int len; + snprintf(msg, sizeof(msg), + "unexpected array size: size=%" NPY_INTP_FMT + ", arr_size=%" NPY_INTP_FMT + ", rank=%d, effrank=%d, arr.nd=%d, dims=[", + size, arr_size, rank, effrank, PyArray_NDIM(arr)); + for (i = 0; i < rank; ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + dims[i]); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=["); + for (i = 0; i < PyArray_NDIM(arr); ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + PyArray_DIM(arr, i)); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ]\n"); + PyErr_SetString(PyExc_ValueError, msg); + return 1; + } + } +#ifdef DEBUG_COPY_ND_ARRAY + printf("check_and_fix_dimensions:end: dims="); + dump_dims(rank, dims); +#endif + return 0; +} + +/* End of file: 
array_from_pyobj.c */ + +/************************* copy_ND_array *******************************/ + +extern int +copy_ND_array(const PyArrayObject *arr, PyArrayObject *out) +{ + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + return PyArray_CopyInto(out, (PyArrayObject *)arr); +} + +/********************* Various utility functions ***********************/ + +extern int +f2py_describe(PyObject *obj, char *buf) { + /* + Write the description of a Python object to buf. The caller must + provide buffer with size sufficient to write the description. + + Return 1 on success. + */ + char localbuf[F2PY_MESSAGE_BUFFER_SIZE]; + if (PyBytes_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PyBytes_GET_SIZE(obj), Py_TYPE(obj)->tp_name); + } else if (PyUnicode_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PyUnicode_GET_LENGTH(obj), Py_TYPE(obj)->tp_name); + } else if (PyArray_CheckScalar(obj)) { + PyArrayObject* arr = (PyArrayObject*)obj; + sprintf(localbuf, "%c%" NPY_INTP_FMT "-%s-scalar", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name); + } else if (PyArray_Check(obj)) { + int i; + PyArrayObject* arr = (PyArrayObject*)obj; + strcpy(localbuf, "("); + for (i=0; i<PyArray_NDIM(arr); i++) { + sprintf(localbuf + strlen(localbuf), "%" NPY_INTP_FMT ",", PyArray_DIM(arr, i)); + } + strcat(localbuf, ")-"); + sprintf(localbuf + strlen(localbuf), "%c%" NPY_INTP_FMT "-%s", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name); + } else if (PySequence_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PySequence_Length(obj), Py_TYPE(obj)->tp_name); + } else { + sprintf(localbuf, "%s instance", Py_TYPE(obj)->tp_name); + } + // TODO: detect the size of buf and make sure that size(buf) >= size(localbuf). + strcpy(buf, localbuf); + return 1; +} + +extern npy_intp +f2py_size_impl(PyArrayObject* var, ...) +{ + npy_intp sz = 0; + npy_intp dim; + npy_intp rank; + va_list argp; + va_start(argp, var); + dim = va_arg(argp, npy_int); + if (dim==-1) + { + sz = PyArray_SIZE(var); + } + else + { + rank = PyArray_NDIM(var); + if (dim>=1 && dim<=rank) + sz = PyArray_DIM(var, dim-1); + else + fprintf(stderr, "f2py_size: 2nd argument value=%" NPY_INTP_FMT + " fails to satisfy 1<=value<=%" NPY_INTP_FMT + ".
Result will be 0.\n", dim, rank); + } + va_end(argp); + return sz; +} + +/*********************************************/ +/* Compatibility functions for Python >= 3.0 */ +/*********************************************/ + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +void * +F2PyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +int +F2PyCapsule_Check(PyObject *ptr) +{ + return PyCapsule_CheckExact(ptr); +} + +#ifdef __cplusplus +} +#endif +/************************* EOF fortranobject.c *******************************/ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/src/fortranobject.h b/.venv/lib/python3.12/site-packages/numpy/f2py/src/fortranobject.h new file mode 100644 index 0000000..4aed2f6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/src/fortranobject.h @@ -0,0 +1,173 @@ +#ifndef Py_FORTRANOBJECT_H +#define Py_FORTRANOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include <Python.h> + +#ifndef NPY_NO_DEPRECATED_API +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif +#ifdef FORTRANOBJECT_C +#define NO_IMPORT_ARRAY +#endif +#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API +#include "numpy/arrayobject.h" +#include "numpy/npy_3kcompat.h" + +#ifdef F2PY_REPORT_ATEXIT +#include <sys/timeb.h> +// clang-format off +extern void f2py_start_clock(void); +extern void f2py_stop_clock(void); +extern void f2py_start_call_clock(void); +extern void f2py_stop_call_clock(void); +extern void f2py_cb_start_clock(void); +extern void f2py_cb_stop_clock(void); +extern void f2py_cb_start_call_clock(void); +extern void f2py_cb_stop_call_clock(void); +extern void f2py_report_on_exit(int, void *); +// clang-format on +#endif + +#ifdef DMALLOC +#include "dmalloc.h" +#endif + +/* Fortran object interface */ + +/* +123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 + +PyFortranObject represents various Fortran objects: +Fortran (module) routines, COMMON blocks, module data. + +Author: Pearu Peterson +*/ + +#define F2PY_MAX_DIMS 40 +#define F2PY_MESSAGE_BUFFER_SIZE 300 // Increase on "stack smashing detected" + +typedef void (*f2py_set_data_func)(char *, npy_intp *); +typedef void (*f2py_void_func)(void); +typedef void (*f2py_init_func)(int *, npy_intp *, f2py_set_data_func, int *); + +/*typedef void* (*f2py_c_func)(void*,...);*/ + +typedef void *(*f2pycfunc)(void); + +typedef struct { + char *name; /* attribute (array||routine) name */ + int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, + || rank=-1 for Fortran routine */ + struct { + npy_intp d[F2PY_MAX_DIMS]; + } dims; /* dimensions of the array, || not used */ + int type; /* PyArray_<type> || not used */ + int elsize; /* Element size || not used */ + char *data; /* pointer to array || Fortran routine */ + f2py_init_func func; /* initialization function for + allocatable arrays: + func(&rank,dims,set_ptr_func,name,len(name)) + || C/API wrapper for Fortran routine */ + char *doc; /* documentation string; only recommended + for routines.
*/ +} FortranDataDef; + +typedef struct { + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ +} PyFortranObject; + +#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) +#define PyFortran_Check1(op) (0 == strcmp(Py_TYPE(op)->tp_name, "fortran")) + +extern PyTypeObject PyFortran_Type; +extern int +F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj); +extern PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init); +extern PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs); + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * +F2PyCapsule_AsVoidPtr(PyObject *obj); +int +F2PyCapsule_Check(PyObject *ptr); + +extern void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr); +extern void * +F2PyGetThreadLocalCallbackPtr(char *key); + +#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) +#define F2PY_INTENT_IN 1 +#define F2PY_INTENT_INOUT 2 +#define F2PY_INTENT_OUT 4 +#define F2PY_INTENT_HIDE 8 +#define F2PY_INTENT_CACHE 16 +#define F2PY_INTENT_COPY 32 +#define F2PY_INTENT_C 64 +#define F2PY_OPTIONAL 128 +#define F2PY_INTENT_INPLACE 256 +#define F2PY_INTENT_ALIGNED4 512 +#define F2PY_INTENT_ALIGNED8 1024 +#define F2PY_INTENT_ALIGNED16 2048 + +#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) +#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) +#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) +#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) + +#define F2PY_GET_ALIGNMENT(intent) \ + (F2PY_ALIGN4(intent) \ + ? 4 \ + : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 16 : 1))) +#define F2PY_CHECK_ALIGNMENT(arr, intent) \ + ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) +#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyArray_DESCR(arr)->type_num == NPY_STRING && PyArray_ITEMSIZE(arr) >= 1) \ + || PyArray_DESCR(arr)->type_num == NPY_UINT8) +#define F2PY_IS_UNICODE_ARRAY(arr) (PyArray_DESCR(arr)->type_num == NPY_UNICODE) + +extern PyArrayObject * +ndarray_from_pyobj(const int type_num, const int elsize_, npy_intp *dims, + const int rank, const int intent, PyObject *obj, + const char *errmess); + +extern PyArrayObject * +array_from_pyobj(const int type_num, npy_intp *dims, const int rank, + const int intent, PyObject *obj); +extern int +copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + +#ifdef DEBUG_COPY_ND_ARRAY +extern void +dump_attrs(const PyArrayObject *arr); +#endif + + extern int f2py_describe(PyObject *obj, char *buf); + + /* Utility CPP macros and functions that can be used in signature file + expressions. See signature-file.rst for documentation. + */ + +#define f2py_itemsize(var) (PyArray_ITEMSIZE(capi_ ## var ## _as_array)) +#define f2py_size(var, ...) 
f2py_size_impl((PyArrayObject *)(capi_ ## var ## _as_array), ## __VA_ARGS__, -1) +#define f2py_rank(var) var ## _Rank +#define f2py_shape(var,dim) var ## _Dims[dim] +#define f2py_len(var) f2py_shape(var,0) +#define f2py_fshape(var,dim) f2py_shape(var,rank(var)-dim-1) +#define f2py_flen(var) f2py_fshape(var,0) +#define f2py_slen(var) capi_ ## var ## _len + + extern npy_intp f2py_size_impl(PyArrayObject* var, ...); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FORTRANOBJECT_H */ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/symbolic.py b/.venv/lib/python3.12/site-packages/numpy/f2py/symbolic.py new file mode 100644 index 0000000..1164517 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/symbolic.py @@ -0,0 +1,1516 @@ +"""Fortran/C symbolic expressions + +References: +- J3/21-007: Draft Fortran 202x. https://j3-fortran.org/doc/year/21/21-007.pdf + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" + +# To analyze Fortran expressions to solve dimensions specifications, +# for instances, we implement a minimal symbolic engine for parsing +# expressions into a tree of expression instances. As a first +# instance, we care only about arithmetic expressions involving +# integers and operations like addition (+), subtraction (-), +# multiplication (*), division (Fortran / is Python //, Fortran // is +# concatenate), and exponentiation (**). In addition, .pyf files may +# contain C expressions that support here is implemented as well. +# +# TODO: support logical constants (Op.BOOLEAN) +# TODO: support logical operators (.AND., ...) +# TODO: support defined operators (.MYOP., ...) +# +__all__ = ['Expr'] + + +import re +import warnings +from enum import Enum +from math import gcd + + +class Language(Enum): + """ + Used as Expr.tostring language argument. + """ + Python = 0 + Fortran = 1 + C = 2 + + +class Op(Enum): + """ + Used as Expr op attribute. + """ + INTEGER = 10 + REAL = 12 + COMPLEX = 15 + STRING = 20 + ARRAY = 30 + SYMBOL = 40 + TERNARY = 100 + APPLY = 200 + INDEXING = 210 + CONCAT = 220 + RELATIONAL = 300 + TERMS = 1000 + FACTORS = 2000 + REF = 3000 + DEREF = 3001 + + +class RelOp(Enum): + """ + Used in Op.RELATIONAL expression to specify the function part. + """ + EQ = 1 + NE = 2 + LT = 3 + LE = 4 + GT = 5 + GE = 6 + + @classmethod + def fromstring(cls, s, language=Language.C): + if language is Language.Fortran: + return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE, + '.lt.': RelOp.LT, '.le.': RelOp.LE, + '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()] + return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT, + '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s] + + def tostring(self, language=Language.C): + if language is Language.Fortran: + return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.', + RelOp.LT: '.lt.', RelOp.LE: '.le.', + RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self] + return {RelOp.EQ: '==', RelOp.NE: '!=', + RelOp.LT: '<', RelOp.LE: '<=', + RelOp.GT: '>', RelOp.GE: '>='}[self] + + +class ArithOp(Enum): + """ + Used in Op.APPLY expression to specify the function part. + """ + POS = 1 + NEG = 2 + ADD = 3 + SUB = 4 + MUL = 5 + DIV = 6 + POW = 7 + + +class OpError(Exception): + pass + + +class Precedence(Enum): + """ + Used as Expr.tostring precedence argument. 
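The enums above are the main public knobs of this module; for instance a Fortran relational operator can be round-tripped through RelOp (a small sketch, assuming this module is importable as numpy.f2py.symbolic, as vendored here):

    from numpy.f2py.symbolic import Language, RelOp

    # Parse the Fortran spelling, render the C spelling.
    op = RelOp.fromstring(".le.", language=Language.Fortran)
    assert op is RelOp.LE
    assert op.tostring(language=Language.C) == "<="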
+ """ + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + + +integer_types = (int,) +number_types = (int, float) + + +def _pairs_add(d, k, v): + # Internal utility method for updating terms and factors data. + c = d.get(k) + if c is None: + d[k] = v + else: + c = c + v + if c: + d[k] = c + else: + del d[k] + + +class ExprWarning(UserWarning): + pass + + +def ewarn(message): + warnings.warn(message, ExprWarning, stacklevel=2) + + +class Expr: + """Represents a Fortran expression as a op-data pair. + + Expr instances are hashable and sortable. + """ + + @staticmethod + def parse(s, language=Language.C): + """Parse a Fortran expression to a Expr. + """ + return fromstring(s, language=language) + + def __init__(self, op, data): + assert isinstance(op, Op) + + # sanity checks + if op is Op.INTEGER: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], int) + assert isinstance(data[1], (int, str)), data + elif op is Op.REAL: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], float) + assert isinstance(data[1], (int, str)), data + elif op is Op.COMPLEX: + # data is a 2-tuple of constant expressions + assert isinstance(data, tuple) and len(data) == 2 + elif op is Op.STRING: + # data is a 2-tuple of quoted string and a kind value + # (default is 1) + assert isinstance(data, tuple) and len(data) == 2 + assert (isinstance(data[0], str) + and data[0][::len(data[0]) - 1] in ('""', "''", '@@')) + assert isinstance(data[1], (int, str)), data + elif op is Op.SYMBOL: + # data is any hashable object + assert hash(data) is not None + elif op in (Op.ARRAY, Op.CONCAT): + # data is a tuple of expressions + assert isinstance(data, tuple) + assert all(isinstance(item, Expr) for item in data), data + elif op in (Op.TERMS, Op.FACTORS): + # data is {:} where dict values + # are nonzero Python integers + assert isinstance(data, dict) + elif op is Op.APPLY: + # data is (, , ) where + # operands are Expr instances + assert isinstance(data, tuple) and len(data) == 3 + # function is any hashable object + assert hash(data[0]) is not None + assert isinstance(data[1], tuple) + assert isinstance(data[2], dict) + elif op is Op.INDEXING: + # data is (, ) + assert isinstance(data, tuple) and len(data) == 2 + # function is any hashable object + assert hash(data[0]) is not None + elif op is Op.TERNARY: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + elif op in (Op.REF, Op.DEREF): + # data is Expr instance + assert isinstance(data, Expr) + elif op is Op.RELATIONAL: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + else: + raise NotImplementedError( + f'unknown op or missing sanity check: {op}') + + self.op = op + self.data = data + + def __eq__(self, other): + return (isinstance(other, Expr) + and self.op is other.op + and self.data == other.data) + + def __hash__(self): + if self.op in (Op.TERMS, Op.FACTORS): + data = tuple(sorted(self.data.items())) + elif self.op is Op.APPLY: + data = self.data[:2] + tuple(sorted(self.data[2].items())) + else: + data = self.data + return hash((self.op, data)) + + def __lt__(self, other): + if isinstance(other, Expr): + if self.op is not other.op: + return self.op.value < other.op.value + if self.op in (Op.TERMS, Op.FACTORS): + return 
(tuple(sorted(self.data.items())) + < tuple(sorted(other.data.items()))) + if self.op is Op.APPLY: + if self.data[:2] != other.data[:2]: + return self.data[:2] < other.data[:2] + return tuple(sorted(self.data[2].items())) < tuple( + sorted(other.data[2].items())) + return self.data < other.data + return NotImplemented + + def __le__(self, other): return self == other or self < other + + def __gt__(self, other): return not (self <= other) + + def __ge__(self, other): return not (self < other) + + def __repr__(self): + return f'{type(self).__name__}({self.op}, {self.data!r})' + + def __str__(self): + return self.tostring() + + def tostring(self, parent_precedence=Precedence.NONE, + language=Language.Fortran): + """Return a string representation of Expr. + """ + if self.op in (Op.INTEGER, Op.REAL): + precedence = (Precedence.SUM if self.data[0] < 0 + else Precedence.ATOM) + r = str(self.data[0]) + (f'_{self.data[1]}' + if self.data[1] != 4 else '') + elif self.op is Op.COMPLEX: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '(' + r + ')' + precedence = Precedence.ATOM + elif self.op is Op.SYMBOL: + precedence = Precedence.ATOM + r = str(self.data) + elif self.op is Op.STRING: + r = self.data[0] + if self.data[1] != 1: + r = self.data[1] + '_' + r + precedence = Precedence.ATOM + elif self.op is Op.ARRAY: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '[' + r + ']' + precedence = Precedence.ATOM + elif self.op is Op.TERMS: + terms = [] + for term, coeff in sorted(self.data.items()): + if coeff < 0: + op = ' - ' + coeff = -coeff + else: + op = ' + ' + if coeff == 1: + term = term.tostring(Precedence.SUM, language=language) + elif term == as_number(1): + term = str(coeff) + else: + term = f'{coeff} * ' + term.tostring( + Precedence.PRODUCT, language=language) + if terms: + terms.append(op) + elif op == ' - ': + terms.append('-') + terms.append(term) + r = ''.join(terms) or '0' + precedence = Precedence.SUM if terms else Precedence.ATOM + elif self.op is Op.FACTORS: + factors = [] + tail = [] + for base, exp in sorted(self.data.items()): + op = ' * ' + if exp == 1: + factor = base.tostring(Precedence.PRODUCT, + language=language) + elif language is Language.C: + if exp in range(2, 10): + factor = base.tostring(Precedence.PRODUCT, + language=language) + factor = ' * '.join([factor] * exp) + elif exp in range(-10, 0): + factor = base.tostring(Precedence.PRODUCT, + language=language) + tail += [factor] * -exp + continue + else: + factor = base.tostring(Precedence.TUPLE, + language=language) + factor = f'pow({factor}, {exp})' + else: + factor = base.tostring(Precedence.POWER, + language=language) + f' ** {exp}' + if factors: + factors.append(op) + factors.append(factor) + if tail: + if not factors: + factors += ['1'] + factors += ['/', '(', ' * '.join(tail), ')'] + r = ''.join(factors) or '1' + precedence = Precedence.PRODUCT if factors else Precedence.ATOM + elif self.op is Op.APPLY: + name, args, kwargs = self.data + if name is ArithOp.DIV and language is Language.C: + numer, denom = [arg.tostring(Precedence.PRODUCT, + language=language) + for arg in args] + r = f'{numer} / {denom}' + precedence = Precedence.PRODUCT + else: + args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in args] + args += [k + '=' + v.tostring(Precedence.NONE) + for k, v in kwargs.items()] + r = f'{name}({", ".join(args)})' + precedence = Precedence.ATOM + elif self.op is Op.INDEXING: + name = self.data[0] + 
args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in self.data[1:]] + r = f'{name}[{", ".join(args)}]' + precedence = Precedence.ATOM + elif self.op is Op.CONCAT: + args = [arg.tostring(Precedence.PRODUCT, language=language) + for arg in self.data] + r = " // ".join(args) + precedence = Precedence.PRODUCT + elif self.op is Op.TERNARY: + cond, expr1, expr2 = [a.tostring(Precedence.TUPLE, + language=language) + for a in self.data] + if language is Language.C: + r = f'({cond}?{expr1}:{expr2})' + elif language is Language.Python: + r = f'({expr1} if {cond} else {expr2})' + elif language is Language.Fortran: + r = f'merge({expr1}, {expr2}, {cond})' + else: + raise NotImplementedError( + f'tostring for {self.op} and {language}') + precedence = Precedence.ATOM + elif self.op is Op.REF: + r = '&' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.DEREF: + r = '*' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE) + else Precedence.LT) + left = left.tostring(precedence, language=language) + right = right.tostring(precedence, language=language) + rop = rop.tostring(language=language) + r = f'{left} {rop} {right}' + else: + raise NotImplementedError(f'tostring for op {self.op}') + if parent_precedence.value < precedence.value: + # If parent precedence is higher than operand precedence, + # operand will be enclosed in parenthesis. + return '(' + r + ')' + return r + + def __pos__(self): + return self + + def __neg__(self): + return self * -1 + + def __add__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number( + self.data[0] + other.data[0], + max(self.data[1], other.data[1])) + if self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 + r2, i1 + i2) + if self.op is Op.TERMS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self + as_complex(other) + elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX: + return as_complex(self) + other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self + as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) + other + return as_terms(self) + as_terms(other) + return NotImplemented + + def __radd__(self, other): + if isinstance(other, number_types): + return as_number(other) + self + return NotImplemented + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + if isinstance(other, number_types): + return as_number(other) - self + return NotImplemented + + def __mul__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number(self.data[0] * other.data[0], + max(self.data[1], other.data[1])) + elif self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1) + + if self.op is Op.FACTORS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + elif self.op is Op.TERMS: + r = Expr(self.op, {}) + for t1, c1 
in self.data.items(): + for t2, c2 in other.data.items(): + _pairs_add(r.data, t1 * t2, c1 * c2) + return normalize(r) + + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self * as_complex(other) + elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL): + return as_complex(self) * other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self * as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) * other + + if self.op is Op.TERMS: + return self * as_terms(other) + elif other.op is Op.TERMS: + return as_terms(self) * other + + return as_factors(self) * as_factors(other) + return NotImplemented + + def __rmul__(self, other): + if isinstance(other, number_types): + return as_number(other) * self + return NotImplemented + + def __pow__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if other.op is Op.INTEGER: + exponent = other.data[0] + # TODO: other kind not used + if exponent == 0: + return as_number(1) + if exponent == 1: + return self + if exponent > 0: + if self.op is Op.FACTORS: + r = Expr(self.op, {}) + for k, v in self.data.items(): + r.data[k] = v * exponent + return normalize(r) + return self * (self ** (exponent - 1)) + elif exponent != -1: + return (self ** (-exponent)) ** -1 + return Expr(Op.FACTORS, {self: exponent}) + return as_apply(ArithOp.POW, self, other) + return NotImplemented + + def __truediv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + # Fortran / is different from Python /: + # - `/` is a truncate operation for integer operands + return normalize(as_apply(ArithOp.DIV, self, other)) + return NotImplemented + + def __rtruediv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + return other / self + return NotImplemented + + def __floordiv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + # Fortran // is different from Python //: + # - `//` is a concatenate operation for string operands + return normalize(Expr(Op.CONCAT, (self, other))) + return NotImplemented + + def __rfloordiv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + return other // self + return NotImplemented + + def __call__(self, *args, **kwargs): + # In Fortran, parenthesis () are use for both function call as + # well as indexing operations. + # + # TODO: implement a method for deciding when __call__ should + # return an INDEXING expression. + return as_apply(self, *map(as_expr, args), + **{k: as_expr(v) for k, v in kwargs.items()}) + + def __getitem__(self, index): + # Provided to support C indexing operations that .pyf files + # may contain. + index = as_expr(index) + if not isinstance(index, tuple): + index = index, + if len(index) > 1: + ewarn(f'C-index should be a single expression but got `{index}`') + return Expr(Op.INDEXING, (self,) + index) + + def substitute(self, symbols_map): + """Recursively substitute symbols with values in symbols map. + + Symbols map is a dictionary of symbol-expression pairs. 
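Concretely, substitution plus the constant folding done by the arithmetic operators lets dimension expressions be evaluated once the symbols are known (a sketch, assuming the module is importable as numpy.f2py.symbolic):

    from numpy.f2py.symbolic import as_number, as_symbol, fromstring

    # Replace the symbol `n` by 10; normalization folds `2*10 + 1`.
    e = fromstring("2*n + 1")
    assert str(e.substitute({as_symbol("n"): as_number(10)})) == "21"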
+ """ + if self.op is Op.SYMBOL: + value = symbols_map.get(self) + if value is None: + return self + m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data) + if m: + # complement to fromstring method + items, paren = m.groups() + if paren in ['ROUNDDIV', 'SQUARE']: + return as_array(value) + assert paren == 'ROUND', (paren, value) + return value + if self.op in (Op.INTEGER, Op.REAL, Op.STRING): + return self + if self.op in (Op.ARRAY, Op.COMPLEX): + return Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data)) + if self.op is Op.CONCAT: + return normalize(Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data))) + if self.op is Op.TERMS: + r = None + for term, coeff in self.data.items(): + if r is None: + r = term.substitute(symbols_map) * coeff + else: + r += term.substitute(symbols_map) * coeff + if r is None: + ewarn('substitute: empty TERMS expression interpreted as' + ' int-literal 0') + return as_number(0) + return r + if self.op is Op.FACTORS: + r = None + for base, exponent in self.data.items(): + if r is None: + r = base.substitute(symbols_map) ** exponent + else: + r *= base.substitute(symbols_map) ** exponent + if r is None: + ewarn('substitute: empty FACTORS expression interpreted' + ' as int-literal 1') + return as_number(1) + return r + if self.op is Op.APPLY: + target, args, kwargs = self.data + if isinstance(target, Expr): + target = target.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in args) + kwargs = {k: v.substitute(symbols_map) + for k, v in kwargs.items()} + return normalize(Expr(self.op, (target, args, kwargs))) + if self.op is Op.INDEXING: + func = self.data[0] + if isinstance(func, Expr): + func = func.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in self.data[1:]) + return normalize(Expr(self.op, (func,) + args)) + if self.op is Op.TERNARY: + operands = tuple(a.substitute(symbols_map) for a in self.data) + return normalize(Expr(self.op, operands)) + if self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, self.data.substitute(symbols_map))) + if self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.substitute(symbols_map) + right = right.substitute(symbols_map) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'substitute method for {self.op}: {self!r}') + + def traverse(self, visit, *args, **kwargs): + """Traverse expression tree with visit function. + + The visit function is applied to an expression with given args + and kwargs. + + Traverse call returns an expression returned by visit when not + None, otherwise return a new normalized expression with + traverse-visit sub-expressions. 
+ """ + result = visit(self, *args, **kwargs) + if result is not None: + return result + + if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL): + return self + elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY): + return normalize(Expr(self.op, tuple( + item.traverse(visit, *args, **kwargs) + for item in self.data))) + elif self.op in (Op.TERMS, Op.FACTORS): + data = {} + for k, v in self.data.items(): + k = k.traverse(visit, *args, **kwargs) + v = (v.traverse(visit, *args, **kwargs) + if isinstance(v, Expr) else v) + if k in data: + v = data[k] + v + data[k] = v + return normalize(Expr(self.op, data)) + elif self.op is Op.APPLY: + obj = self.data[0] + func = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + operands = tuple(operand.traverse(visit, *args, **kwargs) + for operand in self.data[1]) + kwoperands = {k: v.traverse(visit, *args, **kwargs) + for k, v in self.data[2].items()} + return normalize(Expr(self.op, (func, operands, kwoperands))) + elif self.op is Op.INDEXING: + obj = self.data[0] + obj = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + indices = tuple(index.traverse(visit, *args, **kwargs) + for index in self.data[1:]) + return normalize(Expr(self.op, (obj,) + indices)) + elif self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, + self.data.traverse(visit, *args, **kwargs))) + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.traverse(visit, *args, **kwargs) + right = right.traverse(visit, *args, **kwargs) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'traverse method for {self.op}') + + def contains(self, other): + """Check if self contains other. + """ + found = [] + + def visit(expr, found=found): + if found: + return expr + elif expr == other: + found.append(1) + return expr + + self.traverse(visit) + + return len(found) != 0 + + def symbols(self): + """Return a set of symbols contained in self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.SYMBOL: + found.add(expr) + + self.traverse(visit) + + return found + + def polynomial_atoms(self): + """Return a set of expressions used as atoms in polynomial self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.FACTORS: + for b in expr.data: + b.traverse(visit) + return expr + if expr.op in (Op.TERMS, Op.COMPLEX): + return + if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp): + if expr.data[0] is ArithOp.POW: + expr.data[1][0].traverse(visit) + return expr + return + if expr.op in (Op.INTEGER, Op.REAL): + return expr + + found.add(expr) + + if expr.op in (Op.INDEXING, Op.APPLY): + return expr + + self.traverse(visit) + + return found + + def linear_solve(self, symbol): + """Return a, b such that a * symbol + b == self. + + If self is not linear with respect to symbol, raise RuntimeError. + """ + b = self.substitute({symbol: as_number(0)}) + ax = self - b + a = ax.substitute({symbol: as_number(1)}) + + zero, _ = as_numer_denom(a * symbol - ax) + + if zero != as_number(0): + raise RuntimeError(f'not a {symbol}-linear equation:' + f' {a} * {symbol} + {b} == {self}') + return a, b + + +def normalize(obj): + """Normalize Expr and apply basic evaluation methods. 
+ """ + if not isinstance(obj, Expr): + return obj + + if obj.op is Op.TERMS: + d = {} + for t, c in obj.data.items(): + if c == 0: + continue + if t.op is Op.COMPLEX and c != 1: + t = t * c + c = 1 + if t.op is Op.TERMS: + for t1, c1 in t.data.items(): + _pairs_add(d, t1, c1 * c) + else: + _pairs_add(d, t, c) + if len(d) == 0: + # TODO: determine correct kind + return as_number(0) + elif len(d) == 1: + (t, c), = d.items() + if c == 1: + return t + return Expr(Op.TERMS, d) + + if obj.op is Op.FACTORS: + coeff = 1 + d = {} + for b, e in obj.data.items(): + if e == 0: + continue + if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1: + # expand integer powers of sums + b = b * (b ** (e - 1)) + e = 1 + + if b.op in (Op.INTEGER, Op.REAL): + if e == 1: + coeff *= b.data[0] + elif e > 0: + coeff *= b.data[0] ** e + else: + _pairs_add(d, b, e) + elif b.op is Op.FACTORS: + if e > 0 and isinstance(e, integer_types): + for b1, e1 in b.data.items(): + _pairs_add(d, b1, e1 * e) + else: + _pairs_add(d, b, e) + else: + _pairs_add(d, b, e) + if len(d) == 0 or coeff == 0: + # TODO: determine correct kind + assert isinstance(coeff, number_types) + return as_number(coeff) + elif len(d) == 1: + (b, e), = d.items() + if e == 1: + t = b + else: + t = Expr(Op.FACTORS, d) + if coeff == 1: + return t + return Expr(Op.TERMS, {t: coeff}) + elif coeff == 1: + return Expr(Op.FACTORS, d) + else: + return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff}) + + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + dividend, divisor = obj.data[1] + t1, c1 = as_term_coeff(dividend) + t2, c2 = as_term_coeff(divisor) + if isinstance(c1, integer_types) and isinstance(c2, integer_types): + g = gcd(c1, c2) + c1, c2 = c1 // g, c2 // g + else: + c1, c2 = c1 / c2, 1 + + if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV: + numer = t1.data[1][0] * c1 + denom = t1.data[1][1] * t2 * c2 + return as_apply(ArithOp.DIV, numer, denom) + + if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV: + numer = t2.data[1][1] * t1 * c1 + denom = t2.data[1][0] * c2 + return as_apply(ArithOp.DIV, numer, denom) + + d = dict(as_factors(t1).data) + for b, e in as_factors(t2).data.items(): + _pairs_add(d, b, -e) + numer, denom = {}, {} + for b, e in d.items(): + if e > 0: + numer[b] = e + else: + denom[b] = -e + numer = normalize(Expr(Op.FACTORS, numer)) * c1 + denom = normalize(Expr(Op.FACTORS, denom)) * c2 + + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1: + # TODO: denom kind not used + return numer + return as_apply(ArithOp.DIV, numer, denom) + + if obj.op is Op.CONCAT: + lst = [obj.data[0]] + for s in obj.data[1:]: + last = lst[-1] + if ( + last.op is Op.STRING + and s.op is Op.STRING + and last.data[0][0] in '"\'' + and s.data[0][0] == last.data[0][-1] + ): + new_last = as_string(last.data[0][:-1] + s.data[0][1:], + max(last.data[1], s.data[1])) + lst[-1] = new_last + else: + lst.append(s) + if len(lst) == 1: + return lst[0] + return Expr(Op.CONCAT, tuple(lst)) + + if obj.op is Op.TERNARY: + cond, expr1, expr2 = map(normalize, obj.data) + if cond.op is Op.INTEGER: + return expr1 if cond.data[0] else expr2 + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + return obj + + +def as_expr(obj): + """Convert non-Expr objects to Expr objects. 
+ """ + if isinstance(obj, complex): + return as_complex(obj.real, obj.imag) + if isinstance(obj, number_types): + return as_number(obj) + if isinstance(obj, str): + # STRING expression holds string with boundary quotes, hence + # applying repr: + return as_string(repr(obj)) + if isinstance(obj, tuple): + return tuple(map(as_expr, obj)) + return obj + + +def as_symbol(obj): + """Return object as SYMBOL expression (variable or unparsed expression). + """ + return Expr(Op.SYMBOL, obj) + + +def as_number(obj, kind=4): + """Return object as INTEGER or REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op in (Op.INTEGER, Op.REAL): + return obj + raise OpError(f'cannot convert {obj} to INTEGER or REAL constant') + + +def as_integer(obj, kind=4): + """Return object as INTEGER constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.INTEGER: + return obj + raise OpError(f'cannot convert {obj} to INTEGER constant') + + +def as_real(obj, kind=4): + """Return object as REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.REAL, (float(obj), kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.REAL: + return obj + elif obj.op is Op.INTEGER: + return Expr(Op.REAL, (float(obj.data[0]), kind)) + raise OpError(f'cannot convert {obj} to REAL constant') + + +def as_string(obj, kind=1): + """Return object as STRING expression (string literal constant). + """ + return Expr(Op.STRING, (obj, kind)) + + +def as_array(obj): + """Return object as ARRAY expression (array constant). + """ + if isinstance(obj, Expr): + obj = obj, + return Expr(Op.ARRAY, obj) + + +def as_complex(real, imag=0): + """Return object as COMPLEX expression (complex literal constant). + """ + return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag))) + + +def as_apply(func, *args, **kwargs): + """Return object as APPLY expression (function call, constructor, etc.) + """ + return Expr(Op.APPLY, + (func, tuple(map(as_expr, args)), + {k: as_expr(v) for k, v in kwargs.items()})) + + +def as_ternary(cond, expr1, expr2): + """Return object as TERNARY expression (cond?expr1:expr2). + """ + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + +def as_ref(expr): + """Return object as referencing expression. + """ + return Expr(Op.REF, expr) + + +def as_deref(expr): + """Return object as dereferencing expression. + """ + return Expr(Op.DEREF, expr) + + +def as_eq(left, right): + return Expr(Op.RELATIONAL, (RelOp.EQ, left, right)) + + +def as_ne(left, right): + return Expr(Op.RELATIONAL, (RelOp.NE, left, right)) + + +def as_lt(left, right): + return Expr(Op.RELATIONAL, (RelOp.LT, left, right)) + + +def as_le(left, right): + return Expr(Op.RELATIONAL, (RelOp.LE, left, right)) + + +def as_gt(left, right): + return Expr(Op.RELATIONAL, (RelOp.GT, left, right)) + + +def as_ge(left, right): + return Expr(Op.RELATIONAL, (RelOp.GE, left, right)) + + +def as_terms(obj): + """Return expression as TERMS expression. 
+ """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.TERMS: + return obj + if obj.op is Op.INTEGER: + return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]}) + if obj.op is Op.REAL: + return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]}) + return Expr(Op.TERMS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_factors(obj): + """Return expression as FACTORS expression. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.FACTORS: + return obj + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + if coeff == 1: + return Expr(Op.FACTORS, {term: 1}) + return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) + if (obj.op is Op.APPLY + and obj.data[0] is ArithOp.DIV + and not obj.data[2]): + return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) + return Expr(Op.FACTORS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_term_coeff(obj): + """Return expression as term-coefficient pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.INTEGER: + return as_integer(1, obj.data[1]), obj.data[0] + if obj.op is Op.REAL: + return as_real(1, obj.data[1]), obj.data[0] + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + return term, coeff + # TODO: find common divisor of coefficients + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + t, c = as_term_coeff(obj.data[1][0]) + return as_apply(ArithOp.DIV, t, obj.data[1][1]), c + return obj, 1 + raise OpError(f'cannot convert {type(obj)} to term and coeff') + + +def as_numer_denom(obj): + """Return expression as numer-denom pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL, + Op.INDEXING, Op.TERNARY): + return obj, as_number(1) + elif obj.op is Op.APPLY: + if obj.data[0] is ArithOp.DIV and not obj.data[2]: + numers, denoms = map(as_numer_denom, obj.data[1]) + return numers[0] * denoms[1], numers[1] * denoms[0] + return obj, as_number(1) + elif obj.op is Op.TERMS: + numers, denoms = [], [] + for term, coeff in obj.data.items(): + n, d = as_numer_denom(term) + n = n * coeff + numers.append(n) + denoms.append(d) + numer, denom = as_number(0), as_number(1) + for i in range(len(numers)): + n = numers[i] + for j in range(len(numers)): + if i != j: + n *= denoms[j] + numer += n + denom *= denoms[i] + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0: + numer, denom = -numer, -denom + return numer, denom + elif obj.op is Op.FACTORS: + numer, denom = as_number(1), as_number(1) + for b, e in obj.data.items(): + bnumer, bdenom = as_numer_denom(b) + if e > 0: + numer *= bnumer ** e + denom *= bdenom ** e + elif e < 0: + numer *= bdenom ** (-e) + denom *= bnumer ** (-e) + return numer, denom + raise OpError(f'cannot convert {type(obj)} to numer and denom') + + +def _counter(): + # Used internally to generate unique dummy symbols + counter = 0 + while True: + counter += 1 + yield counter + + +COUNTER = _counter() + + +def eliminate_quotes(s): + """Replace quoted substrings of input string. + + Return a new string and a mapping of replacements. 
+ """ + d = {} + + def repl(m): + kind, value = m.groups()[:2] + if kind: + # remove trailing underscore + kind = kind[:-1] + p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]] + k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@' + d[k] = value + return k + + new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format( + kind=r'\w[\w\d_]*', + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")'), + repl, s) + + assert '"' not in new_s + assert "'" not in new_s + + return new_s, d + + +def insert_quotes(s, d): + """Inverse of eliminate_quotes. + """ + for k, v in d.items(): + kind = k[:k.find('@')] + if kind: + kind += '_' + s = s.replace(k, kind + v) + return s + + +def replace_parenthesis(s): + """Replace substrings of input that are enclosed in parenthesis. + + Return a new string and a mapping of replacements. + """ + # Find a parenthesis pair that appears first. + + # Fortran deliminator are `(`, `)`, `[`, `]`, `(/', '/)`, `/`. + # We don't handle `/` deliminator because it is not a part of an + # expression. + left, right = None, None + mn_i = len(s) + for left_, right_ in (('(/', '/)'), + '()', + '{}', # to support C literal structs + '[]'): + i = s.find(left_) + if i == -1: + continue + if i < mn_i: + mn_i = i + left, right = left_, right_ + + if left is None: + return s, {} + + i = mn_i + j = s.find(right, i) + + while s.count(left, i + 1, j) != s.count(right, i + 1, j): + j = s.find(right, j + 1) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') + + p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left] + + k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@' + v = s[i + len(left):j] + r, d = replace_parenthesis(s[j + len(right):]) + d[k] = v + return s[:i] + k + r, d + + +def _get_parenthesis_kind(s): + assert s.startswith('@__f2py_PARENTHESIS_'), s + return s.split('_')[4] + + +def unreplace_parenthesis(s, d): + """Inverse of replace_parenthesis. + """ + for k, v in d.items(): + p = _get_parenthesis_kind(k) + left = {'ROUND': '(', 'SQUARE': '[', 'CURLY': '{', 'ROUNDDIV': '(/'}[p] + right = {'ROUND': ')', 'SQUARE': ']', 'CURLY': '}', 'ROUNDDIV': '/)'}[p] + s = s.replace(k, left + v + right) + return s + + +def fromstring(s, language=Language.C): + """Create an expression from a string. + + This is a "lazy" parser, that is, only arithmetic operations are + resolved, non-arithmetic operations are treated as symbols. + """ + r = _FromStringWorker(language=language).parse(s) + if isinstance(r, Expr): + return r + raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`') + + +class _Pair: + # Internal class to represent a pair of expressions + + def __init__(self, left, right): + self.left = left + self.right = right + + def substitute(self, symbols_map): + left, right = self.left, self.right + if isinstance(left, Expr): + left = left.substitute(symbols_map) + if isinstance(right, Expr): + right = right.substitute(symbols_map) + return _Pair(left, right) + + def __repr__(self): + return f'{type(self).__name__}({self.left}, {self.right})' + + +class _FromStringWorker: + + def __init__(self, language=Language.C): + self.original = None + self.quotes_map = None + self.language = language + + def finalize_string(self, s): + return insert_quotes(s, self.quotes_map) + + def parse(self, inp): + self.original = inp + unquoted, self.quotes_map = eliminate_quotes(inp) + return self.process(unquoted) + + def process(self, s, context='expr'): + """Parse string within the given context. 
+def fromstring(s, language=Language.C): + """Create an expression from a string. + + This is a "lazy" parser, that is, only arithmetic operations are + resolved, non-arithmetic operations are treated as symbols. + """ + r = _FromStringWorker(language=language).parse(s) + if isinstance(r, Expr): + return r + raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`')
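fromstring is the public entry point around the _FromStringWorker class defined below: it parses arithmetic eagerly and leaves everything else symbolic. A small usage sketch, not part of the patched numpy file; it assumes the vendored numpy.f2py.symbolic module:

    # Illustration only -- not part of the patched file.
    from numpy.f2py.symbolic import Language, as_symbol, fromstring

    e = fromstring('3 * x - 3 * x + y')
    assert e == as_symbol('y')               # arithmetic is resolved while parsing

    r = fromstring('a .le. b', language=Language.Fortran)
    print(r.tostring(language=Language.C))   # expected: a <= b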
+class _Pair: + # Internal class to represent a pair of expressions + + def __init__(self, left, right): + self.left = left + self.right = right + + def substitute(self, symbols_map): + left, right = self.left, self.right + if isinstance(left, Expr): + left = left.substitute(symbols_map) + if isinstance(right, Expr): + right = right.substitute(symbols_map) + return _Pair(left, right) + + def __repr__(self): + return f'{type(self).__name__}({self.left}, {self.right})' + + +class _FromStringWorker: + + def __init__(self, language=Language.C): + self.original = None + self.quotes_map = None + self.language = language + + def finalize_string(self, s): + return insert_quotes(s, self.quotes_map) + + def parse(self, inp): + self.original = inp + unquoted, self.quotes_map = eliminate_quotes(inp) + return self.process(unquoted) + + def process(self, s, context='expr'): + """Parse string within the given context. + + The context may define the result in case of ambiguous + expressions. For instance, consider expressions `f(x, y)` and + `(x, y) + (a, b)` where `f` is a function and the pair `(x, y)` + denotes a complex number. Specifying context as "args" or + "expr", the subexpression `(x, y)` will be parsed to an + argument list or to a complex number, respectively. + """ + if isinstance(s, (list, tuple)): + return type(s)(self.process(s_, context) for s_ in s) + + assert isinstance(s, str), (type(s), s) + + # replace subexpressions in parenthesis with f2py @-names + r, raw_symbols_map = replace_parenthesis(s) + r = r.strip() + + def restore(r): + # restores subexpressions marked with f2py @-names + if isinstance(r, (list, tuple)): + return type(r)(map(restore, r)) + return unreplace_parenthesis(r, raw_symbols_map) + + # comma-separated tuple + if ',' in r: + operands = restore(r.split(',')) + if context == 'args': + return tuple(self.process(operands)) + if context == 'expr': + if len(operands) == 2: + # complex number literal + return as_complex(*self.process(operands)) + raise NotImplementedError( + f'parsing comma-separated list (context={context}): {r}') + + # ternary operation + m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r) + if m: + assert context == 'expr', context + oper, expr1, expr2 = restore(m.groups()) + oper = self.process(oper) + expr1 = self.process(expr1) + expr2 = self.process(expr2) + return as_ternary(oper, expr1, expr2) + + # relational expression + if self.language is Language.Fortran: + m = re.match( + r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I) + else: + m = re.match( + r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r) + if m: + left, rop, right = m.groups() + if self.language is Language.Fortran: + rop = '.' + rop + '.' + left, right = self.process(restore((left, right))) + rop = RelOp.fromstring(rop, language=self.language) + return Expr(Op.RELATIONAL, (rop, left, right)) + + # keyword argument + m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r) + if m: + keyname, value = m.groups() + value = restore(value) + return _Pair(keyname, self.process(value)) + + # addition/subtraction operations + operands = re.split(r'((?<!\d[edED])[+-])', r) + if len(operands) > 1: + result = self.process(restore(operands[0] or '0')) + for op, operand in zip(operands[1::2], operands[2::2]): + operand = self.process(restore(operand)) + op = op.strip() + if op == '+': + result += operand + else: + assert op == '-' + result -= operand + return result + + # string concatenate operation + if self.language is Language.Fortran and '//' in r: + operands = restore(r.split('//')) + return Expr(Op.CONCAT, + tuple(self.process(operands))) + + # multiplication/division operations + operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)', + (r if self.language is Language.C + else r.replace('**', '@__f2py_DOUBLE_STAR@'))) + if len(operands) > 1: + operands = restore(operands) + if self.language is not Language.C: + operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**') + for operand in operands] + # Expression is an arithmetic product + result = self.process(operands[0]) + for op, operand in zip(operands[1::2], operands[2::2]): + operand = self.process(operand) + op = op.strip() + if op == '*': + result *= operand + else: + assert op == '/' + result /= operand + return result + + # referencing/dereferencing + if r.startswith(('*', '&')): + op = {'*': Op.DEREF, '&': Op.REF}[r[0]] + operand = self.process(restore(r[1:])) + return Expr(op, operand) + + # exponentiation operations + if self.language is not Language.C and '**' in r: + operands = list(reversed(restore(r.split('**')))) + result = self.process(operands[0]) + for operand in operands[1:]: + operand = self.process(operand) + result = operand ** result + return result + + # int-literal-constant + m = re.match(r'\A({digit_string})({kind}|)\Z'.format( + digit_string=r'\d+', + kind=r'_(\d+|\w[\w\d_]*)'), r) + if m: + value, _, kind = m.groups() + if kind and kind.isdigit(): + kind = int(kind) + return as_integer(int(value), kind or 4) + + # real-literal-constant + m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z' + .format( + significant=r'[.]\d+|\d+[.]\d*', + exponent=r'[edED][+-]?\d+', + kind=r'_(\d+|\w[\w\d_]*)'), r) + if m: + value, _, _, kind = m.groups() + if kind and kind.isdigit(): + kind = int(kind) + value = value.lower() + if 'd' in value: + return as_real(float(value.replace('d', 'e')), kind or 8) + return as_real(float(value), kind or 4) + + # string-literal-constant with kind parameter specification + if r in self.quotes_map: + kind = r[:r.find('@')] + return as_string(self.quotes_map[r], kind or 1) + + # array constructor or literal complex constant or + # parenthesized expression + if r in raw_symbols_map: + paren = _get_parenthesis_kind(r) + items = self.process(restore(raw_symbols_map[r]), + 'expr' if paren == 'ROUND' else 'args') + if paren == 'ROUND': + if isinstance(items, Expr): + return items + if paren in ['ROUNDDIV', 'SQUARE']: + # Expression is an array constructor + if isinstance(items, Expr): + items = (items,) + return as_array(items) + + # function call/indexing + m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z', + r) + if m: + target, args, paren = m.groups() + target = self.process(restore(target)) + args = self.process(restore(args)[1:-1], 'args') + if not isinstance(args, tuple): + args = args, + if paren == 'ROUND': + kwargs = {a.left: a.right for a in args + if isinstance(a, _Pair)} + args = tuple(a for a in args if not isinstance(a, _Pair)) + # Warning: this could also be a Fortran indexing operation. + return as_apply(target, *args, **kwargs) + else: + # Expression is a C/Python indexing operation + # (e.g.
used in .pyf files) + assert paren == 'SQUARE' + return target[args] + + # Fortran standard conforming identifier + m = re.match(r'\A\w[\w\d_]*\Z', r) + if m: + return as_symbol(r) + + # fall-back to symbol + r = self.finalize_string(restore(r)) + ewarn( + f'fromstring: treating {r!r} as symbol (original={self.original})') + return as_symbol(r) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/symbolic.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/symbolic.pyi new file mode 100644 index 0000000..74e7a48 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/symbolic.pyi @@ -0,0 +1,221 @@ +from collections.abc import Callable, Mapping +from enum import Enum +from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload +from typing import Literal as L + +from typing_extensions import TypeVar + +__all__ = ["Expr"] + +### + +_Tss = ParamSpec("_Tss") +_ExprT = TypeVar("_ExprT", bound=Expr) +_ExprT1 = TypeVar("_ExprT1", bound=Expr) +_ExprT2 = TypeVar("_ExprT2", bound=Expr) +_OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) +_LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) +_DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) +_LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) +_RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) + +_RelCOrPy: TypeAlias = L["==", "!=", "<", "<=", ">", ">="] +_RelFortran: TypeAlias = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] + +_ToExpr: TypeAlias = Expr | complex | str +_ToExprN: TypeAlias = _ToExpr | tuple[_ToExprN, ...] +_NestedString: TypeAlias = str | tuple[_NestedString, ...] | list[_NestedString] + +### + +class OpError(Exception): ... +class ExprWarning(UserWarning): ... + +class Language(Enum): + Python = 0 + Fortran = 1 + C = 2 + +class Op(Enum): + INTEGER = 10 + REAL = 12 + COMPLEX = 15 + STRING = 20 + ARRAY = 30 + SYMBOL = 40 + TERNARY = 100 + APPLY = 200 + INDEXING = 210 + CONCAT = 220 + RELATIONAL = 300 + TERMS = 1_000 + FACTORS = 2_000 + REF = 3_000 + DEREF = 3_001 + +class RelOp(Enum): + EQ = 1 + NE = 2 + LT = 3 + LE = 4 + GT = 5 + GE = 6 + + @overload + @classmethod + def fromstring(cls, s: _RelCOrPy, language: L[Language.C, Language.Python] = ...) -> RelOp: ... + @overload + @classmethod + def fromstring(cls, s: _RelFortran, language: L[Language.Fortran]) -> RelOp: ... + + # + @overload + def tostring(self, /, language: L[Language.C, Language.Python] = ...) -> _RelCOrPy: ... + @overload + def tostring(self, /, language: L[Language.Fortran]) -> _RelFortran: ... + +class ArithOp(Enum): + POS = 1 + NEG = 2 + ADD = 3 + SUB = 4 + MUL = 5 + DIV = 6 + POW = 7 + +class Precedence(Enum): + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + +class Expr(Generic[_OpT_co, _DataT_co]): + op: _OpT_co + data: _DataT_co + + @staticmethod + def parse(s: str, language: Language = ...) -> Expr: ... + + # + def __init__(self, /, op: Op, data: _DataT_co) -> None: ... + + # + def __lt__(self, other: Expr, /) -> bool: ... + def __le__(self, other: Expr, /) -> bool: ... + def __gt__(self, other: Expr, /) -> bool: ... + def __ge__(self, other: Expr, /) -> bool: ... + + # + def __pos__(self, /) -> Self: ... + def __neg__(self, /) -> Expr: ... + + # + def __add__(self, other: Expr, /) -> Expr: ... + def __radd__(self, other: Expr, /) -> Expr: ... + + # + def __sub__(self, other: Expr, /) -> Expr: ... 
+ def __rsub__(self, other: Expr, /) -> Expr: ... + + # + def __mul__(self, other: Expr, /) -> Expr: ... + def __rmul__(self, other: Expr, /) -> Expr: ... + + # + def __pow__(self, other: Expr, /) -> Expr: ... + + # + def __truediv__(self, other: Expr, /) -> Expr: ... + def __rtruediv__(self, other: Expr, /) -> Expr: ... + + # + def __floordiv__(self, other: Expr, /) -> Expr: ... + def __rfloordiv__(self, other: Expr, /) -> Expr: ... + + # + def __call__( + self, + /, + *args: _ToExprN, + **kwargs: _ToExprN, + ) -> Expr[L[Op.APPLY], tuple[Self, tuple[Expr, ...], dict[str, Expr]]]: ... + + # + @overload + def __getitem__(self, index: _ExprT | tuple[_ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, _ExprT]]: ... + @overload + def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... + + # + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> Expr: ... + + # + @overload + def traverse(self, /, visit: Callable[_Tss, None], *args: _Tss.args, **kwargs: _Tss.kwargs) -> Expr: ... + @overload + def traverse(self, /, visit: Callable[_Tss, _ExprT], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _ExprT: ... + + # + def contains(self, /, other: Expr) -> bool: ... + + # + def symbols(self, /) -> set[Expr]: ... + def polynomial_atoms(self, /) -> set[Expr]: ... + + # + def linear_solve(self, /, symbol: Expr) -> tuple[Expr, Expr]: ... + + # + def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... + +class _Pair(Generic[_LeftT_co, _RightT_co]): + left: _LeftT_co + right: _RightT_co + + def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... + + # + @overload + def substitute(self: _Pair[_ExprT1, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + @overload + def substitute(self: _Pair[_ExprT1, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + @overload + def substitute(self: _Pair[object, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... + @overload + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... + +class _FromStringWorker(Generic[_LanguageT_co]): + language: _LanguageT_co + + original: str | None + quotes_map: dict[str, str] + + @overload + def __init__(self: _FromStringWorker[L[Language.C]], /, language: L[Language.C] = ...) -> None: ... + @overload + def __init__(self, /, language: _LanguageT_co) -> None: ... + + # + def finalize_string(self, /, s: str) -> str: ... + + # + def parse(self, /, inp: str) -> Expr | _Pair: ... + + # + @overload + def process(self, /, s: str, context: str = "expr") -> Expr | _Pair: ... + @overload + def process(self, /, s: list[str], context: str = "expr") -> list[Expr | _Pair]: ... + @overload + def process(self, /, s: tuple[str, ...], context: str = "expr") -> tuple[Expr | _Pair, ...]: ... + @overload + def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... 
# noqa: ANN401 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__init__.py new file mode 100644 index 0000000..4ed8fdd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__init__.py @@ -0,0 +1,16 @@ +import pytest + +from numpy.testing import IS_EDITABLE, IS_WASM + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b4e61fe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-312.pyc new file mode 100644 index 0000000..afb21ae Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-312.pyc new file mode 100644 index 0000000..bf3cfa1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-312.pyc new file mode 100644 index 0000000..d23a2ab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-312.pyc new file mode 100644 index 0000000..6c22d24 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-312.pyc new file mode 100644 index 0000000..1060810 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_character.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_character.cpython-312.pyc new file mode 100644 index 0000000..b3fe4fe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_character.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-312.pyc new file mode 100644 index 
0000000..f45bf6c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-312.pyc new file mode 100644 index 0000000..f7eb82b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_data.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_data.cpython-312.pyc new file mode 100644 index 0000000..6173c49 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_data.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_docs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_docs.cpython-312.pyc new file mode 100644 index 0000000..7e4d5ea Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_docs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_f2cmap.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_f2cmap.cpython-312.pyc new file mode 100644 index 0000000..318354d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_f2cmap.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_f2py2e.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_f2py2e.cpython-312.pyc new file mode 100644 index 0000000..9a4c7c7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_f2py2e.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_isoc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_isoc.cpython-312.pyc new file mode 100644 index 0000000..2f0a2a2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_isoc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-312.pyc new file mode 100644 index 0000000..d2fe85f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-312.pyc new file mode 100644 index 0000000..07e9d8c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_modules.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_modules.cpython-312.pyc new file mode 100644 index 0000000..464290d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_modules.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-312.pyc new file mode 100644 index 0000000..c16ffdc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_pyf_src.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_pyf_src.cpython-312.pyc new file mode 100644 index 0000000..718b233 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_pyf_src.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-312.pyc new file mode 100644 index 0000000..3c47e3d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-312.pyc new file mode 100644 index 0000000..12bb2a9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-312.pyc new file mode 100644 index 0000000..51955a1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-312.pyc new file mode 100644 index 0000000..f4a3e44 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-312.pyc new file mode 100644 index 0000000..3e59485 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-312.pyc new file mode 100644 index 0000000..67ccc10 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-312.pyc new file mode 100644 index 0000000..8fca71e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_routines.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_routines.cpython-312.pyc new file mode 100644 index 0000000..481478e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_routines.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-312.pyc new file mode 100644 index 0000000..e6b79ec Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-312.pyc new file mode 100644 index 0000000..888e8de Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-312.pyc new file mode 100644 index 0000000..8132deb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_symbolic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_symbolic.cpython-312.pyc new file mode 100644 index 0000000..47c9da0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_symbolic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-312.pyc new file mode 100644 index 0000000..a7a2623 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/util.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/util.cpython-312.pyc new file mode 100644 index 0000000..ccba96f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/__pycache__/util.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 new file mode 100644 index 0000000..76d16aa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 @@ -0,0 +1,34 @@ +module ops_module + + abstract interface + subroutine op(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + end subroutine + end interface + +contains + + subroutine foo(x, y, r1, r2) + integer, intent(in) :: x, y + integer, intent(out) :: r1, r2 + procedure (op) add1, add2 + procedure (op), pointer::p + p=>add1 + call p(x, y, r1) + p=>add2 + call p(x, y, r2) + end subroutine +end module + +subroutine add1(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + y +end subroutine + +subroutine add2(x, y, z) + integer, intent(in) :: x, y + 
integer, intent(out) :: z + z = x + 2 * y +end subroutine diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 new file mode 100644 index 0000000..36791e4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 @@ -0,0 +1,6 @@ +module test + abstract interface + subroutine foo() + end subroutine + end interface +end module test diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c new file mode 100644 index 0000000..b66672a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -0,0 +1,235 @@ +/* + * This file was auto-generated with f2py (version:2_1330) and hand edited by + * Pearu for testing purposes. Do not edit this file unless you know what you + * are doing!!! + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** See f2py2e/cfuncs.py: includes ***********************/ + +#define PY_SSIZE_T_CLEAN +#include <Python.h> +#include "fortranobject.h" +#include <math.h> + +static PyObject *wrap_error; +static PyObject *wrap_module; + +/************************************ call ************************************/ +static char doc_f2py_rout_wrap_call[] = "\ +Function signature:\n\ + arr = call(type_num,dims,intent,obj)\n\ +Required arguments:\n" +" type_num : input int\n" +" dims : input int-sequence\n" +" intent : input int\n" +" obj : input python object\n" +"Return objects:\n" +" arr : array"; +static PyObject *f2py_rout_wrap_call(PyObject *capi_self, + PyObject *capi_args) { + PyObject * volatile capi_buildvalue = NULL; + int type_num = 0; + int elsize = 0; + npy_intp *dims = NULL; + PyObject *dims_capi = Py_None; + int rank = 0; + int intent = 0; + PyArrayObject *capi_arr_tmp = NULL; + PyObject *arr_capi = Py_None; + int i; + + if (!PyArg_ParseTuple(capi_args,"iiOiO|:wrap.call",\ + &type_num,&elsize,&dims_capi,&intent,&arr_capi)) + return NULL; + rank = PySequence_Length(dims_capi); + dims = malloc(rank*sizeof(npy_intp)); + for (i=0;i<rank;++i) { + PyObject *tmp; + tmp = PySequence_GetItem(dims_capi, i); + if (tmp == NULL) { + goto fail; + } + dims[i] = (npy_intp)PyLong_AsLong(tmp); + Py_DECREF(tmp); + if (dims[i] == -1 && PyErr_Occurred()) { + goto fail; + } + } + capi_arr_tmp = ndarray_from_pyobj(type_num,elsize,dims,rank, + intent|F2PY_INTENT_OUT,arr_capi, + "wrap.call failed"); + if (capi_arr_tmp == NULL) { + free(dims); + return NULL; + } + capi_buildvalue = Py_BuildValue("N",capi_arr_tmp); + free(dims); + return capi_buildvalue; + +fail: + free(dims); + return NULL; +} + +/*********************************** attrs ************************************/ +static char doc_f2py_rout_wrap_attrs[] = "\ +Function signature:\n\ + arr = array_attrs(arr)\n\ +Required arguments:\n" +" arr : input array object\n" +"Return objects:\n" +" data : data address in hex\n" +" nd : int\n" +" dimensions : tuple\n" +" strides : tuple\n" +" base : python object\n" +" (kind,type,type_num,elsize,alignment) : 5-tuple\n" +" flags : int\n" +" itemsize : int\n" +; +static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, + PyObject *capi_args) { + PyObject *arr_capi = Py_None; + PyArrayObject *arr = NULL; + PyObject *dimensions = NULL; + PyObject *strides = NULL; + char s[100]; + int i; + memset(s,0,100); + if (!PyArg_ParseTuple(capi_args,"O!|:wrap.attrs", + &PyArray_Type,&arr_capi)) + return NULL; + arr = (PyArrayObject *)arr_capi; + sprintf(s,"%p",PyArray_DATA(arr)); + dimensions = PyTuple_New(PyArray_NDIM(arr)); + strides = PyTuple_New(PyArray_NDIM(arr)); + for (i=0;i<PyArray_NDIM(arr);++i) { + PyTuple_SetItem(dimensions,i,PyLong_FromLong(PyArray_DIM(arr,i))); + PyTuple_SetItem(strides,i,PyLong_FromLong(PyArray_STRIDE(arr,i))); + } + return Py_BuildValue("siNNO(cciii)ii",s,PyArray_NDIM(arr), + dimensions,strides, + (PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)), + PyArray_DESCR(arr)->kind, + PyArray_DESCR(arr)->type, + PyArray_TYPE(arr), + PyArray_ITEMSIZE(arr), + PyDataType_ALIGNMENT(PyArray_DESCR(arr)), + PyArray_FLAGS(arr), + PyArray_ITEMSIZE(arr)); +} + +static PyMethodDef f2py_module_methods[] = { + + {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, + {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {NULL,NULL} +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "test_array_from_pyobj_ext", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { + PyObject *m,*d, *s; + m = wrap_module = PyModule_Create(&moduledef); + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); + import_array(); + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap (failed to import numpy)"); + d = PyModule_GetDict(m); + s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" + " arr = call(type_num,dims,intent,obj)\n" + "."); + PyDict_SetItemString(d, "__doc__", s); + wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); + Py_DECREF(s); + +#define ADDCONST(NAME, CONST) \ + s = PyLong_FromLong(CONST); \ + PyDict_SetItemString(d, NAME, s); \ + Py_DECREF(s) + + 
ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); + ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); + ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); + ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); + ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); + ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); + ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); + ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); + ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); + ADDCONST("NPY_BOOL", NPY_BOOL); + ADDCONST("NPY_BYTE", NPY_BYTE); + ADDCONST("NPY_UBYTE", NPY_UBYTE); + ADDCONST("NPY_SHORT", NPY_SHORT); + ADDCONST("NPY_USHORT", NPY_USHORT); + ADDCONST("NPY_INT", NPY_INT); + ADDCONST("NPY_UINT", NPY_UINT); + ADDCONST("NPY_INTP", NPY_INTP); + ADDCONST("NPY_UINTP", NPY_UINTP); + ADDCONST("NPY_LONG", NPY_LONG); + ADDCONST("NPY_ULONG", NPY_ULONG); + ADDCONST("NPY_LONGLONG", NPY_LONGLONG); + ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); + ADDCONST("NPY_FLOAT", NPY_FLOAT); + ADDCONST("NPY_DOUBLE", NPY_DOUBLE); + ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); + ADDCONST("NPY_CFLOAT", NPY_CFLOAT); + ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); + ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); + ADDCONST("NPY_OBJECT", NPY_OBJECT); + ADDCONST("NPY_STRING", NPY_STRING); + ADDCONST("NPY_UNICODE", NPY_UNICODE); + ADDCONST("NPY_VOID", NPY_VOID); + ADDCONST("NPY_NTYPES_LEGACY", NPY_NTYPES_LEGACY); + ADDCONST("NPY_NOTYPE", NPY_NOTYPE); + ADDCONST("NPY_USERDEF", NPY_USERDEF); + + ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); + ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); + ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); + ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); + ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); + ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); + ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); + ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); + ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); + + ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); + ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); + ADDCONST("CARRAY", NPY_ARRAY_CARRAY); + ADDCONST("FARRAY", NPY_ARRAY_FARRAY); + ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); + ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); + ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); + ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); + +#undef ADDCONST + + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap"); + +#ifdef F2PY_REPORT_ATEXIT + on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); +#endif + +#if Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + + return m; +} +#ifdef __cplusplus +} +#endif diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap new file mode 100644 index 0000000..2665f89 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(rk="double")) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 new file mode 100644 index 0000000..b301710 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 @@ -0,0 +1,34 @@ + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + 
enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 new file mode 100644 index 0000000..cbe6317 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 @@ -0,0 +1,41 @@ + +module mod + +contains + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum + + +end module mod diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 new file mode 100644 index 0000000..337465a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 @@ -0,0 +1,19 @@ +subroutine sum_with_use(x, res) + use precision + + implicit none + + real(kind=rk), intent(in) :: x(:) + real(kind=rk), intent(out) :: res + + integer :: i + + !print *, "size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + + end subroutine diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 new file mode 100644 index 0000000..ed6c70c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 @@ -0,0 +1,4 @@ +module precision + integer, parameter :: rk = selected_real_kind(8) + integer, parameter :: ik = selected_real_kind(4) +end module diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/block_docstring/foo.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/block_docstring/foo.f new file mode 100644 index 0000000..c8315f1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/block_docstring/foo.f @@ -0,0 +1,6 @@ + SUBROUTINE FOO() + INTEGER BAR(2, 3) + + COMMON /BLOCK/ BAR + RETURN + END diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/foo.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/foo.f new file mode 100644 index 0000000..ba397bb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/foo.f @@ -0,0 +1,62 @@ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + subroutine string_callback_array(callback, cu, lencu, a) + external callback 
+ integer callback + integer lencu + character*8 cu(lencu) + integer a +cf2py intent(out) a + + a = callback(cu, lencu) + end + + subroutine hidden_callback(a, r) + external global_f +cf2py intent(callback, hide) global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + + subroutine hidden_callback2(a, r) + external global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 new file mode 100644 index 0000000..49853af --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 @@ -0,0 +1,7 @@ +function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) +end function gh17797 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 new file mode 100644 index 0000000..92b6d75 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 @@ -0,0 +1,17 @@ + ! When gh18335_workaround is defined as an extension, + ! the issue cannot be reproduced. + !subroutine gh18335_workaround(f, y) + ! implicit none + ! external f + ! integer(kind=1) :: y(1) + ! call f(y) + !end subroutine gh18335_workaround + + function gh18335(f) result (r) + implicit none + external f + integer(kind=1) :: y(1), r + y(1) = 123 + call f(y) + r = y(1) + end function gh18335 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh25211.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh25211.f new file mode 100644 index 0000000..ba727a1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh25211.f @@ -0,0 +1,10 @@ + SUBROUTINE FOO(FUN,R) + EXTERNAL FUN + INTEGER I + REAL*8 R, FUN +Cf2py intent(out) r + R = 0D0 + DO I=-5,5 + R = R + FUN(I) + ENDDO + END diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf new file mode 100644 index 0000000..f120111 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf @@ -0,0 +1,18 @@ +python module __user__routines + interface + function fun(i) result (r) + integer :: i + real*8 :: r + end function fun + end interface +end python module __user__routines + +python module callback2 + interface + subroutine foo(f,r) + use __user__routines, f=>fun + external f + real*8 intent(out) :: r + end subroutine foo + end interface +end python module callback2 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh26681.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh26681.f90 new file mode 100644 index 0000000..00c0ec9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/callback/gh26681.f90 @@ -0,0 +1,18 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort + + subroutine do_something(message) + !f2py intent(callback, hide) mypy_abort + character(len=*), intent(in) :: message + call 
mypy_abort(message) + end subroutine do_something +end module utils diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf new file mode 100644 index 0000000..8eb5bb1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf @@ -0,0 +1,6 @@ +python module test_22819 + interface + subroutine hello() + end subroutine hello + end interface +end python module test_22819 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/hi77.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/hi77.f new file mode 100644 index 0000000..8b916eb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/hi77.f @@ -0,0 +1,3 @@ + SUBROUTINE HI + PRINT*, "HELLO WORLD" + END SUBROUTINE diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 new file mode 100644 index 0000000..981f877 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 @@ -0,0 +1,3 @@ +function hi() + print*, "Hello World" +end function diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/common/block.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/common/block.f new file mode 100644 index 0000000..7ea7968 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/common/block.f @@ -0,0 +1,11 @@ + SUBROUTINE INITCB + DOUBLE PRECISION LONG + CHARACTER STRING + INTEGER OK + + COMMON /BLOCK/ LONG, STRING, OK + LONG = 1.0 + STRING = '2' + OK = 3 + RETURN + END diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/common/gh19161.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/common/gh19161.f90 new file mode 100644 index 0000000..a2f4073 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/common/gh19161.f90 @@ -0,0 +1,10 @@ +module typedefmod + use iso_fortran_env, only: real32 +end module typedefmod + +module data + use typedefmod, only: real32 + implicit none + real(kind=real32) :: x + common/test/x +end module data diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 new file mode 100644 index 0000000..e2cbd44 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 @@ -0,0 +1,13 @@ +module foo + public + type, private, bind(c) :: a + integer :: i + end type a + type, bind(c) :: b_ + integer :: j + end type b_ + public :: b_ + type :: c + integer :: k + end type c +end module foo diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/common_with_division.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/common_with_division.f new file mode 100644 index 0000000..4aa12cf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/common_with_division.f @@ -0,0 +1,17 @@ + subroutine common_with_division + integer lmu,lb,lub,lpmin + parameter (lmu=1) + parameter (lb=20) +c crackfortran fails to parse this +c parameter (lub=(lb-1)*lmu+1) +c crackfortran can successfully parse this though + parameter (lub=lb*lmu-lmu+1) + parameter (lpmin=2) + +c crackfortran fails to parse this correctly +c common /mortmp/ ctmp((lub*(lub+1)*(lub+1))/lpmin+1) + + common /mortmp/ ctmp(lub/lpmin+1) 
+ + return + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f new file mode 100644 index 0000000..5ffd865 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYDATA + DATA MYDATA /0/ + END + SUBROUTINE SUB1 + COMMON /MYCOM/ MYDATA + MYDATA = MYDATA + 1 + END diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f new file mode 100644 index 0000000..19ff8a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f @@ -0,0 +1,5 @@ + BLOCK DATA MYBLK + IMPLICIT DOUBLE PRECISION (A-H,O-Z) + COMMON /MYCOM/ IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 + DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /2*3,2*2,0.0D0/ + END diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 new file mode 100644 index 0000000..576c5e4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 @@ -0,0 +1,20 @@ +! gh-23276 +module cmplxdat + implicit none + integer :: i, j + real :: x, y + real, dimension(2) :: z + real(kind=8) :: pi + complex(kind=8), target :: medium_ref_index + complex(kind=8), target :: ref_index_one, ref_index_two + complex(kind=8), dimension(2) :: my_array + real(kind=8), dimension(3) :: my_real_array = (/1.0d0, 2.0d0, 3.0d0/) + + data i, j / 2, 3 / + data x, y / 1.5, 2.0 / + data z / 3.5, 7.0 / + data medium_ref_index / (1.d0, 0.d0) / + data ref_index_one, ref_index_two / (13.0d0, 21.0d0), (-30.0d0, 43.0d0) / + data my_array / (1.0d0, 2.0d0), (-3.0d0, 4.0d0) / + data pi / 3.1415926535897932384626433832795028841971693993751058209749445923078164062d0 / +end module cmplxdat diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f new file mode 100644 index 0000000..4128f00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYTAB + INTEGER MYTAB(3) + DATA MYTAB/ + * 0, ! 1 and more commenty stuff + * 4, ! 
2 + * 0 / + END diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 new file mode 100644 index 0000000..e327b25 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 @@ -0,0 +1,6 @@ +module foo + type bar + character(len = 4) :: text + end type bar + type(bar), parameter :: abar = bar('abar') +end module foo diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f new file mode 100644 index 0000000..1bb2e67 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f @@ -0,0 +1,16 @@ + subroutine subb(k) + real(8), intent(inout) :: k(:) + k=k+1 + endsubroutine + + subroutine subc(w,k) + real(8), intent(in) :: w(:) + real(8), intent(out) :: k(size(w)) + k=w+1 + endsubroutine + + function t0(value) + character value + character t0 + t0 = value + endfunction diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f new file mode 100644 index 0000000..9959538 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f @@ -0,0 +1,12 @@ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf new file mode 100644 index 0000000..b3454f1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf @@ -0,0 +1,7 @@ +python module iri16py ! in + interface ! in :iri16py + block data ! in :iri16py:iridreg_modified.for + COMMON /fircom/ eden,tabhe,tabla,tabmo,tabza,tabfl + end block data + end interface +end python module iri16py diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f new file mode 100644 index 0000000..db522af --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f @@ -0,0 +1,5 @@ + SUBROUTINE EXAMPLE( ) + IF( .TRUE. ) THEN + CALL DO_SOMETHING() + END IF ! ** .TRUE. 
** + END diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 new file mode 100644 index 0000000..e0dffb5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 @@ -0,0 +1,4 @@ +integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b +end function diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 new file mode 100644 index 0000000..3b44efc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 @@ -0,0 +1,11 @@ +module test_bug + implicit none + private + public :: intproduct + +contains + integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b + end function +end module diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 new file mode 100644 index 0000000..fac262d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 @@ -0,0 +1,20 @@ +module gh23879 + implicit none + private + public :: foo + + contains + + subroutine foo(a, b) + integer, intent(in) :: a + integer, intent(out) :: b + b = a + call bar(b) + end subroutine + + subroutine bar(x) + integer, intent(inout) :: x + x = 2*x + end subroutine + + end module gh23879 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh27697.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh27697.f90 new file mode 100644 index 0000000..a5eae4e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh27697.f90 @@ -0,0 +1,12 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort +end module utils diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 new file mode 100644 index 0000000..31ea932 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 @@ -0,0 +1,13 @@ + subroutine gh2848( & + ! first 2 parameters + par1, par2,& + ! last 2 parameters + par3, par4) + + integer, intent(in) :: par1, par2 + integer, intent(out) :: par3, par4 + + par3 = par1 + par4 = par2 + + end subroutine gh2848 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 new file mode 100644 index 0000000..1d060a3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 @@ -0,0 +1,49 @@ +module foo + type bar + character(len = 32) :: item + end type bar + interface operator(.item.) + module procedure item_int, item_real + end interface operator(.item.) 
+ interface operator(==) + module procedure items_are_equal + end interface operator(==) + interface assignment(=) + module procedure get_int, get_real + end interface assignment(=) +contains + function item_int(val) result(elem) + integer, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(I32)") val + end function item_int + + function item_real(val) result(elem) + real, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(1PE32.12)") val + end function item_real + + function items_are_equal(val1, val2) result(equal) + type(bar), intent(in) :: val1, val2 + logical :: equal + + equal = (val1%item == val2%item) + end function items_are_equal + + subroutine get_real(rval, item) + real, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_real + + subroutine get_int(rval, item) + integer, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_int +end module foo diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 new file mode 100644 index 0000000..2674c21 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 @@ -0,0 +1,11 @@ +module foo + private + integer :: a + public :: setA + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 new file mode 100644 index 0000000..1db76e3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + public :: setA +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 new file mode 100644 index 0000000..46bef7c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 new file mode 100644 index 0000000..13515ce --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 @@ -0,0 +1,4 @@ +subroutine foo(x) + real(8), intent(in) :: x + ! 
Écrit à l'écran la valeur de x +end subroutine diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap new file mode 100644 index 0000000..a4425f8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(real32='float', real64='double'), integer=dict(int64='long_long')) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 new file mode 100644 index 0000000..1e1dc1d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 @@ -0,0 +1,9 @@ + subroutine func1(n, x, res) + use, intrinsic :: iso_fortran_env, only: int64, real64 + implicit none + integer(int64), intent(in) :: n + real(real64), intent(in) :: x(n) + real(real64), intent(out) :: res +!f2py intent(hide) :: n + res = sum(x) + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 new file mode 100644 index 0000000..765f7c1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 @@ -0,0 +1,34 @@ + module coddity + use iso_c_binding, only: c_double, c_int, c_int64_t + implicit none + contains + subroutine c_add(a, b, c) bind(c, name="c_add") + real(c_double), intent(in) :: a, b + real(c_double), intent(out) :: c + c = a + b + end subroutine c_add + ! gh-9693 + function wat(x, y) result(z) bind(c) + integer(c_int), intent(in) :: x, y + integer(c_int) :: z + + z = x + 7 + end function wat + ! gh-25207 + subroutine c_add_int64(a, b, c) bind(c) + integer(c_int64_t), intent(in) :: a, b + integer(c_int64_t), intent(out) :: c + c = a + b + end subroutine c_add_int64 + ! 
gh-25207 + subroutine add_arr(A, B, C) + integer(c_int64_t), intent(in) :: A(3) + integer(c_int64_t), intent(in) :: B(3) + integer(c_int64_t), intent(out) :: C(3) + integer :: j + + do j = 1, 3 + C(j) = A(j)+B(j) + end do + end subroutine + end module coddity diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/kind/foo.f90 new file mode 100644 index 0000000..d3d15cf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/kind/foo.f90 @@ -0,0 +1,20 @@ + + +subroutine selectedrealkind(p, r, res) + implicit none + + integer, intent(in) :: p, r + !f2py integer :: r=0 + integer, intent(out) :: res + res = selected_real_kind(p, r) + +end subroutine + +subroutine selectedintkind(p, res) + implicit none + + integer, intent(in) :: p + integer, intent(out) :: res + res = selected_int_kind(p) + +end subroutine diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo.f new file mode 100644 index 0000000..c347425 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo.f @@ -0,0 +1,5 @@ + subroutine bar11(a) +cf2py intent(out) a + integer a + a = 11 + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 new file mode 100644 index 0000000..7543a6a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 @@ -0,0 +1,8 @@ + module foo_fixed + contains + subroutine bar12(a) +!f2py intent(out) a + integer a + a = 12 + end subroutine bar12 + end module foo_fixed diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 new file mode 100644 index 0000000..c1b641f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 @@ -0,0 +1,8 @@ +module foo_free +contains + subroutine bar13(a) + !f2py intent(out) a + integer a + a = 13 + end subroutine bar13 +end module foo_free diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh25337/data.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh25337/data.f90 new file mode 100644 index 0000000..483d13c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh25337/data.f90 @@ -0,0 +1,8 @@ +module data + real(8) :: shift +contains + subroutine set_shift(in_shift) + real(8), intent(in) :: in_shift + shift = in_shift + end subroutine set_shift +end module data diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 new file mode 100644 index 0000000..b3fae8b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 @@ -0,0 +1,6 @@ +subroutine shift_a(dim_a, a) + use data, only: shift + integer, intent(in) :: dim_a + real(8), intent(inout), dimension(dim_a) :: a + a = a + shift +end subroutine shift_a diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 new file mode 100644 index 0000000..07adce5 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + private mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + private :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 new file mode 100644 index 0000000..b7fb95b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + PUBLIC :: mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + PUBLIC :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 new file mode 100644 index 0000000..4505e0c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 @@ -0,0 +1,12 @@ +module mod + integer :: i + integer :: x(4) + real, dimension(2,3) :: a + real, allocatable, dimension(:,:) :: b +contains + subroutine foo + integer :: k + k = 1 + a(1,2) = a(1,2)+3 + end subroutine foo +end module mod diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/use_modules.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/use_modules.f90 new file mode 100644 index 0000000..aa40c86 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/modules/use_modules.f90 @@ -0,0 +1,20 @@ +module mathops + implicit none +contains + function add(a, b) result(c) + integer, intent(in) :: a, b + integer :: c + c = a + b + end function add +end module mathops + +module useops + use mathops, only: add + implicit none +contains + function sum_and_double(a, b) result(d) + integer, intent(in) :: a, b + integer :: d + d = 2 * add(a, b) + end function sum_and_double +end module useops diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 new file mode 100644 index 0000000..bf1fa92 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 @@ -0,0 +1,7 @@ +subroutine foo(is_, ie_, arr, tout) + implicit none + integer :: is_,ie_ + real, intent(in) :: arr(is_:ie_) + real, intent(out) :: tout(is_:ie_) + tout = arr +end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_array.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_array.f90 new file mode 100644 index 0000000..9a6bf81 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_array.f90 @@ -0,0 +1,45 @@ +! Check that parameter arrays are correctly intercepted. 
+subroutine foo_array(x, y, z)
+ implicit none
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter :: pa = 2
+ integer, parameter :: intparamarray(2) = (/ 3, 5 /)
+ integer, dimension(pa), parameter :: pb = (/ 2, 10 /)
+ integer, parameter, dimension(intparamarray(1)) :: pc = (/ 2, 10, 20 /)
+ real(dp), parameter :: doubleparamarray(3) = (/ 3.14_dp, 4._dp, 6.44_dp /)
+ real(dp), intent(inout) :: x(intparamarray(1))
+ real(dp), intent(inout) :: y(intparamarray(2))
+ real(dp), intent(out) :: z
+
+ x = x/pb(2)
+ y = y*pc(2)
+ z = doubleparamarray(1)*doubleparamarray(2) + doubleparamarray(3)
+
+ return
+end subroutine
+
+subroutine foo_array_any_index(x, y)
+ implicit none
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter, dimension(-1:1) :: myparamarray = (/ 6, 3, 1 /)
+ integer, parameter, dimension(2) :: nested = (/ 2, 0 /)
+ integer, parameter :: dim = 2
+ real(dp), intent(in) :: x(myparamarray(-1))
+ real(dp), intent(out) :: y(nested(1), myparamarray(nested(dim)))
+
+ y = reshape(x, (/nested(1), myparamarray(nested(2))/))
+
+ return
+end subroutine
+
+subroutine foo_array_delims(x)
+ implicit none
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter, dimension(2) :: myparamarray = (/ (6), 1 /)
+ integer, parameter, dimension(3) :: test = (/2, 1, (3)/)
+ real(dp), intent(out) :: x
+
+ x = myparamarray(1)+test(3)
+
+ return
+end subroutine
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90
new file mode 100644
index 0000000..ac90ced
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90
@@ -0,0 +1,57 @@
+! Check that parameters are correctly intercepted.
+! Constants with comma separations are commonly
+! used, for instance Pi = 3._dp
+subroutine foo(x)
+ implicit none
+ integer, parameter :: sp = selected_real_kind(6)
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter :: ii = selected_int_kind(9)
+ integer, parameter :: il = selected_int_kind(18)
+ real(dp), intent(inout) :: x
+ dimension x(3)
+ real(sp), parameter :: three_s = 3._sp
+ real(dp), parameter :: three_d = 3._dp
+ integer(ii), parameter :: three_i = 3_ii
+ integer(il), parameter :: three_l = 3_il
+ x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l
+ x(2) = x(2) * three_s
+ x(3) = x(3) * three_l
+ return
+end subroutine
+
+
+subroutine foo_no(x)
+ implicit none
+ integer, parameter :: sp = selected_real_kind(6)
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter :: ii = selected_int_kind(9)
+ integer, parameter :: il = selected_int_kind(18)
+ real(dp), intent(inout) :: x
+ dimension x(3)
+ real(sp), parameter :: three_s = 3.
+ real(dp), parameter :: three_d = 3.
+ integer(ii), parameter :: three_i = 3
+ integer(il), parameter :: three_l = 3
+ x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l
+ x(2) = x(2) * three_s
+ x(3) = x(3) * three_l
+ return
+end subroutine
+
+subroutine foo_sum(x)
+ implicit none
+ integer, parameter :: sp = selected_real_kind(6)
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter :: ii = selected_int_kind(9)
+ integer, parameter :: il = selected_int_kind(18)
+ real(dp), intent(inout) :: x
+ dimension x(3)
+ real(sp), parameter :: three_s = 2._sp + 1._sp
+ real(dp), parameter :: three_d = 1._dp + 2._dp
+ integer(ii), parameter :: three_i = 2_ii + 1_ii
+ integer(il), parameter :: three_l = 1_il + 2_il
+ x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l
+ x(2) = x(2) * three_s
+ x(3) = x(3) * three_l
+ return
+end subroutine
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90
new file mode 100644
index 0000000..e51f5e9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90
@@ -0,0 +1,15 @@
+! Check that parameters are correctly intercepted.
+! Constants with comma separations are commonly
+! used, for instance Pi = 3._dp
+subroutine foo_compound_int(x)
+ implicit none
+ integer, parameter :: ii = selected_int_kind(9)
+ integer(ii), intent(inout) :: x
+ dimension x(3)
+ integer(ii), parameter :: three = 3_ii
+ integer(ii), parameter :: two = 2_ii
+ integer(ii), parameter :: six = three * 1_ii * two
+
+ x(1) = x(1) + x(2) + x(3) * six
+ return
+end subroutine
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90
new file mode 100644
index 0000000..aaa83d2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90
@@ -0,0 +1,22 @@
+! Check that parameters are correctly intercepted.
+! Constants with comma separations are commonly
+! used, for instance Pi = 3._dp
+subroutine foo_int(x)
+ implicit none
+ integer, parameter :: ii = selected_int_kind(9)
+ integer(ii), intent(inout) :: x
+ dimension x(3)
+ integer(ii), parameter :: three = 3_ii
+ x(1) = x(1) + x(2) + x(3) * three
+ return
+end subroutine
+
+subroutine foo_long(x)
+ implicit none
+ integer, parameter :: ii = selected_int_kind(18)
+ integer(ii), intent(inout) :: x
+ dimension x(3)
+ integer(ii), parameter :: three = 3_ii
+ x(1) = x(1) + x(2) + x(3) * three
+ return
+end subroutine
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90
new file mode 100644
index 0000000..62c9a5b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90
@@ -0,0 +1,23 @@
+! Check that parameters are correctly intercepted.
+! Specifically that types of constants without
+! compound kind specs are correctly inferred
+! adapted Gibbs iteration code from pymc
+!
for this test case
+subroutine foo_non_compound_int(x)
+ implicit none
+ integer, parameter :: ii = selected_int_kind(9)
+
+ integer(ii) maxiterates
+ parameter (maxiterates=2)
+
+ integer(ii) maxseries
+ parameter (maxseries=2)
+
+ integer(ii) wasize
+ parameter (wasize=maxiterates*maxseries)
+ integer(ii), intent(inout) :: x
+ dimension x(wasize)
+
+ x(1) = x(1) + x(2) + x(3) + x(4) * wasize
+ return
+end subroutine
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90
new file mode 100644
index 0000000..02ac9dd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90
@@ -0,0 +1,23 @@
+! Check that parameters are correctly intercepted.
+! Constants with comma separations are commonly
+! used, for instance Pi = 3._dp
+subroutine foo_single(x)
+ implicit none
+ integer, parameter :: rp = selected_real_kind(6)
+ real(rp), intent(inout) :: x
+ dimension x(3)
+ real(rp), parameter :: three = 3._rp
+ x(1) = x(1) + x(2) + x(3) * three
+ return
+end subroutine
+
+subroutine foo_double(x)
+ implicit none
+ integer, parameter :: rp = selected_real_kind(15)
+ real(rp), intent(inout) :: x
+ dimension x(3)
+ real(rp), parameter :: three = 3._rp
+ x(1) = x(1) + x(2) + x(3) * three
+ return
+end subroutine
+
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/quoted_character/foo.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/quoted_character/foo.f
new file mode 100644
index 0000000..9dc1cfa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/quoted_character/foo.f
@@ -0,0 +1,14 @@
+ SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6)
+ CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR
+ PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!",
+ 1 OPENPAR="(", CLOSEPAR=")")
+ CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
+Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
+ OUT1 = SINGLE
+ OUT2 = DOUBLE
+ OUT3 = SEMICOL
+ OUT4 = EXCLA
+ OUT5 = OPENPAR
+ OUT6 = CLOSEPAR
+ RETURN
+ END
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/AB.inc b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/AB.inc
new file mode 100644
index 0000000..8a02f63
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/AB.inc
@@ -0,0 +1 @@
+real(8) b, n, m
diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/assignOnlyModule.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/assignOnlyModule.f90
new file mode 100644
index 0000000..479ac79
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/assignOnlyModule.f90
@@ -0,0 +1,25 @@
+ MODULE MOD_TYPES
+ INTEGER, PARAMETER :: SP = SELECTED_REAL_KIND(6, 37)
+ INTEGER, PARAMETER :: DP = SELECTED_REAL_KIND(15, 307)
+ END MODULE
+!
+ MODULE F_GLOBALS
+ USE MOD_TYPES
+ IMPLICIT NONE
+ INTEGER, PARAMETER :: N_MAX = 16
+ INTEGER, PARAMETER :: I_MAX = 18
+ INTEGER, PARAMETER :: J_MAX = 72
+ REAL(SP) :: XREF
+ END MODULE F_GLOBALS
+!
+ SUBROUTINE DUMMY ()
+!
+ USE F_GLOBALS
+ USE MOD_TYPES
+ IMPLICIT NONE
+!
+ REAL(SP) :: MINIMAL
+ MINIMAL = 0.01*XREF
+ RETURN
+!
+ END SUBROUTINE DUMMY diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/datonly.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/datonly.f90 new file mode 100644 index 0000000..67fc4ac --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/datonly.f90 @@ -0,0 +1,17 @@ +module datonly + implicit none + integer, parameter :: max_value = 100 + real, dimension(:), allocatable :: data_array +end module datonly + +module dat + implicit none + integer, parameter :: max_= 1009 +end module dat + +subroutine simple_subroutine(ain, aout) + use dat, only: max_ + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + max_ +end subroutine simple_subroutine diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f77comments.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f77comments.f new file mode 100644 index 0000000..452a01a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f77comments.f @@ -0,0 +1,26 @@ + SUBROUTINE TESTSUB( + & INPUT1, INPUT2, !Input + & OUTPUT1, OUTPUT2) !Output + + IMPLICIT NONE + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + + OUTPUT1 = INPUT1 + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 + + RETURN + END SUBROUTINE TESTSUB + + SUBROUTINE TESTSUB2(OUTPUT) + IMPLICIT NONE + INTEGER, PARAMETER :: N = 10 ! Array dimension + REAL, INTENT(OUT) :: OUTPUT(N) + INTEGER :: I + + DO I = 1, N + OUTPUT(I) = I * 2.0 + END DO + + RETURN + END diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f77fixedform.f95 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f77fixedform.f95 new file mode 100644 index 0000000..e47a13f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f77fixedform.f95 @@ -0,0 +1,5 @@ +C This is an invalid file, but it does compile with -ffixed-form + subroutine mwe( + & x) + real x + end subroutine mwe diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f90continuation.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f90continuation.f90 new file mode 100644 index 0000000..879e716 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/f90continuation.f90 @@ -0,0 +1,9 @@ +SUBROUTINE TESTSUB(INPUT1, & ! Hello +! commenty +INPUT2, OUTPUT1, OUTPUT2) ! more comments + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + OUTPUT1 = INPUT1 + & + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 +END SUBROUTINE TESTSUB diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/incfile.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/incfile.f90 new file mode 100644 index 0000000..276ef3a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/incfile.f90 @@ -0,0 +1,5 @@ +function add(n,m) result(b) + implicit none + include 'AB.inc' + b = n + m +end function add diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/inout.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/inout.f90 new file mode 100644 index 0000000..80cdad9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/inout.f90 @@ -0,0 +1,9 @@ +! Check that intent(in out) translates as intent(inout). +! The separation seems to be a common usage. 
+ subroutine foo(x) + implicit none + real(4), intent(in out) :: x + dimension x(3) + x(1) = x(1) + x(2) + x(3) + return + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 new file mode 100644 index 0000000..1c4b8c1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 @@ -0,0 +1,5 @@ +subroutine inquire_next(IU) + IMPLICIT NONE + integer :: IU + !f2py intent(in) IU +end subroutine diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 0000000..7692c82 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_character/foo77.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_character/foo77.f new file mode 100644 index 0000000..facae10 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_character/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + ts = value + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 new file mode 100644 index 0000000..36182bc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine 
s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_complex/foo77.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_complex/foo77.f new file mode 100644 index 0000000..37a1ec8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_complex/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value + double complex td +cf2py intent(out) td + td = value + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 new file mode 100644 index 0000000..adc27b4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value + end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_complex diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_integer/foo77.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_integer/foo77.f new file mode 100644 index 0000000..1ab895b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_integer/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py 
intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = value + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 new file mode 100644 index 0000000..ba9249a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_logical/foo77.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_logical/foo77.f new file mode 100644 index 0000000..ef53014 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_logical/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 new file mode 100644 index 0000000..a452646 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_logical 
+ contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_logical diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_real/foo77.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_real/foo77.f new file mode 100644 index 0000000..bf43dbf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_real/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 new file mode 100644 index 0000000..df97199 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real diff --git 
a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/funcfortranname.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/funcfortranname.f new file mode 100644 index 0000000..89be972 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/funcfortranname.f @@ -0,0 +1,5 @@ + REAL*8 FUNCTION FUNCFORTRANNAME(A,B) + REAL*8 A, B + FUNCFORTRANNAME = A + B + RETURN + END FUNCTION diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/funcfortranname.pyf b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/funcfortranname.pyf new file mode 100644 index 0000000..8730ca6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/funcfortranname.pyf @@ -0,0 +1,11 @@ +python module funcfortranname ! in + interface ! in :funcfortranname + function funcfortranname_default(a,b) ! in :funcfortranname:funcfortranname.f + fortranname funcfortranname + real*8 :: a + real*8 :: b + real*8 :: funcfortranname_default + real*8, intent(out) :: funcfortranname + end function funcfortranname_default + end interface +end python module funcfortranname diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/subrout.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/subrout.f new file mode 100644 index 0000000..1d1eeae --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/subrout.f @@ -0,0 +1,4 @@ + SUBROUTINE SUBROUT(A,B,C) + REAL*8 A, B, C + C = A + B + END SUBROUTINE diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/subrout.pyf b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/subrout.pyf new file mode 100644 index 0000000..e27cbe1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/routines/subrout.pyf @@ -0,0 +1,10 @@ +python module subrout ! in + interface ! in :subrout + subroutine subrout_default(a,b,c) ! 
in :subrout:subrout.f + fortranname subrout + real*8 :: a + real*8 :: b + real*8, intent(out) :: c + end subroutine subrout_default + end interface +end python module subrout diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/size/foo.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/size/foo.f90 new file mode 100644 index 0000000..5b66f8c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/size/foo.f90 @@ -0,0 +1,44 @@ + +subroutine foo(a, n, m, b) + implicit none + + real, intent(in) :: a(n, m) + integer, intent(in) :: n, m + real, intent(out) :: b(size(a, 1)) + + integer :: i + + do i = 1, size(b) + b(i) = sum(a(i,:)) + enddo +end subroutine + +subroutine trans(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x,2), size(x,1) ) :: y + integer :: N, M, i, j + N = size(x,1) + M = size(x,2) + DO i=1,N + do j=1,M + y(j,i) = x(i,j) + END DO + END DO +end subroutine trans + +subroutine flatten(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x) ) :: y + integer :: N, M, i, j, k + N = size(x,1) + M = size(x,2) + k = 1 + DO i=1,N + do j=1,M + y(k) = x(i,j) + k = k + 1 + END DO + END DO +end subroutine flatten diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/char.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/char.f90 new file mode 100644 index 0000000..bb7985c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/char.f90 @@ -0,0 +1,29 @@ +MODULE char_test + +CONTAINS + +SUBROUTINE change_strings(strings, n_strs, out_strings) + IMPLICIT NONE + + ! Inputs + INTEGER, INTENT(IN) :: n_strs + CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings + CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings + +!f2py INTEGER, INTENT(IN) :: n_strs +!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings +!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings + + ! Misc. + INTEGER*4 :: j + + + DO j=1, n_strs + out_strings(1,j) = strings(1,j) + out_strings(2,j) = 'A' + END DO + +END SUBROUTINE change_strings + +END MODULE char_test + diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 new file mode 100644 index 0000000..7fd1585 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 @@ -0,0 +1,34 @@ +function sint(s) result(i) + implicit none + character(len=*) :: s + integer :: j, i + i = 0 + do j=len(s), 1, -1 + if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then + i = i + ichar(s(j:j)) * 10 ** (j - 1) + endif + end do + return + end function sint + + function test_in_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4) :: a + integer :: i + i = sint(a) + a(1:1) = 'A' + return + end function test_in_bytes4 + + function test_inout_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4), intent(inout) :: a + integer :: i + if (a(1:1).ne.' 
') then + a(1:1) = 'E' + endif + i = sint(a) + return + end function test_inout_bytes4 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh24008.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh24008.f new file mode 100644 index 0000000..ab64cf7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh24008.f @@ -0,0 +1,8 @@ + SUBROUTINE GREET(NAME, GREETING) + CHARACTER NAME*(*), GREETING*(*) + CHARACTER*(50) MESSAGE + + MESSAGE = 'Hello, ' // NAME // ', ' // GREETING +c$$$ PRINT *, MESSAGE + + END SUBROUTINE GREET diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh24662.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh24662.f90 new file mode 100644 index 0000000..ca53413 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh24662.f90 @@ -0,0 +1,7 @@ +subroutine string_inout_optional(output) + implicit none + character*(32), optional, intent(inout) :: output + if (present(output)) then + output="output string" + endif +end subroutine diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286.f90 new file mode 100644 index 0000000..db1c710 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286.f90 @@ -0,0 +1,14 @@ +subroutine charint(trans, info) + character, intent(in) :: trans + integer, intent(out) :: info + if (trans == 'N') then + info = 1 + else if (trans == 'T') then + info = 2 + else if (trans == 'C') then + info = 3 + else + info = -1 + end if + +end subroutine charint diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286.pyf b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286.pyf new file mode 100644 index 0000000..7b96090 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(trans=='N'||trans=='T'||trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf new file mode 100644 index 0000000..e7b10fa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(*trans=='N'||*trans=='T'||*trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 new file mode 100644 index 0000000..f8f0761 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 @@ -0,0 +1,9 @@ +MODULE string_test + + character(len=8) :: string + character string77 * 8 + + character(len=12), dimension(5,7) :: strarr + character strarr77(5,7) * 12 + +END 
MODULE string_test diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/string.f b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/string.f new file mode 100644 index 0000000..5210ca4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/string/string.f @@ -0,0 +1,12 @@ +C FILE: STRING.F + SUBROUTINE FOO(A,B,C,D) + CHARACTER*5 A, B + CHARACTER*(*) C,D +Cf2py intent(in) a,c +Cf2py intent(inout) b,d + A(1:1) = 'A' + B(1:1) = 'B' + C(1:1) = 'C' + D(1:1) = 'D' + END +C END OF FILE STRING.F diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 new file mode 100644 index 0000000..7d9dc0f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 @@ -0,0 +1,9 @@ +module fortfuncs + implicit none +contains + subroutine square(x,y) + integer, intent(in), value :: x + integer, intent(out) :: y + y = x*x + end subroutine square +end module fortfuncs diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_abstract_interface.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_abstract_interface.py new file mode 100644 index 0000000..21e77db --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_abstract_interface.py @@ -0,0 +1,26 @@ +import pytest + +from numpy.f2py import crackfortran +from numpy.testing import IS_WASM + +from . import util + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +class TestAbstractInterface(util.F2PyTest): + sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")] + + skip = ["add1", "add2"] + + def test_abstract_interface(self): + assert self.module.ops_module.foo(3, 5) == (8, 13) + + def test_parse_abstract_interface(self): + # Test gh18403 + fpath = util.getpath("tests", "src", "abstract_interface", + "gh18403_mod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + assert len(mod[0]["body"]) == 1 + assert mod[0]["body"][0]["block"] == "abstract interface" diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_array_from_pyobj.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_array_from_pyobj.py new file mode 100644 index 0000000..a8f9527 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_array_from_pyobj.py @@ -0,0 +1,678 @@ +import copy +import platform +import sys +from pathlib import Path + +import pytest + +import numpy as np +from numpy._core._type_aliases import c_names_dict as _c_names_dict + +from . 
import util + +wrap = None + +# Extend core typeinfo with CHARACTER to test dtype('c') +c_names_dict = dict( + CHARACTER=np.dtype("c"), + **_c_names_dict +) + + +def get_testdir(): + testroot = Path(__file__).resolve().parent / "src" + return testroot / "array_from_pyobj" + +def setup_module(): + """ + Build the required testing extension module + + """ + global wrap + + if wrap is None: + src = [ + get_testdir() / "wrapmodule.c", + ] + wrap = util.build_meson(src, module_name="test_array_from_pyobj_ext") + + +def flags_info(arr): + flags = wrap.array_attrs(arr)[6] + return flags2names(flags) + + +def flags2names(flags): + info = [] + for flagname in [ + "CONTIGUOUS", + "FORTRAN", + "OWNDATA", + "ENSURECOPY", + "ENSUREARRAY", + "ALIGNED", + "NOTSWAPPED", + "WRITEABLE", + "WRITEBACKIFCOPY", + "UPDATEIFCOPY", + "BEHAVED", + "BEHAVED_RO", + "CARRAY", + "FARRAY", + ]: + if abs(flags) & getattr(wrap, flagname, 0): + info.append(flagname) + return info + + +class Intent: + def __init__(self, intent_list=[]): + self.intent_list = intent_list[:] + flags = 0 + for i in intent_list: + if i == "optional": + flags |= wrap.F2PY_OPTIONAL + else: + flags |= getattr(wrap, "F2PY_INTENT_" + i.upper()) + self.flags = flags + + def __getattr__(self, name): + name = name.lower() + if name == "in_": + name = "in" + return self.__class__(self.intent_list + [name]) + + def __str__(self): + return f"intent({','.join(self.intent_list)})" + + def __repr__(self): + return f"Intent({self.intent_list!r})" + + def is_intent(self, *names): + return all(name in self.intent_list for name in names) + + def is_intent_exact(self, *names): + return len(self.intent_list) == len(names) and self.is_intent(*names) + + +intent = Intent() + +_type_names = [ + "BOOL", + "BYTE", + "UBYTE", + "SHORT", + "USHORT", + "INT", + "UINT", + "LONG", + "ULONG", + "LONGLONG", + "ULONGLONG", + "FLOAT", + "DOUBLE", + "CFLOAT", + "STRING1", + "STRING5", + "CHARACTER", +] + +_cast_dict = {"BOOL": ["BOOL"]} +_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"] +_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"] +_cast_dict["BYTE"] = ["BYTE"] +_cast_dict["UBYTE"] = ["UBYTE"] +_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"] +_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"] +_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"] +_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"] + +_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"] +_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"] + +_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"] +_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"] + +_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"] +_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"] + +_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"] + +_cast_dict['STRING1'] = ['STRING1'] +_cast_dict['STRING5'] = ['STRING5'] +_cast_dict['CHARACTER'] = ['CHARACTER'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types this means the inout intent cannot be satisfied +# and several tests fail as the alignment flag can be randomly true or false +# when numpy gains an aligned allocator the tests could be enabled again +# +# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. 
+if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) + and sys.platform != "win32" + and (platform.system(), platform.processor()) != ("Darwin", "arm")): + _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) + _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ + "ULONG", + "FLOAT", + "DOUBLE", + "LONGDOUBLE", + ] + _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [ + "CFLOAT", + "CDOUBLE", + "CLONGDOUBLE", + ] + _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"] + + +class Type: + _type_cache = {} + + def __new__(cls, name): + if isinstance(name, np.dtype): + dtype0 = name + name = None + for n, i in c_names_dict.items(): + if not isinstance(i, type) and dtype0.type is i.type: + name = n + break + obj = cls._type_cache.get(name.upper(), None) + if obj is not None: + return obj + obj = object.__new__(cls) + obj._init(name) + cls._type_cache[name.upper()] = obj + return obj + + def _init(self, name): + self.NAME = name.upper() + + if self.NAME == 'CHARACTER': + info = c_names_dict[self.NAME] + self.type_num = wrap.NPY_STRING + self.elsize = 1 + self.dtype = np.dtype('c') + elif self.NAME.startswith('STRING'): + info = c_names_dict[self.NAME[:6]] + self.type_num = wrap.NPY_STRING + self.elsize = int(self.NAME[6:] or 0) + self.dtype = np.dtype(f'S{self.elsize}') + else: + info = c_names_dict[self.NAME] + self.type_num = getattr(wrap, 'NPY_' + self.NAME) + self.elsize = info.itemsize + self.dtype = np.dtype(info.type) + + assert self.type_num == info.num + self.type = info.type + self.dtypechar = info.char + + def __repr__(self): + return (f"Type({self.NAME})|type_num={self.type_num}," + f" dtype={self.dtype}," + f" type={self.type}, elsize={self.elsize}," + f" dtypechar={self.dtypechar}") + + def cast_types(self): + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] + + def all_types(self): + return [self.__class__(_m) for _m in _type_names] + + def smaller_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if c_names_dict[name].alignment < bits: + types.append(Type(name)) + return types + + def equal_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if name == self.NAME: + continue + if c_names_dict[name].alignment == bits: + types.append(Type(name)) + return types + + def larger_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if c_names_dict[name].alignment > bits: + types.append(Type(name)) + return types + + +class Array: + + def __repr__(self): + return (f'Array({self.type}, {self.dims}, {self.intent},' + f' {self.obj})|arr={self.arr}') + + def __init__(self, typ, dims, intent, obj): + self.type = typ + self.dims = dims + self.intent = intent + self.obj_copy = copy.deepcopy(obj) + self.obj = obj + + # arr.dtypechar may be different from typ.dtypechar + self.arr = wrap.call(typ.type_num, + typ.elsize, + dims, intent.flags, obj) + + assert isinstance(self.arr, np.ndarray) + + self.arr_attr = wrap.array_attrs(self.arr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert (intent.flags & wrap.F2PY_INTENT_C) + assert not self.arr.flags["FORTRAN"] + assert self.arr.flags["CONTIGUOUS"] + assert (not self.arr_attr[6] & wrap.FORTRAN) + else: + assert (not intent.flags & wrap.F2PY_INTENT_C) + assert self.arr.flags["FORTRAN"] + assert not self.arr.flags["CONTIGUOUS"] + assert (self.arr_attr[6] & wrap.FORTRAN) + + if obj is None: + self.pyarr = None + self.pyarr_attr = None + return 
+ + if intent.is_intent("cache"): + assert isinstance(obj, np.ndarray), repr(type(obj)) + self.pyarr = np.array(obj).reshape(*dims).copy() + else: + self.pyarr = np.array( + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=(self.intent.is_intent("c") and "C") or "F", + ) + assert self.pyarr.dtype == typ + self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) + assert self.pyarr.flags["OWNDATA"], (obj, intent) + self.pyarr_attr = wrap.array_attrs(self.pyarr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert not self.pyarr.flags["FORTRAN"] + assert self.pyarr.flags["CONTIGUOUS"] + assert (not self.pyarr_attr[6] & wrap.FORTRAN) + else: + assert self.pyarr.flags["FORTRAN"] + assert not self.pyarr.flags["CONTIGUOUS"] + assert (self.pyarr_attr[6] & wrap.FORTRAN) + + assert self.arr_attr[1] == self.pyarr_attr[1] # nd + assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions + if self.arr_attr[1] <= 1: + assert self.arr_attr[3] == self.pyarr_attr[3], repr(( + self.arr_attr[3], + self.pyarr_attr[3], + self.arr.tobytes(), + self.pyarr.tobytes(), + )) # strides + assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr(( + self.arr_attr[5], self.pyarr_attr[5] + )) # descr + assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + self.arr_attr[6], + self.pyarr_attr[6], + flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), + flags2names(self.arr_attr[6]), + intent, + )) # flags + + if intent.is_intent("cache"): + assert self.arr_attr[5][3] >= self.type.elsize + else: + assert self.arr_attr[5][3] == self.type.elsize + assert (self.arr_equal(self.pyarr, self.arr)) + + if isinstance(self.obj, np.ndarray): + if typ.elsize == Type(obj.dtype).elsize: + if not intent.is_intent("copy") and self.arr_attr[1] <= 1: + assert self.has_shared_memory() + + def arr_equal(self, arr1, arr2): + if arr1.shape != arr2.shape: + return False + return (arr1 == arr2).all() + + def __str__(self): + return str(self.arr) + + def has_shared_memory(self): + """Check that created array shares data with input array.""" + if self.obj is self.arr: + return True + if not isinstance(self.obj, np.ndarray): + return False + obj_attr = wrap.array_attrs(self.obj) + return obj_attr[0] == self.arr_attr[0] + + +class TestIntent: + def test_in_out(self): + assert str(intent.in_.out) == "intent(in,out)" + assert intent.in_.c.is_intent("c") + assert not intent.in_.c.is_intent_exact("c") + assert intent.in_.c.is_intent_exact("c", "in") + assert intent.in_.c.is_intent_exact("in", "c") + assert not intent.in_.is_intent("c") + + +class TestSharedMemory: + + @pytest.fixture(autouse=True, scope="class", params=_type_names) + def setup_type(self, request): + request.cls.type = Type(request.param) + request.cls.array = lambda self, dims, intent, obj: Array( + Type(request.param), dims, intent, obj) + + @property + def num2seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return ['1' * elsize, '2' * elsize] + return [1, 2] + + @property + def num23seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return [['1' * elsize, '2' * elsize, '3' * elsize], + ['4' * elsize, '5' * elsize, '6' * elsize]] + return [[1, 2, 3], [4, 5, 6]] + + def test_in_from_2seq(self): + a = self.array([2], intent.in_, self.num2seq) + assert not a.has_shared_memory() + + def test_in_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) + if t.elsize == self.type.elsize: + assert 
a.has_shared_memory(), repr((self.type.dtype, t.dtype)) + else: + assert not a.has_shared_memory() + + @pytest.mark.parametrize("write", ["w", "ro"]) + @pytest.mark.parametrize("order", ["C", "F"]) + @pytest.mark.parametrize("inp", ["2seq", "23seq"]) + def test_in_nocopy(self, write, order, inp): + """Test if intent(in) array can be passed without copies""" + seq = getattr(self, "num" + inp) + obj = np.array(seq, dtype=self.type.dtype, order=order) + obj.setflags(write=(write == 'w')) + a = self.array(obj.shape, + ((order == 'C' and intent.in_.c) or intent.in_), obj) + assert a.has_shared_memory() + + def test_inout_2seq(self): + obj = np.array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) + assert a.has_shared_memory() + + try: + a = self.array([2], intent.in_.inout, self.num2seq) + except TypeError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout|inplace|cache) array"): + raise + else: + raise SystemError("intent(inout) should have failed on sequence") + + def test_f_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) + assert a.has_shared_memory() + + obj = np.array(self.num23seq, dtype=self.type.dtype, order="C") + shape = (len(self.num23seq), len(self.num23seq[0])) + try: + a = self.array(shape, intent.in_.inout, obj) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout) array"): + raise + else: + raise SystemError( + "intent(inout) should have failed on improper array") + + def test_c_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) + assert a.has_shared_memory() + + def test_in_copy_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert not a.has_shared_memory() + + def test_c_in_from_23seq(self): + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, + self.num23seq) + assert not a.has_shared_memory() + + def test_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + assert not a.has_shared_memory() + + def test_f_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_c_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_f_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, + obj) + assert not a.has_shared_memory() + + def test_c_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + 
[len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, + obj) + assert not a.has_shared_memory() + + def test_in_cache_from_2casttype(self): + for t in self.type.all_types(): + if t.elsize != self.type.elsize: + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory() + + obj = np.array(self.num2seq, dtype=t.dtype, order="F") + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory(), repr(t.dtype) + + try: + a = self.array(shape, intent.in_.cache, obj[::-1]) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on multisegmented array") + + def test_in_cache_from_2casttype_failure(self): + for t in self.type.all_types(): + if t.NAME == 'STRING': + # string elsize is 0, so skipping the test + continue + if t.elsize >= self.type.elsize: + continue + is_int = np.issubdtype(t.dtype, np.integer) + if is_int and int(self.num2seq[0]) > np.iinfo(t.dtype).max: + # skip test if num2seq would trigger an overflow error + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + try: + self.array(shape, intent.in_.cache, obj) # Should succeed + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on smaller array") + + def test_cache_hidden(self): + shape = (2, ) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (-1, 3) + try: + a = self.array(shape, intent.cache.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on undefined dimensions") + + def test_hidden(self): + shape = (2, ) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + shape = (-1, 3) + try: + a = self.array(shape, intent.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(hide) should have failed on undefined dimensions") + + def test_optional_none(self): + shape = (2, ) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not 
a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + def test_optional_from_2seq(self): + obj = self.num2seq + shape = (len(obj), ) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_optional_from_23seq(self): + obj = self.num23seq + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + a = self.array(shape, intent.optional.c, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_inplace(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes are changed inplace! + assert not obj.flags["CONTIGUOUS"] + + def test_inplace_from_casttype(self): + for t in self.type.cast_types(): + if t is self.type: + continue + obj = np.array(self.num23seq, dtype=t.dtype) + assert obj.dtype.type == t.type + assert obj.dtype.type is not self.type.type + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, + dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes changed inplace! + assert not obj.flags["CONTIGUOUS"] + assert obj.dtype.type is self.type.type # obj changed inplace! diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_assumed_shape.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_assumed_shape.py new file mode 100644 index 0000000..cf75644 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_assumed_shape.py @@ -0,0 +1,50 @@ +import os +import tempfile + +import pytest + +from . 
import util + + +class TestAssumedShapeSumExample(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "assumed_shape", "foo_free.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_use.f90"), + util.getpath("tests", "src", "assumed_shape", "precision.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"), + util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"), + ] + + @pytest.mark.slow + def test_all(self): + r = self.module.fsum([1, 2]) + assert r == 3 + r = self.module.sum([1, 2]) + assert r == 3 + r = self.module.sum_with_use([1, 2]) + assert r == 3 + + r = self.module.mod.sum([1, 2]) + assert r == 3 + r = self.module.mod.fsum([1, 2]) + assert r == 3 + + +class TestF2cmapOption(TestAssumedShapeSumExample): + def setup_method(self): + # Use a custom file name for .f2py_f2cmap + self.sources = list(self.sources) + f2cmap_src = self.sources.pop(-1) + + self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) + with open(f2cmap_src, "rb") as f: + self.f2cmap_file.write(f.read()) + self.f2cmap_file.close() + + self.sources.append(self.f2cmap_file.name) + self.options = ["--f2cmap", self.f2cmap_file.name] + + super().setup_method() + + def teardown_method(self): + os.unlink(self.f2cmap_file.name) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_block_docstring.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_block_docstring.py new file mode 100644 index 0000000..ba255a1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_block_docstring.py @@ -0,0 +1,20 @@ +import sys + +import pytest + +from numpy.testing import IS_PYPY + +from . import util + + +@pytest.mark.slow +class TestBlockDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "block_docstring", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_block_docstring(self): + expected = "bar : 'i'-array(2,3)\n" + assert self.module.block.__doc__ == expected diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_callback.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_callback.py new file mode 100644 index 0000000..6614efb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_callback.py @@ -0,0 +1,263 @@ +import math +import platform +import sys +import textwrap +import threading +import time +import traceback + +import pytest + +import numpy as np +from numpy.testing import IS_PYPY + +from . import util + + +class TestF77Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "foo.f")] + + @pytest.mark.parametrize("name", ["t", "t2"]) + @pytest.mark.slow + def test_all(self, name): + self.check_function(name) + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = t(fun,[fun_extra_args]) + + Wrapper for ``t``. 
+ + Parameters + ---------- + fun : call-back function + + Other Parameters + ---------------- + fun_extra_args : input tuple, optional + Default: () + + Returns + ------- + a : int + + Notes + ----- + Call-back functions:: + + def fun(): return a + Return objects: + a : int + """) + assert self.module.t.__doc__ == expected + + def check_function(self, name): + t = getattr(self.module, name) + r = t(lambda: 4) + assert r == 4 + r = t(lambda a: 5, fun_extra_args=(6, )) + assert r == 5 + r = t(lambda a: a, fun_extra_args=(6, )) + assert r == 6 + r = t(lambda a: 5 + a, fun_extra_args=(7, )) + assert r == 12 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + + r = t(self.module.func, fun_extra_args=(6, )) + assert r == 17 + r = t(self.module.func0) + assert r == 11 + r = t(self.module.func0._cpointer) + assert r == 11 + + class A: + def __call__(self): + return 7 + + def mth(self): + return 9 + + a = A() + r = t(a) + assert r == 7 + r = t(a.mth) + assert r == 9 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback(self): + def callback(code): + if code == "r": + return 0 + else: + return 1 + + f = self.module.string_callback + r = f(callback) + assert r == 0 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback_array(self): + # See gh-10027 + cu1 = np.zeros((1, ), "S8") + cu2 = np.zeros((1, 8), "c") + cu3 = np.array([""], "S8") + + def callback(cu, lencu): + if cu.shape != (lencu,): + return 1 + if cu.dtype != "S8": + return 2 + if not np.all(cu == b""): + return 3 + return 0 + + f = self.module.string_callback_array + for cu in [cu1, cu2, cu3]: + res = f(callback, cu, cu.size) + assert res == 0 + + def test_threadsafety(self): + # Segfaults if the callback handling is not threadsafe + + errors = [] + + def cb(): + # Sleep here to make it more likely for another thread + # to call their callback at the same time. 
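+            # (Editorial note: combined with the reentrant self.module.t call
+            # below, this exercises nested and concurrent callbacks at once;
+            # without thread-local callback state the failure mode is a hard
+            # segfault rather than a clean assertion error.)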
+            time.sleep(1e-3)
+
+            # Check reentrancy
+            r = self.module.t(lambda: 123)
+            assert r == 123
+
+            return 42
+
+        def runner(name):
+            try:
+                for j in range(50):
+                    r = self.module.t(cb)
+                    assert r == 42
+                    self.check_function(name)
+            except Exception:
+                errors.append(traceback.format_exc())
+
+        threads = [
+            threading.Thread(target=runner, args=(arg, ))
+            for arg in ("t", "t2") for n in range(20)
+        ]
+
+        for t in threads:
+            t.start()
+
+        for t in threads:
+            t.join()
+
+        errors = "\n\n".join(errors)
+        if errors:
+            raise AssertionError(errors)
+
+    def test_hidden_callback(self):
+        try:
+            self.module.hidden_callback(2)
+        except Exception as msg:
+            assert str(msg).startswith("Callback global_f not defined")
+
+        try:
+            self.module.hidden_callback2(2)
+        except Exception as msg:
+            assert str(msg).startswith("cb: Callback global_f not defined")
+
+        self.module.global_f = lambda x: x + 1
+        r = self.module.hidden_callback(2)
+        assert r == 3
+
+        self.module.global_f = lambda x: x + 2
+        r = self.module.hidden_callback(2)
+        assert r == 4
+
+        del self.module.global_f
+        try:
+            self.module.hidden_callback(2)
+        except Exception as msg:
+            assert str(msg).startswith("Callback global_f not defined")
+
+        self.module.global_f = lambda x=0: x + 3
+        r = self.module.hidden_callback(2)
+        assert r == 5
+
+        # reproducer of gh18341
+        r = self.module.hidden_callback2(2)
+        assert r == 3
+
+
+class TestF77CallbackPythonTLS(TestF77Callback):
+    """
+    Callback tests using Python thread-local storage instead of
+    compiler-provided thread-local storage.
+    """
+
+    options = ["-DF2PY_USE_PYTHON_TLS"]
+
+
+class TestF90Callback(util.F2PyTest):
+    sources = [util.getpath("tests", "src", "callback", "gh17797.f90")]
+
+    @pytest.mark.slow
+    def test_gh17797(self):
+        def incr(x):
+            return x + 123
+
+        y = np.array([1, 2, 3], dtype=np.int64)
+        r = self.module.gh17797(incr, y)
+        assert r == 123 + 1 + 2 + 3
+
+
+class TestGH18335(util.F2PyTest):
+    """Reproducing the reported issue requires specific input; extending the
+    test may break the conditions under which the issue occurs, so the
+    reproducer is implemented as a separate test class. Do not extend this
+    class with other tests!
+ """ + sources = [util.getpath("tests", "src", "callback", "gh18335.f90")] + + @pytest.mark.slow + def test_gh18335(self): + def foo(x): + x[0] += 1 + + r = self.module.gh18335(foo) + assert r == 123 + 1 + + +class TestGH25211(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh25211.f"), + util.getpath("tests", "src", "callback", "gh25211.pyf")] + module_name = "callback2" + + def test_gh25211(self): + def bar(x): + return x * x + + res = self.module.foo(bar) + assert res == 110 + + +@pytest.mark.slow +@pytest.mark.xfail(condition=(platform.system().lower() == 'darwin'), + run=False, + reason="Callback aborts cause CI failures on macOS") +class TestCBFortranCallstatement(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh26681.f90")] + options = ['--lower'] + + def test_callstatement_fortran(self): + with pytest.raises(ValueError, match='helpme') as exc: + self.module.mypy_abort = self.module.utils.my_abort + self.module.utils.do_something('helpme') diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_character.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_character.py new file mode 100644 index 0000000..74868a6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_character.py @@ -0,0 +1,641 @@ +import textwrap + +import pytest + +import numpy as np +from numpy.f2py.tests import util +from numpy.testing import assert_array_equal, assert_equal, assert_raises + + +@pytest.mark.slow +class TestCharacterString(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character_string' + length_list = ['1', '3', 'star'] + + code = '' + for length in length_list: + fsuffix = length + clength = {'star': '(*)'}.get(length, length) + + code += textwrap.dedent(f""" + + subroutine {fprefix}_input_{fsuffix}(c, o, n) + character*{clength}, intent(in) :: c + integer n + !f2py integer, depend(c), intent(hide) :: n = slen(c) + integer*1, dimension(n) :: o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input_{fsuffix} + + subroutine {fprefix}_output_{fsuffix}(c, o, n) + character*{clength}, intent(out) :: c + integer n + integer*1, dimension(n), intent(in) :: o + !f2py integer, depend(o), intent(hide) :: n = len(o) + c = transfer(o, c) + end subroutine {fprefix}_output_{fsuffix} + + subroutine {fprefix}_array_input_{fsuffix}(c, o, m, n) + integer m, i, n + character*{clength}, intent(in), dimension(m) :: c + !f2py integer, depend(c), intent(hide) :: m = len(c) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m, n), intent(out) :: o + do i=1,m + o(i, :) = transfer(c(i), o(i, :)) + end do + end subroutine {fprefix}_array_input_{fsuffix} + + subroutine {fprefix}_array_output_{fsuffix}(c, o, m, n) + character*{clength}, intent(out), dimension(m) :: c + integer n + integer*1, dimension(m, n), intent(in) :: o + !f2py character(f2py_len=n) :: c + !f2py integer, depend(o), intent(hide) :: m = len(o) + !f2py integer, depend(o), intent(hide) :: n = shape(o, 1) + do i=1,m + c(i) = transfer(o(i, :), c(i)) + end do + end subroutine {fprefix}_array_output_{fsuffix} + + subroutine {fprefix}_2d_array_input_{fsuffix}(c, o, m1, m2, n) + integer m1, m2, i, j, n + character*{clength}, intent(in), dimension(m1, m2) :: c + !f2py integer, depend(c), intent(hide) :: m1 = len(c) + !f2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + 
integer*1, dimension(m1, m2, n), intent(out) :: o + do i=1,m1 + do j=1,m2 + o(i, j, :) = transfer(c(i, j), o(i, j, :)) + end do + end do + end subroutine {fprefix}_2d_array_input_{fsuffix} + """) + + @pytest.mark.parametrize("length", length_list) + def test_input(self, length): + fsuffix = {'(*)': 'star'}.get(length, length) + f = getattr(self.module, self.fprefix + '_input_' + fsuffix) + + a = {'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length] + + assert_array_equal(f(a), np.array(list(map(ord, a)), dtype='u1')) + + @pytest.mark.parametrize("length", length_list[:-1]) + def test_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_output_' + fsuffix) + + a = {'1': 'a', '3': 'abc'}[length] + + assert_array_equal(f(np.array(list(map(ord, a)), dtype='u1')), + a.encode()) + + @pytest.mark.parametrize("length", length_list) + def test_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_input_' + fsuffix) + + a = np.array([{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length], + ], dtype='S') + + expected = np.array([list(s) for s in a], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_array_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_output_' + fsuffix) + + expected = np.array( + [{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], dtype='S') + + a = np.array([list(s) for s in expected], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_2d_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_2d_array_input_' + fsuffix) + + a = np.array([[{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], + [{'1': 'f', '3': 'fgh', 'star': 'fghij' * 3}[length], + {'1': 'F', '3': 'FGH', 'star': 'FGHIJ' * 3}[length]]], + dtype='S') + expected = np.array([[list(item) for item in row] for row in a], + dtype='u1', order='F') + assert_array_equal(f(a), expected) + + +class TestCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_input(c, o) + character, intent(in) :: c + integer*1 o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input + + subroutine {fprefix}_output(c, o) + character :: c + integer*1, intent(in) :: o + !f2py intent(out) c + c = transfer(o, c) + end subroutine {fprefix}_output + + subroutine {fprefix}_input_output(c, o) + character, intent(in) :: c + character o + !f2py intent(out) o + o = c + end subroutine {fprefix}_input_output + + subroutine {fprefix}_inout(c, n) + character :: c, n + !f2py intent(in) n + !f2py intent(inout) c + c = n + end subroutine {fprefix}_inout + + function {fprefix}_return(o) result (c) + character :: c + character, intent(in) :: o + c = transfer(o, c) + end function {fprefix}_return + + subroutine {fprefix}_array_input(c, o) + character, intent(in) :: c(3) + integer*1 o(3) + !f2py intent(out) o + integer i + do i=1,3 + o(i) = transfer(c(i), o(i)) + end do + end subroutine {fprefix}_array_input + + subroutine {fprefix}_2d_array_input(c, o) + character, intent(in) :: c(2, 3) + integer*1 o(2, 3) + !f2py intent(out) o + integer i, j + do i=1,2 + do j=1,3 + o(i, j) = 
transfer(c(i, j), o(i, j)) + end do + end do + end subroutine {fprefix}_2d_array_input + + subroutine {fprefix}_array_output(c, o) + character :: c(3) + integer*1, intent(in) :: o(3) + !f2py intent(out) c + do i=1,3 + c(i) = transfer(o(i), c(i)) + end do + end subroutine {fprefix}_array_output + + subroutine {fprefix}_array_inout(c, n) + character :: c(3), n(3) + !f2py intent(in) n(3) + !f2py intent(inout) c(3) + do i=1,3 + c(i) = n(i) + end do + end subroutine {fprefix}_array_inout + + subroutine {fprefix}_2d_array_inout(c, n) + character :: c(2, 3), n(2, 3) + !f2py intent(in) n(2, 3) + !f2py intent(inout) c(2. 3) + integer i, j + do i=1,2 + do j=1,3 + c(i, j) = n(i, j) + end do + end do + end subroutine {fprefix}_2d_array_inout + + function {fprefix}_array_return(o) result (c) + character, dimension(3) :: c + character, intent(in) :: o(3) + do i=1,3 + c(i) = o(i) + end do + end function {fprefix}_array_return + + function {fprefix}_optional(o) result (c) + character, intent(in) :: o + !f2py character o = "a" + character :: c + c = o + end function {fprefix}_optional + """) + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_input(self, dtype): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f(np.array('a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(b'a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(['a'], dtype=dtype)), ord('a')) + assert_equal(f(np.array('abc', dtype=dtype)), ord('a')) + assert_equal(f(np.array([['a']], dtype=dtype)), ord('a')) + + def test_input_varia(self): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f('a'), ord('a')) + assert_equal(f(b'a'), ord(b'a')) + assert_equal(f(''), 0) + assert_equal(f(b''), 0) + assert_equal(f(b'\0'), 0) + assert_equal(f('ab'), ord('a')) + assert_equal(f(b'ab'), ord('a')) + assert_equal(f(['a']), ord('a')) + + assert_equal(f(np.array(b'a')), ord('a')) + assert_equal(f(np.array([b'a'])), ord('a')) + a = np.array('a') + assert_equal(f(a), ord('a')) + a = np.array(['a']) + assert_equal(f(a), ord('a')) + + try: + f([]) + except IndexError as msg: + if not str(msg).endswith(' got 0-list'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on empty list') + + try: + f(97) + except TypeError as msg: + if not str(msg).endswith(' got int instance'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on int value') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_array_input') + + assert_array_equal(f(np.array(['a', 'b', 'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f(np.array([b'a', b'b', b'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + + def test_array_input_varia(self): + f = getattr(self.module, self.fprefix + '_array_input') + assert_array_equal(f(['a', 'b', 'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f([b'a', b'b', b'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + + try: + f(['a', 'b', 'c', 'd']) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_2d_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_input') + + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], dtype=dtype, order='F') + expected = a.view(np.uint32 if 
dtype == 'U1' else np.uint8) + assert_array_equal(f(a), expected) + + def test_output(self): + f = getattr(self.module, self.fprefix + '_output') + + assert_equal(f(ord(b'a')), b'a') + assert_equal(f(0), b'\0') + + def test_array_output(self): + f = getattr(self.module, self.fprefix + '_array_output') + + assert_array_equal(f(list(map(ord, 'abc'))), + np.array(list('abc'), dtype='S1')) + + def test_input_output(self): + f = getattr(self.module, self.fprefix + '_input_output') + + assert_equal(f(b'a'), b'a') + assert_equal(f('a'), b'a') + assert_equal(f(''), b'\0') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_inout') + + a = np.array(list('abc'), dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(list('Abc'), dtype=a.dtype)) + f(a[1:], 'B') + assert_array_equal(a, np.array(list('ABc'), dtype=a.dtype)) + + a = np.array(['abc'], dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + def test_inout_varia(self): + f = getattr(self.module, self.fprefix + '_inout') + a = np.array('abc', dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array('Abc', dtype=a.dtype)) + + a = np.array(['abc'], dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + try: + f('abc', 'A') + except ValueError as msg: + if not str(msg).endswith(' got 3-str'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on str value') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_array_inout') + n = np.array(['A', 'B', 'C'], dtype=dtype, order='F') + + a = np.array(['a', 'b', 'c'], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype) + f(a[1:], n) + assert_array_equal(a, np.array(['a', 'A', 'B', 'C'], dtype=dtype)) + + a = np.array([['a', 'b', 'c']], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, np.array([['A', 'B', 'C']], dtype=dtype)) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype, order='F') + try: + f(a, n) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_2d_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_inout') + n = np.array([['A', 'B', 'C'], + ['D', 'E', 'F']], + dtype=dtype, order='F') + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], + dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + def test_return(self): + f = getattr(self.module, self.fprefix + '_return') + + assert_equal(f('a'), b'a') + + @pytest.mark.skip('fortran function returning array segfaults') + def test_array_return(self): + f = getattr(self.module, self.fprefix + '_array_return') + + a = np.array(list('abc'), dtype='S1') + assert_array_equal(f(a), a) + + def test_optional(self): + f = getattr(self.module, self.fprefix + '_optional') + + assert_equal(f(), b"a") + assert_equal(f(b'B'), b"B") + + +class TestMiscCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_misc_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_gh18684(x, y, m) + character(len=5), dimension(m), intent(in) :: x + character*5, dimension(m), intent(out) :: y + integer i, m + !f2py integer, intent(hide), depend(x) :: m = 
f2py_len(x) + do i=1,m + y(i) = x(i) + end do + end subroutine {fprefix}_gh18684 + + subroutine {fprefix}_gh6308(x, i) + integer i + !f2py check(i>=0 && i<12) i + character*5 name, x + common name(12) + name(i + 1) = x + end subroutine {fprefix}_gh6308 + + subroutine {fprefix}_gh4519(x) + character(len=*), intent(in) :: x(:) + !f2py intent(out) x + integer :: i + ! Uncomment for debug printing: + !do i=1, size(x) + ! print*, "x(",i,")=", x(i) + !end do + end subroutine {fprefix}_gh4519 + + pure function {fprefix}_gh3425(x) result (y) + character(len=*), intent(in) :: x + character(len=len(x)) :: y + integer :: i + do i = 1, len(x) + j = iachar(x(i:i)) + if (j>=iachar("a") .and. j<=iachar("z") ) then + y(i:i) = achar(j-32) + else + y(i:i) = x(i:i) + endif + end do + end function {fprefix}_gh3425 + + subroutine {fprefix}_character_bc_new(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x + !f2py character, dimension((x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(x == 'a' || x == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(&x, &y, z) + !f2py callprotoargument character*, character*, character* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_new + + subroutine {fprefix}_character_bc_old(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x[0] + !f2py character, dimension((*x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(*x == 'a' || x[0] == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(x, y, z) + !f2py callprotoargument char*, char*, char* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_old + """) + + @pytest.mark.slow + def test_gh18684(self): + # Test character(len=5) and character*5 usages + f = getattr(self.module, self.fprefix + '_gh18684') + x = np.array(["abcde", "fghij"], dtype='S5') + y = f(x) + + assert_array_equal(x, y) + + def test_gh6308(self): + # Test character string array in a common block + f = getattr(self.module, self.fprefix + '_gh6308') + + assert_equal(self.module._BLNK_.name.dtype, np.dtype('S5')) + assert_equal(len(self.module._BLNK_.name), 12) + f("abcde", 0) + assert_equal(self.module._BLNK_.name[0], b"abcde") + f("12345", 5) + assert_equal(self.module._BLNK_.name[5], b"12345") + + def test_gh4519(self): + # Test array of assumed length strings + f = getattr(self.module, self.fprefix + '_gh4519') + + for x, expected in [ + ('a', {'shape': (), 'dtype': np.dtype('S1')}), + ('text', {'shape': (), 'dtype': np.dtype('S4')}), + (np.array(['1', '2', '3'], dtype='S1'), + {'shape': (3,), 'dtype': np.dtype('S1')}), + (['1', '2', '34'], + {'shape': (3,), 'dtype': np.dtype('S2')}), + (['', ''], {'shape': (2,), 'dtype': np.dtype('S1')})]: + r = f(x) + for k, v in expected.items(): + assert_equal(getattr(r, k), v) + + def test_gh3425(self): + # Test returning a copy of assumed length string + f = getattr(self.module, self.fprefix + '_gh3425') + # f is equivalent to bytes.upper + + assert_equal(f('abC'), b'ABC') + assert_equal(f(''), b'') + assert_equal(f('abC12d'), b'ABC12D') + + @pytest.mark.parametrize("state", ['new', 'old']) + def test_character_bc(self, state): + f = getattr(self.module, self.fprefix + '_character_bc_' + state) + + c, a = f() + assert_equal(c, b'a') + assert_equal(len(a), 1) + + c, a = f(b'b') + assert_equal(c, 
b'b') + assert_equal(len(a), 2) + + assert_raises(Exception, lambda: f(b'c')) + + +class TestStringScalarArr(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "scalar_string.f90")] + + def test_char(self): + for out in (self.module.string_test.string, + self.module.string_test.string77): + expected = () + assert out.shape == expected + expected = '|S8' + assert out.dtype == expected + + def test_char_arr(self): + for out in (self.module.string_test.strarr, + self.module.string_test.strarr77): + expected = (5, 7) + assert out.shape == expected + expected = '|S12' + assert out.dtype == expected + +class TestStringAssumedLength(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24008.f")] + + def test_gh24008(self): + self.module.greet("joe", "bob") + +@pytest.mark.slow +class TestStringOptionalInOut(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24662.f90")] + + def test_gh24662(self): + self.module.string_inout_optional() + a = np.array('hi', dtype='S32') + self.module.string_inout_optional(a) + assert "output string" in a.tobytes().decode() + with pytest.raises(Exception): # noqa: B017 + aa = "Hi" + self.module.string_inout_optional(aa) + + +@pytest.mark.slow +class TestNewCharHandling(util.F2PyTest): + # from v1.24 onwards, gh-19388 + sources = [ + util.getpath("tests", "src", "string", "gh25286.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 + +@pytest.mark.slow +class TestBCCharHandling(util.F2PyTest): + # SciPy style, "incorrect" bindings with a hook + sources = [ + util.getpath("tests", "src", "string", "gh25286_bc.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_common.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_common.py new file mode 100644 index 0000000..b9fbd84 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_common.py @@ -0,0 +1,23 @@ +import pytest + +import numpy as np + +from . import util + + +@pytest.mark.slow +class TestCommonBlock(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "block.f")] + + def test_common_block(self): + self.module.initcb() + assert self.module.block.long_bn == np.array(1.0, dtype=np.float64) + assert self.module.block.string_bn == np.array("2", dtype="|S1") + assert self.module.block.ok == np.array(3, dtype=np.int32) + + +class TestCommonWithUse(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "gh19161.f90")] + + def test_common_gh19161(self): + assert self.module.data.x == 0 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_crackfortran.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_crackfortran.py new file mode 100644 index 0000000..c3967cf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_crackfortran.py @@ -0,0 +1,421 @@ +import contextlib +import importlib +import io +import textwrap +import time + +import pytest + +import numpy as np +from numpy.f2py import crackfortran +from numpy.f2py.crackfortran import markinnerspaces, nameargspattern + +from . 
import util + + +class TestNoSpace(util.F2PyTest): + # issue gh-15035: add handling for endsubroutine, endfunction with no space + # between "end" and the block name + sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")] + + def test_module(self): + k = np.array([1, 2, 3], dtype=np.float64) + w = np.array([1, 2, 3], dtype=np.float64) + self.module.subb(k) + assert np.allclose(k, w + 1) + self.module.subc([w, k]) + assert np.allclose(k, w + 1) + assert self.module.t0("23") == b"2" + + +class TestPublicPrivate: + def test_defaultPrivate(self): + fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" in mod["vars"]["b"]["attrspec"] + assert "public" not in mod["vars"]["b"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + def test_defaultPublic(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + def test_access_type(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "accesstype.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + tt = mod[0]['vars'] + assert set(tt['a']['attrspec']) == {'private', 'bind(c)'} + assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'} + assert set(tt['c']['attrspec']) == {'public'} + + def test_nowrap_private_proceedures(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh23879.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + pyf = crackfortran.crack2fortran(mod) + assert 'bar' not in pyf + +class TestModuleProcedure: + def test_moduleOperators(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "body" in mod and len(mod["body"]) == 9 + assert mod["body"][1]["name"] == "operator(.item.)" + assert "implementedby" in mod["body"][1] + assert mod["body"][1]["implementedby"] == \ + ["item_int", "item_real"] + assert mod["body"][2]["name"] == "operator(==)" + assert "implementedby" in mod["body"][2] + assert mod["body"][2]["implementedby"] == ["items_are_equal"] + assert mod["body"][3]["name"] == "assignment(=)" + assert "implementedby" in mod["body"][3] + assert mod["body"][3]["implementedby"] == \ + ["get_int", "get_real"] + + def test_notPublicPrivate(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "pubprivmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert mod['vars']['a']['attrspec'] == ['private', ] + assert mod['vars']['b']['attrspec'] == ['public', ] + assert mod['vars']['seta']['attrspec'] == ['public', ] + + +class TestExternal(util.F2PyTest): + # issue gh-17859: add external attribute support + sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")] + + def test_external_as_statement(self): + def incr(x): + return x + 123 + + r = self.module.external_as_statement(incr) + assert r == 123 + 
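+    # (Editorial aside, not from the upstream suite: in both tests f2py
+    # accepts a plain Python callable for a Fortran EXTERNAL procedure and
+    # wraps it in a C trampoline. A hypothetical call site, assuming a
+    # wrapped routine apply_fn(f, x) that applies f once to x:
+    #
+    #     def double(x):
+    #         return 2 * x
+    #
+    #     assert module.apply_fn(double, 21) == 42
+    # )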
+    def test_external_as_attribute(self):
+        def incr(x):
+            return x + 123
+
+        r = self.module.external_as_attribute(incr)
+        assert r == 123
+
+
+class TestCrackFortran(util.F2PyTest):
+    # gh-2848: commented lines between parameters in subroutine parameter lists
+    sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90"),
+               util.getpath("tests", "src", "crackfortran", "common_with_division.f")
+               ]
+
+    def test_gh2848(self):
+        r = self.module.gh2848(1, 2)
+        assert r == (1, 2)
+
+    def test_common_with_division(self):
+        assert len(self.module.mortmp.ctmp) == 11
+
+class TestMarkinnerspaces:
+    # gh-14118: markinnerspaces does not handle multiple quotations
+
+    def test_do_not_touch_normal_spaces(self):
+        test_list = ["a ", " a", "a b c", "'abcdefghij'"]
+        for i in test_list:
+            assert markinnerspaces(i) == i
+
+    def test_one_relevant_space(self):
+        assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'"
+        assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"'
+
+    def test_ignore_inner_quotes(self):
+        assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e"
+        assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e"
+
+    def test_multiple_relevant_spaces(self):
+        assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'"
+        assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"'
+
+
+class TestDimSpec(util.F2PyTest):
+    """This test suite tests various expressions that are used as dimension
+    specifications.
+
+    There are two use cases where analyzing dimension specifications
+    is important.
+
+    In the first case, the size of output arrays must be defined based
+    on the inputs to a Fortran function. Because Fortran supports
+    arbitrary bases for indexing, for instance, `arr(lower:upper)`,
+    f2py has to evaluate an expression `upper - lower + 1` where
+    `lower` and `upper` are arbitrary expressions of input parameters.
+    The evaluation is performed in C, so f2py has to translate Fortran
+    expressions to valid C expressions (an alternative approach is
+    that a developer specifies the corresponding C expressions in a
+    .pyf file).
+
+    In the second case, when the user provides an input array of a
+    given size, some hidden parameters used in the dimension
+    specifications need to be determined from that size. This is a
+    harder problem because f2py has to solve the inverse problem: find
+    a parameter `p` such that `upper(p) - lower(p) + 1` equals the
+    size of the input array. When this equation cannot be solved (e.g.
+    because the input array size is wrong), f2py raises an error
+    before calling the Fortran function (which otherwise would likely
+    crash the Python process). f2py currently supports this case only
+    when the equation is linear with respect to the unknown parameter.
+
+    """
+
+    suffix = ".f90"
+
+    code_template = textwrap.dedent("""
+      function get_arr_size_{count}(a, n) result (length)
+        integer, intent(in) :: n
+        integer, dimension({dimspec}), intent(out) :: a
+        integer length
+        length = size(a)
+      end function
+
+      subroutine get_inv_arr_size_{count}(a, n)
+        integer :: n
+        ! the value of n is computed in the f2py wrapper
+        !f2py intent(out) n
+        integer, dimension({dimspec}), intent(in) :: a
+        if (a({first}).gt.0) then
+          ! print*, "a=", a
+        endif
+      end subroutine
+    """)
+
+    linear_dimspecs = [
+        "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)",
+        "2*n, n"
+    ]
+    nonlinear_dimspecs = ["2*n:3*n*n+2*n"]
+    all_dimspecs = linear_dimspecs + nonlinear_dimspecs
+
+    code = ""
+    for count, dimspec in enumerate(all_dimspecs):
+        lst = [(d.split(":")[0] if ":" in d else "1") for d in dimspec.split(',')]
+        code += code_template.format(
+            count=count,
+            dimspec=dimspec,
+            first=", ".join(lst),
+        )
+
+    @pytest.mark.parametrize("dimspec", all_dimspecs)
+    @pytest.mark.slow
+    def test_array_size(self, dimspec):
+
+        count = self.all_dimspecs.index(dimspec)
+        get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+
+        for n in [1, 2, 3, 4, 5]:
+            sz, a = get_arr_size(n)
+            assert a.size == sz
+
+    @pytest.mark.parametrize("dimspec", all_dimspecs)
+    def test_inv_array_size(self, dimspec):
+
+        count = self.all_dimspecs.index(dimspec)
+        get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+        get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}")
+
+        for n in [1, 2, 3, 4, 5]:
+            sz, a = get_arr_size(n)
+            if dimspec in self.nonlinear_dimspecs:
+                # n must be specified as input; the call will ensure
+                # that a and n are compatible:
+                n1 = get_inv_arr_size(a, n)
+            else:
+                # in case of linear dependence, n can be determined
+                # from the shape of a:
+                n1 = get_inv_arr_size(a)
+            # n1 may be different from n (for instance, when the size
+            # of `a` depends on a fraction of `n`) but it must produce
+            # an array of the same size
+            sz1, _ = get_arr_size(n1)
+            assert sz == sz1, (n, n1, sz, sz1)
+
+
+class TestModuleDeclaration:
+    def test_dependencies(self, tmp_path):
+        fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
+
+
+class TestEval(util.F2PyTest):
+    def test_eval_scalar(self):
+        eval_scalar = crackfortran._eval_scalar
+
+        assert eval_scalar('123', {}) == '123'
+        assert eval_scalar('12 + 3', {}) == '15'
+        assert eval_scalar('a + b', {"a": 1, "b": 2}) == '3'
+        assert eval_scalar('"123"', {}) == "'123'"
+
+
+class TestFortranReader(util.F2PyTest):
+    @pytest.mark.parametrize("encoding",
+                             ['ascii', 'utf-8', 'utf-16', 'utf-32'])
+    def test_input_encoding(self, tmp_path, encoding):
+        # gh-635
+        f_path = tmp_path / f"input_with_{encoding}_encoding.f90"
+        with f_path.open('w', encoding=encoding) as ff:
+            ff.write("""
+                     subroutine foo()
+                     end subroutine foo
+                     """)
+        mod = crackfortran.crackfortran([str(f_path)])
+        assert mod[0]['name'] == 'foo'
+
+
+@pytest.mark.slow
+class TestUnicodeComment(util.F2PyTest):
+    sources = [util.getpath("tests", "src", "crackfortran", "unicode_comment.f90")]
+
+    @pytest.mark.skipif(
+        (importlib.util.find_spec("charset_normalizer") is None),
+        reason="test requires charset_normalizer which is not installed",
+    )
+    def test_encoding_comment(self):
+        self.module.foo(3)
+
+
+class TestNameArgsPatternBacktracking:
+    @pytest.mark.parametrize(
+        ['adversary'],
+        [
+            ('@)@bind@(@',),
+            ('@)@bind @(@',),
+            ('@)@bind foo bar baz@(@',)
+        ]
+    )
+    def test_nameargspattern_backtracking(self, adversary):
+        '''address ReDoS vulnerability:
+        https://github.com/numpy/numpy/issues/23338'''
+        trials_per_batch = 12
+        batches_per_regex = 4
+        start_reps, end_reps = 15, 25
+        for ii in range(start_reps, end_reps):
+            repeated_adversary = adversary * ii
+            # test times in small batches.
+ # this gives us more chances to catch a bad regex + # while still catching it before too long if it is bad + for _ in range(batches_per_regex): + times = [] + for _ in range(trials_per_batch): + t0 = time.perf_counter() + mtch = nameargspattern.search(repeated_adversary) + times.append(time.perf_counter() - t0) + # our pattern should be much faster than 0.2s per search + # it's unlikely that a bad regex will pass even on fast CPUs + assert np.median(times) < 0.2 + assert not mtch + # if the adversary is capped with @)@, it becomes acceptable + # according to the old version of the regex. + # that should still be true. + good_version_of_adversary = repeated_adversary + '@)@' + assert nameargspattern.search(good_version_of_adversary) + +class TestFunctionReturn(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh23598.f90")] + + @pytest.mark.slow + def test_function_rettype(self): + # gh-23598 + assert self.module.intproduct(3, 4) == 12 + + +class TestFortranGroupCounters(util.F2PyTest): + def test_end_if_comment(self): + # gh-23533 + fpath = util.getpath("tests", "src", "crackfortran", "gh23533.f") + try: + crackfortran.crackfortran([str(fpath)]) + except Exception as exc: + assert False, f"'crackfortran.crackfortran' raised an exception {exc}" + + +class TestF77CommonBlockReader: + def test_gh22648(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") + with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: + mod = crackfortran.crackfortran([str(fpath)]) + assert "Mismatch" not in stdout_f2py.getvalue() + +class TestParamEval: + # issue gh-11612, array parameter parsing + def test_param_eval_nested(self): + v = '(/3.14, 4./)' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {'dp': 8, 'intparamarray': {1: 3, 2: 5}, + 'nested': {1: 1, 2: 2, 3: 3}} + dimspec = '(2)' + ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) + assert ret == {1: 3.14, 2: 4.0} + + def test_param_eval_nonstandard_range(self): + v = '(/ 6, 3, 1 /)' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '(-1:1)' + ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) + assert ret == {-1: 6, 0: 3, 1: 1} + + def test_param_eval_empty_range(self): + v = '6' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '' + pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, + dimspec=dimspec) + + def test_param_eval_non_array_param(self): + v = '3.14_dp' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + ret = crackfortran.param_eval(v, g_params, params, dimspec=None) + assert ret == '3.14_dp' + + def test_param_eval_too_many_dims(self): + v = 'reshape((/ (i, i=1, 250) /), (/5, 10, 5/))' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '(0:4, 3:12, 5)' + pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, + 
dimspec=dimspec) + +@pytest.mark.slow +class TestLowerF2PYDirective(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh27697.f90")] + options = ['--lower'] + + def test_no_lower_fail(self): + with pytest.raises(ValueError, match='aborting directly') as exc: + self.module.utils.my_abort('aborting directly') diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_data.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_data.py new file mode 100644 index 0000000..0cea556 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_data.py @@ -0,0 +1,71 @@ +import pytest + +import numpy as np +from numpy.f2py.crackfortran import crackfortran + +from . import util + + +class TestData(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_stmts.f90")] + + # For gh-23276 + @pytest.mark.slow + def test_data_stmts(self): + assert self.module.cmplxdat.i == 2 + assert self.module.cmplxdat.j == 3 + assert self.module.cmplxdat.x == 1.5 + assert self.module.cmplxdat.y == 2.0 + assert self.module.cmplxdat.pi == 3.1415926535897932384626433832795028841971693993751058209749445923078164062 + assert self.module.cmplxdat.medium_ref_index == np.array(1. + 0.j) + assert np.all(self.module.cmplxdat.z == np.array([3.5, 7.0])) + assert np.all(self.module.cmplxdat.my_array == np.array([ 1. + 2.j, -3. + 4.j])) + assert np.all(self.module.cmplxdat.my_real_array == np.array([ 1., 2., 3.])) + assert np.all(self.module.cmplxdat.ref_index_one == np.array([13.0 + 21.0j])) + assert np.all(self.module.cmplxdat.ref_index_two == np.array([-30.0 + 43.0j])) + + def test_crackedlines(self): + mod = crackfortran(self.sources) + assert mod[0]['vars']['x']['='] == '1.5' + assert mod[0]['vars']['y']['='] == '2.0' + assert mod[0]['vars']['pi']['='] == '3.1415926535897932384626433832795028841971693993751058209749445923078164062d0' + assert mod[0]['vars']['my_real_array']['='] == '(/1.0d0, 2.0d0, 3.0d0/)' + assert mod[0]['vars']['ref_index_one']['='] == '(13.0d0, 21.0d0)' + assert mod[0]['vars']['ref_index_two']['='] == '(-30.0d0, 43.0d0)' + assert mod[0]['vars']['my_array']['='] == '(/(1.0d0, 2.0d0), (-3.0d0, 4.0d0)/)' + assert mod[0]['vars']['z']['='] == '(/3.5, 7.0/)' + +class TestDataF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_common.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.mydata == 0 + + def test_crackedlines(self): + mod = crackfortran(str(self.sources[0])) + print(mod[0]['vars']) + assert mod[0]['vars']['mydata']['='] == '0' + + +class TestDataMultiplierF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_multiplier.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.ivar1 == 3 + assert self.module.mycom.ivar2 == 3 + assert self.module.mycom.ivar3 == 2 + assert self.module.mycom.ivar4 == 2 + assert self.module.mycom.evar5 == 0 + + +class TestDataWithCommentsF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_with_comments.f")] + + # For gh-23276 + def test_data_stmts(self): + assert len(self.module.mycom.mytab) == 3 + assert self.module.mycom.mytab[0] == 0 + assert self.module.mycom.mytab[1] == 4 + assert self.module.mycom.mytab[2] == 0 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_docs.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_docs.py new file mode 100644 index 0000000..5d9aaac --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_docs.py @@ -0,0 +1,64 @@ +from pathlib import Path + +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, assert_equal + +from . import util + + +def get_docdir(): + parents = Path(__file__).resolve().parents + try: + # Assumes that spin is used to run tests + nproot = parents[8] + except IndexError: + docdir = None + else: + docdir = nproot / "doc" / "source" / "f2py" / "code" + if docdir and docdir.is_dir(): + return docdir + # Assumes that an editable install is used to run tests + return parents[3] / "doc" / "source" / "f2py" / "code" + + +pytestmark = pytest.mark.skipif( + not get_docdir().is_dir(), + reason=f"Could not find f2py documentation sources" + f"({get_docdir()} does not exist)", +) + +def _path(*args): + return get_docdir().joinpath(*args) + +@pytest.mark.slow +class TestDocAdvanced(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/build-f2py'] + sources = [_path('asterisk1.f90'), _path('asterisk2.f90'), + _path('ftype.f')] + + def test_asterisk1(self): + foo = self.module.foo1 + assert_equal(foo(), b'123456789A12') + + def test_asterisk2(self): + foo = self.module.foo2 + assert_equal(foo(2), b'12') + assert_equal(foo(12), b'123456789A12') + assert_equal(foo(20), b'123456789A123456789B') + + def test_ftype(self): + ftype = self.module + ftype.foo() + assert_equal(ftype.data.a, 0) + ftype.data.a = 3 + ftype.data.x = [1, 2, 3] + assert_equal(ftype.data.a, 3) + assert_array_equal(ftype.data.x, + np.array([1, 2, 3], dtype=np.float32)) + ftype.data.x[1] = 45 + assert_array_equal(ftype.data.x, + np.array([1, 45, 3], dtype=np.float32)) + + # TODO: implement test methods for other example Fortran codes diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_f2cmap.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_f2cmap.py new file mode 100644 index 0000000..a35320c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_f2cmap.py @@ -0,0 +1,17 @@ +import numpy as np + +from . import util + + +class TestF2Cmap(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"), + util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap") + ] + + # gh-15095 + def test_gh15095(self): + inp = np.ones(3) + out = self.module.func1(inp) + exp_out = 3 + assert out == exp_out diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_f2py2e.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_f2py2e.py new file mode 100644 index 0000000..2f91eb7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_f2py2e.py @@ -0,0 +1,964 @@ +import platform +import re +import shlex +import subprocess +import sys +import textwrap +from collections import namedtuple +from pathlib import Path + +import pytest + +from numpy.f2py.f2py2e import main as f2pycli +from numpy.testing._private.utils import NOGIL_BUILD + +from . 
import util + +####################### +# F2PY Test utilities # +####################### + +# Tests for CLI commands which call meson will fail if no compilers are present; these are skipped + +def compiler_check_f2pycli(): + if not util.has_fortran_compiler(): + pytest.skip("CLI command needs a Fortran compiler") + else: + f2pycli() + +######################### +# CLI utils and classes # +######################### + + +PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf") + + +def get_io_paths(fname_inp, mname="untitled"): + """Takes in a temporary file for testing and returns the expected output and input paths + + Here expected output is essentially any one of the possible generated + files. + + .. note:: + + Since this does not actually run f2py, none of these are guaranteed to + exist, and module names are typically incorrect + + Parameters + ---------- + fname_inp : str + The input filename + mname : str, optional + The name of the module, untitled by default + + Returns + ------- + genp : NamedTuple PPaths + The possible paths which are generated, not all of which exist + """ + bpath = Path(fname_inp) + return PPaths( + finp=bpath.with_suffix(".f"), + f90inp=bpath.with_suffix(".f90"), + pyf=bpath.with_suffix(".pyf"), + wrap77=bpath.with_name(f"{mname}-f2pywrappers.f"), + wrap90=bpath.with_name(f"{mname}-f2pywrappers2.f90"), + cmodf=bpath.with_name(f"{mname}module.c"), + ) + + +################ +# CLI Fixtures # +################ + + +@pytest.fixture(scope="session") +def hello_world_f90(tmpdir_factory): + """Generates a single f90 file for testing""" + fdat = util.getpath("tests", "src", "cli", "hiworld.f90").read_text() + fn = tmpdir_factory.getbasetemp() / "hello.f90" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def gh23598_warn(tmpdir_factory): + """F90 file for testing warnings in gh23598""" + fdat = util.getpath("tests", "src", "crackfortran", "gh23598Warn.f90").read_text() + fn = tmpdir_factory.getbasetemp() / "gh23598Warn.f90" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def gh22819_cli(tmpdir_factory): + """Pyf file for testing disallowed CLI arguments in gh-22819""" + fdat = util.getpath("tests", "src", "cli", "gh_22819.pyf").read_text() + fn = tmpdir_factory.getbasetemp() / "gh_22819.pyf" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def hello_world_f77(tmpdir_factory): + """Generates a single f77 file for testing""" + fdat = util.getpath("tests", "src", "cli", "hi77.f").read_text() + fn = tmpdir_factory.getbasetemp() / "hello.f" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def retreal_f77(tmpdir_factory): + """Generates a single f77 file for testing""" + fdat = util.getpath("tests", "src", "return_real", "foo77.f").read_text() + fn = tmpdir_factory.getbasetemp() / "foo.f" + fn.write_text(fdat, encoding="ascii") + return fn + +@pytest.fixture(scope="session") +def f2cmap_f90(tmpdir_factory): + """Generates a single f90 file for testing""" + fdat = util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90").read_text() + f2cmap = util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap").read_text() + fn = tmpdir_factory.getbasetemp() / "f2cmap.f90" + fmap = tmpdir_factory.getbasetemp() / "mapfile" + fn.write_text(fdat, encoding="ascii") + fmap.write_text(f2cmap, encoding="ascii") + return fn + +######### +# Tests # +######### + +def test_gh22819_cli(capfd, gh22819_cli,
monkeypatch): + """Check that module names are handled correctly + gh-22819 + Essentially, the -m name cannot be used to import the module, so the module + named in the .pyf needs to be used instead + + CLI :: -m and a .pyf file + """ + ipath = Path(gh22819_cli) + monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath}".split()) + with util.switchdir(ipath.parent): + f2pycli() + gen_paths = [item.name for item in ipath.parent.rglob("*") if item.is_file()] + assert "blahmodule.c" not in gen_paths # shouldn't be generated + assert "blah-f2pywrappers.f" not in gen_paths + assert "test_22819-f2pywrappers.f" in gen_paths + assert "test_22819module.c" in gen_paths + + +def test_gh22819_many_pyf(capfd, gh22819_cli, monkeypatch): + """Only one .pyf file allowed + gh-22819 + CLI :: .pyf files + """ + ipath = Path(gh22819_cli) + monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath} hello.pyf".split()) + with util.switchdir(ipath.parent): + with pytest.raises(ValueError, match="Only one .pyf file per call"): + f2pycli() + + +def test_gh23598_warn(capfd, gh23598_warn, monkeypatch): + foutl = get_io_paths(gh23598_warn, mname="test") + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate files + wrapper = foutl.wrap90.read_text() + assert "intproductf2pywrap, intpr" not in wrapper + + +def test_gen_pyf(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file is generated via the CLI + CLI :: -h + """ + ipath = Path(hello_world_f90) + opath = Path(hello_world_f90).stem + ".pyf" + monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate wrappers + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert Path(f'{opath}').exists() + + +def test_gen_pyf_stdout(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file can be dumped to stdout + CLI :: -h + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h stdout {ipath}'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert "function hi() ! 
in " in out + + +def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): + """Ensures that the CLI refuses to overwrite signature files + CLI :: -h without --overwrite-signature + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h faker.pyf {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path("faker.pyf").write_text("Fake news", encoding="ascii") + with pytest.raises(SystemExit): + f2pycli() # Refuse to overwrite + _, err = capfd.readouterr() + assert "Use --overwrite-signature to overwrite" in err + + +@pytest.mark.skipif(sys.version_info <= (3, 12), reason="Python 3.12 required") +def test_untitled_cli(capfd, hello_world_f90, monkeypatch): + """Check that modules are named correctly + + CLI :: defaults + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f"f2py --backend meson -c {ipath}".split()) + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + out, _ = capfd.readouterr() + assert "untitledmodule.c" in out + + +@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') +def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): + """Check that no distutils imports are performed on 3.12 + CLI :: --fcompiler --help-link --backend distutils + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() + ) + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + out, _ = capfd.readouterr() + assert "--fcompiler cannot be used with meson" in out + monkeypatch.setattr( + sys, "argv", ["f2py", "--help-link"] + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Use --dep for meson builds" in out + MNAME = "hi2" # Needs to be different for a new -c + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Cannot use distutils backend with Python>=3.12" in out + + +@pytest.mark.xfail +def test_f2py_skip(capfd, retreal_f77, monkeypatch): + """Tests that functions can be skipped + CLI :: skip: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + remaining = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test skip: {toskip}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not found the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in remaining.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_f2py_only(capfd, retreal_f77, monkeypatch): + """Test that functions can be kept by only: + CLI :: only: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + tokeep = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test only: {tokeep}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' 
+ in err) + for rkey in tokeep.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_file_processing_switch(capfd, hello_world_f90, retreal_f77, + monkeypatch): + """Tests that it is possible to return to file processing mode + CLI :: : + BUG: numpy-gh #20520 + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + ipath2 = Path(hello_world_f90) + tokeep = "td s0 hi" # hi is in ipath2 + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -m {mname} only: {tokeep} : {ipath2}'.split( + ), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in tokeep.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch): + """Checks the generation of files based on a module name + CLI :: -m + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split()) + with util.switchdir(ipath.parent): + f2pycli() + + # Always generate C module + assert Path.exists(foutl.cmodf) + # File contains a function, check for F77 wrappers + assert Path.exists(foutl.wrap77) + + +def test_mod_gen_gh25263(capfd, hello_world_f77, monkeypatch): + """Check that pyf files are correctly generated with module structure + CLI :: -m -h pyf_file + BUG: numpy-gh #20520 + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f77, mname=MNAME) + ipath = foutl.finp + monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME} -h hi.pyf'.split()) + with util.switchdir(ipath.parent): + f2pycli() + with Path('hi.pyf').open() as hipyf: + pyfdat = hipyf.read() + assert "python module hi" in pyfdat + + +def test_lower_cmod(capfd, hello_world_f77, monkeypatch): + """Lowers cases by flag or when -h is present + + CLI :: --[no-]lower + """ + foutl = get_io_paths(hello_world_f77, mname="test") + ipath = foutl.finp + capshi = re.compile(r"HI\(\)") + capslo = re.compile(r"hi\(\)") + # Case I: --lower is passed + monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --lower'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert capslo.search(out) is not None + assert capshi.search(out) is None + # Case II: --no-lower is passed + monkeypatch.setattr(sys, "argv", + f'f2py {ipath} -m test --no-lower'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert capslo.search(out) is None + assert capshi.search(out) is not None + + +def test_lower_sig(capfd, hello_world_f77, monkeypatch): + """Lowers cases in signature files by flag or when -h is present + + CLI :: --[no-]lower -h + """ + foutl = get_io_paths(hello_world_f77, mname="test") + ipath = foutl.finp + # Signature files + capshi = re.compile(r"Block: HI") + capslo = re.compile(r"Block: hi") + # Case I: --lower is implied by -h + # TODO: Clean up to prevent passing --overwrite-signature + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature'.split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert capslo.search(out) is not None + assert capshi.search(out) is None + + # Case II: --no-lower overrides -h + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -h {foutl.pyf} -m test 
--overwrite-signature --no-lower' + .split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert capslo.search(out) is None + assert capshi.search(out) is not None + + +def test_build_dir(capfd, hello_world_f90, monkeypatch): + """Ensures that the build directory can be specified + + CLI :: --build-dir + """ + ipath = Path(hello_world_f90) + mname = "blah" + odir = "tttmp" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --build-dir {odir}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert f"Wrote C/API module \"{mname}\"" in out + + +def test_overwrite(capfd, hello_world_f90, monkeypatch): + """Ensures that existing signature files can be overwritten + + CLI :: --overwrite-signature + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr( + sys, "argv", + f'f2py -h faker.pyf {ipath} --overwrite-signature'.split()) + + with util.switchdir(ipath.parent): + Path("faker.pyf").write_text("Fake news", encoding="ascii") + f2pycli() + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + + +def test_latexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that TeX documentation is written out + + CLI :: --latex-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --latex-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" in out + with Path(f"{mname}module.tex").open() as otex: + assert "\\documentclass" in otex.read() + + +def test_nolatexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that TeX documentation is not written out + + CLI :: --no-latex-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-latex-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" not in out + + +def test_shortlatex(capfd, hello_world_f90, monkeypatch): + """Ensures that truncated documentation is written out + + TODO: Test to ensure this has no effect without --latex-doc + CLI :: --latex-doc --short-latex + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" in out + with Path(f"./{mname}module.tex").open() as otex: + assert "\\documentclass" not in otex.read() + + +def test_restdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that ReST documentation is written out + + CLI :: --rest-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --rest-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "ReST Documentation is saved to file" in out + with Path(f"./{mname}module.rest").open() as orst: + assert r".. 
-*- rest -*-" in orst.read() + + +def test_norestexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that ReST documentation is not written out + + CLI :: --no-rest-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-rest-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "ReST Documentation is saved to file" not in out + + +def test_debugcapi(capfd, hello_world_f90, monkeypatch): + """Ensures that debugging wrappers are written + + CLI :: --debug-capi + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --debug-capi'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + with Path(f"./{mname}module.c").open() as ocmod: + assert r"#define DEBUGCFUNCS" in ocmod.read() + + +@pytest.mark.skip(reason="Consistently fails on CI; noisy so skip not xfail.") +def test_debugcapi_bld(hello_world_f90, monkeypatch): + """Ensures that debugging wrappers work + + CLI :: --debug-capi -c + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} -c --debug-capi'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + eerr = textwrap.dedent("""\ +debug-capi:Python C/API function blah.hi() +debug-capi:float hi=:output,hidden,scalar +debug-capi:hi=0 +debug-capi:Fortran subroutine `f2pywraphi(&hi)' +debug-capi:hi=0 +debug-capi:Building return value. +debug-capi:Python C/API function blah.hi: successful. +debug-capi:Freeing memory. + """) + assert rout.stdout == eout + assert rout.stderr == eerr + + +def test_wrapfunc_def(capfd, hello_world_f90, monkeypatch): + """Ensures that Fortran subroutine wrappers for F77 are included by default + + CLI :: --[no]-wrap-functions + """ + # Implied + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" in out + + # Explicit + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --wrap-functions'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" in out + + +def test_nowrapfunc(capfd, hello_world_f90, monkeypatch): + """Ensures that Fortran subroutine wrappers for F77 can be disabled + + CLI :: --no-wrap-functions + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-wrap-functions'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" not in out + + +def test_inclheader(capfd, hello_world_f90, monkeypatch): + """Add to the include directories + + CLI :: -include + TODO: Document this in the help string + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py -m {mname} {ipath} -include<stdbool.h> -include<stdio.h>'. 
+ split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + with Path(f"./{mname}module.c").open() as ocmod: + ocmr = ocmod.read() + assert "#include <stdbool.h>" in ocmr + assert "#include <stdio.h>" in ocmr + + +def test_inclpath(): + """Add to the include directories + + CLI :: --include-paths + """ + # TODO: populate + pass + + +def test_hlink(): + """Show the resources that extensions can be linked against + + CLI :: --help-link + """ + # TODO: populate + pass + + +def test_f2cmap(capfd, f2cmap_f90, monkeypatch): + """Check that Fortran-to-Python KIND specs can be passed + + CLI :: --f2cmap + """ + ipath = Path(f2cmap_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --f2cmap mapfile'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Reading f2cmap from 'mapfile' ..." in out + assert "Mapping \"real(kind=real32)\" to \"float\"" in out + assert "Mapping \"real(kind=real64)\" to \"double\"" in out + assert "Mapping \"integer(kind=int64)\" to \"long_long\"" in out + assert "Successfully applied user defined f2cmap changes" in out + + +def test_quiet(capfd, hello_world_f90, monkeypatch): + """Reduce verbosity + + CLI :: --quiet + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --quiet'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert len(out) == 0 + + +def test_verbose(capfd, hello_world_f90, monkeypatch): + """Increase verbosity + + CLI :: --verbose + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --verbose'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "analyzeline" in out + + +def test_version(capfd, monkeypatch): + """Ensure version + + CLI :: -v + """ + monkeypatch.setattr(sys, "argv", ["f2py", "-v"]) + # TODO: f2py2e should not call sys.exit() after printing the version + with pytest.raises(SystemExit): + f2pycli() + out, _ = capfd.readouterr() + import numpy as np + assert np.__version__ == out.strip() + + +@pytest.mark.skip(reason="Consistently fails on CI; noisy so skip not xfail.") +def test_npdistop(hello_world_f90, monkeypatch): + """ + CLI :: -c + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + + +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_no_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --no-freethreading-compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is True\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + if NOGIL_BUILD: + assert "The global interpreter lock (GIL) has been enabled to load module 'blah'" in rout.stderr + assert rout.returncode == 0 + + +@pytest.mark.skipif((platform.system() != 
'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --freethreading-compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is False\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + assert rout.stderr == "" + assert rout.returncode == 0 + + +# Numpy distutils flags +# TODO: These should be tested separately + +def test_npd_fcompiler(): + """ + CLI :: -c --fcompiler + """ + # TODO: populate + pass + + +def test_npd_compiler(): + """ + CLI :: -c --compiler + """ + # TODO: populate + pass + + +def test_npd_help_fcompiler(): + """ + CLI :: -c --help-fcompiler + """ + # TODO: populate + pass + + +def test_npd_f77exec(): + """ + CLI :: -c --f77exec + """ + # TODO: populate + pass + + +def test_npd_f90exec(): + """ + CLI :: -c --f90exec + """ + # TODO: populate + pass + + +def test_npd_f77flags(): + """ + CLI :: -c --f77flags + """ + # TODO: populate + pass + + +def test_npd_f90flags(): + """ + CLI :: -c --f90flags + """ + # TODO: populate + pass + + +def test_npd_opt(): + """ + CLI :: -c --opt + """ + # TODO: populate + pass + + +def test_npd_arch(): + """ + CLI :: -c --arch + """ + # TODO: populate + pass + + +def test_npd_noopt(): + """ + CLI :: -c --noopt + """ + # TODO: populate + pass + + +def test_npd_noarch(): + """ + CLI :: -c --noarch + """ + # TODO: populate + pass + + +def test_npd_debug(): + """ + CLI :: -c --debug + """ + # TODO: populate + pass + + +def test_npd_link_auto(): + """ + CLI :: -c --link-<resource> + """ + # TODO: populate + pass + + +def test_npd_lib(): + """ + CLI :: -c -L/path/to/lib/ -l<libname> + """ + # TODO: populate + pass + + +def test_npd_define(): + """ + CLI :: -D + """ + # TODO: populate + pass + + +def test_npd_undefine(): + """ + CLI :: -U + """ + # TODO: populate + pass + + +def test_npd_incl(): + """ + CLI :: -I/path/to/include/ + """ + # TODO: populate + pass + + +def test_npd_linker(): + """ + CLI :: <filename>.o <filename>.so <filename>.a + """ + # TODO: populate + pass diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_isoc.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_isoc.py new file mode 100644 index 0000000..f3450f1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_isoc.py @@ -0,0 +1,56 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose + +from . 
import util + + +class TestISOC(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "isocintrin", "isoCtests.f90"), + ] + + # gh-24553 + @pytest.mark.slow + def test_c_double(self): + out = self.module.coddity.c_add(1, 2) + exp_out = 3 + assert out == exp_out + + # gh-9693 + def test_bindc_function(self): + out = self.module.coddity.wat(1, 20) + exp_out = 8 + assert out == exp_out + + # gh-25207 + def test_bindc_kinds(self): + out = self.module.coddity.c_add_int64(1, 20) + exp_out = 21 + assert out == exp_out + + # gh-25207 + def test_bindc_add_arr(self): + a = np.array([1, 2, 3]) + b = np.array([1, 2, 3]) + out = self.module.coddity.add_arr(a, b) + exp_out = a * 2 + assert_allclose(out, exp_out) + + +def test_process_f2cmap_dict(): + from numpy.f2py.auxfuncs import process_f2cmap_dict + + f2cmap_all = {"integer": {"8": "rubbish_type"}} + new_map = {"INTEGER": {"4": "int"}} + c2py_map = {"int": "int", "rubbish_type": "long"} + + exp_map, exp_maptyp = ({"integer": {"8": "rubbish_type", "4": "int"}}, ["int"]) + + # Call the function + res_map, res_maptyp = process_f2cmap_dict(f2cmap_all, new_map, c2py_map) + + # Assert the result is as expected + assert res_map == exp_map + assert res_maptyp == exp_maptyp diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_kind.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_kind.py new file mode 100644 index 0000000..ce223a5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_kind.py @@ -0,0 +1,53 @@ +import platform +import sys + +import pytest + +from numpy.f2py.crackfortran import ( + _selected_int_kind_func as selected_int_kind, +) +from numpy.f2py.crackfortran import ( + _selected_real_kind_func as selected_real_kind, +) + +from . import util + + +class TestKind(util.F2PyTest): + sources = [util.getpath("tests", "src", "kind", "foo.f90")] + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, + reason="Fails for 32 bit machines") + def test_int(self): + """Test `int` kind_func for integers up to 10**40.""" + selectedintkind = self.module.selectedintkind + + for i in range(40): + assert selectedintkind(i) == selected_int_kind( + i + ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}" + + def test_real(self): + """ + Test (processor-dependent) `real` kind_func for real numbers + of up to 31 digits precision (extended/quadruple). + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" + + @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + reason="Some PowerPC may not support full IEEE 754 precision") + def test_quad_precision(self): + """ + Test kind_func for quadruple precision [`real(16)`] of 32+ digits . + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32, 40): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_mixed.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_mixed.py new file mode 100644 index 0000000..07f43e2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_mixed.py @@ -0,0 +1,35 @@ +import textwrap + +import pytest + +from numpy.testing import IS_PYPY + +from . 
import util + + +class TestMixed(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "mixed", "foo.f"), + util.getpath("tests", "src", "mixed", "foo_fixed.f90"), + util.getpath("tests", "src", "mixed", "foo_free.f90"), + ] + + @pytest.mark.slow + def test_all(self): + assert self.module.bar11() == 11 + assert self.module.foo_fixed.bar12() == 12 + assert self.module.foo_free.bar13() == 13 + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = bar11() + + Wrapper for ``bar11``. + + Returns + ------- + a : int + """) + assert self.module.bar11.__doc__ == expected diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_modules.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_modules.py new file mode 100644 index 0000000..96d5ffc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_modules.py @@ -0,0 +1,83 @@ +import textwrap + +import pytest + +from numpy.testing import IS_PYPY + +from . import util + + +@pytest.mark.slow +class TestModuleFilterPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_one_public_routine.f90" + ) + ] + # we filter the only public function mod2 + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleWithoutPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_no_public_entities.f90" + ) + ] + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] + + @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_module_docstring(self): + assert self.module.mod.__doc__ == textwrap.dedent( + """\ + i : 'i'-scalar + x : 'i'-array(4) + a : 'f'-array(2,3) + b : 'f'-array(-1,-1), not allocated\x00 + foo()\n + Wrapper for ``foo``.\n\n""" + ) + + +@pytest.mark.slow +class TestModuleAndSubroutine(util.F2PyTest): + module_name = "example" + sources = [ + util.getpath("tests", "src", "modules", "gh25337", "data.f90"), + util.getpath("tests", "src", "modules", "gh25337", "use_data.f90"), + ] + + def test_gh25337(self): + self.module.data.set_shift(3) + assert "data" in dir(self.module) + + +@pytest.mark.slow +class TestUsedModule(util.F2PyTest): + module_name = "fmath" + sources = [ + util.getpath("tests", "src", "modules", "use_modules.f90"), + ] + + def test_gh25867(self): + compiled_mods = [x for x in dir(self.module) if "__" not in x] + assert "useops" in compiled_mods + assert self.module.useops.sum_and_double(3, 7) == 20 + assert "mathops" in compiled_mods + assert self.module.mathops.add(3, 7) == 10 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_parameter.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_parameter.py new file mode 100644 index 0000000..513d021 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_parameter.py @@ -0,0 +1,129 @@ +import pytest + +import numpy as np + +from . 
import util + + +class TestParameters(util.F2PyTest): + # Check that Fortran PARAMETER constants are folded into the wrappers correctly + sources = [ + util.getpath("tests", "src", "parameter", "constant_real.f90"), + util.getpath("tests", "src", "parameter", "constant_integer.f90"), + util.getpath("tests", "src", "parameter", "constant_both.f90"), + util.getpath("tests", "src", "parameter", "constant_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_non_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_array.f90"), + ] + + @pytest.mark.slow + def test_constant_real_single(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + pytest.raises(ValueError, self.module.foo_single, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo_single(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_real_double(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_double, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_double(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_compound_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_compound_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2]) + + @pytest.mark.slow + def test_constant_non_compound_int(self): + # check values + x = np.arange(4, dtype=np.int32) + self.module.foo_non_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3]) + + @pytest.mark.slow + def test_constant_integer_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_int(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_integer_long(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int64)[::2] + pytest.raises(ValueError, self.module.foo_long, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int64) + self.module.foo_long(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_both(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_no(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_no, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_no(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_sum(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_sum, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_sum(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + def 
test_constant_array(self): + x = np.arange(3, dtype=np.float64) + y = np.arange(5, dtype=np.float64) + z = self.module.foo_array(x, y) + assert np.allclose(x, [0.0, 1. / 10, 2. / 10]) + assert np.allclose(y, [0.0, 1. * 10, 2. * 10, 3. * 10, 4. * 10]) + assert np.allclose(z, 19.0) + + def test_constant_array_any_index(self): + x = np.arange(6, dtype=np.float64) + y = self.module.foo_array_any_index(x) + assert np.allclose(y, x.reshape((2, 3), order='F')) + + def test_constant_array_delims(self): + x = self.module.foo_array_delims() + assert x == 9 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_pyf_src.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_pyf_src.py new file mode 100644 index 0000000..2ecb0fb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_pyf_src.py @@ -0,0 +1,43 @@ +# This test is ported from numpy.distutils +from numpy.f2py._src_pyf import process_str +from numpy.testing import assert_equal + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine <s,d>foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. + """ + return ' '.join(s.split()) + + +def test_from_template(): + """Regression test for gh-10712.""" + pyf = process_str(pyf_src) + normalized_pyf = normalize_whitespace(pyf) + normalized_expected_pyf = normalize_whitespace(expected_pyf) + assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_quoted_character.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_quoted_character.py new file mode 100644 index 0000000..3cbcb3c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_quoted_character.py @@ -0,0 +1,18 @@ +"""See https://github.com/numpy/numpy/pull/10676. + +""" +import sys + +import pytest + +from . import util + + +class TestQuotedCharacter(util.F2PyTest): + sources = [util.getpath("tests", "src", "quoted_character", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.slow + def test_quoted_character(self): + assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")") diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_regression.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_regression.py new file mode 100644 index 0000000..93eb29e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_regression.py @@ -0,0 +1,187 @@ +import os +import platform + +import pytest + +import numpy as np +import numpy.testing as npt + +from . 
import util + + +class TestIntentInOut(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [util.getpath("tests", "src", "regression", "inout.f90")] + + @pytest.mark.slow + def test_inout(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + pytest.raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo(x) + assert np.allclose(x, [3, 1, 2]) + + +class TestDataOnlyMultiModule(util.F2PyTest): + # Check that modules without subroutines work + sources = [util.getpath("tests", "src", "regression", "datonly.f90")] + + @pytest.mark.slow + def test_mdat(self): + assert self.module.datonly.max_value == 100 + assert self.module.dat.max_ == 1009 + int_in = 5 + assert self.module.simple_subroutine(int_in) == 1014 + + +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + +class TestNegativeBounds(util.F2PyTest): + # Check that negative bounds work correctly + sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] + + @pytest.mark.slow + def test_negbound(self): + xvec = np.arange(12) + xlow = -6 + xhigh = 4 + + # Calculate the upper bound, + # keeping the 1-based indexing in mind + + def ubound(xl, xh): + return xh - xl + 1 + rval = self.module.foo(is_=xlow, ie_=xhigh, + arr=xvec[:ubound(xlow, xhigh)]) + expval = np.arange(11, dtype=np.float32) + assert np.allclose(rval, expval) + + +class TestNumpyVersionAttribute(util.F2PyTest): + # Check that the attribute __f2py_numpy_version__ is present + # in the compiled module and that it has the value np.__version__. 
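+ # A minimal interactive sketch of the same invariant (the module name `m` + # is hypothetical; any f2py-built extension module carries this attribute): + # >>> import numpy, m + # >>> m.__f2py_numpy_version__ == numpy.__version__ + # True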
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")] + + @pytest.mark.slow + def test_numpy_version_attribute(self): + + # Check that self.module has an attribute named "__f2py_numpy_version__" + assert hasattr(self.module, "__f2py_numpy_version__") + + # Check that the attribute __f2py_numpy_version__ is a string + assert isinstance(self.module.__f2py_numpy_version__, str) + + # Check that __f2py_numpy_version__ has the value numpy.__version__ + assert np.__version__ == self.module.__f2py_numpy_version__ + + +def test_include_path(): + incdir = np.f2py.get_include() + fnames_in_dir = os.listdir(incdir) + for fname in ("fortranobject.c", "fortranobject.h"): + assert fname in fnames_in_dir + + +class TestIncludeFiles(util.F2PyTest): + sources = [util.getpath("tests", "src", "regression", "incfile.f90")] + options = [f"-I{util.getpath('tests', 'src', 'regression')}", + f"--include-paths {util.getpath('tests', 'src', 'regression')}"] + + @pytest.mark.slow + def test_gh25344(self): + exp = 7.0 + res = self.module.add(3.0, 4.0) + assert exp == res + +class TestF77Comments(util.F2PyTest): + # Check that comments are stripped from F77 continuation lines + sources = [util.getpath("tests", "src", "regression", "f77comments.f")] + + @pytest.mark.slow + def test_gh26148(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 + + @pytest.mark.slow + def test_gh26466(self): + # Check that comments after PARAMETER statements are stripped + expected = np.arange(1, 11, dtype=np.float32) * 2 + res = self.module.testsub2() + npt.assert_allclose(expected, res) + +class TestF90Continuation(util.F2PyTest): + # Check that comments are stripped from F90 continuation lines + sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")] + + @pytest.mark.slow + def test_gh26148b(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 + +class TestLowerF2PYDirectives(util.F2PyTest): + # Check variables are cased correctly + sources = [util.getpath("tests", "src", "regression", "lower_f2py_fortran.f90")] + + @pytest.mark.slow + def test_gh28014(self): + self.module.inquire_next(3) + assert True + +@pytest.mark.slow +def test_gh26623(): + # Including libraries with a '.' in the name 
should not generate an incorrect meson.build + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f90continuation.f90")], + ["-lfoo.bar"], + module_name="Blah", + ) + except RuntimeError as rerr: + assert "lparen got assign" not in str(rerr) + + +@pytest.mark.slow +@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') +def test_gh25784(): + # Compile dubious file using passed flags + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f77fixedform.f95")], + options=[ + # Meson will collect and dedup these to pass to fortran_args: + "--f77flags='-ffixed-form -O2'", + "--f90flags=\"-ffixed-form -Og\"", + ], + module_name="Blah", + ) + except ImportError as rerr: + assert "unknown_subroutine_" in str(rerr) + + +@pytest.mark.slow +class TestAssignmentOnlyModules(util.F2PyTest): + # Ensure that variables are exposed without functions or subroutines in a module + sources = [util.getpath("tests", "src", "regression", "assignOnlyModule.f90")] + + @pytest.mark.slow + def test_gh27167(self): + assert (self.module.f_globals.n_max == 16) + assert (self.module.f_globals.i_max == 18) + assert (self.module.f_globals.j_max == 72) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_character.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_character.py new file mode 100644 index 0000000..aae3f0f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_character.py @@ -0,0 +1,48 @@ +import platform + +import pytest + +from numpy import array + +from . import util + +IS_S390X = platform.machine() == "s390x" + + +@pytest.mark.slow +class TestReturnCharacter(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t1", "s0", "s1"]: + assert t("23") == b"2" + r = t("ab") + assert r == b"a" + r = t(array("ab")) + assert r == b"a" + r = t(array(77, "u1")) + assert r == b"M" + elif tname in ["ts", "ss"]: + assert t(23) == b"23" + assert t("123456789abcdef") == b"123456789a" + elif tname in ["t5", "s5"]: + assert t(23) == b"23" + assert t("ab") == b"ab" + assert t("123456789abcdef") == b"12345" + else: + raise NotImplementedError + + +class TestFReturnCharacter(TestReturnCharacter): + sources = [ + util.getpath("tests", "src", "return_character", "foo77.f"), + util.getpath("tests", "src", "return_character", "foo90.f90"), + ] + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "s0", "s1", "s5", "ss"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "ts", "s0", "s1", "s5", "ss"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_complex.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_complex.py new file mode 100644 index 0000000..aa3f28e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_complex.py @@ -0,0 +1,67 @@ +import pytest + +from numpy import array + +from . 
import util + + +@pytest.mark.slow +class TestReturnComplex(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t8", "s0", "s8"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234j) - 234.0j) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err + # assert abs(t('234')-234.)<=err + # assert abs(t('234.6')-234.6)<=err + assert abs(t(-234) + 234.0) <= err + assert abs(t([234]) - 234.0) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err + assert abs(t(array([234])) - 234.0) <= err + assert abs(t(array([[234]])) - 234.0) <= err + assert abs(t(array([234]).astype("b")) + 22.0) <= err + assert abs(t(array([234], "h")) - 234.0) <= err + assert abs(t(array([234], "i")) - 234.0) <= err + assert abs(t(array([234], "l")) - 234.0) <= err + assert abs(t(array([234], "q")) - 234.0) <= err + assert abs(t(array([234], "f")) - 234.0) <= err + assert abs(t(array([234], "d")) - 234.0) <= err + assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err + assert abs(t(array([234], "D")) - 234.0) <= err + + # pytest.raises(TypeError, t, array([234], 'S1')) + pytest.raises(TypeError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["(inf+0j)", "(Infinity+0j)"] + except OverflowError: + pass + + +class TestFReturnComplex(TestReturnComplex): + sources = [ + util.getpath("tests", "src", "return_complex", "foo77.f"), + util.getpath("tests", "src", "return_complex", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_complex, name), + name) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_integer.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_integer.py new file mode 100644 index 0000000..13a9f86 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_integer.py @@ -0,0 +1,55 @@ +import pytest + +from numpy import array + +from . 
import util + + +@pytest.mark.slow +class TestReturnInteger(util.F2PyTest): + def check_function(self, t, tname): + assert t(123) == 123 + assert t(123.6) == 123 + assert t("123") == 123 + assert t(-123) == -123 + assert t([123]) == 123 + assert t((123, )) == 123 + assert t(array(123)) == 123 + assert t(array(123, "b")) == 123 + assert t(array(123, "h")) == 123 + assert t(array(123, "i")) == 123 + assert t(array(123, "l")) == 123 + assert t(array(123, "B")) == 123 + assert t(array(123, "f")) == 123 + assert t(array(123, "d")) == 123 + + # pytest.raises(ValueError, t, array([123],'S3')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + if tname in ["t8", "s8"]: + pytest.raises(OverflowError, t, 100000000000000000000000) + pytest.raises(OverflowError, t, 10000000011111111111111.23) + + +class TestFReturnInteger(TestReturnInteger): + sources = [ + util.getpath("tests", "src", "return_integer", "foo77.f"), + util.getpath("tests", "src", "return_integer", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_integer, name), + name) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_logical.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_logical.py new file mode 100644 index 0000000..a4a3395 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_logical.py @@ -0,0 +1,65 @@ +import pytest + +from numpy import array + +from . 
import util + + +class TestReturnLogical(util.F2PyTest): + def check_function(self, t): + assert t(True) == 1 + assert t(False) == 0 + assert t(0) == 0 + assert t(None) == 0 + assert t(0.0) == 0 + assert t(0j) == 0 + assert t(1j) == 1 + assert t(234) == 1 + assert t(234.6) == 1 + assert t(234.6 + 3j) == 1 + assert t("234") == 1 + assert t("aaa") == 1 + assert t("") == 0 + assert t([]) == 0 + assert t(()) == 0 + assert t({}) == 0 + assert t(t) == 1 + assert t(-234) == 1 + assert t(10**100) == 1 + assert t([234]) == 1 + assert t((234, )) == 1 + assert t(array(234)) == 1 + assert t(array([234])) == 1 + assert t(array([[234]])) == 1 + assert t(array([127], "b")) == 1 + assert t(array([234], "h")) == 1 + assert t(array([234], "i")) == 1 + assert t(array([234], "l")) == 1 + assert t(array([234], "f")) == 1 + assert t(array([234], "d")) == 1 + assert t(array([234 + 3j], "F")) == 1 + assert t(array([234], "D")) == 1 + assert t(array(0)) == 0 + assert t(array([0])) == 0 + assert t(array([[0]])) == 0 + assert t(array([0j])) == 0 + assert t(array([1])) == 1 + pytest.raises(ValueError, t, array([0, 0])) + + +class TestFReturnLogical(TestReturnLogical): + sources = [ + util.getpath("tests", "src", "return_logical", "foo77.f"), + util.getpath("tests", "src", "return_logical", "foo90.f90"), + ] + + @pytest.mark.slow + @pytest.mark.parametrize("name", ["t0", "t1", "t2", "t4", "s0", "s1", "s2", "s4"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name)) + + @pytest.mark.slow + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_real.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_real.py new file mode 100644 index 0000000..c871ed3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_return_real.py @@ -0,0 +1,109 @@ +import platform + +import pytest + +from numpy import array +from numpy.testing import IS_64BIT + +from . 
import util + + +@pytest.mark.slow +class TestReturnReal(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t4", "s0", "s4"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t("234") - 234) <= err + assert abs(t("234.6") - 234.6) <= err + assert abs(t(-234) + 234) <= err + assert abs(t([234]) - 234) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(234).astype("b")) + 22) <= err + assert abs(t(array(234, "h")) - 234.0) <= err + assert abs(t(array(234, "i")) - 234.0) <= err + assert abs(t(array(234, "l")) - 234.0) <= err + assert abs(t(array(234, "B")) - 234.0) <= err + assert abs(t(array(234, "f")) - 234.0) <= err + assert abs(t(array(234, "d")) - 234.0) <= err + if tname in ["t0", "t4", "s0", "s4"]: + assert t(1e200) == t(1e300) # inf + + # pytest.raises(ValueError, t, array([234], 'S1')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["inf", "Infinity"] + except OverflowError: + pass + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +class TestCReturnReal(TestReturnReal): + suffix = ".pyf" + module_name = "c_ext_return_real" + code = """ +python module c_ext_return_real +usercode \'\'\' +float t4(float value) { return value; } +void s4(float *t4, float value) { *t4 = value; } +double t8(double value) { return value; } +void s8(double *t8, double value) { *t8 = value; } +\'\'\' +interface + function t4(value) + real*4 intent(c) :: t4,value + end + function t8(value) + real*8 intent(c) :: t8,value + end + subroutine s4(t4,value) + intent(c) s4 + real*4 intent(out) :: t4 + real*4 intent(c) :: value + end + subroutine s8(t8,value) + intent(c) s8 + real*8 intent(out) :: t8 + real*8 intent(c) :: value + end +end interface +end python module c_ext_return_real + """ + + @pytest.mark.parametrize("name", ["t4", "t8", "s4", "s8"]) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestFReturnReal(TestReturnReal): + sources = [ + util.getpath("tests", "src", "return_real", "foo77.f"), + util.getpath("tests", "src", "return_real", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_routines.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_routines.py new file mode 100644 index 0000000..01135dd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_routines.py @@ -0,0 +1,29 @@ +import pytest + +from . 
import util + + +@pytest.mark.slow +class TestRenamedFunc(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "funcfortranname.f"), + util.getpath("tests", "src", "routines", "funcfortranname.pyf"), + ] + module_name = "funcfortranname" + + def test_gh25799(self): + assert dir(self.module) + assert self.module.funcfortranname_default(200, 12) == 212 + + +@pytest.mark.slow +class TestRenamedSubroutine(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "subrout.f"), + util.getpath("tests", "src", "routines", "subrout.pyf"), + ] + module_name = "subrout" + + def test_renamed_subroutine(self): + assert dir(self.module) + assert self.module.subrout_default(200, 12) == 212 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_semicolon_split.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_semicolon_split.py new file mode 100644 index 0000000..2a16b19 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_semicolon_split.py @@ -0,0 +1,75 @@ +import platform + +import pytest + +from numpy.testing import IS_64BIT + +from . import util + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +class TestMultiline(util.F2PyTest): + suffix = ".pyf" + module_name = "multiline" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ + char dummy = ';'; + *x = 42; +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + end subroutine foo + end interface +end python module {module_name} + """ + + def test_multiline(self): + assert self.module.foo() == 42 + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +@pytest.mark.slow +class TestCallstatement(util.F2PyTest): + suffix = ".pyf" + module_name = "callstatement" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + callprotoargument int* + callstatement {{ & + ; & + x = 42; & + }} + end subroutine foo + end interface +end python module {module_name} + """ + + def test_callstatement(self): + assert self.module.foo() == 42 diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_size.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_size.py new file mode 100644 index 0000000..ac2eaf1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_size.py @@ -0,0 +1,45 @@ +import pytest + +import numpy as np + +from . 
import util + + +class TestSizeSumExample(util.F2PyTest): + sources = [util.getpath("tests", "src", "size", "foo.f90")] + + @pytest.mark.slow + def test_all(self): + r = self.module.foo([[]]) + assert r == [0] + + r = self.module.foo([[1, 2]]) + assert r == [3] + + r = self.module.foo([[1, 2], [3, 4]]) + assert np.allclose(r, [3, 7]) + + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert np.allclose(r, [3, 7, 11]) + + @pytest.mark.slow + def test_transpose(self): + r = self.module.trans([[]]) + assert np.allclose(r.T, np.array([[]])) + + r = self.module.trans([[1, 2]]) + assert np.allclose(r, [[1.], [2.]]) + + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [[1, 4], [2, 5], [3, 6]]) + + @pytest.mark.slow + def test_flatten(self): + r = self.module.flatten([[]]) + assert np.allclose(r, []) + + r = self.module.flatten([[1, 2]]) + assert np.allclose(r, [1, 2]) + + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [1, 2, 3, 4, 5, 6]) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_string.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_string.py new file mode 100644 index 0000000..f484ea3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_string.py @@ -0,0 +1,100 @@ +import pytest + +import numpy as np + +from . import util + + +class TestString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "char.f90")] + + @pytest.mark.slow + def test_char(self): + strings = np.array(["ab", "cd", "ef"], dtype="c").T + inp, out = self.module.char_test.change_strings( + strings, strings.shape[1]) + assert inp == pytest.approx(strings) + expected = strings.copy() + expected[1, :] = "AAA" + assert out == pytest.approx(expected) + + +class TestDocStringArguments(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "string.f")] + + def test_example(self): + a = np.array(b"123\0\0") + b = np.array(b"123\0\0") + c = np.array(b"123") + d = np.array(b"123") + + self.module.foo(a, b, c, d) + + assert a.tobytes() == b"123\0\0" + assert b.tobytes() == b"B23\0\0" + assert c.tobytes() == b"123" + assert d.tobytes() == b"D23" + + +class TestFixedString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "fixed_string.f90")] + + @staticmethod + def _sint(s, start=0, end=None): + """Return the content of a string buffer as integer value. 
+ + For example: + _sint('1234') -> 4321 + _sint('123A') -> 17321 + """ + if isinstance(s, np.ndarray): + s = s.tobytes() + elif isinstance(s, str): + s = s.encode() + assert isinstance(s, bytes) + if end is None: + end = len(s) + i = 0 + for j in range(start, min(end, len(s))): + i += s[j] * 10**j + return i + + def _get_input(self, intent="in"): + if intent in ["in"]: + yield "" + yield "1" + yield "1234" + yield "12345" + yield b"" + yield b"\0" + yield b"1" + yield b"\01" + yield b"1\0" + yield b"1234" + yield b"12345" + yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0') + yield np.array(b"") # array(b'', dtype='|S1') + yield np.array(b"\0") + yield np.array(b"1") + yield np.array(b"1\0") + yield np.array(b"\01") + yield np.array(b"1234") + yield np.array(b"123\0") + yield np.array(b"12345") + + def test_intent_in(self): + for s in self._get_input(): + r = self.module.test_in_bytes4(s) + # also checks that s is not changed inplace + expected = self._sint(s, end=4) + assert r == expected, s + + def test_intent_inout(self): + for s in self._get_input(intent="inout"): + rest = self._sint(s, start=4) + r = self.module.test_inout_bytes4(s) + expected = self._sint(s, end=4) + assert r == expected + + # check that the rest of input string is preserved + assert rest == self._sint(s, start=4) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_symbolic.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_symbolic.py new file mode 100644 index 0000000..ec23f52 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_symbolic.py @@ -0,0 +1,495 @@ +import pytest + +from numpy.f2py.symbolic import ( + ArithOp, + Expr, + Language, + Op, + as_apply, + as_array, + as_complex, + as_deref, + as_eq, + as_expr, + as_factors, + as_ge, + as_gt, + as_le, + as_lt, + as_ne, + as_number, + as_numer_denom, + as_ref, + as_string, + as_symbol, + as_terms, + as_ternary, + eliminate_quotes, + fromstring, + insert_quotes, + normalize, +) + +from . 
import util + + +class TestSymbolic(util.F2PyTest): + def test_eliminate_quotes(self): + def worker(s): + r, d = eliminate_quotes(s) + s1 = insert_quotes(r, d) + assert s1 == s + + for kind in ["", "mykind_"]: + worker(kind + '"1234" // "ABCD"') + worker(kind + '"1234" // ' + kind + '"ABCD"') + worker(kind + "\"1234\" // 'ABCD'") + worker(kind + '"1234" // ' + kind + "'ABCD'") + worker(kind + '"1\\"2\'AB\'34"') + worker("a = " + kind + "'1\\'2\"AB\"34'") + + def test_sanity(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.op == Op.SYMBOL + assert repr(x) == "Expr(Op.SYMBOL, 'x')" + assert x == x + assert x != y + assert hash(x) is not None + + n = as_number(123) + m = as_number(456) + assert n.op == Op.INTEGER + assert repr(n) == "Expr(Op.INTEGER, (123, 4))" + assert n == n + assert n != m + assert hash(n) is not None + + fn = as_number(12.3) + fm = as_number(45.6) + assert fn.op == Op.REAL + assert repr(fn) == "Expr(Op.REAL, (12.3, 4))" + assert fn == fn + assert fn != fm + assert hash(fn) is not None + + c = as_complex(1, 2) + c2 = as_complex(3, 4) + assert c.op == Op.COMPLEX + assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4))," + " Expr(Op.INTEGER, (2, 4))))") + assert c == c + assert c != c2 + assert hash(c) is not None + + s = as_string("'123'") + s2 = as_string('"ABC"') + assert s.op == Op.STRING + assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s) + assert s == s + assert s != s2 + + a = as_array((n, m)) + b = as_array((n, )) + assert a.op == Op.ARRAY + assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4))," + " Expr(Op.INTEGER, (456, 4))))") + assert a == a + assert a != b + + t = as_terms(x) + u = as_terms(y) + assert t.op == Op.TERMS + assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})" + assert t == t + assert t != u + assert hash(t) is not None + + v = as_factors(x) + w = as_factors(y) + assert v.op == Op.FACTORS + assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})" + assert v == v + assert w != v + assert hash(v) is not None + + t = as_ternary(x, y, z) + u = as_ternary(x, z, y) + assert t.op == Op.TERNARY + assert t == t + assert t != u + assert hash(t) is not None + + e = as_eq(x, y) + f = as_lt(x, y) + assert e.op == Op.RELATIONAL + assert e == e + assert e != f + assert hash(e) is not None + + def test_tostring_fortran(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + m = as_number(456) + a = as_array((n, m)) + c = as_complex(n, m) + + assert str(x) == "x" + assert str(n) == "123" + assert str(a) == "[123, 456]" + assert str(c) == "(123, 456)" + + assert str(Expr(Op.TERMS, {x: 1})) == "x" + assert str(Expr(Op.TERMS, {x: 2})) == "2 * x" + assert str(Expr(Op.TERMS, {x: -1})) == "-x" + assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x" + assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y" + assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y" + assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y" + + assert str(Expr(Op.FACTORS, {x: 1})) == "x" + assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2" + assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1" + assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2" + assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y" + assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3" + + v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x + y) 
** 3", str(v) + v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x * y) ** 3", str(v) + + assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()" + assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)" + assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)" + assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]" + + assert str(as_ternary(x, y, z)) == "merge(y, z, x)" + assert str(as_eq(x, y)) == "x .eq. y" + assert str(as_ne(x, y)) == "x .ne. y" + assert str(as_lt(x, y)) == "x .lt. y" + assert str(as_le(x, y)) == "x .le. y" + assert str(as_gt(x, y)) == "x .gt. y" + assert str(as_ge(x, y)) == "x .ge. y" + + def test_tostring_c(self): + language = Language.C + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + + assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x" + assert (Expr(Op.FACTORS, { + x + y: 2 + }).tostring(language=language) == "(x + y) * (x + y)") + assert Expr(Op.FACTORS, { + x: 12 + }).tostring(language=language) == "pow(x, 12)" + + assert as_apply(ArithOp.DIV, x, + y).tostring(language=language) == "x / y" + assert (as_apply(ArithOp.DIV, x, + x + y).tostring(language=language) == "x / (x + y)") + assert (as_apply(ArithOp.DIV, x - y, x + + y).tostring(language=language) == "(x - y) / (x + y)") + assert (x + (x - y) / (x + y) + + n).tostring(language=language) == "123 + x + (x - y) / (x + y)" + + assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)" + assert as_eq(x, y).tostring(language=language) == "x == y" + assert as_ne(x, y).tostring(language=language) == "x != y" + assert as_lt(x, y).tostring(language=language) == "x < y" + assert as_le(x, y).tostring(language=language) == "x <= y" + assert as_gt(x, y).tostring(language=language) == "x > y" + assert as_ge(x, y).tostring(language=language) == "x >= y" + + def test_operations(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x + x == Expr(Op.TERMS, {x: 2}) + assert x - x == Expr(Op.INTEGER, (0, 4)) + assert x + y == Expr(Op.TERMS, {x: 1, y: 1}) + assert x - y == Expr(Op.TERMS, {x: 1, y: -1}) + assert x * x == Expr(Op.FACTORS, {x: 2}) + assert x * y == Expr(Op.FACTORS, {x: 1, y: 1}) + + assert +x == x + assert -x == Expr(Op.TERMS, {x: -1}), repr(-x) + assert 2 * x == Expr(Op.TERMS, {x: 2}) + assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2}) + assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3}) + assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2}) + + assert x**2 == Expr(Op.FACTORS, {x: 2}) + assert (x + y)**2 == Expr( + Op.TERMS, + { + Expr(Op.FACTORS, {x: 2}): 1, + Expr(Op.FACTORS, {y: 2}): 1, + Expr(Op.FACTORS, { + x: 1, + y: 1 + }): 2, + }, + ) + assert (x + y) * x == x**2 + x * y + assert (x + y)**2 == x**2 + 2 * x * y + y**2 + assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2 + assert (x + y) * z == x * z + y * z + assert z * (x + y) == x * z + y * z + + assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2)) + assert (2 * x / 2) == x + assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2)) + assert (4 * x / 2) == 2 * x + assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (6 * x / 2) == 3 * x + assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply( + ArithOp.DIV, 5 * y, 4 * x) + assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x, + as_number(2)), (15 * x / 6) / 5 + assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5)) + + assert (x / 2.0) == 
Expr(Op.TERMS, {x: 0.5}) + + s = as_string('"ABC"') + t = as_string('"123"') + + assert s // t == Expr(Op.STRING, ('"ABC123"', 1)) + assert s // x == Expr(Op.CONCAT, (s, x)) + assert x // s == Expr(Op.CONCAT, (x, s)) + + c = as_complex(1.0, 2.0) + assert -c == as_complex(-1.0, -2.0) + assert c + c == as_expr((1 + 2j) * 2) + assert c * c == as_expr((1 + 2j)**2) + + def test_substitute(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + a = as_array((x, y)) + + assert x.substitute({x: y}) == y + assert (x + y).substitute({x: z}) == y + z + assert (x * y).substitute({x: z}) == y * z + assert (x**4).substitute({x: z}) == z**4 + assert (x / y).substitute({x: z}) == z / y + assert x.substitute({x: y + z}) == y + z + assert a.substitute({x: y + z}) == as_array((y + z, y)) + + assert as_ternary(x, y, + z).substitute({x: y + z}) == as_ternary(y + z, y, z) + assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y) + + def test_fromstring(self): + + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + s = as_string('"ABC"') + t = as_string('"123"') + a = as_array((x, y)) + + assert fromstring("x") == x + assert fromstring("+ x") == x + assert fromstring("- x") == -x + assert fromstring("x + y") == x + y + assert fromstring("x + 1") == x + 1 + assert fromstring("x * y") == x * y + assert fromstring("x * 2") == x * 2 + assert fromstring("x / y") == x / y + assert fromstring("x ** 2", language=Language.Python) == x**2 + assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3 + assert fromstring("(x + y) * z") == (x + y) * z + + assert fromstring("f(x)") == f(x) + assert fromstring("f(x,y)") == f(x, y) + assert fromstring("f[x]") == f[x] + assert fromstring("f[x][y]") == f[x][y] + + assert fromstring('"ABC"') == s + assert (normalize( + fromstring('"ABC" // "123" ', + language=Language.Fortran)) == s // t) + assert fromstring('f("ABC")') == f(s) + assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND") + + assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)") + assert fromstring("f((/x, y/))") == f(a) + assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, )) + + assert fromstring("123") == as_number(123) + assert fromstring("123_2") == as_number(123, 2) + assert fromstring("123_myintkind") == as_number(123, "myintkind") + + assert fromstring("123.0") == as_number(123.0, 4) + assert fromstring("123.0_4") == as_number(123.0, 4) + assert fromstring("123.0_8") == as_number(123.0, 8) + assert fromstring("123.0e0") == as_number(123.0, 4) + assert fromstring("123.0d0") == as_number(123.0, 8) + assert fromstring("123d0") == as_number(123.0, 8) + assert fromstring("123e-0") == as_number(123.0, 4) + assert fromstring("123d+0") == as_number(123.0, 8) + assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind") + assert fromstring("3E4") == as_number(30000.0, 4) + + assert fromstring("(1, 2)") == as_complex(1, 2) + assert fromstring("(1e2, PI)") == as_complex(as_number(100.0), + as_symbol("PI")) + + assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2))) + + assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"), + x, + y=as_number(1)) + assert fromstring( + 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply( + as_symbol("PERSON"), + name=as_string('"John"'), + age=as_number(50), + shape=as_array((as_number(34), as_number(23))), + ) + + assert fromstring("x?y:z") == as_ternary(x, y, z) + + assert fromstring("*x") == as_deref(x) + assert fromstring("**x") == 
as_deref(as_deref(x)) + assert fromstring("&x") == as_ref(x) + assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y) + assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x**y") == as_deref(x) * as_deref(y) + + assert fromstring("x == y") == as_eq(x, y) + assert fromstring("x != y") == as_ne(x, y) + assert fromstring("x < y") == as_lt(x, y) + assert fromstring("x > y") == as_gt(x, y) + assert fromstring("x <= y") == as_le(x, y) + assert fromstring("x >= y") == as_ge(x, y) + + assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y) + assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y) + assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y) + assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y) + assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y) + assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y) + + def test_traverse(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + + # Use traverse to substitute a symbol + def replace_visit(s, r=z): + if s == x: + return r + + assert x.traverse(replace_visit) == z + assert y.traverse(replace_visit) == y + assert z.traverse(replace_visit) == z + assert (f(y)).traverse(replace_visit) == f(y) + assert (f(x)).traverse(replace_visit) == f(z) + assert (f[y]).traverse(replace_visit) == f[y] + assert (f[z]).traverse(replace_visit) == f[z] + assert (x + y + z).traverse(replace_visit) == (2 * z + y) + assert (x + + f(y, x - z)).traverse(replace_visit) == (z + + f(y, as_number(0))) + assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y) + + # Use traverse to collect symbols, method 1 + function_symbols = set() + symbols = set() + + def collect_symbols(s): + if s.op is Op.APPLY: + oper = s.data[0] + function_symbols.add(oper) + if oper in symbols: + symbols.remove(oper) + elif s.op is Op.SYMBOL and s not in function_symbols: + symbols.add(s) + + (x + f(y, x - z)).traverse(collect_symbols) + assert function_symbols == {f} + assert symbols == {x, y, z} + + # Use traverse to collect symbols, method 2 + def collect_symbols2(expr, symbols): + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols2, symbols) + assert symbols == {x, y, z, f} + + # Use traverse to partially collect symbols + def collect_symbols3(expr, symbols): + if expr.op is Op.APPLY: + # skip traversing function calls + return expr + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols3, symbols) + assert symbols == {x} + + def test_linear_solve(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.linear_solve(x) == (as_number(1), as_number(0)) + assert (x + 1).linear_solve(x) == (as_number(1), as_number(1)) + assert (2 * x).linear_solve(x) == (as_number(2), as_number(0)) + assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3)) + assert as_number(3).linear_solve(x) == (as_number(0), as_number(3)) + assert y.linear_solve(x) == (as_number(0), y) + assert (y * z).linear_solve(x) == (as_number(0), y * z) + + assert (x + y).linear_solve(x) == (as_number(1), y) + assert (z * x + y).linear_solve(x) == (z, y) + assert ((z + y) * x + y).linear_solve(x) == (z + y, y) + assert (z * y * x + y).linear_solve(x) == (z * y, y) + + pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x)) + + def 
test_as_numer_denom(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert as_numer_denom(x) == (x, as_number(1)) + assert as_numer_denom(x / n) == (x, n) + assert as_numer_denom(n / x) == (n, x) + assert as_numer_denom(x / y) == (x, y) + assert as_numer_denom(x * y) == (x * y, as_number(1)) + assert as_numer_denom(n + x / y) == (x + n * y, y) + assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x) + + def test_polynomial_atoms(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert x.polynomial_atoms() == {x} + assert n.polynomial_atoms() == set() + assert (y[x]).polynomial_atoms() == {y[x]} + assert (y(x)).polynomial_atoms() == {y(x)} + assert (y(x) + x).polynomial_atoms() == {y(x), x} + assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} + assert (y(x)**x).polynomial_atoms() == {y(x)} diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_value_attrspec.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_value_attrspec.py new file mode 100644 index 0000000..1afae08 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/test_value_attrspec.py @@ -0,0 +1,15 @@ +import pytest + +from . import util + + +class TestValueAttr(util.F2PyTest): + sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")] + + # gh-21665 + @pytest.mark.slow + def test_gh21665(self): + inp = 2 + out = self.module.fortfuncs.square(inp) + exp_out = 4 + assert out == exp_out diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/tests/util.py b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/util.py new file mode 100644 index 0000000..35e5d3b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/tests/util.py @@ -0,0 +1,442 @@ +""" +Utility functions for + +- building and importing modules on test time, using a temporary location +- detecting if compilers are present +- determining paths to tests + +""" +import atexit +import concurrent.futures +import contextlib +import glob +import os +import shutil +import subprocess +import sys +import tempfile +from importlib import import_module +from pathlib import Path + +import pytest + +import numpy +from numpy._utils import asunicode +from numpy.f2py._backends._meson import MesonBackend +from numpy.testing import IS_WASM, temppath + +# +# Check if compilers are available at all... +# + +def check_language(lang, code_snippet=None): + if sys.platform == "win32": + pytest.skip("No Fortran tests on Windows (Issue #25134)", allow_module_level=True) + tmpdir = tempfile.mkdtemp() + try: + meson_file = os.path.join(tmpdir, "meson.build") + with open(meson_file, "w") as f: + f.write("project('check_compilers')\n") + f.write(f"add_languages('{lang}')\n") + if code_snippet: + f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") + f.write(f"{lang}_code = '''{code_snippet}'''\n") + f.write( + f"_have_{lang}_feature =" + f"{lang}_compiler.compiles({lang}_code," + f" name: '{lang} feature check')\n" + ) + try: + runmeson = subprocess.run( + ["meson", "setup", "btmp"], + check=False, + cwd=tmpdir, + capture_output=True, + ) + except subprocess.CalledProcessError: + pytest.skip("meson not present, skipping compiler dependent test", allow_module_level=True) + return runmeson.returncode == 0 + finally: + shutil.rmtree(tmpdir) + + +fortran77_code = ''' +C Example Fortran 77 code + PROGRAM HELLO + PRINT *, 'Hello, Fortran 77!' + END +''' + +fortran90_code = ''' +! 
Example Fortran 90 code +program hello90 + type :: greeting + character(len=20) :: text + end type greeting + + type(greeting) :: greet + greet%text = 'hello, fortran 90!' + print *, greet%text +end program hello90 +''' + +# Dummy class for caching relevant checks +class CompilerChecker: + def __init__(self): + self.compilers_checked = False + self.has_c = False + self.has_f77 = False + self.has_f90 = False + + def check_compilers(self): + if (not self.compilers_checked) and (not sys.platform == "cygwin"): + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(check_language, "c"), + executor.submit(check_language, "fortran", fortran77_code), + executor.submit(check_language, "fortran", fortran90_code) + ] + + self.has_c = futures[0].result() + self.has_f77 = futures[1].result() + self.has_f90 = futures[2].result() + + self.compilers_checked = True + + +if not IS_WASM: + checker = CompilerChecker() + checker.check_compilers() + +def has_c_compiler(): + return checker.has_c + +def has_f77_compiler(): + return checker.has_f77 + +def has_f90_compiler(): + return checker.has_f90 + +def has_fortran_compiler(): + return (checker.has_f90 and checker.has_f77) + + +# +# Maintaining a temporary module directory +# + +_module_dir = None +_module_num = 5403 + +if sys.platform == "cygwin": + NUMPY_INSTALL_ROOT = Path(__file__).parent.parent.parent + _module_list = list(NUMPY_INSTALL_ROOT.glob("**/*.dll")) + + +def _cleanup(): + global _module_dir + if _module_dir is not None: + try: + sys.path.remove(_module_dir) + except ValueError: + pass + try: + shutil.rmtree(_module_dir) + except OSError: + pass + _module_dir = None + + +def get_module_dir(): + global _module_dir + if _module_dir is None: + _module_dir = tempfile.mkdtemp() + atexit.register(_cleanup) + if _module_dir not in sys.path: + sys.path.insert(0, _module_dir) + return _module_dir + + +def get_temp_module_name(): + # Assume single-threaded, and the module dir usable only by this thread + global _module_num + get_module_dir() + name = "_test_ext_module_%d" % _module_num + _module_num += 1 + if name in sys.modules: + # this should not be possible, but check anyway + raise RuntimeError("Temporary module name already in use.") + return name + + +def _memoize(func): + memo = {} + + def wrapper(*a, **kw): + key = repr((a, kw)) + if key not in memo: + try: + memo[key] = func(*a, **kw) + except Exception as e: + memo[key] = e + raise + ret = memo[key] + if isinstance(ret, Exception): + raise ret + return ret + + wrapper.__name__ = func.__name__ + return wrapper + + +# +# Building modules +# + + +@_memoize +def build_module(source_files, options=[], skip=[], only=[], module_name=None): + """ + Compile and import a f2py module, built from the given files. 
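+
+    A minimal sketch of a typical call, assuming a Fortran compiler is
+    present; ``example_mod`` is a hypothetical module name, and ``foo.f90``
+    is the existing test source used by ``TestSizeSumExample``::
+
+        # hypothetical direct usage; normally driven through F2PyTest
+        mod = build_module(
+            [getpath("tests", "src", "size", "foo.f90")],
+            module_name="example_mod",
+        )
+        assert mod.foo([[1, 2]]) == [3]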
+ + """ + + code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" + + d = get_module_dir() + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + + # Copy files + dst_sources = [] + f2py_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError(f"{fn} is not a file") + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + base, ext = os.path.splitext(dst) + if ext in (".f90", ".f95", ".f", ".c", ".pyf"): + f2py_sources.append(dst) + + assert f2py_sources + + # Prepare options + if module_name is None: + module_name = get_temp_module_name() + gil_options = [] + if '--freethreading-compatible' not in options and '--no-freethreading-compatible' not in options: + # default to disabling the GIL if unset in options + gil_options = ['--freethreading-compatible'] + f2py_opts = ["-c", "-m", module_name] + options + gil_options + f2py_sources + f2py_opts += ["--backend", "meson"] + if skip: + f2py_opts += ["skip:"] + skip + if only: + f2py_opts += ["only:"] + only + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, "-c", code] + f2py_opts + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError(f"Running f2py failed: {cmd[4:]}\n{asunicode(out)}") + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Rebase (Cygwin-only) + if sys.platform == "cygwin": + # If someone starts deleting modules after import, this will + # need to change to record how big each module is, rather than + # relying on rebase being able to find that from the files. + _module_list.extend( + glob.glob(os.path.join(d, f"{module_name:s}*")) + ) + subprocess.check_call( + ["/usr/bin/rebase", "--database", "--oblivious", "--verbose"] + + _module_list + ) + + # Import + return import_module(module_name) + + +@_memoize +def build_code(source_code, + options=[], + skip=[], + only=[], + suffix=None, + module_name=None): + """ + Compile and import Fortran code using f2py. + + """ + if suffix is None: + suffix = ".f" + with temppath(suffix=suffix) as path: + with open(path, "w") as f: + f.write(source_code) + return build_module([path], + options=options, + skip=skip, + only=only, + module_name=module_name) + + +# +# Building with meson +# + + +class SimplifiedMesonBackend(MesonBackend): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def compile(self): + self.write_meson_build(self.build_dir) + self.run_meson(self.build_dir) + + +def build_meson(source_files, module_name=None, **kwargs): + """ + Build a module via Meson and import it. 
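+
+    A minimal sketch under the same assumptions as `build_module`; the
+    keyword arguments (``fc_flags``, ``include_dirs``, ...) are forwarded
+    to the Meson backend, and ``example_meson_mod`` is a hypothetical
+    module name::
+
+        mod = build_meson(
+            [getpath("tests", "src", "size", "foo.f90")],
+            module_name="example_meson_mod",
+        )
+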
+ """ + + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + + build_dir = get_module_dir() + if module_name is None: + module_name = get_temp_module_name() + + # Initialize the MesonBackend + backend = SimplifiedMesonBackend( + modulename=module_name, + sources=source_files, + extra_objects=kwargs.get("extra_objects", []), + build_dir=build_dir, + include_dirs=kwargs.get("include_dirs", []), + library_dirs=kwargs.get("library_dirs", []), + libraries=kwargs.get("libraries", []), + define_macros=kwargs.get("define_macros", []), + undef_macros=kwargs.get("undef_macros", []), + f2py_flags=kwargs.get("f2py_flags", []), + sysinfo_flags=kwargs.get("sysinfo_flags", []), + fc_flags=kwargs.get("fc_flags", []), + flib_flags=kwargs.get("flib_flags", []), + setup_flags=kwargs.get("setup_flags", []), + remove_build_dir=kwargs.get("remove_build_dir", False), + extra_dat=kwargs.get("extra_dat", {}), + ) + + backend.compile() + + # Import the compiled module + sys.path.insert(0, f"{build_dir}/{backend.meson_build_dir}") + return import_module(module_name) + + +# +# Unittest convenience +# + + +class F2PyTest: + code = None + sources = None + options = [] + skip = [] + only = [] + suffix = ".f" + module = None + _has_c_compiler = None + _has_f77_compiler = None + _has_f90_compiler = None + + @property + def module_name(self): + cls = type(self) + return f'_{cls.__module__.rsplit(".", 1)[-1]}_{cls.__name__}_ext_module' + + @classmethod + def setup_class(cls): + if sys.platform == "win32": + pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)") + F2PyTest._has_c_compiler = has_c_compiler() + F2PyTest._has_f77_compiler = has_f77_compiler() + F2PyTest._has_f90_compiler = has_f90_compiler() + F2PyTest._has_fortran_compiler = has_fortran_compiler() + + def setup_method(self): + if self.module is not None: + return + + codes = self.sources or [] + if self.code: + codes.append(self.suffix) + + needs_f77 = any(str(fn).endswith(".f") for fn in codes) + needs_f90 = any(str(fn).endswith(".f90") for fn in codes) + needs_pyf = any(str(fn).endswith(".pyf") for fn in codes) + + if needs_f77 and not self._has_f77_compiler: + pytest.skip("No Fortran 77 compiler available") + if needs_f90 and not self._has_f90_compiler: + pytest.skip("No Fortran 90 compiler available") + if needs_pyf and not self._has_fortran_compiler: + pytest.skip("No Fortran compiler available") + + # Build the module + if self.code is not None: + self.module = build_code( + self.code, + options=self.options, + skip=self.skip, + only=self.only, + suffix=self.suffix, + module_name=self.module_name, + ) + + if self.sources is not None: + self.module = build_module( + self.sources, + options=self.options, + skip=self.skip, + only=self.only, + module_name=self.module_name, + ) + + +# +# Helper functions +# + + +def getpath(*a): + # Package root + d = Path(numpy.f2py.__file__).parent.resolve() + return d.joinpath(*a) + + +@contextlib.contextmanager +def switchdir(path): + curpath = Path.cwd() + os.chdir(path) + try: + yield + finally: + os.chdir(curpath) diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/use_rules.py b/.venv/lib/python3.12/site-packages/numpy/f2py/use_rules.py new file mode 100644 index 0000000..1e06f6c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/use_rules.py @@ -0,0 +1,99 @@ +""" +Build 'use others module data' mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. 
+Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.3 $"[10:-1] + +f2py_version = 'See `f2py -v`' + + +from .auxfuncs import applyrules, dictappend, gentitle, hasnote, outmess + +usemodule_rules = { + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ +\t #name# = get_#name#()\\n\\ +Arguments:\\n\\ +#docstr#\"; +extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); +static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { +/*#decl#*/ +\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; +printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); +\treturn Py_BuildValue(\"\"); +capi_fail: +\treturn NULL; +} +""", + 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', + 'need': ['F_MODFUNC'] +} + +################ + + +def buildusevars(m, r): + ret = {} + outmess( + f"\t\tBuilding use variable hooks for module \"{m['name']}\" (feature only for F90/F95)...\n") + varsmap = {} + revmap = {} + if 'map' in r: + for k in r['map'].keys(): + if r['map'][k] in revmap: + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( + r['map'][k], k, revmap[r['map'][k]])) + else: + revmap[r['map'][k]] = k + if r.get('only'): + for v in r['map'].keys(): + if r['map'][v] in m['vars']: + + if revmap[r['map'][v]] == v: + varsmap[v] = r['map'][v] + else: + outmess(f"\t\t\tIgnoring map \"{v}=>{r['map'][v]}\". See above.\n") + else: + outmess( + f"\t\t\tNo definition for variable \"{v}=>{r['map'][v]}\". Skipping.\n") + else: + for v in m['vars'].keys(): + varsmap[v] = revmap.get(v, v) + for v in varsmap.keys(): + ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) + return ret + + +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( + name, realname)) + ret = {} + vrd = {'name': name, + 'realname': realname, + 'REALNAME': realname.upper(), + 'usemodulename': usemodulename, + 'USEMODULENAME': usemodulename.upper(), + 'texname': name.replace('_', '\\_'), + 'begintitle': gentitle(f'{name}=>{realname}'), + 'endtitle': gentitle(f'end of {name}=>{realname}'), + 'apiname': f'#modulename#_use_{realname}_from_{usemodulename}' + } + nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', + 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} + vrd['texnamename'] = name + for i in nummap.keys(): + vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) + if hasnote(vars[realname]): + vrd['note'] = vars[realname]['note'] + rd = dictappend({}, vrd) + + print(name, realname, vars[realname]) + ret = applyrules(usemodule_rules, rd) + return ret diff --git a/.venv/lib/python3.12/site-packages/numpy/f2py/use_rules.pyi b/.venv/lib/python3.12/site-packages/numpy/f2py/use_rules.pyi new file mode 100644 index 0000000..58c7f9b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/f2py/use_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" +usemodule_rules: Final[dict[str, str | list[str]]] = ... + +def buildusevars(m: Mapping[str, object], r: Mapping[str, Mapping[str, object]]) -> dict[str, Any]: ... 
+def buildusevar(name: str, realname: str, vars: Mapping[str, Mapping[str, object]], usemodulename: str) -> dict[str, Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/__init__.py b/.venv/lib/python3.12/site-packages/numpy/fft/__init__.py new file mode 100644 index 0000000..55f7320 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/__init__.py @@ -0,0 +1,215 @@ +""" +Discrete Fourier Transform +========================== + +.. currentmodule:: numpy.fft + +The SciPy module `scipy.fft` is a more comprehensive superset +of `numpy.fft`, which includes only a basic set of routines. + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N-dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. + + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components. When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. 
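+
+As a quick numerical check of this convention, the sum above can be
+evaluated directly and compared with ``fft`` for a short signal::
+
+    >>> import numpy as np
+    >>> a = np.arange(8.0)
+    >>> k = m = np.arange(8)
+    >>> A = (a * np.exp(-2j * np.pi * np.outer(k, m) / 8)).sum(axis=1)
+    >>> np.allclose(A, np.fft.fft(a))
+    True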
+ +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. + +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Type Promotion +-------------- + +`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and +``complex128`` arrays respectively. For an FFT implementation that does not +promote input arrays, see `scipy.fftpack`. + +Normalization +------------- + +The argument ``norm`` indicates which direction of the pair of direct/inverse +transforms is scaled and with what normalization factor. +The default normalization (``"backward"``) has the direct (forward) transforms +unscaled and the inverse (backward) transforms scaled by :math:`1/n`. It is +possible to obtain unitary transforms by setting the keyword argument ``norm`` +to ``"ortho"`` so that both direct and inverse transforms are scaled by +:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to +``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse +transforms unscaled (i.e. exactly opposite to the default ``"backward"``). +`None` is an alias of the default option ``"backward"`` for backward +compatibility. + +Real and Hermitian transforms +----------------------------- + +When the input is purely real, its transform is Hermitian, i.e., the +component at frequency :math:`f_k` is the complex conjugate of the +component at frequency :math:`-f_k`, which means that for real +inputs there is no information in the negative frequency components that +is not already available from the positive frequency components. +The family of `rfft` functions is +designed to operate on real inputs, and exploits this symmetry by +computing only the positive frequency components, up to and including the +Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex +output points. The inverses of this family assumes the same symmetry of +its input, and for an output of ``n`` points uses ``n/2+1`` input points. 
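+
+For example, 8 real input points produce 5 complex output points, and
+``irfft`` reconstructs the original signal from them::
+
+    >>> import numpy as np
+    >>> x = np.arange(8.0)
+    >>> np.fft.rfft(x).shape
+    (5,)
+    >>> np.allclose(np.fft.irfft(np.fft.rfft(x), n=8), x)
+    True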
+ +Correspondingly, when the spectrum is purely real, the signal is +Hermitian. The `hfft` family of functions exploits this symmetry by +using ``n/2+1`` complex points in the input (time) domain for ``n`` real +points in the frequency domain. + +In higher dimensions, FFTs are used, e.g., for image analysis and +filtering. The computational efficiency of the FFT means that it can +also be a faster way to compute large convolutions, using the property +that a convolution in the time domain is equivalent to a point-by-point +multiplication in the frequency domain. + +Higher dimensions +----------------- + +In two dimensions, the DFT is defined as + +.. math:: + A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} + a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} + \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, + +which extends in the obvious way to higher dimensions, and the inverses +in higher dimensions also extend in the same way. + +References +---------- + +.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + +.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., + 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. + 12-13. Cambridge Univ. Press, Cambridge, UK. + +Examples +-------- + +For examples, see the various functions. + +""" + +# TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should +# be deleted once downstream libraries move to `numpy.fft`. +from . import _helper, _pocketfft, helper +from ._helper import * +from ._pocketfft import * + +__all__ = _pocketfft.__all__.copy() # noqa: PLE0605 +__all__ += _helper.__all__ + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/fft/__init__.pyi new file mode 100644 index 0000000..54d0ea8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/__init__.pyi @@ -0,0 +1,43 @@ +from ._helper import ( + fftfreq, + fftshift, + ifftshift, + rfftfreq, +) +from ._pocketfft import ( + fft, + fft2, + fftn, + hfft, + ifft, + ifft2, + ifftn, + ihfft, + irfft, + irfft2, + irfftn, + rfft, + rfft2, + rfftn, +) + +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", + "fftshift", + "ifftshift", + "fftfreq", + "rfftfreq", +] diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..83e5e34 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/_helper.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/_helper.cpython-312.pyc new file mode 100644 index 0000000..d39f4fe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/_helper.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-312.pyc new file mode 100644 index 0000000..0705dbb Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/helper.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/helper.cpython-312.pyc new file mode 100644 index 0000000..ec32c53 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/fft/__pycache__/helper.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/_helper.py b/.venv/lib/python3.12/site-packages/numpy/fft/_helper.py new file mode 100644 index 0000000..77adeac --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/_helper.py @@ -0,0 +1,235 @@ +""" +Discrete Fourier Transforms - _helper.py + +""" +from numpy._core import arange, asarray, empty, integer, roll +from numpy._core.overrides import array_function_dispatch, set_module + +# Created by Pearu Peterson, September 2002 + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] + +integer_types = (int, integer) + + +def _fftshift_dispatcher(x, axes=None): + return (x,) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def fftshift(x, axes=None): + """ + Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all). + Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + ifftshift : The inverse of `fftshift`. + + Examples + -------- + >>> import numpy as np + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., ..., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [dim // 2 for dim in x.shape] + elif isinstance(axes, integer_types): + shift = x.shape[axes] // 2 + else: + shift = [x.shape[ax] // 2 for ax in axes] + + return roll(x, shift, axes) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def ifftshift(x, axes=None): + """ + The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. 
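+
+    Notes
+    -----
+    The one-sample difference matters for odd-length `x`: applying
+    `fftshift` twice does not recover the input, whereas pairing it
+    with `ifftshift` does.
+
+    >>> import numpy as np
+    >>> x = np.arange(5)
+    >>> np.fft.ifftshift(np.fft.fftshift(x))
+    array([0, 1, 2, 3, 4])
+    >>> np.fft.fftshift(np.fft.fftshift(x))
+    array([1, 2, 3, 4, 0])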
+ + Examples + -------- + >>> import numpy as np + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [-(dim // 2) for dim in x.shape] + elif isinstance(axes, integer_types): + shift = -(x.shape[axes] // 2) + else: + shift = [-(x.shape[ax] // 2) for ax in axes] + + return roll(x, shift, axes) + + +@set_module('numpy.fft') +def fftfreq(n, d=1.0, device=None): + """ + Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = np.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = np.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + results = empty(n, int, device=device) + N = (n - 1) // 2 + 1 + p1 = arange(0, N, dtype=int, device=device) + results[:N] = p1 + p2 = arange(-(n // 2), 0, dtype=int, device=device) + results[N:] = p2 + return results * val + + +@set_module('numpy.fft') +def rfftfreq(n, d=1.0, device=None): + """ + Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. 
+ + Examples + -------- + >>> import numpy as np + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = np.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = np.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., ..., -30., -20., -10.]) + >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + N = n // 2 + 1 + results = arange(0, N, dtype=int, device=device) + return results * val diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/_helper.pyi b/.venv/lib/python3.12/site-packages/numpy/fft/_helper.pyi new file mode 100644 index 0000000..d06bda7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/_helper.pyi @@ -0,0 +1,45 @@ +from typing import Any, Final, TypeVar, overload +from typing import Literal as L + +from numpy import complexfloating, floating, generic, integer +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ShapeLike, +) + +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +### + +integer_types: Final[tuple[type[int], type[integer]]] = ... + +### + +@overload +def fftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +@overload +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +@overload +def ifftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +@overload +def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +@overload +def fftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... +@overload +def fftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... + +# +@overload +def rfftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... +@overload +def rfftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft.py b/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft.py new file mode 100644 index 0000000..c7f2f6a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft.py @@ -0,0 +1,1693 @@ +""" +Discrete Fourier Transforms + +Routines in this module: + +fft(a, n=None, axis=-1, norm="backward") +ifft(a, n=None, axis=-1, norm="backward") +rfft(a, n=None, axis=-1, norm="backward") +irfft(a, n=None, axis=-1, norm="backward") +hfft(a, n=None, axis=-1, norm="backward") +ihfft(a, n=None, axis=-1, norm="backward") +fftn(a, s=None, axes=None, norm="backward") +ifftn(a, s=None, axes=None, norm="backward") +rfftn(a, s=None, axes=None, norm="backward") +irfftn(a, s=None, axes=None, norm="backward") +fft2(a, s=None, axes=(-2,-1), norm="backward") +ifft2(a, s=None, axes=(-2, -1), norm="backward") +rfft2(a, s=None, axes=(-2,-1), norm="backward") +irfft2(a, s=None, axes=(-2, -1), norm="backward") + +i = inverse transform +r = transform of purely real data +h = Hermite transform +n = n-dimensional transform +2 = 2-dimensional transform +(Note: 2D routines are just nD routines with different default +behavior.) 
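+
+For example, on a 2-D array the 2D and nD forms agree::
+
+    >>> import numpy as np
+    >>> a = np.arange(6.0).reshape(2, 3)
+    >>> np.allclose(np.fft.fft2(a), np.fft.fftn(a, axes=(-2, -1)))
+    True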
+ +""" +__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', + 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] + +import functools +import warnings + +from numpy._core import ( + asarray, + conjugate, + empty_like, + overrides, + reciprocal, + result_type, + sqrt, + take, +) +from numpy.lib.array_utils import normalize_axis_index + +from . import _pocketfft_umath as pfu + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.fft') + + +# `inv_norm` is a float by which the result of the transform needs to be +# divided. This replaces the original, more intuitive 'fct` parameter to avoid +# divisions by zero (or alternatively additional checks) in the case of +# zero-length axes during its computation. +def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None): + if n < 1: + raise ValueError(f"Invalid number of FFT data points ({n}) specified.") + + # Calculate the normalization factor, passing in the array dtype to + # avoid precision loss in the possible sqrt or reciprocal. + if not is_forward: + norm = _swap_direction(norm) + + real_dtype = result_type(a.real.dtype, 1.0) + if norm is None or norm == "backward": + fct = 1 + elif norm == "ortho": + fct = reciprocal(sqrt(n, dtype=real_dtype)) + elif norm == "forward": + fct = reciprocal(n, dtype=real_dtype) + else: + raise ValueError(f'Invalid norm value {norm}; should be "backward",' + '"ortho" or "forward".') + + n_out = n + if is_real: + if is_forward: + ufunc = pfu.rfft_n_even if n % 2 == 0 else pfu.rfft_n_odd + n_out = n // 2 + 1 + else: + ufunc = pfu.irfft + else: + ufunc = pfu.fft if is_forward else pfu.ifft + + axis = normalize_axis_index(axis, a.ndim) + + if out is None: + if is_real and not is_forward: # irfft, complex in, real output. + out_dtype = real_dtype + else: # Others, complex output. + out_dtype = result_type(a.dtype, 1j) + out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis + 1:], + dtype=out_dtype) + elif ((shape := getattr(out, "shape", None)) is not None + and (len(shape) != a.ndim or shape[axis] != n_out)): + raise ValueError("output array has wrong shape.") + + return ufunc(a, fct, axes=[(axis,), (), (axis,)], out=out) + + +_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward", + "ortho": "ortho", "forward": "backward"} + + +def _swap_direction(norm): + try: + return _SWAP_DIRECTION_MAP[norm] + except KeyError: + raise ValueError(f'Invalid norm value {norm}; should be "backward", ' + '"ortho" or "forward".') from None + + +def _fft_dispatcher(a, n=None, axis=None, norm=None, out=None): + return (a, out) + + +@array_function_dispatch(_fft_dispatcher) +def fft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional discrete Fourier Transform. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) with the efficient Fast Fourier Transform (FFT) + algorithm [CT]. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". 
+ Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : for definition of the DFT and conventions used. + ifft : The inverse of `fft`. + fft2 : The two-dimensional FFT. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + fftfreq : Frequency bins for given FFT parameters. + + Notes + ----- + FFT (Fast Fourier Transform) refers to a way the discrete Fourier + Transform (DFT) can be calculated efficiently, by using symmetries in the + calculated terms. The symmetry is highest when `n` is a power of 2, and + the transform is therefore most efficient for these sizes. + + The DFT is defined, with the conventions used in this implementation, in + the documentation for the `numpy.fft` module. + + References + ---------- + .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + + Examples + -------- + >>> import numpy as np + >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) + array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, + 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, + -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, + 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) + + In this example, real input has an FFT which is Hermitian, i.e., symmetric + in the real part and anti-symmetric in the imaginary part, as described in + the `numpy.fft` documentation: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(256) + >>> sp = np.fft.fft(np.sin(t)) + >>> freq = np.fft.fftfreq(t.shape[-1]) + >>> _ = plt.plot(freq, sp.real, freq, sp.imag) + >>> plt.show() + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, False, True, norm, out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def ifft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier transform computed by `fft`. In other words, + ``ifft(fft(a)) == a`` to within numerical accuracy. + For a general description of the algorithm and definitions, + see `numpy.fft`. + + The input should be ordered in the same way as is returned by `fft`, + i.e., + + * ``a[0]`` should contain the zero frequency term, + * ``a[1:n//2]`` should contain the positive-frequency terms, + * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in + increasing order starting from the most negative frequency. + + For an even number of input points, ``A[n//2]`` represents the sum of + the values at the positive and negative Nyquist frequencies, as the two + are aliased together. See `numpy.fft` for details. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. 
+ If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + See notes about padding issues. + axis : int, optional + Axis over which to compute the inverse DFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : An introduction, with definitions and general explanations. + fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse + ifft2 : The two-dimensional inverse FFT. + ifftn : The n-dimensional inverse FFT. + + Notes + ----- + If the input parameter `n` is larger than the size of the input, the input + is padded by appending zeros at the end. Even though this is the common + approach, it might lead to surprising results. If a different padding is + desired, it must be performed before calling `ifft`. + + Examples + -------- + >>> import numpy as np + >>> np.fft.ifft([0, 4, 0, 0]) + array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary + + Create and plot a band-limited signal with random phases: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(400) + >>> n = np.zeros((400,), dtype=complex) + >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) + >>> s = np.fft.ifft(n) + >>> plt.plot(t, s.real, label='real') + [] + >>> plt.plot(t, s.imag, '--', label='imaginary') + [] + >>> plt.legend() + + >>> plt.show() + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, False, False, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def rfft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional discrete Fourier Transform for real input. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) of a real-valued array by means of an efficient algorithm + called the Fast Fourier Transform (FFT). + + Parameters + ---------- + a : array_like + Input array + n : int, optional + Number of points along transformation axis in the input to use. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. 
It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is odd, the length is ``(n+1)/2``. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + irfft : The inverse of `rfft`. + fft : The one-dimensional FFT of general (complex) input. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + + Notes + ----- + When the DFT is computed for purely real input, the output is + Hermitian-symmetric, i.e. the negative frequency terms are just the complex + conjugates of the corresponding positive-frequency terms, and the + negative-frequency terms are therefore redundant. This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. + + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> import numpy as np + >>> np.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary + >>> np.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, True, True, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def irfft(a, n=None, axis=-1, norm=None, out=None): + """ + Computes the inverse of `rfft`. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e. the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is taken to be + ``2*(m-1)`` where ``m`` is the length of the input along the axis + specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. 
+ norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. + fft : The one-dimensional FFT. + irfft2 : The inverse of the two-dimensional FFT of real input. + irfftn : The inverse of the *n*-dimensional FFT of real input. + + Notes + ----- + Returns the real valued `n`-point inverse discrete Fourier transform + of `a`, where `a` contains the non-negative frequency terms of a + Hermitian-symmetric sequence. `n` is the length of the result, not the + input. + + If you specify an `n` such that `a` must be zero-padded or truncated, the + extra/removed values will be added/removed at high frequencies. One can + thus resample a series to `m` points via Fourier interpolation by: + ``a_resamp = irfft(rfft(a), m)``. + + The correct interpretation of the hermitian input depends on the length of + the original data, as given by `n`. This is because each input shape could + correspond to either an odd or even length signal. By default, `irfft` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, + the value is thus treated as purely real. To avoid losing information, the + correct length of the real input **must** be given. + + Examples + -------- + >>> import numpy as np + >>> np.fft.ifft([1, -1j, -1, 1j]) + array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary + >>> np.fft.irfft([1, -1j, -1]) + array([0., 1., 0., 0.]) + + Notice how the last term in the input to the ordinary `ifft` is the + complex conjugate of the second term, and the output has zero imaginary + part everywhere. When calling `irfft`, the negative frequencies are not + specified, and the output array is purely real. + + """ + a = asarray(a) + if n is None: + n = (a.shape[axis] - 1) * 2 + output = _raw_fft(a, n, axis, True, False, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def hfft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the FFT of a signal that has Hermitian symmetry, i.e., a real + spectrum. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. For `n` output + points, ``n//2 + 1`` input points are necessary. If the input is + longer than this, it is cropped. If it is shorter than this, it is + padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)`` + where ``m`` is the length of the input along the axis specified by + `axis`. 
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is `n`, or, if `n` is not given,
+        ``2*m - 2`` where ``m`` is the length of the transformed axis of
+        the input. To get an odd number of output points, `n` must be
+        specified, for instance as ``2*m - 1`` in the typical case.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See also
+    --------
+    rfft : Compute the one-dimensional FFT for real input.
+    ihfft : The inverse of `hfft`.
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain. So here it's `hfft` for
+    which you must supply the length of the result if it is to be odd.
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+    The correct interpretation of the hermitian input depends on the length of
+    the original data, as given by `n`. This is because each input shape could
+    correspond to either an odd or even length signal. By default, `hfft`
+    assumes an even output length which puts the last entry at the Nyquist
+    frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
+    the value is thus treated as purely real. To avoid losing information, the
+    shape of the full signal **must** be given.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> signal = np.array([1, 2, 3, 4, 3, 2])
+    >>> np.fft.fft(signal)
+    array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])  # may vary
+    >>> np.fft.hfft(signal[:4])  # Input first half of signal
+    array([15., -4., 0., -1., 0., -4.])
+    >>> np.fft.hfft(signal, 6)  # Input entire signal and truncate
+    array([15., -4., 0., -1., 0., -4.])
+
+
+    >>> signal = np.array([[1, 1.j], [-1.j, 2]])
+    >>> np.conj(signal.T) - signal  # check Hermitian symmetry
+    array([[ 0.-0.j, -0.+0.j],  # may vary
+           [ 0.+0.j, 0.-0.j]])
+    >>> freq_spectrum = np.fft.hfft(signal)
+    >>> freq_spectrum
+    array([[ 1., 1.],
+           [ 2., -2.]])
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = (a.shape[axis] - 1) * 2
+    new_norm = _swap_direction(norm)
+    output = irfft(conjugate(a), n, axis, norm=new_norm, out=out)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ihfft(a, n=None, axis=-1, norm=None, out=None):
+    """
+    Compute the inverse FFT of a signal that has Hermitian symmetry.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    n : int, optional
+        Length of the inverse FFT, the number of points along
+        transformation axis in the input to use. If `n` is smaller than
+        the length of the input, the input is cropped. If it is larger,
+        the input is padded with zeros. If `n` is not given, the length of
+        the input along the axis specified by `axis` is used.
+ axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is ``n//2 + 1``. + + See also + -------- + hfft, irfft + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So here it's `hfft` for + which you must supply the length of the result if it is to be odd: + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. + + Examples + -------- + >>> import numpy as np + >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) + >>> np.fft.ifft(spectrum) + array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary + >>> np.fft.ihfft(spectrum) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + new_norm = _swap_direction(norm) + out = rfft(a, n, axis, norm=new_norm, out=out) + return conjugate(out, out=out) + + +def _cook_nd_args(a, s=None, axes=None, invreal=0): + if s is None: + shapeless = True + if axes is None: + s = list(a.shape) + else: + s = take(a.shape, axes) + else: + shapeless = False + s = list(s) + if axes is None: + if not shapeless: + msg = ("`axes` should not be `None` if `s` is not `None` " + "(Deprecated in NumPy 2.0). In a future version of NumPy, " + "this will raise an error and `s[i]` will correspond to " + "the size along the transformed axis specified by " + "`axes[i]`. To retain current behaviour, pass a sequence " + "[0, ..., k-1] to `axes` for an array of dimension k.") + warnings.warn(msg, DeprecationWarning, stacklevel=3) + axes = list(range(-len(s), 0)) + if len(s) != len(axes): + raise ValueError("Shape and axes have different lengths.") + if invreal and shapeless: + s[-1] = (a.shape[axes[-1]] - 1) * 2 + if None in s: + msg = ("Passing an array containing `None` values to `s` is " + "deprecated in NumPy 2.0 and will raise an error in " + "a future version of NumPy. To use the default behaviour " + "of the corresponding 1-D transform, pass the value matching " + "the default for its `n` parameter. 
To use the default "
+               "behaviour for every axis, the `s` argument can be omitted.")
+        warnings.warn(msg, DeprecationWarning, stacklevel=3)
+    # use the whole input array along axis `i` if `s[i] == -1`
+    s = [a.shape[_a] if _s == -1 else _s for _s, _a in zip(s, axes)]
+    return s, axes
+
+
+def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None, out=None):
+    a = asarray(a)
+    s, axes = _cook_nd_args(a, s, axes)
+    itl = list(range(len(axes)))
+    itl.reverse()
+    for ii in itl:
+        a = function(a, n=s[ii], axis=axes[ii], norm=norm, out=out)
+    return a
+
+
+def _fftn_dispatcher(a, s=None, axes=None, norm=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fftn(a, s=None, axes=None, norm=None, out=None):
+    """
+    Compute the N-dimensional discrete Fourier Transform.
+
+    This function computes the *N*-dimensional discrete Fourier Transform over
+    any number of axes in an *M*-dimensional array by means of the Fast Fourier
+    Transform (FFT).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` means that the transform over that axis is
+        performed multiple times.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+ rfftn : The *n*-dimensional FFT of real input. + fft2 : The two-dimensional FFT. + fftshift : Shifts zero-frequency terms to centre of array + + Notes + ----- + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of all axes, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + See `numpy.fft` for details, definitions and conventions used. + + Examples + -------- + >>> import numpy as np + >>> a = np.mgrid[:3, :3, :3][0] + >>> np.fft.fftn(a, axes=(1, 2)) + array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[ 9.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[18.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) + array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[-2.+0.j, -2.+0.j, -2.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + + >>> import matplotlib.pyplot as plt + >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, + ... 2 * np.pi * np.arange(200) / 34) + >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape) + >>> FS = np.fft.fftn(S) + >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2)) + + >>> plt.show() + + """ + return _raw_fftnd(a, s, axes, fft, norm, out=out) + + +@array_function_dispatch(_fftn_dispatcher) +def ifftn(a, s=None, axes=None, norm=None, out=None): + """ + Compute the N-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the N-dimensional discrete + Fourier Transform over any number of axes in an M-dimensional array by + means of the Fast Fourier Transform (FFT). In other words, + ``ifftn(fftn(a)) == a`` to within numerical accuracy. + For a description of the definitions and conventions used, see `numpy.fft`. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fftn`, i.e. it should have the term for zero frequency + in all axes in the low-order corner, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``ifft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the IFFT. 
If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` means that the inverse transform over that
+        axis is performed multiple times.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` or `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
+    ifft : The one-dimensional inverse FFT.
+    ifft2 : The two-dimensional inverse FFT.
+    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
+        of array.
+
+    Notes
+    -----
+    See `numpy.fft` for definitions and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension. Although this is the common
+    approach, it might lead to surprising results. If another form of zero
+    padding is desired, it must be performed before `ifftn` is called.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.eye(4)
+    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
+    array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],  # may vary
+           [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
+           [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+           [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
+
+
+    Create and plot an image with band-limited frequency content:
+
+    >>> import matplotlib.pyplot as plt
+    >>> n = np.zeros((200,200), dtype=complex)
+    >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
+    >>> im = np.fft.ifftn(n).real
+    >>> plt.imshow(im)
+
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fft2(a, s=None, axes=(-2, -1), norm=None, out=None):
+    """
+    Compute the 2-dimensional discrete Fourier Transform.
+
+    This function computes the *n*-dimensional discrete Fourier Transform
+    over any axes in an *M*-dimensional array by means of the
+    Fast Fourier Transform (FFT). By default, the transform is computed over
+    the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last two
+        axes are used. A repeated index in `axes` means the transform over
+        that axis is performed multiple times. A one-element sequence means
+        that a one-dimensional FFT is performed. Default: ``(-2, -1)``.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence only the
+        last axis can have ``s`` not equal to the shape at that axis).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifft2 : The inverse two-dimensional FFT.
+    fft : The one-dimensional FFT.
+    fftn : The *n*-dimensional FFT.
+    fftshift : Shifts zero-frequency terms to the center of the array.
+        For two-dimensional input, swaps first and third quadrants, and second
+        and fourth quadrants.
+
+    Notes
+    -----
+    `fft2` is just `fftn` with a different default for `axes`.
+
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of the transformed axes, the positive frequency terms
+    in the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    the axes, in order of decreasingly negative frequency.
+
+    See `fftn` for details and a plotting example, and `numpy.fft` for
+    definitions and conventions used.
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.mgrid[:5, :5][0]
+    >>> np.fft.fft2(a)
+    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        ,  # may vary
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ]])
+
+    """
+    return _raw_fftnd(a, s, axes, fft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None):
+    """
+    Compute the 2-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the 2-dimensional discrete Fourier
+    Transform over any number of axes in an M-dimensional array by means of
+    the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
+    to within numerical accuracy. By default, the inverse transform is
+    computed over the last two axes of the input array.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fft2`, i.e. it should have the term for zero frequency
+    in the low-order corner of the two axes, the positive frequency terms in
+    the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    both axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
+        ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used. See notes for issue on `ifft` zero padding.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last two
+        axes are used. A repeated index in `axes` means the transform over
+        that axis is performed multiple times. A one-element sequence means
+        that a one-dimensional FFT is performed. Default: ``(-2, -1)``.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
+    ifftn : The inverse of the *n*-dimensional FFT.
+    fft : The one-dimensional FFT.
+    ifft : The one-dimensional inverse FFT.
+
+    Notes
+    -----
+    `ifft2` is just `ifftn` with a different default for `axes`.
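+
+    For instance, the equivalence can be checked directly (an illustrative
+    sketch; it assumes nothing beyond NumPy itself):
+
+    >>> import numpy as np
+    >>> a = np.arange(12.0).reshape(3, 4)
+    >>> np.allclose(np.fft.ifft2(a), np.fft.ifftn(a, axes=(-2, -1)))
+    True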
+
+    See `ifftn` for details and a plotting example, and `numpy.fft` for
+    definition and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension. Although this is the common
+    approach, it might lead to surprising results. If another form of zero
+    padding is desired, it must be performed before `ifft2` is called.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = 4 * np.eye(4)
+    >>> np.fft.ifft2(a)
+    array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],  # may vary
+           [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
+           [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+           [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfftn(a, s=None, axes=None, norm=None, out=None):
+    """
+    Compute the N-dimensional discrete Fourier Transform for real input.
+
+    This function computes the N-dimensional discrete Fourier Transform over
+    any number of axes in an M-dimensional real array by means of the Fast
+    Fourier Transform (FFT). By default, all axes are transformed, with the
+    real transform performed over the last axis, while the remaining
+    transforms are complex.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape (length along each transformed axis) to use from the input.
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
+        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+        The length of the last axis transformed will be ``s[-1]//2+1``,
+        while the remaining transformed axes will have lengths according to
+        `s`, or unchanged from the input.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
+        of real input.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    rfft : The one-dimensional FFT of real input.
+    fftn : The n-dimensional FFT.
+    rfft2 : The two-dimensional FFT of real input.
+
+    Notes
+    -----
+    The transform for real input is performed over the last transformation
+    axis, as by `rfft`, then the transform over the remaining axes is
+    performed as by `fftn`. The order of the output is as for `rfft` for the
+    final transformation axis, and as for `fftn` for the remaining
+    transformation axes.
+
+    See `fft` for details, definitions and conventions used.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.ones((2, 2, 2))
+    >>> np.fft.rfftn(a)
+    array([[[8.+0.j, 0.+0.j],  # may vary
+            [0.+0.j, 0.+0.j]],
+           [[0.+0.j, 0.+0.j],
+            [0.+0.j, 0.+0.j]]])
+
+    >>> np.fft.rfftn(a, axes=(2, 0))
+    array([[[4.+0.j, 0.+0.j],  # may vary
+            [4.+0.j, 0.+0.j]],
+           [[0.+0.j, 0.+0.j],
+            [0.+0.j, 0.+0.j]]])
+
+    """
+    a = asarray(a)
+    s, axes = _cook_nd_args(a, s, axes)
+    a = rfft(a, s[-1], axes[-1], norm, out=out)
+    for ii in range(len(axes) - 2, -1, -1):
+        a = fft(a, s[ii], axes[ii], norm, out=out)
+    return a
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None):
+    """
+    Compute the 2-dimensional FFT of a real array.
+
+    Parameters
+    ----------
+    a : array
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape of the FFT.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. Default: ``(-2, -1)``.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : ndarray
+        The result of the real 2-D FFT.
+
+    See Also
+    --------
+    rfftn : Compute the N-dimensional discrete Fourier Transform for real
+        input.
+
+    Notes
+    -----
+    This is really just `rfftn` with different default behavior.
+    For more details see `rfftn`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.mgrid[:5, :5][0]
+    >>> np.fft.rfft2(a)
+    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        ],
+           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ],
+           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ]])
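+
+    For real input, `rfft2` retains only the non-negative frequency columns
+    of `fft2`; a quick cross-check (illustrative, assuming only NumPy):
+
+    >>> np.allclose(np.fft.rfft2(a), np.fft.fft2(a)[:, :a.shape[1] // 2 + 1])
+    True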
+    """
+    return rfftn(a, s, axes, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def irfftn(a, s=None, axes=None, norm=None, out=None):
+    """
+    Computes the inverse of `rfftn`.
+
+    This function computes the inverse of the N-dimensional discrete
+    Fourier Transform for real input over any number of axes in an
+    M-dimensional array by means of the Fast Fourier Transform (FFT). In
+    other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
+    accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
+    and for the same reason.)
+
+    The input should be ordered in the same way as is returned by `rfftn`,
+    i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
+    along all the other axes.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
+        number of input points used along this axis, except for the last axis,
+        where ``s[-1]//2+1`` points of the input are used.
+        Along any axis, if the shape indicated by `s` is smaller than that of
+        the input, the input is cropped. If it is larger, the input is padded
+        with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes
+        specified by axes is used. Except for the last axis which is taken to
+        be ``2*(m-1)`` where ``m`` is the length of the input along that axis.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the inverse FFT. If not given, the last
+        `len(s)` axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` means that the inverse transform over that
+        axis is performed multiple times.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for the last transformation.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` or `a`,
+        as explained in the parameters section above.
+        The length of each transformed axis is as given by the corresponding
+        element of `s`, or the length of the input in every axis except for the
+        last one if `s` is not given. In the final transformed axis the length
+        of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
+        length of the final transformed axis of the input. To get an odd
+        number of output points in the final axis, `s` must be specified.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    rfftn : The forward n-dimensional FFT of real input,
+        of which `irfftn` is the inverse.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    irfft : The inverse of the one-dimensional FFT of real input.
+    irfft2 : The inverse of the two-dimensional FFT of real input.
+
+    Notes
+    -----
+    See `fft` for definitions and conventions used.
+
+    See `rfft` for definitions and conventions used for real input.
+
+    The correct interpretation of the hermitian input depends on the shape of
+    the original data, as given by `s`. This is because each input shape could
+    correspond to either an odd or even length signal. By default, `irfftn`
+    assumes an even output length which puts the last entry at the Nyquist
+    frequency; aliasing with its symmetric counterpart. When performing the
+    final complex to real transform, the last value is thus treated as purely
+    real. To avoid losing information, the correct shape of the real input
+    **must** be given.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.zeros((3, 2, 2))
+    >>> a[0, 0, 0] = 3 * 2 * 2
+    >>> np.fft.irfftn(a)
+    array([[[1., 1.],
+            [1., 1.]],
+           [[1., 1.],
+            [1., 1.]],
+           [[1., 1.],
+            [1., 1.]]])
+
+    """
+    a = asarray(a)
+    s, axes = _cook_nd_args(a, s, axes, invreal=1)
+    for ii in range(len(axes) - 1):
+        a = ifft(a, s[ii], axes[ii], norm)
+    a = irfft(a, s[-1], axes[-1], norm, out=out)
+    return a
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None):
+    """
+    Computes the inverse of `rfft2`.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array
+    s : sequence of ints, optional
+        Shape of the real output to the inverse FFT.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        The axes over which to compute the inverse fft.
+        Default: ``(-2, -1)``, the last two axes.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for the last transformation.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : ndarray
+        The result of the inverse real 2-D FFT.
+
+    See Also
+    --------
+    rfft2 : The forward two-dimensional FFT of real input,
+        of which `irfft2` is the inverse.
+    rfft : The one-dimensional FFT for real input.
+    irfft : The inverse of the one-dimensional FFT of real input.
+    irfftn : Compute the inverse of the N-dimensional FFT of real input.
+
+    Notes
+    -----
+    This is really `irfftn` with different defaults.
+    For more details see `irfftn`.
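+
+    Without `s`, the final axis is assumed to have had even length, so an
+    odd-sized real array only round-trips when its shape is passed
+    explicitly (an illustrative sketch, assuming only NumPy):
+
+    >>> import numpy as np
+    >>> a = np.arange(15.0).reshape(3, 5)
+    >>> np.fft.irfft2(np.fft.rfft2(a)).shape  # default assumes even size
+    (3, 4)
+    >>> np.allclose(np.fft.irfft2(np.fft.rfft2(a), s=a.shape), a)
+    True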
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.mgrid[:5, :5][0]
+    >>> A = np.fft.rfft2(a)
+    >>> np.fft.irfft2(A, s=a.shape)
+    array([[0., 0., 0., 0., 0.],
+           [1., 1., 1., 1., 1.],
+           [2., 2., 2., 2., 2.],
+           [3., 3., 3., 3., 3.],
+           [4., 4., 4., 4., 4.]])
+    """
+    return irfftn(a, s, axes, norm, out=out)
diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft.pyi b/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft.pyi
new file mode 100644
index 0000000..215cf14
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft.pyi
@@ -0,0 +1,138 @@
+from collections.abc import Sequence
+from typing import Literal as L
+from typing import TypeAlias
+
+from numpy import complex128, float64
+from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co
+
+__all__ = [
+    "fft",
+    "ifft",
+    "rfft",
+    "irfft",
+    "hfft",
+    "ihfft",
+    "rfftn",
+    "irfftn",
+    "rfft2",
+    "irfft2",
+    "fft2",
+    "ifft2",
+    "fftn",
+    "ifftn",
+]
+
+_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None
+
+def fft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def ifft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def rfft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def irfft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[float64] | None = ...,
+) -> NDArray[float64]: ...
+
+# Input array must be compatible with `np.conjugate`
+def hfft(
+    a: _ArrayLikeNumber_co,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[float64] | None = ...,
+) -> NDArray[float64]: ...
+
+def ihfft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def fftn(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def ifftn(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def rfftn(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def irfftn(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[float64] | None = ...,
+) -> NDArray[float64]: ...
+
+def fft2(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def ifft2(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def rfft2(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
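+
+# The annotations above follow this stub file's double-precision convention:
+# every complex transform resolves to ``NDArray[complex128]``, while the
+# transforms with real output (`irfft`, `hfft`, `irfftn`, and `irfft2`
+# below) resolve to ``NDArray[float64]``.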
+ +def irfft2( + a: ArrayLike, + s: Sequence[int] | None = ..., + axes: Sequence[int] | None = ..., + norm: _NormKind = ..., + out: NDArray[float64] | None = ..., +) -> NDArray[float64]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..df57850 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/helper.py b/.venv/lib/python3.12/site-packages/numpy/fft/helper.py new file mode 100644 index 0000000..08d5662 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/helper.py @@ -0,0 +1,17 @@ +def __getattr__(attr_name): + import warnings + + from numpy.fft import _helper + ret = getattr(_helper, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.fft.helper' has no attribute {attr_name}") + warnings.warn( + "The numpy.fft.helper has been made private and renamed to " + "numpy.fft._helper. All four functions exported by it (i.e. fftshift, " + "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. " + f"Please use numpy.fft.{attr_name} instead.", + DeprecationWarning, + stacklevel=3 + ) + return ret diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/helper.pyi b/.venv/lib/python3.12/site-packages/numpy/fft/helper.pyi new file mode 100644 index 0000000..887cbe7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/helper.pyi @@ -0,0 +1,22 @@ +from typing import Any +from typing import Literal as L + +from typing_extensions import deprecated + +import numpy as np +from numpy._typing import ArrayLike, NDArray, _ShapeLike + +from ._helper import integer_types as integer_types + +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] + +### + +@deprecated("Please use `numpy.fft.fftshift` instead.") +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.ifftshift` instead.") +def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.fftfreq` instead.") +def fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.rfftfreq` instead.") +def rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... 
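The `helper.py` shim above uses the PEP 562 module-level ``__getattr__`` to
forward attribute access to `numpy.fft._helper` while emitting a
`DeprecationWarning`. A minimal sketch of what a caller observes
(illustrative; it assumes only this NumPy build):

>>> import warnings
>>> import numpy.fft.helper as helper
>>> with warnings.catch_warnings(record=True) as caught:
...     warnings.simplefilter("always")
...     freqs = helper.fftfreq(4)  # resolved via __getattr__, warns
>>> caught[0].category
<class 'DeprecationWarning'>
>>> freqs
array([ 0.  ,  0.25, -0.5 , -0.25])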
diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/fft/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..f2fb847 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc new file mode 100644 index 0000000..a1e4182 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc new file mode 100644 index 0000000..853bb4e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/tests/test_helper.py b/.venv/lib/python3.12/site-packages/numpy/fft/tests/test_helper.py new file mode 100644 index 0000000..c02a736 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/tests/test_helper.py @@ -0,0 +1,167 @@ +"""Test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 + +""" +import numpy as np +from numpy import fft, pi +from numpy.testing import assert_array_almost_equal + + +class TestFFTShift: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + + def test_inverse(self): + for n in [1, 4, 9, 100, 211]: + x = np.random.random((n,)) + assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self): + freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fft.fftshift(freqs, axes=0), + fft.fftshift(freqs, axes=(0,))) + assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + + assert_array_almost_equal(fft.fftshift(freqs), shifted) + assert_array_almost_equal(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self): + """ Test 2D input, which has uneven dimension sizes """ + freqs = [ + [0, 1], + [2, 3], + [4, 5] + ] + + # shift in dimension 0 + shift_dim0 = [ + [4, 5], + [0, 1], + [2, 3] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = [ + [1, 0], + [3, 2], + [5, 4] + ] + 
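        # For the even-length axis 1 (size 2) fftshift is its own inverse,
+        # so fftshift and ifftshift coincide there; the odd-length axis 0
+        # (size 3) is what distinguishes the two shifts.
+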
assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) + assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = [ + [5, 4], + [1, 0], + [3, 2] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) + assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) + + def test_equal_to_original(self): + """ Test the new (>=v1.15) and old implementations are equal (see #10073) """ + from numpy._core import arange, asarray, concatenate, take + + def original_fftshift(x, axes=None): + """ How fftshift was implemented in v1.14""" + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + def original_ifftshift(x, axes=None): + """ How ifftshift was implemented in v1.14 """ + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = n - (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + # create possible 2d array combinations and try all possible keywords + # compare output to original functions + for i in range(16): + for j in range(16): + for axes_keyword in [0, 1, None, (0,), (0, 1)]: + inp = np.random.rand(i, j) + + assert_array_almost_equal(fft.fftshift(inp, axes_keyword), + original_fftshift(inp, axes_keyword)) + + assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), + original_ifftshift(inp, axes_keyword)) + + +class TestFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + assert_array_almost_equal(9 * fft.fftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.fftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + assert_array_almost_equal(10 * fft.fftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.fftfreq(10, pi), x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4] + assert_array_almost_equal(9 * fft.rfftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.rfftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, 5] + assert_array_almost_equal(10 * fft.rfftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.rfftfreq(10, pi), x) + + +class TestIRFFTN: + + def test_not_last_axis_success(self): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j * ai + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) diff --git a/.venv/lib/python3.12/site-packages/numpy/fft/tests/test_pocketfft.py b/.venv/lib/python3.12/site-packages/numpy/fft/tests/test_pocketfft.py new file mode 100644 index 0000000..0211818 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/fft/tests/test_pocketfft.py @@ -0,0 +1,589 @@ +import queue +import threading + +import pytest + +import numpy as np +from numpy.random import random +from 
numpy.testing import IS_WASM, assert_allclose, assert_array_equal, assert_raises + + +def fft1(x): + L = len(x) + phase = -2j * np.pi * (np.arange(L) / L) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x * np.exp(phase), axis=1) + + +class TestFFTShift: + + def test_fft_n(self): + assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) + + +class TestFFT1D: + + def test_identity(self): + maxlen = 512 + x = random(maxlen) + 1j * random(maxlen) + xr = random(maxlen) + for i in range(1, maxlen): + assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], + atol=1e-12) + assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i), + xr[0:i], atol=1e-12) + + @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble]) + def test_identity_long_short(self, dtype): + # Test with explicitly given number of points, both for n + # smaller and for n larger than the input size. + maxlen = 16 + atol = 5 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) + xx = np.concatenate([x, np.zeros_like(x)]) + xr = random(maxlen).astype(dtype) + xxr = np.concatenate([xr, np.zeros_like(xr)]) + for i in range(1, maxlen * 2): + check_c = np.fft.ifft(np.fft.fft(x, n=i), n=i) + assert check_c.real.dtype == dtype + assert_allclose(check_c, xx[0:i], atol=atol, rtol=0) + check_r = np.fft.irfft(np.fft.rfft(xr, n=i), n=i) + assert check_r.dtype == dtype + assert_allclose(check_r, xxr[0:i], atol=atol, rtol=0) + + @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble]) + def test_identity_long_short_reversed(self, dtype): + # Also test explicitly given number of points in reversed order. + maxlen = 16 + atol = 5 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) + xx = np.concatenate([x, np.zeros_like(x)]) + for i in range(1, maxlen * 2): + check_via_c = np.fft.fft(np.fft.ifft(x, n=i), n=i) + assert check_via_c.dtype == x.dtype + assert_allclose(check_via_c, xx[0:i], atol=atol, rtol=0) + # For irfft, we can neither recover the imaginary part of + # the first element, nor the imaginary part of the last + # element if npts is even. So, set to 0 for the comparison. 
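+            # (For an n-point irfft the input spectrum has n // 2 + 1 bins;
+            # bin 0 is the DC term and, for even n, the last bin is the
+            # Nyquist term. Both must be real for a real signal, so their
+            # imaginary parts cannot survive the round trip.)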
+ y = x.copy() + n = i // 2 + 1 + y.imag[0] = 0 + if i % 2 == 0: + y.imag[n - 1:] = 0 + yy = np.concatenate([y, np.zeros_like(y)]) + check_via_r = np.fft.rfft(np.fft.irfft(x, n=i), n=i) + assert check_via_r.dtype == x.dtype + assert_allclose(check_via_r, yy[0:n], atol=atol, rtol=0) + + def test_fft(self): + x = random(30) + 1j * random(30) + assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) + assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6) + assert_allclose(fft1(x) / np.sqrt(30), + np.fft.fft(x, norm="ortho"), atol=1e-6) + assert_allclose(fft1(x) / 30., + np.fft.fft(x, norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("axis", (0, 1)) + @pytest.mark.parametrize("dtype", (complex, float)) + @pytest.mark.parametrize("transpose", (True, False)) + def test_fft_out_argument(self, dtype, transpose, axis): + def zeros_like(x): + if transpose: + return np.zeros_like(x.T).T + else: + return np.zeros_like(x) + + # tests below only test the out parameter + if dtype is complex: + y = random((10, 20)) + 1j * random((10, 20)) + fft, ifft = np.fft.fft, np.fft.ifft + else: + y = random((10, 20)) + fft, ifft = np.fft.rfft, np.fft.irfft + + expected = fft(y, axis=axis) + out = zeros_like(expected) + result = fft(y, out=out, axis=axis) + assert result is out + assert_array_equal(result, expected) + + expected2 = ifft(expected, axis=axis) + out2 = out if dtype is complex else zeros_like(expected2) + result2 = ifft(out, out=out2, axis=axis) + assert result2 is out2 + assert_array_equal(result2, expected2) + + @pytest.mark.parametrize("axis", [0, 1]) + def test_fft_inplace_out(self, axis): + # Test some weirder in-place combinations + y = random((20, 20)) + 1j * random((20, 20)) + # Fully in-place. + y1 = y.copy() + expected1 = np.fft.fft(y1, axis=axis) + result1 = np.fft.fft(y1, axis=axis, out=y1) + assert result1 is y1 + assert_array_equal(result1, expected1) + # In-place of part of the array; rest should be unchanged. + y2 = y.copy() + out2 = y2[:10] if axis == 0 else y2[:, :10] + expected2 = np.fft.fft(y2, n=10, axis=axis) + result2 = np.fft.fft(y2, n=10, axis=axis, out=out2) + assert result2 is out2 + assert_array_equal(result2, expected2) + if axis == 0: + assert_array_equal(y2[10:], y[10:]) + else: + assert_array_equal(y2[:, 10:], y[:, 10:]) + # In-place of another part of the array. + y3 = y.copy() + y3_sel = y3[5:] if axis == 0 else y3[:, 5:] + out3 = y3[5:15] if axis == 0 else y3[:, 5:15] + expected3 = np.fft.fft(y3_sel, n=10, axis=axis) + result3 = np.fft.fft(y3_sel, n=10, axis=axis, out=out3) + assert result3 is out3 + assert_array_equal(result3, expected3) + if axis == 0: + assert_array_equal(y3[:5], y[:5]) + assert_array_equal(y3[15:], y[15:]) + else: + assert_array_equal(y3[:, :5], y[:, :5]) + assert_array_equal(y3[:, 15:], y[:, 15:]) + # In-place with n > nin; rest should be unchanged. + y4 = y.copy() + y4_sel = y4[:10] if axis == 0 else y4[:, :10] + out4 = y4[:15] if axis == 0 else y4[:, :15] + expected4 = np.fft.fft(y4_sel, n=15, axis=axis) + result4 = np.fft.fft(y4_sel, n=15, axis=axis, out=out4) + assert result4 is out4 + assert_array_equal(result4, expected4) + if axis == 0: + assert_array_equal(y4[15:], y[15:]) + else: + assert_array_equal(y4[:, 15:], y[:, 15:]) + # Overwrite in a transpose. + y5 = y.copy() + out5 = y5.T + result5 = np.fft.fft(y5, axis=axis, out=out5) + assert result5 is out5 + assert_array_equal(result5, expected1) + # Reverse strides. 
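+        # (An out buffer that is a reversed view of the input must still
+        # reproduce the result computed into fresh memory above.)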
+ y6 = y.copy() + out6 = y6[::-1] if axis == 0 else y6[:, ::-1] + result6 = np.fft.fft(y6, axis=axis, out=out6) + assert result6 is out6 + assert_array_equal(result6, expected1) + + def test_fft_bad_out(self): + x = np.arange(30.) + with pytest.raises(TypeError, match="must be of ArrayType"): + np.fft.fft(x, out="") + with pytest.raises(ValueError, match="has wrong shape"): + np.fft.fft(x, out=np.zeros_like(x).reshape(5, -1)) + with pytest.raises(TypeError, match="Cannot cast"): + np.fft.fft(x, out=np.zeros_like(x, dtype=float)) + + @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) + def test_ifft(self, norm): + x = random(30) + 1j * random(30) + assert_allclose( + x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), + atol=1e-6) + # Ensure we get the correct error message + with pytest.raises(ValueError, + match='Invalid number of FFT data points'): + np.fft.ifft([], norm=norm) + + def test_fft2(self): + x = random((30, 20)) + 1j * random((30, 20)) + assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), + np.fft.fft2(x), atol=1e-6) + assert_allclose(np.fft.fft2(x), + np.fft.fft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20), + np.fft.fft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / (30. * 20.), + np.fft.fft2(x, norm="forward"), atol=1e-6) + + def test_ifft2(self): + x = random((30, 20)) + 1j * random((30, 20)) + assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), + np.fft.ifft2(x), atol=1e-6) + assert_allclose(np.fft.ifft2(x), + np.fft.ifft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20), + np.fft.ifft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * (30. * 20.), + np.fft.ifft2(x, norm="forward"), atol=1e-6) + + def test_fftn(self): + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) + assert_allclose( + np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), + np.fft.fftn(x), atol=1e-6) + assert_allclose(np.fft.fftn(x), + np.fft.fftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10), + np.fft.fftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.), + np.fft.fftn(x, norm="forward"), atol=1e-6) + + def test_ifftn(self): + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) + assert_allclose( + np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), + np.fft.ifftn(x), atol=1e-6) + assert_allclose(np.fft.ifftn(x), + np.fft.ifftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10), + np.fft.ifftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * (30. * 20. 
* 10.), + np.fft.ifftn(x, norm="forward"), atol=1e-6) + + def test_rfft(self): + x = random(30) + for n in [x.size, 2 * x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + assert_allclose( + np.fft.fft(x, n=n, norm=norm)[:(n // 2 + 1)], + np.fft.rfft(x, n=n, norm=norm), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n), + np.fft.rfft(x, n=n, norm="backward"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / np.sqrt(n), + np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / n, + np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + + def test_rfft_even(self): + x = np.arange(8) + n = 4 + y = np.fft.rfft(x, n) + assert_allclose(y, np.fft.fft(x[:n])[:n // 2 + 1], rtol=1e-14) + + def test_rfft_odd(self): + x = np.array([1, 0, 2, 3, -3]) + y = np.fft.rfft(x) + assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14) + + def test_irfft(self): + x = random(30) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfft2(self): + x = random((30, 20)) + assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6) + assert_allclose(np.fft.rfft2(x), + np.fft.rfft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20), + np.fft.rfft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / (30. * 20.), + np.fft.rfft2(x, norm="forward"), atol=1e-6) + + def test_irfft2(self): + x = random((30, 20)) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfftn(self): + x = random((30, 20, 10)) + assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) + assert_allclose(np.fft.rfftn(x), + np.fft.rfftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10), + np.fft.rfftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / (30. * 20. 
* 10.), + np.fft.rfftn(x, norm="forward"), atol=1e-6) + # Regression test for gh-27159 + x = np.ones((2, 3)) + result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40)) + assert result.shape == (10, 21) + expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40), + axis=0, n=20), axis=0, n=10) + assert expected.shape == (10, 21) + assert_allclose(result, expected, atol=1e-6) + + def test_irfftn(self): + x = random((30, 20, 10)) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_hfft(self): + x = random(14) + 1j * random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm), + np.fft.hfft(x_herm, norm="backward"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30), + np.fft.hfft(x_herm, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / 30., + np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + + def test_ihfft(self): + x = random(14) + 1j * random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="backward"), norm="backward"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="ortho"), norm="ortho"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="forward"), norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_axes(self, op): + x = random((30, 20, 10)) + axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] + for a in axes: + op_tr = op(np.transpose(x, a)) + tr_op = np.transpose(op(x, axes=a), a) + assert_allclose(op_tr, tr_op, atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.fft2, np.fft.ifft2]) + def test_s_negative_1(self, op): + x = np.arange(100).reshape(10, 10) + # should use the whole input array along the first axis + assert op(x, s=(-1, 5), axes=(0, 1)).shape == (10, 5) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_s_axes_none(self, op): + x = np.arange(100).reshape(10, 10) + with pytest.warns(match='`axes` should not be `None` if `s`'): + op(x, s=(-1, 5)) + + @pytest.mark.parametrize("op", [np.fft.fft2, np.fft.ifft2]) + def test_s_axes_none_2D(self, op): + x = np.arange(100).reshape(10, 10) + with pytest.warns(match='`axes` should not be `None` if `s`'): + op(x, s=(-1, 5), axes=None) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn, + np.fft.fft2, np.fft.ifft2]) + def test_s_contains_none(self, op): + x = random((30, 20, 10)) + with pytest.warns(match='array containing `None` values to `s`'): + op(x, s=(10, None, 10), axes=(0, 1, 2)) + + def test_all_1d_norm_preserving(self): + # verify that round-trip transforms are norm-preserving + x = random(30) + x_norm = np.linalg.norm(x) + n = x.size * 2 + func_pairs = [(np.fft.fft, np.fft.ifft), + (np.fft.rfft, np.fft.irfft), + # hfft: order so the first function takes 
x.size samples + # (necessary for comparison to x_norm above) + (np.fft.ihfft, np.fft.hfft), + ] + for forw, back in func_pairs: + for n in [x.size, 2 * x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + tmp = forw(x, n=n, norm=norm) + tmp = back(tmp, n=n, norm=norm) + assert_allclose(x_norm, + np.linalg.norm(tmp), atol=1e-6) + + @pytest.mark.parametrize("axes", [(0, 1), (0, 2), None]) + @pytest.mark.parametrize("dtype", (complex, float)) + @pytest.mark.parametrize("transpose", (True, False)) + def test_fftn_out_argument(self, dtype, transpose, axes): + def zeros_like(x): + if transpose: + return np.zeros_like(x.T).T + else: + return np.zeros_like(x) + + # tests below only test the out parameter + if dtype is complex: + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) + fft, ifft = np.fft.fftn, np.fft.ifftn + else: + x = random((10, 5, 6)) + fft, ifft = np.fft.rfftn, np.fft.irfftn + + expected = fft(x, axes=axes) + out = zeros_like(expected) + result = fft(x, out=out, axes=axes) + assert result is out + assert_array_equal(result, expected) + + expected2 = ifft(expected, axes=axes) + out2 = out if dtype is complex else zeros_like(expected2) + result2 = ifft(out, out=out2, axes=axes) + assert result2 is out2 + assert_array_equal(result2, expected2) + + @pytest.mark.parametrize("fft", [np.fft.fftn, np.fft.ifftn, np.fft.rfftn]) + def test_fftn_out_and_s_interaction(self, fft): + # With s, shape varies, so generally one cannot pass in out. + if fft is np.fft.rfftn: + x = random((10, 5, 6)) + else: + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) + with pytest.raises(ValueError, match="has wrong shape"): + fft(x, out=np.zeros_like(x), s=(3, 3, 3), axes=(0, 1, 2)) + # Except on the first axis done (which is the last of axes). + s = (10, 5, 5) + expected = fft(x, s=s, axes=(0, 1, 2)) + out = np.zeros_like(expected) + result = fft(x, s=s, axes=(0, 1, 2), out=out) + assert result is out + assert_array_equal(result, expected) + + @pytest.mark.parametrize("s", [(9, 5, 5), (3, 3, 3)]) + def test_irfftn_out_and_s_interaction(self, s): + # Since for irfftn, the output is real and thus cannot be used for + # intermediate steps, it should always work. 
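+        # (The complex intermediate transforms need their own scratch space,
+        # so `out` only receives the final complex-to-real step.)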
+ x = random((9, 5, 6, 2)) + 1j * random((9, 5, 6, 2)) + expected = np.fft.irfftn(x, s=s, axes=(0, 1, 2)) + out = np.zeros_like(expected) + result = np.fft.irfftn(x, s=s, axes=(0, 1, 2), out=out) + assert result is out + assert_array_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [np.float32, np.float64, np.complex64, np.complex128]) +@pytest.mark.parametrize("order", ["F", 'non-contiguous']) +@pytest.mark.parametrize( + "fft", + [np.fft.fft, np.fft.fft2, np.fft.fftn, + np.fft.ifft, np.fft.ifft2, np.fft.ifftn]) +def test_fft_with_order(dtype, order, fft): + # Check that FFT/IFFT produces identical results for C, Fortran and + # non contiguous arrays + rng = np.random.RandomState(42) + X = rng.rand(8, 7, 13).astype(dtype, copy=False) + # See discussion in pull/14178 + _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps + if order == 'F': + Y = np.asfortranarray(X) + else: + # Make a non contiguous array + Y = X[::-1] + X = np.ascontiguousarray(X[::-1]) + + if fft.__name__.endswith('fft'): + for axis in range(3): + X_res = fft(X, axis=axis) + Y_res = fft(Y, axis=axis) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + elif fft.__name__.endswith(('fft2', 'fftn')): + axes = [(0, 1), (1, 2), (0, 2)] + if fft.__name__.endswith('fftn'): + axes.extend([(0,), (1,), (2,), None]) + for ax in axes: + X_res = fft(X, axes=ax) + Y_res = fft(Y, axes=ax) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + else: + raise ValueError + + +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("n", [None, 7, 12]) +def test_fft_output_order(order, n): + rng = np.random.RandomState(42) + x = rng.rand(10) + x = np.asarray(x, dtype=np.complex64, order=order) + res = np.fft.fft(x, n=n) + assert res.flags.c_contiguous == x.flags.c_contiguous + assert res.flags.f_contiguous == x.flags.f_contiguous + +@pytest.mark.skipif(IS_WASM, reason="Cannot start thread") +class TestFFTThreadSafe: + threads = 16 + input_shape = (800, 200) + + def _test_mtsame(self, func, *args): + def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + # Make sure all threads returned the correct value + for i in range(self.threads): + assert_array_equal(q.get(timeout=5), expected, + 'Function returned wrong value in multithreaded context') + + def test_fft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.fft, a) + + def test_ifft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.ifft, a) + + def test_rfft(self): + a = np.ones(self.input_shape) + self._test_mtsame(np.fft.rfft, a) + + def test_irfft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.irfft, a) + + +def test_irfft_with_n_1_regression(): + # Regression test for gh-25661 + x = np.arange(10) + np.fft.irfft(x, n=1) + np.fft.hfft(x, n=1) + np.fft.irfft(np.array([0], complex), n=10) + + +def test_irfft_with_n_large_regression(): + # Regression test for gh-25679 + x = np.arange(5) * (1 + 1j) + result = np.fft.hfft(x, n=10) + expected = np.array([20., 9.91628173, -11.8819096, 7.1048486, + -6.62459848, 4., -3.37540152, -0.16057669, + 1.8819096, -20.86055364]) + assert_allclose(result, expected) + + +@pytest.mark.parametrize("fft", [ + np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft +]) 
+@pytest.mark.parametrize("data", [ + np.array([False, True, False]), + np.arange(10, dtype=np.uint8), + np.arange(5, dtype=np.int16), +]) +def test_fft_with_integer_or_bool_input(data, fft): + # Regression test for gh-25819 + result = fft(data) + float_data = data.astype(np.result_type(data, 1.)) + expected = fft(float_data) + assert_array_equal(result, expected) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__init__.py b/.venv/lib/python3.12/site-packages/numpy/lib/__init__.py new file mode 100644 index 0000000..a248d04 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/__init__.py @@ -0,0 +1,97 @@ +""" +``numpy.lib`` is mostly a space for implementing functions that don't +belong in core or in another NumPy submodule with a clear purpose +(e.g. ``random``, ``fft``, ``linalg``, ``ma``). + +``numpy.lib``'s private submodules contain basic functions that are used by +other public modules and are useful to have in the main name-space. + +""" + +# Public submodules +# Note: recfunctions is public, but not imported +from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain +from numpy._core.function_base import add_newdoc + +# Private submodules +# load module names. See https://github.com/networkx/networkx/issues/5838 +from . import ( + _arraypad_impl, + _arraysetops_impl, + _arrayterator_impl, + _function_base_impl, + _histograms_impl, + _index_tricks_impl, + _nanfunctions_impl, + _npyio_impl, + _polynomial_impl, + _shape_base_impl, + _stride_tricks_impl, + _twodim_base_impl, + _type_check_impl, + _ufunclike_impl, + _utils_impl, + _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) + +# numpy.lib namespace members +from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion + +__all__ = [ + "Arrayterator", "add_docstring", "add_newdoc", "array_utils", + "format", "introspect", "mixins", "NumpyVersion", "npyio", "scimath", + "stride_tricks", "tracemalloc_domain", +] + +add_newdoc.__module__ = "numpy.lib" + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester + +def __getattr__(attr): + # Warn for deprecated/removed aliases + import math + import warnings + + if attr == "math": + warnings.warn( + "`np.lib.math` is a deprecated alias for the standard library " + "`math` module (Deprecated Numpy 1.25). Replace usages of " + "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2) + return math + elif attr == "emath": + raise AttributeError( + "numpy.lib.emath was an alias for emath module that was removed " + "in NumPy 2.0. Replace usages of numpy.lib.emath with " + "numpy.emath.", + name=None + ) + elif attr in ( + "histograms", "type_check", "nanfunctions", "function_base", + "arraypad", "arraysetops", "ufunclike", "utils", "twodim_base", + "shape_base", "polynomial", "index_tricks", + ): + raise AttributeError( + f"numpy.lib.{attr} is now private. If you are using a public " + "function, it should be available in the main numpy namespace, " + "otherwise check the NumPy 2.0 migration guide.", + name=None + ) + elif attr == "arrayterator": + raise AttributeError( + "numpy.lib.arrayterator submodule is now private. 
To access " + "Arrayterator class use numpy.lib.Arrayterator.", + name=None + ) + else: + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/__init__.pyi new file mode 100644 index 0000000..6185a49 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/__init__.pyi @@ -0,0 +1,44 @@ +from numpy._core.function_base import add_newdoc +from numpy._core.multiarray import add_docstring, tracemalloc_domain + +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . import _array_utils_impl as _array_utils_impl +from . import _arraypad_impl as _arraypad_impl +from . import _arraysetops_impl as _arraysetops_impl +from . import _arrayterator_impl as _arrayterator_impl +from . import _datasource as _datasource +from . import _format_impl as _format_impl +from . import _function_base_impl as _function_base_impl +from . import _histograms_impl as _histograms_impl +from . import _index_tricks_impl as _index_tricks_impl +from . import _iotools as _iotools +from . import _nanfunctions_impl as _nanfunctions_impl +from . import _npyio_impl as _npyio_impl +from . import _polynomial_impl as _polynomial_impl +from . import _scimath_impl as _scimath_impl +from . import _shape_base_impl as _shape_base_impl +from . import _stride_tricks_impl as _stride_tricks_impl +from . import _twodim_base_impl as _twodim_base_impl +from . import _type_check_impl as _type_check_impl +from . import _ufunclike_impl as _ufunclike_impl +from . import _utils_impl as _utils_impl +from . import _version as _version +from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks +from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion + +__all__ = [ + "Arrayterator", + "add_docstring", + "add_newdoc", + "array_utils", + "format", + "introspect", + "mixins", + "NumpyVersion", + "npyio", + "scimath", + "stride_tricks", + "tracemalloc_domain", +] diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..1c5fea7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc new file mode 100644 index 0000000..9d1bad4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc new file mode 100644 index 0000000..cab5c63 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc new file mode 100644 index 0000000..a234cd7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc new file mode 100644 index 0000000..4cb830a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_datasource.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_datasource.cpython-312.pyc new file mode 100644 index 0000000..5c27b16 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_datasource.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_format_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_format_impl.cpython-312.pyc new file mode 100644 index 0000000..0948a7b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_format_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc new file mode 100644 index 0000000..124bec3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc new file mode 100644 index 0000000..eb55724 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc new file mode 100644 index 0000000..d49d7a3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_iotools.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_iotools.cpython-312.pyc new file mode 100644 index 0000000..9c76d6c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_iotools.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc new file mode 100644 index 0000000..66783aa Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc new file mode 100644 index 0000000..55e9a46 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc new file mode 100644 index 0000000..6dd6ded Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc new file mode 100644 index 0000000..3b62648 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc new file mode 100644 index 0000000..e3c1fe4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc new file mode 100644 index 0000000..8cebc75 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc new file mode 100644 index 0000000..753465d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc new file mode 100644 index 0000000..0baef8e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc new file mode 100644 index 0000000..2d4780d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc new file mode 100644 index 0000000..1ec40c3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc new file mode 100644 index 0000000..f82bdde Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_version.cpython-312.pyc new file mode 100644 index 0000000..903b5a1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_version.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/array_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/array_utils.cpython-312.pyc new file mode 100644 
index 0000000..3b05c66 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/array_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/format.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/format.cpython-312.pyc new file mode 100644 index 0000000..a5bf7ce Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/format.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/introspect.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/introspect.cpython-312.pyc new file mode 100644 index 0000000..202121b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/introspect.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/mixins.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/mixins.cpython-312.pyc new file mode 100644 index 0000000..a7e79bc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/mixins.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/npyio.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/npyio.cpython-312.pyc new file mode 100644 index 0000000..c1d402b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/npyio.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/recfunctions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/recfunctions.cpython-312.pyc new file mode 100644 index 0000000..a819167 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/recfunctions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/scimath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/scimath.cpython-312.pyc new file mode 100644 index 0000000..d3275b8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/scimath.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc new file mode 100644 index 0000000..a9dc97b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/user_array.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/user_array.cpython-312.pyc new file mode 100644 index 0000000..f083806 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/user_array.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.py new file mode 100644 index 0000000..c3996e1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.py @@ -0,0 +1,62 @@ +""" +Miscellaneous utils. +""" +from numpy._core import asarray +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple +from numpy._utils import set_module + +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] + + +@set_module("numpy.lib.array_utils") +def byte_bounds(a): + """ + Returns pointers to the end-points of an array. 
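+
+    The returned values are raw byte addresses computed from the array's
+    ``__array_interface__`` (data pointer, shape and strides).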
+ + Parameters + ---------- + a : ndarray + Input array. It must conform to the Python-side of the array + interface. + + Returns + ------- + (low, high) : tuple of 2 integers + The first integer is the first byte of the array, the second + integer is just past the last byte of the array. If `a` is not + contiguous it will not use every byte between the (`low`, `high`) + values. + + Examples + -------- + >>> import numpy as np + >>> I = np.eye(2, dtype='f'); I.dtype + dtype('float32') + >>> low, high = np.lib.array_utils.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + >>> I = np.eye(2); I.dtype + dtype('float64') + >>> low, high = np.lib.array_utils.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + + """ + ai = a.__array_interface__ + a_data = ai['data'][0] + astrides = ai['strides'] + ashape = ai['shape'] + bytes_a = asarray(a).dtype.itemsize + + a_low = a_high = a_data + if astrides is None: + # contiguous case + a_high += a.size * bytes_a + else: + for shape, stride in zip(ashape, astrides): + if stride < 0: + a_low += (shape - 1) * stride + else: + a_high += (shape - 1) * stride + a_high += bytes_a + return a_low, a_high diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.pyi new file mode 100644 index 0000000..d3e0714 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.pyi @@ -0,0 +1,26 @@ +from collections.abc import Iterable +from typing import Any + +from numpy import generic +from numpy.typing import NDArray + +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] + +# NOTE: In practice `byte_bounds` can (potentially) take any object +# implementing the `__array_interface__` protocol. The caveat is +# that certain keys, marked as optional in the spec, must be present for +# `byte_bounds`. This concerns `"strides"` and `"data"`. +def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... + +def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int = ..., + argname: str | None = ..., + allow_duplicate: bool | None = ..., +) -> tuple[int, int]: ... + +def normalize_axis_index( + axis: int = ..., + ndim: int = ..., + msg_prefix: str | None = ..., +) -> int: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.py new file mode 100644 index 0000000..507a0ab --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.py @@ -0,0 +1,890 @@ +""" +The arraypad module contains a group of functions to pad values onto the edges +of an n-dimensional array. + +""" +import numpy as np +from numpy._core.overrides import array_function_dispatch +from numpy.lib._index_tricks_impl import ndindex + +__all__ = ['pad'] + + +############################################################################### +# Private utility functions. + + +def _round_if_needed(arr, dtype): + """ + Rounds arr inplace if destination dtype is integer. + + Parameters + ---------- + arr : ndarray + Input array. + dtype : dtype + The dtype of the destination array. + """ + if np.issubdtype(dtype, np.integer): + arr.round(out=arr) + + +def _slice_at_axis(sl, axis): + """ + Construct tuple of slices to slice an array in the given dimension. + + Parameters + ---------- + sl : slice + The slice for the given dimension. + axis : int + The axis to which `sl` is applied. All other dimensions are left + "unsliced". 
+
+    Returns
+    -------
+    sl : tuple of slices
+        A tuple with slices matching `shape` in length.
+
+    Examples
+    --------
+    >>> _slice_at_axis(slice(None, 3, -1), 1)
+    (slice(None, None, None), slice(None, 3, -1), (...,))
+    """
+    return (slice(None),) * axis + (sl,) + (...,)
+
+
+def _view_roi(array, original_area_slice, axis):
+    """
+    Get a view of the current region of interest during iterative padding.
+
+    When padding multiple dimensions iteratively corner values are
+    unnecessarily overwritten multiple times. This function reduces the
+    working area for the first dimensions so that corners are excluded.
+
+    Parameters
+    ----------
+    array : ndarray
+        The array with the region of interest.
+    original_area_slice : tuple of slices
+        Denotes the area with original values of the unpadded array.
+    axis : int
+        The currently padded dimension assuming that `axis` is padded before
+        `axis` + 1.
+
+    Returns
+    -------
+    roi : ndarray
+        The region of interest of the original `array`.
+    """
+    axis += 1
+    sl = (slice(None),) * axis + original_area_slice[axis:]
+    return array[sl]
+
+
+def _pad_simple(array, pad_width, fill_value=None):
+    """
+    Pad array on all sides with either a single value or undefined values.
+
+    Parameters
+    ----------
+    array : ndarray
+        Array to grow.
+    pad_width : sequence of tuple[int, int]
+        Pad width on both sides for each dimension in `arr`.
+    fill_value : scalar, optional
+        If provided the padded area is filled with this value, otherwise
+        the pad area is left undefined.
+
+    Returns
+    -------
+    padded : ndarray
+        The padded array with the same dtype as `array`. Its order will
+        default to C-style if `array` is not F-contiguous.
+    original_area_slice : tuple
+        A tuple of slices pointing to the area of the original array.
+    """
+    # Allocate grown array
+    new_shape = tuple(
+        left + size + right
+        for size, (left, right) in zip(array.shape, pad_width)
+    )
+    order = 'F' if array.flags.fnc else 'C'  # Fortran and not also C-order
+    padded = np.empty(new_shape, dtype=array.dtype, order=order)
+
+    if fill_value is not None:
+        padded.fill(fill_value)
+
+    # Copy old array into correct space
+    original_area_slice = tuple(
+        slice(left, left + size)
+        for size, (left, right) in zip(array.shape, pad_width)
+    )
+    padded[original_area_slice] = array
+
+    return padded, original_area_slice
+
+
+def _set_pad_area(padded, axis, width_pair, value_pair):
+    """
+    Set empty-padded area in given dimension.
+
+    Parameters
+    ----------
+    padded : ndarray
+        Array with the pad area which is modified inplace.
+    axis : int
+        Dimension with the pad area to set.
+    width_pair : (int, int)
+        Pair of widths that mark the pad area on both sides in the given
+        dimension.
+    value_pair : tuple of scalars or ndarrays
+        Values inserted into the pad area on each side. It must match or be
+        broadcastable to the shape of `arr`.
+    """
+    left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
+    padded[left_slice] = value_pair[0]
+
+    right_slice = _slice_at_axis(
+        slice(padded.shape[axis] - width_pair[1], None), axis)
+    padded[right_slice] = value_pair[1]
+
+
+def _get_edges(padded, axis, width_pair):
+    """
+    Retrieve edge values from empty-padded array in given dimension.
+
+    Parameters
+    ----------
+    padded : ndarray
+        Empty-padded array.
+    axis : int
+        Dimension in which the edges are considered.
+    width_pair : (int, int)
+        Pair of widths that mark the pad area on both sides in the given
+        dimension.
+ + Returns + ------- + left_edge, right_edge : ndarray + Edge values of the valid area in `padded` in the given dimension. Its + shape will always match `padded` except for the dimension given by + `axis` which will have a length of 1. + """ + left_index = width_pair[0] + left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) + left_edge = padded[left_slice] + + right_index = padded.shape[axis] - width_pair[1] + right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) + right_edge = padded[right_slice] + + return left_edge, right_edge + + +def _get_linear_ramps(padded, axis, width_pair, end_value_pair): + """ + Construct linear ramps for empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the ramps are constructed. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + end_value_pair : (scalar, scalar) + End values for the linear ramps which form the edge of the fully padded + array. These values are included in the linear ramps. + + Returns + ------- + left_ramp, right_ramp : ndarray + Linear ramps to set on both sides of `padded`. + """ + edge_pair = _get_edges(padded, axis, width_pair) + + left_ramp, right_ramp = ( + np.linspace( + start=end_value, + stop=edge.squeeze(axis), # Dimension is replaced by linspace + num=width, + endpoint=False, + dtype=padded.dtype, + axis=axis + ) + for end_value, edge, width in zip( + end_value_pair, edge_pair, width_pair + ) + ) + + # Reverse linear space in appropriate dimension + right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] + + return left_ramp, right_ramp + + +def _get_stats(padded, axis, width_pair, length_pair, stat_func): + """ + Calculate statistic for the empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the statistic is calculated. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + length_pair : 2-element sequence of None or int + Gives the number of values in valid area from each side that is + taken into account when calculating the statistic. If None the entire + valid area in `padded` is considered. + stat_func : function + Function to compute statistic. The expected signature is + ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. + + Returns + ------- + left_stat, right_stat : ndarray + Calculated statistic for both sides of `padded`. 
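+
+    Notes
+    -----
+    If both requested lengths span the entire valid area, the statistic is
+    computed only once and shared by both sides.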
+ """ + # Calculate indices of the edges of the area with original values + left_index = width_pair[0] + right_index = padded.shape[axis] - width_pair[1] + # as well as its length + max_length = right_index - left_index + + # Limit stat_lengths to max_length + left_length, right_length = length_pair + if left_length is None or max_length < left_length: + left_length = max_length + if right_length is None or max_length < right_length: + right_length = max_length + + if (left_length == 0 or right_length == 0) \ + and stat_func in {np.amax, np.amin}: + # amax and amin can't operate on an empty array, + # raise a more descriptive warning here instead of the default one + raise ValueError("stat_length of 0 yields no value for padding") + + # Calculate statistic for the left side + left_slice = _slice_at_axis( + slice(left_index, left_index + left_length), axis) + left_chunk = padded[left_slice] + left_stat = stat_func(left_chunk, axis=axis, keepdims=True) + _round_if_needed(left_stat, padded.dtype) + + if left_length == right_length == max_length: + # return early as right_stat must be identical to left_stat + return left_stat, left_stat + + # Calculate statistic for the right side + right_slice = _slice_at_axis( + slice(right_index - right_length, right_index), axis) + right_chunk = padded[right_slice] + right_stat = stat_func(right_chunk, axis=axis, keepdims=True) + _round_if_needed(right_stat, padded.dtype) + + return left_stat, right_stat + + +def _set_reflect_both(padded, axis, width_pair, method, + original_period, include_edge=False): + """ + Pad `axis` of `arr` with reflection. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + method : str + Controls method of reflection; options are 'even' or 'odd'. + original_period : int + Original length of data on `axis` of `arr`. + include_edge : bool + If true, edge value is included in reflection, otherwise the edge + value forms the symmetric axis to the reflection. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + old_length = padded.shape[axis] - right_pad - left_pad + + if include_edge: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = old_length // original_period * original_period + # Edge is included, we need to offset the pad amount by 1 + edge_offset = 1 + else: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. 
+ old_length = ((old_length - 1) // (original_period - 1) + * (original_period - 1) + 1) + edge_offset = 0 # Edge is not included, no need to offset pad amount + old_length -= 1 # but must be omitted from the chunk + + if left_pad > 0: + # Pad with reflected values on left side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, left_pad) + # Slice right to left, stop on or next to edge, start relative to stop + stop = left_pad - edge_offset + start = stop + chunk_length + left_slice = _slice_at_axis(slice(start, stop, -1), axis) + left_chunk = padded[left_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) + left_chunk = 2 * padded[edge_slice] - left_chunk + + # Insert chunk into padded area + start = left_pad - chunk_length + stop = left_pad + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = left_chunk + # Adjust pointer to left edge for next iteration + left_pad -= chunk_length + + if right_pad > 0: + # Pad with reflected values on right side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, right_pad) + # Slice right to left, start on or next to edge, stop relative to start + start = -right_pad + edge_offset - 2 + stop = start - chunk_length + right_slice = _slice_at_axis(slice(start, stop, -1), axis) + right_chunk = padded[right_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis( + slice(-right_pad - 1, -right_pad), axis) + right_chunk = 2 * padded[edge_slice] - right_chunk + + # Insert chunk into padded area + start = padded.shape[axis] - right_pad + stop = start + chunk_length + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = right_chunk + # Adjust pointer to right edge for next iteration + right_pad -= chunk_length + + return left_pad, right_pad + + +def _set_wrap_both(padded, axis, width_pair, original_period): + """ + Pad `axis` of `arr` with wrapped values. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + original_period : int + Original length of data on `axis` of `arr`. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + period = padded.shape[axis] - right_pad - left_pad + # Avoid wrapping with only a subset of the original area by ensuring period + # can only be a multiple of the original area's length. + period = period // original_period * original_period + + # If the current dimension of `arr` doesn't contain enough valid values + # (not part of the undefined pad area) we need to pad multiple times. + # Each time the pad area shrinks on both sides which is communicated with + # these variables. + new_left_pad = 0 + new_right_pad = 0 + + if left_pad > 0: + # Pad with wrapped values on left side + # First slice chunk from left side of the non-pad area. + # Use min(period, left_pad) to ensure that chunk is not larger than + # pad area. 
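+        # A hypothetical example: with left_pad=3 and period=5, the chunk
+        # below is padded[5:8], i.e. the last 3 values of the first full
+        # period, which wrap around into padded[0:3].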
+ slice_end = left_pad + period + slice_start = slice_end - min(period, left_pad) + right_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + right_chunk = padded[right_slice] + + if left_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) + new_left_pad = left_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(None, left_pad), axis) + padded[pad_area] = right_chunk + + if right_pad > 0: + # Pad with wrapped values on right side + # First slice chunk from right side of the non-pad area. + # Use min(period, right_pad) to ensure that chunk is not larger than + # pad area. + slice_start = -right_pad - period + slice_end = slice_start + min(period, right_pad) + left_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + left_chunk = padded[left_slice] + + if right_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis( + slice(-right_pad, -right_pad + period), axis) + new_right_pad = right_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(-right_pad, None), axis) + padded[pad_area] = left_chunk + + return new_left_pad, new_right_pad + + +def _as_pairs(x, ndim, as_index=False): + """ + Broadcast `x` to an array with the shape (`ndim`, 2). + + A helper function for `pad` that prepares and validates arguments like + `pad_width` for iteration in pairs. + + Parameters + ---------- + x : {None, scalar, array-like} + The object to broadcast to the shape (`ndim`, 2). + ndim : int + Number of pairs the broadcasted `x` will have. + as_index : bool, optional + If `x` is not None, try to round each element of `x` to an integer + (dtype `np.intp`) and ensure every element is positive. + + Returns + ------- + pairs : nested iterables, shape (`ndim`, 2) + The broadcasted version of `x`. + + Raises + ------ + ValueError + If `as_index` is True and `x` contains negative elements. + Or if `x` is not broadcastable to the shape (`ndim`, 2). + """ + if x is None: + # Pass through None as a special case, otherwise np.round(x) fails + # with an AttributeError + return ((None, None),) * ndim + + x = np.array(x) + if as_index: + x = np.round(x).astype(np.intp, copy=False) + + if x.ndim < 3: + # Optimization: Possibly use faster paths for cases where `x` has + # only 1 or 2 elements. `np.broadcast_to` could handle these as well + # but is currently slower + + if x.size == 1: + # x was supplied as a single value + x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 + if as_index and x < 0: + raise ValueError("index can't contain negative values") + return ((x[0], x[0]),) * ndim + + if x.size == 2 and x.shape != (2, 1): + # x was supplied with a single value for each side + # but except case when each dimension has a single value + # which should be broadcasted to a pair, + # e.g. 
+            #      [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
+            x = x.ravel()  # Ensure x[0], x[1] works
+            if as_index and (x[0] < 0 or x[1] < 0):
+                raise ValueError("index can't contain negative values")
+            return ((x[0], x[1]),) * ndim
+
+    if as_index and x.min() < 0:
+        raise ValueError("index can't contain negative values")
+
+    # Converting the array with `tolist` seems to improve performance
+    # when iterating and indexing the result (see usage in `pad`)
+    return np.broadcast_to(x, (ndim, 2)).tolist()
+
+
+def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
+    return (array,)
+
+
+###############################################################################
+# Public functions
+
+
+@array_function_dispatch(_pad_dispatcher, module='numpy')
+def pad(array, pad_width, mode='constant', **kwargs):
+    """
+    Pad an array.
+
+    Parameters
+    ----------
+    array : array_like of rank N
+        The array to pad.
+    pad_width : {sequence, array_like, int}
+        Number of values padded to the edges of each axis.
+        ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
+        for each axis.
+        ``(before, after)`` or ``((before, after),)`` yields same before
+        and after pad for each axis.
+        ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
+        for all axes.
+    mode : str or function, optional
+        One of the following string values or a user supplied function.
+
+        'constant' (default)
+            Pads with a constant value.
+        'edge'
+            Pads with the edge values of array.
+        'linear_ramp'
+            Pads with the linear ramp between end_value and the
+            array edge value.
+        'maximum'
+            Pads with the maximum value of all or part of the
+            vector along each axis.
+        'mean'
+            Pads with the mean value of all or part of the
+            vector along each axis.
+        'median'
+            Pads with the median value of all or part of the
+            vector along each axis.
+        'minimum'
+            Pads with the minimum value of all or part of the
+            vector along each axis.
+        'reflect'
+            Pads with the reflection of the vector mirrored on
+            the first and last values of the vector along each
+            axis.
+        'symmetric'
+            Pads with the reflection of the vector mirrored
+            along the edge of the array.
+        'wrap'
+            Pads with the wrap of the vector along the axis.
+            The first values are used to pad the end and the
+            end values are used to pad the beginning.
+        'empty'
+            Pads with undefined values.
+
+        <function>
+            Padding function, see Notes.
+    stat_length : sequence or int, optional
+        Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
+        values at edge of each axis used to calculate the statistic value.
+
+        ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
+        lengths for each axis.
+
+        ``(before, after)`` or ``((before, after),)`` yields same before
+        and after statistic lengths for each axis.
+
+        ``(stat_length,)`` or ``int`` is a shortcut for
+        ``before = after = statistic`` length for all axes.
+
+        Default is ``None``, to use the entire axis.
+    constant_values : sequence or scalar, optional
+        Used in 'constant'. The values to set the padded values for each
+        axis.
+
+        ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
+        for each axis.
+
+        ``(before, after)`` or ``((before, after),)`` yields same before
+        and after constants for each axis.
+
+        ``(constant,)`` or ``constant`` is a shortcut for
+        ``before = after = constant`` for all axes.
+
+        Default is 0.
+    end_values : sequence or scalar, optional
+        Used in 'linear_ramp'. The values used for the ending value of the
+        linear_ramp and that will form the edge of the padded array.
+ + ``((before_1, after_1), ... (before_N, after_N))`` unique end values + for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after end values for each axis. + + ``(constant,)`` or ``constant`` is a shortcut for + ``before = after = constant`` for all axes. + + Default is 0. + reflect_type : {'even', 'odd'}, optional + Used in 'reflect', and 'symmetric'. The 'even' style is the + default with an unaltered reflection around the edge value. For + the 'odd' style, the extended part of the array is created by + subtracting the reflected values from two times the edge value. + + Returns + ------- + pad : ndarray + Padded array of rank equal to `array` with shape increased + according to `pad_width`. + + Notes + ----- + For an array with rank greater than 1, some of the padding of later + axes is calculated from padding of previous axes. This is easiest to + think about with a rank 2 array where the corners of the padded array + are calculated by using padded values from the first axis. + + The padding function, if used, should modify a rank 1 array in-place. It + has the following signature:: + + padding_func(vector, iaxis_pad_width, iaxis, kwargs) + + where + + vector : ndarray + A rank 1 array already padded with zeros. Padded values are + vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. + iaxis_pad_width : tuple + A 2-tuple of ints, iaxis_pad_width[0] represents the number of + values padded at the beginning of vector where + iaxis_pad_width[1] represents the number of values padded at + the end of vector. + iaxis : int + The axis currently being calculated. + kwargs : dict + Any keyword arguments the function requires. + + Examples + -------- + >>> import numpy as np + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) + array([4, 4, 1, ..., 6, 6, 6]) + + >>> np.pad(a, (2, 3), 'edge') + array([1, 1, 1, ..., 5, 5, 5]) + + >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) + array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) + + >>> np.pad(a, (2,), 'maximum') + array([5, 5, 1, 2, 3, 4, 5, 5, 5]) + + >>> np.pad(a, (2,), 'mean') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> np.pad(a, (2,), 'median') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> a = [[1, 2], [3, 4]] + >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') + array([[1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [3, 3, 3, 4, 3, 3, 3], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1]]) + + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'reflect') + array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) + + >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') + array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + >>> np.pad(a, (2, 3), 'symmetric') + array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) + + >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') + array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) + + >>> np.pad(a, (2, 3), 'wrap') + array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) + + >>> def pad_with(vector, pad_width, iaxis, kwargs): + ... pad_value = kwargs.get('padder', 10) + ... vector[:pad_width[0]] = pad_value + ... 
vector[-pad_width[1]:] = pad_value + >>> a = np.arange(6) + >>> a = a.reshape((2, 3)) + >>> np.pad(a, 2, pad_with) + array([[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]]) + >>> np.pad(a, 2, pad_with, padder=100) + array([[100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 0, 1, 2, 100, 100], + [100, 100, 3, 4, 5, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100]]) + """ + array = np.asarray(array) + pad_width = np.asarray(pad_width) + + if not pad_width.dtype.kind == 'i': + raise TypeError('`pad_width` must be of integral type.') + + # Broadcast to shape (array.ndim, 2) + pad_width = _as_pairs(pad_width, array.ndim, as_index=True) + + if callable(mode): + # Old behavior: Use user-supplied function with np.apply_along_axis + function = mode + # Create a new zero padded array + padded, _ = _pad_simple(array, pad_width, fill_value=0) + # And apply along each axis + + for axis in range(padded.ndim): + # Iterate using ndindex as in apply_along_axis, but assuming that + # function operates inplace on the padded array. + + # view with the iteration axis at the end + view = np.moveaxis(padded, axis, -1) + + # compute indices for the iteration axes, and append a trailing + # ellipsis to prevent 0d arrays decaying to scalars (gh-8642) + inds = ndindex(view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + for ind in inds: + function(view[ind], pad_width[axis], axis, kwargs) + + return padded + + # Make sure that no unsupported keywords were passed for the current mode + allowed_kwargs = { + 'empty': [], 'edge': [], 'wrap': [], + 'constant': ['constant_values'], + 'linear_ramp': ['end_values'], + 'maximum': ['stat_length'], + 'mean': ['stat_length'], + 'median': ['stat_length'], + 'minimum': ['stat_length'], + 'reflect': ['reflect_type'], + 'symmetric': ['reflect_type'], + } + try: + unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) + except KeyError: + raise ValueError(f"mode '{mode}' is not supported") from None + if unsupported_kwargs: + raise ValueError("unsupported keyword arguments for mode " + f"'{mode}': {unsupported_kwargs}") + + stat_functions = {"maximum": np.amax, "minimum": np.amin, + "mean": np.mean, "median": np.median} + + # Create array with final shape and original values + # (padded area is undefined) + padded, original_area_slice = _pad_simple(array, pad_width) + # And prepare iteration over all dimensions + # (zipping may be more readable than using enumerate) + axes = range(padded.ndim) + + if mode == "constant": + values = kwargs.get("constant_values", 0) + values = _as_pairs(values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, values): + roi = _view_roi(padded, original_area_slice, axis) + _set_pad_area(roi, axis, width_pair, value_pair) + + elif mode == "empty": + pass # Do nothing as _pad_simple already returned the correct result + + elif array.size == 0: + # Only modes "constant" and "empty" can extend empty axes, all other + # modes depend on `array` not being empty + # -> ensure every empty axis is only "padded with 0" + for axis, width_pair in zip(axes, pad_width): + if array.shape[axis] == 0 and any(width_pair): + raise ValueError( + f"can't extend empty axis {axis} using modes other than " + "'constant' or 'empty'" + ) + # passed, don't need to do anything more as _pad_simple already + # returned the correct 
result + + elif mode == "edge": + for axis, width_pair in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + edge_pair = _get_edges(roi, axis, width_pair) + _set_pad_area(roi, axis, width_pair, edge_pair) + + elif mode == "linear_ramp": + end_values = kwargs.get("end_values", 0) + end_values = _as_pairs(end_values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, end_values): + roi = _view_roi(padded, original_area_slice, axis) + ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) + _set_pad_area(roi, axis, width_pair, ramp_pair) + + elif mode in stat_functions: + func = stat_functions[mode] + length = kwargs.get("stat_length") + length = _as_pairs(length, padded.ndim, as_index=True) + for axis, width_pair, length_pair in zip(axes, pad_width, length): + roi = _view_roi(padded, original_area_slice, axis) + stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) + _set_pad_area(roi, axis, width_pair, stat_pair) + + elif mode in {"reflect", "symmetric"}: + method = kwargs.get("reflect_type", "even") + include_edge = mode == "symmetric" + for axis, (left_index, right_index) in zip(axes, pad_width): + if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): + # Extending singleton dimension for 'reflect' is legacy + # behavior; it really should raise an error. + edge_pair = _get_edges(padded, axis, (left_index, right_index)) + _set_pad_area( + padded, axis, (left_index, right_index), edge_pair) + continue + + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with reflected + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_reflect_both( + roi, axis, (left_index, right_index), + method, array.shape[axis], include_edge + ) + + elif mode == "wrap": + for axis, (left_index, right_index) in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + original_period = padded.shape[axis] - right_index - left_index + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with wrapped + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_wrap_both( + roi, axis, (left_index, right_index), original_period) + + return padded diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.pyi new file mode 100644 index 0000000..46b4376 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.pyi @@ -0,0 +1,89 @@ +from typing import ( + Any, + Protocol, + TypeAlias, + TypeVar, + overload, + type_check_only, +) +from typing import ( + Literal as L, +) + +from numpy import generic +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeInt, +) + +__all__ = ["pad"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +@type_check_only +class _ModeFunc(Protocol): + def __call__( + self, + vector: NDArray[Any], + iaxis_pad_width: tuple[int, int], + iaxis: int, + kwargs: dict[str, Any], + /, + ) -> None: ... + +_ModeKind: TypeAlias = L[ + "constant", + "edge", + "linear_ramp", + "maximum", + "mean", + "median", + "minimum", + "reflect", + "symmetric", + "wrap", + "empty", +] + +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. 
Consider adding more overloads to express this in the future. + +# Expand `**kwargs` into explicit keyword-only arguments +@overload +def pad( + array: _ArrayLike[_ScalarT], + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: _ArrayLikeInt | None = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[_ScalarT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: _ArrayLikeInt | None = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[Any]: ... +@overload +def pad( + array: _ArrayLike[_ScalarT], + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[_ScalarT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.py new file mode 100644 index 0000000..ef0739b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.py @@ -0,0 +1,1260 @@ +""" +Set operations for arrays based on sorting. + +Notes +----- + +For floating point arrays, inaccurate results may appear due to usual round-off +and floating point comparison issues. + +Speed could be gained in some operations by an implementation of +`numpy.sort`, that can provide directly the permutation vectors, thus avoiding +calls to `numpy.argsort`. + +Original author: Robert Cimrman + +""" +import functools +import warnings +from typing import NamedTuple + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter, _unique_hash + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", + "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", + "unique_values" +] + + +def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): + return (ary, to_end, to_begin) + + +@array_function_dispatch(_ediff1d_dispatcher) +def ediff1d(ary, to_end=None, to_begin=None): + """ + The differences between consecutive elements of an array. + + Parameters + ---------- + ary : array_like + If necessary, will be flattened before the differences are taken. + to_end : array_like, optional + Number(s) to append at the end of the returned differences. + to_begin : array_like, optional + Number(s) to prepend at the beginning of the returned differences. + + Returns + ------- + ediff1d : ndarray + The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. + + See Also + -------- + diff, gradient + + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.ediff1d(x) + array([ 1, 2, 3, -7]) + + >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + array([-99, 1, 2, ..., -7, 88, 99]) + + The returned array is always 1D. 
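+    (In the example below, the 2-D input is flattened in C order before the
+    differences are taken.)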
+ + >>> y = [[1, 2, 4], [1, 6, 24]] + >>> np.ediff1d(y) + array([ 1, 2, -3, 5, 18]) + + """ + conv = _array_converter(ary) + # Convert to (any) array and ravel: + ary = conv[0].ravel() + + # enforce that the dtype of `ary` is used for the output + dtype_req = ary.dtype + + # fast track default case + if to_begin is None and to_end is None: + return ary[1:] - ary[:-1] + + if to_begin is None: + l_begin = 0 + else: + to_begin = np.asanyarray(to_begin) + if not np.can_cast(to_begin, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_begin` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_begin = to_begin.ravel() + l_begin = len(to_begin) + + if to_end is None: + l_end = 0 + else: + to_end = np.asanyarray(to_end) + if not np.can_cast(to_end, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_end` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_end = to_end.ravel() + l_end = len(to_end) + + # do the calculation in place and copy to_begin and to_end + l_diff = max(len(ary) - 1, 0) + result = np.empty_like(ary, shape=l_diff + l_begin + l_end) + + if l_begin > 0: + result[:l_begin] = to_begin + if l_end > 0: + result[l_begin + l_diff:] = to_end + np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) + + return conv.wrap(result) + + +def _unpack_tuple(x): + """ Unpacks one-element tuples for use as return values """ + if len(x) == 1: + return x[0] + else: + return x + + +def _unique_dispatcher(ar, return_index=None, return_inverse=None, + return_counts=None, axis=None, *, equal_nan=None, + sorted=True): + return (ar,) + + +@array_function_dispatch(_unique_dispatcher) +def unique(ar, return_index=False, return_inverse=False, + return_counts=False, axis=None, *, equal_nan=True, + sorted=True): + """ + Find the unique elements of an array. + + Returns the sorted unique elements of an array. There are three optional + outputs in addition to the unique elements: + + * the indices of the input array that give the unique values + * the indices of the unique array that reconstruct the input array + * the number of times each unique value comes up in the input array + + Parameters + ---------- + ar : array_like + Input array. Unless `axis` is specified, this will be flattened if it + is not already 1-D. + return_index : bool, optional + If True, also return the indices of `ar` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse : bool, optional + If True, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `ar`. + return_counts : bool, optional + If True, also return the number of times each unique item appears + in `ar`. + axis : int or None, optional + The axis to operate on. If None, `ar` will be flattened. If an integer, + the subarrays indexed by the given axis will be flattened and treated + as the elements of a 1-D array with the dimension of the given axis, + see the notes for more details. Object arrays or structured arrays + that contain objects are not supported if the `axis` kwarg is used. The + default is None. + + equal_nan : bool, optional + If True, collapses multiple NaN values in the return array into one. + + .. versionadded:: 1.24 + + sorted : bool, optional + If True, the unique elements are sorted. Elements may be sorted in + practice even if ``sorted=False``, but this could change without + notice. + + .. 
versionadded:: 2.3 + + Returns + ------- + unique : ndarray + The sorted unique values. + unique_indices : ndarray, optional + The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + unique_inverse : ndarray, optional + The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + unique_counts : ndarray, optional + The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. + + See Also + -------- + repeat : Repeat elements of an array. + sort : Return a sorted copy of an array. + + Notes + ----- + When an axis is specified the subarrays indexed by the axis are sorted. + This is done by making the specified axis the first dimension of the array + (move the axis to the first dimension to keep the order of the other axes) + and then flattening the subarrays in C order. The flattened subarrays are + then viewed as a structured type with each element given a label, with the + effect that we end up with a 1-D array of structured types that can be + treated in the same way as any other 1-D array. The result is that the + flattened subarrays are sorted in lexicographic order starting with the + first element. + + .. versionchanged:: 1.21 + Like np.sort, NaN will sort to the end of the values. + For complex arrays all NaN values are considered equivalent + (no matter whether the NaN is in the real or imaginary part). + As the representant for the returned array the smallest one in the + lexicographical order is chosen - see np.sort for how the lexicographical + order is defined for complex arrays. + + .. versionchanged:: 2.0 + For multi-dimensional inputs, ``unique_inverse`` is reshaped + such that the input can be reconstructed using + ``np.take(unique, unique_inverse, axis=axis)``. The result is + now not 1-dimensional when ``axis=None``. + + Note that in NumPy 2.0.0 a higher dimensional array was returned also + when ``axis`` was not ``None``. This was reverted, but + ``inverse.reshape(-1)`` can be used to ensure compatibility with both + versions. 
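+
+        A short sketch of the reshaped inverse described above (values
+        chosen for illustration):
+
+        >>> a = np.array([[1, 2], [2, 1]])
+        >>> u, inv = np.unique(a, return_inverse=True)
+        >>> inv.shape == a.shape   # reshaped when ``axis=None``
+        True
+        >>> np.take(u, inv)
+        array([[1, 2],
+               [2, 1]])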
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.unique([1, 1, 2, 2, 3, 3])
+    array([1, 2, 3])
+    >>> a = np.array([[1, 1], [2, 3]])
+    >>> np.unique(a)
+    array([1, 2, 3])
+
+    Return the unique rows of a 2D array
+
+    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
+    >>> np.unique(a, axis=0)
+    array([[1, 0, 0], [2, 3, 4]])
+
+    Return the indices of the original array that give the unique values:
+
+    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+    >>> u, indices = np.unique(a, return_index=True)
+    >>> u
+    array(['a', 'b', 'c'], dtype='<U1')
+    >>> indices
+    array([0, 1, 3])
+    >>> a[indices]
+    array(['a', 'b', 'c'], dtype='<U1')
+
+    Reconstruct the input array from the unique values and inverse:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> u, indices = np.unique(a, return_inverse=True)
+    >>> u
+    array([1, 2, 3, 4, 6])
+    >>> indices
+    array([0, 1, 4, 3, 1, 2, 1])
+    >>> u[indices]
+    array([1, 2, 6, 4, 2, 3, 2])
+
+    Reconstruct the input values from the unique values and counts:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> values, counts = np.unique(a, return_counts=True)
+    >>> values
+    array([1, 2, 3, 4, 6])
+    >>> counts
+    array([1, 3, 1, 1, 1])
+    >>> np.repeat(values, counts)
+    array([1, 2, 2, 2, 3, 4, 6])    # original order not preserved
+
+    """
+    ar = np.asanyarray(ar)
+    if axis is None:
+        ret = _unique1d(ar, return_index, return_inverse, return_counts,
+                        equal_nan=equal_nan, inverse_shape=ar.shape, axis=None,
+                        sorted=sorted)
+        return _unpack_tuple(ret)
+
+    # axis was specified and not None
+    try:
+        ar = np.moveaxis(ar, axis, 0)
+    except np.exceptions.AxisError:
+        # this removes the "axis1" or "axis2" prefix from the error message
+        raise np.exceptions.AxisError(axis, ar.ndim) from None
+    inverse_shape = [1] * ar.ndim
+    inverse_shape[axis] = ar.shape[0]
+
+    # Must reshape to a contiguous 2D array for this to work...
+    orig_shape, orig_dtype = ar.shape, ar.dtype
+    ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
+    ar = np.ascontiguousarray(ar)
+    dtype = [(f'f{i}', ar.dtype) for i in range(ar.shape[1])]
+
+    # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
+    # data type with `m` fields where each field has the data type of `ar`.
+    # In the following, we create the array `consolidated`, which has
+    # shape `(n,)` with data type `dtype`.
+    try:
+        if ar.shape[1] > 0:
+            consolidated = ar.view(dtype)
+        else:
+            # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
+            # a data type with itemsize 0, and the call `ar.view(dtype)` will
+            # fail. Instead, we'll use `np.empty` to explicitly create the
+            # array with shape `(len(ar),)`. Because `dtype` in this case has
+            # itemsize 0, the total size of the result is still 0 bytes.
+            consolidated = np.empty(len(ar), dtype=dtype)
+    except TypeError as e:
+        # There's no good way to do this for object arrays, etc...
+        msg = 'The axis argument to unique is not supported for dtype {dt}'
+        raise TypeError(msg.format(dt=ar.dtype)) from e
+
+    def reshape_uniq(uniq):
+        n = len(uniq)
+        uniq = uniq.view(orig_dtype)
+        uniq = uniq.reshape(n, *orig_shape[1:])
+        uniq = np.moveaxis(uniq, 0, axis)
+        return uniq
+
+    output = _unique1d(consolidated, return_index,
+                       return_inverse, return_counts,
+                       equal_nan=equal_nan, inverse_shape=inverse_shape,
+                       axis=axis, sorted=sorted)
+    output = (reshape_uniq(output[0]),) + output[1:]
+    return _unpack_tuple(output)
+
+
+def _unique1d(ar, return_index=False, return_inverse=False,
+              return_counts=False, *, equal_nan=True, inverse_shape=None,
+              axis=None, sorted=True):
+    """
+    Find the unique elements of an array, ignoring shape.
+ + Uses a hash table to find the unique elements if possible. + """ + ar = np.asanyarray(ar).flatten() + if len(ar.shape) != 1: + # np.matrix, and maybe some other array subclasses, insist on keeping + # two dimensions for all operations. Coerce to an ndarray in such cases. + ar = np.asarray(ar).flatten() + + optional_indices = return_index or return_inverse + + # masked arrays are not supported yet. + if not optional_indices and not return_counts and not np.ma.is_masked(ar): + # First we convert the array to a numpy array, later we wrap it back + # in case it was a subclass of numpy.ndarray. + conv = _array_converter(ar) + ar_, = conv + + if (hash_unique := _unique_hash(ar_)) is not NotImplemented: + if sorted: + hash_unique.sort() + # We wrap the result back in case it was a subclass of numpy.ndarray. + return (conv.wrap(hash_unique),) + + # If we don't use the hash map, we use the slower sorting method. + if optional_indices: + perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') + aux = ar[perm] + else: + ar.sort() + aux = ar + mask = np.empty(aux.shape, dtype=np.bool) + mask[:1] = True + if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and + np.isnan(aux[-1])): + if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent + aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left') + else: + aux_firstnan = np.searchsorted(aux, aux[-1], side='left') + if aux_firstnan > 0: + mask[1:aux_firstnan] = ( + aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) + mask[aux_firstnan] = True + mask[aux_firstnan + 1:] = False + else: + mask[1:] = aux[1:] != aux[:-1] + + ret = (aux[mask],) + if return_index: + ret += (perm[mask],) + if return_inverse: + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[perm] = imask + ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,) + if return_counts: + idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) + ret += (np.diff(idx),) + return ret + + +# Array API set functions + +class UniqueAllResult(NamedTuple): + values: np.ndarray + indices: np.ndarray + inverse_indices: np.ndarray + counts: np.ndarray + + +class UniqueCountsResult(NamedTuple): + values: np.ndarray + counts: np.ndarray + + +class UniqueInverseResult(NamedTuple): + values: np.ndarray + inverse_indices: np.ndarray + + +def _unique_all_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_all_dispatcher) +def unique_all(x): + """ + Find the unique elements of an array, and counts, inverse, and indices. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_index=True, return_inverse=True, + return_counts=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * indices - The first occurring indices for each unique element. + * inverse_indices - The indices from the set of unique elements + that reconstruct `x`. + * counts - The corresponding counts for each unique element. + + See Also + -------- + unique : Find the unique elements of an array. 
+ + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_all(x) + >>> uniq.values + array([1, 2]) + >>> uniq.indices + array([0, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) + >>> uniq.counts + array([2, 1]) + """ + result = unique( + x, + return_index=True, + return_inverse=True, + return_counts=True, + equal_nan=False, + ) + return UniqueAllResult(*result) + + +def _unique_counts_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_counts_dispatcher) +def unique_counts(x): + """ + Find the unique elements and counts of an input array `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_counts=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * counts - The corresponding counts for each unique element. + + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_counts(x) + >>> uniq.values + array([1, 2]) + >>> uniq.counts + array([2, 1]) + """ + result = unique( + x, + return_index=False, + return_inverse=False, + return_counts=True, + equal_nan=False, + ) + return UniqueCountsResult(*result) + + +def _unique_inverse_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_inverse_dispatcher) +def unique_inverse(x): + """ + Find the unique elements of `x` and indices to reconstruct `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_inverse=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * inverse_indices - The indices from the set of unique elements + that reconstruct `x`. + + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_inverse(x) + >>> uniq.values + array([1, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) + """ + result = unique( + x, + return_index=False, + return_inverse=True, + return_counts=False, + equal_nan=False, + ) + return UniqueInverseResult(*result) + + +def _unique_values_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_values_dispatcher) +def unique_values(x): + """ + Returns the unique elements of an input array `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, equal_nan=False, sorted=False) + + .. versionchanged:: 2.3 + The algorithm was changed to a faster one that does not rely on + sorting, and hence the results are no longer implicitly sorted. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : ndarray + The unique elements of an input array. 
+ + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> np.unique_values([1, 1, 2]) + array([1, 2]) # may vary + + """ + return unique( + x, + return_index=False, + return_inverse=False, + return_counts=False, + equal_nan=False, + sorted=False, + ) + + +def _intersect1d_dispatcher( + ar1, ar2, assume_unique=None, return_indices=None): + return (ar1, ar2) + + +@array_function_dispatch(_intersect1d_dispatcher) +def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): + """ + Find the intersection of two arrays. + + Return the sorted, unique values that are in both of the input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. Will be flattened if not already 1D. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. If True but ``ar1`` or ``ar2`` are not + unique, incorrect results and out-of-bounds indices could result. + Default is False. + return_indices : bool + If True, the indices which correspond to the intersection of the two + arrays are returned. The first instance of a value is used if there are + multiple. Default is False. + + Returns + ------- + intersect1d : ndarray + Sorted 1D array of common and unique elements. + comm1 : ndarray + The indices of the first occurrences of the common values in `ar1`. + Only provided if `return_indices` is True. + comm2 : ndarray + The indices of the first occurrences of the common values in `ar2`. + Only provided if `return_indices` is True. + + Examples + -------- + >>> import numpy as np + >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) + array([1, 3]) + + To intersect more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([3]) + + To return the indices of the values common to the input arrays + along with the intersected values: + + >>> x = np.array([1, 1, 2, 3, 4]) + >>> y = np.array([2, 1, 4, 6]) + >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) + >>> x_ind, y_ind + (array([0, 2, 4]), array([1, 0, 2])) + >>> xy, x[x_ind], y[y_ind] + (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) + + """ + ar1 = np.asanyarray(ar1) + ar2 = np.asanyarray(ar2) + + if not assume_unique: + if return_indices: + ar1, ind1 = unique(ar1, return_index=True) + ar2, ind2 = unique(ar2, return_index=True) + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + else: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + if return_indices: + aux_sort_indices = np.argsort(aux, kind='mergesort') + aux = aux[aux_sort_indices] + else: + aux.sort() + + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + + if return_indices: + ar1_indices = aux_sort_indices[:-1][mask] + ar2_indices = aux_sort_indices[1:][mask] - ar1.size + if not assume_unique: + ar1_indices = ind1[ar1_indices] + ar2_indices = ind2[ar2_indices] + + return int1d, ar1_indices, ar2_indices + else: + return int1d + + +def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setxor1d_dispatcher) +def setxor1d(ar1, ar2, assume_unique=False): + """ + Find the set exclusive-or of two arrays. + + Return the sorted, unique values that are in only one (not both) of the + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. 
+ assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setxor1d : ndarray + Sorted 1D array of unique values that are in only one of the input + arrays. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3, 2, 4]) + >>> b = np.array([2, 3, 5, 7, 5]) + >>> np.setxor1d(a,b) + array([1, 4, 5, 7]) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate((ar1, ar2), axis=None) + if aux.size == 0: + return aux + + aux.sort() + flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) + return aux[flag[1:] & flag[:-1]] + + +def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *, + kind=None): + return (ar1, ar2) + + +@array_function_dispatch(_in1d_dispatcher) +def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): + """ + Test whether each element of a 1-D array is also present in a second array. + + .. deprecated:: 2.0 + Use :func:`isin` instead of `in1d` for new code. + + Returns a boolean array the same length as `ar1` that is True + where an element of `ar1` is in `ar2` and False otherwise. + + Parameters + ---------- + ar1 : (M,) array_like + Input array. + ar2 : array_like + The values against which to test each value of `ar1`. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted (that is, + False where an element of `ar1` is in `ar2` and True otherwise). + Default is False. ``np.in1d(a, b, invert=True)`` is equivalent + to (but is faster than) ``np.invert(in1d(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. This will have + a memory usage of roughly 6 times the sum of the sizes of + `ar1` and `ar2`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `ar1` plus the max-min value of `ar2`. `assume_unique` + has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `ar1` and `ar2`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. If 'table' is chosen, + `assume_unique` will have no effect. + + Returns + ------- + in1d : (M,) ndarray, bool + The values `ar1[in1d]` are in `ar2`. + + See Also + -------- + isin : Version of this function that preserves the + shape of ar1. + + Notes + ----- + `in1d` can be considered as an element-wise function version of the + python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly + equivalent to ``np.array([item in b for item in a])``. + However, this idea fails if `ar2` is a set, or similar (non-sequence) + container: As ``ar2`` is converted to an array, in those cases + ``asarray(ar2)`` is an object array rather than the expected array of + contained values. 
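+
+    For instance (a hypothetical sketch, not from the upstream docstring),
+    ``in1d([1, 2], {1, 2})`` compares each element against the set as one
+    object and returns ``array([False, False])``, while
+    ``in1d([1, 2], list({1, 2}))`` performs the intended element-wise test.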
+ + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + Examples + -------- + >>> import numpy as np + >>> test = np.array([0, 1, 2, 5, 0]) + >>> states = [0, 2] + >>> mask = np.in1d(test, states) + >>> mask + array([ True, False, True, False, True]) + >>> test[mask] + array([0, 2, 0]) + >>> mask = np.in1d(test, states, invert=True) + >>> mask + array([False, True, False, True, False]) + >>> test[mask] + array([1, 5]) + """ + + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`in1d` is deprecated. Use `np.isin` instead.", + DeprecationWarning, + stacklevel=2 + ) + + return _in1d(ar1, ar2, assume_unique, invert, kind=kind) + + +def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # Ensure that iteration through object arrays yields size-1 arrays + if ar2.dtype == object: + ar2 = ar2.reshape(-1, 1) + + if kind not in {None, 'sort', 'table'}: + raise ValueError( + f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.") + + # Can use the table method if all arrays are integers or boolean: + is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2)) + use_table_method = is_int_arrays and kind in {None, 'table'} + + if use_table_method: + if ar2.size == 0: + if invert: + return np.ones_like(ar1, dtype=bool) + else: + return np.zeros_like(ar1, dtype=bool) + + # Convert booleans to uint8 so we can use the fast integer algorithm + if ar1.dtype == bool: + ar1 = ar1.astype(np.uint8) + if ar2.dtype == bool: + ar2 = ar2.astype(np.uint8) + + ar2_min = int(np.min(ar2)) + ar2_max = int(np.max(ar2)) + + ar2_range = ar2_max - ar2_min + + # Constraints on whether we can actually use the table method: + # 1. Assert memory usage is not too large + below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) + # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype + range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max + + # Optimal performance is for approximately + # log10(size) > (log10(range) - 2.27) / 0.927. + # However, here we set the requirement that by default + # the intermediate array can only be 6x + # the combined memory allocation of the original + # arrays. See discussion on + # https://github.com/numpy/numpy/pull/12065. + + if ( + range_safe_from_overflow and + (below_memory_constraint or kind == 'table') + ): + + if invert: + outgoing_array = np.ones_like(ar1, dtype=bool) + else: + outgoing_array = np.zeros_like(ar1, dtype=bool) + + # Make elements 1 where the integer exists in ar2 + if invert: + isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 0 + else: + isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 1 + + # Mask out elements we know won't work + basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) + in_range_ar1 = ar1[basic_mask] + if in_range_ar1.size == 0: + # Nothing more to do, since all values are out of range. + return outgoing_array + + # Unfortunately, ar2_min can be out of range for `intp` even + # if the calculation result must fit in range (and be positive). 
+ # In that case, use ar2.dtype which must work for all unmasked + # values. + try: + ar2_min = np.array(ar2_min, dtype=np.intp) + dtype = np.intp + except OverflowError: + dtype = ar2.dtype + + out = np.empty_like(in_range_ar1, dtype=np.intp) + outgoing_array[basic_mask] = isin_helper_ar[ + np.subtract(in_range_ar1, ar2_min, dtype=dtype, + out=out, casting="unsafe")] + + return outgoing_array + elif kind == 'table': # not range_safe_from_overflow + raise RuntimeError( + "You have specified kind='table', " + "but the range of values in `ar2` or `ar1` exceed the " + "maximum integer of the datatype. " + "Please set `kind` to None or 'sort'." + ) + elif kind == 'table': + raise ValueError( + "The 'table' method is only " + "supported for boolean or integer arrays. " + "Please select 'sort' or None for kind." + ) + + # Check if one of the arrays may contain arbitrary objects + contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject + + # This code is run when + # a) the first condition is true, making the code significantly faster + # b) the second condition is true (i.e. `ar1` or `ar2` may contain + # arbitrary objects), since then sorting is not guaranteed to work + if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: + if invert: + mask = np.ones(len(ar1), dtype=bool) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=bool) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = np.concatenate((bool_ar, [invert])) + ret = np.empty(ar.shape, dtype=bool) + ret[order] = flag + + if assume_unique: + return ret[:len(ar1)] + else: + return ret[rev_idx] + + +def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None, + *, kind=None): + return (element, test_elements) + + +@array_function_dispatch(_isin_dispatcher) +def isin(element, test_elements, assume_unique=False, invert=False, *, + kind=None): + """ + Calculates ``element in test_elements``, broadcasting over `element` only. + Returns a boolean array of the same shape as `element` that is True + where an element of `element` is in `test_elements` and False otherwise. + + Parameters + ---------- + element : array_like + Input array. + test_elements : array_like + The values against which to test each value of `element`. + This argument is flattened if it is an array or array_like. + See notes for behavior with non-array-like parameters. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted, as if + calculating `element not in test_elements`. Default is False. + ``np.isin(a, b, invert=True)`` is equivalent to (but faster + than) ``np.invert(np.isin(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. 
This will have + a memory usage of roughly 6 times the sum of the sizes of + `element` and `test_elements`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `element` plus the max-min value of `test_elements`. + `assume_unique` has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `element` and `test_elements`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. If 'table' is chosen, + `assume_unique` will have no effect. + + + Returns + ------- + isin : ndarray, bool + Has the same shape as `element`. The values `element[isin]` + are in `test_elements`. + + Notes + ----- + `isin` is an element-wise function version of the python keyword `in`. + ``isin(a, b)`` is roughly equivalent to + ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. + + `element` and `test_elements` are converted to arrays if they are not + already. If `test_elements` is a set (or other non-sequence collection) + it will be converted to an object array with one element, rather than an + array of the values contained in `test_elements`. This is a consequence + of the `array` constructor's way of handling non-sequence collections. + Converting the set to a list usually gives the desired behavior. + + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(test_elements)) > + (log10(max(test_elements)-min(test_elements)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + Examples + -------- + >>> import numpy as np + >>> element = 2*np.arange(4).reshape((2, 2)) + >>> element + array([[0, 2], + [4, 6]]) + >>> test_elements = [1, 2, 4, 8] + >>> mask = np.isin(element, test_elements) + >>> mask + array([[False, True], + [ True, False]]) + >>> element[mask] + array([2, 4]) + + The indices of the matched values can be obtained with `nonzero`: + + >>> np.nonzero(mask) + (array([0, 1]), array([1, 0])) + + The test can also be inverted: + + >>> mask = np.isin(element, test_elements, invert=True) + >>> mask + array([[ True, False], + [False, True]]) + >>> element[mask] + array([0, 6]) + + Because of how `array` handles sets, the following does not + work as expected: + + >>> test_set = {1, 2, 4, 8} + >>> np.isin(element, test_set) + array([[False, False], + [False, False]]) + + Casting the set to a list gives the expected result: + + >>> np.isin(element, list(test_set)) + array([[False, True], + [ True, False]]) + """ + element = np.asarray(element) + return _in1d(element, test_elements, assume_unique=assume_unique, + invert=invert, kind=kind).reshape(element.shape) + + +def _union1d_dispatcher(ar1, ar2): + return (ar1, ar2) + + +@array_function_dispatch(_union1d_dispatcher) +def union1d(ar1, ar2): + """ + Find the union of two arrays. + + Return the unique, sorted array of values that are in either of the two + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. They are flattened if they are not already 1D. 
+ + Returns + ------- + union1d : ndarray + Unique, sorted union of the input arrays. + + Examples + -------- + >>> import numpy as np + >>> np.union1d([-1, 0, 1], [-2, 0, 2]) + array([-2, -1, 0, 1, 2]) + + To find the union of more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([1, 2, 3, 4, 6]) + """ + return unique(np.concatenate((ar1, ar2), axis=None)) + + +def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setdiff1d_dispatcher) +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Find the set difference of two arrays. + + Return the unique values in `ar1` that are not in `ar2`. + + Parameters + ---------- + ar1 : array_like + Input array. + ar2 : array_like + Input comparison array. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setdiff1d : ndarray + 1D array of values in `ar1` that are not in `ar2`. The result + is sorted when `assume_unique=False`, but otherwise only sorted + if the input is sorted. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3, 2, 4, 1]) + >>> b = np.array([3, 4, 5, 6]) + >>> np.setdiff1d(a, b) + array([1, 2]) + + """ + if assume_unique: + ar1 = np.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)] diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.pyi new file mode 100644 index 0000000..a7ad5b9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.pyi @@ -0,0 +1,444 @@ +from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload +from typing import Literal as L + +from typing_extensions import TypeVar, deprecated + +import numpy as np +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeNumber_co, +) + +__all__ = [ + "ediff1d", + "in1d", + "intersect1d", + "isin", + "setdiff1d", + "setxor1d", + "union1d", + "unique", + "unique_all", + "unique_counts", + "unique_inverse", + "unique_values", +] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_) + +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# Only relevant if two or more arguments are parametrized, (e.g. 
`setdiff1d`) +# which could result in, for example, `int64` and `float64`producing a +# `number[_64Bit]` array +_EitherSCT = TypeVar( + "_EitherSCT", + np.bool, + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip + +_AnyArray: TypeAlias = NDArray[Any] +_IntArray: TypeAlias = NDArray[np.intp] + +### + +class UniqueAllResult(NamedTuple, Generic[_ScalarT]): + values: NDArray[_ScalarT] + indices: _IntArray + inverse_indices: _IntArray + counts: _IntArray + +class UniqueCountsResult(NamedTuple, Generic[_ScalarT]): + values: NDArray[_ScalarT] + counts: _IntArray + +class UniqueInverseResult(NamedTuple, Generic[_ScalarT]): + values: NDArray[_ScalarT] + inverse_indices: _IntArray + +# +@overload +def ediff1d( + ary: _ArrayLikeBool_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> NDArray[np.int8]: ... +@overload +def ediff1d( + ary: _ArrayLike[_NumericT], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> NDArray[_NumericT]: ... +@overload +def ediff1d( + ary: _ArrayLike[np.datetime64[Any]], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> NDArray[np.timedelta64]: ... +@overload +def ediff1d( + ary: _ArrayLikeNumber_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _AnyArray: ... + +# +@overload # known scalar-type, FFF +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> NDArray[_ScalarT]: ... +@overload # unknown scalar-type, FFF +def unique( + ar: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> _AnyArray: ... +@overload # known scalar-type, TFF +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # unknown scalar-type, TFF +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # known scalar-type, FTF (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # known scalar-type, FTF (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... 
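+
+# NOTE (editorial illustration, not part of the upstream stub file): the
+# ``L[...]`` (Literal) flags in the overloads above are what let a static
+# type checker derive the tuple arity of ``np.unique`` from its keyword
+# arguments. A minimal sketch of how the overloads resolve at a call site;
+# ``reveal_type`` is a type-checker construct (mypy/pyright), not runtime
+# code, and the revealed types are abbreviated:
+#
+#     import numpy as np
+#
+#     a = np.array([1, 2, 2, 3], dtype=np.int64)
+#     reveal_type(np.unique(a))                     # roughly NDArray[np.int64]
+#     reveal_type(np.unique(a, return_index=True))  # tuple[NDArray[np.int64],
+#                                                   #       NDArray[np.intp]]
+#     reveal_type(np.unique(a, return_index=True, return_counts=True))
+#                                                   # 3-tuple: values, indices, counts
+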
+@overload # unknown scalar-type, FTF (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # unknown scalar-type, FTF (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # known scalar-type, FFT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # known scalar-type, FFT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FFT (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # unknown scalar-type, FFT (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # known scalar-type, TTF +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TTF +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # known scalar-type, TFT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # known scalar-type, TFT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TFT (positional) +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TFT (keyword) +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... 
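+
+# NOTE (editorial illustration, not part of the upstream stub file): each
+# flag combination above appears twice -- once "(positional)" and once
+# "(keyword)" -- because callers may skip earlier defaulted parameters when
+# passing keywords, and a single overload cannot express both call shapes.
+# A hedged sketch of two calls landing on the two FTF variants:
+#
+#     import numpy as np
+#
+#     a = np.array([1, 2, 2, 3], dtype=np.int64)
+#     np.unique(a, False, True)          # matches the FTF (positional) form
+#     np.unique(a, return_inverse=True)  # matches the FTF (keyword) form
+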
+@overload # known scalar-type, FTT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # known scalar-type, FTT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, FTT (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, FTT (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # known scalar-type, TTT +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TTT +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... + +# +@overload +def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ... +@overload +def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ... + +# +@overload +def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ... +@overload +def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ... + +# +@overload +def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ... +@overload +def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... + +# +@overload +def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def unique_values(x: ArrayLike) -> _AnyArray: ... + +# +@overload # known scalar-type, return_indices=False (default) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool = False, + return_indices: L[False] = False, +) -> NDArray[_EitherSCT]: ... +@overload # known scalar-type, return_indices=True (positional) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool, + return_indices: L[True], +) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +@overload # known scalar-type, return_indices=True (keyword) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool = False, + *, + return_indices: L[True], +) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, return_indices=False (default) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = False, + return_indices: L[False] = False, +) -> _AnyArray: ... 
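+
+# NOTE (editorial illustration, not part of the upstream stub file): because
+# ``_EitherSCT`` is a *constrained* TypeVar, both arguments must resolve to
+# the same concrete scalar type for the precisely-typed overloads to match.
+# A hedged sketch of the behaviour this buys, assuming a checker that
+# follows the overloads in this file:
+#
+#     import numpy as np
+#
+#     i = np.array([1, 2, 3], dtype=np.int64)
+#     f = np.array([2.0, 3.0], dtype=np.float64)
+#
+#     np.intersect1d(i, i)  # matches the _EitherSCT overload -> NDArray[int64]
+#     np.intersect1d(i, f)  # no single constraint fits both arguments, so the
+#                           # checker falls through to the ArrayLike overload
+#                           # (NDArray[Any]) instead of inferring an abstract
+#                           # common super-type such as number[_64Bit]
+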
+@overload # unknown scalar-type, return_indices=True (positional) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool, + return_indices: L[True], +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, return_indices=True (keyword) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = False, + *, + return_indices: L[True], +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... + +# +@overload +def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +@overload +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... + +# +@overload +def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ... +@overload +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ... + +# +@overload +def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +@overload +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... + +# +def isin( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = False, + invert: bool = False, + *, + kind: L["sort", "table"] | None = None, +) -> NDArray[np.bool]: ... + +# +@deprecated("Use 'isin' instead") +def in1d( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = False, + invert: bool = False, + *, + kind: L["sort", "table"] | None = None, +) -> NDArray[np.bool]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.py new file mode 100644 index 0000000..5f7c5fc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.py @@ -0,0 +1,224 @@ +""" +A buffered iterator for big arrays. + +This module solves the problem of iterating over a big file-based array +without having to read it into memory. The `Arrayterator` class wraps +an array object, and when iterated it will return sub-arrays with at most +a user-specified number of elements. + +""" +from functools import reduce +from operator import mul + +__all__ = ['Arrayterator'] + + +class Arrayterator: + """ + Buffered iterator for big arrays. + + `Arrayterator` creates a buffered iterator for reading big arrays in small + contiguous blocks. The class is useful for objects stored in the + file system. It allows iteration over the object *without* reading + everything in memory; instead, small blocks are read and iterated over. + + `Arrayterator` can be used with any object that supports multidimensional + slices. This includes NumPy arrays, but also variables from + Scientific.IO.NetCDF or pynetcdf for example. + + Parameters + ---------- + var : array_like + The object to iterate over. + buf_size : int, optional + The buffer size. If `buf_size` is supplied, the maximum amount of + data that will be read into memory is `buf_size` elements. + Default is None, which will read as many element as possible + into memory. + + Attributes + ---------- + var + buf_size + start + stop + step + shape + flat + + See Also + -------- + numpy.ndenumerate : Multidimensional array iterator. + numpy.flatiter : Flat array iterator. + numpy.memmap : Create a memory-map to an array stored + in a binary file on disk. + + Notes + ----- + The algorithm works by first finding a "running dimension", along which + the blocks will be extracted. 
Given an array of dimensions
+    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
+    first dimension will be used. If, on the other hand,
+    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
+    Blocks are extracted along this dimension, and when the last block is
+    returned the process continues from the next dimension, until all
+    elements have been read.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+    >>> a_itor = np.lib.Arrayterator(a, 2)
+    >>> a_itor.shape
+    (3, 4, 5, 6)
+
+    Now we can iterate over ``a_itor``, and it will return arrays of size
+    two. Since `buf_size` was smaller than any dimension, the first
+    dimension will be iterated over first:
+
+    >>> for subarr in a_itor:
+    ...     if not subarr.all():
+    ...         print(subarr, subarr.shape)  # doctest: +SKIP
+    >>> # [[[[0 1]]]] (1, 1, 1, 2)
+
+    """
+
+    __module__ = "numpy.lib"
+
+    def __init__(self, var, buf_size=None):
+        self.var = var
+        self.buf_size = buf_size
+
+        self.start = [0 for dim in var.shape]
+        self.stop = list(var.shape)
+        self.step = [1 for dim in var.shape]
+
+    def __getattr__(self, attr):
+        return getattr(self.var, attr)
+
+    def __getitem__(self, index):
+        """
+        Return a new arrayterator.
+
+        """
+        # Fix index, handling ellipsis and incomplete slices.
+        if not isinstance(index, tuple):
+            index = (index,)
+        fixed = []
+        length, dims = len(index), self.ndim
+        for slice_ in index:
+            if slice_ is Ellipsis:
+                fixed.extend([slice(None)] * (dims - length + 1))
+                length = len(fixed)
+            elif isinstance(slice_, int):
+                fixed.append(slice(slice_, slice_ + 1, 1))
+            else:
+                fixed.append(slice_)
+        index = tuple(fixed)
+        if len(index) < dims:
+            index += (slice(None),) * (dims - len(index))
+
+        # Return a new arrayterator object.
+        out = self.__class__(self.var, self.buf_size)
+        for i, (start, stop, step, slice_) in enumerate(
+                zip(self.start, self.stop, self.step, index)):
+            out.start[i] = start + (slice_.start or 0)
+            out.step[i] = step * (slice_.step or 1)
+            out.stop[i] = start + (slice_.stop or stop - start)
+            out.stop[i] = min(stop, out.stop[i])
+        return out
+
+    def __array__(self, dtype=None, copy=None):
+        """
+        Return corresponding data.
+
+        """
+        slice_ = tuple(slice(*t) for t in zip(
+            self.start, self.stop, self.step))
+        return self.var[slice_]
+
+    @property
+    def flat(self):
+        """
+        A 1-D flat iterator for Arrayterator objects.
+
+        This iterator returns elements of the array to be iterated over in
+        `~lib.Arrayterator` one by one.
+        It is similar to `flatiter`.
+
+        See Also
+        --------
+        lib.Arrayterator
+        flatiter
+
+        Examples
+        --------
+        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        >>> a_itor = np.lib.Arrayterator(a, 2)
+
+        >>> for subarr in a_itor.flat:
+        ...     if not subarr:
+        ...         print(subarr, type(subarr))
+        ...
+        0 <class 'numpy.int64'>
+
+        """
+        for block in self:
+            yield from block.flat
+
+    @property
+    def shape(self):
+        """
+        The shape of the array to be iterated over.
+
+        For an example, see `Arrayterator`.
+ + """ + return tuple(((stop - start - 1) // step + 1) for start, stop, step in + zip(self.start, self.stop, self.step)) + + def __iter__(self): + # Skip arrays with degenerate dimensions + if [dim for dim in self.shape if dim <= 0]: + return + + start = self.start[:] + stop = self.stop[:] + step = self.step[:] + ndims = self.var.ndim + + while True: + count = self.buf_size or reduce(mul, self.shape) + + # iterate over each dimension, looking for the + # running dimension (ie, the dimension along which + # the blocks will be built from) + rundim = 0 + for i in range(ndims - 1, -1, -1): + # if count is zero we ran out of elements to read + # along higher dimensions, so we read only a single position + if count == 0: + stop[i] = start[i] + 1 + elif count <= self.shape[i]: + # limit along this dimension + stop[i] = start[i] + count * step[i] + rundim = i + else: + # read everything along this dimension + stop[i] = self.stop[i] + stop[i] = min(self.stop[i], stop[i]) + count = count // self.shape[i] + + # yield a block + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + yield self.var[slice_] + + # Update start position, taking care of overflow to + # other dimensions + start[rundim] = stop[rundim] # start where we stopped + for i in range(ndims - 1, 0, -1): + if start[i] >= self.stop[i]: + start[i] = self.start[i] + start[i - 1] += self.step[i - 1] + if start[0] >= self.stop[0]: + return diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.pyi new file mode 100644 index 0000000..e1a9e05 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.pyi @@ -0,0 +1,46 @@ +# pyright: reportIncompatibleMethodOverride=false + +from collections.abc import Generator +from types import EllipsisType +from typing import Any, Final, TypeAlias, overload + +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import _AnyShape, _Shape + +__all__ = ["Arrayterator"] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] + +# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, +# but its ``__getattr__` method does wrap around the former and thus has +# access to all its methods + +class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): + var: np.ndarray[_ShapeT_co, _DTypeT_co] # type: ignore[assignment] + buf_size: Final[int | None] + start: Final[list[int]] + stop: Final[list[int]] + step: Final[list[int]] + + @property # type: ignore[misc] + def shape(self) -> _ShapeT_co: ... + @property + def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override] + + # + def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... + def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... # type: ignore[override] + def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... + + # + @overload # type: ignore[override] + def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... 
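+
+# NOTE (editorial illustration, not part of the upstream stub file): a
+# minimal, hedged sketch of the buffered iteration described above --
+# visiting a large array in blocks of at most ``buf_size`` elements rather
+# than materialising everything at once:
+#
+#     import numpy as np
+#
+#     a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+#     total = 0
+#     for block in np.lib.Arrayterator(a, buf_size=100):
+#         total += int(block.sum())  # each block holds at most 100 elements
+#     assert total == int(a.sum())
+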
+ @overload + def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.py b/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.py new file mode 100644 index 0000000..72398c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.py @@ -0,0 +1,700 @@ +"""A file interface for handling local and remote data files. + +The goal of datasource is to abstract some of the file system operations +when dealing with data files so the researcher doesn't have to know all the +low-level details. Through datasource, a researcher can obtain and use a +file with one function call, regardless of location of the file. + +DataSource is meant to augment standard python libraries, not replace them. +It should work seamlessly with standard file IO operations and the os +module. + +DataSource files can originate locally or remotely: + +- local files : '/home/guido/src/local/data.txt' +- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' + +DataSource files can also be compressed or uncompressed. Currently only +gzip, bz2 and xz are supported. + +Example:: + + >>> # Create a DataSource, use os.curdir (default) for local storage. + >>> from numpy import DataSource + >>> ds = DataSource() + >>> + >>> # Open a remote file. + >>> # DataSource downloads the file, stores it locally in: + >>> # './www.google.com/index.html' + >>> # opens the file and returns a file object. + >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP + >>> + >>> # Use the file as you normally would + >>> fp.read() # doctest: +SKIP + >>> fp.close() # doctest: +SKIP + +""" +import os + +from numpy._utils import set_module + +_open = open + + +def _check_mode(mode, encoding, newline): + """Check mode and that encoding and newline are compatible. + + Parameters + ---------- + mode : str + File open mode. + encoding : str + File encoding. + newline : str + Newline for text files. + + """ + if "t" in mode: + if "b" in mode: + raise ValueError(f"Invalid mode: {mode!r}") + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + +# Using a class instead of a module-level dictionary +# to reduce the initial 'import numpy' overhead by +# deferring the import of lzma, bz2 and gzip until needed + +# TODO: .zip support, .tar support? +class _FileOpeners: + """ + Container for different methods to open (un-)compressed files. + + `_FileOpeners` contains a dictionary that holds one method for each + supported file format. Attribute lookup is implemented in such a way + that an instance of `_FileOpeners` itself can be indexed with the keys + of that dictionary. Currently uncompressed files as well as files + compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. + + Notes + ----- + `_file_openers`, an instance of `_FileOpeners`, is made available for + use in the `_datasource` module. 
+ + Examples + -------- + >>> import gzip + >>> np.lib._datasource._file_openers.keys() + [None, '.bz2', '.gz', '.xz', '.lzma'] + >>> np.lib._datasource._file_openers['.gz'] is gzip.open + True + + """ + + def __init__(self): + self._loaded = False + self._file_openers = {None: open} + + def _load(self): + if self._loaded: + return + + try: + import bz2 + self._file_openers[".bz2"] = bz2.open + except ImportError: + pass + + try: + import gzip + self._file_openers[".gz"] = gzip.open + except ImportError: + pass + + try: + import lzma + self._file_openers[".xz"] = lzma.open + self._file_openers[".lzma"] = lzma.open + except (ImportError, AttributeError): + # There are incompatible backports of lzma that do not have the + # lzma.open attribute, so catch that as well as ImportError. + pass + + self._loaded = True + + def keys(self): + """ + Return the keys of currently supported file openers. + + Parameters + ---------- + None + + Returns + ------- + keys : list + The keys are None for uncompressed files and the file extension + strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression + methods. + + """ + self._load() + return list(self._file_openers.keys()) + + def __getitem__(self, key): + self._load() + return self._file_openers[key] + + +_file_openers = _FileOpeners() + +def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): + """ + Open `path` with `mode` and return the file object. + + If ``path`` is an URL, it will be downloaded, stored in the + `DataSource` `destpath` directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. + mode : str, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to + append. Available modes depend on the type of object specified by + path. Default is 'r'. + destpath : str, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + The opened file. + + Notes + ----- + This is a convenience function that instantiates a `DataSource` and + returns the file object from ``DataSource.open(path)``. + + """ + + ds = DataSource(destpath) + return ds.open(path, mode, encoding=encoding, newline=newline) + + +@set_module('numpy.lib.npyio') +class DataSource: + """ + DataSource(destpath='.') + + A generic data source file (file, http, ftp, ...). + + DataSources can be local files or remote files/URLs. The files may + also be compressed or uncompressed. DataSource hides some of the + low-level details of downloading the file, allowing you to simply pass + in a valid file path (or URL) and obtain a file object. + + Parameters + ---------- + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Notes + ----- + URLs require a scheme string (``http://``) to be used, without it they + will fail:: + + >>> repos = np.lib.npyio.DataSource() + >>> repos.exists('www.google.com/index.html') + False + >>> repos.exists('http://www.google.com/index.html') + True + + Temporary directories are deleted when the DataSource is deleted. 
+
+    Examples
+    --------
+    ::
+
+        >>> ds = np.lib.npyio.DataSource('/home/guido')
+        >>> urlname = 'http://www.google.com/'
+        >>> gfile = ds.open('http://www.google.com/')
+        >>> ds.abspath(urlname)
+        '/home/guido/www.google.com/index.html'
+
+        >>> ds = np.lib.npyio.DataSource(None)  # use with temporary file
+        >>> ds.open('/home/guido/foobar.txt')
+        <_io.TextIOWrapper name='/home/guido/foobar.txt' mode='r' encoding='UTF-8'>
+        >>> ds.abspath('/home/guido/foobar.txt')
+        '/tmp/.../home/guido/foobar.txt'
+
+    """
+
+    def __init__(self, destpath=os.curdir):
+        """Create a DataSource with a local path at destpath."""
+        if destpath:
+            self._destpath = os.path.abspath(destpath)
+            self._istmpdest = False
+        else:
+            import tempfile  # deferring import to improve startup time
+            self._destpath = tempfile.mkdtemp()
+            self._istmpdest = True
+
+    def __del__(self):
+        # Remove temp directories
+        if hasattr(self, '_istmpdest') and self._istmpdest:
+            import shutil
+
+            shutil.rmtree(self._destpath)
+
+    def _iszip(self, filename):
+        """Test if the filename is a zip file by looking at the file extension.
+
+        """
+        fname, ext = os.path.splitext(filename)
+        return ext in _file_openers.keys()
+
+    def _iswritemode(self, mode):
+        """Test if the given mode will open a file for writing."""
+
+        # Currently only used to test the bz2 files.
+        _writemodes = ("w", "+")
+        return any(c in _writemodes for c in mode)
+
+    def _splitzipext(self, filename):
+        """Split zip extension from filename and return filename.
+
+        Returns
+        -------
+        base, zip_ext : {tuple}
+
+        """
+
+        if self._iszip(filename):
+            return os.path.splitext(filename)
+        else:
+            return filename, None
+
+    def _possible_names(self, filename):
+        """Return a tuple containing compressed filename variations."""
+        names = [filename]
+        if not self._iszip(filename):
+            for zipext in _file_openers.keys():
+                if zipext:
+                    names.append(filename + zipext)
+        return names
+
+    def _isurl(self, path):
+        """Test if path is a net location. Tests the scheme and netloc."""
+
+        # We do this here to reduce the 'import numpy' initial import time.
+        from urllib.parse import urlparse
+
+        # BUG : URLs require a scheme string ('http://') to be used.
+        #       www.google.com will fail.
+        #       Should we prepend the scheme for those that don't have it and
+        #       test that also?  Similar to the way we append .gz and test
+        #       for compressed versions of files.
+
+        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+        return bool(scheme and netloc)
+
+    def _cache(self, path):
+        """Cache the file specified by path.
+
+        Creates a copy of the file in the datasource cache.
+
+        """
+        # We import these here because importing them is slow and
+        # a significant fraction of numpy's total import time.
+        import shutil
+        from urllib.request import urlopen
+
+        upath = self.abspath(path)
+
+        # ensure directory exists
+        if not os.path.exists(os.path.dirname(upath)):
+            os.makedirs(os.path.dirname(upath))
+
+        # TODO: Doesn't handle compressed files!
+        if self._isurl(path):
+            with urlopen(path) as openedurl:
+                with _open(upath, 'wb') as f:
+                    shutil.copyfileobj(openedurl, f)
+        else:
+            shutil.copyfile(path, upath)
+        return upath
+
+    def _findfile(self, path):
+        """Searches for ``path`` and returns full path if found.
+
+        If path is an URL, _findfile will cache a local copy and return the
+        path to the cached file.  If path is a local file, _findfile will
+        return a path to that local file.
+
+        The search will include possible compressed versions of the file
+        and return the first occurrence found.
+ + """ + + # Build list of possible local file paths + if not self._isurl(path): + # Valid local paths + filelist = self._possible_names(path) + # Paths in self._destpath + filelist += self._possible_names(self.abspath(path)) + else: + # Cached URLs in self._destpath + filelist = self._possible_names(self.abspath(path)) + # Remote URLs + filelist = filelist + self._possible_names(path) + + for name in filelist: + if self.exists(name): + if self._isurl(name): + name = self._cache(name) + return name + return None + + def abspath(self, path): + """ + Return absolute path of file in the DataSource directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + Notes + ----- + The functionality is based on `os.path.abspath`. + + """ + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # TODO: This should be more robust. Handles case where path includes + # the destpath, but not other sub-paths. Failing case: + # path = /home/guido/datafile.txt + # destpath = /home/alex/ + # upath = self.abspath(path) + # upath == '/home/alex/home/guido/datafile.txt' + + # handle case where path includes self._destpath + splitpath = path.split(self._destpath, 2) + if len(splitpath) > 1: + path = splitpath[1] + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + netloc = self._sanitize_relative_path(netloc) + upath = self._sanitize_relative_path(upath) + return os.path.join(self._destpath, netloc, upath) + + def _sanitize_relative_path(self, path): + """Return a sanitised relative path for which + os.path.abspath(os.path.join(base, path)).startswith(base) + """ + last = None + path = os.path.normpath(path) + while path != last: + last = path + # Note: os.path.join treats '/' as os.sep on Windows + path = path.lstrip(os.sep).lstrip('/') + path = path.lstrip(os.pardir).removeprefix('..') + drive, path = os.path.splitdrive(path) # for Windows + return path + + def exists(self, path): + """ + Test if path exists. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + + # First test for local path + if os.path.exists(path): + return True + + # We import this here because importing urllib is slow and + # a significant fraction of numpy's total import time. 
+        from urllib.error import URLError
+        from urllib.request import urlopen
+
+        # Test cached url
+        upath = self.abspath(path)
+        if os.path.exists(upath):
+            return True
+
+        # Test remote url
+        if self._isurl(path):
+            try:
+                netfile = urlopen(path)
+                netfile.close()
+                del netfile
+                return True
+            except URLError:
+                return False
+        return False
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object.
+
+        If `path` is an URL, it will be downloaded, stored in the
+        `DataSource` directory and opened from there.
+
+        Parameters
+        ----------
+        path : str or pathlib.Path
+            Local file path or URL to open.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+
+        # TODO: There is no support for opening a file for writing which
+        #       doesn't exist yet (creating a file).  Should there be?
+
+        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+        #       used to store URLs in self._destpath.
+
+        if self._isurl(path) and self._iswritemode(mode):
+            raise ValueError("URLs are not writeable")
+
+        # NOTE: _findfile will fail on a new file opened for writing.
+        found = self._findfile(path)
+        if found:
+            _fname, ext = self._splitzipext(found)
+            # `_splitzipext` returns extensions with the leading dot, and
+            # `str.replace` returns a new string, so compare against '.bz2'
+            # and rebind `mode`; bz2 does not support the '+' update modes.
+            if ext == '.bz2':
+                mode = mode.replace("+", "")
+            return _file_openers[ext](found, mode=mode,
+                                      encoding=encoding, newline=newline)
+        else:
+            raise FileNotFoundError(f"{path} not found.")
+
+
+class Repository (DataSource):
+    """
+    Repository(baseurl, destpath='.')
+
+    A data repository where multiple DataSource's share a base
+    URL/directory.
+
+    `Repository` extends `DataSource` by prepending a base URL (or
+    directory) to all the files it handles. Use `Repository` when you will
+    be working with multiple files from one base URL.  Initialize
+    `Repository` with the base URL, then refer to each file by its filename
+    only.
+
+    Parameters
+    ----------
+    baseurl : str
+        Path to the local directory or remote location that contains the
+        data files.
+    destpath : str or None, optional
+        Path to the directory where the source file gets downloaded to for
+        use.  If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+
+    Examples
+    --------
+    To analyze all files in the repository, do something like this
+    (note: this is not self-contained code)::
+
+        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+        >>> for filename in filelist:
+        ...     fp = repos.open(filename)
+        ...     fp.analyze()
+        ...     fp.close()
+
+    Similarly you could use a URL for a repository::
+
+        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
+
+    """
+
+    def __init__(self, baseurl, destpath=os.curdir):
+        """Create a Repository with a shared url or directory of baseurl."""
+        DataSource.__init__(self, destpath=destpath)
+        self._baseurl = baseurl
+
+    def __del__(self):
+        DataSource.__del__(self)
+
+    def _fullpath(self, path):
+        """Return complete path for path.
Prepends baseurl if necessary.""" + splitpath = path.split(self._baseurl, 2) + if len(splitpath) == 1: + result = os.path.join(self._baseurl, path) + else: + result = path # path contains baseurl already + return result + + def _findfile(self, path): + """Extend DataSource method to prepend baseurl to ``path``.""" + return DataSource._findfile(self, self._fullpath(path)) + + def abspath(self, path): + """ + Return absolute path of file in the Repository directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + """ + return DataSource.abspath(self, self._fullpath(path)) + + def exists(self, path): + """ + Test if path exists prepending Repository base URL to path. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + return DataSource.exists(self, self._fullpath(path)) + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object prepending Repository base URL. + + If `path` is an URL, it will be downloaded, stored in the + DataSource directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. This may, but does not have to, + include the `baseurl` with which the `Repository` was + initialized. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + return DataSource.open(self, self._fullpath(path), mode, + encoding=encoding, newline=newline) + + def listdir(self): + """ + List files in the source Repository. + + Returns + ------- + files : list of str or pathlib.Path + List of file names (not containing a directory part). + + Notes + ----- + Does not currently work for remote repositories. 
+ + """ + if self._isurl(self._baseurl): + raise NotImplementedError( + "Directory listing of URLs, not supported yet.") + else: + return os.listdir(self._baseurl) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.pyi new file mode 100644 index 0000000..9f91fdf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.pyi @@ -0,0 +1,31 @@ +from pathlib import Path +from typing import IO, Any, TypeAlias + +from _typeshed import OpenBinaryMode, OpenTextMode + +_Mode: TypeAlias = OpenBinaryMode | OpenTextMode + +### + +# exported in numpy.lib.nppyio +class DataSource: + def __init__(self, /, destpath: Path | str | None = ...) -> None: ... + def __del__(self, /) -> None: ... + def abspath(self, /, path: str) -> str: ... + def exists(self, /, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ... + +class Repository(DataSource): + def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ... + def listdir(self, /) -> list[str]: ... + +def open( + path: str, + mode: _Mode = "r", + destpath: str | None = ..., + encoding: str | None = None, + newline: str | None = None, +) -> IO[Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.py new file mode 100644 index 0000000..7378ba5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.py @@ -0,0 +1,1036 @@ +""" +Binary serialization + +NPY format +========== + +A simple format for saving numpy arrays to disk with the full +information about them. + +The ``.npy`` format is the standard binary file format in NumPy for +persisting a *single* arbitrary NumPy array on disk. The format stores all +of the shape and dtype information necessary to reconstruct the array +correctly even on another machine with a different architecture. +The format is designed to be as simple as possible while achieving +its limited goals. + +The ``.npz`` format is the standard format for persisting *multiple* NumPy +arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` +files, one for each array. + +Capabilities +------------ + +- Can represent all NumPy arrays including nested record arrays and + object arrays. + +- Represents the data in its native binary form. + +- Supports Fortran-contiguous arrays directly. + +- Stores all of the necessary information to reconstruct the array + including shape and dtype on a machine of a different + architecture. Both little-endian and big-endian arrays are + supported, and a file with little-endian numbers will yield + a little-endian array on any machine reading the file. The + types are described in terms of their actual sizes. For example, + if a machine with a 64-bit C "long int" writes out an array with + "long ints", a reading machine with 32-bit C "long ints" will yield + an array with 64-bit integers. + +- Is straightforward to reverse engineer. Datasets often live longer than + the programs that created them. A competent developer should be + able to create a solution in their preferred programming language to + read most ``.npy`` files that they have been given without much + documentation. + +- Allows memory-mapping of the data. See `open_memmap`. 
+ +- Can be read from a filelike stream object instead of an actual file. + +- Stores object arrays, i.e. arrays containing elements that are arbitrary + Python objects. Files with object arrays are not to be mmapable, but + can be read and written to disk. + +Limitations +----------- + +- Arbitrary subclasses of numpy.ndarray are not completely preserved. + Subclasses will be accepted for writing, but only the array data will + be written out. A regular numpy.ndarray object will be created + upon reading the file. + +.. warning:: + + Due to limitations in the interpretation of structured dtypes, dtypes + with fields with empty names will have the names replaced by 'f0', 'f1', + etc. Such arrays will not round-trip through the format entirely + accurately. The data is intact; only the field names will differ. We are + working on a fix for this. This fix will not require a change in the + file format. The arrays with such structures can still be saved and + restored, and the correct dtype may be restored by using the + ``loadedarray.view(correct_dtype)`` method. + +File extensions +--------------- + +We recommend using the ``.npy`` and ``.npz`` extensions for files saved +in this format. This is by no means a requirement; applications may wish +to use these file formats but use an extension specific to the +application. In the absence of an obvious alternative, however, +we suggest using ``.npy`` and ``.npz``. + +Version numbering +----------------- + +The version numbering of these formats is independent of NumPy version +numbering. If the format is upgraded, the code in `numpy.io` will still +be able to read and write Version 1.0 files. + +Format Version 1.0 +------------------ + +The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. + +The next 1 byte is an unsigned byte: the major version number of the file +format, e.g. ``\\x01``. + +The next 1 byte is an unsigned byte: the minor version number of the file +format, e.g. ``\\x00``. Note: the version of the file format is not tied +to the version of the numpy package. + +The next 2 bytes form a little-endian unsigned short int: the length of +the header data HEADER_LEN. + +The next HEADER_LEN bytes form the header data describing the array's +format. It is an ASCII string which contains a Python literal expression +of a dictionary. It is terminated by a newline (``\\n``) and padded with +spaces (``\\x20``) to make the total of +``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible +by 64 for alignment purposes. + +The dictionary contains three keys: + + "descr" : dtype.descr + An object that can be passed as an argument to the `numpy.dtype` + constructor to create the array's dtype. + "fortran_order" : bool + Whether the array data is Fortran-contiguous or not. Since + Fortran-contiguous arrays are a common form of non-C-contiguity, + we allow them to be written directly to disk for efficiency. + "shape" : tuple of int + The shape of the array. + +For repeatability and readability, the dictionary keys are sorted in +alphabetic order. This is for convenience only. A writer SHOULD implement +this if possible. A reader MUST NOT depend on this. + +Following the header comes the array data. If the dtype contains Python +objects (i.e. ``dtype.hasobject is True``), then the data is a Python +pickle of the array. Otherwise the data is the contiguous (either C- +or Fortran-, depending on ``fortran_order``) bytes of the array. 
+Consumers can figure out the number of bytes by multiplying the number
+of elements given by the shape (noting that ``shape=()`` means there is
+1 element) by ``dtype.itemsize``.
+
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes.  This can be exceeded by structured arrays with a large number of
+columns.  The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the
+:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
+evolved with time and this document is more current.
+
+"""
+import io
+import os
+import pickle
+import warnings
+
+import numpy
+from numpy._utils import set_module
+from numpy.lib._utils_impl import drop_metadata
+
+__all__ = []
+
+drop_metadata.__module__ = "numpy.lib.format"
+
+EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
+MAGIC_PREFIX = b'\x93NUMPY'
+MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64  # plausible values are powers of 2 between 16 and 4096
+BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
+# allow growth within the address space of a 64 bit machine along one axis
+GROWTH_AXIS_MAX_DIGITS = 21  # = len(str(8*2**64-1)) hypothetical int1 dtype
+
+# difference between version 1.0 and 2.0 is a 4 byte (I) header length
+# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+    (1, 0): ('<H', 'latin1'),
+    (2, 0): ('<I', 'latin1'),
+    (3, 0): ('<I', 'utf8'),
+}
+
+# Python's literal_eval is not actually safe for large inputs, since parsing
+# may become slow or even cause interpreter crashes.
+# This is an arbitrary, low limit which should make it safe in practice.
+_MAX_HEADER_SIZE = 10000
+
+
+def _check_version(version):
+    if version not in [(1, 0), (2, 0), (3, 0), None]:
+        msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
+        raise ValueError(msg % (version,))
+
+
+@set_module("numpy.lib.format")
+def magic(major, minor):
+    """ Return the magic string for the given file format version.
+
+    Parameters
+    ----------
+    major : int in [0, 255]
+    minor : int in [0, 255]
+
+    Returns
+    -------
+    magic : str
+
+    Raises
+    ------
+    ValueError if the version cannot be formatted.
+    """
+    if major < 0 or major > 255:
+        raise ValueError("major version must be 0 <= major < 256")
+    if minor < 0 or minor > 255:
+        raise ValueError("minor version must be 0 <= minor < 256")
+    return MAGIC_PREFIX + bytes([major, minor])
+
+
+@set_module("numpy.lib.format")
+def read_magic(fp):
+    """ Read the magic string to get the version of the file format.
+
+    Parameters
+    ----------
+    fp : filelike object
+
+    Returns
+    -------
+    major : int
+    minor : int
+    """
+    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
+    if magic_str[:-2] != MAGIC_PREFIX:
+        msg = "the magic string is not correct; expected %r, got %r"
+        raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
+    major, minor = magic_str[-2:]
+    return major, minor
+
+
+@set_module("numpy.lib.format")
+def dtype_to_descr(dtype):
+    """
+    Get a serializable descriptor from the dtype.
+
+    The .descr attribute of a dtype object cannot be round-tripped through
+    the dtype() constructor.  Simple types, like dtype('float32'), have
+    a descr which looks like a record array with one field with '' as
+    a name.  The dtype() constructor interprets this as a request to give
+    a default name.  Instead, we construct a descriptor that can be passed to
+    dtype().
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype of the array that will be written to disk.
+
+    Returns
+    -------
+    descr : object
+        An object that can be passed to `numpy.dtype()` in order to
+        replicate the input dtype.
+
+    """
+    # NOTE: that drop_metadata may not return the right dtype e.g. for user
+    #       dtypes.
In that case our code below would fail the same, though. + new_dtype = drop_metadata(dtype) + if new_dtype is not dtype: + warnings.warn("metadata on a dtype is not saved to an npy/npz. " + "Use another format (such as pickle) to store it.", + UserWarning, stacklevel=2) + dtype = new_dtype + + if dtype.names is not None: + # This is a record array. The .descr is fine. XXX: parts of the + # record array with an empty name, like padding bytes, still get + # fiddled with. This needs to be fixed in the C implementation of + # dtype(). + return dtype.descr + elif not type(dtype)._legacy: + # this must be a user-defined dtype since numpy does not yet expose any + # non-legacy dtypes in the public API + # + # non-legacy dtypes don't yet have __array_interface__ + # support. Instead, as a hack, we use pickle to save the array, and lie + # that the dtype is object. When the array is loaded, the descriptor is + # unpickled with the array and the object dtype in the header is + # discarded. + # + # a future NEP should define a way to serialize user-defined + # descriptors and ideally work out the possible security implications + warnings.warn("Custom dtypes are saved as python objects using the " + "pickle protocol. Loading this file requires " + "allow_pickle=True to be set.", + UserWarning, stacklevel=2) + return "|O" + else: + return dtype.str + + +@set_module("numpy.lib.format") +def descr_to_dtype(descr): + """ + Returns a dtype based off the given description. + + This is essentially the reverse of `~lib.format.dtype_to_descr`. It will + remove the valueless padding fields created by, i.e. simple fields like + dtype('float32'), and then convert the description to its corresponding + dtype. + + Parameters + ---------- + descr : object + The object retrieved by dtype.descr. Can be passed to + `numpy.dtype` in order to replicate the input dtype. + + Returns + ------- + dtype : dtype + The dtype constructed by the description. + + """ + if isinstance(descr, str): + # No padding removal needed + return numpy.dtype(descr) + elif isinstance(descr, tuple): + # subtype, will always have a shape descr[1] + dt = descr_to_dtype(descr[0]) + return numpy.dtype((dt, descr[1])) + + titles = [] + names = [] + formats = [] + offsets = [] + offset = 0 + for field in descr: + if len(field) == 2: + name, descr_str = field + dt = descr_to_dtype(descr_str) + else: + name, descr_str, shape = field + dt = numpy.dtype((descr_to_dtype(descr_str), shape)) + + # Ignore padding bytes, which will be void bytes with '' as name + # Once support for blank names is removed, only "if name == ''" needed) + is_pad = (name == '' and dt.type is numpy.void and dt.names is None) + if not is_pad: + title, name = name if isinstance(name, tuple) else (None, name) + titles.append(title) + names.append(name) + formats.append(dt) + offsets.append(offset) + offset += dt.itemsize + + return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, + 'offsets': offsets, 'itemsize': offset}) + + +@set_module("numpy.lib.format") +def header_data_from_array_1_0(array): + """ Get the dictionary of header metadata from a numpy.ndarray. + + Parameters + ---------- + array : numpy.ndarray + + Returns + ------- + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + """ + d = {'shape': array.shape} + if array.flags.c_contiguous: + d['fortran_order'] = False + elif array.flags.f_contiguous: + d['fortran_order'] = True + else: + # Totally non-contiguous data. 
We will have to make it C-contiguous
+        # before writing.  Note that we need to test for C_CONTIGUOUS first
+        # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
+        d['fortran_order'] = False
+
+    d['descr'] = dtype_to_descr(array.dtype)
+    return d
+
+
+def _wrap_header(header, version):
+    """
+    Takes a stringified header, and attaches the prefix and padding to it
+    """
+    import struct
+    assert version is not None
+    fmt, encoding = _header_size_info[version]
+    header = header.encode(encoding)
+    hlen = len(header) + 1
+    padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
+    try:
+        header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
+    except struct.error:
+        msg = f"Header length {hlen} too big for version={version}"
+        raise ValueError(msg) from None
+
+    # Pad the header with spaces and a final newline such that the magic
+    # string, the header-length short and the header are aligned on a
+    # ARRAY_ALIGN byte boundary.  This supports memory mapping of dtypes
+    # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+    # offset must be page-aligned (i.e. the beginning of the file).
+    return header_prefix + header + b' ' * padlen + b'\n'
+
+
+def _wrap_header_guess_version(header):
+    """
+    Like `_wrap_header`, but chooses an appropriate version given the contents
+    """
+    try:
+        return _wrap_header(header, (1, 0))
+    except ValueError:
+        pass
+
+    try:
+        ret = _wrap_header(header, (2, 0))
+    except UnicodeEncodeError:
+        pass
+    else:
+        warnings.warn("Stored array in format 2.0. It can only be "
+                      "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+        return ret
+
+    header = _wrap_header(header, (3, 0))
+    warnings.warn("Stored array in format 3.0. It can only be "
+                  "read by NumPy >= 1.17", UserWarning, stacklevel=2)
+    return header
+
+
+def _write_array_header(fp, d, version=None):
+    """ Write the header for an array and returns the version used
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string representation
+        to the header of the file.
+    version : tuple or None
+        None means use oldest that works. Providing an explicit version will
+        raise a ValueError if the format does not allow saving this data.
+        Default: None
+    """
+    header = ["{"]
+    for key, value in sorted(d.items()):
+        # Need to use repr here, since we eval these when reading
+        header.append(f"'{key}': {repr(value)}, ")
+    header.append("}")
+    header = "".join(header)
+
+    # Add some spare space so that the array header can be modified in-place
+    # when changing the array size, e.g. when growing it by appending data at
+    # the end.
+    shape = d['shape']
+    header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
+        shape[-1 if d['fortran_order'] else 0]
+    ))) if len(shape) > 0 else 0)
+
+    if version is None:
+        header = _wrap_header_guess_version(header)
+    else:
+        header = _wrap_header(header, version)
+    fp.write(header)
+
+
+@set_module("numpy.lib.format")
+def write_array_header_1_0(fp, d):
+    """ Write the header for an array using the 1.0 format.
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string
+        representation to the header of the file.
+    """
+    _write_array_header(fp, d, (1, 0))
+
+
+@set_module("numpy.lib.format")
+def write_array_header_2_0(fp, d):
+    """ Write the header for an array using the 2.0 format.
+        The 2.0 format allows storing very large structured arrays.
+ + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (2, 0)) + + +@set_module("numpy.lib.format") +def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 1.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(1, 0), max_header_size=max_header_size) + + +@set_module("numpy.lib.format") +def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 2.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(2, 0), max_header_size=max_header_size) + + +def _filter_header(s): + """Clean up 'L' in npz header ints. + + Cleans up the 'L' in strings representing integers. Needed to allow npz + headers produced in Python2 to be read in Python3. + + Parameters + ---------- + s : string + Npy file header. + + Returns + ------- + header : str + Cleaned up header. + + """ + import tokenize + from io import StringIO + + tokens = [] + last_token_was_number = False + for token in tokenize.generate_tokens(StringIO(s).readline): + token_type = token[0] + token_string = token[1] + if (last_token_was_number and + token_type == tokenize.NAME and + token_string == "L"): + continue + else: + tokens.append(token) + last_token_was_number = (token_type == tokenize.NUMBER) + return tokenize.untokenize(tokens) + + +def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): + """ + see read_array_header_1_0 + """ + # Read an unsigned, little-endian short int which has the length of the + # header.
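+ # As a concrete illustration (an editorial sketch assuming only
+ # ``np.save`` and the standard library), the prefix written for
+ # version (1, 0) unpacks by hand as:
+ #
+ #     >>> import io, struct
+ #     >>> import numpy as np
+ #     >>> buf = io.BytesIO()
+ #     >>> np.save(buf, np.arange(3))
+ #     >>> raw = buf.getvalue()
+ #     >>> raw[:6], (raw[6], raw[7])    # magic prefix, (major, minor)
+ #     (b'\x93NUMPY', (1, 0))
+ #     >>> hlen = struct.unpack('<H', raw[8:10])[0]  # '<H' for (1, 0)
+ #     >>> (10 + hlen) % 64   # magic + length + header pads to ARRAY_ALIGN
+ #     0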
+ import ast + import struct + hinfo = _header_size_info.get(version) + if hinfo is None: + raise ValueError(f"Invalid version {version!r}") + hlength_type, encoding = hinfo + + hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") + header_length = struct.unpack(hlength_type, hlength_str)[0] + header = _read_bytes(fp, header_length, "array header") + header = header.decode(encoding) + if len(header) > max_header_size: + raise ValueError( + f"Header info length ({len(header)}) is large and may not be safe " + "to load securely.\n" + "To allow loading, adjust `max_header_size` or fully trust " + "the `.npy` file using `allow_pickle=True`.\n" + "For safety against large resource use or crashes, sandboxing " + "may be necessary.") + + # The header is a pretty-printed string representation of a literal + # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte + # boundary. The keys are strings. + # "shape" : tuple of int + # "fortran_order" : bool + # "descr" : dtype.descr + # Versions (2, 0) and (1, 0) could have been created by a Python 2 + # implementation before header filtering was implemented. + # + # For performance reasons, we try without _filter_header first though + try: + d = ast.literal_eval(header) + except SyntaxError as e: + if version <= (2, 0): + header = _filter_header(header) + try: + d = ast.literal_eval(header) + except SyntaxError as e2: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e2 + else: + warnings.warn( + "Reading `.npy` or `.npz` file required additional " + "header parsing as it was created on Python 2. Save the " + "file again to speed up loading and avoid this warning.", + UserWarning, stacklevel=4) + else: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e + if not isinstance(d, dict): + msg = "Header is not a dictionary: {!r}" + raise ValueError(msg.format(d)) + + if EXPECTED_KEYS != d.keys(): + keys = sorted(d.keys()) + msg = "Header does not contain the correct keys: {!r}" + raise ValueError(msg.format(keys)) + + # Sanity-check the values. + if (not isinstance(d['shape'], tuple) or + not all(isinstance(x, int) for x in d['shape'])): + msg = "shape is not valid: {!r}" + raise ValueError(msg.format(d['shape'])) + if not isinstance(d['fortran_order'], bool): + msg = "fortran_order is not a valid bool: {!r}" + raise ValueError(msg.format(d['fortran_order'])) + try: + dtype = descr_to_dtype(d['descr']) + except TypeError as e: + msg = "descr is not a valid dtype descriptor: {!r}" + raise ValueError(msg.format(d['descr'])) from e + + return d['shape'], d['fortran_order'], dtype + + +@set_module("numpy.lib.format") +def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): + """ + Write an array to an NPY file, including a header. + + If the array is neither C-contiguous nor Fortran-contiguous AND the + file_like object is not a real file object, this function will have to + copy data in memory. + + Parameters + ---------- + fp : file_like object + An open, writable file object, or similar object with a + ``.write()`` method. + array : ndarray + The array to write to disk. + version : (int, int) or None, optional + The version number of the format. None means use the oldest + supported version that is able to store the data. Default: None + allow_pickle : bool, optional + Whether to allow writing pickled data. 
Default: True + pickle_kwargs : dict, optional + Additional keyword arguments to pass to pickle.dump, excluding + 'protocol'. These are only useful when pickling objects in object + arrays to a Python 2 compatible format. + + Raises + ------ + ValueError + If the array cannot be persisted. This includes the case of + allow_pickle=False and array being an object array. + Various other errors + If the array contains Python objects as part of its dtype, the + process of pickling them may raise various errors if the objects + are not picklable. + + """ + _check_version(version) + _write_array_header(fp, header_data_from_array_1_0(array), version) + + if array.itemsize == 0: + buffersize = 0 + else: + # Set buffer size to 16 MiB to hide the Python loop overhead. + buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) + + dtype_class = type(array.dtype) + + if array.dtype.hasobject or not dtype_class._legacy: + # We contain Python objects so we cannot write out the data + # directly. Instead, we will pickle it out + if not allow_pickle: + if array.dtype.hasobject: + raise ValueError("Object arrays cannot be saved when " + "allow_pickle=False") + if not dtype_class._legacy: + raise ValueError("User-defined dtypes cannot be saved " + "when allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + pickle.dump(array, fp, protocol=4, **pickle_kwargs) + elif array.flags.f_contiguous and not array.flags.c_contiguous: + if isfileobj(fp): + array.T.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='F'): + fp.write(chunk.tobytes('C')) + elif isfileobj(fp): + array.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='C'): + fp.write(chunk.tobytes('C')) + + +@set_module("numpy.lib.format") +def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Read an array from an NPY file. + + Parameters + ---------- + fp : file_like object + If this is not a real file object, then this may take extra memory + and time. + allow_pickle : bool, optional + Whether to allow reading pickled data. Default: False + pickle_kwargs : dict + Additional keyword arguments to pass to pickle.load. These are only + useful when loading object arrays saved on Python 2. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + array : ndarray + The array from the data on disk. + + Raises + ------ + ValueError + If the data is invalid, or allow_pickle=False and the file contains + an object array. + + """ + if allow_pickle: + # Effectively ignore max_header_size, since `allow_pickle` indicates + # that the input is fully trusted. + max_header_size = 2**64 + + version = read_magic(fp) + _check_version(version) + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if len(shape) == 0: + count = 1 + else: + count = numpy.multiply.reduce(shape, dtype=numpy.int64) + + # Now read the actual data. + if dtype.hasobject: + # The array contained Python objects. We need to unpickle the data.
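+ # A short sketch of the resulting behaviour, via the public
+ # np.save/np.load wrappers around this function (the error message
+ # mirrors the check just below):
+ #
+ #     >>> import io
+ #     >>> import numpy as np
+ #     >>> buf = io.BytesIO()
+ #     >>> np.save(buf, np.array([{'a': 1}], dtype=object))
+ #     >>> _ = buf.seek(0)
+ #     >>> np.load(buf)           # allow_pickle defaults to False
+ #     Traceback (most recent call last):
+ #         ...
+ #     ValueError: Object arrays cannot be loaded when allow_pickle=False
+ #     >>> _ = buf.seek(0)
+ #     >>> np.load(buf, allow_pickle=True)
+ #     array([{'a': 1}], dtype=object)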
+ if not allow_pickle: + raise ValueError("Object arrays cannot be loaded when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + try: + array = pickle.load(fp, **pickle_kwargs) + except UnicodeError as err: + # Friendlier error message + raise UnicodeError("Unpickling a python object failed: %r\n" + "You may need to pass the encoding= option " + "to numpy.load" % (err,)) from err + else: + if isfileobj(fp): + # We can use the fast fromfile() function. + array = numpy.fromfile(fp, dtype=dtype, count=count) + else: + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + + # Use np.ndarray instead of np.empty since the latter does + # not correctly instantiate zero-width string dtypes; see + # https://github.com/numpy/numpy/pull/6430 + array = numpy.ndarray(count, dtype=dtype) + + if dtype.itemsize > 0: + # If dtype.itemsize == 0 then there's nothing more to read + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i + read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if array.size != count: + raise ValueError( + "Failed to read all data for array. " + f"Expected {shape} = {count} elements, " + f"could only read {array.size} elements. " + "(file seems not fully written?)" + ) + + if fortran_order: + array.shape = shape[::-1] + array = array.transpose() + else: + array.shape = shape + + return array + + +@set_module("numpy.lib.format") +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str or path-like + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. + dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. None means use the oldest + supported version that is able to store the data. Default: None + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. 
+ See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + marray : memmap + The memory-mapped array. + + Raises + ------ + ValueError + If the data or the mode is invalid. + OSError + If the file is not found or cannot be opened correctly. + + See Also + -------- + numpy.memmap + + """ + if isfileobj(filename): + raise ValueError("Filename must be a string or a path-like object." + " Memmap cannot use existing file handles.") + + if 'w' in mode: + # We are creating the file, not reading it. + # Check if we ought to create the file. + _check_version(version) + # Ensure that the given dtype is an authentic dtype object rather + # than just something that can be interpreted as a dtype object. + dtype = numpy.dtype(dtype) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + d = { + "descr": dtype_to_descr(dtype), + "fortran_order": fortran_order, + "shape": shape, + } + # If we got here, then it should be safe to create the file. + with open(os.fspath(filename), mode + 'b') as fp: + _write_array_header(fp, d, version) + offset = fp.tell() + else: + # Read the header of the file first. + with open(os.fspath(filename), 'rb') as fp: + version = read_magic(fp) + _check_version(version) + + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + offset = fp.tell() + + if fortran_order: + order = 'F' + else: + order = 'C' + + # We need to change a write-only mode to a read-write mode since we've + # already written data to the file. + if mode == 'w+': + mode = 'r+' + + marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, + mode=mode, offset=offset) + + return marray + + +def _read_bytes(fp, size, error_template="ran out of data"): + """ + Read from file-like object until size bytes are read. + Raises ValueError if EOF is encountered before size bytes are read. + Non-blocking objects are only supported if they derive from io objects. + + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. + """ + data = b"" + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data + + +@set_module("numpy.lib.format") +def isfileobj(f): + if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): + return False + try: + # BufferedReader/Writer may raise OSError when + # fetching `fileno()` (e.g. when wrapping BytesIO).
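+ # (An editorial sketch of the distinction this function draws:
+ # in-memory pseudo-files fail the isinstance check or lack an
+ # OS-level fileno, which is why write_array/read_array fall back to
+ # chunked .write()/.read() for them.
+ #
+ #     >>> import io, tempfile
+ #     >>> from numpy.lib.format import isfileobj
+ #     >>> isfileobj(io.BytesIO())
+ #     False
+ #     >>> with tempfile.TemporaryFile() as f:
+ #     ...     isfileobj(f)
+ #     True
+ # )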
+ f.fileno() + return True + except OSError: + return False diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.pyi new file mode 100644 index 0000000..f4898d9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.pyi @@ -0,0 +1,26 @@ +from typing import Final, Literal + +from numpy.lib._utils_impl import drop_metadata # noqa: F401 + +__all__: list[str] = [] + +EXPECTED_KEYS: Final[set[str]] +MAGIC_PREFIX: Final[bytes] +MAGIC_LEN: Literal[8] +ARRAY_ALIGN: Literal[64] +BUFFER_SIZE: Literal[262144] # 2**18 +GROWTH_AXIS_MAX_DIGITS: Literal[21] + +def magic(major, minor): ... +def read_magic(fp): ... +def dtype_to_descr(dtype): ... +def descr_to_dtype(descr): ... +def header_data_from_array_1_0(array): ... +def write_array_header_1_0(fp, d): ... +def write_array_header_2_0(fp, d): ... +def read_array_header_1_0(fp): ... +def read_array_header_2_0(fp): ... +def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... +def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... +def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... +def isfileobj(f): ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py new file mode 100644 index 0000000..9ee5944 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py @@ -0,0 +1,5844 @@ +import builtins +import collections.abc +import functools +import re +import sys +import warnings + +import numpy as np +import numpy._core.numeric as _nx +from numpy._core import overrides, transpose +from numpy._core._multiarray_umath import _array_converter +from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum +from numpy._core.multiarray import _monotonicity, _place, bincount, normalize_axis_index +from numpy._core.multiarray import interp as compiled_interp +from numpy._core.multiarray import interp_complex as compiled_interp_complex +from numpy._core.numeric import ( + absolute, + arange, + array, + asanyarray, + asarray, + concatenate, + dot, + empty, + integer, + intp, + isscalar, + ndarray, + ones, + take, + where, + zeros_like, +) +from numpy._core.numerictypes import typecodes +from numpy._core.umath import ( + add, + arctan2, + cos, + exp, + frompyfunc, + less_equal, + minimum, + mod, + not_equal, + pi, + sin, + sqrt, + subtract, +) +from numpy._utils import set_module + +# needed in this module for compatibility +from numpy.lib._histograms_impl import histogram, histogramdd # noqa: F401 +from numpy.lib._twodim_base_impl import diag + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', + 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'flip', + 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', + 'bincount', 'digitize', 'cov', 'corrcoef', + 'median', 'sinc', 'hamming', 'hanning', 'bartlett', + 'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0', + 'meshgrid', 'delete', 'insert', 'append', 'interp', + 'quantile' + ] + +# _QuantileMethods is a dictionary listing all the supported methods to +# compute quantile/percentile. +# +# Below virtual_index refers to the index of the element where the percentile +# would be found in the sorted sample. 
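+# (For instance -- an editorial sketch using the public np.quantile -- with a
+# sorted sample of size n = 4 and q = 0.4, 'linear' uses virtual_index =
+# (n - 1) * q = 1.2, i.e. it interpolates between the elements at indices 1
+# and 2, while the discrete methods round that fractional index instead:
+#
+#     >>> import numpy as np
+#     >>> a = np.array([1., 2., 3., 4.])
+#     >>> [float(np.quantile(a, 0.4, method=m))
+#     ...  for m in ('linear', 'lower', 'higher', 'nearest', 'midpoint')]
+#     [2.2, 2.0, 3.0, 2.0, 2.5]
+# )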
+# When the sample contains exactly the percentile wanted, the virtual_index is +# an integer equal to the index of this element. +# When the percentile wanted is in between two elements, the virtual_index +# is made of an integer part (a.k.a 'i' or 'left') and a fractional part +# (a.k.a 'g' or 'gamma') +# +# Each method in _QuantileMethods has two properties: +# get_virtual_index : Callable +# The function used to compute the virtual_index. +# fix_gamma : Callable +# A function used for discrete methods to force the index to a specific value. +_QuantileMethods = { + # --- HYNDMAN and FAN METHODS + # Discrete methods + 'inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles), # noqa: PLW0108 + 'fix_gamma': None, # should never be called + }, + 'averaged_inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: (n * quantiles) - 1, + 'fix_gamma': lambda gamma, _: _get_gamma_mask( + shape=gamma.shape, + default_value=1., + conditioned_value=0.5, + where=gamma == 0), + }, + 'closest_observation': { + 'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles), # noqa: PLW0108 + 'fix_gamma': None, # should never be called + }, + # Continuous methods + 'interpolated_inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0, 1), + 'fix_gamma': lambda gamma, _: gamma, + }, + 'hazen': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0.5, 0.5), + 'fix_gamma': lambda gamma, _: gamma, + }, + 'weibull': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0, 0), + 'fix_gamma': lambda gamma, _: gamma, + }, + # Default method. + # To avoid some rounding issues, `(n-1) * quantiles` is preferred to + # `_compute_virtual_index(n, quantiles, 1, 1)`. + # They are mathematically equivalent. + 'linear': { + 'get_virtual_index': lambda n, quantiles: (n - 1) * quantiles, + 'fix_gamma': lambda gamma, _: gamma, + }, + 'median_unbiased': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0), + 'fix_gamma': lambda gamma, _: gamma, + }, + 'normal_unbiased': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0), + 'fix_gamma': lambda gamma, _: gamma, + }, + # --- OTHER METHODS + 'lower': { + 'get_virtual_index': lambda n, quantiles: np.floor( + (n - 1) * quantiles).astype(np.intp), + 'fix_gamma': None, # should never be called, index dtype is int + }, + 'higher': { + 'get_virtual_index': lambda n, quantiles: np.ceil( + (n - 1) * quantiles).astype(np.intp), + 'fix_gamma': None, # should never be called, index dtype is int + }, + 'midpoint': { + 'get_virtual_index': lambda n, quantiles: 0.5 * ( + np.floor((n - 1) * quantiles) + + np.ceil((n - 1) * quantiles)), + 'fix_gamma': lambda gamma, index: _get_gamma_mask( + shape=gamma.shape, + default_value=0.5, + conditioned_value=0., + where=index % 1 == 0), + }, + 'nearest': { + 'get_virtual_index': lambda n, quantiles: np.around( + (n - 1) * quantiles).astype(np.intp), + 'fix_gamma': None, + # should never be called, index dtype is int + }} + + +def _rot90_dispatcher(m, k=None, axes=None): + return (m,) + + +@array_function_dispatch(_rot90_dispatcher) +def rot90(m, k=1, axes=(0, 1)): + """ + Rotate an array by 90 degrees in the plane specified by axes. + + Rotation direction is from the first towards the second axis. + This means for a 2D array with the default `k` and `axes`, the + rotation will be counterclockwise.
+ + Parameters + ---------- + m : array_like + Array of two or more dimensions. + k : integer + Number of times the array is rotated by 90 degrees. + axes : (2,) array_like + The array is rotated in the plane defined by the axes. + Axes must be different. + + Returns + ------- + y : ndarray + A rotated view of `m`. + + See Also + -------- + flip : Reverse the order of elements in an array along the given axis. + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. + + Notes + ----- + ``rot90(m, k=1, axes=(1,0))`` is the reverse of + ``rot90(m, k=1, axes=(0,1))`` + + ``rot90(m, k=1, axes=(1,0))`` is equivalent to + ``rot90(m, k=-1, axes=(0,1))`` + + Examples + -------- + >>> import numpy as np + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + >>> m = np.arange(8).reshape((2,2,2)) + >>> np.rot90(m, 1, (1,2)) + array([[[1, 3], + [0, 2]], + [[5, 7], + [4, 6]]]) + + """ + axes = tuple(axes) + if len(axes) != 2: + raise ValueError("len(axes) must be 2.") + + m = asanyarray(m) + + if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: + raise ValueError("Axes must be different.") + + if (axes[0] >= m.ndim or axes[0] < -m.ndim + or axes[1] >= m.ndim or axes[1] < -m.ndim): + raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.") + + k %= 4 + + if k == 0: + return m[:] + if k == 2: + return flip(flip(m, axes[0]), axes[1]) + + axes_list = arange(0, m.ndim) + (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], + axes_list[axes[0]]) + + if k == 1: + return transpose(flip(m, axes[1]), axes_list) + else: + # k == 3 + return flip(transpose(m, axes_list), axes[1]) + + +def _flip_dispatcher(m, axis=None): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def flip(m, axis=None): + """ + Reverse the order of elements in an array along the given axis. + + The shape of the array is preserved, but the elements are reordered. + + Parameters + ---------- + m : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes along which to flip over. The default, + axis=None, will flip over all of the axes of the input array. + If axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + + Returns + ------- + out : array_like + A view of `m` with the entries of axis reversed. Since a view is + returned, this operation is done in constant time. + + See Also + -------- + flipud : Flip an array vertically (axis=0). + fliplr : Flip an array horizontally (axis=1). + + Notes + ----- + flip(m, 0) is equivalent to flipud(m). + + flip(m, 1) is equivalent to fliplr(m). + + flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. + + flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all + positions. + + flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at + position 0 and position 1. 
+ + Examples + -------- + >>> import numpy as np + >>> A = np.arange(8).reshape((2,2,2)) + >>> A + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.flip(A, 0) + array([[[4, 5], + [6, 7]], + [[0, 1], + [2, 3]]]) + >>> np.flip(A, 1) + array([[[2, 3], + [0, 1]], + [[6, 7], + [4, 5]]]) + >>> np.flip(A) + array([[[7, 6], + [5, 4]], + [[3, 2], + [1, 0]]]) + >>> np.flip(A, (0, 2)) + array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(3,4,5)) + >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) + True + """ + if not hasattr(m, 'ndim'): + m = asarray(m) + if axis is None: + indexer = (np.s_[::-1],) * m.ndim + else: + axis = _nx.normalize_axis_tuple(axis, m.ndim) + indexer = [np.s_[:]] * m.ndim + for ax in axis: + indexer[ax] = np.s_[::-1] + indexer = tuple(indexer) + return m[indexer] + + +@set_module('numpy') +def iterable(y): + """ + Check whether or not an object can be iterated over. + + Parameters + ---------- + y : object + Input object. + + Returns + ------- + b : bool + Return ``True`` if the object has an iterator method or is a + sequence and ``False`` otherwise. + + + Examples + -------- + >>> import numpy as np + >>> np.iterable([1, 2, 3]) + True + >>> np.iterable(2) + False + + Notes + ----- + In most cases, the results of ``np.iterable(obj)`` are consistent with + ``isinstance(obj, collections.abc.Iterable)``. One notable exception is + the treatment of 0-dimensional arrays:: + + >>> from collections.abc import Iterable + >>> a = np.array(1.0) # 0-dimensional numpy array + >>> isinstance(a, Iterable) + True + >>> np.iterable(a) + False + + """ + try: + iter(y) + except TypeError: + return False + return True + + +def _weights_are_valid(weights, a, axis): + """Validate weights array. + + We assume, weights is not None. + """ + wgt = np.asanyarray(weights) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.shape != tuple(a.shape[ax] for ax in axis): + raise ValueError( + "Shape of weights must be consistent with " + "shape of a along specified axis.") + + # setup wgt to broadcast along axis + wgt = wgt.transpose(np.argsort(axis)) + wgt = wgt.reshape(tuple((s if ax in axis else 1) + for ax, s in enumerate(a.shape))) + return wgt + + +def _average_dispatcher(a, axis=None, weights=None, returned=None, *, + keepdims=None): + return (a, weights) + + +@array_function_dispatch(_average_dispatcher) +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Compute the weighted average along the specified axis. + + Parameters + ---------- + a : array_like + Array containing data to be averaged. If `a` is not an array, a + conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. The default, + `axis=None`, will average over all of the elements of the input array. + If axis is negative it counts from the last to the first axis. + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The array of weights must be the same shape as `a` if no axis is + specified, otherwise the weights must have dimensions and shape + consistent with `a` along the specified axis. 
+ If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + The calculation is:: + + avg = sum(a * weights) / sum(weights) + + where the sum is over all included elements. + The only constraint on the values of `weights` is that `sum(weights)` + must not be 0. + returned : bool, optional + Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) + is returned, otherwise only the average is returned. + If `weights=None`, `sum_of_weights` is equivalent to the number of + elements over which the average is taken. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + *Note:* `keepdims` will not work with instances of `numpy.matrix` + or other classes whose methods do not support `keepdims`. + + .. versionadded:: 1.23.0 + + Returns + ------- + retval, [sum_of_weights] : array_type or double + Return the average along the specified axis. When `returned` is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. `sum_of_weights` is of the + same type as `retval`. The result dtype follows a general pattern. + If `weights` is None, the result dtype will be that of `a`, or ``float64`` + if `a` is integral. Otherwise, if `weights` is not None and `a` is + non-integral, the result type will be the type of lowest precision capable of + representing values of both `a` and `weights`. If `a` happens to be + integral, the previous rules still apply but the result dtype will + at least be ``float64``. + + Raises + ------ + ZeroDivisionError + When all weights along axis are zero. See `numpy.ma.average` for a + version robust to this type of error. + TypeError + When `weights` does not have the same shape as `a`, and `axis=None`. + ValueError + When `weights` does not have dimensions and shape consistent with `a` + along specified `axis`. + + See Also + -------- + mean + + ma.average : average for masked arrays -- useful if your data contains + "missing" values + numpy.result_type : Returns the type that results from applying the + numpy type promotion rules to the arguments. + + Examples + -------- + >>> import numpy as np + >>> data = np.arange(1, 5) + >>> data + array([1, 2, 3, 4]) + >>> np.average(data) + 2.5 + >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) + 4.0 + + >>> data = np.arange(6).reshape((3, 2)) + >>> data + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.average(data, axis=1, weights=[1./4, 3./4]) + array([0.75, 2.75, 4.75]) + >>> np.average(data, weights=[1./4, 3./4]) + Traceback (most recent call last): + ... + TypeError: Axis must be specified when shapes of a and weights differ. + + With ``keepdims=True``, the following result has shape (3, 1). + + >>> np.average(data, axis=1, keepdims=True) + array([[0.5], + [2.5], + [4.5]]) + + >>> data = np.arange(8).reshape((2, 2, 2)) + >>> data + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]]) + array([3.4, 4.4]) + >>> np.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]]) + Traceback (most recent call last): + ... + ValueError: Shape of weights must be consistent + with shape of a along specified axis.
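+
+ The dtype rules above can be checked directly; integer data with integer
+ weights still produces a ``float64`` result (an editorial sketch):
+
+ >>> np.average(np.arange(5), weights=np.ones(5, dtype=np.int64)).dtype
+ dtype('float64')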
+ """ + a = np.asanyarray(a) + + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + + if keepdims is np._NoValue: + # Don't pass on the keepdims argument if one wasn't given. + keepdims_kw = {} + else: + keepdims_kw = {'keepdims': keepdims} + + if weights is None: + avg = a.mean(axis, **keepdims_kw) + avg_as_array = np.asanyarray(avg) + scl = avg_as_array.dtype.type(a.size / avg_as_array.size) + else: + wgt = _weights_are_valid(weights=weights, a=a, axis=axis) + + if issubclass(a.dtype.type, (np.integer, np.bool)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) + if np.any(scl == 0.0): + raise ZeroDivisionError( + "Weights sum to zero, can't be normalized") + + avg = avg_as_array = np.multiply(a, wgt, + dtype=result_dtype).sum(axis, **keepdims_kw) / scl + + if returned: + if scl.shape != avg_as_array.shape: + scl = np.broadcast_to(scl, avg_as_array.shape).copy() + return avg, scl + else: + return avg + + +@set_module('numpy') +def asarray_chkfinite(a, dtype=None, order=None): + """Convert the input to an array, checking for NaNs or Infs. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. Success requires no NaNs or Infs. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'C'. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + Raises + ------ + ValueError + Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). + + See Also + -------- + asarray : Create and array. + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + >>> import numpy as np + + Convert a list into an array. If all elements are finite, then + ``asarray_chkfinite`` is identical to ``asarray``. + + >>> a = [1, 2] + >>> np.asarray_chkfinite(a, dtype=float) + array([1., 2.]) + + Raises ValueError if array_like contains Nans or Infs. + + >>> a = [1, 2, np.inf] + >>> try: + ... np.asarray_chkfinite(a) + ... except ValueError: + ... print('ValueError') + ... + ValueError + + """ + a = asarray(a, dtype=dtype, order=order) + if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): + raise ValueError( + "array must not contain infs or NaNs") + return a + + +def _piecewise_dispatcher(x, condlist, funclist, *args, **kw): + yield x + # support the undocumented behavior of allowing scalars + if np.iterable(condlist): + yield from condlist + + +@array_function_dispatch(_piecewise_dispatcher) +def piecewise(x, condlist, funclist, *args, **kw): + """ + Evaluate a piecewise-defined function. 
+ + Given a set of conditions and corresponding functions, evaluate each + function on the input data wherever its condition is true. + + Parameters + ---------- + x : ndarray or scalar + The input domain. + condlist : list of bool arrays or bool scalars + Each boolean array corresponds to a function in `funclist`. Wherever + `condlist[i]` is True, `funclist[i](x)` is used as the output value. + + Each boolean array in `condlist` selects a piece of `x`, + and should therefore be of the same shape as `x`. + + The length of `condlist` must correspond to that of `funclist`. + If one extra function is given, i.e. if + ``len(funclist) == len(condlist) + 1``, then that extra function + is the default value, used wherever all conditions are false. + funclist : list of callables, f(x,*args,**kw), or scalars + Each function is evaluated over `x` wherever its corresponding + condition is True. It should take a 1d array as input and give a 1d + array or a scalar value as output. If, instead of a callable, + a scalar is provided then a constant function (``lambda x: scalar``) is + assumed. + args : tuple, optional + Any further arguments given to `piecewise` are passed to the functions + upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then + each function is called as ``f(x, 1, 'a')``. + kw : dict, optional + Keyword arguments used in calling `piecewise` are passed to the + functions upon execution, i.e., if called + ``piecewise(..., ..., alpha=1)``, then each function is called as + ``f(x, alpha=1)``. + + Returns + ------- + out : ndarray + The output is the same shape and type as x and is found by + calling the functions in `funclist` on the appropriate portions of `x`, + as defined by the boolean arrays in `condlist`. Portions not covered + by any condition have a default value of 0. + + + See Also + -------- + choose, select, where + + Notes + ----- + This is similar to choose or select, except that functions are + evaluated on elements of `x` that satisfy the corresponding condition from + `condlist`. + + The result is:: + + |-- + |funclist[0](x[condlist[0]]) + out = |funclist[1](x[condlist[1]]) + |... + |funclist[n2](x[condlist[n2]]) + |-- + + Examples + -------- + >>> import numpy as np + + Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. + + >>> x = np.linspace(-2.5, 2.5, 6) + >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) + array([-1., -1., -1., 1., 1., 1.]) + + Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for + ``x >= 0``. + + >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) + + Apply the same function to a scalar value. + + >>> y = -2 + >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) + array(2) + + """ + x = asanyarray(x) + n2 = len(funclist) + + # undocumented: single condition is promoted to a list of one condition + if isscalar(condlist) or ( + not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): + condlist = [condlist] + + condlist = asarray(condlist, dtype=bool) + n = len(condlist) + + if n == n2 - 1: # compute the "otherwise" condition.
+ condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) + n += 1 + elif n != n2: + raise ValueError( + f"with {n} condition(s), either {n} or {n + 1} functions are expected" + ) + + y = zeros_like(x) + for cond, func in zip(condlist, funclist): + if not isinstance(func, collections.abc.Callable): + y[cond] = func + else: + vals = x[cond] + if vals.size > 0: + y[cond] = func(vals, *args, **kw) + + return y + + +def _select_dispatcher(condlist, choicelist, default=None): + yield from condlist + yield from choicelist + + +@array_function_dispatch(_select_dispatcher) +def select(condlist, choicelist, default=0): + """ + Return an array drawn from elements in choicelist, depending on conditions. + + Parameters + ---------- + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : scalar, optional + The element inserted in `output` when all conditions evaluate to False. + + Returns + ------- + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. + + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal + + Examples + -------- + >>> import numpy as np + + Beginning with an array of integers from 0 to 5 (inclusive), + elements less than ``3`` are negated, elements greater than ``3`` + are squared, and elements not meeting either of these conditions + (exactly ``3``) are replaced with a `default` value of ``42``. + + >>> x = np.arange(6) + >>> condlist = [x<3, x>3] + >>> choicelist = [-x, x**2] + >>> np.select(condlist, choicelist, 42) + array([ 0, -1, -2, 42, 16, 25]) + + When multiple conditions are satisfied, the first one encountered in + `condlist` is used. + + >>> condlist = [x<=4, x>3] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist, 55) + array([ 0, 1, 2, 3, 4, 25]) + + """ + # Check the size of condlist and choicelist are the same, or abort. + if len(condlist) != len(choicelist): + raise ValueError( + 'list of cases must be same length as list of conditions') + + # Now that the dtype is known, handle the deprecated select([], []) case + if len(condlist) == 0: + raise ValueError("select with an empty condition list is not possible") + + # TODO: This preserves the Python int, float, complex manually to get the + # right `result_type` with NEP 50. Most likely we will grow a better + # way to spell this (and this can be replaced). + choicelist = [ + choice if type(choice) in (int, float, complex) else np.asarray(choice) + for choice in choicelist] + choicelist.append(default if type(default) in (int, float, complex) + else np.asarray(default)) + + try: + dtype = np.result_type(*choicelist) + except TypeError as e: + msg = f'Choicelist and default value do not have a common dtype: {e}' + raise TypeError(msg) from None + + # Convert conditions to arrays and broadcast conditions and choices + # as the shape is needed for the result. Doing it separately optimizes + # for example when all choices are scalars. 
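+ # (A sketch of the scalar-choice case this optimizes for:
+ #
+ #     >>> np.select([np.array([True, False, False])], [1], default=7)
+ #     array([1, 7, 7])
+ #
+ # the scalar choice and default broadcast to the condition's shape.)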
+ condlist = np.broadcast_arrays(*condlist) + choicelist = np.broadcast_arrays(*choicelist) + + # If cond array is not an ndarray in boolean format or scalar bool, abort. + for i, cond in enumerate(condlist): + if cond.dtype.type is not np.bool: + raise TypeError( + f'invalid entry {i} in condlist: should be boolean ndarray') + + if choicelist[0].ndim == 0: + # This may be common, so avoid the call. + result_shape = condlist[0].shape + else: + result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape + + result = np.full(result_shape, choicelist[-1], dtype) + + # Use np.copyto to burn each choicelist array onto result, using the + # corresponding condlist as a boolean mask. This is done in reverse + # order since the first choice should take precedence. + choicelist = choicelist[-2::-1] + condlist = condlist[::-1] + for choice, cond in zip(choicelist, condlist): + np.copyto(result, choice, where=cond) + + return result + + +def _copy_dispatcher(a, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_copy_dispatcher) +def copy(a, order='K', subok=False): + """ + Return an array copy of the given object. + + Parameters + ---------- + a : array_like + Input data. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :meth:`ndarray.copy` are very + similar, but have different default values for their order= + arguments.) + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise the + returned array will be forced to be a base-class array (defaults to False). + + Returns + ------- + arr : ndarray + Array interpretation of `a`. + + See Also + -------- + ndarray.copy : Preferred method for creating an array copy + + Notes + ----- + This is equivalent to: + + >>> np.array(a, copy=True) #doctest: +SKIP + + The copy made of the data is shallow, i.e., for arrays with object dtype, + the new array will point to the same objects. + See Examples from `ndarray.copy`. + + Examples + -------- + >>> import numpy as np + + Create an array x, with a reference y and a copy z: + + >>> x = np.array([1, 2, 3]) + >>> y = x + >>> z = np.copy(x) + + Note that, when we modify x, y changes, but not z: + + >>> x[0] = 10 + >>> x[0] == y[0] + True + >>> x[0] == z[0] + False + + Note that np.copy clears the previously set WRITEABLE=False flag. + + >>> a = np.array([1, 2, 3]) + >>> a.flags["WRITEABLE"] = False + >>> b = np.copy(a) + >>> b.flags["WRITEABLE"] + True + >>> b[0] = 3 + >>> b + array([3, 2, 3]) + """ + return array(a, order=order, subok=subok, copy=True) + +# Basic operations + + +def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None): + yield f + yield from varargs + + +@array_function_dispatch(_gradient_dispatcher) +def gradient(f, *varargs, axis=None, edge_order=1): + """ + Return the gradient of an N-dimensional array. + + The gradient is computed using second order accurate central differences + in the interior points and either first or second order accurate one-sided + (forward or backward) differences at the boundaries. + The returned gradient hence has the same shape as the input array. + + Parameters + ---------- + f : array_like + An N-dimensional array containing samples of a scalar function. + varargs : list of scalar or array, optional + Spacing between f values. Default unitary spacing for all dimensions.
+ Spacing can be specified using: + + 1. single scalar to specify a sample distance for all dimensions. + 2. N scalars to specify a constant sample distance for each dimension. + i.e. `dx`, `dy`, `dz`, ... + 3. N arrays to specify the coordinates of the values along each + dimension of F. The length of the array must match the size of + the corresponding dimension + 4. Any combination of N scalars/arrays with the meaning of 2. and 3. + + If `axis` is given, the number of varargs must equal the number of axes + specified in the axis parameter. + Default: 1. (see Examples below). + + edge_order : {1, 2}, optional + Gradient is calculated using N-th order accurate differences + at the boundaries. Default: 1. + axis : None or int or tuple of ints, optional + Gradient is calculated only along the given axis or axes + The default (axis = None) is to calculate the gradient for all the axes + of the input array. axis may be negative, in which case it counts from + the last to the first axis. + + Returns + ------- + gradient : ndarray or tuple of ndarray + A tuple of ndarrays (or a single ndarray if there is only one + dimension) corresponding to the derivatives of f with respect + to each dimension. Each derivative has the same shape as f. + + Examples + -------- + >>> import numpy as np + >>> f = np.array([1, 2, 4, 7, 11, 16]) + >>> np.gradient(f) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + >>> np.gradient(f, 2) + array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + + Spacing can be also specified with an array that represents the coordinates + of the values F along the dimensions. + For instance a uniform spacing: + + >>> x = np.arange(f.size) + >>> np.gradient(f, x) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + + Or a non uniform one: + + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.]) + >>> np.gradient(f, x) + array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) + + For two dimensional arrays, the return will be two arrays ordered by + axis. In this example the first array stands for the gradient in + rows and the second one in columns direction: + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]])) + (array([[ 2., 2., -1.], + [ 2., 2., -1.]]), + array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])) + + In this example the spacing is also specified: + uniform for axis=0 and non uniform for axis=1 + + >>> dx = 2. + >>> y = [1., 1.5, 3.5] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), dx, y) + (array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), + array([[2. , 2. , 2. ], + [2. , 1.7, 0.5]])) + + It is possible to specify how boundaries are treated using `edge_order` + + >>> x = np.array([0, 1, 2, 3, 4]) + >>> f = x**2 + >>> np.gradient(f, edge_order=1) + array([1., 2., 4., 6., 7.]) + >>> np.gradient(f, edge_order=2) + array([0., 2., 4., 6., 8.]) + + The `axis` keyword can be used to specify a subset of axes of which the + gradient is calculated + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), axis=0) + array([[ 2., 2., -1.], + [ 2., 2., -1.]]) + + The `varargs` argument defines the spacing between sample points in the + input array. It can take two forms: + + 1. An array, specifying coordinates, which may be unevenly spaced: + + >>> x = np.array([0., 2., 3., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, x, edge_order=2) + array([ 0., 4., 6., 12., 16.]) + + 2. A scalar, representing the fixed sample distance: + + >>> dx = 2 + >>> x = np.array([0., 2., 4., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, dx, edge_order=2) + array([ 0., 4., 8., 12., 16.]) + + It's possible to provide different data for spacing along each dimension. 
+ The number of arguments must match the number of dimensions in the input + data. + + >>> dx = 2 + >>> dy = 3 + >>> x = np.arange(0, 6, dx) + >>> y = np.arange(0, 9, dy) + >>> xs, ys = np.meshgrid(x, y) + >>> zs = xs + 2 * ys + >>> np.gradient(zs, dy, dx) # Passing two scalars + (array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]), + array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]])) + + Mixing scalars and arrays is also allowed: + + >>> np.gradient(zs, y, dx) # Passing one array and one scalar + (array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]), + array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]])) + + Notes + ----- + Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous + derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we + minimize the "consistency error" :math:`\\eta_{i}` between the true gradient + and its estimate from a linear combination of the neighboring grid-points: + + .. math:: + + \\eta_{i} = f_{i}^{\\left(1\\right)} - + \\left[ \\alpha f\\left(x_{i}\\right) + + \\beta f\\left(x_{i} + h_{d}\\right) + + \\gamma f\\left(x_{i}-h_{s}\\right) + \\right] + + By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` + with their Taylor series expansion, this translates into solving + the following the linear system: + + .. math:: + + \\left\\{ + \\begin{array}{r} + \\alpha+\\beta+\\gamma=0 \\\\ + \\beta h_{d}-\\gamma h_{s}=1 \\\\ + \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 + \\end{array} + \\right. + + The resulting approximation of :math:`f_{i}^{(1)}` is the following: + + .. math:: + + \\hat f_{i}^{(1)} = + \\frac{ + h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) + - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} + { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + + h_{s}h_{d}^{2}}{h_{d} + + h_{s}}\\right) + + It is worth noting that if :math:`h_{s}=h_{d}` + (i.e., data are evenly spaced) + we find the standard second order approximation: + + .. math:: + + \\hat f_{i}^{(1)}= + \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + + \\mathcal{O}\\left(h^{2}\\right) + + With a similar procedure the forward/backward approximations used for + boundaries can be derived. + + References + ---------- + .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics + (Texts in Applied Mathematics). New York: Springer. + .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations + in Geophysical Fluid Dynamics. New York: Springer. + .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids, + Mathematics of Computation 51, no. 184 : 699-706. + `PDF `_. 
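+
+ Since the interior stencil derived above is exact for quadratics, a quick
+ numerical check on a non-uniform grid is possible (an editorial sketch):
+
+ >>> x = np.array([0., 1., 3.])   # h_s = 1, h_d = 2
+ >>> f = x**2                     # exact derivative at x = 1 is 2
+ >>> float(np.gradient(f, x)[1])
+ 2.0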
+ """ + f = np.asanyarray(f) + N = f.ndim # number of dimensions + + if axis is None: + axes = tuple(range(N)) + else: + axes = _nx.normalize_axis_tuple(axis, N) + + len_axes = len(axes) + n = len(varargs) + if n == 0: + # no spacing argument - use 1 in all axes + dx = [1.0] * len_axes + elif n == 1 and np.ndim(varargs[0]) == 0: + # single scalar for all axes + dx = varargs * len_axes + elif n == len_axes: + # scalar or 1d array for each axis + dx = list(varargs) + for i, distances in enumerate(dx): + distances = np.asanyarray(distances) + if distances.ndim == 0: + continue + elif distances.ndim != 1: + raise ValueError("distances must be either scalars or 1d") + if len(distances) != f.shape[axes[i]]: + raise ValueError("when 1d, distances must match " + "the length of the corresponding dimension") + if np.issubdtype(distances.dtype, np.integer): + # Convert numpy integer types to float64 to avoid modular + # arithmetic in np.diff(distances). + distances = distances.astype(np.float64) + diffx = np.diff(distances) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + else: + raise TypeError("invalid number of arguments") + + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + + # use central differences on interior and one-sided differences on the + # endpoints. This preserves second order-accuracy over the full domain. + + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)] * N + slice2 = [slice(None)] * N + slice3 = [slice(None)] * N + slice4 = [slice(None)] * N + + otype = f.dtype + if otype.type is np.datetime64: + # the timedelta dtype with the same unit information + otype = np.dtype(otype.name.replace('datetime', 'timedelta')) + # view as timedelta to allow addition + f = f.view(otype) + elif otype.type is np.timedelta64: + pass + elif np.issubdtype(otype, np.inexact): + pass + else: + # All other types convert to floating point. + # First check if f is a numpy integer type; if so, convert f to float64 + # to avoid modular arithmetic when computing the changes in f. + if np.issubdtype(otype, np.integer): + f = f.astype(np.float64) + otype = np.float64 + + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least (edge_order + 1) elements are required.") + # result allocation + out = np.empty_like(f, dtype=otype) + + # spacing for the current axis + uniform_spacing = np.ndim(ax_dx) == 0 + + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. 
* ax_dx) + else: + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] + a = -(dx2) / (dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = np.ones(N, dtype=int) + shape[axis] = -1 + a.shape = b.shape = c.shape = shape + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] + + # Numerical differentiation: 1st order edges + if edge_order == 1: + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n + + # Numerical differentiation: 2nd order edges + else: + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + if uniform_spacing: + a = -1.5 / ax_dx + b = 2. / ax_dx + c = -0.5 / ax_dx + else: + dx1 = ax_dx[0] + dx2 = ax_dx[1] + a = -(2. * dx1 + dx2) / (dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = - dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] + + slice1[axis] = -1 + slice2[axis] = -3 + slice3[axis] = -2 + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / ax_dx + b = -2. / ax_dx + c = 1.5 / ax_dx + else: + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = - (dx2 + dx1) / (dx1 * dx2) + c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] + + outvals.append(out) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if len_axes == 1: + return outvals[0] + return tuple(outvals) + + +def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): + return (a, prepend, append) + + +@array_function_dispatch(_diff_dispatcher) +def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : array_like, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + Returns + ------- + diff : ndarray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`.
The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + gradient, ediff1d, cumsum + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.diff(u8_arr) + array([255], dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] + np.uint8(255) + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.diff(i16_arr) + array([-1], dtype=int16) + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + + """ + if n == 0: + return a + if n < 0: + raise ValueError( + "order must be non-negative but got " + repr(n)) + + a = asanyarray(a) + nd = a.ndim + if nd == 0: + raise ValueError("diff requires input that is at least one dimensional") + axis = normalize_axis_index(axis, nd) + + combined = [] + if prepend is not np._NoValue: + prepend = np.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.concatenate(combined, axis) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + + op = not_equal if a.dtype == np.bool else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + + return a + + +def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): + return (x, xp, fp) + + +@array_function_dispatch(_interp_dispatcher) +def interp(x, xp, fp, left=None, right=None, period=None): + """ + One-dimensional linear interpolation for monotonically increasing sample points. + + Returns the one-dimensional piecewise linear interpolant to a function + with given discrete data points (`xp`, `fp`), evaluated at `x`. + + Parameters + ---------- + x : array_like + The x-coordinates at which to evaluate the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing if argument + `period` is not specified. Otherwise, `xp` is internally sorted after + normalizing the periodic boundaries with ``xp = xp % period``. + + fp : 1-D sequence of float or complex + The y-coordinates of the data points, same length as `xp`. + + left : optional float or complex corresponding to fp + Value to return for `x < xp[0]`, default is `fp[0]`. 
+ + right : optional float or complex corresponding to fp + Value to return for `x > xp[-1]`, default is `fp[-1]`. + + period : None or float, optional + A period for the x-coordinates. This parameter allows the proper + interpolation of angular x-coordinates. Parameters `left` and `right` + are ignored if `period` is specified. + + Returns + ------- + y : float or complex (corresponding to fp) or ndarray + The interpolated values, same shape as `x`. + + Raises + ------ + ValueError + If `xp` and `fp` have different length + If `xp` or `fp` are not 1-D sequences + If `period == 0` + + See Also + -------- + scipy.interpolate + + Warnings + -------- + The x-coordinate sequence is expected to be increasing, but this is not + explicitly enforced. However, if the sequence `xp` is non-increasing, + interpolation results are meaningless. + + Note that, since NaN is unsortable, `xp` also cannot contain NaNs. + + A simple check for `xp` being strictly increasing is:: + + np.all(np.diff(xp) > 0) + + Examples + -------- + >>> import numpy as np + >>> xp = [1, 2, 3] + >>> fp = [3, 2, 0] + >>> np.interp(2.5, xp, fp) + 1.0 + >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) + array([3. , 3. , 2.5 , 0.56, 0. ]) + >>> UNDEF = -99.0 + >>> np.interp(3.14, xp, fp, right=UNDEF) + -99.0 + + Plot an interpolant to the sine function: + + >>> x = np.linspace(0, 2*np.pi, 10) + >>> y = np.sin(x) + >>> xvals = np.linspace(0, 2*np.pi, 50) + >>> yinterp = np.interp(xvals, x, y) + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'o') + [<matplotlib.lines.Line2D object at 0x...>] + >>> plt.plot(xvals, yinterp, '-x') + [<matplotlib.lines.Line2D object at 0x...>] + >>> plt.show() + + Interpolation with periodic x-coordinates: + + >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] + >>> xp = [190, -190, 350, -350] + >>> fp = [5, 10, 3, 4] + >>> np.interp(x, xp, fp, period=360) + array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) + + Complex interpolation: + + >>> x = [1.5, 4.0] + >>> xp = [2,3,5] + >>> fp = [1.0j, 0, 2+3j] + >>> np.interp(x, xp, fp) + array([0.+1.j , 1.+1.5j]) + + """ + + fp = np.asarray(fp) + + if np.iscomplexobj(fp): + interp_func = compiled_interp_complex + input_dtype = np.complex128 + else: + interp_func = compiled_interp + input_dtype = np.float64 + + if period is not None: + if period == 0: + raise ValueError("period must be a non-zero value") + period = abs(period) + left = None + right = None + + x = np.asarray(x, dtype=np.float64) + xp = np.asarray(xp, dtype=np.float64) + fp = np.asarray(fp, dtype=input_dtype) + + if xp.ndim != 1 or fp.ndim != 1: + raise ValueError("Data points must be 1-D sequences") + if xp.shape[0] != fp.shape[0]: + raise ValueError("fp and xp are not of the same length") + # normalizing periodic boundaries + x = x % period + xp = xp % period + asort_xp = np.argsort(xp) + xp = xp[asort_xp] + fp = fp[asort_xp] + xp = np.concatenate((xp[-1:] - period, xp, xp[0:1] + period)) + fp = np.concatenate((fp[-1:], fp, fp[0:1])) + + return interp_func(x, xp, fp, left, right) + + +def _angle_dispatcher(z, deg=None): + return (z,) + + +@array_function_dispatch(_angle_dispatcher) +def angle(z, deg=False): + """ + Return the angle of the complex argument. + + Parameters + ---------- + z : array_like + A complex number or sequence of complex numbers. + deg : bool, optional + Return angle in degrees if True, radians if False (default). + + Returns + ------- + angle : ndarray or scalar + The counterclockwise angle from the positive real axis on the complex + plane in the range ``(-pi, pi]``, with dtype as numpy.float64.
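+ + As a quick illustrative check of this convention, the angle of a point + placed on the unit circle is recovered up to floating-point error: + + >>> bool(np.isclose(np.angle(np.exp(1j * 0.3)), 0.3)) + True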
+ + See Also + -------- + arctan2 + absolute + + Notes + ----- + This function passes the imaginary and real parts of the argument to + `arctan2` to compute the result; consequently, it follows the convention + of `arctan2` when the magnitude of the argument is zero. See example. + + Examples + -------- + >>> import numpy as np + >>> np.angle([1.0, 1.0j, 1+1j]) # in radians + array([ 0. , 1.57079633, 0.78539816]) # may vary + >>> np.angle(1+1j, deg=True) # in degrees + 45.0 + >>> np.angle([0., -0., complex(0., -0.), complex(-0., -0.)]) # convention + array([ 0. , 3.14159265, -0. , -3.14159265]) + + """ + z = asanyarray(z) + if issubclass(z.dtype.type, _nx.complexfloating): + zimag = z.imag + zreal = z.real + else: + zimag = 0 + zreal = z + + a = arctan2(zimag, zreal) + if deg: + a *= 180 / pi + return a + + +def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): + return (p,) + + +@array_function_dispatch(_unwrap_dispatcher) +def unwrap(p, discont=None, axis=-1, *, period=2 * pi): + r""" + Unwrap by taking the complement of large deltas with respect to the period. + + This unwraps a signal `p` by changing elements which have an absolute + difference from their predecessor of more than ``max(discont, period/2)`` + to their `period`-complementary values. + + For the default case where `period` is :math:`2\pi` and `discont` is + :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences + are never greater than :math:`\pi` by adding :math:`2k\pi` for some + integer :math:`k`. + + Parameters + ---------- + p : array_like + Input array. + discont : float, optional + Maximum discontinuity between values, default is ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. + To have an effect different from the default, `discont` should be + larger than ``period/2``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. + period : float, optional + Size of the range over which the input wraps. By default, it is + ``2 pi``. + + .. versionadded:: 1.21.0 + + Returns + ------- + out : ndarray + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``period/2``, + but larger than `discont`, no unwrapping is done because taking + the complement would only make the discontinuity larger. + + Examples + -------- + >>> import numpy as np + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. 
]) # may vary + >>> np.unwrap([0, 1, 2, -1, 0], period=4) + array([0, 1, 2, 3, 4]) + >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) + array([2, 3, 4, 5, 6, 7, 8, 9]) + >>> phase_deg = np.mod(np.linspace(0, 720, 19), 360) - 180 + >>> np.unwrap(phase_deg, period=360) + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) + """ + p = asarray(p) + nd = p.ndim + dd = diff(p, axis=axis) + if discont is None: + discont = period / 2 + slice1 = [slice(None, None)] * nd # full slices + slice1[axis] = slice(1, None) + slice1 = tuple(slice1) + dtype = np.result_type(dd, period) + if _nx.issubdtype(dtype, _nx.integer): + interval_high, rem = divmod(period, 2) + boundary_ambiguous = rem == 0 + else: + interval_high = period / 2 + boundary_ambiguous = True + interval_low = -interval_high + ddmod = mod(dd - interval_low, period) + interval_low + if boundary_ambiguous: + # for `mask = (abs(dd) == period/2)`, the above line made + # `ddmod[mask] == -period/2`. correct these such that + # `ddmod[mask] == sign(dd[mask])*period/2`. + _nx.copyto(ddmod, interval_high, + where=(ddmod == interval_low) & (dd > 0)) + ph_correct = ddmod - dd + _nx.copyto(ph_correct, 0, where=abs(dd) < discont) + up = array(p, copy=True, dtype=dtype) + up[slice1] = p[slice1] + ph_correct.cumsum(axis) + return up + + +def _sort_complex(a): + return (a,) + + +@array_function_dispatch(_sort_complex) +def sort_complex(a): + """ + Sort a complex array using the real part first, then the imaginary part. + + Parameters + ---------- + a : array_like + Input array + + Returns + ------- + out : complex ndarray + Always returns a sorted complex array. + + Examples + -------- + >>> import numpy as np + >>> np.sort_complex([5, 3, 6, 2, 1]) + array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + + >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) + array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + + """ + b = array(a, copy=True) + b.sort() + if not issubclass(b.dtype.type, _nx.complexfloating): + if b.dtype.char in 'bhBH': + return b.astype('F') + elif b.dtype.char == 'g': + return b.astype('G') + else: + return b.astype('D') + else: + return b + + +def _arg_trim_zeros(filt): + """Return indices of the first and last non-zero element. + + Parameters + ---------- + filt : array_like + Input array. + + Returns + ------- + start, stop : ndarray + Two arrays containing the indices of the first and last non-zero + element in each dimension. + + See also + -------- + trim_zeros + + Examples + -------- + >>> import numpy as np + >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0])) + (array([2]), array([3])) + """ + nonzero = ( + np.argwhere(filt) + if filt.dtype != np.object_ + # Historically, `trim_zeros` treats `None` in an object array + # as non-zero while argwhere doesn't, account for that + else np.argwhere(filt != 0) + ) + if nonzero.size == 0: + start = stop = np.array([], dtype=np.intp) + else: + start = nonzero.min(axis=0) + stop = nonzero.max(axis=0) + return start, stop + + +def _trim_zeros(filt, trim=None, axis=None): + return (filt,) + + +@array_function_dispatch(_trim_zeros) +def trim_zeros(filt, trim='fb', axis=None): + """Remove values along a dimension which are zero along all other dimensions. + + Parameters + ---------- + filt : array_like + Input array. + trim : {"fb", "f", "b"}, optional + A string with 'f' representing trim from front and 'b' to trim from + back.
By default, zeros are trimmed on both sides. + Front and back refer to the edges of a dimension, with "front" referring + to the side with the lowest index 0, and "back" referring to the highest + index (or index -1). + axis : int or sequence, optional + If None, `filt` is cropped such that the smallest bounding box is + returned that still contains all values which are not zero. + If an axis is specified, `filt` will be sliced in that dimension only + on the sides specified by `trim`. The remaining area will be the + smallest that still contains all values which are not zero. + + .. versionadded:: 2.2.0 + + Returns + ------- + trimmed : ndarray or sequence + The result of trimming the input. The number of dimensions and the + input data type are preserved. + + Notes + ----- + For all-zero arrays, the first axis is trimmed first. + + Examples + -------- + >>> import numpy as np + >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) + >>> np.trim_zeros(a) + array([1, 2, 3, 0, 2, 1]) + + >>> np.trim_zeros(a, trim='b') + array([0, 0, 0, ..., 0, 2, 1]) + + Multiple dimensions are supported. + + >>> b = np.array([[0, 0, 2, 3, 0, 0], + ... [0, 1, 0, 3, 0, 0], + ... [0, 0, 0, 0, 0, 0]]) + >>> np.trim_zeros(b) + array([[0, 2, 3], + [1, 0, 3]]) + + >>> np.trim_zeros(b, axis=-1) + array([[0, 2, 3], + [1, 0, 3], + [0, 0, 0]]) + + The input data type is preserved, list/tuple in means list/tuple out. + + >>> np.trim_zeros([0, 1, 2, 0]) + [1, 2] + + """ + filt_ = np.asarray(filt) + + trim = trim.lower() + if trim not in {"fb", "bf", "f", "b"}: + raise ValueError(f"unexpected character(s) in `trim`: {trim!r}") + + start, stop = _arg_trim_zeros(filt_) + stop += 1 # Adjust for slicing + + if start.size == 0: + # filt is all-zero -> assign same values to start and stop so that + # resulting slice will be empty + start = stop = np.zeros(filt_.ndim, dtype=np.intp) + else: + if 'f' not in trim: + start = (None,) * filt_.ndim + if 'b' not in trim: + stop = (None,) * filt_.ndim + + if len(start) == 1: + # filt is 1D -> don't use multi-dimensional slicing to preserve + # non-array input types + sl = slice(start[0], stop[0]) + elif axis is None: + # trim all axes + sl = tuple(slice(*x) for x in zip(start, stop)) + else: + # only trim single axis + axis = normalize_axis_index(axis, filt_.ndim) + sl = (slice(None),) * axis + (slice(start[axis], stop[axis]),) + (...,) + + trimmed = filt[sl] + return trimmed + + +def _extract_dispatcher(condition, arr): + return (condition, arr) + + +@array_function_dispatch(_extract_dispatcher) +def extract(condition, arr): + """ + Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If + `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + + Note that `place` does the exact opposite of `extract`. + + Parameters + ---------- + condition : array_like + An array whose nonzero or True entries indicate the elements of `arr` + to extract. + arr : array_like + Input array of the same size as `condition`. + + Returns + ------- + extract : ndarray + Rank 1 array of values from `arr` where `condition` is True.
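+ + As a quick illustrative check, `extract` with a boolean mask agrees with + plain boolean indexing: + + >>> a = np.arange(6).reshape(2, 3) + >>> np.array_equal(np.extract(a % 2 == 0, a), a[a % 2 == 0]) + True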
+ + See Also + -------- + take, put, copyto, compress, place + + Examples + -------- + >>> import numpy as np + >>> arr = np.arange(12).reshape((3, 4)) + >>> arr + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> condition = np.mod(arr, 3)==0 + >>> condition + array([[ True, False, False, True], + [False, False, True, False], + [False, True, False, False]]) + >>> np.extract(condition, arr) + array([0, 3, 6, 9]) + + + If `condition` is boolean: + + >>> arr[condition] + array([0, 3, 6, 9]) + + """ + return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) + + +def _place_dispatcher(arr, mask, vals): + return (arr, mask, vals) + + +@array_function_dispatch(_place_dispatcher) +def place(arr, mask, vals): + """ + Change elements of an array based on conditional and input values. + + Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that + `place` uses the first N elements of `vals`, where N is the number of + True values in `mask`, while `copyto` uses the elements where `mask` + is True. + + Note that `extract` does the exact opposite of `place`. + + Parameters + ---------- + arr : ndarray + Array to put data into. + mask : array_like + Boolean mask array. Must have the same size as `a`. + vals : 1-D sequence + Values to put into `a`. Only the first N elements are used, where + N is the number of True values in `mask`. If `vals` is smaller + than N, it will be repeated, and if elements of `a` are to be masked, + this sequence must be non-empty. + + See Also + -------- + copyto, put, take, extract + + Examples + -------- + >>> import numpy as np + >>> arr = np.arange(6).reshape(2, 3) + >>> np.place(arr, arr>2, [44, 55]) + >>> arr + array([[ 0, 1, 2], + [44, 55, 44]]) + + """ + return _place(arr, mask, vals) + + +def disp(mesg, device=None, linefeed=True): + """ + Display a message on a device. + + .. deprecated:: 2.0 + Use your own printing function instead. + + Parameters + ---------- + mesg : str + Message to display. + device : object + Device to write message. If None, defaults to ``sys.stdout`` which is + very similar to ``print``. `device` needs to have ``write()`` and + ``flush()`` methods. + linefeed : bool, optional + Option whether to print a line feed or not. Defaults to True. + + Raises + ------ + AttributeError + If `device` does not have a ``write()`` or ``flush()`` method. + + Examples + -------- + >>> import numpy as np + + Besides ``sys.stdout``, a file-like object can also be used as it has + both required methods: + + >>> from io import StringIO + >>> buf = StringIO() + >>> np.disp('"Display" in a file', device=buf) + >>> buf.getvalue() + '"Display" in a file\\n' + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`disp` is deprecated, " + "use your own printing function instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + if device is None: + device = sys.stdout + if linefeed: + device.write(f'{mesg}\n') + else: + device.write(f'{mesg}') + device.flush() + + +# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html +_DIMENSION_NAME = r'\w+' +_CORE_DIMENSION_LIST = f'(?:{_DIMENSION_NAME}(?:,{_DIMENSION_NAME})*)?' +_ARGUMENT = fr'\({_CORE_DIMENSION_LIST}\)' +_ARGUMENT_LIST = f'{_ARGUMENT}(?:,{_ARGUMENT})*' +_SIGNATURE = f'^{_ARGUMENT_LIST}->{_ARGUMENT_LIST}$' + + +def _parse_gufunc_signature(signature): + """ + Parse string signatures for a generalized universal function. 
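+ + For example, with the ``np.matmul`` signature described below (a brief + illustrative call): + + >>> _parse_gufunc_signature('(m,n),(n,p)->(m,p)') + ([('m', 'n'), ('n', 'p')], [('m', 'p')])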
+ + Arguments + --------- + signature : string + Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` + for ``np.matmul``. + + Returns + ------- + Tuple of input and output core dimensions parsed from the signature, each + of the form List[Tuple[str, ...]]. + """ + signature = re.sub(r'\s+', '', signature) + + if not re.match(_SIGNATURE, signature): + raise ValueError( + f'not a valid gufunc signature: {signature}') + return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) + for arg in re.findall(_ARGUMENT, arg_list)] + for arg_list in signature.split('->')) + + +def _update_dim_sizes(dim_sizes, arg, core_dims): + """ + Incrementally check and update core dimension sizes for a single argument. + + Arguments + --------- + dim_sizes : Dict[str, int] + Sizes of existing core dimensions. Will be updated in-place. + arg : ndarray + Argument to examine. + core_dims : Tuple[str, ...] + Core dimensions for this argument. + """ + if not core_dims: + return + + num_core_dims = len(core_dims) + if arg.ndim < num_core_dims: + raise ValueError( + '%d-dimensional argument does not have enough ' + 'dimensions for all core dimensions %r' + % (arg.ndim, core_dims)) + + core_shape = arg.shape[-num_core_dims:] + for dim, size in zip(core_dims, core_shape): + if dim in dim_sizes: + if size != dim_sizes[dim]: + raise ValueError( + 'inconsistent size for core dimension %r: %r vs %r' + % (dim, size, dim_sizes[dim])) + else: + dim_sizes[dim] = size + + +def _parse_input_dimensions(args, input_core_dims): + """ + Parse broadcast and core dimensions for vectorize with a signature. + + Arguments + --------- + args : Tuple[ndarray, ...] + Tuple of input arguments to examine. + input_core_dims : List[Tuple[str, ...]] + List of core dimensions corresponding to each input. + + Returns + ------- + broadcast_shape : Tuple[int, ...] + Common shape to broadcast all non-core dimensions to. + dim_sizes : Dict[str, int] + Common sizes for named core dimensions. + """ + broadcast_args = [] + dim_sizes = {} + for arg, core_dims in zip(args, input_core_dims): + _update_dim_sizes(dim_sizes, arg, core_dims) + ndim = arg.ndim - len(core_dims) + dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) + broadcast_args.append(dummy_array) + broadcast_shape = np.lib._stride_tricks_impl._broadcast_shape( + *broadcast_args + ) + return broadcast_shape, dim_sizes + + +def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): + """Helper for calculating broadcast shapes with core dimensions.""" + return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) + for core_dims in list_of_core_dims] + + +def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, + results=None): + """Helper for creating output arrays in vectorize.""" + shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) + if dtypes is None: + dtypes = [None] * len(shapes) + if results is None: + arrays = tuple(np.empty(shape=shape, dtype=dtype) + for shape, dtype in zip(shapes, dtypes)) + else: + arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype) + for result, shape, dtype + in zip(results, shapes, dtypes)) + return arrays + + +def _get_vectorize_dtype(dtype): + if dtype.char in "SU": + return dtype.char + return dtype + + +@set_module('numpy') +class vectorize: + """ + vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, + cache=False, signature=None) + + Returns an object that acts like pyfunc, but takes arrays as input. 
+ + Define a vectorized function which takes a nested sequence of objects or + numpy arrays as inputs and returns a single numpy array or a tuple of numpy + arrays. The vectorized function evaluates `pyfunc` over successive tuples + of the input arrays like the python map function, except it uses the + broadcasting rules of numpy. + + The data type of the output of the vectorized function is determined by + calling the function with the first element of the input. This can be + avoided by specifying the `otypes` argument. + + Parameters + ---------- + pyfunc : callable, optional + A python function or method. + Can be omitted to produce a decorator with keyword arguments. + otypes : str or list of dtypes, optional + The output data type. It must be specified as either a string of + typecode characters or a list of data type specifiers. There should + be one data type specifier for each output. + doc : str, optional + The docstring for the function. If None, the docstring will be the + ``pyfunc.__doc__``. + excluded : set, optional + Set of strings or integers representing the positional or keyword + arguments for which the function will not be vectorized. These will be + passed directly to `pyfunc` unmodified. + + cache : bool, optional + If `True`, then cache the first function call that determines the number + of outputs if `otypes` is not provided. + + signature : string, optional + Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for + vectorized matrix-vector multiplication. If provided, ``pyfunc`` will + be called with (and expected to return) arrays with shapes given by the + size of corresponding core dimensions. By default, ``pyfunc`` is + assumed to take scalars as input and output. + + Returns + ------- + out : callable + A vectorized function if ``pyfunc`` was provided, + a decorator otherwise. + + See Also + -------- + frompyfunc : Takes an arbitrary Python function and returns a ufunc + + Notes + ----- + The `vectorize` function is provided primarily for convenience, not for + performance. The implementation is essentially a for loop. + + If `otypes` is not specified, then a call to the function with the + first argument will be used to determine the number of outputs. The + results of this call will be cached if `cache` is `True` to prevent + calling the function twice. However, to implement the cache, the + original function must be wrapped which will slow down subsequent + calls, so only do this if your function is expensive. + + Support for the new keyword argument interface and the `excluded` + argument further degrades performance. + + References + ---------- + .. [1] :doc:`/reference/c-api/generalized-ufuncs` + + Examples + -------- + >>> import numpy as np + >>> def myfunc(a, b): + ... "Return a-b if a>b, otherwise return a+b" + ... if a > b: + ... return a - b + ... else: + ...
return a + b + + >>> vfunc = np.vectorize(myfunc) + >>> vfunc([1, 2, 3, 4], 2) + array([3, 4, 1, 2]) + + The docstring is taken from the input function to `vectorize` unless it + is specified: + + >>> vfunc.__doc__ + 'Return a-b if a>b, otherwise return a+b' + >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') + >>> vfunc.__doc__ + 'Vectorized `myfunc`' + + The output type is determined by evaluating the first element of the input, + unless it is specified: + + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + <class 'numpy.int64'> + >>> vfunc = np.vectorize(myfunc, otypes=[float]) + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + <class 'numpy.float64'> + + The `excluded` argument can be used to prevent vectorizing over certain + arguments. This can be useful for array-like arguments of a fixed length + such as the coefficients for a polynomial as in `polyval`: + + >>> def mypolyval(p, x): + ... _p = list(p) + ... res = _p.pop(0) + ... while _p: + ... res = res*x + _p.pop(0) + ... return res + + Here, we exclude the zeroth argument from vectorization whether it is + passed by position or keyword. + + >>> vpolyval = np.vectorize(mypolyval, excluded={0, 'p'}) + >>> vpolyval([1, 2, 3], x=[0, 1]) + array([3, 6]) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) + + The `signature` argument allows for vectorizing functions that act on + non-scalar arrays of fixed length. For example, you can use it for a + vectorized calculation of Pearson correlation coefficient and its p-value: + + >>> import scipy.stats + >>> pearsonr = np.vectorize(scipy.stats.pearsonr, + ... signature='(n),(n)->(),()') + >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) + (array([ 1., -1.]), array([ 0., 0.])) + + Or for a vectorized convolution: + + >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') + >>> convolve(np.eye(4), [1, 2, 1]) + array([[1., 2., 1., 0., 0., 0.], + [0., 1., 2., 1., 0., 0.], + [0., 0., 1., 2., 1., 0.], + [0., 0., 0., 1., 2., 1.]]) + + Decorator syntax is supported. The decorator can be called as + a function to provide keyword arguments: + + >>> @np.vectorize + ... def identity(x): + ... return x + ... + >>> identity([0, 1, 2]) + array([0, 1, 2]) + >>> @np.vectorize(otypes=[float]) + ... def as_float(x): + ... return x + ... + >>> as_float([0, 1, 2]) + array([0., 1., 2.]) + """ + def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, + excluded=None, cache=False, signature=None): + + if (pyfunc != np._NoValue) and (not callable(pyfunc)): + # Splitting the error message to keep + # the length below 79 characters. + part1 = "When used as a decorator, " + part2 = "only accepts keyword arguments."
+ raise TypeError(part1 + part2) + + self.pyfunc = pyfunc + self.cache = cache + self.signature = signature + if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'): + self.__name__ = pyfunc.__name__ + + self._ufunc = {} # Caching to improve default performance + self._doc = None + self.__doc__ = doc + if doc is None and hasattr(pyfunc, '__doc__'): + self.__doc__ = pyfunc.__doc__ + else: + self._doc = doc + + if isinstance(otypes, str): + for char in otypes: + if char not in typecodes['All']: + raise ValueError(f"Invalid otype specified: {char}") + elif iterable(otypes): + otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes] + elif otypes is not None: + raise ValueError("Invalid otype specification") + self.otypes = otypes + + # Excluded variable support + if excluded is None: + excluded = set() + self.excluded = set(excluded) + + if signature is not None: + self._in_and_out_core_dims = _parse_gufunc_signature(signature) + else: + self._in_and_out_core_dims = None + + def _init_stage_2(self, pyfunc, *args, **kwargs): + self.__name__ = pyfunc.__name__ + self.pyfunc = pyfunc + if self._doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = self._doc + + def _call_as_normal(self, *args, **kwargs): + """ + Return arrays with the results of `pyfunc` broadcast (vectorized) over + `args` and `kwargs` not in `excluded`. + """ + excluded = self.excluded + if not kwargs and not excluded: + func = self.pyfunc + vargs = args + else: + # The wrapper accepts only positional arguments: we use `names` and + # `inds` to mutate `the_args` and `kwargs` to pass to the original + # function. + nargs = len(args) + + names = [_n for _n in kwargs if _n not in excluded] + inds = [_i for _i in range(nargs) if _i not in excluded] + the_args = list(args) + + def func(*vargs): + for _n, _i in enumerate(inds): + the_args[_i] = vargs[_n] + kwargs.update(zip(names, vargs[len(inds):])) + return self.pyfunc(*the_args, **kwargs) + + vargs = [args[_i] for _i in inds] + vargs.extend([kwargs[_n] for _n in names]) + + return self._vectorize_call(func=func, args=vargs) + + def __call__(self, *args, **kwargs): + if self.pyfunc is np._NoValue: + self._init_stage_2(*args, **kwargs) + return self + + return self._call_as_normal(*args, **kwargs) + + def _get_ufunc_and_otypes(self, func, args): + """Return (ufunc, otypes).""" + # frompyfunc will fail if args is empty + if not args: + raise ValueError('args can not be empty') + + if self.otypes is not None: + otypes = self.otypes + + # self._ufunc is a dictionary whose keys are the number of + # arguments (i.e. len(args)) and whose values are ufuncs created + # by frompyfunc. len(args) can be different for different calls if + # self.pyfunc has parameters with default values. We only use the + # cache when func is self.pyfunc, which occurs when the call uses + # only positional arguments and no arguments are excluded. + + nin = len(args) + nout = len(self.otypes) + if func is not self.pyfunc or nin not in self._ufunc: + ufunc = frompyfunc(func, nin, nout) + else: + ufunc = None # We'll get it from self._ufunc + if func is self.pyfunc: + ufunc = self._ufunc.setdefault(nin, ufunc) + else: + # Get number of outputs and output types by calling the function on + # the first entries of args. We also cache the result to prevent + # the subsequent call when the ufunc is evaluated. 
+ # Assumes that ufunc first evaluates the 0th elements in the input + # arrays (the input values are not checked to ensure this) + args = [asarray(a) for a in args] + if builtins.any(arg.size == 0 for arg in args): + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + + inputs = [arg.flat[0] for arg in args] + outputs = func(*inputs) + + # Performance note: profiling indicates that -- for simple + # functions at least -- this wrapping can almost double the + # execution time. + # Hence we make it optional. + if self.cache: + _cache = [outputs] + + def _func(*vargs): + if _cache: + return _cache.pop() + else: + return func(*vargs) + else: + _func = func + + if isinstance(outputs, tuple): + nout = len(outputs) + else: + nout = 1 + outputs = (outputs,) + + otypes = ''.join([asarray(outputs[_k]).dtype.char + for _k in range(nout)]) + + # Performance note: profiling indicates that creating the ufunc is + # not a significant cost compared with wrapping so it seems not + # worth trying to cache this. + ufunc = frompyfunc(_func, len(args), nout) + + return ufunc, otypes + + def _vectorize_call(self, func, args): + """Vectorized call to `func` over positional `args`.""" + if self.signature is not None: + res = self._vectorize_call_with_signature(func, args) + elif not args: + res = func() + else: + ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + # gh-29196: `dtype=object` should eventually be removed + args = [asanyarray(a, dtype=object) for a in args] + outputs = ufunc(*args, out=...) + + if ufunc.nout == 1: + res = asanyarray(outputs, dtype=otypes[0]) + else: + res = tuple(asanyarray(x, dtype=t) + for x, t in zip(outputs, otypes)) + return res + + def _vectorize_call_with_signature(self, func, args): + """Vectorized call over positional arguments with a signature.""" + input_core_dims, output_core_dims = self._in_and_out_core_dims + + if len(args) != len(input_core_dims): + raise TypeError('wrong number of positional arguments: ' + 'expected %r, got %r' + % (len(input_core_dims), len(args))) + args = tuple(asanyarray(arg) for arg in args) + + broadcast_shape, dim_sizes = _parse_input_dimensions( + args, input_core_dims) + input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, + input_core_dims) + args = [np.broadcast_to(arg, shape, subok=True) + for arg, shape in zip(args, input_shapes)] + + outputs = None + otypes = self.otypes + nout = len(output_core_dims) + + for index in np.ndindex(*broadcast_shape): + results = func(*(arg[index] for arg in args)) + + n_results = len(results) if isinstance(results, tuple) else 1 + + if nout != n_results: + raise ValueError( + 'wrong number of outputs from pyfunc: expected %r, got %r' + % (nout, n_results)) + + if nout == 1: + results = (results,) + + if outputs is None: + for result, core_dims in zip(results, output_core_dims): + _update_dim_sizes(dim_sizes, result, core_dims) + + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes, results) + + for output, result in zip(outputs, results): + output[index] = result + + if outputs is None: + # did not call the function even once + if otypes is None: + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + if builtins.any(dim not in dim_sizes + for dims in output_core_dims + for dim in dims): + raise ValueError('cannot call `vectorize` with a signature ' + 'including new output dimensions on size 0 ' + 'inputs') + outputs = _create_arrays(broadcast_shape, dim_sizes, + 
output_core_dims, otypes) + + return outputs[0] if nout == 1 else outputs + + +def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, + fweights=None, aweights=None, *, dtype=None): + return (m, y, fweights, aweights) + + +@array_function_dispatch(_cov_dispatcher) +def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, + aweights=None, *, dtype=None): + """ + Estimate a covariance matrix, given data and weights. + + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. + + See the notes for an outline of the algorithm. + + Parameters + ---------- + m : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same form + as that of `m`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N - 1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : int, optional + If not ``None`` the default value implied by `bias` is overridden. + Note that ``ddof=1`` will return the unbiased estimate, even if both + `fweights` and `aweights` are specified, and ``ddof=0`` will return + the simple average. See the notes for the details. The default value + is ``None``. + fweights : array_like, int, optional + 1-D array of integer frequency weights; the number of times each + observation vector should be repeated. + aweights : array_like, optional + 1-D array of observation vector weights. These relative weights are + typically large for observations considered "important" and smaller for + observations considered less "important". If ``ddof=0`` the array of + weights can be used to assign probabilities to observation vectors. + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Notes + ----- + Assume that the observations are in the columns of the observation + array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The + steps to compute the weighted covariance are as follows:: + + >>> m = np.arange(10, dtype=np.float64) + >>> f = np.arange(10) * 2 + >>> a = np.arange(10) ** 2. + >>> ddof = 1 + >>> w = f * a + >>> v1 = np.sum(w) + >>> v2 = np.sum(w * a) + >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 + >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) + + Note that when ``a == 1``, the normalization factor + ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` + as it should. 
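+ + As a quick consistency check of the normalization above (illustrative, not + exhaustive), unit frequency weights reproduce the unweighted estimate: + + >>> m = np.array([[0., 1., 2.], [2., 1., 0.]]) + >>> np.allclose(np.cov(m), np.cov(m, fweights=[1, 1, 1])) + True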
+ + Examples + -------- + >>> import numpy as np + + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. + + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.stack((x, y), axis=0) + >>> np.cov(X) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x, y) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x) + array(11.71) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if m.ndim > 2: + raise ValueError("m has more than 2 dimensions") + + if y is not None: + y = np.asarray(y) + if y.ndim > 2: + raise ValueError("y has more than 2 dimensions") + + if dtype is None: + if y is None: + dtype = np.result_type(m, np.float64) + else: + dtype = np.result_type(m, y, np.float64) + + X = array(m, ndmin=2, dtype=dtype) + if not rowvar and m.ndim != 1: + X = X.T + if X.shape[0] == 0: + return np.array([]).reshape(0, 0) + if y is not None: + y = array(y, copy=None, ndmin=2, dtype=dtype) + if not rowvar and y.shape[0] != 1: + y = y.T + X = np.concatenate((X, y), axis=0) + + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + + # Get the product of frequencies and weights + w = None + if fweights is not None: + fweights = np.asarray(fweights, dtype=float) + if not np.all(fweights == np.around(fweights)): + raise TypeError( + "fweights must be integer") + if fweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional fweights") + if fweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and fweights") + if any(fweights < 0): + raise ValueError( + "fweights cannot be negative") + w = fweights + if aweights is not None: + aweights = np.asarray(aweights, dtype=float) + if aweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional aweights") + if aweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and aweights") + if any(aweights < 0): + raise ValueError( + "aweights cannot be negative") + if w is None: + w = aweights + else: + w *= aweights + + avg, w_sum = average(X, axis=1, weights=w, returned=True) + w_sum = w_sum[0] + + # Determine the normalization + if w is None: + fact = X.shape[1] - ddof + elif ddof == 0: + fact = w_sum + elif aweights is None: + fact = w_sum - ddof + else: + fact = w_sum - ddof * sum(w * aweights) / w_sum + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) + fact = 0.0 + + X -= avg[:, None] + if w is None: + X_T = X.T + else: + X_T = (X * w).T + c = dot(X, X_T.conj()) + c *= np.true_divide(1, fact) + return c.squeeze() + + +def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, + dtype=None): + return (x, y) + + +@array_function_dispatch(_corrcoef_dispatcher) +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, + dtype=None): + """ + Return Pearson product-moment correlation coefficients. 
+ + Please refer to the documentation for `cov` for more detail. The + relationship between the correlation coefficient matrix, `R`, and the + covariance matrix, `C`, is + + .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } } + + The values of `R` are between -1 and 1, inclusive. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + R : ndarray + The correlation coefficient matrix of the variables. + + See Also + -------- + cov : Covariance matrix + + Notes + ----- + Due to floating point rounding the resulting array may not be Hermitian, + the diagonal elements may not be 1, and the elements may not satisfy the + inequality abs(a) <= 1. The real and imaginary parts are clipped to the + interval [-1, 1] in an attempt to improve on that situation, but this is + not much help in the complex case. + + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. + + Examples + -------- + >>> import numpy as np + + In this example we generate two random arrays, ``xarr`` and ``yarr``, and + compute the row-wise and column-wise Pearson correlation coefficients, + ``R``. Since ``rowvar`` is true by default, we first find the row-wise + Pearson correlation coefficients between the variables of ``xarr``. + + >>> rng = np.random.default_rng(seed=42) + >>> xarr = rng.random((3, 3)) + >>> xarr + array([[0.77395605, 0.43887844, 0.85859792], + [0.69736803, 0.09417735, 0.97562235], + [0.7611397 , 0.78606431, 0.12811363]]) + >>> R1 = np.corrcoef(xarr) + >>> R1 + array([[ 1. , 0.99256089, -0.68080986], + [ 0.99256089, 1. , -0.76492172], + [-0.68080986, -0.76492172, 1. ]]) + + If we add another set of variables and observations ``yarr``, we can + compute the row-wise Pearson correlation coefficients between the + variables in ``xarr`` and ``yarr``. + + >>> yarr = rng.random((3, 3)) + >>> yarr + array([[0.45038594, 0.37079802, 0.92676499], + [0.64386512, 0.82276161, 0.4434142 ], + [0.22723872, 0.55458479, 0.06381726]]) + >>> R2 = np.corrcoef(xarr, yarr) + >>> R2 + array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 , + -0.99004057], + [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098, + -0.99981569], + [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355, + 0.77714685], + [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855, + -0.83571711], + [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1.
, + 0.97517215], + [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215, + 1. ]]) + + Finally if we use the option ``rowvar=False``, the columns are now + being treated as the variables and we will find the column-wise Pearson + correlation coefficients between variables in ``xarr`` and ``yarr``. + + >>> R3 = np.corrcoef(xarr, yarr, rowvar=False) + >>> R3 + array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 , + 0.22423734], + [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587, + -0.44069024], + [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648, + 0.75137473], + [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469, + 0.47536961], + [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. , + -0.46666491], + [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491, + 1. ]]) + + """ + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn('bias and ddof have no effect and are deprecated', + DeprecationWarning, stacklevel=2) + c = cov(x, y, rowvar, dtype=dtype) + try: + d = diag(c) + except ValueError: + # scalar covariance + # nan if incorrect value (nan, inf, 0), 1 otherwise + return c / c + stddev = sqrt(d.real) + c /= stddev[:, None] + c /= stddev[None, :] + + # Clip real and imaginary parts to [-1, 1]. This does not guarantee + # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without + # excessive work. + np.clip(c.real, -1, 1, out=c.real) + if np.iscomplexobj(c): + np.clip(c.imag, -1, 1, out=c.imag) + + return c + + +@set_module('numpy') +def blackman(M): + """ + Return the Blackman window. + + The Blackman window is a taper formed by using the first three + terms of a summation of cosines. It was designed to have close to the + minimal leakage possible. It is close to optimal, only slightly worse + than a Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, hamming, hanning, kaiser + + Notes + ----- + The Blackman window is defined as + + .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the kaiser window. + + References + ---------- + Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, + Dover Publications, New York. + + Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> np.blackman(12) + array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary + 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, + 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, + 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) + + Plot the window and the frequency response. + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.blackman(51) + plt.plot(window) + plt.title("Blackman window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() # doctest: +SKIP + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Blackman window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return 0.42 + 0.5 * cos(pi * n / (M - 1)) + 0.08 * cos(2.0 * pi * n / (M - 1)) + + +@set_module('numpy') +def bartlett(M): + """ + Return the Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The triangular window, with the maximum value normalized to one + (the value one appears only if the number of samples is odd), with + the first and last samples equal to zero. + + See Also + -------- + blackman, hamming, hanning, kaiser + + Notes + ----- + The Bartlett window is defined as + + .. math:: w(n) = \\frac{2}{M-1} \\left( + \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| + \\right) + + Most references to the Bartlett window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. Note that convolution with this window produces linear + interpolation. It is also known as an apodization (which means "removing + the foot", i.e. smoothing discontinuities at the beginning and end of the + sampled signal) or tapering function. The Fourier transform of the + Bartlett window is the product of two sinc functions. Note the excellent + discussion in Kanasewich [2]_. + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [4] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> np.bartlett(12) + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary + 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, + 0.18181818, 0. ]) + + Plot the window and its frequency response (requires SciPy and matplotlib). + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.bartlett(51) + plt.plot(window) + plt.title("Bartlett window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Bartlett window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return where(less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1)) + + +@set_module('numpy') +def hanning(M): + """ + Return the Hanning window. + + The Hanning window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray, shape(M,) + The window, with the maximum value normalized to one (the value + one appears only if `M` is odd). + + See Also + -------- + bartlett, blackman, hamming, kaiser + + Notes + ----- + The Hanning window is defined as + + .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hanning was named for Julius von Hann, an Austrian meteorologist. + It is also known as the Cosine Bell. Some authors prefer that it be + called a Hann window, to help avoid confusion with the very similar + Hamming window. + + Most references to the Hanning window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> import numpy as np + >>> np.hanning(12) + array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) + + Plot the window and its frequency response. + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.hanning(51) + plt.plot(window) + plt.title("Hann window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of the Hann window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return 0.5 + 0.5 * cos(pi * n / (M - 1)) + + +@set_module('numpy') +def hamming(M): + """ + Return the Hamming window. + + The Hamming window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hanning, kaiser + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey + and is described in Blackman and Tukey. It was recommended for + smoothing the truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> import numpy as np + >>> np.hamming(12) + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary + 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, + 0.15302337, 0.08 ]) + + Plot the window and the frequency response. + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.hamming(51) + plt.plot(window) + plt.title("Hamming window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Hamming window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return 0.54 + 0.46 * cos(pi * n / (M - 1)) + + +## Code from cephes for i0 + +_i0A = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +_i0B = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + + +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x * b1 - b2 + vals[i] + + return 0.5 * (b0 - b2) + + +def _i0_1(x): + return exp(x) * _chbevl(x / 2.0 - 2, _i0A) + + +def _i0_2(x): + return exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / sqrt(x) + + +def _i0_dispatcher(x): + return (x,) + + +@array_function_dispatch(_i0_dispatcher) +def i0(x): + """ + Modified Bessel function of the first kind, order 0. + + Usually denoted :math:`I_0`. + + Parameters + ---------- + x : array_like of float + Argument of the Bessel function. + + Returns + ------- + out : ndarray, shape = x.shape, dtype = float + The modified Bessel function evaluated at each of the elements of `x`. 
+ + See Also + -------- + scipy.special.i0, scipy.special.iv, scipy.special.ive + + Notes + ----- + The scipy implementation is recommended over this function: it is a + proper ufunc written in C, and more than an order of magnitude faster. + + We use the algorithm published by Clenshaw [1]_ and referenced by + Abramowitz and Stegun [2]_, for which the function domain is + partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + polynomial expansions are employed in each interval. Relative error on + the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + + References + ---------- + .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + *National Physical Laboratory Mathematical Tables*, vol. 5, London: + Her Majesty's Stationery Office, 1962. + .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + Functions*, 10th printing, New York: Dover, 1964, pp. 379. + https://personal.math.ubc.ca/~cbm/aands/page_379.htm + .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero + + Examples + -------- + >>> import numpy as np + >>> np.i0(0.) + array(1.0) + >>> np.i0([0, 1, 2, 3]) + array([1. , 1.26606588, 2.2795853 , 4.88079259]) + + """ + x = np.asanyarray(x) + if x.dtype.kind == 'c': + raise TypeError("i0 not supported for complex values") + if x.dtype.kind != 'f': + x = x.astype(float) + x = np.abs(x) + return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) + +## End of cephes code for i0 + + +@set_module('numpy') +def kaiser(M, beta): + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple + approximation to the DPSS window based on Bessel functions. The Kaiser + window is a very good approximation to the Digital Prolate Spheroidal + Sequence, or Slepian window, which is the transform which maximizes the + energy in the main lobe of the window relative to total energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + get returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. 
smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> np.kaiser(12, 14) + array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary + 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, + 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, + 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) + + + Plot the window and the frequency response. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.kaiser(51, 14) + plt.plot(window) + plt.title("Kaiser window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Kaiser window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. (Simplified result_type with 0.0 + # strongly typed. result-type is not/less order sensitive, but that mainly + # matters for integers anyway.) + values = np.array([0.0, M, beta]) + M = values[1] + beta = values[2] + + if M == 1: + return np.ones(1, dtype=values.dtype) + n = arange(0, M) + alpha = (M - 1) / 2.0 + return i0(beta * sqrt(1 - ((n - alpha) / alpha)**2.0)) / i0(beta) + + +def _sinc_dispatcher(x): + return (x,) + + +@array_function_dispatch(_sinc_dispatcher) +def sinc(x): + r""" + Return the normalized sinc function. + + The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument + :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not + only everywhere continuous but also infinitely differentiable. + + .. note:: + + Note the normalization factor of ``pi`` used in the definition. + This is the most commonly used definition in signal processing. + Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function + :math:`\sin(x)/x` that is more common in mathematics. + + Parameters + ---------- + x : ndarray + Array (possibly multi-dimensional) of values for which to calculate + ``sinc(x)``. + + Returns + ------- + out : ndarray + ``sinc(x)``, which has the same shape as the input. + + Notes + ----- + The name sinc is short for "sine cardinal" or "sinus cardinalis". + + The sinc function is used in various signal processing applications, + including in anti-aliasing, in the construction of a Lanczos resampling + filter, and in interpolation. + + For bandlimited interpolation of discrete-time signals, the ideal + interpolation kernel is proportional to the sinc function. + + References + ---------- + .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web + Resource. https://mathworld.wolfram.com/SincFunction.html + .. 
[2] Wikipedia, "Sinc function", + https://en.wikipedia.org/wiki/Sinc_function + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-4, 4, 41) + >>> np.sinc(x) + array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary + -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, + 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, + 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, + -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, + 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, + 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, + 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, + 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, + -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, + -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, + 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, + -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, + -4.92362781e-02, -3.89804309e-17]) + + >>> plt.plot(x, np.sinc(x)) + [] + >>> plt.title("Sinc Function") + Text(0.5, 1.0, 'Sinc Function') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("X") + Text(0.5, 0, 'X') + >>> plt.show() + + """ + x = np.asanyarray(x) + x = pi * x + # Hope that 1e-20 is sufficient for objects... + eps = np.finfo(x.dtype).eps if x.dtype.kind == "f" else 1e-20 + y = where(x, x, eps) + return sin(y) / y + + +def _ureduce(a, func, keepdims=False, **kwargs): + """ + Internal Function. + Call `func` with `a` as first argument swapping the axes to use extended + axis on functions that don't support it natively. + + Returns result and a.shape with axis dims set to 1. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + func : callable + Reduction function capable of receiving a single axis argument. + It is called with `a` as first argument followed by `kwargs`. + kwargs : keyword arguments + additional keyword arguments to pass to `func`. + + Returns + ------- + result : tuple + Result of func(a, **kwargs) and a.shape with axis dims set to 1 + which can be used to reshape the result to the same shape a ufunc with + keepdims=True would produce. + + """ + a = np.asanyarray(a) + axis = kwargs.get('axis') + out = kwargs.get('out') + + if keepdims is np._NoValue: + keepdims = False + + nd = a.ndim + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, nd) + + if keepdims and out is not None: + index_out = tuple( + 0 if i in axis else slice(None) for i in range(nd)) + kwargs['out'] = out[(Ellipsis, ) + index_out] + + if len(axis) == 1: + kwargs['axis'] = axis[0] + else: + keep = set(range(nd)) - set(axis) + nkeep = len(keep) + # swap axis that should not be reduced to front + for i, s in enumerate(sorted(keep)): + a = a.swapaxes(i, s) + # merge reduced axis + a = a.reshape(a.shape[:nkeep] + (-1,)) + kwargs['axis'] = -1 + elif keepdims and out is not None: + index_out = (0, ) * nd + kwargs['out'] = out[(Ellipsis, ) + index_out] + + r = func(a, **kwargs) + + if out is not None: + return out + + if keepdims: + if axis is None: + index_r = (np.newaxis, ) * nd + else: + index_r = tuple( + np.newaxis if i in axis else slice(None) + for i in range(nd)) + r = r[(Ellipsis, ) + index_r] + + return r + + +def _median_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_median_dispatcher) +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. 
+ + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default, + axis=None, will compute the median along a flattened version of + the array. If a sequence of axes, the array is first flattened + along the given axes, then the median is computed along the + resulting flattened axis. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted``, i.e., + ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the + two middle values of ``V_sorted`` when ``N`` is even. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.median(a) + np.float64(3.5) + >>> np.median(a, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.median(a, axis=1) + array([7., 2.]) + >>> np.median(a, axis=(0, 1)) + np.float64(3.5) + >>> m = np.median(a, axis=0) + >>> out = np.zeros_like(m) + >>> np.median(a, axis=0, out=m) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.median(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.median(b, axis=None, overwrite_input=True) + np.float64(3.5) + >>> assert not np.all(a==b) + + """ + return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, + overwrite_input=overwrite_input) + + + def _median(a, axis=None, out=None, overwrite_input=False): + # can't reasonably be implemented in terms of percentile as we have to + # call mean to not break astropy + a = np.asanyarray(a) + + # Set the partition indexes + if axis is None: + sz = a.size + else: + sz = a.shape[axis] + if sz % 2 == 0: + szh = sz // 2 + kth = [szh - 1, szh] + else: + kth = [(sz - 1) // 2] + + # We have to check for NaNs (as of writing 'M' doesn't actually work).
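+ # NaN and NaT sort to the end under ``partition``, so appending -1 to + # `kth` just below exposes the largest element; the `_median_nancheck` + # call at the end of this function can then detect slices containing NaN.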
+ supports_nans = np.issubdtype(a.dtype, np.inexact) or a.dtype.kind in 'Mm' + if supports_nans: + kth.append(-1) + + if overwrite_input: + if axis is None: + part = a.ravel() + part.partition(kth) + else: + a.partition(kth, axis=axis) + part = a + else: + part = partition(a, kth, axis=axis) + + if part.shape == (): + # make 0-D arrays work + return part.item() + if axis is None: + axis = 0 + + indexer = [slice(None)] * part.ndim + index = part.shape[axis] // 2 + if part.shape[axis] % 2 == 1: + # index with slice to allow mean (below) to work + indexer[axis] = slice(index, index + 1) + else: + indexer[axis] = slice(index - 1, index + 1) + indexer = tuple(indexer) + + # Use mean in both odd and even case to coerce data type, + # using out array if needed. + rout = mean(part[indexer], axis=axis, out=out) + if supports_nans and sz > 0: + # If nans are possible, warn and replace by nans like mean would. + rout = np.lib._utils_impl._median_nancheck(part, rout, axis) + + return rout + + +def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_percentile_dispatcher) +def percentile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + weights=None, + interpolation=None): + """ + Compute the q-th percentile of the data along the specified axis. + + Returns the q-th percentile(s) of the array elements. + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Percentage or sequence of percentages for the percentiles to compute. + Values must be between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. 
+ + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the percentile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + See the notes for more details. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + median : equivalent to ``percentile(..., 50)`` + nanpercentile + quantile : equivalent to percentile, except q in the range [0, 1]. + + Notes + ----- + The behavior of `numpy.percentile` with percentage `q` is + that of `numpy.quantile` with argument ``q/100``. + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.percentile(a, 50) + 3.5 + >>> np.percentile(a, 50, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.percentile(a, 50, axis=1) + array([7., 2.]) + >>> np.percentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + + >>> m = np.percentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.percentile(a, 50, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + + >>> b = a.copy() + >>> np.percentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + The different methods can be visualized graphically: + + .. plot:: + + import matplotlib.pyplot as plt + + a = np.arange(4) + p = np.linspace(0, 100, 6001) + ax = plt.gca() + lines = [ + ('linear', '-', 'C0'), + ('inverted_cdf', ':', 'C1'), + # Almost the same as `inverted_cdf`: + ('averaged_inverted_cdf', '-.', 'C1'), + ('closest_observation', ':', 'C2'), + ('interpolated_inverted_cdf', '--', 'C1'), + ('hazen', '--', 'C3'), + ('weibull', '-.', 'C4'), + ('median_unbiased', '--', 'C5'), + ('normal_unbiased', '-.', 'C6'), + ] + for method, style, color in lines: + ax.plot( + p, np.percentile(a, p, method=method), + label=method, linestyle=style, color=color) + ax.set( + title='Percentiles for different methods and data: ' + str(a), + xlabel='Percentile', + ylabel='Estimated percentile value', + yticks=a) + ax.legend(bbox_to_anchor=(1.03, 1)) + plt.tight_layout() + plt.show() + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 
361-365, 1996 + + """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "percentile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float) + # by making the divisor have the dtype of the data array. + q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + if not _quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_quantile_dispatcher) +def quantile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + weights=None, + interpolation=None): + """ + Compute the q-th quantile of the data along the specified axis. + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Probability or sequence of probabilities of the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The default is + to compute the quantile(s) along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + The recommended options, numbered as they appear in [1]_, are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. For backward compatibility + with previous versions of NumPy, the following discontinuous variations + of the default 'linear' (7.) option are available: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + See Notes for details. + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. 
+ + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the quantile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + See the notes for more details. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis + of the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + percentile : equivalent to quantile, but with q in the range [0, 100]. + median : equivalent to ``quantile(..., 0.5)`` + nanquantile + + Notes + ----- + Given a sample `a` from an underlying distribution, `quantile` provides a + nonparametric estimate of the inverse cumulative distribution function. + + By default, this is done by interpolating between adjacent elements in + ``y``, a sorted copy of `a`:: + + (1-g)*y[j] + g*y[j+1] + + where the index ``j`` and coefficient ``g`` are the integral and + fractional components of ``q * (n-1)``, and ``n`` is the number of + elements in the sample. + + This is a special case of Equation 1 of H&F [1]_. More generally, + + - ``j = (q*n + m - 1) // 1``, and + - ``g = (q*n + m - 1) % 1``, + + where ``m`` may be defined according to several different conventions. + The preferred convention may be selected using the ``method`` parameter: + + =============================== =============== =============== + ``method`` number in H&F ``m`` + =============================== =============== =============== + ``interpolated_inverted_cdf`` 4 ``0`` + ``hazen`` 5 ``1/2`` + ``weibull`` 6 ``q`` + ``linear`` (default) 7 ``1 - q`` + ``median_unbiased`` 8 ``q/3 + 1/3`` + ``normal_unbiased`` 9 ``q/4 + 3/8`` + =============================== =============== =============== + + Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to + ``n - 1`` when the results of the formula would be outside the allowed + range of non-negative indices. The ``- 1`` in the formulas for ``j`` and + ``g`` accounts for Python's 0-based indexing. + + The table above includes only the estimators from H&F that are continuous + functions of probability `q` (estimators 4-9). NumPy also provides the + three discontinuous estimators from H&F (estimators 1-3), where ``j`` is + defined as above, ``m`` is defined as follows, and ``g`` is a function + of the real-valued ``index = q*n + m - 1`` and ``j``. + + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and + ``g = (1 + int(index - j > 0)) / 2`` + 3. ``closest_observation``: ``m = -1/2`` and + ``g = 1 - int((index == j) & (j%2 == 1))`` + + For backward compatibility with previous versions of NumPy, `quantile` + provides four additional discontinuous estimators. 
Like + ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``, + but ``g`` is defined as follows. + + - ``lower``: ``g = 0`` + - ``midpoint``: ``g = 0.5`` + - ``higher``: ``g = 1`` + - ``nearest``: ``g = (q*(n-1) % 1) > 0.5`` + + **Weighted quantiles:** + More formally, the quantile at probability level :math:`q` of a cumulative + distribution function :math:`F(y)=P(Y \\leq y)` with probability measure + :math:`P` is defined as any number :math:`x` that fulfills the + *coverage conditions* + + .. math:: P(Y < x) \\leq q \\quad\\text{and}\\quad P(Y \\leq x) \\geq q + + with random variable :math:`Y\\sim P`. + Sample quantiles, the result of `quantile`, provide nonparametric + estimation of the underlying population counterparts, represented by the + unknown :math:`F`, given a data vector `a` of length ``n``. + + Some of the estimators above arise when one considers :math:`F` as the + empirical distribution function of the data, i.e. + :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. + Then, different methods correspond to different choices of :math:`x` that + fulfill the above coverage conditions. Methods that follow this approach + are ``inverted_cdf`` and ``averaged_inverted_cdf``. + + For weighted quantiles, the coverage conditions still hold. The + empirical cumulative distribution is simply replaced by its weighted + version, i.e. + :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. + Only ``method="inverted_cdf"`` supports weights. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.quantile(a, 0.5) + 3.5 + >>> np.quantile(a, 0.5, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.quantile(a, 0.5, axis=1) + array([7., 2.]) + >>> np.quantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.quantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.quantile(a, 0.5, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + See also `numpy.percentile` for a visualization of most methods. + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "quantile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float). + if isinstance(q, (int, float)) and a.dtype.kind == "f": + q = np.asanyarray(q, dtype=a.dtype) + else: + q = np.asanyarray(q) + + if not _quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. 
" + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _quantile_unchecked(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + weights=None): + """Assumes that q is in [0, 1], and is an ndarray""" + return _ureduce(a, + func=_quantile_ureduce_func, + q=q, + weights=weights, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method) + + +def _quantile_is_valid(q): + # avoid expensive reductions, relevant for arrays with < O(1000) elements + if q.ndim == 1 and q.size < 10: + for i in range(q.size): + if not (0.0 <= q[i] <= 1.0): + return False + elif not (q.min() >= 0 and q.max() <= 1): + return False + return True + + +def _check_interpolation_as_method(method, interpolation, fname): + # Deprecated NumPy 1.22, 2021-11-08 + warnings.warn( + f"the `interpolation=` argument to {fname} was renamed to " + "`method=`, which has additional options.\n" + "Users of the modes 'nearest', 'lower', 'higher', or " + "'midpoint' are encouraged to review the method they used. " + "(Deprecated NumPy 1.22)", + DeprecationWarning, stacklevel=4) + if method != "linear": + # sanity check, we assume this basically never happens + raise TypeError( + "You shall not pass both `method` and `interpolation`!\n" + "(`interpolation` is Deprecated in favor of `method`)") + return interpolation + + +def _compute_virtual_index(n, quantiles, alpha: float, beta: float): + """ + Compute the floating point indexes of an array for the linear + interpolation of quantiles. + n : array_like + The sample sizes. + quantiles : array_like + The quantiles values. + alpha : float + A constant used to correct the index computed. + beta : float + A constant used to correct the index computed. + + alpha and beta values depend on the chosen method + (see quantile documentation) + + Reference: + Hyndman&Fan paper "Sample Quantiles in Statistical Packages", + DOI: 10.1080/00031305.1996.10473566 + """ + return n * quantiles + ( + alpha + quantiles * (1 - alpha - beta) + ) - 1 + + +def _get_gamma(virtual_indexes, previous_indexes, method): + """ + Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation + of quantiles. + + virtual_indexes : array_like + The indexes where the percentile is supposed to be found in the sorted + sample. + previous_indexes : array_like + The floor values of virtual_indexes. + interpolation : dict + The interpolation method chosen, which may have a specific rule + modifying gamma. + + gamma is usually the fractional part of virtual_indexes but can be modified + by the interpolation method. + """ + gamma = np.asanyarray(virtual_indexes - previous_indexes) + gamma = method["fix_gamma"](gamma, virtual_indexes) + # Ensure both that we have an array, and that we keep the dtype + # (which may have been matched to the input array). + return np.asanyarray(gamma, dtype=virtual_indexes.dtype) + + +def _lerp(a, b, t, out=None): + """ + Compute the linear interpolation weighted by gamma on each point of + two same shape array. + + a : array_like + Left bound. + b : array_like + Right bound. + t : array_like + The interpolation weight. + out : array_like + Output array. 
+ """ + diff_b_a = subtract(b, a) + # asanyarray is a stop-gap until gh-13105 + lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) + subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, + casting='unsafe', dtype=type(lerp_interpolation.dtype)) + if lerp_interpolation.ndim == 0 and out is None: + lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays + return lerp_interpolation + + +def _get_gamma_mask(shape, default_value, conditioned_value, where): + out = np.full(shape, default_value) + np.copyto(out, conditioned_value, where=where, casting="unsafe") + return out + + +def _discrete_interpolation_to_boundaries(index, gamma_condition_fun): + previous = np.floor(index) + next = previous + 1 + gamma = index - previous + res = _get_gamma_mask(shape=index.shape, + default_value=next, + conditioned_value=previous, + where=gamma_condition_fun(gamma, index) + ).astype(np.intp) + # Some methods can lead to out-of-bound integers, clip them: + res[res < 0] = 0 + return res + + +def _closest_observation(n, quantiles): + # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). + # Order is 1-based so for zero-based indexing round to nearest odd index. + gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, + gamma_fun) + + +def _inverted_cdf(n, quantiles): + gamma_fun = lambda gamma, _: (gamma == 0) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1, + gamma_fun) + + +def _quantile_ureduce_func( + a: np.array, + q: np.array, + weights: np.array, + axis: int | None = None, + out=None, + overwrite_input: bool = False, + method="linear", +) -> np.array: + if q.ndim > 2: + # The code below works fine for nd, but it might not have useful + # semantics. For now, keep the supported dimensions the same as it was + # before. + raise ValueError("q must be a scalar or 1d") + if overwrite_input: + if axis is None: + axis = 0 + arr = a.ravel() + wgt = None if weights is None else weights.ravel() + else: + arr = a + wgt = weights + elif axis is None: + axis = 0 + arr = a.flatten() + wgt = None if weights is None else weights.flatten() + else: + arr = a.copy() + wgt = weights + result = _quantile(arr, + quantiles=q, + axis=axis, + method=method, + out=out, + weights=wgt) + return result + + +def _get_indexes(arr, virtual_indexes, valid_values_count): + """ + Get the valid indexes of arr neighbouring virtual_indexes. 
+ Note + This is a companion function to linear interpolation of + Quantiles + + Returns + ------- + (previous_indexes, next_indexes): Tuple + A Tuple of virtual_indexes neighbouring indexes + """ + previous_indexes = np.asanyarray(np.floor(virtual_indexes)) + next_indexes = np.asanyarray(previous_indexes + 1) + indexes_above_bounds = virtual_indexes >= valid_values_count - 1 + # When indexes is above max index, take the max value of the array + if indexes_above_bounds.any(): + previous_indexes[indexes_above_bounds] = -1 + next_indexes[indexes_above_bounds] = -1 + # When indexes is below min index, take the min value of the array + indexes_below_bounds = virtual_indexes < 0 + if indexes_below_bounds.any(): + previous_indexes[indexes_below_bounds] = 0 + next_indexes[indexes_below_bounds] = 0 + if np.issubdtype(arr.dtype, np.inexact): + # After the sort, slices having NaNs will have for last element a NaN + virtual_indexes_nans = np.isnan(virtual_indexes) + if virtual_indexes_nans.any(): + previous_indexes[virtual_indexes_nans] = -1 + next_indexes[virtual_indexes_nans] = -1 + previous_indexes = previous_indexes.astype(np.intp) + next_indexes = next_indexes.astype(np.intp) + return previous_indexes, next_indexes + + +def _quantile( + arr: np.array, + quantiles: np.array, + axis: int = -1, + method="linear", + out=None, + weights=None, +): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + It computes the quantiles of the array for the given axis. + A linear interpolation is performed based on the `interpolation`. + + By default, the method is "linear" where alpha == beta == 1 which + performs the 7th method of Hyndman&Fan. + With "median_unbiased" we get alpha == beta == 1/3 + thus the 8th method of Hyndman&Fan. + """ + # --- Setup + arr = np.asanyarray(arr) + values_count = arr.shape[axis] + # The dimensions of `q` are prepended to the output shape, so we need the + # axis being sampled from `arr` to be last. + if axis != 0: # But moveaxis is slow, so only call it if necessary. + arr = np.moveaxis(arr, axis, destination=0) + supports_nans = ( + np.issubdtype(arr.dtype, np.inexact) or arr.dtype.kind in 'Mm' + ) + + if weights is None: + # --- Computation of indexes + # Index where to find the value in the sorted array. + # Virtual because it is a floating point value, not an valid index. + # The nearest neighbours are used for interpolation + try: + method_props = _QuantileMethods[method] + except KeyError: + raise ValueError( + f"{method!r} is not a valid method. 
Use one of: " + f"{_QuantileMethods.keys()}") from None + virtual_indexes = method_props["get_virtual_index"](values_count, + quantiles) + virtual_indexes = np.asanyarray(virtual_indexes) + + if method_props["fix_gamma"] is None: + supports_integers = True + else: + int_virtual_indices = np.issubdtype(virtual_indexes.dtype, + np.integer) + supports_integers = method == 'linear' and int_virtual_indices + + if supports_integers: + # No interpolation needed, take the points along axis + if supports_nans: + # may contain nan, which would sort to the end + arr.partition( + concatenate((virtual_indexes.ravel(), [-1])), axis=0, + ) + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + arr.partition(virtual_indexes.ravel(), axis=0) + slices_having_nans = np.array(False, dtype=bool) + result = take(arr, virtual_indexes, axis=0, out=out) + else: + previous_indexes, next_indexes = _get_indexes(arr, + virtual_indexes, + values_count) + # --- Sorting + arr.partition( + np.unique(np.concatenate(([0, -1], + previous_indexes.ravel(), + next_indexes.ravel(), + ))), + axis=0) + if supports_nans: + slices_having_nans = np.isnan(arr[-1, ...]) + else: + slices_having_nans = None + # --- Get values from indexes + previous = arr[previous_indexes] + next = arr[next_indexes] + # --- Linear interpolation + gamma = _get_gamma(virtual_indexes, previous_indexes, method_props) + result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) + gamma = gamma.reshape(result_shape) + result = _lerp(previous, + next, + gamma, + out=out) + else: + # Weighted case + # This implements method="inverted_cdf", the only supported weighted + # method, which needs to sort anyway. + weights = np.asanyarray(weights) + if axis != 0: + weights = np.moveaxis(weights, axis, destination=0) + index_array = np.argsort(arr, axis=0, kind="stable") + + # arr = arr[index_array, ...] # but this adds trailing dimensions of + # 1. + arr = np.take_along_axis(arr, index_array, axis=0) + if weights.shape == arr.shape: + weights = np.take_along_axis(weights, index_array, axis=0) + else: + # weights is 1d + weights = weights.reshape(-1)[index_array, ...] + + if supports_nans: + # may contain nan, which would sort to the end + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + slices_having_nans = np.array(False, dtype=bool) + + # We use the weights to calculate the empirical cumulative + # distribution function cdf + cdf = weights.cumsum(axis=0, dtype=np.float64) + cdf /= cdf[-1, ...] # normalization to 1 + # Search index i such that + # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i) + # is then equivalent to + # cdf[i-1] < quantile <= cdf[i] + # Unfortunately, searchsorted only accepts 1-d arrays as first + # argument, so we will need to iterate over dimensions. + + # Without the following cast, searchsorted can return surprising + # results, e.g. + # np.searchsorted(np.array([0.2, 0.4, 0.6, 0.8, 1.]), + # np.array(0.4, dtype=np.float32), side="left") + # returns 2 instead of 1 because 0.4 is not binary representable. + if quantiles.dtype.kind == "f": + cdf = cdf.astype(quantiles.dtype) + # Weights must be non-negative, so we might have zero weights at the + # beginning leading to some leading zeros in cdf. The call to + # np.searchsorted for quantiles=0 will then pick the first element, + # but should pick the first one larger than zero. We + # therefore simply set 0 values in cdf to -1. + if np.any(cdf[0, ...] 
== 0): + cdf[cdf == 0] = -1 + + def find_cdf_1d(arr, cdf): + indices = np.searchsorted(cdf, quantiles, side="left") + # We might have reached the maximum with i = len(arr), e.g. for + # quantiles = 1, and need to cut it to len(arr) - 1. + indices = minimum(indices, values_count - 1) + result = take(arr, indices, axis=0) + return result + + r_shape = arr.shape[1:] + if quantiles.ndim > 0: + r_shape = quantiles.shape + r_shape + if out is None: + result = np.empty_like(arr, shape=r_shape) + else: + if out.shape != r_shape: + msg = (f"Wrong shape of argument 'out', shape={r_shape} is " + f"required; got shape={out.shape}.") + raise ValueError(msg) + result = out + + # See apply_along_axis, which we do for axis=0. Note that Ni = (,) + # always, so we remove it here. + Nk = arr.shape[1:] + for kk in np.ndindex(Nk): + result[(...,) + kk] = find_cdf_1d( + arr[np.s_[:, ] + kk], cdf[np.s_[:, ] + kk] + ) + + # Make result the same as in unweighted inverted_cdf. + if result.shape == () and result.dtype == np.dtype("O"): + result = result.item() + + if np.any(slices_having_nans): + if result.ndim == 0 and out is None: + # can't write to a scalar, but indexing will be correct + result = arr[-1] + else: + np.copyto(result, arr[-1, ...], where=slices_having_nans) + return result + + +def _trapezoid_dispatcher(y, x=None, dx=None, axis=None): + return (y, x) + + +@array_function_dispatch(_trapezoid_dispatcher) +def trapezoid(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + .. versionadded:: 2.0.0 + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapezoid : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + sum, cumsum + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. 
[2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + >>> import numpy as np + + Use the trapezoidal rule on evenly spaced points: + + >>> np.trapezoid([1, 2, 3]) + 4.0 + + The spacing between sample points can be selected by either the + ``x`` or ``dx`` arguments: + + >>> np.trapezoid([1, 2, 3], x=[4, 6, 8]) + 8.0 + >>> np.trapezoid([1, 2, 3], dx=2) + 8.0 + + Using a decreasing ``x`` corresponds to integrating in reverse: + + >>> np.trapezoid([1, 2, 3], x=[8, 6, 4]) + -8.0 + + More generally ``x`` is used to integrate along a parametric curve. We can + estimate the integral :math:`\int_0^1 x^2\,dx = 1/3` using: + + >>> x = np.linspace(0, 1, num=50) + >>> y = x**2 + >>> np.trapezoid(y, x) + 0.33340274885464394 + + Or estimate the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> np.trapezoid(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + ``np.trapezoid`` can be applied along a specified axis to do multiple + computations in one call: + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.trapezoid(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> np.trapezoid(a, axis=1) + array([2., 8.]) + """ + + y = asanyarray(y) + if x is None: + d = dx + else: + x = asanyarray(x) + if x.ndim == 1: + d = diff(x) + # reshape to correct shape + shape = [1] * y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = diff(x, axis=axis) + nd = y.ndim + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = add.reduce(d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, axis) + return ret + + + @set_module('numpy') + def trapz(y, x=None, dx=1.0, axis=-1): + """ + `trapz` is deprecated in NumPy 2.0. + + Please use `trapezoid` instead, or one of the numerical integration + functions in `scipy.integrate`. + """ + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`trapz` is deprecated. Use `trapezoid` instead, or one of the " + "numerical integration functions in `scipy.integrate`.", + DeprecationWarning, + stacklevel=2 + ) + return trapezoid(y, x=x, dx=dx, axis=axis) + + + def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): + return xi + + + # Based on scitools meshgrid + @array_function_dispatch(_meshgrid_dispatcher) + def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): + """ + Return a tuple of coordinate matrices from coordinate vectors. + + Make N-D coordinate arrays for vectorized evaluations of + N-D scalar/vector fields over N-D grids, given + one-dimensional coordinate arrays x1, x2,..., xn. + + Parameters + ---------- + x1, x2,..., xn : array_like + 1-D arrays representing the coordinates of a grid. + indexing : {'xy', 'ij'}, optional + Cartesian ('xy', default) or matrix ('ij') indexing of output. + See Notes for more details. + sparse : bool, optional + If True the shape of the returned coordinate array for dimension *i* + is reduced from ``(N1, ..., Ni, ... Nn)`` to + ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are + intended to be used with :ref:`basics.broadcasting`. When all + coordinates are used in an expression, broadcasting still leads to a + fully-dimensional result array.
+ + Default is False. + + copy : bool, optional + If False, views into the original arrays are returned in order to + conserve memory. Default is True. Please note that + ``sparse=False, copy=False`` will likely return non-contiguous + arrays. Furthermore, more than one element of a broadcast array + may refer to a single memory location. If you need to write to the + arrays, make copies first. + + Returns + ------- + X1, X2,..., XN : tuple of ndarrays + For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``, + returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij' + or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy' + with the elements of `xi` repeated to fill the matrix along + the first dimension for `x1`, the second for `x2` and so on. + + Notes + ----- + This function supports both indexing conventions through the indexing + keyword argument. Giving the string 'ij' returns a meshgrid with + matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. + In the 2-D case with inputs of length M and N, the outputs are of shape + (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case + with inputs of length M, N and P, outputs are of shape (N, M, P) for + 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is + illustrated by the following code snippet:: + + xv, yv = np.meshgrid(x, y, indexing='ij') + for i in range(nx): + for j in range(ny): + # treat xv[i,j], yv[i,j] + + xv, yv = np.meshgrid(x, y, indexing='xy') + for i in range(nx): + for j in range(ny): + # treat xv[j,i], yv[j,i] + + In the 1-D and 0-D case, the indexing and sparse keywords have no effect. + + See Also + -------- + mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. + ogrid : Construct an open multi-dimensional "meshgrid" using indexing + notation. + :ref:`how-to-index` + + Examples + -------- + >>> import numpy as np + >>> nx, ny = (3, 2) + >>> x = np.linspace(0, 1, nx) + >>> y = np.linspace(0, 1, ny) + >>> xv, yv = np.meshgrid(x, y) + >>> xv + array([[0. , 0.5, 1. ], + [0. , 0.5, 1. ]]) + >>> yv + array([[0., 0., 0.], + [1., 1., 1.]]) + + The result of `meshgrid` is a coordinate grid: + + >>> import matplotlib.pyplot as plt + >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none') + >>> plt.show() + + You can create sparse output arrays to save memory and computation time. + + >>> xv, yv = np.meshgrid(x, y, sparse=True) + >>> xv + array([[0. , 0.5, 1. ]]) + >>> yv + array([[0.], + [1.]]) + + `meshgrid` is very useful to evaluate functions on a grid. If the + function depends on all coordinates, both dense and sparse outputs can be + used.
+ + >>> x = np.linspace(-5, 5, 101) + >>> y = np.linspace(-5, 5, 101) + >>> # full coordinate arrays + >>> xx, yy = np.meshgrid(x, y) + >>> zz = np.sqrt(xx**2 + yy**2) + >>> xx.shape, yy.shape, zz.shape + ((101, 101), (101, 101), (101, 101)) + >>> # sparse coordinate arrays + >>> xs, ys = np.meshgrid(x, y, sparse=True) + >>> zs = np.sqrt(xs**2 + ys**2) + >>> xs.shape, ys.shape, zs.shape + ((1, 101), (101, 1), (101, 101)) + >>> np.array_equal(zz, zs) + True + + >>> h = plt.contourf(x, y, zs) + >>> plt.axis('scaled') + >>> plt.colorbar() + >>> plt.show() + """ + ndim = len(xi) + + if indexing not in ['xy', 'ij']: + raise ValueError( + "Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) + for i, x in enumerate(xi)] + + if indexing == 'xy' and ndim > 1: + # switch first and second axis + output[0].shape = (1, -1) + s0[2:] + output[1].shape = (-1, 1) + s0[2:] + + if not sparse: + # Return the full N-D matrix (not only the 1-D vector) + output = np.broadcast_arrays(*output, subok=True) + + if copy: + output = tuple(x.copy() for x in output) + + return output + + +def _delete_dispatcher(arr, obj, axis=None): + return (arr, obj) + + +@array_function_dispatch(_delete_dispatcher) +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : array_like + Input array. + obj : slice, int, array-like of ints or bools + Indicate indices of sub-arrays to remove along the specified axis. + + .. versionchanged:: 1.19.0 + Boolean indices are now treated as a mask of elements to remove, + rather than being cast to the integers 0 and 1. + + axis : int, optional + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + See Also + -------- + insert : Insert elements into an array. + append : Append elements at the end of an array. + + Notes + ----- + Often it is preferable to use a boolean mask. For example: + + >>> arr = np.arange(12) + 1 + >>> mask = np.ones(len(arr), dtype=bool) + >>> mask[[0,2,4]] = False + >>> result = arr[mask,...] + + Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further + use of `mask`. 
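+ For instance, the equivalence can be checked directly: + + >>> np.array_equal(result, np.delete(arr, [0, 2, 4], axis=0)) + True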
+ + Examples + -------- + >>> import numpy as np + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) + >>> np.delete(arr, 1, 0) + array([[ 1, 2, 3, 4], + [ 9, 10, 11, 12]]) + + >>> np.delete(arr, np.s_[::2], 1) + array([[ 2, 4], + [ 6, 8], + [10, 12]]) + >>> np.delete(arr, [1,3,5], None) + array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) + + """ + conv = _array_converter(arr) + arr, = conv.as_arrays(subok=False) + + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + + slobj = [slice(None)] * ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + start, stop, step = obj.indices(N) + xr = range(start, stop, step) + numtodel = len(xr) + + if numtodel <= 0: + return conv.wrap(arr.copy(order=arrorder), to_scalar=False) + + # Invert if step is negative: + if step < 0: + step = -step + start = xr[-1] + stop = xr[0] + 1 + + newshape[axis] -= numtodel + new = empty(newshape, arr.dtype, arrorder) + # copy initial chunk + if start == 0: + pass + else: + slobj[axis] = slice(None, start) + new[tuple(slobj)] = arr[tuple(slobj)] + # copy end chunk + if stop == N: + pass + else: + slobj[axis] = slice(stop - numtodel, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(stop, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + # copy middle pieces + if step == 1: + pass + else: # use array indexing. + keep = ones(stop - start, dtype=bool) + keep[:stop - start:step] = False + slobj[axis] = slice(start, stop - numtodel) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(start, stop) + arr = arr[tuple(slobj2)] + slobj2[axis] = keep + new[tuple(slobj)] = arr[tuple(slobj2)] + + return conv.wrap(new, to_scalar=False) + + if isinstance(obj, (int, integer)) and not isinstance(obj, bool): + single_value = True + else: + single_value = False + _obj = obj + obj = np.asarray(obj) + # `size == 0` to allow empty lists similar to indexing, but (as there) + # is really too generic: + if obj.size == 0 and not isinstance(_obj, np.ndarray): + obj = obj.astype(intp) + elif obj.size == 1 and obj.dtype.kind in "ui": + # For a size 1 integer array we can use the single-value path + # (most dtypes, except boolean, should just fail later). 
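+ # `.item()` converts the size-1 array to a Python scalar, so the
+ # single-value branch below treats it like a plain integer index.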
+ obj = obj.item()
+ single_value = True
+
+ if single_value:
+ # optimization for a single value
+ if (obj < -N or obj >= N):
+ raise IndexError(
+ f"index {obj} is out of bounds for axis {axis} with "
+ f"size {N}")
+ if (obj < 0):
+ obj += N
+ newshape[axis] -= 1
+ new = empty(newshape, arr.dtype, arrorder)
+ slobj[axis] = slice(None, obj)
+ new[tuple(slobj)] = arr[tuple(slobj)]
+ slobj[axis] = slice(obj, None)
+ slobj2 = [slice(None)] * ndim
+ slobj2[axis] = slice(obj + 1, None)
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+ else:
+ if obj.dtype == bool:
+ if obj.shape != (N,):
+ raise ValueError('boolean array argument obj to delete '
+ 'must be one dimensional and match the axis '
+ f'length of {N}')
+
+ # optimization, the other branch is slower
+ keep = ~obj
+ else:
+ keep = ones(N, dtype=bool)
+ keep[obj,] = False
+
+ slobj[axis] = keep
+ new = arr[tuple(slobj)]
+
+ return conv.wrap(new, to_scalar=False)
+
+
+def _insert_dispatcher(arr, obj, values, axis=None):
+ return (arr, obj, values)
+
+
+@array_function_dispatch(_insert_dispatcher)
+def insert(arr, obj, values, axis=None):
+ """
+ Insert values along the given axis before the given indices.
+
+ Parameters
+ ----------
+ arr : array_like
+ Input array.
+ obj : slice, int, array-like of ints or bools
+ Object that defines the index or indices before which `values` is
+ inserted.
+
+ .. versionchanged:: 2.1.2
+ Boolean indices are now treated as a mask of elements to insert,
+ rather than being cast to the integers 0 and 1.
+
+ Support for multiple insertions when `obj` is a single scalar or a
+ sequence with one element (similar to calling insert multiple
+ times).
+ values : array_like
+ Values to insert into `arr`. If the type of `values` is different
+ from that of `arr`, `values` is converted to the type of `arr`.
+ `values` should be shaped so that ``arr[...,obj,...] = values``
+ is legal.
+ axis : int, optional
+ Axis along which to insert `values`. If `axis` is None then `arr`
+ is flattened first.
+
+ Returns
+ -------
+ out : ndarray
+ A copy of `arr` with `values` inserted. Note that `insert`
+ does not occur in-place: a new array is returned. If
+ `axis` is None, `out` is a flattened array.
+
+ See Also
+ --------
+ append : Append elements at the end of an array.
+ concatenate : Join a sequence of arrays along an existing axis.
+ delete : Delete elements from an array.
+
+ Notes
+ -----
+ Note that for higher dimensional inserts ``obj=0`` behaves very differently
+ from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from
+ ``arr[:,[0],:] = values``. This is because of the difference between basic
+ and advanced indexing.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(6).reshape(3, 2)
+ >>> a
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+ >>> np.insert(a, 1, 6)
+ array([0, 6, 1, 2, 3, 4, 5])
+ >>> np.insert(a, 1, 6, axis=1)
+ array([[0, 6, 1],
+ [2, 6, 3],
+ [4, 6, 5]])
+
+ Difference between sequence and scalars,
+ showing how ``obj=[1]`` behaves differently from ``obj=1``:
+
+ >>> np.insert(a, [1], [[7],[8],[9]], axis=1)
+ array([[0, 7, 1],
+ [2, 8, 3],
+ [4, 9, 5]])
+ >>> np.insert(a, 1, [[7],[8],[9]], axis=1)
+ array([[0, 7, 8, 9, 1],
+ [2, 7, 8, 9, 3],
+ [4, 7, 8, 9, 5]])
+ >>> np.array_equal(np.insert(a, 1, [7, 8, 9], axis=1),
+ ... np.insert(a, [1], [[7],[8],[9]], axis=1))
+ True
+
+ >>> b = a.flatten()
+ >>> b
+ array([0, 1, 2, 3, 4, 5])
+ >>> np.insert(b, [2, 2], [6, 7])
+ array([0, 1, 6, 7, 2, 3, 4, 5])
+
+ >>> np.insert(b, slice(2, 4), [7, 8])
+ array([0, 1, 7, 2, 8, 3, 4, 5])
+
+ >>> np.insert(b, [2, 2], [7.13, False]) # type casting
+ array([0, 1, 7, 0, 2, 3, 4, 5])
+
+ >>> x = np.arange(8).reshape(2, 4)
+ >>> idx = (1, 3)
+ >>> np.insert(x, idx, 999, axis=1)
+ array([[ 0, 999, 1, 2, 999, 3],
+ [ 4, 999, 5, 6, 999, 7]])
+
+ """
+ conv = _array_converter(arr)
+ arr, = conv.as_arrays(subok=False)
+
+ ndim = arr.ndim
+ arrorder = 'F' if arr.flags.fnc else 'C'
+ if axis is None:
+ if ndim != 1:
+ arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
+ ndim = arr.ndim
+ axis = ndim - 1
+ else:
+ axis = normalize_axis_index(axis, ndim)
+ slobj = [slice(None)] * ndim
+ N = arr.shape[axis]
+ newshape = list(arr.shape)
+
+ if isinstance(obj, slice):
+ # turn it into a range object
+ indices = arange(*obj.indices(N), dtype=intp)
+ else:
+ # need to copy obj, because indices will be changed in-place
+ indices = np.array(obj)
+ if indices.dtype == bool:
+ if obj.ndim != 1:
+ raise ValueError('boolean array argument obj to insert '
+ 'must be one dimensional')
+ indices = np.flatnonzero(obj)
+ elif indices.ndim > 1:
+ raise ValueError(
+ "index array argument obj to insert must be one dimensional "
+ "or scalar")
+ if indices.size == 1:
+ index = indices.item()
+ if index < -N or index > N:
+ raise IndexError(f"index {obj} is out of bounds for axis {axis} "
+ f"with size {N}")
+ if (index < 0):
+ index += N
+
+ # There are some object array corner cases here, but we cannot avoid
+ # that:
+ values = array(values, copy=None, ndmin=arr.ndim, dtype=arr.dtype)
+ if indices.ndim == 0:
+ # broadcasting is very different here, since a[:,0,:] = ... behaves
+ # very differently from a[:,[0],:] = ...! This changes values so that
+ # it works like the second case. (here a[:,0:1,:])
+ values = np.moveaxis(values, 0, axis)
+ numnew = values.shape[axis]
+ newshape[axis] += numnew
+ new = empty(newshape, arr.dtype, arrorder)
+ slobj[axis] = slice(None, index)
+ new[tuple(slobj)] = arr[tuple(slobj)]
+ slobj[axis] = slice(index, index + numnew)
+ new[tuple(slobj)] = values
+ slobj[axis] = slice(index + numnew, None)
+ slobj2 = [slice(None)] * ndim
+ slobj2[axis] = slice(index, None)
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+
+ return conv.wrap(new, to_scalar=False)
+
+ elif indices.size == 0 and not isinstance(obj, np.ndarray):
+ # Can safely cast the empty list to intp
+ indices = indices.astype(intp)
+
+ indices[indices < 0] += N
+
+ numnew = len(indices)
+ order = indices.argsort(kind='mergesort') # stable sort
+ indices[order] += np.arange(numnew)
+
+ newshape[axis] += numnew
+ old_mask = ones(newshape[axis], dtype=bool)
+ old_mask[indices] = False
+
+ new = empty(newshape, arr.dtype, arrorder)
+ slobj2 = [slice(None)] * ndim
+ slobj[axis] = indices
+ slobj2[axis] = old_mask
+ new[tuple(slobj)] = values
+ new[tuple(slobj2)] = arr
+
+ return conv.wrap(new, to_scalar=False)
+
+
+def _append_dispatcher(arr, values, axis=None):
+ return (arr, values)
+
+
+@array_function_dispatch(_append_dispatcher)
+def append(arr, values, axis=None):
+ """
+ Append values to the end of an array.
+
+ Parameters
+ ----------
+ arr : array_like
+ Values are appended to a copy of this array.
+ values : array_like
+ These values are appended to a copy of `arr`. It must be of the
+ correct shape (the same shape as `arr`, excluding `axis`). If
+ `axis` is not specified, `values` can be any shape and will be
+ flattened before use.
+ axis : int, optional
+ The axis along which `values` are appended. If `axis` is not
+ given, both `arr` and `values` are flattened before use.
+
+ Returns
+ -------
+ append : ndarray
+ A copy of `arr` with `values` appended to `axis`. Note that
+ `append` does not occur in-place: a new array is allocated and
+ filled. If `axis` is None, `out` is a flattened array.
+
+ See Also
+ --------
+ insert : Insert elements into an array.
+ delete : Delete elements from an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
+ array([1, 2, 3, ..., 7, 8, 9])
+
+ When `axis` is specified, `values` must have the correct shape.
+
+ >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
+ array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+
+ >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
+ Traceback (most recent call last):
+ ...
+ ValueError: all the input arrays must have same number of dimensions, but
+ the array at index 0 has 2 dimension(s) and the array at index 1 has 1
+ dimension(s)
+
+ >>> a = np.array([1, 2], dtype=int)
+ >>> c = np.append(a, [])
+ >>> c
+ array([1., 2.])
+ >>> c.dtype
+ float64
+
+ The default dtype for empty ndarrays is `float64`, so appending an empty
+ array to an `int64` array yields a `float64` result.
+
+ """
+ arr = asanyarray(arr)
+ if axis is None:
+ if arr.ndim != 1:
+ arr = arr.ravel()
+ values = ravel(values)
+ axis = arr.ndim - 1
+ return concatenate((arr, values), axis=axis)
+
+
+def _digitize_dispatcher(x, bins, right=None):
+ return (x, bins)
+
+
+@array_function_dispatch(_digitize_dispatcher)
+def digitize(x, bins, right=False):
+ """
+ Return the indices of the bins to which each value in input array belongs.
+
+ ========= ============= ============================
+ `right` order of bins returned index `i` satisfies
+ ========= ============= ============================
+ ``False`` increasing ``bins[i-1] <= x < bins[i]``
+ ``True`` increasing ``bins[i-1] < x <= bins[i]``
+ ``False`` decreasing ``bins[i-1] > x >= bins[i]``
+ ``True`` decreasing ``bins[i-1] >= x > bins[i]``
+ ========= ============= ============================
+
+ If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
+ returned as appropriate.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array to be binned. Prior to NumPy 1.10.0, this array had to
+ be 1-dimensional, but can now have any shape.
+ bins : array_like
+ Array of bins. It has to be 1-dimensional and monotonic.
+ right : bool, optional
+ Indicating whether the intervals include the right or the left bin
+ edge. Default behavior is (right==False) indicating that the interval
+ does not include the right edge. The left bin end is open in this
+ case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
+ monotonically increasing bins.
+
+ Returns
+ -------
+ indices : ndarray of ints
+ Output array of indices, of same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If `bins` is not monotonic.
+ TypeError
+ If the type of the input is complex.
+
+ See Also
+ --------
+ bincount, histogram, unique, searchsorted
+
+ Notes
+ -----
+ If values in `x` are such that they fall outside the bin range,
+ attempting to index `bins` with the indices that `digitize` returns
+ will result in an IndexError.
+
+ .. versionadded:: 1.10.0
+
+ `numpy.digitize` is implemented in terms of `numpy.searchsorted`.
+ This means that a binary search is used to bin the values, which scales
+ much better for a larger number of bins than the previous linear search.
+ It also removes the requirement for the input array to be 1-dimensional.
+
+ For monotonically *increasing* `bins`, the following are equivalent::
+
+ np.digitize(x, bins, right=True)
+ np.searchsorted(bins, x, side='left')
+
+ Note that as the order of the arguments is reversed, the side must be too.
+ The `searchsorted` call is marginally faster, as it does not do any
+ monotonicity checks. Perhaps more importantly, it supports all dtypes.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+ >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+ >>> inds = np.digitize(x, bins)
+ >>> inds
+ array([1, 4, 3, 2])
+ >>> for n in range(x.size):
+ ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
+ ...
+ 0.0 <= 0.2 < 1.0
+ 4.0 <= 6.4 < 10.0
+ 2.5 <= 3.0 < 4.0
+ 1.0 <= 1.6 < 2.5
+
+ >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
+ >>> bins = np.array([0, 5, 10, 15, 20])
+ >>> np.digitize(x,bins,right=True)
+ array([1, 2, 3, 4, 4])
+ >>> np.digitize(x,bins,right=False)
+ array([1, 3, 3, 4, 5])
+ """
+ x = _nx.asarray(x)
+ bins = _nx.asarray(bins)
+
+ # here for compatibility, searchsorted below is happy to take this
+ if np.issubdtype(x.dtype, _nx.complexfloating):
+ raise TypeError("x may not be complex")
+
+ mono = _monotonicity(bins)
+ if mono == 0:
+ raise ValueError("bins must be monotonically increasing or decreasing")
+
+ # this is backwards because the arguments below are swapped
+ side = 'left' if right else 'right'
+ if mono == -1:
+ # reverse the bins, and invert the results
+ return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
+ else:
+ return _nx.searchsorted(bins, x, side=side)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.pyi
new file mode 100644
index 0000000..090fb23
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.pyi
@@ -0,0 +1,985 @@
+# ruff: noqa: ANN401
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+ Any,
+ Concatenate,
+ ParamSpec,
+ Protocol,
+ SupportsIndex,
+ SupportsInt,
+ TypeAlias,
+ TypeVar,
+ overload,
+ type_check_only,
+)
+from typing import Literal as L
+
+from _typeshed import Incomplete
+from typing_extensions import TypeIs, deprecated
+
+import numpy as np
+from numpy import (
+ _OrderKACF,
+ bool_,
+ complex128,
+ complexfloating,
+ datetime64,
+ float64,
+ floating,
+ generic,
+ integer,
+ intp,
+ object_,
+ timedelta64,
+ vectorize,
+)
+from numpy._core.multiarray import bincount
+from numpy._globals import _NoValueType
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NDArray,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeDT64_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeNumber_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeTD64_co,
+ _ComplexLike_co,
+ _DTypeLike,
+ _FloatLike_co,
+ _NestedSequence,
+ _NumberLike_co,
+ _ScalarLike_co,
+ _ShapeLike,
+)
+
+__all__ = [
+ "select",
+ "piecewise",
+ "trim_zeros",
+ "copy",
+ "iterable",
+ "percentile",
+ "diff",
+ "gradient",
+ "angle",
+ "unwrap",
+ "sort_complex",
+ "flip",
+ "rot90",
+ "extract",
+ "place",
+ "vectorize",
+ "asarray_chkfinite",
+ "average",
+ "bincount",
+ "digitize",
+ "cov",
+ "corrcoef",
+
"median", + "sinc", + "hamming", + "hanning", + "bartlett", + "blackman", + "kaiser", + "trapezoid", + "trapz", + "i0", + "meshgrid", + "delete", + "insert", + "append", + "interp", + "quantile", +] + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +# The `{}ss` suffix refers to the Python 3.12 syntax: `**P` +_Pss = ParamSpec("_Pss") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=generic) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) + +_2Tuple: TypeAlias = tuple[_T, _T] +_MeshgridIdx: TypeAlias = L['ij', 'xy'] + +@type_check_only +class _TrimZerosSequence(Protocol[_T_co]): + def __len__(self, /) -> int: ... + @overload + def __getitem__(self, key: int, /) -> object: ... + @overload + def __getitem__(self, key: slice, /) -> _T_co: ... + +### + +@overload +def rot90( + m: _ArrayLike[_ScalarT], + k: int = ..., + axes: tuple[int, int] = ..., +) -> NDArray[_ScalarT]: ... +@overload +def rot90( + m: ArrayLike, + k: int = ..., + axes: tuple[int, int] = ..., +) -> NDArray[Any]: ... + +@overload +def flip(m: _ScalarT, axis: None = ...) -> _ScalarT: ... +@overload +def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ... +@overload +def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ... +@overload +def flip(m: ArrayLike, axis: _ShapeLike | None = ...) -> NDArray[Any]: ... + +def iterable(y: object) -> TypeIs[Iterable[Any]]: ... + +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> floating: ... +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _2Tuple[floating]: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> complexfloating: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _2Tuple[complexfloating]: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + weights: object | None = None, + *, + returned: L[True], + keepdims: bool | bool_ | _NoValueType = ..., +) -> _2Tuple[Incomplete]: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + weights: object | None = None, + returned: bool | bool_ = False, + *, + keepdims: bool | bool_ | _NoValueType = ..., +) -> Incomplete: ... + +@overload +def asarray_chkfinite( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray_chkfinite( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[Any]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., +) -> NDArray[Any]: ... 
+ +@overload +def piecewise( + x: _ArrayLike[_ScalarT], + condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], + funclist: Sequence[ + Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]] + | _ScalarT | object + ], + /, + *args: _Pss.args, + **kw: _Pss.kwargs, +) -> NDArray[_ScalarT]: ... +@overload +def piecewise( + x: ArrayLike, + condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], + funclist: Sequence[ + Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]] + | object + ], + /, + *args: _Pss.args, + **kw: _Pss.kwargs, +) -> NDArray[Any]: ... + +def select( + condlist: Sequence[ArrayLike], + choicelist: Sequence[ArrayLike], + default: ArrayLike = ..., +) -> NDArray[Any]: ... + +@overload +def copy( + a: _ArrayT, + order: _OrderKACF, + subok: L[True], +) -> _ArrayT: ... +@overload +def copy( + a: _ArrayT, + order: _OrderKACF = ..., + *, + subok: L[True], +) -> _ArrayT: ... +@overload +def copy( + a: _ArrayLike[_ScalarT], + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[_ScalarT]: ... +@overload +def copy( + a: ArrayLike, + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[Any]: ... + +def gradient( + f: ArrayLike, + *varargs: ArrayLike, + axis: _ShapeLike | None = ..., + edge_order: L[1, 2] = ..., +) -> Any: ... + +@overload +def diff( + a: _T, + n: L[0], + axis: SupportsIndex = ..., + prepend: ArrayLike = ..., + append: ArrayLike = ..., +) -> _T: ... +@overload +def diff( + a: ArrayLike, + n: int = ..., + axis: SupportsIndex = ..., + prepend: ArrayLike = ..., + append: ArrayLike = ..., +) -> NDArray[Any]: ... + +@overload # float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> float64: ... +@overload # float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[float64]: ... +@overload # float scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[float64] | float64: ... +@overload # complex scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128: ... +@overload # complex or float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128 | float64: ... +@overload # complex array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128]: ... 
+@overload # complex or float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64]: ... +@overload # complex scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128] | complex128: ... +@overload # complex or float scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeNumber_co, + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64] | complex128 | float64: ... + +@overload +def angle(z: _ComplexLike_co, deg: bool = ...) -> floating: ... +@overload +def angle(z: object_, deg: bool = ...) -> Any: ... +@overload +def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating]: ... +@overload +def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ... + +@overload +def unwrap( + p: _ArrayLikeFloat_co, + discont: float | None = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[floating]: ... +@overload +def unwrap( + p: _ArrayLikeObject_co, + discont: float | None = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[object_]: ... + +def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... + +def trim_zeros( + filt: _TrimZerosSequence[_T], + trim: L["f", "b", "fb", "bf"] = ..., +) -> _T: ... + +@overload +def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... + +def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... + +@overload +def cov( + m: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co | None = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: SupportsIndex | SupportsInt | None = ..., + fweights: ArrayLike | None = ..., + aweights: ArrayLike | None = ..., + *, + dtype: None = ..., +) -> NDArray[floating]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: SupportsIndex | SupportsInt | None = ..., + fweights: ArrayLike | None = ..., + aweights: ArrayLike | None = ..., + *, + dtype: None = ..., +) -> NDArray[complexfloating]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: SupportsIndex | SupportsInt | None = ..., + fweights: ArrayLike | None = ..., + aweights: ArrayLike | None = ..., + *, + dtype: _DTypeLike[_ScalarT], +) -> NDArray[_ScalarT]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: SupportsIndex | SupportsInt | None = ..., + fweights: ArrayLike | None = ..., + aweights: ArrayLike | None = ..., + *, + dtype: DTypeLike, +) -> NDArray[Any]: ... + +# NOTE `bias` and `ddof` are deprecated and ignored +@overload +def corrcoef( + m: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., + *, + dtype: None = None, +) -> NDArray[floating]: ... 
+@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., + *, + dtype: None = None, +) -> NDArray[complexfloating]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., + *, + dtype: _DTypeLike[_ScalarT], +) -> NDArray[_ScalarT]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., + *, + dtype: DTypeLike | None = None, +) -> NDArray[Any]: ... + +def blackman(M: _FloatLike_co) -> NDArray[floating]: ... + +def bartlett(M: _FloatLike_co) -> NDArray[floating]: ... + +def hanning(M: _FloatLike_co) -> NDArray[floating]: ... + +def hamming(M: _FloatLike_co) -> NDArray[floating]: ... + +def i0(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... + +def kaiser( + M: _FloatLike_co, + beta: _FloatLike_co, +) -> NDArray[floating]: ... + +@overload +def sinc(x: _FloatLike_co) -> floating: ... +@overload +def sinc(x: _ComplexLike_co) -> complexfloating: ... +@overload +def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def median( + a: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> floating: ... +@overload +def median( + a: _ArrayLikeComplex_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> complexfloating: ... +@overload +def median( + a: _ArrayLikeTD64_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> timedelta64: ... +@overload +def median( + a: _ArrayLikeObject_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayT: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayT: ... + +_MethodKind = L[ + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +] + +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> floating: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> complexfloating: ... 
+@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> timedelta64: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> datetime64: ... +@overload +def percentile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> Any: ... +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[floating]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[timedelta64]: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[datetime64]: ... +@overload +def percentile( + a: _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[object_]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> Any: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> _ArrayT: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + weights: _ArrayLikeFloat_co | None = ..., +) -> _ArrayT: ... 
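+
+# Taken together, the `percentile` overloads encode that a scalar `q` yields
+# a scalar, an array-like `q` yields an array, and passing `out` returns that
+# array's own type, e.g. for `a: _ArrayLikeFloat_co`:
+#
+# percentile(a, 50.0) # -> floating
+# percentile(a, [25, 75]) # -> NDArray[floating]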
+ +# NOTE: Not an alias, but they do have identical signatures +# (that we can reuse) +quantile = percentile + +_ScalarT_fm = TypeVar( + "_ScalarT_fm", + bound=floating | complexfloating | timedelta64, +) + +class _SupportsRMulFloat(Protocol[_T_co]): + def __rmul__(self, other: float, /) -> _T_co: ... + +@overload +def trapezoid( # type: ignore[overload-overlap] + y: Sequence[_FloatLike_co], + x: Sequence[_FloatLike_co] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float64: ... +@overload +def trapezoid( + y: Sequence[_ComplexLike_co], + x: Sequence[_ComplexLike_co] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> complex128: ... +@overload +def trapezoid( + y: _ArrayLike[bool_ | integer], + x: _ArrayLike[bool_ | integer] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float64 | NDArray[float64]: ... +@overload +def trapezoid( # type: ignore[overload-overlap] + y: _ArrayLikeObject_co, + x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float | NDArray[object_]: ... +@overload +def trapezoid( + y: _ArrayLike[_ScalarT_fm], + x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ... +@overload +def trapezoid( + y: Sequence[_SupportsRMulFloat[_T]], + x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> _T: ... +@overload +def trapezoid( + y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> ( + floating | complexfloating | timedelta64 + | NDArray[floating | complexfloating | timedelta64 | object_] +): ... + +@deprecated("Use 'trapezoid' instead") +def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ... + +@overload +def meshgrid( + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[()]: ... +@overload +def meshgrid( + x1: _ArrayLike[_ScalarT], + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[_ScalarT]]: ... +@overload +def meshgrid( + x1: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any]]: ... +@overload +def meshgrid( + x1: _ArrayLike[_ScalarT1], + x2: _ArrayLike[_ScalarT2], + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def meshgrid( + x1: ArrayLike, + x2: _ArrayLike[_ScalarT], + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ... +@overload +def meshgrid( + x1: _ArrayLike[_ScalarT], + x2: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ... +@overload +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ... 
+@overload +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + x4: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ... +@overload +def meshgrid( + *xi: ArrayLike, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], ...]: ... + +@overload +def delete( + arr: _ArrayLike[_ScalarT], + obj: slice | _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def delete( + arr: ArrayLike, + obj: slice | _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., +) -> NDArray[Any]: ... + +@overload +def insert( + arr: _ArrayLike[_ScalarT], + obj: slice | _ArrayLikeInt_co, + values: ArrayLike, + axis: SupportsIndex | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def insert( + arr: ArrayLike, + obj: slice | _ArrayLikeInt_co, + values: ArrayLike, + axis: SupportsIndex | None = ..., +) -> NDArray[Any]: ... + +def append( + arr: ArrayLike, + values: ArrayLike, + axis: SupportsIndex | None = ..., +) -> NDArray[Any]: ... + +@overload +def digitize( + x: _FloatLike_co, + bins: _ArrayLikeFloat_co, + right: bool = ..., +) -> intp: ... +@overload +def digitize( + x: _ArrayLikeFloat_co, + bins: _ArrayLikeFloat_co, + right: bool = ..., +) -> NDArray[intp]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.py new file mode 100644 index 0000000..b4aacd0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.py @@ -0,0 +1,1085 @@ +""" +Histogram-related functions +""" +import contextlib +import functools +import operator +import warnings + +import numpy as np +from numpy._core import overrides + +__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + +# range is a keyword argument to many functions, so save the builtin so they can +# use it. +_range = range + + +def _ptp(x): + """Peak-to-peak value of x. + + This implementation avoids the problem of signed integer arrays having a + peak-to-peak value that cannot be represented with the array's data type. + This function returns an unsigned value for signed integer arrays. + """ + return _unsigned_subtract(x.max(), x.min()) + + +def _hist_bin_sqrt(x, range): + """ + Square root histogram bin estimator. + + Bin width is inversely proportional to the data size. Used by many + programs for its simplicity. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / np.sqrt(x.size) + + +def _hist_bin_sturges(x, range): + """ + Sturges histogram bin estimator. + + A very simplistic estimator based on the assumption of normality of + the data. This estimator has poor performance for non-normal data, + which becomes especially obvious for large data sets. The estimate + depends only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. 
+ """ + del range # unused + return _ptp(x) / (np.log2(x.size) + 1.0) + + +def _hist_bin_rice(x, range): + """ + Rice histogram bin estimator. + + Another simple estimator with no normality assumption. It has better + performance for large data than Sturges, but tends to overestimate + the number of bins. The number of bins is proportional to the cube + root of data size (asymptotically optimal). The estimate depends + only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) + + +def _hist_bin_scott(x, range): + """ + Scott histogram bin estimator. + + The binwidth is proportional to the standard deviation of the data + and inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) + + +def _hist_bin_stone(x, range): + """ + Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). + + The number of bins is chosen by minimizing the estimated ISE against the unknown + true distribution. The ISE is estimated using cross-validation and can be regarded + as a generalization of Scott's rule. + https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule + + This paper by Stone appears to be the origination of this rule. + https://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + range : (float, float) + The lower and upper range of the bins. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ # noqa: E501 + + n = x.size + ptp_x = _ptp(x) + if n <= 1 or ptp_x == 0: + return 0 + + def jhat(nbins): + hh = ptp_x / nbins + p_k = np.histogram(x, bins=nbins, range=range)[0] / n + return (2 - (n + 1) * p_k.dot(p_k)) / hh + + nbins_upper_bound = max(100, int(np.sqrt(n))) + nbins = min(_range(1, nbins_upper_bound + 1), key=jhat) + if nbins == nbins_upper_bound: + warnings.warn("The number of bins estimated may be suboptimal.", + RuntimeWarning, stacklevel=3) + return ptp_x / nbins + + +def _hist_bin_doane(x, range): + """ + Doane's histogram bin estimator. + + Improved version of Sturges' formula which works better for + non-normal data. See + stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. 
+ """ + del range # unused + if x.size > 2: + sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) + sigma = np.std(x) + if sigma > 0.0: + # These three operations add up to + # g1 = np.mean(((x - np.mean(x)) / sigma)**3) + # but use only one temp array instead of three + temp = x - np.mean(x) + np.true_divide(temp, sigma, temp) + np.power(temp, 3, temp) + g1 = np.mean(temp) + return _ptp(x) / (1.0 + np.log2(x.size) + + np.log2(1.0 + np.absolute(g1) / sg1)) + return 0.0 + + +def _hist_bin_fd(x, range): + """ + The Freedman-Diaconis histogram bin estimator. + + The Freedman-Diaconis rule uses interquartile range (IQR) to + estimate binwidth. It is considered a variation of the Scott rule + with more robustness as the IQR is less affected by outliers than + the standard deviation. However, the IQR depends on fewer points + than the standard deviation, so it is less accurate, especially for + long tailed distributions. + + If the IQR is 0, this function returns 0 for the bin width. + Binwidth is inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + iqr = np.subtract(*np.percentile(x, [75, 25])) + return 2.0 * iqr * x.size ** (-1.0 / 3.0) + + +def _hist_bin_auto(x, range): + """ + Histogram bin estimator that uses the minimum width of a relaxed + Freedman-Diaconis and Sturges estimators if the FD bin width does + not result in a large number of bins. The relaxed Freedman-Diaconis estimator + limits the bin width to half the sqrt estimated to avoid small bins. + + The FD estimator is usually the most robust method, but its width + estimate tends to be too large for small `x` and bad for data with limited + variance. The Sturges estimator is quite good for small (<1000) datasets + and is the default in the R language. This method gives good off-the-shelf + behaviour. + + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + range : Tuple with range for the histogram + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + + See Also + -------- + _hist_bin_fd, _hist_bin_sturges + """ + fd_bw = _hist_bin_fd(x, range) + sturges_bw = _hist_bin_sturges(x, range) + sqrt_bw = _hist_bin_sqrt(x, range) + # heuristic to limit the maximal number of bins + fd_bw_corrected = max(fd_bw, sqrt_bw / 2) + return min(fd_bw_corrected, sturges_bw) + + +# Private dict initialized at module load time +_hist_bin_selectors = {'stone': _hist_bin_stone, + 'auto': _hist_bin_auto, + 'doane': _hist_bin_doane, + 'fd': _hist_bin_fd, + 'rice': _hist_bin_rice, + 'scott': _hist_bin_scott, + 'sqrt': _hist_bin_sqrt, + 'sturges': _hist_bin_sturges} + + +def _ravel_and_check_weights(a, weights): + """ Check a and weights have matching shapes, and ravel both """ + a = np.asarray(a) + + # Ensure that the array is a "subtractable" dtype + if a.dtype == np.bool: + msg = f"Converting input from {a.dtype} to {np.uint8} for compatibility." 
+ warnings.warn(msg, RuntimeWarning, stacklevel=3) + a = a.astype(np.uint8) + + if weights is not None: + weights = np.asarray(weights) + if weights.shape != a.shape: + raise ValueError( + 'weights should have the same shape as a.') + weights = weights.ravel() + a = a.ravel() + return a, weights + + +def _get_outer_edges(a, range): + """ + Determine the outer bin edges to use, from either the data or the range + argument + """ + if range is not None: + first_edge, last_edge = range + if first_edge > last_edge: + raise ValueError( + 'max must be larger than min in range parameter.') + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + f"supplied range of [{first_edge}, {last_edge}] is not finite") + elif a.size == 0: + # handle empty arrays. Can't determine range, so use 0-1. + first_edge, last_edge = 0, 1 + else: + first_edge, last_edge = a.min(), a.max() + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + f"autodetected range of [{first_edge}, {last_edge}] is not finite") + + # expand empty range to avoid divide by zero + if first_edge == last_edge: + first_edge = first_edge - 0.5 + last_edge = last_edge + 0.5 + + return first_edge, last_edge + + +def _unsigned_subtract(a, b): + """ + Subtract two values where a >= b, and produce an unsigned result + + This is needed when finding the difference between the upper and lower + bound of an int16 histogram + """ + # coerce to a single type + signed_to_unsigned = { + np.byte: np.ubyte, + np.short: np.ushort, + np.intc: np.uintc, + np.int_: np.uint, + np.longlong: np.ulonglong + } + dt = np.result_type(a, b) + try: + unsigned_dt = signed_to_unsigned[dt.type] + except KeyError: + return np.subtract(a, b, dtype=dt) + else: + # we know the inputs are integers, and we are deliberately casting + # signed to unsigned. The input may be negative python integers so + # ensure we pass in arrays with the initial dtype (related to NEP 50). + return np.subtract(np.asarray(a, dtype=dt), np.asarray(b, dtype=dt), + casting='unsafe', dtype=unsigned_dt) + + +def _get_bin_edges(a, bins, range, weights): + """ + Computes the bins used internally by `histogram`. + + Parameters + ========== + a : ndarray + Ravelled data array + bins, range + Forwarded arguments from `histogram`. + weights : ndarray, optional + Ravelled weights array, or None + + Returns + ======= + bin_edges : ndarray + Array of bin edges + uniform_bins : (Number, Number, int): + The upper bound, lowerbound, and number of bins, used in the optimized + implementation of `histogram` that works on uniform bins. 
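+
+ For instance, ``_get_bin_edges(np.array([0., 1., 2.]), 4, None, None)``
+ would return ``array([0. , 0.5, 1. , 1.5, 2. ])`` together with the
+ uniform-bin triple ``(0.0, 2.0, 4)``.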
+ """ + # parse the overloaded bins argument + n_equal_bins = None + bin_edges = None + + if isinstance(bins, str): + bin_name = bins + # if `bins` is a string for an automatic method, + # this will replace it with the number of bins calculated + if bin_name not in _hist_bin_selectors: + raise ValueError( + f"{bin_name!r} is not a valid estimator for `bins`") + if weights is not None: + raise TypeError("Automated estimation of the number of " + "bins is not supported for weighted data") + + first_edge, last_edge = _get_outer_edges(a, range) + + # truncate the range if needed + if range is not None: + keep = (a >= first_edge) + keep &= (a <= last_edge) + if not np.logical_and.reduce(keep): + a = a[keep] + + if a.size == 0: + n_equal_bins = 1 + else: + # Do not call selectors on empty arrays + width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) + if width: + if np.issubdtype(a.dtype, np.integer) and width < 1: + width = 1 + delta = _unsigned_subtract(last_edge, first_edge) + n_equal_bins = int(np.ceil(delta / width)) + else: + # Width can be zero for some estimators, e.g. FD when + # the IQR of the data is zero. + n_equal_bins = 1 + + elif np.ndim(bins) == 0: + try: + n_equal_bins = operator.index(bins) + except TypeError as e: + raise TypeError( + '`bins` must be an integer, a string, or an array') from e + if n_equal_bins < 1: + raise ValueError('`bins` must be positive, when an integer') + + first_edge, last_edge = _get_outer_edges(a, range) + + elif np.ndim(bins) == 1: + bin_edges = np.asarray(bins) + if np.any(bin_edges[:-1] > bin_edges[1:]): + raise ValueError( + '`bins` must increase monotonically, when an array') + + else: + raise ValueError('`bins` must be 1d, when an array') + + if n_equal_bins is not None: + # gh-10322 means that type resolution rules are dependent on array + # shapes. To avoid this causing problems, we pick a type now and stick + # with it throughout. + bin_type = np.result_type(first_edge, last_edge, a) + if np.issubdtype(bin_type, np.integer): + bin_type = np.result_type(bin_type, float) + + # bin edges must be computed + bin_edges = np.linspace( + first_edge, last_edge, n_equal_bins + 1, + endpoint=True, dtype=bin_type) + if np.any(bin_edges[:-1] >= bin_edges[1:]): + raise ValueError( + f'Too many bins for data range. Cannot create {n_equal_bins} ' + f'finite-sized bins.') + return bin_edges, (first_edge, last_edge, n_equal_bins) + else: + return bin_edges, None + + +def _search_sorted_inclusive(a, v): + """ + Like `searchsorted`, but where the last item in `v` is placed on the right. + + In the context of a histogram, this makes the last bin edge inclusive + """ + return np.concatenate(( + a.searchsorted(v[:-1], 'left'), + a.searchsorted(v[-1:], 'right') + )) + + +def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_bin_edges_dispatcher) +def histogram_bin_edges(a, bins=10, range=None, weights=None): + r""" + Function to calculate only the edges of the bins used by the `histogram` + function. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines the bin edges, including the rightmost + edge, allowing for non-uniform bin widths. 
+ + If `bins` is a string from the list below, `histogram_bin_edges` will + use the method chosen to calculate the optimal bin width and + consequently the number of bins (see the Notes section for more detail + on the estimators) from the data that falls within the requested range. + While the bin width will be optimal for the actual data + in the range, the number of bins will be computed to fill the + entire range, including the empty portions. For visualisation, + using the 'auto' option is suggested. Weighted data is not + supported for automated bin size selection. + + 'auto' + Minimum bin width between the 'sturges' and 'fd' estimators. + Provides good all-around performance. + + 'fd' (Freedman Diaconis Estimator) + Robust (resilient to outliers) estimator that takes into + account data variability and data size. + + 'doane' + An improved version of Sturges' estimator that works better + with non-normal datasets. + + 'scott' + Less robust estimator that takes into account data variability + and data size. + + 'stone' + Estimator based on leave-one-out cross-validation estimate of + the integrated squared error. Can be regarded as a generalization + of Scott's rule. + + 'rice' + Estimator does not take variability into account, only data + size. Commonly overestimates number of bins required. + + 'sturges' + R's default method, only accounts for data size. Only + optimal for gaussian data and underestimates number of bins + for large non-gaussian datasets. + + 'sqrt' + Square root (of data size) estimator, used by Excel and + other programs for its speed and simplicity. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). This is currently not used by any of the bin estimators, + but may be in the future. + + Returns + ------- + bin_edges : array of dtype float + The edges to pass into `histogram` + + See Also + -------- + histogram + + Notes + ----- + The methods to estimate the optimal number of bins are well founded + in literature, and are inspired by the choices R provides for + histogram visualisation. Note that having the number of bins + proportional to :math:`n^{1/3}` is asymptotically optimal, which is + why it appears in most estimators. These are simply plug-in methods + that give good starting points for number of bins. In the equations + below, :math:`h` is the binwidth and :math:`n_h` is the number of + bins. All estimators that compute bin counts are recast to bin width + using the `ptp` of the data. The final bin count is obtained from + ``np.round(np.ceil(range / h))``. The final bin width is often less + than what is returned by the estimators below. + + 'auto' (minimum bin width of the 'sturges' and 'fd' estimators) + A compromise to get a good value. For small datasets the Sturges + value will usually be chosen, while larger datasets will usually + default to FD. Avoids the overly conservative behaviour of FD + and Sturges for small and large datasets respectively. 
+ Switchover point is usually :math:`a.size \approx 1000`. + + 'fd' (Freedman Diaconis Estimator) + .. math:: h = 2 \frac{IQR}{n^{1/3}} + + The binwidth is proportional to the interquartile range (IQR) + and inversely proportional to cube root of a.size. Can be too + conservative for small datasets, but is quite good for large + datasets. The IQR is very robust to outliers. + + 'scott' + .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}} + + The binwidth is proportional to the standard deviation of the + data and inversely proportional to cube root of ``x.size``. Can + be too conservative for small datasets, but is quite good for + large datasets. The standard deviation is not very robust to + outliers. Values are very similar to the Freedman-Diaconis + estimator in the absence of outliers. + + 'rice' + .. math:: n_h = 2n^{1/3} + + The number of bins is only proportional to cube root of + ``a.size``. It tends to overestimate the number of bins and it + does not take into account data variability. + + 'sturges' + .. math:: n_h = \log _{2}(n) + 1 + + The number of bins is the base 2 log of ``a.size``. This + estimator assumes normality of data and is too conservative for + larger, non-normal datasets. This is the default method in R's + ``hist`` method. + + 'doane' + .. math:: n_h = 1 + \log_{2}(n) + + \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right) + + g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right] + + \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} + + An improved version of Sturges' formula that produces better + estimates for non-normal datasets. This estimator attempts to + account for the skew of the data. + + 'sqrt' + .. math:: n_h = \sqrt n + + The simplest and fastest estimator. Only takes into account the + data size. + + Additionally, if the data is of integer dtype, then the binwidth will never + be less than 1. + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) + >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) + array([0. , 0.25, 0.5 , 0.75, 1. ]) + >>> np.histogram_bin_edges(arr, bins=2) + array([0. , 2.5, 5. ]) + + For consistency with histogram, an array of pre-computed bins is + passed through unmodified: + + >>> np.histogram_bin_edges(arr, [1, 2]) + array([1, 2]) + + This function allows one set of bins to be computed, and reused across + multiple histograms: + + >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') + >>> shared_bins + array([0., 1., 2., 3., 4., 5.]) + + >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) + >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) + >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) + + >>> hist_0; hist_1 + array([1, 1, 0, 1, 0]) + array([2, 0, 1, 1, 2]) + + Which gives more easily comparable results than using separate bins for + each histogram: + + >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') + >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') + >>> hist_0; hist_1 + array([1, 1, 1]) + array([2, 1, 1, 2]) + >>> bins_0; bins_1 + array([0., 1., 2., 3.]) + array([0. , 1.25, 2.5 , 3.75, 5. 
]) + + """ + a, weights = _ravel_and_check_weights(a, weights) + bin_edges, _ = _get_bin_edges(a, bins, range, weights) + return bin_edges + + +def _histogram_dispatcher( + a, bins=None, range=None, density=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_dispatcher) +def histogram(a, bins=10, range=None, density=None, weights=None): + r""" + Compute the histogram of a dataset. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines a monotonically increasing array of bin edges, + including the rightmost edge, allowing for non-uniform bin widths. + + If `bins` is a string, it defines the method used to calculate the + optimal bin width, as defined by `histogram_bin_edges`. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). If `density` is True, the weights are + normalized, so that the integral of the density over the range + remains 1. + Please note that the ``dtype`` of `weights` will also become the + ``dtype`` of the returned accumulator (`hist`), so it must be + large enough to hold accumulated values as well. + density : bool, optional + If ``False``, the result will contain the number of samples in + each bin. If ``True``, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that the sum of the + histogram values will not be equal to 1 unless bins of unity + width are chosen; it is not a probability *mass* function. + + Returns + ------- + hist : array + The values of the histogram. See `density` and `weights` for a + description of the possible semantics. If `weights` are given, + ``hist.dtype`` will be taken from `weights`. + bin_edges : array of dtype float + Return the bin edges ``(length(hist)+1)``. + + + See Also + -------- + histogramdd, bincount, searchsorted, digitize, histogram_bin_edges + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, + if `bins` is:: + + [1, 2, 3, 4] + + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and + the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which + *includes* 4. + + + Examples + -------- + >>> import numpy as np + >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) + (array([0, 2, 1]), array([0, 1, 2, 3])) + >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) + (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) + (array([1, 4, 1]), array([0, 1, 2, 3])) + + >>> a = np.arange(5) + >>> hist, bin_edges = np.histogram(a, density=True) + >>> hist + array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. 
, 0.5])
+    >>> hist.sum()
+    2.4999999999999996
+    >>> np.sum(hist * np.diff(bin_edges))
+    1.0
+
+    Automated Bin Selection Methods example, using 2-peak random data
+    with 2000 points.
+
+    .. plot::
+        :include-source:
+
+        import matplotlib.pyplot as plt
+        import numpy as np
+
+        rng = np.random.RandomState(10)  # deterministic random data
+        a = np.hstack((rng.normal(size=1000),
+                       rng.normal(loc=5, scale=2, size=1000)))
+        plt.hist(a, bins='auto')  # arguments are passed to np.histogram
+        plt.title("Histogram with 'auto' bins")
+        plt.show()
+
+    """
+    a, weights = _ravel_and_check_weights(a, weights)
+
+    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
+
+    # Histogram is an integer or a float array depending on the weights.
+    if weights is None:
+        ntype = np.dtype(np.intp)
+    else:
+        ntype = weights.dtype
+
+    # We set a block size, as this allows us to iterate over chunks when
+    # computing histograms, to minimize memory usage.
+    BLOCK = 65536
+
+    # The fast path uses bincount, but that only works for certain types
+    # of weight
+    simple_weights = (
+        weights is None or
+        np.can_cast(weights.dtype, np.double) or
+        np.can_cast(weights.dtype, complex)
+    )
+
+    if uniform_bins is not None and simple_weights:
+        # Fast algorithm for equal bins
+        # We now convert values of a to bin indices, under the assumption of
+        # equal bin widths (which is valid here).
+        first_edge, last_edge, n_equal_bins = uniform_bins
+
+        # Initialize empty histogram
+        n = np.zeros(n_equal_bins, ntype)
+
+        # Pre-compute histogram scaling factor
+        norm_numerator = n_equal_bins
+        norm_denom = _unsigned_subtract(last_edge, first_edge)
+
+        # We iterate over blocks here for two reasons: the first is that for
+        # large arrays, it is actually faster (for example for a 10^8 array it
+        # is 2x as fast) and it results in a memory footprint 3x lower in the
+        # limit of large arrays.
+        for i in _range(0, len(a), BLOCK):
+            tmp_a = a[i:i + BLOCK]
+            if weights is None:
+                tmp_w = None
+            else:
+                tmp_w = weights[i:i + BLOCK]
+
+            # Only include values in the right range
+            keep = (tmp_a >= first_edge)
+            keep &= (tmp_a <= last_edge)
+            if not np.logical_and.reduce(keep):
+                tmp_a = tmp_a[keep]
+                if tmp_w is not None:
+                    tmp_w = tmp_w[keep]
+
+            # This cast ensures no type promotions occur below, which gh-10322
+            # makes unpredictable. Getting it wrong leads to precision errors
+            # like gh-8123.
+            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
+
+            # Compute the bin indices, and for values that lie exactly on
+            # last_edge we need to subtract one
+            f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)
+                         * norm_numerator)
+            indices = f_indices.astype(np.intp)
+            indices[indices == n_equal_bins] -= 1
+
+            # The index computation is not guaranteed to give exactly
+            # consistent results within ~1 ULP of the bin edges.
+            decrement = tmp_a < bin_edges[indices]
+            indices[decrement] -= 1
+            # The last bin includes the right edge. The other bins do not.
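+            # (Mirror of the decrement above: the floating-point index
+            # computation can also land a value one bin too low, so any
+            # value that already reached the next edge is bumped up,
+            # except in the final bin, which is closed on the right.)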
+ increment = ((tmp_a >= bin_edges[indices + 1]) + & (indices != n_equal_bins - 1)) + indices[increment] += 1 + + # We now compute the histogram using bincount + if ntype.kind == 'c': + n.real += np.bincount(indices, weights=tmp_w.real, + minlength=n_equal_bins) + n.imag += np.bincount(indices, weights=tmp_w.imag, + minlength=n_equal_bins) + else: + n += np.bincount(indices, weights=tmp_w, + minlength=n_equal_bins).astype(ntype) + else: + # Compute via cumulative histogram + cum_n = np.zeros(bin_edges.shape, ntype) + if weights is None: + for i in _range(0, len(a), BLOCK): + sa = np.sort(a[i:i + BLOCK]) + cum_n += _search_sorted_inclusive(sa, bin_edges) + else: + zero = np.zeros(1, dtype=ntype) + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i + BLOCK] + tmp_w = weights[i:i + BLOCK] + sorting_index = np.argsort(tmp_a) + sa = tmp_a[sorting_index] + sw = tmp_w[sorting_index] + cw = np.concatenate((zero, sw.cumsum())) + bin_index = _search_sorted_inclusive(sa, bin_edges) + cum_n += cw[bin_index] + + n = np.diff(cum_n) + + if density: + db = np.array(np.diff(bin_edges), float) + return n / db / n.sum(), bin_edges + + return n, bin_edges + + +def _histogramdd_dispatcher(sample, bins=None, range=None, density=None, + weights=None): + if hasattr(sample, 'shape'): # same condition as used in histogramdd + yield sample + else: + yield from sample + with contextlib.suppress(TypeError): + yield from bins + yield weights + + +@array_function_dispatch(_histogramdd_dispatcher) +def histogramdd(sample, bins=10, range=None, density=None, weights=None): + """ + Compute the multidimensional histogram of some data. + + Parameters + ---------- + sample : (N, D) array, or (N, D) array_like + The data to be histogrammed. + + Note the unusual interpretation of sample when an array_like: + + * When an array, each row is a coordinate in a D-dimensional space - + such as ``histogramdd(np.array([p1, p2, p3]))``. + * When an array_like, each element is the list of values for single + coordinate - such as ``histogramdd((X, Y, Z))``. + + The first form should be preferred. + + bins : sequence or int, optional + The bin specification: + + * A sequence of arrays describing the monotonically increasing bin + edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + + range : sequence, optional + A sequence of length D, each an optional (lower, upper) tuple giving + the outer bin edges to be used if the edges are not given explicitly in + `bins`. + An entry of None in the sequence results in the minimum and maximum + values being used for the corresponding dimension. + The default, None, is equivalent to passing a tuple of D None values. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_volume``. + weights : (N,) array_like, optional + An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. + Weights are normalized to 1 if density is True. If density is False, + the values of the returned histogram are equal to the sum of the + weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray + The multidimensional histogram of sample x. See density and weights + for the different possible semantics. + edges : tuple of ndarrays + A tuple of D arrays describing the bin edges for each dimension. 
+ + See Also + -------- + histogram: 1-D histogram + histogram2d: 2-D histogram + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> r = rng.normal(size=(100,3)) + >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) + >>> H.shape, edges[0].size, edges[1].size, edges[2].size + ((5, 8, 4), 6, 9, 5) + + """ + + try: + # Sample is an ND-array. + N, D = sample.shape + except (AttributeError, ValueError): + # Sample is a sequence of 1D arrays. + sample = np.atleast_2d(sample).T + N, D = sample.shape + + nbin = np.empty(D, np.intp) + edges = D * [None] + dedges = D * [None] + if weights is not None: + weights = np.asarray(weights) + + try: + M = len(bins) + if M != D: + raise ValueError( + 'The dimension of bins must be equal to the dimension of the ' + 'sample x.') + except TypeError: + # bins is an integer + bins = D * [bins] + + # normalize the range argument + if range is None: + range = (None,) * D + elif len(range) != D: + raise ValueError('range argument must have one entry per dimension') + + # Create edge arrays + for i in _range(D): + if np.ndim(bins[i]) == 0: + if bins[i] < 1: + raise ValueError( + f'`bins[{i}]` must be positive, when an integer') + smin, smax = _get_outer_edges(sample[:, i], range[i]) + try: + n = operator.index(bins[i]) + + except TypeError as e: + raise TypeError( + f"`bins[{i}]` must be an integer, when a scalar" + ) from e + + edges[i] = np.linspace(smin, smax, n + 1) + elif np.ndim(bins[i]) == 1: + edges[i] = np.asarray(bins[i]) + if np.any(edges[i][:-1] > edges[i][1:]): + raise ValueError( + f'`bins[{i}]` must be monotonically increasing, when an array') + else: + raise ValueError( + f'`bins[{i}]` must be a scalar or 1d array') + + nbin[i] = len(edges[i]) + 1 # includes an outlier on each end + dedges[i] = np.diff(edges[i]) + + # Compute the bin number each sample falls into. + Ncount = tuple( + # avoid np.digitize to work around gh-11022 + np.searchsorted(edges[i], sample[:, i], side='right') + for i in _range(D) + ) + + # Using digitize, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right edge to be + # counted in the last bin, and not as an outlier. + for i in _range(D): + # Find which points are on the rightmost edge. + on_edge = (sample[:, i] == edges[i][-1]) + # Shift these points one bin to the left. + Ncount[i][on_edge] -= 1 + + # Compute the sample indices in the flattened histogram matrix. + # This raises an error if the array is too large. + xy = np.ravel_multi_index(Ncount, nbin) + + # Compute the number of repetitions in xy and assign it to the + # flattened histmat. + hist = np.bincount(xy, weights, minlength=nbin.prod()) + + # Shape into a proper matrix + hist = hist.reshape(nbin) + + # This preserves the (bad) behavior observed in gh-7845, for now. + hist = hist.astype(float, casting='safe') + + # Remove outliers (indices 0 and -1 for each dimension). 
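+    # (nbin[i] was set to len(edges[i]) + 1 above, i.e. two extra slots
+    # per axis that collect samples falling below the first or above the
+    # last edge; slicing them off leaves one count per real bin.)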
+ core = D * (slice(1, -1),) + hist = hist[core] + + if density: + # calculate the probability density function + s = hist.sum() + for i in _range(D): + shape = np.ones(D, int) + shape[i] = nbin[i] - 2 + hist = hist / dedges[i].reshape(shape) + hist /= s + + if (hist.shape != nbin - 2).any(): + raise RuntimeError( + "Internal Shape Error") + return hist, edges diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.pyi new file mode 100644 index 0000000..5e7afb5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.pyi @@ -0,0 +1,50 @@ +from collections.abc import Sequence +from typing import ( + Any, + SupportsIndex, + TypeAlias, +) +from typing import ( + Literal as L, +) + +from numpy._typing import ( + ArrayLike, + NDArray, +) + +__all__ = ["histogram", "histogramdd", "histogram_bin_edges"] + +_BinKind: TypeAlias = L[ + "stone", + "auto", + "doane", + "fd", + "rice", + "scott", + "sqrt", + "sturges", +] + +def histogram_bin_edges( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: tuple[float, float] | None = ..., + weights: ArrayLike | None = ..., +) -> NDArray[Any]: ... + +def histogram( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: tuple[float, float] | None = ..., + density: bool = ..., + weights: ArrayLike | None = ..., +) -> tuple[NDArray[Any], NDArray[Any]]: ... + +def histogramdd( + sample: ArrayLike, + bins: SupportsIndex | ArrayLike = ..., + range: Sequence[tuple[float, float]] = ..., + density: bool | None = ..., + weights: ArrayLike | None = ..., +) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.py new file mode 100644 index 0000000..131bbae --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.py @@ -0,0 +1,1067 @@ +import functools +import math +import sys +import warnings + +import numpy as np +import numpy._core.numeric as _nx +import numpy.matrixlib as matrixlib +from numpy._core import linspace, overrides +from numpy._core.multiarray import ravel_multi_index, unravel_index +from numpy._core.numeric import ScalarType, array +from numpy._core.numerictypes import issubdtype +from numpy._utils import set_module +from numpy.lib._function_base_impl import diff +from numpy.lib.stride_tricks import as_strided + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', + 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', + 'diag_indices', 'diag_indices_from' +] + + +def _ix__dispatcher(*args): + return args + + +@array_function_dispatch(_ix__dispatcher) +def ix_(*args): + """ + Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. + + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array + ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. + + Parameters + ---------- + args : 1-D sequences + Each sequence should be of integer or boolean type. 
+ Boolean sequences will be interpreted as boolean masks for the + corresponding dimension (equivalent to passing in + ``np.nonzero(boolean_sequence)``). + + Returns + ------- + out : tuple of ndarrays + N arrays with N dimensions each, with N the number of input + sequences. Together these arrays form an open mesh. + + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = np.ix_([0, 1], [2, 4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> ixgrid[0].shape, ixgrid[1].shape + ((2, 1), (1, 2)) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + >>> ixgrid = np.ix_([True, True], [2, 4]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + """ + out = [] + nd = len(args) + for k, new in enumerate(args): + if not isinstance(new, _nx.ndarray): + new = np.asarray(new) + if new.size == 0: + # Explicitly type empty arrays to avoid float default + new = new.astype(_nx.intp) + if new.ndim != 1: + raise ValueError("Cross index must be 1 dimensional") + if issubdtype(new.dtype, _nx.bool): + new, = new.nonzero() + new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1)) + out.append(new) + return tuple(out) + + +class nd_grid: + """ + Construct a multi-dimensional "meshgrid". + + ``grid = nd_grid()`` creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + If instantiated with an argument of ``sparse=True``, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1. + + Parameters + ---------- + sparse : bool, optional + Whether the grid is sparse or not. Default is False. + + Notes + ----- + Two instances of `nd_grid` are made available in the NumPy namespace, + `mgrid` and `ogrid`, approximately defined as:: + + mgrid = nd_grid(sparse=False) + ogrid = nd_grid(sparse=True) + + Users should use these pre-defined instances instead of using `nd_grid` + directly. 
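+
+    Examples
+    --------
+    A small illustration of the complex-step rule described above: a
+    step of ``5j`` asks for five points with an inclusive stop, rather
+    than acting as a stride.
+
+    >>> import numpy as np
+    >>> np.mgrid[0:1:5j]
+    array([0.  , 0.25, 0.5 , 0.75, 1.  ])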
+ """ + __slots__ = ('sparse',) + + def __init__(self, sparse=False): + self.sparse = sparse + + def __getitem__(self, key): + try: + size = [] + # Mimic the behavior of `np.arange` and use a data type + # which is at least as large as `np.int_` + num_list = [0] + for k in range(len(key)): + step = key[k].step + start = key[k].start + stop = key[k].stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = abs(step) + size.append(int(step)) + else: + size.append( + math.ceil((stop - start) / step)) + num_list += [start, stop, step] + typ = _nx.result_type(*num_list) + if self.sparse: + nn = [_nx.arange(_x, dtype=_t) + for _x, _t in zip(size, (typ,) * len(size))] + else: + nn = _nx.indices(size, typ) + for k, kk in enumerate(key): + step = kk.step + start = kk.start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = int(abs(step)) + if step != 1: + step = (kk.stop - start) / float(step - 1) + nn[k] = (nn[k] * step + start) + if self.sparse: + slobj = [_nx.newaxis] * len(size) + for k in range(len(size)): + slobj[k] = slice(None, None) + nn[k] = nn[k][tuple(slobj)] + slobj[k] = _nx.newaxis + return tuple(nn) # ogrid -> tuple of arrays + return nn # mgrid -> ndarray + except (IndexError, TypeError): + step = key.step + stop = key.stop + start = key.start + if start is None: + start = 0 + if isinstance(step, (_nx.complexfloating, complex)): + # Prevent the (potential) creation of integer arrays + step_float = abs(step) + step = length = int(step_float) + if step != 1: + step = (key.stop - start) / float(step - 1) + typ = _nx.result_type(start, stop, step_float) + return _nx.arange(0, length, 1, dtype=typ) * step + start + else: + return _nx.arange(start, stop, step) + + +class MGridClass(nd_grid): + """ + An instance which returns a dense multi-dimensional "meshgrid". + + An instance which returns a dense (or fleshed out) mesh-grid + when indexed, so that each returned argument has the same shape. + The dimensions and number of the output arrays are equal to the + number of indexing dimensions. If the step length is not a complex + number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid : ndarray + A single array, containing a set of `ndarray`\\ s all of the same + dimensions. stacked along the first axis. + + See Also + -------- + ogrid : like `mgrid` but returns open (not fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> import numpy as np + >>> np.mgrid[0:5, 0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + + >>> np.mgrid[0:4].shape + (4,) + >>> np.mgrid[0:4, 0:5].shape + (2, 4, 5) + >>> np.mgrid[0:4, 0:5, 0:6].shape + (3, 4, 5, 6) + + """ + __slots__ = () + + def __init__(self): + super().__init__(sparse=False) + + +mgrid = MGridClass() + + +class OGridClass(nd_grid): + """ + An instance which returns an open multi-dimensional "meshgrid". 
+ + An instance which returns an open (i.e. not fleshed out) mesh-grid + when indexed, so that only one dimension of each returned array is + greater than 1. The dimension and number of the output arrays are + equal to the number of indexing dimensions. If the step length is + not a complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid : ndarray or tuple of ndarrays + If the input is a single slice, returns an array. + If the input is multiple slices, returns a tuple of arrays, with + only one dimension not equal to 1. + + See Also + -------- + mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> from numpy import ogrid + >>> ogrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + >>> ogrid[0:5, 0:5] + (array([[0], + [1], + [2], + [3], + [4]]), + array([[0, 1, 2, 3, 4]])) + + """ + __slots__ = () + + def __init__(self): + super().__init__(sparse=True) + + +ogrid = OGridClass() + + +class AxisConcatenator: + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. + """ + __slots__ = ('axis', 'matrix', 'ndmin', 'trans1d') + + # allow ma.mr_ to override this + concatenate = staticmethod(_nx.concatenate) + makemat = staticmethod(matrixlib.matrix) + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self.axis = axis + self.matrix = matrix + self.trans1d = trans1d + self.ndmin = ndmin + + def __getitem__(self, key): + # handle matrix builder syntax + if isinstance(key, str): + frame = sys._getframe().f_back + mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) + return mymat + + if not isinstance(key, tuple): + key = (key,) + + # copy attributes, since they can be overridden in the first argument + trans1d = self.trans1d + ndmin = self.ndmin + matrix = self.matrix + axis = self.axis + + objs = [] + # dtypes or scalars for weak scalar handling in result_type + result_type_objs = [] + + for k, item in enumerate(key): + scalar = False + if isinstance(item, slice): + step = item.step + start = item.start + stop = item.stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + size = int(abs(step)) + newobj = linspace(start, stop, num=size) + else: + newobj = _nx.arange(start, stop, step) + if ndmin > 1: + newobj = array(newobj, copy=None, ndmin=ndmin) + if trans1d != -1: + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(item, str): + if k != 0: + raise ValueError("special directives must be the " + "first entry.") + if item in ('r', 'c'): + matrix = True + col = (item == 'c') + continue + if ',' in item: + vec = item.split(',') + try: + axis, ndmin = [int(x) for x in vec[:2]] + if len(vec) == 3: + trans1d = int(vec[2]) + continue + except Exception as e: + raise ValueError( + f"unknown special directive {item!r}" + ) from e + try: + axis = int(item) + continue + except (ValueError, TypeError) as e: + raise ValueError("unknown special directive") from e + elif type(item) in ScalarType: + scalar = True + newobj = item + else: + item_ndim = np.ndim(item) + newobj = array(item, copy=None, subok=True, ndmin=ndmin) + if trans1d 
!= -1 and item_ndim < ndmin: + k2 = ndmin - item_ndim + k1 = trans1d + if k1 < 0: + k1 += k2 + 1 + defaxes = list(range(ndmin)) + axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] + newobj = newobj.transpose(axes) + + objs.append(newobj) + if scalar: + result_type_objs.append(item) + else: + result_type_objs.append(newobj.dtype) + + # Ensure that scalars won't up-cast unless warranted, for 0, drops + # through to error in concatenate. + if len(result_type_objs) != 0: + final_dtype = _nx.result_type(*result_type_objs) + # concatenate could do cast, but that can be overridden: + objs = [array(obj, copy=None, subok=True, + ndmin=ndmin, dtype=final_dtype) for obj in objs] + + res = self.concatenate(tuple(objs), axis=axis) + + if matrix: + oldndim = res.ndim + res = self.makemat(res) + if oldndim == 1 and col: + res = res.T + return res + + def __len__(self): + return 0 + +# separate classes are used here instead of just making r_ = concatenator(0), +# etc. because otherwise we couldn't get the doc string to come out right +# in help(r_) + + +class RClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. + + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 + (column) matrix is produced. If the result is 2-D then both provide the + same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. 
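+
+    As a small illustration of the three-integer form just described,
+    ``'0,2,-1'`` behaves exactly like ``'0,2'``, since '-1' is already
+    the default placement for the upgraded dimensions:
+
+    >>> import numpy as np
+    >>> np.r_['0,2,-1', [1,2,3], [4,5,6]]
+    array([[1, 2, 3],
+           [4, 5, 6]])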
+ + Parameters + ---------- + Not a function, so takes no parameters + + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- + >>> import numpy as np + >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] + array([1, 2, 3, ..., 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. + + >>> a = np.array([[0, 1, 2], [3, 4, 5]]) + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) + + """ + __slots__ = () + + def __init__(self): + AxisConcatenator.__init__(self, 0) + + +r_ = RClass() + + +class CClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). + + See Also + -------- + column_stack : Stack 1-D arrays as columns into a 2-D array. + r_ : For more detailed documentation. + + Examples + -------- + >>> import numpy as np + >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + array([[1, 2, 3, ..., 4, 5, 6]]) + + """ + __slots__ = () + + def __init__(self): + AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) + + +c_ = CClass() + + +@set_module('numpy') +class ndenumerate: + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + arr : ndarray + Input array. + + See Also + -------- + ndindex, flatiter + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> for index, x in np.ndenumerate(a): + ... print(index, x) + (0, 0) 1 + (0, 1) 2 + (1, 0) 3 + (1, 1) 4 + + """ + + def __init__(self, arr): + self.iter = np.asarray(arr).flat + + def __next__(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ + return self.iter.coords, next(self.iter) + + def __iter__(self): + return self + + +@set_module('numpy') +class ndindex: + """ + An N-dimensional iterator object to index arrays. + + Given the shape of an array, an `ndindex` instance iterates over + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. 
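+
+    (Informally, ``np.ndindex(*shape)`` yields the same tuples, in the
+    same C order, as ``itertools.product(*map(range, shape))``.)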
+ + Parameters + ---------- + shape : ints, or a single tuple of ints + The size of each dimension of the array can be passed as + individual parameters or as the elements of a tuple. + + See Also + -------- + ndenumerate, flatiter + + Examples + -------- + >>> import numpy as np + + Dimensions as individual arguments + + >>> for index in np.ndindex(3, 2, 1): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + Same dimensions - but in a tuple ``(3, 2, 1)`` + + >>> for index in np.ndindex((3, 2, 1)): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + """ + + def __init__(self, *shape): + if len(shape) == 1 and isinstance(shape[0], tuple): + shape = shape[0] + x = as_strided(_nx.zeros(1), shape=shape, + strides=_nx.zeros_like(shape)) + self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], + order='C') + + def __iter__(self): + return self + + def ndincr(self): + """ + Increment the multi-dimensional index by one. + + This method is for backward compatibility only: do not use. + + .. deprecated:: 1.20.0 + This method has been advised against since numpy 1.8.0, but only + started emitting DeprecationWarning as of this version. + """ + # NumPy 1.20.0, 2020-09-08 + warnings.warn( + "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead", + DeprecationWarning, stacklevel=2) + next(self) + + def __next__(self): + """ + Standard iterator method, updates the index and returns the index + tuple. + + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current + iteration. + + """ + next(self._it) + return self._it.multi_index + + +# You can do all this with slice() plus a few special objects, +# but there's a lot to remember. This version is simpler because +# it uses the standard array indexing syntax. +# +# Written by Konrad Hinsen +# last revision: 1999-7-23 +# +# Cosmetic changes by T. Oliphant 2001 +# +# + +class IndexExpression: + """ + A nicer way to build up index tuples for arrays. + + .. note:: + Use one of the two predefined instances ``index_exp`` or `s_` + rather than directly using `IndexExpression`. + + For any index combination, including slicing and axis insertion, + ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any + array `a`. However, ``np.index_exp[indices]`` can be used anywhere + in Python code and returns a tuple of slice objects that can be + used in the construction of complex index expressions. + + Parameters + ---------- + maketuple : bool + If True, always returns a tuple. + + See Also + -------- + s_ : Predefined instance without tuple conversion: + `s_ = IndexExpression(maketuple=False)`. + The ``index_exp`` is another predefined instance that + always returns a tuple: + `index_exp = IndexExpression(maketuple=True)`. + + Notes + ----- + You can do all this with :class:`slice` plus a few special objects, + but there's a lot to remember and this version is simpler because + it uses the standard array indexing syntax. 
+ + Examples + -------- + >>> import numpy as np + >>> np.s_[2::2] + slice(2, None, 2) + >>> np.index_exp[2::2] + (slice(2, None, 2),) + + >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] + array([2, 4]) + + """ + __slots__ = ('maketuple',) + + def __init__(self, maketuple): + self.maketuple = maketuple + + def __getitem__(self, item): + if self.maketuple and not isinstance(item, tuple): + return (item,) + else: + return item + + +index_exp = IndexExpression(maketuple=True) +s_ = IndexExpression(maketuple=False) + +# End contribution from Konrad. + + +# The following functions complement those in twodim_base, but are +# applicable to N-dimensions. + + +def _fill_diagonal_dispatcher(a, val, wrap=None): + return (a,) + + +@array_function_dispatch(_fill_diagonal_dispatcher) +def fill_diagonal(a, val, wrap=False): + """Fill the main diagonal of the given array of any dimensionality. + + For an array `a` with ``a.ndim >= 2``, the diagonal is the list of + values ``a[i, ..., i]`` with indices ``i`` all identical. This function + modifies the input array in-place without returning a value. + + Parameters + ---------- + a : array, at least 2-D. + Array whose diagonal is to be filled in-place. + val : scalar or array_like + Value(s) to write on the diagonal. If `val` is scalar, the value is + written along the diagonal. If array-like, the flattened `val` is + written along the diagonal, repeating if necessary to fill all + diagonal entries. + + wrap : bool + For tall matrices in NumPy version up to 1.6.2, the + diagonal "wrapped" after N columns. You can have this behavior + with this option. This affects only tall matrices. + + See also + -------- + diag_indices, diag_indices_from + + Notes + ----- + This functionality can be obtained via `diag_indices`, but internally + this version uses a much faster implementation that never constructs the + indices and uses simple slicing. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), int) + >>> np.fill_diagonal(a, 5) + >>> a + array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + + The same function can operate on a 4-D array: + + >>> a = np.zeros((3, 3, 3, 3), int) + >>> np.fill_diagonal(a, 4) + + We only show a few blocks for clarity: + + >>> a[0, 0] + array([[4, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + >>> a[1, 1] + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 0]]) + >>> a[2, 2] + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 4]]) + + The wrap option affects only tall matrices: + + >>> # tall matrices no wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [0, 0, 0]]) + + >>> # tall matrices wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [4, 0, 0]]) + + >>> # wide matrices + >>> a = np.zeros((3, 5), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0, 0, 0], + [0, 4, 0, 0, 0], + [0, 0, 4, 0, 0]]) + + The anti-diagonal can be filled by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.zeros((3, 3), int); + >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip + >>> a + array([[0, 0, 1], + [0, 2, 0], + [3, 0, 0]]) + >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip + >>> a + array([[0, 0, 3], + [0, 2, 0], + [1, 0, 0]]) + + Note that the order in which the diagonal is filled varies depending + on the flip function. 
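+
+    A small illustration of the repeating behaviour described under
+    `val` above: a two-element `val` cycles along the four diagonal
+    entries.
+
+    >>> a = np.zeros((4, 4), int)
+    >>> np.fill_diagonal(a, [10, 20])
+    >>> a
+    array([[10,  0,  0,  0],
+           [ 0, 20,  0,  0],
+           [ 0,  0, 10,  0],
+           [ 0,  0,  0, 20]])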
+ """ + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + end = None + if a.ndim == 2: + # Explicit, fast formula for the common case. For 2-d arrays, we + # accept rectangular ones. + step = a.shape[1] + 1 + # This is needed to don't have tall matrix have the diagonal wrap. + if not wrap: + end = a.shape[1] * a.shape[1] + else: + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not np.all(diff(a.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + step = 1 + (np.cumprod(a.shape[:-1])).sum() + + # Write the value out into the diagonal. + a.flat[:end:step] = val + + +@set_module('numpy') +def diag_indices(n, ndim=2): + """ + Return the indices to access the main diagonal of an array. + + This returns a tuple of indices that can be used to access the main + diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape + (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for + ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` + for ``i = [0..n-1]``. + + Parameters + ---------- + n : int + The size, along each dimension, of the arrays for which the returned + indices can be used. + + ndim : int, optional + The number of dimensions. + + See Also + -------- + diag_indices_from + + Examples + -------- + >>> import numpy as np + + Create a set of indices to access the diagonal of a (4, 4) array: + + >>> di = np.diag_indices(4) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> a[di] = 100 + >>> a + array([[100, 1, 2, 3], + [ 4, 100, 6, 7], + [ 8, 9, 100, 11], + [ 12, 13, 14, 100]]) + + Now, we create indices to manipulate a 3-D array: + + >>> d3 = np.diag_indices(2, 3) + >>> d3 + (array([0, 1]), array([0, 1]), array([0, 1])) + + And use it to set the diagonal of an array of zeros to 1: + + >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + + """ + idx = np.arange(n) + return (idx,) * ndim + + +def _diag_indices_from(arr): + return (arr,) + + +@array_function_dispatch(_diag_indices_from) +def diag_indices_from(arr): + """ + Return the indices to access the main diagonal of an n-dimensional array. + + See `diag_indices` for full details. + + Parameters + ---------- + arr : array, at least 2-D + + See Also + -------- + diag_indices + + Examples + -------- + >>> import numpy as np + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Get the indices of the diagonal elements. + + >>> di = np.diag_indices_from(a) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + >>> a[di] + array([ 0, 5, 10, 15]) + + This is simply syntactic sugar for diag_indices. + + >>> np.diag_indices(a.shape[0]) + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. 
+ if not np.all(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.pyi new file mode 100644 index 0000000..7ac2b3a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.pyi @@ -0,0 +1,196 @@ +from collections.abc import Sequence +from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload +from typing import Literal as L + +from _typeshed import Incomplete +from typing_extensions import TypeVar, deprecated + +import numpy as np +from numpy._core.multiarray import ravel_multi_index, unravel_index +from numpy._typing import ( + ArrayLike, + NDArray, + _AnyShape, + _FiniteNestedSequence, + _NestedSequence, + _SupportsArray, + _SupportsDType, +) + +__all__ = [ # noqa: RUF022 + "ravel_multi_index", + "unravel_index", + "mgrid", + "ogrid", + "r_", + "c_", + "s_", + "index_exp", + "ix_", + "ndenumerate", + "ndindex", + "fill_diagonal", + "diag_indices", + "diag_indices_from", +] + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) + +_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) +_MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) +_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) +_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) + +### + +class ndenumerate(Generic[_ScalarT_co]): + @overload + def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ... + @overload + def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ... + @overload + def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ... + @overload + def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ... + @overload + def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ... + @overload + def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ... + @overload + def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ... + @overload + def __new__(cls, arr: object) -> ndenumerate[Any]: ... + + # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) + @overload + def __next__( + self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64], + /, + ) -> tuple[_AnyShape, _ScalarT_co]: ... + @overload + def __next__(self: ndenumerate[np.object_], /) -> tuple[_AnyShape, Incomplete]: ... + @overload + def __next__(self, /) -> tuple[_AnyShape, _ScalarT_co]: ... + + # + def __iter__(self) -> Self: ... + +class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload + def __init__(self, /, *shape: SupportsIndex) -> None: ... + + # + def __iter__(self) -> Self: ... + def __next__(self) -> _AnyShape: ... + + # + @deprecated("Deprecated since 1.20.0.") + def ndincr(self, /) -> None: ... 
+ +class nd_grid(Generic[_BoolT_co]): + sparse: _BoolT_co + def __init__(self, sparse: _BoolT_co = ...) -> None: ... + @overload + def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ... + @overload + def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Incomplete], ...]: ... + +@final +class MGridClass(nd_grid[L[False]]): + def __init__(self) -> None: ... + +@final +class OGridClass(nd_grid[L[True]]): + def __init__(self) -> None: ... + +class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): + __slots__ = "axis", "matrix", "ndmin", "trans1d" + + makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype]]] + + axis: _AxisT_co + matrix: _MatrixT_co + ndmin: _NDMinT_co + trans1d: _Trans1DT_co + + # + def __init__( + self, + /, + axis: _AxisT_co = ..., + matrix: _MatrixT_co = ..., + ndmin: _NDMinT_co = ..., + trans1d: _Trans1DT_co = ..., + ) -> None: ... + + # TODO(jorenham): annotate this + def __getitem__(self, key: Incomplete, /) -> Incomplete: ... + def __len__(self, /) -> L[0]: ... + + # + @staticmethod + @overload + def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ... + @staticmethod + @overload + def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ... + +@final +class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + def __init__(self, /) -> None: ... + +@final +class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + def __init__(self, /) -> None: ... + +class IndexExpression(Generic[_BoolT_co]): + maketuple: _BoolT_co + def __init__(self, maketuple: _BoolT_co) -> None: ... + @overload + def __getitem__(self, item: _TupleT) -> _TupleT: ... + @overload + def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... + @overload + def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + +@overload +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... +@overload +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... +@overload +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +@overload +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... +@overload +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ... +@overload +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ... +@overload +def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... + +# +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ... + +# +def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... + +# +mgrid: Final[MGridClass] = ... +ogrid: Final[OGridClass] = ... + +r_: Final[RClass] = ... +c_: Final[CClass] = ... + +index_exp: Final[IndexExpression[L[True]]] = ... +s_: Final[IndexExpression[L[False]]] = ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.py b/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.py new file mode 100644 index 0000000..3586b41 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.py @@ -0,0 +1,900 @@ +"""A collection of functions designed to help I/O with ascii files. 
+ +""" +__docformat__ = "restructuredtext en" + +import itertools + +import numpy as np +import numpy._core.numeric as nx +from numpy._utils import asbytes, asunicode + + +def _decode_line(line, encoding=None): + """Decode bytes from binary input streams. + + Defaults to decoding from 'latin1'. + + Parameters + ---------- + line : str or bytes + Line to be decoded. + encoding : str + Encoding used to decode `line`. + + Returns + ------- + decoded_line : str + + """ + if type(line) is bytes: + if encoding is None: + encoding = "latin1" + line = line.decode(encoding) + + return line + + +def _is_string_like(obj): + """ + Check whether obj behaves like a string. + """ + try: + obj + '' + except (TypeError, ValueError): + return False + return True + + +def _is_bytes_like(obj): + """ + Check whether obj behaves like a bytes object. + """ + try: + obj + b'' + except (TypeError, ValueError): + return False + return True + + +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a dtype are nested. + + Parameters + ---------- + ndtype : dtype + Data-type of a structured array. + + Raises + ------ + AttributeError + If `ndtype` does not have a `names` attribute. + + Examples + -------- + >>> import numpy as np + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) + >>> np.lib._iotools.has_nested_fields(dt) + False + + """ + return any(ndtype[name].names is not None for name in ndtype.names or ()) + + +def flatten_dtype(ndtype, flatten_base=False): + """ + Unpack a structured data-type by collapsing nested fields and/or fields + with a shape. + + Note that the field names are lost. + + Parameters + ---------- + ndtype : dtype + The datatype to collapse + flatten_base : bool, optional + If True, transform a field with a shape into several fields. Default is + False. + + Examples + -------- + >>> import numpy as np + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ... ('block', int, (2, 3))]) + >>> np.lib._iotools.flatten_dtype(dt) + [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] + >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) + [dtype('S4'), + dtype('float64'), + dtype('float64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64')] + + """ + names = ndtype.names + if names is None: + if flatten_base: + return [ndtype.base] * int(np.prod(ndtype.shape)) + return [ndtype.base] + else: + types = [] + for field in names: + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) + types.extend(flat_dt) + return types + + +class LineSplitter: + """ + Object to split a string at a given delimiter or at given places. + + Parameters + ---------- + delimiter : str, int, or sequence of ints, optional + If a string, character used to delimit consecutive fields. + If an integer or a sequence of integers, width(s) of each field. + comments : str, optional + Character used to mark the beginning of a comment. Default is '#'. + autostrip : bool, optional + Whether to strip each individual field. Default is True. + + """ + + def autostrip(self, method): + """ + Wrapper to strip each member of the output of `method`. + + Parameters + ---------- + method : function + Function that takes a single argument and returns a sequence of + strings. + + Returns + ------- + wrapped : function + The result of wrapping `method`. `wrapped` takes a single input + argument and returns a list of strings that are stripped of + white-space. 
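+
+        A small sketch of the effect, using a comma-delimited splitter
+        (`autostrip` is on by default):
+
+        >>> import numpy as np
+        >>> sp = np.lib._iotools.LineSplitter(',')
+        >>> sp(" 1, 2 , 3 ")
+        ['1', '2', '3']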
+ + """ + return lambda input: [_.strip() for _ in method(input)] + + def __init__(self, delimiter=None, comments='#', autostrip=True, + encoding=None): + delimiter = _decode_line(delimiter) + comments = _decode_line(comments) + + self.comments = comments + + # Delimiter is a character + if (delimiter is None) or isinstance(delimiter, str): + delimiter = delimiter or None + _handyman = self._delimited_splitter + # Delimiter is a list of field widths + elif hasattr(delimiter, '__iter__'): + _handyman = self._variablewidth_splitter + idx = np.cumsum([0] + list(delimiter)) + delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)] + # Delimiter is a single integer + elif int(delimiter): + (_handyman, delimiter) = ( + self._fixedwidth_splitter, int(delimiter)) + else: + (_handyman, delimiter) = (self._delimited_splitter, None) + self.delimiter = delimiter + if autostrip: + self._handyman = self.autostrip(_handyman) + else: + self._handyman = _handyman + self.encoding = encoding + + def _delimited_splitter(self, line): + """Chop off comments, strip, and split at delimiter. """ + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(" \r\n") + if not line: + return [] + return line.split(self.delimiter) + + def _fixedwidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip("\r\n") + if not line: + return [] + fixed = self.delimiter + slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] + return [line[s] for s in slices] + + def _variablewidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + if not line: + return [] + slices = self.delimiter + return [line[s] for s in slices] + + def __call__(self, line): + return self._handyman(_decode_line(line, self.encoding)) + + +class NameValidator: + """ + Object to validate a list of strings to use as field names. + + The strings are stripped of any non alphanumeric character, and spaces + are replaced by '_'. During instantiation, the user can define a list + of names to exclude, as well as a list of invalid characters. Names in + the exclusion list are appended a '_' character. + + Once an instance has been created, it can be called with a list of + names, and a list of valid names will be created. The `__call__` + method accepts an optional keyword "default" that sets the default name + in case of ambiguity. By default this is 'f', so that names will + default to `f0`, `f1`, etc. + + Parameters + ---------- + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default + list ['return', 'file', 'print']. Excluded names are appended an + underscore: for example, `file` becomes `file_` if supplied. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + * If True, field names are case-sensitive. + * If False or 'upper', field names are converted to upper case. + * If 'lower', field names are converted to lower case. + + The default value is True. + replace_space : '_', optional + Character(s) used in replacement of white spaces. + + Notes + ----- + Calling an instance of `NameValidator` is the same as calling its + method `validate`. 
+ + Examples + -------- + >>> import numpy as np + >>> validator = np.lib._iotools.NameValidator() + >>> validator(['file', 'field2', 'with space', 'CaSe']) + ('file_', 'field2', 'with_space', 'CaSe') + + >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], + ... deletechars='q', + ... case_sensitive=False) + >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) + ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') + + """ + + defaultexcludelist = 'return', 'file', 'print' + defaultdeletechars = frozenset(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): + # Process the exclusion list .. + if excludelist is None: + excludelist = [] + excludelist.extend(self.defaultexcludelist) + self.excludelist = excludelist + # Process the list of characters to delete + if deletechars is None: + delete = set(self.defaultdeletechars) + else: + delete = set(deletechars) + delete.add('"') + self.deletechars = delete + # Process the case option ..... + if (case_sensitive is None) or (case_sensitive is True): + self.case_converter = lambda x: x + elif (case_sensitive is False) or case_sensitive.startswith('u'): + self.case_converter = lambda x: x.upper() + elif case_sensitive.startswith('l'): + self.case_converter = lambda x: x.lower() + else: + msg = f'unrecognized case_sensitive value {case_sensitive}.' + raise ValueError(msg) + + self.replace_space = replace_space + + def validate(self, names, defaultfmt="f%i", nbfields=None): + """ + Validate a list of strings as field names for a structured array. + + Parameters + ---------- + names : sequence of str + Strings to be validated. + defaultfmt : str, optional + Default format string, used if validating a given string + reduces its length to zero. + nbfields : integer, optional + Final number of validated names, used to expand or shrink the + initial list of names. + + Returns + ------- + validatednames : list of str + The list of validated field names. + + Notes + ----- + A `NameValidator` instance can be called directly, which is the + same as calling `validate`. For examples, see `NameValidator`. + + """ + # Initial checks .............. + if (names is None): + if (nbfields is None): + return None + names = [] + if isinstance(names, str): + names = [names, ] + if nbfields is not None: + nbnames = len(names) + if (nbnames < nbfields): + names = list(names) + [''] * (nbfields - nbnames) + elif (nbnames > nbfields): + names = names[:nbfields] + # Set some shortcuts ........... + deletechars = self.deletechars + excludelist = self.excludelist + case_converter = self.case_converter + replace_space = self.replace_space + # Initializes some variables ... 
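+        # (seen counts occurrences of each validated name so duplicates
+        # get a numeric "_<n>" suffix; nbempty numbers the entries whose
+        # names validate to the empty string, which then fall back to
+        # defaultfmt.)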
+ validatednames = [] + seen = {} + nbempty = 0 + + for item in names: + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) + item = ''.join([c for c in item if c not in deletechars]) + if item == '': + item = defaultfmt % nbempty + while item in names: + nbempty += 1 + item = defaultfmt % nbempty + nbempty += 1 + elif item in excludelist: + item += '_' + cnt = seen.get(item, 0) + if cnt > 0: + validatednames.append(item + '_%d' % cnt) + else: + validatednames.append(item) + seen[item] = cnt + 1 + return tuple(validatednames) + + def __call__(self, names, defaultfmt="f%i", nbfields=None): + return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) + + +def str2bool(value): + """ + Tries to transform a string supposed to represent a boolean to a boolean. + + Parameters + ---------- + value : str + The string that is transformed to a boolean. + + Returns + ------- + boolval : bool + The boolean representation of `value`. + + Raises + ------ + ValueError + If the string is not 'True' or 'False' (case independent) + + Examples + -------- + >>> import numpy as np + >>> np.lib._iotools.str2bool('TRUE') + True + >>> np.lib._iotools.str2bool('false') + False + + """ + value = value.upper() + if value == 'TRUE': + return True + elif value == 'FALSE': + return False + else: + raise ValueError("Invalid boolean") + + +class ConverterError(Exception): + """ + Exception raised when an error occurs in a converter for string values. + + """ + pass + + +class ConverterLockError(ConverterError): + """ + Exception raised when an attempt is made to upgrade a locked converter. + + """ + pass + + +class ConversionWarning(UserWarning): + """ + Warning issued when a string converter has a problem. + + Notes + ----- + In `genfromtxt` a `ConversionWarning` is issued if raising exceptions + is explicitly suppressed with the "invalid_raise" keyword. + + """ + pass + + +class StringConverter: + """ + Factory class for function transforming a string into another object + (int, float). + + After initialization, an instance can be called to transform a string + into another object. If the string is recognized as representing a + missing value, a default value is returned. + + Attributes + ---------- + func : function + Function used for the conversion. + default : any + Default value to return when the input corresponds to a missing + value. + type : type + Type of the output. + _status : int + Integer representing the order of the conversion. + _mapper : sequence of tuples + Sequence of tuples (dtype, function, default value) to evaluate in + order. + _locked : bool + Holds `locked` parameter. + + Parameters + ---------- + dtype_or_func : {None, dtype, function}, optional + If a `dtype`, specifies the input data type, used to define a basic + function and a default value for missing data. For example, when + `dtype` is float, the `func` attribute is set to `float` and the + default value to `np.nan`. If a function, this function is used to + convert a string to another object. In this case, it is recommended + to give an associated default value as input. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, `StringConverter` + tries to supply a reasonable default value. + missing_values : {None, sequence of str}, optional + ``None`` or sequence of strings indicating a missing value. If ``None`` + then missing values are indicated by empty entries. The default is + ``None``. 
+    locked : bool, optional
+        Whether the StringConverter should be locked to prevent automatic
+        upgrade or not. Default is False.
+
+    """
+    _mapper = [(nx.bool, str2bool, False),
+               (nx.int_, int, -1),]
+
+    # On 32-bit systems, we need to make sure that we explicitly include
+    # nx.int64 since nx.int_ is nx.int32.
+    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
+        _mapper.append((nx.int64, int, -1))
+
+    _mapper.extend([(nx.float64, float, nx.nan),
+                    (nx.complex128, complex, nx.nan + 0j),
+                    (nx.longdouble, nx.longdouble, nx.nan),
+                    # If a non-default dtype is passed, fall back to generic
+                    # ones (should only be used for the converter)
+                    (nx.integer, int, -1),
+                    (nx.floating, float, nx.nan),
+                    (nx.complexfloating, complex, nx.nan + 0j),
+                    # Last, try with the string types (must be last, because
+                    # `_mapper[-1]` is used as default in some cases)
+                    (nx.str_, asunicode, '???'),
+                    (nx.bytes_, asbytes, '???'),
+                    ])
+
+    @classmethod
+    def _getdtype(cls, val):
+        """Returns the dtype of the input variable."""
+        return np.array(val).dtype
+
+    @classmethod
+    def _getsubdtype(cls, val):
+        """Returns the type of the dtype of the input variable."""
+        return np.array(val).dtype.type
+
+    @classmethod
+    def _dtypeortype(cls, dtype):
+        """Returns dtype for datetime64 and type of dtype otherwise."""
+
+        # This is a bit annoying. We want to return the "general" type in most
+        # cases (i.e. "string" rather than "S10"), but we want to return the
+        # specific type for datetime64 (i.e. "datetime64[us]" rather than
+        # "datetime64").
+        if dtype.type == np.datetime64:
+            return dtype
+        return dtype.type
+
+    @classmethod
+    def upgrade_mapper(cls, func, default=None):
+        """
+        Upgrade the mapper of a StringConverter by adding a new function and
+        its corresponding default.
+
+        The input function (or sequence of functions) and its associated
+        default value (if any) are inserted in the penultimate position of
+        the mapper. The corresponding type is estimated from the dtype of
+        the default value.
+
+        Parameters
+        ----------
+        func : var
+            Function, or sequence of functions
+        default : any, optional
+            Default value (or sequence of default values) associated with
+            `func`; the corresponding dtype is estimated from it.
+
+        Examples
+        --------
+        >>> import dateutil.parser
+        >>> import datetime
+        >>> dateparser = dateutil.parser.parse
+        >>> defaultdate = datetime.date(2000, 1, 1)
+        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+        """
+        # Func is a single function
+        if callable(func):
+            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
+            return
+        elif hasattr(func, '__iter__'):
+            if isinstance(func[0], (tuple, list)):
+                for _ in func:
+                    cls._mapper.insert(-1, _)
+                return
+            if default is None:
+                default = [None] * len(func)
+            else:
+                default = list(default)
+                # Pad the defaults with None so that every function gets one
+                default.extend([None] * (len(func) - len(default)))
+            for fct, dft in zip(func, default):
+                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
+
+    @classmethod
+    def _find_map_entry(cls, dtype):
+        # if a converter for the specific dtype is available use that
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if dtype.type == deftype:
+                return i, (deftype, func, default_def)
+
+        # otherwise find an inexact match
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if np.issubdtype(dtype.type, deftype):
+                return i, (deftype, func, default_def)
+
+        raise LookupError
+
+    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
+                 locked=False):
+        # Defines a lock for upgrade
+        self._locked = bool(locked)
+        # No input dtype: minimal initialization
+        if dtype_or_func is None:
+            self.func = str2bool
+            self._status = 0
+            self.default = default or False
+            dtype = np.dtype('bool')
+        else:
+            # Is the input a np.dtype?
+            try:
+                self.func = None
+                dtype = np.dtype(dtype_or_func)
+            except TypeError:
+                # dtype_or_func must be a function, then
+                if not callable(dtype_or_func):
+                    errmsg = ("The input argument `dtype` is neither a"
+                              " function nor a dtype (got '%s' instead)")
+                    raise TypeError(errmsg % type(dtype_or_func))
+                # Set the function
+                self.func = dtype_or_func
+                # If we don't have a default, try to guess it or set it to
+                # None
+                if default is None:
+                    try:
+                        default = self.func('0')
+                    except ValueError:
+                        default = None
+                dtype = self._getdtype(default)
+
+        # find the best match in our mapper
+        try:
+            self._status, (_, func, default_def) = self._find_map_entry(dtype)
+        except LookupError:
+            # no match
+            self.default = default
+            _, func, _ = self._mapper[-1]
+            self._status = 0
+        else:
+            # use the found default only if we did not already have one
+            if default is None:
+                self.default = default_def
+            else:
+                self.default = default
+
+        # If the input was a dtype, set the function to the last we saw
+        if self.func is None:
+            self.func = func
+
+        # If the status is 1 (int), change the function to
+        # something more robust.
+        if self.func == self._mapper[1][1]:
+            if issubclass(dtype.type, np.uint64):
+                self.func = np.uint64
+            elif issubclass(dtype.type, np.int64):
+                self.func = np.int64
+            else:
+                self.func = lambda x: int(float(x))
+        # Store the list of strings corresponding to missing values.
+ if missing_values is None: + self.missing_values = {''} + else: + if isinstance(missing_values, str): + missing_values = missing_values.split(",") + self.missing_values = set(list(missing_values) + ['']) + + self._callingfunction = self._strict_call + self.type = self._dtypeortype(dtype) + self._checked = False + self._initial_default = default + + def _loose_call(self, value): + try: + return self.func(value) + except ValueError: + return self.default + + def _strict_call(self, value): + try: + + # We check if we can convert the value using the current function + new_value = self.func(value) + + # In addition to having to check whether func can convert the + # value, we also have to make sure that we don't get overflow + # errors for integers. + if self.func is int: + try: + np.array(value, dtype=self.type) + except OverflowError: + raise ValueError + + # We're still here so we can now return the new value + return new_value + + except ValueError: + if value.strip() in self.missing_values: + if not self._status: + self._checked = False + return self.default + raise ValueError(f"Cannot convert string '{value}'") + + def __call__(self, value): + return self._callingfunction(value) + + def _do_upgrade(self): + # Raise an exception if we locked the converter... + if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + errmsg = "Could not find a valid conversion function" + raise ConverterError(errmsg) + elif _status < _statusmax - 1: + _status += 1 + self.type, self.func, default = self._mapper[_status] + self._status = _status + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + + def upgrade(self, value): + """ + Find the best converter for a given string, and return the result. + + The supplied string `value` is converted by testing different + converters in order. First the `func` method of the + `StringConverter` instance is tried, if this fails other available + converters are tried. The order in which these other converters + are tried is determined by the `_status` attribute of the instance. + + Parameters + ---------- + value : str + The string to convert. + + Returns + ------- + out : any + The result of converting `value` with the appropriate converter. + + """ + self._checked = True + try: + return self._strict_call(value) + except ValueError: + self._do_upgrade() + return self.upgrade(value) + + def iterupgrade(self, value): + self._checked = True + if not hasattr(value, '__iter__'): + value = (value,) + _strict_call = self._strict_call + try: + for _m in value: + _strict_call(_m) + except ValueError: + self._do_upgrade() + self.iterupgrade(value) + + def update(self, func, default=None, testing_value=None, + missing_values='', locked=False): + """ + Set StringConverter attributes directly. + + Parameters + ---------- + func : function + Conversion function. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, + `StringConverter` tries to supply a reasonable default value. + testing_value : str, optional + A string representing a standard input value of the converter. + This string is used to help defining a reasonable default + value. + missing_values : {sequence of str, None}, optional + Sequence of strings indicating a missing value. 
If ``None``, then
+            the existing `missing_values` are cleared. The default is ``''``.
+        locked : bool, optional
+            Whether the StringConverter should be locked to prevent
+            automatic upgrade or not. Default is False.
+
+        Notes
+        -----
+        `update` takes the same parameters as the constructor of
+        `StringConverter`, except that `func` does not accept a `dtype`
+        whereas `dtype_or_func` in the constructor does.
+
+        """
+        self.func = func
+        self._locked = locked
+
+        # Don't reset the default to None if we can avoid it
+        if default is not None:
+            self.default = default
+            self.type = self._dtypeortype(self._getdtype(default))
+        else:
+            try:
+                tester = func(testing_value or '1')
+            except (TypeError, ValueError):
+                tester = None
+            self.type = self._dtypeortype(self._getdtype(tester))
+
+        # Add the missing values to the existing set or clear it.
+        if missing_values is None:
+            # Clear all missing values even though the ctor initializes it to
+            # set(['']) when the argument is None.
+            self.missing_values = set()
+        else:
+            if not np.iterable(missing_values):
+                missing_values = [missing_values]
+            if not all(isinstance(v, str) for v in missing_values):
+                raise TypeError("missing_values must be strings or unicode")
+            self.missing_values.update(missing_values)
+
+
+def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
+    """
+    Convenience function to create a `np.dtype` object.
+
+    The function processes the input `dtype` and matches it with the given
+    names.
+
+    Parameters
+    ----------
+    ndtype : var
+        Definition of the dtype. Can be any string or dictionary recognized
+        by the `np.dtype` function, or a sequence of types.
+    names : str or sequence, optional
+        Sequence of strings to use as field names for a structured dtype.
+        For convenience, `names` can be a string of a comma-separated list
+        of names.
+    defaultfmt : str, optional
+        Format string used to define missing names, such as ``"f%i"``
+        (default) or ``"fields_%02i"``.
+    validationargs : optional
+        A series of optional arguments used to initialize a
+        `NameValidator`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.lib._iotools.easy_dtype(float)
+    dtype('float64')
+    >>> np.lib._iotools.easy_dtype("i4, f8")
+    dtype([('f0', '<i4'), ('f1', '<f8')])
+    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+    dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+
+    """
 None: ...
+    def __call__(self, /, line: str | bytes) -> list[str]: ...
+    def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ...
+
+class NameValidator:
+    defaultexcludelist: ClassVar[Sequence[str]]
+    defaultdeletechars: ClassVar[Sequence[str]]
+    excludelist: list[str]
+    deletechars: set[str]
+    case_converter: Callable[[str], str]
+    replace_space: str
+
+    def __init__(
+        self,
+        /,
+        excludelist: Iterable[str] | None = None,
+        deletechars: Iterable[str] | None = None,
+        case_sensitive: Literal["upper", "lower"] | bool | None = None,
+        replace_space: str = "_",
+    ) -> None: ...
+    def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...
+    def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...
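The `NameValidator` stub above mirrors the runtime class; the `StringConverter` stub that follows describes the incremental "upgrade" machinery implemented in `_iotools.py`: a converter starts at the most restrictive `_mapper` entry (bool), and each failed conversion bumps `_status` and installs the next `(type, func, default)` triple (int, then float, complex, and finally str). A minimal sketch of that behavior, for illustration only (`np.lib._iotools` is a private module, and the exact chain depends on the platform):

    import numpy as np
    from numpy.lib._iotools import StringConverter

    conv = StringConverter()    # no dtype or function: starts as a bool converter
    print(conv('True'))         # True, via str2bool
    print(conv.upgrade('1'))    # str2bool fails, so the converter upgrades to int: 1
    print(conv.upgrade('1.5'))  # int fails, so it upgrades again, to float: 1.5
    print(conv.type)            # <class 'numpy.float64'> on a typical 64-bit build

A converter constructed with `locked=True` skips this fallback and raises `ConverterLockError` from `_do_upgrade` instead of silently changing type.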
+ +class StringConverter: + func: Callable[[str], Any] | None + default: Any + missing_values: set[str] + type: np.dtype[np.datetime64] | np.generic + + def __init__( + self, + /, + dtype_or_func: npt.DTypeLike | None = None, + default: None = None, + missing_values: Iterable[str] | None = None, + locked: bool = False, + ) -> None: ... + def update( + self, + /, + func: Callable[[str], Any], + default: object | None = None, + testing_value: str | None = None, + missing_values: str = "", + locked: bool = False, + ) -> None: ... + # + def __call__(self, /, value: str) -> Any: ... + def upgrade(self, /, value: str) -> Any: ... + def iterupgrade(self, /, value: Iterable[str] | str) -> None: ... + + # + @classmethod + def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... + +@overload +def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... +@overload +def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... + +# +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... +def easy_dtype( + ndtype: npt.DTypeLike, + names: Iterable[str] | None = None, + defaultfmt: str = "f%i", + **validationargs: Unpack[_ValidationKwargs], +) -> np.dtype[np.void]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py new file mode 100644 index 0000000..4a01490 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py @@ -0,0 +1,2024 @@ +""" +Functions that ignore NaN. + +Functions +--------- + +- `nanmin` -- minimum non-NaN value +- `nanmax` -- maximum non-NaN value +- `nanargmin` -- index of minimum non-NaN value +- `nanargmax` -- index of maximum non-NaN value +- `nansum` -- sum of non-NaN values +- `nanprod` -- product of non-NaN values +- `nancumsum` -- cumulative sum of non-NaN values +- `nancumprod` -- cumulative product of non-NaN values +- `nanmean` -- mean of non-NaN values +- `nanvar` -- variance of non-NaN values +- `nanstd` -- standard deviation of non-NaN values +- `nanmedian` -- median of non-NaN values +- `nanquantile` -- qth quantile of non-NaN values +- `nanpercentile` -- qth percentile of non-NaN values + +""" +import functools +import warnings + +import numpy as np +import numpy._core.numeric as _nx +from numpy._core import overrides +from numpy.lib import _function_base_impl as fnb +from numpy.lib._function_base_impl import _weights_are_valid + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', + 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', + 'nancumsum', 'nancumprod', 'nanquantile' + ] + + +def _nan_mask(a, out=None): + """ + Parameters + ---------- + a : array-like + Input array with at least 1 dimension. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output and will prevent the allocation of a new array. + + Returns + ------- + y : bool ndarray or True + A bool array where ``np.nan`` positions are marked with ``False`` + and other positions are marked with ``True``. If the type of ``a`` + is such that it can't possibly contain ``np.nan``, returns ``True``. 
+ """ + # we assume that a is an array for this private function + + if a.dtype.kind not in 'fc': + return True + + y = np.isnan(a, out=out) + y = np.invert(y, out=y) + return y + +def _replace_nan(a, val): + """ + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Note that scalars will end up as array scalars, which is important + for using the result as the value of the out argument in some + operations. + + Parameters + ---------- + a : array-like + Input array. + val : float + NaN values are set to val before doing the operation. + + Returns + ------- + y : ndarray + If `a` is of inexact type, return a copy of `a` with the NaNs + replaced by the fill value, otherwise return `a`. + mask: {bool, None} + If `a` is of inexact type, return a boolean mask marking locations of + NaNs, otherwise return None. + + """ + a = np.asanyarray(a) + + if a.dtype == np.object_: + # object arrays do not support `isnan` (gh-9009), so make a guess + mask = np.not_equal(a, a, dtype=bool) + elif issubclass(a.dtype.type, np.inexact): + mask = np.isnan(a) + else: + mask = None + + if mask is not None: + a = np.array(a, subok=True, copy=True) + np.copyto(a, val, where=mask) + + return a, mask + + +def _copyto(a, val, mask): + """ + Replace values in `a` with NaN where `mask` is True. This differs from + copyto in that it will deal with the case where `a` is a numpy scalar. + + Parameters + ---------- + a : ndarray or numpy scalar + Array or numpy scalar some of whose values are to be replaced + by val. + val : numpy scalar + Value used a replacement. + mask : ndarray, scalar + Boolean array. Where True the corresponding element of `a` is + replaced by `val`. Broadcasts. + + Returns + ------- + res : ndarray, scalar + Array with elements replaced or scalar `val`. + + """ + if isinstance(a, np.ndarray): + np.copyto(a, val, where=mask, casting='unsafe') + else: + a = a.dtype.type(val) + return a + + +def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False): + """ + Equivalent to arr1d[~arr1d.isnan()], but in a different order + + Presumably faster as it incurs fewer copies + + Parameters + ---------- + arr1d : ndarray + Array to remove nans from + second_arr1d : ndarray or None + A second array which will have the same positions removed as arr1d. + overwrite_input : bool + True if `arr1d` can be modified in place + + Returns + ------- + res : ndarray + Array with nan elements removed + second_res : ndarray or None + Second array with nan element positions of first array removed. 
+ overwrite_input : bool + True if `res` can be modified in place, given the constraint on the + input + """ + if arr1d.dtype == object: + # object arrays do not support `isnan` (gh-9009), so make a guess + c = np.not_equal(arr1d, arr1d, dtype=bool) + else: + c = np.isnan(arr1d) + + s = np.nonzero(c)[0] + if s.size == arr1d.size: + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=6) + if second_arr1d is None: + return arr1d[:0], None, True + else: + return arr1d[:0], second_arr1d[:0], True + elif s.size == 0: + return arr1d, second_arr1d, overwrite_input + else: + if not overwrite_input: + arr1d = arr1d.copy() + # select non-nans at end of array + enonan = arr1d[-s.size:][~c[-s.size:]] + # fill nans in beginning of array with non-nans of end + arr1d[s[:enonan.size]] = enonan + + if second_arr1d is None: + return arr1d[:-s.size], None, True + else: + if not overwrite_input: + second_arr1d = second_arr1d.copy() + enonan = second_arr1d[-s.size:][~c[-s.size:]] + second_arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], second_arr1d[:-s.size], True + + +def _divide_by_count(a, b, out=None): + """ + Compute a/b ignoring invalid results. If `a` is an array the division + is done in place. If `a` is a scalar, then its type is preserved in the + output. If out is None, then a is used instead so that the division + is in place. Note that this is only called with `a` an inexact type. + + Parameters + ---------- + a : {ndarray, numpy scalar} + Numerator. Expected to be of inexact type but not checked. + b : {ndarray, numpy scalar} + Denominator. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + + Returns + ------- + ret : {ndarray, numpy scalar} + The return value is a/b. If `a` was an ndarray the division is done + in place. If `a` is a numpy scalar, the division preserves its type. + + """ + with np.errstate(invalid='ignore', divide='ignore'): + if isinstance(a, np.ndarray): + if out is None: + return np.divide(a, b, out=a, casting='unsafe') + else: + return np.divide(a, b, out=out, casting='unsafe') + elif out is None: + # Precaution against reduced object arrays + try: + return a.dtype.type(a / b) + except AttributeError: + return a / b + else: + # This is questionable, but currently a numpy scalar can + # be output to a zero dimensional array. + return np.divide(a, b, out=out, casting='unsafe') + + +def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanmin_dispatcher) +def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return minimum of an array or minimum along an axis, ignoring any NaNs. + When all-NaN slices are encountered a ``RuntimeWarning`` is raised and + Nan is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose minimum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the minimum is computed. The default is to compute + the minimum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. 
See
+        :ref:`ufuncs-output-type` for more details.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `min` method
+        of sub-classes of `ndarray`. If the sub-class's method
+        does not implement `keepdims`, an exception will be raised.
+    initial : scalar, optional
+        The maximum value of an output element. Must be present to allow
+        computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nanmin : ndarray
+        An array with the same shape as `a`, with the specified axis
+        removed. If `a` is a 0-d array, or if axis is None, an ndarray
+        scalar is returned. The same dtype as `a` is returned.
+
+    See Also
+    --------
+    nanmax :
+        The maximum value of an array along a given axis, ignoring any NaNs.
+    amin :
+        The minimum value of an array along a given axis, propagating any NaNs.
+    fmin :
+        Element-wise minimum of two arrays, ignoring any NaNs.
+    minimum :
+        Element-wise minimum of two arrays, propagating any NaNs.
+    isnan :
+        Shows which elements are Not a Number (NaN).
+    isfinite :
+        Shows which elements are neither NaN nor infinity.
+
+    amax, fmax, maximum
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Positive infinity is treated as a very large number and negative
+    infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type the function is equivalent to np.min.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2], [3, np.nan]])
+    >>> np.nanmin(a)
+    1.0
+    >>> np.nanmin(a, axis=0)
+    array([1., 2.])
+    >>> np.nanmin(a, axis=1)
+    array([1., 3.])
+
+    When positive infinity and negative infinity are present:
+
+    >>> np.nanmin([1, 2, np.nan, np.inf])
+    1.0
+    >>> np.nanmin([1, 2, np.nan, -np.inf])
+    -inf
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
+    if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_:
+        # Fast, but not safe for subclasses of ndarray, or object arrays,
+        # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
+        res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
+        if np.isnan(res).any():
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=2)
+    else:
+        # Slow, but safe for subclasses of ndarray
+        a, mask = _replace_nan(a, +np.inf)
+        res = np.amin(a, axis=axis, out=out, **kwargs)
+        if mask is None:
+            return res
+
+        # Check for all-NaN axis
+        kwargs.pop("initial", None)
+        mask = np.all(mask, axis=axis, **kwargs)
+        if np.any(mask):
+            res = _copyto(res, np.nan, mask)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=2)
+    return res
+
+
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
+                       initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanmax_dispatcher)
+def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+           where=np._NoValue):
+    """
+    Return the maximum of an array or maximum along an axis, ignoring any
+    NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
+    raised and NaN is returned for that slice.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose maximum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the maximum is computed. The default is to compute
+        the maximum of the flattened array.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        :ref:`ufuncs-output-type` for more details.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `max` method
+        of sub-classes of `ndarray`. If the sub-class's method
+        does not implement `keepdims`, an exception will be raised.
+    initial : scalar, optional
+        The minimum value of an output element. Must be present to allow
+        computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nanmax : ndarray
+        An array with the same shape as `a`, with the specified axis removed.
+        If `a` is a 0-d array, or if axis is None, an ndarray scalar is
+        returned. The same dtype as `a` is returned.
+
+    See Also
+    --------
+    nanmin :
+        The minimum value of an array along a given axis, ignoring any NaNs.
+    amax :
+        The maximum value of an array along a given axis, propagating any NaNs.
+    fmax :
+        Element-wise maximum of two arrays, ignoring any NaNs.
+    maximum :
+        Element-wise maximum of two arrays, propagating any NaNs.
+    isnan :
+        Shows which elements are Not a Number (NaN).
+    isfinite :
+        Shows which elements are neither NaN nor infinity.
+
+    amin, fmin, minimum
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Positive infinity is treated as a very large number and negative
+    infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type the function is equivalent to np.max.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2], [3, np.nan]])
+    >>> np.nanmax(a)
+    3.0
+    >>> np.nanmax(a, axis=0)
+    array([3., 2.])
+    >>> np.nanmax(a, axis=1)
+    array([2., 3.])
+
+    When positive infinity and negative infinity are present:
+
+    >>> np.nanmax([1, 2, np.nan, -np.inf])
+    2.0
+    >>> np.nanmax([1, 2, np.nan, np.inf])
+    inf
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
+    if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_:
+        # Fast, but not safe for subclasses of ndarray, or object arrays,
+        # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
+        res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
+        if np.isnan(res).any():
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=2)
+    else:
+        # Slow, but safe for subclasses of ndarray
+        a, mask = _replace_nan(a, -np.inf)
+        res = np.amax(a, axis=axis, out=out, **kwargs)
+        if mask is None:
+            return res
+
+        # Check for all-NaN axis
+        kwargs.pop("initial", None)
+        mask = np.all(mask, axis=axis, **kwargs)
+        if np.any(mask):
+            res = _copyto(res, np.nan, mask)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=2)
+    return res
+
+
+def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
+    return (a,)
+
+
+@array_function_dispatch(_nanargmin_dispatcher)
+def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Return the indices of the minimum values in the specified axis ignoring
+    NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
+    cannot be trusted if a slice contains only NaNs and Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which to operate. By default flattened input is used.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+        .. versionadded:: 1.22.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the array.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    index_array : ndarray
+        An array of indices or a single index value.
+ + See Also + -------- + argmin, nanargmax + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + 0 + >>> np.nanargmin(a) + 2 + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + a, mask = _replace_nan(a, np.inf) + if mask is not None and mask.size: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmin(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_nanargmax_dispatcher) +def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Return the indices of the maximum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the + results cannot be trusted if a slice contains only NaNs and -Infs. + + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + + .. versionadded:: 1.22.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmax, nanargmin + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + 0 + >>> np.nanargmax(a) + 1 + >>> np.nanargmax(a, axis=0) + array([1, 0]) + >>> np.nanargmax(a, axis=1) + array([1, 1]) + + """ + a, mask = _replace_nan(a, -np.inf) + if mask is not None and mask.size: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmax(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nansum_dispatcher) +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. + + In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or + empty. In later versions zero is returned. + + Parameters + ---------- + a : array_like + Array containing numbers whose sum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the sum is computed. The default is to compute the + sum of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. 
If provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
+        can yield unexpected results.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `mean` or `sum` methods
+        of sub-classes of `ndarray`. If the sub-class's method
+        does not implement `keepdims`, an exception will be raised.
+    initial : scalar, optional
+        Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nansum : ndarray.
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+        size as `a`, and the same shape as `a` if `axis` is not None
+        or `a` is a 1-d array.
+
+    See Also
+    --------
+    numpy.sum : Sum across array propagating NaNs.
+    isnan : Show which elements are NaN.
+    isfinite : Show which elements are not NaN or +/-inf.
+
+    Notes
+    -----
+    If both positive and negative infinity are present, the sum will be Not
+    A Number (NaN).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.nansum(1)
+    1
+    >>> np.nansum([1])
+    1
+    >>> np.nansum([1, np.nan])
+    1.0
+    >>> a = np.array([[1, 1], [1, np.nan]])
+    >>> np.nansum(a)
+    3.0
+    >>> np.nansum(a, axis=0)
+    array([2., 1.])
+    >>> np.nansum([1, np.nan, np.inf])
+    inf
+    >>> np.nansum([1, np.nan, -np.inf])
+    -inf
+    >>> from numpy.testing import suppress_warnings
+    >>> with np.errstate(invalid="ignore"):
+    ...     np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+    np.float64(nan)
+
+    """
+    a, mask = _replace_nan(a, 0)
+    return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+                  initial=initial, where=where)
+
+
+def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+                        initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanprod_dispatcher)
+def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+            initial=np._NoValue, where=np._NoValue):
+    """
+    Return the product of array elements over a given axis treating Not a
+    Numbers (NaNs) as ones.
+
+    One is returned for slices that are all-NaN or empty.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose product is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the product is computed. The default is to compute
+        the product of the flattened array.
+    dtype : data-type, optional
+        The type of the returned array and of the accumulator in which the
+        elements are multiplied. By default, the dtype of `a` is used. An
+        exception is when `a` has an integer type with less precision than
+        the platform (u)intp. In that case, the default will be either
+        (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+        bits. For inexact inputs, dtype must be inexact.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``. If provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
See
+        :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
+        can yield unexpected results.
+    keepdims : bool, optional
+        If True, the axes which are reduced are left in the result as
+        dimensions with size one. With this option, the result will
+        broadcast correctly against the original `a`.
+    initial : scalar, optional
+        The starting value for this product. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to include in the product. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nanprod : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned.
+
+    See Also
+    --------
+    numpy.prod : Product across array propagating NaNs.
+    isnan : Show which elements are NaN.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.nanprod(1)
+    1
+    >>> np.nanprod([1])
+    1
+    >>> np.nanprod([1, np.nan])
+    1.0
+    >>> a = np.array([[1, 2], [3, np.nan]])
+    >>> np.nanprod(a)
+    6.0
+    >>> np.nanprod(a, axis=0)
+    array([3., 2.])
+
+    """
+    a, mask = _replace_nan(a, 1)
+    return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+                   initial=initial, where=where)
+
+
+def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nancumsum_dispatcher)
+def nancumsum(a, axis=None, dtype=None, out=None):
+    """
+    Return the cumulative sum of array elements over a given axis treating Not a
+    Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are
+    encountered and leading NaNs are replaced by zeros.
+
+    Zeros are returned for slices that are all-NaN or empty.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        Axis along which the cumulative sum is computed. The default
+        (None) is to compute the cumsum over the flattened array.
+    dtype : dtype, optional
+        Type of the returned array and of the accumulator in which the
+        elements are summed. If `dtype` is not specified, it defaults
+        to the dtype of `a`, unless `a` has an integer dtype with a
+        precision less than that of the default platform integer. In
+        that case, the default platform integer is used.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output
+        but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
+        more details.
+
+    Returns
+    -------
+    nancumsum : ndarray.
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+        size as `a`, and the same shape as `a` if `axis` is not None
+        or `a` is a 1-d array.
+
+    See Also
+    --------
+    numpy.cumsum : Cumulative sum across array propagating NaNs.
+    isnan : Show which elements are NaN.
+ + Examples + -------- + >>> import numpy as np + >>> np.nancumsum(1) + array([1]) + >>> np.nancumsum([1]) + array([1]) + >>> np.nancumsum([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumsum(a) + array([1., 3., 6., 6.]) + >>> np.nancumsum(a, axis=0) + array([[1., 2.], + [4., 2.]]) + >>> np.nancumsum(a, axis=1) + array([[1., 3.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 0) + return np.cumsum(a, axis=axis, dtype=dtype, out=out) + + +def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumprod_dispatcher) +def nancumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of array elements over a given axis treating Not a + Numbers (NaNs) as one. The cumulative product does not change when NaNs are + encountered and leading NaNs are replaced by ones. + + Ones are returned for slices that are all-NaN or empty. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + nancumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.cumprod : Cumulative product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> import numpy as np + >>> np.nancumprod(1) + array([1]) + >>> np.nancumprod([1]) + array([1]) + >>> np.nancumprod([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumprod(a) + array([1., 2., 6., 6.]) + >>> np.nancumprod(a, axis=0) + array([[1., 2.], + [3., 2.]]) + >>> np.nancumprod(a, axis=1) + array([[1., 2.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 1) + return np.cumprod(a, axis=axis, dtype=dtype, out=out) + + +def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + *, where=None): + return (a, out) + + +@array_function_dispatch(_nanmean_dispatcher) +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + *, where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis, ignoring NaNs. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. 
For integer inputs, the default + is `float64`; for inexact inputs, it is the same as the input + dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. Nan is + returned for slices that contain only NaNs. + + See Also + -------- + average : Weighted average + mean : Arithmetic mean taken while not ignoring NaNs + var, nanvar + + Notes + ----- + The arithmetic mean is the sum of the non-NaN elements along the axis + divided by the number of non-NaN elements. + + Note that for floating-point input, the mean is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32`. Specifying a + higher-precision accumulator using the `dtype` keyword can alleviate + this issue. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanmean(a) + 2.6666666666666665 + >>> np.nanmean(a, axis=0) + array([2., 4.]) + >>> np.nanmean(a, axis=1) + array([1., 3.5]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims, + where=where) + tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + avg = _divide_by_count(tot, cnt, out=out) + + isbad = (cnt == 0) + if isbad.any(): + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) + # NaN is the only possible bad value, so no further + # action is needed to handle bad results. + return avg + + +def _nanmedian1d(arr1d, overwrite_input=False): + """ + Private function for rank 1 arrays. Compute the median ignoring NaNs. 
+ See nanmedian for parameter usage + """ + arr1d_parsed, _, overwrite_input = _remove_nan_1d( + arr1d, overwrite_input=overwrite_input, + ) + + if arr1d_parsed.size == 0: + # Ensure that a nan-esque scalar of the appropriate type (and unit) + # is returned for `timedelta64` and `complexfloating` + return arr1d[-1] + + return np.median(arr1d_parsed, overwrite_input=overwrite_input) + + +def _nanmedian(a, axis=None, out=None, overwrite_input=False): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanmedian for parameter usage + + """ + if axis is None or a.ndim == 1: + part = a.ravel() + if out is None: + return _nanmedian1d(part, overwrite_input) + else: + out[...] = _nanmedian1d(part, overwrite_input) + return out + else: + # for small medians use sort + indexing which is still faster than + # apply_along_axis + # benchmarked with shuffled (50, 50, x) containing a few NaN + if a.shape[axis] < 600: + return _nanmedian_small(a, axis, out, overwrite_input) + result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) + if out is not None: + out[...] = result + return result + + +def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): + """ + sort + indexing median, faster for small medians along multiple + dimensions due to the high overhead of apply_along_axis + + see nanmedian for parameter usage + """ + a = np.ma.masked_array(a, np.isnan(a)) + m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) + for i in range(np.count_nonzero(m.mask.ravel())): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=5) + + fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan + if out is not None: + out[...] = m.filled(fill_value) + return out + return m.filled(fill_value) + + +def _nanmedian_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmedian_dispatcher) +def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): + """ + Compute the median along the specified axis, while ignoring NaNs. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. 
+
+        If this is anything but the default value it will be passed
+        through (in the special case of an empty array) to the
+        `mean` function of the underlying array. If the array is
+        a sub-class and `mean` does not have the kwarg `keepdims` this
+        will raise a RuntimeError.
+
+    Returns
+    -------
+    median : ndarray
+        A new array holding the result. If the input contains integers
+        or floats smaller than ``float64``, then the output data-type is
+        ``np.float64``. Otherwise, the data-type of the output is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
+
+    See Also
+    --------
+    mean, median, percentile
+
+    Notes
+    -----
+    Given a vector ``V`` of length ``N``, the median of ``V`` is the
+    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+    ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
+    middle values of ``V_sorted`` when ``N`` is even.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
+    >>> a[0, 1] = np.nan
+    >>> a
+    array([[10., nan,  4.],
+           [ 3.,  2.,  1.]])
+    >>> np.median(a)
+    np.float64(nan)
+    >>> np.nanmedian(a)
+    3.0
+    >>> np.nanmedian(a, axis=0)
+    array([6.5, 2. , 2.5])
+    >>> np.median(a, axis=1)
+    array([nan,  2.])
+    >>> b = a.copy()
+    >>> np.nanmedian(b, axis=1, overwrite_input=True)
+    array([7.,  2.])
+    >>> assert not np.all(a==b)
+    >>> b = a.copy()
+    >>> np.nanmedian(b, axis=None, overwrite_input=True)
+    3.0
+    >>> assert not np.all(a==b)
+
+    """
+    a = np.asanyarray(a)
+    # apply_along_axis in _nanmedian doesn't handle empty arrays well,
+    # so deal with them upfront
+    if a.size == 0:
+        return np.nanmean(a, axis, out=out, keepdims=keepdims)
+
+    return fnb._ureduce(a, func=_nanmedian, keepdims=keepdims,
+                        axis=axis, out=out,
+                        overwrite_input=overwrite_input)
+
+
+def _nanpercentile_dispatcher(
+        a, q, axis=None, out=None, overwrite_input=None,
+        method=None, keepdims=None, *, weights=None, interpolation=None):
+    return (a, q, out, weights)
+
+
+@array_function_dispatch(_nanpercentile_dispatcher)
+def nanpercentile(
+        a,
+        q,
+        axis=None,
+        out=None,
+        overwrite_input=False,
+        method="linear",
+        keepdims=np._NoValue,
+        *,
+        weights=None,
+        interpolation=None,
+):
+    """
+    Compute the qth percentile of the data along the specified axis,
+    while ignoring nan values.
+
+    Returns the qth percentile(s) of the array elements.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array, containing
+        nan values to be ignored.
+    q : array_like of float
+        Percentile or sequence of percentiles to compute, which must be
+        between 0 and 100 inclusive.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the percentiles are computed. The default
+        is to compute the percentile(s) along a flattened version of the
+        array.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape and buffer length as the expected output, but the
+        type (of the output) will be cast if necessary.
+    overwrite_input : bool, optional
+        If True, then allow the input array `a` to be modified by
+        intermediate calculations, to save memory. In this case, the
+        contents of the input `a` after this function completes are
+        undefined.
+    method : str, optional
+        This parameter specifies the method to use for estimating the
+        percentile. There are many different methods, some unique to NumPy.
+        See the notes for explanation. The options sorted by their R type
+        as summarized in the H&F paper [1]_ are:
+
+        1.
'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the percentile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + nanmean + nanmedian : equivalent to ``nanpercentile(..., 50)`` + percentile, median, mean + nanquantile : equivalent to nanpercentile, except q in range [0, 1]. + + Notes + ----- + The behavior of `numpy.nanpercentile` with percentage `q` is that of + `numpy.quantile` with argument ``q/100`` (ignoring nan values). + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.percentile(a, 50) + np.float64(nan) + >>> np.nanpercentile(a, 50) + 3.0 + >>> np.nanpercentile(a, 50, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanpercentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanpercentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanpercentile(a, 50, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + + >>> b = a.copy() + >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 
361-365, 1996 + + """ + if interpolation is not None: + method = fnb._check_interpolation_as_method( + method, interpolation, "nanpercentile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + if not fnb._quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_nanquantile_dispatcher) +def nanquantile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + weights=None, + interpolation=None, +): + """ + Compute the qth quantile of the data along the specified axis, + while ignoring nan values. + Returns the qth quantile(s) of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored + q : array_like of float + Probability or sequence of probabilities for the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The + default is to compute the quantile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. 
+ + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the quantile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + quantile + nanmean, nanmedian + nanmedian : equivalent to ``nanquantile(..., 0.5)`` + nanpercentile : same as nanquantile, but with q in the range [0, 100]. + + Notes + ----- + The behavior of `numpy.nanquantile` is the same as that of + `numpy.quantile` (ignoring nan values). + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.quantile(a, 0.5) + np.float64(nan) + >>> np.nanquantile(a, 0.5) + 3.0 + >>> np.nanquantile(a, 0.5, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanquantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanquantile(a, 0.5, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + >>> b = a.copy() + >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + + if interpolation is not None: + method = fnb._check_interpolation_as_method( + method, interpolation, "nanquantile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float). + if isinstance(q, (int, float)) and a.dtype.kind == "f": + q = np.asanyarray(q, dtype=a.dtype) + else: + q = np.asanyarray(q) + + if not fnb._quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. 
" + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _nanquantile_unchecked( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + weights=None, +): + """Assumes that q is in [0, 1], and is an ndarray""" + # apply_along_axis in _nanpercentile doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + return fnb._ureduce(a, + func=_nanquantile_ureduce_func, + q=q, + weights=weights, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method) + + +def _nanquantile_ureduce_func( + a: np.array, + q: np.array, + weights: np.array, + axis: int | None = None, + out=None, + overwrite_input: bool = False, + method="linear", +): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + """ + if axis is None or a.ndim == 1: + part = a.ravel() + wgt = None if weights is None else weights.ravel() + result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) + # Note that this code could try to fill in `out` right away + elif weights is None: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method, weights) + # apply_along_axis fills in collapsed axis with results. + # Move those axes to the beginning to match percentile's + # convention. + if q.ndim != 0: + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) + else: + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out + else: + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + ) + # This path dealt with `out` already... + return result + + if out is not None: + out[...] = result + return result + + +def _nanquantile_1d( + arr1d, q, overwrite_input=False, method="linear", weights=None, +): + """ + Private function for rank 1 arrays. Compute quantile ignoring NaNs. + See nanpercentile for parameter usage + """ + # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]? 
+ arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, + second_arr1d=weights, overwrite_input=overwrite_input) + if arr1d.size == 0: + # convert to scalar + return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] + + return fnb._quantile_unchecked( + arr1d, + q, + overwrite_input=overwrite_input, + method=method, + weights=weights, + ) + + +def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, + correction=None): + return (a, out) + + +@array_function_dispatch(_nanvar_dispatcher) +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + Returns the variance of the array elements, a measure of the spread of + a distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : {int, float}, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + + .. versionadded:: 1.22.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this var function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + variance : ndarray, see dtype parameter above + If `out` is None, return a new array containing the variance, + otherwise return a reference to the output array. If ddof is >= the + number of non-NaN elements in a slice or the slice contains only + NaNs, then the result for that slice is NaN. + + See Also + -------- + std : Standard deviation + mean : Average + var : Variance while not ignoring NaNs + nanstd, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. 
In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite + population. ``ddof=0`` provides a maximum likelihood estimate of the + variance for normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + For this function to work on sub-classes of ndarray, they must define + `sum` with the kwarg `keepdims` + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanvar(a) + 1.5555555555555554 + >>> np.nanvar(a, axis=0) + array([1., 0.]) + >>> np.nanvar(a, axis=1) + array([0., 0.25]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean, + correction=correction) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + # Compute mean + if type(arr) is np.matrix: + _keepdims = np._NoValue + else: + _keepdims = True + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims, + where=where) + + if mean is not np._NoValue: + avg = mean + else: + # we need to special case matrix for reverse compatibility + # in order for this to work, these sums need to be called with + # keepdims=True, however matrix now raises an error in this case, but + # the reason that it drops the keepdims kwarg is to force keepdims=True + # so this used to work by serendipity. + avg = np.sum(arr, axis=axis, dtype=dtype, + keepdims=_keepdims, where=where) + avg = _divide_by_count(avg, cnt) + + # Compute squared deviation from mean. + np.subtract(arr, avg, out=arr, casting='unsafe', where=where) + arr = _copyto(arr, 0, mask) + if issubclass(arr.dtype.type, np.complexfloating): + sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real + else: + sqr = np.multiply(arr, arr, out=arr, where=where) + + # Compute variance. + var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + # Precaution against reduced object arrays + try: + var_ndim = var.ndim + except AttributeError: + var_ndim = np.ndim(var) + if var_ndim < cnt.ndim: + # Subclasses of ndarray may ignore keepdims, so check here. + cnt = cnt.squeeze(axis) + dof = cnt - ddof + var = _divide_by_count(var, dof) + + isbad = (dof <= 0) + if np.any(isbad): + warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, + stacklevel=2) + # NaN, inf, or negative numbers are all possible bad + # values, so explicitly replace them with NaN. 
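+        # Worked example (illustrative): a slice [np.nan, 5.0] with ddof=1
+        # leaves a single non-NaN element, so dof = 1 - 1 = 0, the warning
+        # above fires, and the slice's result is replaced with NaN below.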
+ var = _copyto(var, np.nan, isbad) + return var + + +def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, + correction=None): + return (a, out) + + +@array_function_dispatch(_nanstd_dispatcher) +def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + """ + Compute the standard deviation along the specified axis, while + ignoring NaNs. + + Returns the standard deviation, a measure of the spread of a + distribution, of the non-NaN array elements. The standard deviation is + computed for the flattened array by default, otherwise over the + specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of the non-NaN values. + axis : {int, tuple of int, None}, optional + Axis or axes along which the standard deviation is computed. The default is + to compute the standard deviation of the flattened array. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it + is the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the + calculated values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this value is anything but the default it is passed through + as-is to the relevant functions of the sub-classes. If these + functions do not have a `keepdims` kwarg, a RuntimeError will + be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this std function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard + deviation, otherwise return a reference to the output array. If + ddof is >= the number of non-NaN elements in a slice or the slice + contains only NaNs, then the result for that slice is NaN. + + See Also + -------- + var, mean, std + nanvar, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is + specified, the divisor ``N - ddof`` is used instead. 
In standard + statistical practice, ``ddof=1`` provides an unbiased estimator of the + variance of the infinite population. ``ddof=0`` provides a maximum + likelihood estimate of the variance for normally distributed variables. + The standard deviation computed in this function is the square root of + the estimated variance, so even with ``ddof=1``, it will not be an + unbiased estimate of the standard deviation per se. + + Note that, for complex numbers, `std` takes the absolute value before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example + below). Specifying a higher-accuracy accumulator using the `dtype` + keyword can alleviate this issue. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanstd(a) + 1.247219128924647 + >>> np.nanstd(a, axis=0) + array([1., 0.]) + >>> np.nanstd(a, axis=1) + array([0., 0.5]) # may vary + + """ + var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean, + correction=correction) + if isinstance(var, np.ndarray): + std = np.sqrt(var, out=var) + elif hasattr(var, 'dtype'): + std = var.dtype.type(np.sqrt(var)) + else: + std = np.sqrt(var) + return std diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.pyi new file mode 100644 index 0000000..f39800d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.pyi @@ -0,0 +1,52 @@ +from numpy._core.fromnumeric import ( + amax, + amin, + argmax, + argmin, + cumprod, + cumsum, + mean, + prod, + std, + sum, + var, +) +from numpy.lib._function_base_impl import ( + median, + percentile, + quantile, +) + +__all__ = [ + "nansum", + "nanmax", + "nanmin", + "nanargmax", + "nanargmin", + "nanmean", + "nanmedian", + "nanpercentile", + "nanvar", + "nanstd", + "nanprod", + "nancumsum", + "nancumprod", + "nanquantile", +] + +# NOTE: In reality these functions are not aliases but distinct functions +# with identical signatures. +nanmin = amin +nanmax = amax +nanargmin = argmin +nanargmax = argmax +nansum = sum +nanprod = prod +nancumsum = cumsum +nancumprod = cumprod +nanmean = mean +nanvar = var +nanstd = std +nanmedian = median +nanpercentile = percentile +nanquantile = quantile diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.py new file mode 100644 index 0000000..6aea567 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.py @@ -0,0 +1,2596 @@ +""" +IO related functions. +""" +import contextlib +import functools +import itertools +import operator +import os +import pickle +import re +import warnings +import weakref +from collections.abc import Mapping +from operator import itemgetter + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _load_from_filelike +from numpy._core.multiarray import packbits, unpackbits +from numpy._core.overrides import finalize_array_function_like, set_module +from numpy._utils import asbytes, asunicode + +from . 
import format +from ._datasource import DataSource # noqa: F401 +from ._format_impl import _MAX_HEADER_SIZE +from ._iotools import ( + ConversionWarning, + ConverterError, + ConverterLockError, + LineSplitter, + NameValidator, + StringConverter, + _decode_line, + _is_string_like, + easy_dtype, + flatten_dtype, + has_nested_fields, +) + +__all__ = [ + 'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez', + 'savez_compressed', 'packbits', 'unpackbits', 'fromregex' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +class BagObj: + """ + BagObj(obj) + + Convert attribute look-ups to getitems on the object passed in. + + Parameters + ---------- + obj : class instance + Object on which attribute look-up is performed. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib._npyio_impl import BagObj as BO + >>> class BagDemo: + ... def __getitem__(self, key): # An instance of BagObj(BagDemo) + ... # will call this method when any + ... # attribute look-up is required + ... result = "Doesn't matter what you want, " + ... return result + "you're gonna get this" + ... + >>> demo_obj = BagDemo() + >>> bagobj = BO(demo_obj) + >>> bagobj.hello_there + "Doesn't matter what you want, you're gonna get this" + >>> bagobj.I_can_be_anything + "Doesn't matter what you want, you're gonna get this" + + """ + + def __init__(self, obj): + # Use weakref to make NpzFile objects collectable by refcount + self._obj = weakref.proxy(obj) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, '_obj')[key] + except KeyError: + raise AttributeError(key) from None + + def __dir__(self): + """ + Enables dir(bagobj) to list the files in an NpzFile. + + This also enables tab-completion in an interpreter or IPython. + """ + return list(object.__getattribute__(self, '_obj').keys()) + + +def zipfile_factory(file, *args, **kwargs): + """ + Create a ZipFile. + + Allows for Zip64, and the `file` argument can accept file, str, or + pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile + constructor. + """ + if not hasattr(file, 'read'): + file = os.fspath(file) + import zipfile + kwargs['allowZip64'] = True + return zipfile.ZipFile(file, *args, **kwargs) + + +@set_module('numpy.lib.npyio') +class NpzFile(Mapping): + """ + NpzFile(fid) + + A dictionary-like object with lazy-loading of files in the zipped + archive provided on construction. + + `NpzFile` is used to load files in the NumPy ``.npz`` data archive + format. It assumes that files in the archive have a ``.npy`` extension, + other files are ignored. + + The arrays and file strings are lazily loaded on either + getitem access using ``obj['key']`` or attribute lookup using + ``obj.f.key``. A list of all files (without ``.npy`` extensions) can + be obtained with ``obj.files`` and the ZipFile object itself using + ``obj.zip``. + + Attributes + ---------- + files : list of str + List of all files in the archive with a ``.npy`` extension. + zip : ZipFile instance + The ZipFile object initialized with the zipped archive. + f : BagObj instance + An object on which attribute can be performed as an alternative + to getitem access on the `NpzFile` instance itself. + allow_pickle : bool, optional + Allow loading pickled data. Default: False + pickle_kwargs : dict, optional + Additional keyword arguments to pass on to pickle.load. + These are only useful when loading object arrays saved on + Python 2. 
+ max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Parameters + ---------- + fid : file, str, or pathlib.Path + The zipped archive to open. This is either a file-like object + or a string containing the path to the archive. + own_fid : bool, optional + Whether NpzFile should close the file handle. + Requires that `fid` is a file-like object. + + Examples + -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + + >>> npz = np.load(outfile) + >>> isinstance(npz, np.lib.npyio.NpzFile) + True + >>> npz + NpzFile 'object' with keys: x, y + >>> sorted(npz.files) + ['x', 'y'] + >>> npz['x'] # getitem access + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> npz.f.x # attribute lookup + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + # Make __exit__ safe if zipfile_factory raises an exception + zip = None + fid = None + _MAX_REPR_ARRAY_COUNT = 5 + + def __init__(self, fid, own_fid=False, allow_pickle=False, + pickle_kwargs=None, *, + max_header_size=_MAX_HEADER_SIZE): + # Import is postponed to here since zipfile depends on gzip, an + # optional component of the so-called standard library. + _zip = zipfile_factory(fid) + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) + self.allow_pickle = allow_pickle + self.max_header_size = max_header_size + self.pickle_kwargs = pickle_kwargs + self.zip = _zip + self.f = BagObj(self) + if own_fid: + self.fid = fid + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + """ + Close the file. + + """ + if self.zip is not None: + self.zip.close() + self.zip = None + if self.fid is not None: + self.fid.close() + self.fid = None + self.f = None # break reference cycle + + def __del__(self): + self.close() + + # Implement the Mapping ABC + def __iter__(self): + return iter(self.files) + + def __len__(self): + return len(self.files) + + def __getitem__(self, key): + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None + else: + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. 
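+                    # Illustrative note: only members whose data begins with
+                    # the b'\x93NUMPY' magic (format.MAGIC_PREFIX) are parsed
+                    # as arrays; any other member falls through to the
+                    # raw-bytes branch below.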
+ return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read() + + def __contains__(self, key): + return (key in self._files) + + def __repr__(self): + # Get filename or default to `object` + if isinstance(self.fid, str): + filename = self.fid + else: + filename = getattr(self.fid, "name", "object") + + # Get the name of arrays + array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT]) + if len(self.files) > self._MAX_REPR_ARRAY_COUNT: + array_names += "..." + return f"NpzFile {filename!r} with keys: {array_names}" + + # Work around problems with the docstrings in the Mapping methods + # They contain a `->`, which confuses the type annotation interpretations + # of sphinx-docs. See gh-25964 + + def get(self, key, default=None, /): + """ + D.get(k,[,d]) returns D[k] if k in D, else d. d defaults to None. + """ + return Mapping.get(self, key, default) + + def items(self): + """ + D.items() returns a set-like object providing a view on the items + """ + return Mapping.items(self) + + def keys(self): + """ + D.keys() returns a set-like object providing a view on the keys + """ + return Mapping.keys(self) + + def values(self): + """ + D.values() returns a set-like object providing a view on the values + """ + return Mapping.values(self) + + +@set_module('numpy') +def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, + encoding='ASCII', *, max_header_size=_MAX_HEADER_SIZE): + """ + Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. + + .. warning:: Loading files that contain object arrays uses the ``pickle`` + module, which is not secure against erroneous or maliciously + constructed data. Consider passing ``allow_pickle=False`` to + load data that is known not to contain object arrays for the + safer handling of untrusted sources. + + Parameters + ---------- + file : file-like object, string, or pathlib.Path + The file to read. File-like objects must support the + ``seek()`` and ``read()`` methods and must always + be opened in binary mode. Pickled files require that the + file-like object support the ``readline()`` method as well. + mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, then memory-map the file, using the given mode (see + `numpy.memmap` for a detailed description of the modes). A + memory-mapped array is kept on disk. However, it can be accessed + and sliced like any ndarray. Memory mapping is especially useful + for accessing small fragments of large files without reading the + entire file into memory. + allow_pickle : bool, optional + Allow loading pickled object arrays stored in npy files. Reasons for + disallowing pickles include security, as loading pickled data can + execute arbitrary code. If pickles are disallowed, loading object + arrays will fail. Default: False + fix_imports : bool, optional + Only useful when loading Python 2 generated pickled files, + which includes npy/npz files containing object arrays. If `fix_imports` + is True, pickle will try to map the old Python 2 names to the new names + used in Python 3. + encoding : str, optional + What encoding to use when reading Python 2 strings. Only useful when + loading Python 2 generated pickled files, which includes + npy/npz files containing object arrays. Values other than 'latin1', + 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical + data. 
Default: 'ASCII' + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + result : array, tuple, dict, etc. + Data stored in the file. For ``.npz`` files, the returned instance + of NpzFile class must be closed to avoid leaking file descriptors. + + Raises + ------ + OSError + If the input file does not exist or cannot be read. + UnpicklingError + If ``allow_pickle=True``, but the file cannot be loaded as a pickle. + ValueError + The file contains an object array, but ``allow_pickle=False`` given. + EOFError + When calling ``np.load`` multiple times on the same file handle, + if all data has already been read + + See Also + -------- + save, savez, savez_compressed, loadtxt + memmap : Create a memory-map to an array stored in a file on disk. + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + - If the file contains pickle data, then whatever object is stored + in the pickle is returned. + - If the file is a ``.npy`` file, then a single array is returned. + - If the file is a ``.npz`` file, then a dictionary-like object is + returned, containing ``{filename: array}`` key-value pairs, one for + each file in the archive. + - If the file is a ``.npz`` file, the returned value supports the + context manager protocol in a similar fashion to the open function:: + + with load('foo.npz') as data: + a = data['a'] + + The underlying file descriptor is closed when exiting the 'with' + block. + + Examples + -------- + >>> import numpy as np + + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) + >>> np.load('/tmp/123.npy') + array([[1, 2, 3], + [4, 5, 6]]) + + Store compressed data to disk, and load it again: + + >>> a=np.array([[1, 2, 3], [4, 5, 6]]) + >>> b=np.array([1, 2]) + >>> np.savez('/tmp/123.npz', a=a, b=b) + >>> data = np.load('/tmp/123.npz') + >>> data['a'] + array([[1, 2, 3], + [4, 5, 6]]) + >>> data['b'] + array([1, 2]) + >>> data.close() + + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + + """ + if encoding not in ('ASCII', 'latin1', 'bytes'): + # The 'encoding' value for pickle also affects what encoding + # the serialized binary data of NumPy arrays is loaded + # in. Pickle does not pass on the encoding information to + # NumPy. The unpickling code in numpy._core.multiarray is + # written to assume that unicode data appearing where binary + # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. + # + # Other encoding values can corrupt binary data, and we + # purposefully disallow them. For the same reason, the errors= + # argument is not exposed, as values other than 'strict' + # result can similarly silently corrupt numerical data. + raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") + + pickle_kwargs = {'encoding': encoding, 'fix_imports': fix_imports} + + with contextlib.ExitStack() as stack: + if hasattr(file, 'read'): + fid = file + own_fid = False + else: + fid = stack.enter_context(open(os.fspath(file), "rb")) + own_fid = True + + # Code to distinguish from NumPy binary files and pickles. 
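+        # Illustrative background: b'PK\x03\x04' is the standard ZIP
+        # local-file-header signature, so an .npz archive (a zip of .npy
+        # members) can be told apart from a bare .npy file, whose data
+        # begins with the b'\x93NUMPY' magic (format.MAGIC_PREFIX).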
+ _ZIP_PREFIX = b'PK\x03\x04' + _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this + N = len(format.MAGIC_PREFIX) + magic = fid.read(N) + if not magic: + raise EOFError("No data left in file") + # If the file size is less than N, we need to make sure not + # to seek past the beginning of the file + fid.seek(-min(N, len(magic)), 1) # back-up + if magic.startswith((_ZIP_PREFIX, _ZIP_SUFFIX)): + # zip-file (assume .npz) + # Potentially transfer file ownership to NpzFile + stack.pop_all() + ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + return ret + elif magic == format.MAGIC_PREFIX: + # .npy file + if mmap_mode: + if allow_pickle: + max_header_size = 2**64 + return format.open_memmap(file, mode=mmap_mode, + max_header_size=max_header_size) + else: + return format.read_array(fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + else: + # Try a pickle + if not allow_pickle: + raise ValueError( + "This file contains pickled (object) data. If you trust " + "the file you can load it unsafely using the " + "`allow_pickle=` keyword argument or `pickle.load()`.") + try: + return pickle.load(fid, **pickle_kwargs) + except Exception as e: + raise pickle.UnpicklingError( + f"Failed to interpret file {file!r} as a pickle") from e + + +def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): + return (arr,) + + +@array_function_dispatch(_save_dispatcher) +def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): + """ + Save an array to a binary file in NumPy ``.npy`` format. + + Parameters + ---------- + file : file, str, or pathlib.Path + File or filename to which the data is saved. If file is a file-object, + then the filename is unchanged. If file is a string or Path, + a ``.npy`` extension will be appended to the filename if it does not + already have one. + arr : array_like + Array data to be saved. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + fix_imports : bool, optional + The `fix_imports` flag is deprecated and has no effect. + + .. deprecated:: 2.1 + This flag is ignored since NumPy 1.17 and was only needed to + support loading in Python 2 some files written in Python 3. + + See Also + -------- + savez : Save several arrays into a ``.npz`` archive + savetxt, load + + Notes + ----- + For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + Any data saved to the file is appended to the end of the file. + + Examples + -------- + >>> import numpy as np + + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + + >>> with open('test.npy', 'wb') as f: + ... np.save(f, np.array([1, 2])) + ... np.save(f, np.array([1, 3])) + >>> with open('test.npy', 'rb') as f: + ... a = np.load(f) + ... 
b = np.load(f) + >>> print(a, b) + # [1 2] [1 3] + """ + if fix_imports is not np._NoValue: + # Deprecated 2024-05-16, NumPy 2.1 + warnings.warn( + "The 'fix_imports' flag is deprecated and has no effect. " + "(Deprecated in NumPy 2.1)", + DeprecationWarning, stacklevel=2) + if hasattr(file, 'write'): + file_ctx = contextlib.nullcontext(file) + else: + file = os.fspath(file) + if not file.endswith('.npy'): + file = file + '.npy' + file_ctx = open(file, "wb") + + with file_ctx as fid: + arr = np.asanyarray(arr) + format.write_array(fid, arr, allow_pickle=allow_pickle, + pickle_kwargs={'fix_imports': fix_imports}) + + +def _savez_dispatcher(file, *args, allow_pickle=True, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_dispatcher) +def savez(file, *args, allow_pickle=True, **kwds): + """Save several arrays into a single file in uncompressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : file, str, or pathlib.Path + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + save : Save a single array to a binary file in NumPy format. + savetxt : Save an array to a file as plain text. + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is not compressed and each file + in the archive contains one variable in ``.npy`` format. For a + description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile` + object is returned. This is a dictionary-like object which can be queried + for its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Keys passed in `kwds` are used as filenames inside the ZIP archive. + Therefore, keys should be valid filenames; e.g., avoid keys that begin with + ``/`` or contain ``.``. + + When naming variables with keyword arguments, it is not possible to name a + variable ``file``, as this would cause the ``file`` argument to be defined + twice in the call to ``savez``. 
+ + Examples + -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + + Using `savez` with \\*args, the arrays are saved with default names. + + >>> np.savez(outfile, x, y) + >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['arr_0', 'arr_1'] + >>> npzfile['arr_0'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using `savez` with \\**kwds, the arrays are saved with the keyword names. + + >>> outfile = TemporaryFile() + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + >>> npzfile = np.load(outfile) + >>> sorted(npzfile.files) + ['x', 'y'] + >>> npzfile['x'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + _savez(file, args, kwds, False, allow_pickle=allow_pickle) + + +def _savez_compressed_dispatcher(file, *args, allow_pickle=True, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_compressed_dispatcher) +def savez_compressed(file, *args, allow_pickle=True, **kwds): + """ + Save several arrays into a single file in compressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., + ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : file, str, or pathlib.Path + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + numpy.save : Save a single array to a binary file in NumPy format. + numpy.savetxt : Save an array to a file as plain text. + numpy.savez : Save several arrays into an uncompressed ``.npz`` file format + numpy.load : Load the files created by savez_compressed. + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is compressed with + ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable + in ``.npy`` format. For a description of the ``.npy`` format, see + :py:mod:`numpy.lib.format`. + + + When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile` + object is returned. This is a dictionary-like object which can be queried + for its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. 
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> test_array = np.random.rand(3, 2)
+    >>> test_vector = np.random.rand(4)
+    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+    >>> loaded = np.load('/tmp/123.npz')
+    >>> print(np.array_equal(test_array, loaded['a']))
+    True
+    >>> print(np.array_equal(test_vector, loaded['b']))
+    True
+
+    """
+    _savez(file, args, kwds, True, allow_pickle=allow_pickle)
+
+
+def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
+    # Import is postponed to here since zipfile depends on gzip, an optional
+    # component of the so-called standard library.
+    import zipfile
+
+    if not hasattr(file, 'write'):
+        file = os.fspath(file)
+        if not file.endswith('.npz'):
+            file = file + '.npz'
+
+    namedict = kwds
+    for i, val in enumerate(args):
+        key = 'arr_%d' % i
+        if key in namedict.keys():
+            raise ValueError(
+                f"Cannot use un-named variables and keyword {key}")
+        namedict[key] = val
+
+    if compress:
+        compression = zipfile.ZIP_DEFLATED
+    else:
+        compression = zipfile.ZIP_STORED
+
+    zipf = zipfile_factory(file, mode="w", compression=compression)
+    try:
+        for key, val in namedict.items():
+            fname = key + '.npy'
+            val = np.asanyarray(val)
+            # always force zip64, gh-10776
+            with zipf.open(fname, 'w', force_zip64=True) as fid:
+                format.write_array(fid, val,
+                                   allow_pickle=allow_pickle,
+                                   pickle_kwargs=pickle_kwargs)
+    finally:
+        zipf.close()
+
+
+def _ensure_ndmin_ndarray_check_param(ndmin):
+    """Check that the `ndmin` parameter is supported by
+    _ensure_ndmin_ndarray. It is intended to be used as a cheap
+    verification before running anything expensive,
+    e.g. loadtxt or genfromtxt.
+    """
+    # Check correctness of the values of `ndmin`
+    if ndmin not in [0, 1, 2]:
+        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+def _ensure_ndmin_ndarray(a, *, ndmin: int):
+    """This is a helper function of loadtxt and genfromtxt to ensure
+        proper minimum dimension as requested
+
+        ndmin : int. Supported values 0, 1, 2
+        ^^ whenever this changes, keep in sync with
+           _ensure_ndmin_ndarray_check_param
+    """
+    # Verify that the array has at least dimensions `ndmin`.
+    # Tweak the size and shape of the arrays - remove extraneous dimensions
+    if a.ndim > ndmin:
+        a = np.squeeze(a)
+    # and ensure we have the minimum number of dimensions asked for
+    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+    if a.ndim < ndmin:
+        if ndmin == 1:
+            a = np.atleast_1d(a)
+        elif ndmin == 2:
+            a = np.atleast_2d(a).T
+
+    return a
+
+
+# number of lines loadtxt reads in one chunk, can be overridden for testing
+_loadtxt_chunksize = 50000
+
+
+def _check_nonneg_int(value, name="argument"):
+    try:
+        operator.index(value)
+    except TypeError:
+        raise TypeError(f"{name} must be an integer") from None
+    if value < 0:
+        raise ValueError(f"{name} must be nonnegative")
+
+
+def _preprocess_comments(iterable, comments, encoding):
+    """
+    Generator that consumes a line-by-line iterable and strips out the
+    multiple (or multi-character) comments from lines.
+    This is a pre-processing step to achieve feature parity with loadtxt
+    (we assume that this feature is a niche feature).
+    """
+    for line in iterable:
+        if isinstance(line, bytes):
+            # Need to handle conversion here, or the splitting would fail
+            line = line.decode(encoding)
+
+        for c in comments:
+            line = line.split(c, 1)[0]
+
+        yield line
+
+
+# The number of rows we read in one go if confronted with a parametric dtype
+_loadtxt_chunksize = 50000
+
+
+def _read(fname, *, delimiter=',', comment='#', quote='"',
+          imaginary_unit='j', usecols=None, skiplines=0,
+          max_rows=None, converters=None, ndmin=None, unpack=False,
+          dtype=np.float64, encoding=None):
+    r"""
+    Read a NumPy array from a text file.
+    This is a helper function for loadtxt.
+
+    Parameters
+    ----------
+    fname : file, str, or pathlib.Path
+        The filename or the file to be read.
+    delimiter : str, optional
+        Field delimiter separating the fields in each line of the file.
+        Default is a comma, ','. If None, any sequence of whitespace is
+        considered a delimiter.
+    comment : str or sequence of str or None, optional
+        Character that begins a comment. All text from the comment
+        character to the end of the line is ignored.
+        Multiple comments or multiple-character comment strings are supported,
+        but may be slower and `quote` must be empty if used.
+        Use None to disable all use of comments.
+    quote : str or None, optional
+        Character that is used to quote string fields. Default is '"'
+        (a double quote). Use None to disable quote support.
+    imaginary_unit : str, optional
+        Character that represents the imaginary unit `sqrt(-1)`.
+        Default is 'j'.
+    usecols : array_like, optional
+        A one-dimensional array of integer column numbers. These are the
+        columns from the file to be included in the array. If this value
+        is not given, all the columns are used.
+    skiplines : int, optional
+        Number of lines to skip before interpreting the data in the file.
+    max_rows : int, optional
+        Maximum number of rows of data to read. Default is to read the
+        entire file.
+    converters : dict or callable, optional
+        A function to parse all column strings into the desired value, or
+        a dictionary mapping column number to a parser function.
+        E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
+        Converters can also be used to provide a default value for missing
+        data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
+        convert empty fields to 0.
+        Default: None
+    ndmin : int, optional
+        Minimum dimension of the array returned.
+        Allowed values are 0, 1 or 2. Default is 0.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = read(...)``. When used with a structured
+        data-type, arrays are returned for each field. Default is False.
+    dtype : numpy data type
+        A NumPy dtype instance, can be a structured dtype to map to the
+        columns of the file.
+    encoding : str, optional
+        Encoding used to decode the input file. The special value 'bytes'
+        (the default) enables backwards-compatible behavior for `converters`,
+        ensuring that inputs to the converter functions are encoded
+        bytes objects. The special value 'bytes' has no additional effect if
+        ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
+        default system encoding is used.
+
+    Returns
+    -------
+    ndarray
+        NumPy array.
+
+    """
+    # Handle special 'bytes' keyword for encoding
+    byte_converters = False
+    if encoding == 'bytes':
+        encoding = None
+        byte_converters = True
+
+    if dtype is None:
+        raise TypeError("a dtype must be provided.")
+    dtype = np.dtype(dtype)
+
+    read_dtype_via_object_chunks = None
+    if dtype.kind in 'SUM' and dtype in {
+            np.dtype("S0"), np.dtype("U0"), np.dtype("M8"), np.dtype("m8")}:
+        # This is a legacy "flexible" dtype. We do not truly support
+        # parametric dtypes currently (no dtype discovery step in the core),
+        # but have to support these for backward compatibility.
+        read_dtype_via_object_chunks = dtype
+        dtype = np.dtype(object)
+
+    if usecols is not None:
+        # Allow usecols to be a single int or a sequence of ints, the C-code
+        # handles the rest
+        try:
+            usecols = list(usecols)
+        except TypeError:
+            usecols = [usecols]
+
+    _ensure_ndmin_ndarray_check_param(ndmin)
+
+    if comment is None:
+        comments = None
+    else:
+        # assume comments are a sequence of strings
+        if "" in comment:
+            raise ValueError(
+                "comments cannot be an empty string. Use comments=None to "
+                "disable comments."
+            )
+        comments = tuple(comment)
+        comment = None
+        if len(comments) == 0:
+            comments = None  # No comments at all
+        elif len(comments) == 1:
+            # If there is only one comment, and that comment has one character,
+            # the normal parsing can deal with it just fine.
+            if isinstance(comments[0], str) and len(comments[0]) == 1:
+                comment = comments[0]
+                comments = None
+        # Input validation if there are multiple comment characters
+        elif delimiter in comments:
+            raise TypeError(
+                f"Comment characters '{comments}' cannot include the "
+                f"delimiter '{delimiter}'"
+            )
+
+    # comment is now either a 1 or 0 character string or a tuple:
+    if comments is not None:
+        # Note: An earlier version supported two-character comments (and could
+        # have been extended to multiple characters); we assume this is rare
+        # enough not to optimize for.
+        if quote is not None:
+            raise ValueError(
+                "when multiple comments or a multi-character comment is "
+                "given, quotes are not supported. In this case quotechar "
+                "must be set to None.")
+
+    if len(imaginary_unit) != 1:
+        raise ValueError('len(imaginary_unit) must be 1.')
+
+    _check_nonneg_int(skiplines)
+    if max_rows is not None:
+        _check_nonneg_int(max_rows)
+    else:
+        # Passing -1 to the C code means "read the entire file".
+        max_rows = -1
+
+    fh_closing_ctx = contextlib.nullcontext()
+    filelike = False
+    try:
+        if isinstance(fname, os.PathLike):
+            fname = os.fspath(fname)
+        if isinstance(fname, str):
+            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+            if encoding is None:
+                encoding = getattr(fh, 'encoding', 'latin1')
+
+            fh_closing_ctx = contextlib.closing(fh)
+            data = fh
+            filelike = True
+        else:
+            if encoding is None:
+                encoding = getattr(fname, 'encoding', 'latin1')
+            data = iter(fname)
+    except TypeError as e:
+        raise ValueError(
+            f"fname must be a string, filehandle, list of strings,\n"
+            f"or generator. 
Got {type(fname)} instead.") from e + + with fh_closing_ctx: + if comments is not None: + if filelike: + data = iter(data) + filelike = False + data = _preprocess_comments(data, comments, encoding) + + if read_dtype_via_object_chunks is None: + arr = _load_from_filelike( + data, delimiter=delimiter, comment=comment, quote=quote, + imaginary_unit=imaginary_unit, + usecols=usecols, skiplines=skiplines, max_rows=max_rows, + converters=converters, dtype=dtype, + encoding=encoding, filelike=filelike, + byte_converters=byte_converters) + + else: + # This branch reads the file into chunks of object arrays and then + # casts them to the desired actual dtype. This ensures correct + # string-length and datetime-unit discovery (like `arr.astype()`). + # Due to chunking, certain error reports are less clear, currently. + if filelike: + data = iter(data) # cannot chunk when reading from file + filelike = False + + c_byte_converters = False + if read_dtype_via_object_chunks == "S": + c_byte_converters = True # Use latin1 rather than ascii + + chunks = [] + while max_rows != 0: + if max_rows < 0: + chunk_size = _loadtxt_chunksize + else: + chunk_size = min(_loadtxt_chunksize, max_rows) + + next_arr = _load_from_filelike( + data, delimiter=delimiter, comment=comment, quote=quote, + imaginary_unit=imaginary_unit, + usecols=usecols, skiplines=skiplines, max_rows=chunk_size, + converters=converters, dtype=dtype, + encoding=encoding, filelike=filelike, + byte_converters=byte_converters, + c_byte_converters=c_byte_converters) + # Cast here already. We hope that this is better even for + # large files because the storage is more compact. It could + # be adapted (in principle the concatenate could cast). + chunks.append(next_arr.astype(read_dtype_via_object_chunks)) + + skiplines = 0 # Only have to skip for first chunk + if max_rows >= 0: + max_rows -= chunk_size + if len(next_arr) < chunk_size: + # There was less data than requested, so we are done. + break + + # Need at least one chunk, but if empty, the last one may have + # the wrong shape. + if len(chunks) > 1 and len(chunks[-1]) == 0: + del chunks[-1] + if len(chunks) == 1: + arr = chunks[0] + else: + arr = np.concatenate(chunks, axis=0) + + # NOTE: ndmin works as advertised for structured dtypes, but normally + # these would return a 1D result plus the structured dimension, + # so ndmin=2 adds a third dimension even when no squeezing occurs. + # A `squeeze=False` could be a better solution (pandas uses squeeze). + arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin) + + if arr.shape: + if arr.shape[0] == 0: + warnings.warn( + f'loadtxt: input contained no data: "{fname}"', + category=UserWarning, + stacklevel=3 + ) + + if unpack: + # Unpack structured dtypes if requested: + dt = arr.dtype + if dt.names is not None: + # For structured arrays, return an array for each field. + return [arr[field] for field in dt.names] + else: + return arr.T + else: + return arr + + +@finalize_array_function_like +@set_module('numpy') +def loadtxt(fname, dtype=float, comments='#', delimiter=None, + converters=None, skiprows=0, usecols=None, unpack=False, + ndmin=0, encoding=None, max_rows=None, *, quotechar=None, + like=None): + r""" + Load data from a text file. + + Parameters + ---------- + fname : file, str, pathlib.Path, list of str, generator + File, filename, list, or generator to read. If the filename + extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note + that generators must return bytes or strings. 
The strings
+        in a list or produced by a generator are treated as lines.
+    dtype : data-type, optional
+        Data-type of the resulting array; default: float. If this is a
+        structured data-type, the resulting array will be 1-dimensional, and
+        each row will be interpreted as an element of the array. In this
+        case, the number of columns used must match the number of fields in
+        the data-type.
+    comments : str or sequence of str or None, optional
+        The characters or list of characters used to indicate the start of a
+        comment. None implies no comments. For backwards compatibility, byte
+        strings will be decoded as 'latin1'. The default is '#'.
+    delimiter : str, optional
+        The character used to separate the values. For backwards
+        compatibility, byte strings will be decoded as 'latin1'. The default
+        is whitespace.
+
+        .. versionchanged:: 1.23.0
+            Only single character delimiters are supported. Newline characters
+            cannot be used as the delimiter.
+
+    converters : dict or callable, optional
+        Converter functions to customize value parsing. If `converters` is
+        callable, the function is applied to all columns, else it must be a
+        dict that maps column number to a parser function.
+        See examples for further details.
+        Default: None.
+
+        .. versionchanged:: 1.23.0
+            The ability to pass a single callable to be applied to all columns
+            was added.
+
+    skiprows : int, optional
+        Skip the first `skiprows` lines, including comments; default: 0.
+    usecols : int or sequence, optional
+        Which columns to read, with 0 being the first. For example,
+        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+        The default, None, results in all columns being read.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = loadtxt(...)``. When used with a
+        structured data-type, arrays are returned for each field.
+        Default is False.
+    ndmin : int, optional
+        The returned array will have at least `ndmin` dimensions.
+        Otherwise mono-dimensional axes will be squeezed.
+        Legal values: 0 (default), 1 or 2.
+    encoding : str, optional
+        Encoding used to decode the input file. Does not apply to input
+        streams. The special value 'bytes' enables backward compatibility
+        workarounds that ensure you receive byte arrays as results if
+        possible and passes 'latin1' encoded strings to converters. Override
+        this value to receive unicode arrays and pass strings as input to
+        converters. If set to None the system default is used. The default
+        value is None.
+
+        .. versionchanged:: 2.0
+            Before NumPy 2, the default was ``'bytes'`` for Python 2
+            compatibility. The default is now ``None``.
+
+    max_rows : int, optional
+        Read `max_rows` rows of content after `skiprows` lines. The default is
+        to read all the rows. Note that empty rows containing no data such as
+        empty lines and comment lines are not counted towards `max_rows`,
+        while such lines are counted in `skiprows`.
+
+        .. versionchanged:: 1.23.0
+            Lines containing no data, including comment lines (e.g., lines
+            starting with '#' or as specified via `comments`) are not counted
+            towards `max_rows`.
+    quotechar : unicode character or None, optional
+        The character used to denote the start and end of a quoted item.
+        Occurrences of the delimiter or comment characters are ignored within
+        a quoted item. The default value is ``quotechar=None``, which means
+        quoting support is disabled.
+
+        If two consecutive instances of `quotechar` are found within a quoted
+        field, the first is treated as an escape character. See examples.
+
+        .. versionadded:: 1.23.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file.
+
+    See Also
+    --------
+    load, fromstring, fromregex
+    genfromtxt : Load data with missing values handled as specified.
+    scipy.io.loadmat : reads MATLAB data files
+
+    Notes
+    -----
+    This function aims to be a fast reader for simply formatted files. The
+    `genfromtxt` function provides more sophisticated handling of, e.g.,
+    lines with missing values.
+
+    Each row in the input text file must have the same number of values to be
+    able to read all values. If all rows do not have the same number of
+    values, a subset of up to n columns (where n is the least number of
+    values present in all rows) can be read by specifying the columns via
+    `usecols`.
+
+    The strings produced by the Python float.hex method can be used as
+    input for floats.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from io import StringIO   # StringIO behaves like a file object
+    >>> c = StringIO("0 1\n2 3")
+    >>> np.loadtxt(c)
+    array([[0., 1.],
+           [2., 3.]])
+
+    >>> d = StringIO("M 21 72\nF 35 58")
+    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+    ...                      'formats': ('S1', 'i4', 'f4')})
+    array([(b'M', 21, 72.), (b'F', 35, 58.)],
+          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+    >>> c = StringIO("1,0,2\n3,0,4")
+    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+    >>> x
+    array([1., 3.])
+    >>> y
+    array([2., 4.])
+
+    The `converters` argument is used to specify functions to preprocess the
+    text prior to parsing. `converters` can be a dictionary that maps
+    preprocessing functions to each column:
+
+    >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+    >>> conv = {
+    ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
+    ...     1: lambda x: np.ceil(float(x)),  # conversion fn for column 1
+    ... }
+    >>> np.loadtxt(s, delimiter=",", converters=conv)
+    array([[1., 3.],
+           [3., 5.]])
+
+    `converters` can be a callable instead of a dictionary, in which case it
+    is applied to all columns:
+
+    >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+    >>> import functools
+    >>> conv = functools.partial(int, base=16)
+    >>> np.loadtxt(s, converters=conv)
+    array([[222., 173.],
+           [192., 222.]])
+
+    This example shows how `converters` can be used to convert a field
+    with a trailing minus sign into a negative number.
+
+    >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
+    >>> def conv(fld):
+    ...     return -float(fld[:-1]) if fld.endswith("-") else float(fld)
+    ...
+    >>> np.loadtxt(s, converters=conv)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Using a callable as the converter can be particularly useful for handling
+    values with different formatting, e.g. floats with underscores:
+
+    >>> s = StringIO("1 2.7 100_000")
+    >>> np.loadtxt(s, converters=float)
+    array([1.e+00, 2.7e+00, 1.e+05])
+
+    This idea can be extended to automatically handle values specified in
+    many different formats, such as hex values:
+
+    >>> def conv(val):
+    ...     try:
+    ...         return float(val)
+    ...     except ValueError:
+    ...         
return float.fromhex(val)
+    >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+    >>> np.loadtxt(s, delimiter=",", converters=conv)
+    array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+    Or a format where the ``-`` sign comes after the number:
+
+    >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
+    >>> conv = lambda x: -float(x[:-1]) if x.endswith("-") else float(x)
+    >>> np.loadtxt(s, converters=conv)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Support for quoted fields is enabled with the `quotechar` parameter.
+    Comment and delimiter characters are ignored when they appear within a
+    quoted item delineated by `quotechar`:
+
+    >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64', 2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Quoted fields can be separated by multiple whitespace characters:
+
+    >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64', 2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Two consecutive quote characters within a quoted field are treated as a
+    single escaped character:
+
+    >>> s = StringIO('"Hello, my name is ""Monty""!"')
+    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+    array('Hello, my name is "Monty"!', dtype='<U26')
+
+    Read subset of columns when all rows do not contain equal number of
+    values:
+
+    >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+    >>> np.loadtxt(d, usecols=(0, 1))
+    array([[ 1.,  2.],
+           [ 2.,  4.],
+           [ 3.,  9.],
+           [ 4., 16.]])
+
+    """
+
+    if like is not None:
+        return _loadtxt_with_like(
+            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+            converters=converters, skiprows=skiprows, usecols=usecols,
+            unpack=unpack, ndmin=ndmin, encoding=encoding,
+            max_rows=max_rows
+        )
+
+    if isinstance(delimiter, bytes):
+        delimiter.decode("latin1")
+
+    if dtype is None:
+        dtype = np.float64
+
+    comment = comments
+    # Control character type conversions for Py3 convenience
+    if comment is not None:
+        if isinstance(comment, (str, bytes)):
+            comment = [comment]
+        comment = [
+            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+    if isinstance(delimiter, bytes):
+        delimiter = delimiter.decode('latin1')
+
+    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+                converters=converters, skiplines=skiprows, usecols=usecols,
+                unpack=unpack, ndmin=ndmin, encoding=encoding,
+                max_rows=max_rows, quote=quotechar)
+
+    return arr
+
+
+_loadtxt_with_like = array_function_dispatch()(loadtxt)
+
+
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+                        header=None, footer=None, comments=None,
+                        encoding=None):
+    return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
+def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+            footer='', comments='# ', encoding=None):
+    """
+    Save an array to a text file.
+
+    Parameters
+    ----------
+    fname : filename, file handle or pathlib.Path
+        If the filename ends in ``.gz``, the file is automatically saved in
+        compressed gzip format. `loadtxt` understands gzipped files
+        transparently.
+    X : 1D or 2D array_like
+        Data to be saved to a text file.
+    fmt : str or sequence of strs, optional
+        A single format (%10.5f), a sequence of formats, or a
+        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+        case `delimiter` is ignored.
For complex `X`, the legal options
+        for `fmt` are:
+
+        * a single specifier, ``fmt='%.4e'``, resulting in numbers formatted
+          like ``' (%s+%sj)' % (fmt, fmt)``
+        * a full string specifying every real and imaginary part, e.g.
+          ``' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'`` for 3 columns
+        * a list of specifiers, one per column - in this case, the real
+          and imaginary part must have separate specifiers,
+          e.g. ``['%.3e + %.3ej', '(%.15e%+.15ej)']`` for 2 columns
+    delimiter : str, optional
+        String or character separating columns.
+    newline : str, optional
+        String or character separating lines.
+    header : str, optional
+        String that will be written at the beginning of the file.
+    footer : str, optional
+        String that will be written at the end of the file.
+    comments : str, optional
+        String that will be prepended to the ``header`` and ``footer``
+        strings, to mark them as comments. Default: '# ', as expected by e.g.
+        ``numpy.loadtxt``.
+    encoding : {None, str}, optional
+        Encoding used to encode the output file. Does not apply to output
+        streams. If the encoding is something other than 'bytes' or 'latin1'
+        you will not be able to load the file in NumPy versions < 1.14.
+        Default is 'latin1'.
+
+    See Also
+    --------
+    save : Save an array to a binary file in NumPy ``.npy`` format
+    savez : Save several arrays into an uncompressed ``.npz`` archive
+    savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+    Notes
+    -----
+    Further explanation of the `fmt` parameter
+    (``%[flag]width[.precision]specifier``):
+
+    flags:
+        ``-`` : left justify
+
+        ``+`` : Forces to precede result with + or -.
+
+        ``0`` : Left pad the number with zeros instead of space (see width).
+
+    width:
+        Minimum number of characters to be printed. The value is not truncated
+        if it has more characters.
+
+    precision:
+        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+          digits.
+        - For ``e, E`` and ``f`` specifiers, the number of digits to print
+          after the decimal point.
+        - For ``g`` and ``G``, the maximum number of significant digits.
+        - For ``s``, the maximum number of characters.
+
+    specifiers:
+        ``c`` : character
+
+        ``d`` or ``i`` : signed decimal integer
+
+        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+        ``f`` : decimal floating point
+
+        ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+        ``o`` : signed octal
+
+        ``s`` : string of characters
+
+        ``u`` : unsigned decimal integer
+
+        ``x,X`` : unsigned hexadecimal integer
+
+    This explanation of ``fmt`` is not complete, for an exhaustive
+    specification see [1]_.
+
+    References
+    ----------
+    .. [1] `Format Specification Mini-Language
+           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+           Python Documentation.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = y = z = np.arange(0.0,5.0,1.0)
+    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
+    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
+    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
+
+    """
+
+    class WriteWrap:
+        """Convert to bytes on bytestream inputs.
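+        Wrapped around the output stream by `savetxt` so a single code path
+        can serve both text and byte streams; the first write probes which
+        mode is needed.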
+
+        """
+        def __init__(self, fh, encoding):
+            self.fh = fh
+            self.encoding = encoding
+            self.do_write = self.first_write
+
+        def close(self):
+            self.fh.close()
+
+        def write(self, v):
+            self.do_write(v)
+
+        def write_bytes(self, v):
+            if isinstance(v, bytes):
+                self.fh.write(v)
+            else:
+                self.fh.write(v.encode(self.encoding))
+
+        def write_normal(self, v):
+            self.fh.write(asunicode(v))
+
+        def first_write(self, v):
+            try:
+                self.write_normal(v)
+                self.write = self.write_normal
+            except TypeError:
+                # input is probably a bytestream
+                self.write_bytes(v)
+                self.write = self.write_bytes
+
+    own_fh = False
+    if isinstance(fname, os.PathLike):
+        fname = os.fspath(fname)
+    if _is_string_like(fname):
+        # datasource doesn't support creating a new file ...
+        open(fname, 'wt').close()
+        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
+        own_fh = True
+    elif hasattr(fname, 'write'):
+        # wrap to handle byte output streams
+        fh = WriteWrap(fname, encoding or 'latin1')
+    else:
+        raise ValueError('fname must be a string or file handle')
+
+    try:
+        X = np.asarray(X)
+
+        # Handle 1-dimensional arrays
+        if X.ndim == 0 or X.ndim > 2:
+            raise ValueError(
+                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+        elif X.ndim == 1:
+            # Common case -- 1d array of numbers
+            if X.dtype.names is None:
+                X = np.atleast_2d(X).T
+                ncol = 1
+
+            # Complex dtype -- each field indicates a separate column
+            else:
+                ncol = len(X.dtype.names)
+        else:
+            ncol = X.shape[1]
+
+        iscomplex_X = np.iscomplexobj(X)
+        # `fmt` can be a string with multiple insertion points or a
+        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
+        if type(fmt) in (list, tuple):
+            if len(fmt) != ncol:
+                raise AttributeError(f'fmt has wrong shape. {str(fmt)}')
+            format = delimiter.join(fmt)
+        elif isinstance(fmt, str):
+            n_fmt_chars = fmt.count('%')
+            error = ValueError(f'fmt has wrong number of % formats: {fmt}')
+            if n_fmt_chars == 1:
+                if iscomplex_X:
+                    fmt = [f' ({fmt}+{fmt}j)', ] * ncol
+                else:
+                    fmt = [fmt, ] * ncol
+                format = delimiter.join(fmt)
+            elif iscomplex_X and n_fmt_chars != (2 * ncol):
+                raise error
+            elif ((not iscomplex_X) and n_fmt_chars != ncol):
+                raise error
+            else:
+                format = fmt
+        else:
+            raise ValueError(f'invalid fmt: {fmt!r}')
+
+        if len(header) > 0:
+            header = header.replace('\n', '\n' + comments)
+            fh.write(comments + header + newline)
+        if iscomplex_X:
+            for row in X:
+                row2 = []
+                for number in row:
+                    row2.extend((number.real, number.imag))
+                s = format % tuple(row2) + newline
+                fh.write(s.replace('+-', '-'))
+        else:
+            for row in X:
+                try:
+                    v = format % tuple(row) + newline
+                except TypeError as e:
+                    raise TypeError("Mismatch between array dtype ('%s') and "
+                                    "format specifier ('%s')"
+                                    % (str(X.dtype), format)) from e
+                fh.write(v)
+
+        if len(footer) > 0:
+            footer = footer.replace('\n', '\n' + comments)
+            fh.write(comments + footer + newline)
+    finally:
+        if own_fh:
+            fh.close()
+
+
+@set_module('numpy')
+def fromregex(file, regexp, dtype, encoding=None):
+    r"""
+    Construct an array from a text file, using regular expression parsing.
+
+    The returned array is always a structured array, and is constructed from
+    all matches of the regular expression in the file. Groups in the regular
+    expression are converted to fields of the structured array.
+
+    Parameters
+    ----------
+    file : file, str, or pathlib.Path
+        Filename or file object to read.
+
+        .. versionchanged:: 1.22.0
+            Now accepts `os.PathLike` implementations.
+
+    regexp : str or regexp
+        Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
+    dtype : dtype or list of dtypes
+        Dtype for the structured array; must be a structured datatype.
+    encoding : str, optional
+        Encoding used to decode the input file. Does not apply to input
+        streams.
+
+    Returns
+    -------
+    output : ndarray
+        The output array, containing the part of the content of `file` that
+        was matched by `regexp`. `output` is always a structured array.
+
+    Raises
+    ------
+    TypeError
+        When `dtype` is not a valid dtype for a structured array.
+
+    See Also
+    --------
+    fromstring, loadtxt
+
+    Notes
+    -----
+    Dtypes for structured arrays can be specified in several forms, but all
+    forms specify at least the data type and field name. For details see
+    `basics.rec`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from io import StringIO
+    >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
+
+    >>> regexp = r"(\d+)\s+(...)"  # match [digits, whitespace, anything]
+    >>> output = np.fromregex(text, regexp,
+    ...                       [('num', np.int64), ('key', 'S3')])
+    >>> output
+    array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+          dtype=[('num', '<i8'), ('key', 'S3')])
+    >>> output['num']
+    array([1312, 1534,  444])
+
+    """
+    own_fh = False
+    if not hasattr(file, "read"):
+        file = os.fspath(file)
+        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+        own_fh = True
+
+    try:
+        if not isinstance(dtype, np.dtype):
+            dtype = np.dtype(dtype)
+        if dtype.names is None:
+            raise TypeError('dtype must be a structured datatype.')
+
+        content = file.read()
+        if isinstance(content, bytes) and isinstance(regexp, str):
+            regexp = asbytes(regexp)
+
+        if not hasattr(regexp, 'match'):
+            regexp = re.compile(regexp)
+        seq = regexp.findall(content)
+        if seq and not isinstance(seq[0], tuple):
+            # Only one group is in the regexp.
+            # Create the new array as a single data-type and then
+            # re-interpret as a single-field structured array.
+            newdtype = np.dtype(dtype[dtype.names[0]])
+            output = np.array(seq, dtype=newdtype)
+            output.dtype = dtype
+        else:
+            output = np.array(seq, dtype=dtype)
+
+        return output
+    finally:
+        if own_fh:
+            file.close()
+
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+@finalize_array_function_like
+@set_module('numpy')
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+               skip_header=0, skip_footer=0, converters=None,
+               missing_values=None, filling_values=None, usecols=None,
+               names=None, excludelist=None,
+               deletechars=''.join(sorted(NameValidator.defaultdeletechars)),  # noqa: B008
+               replace_space='_', autostrip=False, case_sensitive=True,
+               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
+               invalid_raise=True, max_rows=None, encoding=None,
+               *, ndmin=0, like=None):
+    """
+    Load data from a text file, with missing values handled as specified.
+
+    Each line past the first `skip_header` lines is split at the `delimiter`
+    character, and characters following the `comments` character are
+    discarded.
+
+    Parameters
+    ----------
+    fname : file, str, pathlib.Path, list of str, generator
+        File, filename, list, or generator to read. If the filename
+        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+        that generators must return bytes or strings. The strings
+        in a list or produced by a generator are treated as lines.
+    dtype : dtype, optional
+        Data type of the resulting array.
+ If None, the dtypes will be determined by the contents of each + column, individually. + comments : str, optional + The character used to indicate the start of a comment. + All the characters occurring on a line after a comment are discarded. + delimiter : str, int, or sequence, optional + The string used to separate values. By default, any consecutive + whitespaces act as delimiter. An integer or sequence of integers + can also be provided as width(s) of each field. + skiprows : int, optional + `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. + skip_header : int, optional + The number of lines to skip at the beginning of the file. + skip_footer : int, optional + The number of lines to skip at the end of the file. + converters : variable, optional + The set of functions that convert the data of a column to a value. + The converters can also be used to provide a default value + for missing data: ``converters = {3: lambda s: float(s or 0)}``. + missing : variable, optional + `missing` was removed in numpy 1.10. Please use `missing_values` + instead. + missing_values : variable, optional + The set of strings corresponding to missing data. + filling_values : variable, optional + The set of values to be used as default when the data are missing. + usecols : sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. + names : {None, True, str, sequence}, optional + If `names` is True, the field names are read from the first line after + the first `skip_header` lines. This line can optionally be preceded + by a comment delimiter. Any content before the comment delimiter is + discarded. If `names` is a sequence or a single-string of + comma-separated names, the names will be used to define the field + names in a structured dtype. If `names` is None, the names of the + dtype fields will be used, if any. + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default list + ['return','file','print']. Excluded names are appended with an + underscore: for example, `file` would become `file_`. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + defaultfmt : str, optional + A format used to define default field names, such as "f%i" or "f_%02i". + autostrip : bool, optional + Whether to automatically strip white spaces from the variables. + replace_space : char, optional + Character(s) used in replacement of white spaces in the variable + names. By default, use a '_'. + case_sensitive : {True, False, 'upper', 'lower'}, optional + If True, field names are case sensitive. + If False or 'upper', field names are converted to upper case. + If 'lower', field names are converted to lower case. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = genfromtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + usemask : bool, optional + If True, return a masked array. + If False, return a regular array. + loose : bool, optional + If True, do not raise errors for invalid values. + invalid_raise : bool, optional + If True, an exception is raised if an inconsistency is detected in the + number of columns. + If False, a warning is emitted and the offending lines are skipped. + max_rows : int, optional + The maximum number of rows to read. 
Must not be used with skip_footer
+        at the same time. If given, the value must be at least 1. Default is
+        to read the entire file.
+    encoding : str, optional
+        Encoding used to decode the input file. Does not apply when `fname`
+        is a file object. The special value 'bytes' enables backward
+        compatibility workarounds that ensure that you receive byte arrays
+        when possible and passes latin1 encoded strings to converters.
+        Override this value to receive unicode arrays and pass strings
+        as input to converters. If set to None the system default is used.
+        The default value is None.
+
+        .. versionchanged:: 2.0
+            Before NumPy 2, the default was ``'bytes'`` for Python 2
+            compatibility. The default is now ``None``.
+
+    ndmin : int, optional
+        Same parameter as `loadtxt`
+
+        .. versionadded:: 1.23.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file. If `usemask` is True, this is a
+        masked array.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When variables are named (either by a flexible dtype or with a `names`
+      sequence), there must not be any header in the file (else a ValueError
+      exception is raised).
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove spaces.
+    * Custom converters may receive unexpected values due to dtype
+      discovery.
+
+    References
+    ----------
+    .. [1] NumPy User Guide, section `I/O with NumPy
+           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> import numpy as np
+
+    Comma delimited file with mixed dtype
+
+    >>> s = StringIO("1,1.3,abcde")
+    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+    ...     ('mystring','S5')], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Using dtype = None
+
+    >>> _ = s.seek(0)  # needed for StringIO example only
+    >>> data = np.genfromtxt(s, dtype=None,
+    ...     names = ['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '<U5')])
+
+    Specifying dtype and names
+
+    >>> _ = s.seek(0)
+    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+    ...     names=['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    An example with fixed-width columns
+
+    >>> s = StringIO("11.3abcde")
+    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+    ...     delimiter=[1,3,5])
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '<U5')])
+
+    An example to show comments
+
+    >>> f = StringIO('''
+    ... text,# of chars
+    ... hello world,11
+    ... 
numpy,5''')
+    >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
+    array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
+          dtype=[('f0', 'S12'), ('f1', 'S12')])
+
+    """
+
+    if like is not None:
+        return _genfromtxt_with_like(
+            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+            skip_header=skip_header, skip_footer=skip_footer,
+            converters=converters, missing_values=missing_values,
+            filling_values=filling_values, usecols=usecols, names=names,
+            excludelist=excludelist, deletechars=deletechars,
+            replace_space=replace_space, autostrip=autostrip,
+            case_sensitive=case_sensitive, defaultfmt=defaultfmt,
+            unpack=unpack, usemask=usemask, loose=loose,
+            invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
+            ndmin=ndmin,
+        )
+
+    _ensure_ndmin_ndarray_check_param(ndmin)
+
+    if max_rows is not None:
+        if skip_footer:
+            raise ValueError(
+                "The keywords 'skip_footer' and 'max_rows' cannot be "
+                "specified at the same time.")
+        if max_rows < 1:
+            raise ValueError("'max_rows' must be at least 1.")
+
+    if usemask:
+        from numpy.ma import MaskedArray, make_mask_descr
+    # Check the input dictionary of converters
+    user_converters = converters or {}
+    if not isinstance(user_converters, dict):
+        raise TypeError(
+            "The input argument 'converters' should be a valid dictionary "
+            "(got '%s' instead)" % type(user_converters))
+
+    if encoding == 'bytes':
+        encoding = None
+        byte_converters = True
+    else:
+        byte_converters = False
+
+    # Initialize the filehandle, the LineSplitter and the NameValidator
+    if isinstance(fname, os.PathLike):
+        fname = os.fspath(fname)
+    if isinstance(fname, str):
+        fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+        fid_ctx = contextlib.closing(fid)
+    else:
+        fid = fname
+        fid_ctx = contextlib.nullcontext(fid)
+    try:
+        fhd = iter(fid)
+    except TypeError as e:
+        raise TypeError(
+            "fname must be a string, a filehandle, a sequence of strings,\n"
+            f"or an iterator of strings. Got {type(fname)} instead."
+        ) from e
+    with fid_ctx:
+        split_line = LineSplitter(delimiter=delimiter, comments=comments,
+                                  autostrip=autostrip, encoding=encoding)
+        validate_names = NameValidator(excludelist=excludelist,
+                                       deletechars=deletechars,
+                                       case_sensitive=case_sensitive,
+                                       replace_space=replace_space)
+
+        # Skip the first `skip_header` rows
+        try:
+            for i in range(skip_header):
+                next(fhd)
+
+            # Keep on until we find the first valid values
+            first_values = None
+
+            while not first_values:
+                first_line = _decode_line(next(fhd), encoding)
+                if (names is True) and (comments is not None):
+                    if comments in first_line:
+                        first_line = (
+                            ''.join(first_line.split(comments)[1:]))
+                first_values = split_line(first_line)
+        except StopIteration:
+            # return an empty array if the datafile is empty
+            first_line = ''
+            first_values = []
+            warnings.warn(
+                f'genfromtxt: Empty input file: "{fname}"', stacklevel=2
+            )
+
+        # Should we take the first values as names?
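+        # (if so, a leading field that is just the comment marker itself is
+        # dropped below before the remaining fields are validated as names)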
+ if names is True: + fval = first_values[0].strip() + if comments is not None: + if fval in comments: + del first_values[0] + + # Check the columns to use: make sure `usecols` is a list + if usecols is not None: + try: + usecols = [_.strip() for _ in usecols.split(",")] + except AttributeError: + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols, ] + nbcols = len(usecols or first_values) + + # Check the names and overwrite the dtype.names if needed + if names is True: + names = validate_names([str(_.strip()) for _ in first_values]) + first_line = '' + elif _is_string_like(names): + names = validate_names([_.strip() for _ in names.split(',')]) + elif names: + names = validate_names(names) + # Get the dtype + if dtype is not None: + dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, + excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + # Make sure the names is a list (for 2.5) + if names is not None: + names = list(names) + + if usecols: + for (i, current) in enumerate(usecols): + # if usecols is a list of names, convert to a list of indices + if _is_string_like(current): + usecols[i] = names.index(current) + elif current < 0: + usecols[i] = current + len(first_values) + # If the dtype is not None, make sure we update it + if (dtype is not None) and (len(dtype) > nbcols): + descr = dtype.descr + dtype = np.dtype([descr[_] for _ in usecols]) + names = list(dtype.names) + # If `names` is not None, update the names + elif (names is not None) and (len(names) > nbcols): + names = [names[_] for _ in usecols] + elif (names is not None) and (dtype is not None): + names = list(dtype.names) + + # Process the missing values ............................... + # Rename missing_values for convenience + user_missing_values = missing_values or () + if isinstance(user_missing_values, bytes): + user_missing_values = user_missing_values.decode('latin1') + + # Define the list of missing_values (one column: one list) + missing_values = [[''] for _ in range(nbcols)] + + # We have a dictionary: process it field by field + if isinstance(user_missing_values, dict): + # Loop on the items + for (key, val) in user_missing_values.items(): + # Is the key a string ? + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key as needed if it's a column number + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Transform the value as a list of string + if isinstance(val, (list, tuple)): + val = [str(_) for _ in val] + else: + val = [str(val), ] + # Add the value(s) to the current list of missing + if key is None: + # None acts as default + for miss in missing_values: + miss.extend(val) + else: + missing_values[key].extend(val) + # We have a sequence : each item matches a column + elif isinstance(user_missing_values, (list, tuple)): + for (value, entry) in zip(user_missing_values, missing_values): + value = str(value) + if value not in entry: + entry.append(value) + # We have a string : apply it to all entries + elif isinstance(user_missing_values, str): + user_value = user_missing_values.split(",") + for entry in missing_values: + entry.extend(user_value) + # We have something else: apply it to all entries + else: + for entry in missing_values: + entry.extend([str(user_missing_values)]) + + # Process the filling_values ............................... 
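+        # (one default per column; a None entry lets that column's
+        # StringConverter fall back to its own type-based default)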
+ # Rename the input for convenience + user_filling_values = filling_values + if user_filling_values is None: + user_filling_values = [] + # Define the default + filling_values = [None] * nbcols + # We have a dictionary : update each entry individually + if isinstance(user_filling_values, dict): + for (key, val) in user_filling_values.items(): + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key if it's a column number + # and usecols is defined + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Add the value to the list + filling_values[key] = val + # We have a sequence : update on a one-to-one basis + elif isinstance(user_filling_values, (list, tuple)): + n = len(user_filling_values) + if (n <= nbcols): + filling_values[:n] = user_filling_values + else: + filling_values = user_filling_values[:nbcols] + # We have something else : use it for all entries + else: + filling_values = [user_filling_values] * nbcols + + # Initialize the converters ................................ + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times + # the same converter, instead of 3 different converters. + converters = [ + StringConverter(None, missing_values=miss, default=fill) + for (miss, fill) in zip(missing_values, filling_values) + ] + else: + dtype_flat = flatten_dtype(dtype, flatten_base=True) + # Initialize the converters + if len(dtype_flat) > 1: + # Flexible type : get a converter from each dtype + zipit = zip(dtype_flat, missing_values, filling_values) + converters = [StringConverter(dt, + locked=True, + missing_values=miss, + default=fill) + for (dt, miss, fill) in zipit] + else: + # Set to a default converter (but w/ different missing values) + zipit = zip(missing_values, filling_values) + converters = [StringConverter(dtype, + locked=True, + missing_values=miss, + default=fill) + for (miss, fill) in zipit] + # Update the converters to use the user-defined ones + uc_update = [] + for (j, conv) in user_converters.items(): + # If the converter is specified by column names, + # use the index instead + if _is_string_like(j): + try: + j = names.index(j) + i = j + except ValueError: + continue + elif usecols: + try: + i = usecols.index(j) + except ValueError: + # Unused converter specified + continue + else: + i = j + # Find the value to test - first_line is not filtered by usecols: + if len(first_line): + testing_value = first_values[j] + else: + testing_value = None + if conv is bytes: + user_conv = asbytes + elif byte_converters: + # Converters may use decode to workaround numpy's old + # behavior, so encode the string again before passing + # to the user converter. + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + user_conv = functools.partial(tobytes_first, conv=conv) + else: + user_conv = conv + converters[i].update(user_conv, locked=True, + testing_value=testing_value, + default=filling_values[i], + missing_values=missing_values[i],) + uc_update.append((i, user_conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Fixme: possible error as following variable never used. + # miss_chars = [_.missing_values for _ in converters] + + # Initialize the output lists ... + # ... rows + rows = [] + append_to_rows = rows.append + # ... 
masks + if usemask: + masks = [] + append_to_masks = masks.append + # ... invalid + invalid = [] + append_to_invalid = invalid.append + + # Parse each line + for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): + values = split_line(line) + nbvalues = len(values) + # Skip an empty line + if nbvalues == 0: + continue + if usecols: + # Select only the columns we need + try: + values = [values[_] for _ in usecols] + except IndexError: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + elif nbvalues != nbcols: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple(v.strip() in m + for (v, m) in zip(values, + missing_values))) + if len(rows) == max_rows: + break + + # Upgrade the converters (if needed) + if dtype is None: + for (i, converter) in enumerate(converters): + current_column = [itemgetter(i)(_m) for _m in rows] + try: + converter.iterupgrade(current_column) + except ConverterLockError: + errmsg = f"Converter #{i} is locked and cannot be upgraded: " + current_column = map(itemgetter(i), rows) + for (j, value) in enumerate(current_column): + try: + converter.upgrade(value) + except (ConverterError, ValueError): + line_number = j + 1 + skip_header + errmsg += f"(occurred line #{line_number} for value '{value}')" + raise ConverterError(errmsg) + + # Check that we don't have invalid values + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer + # Construct the error message + template = f" Line #%i (got %i columns instead of {nbcols})" + if skip_footer > 0: + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] + if len(errmsg): + errmsg.insert(0, "Some errors were detected !") + errmsg = "\n".join(errmsg) + # Raise an exception ? + if invalid_raise: + raise ValueError(errmsg) + # Issue a warning ? + else: + warnings.warn(errmsg, ConversionWarning, stacklevel=2) + + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + rows = list( + zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + else: + rows = list( + zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + column_types = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(column_types) + if v == np.str_] + + if byte_converters and strcolidx: + # convert strings back to bytes for backward compatibility + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated. 
Set the encoding, use None for the " + "system default.", + np.exceptions.VisibleDeprecationWarning, stacklevel=2) + + def encode_unicode_cols(row_tup): + row = list(row_tup) + for i in strcolidx: + row[i] = row[i].encode('latin1') + return tuple(row) + + try: + data = [encode_unicode_cols(r) for r in data] + except UnicodeEncodeError: + pass + else: + for i in strcolidx: + column_types[i] = np.bytes_ + + # Update string types to be the right length + sized_column_types = column_types.copy() + for i, col_type in enumerate(column_types): + if np.issubdtype(col_type, np.character): + n_chars = max(len(row[i]) for row in data) + sized_column_types[i] = (col_type, n_chars) + + if names is None: + # If the dtype is uniform (before sizing strings) + base = { + c_type + for c, c_type in zip(converters, column_types) + if c._checked} + if len(base) == 1: + uniform_type, = base + (ddtype, mdtype) = (uniform_type, bool) + else: + ddtype = [(defaultfmt % i, dt) + for (i, dt) in enumerate(sized_column_types)] + if usemask: + mdtype = [(defaultfmt % i, bool) + for (i, dt) in enumerate(sized_column_types)] + else: + ddtype = list(zip(names, sized_column_types)) + mdtype = list(zip(names, [bool] * len(sized_column_types))) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names is not None: + dtype.names = names + # Case 1. We have a structured type + if len(dtype_flat) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if 'O' in (_.char for _ in dtype_flat): + if has_nested_fields(dtype): + raise NotImplementedError( + "Nested fields involving objects are not supported...") + else: + output = np.array(data, dtype=dtype) + else: + rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) + output = rows.view(dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array( + masks, dtype=np.dtype([('', bool) for t in dtype_flat])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for i, ttype in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if np.issubdtype(ttype, np.character): + ttype = (ttype, max(len(row[i]) for row in data)) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. 
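+                    # (`ttype` below is the loop variable left over from the
+                    # descr loop above, i.e. the single remaining field's type)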
+ else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names is not None: + mdtype = [(_, bool) for _ in dtype.names] + else: + mdtype = bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + names = output.dtype.names + if usemask and names: + for (name, conv) in zip(names, converters): + missing_values = [conv(_) for _ in conv.missing_values + if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + + output = _ensure_ndmin_ndarray(output, ndmin=ndmin) + + if unpack: + if names is None: + return output.T + elif len(names) == 1: + # squeeze single-name dtypes too + return output[names[0]] + else: + # For structured arrays with multiple fields, + # return an array for each field. + return [output[field] for field in names] + return output + + +_genfromtxt_with_like = array_function_dispatch()(genfromtxt) + + +def recfromtxt(fname, **kwargs): + """ + Load ASCII data from a file and return it in a record array. + + If ``usemask=False`` a standard `recarray` is returned, + if ``usemask=True`` a MaskedRecords array is returned. + + .. deprecated:: 2.0 + Use `numpy.genfromtxt` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`recfromtxt` is deprecated, " + "use `numpy.genfromtxt` instead." + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + kwargs.setdefault("dtype", None) + usemask = kwargs.get('usemask', False) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, **kwargs): + """ + Load ASCII data stored in a comma-separated file. + + The returned array is a record array (if ``usemask=False``, see + `recarray`) or a masked record array (if ``usemask=True``, + see `ma.mrecords.MaskedRecords`). + + .. deprecated:: 2.0 + Use `numpy.genfromtxt` with comma as `delimiter` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`recfromcsv` is deprecated, " + "use `numpy.genfromtxt` with comma as `delimiter` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + # Set default kwargs for genfromtxt as relevant to csv import. 
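+    # (lower-cased field names, names read from the first row, comma
+    # delimiter, and dtype discovery via ``dtype=None``)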
+ kwargs.setdefault("case_sensitive", "lower") + kwargs.setdefault("names", True) + kwargs.setdefault("delimiter", ",") + kwargs.setdefault("dtype", None) + output = genfromtxt(fname, **kwargs) + + usemask = kwargs.get("usemask", False) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.pyi new file mode 100644 index 0000000..40369c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.pyi @@ -0,0 +1,301 @@ +import types +import zipfile +from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence +from re import Pattern +from typing import ( + IO, + Any, + ClassVar, + Generic, + Protocol, + Self, + TypeAlias, + overload, + type_check_only, +) +from typing import Literal as L + +from _typeshed import ( + StrOrBytesPath, + StrPath, + SupportsKeysAndGetItem, + SupportsRead, + SupportsWrite, +) +from typing_extensions import TypeVar, deprecated, override + +import numpy as np +from numpy._core.multiarray import packbits, unpackbits +from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc +from numpy.ma.mrecords import MaskedRecords + +from ._datasource import DataSource as DataSource + +__all__ = [ + "fromregex", + "genfromtxt", + "load", + "loadtxt", + "packbits", + "save", + "savetxt", + "savez", + "savez_compressed", + "unpackbits", +] + +_T_co = TypeVar("_T_co", covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) + +_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] +_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] +_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] +_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str] + +@type_check_only +class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): + def seek(self, offset: int, whence: int, /) -> object: ... + +class BagObj(Generic[_T_co]): + def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... + def __getattribute__(self, key: str, /) -> _T_co: ... + def __dir__(self) -> list[str]: ... + +class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): + _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 + + zip: zipfile.ZipFile + fid: IO[str] | None + files: list[str] + allow_pickle: bool + pickle_kwargs: Mapping[str, Any] | None + f: BagObj[NpzFile[_ScalarT_co]] + + # + def __init__( + self, + /, + fid: IO[Any], + own_fid: bool = False, + allow_pickle: bool = False, + pickle_kwargs: Mapping[str, object] | None = None, + *, + max_header_size: int = 10_000, + ) -> None: ... + def __del__(self) -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + @override + def __len__(self) -> int: ... + @override + def __iter__(self) -> Iterator[str]: ... + @override + def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ... + def close(self) -> None: ... 
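+
+# Illustrative usage of the `NpzFile` interface declared above (comment-only
+# sketch, since stub files carry no runtime code; file name is arbitrary):
+#
+#     import numpy as np
+#     np.savez("pair.npz", a=np.arange(3), b=np.eye(2))
+#     with np.load("pair.npz") as npz:      # np.load returns an NpzFile here
+#         arrays = {name: npz[name] for name in npz.files}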
+ +# NOTE: Returns a `NpzFile` if file is a zip file; +# returns an `ndarray`/`memmap` otherwise +def load( + file: StrOrBytesPath | _SupportsReadSeek[bytes], + mmap_mode: L["r+", "r", "w+", "c"] | None = None, + allow_pickle: bool = False, + fix_imports: bool = True, + encoding: L["ASCII", "latin1", "bytes"] = "ASCII", + *, + max_header_size: int = 10_000, +) -> Any: ... + +@overload +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... +@overload +@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ... +@overload +@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ... + +# +def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... + +# +def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... + +# File-like objects only have to implement `__iter__` and, +# optionally, `encoding` +@overload +def loadtxt( + fname: _FName, + dtype: None = None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[np.float64]: ... +@overload +def loadtxt( + fname: _FName, + dtype: _DTypeLike[_ScalarT], + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def loadtxt( + fname: _FName, + dtype: DTypeLike, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Any]: ... + +def savetxt( + fname: _FNameWrite, + X: ArrayLike, + fmt: str | Sequence[str] = "%.18e", + delimiter: str = " ", + newline: str = "\n", + header: str = "", + footer: str = "", + comments: str = "# ", + encoding: str | None = None, +) -> None: ... + +@overload +def fromregex( + file: _FNameRead, + regexp: str | bytes | Pattern[Any], + dtype: _DTypeLike[_ScalarT], + encoding: str | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def fromregex( + file: _FNameRead, + regexp: str | bytes | Pattern[Any], + dtype: DTypeLike, + encoding: str | None = None, +) -> NDArray[Any]: ... 
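+
+# Note: as with `loadtxt` and `fromregex` above, the `genfromtxt` overloads
+# below only narrow the result to NDArray[_ScalarT] when a concrete scalar
+# dtype is supplied; `dtype=None` and loose `DTypeLike` values fall back to
+# NDArray[Any].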
+ +@overload +def genfromtxt( + fname: _FName, + dtype: None = None, + comments: str = ..., + delimiter: str | int | Iterable[int] | None = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L["upper", "lower"] = ..., + defaultfmt: str = ..., + unpack: bool | None = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: int | None = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... +@overload +def genfromtxt( + fname: _FName, + dtype: _DTypeLike[_ScalarT], + comments: str = ..., + delimiter: str | int | Iterable[int] | None = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L["upper", "lower"] = ..., + defaultfmt: str = ..., + unpack: bool | None = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: int | None = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def genfromtxt( + fname: _FName, + dtype: DTypeLike, + comments: str = ..., + delimiter: str | int | Iterable[int] | None = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L["upper", "lower"] = ..., + defaultfmt: str = ..., + unpack: bool | None = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: int | None = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... +@overload +def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... + +@overload +def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... +@overload +def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.py new file mode 100644 index 0000000..a58ca76 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.py @@ -0,0 +1,1465 @@ +""" +Functions to operate on polynomials. 
+ +""" +__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', + 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', + 'polyfit'] + +import functools +import re +import warnings + +import numpy._core.numeric as NX +from numpy._core import ( + abs, + array, + atleast_1d, + dot, + finfo, + hstack, + isscalar, + ones, + overrides, +) +from numpy._utils import set_module +from numpy.exceptions import RankWarning +from numpy.lib._function_base_impl import trim_zeros +from numpy.lib._twodim_base_impl import diag, vander +from numpy.lib._type_check_impl import imag, iscomplex, mintypecode, real +from numpy.linalg import eigvals, inv, lstsq + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _poly_dispatcher(seq_of_zeros): + return seq_of_zeros + + +@array_function_dispatch(_poly_dispatcher) +def poly(seq_of_zeros): + """ + Find the coefficients of a polynomial with the given sequence of roots. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the coefficients of the polynomial whose leading coefficient + is one for the given sequence of zeros (multiple roots must be included + in the sequence as many times as their multiplicity; see Examples). + A square matrix (or array, which will be treated as a matrix) can also + be given, in which case the coefficients of the characteristic polynomial + of the matrix are returned. + + Parameters + ---------- + seq_of_zeros : array_like, shape (N,) or (N, N) + A sequence of polynomial roots, or a square array or matrix object. + + Returns + ------- + c : ndarray + 1D array of polynomial coefficients from highest to lowest degree: + + ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` + where c[0] always equals 1. + + Raises + ------ + ValueError + If input is the wrong shape (the input must be a 1-D or square + 2-D array). + + See Also + -------- + polyval : Compute polynomial values. + roots : Return the roots of a polynomial. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + Specifying the roots of a polynomial still leaves one degree of + freedom, typically represented by an undetermined leading + coefficient. [1]_ In the case of this function, that coefficient - + the first one in the returned array - is always taken as one. (If + for some reason you have one other point, the only automatic way + presently to leverage that information is to use ``polyfit``.) + + The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` + matrix **A** is given by + + :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, + + where **I** is the `n`-by-`n` identity matrix. [2]_ + + References + ---------- + .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry, + Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. + + .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," + Academic Press, pg. 182, 1980. + + Examples + -------- + + Given a sequence of a polynomial's zeros: + + >>> import numpy as np + + >>> np.poly((0, 0, 0)) # Multiple root example + array([1., 0., 0., 0.]) + + The line above represents z**3 + 0*z**2 + 0*z + 0. + + >>> np.poly((-1./2, 0, 1./2)) + array([ 1. , 0. , -0.25, 0. 
]) + + The line above represents z**3 - z/4 + + >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) # random + + Given a square array object: + + >>> P = np.array([[0, 1./3], [-1./2, 0]]) + >>> np.poly(P) + array([1. , 0. , 0.16666667]) + + Note how in all cases the leading coefficient is always 1. + + """ + seq_of_zeros = atleast_1d(seq_of_zeros) + sh = seq_of_zeros.shape + + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: + seq_of_zeros = eigvals(seq_of_zeros) + elif len(sh) == 1: + dt = seq_of_zeros.dtype + # Let object arrays slip through, e.g. for arbitrary precision + if dt != object: + seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) + else: + raise ValueError("input must be 1d or non-empty square 2d array.") + + if len(seq_of_zeros) == 0: + return 1.0 + dt = seq_of_zeros.dtype + a = ones((1,), dtype=dt) + for zero in seq_of_zeros: + a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full') + + if issubclass(a.dtype.type, NX.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = NX.asarray(seq_of_zeros, complex) + if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): + a = a.real.copy() + + return a + + +def _roots_dispatcher(p): + return p + + +@array_function_dispatch(_roots_dispatcher) +def roots(p): + """ + Return the roots of a polynomial with coefficients given in p. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The values in the rank-1 array `p` are coefficients of a polynomial. + If the length of `p` is n+1 then the polynomial is described by:: + + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : array_like + Rank-1 array of polynomial coefficients. + + Returns + ------- + out : ndarray + An array containing the roots of the polynomial. + + Raises + ------ + ValueError + When `p` cannot be converted to a rank-1 array. + + See also + -------- + poly : Find the coefficients of a polynomial with a given sequence + of roots. + polyval : Compute polynomial values. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + The algorithm relies on computing the eigenvalues of the + companion matrix [1]_. + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. + + Examples + -------- + >>> import numpy as np + >>> coeff = [3.2, 2, 1] + >>> np.roots(coeff) + array([-0.3125+0.46351241j, -0.3125-0.46351241j]) + + """ + # If input is scalar, this makes it an array + p = atleast_1d(p) + if p.ndim != 1: + raise ValueError("Input must be a rank-1 array.") + + # find non-zero array entries + non_zero = NX.nonzero(NX.ravel(p))[0] + + # Return an empty array if polynomial is all zeros + if len(non_zero) == 0: + return NX.array([]) + + # find the number of trailing zeros -- this is the number of roots at 0. + trailing_zeros = len(p) - non_zero[-1] - 1 + + # strip leading and trailing zeros + p = p[int(non_zero[0]):int(non_zero[-1]) + 1] + + # casting: if incoming array isn't floating point, make it floating point. 
+ if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): + p = p.astype(float) + + N = len(p) + if N > 1: + # build companion matrix and find its eigenvalues (the roots) + A = diag(NX.ones((N - 2,), p.dtype), -1) + A[0, :] = -p[1:] / p[0] + roots = eigvals(A) + else: + roots = NX.array([]) + + # tack any zeros onto the back of the array + roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) + return roots + + +def _polyint_dispatcher(p, m=None, k=None): + return (p,) + + +@array_function_dispatch(_polyint_dispatcher) +def polyint(p, m=1, k=None): + """ + Return an antiderivative (indefinite integral) of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : array_like or poly1d + Polynomial to integrate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : list of `m` scalars or scalar, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + + The defining property of the antiderivative: + + >>> import numpy as np + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + >>> P + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. Constant of the highest-order polynomial term comes first: + + >>> np.polyder(P, 2)(0) + 6.0 + >>> np.polyder(P, 1)(0) + 5.0 + >>> P(0) + 3.0 + + """ + m = int(m) + if m < 0: + raise ValueError("Order of integral must be positive (see polyder)") + if k is None: + k = NX.zeros(m, float) + k = atleast_1d(k) + if len(k) == 1 and m > 1: + k = k[0] * NX.ones(m, float) + if len(k) < m: + raise ValueError( + "k must be a scalar or a rank-1 array of length 1 or >m.") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + if m == 0: + if truepoly: + return poly1d(p) + return p + else: + # Note: this must work also with object and integer arrays + y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) + val = polyint(y, m - 1, k=k[1:]) + if truepoly: + return poly1d(val) + return val + + +def _polyder_dispatcher(p, m=None): + return (p,) + + +@array_function_dispatch(_polyder_dispatcher) +def polyder(p, m=1): + """ + Return the derivative of the specified order of a polynomial. + + .. note:: + This forms part of the old polynomial API. 
Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of differentiation (default: 1) + + Returns + ------- + der : poly1d + A new polynomial representing the derivative. + + See Also + -------- + polyint : Anti-derivative of a polynomial. + poly1d : Class for one-dimensional polynomials. + + Examples + -------- + + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + + >>> import numpy as np + + >>> p = np.poly1d([1,1,1,1]) + >>> p2 = np.polyder(p) + >>> p2 + poly1d([3, 2, 1]) + + which evaluates to: + + >>> p2(2.) + 17.0 + + We can verify this, approximating the derivative with + ``(f(x + h) - f(x))/h``: + + >>> (p(2. + 0.001) - p(2.)) / 0.001 + 17.007000999997857 + + The fourth-order derivative of a 3rd-order polynomial is zero: + + >>> np.polyder(p, 2) + poly1d([6, 2]) + >>> np.polyder(p, 3) + poly1d([6]) + >>> np.polyder(p, 4) + poly1d([0]) + + """ + m = int(m) + if m < 0: + raise ValueError("Order of derivative must be positive (see polyint)") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + n = len(p) - 1 + y = p[:-1] * NX.arange(n, 0, -1) + if m == 0: + val = p + else: + val = polyder(y, m - 1) + if truepoly: + val = poly1d(val) + return val + + +def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None): + return (x, y, w) + + +@array_function_dispatch(_polyfit_dispatcher) +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Least squares polynomial fit. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + to points `(x, y)`. Returns a vector of coefficients `p` that minimises + the squared error in the order `deg`, `deg-1`, ... `0`. + + The `Polynomial.fit ` class + method is recommended for new code as it is more stable numerically. See + the documentation of the method for more information. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. 
When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + cov : bool or str, optional + If given and not `False`, return not just the estimate but also its + covariance matrix. By default, the covariance are scaled by + chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed + to be unreliable except in a relative sense and everything is scaled + such that the reduced chi2 is unity. This scaling is omitted if + ``cov='unscaled'``, as is relevant for the case that the weights are + w = 1/sigma, with sigma known to be a reliable estimate of the + uncertainty. + + Returns + ------- + p : ndarray, shape (deg + 1,) or (deg + 1, K) + Polynomial coefficients, highest power first. If `y` was 2-D, the + coefficients for `k`-th data set are in ``p[:,k]``. + + residuals, rank, singular_values, rcond + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the effective rank of the scaled Vandermonde + coefficient matrix + - singular_values -- singular values of the scaled Vandermonde + coefficient matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K) + Present only if ``full == False`` and ``cov == True``. The covariance + matrix of the polynomial coefficient estimates. The diagonal of + this matrix are the variance estimates for each coefficient. If y + is a 2-D array, then the covariance matrix for the `k`-th data set + are in ``V[:,:,k]`` + + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. + + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + polyval : Compute polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution minimizes the squared error + + .. math:: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] + x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] + ... + x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. + + `polyfit` issues a `~exceptions.RankWarning` when the least-squares fit is + badly conditioned. This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + .. 
[2] Wikipedia, "Polynomial interpolation", + https://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> import numpy as np + >>> import warnings + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + >>> z + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 # may vary + >>> p(3.5) + -0.34732142857143039 # may vary + >>> p(10) + 22.579365079365115 # may vary + + High-order polynomials may oscillate wildly: + + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', np.exceptions.RankWarning) + ... p30 = np.poly1d(np.polyfit(x, y, 30)) + ... + >>> p30(4) + -0.80000000000000204 # may vary + >>> p30(5) + -0.99999999999999445 # may vary + >>> p30(4.5) + -0.10547061179440398 # may vary + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + (-2, 2) + >>> plt.show() + + """ + order = int(deg) + 1 + x = NX.asarray(x) + 0.0 + y = NX.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if x.shape[0] != y.shape[0]: + raise TypeError("expected x and y to have same length") + + # set rcond + if rcond is None: + rcond = len(x) * finfo(x.dtype).eps + + # set up least squares equation for powers of x + lhs = vander(x, order) + rhs = y + + # apply weighting + if w is not None: + w = NX.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + lhs *= w[:, NX.newaxis] + if rhs.ndim == 2: + rhs *= w[:, NX.newaxis] + else: + rhs *= w + + # scale lhs to improve condition number and solve + scale = NX.sqrt((lhs * lhs).sum(axis=0)) + lhs /= scale + c, resids, rank, s = lstsq(lhs, rhs, rcond) + c = (c.T / scale).T # broadcast scale coefficients + + # warn on rank reduction, which indicates an ill conditioned matrix + if rank != order and not full: + msg = "Polyfit may be poorly conditioned" + warnings.warn(msg, RankWarning, stacklevel=2) + + if full: + return c, resids, rank, s, rcond + elif cov: + Vbase = inv(dot(lhs.T, lhs)) + Vbase /= NX.outer(scale, scale) + if cov == "unscaled": + fac = 1 + else: + if len(x) <= order: + raise ValueError("the number of data points must exceed order " + "to scale the covariance matrix") + # note, this used to be: fac = resids / (len(x) - order - 2.0) + # it was decided that the "- 2" (originally justified by "Bayesian + # uncertainty analysis") is not what the user expects + # (see gh-11196 and gh-11197) + fac = resids / (len(x) - order) + if y.ndim == 1: + return c, Vbase * fac + else: + return c, Vbase[:, :, NX.newaxis] * fac + else: + return c + + +def _polyval_dispatcher(p, x): + return (p, x) + + +@array_function_dispatch(_polyval_dispatcher) +def polyval(p, x): + """ + Evaluate a polynomial at specific values. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. 
+ A summary of the differences can be found in the + :doc:`transition guide `. + + If `p` is of length N, this function returns the value:: + + p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1] + + If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``. + If `x` is another polynomial then the composite polynomial ``p(x(t))`` + is returned. + + Parameters + ---------- + p : array_like or poly1d object + 1D array of polynomial coefficients (including coefficients equal + to zero) from highest degree to the constant term, or an + instance of poly1d. + x : array_like or poly1d object + A number, an array of numbers, or an instance of poly1d, at + which to evaluate `p`. + + Returns + ------- + values : ndarray or poly1d + If `x` is a poly1d instance, the result is the composition of the two + polynomials, i.e., `x` is "substituted" in `p` and the simplified + result is returned. In addition, the type of `x` - array_like or + poly1d - governs the type of the output: `x` array_like => `values` + array_like, `x` a poly1d object => `values` is also. + + See Also + -------- + poly1d: A polynomial class. + + Notes + ----- + Horner's scheme [1]_ is used to evaluate the polynomial. Even so, + for polynomials of high degree the values may be inaccurate due to + rounding errors. Use carefully. + + If `x` is a subtype of `ndarray` the return value will be of the same type. + + References + ---------- + .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. + trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand + Reinhold Co., 1985, pg. 720. + + Examples + -------- + >>> import numpy as np + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 + >>> np.polyval([3,0,1], np.poly1d(5)) + poly1d([76]) + >>> np.polyval(np.poly1d([3,0,1]), 5) + 76 + >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) + poly1d([76]) + + """ + p = NX.asarray(p) + if isinstance(x, poly1d): + y = 0 + else: + x = NX.asanyarray(x) + y = NX.zeros_like(x) + for pv in p: + y = y * x + pv + return y + + +def _binary_op_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_binary_op_dispatcher) +def polyadd(a1, a2): + """ + Find the sum of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the polynomial resulting from the sum of two input polynomials. + Each input must be either a poly1d object or a 1D sequence of polynomial + coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The sum of the inputs. If either input is a poly1d object, then the + output is also a poly1d object. Otherwise, it is a 1D array of + polynomial coefficients from highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. 
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + + Examples + -------- + >>> import numpy as np + >>> np.polyadd([1, 2], [9, 5, 4]) + array([9, 6, 6]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2]) + >>> p2 = np.poly1d([9, 5, 4]) + >>> print(p1) + 1 x + 2 + >>> print(p2) + 2 + 9 x + 5 x + 4 + >>> print(np.polyadd(p1, p2)) + 2 + 9 x + 6 x + 6 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 + a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) + a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 + NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + `a1` and `a2` can be either array_like sequences of the polynomials' + coefficients (including coefficients equal to zero), or `poly1d` objects. + + Parameters + ---------- + a1, a2 : array_like or poly1d + Minuend and subtrahend polynomials, respectively. + + Returns + ------- + out : ndarray or poly1d + Array or `poly1d` object of the difference polynomial's coefficients. + + See Also + -------- + polyval, polydiv, polymul, polyadd + + Examples + -------- + + .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + + >>> import numpy as np + + >>> np.polysub([2, 10, -2], [3, 10, -4]) + array([-1, 0, 2]) + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 - a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) - a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 - NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polymul(a1, a2): + """ + Find the product of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Finds the polynomial resulting from the multiplication of the two input + polynomials. Each input must be either a poly1d object or a 1D sequence + of polynomial coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The polynomial resulting from the multiplication of the inputs. If + either inputs is a poly1d object, then the output is also a poly1d + object. Otherwise, it is a 1D array of polynomial coefficients from + highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + convolve : Array convolution. Same output as polymul, but has parameter + for overlap mode. 
+ + Examples + -------- + >>> import numpy as np + >>> np.polymul([1, 2, 3], [9, 5, 1]) + array([ 9, 23, 38, 17, 3]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2, 3]) + >>> p2 = np.poly1d([9, 5, 1]) + >>> print(p1) + 2 + 1 x + 2 x + 3 + >>> print(p2) + 2 + 9 x + 5 x + 1 + >>> print(np.polymul(p1, p2)) + 4 3 2 + 9 x + 23 x + 38 x + 17 x + 3 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1, a2 = poly1d(a1), poly1d(a2) + val = NX.convolve(a1, a2) + if truepoly: + val = poly1d(val) + return val + + +def _polydiv_dispatcher(u, v): + return (u, v) + + +@array_function_dispatch(_polydiv_dispatcher) +def polydiv(u, v): + """ + Returns the quotient and remainder of polynomial division. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The input arrays are the coefficients (including any coefficients + equal to zero) of the "numerator" (dividend) and "denominator" + (divisor) polynomials, respectively. + + Parameters + ---------- + u : array_like or poly1d + Dividend polynomial's coefficients. + + v : array_like or poly1d + Divisor polynomial's coefficients. + + Returns + ------- + q : ndarray + Coefficients, including those equal to zero, of the quotient. + r : ndarray + Coefficients, including those equal to zero, of the remainder. + + See Also + -------- + poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub + polyval + + Notes + ----- + Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need + not equal `v.ndim`. In other words, all four possible combinations - + ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, + ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. + + Examples + -------- + + .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + + >>> import numpy as np + + >>> x = np.array([3.0, 5.0, 2.0]) + >>> y = np.array([2.0, 1.0]) + >>> np.polydiv(x, y) + (array([1.5 , 1.75]), array([0.25])) + + """ + truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d)) + u = atleast_1d(u) + 0.0 + v = atleast_1d(v) + 0.0 + # w has the common type + w = u[0] + v[0] + m = len(u) - 1 + n = len(v) - 1 + scale = 1. / v[0] + q = NX.zeros((max(m - n + 1, 1),), w.dtype) + r = u.astype(w.dtype) + for k in range(m - n + 1): + d = scale * r[k] + q[k] = d + r[k:k + n + 1] -= d * v + while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + if truepoly: + return poly1d(q), poly1d(r) + return q, r + + +_poly_mat = re.compile(r"\*\*([0-9]*)") +def _raise_power(astr, wrap=70): + n = 0 + line1 = '' + line2 = '' + output = ' ' + while True: + mat = _poly_mat.search(astr, n) + if mat is None: + break + span = mat.span() + power = mat.groups()[0] + partstr = astr[n:span[0]] + n = span[1] + toadd2 = partstr + ' ' * (len(power) - 1) + toadd1 = ' ' * (len(partstr) - 1) + power + if ((len(line2) + len(toadd2) > wrap) or + (len(line1) + len(toadd1) > wrap)): + output += line1 + "\n" + line2 + "\n " + line1 = toadd1 + line2 = toadd2 + else: + line2 += partstr + ' ' * (len(power) - 1) + line1 += ' ' * (len(partstr) - 1) + power + output += line1 + "\n" + line2 + return output + astr[n:] + + +@set_module('numpy') +class poly1d: + """ + A one-dimensional polynomial class. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. 
+ A summary of the differences can be found in the + :doc:`transition guide `. + + A convenience class, used to encapsulate "natural" operations on + polynomials so that said operations may take on their customary + form in code (see Examples). + + Parameters + ---------- + c_or_r : array_like + The polynomial's coefficients, in decreasing powers, or if + the value of the second parameter is True, the polynomial's + roots (values where the polynomial evaluates to 0). For example, + ``poly1d([1, 2, 3])`` returns an object that represents + :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns + one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. + r : bool, optional + If True, `c_or_r` specifies the polynomial's roots; the default + is False. + variable : str, optional + Changes the variable used when printing `p` from `x` to `variable` + (see Examples). + + Examples + -------- + >>> import numpy as np + + Construct the polynomial :math:`x^2 + 2x + 3`: + + >>> import numpy as np + + >>> p = np.poly1d([1, 2, 3]) + >>> print(np.poly1d(p)) + 2 + 1 x + 2 x + 3 + + Evaluate the polynomial at :math:`x = 0.5`: + + >>> p(0.5) + 4.25 + + Find the roots: + + >>> p.r + array([-1.+1.41421356j, -1.-1.41421356j]) + >>> p(p.r) + array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary + + These numbers in the previous line represent (0, 0) to machine precision + + Show the coefficients: + + >>> p.c + array([1, 2, 3]) + + Display the order (the leading zero-coefficients are removed): + + >>> p.order + 2 + + Show the coefficient of the k-th power in the polynomial + (which is equivalent to ``p.c[-(i+1)]``): + + >>> p[1] + 2 + + Polynomials can be added, subtracted, multiplied, and divided + (returns quotient and remainder): + + >>> p * p + poly1d([ 1, 4, 10, 12, 9]) + + >>> (p**3 + 4) / p + (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.])) + + ``asarray(p)`` gives the coefficient array, so polynomials can be + used in all functions that accept arrays: + + >>> p**2 # square of polynomial + poly1d([ 1, 4, 10, 12, 9]) + + >>> np.square(p) # square of individual coefficients + array([1, 4, 9]) + + The variable used in the string representation of `p` can be modified, + using the `variable` parameter: + + >>> p = np.poly1d([1,2,3], variable='z') + >>> print(p) + 2 + 1 z + 2 z + 3 + + Construct a polynomial from its roots: + + >>> np.poly1d([1, 2], True) + poly1d([ 1., -3., 2.]) + + This is the same polynomial as obtained by: + + >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) + poly1d([ 1, -3, 2]) + + """ + __hash__ = None + + @property + def coeffs(self): + """ The polynomial coefficients """ + return self._coeffs + + @coeffs.setter + def coeffs(self, value): + # allowing this makes p.coeffs *= 2 legal + if value is not self._coeffs: + raise AttributeError("Cannot set attribute") + + @property + def variable(self): + """ The name of the polynomial variable """ + return self._variable + + # calculated attributes + @property + def order(self): + """ The order or degree of the polynomial """ + return len(self._coeffs) - 1 + + @property + def roots(self): + """ The roots of the polynomial, where self(x) == 0 """ + return roots(self._coeffs) + + # our internal _coeffs property need to be backed by __dict__['coeffs'] for + # scipy to work correctly. 
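+    # (The getter and setter below read and write self.__dict__['coeffs']
+    # directly, so code that touches the instance dict keeps working.)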
+ @property + def _coeffs(self): + return self.__dict__['coeffs'] + + @_coeffs.setter + def _coeffs(self, coeffs): + self.__dict__['coeffs'] = coeffs + + # alias attributes + r = roots + c = coef = coefficients = coeffs + o = order + + def __init__(self, c_or_r, r=False, variable=None): + if isinstance(c_or_r, poly1d): + self._variable = c_or_r._variable + self._coeffs = c_or_r._coeffs + + if set(c_or_r.__dict__) - set(self.__dict__): + msg = ("In the future extra properties will not be copied " + "across when constructing one poly1d from another") + warnings.warn(msg, FutureWarning, stacklevel=2) + self.__dict__.update(c_or_r.__dict__) + + if variable is not None: + self._variable = variable + return + if r: + c_or_r = poly(c_or_r) + c_or_r = atleast_1d(c_or_r) + if c_or_r.ndim > 1: + raise ValueError("Polynomial must be 1d only.") + c_or_r = trim_zeros(c_or_r, trim='f') + if len(c_or_r) == 0: + c_or_r = NX.array([0], dtype=c_or_r.dtype) + self._coeffs = c_or_r + if variable is None: + variable = 'x' + self._variable = variable + + def __array__(self, t=None, copy=None): + if t: + return NX.asarray(self.coeffs, t, copy=copy) + else: + return NX.asarray(self.coeffs, copy=copy) + + def __repr__(self): + vals = repr(self.coeffs) + vals = vals[6:-1] + return f"poly1d({vals})" + + def __len__(self): + return self.order + + def __str__(self): + thestr = "0" + var = self.variable + + # Remove leading zeros + coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] + N = len(coeffs) - 1 + + def fmt_float(q): + s = f'{q:.4g}' + s = s.removesuffix('.0000') + return s + + for k, coeff in enumerate(coeffs): + if not iscomplex(coeff): + coefstr = fmt_float(real(coeff)) + elif real(coeff) == 0: + coefstr = f'{fmt_float(imag(coeff))}j' + else: + coefstr = f'({fmt_float(real(coeff))} + {fmt_float(imag(coeff))}j)' + + power = (N - k) + if power == 0: + if coefstr != '0': + newstr = f'{coefstr}' + elif k == 0: + newstr = '0' + else: + newstr = '' + elif power == 1: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = var + else: + newstr = f'{coefstr} {var}' + elif coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) + else: + newstr = '%s %s**%d' % (coefstr, var, power) + + if k > 0: + if newstr != '': + if newstr.startswith('-'): + thestr = f"{thestr} - {newstr[1:]}" + else: + thestr = f"{thestr} + {newstr}" + else: + thestr = newstr + return _raise_power(thestr) + + def __call__(self, val): + return polyval(self.coeffs, val) + + def __neg__(self): + return poly1d(-self.coeffs) + + def __pos__(self): + return self + + def __mul__(self, other): + if isscalar(other): + return poly1d(self.coeffs * other) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __rmul__(self, other): + if isscalar(other): + return poly1d(other * self.coeffs) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __add__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __radd__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __pow__(self, val): + if not isscalar(val) or int(val) != val or val < 0: + raise ValueError("Power to non-negative integers only.") + res = [1] + for _ in range(val): + res = polymul(self.coeffs, res) + return poly1d(res) + + def __sub__(self, other): + other = poly1d(other) + return poly1d(polysub(self.coeffs, other.coeffs)) + + def __rsub__(self, other): + other = 
poly1d(other) + return poly1d(polysub(other.coeffs, self.coeffs)) + + def __truediv__(self, other): + if isscalar(other): + return poly1d(self.coeffs / other) + else: + other = poly1d(other) + return polydiv(self, other) + + def __rtruediv__(self, other): + if isscalar(other): + return poly1d(other / self.coeffs) + else: + other = poly1d(other) + return polydiv(other, self) + + def __eq__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + if self.coeffs.shape != other.coeffs.shape: + return False + return (self.coeffs == other.coeffs).all() + + def __ne__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + return not self.__eq__(other) + + def __getitem__(self, val): + ind = self.order - val + if val > self.order: + return self.coeffs.dtype.type(0) + if val < 0: + return self.coeffs.dtype.type(0) + return self.coeffs[ind] + + def __setitem__(self, key, val): + ind = self.order - key + if key < 0: + raise ValueError("Does not support negative powers.") + if key > self.order: + zr = NX.zeros(key - self.order, self.coeffs.dtype) + self._coeffs = NX.concatenate((zr, self.coeffs)) + ind = 0 + self._coeffs[ind] = val + + def __iter__(self): + return iter(self.coeffs) + + def integ(self, m=1, k=0): + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + + """ + return poly1d(polyint(self.coeffs, m=m, k=k)) + + def deriv(self, m=1): + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. + + See Also + -------- + polyder : equivalent function + + """ + return poly1d(polyder(self.coeffs, m=m)) + +# Stuff to do on module import + + +warnings.simplefilter('always', RankWarning) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.pyi new file mode 100644 index 0000000..faf2f01 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.pyi @@ -0,0 +1,316 @@ +from typing import ( + Any, + NoReturn, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, +) +from typing import ( + Literal as L, +) + +import numpy as np +from numpy import ( + complex128, + complexfloating, + float64, + floating, + int32, + int64, + object_, + poly1d, + signedinteger, + unsignedinteger, +) +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, +) + +_T = TypeVar("_T") + +_2Tup: TypeAlias = tuple[_T, _T] +_5Tup: TypeAlias = tuple[ + _T, + NDArray[float64], + NDArray[int32], + NDArray[float64], + NDArray[float64], +] + +__all__ = [ + "poly", + "roots", + "polyint", + "polyder", + "polyadd", + "polysub", + "polymul", + "polydiv", + "polyval", + "poly1d", + "polyfit", +] + +def poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ... + +# Returns either a float or complex array depending on the input values. +# See `np.linalg.eigvals`. +def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ... + +@overload +def polyint( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ..., +) -> poly1d: ... +@overload +def polyint( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeFloat_co | None = ..., +) -> NDArray[floating]: ... 
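A hedged round-trip illustration of how the `polyint`/`polyder` overloads above pair up at runtime, mirroring the `polyint` docstring example earlier in this patch:

import numpy as np

p = np.poly1d([1.0, 1.0, 1.0])    # x**2 + x + 1
P = np.polyint(p)                 # poly1d in -> poly1d out
assert np.polyder(P) == p         # differentiation undoes integration

c = np.polyint([1.0, 1.0, 1.0])   # array-like in -> floating ndarray out
# c holds [1/3, 1/2, 1, 0], highest power first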
+@overload +def polyint( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeComplex_co | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def polyint( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeObject_co | None = ..., +) -> NDArray[object_]: ... + +@overload +def polyder( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., +) -> poly1d: ... +@overload +def polyder( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[floating]: ... +@overload +def polyder( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[complexfloating]: ... +@overload +def polyder( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[object_]: ... + +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[False] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: L[False] = ..., +) -> NDArray[float64]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[False] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: L[False] = ..., +) -> NDArray[complex128]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[False] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: L[True, "unscaled"] = ..., +) -> _2Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[False] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: L[True, "unscaled"] = ..., +) -> _2Tup[NDArray[complex128]]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[True] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[True] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[complex128]]: ... + +@overload +def polyval( + p: _ArrayLikeBool_co, + x: _ArrayLikeBool_co, +) -> NDArray[int64]: ... +@overload +def polyval( + p: _ArrayLikeUInt_co, + x: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... +@overload +def polyval( + p: _ArrayLikeInt_co, + x: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polyval( + p: _ArrayLikeFloat_co, + x: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polyval( + p: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polyval( + p: _ArrayLikeObject_co, + x: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polyadd( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NDArray[np.bool]: ... +@overload +def polyadd( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... 
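A small sketch of the zero-padding behaviour behind the `polyadd` overloads (the implementation earlier in this patch pads the shorter coefficient array with leading zeros before adding); the poly1d overloads just above make the result a poly1d when either operand is one:

import numpy as np

# [1, 2] is aligned as [0, 1, 2] so like powers add:
assert np.polyadd([1, 2], [9, 5, 4]).tolist() == [9, 6, 6]

q = np.polyadd(np.poly1d([1, 2]), [9, 5, 4])
assert isinstance(q, np.poly1d)   # poly1d operand -> poly1d result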
+@overload +def polyadd( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polyadd( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polyadd( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polysub( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NoReturn: ... +@overload +def polysub( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... +@overload +def polysub( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polysub( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polysub( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +# NOTE: Not an alias, but they do have the same signature (that we can reuse) +polymul = polyadd + +@overload +def polydiv( + u: poly1d, + v: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co | _ArrayLikeObject_co, + v: poly1d, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, +) -> _2Tup[NDArray[floating]]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, +) -> _2Tup[NDArray[complexfloating]]: ... +@overload +def polydiv( + u: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, +) -> _2Tup[NDArray[Any]]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.py new file mode 100644 index 0000000..8136a7d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.py @@ -0,0 +1,642 @@ +""" +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions are +correctly handled. See their respective docstrings for specific examples. + +""" +import numpy._core.numeric as nx +import numpy._core.numerictypes as nt +from numpy._core.numeric import any, asarray +from numpy._core.overrides import array_function_dispatch, set_module +from numpy.lib._type_check_impl import isreal + +__all__ = [ + 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', + 'arctanh' + ] + + +_ln2 = nx.log(2.0) + + +def _tocomplex(arr): + """Convert its input `arr` to a complex array. + + The input is returned as a complex array of the smallest type that will fit + the original data: types like single, byte, short, etc. become csingle, + while others become cdouble. + + A copy of the input is always made. 
+ + Parameters + ---------- + arr : array + + Returns + ------- + array + An array with the same input data as the input but in complex form. + + Examples + -------- + >>> import numpy as np + + First, consider an input of type short: + + >>> a = np.array([1,2,3],np.short) + + >>> ac = np.lib.scimath._tocomplex(a); ac + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> ac.dtype + dtype('complex64') + + If the input is of type double, the output is correspondingly of the + complex double type as well: + + >>> b = np.array([1,2,3],np.double) + + >>> bc = np.lib.scimath._tocomplex(b); bc + array([1.+0.j, 2.+0.j, 3.+0.j]) + + >>> bc.dtype + dtype('complex128') + + Note that even if the input was complex to begin with, a copy is still + made, since the astype() method always copies: + + >>> c = np.array([1,2,3],np.csingle) + + >>> cc = np.lib.scimath._tocomplex(c); cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> c *= 2; c + array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + + >>> cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + """ + if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, + nt.ushort, nt.csingle)): + return arr.astype(nt.csingle) + else: + return arr.astype(nt.cdouble) + + +def _fix_real_lt_zero(x): + """Convert `x` to complex if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_real_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_real_lt_zero([-1,2]) + array([-1.+0.j, 2.+0.j]) + + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = _tocomplex(x) + return x + + +def _fix_int_lt_zero(x): + """Convert `x` to double if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_int_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_int_lt_zero([-1,2]) + array([-1., 2.]) + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = x * 1.0 + return x + + +def _fix_real_abs_gt_1(x): + """Convert `x` to complex if it has real components x_i with abs(x_i)>1. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) + array([0, 1]) + + >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) + array([0.+0.j, 2.+0.j]) + """ + x = asarray(x) + if any(isreal(x) & (abs(x) > 1)): + x = _tocomplex(x) + return x + + +def _unary_dispatcher(x): + return (x,) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def sqrt(x): + """ + Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike `numpy.sqrt` which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of `x`. If `x` was a scalar, so is `out`, + otherwise an array is returned. 
+
+    See Also
+    --------
+    numpy.sqrt
+
+    Examples
+    --------
+    For real, non-negative inputs this works just like `numpy.sqrt`:
+
+    >>> import numpy as np
+
+    >>> np.emath.sqrt(1)
+    1.0
+    >>> np.emath.sqrt([1, 4])
+    array([1., 2.])
+
+    But it automatically handles negative inputs:
+
+    >>> np.emath.sqrt(-1)
+    1j
+    >>> np.emath.sqrt([-1,4])
+    array([0.+1.j, 2.+0.j])
+
+    Different results are expected because floating-point 0.0 and -0.0 are
+    distinct: the sign of the zero imaginary part selects the branch of the
+    square root.
+
+    For more control, explicitly use complex() as follows:
+
+    >>> np.emath.sqrt(complex(-4.0, 0.0))
+    2j
+    >>> np.emath.sqrt(complex(-4.0, -0.0))
+    -2j
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.sqrt(x)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def log(x):
+    """
+    Compute the natural logarithm of `x`.
+
+    Return the "principal value" (for a description of this, see `numpy.log`)
+    of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
+    returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
+    complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like
+        The value(s) whose log is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The log of the `x` value(s). If `x` was a scalar, so is `out`,
+        otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.log
+
+    Notes
+    -----
+    For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
+    (note, however, that otherwise `numpy.log` and this `log` are identical,
+    i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
+    notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.emath.log(np.exp(1))
+    1.0
+
+    Negative arguments are handled "correctly" (recall that
+    ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
+
+    >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
+    True
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log(x)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def log10(x):
+    """
+    Compute the logarithm base 10 of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
+    is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
+    returns ``inf``). Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+        The value(s) whose log base 10 is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
+        otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.log10
+
+    Notes
+    -----
+    For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
+    (note, however, that otherwise `numpy.log10` and this `log10` are
+    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    (We set the printing precision so the example can be auto-tested)
+
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.log10(10**1)
+    1.0
+
+    >>> np.emath.log10([-10**1, -10**2, 10**2])
+    array([1.+1.3644j, 2.+1.3644j, 2.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log10(x)
+
+
+def _logn_dispatcher(n, x):
+    return (n, x,)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_logn_dispatcher)
+def logn(n, x):
+    """
+    Take log base n of x.
+
+    If `x` contains negative inputs, the answer is computed and returned in the
+    complex domain.
+
+    Parameters
+    ----------
+    n : array_like
+        The base(s) in which the log is taken.
+    x : array_like
+        The value(s) whose log base `n` is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The log base `n` of the `x` value(s). If `x` was a scalar, so is
+        `out`, otherwise an array is returned.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.logn(2, [4, 8])
+    array([2., 3.])
+    >>> np.emath.logn(2, [-4, -8, 8])
+    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    n = _fix_real_lt_zero(n)
+    return nx.log(x) / nx.log(n)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def log2(x):
+    """
+    Compute the logarithm base 2 of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
+    a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
+    ``inf``). Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like
+        The value(s) whose log base 2 is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
+        otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.log2
+
+    Notes
+    -----
+    For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
+    (note, however, that otherwise `numpy.log2` and this `log2` are
+    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+
+    We set the printing precision so the example can be auto-tested:
+
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.log2(8)
+    3.0
+    >>> np.emath.log2([-4, -8, 8])
+    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log2(x)
+
+
+def _power_dispatcher(x, p):
+    return (x, p)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_power_dispatcher)
+def power(x, p):
+    """
+    Return x to the power p, (x**p).
+
+    If `x` contains negative values, the output is converted to the
+    complex domain.
+
+    Parameters
+    ----------
+    x : array_like
+        The input value(s).
+    p : array_like of ints
+        The power(s) to which `x` is raised. If `x` contains multiple values,
+        `p` has to either be a scalar, or contain the same number of values
+        as `x`. In the latter case, the result is
+        ``x[0]**p[0], x[1]**p[1], ...``.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
+        otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.power
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.power(2, 2)
+    4
+
+    >>> np.emath.power([2, 4], 2)
+    array([ 4, 16])
+
+    >>> np.emath.power([2, 4], -2)
+    array([0.25  , 0.0625])
+
+    >>> np.emath.power([-2, 4], 2)
+    array([ 4.-0.j, 16.+0.j])
+
+    >>> np.emath.power([2, 4], [2, 4])
+    array([  4, 256])
+
+    """
+    x = _fix_real_lt_zero(x)
+    p = _fix_int_lt_zero(p)
+    return nx.power(x, p)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def arccos(x):
+    """
+    Compute the inverse cosine of x.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
+    `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+        The value(s) whose arccos is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
+        is `out`, otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.arccos
+
+    Notes
+    -----
+    For an arccos() that returns ``NAN`` when real `x` is not in the
+    interval ``[-1,1]``, use `numpy.arccos`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arccos(1) # a scalar is returned
+    0.0
+
+    >>> np.emath.arccos([1,2])
+    array([0.-0.j   , 0.-1.317j])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arccos(x)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def arcsin(x):
+    """
+    Compute the inverse sine of x.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
+    `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
+    returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+        The value(s) whose arcsin is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
+        is `out`, otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.arcsin
+
+    Notes
+    -----
+    For an arcsin() that returns ``NAN`` when real `x` is not in the
+    interval ``[-1,1]``, use `numpy.arcsin`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arcsin(0)
+    0.0
+
+    >>> np.emath.arcsin([0,1])
+    array([0.    , 1.5708])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arcsin(x)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def arctanh(x):
+    """
+    Compute the inverse hyperbolic tangent of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
+    ``abs(x) < 1``, this is a real number. If ``abs(x) > 1``, or if `x` is
+    complex, the result is complex. Finally, ``x = 1`` returns ``inf`` and
+    ``x = -1`` returns ``-inf``.
+
+    Parameters
+    ----------
+    x : array_like
+        The value(s) whose arctanh is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
+        a scalar so is `out`, otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.arctanh
+
+    Notes
+    -----
+    For an arctanh() that returns ``NAN`` when real `x` is not in the
+    interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
+    return +/-inf for ``x = +/-1``).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arctanh(0.5)
+    0.5493061443340549
+
+    >>> from numpy.testing import suppress_warnings
+    >>> with suppress_warnings() as sup:
+    ...     sup.filter(RuntimeWarning)
+    ...
np.emath.arctanh(np.eye(2)) + array([[inf, 0.], + [ 0., inf]]) + >>> np.emath.arctanh([1j]) + array([0.+0.7854j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arctanh(x) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.pyi new file mode 100644 index 0000000..e6390c2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.pyi @@ -0,0 +1,93 @@ +from typing import Any, overload + +from numpy import complexfloating +from numpy._typing import ( + NDArray, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ComplexLike_co, + _FloatLike_co, +) + +__all__ = ["sqrt", "log", "log2", "logn", "log10", "power", "arccos", "arcsin", "arctanh"] + +@overload +def sqrt(x: _FloatLike_co) -> Any: ... +@overload +def sqrt(x: _ComplexLike_co) -> complexfloating: ... +@overload +def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log(x: _FloatLike_co) -> Any: ... +@overload +def log(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log10(x: _FloatLike_co) -> Any: ... +@overload +def log10(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log2(x: _FloatLike_co) -> Any: ... +@overload +def log2(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ... +@overload +def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating: ... +@overload +def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ... +@overload +def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating: ... +@overload +def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arccos(x: _FloatLike_co) -> Any: ... +@overload +def arccos(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arcsin(x: _FloatLike_co) -> Any: ... +@overload +def arcsin(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arctanh(x: _FloatLike_co) -> Any: ... +@overload +def arctanh(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... 
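
Editor's note: a minimal doctest-style sketch, not part of the vendored patch,
showing how the scimath routines stubbed above differ from their plain
`numpy` counterparts (``np.emath`` is the public alias of
``numpy.lib.scimath``):

>>> import numpy as np
>>> print(np.sqrt(-1))       # plain numpy: nan, with an invalid-value warning
nan
>>> np.emath.sqrt(-1)        # scimath: principal complex root
1j
>>> np.emath.log(-np.exp(1)) == 1 + np.pi * 1j
True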
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.py new file mode 100644 index 0000000..89b86c8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.py @@ -0,0 +1,1301 @@ +import functools +import warnings + +import numpy._core.numeric as _nx +from numpy._core import atleast_3d, overrides, vstack +from numpy._core._multiarray_umath import _array_converter +from numpy._core.fromnumeric import reshape, transpose +from numpy._core.multiarray import normalize_axis_index +from numpy._core.numeric import ( + array, + asanyarray, + asarray, + normalize_axis_tuple, + zeros, + zeros_like, +) +from numpy._core.overrides import set_module +from numpy._core.shape_base import _arrays_for_stack_dispatcher +from numpy.lib._index_tricks_impl import ndindex +from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells + +__all__ = [ + 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', + 'apply_along_axis', 'kron', 'tile', 'take_along_axis', + 'put_along_axis' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _make_along_axis_idx(arr_shape, indices, axis): + # compute dimensions to iterate over + if not _nx.issubdtype(indices.dtype, _nx.integer): + raise IndexError('`indices` must be an integer array') + if len(arr_shape) != indices.ndim: + raise ValueError( + "`indices` and `arr` must have the same number of dimensions") + shape_ones = (1,) * indices.ndim + dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim)) + + # build a fancy index, consisting of orthogonal aranges, with the + # requested index inserted at the right location + fancy_index = [] + for dim, n in zip(dest_dims, arr_shape): + if dim is None: + fancy_index.append(indices) + else: + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1:] + fancy_index.append(_nx.arange(n).reshape(ind_shape)) + + return tuple(fancy_index) + + +def _take_along_axis_dispatcher(arr, indices, axis=None): + return (arr, indices) + + +@array_function_dispatch(_take_along_axis_dispatcher) +def take_along_axis(arr, indices, axis=-1): + """ + Take values from the input array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to look up values in the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Source array + indices : ndarray (Ni..., J, Nk...) + Indices to take along each 1d slice of ``arr``. This must match the + dimension of ``arr``, but dimensions Ni and Nj only need to broadcast + against ``arr``. + axis : int or None, optional + The axis to take 1d slices along. If axis is None, the input array is + treated as if it had first been flattened to 1d, for consistency with + `sort` and `argsort`. + + .. versionchanged:: 2.3 + The default value is now ``-1``. + + Returns + ------- + out: ndarray (Ni..., J, Nk...) + The indexed result. 
+ + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + out = np.empty(Ni + (J,) + Nk) + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + out_1d = out [ii + s_[:,] + kk] + for j in range(J): + out_1d[j] = a_1d[indices_1d[j]] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + out_1d[:] = a_1d[indices_1d] + + See Also + -------- + take : Take along an axis, using the same indices for every 1d slice + put_along_axis : + Put values into the destination array by matching 1d index and data slices + + Examples + -------- + >>> import numpy as np + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can sort either by using sort directly, or argsort and this function + + >>> np.sort(a, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + >>> ai = np.argsort(a, axis=1) + >>> ai + array([[0, 2, 1], + [1, 2, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + + The same works for max and min, if you maintain the trivial dimension + with ``keepdims``: + + >>> np.max(a, axis=1, keepdims=True) + array([[30], + [60]]) + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[30], + [60]]) + + If we want to get the max and min at the same time, we can stack the + indices first + + >>> ai_min = np.argmin(a, axis=1, keepdims=True) + >>> ai_max = np.argmax(a, axis=1, keepdims=True) + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 30], + [40, 60]]) + """ + # normalize inputs + if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') + arr = arr.flat + arr_shape = (len(arr),) # flatiter has no .shape + axis = 0 + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + return arr[_make_along_axis_idx(arr_shape, indices, axis)] + + +def _put_along_axis_dispatcher(arr, indices, values, axis): + return (arr, indices, values) + + +@array_function_dispatch(_put_along_axis_dispatcher) +def put_along_axis(arr, indices, values, axis): + """ + Put values into the destination array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to place values into the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Destination array. + indices : ndarray (Ni..., J, Nk...) + Indices to change along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast + against `arr`. + values : array_like (Ni..., J, Nk...) + values to insert at those indices. Its shape and dimension are + broadcast to match that of `indices`. + axis : int + The axis to take 1d slices along. If axis is None, the destination + array is treated as if a flattened 1d view had been created of it. 
+ + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + values_1d = values [ii + s_[:,] + kk] + for j in range(J): + a_1d[indices_1d[j]] = values_1d[j] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + a_1d[indices_1d] = values_1d + + See Also + -------- + take_along_axis : + Take values from the input array by matching 1d index and data slices + + Examples + -------- + >>> import numpy as np + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can replace the maximum values with: + + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.put_along_axis(a, ai, 99, axis=1) + >>> a + array([[10, 99, 20], + [99, 40, 50]]) + + """ + # normalize inputs + if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') + arr = arr.flat + axis = 0 + arr_shape = (len(arr),) # flatiter has no .shape + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + + +def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): + return (arr,) + + +@array_function_dispatch(_apply_along_axis_dispatcher) +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Apply a function to 1-D slices along the given axis. + + Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays + and `a` is a 1-D slice of `arr` along `axis`. + + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + f = func1d(arr[ii + s_[:,] + kk]) + Nj = f.shape + for jj in ndindex(Nj): + out[ii + jj + kk] = f[jj] + + Equivalently, eliminating the inner loop, this can be expressed as:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) + + Parameters + ---------- + func1d : function (M,) -> (Nj...) + This function should accept 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. + axis : integer + Axis along which `arr` is sliced. + arr : ndarray (Ni..., M, Nk...) + Input array. + args : any + Additional arguments to `func1d`. + kwargs : any + Additional named arguments to `func1d`. + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The output array. The shape of `out` is identical to the shape of + `arr`, except along the `axis` dimension. This axis is removed, and + replaced with new dimensions equal to the shape of the return value + of `func1d`. So if `func1d` returns a scalar `out` will have one + fewer dimensions than `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> import numpy as np + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... 
return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([2., 5., 8.]) + + For a function that returns a 1D array, the number of dimensions in + `outarr` is the same as `arr`. + + >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) + >>> np.apply_along_axis(sorted, 1, b) + array([[1, 7, 8], + [3, 4, 9], + [2, 5, 6]]) + + For a function that returns a higher dimensional array, those dimensions + are inserted in place of the `axis` dimension. + + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(np.diag, -1, b) + array([[[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], + [[4, 0, 0], + [0, 5, 0], + [0, 0, 6]], + [[7, 0, 0], + [0, 8, 0], + [0, 0, 9]]]) + """ + # handle negative axes + conv = _array_converter(arr) + arr = conv[0] + + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + + # arr, with the iteration axis at the end + in_dims = list(range(nd)) + inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1:] + [axis]) + + # compute indices for the iteration axes, and append a trailing ellipsis to + # prevent 0d arrays decaying to scalars, which fixes gh-8642 + inds = ndindex(inarr_view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + + # invoke the function on the first item + try: + ind0 = next(inds) + except StopIteration: + raise ValueError( + 'Cannot apply_along_axis when any iteration dimensions are 0' + ) from None + res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) + + # build a buffer for storing evaluations of func1d. + # remove the requested axis, and add the new ones on the end. + # laid out so that each write is contiguous. + # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) + if not isinstance(res, matrix): + buff = zeros_like(res, shape=inarr_view.shape[:-1] + res.shape) + else: + # Matrices are nasty with reshaping, so do not preserve them here. + buff = zeros(inarr_view.shape[:-1] + res.shape, dtype=res.dtype) + + # permutation of axes such that out = buff.transpose(buff_permute) + buff_dims = list(range(buff.ndim)) + buff_permute = ( + buff_dims[0 : axis] + + buff_dims[buff.ndim - res.ndim : buff.ndim] + + buff_dims[axis : buff.ndim - res.ndim] + ) + + # save the first result, then compute and save all remaining results + buff[ind0] = res + for ind in inds: + buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) + + res = transpose(buff, buff_permute) + return conv.wrap(res) + + +def _apply_over_axes_dispatcher(func, a, axes): + return (a,) + + +@array_function_dispatch(_apply_over_axes_dispatcher) +def apply_over_axes(func, a, axes): + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first + element of `axes`. The result `res` of the function call must have + either the same dimensions as `a` or one less dimension. If `res` + has one less dimension than `a`, a dimension is inserted before + `axis`. The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function must take two arguments, `func(a, axis)`. + a : array_like + Input array. + axes : array_like + Axes over which `func` is applied; the elements must be integers. + + Returns + ------- + apply_over_axis : ndarray + The output array. The number of dimensions is the same as `a`, + but the shape can be different. 
This depends on whether `func` + changes the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. + + Notes + ----- + This function is equivalent to tuple axis arguments to reorderable ufuncs + with keepdims=True. Tuple axis arguments to ufuncs have been available since + version 1.7.0. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.sum(a, axis=(0,2), keepdims=True) + array([[[ 60], + [ 92], + [124]]]) + + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +def _expand_dims_dispatcher(a, axis): + return (a,) + + +@array_function_dispatch(_expand_dims_dispatcher) +def expand_dims(a, axis): + """ + Expand the shape of an array. + + Insert a new axis that will appear at the `axis` position in the expanded + array shape. + + Parameters + ---------- + a : array_like + Input array. + axis : int or tuple of ints + Position in the expanded axes where the new axis (or axes) is placed. + + .. deprecated:: 1.13.0 + Passing an axis where ``axis > a.ndim`` will be treated as + ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will + be treated as ``axis == 0``. This behavior is deprecated. + + Returns + ------- + result : ndarray + View of `a` with the number of dimensions increased. + + See Also + -------- + squeeze : The inverse operation, removing singleton dimensions + reshape : Insert, remove, and combine dimensions, and resize existing ones + atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + The following is equivalent to ``x[:, np.newaxis]``: + + >>> y = np.expand_dims(x, axis=1) + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + ``axis`` may also be a tuple: + + >>> y = np.expand_dims(x, axis=(0, 1)) + >>> y + array([[[1, 2]]]) + + >>> y = np.expand_dims(x, axis=(2, 0)) + >>> y + array([[[1], + [2]]]) + + Note that some examples may use ``None`` instead of ``np.newaxis``. 
These + are the same objects: + + >>> np.newaxis is None + True + + """ + if isinstance(a, matrix): + a = asarray(a) + else: + a = asanyarray(a) + + if not isinstance(axis, (tuple, list)): + axis = (axis,) + + out_ndim = len(axis) + a.ndim + axis = normalize_axis_tuple(axis, out_ndim) + + shape_it = iter(a.shape) + shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] + + return a.reshape(shape) + + +# NOTE: Remove once deprecation period passes +@set_module("numpy") +def row_stack(tup, *, dtype=None, casting="same_kind"): + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`row_stack` alias is deprecated. " + "Use `np.vstack` directly.", + DeprecationWarning, + stacklevel=2 + ) + return vstack(tup, dtype=dtype, casting=casting) + + +row_stack.__doc__ = vstack.__doc__ + + +def _column_stack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_column_stack_dispatcher) +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + arrays = [] + for v in tup: + arr = asanyarray(v) + if arr.ndim < 2: + arr = array(arr, copy=None, subok=True, ndmin=2).T + arrays.append(arr) + return _nx.concatenate(arrays, 1) + + +def _dstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_dstack_dispatcher) +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). + + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by + `dsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of arrays + The arrays must have the same shape along all but the third axis. + 1-D or 2-D arrays must have the same shape. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 3-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + column_stack : Stack 1-D arrays as columns into a 2-D array. + dsplit : Split array along third axis. 
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.dstack((a,b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.dstack((a,b)) + array([[[1, 2]], + [[2, 3]], + [[3, 4]]]) + + """ + arrs = atleast_3d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + return _nx.concatenate(arrs, 2) + + +def _replace_zero_by_x_arrays(sub_arys): + for i in range(len(sub_arys)): + if _nx.ndim(sub_arys[i]) == 0: + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + return sub_arys + + +def _array_split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_array_split_dispatcher) +def array_split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Please refer to the ``split`` documentation. The only difference + between these functions is that ``array_split`` allows + `indices_or_sections` to be an integer that does *not* equally + divide the axis. For an array of length l that should be split + into n sections, it returns l % n sub-arrays of size l//n + 1 + and the rest of size l//n. + + See Also + -------- + split : Split array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] + + >>> x = np.arange(9) + >>> np.array_split(x, 4) + [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])] + + """ + try: + Ntotal = ary.shape[axis] + except AttributeError: + Ntotal = len(ary) + try: + # handle array case. + Nsections = len(indices_or_sections) + 1 + div_points = [0] + list(indices_or_sections) + [Ntotal] + except TypeError: + # indices_or_sections is a scalar, not an array. + Nsections = int(indices_or_sections) + if Nsections <= 0: + raise ValueError('number sections must be larger than 0.') from None + Neach_section, extras = divmod(Ntotal, Nsections) + section_sizes = ([0] + + extras * [Neach_section + 1] + + (Nsections - extras) * [Neach_section]) + div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() + + sub_arys = [] + sary = _nx.swapaxes(ary, axis, 0) + for i in range(Nsections): + st = div_points[i] + end = div_points[i + 1] + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) + + return sub_arys + + +def _split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_split_dispatcher) +def split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays as views into `ary`. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections : int or 1-D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1-D array of sorted integers, the entries + indicate where along `axis` the array is split. For example, + ``[2, 3]`` would, for ``axis=0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. 
+ + Returns + ------- + sub-arrays : list of ndarrays + A list of sub-arrays as views into `ary`. + + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + [array([0., 1., 2.]), + array([3., 4.]), + array([5.]), + array([6., 7.]), + array([], dtype=float64)] + + """ + try: + len(indices_or_sections) + except TypeError: + sections = indices_or_sections + N = ary.shape[axis] + if N % sections: + raise ValueError( + 'array split does not result in an equal division') from None + return array_split(ary, indices_or_sections, axis) + + +def _hvdsplit_dispatcher(ary, indices_or_sections): + return (ary, indices_or_sections) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def hsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays horizontally (column-wise). + + Please refer to the `split` documentation. `hsplit` is equivalent + to `split` with ``axis=1``, the array is always split along the second + axis except for 1-D arrays, where it is split at ``axis=0``. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.hsplit(x, 2) + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])] + >>> np.hsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + array([[ 3.], + [ 7.], + [11.], + [15.]]), + array([], shape=(4, 0), dtype=float64)] + + With a higher dimensional array the split is still along the second axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.hsplit(x, 2) + [array([[[0., 1.]], + [[4., 5.]]]), + array([[[2., 3.]], + [[6., 7.]]])] + + With a 1-D array, the split is along axis 0. + + >>> x = np.array([0, 1, 2, 3, 4, 5]) + >>> np.hsplit(x, 2) + [array([0, 1, 2]), array([3, 4, 5])] + + """ + if _nx.ndim(ary) == 0: + raise ValueError('hsplit only works on arrays of 1 or more dimensions') + if ary.ndim > 1: + return split(ary, indices_or_sections, 1) + else: + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def vsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays vertically (row-wise). + + Please refer to the ``split`` documentation. 
``vsplit`` is equivalent + to ``split`` with `axis=0` (default), the array is always split along the + first axis regardless of the array dimension. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.vsplit(x, 2) + [array([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + array([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])] + >>> np.vsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + array([[12., 13., 14., 15.]]), + array([], shape=(0, 4), dtype=float64)] + + With a higher dimensional array the split is still along the first axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.vsplit(x, 2) + [array([[[0., 1.], + [2., 3.]]]), + array([[[4., 5.], + [6., 7.]]])] + + """ + if _nx.ndim(ary) < 2: + raise ValueError('vsplit only works on arrays of 2 or more dimensions') + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def dsplit(ary, indices_or_sections): + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Please refer to the `split` documentation. `dsplit` is equivalent + to `split` with ``axis=2``, the array is always split along the third + axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> x + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> np.dsplit(x, 2) + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), array([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])] + >>> np.dsplit(x, np.array([3, 6])) + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + array([], shape=(2, 2, 0), dtype=float64)] + """ + if _nx.ndim(ary) < 3: + raise ValueError('dsplit only works on arrays of 3 or more dimensions') + return split(ary, indices_or_sections, 2) + + +def get_array_wrap(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None. + + .. deprecated:: 2.0 + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`get_array_wrap` is deprecated. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_wrap__) for i, x in enumerate(args) + if hasattr(x, '__array_wrap__')) + if wrappers: + return wrappers[-1][-1] + return None + + +def _kron_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_kron_dispatcher) +def kron(a, b): + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + outer : The outer product + + Notes + ----- + The function assumes that the number of dimensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. 
+    If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
+    the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
+    The elements are products of elements from `a` and `b`, organized
+    explicitly by::
+
+        kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
+
+    where::
+
+        kt = it * st + jt,  t = 0,...,N
+
+    In the common 2-D case (N=1), the block structure can be visualized::
+
+        [[ a[0,0]*b,   a[0,1]*b,  ... , a[0,-1]*b  ],
+         [  ...                              ...   ],
+         [ a[-1,0]*b,  a[-1,1]*b, ... , a[-1,-1]*b ]]
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.kron([1,10,100], [5,6,7])
+    array([  5,   6,   7, ..., 500, 600, 700])
+    >>> np.kron([5,6,7], [1,10,100])
+    array([  5,  50, 500, ...,   7,  70, 700])
+
+    >>> np.kron(np.eye(2), np.ones((2,2)))
+    array([[1.,  1.,  0.,  0.],
+           [1.,  1.,  0.,  0.],
+           [0.,  0.,  1.,  1.],
+           [0.,  0.,  1.,  1.]])
+
+    >>> a = np.arange(100).reshape((2,5,2,5))
+    >>> b = np.arange(24).reshape((2,3,4))
+    >>> c = np.kron(a,b)
+    >>> c.shape
+    (2, 10, 6, 20)
+    >>> I = (1,3,0,2)
+    >>> J = (0,2,1)
+    >>> J1 = (0,) + J             # extend to ndim=4
+    >>> S1 = (1,) + b.shape
+    >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
+    >>> c[K] == a[I]*b[J]
+    True
+
+    """
+    # Working:
+    # 1. Equalise the shapes by prepending smaller array with 1s
+    # 2. Expand shapes of both the arrays by adding new axes at
+    #    odd positions for 1st array and even positions for 2nd
+    # 3. Compute the product of the modified arrays
+    # 4. The innermost array elements now contain the rows of
+    #    the Kronecker product
+    # 5. Reshape the result to kron's shape, which is the same as the
+    #    product of the shapes of the two arrays.
+    b = asanyarray(b)
+    a = array(a, copy=None, subok=True, ndmin=b.ndim)
+    is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)
+    ndb, nda = b.ndim, a.ndim
+    nd = max(ndb, nda)
+
+    if (nda == 0 or ndb == 0):
+        return _nx.multiply(a, b)
+
+    as_ = a.shape
+    bs = b.shape
+    if not a.flags.contiguous:
+        a = reshape(a, as_)
+    if not b.flags.contiguous:
+        b = reshape(b, bs)
+
+    # Equalise the shapes by prepending smaller one with 1s
+    as_ = (1,) * max(0, ndb - nda) + as_
+    bs = (1,) * max(0, nda - ndb) + bs
+
+    # Insert empty dimensions
+    a_arr = expand_dims(a, axis=tuple(range(ndb - nda)))
+    b_arr = expand_dims(b, axis=tuple(range(nda - ndb)))
+
+    # Compute the product
+    a_arr = expand_dims(a_arr, axis=tuple(range(1, nd * 2, 2)))
+    b_arr = expand_dims(b_arr, axis=tuple(range(0, nd * 2, 2)))
+    # In case of `mat`, convert result to `array`
+    result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))
+
+    # Reshape back
+    result = result.reshape(_nx.multiply(as_, bs))
+
+    return result if not is_any_mat else matrix(result, copy=False)
+
+
+def _tile_dispatcher(A, reps):
+    return (A, reps)
+
+
+@array_function_dispatch(_tile_dispatcher)
+def tile(A, reps):
+    """
+    Construct an array by repeating A the number of times given by reps.
+
+    If `reps` has length ``d``, the result will have dimension of
+    ``max(d, A.ndim)``.
+
+    If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
+    axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
+    or shape (1, 1, 3) for 3-D replication. If this is not the desired
+    behavior, promote `A` to d-dimensions manually before calling this
+    function.
+
+    If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it.
+    Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
+    (1, 1, 2, 2).
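+
+    (Editor's sketch of that promotion, as a doctest:)
+
+    >>> np.tile(np.ones((2, 3, 4, 5)), (2, 2)).shape
+    (2, 3, 8, 10)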
+ + Note : Although tile may be used for broadcasting, it is strongly + recommended to use numpy's broadcasting operations and functions. + + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. + + Returns + ------- + c : ndarray + The tiled output array. + + See Also + -------- + repeat : Repeat elements of an array. + broadcast_to : Broadcast an array to a new shape + + Examples + -------- + >>> import numpy as np + >>> a = np.array([0, 1, 2]) + >>> np.tile(a, 2) + array([0, 1, 2, 0, 1, 2]) + >>> np.tile(a, (2, 2)) + array([[0, 1, 2, 0, 1, 2], + [0, 1, 2, 0, 1, 2]]) + >>> np.tile(a, (2, 1, 2)) + array([[[0, 1, 2, 0, 1, 2]], + [[0, 1, 2, 0, 1, 2]]]) + + >>> b = np.array([[1, 2], [3, 4]]) + >>> np.tile(b, 2) + array([[1, 2, 1, 2], + [3, 4, 3, 4]]) + >>> np.tile(b, (2, 1)) + array([[1, 2], + [3, 4], + [1, 2], + [3, 4]]) + + >>> c = np.array([1,2,3,4]) + >>> np.tile(c,(4,1)) + array([[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]]) + """ + try: + tup = tuple(reps) + except TypeError: + tup = (reps,) + d = len(tup) + if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray): + # Fixes the problem that the function does not make a copy if A is a + # numpy array and the repetitions are 1 in all dimensions + return _nx.array(A, copy=True, subok=True, ndmin=d) + else: + # Note that no copy of zero-sized arrays is made. However since they + # have no data there is no risk of an inadvertent overwrite. + c = _nx.array(A, copy=None, subok=True, ndmin=d) + if (d < c.ndim): + tup = (1,) * (c.ndim - d) + tup + shape_out = tuple(s * t for s, t in zip(c.shape, tup)) + n = c.size + if n > 0: + for dim_in, nrep in zip(c.shape, tup): + if nrep != 1: + c = c.reshape(-1, n).repeat(nrep, 0) + n //= dim_in + return c.reshape(shape_out) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.pyi new file mode 100644 index 0000000..a50d372 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.pyi @@ -0,0 +1,235 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + Concatenate, + ParamSpec, + Protocol, + SupportsIndex, + TypeVar, + overload, + type_check_only, +) + +from typing_extensions import deprecated + +import numpy as np +from numpy import ( + _CastingKind, + complexfloating, + floating, + generic, + integer, + object_, + signedinteger, + ufunc, + unsignedinteger, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _ShapeLike, +) + +__all__ = [ + "column_stack", + "row_stack", + "dstack", + "array_split", + "split", + "hsplit", + "vsplit", + "dsplit", + "apply_over_axes", + "expand_dims", + "apply_along_axis", + "kron", + "tile", + "take_along_axis", + "put_along_axis", +] + +_P = ParamSpec("_P") +_ScalarT = TypeVar("_ScalarT", bound=generic) + +# Signature of `__array_wrap__` +@type_check_only +class _ArrayWrap(Protocol): + def __call__( + self, + array: NDArray[Any], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: bool = ..., + /, + ) -> Any: ... + +@type_check_only +class _SupportsArrayWrap(Protocol): + @property + def __array_wrap__(self) -> _ArrayWrap: ... 
+ +### + +def take_along_axis( + arr: _ScalarT | NDArray[_ScalarT], + indices: NDArray[integer], + axis: int | None = ..., +) -> NDArray[_ScalarT]: ... + +def put_along_axis( + arr: NDArray[_ScalarT], + indices: NDArray[integer], + values: ArrayLike, + axis: int | None, +) -> None: ... + +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[_ScalarT]: ... +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], Any], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[Any]: ... + +def apply_over_axes( + func: Callable[[NDArray[Any], int], NDArray[_ScalarT]], + a: ArrayLike, + axes: int | Sequence[int], +) -> NDArray[_ScalarT]: ... + +@overload +def expand_dims( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike, +) -> NDArray[_ScalarT]: ... +@overload +def expand_dims( + a: ArrayLike, + axis: _ShapeLike, +) -> NDArray[Any]: ... + +# Deprecated in NumPy 2.0, 2023-08-18 +@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") +def row_stack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> NDArray[Any]: ... + +# +@overload +def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def array_split( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[_ScalarT]]: ... +@overload +def array_split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[Any]]: ... + +@overload +def split( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[_ScalarT]]: ... +@overload +def split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[Any]]: ... + +@overload +def hsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def hsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def vsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def vsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def dsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def dsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... +@overload +def get_array_wrap(*args: object) -> _ArrayWrap | None: ... + +@overload +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... 
# type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+@overload
+def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
+@overload
+def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...
+
+@overload
+def tile(
+    A: _ArrayLike[_ScalarT],
+    reps: int | Sequence[int],
+) -> NDArray[_ScalarT]: ...
+@overload
+def tile(
+    A: ArrayLike,
+    reps: int | Sequence[int],
+) -> NDArray[Any]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.py
new file mode 100644
index 0000000..d478078
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.py
@@ -0,0 +1,549 @@
+"""
+Utilities that manipulate strides to achieve desirable effects.
+
+An explanation of strides can be found in the :ref:`arrays.ndarray`.
+
+"""
+import numpy as np
+from numpy._core.numeric import normalize_axis_tuple
+from numpy._core.overrides import array_function_dispatch, set_module
+
+__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
+
+
+class DummyArray:
+    """Dummy object that just exists to hang __array_interface__ dictionaries
+    and possibly keep alive a reference to a base array.
+    """
+
+    def __init__(self, interface, base=None):
+        self.__array_interface__ = interface
+        self.base = base
+
+
+def _maybe_view_as_subclass(original_array, new_array):
+    if type(original_array) is not type(new_array):
+        # if input was an ndarray subclass and subclasses were OK,
+        # then view the result as that subclass.
+        new_array = new_array.view(type=type(original_array))
+        # Since we have done something akin to a view from original_array, we
+        # should let the subclass finalize (if it has it implemented, i.e., is
+        # not None).
+        if new_array.__array_finalize__:
+            new_array.__array_finalize__(original_array)
+    return new_array
+
+
+@set_module("numpy.lib.stride_tricks")
+def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
+    """
+    Create a view into the array with the given shape and strides.
+
+    .. warning:: This function has to be used with extreme care, see notes.
+
+    Parameters
+    ----------
+    x : ndarray
+        Array to create a new view of.
+    shape : sequence of int, optional
+        The shape of the new array. Defaults to ``x.shape``.
+    strides : sequence of int, optional
+        The strides of the new array. Defaults to ``x.strides``.
+    subok : bool, optional
+        If True, subclasses are preserved.
+    writeable : bool, optional
+        If set to False, the returned array will always be readonly.
+        Otherwise it will be writable if the original array was. It
+        is advisable to set this to False if possible (see Notes).
+
+    Returns
+    -------
+    view : ndarray
+
+    See also
+    --------
+    broadcast_to : broadcast an array to a given shape.
+    reshape : reshape an array.
+    lib.stride_tricks.sliding_window_view :
+        user-friendly and safe function for creating sliding window views.
+
+    Notes
+    -----
+    ``as_strided`` creates a view into the array given the exact strides
+    and shape. This means it manipulates the internal data structure of
+    ndarray and, if done incorrectly, the array elements can point to
+    invalid memory and can corrupt results or crash your program.
+    It is advisable to always use the original ``x.strides`` when
+    calculating new strides to avoid reliance on a contiguous memory
+    layout.
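+
+    (Editor's sketch of that recommendation: the new strides are derived
+    from the original ``x.strides`` instead of hard-coded byte counts.)
+
+    >>> from numpy.lib.stride_tricks import as_strided
+    >>> x = np.arange(6)
+    >>> as_strided(x, shape=(2, 3), strides=(3 * x.strides[0], x.strides[0]))
+    array([[0, 1, 2],
+           [3, 4, 5]])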
+ + Furthermore, arrays created with this function often contain self + overlapping memory, so that two elements are identical. + Vectorized write operations on such arrays will typically be + unpredictable. They may even give different results for small, large, + or transposed arrays. + + Since writing to these arrays has to be tested and done with great + care, you may want to use ``writeable=False`` to avoid accidental write + operations. + + For these reasons it is advisable to avoid ``as_strided`` when + possible. + """ + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=None, subok=subok) + interface = dict(x.__array_interface__) + if shape is not None: + interface['shape'] = tuple(shape) + if strides is not None: + interface['strides'] = tuple(strides) + + array = np.asarray(DummyArray(interface, base=x)) + # The route via `__interface__` does not preserve structured + # dtypes. Since dtype should remain unchanged, we set it explicitly. + array.dtype = x.dtype + + view = _maybe_view_as_subclass(x, array) + + if view.flags.writeable and not writeable: + view.flags.writeable = False + + return view + + +def _sliding_window_view_dispatcher(x, window_shape, axis=None, *, + subok=None, writeable=None): + return (x,) + + +@array_function_dispatch( + _sliding_window_view_dispatcher, module="numpy.lib.stride_tricks" +) +def sliding_window_view(x, window_shape, axis=None, *, + subok=False, writeable=False): + """ + Create a sliding window view into the array with the given window shape. + + Also known as rolling or moving window, the window slides across all + dimensions of the array and extracts subsets of the array at all window + positions. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + x : array_like + Array to create the sliding window view from. + window_shape : int or tuple of int + Size of window over each axis that takes part in the sliding window. + If `axis` is not present, must have same length as the number of input + array dimensions. Single integers `i` are treated as if they were the + tuple `(i,)`. + axis : int or tuple of int, optional + Axis or axes along which the sliding window is applied. + By default, the sliding window is applied to all axes and + `window_shape[i]` will refer to axis `i` of `x`. + If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to + the axis `axis[i]` of `x`. + Single integers `i` are treated as if they were the tuple `(i,)`. + subok : bool, optional + If True, sub-classes will be passed-through, otherwise the returned + array will be forced to be a base-class array (default). + writeable : bool, optional + When true, allow writing to the returned view. The default is false, + as this should be used with caution: the returned view contains the + same memory location multiple times, so writing to one location will + cause others to change. + + Returns + ------- + view : ndarray + Sliding window view of the array. The sliding window dimensions are + inserted at the end, and the original dimensions are trimmed as + required by the size of the sliding window. + That is, ``view.shape = x_shape_trimmed + window_shape``, where + ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less + than the corresponding window size. + + See Also + -------- + lib.stride_tricks.as_strided: A lower-level and less safe routine for + creating arbitrary views from custom shape and strides. + broadcast_to: broadcast an array to a given shape. 
+
+    Notes
+    -----
+    For many applications using a sliding window view can be convenient, but
+    potentially very slow. Often specialized solutions exist, for example:
+
+    - `scipy.signal.fftconvolve`
+
+    - filtering functions in `scipy.ndimage`
+
+    - moving window functions provided by
+      `bottleneck <https://github.com/pydata/bottleneck>`_.
+
+    As a rough estimate, a sliding window approach with an input size of `N`
+    and a window size of `W` will scale as `O(N*W)`, where frequently a
+    specialized algorithm can achieve `O(N)`. That means that the sliding
+    window variant for a window size of 100 can be 100 times slower than a
+    more specialized version.
+
+    Nevertheless, for small window sizes, when no custom algorithm exists, or
+    as a prototyping and developing tool, this function can be a good solution.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib.stride_tricks import sliding_window_view
+    >>> x = np.arange(6)
+    >>> x.shape
+    (6,)
+    >>> v = sliding_window_view(x, 3)
+    >>> v.shape
+    (4, 3)
+    >>> v
+    array([[0, 1, 2],
+           [1, 2, 3],
+           [2, 3, 4],
+           [3, 4, 5]])
+
+    This also works in more dimensions, e.g.
+
+    >>> i, j = np.ogrid[:3, :4]
+    >>> x = 10*i + j
+    >>> x.shape
+    (3, 4)
+    >>> x
+    array([[ 0,  1,  2,  3],
+           [10, 11, 12, 13],
+           [20, 21, 22, 23]])
+    >>> shape = (2,2)
+    >>> v = sliding_window_view(x, shape)
+    >>> v.shape
+    (2, 3, 2, 2)
+    >>> v
+    array([[[[ 0,  1],
+             [10, 11]],
+            [[ 1,  2],
+             [11, 12]],
+            [[ 2,  3],
+             [12, 13]]],
+           [[[10, 11],
+             [20, 21]],
+            [[11, 12],
+             [21, 22]],
+            [[12, 13],
+             [22, 23]]]])
+
+    The axis can be specified explicitly:
+
+    >>> v = sliding_window_view(x, 3, 0)
+    >>> v.shape
+    (1, 4, 3)
+    >>> v
+    array([[[ 0, 10, 20],
+            [ 1, 11, 21],
+            [ 2, 12, 22],
+            [ 3, 13, 23]]])
+
+    The same axis can be used several times. In that case, every use reduces
+    the corresponding original dimension:
+
+    >>> v = sliding_window_view(x, (2, 3), (1, 1))
+    >>> v.shape
+    (3, 1, 2, 3)
+    >>> v
+    array([[[[ 0,  1,  2],
+             [ 1,  2,  3]]],
+           [[[10, 11, 12],
+             [11, 12, 13]]],
+           [[[20, 21, 22],
+             [21, 22, 23]]]])
+
+    Combining with stepped slicing (`::step`), this can be used to take sliding
+    views which skip elements:
+
+    >>> x = np.arange(7)
+    >>> sliding_window_view(x, 5)[:, ::2]
+    array([[0, 2, 4],
+           [1, 3, 5],
+           [2, 4, 6]])
+
+    or views which move by multiple elements
+
+    >>> x = np.arange(7)
+    >>> sliding_window_view(x, 3)[::2, :]
+    array([[0, 1, 2],
+           [2, 3, 4],
+           [4, 5, 6]])
+
+    A common application of `sliding_window_view` is the calculation of running
+    statistics. The simplest example is the
+    `moving average <https://en.wikipedia.org/wiki/Moving_average>`_:
+
+    >>> x = np.arange(6)
+    >>> x.shape
+    (6,)
+    >>> v = sliding_window_view(x, 3)
+    >>> v.shape
+    (4, 3)
+    >>> v
+    array([[0, 1, 2],
+           [1, 2, 3],
+           [2, 3, 4],
+           [3, 4, 5]])
+    >>> moving_average = v.mean(axis=-1)
+    >>> moving_average
+    array([1., 2., 3., 4.])
+
+    Note that a sliding window approach is often **not** optimal (see Notes).
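+
+    Other running statistics can be computed from the same windows in the
+    same way, e.g. a moving sum:
+
+    >>> v.sum(axis=-1)
+    array([ 3,  6,  9, 12])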
+ """ + window_shape = (tuple(window_shape) + if np.iterable(window_shape) + else (window_shape,)) + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=None, subok=subok) + + window_shape_array = np.array(window_shape) + if np.any(window_shape_array < 0): + raise ValueError('`window_shape` cannot contain negative values') + + if axis is None: + axis = tuple(range(x.ndim)) + if len(window_shape) != len(axis): + raise ValueError(f'Since axis is `None`, must provide ' + f'window_shape for all dimensions of `x`; ' + f'got {len(window_shape)} window_shape elements ' + f'and `x.ndim` is {x.ndim}.') + else: + axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True) + if len(window_shape) != len(axis): + raise ValueError(f'Must provide matching length window_shape and ' + f'axis; got {len(window_shape)} window_shape ' + f'elements and {len(axis)} axes elements.') + + out_strides = x.strides + tuple(x.strides[ax] for ax in axis) + + # note: same axis can be windowed repeatedly + x_shape_trimmed = list(x.shape) + for ax, dim in zip(axis, window_shape): + if x_shape_trimmed[ax] < dim: + raise ValueError( + 'window shape cannot be larger than input array shape') + x_shape_trimmed[ax] -= dim - 1 + out_shape = tuple(x_shape_trimmed) + window_shape + return as_strided(x, strides=out_strides, shape=out_shape, + subok=subok, writeable=writeable) + + +def _broadcast_to(array, shape, subok, readonly): + shape = tuple(shape) if np.iterable(shape) else (shape,) + array = np.array(array, copy=None, subok=subok) + if not shape and array.shape: + raise ValueError('cannot broadcast a non-scalar to a scalar array') + if any(size < 0 for size in shape): + raise ValueError('all elements of broadcast shape must be non-' + 'negative') + extras = [] + it = np.nditer( + (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, + op_flags=['readonly'], itershape=shape, order='C') + with it: + # never really has writebackifcopy semantics + broadcast = it.itviews[0] + result = _maybe_view_as_subclass(array, broadcast) + # In a future version this will go away + if not readonly and array.flags._writeable_no_warn: + result.flags.writeable = True + result.flags._warn_on_write = True + return result + + +def _broadcast_to_dispatcher(array, shape, subok=None): + return (array,) + + +@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') +def broadcast_to(array, shape, subok=False): + """Broadcast an array to a new shape. + + Parameters + ---------- + array : array_like + The array to broadcast. + shape : tuple or int + The shape of the desired array. A single integer ``i`` is interpreted + as ``(i,)``. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + + Returns + ------- + broadcast : array + A readonly view on the original array with the given shape. It is + typically not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. + + Raises + ------ + ValueError + If the array is not compatible with the new shape according to NumPy's + broadcasting rules. 
+ + See Also + -------- + broadcast + broadcast_arrays + broadcast_shapes + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> np.broadcast_to(x, (3, 3)) + array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + return _broadcast_to(array, shape, subok=subok, readonly=True) + + +def _broadcast_shape(*args): + """Returns the shape of the arrays that would result from broadcasting the + supplied arrays against each other. + """ + # use the old-iterator because np.nditer does not handle size 0 arrays + # consistently + b = np.broadcast(*args[:32]) + # unfortunately, it cannot handle 32 or more arguments directly + for pos in range(32, len(args), 31): + # ironically, np.broadcast does not properly handle np.broadcast + # objects (it treats them as scalars) + # use broadcasting to avoid allocating the full array + b = broadcast_to(0, b.shape) + b = np.broadcast(b, *args[pos:(pos + 31)]) + return b.shape + + +_size0_dtype = np.dtype([]) + + +@set_module('numpy') +def broadcast_shapes(*args): + """ + Broadcast the input shapes into a single shape. + + :ref:`Learn more about broadcasting here `. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + *args : tuples of ints, or ints + The shapes to be broadcast against each other. + + Returns + ------- + tuple + Broadcasted shape. + + Raises + ------ + ValueError + If the shapes are not compatible and cannot be broadcast according + to NumPy's broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_to + + Examples + -------- + >>> import numpy as np + >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) + (3, 2) + + >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) + (5, 6, 7) + """ + arrays = [np.empty(x, dtype=_size0_dtype) for x in args] + return _broadcast_shape(*arrays) + + +def _broadcast_arrays_dispatcher(*args, subok=None): + return args + + +@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') +def broadcast_arrays(*args, subok=False): + """ + Broadcast any number of arrays against each other. + + Parameters + ---------- + *args : array_likes + The arrays to broadcast. + + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned arrays will be forced to be a base-class array (default). + + Returns + ------- + broadcasted : tuple of arrays + These arrays are views on the original arrays. They are typically + not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. If you need + to write to the arrays, make copies first. While you can set the + ``writable`` flag True, writing to a single output value may end up + changing more than one location in the output array. + + .. deprecated:: 1.17 + The output is currently marked so that if written to, a deprecation + warning will be emitted. A future version will set the + ``writable`` flag False so writing to it will raise an error. + + See Also + -------- + broadcast + broadcast_to + broadcast_shapes + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3]]) + >>> y = np.array([[4],[5]]) + >>> np.broadcast_arrays(x, y) + (array([[1, 2, 3], + [1, 2, 3]]), + array([[4, 4, 4], + [5, 5, 5]])) + + Here is a useful idiom for getting contiguous copies instead of + non-contiguous views. + + >>> [np.array(a) for a in np.broadcast_arrays(x, y)] + [array([[1, 2, 3], + [1, 2, 3]]), + array([[4, 4, 4], + [5, 5, 5]])] + + """ + # nditer is not used here to avoid the limit of 32 arrays. 
+ # Otherwise, something like the following one-liner would suffice: + # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], + # order='C').itviews + + args = [np.array(_m, copy=None, subok=subok) for _m in args] + + shape = _broadcast_shape(*args) + + result = [array if array.shape == shape + else _broadcast_to(array, shape, subok=subok, readonly=False) + for array in args] + return tuple(result) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.pyi new file mode 100644 index 0000000..a7005d7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.pyi @@ -0,0 +1,74 @@ +from collections.abc import Iterable +from typing import Any, SupportsIndex, TypeVar, overload + +from numpy import generic +from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike + +__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +class DummyArray: + __array_interface__: dict[str, Any] + base: NDArray[Any] | None + def __init__( + self, + interface: dict[str, Any], + base: NDArray[Any] | None = ..., + ) -> None: ... + +@overload +def as_strided( + x: _ArrayLike[_ScalarT], + shape: Iterable[int] | None = ..., + strides: Iterable[int] | None = ..., + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def as_strided( + x: ArrayLike, + shape: Iterable[int] | None = ..., + strides: Iterable[int] | None = ..., + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[Any]: ... + +@overload +def sliding_window_view( + x: _ArrayLike[_ScalarT], + window_shape: int | Iterable[int], + axis: SupportsIndex | None = ..., + *, + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def sliding_window_view( + x: ArrayLike, + window_shape: int | Iterable[int], + axis: SupportsIndex | None = ..., + *, + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[Any]: ... + +@overload +def broadcast_to( + array: _ArrayLike[_ScalarT], + shape: int | Iterable[int], + subok: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def broadcast_to( + array: ArrayLike, + shape: int | Iterable[int], + subok: bool = ..., +) -> NDArray[Any]: ... + +def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... + +def broadcast_arrays( + *args: ArrayLike, + subok: bool = ..., +) -> tuple[NDArray[Any], ...]: ... 
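+
+# A minimal usage sketch for the public helpers stubbed above (illustrative
+# only, not part of the stub file; it assumes nothing beyond NumPy itself):
+#
+#     >>> import numpy as np
+#     >>> from numpy.lib.stride_tricks import sliding_window_view
+#     >>> np.broadcast_shapes((2, 1, 3), (1, 4, 1))
+#     (2, 4, 3)
+#     >>> a, b = np.broadcast_arrays(np.arange(3), np.arange(2)[:, None])
+#     >>> a.shape, b.shape
+#     ((2, 3), (2, 3))
+#     >>> windows = sliding_window_view(np.arange(5), 2)
+#     >>> windows.mean(axis=-1)
+#     array([0.5, 1.5, 2.5, 3.5])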
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.py new file mode 100644 index 0000000..dc6a558 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.py @@ -0,0 +1,1201 @@ +""" Basic functions for manipulating 2d arrays + +""" +import functools +import operator + +from numpy._core import iinfo, overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.numeric import ( + arange, + asanyarray, + asarray, + diagonal, + empty, + greater_equal, + indices, + int8, + int16, + int32, + int64, + intp, + multiply, + nonzero, + ones, + promote_types, + where, + zeros, +) +from numpy._core.overrides import finalize_array_function_like, set_module +from numpy.lib._stride_tricks_impl import broadcast_to + +__all__ = [ + 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', + 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', + 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +i1 = iinfo(int8) +i2 = iinfo(int16) +i4 = iinfo(int32) + + +def _min_int(low, high): + """ get small int that fits the range """ + if high <= i1.max and low >= i1.min: + return int8 + if high <= i2.max and low >= i2.min: + return int16 + if high <= i4.max and low >= i4.min: + return int32 + return int64 + + +def _flip_dispatcher(m): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def fliplr(m): + """ + Reverse the order of elements along axis 1 (left/right). + + For a 2-D array, this flips the entries in each row in the left/right + direction. Columns are preserved, but appear in a different order than + before. + + Parameters + ---------- + m : array_like + Input array, must be at least 2-D. + + Returns + ------- + f : ndarray + A view of `m` with the columns reversed. Since a view + is returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + flipud : Flip array in the up/down direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``. + Requires the array to be at least 2-D. + + Examples + -------- + >>> import numpy as np + >>> A = np.diag([1.,2.,3.]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.fliplr(A) + array([[0., 0., 1.], + [0., 2., 0.], + [3., 0., 0.]]) + + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(2,3,5)) + >>> np.all(np.fliplr(A) == A[:,::-1,...]) + True + + """ + m = asanyarray(m) + if m.ndim < 2: + raise ValueError("Input must be >= 2-d.") + return m[:, ::-1] + + +@array_function_dispatch(_flip_dispatcher) +def flipud(m): + """ + Reverse the order of elements along axis 0 (up/down). + + For a 2-D array, this flips the entries in each column in the up/down + direction. Rows are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + out : array_like + A view of `m` with the rows reversed. Since a view is + returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + fliplr : Flip array in the left/right direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``. + Requires the array to be at least 1-D. 
+ + Examples + -------- + >>> import numpy as np + >>> A = np.diag([1.0, 2, 3]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.flipud(A) + array([[0., 0., 3.], + [0., 2., 0.], + [1., 0., 0.]]) + + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(2,3,5)) + >>> np.all(np.flipud(A) == A[::-1,...]) + True + + >>> np.flipud([1,2]) + array([2, 1]) + + """ + m = asanyarray(m) + if m.ndim < 1: + raise ValueError("Input must be >= 1-d.") + return m[::-1, ...] + + +@finalize_array_function_like +@set_module('numpy') +def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`. + k : int, optional + Index of the diagonal: 0 (the default) refers to the main diagonal, + a positive value refers to an upper diagonal, and a negative value + to a lower diagonal. + dtype : data-type, optional + Data-type of the returned array. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + I : ndarray of shape (N,M) + An array where all elements are equal to zero, except for the `k`-th + diagonal, whose values are equal to one. + + See Also + -------- + identity : (almost) equivalent function + diag : diagonal 2-D array from a 1-D array specified by the user. + + Examples + -------- + >>> import numpy as np + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + if like is not None: + return _eye_with_like( + like, N, M=M, k=k, dtype=dtype, order=order, device=device + ) + if M is None: + M = N + m = zeros((N, M), dtype=dtype, order=order, device=device) + if k >= M: + return m + # Ensure M and k are integers, so we don't get any surprise casting + # results in the expressions `M-k` and `M+1` used below. This avoids + # a problem with inputs with type (for example) np.uint64. + M = operator.index(M) + k = operator.index(k) + if k >= 0: + i = k + else: + i = (-k) * M + m[:M - k].flat[i::M + 1] = 1 + return m + + +_eye_with_like = array_function_dispatch()(eye) + + +def _diag_dispatcher(v, k=None): + return (v,) + + +@array_function_dispatch(_diag_dispatcher) +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + See the more detailed documentation for ``numpy.diagonal`` if you use this + function to extract a diagonal and wish to write to the resulting array; + whether it returns a copy or a view depends on what version of numpy you + are using. + + Parameters + ---------- + v : array_like + If `v` is a 2-D array, return a copy of its `k`-th diagonal. + If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th + diagonal. + k : int, optional + Diagonal in question. The default is 0. Use `k>0` for diagonals + above the main diagonal, and `k<0` for diagonals below the main + diagonal. + + Returns + ------- + out : ndarray + The extracted diagonal or constructed diagonal array. + + See Also + -------- + diagonal : Return specified diagonals. 
+ diagflat : Create a 2-D array with the flattened input as a diagonal. + trace : Sum along diagonals. + triu : Upper triangle of an array. + tril : Lower triangle of an array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(x, k=1) + array([1, 5]) + >>> np.diag(x, k=-1) + array([3, 7]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + v = asanyarray(v) + s = v.shape + if len(s) == 1: + n = s[0] + abs(k) + res = zeros((n, n), v.dtype) + if k >= 0: + i = k + else: + i = (-k) * n + res[:n - k].flat[i::n + 1] = v + return res + elif len(s) == 2: + return diagonal(v, k) + else: + raise ValueError("Input must be 1- or 2-d.") + + +@array_function_dispatch(_diag_dispatcher) +def diagflat(v, k=0): + """ + Create a two-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set; 0, the default, corresponds to the "main" diagonal, + a positive (negative) `k` giving the number of the diagonal above + (below) the main. + + Returns + ------- + out : ndarray + The 2-D output array. + + See Also + -------- + diag : MATLAB work-alike for 1-D and 2-D arrays. + diagonal : Return specified diagonals. + trace : Sum along diagonals. + + Examples + -------- + >>> import numpy as np + >>> np.diagflat([[1,2], [3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) + + """ + conv = _array_converter(v) + v, = conv.as_arrays(subok=False) + v = v.ravel() + s = len(v) + n = s + abs(k) + res = zeros((n, n), v.dtype) + if (k >= 0): + i = arange(0, n - k, dtype=intp) + fi = i + k + i * n + else: + i = arange(0, n + k, dtype=intp) + fi = i + (i - k) * n + res.flat[fi] = v + + return conv.wrap(res) + + +@finalize_array_function_like +@set_module('numpy') +def tri(N, M=None, k=0, dtype=float, *, like=None): + """ + An array with ones at and below the given diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken equal to `N`. + k : int, optional + The sub-diagonal at and below which the array is filled. + `k` = 0 is the main diagonal, while `k` < 0 is below it, + and `k` > 0 is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + tri : ndarray of shape (N, M) + Array with its lower triangle filled with ones and zero elsewhere; + in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise. 
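+
+    Notes
+    -----
+    The mask is built with a single outer comparison: entry ``(i, j)`` is
+    ``arange(N)[i] >= arange(-k, M - k)[j]``, i.e. ``i >= j - k``, which is
+    exactly the ``j <= i + k`` condition above.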
+ + Examples + -------- + >>> import numpy as np + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0.], + [1., 1., 0., 0., 0.]]) + + """ + if like is not None: + return _tri_with_like(like, N, M=M, k=k, dtype=dtype) + + if M is None: + M = N + + m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), + arange(-k, M - k, dtype=_min_int(-k, M - k))) + + # Avoid making a copy if the requested type is already bool + m = m.astype(dtype, copy=False) + + return m + + +_tri_with_like = array_function_dispatch()(tri) + + +def _trilu_dispatcher(m, k=None): + return (m,) + + +@array_function_dispatch(_trilu_dispatcher) +def tril(m, k=0): + """ + Lower triangle of an array. + + Return a copy of an array with elements above the `k`-th diagonal zeroed. + For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two + axes. + + Parameters + ---------- + m : array_like, shape (..., M, N) + Input array. + k : int, optional + Diagonal above which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + tril : ndarray, shape (..., M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu : same thing, only for the upper triangle + + Examples + -------- + >>> import numpy as np + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 0, 0, 0, 0], + [ 5, 6, 0, 0, 0], + [10, 11, 12, 0, 0], + [15, 16, 17, 18, 0]], + [[20, 0, 0, 0, 0], + [25, 26, 0, 0, 0], + [30, 31, 32, 0, 0], + [35, 36, 37, 38, 0]], + [[40, 0, 0, 0, 0], + [45, 46, 0, 0, 0], + [50, 51, 52, 0, 0], + [55, 56, 57, 58, 0]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) + + +@array_function_dispatch(_trilu_dispatcher) +def triu(m, k=0): + """ + Upper triangle of an array. + + Return a copy of an array with the elements below the `k`-th diagonal + zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the + final two axes. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> import numpy as np + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 1, 2, 3, 4], + [ 0, 6, 7, 8, 9], + [ 0, 0, 12, 13, 14], + [ 0, 0, 0, 18, 19]], + [[20, 21, 22, 23, 24], + [ 0, 26, 27, 28, 29], + [ 0, 0, 32, 33, 34], + [ 0, 0, 0, 38, 39]], + [[40, 41, 42, 43, 44], + [ 0, 46, 47, 48, 49], + [ 0, 0, 52, 53, 54], + [ 0, 0, 0, 58, 59]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k - 1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) + + +def _vander_dispatcher(x, N=None, increasing=None): + return (x,) + + +# Originally borrowed from John Hunter and matplotlib +@array_function_dispatch(_vander_dispatcher) +def vander(x, N=None, increasing=False): + """ + Generate a Vandermonde matrix. + + The columns of the output matrix are powers of the input vector. The + order of the powers is determined by the `increasing` boolean argument. 
+ Specifically, when `increasing` is False, the `i`-th output column is + the input vector raised element-wise to the power of ``N - i - 1``. Such + a matrix with a geometric progression in each row is named for Alexandre- + Theophile Vandermonde. + + Parameters + ---------- + x : array_like + 1-D input array. + N : int, optional + Number of columns in the output. If `N` is not specified, a square + array is returned (``N = len(x)``). + increasing : bool, optional + Order of the powers of the columns. If True, the powers increase + from left to right, if False (the default) they are reversed. + + Returns + ------- + out : ndarray + Vandermonde matrix. If `increasing` is False, the first column is + ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is + True, the columns are ``x^0, x^1, ..., x^(N-1)``. + + See Also + -------- + polynomial.polynomial.polyvander + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> x = np.array([1, 2, 3, 5]) + >>> np.vander(x) + array([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) + array([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + + The determinant of a square Vandermonde matrix is the product + of the differences between the values of the input vector: + + >>> np.linalg.det(np.vander(x)) + 48.000000000000043 # may vary + >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) + 48 + + """ + x = asarray(x) + if x.ndim != 1: + raise ValueError("x must be a one-dimensional array or sequence.") + if N is None: + N = len(x) + + v = empty((len(x), N), dtype=promote_types(x.dtype, int)) + tmp = v[:, ::-1] if not increasing else v + + if N > 0: + tmp[:, 0] = 1 + if N > 1: + tmp[:, 1:] = x[:, None] + multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) + + return v + + +def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None, + weights=None): + yield x + yield y + + # This terrible logic is adapted from the checks in histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + if N == 2: + yield from bins # bins=[x, y] + else: + yield bins + + yield weights + + +@array_function_dispatch(_histogram2d_dispatcher) +def histogram2d(x, y, bins=10, range=None, density=None, weights=None): + """ + Compute the bi-dimensional histogram of two data samples. + + Parameters + ---------- + x : array_like, shape (N,) + An array containing the x coordinates of the points to be + histogrammed. + y : array_like, shape (N,) + An array containing the y coordinates of the points to be + histogrammed. + bins : int or array_like or [int, int] or [array, array], optional + The bin specification: + + * If int, the number of bins for the two dimensions (nx=ny=bins). + * If array_like, the bin edges for the two dimensions + (x_edges=y_edges=bins). + * If [int, int], the number of bins in each dimension + (nx, ny = bins). + * If [array, array], the bin edges in each dimension + (x_edges, y_edges = bins). + * A combination [int, array] or [array, int], where int + is the number of bins and array is the bin edges. + + range : array_like, shape(2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + ``[[xmin, xmax], [ymin, ymax]]``. 
All values outside of this range
+        will be considered outliers and not tallied in the histogram.
+    density : bool, optional
+        If False, the default, returns the number of samples in each bin.
+        If True, returns the probability *density* function at the bin,
+        ``bin_count / sample_count / bin_area``.
+    weights : array_like, shape(N,), optional
+        An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
+        Weights are normalized to 1 if `density` is True. If `density` is
+        False, the values of the returned histogram are equal to the sum of
+        the weights belonging to the samples falling into each bin.
+
+    Returns
+    -------
+    H : ndarray, shape(nx, ny)
+        The bi-dimensional histogram of samples `x` and `y`. Values in `x`
+        are histogrammed along the first dimension and values in `y` are
+        histogrammed along the second dimension.
+    xedges : ndarray, shape(nx+1,)
+        The bin edges along the first dimension.
+    yedges : ndarray, shape(ny+1,)
+        The bin edges along the second dimension.
+
+    See Also
+    --------
+    histogram : 1D histogram
+    histogramdd : Multidimensional histogram
+
+    Notes
+    -----
+    When `density` is True, then the returned histogram is the sample
+    density, defined such that the sum over bins of the product
+    ``bin_value * bin_area`` is 1.
+
+    Please note that the histogram does not follow the Cartesian convention
+    where `x` values are on the abscissa and `y` values on the ordinate
+    axis. Rather, `x` is histogrammed along the first dimension of the
+    array (vertical), and `y` along the second dimension of the array
+    (horizontal). This ensures compatibility with `histogramdd`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from matplotlib.image import NonUniformImage
+    >>> import matplotlib.pyplot as plt
+
+    Construct a 2-D histogram with variable bin width. First define the bin
+    edges:
+
+    >>> xedges = [0, 1, 3, 5]
+    >>> yedges = [0, 2, 3, 4, 6]
+
+    Next we create a histogram H with random bin content:
+
+    >>> x = np.random.normal(2, 1, 100)
+    >>> y = np.random.normal(1, 1, 100)
+    >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
+    >>> # Histogram does not follow Cartesian convention (see Notes),
+    >>> # therefore transpose H for visualization purposes.
+    >>> H = H.T
+
+    :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
+
+    >>> fig = plt.figure(figsize=(7, 3))
+    >>> ax = fig.add_subplot(131, title='imshow: square bins')
+    >>> plt.imshow(H, interpolation='nearest', origin='lower',
+    ...         extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
+    <matplotlib.image.AxesImage object at 0x...>
+
+    :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
+
+    >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
+    ...         aspect='equal')
+    >>> X, Y = np.meshgrid(xedges, yedges)
+    >>> ax.pcolormesh(X, Y, H)
+    <matplotlib.collections.QuadMesh object at 0x...>
+
+    :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
+    display actual bin edges with interpolation:
+
+    >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
+    ...         aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
+    >>> im = NonUniformImage(ax, interpolation='bilinear')
+    >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
+    >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
+    >>> im.set_data(xcenters, ycenters, H)
+    >>> ax.add_image(im)
+    >>> plt.show()
+
+    It is also possible to construct a 2-D histogram without specifying bin
+    edges:
+
+    >>> # Generate non-symmetric test data
+    >>> n = 10000
+    >>> x = np.linspace(1, 100, n)
+    >>> y = 2*np.log(x) + np.random.rand(n) - 0.5
+    >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges
+    >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)
+
+    Now we can plot the histogram using
+    :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a
+    :func:`hexbin <matplotlib.axes.Axes.hexbin>` for comparison.
+
+    >>> # Plot histogram using pcolormesh
+    >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
+    >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')
+    >>> ax1.plot(x, 2*np.log(x), 'k-')
+    >>> ax1.set_xlim(x.min(), x.max())
+    >>> ax1.set_ylim(y.min(), y.max())
+    >>> ax1.set_xlabel('x')
+    >>> ax1.set_ylabel('y')
+    >>> ax1.set_title('histogram2d')
+    >>> ax1.grid()
+
+    >>> # Create hexbin plot for comparison
+    >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')
+    >>> ax2.plot(x, 2*np.log(x), 'k-')
+    >>> ax2.set_title('hexbin')
+    >>> ax2.set_xlim(x.min(), x.max())
+    >>> ax2.set_xlabel('x')
+    >>> ax2.grid()
+
+    >>> plt.show()
+    """
+    from numpy import histogramdd
+
+    if len(x) != len(y):
+        raise ValueError('x and y must have the same length.')
+
+    try:
+        N = len(bins)
+    except TypeError:
+        N = 1
+
+    if N not in {1, 2}:
+        xedges = yedges = asarray(bins)
+        bins = [xedges, yedges]
+    hist, edges = histogramdd([x, y], bins, range, density, weights)
+    return hist, edges[0], edges[1]
+
+
+@set_module('numpy')
+def mask_indices(n, mask_func, k=0):
+    """
+    Return the indices to access (n, n) arrays, given a masking function.
+
+    Assume `mask_func` is a function that, for a square array a of size
+    ``(n, n)`` with a possible offset argument `k`, when called as
+    ``mask_func(a, k)`` returns a new array with zeros in certain locations
+    (functions like `triu` or `tril` do precisely this). Then this function
+    returns the indices where the non-zero values would be located.
+
+    Parameters
+    ----------
+    n : int
+        The returned indices will be valid to access arrays of shape (n, n).
+    mask_func : callable
+        A function whose call signature is similar to that of `triu`, `tril`.
+        That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
+        `k` is an optional argument to the function.
+    k : scalar
+        An optional argument which is passed through to `mask_func`. Functions
+        like `triu`, `tril` take a second argument that is interpreted as an
+        offset.
+
+    Returns
+    -------
+    indices : tuple of arrays.
+        The `n` arrays of indices corresponding to the locations where
+        ``mask_func(np.ones((n, n)), k)`` is True.
+
+    See Also
+    --------
+    triu, tril, triu_indices, tril_indices
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    These are the indices that would allow you to access the upper triangular
+    part of any 3x3 array:
+
+    >>> iu = np.mask_indices(3, np.triu)
+
+    For example, if `a` is a 3x3 array:
+
+    >>> a = np.arange(9).reshape(3, 3)
+    >>> a
+    array([[0, 1, 2],
+           [3, 4, 5],
+           [6, 7, 8]])
+    >>> a[iu]
+    array([0, 1, 2, 4, 5, 8])
+
+    An offset can also be passed to the masking function. This gets us the
+    indices starting on the first diagonal right of the main one:
+
+    >>> iu1 = np.mask_indices(3, np.triu, 1)
+
+    with which we now extract only three elements:
+
+    >>> a[iu1]
+    array([1, 2, 5])
+
+    """
+    m = ones((n, n), int)
+    a = mask_func(m, k)
+    return nonzero(a != 0)
+
+
+@set_module('numpy')
+def tril_indices(n, k=0, m=None):
+    """
+    Return the indices for the lower-triangle of an (n, m) array.
+
+    Parameters
+    ----------
+    n : int
+        The row dimension of the arrays for which the returned
+        indices will be valid.
+    k : int, optional
+        Diagonal offset (see `tril` for details).
+    m : int, optional
+        The column dimension of the arrays for which the returned
+        arrays will be valid.
+        By default `m` is taken equal to `n`.
+
+
+    Returns
+    -------
+    inds : tuple of arrays
+        The row and column indices, respectively. The row indices are sorted
+        in non-decreasing order, and the corresponding column indices are
+        strictly increasing for each row.
+
+    See also
+    --------
+    triu_indices : similar function, for upper-triangular.
+    mask_indices : generic function accepting an arbitrary mask function.
+    tril, triu
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Compute two different sets of indices to access 4x4 arrays, one for the
+    lower triangular part starting at the main diagonal, and one starting two
+    diagonals further right:
+
+    >>> il1 = np.tril_indices(4)
+    >>> il1
+    (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+    Note that row indices (first array) are non-decreasing, and the corresponding
+    column indices (second array) are strictly increasing for each row.
+    Here is how they can be used with a sample array:
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Both for indexing:
+
+    >>> a[il1]
+    array([ 0,  4,  5, ..., 13, 14, 15])
+
+    And for assigning values:
+
+    >>> a[il1] = -1
+    >>> a
+    array([[-1,  1,  2,  3],
+           [-1, -1,  6,  7],
+           [-1, -1, -1, 11],
+           [-1, -1, -1, -1]])
+
+    These cover almost the whole array (two diagonals right of the main one):
+
+    >>> il2 = np.tril_indices(4, 2)
+    >>> a[il2] = -10
+    >>> a
+    array([[-10, -10, -10,   3],
+           [-10, -10, -10, -10],
+           [-10, -10, -10, -10],
+           [-10, -10, -10, -10]])
+
+    """
+    tri_ = tri(n, m, k=k, dtype=bool)
+
+    return tuple(broadcast_to(inds, tri_.shape)[tri_]
+                 for inds in indices(tri_.shape, sparse=True))
+
+
+def _trilu_indices_form_dispatcher(arr, k=None):
+    return (arr,)
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def tril_indices_from(arr, k=0):
+    """
+    Return the indices for the lower-triangle of arr.
+
+    See `tril_indices` for full details.
+
+    Parameters
+    ----------
+    arr : array_like
+        The indices will be valid for square arrays whose dimensions are
+        the same as arr.
+    k : int, optional
+        Diagonal offset (see `tril` for details).
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Create a 4 by 4 array
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Pass the array to get the indices of the lower triangular elements.
+
+    >>> trili = np.tril_indices_from(a)
+    >>> trili
+    (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+    >>> a[trili]
+    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])
+
+    This is syntactic sugar for tril_indices().
+
+    >>> np.tril_indices(a.shape[0])
+    (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+    Use the `k` parameter to return the indices for the lower triangular array
+    up to the k-th diagonal.
+
+    >>> trili1 = np.tril_indices_from(a, k=1)
+    >>> a[trili1]
+    array([ 0,  1,  4,  5,  6,  8,  9, 10, 11, 12, 13, 14, 15])
+
+    See Also
+    --------
+    tril_indices, tril, triu_indices_from
+    """
+    if arr.ndim != 2:
+        raise ValueError("input array must be 2-d")
+    return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+
+
+@set_module('numpy')
+def triu_indices(n, k=0, m=None):
+    """
+    Return the indices for the upper-triangle of an (n, m) array.
+
+    Parameters
+    ----------
+    n : int
+        The size of the arrays for which the returned indices will
+        be valid.
+    k : int, optional
+        Diagonal offset (see `triu` for details).
+    m : int, optional
+        The column dimension of the arrays for which the returned
+        arrays will be valid.
+        By default `m` is taken equal to `n`.
+
+
+    Returns
+    -------
+    inds : tuple, shape(2) of ndarrays, shape(`n`)
+        The row and column indices, respectively. The row indices are sorted
+        in non-decreasing order, and the corresponding column indices are
+        strictly increasing for each row.
+
+    See also
+    --------
+    tril_indices : similar function, for lower-triangular.
+    mask_indices : generic function accepting an arbitrary mask function.
+    triu, tril
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Compute two different sets of indices to access 4x4 arrays, one for the
+    upper triangular part starting at the main diagonal, and one starting two
+    diagonals further right:
+
+    >>> iu1 = np.triu_indices(4)
+    >>> iu1
+    (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+    Note that row indices (first array) are non-decreasing, and the corresponding
+    column indices (second array) are strictly increasing for each row.
+
+    Here is how they can be used with a sample array:
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Both for indexing:
+
+    >>> a[iu1]
+    array([ 0,  1,  2, ..., 10, 11, 15])
+
+    And for assigning values:
+
+    >>> a[iu1] = -1
+    >>> a
+    array([[-1, -1, -1, -1],
+           [ 4, -1, -1, -1],
+           [ 8,  9, -1, -1],
+           [12, 13, 14, -1]])
+
+    These cover only a small part of the whole array (two diagonals right
+    of the main one):
+
+    >>> iu2 = np.triu_indices(4, 2)
+    >>> a[iu2] = -10
+    >>> a
+    array([[ -1,  -1, -10, -10],
+           [  4,  -1,  -1, -10],
+           [  8,   9,  -1,  -1],
+           [ 12,  13,  14,  -1]])
+
+    """
+    tri_ = ~tri(n, m, k=k - 1, dtype=bool)
+
+    return tuple(broadcast_to(inds, tri_.shape)[tri_]
+                 for inds in indices(tri_.shape, sparse=True))
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def triu_indices_from(arr, k=0):
+    """
+    Return the indices for the upper-triangle of arr.
+
+    See `triu_indices` for full details.
+
+    Parameters
+    ----------
+    arr : ndarray, shape(N, N)
+        The indices will be valid for square arrays.
+    k : int, optional
+        Diagonal offset (see `triu` for details).
+
+    Returns
+    -------
+    triu_indices_from : tuple, shape(2) of ndarray, shape(N)
+        Indices for the upper-triangle of `arr`.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Create a 4 by 4 array
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Pass the array to get the indices of the upper triangular elements.
+
+    >>> triui = np.triu_indices_from(a)
+    >>> triui
+    (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+    >>> a[triui]
+    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])
+
+    This is syntactic sugar for triu_indices().
+
+    >>> np.triu_indices(a.shape[0])
+    (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+    Use the `k` parameter to return the indices for the upper triangular array
+    from the k-th diagonal.
+ + >>> triuim1 = np.triu_indices_from(a, k=1) + >>> a[triuim1] + array([ 1, 2, 3, 6, 7, 11]) + + + See Also + -------- + triu_indices, triu, tril_indices_from + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.pyi new file mode 100644 index 0000000..43df38e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.pyi @@ -0,0 +1,438 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + TypeAlias, + TypeVar, + overload, +) +from typing import ( + Literal as L, +) + +import numpy as np +from numpy import ( + _OrderCF, + complex128, + complexfloating, + datetime64, + float64, + floating, + generic, + int_, + intp, + object_, + signedinteger, + timedelta64, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _DTypeLike, + _SupportsArray, + _SupportsArrayFunc, +) + +__all__ = [ + "diag", + "diagflat", + "eye", + "fliplr", + "flipud", + "tri", + "triu", + "tril", + "vander", + "histogram2d", + "mask_indices", + "tril_indices", + "tril_indices_from", + "triu_indices", + "triu_indices_from", +] + +### + +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ComplexFloatingT = TypeVar("_ComplexFloatingT", bound=np.complexfloating) +_InexactT = TypeVar("_InexactT", bound=np.inexact) +_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) + +# The returned arrays dtype must be compatible with `np.equal` +_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]] + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] + +### + +@overload +def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def fliplr(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def flipud(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def eye( + N: int, + M: int | None = ..., + k: int = ..., + dtype: None = ..., + order: _OrderCF = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def eye( + N: int, + M: int | None, + k: int, + dtype: _DTypeLike[_ScalarT], + order: _OrderCF = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def eye( + N: int, + M: int | None = ..., + k: int = ..., + *, + dtype: _DTypeLike[_ScalarT], + order: _OrderCF = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... 
+@overload +def eye( + N: int, + M: int | None = ..., + k: int = ..., + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def diag(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +@overload +def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def diagflat(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +@overload +def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def tri( + N: int, + M: int | None = ..., + k: int = ..., + dtype: None = ..., + *, + like: _SupportsArrayFunc | None = ... +) -> NDArray[float64]: ... +@overload +def tri( + N: int, + M: int | None, + k: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ... +) -> NDArray[_ScalarT]: ... +@overload +def tri( + N: int, + M: int | None = ..., + k: int = ..., + *, + dtype: _DTypeLike[_ScalarT], + like: _SupportsArrayFunc | None = ... +) -> NDArray[_ScalarT]: ... +@overload +def tri( + N: int, + M: int | None = ..., + k: int = ..., + dtype: DTypeLike = ..., + *, + like: _SupportsArrayFunc | None = ... +) -> NDArray[Any]: ... + +@overload +def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... + +@overload +def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... + +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeInt_co, + N: int | None = ..., + increasing: bool = ..., +) -> NDArray[signedinteger]: ... +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeFloat_co, + N: int | None = ..., + increasing: bool = ..., +) -> NDArray[floating]: ... +@overload +def vander( + x: _ArrayLikeComplex_co, + N: int | None = ..., + increasing: bool = ..., +) -> NDArray[complexfloating]: ... +@overload +def vander( + x: _ArrayLikeObject_co, + N: int | None = ..., + increasing: bool = ..., +) -> NDArray[object_]: ... + +@overload +def histogram2d( + x: _ArrayLike1D[_ComplexFloatingT], + y: _ArrayLike1D[_ComplexFloatingT | _Float_co], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_ComplexFloatingT], + NDArray[_ComplexFloatingT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_ComplexFloatingT | _Float_co], + y: _ArrayLike1D[_ComplexFloatingT], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_ComplexFloatingT], + NDArray[_ComplexFloatingT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT | _Int_co], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_InexactT], + NDArray[_InexactT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT | _Int_co], + y: _ArrayLike1D[_InexactT], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_InexactT], + NDArray[_InexactT], +]: ... 
+@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[float64], + NDArray[float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[complex128 | float64], + NDArray[complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_NumberCoT], + NDArray[_NumberCoT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_NumberCoT | _InexactT], + NDArray[_NumberCoT | _InexactT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_NumberCoT | float64], + NDArray[_NumberCoT | float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_NumberCoT | complex128 | float64], + NDArray[_NumberCoT | complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[bool]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.bool], + NDArray[np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[int]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.int_ | np.bool], + NDArray[np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[float]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.float64 | np.int_ | np.bool], + NDArray[np.float64 | np.int_ | np.bool], +]: ... 
+@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[complex]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], +]: ... + +# NOTE: we're assuming/demanding here the `mask_func` returns +# an ndarray of shape `(n, n)`; otherwise there is the possibility +# of the output tuple having more or less than 2 elements +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[int], + k: int = ..., +) -> tuple[NDArray[intp], NDArray[intp]]: ... +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[_T], + k: _T, +) -> tuple[NDArray[intp], NDArray[intp]]: ... + +def tril_indices( + n: int, + k: int = ..., + m: int | None = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def tril_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices( + n: int, + k: int = ..., + m: int | None = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.py new file mode 100644 index 0000000..977609c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.py @@ -0,0 +1,699 @@ +"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py + +""" +import functools + +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'mintypecode', + 'common_type'] + +import numpy._core.numeric as _nx +from numpy._core import getlimits, overrides +from numpy._core.numeric import asanyarray, asarray, isnan, zeros +from numpy._utils import set_module + +from ._ufunclike_impl import isneginf, isposinf + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' + + +@set_module('numpy') +def mintypecode(typechars, typeset='GDFgdf', default='d'): + """ + Return the character for the minimum-size type to which given types can + be safely cast. + + The returned type character must represent the smallest size dtype such + that an array of the returned type can handle the data from an array of + all types in `typechars` (or if `typechars` is an array, then its + dtype.char). + + Parameters + ---------- + typechars : list of str or array_like + If a list of strings, each string should represent a dtype. + If array_like, the character representation of the array dtype is used. + typeset : str or list of str, optional + The set of characters that the returned character is chosen from. + The default set is 'GDFgdf'. + default : str, optional + The default character, this is returned if none of the characters in + `typechars` matches a character in `typeset`. + + Returns + ------- + typechar : str + The character representing the minimum-size type that was found. 
+ + See Also + -------- + dtype + + Examples + -------- + >>> import numpy as np + >>> np.mintypecode(['d', 'f', 'S']) + 'd' + >>> x = np.array([1.1, 2-3.j]) + >>> np.mintypecode(x) + 'D' + + >>> np.mintypecode('abceh', default='G') + 'G' + + """ + typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char + for t in typechars) + intersection = {t for t in typecodes if t in typeset} + if not intersection: + return default + if 'F' in intersection and 'd' in intersection: + return 'D' + return min(intersection, key=_typecodes_by_elsize.index) + + +def _real_dispatcher(val): + return (val,) + + +@array_function_dispatch(_real_dispatcher) +def real(val): + """ + Return the real part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The real component of the complex argument. If `val` is real, the type + of `val` is used for the output. If `val` has complex elements, the + returned type is float. + + See Also + -------- + real_if_close, imag, angle + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.real + array([1., 3., 5.]) + >>> a.real = 9 + >>> a + array([9.+2.j, 9.+4.j, 9.+6.j]) + >>> a.real = np.array([9, 8, 7]) + >>> a + array([9.+2.j, 8.+4.j, 7.+6.j]) + >>> np.real(1 + 1j) + 1.0 + + """ + try: + return val.real + except AttributeError: + return asanyarray(val).real + + +def _imag_dispatcher(val): + return (val,) + + +@array_function_dispatch(_imag_dispatcher) +def imag(val): + """ + Return the imaginary part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The imaginary component of the complex argument. If `val` is real, + the type of `val` is used for the output. If `val` has complex + elements, the returned type is float. + + See Also + -------- + real, angle, real_if_close + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.imag + array([2., 4., 6.]) + >>> a.imag = np.array([8, 10, 12]) + >>> a + array([1. +8.j, 3.+10.j, 5.+12.j]) + >>> np.imag(1 + 1j) + 1.0 + + """ + try: + return val.imag + except AttributeError: + return asanyarray(val).imag + + +def _is_type_dispatcher(x): + return (x,) + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplex(x): + """ + Returns a bool array, where True if input element is complex. + + What is tested is whether the input has a non-zero imaginary part, not if + the input type is complex. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray of bools + Output array. + + See Also + -------- + isreal + iscomplexobj : Return True if x is a complex type or an array of complex + numbers. + + Examples + -------- + >>> import numpy as np + >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) + array([ True, False, False, False, False, True]) + + """ + ax = asanyarray(x) + if issubclass(ax.dtype.type, _nx.complexfloating): + return ax.imag != 0 + res = zeros(ax.shape, bool) + return res[()] # convert to scalar if needed + + +@array_function_dispatch(_is_type_dispatcher) +def isreal(x): + """ + Returns a bool array, where True if input element is real. + + If element has complex type with zero imaginary part, the return value + for that element is True. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray, bool + Boolean array of same shape as `x`. 
+ + Notes + ----- + `isreal` may behave unexpectedly for string or object arrays (see examples). + + See Also + -------- + iscomplex + isrealobj : Return True if x is not a complex type. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> np.isreal(a) + array([False, True, True, True, True, False]) + + The function does not work on string arrays. + + >>> a = np.array([2j, "a"], dtype="U") + >>> np.isreal(a) # Warns about non-elementwise comparison + False + + Returns True for all elements in input array of ``dtype=object`` even if + any of the elements is complex. + + >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> np.isreal(a) + array([ True, True, True]) + + `isreal` should not be used with object arrays: + + >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> np.isreal(a) + array([ True, True]) + + """ + return imag(x) == 0 + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplexobj(x): + """ + Check for a complex type or an array of complex numbers. + + The type of the input is checked, not the value. Even if the input + has an imaginary part equal to zero, `iscomplexobj` evaluates to True. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + iscomplexobj : bool + The return value, True if `x` is of a complex type or has at least + one complex element. + + See Also + -------- + isrealobj, iscomplex + + Examples + -------- + >>> import numpy as np + >>> np.iscomplexobj(1) + False + >>> np.iscomplexobj(1+0j) + True + >>> np.iscomplexobj([3, 1+0j, True]) + True + + """ + try: + dtype = x.dtype + type_ = dtype.type + except AttributeError: + type_ = asarray(x).dtype.type + return issubclass(type_, _nx.complexfloating) + + +@array_function_dispatch(_is_type_dispatcher) +def isrealobj(x): + """ + Return True if x is neither a complex type nor an array of complex numbers. + + The type of the input is checked, not the value. So even if the input + has an imaginary part equal to zero, `isrealobj` evaluates to False + if the data type is complex. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + y : bool + The return value, False if `x` is of a complex type. + + See Also + -------- + iscomplexobj, isreal + + Notes + ----- + The function is only meant for arrays with numerical values but it + accepts all other objects. Since it assumes array input, the return + value of other objects may be True. + + >>> np.isrealobj('A string') + True + >>> np.isrealobj(False) + True + >>> np.isrealobj(None) + True + + Examples + -------- + >>> import numpy as np + >>> np.isrealobj(1) + True + >>> np.isrealobj(1+0j) + False + >>> np.isrealobj([3, 1+0j, True]) + False + + """ + return not iscomplexobj(x) + +#----------------------------------------------------------------------------- + +def _getmaxmin(t): + from numpy._core import getlimits + f = getlimits.finfo(t) + return f.max, f.min + + +def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): + return (x,) + + +@array_function_dispatch(_nan_to_num_dispatcher) +def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): + """ + Replace NaN with zero and infinity with large finite numbers (default + behaviour) or with the numbers defined by the user using the `nan`, + `posinf` and/or `neginf` keywords.
+ + If `x` is inexact, NaN is replaced by zero or by the user defined value in + `nan` keyword, infinity is replaced by the largest finite floating point + values representable by ``x.dtype`` or by the user defined value in + `posinf` keyword and -infinity is replaced by the most negative finite + floating point values representable by ``x.dtype`` or by the user defined + value in `neginf` keyword. + + For complex dtypes, the above is applied to each of the real and + imaginary components of `x` separately. + + If `x` is not inexact, then no replacements are made. + + Parameters + ---------- + x : scalar or array_like + Input data. + copy : bool, optional + Whether to create a copy of `x` (True) or to replace values + in-place (False). The in-place operation only occurs if + casting to an array does not require a copy. + Default is True. + nan : int, float, optional + Value to be used to fill NaN values. If no value is passed + then NaN values will be replaced with 0.0. + posinf : int, float, optional + Value to be used to fill positive infinity values. If no value is + passed then positive infinity values will be replaced with a very + large number. + neginf : int, float, optional + Value to be used to fill negative infinity values. If no value is + passed then negative infinity values will be replaced with a very + small (or negative) number. + + Returns + ------- + out : ndarray + `x`, with the non-finite values replaced. If `copy` is False, this may + be `x` itself. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity. + isneginf : Shows which elements are negative infinity. + isposinf : Shows which elements are positive infinity. + isnan : Shows which elements are Not a Number (NaN). + isfinite : Shows which elements are finite (not NaN, not infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + + Examples + -------- + >>> import numpy as np + >>> np.nan_to_num(np.inf) + 1.7976931348623157e+308 + >>> np.nan_to_num(-np.inf) + -1.7976931348623157e+308 + >>> np.nan_to_num(np.nan) + 0.0 + >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) + >>> np.nan_to_num(x) + array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary + -1.28000000e+002, 1.28000000e+002]) + >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, + -1.2800000e+02, 1.2800000e+02]) + >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) + >>> np.nan_to_num(y) + array([ 1.79769313e+308 +0.00000000e+000j, # may vary + 0.00000000e+000 +0.00000000e+000j, + 0.00000000e+000 +1.79769313e+308j]) + >>> np.nan_to_num(y, nan=111111, posinf=222222) + array([222222.+111111.j, 111111.
+0.j, 111111.+222222.j]) + """ + x = _nx.array(x, subok=True, copy=copy) + xtype = x.dtype.type + + isscalar = (x.ndim == 0) + + if not issubclass(xtype, _nx.inexact): + return x[()] if isscalar else x + + iscomplex = issubclass(xtype, _nx.complexfloating) + + dest = (x.real, x.imag) if iscomplex else (x,) + maxf, minf = _getmaxmin(x.real.dtype) + if posinf is not None: + maxf = posinf + if neginf is not None: + minf = neginf + for d in dest: + idx_nan = isnan(d) + idx_posinf = isposinf(d) + idx_neginf = isneginf(d) + _nx.copyto(d, nan, where=idx_nan) + _nx.copyto(d, maxf, where=idx_posinf) + _nx.copyto(d, minf, where=idx_neginf) + return x[()] if isscalar else x + +#----------------------------------------------------------------------------- + +def _real_if_close_dispatcher(a, tol=None): + return (a,) + + +@array_function_dispatch(_real_if_close_dispatcher) +def real_if_close(a, tol=100): + """ + If input is complex with all imaginary parts close to zero, return + real parts. + + "Close to zero" is defined as `tol` * (machine epsilon of the type for + `a`). + + Parameters + ---------- + a : array_like + Input array. + tol : float + Tolerance in machine epsilons for the complex part of the elements + in the array. If the tolerance is <=1, then the absolute tolerance + is used. + + Returns + ------- + out : ndarray + If `a` is real, the type of `a` is used for the output. If `a` + has complex elements, the returned type is float. + + See Also + -------- + real, imag, angle + + Notes + ----- + Machine epsilon varies from machine to machine and between data types + but Python floats on most platforms have a machine epsilon equal to + 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print + out the machine epsilon for floats. + + Examples + -------- + >>> import numpy as np + >>> np.finfo(float).eps + 2.2204460492503131e-16 # may vary + + >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) + array([2.1, 5.2]) + >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) + array([2.1+4.e-13j, 5.2 + 3e-15j]) + + """ + a = asanyarray(a) + type_ = a.dtype.type + if not issubclass(type_, _nx.complexfloating): + return a + if tol > 1: + f = getlimits.finfo(type_) + tol = f.eps * tol + if _nx.all(_nx.absolute(a.imag) < tol): + a = a.real + return a + + +#----------------------------------------------------------------------------- + +_namefromtype = {'S1': 'character', + '?': 'bool', + 'b': 'signed char', + 'B': 'unsigned char', + 'h': 'short', + 'H': 'unsigned short', + 'i': 'integer', + 'I': 'unsigned integer', + 'l': 'long integer', + 'L': 'unsigned long integer', + 'q': 'long long integer', + 'Q': 'unsigned long long integer', + 'f': 'single precision', + 'd': 'double precision', + 'g': 'long precision', + 'F': 'complex single precision', + 'D': 'complex double precision', + 'G': 'complex long double precision', + 'S': 'string', + 'U': 'unicode', + 'V': 'void', + 'O': 'object' + } + +@set_module('numpy') +def typename(char): + """ + Return a description for the given data type code. + + Parameters + ---------- + char : str + Data type code. + + Returns + ------- + out : str + Description of the input data type code. + + See Also + -------- + dtype + + Examples + -------- + >>> import numpy as np + >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', + ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] + >>> for typechar in typechars: + ... print(typechar, ' : ', np.typename(typechar)) + ... + S1 : character + ? 
: bool + B : unsigned char + D : complex double precision + G : complex long double precision + F : complex single precision + I : unsigned integer + H : unsigned short + L : unsigned long integer + O : object + Q : unsigned long long integer + S : string + U : unicode + V : void + b : signed char + d : double precision + g : long precision + f : single precision + i : integer + h : short + l : long integer + q : long long integer + + """ + return _namefromtype[char] + +#----------------------------------------------------------------------------- + + +#determine the "minimum common type" for a group of arrays. +array_type = [[_nx.float16, _nx.float32, _nx.float64, _nx.longdouble], + [None, _nx.complex64, _nx.complex128, _nx.clongdouble]] +array_precision = {_nx.float16: 0, + _nx.float32: 1, + _nx.float64: 2, + _nx.longdouble: 3, + _nx.complex64: 1, + _nx.complex128: 2, + _nx.clongdouble: 3} + + +def _common_type_dispatcher(*arrays): + return arrays + + +@array_function_dispatch(_common_type_dispatcher) +def common_type(*arrays): + """ + Return a scalar type which is common to the input arrays. + + The return type will always be an inexact (i.e. floating point) scalar + type, even if all the arrays are integer arrays. If one of the inputs is + an integer array, the minimum precision type that is returned is a + 64-bit floating point dtype. + + All input arrays except int64 and uint64 can be safely cast to the + returned dtype without loss of information. + + Parameters + ---------- + array1, array2, ... : ndarrays + Input arrays. + + Returns + ------- + out : data type code + Data type code. + + See Also + -------- + dtype, mintypecode + + Examples + -------- + >>> np.common_type(np.arange(2, dtype=np.float32)) + <class 'numpy.float32'> + >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) + <class 'numpy.float64'> + >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) + <class 'numpy.complex128'> + + """ + is_complex = False + precision = 0 + for a in arrays: + t = a.dtype.type + if iscomplexobj(a): + is_complex = True + if issubclass(t, _nx.integer): + p = 2 # array_precision[_nx.double] + else: + p = array_precision.get(t) + if p is None: + raise TypeError("can't get common type for non-numeric array") + precision = max(precision, p) + if is_complex: + return array_type[1][precision] + else: + return array_type[0][precision] diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.pyi new file mode 100644 index 0000000..944015e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.pyi @@ -0,0 +1,350 @@ +from collections.abc import Container, Iterable +from typing import Any, Protocol, TypeAlias, overload, type_check_only +from typing import Literal as L + +from _typeshed import Incomplete +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + ArrayLike, + NDArray, + _16Bit, + _32Bit, + _64Bit, + _ArrayLike, + _NestedSequence, + _ScalarLike_co, + _SupportsArray, +) + +__all__ = [ + "common_type", + "imag", + "iscomplex", + "iscomplexobj", + "isreal", + "isrealobj", + "mintypecode", + "nan_to_num", + "real", + "real_if_close", + "typename", +] + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool) + +_FloatMax32: TypeAlias = np.float32 | np.float16 +_ComplexMax128: TypeAlias =
np.complex128 | np.complex64 +_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer +_Real: TypeAlias = np.floating | np.integer +_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16 +_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer + +@type_check_only +class _HasReal(Protocol[_T_co]): + @property + def real(self, /) -> _T_co: ... + +@type_check_only +class _HasImag(Protocol[_T_co]): + @property + def imag(self, /) -> _T_co: ... + +@type_check_only +class _HasDType(Protocol[_ScalarT_co]): + @property + def dtype(self, /) -> np.dtype[_ScalarT_co]: ... + +### + +def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[str] = "GDFgdf", default: str = "d") -> str: ... + +# +@overload +def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap] +@overload +def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +@overload +def real(val: ArrayLike) -> NDArray[Any]: ... + +# +@overload +def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap] +@overload +def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +@overload +def imag(val: ArrayLike) -> NDArray[Any]: ... + +# +@overload +def iscomplex(x: _ScalarLike_co) -> np.bool: ... +@overload +def iscomplex(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ... +@overload +def iscomplex(x: ArrayLike) -> np.bool | NDArray[np.bool]: ... + +# +@overload +def isreal(x: _ScalarLike_co) -> np.bool: ... +@overload +def isreal(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ... +@overload +def isreal(x: ArrayLike) -> np.bool | NDArray[np.bool]: ... + +# +def iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ... +def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ... + +# +@overload +def nan_to_num( + x: _ScalarT, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> _ScalarT: ... +@overload +def nan_to_num( + x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def nan_to_num( + x: _SupportsArray[np.dtype[_ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def nan_to_num( + x: _NestedSequence[ArrayLike], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[Incomplete]: ... +@overload +def nan_to_num( + x: ArrayLike, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> Incomplete: ... + +# NOTE: The [overload-overlap] mypy error is a false positive +@overload +def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap] +@overload +def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... +@overload +def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ... +@overload +def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ... +@overload +def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... + +# +@overload +def typename(char: L['S1']) -> L['character']: ... +@overload +def typename(char: L['?']) -> L['bool']: ... 
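+# Each `typename` overload maps one dtype character literal to the literal
+# description string returned at runtime, e.g. (a quick sketch):
+#
+#   >>> import numpy as np
+#   >>> np.typename('d')
+#   'double precision'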
+@overload +def typename(char: L['b']) -> L['signed char']: ... +@overload +def typename(char: L['B']) -> L['unsigned char']: ... +@overload +def typename(char: L['h']) -> L['short']: ... +@overload +def typename(char: L['H']) -> L['unsigned short']: ... +@overload +def typename(char: L['i']) -> L['integer']: ... +@overload +def typename(char: L['I']) -> L['unsigned integer']: ... +@overload +def typename(char: L['l']) -> L['long integer']: ... +@overload +def typename(char: L['L']) -> L['unsigned long integer']: ... +@overload +def typename(char: L['q']) -> L['long long integer']: ... +@overload +def typename(char: L['Q']) -> L['unsigned long long integer']: ... +@overload +def typename(char: L['f']) -> L['single precision']: ... +@overload +def typename(char: L['d']) -> L['double precision']: ... +@overload +def typename(char: L['g']) -> L['long precision']: ... +@overload +def typename(char: L['F']) -> L['complex single precision']: ... +@overload +def typename(char: L['D']) -> L['complex double precision']: ... +@overload +def typename(char: L['G']) -> L['complex long double precision']: ... +@overload +def typename(char: L['S']) -> L['string']: ... +@overload +def typename(char: L['U']) -> L['unicode']: ... +@overload +def typename(char: L['V']) -> L['void']: ... +@overload +def typename(char: L['O']) -> L['object']: ... + +# NOTE: The [overload-overlap] mypy errors are false positives +@overload +def common_type() -> type[np.float16]: ... +@overload +def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... # type: ignore[overload-overlap] +@overload +def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... # type: ignore[overload-overlap] +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.float64 | np.integer], + /, + *ai: _HasDType[_RealMax64], +) -> type[np.float64]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.longdouble], + /, + *ai: _HasDType[_Real], +) -> type[np.longdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.complex64], + /, + *ai: _HasDType[_InexactMax32], +) -> type[np.complex64]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.complex128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.clongdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[_FloatMax32], + array1: _HasDType[np.float32], + /, + *ai: _HasDType[_FloatMax32], +) -> type[np.float32]: ... +@overload +def common_type( + a0: _HasDType[_RealMax64], + array1: _HasDType[np.float64 | np.integer], + /, + *ai: _HasDType[_RealMax64], +) -> type[np.float64]: ... +@overload +def common_type( + a0: _HasDType[_Real], + array1: _HasDType[np.longdouble], + /, + *ai: _HasDType[_Real], +) -> type[np.longdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[_InexactMax32], + array1: _HasDType[np.complex64], + /, + *ai: _HasDType[_InexactMax32], +) -> type[np.complex64]: ... +@overload +def common_type( + a0: _HasDType[np.float64], + array1: _HasDType[_ComplexMax128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... 
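+# These overloads encode the runtime promotion rule: integer inputs count
+# as 64-bit floats, so combining them with a complex array yields
+# complex128. A quick sketch:
+#
+#   >>> import numpy as np
+#   >>> np.common_type(np.arange(3), np.array([1.5j]))
+#   <class 'numpy.complex128'>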
+@overload +def common_type( + a0: _HasDType[_ComplexMax128], + array1: _HasDType[np.float64], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_NumberMax64], + array1: _HasDType[np.complex128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_ComplexMax128], + array1: _HasDType[np.complex128 | np.integer], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[np.complex128 | np.integer], + array1: _HasDType[_ComplexMax128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_Real], + /, + *ai: _HasDType[_Real], +) -> type[np.floating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.clongdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.longdouble], + array1: _HasDType[np.complexfloating], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complexfloating], + array1: _HasDType[np.longdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complexfloating], + array1: _HasDType[np.number], + /, + *ai: _HasDType[np.number], +) -> type[np.complexfloating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.complexfloating], + /, + *ai: _HasDType[np.number], +) -> type[np.complexfloating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.number], + /, + *ai: _HasDType[np.number], +) -> type[Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.py new file mode 100644 index 0000000..695aab1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.py @@ -0,0 +1,207 @@ +""" +Module of functions that are like ufuncs in acting on arrays and optionally +storing results in an output array. + +""" +__all__ = ['fix', 'isneginf', 'isposinf'] + +import numpy._core.numeric as nx +from numpy._core.overrides import array_function_dispatch + + +def _dispatcher(x, out=None): + return (x, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def fix(x, out=None): + """ + Round to nearest integer towards zero. + + Round an array of floats element-wise to nearest integer towards zero. + The rounded values have the same data-type as the input. + + Parameters + ---------- + x : array_like + An array to be rounded + out : ndarray, optional + A location into which the result is stored. If provided, it must have + a shape that the input broadcasts to. If not provided or None, a + freshly-allocated array is returned. + + Returns + ------- + out : ndarray of floats + An array with the same dimensions and data-type as the input. + If second argument is not supplied then a new array is returned + with the rounded values. + + If a second argument is supplied the result is stored there. + The return value ``out`` is then a reference to that array. 
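+
+    For instance, an existing array can be used for ``out``; the rounded
+    values are then written into it and returned (a small sketch):
+
+    >>> buf = np.empty(2)
+    >>> np.fix([1.7, -1.7], out=buf)
+    array([ 1., -1.])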
+ + See Also + -------- + rint, trunc, floor, ceil + around : Round to given number of decimals + + Examples + -------- + >>> import numpy as np + >>> np.fix(3.14) + 3.0 + >>> np.fix(3) + 3 + >>> np.fix([2.1, 2.9, -2.1, -2.9]) + array([ 2., 2., -2., -2.]) + + """ + # promote back to an array if flattened + res = nx.asanyarray(nx.ceil(x, out=out)) + res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) + + # when no out argument is passed and no subclasses are involved, flatten + # scalars + if out is None and type(res) is nx.ndarray: + res = res[()] + return res + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isposinf(x, out=None): + """ + Test element-wise for positive infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a boolean array is returned + with values True where the corresponding element of the input is + positive infinity and values False where the element of the input is + not positive infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as zeros + and ones, if the type is boolean then as False and True. + The return value `out` is then a reference to that array. + + See Also + -------- + isinf, isneginf, isfinite, isnan + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values + + Examples + -------- + >>> import numpy as np + >>> np.isposinf(np.inf) + True + >>> np.isposinf(-np.inf) + False + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + is_inf = nx.isinf(x) + try: + signbit = ~nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isneginf(x, out=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. 
If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `out` is then a reference to that array. + + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values. + + Examples + -------- + >>> import numpy as np + >>> np.isneginf(-np.inf) + True + >>> np.isneginf(np.inf) + False + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + is_inf = nx.isinf(x) + try: + signbit = nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.pyi new file mode 100644 index 0000000..a673f05 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.pyi @@ -0,0 +1,67 @@ +from typing import Any, TypeVar, overload + +import numpy as np +from numpy import floating, object_ +from numpy._typing import ( + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, + _FloatLike_co, +) + +__all__ = ["fix", "isneginf", "isposinf"] + +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +@overload +def fix( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> floating: ... +@overload +def fix( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating]: ... +@overload +def fix( + x: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def fix( + x: _ArrayLikeFloat_co | _ArrayLikeObject_co, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def isposinf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> np.bool: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def isneginf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> np.bool: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: _ArrayT, +) -> _ArrayT: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.py new file mode 100644 index 0000000..f3a6c0f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.py @@ -0,0 +1,299 @@ +""" +Container class for backward compatibility with NumArray. + +The user_array.container class exists for backward compatibility with NumArray +and is not meant to be used in new code. If you need to create an array +container class, we recommend either creating a class that wraps an ndarray +or subclasses ndarray. 
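+
+A minimal usage sketch (importing from this implementation module):
+
+    >>> from numpy.lib._user_array_impl import container
+    >>> c = container([1, 2, 3])
+    >>> c + 1
+    container([2, 3, 4])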
+ +""" +from numpy._core import ( + absolute, + add, + arange, + array, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + divide, + equal, + greater, + greater_equal, + invert, + left_shift, + less, + less_equal, + multiply, + not_equal, + power, + remainder, + reshape, + right_shift, + shape, + sin, + sqrt, + subtract, + transpose, +) +from numpy._core.overrides import set_module + + +@set_module("numpy.lib.user_array") +class container: + """ + container(data, dtype=None, copy=True) + + Standard container-class for easy multiple-inheritance. + + Methods + ------- + copy + byteswap + astype + + """ + def __init__(self, data, dtype=None, copy=True): + self.array = array(data, dtype, copy=copy) + + def __repr__(self): + if self.ndim > 0: + return self.__class__.__name__ + repr(self.array)[len("array"):] + else: + return self.__class__.__name__ + "(" + repr(self.array) + ")" + + def __array__(self, t=None): + if t: + return self.array.astype(t) + return self.array + + # Array as sequence + def __len__(self): + return len(self.array) + + def __getitem__(self, index): + return self._rc(self.array[index]) + + def __setitem__(self, index, value): + self.array[index] = asarray(value, self.dtype) + + def __abs__(self): + return self._rc(absolute(self.array)) + + def __neg__(self): + return self._rc(-self.array) + + def __add__(self, other): + return self._rc(self.array + asarray(other)) + + __radd__ = __add__ + + def __iadd__(self, other): + add(self.array, other, self.array) + return self + + def __sub__(self, other): + return self._rc(self.array - asarray(other)) + + def __rsub__(self, other): + return self._rc(asarray(other) - self.array) + + def __isub__(self, other): + subtract(self.array, other, self.array) + return self + + def __mul__(self, other): + return self._rc(multiply(self.array, asarray(other))) + + __rmul__ = __mul__ + + def __imul__(self, other): + multiply(self.array, other, self.array) + return self + + def __mod__(self, other): + return self._rc(remainder(self.array, other)) + + def __rmod__(self, other): + return self._rc(remainder(other, self.array)) + + def __imod__(self, other): + remainder(self.array, other, self.array) + return self + + def __divmod__(self, other): + return (self._rc(divide(self.array, other)), + self._rc(remainder(self.array, other))) + + def __rdivmod__(self, other): + return (self._rc(divide(other, self.array)), + self._rc(remainder(other, self.array))) + + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + + def __ipow__(self, other): + power(self.array, other, self.array) + return self + + def __lshift__(self, other): + return self._rc(left_shift(self.array, other)) + + def __rshift__(self, other): + return self._rc(right_shift(self.array, other)) + + def __rlshift__(self, other): + return self._rc(left_shift(other, self.array)) + + def __rrshift__(self, other): + return self._rc(right_shift(other, self.array)) + + def __ilshift__(self, other): + left_shift(self.array, other, self.array) + return self + + def __irshift__(self, other): + right_shift(self.array, other, self.array) + return self + + def __and__(self, other): + return self._rc(bitwise_and(self.array, other)) + + def __rand__(self, other): + return self._rc(bitwise_and(other, self.array)) + + def __iand__(self, other): + bitwise_and(self.array, other, self.array) + return self + + def __xor__(self, other): + return self._rc(bitwise_xor(self.array, other)) + + def 
__rxor__(self, other): + return self._rc(bitwise_xor(other, self.array)) + + def __ixor__(self, other): + bitwise_xor(self.array, other, self.array) + return self + + def __or__(self, other): + return self._rc(bitwise_or(self.array, other)) + + def __ror__(self, other): + return self._rc(bitwise_or(other, self.array)) + + def __ior__(self, other): + bitwise_or(self.array, other, self.array) + return self + + def __pos__(self): + return self._rc(self.array) + + def __invert__(self): + return self._rc(invert(self.array)) + + def _scalarfunc(self, func): + if self.ndim == 0: + return func(self[0]) + else: + raise TypeError( + "only rank-0 arrays can be converted to Python scalars.") + + def __complex__(self): + return self._scalarfunc(complex) + + def __float__(self): + return self._scalarfunc(float) + + def __int__(self): + return self._scalarfunc(int) + + def __hex__(self): + return self._scalarfunc(hex) + + def __oct__(self): + return self._scalarfunc(oct) + + def __lt__(self, other): + return self._rc(less(self.array, other)) + + def __le__(self, other): + return self._rc(less_equal(self.array, other)) + + def __eq__(self, other): + return self._rc(equal(self.array, other)) + + def __ne__(self, other): + return self._rc(not_equal(self.array, other)) + + def __gt__(self, other): + return self._rc(greater(self.array, other)) + + def __ge__(self, other): + return self._rc(greater_equal(self.array, other)) + + def copy(self): + "" + return self._rc(self.array.copy()) + + def tobytes(self): + "" + return self.array.tobytes() + + def byteswap(self): + "" + return self._rc(self.array.byteswap()) + + def astype(self, typecode): + "" + return self._rc(self.array.astype(typecode)) + + def _rc(self, a): + if len(shape(a)) == 0: + return a + else: + return self.__class__(a) + + def __array_wrap__(self, *args): + return self.__class__(args[0]) + + def __setattr__(self, attr, value): + if attr == 'array': + object.__setattr__(self, attr, value) + return + try: + self.array.__setattr__(attr, value) + except AttributeError: + object.__setattr__(self, attr, value) + + # Only called after other approaches fail. + def __getattr__(self, attr): + if (attr == 'array'): + return object.__getattribute__(self, attr) + return self.array.__getattribute__(attr) + + +############################################################# +# Test of class container +############################################################# +if __name__ == '__main__': + temp = reshape(arange(10000), (100, 100)) + + ua = container(temp) + # new object created begin test + print(dir(ua)) + print(shape(ua), ua.shape) # I have changed Numeric.py + + ua_small = ua[:3, :5] + print(ua_small) + # this did not change ua[0,0], which is not normal behavior + ua_small[0, 0] = 10 + print(ua_small[0, 0], ua[0, 0]) + print(sin(ua_small) / 3. * 6. 
+ sqrt(ua_small ** 2)) + print(less(ua_small, 103), type(less(ua_small, 103))) + print(type(ua_small * reshape(arange(15), shape(ua_small)))) + print(reshape(ua_small, (5, 3))) + print(transpose(ua_small)) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.pyi new file mode 100644 index 0000000..13c0a01 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.pyi @@ -0,0 +1,225 @@ +from types import EllipsisType +from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload + +from _typeshed import Incomplete +from typing_extensions import TypeVar, override + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeInt_co, + _DTypeLike, +) + +### + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) +_RealContainerT = TypeVar( + "_RealContainerT", + bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], +) +_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) + +_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] + +_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None +_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] +_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] + +### + +class container(Generic[_ShapeT_co, _DTypeT_co]): + array: np.ndarray[_ShapeT_co, _DTypeT_co] + + @overload + def __init__( + self, + /, + data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: npt.ArrayLike, + dtype: _DTypeLike[_ScalarT], + copy: bool = True, + ) -> None: ... + @overload + def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ... + + # + def __complex__(self, /) -> complex: ... + def __float__(self, /) -> float: ... + def __int__(self, /) -> int: ... + def __hex__(self, /) -> str: ... + def __oct__(self, /) -> str: ... + + # + @override + def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... 
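+    # Unlike np.ndarray, the equality and ordering operators are all typed
+    # to return a boolean `container`, matching the runtime implementation,
+    # which wraps `equal`, `less`, etc. via `_rc`. A quick sketch:
+    #
+    #   >>> (container([1, 2]) == container([1, 3])).array
+    #   array([ True, False])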
+ def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + + # + def __len__(self, /) -> int: ... + + # keep in sync with np.ndarray + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndexSlices, /) -> container[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> Any: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype]: ... + + # keep in sync with np.ndarray + @overload + def __setitem__(self, index: _ToIndices, value: object, /) -> None: ... + @overload + def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ... + + # keep in sync with np.ndarray + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + @overload + def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + + # + def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + + # TODO(jorenham): complete these binary ops + + # + def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __iadd__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __isub__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imul__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imod__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + + # + def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __ipow__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... 
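+
+    # The shift operators are typed to produce integer containers, and the
+    # in-place variants return Self, mirroring e.g. `left_shift(self.array,
+    # other, self.array)` in the implementation. A quick sketch:
+    #
+    #   >>> (container([1, 2]) << 2).array
+    #   array([4, 8])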
+ + # + def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __and__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __rand__ = __and__ + @overload + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __xor__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __rxor__ = __xor__ + @overload + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __or__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __ror__ = __or__ + @overload + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + + # + @overload + def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... + + # + def copy(self, /) -> Self: ... + def tobytes(self, /) -> bytes: ... + def byteswap(self, /) -> Self: ... + def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.py new file mode 100644 index 0000000..2e1ee23 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.py @@ -0,0 +1,779 @@ +import functools +import os +import platform +import sys +import textwrap +import types +import warnings + +import numpy as np +from numpy._core import ndarray +from numpy._utils import set_module + +__all__ = [ + 'get_include', 'info', 'show_runtime' +] + + +@set_module('numpy') +def show_runtime(): + """ + Print information about various resources in the system + including available intrinsic support and BLAS/LAPACK library + in use + + .. versionadded:: 1.24.0 + + See Also + -------- + show_config : Show libraries in the system on which NumPy was built. + + Notes + ----- + 1. Information is derived with the help of `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_ + library if available. + 2.
SIMD related information is derived from ``__cpu_features__``, + ``__cpu_baseline__`` and ``__cpu_dispatch__`` + + """ + from pprint import pprint + + from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, + ) + config_found = [{ + "numpy_version": np.__version__, + "python": sys.version, + "uname": platform.uname(), + }] + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + config_found.append({ + "simd_extensions": { + "baseline": __cpu_baseline__, + "found": features_found, + "not_found": features_not_found + } + }) + try: + from threadpoolctl import threadpool_info + config_found.extend(threadpool_info()) + except ImportError: + print("WARNING: `threadpoolctl` not found in system!" + " Install it by `pip install threadpoolctl`." + " Once installed, try `np.show_runtime` again" + " for more detailed build information") + pprint(config_found) + + +@set_module('numpy') +def get_include(): + """ + Return the directory that contains the NumPy \\*.h header files. + + Extension modules that need to compile against NumPy may need to use this + function to locate the appropriate include directory. + + Notes + ----- + When using ``setuptools``, for example in ``setup.py``:: + + import numpy as np + ... + Extension('extension_name', ... + include_dirs=[np.get_include()]) + ... + + Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using + that is likely preferred for build systems other than ``setuptools``:: + + $ numpy-config --cflags + -I/path/to/site-packages/numpy/_core/include + + # Or rely on pkg-config: + $ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir) + $ pkg-config --cflags + -I/path/to/site-packages/numpy/_core/include + + Examples + -------- + >>> np.get_include() + '.../site-packages/numpy/core/include' # may vary + + """ + import numpy + if numpy.show_config is None: + # running from numpy source directory + d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include') + else: + # using installed numpy core headers + import numpy._core as _core + d = os.path.join(os.path.dirname(_core.__file__), 'include') + return d + + +class _Deprecate: + """ + Decorator class to deprecate old functions. + + Refer to `deprecate` for details. + + See Also + -------- + deprecate + + """ + + def __init__(self, old_name=None, new_name=None, message=None): + self.old_name = old_name + self.new_name = new_name + self.message = message + + def __call__(self, func, *args, **kwargs): + """ + Decorator call. Refer to ``decorate``. + + """ + old_name = self.old_name + new_name = self.new_name + message = self.message + + if old_name is None: + old_name = func.__name__ + if new_name is None: + depdoc = f"`{old_name}` is deprecated!" + else: + depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!" + + if message is not None: + depdoc += "\n" + message + + @functools.wraps(func) + def newfunc(*args, **kwds): + warnings.warn(depdoc, DeprecationWarning, stacklevel=2) + return func(*args, **kwds) + + newfunc.__name__ = old_name + doc = func.__doc__ + if doc is None: + doc = depdoc + else: + lines = doc.expandtabs().split('\n') + indent = _get_indent(lines[1:]) + if lines[0].lstrip(): + # Indent the original first line to let inspect.cleandoc() + # dedent the docstring despite the deprecation notice. 
+ doc = indent * ' ' + doc + else: + # Remove the same leading blank lines as cleandoc() would. + skip = len(lines[0]) + 1 + for line in lines[1:]: + if len(line) > indent: + break + skip += len(line) + 1 + doc = doc[skip:] + depdoc = textwrap.indent(depdoc, ' ' * indent) + doc = f'{depdoc}\n\n{doc}' + newfunc.__doc__ = doc + + return newfunc + + +def _get_indent(lines): + """ + Determines the leading whitespace that could be removed from all the lines. + """ + indent = sys.maxsize + for line in lines: + content = len(line.lstrip()) + if content: + indent = min(indent, len(line) - content) + if indent == sys.maxsize: + indent = 0 + return indent + + +def deprecate(*args, **kwargs): + """ + Issues a DeprecationWarning, adds warning to `old_name`'s + docstring, rebinds ``old_name.__name__`` and returns the new + function object. + + This function may also be used as a decorator. + + .. deprecated:: 2.0 + Use `~warnings.warn` with :exc:`DeprecationWarning` instead. + + Parameters + ---------- + func : function + The function to be deprecated. + old_name : str, optional + The name of the function to be deprecated. Default is None, in + which case the name of `func` is used. + new_name : str, optional + The new name for the function. Default is None, in which case the + deprecation message is that `old_name` is deprecated. If given, the + deprecation message is that `old_name` is deprecated and `new_name` + should be used instead. + message : str, optional + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + old_func : function + The deprecated function. + + Examples + -------- + Note that ``olduint`` returns a value after printing Deprecation + Warning: + + >>> olduint = np.lib.utils.deprecate(np.uint) + DeprecationWarning: `uint64` is deprecated! # may vary + >>> olduint(6) + 6 + + """ + # Deprecate may be run as a function or as a decorator + # If run as a function, we initialise the decorator class + # and execute its __call__ method. + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`deprecate` is deprecated, " + "use `warn` with `DeprecationWarning` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + if args: + fn = args[0] + args = args[1:] + + return _Deprecate(*args, **kwargs)(fn) + else: + return _Deprecate(*args, **kwargs) + + +def deprecate_with_doc(msg): + """ + Deprecates a function and includes the deprecation in its docstring. + + .. deprecated:: 2.0 + Use `~warnings.warn` with :exc:`DeprecationWarning` instead. + + This function is used as a decorator. It returns an object that can be + used to issue a DeprecationWarning, by passing the to-be decorated + function as argument, this adds warning to the to-be decorated function's + docstring and returns the new function object. + + See Also + -------- + deprecate : Decorate a function such that it issues a + :exc:`DeprecationWarning` + + Parameters + ---------- + msg : str + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + obj : object + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`deprecate` is deprecated, " + "use `warn` with `DeprecationWarning` instead. 
" + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + return _Deprecate(message=msg) + + +#----------------------------------------------------------------------------- + + +# NOTE: pydoc defines a help function which works similarly to this +# except it uses a pager to take over the screen. + +# combine name and arguments and split to multiple lines of width +# characters. End lines on a comma and begin argument list indented with +# the rest of the arguments. +def _split_line(name, arguments, width): + firstwidth = len(name) + k = firstwidth + newstr = name + sepstr = ", " + arglist = arguments.split(sepstr) + for argument in arglist: + if k == firstwidth: + addstr = "" + else: + addstr = sepstr + k = k + len(argument) + len(addstr) + if k > width: + k = firstwidth + 1 + len(argument) + newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument + else: + newstr = newstr + addstr + argument + return newstr + + +_namedict = None +_dictlist = None + +# Traverse all module directories underneath globals +# to see if something is defined +def _makenamedict(module='numpy'): + module = __import__(module, globals(), locals(), []) + thedict = {module.__name__: module.__dict__} + dictlist = [module.__name__] + totraverse = [module.__dict__] + while True: + if len(totraverse) == 0: + break + thisdict = totraverse.pop(0) + for x in thisdict.keys(): + if isinstance(thisdict[x], types.ModuleType): + modname = thisdict[x].__name__ + if modname not in dictlist: + moddict = thisdict[x].__dict__ + dictlist.append(modname) + totraverse.append(moddict) + thedict[modname] = moddict + return thedict, dictlist + + +def _info(obj, output=None): + """Provide information about ndarray obj. + + Parameters + ---------- + obj : ndarray + Must be ndarray, not checked. + output + Where printed output goes. + + Notes + ----- + Copied over from the numarray module prior to its removal. + Adapted somewhat as only numpy is an option now. + + Called by info. + + """ + extra = "" + tic = "" + bp = lambda x: x + cls = getattr(obj, '__class__', type(obj)) + nm = getattr(cls, '__name__', cls) + strides = obj.strides + endian = obj.dtype.byteorder + + if output is None: + output = sys.stdout + + print("class: ", nm, file=output) + print("shape: ", obj.shape, file=output) + print("strides: ", strides, file=output) + print("itemsize: ", obj.itemsize, file=output) + print("aligned: ", bp(obj.flags.aligned), file=output) + print("contiguous: ", bp(obj.flags.contiguous), file=output) + print("fortran: ", obj.flags.fortran, file=output) + print( + f"data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}", + file=output + ) + print("byteorder: ", end=' ', file=output) + if endian in ['|', '=']: + print(f"{tic}{sys.byteorder}{tic}", file=output) + byteswap = False + elif endian == '>': + print(f"{tic}big{tic}", file=output) + byteswap = sys.byteorder != "big" + else: + print(f"{tic}little{tic}", file=output) + byteswap = sys.byteorder != "little" + print("byteswap: ", bp(byteswap), file=output) + print(f"type: {obj.dtype}", file=output) + + +@set_module('numpy') +def info(object=None, maxwidth=76, output=None, toplevel='numpy'): + """ + Get help information for an array, function, class, or module. + + Parameters + ---------- + object : object or str, optional + Input object or name to get information about. If `object` is + an `ndarray` instance, information about the array is printed. + If `object` is a numpy object, its docstring is given. 
If it is + a string, available modules are searched for matching objects. + If None, information about `info` itself is returned. + maxwidth : int, optional + Printing width. + output : file like object, optional + File like object that the output is written to, default is + ``None``, in which case ``sys.stdout`` will be used. + The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional + Start search at this level. + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent + to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython + prompt. + + Examples + -------- + >>> np.info(np.polyval) # doctest: +SKIP + polyval(p, x) + Evaluate the polynomial p at x. + ... + + When using a string for `object` it is possible to get multiple results. + + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** + + When the argument is an array, information about the array is printed. + + >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64) + >>> np.info(a) + class: ndarray + shape: (2, 3) + strides: (24, 8) + itemsize: 8 + aligned: True + contiguous: True + fortran: False + data pointer: 0x562b6e0d2860 # may vary + byteorder: little + byteswap: False + type: complex64 + + """ + global _namedict, _dictlist + # Local import to speed up numpy's import time. + import inspect + import pydoc + + if (hasattr(object, '_ppimport_importer') or + hasattr(object, '_ppimport_module')): + object = object._ppimport_module + elif hasattr(object, '_ppimport_attr'): + object = object._ppimport_attr + + if output is None: + output = sys.stdout + + if object is None: + info(info) + elif isinstance(object, ndarray): + _info(object, output=output) + elif isinstance(object, str): + if _namedict is None: + _namedict, _dictlist = _makenamedict(toplevel) + numfound = 0 + objlist = [] + for namestr in _dictlist: + try: + obj = _namedict[namestr][object] + if id(obj) in objlist: + print(f"\n *** Repeat reference found in {namestr} *** ", + file=output + ) + else: + objlist.append(id(obj)) + print(f" *** Found in {namestr} ***", file=output) + info(obj) + print("-" * maxwidth, file=output) + numfound += 1 + except KeyError: + pass + if numfound == 0: + print(f"Help for {object} not found.", file=output) + else: + print("\n " + "*** Total of %d references found. 
***" % numfound, + file=output + ) + + elif inspect.isfunction(object) or inspect.ismethod(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name + arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif inspect.isclass(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name + arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc1 = inspect.getdoc(object) + if doc1 is None: + if hasattr(object, '__init__'): + print(inspect.getdoc(object.__init__), file=output) + else: + print(inspect.getdoc(object), file=output) + + methods = pydoc.allmethods(object) + + public_methods = [meth for meth in methods if meth[0] != '_'] + if public_methods: + print("\n\nMethods:\n", file=output) + for meth in public_methods: + thisobj = getattr(object, meth, None) + if thisobj is not None: + methstr, other = pydoc.splitdoc( + inspect.getdoc(thisobj) or "None" + ) + print(f" {meth} -- {methstr}", file=output) + + elif hasattr(object, '__doc__'): + print(inspect.getdoc(object), file=output) + + +def safe_eval(source): + """ + Protected string evaluation. + + .. deprecated:: 2.0 + Use `ast.literal_eval` instead. + + Evaluate a string containing a Python literal expression without + allowing the execution of arbitrary non-literal code. + + .. warning:: + + This function is identical to :py:meth:`ast.literal_eval` and + has the same security implications. It may not always be safe + to evaluate large input strings. + + Parameters + ---------- + source : str + The string to evaluate. + + Returns + ------- + obj : object + The result of evaluating `source`. + + Raises + ------ + SyntaxError + If the code has invalid Python syntax, or if it contains + non-literal code. + + Examples + -------- + >>> np.safe_eval('1') + 1 + >>> np.safe_eval('[1, 2, 3]') + [1, 2, 3] + >>> np.safe_eval('{"foo": ("bar", 10.0)}') + {'foo': ('bar', 10.0)} + + >>> np.safe_eval('import os') + Traceback (most recent call last): + ... + SyntaxError: invalid syntax + + >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') + Traceback (most recent call last): + ... + ValueError: malformed node or string: <_ast.Call object at 0x...> + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`safe_eval` is deprecated. Use `ast.literal_eval` instead. " + "Be aware of security implications, such as memory exhaustion " + "based attacks (deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + # Local import to speed up numpy's import time. + import ast + return ast.literal_eval(source) + + +def _median_nancheck(data, result, axis): + """ + Utility function to check median result from data for NaN values at the end + and return NaN in that case. Input result can also be a MaskedArray. + + Parameters + ---------- + data : array + Sorted input data to median function + result : Array or MaskedArray + Result of median function. + axis : int + Axis along which the median was computed. + + Returns + ------- + result : scalar or ndarray + Median or NaN in axes which contained NaN in the input. If the input + was an array, NaN will be inserted in-place. If a scalar, either the + input itself or a scalar NaN. 
+ """ + if data.size == 0: + return result + potential_nans = data.take(-1, axis=axis) + n = np.isnan(potential_nans) + # masked NaN values are ok, although for masked the copyto may fail for + # unmasked ones (this was always broken) when the result is a scalar. + if np.ma.isMaskedArray(n): + n = n.filled(False) + + if not n.any(): + return result + + # Without given output, it is possible that the current result is a + # numpy scalar, which is not writeable. If so, just return nan. + if isinstance(result, np.generic): + return potential_nans + + # Otherwise copy NaNs (if there are any) + np.copyto(result, potential_nans, where=n) + return result + +def _opt_info(): + """ + Returns a string containing the CPU features supported + by the current build. + + The format of the string can be explained as follows: + - Dispatched features supported by the running machine end with `*`. + - Dispatched features not supported by the running machine + end with `?`. + - Remaining features represent the baseline. + + Returns: + str: A formatted string indicating the supported CPU features. + """ + from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, + ) + + if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: + return '' + + enabled_features = ' '.join(__cpu_baseline__) + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + enabled_features += f" {feature}*" + else: + enabled_features += f" {feature}?" + + return enabled_features + +def drop_metadata(dtype, /): + """ + Returns the dtype unchanged if it contained no metadata or a copy of the + dtype if it (or any of its structure dtypes) contained metadata. + + This utility is used by `np.save` and `np.savez` to drop metadata before + saving. + + .. note:: + + Due to its limitation this function may move to a more appropriate + home or change in the future and is considered semi-public API only. + + .. warning:: + + This function does not preserve more strange things like record dtypes + and user dtypes may simply return the wrong thing. If you need to be + sure about the latter, check the result with: + ``np.can_cast(new_dtype, dtype, casting="no")``. + + """ + if dtype.fields is not None: + found_metadata = dtype.metadata is not None + + names = [] + formats = [] + offsets = [] + titles = [] + for name, field in dtype.fields.items(): + field_dt = drop_metadata(field[0]) + if field_dt is not field[0]: + found_metadata = True + + names.append(name) + formats.append(field_dt) + offsets.append(field[1]) + titles.append(None if len(field) < 3 else field[2]) + + if not found_metadata: + return dtype + + structure = { + 'names': names, 'formats': formats, 'offsets': offsets, 'titles': titles, + 'itemsize': dtype.itemsize} + + # NOTE: Could pass (dtype.type, structure) to preserve record dtypes... + return np.dtype(structure, align=dtype.isalignedstruct) + elif dtype.subdtype is not None: + # subarray dtype + subdtype, shape = dtype.subdtype + new_subdtype = drop_metadata(subdtype) + if dtype.metadata is None and new_subdtype is subdtype: + return dtype + + return np.dtype((new_subdtype, shape)) + else: + # Normal unstructured dtype + if dtype.metadata is None: + return dtype + # Note that `dt.str` doesn't round-trip e.g. for user-dtypes. 
+ return np.dtype(dtype.str) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.pyi new file mode 100644 index 0000000..00ed47c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.pyi @@ -0,0 +1,10 @@ +from _typeshed import SupportsWrite + +from numpy._typing import DTypeLike + +__all__ = ["get_include", "info", "show_runtime"] + +def get_include() -> str: ... +def show_runtime() -> None: ... +def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ... +def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_version.py b/.venv/lib/python3.12/site-packages/numpy/lib/_version.py new file mode 100644 index 0000000..f7a3538 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_version.py @@ -0,0 +1,154 @@ +"""Utility to compare (NumPy) version strings. + +The NumpyVersion class allows properly comparing numpy version strings. +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. + +""" +import re + +__all__ = ['NumpyVersion'] + + +class NumpyVersion: + """Parse and compare numpy version strings. + + NumPy has the following versioning scheme (numbers given are examples; they + can be > 9 in principle): + + - Released version: '1.8.0', '1.8.1', etc. + - Alpha: '1.8.0a1', '1.8.0a2', etc. + - Beta: '1.8.0b1', '1.8.0b2', etc. + - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. + - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) + - Development versions after a1: '1.8.0a1.dev-f1234afa', + '1.8.0b2.dev-f1234afa', + '1.8.1rc1.dev-f1234afa', etc. + - Development versions (no git hash available): '1.8.0.dev-Unknown' + + Comparing needs to be done against a valid version string or other + `NumpyVersion` instance. Note that all development versions of the same + (pre-)release compare equal. + + Parameters + ---------- + vstring : str + NumPy version string (``np.__version__``). + + Examples + -------- + >>> from numpy.lib import NumpyVersion + >>> if NumpyVersion(np.__version__) < '1.7.0': + ... print('skip') + >>> # skip + + >>> NumpyVersion('1.7') # raises ValueError, add ".0" + Traceback (most recent call last): + ... 
+ ValueError: Not a valid numpy version string + + """ + + __module__ = "numpy.lib" + + def __init__(self, vstring): + self.vstring = vstring + ver_main = re.match(r'\d+\.\d+\.\d+', vstring) + if not ver_main: + raise ValueError("Not a valid numpy version string") + + self.version = ver_main.group() + self.major, self.minor, self.bugfix = [int(x) for x in + self.version.split('.')] + if len(vstring) == ver_main.end(): + self.pre_release = 'final' + else: + alpha = re.match(r'a\d', vstring[ver_main.end():]) + beta = re.match(r'b\d', vstring[ver_main.end():]) + rc = re.match(r'rc\d', vstring[ver_main.end():]) + pre_rel = [m for m in [alpha, beta, rc] if m is not None] + if pre_rel: + self.pre_release = pre_rel[0].group() + else: + self.pre_release = '' + + self.is_devversion = bool(re.search(r'.dev', vstring)) + + def _compare_version(self, other): + """Compare major.minor.bugfix""" + if self.major == other.major: + if self.minor == other.minor: + if self.bugfix == other.bugfix: + vercmp = 0 + elif self.bugfix > other.bugfix: + vercmp = 1 + else: + vercmp = -1 + elif self.minor > other.minor: + vercmp = 1 + else: + vercmp = -1 + elif self.major > other.major: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare_pre_release(self, other): + """Compare alpha/beta/rc/final.""" + if self.pre_release == other.pre_release: + vercmp = 0 + elif self.pre_release == 'final': + vercmp = 1 + elif other.pre_release == 'final': + vercmp = -1 + elif self.pre_release > other.pre_release: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare(self, other): + if not isinstance(other, (str, NumpyVersion)): + raise ValueError("Invalid object to compare with NumpyVersion.") + + if isinstance(other, str): + other = NumpyVersion(other) + + vercmp = self._compare_version(other) + if vercmp == 0: + # Same x.y.z version, check for alpha/beta/rc + vercmp = self._compare_pre_release(other) + if vercmp == 0: + # Same version and same pre-release, check if dev version + if self.is_devversion is other.is_devversion: + vercmp = 0 + elif self.is_devversion: + vercmp = -1 + else: + vercmp = 1 + + return vercmp + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __eq__(self, other): + return self._compare(other) == 0 + + def __ne__(self, other): + return self._compare(other) != 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def __repr__(self): + return f"NumpyVersion({self.vstring})" diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_version.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_version.pyi new file mode 100644 index 0000000..c53ef79 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/_version.pyi @@ -0,0 +1,17 @@ +__all__ = ["NumpyVersion"] + +class NumpyVersion: + vstring: str + version: str + major: int + minor: int + bugfix: int + pre_release: str + is_devversion: bool + def __init__(self, vstring: str) -> None: ... + def __lt__(self, other: str | NumpyVersion) -> bool: ... + def __le__(self, other: str | NumpyVersion) -> bool: ... + def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __gt__(self, other: str | NumpyVersion) -> bool: ... + def __ge__(self, other: str | NumpyVersion) -> bool: ... 
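The comparison machinery above orders versions in three steps: the numeric triple first, then the pre-release tag ('a1' < 'b2' < 'rc1' under plain string comparison, with 'final' special-cased as greatest), and finally dev status, with dev builds sorting below the matching (pre-)release. A short sketch of what that yields for a pre-release dev string; the values follow the parsing rules shown above, and the example is illustrative usage, not part of the patch:

import numpy as np
from numpy.lib import NumpyVersion

v = NumpyVersion('1.8.0b2.dev-f1234afa')
print(v.major, v.minor, v.bugfix)  # 1 8 0
print(v.pre_release)               # b2
print(v.is_devversion)             # True
# A dev build compares below both its own pre-release and the final release:
print(v < '1.8.0b2')               # True
print(v < '1.8.0')                 # True
print(NumpyVersion('1.8.0') >= '1.8.0rc1')  # True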
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.py b/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.py new file mode 100644 index 0000000..c267eb0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.py @@ -0,0 +1,7 @@ +from ._array_utils_impl import ( # noqa: F401 + __all__, + __doc__, + byte_bounds, + normalize_axis_index, + normalize_axis_tuple, +) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.pyi new file mode 100644 index 0000000..8adc3c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.pyi @@ -0,0 +1,12 @@ +from ._array_utils_impl import ( + __all__ as __all__, +) +from ._array_utils_impl import ( + byte_bounds as byte_bounds, +) +from ._array_utils_impl import ( + normalize_axis_index as normalize_axis_index, +) +from ._array_utils_impl import ( + normalize_axis_tuple as normalize_axis_tuple, +) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/format.py b/.venv/lib/python3.12/site-packages/numpy/lib/format.py new file mode 100644 index 0000000..8e0c799 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/format.py @@ -0,0 +1,24 @@ +from ._format_impl import ( # noqa: F401 + ARRAY_ALIGN, + BUFFER_SIZE, + EXPECTED_KEYS, + GROWTH_AXIS_MAX_DIGITS, + MAGIC_LEN, + MAGIC_PREFIX, + __all__, + __doc__, + descr_to_dtype, + drop_metadata, + dtype_to_descr, + header_data_from_array_1_0, + isfileobj, + magic, + open_memmap, + read_array, + read_array_header_1_0, + read_array_header_2_0, + read_magic, + write_array, + write_array_header_1_0, + write_array_header_2_0, +) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/format.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/format.pyi new file mode 100644 index 0000000..dd9470e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/format.pyi @@ -0,0 +1,66 @@ +from ._format_impl import ( + ARRAY_ALIGN as ARRAY_ALIGN, +) +from ._format_impl import ( + BUFFER_SIZE as BUFFER_SIZE, +) +from ._format_impl import ( + EXPECTED_KEYS as EXPECTED_KEYS, +) +from ._format_impl import ( + GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, +) +from ._format_impl import ( + MAGIC_LEN as MAGIC_LEN, +) +from ._format_impl import ( + MAGIC_PREFIX as MAGIC_PREFIX, +) +from ._format_impl import ( + __all__ as __all__, +) +from ._format_impl import ( + __doc__ as __doc__, +) +from ._format_impl import ( + descr_to_dtype as descr_to_dtype, +) +from ._format_impl import ( + drop_metadata as drop_metadata, +) +from ._format_impl import ( + dtype_to_descr as dtype_to_descr, +) +from ._format_impl import ( + header_data_from_array_1_0 as header_data_from_array_1_0, +) +from ._format_impl import ( + isfileobj as isfileobj, +) +from ._format_impl import ( + magic as magic, +) +from ._format_impl import ( + open_memmap as open_memmap, +) +from ._format_impl import ( + read_array as read_array, +) +from ._format_impl import ( + read_array_header_1_0 as read_array_header_1_0, +) +from ._format_impl import ( + read_array_header_2_0 as read_array_header_2_0, +) +from ._format_impl import ( + read_magic as read_magic, +) +from ._format_impl import ( + write_array as write_array, +) +from ._format_impl import ( + write_array_header_1_0 as write_array_header_1_0, +) +from ._format_impl import ( + write_array_header_2_0 as write_array_header_2_0, +) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/introspect.py 
b/.venv/lib/python3.12/site-packages/numpy/lib/introspect.py
new file mode 100644
index 0000000..f4a0f32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/introspect.py
@@ -0,0 +1,95 @@
+"""
+Introspection helper functions.
+"""
+
+__all__ = ['opt_func_info']
+
+
+def opt_func_info(func_name=None, signature=None):
+    """
+    Returns a dictionary containing the currently supported CPU dispatched
+    features for all optimized functions.
+
+    Parameters
+    ----------
+    func_name : str, optional
+        Regular expression to filter by function name.
+
+    signature : str, optional
+        Regular expression to filter by data type.
+
+    Returns
+    -------
+    dict
+        A dictionary where keys are optimized function names and values are
+        nested dictionaries indicating supported targets based on data types.
+
+    Examples
+    --------
+    Retrieve dispatch information for functions matching 'add' or 'abs' and
+    data types 'float64' or 'complex64':
+
+    >>> import numpy as np
+    >>> dict = np.lib.introspect.opt_func_info(
+    ...     func_name="add|abs", signature="float64|complex64"
+    ... )
+    >>> import json
+    >>> print(json.dumps(dict, indent=2))
+    {
+        "absolute": {
+            "dd": {
+                "current": "SSE41",
+                "available": "SSE41 baseline(SSE SSE2 SSE3)"
+            },
+            "Ff": {
+                "current": "FMA3__AVX2",
+                "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
+            },
+            "Dd": {
+                "current": "FMA3__AVX2",
+                "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
+            }
+        },
+        "add": {
+            "ddd": {
+                "current": "FMA3__AVX2",
+                "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
+            },
+            "FFF": {
+                "current": "FMA3__AVX2",
+                "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
+            }
+        }
+    }
+
+    """
+    import re
+
+    from numpy._core._multiarray_umath import __cpu_targets_info__ as targets
+    from numpy._core._multiarray_umath import dtype
+
+    if func_name is not None:
+        func_pattern = re.compile(func_name)
+        matching_funcs = {
+            k: v for k, v in targets.items()
+            if func_pattern.search(k)
+        }
+    else:
+        matching_funcs = targets
+
+    if signature is not None:
+        sig_pattern = re.compile(signature)
+        matching_sigs = {}
+        for k, v in matching_funcs.items():
+            matching_chars = {}
+            for chars, targets in v.items():
+                if any(
+                    sig_pattern.search(c) or sig_pattern.search(dtype(c).name)
+                    for c in chars
+                ):
+                    matching_chars[chars] = targets
+            if matching_chars:
+                matching_sigs[k] = matching_chars
+    else:
+        matching_sigs = matching_funcs
+    return matching_sigs
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/introspect.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/introspect.pyi
new file mode 100644
index 0000000..7929981
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/introspect.pyi
@@ -0,0 +1,3 @@
+__all__ = ["opt_func_info"]
+
+def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/mixins.py b/.venv/lib/python3.12/site-packages/numpy/lib/mixins.py
new file mode 100644
index 0000000..831bb34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/mixins.py
@@ -0,0 +1,180 @@
+"""
+Mixin classes for custom array types that don't inherit from ndarray.
+""" + +__all__ = ['NDArrayOperatorsMixin'] + + +def _disables_array_ufunc(obj): + """True when __array_ufunc__ is set to None.""" + try: + return obj.__array_ufunc__ is None + except AttributeError: + return False + + +def _binary_method(ufunc, name): + """Implement a forward binary method with a ufunc, e.g., __add__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(self, other) + func.__name__ = f'__{name}__' + return func + + +def _reflected_binary_method(ufunc, name): + """Implement a reflected binary method with a ufunc, e.g., __radd__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(other, self) + func.__name__ = f'__r{name}__' + return func + + +def _inplace_binary_method(ufunc, name): + """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" + def func(self, other): + return ufunc(self, other, out=(self,)) + func.__name__ = f'__i{name}__' + return func + + +def _numeric_methods(ufunc, name): + """Implement forward, reflected and inplace binary methods with a ufunc.""" + return (_binary_method(ufunc, name), + _reflected_binary_method(ufunc, name), + _inplace_binary_method(ufunc, name)) + + +def _unary_method(ufunc, name): + """Implement a unary special method with a ufunc.""" + def func(self): + return ufunc(self) + func.__name__ = f'__{name}__' + return func + + +class NDArrayOperatorsMixin: + """Mixin defining all operator special methods using __array_ufunc__. + + This class implements the special methods for almost all of Python's + builtin operators defined in the `operator` module, including comparisons + (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by + deferring to the ``__array_ufunc__`` method, which subclasses must + implement. + + It is useful for writing classes that do not inherit from `numpy.ndarray`, + but that should support arithmetic and numpy universal functions like + arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`. + + As an trivial example, consider this implementation of an ``ArrayLike`` + class that simply wraps a NumPy array and ensures that the result of any + arithmetic operation is also an ``ArrayLike`` object: + + >>> import numbers + >>> class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + ... def __init__(self, value): + ... self.value = np.asarray(value) + ... + ... # One might also consider adding the built-in list type to this + ... # list, to support operations like np.add(array_like, list) + ... _HANDLED_TYPES = (np.ndarray, numbers.Number) + ... + ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + ... out = kwargs.get('out', ()) + ... for x in inputs + out: + ... # Only support operations with instances of + ... # _HANDLED_TYPES. Use ArrayLike instead of type(self) + ... # for isinstance to allow subclasses that don't + ... # override __array_ufunc__ to handle ArrayLike objects. + ... if not isinstance( + ... x, self._HANDLED_TYPES + (ArrayLike,) + ... ): + ... return NotImplemented + ... + ... # Defer to the implementation of the ufunc + ... # on unwrapped values. + ... inputs = tuple(x.value if isinstance(x, ArrayLike) else x + ... for x in inputs) + ... if out: + ... kwargs['out'] = tuple( + ... x.value if isinstance(x, ArrayLike) else x + ... for x in out) + ... result = getattr(ufunc, method)(*inputs, **kwargs) + ... + ... if type(result) is tuple: + ... # multiple return values + ... return tuple(type(self)(x) for x in result) + ... elif method == 'at': + ... 
# no return value + ... return None + ... else: + ... # one return value + ... return type(self)(result) + ... + ... def __repr__(self): + ... return '%s(%r)' % (type(self).__name__, self.value) + + In interactions between ``ArrayLike`` objects and numbers or numpy arrays, + the result is always another ``ArrayLike``: + + >>> x = ArrayLike([1, 2, 3]) + >>> x - 1 + ArrayLike(array([0, 1, 2])) + >>> 1 - x + ArrayLike(array([ 0, -1, -2])) + >>> np.arange(3) - x + ArrayLike(array([-1, -1, -1])) + >>> x - np.arange(3) + ArrayLike(array([1, 1, 1])) + + Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations + with arbitrary, unrecognized types. This ensures that interactions with + ArrayLike preserve a well-defined casting hierarchy. + + """ + from numpy._core import umath as um + + __slots__ = () + # Like np.ndarray, this mixin class implements "Option 1" from the ufunc + # overrides NEP. + + # comparisons don't have reflected and in-place versions + __lt__ = _binary_method(um.less, 'lt') + __le__ = _binary_method(um.less_equal, 'le') + __eq__ = _binary_method(um.equal, 'eq') + __ne__ = _binary_method(um.not_equal, 'ne') + __gt__ = _binary_method(um.greater, 'gt') + __ge__ = _binary_method(um.greater_equal, 'ge') + + # numeric methods + __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') + __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') + __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') + __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( + um.matmul, 'matmul') + __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( + um.true_divide, 'truediv') + __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( + um.floor_divide, 'floordiv') + __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') + __divmod__ = _binary_method(um.divmod, 'divmod') + __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') + # __idivmod__ does not exist + # TODO: handle the optional third argument for __pow__? + __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') + __lshift__, __rlshift__, __ilshift__ = _numeric_methods( + um.left_shift, 'lshift') + __rshift__, __rrshift__, __irshift__ = _numeric_methods( + um.right_shift, 'rshift') + __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') + __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') + __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') + + # unary methods + __neg__ = _unary_method(um.negative, 'neg') + __pos__ = _unary_method(um.positive, 'pos') + __abs__ = _unary_method(um.absolute, 'abs') + __invert__ = _unary_method(um.invert, 'invert') diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/mixins.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/mixins.pyi new file mode 100644 index 0000000..4f4801f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/mixins.pyi @@ -0,0 +1,75 @@ +from abc import ABC, abstractmethod +from typing import Any +from typing import Literal as L + +from numpy import ufunc + +__all__ = ["NDArrayOperatorsMixin"] + +# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, +# even though it's reliant on subclasses implementing `__array_ufunc__` + +# NOTE: The accepted input- and output-types of the various dunders are +# completely dependent on how `__array_ufunc__` is implemented. +# As such, only little type safety can be provided here. 
+ +class NDArrayOperatorsMixin(ABC): + @abstractmethod + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + def __lt__(self, other: Any) -> Any: ... + def __le__(self, other: Any) -> Any: ... + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... + def __gt__(self, other: Any) -> Any: ... + def __ge__(self, other: Any) -> Any: ... + def __add__(self, other: Any) -> Any: ... + def __radd__(self, other: Any) -> Any: ... + def __iadd__(self, other: Any) -> Any: ... + def __sub__(self, other: Any) -> Any: ... + def __rsub__(self, other: Any) -> Any: ... + def __isub__(self, other: Any) -> Any: ... + def __mul__(self, other: Any) -> Any: ... + def __rmul__(self, other: Any) -> Any: ... + def __imul__(self, other: Any) -> Any: ... + def __matmul__(self, other: Any) -> Any: ... + def __rmatmul__(self, other: Any) -> Any: ... + def __imatmul__(self, other: Any) -> Any: ... + def __truediv__(self, other: Any) -> Any: ... + def __rtruediv__(self, other: Any) -> Any: ... + def __itruediv__(self, other: Any) -> Any: ... + def __floordiv__(self, other: Any) -> Any: ... + def __rfloordiv__(self, other: Any) -> Any: ... + def __ifloordiv__(self, other: Any) -> Any: ... + def __mod__(self, other: Any) -> Any: ... + def __rmod__(self, other: Any) -> Any: ... + def __imod__(self, other: Any) -> Any: ... + def __divmod__(self, other: Any) -> Any: ... + def __rdivmod__(self, other: Any) -> Any: ... + def __pow__(self, other: Any) -> Any: ... + def __rpow__(self, other: Any) -> Any: ... + def __ipow__(self, other: Any) -> Any: ... + def __lshift__(self, other: Any) -> Any: ... + def __rlshift__(self, other: Any) -> Any: ... + def __ilshift__(self, other: Any) -> Any: ... + def __rshift__(self, other: Any) -> Any: ... + def __rrshift__(self, other: Any) -> Any: ... + def __irshift__(self, other: Any) -> Any: ... + def __and__(self, other: Any) -> Any: ... + def __rand__(self, other: Any) -> Any: ... + def __iand__(self, other: Any) -> Any: ... + def __xor__(self, other: Any) -> Any: ... + def __rxor__(self, other: Any) -> Any: ... + def __ixor__(self, other: Any) -> Any: ... + def __or__(self, other: Any) -> Any: ... + def __ror__(self, other: Any) -> Any: ... + def __ior__(self, other: Any) -> Any: ... + def __neg__(self) -> Any: ... + def __pos__(self) -> Any: ... + def __abs__(self) -> Any: ... + def __invert__(self) -> Any: ... 
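As the stub file notes, everything here hinges on a single hook: every operator dunder the mixin defines funnels into `__array_ufunc__` via a ufunc from `numpy._core.umath`. A minimal sketch of a wrapper type built on the mixin; the `Wrapped` class is hypothetical and ignores the `out=` keyword that the in-place operators pass, so only the ordinary and reflected operators are exercised:

import numpy as np

class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
    """Store an array; implement only __array_ufunc__, inherit all operators."""
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap Wrapped operands, apply the ufunc, and re-wrap the result.
        unwrapped = tuple(x.value if isinstance(x, Wrapped) else x
                          for x in inputs)
        return Wrapped(getattr(ufunc, method)(*unwrapped, **kwargs))

w = Wrapped([1, 2, 3])
print((w + 1).value)   # [2 3 4]    -- __add__ supplied by the mixin
print((2 * w).value)   # [2 4 6]    -- reflected __rmul__ likewise
print((-w).value)      # [-1 -2 -3] -- unary __neg__ as well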
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/npyio.py b/.venv/lib/python3.12/site-packages/numpy/lib/npyio.py new file mode 100644 index 0000000..84d8079 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/npyio.py @@ -0,0 +1 @@ +from ._npyio_impl import DataSource, NpzFile, __doc__ # noqa: F401 diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/npyio.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/npyio.pyi new file mode 100644 index 0000000..49fb4d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/npyio.pyi @@ -0,0 +1,9 @@ +from numpy.lib._npyio_impl import ( + DataSource as DataSource, +) +from numpy.lib._npyio_impl import ( + NpzFile as NpzFile, +) +from numpy.lib._npyio_impl import ( + __doc__ as __doc__, +) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.py b/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.py new file mode 100644 index 0000000..c8a6dd8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.py @@ -0,0 +1,1681 @@ +""" +Collection of utilities to manipulate structured arrays. + +Most of these functions were initially implemented by John Hunter for +matplotlib. They have been rewritten and extended for convenience. + +""" +import itertools + +import numpy as np +import numpy.ma as ma +import numpy.ma.mrecords as mrec +from numpy._core.overrides import array_function_dispatch +from numpy.lib._iotools import _is_string_like + +__all__ = [ + 'append_fields', 'apply_along_fields', 'assign_fields_by_name', + 'drop_fields', 'find_duplicates', 'flatten_descr', + 'get_fieldstructure', 'get_names', 'get_names_flat', + 'join_by', 'merge_arrays', 'rec_append_fields', + 'rec_drop_fields', 'rec_join', 'recursive_fill_fields', + 'rename_fields', 'repack_fields', 'require_fields', + 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured', + ] + + +def _recursive_fill_fields_dispatcher(input, output): + return (input, output) + + +@array_function_dispatch(_recursive_fill_fields_dispatcher) +def recursive_fill_fields(input, output): + """ + Fills fields from output with fields from input, + with support for nested structures. + + Parameters + ---------- + input : ndarray + Input array. + output : ndarray + Output array. 
+
+    Notes
+    -----
+    * `output` should be at least the same size as `input`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
+    >>> b = np.zeros((3,), dtype=a.dtype)
+    >>> rfn.recursive_fill_fields(a, b)
+    array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
+
+    """
+    newdtype = output.dtype
+    for field in newdtype.names:
+        try:
+            current = input[field]
+        except ValueError:
+            continue
+        if current.dtype.names is not None:
+            recursive_fill_fields(current, output[field])
+        else:
+            output[field][:len(current)] = current
+    return output
+
+
+def _get_fieldspec(dtype):
+    """
+    Produce a list of name/dtype pairs corresponding to the dtype fields
+
+    Similar to dtype.descr, but the 'titles' aren't stripped.
+
+    Parameters
+    ----------
+    dtype : dtype
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
+    >>> dt.descr
+    [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
+    >>> _get_fieldspec(dt)
+    [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
+
+    """
+    if dtype.names is None:
+        # .descr returns a nameless field, so we should just use that
+        return dtype.descr
+    else:
+        fields = ((name, dtype.fields[name]) for name in dtype.names)
+        # keep any titles, if present
+        return [
+            (name if len(f) == 2 else (f[2], name), f[0])
+            for name, f in fields
+        ]
+
+
+def get_names(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Input datatype
+    must have fields otherwise error is raised.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
+    ('A',)
+    >>> rfn.get_names(np.empty((1,), dtype=[('A', int), ('B', float)]).dtype)
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names(adtype)
+    ('a', ('b', ('ba', 'bb')))
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            listnames.append((name, tuple(get_names(current))))
+        else:
+            listnames.append(name)
+    return tuple(listnames)
+
+
+def get_names_flat(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Input datatype
+    must have fields otherwise error is raised.
+    Nested structures are flattened beforehand.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
+    False
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int), ('B', str)]).dtype)
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names_flat(adtype)
+    ('a', 'b', 'ba', 'bb')
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        listnames.append(name)
+        current = adtype[name]
+        if current.names is not None:
+            listnames.extend(get_names_flat(current))
+    return tuple(listnames)
+
+
+def flatten_descr(ndtype):
+    """
+    Flatten a structured data-type description.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
+    >>> rfn.flatten_descr(ndtype)
+    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
+
+    """
+    names = ndtype.names
+    if names is None:
+        return (('', ndtype),)
+    else:
+        descr = []
+        for field in names:
+            (typ, _) = ndtype.fields[field]
+            if typ.names is not None:
+                descr.extend(flatten_descr(typ))
+            else:
+                descr.append((field, typ))
+        return tuple(descr)
+
+
+def _zip_dtype(seqarrays, flatten=False):
+    newdtype = []
+    if flatten:
+        for a in seqarrays:
+            newdtype.extend(flatten_descr(a.dtype))
+    else:
+        for a in seqarrays:
+            current = a.dtype
+            if current.names is not None and len(current.names) == 1:
+                # special case - dtypes of 1 field are flattened
+                newdtype.extend(_get_fieldspec(current))
+            else:
+                newdtype.append(('', current))
+    return np.dtype(newdtype)
+
+
+def _zip_descr(seqarrays, flatten=False):
+    """
+    Combine the dtype description of a series of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays
+    flatten : {boolean}, optional
+        Whether to collapse nested descriptions.
+    """
+    return _zip_dtype(seqarrays, flatten=flatten).descr
+
+
+def get_fieldstructure(adtype, lastname=None, parents=None,):
+    """
+    Returns a dictionary with fields indexing lists of their parent fields.
+ + This function is used to simplify access to fields nested in other fields. + + Parameters + ---------- + adtype : np.dtype + Input datatype + lastname : optional + Last processed field name (used internally during recursion). + parents : dictionary + Dictionary of parent fields (used internally during recursion). + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = np.dtype([('A', int), + ... ('B', [('BA', int), + ... ('BB', [('BBA', int), ('BBB', int)])])]) + >>> rfn.get_fieldstructure(ndtype) + ... # XXX: possible regression, order of BBA and BBB is swapped + {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + + """ + if parents is None: + parents = {} + names = adtype.names + for name in names: + current = adtype[name] + if current.names is not None: + if lastname: + parents[name] = [lastname, ] + else: + parents[name] = [] + parents.update(get_fieldstructure(current, name, parents)) + else: + lastparent = list(parents.get(lastname, []) or []) + if lastparent: + lastparent.append(lastname) + elif lastname: + lastparent = [lastname, ] + parents[name] = lastparent or [] + return parents + + +def _izip_fields_flat(iterable): + """ + Returns an iterator of concatenated fields from a sequence of arrays, + collapsing any nested structure. + + """ + for element in iterable: + if isinstance(element, np.void): + yield from _izip_fields_flat(tuple(element)) + else: + yield element + + +def _izip_fields(iterable): + """ + Returns an iterator of concatenated fields from a sequence of arrays. + + """ + for element in iterable: + if (hasattr(element, '__iter__') and + not isinstance(element, str)): + yield from _izip_fields(element) + elif isinstance(element, np.void) and len(tuple(element)) == 1: + # this statement is the same from the previous expression + yield from _izip_fields(element) + else: + yield element + + +def _izip_records(seqarrays, fill_value=None, flatten=True): + """ + Returns an iterator of concatenated items from a sequence of arrays. + + Parameters + ---------- + seqarrays : sequence of arrays + Sequence of arrays. + fill_value : {None, integer} + Value used to pad shorter iterables. + flatten : {True, False}, + Whether to + """ + + # Should we flatten the items, or just use a nested approach + if flatten: + zipfunc = _izip_fields_flat + else: + zipfunc = _izip_fields + + for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value): + yield tuple(zipfunc(tup)) + + +def _fix_output(output, usemask=True, asrecarray=False): + """ + Private function: return a recarray, a ndarray, a MaskedArray + or a MaskedRecords depending on the input parameters + """ + if not isinstance(output, ma.MaskedArray): + usemask = False + if usemask: + if asrecarray: + output = output.view(mrec.MaskedRecords) + else: + output = ma.filled(output) + if asrecarray: + output = output.view(np.recarray) + return output + + +def _fix_defaults(output, defaults=None): + """ + Update the fill_value and masked data of `output` + from the default given in a dictionary defaults. 
+ """ + names = output.dtype.names + (data, mask, fill_value) = (output.data, output.mask, output.fill_value) + for (k, v) in (defaults or {}).items(): + if k in names: + fill_value[k] = v + data[k][mask[k]] = v + return output + + +def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None, + usemask=None, asrecarray=None): + return seqarrays + + +@array_function_dispatch(_merge_arrays_dispatcher) +def merge_arrays(seqarrays, fill_value=-1, flatten=False, + usemask=False, asrecarray=False): + """ + Merge arrays field by field. + + Parameters + ---------- + seqarrays : sequence of ndarrays + Sequence of arrays + fill_value : {float}, optional + Filling value used to pad missing data on the shorter arrays. + flatten : {False, True}, optional + Whether to collapse nested fields. + usemask : {False, True}, optional + Whether to return a masked array or not. + asrecarray : {False, True}, optional + Whether to return a recarray (MaskedRecords) or not. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) + array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64), + ... np.array([10., 20., 30.])), usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]), + ... np.array([10., 20., 30.])), + ... usemask=False, asrecarray=True) + rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('a', '>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) + >>> rfn.drop_fields(a, 'a') + array([((2., 3),), ((5., 6),)], + dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') + array([(1, (3,)), (4, (6,))], dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) + array([(1,), (4,)], dtype=[('a', '>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) + >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) + array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], + dtype=[('A', ' 1: + data = merge_arrays(data, flatten=True, usemask=usemask, + fill_value=fill_value) + else: + data = data.pop() + # + output = ma.masked_all( + max(len(base), len(data)), + dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype)) + output = recursive_fill_fields(base, output) + output = recursive_fill_fields(data, output) + # + return _fix_output(output, usemask=usemask, asrecarray=asrecarray) + + +def _rec_append_fields_dispatcher(base, names, data, dtypes=None): + yield base + yield from data + + +@array_function_dispatch(_rec_append_fields_dispatcher) +def rec_append_fields(base, names, data, dtypes=None): + """ + Add new fields to an existing array. + + The names of the fields are given with the `names` arguments, + the corresponding values with the `data` arguments. + If a single field is appended, `names`, `data` and `dtypes` do not have + to be lists but just values. + + Parameters + ---------- + base : array + Input array to extend. + names : string, sequence + String or sequence of strings corresponding to the names + of the new fields. + data : array or sequence of arrays + Array or sequence of arrays storing the fields to add to the base. 
+    dtypes : sequence of datatypes, optional
+        Datatype or sequence of datatypes.
+        If None, the datatypes are estimated from the `data`.
+
+    See Also
+    --------
+    append_fields
+
+    Returns
+    -------
+    appended_array : np.recarray
+    """
+    return append_fields(base, names, data=data, dtypes=dtypes,
+                         asrecarray=True, usemask=False)
+
+
+def _repack_fields_dispatcher(a, align=None, recurse=None):
+    return (a,)
+
+
+@array_function_dispatch(_repack_fields_dispatcher)
+def repack_fields(a, align=False, recurse=False):
+    """
+    Re-pack the fields of a structured array or dtype in memory.
+
+    The memory layout of structured datatypes allows fields at arbitrary
+    byte offsets. This means the fields can be separated by padding bytes,
+    their offsets can be non-monotonically increasing, and they can overlap.
+
+    This method removes any overlaps and reorders the fields in memory so they
+    have increasing byte offsets, and adds or removes padding bytes depending
+    on the `align` option, which behaves like the `align` option to
+    `numpy.dtype`.
+
+    If `align=False`, this method produces a "packed" memory layout in which
+    each field starts at the byte the previous field ended, and any padding
+    bytes are removed.
+
+    If `align=True`, this method produces an "aligned" memory layout in which
+    each field's offset is a multiple of its alignment, and the total itemsize
+    is a multiple of the largest alignment, by adding padding bytes as needed.
+
+    Parameters
+    ----------
+    a : ndarray or dtype
+        array or dtype for which to repack the fields.
+    align : boolean
+        If true, use an "aligned" memory layout, otherwise use a "packed" layout.
+    recurse : boolean
+        If True, also repack nested structures.
+
+    Returns
+    -------
+    repacked : ndarray or dtype
+        Copy of `a` with fields repacked, or `a` itself if no repacking was
+        needed.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> from numpy.lib import recfunctions as rfn
+    >>> def print_offsets(d):
+    ...     print("offsets:", [d.fields[name][1] for name in d.names])
+    ...     print("itemsize:", d.itemsize)
+    ...
+    >>> dt = np.dtype('u1, <i8, <f8', align=True)
+    >>> dt
+    dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], 'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
+    >>> print_offsets(dt)
+    offsets: [0, 8, 16]
+    itemsize: 24
+    >>> packed_dt = rfn.repack_fields(dt)
+    >>> packed_dt
+    dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
+    >>> print_offsets(packed_dt)
+    offsets: [0, 1, 9]
+    itemsize: 17
+
+    """
+    if not isinstance(a, np.dtype):
+        dt = repack_fields(a.dtype, align=align, recurse=recurse)
+        return a.astype(dt, copy=False)
+
+    if a.names is None:
+        return a
+
+    fieldinfo = []
+    for name in a.names:
+        tup = a.fields[name]
+        if recurse:
+            fmt = repack_fields(tup[0], align=align, recurse=True)
+        else:
+            fmt = tup[0]
+
+        if len(tup) == 3:
+            name = (tup[2], name)
+
+        fieldinfo.append((name, fmt))
+
+    dt = np.dtype(fieldinfo, align=align)
+    return np.dtype((a.type, dt))
+
+
+def _get_fields_and_offsets(dt, offset=0):
+    """
+    Returns a flat list of (dtype, count, offset) tuples of all the
+    scalar fields in the dtype "dt", including nested fields, in left
+    to right order.
+ """ + + # counts up elements in subarrays, including nested subarrays, and returns + # base dtype and count + def count_elem(dt): + count = 1 + while dt.shape != (): + for size in dt.shape: + count *= size + dt = dt.base + return dt, count + + fields = [] + for name in dt.names: + field = dt.fields[name] + f_dt, f_offset = field[0], field[1] + f_dt, n = count_elem(f_dt) + + if f_dt.names is None: + fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset)) + else: + subfields = _get_fields_and_offsets(f_dt, f_offset + offset) + size = f_dt.itemsize + + for i in range(n): + if i == 0: + # optimization: avoid list comprehension if no subarray + fields.extend(subfields) + else: + fields.extend([(d, c, o + i * size) for d, c, o in subfields]) + return fields + +def _common_stride(offsets, counts, itemsize): + """ + Returns the stride between the fields, or None if the stride is not + constant. The values in "counts" designate the lengths of + subarrays. Subarrays are treated as many contiguous fields, with + always positive stride. + """ + if len(offsets) <= 1: + return itemsize + + negative = offsets[1] < offsets[0] # negative stride + if negative: + # reverse, so offsets will be ascending + it = zip(reversed(offsets), reversed(counts)) + else: + it = zip(offsets, counts) + + prev_offset = None + stride = None + for offset, count in it: + if count != 1: # subarray: always c-contiguous + if negative: + return None # subarrays can never have a negative stride + if stride is None: + stride = itemsize + if stride != itemsize: + return None + end_offset = offset + (count - 1) * itemsize + else: + end_offset = offset + + if prev_offset is not None: + new_stride = offset - prev_offset + if stride is None: + stride = new_stride + if stride != new_stride: + return None + + prev_offset = end_offset + + if negative: + return -stride + return stride + + +def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None, + casting=None): + return (arr,) + +@array_function_dispatch(_structured_to_unstructured_dispatcher) +def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): + """ + Converts an n-D structured array into an (n+1)-D unstructured array. + + The new array will have a new last dimension equal in size to the + number of field-elements of the input array. If not supplied, the output + datatype is determined from the numpy type promotion rules applied to all + the field datatypes. + + Nested fields, as well as each element of any subarray fields, all count + as a single field-elements. + + Parameters + ---------- + arr : ndarray + Structured array or dtype to convert. Cannot contain object datatype. + dtype : dtype, optional + The dtype of the output unstructured array. + copy : bool, optional + If true, always return a copy. If false, a view is returned if + possible, such as when the `dtype` and strides of the fields are + suitable and the array subtype is one of `numpy.ndarray`, + `numpy.recarray` or `numpy.memmap`. + + .. versionchanged:: 1.25.0 + A view can now be returned if the fields are separated by a + uniform stride. + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + unstructured : ndarray + Unstructured array with one more dimension. 
+ + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a + array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]), + (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])], + dtype=[('a', '>> rfn.structured_to_unstructured(a) + array([[0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1) + array([ 3. , 5.5, 9. , 11. ]) + + """ # noqa: E501 + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + fields = _get_fields_and_offsets(arr.dtype) + n_fields = len(fields) + if n_fields == 0 and dtype is None: + raise ValueError("arr has no fields. Unable to guess dtype") + elif n_fields == 0: + # too many bugs elsewhere for this to work now + raise NotImplementedError("arr with no fields is not supported") + + dts, counts, offsets = zip(*fields) + names = [f'f{n}' for n in range(n_fields)] + + if dtype is None: + out_dtype = np.result_type(*[dt.base for dt in dts]) + else: + out_dtype = np.dtype(dtype) + + # Use a series of views and casts to convert to an unstructured array: + + # first view using flattened fields (doesn't work for object arrays) + # Note: dts may include a shape for subarrays + flattened_fields = np.dtype({'names': names, + 'formats': dts, + 'offsets': offsets, + 'itemsize': arr.dtype.itemsize}) + arr = arr.view(flattened_fields) + + # we only allow a few types to be unstructured by manipulating the + # strides, because we know it won't work with, for example, np.matrix nor + # np.ma.MaskedArray. + can_view = type(arr) in (np.ndarray, np.recarray, np.memmap) + if (not copy) and can_view and all(dt.base == out_dtype for dt in dts): + # all elements have the right dtype already; if they have a common + # stride, we can just return a view + common_stride = _common_stride(offsets, counts, out_dtype.itemsize) + if common_stride is not None: + wrap = arr.__array_wrap__ + + new_shape = arr.shape + (sum(counts), out_dtype.itemsize) + new_strides = arr.strides + (abs(common_stride), 1) + + arr = arr[..., np.newaxis].view(np.uint8) # view as bytes + arr = arr[..., min(offsets):] # remove the leading unused data + arr = np.lib.stride_tricks.as_strided(arr, + new_shape, + new_strides, + subok=True) + + # cast and drop the last dimension again + arr = arr.view(out_dtype)[..., 0] + + if common_stride < 0: + arr = arr[..., ::-1] # reverse, if the stride was negative + if type(arr) is not type(wrap.__self__): + # Some types (e.g. recarray) turn into an ndarray along the + # way, so we have to wrap it again in order to match the + # behavior with copy=True. 
+ arr = wrap(arr) + return arr + + # next cast to a packed format with all fields converted to new dtype + packed_fields = np.dtype({'names': names, + 'formats': [(out_dtype, dt.shape) for dt in dts]}) + arr = arr.astype(packed_fields, copy=copy, casting=casting) + + # finally is it safe to view the packed fields as the unstructured type + return arr.view((out_dtype, (sum(counts),))) + + +def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None, + align=None, copy=None, casting=None): + return (arr,) + +@array_function_dispatch(_unstructured_to_structured_dispatcher) +def unstructured_to_structured(arr, dtype=None, names=None, align=False, + copy=False, casting='unsafe'): + """ + Converts an n-D unstructured array into an (n-1)-D structured array. + + The last dimension of the input array is converted into a structure, with + number of field-elements equal to the size of the last dimension of the + input array. By default all output fields have the input array's dtype, but + an output structured dtype with an equal number of fields-elements can be + supplied instead. + + Nested fields, as well as each element of any subarray fields, all count + towards the number of field-elements. + + Parameters + ---------- + arr : ndarray + Unstructured array or dtype to convert. + dtype : dtype, optional + The structured dtype of the output array + names : list of strings, optional + If dtype is not supplied, this specifies the field names for the output + dtype, in order. The field dtypes will be the same as the input array. + align : boolean, optional + Whether to create an aligned memory layout. + copy : bool, optional + See copy argument to `numpy.ndarray.astype`. If true, always return a + copy. If false, and `dtype` requirements are satisfied, a view is + returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + structured : ndarray + Structured array with fewer dimensions. + + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a = np.arange(20).reshape((4,5)) + >>> a + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]]) + >>> rfn.unstructured_to_structured(a, dt) + array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], + dtype=[('a', '>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> rfn.apply_along_fields(np.mean, b) + array([ 2.66666667, 5.33333333, 8.66666667, 11. ]) + >>> rfn.apply_along_fields(np.mean, b[['x', 'z']]) + array([ 3. , 5.5, 9. , 11. ]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + uarr = structured_to_unstructured(arr) + return func(uarr, axis=-1) + # works and avoids axis requirement, but very, very slow: + #return np.apply_along_axis(func, -1, uarr) + +def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None): + return dst, src + +@array_function_dispatch(_assign_fields_by_name_dispatcher) +def assign_fields_by_name(dst, src, zero_unassigned=True): + """ + Assigns values from one structured array to another by field name. 
+ + Normally in numpy >= 1.14, assignment of one structured array to another + copies fields "by position", meaning that the first field from the src is + copied to the first field of the dst, and so on, regardless of field name. + + This function instead copies "by field name", such that fields in the dst + are assigned from the identically named field in the src. This applies + recursively for nested structures. This is how structure assignment worked + in numpy >= 1.6 to <= 1.13. + + Parameters + ---------- + dst : ndarray + src : ndarray + The source and destination arrays during assignment. + zero_unassigned : bool, optional + If True, fields in the dst for which there was no matching + field in the src are filled with the value 0 (zero). This + was the behavior of numpy <= 1.13. If False, those fields + are not modified. + """ + + if dst.dtype.names is None: + dst[...] = src + return + + for name in dst.dtype.names: + if name not in src.dtype.names: + if zero_unassigned: + dst[name] = 0 + else: + assign_fields_by_name(dst[name], src[name], + zero_unassigned) + +def _require_fields_dispatcher(array, required_dtype): + return (array,) + +@array_function_dispatch(_require_fields_dispatcher) +def require_fields(array, required_dtype): + """ + Casts a structured array to a new dtype using assignment by field-name. + + This function assigns from the old to the new array by name, so the + value of a field in the output array is the value of the field with the + same name in the source array. This has the effect of creating a new + ndarray containing only the fields "required" by the required_dtype. + + If a field name in the required_dtype does not exist in the + input array, that field is created and set to 0 in the output array. + + Parameters + ---------- + a : ndarray + array to cast + required_dtype : dtype + datatype for output array + + Returns + ------- + out : ndarray + array with the new dtype, with field values copied from the fields in + the input array with the same name + + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')]) + array([(1., 1), (1., 1), (1., 1), (1., 1)], + dtype=[('b', '>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')]) + array([(1., 0), (1., 0), (1., 0), (1., 0)], + dtype=[('b', '>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> x = np.array([1, 2,]) + >>> rfn.stack_arrays(x) is x + True + >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) + >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + ... 
+
+
+def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
+                             asrecarray=None, autoconvert=None):
+    return arrays
+
+
+@array_function_dispatch(_stack_arrays_dispatcher)
+def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
+                 autoconvert=False):
+    """
+    Superposes arrays fields by fields
+
+    Parameters
+    ----------
+    arrays : array or sequence
+        Sequence of input arrays.
+    defaults : dictionary, optional
+        Dictionary mapping field names to the corresponding default values.
+    usemask : {True, False}, optional
+        Whether to return a MaskedArray (or MaskedRecords if
+        `asrecarray==True`) or a ndarray.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray (or MaskedRecords if `usemask==True`)
+        or just a flexible-type ndarray.
+    autoconvert : {False, True}, optional
+        Whether automatically cast the type of the field to the maximum.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> x = np.array([1, 2,])
+    >>> rfn.stack_arrays(x) is x
+    True
+    >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
+    >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+    ...               dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
+    >>> test = rfn.stack_arrays((z,zz))
+    >>> test
+    masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
+                       (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
+                 mask=[(False, False,  True), (False, False,  True),
+                       (False, False, False), (False, False, False),
+                       (False, False, False)],
+           fill_value=(b'N/A', 1e+20, 1e+20),
+                dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
+
+    """
+    if isinstance(arrays, ndarray):
+        return arrays
+    elif len(arrays) == 1:
+        return arrays[0]
+    seqarrays = [np.asanyarray(a).ravel() for a in arrays]
+    nrecords = [len(a) for a in seqarrays]
+    ndtype = [a.dtype for a in seqarrays]
+    fldnames = [d.names for d in ndtype]
+    #
+    dtype_l = ndtype[0]
+    newdescr = _get_fieldspec(dtype_l)
+    names = [n for n, d in newdescr]
+    for dtype_n in ndtype[1:]:
+        for fname, fdtype in _get_fieldspec(dtype_n):
+            if fname not in names:
+                newdescr.append((fname, fdtype))
+                names.append(fname)
+            else:
+                nameidx = names.index(fname)
+                _, cdtype = newdescr[nameidx]
+                if autoconvert:
+                    newdescr[nameidx] = (fname, max(fdtype, cdtype))
+                elif fdtype != cdtype:
+                    raise TypeError(f"Incompatible type '{cdtype}' <> '{fdtype}'")
+    # Only one field: use concatenate
+    if len(newdescr) == 1:
+        output = ma.concatenate(seqarrays)
+    else:
+        #
+        output = ma.masked_all((np.sum(nrecords),), newdescr)
+        offset = np.cumsum(np.r_[0, nrecords])
+        seen = []
+        for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
+            names = a.dtype.names
+            if names is None:
+                output[f'f{len(seen)}'][i:j] = a
+            else:
+                for name in n:
+                    output[name][i:j] = a[name]
+                    if name not in seen:
+                        seen.append(name)
+    #
+    return _fix_output(_fix_defaults(output, defaults),
+                       usemask=usemask, asrecarray=asrecarray)
+
+
+def _find_duplicates_dispatcher(
+        a, key=None, ignoremask=None, return_index=None):
+    return (a,)
+
+
+@array_function_dispatch(_find_duplicates_dispatcher)
+def find_duplicates(a, key=None, ignoremask=True, return_index=False):
+    """
+    Find the duplicates in a structured array along a given key
+
+    Parameters
+    ----------
+    a : array-like
+        Input array
+    key : {string, None}, optional
+        Name of the fields along which to check the duplicates.
+        If None, the search is performed by records
+    ignoremask : {True, False}, optional
+        Whether masked data should be discarded or considered as duplicates.
+    return_index : {False, True}, optional
+        Whether to return the indices of the duplicated values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = [('a', int)]
+    >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
+    ...         mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+    >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
+    (masked_array(data=[(1,), (1,), (2,), (2,)],
+                 mask=[(False,), (False,), (False,), (False,)],
+           fill_value=(999999,),
+                dtype=[('a', '<i8')]),
+     array([0, 1, 3, 4]))
+
+    """
+    a = np.asanyarray(a).ravel()
+    # Get a dictionary of fields
+    fields = get_fieldstructure(a.dtype)
+    # Get the sorting data (by selecting the corresponding field)
+    base = a
+    if key:
+        for f in fields[key]:
+            base = base[f]
+        base = base[key]
+    # Get the sorting indices and the sorted data
+    sortidx = base.argsort()
+    sortedbase = base[sortidx]
+    sorteddata = sortedbase.filled()
+    # Compare the sorting data
+    flag = (sorteddata[:-1] == sorteddata[1:])
+    # If masked data must be ignored, set the flag to false where needed
+    if ignoremask:
+        sortedmask = sortedbase.recordmask
+        flag[sortedmask[1:]] = False
+    flag = np.concatenate(([False], flag))
+    # We need to take the point on the left as well (else we're missing it)
+    flag[:-1] = flag[:-1] + flag[1:]
+    duplicates = a[sortidx][flag]
+    if return_index:
+        return (duplicates, sortidx[flag])
+    else:
+        return duplicates
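[Editor's aside, not part of the patch] A compact usage sketch for find_duplicates, mirroring the docstring example above (the data is illustrative):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    a = np.ma.array([1, 1, 2, 2, 3],
                    mask=[0, 0, 0, 1, 0]).view([('a', int)])
    dups, idx = rfn.find_duplicates(a, ignoremask=True, return_index=True)

    # The masked 2 is discarded, so only the pair of 1s counts as duplicated.
    assert sorted(idx.tolist()) == [0, 1]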
+
+
+def _join_by_dispatcher(
+        key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+        defaults=None, usemask=None, asrecarray=None):
+    return (r1, r2)
+
+
+@array_function_dispatch(_join_by_dispatcher)
+def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+            defaults=None, usemask=True, asrecarray=False):
+    """
+    Join arrays `r1` and `r2` on key `key`.
+
+    The key should be either a string or a sequence of strings corresponding
+    to the fields used to join the array.  An exception is raised if the
+    `key` field cannot be found in the two input arrays.  Neither `r1` nor
+    `r2` should have any duplicates along `key`: the presence of duplicates
+    will make the output quite unreliable. Note that duplicates are not
+    looked for by the algorithm.
+
+    Parameters
+    ----------
+    key : {string, sequence}
+        A string or a sequence of strings corresponding to the fields used
+        for comparison.
+    r1, r2 : arrays
+        Structured arrays.
+    jointype : {'inner', 'outer', 'leftouter'}, optional
+        If 'inner', returns the elements common to both r1 and r2.
+        If 'outer', returns the common elements as well as the elements of
+        r1 not in r2 and the elements of r2 not in r1.
+        If 'leftouter', returns the common elements and the elements of r1
+        not in r2.
+    r1postfix : string, optional
+        String appended to the names of the fields of r1 that are present
+        in r2 but absent of the key.
+    r2postfix : string, optional
+        String appended to the names of the fields of r2 that are present
+        in r1 but absent of the key.
+    defaults : {dictionary}, optional
+        Dictionary mapping field names to the corresponding default values.
+    usemask : {True, False}, optional
+        Whether to return a MaskedArray (or MaskedRecords if
+        `asrecarray==True`) or a ndarray.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray (or MaskedRecords if `usemask==True`)
+        or just a flexible-type ndarray.
+
+    Notes
+    -----
+    * The output is sorted along the key.
+    * A temporary array is formed by dropping the fields not in the key for
+      the two arrays and concatenating the result. This array is then
+      sorted, and the common entries selected. The output is constructed by
+      filling the fields with the selected entries. Matching is not
+      preserved if there are some duplicates...
+
+    """
+    # Check jointype
+    if jointype not in ('inner', 'outer', 'leftouter'):
+        raise ValueError(
+            "The 'jointype' argument should be in 'inner', "
+            f"'outer' or 'leftouter' (got '{jointype}' instead)"
+        )
+    # If we have a single key, put it in a tuple
+    if isinstance(key, str):
+        key = (key,)
+
+    # Check the keys
+    if len(set(key)) != len(key):
+        dup = next(x for n, x in enumerate(key) if x in key[n + 1:])
+        raise ValueError(f"duplicate join key {dup!r}")
+    for name in key:
+        if name not in r1.dtype.names:
+            raise ValueError(f'r1 does not have key field {name!r}')
+        if name not in r2.dtype.names:
+            raise ValueError(f'r2 does not have key field {name!r}')
+
+    # Make sure we work with ravelled arrays
+    r1 = r1.ravel()
+    r2 = r2.ravel()
+    nb1 = len(r1)
+    (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
+
+    # Check the names for collision
+    collisions = (set(r1names) & set(r2names)) - set(key)
+    if collisions and not (r1postfix or r2postfix):
+        msg = "r1 and r2 contain common names, r1postfix and r2postfix "
+        msg += "can't both be empty"
+        raise ValueError(msg)
+
+    # Make temporary arrays of just the keys
+    #  (use order of keys in `r1` for back-compatibility)
+    key1 = [n for n in r1names if n in key]
+    r1k = _keep_fields(r1, key1)
+    r2k = _keep_fields(r2, key1)
+
+    # Concatenate the two arrays for comparison
+    aux = ma.concatenate((r1k, r2k))
+    idx_sort = aux.argsort(order=key)
+    aux = aux[idx_sort]
+    #
+    # Get the common keys
+    flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
+    flag_in[:-1] = flag_in[1:] + flag_in[:-1]
+    idx_in = idx_sort[flag_in]
+    idx_1 = idx_in[(idx_in < nb1)]
+    idx_2 = idx_in[(idx_in >= nb1)] - nb1
+    (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
+    if jointype == 'inner':
+        (r1spc, r2spc) = (0, 0)
+    elif jointype == 'outer':
+        idx_out = idx_sort[~flag_in]
+        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+        idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
+        (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
+    elif jointype == 'leftouter':
+        idx_out = idx_sort[~flag_in]
+        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+        (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
+    # Select the entries from each input
+    (s1, s2) = (r1[idx_1], r2[idx_2])
+    #
+    # Build the new description of the output array .......
+    # Start with the key fields
+    ndtype = _get_fieldspec(r1k.dtype)
+
+    # Add the fields from r1
+    for fname, fdtype in _get_fieldspec(r1.dtype):
+        if fname not in key:
+            ndtype.append((fname, fdtype))
+
+    # Add the fields from r2
+    for fname, fdtype in _get_fieldspec(r2.dtype):
+        # Have we seen the current name already ?
+        # we need to rebuild this list every time
+        names = [name for name, dtype in ndtype]
+        try:
+            nameidx = names.index(fname)
+        except ValueError:
+            # ... we haven't: just add the description to the current list
+            ndtype.append((fname, fdtype))
+        else:
+            # collision
+            _, cdtype = ndtype[nameidx]
+            if fname in key:
+                # The current field is part of the key: take the largest dtype
+                ndtype[nameidx] = (fname, max(fdtype, cdtype))
+            else:
+                # The current field is not part of the key: add the suffixes,
+                # and place the new field adjacent to the old one
+                ndtype[nameidx:nameidx + 1] = [
+                    (fname + r1postfix, cdtype),
+                    (fname + r2postfix, fdtype)
+                ]
+    # Rebuild a dtype from the new fields
+    ndtype = np.dtype(ndtype)
+    # Find the largest nb of common fields :
+    # r1cmn and r2cmn should be equal, but...
+    cmn = max(r1cmn, r2cmn)
+    # Construct an empty array
+    output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
+    names = output.dtype.names
+    for f in r1names:
+        selected = s1[f]
+        if f not in names or (f in r2names and not r2postfix and f not in key):
+            f += r1postfix
+        current = output[f]
+        current[:r1cmn] = selected[:r1cmn]
+        if jointype in ('outer', 'leftouter'):
+            current[cmn:cmn + r1spc] = selected[r1cmn:]
+    for f in r2names:
+        selected = s2[f]
+        if f not in names or (f in r1names and not r1postfix and f not in key):
+            f += r2postfix
+        current = output[f]
+        current[:r2cmn] = selected[:r2cmn]
+        if (jointype == 'outer') and r2spc:
+            current[-r2spc:] = selected[r2cmn:]
+    # Sort and finalize the output
+    output.sort(order=key)
+    kwargs = {'usemask': usemask, 'asrecarray': asrecarray}
+    return _fix_output(_fix_defaults(output, defaults), **kwargs)
+
+
+def _rec_join_dispatcher(
+        key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+        defaults=None):
+    return (r1, r2)
+
+
+@array_function_dispatch(_rec_join_dispatcher)
+def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+             defaults=None):
+    """
+    Join arrays `r1` and `r2` on keys.
+    Alternative to join_by, that always returns a np.recarray.
+ + See Also + -------- + join_by : equivalent function + """ + kwargs = {'jointype': jointype, 'r1postfix': r1postfix, 'r2postfix': r2postfix, + 'defaults': defaults, 'usemask': False, 'asrecarray': True} + return join_by(key, r1, r2, **kwargs) + + +del array_function_dispatch diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.pyi new file mode 100644 index 0000000..0736429 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.pyi @@ -0,0 +1,435 @@ +from collections.abc import Callable, Iterable, Mapping, Sequence +from typing import Any, Literal, TypeAlias, overload + +from _typeshed import Incomplete +from typing_extensions import TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid +from numpy.ma.mrecords import MaskedRecords + +__all__ = [ + "append_fields", + "apply_along_fields", + "assign_fields_by_name", + "drop_fields", + "find_duplicates", + "flatten_descr", + "get_fieldstructure", + "get_names", + "get_names_flat", + "join_by", + "merge_arrays", + "rec_append_fields", + "rec_drop_fields", + "rec_join", + "recursive_fill_fields", + "rename_fields", + "repack_fields", + "require_fields", + "stack_arrays", + "structured_to_unstructured", + "unstructured_to_structured", +] + +_T = TypeVar("_T") +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) +_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) +_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) + +_OneOrMany: TypeAlias = _T | Iterable[_T] +_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] + +_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] +_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType + +_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] + +### + +def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... + +# +def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... +def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... + +# +@overload +def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +@overload +def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ... + +# +def get_fieldstructure( + adtype: np.dtype[np.void], + lastname: str | None = None, + parents: dict[str, list[str]] | None = None, +) -> dict[str, list[str]]: ... + +# +@overload +def merge_arrays( + seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype], + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def merge_arrays( + seqarrays: Sequence[npt.ArrayLike] | np.void, + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... + +# +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... 
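[Editor's aside, not part of the patch] The stub overloads above encode a simple runtime rule: drop_fields and rename_fields hand back the same array flavor they were given, with only the dtype reworked. A minimal sketch (names and values are illustrative):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    a = np.array([(1, 2.0, 3)], dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'i8')])

    assert rfn.drop_fields(a, 'c').dtype.names == ('a', 'b')
    assert rfn.rename_fields(a, {'b': 'beta'}).dtype.names == ('a', 'beta')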
+@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + *, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def rename_fields( + base: MaskedRecords[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.recarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[True], + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... 
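[Editor's aside, not part of the patch] What the append_fields overloads above pin down is the usemask/asrecarray return-type matrix. A sketch of the two most common combinations (the data is illustrative):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    base = np.array([(1,), (2,)], dtype=[('a', 'i4')])
    extra = np.array([1.5, 2.5])

    masked = rfn.append_fields(base, 'b', extra)                # usemask=True
    plain = rfn.append_fields(base, 'b', extra, usemask=False)  # bare ndarray

    assert isinstance(masked, np.ma.MaskedArray)
    assert not isinstance(plain, np.ma.MaskedArray)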
+ +# +def rec_drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, +# e.g. using a `TypeVar` with constraints. +# https://github.com/numpy/numtype/issues/92 +@overload +def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +@overload +def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +@overload +def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... + +# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[_ScalarT]: ... +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: npt.DTypeLike | None = None, + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[Any]: ... + +# +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: npt.DTypeLike, + names: None = None, + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... + +# +def apply_along_fields( + func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], + arr: np.ndarray[_ShapeT, np.dtype[np.void]], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... + +# +def require_fields( + array: np.ndarray[_ShapeT, np.dtype[np.void]], + required_dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Attempt shape-typing +@overload +def stack_arrays( + arrays: _ArrayT, + defaults: Mapping[str, object] | None = None, + usemask: bool = True, + asrecarray: bool = False, + autoconvert: bool = False, +) -> _ArrayT: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... 
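[Editor's aside, not part of the patch] The stack_arrays overloads above follow the same flag-driven pattern; at runtime, fields missing from one input come back masked in the result. A small sketch (dtypes are illustrative):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    z = np.array([(b'A', 1.0)], dtype=[('A', 'S3'), ('B', 'f8')])
    zz = np.array([(b'a', 10.0, 100.0)],
                  dtype=[('A', 'S3'), ('B', 'f8'), ('C', 'f8')])

    stacked = rfn.stack_arrays((z, zz))
    assert stacked.shape == (2,)
    assert bool(stacked['C'].mask[0])   # 'C' did not exist in z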
+@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> np.ma.MaskedArray[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[True], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... + +# +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + return_index: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None, + ignoremask: bool, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + *, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... + +# +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... + +# +def rec_join( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... 
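[Editor's aside, not part of the patch] To close out the stub: the join_by overloads encode that, by default (usemask=True), the join comes back masked so rows missing from one side can be represented. A usage sketch (keys and fields are illustrative):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    r1 = np.array([(1, 10.0), (2, 20.0)], dtype=[('key', 'i8'), ('a', 'f8')])
    r2 = np.array([(2, 5.0), (3, 6.0)], dtype=[('key', 'i8'), ('b', 'f8')])

    inner = rfn.join_by('key', r1, r2)                    # only key == 2
    outer = rfn.join_by('key', r1, r2, jointype='outer')  # keys 1, 2, 3

    assert inner['key'].tolist() == [2]
    assert len(outer) == 3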
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/scimath.py b/.venv/lib/python3.12/site-packages/numpy/lib/scimath.py new file mode 100644 index 0000000..fb6824d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/scimath.py @@ -0,0 +1,13 @@ +from ._scimath_impl import ( # noqa: F401 + __all__, + __doc__, + arccos, + arcsin, + arctanh, + log, + log2, + log10, + logn, + power, + sqrt, +) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/scimath.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/scimath.pyi new file mode 100644 index 0000000..253235d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/scimath.pyi @@ -0,0 +1,30 @@ +from ._scimath_impl import ( + __all__ as __all__, +) +from ._scimath_impl import ( + arccos as arccos, +) +from ._scimath_impl import ( + arcsin as arcsin, +) +from ._scimath_impl import ( + arctanh as arctanh, +) +from ._scimath_impl import ( + log as log, +) +from ._scimath_impl import ( + log2 as log2, +) +from ._scimath_impl import ( + log10 as log10, +) +from ._scimath_impl import ( + logn as logn, +) +from ._scimath_impl import ( + power as power, +) +from ._scimath_impl import ( + sqrt as sqrt, +) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.py b/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.py new file mode 100644 index 0000000..721a548 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.py @@ -0,0 +1 @@ +from ._stride_tricks_impl import __doc__, as_strided, sliding_window_view # noqa: F401 diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.pyi new file mode 100644 index 0000000..42d8fe9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.pyi @@ -0,0 +1,6 @@ +from numpy.lib._stride_tricks_impl import ( + as_strided as as_strided, +) +from numpy.lib._stride_tricks_impl import ( + sliding_window_view as sliding_window_view, +) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..08ba67e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc new file mode 100644 index 0000000..f6ce1a4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc new file mode 100644 index 0000000..064d4eb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-312.pyc new file mode 100644 index 
0000000..a212b0b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc new file mode 100644 index 0000000..72fd67e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc new file mode 100644 index 0000000..673f7c1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc new file mode 100644 index 0000000..3edbed0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc new file mode 100644 index 0000000..a9a8259 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-312.pyc new file mode 100644 index 0000000..4b905ea Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc new file mode 100644 index 0000000..beaa22d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc new file mode 100644 index 0000000..2b1a824 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc new file mode 100644 index 0000000..2610bc0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-312.pyc new file mode 100644 index 0000000..20e6590 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-312.pyc differ 
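[Editor's aside, not part of the patch] Circling back to the scimath and stride_tricks shims added above: both simply re-export their private implementations, so they can be exercised directly. A brief sketch of each (values are illustrative):

    import numpy as np
    from numpy.lib import scimath, stride_tricks

    # scimath.sqrt promotes to complex instead of returning nan
    assert scimath.sqrt(-1) == 1j

    # sliding_window_view yields overlapping windows without copying
    windows = stride_tricks.sliding_window_view(np.arange(5), 3)
    assert windows.shape == (3, 3)
    assert windows[0].tolist() == [0, 1, 2]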
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc new file mode 100644 index 0000000..9d3d392 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc new file mode 100644 index 0000000..f390886 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc new file mode 100644 index 0000000..c4c3dfe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc new file mode 100644 index 0000000..c7d50a1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc new file mode 100644 index 0000000..17dd09f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc new file mode 100644 index 0000000..7850412 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc new file mode 100644 index 0000000..bd89d0d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc new file mode 100644 index 0000000..69e2029 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc new file mode 100644 index 0000000..6af642b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc new file mode 100644 index 0000000..268e30e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc new file mode 100644 index 0000000..f06fa5f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc new file mode 100644 index 0000000..bf8e207 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc new file mode 100644 index 0000000..2f4fba6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy new file mode 100644 index 0000000..a6e9e23 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npy new file mode 100644 index 0000000..12936c9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npy differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npz b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npz new file mode 100644 index 0000000..68a3b53 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npz differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npy new file mode 100644 index 0000000..c9f33b0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npy differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npz b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npz new file mode 100644 index 0000000..fd7d9d3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npz differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/python3.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/python3.npy new file mode 100644 index 0000000..7c6997d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/python3.npy differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/win64python2.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/win64python2.npy new file mode 100644 index 0000000..d9bc36a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/win64python2.npy 
differ diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__datasource.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__datasource.py new file mode 100644 index 0000000..6513732 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__datasource.py @@ -0,0 +1,352 @@ +import os +import urllib.request as urllib_request +from shutil import rmtree +from tempfile import NamedTemporaryFile, mkdtemp, mkstemp +from urllib.error import URLError +from urllib.parse import urlparse + +import pytest + +import numpy.lib._datasource as datasource +from numpy.testing import assert_, assert_equal, assert_raises + + +def urlopen_stub(url, data=None): + '''Stub to replace urlopen for testing.''' + if url == valid_httpurl(): + tmpfile = NamedTemporaryFile(prefix='urltmp_') + return tmpfile + else: + raise URLError('Name or service not known') + + +# setup and teardown +old_urlopen = None + + +def setup_module(): + global old_urlopen + + old_urlopen = urllib_request.urlopen + urllib_request.urlopen = urlopen_stub + + +def teardown_module(): + urllib_request.urlopen = old_urlopen + + +# A valid website for more robust testing +http_path = 'http://www.google.com/' +http_file = 'index.html' + +http_fakepath = 'http://fake.abc.web/site/' +http_fakefile = 'fake.txt' + +malicious_files = ['/etc/shadow', '../../shadow', + '..\\system.dat', 'c:\\windows\\system.dat'] + +magic_line = b'three is the magic number' + + +# Utility functions used by many tests +def valid_textfile(filedir): + # Generate and return a valid temporary file. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) + os.close(fd) + return path + + +def invalid_textfile(filedir): + # Generate and return an invalid filename. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) + os.close(fd) + os.remove(path) + return path + + +def valid_httpurl(): + return http_path + http_file + + +def invalid_httpurl(): + return http_fakepath + http_fakefile + + +def valid_baseurl(): + return http_path + + +def invalid_baseurl(): + return http_fakepath + + +def valid_httpfile(): + return http_file + + +def invalid_httpfile(): + return http_fakefile + + +class TestDataSourceOpen: + def setup_method(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + fh = self.ds.open(valid_httpurl()) + assert_(fh) + fh.close() + + def test_InvalidHTTP(self): + url = invalid_httpurl() + assert_raises(OSError, self.ds.open, url) + try: + self.ds.open(url) + except OSError as e: + # Regression test for bug fixed in r4342. + assert_(e.errno is None) + + def test_InvalidHTTPCacheURLError(self): + assert_raises(URLError, self.ds._cache, invalid_httpurl()) + + def test_ValidFile(self): + local_file = valid_textfile(self.tmpdir) + fh = self.ds.open(local_file) + assert_(fh) + fh.close() + + def test_InvalidFile(self): + invalid_file = invalid_textfile(self.tmpdir) + assert_raises(OSError, self.ds.open, invalid_file) + + def test_ValidGzipFile(self): + try: + import gzip + except ImportError: + # We don't have the gzip capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for Gzip files. 
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + fp = gzip.open(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + def test_ValidBz2File(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for BZip2 files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + +class TestDataSourceExists: + def setup_method(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + assert_(self.ds.exists(valid_httpurl())) + + def test_InvalidHTTP(self): + assert_equal(self.ds.exists(invalid_httpurl()), False) + + def test_ValidFile(self): + # Test valid file in destpath + tmpfile = valid_textfile(self.tmpdir) + assert_(self.ds.exists(tmpfile)) + # Test valid local file not in destpath + localdir = mkdtemp() + tmpfile = valid_textfile(localdir) + assert_(self.ds.exists(tmpfile)) + rmtree(localdir) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.ds.exists(tmpfile), False) + + +class TestDataSourceAbspath: + def setup_method(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_equal(local_path, self.ds.abspath(valid_httpurl())) + + def test_ValidFile(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_equal(tmpfile, self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_equal(tmpfile, self.ds.abspath(tmpfile)) + + def test_InvalidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_(invalidhttp != self.ds.abspath(valid_httpurl())) + + def test_InvalidFile(self): + invalidfile = valid_textfile(self.tmpdir) + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_(invalidfile != self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_(invalidfile != self.ds.abspath(tmpfile)) + + def test_sandboxing(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + + tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + + assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(tmpfile).startswith(self.tmpdir)) + assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path + fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_ValidFile() + self.test_InvalidHTTP() + self.test_InvalidFile() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class 
TestRepositoryAbspath: + def setup_method(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.repos._destpath, netloc, + upath.strip(os.sep).strip('/')) + filepath = self.repos.abspath(valid_httpfile()) + assert_equal(local_path, filepath) + + def test_sandboxing(self): + tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) + assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path + fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryExists: + def setup_method(self): + self.tmpdir = mkdtemp() + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidFile(self): + # Create local temp file + tmpfile = valid_textfile(self.tmpdir) + assert_(self.repos.exists(tmpfile)) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.repos.exists(tmpfile), False) + + def test_RemoveHTTPFile(self): + assert_(self.repos.exists(valid_httpurl())) + + def test_CachedHTTPFile(self): + localfile = valid_httpurl() + # Create a locally cached temp file with an URL based + # directory structure. This is similar to what Repository.open + # would do. + scheme, netloc, upath, pms, qry, frg = urlparse(localfile) + local_path = os.path.join(self.repos._destpath, netloc) + os.mkdir(local_path, 0o0700) + tmpfile = valid_textfile(local_path) + assert_(self.repos.exists(tmpfile)) + + +class TestOpenFunc: + def setup_method(self): + self.tmpdir = mkdtemp() + + def teardown_method(self): + rmtree(self.tmpdir) + + def test_DataSourceOpen(self): + local_file = valid_textfile(self.tmpdir) + # Test case where destpath is passed in + fp = datasource.open(local_file, destpath=self.tmpdir) + assert_(fp) + fp.close() + # Test case where default destpath is used + fp = datasource.open(local_file) + assert_(fp) + fp.close() + +def test_del_attr_handling(): + # DataSource __del__ can be called + # even if __init__ fails when the + # Exception object is caught by the + # caller as happens in refguide_check + # is_deprecated() function + + ds = datasource.DataSource() + # simulate failed __init__ by removing key attribute + # produced within __init__ and expected by __del__ + del ds._istmpdest + # should not raise an AttributeError if __del__ + # gracefully handles failed __init__: + ds.__del__() diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__iotools.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__iotools.py new file mode 100644 index 0000000..1581ffb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__iotools.py @@ -0,0 +1,360 @@ +import time +from datetime import date + +import numpy as np +from numpy.lib._iotools import ( + LineSplitter, + NameValidator, + StringConverter, + easy_dtype, + flatten_dtype, + has_nested_fields, +) +from numpy.testing import ( + assert_, + assert_allclose, + assert_equal, + assert_raises, +) + + +class TestLineSplitter: + "Tests the LineSplitter class." 
+ + def test_no_delimiter(self): + "Test LineSplitter w/o delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter()(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + test = LineSplitter('')(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + + def test_space_delimiter(self): + "Test space delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(' ')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + test = LineSplitter(' ')(strg) + assert_equal(test, ['1 2 3 4', '5']) + + def test_tab_delimiter(self): + "Test tab delimiter" + strg = " 1\t 2\t 3\t 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1', '2', '3', '4', '5 6']) + strg = " 1 2\t 3 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1 2', '3 4', '5 6']) + + def test_other_delimiter(self): + "Test LineSplitter on delimiter" + strg = "1,2,3,4,,5" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + # + strg = " 1,2,3,4,,5 # test" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + # gh-11028 bytes comment/delimiters should get encoded + strg = b" 1,2,3,4,,5 % test" + test = LineSplitter(delimiter=b',', comments=b'%')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + def test_constant_fixed_width(self): + "Test LineSplitter w/ fixed-width fields" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(3)(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5', '']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(20)(strg) + assert_equal(test, ['1 3 4 5 6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(30)(strg) + assert_equal(test, ['1 3 4 5 6']) + + def test_variable_fixed_width(self): + strg = " 1 3 4 5 6# test" + test = LineSplitter((3, 6, 6, 3))(strg) + assert_equal(test, ['1', '3', '4 5', '6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter((6, 6, 9))(strg) + assert_equal(test, ['1', '3 4', '5 6']) + +# ----------------------------------------------------------------------------- + + +class TestNameValidator: + + def test_case_sensitivity(self): + "Test case sensitivity" + names = ['A', 'a', 'b', 'c'] + test = NameValidator().validate(names) + assert_equal(test, ['A', 'a', 'b', 'c']) + test = NameValidator(case_sensitive=False).validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='upper').validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='lower').validate(names) + assert_equal(test, ['a', 'a_1', 'b', 'c']) + + # check exceptions + assert_raises(ValueError, NameValidator, case_sensitive='foobar') + + def test_excludelist(self): + "Test excludelist" + names = ['dates', 'data', 'Other Data', 'mask'] + validator = NameValidator(excludelist=['dates', 'data', 'mask']) + test = validator.validate(names) + assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) + + def test_missing_names(self): + "Test validate missing names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist), ['a', 'b', 'c']) + namelist = ('', 'b', 'c') + assert_equal(validator(namelist), ['f0', 'b', 'c']) + namelist = ('a', 'b', '') + assert_equal(validator(namelist), ['a', 'b', 'f0']) + namelist = ('', 'f0', '') + assert_equal(validator(namelist), ['f1', 'f0', 'f2']) + + def test_validate_nb_names(self): + "Test validate nb names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist, nbfields=1), ('a',)) + 
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), + ['a', 'b', 'c', 'g0', 'g1']) + + def test_validate_wo_names(self): + "Test validate no names" + namelist = None + validator = NameValidator() + assert_(validator(namelist) is None) + assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) + +# ----------------------------------------------------------------------------- + + +def _bytes_to_date(s): + return date(*time.strptime(s, "%Y-%m-%d")[:3]) + + +class TestStringConverter: + "Test StringConverter" + + def test_creation(self): + "Test creation of a StringConverter" + converter = StringConverter(int, -99999) + assert_equal(converter._status, 1) + assert_equal(converter.default, -99999) + + def test_upgrade(self): + "Tests the upgrade method." + + converter = StringConverter() + assert_equal(converter._status, 0) + + # test int + assert_equal(converter.upgrade('0'), 0) + assert_equal(converter._status, 1) + + # On systems where long defaults to 32-bit, the statuses will be + # offset by one, so we check for this here. + import numpy._core.numeric as nx + status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) + + # test int > 2**32 + assert_equal(converter.upgrade('17179869184'), 17179869184) + assert_equal(converter._status, 1 + status_offset) + + # test float + assert_allclose(converter.upgrade('0.'), 0.0) + assert_equal(converter._status, 2 + status_offset) + + # test complex + assert_equal(converter.upgrade('0j'), complex('0j')) + assert_equal(converter._status, 3 + status_offset) + + # test str + # note that the longdouble type has been skipped, so the + # _status increases by 2. Everything should succeed with + # unicode conversion (8). + for s in ['a', b'a']: + res = converter.upgrade(s) + assert_(type(res) is str) + assert_equal(res, 'a') + assert_equal(converter._status, 8 + status_offset) + + def test_missing(self): + "Tests the use of missing values." 
+ converter = StringConverter(missing_values=('missing', + 'missed')) + converter.upgrade('0') + assert_equal(converter('0'), 0) + assert_equal(converter(''), converter.default) + assert_equal(converter('missing'), converter.default) + assert_equal(converter('missed'), converter.default) + try: + converter('miss') + except ValueError: + pass + + def test_upgrademapper(self): + "Tests updatemapper" + dateparser = _bytes_to_date + _original_mapper = StringConverter._mapper[:] + try: + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, date(2001, 1, 1)) + test = convert('2009-01-01') + assert_equal(test, date(2009, 1, 1)) + test = convert('') + assert_equal(test, date(2000, 1, 1)) + finally: + StringConverter._mapper = _original_mapper + + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + old_mapper = StringConverter._mapper[:] # copy of list + conv = StringConverter(_bytes_to_date) + assert_equal(conv._mapper, old_mapper) + assert_(hasattr(conv, 'default')) + + def test_keep_default(self): + "Make sure we don't lose an explicit default" + converter = StringConverter(None, missing_values='', + default=-999) + converter.upgrade('3.14159265') + assert_equal(converter.default, -999) + assert_equal(converter.type, np.dtype(float)) + # + converter = StringConverter( + None, missing_values='', default=0) + converter.upgrade('3.14159265') + assert_equal(converter.default, 0) + assert_equal(converter.type, np.dtype(float)) + + def test_keep_default_zero(self): + "Check that we don't lose a default of 0" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal(converter.default, 0) + + def test_keep_missing_values(self): + "Check that we're not losing missing values" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal( + converter.missing_values, {'', 'N/A'}) + + def test_int64_dtype(self): + "Check that int64 integer types can be specified" + converter = StringConverter(np.int64, default=0) + val = "-9223372036854775807" + assert_(converter(val) == -9223372036854775807) + val = "9223372036854775807" + assert_(converter(val) == 9223372036854775807) + + def test_uint64_dtype(self): + "Check that uint64 integer types can be specified" + converter = StringConverter(np.uint64, default=0) + val = "9223372043271415339" + assert_(converter(val) == 9223372043271415339) + + +class TestMiscFunctions: + + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + + def test_easy_dtype(self): + "Test ndtype on dtypes" + # Simple case + ndtype = float + assert_equal(easy_dtype(ndtype), np.dtype(float)) + # As string w/o names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', "i4"), ('f1', "f8")])) + # As string w/o names but different default format + assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), + np.dtype([('field_000', "i4"), ('field_001', "f8")])) + # As string w/ names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (too many) + ndtype = "i4, f8" + 
assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (not enough) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names=", b"), + np.dtype([('f0', "i4"), ('b', "f8")])) + # ... (with different default format) + assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), + np.dtype([('a', "i4"), ('f00', "f8")])) + # As list of tuples w/o names + ndtype = [('A', int), ('B', float)] + assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) + # As list of tuples w/ names + assert_equal(easy_dtype(ndtype, names="a,b"), + np.dtype([('a', int), ('b', float)])) + # As list of tuples w/ not enough names + assert_equal(easy_dtype(ndtype, names="a"), + np.dtype([('a', int), ('f0', float)])) + # As list of tuples w/ too many names + assert_equal(easy_dtype(ndtype, names="a,b,c"), + np.dtype([('a', int), ('b', float)])) + # As list of types w/o names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', int), ('f1', float), ('f2', float)])) + # As list of types w names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', int), ('b', float), ('c', float)])) + # As simple dtype w/ names + ndtype = np.dtype(float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([(_, float) for _ in ('a', 'b', 'c')])) + # As simple dtype w/o names (but multiple fields) + ndtype = np.dtype(float) + assert_equal( + easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), + np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__version.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__version.py new file mode 100644 index 0000000..6e6a34a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__version.py @@ -0,0 +1,64 @@ +"""Tests for the NumpyVersion class. + +""" +from numpy.lib import NumpyVersion +from numpy.testing import assert_, assert_raises + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. 
+ assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_dev0_version(): + assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') + + +def test_dev0_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') + assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_array_utils.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_array_utils.py new file mode 100644 index 0000000..55b9d28 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_array_utils.py @@ -0,0 +1,32 @@ +import numpy as np +from numpy.lib import array_utils +from numpy.testing import assert_equal + + +class TestByteBounds: + def test_byte_bounds(self): + # pointer difference matches size * itemsize + # due to contiguity + a = np.arange(12).reshape(3, 4) + low, high = array_utils.byte_bounds(a) + assert_equal(high - low, a.size * a.itemsize) + + def test_unusual_order_positive_stride(self): + a = np.arange(12).reshape(3, 4) + b = a.T + low, high = array_utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_unusual_order_negative_stride(self): + a = np.arange(12).reshape(3, 4) + b = a.T[::-1] + low, high = array_utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_strided(self): + a = np.arange(12) + b = a[::2] + low, high = array_utils.byte_bounds(b) + # the largest pointer address is lost (even numbers only in the + # stride), and compensate addresses for striding by 2 + assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraypad.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraypad.py new file mode 100644 index 0000000..6efbe34 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraypad.py @@ -0,0 +1,1415 @@ +"""Tests for the array padding functions. 
+ +""" +import pytest + +import numpy as np +from numpy.lib._arraypad_impl import _as_pairs +from numpy.testing import assert_allclose, assert_array_equal, assert_equal + +_numeric_dtypes = ( + np._core.sctypes["uint"] + + np._core.sctypes["int"] + + np._core.sctypes["float"] + + np._core.sctypes["complex"] +) +_all_modes = { + 'constant': {'constant_values': 0}, + 'edge': {}, + 'linear_ramp': {'end_values': 0}, + 'maximum': {'stat_length': None}, + 'mean': {'stat_length': None}, + 'median': {'stat_length': None}, + 'minimum': {'stat_length': None}, + 'reflect': {'reflect_type': 'even'}, + 'symmetric': {'reflect_type': 'even'}, + 'wrap': {}, + 'empty': {} +} + + +class TestAsPairs: + def test_single_value(self): + """Test casting for a single value.""" + expected = np.array([[3, 3]] * 10) + for x in (3, [3], [[3]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # Test with dtype=object + obj = object() + assert_equal( + _as_pairs(obj, 10), + np.array([[obj, obj]] * 10) + ) + + def test_two_values(self): + """Test proper casting for two different values.""" + # Broadcasting in the first dimension with numbers + expected = np.array([[3, 4]] * 10) + for x in ([3, 4], [[3, 4]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # and with dtype=object + obj = object() + assert_equal( + _as_pairs(["a", obj], 10), + np.array([["a", obj]] * 10) + ) + + # Broadcasting in the second / last dimension with numbers + assert_equal( + _as_pairs([[3], [4]], 2), + np.array([[3, 3], [4, 4]]) + ) + # and with dtype=object + assert_equal( + _as_pairs([["a"], [obj]], 2), + np.array([["a", "a"], [obj, obj]]) + ) + + def test_with_none(self): + expected = ((None, None), (None, None), (None, None)) + assert_equal( + _as_pairs(None, 3, as_index=False), + expected + ) + assert_equal( + _as_pairs(None, 3, as_index=True), + expected + ) + + def test_pass_through(self): + """Test if `x` already matching desired output are passed through.""" + expected = np.arange(12).reshape((6, 2)) + assert_equal( + _as_pairs(expected, 6), + expected + ) + + def test_as_index(self): + """Test results if `as_index=True`.""" + assert_equal( + _as_pairs([2.6, 3.3], 10, as_index=True), + np.array([[3, 3]] * 10, dtype=np.intp) + ) + assert_equal( + _as_pairs([2.6, 4.49], 10, as_index=True), + np.array([[3, 4]] * 10, dtype=np.intp) + ) + for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]], + [[1, 2]] * 9 + [[1, -2]]): + with pytest.raises(ValueError, match="negative values"): + _as_pairs(x, 10, as_index=True) + + def test_exceptions(self): + """Ensure faulty usage is discovered.""" + with pytest.raises(ValueError, match="more dimensions than allowed"): + _as_pairs([[[3]]], 10) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs([[1, 2], [3, 4]], 3) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs(np.ones((2, 3)), 3) + + +class TestConditionalShortcuts: + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_padding_shortcuts(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(0, 0) for _ in test.shape] + assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_shallow_statistic_range(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(1, 1) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode='edge'), + np.pad(test, pad_amt, mode=mode, stat_length=1)) + + @pytest.mark.parametrize("mode", 
['maximum', 'mean', 'median', 'minimum',]) + def test_clip_statistic_range(self, mode): + test = np.arange(30).reshape(5, 6) + pad_amt = [(3, 3) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode=mode), + np.pad(test, pad_amt, mode=mode, stat_length=30)) + + +class TestStatistic: + def test_check_mean_stat_length(self): + a = np.arange(100).astype('f') + a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) + b = np.array( + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. + ]) + assert_array_equal(a, b) + + def test_check_maximum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] + ) + assert_array_equal(a, b) + + def test_check_maximum_2(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_maximum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum', stat_length=10) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + 
assert_array_equal(a, b) + + def test_check_minimum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_minimum_2(self): + a = np.arange(100) + 2 + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + assert_array_equal(a, b) + + def test_check_minimum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'minimum', stat_length=10) + b = np.array( + [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] + ) + assert_array_equal(a, b) + + def test_check_median(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'median') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + def test_check_median_01(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a, 1, 'median') + b = np.array( + [[4, 4, 5, 4, 4], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [4, 4, 5, 4, 4]] + ) + assert_array_equal(a, b) + + def test_check_median_02(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a.T, 1, 'median').T + b = np.array( + [[5, 4, 5, 4, 5], + 
+ [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [5, 4, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_median_stat_length(self): + a = np.arange(100).astype('f') + a[1] = 2. + a[97] = 96. + a = np.pad(a, (25, 20), 'median', stat_length=(3, 5)) + b = np.array( + [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., + + 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., + + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] + ) + assert_array_equal(a, b) + + def test_check_mean_shape_one(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'mean', stat_length=2) + b = np.array( + [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_mean_2(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'mean') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("mode", [ + "mean", + "median", + "minimum", + "maximum" + ]) + def test_same_prepend_append(self, mode): + """ Test that appended and prepended values are equal """ + # This test is constructed to trigger floating point rounding errors in + # a way that caused gh-11216 for mode=='mean' + a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64) + a = np.pad(a, (1, 1), mode) + assert_equal(a[0], a[-1]) + + @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"]) + @pytest.mark.parametrize( + "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))] + ) + def test_check_negative_stat_length(self, mode, stat_length): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative 
values" + with pytest.raises(ValueError, match=match): + np.pad(arr, 2, mode, stat_length=stat_length) + + def test_simple_stat_length(self): + a = np.arange(30) + a = np.reshape(a, (6, 5)) + a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) + b = np.array( + [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + + [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], + [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], + + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] + ) + assert_array_equal(a, b) + + @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in( scalar)? divide:RuntimeWarning" + ) + @pytest.mark.parametrize("mode", ["mean", "median"]) + def test_zero_stat_length_valid(self, mode): + arr = np.pad([1., 2.], (1, 2), mode, stat_length=0) + expected = np.array([np.nan, 1., 2., np.nan, np.nan]) + assert_equal(arr, expected) + + @pytest.mark.parametrize("mode", ["minimum", "maximum"]) + def test_zero_stat_length_invalid(self, mode): + match = "stat_length of 0 yields no value for padding" + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=(1, 0)) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=(1, 0)) + + +class TestConstant: + def test_check_constant(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20)) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] + ) + assert_array_equal(a, b) + + def test_check_constant_zeros(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_constant_float(self): + # If input array is int, but constant_values are float, the dtype of + # the array to be padded is kept + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, (1, 2), mode='constant', + constant_values=1.1) + expected = np.array( + [[1, 1, 1, 1, 1, 1, 1, 1, 1], + + [1, 0, 1, 2, 3, 4, 5, 1, 
1], + [1, 6, 7, 8, 9, 10, 11, 1, 1], + [1, 12, 13, 14, 15, 16, 17, 1, 1], + [1, 18, 19, 20, 21, 22, 23, 1, 1], + [1, 24, 25, 26, 27, 28, 29, 1, 1], + + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float2(self): + # If input array is float, and constant_values are float, the dtype of + # the array to be padded is kept - here retaining the float constants + arr = np.arange(30).reshape(5, 6) + arr_float = arr.astype(np.float64) + test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant', + constant_values=1.1) + expected = np.array( + [[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + + [1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], # noqa: E203 + [1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], # noqa: E203 + [1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], # noqa: E203 + [1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], # noqa: E203 + [1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], # noqa: E203 + + [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float3(self): + a = np.arange(100, dtype=float) + a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) + b = np.array( + [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] + ) + assert_allclose(a, b) + + def test_check_constant_odd_pad_amount(self): + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, ((1,), (2,)), mode='constant', + constant_values=3) + expected = np.array( + [[3, 3, 3, 3, 3, 3, 3, 3, 3, 3], + + [3, 3, 0, 1, 2, 3, 4, 5, 3, 3], + [3, 3, 6, 7, 8, 9, 10, 11, 3, 3], + [3, 3, 12, 13, 14, 15, 16, 17, 3, 3], + [3, 3, 18, 19, 20, 21, 22, 23, 3, 3], + [3, 3, 24, 25, 26, 27, 28, 29, 3, 3], + + [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] + ) + assert_allclose(test, expected) + + def test_check_constant_pad_2d(self): + arr = np.arange(4).reshape(2, 2) + test = np.pad(arr, ((1, 2), (1, 3)), mode='constant', + constant_values=((1, 2), (3, 4))) + expected = np.array( + [[3, 1, 1, 4, 4, 4], + [3, 0, 1, 4, 4, 4], + [3, 2, 3, 4, 4, 4], + [3, 2, 2, 4, 4, 4], + [3, 2, 2, 4, 4, 4]] + ) + assert_allclose(test, expected) + + def test_check_large_integers(self): + uint64_max = 2 ** 64 - 1 + arr = np.full(5, uint64_max, dtype=np.uint64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, uint64_max, dtype=np.uint64) + assert_array_equal(test, expected) + + int64_max = 2 ** 63 - 1 + arr = np.full(5, int64_max, dtype=np.int64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, int64_max, dtype=np.int64) + assert_array_equal(test, expected) + + def test_check_object_array(self): + arr = np.empty(1, dtype=object) + obj_a = object() + arr[0] = obj_a + obj_b = object() + obj_c = object() + arr = np.pad(arr, pad_width=1, mode='constant', + 
constant_values=(obj_b, obj_c)) + + expected = np.empty((3,), dtype=object) + expected[0] = obj_b + expected[1] = obj_a + expected[2] = obj_c + + assert_array_equal(arr, expected) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="constant") + assert result.shape == (3, 4, 4) + + +class TestLinearRamp: + def test_check_simple(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) + b = np.array( + [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, + 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, + 0.80, 0.64, 0.48, 0.32, 0.16, + + 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, + 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, + 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, + 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, + 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, + 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, + 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, + 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, + 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, + + 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, + 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] + ) + assert_allclose(a, b, rtol=1e-5, atol=1e-5) + + def test_check_2d(self): + arr = np.arange(20).reshape(4, 5).astype(np.float64) + test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) + expected = np.array( + [[0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], + [0., 0., 0., 1., 2., 3., 4., 2., 0.], + [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], + [0., 5., 10., 11., 12., 13., 14., 7., 0.], + [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], + [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) + assert_allclose(test, expected) + + @pytest.mark.xfail(exceptions=(AssertionError,)) + def test_object_array(self): + from fractions import Fraction + arr = np.array([Fraction(1, 2), Fraction(-1, 2)]) + actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0) + + # deliberately chosen to have a non-power-of-2 denominator such that + # rounding to floats causes a failure. + expected = np.array([ + Fraction( 0, 12), + Fraction( 3, 12), + Fraction( 6, 12), + Fraction(-6, 12), + Fraction(-4, 12), + Fraction(-2, 12), + Fraction(-0, 12), + ]) + assert_equal(actual, expected) + + def test_end_values(self): + """Ensure that end values are exact.""" + a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp") + assert_equal(a[:, 0], 0.) + assert_equal(a[:, -1], 0.) + assert_equal(a[0, :], 0.) + assert_equal(a[-1, :], 0.) + + @pytest.mark.parametrize("dtype", _numeric_dtypes) + def test_negative_difference(self, dtype): + """ + Check correct behavior of unsigned dtypes if there is a negative + difference between the edge to pad and `end_values`. Check both cases + to be independent of implementation. Test behavior for all other dtypes + in case dtype casting interferes with complex dtypes. See gh-14191. 
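+
+        As a concrete instance of the case being guarded against (asserted
+        below): an unsigned array [3] padded with end_values=0 must ramp
+        down to [0, 1, 2, 3, 2, 1, 0] rather than wrapping around below
+        zero.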
+ """ + x = np.array([3], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=0) + expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + assert_equal(result, expected) + + x = np.array([0], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=3) + expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) + assert_equal(result, expected) + + +class TestReflect: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect') + b = np.array( + [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect', reflect_type='odd') + b = np.array( + [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, + -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, + -5, -4, -3, -2, -1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def 
test_check_01(self): + a = np.pad([1, 2, 3], 2, 'reflect') + b = np.array([3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'reflect') + b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 4, 'reflect') + b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_04(self): + a = np.pad([1, 2, 3], [1, 10], 'reflect') + b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_05(self): + a = np.pad([1, 2, 3, 4], [45, 10], 'reflect') + b = np.array( + [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, + 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_06(self): + a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric') + b = np.array( + [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, + 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, + 3] + ) + assert_array_equal(a, b) + + def test_check_07(self): + a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric') + b = np.array( + [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, + 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, + 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, + 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, + 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, + 6, 6, 5, 4]) + assert_array_equal(a, b) + + +class TestEmptyArray: + """Check how padding behaves on arrays with an empty dimension.""" + + @pytest.mark.parametrize( + # Keep parametrization ordered, otherwise pytest-xdist might believe + # that different tests were collected during parallelization + "mode", sorted(_all_modes.keys() - {"constant", "empty"}) + ) + def test_pad_empty_dimension(self, mode): + match = ("can't extend empty axis 0 using modes other than 'constant' " + "or 'empty'") + with pytest.raises(ValueError, match=match): + np.pad([], 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.ndarray(0), 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_pad_non_empty_dimension(self, mode): + result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode) + assert result.shape == (8, 0, 4) + + +class TestSymmetric: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric') + b = np.array( + [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, + 4, 3, 2, 1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, + 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd') + b = np.array( + [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, + -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, + -4, -3, -2, -1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + + assert_array_equal(a, b) + + def test_check_large_pad_odd(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd') + b = np.array( + [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 2, 'symmetric') + b = np.array([2, 1, 1, 2, 3, 3, 2]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'symmetric') + b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 6, 'symmetric') + b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestWrap: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'wrap') + b = np.array( + [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, + + 0, 
1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = np.arange(12) + a = np.reshape(a, (3, 4)) + a = np.pad(a, (10, 12), 'wrap') + b = np.array( + [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 3, 'wrap') + b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 4, 'wrap') + b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) + assert_array_equal(a, b) + + def test_pad_with_zero(self): + a = np.ones((3, 5)) + b = np.pad(a, (0, 5), mode="wrap") + assert_array_equal(a, b[:-5, :-5]) + + def test_repeated_wrapping(self): + """ + Check wrapping on each side individually if the wrapped area is longer + than the original array. 
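+
+        Concretely (asserted below): np.pad(np.arange(5), (12, 0), "wrap")
+        must equal np.r_[a, a, a, a][3:], i.e. whole copies of `a` tiled
+        leftwards until the 12 requested values are filled.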
+ """ + a = np.arange(5) + b = np.pad(a, (12, 0), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][3:], b) + + a = np.arange(5) + b = np.pad(a, (0, 12), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][:-3], b) + + def test_repeated_wrapping_multiple_origin(self): + """ + Assert that 'wrap' pads only with multiples of the original area if + the pad width is larger than the original array. + """ + a = np.arange(4).reshape(2, 2) + a = np.pad(a, [(1, 3), (3, 1)], mode='wrap') + b = np.array( + [[3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0]] + ) + assert_array_equal(a, b) + + +class TestEdge: + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, ((2, 3), (3, 2)), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + def test_check_width_shape_1_2(self): + # Check a pad_width of the form ((1, 2),). + # Regression test for issue gh-7808. + a = np.array([1, 2, 3]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.array([1, 1, 2, 3, 3, 3]) + assert_array_equal(padded, expected) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + a = np.arange(24).reshape(2, 3, 4) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + +class TestEmpty: + def test_simple(self): + arr = np.arange(24).reshape(4, 6) + result = np.pad(arr, [(2, 3), (3, 1)], mode="empty") + assert result.shape == (9, 10) + assert_equal(arr, result[2:-3, 3:-1]) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="empty") + assert result.shape == (3, 4, 4) + + +def test_legacy_vector_functionality(): + def _padwithtens(vector, pad_width, iaxis, kwargs): + vector[:pad_width[0]] = 10 + vector[-pad_width[1]:] = 10 + + a = np.arange(6).reshape(2, 3) + a = np.pad(a, 2, _padwithtens) + b = np.array( + [[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]] + ) + assert_array_equal(a, b) + + +def test_unicode_mode(): + a = np.pad([1], 2, mode='constant') + b = np.array([0, 0, 1, 0, 0]) + assert_array_equal(a, b) + + +@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"]) +def test_object_input(mode): + # Regression test for issue gh-11395. 
+ a = np.full((4, 3), fill_value=None) + pad_amt = ((2, 3), (3, 2)) + b = np.full((9, 8), fill_value=None) + assert_array_equal(np.pad(a, pad_amt, mode=mode), b) + + +class TestPadWidth: + @pytest.mark.parametrize("pad_width", [ + (4, 5, 6, 7), + ((1,), (2,), (3,)), + ((1, 2), (3, 4), (5, 6)), + ((3, 4, 5), (0, 1, 2)), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "operands could not be broadcast together" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width_2(self, mode): + arr = np.arange(30).reshape((6, 5)) + match = ("input operand has more dimensions than allowed by the axis " + "remapping") + with pytest.raises(ValueError, match=match): + np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode) + + @pytest.mark.parametrize( + "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_negative_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("pad_width, dtype", [ + ("3", None), + ("word", None), + (None, None), + (object(), None), + (3.4, None), + (((2, 3, 4), (3, 2)), object), + (complex(1, -1), None), + (((-2.1, 3), (3, 2)), None), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_bad_type(self, pad_width, dtype, mode): + arr = np.arange(30).reshape((6, 5)) + match = "`pad_width` must be of integral type." + if dtype is not None: + # avoid DeprecationWarning when not specifying dtype + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width, dtype=dtype), mode) + else: + with pytest.raises(TypeError, match=match): + np.pad(arr, pad_width, mode) + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width), mode) + + def test_pad_width_as_ndarray(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape(6, 5) + assert_array_equal(arr, np.pad(arr, pad_width, mode=mode)) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_kwargs(mode): + """Test behavior of pad's kwargs for the given mode.""" + allowed = _all_modes[mode] + not_allowed = {} + for kwargs in _all_modes.values(): + if kwargs != allowed: + not_allowed.update(kwargs) + # Test if allowed keyword arguments pass + np.pad([1, 2, 3], 1, mode, **allowed) + # Test if prohibited keyword arguments of other modes raise an error + for key, value in not_allowed.items(): + match = f"unsupported keyword arguments for mode '{mode}'" + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 1, mode, **{key: value}) + + +def test_constant_zero_default(): + arr = np.array([1, 1]) + assert_array_equal(np.pad(arr, 2), [0, 
0, 1, 1, 0, 0])
+
+
+@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
+def test_unsupported_mode(mode):
+    match = f"mode '{mode}' is not supported"
+    with pytest.raises(ValueError, match=match):
+        np.pad([1, 2, 3], 4, mode=mode)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_non_contiguous_array(mode):
+    arr = np.arange(24).reshape(4, 6)[::2, ::2]
+    result = np.pad(arr, (2, 3), mode)
+    assert result.shape == (7, 8)
+    assert_equal(result[2:-3, 2:-3], arr)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_memory_layout_persistence(mode):
+    """Test if C and F order is preserved for all pad modes."""
+    x = np.ones((5, 10), order='C')
+    assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
+    x = np.ones((5, 10), order='F')
+    assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
+
+
+@pytest.mark.parametrize("dtype", _numeric_dtypes)
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_dtype_persistence(dtype, mode):
+    arr = np.zeros((3, 2, 1), dtype=dtype)
+    result = np.pad(arr, 1, mode=mode)
+    assert result.dtype == dtype
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraysetops.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraysetops.py
new file mode 100644
index 0000000..7865e1b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraysetops.py
@@ -0,0 +1,1074 @@
+"""Test functions for 1D array set operations.
+
+"""
+import pytest
+
+import numpy as np
+from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique
+from numpy.exceptions import AxisError
+from numpy.testing import (
+    assert_array_equal,
+    assert_equal,
+    assert_raises,
+    assert_raises_regex,
+)
+
+
+class TestSetOps:
+
+    def test_intersect1d(self):
+        # unique inputs
+        a = np.array([5, 7, 1, 2])
+        b = np.array([2, 4, 3, 1, 5])
+
+        ec = np.array([1, 2, 5])
+        c = intersect1d(a, b, assume_unique=True)
+        assert_array_equal(c, ec)
+
+        # non-unique inputs
+        a = np.array([5, 5, 7, 1, 2])
+        b = np.array([2, 1, 4, 3, 3, 1, 5])
+
+        ed = np.array([1, 2, 5])
+        c = intersect1d(a, b)
+        assert_array_equal(c, ed)
+        assert_array_equal([], intersect1d([], []))
+
+    def test_intersect1d_array_like(self):
+        # See gh-11772
+        class Test:
+            def __array__(self, dtype=None, copy=None):
+                return np.arange(3)
+
+        a = Test()
+        res = intersect1d(a, a)
+        assert_array_equal(res, a)
+        res = intersect1d([1, 2, 3], [1, 2, 3])
+        assert_array_equal(res, [1, 2, 3])
+
+    def test_intersect1d_indices(self):
+        # unique inputs
+        a = np.array([1, 2, 3, 4])
+        b = np.array([2, 1, 4, 6])
+        c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+        ee = np.array([1, 2, 4])
+        assert_array_equal(c, ee)
+        assert_array_equal(a[i1], ee)
+        assert_array_equal(b[i2], ee)
+
+        # non-unique inputs
+        a = np.array([1, 2, 2, 3, 4, 3, 2])
+        b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+        c, i1, i2 = intersect1d(a, b, return_indices=True)
+        ef = np.array([1, 2, 3, 4])
+        assert_array_equal(c, ef)
+        assert_array_equal(a[i1], ef)
+        assert_array_equal(b[i2], ef)
+
+        # non1d, unique inputs
+        a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+        b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+        c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+        ui1 = np.unravel_index(i1, a.shape)
+        ui2 = np.unravel_index(i2, b.shape)
+        ea = np.array([2, 6, 7, 8])
+        assert_array_equal(ea, a[ui1])
+        assert_array_equal(ea, b[ui2])
+
+        # non1d, not assumed to be unique inputs
+        a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+        b = np.array([[3, 2, 7,
7], [10, 12, 8, 7]]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + def test_setxor1d(self): + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([3, 4, 7]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 2, 3]) + b = np.array([6, 5, 4]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setxor1d([], [])) + + def test_setxor1d_unique(self): + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + a = np.array([[1], [8], [2], [3]]) + b = np.array([[6, 5], [4, 8]]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + def test_ediff1d(self): + zero_elem = np.array([]) + one_elem = np.array([1]) + two_elem = np.array([1, 2]) + + assert_array_equal([], ediff1d(zero_elem)) + assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) + assert_array_equal([0], ediff1d(zero_elem, to_end=0)) + assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) + assert_array_equal([], ediff1d(one_elem)) + assert_array_equal([1], ediff1d(two_elem)) + assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9)) + assert_array_equal([5, 6, 1, 7, 8], + ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8])) + assert_array_equal([1, 9], ediff1d(two_elem, to_end=9)) + assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8])) + assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7)) + assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6])) + + @pytest.mark.parametrize("ary, prepend, append, expected", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan, + 'to_end'), + # should fail because attempting + # to downcast to int type: + (np.array([1, 2, 3], dtype=np.int64), + np.array([5, 7, 2], dtype=np.float32), + None, + 'to_begin'), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary), + # `to_begin` is in the error message as the impl checks this first: + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan, + 'to_begin'), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = f'dtype of `{expected}` must be compatible' + with assert_raises_regex(TypeError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize( + "ary,prepend,append,expected", + [ + (np.array([1, 2, 3], dtype=np.int16), + 2**16, # will be cast to int16 under same kind rule. 
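+          # (Concretely: 2**16 wraps to 0 in int16 and 2**16 + 4 wraps to 4,
+          # which is why the expected result below is [0, 1, 1, 4].)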
+ 2**16 + 4, + np.array([0, 1, 1, 4], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.float32), + np.array([5], dtype=np.float64), + None, + np.array([5, 1, 1], dtype=np.float32)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ] + ) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + assert actual.dtype == expected.dtype + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin(self, kind): + def _isin_slow(a, b): + b = np.asarray(b).flatten().tolist() + return a in b + isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) + + def assert_isin_equal(a, b): + x = isin(a, b, kind=kind) + y = isin_slow(a, b) + assert_array_equal(x, y) + + # multidimensional arrays in both arguments + a = np.arange(24).reshape([2, 3, 4]) + b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) + assert_isin_equal(a, b) + + # array-likes as both arguments + c = [(9, 8), (7, 6)] + d = (9, 7) + assert_isin_equal(c, d) + + # zero-d array: + f = np.array(3) + assert_isin_equal(f, b) + assert_isin_equal(a, f) + assert_isin_equal(f, f) + + # scalar: + assert_isin_equal(5, b) + assert_isin_equal(a, 6) + assert_isin_equal(5, 6) + + # empty array-like: + if kind != "table": + # An empty list will become float64, + # which is invalid for kind="table" + x = [] + assert_isin_equal(x, b) + assert_isin_equal(a, x) + assert_isin_equal(x, x) + + # empty array with various types: + for dtype in [bool, np.int64, np.float64]: + if kind == "table" and dtype == np.float64: + continue + + if dtype in {np.int64, np.float64}: + ar = np.array([10, 20, 30], dtype=dtype) + elif dtype in {bool}: + ar = np.array([True, False, False]) + + empty_array = np.array([], dtype=dtype) + + assert_isin_equal(empty_array, ar) + assert_isin_equal(ar, empty_array) + assert_isin_equal(empty_array, empty_array) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_additional(self, kind): + # we use two different sizes for the b array here to test the + # two different paths in isin(). 
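+        # (Sketch of the two paths, as hinted at by the multiplier: a `b`
+        # that is small relative to `a` can be handled by looping over `b`
+        # with element-wise comparisons, while a larger `b` goes through
+        # the sort-based implementation.)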
+        for mult in (1, 10):
+            # One check without np.array to make sure lists are handled
+            # correctly
+            a = [5, 7, 1, 2]
+            b = [2, 4, 3, 1, 5] * mult
+            ec = np.array([True, False, True, True])
+            c = isin(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a[0] = 8
+            ec = np.array([False, False, True, True])
+            c = isin(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a[0], a[3] = 4, 8
+            ec = np.array([True, False, True, False])
+            c = isin(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+            b = [2, 3, 4] * mult
+            ec = [False, True, False, True, True, True, True, True, True,
+                  False, True, False, False, False]
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            b = b + [5, 5, 4] * mult
+            ec = [True, True, True, True, True, True, True, True, True, True,
+                  True, False, True, True]
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 2])
+            b = np.array([2, 4, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True])
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 1, 2])
+            b = np.array([2, 4, 3, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True, True])
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 5])
+            b = np.array([2, 2] * mult)
+            ec = np.array([False, False])
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+        a = np.array([5])
+        b = np.array([2])
+        ec = np.array([False])
+        c = isin(a, b, kind=kind)
+        assert_array_equal(c, ec)
+
+        if kind in {None, "sort"}:
+            assert_array_equal(isin([], [], kind=kind), [])
+
+    def test_isin_char_array(self):
+        a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
+        b = np.array(['a', 'c'])
+
+        ec = np.array([True, False, True, False, False, True, False, False])
+        c = isin(a, b)
+
+        assert_array_equal(c, ec)
+
+    @pytest.mark.parametrize("kind", [None, "sort", "table"])
+    def test_isin_invert(self, kind):
+        "Test isin's invert parameter"
+        # We use two different sizes for the b array here to test the
+        # two different paths in isin().
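+        # The property checked throughout: isin(a, b, invert=True) should be
+        # the element-wise negation of isin(a, b), i.e. equal to
+        # np.invert(isin(a, b, kind=kind)) for every kind.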
+ for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) + b = [2, 3, 4] * mult + assert_array_equal(np.invert(isin(a, b, kind=kind)), + isin(a, b, invert=True, kind=kind)) + + # float: + if kind in {None, "sort"}: + for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], + dtype=np.float32) + b = [2, 3, 4] * mult + b = np.array(b, dtype=np.float32) + assert_array_equal(np.invert(isin(a, b, kind=kind)), + isin(a, b, invert=True, kind=kind)) + + def test_isin_hit_alternate_algorithm(self): + """Hit the standard isin code with integers""" + # Need extreme range to hit standard code + # This hits it without the use of kind='table' + a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64) + b = np.array([2, 3, 4, 1e9], dtype=np.int64) + expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool) + assert_array_equal(expected, isin(a, b)) + assert_array_equal(np.invert(expected), isin(a, b, invert=True)) + + a = np.array([5, 7, 1, 2], dtype=np.int64) + b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64) + ec = np.array([True, False, True, True]) + c = isin(a, b, assume_unique=True) + assert_array_equal(c, ec) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_boolean(self, kind): + """Test that isin works for boolean input""" + a = np.array([True, False]) + b = np.array([False, False, False]) + expected = np.array([False, True]) + assert_array_equal(expected, + isin(a, b, kind=kind)) + assert_array_equal(np.invert(expected), + isin(a, b, invert=True, kind=kind)) + + @pytest.mark.parametrize("kind", [None, "sort"]) + def test_isin_timedelta(self, kind): + """Test that isin works for timedelta input""" + rstate = np.random.RandomState(0) + a = rstate.randint(0, 100, size=10) + b = rstate.randint(0, 100, size=10) + truth = isin(a, b) + a_timedelta = a.astype("timedelta64[s]") + b_timedelta = b.astype("timedelta64[s]") + assert_array_equal(truth, isin(a_timedelta, b_timedelta, kind=kind)) + + def test_isin_table_timedelta_fails(self): + a = np.array([0, 1, 2], dtype="timedelta64[s]") + b = a + # Make sure it raises a value error: + with pytest.raises(ValueError): + isin(a, b, kind="table") + + @pytest.mark.parametrize( + "dtype1,dtype2", + [ + (np.int8, np.int16), + (np.int16, np.int8), + (np.uint8, np.uint16), + (np.uint16, np.uint8), + (np.uint8, np.int16), + (np.int16, np.uint8), + (np.uint64, np.int64), + ] + ) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_dtype(self, dtype1, dtype2, kind): + """Test that isin works as expected for mixed dtype input.""" + is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger) + ar1 = np.array([0, 0, 1, 1], dtype=dtype1) + + if is_dtype2_signed: + ar2 = np.array([-128, 0, 127], dtype=dtype2) + else: + ar2 = np.array([127, 0, 255], dtype=dtype2) + + expected = np.array([True, True, False, False]) + + expect_failure = kind == "table" and ( + dtype1 == np.int16 and dtype2 == np.int8) + + if expect_failure: + with pytest.raises(RuntimeError, match="exceed the maximum"): + isin(ar1, ar2, kind=kind) + else: + assert_array_equal(isin(ar1, ar2, kind=kind), expected) + + @pytest.mark.parametrize("data", [ + np.array([2**63, 2**63 + 1], dtype=np.uint64), + np.array([-2**62, -2**62 - 1], dtype=np.int64), + ]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_huge_vals(self, kind, data): + """Test values outside intp range (negative ones if 32bit system)""" + query = data[1] + res = np.isin(data, query, kind=kind) + 
assert_array_equal(res, [False, True])
+        # Also check that nothing weird happens for values that can't
+        # possibly be in range.
+        data = data.astype(np.int32)  # clearly different values
+        res = np.isin(data, query, kind=kind)
+        assert_array_equal(res, [False, False])
+
+    @pytest.mark.parametrize("kind", [None, "sort", "table"])
+    def test_isin_mixed_boolean(self, kind):
+        """Test that isin works as expected for bool/int input."""
+        for dtype in np.typecodes["AllInteger"]:
+            a = np.array([True, False, False], dtype=bool)
+            b = np.array([0, 0, 0, 0], dtype=dtype)
+            expected = np.array([False, True, True], dtype=bool)
+            assert_array_equal(isin(a, b, kind=kind), expected)
+
+            a, b = b, a
+            expected = np.array([True, True, True, True], dtype=bool)
+            assert_array_equal(isin(a, b, kind=kind), expected)
+
+    def test_isin_first_array_is_object(self):
+        ar1 = [None]
+        ar2 = np.array([1] * 10)
+        expected = np.array([False])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_isin_second_array_is_object(self):
+        ar1 = 1
+        ar2 = np.array([None] * 10)
+        expected = np.array([False])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_isin_both_arrays_are_object(self):
+        ar1 = [None]
+        ar2 = np.array([None] * 10)
+        expected = np.array([True])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_isin_both_arrays_have_structured_dtype(self):
+        # Test arrays of a structured data type containing an integer field
+        # and a field of dtype `object` allowing for arbitrary Python objects
+        dt = np.dtype([('field1', int), ('field2', object)])
+        ar1 = np.array([(1, None)], dtype=dt)
+        ar2 = np.array([(1, None)] * 10, dtype=dt)
+        expected = np.array([True])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_isin_with_arrays_containing_tuples(self):
+        ar1 = np.array([(1,), 2], dtype=object)
+        ar2 = np.array([(1,), 2], dtype=object)
+        expected = np.array([True, True])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.isin(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+        # An integer is added at the end of the array to make sure
+        # that the array builder will create the array with tuples
+        # and after it's created the integer is removed.
+        # There's a bug in the array constructor that doesn't handle
+        # tuples properly and adding the integer fixes that.
+        ar1 = np.array([(1,), (2, 1), 1], dtype=object)
+        ar1 = ar1[:-1]
+        ar2 = np.array([(1,), (2, 1), 1], dtype=object)
+        ar2 = ar2[:-1]
+        expected = np.array([True, True])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.isin(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+        ar1 = np.array([(1,), (2, 3), 1], dtype=object)
+        ar1 = ar1[:-1]
+        ar2 = np.array([(1,), 2], dtype=object)
+        expected = np.array([True, False])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.isin(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+    def test_isin_errors(self):
+        """Test that isin raises expected errors."""
+
+        # Error 1: `kind` is not one of 'sort', 'table', or None.
+        ar1 = np.array([1, 2, 3, 4, 5])
+        ar2 = np.array([2, 4, 6, 8, 10])
+        assert_raises(ValueError, isin, ar1, ar2, kind='quicksort')
+
+        # Error 2: `kind="table"` does not work for non-integral arrays.
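+        # (kind="table" builds a lookup table indexed by integer value over
+        # the range of ar2, so it is only defined for integer and boolean
+        # input; other dtypes are expected to raise ValueError, as below.)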
+ obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object) + obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object) + assert_raises(ValueError, isin, obj_ar1, obj_ar2, kind='table') + + for dtype in [np.int32, np.int64]: + ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype) + # The range of this array will overflow: + overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype) + + # Error 3: `kind="table"` will trigger a runtime error + # if there is an integer overflow expected when computing the + # range of ar2 + assert_raises( + RuntimeError, + isin, ar1, overflow_ar2, kind='table' + ) + + # Non-error: `kind=None` will *not* trigger a runtime error + # if there is an integer overflow, it will switch to + # the `sort` algorithm. + result = np.isin(ar1, overflow_ar2, kind=None) + assert_array_equal(result, [True] + [False] * 4) + result = np.isin(ar1, overflow_ar2, kind='sort') + assert_array_equal(result, [True] + [False] * 4) + + def test_union1d(self): + a = np.array([5, 4, 7, 1, 2]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([1, 2, 3, 4, 5, 7]) + c = union1d(a, b) + assert_array_equal(c, ec) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = np.array([[0, 1, 2], [3, 4, 5]]) + y = np.array([0, 1, 2, 3, 4]) + ez = np.array([0, 1, 2, 3, 4, 5]) + z = union1d(x, y) + assert_array_equal(z, ez) + + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([6, 7]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + a = np.arange(21) + b = np.arange(19) + ec = np.array([19, 20]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setdiff1d([], [])) + a = np.array((), np.uint32) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_unique(self): + a = np.array([3, 2, 1]) + b = np.array([7, 5, 2]) + expected = np.array([3, 1]) + actual = setdiff1d(a, b, assume_unique=True) + assert_equal(actual, expected) + + def test_setdiff1d_char_array(self): + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + def test_manyways(self): + a = np.array([5, 7, 1, 2, 8]) + b = np.array([9, 8, 2, 4, 3, 1, 5]) + + c1 = setxor1d(a, b) + aux1 = intersect1d(a, b) + aux2 = union1d(a, b) + c2 = setdiff1d(aux2, aux1) + assert_array_equal(c1, c2) + + +class TestUnique: + + def check_all(self, a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, True, False, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, False, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, False, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, True, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, True, False, True) + 
assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, False, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + assert type(v) == type(b) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, True, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + assert type(v) == type(b) + + def get_types(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + return types + + def test_unique_1d(self): + + a = [5, 7, 1, 2, 1, 5, 7] * 10 + b = [1, 2, 5, 7] + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3] * 10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = self.get_types() + for dt in types: + aa = np.array(a, dt) + bb = np.array(b, dt) + self.check_all(aa, bb, i1, i2, c, dt) + + # test for object arrays + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + self.check_all(aa, bb, i1, i2, c, dt) + + # test for structured arrays + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + self.check_all(aa, bb, i1, i2, c, dt) + + # test for ticket #2799 + aa = [1. + 0.j, 1 - 1.j, 1] + assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + + # test for ticket #4785 + a = [(1, 2), (1, 2), (2, 3)] + unq = [1, 2, 3] + inv = [[0, 1], [0, 1], [1, 2]] + a1 = unique(a) + assert_array_equal(a1, unq) + a2, a2_inv = unique(a, return_inverse=True) + assert_array_equal(a2, unq) + assert_array_equal(a2_inv, inv) + + # test for chararrays with return_inverse (gh-5099) + a = np.char.chararray(5) + a[...] 
= '' + a2, a2_inv = np.unique(a, return_inverse=True) + assert_array_equal(a2_inv, np.zeros(5)) + + # test for ticket #9137 + a = [] + a1_idx = np.unique(a, return_index=True)[1] + a2_inv = np.unique(a, return_inverse=True)[1] + a3_idx, a3_inv = np.unique(a, return_index=True, + return_inverse=True)[1:] + assert_equal(a1_idx.dtype, np.intp) + assert_equal(a2_inv.dtype, np.intp) + assert_equal(a3_idx.dtype, np.intp) + assert_equal(a3_inv.dtype, np.intp) + + # test for ticket 2111 - float + a = [2.0, np.nan, 1.0, np.nan] + ua = [1.0, 2.0, np.nan] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - complex + a = [2.0 - 1j, np.nan, 1.0 + 1j, complex(0.0, np.nan), complex(1.0, np.nan)] + ua = [1.0 + 1j, 2.0 - 1j, complex(0.0, np.nan)] + ua_idx = [2, 0, 3] + ua_inv = [1, 2, 0, 2, 2] + ua_cnt = [1, 1, 3] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - datetime64 + nat = np.datetime64('nat') + a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] + ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - timedelta + nat = np.timedelta64('nat') + a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] + ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for gh-19300 + all_nans = [np.nan] * 4 + ua = [np.nan] + ua_idx = [0] + ua_inv = [0, 0, 0, 0] + ua_cnt = [4] + assert_equal(np.unique(all_nans), ua) + assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + + def test_unique_zero_sized(self): + # test for zero-sized arrays + for dt in self.get_types(): + a = np.array([], dt) + b = np.array([], dt) + i1 = np.array([], np.int64) + i2 = np.array([], np.int64) + c = np.array([], np.int64) + self.check_all(a, b, i1, i2, c, dt) + + def test_unique_subclass(self): + class Subclass(np.ndarray): + pass + + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3] * 10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = self.get_types() + for dt in types: + a = np.array([5, 7, 1, 2, 1, 5, 7] * 10, dtype=dt) + b = np.array([1, 2, 5, 7], dtype=dt) + aa = Subclass(a.shape, dtype=dt, buffer=a) + bb = Subclass(b.shape, dtype=dt, buffer=b) + self.check_all(aa, bb, i1, i2, c, dt) + + @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"]) + def test_unsupported_hash_based(self, arg): + """These currently never use 
the hash-based solution. However, + it seems easier to just allow it. + + When the hash-based solution is added, this test should fail and be + replaced with something more comprehensive. + """ + a = np.array([1, 5, 2, 3, 4, 8, 199, 1, 3, 5]) + + res_not_sorted = np.unique([1, 1], sorted=False, **{arg: True}) + res_sorted = np.unique([1, 1], sorted=True, **{arg: True}) + # The following should fail without first sorting `res_not_sorted`. + for arr, expected in zip(res_not_sorted, res_sorted): + assert_array_equal(arr, expected) + + def test_unique_axis_errors(self): + assert_raises(TypeError, self._run_axis_tests, object) + assert_raises(TypeError, self._run_axis_tests, + [('a', int), ('b', object)]) + + assert_raises(AxisError, unique, np.arange(10), axis=2) + assert_raises(AxisError, unique, np.arange(10), axis=-2) + + def test_unique_axis_list(self): + msg = "Unique failed on list of lists" + inp = [[0, 1, 0], [0, 1, 0]] + inp_arr = np.asarray(inp) + assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) + assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) + + def test_unique_axis(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + types.append([('a', int), ('b', int)]) + types.append([('a', int), ('b', float)]) + + for dtype in types: + self._run_axis_tests(dtype) + + msg = 'Non-bitwise-equal booleans test failed' + data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) + result = np.array([[False, True], [True, True]], dtype=bool) + assert_array_equal(unique(data, axis=0), result, msg) + + msg = 'Negative zero equality test failed' + data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) + result = np.array([[-0.0, 0.0]]) + assert_array_equal(unique(data, axis=0), result, msg) + + @pytest.mark.parametrize("axis", [0, -1]) + def test_unique_1d_with_axis(self, axis): + x = np.array([4, 3, 2, 3, 2, 1, 2, 2]) + uniq = unique(x, axis=axis) + assert_array_equal(uniq, [1, 2, 3, 4]) + + @pytest.mark.parametrize("axis", [None, 0, -1]) + def test_unique_inverse_with_axis(self, axis): + x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) + uniq, inv = unique(x, return_inverse=True, axis=axis) + assert_equal(inv.ndim, x.ndim if axis is None else 1) + assert_array_equal(x, np.take(uniq, inv, axis=axis)) + + def test_unique_axis_zeros(self): + # issue 15559 + single_zero = np.empty(shape=(2, 0), dtype=np.int8) + uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True, + return_inverse=True, return_counts=True) + + # there's 1 element of shape (0,) along axis 0 + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(1, 0))) + assert_array_equal(idx, np.array([0])) + assert_array_equal(inv, np.array([0, 0])) + assert_array_equal(cnt, np.array([2])) + + # there's 0 elements of shape (2,) along axis 1 + uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True, + return_inverse=True, return_counts=True) + + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(2, 0))) + assert_array_equal(idx, np.array([])) + assert_array_equal(inv, np.array([])) + assert_array_equal(cnt, np.array([])) + + # test a "complicated" shape + shape = (0, 2, 0, 3, 0, 4, 0) + multiple_zeros = np.empty(shape=shape) + for axis in range(len(shape)): + expected_shape = list(shape) + if shape[axis] == 0: + expected_shape[axis] = 0 + else: + expected_shape[axis] = 1 
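+            # (Every slice along a non-empty axis is an empty subarray here,
+            # so all slices compare equal and exactly one unique remains;
+            # along an axis of length 0 there is nothing to return.)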
+ + assert_array_equal(unique(multiple_zeros, axis=axis), + np.empty(shape=expected_shape)) + + def test_unique_masked(self): + # issue 8664 + x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], + dtype='uint8') + y = np.ma.masked_equal(x, 0) + + v = np.unique(y) + v2, i, c = np.unique(y, return_index=True, return_counts=True) + + msg = 'Unique returned different results when asked for index' + assert_array_equal(v.data, v2.data, msg) + assert_array_equal(v.mask, v2.mask, msg) + + def test_unique_sort_order_with_axis(self): + # These tests fail if sorting along axis is done by treating subarrays + # as unsigned byte strings. See gh-10495. + fmt = "sort order incorrect for integer type '%s'" + for dt in 'bhilq': + a = np.array([[-1], [0]], dt) + b = np.unique(a, axis=0) + assert_array_equal(a, b, fmt % dt) + + def _run_axis_tests(self, dtype): + data = np.array([[0, 1, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [1, 0, 0, 0]]).astype(dtype) + + msg = 'Unique with 1d array and axis=0 failed' + result = np.array([0, 1]) + assert_array_equal(unique(data), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=0 failed' + result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=1 failed' + result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) + assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) + + msg = 'Unique with 3d array and axis=2 failed' + data3d = np.array([[[1, 1], + [1, 0]], + [[0, 1], + [0, 0]]]).astype(dtype) + result = np.take(data3d, [1, 0], axis=2) + assert_array_equal(unique(data3d, axis=2), result, msg) + + uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=0" + assert_array_equal(data[idx], uniq, msg) + msg = "Unique's return_inverse=True failed with axis=0" + assert_array_equal(np.take(uniq, inv, axis=0), data) + msg = "Unique's return_counts=True failed with axis=0" + assert_array_equal(cnt, np.array([2, 2]), msg) + + uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=1" + assert_array_equal(data[:, idx], uniq) + msg = "Unique's return_inverse=True failed with axis=1" + assert_array_equal(np.take(uniq, inv, axis=1), data) + msg = "Unique's return_counts=True failed with axis=1" + assert_array_equal(cnt, np.array([2, 1, 1]), msg) + + def test_unique_nanequals(self): + # issue 20326 + a = np.array([1, 1, np.nan, np.nan, np.nan]) + unq = np.unique(a) + not_unq = np.unique(a, equal_nan=False) + assert_array_equal(unq, np.array([1, np.nan])) + assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) + + def test_unique_array_api_functions(self): + arr = np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1]) + + for res_unique_array_api, res_unique in [ + ( + np.unique_values(arr), + np.unique(arr, equal_nan=False) + ), + ( + np.unique_counts(arr), + np.unique(arr, return_counts=True, equal_nan=False) + ), + ( + np.unique_inverse(arr), + np.unique(arr, return_inverse=True, equal_nan=False) + ), + ( + np.unique_all(arr), + np.unique( + arr, + return_index=True, + return_inverse=True, + return_counts=True, + equal_nan=False + ) + ) + ]: + assert len(res_unique_array_api) == len(res_unique) + for actual, expected in zip(res_unique_array_api, res_unique): + assert_array_equal(actual, expected) + + def 
test_unique_inverse_shape(self): + # Regression test for https://github.com/numpy/numpy/issues/25552 + arr = np.array([[1, 2, 3], [2, 3, 1]]) + expected_values, expected_inverse = np.unique(arr, return_inverse=True) + expected_inverse = expected_inverse.reshape(arr.shape) + for func in np.unique_inverse, np.unique_all: + result = func(arr) + assert_array_equal(expected_values, result.values) + assert_array_equal(expected_inverse, result.inverse_indices) + assert_array_equal(arr, result.values[result.inverse_indices]) + + @pytest.mark.parametrize( + 'data', + [[[1, 1, 1], + [1, 1, 1]], + [1, 3, 2], + 1], + ) + @pytest.mark.parametrize('transpose', [False, True]) + @pytest.mark.parametrize('dtype', [np.int32, np.float64]) + def test_unique_with_matrix(self, data, transpose, dtype): + mat = np.matrix(data).astype(dtype) + if transpose: + mat = mat.T + u = np.unique(mat) + expected = np.unique(np.asarray(mat)) + assert_array_equal(u, expected, strict=True) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arrayterator.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arrayterator.py new file mode 100644 index 0000000..800c9a2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arrayterator.py @@ -0,0 +1,46 @@ +from functools import reduce +from operator import mul + +import numpy as np +from numpy.lib import Arrayterator +from numpy.random import randint +from numpy.testing import assert_ + + +def test(): + np.random.seed(np.arange(10)) + + # Create a random array + ndims = randint(5) + 1 + shape = tuple(randint(10) + 1 for dim in range(ndims)) + els = reduce(mul, shape) + a = np.arange(els) + a.shape = shape + + buf_size = randint(2 * els) + b = Arrayterator(a, buf_size) + + # Check that each block has at most ``buf_size`` elements + for block in b: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that all elements are iterated correctly + assert_(list(b.flat) == list(a.flat)) + + # Slice arrayterator + start = [randint(dim) for dim in shape] + stop = [randint(dim) + 1 for dim in shape] + step = [randint(dim) + 1 for dim in shape] + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + c = b[slice_] + d = a[slice_] + + # Check that each block has at most ``buf_size`` elements + for block in c: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that the arrayterator is sliced correctly + assert_(np.all(c.__array__() == d)) + + # Check that all elements are iterated correctly + assert_(list(c.flat) == list(d.flat)) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_format.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_format.py new file mode 100644 index 0000000..d805d34 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_format.py @@ -0,0 +1,1054 @@ +# doctest +r''' Test the .npy file format. + +Set up: + + >>> import sys + >>> from io import BytesIO + >>> from numpy.lib import format + >>> + >>> scalars = [ + ... np.uint8, + ... np.int8, + ... np.uint16, + ... np.int16, + ... np.uint32, + ... np.int32, + ... np.uint64, + ... np.int64, + ... np.float32, + ... np.float64, + ... np.complex64, + ... np.complex128, + ... object, + ... ] + >>> + >>> basic_arrays = [] + >>> + >>> for scalar in scalars: + ... for endian in '<>': + ... dtype = np.dtype(scalar).newbyteorder(endian) + ... basic = np.arange(15).astype(dtype) + ... basic_arrays.extend([ + ... np.array([], dtype=dtype), + ... np.array(10, dtype=dtype), + ... basic, + ... basic.reshape((3,5)), + ... 
basic.reshape((3,5)).T, + ... basic.reshape((3,5))[::-1,::2], + ... ]) + ... + >>> + >>> Pdescr = [ + ... ('x', 'i4', (2,)), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> PbufferT = [ + ... ([3,2], [[6.,4.],[6.,4.]], 8), + ... ([4,3], [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> Ndescr = [ + ... ('x', 'i4', (2,)), + ... ('Info', [ + ... ('value', 'c16'), + ... ('y2', 'f8'), + ... ('Info2', [ + ... ('name', 'S2'), + ... ('value', 'c16', (2,)), + ... ('y3', 'f8', (2,)), + ... ('z3', 'u4', (2,))]), + ... ('name', 'S2'), + ... ('z2', 'b1')]), + ... ('color', 'S2'), + ... ('info', [ + ... ('Name', 'U8'), + ... ('Value', 'c16')]), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> NbufferT = [ + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> record_arrays = [ + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + ... ] + +Test the magic string writing. + + >>> format.magic(1, 0) + '\x93NUMPY\x01\x00' + >>> format.magic(0, 0) + '\x93NUMPY\x00\x00' + >>> format.magic(255, 255) + '\x93NUMPY\xff\xff' + >>> format.magic(2, 5) + '\x93NUMPY\x02\x05' + +Test the magic string reading. + + >>> format.read_magic(BytesIO(format.magic(1, 0))) + (1, 0) + >>> format.read_magic(BytesIO(format.magic(0, 0))) + (0, 0) + >>> format.read_magic(BytesIO(format.magic(255, 255))) + (255, 255) + >>> format.read_magic(BytesIO(format.magic(2, 5))) + (2, 5) + +Test the header writing. + + >>> for arr in basic_arrays + record_arrays: + ... f = BytesIO() + ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... print(repr(f.getvalue())) + ... 
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': 
(3, 5)} \n" + "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" + "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" +''' +import os +import 
sys +import warnings +from io import BytesIO + +import pytest + +import numpy as np +from numpy.lib import format +from numpy.testing import ( + IS_64BIT, + IS_PYPY, + IS_WASM, + assert_, + assert_array_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) +from numpy.testing._private.utils import requires_memory + +# Generate some basic arrays to test with. +scalars = [ + np.uint8, + np.int8, + np.uint16, + np.int16, + np.uint32, + np.int32, + np.uint64, + np.int64, + np.float32, + np.float64, + np.complex64, + np.complex128, + object, +] +basic_arrays = [] +for scalar in scalars: + for endian in '<>': + dtype = np.dtype(scalar).newbyteorder(endian) + basic = np.arange(1500).astype(dtype) + basic_arrays.extend([ + # Empty + np.array([], dtype=dtype), + # Rank-0 + np.array(10, dtype=dtype), + # 1-D + basic, + # 2-D C-contiguous + basic.reshape((30, 50)), + # 2-D F-contiguous + basic.reshape((30, 50)).T, + # 2-D non-contiguous + basic.reshape((30, 50))[::-1, ::2], + ]) + +# More complicated record arrays. +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), + 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), + 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + +record_arrays = [ + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + np.zeros(1, dtype=[('c', ('= (3, 12), reason="see gh-23988") +@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup") +def test_python2_python3_interoperability(): + fname = 'win64python2.npy' + path = os.path.join(os.path.dirname(__file__), 'data', fname) + with pytest.warns(UserWarning, match="Reading.*this warning\\."): + data = np.load(path) + assert_array_equal(data, np.ones(2)) + + +def test_pickle_python2_python3(): + # Test that loading object arrays saved on Python 2 works both on + # Python 2 and Python 3 and vice versa + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + expected = np.array([None, range, '\u512a\u826f', + b'\xe4\xb8\x8d\xe8\x89\xaf'], + dtype=object) + + for 
fname in ['py2-np0-objarr.npy', 'py2-objarr.npy', 'py2-objarr.npz', + 'py3-objarr.npy', 'py3-objarr.npz']: + path = os.path.join(data_dir, fname) + + for encoding in ['bytes', 'latin1']: + data_f = np.load(path, allow_pickle=True, encoding=encoding) + if fname.endswith('.npz'): + data = data_f['x'] + data_f.close() + else: + data = data_f + + if encoding == 'latin1' and fname.startswith('py2'): + assert_(isinstance(data[3], str)) + assert_array_equal(data[:-1], expected[:-1]) + # mojibake occurs + assert_array_equal(data[-1].encode(encoding), expected[-1]) + else: + assert_(isinstance(data[3], bytes)) + assert_array_equal(data, expected) + + if fname.startswith('py2'): + if fname.endswith('.npz'): + data = np.load(path, allow_pickle=True) + assert_raises(UnicodeError, data.__getitem__, 'x') + data.close() + data = np.load(path, allow_pickle=True, fix_imports=False, + encoding='latin1') + assert_raises(ImportError, data.__getitem__, 'x') + data.close() + else: + assert_raises(UnicodeError, np.load, path, + allow_pickle=True) + assert_raises(ImportError, np.load, path, + allow_pickle=True, fix_imports=False, + encoding='latin1') + + +def test_pickle_disallow(tmpdir): + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + path = os.path.join(data_dir, 'py2-objarr.npy') + assert_raises(ValueError, np.load, path, + allow_pickle=False, encoding='latin1') + + path = os.path.join(data_dir, 'py2-objarr.npz') + with np.load(path, allow_pickle=False, encoding='latin1') as f: + assert_raises(ValueError, f.__getitem__, 'x') + + path = os.path.join(tmpdir, 'pickle-disabled.npy') + assert_raises(ValueError, np.save, path, np.array([None], dtype=object), + allow_pickle=False) + +@pytest.mark.parametrize('dt', [ + np.dtype(np.dtype([('a', np.int8), + ('b', np.int16), + ('c', np.int32), + ], align=True), + (3,)), + np.dtype([('x', np.dtype({'names': ['a', 'b'], + 'formats': ['i1', 'i1'], + 'offsets': [0, 4], + 'itemsize': 8, + }, + (3,)), + (4,), + )]), + np.dtype([('x', + (' 1, a) + assert_array_equal(b, [3, 2, 2, 3, 3]) + + def test_place(self): + # Make sure that non-np.ndarray objects + # raise an error instead of doing nothing + assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) + + a = np.array([1, 4, 3, 2, 5, 8, 7]) + place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) + assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) + + place(a, np.zeros(7), []) + assert_array_equal(a, np.arange(1, 8)) + + place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9]) + assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9]) + assert_raises_regex(ValueError, "Cannot insert from an empty array", + lambda: place(a, [0, 0, 0, 0, 0, 1, 0], [])) + + # See Issue #6974 + a = np.array(['12', '34']) + place(a, [0, 1], '9') + assert_array_equal(a, ['12', '9']) + + def test_both(self): + a = rand(10) + mask = a > 0.5 + ac = a.copy() + c = extract(mask, a) + place(a, mask, 0) + place(a, mask, c) + assert_array_equal(a, ac) + + +# _foo1 and _foo2 are used in some tests in TestVectorize. 
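+# Both compute y * math.floor(x) (plus an offset z for _foo2); keeping them
+# at module level lets the tests below re-vectorize the same functions with
+# different otypes and keyword-argument patterns.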
+ +def _foo1(x, y=1.0): + return y * math.floor(x) + + +def _foo2(x, y=1.0, z=0.0): + return y * math.floor(x) + z + + +class TestVectorize: + + def test_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_scalar(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], 5) + assert_array_equal(r, [5, 8, 1, 4]) + + def test_large(self): + x = np.linspace(-3, 2, 10000) + f = vectorize(lambda x: x) + y = f(x) + assert_array_equal(y, x) + + def test_ufunc(self): + f = vectorize(math.cos) + args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) + r1 = f(args) + r2 = np.cos(args) + assert_array_almost_equal(r1, r2) + + def test_keywords(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order1(self): + # gh-1620: The second call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0), 1.0) + r2 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order2(self): + # gh-1620: The second call of f would crash with + # `ValueError: non-broadcastable output operand with shape () + # doesn't match the broadcast shape (3,)`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), 1.0) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order3(self): + # gh-1620: The third call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), y=1.0) + r3 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + assert_array_equal(r1, r3) + + def test_keywords_with_otypes_several_kwd_args1(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(10.4, z=100) + r2 = f(10.4, y=-1) + r3 = f(10.4) + assert_equal(r1, _foo2(10.4, z=100)) + assert_equal(r2, _foo2(10.4, y=-1)) + assert_equal(r3, _foo2(10.4)) + + def test_keywords_with_otypes_several_kwd_args2(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. 
+ r1 = f(z=100, x=10.4, y=-1) + r2 = f(1, 2, 3) + assert_equal(r1, _foo2(z=100, x=10.4, y=-1)) + assert_equal(r2, _foo2(1, 2, 3)) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. + import random + try: + vectorize(random.randrange) # Should succeed + except Exception: + raise AssertionError + + def test_keywords2_ticket_2100(self): + # Test kwarg support: enhancement ticket 2100 + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(a=args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(b=1, a=args) + assert_array_equal(r1, r2) + r1 = f(args, b=2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords3_ticket_2100(self): + # Test excluded with mixed positional and kwargs: ticket 2100 + def mypolyval(x, p): + _p = list(p) + res = _p.pop(0) + while _p: + res = res * x + _p.pop(0) + return res + + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) + ans = [3, 6] + assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) + + def test_keywords4_ticket_2100(self): + # Test vectorizing function with no positional args. + @vectorize + def f(**kw): + res = 1.0 + for _k in kw: + res *= kw[_k] + return res + + assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) + + def test_keywords5_ticket_2100(self): + # Test vectorizing function with no kwargs args. + @vectorize + def f(*v): + return np.prod(v) + + assert_array_equal(f([1, 2], [3, 4]), [3, 8]) + + def test_coverage1_ticket_2100(self): + def foo(): + return 1 + + f = vectorize(foo) + assert_array_equal(f(), 1) + + def test_assigning_docstring(self): + def foo(x): + """Original documentation""" + return x + + f = vectorize(foo) + assert_equal(f.__doc__, foo.__doc__) + + doc = "Provided documentation" + f = vectorize(foo, doc=doc) + assert_equal(f.__doc__, doc) + + def test_UnboundMethod_ticket_1156(self): + # Regression test for issue 1156 + class Foo: + b = 2 + + def bar(self, a): + return a ** self.b + + assert_array_equal(vectorize(Foo().bar)(np.arange(9)), + np.arange(9) ** 2) + assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), + np.arange(9) ** 2) + + def test_execution_order_ticket_1487(self): + # Regression test for dependence on execution order: issue 1487 + f1 = vectorize(lambda x: x) + res1a = f1(np.arange(3)) + res1b = f1(np.arange(0.1, 3)) + f2 = vectorize(lambda x: x) + res2b = f2(np.arange(0.1, 3)) + res2a = f2(np.arange(3)) + assert_equal(res1a, res2a) + assert_equal(res1b, res2b) + + def test_string_ticket_1892(self): + # Test vectorization over strings: issue 1892. + f = np.vectorize(lambda x: x) + s = '0123456789' * 10 + assert_equal(s, f(s)) + + def test_dtype_promotion_gh_29189(self): + # dtype should not be silently promoted (int32 -> int64) + dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] + + for dtype in dtypes: + x = np.asarray([1, 2, 3], dtype=dtype) + y = np.vectorize(lambda x: x + x)(x) + assert x.dtype == y.dtype + + def test_cache(self): + # Ensure that vectorized func called exactly once per argument. 
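+        # (With cache=True, np.vectorize reuses the trial call it makes to
+        # determine the output type instead of repeating it, so the wrapped
+        # function should run len(x) times rather than len(x) + 1.)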
+ _calls = [0] + + @vectorize + def f(x): + _calls[0] += 1 + return x ** 2 + + f.cache = True + x = np.arange(5) + assert_array_equal(f(x), x * x) + assert_equal(_calls[0], len(x)) + + def test_otypes(self): + f = np.vectorize(lambda x: x) + f.otypes = 'i' + x = np.arange(5) + assert_array_equal(f(x), x) + + def test_otypes_object_28624(self): + # with object otype, the vectorized function should return y + # wrapped into an object array + y = np.arange(3) + f = vectorize(lambda x: y, otypes=[object]) + + assert f(None).item() is y + assert f([None]).item() is y + + y = [1, 2, 3] + f = vectorize(lambda x: y, otypes=[object]) + + assert f(None).item() is y + assert f([None]).item() is y + + def test_parse_gufunc_signature(self): + assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + # Tests to check if whitespaces are ignored + assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature( + '( ), ( a, b,c ) ,( d) -> (d , e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x)(y)->()') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x),(y)->') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('((x))->(x)') + + def test_signature_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract, signature='(),()->()') + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_signature_mean_last(self): + def mean(a): + return a.mean() + + f = vectorize(mean, signature='(n)->()') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [2, 3]) + + def test_signature_center(self): + def center(a): + return a - a.mean() + + f = vectorize(center, signature='(n)->(n)') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [[-1, 1], [-1, 1]]) + + def test_signature_two_outputs(self): + f = vectorize(lambda x: (x, x), signature='()->(),()') + r = f([1, 2, 3]) + assert_(isinstance(r, tuple) and len(r) == 2) + assert_array_equal(r[0], [1, 2, 3]) + assert_array_equal(r[1], [1, 2, 3]) + + def test_signature_outer(self): + f = vectorize(np.outer, signature='(a),(b)->(a,b)') + r = f([1, 2], [1, 2, 3]) + assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) + + r = f([[[1, 2]]], [1, 2, 3]) + assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) + + r = f([[1, 0], [2, 0]], [1, 2, 3]) + assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], + [[2, 4, 6], [0, 0, 0]]]) + + r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) + assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], + [[0, 0, 0], [0, 0, 0]]]) + + def test_signature_computed_size(self): + f = vectorize(lambda x: x[:-1], signature='(n)->(m)') + r = f([1, 2, 
3]) + assert_array_equal(r, [1, 2]) + + r = f([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(r, [[1, 2], [2, 3]]) + + def test_signature_excluded(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo, signature='()->()', excluded={'b'}) + assert_array_equal(f([1, 2, 3]), [2, 3, 4]) + assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) + + def test_signature_otypes(self): + f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + + def test_signature_invalid_inputs(self): + f = vectorize(operator.add, signature='(n),(n)->(n)') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f([1, 2]) + with assert_raises_regex( + ValueError, 'does not have enough dimensions'): + f(1, 2) + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2], [1, 2, 3]) + + f = vectorize(operator.add, signature='()->()') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f(1, 2) + + def test_signature_invalid_outputs(self): + + f = vectorize(lambda x: x[:-1], signature='(n)->(n)') + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2, 3]) + + f = vectorize(lambda x: x, signature='()->(),()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f(1) + + f = vectorize(lambda x: (x, x), signature='()->()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f([1, 2]) + + def test_size_zero_output(self): + # see issue 5868 + f = np.vectorize(lambda x: x) + x = np.zeros([0, 5], dtype=int) + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f.otypes = 'i' + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='()->()') + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f = np.vectorize(lambda x: x, signature='()->()', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)') + assert_array_equal(f(x.T), x.T) + + f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') + with assert_raises_regex(ValueError, 'new output dimensions'): + f(x) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + + m = np.array([[1., 0., 0.], + [0., 0., 1.], + [0., 1., 0.]]).view(subclass) + v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass) + # generalized (gufunc) + matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)') + r = matvec(m, v) + assert_equal(type(r), subclass) + assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]]) + + # element-wise (ufunc) + mult = np.vectorize(lambda x, y: x * y) + r = mult(m, v) + assert_equal(type(r), subclass) + assert_equal(r, m * v) + + def test_name(self): + # gh-23021 + @np.vectorize + def f2(a, b): + return a + b + + assert f2.__name__ == 'f2' + + def test_decorator(self): + @vectorize + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_docstring(self): + @vectorize + def f(x): + """Docstring""" + return x + + if sys.flags.optimize < 2: + assert f.__doc__ == "Docstring" + + def test_partial(self): + def foo(x, y): + return x + y + + bar = partial(foo, 3) + vbar = np.vectorize(bar) + assert vbar(1) == 4 + + def test_signature_otypes_decorator(self): + @vectorize(signature='(n)->(n)', 
otypes=['float64'])
+        def f(x):
+            return x
+
+        r = f([1, 2, 3])
+        assert_equal(r.dtype, np.dtype('float64'))
+        assert_array_equal(r, [1, 2, 3])
+        assert f.__name__ == 'f'
+
+    def test_bad_input(self):
+        with assert_raises(TypeError):
+            A = np.vectorize(pyfunc=3)
+
+    def test_no_keywords(self):
+        with assert_raises(TypeError):
+            @np.vectorize("string")
+            def foo():
+                return "bar"
+
+    def test_positional_regression_9477(self):
+        # This supplies the first keyword argument as a positional,
+        # to ensure that they are still properly forwarded after the
+        # enhancement for #9477
+        f = vectorize((lambda x: x), ['float64'])
+        r = f([2])
+        assert_equal(r.dtype, np.dtype('float64'))
+
+    def test_datetime_conversion(self):
+        otype = "datetime64[ns]"
+        arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'],
+                       dtype='datetime64[ns]')
+        assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)",
+                                        otypes=[otype])(arr), arr)
+
+
+class TestLeaks:
+    class A:
+        iters = 20
+
+        def bound(self, *args):
+            return 0
+
+        @staticmethod
+        def unbound(*args):
+            return 0
+
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    @pytest.mark.skipif(NOGIL_BUILD,
+                        reason=("Functions are immortalized if a thread is "
+                                "launched, making this test flaky"))
+    @pytest.mark.parametrize('name, incr', [
+            ('bound', A.iters),
+            ('unbound', 0),
+            ])
+    def test_frompyfunc_leaks(self, name, incr):
+        # exposed in gh-11867 as np.vectorize, but the problem stems from
+        # frompyfunc.
+        # class.attribute = np.frompyfunc(<method>) creates a
+        # reference cycle if <method> is a bound class method.
+        # It requires a gc collection cycle to break the cycle.
+        import gc
+        A_func = getattr(self.A, name)
+        gc.disable()
+        try:
+            refcount = sys.getrefcount(A_func)
+            for i in range(self.A.iters):
+                a = self.A()
+                a.f = np.frompyfunc(getattr(a, name), 1, 1)
+                out = a.f(np.arange(10))
+                a = None
+            # A.func is part of a reference cycle if incr is non-zero
+            assert_equal(sys.getrefcount(A_func), refcount + incr)
+            for i in range(5):
+                gc.collect()
+            assert_equal(sys.getrefcount(A_func), refcount)
+        finally:
+            gc.enable()
+
+
+class TestDigitize:
+
+    def test_forward(self):
+        x = np.arange(-6, 5)
+        bins = np.arange(-5, 5)
+        assert_array_equal(digitize(x, bins), np.arange(11))
+
+    def test_reverse(self):
+        x = np.arange(5, -6, -1)
+        bins = np.arange(5, -5, -1)
+        assert_array_equal(digitize(x, bins), np.arange(11))
+
+    def test_random(self):
+        x = rand(10)
+        bin = np.linspace(x.min(), x.max(), 10)
+        assert_(np.all(digitize(x, bin) != 0))
+
+    def test_right_basic(self):
+        x = [1, 5, 4, 10, 8, 11, 0]
+        bins = [1, 5, 10]
+        default_answer = [1, 2, 1, 3, 2, 3, 0]
+        assert_array_equal(digitize(x, bins), default_answer)
+        right_answer = [0, 1, 1, 2, 2, 3, 0]
+        assert_array_equal(digitize(x, bins, True), right_answer)
+
+    def test_right_open(self):
+        x = np.arange(-6, 5)
+        bins = np.arange(-6, 4)
+        assert_array_equal(digitize(x, bins, True), np.arange(11))
+
+    def test_right_open_reverse(self):
+        x = np.arange(5, -6, -1)
+        bins = np.arange(4, -6, -1)
+        assert_array_equal(digitize(x, bins, True), np.arange(11))
+
+    def test_right_open_random(self):
+        x = rand(10)
+        bins = np.linspace(x.min(), x.max(), 10)
+        assert_(np.all(digitize(x, bins, True) != 10))
+
+    def test_monotonic(self):
+        x = [-1, 0, 1, 2]
+        bins = [0, 0, 1]
+        assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
+        assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
+        bins = [1, 1, 0]
+        assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
+        assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
+        bins = [1, 1, 1, 1]
+        assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
+        assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
+        bins = [0, 0, 1, 0]
+        assert_raises(ValueError, digitize, x, bins)
+        bins = [1, 1, 0, 1]
+        assert_raises(ValueError, digitize, x, bins)
+
+    def test_casting_error(self):
+        x = [1, 2, 3 + 1.j]
+        bins = [1, 2, 3]
+        assert_raises(TypeError, digitize, x, bins)
+        x, bins = bins, x
+        assert_raises(TypeError, digitize, x, bins)
+
+    def test_return_type(self):
+        # Functions returning indices should always return base ndarrays
+        class A(np.ndarray):
+            pass
+        a = np.arange(5).view(A)
+        b = np.arange(1, 3).view(A)
+        assert_(not isinstance(digitize(b, a, False), A))
+        assert_(not isinstance(digitize(b, a, True), A))
+
+    def test_large_integers_increasing(self):
+        # gh-11022
+        x = 2**54  # loses precision in a float
+        assert_equal(np.digitize(x, [x - 1, x + 1]), 1)
+
+    @pytest.mark.xfail(
+        reason="gh-11022: np._core.multiarray._monotonicity loses precision")
+    def test_large_integers_decreasing(self):
+        # gh-11022
+        x = 2**54  # loses precision in a float
+        assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
+
+
+class TestUnwrap:
+
+    def test_simple(self):
+        # check that unwrap removes jumps greater than 2*pi
+        assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
+        # check that unwrap maintains continuity
+        assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
+
+    def test_period(self):
+        # check that unwrap removes jumps greater than 255
+        assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2])
+        # check that unwrap maintains continuity
+        assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255))
+        # check simple case
+        simple_seq = np.array([0, 75, 150, 225, 300])
+        wrap_seq = np.mod(simple_seq, 255)
+        assert_array_equal(unwrap(wrap_seq, period=255), simple_seq)
+        # check custom discont value
+        uneven_seq = np.array([0, 75, 150, 225, 300, 430])
+        wrap_uneven = np.mod(uneven_seq, 250)
+        no_discont = unwrap(wrap_uneven, period=250)
+        assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180])
+        sm_discont = unwrap(wrap_uneven, period=250, discont=140)
+        assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430])
+        assert sm_discont.dtype == wrap_uneven.dtype
+
+
+@pytest.mark.parametrize(
+    "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"]
+)
+@pytest.mark.parametrize("M", [0, 1, 10])
+class TestFilterwindows:
+
+    def test_hanning(self, dtype: str, M: int) -> None:
+        scalar = np.array(M, dtype=dtype)[()]
+
+        w = hanning(scalar)
+        if dtype == "O":
+            ref_dtype = np.float64
+        else:
+            ref_dtype = np.result_type(scalar.dtype, np.float64)
+        assert w.dtype == ref_dtype
+
+        # check symmetry
+        assert_equal(w, flipud(w))
+
+        # check known value
+        if scalar < 1:
+            assert_array_equal(w, np.array([]))
+        elif scalar == 1:
+            assert_array_equal(w, np.ones(1))
+        else:
+            assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
+
+    def test_hamming(self, dtype: str, M: int) -> None:
+        scalar = np.array(M, dtype=dtype)[()]
+
+        w = hamming(scalar)
+        if dtype == "O":
+            ref_dtype = np.float64
+        else:
+            ref_dtype = np.result_type(scalar.dtype, np.float64)
+        assert w.dtype == ref_dtype
+
+        # check symmetry
+        assert_equal(w, flipud(w))
+
+        # check known value
+        if scalar < 1:
+            assert_array_equal(w, np.array([]))
+        elif scalar == 1:
+            assert_array_equal(w, np.ones(1))
+        else:
+            assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
+
+    def test_bartlett(self, dtype: str, M: int) -> None:
+        scalar = np.array(M,
dtype=dtype)[()] + + w = bartlett(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + + def test_blackman(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = blackman(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + + def test_kaiser(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = kaiser(scalar, 0) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 10, 15) + + +class TestTrapezoid: + + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_almost_equal(r, 1, 7) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None, :, None] + z[None, None, :] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapezoid(q, x=x[:, None, None], axis=0) + assert_almost_equal(r, qx) + r = trapezoid(q, x=y[None, :, None], axis=1) + assert_almost_equal(r, qy) + r = trapezoid(q, x=z[None, None, :], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapezoid(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapezoid(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapezoid(q, x=z, axis=2) + assert_almost_equal(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_almost_equal(trapezoid(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapezoid(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapezoid(y, xm), r) + + +class TestSinc: + + def test_simple(self): + assert_(sinc(0) == 1) + w = sinc(np.linspace(-1, 1, 100)) + # check symmetry + assert_array_almost_equal(w, flipud(w), 7) + + def test_array_like(self): + x = [0, 0.5] + y1 = sinc(np.array(x)) + y2 = sinc(list(x)) + y3 = sinc(tuple(x)) + assert_array_equal(y1, y2) + assert_array_equal(y1, y3) + + def test_bool_dtype(self): + x = (np.arange(4, dtype=np.uint8) % 2 == 1) + actual = sinc(x) + expected = sinc(x.astype(np.float64)) + assert_allclose(actual, expected) + assert 
actual.dtype == np.float64 + + @pytest.mark.parametrize('dtype', [np.uint8, np.int16, np.uint64]) + def test_int_dtypes(self, dtype): + x = np.arange(4, dtype=dtype) + actual = sinc(x) + expected = sinc(x.astype(np.float64)) + assert_allclose(actual, expected) + assert actual.dtype == np.float64 + + @pytest.mark.parametrize( + 'dtype', + [np.float16, np.float32, np.longdouble, np.complex64, np.complex128] + ) + def test_float_dtypes(self, dtype): + x = np.arange(4, dtype=dtype) + assert sinc(x).dtype == x.dtype + + def test_float16_underflow(self): + x = np.float16(0) + # before gh-27784, fill value for 0 in input would underflow float16, + # resulting in nan + assert_array_equal(sinc(x), np.asarray(1.0)) + +class TestUnique: + + def test_simple(self): + x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) + assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) + assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) + x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] + assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) + x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) + assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) + + +class TestCheckFinite: + + def test_simple(self): + a = [1, 2, 3] + b = [1, 2, np.inf] + c = [1, 2, np.nan] + np.asarray_chkfinite(a) + assert_raises(ValueError, np.asarray_chkfinite, b) + assert_raises(ValueError, np.asarray_chkfinite, c) + + def test_dtype_order(self): + # Regression test for missing dtype and order arguments + a = [1, 2, 3] + a = np.asarray_chkfinite(a, order='F', dtype=np.float64) + assert_(a.dtype == np.float64) + + +class TestCorrCoef: + A = np.array( + [[0.15391142, 0.18045767, 0.14197213], + [0.70461506, 0.96474128, 0.27906989], + [0.9297531, 0.32296769, 0.19267156]]) + B = np.array( + [[0.10377691, 0.5417086, 0.49807457], + [0.82872117, 0.77801674, 0.39226705], + [0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array( + [[1., 0.9379533, -0.04931983], + [0.9379533, 1., 0.30007991], + [-0.04931983, 0.30007991, 1.]]) + res2 = np.array( + [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], + [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], + [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], + [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], + [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], + [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) + + def test_non_array(self): + assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), + [[1., -1.], [-1., 1.]]) + + def test_simple(self): + tgt1 = corrcoef(self.A) + assert_almost_equal(tgt1, self.res1) + assert_(np.all(np.abs(tgt1) <= 1.0)) + + tgt2 = corrcoef(self.A, self.B) + assert_almost_equal(tgt2, self.res2) + assert_(np.all(np.abs(tgt2) <= 1.0)) + + def test_ddof(self): + # ddof raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + sup.filter(DeprecationWarning) + # ddof has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) + assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) + + def test_bias(self): + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + 
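+        # np.corrcoef's signature is corrcoef(x, y=None, rowvar=True,
+        # bias=<no value>, ddof=<no value>, *, dtype=None), so the
+        # positional call above passes rowvar=1 and bias=0; merely
+        # supplying `bias` is what raises the DeprecationWarning.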
assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) + sup.filter(DeprecationWarning) + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, bias=1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = corrcoef(x) + tgt = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(res, tgt) + assert_(np.all(np.abs(res) <= 1.0)) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(np.array([])), np.nan) + assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_extreme(self): + x = [[1e-100, 1e100], [1e100, 1e-100]] + with np.errstate(all='raise'): + c = corrcoef(x) + assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) + assert_(np.all(np.abs(c) <= 1.0)) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_corrcoef_dtype(self, test_type): + cast_A = self.A.astype(test_type) + res = corrcoef(cast_A, dtype=test_type) + assert test_type == res.dtype + + +class TestCov: + x1 = np.array([[0, 2], [1, 1], [2, 0]]).T + res1 = np.array([[1., -1.], [-1., 1.]]) + x2 = np.array([0.0, 1.0, 2.0], ndmin=2) + frequencies = np.array([1, 4, 1]) + x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T + res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) + unit_frequencies = np.ones(3, dtype=np.int_) + weights = np.array([1.0, 4.0, 1.0]) + res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. 
/ 3.]]) + unit_weights = np.ones(3) + x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) + + def test_basic(self): + assert_allclose(cov(self.x1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(cov(x), res) + assert_allclose(cov(x, aweights=np.ones(3)), res) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(np.array([])), np.nan) + assert_array_equal(cov(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(cov(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(self.x1, ddof=5), + np.array([[np.inf, -np.inf], + [-np.inf, np.inf]])) + + def test_1D_rowvar(self): + assert_allclose(cov(self.x3), cov(self.x3, rowvar=False)) + y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) + assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False)) + + def test_1D_variance(self): + assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) + + def test_fweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies), + self.res1) + nonint = self.frequencies + 0.5 + assert_raises(TypeError, cov, self.x1, fweights=nonint) + f = np.ones((2, 3), dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = np.ones(2, dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = -1 * np.ones(3, dtype=np.int_) + assert_raises(ValueError, cov, self.x1, fweights=f) + + def test_aweights(self): + assert_allclose(cov(self.x1, aweights=self.weights), self.res3) + assert_allclose(cov(self.x1, aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) + w = np.ones((2, 3)) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = np.ones(2) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = -1.0 * np.ones(3) + assert_raises(ValueError, cov, self.x1, aweights=w) + + def test_unit_fweights_and_aweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies, + aweights=self.unit_weights), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies, + aweights=self.unit_weights), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.weights), + self.res3) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_cov_dtype(self, test_type): + cast_x1 = self.x1.astype(test_type) + res = cov(cast_x1, dtype=test_type) + assert test_type == res.dtype + + def test_gh_27658(self): + x = np.ones((3, 1)) + expected = np.cov(x, ddof=0, 
rowvar=True) + actual = np.cov(x.T, ddof=0, rowvar=False) + assert_allclose(actual, expected, strict=True) + + +class Test_I0: + + def test_simple(self): + assert_almost_equal( + i0(0.5), + np.array(1.0634833707413234)) + + # need at least one test above 8, as the implementation is piecewise + A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + assert_almost_equal(i0(A), expected) + assert_almost_equal(i0(-A), expected) + + B = np.array([[0.827002, 0.99959078], + [0.89694769, 0.39298162], + [0.37954418, 0.05206293], + [0.36465447, 0.72446427], + [0.48164949, 0.50324519]]) + assert_almost_equal( + i0(B), + np.array([[1.17843223, 1.26583466], + [1.21147086, 1.03898290], + [1.03633899, 1.00067775], + [1.03352052, 1.13557954], + [1.05884290, 1.06432317]])) + # Regression test for gh-11205 + i0_0 = np.i0([0.]) + assert_equal(i0_0.shape, (1,)) + assert_array_equal(np.i0([0.]), np.array([1.])) + + def test_non_array(self): + a = np.arange(4) + + class array_like: + __array_interface__ = a.__array_interface__ + + def __array_wrap__(self, arr, context, return_scalar): + return self + + # E.g. pandas series survive ufunc calls through array-wrap: + assert isinstance(np.abs(array_like()), array_like) + exp = np.i0(a) + res = np.i0(array_like()) + + assert_array_equal(exp, res) + + def test_complex(self): + a = np.array([0, 1 + 2j]) + with pytest.raises(TypeError, match="i0 not supported for complex values"): + res = i0(a) + + +class TestKaiser: + + def test_simple(self): + assert_(np.isfinite(kaiser(1, 1.0))) + assert_almost_equal(kaiser(0, 1.0), + np.array([])) + assert_almost_equal(kaiser(2, 1.0), + np.array([0.78984831, 0.78984831])) + assert_almost_equal(kaiser(5, 1.0), + np.array([0.78984831, 0.94503323, 1., + 0.94503323, 0.78984831])) + assert_almost_equal(kaiser(5, 1.56789), + np.array([0.58285404, 0.88409679, 1., + 0.88409679, 0.58285404])) + + def test_int_beta(self): + kaiser(3, 4) + + +class TestMeshgrid: + + def test_simple(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) + assert_array_equal(X, np.array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]])) + assert_array_equal(Y, np.array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]])) + + def test_single_input(self): + [X] = meshgrid([1, 2, 3, 4]) + assert_array_equal(X, np.array([1, 2, 3, 4])) + + def test_no_input(self): + args = [] + assert_array_equal([], meshgrid(*args)) + assert_array_equal([], meshgrid(*args, copy=False)) + + def test_indexing(self): + x = [1, 2, 3] + y = [4, 5, 6, 7] + [X, Y] = meshgrid(x, y, indexing='ij') + assert_array_equal(X, np.array([[1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) + assert_array_equal(Y, np.array([[4, 5, 6, 7], + [4, 5, 6, 7], + [4, 5, 6, 7]])) + + # Test expected shapes: + z = [8, 9] + assert_(meshgrid(x, y)[0].shape == (4, 3)) + assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) + assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) + assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) + + assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') + + def test_sparse(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + assert_array_equal(X, np.array([[1, 2, 3]])) + assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + + def test_invalid_arguments(self): + # Test that meshgrid complains about invalid arguments + # Regression test for issue #4755: + # https://github.com/numpy/numpy/issues/4755 + assert_raises(TypeError, meshgrid, + [1, 2, 3], 
[4, 5, 6, 7], indices='ij') + + def test_return_type(self): + # Test for appropriate dtype in returned arrays. + # Regression test for issue #5297 + # https://github.com/numpy/numpy/issues/5297 + x = np.arange(0, 10, dtype=np.float32) + y = np.arange(10, 20, dtype=np.float64) + + X, Y = np.meshgrid(x, y) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # copy + X, Y = np.meshgrid(x, y, copy=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # sparse + X, Y = np.meshgrid(x, y, sparse=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + def test_writeback(self): + # Issue 8561 + X = np.array([1.1, 2.2]) + Y = np.array([3.3, 4.4]) + x, y = np.meshgrid(X, Y, sparse=False, copy=True) + + x[0, :] = 0 + assert_equal(x[0, :], 0) + assert_equal(x[1, :], X) + + def test_nd_shape(self): + a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) + expected_shape = (2, 1, 3, 4, 5) + assert_equal(a.shape, expected_shape) + assert_equal(b.shape, expected_shape) + assert_equal(c.shape, expected_shape) + assert_equal(d.shape, expected_shape) + assert_equal(e.shape, expected_shape) + + def test_nd_values(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5]) + assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]]) + + def test_nd_indexing(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij') + assert_equal(a, [[[0, 0, 0], [0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1], [2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5], [3, 4, 5]]]) + + +class TestPiecewise: + + def test_simple(self): + # Condition is single bool list + x = piecewise([0, 0], [True, False], [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: single bool list + x = piecewise([0, 0], [[True, False]], [1]) + assert_array_equal(x, [1, 0]) + + # Conditions is single bool array + x = piecewise([0, 0], np.array([True, False]), [1]) + assert_array_equal(x, [1, 0]) + + # Condition is single int array + x = piecewise([0, 0], np.array([1, 0]), [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: int array + x = piecewise([0, 0], [np.array([1, 0])], [1]) + assert_array_equal(x, [1, 0]) + + x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + assert_array_equal(x, [0, -1]) + + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + + def test_two_conditions(self): + x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + assert_array_equal(x, [3, 4]) + + def test_scalar_domains_three_conditions(self): + x = piecewise(3, [True, False, False], [4, 2, 0]) + assert_equal(x, 4) + + def test_default(self): + # No value specified for x[1], should be 0 + x = piecewise([1, 2], [True, False], [2]) + assert_array_equal(x, [2, 0]) + + # Should set x[1] to 3 + x = piecewise([1, 2], [True, False], [2, 3]) + assert_array_equal(x, [2, 3]) + + def test_0d(self): + x = np.array(3) + y = piecewise(x, x > 3, [4, 0]) + assert_(y.ndim == 0) + assert_(y == 0) + + x = 5 + y = piecewise(x, [True, False], [1, 0]) + assert_(y.ndim == 0) + assert_(y == 1) + + # With 3 ranges (It was failing, before) + y = piecewise(x, [False, False, True], [1, 2, 3]) + assert_array_equal(y, 3) + + def test_0d_comparison(self): + x = 3 + y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. 
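+        # piecewise accepts either len(condlist) or len(condlist) + 1
+        # functions; the optional extra entry is the default used where no
+        # condition holds.  Illustrative check with a false condition and
+        # a default value:
+        assert_equal(piecewise(5, [5 <= 3], [4, 0]), 0)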
+ assert_equal(y, 4) + + # With 3 ranges (It was failing, before) + x = 4 + y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) + assert_array_equal(y, 2) + + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + + def test_multidimensional_extrafunc(self): + x = np.array([[-2.5, -1.5, -0.5], + [0.5, 1.5, 2.5]]) + y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) + assert_array_equal(y, np.array([[-1., -1., -1.], + [3., 3., 1.]])) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + x = np.arange(5.).view(subclass) + r = piecewise(x, [x < 2., x >= 4], [-1., 1., 0.]) + assert_equal(type(r), subclass) + assert_equal(r, [-1., -1., 0., 0., 1.]) + + +class TestBincount: + + def test_simple(self): + y = np.bincount(np.arange(4)) + assert_array_equal(y, np.ones(4)) + + def test_simple2(self): + y = np.bincount(np.array([1, 5, 2, 4, 1])) + assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) + + def test_simple_weight(self): + x = np.arange(4) + w = np.array([0.2, 0.3, 0.5, 0.1]) + y = np.bincount(x, w) + assert_array_equal(y, w) + + def test_simple_weight2(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + + def test_with_minlength(self): + x = np.array([0, 1, 0, 1, 1]) + y = np.bincount(x, minlength=3) + assert_array_equal(y, np.array([2, 3, 0])) + x = [] + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([])) + + def test_with_minlength_smaller_than_maxvalue(self): + x = np.array([0, 1, 1, 2, 2, 3, 3]) + y = np.bincount(x, minlength=2) + assert_array_equal(y, np.array([1, 2, 2, 2])) + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([1, 2, 2, 2])) + + def test_with_minlength_and_weights(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w, 8) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) + + def test_empty(self): + x = np.array([], dtype=int) + y = np.bincount(x) + assert_array_equal(x, y) + + def test_empty_with_minlength(self): + x = np.array([], dtype=int) + y = np.bincount(x, minlength=5) + assert_array_equal(y, np.zeros(5, dtype=int)) + + @pytest.mark.parametrize('minlength', [0, 3]) + def test_empty_list(self, minlength): + assert_array_equal(np.bincount([], minlength=minlength), + np.zeros(minlength, dtype=int)) + + def test_with_incorrect_minlength(self): + x = np.array([], dtype=int) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + x = np.arange(5) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_dtype_reference_leaks(self): + # gh-6805 + intp_refcount = sys.getrefcount(np.dtype(np.intp)) + double_refcount = sys.getrefcount(np.dtype(np.double)) + + for j in range(10): + np.bincount([1, 2, 3]) + 
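+        # The unweighted calls above return intp counts while the weighted
+        # calls below return doubles, so both dtype singletons get
+        # exercised; their refcounts should be unchanged once the
+        # temporaries are released.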
+        assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
+        assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
+
+        for j in range(10):
+            np.bincount([1, 2, 3], [4, 5, 6])
+        assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
+        assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
+
+    @pytest.mark.parametrize("vals", [[[2, 2]], 2])
+    def test_error_not_1d(self, vals):
+        # Test that values has to be 1-D (both as array and nested list)
+        vals_arr = np.asarray(vals)
+        with assert_raises(ValueError):
+            np.bincount(vals_arr)
+        with assert_raises(ValueError):
+            np.bincount(vals)
+
+    @pytest.mark.parametrize("dt", np.typecodes["AllInteger"])
+    def test_gh_28354(self, dt):
+        a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt)
+        actual = np.bincount(a)
+        expected = [1, 3, 1, 1, 0, 0, 0, 1]
+        assert_array_equal(actual, expected)
+
+    def test_contiguous_handling(self):
+        # check for absence of hard crash
+        np.bincount(np.arange(10000)[::2])
+
+    def test_gh_28354_array_like(self):
+        class A:
+            def __array__(self):
+                return np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.uint64)
+
+        a = A()
+        actual = np.bincount(a)
+        expected = [1, 3, 1, 1, 0, 0, 0, 1]
+        assert_array_equal(actual, expected)
+
+
+class TestInterp:
+
+    def test_exceptions(self):
+        assert_raises(ValueError, interp, 0, [], [])
+        assert_raises(ValueError, interp, 0, [0], [1, 2])
+        assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
+        assert_raises(ValueError, interp, 0, [], [], period=360)
+        assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
+
+    def test_basic(self):
+        x = np.linspace(0, 1, 5)
+        y = np.linspace(0, 1, 5)
+        x0 = np.linspace(0, 1, 50)
+        assert_almost_equal(np.interp(x0, x, y), x0)
+
+    def test_right_left_behavior(self):
+        # Needs range of sizes to test different code paths.
+        # size == 1 is special-cased, 1 < size < 5 is linear search, and
+        # size >= 5 goes through local search and possibly binary search.
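+        # Out-of-range points default to the boundary values fp[0]/fp[-1]
+        # unless overridden; illustrative check of the default vs left=0:
+        assert_equal(interp(-1, [0., 1.], [1., 1.]), 1.0)
+        assert_equal(interp(-1, [0., 1.], [1., 1.], left=0), 0.0)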
+ for size in range(1, 10): + xp = np.arange(size, dtype=np.double) + yp = np.ones(size, dtype=np.double) + incpts = np.array([-1, 0, size - 1, size], dtype=np.double) + decpts = incpts[::-1] + + incres = interp(incpts, xp, yp) + decres = interp(decpts, xp, yp) + inctgt = np.array([1, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0) + decres = interp(decpts, xp, yp, left=0) + inctgt = np.array([0, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, right=2) + decres = interp(decpts, xp, yp, right=2) + inctgt = np.array([1, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0, right=2) + decres = interp(decpts, xp, yp, left=0, right=2) + inctgt = np.array([0, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.nan + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_non_finite_behavior_exact_x(self): + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) + fp = [1, 2, np.nan, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) + + @pytest.fixture(params=[ + np.float64, + lambda x: _make_complex(x, 0), + lambda x: _make_complex(0, x), + lambda x: _make_complex(x, np.multiply(x, -2)) + ], ids=[ + 'real', + 'complex-real', + 'complex-imag', + 'complex-both' + ]) + def sc(self, request): + """ scale function used by the below tests """ + return request.param + + def test_non_finite_any_nan(self, sc): + """ test that nans are propagated """ + assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan)) + + def test_non_finite_inf(self, sc): + """ Test that interp between opposite infs gives nan """ + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + + # unless the y values are equal + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) + + def test_non_finite_half_inf_xf(self, sc): + """ Test that interp where both axes have a bound at inf gives nan """ + assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), 
sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + + def test_non_finite_half_inf_x(self, sc): + """ Test interp where the x axis has a bound at inf """ + assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) # noqa: E202 + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0)) + assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) + + def test_non_finite_half_inf_f(self, sc): + """ Test interp where the f axis has a bound at inf """ + assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf)) + + def test_complex_interp(self): + # test complex interpolation + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5)) * 1.0j + x0 = 0.3 + y0 = x0 + (1 + x0) * 1.0j + assert_almost_equal(np.interp(x0, x, y), y0) + # test complex left and right + x0 = -1 + left = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, left=left), left) + x0 = 2.0 + right = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, right=right), right) + # test complex non finite + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2 + 1j, np.inf, 4] + y = [1, 2 + 1j, np.inf + 0.5j, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), y) + # test complex periodic + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5 + 1.0j, 10 + 2j, 3 + 3j, 4 + 4j] + y = [7.5 + 1.5j, 5. + 1.0j, 8.75 + 1.75j, 6.25 + 1.25j, 3. + 3j, 3.25 + 3.25j, + 3.5 + 3.5j, 3.75 + 3.75j] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + + xp = np.array([0, 2, 4]) + fp = np.array([1, -1, 1]) + + actual = np.interp(np.array(1), xp, fp) + assert_equal(actual, 0) + assert_(isinstance(actual, np.float64)) + + actual = np.interp(np.array(4.5), xp, fp, period=4) + assert_equal(actual, 0.5) + assert_(isinstance(actual, np.float64)) + + def test_if_len_x_is_small(self): + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + + def test_period(self): + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5, 10, 3, 4] + y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + x = np.array(x, order='F').reshape(2, -1) + y = np.array(y, order='C').reshape(2, -1) + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + +class TestPercentile: + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, 0), 0.) 
+ assert_equal(np.percentile(x, 100), 3.5) + assert_equal(np.percentile(x, 50), 1.75) + x[1] = np.nan + assert_equal(np.percentile(x, 0), np.nan) + assert_equal(np.percentile(x, 0, method='nearest'), np.nan) + assert_equal(np.percentile(x, 0, method='inverted_cdf'), np.nan) + assert_equal( + np.percentile(x, 0, method='inverted_cdf', + weights=np.ones_like(x)), + np.nan, + ) + + def test_fraction(self): + x = [Fraction(i, 2) for i in range(8)] + + p = np.percentile(x, Fraction(0)) + assert_equal(p, Fraction(0)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(100)) + assert_equal(p, Fraction(7, 2)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(50)) + assert_equal(p, Fraction(7, 4)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, [Fraction(50)]) + assert_equal(p, np.array([Fraction(7, 4)])) + assert_equal(type(p), np.ndarray) + + def test_api(self): + d = np.ones(5) + np.percentile(d, 5, None, None, False) + np.percentile(d, 5, None, None, False, 'linear') + o = np.ones((1,)) + np.percentile(d, 5, None, o, False, 'linear') + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + + def test_2D(self): + x = np.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) + + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + def test_linear_nan_1D(self, dtype): + # METHOD 1 of H&F + arr = np.asarray([15.0, np.nan, 35.0, 40.0, 50.0], dtype=dtype) + res = np.percentile( + arr, + 40.0, + method="linear") + np.testing.assert_equal(res, np.nan) + np.testing.assert_equal(res.dtype, arr.dtype) + + H_F_TYPE_CODES = [(int_type, np.float64) + for int_type in np.typecodes["AllInteger"] + ] + [(np.float16, np.float16), + (np.float32, np.float32), + (np.float64, np.float64), + (np.longdouble, np.longdouble), + (np.dtype("O"), np.float64)] + + @pytest.mark.parametrize(["function", "quantile"], + [(np.quantile, 0.4), + (np.percentile, 40.0)]) + @pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES) + @pytest.mark.parametrize(["method", "weighted", "expected"], + [("inverted_cdf", False, 20), + ("inverted_cdf", True, 20), + ("averaged_inverted_cdf", False, 27.5), + ("closest_observation", False, 20), + ("interpolated_inverted_cdf", False, 20), + ("hazen", False, 27.5), + ("weibull", False, 26), + ("linear", False, 29), + ("median_unbiased", False, 27), + ("normal_unbiased", False, 27.125), + ]) + def test_linear_interpolation(self, + function, + quantile, + method, + weighted, + expected, + input_dtype, + expected_dtype): + expected_dtype = np.dtype(expected_dtype) + + arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) + weights = np.ones_like(arr) if weighted else None + if input_dtype is np.longdouble: + if function is np.quantile: + # 0.4 is not exactly representable and it matters + # for "averaged_inverted_cdf", so we need to cheat. 
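+                # float64(0.4) is exactly 0.4000000000000000222..., a hair
+                # above 0.4; parsing the decimal string with the longdouble
+                # constructor keeps extra precision, so n * q lands where
+                # the method expects.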
+ quantile = input_dtype("0.4") + # We want to use nulp, but that does not work for longdouble + test_function = np.testing.assert_almost_equal + else: + test_function = np.testing.assert_array_almost_equal_nulp + + actual = function(arr, quantile, method=method, weights=weights) + + test_function(actual, expected_dtype.type(expected)) + + if method in ["inverted_cdf", "closest_observation"]: + if input_dtype == "O": + np.testing.assert_equal(np.asarray(actual).dtype, np.float64) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(input_dtype)) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(expected_dtype)) + + TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["Float"] + "O" + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_lower_higher(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='lower'), 4) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='higher'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_midpoint(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='midpoint'), 4.5) + assert_equal(np.percentile(np.arange(9, dtype=dtype) + 1, 50, + method='midpoint'), 5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 51, + method='midpoint'), 5.5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 50, + method='midpoint'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_nearest(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='nearest'), 5) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 49, + method='nearest'), 4) + + def test_linear_interpolation_extrapolation(self): + arr = np.random.rand(5) + + actual = np.percentile(arr, 100) + np.testing.assert_equal(actual, arr.max()) + + actual = np.percentile(arr, 0) + np.testing.assert_equal(actual, arr.min()) + + def test_sequence(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) + + def test_axis(self): + x = np.arange(12).reshape(3, 4) + + assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) + + # ensure qth axis is always first as with np.array(old_percentile(..)) + x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + assert_equal(np.percentile(x, (25, 50)).shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) + assert_equal( + np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), + method="higher").shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75), + method="higher").shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0, + method="higher").shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1, + method="higher").shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2, + method="higher").shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3, + method="higher").shape, (2, 3, 4, 5)) + assert_equal(np.percentile(x, 
(25, 50, 75), axis=1, + method="higher").shape, (3, 3, 5, 6)) + + def test_scalar_q(self): + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50), 5.5) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + assert_equal(np.percentile(x, 50, axis=0), r0) + assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) + r1 = np.array([1.5, 5.5, 9.5]) + assert_almost_equal(np.percentile(x, 50, axis=1), r1) + assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) + + out = np.empty(1) + assert_equal(np.percentile(x, 50, out=out), 5.5) + assert_equal(out, 5.5) + out = np.empty(4) + assert_equal(np.percentile(x, 50, axis=0, out=out), r0) + assert_equal(out, r0) + out = np.empty(3) + assert_equal(np.percentile(x, 50, axis=1, out=out), r1) + assert_equal(out, r1) + + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50, method='lower'), 5.) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + c0 = np.percentile(x, 50, method='lower', axis=0) + assert_equal(c0, r0) + assert_equal(c0.shape, r0.shape) + r1 = np.array([1., 5., 9.]) + c1 = np.percentile(x, 50, method='lower', axis=1) + assert_almost_equal(c1, r1) + assert_equal(c1.shape, r1.shape) + + out = np.empty((), dtype=x.dtype) + c = np.percentile(x, 50, method='lower', out=out) + assert_equal(c, 5) + assert_equal(out, 5) + out = np.empty(4, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + out = np.empty(3, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_exception(self): + assert_raises(ValueError, np.percentile, [1, 2], 56, + method='foobar') + assert_raises(ValueError, np.percentile, [1], 101) + assert_raises(ValueError, np.percentile, [1], -1) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) + + def test_percentile_list(self): + assert_equal(np.percentile([1, 2, 3], 0), 1) + + @pytest.mark.parametrize( + "percentile, with_weights", + [ + (np.percentile, False), + (partial(np.percentile, method="inverted_cdf"), True), + ] + ) + def test_percentile_out(self, percentile, with_weights): + out_dtype = int if with_weights else float + x = np.array([1, 2, 3]) + y = np.zeros((3,), dtype=out_dtype) + p = (1, 2, 3) + weights = np.ones_like(x) if with_weights else None + r = percentile(x, p, out=y, weights=weights) + assert r is y + assert_equal(percentile(x, p, weights=weights), y) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + y = np.zeros((3, 3), dtype=out_dtype) + weights = np.ones_like(x) if with_weights else None + r = percentile(x, p, axis=0, out=y, weights=weights) + assert r is y + assert_equal(percentile(x, p, weights=weights, axis=0), y) + + y = np.zeros((3, 2), dtype=out_dtype) + percentile(x, p, axis=1, out=y, weights=weights) + assert_equal(percentile(x, p, weights=weights, axis=1), y) + + x = np.arange(12).reshape(3, 4) + # q.dim > 1, float + if with_weights: + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + else: + r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) + out = np.empty((2, 4), dtype=out_dtype) + weights = np.ones_like(x) if with_weights else None + assert_equal( + percentile(x, (25, 50), axis=0, out=out, weights=weights), r0 + ) + assert_equal(out, r0) 
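+        # With weights, "inverted_cdf" selects actual data points, so the
+        # expected values above stay integral; the default linear method
+        # interpolates and therefore produces floats.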
+ r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + out = np.empty((2, 3)) + assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) + assert_equal(out, r1) + + # q.dim > 1, int + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + out = np.empty((2, 4), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) + out = np.empty((2, 3), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_percentile_empty_dim(self): + # empty dims are preserved + d = np.arange(11 * 2).reshape(11, 1, 2, 1) + assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) + + assert_array_equal(np.percentile(d, 50, axis=2, + method='midpoint').shape, + (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-2, + method='midpoint').shape, + (11, 1, 1)) + + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, + (2, 1, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, + (2, 11, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, + (2, 11, 1, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, + (2, 11, 1, 2)) + + def test_percentile_no_overwrite(self): + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50], overwrite_input=False) + assert_equal(a, np.array([2, 3, 4, 1])) + + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50]) + assert_equal(a, np.array([2, 3, 4, 1])) + + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + + def test_percentile_overwrite(self): + a = np.array([2, 3, 4, 1]) + b = np.percentile(a, [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + + assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), + np.percentile(x, [25, 60], axis=None)) + assert_equal(np.percentile(x, [25, 60], axis=(0,)), + np.percentile(x, [25, 60], axis=0)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:, :, :, 0].flatten(), 25)) + assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], + np.percentile(d[:, :, 1, :].flatten(), [10, 
90])) + assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], + np.percentile(d[:, :, 2, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], + np.percentile(d[2, :, :, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], + np.percentile(d[2, 1, :, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], + np.percentile(d[2, :, :, 1].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], + np.percentile(d[2, :, 2, :].flatten(), 25)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.percentile, d, axis=-5, q=25) + assert_raises(AxisError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(AxisError, np.percentile, d, axis=4, q=25) + assert_raises(AxisError, np.percentile, d, axis=(0, 4), q=25) + # each of these refers to the same axis twice + assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), + keepdims=True).shape, (2, 1, 1, 7, 1)) + assert_equal(np.percentile(d, [1, 7], axis=(0, 3), + keepdims=True).shape, (2, 1, 5, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.percentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal(np.percentile(d, 0, 0, method='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal(np.percentile(d, 1, 1, method='nearest', out=o), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 2, out=o), o) + assert_equal(np.percentile(d, 2, method='nearest', out=o), o) + + @pytest.mark.parametrize("method, weighted", [ + ("linear", False), + ("nearest", False), + ("inverted_cdf", False), + ("inverted_cdf", True), + ]) + def test_out_nan(self, method, weighted): + if weighted: + kwargs = {"weights": np.ones((3, 4)), "method": method} + else: + kwargs = {"method": method} + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.percentile(d, 0, 0, out=o, **kwargs), o) + + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o, **kwargs), o) + + o = np.zeros(()) + 
assert_equal(np.percentile(d, 1, out=o, **kwargs), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3, axis=0), np.nan) + assert_equal(np.percentile(a, [0.3, 0.6], axis=0), + np.array([np.nan] * 2)) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3).ndim, 0) + + # axis0 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 0), b) + + # axis0 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], 0) + b[:, 2, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 0), b) + + # axis1 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 1), b) + # axis1 not zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) + b[:, 1, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 1), b) + + # axis02 zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.percentile(a, 0.3, (0, 2)), b) + # axis02 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2)) + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) + # axis02 not zerod with method='nearest' + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2), method='nearest') + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile( + a, [0.3, 0.6], (0, 2), method='nearest'), b) + + def test_nan_q(self): + # GH18830 + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], np.nan) + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], [np.nan]) + q = np.linspace(1.0, 99.0, 16) + q[0] = np.nan + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], q) + + @pytest.mark.parametrize("dtype", ["m8[D]", "M8[s]"]) + @pytest.mark.parametrize("pos", [0, 23, 10]) + def test_nat_basic(self, dtype, pos): + # TODO: Note that times have dubious rounding as of fixing NaTs! + # NaT and NaN should behave the same, do basic tests for NaT: + a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.percentile(a, 30) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.percentile(a, 30, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + +quantile_methods = [ + 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', + 'midpoint'] + + +methods_supporting_weights = ["inverted_cdf"] + + +class TestQuantile: + # most of this is already tested by TestPercentile + + def V(self, x, y, alpha): + # Identification function used in several tests. 
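+        # E[V(x, Y)] = P(Y <= x) - alpha, which is zero exactly when x is
+        # the alpha-quantile of Y; the tests below check that the sample
+        # average of V at the computed quantile is (approximately) zero.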
+ return (x >= y) - alpha + + def test_max_ulp(self): + x = [0.0, 0.2, 0.4] + a = np.quantile(x, 0.45) + # The default linear method would result in 0 + 0.2 * (0.45/2) = 0.18. + # 0.18 is not exactly representable and the formula leads to a 1 ULP + # different result. Ensure it is this exact within 1 ULP, see gh-20331. + np.testing.assert_array_max_ulp(a, 0.18, maxulp=1) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.quantile(x, 0), 0.) + assert_equal(np.quantile(x, 1), 3.5) + assert_equal(np.quantile(x, 0.5), 1.75) + + def test_correct_quantile_value(self): + a = np.array([True]) + tf_quant = np.quantile(True, False) + assert_equal(tf_quant, a[0]) + assert_equal(type(tf_quant), a.dtype) + a = np.array([False, True, True]) + quant_res = np.quantile(a, a) + assert_array_equal(quant_res, a) + assert_equal(quant_res.dtype, a.dtype) + + def test_fraction(self): + # fractional input, integral quantile + x = [Fraction(i, 2) for i in range(8)] + q = np.quantile(x, 0) + assert_equal(q, 0) + assert_equal(type(q), Fraction) + + q = np.quantile(x, 1) + assert_equal(q, Fraction(7, 2)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, .5) + assert_equal(q, 1.75) + assert_equal(type(q), np.float64) + + q = np.quantile(x, Fraction(1, 2)) + assert_equal(q, Fraction(7, 4)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, [Fraction(1, 2)]) + assert_equal(q, np.array([Fraction(7, 4)])) + assert_equal(type(q), np.ndarray) + + q = np.quantile(x, [[Fraction(1, 2)]]) + assert_equal(q, np.array([[Fraction(7, 4)]])) + assert_equal(type(q), np.ndarray) + + # repeat with integral input but fractional quantile + x = np.arange(8) + assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) + + def test_complex(self): + # gh-22652 + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_quantile_preserve_int_type(self, dtype): + res = np.quantile(np.array([1, 2], dtype=dtype), [0.5], + method="nearest") + assert res.dtype == dtype + + @pytest.mark.parametrize("method", quantile_methods) + def test_q_zero_one(self, method): + # gh-24710 + arr = [10, 11, 12] + quantile = np.quantile(arr, q=[0, 1], method=method) + assert_equal(quantile, np.array([10, 12])) + + @pytest.mark.parametrize("method", quantile_methods) + def test_quantile_monotonic(self, method): + # GH 14685 + # test that the return value of quantile is monotonic if p0 is ordered + # Also tests that the boundary values are not mishandled. 
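+        # Monotonicity is expected because each method maps p to a
+        # nondecreasing interpolation position and the order statistics it
+        # interpolates between are themselves sorted.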
+        p0 = np.linspace(0, 1, 101)
+        quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9,
+                                         8, 8, 7]) * 0.1, p0, method=method)
+        assert_equal(np.sort(quantile), quantile)
+
+        # Also test one where the number of data points is clearly divisible:
+        quantile = np.quantile([0., 1., 2., 3.], p0, method=method)
+        assert_equal(np.sort(quantile), quantile)
+
+    @hypothesis.given(
+            arr=arrays(dtype=np.float64,
+                       shape=st.integers(min_value=3, max_value=1000),
+                       elements=st.floats(allow_infinity=False, allow_nan=False,
+                                          min_value=-1e300, max_value=1e300)))
+    def test_quantile_monotonic_hypo(self, arr):
+        p0 = np.arange(0, 1, 0.01)
+        quantile = np.quantile(arr, p0)
+        assert_equal(np.sort(quantile), quantile)
+
+    def test_quantile_scalar_nan(self):
+        a = np.array([[10., 7., 4.], [3., 2., 1.]])
+        a[0][1] = np.nan
+        actual = np.quantile(a, 0.5)
+        assert np.isscalar(actual)
+        assert_equal(np.quantile(a, 0.5), np.nan)
+
+    @pytest.mark.parametrize("weights", [False, True])
+    @pytest.mark.parametrize("method", quantile_methods)
+    @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+    def test_quantile_identification_equation(self, weights, method, alpha):
+        # Test that the identification equation holds for the empirical
+        # CDF:
+        #     E[V(x, Y)] = 0  <=>  x is quantile
+        # with Y the random variable for which we have observed values and
+        # V(x, y) the canonical identification function for the quantile (at
+        # level alpha), see
+        # https://doi.org/10.48550/arXiv.0912.0902
+        if weights and method not in methods_supporting_weights:
+            pytest.skip("Weights not supported by method.")
+        rng = np.random.default_rng(4321)
+        # We choose n and alpha such that we cover 3 cases:
+        # - n * alpha is an integer
+        # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+        n = 102  # n * alpha = 20.4, 51. , 91.8
+        y = rng.random(n)
+        w = rng.integers(low=0, high=10, size=n) if weights else None
+        x = np.quantile(y, alpha, method=method, weights=w)
+
+        if method in ("higher",):
+            # These methods do not fulfill the identification equation.
+            assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n
+        elif int(n * alpha) == n * alpha and not weights:
+            # We can expect exact results, up to machine precision.
+            assert_allclose(
+                np.average(self.V(x, y, alpha), weights=w), 0, atol=1e-14,
+            )
+        else:
+            # V = (x >= y) - alpha cannot sum to zero exactly but within
+            # "sample precision".
+            assert_allclose(np.average(self.V(x, y, alpha), weights=w), 0,
+                            atol=1 / n / np.amin([alpha, 1 - alpha]))
+
+    @pytest.mark.parametrize("weights", [False, True])
+    @pytest.mark.parametrize("method", quantile_methods)
+    @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+    def test_quantile_add_and_multiply_constant(self, weights, method, alpha):
+        # Test that
+        #   1. quantile(c + x) = c + quantile(x)
+        #   2. quantile(c * x) = c * quantile(x)
+        #   3. quantile(-x) = -quantile(x, 1 - alpha)
+        # On empirical quantiles, this equation does not hold exactly.
+        # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these
+        # properties equivariance.
+        if weights and method not in methods_supporting_weights:
+            pytest.skip("Weights not supported by method.")
+        rng = np.random.default_rng(4321)
+        # We choose n and alpha such that we have cases for
+        # - n * alpha is an integer
+        # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+        n = 102  # n * alpha = 20.4, 51. , 91.8
+        y = rng.random(n)
+        w = rng.integers(low=0, high=10, size=n) if weights else None
+        q = np.quantile(y, alpha, method=method, weights=w)
+        c = 13.5
+
+        # 1
+        assert_allclose(np.quantile(c + y, alpha, method=method, weights=w),
+                        c + q)
+        # 2
+        assert_allclose(np.quantile(c * y, alpha, method=method, weights=w),
+                        c * q)
+        # 3
+        if weights:
+            # From here on, we would need more methods to support weights.
+            return
+        q = -np.quantile(-y, 1 - alpha, method=method)
+        if method == "inverted_cdf":
+            if (
+                n * alpha == int(n * alpha)
+                or np.round(n * alpha) == int(n * alpha) + 1
+            ):
+                assert_allclose(q, np.quantile(y, alpha, method="higher"))
+            else:
+                assert_allclose(q, np.quantile(y, alpha, method="lower"))
+        elif method == "closest_observation":
+            if n * alpha == int(n * alpha):
+                assert_allclose(q, np.quantile(y, alpha, method="higher"))
+            elif np.round(n * alpha) == int(n * alpha) + 1:
+                assert_allclose(
+                    q, np.quantile(y, alpha + 1 / n, method="higher"))
+            else:
+                assert_allclose(q, np.quantile(y, alpha, method="lower"))
+        elif method == "interpolated_inverted_cdf":
+            assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method))
+        elif method == "nearest":
+            if n * alpha == int(n * alpha):
+                assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method))
+            else:
+                assert_allclose(q, np.quantile(y, alpha, method=method))
+        elif method == "lower":
+            assert_allclose(q, np.quantile(y, alpha, method="higher"))
+        elif method == "higher":
+            assert_allclose(q, np.quantile(y, alpha, method="lower"))
+        else:
+            # "averaged_inverted_cdf", "hazen", "weibull", "linear",
+            # "median_unbiased", "normal_unbiased", "midpoint"
+            assert_allclose(q, np.quantile(y, alpha, method=method))
+
+    @pytest.mark.parametrize("method", methods_supporting_weights)
+    @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+    def test_quantile_constant_weights(self, method, alpha):
+        rng = np.random.default_rng(4321)
+        # We choose n and alpha such that we have cases for
+        # - n * alpha is an integer
+        # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+        n = 102  # n * alpha = 20.4, 51. , 91.8
+        y = rng.random(n)
+        q = np.quantile(y, alpha, method=method)
+
+        w = np.ones_like(y)
+        qw = np.quantile(y, alpha, method=method, weights=w)
+        assert_allclose(qw, q)
+
+        w = 8.125 * np.ones_like(y)
+        qw = np.quantile(y, alpha, method=method, weights=w)
+        assert_allclose(qw, q)
+
+    @pytest.mark.parametrize("method", methods_supporting_weights)
+    @pytest.mark.parametrize("alpha", [0, 0.2, 0.5, 0.9, 1])
+    def test_quantile_with_integer_weights(self, method, alpha):
+        # Integer weights can be interpreted as repeated observations.
+        rng = np.random.default_rng(4321)
+        # We choose n and alpha such that we have cases for
+        # - n * alpha is an integer
+        # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+        n = 102  # n * alpha = 20.4, 51.
, 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n, dtype=np.int32) + + qw = np.quantile(y, alpha, method=method, weights=w) + q = np.quantile(np.repeat(y, w), alpha, method=method) + assert_allclose(qw, q) + + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_with_weights_and_axis(self, method): + rng = np.random.default_rng(4321) + + # 1d weight and single alpha + y = rng.random((2, 10, 3)) + w = np.abs(rng.random(10)) + alpha = 0.5 + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(2, 3)) + for i in range(2): + for j in range(3): + q_res[i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w + ) + assert_allclose(q, q_res) + + # 1d weight and 1d alpha + alpha = [0, 0.2, 0.4, 0.6, 0.8, 1] # shape (6,) + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(6, 2, 3)) + for i in range(2): + for j in range(3): + q_res[:, i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w + ) + assert_allclose(q, q_res) + + # 1d weight and 2d alpha + alpha = [[0, 0.2], [0.4, 0.6], [0.8, 1]] # shape (3, 2) + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = q_res.reshape((3, 2, 2, 3)) + assert_allclose(q, q_res) + + # shape of weights equals shape of y + w = np.abs(rng.random((2, 10, 3))) + alpha = 0.5 + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(2, 3)) + for i in range(2): + for j in range(3): + q_res[i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w[i, :, j] + ) + assert_allclose(q, q_res) + + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_weights_min_max(self, method): + # Test weighted quantile at 0 and 1 with leading and trailing zero + # weights. 
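+ # With zero weights on y[0], y[1] and y[5], only y[2]..y[4] carry any + # probability mass, so the 0- and 1-quantiles should clamp to those + # observations rather than to the unweighted extremes.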
+ w = [0, 0, 1, 2, 3, 0] + y = np.arange(6) + y_min = np.quantile(y, 0, weights=w, method="inverted_cdf") + y_max = np.quantile(y, 1, weights=w, method="inverted_cdf") + assert y_min == y[2] # == 2 + assert y_max == y[4] # == 4 + + def test_quantile_weights_raises_negative_weights(self): + y = [1, 2] + w = [-0.5, 1] + with pytest.raises(ValueError, match="Weights must be non-negative"): + np.quantile(y, 0.5, weights=w, method="inverted_cdf") + + @pytest.mark.parametrize( + "method", + sorted(set(quantile_methods) - set(methods_supporting_weights)), + ) + def test_quantile_weights_raises_unsupported_methods(self, method): + y = [1, 2] + w = [0.5, 1] + msg = "Only method 'inverted_cdf' supports weights" + with pytest.raises(ValueError, match=msg): + np.quantile(y, 0.5, weights=w, method=method) + + def test_weibull_fraction(self): + arr = [Fraction(0, 1), Fraction(1, 10)] + quantile = np.quantile(arr, [0, ], method='weibull') + assert_equal(quantile, np.array(Fraction(0, 1))) + quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull') + assert_equal(quantile, np.array(Fraction(1, 20))) + + def test_closest_observation(self): + # Round ties to nearest even order statistic (see #26656) + m = 'closest_observation' + q = 0.5 + arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(2, np.quantile(arr[0:3], q, method=m)) + assert_equal(2, np.quantile(arr[0:4], q, method=m)) + assert_equal(2, np.quantile(arr[0:5], q, method=m)) + assert_equal(3, np.quantile(arr[0:6], q, method=m)) + assert_equal(4, np.quantile(arr[0:7], q, method=m)) + assert_equal(4, np.quantile(arr[0:8], q, method=m)) + assert_equal(4, np.quantile(arr[0:9], q, method=m)) + assert_equal(5, np.quantile(arr, q, method=m)) + + +class TestLerp: + @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + t1=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b): + l0 = nfb._lerp(a, b, t0) + l1 = nfb._lerp(a, b, t1) + if t0 == t1 or a == b: + assert l0 == l1 # uninteresting + elif (t0 < t1) == (a < b): + assert l0 <= l1 + else: + assert l0 >= l1 + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_bounded(self, t, a, b): + if a <= b: + assert a <= nfb._lerp(a, b, t) <= b + else: + assert b <= nfb._lerp(a, b, t) <= a + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_symmetric(self, t, a, b): + # double subtraction is needed to remove the extra precision of t < 0.5 + left = nfb._lerp(a, b, 1 - (1 - t)) + right = nfb._lerp(b, a, 1 - t) + assert_allclose(left, right) + + def test_linear_interpolation_formula_0d_inputs(self): + a = np.array(2) + b = np.array(5) + t = np.array(0.2) + assert nfb._lerp(a, b, t) == 2.6 + + +class TestMedian: + + def test_basic(self): + a0 = np.array(1) + a1 = 
np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_equal(np.median(a0), 1) + assert_allclose(np.median(a1), 0.5) + assert_allclose(np.median(a2), 2.5) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_equal(np.median(a2, axis=1), [1, 4]) + assert_allclose(np.median(a2, axis=None), 2.5) + + a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) + assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) + a = np.array([0.0463301, 0.0444502, 0.141249]) + assert_equal(a[0], np.median(a)) + a = np.array([0.0444502, 0.141249, 0.0463301]) + assert_equal(a[-1], np.median(a)) + # check array scalar result + assert_equal(np.median(a).ndim, 0) + a[1] = np.nan + assert_equal(np.median(a).ndim, 0) + + def test_axis_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: + orig = a.copy() + np.median(a, axis=None) + for ax in range(a.ndim): + np.median(a, axis=ax) + assert_array_equal(a, orig) + + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3), 3.5) + assert_allclose(np.median(a3, axis=None), 3.5) + assert_allclose(np.median(a3.T), 3.5) + + def test_overwrite_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) + assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) + assert_allclose( + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose( + np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4]) + + a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) + np.random.shuffle(a4.ravel()) + assert_allclose(np.median(a4, axis=None), + np.median(a4.copy(), axis=None, overwrite_input=True)) + assert_allclose(np.median(a4, axis=0), + np.median(a4.copy(), axis=0, overwrite_input=True)) + assert_allclose(np.median(a4, axis=1), + np.median(a4.copy(), axis=1, overwrite_input=True)) + assert_allclose(np.median(a4, axis=2), + np.median(a4.copy(), axis=2, overwrite_input=True)) + + def test_array_like(self): + x = [1, 2, 3] + assert_almost_equal(np.median(x), 2) + x2 = [x] + assert_almost_equal(np.median(x2), 2) + assert_allclose(np.median(x2, axis=0), x) + + def test_subclass(self): + # gh-3846 + class MySubClass(np.ndarray): + + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj + + def mean(self, axis=None, dtype=None, out=None): + return -7 + + a = MySubClass([1, 2, 3]) + assert_equal(np.median(a), -7) + + @pytest.mark.parametrize('arr', + ([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.)) + def test_subclass2(self, arr): + """Check that we return subclasses, even if a NaN scalar.""" + class MySubclass(np.ndarray): + pass + + m = np.median(np.array(arr).view(MySubclass)) + assert isinstance(m, MySubclass) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_out_nan(self): + 
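# Each call below writes through `out` and returns it, so comparing + # the result to `o` checks the out-parameter plumbing even when NaN + # propagates; the filter records any RuntimeWarning instead of raising. +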
with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a, axis=0), np.nan) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a).ndim, 0) + + # axis0 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 0), b) + + # axis1 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 1), b) + + # axis02 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.median(a, (0, 2)), b) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly") + def test_empty(self): + # mean(empty array) emits two warnings: empty slice and divide by 0 + a = np.array([], dtype=float) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + assert_equal(len(w), 2) + + # multiple dimensions + a = np.array([], dtype=float, ndmin=3) + # no axis + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.array([], dtype=float, ndmin=2) + assert_equal(np.median(a, axis=0), b) + assert_equal(np.median(a, axis=1), b) + + # axis 2 + b = np.array(np.nan, dtype=float, ndmin=2) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.arange(7.) 
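+ # For object arrays the median is computed on the Python objects + # themselves, so the result here should be a plain Python float + # (with or without a NaN present).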
+ assert_(type(np.median(o.astype(object))) is float) + o[2] = np.nan + assert_(type(np.median(o.astype(object))) is float) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.median(x, axis=(0, 1)), np.median(o)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.median(x, axis=(0, -1)), np.median(o)) + + assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) + assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) + assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.median(d, axis=(0, 1, 2))[0], + np.median(d[:, :, :, 0].flatten())) + assert_equal(np.median(d, axis=(0, 1, 3))[1], + np.median(d[:, :, 1, :].flatten())) + assert_equal(np.median(d, axis=(3, 1, -4))[2], + np.median(d[:, :, 2, :].flatten())) + assert_equal(np.median(d, axis=(3, 1, 2))[2], + np.median(d[2, :, :, :].flatten())) + assert_equal(np.median(d, axis=(3, 2))[2, 1], + np.median(d[2, 1, :, :].flatten())) + assert_equal(np.median(d, axis=(1, -2))[2, 1], + np.median(d[2, :, :, 1].flatten())) + assert_equal(np.median(d, axis=(1, 3))[2, 2], + np.median(d[2, :, 2, :].flatten())) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.median, d, axis=-5) + assert_raises(AxisError, np.median, d, axis=(0, -5)) + assert_raises(AxisError, np.median, d, axis=4) + assert_raises(AxisError, np.median, d, axis=(0, 4)) + assert_raises(ValueError, np.median, d, axis=(1, 1)) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.median(d, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.median(d, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, axis): + d = np.ones((3, 5, 7, 11)) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = np.empty(shape_out) + result = np.median(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + @pytest.mark.parametrize("dtype", ["m8[s]"]) + @pytest.mark.parametrize("pos", [0, 23, 10]) + def test_nat_behavior(self, dtype, pos): + # TODO: Median does not support Datetime, due to `mean`. + # NaT and NaN should behave the same, do basic tests for NaT.
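+ # Like NaN for floats, NaT sorts to the end, so a single NaT is + # expected to propagate into the median and the percentiles computed + # along that axis, regardless of `pos`.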
+ a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.median(a) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.median(a, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + +class TestSortComplex: + + @pytest.mark.parametrize("type_in, type_out", [ + ('l', 'D'), + ('h', 'F'), + ('H', 'F'), + ('b', 'F'), + ('B', 'F'), + ('g', 'G'), + ]) + def test_sort_real(self, type_in, type_out): + # sort_complex() type casting for real input types + a = np.array([5, 3, 6, 2, 1], dtype=type_in) + actual = np.sort_complex(a) + expected = np.sort(a).astype(type_out) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + def test_sort_complex(self): + # sort_complex() handling of complex input + a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D') + expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D') + actual = np.sort_complex(a) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_histograms.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_histograms.py new file mode 100644 index 0000000..b7752d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_histograms.py @@ -0,0 +1,855 @@ +import pytest + +import numpy as np +from numpy import histogram, histogram_bin_edges, histogramdd +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) + + +class TestHistogram: + + def setup_method(self): + pass + + def teardown_method(self): + pass + + def test_simple(self): + n = 100 + v = np.random.rand(n) + (a, b) = histogram(v) + # check if the sum of the bins equals the number of samples + assert_equal(np.sum(a, axis=0), n) + # check that the bin counts are evenly spaced when the data is from + # a linear function + (a, b) = histogram(np.linspace(0, 10, 100)) + assert_array_equal(a, 10) + + def test_one_bin(self): + # Ticket 632 + hist, edges = histogram([1, 2, 3, 4], [1, 2]) + assert_array_equal(hist, [2, ]) + assert_array_equal(edges, [1, 2]) + assert_raises(ValueError, histogram, [1, 2], bins=0) + h, e = histogram([1, 2], bins=1) + assert_equal(h, np.array([2])) + assert_allclose(e, np.array([1., 2.])) + + def test_density(self): + # Check that the integral of the density equals 1. + n = 100 + v = np.random.rand(n) + a, b = histogram(v, density=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths + v = np.arange(10) + bins = [0, 1, 3, 6, 10] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, .1) + assert_equal(np.sum(a * np.diff(b)), 1) + + # Test that passing False works too + a, b = histogram(v, bins, density=False) + assert_array_equal(a, [1, 2, 3, 4]) + + # Variable bin widths are especially useful to deal with + # infinities. + v = np.arange(10) + bins = [0, 1, 3, 6, np.inf] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, [.1, .1, .1, 0.]) + + # Taken from a bug report from N. Becker on the numpy-discussion + # mailing list Aug. 6, 2010. 
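+ # With edges [0.5, 1.5, inf], one of the four samples lands in the + # unit-width first bin: density = 1 / (4 * 1) = 0.25 there, while the + # infinite-width bin gets density 0 by convention.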
+ counts, dmy = np.histogram( + [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) + assert_equal(counts, [.25, 0]) + + def test_outliers(self): + # Check that outliers are not tallied + a = np.arange(10) + .5 + + # Lower outliers + h, b = histogram(a, range=[0, 9]) + assert_equal(h.sum(), 9) + + # Upper outliers + h, b = histogram(a, range=[1, 10]) + assert_equal(h.sum(), 9) + + # Normalization + h, b = histogram(a, range=[1, 9], density=True) + assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15) + + # Weights + w = np.arange(10) + .5 + h, b = histogram(a, range=[1, 9], weights=w, density=True) + assert_equal((h * np.diff(b)).sum(), 1) + + h, b = histogram(a, bins=8, range=[1, 9], weights=w) + assert_equal(h, w[1:-1]) + + def test_arr_weights_mismatch(self): + a = np.arange(10) + .5 + w = np.arange(11) + .5 + with assert_raises_regex(ValueError, "same shape as"): + h, b = histogram(a, range=[1, 9], weights=w, density=True) + + def test_type(self): + # Check the type of the returned histogram + a = np.arange(10) + .5 + h, b = histogram(a) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, density=True) + assert_(np.issubdtype(h.dtype, np.floating)) + + h, b = histogram(a, weights=np.ones(10, int)) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, weights=np.ones(10, float)) + assert_(np.issubdtype(h.dtype, np.floating)) + + def test_f32_rounding(self): + # gh-4799, check that the rounding of the edges works with float32 + x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) + y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) + counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) + assert_equal(counts_hist.sum(), 3.) + + def test_bool_conversion(self): + # gh-12107 + # Reference integer histogram + a = np.array([1, 1, 0], dtype=np.uint8) + int_hist, int_edges = np.histogram(a) + + # Should raise a warning on booleans + # Ensure that the histograms are equivalent; we need to suppress + # the warnings to get the actual outputs + with suppress_warnings() as sup: + rec = sup.record(RuntimeWarning, 'Converting input from .*') + hist, edges = np.histogram([True, True, False]) + # A warning should be issued + assert_equal(len(rec), 1) + assert_array_equal(hist, int_hist) + assert_array_equal(edges, int_edges) + + def test_weights(self): + v = np.random.rand(100) + w = np.ones(100) * 5 + a, b = histogram(v) + na, nb = histogram(v, density=True) + wa, wb = histogram(v, weights=w) + nwa, nwb = histogram(v, weights=w, density=True) + assert_array_almost_equal(a * 5, wa) + assert_array_almost_equal(na, nwa) + + # Check weights are properly applied. + v = np.linspace(0, 10, 10) + w = np.concatenate((np.zeros(5), np.ones(5))) + wa, wb = histogram(v, bins=np.arange(11), weights=w) + assert_array_almost_equal(wa, w) + + # Check with integer weights + wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) + assert_array_equal(wa, [4, 5, 0, 1]) + wa, wb = histogram( + [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True) + assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) + + # Check weights with non-uniform bin widths + a, b = histogram( + np.arange(9), [0, 1, 3, 6, 10], + weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) + assert_almost_equal(a, [.2, .1, .1, .075]) + + def test_exotic_weights(self): + + # Test the use of weights that are not integers or floats, but e.g. + # complex numbers or object types.
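+ # Accumulating weighted counts essentially only requires that the + # weight dtype support addition, which is why the complex and Decimal + # weights below are expected to work without special handling.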
+ + # Complex weights + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Decimal weights + from decimal import Decimal + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([Decimal(1), Decimal(2), Decimal(3)]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + def test_no_side_effects(self): + # This is a regression test that ensures that values passed to + # ``histogram`` are unchanged. + values = np.array([1.3, 2.5, 2.3]) + np.histogram(values, range=[-10, 10], bins=100) + assert_array_almost_equal(values, [1.3, 2.5, 2.3]) + + def test_empty(self): + a, b = histogram([], bins=([0, 1])) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_error_binnum_type(self): + # Tests that the right error is raised if the bins argument is a float + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, 5) + assert_raises(TypeError, histogram, vals, 2.4) + + def test_finite_range(self): + # Normal ranges should be fine + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, range=[0.25, 0.75]) + assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75]) + assert_raises(ValueError, histogram, vals, range=[0.25, np.inf]) + + def test_invalid_range(self): + # start of range must be < end of range + vals = np.linspace(0.0, 1.0, num=100) + with assert_raises_regex(ValueError, "max must be larger than"): + np.histogram(vals, range=[0.1, 0.01]) + + def test_bin_edge_cases(self): + # Ensure that floating-point computations correctly place edge cases.
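+ # With 8296 bins over (2, 2280) the edges are roughly 0.27 apart; each + # populated bin must satisfy left <= x < right for its sample, so an + # off-by-one-ULP edge placement would show up here.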
+ arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) + hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) + mask = hist > 0 + left_edges = edges[:-1][mask] + right_edges = edges[1:][mask] + for x, left, right in zip(arr, left_edges, right_edges): + assert_(x >= left) + assert_(x < right) + + def test_last_bin_inclusive_range(self): + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) + assert_equal(hist[-1], 1) + + def test_bin_array_dims(self): + # gracefully handle bins object > 1 dimension + vals = np.linspace(0.0, 1.0, num=100) + bins = np.array([[0, 0.5], [0.6, 1.0]]) + with assert_raises_regex(ValueError, "must be 1d"): + np.histogram(vals, bins=bins) + + def test_unsigned_monotonicity_check(self): + # Ensures ValueError is raised if bins not increasing monotonically + # when bins contain unsigned values (see #9222) + arr = np.array([2]) + bins = np.array([1, 3, 1], dtype='uint64') + with assert_raises(ValueError): + hist, edges = np.histogram(arr, bins=bins) + + def test_object_array_of_0d(self): + # gh-7864 + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [np.inf]) + + # these should not crash + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000002]) + np.histogram([np.array(0.5) for i in range(10)] + [.5]) + + def test_some_nan_values(self): + # gh-7503 + one_nan = np.array([0, 1, np.nan]) + all_nan = np.array([np.nan, np.nan]) + + # the internal comparisons with NaN give warnings + sup = suppress_warnings() + sup.filter(RuntimeWarning) + with sup: + # can't infer range with nan + assert_raises(ValueError, histogram, one_nan, bins='auto') + assert_raises(ValueError, histogram, all_nan, bins='auto') + + # explicit range solves the problem + h, b = histogram(one_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 0) # nan is not counted + + # as does an explicit set of bins + h, b = histogram(one_nan, bins=[0, 1]) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins=[0, 1]) + assert_equal(h.sum(), 0) # nan is not counted + + def test_datetime(self): + begin = np.datetime64('2000-01-01', 'D') + offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) + bins = np.array([0, 2, 7, 20]) + dates = begin + offsets + date_bins = begin + bins + + td = np.dtype('timedelta64[D]') + + # Results should be the same for integer offsets or datetime values. 
+ # For now, only explicit bins are supported, since linspace does not + # work on datetimes or timedeltas + d_count, d_edge = histogram(dates, bins=date_bins) + t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td)) + i_count, i_edge = histogram(offsets, bins=bins) + + assert_equal(d_count, i_count) + assert_equal(t_count, i_count) + + assert_equal((d_edge - begin).astype(int), i_edge) + assert_equal(t_edge.astype(int), i_edge) + + assert_equal(d_edge.dtype, dates.dtype) + assert_equal(t_edge.dtype, td) + + def do_signed_overflow_bounds(self, dtype): + exponent = 8 * np.dtype(dtype).itemsize - 1 + arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype) + hist, e = histogram(arr, bins=2) + assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4]) + assert_equal(hist, [1, 1]) + + def test_signed_overflow_bounds(self): + self.do_signed_overflow_bounds(np.byte) + self.do_signed_overflow_bounds(np.short) + self.do_signed_overflow_bounds(np.intc) + self.do_signed_overflow_bounds(np.int_) + self.do_signed_overflow_bounds(np.longlong) + + def do_precision_lower_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([1.0 + eps, 2.0], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[0] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [0]) + assert_equal(x_loc.dtype, float_large) + + def do_precision_upper_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([0.0, 1.0 - eps], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[-1] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [0]) + + assert_equal(x_loc.dtype, float_large) + + def do_precision(self, float_small, float_large): + self.do_precision_lower_bound(float_small, float_large) + self.do_precision_upper_bound(float_small, float_large) + + def test_precision(self): + # not looping results in a useful stack trace upon failure + self.do_precision(np.half, np.single) + self.do_precision(np.half, np.double) + self.do_precision(np.half, np.longdouble) + self.do_precision(np.single, np.double) + self.do_precision(np.single, np.longdouble) + self.do_precision(np.double, np.longdouble) + + def test_histogram_bin_edges(self): + hist, e = histogram([1, 2, 3, 4], [1, 2]) + edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) + assert_array_equal(edges, e) + + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, e = histogram(arr, bins=30, range=(-0.5, 5)) + edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5)) + assert_array_equal(edges, e) + + hist, e = histogram(arr, bins='auto', range=(0, 1)) + edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) + assert_array_equal(edges, e) + + def test_small_value_range(self): + arr = np.array([1, 1 + 2e-16] * 10) + with pytest.raises(ValueError, match="Too many bins for data range"): + histogram(arr, bins=10) + + # @requires_memory(free_bytes=1e10) + # @pytest.mark.slow + @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") + def test_big_arrays(self): + sample = np.zeros([100000000, 3]) + xbins = 400 + ybins = 400 + zbins = np.arange(16000) + hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins)) + assert_equal(type(hist), 
type((1, 2))) + + def test_gh_23110(self): + hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'), + bins=2, + range=(-1e-308, -2e-313)) + expected_hist = np.array([1, 0]) + assert_array_equal(hist, expected_hist) + + def test_gh_28400(self): + e = 1 + 1e-12 + Z = [0, 1, 1, 1, 1, 1, e, e, e, e, e, e, 2] + counts, edges = np.histogram(Z, bins="auto") + assert len(counts) < 10 + assert edges[0] == Z[0] + assert edges[-1] == Z[-1] + +class TestHistogramOptimBinNums: + """ + Provide test coverage when using provided estimators for optimal number of + bins + """ + + def test_empty(self): + estimator_list = ['fd', 'scott', 'rice', 'sturges', + 'doane', 'sqrt', 'auto', 'stone'] + # check it can deal with empty data + for estimator in estimator_list: + a, b = histogram([], bins=estimator) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_simple(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). All test values have been precomputed and the values + shouldn't change + """ + # Some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, + 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2}, + 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, + 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9}, + 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, + 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}} + + for testlen, expectedResults in basic_test.items(): + # Create some sort of non uniform data to test with + # (2 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x = np.concatenate((x1, x2)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator) + assert_equal(len(a), numbins, err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}") + + def test_small(self): + """ + Smaller datasets have the potential to cause issues with the data + adaptive methods, especially the FD method. All bin numbers have been + precalculated. 
+ """ + small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, + 'doane': 1, 'sqrt': 1, 'stone': 1}, + 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2, + 'doane': 1, 'sqrt': 2, 'stone': 1}, + 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3, + 'doane': 3, 'sqrt': 2, 'stone': 1}} + + for testlen, expectedResults in small_dat.items(): + testdat = np.arange(testlen).astype(float) + for estimator, expbins in expectedResults.items(): + a, b = np.histogram(testdat, estimator) + assert_equal(len(a), expbins, err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}") + + def test_incorrect_methods(self): + """ + Check a Value Error is thrown when an unknown string is passed in + """ + check_list = ['mad', 'freeman', 'histograms', 'IQR'] + for estimator in check_list: + assert_raises(ValueError, histogram, [1, 2, 3], estimator) + + def test_novariance(self): + """ + Check that methods handle no variance in data + Primarily for Scott and FD as the SD and IQR are both 0 in this case + """ + novar_dataset = np.ones(100) + novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, + 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1} + + for estimator, numbins in novar_resultdict.items(): + a, b = np.histogram(novar_dataset, estimator) + assert_equal(len(a), numbins, + err_msg=f"{estimator} estimator, No Variance test") + + def test_limited_variance(self): + """ + Check when IQR is 0, but variance exists, we return a reasonable value. + """ + lim_var_data = np.ones(1000) + lim_var_data[:3] = 0 + lim_var_data[-4:] = 100 + + edges_auto = histogram_bin_edges(lim_var_data, 'auto') + assert_equal(edges_auto[0], 0) + assert_equal(edges_auto[-1], 100.) + assert len(edges_auto) < 100 + + edges_fd = histogram_bin_edges(lim_var_data, 'fd') + assert_equal(edges_fd, np.array([0, 100])) + + edges_sturges = histogram_bin_edges(lim_var_data, 'sturges') + assert_equal(edges_sturges, np.linspace(0, 100, 12)) + + def test_outlier(self): + """ + Check the FD, Scott and Doane with outliers. + + The FD estimates a smaller binwidth since it's less affected by + outliers. Since the range is so (artificially) large, this means more + bins, most of which will be empty, but the data of interest usually is + unaffected. The Scott estimator is more affected and returns fewer bins, + despite most of the variance being in one area of the data. The Doane + estimator lies somewhere between the other two. + """ + xcenter = np.linspace(-10, 10, 50) + outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) + + outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6} + + for estimator, numbins in outlier_resultdict.items(): + a, b = np.histogram(outlier_dataset, estimator) + assert_equal(len(a), numbins) + + def test_scott_vs_stone(self): + """Verify that Scott's rule and Stone's rule converges for normally distributed data""" + + def nbins_ratio(seed, size): + rng = np.random.RandomState(seed) + x = rng.normal(loc=0, scale=2, size=size) + a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) + return a / (a + b) + + ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] + for seed in range(10)] + + # the average difference between the two methods decreases as the dataset size increases. + avg = abs(np.mean(ll, axis=0) - 0.5) + assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) + + def test_simple_range(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). 
Adding in a 3rd mixture that will then be + completely ignored. All test values have been precomputed and + the shouldn't change. + """ + # some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = { + 50: {'fd': 8, 'scott': 8, 'rice': 15, + 'sturges': 14, 'auto': 14, 'stone': 8}, + 500: {'fd': 15, 'scott': 16, 'rice': 32, + 'sturges': 20, 'auto': 20, 'stone': 80}, + 5000: {'fd': 33, 'scott': 33, 'rice': 69, + 'sturges': 27, 'auto': 33, 'stone': 80} + } + + for testlen, expectedResults in basic_test.items(): + # create some sort of non uniform data to test with + # (3 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x3 = np.linspace(-100, -50, testlen) + x = np.hstack((x1, x2, x3)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator, range=(-20, 20)) + msg = f"For the {estimator} estimator" + msg += f" with datasize of {testlen}" + assert_equal(len(a), numbins, err_msg=msg) + + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_signed_integer_data(self, bins): + # Regression test for gh-14379. + a = np.array([-2, 0, 127], dtype=np.int8) + hist, edges = np.histogram(a, bins=bins) + hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins) + assert_array_equal(hist, hist32) + assert_array_equal(edges, edges32) + + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_integer(self, bins): + """ + Test that bin width for integer data is at least 1. + """ + with suppress_warnings() as sup: + if bins == 'stone': + sup.filter(RuntimeWarning) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), + np.arange(9)) + + def test_integer_non_auto(self): + """ + Test that the bin-width>=1 requirement *only* applies to auto binning. + """ + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), 16), + np.arange(17) / 2) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), [.1, .2]), + [.1, .2]) + + def test_simple_weighted(self): + """ + Check that weighted data raises a TypeError + """ + estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] + for estimator in estimator_list: + assert_raises(TypeError, histogram, [1, 2, 3], + estimator, weights=[1, 2, 3]) + + +class TestHistogramdd: + + def test_simple(self): + x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], + [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) + H, edges = histogramdd(x, (2, 3, 3), + range=[[-1, 1], [0, 3], [0, 3]]) + answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], + [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) + assert_array_equal(H, answer) + + # Check normalization + ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] + H, edges = histogramdd(x, bins=ed, density=True) + assert_(np.all(H == answer / 12.)) + + # Check that H has the correct shape. + H, edges = histogramdd(x, (2, 3, 4), + range=[[-1, 1], [0, 3], [0, 4]], + density=True) + answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], + [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) + assert_array_almost_equal(H, answer / 6., 4) + # Check that a sequence of arrays is accepted and H has the correct + # shape. 
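+ # np.split(x, 3, axis=1) yields three (6, 1) columns; squeezing them + # gives the per-coordinate 1-D arrays that histogramdd also accepts.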
+ z = [np.squeeze(y) for y in np.split(x, 3, axis=1)] + H, edges = histogramdd( + z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) + answer = np.array([[[0, 0], [0, 0], [0, 0]], + [[0, 1], [0, 0], [1, 0]], + [[0, 1], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0]]]) + assert_array_equal(H, answer) + + Z = np.zeros((5, 5, 5)) + Z[list(range(5)), list(range(5)), list(range(5))] = 1. + H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) + assert_array_equal(H, Z) + + def test_shape_3d(self): + # All possible permutations for bins of different lengths in 3D. + bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), + (4, 5, 6)) + r = np.random.rand(10, 3) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_shape_4d(self): + # All possible permutations for bins of different lengths in 4D. + bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), + (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), + (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), + (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), + (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), + (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) + + r = np.random.rand(10, 4) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_weights(self): + v = np.random.rand(100, 2) + hist, edges = histogramdd(v) + n_hist, edges = histogramdd(v, density=True) + w_hist, edges = histogramdd(v, weights=np.ones(100)) + assert_array_equal(w_hist, hist) + w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True) + assert_array_equal(w_hist, n_hist) + w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) + assert_array_equal(w_hist, 2 * hist) + + def test_identical_samples(self): + x = np.zeros((10, 2), int) + hist, edges = histogramdd(x, bins=2) + assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) + + def test_empty(self): + a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, np.array([[0.]])) + a, b = np.histogramdd([[], [], []], bins=2) + assert_array_max_ulp(a, np.zeros((2, 2, 2))) + + def test_bins_errors(self): + # There are two ways to specify bins. Check for the right errors + # when mixing those. + x = np.arange(8).reshape(2, 4) + assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) + assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) + assert_raises( + ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) + assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) + + def test_inf_edges(self): + # Test using +/-inf bin edges works. See #1788. + with np.errstate(invalid='ignore'): + x = np.arange(6).reshape(3, 2) + expected = np.array([[1, 0], [0, 1], [0, 1]]) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) + assert_allclose(h, expected) + + def test_rightmost_binedge(self): + # Test event very close to rightmost binedge. See Github issue #4266 + x = [0.9999999995] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) 
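+ # Only a value exactly equal to the last edge is folded into the + # final bin; anything strictly greater falls outside and is dropped.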
+ x = [1.0000000001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + x = [1.0001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + + def test_finite_range(self): + vals = np.random.random((100, 3)) + histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) + + def test_equal_edges(self): + """ Test that adjacent entries in an edge array can be equal """ + x = np.array([0, 1, 2]) + y = np.array([0, 1, 2]) + x_edges = np.array([0, 2, 2]) + y_edges = 1 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + hist_expected = np.array([ + [2.], + [1.], # x == 2 falls in the final bin + ]) + assert_equal(hist, hist_expected) + + def test_edge_dtype(self): + """ Test that if an edge array is input, its type is preserved """ + x = np.array([0, 10, 20]) + y = x / 10 + x_edges = np.array([0, 5, 15, 20]) + y_edges = x_edges / 10 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(edges[0].dtype, x_edges.dtype) + assert_equal(edges[1].dtype, y_edges.dtype) + + def test_large_integers(self): + big = 2**60 # Too large to represent with a full precision float + + x = np.array([0], np.int64) + x_edges = np.array([-1, +1], np.int64) + y = big + x + y_edges = big + x_edges + + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(hist[0, 0], 1) + + def test_density_non_uniform_2d(self): + # Defines the following grid: + # + # 0 2 8 + # 0+-+-----+ + # + | + + # + | + + # 6+-+-----+ + # 8+-+-----+ + x_edges = np.array([0, 2, 8]) + y_edges = np.array([0, 6, 8]) + relative_areas = np.array([ + [3, 9], + [1, 3]]) + + # ensure the number of points in each region is proportional to its area + x = np.array([1] + [1] * 3 + [7] * 3 + [7] * 9) + y = np.array([7] + [1] * 3 + [7] * 3 + [1] * 9) + + # sanity check that the above worked as intended + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges)) + assert_equal(hist, relative_areas) + + # resulting histogram should be uniform, since counts and areas are proportional + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True) + assert_equal(hist, 1 / (8 * 8)) + + def test_density_non_uniform_1d(self): + # compare to histogram to show the results are the same + v = np.arange(10) + bins = np.array([0, 1, 3, 6, 10]) + hist, edges = histogram(v, bins, density=True) + hist_dd, edges_dd = histogramdd((v,), (bins,), density=True) + assert_equal(hist, hist_dd) + assert_equal(edges, edges_dd[0]) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_index_tricks.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_index_tricks.py new file mode 100644 index 0000000..ed8709d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_index_tricks.py @@ -0,0 +1,568 @@ +import pytest + +import numpy as np +from numpy.lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + s_, +) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestRavelUnravelIndex: + def test_basic(self): + 
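# In C order, flat index 2 in shape (2, 2) decomposes as + # 2 == 1 * 2 + 0, hence (row, col) == (1, 0). +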
assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + + # test that new shape argument works properly + assert_equal(np.unravel_index(indices=2, + shape=(2, 2)), + (1, 0)) + + # test that an invalid second keyword argument + # is properly handled, including the old name `dims`. + with assert_raises(TypeError): + np.unravel_index(indices=2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(254, ims=(17, 94)) + + with assert_raises(TypeError): + np.unravel_index(254, dims=(17, 94)) + + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal( + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2 * 3 + 1) * 6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal( + np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal( + np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), + [[3, 6, 6], [4, 5, 1]]) + assert_equal( + np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) + + def test_empty_indices(self): + msg1 = 'indices must be integral: the provided empty sequence was' + msg2 = 'only int indices permitted' + assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5)) + assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) + assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), + (10, 3, 5)) + assert_equal(np.unravel_index(np.array([], dtype=int), (10, 3, 5)), + [[], [], []]) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), + (10, 3)) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']), + (10, 3)) + assert_raises_regex(TypeError, msg2, np.ravel_multi_index, + (np.array([]), np.array([])), (5, 3)) + assert_equal(np.ravel_multi_index( + (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), []) + assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int), + (5, 3)), []) + + def test_big_indices(self): + # ravel_multi_index for big indices (issue #7546) + if np.intp == np.int64: + arr = ([1, 29], [3, 5], [3, 117], [19, 2], + [2379, 1284], [2, 2], [0, 1]) + assert_equal( + np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), + [5627771580, 117259570957]) + + # test unravel_index for big indices (issue #9538) + assert_raises(ValueError, np.unravel_index, 1, (2**32 - 1, 2**31 + 1)) + + # test overflow checking for too big array (issue #7546) + dummy_arr = ([0], [0]) + half_max = 
np.iinfo(np.intp).max // 2 + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max + 1, 2)) + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max + 1, 2), order='F') + + def test_dtypes(self): + # Test with different data types + for dtype in [np.int16, np.uint16, np.int32, + np.uint32, np.int64, np.uint64]: + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) + uncoords = 8 * coords[0] + coords[1] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0] + 5 * coords[1] + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + dtype=dtype) + shape = (5, 8, 10) + uncoords = 10 * (8 * coords[0] + coords[1]) + coords[2] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0] + 5 * (coords[1] + 8 * coords[2]) + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + def test_clipmodes(self): + # Test clipmodes + assert_equal( + np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=( + 'wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises( + ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) + + def test_writeability(self): + # gh-7269 + x, y = np.unravel_index([1, 2, 3], (4, 5)) + assert_(x.flags.writeable) + assert_(y.flags.writeable) + + def test_0d(self): + # gh-580 + x = np.unravel_index(0, ()) + assert_equal(x, ()) + + assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) + assert_raises_regex( + ValueError, "out of bounds", np.unravel_index, [1], ()) + + @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) + def test_empty_array_ravel(self, mode): + res = np.ravel_multi_index( + np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) + assert res.shape == (0,) + + with assert_raises(ValueError): + np.ravel_multi_index( + np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) + + def test_empty_array_unravel(self): + res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) + # res is a tuple of three empty arrays + assert len(res) == 3 + assert all(a.shape == (0,) for a in res) + + with assert_raises(ValueError): + np.unravel_index([1], (2, 1, 0)) + +class TestGrid: + def test_basic(self): + a = mgrid[-1:1:10j] + b = mgrid[-1:1:0.1] + assert_(a.shape == (10,)) + assert_(b.shape == (20,)) + assert_(a[0] == -1) + assert_almost_equal(a[-1], 1) + assert_(b[0] == -1) + assert_almost_equal(b[1] - b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0] + 19 * 0.1, 11) + assert_almost_equal(a[1] - a[0], 2.0 / 9.0, 11) + + def test_linspace_equivalence(self): + y, st = np.linspace(2, 10, retstep=True) + assert_almost_equal(st, 8 / 49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) + + def test_nd(self): + c = mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + 
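# A complex step (10j) is an inclusive point count, like linspace; + # a real step (0.1) is an exclusive spacing, like arange, which is + # why d has 20 points per axis. +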
assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0, :], -np.ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2 * np.ones(10, 'd')) + assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2 * np.ones(10, 'd'), 11) + assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], + 0.1 * np.ones(20, 'd'), 11) + assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], + 0.2 * np.ones(20, 'd'), 11) + + def test_sparse(self): + grid_full = mgrid[-1:1:10j, -2:2:10j] + grid_sparse = ogrid[-1:1:10j, -2:2:10j] + + # sparse grids can be made dense by broadcasting + grid_broadcast = np.broadcast_arrays(*grid_sparse) + for f, b in zip(grid_full, grid_broadcast): + assert_equal(f, b) + + @pytest.mark.parametrize("start, stop, step, expected", [ + (None, 10, 10j, (200, 10)), + (-10, 20, None, (1800, 30)), + ]) + def test_mgrid_size_none_handling(self, start, stop, step, expected): + # regression test None value handling for + # start and step values used by mgrid; + # internally, this aims to cover previously + # unexplored code paths in nd_grid() + grid = mgrid[start:stop:step, start:stop:step] + # need a smaller grid to explore one of the + # untested code paths + grid_small = mgrid[start:stop:step] + assert_equal(grid.size, expected[0]) + assert_equal(grid_small.size, expected[1]) + + def test_accepts_npfloating(self): + # regression test for #16466 + grid64 = mgrid[0.1:0.33:0.1, ] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ] + assert_array_almost_equal(grid64, grid32) + # At some point this was float64, but NEP 50 changed it: + assert grid32.dtype == np.float32 + assert grid64.dtype == np.float64 + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)] + assert_(grid32.dtype == np.float64) + assert_array_almost_equal(grid64, grid32) + + def test_accepts_longdouble(self): + # regression tests for #16945 + grid64 = mgrid[0.1:0.33:0.1, ] + grid128 = mgrid[ + np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1), + ] + assert_(grid128.dtype == np.longdouble) + assert_array_almost_equal(grid64, grid128) + + grid128c_a = mgrid[0:np.longdouble(1):3.4j] + grid128c_b = mgrid[0:np.longdouble(1):3.4j, ] + assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble) + assert_array_equal(grid128c_a, grid128c_b[0]) + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid128 = mgrid[ + np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1) + ] + assert_(grid128.dtype == np.longdouble) + assert_array_almost_equal(grid64, grid128) + + def test_accepts_npcomplexfloating(self): + # Related to #16466 + assert_array_almost_equal( + mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ] + ) + + # different code path for single slice + assert_array_almost_equal( + mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)] + ) + + # Related to #16945 + grid64_a = mgrid[0.1:0.3:3.3j] + grid64_b = mgrid[0.1:0.3:3.3j, ][0] + assert_(grid64_a.dtype == grid64_b.dtype == np.float64) + assert_array_equal(grid64_a, grid64_b) + + grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)] + grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0] + assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble) + assert_array_equal(grid64_a, grid64_b) + + +class TestConcatenator: + def test_1d(self): + assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) + b = np.ones(5) + c = r_[b, 0, 0, b] + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + + def 
test_mixed_type(self): + g = r_[10.1, 1:10] + assert_(g.dtype == 'f8') + + def test_more_mixed_type(self): + g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] + assert_(g.dtype == 'f8') + + def test_complex_step(self): + # Regression test for #12262 + g = r_[0:36:100j] + assert_(g.shape == (100,)) + + # Related to #16466 + g = r_[0:36:np.complex64(100j)] + assert_(g.shape == (100,)) + + def test_2d(self): + b = np.random.rand(5, 5) + c = np.random.rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b) + assert_array_equal(d[5:, :], c) + + def test_0d(self): + assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) + assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) + assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) + + +class TestNdenumerate: + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(list(ndenumerate(a)), + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) + + +class TestIndexExpression: + def test_regression_1(self): + # ticket #1196 + a = np.arange(2) + assert_equal(a[:-1], a[s_[:-1]]) + assert_equal(a[:-1], a[index_exp[:-1]]) + + def test_simple_1(self): + a = np.random.rand(4, 5, 6) + + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) + + +class TestIx_: + def test_regression_1(self): + # Test empty untyped inputs create outputs of indexing type, gh-5804 + a, = np.ix_(range(0)) + assert_equal(a.dtype, np.intp) + + a, = np.ix_([]) + assert_equal(a.dtype, np.intp) + + # but if the type is specified, don't change it + a, = np.ix_(np.array([], dtype=np.float32)) + assert_equal(a.dtype, np.float32) + + def test_shape_and_dtype(self): + sizes = (4, 5, 3, 2) + # Test both lists and arrays + for func in (range, np.arange): + arrays = np.ix_(*[func(sz) for sz in sizes]) + for k, (a, sz) in enumerate(zip(arrays, sizes)): + assert_equal(a.shape[k], sz) + assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) + assert_(np.issubdtype(a.dtype, np.integer)) + + def test_bool(self): + bool_a = [True, False, True, True] + int_a, = np.nonzero(bool_a) + assert_equal(np.ix_(bool_a)[0], int_a) + + def test_1d_only(self): + idx2d = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, np.ix_, idx2d) + + def test_repeated_input(self): + length_of_vector = 5 + x = np.arange(length_of_vector) + out = ix_(x, x) + assert_equal(out[0].shape, (length_of_vector, 1)) + assert_equal(out[1].shape, (1, length_of_vector)) + # check that input shape is not modified + assert_equal(x.shape, (length_of_vector,)) + + +def test_c_(): + a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] + assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) + + +class TestFillDiagonal: + def test_basic(self): + a = np.zeros((3, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + ) + + def test_tall_matrix(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + ) + + def test_tall_matrix_wrap(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5, True) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0]]) + ) + + def 
test_wide_matrix(self): + a = np.zeros((3, 10), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) + ) + + def test_operate_4d_array(self): + a = np.zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = np.array([0, 1, 2]) + assert_equal(np.where(a != 0), (i, i, i, i)) + + def test_low_dim_handling(self): + # raise error with low dimensionality + a = np.zeros(3, int) + with assert_raises_regex(ValueError, "at least 2-d"): + fill_diagonal(a, 5) + + def test_hetero_shape_handling(self): + # raise error with high dimensionality and + # shape mismatch + a = np.zeros((3, 3, 7, 3), int) + with assert_raises_regex(ValueError, "equal length"): + fill_diagonal(a, 2) + + +def test_diag_indices(): + di = diag_indices(4) + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + assert_array_equal( + a, np.array([[100, 2, 3, 4], + [5, 100, 7, 8], + [9, 10, 100, 12], + [13, 14, 15, 100]]) + ) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = np.zeros((2, 2, 2), int) + a[d3] = 1 + assert_array_equal( + a, np.array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + ) + + +class TestDiagIndicesFrom: + + def test_diag_indices_from(self): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + def test_error_small_input(self): + x = np.ones(7) + with assert_raises_regex(ValueError, "at least 2-d"): + diag_indices_from(x) + + def test_error_shape_mismatch(self): + x = np.zeros((3, 3, 2, 3), int) + with assert_raises_regex(ValueError, "equal length"): + diag_indices_from(x) + + +def test_ndindex(): + x = list(ndindex(1, 2, 3)) + expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] + assert_array_equal(x, expected) + + x = list(ndindex((1, 2, 3))) + assert_array_equal(x, expected) + + # Test use of scalars and tuples + x = list(ndindex((3,))) + assert_array_equal(x, list(ndindex(3))) + + # Make sure size argument is optional + x = list(ndindex()) + assert_equal(x, [()]) + + x = list(ndindex(())) + assert_equal(x, [()]) + + # Make sure 0-sized ndindex works correctly + x = list(ndindex(*[0])) + assert_equal(x, []) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_io.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_io.py new file mode 100644 index 0000000..303dcfe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_io.py @@ -0,0 +1,2848 @@ +import gc +import gzip +import locale +import os +import re +import sys +import threading +import time +import warnings +import zipfile +from ctypes import c_bool +from datetime import datetime +from io import BytesIO, StringIO +from multiprocessing import Value, get_context +from pathlib import Path +from tempfile import NamedTemporaryFile + +import pytest + +import numpy as np +import numpy.ma as ma +from numpy._utils import asbytes +from numpy.exceptions import VisibleDeprecationWarning +from numpy.lib import _npyio_impl +from numpy.lib._iotools import ConversionWarning, ConverterError +from numpy.lib._npyio_impl import recfromcsv, recfromtxt +from numpy.ma.testutils import assert_equal +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_array_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + 
assert_raises_regex, + assert_warns, + break_cycles, + suppress_warnings, + tempdir, + temppath, +) +from numpy.testing._private.utils import requires_memory + + +class TextIO(BytesIO): + """Helper IO class. + + Writes encode strings to bytes if needed, reads return bytes. + This makes it easier to emulate files opened in binary mode + without needing to explicitly convert strings to bytes in + setting up the test data. + + """ + def __init__(self, s=""): + BytesIO.__init__(self, asbytes(s)) + + def write(self, s): + BytesIO.write(self, asbytes(s)) + + def writelines(self, lines): + BytesIO.writelines(self, [asbytes(s) for s in lines]) + + +IS_64BIT = sys.maxsize > 2**32 +try: + import bz2 + HAS_BZ2 = True +except ImportError: + HAS_BZ2 = False +try: + import lzma + HAS_LZMA = True +except ImportError: + HAS_LZMA = False + + +def strptime(s, fmt=None): + """ + This function is available in the datetime module only from Python >= + 2.5. + + """ + if isinstance(s, bytes): + s = s.decode("latin1") + return datetime(*time.strptime(s, fmt)[:3]) + + +class RoundtripTest: + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. + + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {"allow_pickle": True}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile(delete=False) + load_file = target_file.name + else: + target_file = BytesIO() + load_file = target_file + + try: + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + if sys.platform == 'win32' and not isinstance(target_file, BytesIO): + target_file.close() + + arr_reloaded = np.load(load_file, **load_kwds) + + self.arr = arr + self.arr_reloaded = arr_reloaded + finally: + if not isinstance(target_file, BytesIO): + target_file.close() + # holds an open file descriptor so it can't be deleted on win + if 'arr_reloaded' in locals(): + if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): + os.remove(target_file.name) + + def check_roundtrips(self, a): + self.roundtrip(a) + self.roundtrip(a, file_on_disk=True) + self.roundtrip(np.asfortranarray(a)) + self.roundtrip(np.asfortranarray(a), file_on_disk=True) + if a.shape[0] > 1: + # neither C nor Fortran contiguous for 2D arrays or more + self.roundtrip(np.asfortranarray(a)[1:]) + self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) + + def test_array(self): + a = np.array([], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], int) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) + self.check_roundtrips(a) + + def test_array_object(self): + a = np.array([], object) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], object) + self.check_roundtrips(a) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + + @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32") + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, 
load_kwds={'mmap_mode': 'r'}) + + a = np.asfortranarray([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + self.check_roundtrips(a) + + @pytest.mark.slow + def test_format_2_0(self): + dt = [(("%d" % i) * 100, float) for i in range(500)] + a = np.ones(1000, dtype=dt) + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', UserWarning) + self.check_roundtrips(a) + + +class TestSaveLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(self.arr[0], self.arr_reloaded) + assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) + assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + + +class TestSavezLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + try: + for n, arr in enumerate(self.arr): + reloaded = self.arr_reloaded['arr_%d' % n] + assert_equal(arr, reloaded) + assert_equal(arr.dtype, reloaded.dtype) + assert_equal(arr.flags.fnc, reloaded.flags.fnc) + finally: + # delete tempfile, must be done here on windows + if self.arr_reloaded.fid: + self.arr_reloaded.fid.close() + os.remove(self.arr_reloaded.fid.name) + + def test_load_non_npy(self): + """Test loading non-.npy files and name mapping in .npz.""" + with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp: + with zipfile.ZipFile(tmp, "w") as npz: + with npz.open("test1.npy", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("test2", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("metadata", "w") as out_file: + out_file.write(b"Name: Test") + with np.load(tmp) as npz: + assert len(npz["test1"]) == 10 + assert len(npz["test1.npy"]) == 10 + assert len(npz["test2"]) == 10 + assert npz["metadata"] == b"Name: Test" + + @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.slow + def test_big_arrays(self): + L = (1 << 31) + 100000 + a = np.empty(L, dtype=np.uint8) + with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: + np.savez(tmp, a=a) + del a + npfile = np.load(tmp) + a = npfile['a'] # Should succeed + npfile.close() + + def test_multiple_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + self.roundtrip(a, b) + + def test_named_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(a, l['file_a']) + assert_equal(b, l['file_b']) + + def test_tuple_getitem_raises(self): + # gh-23748 + a = np.array([1, 2, 3]) + f = BytesIO() + np.savez(f, a=a) + f.seek(0) + l = np.load(f) + with pytest.raises(KeyError, match="(1, 2)"): + l[1, 2] + + def test_BagObj(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(sorted(dir(l.f)), ['file_a', 'file_b']) + assert_equal(a, l.f.file_a) + assert_equal(b, l.f.file_b) + + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") + def test_savez_filename_clashes(self): + # Test that issue #852 is fixed + # and savez functions in multithreaded environment + + def 
writer(error_list):
+            with temppath(suffix='.npz') as tmp:
+                arr = np.random.randn(500, 500)
+                try:
+                    np.savez(tmp, arr=arr)
+                except OSError as err:
+                    error_list.append(err)
+
+        errors = []
+        threads = [threading.Thread(target=writer, args=(errors,))
+                   for j in range(3)]
+        for t in threads:
+            t.start()
+        for t in threads:
+            t.join()
+
+        if errors:
+            raise AssertionError(errors)
+
+    def test_not_closing_opened_fid(self):
+        # Test that issue #2178 is fixed:
+        # verify that we can seek in the loaded file
+        with temppath(suffix='.npz') as tmp:
+            with open(tmp, 'wb') as fp:
+                np.savez(fp, data='LOVELY LOAD')
+            with open(tmp, 'rb', 10000) as fp:
+                fp.seek(0)
+                assert_(not fp.closed)
+                np.load(fp)['data']
+                # fp must not get closed by .load
+                assert_(not fp.closed)
+                fp.seek(0)
+                assert_(not fp.closed)
+
+    @pytest.mark.slow_pypy
+    def test_closing_fid(self):
+        # Test that issue #1517 (too many opened files) stays fixed.
+        # It might be a "weak" test since it failed to get triggered on
+        # e.g. Debian sid of 2012 Jul 05, but was reported to
+        # trigger the failure on Ubuntu 10.04:
+        # http://projects.scipy.org/numpy/ticket/1517#comment:2
+        with temppath(suffix='.npz') as tmp:
+            np.savez(tmp, data='LOVELY LOAD')
+            # We need to check that the garbage collector can properly close
+            # the npz file returned by np.load when its reference count
+            # goes to zero. Python running in debug mode raises a
+            # ResourceWarning when file closing is left to the garbage
+            # collector, so we catch the warnings.
+            with suppress_warnings() as sup:
+                sup.filter(ResourceWarning)  # TODO: specify exact message
+                for i in range(1, 1025):
+                    try:
+                        np.load(tmp)["data"]
+                    except Exception as e:
+                        msg = f"Failed to load data from a file: {e}"
+                        raise AssertionError(msg)
+                    finally:
+                        if IS_PYPY:
+                            gc.collect()
+
+    def test_closing_zipfile_after_load(self):
+        # Check that zipfile owns the file and can close it. This needs to
+        # pass a file name to load for the test. On Windows, failure will
+        # cause a second error to be raised when the attempt to remove
+        # the open file is made.
+        prefix = 'numpy_test_closing_zipfile_after_load_'
+        with temppath(suffix='.npz', prefix=prefix) as tmp:
+            np.savez(tmp, lab='place holder')
+            data = np.load(tmp)
+            fp = data.zip.fp
+            data.close()
+            assert_(fp.closed)
+
+    @pytest.mark.parametrize("count, expected_repr", [
+        (1, "NpzFile {fname!r} with keys: arr_0"),
+        (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"),
+        # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are
+        # expected to end in '...'
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."), + ]) + def test_repr_lists_keys(self, count, expected_repr): + a = np.array([[1, 2], [3, 4]], float) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, *[a] * count) + l = np.load(tmp) + assert repr(l) == expected_repr.format(fname=tmp) + l.close() + + +class TestSaveTxt: + def test_array(self): + a = np.array([[1, 2], [3, 4]], float) + fmt = "%.18e" + c = BytesIO() + np.savetxt(c, a, fmt=fmt) + c.seek(0) + assert_equal(c.readlines(), + [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), + asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) + + a = np.array([[1, 2], [3, 4]], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) + + def test_0D_3D(self): + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, np.array(1)) + assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) + + def test_structured(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_structured_padded(self): + # gh-13297 + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=[ + ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4') + ]) + c = BytesIO() + np.savetxt(c, a[['foo', 'baz']], fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 3\n', b'4 6\n']) + + def test_multifield_view(self): + a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')]) + v = a[['x', 'z']] + with temppath(suffix='.npy') as path: + path = Path(path) + np.save(path, v) + data = np.load(path) + assert_array_equal(data, v) + + def test_delimiter(self): + a = np.array([[1., 2.], [3., 4.]]) + c = BytesIO() + np.savetxt(c, a, delimiter=',', fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) + + def test_format(self): + a = np.array([(1, 2), (3, 4)]) + c = BytesIO() + # Sequence of formats + np.savetxt(c, a, fmt=['%02d', '%3.1f']) + c.seek(0) + assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) + + # A single multiformat string + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Specify delimiter, should be overridden + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Bad fmt, should raise a ValueError + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, a, fmt=99) + + def test_header_footer(self): + # Test the functionality of the header and footer keyword argument. 
+
+        c = BytesIO()
+        a = np.array([(1, 2), (3, 4)], dtype=int)
+        test_header_footer = 'Test header / footer'
+        # Test the header keyword argument
+        np.savetxt(c, a, fmt='%1d', header=test_header_footer)
+        c.seek(0)
+        assert_equal(c.read(),
+                     asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
+        # Test the footer keyword argument
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
+        c.seek(0)
+        assert_equal(c.read(),
+                     asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
+        # Test the commentstr keyword argument used on the header
+        c = BytesIO()
+        commentstr = '% '
+        np.savetxt(c, a, fmt='%1d',
+                   header=test_header_footer, comments=commentstr)
+        c.seek(0)
+        assert_equal(c.read(),
+                     asbytes(commentstr + test_header_footer + '\n' +
+                             '1 2\n3 4\n'))
+        # Test the commentstr keyword argument used on the footer
+        c = BytesIO()
+        commentstr = '% '
+        np.savetxt(c, a, fmt='%1d',
+                   footer=test_header_footer, comments=commentstr)
+        c.seek(0)
+        assert_equal(c.read(),
+                     asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
+
+    @pytest.mark.parametrize("filename_type", [Path, str])
+    def test_file_roundtrip(self, filename_type):
+        with temppath() as name:
+            a = np.array([(1, 2), (3, 4)])
+            np.savetxt(filename_type(name), a)
+            b = np.loadtxt(filename_type(name))
+            assert_array_equal(a, b)
+
+    def test_complex_arrays(self):
+        ncols = 2
+        nrows = 2
+        a = np.zeros((ncols, nrows), dtype=np.complex128)
+        re = np.pi
+        im = np.e
+        a[:] = re + 1.0j * im
+
+        # One format only
+        c = BytesIO()
+        np.savetxt(c, a, fmt=' %+.3e')
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(
+            lines,
+            [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
+             b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
+
+        # One format for each real and imaginary part
+        c = BytesIO()
+        np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(
+            lines,
+            [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
+             b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
+
+        # One format for each complex number
+        c = BytesIO()
+        np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(
+            lines,
+            [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
+             b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+
+    def test_complex_negative_exponent(self):
+        # Prior to 1.15, some formats generated x+-yj, see gh-7895
+        ncols = 2
+        nrows = 2
+        a = np.zeros((ncols, nrows), dtype=np.complex128)
+        re = np.pi
+        im = np.e
+        a[:] = re - 1.0j * im
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%.3e')
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(
+            lines,
+            [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+             b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
+    def test_custom_writer(self):
+
+        class CustomWriter(list):
+            def write(self, text):
+                self.extend(text.split(b'\n'))
+
+        w = CustomWriter()
+        a = np.array([(1, 2), (3, 4)])
+        np.savetxt(w, a)
+        b = np.loadtxt(w)
+        assert_array_equal(a, b)
+
+    def test_unicode(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.str_)
+        with tempdir() as tmpdir:
+            # set the encoding, as on Windows it may not be unicode even on py3
+            np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
+                       encoding='UTF-8')
+
+    def test_unicode_roundtrip(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.str_)
+        # our gz wrapper supports encoding
+        suffixes = ['', '.gz']
+        if HAS_BZ2:
+            suffixes.append('.bz2')
+        if HAS_LZMA:
+            suffixes.extend(['.xz', '.lzma'])
+        with tempdir()
as tmpdir: + for suffix in suffixes: + np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a, + fmt=['%s'], encoding='UTF-16-LE') + b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix), + encoding='UTF-16-LE', dtype=np.str_) + assert_array_equal(a, b) + + def test_unicode_bytestream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + s = BytesIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read().decode('UTF-8'), utf8 + '\n') + + def test_unicode_stringstream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + s = StringIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read(), utf8 + '\n') + + @pytest.mark.parametrize("iotype", [StringIO, BytesIO]) + def test_unicode_and_bytes_fmt(self, iotype): + # string type of fmt should not matter, see also gh-4053 + a = np.array([1.]) + s = iotype() + np.savetxt(s, a, fmt="%f") + s.seek(0) + if iotype is StringIO: + assert_equal(s.read(), "%f\n" % 1.) + else: + assert_equal(s.read(), b"%f\n" % 1.) + + @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work") + @pytest.mark.slow + @requires_memory(free_bytes=7e9) + def test_large_zip(self): + def check_large_zip(memoryerror_raised): + memoryerror_raised.value = False + try: + # The test takes at least 6GB of memory, writes a file larger + # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile`` + test_data = np.asarray([np.random.rand( + np.random.randint(50, 100), 4) + for i in range(800000)], dtype=object) + with tempdir() as tmpdir: + np.savez(os.path.join(tmpdir, 'test.npz'), + test_data=test_data) + except MemoryError: + memoryerror_raised.value = True + raise + # run in a subprocess to ensure memory is released on PyPy, see gh-15775 + # Use an object in shared memory to re-raise the MemoryError exception + # in our process if needed, see gh-16889 + memoryerror_raised = Value(c_bool) + + # Since Python 3.8, the default start method for multiprocessing has + # been changed from 'fork' to 'spawn' on macOS, causing inconsistency + # on memory sharing model, leading to failed test for check_large_zip + ctx = get_context('fork') + p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,)) + p.start() + p.join() + if memoryerror_raised.value: + raise MemoryError("Child process raised a MemoryError exception") + # -9 indicates a SIGKILL, probably an OOM. 
+ if p.exitcode == -9: + pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + assert p.exitcode == 0 + +class LoadTxtBase: + def check_compressed(self, fopen, suffixes): + # Test that we can load data from a compressed file + wanted = np.arange(6).reshape((2, 3)) + linesep = ('\n', '\r\n', '\r') + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + for suffix in suffixes: + with temppath(suffix=suffix) as name: + with fopen(name, mode='wt', encoding='UTF-32-LE') as f: + f.write(data) + res = self.loadfunc(name, encoding='UTF-32-LE') + assert_array_equal(res, wanted) + with fopen(name, "rt", encoding='UTF-32-LE') as f: + res = self.loadfunc(f) + assert_array_equal(res, wanted) + + def test_compressed_gzip(self): + self.check_compressed(gzip.open, ('.gz',)) + + @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2") + def test_compressed_bz2(self): + self.check_compressed(bz2.open, ('.bz2',)) + + @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma") + def test_compressed_lzma(self): + self.check_compressed(lzma.open, ('.xz', '.lzma')) + + def test_encoding(self): + with temppath() as path: + with open(path, "wb") as f: + f.write('0.\n1.\n2.'.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16") + assert_array_equal(x, [0., 1., 2.]) + + def test_stringload(self): + # umlaute + nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8") + with temppath() as path: + with open(path, "wb") as f: + f.write(nonascii.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_) + assert_array_equal(x, nonascii) + + def test_binary_decode(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_converters_decode(self): + # test converters that decode strings + c = TextIO() + c.write(b'\xcf\x96') + c.seek(0) + x = self.loadfunc(c, dtype=np.str_, encoding="bytes", + converters={0: lambda x: x.decode('UTF-8')}) + a = np.array([b'\xcf\x96'.decode('UTF-8')]) + assert_array_equal(x, a) + + def test_converters_nodecode(self): + # test native string converters enabled by setting an encoding + utf8 = b'\xcf\x96'.decode('UTF-8') + with temppath() as path: + with open(path, 'wt', encoding='UTF-8') as f: + f.write(utf8) + x = self.loadfunc(path, dtype=np.str_, + converters={0: lambda x: x + 't'}, + encoding='UTF-8') + a = np.array([utf8 + 't']) + assert_array_equal(x, a) + + +class TestLoadTxt(LoadTxtBase): + loadfunc = staticmethod(np.loadtxt) + + def setup_method(self): + # lower chunksize for testing + self.orig_chunk = _npyio_impl._loadtxt_chunksize + _npyio_impl._loadtxt_chunksize = 1 + + def teardown_method(self): + _npyio_impl._loadtxt_chunksize = self.orig_chunk + + def test_record(self): + c = TextIO() + c.write('1 2\n3 4') + c.seek(0) + x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_array_equal(x, a) + + d = TextIO() + d.write('M 64 75.0\nF 25 60.0') + d.seek(0) + mydescriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + b = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=mydescriptor) + y = np.loadtxt(d, dtype=mydescriptor) + assert_array_equal(y, b) + + def test_array(self): + c = TextIO() + c.write('1 2\n3 4') + + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([[1, 2], [3, 4]], int) + assert_array_equal(x, a) + + c.seek(0) + x = np.loadtxt(c, dtype=float) + a = 
np.array([[1, 2], [3, 4]], float) + assert_array_equal(x, a) + + def test_1D(self): + c = TextIO() + c.write('1\n2\n3\n4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('1,2,3,4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',') + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + def test_missing(self): + c = TextIO() + c.write('1,2,3,,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + a = np.array([1, 2, 3, -999, 5], int) + assert_array_equal(x, a) + + def test_converters_with_usecols(self): + c = TextIO() + c.write('1,2,3,,5\n6,7,8,9,10\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) + assert_array_equal(x, a) + + def test_comments_unicode(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_byte(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=b'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_multiple(self): + c = TextIO() + c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=['#', '@', '//']) + a = np.array([[1, 2, 3], [4, 5, 6]], int) + assert_array_equal(x, a) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_comments_multi_chars(self): + c = TextIO() + c.write('/* comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='/*') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + # Check that '/*' is not transformed to ['/', '*'] + c = TextIO() + c.write('*/ comment\n1,2,3,5\n') + c.seek(0) + assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', + comments='/*') + + def test_skiprows(self): + c = TextIO() + c.write('comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_usecols(self): + a = np.array([[1, 2], [3, 4]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1,)) + assert_array_equal(x, a[:, 1]) + + a = np.array([[1, 2, 3], [3, 4, 5]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) + + # Testing with arrays instead of tuples. 
+ c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) + + # Testing with an integer instead of a sequence + for int_type in [int, np.int8, np.int16, + np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]: + to_read = int_type(1) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=to_read) + assert_array_equal(x, a[:, 1]) + + # Testing with some crazy custom integer type + class CrazyInt: + def __index__(self): + return 1 + + crazy_int = CrazyInt() + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=crazy_int) + assert_array_equal(x, a[:, 1]) + + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) + assert_array_equal(x, a[:, 1]) + + # Checking with dtypes defined converters. + data = '''JOE 70.1 25.3 + BOB 60.5 27.9 + ''' + c = TextIO(data) + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(arr['stid'], [b"JOE", b"BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) + + # Testing non-ints in usecols + c.seek(0) + bogus_idx = 1.5 + assert_raises_regex( + TypeError, + f'^usecols must be.*{type(bogus_idx).__name__}', + np.loadtxt, c, usecols=bogus_idx + ) + + assert_raises_regex( + TypeError, + f'^usecols must be.*{type(bogus_idx).__name__}', + np.loadtxt, c, usecols=[0, bogus_idx, 0] + ) + + def test_bad_usecols(self): + with pytest.raises(OverflowError): + np.loadtxt(["1\n"], usecols=[2**64], delimiter=",") + with pytest.raises((ValueError, OverflowError)): + # Overflow error on 32bit platforms + np.loadtxt(["1\n"], usecols=[2**62], delimiter=",") + with pytest.raises(TypeError, + match="If a structured dtype .*. But 1 usecols were given and " + "the number of fields is 3."): + np.loadtxt(["1,1\n"], dtype="i,2i", usecols=[0], delimiter=",") + + def test_fancy_dtype(self): + c = TextIO() + c.write('1,2,3.0\n4,5,6.0\n') + c.seek(0) + dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + x = np.loadtxt(c, dtype=dt, delimiter=',') + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) + assert_array_equal(x, a) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_3d_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + assert_array_equal(x, a) + + def test_str_dtype(self): + # see gh-8033 + c = ["str1", "str2"] + + for dt in (str, np.bytes_): + a = np.array(["str1", "str2"], dtype=dt) + x = np.loadtxt(c, dtype=dt) + assert_array_equal(x, a) + + def test_empty_file(self): + with pytest.warns(UserWarning, match="input contained no data"): + c = TextIO() + x = np.loadtxt(c) + assert_equal(x.shape, (0,)) + x = np.loadtxt(c, dtype=np.int64) + assert_equal(x.shape, (0,)) + assert_(x.dtype == np.int64) + + def test_unused_converter(self): + c = TextIO() + c.writelines(['1 21\n', '3 42\n']) + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_array_equal(data, [21, 42]) + + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_array_equal(data, [33, 66]) + + def 
test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + def test_uint64_type(self): + tgt = (9223372043271415339, 9223372043271415853) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.uint64) + assert_equal(res, tgt) + + def test_int64_type(self): + tgt = (-9223372036854775807, 9223372036854775807) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.int64) + assert_equal(res, tgt) + + def test_from_float_hex(self): + # IEEE doubles and floats only, otherwise the float32 + # conversion may fail. + tgt = np.logspace(-10, 10, 5).astype(np.float32) + tgt = np.hstack((tgt, -tgt)).astype(float) + inp = '\n'.join(map(float.hex, tgt)) + c = TextIO() + c.write(inp) + for dt in [float, np.float32]: + c.seek(0) + res = np.loadtxt( + c, dtype=dt, converters=float.fromhex, encoding="latin1") + assert_equal(res, tgt, err_msg=f"{dt}") + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_no_default_hex_conversion(self): + """ + Ensure that fromhex is only used for values with the correct prefix and + is not called by default. Regression test related to gh-19598. + """ + c = TextIO("a b c") + with pytest.raises(ValueError, + match=".*convert string 'a' to float64 at row 0, column 1"): + np.loadtxt(c) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_exception(self): + """ + Ensure that the exception message raised during failed floating point + conversion is correct. Regression test related to gh-19598. 
+        """
+        c = TextIO("qrs tuv")  # Invalid values for default float converter
+        with pytest.raises(ValueError,
+                           match="could not convert string 'qrs' to float64"):
+            np.loadtxt(c)
+
+    def test_from_complex(self):
+        tgt = (complex(1, 1), complex(1, -1))
+        c = TextIO()
+        c.write("%s %s" % tgt)
+        c.seek(0)
+        res = np.loadtxt(c, dtype=complex)
+        assert_equal(res, tgt)
+
+    def test_complex_misformatted(self):
+        # test for backward compatibility
+        # some complex formats used to generate x+-yj
+        a = np.zeros((2, 2), dtype=np.complex128)
+        re = np.pi
+        im = np.e
+        a[:] = re - 1.0j * im
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%.16e')
+        c.seek(0)
+        txt = c.read()
+        c.seek(0)
+        # misformat the sign on the imaginary part, gh 7895
+        txt_bad = txt.replace(b'e+00-', b'e00+-')
+        assert_(txt_bad != txt)
+        c.write(txt_bad)
+        c.seek(0)
+        res = np.loadtxt(c, dtype=complex)
+        assert_equal(res, a)
+
+    def test_universal_newline(self):
+        with temppath() as name:
+            with open(name, 'w') as f:
+                f.write('1 21\r3 42\r')
+            data = np.loadtxt(name)
+        assert_array_equal(data, [[1, 21], [3, 42]])
+
+    def test_empty_field_after_tab(self):
+        c = TextIO()
+        c.write('1 \t2 \t3\tstart \n4\t5\t6\t  \n7\t8\t9.5\t')
+        c.seek(0)
+        dt = {'names': ('x', 'y', 'z', 'comment'),
+              'formats': ('<i4', '<i4', '<f4', '|S8')}
+        x = np.loadtxt(c, dtype=dt, delimiter='\t')
+        a = np.array([b'start ', b'  ', b''])
+        assert_array_equal(x['comment'], a)
+
+    def test_max_rows_larger(self):
+        # test max_rows > num rows
+        c = TextIO()
+        c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+        c.seek(0)
+        x = np.loadtxt(c, dtype=int, delimiter=',',
+                       skiprows=1, max_rows=6)
+        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
+        assert_array_equal(x, a)
+
+    @pytest.mark.parametrize(["skip", "data"], [
+        (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]),
+        # "Bad" lines that do not end in newlines:
+        (1, ["ignored", "1,2", "", "3,4"]),
+        (1, StringIO("ignored\n1,2\n\n3,4")),
+        # Same as above, but do not skip any lines:
+        (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]),
+        (0, ["-1,0", "1,2", "", "3,4"]),
+        (0, StringIO("-1,0\n1,2\n\n3,4"))])
+    def test_max_rows_empty_lines(self, skip, data):
+        with pytest.warns(UserWarning,
+                          match=f"Input line 3.*max_rows={3 - skip}"):
+            res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",",
+                             max_rows=3 - skip)
+            assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:])
+
+        if isinstance(data, StringIO):
+            data.seek(0)
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", UserWarning)
+            with pytest.raises(UserWarning):
+                np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",",
+                           max_rows=3 - skip)
+
+class Testfromregex:
+    def test_record(self):
+        c = TextIO()
+        c.write('1.312 foo\n1.534 bar\n4.444 qux')
+        c.seek(0)
+
+        dt = [('num', np.float64), ('val', 'S3')]
+        x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
+        a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
+                     dtype=dt)
+        assert_array_equal(x, a)
+
+    def test_record_2(self):
+        c = TextIO()
+        c.write('1312 foo\n1534 bar\n4444 qux')
+        c.seek(0)
+
+        dt = [('num', np.int32), ('val', 'S3')]
+        x = np.fromregex(c, r"(\d+)\s+(...)", dt)
+        a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
+                     dtype=dt)
+        assert_array_equal(x, a)
+
+    def test_record_3(self):
+        c = TextIO()
+        c.write('1312 foo\n1534 bar\n4444 qux')
+        c.seek(0)
+
+        dt = [('num', np.float64)]
+        x = np.fromregex(c, r"(\d+)\s+...", dt)
+        a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
+        assert_array_equal(x, a)
+
+    @pytest.mark.parametrize("path_type", [str, Path])
+    def test_record_unicode(self, path_type):
+        utf8 = b'\xcf\x96'
+        with temppath() as str_path:
+            path = path_type(str_path)
+            with open(path, 'wb') as f:
+                f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
+
+            dt
= [('num', np.float64), ('val', 'U4')] + x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') + a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), + (4.444, 'qux')], dtype=dt) + assert_array_equal(x, a) + + regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) + x = np.fromregex(path, regexp, dt, encoding='UTF-8') + assert_array_equal(x, a) + + def test_compiled_bytes(self): + regexp = re.compile(br'(\d)') + c = BytesIO(b'123') + dt = [('num', np.float64)] + a = np.array([1, 2, 3], dtype=dt) + x = np.fromregex(c, regexp, dt) + assert_array_equal(x, a) + + def test_bad_dtype_not_structured(self): + regexp = re.compile(br'(\d)') + c = BytesIO(b'123') + with pytest.raises(TypeError, match='structured datatype'): + np.fromregex(c, regexp, dtype=np.float64) + + +#####-------------------------------------------------------------------------- + + +class TestFromTxt(LoadTxtBase): + loadfunc = staticmethod(np.genfromtxt) + + def test_record(self): + # Test w/ explicit dtype + data = TextIO('1 2\n3 4') + test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = TextIO('M 64.0 75.0\nF 25.0 60.0') + descriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.genfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + # Test outputting a standard ndarray + data = TextIO('1 2\n3 4') + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1, 2], [3, 4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + # Test squeezing to 1D + control = np.array([1, 2, 3, 4], int) + # + data = TextIO('1\n2\n3\n4\n') + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = TextIO('1,2,3,4\n') + test = np.genfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + # Test the stripping of comments + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = TextIO('# comment\n1,2,3,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = TextIO('1,2,3,5# comment\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + # Test row skipping + control = np.array([1, 2, 3, 5], int) + kwargs = {"dtype": int, "delimiter": ','} + # + data = TextIO('comment\n1,2,3,5\n') + test = np.genfromtxt(data, skip_header=1, **kwargs) + assert_equal(test, control) + # + data = TextIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, skiprows=1, **kwargs) + assert_equal(test, control) + + def test_skip_footer(self): + data = [f"# {i}" for i in range(1, 6)] + data.append("A, B, C") + data.extend([f"{i},{i:3.1f},{i:03d}" for i in range(51)]) + data[-1] = "99,99" + kwargs = {"delimiter": ",", "names": True, "skip_header": 5, "skip_footer": 10} + test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) + ctrl = np.array([(f"{i:f}", f"{i:f}", f"{i:f}") for i in range(41)], + dtype=[(_, float) for _ in "ABC"]) + assert_equal(test, ctrl) + + def test_skip_footer_with_invalid(self): + with suppress_warnings() as 
sup: + sup.filter(ConversionWarning) + basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' + # Footer too small to get rid of all invalid values + assert_raises(ValueError, np.genfromtxt, + TextIO(basestr), skip_footer=1) + # except ValueError: + # pass + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + a = np.genfromtxt(TextIO(basestr), skip_footer=3) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) + a = np.genfromtxt( + TextIO(basestr), skip_footer=3, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) + + def test_header(self): + # Test retrieving a header + data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, names=True, + encoding='bytes') + assert_(w[0].category is VisibleDeprecationWarning) + control = {'gender': np.array([b'M', b'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + # Test the automatic definition of the output dtype + data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, encoding='bytes') + assert_(w[0].category is VisibleDeprecationWarning) + control = [np.array([b'A', b'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3 + 4j, 5 + 6j]), + np.array([True, False]), ] + assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test[f'f{i}'], ctrl) + + def test_auto_dtype_uniform(self): + # Tests whether the output dtype can be uniformized + data = TextIO('1 2 3 4\n5 6 7 8\n') + test = np.genfromtxt(data, dtype=None) + control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + assert_equal(test, control) + + def test_fancy_dtype(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_names_overwrite(self): + # Test overwriting the names of the dtype + descriptor = {'names': ('g', 'a', 'w'), + 'formats': ('S1', 'i4', 'f4')} + data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') + names = ('gender', 'age', 'weight') + test = np.genfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + def test_bad_fname(self): + with pytest.raises(TypeError, match='fname must be a string,'): + np.genfromtxt(123) + + def test_commented_header(self): + # Check that names can be retrieved even if the line is commented out. 
+ data = TextIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None, + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = TextIO(b""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None, + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test, ctrl) + + def test_names_and_comments_none(self): + # Tests case when names is true but comments is None (gh-10780) + data = TextIO('col1 col2\n 1 2\n 3 4') + test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True) + control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)]) + assert_equal(test, control) + + def test_file_is_closed_on_error(self): + # gh-13200 + with tempdir() as tmpdir: + fpath = os.path.join(tmpdir, "test.csv") + with open(fpath, "wb") as f: + f.write('\N{GREEK PI SYMBOL}'.encode()) + + # ResourceWarnings are emitted from a destructor, so won't be + # detected by regular propagation to errors. + with assert_no_warnings(): + with pytest.raises(UnicodeDecodeError): + np.genfromtxt(fpath, encoding="ascii") + + def test_autonames_and_usecols(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None, encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_with_usecols(self): + # Test the combination user-defined converters and usecol + data = TextIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, encoding="bytes", + converters={'C': lambda s: 2 * int(s)}) + assert_(w[0].category is VisibleDeprecationWarning) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + # Test the conversion to datetime. 
+ converter = { + 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + def test_converters_cornercases2(self): + # Test the conversion to datetime64. + converter = { + 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', 'datetime64[us]'), ('stid', float)]) + assert_equal(test, control) + + def test_unused_converter(self): + # Test whether unused converters are forgotten + data = TextIO("1 21\n 3 42\n") + test = np.genfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.genfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + def test_invalid_converter(self): + strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or + ((b'r' not in x.lower() and x.strip()) or 0.0)) + strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or + ((b'%' not in x.lower() and x.strip()) or 0.0)) + s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" + "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" + "D02N03,10/10/2004,R 1,,7,145.55") + kwargs = { + "converters": {2: strip_per, 3: strip_rand}, "delimiter": ",", + "dtype": None, "encoding": "bytes"} + assert_raises(ConverterError, np.genfromtxt, s, **kwargs) + + def test_tricky_converter_bug1666(self): + # Test some corner cases + s = TextIO('q1,2\nq3,4') + cnv = lambda s: float(s[1:]) + test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) + control = np.array([[1., 2.], [3., 4.]]) + assert_equal(test, control) + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: bytes}) + control = np.array([('2009', 23., 46)], + dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_dtype_with_converters_and_usecols(self): + dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" + dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3} + dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')] + conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} + test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv, encoding="bytes") + control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp) + assert_equal(test, control) + dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] + test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0, 1, 3), names=None, converters=conv, + encoding="bytes") + control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp) + assert_equal(test, control) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: 
strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + ndtype = [('nest', [('idx', int), ('code', object)])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + # nested but empty fields also aren't supported + ndtype = [('idx', int), ('code', object), ('nest', [])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + def test_dtype_with_object_no_converter(self): + # Object without a converter uses bytes: + parsed = np.genfromtxt(TextIO("1"), dtype=object) + assert parsed[()] == b"1" + parsed = np.genfromtxt(TextIO("string"), dtype=object) + assert parsed[()] == b"string" + + def test_userconverters_with_explicit_dtype(self): + # Test user_converters w/ explicit (standard) dtype + data = TextIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: bytes}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + def test_utf8_userconverters_with_explicit_dtype(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') + test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: str}, + encoding='UTF-8') + control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], + dtype=[('', '|U11'), ('', float)]) + assert_equal(test, control) + + def test_spacedelimiter(self): + # Test space delimiter + data = TextIO("1 2 3 4 5\n6 7 8 9 10") + test = np.genfromtxt(data) + control = np.array([[1., 2., 3., 4., 5.], + [6., 7., 8., 9., 10.]]) + assert_equal(test, control) + + def test_integer_delimiter(self): + # Test using an integer for delimiter + data = " 1 2 3\n 4 5 67\n890123 4" + test = np.genfromtxt(TextIO(data), delimiter=3) + control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) + assert_equal(test, control) + + def test_missing(self): + data = TextIO('1,2,3,,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + def test_missing_with_tabs(self): + # Test w/ a delimiter tab + txt = "1\t2\t3\n\t2\t\n1\t\t3" + test = np.genfromtxt(TextIO(txt), delimiter="\t", + usemask=True,) + ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) + ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) + assert_equal(test.data, ctrl_d) + assert_equal(test.mask, ctrl_m) + + def test_usecols(self): + # Test the selection of columns + # Select 1 column + control = np.array([[1, 2], [3, 4]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array([[1, 2, 3], [3, 4, 5]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. 
+ data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + + def test_usecols_as_css(self): + # Test giving usecols with a comma-separated string + data = "1 2 3\n4 5 6" + test = np.genfromtxt(TextIO(data), + names="a, b, c", usecols="a, c") + ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) + assert_equal(test, ctrl) + + def test_usecols_with_structured_dtype(self): + # Test usecols with an explicit structured dtype + data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.genfromtxt( + data, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(test['stid'], [b"JOE", b"BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + def test_usecols_with_integer(self): + # Test usecols with an integer + test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) + assert_equal(test, np.array([1., 4.])) + + def test_usecols_with_named_columns(self): + # Test usecols with named columns + ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) + data = "1 2 3\n4 5 6" + kwargs = {"names": "a, b, c"} + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data), + usecols=('a', 'c'), **kwargs) + assert_equal(test, ctrl) + + def test_empty_file(self): + # Test that an empty file raises the proper warning. + with suppress_warnings() as sup: + sup.filter(message="genfromtxt: Empty input file:") + data = TextIO() + test = np.genfromtxt(data) + assert_equal(test, np.array([])) + + # when skip_header > 0 + test = np.genfromtxt(data, skip_header=1) + assert_equal(test, np.array([])) + + def test_fancy_dtype_alt(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True) + control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.genfromtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_withmissing(self): + data = TextIO('A,B\n0,1\n2,N/A') + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.genfromtxt(data, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', float), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_user_missing_values(self): + data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + basekwargs = {"dtype": None, "delimiter": ",", "names": True} + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.genfromtxt(TextIO(data), missing_values="N/A", + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + basekwargs['dtype'] = mdtype + test = 
np.genfromtxt(TextIO(data), + missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + test = np.genfromtxt(TextIO(data), + missing_values={0: -9, 'B': -99, 'C': -999j}, + usemask=True, + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + def test_user_filling_values(self): + # Test with missing and filling values + ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) + data = "N/A, 2, 3\n4, ,???" + kwargs = {"delimiter": ",", + "dtype": int, + "names": "a,b,c", + "missing_values": {0: "N/A", 'b': " ", 2: "???"}, + "filling_values": {0: 0, 'b': 0, 2: -999}} + test = np.genfromtxt(TextIO(data), **kwargs) + ctrl = np.array([(0, 2, 3), (4, 0, -999)], + dtype=[(_, int) for _ in "abc"]) + assert_equal(test, ctrl) + # + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) + assert_equal(test, ctrl) + + data2 = "1,2,*,4\n5,*,7,8\n" + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=0) + ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=-1) + ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) + assert_equal(test, ctrl) + + def test_withmissing_float(self): + data = TextIO('A,B\n0,1.5\n2,-999.00') + test = np.genfromtxt(data, dtype=None, delimiter=',', + missing_values='-999.0', names=True, usemask=True) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_with_masked_column_uniform(self): + # Test masked column + data = TextIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + # Test masked column + data = TextIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0), (0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + def test_invalid_raise(self): + # Test invalid raise + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = {"delimiter": ",", "dtype": None, "names": True} + + def f(): + return np.genfromtxt(mdata, invalid_raise=False, **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) + # + mdata.seek(0) + assert_raises(ValueError, np.genfromtxt, mdata, + delimiter=",", names=True) + + def test_invalid_raise_with_usecols(self): + # Test invalid_raise with usecols + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = {"delimiter": ",", "dtype": None, "names": True, + 
"invalid_raise": False} + + def f(): + return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) + # + mdata.seek(0) + mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs) + assert_equal(len(mtest), 50) + control = np.ones(50, dtype=[(_, int) for _ in 'ab']) + control[[10 * _ for _ in range(5)]] = (2, 2) + assert_equal(mtest, control) + + def test_inconsistent_dtype(self): + # Test inconsistent dtype + data = ["1, 1, 1, 1, -1.1"] * 50 + mdata = TextIO("\n".join(data)) + + converters = {4: lambda x: f"({x.decode()})"} + kwargs = {"delimiter": ",", "converters": converters, + "dtype": [(_, int) for _ in 'abcde'], "encoding": "bytes"} + assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) + + def test_default_field_format(self): + # Test default format + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=None, defaultfmt="f%02i") + ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], + dtype=[("f00", int), ("f01", int), ("f02", float)]) + assert_equal(mtest, ctrl) + + def test_single_dtype_wo_names(self): + # Test single dtype w/o names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, defaultfmt="f%02i") + ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_explicit_names(self): + # Test single dtype w explicit names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names="a, b, c") + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_implicit_names(self): + # Test single dtype w implicit names + data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names=True) + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_easy_structured_dtype(self): + # Test easy structured dtype + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), delimiter=",", + dtype=(int, float, float), defaultfmt="f_%02i") + ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], + dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) + assert_equal(mtest, ctrl) + + def test_autostrip(self): + # Test autostrip + data = "01/01/2003 , 1.3, abcde" + kwargs = {"delimiter": ",", "dtype": None, "encoding": "bytes"} + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), **kwargs) + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], + dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) + assert_equal(mtest, ctrl) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs) + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003', 1.3, 'abcde')], + dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) + assert_equal(mtest, ctrl) + + def test_replace_space(self): + # Test the 'replace_space' option + txt = "A.A, B (B), C:C\n1, 2, 3.14" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None) + 
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_replace_space_known_dtype(self): + # Test the 'replace_space' (and related) options when dtype != None + txt = "A.A, B (B), C:C\n1, 2, 3" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_incomplete_names(self): + # Test w/ incomplete names + data = "A,,C\n0,1,2\n3,4,5" + kwargs = {"delimiter": ",", "names": True} + # w/ dtype=None + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, int) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), dtype=None, **kwargs) + assert_equal(test, ctrl) + # w/ default dtype + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, float) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), **kwargs) + + def test_names_auto_completion(self): + # Make sure that names are properly completed + data = "1 2 3\n 4 5 6" + test = np.genfromtxt(TextIO(data), + dtype=(int, float, int), names="a") + ctrl = np.array([(1, 2, 3), (4, 5, 6)], + dtype=[('a', int), ('f0', float), ('f1', int)]) + assert_equal(test, ctrl) + + def test_names_with_usecols_bug1636(self): + # Make sure we pick up the right names w/ usecols + data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" + ctrl_names = ("A", "C", "E") + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=(0, 2, 4), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=int, delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + + def test_fixed_width_names(self): + # Test fix-width w/ names + data = " A B C\n 0 1 2.3\n 45 67 9." 
+ kwargs = {"delimiter": (5, 5, 4), "names": True, "dtype": None} + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + # + kwargs = {"delimiter": 5, "names": True, "dtype": None} + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_filling_values(self): + # Test missing values + data = b"1, 2, 3\n1, , 5\n0, 6, \n" + kwargs = {"delimiter": ",", "dtype": None, "filling_values": -999} + ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_comments_is_none(self): + # Github issue 329 (None was previously being converted to 'None'). + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1], b'testNonetherestofthedata') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1], b' testNonetherestofthedata') + + def test_latin1(self): + latin1 = b'\xf6\xfc\xf6' + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + latin1 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1, 0], b"test1") + assert_equal(test[1, 1], b"testNonethe" + latin1) + assert_equal(test[1, 2], b"test3") + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding='latin1') + assert_equal(test[1, 0], "test1") + assert_equal(test[1, 1], "testNonethe" + latin1.decode('latin1')) + assert_equal(test[1, 2], "test3") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test['f0'], 0) + assert_equal(test['f1'], b"testNonethe" + latin1) + + def test_binary_decode_autodtype(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_utf8_byte_encoding(self): + utf8 = b"\xcf\x96" + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + utf8 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + ctl = np.array([ + [b'norm1', b'norm2', b'norm3'], + [b'test1', b'testNonethe' + utf8, b'test3'], + [b'norm1', b'norm2', 
b'norm3']]) + assert_array_equal(test, ctl) + + def test_utf8_file(self): + utf8 = b"\xcf\x96" + with temppath() as path: + with open(path, "wb") as f: + f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + ctl = np.array([ + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + # test a mixed dtype + with open(path, "wb") as f: + f.write(b"0,testNonethe" + utf8) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + assert_equal(test['f0'], 0) + assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) + + def test_utf8_file_nodtype_unicode(self): + # bytes encoding with non-latin1 -> unicode upcast + utf8 = '\u03d6' + latin1 = '\xf6\xfc\xf6' + + # skip test if cannot encode utf8 test string with preferred + # encoding. The preferred encoding is assumed to be the default + # encoding of open. Will need to change this for PyTest, maybe + # using pytest.mark.xfail(raises=***). + try: + encoding = locale.getpreferredencoding() + utf8.encode(encoding) + except (UnicodeError, ImportError): + pytest.skip('Skipping test_utf8_file_nodtype_unicode, ' + 'unable to encode utf8 in preferred encoding') + + with temppath() as path: + with open(path, "wt") as f: + f.write("norm1,norm2,norm3\n") + f.write("norm1," + latin1 + ",norm3\n") + f.write("test1,testNonethe" + utf8 + ",test3\n") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', + VisibleDeprecationWarning) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="bytes") + # Check for warning when encoding not specified. 
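+                # (encoding="bytes" is the backwards-compatibility mode that
+                # triggers this deprecation warning.)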
+ assert_(w[0].category is VisibleDeprecationWarning) + ctl = np.array([ + ["norm1", "norm2", "norm3"], + ["norm1", latin1, "norm3"], + ["test1", "testNonethe" + utf8, "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") + def test_recfromtxt(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = recfromtxt(data, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = recfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_recfromcsv(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = {"missing_values": "N/A", "names": True, "case_sensitive": True, + "encoding": "bytes"} + test = recfromcsv(data, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = recfromcsv(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = TextIO('A,B\n0,1\n2,3') + test = recfromcsv(data, missing_values='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', int), ('b', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,3') + dtype = [('a', int), ('b', float)] + test = recfromcsv(data, missing_values='N/A', dtype=dtype) + control = np.array([(0, 1), (2, 3)], + dtype=dtype) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + # gh-10394 + data = TextIO('color\n"red"\n"blue"') + test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) + control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) + assert_equal(test.dtype, control.dtype) + assert_equal(test, control) + + def test_max_rows(self): + # Test the `max_rows` keyword argument. + data = '1 2\n3 4\n5 6\n7 8\n9 10\n' + txt = TextIO(data) + a1 = np.genfromtxt(txt, max_rows=3) + a2 = np.genfromtxt(txt) + assert_equal(a1, [[1, 2], [3, 4], [5, 6]]) + assert_equal(a2, [[7, 8], [9, 10]]) + + # max_rows must be at least 1. + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0) + + # An input with several invalid rows. 
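+        # (the bad rows have a single column instead of two.)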
+        data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
+
+        test = np.genfromtxt(TextIO(data), max_rows=2)
+        control = np.array([[1., 1.], [2., 2.]])
+        assert_equal(test, control)
+
+        # Test keywords conflict: skip_footer cannot be combined with max_rows
+        assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
+                      max_rows=4)
+
+        # Test with an invalid row inside the max_rows window
+        assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
+
+        # Test that invalid rows do not raise when invalid_raise=False
+        with suppress_warnings() as sup:
+            sup.filter(ConversionWarning)
+
+            test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
+            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+            assert_equal(test, control)
+
+            test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
+            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+            assert_equal(test, control)
+
+        # Structured array with field names.
+        data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
+
+        # Test with header, names and comments
+        txt = TextIO(data)
+        test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
+        control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
+                           dtype=[('c', '<f8'), ('d', '<f8')])
+        assert_equal(test, control)
+
+    def test_auto_dtype_largeint(self):
+        # Regression test for numpy/numpy#5635 whereby large integers
+        # could cause OverflowErrors.
+
+        # Test the automatic definition of the output dtype
+        #
+        # 2**66 = 73786976294838206464 => should convert to float
+        # 2**34 = 17179869184 => should convert to int64
+        # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
+        #                 int64 on 64-bit systems)
+
+        data = TextIO('73786976294838206464 17179869184 1024')
+
+        test = np.genfromtxt(data, dtype=None)
+
+        assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
+
+        assert_(test.dtype['f0'] == float)
+        assert_(test.dtype['f1'] == np.int64)
+        assert_(test.dtype['f2'] == np.int_)
+
+        assert_allclose(test['f0'], 73786976294838206464.)
+        assert_equal(test['f1'], 17179869184)
+        assert_equal(test['f2'], 1024)
+
+    def test_unpack_float_data(self):
+        txt = TextIO("1,2,3\n4,5,6\n7,8,9\n0.0,1.0,2.0")
+        a, b, c = np.loadtxt(txt, delimiter=",", unpack=True)
+        assert_array_equal(a, np.array([1.0, 4.0, 7.0, 0.0]))
+        assert_array_equal(b, np.array([2.0, 5.0, 8.0, 1.0]))
+        assert_array_equal(c, np.array([3.0, 6.0, 9.0, 2.0]))
+
+    def test_unpack_structured(self):
+        # Regression test for gh-4341
+        # Unpacking should work on structured arrays
+        txt = TextIO("M 21 72\nF 35 58")
+        dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')}
+        a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True)
+        assert_equal(a.dtype, np.dtype('S1'))
+        assert_equal(b.dtype, np.dtype('i4'))
+        assert_equal(c.dtype, np.dtype('f4'))
+        assert_array_equal(a, np.array([b'M', b'F']))
+        assert_array_equal(b, np.array([21, 35]))
+        assert_array_equal(c, np.array([72., 58.]))
+
+    def test_unpack_auto_dtype(self):
+        # Regression test for gh-4341
+        # Unpacking should work when dtype=None
+        txt = TextIO("M 21 72.\nF 35 58.")
+        expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.]))
+        test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8")
+        for arr, result in zip(expected, test):
+            assert_array_equal(arr, result)
+            assert_equal(arr.dtype, result.dtype)
+
+    def test_unpack_single_name(self):
+        # Regression test for gh-4341
+        # Unpacking should work when structured dtype has only one field
+        txt = TextIO("21\n35")
+        dt = {'names': ('a',), 'formats': ('i4',)}
+        expected = np.array([21, 35], dtype=np.int32)
+        test = np.genfromtxt(txt, dtype=dt, unpack=True)
+        assert_array_equal(expected, test)
+        assert_equal(expected.dtype, test.dtype)
+
+    def test_squeeze_scalar(self):
+        # Regression test for gh-4341
+        # Unpacking a scalar should give zero-dim output,
+        # even if dtype is structured
+        txt = TextIO("1")
+        dt = {'names': ('a',), 'formats': ('i4',)}
+
expected = np.array((1,), dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal((), test.shape) + assert_equal(expected.dtype, test.dtype) + + @pytest.mark.parametrize("ndim", [0, 1, 2]) + def test_ndmin_keyword(self, ndim: int): + # lets have the same behaviour of ndmin as loadtxt + # as they should be the same for non-missing values + txt = "42" + + a = np.loadtxt(StringIO(txt), ndmin=ndim) + b = np.genfromtxt(StringIO(txt), ndmin=ndim) + + assert_array_equal(a, b) + + +class TestPathUsage: + # Test that pathlib.Path can be used + def test_loadtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([[1.1, 2], [3, 4]]) + np.savetxt(path, a) + x = np.loadtxt(path) + assert_array_equal(x, a) + + def test_save_load(self): + # Test that pathlib.Path instances can be used with save. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path) + assert_array_equal(data, a) + + def test_save_load_memmap(self): + # Test that pathlib.Path instances can be loaded mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path, mmap_mode='r') + assert_array_equal(data, a) + # close the mem-mapped file + del data + if IS_PYPY: + break_cycles() + break_cycles() + + @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly") + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_save_load_memmap_readwrite(self, filename_type): + with temppath(suffix='.npy') as path: + path = filename_type(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + b = np.load(path, mmap_mode='r+') + a[0][0] = 5 + b[0][0] = 5 + del b # closes the file + if IS_PYPY: + break_cycles() + break_cycles() + data = np.load(path) + assert_array_equal(data, a) + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_savez_load(self, filename_type): + with temppath(suffix='.npz') as path: + path = filename_type(path) + np.savez(path, lab='place holder') + with np.load(path) as data: + assert_array_equal(data['lab'], 'place holder') + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_savez_compressed_load(self, filename_type): + with temppath(suffix='.npz') as path: + path = filename_type(path) + np.savez_compressed(path, lab='place holder') + data = np.load(path) + assert_array_equal(data['lab'], 'place holder') + data.close() + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_genfromtxt(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + a = np.array([(1, 2), (3, 4)]) + np.savetxt(path, a) + data = np.genfromtxt(path) + assert_array_equal(a, data) + + @pytest.mark.parametrize("filename_type", [Path, str]) + @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") + def test_recfromtxt(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + with open(path, 'w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = recfromtxt(path, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + @pytest.mark.parametrize("filename_type", [Path, str]) + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_recfromcsv(self, 
filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + with open(path, 'w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = { + "missing_values": "N/A", "names": True, "case_sensitive": True + } + test = recfromcsv(path, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + +def test_gzip_load(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + +# These next two classes encode the minimal API needed to save()/load() arrays. +# The `test_ducktyping` ensures they work correctly +class JustWriter: + def __init__(self, base): + self.base = base + + def write(self, s): + return self.base.write(s) + + def flush(self): + return self.base.flush() + +class JustReader: + def __init__(self, base): + self.base = base + + def read(self, n): + return self.base.read(n) + + def seek(self, off, whence=0): + return self.base.seek(off, whence) + + +def test_ducktyping(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = JustWriter(s) + + np.save(f, a) + f.flush() + s.seek(0) + + f = JustReader(s) + assert_array_equal(np.load(f), a) + + +def test_gzip_loadtxt(): + # Thanks to another windows brokenness, we can't use + # NamedTemporaryFile: a file created from this function cannot be + # reopened by another open call. So we first put the gzipped string + # of the test reference array, write it to a securely opened file, + # which is then read from by the loadtxt function + s = BytesIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(b'1 2 3\n') + g.close() + + s.seek(0) + with temppath(suffix='.gz') as name: + with open(name, 'wb') as f: + f.write(s.read()) + res = np.loadtxt(name) + s.close() + + assert_array_equal(res, [1, 2, 3]) + + +def test_gzip_loadtxt_from_string(): + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + f.write(b'1 2 3\n') + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.loadtxt(f), [1, 2, 3]) + + +def test_npzfile_dict(): + s = BytesIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert_('x' in z) + assert_('y' in z) + assert_('x' in z.keys()) + assert_('y' in z.keys()) + + for f, a in z.items(): + assert_(f in ['x', 'y']) + assert_equal(a.shape, (3, 3)) + + for a in z.values(): + assert_equal(a.shape, (3, 3)) + + assert_(len(z.items()) == 2) + + for f in z: + assert_(f in ['x', 'y']) + + assert_('x' in z.keys()) + assert (z.get('x') == z['x']).all() + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_load_refcount(): + # Check that objects returned by np.load are directly freed based on + # their refcount, rather than needing the gc to collect them. 
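+    # (assert_no_gc_cycles fails if the wrapped block creates reference
+    # cycles, i.e. if cleanup would need the garbage collector.)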
+ + f = BytesIO() + np.savez(f, [1, 2, 3]) + f.seek(0) + + with assert_no_gc_cycles(): + np.load(f) + + f.seek(0) + dt = [("a", 'u1', 2), ("b", 'u1', 2)] + with assert_no_gc_cycles(): + x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) + + +def test_load_multiple_arrays_until_eof(): + f = BytesIO() + np.save(f, 1) + np.save(f, 2) + f.seek(0) + out1 = np.load(f) + assert out1 == 1 + out2 = np.load(f) + assert out2 == 2 + with pytest.raises(EOFError): + np.load(f) + + +def test_savez_nopickle(): + obj_array = np.array([1, 'hello'], dtype=object) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez(tmp, obj_array, allow_pickle=False) + + with temppath(suffix='.npz') as tmp: + np.savez_compressed(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez_compressed(tmp, obj_array, allow_pickle=False) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_loadtxt.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_loadtxt.py new file mode 100644 index 0000000..a2022a0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_loadtxt.py @@ -0,0 +1,1101 @@ +""" +Tests specific to `np.loadtxt` added during the move of loadtxt to be backed +by C code. +These tests complement those found in `test_io.py`. +""" + +import os +import sys +from io import StringIO +from tempfile import NamedTemporaryFile, mkstemp + +import pytest + +import numpy as np +from numpy.ma.testutils import assert_equal +from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal + + +def test_scientific_notation(): + """Test that both 'e' and 'E' are parsed correctly.""" + data = StringIO( + + "1.0e-1,2.0E1,3.0\n" + "4.0e-2,5.0E-1,6.0\n" + "7.0e-3,8.0E1,9.0\n" + "0.0e-4,1.0E-1,2.0" + + ) + expected = np.array( + [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] + ) + assert_array_equal(np.loadtxt(data, delimiter=","), expected) + + +@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"]) +def test_comment_multiple_chars(comment): + content = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n" + txt = StringIO(content.replace("#", comment)) + a = np.loadtxt(txt, delimiter=",", comments=comment) + assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) + + +@pytest.fixture +def mixed_types_structured(): + """ + Fixture providing heterogeneous input data with a structured dtype, along + with the associated structured array. 
+ """ + data = StringIO( + + "1000;2.4;alpha;-34\n" + "2000;3.1;beta;29\n" + "3500;9.9;gamma;120\n" + "4090;8.1;delta;0\n" + "5001;4.4;epsilon;-99\n" + "6543;7.8;omega;-1\n" + + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + return data, dtype, expected + + +@pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) +def test_structured_dtype_and_skiprows_no_empty_lines( + skiprows, mixed_types_structured): + data, dtype, expected = mixed_types_structured + a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) + assert_array_equal(a, expected[skiprows:]) + + +def test_unpack_structured(mixed_types_structured): + data, dtype, expected = mixed_types_structured + + a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) + assert_array_equal(a, expected["f0"]) + assert_array_equal(b, expected["f1"]) + assert_array_equal(c, expected["f2"]) + assert_array_equal(d, expected["f3"]) + + +def test_structured_dtype_with_shape(): + dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)]) + data = StringIO("0,1,2,3\n6,7,8,9\n") + expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dtype), expected) + + +def test_structured_dtype_with_multi_shape(): + dtype = np.dtype([("a", "u1", (2, 2))]) + data = StringIO("0 1 2 3\n") + expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype) + assert_array_equal(np.loadtxt(data, dtype=dtype), expected) + + +def test_nested_structured_subarray(): + # Test from gh-16678 + point = np.dtype([('x', float), ('y', float)]) + dt = np.dtype([('code', int), ('points', point, (2,))]) + data = StringIO("100,1,2,3,4\n200,5,6,7,8\n") + expected = np.array( + [ + (100, [(1., 2.), (3., 4.)]), + (200, [(5., 6.), (7., 8.)]), + ], + dtype=dt + ) + assert_array_equal(np.loadtxt(data, dtype=dt, delimiter=","), expected) + + +def test_structured_dtype_offsets(): + # An aligned structured dtype will have additional padding + dt = np.dtype("i1, i4, i1, i4, i1, i4", align=True) + data = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n") + expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)], dtype=dt) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dt), expected) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_negative_row_limits(param): + """skiprows and max_rows should raise for negative parameters.""" + with pytest.raises(ValueError, match="argument must be nonnegative"): + np.loadtxt("foo.bar", **{param: -3}) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_noninteger_row_limits(param): + with pytest.raises(TypeError, match="argument must be an integer"): + np.loadtxt("foo.bar", **{param: 1.0}) + + +@pytest.mark.parametrize( + "data, shape", + [ + ("1 2 3 4 5\n", (1, 5)), # Single row + ("1\n2\n3\n4\n5\n", (5, 1)), # Single column + ] +) +def test_ndmin_single_row_or_col(data, shape): + arr = np.array([1, 2, 3, 4, 5]) + arr2d = arr.reshape(shape) + + assert_array_equal(np.loadtxt(StringIO(data), dtype=int), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), arr2d) + + 
+@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"]) +def test_bad_ndmin(badval): + with pytest.raises(ValueError, match="Illegal value of ndmin keyword"): + np.loadtxt("foo.bar", ndmin=badval) + + +@pytest.mark.parametrize( + "ws", + ( + " ", # space + "\t", # tab + "\u2003", # em + "\u00A0", # non-break + "\u3000", # ideographic space + ) +) +def test_blank_lines_spaces_delimit(ws): + txt = StringIO( + f"1 2{ws}30\n\n{ws}\n" + f"4 5 60{ws}\n {ws} \n" + f"7 8 {ws} 90\n # comment\n" + f"3 2 1" + ) + # NOTE: It is unclear that the ` # comment` should succeed. Except + # for delimiter=None, which should use any whitespace (and maybe + # should just be implemented closer to Python + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=None, comments="#"), expected + ) + + +def test_blank_lines_normal_delimiter(): + txt = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1') + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=',', comments="#"), expected + ) + + +@pytest.mark.parametrize("dtype", (float, object)) +def test_maxrows_no_blank_lines(dtype): + txt = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0") + res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2) + assert_equal(res.dtype, dtype) + assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) +def test_exception_message_bad_values(dtype): + txt = StringIO("1,2\n3,XXX\n5,6") + msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2" + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + +def test_converters_negative_indices(): + txt = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]]) + res = np.loadtxt(txt, dtype=np.float64, delimiter=",", converters=conv) + assert_equal(res, expected) + + +def test_converters_negative_indices_with_usecols(): + txt = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]]) + res = np.loadtxt( + txt, + dtype=np.float64, + delimiter=",", + converters=conv, + usecols=[0, -1], + ) + assert_equal(res, expected) + + # Second test with variable number of rows: + res = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",", + usecols=[0, -1], converters={-1: (lambda x: -1)}) + assert_array_equal(res, [[0, -1], [0, -1]]) + + +def test_ragged_error(): + rows = ["1,2,3", "1,2,3", "4,3,2,1"] + with pytest.raises(ValueError, + match="the number of columns changed from 3 to 4 at row 3"): + np.loadtxt(rows, delimiter=",") + + +def test_ragged_usecols(): + # usecols, and negative ones, work even with varying number of columns. 
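+    # (each negative index is resolved against that row's own column count.)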
+ txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + expected = np.array([[0, 0], [0, 0], [0, 0]]) + res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + assert_equal(res, expected) + + txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n") + with pytest.raises(ValueError, + match="invalid column index -2 at row 2 with 1 columns"): + # There is no -2 column in the second row: + np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + + +def test_empty_usecols(): + txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[]) + assert res.shape == (3,) + assert res.dtype == np.dtype([]) + + +@pytest.mark.parametrize("c1", ["a", "の", "🫕"]) +@pytest.mark.parametrize("c2", ["a", "の", "🫕"]) +def test_large_unicode_characters(c1, c2): + # c1 and c2 span ascii, 16bit and 32bit range. + txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g") + res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",") + expected = np.array( + [f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")], + dtype=np.dtype('U12') + ) + assert_equal(res, expected) + + +def test_unicode_with_converter(): + txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n") + conv = {0: lambda s: s.upper()} + res = np.loadtxt( + txt, + dtype=np.dtype("U12"), + converters=conv, + delimiter=",", + encoding=None + ) + expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']]) + assert_equal(res, expected) + + +def test_converter_with_structured_dtype(): + txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n') + dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')]) + conv = {0: lambda s: int(10 * float(s)), -1: lambda s: s.upper()} + res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv) + expected = np.array( + [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt + ) + assert_equal(res, expected) + + +def test_converter_with_unicode_dtype(): + """ + With the 'bytes' encoding, tokens are encoded prior to being + passed to the converter. This means that the output of the converter may + be bytes instead of unicode as expected by `read_rows`. + + This test checks that outputs from the above scenario are properly decoded + prior to parsing by `read_rows`. + """ + txt = StringIO('abc,def\nrst,xyz') + conv = bytes.upper + res = np.loadtxt( + txt, dtype=np.dtype("U3"), converters=conv, delimiter=",", + encoding="bytes") + expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) + assert_equal(res, expected) + + +def test_read_huge_row(): + row = "1.5, 2.5," * 50000 + row = row[:-1] + "\n" + txt = StringIO(row * 2) + res = np.loadtxt(txt, delimiter=",", dtype=float) + assert_equal(res, np.tile([1.5, 2.5], (2, 50000))) + + +@pytest.mark.parametrize("dtype", "edfgFDG") +def test_huge_float(dtype): + # Covers a non-optimized path that is rarely taken: + field = "0" * 1000 + ".123456789" + dtype = np.dtype(dtype) + value = np.loadtxt([field], dtype=dtype)[()] + assert value == dtype.type("0.123456789") + + +@pytest.mark.parametrize( + ("given_dtype", "expected_dtype"), + [ + ("S", np.dtype("S5")), + ("U", np.dtype("U5")), + ], +) +def test_string_no_length_given(given_dtype, expected_dtype): + """ + The given dtype is just 'S' or 'U' with no length. In these cases, the + length of the resulting dtype is determined by the longest string found + in the file. 
+ """ + txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n") + res = np.loadtxt(txt, dtype=given_dtype, delimiter=",") + expected = np.array( + [['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype + ) + assert_equal(res, expected) + assert_equal(res.dtype, expected_dtype) + + +def test_float_conversion(): + """ + Some tests that the conversion to float64 works as accurately as the + Python built-in `float` function. In a naive version of the float parser, + these strings resulted in values that were off by an ULP or two. + """ + strings = [ + '0.9999999999999999', + '9876543210.123456', + '5.43215432154321e+300', + '0.901', + '0.333', + ] + txt = StringIO('\n'.join(strings)) + res = np.loadtxt(txt) + expected = np.array([float(s) for s in strings]) + assert_equal(res, expected) + + +def test_bool(): + # Simple test for bool via integer + txt = StringIO("1, 0\n10, -1") + res = np.loadtxt(txt, dtype=bool, delimiter=",") + assert res.dtype == bool + assert_array_equal(res, [[True, False], [True, True]]) + # Make sure we use only 1 and 0 on the byte level: + assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_integer_signs(dtype): + dtype = np.dtype(dtype) + assert np.loadtxt(["+2"], dtype=dtype) == 2 + if dtype.kind == "u": + with pytest.raises(ValueError): + np.loadtxt(["-1\n"], dtype=dtype) + else: + assert np.loadtxt(["-2\n"], dtype=dtype) == -2 + + for sign in ["++", "+-", "--", "-+"]: + with pytest.raises(ValueError): + np.loadtxt([f"{sign}2\n"], dtype=dtype) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_implicit_cast_float_to_int_fails(dtype): + txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6") + with pytest.raises(ValueError): + np.loadtxt(txt, dtype=dtype, delimiter=",") + +@pytest.mark.parametrize("dtype", (np.complex64, np.complex128)) +@pytest.mark.parametrize("with_parens", (False, True)) +def test_complex_parsing(dtype, with_parens): + s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)" + if not with_parens: + s = s.replace("(", "").replace(")", "") + + res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",") + expected = np.array( + [[1.0 - 2.5j, 3.75, 7 - 5j], [4.0, -1900j, 0]], dtype=dtype + ) + assert_equal(res, expected) + + +def test_read_from_generator(): + def gen(): + for i in range(4): + yield f"{i},{2 * i},{i**2}" + + res = np.loadtxt(gen(), dtype=int, delimiter=",") + expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]]) + assert_equal(res, expected) + + +def test_read_from_generator_multitype(): + def gen(): + for i in range(3): + yield f"{i} {i / 4}" + + res = np.loadtxt(gen(), dtype="i, d", delimiter=" ") + expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d") + assert_equal(res, expected) + + +def test_read_from_bad_generator(): + def gen(): + yield from ["1,2", b"3, 5", 12738] + + with pytest.raises( + TypeError, match=r"non-string returned while reading data"): + np.loadtxt(gen(), dtype="i, i", delimiter=",") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_object_cleanup_on_read_error(): + sentinel = 
object() + already_read = 0 + + def conv(x): + nonlocal already_read + if already_read > 4999: + raise ValueError("failed half-way through!") + already_read += 1 + return sentinel + + txt = StringIO("x\n" * 10000) + + with pytest.raises(ValueError, match="at row 5000, column 1"): + np.loadtxt(txt, dtype=object, converters={0: conv}) + + assert sys.getrefcount(sentinel) == 2 + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_character_not_bytes_compatible(): + """Test exception when a character cannot be encoded as 'S'.""" + data = StringIO("–") # == \u2013 + with pytest.raises(ValueError): + np.loadtxt(data, dtype="S5") + + +@pytest.mark.parametrize("conv", (0, [float], "")) +def test_invalid_converter(conv): + msg = ( + "converters must be a dictionary mapping columns to converter " + "functions or a single callable." + ) + with pytest.raises(TypeError, match=msg): + np.loadtxt(StringIO("1 2\n3 4"), converters=conv) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_converters_dict_raises_non_integer_key(): + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0) + + +@pytest.mark.parametrize("bad_col_ind", (3, -3)) +def test_converters_dict_raises_non_col_key(bad_col_ind): + data = StringIO("1 2\n3 4") + with pytest.raises(ValueError, match="converter specified for column"): + np.loadtxt(data, converters={bad_col_ind: int}) + + +def test_converters_dict_raises_val_not_callable(): + with pytest.raises(TypeError, + match="values of the converters dictionary must be callable"): + np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1}) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field(q): + txt = StringIO( + f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q) + assert_array_equal(res, expected) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field_with_whitepace_delimiter(q): + txt = StringIO( + f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q) + assert_array_equal(res, expected) + + +def test_quote_support_default(): + """Support for quoted fields is disabled by default.""" + txt = StringIO('"lat,long", 45, 30\n') + dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)]) + + with pytest.raises(ValueError, + match="the dtype passed requires 3 columns but 4 were"): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + # Enable quoting support with non-None value for quotechar param + txt.seek(0) + expected = np.array([("lat,long", 45., 30.)], dtype=dtype) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def 
test_quotechar_multichar_error(): + txt = StringIO("1,2\n3,4") + msg = r".*must be a single unicode character or None" + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, delimiter=",", quotechar="''") + + +def test_comment_multichar_error_with_quote(): + txt = StringIO("1,2\n3,4") + msg = ( + "when multiple comments or a multi-character comment is given, " + "quotes are not supported." + ) + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments="123", quotechar='"') + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"') + + # A single character string in a tuple is unpacked though: + res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'") + assert_equal(res, [[1, 2], [3, 4]]) + + +def test_structured_dtype_with_quotes(): + data = StringIO( + + "1000;2.4;'alpha';-34\n" + "2000;3.1;'beta';29\n" + "3500;9.9;'gamma';120\n" + "4090;8.1;'delta';0\n" + "5001;4.4;'epsilon';-99\n" + "6543;7.8;'omega';-1\n" + + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'") + assert_array_equal(res, expected) + + +def test_quoted_field_is_not_empty(): + txt = StringIO('1\n\n"4"\n""') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_quoted_field_is_not_empty_nonstrict(): + # Same as test_quoted_field_is_not_empty but check that we are not strict + # about missing closing quote (this is the `csv.reader` default also) + txt = StringIO('1\n\n"4"\n"') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_consecutive_quotechar_escaped(): + txt = StringIO('"Hello, my name is ""Monty""!"') + expected = np.array('Hello, my name is "Monty"!', dtype="U40") + res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"') + assert_equal(res, expected) + + +@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n")) +@pytest.mark.parametrize("ndmin", (0, 1, 2)) +@pytest.mark.parametrize("usecols", [None, (1, 2, 3)]) +def test_warn_on_no_data(data, ndmin, usecols): + """Check that a UserWarning is emitted when no data is read from input.""" + if usecols is not None: + expected_shape = (0, 3) + elif ndmin == 2: + expected_shape = (0, 1) # guess a single column?! 
+    else:
+        expected_shape = (0,)
+
+    txt = StringIO(data)
+    with pytest.warns(UserWarning, match="input contained no data"):
+        res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols)
+    assert res.shape == expected_shape
+
+    with NamedTemporaryFile(mode="w") as fh:
+        fh.write(data)
+        fh.seek(0)
+        with pytest.warns(UserWarning, match="input contained no data"):
+            res = np.loadtxt(fh.name, ndmin=ndmin, usecols=usecols)
+        assert res.shape == expected_shape
+
+@pytest.mark.parametrize("skiprows", (2, 3))
+def test_warn_on_skipped_data(skiprows):
+    data = "1 2 3\n4 5 6"
+    txt = StringIO(data)
+    with pytest.warns(UserWarning, match="input contained no data"):
+        np.loadtxt(txt, skiprows=skiprows)
+
+
+@pytest.mark.parametrize(["dtype", "value"], [
+    ("i2", 0x0001), ("u2", 0x0001),
+    ("i4", 0x00010203), ("u4", 0x00010203),
+    ("i8", 0x0001020304050607), ("u8", 0x0001020304050607),
+    # The following values are constructed to lead to unique bytes:
+    ("float16", 3.07e-05),
+    ("float32", 9.2557e-41), ("complex64", 9.2557e-41 + 2.8622554e-29j),
+    ("float64", -1.758571353180402e-24),
+    # Here and below, the repr side-steps a small loss of precision in
+    # complex `str` in PyPy (which is probably fine, as repr works):
+    ("complex128", repr(5.406409232372729e-29 - 1.758571353180402e-24j)),
+    # Use integer values that fit into double. Everything else leads to
+    # problems due to longdoubles going via double and decimal strings
+    # causing rounding errors.
+    ("longdouble", 0x01020304050607),
+    ("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))),
+    ("U2", "\U00010203\U000a0b0c")])
+@pytest.mark.parametrize("swap", [True, False])
+def test_byteswapping_and_unaligned(dtype, value, swap):
+    # Try to create "interesting" values within the valid unicode range:
+    dtype = np.dtype(dtype)
+    data = [f"x,{value}\n"]  # repr as PyPy `str` truncates some
+    if swap:
+        dtype = dtype.newbyteorder()
+    full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False)
+    # The above ensures that the interesting "b" field is unaligned:
+    assert full_dt.fields["b"][1] == 1
+    res = np.loadtxt(data, dtype=full_dt, delimiter=",",
+                     max_rows=1)  # max-rows prevents over-allocation
+    assert res["b"] == dtype.type(value)
+
+
+@pytest.mark.parametrize("dtype",
+        np.typecodes["AllInteger"] + "efdFD" + "?")
+def test_unicode_whitespace_stripping(dtype):
+    # Test that all numeric types (and bool) strip whitespace correctly
+    # \u202F is a narrow no-break space, `\n` is just a whitespace if quoted.
+ # Currently, skip float128 as it did not always support this and has no + # "custom" parsing: + txt = StringIO(' 3 ,"\u202F2\n"') + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, np.array([3, 2]).astype(dtype)) + + +@pytest.mark.parametrize("dtype", "FD") +def test_unicode_whitespace_stripping_complex(dtype): + # Complex has a few extra cases since it has two components and + # parentheses + line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n" + data = [line, line.replace(" ", "\u202F")] + res = np.loadtxt(data, dtype=dtype, delimiter=',') + assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", "FD") +@pytest.mark.parametrize("field", + ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) +def test_bad_complex(dtype, field): + with pytest.raises(ValueError): + np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_nul_character_error(dtype): + # Test that a \0 character is correctly recognized as an error even if + # what comes before is valid (not everything gets parsed internally). + if dtype.lower() == "g": + pytest.xfail("longdouble/clongdouble assignment may misbehave.") + with pytest.raises(ValueError): + np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_no_thousands_support(dtype): + # Mainly to document behaviour, Python supports thousands like 1_1. + # (e and G may end up using different conversion and support it, this is + # a bug but happens...) + if dtype == "e": + pytest.skip("half assignment currently uses Python float converter") + if dtype in "eG": + pytest.xfail("clongdouble assignment is buggy (uses `complex`?).") + + assert int("1_1") == float("1_1") == complex("1_1") == 11 + with pytest.raises(ValueError): + np.loadtxt(["1_1\n"], dtype=dtype) + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2\n,3\n"], + ["1,2\n", "2\r,3\n"]]) +def test_bad_newline_in_iterator(data): + # In NumPy <=1.22 this was accepted, because newlines were completely + # ignored when the input was an iterable. This could be changed, but right + # now, we raise an error. + msg = "Found an unquoted embedded newline within a single line" + with pytest.raises(ValueError, match=msg): + np.loadtxt(data, delimiter=",") + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2,3\r\n"], # a universal newline + ["1,2\n", "'2\n',3\n"], # a quoted newline + ["1,2\n", "'2\r',3\n"], + ["1,2\n", "'2\r\n',3\n"], +]) +def test_good_newline_in_iterator(data): + # The quoted newlines will be untransformed here, but are just whitespace. + res = np.loadtxt(data, delimiter=",", quotechar="'") + assert_array_equal(res, [[1., 2.], [2., 3.]]) + + +@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"]) +def test_universal_newlines_quoted(newline): + # Check that universal newline support within the tokenizer is not applied + # to quoted fields. 
(note that lines must end in newline or quoted + # fields will not include a newline at all) + data = ['1,"2\n"\n', '3,"4\n', '1"\n'] + data = [row.replace("\n", newline) for row in data] + res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"') + assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']]) + + +def test_null_character(): + # Basic tests to check that the NUL character is not special: + res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000") + assert_array_equal(res, [[1, 2, 3], [4, 5, 6]]) + + # Also not as part of a field (avoid unicode/arrays as unicode strips \0) + res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"], + delimiter=",", dtype=object) + assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]] + + +def test_iterator_fails_getting_next_line(): + class BadSequence: + def __len__(self): + return 100 + + def __getitem__(self, item): + if item == 50: + raise RuntimeError("Bad things happened!") + return f"{item}, {item + 1}" + + with pytest.raises(RuntimeError, match="Bad things happened!"): + np.loadtxt(BadSequence(), dtype=int, delimiter=",") + + +class TestCReaderUnitTests: + # These are internal tests for path that should not be possible to hit + # unless things go very very wrong somewhere. + def test_not_an_filelike(self): + with pytest.raises(AttributeError, match=".*read"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=True) + + def test_filelike_read_fails(self): + # Can only be reached if loadtxt opens the file, so it is hard to do + # via the public interface (although maybe not impossible considering + # the current "DataClass" backing). + class BadFileLike: + counter = 0 + + def read(self, size): + self.counter += 1 + if self.counter > 20: + raise RuntimeError("Bad bad bad!") + return "1,2,3\n" + + with pytest.raises(RuntimeError, match="Bad bad bad!"): + np._core._multiarray_umath._load_from_filelike( + BadFileLike(), dtype=np.dtype("i"), filelike=True) + + def test_filelike_bad_read(self): + # Can only be reached if loadtxt opens the file, so it is hard to do + # via the public interface (although maybe not impossible considering + # the current "DataClass" backing). + + class BadFileLike: + counter = 0 + + def read(self, size): + return 1234 # not a string! + + with pytest.raises(TypeError, + match="non-string returned while reading data"): + np._core._multiarray_umath._load_from_filelike( + BadFileLike(), dtype=np.dtype("i"), filelike=True) + + def test_not_an_iter(self): + with pytest.raises(TypeError, + match="error reading from object, expected an iterable"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=False) + + def test_bad_type(self): + with pytest.raises(TypeError, match="internal error: dtype must"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype="i", filelike=False) + + def test_bad_encoding(self): + with pytest.raises(TypeError, match="encoding must be a unicode"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=False, encoding=123) + + @pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"]) + def test_manual_universal_newlines(self, newline): + # This is currently not available to users, because we should always + # open files with universal newlines enabled `newlines=None`. + # (And reading from an iterator uses slightly different code paths.) 
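+ # (illustration: with newline="\r" the quoted field '"2\r"' keeps its
+ # literal carriage return, which the assertion on res[:, 0] below
+ # verifies via f"2{newline}")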
+ # We have no real support for `newline="\r"` or `newline="\n" as the + # user cannot specify those options. + data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline), + newline="") + + res = np._core._multiarray_umath._load_from_filelike( + data, dtype=np.dtype("U10"), filelike=True, + quote='"', comment="#", skiplines=1) + assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "]) + + +def test_delimiter_comment_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",") + + +def test_delimiter_quotechar_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",") + + +def test_comment_quotechar_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#") + + +def test_delimiter_and_multiple_comments_collision_raises(): + with pytest.raises( + TypeError, match="Comment characters.*cannot include the delimiter" + ): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","]) + + +@pytest.mark.parametrize( + "ws", + ( + " ", # space + "\t", # tab + "\u2003", # em + "\u00A0", # non-break + "\u3000", # ideographic space + ) +) +def test_collision_with_default_delimiter_raises(ws): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws) + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws) + + +@pytest.mark.parametrize("nl", ("\n", "\r")) +def test_control_character_newline_raises(nl): + txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}") + msg = "control character.*cannot be a newline" + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, delimiter=nl) + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, comments=nl) + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, quotechar=nl) + + +@pytest.mark.parametrize( + ("generic_data", "long_datum", "unitless_dtype", "expected_dtype"), + [ + ("2012-03", "2013-01-15", "M8", "M8[D]"), # Datetimes + ("spam-a-lot", "tis_but_a_scratch", "U", "U17"), # str + ], +) +@pytest.mark.parametrize("nrows", (10, 50000, 60000)) # lt, eq, gt chunksize +def test_parametric_unit_discovery( + generic_data, long_datum, unitless_dtype, expected_dtype, nrows +): + """Check that the correct unit (e.g. month, day, second) is discovered from + the data when a user specifies a unitless datetime.""" + # Unit should be "D" (days) due to last entry + data = [generic_data] * nrows + [long_datum] + expected = np.array(data, dtype=expected_dtype) + assert len(data) == nrows + 1 + assert len(data) == len(expected) + + # file-like path + txt = StringIO("\n".join(data)) + a = np.loadtxt(txt, dtype=unitless_dtype) + assert len(a) == len(expected) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data) + "\n") + # loading the full file... + a = np.loadtxt(fname, dtype=unitless_dtype) + assert len(a) == len(expected) + assert a.dtype == expected.dtype + assert_equal(a, expected) + # loading half of the file... 
+ a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows / 2)) + os.remove(fname) + assert len(a) == int(nrows / 2) + assert_equal(a, expected[:int(nrows / 2)]) + + +def test_str_dtype_unit_discovery_with_converter(): + data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"] + expected = np.array( + ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17" + ) + conv = lambda s: s.removeprefix("XXX") + + # file-like path + txt = StringIO("\n".join(data)) + a = np.loadtxt(txt, dtype="U", converters=conv) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + a = np.loadtxt(fname, dtype="U", converters=conv) + os.remove(fname) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_control_character_empty(): + with pytest.raises(TypeError, match="Text reading control character must"): + np.loadtxt(StringIO("1 2 3"), delimiter="") + with pytest.raises(TypeError, match="Text reading control character must"): + np.loadtxt(StringIO("1 2 3"), quotechar="") + with pytest.raises(ValueError, match="comments cannot be an empty string"): + np.loadtxt(StringIO("1 2 3"), comments="") + with pytest.raises(ValueError, match="comments cannot be an empty string"): + np.loadtxt(StringIO("1 2 3"), comments=["#", ""]) + + +def test_control_characters_as_bytes(): + """Byte control characters (comments, delimiter) are supported.""" + a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",") + assert_equal(a, [1, 2, 3]) + + +@pytest.mark.filterwarnings('ignore::UserWarning') +def test_field_growing_cases(): + # Test empty field appending/growing (each field still takes 1 character) + # to see if the final field appending does not create issues. + res = np.loadtxt([""], delimiter=",", dtype=bytes) + assert len(res) == 0 + + for i in range(1, 1024): + res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10) + assert len(res) == i + 1 + +@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000)) +def test_maxrows_exceeding_chunksize(nmax): + # tries to read all of the file, + # or less, equal, greater than _loadtxt_chunksize + file_length = 60000 + + # file-like path + data = ["a 0.5 1"] * file_length + txt = StringIO("\n".join(data)) + res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax) + assert len(res) == nmax + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) + os.remove(fname) + assert len(res) == nmax + +@pytest.mark.parametrize("nskip", (0, 10000, 12345, 50000, 67891, 100000)) +def test_skiprow_exceeding_maxrows_exceeding_chunksize(tmpdir, nskip): + # tries to read a file in chunks by skipping a variable amount of lines, + # less, equal, greater than max_rows + file_length = 110000 + data = "\n".join(f"{i} a 0.5 1" for i in range(1, file_length + 1)) + expected_length = min(60000, file_length - nskip) + expected = np.arange(nskip + 1, nskip + 1 + expected_length).astype(str) + + # file-like path + txt = StringIO(data) + res = np.loadtxt(txt, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? 
+ assert_array_equal(expected, res[:, 0]) + + # file-obj path + tmp_file = tmpdir / "test_data.txt" + tmp_file.write(data) + fname = str(tmp_file) + res = np.loadtxt(fname, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? + assert_array_equal(expected, res[:, 0]) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_mixins.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_mixins.py new file mode 100644 index 0000000..f0aec15 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_mixins.py @@ -0,0 +1,215 @@ +import numbers +import operator + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + +# NOTE: This class should be kept as an exact copy of the example from the +# docstring for NDArrayOperatorsMixin. + +class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + def __init__(self, value): + self.value = np.asarray(value) + + # One might also consider adding the built-in list type to this + # list, to support operations like np.add(array_like, list) + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + out = kwargs.get('out', ()) + for x in inputs + out: + # Only support operations with instances of _HANDLED_TYPES. + # Use ArrayLike instead of type(self) for isinstance to + # allow subclasses that don't override __array_ufunc__ to + # handle ArrayLike objects. + if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): + return NotImplemented + + # Defer to the implementation of the ufunc on unwrapped values. + inputs = tuple(x.value if isinstance(x, ArrayLike) else x + for x in inputs) + if out: + kwargs['out'] = tuple( + x.value if isinstance(x, ArrayLike) else x + for x in out) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if type(result) is tuple: + # multiple return values + return tuple(type(self)(x) for x in result) + elif method == 'at': + # no return value + return None + else: + # one return value + return type(self)(result) + + def __repr__(self): + return f'{type(self).__name__}({self.value!r})' + + +def wrap_array_like(result): + if type(result) is tuple: + return tuple(ArrayLike(r) for r in result) + else: + return ArrayLike(result) + + +def _assert_equal_type_and_value(result, expected, err_msg=None): + assert_equal(type(result), type(expected), err_msg=err_msg) + if isinstance(result, tuple): + assert_equal(len(result), len(expected), err_msg=err_msg) + for result_item, expected_item in zip(result, expected): + _assert_equal_type_and_value(result_item, expected_item, err_msg) + else: + assert_equal(result.value, expected.value, err_msg=err_msg) + assert_equal(getattr(result.value, 'dtype', None), + getattr(expected.value, 'dtype', None), err_msg=err_msg) + + +_ALL_BINARY_OPERATORS = [ + operator.lt, + operator.le, + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.floordiv, + operator.mod, + divmod, + pow, + operator.lshift, + operator.rshift, + operator.and_, + operator.xor, + operator.or_, +] + + +class TestNDArrayOperatorsMixin: + + def test_array_like_add(self): + + def check(result): + _assert_equal_type_and_value(result, ArrayLike(0)) + + check(ArrayLike(0) + 0) + check(0 + ArrayLike(0)) + + check(ArrayLike(0) + np.array(0)) + check(np.array(0) + ArrayLike(0)) + + check(ArrayLike(np.array(0)) + 0) + check(0 + ArrayLike(np.array(0))) + + 
check(ArrayLike(np.array(0)) + np.array(0)) + check(np.array(0) + ArrayLike(np.array(0))) + + def test_inplace(self): + array_like = ArrayLike(np.array([0])) + array_like += 1 + _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) + + array = np.array([0]) + array += ArrayLike(1) + _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) + + def test_opt_out(self): + + class OptOut: + """Object that opts out of __array_ufunc__.""" + __array_ufunc__ = None + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + array_like = ArrayLike(1) + opt_out = OptOut() + + # supported operations + assert_(array_like + opt_out is opt_out) + assert_(opt_out + array_like is opt_out) + + # not supported + with assert_raises(TypeError): + # don't use the Python default, array_like = array_like + opt_out + array_like += opt_out + with assert_raises(TypeError): + array_like - opt_out + with assert_raises(TypeError): + opt_out - array_like + + def test_subclass(self): + + class SubArrayLike(ArrayLike): + """Should take precedence over ArrayLike.""" + + x = ArrayLike(0) + y = SubArrayLike(1) + _assert_equal_type_and_value(x + y, y) + _assert_equal_type_and_value(y + x, y) + + def test_object(self): + x = ArrayLike(0) + obj = object() + with assert_raises(TypeError): + x + obj + with assert_raises(TypeError): + obj + x + with assert_raises(TypeError): + x += obj + + def test_unary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in [operator.neg, + operator.pos, + abs, + operator.invert]: + _assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) + + def test_forward_binary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(array, 1)) + actual = op(array_like, 1) + err_msg = f'failed for operator {op}' + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_reflected_binary_methods(self): + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(2, 1)) + actual = op(2, ArrayLike(1)) + err_msg = f'failed for operator {op}' + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_matmul(self): + array = np.array([1, 2], dtype=np.float64) + array_like = ArrayLike(array) + expected = ArrayLike(np.float64(5)) + _assert_equal_type_and_value(expected, np.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array, array_like)) + + def test_ufunc_at(self): + array = ArrayLike(np.array([1, 2, 3, 4])) + assert_(np.negative.at(array, np.array([0, 1])) is None) + _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) + + def test_ufunc_two_outputs(self): + mantissa, exponent = np.frexp(2 ** -3) + expected = (ArrayLike(mantissa), ArrayLike(exponent)) + _assert_equal_type_and_value( + np.frexp(ArrayLike(2 ** -3)), expected) + _assert_equal_type_and_value( + np.frexp(ArrayLike(np.array(2 ** -3))), expected) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_nanfunctions.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_nanfunctions.py new file mode 100644 index 0000000..89a6d1f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_nanfunctions.py @@ -0,0 +1,1438 @@ +import inspect +import warnings +from functools import partial + +import pytest + +import numpy as np +from numpy._core.numeric import 
normalize_axis_tuple +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib._nanfunctions_impl import _nan_mask, _replace_nan +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) + +# Test data +_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], + [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], + [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], + [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) + + +# Rows of _ndat with nans removed +_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), + np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), + np.array([0.1042, -0.5954]), + np.array([0.1610, 0.1859, 0.3146])] + +# Rows of _ndat with nans converted to ones +_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], + [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], + [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], + [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) + +# Rows of _ndat with nans converted to zeros +_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], + [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], + [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], + [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) + + +class TestSignatureMatch: + NANFUNCS = { + np.nanmin: np.amin, + np.nanmax: np.amax, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanpercentile: np.percentile, + np.nanquantile: np.quantile, + np.nanvar: np.var, + np.nanstd: np.std, + } + IDS = [k.__name__ for k in NANFUNCS] + + @staticmethod + def get_signature(func, default="..."): + """Construct a signature and replace all default parameter-values.""" + prm_list = [] + signature = inspect.signature(func) + for prm in signature.parameters.values(): + if prm.default is inspect.Parameter.empty: + prm_list.append(prm) + else: + prm_list.append(prm.replace(default=default)) + return inspect.Signature(prm_list) + + @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS) + def test_signature_match(self, nan_func, func): + # Ignore the default parameter-values as they can sometimes differ + # between the two functions (*e.g.* one has `False` while the other + # has `np._NoValue`) + signature = self.get_signature(func) + nan_signature = self.get_signature(nan_func) + np.testing.assert_equal(signature, nan_signature) + + def test_exhaustiveness(self): + """Validate that all nan functions are actually tested.""" + np.testing.assert_equal( + set(self.IDS), set(np.lib._nanfunctions_impl.__all__) + ) + + +class TestNanFunctions_MinMax: + + nanfuncs = [np.nanmin, np.nanmax] + stdfuncs = [np.min, np.max] + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "All-NaN slice encountered" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_masked(self): + mat = np.ma.fix_invalid(_ndat) + msk = mat._mask.copy() + for f in [np.nanmin]: + res = f(mat, axis=1) + tgt = f(_ndat, axis=1) + assert_equal(res, tgt) + assert_equal(mat._mask, msk) + assert_(not np.isinf(mat).any()) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
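+ # (scalar input is treated like a 0-d array: there is nothing to
+ # reduce away, so the value comes back unchanged)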
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + # check that rows of nan are dealt with for subclasses (#4628) + mine[1] = np.nan + for f in self.nanfuncs: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(np.isnan(res[1]) and not np.isnan(res[0]) + and not np.isnan(res[2])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine) + assert_(res.shape == ()) + assert_(res != np.nan) + assert_(len(w) == 0) + + def test_object_array(self): + arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object) + assert_equal(np.nanmin(arr), 1.0) + assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + # assert_equal does not work on object arrays of nan + assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan]) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + initial = 100 if f is np.nanmax else 0 + + ret1 = f(ar, initial=initial) + assert ret1.dtype == dtype + assert ret1 == initial + + ret2 = f(ar.view(MyNDArray), initial=initial) + assert ret2.dtype == dtype + assert ret2 == initial + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 4 if f is np.nanmin else 8 + + ret1 = f(ar, where=where, initial=5) + assert ret1.dtype == dtype + assert ret1 == reference + + ret2 = f(ar.view(MyNDArray), where=where, initial=5) + assert ret2.dtype == dtype + assert ret2 == reference + + +class TestNanFunctions_ArgminArgmax: + + nanfuncs = [np.nanargmin, np.nanargmax] + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_result_values(self): + for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): + for row in _ndat: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in") + ind = f(row) + val = row[ind] + # comparing with NaN is tricky as the result + # is always false except for NaN != NaN + assert_(not np.isnan(val)) + assert_(not fcmp(val, row).any()) + assert_(not np.equal(val, row[:ind]).any()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func in self.nanfuncs: + with pytest.raises(ValueError, match="All-NaN slice encountered"): + func(array, axis=axis) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + assert_raises_regex( + ValueError, + "attempt to get argm.. of an empty sequence", + f, mat, axis=axis) + for axis in [1]: + res = f(mat, axis=axis) + assert_equal(res, np.zeros(0)) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_keepdims(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, keepdims=True) + assert ret.ndim == ar.ndim + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_out(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + out = np.zeros((), dtype=np.intp) + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, out=out) + assert ret is out + assert ret == reference + + +_TEST_ARRAYS = { + "0d": np.array(5), + "1d": np.array([127, 39, 93, 87, 46]) +} +for _v in _TEST_ARRAYS.values(): + _v.setflags(write=False) + + +@pytest.mark.parametrize( + "dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O", +) +@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys()) +class TestNanFunctions_NumberTypes: + nanfuncs = { + np.nanmin: np.min, + np.nanmax: np.max, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanvar: np.var, + np.nanstd: np.std, + } + nanfunc_ids = [i.__name__ for i in nanfuncs] + + @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids) + @np.errstate(over="ignore") + def test_nanfunc(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat) + out = nanfunc(mat) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + 
@pytest.mark.parametrize( + "nanfunc,func", + [(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)], + ids=["nanquantile", "nanpercentile"], + ) + def test_nanfunc_q(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + if mat.dtype.kind == "c": + assert_raises(TypeError, func, mat, q=1) + assert_raises(TypeError, nanfunc, mat, q=1) + + else: + tgt = func(mat, q=1) + out = nanfunc(mat, q=1) + + assert_almost_equal(out, tgt) + + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + [(np.nanvar, np.var), (np.nanstd, np.std)], + ids=["nanvar", "nanstd"], + ) + def test_nanfunc_ddof(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat, ddof=0.5) + out = nanfunc(mat, ddof=0.5) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc", [np.nanvar, np.nanstd] + ) + def test_nanfunc_correction(self, mat, dtype, nanfunc): + mat = mat.astype(dtype) + assert_almost_equal( + nanfunc(mat, correction=0.5), nanfunc(mat, ddof=0.5) + ) + + err_msg = "ddof and correction can't be provided simultaneously." + with assert_raises_regex(ValueError, err_msg): + nanfunc(mat, ddof=0.5, correction=0.5) + + with assert_raises_regex(ValueError, err_msg): + nanfunc(mat, ddof=1, correction=0) + + +class SharedNanFunctionsTestsMixin: + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(ComplexWarning) + tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_char(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(ComplexWarning) + tgt = rf(mat, dtype=c, axis=1).dtype.type + res = nf(mat, dtype=c, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=c, axis=None).dtype.type + res = nf(mat, dtype=c, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt, f"res {res}, tgt {tgt}") + # scalar case + tgt = rf(mat, 
axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + array = np.eye(3) + mine = array.view(MyNDArray) + for f in self.nanfuncs: + expected_shape = f(array, axis=0).shape + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array, axis=1).shape + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array).shape + res = f(mine) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + + +class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nansum, np.nanprod] + stdfuncs = [np.sum, np.prod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array, axis=axis) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): + mat = np.zeros((0, 3)) + tgt = [tgt_value] * 3 + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = [] + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = tgt_value + res = f(mat, axis=None) + assert_equal(res, tgt) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 28 if f is np.nansum else 3360 + ret = f(ar, initial=2) + assert ret.dtype == dtype + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 26 if f is np.nansum else 2240 + ret = f(ar, where=where, initial=2) + assert ret.dtype == dtype + assert ret == reference + + +class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nancumsum, np.nancumprod] + stdfuncs = [np.cumsum, np.cumprod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan) + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + mat = np.zeros((0, 3)) + tgt = tgt_value * np.ones((0, 3)) + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = mat + res = f(mat, 
axis=1) + assert_equal(res, tgt) + tgt = np.zeros(0) + res = f(mat, axis=None) + assert_equal(res, tgt) + + def test_keepdims(self): + for f, g in zip(self.nanfuncs, self.stdfuncs): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = f(mat, axis=axis, out=None) + res = g(mat, axis=axis, out=None) + assert_(res.ndim == tgt.ndim) + + for f in self.nanfuncs: + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + rs = np.random.RandomState(0) + d[rs.rand(*d.shape) < 0.5] = np.nan + res = f(d, axis=None) + assert_equal(res.shape, (1155,)) + for axis in np.arange(4): + res = f(d, axis=axis) + assert_equal(res.shape, (3, 5, 7, 11)) + + def test_result_values(self): + for axis in (-2, -1, 0, 1, None): + tgt = np.cumprod(_ndat_ones, axis=axis) + res = np.nancumprod(_ndat, axis=axis) + assert_almost_equal(res, tgt) + tgt = np.cumsum(_ndat_zeros, axis=axis) + res = np.nancumsum(_ndat, axis=axis) + assert_almost_equal(res, tgt) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.eye(3) + for axis in (-2, -1, 0, 1): + tgt = rf(mat, axis=axis) + res = nf(mat, axis=axis, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + +class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nanmean, np.nanvar, np.nanstd] + stdfuncs = [np.mean, np.var, np.std] + + def test_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool, np.int_, np.object_]: + assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) + + def test_out_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool, np.int_, np.object_]: + out = np.empty(_ndat.shape[0], dtype=dtype) + assert_raises(TypeError, f, _ndat, axis=1, out=out) + + def test_ddof(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in [0, 1]: + tgt = [rf(d, ddof=ddof) for d in _rdat] + res = nf(_ndat, axis=1, ddof=ddof) + assert_almost_equal(res, tgt) + + def test_ddof_too_big(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + dsize = [len(d) for d in _rdat] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in range(5): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + sup.filter(ComplexWarning) + tgt = [ddof >= d for d in dsize] + res = nf(_ndat, axis=1, ddof=ddof) + assert_equal(np.isnan(res), tgt) + if any(tgt): + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + + # `nanvar` and `nanstd` convert complex inputs to their + # corresponding floating dtype + if func is np.nanmean: + assert out.dtype == array.dtype + else: + assert out.dtype == np.abs(array).dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + 
assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f, f_std in zip(self.nanfuncs, self.stdfuncs): + reference = f_std(ar[where][2:]) + dtype_reference = dtype if f is np.nanmean else ar.real.dtype + + ret = f(ar, where=where) + assert ret.dtype == dtype_reference + np.testing.assert_allclose(ret, reference) + + def test_nanstd_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + A[:, 5, :] = np.nan + + mean_out = np.zeros((10, 1, 5)) + std_out = np.zeros((10, 1, 5)) + + mean = np.nanmean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the object specified during calling + assert mean_out is mean + + std = np.nanstd(A, + out=std_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the object specified during calling + assert std_out is std + + # Shape of returned mean and std should be same + assert std.shape == mean.shape + assert std.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + std_old = np.nanstd(A, axis=1, keepdims=True) + + assert std_old.shape == mean.shape + assert_almost_equal(std, std_old) + + +_TIME_UNITS = ( + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" +) + +# All `inexact` + `timdelta64` type codes +_TYPE_CODES = list(np.typecodes["AllFloat"]) +_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS] + + +class TestNanFunctions_Median: + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + np.nanmedian(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) + res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanmedian(d, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanmedian(d, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = np.empty(shape_out) + result = np.nanmedian(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.median(mat, axis=1) + res = np.nanmedian(nan_mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.median(mat, axis=None) + res = np.nanmedian(nan_mat, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_small_large(self): + # test the small and large code paths, current cutoff 400 elements + for s in [5, 20, 51, 200, 1000]: + d = np.random.randn(4, s) + # Randomly set some elements to NaN: + w = np.random.randint(0, d.size, size=d.size // 5) + d.ravel()[w] = np.nan + d[:, 0] = 1. 
# ensure at least one good value + # use normal median without nans to compare + tgt = [] + for x in d: + nonan = np.compress(~np.isnan(x), x) + tgt.append(np.median(nonan, overwrite_input=True)) + + assert_array_equal(np.nanmedian(d, axis=-1), tgt) + + def test_result_values(self): + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", _TYPE_CODES) + def test_allnans(self, dtype, axis): + mat = np.full((3, 3), np.nan).astype(dtype) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + + output = np.nanmedian(mat, axis=axis) + assert output.dtype == mat.dtype + assert np.isnan(output).all() + + if axis is None: + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 3) + + # Check scalar + scalar = np.array(np.nan).astype(dtype)[()] + output_scalar = np.nanmedian(scalar) + assert output_scalar.dtype == scalar.dtype + assert np.isnan(output_scalar) + + if axis is None: + assert_(len(sup.log) == 2) + else: + assert_(len(sup.log) == 4) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_(np.nanmedian(0.) == 0.) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.nanmedian, d, axis=-5) + assert_raises(AxisError, np.nanmedian, d, axis=(0, -5)) + assert_raises(AxisError, np.nanmedian, d, axis=4) + assert_raises(AxisError, np.nanmedian, d, axis=(0, 4)) + assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + + def test_float_special(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) + assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) + assert_equal(np.nanmedian(a), inf) + + # minimum fill value check + a = np.array([[np.nan, np.nan, inf], + [np.nan, np.nan, inf]]) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) + assert_equal(np.nanmedian(a, axis=1), inf) + + # no mask path + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.nanmedian(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + if inf > 0: + assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.nanmedian(a), 4.5) + else: + assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.nanmedian(a), -2.5) + assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) + + for i in range(10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=1), inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [inf] * j) + + a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) + assert_equal(np.nanmedian(a), -inf) + assert_equal(np.nanmedian(a, axis=1), -inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [-inf] * j) + + +class 
TestNanFunctions_Percentile: + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + np.nanpercentile(ndat, 30) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.percentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + res = np.nanpercentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanpercentile(d, 90, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + @pytest.mark.parametrize("weighted", [False, True]) + def test_out(self, weighted): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + if weighted: + w_args = {"weights": np.ones_like(mat), "method": "inverted_cdf"} + nan_w_args = { + "weights": np.ones_like(nan_mat), "method": "inverted_cdf" + } + else: + w_args = {} + nan_w_args = {} + tgt = np.percentile(mat, 42, axis=1, **w_args) + res = np.nanpercentile(nan_mat, 42, axis=1, out=resout, **nan_w_args) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.percentile(mat, 42, axis=None, **w_args) + res = np.nanpercentile( + nan_mat, 42, axis=None, out=resout, **nan_w_args + ) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanpercentile( + nan_mat, 42, axis=(0, 1), out=resout, **nan_w_args + ) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + + @pytest.mark.parametrize("weighted", [False, 
True]) + @pytest.mark.parametrize("use_out", [False, True]) + def test_result_values(self, weighted, use_out): + if weighted: + percentile = partial(np.percentile, method="inverted_cdf") + nanpercentile = partial(np.nanpercentile, method="inverted_cdf") + + def gen_weights(d): + return np.ones_like(d) + + else: + percentile = np.percentile + nanpercentile = np.nanpercentile + + def gen_weights(d): + return None + + tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat] + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, 28, axis=1, + weights=gen_weights(_ndat), out=out) + assert_almost_equal(res, tgt) + # Transpose the array to fit the output convention of numpy.percentile + tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d)) + for d in _rdat]) + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, (28, 98), axis=1, + weights=gen_weights(_ndat), out=out) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanpercentile(array, 60, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_equal(np.nanpercentile(0., 100), 0.) 
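+ # (for 1-D input without NaNs this matches np.percentile: the 50th
+ # percentile of arange(6) interpolates between 2 and 3, giving the
+ # scalar 2.5 checked below)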
+ a = np.arange(6) + r = np.nanpercentile(a, 50, axis=0) + assert_equal(r, 2.5) + assert_(np.isscalar(r)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=-5) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=4) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) + assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + + def test_multiple_percentiles(self): + perc = [50, 100] + mat = np.ones((4, 3)) + nan_mat = np.nan * mat + # For checking consistency in higher dimensional case + large_mat = np.ones((3, 4, 5)) + large_mat[:, 0:2:4, :] = 0 + large_mat[:, :, 3:] *= 2 + for axis in [None, 0, 1]: + for keepdim in [False, True]: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "All-NaN slice encountered") + val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) + nan_val = np.nanpercentile(nan_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val.shape, val.shape) + + val = np.percentile(large_mat, perc, axis=axis, + keepdims=keepdim) + nan_val = np.nanpercentile(large_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val, val) + + megamat = np.ones((3, 4, 5, 6)) + assert_equal( + np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6) + ) + + @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200]) + def test_nan_value_with_weight(self, nan_weight): + x = [1, np.nan, 2, 3] + result = np.float64(2.0) + q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf") + assert_equal(q_unweighted, result) + + # The weight value at the nan position should not matter. + w = [1.0, nan_weight, 1.0, 1.0] + q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf") + assert_equal(q_weighted, result) + + @pytest.mark.parametrize("axis", [0, 1, 2]) + def test_nan_value_with_weight_ndim(self, axis): + # Create a multi-dimensional array to test + np.random.seed(1) + x_no_nan = np.random.random(size=(100, 99, 2)) + # Set some places to NaN (not particularly smart) so there is always + # some non-Nan. + x = x_no_nan.copy() + x[np.arange(99), np.arange(99), 0] = np.nan + + p = np.array([[20., 50., 30], [70, 33, 80]]) + + # We just use ones as weights, but replace it with 0 or 1e200 at the + # NaN positions below. + weights = np.ones_like(x) + + # For comparison use weighted normal percentile with nan weights at + # 0 (and no NaNs); not sure this is strictly identical but should be + # sufficiently so (if a percentile lies exactly on a 0 value). 
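+ # (the assumption being that a weight of 0 under method="inverted_cdf"
+ # effectively drops a sample, much like ignoring the NaN entries)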
+ weights[np.isnan(x)] = 0 + p_expected = np.percentile( + x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf") + + p_unweighted = np.nanpercentile( + x, p, axis=axis, method="inverted_cdf") + # The normal and unweighted versions should be identical: + assert_equal(p_unweighted, p_expected) + + weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter + p_weighted = np.nanpercentile( + x, p, axis=axis, weights=weights, method="inverted_cdf") + assert_equal(p_weighted, p_expected) + # Also check with out passed: + out = np.empty_like(p_weighted) + res = np.nanpercentile( + x, p, axis=axis, weights=weights, out=out, method="inverted_cdf") + + assert res is out + assert_equal(out, p_expected) + + +class TestNanFunctions_Quantile: + # most of this is already tested by TestPercentile + + @pytest.mark.parametrize("weighted", [False, True]) + def test_regression(self, weighted): + ar = np.arange(24).reshape(2, 3, 4).astype(float) + ar[0][1] = np.nan + if weighted: + w_args = {"weights": np.ones_like(ar), "method": "inverted_cdf"} + else: + w_args = {} + + assert_equal(np.nanquantile(ar, q=0.5, **w_args), + np.nanpercentile(ar, q=50, **w_args)) + assert_equal(np.nanquantile(ar, q=0.5, axis=0, **w_args), + np.nanpercentile(ar, q=50, axis=0, **w_args)) + assert_equal(np.nanquantile(ar, q=0.5, axis=1, **w_args), + np.nanpercentile(ar, q=50, axis=1, **w_args)) + assert_equal(np.nanquantile(ar, q=[0.5], axis=1, **w_args), + np.nanpercentile(ar, q=[50], axis=1, **w_args)) + assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1, **w_args), + np.nanpercentile(ar, q=[25, 50, 75], axis=1, **w_args)) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.nanquantile(x, 0), 0.) + assert_equal(np.nanquantile(x, 1), 3.5) + assert_equal(np.nanquantile(x, 0.5), 1.75) + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanquantile(array, 1, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + +@pytest.mark.parametrize("arr, expected", [ + # array of floats with some nans + (np.array([np.nan, 5.0, np.nan, np.inf]), + np.array([False, True, False, True])), + # int64 array that can't possibly have nans + (np.array([1, 5, 7, 9], dtype=np.int64), + True), + # bool array that can't possibly have nans + (np.array([False, True, False, True]), + True), + # 2-D complex array with nans + 
(np.array([[np.nan, 5.0], + [np.nan, np.inf]], dtype=np.complex64), + np.array([[False, True], + [False, True]])), + ]) +def test__nan_mask(arr, expected): + for out in [None, np.empty(arr.shape, dtype=np.bool)]: + actual = _nan_mask(arr, out=out) + assert_equal(actual, expected) + # the above won't distinguish between True proper + # and an array of True values; we want True proper + # for types that can't possibly contain NaN + if type(expected) is not np.ndarray: + assert actual is True + + +def test__replace_nan(): + """ Test that _replace_nan returns the original array if there are no + NaNs, not a copy. + """ + for dtype in [np.bool, np.int32, np.int64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 0) + assert mask is None + # do not make a copy if there are no nans + assert result is arr + + for dtype in [np.float32, np.float64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 2) + assert (mask == False).all() + # mask is not None, so we make a copy + assert result is not arr + assert_equal(result, arr) + + arr_nan = np.array([0, 1, np.nan], dtype=dtype) + result_nan, mask_nan = _replace_nan(arr_nan, 2) + assert_equal(mask_nan, np.array([False, False, True])) + assert result_nan is not arr_nan + assert_equal(result_nan, np.array([0, 1, 2])) + assert np.isnan(arr_nan[-1]) + + +def test_memmap_takes_fast_route(tmpdir): + # We want memory mapped arrays to take the fast route through nanmax, + # which avoids creating a mask by using fmax.reduce (see gh-28721). So we + # check that on bad input, the error is from fmax (rather than maximum). + a = np.arange(10., dtype=float) + with open(tmpdir.join("data.bin"), "w+b") as fh: + fh.write(a.tobytes()) + mm = np.memmap(fh, dtype=a.dtype, shape=a.shape) + with pytest.raises(ValueError, match="reduction operation fmax"): + np.nanmax(mm, out=np.zeros(2)) + # For completeness, same for nanmin. + with pytest.raises(ValueError, match="reduction operation fmin"): + np.nanmin(mm, out=np.zeros(2)) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_packbits.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_packbits.py new file mode 100644 index 0000000..0b0e9d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_packbits.py @@ -0,0 +1,376 @@ +from itertools import chain + +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, assert_equal, assert_raises + + +def test_packbits(): + # Copied from the docstring. + a = [[[1, 0, 1], [0, 1, 0]], + [[1, 1, 0], [0, 0, 1]]] + for dt in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dt) + b = np.packbits(arr, axis=-1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_empty(): + shapes = [ + (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), + (0, 0, 20), (0, 0, 0), + ] + for dt in '?bBhHiIlLqQ': + for shape in shapes: + a = np.empty(shape, dtype=dt) + b = np.packbits(a) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, (0,)) + + +def test_packbits_empty_with_axis(): + # Original shapes and lists of packed shapes for different axes. 
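+    # Editor's note: along the packed axis a length of n shrinks to
+    # ceil(n / 8) bytes, e.g. 20 -> 3 below; a hypothetical sanity check:
+    assert -(-20 // 8) == 3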
+ shapes = [ + ((0,), [(0,)]), + ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), + ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), + ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), + ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), + ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), + ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), + ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), + ] + for dt in '?bBhHiIlLqQ': + for in_shape, out_shapes in shapes: + for ax, out_shape in enumerate(out_shapes): + a = np.empty(in_shape, dtype=dt) + b = np.packbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + +@pytest.mark.parametrize('bitorder', ('little', 'big')) +def test_packbits_large(bitorder): + # test data large enough for 16 byte vectorization + a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, + 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, + 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, + 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, + 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) + a = a.repeat(3) + for dtype in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + b = np.packbits(arr, axis=None, bitorder=bitorder) + assert_equal(b.dtype, np.uint8) + r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, + 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, + 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, + 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, + 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, + 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, + 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, + 129, 248, 227, 129, 199, 31, 128] + if bitorder == 'big': + assert_array_equal(b, r) + # equal for size being multiple of 8 + assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a) + + # check last byte of different remainders (16 byte vectorization) + b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] + assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, + 198, 196, 192]) + + arr = arr.reshape(36, 25) + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, + 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, + 107, 75, 74, 88], + [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, + 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, + 41, 104, 122, 90, 18], + [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, + 150, 150, 146, 210, 210, 246, 255, 255, 223, + 151, 21, 17, 17, 131, 163], + [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, + 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, + 202, 234, 170, 168], + [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, + 240, 208, 144, 128, 160, 224, 240, 208, 144, + 144, 176, 240, 224, 192, 
128]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 127, 192, 0], + [ 7, 252, 15, 128], + [240, 0, 28, 0], + [255, 128, 0, 128], + [192, 31, 255, 128], + [142, 63, 0, 0], + [255, 240, 7, 0], + [ 7, 224, 14, 0], + [126, 0, 224, 0], + [255, 255, 199, 0], + [ 56, 28, 126, 0], + [113, 248, 227, 128], + [227, 142, 63, 0], + [ 0, 28, 112, 0], + [ 15, 248, 3, 128], + [ 28, 126, 56, 0], + [ 56, 255, 241, 128], + [240, 7, 224, 0], + [227, 129, 192, 128], + [255, 255, 254, 0], + [126, 0, 224, 0], + [ 3, 241, 248, 0], + [ 0, 255, 241, 128], + [128, 0, 255, 128], + [224, 1, 255, 128], + [248, 252, 126, 0], + [ 0, 7, 3, 128], + [224, 113, 248, 0], + [ 0, 252, 127, 128], + [142, 63, 224, 0], + [224, 14, 63, 0], + [ 7, 3, 128, 0], + [113, 255, 255, 128], + [ 28, 113, 199, 0], + [ 7, 227, 142, 0], + [ 14, 56, 252, 0]]) + + arr = arr.T.copy() + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, + 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, + 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, + 7, 113, 28, 7, 14], + [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, + 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, + 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, + 3, 255, 113, 227, 56], + [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, + 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, + 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, + 128, 255, 199, 142, 252], + [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, + 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, + 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 72, 113, 214, 0], + [186, 216, 120, 210, 128], + [178, 248, 248, 210, 128], + [178, 241, 216, 64, 192], + [150, 227, 152, 68, 80], + [215, 195, 24, 5, 112], + [ 87, 202, 60, 5, 48], + [ 83, 90, 52, 1, 160], + [ 83, 90, 182, 72, 160], + [195, 83, 150, 88, 224], + [199, 83, 150, 92, 240], + [206, 119, 150, 92, 208], + [204, 127, 146, 78, 144], + [204, 109, 210, 110, 128], + [140, 73, 210, 39, 160], + [140, 64, 246, 181, 224], + [136, 208, 255, 149, 240], + [136, 244, 255, 220, 208], + [ 8, 189, 223, 222, 144], + [ 40, 45, 151, 218, 144], + [105, 41, 21, 218, 176], + [107, 104, 17, 202, 240], + [ 75, 122, 17, 234, 224], + [ 74, 90, 131, 170, 192], + [ 88, 18, 163, 168, 128]]) + + # result is the same if input is multiplied with a nonzero value + for dtype in 'bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + rnd = np.random.randint(low=np.iinfo(dtype).min, + high=np.iinfo(dtype).max, size=arr.size, + dtype=dtype) + rnd[rnd == 0] = 1 + arr *= rnd.astype(dtype) + b = np.packbits(arr, axis=-1) + assert_array_equal(np.unpackbits(b)[:-4], a) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_very_large(): + # test some with a larger arrays gh-8637 + # code is covered earlier but larger array makes crash on bug more likely + for s in range(950, 1050): + for dt in '?bBhHiIlLqQ': + x = np.ones((200, s), dtype=bool) + np.packbits(x, axis=1) + + +def test_unpackbits(): + # Copied from the docstring. 
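+    # Editor's illustrative sketch (assumed values, not upstream): with the
+    # default 'big' bitorder each byte unpacks most-significant bit first,
+    # so 23 unpacks to the bits of '00010111' in the last row below.
+    assert np.binary_repr(23, width=8) == '00010111'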
+ a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]])) + +def test_pack_unpack_order(): + a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + b_little = np.unpackbits(a, axis=1, bitorder='little') + b_big = np.unpackbits(a, axis=1, bitorder='big') + assert_array_equal(b, b_big) + assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little')) + assert_array_equal(b[:, ::-1], b_little) + assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big')) + assert_raises(ValueError, np.unpackbits, a, bitorder='r') + assert_raises(TypeError, np.unpackbits, a, bitorder=10) + + +def test_unpackbits_empty(): + a = np.empty((0,), dtype=np.uint8) + b = np.unpackbits(a) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.empty((0,))) + + +def test_unpackbits_empty_with_axis(): + # Lists of packed shapes for different axes and unpacked shapes. + shapes = [ + ([(0,)], (0,)), + ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), + ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), + ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), + ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), + ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), + ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), + ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), + ] + for in_shapes, out_shape in shapes: + for ax, in_shape in enumerate(in_shapes): + a = np.empty(in_shape, dtype=np.uint8) + b = np.unpackbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + + +def test_unpackbits_large(): + # test all possible numbers via comparison to already tested packbits + d = np.arange(277, dtype=np.uint8) + assert_array_equal(np.packbits(np.unpackbits(d)), d) + assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) + d = np.tile(d, (3, 1)) + assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) + d = d.T.copy() + assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) + + +class TestCount: + x = np.array([ + [1, 0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 1], + [1, 1, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 0, 1, 0, 1, 0], + ], dtype=np.uint8) + padded1 = np.zeros(57, dtype=np.uint8) + padded1[:49] = x.ravel() + padded1b = np.zeros(57, dtype=np.uint8) + padded1b[:49] = x[::-1].copy().ravel() + padded2 = np.zeros((9, 9), dtype=np.uint8) + padded2[:7, :7] = x + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1))) + def test_roundtrip(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + # test complete invertibility of packbits and unpackbits with count + packed = np.packbits(self.x, bitorder=bitorder) + unpacked = np.unpackbits(packed, count=count, bitorder=bitorder) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + ]) + def test_count(self, kwargs): + packed = np.packbits(self.x) + unpacked = np.unpackbits(packed, **kwargs) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:-1]) + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + # delta==-1 when count<0 because 
one extra zero of padding + @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1))) + def test_roundtrip_axis(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + packed0 = np.packbits(self.x, axis=0, bitorder=bitorder) + unpacked0 = np.unpackbits(packed0, axis=0, count=count, + bitorder=bitorder) + assert_equal(unpacked0.dtype, np.uint8) + assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1, bitorder=bitorder) + unpacked1 = np.unpackbits(packed1, axis=1, count=count, + bitorder=bitorder) + assert_equal(unpacked1.dtype, np.uint8) + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + {'bitorder': 'little'}, + {'bitorder': 'little', 'count': None}, + {'bitorder': 'big'}, + {'bitorder': 'big', 'count': None}, + ]) + def test_axis_count(self, kwargs): + packed0 = np.packbits(self.x, axis=0) + unpacked0 = np.unpackbits(packed0, axis=0, **kwargs) + assert_equal(unpacked0.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]]) + else: + assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1) + unpacked1 = np.unpackbits(packed1, axis=1, **kwargs) + assert_equal(unpacked1.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1]) + else: + assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1]) + + def test_bad_count(self): + packed0 = np.packbits(self.x, axis=0) + assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9) + packed1 = np.packbits(self.x, axis=1) + assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9) + packed = np.packbits(self.x) + assert_raises(ValueError, np.unpackbits, packed, count=-57) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_polynomial.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_polynomial.py new file mode 100644 index 0000000..c173ac3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_polynomial.py @@ -0,0 +1,320 @@ +import pytest + +import numpy as np +import numpy.polynomial.polynomial as poly +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + +# `poly1d` has some support for `np.bool` and `np.timedelta64`, +# but it is limited and they are therefore excluded here +TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O" + + +class TestPolynomial: + def test_poly1d_str_and_repr(self): + p = np.poly1d([1., 2, 3]) + assert_equal(repr(p), 'poly1d([1., 2., 3.])') + assert_equal(str(p), + ' 2\n' + '1 x + 2 x + 3') + + q = np.poly1d([3., 2, 1]) + assert_equal(repr(q), 'poly1d([3., 2., 1.])') + assert_equal(str(q), + ' 2\n' + '3 x + 2 x + 1') + + r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) + assert_equal(str(r), + ' 3 2\n' + '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') + + assert_equal(str(np.poly1d([-3, -2, -1])), + ' 2\n' + '-3 x - 2 x - 1') + + def test_poly1d_resolution(self): + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p(0), 3.0) + assert_equal(p(5), 38.0) + assert_equal(q(0), 1.0) + assert_equal(q(5), 86.0) + + def test_poly1d_math(self): + # here we use some simple 
coeffs to make calculations easier + p = np.poly1d([1., 2, 4]) + q = np.poly1d([4., 2, 1]) + assert_equal(p / q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) + assert_equal(p.integ(), np.poly1d([1 / 3, 1., 4., 0.])) + assert_equal(p.integ(1), np.poly1d([1 / 3, 1., 4., 0.])) + + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) + assert_equal(p + q, np.poly1d([4., 4., 4.])) + assert_equal(p - q, np.poly1d([-2., 0., 2.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) + assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) + assert_equal(p.deriv(), np.poly1d([2., 2.])) + assert_equal(p.deriv(2), np.poly1d([2.])) + assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), + (np.poly1d([1., -1.]), np.poly1d([0.]))) + + @pytest.mark.parametrize("type_code", TYPE_CODES) + def test_poly1d_misc(self, type_code: str) -> None: + dtype = np.dtype(type_code) + ar = np.array([1, 2, 3], dtype=dtype) + p = np.poly1d(ar) + + # `__eq__` + assert_equal(np.asarray(p), ar) + assert_equal(np.asarray(p).dtype, dtype) + assert_equal(len(p), 2) + + # `__getitem__` + comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0} + for index, ref in comparison_dct.items(): + scalar = p[index] + assert_equal(scalar, ref) + if dtype == np.object_: + assert isinstance(scalar, int) + else: + assert_equal(scalar.dtype, dtype) + + def test_poly1d_variable_arg(self): + q = np.poly1d([1., 2, 3], variable='y') + assert_equal(str(q), + ' 2\n' + '1 y + 2 y + 3') + q = np.poly1d([1., 2, 3], variable='lambda') + assert_equal(str(q), + ' 2\n' + '1 lambda + 2 lambda + 3') + + def test_poly(self): + assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), + [1, -3, -2, 6]) + + # From matlab docs + A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] + assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) + + # Should produce real output for perfect conjugates + assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) + assert_(np.isrealobj(np.poly([0 + 1j, -0 + -1j, 1 + 2j, + 1 - 2j, 1. 
+ 3.5j, 1 - 3.5j])))
+        assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j, 1 + 3j, 1 - 3.j])))
+        assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j])))
+        assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
+        assert_(np.isrealobj(np.poly([1j, -1j])))
+        assert_(np.isrealobj(np.poly([1, -1])))
+
+        assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))
+
+        np.random.seed(42)
+        a = np.random.randn(100) + 1j * np.random.randn(100)
+        assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
+
+    def test_roots(self):
+        assert_array_equal(np.roots([1, 0, 0]), [0, 0])
+
+        # Testing for larger root values
+        for i in np.logspace(10, 25, num=1000, base=10):
+            tgt = np.array([-1, 1, i])
+            res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))
+            # Adapt the expected precision to the magnitude of the root,
+            # to allow for numerical rounding error.
+            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
+
+        for i in np.logspace(10, 25, num=1000, base=10):
+            tgt = np.array([-1, 1.01, i])
+            res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))
+            # Same precision adaptation as above.
+            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
+
+    def test_str_leading_zeros(self):
+        p = np.poly1d([4, 3, 2, 1])
+        p[3] = 0
+        assert_equal(str(p),
+                     "   2\n"
+                     "3 x + 2 x + 1")
+
+        p = np.poly1d([1, 2])
+        p[0] = 0
+        p[1] = 0
+        assert_equal(str(p), " \n0")
+
+    def test_polyfit(self):
+        c = np.array([3., 2., 1.])
+        x = np.linspace(0, 2, 7)
+        y = np.polyval(c, x)
+        err = [1, -1, 1, -1, 1, -1, 1]
+        weights = np.arange(8, 1, -1)**2 / 7.0
+
+        # Check exception when too few points for variance estimate. Note that
+        # the estimate requires the number of data points to exceed
+        # degree + 1
+        assert_raises(ValueError, np.polyfit,
+                      [1], [1], deg=0, cov=True)
+
+        # check 1D case
+        m, cov = np.polyfit(x, y + err, 2, cov=True)
+        est = [3.8571, 0.2857, 1.619]
+        assert_almost_equal(est, m, decimal=4)
+        val0 = [[ 1.4694, -2.9388,  0.8163],
+                [-2.9388,  6.3673, -2.1224],
+                [ 0.8163, -2.1224,  1.161 ]]  # noqa: E202
+        assert_almost_equal(val0, cov, decimal=4)
+
+        m2, cov2 = np.polyfit(x, y + err, 2, w=weights, cov=True)
+        assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
+        val = [[ 4.3964, -5.0052,  0.4878],
+               [-5.0052,  6.8067, -0.9089],
+               [ 0.4878, -0.9089,  0.3337]]
+        assert_almost_equal(val, cov2, decimal=4)
+
+        m3, cov3 = np.polyfit(x, y + err, 2, w=weights, cov="unscaled")
+        assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
+        val = [[ 0.1473, -0.1677,  0.0163],
+               [-0.1677,  0.228 , -0.0304],  # noqa: E203
+               [ 0.0163, -0.0304,  0.0112]]
+        assert_almost_equal(val, cov3, decimal=4)
+
+        # check 2D (n,1) case
+        y = y[:, np.newaxis]
+        c = c[:, np.newaxis]
+        assert_almost_equal(c, np.polyfit(x, y, 2))
+        # check 2D (n,2) case
+        yy = np.concatenate((y, y), axis=1)
+        cc = np.concatenate((c, c), axis=1)
+        assert_almost_equal(cc, np.polyfit(x, yy, 2))
+
+        m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
+        assert_almost_equal(est, m[:, 0], decimal=4)
+        assert_almost_equal(est, m[:, 1], decimal=4)
+        assert_almost_equal(val0, cov[:, :, 0], decimal=4)
+        assert_almost_equal(val0, cov[:, :, 1], decimal=4)
+
+        # check order 1 (deg=0) case, where the analytic results are simple
+        np.random.seed(123)
+        y = np.random.normal(size=(4, 10000))
+        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
+        # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
+ assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # Without scaling, since reduced chi2 is 1, the result should be the same. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]), + deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.5) + # If we estimate our errors wrong, no change with scaling: + w = np.full(y.shape[0], 1. / 0.5) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # But if we do not scale, our estimate for the error in the mean will + # differ. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.25) + + def test_objects(self): + from decimal import Decimal + p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) + p2 = p * Decimal('1.333333333333333') + assert_(p2[1] == Decimal("3.9999999999999990")) + p2 = p.deriv() + assert_(p2[1] == Decimal('8.0')) + p2 = p.integ() + assert_(p2[3] == Decimal("1.333333333333333333333333333")) + assert_(p2[2] == Decimal('1.5')) + assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + p = np.poly([Decimal(1), Decimal(2)]) + assert_equal(np.poly([Decimal(1), Decimal(2)]), + [1, Decimal(-3), Decimal(2)]) + + def test_complex(self): + p = np.poly1d([3j, 2j, 1j]) + p2 = p.integ() + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) + p2 = p.deriv() + assert_((p2.coeffs == [6j, 2j]).all()) + + def test_integ_coeffs(self): + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + assert_( + (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6]).all()) + + def test_zero_dims(self): + try: + np.poly(np.zeros((0, 0))) + except ValueError: + pass + + def test_poly_int_overflow(self): + """ + Regression test for gh-5096. + """ + v = np.arange(1, 21) + assert_almost_equal(np.poly(v), np.poly(np.diag(v))) + + def test_zero_poly_dtype(self): + """ + Regression test for gh-16354. 
+ """ + z = np.array([0, 0, 0]) + p = np.poly1d(z.astype(np.int64)) + assert_equal(p.coeffs.dtype, np.int64) + + p = np.poly1d(z.astype(np.float32)) + assert_equal(p.coeffs.dtype, np.float32) + + p = np.poly1d(z.astype(np.complex64)) + assert_equal(p.coeffs.dtype, np.complex64) + + def test_poly_eq(self): + p = np.poly1d([1, 2, 3]) + p2 = np.poly1d([1, 2, 4]) + assert_equal(p == None, False) # noqa: E711 + assert_equal(p != None, True) # noqa: E711 + assert_equal(p == p, True) + assert_equal(p == p2, False) + assert_equal(p != p2, True) + + def test_polydiv(self): + b = np.poly1d([2, 6, 6, 1]) + a = np.poly1d([-1j, (1 + 2j), -(2 + 1j), 1]) + q, r = np.polydiv(b, a) + assert_equal(q.coeffs.dtype, np.complex128) + assert_equal(r.coeffs.dtype, np.complex128) + assert_equal(q * a + r, b) + + c = [1, 2, 3] + d = np.poly1d([1, 2, 3]) + s, t = np.polydiv(c, d) + assert isinstance(s, np.poly1d) + assert isinstance(t, np.poly1d) + u, v = np.polydiv(d, c) + assert isinstance(u, np.poly1d) + assert isinstance(v, np.poly1d) + + def test_poly_coeffs_mutable(self): + """ Coefficients should be modifiable """ + p = np.poly1d([1, 2, 3]) + + p.coeffs += 1 + assert_equal(p.coeffs, [2, 3, 4]) + + p.coeffs[2] += 10 + assert_equal(p.coeffs, [2, 3, 14]) + + # this never used to be allowed - let's not add features to deprecated + # APIs + assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_recfunctions.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_recfunctions.py new file mode 100644 index 0000000..eee1f47 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_recfunctions.py @@ -0,0 +1,1052 @@ + +import numpy as np +import numpy.ma as ma +from numpy.lib.recfunctions import ( + append_fields, + apply_along_fields, + assign_fields_by_name, + drop_fields, + find_duplicates, + get_fieldstructure, + join_by, + merge_arrays, + recursive_fill_fields, + rename_fields, + repack_fields, + require_fields, + stack_arrays, + structured_to_unstructured, + unstructured_to_structured, +) +from numpy.ma.mrecords import MaskedRecords +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises + +get_fieldspec = np.lib.recfunctions._get_fieldspec +get_names = np.lib.recfunctions.get_names +get_names_flat = np.lib.recfunctions.get_names_flat +zip_descr = np.lib.recfunctions._zip_descr +zip_dtype = np.lib.recfunctions._zip_dtype + + +class TestRecFunctions: + # Misc tests + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array([('A', 1.), ('B', 2.)], + dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_zip_descr(self): + # Test zip_descr + (w, x, y, z) = self.data + + # Std array + test = zip_descr((x, x), flatten=True) + assert_equal(test, + np.dtype([('', int), ('', int)])) + test = zip_descr((x, x), flatten=False) + assert_equal(test, + np.dtype([('', int), ('', int)])) + + # Std & flexible-dtype + test = zip_descr((x, z), flatten=True) + assert_equal(test, + np.dtype([('', int), ('A', '|S3'), ('B', float)])) + test = zip_descr((x, z), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('A', '|S3'), ('B', float)])])) + + # Standard & nested dtype + test = zip_descr((x, w), flatten=True) + assert_equal(test, + np.dtype([('', int), + ('a', int), + ('ba', float), ('bb', int)])) + test = 
zip_descr((x, w), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('a', int), + ('b', [('ba', float), ('bb', int)])])])) + + def test_drop_fields(self): + # Test drop_fields + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # A basic field + test = drop_fields(a, 'a') + control = np.array([((2, 3.0),), ((5, 6.0),)], + dtype=[('b', [('ba', float), ('bb', int)])]) + assert_equal(test, control) + + # Another basic field (but nesting two fields) + test = drop_fields(a, 'b') + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # A nested sub-field + test = drop_fields(a, ['ba', ]) + control = np.array([(1, (3.0,)), (4, (6.0,))], + dtype=[('a', int), ('b', [('bb', int)])]) + assert_equal(test, control) + + # All the nested sub-field from a field: zap that field + test = drop_fields(a, ['ba', 'bb']) + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # dropping all fields results in an array with no fields + test = drop_fields(a, ['a', 'b']) + control = np.array([(), ()], dtype=[]) + assert_equal(test, control) + + def test_rename_fields(self): + # Test rename fields + a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + dtype=[('a', int), + ('b', [('ba', float), ('bb', (float, 2))])]) + test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) + newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] + control = a.view(newdtype) + assert_equal(test.dtype, newdtype) + assert_equal(test, control) + + def test_get_names(self): + # Test get_names + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ('ba', 'bb')))) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ()))) + + ndtype = np.dtype([]) + test = get_names(ndtype) + assert_equal(test, ()) + + def test_get_names_flat(self): + # Test get_names_flat + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names_flat(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b', 'ba', 'bb')) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b')) + + ndtype = np.dtype([]) + test = get_names_flat(ndtype) + assert_equal(test, ()) + + def test_get_fieldstructure(self): + # Test get_fieldstructure + + # No nested fields + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': []}) + + # One 1-nested field + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) + + # One 2-nested fields + ndtype = np.dtype([('A', int), + ('B', [('BA', int), + ('BB', [('BBA', int), ('BBB', int)])])]) + test = get_fieldstructure(ndtype) + control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], + 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + assert_equal(test, control) + + # 0 fields + ndtype = np.dtype([]) + test = get_fieldstructure(ndtype) + assert_equal(test, {}) + + def test_find_duplicates(self): + # Test find_duplicates + a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), + (1, (1., 'B')), (2, (2., 
'B')), (2, (2., 'C'))], + mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), + (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], + dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 2] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='A', return_index=True) + control = [0, 1, 2, 3, 5] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='B', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BA', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BB', return_index=True) + control = [0, 1, 2, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_find_duplicates_ignoremask(self): + # Test the ignoremask option of find_duplicates + ndtype = [('a', int)] + a = ma.array([1, 1, 1, 2, 2, 3, 3], + mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + test = find_duplicates(a, ignoremask=True, return_index=True) + control = [0, 1, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 1, 2, 3, 4, 6] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_repack_fields(self): + dt = np.dtype('u1,f4,i8', align=True) + a = np.zeros(2, dtype=dt) + + assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) + assert_equal(repack_fields(a).itemsize, 13) + assert_equal(repack_fields(repack_fields(dt), align=True), dt) + + # make sure type is preserved + dt = np.dtype((np.record, dt)) + assert_(repack_fields(dt).type is np.record) + + def test_structured_to_unstructured(self, tmp_path): + a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + out = structured_to_unstructured(a) + assert_equal(out, np.zeros((4, 5), dtype='f8')) + + b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) + assert_equal(out, np.array([3., 5.5, 9., 11.])) + out = np.mean(structured_to_unstructured(b[['x']]), axis=-1) + assert_equal(out, np.array([1., 4. 
, 7., 10.])) # noqa: E203 + + c = np.arange(20).reshape((4, 5)) + out = unstructured_to_structured(c, a.dtype) + want = np.array([( 0, ( 1., 2), [ 3., 4.]), + ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), + (15, (16., 17), [18., 19.])], + dtype=[('a', 'i4'), + ('b', [('f0', 'f4'), ('f1', 'u2')]), + ('c', 'f4', (2,))]) + assert_equal(out, want) + + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + assert_equal(apply_along_fields(np.mean, d), + np.array([ 8.0 / 3, 16.0 / 3, 26.0 / 3, 11.])) + assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), + np.array([ 3., 5.5, 9., 11.])) + + # check that for uniform field dtypes we get a view, not a copy: + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing the order of attributes works + dd_attrib_rev = structured_to_unstructured(d[['z', 'x']]) + assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]]) + assert_(np.shares_memory(dd_attrib_rev, d)) + + # including uniform fields with subarrays unpacked + d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]), + (8, [9, 10], [[11, 12], [13, 14]])], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2)))]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing with sub-arrays works as expected + d_rev = d[::-1] + dd_rev = structured_to_unstructured(d_rev) + assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14], + [1, 2, 3, 4, 5, 6, 7]]) + + # check that sub-arrays keep the order of their values + d_attrib_rev = d[['x2', 'x1', 'x0']] + dd_attrib_rev = structured_to_unstructured(d_attrib_rev) + assert_equal(dd_attrib_rev, [[4, 5, 6, 7, 2, 3, 1], + [11, 12, 13, 14, 9, 10, 8]]) + + # with ignored field at the end + d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32), + (8, [9, 10], [[11, 12], [13, 14]], 64)], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2))), ('ignored', 'u1')]) + dd = structured_to_unstructured(d[['x0', 'x1', 'x2']]) + assert_(np.shares_memory(dd, d)) + assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7], + [8, 9, 10, 11, 12, 13, 14]]) + + # test that nested fields with identical names don't break anything + point = np.dtype([('x', int), ('y', int)]) + triangle = np.dtype([('a', point), ('b', point), ('c', point)]) + arr = np.zeros(10, triangle) + res = structured_to_unstructured(arr, dtype=int) + assert_equal(res, np.zeros((10, 6), dtype=int)) + + # test nested combinations of subarrays and structured arrays, gh-13333 + def subarray(dt, shape): + return np.dtype((dt, shape)) + + def structured(*dts): + return np.dtype([(f'x{i}', dt) for i, dt in enumerate(dts)]) + + def inspect(dt, dtype=None): + arr = np.zeros((), dt) + ret = structured_to_unstructured(arr, dtype=dtype) + backarr = unstructured_to_structured(ret, dt) + return ret.shape, ret.dtype, backarr.dtype + + dt = structured(subarray(structured(np.int32, np.int32), 3)) + assert_equal(inspect(dt), ((6,), np.int32, dt)) + + dt = structured(subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((4,), np.int32, dt)) + + dt = structured(np.int32) + assert_equal(inspect(dt), ((1,), np.int32, dt)) + + dt = structured(np.int32, subarray(subarray(np.int32, 2), 2)) + 
assert_equal(inspect(dt), ((5,), np.int32, dt)) + + dt = structured() + assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt)) + + # these currently don't work, but we may make it work in the future + assert_raises(NotImplementedError, structured_to_unstructured, + np.zeros(3, dt), dtype=np.int32) + assert_raises(NotImplementedError, unstructured_to_structured, + np.zeros((3, 0), dtype=np.int32)) + + # test supported ndarray subclasses + d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')]) + dd_expected = structured_to_unstructured(d_plain, copy=True) + + # recarray + d = d_plain.view(np.recarray) + + dd = structured_to_unstructured(d, copy=False) + ddd = structured_to_unstructured(d, copy=True) + assert_(np.shares_memory(d, dd)) + assert_(type(dd) is np.recarray) + assert_(type(ddd) is np.recarray) + assert_equal(dd, dd_expected) + assert_equal(ddd, dd_expected) + + # memmap + d = np.memmap(tmp_path / 'memmap', + mode='w+', + dtype=d_plain.dtype, + shape=d_plain.shape) + d[:] = d_plain + dd = structured_to_unstructured(d, copy=False) + ddd = structured_to_unstructured(d, copy=True) + assert_(np.shares_memory(d, dd)) + assert_(type(dd) is np.memmap) + assert_(type(ddd) is np.memmap) + assert_equal(dd, dd_expected) + assert_equal(ddd, dd_expected) + + def test_unstructured_to_structured(self): + # test if dtype is the args of np.dtype + a = np.zeros((20, 2)) + test_dtype_args = [('x', float), ('y', float)] + test_dtype = np.dtype(test_dtype_args) + field1 = unstructured_to_structured(a, dtype=test_dtype_args) # now + field2 = unstructured_to_structured(a, dtype=test_dtype) # before + assert_equal(field1, field2) + + def test_field_assignment_by_name(self): + a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + newdt = [('b', 'f4'), ('c', 'u1')] + + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + + b = np.array([(1, 2), (3, 4)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([(1, 1, 2), (1, 3, 4)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([(0, 1, 2), (0, 3, 4)], dtype=a.dtype)) + + # test nested fields + a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])]) + newdt = [('a', [('c', 'u1')])] + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + b = np.array([((2,),), ((3,),)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([((1, 2),), ((1, 3),)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([((0, 2),), ((0, 3),)], dtype=a.dtype)) + + # test unstructured code path for 0d arrays + a, b = np.array(3), np.array(0) + assign_fields_by_name(b, a) + assert_equal(b[()], 3) + + +class TestRecursiveFillFields: + # Test recursive_fill_fields. 
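+    # (Editorial note on the behavior exercised below:
+    # recursive_fill_fields(input, output) copies each field the two
+    # structured arrays share into `output`, recursing into nested dtypes;
+    # rows of `output` beyond len(input) keep their existing fill values.)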
+ def test_simple_flexible(self): + # Test recursive_fill_fields on flexible-array + a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + b = np.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = np.array([(1, 10.), (2, 20.), (0, 0.)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + def test_masked_flexible(self): + # Test recursive_fill_fields on masked flexible-array + a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], + dtype=[('A', int), ('B', float)]) + b = ma.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = ma.array([(1, 10.), (2, 20.), (0, 0.)], + mask=[(0, 1), (1, 0), (0, 0)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + +class TestMergeArrays: + # Test merge_arrays + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array( + [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test merge_arrays on a single array. + (_, x, _, z) = self.data + + test = merge_arrays(x) + control = np.array([(1,), (2,)], dtype=[('f0', int)]) + assert_equal(test, control) + test = merge_arrays((x,)) + assert_equal(test, control) + + test = merge_arrays(z, flatten=False) + assert_equal(test, z) + test = merge_arrays(z, flatten=True) + assert_equal(test, z) + + def test_solo_w_flatten(self): + # Test merge_arrays on a single array w & w/o flattening + w = self.data[0] + test = merge_arrays(w, flatten=False) + assert_equal(test, w) + + test = merge_arrays(w, flatten=True) + control = np.array([(1, 2, 3.0), (4, 5, 6.0)], + dtype=[('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + def test_standard(self): + # Test standard & standard + # Test merge arrays + (_, x, y, _) = self.data + test = merge_arrays((x, y), usemask=False) + control = np.array([(1, 10), (2, 20), (-1, 30)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + + test = merge_arrays((x, y), usemask=True) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_flatten(self): + # Test standard & flexible + (_, x, _, z) = self.data + test = merge_arrays((x, z), flatten=True) + control = np.array([(1, 'A', 1.), (2, 'B', 2.)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + + test = merge_arrays((x, z), flatten=False) + control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], + dtype=[('f0', int), + ('f1', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + def test_flatten_wflexible(self): + # Test flatten standard & nested + (w, x, _, _) = self.data + test = merge_arrays((x, w), flatten=True) + control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], + dtype=[('f0', int), + ('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + test = merge_arrays((x, w), flatten=False) + controldtype = [('f0', int), + ('f1', [('a', int), + ('b', [('ba', float), ('bb', int), ('bc', [])])])] + control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], + dtype=controldtype) + assert_equal(test, control) + + def test_wmasked_arrays(self): + # Test merge_arrays masked arrays + (_, x, _, _) = self.data + mx = ma.array([1, 2, 3], mask=[1, 0, 0]) + test = 
merge_arrays((x, mx), usemask=True) + control = ma.array([(1, 1), (2, 2), (-1, 3)], + mask=[(0, 1), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + test = merge_arrays((x, mx), usemask=True, asrecarray=True) + assert_equal(test, control) + assert_(isinstance(test, MaskedRecords)) + + def test_w_singlefield(self): + # Test single field + test = merge_arrays((np.array([1, 2]).view([('a', int)]), + np.array([10., 20., 30.])),) + control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('a', int), ('f1', float)]) + assert_equal(test, control) + + def test_w_shorter_flex(self): + # Test merge_arrays w/ a shorter flexndarray. + z = self.data[-1] + + # Fixme, this test looks incomplete and broken + #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + # dtype=[('A', '|S3'), ('B', float), ('C', int)]) + #assert_equal(test, control) + + merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + dtype=[('A', '|S3'), ('B', float), ('C', int)]) + + def test_singlerecord(self): + (_, x, y, z) = self.data + test = merge_arrays((x[0], y[0], z[0]), usemask=False) + control = np.array([(1, 10, ('A', 1))], + dtype=[('f0', int), + ('f1', int), + ('f2', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + +class TestAppendFields: + # Test append_fields + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_append_single(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, 'A', data=[10, 20, 30]) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('A', int)],) + assert_equal(test, control) + + def test_append_double(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) + control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], + mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], + dtype=[('f0', int), ('A', int), ('B', int)],) + assert_equal(test, control) + + def test_append_on_flex(self): + # Test append_fields on flexible type arrays + z = self.data[-1] + test = append_fields(z, 'C', data=[10, 20, 30]) + control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], + mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('C', int)],) + assert_equal(test, control) + + def test_append_on_nested(self): + # Test append_fields on nested fields + w = self.data[0] + test = append_fields(w, 'C', data=[10, 20, 30]) + control = ma.array([(1, (2, 3.0), 10), + (4, (5, 6.0), 20), + (-1, (-1, -1.), 30)], + mask=[( + 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], + dtype=[('a', int), + ('b', [('ba', float), ('bb', int)]), + ('C', int)],) + assert_equal(test, control) + + +class TestStackArrays: + # Test stack_arrays + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test stack_arrays on single arrays + (_, 
x, _, _) = self.data + test = stack_arrays((x,)) + assert_equal(test, x) + assert_(test is x) + + test = stack_arrays(x) + assert_equal(test, x) + assert_(test is x) + + def test_unnamed_fields(self): + # Tests combinations of arrays w/o named fields + (_, x, y, _) = self.data + + test = stack_arrays((x, x), usemask=False) + control = np.array([1, 2, 1, 2]) + assert_equal(test, control) + + test = stack_arrays((x, y), usemask=False) + control = np.array([1, 2, 10, 20, 30]) + assert_equal(test, control) + + test = stack_arrays((y, x), usemask=False) + control = np.array([10, 20, 30, 1, 2]) + assert_equal(test, control) + + def test_unnamed_and_named_fields(self): + # Test combination of arrays w/ & w/o named fields + (_, x, _, z) = self.data + + test = stack_arrays((x, z)) + control = ma.array([(1, -1, -1), (2, -1, -1), + (-1, 'A', 1), (-1, 'B', 2)], + mask=[(0, 1, 1), (0, 1, 1), + (1, 0, 0), (1, 0, 0)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + + def test_matching_named_fields(self): + # Test combination of arrays w/ matching field names + (_, x, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + test = stack_arrays((z, zz)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, zz, x)) + ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] + control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), + ('a', 10., 100., -1), ('b', 20., 200., -1), + ('c', 30., 300., -1), + (-1, -1, -1, 1), (-1, -1, -1, 2)], + dtype=ndtype, + mask=[(0, 0, 1, 1), (0, 0, 1, 1), + (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), + (1, 1, 1, 0), (1, 1, 1, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_defaults(self): + # Test defaults: no exception raised if keys of defaults are not fields. 
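+        # (Editorial clarification: `defaults` maps field names to the fill
+        # value used where a stacked array lacks that field; keys naming no
+        # field, like 'D' below, are ignored rather than raising.)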
+ (_, _, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} + test = stack_arrays((z, zz), defaults=defaults) + control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_autoconversion(self): + # Tests autoconversion + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + with assert_raises(TypeError): + stack_arrays((a, b), autoconvert=False) + + def test_checktitles(self): + # Test using titles in the field names + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_subdtype(self): + z = np.array([ + ('A', 1), ('B', 2) + ], dtype=[('A', '|S3'), ('B', float, (1,))]) + zz = np.array([ + ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) + + res = stack_arrays((z, zz)) + expected = ma.array( + data=[ + (b'A', [1.0], 0), + (b'B', [2.0], 0), + (b'a', [10.0], 100.0), + (b'b', [20.0], 200.0), + (b'c', [30.0], 300.0)], + mask=[ + (False, [False], True), + (False, [False], True), + (False, [False], False), + (False, [False], False), + (False, [False], False) + ], + dtype=zz.dtype + ) + assert_equal(res.dtype, expected.dtype) + assert_equal(res, expected) + assert_equal(res.mask, expected.mask) + + +class TestJoinBy: + def setup_method(self): + self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + + def test_inner_join(self): + # Basic test of join_by + a, b = self.a, self.b + + test = join_by('a', a, b, jointype='inner') + control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), + (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), + (9, 59, 69, 109, 104)], + dtype=[('a', int), ('b1', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_join(self): + a, b = self.a, self.b + + # Fixme, this test is broken + #test = join_by(('a', 'b'), a, b) + #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), + # (7, 57, 107, 102), (8, 58, 108, 103), + # (9, 59, 109, 104)], + # dtype=[('a', int), ('b', int), + # ('c', int), ('d', int)]) + #assert_equal(test, control) + + join_by(('a', 'b'), a, b) + np.array([(5, 55, 105, 100), (6, 56, 106, 101), + (7, 57, 107, 102), (8, 58, 108, 103), + (9, 59, 109, 104)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + + def test_join_subdtype(self): + # tests the bug in https://stackoverflow.com/q/44769632/102441 + foo = np.array([(1,)], + dtype=[('key', int)]) + bar = np.array([(1, np.array([1, 2, 3]))], + dtype=[('key', int), ('value', 'uint16', 3)]) + res = join_by('key', foo, bar) + assert_equal(res, bar.view(ma.MaskedArray)) + + def test_outer_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'outer') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (5, 65, -1, 100), (6, 56, 106, -1), + (6, 66, -1, 101), (7, 57, 107, -1), + (7, 67, -1, 102), (8, 58, 108, -1), + (8, 68, -1, 103), (9, 59, 109, -1), + (9, 69, -1, 104), (10, 70, -1, 105), + (11, 71, -1, 106), (12, 72, -1, 107), + (13, 73, -1, 108), (14, 74, -1, 109)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_leftouter_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'leftouter') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (6, 56, 106, -1), (7, 57, 107, -1), + (8, 58, 108, -1), (9, 59, 109, -1)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1)], + dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) + assert_equal(test, control) + + def 
test_different_field_order(self):
+ # gh-8940
+ a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+ # this should not give a FutureWarning:
+ j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
+ assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
+
+ def test_duplicate_keys(self):
+ a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+ assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+
+ def test_same_name_different_dtypes_key(self):
+ a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ expected_dtype = np.dtype([
+ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+ assert_equal(res.dtype, expected_dtype)
[...]
+IS_64BIT = sys.maxsize > 2**32
+
+
+def _add_keepdims(func):
+ """ hack in keepdims behavior into a function taking an axis """
+ @functools.wraps(func)
+ def wrapped(a, axis, **kwargs):
+ res = func(a, axis=axis, **kwargs)
+ if axis is None:
+ axis = 0 # res is now a scalar, so we can insert this anywhere
+ return np.expand_dims(res, axis=axis)
+ return wrapped
+
+
+class TestTakeAlongAxis:
+ def test_argequivalent(self):
+ """ Test it translates from arg<func> to <func> """
+ from numpy.random import rand
+ a = rand(3, 4, 5)
+
+ funcs = [
+ (np.sort, np.argsort, {}),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), {}),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), {}),
+ #(np.partition, np.argpartition, dict(kth=2)),
+ ]
+
+ for func, argfunc, kwargs in funcs:
+ for axis in list(range(a.ndim)) + [None]:
+ a_func = func(a, axis=axis, **kwargs)
+ ai_func = argfunc(a, axis=axis, **kwargs)
+ assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+ def test_invalid(self):
+ """ Test it errors when indices has too few dimensions """
+ a = np.ones((10, 10))
+ ai = np.ones((10, 2), dtype=np.intp)
+
+ # sanity check
+ take_along_axis(a, ai, axis=1)
+
+ # not enough indices
+ assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+ # bool arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+ # float arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+ # invalid axis
+ assert_raises(AxisError, take_along_axis, a, ai, axis=10)
+ # invalid indices
+ assert_raises(ValueError, take_along_axis, a, ai, axis=None)
+
+ def test_empty(self):
+ """ Test everything is ok with empty results, even with inserted dims """
+ a = np.ones((3, 4, 5))
+ ai = np.ones((3, 0, 5), dtype=np.intp)
+
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, ai.shape)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.ones((1, 2, 5), dtype=np.intp)
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis:
+ def test_replace_max(self):
+ a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+ for axis in list(range(a_base.ndim)) + [None]:
+ # we mutate this in the loop
+ a = a_base.copy()
+
+ # replace the max with a small value
+ i_max = _add_keepdims(np.argmax)(a, axis=axis)
+ put_along_axis(a, i_max, -99, axis=axis)
+
+ # find the new minimum, which should be where the max was
+ i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+ assert_equal(i_min, i_max)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+ put_along_axis(a, ai, 20, axis=1)
+ assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+ def test_invalid(self):
+ """ Test
invalid inputs """ + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + indices = np.array([[0], [1]]) + values = np.array([[2], [1]]) + + # sanity check + a = a_base.copy() + put_along_axis(a, indices, values, axis=0) + assert np.all(a == [[2, 2, 2], [1, 1, 1]]) + + # invalid indices + a = a_base.copy() + with assert_raises(ValueError) as exc: + put_along_axis(a, indices, values, axis=None) + assert "single dimension" in str(exc.exception) + + +class TestApplyAlongAxis: + def test_simple(self): + a = np.ones((20, 10), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1])) + + def test_simple101(self): + a = np.ones((10, 101), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1])) + + def test_3d(self): + a = np.arange(27).reshape((3, 3, 3)) + assert_array_equal(apply_along_axis(np.sum, 0, a), + [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) + + def test_preserve_subclass(self): + def double(row): + return row * 2 + + class MyNDArray(np.ndarray): + pass + + m = np.array([[0, 1], [2, 3]]).view(MyNDArray) + expected = np.array([[0, 2], [4, 6]]).view(MyNDArray) + + result = apply_along_axis(double, 0, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + result = apply_along_axis(double, 1, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + def test_subclass(self): + class MinimalSubclass(np.ndarray): + data = 1 + + def minimal_function(array): + return array.data + + a = np.zeros((6, 3)).view(MinimalSubclass) + + assert_array_equal( + apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) + ) + + def test_scalar_array(self, cls=np.ndarray): + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(np.sum, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + def test_0d_array(self, cls=np.ndarray): + def sum_to_0d(x): + """ Sum x, returning a 0d array of the same class """ + assert_equal(x.ndim, 1) + return np.squeeze(np.sum(x, keepdims=True)) + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(sum_to_0d, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + res = apply_along_axis(sum_to_0d, 1, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) + + def test_axis_insertion(self, cls=np.ndarray): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + return (x[::-1] * x[1:, None]).view(cls) + + a2d = np.arange(6 * 3).reshape((6, 3)) + + # 2d insertion along first axis + actual = apply_along_axis(f1to2, 0, a2d) + expected = np.stack([ + f1to2(a2d[:, i]) for i in range(a2d.shape[1]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 2d insertion along last axis + actual = apply_along_axis(f1to2, 1, a2d) + expected = np.stack([ + f1to2(a2d[i, :]) for i in range(a2d.shape[0]) + ], axis=0).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 3d insertion along middle axis + a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3)) + + actual = apply_along_axis(f1to2, 1, a3d) + expected = np.stack([ + np.stack([ + f1to2(a3d[i, :, j]) for i in range(a3d.shape[0]) + ], axis=0) + for j in range(a3d.shape[2]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + def test_subclass_preservation(self): + class MinimalSubclass(np.ndarray): + pass + 
self.test_scalar_array(MinimalSubclass) + self.test_0d_array(MinimalSubclass) + self.test_axis_insertion(MinimalSubclass) + + def test_axis_insertion_ma(self): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + res = x[::-1] * x[1:, None] + return np.ma.masked_where(res % 5 == 0, res) + a = np.arange(6 * 3).reshape((6, 3)) + res = apply_along_axis(f1to2, 0, a) + assert_(isinstance(res, np.ma.masked_array)) + assert_equal(res.ndim, 3) + assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask) + assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask) + assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask) + + def test_tuple_func1d(self): + def sample_1d(x): + return x[1], x[0] + res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) + assert_array_equal(res, np.array([[2, 1], [4, 3]])) + + def test_empty(self): + # can't apply_along_axis when there's no chance to call the function + def never_call(x): + assert_(False) # should never be reached + + a = np.empty((0, 0)) + assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) + assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) + + # but it's sometimes ok with some non-zero dimensions + def empty_to_1(x): + assert_(len(x) == 0) + return 1 + + a = np.empty((10, 0)) + actual = np.apply_along_axis(empty_to_1, 1, a) + assert_equal(actual, np.ones(10)) + assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) + + def test_with_iterable_object(self): + # from issue 5248 + d = np.array([ + [{1, 11}, {2, 22}, {3, 33}], + [{4, 44}, {5, 55}, {6, 66}] + ]) + actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) + expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}]) + + assert_equal(actual, expected) + + # issue 8642 - assert_equal doesn't detect this! 
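+ # (assert_equal compares element values only; it would not notice if
+ # apply_along_axis had returned a different container type with equal
+ # contents, so the explicit per-element type check below covers that.)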
+ for i in np.ndindex(actual.shape): + assert_equal(type(actual[i]), type(expected[i])) + + +class TestApplyOverAxes: + def test_simple(self): + a = np.arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(np.sum, a, [0, 2]) + assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) + + +class TestExpandDims: + def test_functionality(self): + s = (2, 3, 4, 5) + a = np.empty(s) + for axis in range(-5, 4): + b = expand_dims(a, axis) + assert_(b.shape[axis] == 1) + assert_(np.squeeze(b).shape == s) + + def test_axis_tuple(self): + a = np.empty((3, 3, 3)) + assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3) + assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1) + assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1) + assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3) + + def test_axis_out_of_range(self): + s = (2, 3, 4, 5) + a = np.empty(s) + assert_raises(AxisError, expand_dims, a, -6) + assert_raises(AxisError, expand_dims, a, 5) + + a = np.empty((3, 3, 3)) + assert_raises(AxisError, expand_dims, a, (0, -6)) + assert_raises(AxisError, expand_dims, a, (0, 5)) + + def test_repeated_axis(self): + a = np.empty((3, 3, 3)) + assert_raises(ValueError, expand_dims, a, axis=(1, 1)) + + def test_subclasses(self): + a = np.arange(10).reshape((2, 5)) + a = np.ma.array(a, mask=a % 3 == 0) + + expanded = np.expand_dims(a, axis=1) + assert_(isinstance(expanded, np.ma.MaskedArray)) + assert_equal(expanded.shape, (2, 1, 5)) + assert_equal(expanded.mask.shape, (2, 1, 5)) + + +class TestArraySplit: + def test_integer_0_split(self): + a = np.arange(10) + assert_raises(ValueError, array_split, a, 0) + + def test_integer_split(self): + a = np.arange(10) + res = array_split(a, 1) + desired = [np.arange(10)] + compare_results(res, desired) + + res = array_split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), + np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 5) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), + np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), + np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) + desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), + np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), + np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + 
np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10), np.array([])]
+ compare_results(res, desired)
+
+ def test_integer_split_2D_rows(self):
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3, axis=0)
+ tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.zeros((0, 10))]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+
+ # Same thing for manual splits:
+ res = array_split(a, [0, 1], axis=0)
+ tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
+ np.array([np.arange(10)])]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+
+ def test_integer_split_2D_cols(self):
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3, axis=-1)
+ desired = [np.array([np.arange(4), np.arange(4)]),
+ np.array([np.arange(4, 7), np.arange(4, 7)]),
+ np.array([np.arange(7, 10), np.arange(7, 10)])]
+ compare_results(res, desired)
+
+ def test_integer_split_2D_default(self):
+ """ This will fail if we change default axis
+ """
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3)
+ tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.zeros((0, 10))]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+ # perhaps should check higher dimensions
+
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ def test_integer_split_2D_rows_greater_max_int32(self):
+ a = np.broadcast_to([0], (1 << 32, 2))
+ res = array_split(a, 4)
+ chunk = np.broadcast_to([0], (1 << 30, 2))
+ tgt = [chunk] * 4
+ for i in range(len(tgt)):
+ assert_equal(res[i].shape, tgt[i].shape)
+
+ def test_index_split_simple(self):
+ a = np.arange(10)
+ indices = [1, 5, 7]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
+ np.arange(7, 10)]
+ compare_results(res, desired)
+
+ def test_index_split_low_bound(self):
+ a = np.arange(10)
+ indices = [0, 5, 7]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+ np.arange(7, 10)]
+ compare_results(res, desired)
+
+ def test_index_split_high_bound(self):
+ a = np.arange(10)
+ indices = [0, 5, 7, 10, 12]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+ np.arange(7, 10), np.array([]), np.array([])]
+ compare_results(res, desired)
+
+
+class TestSplit:
+ # The split function is essentially the same as array_split,
+ # except that it tests whether splitting will result in an
+ # equal split. Only that case is tested here.
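+ #
+ # An illustrative comparison (a sketch, not executed by this suite):
+ # >>> np.array_split(np.arange(10), 3)  # uneven split is fine: sizes 4, 3, 3
+ # >>> np.split(np.arange(10), 3)        # raises ValueError: no equal division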
+ + def test_equal_split(self): + a = np.arange(10) + res = split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + def test_unequal_split(self): + a = np.arange(10) + assert_raises(ValueError, split, a, 3) + + +class TestColumnStack: + def test_non_iterable(self): + assert_raises(TypeError, column_stack, 1) + + def test_1D_arrays(self): + # example from docstring + a = np.array((1, 2, 3)) + b = np.array((2, 3, 4)) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_2D_arrays(self): + # same as hstack 2D docstring example + a = np.array([[1], [2], [3]]) + b = np.array([[2], [3], [4]]) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + column_stack(np.arange(3) for _ in range(2)) + + +class TestDstack: + def test_non_iterable(self): + assert_raises(TypeError, dstack, 1) + + def test_0D_array(self): + a = np.array(1) + b = np.array(2) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = np.array([1]) + b = np.array([2]) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = np.array([[1], [2]]) + b = np.array([[1], [2]]) + res = dstack([a, b]) + desired = np.array([[[1, 1]], [[2, 2, ]]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = np.array([1, 2]) + b = np.array([1, 2]) + res = dstack([a, b]) + desired = np.array([[[1, 1], [2, 2]]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + dstack(np.arange(3) for _ in range(2)) + + +# array_split has more comprehensive test of splitting. +# only do simple test on hsplit, vsplit, and dsplit +class TestHsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, hsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + try: + hsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + res = hsplit(a, 2) + desired = [np.array([1, 2]), np.array([3, 4])] + compare_results(res, desired) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = hsplit(a, 2) + desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] + compare_results(res, desired) + + +class TestVsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, vsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, vsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + try: + vsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] + compare_results(res, desired) + + +class TestDsplit: + # Only testing for integer splits. 
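+ # dsplit always splits along the third axis, so its input must be at
+ # least 3-D; e.g. (an illustrative sketch): np.dsplit(np.ones((2, 2, 2)), 2)
+ # returns two arrays of shape (2, 2, 1).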
+ def test_non_iterable(self): + assert_raises(ValueError, dsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, dsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + assert_raises(ValueError, dsplit, a, 2) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + try: + dsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_3D_array(self): + a = np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) + + +class TestSqueeze: + def test_basic(self): + from numpy.random import rand + + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) + assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) + assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) + + # Squeezing to 0-dim should still give an ndarray + a = [[[1.5]]] + res = np.squeeze(a) + assert_equal(res, 1.5) + assert_equal(res.ndim, 0) + assert_equal(type(res), np.ndarray) + + +class TestKron: + def test_basic(self): + # Using 0-dimensional ndarray + a = np.array(1) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[1, 2], [3, 4]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array(1) + assert_array_equal(np.kron(a, b), k) + + # Using 1-dimensional ndarray + a = np.array([3]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[3, 6], [9, 12]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([3]) + assert_array_equal(np.kron(a, b), k) + + # Using 3-dimensional ndarray + a = np.array([[[1]], [[2]]]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([[[1]], [[2]]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + + def test_return_type(self): + class myarray(np.ndarray): + __array_priority__ = 1.0 + + a = np.ones([2, 2]) + ma = myarray(a.shape, a.dtype, a.data) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), myarray) + assert_equal(type(kron(ma, a)), myarray) + + @pytest.mark.parametrize( + "array_class", [np.asarray, np.asmatrix] + ) + def test_kron_smoke(self, array_class): + a = array_class(np.ones([3, 3])) + b = array_class(np.ones([3, 3])) + k = array_class(np.ones([9, 9])) + + assert_array_equal(np.kron(a, b), k) + + def test_kron_ma(self): + x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) + k = np.ma.array(np.diag([1, 4, 4, 16]), + mask=~np.array(np.identity(4), dtype=bool)) + + assert_array_equal(k, np.kron(x, x)) + + @pytest.mark.parametrize( + "shape_a,shape_b", [ + ((1, 1), (1, 1)), + ((1, 2, 3), (4, 5, 6)), + ((2, 2), (2, 2, 2)), + ((1, 0), (1, 1)), + ((2, 0, 2), (2, 2)), + ((2, 0, 0, 2), (2, 0, 2)), + ]) + def test_kron_shape(self, shape_a, shape_b): + a = np.ones(shape_a) + b = np.ones(shape_b) + normalised_shape_a = (1,) * max(0, len(shape_b) - len(shape_a)) + shape_a + normalised_shape_b = (1,) * max(0, len(shape_a) - len(shape_b)) + shape_b + expected_shape = np.multiply(normalised_shape_a, normalised_shape_b) + + k = np.kron(a, b) + assert np.array_equal( + k.shape, expected_shape), "Unexpected shape from kron" + + +class TestTile: + 
def test_basic(self): + a = np.array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) + + def test_tile_one_repetition_on_array_gh4679(self): + a = np.arange(5) + b = tile(a, 1) + b += 2 + assert_equal(a, np.arange(5)) + + def test_empty(self): + a = np.array([[[]]]) + b = np.array([[], []]) + c = tile(b, 2).shape + d = tile(a, (3, 2, 5)).shape + assert_equal(c, (2, 0)) + assert_equal(d, (3, 2, 0)) + + def test_kroncompare(self): + from numpy.random import randint + + reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] + for s in shape: + b = randint(0, 10, size=s) + for r in reps: + a = np.ones(r, b.dtype) + large = tile(b, r) + klarge = kron(a, b) + assert_equal(large, klarge) + + +class TestMayShareMemory: + def test_basic(self): + d = np.ones((50, 60)) + d2 = np.ones((30, 60, 6)) + assert_(np.may_share_memory(d, d)) + assert_(np.may_share_memory(d, d[::-1])) + assert_(np.may_share_memory(d, d[::2])) + assert_(np.may_share_memory(d, d[1:, ::-1])) + + assert_(not np.may_share_memory(d[::-1], d2)) + assert_(not np.may_share_memory(d[::2], d2)) + assert_(not np.may_share_memory(d[1:, ::-1], d2)) + assert_(np.may_share_memory(d2[1:, ::-1], d2)) + + +# Utility +def compare_results(res, desired): + """Compare lists of arrays.""" + for x, y in zip(res, desired, strict=False): + assert_array_equal(x, y) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_stride_tricks.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_stride_tricks.py new file mode 100644 index 0000000..fe40c95 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_stride_tricks.py @@ -0,0 +1,656 @@ +import pytest + +import numpy as np +from numpy._core._rational_tests import rational +from numpy.lib._stride_tricks_impl import ( + _broadcast_shape, + as_strided, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + sliding_window_view, +) +from numpy.testing import ( + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) + + +def assert_shapes_correct(input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = broadcast_arrays(*inarrays) + outshapes = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + assert_equal(outshapes, expected) + + +def assert_incompatible_shapes_raise(input_shapes): + # Broadcast a list of arrays with the given (incompatible) input shapes + # and check that they raise a ValueError. + + inarrays = [np.zeros(s) for s in input_shapes] + assert_raises(ValueError, broadcast_arrays, *inarrays) + + +def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): + # Broadcast two shapes against each other and check that the data layout + # is the same as if a ufunc did the broadcasting. + + x0 = np.zeros(shape0, dtype=int) + # Note that multiply.reduce's identity element is 1.0, so when shape1==(), + # this gives the desired n==1. 
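+ # (For an empty shape1 the reduction comes back as the float 1.0,
+ # hence the int() cast on the next line.)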
+ n = int(np.multiply.reduce(shape1)) + x1 = np.arange(n).reshape(shape1) + if transposed: + x0 = x0.T + x1 = x1.T + if flipped: + x0 = x0[::-1] + x1 = x1[::-1] + # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the + # result should be exactly the same as the broadcasted view of x1. + y = x0 + x1 + b0, b1 = broadcast_arrays(x0, x1) + assert_array_equal(y, b1) + + +def test_same(): + x = np.arange(10) + y = np.arange(10) + bx, by = broadcast_arrays(x, y) + assert_array_equal(x, bx) + assert_array_equal(y, by) + +def test_broadcast_kwargs(): + # ensure that a TypeError is appropriately raised when + # np.broadcast_arrays() is called with any keyword + # argument other than 'subok' + x = np.arange(10) + y = np.arange(10) + + with assert_raises_regex(TypeError, 'got an unexpected keyword'): + broadcast_arrays(x, y, dtype='float64') + + +def test_one_off(): + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) + bx, by = broadcast_arrays(x, y) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + by0 = bx0.T + assert_array_equal(bx0, bx) + assert_array_equal(by0, by) + + +def test_same_input_shapes(): + # Check that the final shape is just the input shape. + + data = [ + (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + assert_shapes_correct(input_shapes, shape) + # Double input. + input_shapes2 = [shape, shape] + assert_shapes_correct(input_shapes2, shape) + # Triple input. + input_shapes3 = [shape, shape, shape] + assert_shapes_correct(input_shapes3, shape) + + +def test_two_compatible_by_ones_input_shapes(): + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_two_compatible_by_prepending_ones_input_shapes(): + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. + + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_incompatible_shapes_raise_valueerror(): + # Check that a ValueError is raised for incompatible shapes. 
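+ # Shapes are incompatible when, aligned from the right, some axis pair
+ # differs with neither side equal to 1 -- e.g. (3,) versus (4,).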
+
+ data = [
+ [(3,), (4,)],
+ [(2, 3), (2,)],
+ [(3,), (3,), (4,)],
+ [(1, 3, 4), (2, 3, 3)],
+ ]
+ for input_shapes in data:
+ assert_incompatible_shapes_raise(input_shapes)
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_incompatible_shapes_raise(input_shapes[::-1])
+
+
+def test_same_as_ufunc():
+ # Check that the data layout is the same as if a ufunc did the operation.
+
+ data = [
+ [[(1,), (3,)], (3,)],
+ [[(1, 3), (3, 3)], (3, 3)],
+ [[(3, 1), (3, 3)], (3, 3)],
+ [[(1, 3), (3, 1)], (3, 3)],
+ [[(1, 1), (3, 3)], (3, 3)],
+ [[(1, 1), (1, 3)], (1, 3)],
+ [[(1, 1), (3, 1)], (3, 1)],
+ [[(1, 0), (0, 0)], (0, 0)],
+ [[(0, 1), (0, 0)], (0, 0)],
+ [[(1, 0), (0, 1)], (0, 0)],
+ [[(1, 1), (0, 0)], (0, 0)],
+ [[(1, 1), (1, 0)], (1, 0)],
+ [[(1, 1), (0, 1)], (0, 1)],
+ [[(), (3,)], (3,)],
+ [[(3,), (3, 3)], (3, 3)],
+ [[(3,), (3, 1)], (3, 3)],
+ [[(1,), (3, 3)], (3, 3)],
+ [[(), (3, 3)], (3, 3)],
+ [[(1, 1), (3,)], (1, 3)],
+ [[(1,), (3, 1)], (3, 1)],
+ [[(1,), (1, 3)], (1, 3)],
+ [[(), (1, 3)], (1, 3)],
+ [[(), (3, 1)], (3, 1)],
+ [[(), (0,)], (0,)],
+ [[(0,), (0, 0)], (0, 0)],
+ [[(0,), (0, 1)], (0, 0)],
+ [[(1,), (0, 0)], (0, 0)],
+ [[(), (0, 0)], (0, 0)],
+ [[(1, 1), (0,)], (1, 0)],
+ [[(1,), (0, 1)], (0, 1)],
+ [[(1,), (1, 0)], (1, 0)],
+ [[(), (1, 0)], (1, 0)],
+ [[(), (0, 1)], (0, 1)],
+ ]
+ for input_shapes, expected_shape in data:
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1])
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_same_as_ufunc(input_shapes[1], input_shapes[0])
+ # Try them transposed, too.
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
+ # ... and flipped for non-rank-0 inputs in order to test negative
+ # strides.
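+ # (A 0-d array cannot be reversed with [::-1], so shapes containing
+ # () are skipped.)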
+ if () not in input_shapes: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) + + +def test_broadcast_to_succeeds(): + data = [ + [np.array(0), (0,), np.array(0)], + [np.array(0), (1,), np.zeros(1)], + [np.array(0), (3,), np.zeros(3)], + [np.ones(1), (1,), np.ones(1)], + [np.ones(1), (2,), np.ones(2)], + [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))], + [np.arange(3), (3,), np.arange(3)], + [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)], + [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])], + # test if shape is not a tuple + [np.ones(0), 0, np.ones(0)], + [np.ones(1), 1, np.ones(1)], + [np.ones(1), 2, np.ones(2)], + # these cases with size 0 are strange, but they reproduce the behavior + # of broadcasting with ufuncs (see test_same_as_ufunc above) + [np.ones(1), (0,), np.ones(0)], + [np.ones((1, 2)), (0, 2), np.ones((0, 2))], + [np.ones((2, 1)), (2, 0), np.ones((2, 0))], + ] + for input_array, shape, expected in data: + actual = broadcast_to(input_array, shape) + assert_array_equal(expected, actual) + + +def test_broadcast_to_raises(): + data = [ + [(0,), ()], + [(1,), ()], + [(3,), ()], + [(3,), (1,)], + [(3,), (2,)], + [(3,), (4,)], + [(1, 2), (2, 1)], + [(1, 1), (1,)], + [(1,), -1], + [(1,), (-1,)], + [(1, 2), (-1, 2)], + ] + for orig_shape, target_shape in data: + arr = np.zeros(orig_shape) + assert_raises(ValueError, lambda: broadcast_to(arr, target_shape)) + + +def test_broadcast_shape(): + # tests internal _broadcast_shape + # _broadcast_shape is already exercised indirectly by broadcast_arrays + # _broadcast_shape is also exercised by the public broadcast_shapes function + assert_equal(_broadcast_shape(), ()) + assert_equal(_broadcast_shape([1, 2]), (2,)) + assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1)) + assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,)) + bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 + assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) + + +def test_broadcast_shapes_succeeds(): + # tests public broadcast_shapes + data = [ + [[], ()], + [[()], ()], + [[(7,)], (7,)], + [[(1, 2), (2,)], (1, 2)], + [[(1, 1)], (1, 1)], + [[(1, 1), (3, 4)], (3, 4)], + [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)], + [[(5, 6, 1)], (5, 6, 1)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + [[(1,), (3,)], (3,)], + [[2, (3, 2)], (3, 2)], + ] + for input_shapes, target_shape in data: + assert_equal(broadcast_shapes(*input_shapes), target_shape) + + assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2)) + assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,)) + + +def test_broadcast_shapes_raises(): + # tests public broadcast_shapes + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), 
(4,)], + [(1, 3, 4), (2, 3, 3)], + [(1, 2), (3, 1), (3, 2), (10, 5)], + [2, (2, 3)], + ] + for input_shapes in data: + assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes)) + + bad_args = [(2,)] * 32 + [(3,)] * 32 + assert_raises(ValueError, lambda: broadcast_shapes(*bad_args)) + + +def test_as_strided(): + a = np.array([None]) + a_view = as_strided(a) + expected = np.array([None]) + assert_array_equal(a_view, np.array([None])) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + expected = np.array([1, 3]) + assert_array_equal(a_view, expected) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) + expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert_array_equal(a_view, expected) + + # Regression test for gh-5081 + dt = np.dtype([('num', 'i4'), ('obj', 'O')]) + a = np.empty((4,), dtype=dt) + a['num'] = np.arange(1, 5) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + expected_num = [[1, 2, 3, 4]] * 3 + expected_obj = [[None] * 4] * 3 + assert_equal(a_view.dtype, dt) + assert_array_equal(expected_num, a_view['num']) + assert_array_equal(expected_obj, a_view['obj']) + + # Make sure that void types without fields are kept unchanged + a = np.empty((4,), dtype='V4') + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Make sure that the only type that could fail is properly handled + dt = np.dtype({'names': [''], 'formats': ['V4']}) + a = np.empty((4,), dtype=dt) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Custom dtypes should not be lost (gh-9161) + r = [rational(i) for i in range(4)] + a = np.array(r, dtype=rational) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + assert_array_equal([r] * 3, a_view) + + +class TestSlidingWindowView: + def test_1d(self): + arr = np.arange(5) + arr_view = sliding_window_view(arr, 2) + expected = np.array([[0, 1], + [1, 2], + [2, 3], + [3, 4]]) + assert_array_equal(arr_view, expected) + + def test_2d(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + shape = (2, 2) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1], [10, 11]], + [[1, 2], [11, 12]], + [[2, 3], [12, 13]]], + [[[10, 11], [20, 21]], + [[11, 12], [21, 22]], + [[12, 13], [22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_with_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + arr_view = sliding_window_view(arr, 3, 0) + expected = np.array([[[0, 10, 20], + [1, 11, 21], + [2, 12, 22], + [3, 13, 23]]]) + assert_array_equal(arr_view, expected) + + def test_2d_repeated_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + arr_view = sliding_window_view(arr, (2, 3), (1, 1)) + expected = np.array([[[[0, 1, 2], + [1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_without_axis(self): + i, j = np.ogrid[:4, :4] + arr = 10 * i + j + shape = (2, 3) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1, 2], [10, 11, 12]], + [[1, 2, 3], [11, 12, 13]]], + [[[10, 11, 12], [20, 21, 22]], + [[11, 12, 13], [21, 22, 23]]], + [[[20, 21, 22], [30, 31, 32]], + [[21, 22, 23], [31, 32, 33]]]]) + assert_array_equal(arr_view, expected) + + def test_errors(self): + i, j = np.ogrid[:4, :4] + arr = 10 * i + j + with pytest.raises(ValueError, 
match='cannot contain negative values'):
+ sliding_window_view(arr, (-1, 3))
+ with pytest.raises(
+ ValueError,
+ match='must provide window_shape for all dimensions of `x`'):
+ sliding_window_view(arr, (1,))
+ with pytest.raises(
+ ValueError,
+ match='Must provide matching length window_shape and axis'):
+ sliding_window_view(arr, (1, 3, 4), axis=(0, 1))
+ with pytest.raises(
+ ValueError,
+ match='window shape cannot be larger than input array'):
+ sliding_window_view(arr, (5, 5))
+
+ def test_writeable(self):
+ arr = np.arange(5)
+ view = sliding_window_view(arr, 2, writeable=False)
+ assert_(not view.flags.writeable)
+ with pytest.raises(
+ ValueError,
+ match='assignment destination is read-only'):
+ view[0, 0] = 3
+ view = sliding_window_view(arr, 2, writeable=True)
+ assert_(view.flags.writeable)
+ view[0, 1] = 3
+ assert_array_equal(arr, np.array([0, 3, 2, 3, 4]))
+
+ def test_subok(self):
+ class MyArray(np.ndarray):
+ pass
+
+ arr = np.arange(5).view(MyArray)
+ assert_(not isinstance(sliding_window_view(arr, 2,
+ subok=False),
+ MyArray))
+ assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray))
+ # Default behavior
+ assert_(not isinstance(sliding_window_view(arr, 2), MyArray))
+
+
+def test_as_strided_writeable():
+ arr = np.ones(10)
+ view = as_strided(arr, writeable=False)
+ assert_(not view.flags.writeable)
+
+ # Check that writeable also is fine:
+ view = as_strided(arr, writeable=True)
+ assert_(view.flags.writeable)
+ view[...] = 3
+ assert_array_equal(arr, np.full_like(arr, 3))
+
+ # Test that things do not break down for readonly:
+ arr.flags.writeable = False
+ view = as_strided(arr, writeable=False)
+ view = as_strided(arr, writeable=True)
+ assert_(not view.flags.writeable)
+
+
+class VerySimpleSubClass(np.ndarray):
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, subok=True, **kwargs).view(cls)
+
+
+class SimpleSubClass(VerySimpleSubClass):
+ def __new__(cls, *args, **kwargs):
+ self = np.array(*args, subok=True, **kwargs).view(cls)
+ self.info = 'simple'
+ return self
+
+ def __array_finalize__(self, obj):
+ self.info = getattr(obj, 'info', '') + ' finalized'
+
+
+def test_subclasses():
+ # test that subclass is preserved only if subok=True
+ a = VerySimpleSubClass([1, 2, 3, 4])
+ assert_(type(a) is VerySimpleSubClass)
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
+ assert_(type(a_view) is np.ndarray)
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
+ assert_(type(a_view) is VerySimpleSubClass)
+ # test that if a subclass has __array_finalize__, it is used
+ a = SimpleSubClass([1, 2, 3, 4])
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+
+ # similar tests for broadcast_arrays
+ b = np.arange(len(a)).reshape(-1, 1)
+ a_view, b_view = broadcast_arrays(a, b)
+ assert_(type(a_view) is np.ndarray)
+ assert_(type(b_view) is np.ndarray)
+ assert_(a_view.shape == b_view.shape)
+ a_view, b_view = broadcast_arrays(a, b, subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+ assert_(type(b_view) is np.ndarray)
+ assert_(a_view.shape == b_view.shape)
+
+ # and for broadcast_to
+ shape = (2, 4)
+ a_view = broadcast_to(a, shape)
+ assert_(type(a_view) is np.ndarray)
+ assert_(a_view.shape == shape)
+ a_view = broadcast_to(a, shape, subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+ assert_(a_view.shape
== shape) + + +def test_writeable(): + # broadcast_to should return a readonly array + original = np.array([1, 2, 3]) + result = broadcast_to(original, (2, 3)) + assert_equal(result.flags.writeable, False) + assert_raises(ValueError, result.__setitem__, slice(None), 0) + + # but the result of broadcast_arrays needs to be writeable, to + # preserve backwards compatibility + test_cases = [((False,), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): + # This will change to False in a future version + if array_is_broadcast: + with assert_warns(FutureWarning): + assert_equal(result.flags.writeable, True) + with assert_warns(DeprecationWarning): + result[:] = 0 + # Warning not emitted, writing to the array resets it + assert_equal(result.flags.writeable, True) + else: + # No warning: + assert_equal(result.flags.writeable, True) + + for results in [broadcast_arrays(original), + broadcast_arrays(0, original)]: + for result in results: + # resets the warn_on_write DeprecationWarning + result.flags.writeable = True + # check: no warning emitted + assert_equal(result.flags.writeable, True) + result[:] = 0 + + # keep readonly input readonly + original.flags.writeable = False + _, result = broadcast_arrays(0, original) + assert_equal(result.flags.writeable, False) + + # regression test for GH6491 + shape = (2,) + strides = [0] + tricky_array = as_strided(np.array(0), shape, strides) + other = np.zeros((1,)) + first, second = broadcast_arrays(tricky_array, other) + assert_(first.shape == second.shape) + + +def test_writeable_memoryview(): + # The result of broadcast_arrays exports as a non-writeable memoryview + # because otherwise there is no good way to opt in to the new behaviour + # (i.e. you would need to set writeable to False explicitly). + # See gh-13929. + original = np.array([1, 2, 3]) + + test_cases = [((False, ), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): + # This will change to False in a future version + if array_is_broadcast: + # memoryview(result, writable=True) will give warning but cannot + # be tested using the python API. 
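+ # (memoryview() itself takes no writable flag; a writable buffer
+ # request only happens at the C level, e.g. PyObject_GetBuffer with
+ # PyBUF_WRITABLE.)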
+ assert memoryview(result).readonly + else: + assert not memoryview(result).readonly + + +def test_reference_types(): + input_array = np.array('a', dtype=object) + expected = np.array(['a'] * 3, dtype=object) + actual = broadcast_to(input_array, (3,)) + assert_array_equal(expected, actual) + + actual, _ = broadcast_arrays(input_array, np.ones(3)) + assert_array_equal(expected, actual) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_twodim_base.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_twodim_base.py new file mode 100644 index 0000000..eb6aa69 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_twodim_base.py @@ -0,0 +1,559 @@ +"""Test functions for matrix module + +""" +import pytest + +import numpy as np +from numpy import ( + add, + arange, + array, + diag, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + ones, + tri, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, + vander, + zeros, +) +from numpy.testing import ( + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +class TestEye: + def test_basic(self): + assert_equal(eye(4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + + assert_equal(eye(4, dtype='f'), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + + assert_equal(eye(3) == 1, + eye(3, dtype=bool)) + + def test_uint64(self): + # Regression test for gh-9982 + assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]])) + assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)), + array([[0, 1, 0, 0], [0, 0, 1, 0]])) + + def test_diag(self): + assert_equal(eye(4, k=1), + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, k=-1), + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_2d(self): + assert_equal(eye(4, 3), + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + + assert_equal(eye(3, 4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_diag2d(self): + assert_equal(eye(3, 4, k=2), + array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, 3, k=-2), + array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) + + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), + [[b'1', b''], [b'', b'1']]) + + def test_bool(self): + assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + + def test_order(self): + mat_c = eye(4, 3, k=-1) + mat_f = eye(4, 3, k=-1, order='F') + assert_equal(mat_c, mat_f) + assert mat_c.flags.c_contiguous + assert not mat_c.flags.f_contiguous + assert not mat_f.flags.c_contiguous + assert mat_f.flags.f_contiguous + + +class TestDiag: + def test_vector(self): + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) + for k in range(5): + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = 
zeros((7, 7)) + c = b.copy() + for k in range(5): + b[k, k + 2] = vals[k] + c[k + 2, k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) + + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') + b = zeros((5,)) + for k in range(5): + b[k] = vals[k, k] + assert_equal(diag(vals), b) + b = b * 0 + for k in range(3): + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) + for k in range(3): + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + assert_raises(ValueError, diag, [[[1]]]) + + +class TestFliplr: + def test_basic(self): + assert_raises(ValueError, fliplr, ones(4)) + a = get_mat(4) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) + + +class TestFlipud: + def test_basic(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) + + +class TestHistogram2d: + def test_simple(self): + x = array( + [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) + y = array( + [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) + H = histogram2d(x, y, (xedges, yedges))[0] + answer = array( + [[0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + assert_array_equal(H.T, answer) + H = histogram2d(x, y, xedges)[0] + assert_array_equal(H.T, answer) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) + + def test_asym(self): + x = array([1, 1, 2, 3, 4, 4, 4, 5]) + y = array([1, 3, 2, 0, 1, 2, 3, 4]) + H, xed, yed = histogram2d( + x, y, (6, 5), range=[[0, 6], [0, 5]], density=True) + answer = array( + [[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + assert_array_almost_equal(H, answer / 8., 3) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) + + def test_density(self): + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d( + x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) + answer = array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]]) / 9. + assert_array_almost_equal(H, answer, 3) + + def test_all_outliers(self): + r = np.random.rand(100) + 1. 
+ 1e6 # histogramdd rounds by decimal=6
+ H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
+ assert_array_equal(H, 0)
+
+ def test_empty(self):
+ a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
+ assert_array_max_ulp(a, array([[0.]]))
+
+ a, edge1, edge2 = histogram2d([], [], bins=4)
+ assert_array_max_ulp(a, np.zeros((4, 4)))
+
+ def test_binparameter_combination(self):
+ x = array(
+ [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
+ 0.59944483, 1])
+ y = array(
+ [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
+ 0.15886423, 1])
+ edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
+ H, xe, ye = histogram2d(x, y, (edges, 4))
+ answer = array(
+ [[2., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [1., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 1.]])
+ assert_array_equal(H, answer)
+ assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
+ H, xe, ye = histogram2d(x, y, (4, edges))
+ answer = array(
+ [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
+ assert_array_equal(H, answer)
+ assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
+
+ def test_dispatch(self):
+ class ShouldDispatch:
+ def __array_function__(self, function, types, args, kwargs):
+ return types, args, kwargs
+
+ xy = [1, 2]
+ s_d = ShouldDispatch()
+ r = histogram2d(s_d, xy)
+ # Cannot use assert_equal since that dispatches...
+ assert_(r == ((ShouldDispatch,), (s_d, xy), {}))
+ r = histogram2d(xy, s_d)
+ assert_(r == ((ShouldDispatch,), (xy, s_d), {}))
+ r = histogram2d(xy, xy, bins=s_d)
+ assert_(r == ((ShouldDispatch,), (xy, xy), {'bins': s_d}))
+ r = histogram2d(xy, xy, bins=[s_d, 5])
+ assert_(r == ((ShouldDispatch,), (xy, xy), {'bins': [s_d, 5]}))
+ assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])
+ r = histogram2d(xy, xy, weights=s_d)
+ assert_(r == ((ShouldDispatch,), (xy, xy), {'weights': s_d}))
+
+ @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)])
+ def test_bad_length(self, x_len, y_len):
+ x, y = np.ones(x_len), np.ones(y_len)
+ with pytest.raises(ValueError,
+ match='x and y must have the same length.'):
+ histogram2d(x, y)
+
+
+class TestTri:
+ def test_dtype(self):
+ out = array([[1, 0, 0],
+ [1, 1, 0],
+ [1, 1, 1]])
+ assert_array_equal(tri(3), out)
+ assert_array_equal(tri(3, dtype=bool), out.astype(bool))
+
+
+def test_tril_triu_ndim2():
+ for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
+ a = np.ones((2, 2), dtype=dtype)
+ b = np.tril(a)
+ c = np.triu(a)
+ assert_array_equal(b, [[1, 0], [1, 1]])
+ assert_array_equal(c, b.T)
+ # should return the same dtype as the original array
+ assert_equal(b.dtype, a.dtype)
+ assert_equal(c.dtype, a.dtype)
+
+
+def test_tril_triu_ndim3():
+ for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
+ a = np.array([
+ [[1, 1], [1, 1]],
+ [[1, 1], [1, 0]],
+ [[1, 1], [0, 0]],
+ ], dtype=dtype)
+ a_tril_desired = np.array([
+ [[1, 0], [1, 1]],
+ [[1, 0], [1, 0]],
+ [[1, 0], [0, 0]],
+ ], dtype=dtype)
+ a_triu_desired = np.array([
+ [[1, 1], [0, 1]],
+ [[1, 1], [0, 0]],
+ [[1, 1], [0, 0]],
+ ], dtype=dtype)
+ a_triu_observed = np.triu(a)
+ a_tril_observed = np.tril(a)
+ assert_array_equal(a_triu_observed, a_triu_desired)
+ assert_array_equal(a_tril_observed, a_tril_desired)
+ assert_equal(a_triu_observed.dtype, a.dtype)
+ assert_equal(a_tril_observed.dtype,
a.dtype) + + +def test_tril_triu_with_inf(): + # Issue 4859 + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3, 3), dtype='f4,f4') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + +def test_mask_indices(): + # simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + assert_array_equal(a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, k=2) + il3 = tril_indices(4, m=5) + il4 = tril_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # indexing: + assert_array_equal(a[il1], + array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) + assert_array_equal(b[il3], + array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) + + # And for assigning values: + a[il1] = -1 + assert_array_equal(a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]])) + b[il3] = -1 + assert_array_equal(b, + array([[-1, 2, 3, 4, 5], + [-1, -1, 8, 9, 10], + [-1, -1, -1, 14, 15], + [-1, -1, -1, -1, 20]])) + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + assert_array_equal(a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]])) + b[il4] = -10 + assert_array_equal(b, + array([[-10, -10, -10, 4, 5], + [-10, -10, -10, -10, 10], + [-10, -10, -10, -10, -10], + [-10, -10, -10, -10, -10]])) + + +class TestTriuIndices: + def test_triu_indices(self): + iu1 = triu_indices(4) + iu2 = triu_indices(4, k=2) + iu3 = triu_indices(4, m=5) + iu4 = triu_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # Both for indexing: + assert_array_equal(a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + assert_array_equal(b[iu3], + array([1, 2, 3, 4, 5, 7, 8, 9, + 10, 13, 14, 15, 19, 20])) + + # And for assigning values: + a[iu1] = -1 + assert_array_equal(a, + array([[-1, -1, -1, -1], + [5, -1, -1, -1], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu3] = -1 + assert_array_equal(b, + array([[-1, -1, -1, -1, -1], + [6, -1, -1, -1, -1], + [11, 12, -1, -1, -1], + [16, 17, 18, -1, -1]])) + + # These cover almost the whole array (two diagonals right of the + # main one): + a[iu2] = -10 + assert_array_equal(a, + array([[-1, -1, -10, -10], + [5, -1, -1, -10], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu4] = -10 + assert_array_equal(b, + array([[-1, -1, -10, -10, -10], + [6, -1, -1, -10, -10], + [11, 12, -1, -1, -10], + [16, 17, 18, -1, -1]])) 
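+
+# Note: tril_indices_from and triu_indices_from read the row/column counts
+# off an existing 2-D array, so only the dimensionality can go wrong;
+# non-square 2-D inputs are accepted by current NumPy, which is why the
+# non-square assertions below remain commented out.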
+ + +class TestTrilIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, tril_indices_from, np.ones((2,))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) + + +class TestTriuIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, triu_indices_from, np.ones((2,))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) + + +class TestVander: + def test_basic(self): + c = np.array([0, 1, -2, 3]) + v = vander(c) + powers = np.array([[0, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + [16, -8, 4, -2, 1], + [81, 27, 9, 3, 1]]) + # Check default value of N: + assert_array_equal(v, powers[:, 1:]) + # Check a range of N values, including 0 and 5 (greater than default) + m = powers.shape[1] + for n in range(6): + v = vander(c, N=n) + assert_array_equal(v, powers[:, m - n:m]) + + def test_dtypes(self): + c = array([11, -12, 13], dtype=np.int8) + v = vander(c) + expected = np.array([[121, 11, 1], + [144, -12, 1], + [169, 13, 1]]) + assert_array_equal(v, expected) + + c = array([1.0 + 1j, 1.0 - 1j]) + v = vander(c, N=3) + expected = np.array([[2j, 1 + 1j, 1], + [-2j, 1 - 1j, 1]]) + # The data is floating point, but the values are small integers, + # so assert_array_equal *should* be safe here (rather than, say, + # assert_array_almost_equal). + assert_array_equal(v, expected) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_type_check.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_type_check.py new file mode 100644 index 0000000..447c2c3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_type_check.py @@ -0,0 +1,473 @@ +import numpy as np +from numpy import ( + common_type, + iscomplex, + iscomplexobj, + isneginf, + isposinf, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real_if_close, +) +from numpy.testing import assert_, assert_array_equal, assert_equal + + +def assert_all(x): + assert_(np.all(x), x) + + +class TestCommonType: + def test_basic(self): + ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) + af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) + af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) + af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) + acs = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex64) + acd = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex128) + assert_(common_type(ai32) == np.float64) + assert_(common_type(af16) == np.float16) + assert_(common_type(af32) == np.float32) + assert_(common_type(af64) == np.float64) + assert_(common_type(acs) == np.complex64) + assert_(common_type(acd) == np.complex128) + + +class TestMintypecode: + + def test_default_1(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') + + def test_default_2(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype + 'f'), 'f') + assert_equal(mintypecode(itype + 'd'), 'd') + assert_equal(mintypecode(itype + 'F'), 'F') + assert_equal(mintypecode(itype + 'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') + #assert_equal(mintypecode('dF',savespace=1),'F') + 
assert_equal(mintypecode('dF'), 'D') + assert_equal(mintypecode('dD'), 'D') + assert_equal(mintypecode('Ff'), 'F') + #assert_equal(mintypecode('Fd',savespace=1),'F') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') + + def test_default_3(self): + assert_equal(mintypecode('fdF'), 'D') + #assert_equal(mintypecode('fdF',savespace=1),'F') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') + #assert_equal(mintypecode('idF',savespace=1),'F') + assert_equal(mintypecode('idD'), 'D') + + +class TestIsscalar: + + def test_basic(self): + assert_(np.isscalar(3)) + assert_(not np.isscalar([3])) + assert_(not np.isscalar((3,))) + assert_(np.isscalar(3j)) + assert_(np.isscalar(4.0)) + + +class TestReal: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(y, np.real(y)) + + y = np.array(1) + out = np.real(y) + assert_array_equal(y, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.real(y) + assert_equal(y, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,) + 1j * np.random.rand(10,) + assert_array_equal(y.real, np.real(y)) + + y = np.array(1 + 1j) + out = np.real(y) + assert_array_equal(y.real, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.real(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestImag: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(0, np.imag(y)) + + y = np.array(1) + out = np.imag(y) + assert_array_equal(0, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.imag(y) + assert_equal(0, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,) + 1j * np.random.rand(10,) + assert_array_equal(y.imag, np.imag(y)) + + y = np.array(1 + 1j) + out = np.imag(y) + assert_array_equal(y.imag, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.imag(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestIscomplex: + + def test_fail(self): + z = np.array([-1, 0, 1]) + res = iscomplex(z) + assert_(not np.any(res, axis=0)) + + def test_pass(self): + z = np.array([-1j, 1, 0]) + res = iscomplex(z) + assert_array_equal(res, [1, 0, 0]) + + +class TestIsreal: + + def test_pass(self): + z = np.array([-1, 0, 1j]) + res = isreal(z) + assert_array_equal(res, [1, 1, 0]) + + def test_fail(self): + z = np.array([-1j, 1, 0]) + res = isreal(z) + assert_array_equal(res, [0, 1, 1]) + + +class TestIscomplexobj: + + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(not iscomplexobj(z)) + z = np.array([-1j, 0, -1]) + assert_(iscomplexobj(z)) + + def test_scalar(self): + assert_(not iscomplexobj(1.0)) + assert_(iscomplexobj(1 + 0j)) + + def test_list(self): + assert_(iscomplexobj([3, 1 + 0j, True])) + assert_(not iscomplexobj([3, 1, True])) + + def test_duck(self): + class DummyComplexArray: + @property + def dtype(self): + return np.dtype(complex) + dummy = DummyComplexArray() + assert_(iscomplexobj(dummy)) + + def test_pandas_duck(self): + # This tests a custom np.dtype duck-typed class, such as used by 
pandas
+        # (pandas.core.dtypes)
+        class PdComplex(np.complex128):
+            pass
+
+        class PdDtype:
+            name = 'category'
+            names = None
+            type = PdComplex
+            kind = 'c'
+            str = '<c16'
+            base = np.dtype('complex128')
+
+        class DummyPd:
+            @property
+            def dtype(self):
+                return PdDtype
+
+        dummy = DummyPd()
+        assert_(iscomplexobj(dummy))
+
+
+class TestNanToNum:
+
+    def test_generic(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = nan_to_num(np.array((-1., 0, 1)) / 0.)
+        assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same tests but with nan, posinf and neginf keywords
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = nan_to_num(np.array((-1., 0, 1)) / 0.,
+                              nan=10, posinf=20, neginf=30)
+        assert_equal(vals, [30, 10, 20])
+        assert_all(np.isfinite(vals[[0, 2]]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same test but in-place
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = np.array((-1., 0, 1)) / 0.
+        result = nan_to_num(vals, copy=False)
+
+        assert_(result is vals)
+        assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same test but in-place
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = np.array((-1., 0, 1)) / 0.
+        result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)
+
+        assert_(result is vals)
+        assert_equal(vals, [30, 10, 20])
+        assert_all(np.isfinite(vals[[0, 2]]))
+        assert_equal(type(vals), np.ndarray)
+
+    def test_array(self):
+        vals = nan_to_num([1])
+        assert_array_equal(vals, np.array([1], int))
+        assert_equal(type(vals), np.ndarray)
+        vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
+        assert_array_equal(vals, np.array([1], int))
+        assert_equal(type(vals), np.ndarray)
+
+    def test_integer(self):
+        vals = nan_to_num(1)
+        assert_all(vals == 1)
+        assert_equal(type(vals), np.int_)
+        vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1)
+        assert_equal(type(vals), np.int_)
+
+    def test_float(self):
+        vals = nan_to_num(1.0)
+        assert_all(vals == 1.0)
+        assert_equal(type(vals), np.float64)
+        vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1.1)
+        assert_equal(type(vals), np.float64)
+
+    def test_complex_good(self):
+        vals = nan_to_num(1 + 1j)
+        assert_all(vals == 1 + 1j)
+        assert_equal(type(vals), np.complex128)
+        vals = nan_to_num(1 + 1j, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1 + 1j)
+        assert_equal(type(vals), np.complex128)
+
+    def test_complex_bad(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            v = 1 + 1j
+            v += np.array(0 + 1.j) / 0.
+        vals = nan_to_num(v)
+        # !! This is actually (unexpectedly) zero
+        assert_all(np.isfinite(vals))
+        assert_equal(type(vals), np.complex128)
+
+    def test_complex_bad2(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            v = 1 + 1j
+            v += np.array(-1 + 1.j) / 0.
+        vals = nan_to_num(v)
+        assert_all(np.isfinite(vals))
+        assert_equal(type(vals), np.complex128)
+        # Fixme
+        #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
+        # !! This is actually (unexpectedly) positive
+        # !! inf. Comment out for now, and see if it
+        # !! changes
+        #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
+
+    def test_do_not_rewrite_previous_keyword(self):
+        # This is done to test that when, for instance, nan=np.inf then these
+        # values are not rewritten by posinf keyword to the posinf value.
+ with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=np.inf, posinf=999) + assert_all(np.isfinite(vals[[0, 2]])) + assert_all(vals[0] < -1e10) + assert_equal(vals[[1, 2]], [np.inf, 999]) + assert_equal(type(vals), np.ndarray) + + +class TestRealIfClose: + + def test_basic(self): + a = np.random.rand(10) + b = real_if_close(a + 1e-15j) + assert_all(isrealobj(b)) + assert_array_equal(a, b) + b = real_if_close(a + 1e-7j) + assert_all(iscomplexobj(b)) + b = real_if_close(a + 1e-7j, tol=1e-6) + assert_all(isrealobj(b)) diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_ufunclike.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_ufunclike.py new file mode 100644 index 0000000..b4257eb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_ufunclike.py @@ -0,0 +1,97 @@ +import numpy as np +from numpy import fix, isneginf, isposinf +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises + + +class TestUfunclike: + + def test_isposinf(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape, bool) + tgt = np.array([True, False, False, False, False, False]) + + res = isposinf(a) + assert_equal(res, tgt) + res = isposinf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex128) + with assert_raises(TypeError): + isposinf(a) + + def test_isneginf(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape, bool) + tgt = np.array([False, True, False, False, False, False]) + + res = isneginf(a) + assert_equal(res, tgt) + res = isneginf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex128) + with assert_raises(TypeError): + isneginf(a) + + def test_fix(self): + a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) + out = np.zeros(a.shape, float) + tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) + + res = fix(a) + assert_equal(res, tgt) + res = fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(fix(3.14), 3) + + def test_fix_with_subclass(self): + class MyArray(np.ndarray): + def __new__(cls, data, metadata=None): + res = np.array(data, copy=True).view(cls) + res.metadata = metadata + return res + + def __array_wrap__(self, obj, context=None, return_scalar=False): + if not isinstance(obj, MyArray): + obj = obj.view(MyArray) + if obj.metadata is None: + obj.metadata = self.metadata + return obj + + def __array_finalize__(self, obj): + self.metadata = getattr(obj, 'metadata', None) + return self + + a = np.array([1.1, -1.1]) + m = MyArray(a, metadata='foo') + f = fix(m) + assert_array_equal(f, np.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0, ...] 
+        m0d.metadata = 'bar'
+        f0d = fix(m0d)
+        assert_(isinstance(f0d, MyArray))
+        assert_equal(f0d.metadata, 'bar')
+
+    def test_scalar(self):
+        x = np.inf
+        actual = np.isposinf(x)
+        expected = np.True_
+        assert_equal(actual, expected)
+        assert_equal(type(actual), type(expected))
+
+        x = -3.4
+        actual = np.fix(x)
+        expected = np.float64(-3.0)
+        assert_equal(actual, expected)
+        assert_equal(type(actual), type(expected))
+
+        out = np.array(0.0)
+        actual = np.fix(x, out=out)
+        assert_(actual is out)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_utils.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_utils.py
new file mode 100644
index 0000000..0106ee0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_utils.py
@@ -0,0 +1,80 @@
+from io import StringIO
+
+import pytest
+
+import numpy as np
+import numpy.lib._utils_impl as _utils_impl
+from numpy.testing import assert_raises_regex
+
+
+def test_assert_raises_regex_context_manager():
+    with assert_raises_regex(ValueError, 'no deprecation warning'):
+        raise ValueError('no deprecation warning')
+
+
+def test_info_method_heading():
+    # info(class) should only print "Methods:" heading if methods exist
+
+    class NoPublicMethods:
+        pass
+
+    class WithPublicMethods:
+        def first_method():
+            pass
+
+    def _has_method_heading(cls):
+        out = StringIO()
+        np.info(cls, output=out)
+        return 'Methods:' in out.getvalue()
+
+    assert _has_method_heading(WithPublicMethods)
+    assert not _has_method_heading(NoPublicMethods)
+
+
+def test_drop_metadata():
+    def _compare_dtypes(dt1, dt2):
+        return np.can_cast(dt1, dt2, casting='no')
+
+    # structured dtype
+    dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])],
+                  metadata={'msg': 'titi'})
+    dt_m = _utils_impl.drop_metadata(dt)
+    assert _compare_dtypes(dt, dt_m) is True
+    assert dt_m.metadata is None
+    assert dt_m['l1'].metadata is None
+    assert dt_m['l1']['l2'].metadata is None
+
+    # alignment
+    dt = np.dtype([('x', '<f8'), ('y', '<i4')], align=True,
+                  metadata={'msg': 'toto'})
+    dt_m = _utils_impl.drop_metadata(dt)
+    assert _compare_dtypes(dt, dt_m) is True
+    assert dt_m.metadata is None
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/_linalg.py b/.venv/lib/python3.12/site-packages/numpy/linalg/_linalg.py
new file mode 100644
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/linalg/_linalg.py
+class LinAlgError(ValueError):
+    """
+    Generic Python-exception-derived object raised by linalg functions.
+
+    General purpose exception class, derived from Python's ValueError
+    class, programmatically raised in linalg functions when a Linear
+    Algebra-related condition would prevent further correct execution of
+    the function.
+
+    Parameters
+    ----------
+    None
+
+    Examples
+    --------
+    >>> from numpy import linalg as LA
+    >>> LA.inv(np.zeros((2,2)))
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+      File "...linalg.py", line 350,
+        in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
+      File "...linalg.py", line 249,
+        in solve
+        raise LinAlgError('Singular matrix')
+    numpy.linalg.LinAlgError: Singular matrix
+
+    """
+
+
+def _raise_linalgerror_singular(err, flag):
+    raise LinAlgError("Singular matrix")
+
+def _raise_linalgerror_nonposdef(err, flag):
+    raise LinAlgError("Matrix is not positive definite")
+
+def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
+    raise LinAlgError("Eigenvalues did not converge")
+
+def _raise_linalgerror_svd_nonconvergence(err, flag):
+    raise LinAlgError("SVD did not converge")
+
+def _raise_linalgerror_lstsq(err, flag):
+    raise LinAlgError("SVD did not converge in Linear Least Squares")
+
+def _raise_linalgerror_qr(err, flag):
+    raise LinAlgError("Incorrect argument found while performing "
+                      "QR factorization")
+
+
+def _makearray(a):
+    new = asarray(a)
+    wrap = getattr(a, "__array_wrap__", new.__array_wrap__)
+    return new, wrap
+
+def isComplexType(t):
+    return issubclass(t, complexfloating)
+
+
+_real_types_map = {single: single,
+                   double: double,
+                   csingle: single,
+                   cdouble: double}
+
+_complex_types_map = {single: csingle,
+                      double: cdouble,
+                      csingle: csingle,
+                      cdouble: cdouble}
+
+def _realType(t, default=double):
+    return _real_types_map.get(t, default)
+
+def _complexType(t, default=cdouble):
+    return _complex_types_map.get(t, default)
+
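+
+# Editor's note: an illustrative sketch, not part of the vendored module (the
+# function name is hypothetical). It demonstrates the "same precision,
+# switched domain" convention encoded in _real_types_map/_complex_types_map.
+def _sketch_precision_maps():
+    import numpy as np
+    # csingle -> single keeps 32-bit precision while dropping the imaginary
+    # part, as _realType does when typing LAPACK results:
+    assert np.empty(0, np.csingle).real.dtype == np.dtype(np.single)
+    # double -> cdouble widens to the complex type of the same precision,
+    # mirroring _complexType:
+    assert (np.empty(0, np.double) * 1j).dtype == np.dtype(np.cdouble)
+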
+def _commonType(*arrays): + # in lite version, use higher precision (always double or cdouble) + result_type = single + is_complex = False + for a in arrays: + type_ = a.dtype.type + if issubclass(type_, inexact): + if isComplexType(type_): + is_complex = True + rt = _realType(type_, default=None) + if rt is double: + result_type = double + elif rt is None: + # unsupported inexact scalar + raise TypeError(f"array type {a.dtype.name} is unsupported in linalg") + else: + result_type = double + if is_complex: + result_type = _complex_types_map[result_type] + return cdouble, result_type + else: + return double, result_type + + +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + + +def _assert_2d(*arrays): + for a in arrays: + if a.ndim != 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'two-dimensional' % a.ndim) + +def _assert_stacked_2d(*arrays): + for a in arrays: + if a.ndim < 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + +def _assert_stacked_square(*arrays): + for a in arrays: + try: + m, n = a.shape[-2:] + except ValueError: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + if m != n: + raise LinAlgError('Last 2 dimensions of the array must be square') + +def _assert_finite(*arrays): + for a in arrays: + if not isfinite(a).all(): + raise LinAlgError("Array must not contain infs or NaNs") + +def _is_empty_2d(arr): + # check size first for efficiency + return arr.size == 0 and prod(arr.shape[-2:]) == 0 + + +def transpose(a): + """ + Transpose each matrix in a stack of matrices. + + Unlike np.transpose, this only swaps the last two axes, rather than all of + them + + Parameters + ---------- + a : (...,M,N) array_like + + Returns + ------- + aT : (...,N,M) ndarray + """ + return swapaxes(a, -1, -2) + +# Linear equations + +def _tensorsolve_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensorsolve_dispatcher) +def tensorsolve(a, b, axes=None): + """ + Solve the tensor equation ``a x = b`` for x. + + It is assumed that all indices of `x` are summed over in the product, + together with the rightmost indices of `a`, as is done in, for example, + ``tensordot(a, x, axes=x.ndim)``. + + Parameters + ---------- + a : array_like + Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals + the shape of that sub-tensor of `a` consisting of the appropriate + number of its rightmost indices, and must be such that + ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be + 'square'). + b : array_like + Right-hand tensor, which can be of any shape. + axes : tuple of ints, optional + Axes in `a` to reorder to the right, before inversion. + If None (default), no reordering is done. + + Returns + ------- + x : ndarray, shape Q + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). 
+ + See Also + -------- + numpy.tensordot, tensorinv, numpy.einsum + + Examples + -------- + >>> import numpy as np + >>> a = np.eye(2*3*4) + >>> a.shape = (2*3, 4, 2, 3, 4) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=(2*3, 4)) + >>> x = np.linalg.tensorsolve(a, b) + >>> x.shape + (2, 3, 4) + >>> np.allclose(np.tensordot(a, x, axes=3), b) + True + + """ + a, wrap = _makearray(a) + b = asarray(b) + an = a.ndim + + if axes is not None: + allaxes = list(range(an)) + for k in axes: + allaxes.remove(k) + allaxes.insert(an, k) + a = a.transpose(allaxes) + + oldshape = a.shape[-(an - b.ndim):] + prod = 1 + for k in oldshape: + prod *= k + + if a.size != prod ** 2: + raise LinAlgError( + "Input arrays must satisfy the requirement \ + prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])" + ) + + a = a.reshape(prod, prod) + b = b.ravel() + res = wrap(solve(a, b)) + res.shape = oldshape + return res + + +def _solve_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_solve_dispatcher) +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + Computes the "exact" solution, `x`, of the well-determined, i.e., full + rank, linear matrix equation `ax = b`. + + Parameters + ---------- + a : (..., M, M) array_like + Coefficient matrix. + b : {(M,), (..., M, K)}, array_like + Ordinate or "dependent variable" values. + + Returns + ------- + x : {(..., M,), (..., M, K)} ndarray + Solution to the system a x = b. Returned shape is (..., M) if b is + shape (M,) and (..., M, K) if b is (..., M, K), where the "..." part is + broadcasted between a and b. + + Raises + ------ + LinAlgError + If `a` is singular or not square. + + See Also + -------- + scipy.linalg.solve : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The solutions are computed using LAPACK routine ``_gesv``. + + `a` must be square and of full-rank, i.e., all rows (or, equivalently, + columns) must be linearly independent; if either is not true, use + `lstsq` for the least-squares best "solution" of the + system/equation. + + .. versionchanged:: 2.0 + + The b array is only treated as a shape (M,) column vector if it is + exactly 1-dimensional. In all other instances it is treated as a stack + of (M, K) matrices. Previously b would be treated as a stack of (M,) + vectors if b.ndim was equal to a.ndim - 1. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 22. 
+ + Examples + -------- + Solve the system of equations: + ``x0 + 2 * x1 = 1`` and + ``3 * x0 + 5 * x1 = 2``: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 5]]) + >>> b = np.array([1, 2]) + >>> x = np.linalg.solve(a, b) + >>> x + array([-1., 1.]) + + Check that the solution is correct: + + >>> np.allclose(np.dot(a, x), b) + True + + """ + a, _ = _makearray(a) + _assert_stacked_square(a) + b, wrap = _makearray(b) + t, result_t = _commonType(a, b) + + # We use the b = (..., M,) logic, only if the number of extra dimensions + # match exactly + if b.ndim == 1: + gufunc = _umath_linalg.solve1 + else: + gufunc = _umath_linalg.solve + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + r = gufunc(a, b, signature=signature) + + return wrap(r.astype(result_t, copy=False)) + + +def _tensorinv_dispatcher(a, ind=None): + return (a,) + + +@array_function_dispatch(_tensorinv_dispatcher) +def tensorinv(a, ind=2): + """ + Compute the 'inverse' of an N-dimensional array. + + The result is an inverse for `a` relative to the tensordot operation + ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, + ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the + tensordot operation. + + Parameters + ---------- + a : array_like + Tensor to 'invert'. Its shape must be 'square', i. e., + ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. + ind : int, optional + Number of first indices that are involved in the inverse sum. + Must be a positive integer, default is 2. + + Returns + ------- + b : ndarray + `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorsolve + + Examples + -------- + >>> import numpy as np + >>> a = np.eye(4*6) + >>> a.shape = (4, 6, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=2) + >>> ainv.shape + (8, 3, 4, 6) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=(4, 6)) + >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) + True + + >>> a = np.eye(4*6) + >>> a.shape = (24, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=1) + >>> ainv.shape + (8, 3, 24) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=24) + >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + True + + """ + a = asarray(a) + oldshape = a.shape + prod = 1 + if ind > 0: + invshape = oldshape[ind:] + oldshape[:ind] + for k in oldshape[ind:]: + prod *= k + else: + raise ValueError("Invalid ind argument.") + a = a.reshape(prod, -1) + ia = inv(a) + return ia.reshape(*invshape) + + +# Matrix inversion + +def _unary_dispatcher(a): + return (a,) + + +@array_function_dispatch(_unary_dispatcher) +def inv(a): + """ + Compute the inverse of a matrix. + + Given a square matrix `a`, return the matrix `ainv` satisfying + ``a @ ainv = ainv @ a = eye(a.shape[0])``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be inverted. + + Returns + ------- + ainv : (..., M, M) ndarray or matrix + Inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is not square or inversion fails. + + See Also + -------- + scipy.linalg.inv : Similar function in SciPy. + numpy.linalg.cond : Compute the condition number of a matrix. + numpy.linalg.svd : Compute the singular value decomposition of a matrix. 
+ + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + If `a` is detected to be singular, a `LinAlgError` is raised. If `a` is + ill-conditioned, a `LinAlgError` may or may not be raised, and results may + be inaccurate due to floating-point errors. + + References + ---------- + .. [1] Wikipedia, "Condition number", + https://en.wikipedia.org/wiki/Condition_number + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import inv + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> ainv = inv(a) + >>> np.allclose(a @ ainv, np.eye(2)) + True + >>> np.allclose(ainv @ a, np.eye(2)) + True + + If a is a matrix object, then the return value is a matrix as well: + + >>> ainv = inv(np.matrix(a)) + >>> ainv + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + + Inverses of several matrices can be computed at once: + + >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) + >>> inv(a) + array([[[-2. , 1. ], + [ 1.5 , -0.5 ]], + [[-1.25, 0.75], + [ 0.75, -0.25]]]) + + If a matrix is close to singular, the computed inverse may not satisfy + ``a @ ainv = ainv @ a = eye(a.shape[0])`` even if a `LinAlgError` + is not raised: + + >>> a = np.array([[2,4,6],[2,0,2],[6,8,14]]) + >>> inv(a) # No errors raised + array([[-1.12589991e+15, -5.62949953e+14, 5.62949953e+14], + [-1.12589991e+15, -5.62949953e+14, 5.62949953e+14], + [ 1.12589991e+15, 5.62949953e+14, -5.62949953e+14]]) + >>> a @ inv(a) + array([[ 0. , -0.5 , 0. ], # may vary + [-0.5 , 0.625, 0.25 ], + [ 0. , 0. , 1. ]]) + + To detect ill-conditioned matrices, you can use `numpy.linalg.cond` to + compute its *condition number* [1]_. The larger the condition number, the + more ill-conditioned the matrix is. As a rule of thumb, if the condition + number ``cond(a) = 10**k``, then you may lose up to ``k`` digits of + accuracy on top of what would be lost to the numerical method due to loss + of precision from arithmetic methods. + + >>> from numpy.linalg import cond + >>> cond(a) + np.float64(8.659885634118668e+17) # may vary + + It is also possible to detect ill-conditioning by inspecting the matrix's + singular values directly. The ratio between the largest and the smallest + singular value is the condition number: + + >>> from numpy.linalg import svd + >>> sigma = svd(a, compute_uv=False) # Do not compute singular vectors + >>> sigma.max()/sigma.min() + 8.659885634118668e+17 # may vary + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + ainv = _umath_linalg.inv(a, signature=signature) + return wrap(ainv.astype(result_t, copy=False)) + + +def _matrix_power_dispatcher(a, n): + return (a,) + + +@array_function_dispatch(_matrix_power_dispatcher) +def matrix_power(a, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the ``abs(n)``. + + .. note:: Stacks of object matrices are not currently supported. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be "powered". + n : int + The exponent can be any integer or long integer, positive, + negative, or zero. 
+ + Returns + ------- + a**n : (..., M, M) ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + For matrices that are not square or that (for negative powers) cannot + be inverted numerically. + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import matrix_power + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + a = asanyarray(a) + _assert_stacked_square(a) + + try: + n = operator.index(n) + except TypeError as e: + raise TypeError("exponent must be an integer") from e + + # Fall back on dot for object arrays. Object arrays are not supported by + # the current implementation of matmul using einsum + if a.dtype != object: + fmatmul = matmul + elif a.ndim == 2: + fmatmul = dot + else: + raise NotImplementedError( + "matrix_power not supported for stacks of object arrays") + + if n == 0: + a = empty_like(a) + a[...] = eye(a.shape[-2], dtype=a.dtype) + return a + + elif n < 0: + a = inv(a) + n = abs(n) + + # short-cuts. + if n == 1: + return a + + elif n == 2: + return fmatmul(a, a) + + elif n == 3: + return fmatmul(fmatmul(a, a), a) + + # Use binary decomposition to reduce the number of matrix multiplications. + # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to + # increasing powers of 2, and multiply into the result as needed. + z = result = None + while n > 0: + z = a if z is None else fmatmul(z, z) + n, bit = divmod(n, 2) + if bit: + result = z if result is None else fmatmul(result, z) + + return result + + +# Cholesky decomposition + +def _cholesky_dispatcher(a, /, *, upper=None): + return (a,) + + +@array_function_dispatch(_cholesky_dispatcher) +def cholesky(a, /, *, upper=False): + """ + Cholesky decomposition. + + Return the lower or upper Cholesky decomposition, ``L * L.H`` or + ``U.H * U``, of the square matrix ``a``, where ``L`` is lower-triangular, + ``U`` is upper-triangular, and ``.H`` is the conjugate transpose operator + (which is the ordinary transpose if ``a`` is real-valued). ``a`` must be + Hermitian (symmetric if real-valued) and positive-definite. No checking is + performed to verify whether ``a`` is Hermitian or not. In addition, only + the lower or upper-triangular and diagonal elements of ``a`` are used. + Only ``L`` or ``U`` is actually returned. + + Parameters + ---------- + a : (..., M, M) array_like + Hermitian (symmetric if all elements are real), positive-definite + input matrix. + upper : bool + If ``True``, the result must be the upper-triangular Cholesky factor. + If ``False``, the result must be the lower-triangular Cholesky factor. + Default: ``False``. 
+
+    Returns
+    -------
+    L : (..., M, M) ndarray
+        Lower or upper-triangular Cholesky factor of `a`. Returns a matrix
+        object if `a` is a matrix object.
+
+    Raises
+    ------
+    LinAlgError
+        If the decomposition fails, for example, if `a` is not
+        positive-definite.
+
+    See Also
+    --------
+    scipy.linalg.cholesky : Similar function in SciPy.
+    scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
+                                   positive-definite matrix.
+    scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
+                              `scipy.linalg.cho_solve`.
+
+    Notes
+    -----
+    Broadcasting rules apply, see the `numpy.linalg` documentation for
+    details.
+
+    The Cholesky decomposition is often used as a fast way of solving
+
+    .. math:: A \\mathbf{x} = \\mathbf{b}
+
+    (when `A` is both Hermitian/symmetric and positive-definite).
+
+    First, we solve for :math:`\\mathbf{y}` in
+
+    .. math:: L \\mathbf{y} = \\mathbf{b},
+
+    and then for :math:`\\mathbf{x}` in
+
+    .. math:: L^{H} \\mathbf{x} = \\mathbf{y}.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> A = np.array([[1,-2j],[2j,5]])
+    >>> A
+    array([[ 1.+0.j, -0.-2.j],
+           [ 0.+2.j,  5.+0.j]])
+    >>> L = np.linalg.cholesky(A)
+    >>> L
+    array([[1.+0.j, 0.+0.j],
+           [0.+2.j, 1.+0.j]])
+    >>> np.dot(L, L.T.conj())  # verify that L * L.H = A
+    array([[1.+0.j, 0.-2.j],
+           [0.+2.j, 5.+0.j]])
+    >>> A = [[1,-2j],[2j,5]]  # what happens if A is only array_like?
+    >>> np.linalg.cholesky(A)  # an ndarray object is returned
+    array([[1.+0.j, 0.+0.j],
+           [0.+2.j, 1.+0.j]])
+    >>> # But a matrix object is returned if A is a matrix object
+    >>> np.linalg.cholesky(np.matrix(A))
+    matrix([[ 1.+0.j,  0.+0.j],
+            [ 0.+2.j,  1.+0.j]])
+    >>> # The upper-triangular Cholesky factor can also be obtained.
+    >>> np.linalg.cholesky(A, upper=True)
+    array([[1.-0.j, 0.-2.j],
+           [0.-0.j, 1.-0.j]])
+
+    """
+    gufunc = _umath_linalg.cholesky_up if upper else _umath_linalg.cholesky_lo
+    a, wrap = _makearray(a)
+    _assert_stacked_square(a)
+    t, result_t = _commonType(a)
+    signature = 'D->D' if isComplexType(t) else 'd->d'
+    with errstate(call=_raise_linalgerror_nonposdef, invalid='call',
+                  over='ignore', divide='ignore', under='ignore'):
+        r = gufunc(a, signature=signature)
+    return wrap(r.astype(result_t, copy=False))
+
+
+# outer product
+
+
+def _outer_dispatcher(x1, x2):
+    return (x1, x2)
+
+
+@array_function_dispatch(_outer_dispatcher)
+def outer(x1, x2, /):
+    """
+    Compute the outer product of two vectors.
+
+    This function is Array API compatible. Compared to ``np.outer``
+    it accepts 1-dimensional inputs only.
+
+    Parameters
+    ----------
+    x1 : (M,) array_like
+        One-dimensional input array of size ``M``.
+        Must have a numeric data type.
+    x2 : (N,) array_like
+        One-dimensional input array of size ``N``.
+        Must have a numeric data type.
+
+    Returns
+    -------
+    out : (M, N) ndarray
+        ``out[i, j] = x1[i] * x2[j]``
+
+    See Also
+    --------
+    numpy.outer
+
+    Examples
+    --------
+    Make a (*very* coarse) grid for computing a Mandelbrot set:
+
+    >>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5))
+    >>> rl
+    array([[-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.]])
+    >>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
+    >>> im
+    array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
+           [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
+           [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
+           [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
+           [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
+    >>> grid = rl + im
+    >>> grid
+    array([[-2.+2.j, -1.+2.j,  0.+2.j,  1.+2.j,  2.+2.j],
+           [-2.+1.j, -1.+1.j,  0.+1.j,  1.+1.j,  2.+1.j],
+           [-2.+0.j, -1.+0.j,  0.+0.j,  1.+0.j,  2.+0.j],
+           [-2.-1.j, -1.-1.j,  0.-1.j,  1.-1.j,  2.-1.j],
+           [-2.-2.j, -1.-2.j,  0.-2.j,  1.-2.j,  2.-2.j]])
+
+    An example using a "vector" of letters:
+
+    >>> x = np.array(['a', 'b', 'c'], dtype=object)
+    >>> np.linalg.outer(x, [1, 2, 3])
+    array([['a', 'aa', 'aaa'],
+           ['b', 'bb', 'bbb'],
+           ['c', 'cc', 'ccc']], dtype=object)
+
+    """
+    x1 = asanyarray(x1)
+    x2 = asanyarray(x2)
+    if x1.ndim != 1 or x2.ndim != 1:
+        raise ValueError(
+            "Input arrays must be one-dimensional, but they are "
+            f"{x1.ndim=} and {x2.ndim=}."
+        )
+    return _core_outer(x1, x2, out=None)
+
+
+# QR decomposition
+
+
+def _qr_dispatcher(a, mode=None):
+    return (a,)
+
+
+@array_function_dispatch(_qr_dispatcher)
+def qr(a, mode='reduced'):
+    """
+    Compute the qr factorization of a matrix.
+
+    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
+    upper-triangular.
+
+    Parameters
+    ----------
+    a : array_like, shape (..., M, N)
+        An array-like object with the dimensionality of at least 2.
+    mode : {'reduced', 'complete', 'r', 'raw'}, optional, default: 'reduced'
+        If K = min(M, N), then
+
+        * 'reduced'  : returns Q, R with dimensions (..., M, K), (..., K, N)
+        * 'complete' : returns Q, R with dimensions (..., M, M), (..., M, N)
+        * 'r'        : returns R only with dimensions (..., K, N)
+        * 'raw'      : returns h, tau with dimensions (..., N, M), (..., K,)
+
+        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
+        see the notes for more information. The default is 'reduced', and to
+        maintain backward compatibility with earlier versions of numpy both
+        it and the old default 'full' can be omitted. Note that array h
+        returned in 'raw' mode is transposed for calling Fortran. The
+        'economic' mode is deprecated. The modes 'full' and 'economic' may
+        be passed using only the first letter for backwards compatibility,
+        but all others must be spelled out. See the Notes for more
+        explanation.
+
+
+    Returns
+    -------
+    When mode is 'reduced' or 'complete', the result will be a namedtuple with
+    the attributes `Q` and `R`.
+
+    Q : ndarray of float or complex, optional
+        A matrix with orthonormal columns. When mode = 'complete' the
+        result is an orthogonal/unitary matrix depending on whether or not
+        a is real/complex. The determinant may be either +/- 1 in that
+        case. In case the number of dimensions in the input array is
+        greater than 2 then a stack of the matrices with above properties
+        is returned.
+    R : ndarray of float or complex, optional
+        The upper-triangular matrix or a stack of upper-triangular
+        matrices if the number of dimensions in the input array is greater
+        than 2.
+    (h, tau) : ndarrays of np.double or np.cdouble, optional
+        The array h contains the Householder reflectors that generate q
+        along with r. The tau array contains scaling factors for the
+        reflectors. In the deprecated 'economic' mode only h is returned.
+
+    Raises
+    ------
+    LinAlgError
+        If factoring fails.
+
+    See Also
+    --------
+    scipy.linalg.qr : Similar function in SciPy.
+    scipy.linalg.rq : Compute RQ decomposition of a matrix.
+
+    Notes
+    -----
+    This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
+    ``dorgqr``, and ``zungqr``.
+
+    For more information on the qr factorization, see for example:
+    https://en.wikipedia.org/wiki/QR_factorization
+
+    Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
+    `a` is of type `matrix`, all the return values will be matrices too.
+
+    New 'reduced', 'complete', and 'raw' options for mode were added in
+    NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
+    addition the options 'full' and 'economic' were deprecated. Because
+    'full' was the previous default and 'reduced' is the new default,
+    backward compatibility can be maintained by letting `mode` default.
+    The 'raw' option was added so that LAPACK routines that can multiply
+    arrays by q using the Householder reflectors can be used. Note that in
+    this case the returned arrays are of type np.double or np.cdouble and
+    the h array is transposed to be FORTRAN compatible. No routines using
+    the 'raw' return are currently exposed by numpy, but some are available
+    in lapack_lite and just await the necessary work.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> a = rng.normal(size=(9, 6))
+    >>> Q, R = np.linalg.qr(a)
+    >>> np.allclose(a, np.dot(Q, R))  # a does equal QR
+    True
+    >>> R2 = np.linalg.qr(a, mode='r')
+    >>> np.allclose(R, R2)  # mode='r' returns the same R as mode='reduced'
+    True
+    >>> a = np.random.normal(size=(3, 2, 2))  # Stack of 2 x 2 matrices as input
+    >>> Q, R = np.linalg.qr(a)
+    >>> Q.shape
+    (3, 2, 2)
+    >>> R.shape
+    (3, 2, 2)
+    >>> np.allclose(a, np.matmul(Q, R))
+    True
+
+    Example illustrating a common use of `qr`: solving least squares
+    problems.
+
+    What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
+    the following data: {(0,1), (1,2), (1,2), (2,3)}. (Graph the points
+    and you'll see that it should be y0 = 1, m = 1.) The answer is provided
+    by solving the over-determined matrix equation ``Ax = b``, where::
+
+      A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
+      x = array([[m], [y0]])
+      b = array([[1], [2], [2], [3]])
+
+    If A = QR such that Q is orthonormal (which is always possible via
+    Gram-Schmidt), then ``x = inv(R) * (Q.T) * b``. (In numpy practice,
+    however, we simply use `lstsq`.)
+
+    >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
+    >>> A
+    array([[0, 1],
+           [1, 1],
+           [1, 1],
+           [2, 1]])
+    >>> b = np.array([1, 2, 2, 3])
+    >>> Q, R = np.linalg.qr(A)
+    >>> p = np.dot(Q.T, b)
+    >>> np.dot(np.linalg.inv(R), p)
+    array([ 1.,  1.])
+
+    """
+    if mode not in ('reduced', 'complete', 'r', 'raw'):
+        if mode in ('f', 'full'):
+            # 2013-04-01, 1.8
+            msg = (
+                "The 'full' option is deprecated in favor of 'reduced'.\n"
+                "For backward compatibility let mode default."
+            )
+            warnings.warn(msg, DeprecationWarning, stacklevel=2)
+            mode = 'reduced'
+        elif mode in ('e', 'economic'):
+            # 2013-04-01, 1.8
+            msg = "The 'economic' option is deprecated."
+ warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'economic' + else: + raise ValueError(f"Unrecognized mode '{mode}'") + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + m, n = a.shape[-2:] + t, result_t = _commonType(a) + a = a.astype(t, copy=True) + a = _to_native_byte_order(a) + mn = min(m, n) + + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_qr, invalid='call', + over='ignore', divide='ignore', under='ignore'): + tau = _umath_linalg.qr_r_raw(a, signature=signature) + + # handle modes that don't return q + if mode == 'r': + r = triu(a[..., :mn, :]) + r = r.astype(result_t, copy=False) + return wrap(r) + + if mode == 'raw': + q = transpose(a) + q = q.astype(result_t, copy=False) + tau = tau.astype(result_t, copy=False) + return wrap(q), tau + + if mode == 'economic': + a = a.astype(result_t, copy=False) + return wrap(a) + + # mc is the number of columns in the resulting q + # matrix. If the mode is complete then it is + # same as number of rows, and if the mode is reduced, + # then it is the minimum of number of rows and columns. + if mode == 'complete' and m > n: + mc = m + gufunc = _umath_linalg.qr_complete + else: + mc = mn + gufunc = _umath_linalg.qr_reduced + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with errstate(call=_raise_linalgerror_qr, invalid='call', + over='ignore', divide='ignore', under='ignore'): + q = gufunc(a, tau, signature=signature) + r = triu(a[..., :mc, :]) + + q = q.astype(result_t, copy=False) + r = r.astype(result_t, copy=False) + + return QRResult(wrap(q), wrap(r)) + +# Eigenvalues + + +@array_function_dispatch(_unary_dispatcher) +def eigvals(a): + """ + Compute the eigenvalues of a general matrix. + + Main difference between `eigvals` and `eig`: the eigenvectors aren't + returned. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues will be computed. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues, each repeated according to its multiplicity. + They are not necessarily ordered, nor are they necessarily + real for real matrices. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigh : eigenvalues and eigenvectors of real symmetric or complex + Hermitian (conjugate symmetric) arrays. + scipy.linalg.eigvals : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + Examples + -------- + Illustration, using the fact that the eigenvalues of a diagonal matrix + are its diagonal elements, that multiplying a matrix on the left + by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose + of `Q`), preserves the eigenvalues of the "middle" matrix. 
In other words, + if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as + ``A``: + + >>> import numpy as np + >>> from numpy import linalg as LA + >>> x = np.random.random() + >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) + >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) + (1.0, 1.0, 0.0) + + Now multiply a diagonal matrix by ``Q`` on one side and + by ``Q.T`` on the other: + + >>> D = np.diag((-1,1)) + >>> LA.eigvals(D) + array([-1., 1.]) + >>> A = np.dot(Q, D) + >>> A = np.dot(A, Q.T) + >>> LA.eigvals(A) + array([ 1., -1.]) # random + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->D' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w = _umath_linalg.eigvals(a, signature=signature) + + if not isComplexType(t): + if all(w.imag == 0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + return w.astype(result_t, copy=False) + + +def _eigvalsh_dispatcher(a, UPLO=None): + return (a,) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigvalsh(a, UPLO='L'): + """ + Compute the eigenvalues of a complex Hermitian or real symmetric matrix. + + Main difference from eigh: the eigenvectors are not computed. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues are to be + computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigvals : eigenvalues of general real or complex arrays. + eig : eigenvalues and right eigenvectors of general real or complex + arrays. + scipy.linalg.eigvalsh : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``. 
+ + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> LA.eigvalsh(a) + array([ 0.17157288, 5.82842712]) # may vary + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() + >>> # with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa = LA.eigvalsh(a) + >>> wb = LA.eigvals(b) + >>> wa + array([1., 6.]) + >>> wb + array([6.+0.j, 1.+0.j]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + if UPLO == 'L': + gufunc = _umath_linalg.eigvalsh_lo + else: + gufunc = _umath_linalg.eigvalsh_up + + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->d' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w = gufunc(a, signature=signature) + return w.astype(_realType(result_t), copy=False) + + +# Eigenvectors + + +@array_function_dispatch(_unary_dispatcher) +def eig(a): + """ + Compute the eigenvalues and right eigenvectors of a square array. + + Parameters + ---------- + a : (..., M, M) array + Matrices for which the eigenvalues and right eigenvectors will + be computed + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) array + The eigenvalues, each repeated according to its multiplicity. + The eigenvalues are not necessarily ordered. The resulting + array will be of complex type, unless the imaginary part is + zero in which case it will be cast to a real type. When `a` + is real the resulting eigenvalues will be real (0 imaginary + part) or occur in conjugate pairs + + eigenvectors : (..., M, M) array + The normalized (unit "length") eigenvectors, such that the + column ``eigenvectors[:,i]`` is the eigenvector corresponding to the + eigenvalue ``eigenvalues[i]``. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvals : eigenvalues of a non-symmetric array. + eigh : eigenvalues and eigenvectors of a real symmetric or complex + Hermitian (conjugate symmetric) array. + eigvalsh : eigenvalues of a real symmetric or complex Hermitian + (conjugate symmetric) array. + scipy.linalg.eig : Similar function in SciPy that also solves the + generalized eigenvalue problem. + scipy.linalg.schur : Best choice for unitary and other non-Hermitian + normal matrices. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + The number `w` is an eigenvalue of `a` if there exists a vector `v` such + that ``a @ v = w * v``. Thus, the arrays `a`, `eigenvalues`, and + `eigenvectors` satisfy the equations ``a @ eigenvectors[:,i] = + eigenvalues[i] * eigenvectors[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`. + + The array `eigenvectors` may not be of maximum rank, that is, some of the + columns may be linearly dependent, although round-off error may obscure + that fact. 
If the eigenvalues are all different, then theoretically the + eigenvectors are linearly independent and `a` can be diagonalized by a + similarity transformation using `eigenvectors`, i.e, ``inv(eigenvectors) @ + a @ eigenvectors`` is diagonal. + + For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur` + is preferred because the matrix `eigenvectors` is guaranteed to be + unitary, which is not the case when using `eig`. The Schur factorization + produces an upper triangular matrix rather than a diagonal matrix, but for + normal matrices only the diagonal of the upper triangular matrix is + needed, the rest is roundoff error. + + Finally, it is emphasized that `eigenvectors` consists of the *right* (as + in right-hand side) eigenvectors of `a`. A vector `y` satisfying ``y.T @ a + = z * y.T`` for some number `z` is called a *left* eigenvector of `a`, + and, in general, the left and right eigenvectors of a matrix are not + necessarily the (perhaps conjugate) transposes of each other. + + References + ---------- + G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, + Academic Press, Inc., 1980, Various pp. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + + (Almost) trivial example with real eigenvalues and eigenvectors. + + >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3))) + >>> eigenvalues + array([1., 2., 3.]) + >>> eigenvectors + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Real matrix possessing complex eigenvalues and eigenvectors; + note that the eigenvalues are complex conjugates of each other. + + >>> eigenvalues, eigenvectors = LA.eig(np.array([[1, -1], [1, 1]])) + >>> eigenvalues + array([1.+1.j, 1.-1.j]) + >>> eigenvectors + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) + + Complex-valued matrix with real eigenvalues (but complex-valued + eigenvectors); note that ``a.conj().T == a``, i.e., `a` is Hermitian. + + >>> a = np.array([[1, 1j], [-1j, 1]]) + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([2.+0.j, 0.+0.j]) + >>> eigenvectors + array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary + [ 0.70710678+0.j , -0. +0.70710678j]]) + + Be careful about round-off error! + + >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) + >>> # Theor. eigenvalues are 1 +/- 1e-9 + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([1., 1.]) + >>> eigenvectors + array([[1., 0.], + [0., 1.]]) + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + signature = 'D->DD' if isComplexType(t) else 'd->DD' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w, vt = _umath_linalg.eig(a, signature=signature) + + if not isComplexType(t) and all(w.imag == 0.0): + w = w.real + vt = vt.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + vt = vt.astype(result_t, copy=False) + return EigResult(w.astype(result_t, copy=False), wrap(vt)) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigh(a, UPLO='L'): + """ + Return the eigenvalues and eigenvectors of a complex Hermitian + (conjugate symmetric) or a real symmetric matrix. + + Returns two objects, a 1-D array containing the eigenvalues of `a`, and + a 2-D square array or matrix (depending on the input type) of the + corresponding eigenvectors (in columns). 
+ + Parameters + ---------- + a : (..., M, M) array + Hermitian or real symmetric matrices whose eigenvalues and + eigenvectors are to be computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + eigenvectors : {(..., M, M) ndarray, (..., M, M) matrix} + The column ``eigenvectors[:, i]`` is the normalized eigenvector + corresponding to the eigenvalue ``eigenvalues[i]``. Will return a + matrix object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eig : eigenvalues and right eigenvectors for non-symmetric arrays. + eigvals : eigenvalues of non-symmetric arrays. + scipy.linalg.eigh : Similar function in SciPy (but also solves the + generalized eigenvalue problem). + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``, + ``_heevd``. + + The eigenvalues of real symmetric or complex Hermitian matrices are always + real. [1]_ The array `eigenvalues` of (column) eigenvectors is unitary and + `a`, `eigenvalues`, and `eigenvectors` satisfy the equations ``dot(a, + eigenvectors[:, i]) = eigenvalues[i] * eigenvectors[:, i]``. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 222. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> a + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(a) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> (np.dot(a, eigenvectors[:, 0]) - + ... eigenvalues[0] * eigenvectors[:, 0]) # verify 1st eigenval/vec pair + array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j]) + >>> (np.dot(a, eigenvectors[:, 1]) - + ... eigenvalues[1] * eigenvectors[:, 1]) # verify 2nd eigenval/vec pair + array([0.+0.j, 0.+0.j]) + + >>> A = np.matrix(a) # what happens if input is a matrix object + >>> A + matrix([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(A) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. 
-0.92387953j]]) + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa, va = LA.eigh(a) + >>> wb, vb = LA.eig(b) + >>> wa + array([1., 6.]) + >>> wb + array([6.+0.j, 1.+0.j]) + >>> va + array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary + [ 0. +0.89442719j, 0. -0.4472136j ]]) + >>> vb + array([[ 0.89442719+0.j , -0. +0.4472136j], + [-0. +0.4472136j, 0.89442719+0.j ]]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + if UPLO == 'L': + gufunc = _umath_linalg.eigh_lo + else: + gufunc = _umath_linalg.eigh_up + + signature = 'D->dD' if isComplexType(t) else 'd->dd' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w, vt = gufunc(a, signature=signature) + w = w.astype(_realType(result_t), copy=False) + vt = vt.astype(result_t, copy=False) + return EighResult(w, wrap(vt)) + + +# Singular value decomposition + +def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None): + return (a,) + + +@array_function_dispatch(_svd_dispatcher) +def svd(a, full_matrices=True, compute_uv=True, hermitian=False): + """ + Singular Value Decomposition. + + When `a` is a 2D array, and ``full_matrices=False``, then it is + factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where + `u` and the Hermitian transpose of `vh` are 2D arrays with + orthonormal columns and `s` is a 1D array of `a`'s singular + values. When `a` is higher-dimensional, SVD is applied in + stacked mode as explained below. + + Parameters + ---------- + a : (..., M, N) array_like + A real or complex array with ``a.ndim >= 2``. + full_matrices : bool, optional + If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and + ``(..., N, N)``, respectively. Otherwise, the shapes are + ``(..., M, K)`` and ``(..., K, N)``, respectively, where + ``K = min(M, N)``. + compute_uv : bool, optional + Whether or not to compute `u` and `vh` in addition to `s`. True + by default. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + Returns + ------- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: + + U : { (..., M, M), (..., M, K) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + S : (..., K) array + Vector(s) with the singular values, within each vector sorted in + descending order. The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. + Vh : { (..., N, N), (..., K, N) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + + Raises + ------ + LinAlgError + If SVD computation does not converge. 
+ + See Also + -------- + scipy.linalg.svd : Similar function in SciPy. + scipy.linalg.svdvals : Compute singular values of a matrix. + + Notes + ----- + The decomposition is performed using LAPACK routine ``_gesdd``. + + SVD is usually described for the factorization of a 2D matrix :math:`A`. + The higher-dimensional case will be discussed below. In the 2D case, SVD is + written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, + :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` + contains the singular values of `a` and `u` and `vh` are unitary. The rows + of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are + the eigenvectors of :math:`A A^H`. In both cases the corresponding + (possibly non-zero) eigenvalues are given by ``s**2``. + + If `a` has more than two dimensions, then broadcasting rules apply, as + explained in :ref:`routines.linalg-broadcasting`. This means that SVD is + working in "stacked" mode: it iterates over all indices of the first + ``a.ndim - 2`` dimensions and for each combination SVD is applied to the + last two indices. The matrix `a` can be reconstructed from the + decomposition with either ``(u * s[..., None, :]) @ vh`` or + ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the + function ``np.matmul`` for python versions below 3.5.) + + If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are + all the return values. + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) + 1j*rng.normal(size=(9, 6)) + >>> b = rng.normal(size=(2, 7, 8, 3)) + 1j*rng.normal(size=(2, 7, 8, 3)) + + + Reconstruction based on full SVD, 2D case: + + >>> U, S, Vh = np.linalg.svd(a, full_matrices=True) + >>> U.shape, S.shape, Vh.shape + ((9, 9), (6,), (6, 6)) + >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) + True + >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat[:6, :6] = np.diag(S) + >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) + True + + Reconstruction based on reduced SVD, 2D case: + + >>> U, S, Vh = np.linalg.svd(a, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + ((9, 6), (6,), (6, 6)) + >>> np.allclose(a, np.dot(U * S, Vh)) + True + >>> smat = np.diag(S) + >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) + True + + Reconstruction based on full SVD, 4D case: + + >>> U, S, Vh = np.linalg.svd(b, full_matrices=True) + >>> U.shape, S.shape, Vh.shape + ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(U[..., :3] * S[..., None, :], Vh)) + True + >>> np.allclose(b, np.matmul(U[..., :3], S[..., None] * Vh)) + True + + Reconstruction based on reduced SVD, 4D case: + + >>> U, S, Vh = np.linalg.svd(b, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(U * S[..., None, :], Vh)) + True + >>> np.allclose(b, np.matmul(U, S[..., None] * Vh)) + True + + """ + import numpy as np + a, wrap = _makearray(a) + + if hermitian: + # note: lapack svd returns eigenvalues with s ** 2 sorted descending, + # but eig returns s sorted ascending, so we re-order the eigenvalues + # and related arrays to have the correct order + if compute_uv: + s, u = eigh(a) + sgn = sign(s) + s = abs(s) + sidx = argsort(s)[..., ::-1] + sgn = np.take_along_axis(sgn, sidx, axis=-1) + s = np.take_along_axis(s, sidx, axis=-1) + u = np.take_along_axis(u, sidx[..., None, :], axis=-1) + # singular values are unsigned, move the sign into v + vt = transpose(u * sgn[..., None, 
:]).conjugate() + return SVDResult(wrap(u), s, wrap(vt)) + else: + s = eigvalsh(a) + s = abs(s) + return sort(s)[..., ::-1] + + _assert_stacked_2d(a) + t, result_t = _commonType(a) + + m, n = a.shape[-2:] + if compute_uv: + if full_matrices: + gufunc = _umath_linalg.svd_f + else: + gufunc = _umath_linalg.svd_s + + signature = 'D->DdD' if isComplexType(t) else 'd->ddd' + with errstate(call=_raise_linalgerror_svd_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + u, s, vh = gufunc(a, signature=signature) + u = u.astype(result_t, copy=False) + s = s.astype(_realType(result_t), copy=False) + vh = vh.astype(result_t, copy=False) + return SVDResult(wrap(u), s, wrap(vh)) + else: + signature = 'D->d' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_svd_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + s = _umath_linalg.svd(a, signature=signature) + s = s.astype(_realType(result_t), copy=False) + return s + + +def _svdvals_dispatcher(x): + return (x,) + + +@array_function_dispatch(_svdvals_dispatcher) +def svdvals(x, /): + """ + Returns the singular values of a matrix (or a stack of matrices) ``x``. + When x is a stack of matrices, the function will compute the singular + values for each matrix in the stack. + + This function is Array API compatible. + + Calling ``np.svdvals(x)`` to get singular values is the same as + ``np.svd(x, compute_uv=False, hermitian=False)``. + + Parameters + ---------- + x : (..., M, N) array_like + Input array having shape (..., M, N) and whose last two + dimensions form matrices on which to perform singular value + decomposition. Should have a floating-point data type. + + Returns + ------- + out : ndarray + An array with shape (..., K) that contains the vector(s) + of singular values of length K, where K = min(M, N). + + See Also + -------- + scipy.linalg.svdvals : Compute singular values of a matrix. + + Examples + -------- + + >>> np.linalg.svdvals([[1, 2, 3, 4, 5], + ... [1, 4, 9, 16, 25], + ... [1, 8, 27, 64, 125]]) + array([146.68862757, 5.57510612, 0.60393245]) + + Determine the rank of a matrix using singular values: + + >>> s = np.linalg.svdvals([[1, 2, 3], + ... [2, 4, 6], + ... [-1, 1, -1]]); s + array([8.38434191e+00, 1.64402274e+00, 2.31534378e-16]) + >>> np.count_nonzero(s > 1e-10) # Matrix of rank 2 + 2 + + """ + return svd(x, compute_uv=False, hermitian=False) + + +def _cond_dispatcher(x, p=None): + return (x,) + + +@array_function_dispatch(_cond_dispatcher) +def cond(x, p=None): + """ + Compute the condition number of a matrix. + + This function is capable of returning the condition number using + one of seven different norms, depending on the value of `p` (see + Parameters below). + + Parameters + ---------- + x : (..., M, N) array_like + The matrix whose condition number is sought. + p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional + Order of the norm used in the condition number computation: + + ===== ============================ + p norm for matrices + ===== ============================ + None 2-norm, computed directly using the ``SVD`` + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 2-norm (largest sing. value) + -2 smallest singular value + ===== ============================ + + inf means the `numpy.inf` object, and the Frobenius norm is + the root-of-sum-of-squares norm. + + Returns + ------- + c : {float, inf} + The condition number of the matrix. 
May be infinite. + + See Also + -------- + numpy.linalg.norm + + Notes + ----- + The condition number of `x` is defined as the norm of `x` times the + norm of the inverse of `x` [1]_; the norm can be the usual L2-norm + (root-of-sum-of-squares) or one of a number of other matrix norms. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, + Academic Press, Inc., 1980, pg. 285. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> a + array([[ 1, 0, -1], + [ 0, 1, 0], + [ 1, 0, 1]]) + >>> LA.cond(a) + 1.4142135623730951 + >>> LA.cond(a, 'fro') + 3.1622776601683795 + >>> LA.cond(a, np.inf) + 2.0 + >>> LA.cond(a, -np.inf) + 1.0 + >>> LA.cond(a, 1) + 2.0 + >>> LA.cond(a, -1) + 1.0 + >>> LA.cond(a, 2) + 1.4142135623730951 + >>> LA.cond(a, -2) + 0.70710678118654746 # may vary + >>> (min(LA.svd(a, compute_uv=False)) * + ... min(LA.svd(LA.inv(a), compute_uv=False))) + 0.70710678118654746 # may vary + + """ + x = asarray(x) # in case we have a matrix + if _is_empty_2d(x): + raise LinAlgError("cond is not defined on empty arrays") + if p is None or p in {2, -2}: + s = svd(x, compute_uv=False) + with errstate(all='ignore'): + if p == -2: + r = s[..., -1] / s[..., 0] + else: + r = s[..., 0] / s[..., -1] + else: + # Call inv(x) ignoring errors. The result array will + # contain nans in the entries where inversion failed. + _assert_stacked_square(x) + t, result_t = _commonType(x) + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(all='ignore'): + invx = _umath_linalg.inv(x, signature=signature) + r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) + r = r.astype(result_t, copy=False) + + # Convert nans to infs unless the original array had nan entries + r = asarray(r) + nan_mask = isnan(r) + if nan_mask.any(): + nan_mask &= ~isnan(x).any(axis=(-2, -1)) + if r.ndim > 0: + r[nan_mask] = inf + elif nan_mask: + r[()] = inf + + # Convention is to return scalars instead of 0d arrays + if r.ndim == 0: + r = r[()] + + return r + + +def _matrix_rank_dispatcher(A, tol=None, hermitian=None, *, rtol=None): + return (A,) + + +@array_function_dispatch(_matrix_rank_dispatcher) +def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): + """ + Return matrix rank of array using SVD method + + Rank of the array is the number of singular values of the array that are + greater than `tol`. + + Parameters + ---------- + A : {(M,), (..., M, N)} array_like + Input vector or stack of matrices. + tol : (...) array_like, float, optional + Threshold below which SVD values are considered zero. If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M, N) * eps``. + hermitian : bool, optional + If True, `A` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + rtol : (...) array_like, float, optional + Parameter for the relative tolerance component. Only ``tol`` or + ``rtol`` can be set at a time. Defaults to ``max(M, N) * eps``. + + .. versionadded:: 2.0.0 + + Returns + ------- + rank : (...) array_like + Rank of A. + + Notes + ----- + The default threshold to detect rank deficiency is a test on the magnitude + of the singular values of `A`. 
By default, we identify singular values + less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency + (with the symbols defined above). This is the algorithm MATLAB uses [1]. + It also appears in *Numerical recipes* in the discussion of SVD solutions + for linear least squares [2]. + + This default threshold is designed to detect rank deficiency accounting + for the numerical errors of the SVD computation. Imagine that there + is a column in `A` that is an exact (in floating point) linear combination + of other columns in `A`. Computing the SVD on `A` will not produce + a singular value exactly equal to 0 in general: any difference of + the smallest SVD value from 0 will be caused by numerical imprecision + in the calculation of the SVD. Our threshold for small SVD values takes + this numerical imprecision into account, and the default threshold will + detect such numerical rank deficiency. The threshold may declare a matrix + `A` rank deficient even if the linear combination of some columns of `A` + is not exactly equal to another column of `A` but only numerically very + close to another column of `A`. + + We chose our default threshold because it is in wide use. Other thresholds + are possible. For example, elsewhere in the 2007 edition of *Numerical + recipes* there is an alternative threshold of ``S.max() * + np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe + this threshold as being based on "expected roundoff error" (p 71). + + The thresholds above deal with floating point roundoff error in the + calculation of the SVD. However, you may have more information about + the sources of error in `A` that would make you consider other tolerance + values to detect *effective* rank deficiency. The most useful measure + of the tolerance depends on the operations you intend to use on your + matrix. For example, if your data come from uncertain measurements with + uncertainties greater than floating point epsilon, choosing a tolerance + near that uncertainty may be preferable. The tolerance may be absolute + if the uncertainties are absolute rather than relative. + + References + ---------- + .. [1] MATLAB reference documentation, "Rank" + https://www.mathworks.com/help/techdoc/ref/rank.html + .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, + "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, + page 795. + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import matrix_rank + >>> matrix_rank(np.eye(4)) # Full rank matrix + 4 + >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix + >>> matrix_rank(I) + 3 + >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 + 1 + >>> matrix_rank(np.zeros((4,))) + 0 + """ + if rtol is not None and tol is not None: + raise ValueError("`tol` and `rtol` can't be both set.") + + A = asarray(A) + if A.ndim < 2: + return int(not all(A == 0)) + S = svd(A, compute_uv=False, hermitian=hermitian) + + if tol is None: + if rtol is None: + rtol = max(A.shape[-2:]) * finfo(S.dtype).eps + else: + rtol = asarray(rtol)[..., newaxis] + tol = S.max(axis=-1, keepdims=True) * rtol + else: + tol = asarray(tol)[..., newaxis] + + return count_nonzero(S > tol, axis=-1) + + +# Generalized inverse + +def _pinv_dispatcher(a, rcond=None, hermitian=None, *, rtol=None): + return (a,) + + +@array_function_dispatch(_pinv_dispatcher) +def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): + """ + Compute the (Moore-Penrose) pseudo-inverse of a matrix. 
+ + Calculate the generalized inverse of a matrix using its + singular-value decomposition (SVD) and including all + *large* singular values. + + Parameters + ---------- + a : (..., M, N) array_like + Matrix or stack of matrices to be pseudo-inverted. + rcond : (...) array_like of float, optional + Cutoff for small singular values. + Singular values less than or equal to + ``rcond * largest_singular_value`` are set to zero. + Broadcasts against the stack of matrices. Default: ``1e-15``. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + rtol : (...) array_like of float, optional + Same as `rcond`, but it's an Array API compatible parameter name. + Only `rcond` or `rtol` can be set at a time. If none of them are + provided then NumPy's ``1e-15`` default is used. If ``rtol=None`` + is passed then the API standard default is used. + + .. versionadded:: 2.0.0 + + Returns + ------- + B : (..., N, M) ndarray + The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so + is `B`. + + Raises + ------ + LinAlgError + If the SVD computation does not converge. + + See Also + -------- + scipy.linalg.pinv : Similar function in SciPy. + scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a + Hermitian matrix. + + Notes + ----- + The pseudo-inverse of a matrix A, denoted :math:`A^+`, is + defined as: "the matrix that 'solves' [the least-squares problem] + :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then + :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. + + It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular + value decomposition of A, then + :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are + orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting + of A's so-called singular values, (followed, typically, by + zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix + consisting of the reciprocals of A's singular values + (again, followed by zeros). [1]_ + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pp. 139-142. + + Examples + -------- + The following example checks that ``a * a+ * a == a`` and + ``a+ * a * a+ == a+``: + + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) + >>> B = np.linalg.pinv(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a, wrap = _makearray(a) + if rcond is None: + if rtol is _NoValue: + rcond = 1e-15 + elif rtol is None: + rcond = max(a.shape[-2:]) * finfo(a.dtype).eps + else: + rcond = rtol + elif rtol is not _NoValue: + raise ValueError("`rtol` and `rcond` can't be both set.") + else: + # NOTE: Deprecate `rcond` in a few versions. 
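+        # Reaching this branch means the caller passed `rcond` explicitly
+        # and left `rtol` at its sentinel, so that value is used as the
+        # cutoff below.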
+ pass + + rcond = asarray(rcond) + if _is_empty_2d(a): + m, n = a.shape[-2:] + res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) + return wrap(res) + a = a.conjugate() + u, s, vt = svd(a, full_matrices=False, hermitian=hermitian) + + # discard small singular values + cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) + large = s > cutoff + s = divide(1, s, where=large, out=s) + s[~large] = 0 + + res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) + return wrap(res) + + +# Determinant + + +@array_function_dispatch(_unary_dispatcher) +def slogdet(a): + """ + Compute the sign and (natural) logarithm of the determinant of an array. + + If an array has a very small or very large determinant, then a call to + `det` may overflow or underflow. This routine is more robust against such + issues, because it computes the logarithm of the determinant rather than + the determinant itself. + + Parameters + ---------- + a : (..., M, M) array_like + Input array, has to be a square 2-D array. + + Returns + ------- + A namedtuple with the following attributes: + + sign : (...) array_like + A number representing the sign of the determinant. For a real matrix, + this is 1, 0, or -1. For a complex matrix, this is a complex number + with absolute value 1 (i.e., it is on the unit circle), or else 0. + logabsdet : (...) array_like + The natural log of the absolute value of the determinant. + + If the determinant is zero, then `sign` will be 0 and `logabsdet` + will be -inf. In all cases, the determinant is equal to + ``sign * np.exp(logabsdet)``. + + See Also + -------- + det + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + Examples + -------- + The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> (sign, logabsdet) = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (-1, 0.69314718055994529) # may vary + >>> sign * np.exp(logabsdet) + -2.0 + + Computing log-determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> sign, logabsdet = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) + >>> sign * np.exp(logabsdet) + array([-2., -3., -8.]) + + This routine succeeds where ordinary `det` does not: + + >>> np.linalg.det(np.eye(500) * 0.1) + 0.0 + >>> np.linalg.slogdet(np.eye(500) * 0.1) + (1, -1151.2925464970228) + + """ + a = asarray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + real_t = _realType(result_t) + signature = 'D->Dd' if isComplexType(t) else 'd->dd' + sign, logdet = _umath_linalg.slogdet(a, signature=signature) + sign = sign.astype(result_t, copy=False) + logdet = logdet.astype(real_t, copy=False) + return SlogdetResult(sign, logdet) + + +@array_function_dispatch(_unary_dispatcher) +def det(a): + """ + Compute the determinant of an array. + + Parameters + ---------- + a : (..., M, M) array_like + Input array to compute determinants for. + + Returns + ------- + det : (...) array_like + Determinant of `a`. + + See Also + -------- + slogdet : Another way to represent the determinant, more suitable + for large matrices where underflow/overflow may occur. + scipy.linalg.det : Similar function in SciPy. 
+ + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + Examples + -------- + The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.linalg.det(a) + -2.0 # may vary + + Computing determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> np.linalg.det(a) + array([-2., -3., -8.]) + + """ + a = asarray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + r = _umath_linalg.det(a, signature=signature) + r = r.astype(result_t, copy=False) + return r + + +# Linear Least Squares + +def _lstsq_dispatcher(a, b, rcond=None): + return (a, b) + + +@array_function_dispatch(_lstsq_dispatcher) +def lstsq(a, b, rcond=None): + r""" + Return the least-squares solution to a linear matrix equation. + + Computes the vector `x` that approximately solves the equation + ``a @ x = b``. The equation may be under-, well-, or over-determined + (i.e., the number of linearly independent rows of `a` can be less than, + equal to, or greater than its number of linearly independent columns). + If `a` is square and of full rank, then `x` (but for round-off error) + is the "exact" solution of the equation. Else, `x` minimizes the + Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing + solutions, the one with the smallest 2-norm :math:`||x||` is returned. + + Parameters + ---------- + a : (M, N) array_like + "Coefficient" matrix. + b : {(M,), (M, K)} array_like + Ordinate or "dependent variable" values. If `b` is two-dimensional, + the least-squares solution is calculated for each of the `K` columns + of `b`. + rcond : float, optional + Cut-off ratio for small singular values of `a`. + For the purposes of rank determination, singular values are treated + as zero if they are smaller than `rcond` times the largest singular + value of `a`. + The default uses the machine precision times ``max(M, N)``. Passing + ``-1`` will use machine precision. + + .. versionchanged:: 2.0 + Previously, the default was ``-1``, but a warning was given that + this would change. + + Returns + ------- + x : {(N,), (N, K)} ndarray + Least-squares solution. If `b` is two-dimensional, + the solutions are in the `K` columns of `x`. + residuals : {(1,), (K,), (0,)} ndarray + Sums of squared residuals: Squared Euclidean 2-norm for each column in + ``b - a @ x``. + If the rank of `a` is < N or M <= N, this is an empty array. + If `b` is 1-dimensional, this is a (1,) shape array. + Otherwise the shape is (K,). + rank : int + Rank of matrix `a`. + s : (min(M, N),) ndarray + Singular values of `a`. + + Raises + ------ + LinAlgError + If computation does not converge. + + See Also + -------- + scipy.linalg.lstsq : Similar function in SciPy. + + Notes + ----- + If `b` is a matrix, then all array results are returned as matrices. + + Examples + -------- + Fit a line, ``y = mx + c``, through some noisy data-points: + + >>> import numpy as np + >>> x = np.array([0, 1, 2, 3]) + >>> y = np.array([-1, 0.2, 0.9, 2.1]) + + By examining the coefficients, we see that the line should have a + gradient of roughly 1 and cut the y-axis at, more or less, -1. + + We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` + and ``p = [[m], [c]]``. 
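+    (The ``x`` column of ``A`` multiplies the gradient ``m``, and the column
+    of ones multiplies the intercept ``c``.)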
+    Now use `lstsq` to solve for `p`:
+
+    >>> A = np.vstack([x, np.ones(len(x))]).T
+    >>> A
+    array([[ 0.,  1.],
+           [ 1.,  1.],
+           [ 2.,  1.],
+           [ 3.,  1.]])
+
+    >>> m, c = np.linalg.lstsq(A, y)[0]
+    >>> m, c
+    (1.0, -0.95) # may vary
+
+    Plot the data along with the fitted line:
+
+    >>> import matplotlib.pyplot as plt
+    >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
+    >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
+    >>> _ = plt.legend()
+    >>> plt.show()
+
+    """
+    a, _ = _makearray(a)
+    b, wrap = _makearray(b)
+    is_1d = b.ndim == 1
+    if is_1d:
+        b = b[:, newaxis]
+    _assert_2d(a, b)
+    m, n = a.shape[-2:]
+    m2, n_rhs = b.shape[-2:]
+    if m != m2:
+        raise LinAlgError('Incompatible dimensions')
+
+    t, result_t = _commonType(a, b)
+    result_real_t = _realType(result_t)
+
+    if rcond is None:
+        rcond = finfo(t).eps * max(n, m)
+
+    signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
+    if n_rhs == 0:
+        # lapack can't handle n_rhs = 0 - so allocate
+        # the array one larger in that axis
+        b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
+
+    with errstate(call=_raise_linalgerror_lstsq, invalid='call',
+                  over='ignore', divide='ignore', under='ignore'):
+        x, resids, rank, s = _umath_linalg.lstsq(a, b, rcond,
+                                                 signature=signature)
+    if m == 0:
+        x[...] = 0
+    if n_rhs == 0:
+        # remove the item we added
+        x = x[..., :n_rhs]
+        resids = resids[..., :n_rhs]
+
+    # remove the axis we added
+    if is_1d:
+        x = x.squeeze(axis=-1)
+        # we probably should squeeze resids too, but we can't
+        # without breaking compatibility.
+
+    # as documented
+    if rank != n or m <= n:
+        resids = array([], result_real_t)
+
+    # coerce output arrays
+    s = s.astype(result_real_t, copy=False)
+    resids = resids.astype(result_real_t, copy=False)
+    # Copying lets the memory in the intermediate result be freed
+    x = x.astype(result_t, copy=True)
+    return wrap(x), wrap(resids), rank, s
+
+
+def _multi_svd_norm(x, row_axis, col_axis, op, initial=None):
+    """Compute a function of the singular values of the 2-D matrices in `x`.
+
+    This is a private utility function used by `numpy.linalg.norm()`.
+
+    Parameters
+    ----------
+    x : ndarray
+    row_axis, col_axis : int
+        The axes of `x` that hold the 2-D matrices.
+    op : callable
+        This should be either `numpy.amin`, `numpy.amax` or `numpy.sum`.
+
+    Returns
+    -------
+    result : float or ndarray
+        If `x` is 2-D, the return value is a float.
+        Otherwise, it is an array with ``x.ndim - 2`` dimensions.
+        The returned values are the minimum, maximum, or sum of the
+        singular values of the matrices, depending on whether `op`
+        is `numpy.amin`, `numpy.amax` or `numpy.sum`.
+
+    """
+    y = moveaxis(x, (row_axis, col_axis), (-2, -1))
+    result = op(svd(y, compute_uv=False), axis=-1, initial=initial)
+    return result
+
+
+def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
+    return (x,)
+
+
+@array_function_dispatch(_norm_dispatcher)
+def norm(x, ord=None, axis=None, keepdims=False):
+    """
+    Matrix or vector norm.
+
+    This function is able to return one of eight different matrix norms,
+    or one of an infinite number of vector norms (described below), depending
+    on the value of the ``ord`` parameter.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
+        is None. If both `axis` and `ord` are None, the 2-norm of
+        ``x.ravel()`` will be returned.
+    ord : {int, float, inf, -inf, 'fro', 'nuc'}, optional
+        Order of the norm (see table under ``Notes`` for what values are
+        supported for matrices and vectors respectively).
inf means numpy's + `inf` object. The default is None. + axis : {None, int, 2-tuple of ints}, optional. + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default + is None. + + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + See Also + -------- + scipy.linalg.norm : Similar function in SciPy. + + Notes + ----- + For values of ``ord < 1``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + Both the Frobenius and nuclear norm orders are only defined for + matrices and raise a ValueError when ``x.ndim != 2``. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(a) + 7.745966692414834 + >>> LA.norm(b) + 7.745966692414834 + >>> LA.norm(b, 'fro') + 7.745966692414834 + >>> LA.norm(a, np.inf) + 4.0 + >>> LA.norm(b, np.inf) + 9.0 + >>> LA.norm(a, -np.inf) + 0.0 + >>> LA.norm(b, -np.inf) + 2.0 + + >>> LA.norm(a, 1) + 20.0 + >>> LA.norm(b, 1) + 7.0 + >>> LA.norm(a, -1) + -4.6566128774142013e-010 + >>> LA.norm(b, -1) + 6.0 + >>> LA.norm(a, 2) + 7.745966692414834 + >>> LA.norm(b, 2) + 7.3484692283495345 + + >>> LA.norm(a, -2) + 0.0 + >>> LA.norm(b, -2) + 1.8570331885190563e-016 # may vary + >>> LA.norm(a, 3) + 5.8480354764257312 # may vary + >>> LA.norm(a, -3) + 0.0 + + Using the `axis` argument to compute vector norms: + + >>> c = np.array([[ 1, 2, 3], + ... [-1, 1, 4]]) + >>> LA.norm(c, axis=0) + array([ 1.41421356, 2.23606798, 5. 
]) + >>> LA.norm(c, axis=1) + array([ 3.74165739, 4.24264069]) + >>> LA.norm(c, ord=1, axis=1) + array([ 6., 6.]) + + Using the `axis` argument to compute matrix norms: + + >>> m = np.arange(8).reshape(2,2,2) + >>> LA.norm(m, axis=(1,2)) + array([ 3.74165739, 11.22497216]) + >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) + (3.7416573867739413, 11.224972160321824) + + """ + x = asarray(x) + + if not issubclass(x.dtype.type, (inexact, object_)): + x = x.astype(float) + + # Immediately handle some default, simple, fast, and common cases. + if axis is None: + ndim = x.ndim + if ( + (ord is None) or + (ord in ('f', 'fro') and ndim == 2) or + (ord == 2 and ndim == 1) + ): + x = x.ravel(order='K') + if isComplexType(x.dtype.type): + x_real = x.real + x_imag = x.imag + sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag) + else: + sqnorm = x.dot(x) + ret = sqrt(sqnorm) + if keepdims: + ret = ret.reshape(ndim * [1]) + return ret + + # Normalize the `axis` argument to a tuple. + nd = x.ndim + if axis is None: + axis = tuple(range(nd)) + elif not isinstance(axis, tuple): + try: + axis = int(axis) + except Exception as e: + raise TypeError( + "'axis' must be None, an integer or a tuple of integers" + ) from e + axis = (axis,) + + if len(axis) == 1: + if ord == inf: + return abs(x).max(axis=axis, keepdims=keepdims, initial=0) + elif ord == -inf: + return abs(x).min(axis=axis, keepdims=keepdims) + elif ord == 0: + # Zero norm + return ( + (x != 0) + .astype(x.real.dtype) + .sum(axis=axis, keepdims=keepdims) + ) + elif ord == 1: + # special case for speedup + return add.reduce(abs(x), axis=axis, keepdims=keepdims) + elif ord is None or ord == 2: + # special case for speedup + s = (x.conj() * x).real + return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) + # None of the str-type keywords for ord ('fro', 'nuc') + # are valid for vectors + elif isinstance(ord, str): + raise ValueError(f"Invalid norm order '{ord}' for vectors") + else: + absx = abs(x) + absx **= ord + ret = add.reduce(absx, axis=axis, keepdims=keepdims) + ret **= reciprocal(ord, dtype=ret.dtype) + return ret + elif len(axis) == 2: + row_axis, col_axis = axis + row_axis = normalize_axis_index(row_axis, nd) + col_axis = normalize_axis_index(col_axis, nd) + if row_axis == col_axis: + raise ValueError('Duplicate axes given.') + if ord == 2: + ret = _multi_svd_norm(x, row_axis, col_axis, amax, 0) + elif ord == -2: + ret = _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis, initial=0) + elif ord == inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis, initial=0) + elif ord == -1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + elif ord == -inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + elif ord in [None, 'fro', 'f']: + ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) + elif ord == 'nuc': + ret = _multi_svd_norm(x, row_axis, col_axis, sum, 0) + else: + raise ValueError("Invalid norm order for matrices.") + if keepdims: + ret_shape = list(x.shape) + ret_shape[axis[0]] = 1 + ret_shape[axis[1]] = 1 + ret = ret.reshape(ret_shape) + return ret + else: + raise ValueError("Improper number of dimensions to norm.") + + +# multi_dot + +def _multidot_dispatcher(arrays, *, out=None): + yield from arrays + yield out + + +@array_function_dispatch(_multidot_dispatcher) +def 
multi_dot(arrays, *, out=None): + """ + Compute the dot product of two or more arrays in a single function call, + while automatically selecting the fastest evaluation order. + + `multi_dot` chains `numpy.dot` and uses optimal parenthesization + of the matrices [1]_ [2]_. Depending on the shapes of the matrices, + this can speed up the multiplication a lot. + + If the first argument is 1-D it is treated as a row vector. + If the last argument is 1-D it is treated as a column vector. + The other arguments must be 2-D. + + Think of `multi_dot` as:: + + def multi_dot(arrays): return functools.reduce(np.dot, arrays) + + + Parameters + ---------- + arrays : sequence of array_like + If the first argument is 1-D it is treated as row vector. + If the last argument is 1-D it is treated as column vector. + The other arguments must be 2-D. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a, b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of the supplied arrays. + + See Also + -------- + numpy.dot : dot multiplication with two arguments. + + References + ---------- + + .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 + .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication + + Examples + -------- + `multi_dot` allows you to write:: + + >>> import numpy as np + >>> from numpy.linalg import multi_dot + >>> # Prepare some data + >>> A = np.random.random((10000, 100)) + >>> B = np.random.random((100, 1000)) + >>> C = np.random.random((1000, 5)) + >>> D = np.random.random((5, 333)) + >>> # the actual dot multiplication + >>> _ = multi_dot([A, B, C, D]) + + instead of:: + + >>> _ = np.dot(np.dot(np.dot(A, B), C), D) + >>> # or + >>> _ = A.dot(B).dot(C).dot(D) + + Notes + ----- + The cost for a matrix multiplication can be calculated with the + following function:: + + def cost(A, B): + return A.shape[0] * A.shape[1] * B.shape[1] + + Assume we have three matrices + :math:`A_{10 \times 100}, B_{100 \times 5}, C_{5 \times 50}`. + + The costs for the two different parenthesizations are as follows:: + + cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 + cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 + + """ + n = len(arrays) + # optimization only makes sense for len(arrays) > 2 + if n < 2: + raise ValueError("Expecting at least two arrays.") + elif n == 2: + return dot(arrays[0], arrays[1], out=out) + + arrays = [asanyarray(a) for a in arrays] + + # save original ndim to reshape the result array into the proper form later + ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim + # Explicitly convert vectors to 2D arrays to keep the logic of the internal + # _multi_dot_* functions as simple as possible. 
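+    # A 1-D first argument is promoted to a row vector (1, N) and a 1-D last
+    # argument to a column vector (N, 1); the ndims saved above are used to
+    # strip these axes from the result again.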
+ if arrays[0].ndim == 1: + arrays[0] = atleast_2d(arrays[0]) + if arrays[-1].ndim == 1: + arrays[-1] = atleast_2d(arrays[-1]).T + _assert_2d(*arrays) + + # _multi_dot_three is much faster than _multi_dot_matrix_chain_order + if n == 3: + result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out) + else: + order = _multi_dot_matrix_chain_order(arrays) + result = _multi_dot(arrays, order, 0, n - 1, out=out) + + # return proper shape + if ndim_first == 1 and ndim_last == 1: + return result[0, 0] # scalar + elif ndim_first == 1 or ndim_last == 1: + return result.ravel() # 1-D + else: + return result + + +def _multi_dot_three(A, B, C, out=None): + """ + Find the best order for three arrays and do the multiplication. + + For three arguments `_multi_dot_three` is approximately 15 times faster + than `_multi_dot_matrix_chain_order` + + """ + a0, a1b0 = A.shape + b1c0, c1 = C.shape + # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1 + cost1 = a0 * b1c0 * (a1b0 + c1) + # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1 + cost2 = a1b0 * c1 * (a0 + b1c0) + + if cost1 < cost2: + return dot(dot(A, B), C, out=out) + else: + return dot(A, dot(B, C), out=out) + + +def _multi_dot_matrix_chain_order(arrays, return_costs=False): + """ + Return a np.array that encodes the optimal order of multiplications. + + The optimal order array is then used by `_multi_dot()` to do the + multiplication. + + Also return the cost matrix if `return_costs` is `True` + + The implementation CLOSELY follows Cormen, "Introduction to Algorithms", + Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. + + cost[i, j] = min([ + cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) + for k in range(i, j)]) + + """ + n = len(arrays) + # p stores the dimensions of the matrices + # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] + p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] + # m is a matrix of costs of the subproblems + # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} + m = zeros((n, n), dtype=double) + # s is the actual ordering + # s[i, j] is the value of k at which we split the product A_i..A_j + s = empty((n, n), dtype=intp) + + for l in range(1, n): + for i in range(n - l): + j = i + l + m[i, j] = inf + for k in range(i, j): + q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1] + if q < m[i, j]: + m[i, j] = q + s[i, j] = k # Note that Cormen uses 1-based index + + return (s, m) if return_costs else s + + +def _multi_dot(arrays, order, i, j, out=None): + """Actually do the multiplication with the given order.""" + if i == j: + # the initial call with non-None out should never get here + assert out is None + + return arrays[i] + else: + return dot(_multi_dot(arrays, order, i, order[i, j]), + _multi_dot(arrays, order, order[i, j] + 1, j), + out=out) + + +# diagonal + +def _diagonal_dispatcher(x, /, *, offset=None): + return (x,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(x, /, *, offset=0): + """ + Returns specified diagonals of a matrix (or a stack of matrices) ``x``. + + This function is Array API compatible, contrary to + :py:func:`numpy.diagonal`, the matrix is assumed + to be defined by the last two dimensions. + + Parameters + ---------- + x : (...,M,N) array_like + Input array having shape (..., M, N) and whose innermost two + dimensions form MxN matrices. + offset : int, optional + Offset specifying the off-diagonal relative to the main diagonal, + where:: + + * offset = 0: the main diagonal. 
+ * offset > 0: off-diagonal above the main diagonal. + * offset < 0: off-diagonal below the main diagonal. + + Returns + ------- + out : (...,min(N,M)) ndarray + An array containing the diagonals and whose shape is determined by + removing the last two dimensions and appending a dimension equal to + the size of the resulting diagonals. The returned array must have + the same data type as ``x``. + + See Also + -------- + numpy.diagonal + + Examples + -------- + >>> a = np.arange(4).reshape(2, 2); a + array([[0, 1], + [2, 3]]) + >>> np.linalg.diagonal(a) + array([0, 3]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2, 2, 2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.linalg.diagonal(a) + array([[0, 3], + [4, 7]]) + + Diagonals adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(a, offset=1) # First superdiagonal + array([1, 5]) + >>> np.linalg.diagonal(a, offset=2) # Second superdiagonal + array([2]) + >>> np.linalg.diagonal(a, offset=-1) # First subdiagonal + array([3, 7]) + >>> np.linalg.diagonal(a, offset=-2) # Second subdiagonal + array([6]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(np.fliplr(a)) # Horizontal flip + array([2, 4, 6]) + >>> np.linalg.diagonal(np.flipud(a)) # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + + """ + return _core_diagonal(x, offset, axis1=-2, axis2=-1) + + +# trace + +def _trace_dispatcher(x, /, *, offset=None, dtype=None): + return (x,) + + +@array_function_dispatch(_trace_dispatcher) +def trace(x, /, *, offset=0, dtype=None): + """ + Returns the sum along the specified diagonals of a matrix + (or a stack of matrices) ``x``. + + This function is Array API compatible, contrary to + :py:func:`numpy.trace`. + + Parameters + ---------- + x : (...,M,N) array_like + Input array having shape (..., M, N) and whose innermost two + dimensions form MxN matrices. + offset : int, optional + Offset specifying the off-diagonal relative to the main diagonal, + where:: + + * offset = 0: the main diagonal. + * offset > 0: off-diagonal above the main diagonal. + * offset < 0: off-diagonal below the main diagonal. + + dtype : dtype, optional + Data type of the returned array. + + Returns + ------- + out : ndarray + An array containing the traces and whose shape is determined by + removing the last two dimensions and storing the traces in the last + array dimension. For example, if x has rank k and shape: + (I, J, K, ..., L, M, N), then an output array has rank k-2 and shape: + (I, J, K, ..., L) where:: + + out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :]) + + The returned array must have a data type as described by the dtype + parameter above. + + See Also + -------- + numpy.trace + + Examples + -------- + >>> np.linalg.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> np.linalg.trace(a) + array([3, 11]) + + Trace is computed with the last two axes as the 2-d sub-arrays. + This behavior differs from :py:func:`numpy.trace` which uses the first two + axes by default. 
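+
+    For instance, for the ``a`` of shape ``(2, 2, 2)`` above, ``numpy.trace``
+    sums over the first two axes instead, so that
+    ``out[k] = a[0, 0, k] + a[1, 1, k]``:
+
+    >>> np.trace(a)
+    array([6, 8])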
+ + >>> a = np.arange(24).reshape((3, 2, 2, 2)) + >>> np.linalg.trace(a).shape + (3, 2) + + Traces adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape((3, 3)); a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.trace(a, offset=1) # First superdiagonal + 6 + >>> np.linalg.trace(a, offset=2) # Second superdiagonal + 2 + >>> np.linalg.trace(a, offset=-1) # First subdiagonal + 10 + >>> np.linalg.trace(a, offset=-2) # Second subdiagonal + 6 + + """ + return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype) + + +# cross + +def _cross_dispatcher(x1, x2, /, *, axis=None): + return (x1, x2,) + + +@array_function_dispatch(_cross_dispatcher) +def cross(x1, x2, /, *, axis=-1): + """ + Returns the cross product of 3-element vectors. + + If ``x1`` and/or ``x2`` are multi-dimensional arrays, then + the cross-product of each pair of corresponding 3-element vectors + is independently computed. + + This function is Array API compatible, contrary to + :func:`numpy.cross`. + + Parameters + ---------- + x1 : array_like + The first input array. + x2 : array_like + The second input array. Must be compatible with ``x1`` for all + non-compute axes. The size of the axis over which to compute + the cross-product must be the same size as the respective axis + in ``x1``. + axis : int, optional + The axis (dimension) of ``x1`` and ``x2`` containing the vectors for + which to compute the cross-product. Default: ``-1``. + + Returns + ------- + out : ndarray + An array containing the cross products. + + See Also + -------- + numpy.cross + + Examples + -------- + Vector cross-product. + + >>> x = np.array([1, 2, 3]) + >>> y = np.array([4, 5, 6]) + >>> np.linalg.cross(x, y) + array([-3, 6, -3]) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.linalg.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + >>> x = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([[4, 5], [6, 1], [2, 3]]) + >>> np.linalg.cross(x, y, axis=0) + array([[-24, 6], + [ 18, 24], + [-6, -18]]) + + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + + if x1.shape[axis] != 3 or x2.shape[axis] != 3: + raise ValueError( + "Both input arrays must be (arrays of) 3-dimensional vectors, " + f"but they are {x1.shape[axis]} and {x2.shape[axis]} " + "dimensional instead." + ) + + return _core_cross(x1, x2, axis=axis) + + +# matmul + +def _matmul_dispatcher(x1, x2, /): + return (x1, x2) + + +@array_function_dispatch(_matmul_dispatcher) +def matmul(x1, x2, /): + """ + Computes the matrix product. + + This function is Array API compatible, contrary to + :func:`numpy.matmul`. + + Parameters + ---------- + x1 : array_like + The first input array. + x2 : array_like + The second input array. + + Returns + ------- + out : ndarray + The matrix product of the inputs. + This is a scalar only when both ``x1``, ``x2`` are 1-d vectors. + + Raises + ------ + ValueError + If the last dimension of ``x1`` is not the same size as + the second-to-last dimension of ``x2``. + + If a scalar value is passed in. + + See Also + -------- + numpy.matmul + + Examples + -------- + For 2-D arrays it is the matrix product: + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([[4, 1], + ... [2, 2]]) + >>> np.linalg.matmul(a, b) + array([[4, 1], + [2, 2]]) + + For 2-D mixed with 1-D, the result is the usual. 
+ + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([1, 2]) + >>> np.linalg.matmul(a, b) + array([1, 2]) + >>> np.linalg.matmul(b, a) + array([1, 2]) + + + Broadcasting is conventional for stacks of arrays + + >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) + >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) + >>> np.linalg.matmul(a,b).shape + (2, 2, 2) + >>> np.linalg.matmul(a, b)[0, 1, 1] + 98 + >>> sum(a[0, 1, :] * b[0 , :, 1]) + 98 + + Vector, vector returns the scalar inner product, but neither argument + is complex-conjugated: + + >>> np.linalg.matmul([2j, 3j], [2j, 3j]) + (-13+0j) + + Scalar multiplication raises an error. + + >>> np.linalg.matmul([1,2], 3) + Traceback (most recent call last): + ... + ValueError: matmul: Input operand 1 does not have enough dimensions ... + + """ + return _core_matmul(x1, x2) + + +# tensordot + +def _tensordot_dispatcher(x1, x2, /, *, axes=None): + return (x1, x2) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(x1, x2, /, *, axes=2): + return _core_tensordot(x1, x2, axes=axes) + + +tensordot.__doc__ = _core_tensordot.__doc__ + + +# matrix_transpose + +def _matrix_transpose_dispatcher(x): + return (x,) + +@array_function_dispatch(_matrix_transpose_dispatcher) +def matrix_transpose(x, /): + return _core_matrix_transpose(x) + + +matrix_transpose.__doc__ = f"""{_core_matrix_transpose.__doc__} + + Notes + ----- + This function is an alias of `numpy.matrix_transpose`. +""" + + +# matrix_norm + +def _matrix_norm_dispatcher(x, /, *, keepdims=None, ord=None): + return (x,) + +@array_function_dispatch(_matrix_norm_dispatcher) +def matrix_norm(x, /, *, keepdims=False, ord="fro"): + """ + Computes the matrix norm of a matrix (or a stack of matrices) ``x``. + + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array having shape (..., M, N) and whose two innermost + dimensions form ``MxN`` matrices. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in + the result as dimensions with size one. Default: False. + ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional + The order of the norm. For details see the table under ``Notes`` + in `numpy.linalg.norm`. + + See Also + -------- + numpy.linalg.norm : Generic norm function + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.matrix_norm(b) + 7.745966692414834 + >>> LA.matrix_norm(b, ord='fro') + 7.745966692414834 + >>> LA.matrix_norm(b, ord=np.inf) + 9.0 + >>> LA.matrix_norm(b, ord=-np.inf) + 2.0 + + >>> LA.matrix_norm(b, ord=1) + 7.0 + >>> LA.matrix_norm(b, ord=-1) + 6.0 + >>> LA.matrix_norm(b, ord=2) + 7.3484692283495345 + >>> LA.matrix_norm(b, ord=-2) + 1.8570331885190563e-016 # may vary + + """ + x = asanyarray(x) + return norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) + + +# vector_norm + +def _vector_norm_dispatcher(x, /, *, axis=None, keepdims=None, ord=None): + return (x,) + +@array_function_dispatch(_vector_norm_dispatcher) +def vector_norm(x, /, *, axis=None, keepdims=False, ord=2): + """ + Computes the vector norm of a vector (or batch of vectors) ``x``. + + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array. + axis : {None, int, 2-tuple of ints}, optional + If an integer, ``axis`` specifies the axis (dimension) along which + to compute vector norms. 
If an n-tuple, ``axis`` specifies the axes + (dimensions) along which to compute batched vector norms. If ``None``, + the vector norm must be computed over all array values (i.e., + equivalent to computing the vector norm of a flattened array). + Default: ``None``. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in + the result as dimensions with size one. Default: False. + ord : {int, float, inf, -inf}, optional + The order of the norm. For details see the table under ``Notes`` + in `numpy.linalg.norm`. + + See Also + -------- + numpy.linalg.norm : Generic norm function + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) + 1 + >>> a + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> b = a.reshape((3, 3)) + >>> b + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> LA.vector_norm(b) + 16.881943016134134 + >>> LA.vector_norm(b, ord=np.inf) + 9.0 + >>> LA.vector_norm(b, ord=-np.inf) + 1.0 + + >>> LA.vector_norm(b, ord=0) + 9.0 + >>> LA.vector_norm(b, ord=1) + 45.0 + >>> LA.vector_norm(b, ord=-1) + 0.3534857623790153 + >>> LA.vector_norm(b, ord=2) + 16.881943016134134 + >>> LA.vector_norm(b, ord=-2) + 0.8058837395885292 + + """ + x = asanyarray(x) + shape = list(x.shape) + if axis is None: + # Note: np.linalg.norm() doesn't handle 0-D arrays + x = x.ravel() + _axis = 0 + elif isinstance(axis, tuple): + # Note: The axis argument supports any number of axes, whereas + # np.linalg.norm() only supports a single axis for vector norm. + normalized_axis = normalize_axis_tuple(axis, x.ndim) + rest = tuple(i for i in range(x.ndim) if i not in normalized_axis) + newshape = axis + rest + x = _core_transpose(x, newshape).reshape( + ( + prod([x.shape[i] for i in axis], dtype=int), + *[x.shape[i] for i in rest] + ) + ) + _axis = 0 + else: + _axis = axis + + res = norm(x, axis=_axis, ord=ord) + + if keepdims: + # We can't reuse np.linalg.norm(keepdims) because of the reshape hacks + # above to avoid matrix norm logic. + _axis = normalize_axis_tuple( + range(len(shape)) if axis is None else axis, len(shape) + ) + for i in _axis: + shape[i] = 1 + res = res.reshape(tuple(shape)) + + return res + + +# vecdot + +def _vecdot_dispatcher(x1, x2, /, *, axis=None): + return (x1, x2) + +@array_function_dispatch(_vecdot_dispatcher) +def vecdot(x1, x2, /, *, axis=-1): + """ + Computes the vector dot product. + + This function is restricted to arguments compatible with the Array API, + contrary to :func:`numpy.vecdot`. + + Let :math:`\\mathbf{a}` be a vector in ``x1`` and :math:`\\mathbf{b}` be + a corresponding vector in ``x2``. The dot product is defined as: + + .. math:: + \\mathbf{a} \\cdot \\mathbf{b} = \\sum_{i=0}^{n-1} \\overline{a_i}b_i + + over the dimension specified by ``axis`` and where :math:`\\overline{a_i}` + denotes the complex conjugate if :math:`a_i` is complex and the identity + otherwise. + + Parameters + ---------- + x1 : array_like + First input array. + x2 : array_like + Second input array. + axis : int, optional + Axis over which to compute the dot product. Default: ``-1``. + + Returns + ------- + output : ndarray + The vector dot product of the input. + + See Also + -------- + numpy.vecdot + + Examples + -------- + Get the projected size along a given normal for an array of vectors. 
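+    Here the normal ``n`` is already a unit vector, so the dot product gives
+    the signed length of each projection directly: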
+
+    >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]])
+    >>> n = np.array([0., 0.6, 0.8])
+    >>> np.linalg.vecdot(v, n)
+    array([ 3.,  8., 10.])
+
+    """
+    return _core_vecdot(x1, x2, axis=axis)
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/_linalg.pyi b/.venv/lib/python3.12/site-packages/numpy/linalg/_linalg.pyi
new file mode 100644
index 0000000..3f318a8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/linalg/_linalg.pyi
@@ -0,0 +1,482 @@
+from collections.abc import Iterable
+from typing import (
+    Any,
+    NamedTuple,
+    Never,
+    SupportsIndex,
+    SupportsInt,
+    TypeAlias,
+    TypeVar,
+    overload,
+)
+from typing import Literal as L
+
+import numpy as np
+from numpy import (
+    complex128,
+    complexfloating,
+    float64,
+    # other
+    floating,
+    int32,
+    object_,
+    signedinteger,
+    timedelta64,
+    unsignedinteger,
+    # re-exports
+    vecdot,
+)
+from numpy._core.fromnumeric import matrix_transpose
+from numpy._core.numeric import tensordot
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NDArray,
+    _ArrayLike,
+    _ArrayLikeBool_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeObject_co,
+    _ArrayLikeTD64_co,
+    _ArrayLikeUInt_co,
+)
+from numpy.linalg import LinAlgError
+
+__all__ = [
+    "matrix_power",
+    "solve",
+    "tensorsolve",
+    "tensorinv",
+    "inv",
+    "cholesky",
+    "eigvals",
+    "eigvalsh",
+    "pinv",
+    "slogdet",
+    "det",
+    "svd",
+    "svdvals",
+    "eig",
+    "eigh",
+    "lstsq",
+    "norm",
+    "qr",
+    "cond",
+    "matrix_rank",
+    "LinAlgError",
+    "multi_dot",
+    "trace",
+    "diagonal",
+    "cross",
+    "outer",
+    "tensordot",
+    "matmul",
+    "matrix_transpose",
+    "matrix_norm",
+    "vector_norm",
+    "vecdot",
+]
+
+_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
+
+_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"]
+
+###
+
+fortran_int = np.intc
+
+class EigResult(NamedTuple):
+    eigenvalues: NDArray[Any]
+    eigenvectors: NDArray[Any]
+
+class EighResult(NamedTuple):
+    eigenvalues: NDArray[Any]
+    eigenvectors: NDArray[Any]
+
+class QRResult(NamedTuple):
+    Q: NDArray[Any]
+    R: NDArray[Any]
+
+class SlogdetResult(NamedTuple):
+    # TODO: `sign` and `logabsdet` are scalars for 2D input arrays and
+    # `(x.ndim - 2)`-dimensional arrays otherwise
+    sign: Any
+    logabsdet: Any
+
+class SVDResult(NamedTuple):
+    U: NDArray[Any]
+    S: NDArray[Any]
+    Vh: NDArray[Any]
+
+@overload
+def tensorsolve(
+    a: _ArrayLikeInt_co,
+    b: _ArrayLikeInt_co,
+    axes: Iterable[int] | None = ...,
+) -> NDArray[float64]: ...
+@overload
+def tensorsolve(
+    a: _ArrayLikeFloat_co,
+    b: _ArrayLikeFloat_co,
+    axes: Iterable[int] | None = ...,
+) -> NDArray[floating]: ...
+@overload
+def tensorsolve(
+    a: _ArrayLikeComplex_co,
+    b: _ArrayLikeComplex_co,
+    axes: Iterable[int] | None = ...,
+) -> NDArray[complexfloating]: ...
+
+@overload
+def solve(
+    a: _ArrayLikeInt_co,
+    b: _ArrayLikeInt_co,
+) -> NDArray[float64]: ...
+@overload
+def solve(
+    a: _ArrayLikeFloat_co,
+    b: _ArrayLikeFloat_co,
+) -> NDArray[floating]: ...
+@overload
+def solve(
+    a: _ArrayLikeComplex_co,
+    b: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating]: ...
+
+@overload
+def tensorinv(
+    a: _ArrayLikeInt_co,
+    ind: int = ...,
+) -> NDArray[float64]: ...
+@overload
+def tensorinv(
+    a: _ArrayLikeFloat_co,
+    ind: int = ...,
+) -> NDArray[floating]: ...
+@overload
+def tensorinv(
+    a: _ArrayLikeComplex_co,
+    ind: int = ...,
+) -> NDArray[complexfloating]: ...
+
+@overload
+def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ...
+@overload
+def inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ...
+@overload +def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +# TODO: The supported input and output dtypes are dependent on the value of `n`. +# For example: `n < 0` always casts integer types to float64 +def matrix_power( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + n: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... +@overload +def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ... +@overload +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... + +@overload +def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never]) -> NDArray[Any]: ... +@overload +def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co) -> NDArray[np.bool]: ... +@overload +def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... +@overload +def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... +@overload +def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co) -> NDArray[floating]: ... +@overload +def outer( + x1: _ArrayLikeComplex_co, + x2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def outer( + x1: _ArrayLikeTD64_co, + x2: _ArrayLikeTD64_co, + out: None = ..., +) -> NDArray[timedelta64]: ... +@overload +def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co) -> NDArray[object_]: ... +@overload +def outer( + x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, +) -> _ArrayT: ... + +@overload +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... +@overload +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... +@overload +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... + +@overload +def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... +@overload +def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ... +@overload +def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... +@overload +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating]: ... + +@overload +def eig(a: _ArrayLikeInt_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeFloat_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeComplex_co) -> EigResult: ... + +@overload +def eigh( + a: _ArrayLikeInt_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeFloat_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeComplex_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... + +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeFloat_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[False] = ..., + hermitian: bool = ..., +) -> NDArray[float64]: ... 
+@overload
+def svd(
+    a: _ArrayLikeComplex_co,
+    full_matrices: bool = ...,
+    compute_uv: L[False] = ...,
+    hermitian: bool = ...,
+) -> NDArray[floating]: ...
+
+def svdvals(
+    x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co
+) -> NDArray[floating]: ...
+
+# TODO: Returns a scalar for 2D arrays and
+# a `(x.ndim - 2)`-dimensional array otherwise
+def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = ...) -> Any: ...
+
+# TODO: Returns `int` for <2D arrays and `intp` otherwise
+def matrix_rank(
+    A: _ArrayLikeComplex_co,
+    tol: _ArrayLikeFloat_co | None = ...,
+    hermitian: bool = ...,
+    *,
+    rtol: _ArrayLikeFloat_co | None = ...,
+) -> Any: ...
+
+@overload
+def pinv(
+    a: _ArrayLikeInt_co,
+    rcond: _ArrayLikeFloat_co = ...,
+    hermitian: bool = ...,
+) -> NDArray[float64]: ...
+@overload
+def pinv(
+    a: _ArrayLikeFloat_co,
+    rcond: _ArrayLikeFloat_co = ...,
+    hermitian: bool = ...,
+) -> NDArray[floating]: ...
+@overload
+def pinv(
+    a: _ArrayLikeComplex_co,
+    rcond: _ArrayLikeFloat_co = ...,
+    hermitian: bool = ...,
+) -> NDArray[complexfloating]: ...
+
+# TODO: Returns a 2-tuple of scalars for 2D arrays and
+# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
+def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ...
+
+# TODO: Returns a 2-tuple of scalars for 2D arrays and
+# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
+def det(a: _ArrayLikeComplex_co) -> Any: ...
+
+@overload
+def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = ...) -> tuple[
+    NDArray[float64],
+    NDArray[float64],
+    int32,
+    NDArray[float64],
+]: ...
+@overload
+def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = ...) -> tuple[
+    NDArray[floating],
+    NDArray[floating],
+    int32,
+    NDArray[floating],
+]: ...
+@overload
+def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = ...) -> tuple[
+    NDArray[complexfloating],
+    NDArray[floating],
+    int32,
+    NDArray[floating],
+]: ...
+
+@overload
+def norm(
+    x: ArrayLike,
+    ord: float | L["fro", "nuc"] | None = ...,
+    axis: None = ...,
+    keepdims: bool = ...,
+) -> floating: ...
+@overload
+def norm(
+    x: ArrayLike,
+    ord: float | L["fro", "nuc"] | None = ...,
+    axis: SupportsInt | SupportsIndex | tuple[int, ...] = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+
+@overload
+def matrix_norm(
+    x: ArrayLike,
+    /,
+    *,
+    ord: float | L["fro", "nuc"] | None = ...,
+    keepdims: bool = ...,
+) -> floating: ...
+@overload
+def matrix_norm(
+    x: ArrayLike,
+    /,
+    *,
+    ord: float | L["fro", "nuc"] | None = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+
+@overload
+def vector_norm(
+    x: ArrayLike,
+    /,
+    *,
+    axis: None = ...,
+    ord: float | None = ...,
+    keepdims: bool = ...,
+) -> floating: ...
+@overload
+def vector_norm(
+    x: ArrayLike,
+    /,
+    *,
+    axis: SupportsInt | SupportsIndex | tuple[int, ...] = ...,
+    ord: float | None = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+
+# TODO: Returns a scalar or array
+def multi_dot(
+    arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co],
+    *,
+    out: NDArray[Any] | None = ...,
+) -> Any: ...
+
+def diagonal(
+    x: ArrayLike,  # >= 2D array
+    /,
+    *,
+    offset: SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+def trace(
+    x: ArrayLike,  # >= 2D array
+    /,
+    *,
+    offset: SupportsIndex = ...,
+    dtype: DTypeLike = ...,
+) -> Any: ...
+
+@overload
+def cross(
+    x1: _ArrayLikeUInt_co,
+    x2: _ArrayLikeUInt_co,
+    /,
+    *,
+    axis: int = ...,
+) -> NDArray[unsignedinteger]: ...
+@overload
+def cross(
+    x1: _ArrayLikeInt_co,
+    x2: _ArrayLikeInt_co,
+    /,
+    *,
+    axis: int = ...,
+) -> NDArray[signedinteger]: ...
+@overload
+def cross(
+    x1: _ArrayLikeFloat_co,
+    x2: _ArrayLikeFloat_co,
+    /,
+    *,
+    axis: int = ...,
+) -> NDArray[floating]: ...
+@overload
+def cross(
+    x1: _ArrayLikeComplex_co,
+    x2: _ArrayLikeComplex_co,
+    /,
+    *,
+    axis: int = ...,
+) -> NDArray[complexfloating]: ...
+
+@overload
+def matmul(
+    x1: _ArrayLikeInt_co,
+    x2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger]: ...
+@overload
+def matmul(
+    x1: _ArrayLikeUInt_co,
+    x2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger]: ...
+@overload
+def matmul(
+    x1: _ArrayLikeFloat_co,
+    x2: _ArrayLikeFloat_co,
+) -> NDArray[floating]: ...
+@overload
+def matmul(
+    x1: _ArrayLikeComplex_co,
+    x2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 0000000..6fbf940
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/_umath_linalg.pyi b/.venv/lib/python3.12/site-packages/numpy/linalg/_umath_linalg.pyi
new file mode 100644
index 0000000..cd07acd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/linalg/_umath_linalg.pyi
@@ -0,0 +1,61 @@
+from typing import Final
+from typing import Literal as L
+
+import numpy as np
+from numpy._typing._ufunc import _GUFunc_Nin2_Nout1
+
+__version__: Final[str] = ...
+_ilp64: Final[bool] = ...
+
+###
+# 1 -> 1
+
+# (m,m) -> ()
+det: Final[np.ufunc] = ...
+# (m,m) -> (m)
+eigvals: Final[np.ufunc] = ...
+eigvalsh_lo: Final[np.ufunc] = ...
+eigvalsh_up: Final[np.ufunc] = ...
+# (m,m) -> (m,m)
+cholesky_lo: Final[np.ufunc] = ...
+cholesky_up: Final[np.ufunc] = ...
+inv: Final[np.ufunc] = ...
+# (m,n) -> (p)
+qr_r_raw: Final[np.ufunc] = ...
+svd: Final[np.ufunc] = ...
+
+###
+# 1 -> 2
+
+# (m,m) -> (), ()
+slogdet: Final[np.ufunc] = ...
+# (m,m) -> (m), (m,m)
+eig: Final[np.ufunc] = ...
+eigh_lo: Final[np.ufunc] = ...
+eigh_up: Final[np.ufunc] = ...
+
+###
+# 2 -> 1
+
+# (m,n), (n) -> (m,m)
+qr_complete: Final[_GUFunc_Nin2_Nout1[L["qr_complete"], L[2], None, L["(m,n),(n)->(m,m)"]]] = ...
+# (m,n), (k) -> (m,k)
+qr_reduced: Final[_GUFunc_Nin2_Nout1[L["qr_reduced"], L[2], None, L["(m,n),(k)->(m,k)"]]] = ...
+# (m,m), (m,n) -> (m,n)
+solve: Final[_GUFunc_Nin2_Nout1[L["solve"], L[4], None, L["(m,m),(m,n)->(m,n)"]]] = ...
+# (m,m), (m) -> (m)
+solve1: Final[_GUFunc_Nin2_Nout1[L["solve1"], L[4], None, L["(m,m),(m)->(m)"]]] = ...
+
+###
+# 1 -> 3
+
+# (m,n) -> (m,m), (p), (n,n)
+svd_f: Final[np.ufunc] = ...
+# (m,n) -> (m,p), (p), (p,n)
+svd_s: Final[np.ufunc] = ...
+
+###
+# 3 -> 4
+
+# (m,n), (m,k), () -> (n,k), (k), (), (p)
+lstsq: Final[np.ufunc] = ...
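+
+# Illustrative note (editorial, not from the numpy sources): the shape
+# comments above are gufunc core signatures, so any leading dimensions
+# broadcast. A minimal sketch, assuming the private module is importable:
+#
+#   import numpy as np
+#   from numpy.linalg import _umath_linalg
+#   a = np.stack([np.eye(2), 2.0 * np.eye(2)])  # shape (2, 2, 2)
+#   _umath_linalg.det(a)                        # (m,m)->() per matrix: [1., 4.]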
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..aa39da7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/lapack_lite.pyi b/.venv/lib/python3.12/site-packages/numpy/linalg/lapack_lite.pyi new file mode 100644 index 0000000..835293a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/linalg/lapack_lite.pyi @@ -0,0 +1,141 @@ +from typing import Final, TypedDict, type_check_only + +import numpy as np +from numpy._typing import NDArray + +from ._linalg import fortran_int + +### + +@type_check_only +class _GELSD(TypedDict): + m: int + n: int + nrhs: int + lda: int + ldb: int + rank: int + lwork: int + info: int + +@type_check_only +class _DGELSD(_GELSD): + dgelsd_: int + rcond: float + +@type_check_only +class _ZGELSD(_GELSD): + zgelsd_: int + +@type_check_only +class _GEQRF(TypedDict): + m: int + n: int + lda: int + lwork: int + info: int + +@type_check_only +class _DGEQRF(_GEQRF): + dgeqrf_: int + +@type_check_only +class _ZGEQRF(_GEQRF): + zgeqrf_: int + +@type_check_only +class _DORGQR(TypedDict): + dorgqr_: int + info: int + +@type_check_only +class _ZUNGQR(TypedDict): + zungqr_: int + info: int + +### + +_ilp64: Final[bool] = ... + +def dgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.float64], + lda: int, + b: NDArray[np.float64], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.float64], + lwork: int, + iwork: NDArray[fortran_int], + info: int, +) -> _DGELSD: ... +def zgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.complex128], + lda: int, + b: NDArray[np.complex128], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.complex128], + lwork: int, + rwork: NDArray[np.float64], + iwork: NDArray[fortran_int], + info: int, +) -> _ZGELSD: ... + +# +def dgeqrf( + m: int, + n: int, + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.float64], # out, shape: (min(m, n),) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DGEQRF: ... +def zgeqrf( + m: int, + n: int, + a: NDArray[np.complex128], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.complex128], # out, shape: (min(m, n),) + work: NDArray[np.complex128], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _ZGEQRF: ... + +# +def dorgqr( + m: int, # >=0 + n: int, # m >= n >= 0 + k: int, # n >= k >= 0 + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, # >= max(1, m) + tau: NDArray[np.float64], # in, shape: (k,) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DORGQR: ... +def zungqr( + m: int, + n: int, + k: int, + a: NDArray[np.complex128], + lda: int, + tau: NDArray[np.complex128], + work: NDArray[np.complex128], + lwork: int, + info: int, +) -> _ZUNGQR: ... + +# +def xerbla(srname: object, info: int) -> None: ... 
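+
+# Illustrative note (editorial, not from the numpy sources): these wrappers
+# keep the raw LAPACK conventions. Each function returns a dict of the output
+# arguments; ``info == 0`` means success, while ``info < 0`` reports that
+# argument ``-info`` was invalid (routed through ``xerbla``). As in LAPACK,
+# the ``*geqrf`` routines support a workspace query: pass ``lwork=-1`` and
+# read the optimal workspace size back from ``work[0]``.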
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/linalg.py b/.venv/lib/python3.12/site-packages/numpy/linalg/linalg.py
new file mode 100644
index 0000000..81c80d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/linalg/linalg.py
@@ -0,0 +1,17 @@
+def __getattr__(attr_name):
+    import warnings
+
+    from numpy.linalg import _linalg
+    ret = getattr(_linalg, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.linalg.linalg' has no attribute {attr_name}")
+    warnings.warn(
+        "The numpy.linalg.linalg module has been made private and renamed "
+        "to numpy.linalg._linalg. All public functions exported by it are "
+        f"available from numpy.linalg. Please use numpy.linalg.{attr_name} "
+        "instead.",
+        DeprecationWarning,
+        stacklevel=3
+    )
+    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/linalg.pyi b/.venv/lib/python3.12/site-packages/numpy/linalg/linalg.pyi
new file mode 100644
index 0000000..dbe9bec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/linalg/linalg.pyi
@@ -0,0 +1,69 @@
+from ._linalg import (
+    LinAlgError,
+    cholesky,
+    cond,
+    cross,
+    det,
+    diagonal,
+    eig,
+    eigh,
+    eigvals,
+    eigvalsh,
+    inv,
+    lstsq,
+    matmul,
+    matrix_norm,
+    matrix_power,
+    matrix_rank,
+    matrix_transpose,
+    multi_dot,
+    norm,
+    outer,
+    pinv,
+    qr,
+    slogdet,
+    solve,
+    svd,
+    svdvals,
+    tensordot,
+    tensorinv,
+    tensorsolve,
+    trace,
+    vecdot,
+    vector_norm,
+)
+
+__all__ = [
+    "LinAlgError",
+    "cholesky",
+    "cond",
+    "cross",
+    "det",
+    "diagonal",
+    "eig",
+    "eigh",
+    "eigvals",
+    "eigvalsh",
+    "inv",
+    "lstsq",
+    "matmul",
+    "matrix_norm",
+    "matrix_power",
+    "matrix_rank",
+    "matrix_transpose",
+    "multi_dot",
+    "norm",
+    "outer",
+    "pinv",
+    "qr",
+    "slogdet",
+    "solve",
+    "svd",
+    "svdvals",
+    "tensordot",
+    "tensorinv",
+    "tensorsolve",
+    "trace",
+    "vecdot",
+    "vector_norm",
+]
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..cc537ca
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc
new file mode 100644
index 0000000..38df258
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc
new file mode 100644
index 0000000..8afeed5
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc
new file mode 100644
index 0000000..4320f33
Binary files /dev/null and
b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_deprecations.py b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_deprecations.py
new file mode 100644
index 0000000..cd4c108
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_deprecations.py
@@ -0,0 +1,20 @@
+"""Test deprecation and future warnings.
+
+"""
+import numpy as np
+from numpy.testing import assert_warns
+
+
+def test_qr_mode_full_future_warning():
+    """Check that mode='full' raises a DeprecationWarning.
+
+    The mode options 'full' and 'economic' in linalg.qr were deprecated
+    in numpy 1.8 (released in 2013) and now emit a DeprecationWarning.
+
+    """
+    a = np.eye(2)
+    assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full')
+    assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')
+    assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')
+    assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_linalg.py b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_linalg.py
new file mode 100644
index 0000000..cbf7dd6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_linalg.py
@@ -0,0 +1,2430 @@
+""" Test functions for linalg module
+
+"""
+import itertools
+import os
+import subprocess
+import sys
+import textwrap
+import threading
+import traceback
+
+import pytest
+
+import numpy as np
+from numpy import (
+    array,
+    asarray,
+    atleast_2d,
+    cdouble,
+    csingle,
+    dot,
+    double,
+    identity,
+    inf,
+    linalg,
+    matmul,
+    multiply,
+    single,
+)
+from numpy._core import swapaxes
+from numpy.exceptions import AxisError
+from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm
+from numpy.linalg._linalg import _multi_dot_matrix_chain_order
+from numpy.testing import (
+    HAS_LAPACK64,
+    IS_WASM,
+    NOGIL_BUILD,
+    assert_,
+    assert_allclose,
+    assert_almost_equal,
+    assert_array_equal,
+    assert_equal,
+    assert_raises,
+    assert_raises_regex,
+    suppress_warnings,
+)
+
+try:
+    import numpy.linalg.lapack_lite
+except ImportError:
+    # May be broken when numpy was built without BLAS/LAPACK present.
+    # If so, ensure we don't break the whole test suite - the `lapack_lite`
+    # submodule should be removed, it's only used in two tests in this file.
+    pass
+
+
+def consistent_subclass(out, in_):
+    # For ndarray subclass input, our output should have the same subclass
+    # (non-ndarray input gets converted to ndarray).
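+    # For example, an ndarray-subclass argument should come back as that
+    # subclass, while a list argument should come back as a plain ndarray.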
+    return type(out) is (type(in_) if isinstance(in_, np.ndarray)
+                         else np.ndarray)
+
+
+old_assert_almost_equal = assert_almost_equal
+
+
+def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
+    if asarray(a).dtype.type in (single, csingle):
+        decimal = single_decimal
+    else:
+        decimal = double_decimal
+    old_assert_almost_equal(a, b, decimal=decimal, **kw)
+
+
+def get_real_dtype(dtype):
+    return {single: single, double: double,
+            csingle: single, cdouble: double}[dtype]
+
+
+def get_complex_dtype(dtype):
+    return {single: csingle, double: cdouble,
+            csingle: csingle, cdouble: cdouble}[dtype]
+
+
+def get_rtol(dtype):
+    # Choose a safe rtol
+    if dtype in (single, csingle):
+        return 1e-5
+    else:
+        return 1e-11
+
+
+# used to categorize tests
+all_tags = {
+    'square', 'nonsquare', 'hermitian',  # mutually exclusive
+    'generalized', 'size-0', 'strided'  # optional additions
+}
+
+
+class LinalgCase:
+    def __init__(self, name, a, b, tags=set()):
+        """
+        A bundle of arguments to be passed to a test case, with an
+        identifying name, the operands a and b, and a set of tags to
+        filter the tests.
+        """
+        assert_(isinstance(name, str))
+        self.name = name
+        self.a = a
+        self.b = b
+        self.tags = frozenset(tags)  # prevent shared tags
+
+    def check(self, do):
+        """
+        Run the function `do` on this test case, expanding arguments
+        """
+        do(self.a, self.b, tags=self.tags)
+
+    def __repr__(self):
+        return f'<LinalgCase: {self.name}>'
+
+
+def apply_tag(tag, cases):
+    """
+    Add the given tag (a string) to each of the cases (a list of LinalgCase
+    objects)
+    """
+    assert tag in all_tags, "Invalid tag"
+    for case in cases:
+        case.tags = case.tags | {tag}
+    return cases
+
+
+#
+# Base test cases
+#
+
+np.random.seed(1234)
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+    LinalgCase("single",
+               array([[1., 2.], [3., 4.]], dtype=single),
+               array([2., 1.], dtype=single)),
+    LinalgCase("double",
+               array([[1., 2.], [3., 4.]], dtype=double),
+               array([2., 1.], dtype=double)),
+    LinalgCase("double_2",
+               array([[1., 2.], [3., 4.]], dtype=double),
+               array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
+    LinalgCase("csingle",
+               array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
+               array([2. + 1j, 1. + 2j], dtype=csingle)),
+    LinalgCase("cdouble",
+               array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
+               array([2. + 1j, 1. + 2j], dtype=cdouble)),
+    LinalgCase("cdouble_2",
+               array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
+               array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
+    LinalgCase("0x0",
+               np.empty((0, 0), dtype=double),
+               np.empty((0,), dtype=double),
+               tags={'size-0'}),
+    LinalgCase("8x8",
+               np.random.rand(8, 8),
+               np.random.rand(8)),
+    LinalgCase("1x1",
+               np.random.rand(1, 1),
+               np.random.rand(1)),
+    LinalgCase("nonarray",
+               [[1, 2], [3, 4]],
+               [2, 1]),
+])
+
+# non-square test-cases
+CASES += apply_tag('nonsquare', [
+    LinalgCase("single_nsq_1",
+               array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
+               array([2., 1.], dtype=single)),
+    LinalgCase("single_nsq_2",
+               array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
+               array([2., 1., 3.], dtype=single)),
+    LinalgCase("double_nsq_1",
+               array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
+               array([2., 1.], dtype=double)),
+    LinalgCase("double_nsq_2",
+               array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
+               array([2., 1., 3.], dtype=double)),
+    LinalgCase("csingle_nsq_1",
+               array(
+                   [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
+               array([2. + 1j, 1.
+ 2j], dtype=csingle)), + LinalgCase("csingle_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), + LinalgCase("cdouble_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), + LinalgCase("cdouble_nsq_1_2", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("cdouble_nsq_2_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("8x11", + np.random.rand(8, 11), + np.random.rand(8)), + LinalgCase("1x5", + np.random.rand(1, 5), + np.random.rand(1)), + LinalgCase("5x1", + np.random.rand(5, 1), + np.random.rand(5)), + LinalgCase("0x4", + np.random.rand(0, 4), + np.random.rand(0), + tags={'size-0'}), + LinalgCase("4x0", + np.random.rand(4, 0), + np.random.rand(4), + tags={'size-0'}), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hsingle", + array([[1., 2.], [2., 1.]], dtype=single), + None), + LinalgCase("hdouble", + array([[1., 2.], [2., 1.]], dtype=double), + None), + LinalgCase("hcsingle", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle), + None), + LinalgCase("hcdouble", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble), + None), + LinalgCase("hempty", + np.empty((0, 0), dtype=double), + None, + tags={'size-0'}), + LinalgCase("hnonarray", + [[1, 2], [2, 1]], + None), + LinalgCase("matrix_b_only", + array([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_1x1", + np.random.rand(1, 1), + None), +]) + + +# +# Gufunc test cases +# +def _make_generalized_cases(): + new_cases = [] + + for case in CASES: + if not isinstance(case.a, np.ndarray): + continue + + a = np.array([case.a, 2 * case.a, 3 * case.a]) + if case.b is None: + b = None + elif case.b.ndim == 1: + b = case.b + else: + b = np.array([case.b, 7 * case.b, 6 * case.b]) + new_case = LinalgCase(case.name + "_tile3", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape) + if case.b is None: + b = None + elif case.b.ndim == 1: + b = np.array([case.b] * 2 * 3 * a.shape[-1])\ + .reshape((3, 2) + case.a.shape[-2:]) + else: + b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape) + new_case = LinalgCase(case.name + "_tile213", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + return new_cases + + +CASES += _make_generalized_cases() + + +# +# Generate stride combination variations of the above +# +def _stride_comb_iter(x): + """ + Generate cartesian product of strides for all axes + """ + + if not isinstance(x, np.ndarray): + yield x, "nop" + return + + stride_set = [(1,)] * x.ndim + stride_set[-1] = (1, 3, -4) + if x.ndim > 1: + stride_set[-2] = (1, 3, -4) + if x.ndim > 2: + stride_set[-3] = (1, -4) + + for repeats in itertools.product(*tuple(stride_set)): + new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] + slices = tuple(slice(None, None, repeat) for repeat in repeats) + + # new array with different strides, but same data + xi = np.empty(new_shape, dtype=x.dtype) + 
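        # Fill with a recognisable bit pattern so any element the copy
+        # below fails to overwrite is easy to spot.
+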
xi.view(np.uint32).fill(0xdeadbeef) + xi = xi[slices] + xi[...] = x + xi = xi.view(x.__class__) + assert_(np.all(xi == x)) + yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + + # generate also zero strides if possible + if x.ndim >= 1 and x.shape[-1] == 1: + s = list(x.strides) + s[-1] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0" + if x.ndim >= 2 and x.shape[-2] == 1: + s = list(x.strides) + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_x" + if x.ndim >= 2 and x.shape[:-2] == (1, 1): + s = list(x.strides) + s[-1] = 0 + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_0" + + +def _make_strided_cases(): + new_cases = [] + for case in CASES: + for a, a_label in _stride_comb_iter(case.a): + for b, b_label in _stride_comb_iter(case.b): + new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b, + tags=case.tags | {'strided'}) + new_cases.append(new_case) + return new_cases + + +CASES += _make_strided_cases() + + +# +# Test different routines against the above cases +# +class LinalgTestCase: + TEST_CASES = CASES + + def check_cases(self, require=set(), exclude=set()): + """ + Run func on each of the cases with all of the tags in require, and none + of the tags in exclude + """ + for case in self.TEST_CASES: + # filter by require and exclude + if case.tags & require != require: + continue + if case.tags & exclude: + continue + + try: + case.check(self.do) + except Exception as e: + msg = f'In test case: {case!r}\n\n' + msg += traceback.format_exc() + raise AssertionError(msg) from e + + +class LinalgSquareTestCase(LinalgTestCase): + + def test_sq_cases(self): + self.check_cases(require={'square'}, + exclude={'generalized', 'size-0'}) + + def test_empty_sq_cases(self): + self.check_cases(require={'square', 'size-0'}, + exclude={'generalized'}) + + +class LinalgNonsquareTestCase(LinalgTestCase): + + def test_nonsq_cases(self): + self.check_cases(require={'nonsquare'}, + exclude={'generalized', 'size-0'}) + + def test_empty_nonsq_cases(self): + self.check_cases(require={'nonsquare', 'size-0'}, + exclude={'generalized'}) + + +class HermitianTestCase(LinalgTestCase): + + def test_herm_cases(self): + self.check_cases(require={'hermitian'}, + exclude={'generalized', 'size-0'}) + + def test_empty_herm_cases(self): + self.check_cases(require={'hermitian', 'size-0'}, + exclude={'generalized'}) + + +class LinalgGeneralizedSquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_sq_cases(self): + self.check_cases(require={'generalized', 'square'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_sq_cases(self): + self.check_cases(require={'generalized', 'square', 'size-0'}) + + +class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) + + +class HermitianGeneralizedTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian', 'size-0'}, + exclude={'none'}) + + +def identity_like_generalized(a): + a = asarray(a) + if 
a.ndim >= 3: + r = np.empty(a.shape, dtype=a.dtype) + r[...] = identity(a.shape[-2]) + return r + else: + return identity(a.shape[0]) + + +class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # kept apart from TestSolve for use for testing with matrices. + def do(self, a, b, tags): + x = linalg.solve(a, b) + if np.array(b).ndim == 1: + # When a is (..., M, M) and b is (M,), it is the same as when b is + # (M, 1), except the result has shape (..., M) + adotx = matmul(a, x[..., None])[..., 0] + assert_almost_equal(np.broadcast_to(b, adotx.shape), adotx) + else: + adotx = matmul(a, x) + assert_almost_equal(b, adotx) + assert_(consistent_subclass(x, b)) + + +class TestSolve(SolveCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.solve(x, x).dtype, dtype) + + def test_1_d(self): + class ArraySubclass(np.ndarray): + pass + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(2).view(ArraySubclass) + result = linalg.solve(a, b) + assert result.shape == (2, 2) + + # If b is anything other than 1-D it should be treated as a stack of + # matrices + b = np.arange(4).reshape(2, 2).view(ArraySubclass) + result = linalg.solve(a, b) + assert result.shape == (2, 2, 2) + + b = np.arange(2).reshape(1, 2).view(ArraySubclass) + assert_raises(ValueError, linalg.solve, a, b) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + # Test system of 0x0 matrices + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, 0:0, :] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # Test errors for non-square and only b's dimension being 0 + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) + + # Test broadcasting error + b = np.arange(6).reshape(1, 3, 2) # broadcasting error + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + + # Test zero "single equations" with 0x0 matrices. + b = np.arange(2).view(ArraySubclass) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + b = np.arange(3).reshape(1, 3) + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) + + def test_0_size_k(self): + # test zero multiple equation (K=0) case. + class ArraySubclass(np.ndarray): + pass + a = np.arange(4).reshape(1, 2, 2) + b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, :, 0:0] + result = linalg.solve(a, b[:, :, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # test both zero. 
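+        # (both the matrix dimension M and the number of right-hand sides
+        # K are zero)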
+ expected = linalg.solve(a, b)[:, 0:0, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + +class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + a_inv = linalg.inv(a) + assert_almost_equal(matmul(a, a_inv), + identity_like_generalized(a)) + assert_(consistent_subclass(a_inv, a)) + + +class TestInv(InvCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.inv(x).dtype, dtype) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + +class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + ev = linalg.eigvals(a) + evalues, evectors = linalg.eig(a) + assert_almost_equal(ev, evalues) + + +class TestEigvals(EigvalsCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, dtype) + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.complex64) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + res = linalg.eig(a) + eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors + assert_allclose(matmul(a, eigenvectors), + np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], + rtol=get_rtol(eigenvalues.dtype)) + assert_(consistent_subclass(eigenvectors, a)) + + +class TestEig(EigCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, dtype) + assert_equal(v.dtype, dtype) + + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + 
assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class SVDBaseTests: + hermitian = False + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + res = linalg.svd(x) + U, S, Vh = res.U, res.S, res.Vh + assert_equal(U.dtype, dtype) + assert_equal(S.dtype, get_real_dtype(dtype)) + assert_equal(Vh.dtype, dtype) + s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + assert_equal(s.dtype, get_real_dtype(dtype)) + + +class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False) + assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVD(SVDCases, SVDBaseTests): + def test_empty_identity(self): + """ Empty input should put an identity matrix in u or vh """ + x = np.empty((4, 0)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (4, 4)) + assert_equal(vh.shape, (0, 0)) + assert_equal(u, np.eye(4)) + + x = np.empty((0, 4)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (0, 0)) + assert_equal(vh.shape, (4, 4)) + assert_equal(vh, np.eye(4)) + + def test_svdvals(self): + x = np.array([[1, 0.5], [0.5, 1]]) + s_from_svd = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + s_from_svdvals = linalg.svdvals(x) + assert_almost_equal(s_from_svd, s_from_svdvals) + + +class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False, hermitian=True) + assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + + def hermitian(mat): + axes = list(range(mat.ndim)) + axes[-1], axes[-2] = axes[-2], axes[-1] + return np.conj(np.transpose(mat, axes=axes)) + + assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) + assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) + assert_equal(np.sort(s)[..., ::-1], s) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVDHermitian(SVDHermitianCases, SVDBaseTests): + hermitian = True + + +class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # cond(x, p) for p in (None, 2, -2) + + def do(self, a, b, tags): + c = asarray(a) # a might be a matrix + if 'size-0' in tags: + assert_raises(LinAlgError, linalg.cond, c) + return + + # +-2 norms + s = linalg.svd(c, compute_uv=False) + assert_almost_equal( + linalg.cond(a), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 2), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -2), s[..., -1] / s[..., 0], + single_decimal=5, double_decimal=11) + + # Other norms + cinv = np.linalg.inv(c) + 
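        # The non-SVD norms are validated against the identity
+        # cond_p(A) = ||A||_p * ||inv(A)||_p: the 1-norm is the maximum
+        # absolute column sum and the inf-norm the maximum absolute row sum.
+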
assert_almost_equal( + linalg.cond(a, 1), + abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -1), + abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, np.inf), + abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -np.inf), + abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 'fro'), + np.sqrt((abs(c)**2).sum(-1).sum(-1) + * (abs(cinv)**2).sum(-1).sum(-1)), + single_decimal=5, double_decimal=11) + + +class TestCond(CondCases): + def test_basic_nonsvd(self): + # Smoketest the non-svd norms + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + assert_almost_equal(linalg.cond(A, inf), 4) + assert_almost_equal(linalg.cond(A, -inf), 2 / 3) + assert_almost_equal(linalg.cond(A, 1), 4) + assert_almost_equal(linalg.cond(A, -1), 0.5) + assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + + def test_singular(self): + # Singular matrices have infinite condition number for + # positive norms, and negative norms shouldn't raise + # exceptions + As = [np.zeros((2, 2)), np.ones((2, 2))] + p_pos = [None, 1, 2, 'fro'] + p_neg = [-1, -2] + for A, p in itertools.product(As, p_pos): + # Inversion may not hit exact infinity, so just check the + # number is large + assert_(linalg.cond(A, p) > 1e15) + for A, p in itertools.product(As, p_neg): + linalg.cond(A, p) + + @pytest.mark.xfail(True, run=False, + reason="Platform/LAPACK-dependent failure, " + "see gh-18914") + def test_nan(self): + # nans should be passed through, not converted to infs + ps = [None, 1, -1, 2, -2, 'fro'] + p_pos = [None, 1, 2, 'fro'] + + A = np.ones((2, 2)) + A[0, 1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(isinstance(c, np.float64)) + assert_(np.isnan(c)) + + A = np.ones((3, 2, 2)) + A[1, 0, 1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(np.isnan(c[1])) + if p in p_pos: + assert_(c[0] > 1e15) + assert_(c[2] > 1e15) + else: + assert_(not np.isnan(c[0])) + assert_(not np.isnan(c[2])) + + def test_stacked_singular(self): + # Check behavior when only some of the stacked matrices are + # singular + np.random.seed(1234) + A = np.random.rand(2, 2, 2, 2) + A[0, 0] = 0 + A[1, 1] = 0 + + for p in (None, 1, 2, 'fro', -1, -2): + c = linalg.cond(A, p) + assert_equal(c[0, 0], np.inf) + assert_equal(c[1, 1], np.inf) + assert_(np.isfinite(c[0, 1])) + assert_(np.isfinite(c[1, 0])) + + +class PinvCases(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a) + # `a @ a_ginv == I` does not hold if a is singular + dot = matmul + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinv(PinvCases): + pass + + +class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a, hermitian=True) + # `a @ a_ginv == I` does not hold if a is singular + dot = matmul + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinvHermitian(PinvHermitianCases): + pass + + +def test_pinv_rtol_arg(): + a = np.array([[1, 2, 3], [4, 1, 1], [2, 3, 1]]) + + 
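    # ``rtol`` is the Array API-compatible alias of ``rcond``: the same
+    # cutoff must produce the same pseudo-inverse, and passing both at
+    # once is an error.
+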
assert_almost_equal( + np.linalg.pinv(a, rcond=0.5), + np.linalg.pinv(a, rtol=0.5), + ) + + with pytest.raises( + ValueError, match=r"`rtol` and `rcond` can't be both set." + ): + np.linalg.pinv(a, rcond=0.5, rtol=0.5) + + +class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + d = linalg.det(a) + res = linalg.slogdet(a) + s, ld = res.sign, res.logabsdet + if asarray(a).dtype.type in (single, double): + ad = asarray(a).astype(double) + else: + ad = asarray(a).astype(cdouble) + ev = linalg.eigvals(ad) + assert_almost_equal(d, multiply.reduce(ev, axis=-1)) + assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) + + s = np.atleast_1d(s) + ld = np.atleast_1d(ld) + m = (s != 0) + assert_almost_equal(np.abs(s[m]), 1) + assert_equal(ld[~m], -inf) + + +class TestDet(DetCases): + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(np.linalg.det(x).dtype, dtype) + ph, s = np.linalg.slogdet(x) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(ph.dtype, dtype) + + def test_0_size(self): + a = np.zeros((0, 0), dtype=np.complex64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.complex64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.complex64) + assert_(res[1].dtype.type is np.float32) + + a = np.zeros((0, 0), dtype=np.float64) + res = linalg.det(a) + assert_equal(res, 1.) 
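+        # (the determinant of a 0x0 matrix is the empty product, i.e. 1;
+        # the result dtype must still follow the input dtype)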
+ assert_(res.dtype.type is np.float64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.float64) + assert_(res[1].dtype.type is np.float64) + + +class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): + + def do(self, a, b, tags): + arr = np.asarray(a) + m, n = arr.shape + u, s, vt = linalg.svd(a, False) + x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) + if m == 0: + assert_((x == 0).all()) + if m <= n: + assert_almost_equal(b, dot(a, x)) + assert_equal(rank, m) + else: + assert_equal(rank, n) + assert_almost_equal(sv, sv.__array_wrap__(s)) + if rank == n and m > n: + expect_resids = ( + np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) + expect_resids = np.asarray(expect_resids) + if np.asarray(b).ndim == 1: + expect_resids.shape = (1,) + assert_equal(residuals.shape, expect_resids.shape) + else: + expect_resids = np.array([]).view(type(x)) + assert_almost_equal(residuals, expect_resids) + assert_(np.issubdtype(residuals.dtype, np.floating)) + assert_(consistent_subclass(x, b)) + assert_(consistent_subclass(residuals, b)) + + +class TestLstsq(LstsqCases): + def test_rcond(self): + a = np.array([[0., 1., 0., 1., 2., 0.], + [0., 2., 0., 0., 1., 0.], + [1., 0., 1., 0., 0., 4.], + [0., 0., 0., 2., 3., 0.]]).T + + b = np.array([1, 0, 0, 0, 0, 0]) + + x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b) + assert_(rank == 3) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + assert_(rank == 3) + + @pytest.mark.parametrize(["m", "n", "n_rhs"], [ + (4, 2, 2), + (0, 4, 1), + (0, 4, 2), + (4, 0, 1), + (4, 0, 2), + (4, 2, 0), + (0, 0, 0) + ]) + def test_empty_a_b(self, m, n, n_rhs): + a = np.arange(m * n).reshape(m, n) + b = np.ones((m, n_rhs)) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + if m == 0: + assert_((x == 0).all()) + assert_equal(x.shape, (n, n_rhs)) + assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) + if m > n and n_rhs > 0: + # residuals are exactly the squared norms of b's columns + r = b - np.dot(a, x) + assert_almost_equal(residuals, (r * r).sum(axis=-2)) + assert_equal(rank, min(m, n)) + assert_equal(s.shape, (min(m, n),)) + + def test_incompatible_dims(self): + # use modified version of docstring example + x = np.array([0, 1, 2, 3]) + y = np.array([-1, 0.2, 0.9, 2.1, 3.3]) + A = np.vstack([x, np.ones(len(x))]).T + with assert_raises_regex(LinAlgError, "Incompatible dimensions"): + linalg.lstsq(A, y, rcond=None) + + +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) +class TestMatrixPower: + + rshft_0 = np.eye(4) + rshft_1 = rshft_0[[3, 0, 1, 2]] + rshft_2 = rshft_0[[2, 3, 0, 1]] + rshft_3 = rshft_0[[1, 2, 3, 0]] + rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] + noninv = array([[1, 0], [0, 0]]) + stacked = np.block([[[rshft_0]]] * 2) + # FIXME the 'e' dtype might work in future + dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] + + def test_large_power(self, dt): + rshft = self.rshft_1.astype(dt) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3) + + def test_power_is_zero(self, dt): + def tz(M): + mz = matrix_power(M, 0) + assert_equal(mz, identity_like_generalized(M)) + assert_equal(mz.dtype, M.dtype) + + for mat in 
self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_one(self, dt): + def tz(mat): + mz = matrix_power(mat, 1) + assert_equal(mz, mat) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_two(self, dt): + def tz(mat): + mz = matrix_power(mat, 2) + mmul = matmul if mat.dtype != object else dot + assert_equal(mz, mmul(mat, mat)) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_minus_one(self, dt): + def tz(mat): + invmat = matrix_power(mat, -1) + mmul = matmul if mat.dtype != object else dot + assert_almost_equal( + mmul(invmat, mat), identity_like_generalized(mat)) + + for mat in self.rshft_all: + if dt not in self.dtnoinv: + tz(mat.astype(dt)) + + def test_exceptions_bad_power(self, dt): + mat = self.rshft_0.astype(dt) + assert_raises(TypeError, matrix_power, mat, 1.5) + assert_raises(TypeError, matrix_power, mat, [1]) + + def test_exceptions_non_square(self, dt): + assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_exceptions_not_invertible(self, dt): + if dt in self.dtnoinv: + return + mat = self.noninv.astype(dt) + assert_raises(LinAlgError, matrix_power, mat, -1) + + +class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
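+        # eigvalsh returns eigenvalues in ascending order, so the values
+        # from eig are sorted below to make the comparison order-independent.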
+ ev = linalg.eigvalsh(a, 'L') + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype)) + + ev2 = linalg.eigvalsh(a, 'U') + assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) + + +class TestEigvalsh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w = np.linalg.eigvalsh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") + assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w = np.linalg.eigvalsh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w = np.linalg.eigvalsh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w = np.linalg.eigvalsh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w = np.linalg.eigvalsh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w = np.linalg.eigvalsh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float32) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
+ res = linalg.eigh(a) + ev, evc = res.eigenvalues, res.eigenvectors + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_almost_equal(ev, evalues) + + assert_allclose(matmul(a, evc), + np.asarray(ev)[..., None, :] * np.asarray(evc), + rtol=get_rtol(ev.dtype)) + + ev2, evc2 = linalg.eigh(a, 'U') + assert_almost_equal(ev2, evalues) + + assert_allclose(matmul(a, evc2), + np.asarray(ev2)[..., None, :] * np.asarray(evc2), + rtol=get_rtol(ev.dtype), err_msg=repr(a)) + + +class TestEigh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eigh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + assert_equal(v.dtype, dtype) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigh, x, "lower") + assert_raises(ValueError, np.linalg.eigh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w, v = np.linalg.eigh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w, v = np.linalg.eigh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w, v = np.linalg.eigh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w, v = np.linalg.eigh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w, v = np.linalg.eigh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.float32) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class _TestNormBase: + dt = None + dec = None + + @staticmethod + def check_dtype(x, res): + if issubclass(x.dtype.type, np.inexact): + assert_equal(res.dtype, x.real.dtype) + else: + # For integer input, don't have to test float precision of output. 
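+            # (norm promotes integer input to a floating result, so any
+            # np.floating subclass is acceptable here.)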
+ assert_(issubclass(res.dtype.type, np.floating)) + + +class _TestNormGeneral(_TestNormBase): + + def test_empty(self): + assert_equal(norm([]), 0.0) + assert_equal(norm(array([], dtype=self.dt)), 0.0) + assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + + def test_vector_return_type(self): + a = np.array([1, 0, 1]) + + exact_types = np.typecodes['AllInteger'] + inexact_types = np.typecodes['AllFloat'] + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + an = norm(at, 0) + self.check_dtype(at, an) + assert_almost_equal(an, 2) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 2.0)) + + an = norm(at, 4) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 4.0)) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + def test_vector(self): + a = [1, 2, 3, 4] + b = [-1, -2, -3, -4] + c = [-1, 2, -3, 4] + + def _test(v): + np.testing.assert_almost_equal(norm(v), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, inf), 4.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -inf), 1.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 1), 10.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 0), 4, + decimal=self.dec) + + for v in (a, b, c,): + _test(v) + + for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), + array(c, dtype=self.dt)): + _test(v) + + def test_axis(self): + # Vector norms. + # Compare the use of `axis` with computing the norm of each row + # or column separately. + A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] + assert_almost_equal(norm(A, ord=order, axis=0), expected0) + expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] + assert_almost_equal(norm(A, ord=order, axis=1), expected1) + + # Matrix norms. + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + nd = B.ndim + for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro']: + for axis in itertools.combinations(range(-nd, nd), 2): + row_axis, col_axis = axis + if row_axis < 0: + row_axis += nd + if col_axis < 0: + col_axis += nd + if row_axis == col_axis: + assert_raises(ValueError, norm, B, ord=order, axis=axis) + else: + n = norm(B, ord=order, axis=axis) + + # The logic using k_index only works for nd = 3. + # This has to be changed if nd is increased. 
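+                    # (For nd == 3 the axes {0, 1, 2} sum to 3, so the
+                    # axis left over is nd - (row_axis + col_axis).)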
+ k_index = nd - (row_axis + col_axis) + if row_axis < col_axis: + expected = [norm(B[:].take(k, axis=k_index), ord=order) + for k in range(B.shape[k_index])] + else: + expected = [norm(B[:].take(k, axis=k_index).T, ord=order) + for k in range(B.shape[k_index])] + assert_almost_equal(n, expected) + + def test_keepdims(self): + A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + allclose_err = 'order {0}, axis = {1}' + shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' + + # check the order=None, axis=None case + expected = norm(A, ord=None, axis=None) + found = norm(A, ord=None, axis=None, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(None, None)) + expected_shape = (1, 1, 1) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, None, None)) + + # Vector norms. + for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: + for k in range(A.ndim): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + # Matrix norms. + for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro', 'nuc']: + for k in itertools.permutations(range(A.ndim), 2): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k[0]] = 1 + expected_shape[k[1]] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + +class _TestNorm2D(_TestNormBase): + # Define the part for 2d arrays separately, so we can subclass this + # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. + array = np.array + + def test_matrix_empty(self): + assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) + + def test_matrix_return_type(self): + a = self.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, 3.0**(1.0 / 2.0)) + + an = norm(at, -2) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + self.check_dtype(at, an) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. 
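+            # (The exact value is 1 + sqrt(3): the singular values of
+            # the matrix are sqrt(3) and 1, and 'nuc' sums them.)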
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) + + def test_matrix_2x2(self): + A = self.array([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84 ** 0.5) + assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 10.0) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_matrix_3x3(self): + # This test has been added because the 2x2 example + # happened to have equal nuclear norm and induced 1-norm. + # The 1/10 scaling factor accommodates the absolute tolerance + # used in assert_almost_equal. + A = (1 / 10) * \ + self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) + assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) + assert_almost_equal(norm(A, inf), 1.1) + assert_almost_equal(norm(A, -inf), 0.6) + assert_almost_equal(norm(A, 1), 1.0) + assert_almost_equal(norm(A, -1), 0.4) + assert_almost_equal(norm(A, 2), 0.88722940323461277) + assert_almost_equal(norm(A, -2), 0.19456584790481812) + + def test_bad_args(self): + # Check that bad arguments raise the appropriate exceptions. + + A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + # Using `axis=` or passing in a 1-D array implies vector + # norms are being computed, so also using `ord='fro'` + # or `ord='nuc'` or any other string raises a ValueError. + assert_raises(ValueError, norm, A, 'fro', 0) + assert_raises(ValueError, norm, A, 'nuc', 0) + assert_raises(ValueError, norm, [3, 4], 'fro', None) + assert_raises(ValueError, norm, [3, 4], 'nuc', None) + assert_raises(ValueError, norm, [3, 4], 'test', None) + + # Similarly, norm should raise an exception when ord is any finite + # number other than 1, 2, -1 or -2 when computing matrix norms. + for order in [0, 3]: + assert_raises(ValueError, norm, A, order, None) + assert_raises(ValueError, norm, A, order, (0, 1)) + assert_raises(ValueError, norm, B, order, (1, 2)) + + # Invalid axis + assert_raises(AxisError, norm, B, None, 3) + assert_raises(AxisError, norm, B, None, (2, 3)) + assert_raises(ValueError, norm, B, None, (0, 1, 2)) + + +class _TestNorm(_TestNorm2D, _TestNormGeneral): + pass + + +class TestNorm_NonSystematic: + + def test_longdouble_norm(self): + # Non-regression test: p-norm of longdouble would previously raise + # UnboundLocalError. + x = np.arange(10, dtype=np.longdouble) + old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) + + def test_intmin(self): + # Non-regression test: p-norm of signed integer would previously do + # float cast and abs in the wrong order. 
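+        # (In int32 arithmetic, abs(-2**31) overflows back to -2**31,
+        # so the cast to float must happen before the absolute value.)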
+ x = np.array([-2 ** 31], dtype=np.int32) + old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) + + def test_complex_high_ord(self): + # gh-4156 + d = np.empty((2,), dtype=np.clongdouble) + d[0] = 6 + 7j + d[1] = -6 + 7j + res = 11.615898132184 + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) + d = d.astype(np.complex128) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) + d = d.astype(np.complex64) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) + + +# Separate definitions so we can use them for matrix tests. +class _TestNormDoubleBase(_TestNormBase): + dt = np.double + dec = 12 + + +class _TestNormSingleBase(_TestNormBase): + dt = np.float32 + dec = 6 + + +class _TestNormInt64Base(_TestNormBase): + dt = np.int64 + dec = 12 + + +class TestNormDouble(_TestNorm, _TestNormDoubleBase): + pass + + +class TestNormSingle(_TestNorm, _TestNormSingleBase): + pass + + +class TestNormInt64(_TestNorm, _TestNormInt64Base): + pass + + +class TestMatrixRank: + + def test_matrix_rank(self): + # Full rank matrix + assert_equal(4, matrix_rank(np.eye(4))) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(matrix_rank(I), 3) + # All zeros - zero rank + assert_equal(matrix_rank(np.zeros((4, 4))), 0) + # 1 dimension - rank 1 unless all 0 + assert_equal(matrix_rank([1, 0, 0, 0]), 1) + assert_equal(matrix_rank(np.zeros((4,))), 0) + # accepts array-like + assert_equal(matrix_rank([1]), 1) + # greater than 2 dimensions treated as stacked matrices + ms = np.array([I, np.eye(4), np.zeros((4, 4))]) + assert_equal(matrix_rank(ms), np.array([3, 4, 0])) + # works on scalar + assert_equal(matrix_rank(1), 1) + + with assert_raises_regex( + ValueError, "`tol` and `rtol` can\'t be both set." + ): + matrix_rank(I, tol=0.01, rtol=0.01) + + def test_symmetric_rank(self): + assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) + assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) + assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(3, matrix_rank(I, hermitian=True)) + # manually supplied tolerance + I[-1, -1] = 1e-8 + assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) + assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) + + +def test_reduced_rank(): + # Test matrices with reduced rank + rng = np.random.RandomState(20120714) + for i in range(100): + # Make a rank deficient matrix + X = rng.normal(size=(40, 10)) + X[:, 0] = X[:, 1] + X[:, 2] + # Assert that matrix_rank detected deficiency + assert_equal(matrix_rank(X), 9) + X[:, 3] = X[:, 4] + X[:, 5] + assert_equal(matrix_rank(X), 8) + + +class TestQR: + # Define the array class here, so run this on matrices elsewhere. + array = np.array + + def check_qr(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
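+        # It exercises the 'complete', 'reduced' and 'r' modes, checking
+        # shapes, dtypes, orthonormality of Q and triangularity of R.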
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape + k = min(m, n) + + # mode == 'complete' + res = linalg.qr(a, mode='complete') + Q, R = res.Q, res.R + assert_(Q.dtype == a_dtype) + assert_(R.dtype == a_dtype) + assert_(isinstance(Q, a_type)) + assert_(isinstance(R, a_type)) + assert_(Q.shape == (m, m)) + assert_(R.shape == (m, n)) + assert_almost_equal(dot(Q, R), a) + assert_almost_equal(dot(Q.T.conj(), Q), np.eye(m)) + assert_almost_equal(np.triu(R), R) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape == (m, k)) + assert_(r1.shape == (k, n)) + assert_almost_equal(dot(q1, r1), a) + assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) + assert_almost_equal(np.triu(r1), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + @pytest.mark.parametrize(["m", "n"], [ + (3, 0), + (0, 3), + (0, 0) + ]) + def test_qr_empty(self, m, n): + k = min(m, n) + a = np.empty((m, n)) + + self.check_qr(a) + + h, tau = np.linalg.qr(a, mode='raw') + assert_equal(h.dtype, np.double) + assert_equal(tau.dtype, np.double) + assert_equal(h.shape, (n, m)) + assert_equal(tau.shape, (k,)) + + def test_mode_raw(self): + # The factorization is not unique and varies between libraries, + # so it is not possible to check against known values. Functional + # testing is a possibility, but awaits the exposure of more + # of the functions in lapack_lite. Consequently, this test is + # very limited in scope. Note that the results are in FORTRAN + # order, hence the h arrays are transposed. + a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + + # Test double + h, tau = linalg.qr(a, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (2, 3)) + assert_(tau.shape == (2,)) + + h, tau = linalg.qr(a.T, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (3, 2)) + assert_(tau.shape == (2,)) + + def test_mode_all_but_economic(self): + a = self.array([[1, 2], [3, 4]]) + b = self.array([[1, 2], [3, 4], [5, 6]]) + for dt in "fd": + m1 = a.astype(dt) + m2 = b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + for dt in "fd": + m1 = 1 + 1j * a.astype(dt) + m2 = 1 + 1j * b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + def check_qr_stacked(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
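+        # Same checks as check_qr, applied to the last two axes so that
+        # stacked (batched) inputs are covered as well.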
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape[-2:] + k = min(m, n) + + # mode == 'complete' + q, r = linalg.qr(a, mode='complete') + assert_(q.dtype == a_dtype) + assert_(r.dtype == a_dtype) + assert_(isinstance(q, a_type)) + assert_(isinstance(r, a_type)) + assert_(q.shape[-2:] == (m, m)) + assert_(r.shape[-2:] == (m, n)) + assert_almost_equal(matmul(q, r), a) + I_mat = np.identity(q.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q.shape[:-2] + (q.shape[-1],) * 2) + assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat) + assert_almost_equal(np.triu(r[..., :, :]), r) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape[-2:] == (m, k)) + assert_(r1.shape[-2:] == (k, n)) + assert_almost_equal(matmul(q1, r1), a) + I_mat = np.identity(q1.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q1.shape[:-2] + (q1.shape[-1],) * 2) + assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1), + stack_I_mat) + assert_almost_equal(np.triu(r1[..., :, :]), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + @pytest.mark.parametrize("size", [ + (3, 4), (4, 3), (4, 4), + (3, 0), (0, 3)]) + @pytest.mark.parametrize("outer_size", [ + (2, 2), (2,), (2, 3, 4)]) + @pytest.mark.parametrize("dt", [ + np.single, np.double, + np.csingle, np.cdouble]) + def test_stacked_inputs(self, outer_size, size, dt): + + rng = np.random.default_rng(123) + A = rng.normal(size=outer_size + size).astype(dt) + B = rng.normal(size=outer_size + size).astype(dt) + self.check_qr_stacked(A) + self.check_qr_stacked(A + 1.j * B) + + +class TestCholesky: + + @pytest.mark.parametrize( + 'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + ) + @pytest.mark.parametrize( + 'dtype', (np.float32, np.float64, np.complex64, np.complex128) + ) + @pytest.mark.parametrize( + 'upper', [False, True]) + def test_basic_property(self, shape, dtype, upper): + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j * np.random.randn(*shape) + + t = list(range(len(shape))) + t[-2:] = -1, -2 + + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) + + c = np.linalg.cholesky(a, upper=upper) + + # Check A = L L^H or A = U^H U + if upper: + b = np.matmul(c.transpose(t).conj(), c) + else: + b = np.matmul(c, c.transpose(t).conj()) + + atol = 500 * a.shape[0] * np.finfo(dtype).eps + assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}') + + # Check diag(L or U) is real and positive + d = np.diagonal(c, axis1=-2, axis2=-1) + assert_(np.all(np.isreal(d))) + assert_(np.all(d >= 0)) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.float64) + # for documentation purpose: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.complex64) + assert_(isinstance(res, np.ndarray)) + + def test_upper_lower_arg(self): + # Explicit test of upper argument that also checks the default. 
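+        # For this Hermitian positive-definite matrix, L = [[1, 0], [2j, 1]]
+        # and U = L.conj().T, which is what the assertions below verify.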
+ a = np.array([[1 + 0j, 0 - 2j], [0 + 2j, 5 + 0j]]) + + assert_equal(linalg.cholesky(a), linalg.cholesky(a, upper=False)) + + assert_equal( + linalg.cholesky(a, upper=True), + linalg.cholesky(a).T.conj() + ) + + +class TestOuter: + arr1 = np.arange(3) + arr2 = np.arange(3) + expected = np.array( + [[0, 0, 0], + [0, 1, 2], + [0, 2, 4]] + ) + + assert_array_equal(np.linalg.outer(arr1, arr2), expected) + + with assert_raises_regex( + ValueError, "Input arrays must be one-dimensional" + ): + np.linalg.outer(arr1[:, np.newaxis], arr2) + + +def test_byteorder_check(): + # Byte order check should pass for native order + if sys.byteorder == 'little': + native = '<' + else: + native = '>' + + for dtt in (np.float32, np.float64): + arr = np.eye(4, dtype=dtt) + n_arr = arr.view(arr.dtype.newbyteorder(native)) + sw_arr = arr.view(arr.dtype.newbyteorder("S")).byteswap() + assert_equal(arr.dtype.byteorder, '=') + for routine in (linalg.inv, linalg.det, linalg.pinv): + # Normal call + res = routine(arr) + # Native but not '=' + assert_array_equal(res, routine(n_arr)) + # Swapped + assert_array_equal(res, routine(sw_arr)) + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +def test_generalized_raise_multiloop(): + # It should raise an error even if the error doesn't occur in the + # last iteration of the ufunc inner loop + + invertible = np.array([[1, 2], [3, 4]]) + non_invertible = np.array([[1, 1], [1, 1]]) + + x = np.zeros([4, 4, 2, 2])[1::2] + x[...] = invertible + x[0, 0] = non_invertible + + assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + + +@pytest.mark.skipif( + threading.active_count() > 1, + reason="skipping test that uses fork because there are multiple threads") +@pytest.mark.skipif( + NOGIL_BUILD, + reason="Cannot safely use fork in tests on the free-threaded build") +def test_xerbla_override(): + # Check that our xerbla has been successfully linked in. If it is not, + # the default xerbla routine is called, which prints a message to stdout + # and may, or may not, abort the process depending on the LAPACK package. + + XERBLA_OK = 255 + + try: + pid = os.fork() + except (OSError, AttributeError): + # fork failed, or not running on POSIX + pytest.skip("Not POSIX or fork failed.") + + if pid == 0: + # child; close i/o file handles + os.close(1) + os.close(0) + # Avoid producing core files. + import resource + resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) + # These calls may abort. + try: + np.linalg.lapack_lite.xerbla() + except ValueError: + pass + except Exception: + os._exit(os.EX_CONFIG) + + try: + a = np.array([[1.]]) + np.linalg.lapack_lite.dorgqr( + 1, 1, 1, a, + 0, # <- invalid value + a, a, 0, 0) + except ValueError as e: + if "DORGQR parameter number 5" in str(e): + # success, reuse error code to mark success as + # FORTRAN STOP returns as success. + os._exit(XERBLA_OK) + + # Did not abort, but our xerbla was not linked in. + os._exit(os.EX_CONFIG) + else: + # parent + pid, status = os.wait() + if os.WEXITSTATUS(status) != XERBLA_OK: + pytest.skip('Numpy xerbla not linked in.') + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +def test_sdot_bug_8577(): + # Regression test that loading certain other libraries does not + # result to wrong results in float32 linear algebra. + # + # There's a bug gh-8577 on OSX that can trigger this, and perhaps + # there are also other situations in which it occurs. + # + # Do the check in a separate process. 
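+    # (A fresh interpreter guarantees a clean import order, which is
+    # what the bug depends on.)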
+ + bad_libs = ['PyQt5.QtWidgets', 'IPython'] + + template = textwrap.dedent(""" + import sys + {before} + try: + import {bad_lib} + except ImportError: + sys.exit(0) + {after} + x = np.ones(2, dtype=np.float32) + sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) + """) + + for bad_lib in bad_libs: + code = template.format(before="import numpy as np", after="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + # Swapped import order + code = template.format(after="import numpy as np", before="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + +class TestMultiDot: + + def test_basic_function_with_three_arguments(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C)) + assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C))) + + def test_basic_function_with_two_arguments(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + + assert_almost_equal(multi_dot([A, B]), A.dot(B)) + assert_almost_equal(multi_dot([A, B]), np.dot(A, B)) + + def test_basic_function_with_dynamic_programming_optimization(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D)) + + def test_vector_as_first_argument(self): + # The first argument can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 2)) + + # the result should be 1-D + assert_equal(multi_dot([A1d, B, C, D]).shape, (2,)) + + def test_vector_as_last_argument(self): + # The last argument can be 1-D + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be 1-D + assert_equal(multi_dot([A, B, C, D1d]).shape, (6,)) + + def test_vector_as_first_and_last_argument(self): + # The first and last arguments can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be a scalar + assert_equal(multi_dot([A1d, B, C, D1d]).shape, ()) + + def test_three_arguments_and_out(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. 
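+        # When out= is given, multi_dot must write into and return that
+        # exact array object.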
+ A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + out = np.zeros((6, 2)) + ret = multi_dot([A, B, C], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C)) + assert_almost_equal(out, np.dot(A, np.dot(B, C))) + + def test_two_arguments_and_out(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + out = np.zeros((6, 6)) + ret = multi_dot([A, B], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B)) + assert_almost_equal(out, np.dot(A, B)) + + def test_dynamic_programming_optimization_and_out(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate test + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + out = np.zeros((6, 1)) + ret = multi_dot([A, B, C, D], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C).dot(D)) + + def test_dynamic_programming_logic(self): + # Test for the dynamic programming part + # This test is directly taken from Cormen page 376. + arrays = [np.random.random((30, 35)), + np.random.random((35, 15)), + np.random.random((15, 5)), + np.random.random((5, 10)), + np.random.random((10, 20)), + np.random.random((20, 25))] + m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.], + [0., 0., 2625., 4375., 7125., 10500.], + [0., 0., 0., 750., 2500., 5375.], + [0., 0., 0., 0., 1000., 3500.], + [0., 0., 0., 0., 0., 5000.], + [0., 0., 0., 0., 0., 0.]]) + s_expected = np.array([[0, 1, 1, 3, 3, 3], + [0, 0, 2, 3, 3, 3], + [0, 0, 0, 3, 3, 3], + [0, 0, 0, 0, 4, 5], + [0, 0, 0, 0, 0, 5], + [0, 0, 0, 0, 0, 0]], dtype=int) + s_expected -= 1 # Cormen uses 1-based index, python does not. + + s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) + + # Only the upper triangular part (without the diagonal) is interesting. 
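+        # (The chain-order algorithm never fills in the lower triangles
+        # of m and s, so only entries with i < j are meaningful.)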
+ assert_almost_equal(np.triu(s[:-1, 1:]), + np.triu(s_expected[:-1, 1:])) + assert_almost_equal(np.triu(m), np.triu(m_expected)) + + def test_too_few_input_arrays(self): + assert_raises(ValueError, multi_dot, []) + assert_raises(ValueError, multi_dot, [np.random.random((3, 3))]) + + +class TestTensorinv: + + @pytest.mark.parametrize("arr, ind", [ + (np.ones((4, 6, 8, 2)), 2), + (np.ones((3, 3, 2)), 1), + ]) + def test_non_square_handling(self, arr, ind): + with assert_raises(LinAlgError): + linalg.tensorinv(arr, ind=ind) + + @pytest.mark.parametrize("shape, ind", [ + # examples from docstring + ((4, 6, 8, 3), 2), + ((24, 8, 3), 1), + ]) + def test_tensorinv_shape(self, shape, ind): + a = np.eye(24) + a.shape = shape + ainv = linalg.tensorinv(a=a, ind=ind) + expected = a.shape[ind:] + a.shape[:ind] + actual = ainv.shape + assert_equal(actual, expected) + + @pytest.mark.parametrize("ind", [ + 0, -2, + ]) + def test_tensorinv_ind_limit(self, ind): + a = np.eye(24) + a.shape = (4, 6, 8, 3) + with assert_raises(ValueError): + linalg.tensorinv(a=a, ind=ind) + + def test_tensorinv_result(self): + # mimic a docstring example + a = np.eye(24) + a.shape = (24, 8, 3) + ainv = linalg.tensorinv(a, ind=1) + b = np.ones(24) + assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + + +class TestTensorsolve: + + @pytest.mark.parametrize("a, axes", [ + (np.ones((4, 6, 8, 2)), None), + (np.ones((3, 3, 2)), (0, 2)), + ]) + def test_non_square_handling(self, a, axes): + with assert_raises(LinAlgError): + b = np.ones(a.shape[:2]) + linalg.tensorsolve(a, b, axes=axes) + + @pytest.mark.parametrize("shape", + [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)], + ) + def test_tensorsolve_result(self, shape): + a = np.random.randn(*shape) + b = np.ones(a.shape[:2]) + x = np.linalg.tensorsolve(a, b) + assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b) + + +def test_unsupported_commontype(): + # linalg gracefully handles unsupported type + arr = np.array([[1, -2], [2, 5]], dtype='float16') + with assert_raises_regex(TypeError, "unsupported in linalg"): + linalg.cholesky(arr) + + +#@pytest.mark.slow +#@pytest.mark.xfail(not HAS_LAPACK64, run=False, +# reason="Numpy not compiled with 64-bit BLAS/LAPACK") +#@requires_memory(free_bytes=16e9) +@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") +def test_blas64_dot(): + n = 2**32 + a = np.zeros([1, n], dtype=np.float32) + b = np.ones([1, 1], dtype=np.float32) + a[0, -1] = 1 + c = np.dot(b, a) + assert_equal(c[0, -1], 1) + + +@pytest.mark.xfail(not HAS_LAPACK64, + reason="Numpy not compiled with 64-bit BLAS/LAPACK") +def test_blas64_geqrf_lwork_smoketest(): + # Smoke test LAPACK geqrf lwork call with 64-bit integers + dtype = np.float64 + lapack_routine = np.linalg.lapack_lite.dgeqrf + + m = 2**32 + 1 + n = 2**32 + 1 + lda = m + + # Dummy arrays, not referenced by the lapack routine, so don't + # need to be of the right size + a = np.zeros([1, 1], dtype=dtype) + work = np.zeros([1], dtype=dtype) + tau = np.zeros([1], dtype=dtype) + + # Size query + results = lapack_routine(m, n, a, lda, tau, work, -1, 0) + assert_equal(results['info'], 0) + assert_equal(results['m'], m) + assert_equal(results['n'], m) + + # Should result to an integer of a reasonable size + lwork = int(work.item()) + assert_(2**32 < lwork < 2**42) + + +def test_diagonal(): + # Here we only test if selected axes are compatible + # with Array API (last two). Core implementation + # of `diagonal` is tested in `test_multiarray.py`. 
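+    # np.linalg.diagonal operates on the last two axes, whereas
+    # np.diagonal defaults to axis1=0 and axis2=1.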
+ x = np.arange(60).reshape((3, 4, 5)) + actual = np.linalg.diagonal(x) + expected = np.array( + [ + [0, 6, 12, 18], + [20, 26, 32, 38], + [40, 46, 52, 58], + ] + ) + assert_equal(actual, expected) + + +def test_trace(): + # Here we only test if selected axes are compatible + # with Array API (last two). Core implementation + # of `trace` is tested in `test_multiarray.py`. + x = np.arange(60).reshape((3, 4, 5)) + actual = np.linalg.trace(x) + expected = np.array([36, 116, 196]) + + assert_equal(actual, expected) + + +def test_cross(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.cross(x, x + 1) + expected = np.array([ + [-1, 2, -1], + [-1, 2, -1], + [-1, 2, -1], + ]) + + assert_equal(actual, expected) + + # We test that lists are converted to arrays. + u = [1, 2, 3] + v = [4, 5, 6] + actual = np.linalg.cross(u, v) + expected = array([-3, 6, -3]) + + assert_equal(actual, expected) + + with assert_raises_regex( + ValueError, + r"input arrays must be \(arrays of\) 3-dimensional vectors" + ): + x_2dim = x[:, 1:] + np.linalg.cross(x_2dim, x_2dim) + + +def test_tensordot(): + # np.linalg.tensordot is just an alias for np.tensordot + x = np.arange(6).reshape((2, 3)) + + assert np.linalg.tensordot(x, x) == 55 + assert np.linalg.tensordot(x, x, axes=[(0, 1), (0, 1)]) == 55 + + +def test_matmul(): + # np.linalg.matmul and np.matmul only differs in the number + # of arguments in the signature + x = np.arange(6).reshape((2, 3)) + actual = np.linalg.matmul(x, x.T) + expected = np.array([[5, 14], [14, 50]]) + + assert_equal(actual, expected) + + +def test_matrix_transpose(): + x = np.arange(6).reshape((2, 3)) + actual = np.linalg.matrix_transpose(x) + expected = x.T + + assert_equal(actual, expected) + + with assert_raises_regex( + ValueError, "array must be at least 2-dimensional" + ): + np.linalg.matrix_transpose(x[:, 0]) + + +def test_matrix_norm(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.matrix_norm(x) + + assert_almost_equal(actual, np.float64(14.2828), double_decimal=3) + + actual = np.linalg.matrix_norm(x, keepdims=True) + + assert_almost_equal(actual, np.array([[14.2828]]), double_decimal=3) + + +def test_matrix_norm_empty(): + for shape in [(0, 2), (2, 0), (0, 0)]: + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(shape, dtype) + assert_equal(np.linalg.matrix_norm(x, ord="fro"), 0) + assert_equal(np.linalg.matrix_norm(x, ord="nuc"), 0) + assert_equal(np.linalg.matrix_norm(x, ord=1), 0) + assert_equal(np.linalg.matrix_norm(x, ord=2), 0) + assert_equal(np.linalg.matrix_norm(x, ord=np.inf), 0) + +def test_vector_norm(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.vector_norm(x) + + assert_almost_equal(actual, np.float64(14.2828), double_decimal=3) + + actual = np.linalg.vector_norm(x, axis=0) + + assert_almost_equal( + actual, np.array([6.7082, 8.124, 9.6436]), double_decimal=3 + ) + + actual = np.linalg.vector_norm(x, keepdims=True) + expected = np.full((1, 1), 14.2828, dtype='float64') + assert_equal(actual.shape, expected.shape) + assert_almost_equal(actual, expected, double_decimal=3) + + +def test_vector_norm_empty(): + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(0, dtype) + assert_equal(np.linalg.vector_norm(x, ord=1), 0) + assert_equal(np.linalg.vector_norm(x, ord=2), 0) + assert_equal(np.linalg.vector_norm(x, ord=np.inf), 0) diff --git a/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_regression.py b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_regression.py new file mode 100644 index 
0000000..c46f83a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/linalg/tests/test_regression.py
@@ -0,0 +1,181 @@
+""" Test functions for linalg module
+"""
+
+import pytest
+
+import numpy as np
+from numpy import arange, array, dot, float64, linalg, transpose
+from numpy.testing import (
+    assert_,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_array_less,
+    assert_equal,
+    assert_raises,
+)
+
+
+class TestRegression:
+
+    def test_eig_build(self):
+        # Ticket #652
+        rva = array([1.03221168e+02 + 0.j,
+                     -1.91843603e+01 + 0.j,
+                     -6.04004526e-01 + 15.84422474j,
+                     -6.04004526e-01 - 15.84422474j,
+                     -1.13692929e+01 + 0.j,
+                     -6.57612485e-01 + 10.41755503j,
+                     -6.57612485e-01 - 10.41755503j,
+                     1.82126812e+01 + 0.j,
+                     1.06011014e+01 + 0.j,
+                     7.80732773e+00 + 0.j,
+                     -7.65390898e-01 + 0.j,
+                     1.51971555e-15 + 0.j,
+                     -1.51308713e-15 + 0.j])
+        a = arange(13 * 13, dtype=float64)
+        a.shape = (13, 13)
+        a = a % 17
+        va, ve = linalg.eig(a)
+        va.sort()
+        rva.sort()
+        assert_array_almost_equal(va, rva)
+
+    def test_eigh_build(self):
+        # Ticket 662.
+        rvals = [68.60568999, 89.57756725, 106.67185574]
+
+        cov = array([[77.70273908, 3.51489954, 15.64602427],
+                     [ 3.51489954, 88.97013878, -1.07431931],
+                     [15.64602427, -1.07431931, 98.18223512]])
+
+        vals, vecs = linalg.eigh(cov)
+        assert_array_almost_equal(vals, rvals)
+
+    def test_svd_build(self):
+        # Ticket 627.
+        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
+        m, n = a.shape
+        u, s, vh = linalg.svd(a)
+
+        b = dot(transpose(u[:, n:]), a)
+
+        assert_array_almost_equal(b, np.zeros((2, 2)))
+
+    def test_norm_vector_badarg(self):
+        # Regression for #786: Frobenius norm for vectors raises
+        # ValueError.
+        assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
+
+    def test_lapack_endian(self):
+        # For bug #1482
+        a = array([[ 5.7998084, -2.1825367],
+                   [-2.1825367, 9.85910595]], dtype='>f8')
+        b = array(a, dtype='<f8')
+
+        ap = linalg.cholesky(a)
+        bp = linalg.cholesky(b)
+        assert_array_equal(ap, bp)
+
+    def test_large_svd_32bit(self):
+        # See gh-4442, 64bit would require very large/slow matrices.
+        x = np.eye(1000, 66)
+        np.linalg.svd(x)
+
+    def test_svd_no_uv(self):
+        # gh-4733
+        for shape in (3, 4), (4, 4), (4, 3):
+            for t in float, complex:
+                a = np.ones(shape, dtype=t)
+                w = linalg.svd(a, compute_uv=False)
+                c = np.count_nonzero(np.absolute(w) > 0.5)
+                assert_equal(c, 1)
+                assert_equal(np.linalg.matrix_rank(a), 1)
+                assert_array_less(1, np.linalg.norm(a, ord=2))
+
+                w_svdvals = linalg.svdvals(a)
+                assert_array_almost_equal(w, w_svdvals)
+
+    def test_norm_object_array(self):
+        # gh-7575
+        testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
+
+        norm = linalg.norm(testvector)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        norm = linalg.norm(testvector, ord=1)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype != np.dtype('float64'))
+
+        norm = linalg.norm(testvector, ord=2)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        assert_raises(ValueError, linalg.norm, testvector, ord='fro')
+        assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
+        assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
+        assert_raises(ValueError, linalg.norm, testvector, ord=0)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-1)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-2)
+
+        testmatrix = np.array([[np.array([0, 1]), 0, 0],
+                               [0, 0, 0]], dtype=object)
+
+        norm = linalg.norm(testmatrix)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        norm = linalg.norm(testmatrix, ord='fro')
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
        assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
+        assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
+        assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
+
+    def test_lstsq_complex_larger_rhs(self):
+        # gh-9891
+        size = 20
+        n_rhs = 70
+        G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
+        u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
+        b = G.dot(u)
+        # This should work without segmentation fault.
+        u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
+        # check results just in case
+        assert_array_almost_equal(u_lstsq, u)
+
+    @pytest.mark.parametrize("upper", [True, False])
+    def test_cholesky_empty_array(self, upper):
+        # gh-25840 - upper=True hung before.
+        res = np.linalg.cholesky(np.zeros((0, 0)), upper=upper)
+        assert res.size == 0
+
+    @pytest.mark.parametrize("rtol", [0.0, [0.0] * 4, np.zeros((4,))])
+    def test_matrix_rank_rtol_argument(self, rtol):
+        # gh-25877
+        x = np.zeros((4, 3, 2))
+        res = np.linalg.matrix_rank(x, rtol=rtol)
+        assert res.shape == (4,)
+
+    def test_openblas_threading(self):
+        # gh-27036
+        # Test whether matrix multiplication involving a large matrix always
+        # gives the same (correct) answer
+        x = np.arange(500000, dtype=np.float64)
+        src = np.vstack((x, -10 * x)).T
+        matrix = np.array([[0, 1], [1, 0]])
+        expected = np.vstack((-10 * x, x)).T  # src @ matrix
+        for i in range(200):
+            result = src @ matrix
+            mismatches = (~np.isclose(result, expected)).sum()
+            if mismatches != 0:
+                assert False, ("unexpected result from matmul, "
+                               "probably due to OpenBLAS threading issues")
diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/API_CHANGES.txt b/.venv/lib/python3.12/site-packages/numpy/ma/API_CHANGES.txt
new file mode 100644
index 0000000..a3d792a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ma/API_CHANGES.txt
@@ -0,0 +1,135 @@
+.. -*- rest -*-
+
+==================================================
+API changes in the new masked array implementation
+==================================================
+
+Masked arrays are subclasses of ndarray
+---------------------------------------
+
+Contrary to the original implementation, masked arrays are now regular
+ndarrays::
+
+    >>> x = masked_array([1,2,3],mask=[0,0,1])
+    >>> print isinstance(x, numpy.ndarray)
+    True
+
+
+``_data`` returns a view of the masked array
+--------------------------------------------
+
+Masked arrays are composed of a ``_data`` part and a ``_mask``. Accessing
+the ``_data`` part will return a regular ndarray or any of its subclasses,
+depending on the initial data::
+
+    >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
+    >>> print x._data
+    [[1 2]
+     [3 4]]
+    >>> print type(x._data)
+    <class 'numpy.matrixlib.defmatrix.matrix'>
+
+
+In practice, ``_data`` is implemented as a property, not as an attribute.
+Therefore, you cannot access it directly, and some simple tests such as the
+following one will fail::
+
+    >>> x._data is x._data
+    False
+
+
+``filled(x)`` can return a subclass of ndarray
+----------------------------------------------
+The function ``filled(a)`` returns an array of the same type as ``a._data``::
+
+    >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
+    >>> y = filled(x)
+    >>> print type(y)
+    <class 'numpy.matrixlib.defmatrix.matrix'>
+    >>> print y
+    matrix([[     1,      2],
+            [     3, 999999]])
+
+
+``put``, ``putmask`` behave like their ndarray counterparts
+-----------------------------------------------------------
+
+Previously, ``putmask`` was used like this::
+
+    mask = [False,True,True]
+    x = array([1,4,7],mask=mask)
+    putmask(x,mask,[3])
+
+which translated to::
+
+    x[~mask] = [3]
+
+(Note that a ``True``-value in a mask suppresses a value.)
+
+In other words, the mask had the same length as ``x``, whereas
+``values`` had ``sum(~mask)`` elements.
+
+Now, the behaviour is similar to that of ``ndarray.putmask``, where
+the mask and the values are both the same length as ``x``, i.e.
+
+::
+
+    putmask(x,mask,[3,0,0])
+
+
+``fill_value`` is a property
+----------------------------
+
+``fill_value`` is no longer a method, but a property::
+
+    >>> print x.fill_value
+    999999
+
+``cumsum`` and ``cumprod`` ignore missing values
+------------------------------------------------
+
+Missing values are assumed to be the identity element, i.e. 0 for
+``cumsum`` and 1 for ``cumprod``::
+
+    >>> x = N.ma.array([1,2,3,4],mask=[False,True,False,False])
+    >>> print x
+    [1 -- 3 4]
+    >>> print x.cumsum()
+    [1 -- 4 8]
+    >>> print x.cumprod()
+    [1 -- 3 12]
+
+``bool(x)`` raises a ValueError
+-------------------------------
+
+Masked arrays now behave like regular ``ndarrays``, in that they cannot be
+converted to booleans:
+
+::
+
+    >>> x = N.ma.array([1,2,3])
+    >>> bool(x)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
+
+
+==================================
+New features (non-exhaustive list)
+==================================
+
+``mr_``
+-------
+
+``mr_`` mimics the behavior of ``r_`` for masked arrays::
+
+    >>> np.ma.mr_[3,4,5]
+    masked_array(data = [3 4 5],
+                 mask = False,
+           fill_value=999999)
+
+
+``anom``
+--------
+
+The ``anom`` method returns the deviations from the average (anomalies).
diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/LICENSE b/.venv/lib/python3.12/site-packages/numpy/ma/LICENSE
new file mode 100644
index 0000000..b41aae0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ma/LICENSE
@@ -0,0 +1,24 @@
+* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant
+* All rights reserved.
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in the
+*       documentation and/or other materials provided with the distribution.
+*     * Neither the name of the University of Georgia nor the
+*       names of its contributors may be used to endorse or promote products
+*       derived from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/README.rst b/.venv/lib/python3.12/site-packages/numpy/ma/README.rst
new file mode 100644
index 0000000..cd10103
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ma/README.rst
@@ -0,0 +1,236 @@
+==================================
+A guide to masked arrays in NumPy
+==================================
+
+.. Contents::
+
+See http://www.scipy.org/scipy/numpy/wiki/MaskedArray (dead link)
+for updates of this document.
+
+
+History
+-------
+
+As a regular user of MaskedArray, I (Pierre G.F. Gerard-Marchant) became
+increasingly frustrated with the subclassing of masked arrays (even if
+I can only blame my inexperience). I needed to develop a class of arrays
+that could store some additional information along with numerical values,
+while keeping the possibility for missing data (picture storing a series
+of dates along with measurements, what would later become the `TimeSeries
+Scikit`__ (dead link)).
+
+I started to implement such a class, but then quickly realized that
+any additional information disappeared when processing these subarrays
+(for example, adding a constant value to a subarray would erase its
+dates). I ended up writing the equivalent of *numpy.core.ma* for my
+particular class, ufuncs included. Everything went fine until I needed to
+subclass my new class, when more problems showed up: some attributes of
+the new subclass were lost during processing. I identified the culprit as
+MaskedArray, which returns masked ndarrays when I expected masked
+arrays of my class. I was preparing myself to rewrite *numpy.core.ma*
+when I forced myself to learn how to subclass ndarrays. As I became more
+familiar with the *__new__* and *__array_finalize__* methods,
+I started to wonder why masked arrays were objects, and not ndarrays,
+and whether it wouldn't be more convenient for subclassing if they did
+behave like regular ndarrays.
+
+The new *maskedarray* is what I eventually came up with. The
+main differences with the initial *numpy.core.ma* package are
+that MaskedArray is now a subclass of *ndarray* and that the
+*_data* section can now be any subclass of *ndarray*. Apart from a
+couple of issues listed below, the behavior of the new MaskedArray
+class reproduces the old one. Initially the *maskedarray*
+implementation was marginally slower than *numpy.ma* in some areas,
+but work is underway to speed it up; the expectation is that it can be
+made substantially faster than the present *numpy.ma*.
+
+
+Note that if the subclass has some special methods and
+attributes, they are not propagated to the masked version:
+this would require a modification of the *__getattribute__*
+method (first trying *ndarray.__getattribute__*, then trying
+*self._data.__getattribute__* if an exception is raised in the first
+place), which really slows things down.
+
+Main differences
+----------------
+
+ * The *_data* part of the masked array can be any subclass of ndarray
+   (but not recarray, cf below).
+ * *fill_value* is now a property, not a function.
+ * in the majority of cases, the mask is forced to *nomask* when no
+   value is actually masked. A notable exception is when a masked array
+   (with no masked values) has just been unpickled.
+ * I got rid of the *share_mask* flag, I never understood its purpose.
+ * *put*, *putmask* and *take* now mimic the ndarray methods, to avoid
+   unpleasant surprises. Moreover, *put* and *putmask* both update the
+   mask when needed.
+ * if *a* is a masked array, *bool(a)* raises a *ValueError*, as it
+   does with ndarrays.
+ * in the same way, the comparison of two masked arrays is a masked
+   array, not a boolean
+ * *filled(a)* returns an array of the same subclass as *a._data*, and
+   no test is performed on whether it is contiguous or not.
+ * the mask is always printed, even if it's *nomask*, which makes
+   things easy (for me at least) to remember that a masked array is used.
+ * *cumsum* works as if the *_data* array was filled with 0. The mask
+   is preserved, but not updated.
+ * *cumprod* works as if the *_data* array was filled with 1. The mask
+   is preserved, but not updated.
+
+New features
+------------
+
+This list is non-exhaustive...
+
+ * the *mr_* function mimics *r_* for masked arrays.
+ * the *anom* method returns the anomalies (deviations from the average)
+
+Using the new package with numpy.core.ma
+----------------------------------------
+
+I tried to make sure that the new package can understand old masked
+arrays. Unfortunately, there's no upward compatibility.
+
+For example:
+
+>>> import numpy.core.ma as old_ma
+>>> import maskedarray as new_ma
+>>> x = old_ma.array([1,2,3,4,5], mask=[0,0,1,0,0])
+>>> x
+array(data =
+ [1 2 999999 4 5],
+ mask =
+ [False False True False False],
+ fill_value=999999)
+>>> y = new_ma.array([1,2,3,4,5], mask=[0,0,1,0,0])
+>>> y
+array(data = [1 2 -- 4 5],
+      mask = [False False True False False],
+      fill_value=999999)
+>>> x==y
+array(data =
+ [True True True True True],
+ mask =
+ [False False True False False],
+ fill_value=?)
+>>> old_ma.getmask(x) == new_ma.getmask(x)
+array([True, True, True, True, True])
+>>> old_ma.getmask(y) == new_ma.getmask(y)
+array([True, True, False, True, True])
+>>> old_ma.getmask(y)
+False
+
+
+Using maskedarray with matplotlib
+---------------------------------
+
+Starting with matplotlib 0.91.2, the masked array importing will work with
+the maskedarray branch as well as with earlier versions.
+
+By default matplotlib still uses numpy.ma, but there is an rcParams setting
+that you can use to select maskedarray instead. In the matplotlibrc file
+you will find::
+
+    #maskedarray : False     # True to use external maskedarray module
+                             # instead of numpy.ma; this is a temporary
+                             # setting for testing maskedarray.
+
+
+Uncomment and set to True to select maskedarray everywhere.
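+
+For instance, the same switch could be made from inside a script (a
+sketch only: the ``maskedarray`` key was a temporary, era-specific
+rcParams entry, so this assumes a matplotlib build that actually
+defines it)::
+
+    import matplotlib
+    # hypothetical on modern builds: the temporary 'maskedarray' key
+    # described above must exist in this matplotlib version
+    matplotlib.rcParams['maskedarray'] = True
+    import pylab   # imports done after this point should use maskedarray
+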
+Alternatively, you can test a script with maskedarray by using a
+command-line option, e.g.::
+
+    python simple_plot.py --maskedarray
+
+
+Masked records
+--------------
+
+Like *numpy.ma.core*, the *ndarray*-based implementation
+of MaskedArray is limited when working with records: you can
+mask any record of the array, but not a field in a record. If you
+need this feature, you may want to give the *mrecords* package
+a try (available in the *maskedarray* directory in the scipy
+sandbox). This module defines a new class, *MaskedRecord*. An
+instance of this class accepts a *recarray* as data, and uses two
+masks: the *fieldmask* has as many entries as records in the array,
+each entry with the same fields as a record, but of boolean types:
+they indicate whether the field is masked or not; a record entry
+is flagged as masked in the *mask* array if all the fields are
+masked. A few examples in the file should give you an idea of what
+can be done. Note that *mrecords* is still experimental...
+
+Optimizing maskedarray
+----------------------
+
+Should masked arrays be filled before processing or not?
+--------------------------------------------------------
+
+In the current implementation, most operations on masked arrays involve
+the following steps:
+
+ * the input arrays are filled
+ * the operation is performed on the filled arrays
+ * the mask is set for the results, from the combination of the input
+   masks and the mask corresponding to the domain of the operation.
+
+For example, consider the division of two masked arrays::
+
+    import numpy
+    import maskedarray as ma
+    x = ma.array([1,2,3,4], mask=[1,0,0,0], dtype=numpy.float64)
+    y = ma.array([-1,0,1,2], mask=[0,0,0,1], dtype=numpy.float64)
+
+The division of x by y is then computed as::
+
+    d1 = x.filled(0)    # d1 = array([0., 2., 3., 4.])
+    d2 = y.filled(1)    # d2 = array([-1., 0., 1., 1.])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+                        # m = array([True, False, False, True])
+    dm = ma.divide.domain(d1, d2)
+                        # dm = array([False, True, False, False])
+    result = (d1/d2).view(MaskedArray)
+                        # result = masked_array([-0., inf, 3., 4.])
+    result._mask = logical_or(m, dm)
+
+Note that a division by zero takes place. To avoid it, we can consider
+filling the input arrays, taking the domain mask into account, so that::
+
+    d1 = x._data.copy() # d1 = array([1., 2., 3., 4.])
+    d2 = y._data.copy() # d2 = array([-1., 0., 1., 2.])
+    dm = ma.divide.domain(d1, d2)
+                        # dm = array([False, True, False, False])
+    numpy.putmask(d2, dm, 1)
+                        # d2 = array([-1., 1., 1., 2.])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+                        # m = array([True, False, False, True])
+    result = (d1/d2).view(MaskedArray)
+                        # result = masked_array([-1., 2., 3., 2.])
+    result._mask = logical_or(m, dm)
+
+Note that the *.copy()* is required to avoid updating the inputs with
+*putmask*. The *.filled()* method also involves a *.copy()*.
+
+A third possibility consists in avoiding filling the arrays::
+
+    d1 = x._data        # d1 = array([1., 2., 3., 4.])
+    d2 = y._data        # d2 = array([-1., 0., 1., 2.])
+    dm = ma.divide.domain(d1, d2)
+                        # dm = array([False, True, False, False])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+                        # m = array([True, False, False, True])
+    result = (d1/d2).view(MaskedArray)
+                        # result = masked_array([-1., inf, 3., 2.])
+    result._mask = logical_or(m, dm)
+
+Note that here again the division by zero takes place.
+
+A quick benchmark gives the following results:
+
+ * *numpy.ma.divide*      : 2.69 ms per loop
+ * classical division     : 2.21 ms per loop
+ * division w/ prefilling : 2.34 ms per loop
+ * division w/o filling   : 1.55 ms per loop
+
+So, is it worth filling the arrays beforehand? Yes, if we are interested
+in avoiding floating-point exceptions that may fill the result with infs
+and nans. No, if we are only interested in speed...
+
+
+Thanks
+------
+
+I'd like to thank Paul Dubois, Travis Oliphant and Sasha for the
+original masked array package: without you, I would never have started
+that (it might be argued that I shouldn't have anyway, but that's
+another story...). I also wish to extend these thanks to Reggie Dugard
+and Eric Firing for their suggestions and numerous improvements.
+
+
+Revision notes
+--------------
+
+ * 08/25/2007 : Creation of this page
+ * 01/23/2007 : The package has been moved to the SciPy sandbox, and is
+   regularly updated: please check out your SVN version!
diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/__init__.py b/.venv/lib/python3.12/site-packages/numpy/ma/__init__.py
new file mode 100644
index 0000000..e2a742e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ma/__init__.py
@@ -0,0 +1,53 @@
+"""
+=============
+Masked Arrays
+=============
+
+Arrays sometimes contain invalid or missing data. When doing operations
+on such arrays, we wish to suppress invalid values, which is the purpose
+masked arrays fulfill (an example of typical use is given below).
+
+For example, examine the following array:
+
+>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
+
+When we try to calculate the mean of the data, the result is undetermined:
+
+>>> np.mean(x)
+nan
+
+The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
+any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
+masked arrays:
+
+>>> m = np.ma.masked_array(x, np.isnan(x))
+>>> m
+masked_array(data=[2.0, 1.0, 3.0, --, 5.0, 2.0, 3.0, --],
+             mask=[False, False, False, True, False, False, False, True],
+             fill_value=1e+20)
+
+Here, we construct a masked array that suppresses all ``NaN`` values. We
+may now proceed to calculate the mean of the other values:
+
+>>> np.mean(m)
+2.6666666666666665
+
+.. [1] Not-a-Number, a floating point value that is the result of an
+       invalid operation.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+.. moduleauthor:: Jarrod Millman
+
+"""
+from . import core, extras
+from .core import *
+from .extras import *
+
+__all__ = ['core', 'extras']
+__all__ += core.__all__
+__all__ += extras.__all__
+
+from numpy._pytesttester import PytestTester
+
+test = PytestTester(__name__)
+del PytestTester
diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/ma/__init__.pyi
new file mode 100644
index 0000000..176e929
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ma/__init__.pyi
@@ -0,0 +1,458 @@
+from .
import core, extras +from .core import ( + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + amax, + amin, + angle, + anom, + anomalies, + any, + append, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argsort, + around, + array, + asanyarray, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + bool_, + ceil, + choose, + clip, + common_fill_value, + compress, + compressed, + concatenate, + conjugate, + convolve, + copy, + correlate, + cos, + cosh, + count, + cumprod, + cumsum, + default_fill_value, + diag, + diagonal, + diff, + divide, + empty, + empty_like, + equal, + exp, + expand_dims, + fabs, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + floor, + floor_divide, + fmod, + frombuffer, + fromflex, + fromfunction, + getdata, + getmask, + getmaskarray, + greater, + greater_equal, + harden_mask, + hypot, + identity, + ids, + indices, + inner, + innerproduct, + is_mask, + is_masked, + isarray, + isMA, + isMaskedArray, + left_shift, + less, + less_equal, + log, + log2, + log10, + logical_and, + logical_not, + logical_or, + logical_xor, + make_mask, + make_mask_descr, + make_mask_none, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_invalid, + masked_less, + masked_less_equal, + masked_not_equal, + masked_object, + masked_outside, + masked_print_option, + masked_singleton, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + mean, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + ndim, + negative, + nomask, + nonzero, + not_equal, + ones, + ones_like, + outer, + outerproduct, + power, + prod, + product, + ptp, + put, + putmask, + ravel, + remainder, + repeat, + reshape, + resize, + right_shift, + round, + round_, + set_fill_value, + shape, + sin, + sinh, + size, + soften_mask, + sometrue, + sort, + sqrt, + squeeze, + std, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + trace, + transpose, + true_divide, + var, + where, + zeros, + zeros_like, +) +from .extras import ( + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + column_stack, + compress_cols, + compress_nd, + compress_rowcols, + compress_rows, + corrcoef, + count_masked, + cov, + diagflat, + dot, + dstack, + ediff1d, + flatnotmasked_contiguous, + flatnotmasked_edges, + hsplit, + hstack, + in1d, + intersect1d, + isin, + mask_cols, + mask_rowcols, + mask_rows, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + row_stack, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vander, + vstack, +) + +__all__ = [ + "core", + "extras", + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", 
+ "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log10", + "log2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "count_masked", + "corrcoef", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "isin", + "in1d", + "intersect1d", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "unique", + "union1d", + "vander", + "vstack", +] diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..f5ec9b3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/core.cpython-312.pyc new file mode 100644 index 0000000..7f4ce11 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/core.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/extras.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/extras.cpython-312.pyc new file mode 100644 index 0000000..c35336f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/extras.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/mrecords.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/mrecords.cpython-312.pyc new file mode 100644 index 0000000..7524ee6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/mrecords.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/testutils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/testutils.cpython-312.pyc new file mode 100644 index 0000000..d5f2731 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/__pycache__/testutils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/core.py b/.venv/lib/python3.12/site-packages/numpy/ma/core.py new file mode 100644 index 0000000..05ea373 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/core.py @@ -0,0 +1,8936 @@ +""" +numpy.ma : a package to handle missing or invalid values. + +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. + +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +.. moduleauthor:: Pierre Gerard-Marchant + +""" +import builtins +import functools +import inspect +import operator +import re +import textwrap +import warnings + +import numpy as np +import numpy._core.numerictypes as ntypes +import numpy._core.umath as umath +from numpy import ( + _NoValue, + amax, + amin, + angle, + bool_, + expand_dims, + finfo, # noqa: F401 + iinfo, # noqa: F401 + iscomplexobj, + ndarray, +) +from numpy import array as narray # noqa: F401 +from numpy._core import multiarray as mu +from numpy._core.numeric import normalize_axis_tuple +from numpy._utils import set_module +from numpy._utils._inspect import formatargspec, getargspec + +__all__ = [ + 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', + 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', + 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', + 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', + 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', + 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', + 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', + 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', + 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', + 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', + 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', + 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', + 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', + 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', + 'less', 'less_equal', 'log', 'log10', 'log2', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', + 
'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_invalid', + 'masked_less', 'masked_less_equal', 'masked_not_equal', + 'masked_object', 'masked_outside', 'masked_print_option', + 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', + 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', + 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', + 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod', + 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', + 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', + 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', + 'var', 'where', 'zeros', 'zeros_like', + ] + +MaskType = np.bool +nomask = MaskType(0) + +class MaskedArrayFutureWarning(FutureWarning): + pass + +def _deprecate_argsort_axis(arr): + """ + Adjust the axis passed to argsort, warning if necessary + + Parameters + ---------- + arr + The array which argsort was called on + + np.ma.argsort has a long-term bug where the default of the axis argument + is wrong (gh-8701), which now must be kept for backwards compatibility. + Thankfully, this only makes a difference when arrays are 2- or more- + dimensional, so we only need a warning then. + """ + if arr.ndim <= 1: + # no warning needed - but switch to -1 anyway, to avoid surprising + # subclasses, which are more likely to implement scalar axes. + return -1 + else: + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + return None + + +def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + + """ + if initialdoc is None: + return + if note is None: + return initialdoc + + notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc)) + notedoc = f"\n\nNotes\n-----\n{inspect.cleandoc(note)}\n" + + return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) + + +def get_object_signature(obj): + """ + Get the signature from obj + + """ + try: + sig = formatargspec(*getargspec(obj)) + except TypeError: + sig = '' + return sig + + +############################################################################### +# Exceptions # +############################################################################### + + +class MAError(Exception): + """ + Class for masked array related errors. + + """ + pass + + +class MaskError(MAError): + """ + Class for mask related errors. 
+ + """ + pass + + +############################################################################### +# Filling options # +############################################################################### + + +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c': 1.e20 + 0.0j, + 'f': 1.e20, + 'i': 999999, + 'O': '?', + 'S': b'N/A', + 'u': 999999, + 'V': b'???', + 'U': 'N/A' + } + +# Add datetime64 and timedelta64 types +for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", + "fs", "as"]: + default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) + default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) + +float_types_list = [np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble] + +_minvals: dict[type, int] = {} +_maxvals: dict[type, int] = {} + +for sctype in ntypes.sctypeDict.values(): + scalar_dtype = np.dtype(sctype) + + if scalar_dtype.kind in "Mm": + info = np.iinfo(np.int64) + min_val, max_val = info.min + 1, info.max + elif np.issubdtype(scalar_dtype, np.integer): + info = np.iinfo(sctype) + min_val, max_val = info.min, info.max + elif np.issubdtype(scalar_dtype, np.floating): + info = np.finfo(sctype) + min_val, max_val = info.min, info.max + elif scalar_dtype.kind == "b": + min_val, max_val = 0, 1 + else: + min_val, max_val = None, None + + _minvals[sctype] = min_val + _maxvals[sctype] = max_val + +max_filler = _minvals +max_filler.update([(k, -np.inf) for k in float_types_list[:4]]) +max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) + +min_filler = _maxvals +min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) +min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) + +del float_types_list + +def _recursive_fill_value(dtype, f): + """ + Recursively produce a fill value for `dtype`, calling f on scalar dtypes + """ + if dtype.names is not None: + # We wrap into `array` here, which ensures we use NumPy cast rules + # for integer casts, this allows the use of 99999 as a fill value + # for int8. + # TODO: This is probably a mess, but should best preserve behavior? + vals = tuple( + np.array(_recursive_fill_value(dtype[name], f)) + for name in dtype.names) + return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + elif dtype.subdtype: + subtype, shape = dtype.subdtype + subval = _recursive_fill_value(subtype, f) + return np.full(shape, subval) + else: + return f(dtype) + + +def _get_dtype_of(obj): + """ Convert the argument for *_fill_value into a dtype """ + if isinstance(obj, np.dtype): + return obj + elif hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.asanyarray(obj).dtype + + +def default_fill_value(obj): + """ + Return the default fill value for the argument object. + + The default filling value depends on the datatype of the input + array or the type of the input scalar: + + ======== ======== + datatype default + ======== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + ======== ======== + + For structured types, a structured scalar is returned, with each field the + default fill value for its type. + + For subarray types, the fill value is an array of the same size containing + the default scalar fill value. + + Parameters + ---------- + obj : ndarray, dtype or scalar + The array data-type or scalar for which the default fill value + is returned. + + Returns + ------- + fill_value : scalar + The default fill value. 
+ + Examples + -------- + >>> import numpy as np + >>> np.ma.default_fill_value(1) + 999999 + >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) + 1e+20 + >>> np.ma.default_fill_value(np.dtype(complex)) + (1e+20+0j) + + """ + def _scalar_fill_value(dtype): + if dtype.kind in 'Mm': + return default_filler.get(dtype.str[1:], '?') + else: + return default_filler.get(dtype.kind, '?') + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def _extremum_fill_value(obj, extremum, extremum_name): + + def _scalar_fill_value(dtype): + try: + return extremum[dtype.type] + except KeyError as e: + raise TypeError( + f"Unsuitable type {dtype} for calculating {extremum_name}." + ) from None + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def minimum_fill_value(obj): + """ + Return the maximum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the minimum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The maximum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + maximum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.int32() + >>> ma.minimum_fill_value(a) + 2147483647 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.minimum_fill_value(a) + inf + + """ + return _extremum_fill_value(obj, min_filler, "minimum") + + +def maximum_fill_value(obj): + """ + Return the minimum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the maximum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The minimum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + minimum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf + + """ + return _extremum_fill_value(obj, max_filler, "maximum") + + +def _recursive_set_fill_value(fillvalue, dt): + """ + Create a fill value for a structured dtype. + + Parameters + ---------- + fillvalue : scalar or array_like + Scalar or array representing the fill value. If it is of shorter + length than the number of fields in dt, it will be resized. + dt : dtype + The structured dtype for which to create the fill value. 
+ + Returns + ------- + val : tuple + A tuple of values corresponding to the structured fill value. + + """ + fillvalue = np.resize(fillvalue, len(dt.names)) + output_value = [] + for (fval, name) in zip(fillvalue, dt.names): + cdtype = dt[name] + if cdtype.subdtype: + cdtype = cdtype.subdtype[0] + + if cdtype.names is not None: + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + +def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype. + + If fill_value is not None, its value is forced to the given dtype. + + The result is always a 0d array. + + """ + ndtype = np.dtype(ndtype) + if fill_value is None: + fill_value = default_fill_value(ndtype) + # TODO: It seems better to always store a valid fill_value, the oddity + # about is that `_fill_value = None` would behave even more + # different then. + # (e.g. this allows arr_uint8.astype(int64) to have the default + # fill value again...) + # The one thing that changed in 2.0/2.1 around cast safety is that the + # default `int(99...)` is not a same-kind cast anymore, so if we + # have a uint, use the default uint. + if ndtype.kind == "u": + fill_value = np.uint(fill_value) + elif ndtype.names is not None: + if isinstance(fill_value, (ndarray, np.void)): + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except ValueError as e: + err_msg = "Unable to transform %s to dtype %s" + raise ValueError(err_msg % (fill_value, ndtype)) from e + else: + fill_value = np.asarray(fill_value, dtype=object) + fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), + dtype=ndtype) + elif isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) + else: + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except (OverflowError, ValueError) as e: + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) from e + return np.array(fill_value) + + +def set_fill_value(a, fill_value): + """ + Set the filling value of a, if a is a masked array. + + This function changes the fill value of the masked array `a` in place. + If `a` is not a masked array, the function returns silently, without + doing anything. + + Parameters + ---------- + a : array_like + Input array. + fill_value : dtype + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of `a`. + + Returns + ------- + None + Nothing returned by this function. + + See Also + -------- + maximum_fill_value : Return the default fill value for a dtype. + MaskedArray.fill_value : Return current fill value. + MaskedArray.set_fill_value : Equivalent method. 
+ + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> a = ma.masked_where(a < 3, a) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=999999) + >>> ma.set_fill_value(a, -999) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=-999) + + Nothing happens if `a` is not a masked array. + + >>> a = list(range(5)) + >>> a + [0, 1, 2, 3, 4] + >>> ma.set_fill_value(a, 100) + >>> a + [0, 1, 2, 3, 4] + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> ma.set_fill_value(a, 100) + >>> a + array([0, 1, 2, 3, 4]) + + """ + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + + +def get_fill_value(a): + """ + Return the filling value of a, if any. Otherwise, returns the + default filling value for that type. + + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + + +def common_fill_value(a, b): + """ + Return the common filling value of two masked arrays, if any. + + If ``a.fill_value == b.fill_value``, return the fill value, + otherwise return None. + + Parameters + ---------- + a, b : MaskedArray + The masked arrays for which to compare fill values. + + Returns + ------- + fill_value : scalar or None + The common fill value, or None. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([0, 1.], fill_value=3) + >>> y = np.ma.array([0, 1.], fill_value=3) + >>> np.ma.common_fill_value(x, y) + 3.0 + + """ + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + + +def filled(a, fill_value=None): + """ + Return input as an `~numpy.ndarray`, with masked values replaced by + `fill_value`. + + If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` with no masked values, then ``a.data`` is + returned. + If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to + ``a.fill_value``. + + Parameters + ---------- + a : MaskedArray or array_like + An input object. + fill_value : array_like, optional. + Can be scalar or non-scalar. If non-scalar, the + resulting filled array should be broadcastable + over input array. Default is None. + + Returns + ------- + a : ndarray + The filled array. + + See Also + -------- + compressed + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x.filled() + array([[999999, 1, 2], + [999999, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=333) + array([[333, 1, 2], + [333, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=np.arange(3)) + array([[0, 1, 2], + [0, 4, 5], + [6, 7, 8]]) + + """ + if hasattr(a, 'filled'): + return a.filled(fill_value) + + elif isinstance(a, ndarray): + # Should we check for contiguity ? and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return np.array(a, 'O') + else: + return np.array(a) + + +def get_masked_subclass(*arrays): + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + + In case of siblings, the first listed takes over. 
+ + """ + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + # Don't return MaskedConstant as result: revert to MaskedArray + if rcls.__name__ == 'MaskedConstant': + return MaskedArray + return rcls + + +def getdata(a, subok=True): + """ + Return the data of a masked array as an ndarray. + + Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, + else return `a` as a ndarray or subclass (depending on `subok`) if not. + + Parameters + ---------- + a : array_like + Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + subok : bool + Whether to force the output to be a `pure` ndarray (False) or to + return a subclass of ndarray if appropriate (True, default). + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getdata(a) + array([[1, 2], + [3, 4]]) + + Equivalently use the ``MaskedArray`` `data` attribute. + + >>> a.data + array([[1, 2], + [3, 4]]) + + """ + try: + data = a._data + except AttributeError: + data = np.array(a, copy=None, subok=subok) + if not subok: + return data.view(ndarray) + return data + + +get_data = getdata + + +def fix_invalid(a, mask=nomask, copy=True, fill_value=None): + """ + Return input with invalid data masked and replaced by a fill value. + + Invalid data means values of `nan`, `inf`, etc. + + Parameters + ---------- + a : array_like + Input array, a (subclass of) ndarray. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + copy : bool, optional + Whether to use a copy of `a` (True) or to fix `a` in place (False). + Default is True. + fill_value : scalar, optional + Value used for fixing invalid data. Default is None, in which case + the ``a.fill_value`` is used. + + Returns + ------- + b : MaskedArray + The input array with invalid entries fixed. + + Notes + ----- + A copy is performed by default. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) + >>> x + masked_array(data=[--, -1.0, nan, inf], + mask=[ True, False, False, False], + fill_value=1e+20) + >>> np.ma.fix_invalid(x) + masked_array(data=[--, -1.0, --, --], + mask=[ True, False, True, True], + fill_value=1e+20) + + >>> fixed = np.ma.fix_invalid(x) + >>> fixed.data + array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) + >>> x.data + array([ 1., -1., nan, inf]) + + """ + a = masked_array(a, copy=copy, mask=mask, subok=True) + invalid = np.logical_not(np.isfinite(a._data)) + if not invalid.any(): + return a + a._mask |= invalid + if fill_value is None: + fill_value = a.fill_value + a._data[invalid] = fill_value + return a + +def is_string_or_list_of_strings(val): + return (isinstance(val, str) or + (isinstance(val, list) and val and + builtins.all(isinstance(s, str) for s in val))) + +############################################################################### +# Ufuncs # +############################################################################### + + +ufunc_domain = {} +ufunc_fills = {} + + +class _DomainCheckInterval: + """ + Define a valid interval, so that : + + ``domain_check_interval(a,b)(x) == True`` where + ``x < a`` or ``x > b``. + + """ + + def __init__(self, a, b): + "domain_check_interval(a,b)(x) = true where x < a or y > b" + if a > b: + (a, b) = (b, a) + self.a = a + self.b = b + + def __call__(self, x): + "Execute the call behavior." + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(invalid='ignore'): + return umath.logical_or(umath.greater(x, self.b), + umath.less(x, self.a)) + + +class _DomainTan: + """ + Define a valid interval for the `tan` function, so that: + + ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` + + """ + + def __init__(self, eps): + "domain_tan(eps) = true where abs(cos(x)) < eps)" + self.eps = eps + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less(umath.absolute(umath.cos(x)), self.eps) + + +class _DomainSafeDivide: + """ + Define a domain for safe division. + + """ + + def __init__(self, tolerance=None): + self.tolerance = tolerance + + def __call__(self, a, b): + # Delay the selection of the tolerance to here in order to reduce numpy + # import times. The calculation of these parameters is a substantial + # component of numpy's import time. + if self.tolerance is None: + self.tolerance = np.finfo(float).tiny + # don't call ma ufuncs from __array_wrap__ which would fail for scalars + a, b = np.asarray(a), np.asarray(b) + with np.errstate(all='ignore'): + return umath.absolute(a) * self.tolerance >= umath.absolute(b) + + +class _DomainGreater: + """ + DomainGreater(v)(x) is True where x <= v. + + """ + + def __init__(self, critical_value): + "DomainGreater(v)(x) = true where x <= v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less_equal(x, self.critical_value) + + +class _DomainGreaterEqual: + """ + DomainGreaterEqual(v)(x) is True where x < v. + + """ + + def __init__(self, critical_value): + "DomainGreaterEqual(v)(x) = true where x < v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." 
+ with np.errstate(invalid='ignore'): + return umath.less(x, self.critical_value) + + +class _MaskedUFunc: + def __init__(self, ufunc): + self.f = ufunc + self.__doc__ = ufunc.__doc__ + self.__name__ = ufunc.__name__ + self.__qualname__ = ufunc.__qualname__ + + def __str__(self): + return f"Masked version of {self.f}" + + +class _MaskedUnaryOperation(_MaskedUFunc): + """ + Defines masked version of unary operations, where invalid values are + pre-masked. + + Parameters + ---------- + mufunc : callable + The function for which to define a masked version. Made available + as ``_MaskedUnaryOperation.f``. + fill : scalar, optional + Filling value, default is 0. + domain : class instance + Domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + + """ + + def __init__(self, mufunc, fill=0, domain=None): + super().__init__(mufunc) + self.fill = fill + self.domain = domain + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + + def __call__(self, a, *args, **kwargs): + """ + Execute the call behavior. + + """ + d = getdata(a) + # Deal with domain + if self.domain is not None: + # Case 1.1. : Domained function + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + # Make a mask + m = ~umath.isfinite(result) + m |= self.domain(d) + m |= getmask(a) + else: + # Case 1.2. : Function without a domain + # Get the result and the mask + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + m = getmask(a) + + if not result.ndim: + # Case 2.1. : The result is scalarscalar + if m: + return masked + return result + + if m is not nomask: + # Case 2.2. The result is an array + # We need to fill the invalid data back w/ the input Now, + # that's plain silly: in C, we would just skip the element and + # keep the original, but we do have to do it that way in Python + + # In case result has a lower dtype than the inputs (as in + # equal) + try: + np.copyto(result, d, where=m) + except TypeError: + pass + # Transform to + masked_result = result.view(get_masked_subclass(a)) + masked_result._mask = m + masked_result._update_from(a) + return masked_result + + +class _MaskedBinaryOperation(_MaskedUFunc): + """ + Define masked version of binary operations, where invalid + values are pre-masked. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_MaskedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, mbfunc, fillx=0, filly=0): + """ + abfunc(fillx, filly) must be defined. + + abfunc(x, filly) = x for all x to enable reduce. + + """ + super().__init__(mbfunc) + self.fillx = fillx + self.filly = filly + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + """ + Execute the call behavior. 
+ + """ + # Get the data, as ndarray + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(): + np.seterr(divide='ignore', invalid='ignore') + result = self.f(da, db, *args, **kwargs) + # Get the mask for the result + (ma, mb) = (getmask(a), getmask(b)) + if ma is nomask: + if mb is nomask: + m = nomask + else: + m = umath.logical_or(getmaskarray(a), mb) + elif mb is nomask: + m = umath.logical_or(ma, getmaskarray(b)) + else: + m = umath.logical_or(ma, mb) + + # Case 1. : scalar + if not result.ndim: + if m: + return masked + return result + + # Case 2. : array + # Revert result to da where masked + if m is not nomask and m.any(): + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, da, casting='unsafe', where=m) + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + def reduce(self, target, axis=0, dtype=None): + """ + Reduce `target` along the given `axis`. + + """ + tclass = get_masked_subclass(target) + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=True) + m.shape = (1,) + + if m is nomask: + tr = self.f.reduce(t, axis) + mr = nomask + else: + tr = self.f.reduce(t, axis, dtype=dtype) + mr = umath.logical_and.reduce(m, axis) + + if not tr.shape: + if mr: + return masked + else: + return tr + masked_tr = tr.view(tclass) + masked_tr._mask = mr + return masked_tr + + def outer(self, a, b): + """ + Return the function applied to the outer product of a and b. + + """ + (da, db) = (getdata(a), getdata(b)) + d = self.f.outer(da, db) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + if m is not nomask: + np.copyto(d, da, where=m) + if not d.shape: + return d + masked_d = d.view(get_masked_subclass(a, b)) + masked_d._mask = m + return masked_d + + def accumulate(self, target, axis=0): + """Accumulate `target` along `axis` after filling with y fill + value. + + """ + tclass = get_masked_subclass(target) + t = filled(target, self.filly) + result = self.f.accumulate(t, axis) + masked_result = result.view(tclass) + return masked_result + + +class _DomainedBinaryOperation(_MaskedUFunc): + """ + Define binary operations that have a domain, like divide. + + They have no reduce, outer or accumulate. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_DomainedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + super().__init__(dbfunc) + self.domain = domain + self.fillx = fillx + self.filly = filly + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + "Execute the call behavior." 
+ # Get the data + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(da, db, *args, **kwargs) + # Get the mask as a combination of the source masks and invalid + m = ~umath.isfinite(result) + m |= getmask(a) + m |= getmask(b) + # Apply the domain + domain = ufunc_domain.get(self.f, None) + if domain is not None: + m |= domain(da, db) + # Take care of the scalar case first + if not m.ndim: + if m: + return masked + else: + return result + # When the mask is True, put back da if possible + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, 0, casting='unsafe', where=m) + # avoid using "*" since this may be overlaid + masked_da = umath.multiply(m, da) + # only add back if it can be cast safely + if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): + result += masked_da + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + +# Unary ufuncs +exp = _MaskedUnaryOperation(umath.exp) +conjugate = _MaskedUnaryOperation(umath.conjugate) +sin = _MaskedUnaryOperation(umath.sin) +cos = _MaskedUnaryOperation(umath.cos) +arctan = _MaskedUnaryOperation(umath.arctan) +arcsinh = _MaskedUnaryOperation(umath.arcsinh) +sinh = _MaskedUnaryOperation(umath.sinh) +cosh = _MaskedUnaryOperation(umath.cosh) +tanh = _MaskedUnaryOperation(umath.tanh) +abs = absolute = _MaskedUnaryOperation(umath.absolute) +angle = _MaskedUnaryOperation(angle) +fabs = _MaskedUnaryOperation(umath.fabs) +negative = _MaskedUnaryOperation(umath.negative) +floor = _MaskedUnaryOperation(umath.floor) +ceil = _MaskedUnaryOperation(umath.ceil) +around = _MaskedUnaryOperation(np.around) +logical_not = _MaskedUnaryOperation(umath.logical_not) + +# Domained unary ufuncs +sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, + _DomainGreaterEqual(0.0)) +log = _MaskedUnaryOperation(umath.log, 1.0, + _DomainGreater(0.0)) +log2 = _MaskedUnaryOperation(umath.log2, 1.0, + _DomainGreater(0.0)) +log10 = _MaskedUnaryOperation(umath.log10, 1.0, + _DomainGreater(0.0)) +tan = _MaskedUnaryOperation(umath.tan, 0.0, + _DomainTan(1e-35)) +arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccos = _MaskedUnaryOperation(umath.arccos, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, + _DomainGreaterEqual(1.0)) +arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, + _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) + +# Binary ufuncs +add = _MaskedBinaryOperation(umath.add) +subtract = _MaskedBinaryOperation(umath.subtract) +multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) +arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) +equal = _MaskedBinaryOperation(umath.equal) +equal.reduce = None +not_equal = _MaskedBinaryOperation(umath.not_equal) +not_equal.reduce = None +less_equal = _MaskedBinaryOperation(umath.less_equal) +less_equal.reduce = None +greater_equal = _MaskedBinaryOperation(umath.greater_equal) +greater_equal.reduce = None +less = _MaskedBinaryOperation(umath.less) +less.reduce = None +greater = _MaskedBinaryOperation(umath.greater) +greater.reduce = None +logical_and = _MaskedBinaryOperation(umath.logical_and) +alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce +logical_or = 
_MaskedBinaryOperation(umath.logical_or)
+sometrue = logical_or.reduce
+logical_xor = _MaskedBinaryOperation(umath.logical_xor)
+bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
+bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
+bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
+hypot = _MaskedBinaryOperation(umath.hypot)
+
+# Domained binary ufuncs
+divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
+true_divide = divide  # Just an alias for divide.
+floor_divide = _DomainedBinaryOperation(umath.floor_divide,
+                                        _DomainSafeDivide(), 0, 1)
+remainder = _DomainedBinaryOperation(umath.remainder,
+                                     _DomainSafeDivide(), 0, 1)
+fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
+mod = remainder
+
+###############################################################################
+#                        Mask creation functions                              #
+###############################################################################
+
+
+def _replace_dtype_fields_recursive(dtype, primitive_dtype):
+    "Private function allowing recursion in _replace_dtype_fields."
+    _recurse = _replace_dtype_fields_recursive
+
+    # Do we have some name fields ?
+    if dtype.names is not None:
+        descr = []
+        for name in dtype.names:
+            field = dtype.fields[name]
+            if len(field) == 3:
+                # Prepend the title to the name
+                name = (field[-1], name)
+            descr.append((name, _recurse(field[0], primitive_dtype)))
+        new_dtype = np.dtype(descr)
+
+    # Is this some kind of composite a la (float,2)
+    elif dtype.subdtype:
+        descr = list(dtype.subdtype)
+        descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
+        new_dtype = np.dtype(tuple(descr))
+
+    # this is a primitive type, so do a direct replacement
+    else:
+        new_dtype = primitive_dtype
+
+    # preserve identity of dtypes
+    if new_dtype == dtype:
+        new_dtype = dtype
+
+    return new_dtype
+
+
+def _replace_dtype_fields(dtype, primitive_dtype):
+    """
+    Construct a dtype description list from a given dtype.
+
+    Returns a new dtype object, with all fields and subtypes in the given type
+    recursively replaced with `primitive_dtype`.
+
+    Arguments are coerced to dtypes first.
+    """
+    dtype = np.dtype(dtype)
+    primitive_dtype = np.dtype(primitive_dtype)
+    return _replace_dtype_fields_recursive(dtype, primitive_dtype)
+
+
+def make_mask_descr(ndtype):
+    """
+    Construct a dtype description list from a given dtype.
+
+    Returns a new dtype object, with the type of all fields in `ndtype` to a
+    boolean type. Field names are not altered.
+
+    Parameters
+    ----------
+    ndtype : dtype
+        The dtype to convert.
+
+    Returns
+    -------
+    result : dtype
+        A dtype that looks like `ndtype`, the type of all fields is boolean.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import numpy.ma as ma
+    >>> dtype = np.dtype({'names':['foo', 'bar'],
+    ...                   'formats':[np.float32, np.int64]})
+    >>> dtype
+    dtype([('foo', '<f4'), ('bar', '<i8')])
+    >>> ma.make_mask_descr(dtype)
+    dtype([('foo', '|b1'), ('bar', '|b1')])
+    >>> ma.make_mask_descr(np.float32)
+    dtype('bool')
+
+    """
+    return _replace_dtype_fields(ndtype, MaskType)
+
+
+def getmask(a):
+    """
+    Return the mask of a masked array, or nomask.
+
+    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
+    mask is not `nomask`, else return `nomask`. To guarantee a full array
+    of booleans of the same shape as a, use `getmaskarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input `MaskedArray` for which the mask is required.
+
+    See Also
+    --------
+    getdata : Return the data of a masked array as an ndarray.
+ getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmask(a) + array([[False, True], + [False, False]]) + + Equivalently use the `MaskedArray` `mask` attribute. + + >>> a.mask + array([[False, True], + [False, False]]) + + Result when mask == `nomask` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.nomask + False + >>> ma.getmask(b) == ma.nomask + True + >>> b.mask == ma.nomask + True + + """ + return getattr(a, '_mask', nomask) + + +get_mask = getmask + + +def getmaskarray(arr): + """ + Return the mask of a masked array, or full boolean array of False. + + Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and + the mask is not `nomask`, else return a full boolean array of False of + the same shape as `arr`. + + Parameters + ---------- + arr : array_like + Input `MaskedArray` for which the mask is required. + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getdata : Return the data of a masked array as an ndarray. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmaskarray(a) + array([[False, True], + [False, False]]) + + Result when mask == ``nomask`` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.getmaskarray(b) + array([[False, False], + [False, False]]) + + """ + mask = getmask(arr) + if mask is nomask: + mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None)) + return mask + + +def is_mask(m): + """ + Return True if m is a valid, standard mask. + + This function does not check the contents of the input, only that the + type is MaskType. In particular, this function returns False if the + mask has a flexible dtype. + + Parameters + ---------- + m : array_like + Array to test. + + Returns + ------- + result : bool + True if `m.dtype.type` is MaskType, False otherwise. + + See Also + -------- + ma.isMaskedArray : Test whether input is an instance of MaskedArray. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> m + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_mask(m) + False + >>> ma.is_mask(m.mask) + True + + Input must be an ndarray (or have similar attributes) + for it to be considered a valid mask. + + >>> m = [False, True, False] + >>> ma.is_mask(m) + False + >>> m = np.array([False, True, False]) + >>> m + array([False, True, False]) + >>> ma.is_mask(m) + True + + Arrays with complex dtypes don't return True. + + >>> dtype = np.dtype({'names':['monty', 'pithon'], + ... 'formats':[bool, bool]}) + >>> dtype + dtype([('monty', '|b1'), ('pithon', '|b1')]) + >>> m = np.array([(True, False), (False, True), (True, False)], + ... 
dtype=dtype)
+    >>> m
+    array([( True, False), (False,  True), ( True, False)],
+          dtype=[('monty', '?'), ('pithon', '?')])
+    >>> ma.is_mask(m)
+    False
+
+    """
+    try:
+        return m.dtype.type is MaskType
+    except AttributeError:
+        return False
+
+
+def _shrink_mask(m):
+    """
+    Shrink a mask to nomask if possible
+    """
+    if m.dtype.names is None and not m.any():
+        return nomask
+    else:
+        return m
+
+
+def make_mask(m, copy=False, shrink=True, dtype=MaskType):
+    """
+    Create a boolean mask from an array.
+
+    Return `m` as a boolean mask, creating a copy if necessary or requested.
+    The function can accept any sequence that is convertible to integers,
+    or ``nomask``. Does not require that contents must be 0s and 1s, values
+    of 0 are interpreted as False, everything else as True.
+
+    Parameters
+    ----------
+    m : array_like
+        Potential mask.
+    copy : bool, optional
+        Whether to return a copy of `m` (True) or `m` itself (False).
+    shrink : bool, optional
+        Whether to shrink `m` to ``nomask`` if all its values are False.
+    dtype : dtype, optional
+        Data-type of the output mask. By default, the output mask has a
+        dtype of MaskType (bool). If the dtype is flexible, each field has
+        a boolean dtype. This is ignored when `m` is ``nomask``, in which
+        case ``nomask`` is always returned.
+
+    Returns
+    -------
+    result : ndarray
+        A boolean mask derived from `m`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import numpy.ma as ma
+    >>> m = [True, False, True, True]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+    >>> m = [1, 0, 1, 1]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+    >>> m = [1, 0, 2, -3]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+
+    Effect of the `shrink` parameter.
+
+    >>> m = np.zeros(4)
+    >>> m
+    array([0., 0., 0., 0.])
+    >>> ma.make_mask(m)
+    False
+    >>> ma.make_mask(m, shrink=False)
+    array([False, False, False, False])
+
+    Using a flexible `dtype`.
+
+    >>> m = [1, 0, 1, 1]
+    >>> n = [0, 1, 0, 0]
+    >>> arr = []
+    >>> for man, mouse in zip(m, n):
+    ...     arr.append((man, mouse))
+    >>> arr
+    [(1, 0), (0, 1), (1, 0), (1, 0)]
+    >>> dtype = np.dtype({'names':['man', 'mouse'],
+    ...                   'formats':[np.int64, np.int64]})
+    >>> arr = np.array(arr, dtype=dtype)
+    >>> arr
+    array([(1, 0), (0, 1), (1, 0), (1, 0)],
+          dtype=[('man', '<i8'), ('mouse', '<i8')])
+    >>> ma.make_mask(arr, dtype=dtype)
+    array([(True, False), (False, True), (True, False), (True, False)],
+          dtype=[('man', '|b1'), ('mouse', '|b1')])
+
+    """
+    if m is nomask:
+        return nomask
+
+    # Make sure the input dtype is valid.
+    dtype = make_mask_descr(dtype)
+
+    # legacy boolean special case: "existence of fields implies true"
+    if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool:
+        return np.ones(m.shape, dtype=dtype)
+
+    # Fill the mask in case there are missing data; turn it into an ndarray.
+    copy = None if not copy else True
+    result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
+    # Bas les masques !
+    if shrink:
+        result = _shrink_mask(result)
+    return result
+
+
+def make_mask_none(newshape, dtype=None):
+    """
+    Return a boolean mask of the given shape, filled with False.
+
+    This function returns a boolean ndarray with all entries False, that can
+    be used in common mask manipulations. If a complex dtype is specified, the
+    type of each field is converted to a boolean type.
+
+    Parameters
+    ----------
+    newshape : tuple
+        A tuple indicating the shape of the mask.
+    dtype : {None, dtype}, optional
+        If None, use a MaskType instance. Otherwise, use a new datatype with
+        the same fields as `dtype`, converted to boolean types.
+
+    Returns
+    -------
+    result : ndarray
+        An ndarray of appropriate shape and dtype, filled with False.
+
+    See Also
+    --------
+    make_mask : Create a boolean mask from an array.
+    make_mask_descr : Construct a dtype description list from a given dtype.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import numpy.ma as ma
+    >>> ma.make_mask_none((3,))
+    array([False, False, False])
+
+    Defining a more complex dtype.
+
+    >>> dtype = np.dtype({'names':['foo', 'bar'],
+    ...                   'formats':[np.float32, np.int64]})
+    >>> dtype
+    dtype([('foo', '<f4'), ('bar', '<i8')])
+    >>> ma.make_mask_none((3,), dtype=dtype)
+    array([(False, False), (False, False), (False, False)],
+          dtype=[('foo', '|b1'), ('bar', '|b1')])
+
+    """
+    if dtype is None:
+        result = np.zeros(newshape, dtype=MaskType)
+    else:
+        result = np.zeros(newshape, dtype=make_mask_descr(dtype))
+    return result
+
+
+def _recursive_mask_or(m1, m2, newmask):
+    names = m1.dtype.names
+    for name in names:
+        current1 = m1[name]
+        if current1.dtype.names is not None:
+            _recursive_mask_or(current1, m2[name], newmask[name])
+        else:
+            umath.logical_or(current1, m2[name], newmask[name])
+
+
+def mask_or(m1, m2, copy=False, shrink=True):
+    """
+    Combine two masks with the ``logical_or`` operator.
+
+    The result may be a view on `m1` or `m2` if the other is `nomask`
+    (i.e. False).
+
+    Parameters
+    ----------
+    m1, m2 : array_like
+        Input masks.
+    copy : bool, optional
+        If copy is False and one of the inputs is `nomask`, return a view
+        of the other input mask. Defaults to False.
+    shrink : bool, optional
+        Whether to shrink the output to `nomask` if all its values are
+        False. Defaults to True.
+
+    Returns
+    -------
+    mask : output mask
+        The result masks values that are masked in either `m1` or `m2`.
+
+    Raises
+    ------
+    ValueError
+        If `m1` and `m2` have different flexible dtypes.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> m1 = np.ma.make_mask([0, 1, 1, 0])
+    >>> m2 = np.ma.make_mask([1, 0, 0, 0])
+    >>> np.ma.mask_or(m1, m2)
+    array([ True,  True,  True, False])
+
+    """
+
+    if (m1 is nomask) or (m1 is False):
+        dtype = getattr(m2, 'dtype', MaskType)
+        return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
+    if (m2 is nomask) or (m2 is False):
+        dtype = getattr(m1, 'dtype', MaskType)
+        return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
+    if m1 is m2 and is_mask(m1):
+        return _shrink_mask(m1) if shrink else m1
+    (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
+    if dtype1 != dtype2:
+        raise ValueError(f"Incompatible dtypes '{dtype1}'<>'{dtype2}'")
+    if dtype1.names is not None:
+        # Allocate an output mask array with the properly broadcast shape.
+        newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
+        _recursive_mask_or(m1, m2, newmask)
+        return newmask
+    return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
+
+
+def flatten_mask(mask):
+    """
+    Returns a completely flattened version of the mask, where nested fields
+    are collapsed.
+
+    Parameters
+    ----------
+    mask : array_like
+        Input array, which will be interpreted as booleans.
+
+    Returns
+    -------
+    flattened_mask : ndarray of bools
+        The flattened input.
+ + Examples + -------- + >>> import numpy as np + >>> mask = np.array([0, 0, 1]) + >>> np.ma.flatten_mask(mask) + array([False, False, True]) + + >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + >>> np.ma.flatten_mask(mask) + array([False, False, False, True]) + + >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) + >>> np.ma.flatten_mask(mask) + array([False, False, False, False, False, True]) + + """ + + def _flatmask(mask): + "Flatten the mask and returns a (maybe nested) sequence of booleans." + mnames = mask.dtype.names + if mnames is not None: + return [flatten_mask(mask[name]) for name in mnames] + else: + return mask + + def _flatsequence(sequence): + "Generates a flattened version of the sequence." + try: + for element in sequence: + if hasattr(element, '__iter__'): + yield from _flatsequence(element) + else: + yield element + except TypeError: + yield sequence + + mask = np.asarray(mask) + flattened = _flatsequence(_flatmask(mask)) + return np.array(list(flattened), dtype=bool) + + +def _check_mask_axis(mask, axis, keepdims=np._NoValue): + "Check whether there are masked values along the given axis" + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if mask is not nomask: + return mask.all(axis=axis, **kwargs) + return nomask + + +############################################################################### +# Masking functions # +############################################################################### + +def masked_where(condition, a, copy=True): + """ + Mask an array where a condition is met. + + Return `a` as an array masked where `condition` is True. + Any masked values of `a` or `condition` are also masked in the output. + + Parameters + ---------- + condition : array_like + Masking condition. When `condition` tests floating point values for + equality, consider using ``masked_values`` instead. + a : array_like + Array to mask. + copy : bool + If True (default) make a copy of `a` in the result. If False modify + `a` in place and return a view. + + Returns + ------- + result : MaskedArray + The result of masking `a` where `condition` is True. + + See Also + -------- + masked_values : Mask using floating point equality. + masked_equal : Mask where equal to a given value. + masked_not_equal : Mask where *not* equal to a given value. + masked_less_equal : Mask where less than or equal to a given value. + masked_greater_equal : Mask where greater than or equal to a given value. + masked_less : Mask where less than a given value. + masked_greater : Mask where greater than a given value. + masked_inside : Mask inside a given interval. + masked_outside : Mask outside a given interval. + masked_invalid : Mask invalid values (NaNs or infs). + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_where(a <= 2, a) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + Mask array `b` conditional on `a`. 
+
+    >>> b = ['a', 'b', 'c', 'd']
+    >>> ma.masked_where(a == 2, b)
+    masked_array(data=['a', 'b', --, 'd'],
+                 mask=[False, False,  True, False],
+           fill_value='N/A',
+                dtype='<U1')
+
+    Effect of the `copy` argument.
+
+    >>> c = ma.masked_where(a <= 2, a)
+    >>> c
+    masked_array(data=[--, --, --, 3],
+                 mask=[ True,  True,  True, False],
+           fill_value=999999)
+    >>> c[0] = 99
+    >>> c
+    masked_array(data=[99, --, --, 3],
+                 mask=[False,  True,  True, False],
+           fill_value=999999)
+    >>> a
+    array([0, 1, 2, 3])
+    >>> c = ma.masked_where(a <= 2, a, copy=False)
+    >>> c[0] = 99
+    >>> c
+    masked_array(data=[99, --, --, 3],
+                 mask=[False,  True,  True, False],
+           fill_value=999999)
+    >>> a
+    array([99, 1, 2, 3])
+
+    When `condition` or `a` contain masked values.
+
+    >>> a = np.arange(4)
+    >>> a = ma.masked_where(a == 2, a)
+    >>> a
+    masked_array(data=[0, 1, --, 3],
+                 mask=[False, False,  True, False],
+           fill_value=999999)
+    >>> b = np.arange(4)
+    >>> b = ma.masked_where(b == 0, b)
+    >>> b
+    masked_array(data=[--, 1, 2, 3],
+                 mask=[ True, False, False, False],
+           fill_value=999999)
+    >>> ma.masked_where(a == 3, b)
+    masked_array(data=[--, 1, --, --],
+                 mask=[ True, False,  True,  True],
+           fill_value=999999)
+
+    """
+    # Make sure that condition is a valid standard-type mask.
+    cond = make_mask(condition, shrink=False)
+    a = np.array(a, copy=copy, subok=True)
+
+    (cshape, ashape) = (cond.shape, a.shape)
+    if cshape and cshape != ashape:
+        raise IndexError("Inconsistent shape between the condition and the input"
+                         " (got %s and %s)" % (cshape, ashape))
+    if hasattr(a, '_mask'):
+        cond = mask_or(cond, a._mask)
+        cls = type(a)
+    else:
+        cls = MaskedArray
+    result = a.view(cls)
+    # Assign to *.mask so that structured masks are handled correctly.
+    result.mask = _shrink_mask(cond)
+    # There is no view of a boolean so when 'a' is a MaskedArray with nomask
+    # the update to the result's mask has no effect.
+    if not copy and hasattr(a, '_mask') and getmask(a) is nomask:
+        a._mask = result._mask.view()
+    return result
+
+
+def masked_greater(x, value, copy=True):
+    """
+    Mask an array where greater than a given value.
+
+    This function is a shortcut to ``masked_where``, with
+    `condition` = (x > value).
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import numpy.ma as ma
+    >>> a = np.arange(4)
+    >>> a
+    array([0, 1, 2, 3])
+    >>> ma.masked_greater(a, 2)
+    masked_array(data=[0, 1, 2, --],
+                 mask=[False, False, False,  True],
+           fill_value=999999)
+
+    """
+    return masked_where(greater(x, value), x, copy=copy)
+
+
+def masked_greater_equal(x, value, copy=True):
+    """
+    Mask an array where greater than or equal to a given value.
+
+    This function is a shortcut to ``masked_where``, with
+    `condition` = (x >= value).
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import numpy.ma as ma
+    >>> a = np.arange(4)
+    >>> a
+    array([0, 1, 2, 3])
+    >>> ma.masked_greater_equal(a, 2)
+    masked_array(data=[0, 1, --, --],
+                 mask=[False, False,  True,  True],
+           fill_value=999999)
+
+    """
+    return masked_where(greater_equal(x, value), x, copy=copy)
+
+
+def masked_less(x, value, copy=True):
+    """
+    Mask an array where less than a given value.
+
+    This function is a shortcut to ``masked_where``, with
+    `condition` = (x < value).
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+ + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less(a, 2) + masked_array(data=[--, --, 2, 3], + mask=[ True, True, False, False], + fill_value=999999) + + """ + return masked_where(less(x, value), x, copy=copy) + + +def masked_less_equal(x, value, copy=True): + """ + Mask an array where less than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x <= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less_equal(a, 2) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + """ + return masked_where(less_equal(x, value), x, copy=copy) + + +def masked_not_equal(x, value, copy=True): + """ + Mask an array where *not* equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x != value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_not_equal(a, 2) + masked_array(data=[--, --, 2, --], + mask=[ True, True, False, True], + fill_value=999999) + + """ + return masked_where(not_equal(x, value), x, copy=copy) + + +def masked_equal(x, value, copy=True): + """ + Mask an array where equal to a given value. + + Return a MaskedArray, masked where the data in array `x` are + equal to `value`. The fill_value of the returned MaskedArray + is set to `value`. + + For floating point arrays, consider using ``masked_values(x, value)``. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_equal(a, 2) + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=2) + + """ + output = masked_where(equal(x, value), x, copy=copy) + output.fill_value = value + return output + + +def masked_inside(x, v1, v2, copy=True): + """ + Mask an array inside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` inside + the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` + can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_inside(x, -0.3, 0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_inside(x, 0.3, -0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf >= v1) & (xf <= v2) + return masked_where(condition, x, copy=copy) + + +def masked_outside(x, v1, v2, copy=True): + """ + Mask an array outside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` outside + the interval [v1,v2] (x < v1)|(x > v2). 
+ The boundaries `v1` and `v2` can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_outside(x, -0.3, 0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_outside(x, 0.3, -0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf < v1) | (xf > v2) + return masked_where(condition, x, copy=copy) + + +def masked_object(x, value, copy=True, shrink=True): + """ + Mask the array `x` where the data are exactly equal to value. + + This function is similar to `masked_values`, but only suitable + for object arrays: for floating point, use `masked_values` instead. + + Parameters + ---------- + x : array_like + Array to mask + value : object + Comparison value + copy : {True, False}, optional + Whether to return a copy of `x`. + shrink : {True, False}, optional + Whether to collapse a mask full of False to nomask + + Returns + ------- + result : MaskedArray + The result of masking `x` where equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> # don't eat spoiled food + >>> eat = ma.masked_object(food, 'green_eggs') + >>> eat + masked_array(data=[--, 'ham'], + mask=[ True, False], + fill_value='green_eggs', + dtype=object) + >>> # plain ol` ham is boring + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> eat = ma.masked_object(fresh_food, 'green_eggs') + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + Note that `mask` is set to ``nomask`` if possible. + + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + """ + if isMaskedArray(x): + condition = umath.equal(x._data, value) + mask = x._mask + else: + condition = umath.equal(np.asarray(x), value) + mask = nomask + mask = mask_or(mask, make_mask(condition, shrink=shrink)) + return masked_array(x, mask=mask, copy=copy, fill_value=value) + + +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): + """ + Mask using floating point equality. + + Return a MaskedArray, masked where the data in array `x` are approximately + equal to `value`, determined using `isclose`. The default tolerances for + `masked_values` are the same as those for `isclose`. + + For integer types, exact equality is used, in the same way as + `masked_equal`. + + The fill_value is set to `value` and the mask is set to ``nomask`` if + possible. + + Parameters + ---------- + x : array_like + Array to mask. + value : float + Masking value. + rtol, atol : float, optional + Tolerance parameters passed on to `isclose` + copy : bool, optional + Whether to return a copy of `x`. + shrink : bool, optional + Whether to collapse a mask full of False to ``nomask``. 
+ + Returns + ------- + result : MaskedArray + The result of masking `x` where approximately equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = np.array([1, 1.1, 2, 1.1, 3]) + >>> ma.masked_values(x, 1.1) + masked_array(data=[1.0, --, 2.0, --, 3.0], + mask=[False, True, False, True, False], + fill_value=1.1) + + Note that `mask` is set to ``nomask`` if possible. + + >>> ma.masked_values(x, 2.1) + masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], + mask=False, + fill_value=2.1) + + Unlike `masked_equal`, `masked_values` can perform approximate equalities. + + >>> ma.masked_values(x, 2.1, atol=1e-1) + masked_array(data=[1.0, 1.1, --, 1.1, 3.0], + mask=[False, False, True, False, False], + fill_value=2.1) + + """ + xnew = filled(x, value) + if np.issubdtype(xnew.dtype, np.floating): + mask = np.isclose(xnew, value, atol=atol, rtol=rtol) + else: + mask = umath.equal(xnew, value) + ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) + if shrink: + ret.shrink_mask() + return ret + + +def masked_invalid(a, copy=True): + """ + Mask an array where invalid values occur (NaNs or infs). + + This function is a shortcut to ``masked_where``, with + `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. + Only applies to arrays with a dtype where NaNs or infs make sense + (i.e. floating point types), but accepts any array_like object. + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(5, dtype=float) + >>> a[2] = np.nan + >>> a[3] = np.inf + >>> a + array([ 0., 1., nan, inf, 4.]) + >>> ma.masked_invalid(a) + masked_array(data=[0.0, 1.0, --, --, 4.0], + mask=[False, False, True, True, False], + fill_value=1e+20) + + """ + a = np.array(a, copy=None, subok=True) + res = masked_where(~(np.isfinite(a)), a, copy=copy) + # masked_invalid previously never returned nomask as a mask and doing so + # threw off matplotlib (gh-22842). So use shrink=False: + if res._mask is nomask: + res._mask = make_mask_none(res.shape, res.dtype) + return res + +############################################################################### +# Printing options # +############################################################################### + + +class _MaskedPrintOption: + """ + Handle the string used to represent missing data in a masked array. + + """ + + def __init__(self, display): + """ + Create the masked_print_option object. + + """ + self._display = display + self._enabled = True + + def display(self): + """ + Display the string to print for masked values. + + """ + return self._display + + def set_display(self, s): + """ + Set the string to print for masked values. + + """ + self._display = s + + def enabled(self): + """ + Is the use of the display value enabled? + + """ + return self._enabled + + def enable(self, shrink=1): + """ + Set the enabling shrink to `shrink`. + + """ + self._enabled = shrink + + def __str__(self): + return str(self._display) + + __repr__ = __str__ + + +# if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. 
+ + Private function allowing for recursion + + """ + names = result.dtype.names + if names is not None: + for name in names: + curdata = result[name] + curmask = mask[name] + _recursive_printoption(curdata, curmask, printopt) + else: + np.copyto(result, printopt, where=mask) + + +# For better or worse, these end in a newline +_legacy_print_templates = { + 'long_std': textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + 'long_flx': textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """), + 'short_std': textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + 'short_flx': textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """) +} + +############################################################################### +# MaskedArray class # +############################################################################### + + +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names is not None: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.copyto(current, fill_value[name], where=mask[name]) + + +def flatten_structured_array(a): + """ + Flatten a structured array. + + The data type of the output is chosen such that it can represent all of the + (nested) fields. + + Parameters + ---------- + a : structured array + + Returns + ------- + output : masked array or ndarray + A flattened masked array if the input is a masked array, otherwise a + standard ndarray. + + Examples + -------- + >>> import numpy as np + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> np.ma.flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + + def flatten_sequence(iterable): + """ + Flattens a compound of nested iterables. + + """ + for elm in iter(iterable): + if hasattr(elm, '__iter__'): + yield from flatten_sequence(elm) + else: + yield elm + + a = np.asanyarray(a) + inishape = a.shape + a = a.ravel() + if isinstance(a, MaskedArray): + out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) + out = out.view(MaskedArray) + out._mask = np.array([tuple(flatten_sequence(d.item())) + for d in getmaskarray(a)]) + else: + out = np.array([tuple(flatten_sequence(d.item())) for d in a]) + if len(inishape) > 1: + newshape = list(out.shape) + newshape[0] = inishape + out.shape = tuple(flatten_sequence(newshape)) + return out + + +def _arraymethod(funcname, onmask=True): + """ + Return a class method wrapper around a basic array method. + + Creates a class method which returns a masked array, where the new + ``_data`` array is the output of the corresponding basic method called + on the original ``_data``. + + If `onmask` is True, the new mask is the output of the method called + on the initial mask. Otherwise, the new mask is just a reference + to the initial mask. + + Parameters + ---------- + funcname : str + Name of the function to apply on data. + onmask : bool + Whether the mask must be processed also (True) or left + alone (False). Default is True. Make available as `_onmask` + attribute. 
+
+    Returns
+    -------
+    method : instancemethod
+        Class method wrapper of the specified basic array method.
+
+    """
+    def wrapped_method(self, *args, **params):
+        result = getattr(self._data, funcname)(*args, **params)
+        result = result.view(type(self))
+        result._update_from(self)
+        mask = self._mask
+        if not onmask:
+            result.__setmask__(mask)
+        elif mask is not nomask:
+            # __setmask__ makes a copy, which we don't want
+            result._mask = getattr(mask, funcname)(*args, **params)
+        return result
+    methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
+    if methdoc is not None:
+        wrapped_method.__doc__ = methdoc.__doc__
+    wrapped_method.__name__ = funcname
+    return wrapped_method
+
+
+class MaskedIterator:
+    """
+    Flat iterator object to iterate over masked arrays.
+
+    A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
+    `x`. It allows iterating over the array as if it were a 1-D array,
+    either in a for-loop or by calling its `next` method.
+
+    Iteration is done in C-contiguous style, with the last index varying the
+    fastest. The iterator can also be indexed using basic slicing or
+    advanced indexing.
+
+    See Also
+    --------
+    MaskedArray.flat : Return a flat iterator over an array.
+    MaskedArray.flatten : Returns a flattened copy of an array.
+
+    Notes
+    -----
+    `MaskedIterator` is not exported by the `ma` module. Instead of
+    instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.ma.array(np.arange(6).reshape(2, 3))
+    >>> fl = x.flat
+    >>> type(fl)
+    <class 'numpy.ma.core.MaskedIterator'>
+    >>> for item in fl:
+    ...     print(item)
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    Extracting more than a single element by indexing the `MaskedIterator`
+    returns a masked array:
+
+    >>> fl[2:4]
+    masked_array(data = [2 3],
+                 mask = False,
+           fill_value = 999999)
+
+    """
+
+    def __init__(self, ma):
+        self.ma = ma
+        self.dataiter = ma._data.flat
+
+        if ma._mask is nomask:
+            self.maskiter = None
+        else:
+            self.maskiter = ma._mask.flat
+
+    def __iter__(self):
+        return self
+
+    def __getitem__(self, indx):
+        result = self.dataiter.__getitem__(indx).view(type(self.ma))
+        if self.maskiter is not None:
+            _mask = self.maskiter.__getitem__(indx)
+            if isinstance(_mask, ndarray):
+                # set shape to match that of data; this is needed for matrices
+                _mask.shape = result.shape
+                result._mask = _mask
+            elif isinstance(_mask, np.void):
+                return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
+            elif _mask:  # Just a scalar, masked
+                return masked
+        return result
+
+    # This won't work if ravel makes a copy
+    def __setitem__(self, index, value):
+        self.dataiter[index] = getdata(value)
+        if self.maskiter is not None:
+            self.maskiter[index] = getmaskarray(value)
+
+    def __next__(self):
+        """
+        Return the next value, or raise StopIteration.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> x = np.ma.array([3, 2], mask=[0, 1])
+        >>> fl = x.flat
+        >>> next(fl)
+        3
+        >>> next(fl)
+        masked
+        >>> next(fl)
+        Traceback (most recent call last):
+        ...
+        StopIteration
+
+        """
+        d = next(self.dataiter)
+        if self.maskiter is not None:
+            m = next(self.maskiter)
+            if isinstance(m, np.void):
+                return mvoid(d, mask=m, hardmask=self.ma._hardmask)
+            elif m:  # Just a scalar, masked
+                return masked
+        return d
+
+
+@set_module("numpy.ma")
+class MaskedArray(ndarray):
+    """
+    An array class with possibly masked values.
+
+    Masked values of True exclude the corresponding element from any
+    computation.
+ + Construction:: + + x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, + ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, + shrink=True, order=None) + + Parameters + ---------- + data : array_like + Input data. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + dtype : dtype, optional + Data type of the output. + If `dtype` is None, the type of the data argument (``data.dtype``) + is used. If `dtype` is not None and different from ``data.dtype``, + a copy is performed. + copy : bool, optional + Whether to copy the input data (True), or to use a reference instead. + Default is False. + subok : bool, optional + Whether to return a subclass of `MaskedArray` if possible (True) or a + plain `MaskedArray`. Default is True. + ndmin : int, optional + Minimum number of dimensions. Default is 0. + fill_value : scalar, optional + Value used to fill in the masked values when necessary. + If None, a default based on the data-type is used. + keep_mask : bool, optional + Whether to combine `mask` with the mask of the input data, if any + (True), or to use only `mask` for the output (False). Default is True. + hard_mask : bool, optional + Whether to use a hard mask or not. With a hard mask, masked values + cannot be unmasked. Default is False. + shrink : bool, optional + Whether to force compression of an empty mask. Default is True. + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. + + Examples + -------- + >>> import numpy as np + + The ``mask`` can be initialized with an array of boolean values + with the same shape as ``data``. + + >>> data = np.arange(6).reshape((2, 3)) + >>> np.ma.MaskedArray(data, mask=[[False, True, False], + ... [False, False, True]]) + masked_array( + data=[[0, --, 2], + [3, 4, --]], + mask=[[False, True, False], + [False, False, True]], + fill_value=999999) + + Alternatively, the ``mask`` can be initialized to homogeneous boolean + array with the same shape as ``data`` by passing in a scalar + boolean value: + + >>> np.ma.MaskedArray(data, mask=False) + masked_array( + data=[[0, 1, 2], + [3, 4, 5]], + mask=[[False, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.MaskedArray(data, mask=True) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=999999, + dtype=int64) + + .. note:: + The recommended practice for initializing ``mask`` with a scalar + boolean value is to use ``True``/``False`` rather than + ``np.True_``/``np.False_``. The reason is :attr:`nomask` + is represented internally as ``np.False_``. + + >>> np.False_ is np.ma.nomask + True + + """ + + __array_priority__ = 15 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = ndarray + + # Maximum number of elements per axis used when printing an array. The + # 1d case is handled separately because we need more values in this case. 
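+    # (_insert_masked_print below consults these limits and extracts just
+    # the displayed corners of a large array before the costly conversion
+    # to object dtype.)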
+ _print_width = 100 + _print_width_1d = 1500 + + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, + subok=True, ndmin=0, fill_value=None, keep_mask=True, + hard_mask=None, shrink=True, order=None): + """ + Create a new masked array from scratch. + + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). + + """ + # Process data. + copy = None if not copy else True + _data = np.array(data, dtype=dtype, copy=copy, + order=order, subok=True, ndmin=ndmin) + _baseclass = getattr(data, '_baseclass', type(_data)) + # Check that we're not erasing the mask. + if isinstance(data, MaskedArray) and (data.shape != _data.shape): + copy = True + + # Here, we copy the _view_, so that we can attach new properties to it + # we must never do .view(MaskedConstant), as that would create a new + # instance of np.ma.masked, which make identity comparison fail + if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): + _data = ndarray.view(_data, type(data)) + else: + _data = ndarray.view(_data, cls) + + # Handle the case where data is not a subclass of ndarray, but + # still has the _mask attribute like MaskedArrays + if hasattr(data, '_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + # FIXME: should we set `_data._sharedmask = True`? + # Process mask. + # Type of the mask + mdtype = make_mask_descr(_data.dtype) + if mask is nomask: + # Case 1. : no mask in input. + # Erase the current mask ? + if not keep_mask: + # With a reduced version + if shrink: + _data._mask = nomask + # With full version + else: + _data._mask = np.zeros(_data.shape, dtype=mdtype) + # Check whether we missed something + elif isinstance(data, (tuple, list)): + try: + # If data is a sequence of masked array + mask = np.array( + [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) + for m in data], dtype=mdtype) + except (ValueError, TypeError): + # If data is nested + mask = nomask + # Force shrinking of the mask if needed (and possible) + if (mdtype == MaskType) and mask.any(): + _data._mask = mask + _data._sharedmask = False + else: + _data._sharedmask = not copy + if copy: + _data._mask = _data._mask.copy() + # Reset the shape of the original mask + if getmask(data) is not nomask: + # gh-21022 encounters an issue here + # because data._mask.shape is not writeable, but + # the op was also pointless in that case, because + # the shapes were the same, so we can at least + # avoid that path + if data._mask.shape != data.shape: + data._mask.shape = data.shape + else: + # Case 2. : With a mask in input. + # If mask is boolean, create an array of True or False + + # if users pass `mask=None` be forgiving here and cast it False + # for speed; although the default is `mask=nomask` and can differ. 
+ if mask is None: + mask = False + + if mask is True and mdtype == MaskType: + mask = np.ones(_data.shape, dtype=mdtype) + elif mask is False and mdtype == MaskType: + mask = np.zeros(_data.shape, dtype=mdtype) + else: + # Read the mask with the current mdtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Make sure the mask and the data have the same shape + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = np.resize(mask, _data.shape) + elif nm == nd: + mask = np.reshape(mask, _data.shape) + else: + msg = (f"Mask and data not compatible:" + f" data size is {nd}, mask size is {nm}.") + raise MaskError(msg) + copy = True + # Set the mask to the new value + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = not copy + elif not keep_mask: + _data._mask = mask + _data._sharedmask = not copy + else: + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) + else: + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False + + # Update fill_value. + if fill_value is None: + fill_value = getattr(data, '_fill_value', None) + # But don't run the check unless we have something to check. + if fill_value is not None: + _data._fill_value = _check_fill_value(fill_value, _data.dtype) + # Process extra options .. + if hard_mask is None: + _data._hardmask = getattr(data, '_hardmask', False) + else: + _data._hardmask = hard_mask + _data._baseclass = _baseclass + return _data + + def _update_from(self, obj): + """ + Copies some attributes of obj to self. + + """ + if isinstance(obj, ndarray): + _baseclass = type(obj) + else: + _baseclass = ndarray + # We need to copy the _basedict to avoid backward propagation + _optinfo = {} + _optinfo.update(getattr(obj, '_optinfo', {})) + _optinfo.update(getattr(obj, '_basedict', {})) + if not isinstance(obj, MaskedArray): + _optinfo.update(getattr(obj, '__dict__', {})) + _dict = {'_fill_value': getattr(obj, '_fill_value', None), + '_hardmask': getattr(obj, '_hardmask', False), + '_sharedmask': getattr(obj, '_sharedmask', False), + '_isfield': getattr(obj, '_isfield', False), + '_baseclass': getattr(obj, '_baseclass', _baseclass), + '_optinfo': _optinfo, + '_basedict': _optinfo} + self.__dict__.update(_dict) + self.__dict__.update(_optinfo) + + def __array_finalize__(self, obj): + """ + Finalizes the masked array. + + """ + # Get main attributes. + self._update_from(obj) + + # We have to decide how to initialize self.mask, based on + # obj.mask. This is very difficult. There might be some + # correspondence between the elements in the array we are being + # created from (= obj) and us. Or there might not. This method can + # be called in all kinds of places for all kinds of reasons -- could + # be empty_like, could be slicing, could be a ufunc, could be a view. + # The numpy subclassing interface simply doesn't give us any way + # to know, which means that at best this method will be based on + # guesswork and heuristics. To make things worse, there isn't even any + # clear consensus about what the desired behavior is. 
For instance, + # most users think that np.empty_like(marr) -- which goes via this + # method -- should return a masked array with an empty mask (see + # gh-3404 and linked discussions), but others disagree, and they have + # existing code which depends on empty_like returning an array that + # matches the input mask. + # + # Historically our algorithm was: if the template object mask had the + # same *number of elements* as us, then we used *it's mask object + # itself* as our mask, so that writes to us would also write to the + # original array. This is horribly broken in multiple ways. + # + # Now what we do instead is, if the template object mask has the same + # number of elements as us, and we do not have the same base pointer + # as the template object (b/c views like arr[...] should keep the same + # mask), then we make a copy of the template object mask and use + # that. This is also horribly broken but somewhat less so. Maybe. + if isinstance(obj, ndarray): + # XX: This looks like a bug -- shouldn't it check self.dtype + # instead? + if obj.dtype.names is not None: + _mask = getmaskarray(obj) + else: + _mask = getmask(obj) + + # If self and obj point to exactly the same data, then probably + # self is a simple view of obj (e.g., self = obj[...]), so they + # should share the same mask. (This isn't 100% reliable, e.g. self + # could be the first row of obj, or have strange strides, but as a + # heuristic it's not bad.) In all other cases, we make a copy of + # the mask, so that future modifications to 'self' do not end up + # side-effecting 'obj' as well. + if (_mask is not nomask and obj.__array_interface__["data"][0] + != self.__array_interface__["data"][0]): + # We should make a copy. But we could get here via astype, + # in which case the mask might need a new dtype as well + # (e.g., changing to or from a structured dtype), and the + # order could have changed. So, change the mask type if + # needed and use astype instead of copy. + if self.dtype == obj.dtype: + _mask_dtype = _mask.dtype + else: + _mask_dtype = make_mask_descr(self.dtype) + + if self.flags.c_contiguous: + order = "C" + elif self.flags.f_contiguous: + order = "F" + else: + order = "K" + + _mask = _mask.astype(_mask_dtype, order) + else: + # Take a view so shape changes, etc., do not propagate back. + _mask = _mask.view() + else: + _mask = nomask + + self._mask = _mask + # Finalize the mask + if self._mask is not nomask: + try: + self._mask.shape = self.shape + except ValueError: + self._mask = nomask + except (TypeError, AttributeError): + # When _mask.shape is not writable (because it's a void) + pass + + # Finalize the fill_value + if self._fill_value is not None: + self._fill_value = _check_fill_value(self._fill_value, self.dtype) + elif self.dtype.names is not None: + # Finalize the default fill_value for structured arrays + self._fill_value = _check_fill_value(None, self.dtype) + + def __array_wrap__(self, obj, context=None, return_scalar=False): + """ + Special hook for ufuncs. + + Wraps the numpy array and sets the mask according to context. 
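+
+        For instance (an illustrative sketch, not part of the original
+        docstring), applying a raw ufunc to a masked array goes through
+        this hook, and the input mask propagates to the result:
+
+        >>> x = np.ma.array([1.0, 4.0], mask=[False, True])
+        >>> np.sqrt(x).mask
+        array([False,  True])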
+ + """ + if obj is self: # for in-place operations + result = obj + else: + result = obj.view(type(self)) + result._update_from(self) + + if context is not None: + result._mask = result._mask.copy() + func, args, out_i = context + # args sometimes contains outputs (gh-10459), which we don't want + input_args = args[:func.nin] + m = functools.reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + # Get the domain mask + domain = ufunc_domain.get(func) + if domain is not None: + # Take the domain, and make sure it's a ndarray + with np.errstate(divide='ignore', invalid='ignore'): + # The result may be masked for two (unary) domains. + # That can't really be right as some domains drop + # the mask and some don't behaving differently here. + d = domain(*input_args).astype(bool, copy=False) + d = filled(d, True) + + if d.any(): + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + + np.copyto(result, fill_value, where=d) + + # Update the mask + if m is nomask: + m = d + else: + # Don't modify inplace, we risk back-propagation + m = (m | d) + + # Make sure the mask has the proper size + if result is not self and result.shape == () and m: + return masked + else: + result._mask = m + result._sharedmask = False + + return result + + def view(self, dtype=None, type=None, fill_value=None): + """ + Return a view of the MaskedArray data. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + The default, None, results in the view having the same data-type + as `a`. As with ``ndarray.view``, dtype can also be specified as + an ndarray sub-class, which then specifies the type of the + returned object (this is equivalent to setting the ``type`` + parameter). + type : Python type, optional + Type of the returned view, either ndarray or a subclass. The + default None results in type preservation. + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, then this argument is inferred from the passed `dtype`, or + in its absence the original array, as discussed in the notes below. + + See Also + -------- + numpy.ndarray.view : Equivalent method on ndarray object. + + Notes + ----- + + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + If `fill_value` is not specified, but `dtype` is specified (and is not + an ndarray sub-class), the `fill_value` of the MaskedArray will be + reset. If neither `fill_value` nor `dtype` are specified (or if + `dtype` is an ndarray sub-class), then the fill value is preserved. + Finally, if `fill_value` is specified, but `dtype` is not, the fill + value is set to the specified value. 
+ + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a + regular array to a structured array), then the behavior of the view + cannot be predicted just from the superficial appearance of ``a`` (shown + by ``print(a)``). It also depends on exactly how ``a`` is stored in + memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus + defined as a slice or transpose, etc., the view may give different + results. + """ + + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + except TypeError: + output = ndarray.view(self, dtype) + else: + output = ndarray.view(self, dtype, type) + + # also make the mask be a view (so attr changes to the view's + # mask do no affect original object's mask) + # (especially important to avoid affecting np.masked singleton) + if getmask(output) is not nomask: + output._mask = output._mask.view() + + # Make sure to reset the _fill_value if needed + if getattr(output, '_fill_value', None) is not None: + if fill_value is None: + if dtype is None: + pass # leave _fill_value as is + else: + output._fill_value = None + else: + output.fill_value = fill_value + return output + + def __getitem__(self, indx): + """ + x.__getitem__(y) <==> x[y] + + Return the item described by i, as a masked array. + + """ + # We could directly use ndarray.__getitem__ on self. + # But then we would have to modify __array_finalize__ to prevent the + # mask of being reshaped if it hasn't been set up properly yet + # So it's easier to stick to the current version + dout = self.data[indx] + _mask = self._mask + + def _is_scalar(m): + return not isinstance(m, np.ndarray) + + def _scalar_heuristic(arr, elem): + """ + Return whether `elem` is a scalar result of indexing `arr`, or None + if undecidable without promoting nomask to a full mask + """ + # obviously a scalar + if not isinstance(elem, np.ndarray): + return True + + # object array scalar indexing can return anything + elif arr.dtype.type is np.object_: + if arr.dtype is not elem.dtype: + # elem is an array, but dtypes do not match, so must be + # an element + return True + + # well-behaved subclass that only returns 0d arrays when + # expected - this is not a scalar + elif type(arr).__getitem__ == ndarray.__getitem__: + return False + + return None + + if _mask is not nomask: + # _mask cannot be a subclass, so it tells us whether we should + # expect a scalar. It also cannot be of dtype object. + mout = _mask[indx] + scalar_expected = _is_scalar(mout) + + else: + # attempt to apply the heuristic to avoid constructing a full mask + mout = nomask + scalar_expected = _scalar_heuristic(self.data, dout) + if scalar_expected is None: + # heuristics have failed + # construct a full array, so we can be certain. This is costly. + # we could also fall back on ndarray.__getitem__(self.data, indx) + scalar_expected = _is_scalar(getmaskarray(self)[indx]) + + # Did we extract a single item? + if scalar_expected: + # A record + if isinstance(dout, np.void): + # We should always re-cast to mvoid, otherwise users can + # change masks on rows that already have masked values, but not + # on rows that have no masked values, which is inconsistent. 
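+                # (The parent's hardmask flag is forwarded, so assigning to
+                # fields of the returned record honors a hardened mask.)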
+ return mvoid(dout, mask=mout, hardmask=self._hardmask) + + # special case introduced in gh-5962 + elif (self.dtype.type is np.object_ and + isinstance(dout, np.ndarray) and + dout is not masked): + # If masked, turn into a MaskedArray, with everything masked. + if mout: + return MaskedArray(dout, mask=True) + else: + return dout + + # Just a scalar + elif mout: + return masked + else: + return dout + else: + # Force dout to MA + dout = dout.view(type(self)) + # Inherit attributes from self + dout._update_from(self) + # Check the fill_value + if is_string_or_list_of_strings(indx): + if self._fill_value is not None: + dout._fill_value = self._fill_value[indx] + + # Something like gh-15895 has happened if this check fails. + # _fill_value should always be an ndarray. + if not isinstance(dout._fill_value, np.ndarray): + raise RuntimeError('Internal NumPy error.') + # If we're indexing a multidimensional field in a + # structured array (such as dtype("(2,)i2,(2,)i1")), + # dimensionality goes up (M[field].ndim == M.ndim + + # M.dtype[field].ndim). That's fine for + # M[field] but problematic for M[field].fill_value + # which should have shape () to avoid breaking several + # methods. There is no great way out, so set to + # first element. See issue #6723. + if dout._fill_value.ndim > 0: + if not (dout._fill_value == + dout._fill_value.flat[0]).all(): + warnings.warn( + "Upon accessing multidimensional field " + f"{indx!s}, need to keep dimensionality " + "of fill_value at 0. Discarding " + "heterogeneous fill_value and setting " + f"all to {dout._fill_value[0]!s}.", + stacklevel=2) + # Need to use `.flat[0:1].squeeze(...)` instead of just + # `.flat[0]` to ensure the result is a 0d array and not + # a scalar. + dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0) + dout._isfield = True + # Update the mask if needed + if mout is not nomask: + # set shape to match that of data; this is needed for matrices + dout._mask = reshape(mout, dout.shape) + dout._sharedmask = True + # Note: Don't try to check for m.any(), that'll take too long + return dout + + # setitem may put NaNs into integer arrays or occasionally overflow a + # float. But this may happen in masked values, so avoid otherwise + # correct warnings (as is typical also in masked calculations). + @np.errstate(over='ignore', invalid='ignore') + def __setitem__(self, indx, value): + """ + x.__setitem__(i, y) <==> x[i]=y + + Set item described by index. If value is masked, masks those + locations. + + """ + if self is masked: + raise MaskError('Cannot alter the masked element.') + _data = self._data + _mask = self._mask + if isinstance(indx, str): + _data[indx] = value + if _mask is nomask: + self._mask = _mask = make_mask_none(self.shape, self.dtype) + _mask[indx] = getmask(value) + return + + _dtype = _data.dtype + + if value is masked: + # The mask wasn't set: create a full version. + if _mask is nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + # Now, set the mask to its value. 
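+            # (A structured mask entry is a record: every field must be
+            # flagged True, since a bare boolean would not cover the fields.)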
+ if _dtype.names is not None: + _mask[indx] = tuple([True] * len(_dtype.names)) + else: + _mask[indx] = True + return + + # Get the _data part of the new value + dval = getattr(value, '_data', value) + # Get the _mask part of the new value + mval = getmask(value) + if _dtype.names is not None and mval is nomask: + mval = tuple([False] * len(_dtype.names)) + if _mask is nomask: + # Set the data, then the mask + _data[indx] = dval + if mval is not nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + _mask[indx] = mval + elif not self._hardmask: + # Set the data, then the mask + if (isinstance(indx, masked_array) and + not isinstance(value, masked_array)): + _data[indx.data] = dval + else: + _data[indx] = dval + _mask[indx] = mval + elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): + indx = indx * umath.logical_not(_mask) + _data[indx] = dval + else: + if _dtype.names is not None: + err_msg = "Flexible 'hard' masks are not yet supported." + raise NotImplementedError(err_msg) + mindx = mask_or(_mask[indx], mval, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + np.copyto(dindx, dval, where=~mindx) + elif mindx is nomask: + dindx = dval + _data[indx] = dindx + _mask[indx] = mindx + return + + # Define so that we can overwrite the setter. + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + super(MaskedArray, type(self)).dtype.__set__(self, dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + # Try to reset the shape of the mask (if we don't have a void). + # This raises a ValueError if the dtype change won't work. + try: + self._mask.shape = self.shape + except (AttributeError, TypeError): + pass + + @property + def shape(self): + return super().shape + + @shape.setter + def shape(self, shape): + super(MaskedArray, type(self)).shape.__set__(self, shape) + # Cannot use self._mask, since it may not (yet) exist when a + # masked matrix sets the shape. + if getmask(self) is not nomask: + self._mask.shape = self.shape + + def __setmask__(self, mask, copy=False): + """ + Set the mask. + + """ + idtype = self.dtype + current_mask = self._mask + if mask is masked: + mask = True + + if current_mask is nomask: + # Make sure the mask is set + # Just don't do anything if there's nothing to do. + if mask is nomask: + return + current_mask = self._mask = make_mask_none(self.shape, idtype) + + if idtype.names is None: + # No named fields. + # Hardmask: don't unmask the data + if self._hardmask: + current_mask |= mask + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. 
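+            # (Assigning through .flat writes `mask` element-wise into the
+            # existing mask buffer without rebinding self._mask.)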
+ else: + current_mask.flat = mask + else: + # Named fields w/ + mdtype = current_mask.dtype + mask = np.asarray(mask) + # Mask is a singleton + if not mask.ndim: + # It's a boolean : make a record + if mask.dtype.kind == 'b': + mask = np.array(tuple([mask.item()] * len(mdtype)), + dtype=mdtype) + # It's a record: make sure the dtype is correct + else: + mask = mask.astype(mdtype) + # Mask is a sequence + else: + # Make sure the new mask is a ndarray with the proper dtype + try: + copy = None if not copy else True + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Hardmask: don't unmask the data + if self._hardmask: + for n in idtype.names: + current_mask[n] |= mask[n] + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + # Reshape if needed + if current_mask.shape: + current_mask.shape = self.shape + return + + _set_mask = __setmask__ + + @property + def mask(self): + """ Current mask. """ + + # We could try to force a reshape, but that wouldn't work in some + # cases. + # Return a view so that the dtype and shape cannot be changed in place + # This still preserves nomask by identity + return self._mask.view() + + @mask.setter + def mask(self, value): + self.__setmask__(value) + + @property + def recordmask(self): + """ + Get or set the mask of the array if it has no named fields. For + structured arrays, returns a ndarray of booleans where entries are + ``True`` if **all** the fields are masked, ``False`` otherwise: + + >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], + ... dtype=[('a', int), ('b', int)]) + >>> x.recordmask + array([False, False, True, False, False]) + """ + + _mask = self._mask.view(ndarray) + if _mask.dtype.names is None: + return _mask + return np.all(flatten_structured_array(_mask), axis=-1) + + @recordmask.setter + def recordmask(self, mask): + raise NotImplementedError("Coming soon: setting the mask per records!") + + def harden_mask(self): + """ + Force the mask to hard, preventing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `harden_mask` sets + `~ma.MaskedArray.hardmask` to ``True`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.soften_mask + + """ + self._hardmask = True + return self + + def soften_mask(self): + """ + Force the mask to soft (default), allowing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `soften_mask` sets + `~ma.MaskedArray.hardmask` to ``False`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.harden_mask + + """ + self._hardmask = False + return self + + @property + def hardmask(self): + """ + Specifies whether values can be unmasked through assignments. + + By default, assigning definite values to masked array entries will + unmask them. When `hardmask` is ``True``, the mask will not change + through assignments. 
+ + See Also + -------- + ma.MaskedArray.harden_mask + ma.MaskedArray.soften_mask + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10) + >>> m = np.ma.masked_array(x, x>5) + >>> assert not m.hardmask + + Since `m` has a soft mask, assigning an element value unmasks that + element: + + >>> m[8] = 42 + >>> m + masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + After hardening, the mask is not affected by assignments: + + >>> hardened = np.ma.harden_mask(m) + >>> assert m.hardmask and hardened is m + >>> m[:] = 23 + >>> m + masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + """ + return self._hardmask + + def unshare_mask(self): + """ + Copy the mask and set the `sharedmask` flag to ``False``. + + Whether the mask is shared between masked arrays can be seen from + the `sharedmask` property. `unshare_mask` ensures the mask is not + shared. A copy of the mask is only made if it was shared. + + See Also + -------- + sharedmask + + """ + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + return self + + @property + def sharedmask(self): + """ Share status of the mask (read-only). """ + return self._sharedmask + + def shrink_mask(self): + """ + Reduce a mask to nomask when possible. + + Parameters + ---------- + None + + Returns + ------- + result : MaskedArray + A :class:`~ma.MaskedArray` object. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) + >>> x.mask + array([[False, False], + [False, False]]) + >>> x.shrink_mask() + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> x.mask + False + + """ + self._mask = _shrink_mask(self._mask) + return self + + @property + def baseclass(self): + """ Class of the underlying data (read-only). """ + return self._baseclass + + def _get_data(self): + """ + Returns the underlying data, as a view of the masked array. + + If the underlying data is a subclass of :class:`numpy.ndarray`, it is + returned as such. + + >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.data + matrix([[1, 2], + [3, 4]]) + + The type of the data can be accessed through the :attr:`baseclass` + attribute. + """ + return ndarray.view(self, self._baseclass) + + _data = property(fget=_get_data) + data = property(fget=_get_data) + + @property + def flat(self): + """ Return a flat iterator, or set a flattened version of self to value. """ + return MaskedIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + @property + def fill_value(self): + """ + The filling value of the masked array is a scalar. When setting, None + will set to a default based on the data type. + + Examples + -------- + >>> import numpy as np + >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: + ... np.ma.array([0, 1], dtype=dt).get_fill_value() + ... 
np.int64(999999)
+        np.int64(999999)
+        np.float64(1e+20)
+        np.complex128(1e+20+0j)
+
+        >>> x = np.ma.array([0, 1.], fill_value=-np.inf)
+        >>> x.fill_value
+        np.float64(-inf)
+        >>> x.fill_value = np.pi
+        >>> x.fill_value
+        np.float64(3.1415926535897931)
+
+        Reset to default:
+
+        >>> x.fill_value = None
+        >>> x.fill_value
+        np.float64(1e+20)
+
+        """
+        if self._fill_value is None:
+            self._fill_value = _check_fill_value(None, self.dtype)
+
+        # Temporary workaround to account for the fact that str and bytes
+        # scalars cannot be indexed with (), whereas all other numpy
+        # scalars can. See issues #7259 and #7267.
+        # The if-block can be removed after #7267 has been fixed.
+        if isinstance(self._fill_value, ndarray):
+            return self._fill_value[()]
+        return self._fill_value
+
+    @fill_value.setter
+    def fill_value(self, value=None):
+        target = _check_fill_value(value, self.dtype)
+        if not target.ndim == 0:
+            # 2019-11-12, 1.18.0
+            warnings.warn(
+                "Non-scalar arrays for the fill value are deprecated. Use "
+                "arrays with scalar values instead. The filled function "
+                "still supports any array as `fill_value`.",
+                DeprecationWarning, stacklevel=2)
+
+        _fill_value = self._fill_value
+        if _fill_value is None:
+            # Create the attribute if it was undefined
+            self._fill_value = target
+        else:
+            # Don't overwrite the attribute, just fill it (for propagation)
+            _fill_value[()] = target
+
+    # kept for compatibility
+    get_fill_value = fill_value.fget
+    set_fill_value = fill_value.fset
+
+    def filled(self, fill_value=None):
+        """
+        Return a copy of self, with masked values filled with a given value.
+        **However**, if there are no masked values to fill, self will be
+        returned instead as an ndarray.
+
+        Parameters
+        ----------
+        fill_value : array_like, optional
+            The value to use for invalid entries. Can be scalar or non-scalar.
+            If non-scalar, the resulting ndarray must be broadcastable over
+            input array. Default is None, in which case, the `fill_value`
+            attribute of the array is used instead.
+
+        Returns
+        -------
+        filled_array : ndarray
+            A copy of ``self`` with invalid entries replaced by *fill_value*
+            (be it the function argument or the attribute of ``self``), or
+            ``self`` itself as an ndarray if there are no invalid entries to
+            be replaced.
+
+        Notes
+        -----
+        The result is **not** a MaskedArray!
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
+        >>> x.filled()
+        array([   1,    2, -999,    4, -999])
+        >>> x.filled(fill_value=1000)
+        array([   1,    2, 1000,    4, 1000])
+        >>> type(x.filled())
+        <class 'numpy.ndarray'>
+
+        Subclassing is preserved. This means that if, e.g., the data part of
+        the masked array is a recarray, `filled` returns a recarray:
+
+        >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
+        >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
+        >>> m.filled()
+        rec.array([(999999,      2), (    -3, 999999)],
+                  dtype=[('f0', '<i8'), ('f1', '<i8')])
+        """
+        m = self._mask
+        if m is nomask:
+            return self._data
+
+        if fill_value is None:
+            fill_value = self.fill_value
+        else:
+            fill_value = _check_fill_value(fill_value, self.dtype)
+
+        if self is masked_singleton:
+            return np.asanyarray(fill_value)
+
+        if m.dtype.names is not None:
+            result = self._data.copy('K')
+            _recursive_filled(result, self._mask, fill_value)
+        elif not m.any():
+            return self._data
+        else:
+            result = self._data.copy('K')
+            try:
+                np.copyto(result, fill_value, where=m)
+            except (TypeError, AttributeError):
+                fill_value = np.array(fill_value, dtype=object)
+                d = result.astype(object)
+                result = np.choose(m, (d, fill_value))
+            except IndexError:
+                # ok, if scalar
+                if self._data.shape:
+                    raise
+                elif m:
+                    result = np.array(fill_value, dtype=self.dtype)
+                else:
+                    result = self._data
+        return result
+
+    def compressed(self):
+        """
+        Return all the non-masked data as a 1-D array.
+
+        Returns
+        -------
+        data : ndarray
+            A new `ndarray` holding the non-masked data is returned.
+
+        Notes
+        -----
+        The result is **not** a MaskedArray!
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
+        >>> x.compressed()
+        array([0, 1])
+        >>> type(x.compressed())
+        <class 'numpy.ndarray'>
+
+        N-D arrays are compressed to 1-D.
+
+        >>> arr = [[1, 2], [3, 4]]
+        >>> mask = [[1, 0], [0, 1]]
+        >>> x = np.ma.array(arr, mask=mask)
+        >>> x.compressed()
+        array([2, 3])
+
+        """
+        data = ndarray.ravel(self._data)
+        if self._mask is not nomask:
+            data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
+        return data
+
+    def compress(self, condition, axis=None, out=None):
+        """
+        Return `a` where condition is ``True``.
+ + If condition is a `~ma.MaskedArray`, missing values are considered + as ``False``. + + Parameters + ---------- + condition : var + Boolean 1-d array selecting which entries to return. If len(condition) + is less than the size of a along the axis, then output is truncated + to length of condition array. + axis : {None, int}, optional + Axis along which the operation must be performed. + out : {None, ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type will be cast if + necessary. + + Returns + ------- + result : MaskedArray + A :class:`~ma.MaskedArray` object. + + Notes + ----- + Please note the difference with :meth:`compressed` ! + The output of :meth:`compress` has a mask, the output of + :meth:`compressed` does not. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.compress([1, 0, 1]) + masked_array(data=[1, 3], + mask=[False, False], + fill_value=999999) + + >>> x.compress([1, 0, 1], axis=1) + masked_array( + data=[[1, 3], + [--, --], + [7, 9]], + mask=[[False, False], + [ True, True], + [False, False]], + fill_value=999999) + + """ + # Get the basic components + (_data, _mask) = (self._data, self._mask) + + # Force the condition to a regular ndarray and forget the missing + # values. + condition = np.asarray(condition) + + _new = _data.compress(condition, axis=axis, out=out).view(type(self)) + _new._update_from(self) + if _mask is not nomask: + _new._mask = _mask.compress(condition, axis=axis) + return _new + + def _insert_masked_print(self): + """ + Replace masked values with masked_print_option, casting all innermost + dtypes to object. + """ + if masked_print_option.enabled(): + mask = self._mask + if mask is nomask: + res = self._data + else: + # convert to object array to make filled work + data = self._data + # For big arrays, to avoid a costly conversion to the + # object dtype, extract the corners before the conversion. + print_width = (self._print_width if self.ndim > 1 + else self._print_width_1d) + for axis in range(self.ndim): + if data.shape[axis] > print_width: + ind = print_width // 2 + arr = np.split(data, (ind, -ind), axis=axis) + data = np.concatenate((arr[0], arr[2]), axis=axis) + arr = np.split(mask, (ind, -ind), axis=axis) + mask = np.concatenate((arr[0], arr[2]), axis=axis) + + rdtype = _replace_dtype_fields(self.dtype, "O") + res = data.astype(rdtype) + _recursive_printoption(res, mask, masked_print_option) + else: + res = self.filled(self.fill_value) + return res + + def __str__(self): + return str(self._insert_masked_print()) + + def __repr__(self): + """ + Literal string representation. 
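+
+ As a small illustrative sketch (the exact spacing of the output may
+ vary between NumPy versions):
+
+ >>> np.ma.masked_array([1, 2, 3], mask=[0, 1, 0]) # illustrative only
+ masked_array(data=[1, --, 3],
+ mask=[False, True, False],
+ fill_value=999999)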
+ + """ + if self._baseclass is np.ndarray: + name = 'array' + else: + name = self._baseclass.__name__ + + # 2016-11-19: Demoted to legacy format + if np._core.arrayprint._get_legacy_print_mode() <= 113: + is_long = self.ndim > 1 + parameters = { + 'name': name, + 'nlen': " " * len(name), + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + 'dtype': str(self.dtype) + } + is_structured = bool(self.dtype.names) + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) + return _legacy_print_templates[key] % parameters + + prefix = f"masked_{name}(" + + dtype_needed = ( + not np._core.arrayprint.dtype_is_implied(self.dtype) or + np.all(self.mask) or + self.size == 0 + ) + + # determine which keyword args need to be shown + keys = ['data', 'mask', 'fill_value'] + if dtype_needed: + keys.append('dtype') + + # array has only one row (non-column) + is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) + + # choose what to indent each keyword with + min_indent = 2 + if is_one_row: + # first key on the same line as the type, remaining keys + # aligned by equals + indents = {} + indents[keys[0]] = prefix + for k in keys[1:]: + n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) + indents[k] = ' ' * n + prefix = '' # absorbed into the first indent + else: + # each key on its own line, indented by two spaces + indents = dict.fromkeys(keys, ' ' * min_indent) + prefix = prefix + '\n' # first key on the next line + + # format the field values + reprs = {} + reprs['data'] = np.array2string( + self._insert_masked_print(), + separator=", ", + prefix=indents['data'] + 'data=', + suffix=',') + reprs['mask'] = np.array2string( + self._mask, + separator=", ", + prefix=indents['mask'] + 'mask=', + suffix=',') + + if self._fill_value is None: + self.fill_value # initialize fill_value # noqa: B018 + + if (self._fill_value.dtype.kind in ("S", "U") + and self.dtype.kind == self._fill_value.dtype.kind): + # Allow strings: "N/A" has length 3 so would mismatch. + fill_repr = repr(self.fill_value.item()) + elif self._fill_value.dtype == self.dtype and not self.dtype == object: + # Guess that it is OK to use the string as item repr. To really + # fix this, it needs new logic (shared with structured scalars) + fill_repr = str(self.fill_value) + else: + fill_repr = repr(self.fill_value) + + reprs['fill_value'] = fill_repr + if dtype_needed: + reprs['dtype'] = np._core.arrayprint.dtype_short_repr(self.dtype) + + # join keys with values and indentations + result = ',\n'.join( + f'{indents[k]}{k}={reprs[k]}' + for k in keys + ) + return prefix + result + ')' + + def _delegate_binop(self, other): + # This emulates the logic in + # private/binop_override.h:forward_binop_should_defer + if isinstance(other, type(self)): + return False + array_ufunc = getattr(other, "__array_ufunc__", False) + if array_ufunc is False: + other_priority = getattr(other, "__array_priority__", -1000000) + return self.__array_priority__ < other_priority + else: + # If array_ufunc is not None, it will be called inside the ufunc; + # None explicitly tells us to not call the ufunc, i.e., defer. + return array_ufunc is None + + def _comparison(self, other, compare): + """Compare self with other using operator.eq or operator.ne. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. 
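+
+ As a rough sketch of the plain (non-structured) case, note how the
+ underlying data stay set even where the result is masked (the exact
+ output formatting may vary):
+
+ >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> b = np.ma.array([1, 9, 4], mask=[0, 1, 0])
+ >>> (a == b).data # the masked pair is recorded as equal
+ array([ True, True, False])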
+
+ For structured arrays, all fields are combined, with masked values
+ ignored. The result is masked if all fields were masked, with self
+ and other considered equal only if both were fully masked.
+ """
+ omask = getmask(other)
+ smask = self.mask
+ mask = mask_or(smask, omask, copy=True)
+
+ odata = getdata(other)
+ if mask.dtype.names is not None:
+ # only == and != are reasonably defined for structured dtypes,
+ # so give up early for all other comparisons:
+ if compare not in (operator.eq, operator.ne):
+ return NotImplemented
+ # For possibly masked structured arrays we need to be careful,
+ # since the standard structured array comparison will use all
+ # fields, masked or not. To avoid masked fields influencing the
+ # outcome, we set all masked fields in self to other, so they'll
+ # count as equal. To prepare, we ensure we have the right shape.
+ broadcast_shape = np.broadcast(self, odata).shape
+ sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
+ sbroadcast._mask = mask
+ sdata = sbroadcast.filled(odata)
+ # Now take care of the mask; the merged mask should have an item
+ # masked if all fields were masked (in one and/or other).
+ mask = (mask == np.ones((), mask.dtype))
+ # Ensure we can compare masks below if other was not masked.
+ if omask is np.False_:
+ omask = np.zeros((), smask.dtype)
+
+ else:
+ # For regular arrays, just use the data as they come.
+ sdata = self.data
+
+ check = compare(sdata, odata)
+
+ if isinstance(check, (np.bool, bool)):
+ return masked if mask else check
+
+ if mask is not nomask:
+ if compare in (operator.eq, operator.ne):
+ # Adjust elements that were masked, which should be treated
+ # as equal if masked in both, unequal if masked in one.
+ # Note that this works automatically for structured arrays too.
+ # Ignore this for operations other than `==` and `!=`
+ check = np.where(mask, compare(smask, omask), check)
+
+ if mask.shape != check.shape:
+ # Guarantee consistency of the shape, making a copy since the
+ # mask may need to get written to later.
+ mask = np.broadcast_to(mask, check.shape).copy()
+
+ check = check.view(type(self))
+ check._update_from(self)
+ check._mask = mask
+
+ # Cast fill value to np.bool if needed. If it cannot be cast, the
+ # default boolean fill value is used.
+ if check._fill_value is not None:
+ try:
+ fill = _check_fill_value(check._fill_value, np.bool)
+ except (TypeError, ValueError):
+ fill = _check_fill_value(None, np.bool)
+ check._fill_value = fill
+
+ return check
+
+ def __eq__(self, other):
+ """Check whether other equals self elementwise.
+
+ When either of the elements is masked, the result is masked as well,
+ but the underlying boolean data are still set, with self and other
+ considered equal if both are masked, and unequal otherwise.
+
+ For structured arrays, all fields are combined, with masked values
+ ignored. The result is masked if all fields were masked, with self
+ and other considered equal only if both were fully masked.
+ """
+ return self._comparison(other, operator.eq)
+
+ def __ne__(self, other):
+ """Check whether other does not equal self elementwise.
+
+ When either of the elements is masked, the result is masked as well,
+ but the underlying boolean data are still set, with self and other
+ considered equal if both are masked, and unequal otherwise.
+
+ For structured arrays, all fields are combined, with masked values
+ ignored. The result is masked if all fields were masked, with self
+ and other considered equal only if both were fully masked.
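+
+ A minimal sketch of the behaviour (the exact repr may vary):
+
+ >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> b = np.ma.array([1, 0, 0], mask=[0, 1, 0])
+ >>> a != b # both are masked at index 1, so that pair counts as equal
+ masked_array(data=[False, --, True],
+ mask=[False, True, False],
+ fill_value=True)
+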
+ """ + return self._comparison(other, operator.ne) + + # All other comparisons: + def __le__(self, other): + return self._comparison(other, operator.le) + + def __lt__(self, other): + return self._comparison(other, operator.lt) + + def __ge__(self, other): + return self._comparison(other, operator.ge) + + def __gt__(self, other): + return self._comparison(other, operator.gt) + + def __add__(self, other): + """ + Add self to other, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return add(self, other) + + def __radd__(self, other): + """ + Add other to self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other + self`. + return add(other, self) + + def __sub__(self, other): + """ + Subtract other from self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return subtract(self, other) + + def __rsub__(self, other): + """ + Subtract self from other, and return a new masked array. + + """ + return subtract(other, self) + + def __mul__(self, other): + "Multiply self by other, and return a new masked array." + if self._delegate_binop(other): + return NotImplemented + return multiply(self, other) + + def __rmul__(self, other): + """ + Multiply other by self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other * self`. + return multiply(other, self) + + def __truediv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return true_divide(self, other) + + def __rtruediv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return true_divide(other, self) + + def __floordiv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return floor_divide(self, other) + + def __rfloordiv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return floor_divide(other, self) + + def __pow__(self, other): + """ + Raise self to the power other, masking the potential NaNs/Infs + + """ + if self._delegate_binop(other): + return NotImplemented + return power(self, other) + + def __rpow__(self, other): + """ + Raise other to the power self, masking the potential NaNs/Infs + + """ + return power(other, self) + + def __iadd__(self, other): + """ + Add other to self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__iadd__(other_data) + return self + + def __isub__(self, other): + """ + Subtract other from self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__isub__(other_data) + return self + + def __imul__(self, other): + """ + Multiply self by other in-place. 
+ + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__imul__(other_data) + return self + + def __ifloordiv__(self, other): + """ + Floor divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.floor_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__ifloordiv__(other_data) + return self + + def __itruediv__(self, other): + """ + True divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.true_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__itruediv__(other_data) + return self + + def __ipow__(self, other): + """ + Raise self to the power other, in place. + + """ + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + other_mask = getmask(other) + with np.errstate(divide='ignore', invalid='ignore'): + self._data.__ipow__(other_data) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.copyto(self._data, self.fill_value, where=invalid) + new_mask = mask_or(other_mask, invalid) + self._mask = mask_or(self._mask, new_mask) + return self + + def __float__(self): + """ + Convert to float. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) + return np.nan + return float(self.item()) + + def __int__(self): + """ + Convert to int. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python int.') + return int(self.item()) + + @property + def imag(self): + """ + The imaginary part of the masked array. + + This property is a view on the imaginary part of this `MaskedArray`. + + See Also + -------- + real + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.imag + masked_array(data=[1.0, --, 1.6], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.imag.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_imag = imag.fget + + @property + def real(self): + """ + The real part of the masked array. + + This property is a view on the real part of this `MaskedArray`. 
+ + See Also + -------- + imag + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.real + masked_array(data=[1.0, --, 3.45], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.real.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_real = real.fget + + def count(self, axis=None, keepdims=np._NoValue): + """ + Count the non-masked elements of the array along the given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis or axes along which the count is performed. + The default, None, performs the count over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + If this is a tuple of ints, the count is performed on multiple + axes, instead of a single axis or all the axes as before. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + result : ndarray or scalar + An array with the same shape as the input array, with the specified + axis removed. If the array is a 0-d array, or if `axis` is None, a + scalar is returned. + + See Also + -------- + ma.count_masked : Count masked elements in array or along a given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(6).reshape((2, 3)) + >>> a[1, :] = ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, --, --]], + mask=[[False, False, False], + [ True, True, True]], + fill_value=999999) + >>> a.count() + 3 + + When the `axis` keyword is specified an array of appropriate size is + returned. + + >>> a.count(axis=0) + array([1, 1, 1]) + >>> a.count(axis=1) + array([3, 0]) + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + m = self._mask + # special case for matrices (we assume no other subclasses modify + # their dimensions) + if isinstance(self.data, np.matrix): + if m is nomask: + m = np.zeros(self.shape, dtype=np.bool) + m = m.view(type(self.data)) + + if m is nomask: + # compare to _count_reduce_items in _methods.py + + if self.shape == (): + if axis not in (None, 0): + raise np.exceptions.AxisError(axis=axis, ndim=self.ndim) + return 1 + elif axis is None: + if kwargs.get('keepdims'): + return np.array(self.size, dtype=np.intp, ndmin=self.ndim) + return self.size + + axes = normalize_axis_tuple(axis, self.ndim) + items = 1 + for ax in axes: + items *= self.shape[ax] + + if kwargs.get('keepdims'): + out_dims = list(self.shape) + for a in axes: + out_dims[a] = 1 + else: + out_dims = [d for n, d in enumerate(self.shape) + if n not in axes] + # make sure to return a 0-d array if axis is supplied + return np.full(out_dims, items, dtype=np.intp) + + # take care of the masked singleton + if self is masked: + return 0 + + return (~m).sum(axis=axis, dtype=np.intp, **kwargs) + + def ravel(self, order='C'): + """ + Returns a 1D version of self, as a view. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `a` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. 
Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + (Masked arrays currently use 'A' on the data when 'K' is passed.) + + Returns + ------- + MaskedArray + Output view is of shape ``(self.size,)`` (or + ``(np.ma.product(self.shape),)``). + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.ravel() + masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], + mask=[False, True, False, True, False, True, False, True, + False], + fill_value=999999) + + """ + # The order of _data and _mask could be different (it shouldn't be + # normally). Passing order `K` or `A` would be incorrect. + # So we ignore the mask memory order. + # TODO: We don't actually support K, so use A instead. We could + # try to guess this correct by sorting strides or deprecate. + if order in "kKaA": + order = "F" if self._data.flags.fnc else "C" + r = ndarray.ravel(self._data, order=order).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) + else: + r._mask = nomask + return r + + def reshape(self, *s, **kwargs): + """ + Give a new shape to the array without changing its data. + + Returns a masked array containing the same data, but with a new shape. + The result is a view on the original array; if this is not possible, a + ValueError is raised. + + Parameters + ---------- + shape : int or tuple of ints + The new shape should be compatible with the original shape. If an + integer is supplied, then the result will be a 1-D array of that + length. + order : {'C', 'F'}, optional + Determines whether the array data should be viewed as in C + (row-major) or FORTRAN (column-major) order. + + Returns + ------- + reshaped_array : array + A new view on the array. + + See Also + -------- + reshape : Equivalent function in the masked array module. + numpy.ndarray.reshape : Equivalent method on ndarray object. + numpy.reshape : Equivalent function in the NumPy module. + + Notes + ----- + The reshaping operation cannot guarantee that a copy will not be made, + to modify the shape in place, use ``a.shape = s`` + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) + >>> x + masked_array( + data=[[--, 2], + [3, --]], + mask=[[ True, False], + [False, True]], + fill_value=999999) + >>> x = x.reshape((4,1)) + >>> x + masked_array( + data=[[--], + [2], + [3], + [--]], + mask=[[ True], + [False], + [False], + [ True]], + fill_value=999999) + + """ + kwargs.update(order=kwargs.get('order', 'C')) + result = self._data.reshape(*s, **kwargs).view(type(self)) + result._update_from(self) + mask = self._mask + if mask is not nomask: + result._mask = mask.reshape(*s, **kwargs) + return result + + def resize(self, newshape, refcheck=True, order=False): + """ + .. warning:: + + This method does nothing, except raise a ValueError exception. 
A + masked array does not own its data and therefore cannot safely be + resized in place. Use the `numpy.ma.resize` function instead. + + This method is difficult to implement safely and may be deprecated in + future releases of NumPy. + + """ + # Note : the 'order' keyword looks broken, let's just drop it + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) + + def put(self, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + Sets self._data.flat[n] = values[n] for each n in indices. + If `values` is shorter than `indices` then it will repeat. + If `values` has some masked values, the initial mask is updated + in consequence, else the corresponding values are unmasked. + + Parameters + ---------- + indices : 1-D array_like + Target indices, interpreted as integers. + values : array_like + Values to place in self._data copy at target indices. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + 'raise' : raise an error. + 'wrap' : wrap around. + 'clip' : clip to the range. + + Notes + ----- + `values` can be a scalar or length 1 array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.put([0,4,8],[10,20,30]) + >>> x + masked_array( + data=[[10, --, 3], + [--, 20, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + >>> x.put(4,999) + >>> x + masked_array( + data=[[10, --, 3], + [--, 999, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + """ + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = narray(indices, copy=None) + values = narray(values, copy=None, subok=True) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + + self._data.put(indices, values, mode=mode) + + # short circuit if neither self nor values are masked + if self._mask is nomask and getmask(values) is nomask: + return + + m = getmaskarray(self) + + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, shrink=True) + self._mask = m + return + + def ids(self): + """ + Return the addresses of the data and mask areas. + + Parameters + ---------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) + >>> x.ids() + (166670640, 166659832) # may vary + + If the array has no mask, the address of `nomask` is returned. This address + is typically not close to the data in memory: + + >>> x = np.ma.array([1, 2, 3]) + >>> x.ids() + (166691080, 3083169284) # may vary + + """ + if self._mask is nomask: + return (self.ctypes.data, id(nomask)) + return (self.ctypes.data, self._mask.ctypes.data) + + def iscontiguous(self): + """ + Return a boolean indicating whether the data is contiguous. 
+ + Parameters + ---------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3]) + >>> x.iscontiguous() + True + + `iscontiguous` returns one of the flags of the masked array: + + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : True + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + return self.flags['CONTIGUOUS'] + + def all(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if all elements evaluate to True. + + The output array is masked where all the values along the given axis + are masked: if the output would have been a scalar and that all the + values are masked, then the output is `masked`. + + Refer to `numpy.all` for full documentation. + + See Also + -------- + numpy.ndarray.all : corresponding function for ndarrays + numpy.all : equivalent function + + Examples + -------- + >>> import numpy as np + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + return masked + return d + self.filled(True).all(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def any(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if any of the elements of `a` evaluate to True. + + Masked values are considered as False during computation. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.ndarray.any : corresponding function for ndarrays + numpy.any : equivalent function + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + d = masked + return d + self.filled(False).any(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def nonzero(self): + """ + Return the indices of unmasked elements that are not zero. + + Returns a tuple of arrays, one for each dimension, containing the + indices of the non-zero elements in that dimension. The corresponding + non-zero values can be obtained with:: + + a[a.nonzero()] + + To group the indices by element, rather than dimension, use + instead:: + + np.transpose(a.nonzero()) + + The result of this is always a 2d array, with a row for each non-zero + element. + + Parameters + ---------- + None + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + numpy.nonzero : + Function operating on ndarrays. + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + numpy.ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array(np.eye(3)) + >>> x + masked_array( + data=[[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]], + mask=False, + fill_value=1e+20) + >>> x.nonzero() + (array([0, 1, 2]), array([0, 1, 2])) + + Masked elements are ignored. 
+ + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[1.0, 0.0, 0.0], + [0.0, --, 0.0], + [0.0, 0.0, 1.0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1e+20) + >>> x.nonzero() + (array([0, 2]), array([0, 2])) + + Indices can also be grouped by element. + + >>> np.transpose(x.nonzero()) + array([[0, 0], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, ma.nonzero(a > 3) + yields the indices of the `a` where the condition is true. + + >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + masked_array( + data=[[False, False, False], + [ True, True, True], + [ True, True, True]], + mask=False, + fill_value=True) + >>> ma.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the condition array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return np.asarray(self.filled(0)).nonzero() + + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + (this docstring should be overwritten) + """ + # !!!: implement out + test! + m = self._mask + if m is nomask: + result = super().trace(offset=offset, axis1=axis1, axis2=axis2, + out=out) + return result.astype(dtype) + else: + D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) + return D.astype(dtype).filled(0).sum(axis=-1, out=out) + trace.__doc__ = ndarray.trace.__doc__ + + def dot(self, b, out=None, strict=False): + """ + a.dot(b, out=None) + + Masked dot product of two arrays. Note that `out` and `strict` are + located in different positions than in `ma.dot`. In order to + maintain compatibility with the functional version, it is + recommended that the optional arguments be treated as keyword only. + At some point that may be mandatory. + + Parameters + ---------- + b : masked_array_like + Inputs array. + out : masked_array, optional + Output argument. This must have the exact kind that would be + returned if it was not used. In particular, it must have the + right type, must be C-contiguous, and its dtype must be the + dtype that would be returned for `ma.dot(a,b)`. This is a + performance feature. Therefore, if these conditions are not + met, an exception is raised, instead of attempting to be + flexible. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) + for the computation. Default is False. Propagating the mask + means that if a masked value appears in a row or column, the + whole row or column is considered masked. + + See Also + -------- + numpy.ma.dot : equivalent function + + """ + return dot(self, b, out=out, strict=strict) + + def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of the array elements over the given axis. + + Masked elements are set to 0 internally. + + Refer to `numpy.sum` for full documentation. 
+
+ See Also
+ --------
+ numpy.ndarray.sum : corresponding function for ndarrays
+ numpy.sum : equivalent function
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.sum()
+ 25
+ >>> x.sum(axis=1)
+ masked_array(data=[4, 5, 16],
+ mask=[False, False, False],
+ fill_value=999999)
+ >>> x.sum(axis=0)
+ masked_array(data=[8, 5, 12],
+ mask=[False, False, False],
+ fill_value=999999)
+ >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
+ <class 'numpy.int64'>
+
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ _mask = self._mask
+ newmask = _check_mask_axis(_mask, axis, **kwargs)
+ # No explicit output
+ if out is None:
+ result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
+ rndim = getattr(result, 'ndim', 0)
+ if rndim:
+ result = result.view(type(self))
+ result.__setmask__(newmask)
+ elif newmask:
+ result = masked
+ return result
+ # Explicit output
+ result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
+ if isinstance(out, MaskedArray):
+ outmask = getmask(out)
+ if outmask is nomask:
+ outmask = out._mask = make_mask_none(out.shape)
+ outmask.flat = newmask
+ return out
+
+ def cumsum(self, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative sum of the array elements over the given axis.
+
+ Masked values are set to 0 internally during the computation.
+ However, their position is saved, and the result will be masked at
+ the same locations.
+
+ Refer to `numpy.cumsum` for full documentation.
+
+ Notes
+ -----
+ The mask is lost if `out` is not a valid :class:`ma.MaskedArray` !
+
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ See Also
+ --------
+ numpy.ndarray.cumsum : corresponding function for ndarrays
+ numpy.cumsum : equivalent function
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
+ >>> marr.cumsum()
+ masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
+ mask=[False, False, False, True, True, True, False, False,
+ False, False],
+ fill_value=999999)
+
+ """
+ result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.__setmask__(self.mask)
+ return out
+ result = result.view(type(self))
+ result.__setmask__(self._mask)
+ return result
+
+ def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+ """
+ Return the product of the array elements over the given axis.
+
+ Masked elements are set to 1 internally for computation.
+
+ Refer to `numpy.prod` for full documentation.
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is raised
+ on overflow.
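+
+ For instance (a small sketch; masked entries contribute the neutral
+ value 1):
+
+ >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> x.prod() # 1 * 3; the masked 2 is replaced by 1
+ 3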
+ + See Also + -------- + numpy.ndarray.prod : corresponding function for ndarrays + numpy.prod : equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + # No explicit output + if out is None: + result = self.filled(1).prod(axis, dtype=dtype, **kwargs) + rndim = getattr(result, 'ndim', 0) + if rndim: + result = result.view(type(self)) + result.__setmask__(newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + return out + product = prod + + def cumprod(self, axis=None, dtype=None, out=None): + """ + Return the cumulative product of the array elements over the given axis. + + Masked values are set to 1 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. + + Refer to `numpy.cumprod` for full documentation. + + Notes + ----- + The mask is lost if `out` is not a valid MaskedArray ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + See Also + -------- + numpy.ndarray.cumprod : corresponding function for ndarrays + numpy.cumprod : equivalent function + """ + result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Returns the average of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.ndarray.mean : corresponding function for ndarrays + numpy.mean : Equivalent function + numpy.ma.average : Weighted average. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1,2,3], mask=[False, False, True]) + >>> a + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.mean() + 1.5 + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if self._mask is nomask: + result = super().mean(axis=axis, dtype=dtype, **kwargs)[()] + else: + is_float16_result = False + if dtype is None: + if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool)): + dtype = mu.dtype('f8') + elif issubclass(self.dtype.type, ntypes.float16): + dtype = mu.dtype('f4') + is_float16_result = True + dsum = self.sum(axis=axis, dtype=dtype, **kwargs) + cnt = self.count(axis=axis, **kwargs) + if cnt.shape == () and (cnt == 0): + result = masked + elif is_float16_result: + result = self.dtype.type(dsum * 1. / cnt) + else: + result = dsum * 1. / cnt + if out is not None: + out.flat = result + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = getmask(result) + return out + return result + + def anom(self, axis=None, dtype=None): + """ + Compute the anomalies (deviations from the arithmetic mean) + along the given axis. + + Returns an array of anomalies, with the same shape as the input and + where the arithmetic mean is computed along the given axis. 
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis over which the anomalies are taken.
+ The default is to use the mean of the flattened array as reference.
+ dtype : dtype, optional
+ Type to use in computing the variance. For arrays of integer type
+ the default is float32; for arrays of float types it is the same as
+ the array type.
+
+ See Also
+ --------
+ mean : Compute the mean of the array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.ma.array([1,2,3])
+ >>> a.anom()
+ masked_array(data=[-1., 0., 1.],
+ mask=False,
+ fill_value=1e+20)
+
+ """
+ m = self.mean(axis, dtype)
+ if not axis:
+ return self - m
+ else:
+ return self - expand_dims(m, axis)
+
+ def var(self, axis=None, dtype=None, out=None, ddof=0,
+ keepdims=np._NoValue, mean=np._NoValue):
+ """
+ Returns the variance of the array elements along given axis.
+
+ Masked entries are ignored, and result elements which are not
+ finite will be masked.
+
+ Refer to `numpy.var` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.var : corresponding function for ndarrays
+ numpy.var : Equivalent function
+ """
+ kwargs = {}
+
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+
+ # Easy case: nomask, business as usual
+ if self._mask is nomask:
+
+ if mean is not np._NoValue:
+ kwargs['mean'] = mean
+
+ ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof,
+ **kwargs)[()]
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.__setmask__(nomask)
+ return out
+ return ret
+
+ # Some data are masked, yay!
+ cnt = self.count(axis=axis, **kwargs) - ddof
+
+ if mean is not np._NoValue:
+ danom = self - mean
+ else:
+ danom = self - self.mean(axis, dtype, keepdims=True)
+
+ if iscomplexobj(self):
+ danom = umath.absolute(danom) ** 2
+ else:
+ danom *= danom
+ dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
+ # Apply the mask if it's not a scalar
+ if dvar.ndim:
+ dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
+ dvar._update_from(self)
+ elif getmask(dvar):
+ # Make sure that masked is returned when the scalar is masked.
+ dvar = masked
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.flat = 0
+ out.__setmask__(True)
+ elif out.dtype.kind in 'biu':
+ errmsg = "Masked data information would be lost in one or "\
+ "more location."
+ raise MaskError(errmsg)
+ else:
+ out.flat = np.nan
+ return out
+ # In case we have an explicit output
+ if out is not None:
+ # Set the data
+ out.flat = dvar
+ # Set the mask if needed
+ if isinstance(out, MaskedArray):
+ out.__setmask__(dvar.mask)
+ return out
+ return dvar
+ var.__doc__ = np.var.__doc__
+
+ def std(self, axis=None, dtype=None, out=None, ddof=0,
+ keepdims=np._NoValue, mean=np._NoValue):
+ """
+ Returns the standard deviation of the array elements along given axis.
+
+ Masked entries are ignored.
+
+ Refer to `numpy.std` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.std : corresponding function for ndarrays
+ numpy.std : Equivalent function
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ dvar = self.var(axis, dtype, out, ddof, **kwargs)
+ if dvar is not masked:
+ if out is not None:
+ np.power(out, 0.5, out=out, casting='unsafe')
+ return out
+ dvar = sqrt(dvar)
+ return dvar
+
+ def round(self, decimals=0, out=None):
+ """
+ Return each element rounded to the given number of decimals.
+
+ Refer to `numpy.around` for full documentation.
+ + See Also + -------- + numpy.ndarray.round : corresponding function for ndarrays + numpy.around : equivalent function + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], + ... mask=[0, 0, 0, 1, 0, 0]) + >>> ma.round(x) + masked_array(data=[1.0, 2.0, 2.0, --, 2.0, 3.0], + mask=[False, False, False, True, False, False], + fill_value=1e+20) + + """ + result = self._data.round(decimals=decimals, out=out).view(type(self)) + if result.ndim > 0: + result._mask = self._mask + result._update_from(self) + elif self._mask: + # Return masked when the scalar is masked + result = masked + # No explicit output: we're done + if out is None: + return result + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + + def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, + fill_value=None, *, stable=False): + """ + Return an ndarray of indices that sort the array along the + specified axis. Masked values are filled beforehand to + `fill_value`. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. If None, the default, the flattened array + is used. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + stable : bool, optional + Only for compatibility with ``np.argsort``. Ignored. + + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + ma.MaskedArray.sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + numpy.ndarray.sort : Inplace sort. + + Notes + ----- + See `sort` for notes on the different sorting algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([3,2,1], mask=[False, False, True]) + >>> a + masked_array(data=[3, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.argsort() + array([1, 0, 2]) + + """ + if stable: + raise ValueError( + "`stable` parameter is not supported for masked arrays." + ) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(self) + + if fill_value is None: + if endwith: + # nan > inf + if np.issubdtype(self.dtype, np.floating): + fill_value = np.nan + else: + fill_value = minimum_fill_value(self) + else: + fill_value = maximum_fill_value(self) + + filled = self.filled(fill_value) + return filled.argsort(axis=axis, kind=kind, order=order) + + def argmin(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Return array of indices to the minimum values along the given axis. 
+ + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + minimum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + ndarray or scalar + If multi-dimension input, returns a new ndarray of indices to the + minimum values along the given axis. Otherwise, returns a scalar + of index to the minimum values along the given axis. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) + >>> x.shape = (2,2) + >>> x + masked_array( + data=[[--, --], + [2, 3]], + mask=[[ True, True], + [False, False]], + fill_value=999999) + >>> x.argmin(axis=0, fill_value=-1) + array([0, 0]) + >>> x.argmin(axis=0, fill_value=9) + array([1, 1]) + + """ + if fill_value is None: + fill_value = minimum_fill_value(self) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmin(axis, out=out, keepdims=keepdims) + + def argmax(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Returns array of indices of the maximum values along the given axis. + Masked values are treated as if they had the value fill_value. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + maximum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + index_array : {integer_array} + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + >>> a.argmax() + 5 + >>> a.argmax(0) + array([1, 1, 1]) + >>> a.argmax(1) + array([2, 2]) + + """ + if fill_value is None: + fill_value = maximum_fill_value(self._data) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmax(axis, out=out, keepdims=keepdims) + + def sort(self, axis=-1, kind=None, order=None, endwith=True, + fill_value=None, *, stable=False): + """ + Sort the array, in-place + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. This list does not need to + include all of the fields. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values sorting at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. 
+ stable : bool, optional + Only for compatibility with ``np.sort``. Ignored. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + numpy.ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + + Notes + ----- + See ``sort`` for notes on the different sorting algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Default + >>> a.sort() + >>> a + masked_array(data=[1, 3, 5, --, --], + mask=[False, False, False, True, True], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Put missing values in the front + >>> a.sort(endwith=False) + >>> a + masked_array(data=[--, --, 1, 3, 5], + mask=[ True, True, False, False, False], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # fill_value takes over endwith + >>> a.sort(endwith=False, fill_value=3) + >>> a + masked_array(data=[1, --, --, 3, 5], + mask=[False, True, True, False, False], + fill_value=999999) + + """ + if stable: + raise ValueError( + "`stable` parameter is not supported for masked arrays." + ) + + if self._mask is nomask: + ndarray.sort(self, axis=axis, kind=kind, order=order) + return + + if self is masked: + return + + sidx = self.argsort(axis=axis, kind=kind, order=order, + fill_value=fill_value, endwith=endwith) + + self[...] = np.take_along_axis(self, sidx, axis=axis) + + def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the minimum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + If this is a tuple of ints, the minimum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must be of + the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of `minimum_fill_value`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amin : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.minimum_fill_value + Returns the minimum filling value for a given datatype. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]] + >>> mask = [[1, 1, 0], [0, 0, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[--, --, 3.0], + [0.2, -0.7, --]], + mask=[[ True, True, False], + [False, False, True]], + fill_value=1e+20) + >>> ma.min(masked_x) + -0.7 + >>> ma.min(masked_x, axis=-1) + masked_array(data=[3.0, -0.7], + mask=[False, False], + fill_value=1e+20) + >>> ma.min(masked_x, axis=0, keepdims=True) + masked_array(data=[[0.2, -0.7, 3.0]], + mask=[[False, False, False]], + fill_value=1e+20) + >>> mask = [[1, 1, 1,], [1, 1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.min(masked_x, axis=0) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = minimum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).min( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + self.filled(fill_value).min(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the maximum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + If this is a tuple of ints, the maximum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of maximum_fill_value(). + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amax : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.maximum_fill_value + Returns the maximum filling value for a given datatype. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[-1., 2.5], [4., -2.], [3., 0.]] + >>> mask = [[0, 0], [1, 0], [1, 0]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[-1.0, 2.5], + [--, -2.0], + [--, 0.0]], + mask=[[False, False], + [ True, False], + [ True, False]], + fill_value=1e+20) + >>> ma.max(masked_x) + 2.5 + >>> ma.max(masked_x, axis=0) + masked_array(data=[-1.0, 2.5], + mask=[False, False], + fill_value=1e+20) + >>> ma.max(masked_x, axis=1, keepdims=True) + masked_array( + data=[[2.5], + [-2.0], + [0.0]], + mask=[[False], + [False], + [False]], + fill_value=1e+20) + >>> mask = [[1, 1], [1, 1], [1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.max(masked_x, axis=1) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = maximum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).max( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + self.filled(fill_value).max(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): + """ + Return (maximum - minimum) along the given dimension + (i.e. peak-to-peak value). + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to find the peaks. If None (default) the + flattened array is used. + out : {None, array_like}, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + fill_value : scalar or None, optional + Value used to fill in the masked values. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + ptp : ndarray. + A new array holding the result, unless ``out`` was + specified, in which case a reference to ``out`` is returned. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.MaskedArray([[4, 9, 2, 10], + ... 
[6, 9, 7, 12]]) + + >>> x.ptp(axis=1) + masked_array(data=[8, 6], + mask=False, + fill_value=999999) + + >>> x.ptp(axis=0) + masked_array(data=[2, 0, 5, 2], + mask=False, + fill_value=999999) + + >>> x.ptp() + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.ma.MaskedArray([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> y.ptp(axis=1) + masked_array(data=[ 126, 127, -128, -127], + mask=False, + fill_value=np.int64(999999), + dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> y.ptp(axis=1).view(np.uint8) + masked_array(data=[126, 127, 128, 129], + mask=False, + fill_value=np.uint64(999999), + dtype=uint8) + """ + if out is None: + result = self.max(axis=axis, fill_value=fill_value, + keepdims=keepdims) + result -= self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + return result + out.flat = self.max(axis=axis, out=out, fill_value=fill_value, + keepdims=keepdims) + min_value = self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + np.subtract(out, min_value, out=out, casting='unsafe') + return out + + def partition(self, *args, **kwargs): + warnings.warn("Warning: 'partition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().partition(*args, **kwargs) + + def argpartition(self, *args, **kwargs): + warnings.warn("Warning: 'argpartition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().argpartition(*args, **kwargs) + + def take(self, indices, axis=None, out=None, mode='raise'): + """ + Take elements from a masked array along an axis. + + This function does the same thing as "fancy" indexing (indexing arrays + using arrays) for masked arrays. It can be easier to use if you need + elements along a given axis. + + Parameters + ---------- + a : masked_array + The source masked array. + indices : array_like + The indices of the values to extract. Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : MaskedArray, optional + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : MaskedArray + The returned array has the same type as `a`. + + See Also + -------- + numpy.take : Equivalent function for ndarrays. + compress : Take elements using a boolean mask. + take_along_axis : Take elements by matching the array and the index arrays. + + Notes + ----- + This function behaves similarly to `numpy.take`, but it handles masked + values. The mask is retained in the output array, and masked values + in the input array remain masked in the output. 
+ + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([4, 3, 5, 7, 6, 8], mask=[0, 0, 1, 0, 1, 0]) + >>> indices = [0, 1, 4] + >>> np.ma.take(a, indices) + masked_array(data=[4, 3, --], + mask=[False, False, True], + fill_value=999999) + + When `indices` is not one-dimensional, the output also has these dimensions: + + >>> np.ma.take(a, [[0, 1], [2, 3]]) + masked_array(data=[[4, 3], + [--, 7]], + mask=[[False, False], + [ True, False]], + fill_value=999999) + """ + (_data, _mask) = (self._data, self._mask) + cls = type(self) + # Make sure the indices are not masked + maskindices = getmask(indices) + if maskindices is not nomask: + indices = indices.filled(0) + # Get the data, promoting scalars to 0d arrays with [...] so that + # .view works correctly + if out is None: + out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) + else: + np.take(_data, indices, axis=axis, mode=mode, out=out) + # Get the mask + if isinstance(out, MaskedArray): + if _mask is nomask: + outmask = maskindices + else: + outmask = _mask.take(indices, axis=axis, mode=mode) + outmask |= maskindices + out.__setmask__(outmask) + # demote 0d arrays back to scalars, for consistency with ndarray.take + return out[()] + + # Array methods + copy = _arraymethod('copy') + diagonal = _arraymethod('diagonal') + flatten = _arraymethod('flatten') + repeat = _arraymethod('repeat') + squeeze = _arraymethod('squeeze') + swapaxes = _arraymethod('swapaxes') + T = property(fget=lambda self: self.transpose()) + transpose = _arraymethod('transpose') + + @property + def mT(self): + """ + Return the matrix-transpose of the masked array. + + The matrix transpose is the transpose of the last two dimensions, even + if the array is of higher dimension. + + .. versionadded:: 2.0 + + Returns + ------- + result: MaskedArray + The masked array with the last two dimensions transposed + + Raises + ------ + ValueError + If the array is of dimension less than 2. + + See Also + -------- + ndarray.mT: + Equivalent method for arrays + """ + + if self.ndim < 2: + raise ValueError("matrix transpose with ndim < 2 is undefined") + + if self._mask is nomask: + return masked_array(data=self._data.mT) + else: + return masked_array(data=self.data.mT, mask=self.mask.mT) + + def tolist(self, fill_value=None): + """ + Return the data portion of the masked array as a hierarchical Python list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to `fill_value`. If `fill_value` is None, + the corresponding entries in the output list will be ``None``. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries. Default is None. + + Returns + ------- + result : list + The Python list representation of the masked array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) + >>> x.tolist() + [[1, None, 3], [None, 5, None], [7, None, 9]] + >>> x.tolist(-999) + [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] + + """ + _mask = self._mask + # No mask ? Just return .data.tolist ? + if _mask is nomask: + return self._data.tolist() + # Explicit fill_value: fill the array and get the list + if fill_value is not None: + return self.filled(fill_value).tolist() + # Structured array. + names = self.dtype.names + if names: + result = self._data.astype([(_, object) for _ in names]) + for n in names: + result[n][_mask[n]] = None + return result.tolist() + # Standard arrays. 
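+        # The fully unmasked case already returned above, so from here on a
+        # real mask exists: build an object-dtype copy of the data, replace
+        # the masked entries with None, and let ndarray.tolist do the nesting.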
+        if _mask is nomask:
+            return [None]
+        # Set temps to save time when dealing w/ marrays.
+        inishape = self.shape
+        result = np.array(self._data.ravel(), dtype=object)
+        result[_mask.ravel()] = None
+        result.shape = inishape
+        return result.tolist()
+
+    def tobytes(self, fill_value=None, order='C'):
+        """
+        Return the array data as a string containing the raw bytes in the array.
+
+        The array is filled with a fill value before the string conversion.
+
+        Parameters
+        ----------
+        fill_value : scalar, optional
+            Value used to fill in the masked values. Default is None, in which
+            case `MaskedArray.fill_value` is used.
+        order : {'C','F','A'}, optional
+            Order of the data item in the copy. Default is 'C'.
+
+            - 'C'   -- C order (row major).
+            - 'F'   -- Fortran order (column major).
+            - 'A'   -- Any, current order of array.
+            - None  -- Same as 'A'.
+
+        See Also
+        --------
+        numpy.ndarray.tobytes
+        tolist, tofile
+
+        Notes
+        -----
+        As for `ndarray.tobytes`, information about the shape, dtype, etc.,
+        but also about `fill_value`, will be lost.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
+        >>> x.tobytes()
+        b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
+
+        """
+        return self.filled(fill_value).tobytes(order=order)
+
+    def tofile(self, fid, sep="", format="%s"):
+        """
+        Save a masked array to a file in binary format.
+
+        .. warning::
+          This function is not implemented yet.
+
+        Raises
+        ------
+        NotImplementedError
+            When `tofile` is called.
+
+        """
+        raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
+
+    def toflex(self):
+        """
+        Transforms a masked array into a flexible-type array.
+
+        The flexible type array that is returned will have two fields:
+
+        * the ``_data`` field stores the ``_data`` part of the array.
+        * the ``_mask`` field stores the ``_mask`` part of the array.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        record : ndarray
+            A new flexible-type `ndarray` with two fields: the first element
+            containing a value, the second element containing the corresponding
+            mask boolean. The returned record shape matches self.shape.
+
+        Notes
+        -----
+        A side-effect of transforming a masked array into a flexible `ndarray` is
+        that meta information (``fill_value``, ...) will be lost.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False,  True, False],
+                [ True, False,  True],
+                [False,  True, False]],
+          fill_value=999999)
+        >>> x.toflex()
+        array([[(1, False), (2,  True), (3, False)],
+               [(4,  True), (5, False), (6,  True)],
+               [(7, False), (8,  True), (9, False)]],
+              dtype=[('_data', '<i8'), ('_mask', '?')])
+
+        """
+        # Get the basic dtype.
+        ddtype = self.dtype
+        # Make sure we have a mask
+        _mask = self._mask
+        if _mask is None:
+            _mask = make_mask_none(self.shape, ddtype)
+        # And get its dtype
+        mdtype = self._mask.dtype
+
+        record = np.ndarray(shape=self.shape,
+                            dtype=[('_data', ddtype), ('_mask', mdtype)])
+        record['_data'] = self._data
+        record['_mask'] = self._mask
+        return record
+
+    torecords = toflex
+
+    # Pickling
+    def __getstate__(self):
+        """Return the internal state of the masked array, for pickling
+        purposes.
+
+        """
+        cf = 'CF'[self.flags.fnc]
+        data_state = super().__reduce__()[2]
+        return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
+
+    def __setstate__(self, state):
+        """Restore the internal state of the masked array, for
+        pickling purposes.  ``state`` is typically the output of the
+        ``__getstate__`` output, and is a 5-tuple:
+
+        - class name
+        - a tuple giving the shape of the data
+        - a typecode for the data
+        - a binary string for the data
+        - a binary string for the mask.
+
+        """
+        (_, shp, typ, isf, raw, msk, flv) = state
+        super().__setstate__((shp, typ, isf, raw))
+        self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
+        self.fill_value = flv
+
+    def __reduce__(self):
+        """Return a 3-tuple for pickling a MaskedArray.
+
+        """
+        return (_mareconstruct,
+                (self.__class__, self._baseclass, (0,), 'b',),
+                self.__getstate__())
+
+    def __deepcopy__(self, memo=None):
+        from copy import deepcopy
+        copied = MaskedArray.__new__(type(self), self, copy=True)
+        if memo is None:
+            memo = {}
+        memo[id(self)] = copied
+        for (k, v) in self.__dict__.items():
+            copied.__dict__[k] = deepcopy(v, memo)
+        # as clearly documented for np.copy(), you need to use
+        # deepcopy() directly for arrays of object type that may
+        # contain compound types--you cannot depend on normal
+        # copy semantics to do the right thing here
+        copied._data[...] = deepcopy(copied._data)
+        return copied
+
+
+def _mareconstruct(subtype, baseclass, baseshape, basetype,):
+    """Internal function that builds a new MaskedArray from the
+    information stored in a pickle.
+
+    """
+    _data = ndarray.__new__(baseclass, baseshape, basetype)
+    _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
+
+class mvoid(MaskedArray):
+    """
+    Fake a 'void' object to use for masked array with structured dtypes.
+    """
+
+    def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
+                hardmask=False, copy=False, subok=True):
+        _data = np.array(data, copy=copy, subok=subok, dtype=dtype)
+        _data = _data.view(self)
+        _data._hardmask = hardmask
+        if mask is not nomask:
+            if isinstance(mask, np.void):
+                _data._mask = mask
+            else:
+                try:
+                    # Mask is already a 0D array
+                    _data._mask = np.void(mask)
+                except TypeError:
+                    # Transform the mask to a void
+                    mdtype = make_mask_descr(dtype)
+                    _data._mask = np.array(mask, dtype=mdtype)[()]
+        if fill_value is not None:
+            _data.fill_value = fill_value
+        return _data
+
+    @property
+    def _data(self):
+        # Make sure that the _data part is a np.void
+        return super()._data[()]
+
+    def __getitem__(self, indx):
+        """
+        Get the index.
+
+        """
+        m = self._mask
+        if isinstance(m[indx], ndarray):
+            # Can happen when indx is a multi-dimensional field:
+            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
+            #     False],)], dtype=[("A", ">i2", (2,))])
+            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
+            # and we can not say masked/unmasked.
+            # The result is no longer mvoid!
+            # See also issue #6724.
+ return masked_array( + data=self._data[indx], mask=m[indx], + fill_value=self._fill_value[indx], + hard_mask=self._hardmask) + if m is not nomask and m[indx]: + return masked + return self._data[indx] + + def __setitem__(self, indx, value): + self._data[indx] = value + if self._hardmask: + self._mask[indx] |= getattr(value, "_mask", False) + else: + self._mask[indx] = getattr(value, "_mask", False) + + def __str__(self): + m = self._mask + if m is nomask: + return str(self._data) + + rdtype = _replace_dtype_fields(self._data.dtype, "O") + data_arr = super()._data + res = data_arr.astype(rdtype) + _recursive_printoption(res, self._mask, masked_print_option) + return str(res) + + __repr__ = __str__ + + def __iter__(self): + "Defines an iterator for mvoid" + (_data, _mask) = (self._data, self._mask) + if _mask is nomask: + yield from _data + else: + for (d, m) in zip(_data, _mask): + if m: + yield masked + else: + yield d + + def __len__(self): + return self._data.__len__() + + def filled(self, fill_value=None): + """ + Return a copy with masked fields filled with a given value. + + Parameters + ---------- + fill_value : array_like, optional + The value to use for invalid entries. Can be scalar or + non-scalar. If latter is the case, the filled array should + be broadcastable over input array. Default is None, in + which case the `fill_value` attribute is used instead. + + Returns + ------- + filled_void + A `np.void` object + + See Also + -------- + MaskedArray.filled + + """ + return asarray(self).filled(fill_value)[()] + + def tolist(self): + """ + Transforms the mvoid object into a tuple. + + Masked fields are replaced by None. + + Returns + ------- + returned_tuple + Tuple of fields + """ + _mask = self._mask + if _mask is nomask: + return self._data.tolist() + result = [] + for (d, m) in zip(self._data, self._mask): + if m: + result.append(None) + else: + # .item() makes sure we return a standard Python object + result.append(d.item()) + return tuple(result) + + +############################################################################## +# Shortcuts # +############################################################################## + + +def isMaskedArray(x): + """ + Test whether input is an instance of MaskedArray. + + This function returns True if `x` is an instance of MaskedArray + and returns False otherwise. Any object is accepted as input. + + Parameters + ---------- + x : object + Object to test. + + Returns + ------- + result : bool + True if `x` is a MaskedArray. + + See Also + -------- + isMA : Alias to isMaskedArray. + isarray : Alias to isMaskedArray. 
+ + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.eye(3, 3) + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> m = ma.masked_values(a, 0) + >>> m + masked_array( + data=[[1.0, --, --], + [--, 1.0, --], + [--, --, 1.0]], + mask=[[False, True, True], + [ True, False, True], + [ True, True, False]], + fill_value=0.0) + >>> ma.isMaskedArray(a) + False + >>> ma.isMaskedArray(m) + True + >>> ma.isMaskedArray([0, 1, 2]) + False + + """ + return isinstance(x, MaskedArray) + + +isarray = isMaskedArray +isMA = isMaskedArray # backward compatibility + + +class MaskedConstant(MaskedArray): + # the lone np.ma.masked instance + __singleton = None + + @classmethod + def __has_singleton(cls): + # second case ensures `cls.__singleton` is not just a view on the + # superclass singleton + return cls.__singleton is not None and type(cls.__singleton) is cls + + def __new__(cls): + if not cls.__has_singleton(): + # We define the masked singleton as a float for higher precedence. + # Note that it can be tricky sometimes w/ type comparison + data = np.array(0.) + mask = np.array(True) + + # prevent any modifications + data.flags.writeable = False + mask.flags.writeable = False + + # don't fall back on MaskedArray.__new__(MaskedConstant), since + # that might confuse it - this way, the construction is entirely + # within our control + cls.__singleton = MaskedArray(data, mask=mask).view(cls) + + return cls.__singleton + + def __array_finalize__(self, obj): + if not self.__has_singleton(): + # this handles the `.view` in __new__, which we want to copy across + # properties normally + return super().__array_finalize__(obj) + elif self is self.__singleton: + # not clear how this can happen, play it safe + pass + else: + # everywhere else, we want to downcast to MaskedArray, to prevent a + # duplicate maskedconstant. + self.__class__ = MaskedArray + MaskedArray.__array_finalize__(self, obj) + + def __array_wrap__(self, obj, context=None, return_scalar=False): + return self.view(MaskedArray).__array_wrap__(obj, context) + + def __str__(self): + return str(masked_print_option._display) + + def __repr__(self): + if self is MaskedConstant.__singleton: + return 'masked' + else: + # it's a subclass, or something is wrong, make it obvious + return object.__repr__(self) + + def __format__(self, format_spec): + # Replace ndarray.__format__ with the default, which supports no + # format characters. + # Supporting format characters is unwise here, because we do not know + # what type the user was expecting - better to not guess. + try: + return object.__format__(self, format_spec) + except TypeError: + # 2020-03-23, NumPy 1.19.0 + warnings.warn( + "Format strings passed to MaskedConstant are ignored," + " but in future may error or produce different behavior", + FutureWarning, stacklevel=2 + ) + return object.__format__(self, "") + + def __reduce__(self): + """Override of MaskedArray's __reduce__. + """ + return (self.__class__, ()) + + # inplace operations have no effect. We have to override them to avoid + # trying to modify the readonly data and mask arrays + def __iop__(self, other): + return self + __iadd__ = \ + __isub__ = \ + __imul__ = \ + __ifloordiv__ = \ + __itruediv__ = \ + __ipow__ = \ + __iop__ + del __iop__ # don't leave this around + + def copy(self, *args, **kwargs): + """ Copy is a no-op on the maskedconstant, as it is a scalar """ + # maskedconstant is a scalar, so copy doesn't need to copy. There's + # precedent for this with `np.bool` scalars. 
+ return self + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + def __setattr__(self, attr, value): + if not self.__has_singleton(): + # allow the singleton to be initialized + return super().__setattr__(attr, value) + elif self is self.__singleton: + raise AttributeError( + f"attributes of {self!r} are not writeable") + else: + # duplicate instance - we can end up here from __array_finalize__, + # where we set the __class__ attribute + return super().__setattr__(attr, value) + + +masked = masked_singleton = MaskedConstant() +masked_array = MaskedArray + + +def array(data, dtype=None, copy=False, order=None, + mask=nomask, fill_value=None, keep_mask=True, + hard_mask=False, shrink=True, subok=True, ndmin=0): + """ + Shortcut to MaskedArray. + + The options are in a different order for convenience and backwards + compatibility. + + """ + return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, + subok=subok, keep_mask=keep_mask, + hard_mask=hard_mask, fill_value=fill_value, + ndmin=ndmin, shrink=shrink, order=order) + + +array.__doc__ = masked_array.__doc__ + + +def is_masked(x): + """ + Determine whether input has masked values. + + Accepts any object as input, but always returns False unless the + input is a MaskedArray containing masked values. + + Parameters + ---------- + x : array_like + Array to check for masked values. + + Returns + ------- + result : bool + True if `x` is a MaskedArray with masked values, False otherwise. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> x + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_masked(x) + True + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) + >>> x + masked_array(data=[0, 1, 0, 2, 3], + mask=False, + fill_value=42) + >>> ma.is_masked(x) + False + + Always returns False if `x` isn't a MaskedArray. + + >>> x = [False, True, False] + >>> ma.is_masked(x) + False + >>> x = 'a string' + >>> ma.is_masked(x) + False + + """ + m = getmask(x) + if m is nomask: + return False + elif m.any(): + return True + return False + + +############################################################################## +# Extrema functions # +############################################################################## + + +class _extrema_operation(_MaskedUFunc): + """ + Generic class for maximum/minimum functions. + + .. note:: + This is the base class for `_maximum_operation` and + `_minimum_operation`. + + """ + def __init__(self, ufunc, compare, fill_value): + super().__init__(ufunc) + self.compare = compare + self.fill_value_func = fill_value + + def __call__(self, a, b): + "Executes the call behavior." + + return where(self.compare(a, b), a, b) + + def reduce(self, target, axis=np._NoValue): + "Reduce target along the given axis." + target = narray(target, copy=None, subok=True) + m = getmask(target) + + if axis is np._NoValue and target.ndim > 1: + name = self.__name__ + # 2017-05-06, Numpy 1.13.0: warn on axis default + warnings.warn( + f"In the future the default for ma.{name}.reduce will be axis=0, " + f"not the current None, to match np.{name}.reduce. 
" + "Explicitly pass 0 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=2) + axis = None + + if axis is not np._NoValue: + kwargs = {'axis': axis} + else: + kwargs = {} + + if m is nomask: + t = self.f.reduce(target, **kwargs) + else: + target = target.filled( + self.fill_value_func(target)).view(type(target)) + t = self.f.reduce(target, **kwargs) + m = umath.logical_and.reduce(m, **kwargs) + if hasattr(t, '_mask'): + t._mask = m + elif m: + t = masked + return t + + def outer(self, a, b): + "Return the function applied to the outer product of a and b." + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = logical_or.outer(ma, mb) + result = self.f.outer(filled(a), filled(b)) + if not isinstance(result, MaskedArray): + result = result.view(MaskedArray) + result._mask = m + return result + +def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a min method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).min(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +min.__doc__ = MaskedArray.min.__doc__ + +def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a max method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).max(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +max.__doc__ = MaskedArray.max.__doc__ + + +def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + try: + return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a ptp method or if the method doesn't accept + # a fill_value argument + return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +ptp.__doc__ = MaskedArray.ptp.__doc__ + + +############################################################################## +# Definition of functions from the corresponding methods # +############################################################################## + + +class _frommethod: + """ + Define functions from existing MaskedArray methods. + + Parameters + ---------- + methodname : str + Name of the method to transform. + + """ + + def __init__(self, methodname, reversed=False): + self.__name__ = methodname + self.__qualname__ = methodname + self.__doc__ = self.getdoc() + self.reversed = reversed + + def getdoc(self): + "Return the doc of the function (from the doc of the method)." 
+        meth = getattr(MaskedArray, self.__name__, None) or\
+            getattr(np, self.__name__, None)
+        signature = self.__name__ + get_object_signature(meth)
+        if meth is not None:
+            doc = f"""    {signature}
+{getattr(meth, '__doc__', None)}"""
+            return doc
+
+    def __call__(self, a, *args, **params):
+        if self.reversed:
+            args = list(args)
+            a, args[0] = args[0], a
+
+        marr = asanyarray(a)
+        method_name = self.__name__
+        method = getattr(type(marr), method_name, None)
+        if method is None:
+            # use the corresponding np function
+            method = getattr(np, method_name)
+
+        return method(marr, *args, **params)
+
+
+all = _frommethod('all')
+anomalies = anom = _frommethod('anom')
+any = _frommethod('any')
+compress = _frommethod('compress', reversed=True)
+cumprod = _frommethod('cumprod')
+cumsum = _frommethod('cumsum')
+copy = _frommethod('copy')
+diagonal = _frommethod('diagonal')
+harden_mask = _frommethod('harden_mask')
+ids = _frommethod('ids')
+maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value)
+mean = _frommethod('mean')
+minimum = _extrema_operation(umath.minimum, less, minimum_fill_value)
+nonzero = _frommethod('nonzero')
+prod = _frommethod('prod')
+product = _frommethod('product')
+ravel = _frommethod('ravel')
+repeat = _frommethod('repeat')
+shrink_mask = _frommethod('shrink_mask')
+soften_mask = _frommethod('soften_mask')
+std = _frommethod('std')
+sum = _frommethod('sum')
+swapaxes = _frommethod('swapaxes')
+#take = _frommethod('take')
+trace = _frommethod('trace')
+var = _frommethod('var')
+
+count = _frommethod('count')
+
+def take(a, indices, axis=None, out=None, mode='raise'):
+    """
+    Take elements from a masked array along an axis.
+
+    Refer to `MaskedArray.take` for the full documentation.
+
+    See Also
+    --------
+    MaskedArray.take : equivalent method
+    """
+    a = masked_array(a)
+    return a.take(indices, axis=axis, out=out, mode=mode)
+
+
+def power(a, b, third=None):
+    """
+    Returns element-wise base array raised to power from second array.
+
+    This is the masked array version of `numpy.power`. For details see
+    `numpy.power`.
+
+    See Also
+    --------
+    numpy.power
+
+    Notes
+    -----
+    The *out* argument to `numpy.power` is not supported, `third` has to be
+    None.
+ + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.power(masked_x, 2) + masked_array(data=[125.43999999999998, 15.784728999999999, + 0.6416010000000001, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> y = [-0.5, 2, 0, 17] + >>> masked_y = ma.masked_array(y, mask) + >>> masked_y + masked_array(data=[-0.5, 2.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.power(masked_x, masked_y) + masked_array(data=[0.2988071523335984, 15.784728999999999, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + + """ + if third is not None: + raise MaskError("3-argument power not supported.") + # Get the masks + ma = getmask(a) + mb = getmask(b) + m = mask_or(ma, mb) + # Get the rawdata + fa = getdata(a) + fb = getdata(b) + # Get the type of the result (so that we preserve subclasses) + if isinstance(a, MaskedArray): + basetype = type(a) + else: + basetype = MaskedArray + # Get the result and view it as a (subclass of) MaskedArray + with np.errstate(divide='ignore', invalid='ignore'): + result = np.where(m, fa, umath.power(fa, fb)).view(basetype) + result._update_from(a) + # Find where we're in trouble w/ NaNs and Infs + invalid = np.logical_not(np.isfinite(result.view(ndarray))) + # Add the initial mask + if m is not nomask: + if not result.ndim: + return masked + result._mask = np.logical_or(m, invalid) + # Fix the invalid parts + if invalid.any(): + if not result.ndim: + return masked + elif result._mask is nomask: + result._mask = invalid + result._data[invalid] = result.fill_value + return result + + +argmin = _frommethod('argmin') +argmax = _frommethod('argmax') + +def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, + fill_value=None, *, stable=None): + "Function version of the eponymous method." + a = np.asanyarray(a) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(a) + + if isinstance(a, MaskedArray): + return a.argsort(axis=axis, kind=kind, order=order, endwith=endwith, + fill_value=fill_value, stable=None) + else: + return a.argsort(axis=axis, kind=kind, order=order, stable=None) + + +argsort.__doc__ = MaskedArray.argsort.__doc__ + +def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, + stable=None): + """ + Return a sorted copy of the masked array. + + Equivalent to creating a copy of the array + and applying the MaskedArray ``sort()`` method. 
+ + Refer to ``MaskedArray.sort`` for the full documentation + + See Also + -------- + MaskedArray.sort : equivalent method + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.sort(masked_x) + masked_array(data=[-3.973, 0.801, 11.2, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + a = np.array(a, copy=True, subok=True) + if axis is None: + a = a.flatten() + axis = 0 + + if isinstance(a, MaskedArray): + a.sort(axis=axis, kind=kind, order=order, endwith=endwith, + fill_value=fill_value, stable=stable) + else: + a.sort(axis=axis, kind=kind, order=order, stable=stable) + return a + + +def compressed(x): + """ + Return all the non-masked data as a 1-D array. + + This function is equivalent to calling the "compressed" method of a + `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details. + + See Also + -------- + ma.MaskedArray.compressed : Equivalent method. + + Examples + -------- + >>> import numpy as np + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[1, --, 0], + [2, --, 3], + [7, 4, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=999999) + + Compress the masked array into a 1-D array of non-masked values: + + >>> np.ma.compressed(masked_x) + array([1, 0, 2, 3, 7, 4]) + + """ + return asanyarray(x).compressed() + + +def concatenate(arrays, axis=0): + """ + Concatenate a sequence of arrays along the given axis. + + Parameters + ---------- + arrays : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. Default is 0. + + Returns + ------- + result : MaskedArray + The concatenated array with any masked entries preserved. + + See Also + -------- + numpy.concatenate : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.arange(3) + >>> a[1] = ma.masked + >>> b = ma.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + masked_array(data=[2, 3, 4], + mask=False, + fill_value=999999) + >>> ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + d = np.concatenate([getdata(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask. + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + # OK, so we have to concatenate the masks + dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + dm = dm.reshape(d.shape) + + # If we decide to keep a '_shrinkmask' option, we want to check that + # all of them are True, and then check for dm.any() + data._mask = _shrink_mask(dm) + return data + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + This function is the equivalent of `numpy.diag` that takes masked + values into account, see `numpy.diag` for details. 
+ + See Also + -------- + numpy.diag : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[11.2, -3.973, 18], [0.801, -1.41, 12], [7, 33, -12]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[11.2, --, 18.0], + [0.801, --, 12.0], + [7.0, 33.0, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=1e+20) + + Isolate the main diagonal from the masked array: + + >>> np.ma.diag(masked_x) + masked_array(data=[11.2, --, --], + mask=[False, True, True], + fill_value=1e+20) + + Isolate the first diagonal below the main diagonal: + + >>> np.ma.diag(masked_x, -1) + masked_array(data=[0.801, 33.0], + mask=[False, False], + fill_value=1e+20) + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + +def left_shift(a, n): + """ + Shift the bits of an integer to the left. + + This is the masked array version of `numpy.left_shift`, for details + see that function. + + See Also + -------- + numpy.left_shift + + Examples + -------- + Shift with a masked array: + + >>> arr = np.ma.array([10, 20, 30], mask=[False, True, False]) + >>> np.ma.left_shift(arr, 1) + masked_array(data=[20, --, 60], + mask=[False, True, False], + fill_value=999999) + + Large shift: + + >>> np.ma.left_shift(10, 10) + masked_array(data=10240, + mask=False, + fill_value=999999) + + Shift with a scalar and an array: + + >>> scalar = 10 + >>> arr = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> np.ma.left_shift(scalar, arr) + masked_array(data=[20, --, 80], + mask=[False, True, False], + fill_value=999999) + + + """ + m = getmask(a) + if m is nomask: + d = umath.left_shift(filled(a), n) + return masked_array(d) + else: + d = umath.left_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def right_shift(a, n): + """ + Shift the bits of an integer to the right. + + This is the masked array version of `numpy.right_shift`, for details + see that function. + + See Also + -------- + numpy.right_shift + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11, 3, 8, 1] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11, 3, 8, --], + mask=[False, False, False, True], + fill_value=999999) + >>> ma.right_shift(masked_x,1) + masked_array(data=[5, 1, 4, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + m = getmask(a) + if m is nomask: + d = umath.right_shift(filled(a), n) + return masked_array(d) + else: + d = umath.right_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def put(a, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + This function is equivalent to `MaskedArray.put`, see that method + for details. 
+ + See Also + -------- + MaskedArray.put + + Examples + -------- + Putting values in a masked array: + + >>> a = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.put(a, [1, 3], [10, 30]) + >>> a + masked_array(data=[ 1, 10, 3, 30], + mask=False, + fill_value=999999) + + Using put with a 2D array: + + >>> b = np.ma.array([[1, 2], [3, 4]], mask=[[False, True], [False, False]]) + >>> np.ma.put(b, [[0, 1], [1, 0]], [[10, 20], [30, 40]]) + >>> b + masked_array( + data=[[40, 30], + [ 3, 4]], + mask=False, + fill_value=999999) + + """ + # We can't use 'frommethod', the order of arguments is different + try: + return a.put(indices, values, mode=mode) + except AttributeError: + return np.asarray(a).put(indices, values, mode=mode) + + +def putmask(a, mask, values): # , mode='raise'): + """ + Changes elements of an array based on conditional and input values. + + This is the masked array version of `numpy.putmask`, for details see + `numpy.putmask`. + + See Also + -------- + numpy.putmask + + Notes + ----- + Using a masked array as `values` will **not** transform a `ndarray` into + a `MaskedArray`. + + Examples + -------- + >>> import numpy as np + >>> arr = [[1, 2], [3, 4]] + >>> mask = [[1, 0], [0, 0]] + >>> x = np.ma.array(arr, mask=mask) + >>> np.ma.putmask(x, x < 4, 10*x) + >>> x + masked_array( + data=[[--, 20], + [30, 4]], + mask=[[ True, False], + [False, False]], + fill_value=999999) + >>> x.data + array([[10, 20], + [30, 4]]) + + """ + # We can't use 'frommethod', the order of arguments is different + if not isinstance(a, MaskedArray): + a = a.view(MaskedArray) + (valdata, valmask) = (getdata(values), getmask(values)) + if getmask(a) is nomask: + if valmask is not nomask: + a._sharedmask = True + a._mask = make_mask_none(a.shape, a.dtype) + np.copyto(a._mask, valmask, where=mask) + elif a._hardmask: + if valmask is not nomask: + m = a._mask.copy() + np.copyto(m, valmask, where=mask) + a.mask |= m + else: + if valmask is nomask: + valmask = getmaskarray(values) + np.copyto(a._mask, valmask, where=mask) + np.copyto(a._data, valdata, where=mask) + + +def transpose(a, axes=None): + """ + Permute the dimensions of an array. + + This function is exactly equivalent to `numpy.transpose`. + + See Also + -------- + numpy.transpose : Equivalent function in top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.arange(4).reshape((2,2)) + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[0, 1], + [2, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + + >>> ma.transpose(x) + masked_array( + data=[[0, 2], + [1, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + """ + # We can't use 'frommethod', as 'transpose' doesn't take keywords + try: + return a.transpose(axes) + except AttributeError: + return np.asarray(a).transpose(axes).view(MaskedArray) + + +def reshape(a, new_shape, order='C'): + """ + Returns an array containing the same data with a new shape. + + Refer to `MaskedArray.reshape` for full documentation. 
+
+    See Also
+    --------
+    MaskedArray.reshape : equivalent function
+
+    Examples
+    --------
+    Reshaping a 1-D array:
+
+    >>> a = np.ma.array([1, 2, 3, 4])
+    >>> np.ma.reshape(a, (2, 2))
+    masked_array(
+      data=[[1, 2],
+            [3, 4]],
+      mask=False,
+      fill_value=999999)
+
+    Reshaping a 2-D array:
+
+    >>> b = np.ma.array([[1, 2], [3, 4]])
+    >>> np.ma.reshape(b, (1, 4))
+    masked_array(data=[[1, 2, 3, 4]],
+                 mask=False,
+           fill_value=999999)
+
+    Reshaping a 1-D array with a mask:
+
+    >>> c = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False])
+    >>> np.ma.reshape(c, (2, 2))
+    masked_array(
+      data=[[1, --],
+            [3, 4]],
+      mask=[[False,  True],
+            [False, False]],
+      fill_value=999999)
+
+    """
+    # We can't use 'frommethod'; it whines about some parameters.
+    try:
+        return a.reshape(new_shape, order=order)
+    except AttributeError:
+        _tmp = np.asarray(a).reshape(new_shape, order=order)
+        return _tmp.view(MaskedArray)
+
+
+def resize(x, new_shape):
+    """
+    Return a new masked array with the specified size and shape.
+
+    This is the masked equivalent of the `numpy.resize` function. The new
+    array is filled with repeated copies of `x` (in the order that the
+    data are stored in memory). If `x` is masked, the new array will be
+    masked, and the new mask will be a repetition of the old one.
+
+    See Also
+    --------
+    numpy.resize : Equivalent function in the top level NumPy module.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import numpy.ma as ma
+    >>> a = ma.array([[1, 2], [3, 4]])
+    >>> a[0, 1] = ma.masked
+    >>> a
+    masked_array(
+      data=[[1, --],
+            [3, 4]],
+      mask=[[False,  True],
+            [False, False]],
+      fill_value=999999)
+    >>> np.resize(a, (3, 3))
+    masked_array(
+      data=[[1, 2, 3],
+            [4, 1, 2],
+            [3, 4, 1]],
+      mask=False,
+      fill_value=999999)
+    >>> ma.resize(a, (3, 3))
+    masked_array(
+      data=[[1, --, 3],
+            [4, 1, --],
+            [3, 4, 1]],
+      mask=[[False,  True, False],
+            [False, False,  True],
+            [False, False, False]],
+      fill_value=999999)
+
+    A MaskedArray is always returned, regardless of the input type.
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> ma.resize(a, (3, 3))
+    masked_array(
+      data=[[1, 2, 3],
+            [4, 1, 2],
+            [3, 4, 1]],
+      mask=False,
+      fill_value=999999)
+
+    """
+    # We can't use _frommethods here, as N.resize is notoriously whiny.
+    m = getmask(x)
+    if m is not nomask:
+        m = np.resize(m, new_shape)
+    result = np.resize(x, new_shape).view(get_masked_subclass(x))
+    if result.ndim:
+        result._mask = m
+    return result
+
+
+def ndim(obj):
+    """
+    maskedarray version of the numpy function.
+
+    """
+    return np.ndim(getdata(obj))
+
+
+ndim.__doc__ = np.ndim.__doc__
+
+
+def shape(obj):
+    "maskedarray version of the numpy function."
+    return np.shape(getdata(obj))
+
+
+shape.__doc__ = np.shape.__doc__
+
+
+def size(obj, axis=None):
+    "maskedarray version of the numpy function."
+    return np.size(getdata(obj), axis)
+
+
+size.__doc__ = np.size.__doc__
+
+
+def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
+    """
+    Calculate the n-th discrete difference along the given axis.
+
+    The first difference is given by ``out[i] = a[i+1] - a[i]`` along
+    the given axis, higher differences are calculated by using `diff`
+    recursively.
+    Preserves the input mask.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array
+    n : int, optional
+        The number of times values are differenced. If zero, the input
+        is returned as-is.
+    axis : int, optional
+        The axis along which the difference is taken, default is the
+        last axis.
+    prepend, append : array_like, optional
+        Values to prepend or append to `a` along axis prior to
+        performing the difference.  Scalar values are expanded to
+        arrays with length 1 in the direction of axis and the shape
+        of the input array along all other axes.  Otherwise the
+        dimension and shape must match `a` except along axis.
+
+    Returns
+    -------
+    diff : MaskedArray
+        The n-th differences. The shape of the output is the same as `a`
+        except along `axis` where the dimension is smaller by `n`. The
+        type of the output is the same as the type of the difference
+        between any two elements of `a`. This is the same as the type of
+        `a` in most cases. A notable exception is `datetime64`, which
+        results in a `timedelta64` output array.
+
+    See Also
+    --------
+    numpy.diff : Equivalent function in the top-level NumPy module.
+
+    Notes
+    -----
+    Type is preserved for boolean arrays, so the result will contain
+    `False` when consecutive elements are the same and `True` when they
+    differ.
+
+    For unsigned integer arrays, the results will also be unsigned. This
+    should not be surprising, as the result is consistent with
+    calculating the difference directly:
+
+    >>> u8_arr = np.array([1, 0], dtype=np.uint8)
+    >>> np.ma.diff(u8_arr)
+    masked_array(data=[255],
+                 mask=False,
+           fill_value=np.uint64(999999),
+                dtype=uint8)
+    >>> u8_arr[1,...] - u8_arr[0,...]
+    np.uint8(255)
+
+    If this is not desirable, then the array should be cast to a larger
+    integer type first:
+
+    >>> i16_arr = u8_arr.astype(np.int16)
+    >>> np.ma.diff(i16_arr)
+    masked_array(data=[-1],
+                 mask=False,
+           fill_value=np.int64(999999),
+                dtype=int16)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([1, 2, 3, 4, 7, 0, 2, 3])
+    >>> x = np.ma.masked_where(a < 2, a)
+    >>> np.ma.diff(x)
+    masked_array(data=[--, 1, 1, 3, --, --, 1],
+                 mask=[ True, False, False, False,  True,  True, False],
+           fill_value=999999)
+
+    >>> np.ma.diff(x, n=2)
+    masked_array(data=[--, 0, 2, --, --, --],
+                 mask=[ True, False, False,  True,  True,  True],
+           fill_value=999999)
+
+    >>> a = np.array([[1, 3, 1, 5, 10], [0, 1, 5, 6, 8]])
+    >>> x = np.ma.masked_equal(a, value=1)
+    >>> np.ma.diff(x)
+    masked_array(
+      data=[[--, --, --, 5],
+            [--, --, 1, 2]],
+      mask=[[ True,  True,  True, False],
+            [ True,  True, False, False]],
+      fill_value=1)
+
+    >>> np.ma.diff(x, axis=0)
+    masked_array(data=[[--, --, --, 1, -2]],
+                 mask=[[ True,  True,  True, False, False]],
+           fill_value=1)
+
+    """
+    if n == 0:
+        return a
+    if n < 0:
+        raise ValueError("order must be non-negative but got " + repr(n))
+
+    a = np.ma.asanyarray(a)
+    if a.ndim == 0:
+        raise ValueError(
+            "diff requires input that is at least one dimensional"
+            )
+
+    combined = []
+    if prepend is not np._NoValue:
+        prepend = np.ma.asanyarray(prepend)
+        if prepend.ndim == 0:
+            shape = list(a.shape)
+            shape[axis] = 1
+            prepend = np.broadcast_to(prepend, tuple(shape))
+        combined.append(prepend)
+
+    combined.append(a)
+
+    if append is not np._NoValue:
+        append = np.ma.asanyarray(append)
+        if append.ndim == 0:
+            shape = list(a.shape)
+            shape[axis] = 1
+            append = np.broadcast_to(append, tuple(shape))
+        combined.append(append)
+
+    if len(combined) > 1:
+        a = np.ma.concatenate(combined, axis)
+
+    # GH 22465 np.diff without prepend/append preserves the mask
+    return np.diff(a, n, axis)
+
+
+##############################################################################
+#                            Extra functions                                 #
+##############################################################################
+
+
+def where(condition, x=_NoValue, y=_NoValue):
""" + Return a masked array with elements from `x` or `y`, depending on condition. + + .. note:: + When only `condition` is provided, this function is identical to + `nonzero`. The rest of this documentation covers only the case where + all three arguments are provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like, optional + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : MaskedArray + An masked array with `masked` elements where the condition is masked, + elements from `x` where `condition` is True, and elements from `y` + elsewhere. + + See Also + -------- + numpy.where : Equivalent function in the top-level NumPy module. + nonzero : The function that is called when x and y are omitted + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], + ... [1, 0, 1], + ... [0, 1, 0]]) + >>> x + masked_array( + data=[[0.0, --, 2.0], + [--, 4.0, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + >>> np.ma.where(x > 5, x, -3.1416) + masked_array( + data=[[-3.1416, --, -3.1416], + [--, -3.1416, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + + """ + + # handle the single-argument case + missing = (x is _NoValue, y is _NoValue).count(True) + if missing == 1: + raise ValueError("Must provide both 'x' and 'y' or neither.") + if missing == 2: + return nonzero(condition) + + # we only care if the condition is true - false or masked pick y + cf = filled(condition, False) + xd = getdata(x) + yd = getdata(y) + + # we need the full arrays here for correct final dimensions + cm = getmaskarray(condition) + xm = getmaskarray(x) + ym = getmaskarray(y) + + # deal with the fact that masked.dtype == float64, but we don't actually + # want to treat it as that. + if x is masked and y is not masked: + xd = np.zeros((), dtype=yd.dtype) + xm = np.ones((), dtype=ym.dtype) + elif y is masked and x is not masked: + yd = np.zeros((), dtype=xd.dtype) + ym = np.ones((), dtype=xm.dtype) + + data = np.where(cf, xd, yd) + mask = np.where(cf, xm, ym) + mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) + + # collapse the mask, for backwards compatibility + mask = _shrink_mask(mask) + + return masked_array(data, mask=mask) + + +def choose(indices, choices, out=None, mode='raise'): + """ + Use an index array to construct a new array from a list of choices. + + Given an array of integers and a list of n choice arrays, this method + will create a new array that merges each of the choice arrays. Where a + value in `index` is i, the new array will have the value that choices[i] + contains in the same place. + + Parameters + ---------- + indices : ndarray of ints + This array must contain integers in ``[0, n-1]``, where n is the + number of choices. + choices : sequence of arrays + Choice arrays. The index array and all of the choices should be + broadcastable to the same shape. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and `dtype`. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. 
+ + * 'raise' : raise an error + * 'wrap' : wrap around + * 'clip' : clip to the range + + Returns + ------- + merged_array : array + + See Also + -------- + choose : equivalent function + + Examples + -------- + >>> import numpy as np + >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) + >>> a = np.array([2, 1, 0]) + >>> np.ma.choose(a, choice) + masked_array(data=[3, 2, 1], + mask=False, + fill_value=999999) + + """ + def fmask(x): + "Returns the filled array, or True if masked." + if x is masked: + return True + return filled(x) + + def nmask(x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return True + return getmask(x) + # Get the indices. + c = filled(indices, 0) + # Get the masks. + masks = [nmask(x) for x in choices] + data = [fmask(x) for x in choices] + # Construct the mask + outputmask = np.choose(c, masks, mode=mode) + outputmask = make_mask(mask_or(outputmask, getmask(indices)), + copy=False, shrink=True) + # Get the choices. + d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(outputmask) + return out + d.__setmask__(outputmask) + return d + + +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x) + masked_array(data=[11.0, -4.0, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round(masked_x, decimals=1) + masked_array(data=[11.2, -4.0, 0.8, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x, decimals=-1) + masked_array(data=[10.0, -0.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + if out is None: + return np.round(a, decimals, out) + else: + np.round(getdata(a), decimals, out) + if hasattr(out, '_mask'): + out._mask = getmask(a) + return out + + +round = round_ + + +def _mask_propagate(a, axis): + """ + Mask whole 1-d vectors of an array that contain masked values. + """ + a = array(a, subok=False) + m = getmask(a) + if m is nomask or not m.any() or axis is None: + return a + a._mask = a._mask.copy() + axes = normalize_axis_tuple(axis, a.ndim) + for ax in axes: + a._mask |= m.any(axis=ax, keepdims=True) + return a + + +# Include masked dot here to avoid import problems in getting it from +# extras.py. Note that it is not included in __all__, but rather exported +# from extras in order to avoid backward compatibility problems. +def dot(a, b, strict=False, out=None): + """ + Return the dot product of two arrays. + + This function is the equivalent of `numpy.dot` that takes masked values + into account. 
Note that `strict` and `out` are in a different position
+    than in the method version. In order to maintain compatibility with the
+    corresponding method, it is recommended that the optional arguments be
+    treated as keyword only. At some point that may be mandatory.
+
+    Parameters
+    ----------
+    a, b : masked_array_like
+        Input arrays.
+    strict : bool, optional
+        Whether masked data are propagated (True) or set to 0 (False) for
+        the computation. Default is False.  Propagating the mask means that
+        if a masked value appears in a row or column, the whole row or
+        column is considered masked.
+    out : masked_array, optional
+        Output argument. This must have the exact kind that would be returned
+        if it was not used. In particular, it must have the right type, must be
+        C-contiguous, and its dtype must be the dtype that would be returned
+        for `dot(a,b)`. This is a performance feature. Therefore, if these
+        conditions are not met, an exception is raised, instead of attempting
+        to be flexible.
+
+    See Also
+    --------
+    numpy.dot : Equivalent function for ndarrays.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
+    >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
+    >>> np.ma.dot(a, b)
+    masked_array(
+      data=[[21, 26],
+            [45, 64]],
+      mask=[[False, False],
+            [False, False]],
+      fill_value=999999)
+    >>> np.ma.dot(a, b, strict=True)
+    masked_array(
+      data=[[--, --],
+            [--, 64]],
+      mask=[[ True,  True],
+            [ True, False]],
+      fill_value=999999)
+
+    """
+    if strict is True:
+        if np.ndim(a) == 0 or np.ndim(b) == 0:
+            pass
+        elif b.ndim == 1:
+            a = _mask_propagate(a, a.ndim - 1)
+            b = _mask_propagate(b, b.ndim - 1)
+        else:
+            a = _mask_propagate(a, a.ndim - 1)
+            b = _mask_propagate(b, b.ndim - 2)
+    am = ~getmaskarray(a)
+    bm = ~getmaskarray(b)
+
+    if out is None:
+        d = np.dot(filled(a, 0), filled(b, 0))
+        m = ~np.dot(am, bm)
+        if np.ndim(d) == 0:
+            d = np.asarray(d)
+        r = d.view(get_masked_subclass(a, b))
+        r.__setmask__(m)
+        return r
+    else:
+        d = np.dot(filled(a, 0), filled(b, 0), out._data)
+        if out.mask.shape != d.shape:
+            out._mask = np.empty(d.shape, MaskType)
+        np.dot(am, bm, out._mask)
+        np.logical_not(out._mask, out._mask)
+        return out
+
+
+def inner(a, b):
+    """
+    Returns the inner product of a and b for arrays of floating point types.
+
+    Like the generic NumPy equivalent, the product sum is over the last
+    dimension of `a` and `b`. The first argument is not conjugated.
+
+    """
+    fa = filled(a, 0)
+    fb = filled(b, 0)
+    if fa.ndim == 0:
+        fa.shape = (1,)
+    if fb.ndim == 0:
+        fb.shape = (1,)
+    return np.inner(fa, fb).view(MaskedArray)
+
+
+inner.__doc__ = doc_note(np.inner.__doc__,
+                         "Masked values are replaced by 0.")
+innerproduct = inner
+
+
+def outer(a, b):
+    "maskedarray version of the numpy function."
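+    # Compute the plain outer product on filled data, then rebuild the mask:
+    # an output cell stays unmasked only where both inputs were unmasked.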
+    fa = filled(a, 0).ravel()
+    fb = filled(b, 0).ravel()
+    d = np.outer(fa, fb)
+    ma = getmask(a)
+    mb = getmask(b)
+    if ma is nomask and mb is nomask:
+        return masked_array(d)
+    ma = getmaskarray(a)
+    mb = getmaskarray(b)
+    m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)
+    return masked_array(d, mask=m)
+
+
+outer.__doc__ = doc_note(np.outer.__doc__,
+                         "Masked values are replaced by 0.")
+outerproduct = outer
+
+
+def _convolve_or_correlate(f, a, v, mode, propagate_mask):
+    """
+    Helper function for ma.correlate and ma.convolve
+    """
+    if propagate_mask:
+        # results which are contributed to by either item in any pair being invalid
+        mask = (
+            f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
+            | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
+        )
+        data = f(getdata(a), getdata(v), mode=mode)
+    else:
+        # results which are not contributed to by any pair of valid elements
+        mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode)
+        data = f(filled(a, 0), filled(v, 0), mode=mode)
+
+    return masked_array(data, mask=mask)
+
+
+def correlate(a, v, mode='valid', propagate_mask=True):
+    """
+    Cross-correlation of two 1-dimensional sequences.
+
+    Parameters
+    ----------
+    a, v : array_like
+        Input sequences.
+    mode : {'valid', 'same', 'full'}, optional
+        Refer to the `np.convolve` docstring.  Note that the default
+        is 'valid', unlike `convolve`, which uses 'full'.
+    propagate_mask : bool
+        If True, then a result element is masked if any masked element
+        contributes towards it.  If False, then a result element is only
+        masked if no non-masked element contributes towards it.
+
+    Returns
+    -------
+    out : MaskedArray
+        Discrete cross-correlation of `a` and `v`.
+
+    See Also
+    --------
+    numpy.correlate : Equivalent function in the top-level NumPy module.
+
+    Examples
+    --------
+    Basic correlation:
+
+    >>> a = np.ma.array([1, 2, 3])
+    >>> v = np.ma.array([0, 1, 0])
+    >>> np.ma.correlate(a, v, mode='valid')
+    masked_array(data=[2],
+                 mask=[False],
+           fill_value=999999)
+
+    Correlation with masked elements:
+
+    >>> a = np.ma.array([1, 2, 3], mask=[False, True, False])
+    >>> v = np.ma.array([0, 1, 0])
+    >>> np.ma.correlate(a, v, mode='valid', propagate_mask=True)
+    masked_array(data=[--],
+                 mask=[ True],
+           fill_value=999999,
+                dtype=int64)
+
+    Correlation with different modes and mixed array types:
+
+    >>> a = np.ma.array([1, 2, 3])
+    >>> v = np.ma.array([0, 1, 0])
+    >>> np.ma.correlate(a, v, mode='full')
+    masked_array(data=[0, 1, 2, 3, 0],
+                 mask=[False, False, False, False, False],
+           fill_value=999999)
+
+    """
+    return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
+
+
+def convolve(a, v, mode='full', propagate_mask=True):
+    """
+    Returns the discrete, linear convolution of two one-dimensional sequences.
+
+    Parameters
+    ----------
+    a, v : array_like
+        Input sequences.
+    mode : {'valid', 'same', 'full'}, optional
+        Refer to the `np.convolve` docstring.
+    propagate_mask : bool
+        If True, then if any masked element is included in the sum for a result
+        element, then the result is masked.
+        If False, then the result element is only masked if no non-masked
+        element contributes towards it.
+
+    Returns
+    -------
+    out : MaskedArray
+        Discrete, linear convolution of `a` and `v`.
+
+    See Also
+    --------
+    numpy.convolve : Equivalent function in the top-level NumPy module.
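+
+    Examples
+    --------
+    A small illustration of the two ``propagate_mask`` modes (the inputs
+    here are illustrative; the outputs follow from the mask handling
+    implemented in `_convolve_or_correlate` above). By default, a masked
+    entry in `a` masks every output element whose sum it contributes to:
+
+    >>> a = np.ma.array([1, 2, 3], mask=[False, True, False])
+    >>> np.ma.convolve(a, [1, 1])
+    masked_array(data=[1, --, --, 3],
+                 mask=[False,  True,  True, False],
+           fill_value=999999)
+
+    With ``propagate_mask=False`` the masked entry is instead treated as
+    zero:
+
+    >>> np.ma.convolve(a, [1, 1], propagate_mask=False)
+    masked_array(data=[1, 1, 3, 3],
+                 mask=[False, False, False, False],
+           fill_value=999999)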
+ """ + return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask) + + +def allequal(a, b, fill_value=True): + """ + Return True if all entries of a and b are equal, using + fill_value as a truth value where either or both are masked. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + fill_value : bool, optional + Whether masked values in a or b are considered equal (True) or not + (False). + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, + then False is returned. + + See Also + -------- + all, any + numpy.ma.allclose + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + + >>> b = np.array([1e10, 1e-7, -42.0]) + >>> b + array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) + >>> np.ma.allequal(a, b, fill_value=False) + False + >>> np.ma.allequal(a, b) + True + + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + + +def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This function is equivalent to `allclose` except that masked values + are treated as equal (default) or unequal, depending on the `masked_equal` + argument. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + masked_equal : bool, optional + Whether masked values in `a` and `b` are considered equal (True) or not + (False). They are considered equal by default. + rtol : float, optional + Relative tolerance. The relative difference is equal to ``rtol * b``. + Default is 1e-5. + atol : float, optional + Absolute tolerance. The absolute difference is equal to `atol`. + Default is 1e-8. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any + numpy.allclose : the non-masked `allclose`. + + Notes + ----- + If the following equation is element-wise True, then `allclose` returns + True:: + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of `a` and `b` are equal subject to + given tolerances. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + False + + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + True + >>> np.ma.allclose(a, b, masked_equal=False) + False + + Masked values are not compared directly. 
+
+ >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
+ >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
+ >>> np.ma.allclose(a, b)
+ True
+ >>> np.ma.allclose(a, b, masked_equal=False)
+ False
+
+ """
+ x = masked_array(a, copy=False)
+ y = masked_array(b, copy=False)
+
+ # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+ # casting of x later.
+ # NOTE: We explicitly allow timedelta, which used to work. This could
+ # possibly be deprecated. See also gh-18286.
+ # timedelta works if `atol` is an integer or also a timedelta.
+ # Although, the default tolerances are unlikely to be useful
+ if y.dtype.kind != "m":
+ dtype = np.result_type(y, 1.)
+ if y.dtype != dtype:
+ y = masked_array(y, dtype=dtype, copy=False)
+
+ m = mask_or(getmask(x), getmask(y))
+ xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
+ # If we have some infs, they should fall at the same place.
+ if not np.all(xinf == filled(np.isinf(y), False)):
+ return False
+ # No infs at all
+ if not np.any(xinf):
+ d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+ masked_equal)
+ return np.all(d)
+
+ if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
+ return False
+ x = x[~xinf]
+ y = y[~xinf]
+
+ d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+ masked_equal)
+
+ return np.all(d)
+
+
+def asarray(a, dtype=None, order=None):
+ """
+ Convert the input to a masked array of the given data-type.
+
+ No copy is performed if the input is already an `ndarray`. If `a` is
+ a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to a masked array. This
+ includes lists, lists of tuples, tuples, tuples of tuples, tuples
+ of lists, ndarrays and masked arrays.
+ dtype : dtype, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F'}, optional
+ Whether to use row-major ('C') or column-major ('FORTRAN') memory
+ representation. Default is 'C'.
+
+ Returns
+ -------
+ out : MaskedArray
+ Masked array interpretation of `a`.
+
+ See Also
+ --------
+ asanyarray : Similar to `asarray`, but conserves subclasses.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(10.).reshape(2, 5)
+ >>> x
+ array([[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]])
+ >>> np.ma.asarray(x)
+ masked_array(
+ data=[[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]],
+ mask=False,
+ fill_value=1e+20)
+ >>> type(np.ma.asarray(x))
+ <class 'numpy.ma.core.MaskedArray'>
+
+ """
+ order = order or 'C'
+ return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
+ subok=False, order=order)
+
+
+def asanyarray(a, dtype=None):
+ """
+ Convert the input to a masked array, conserving subclasses.
+
+ If `a` is a subclass of `MaskedArray`, its class is conserved.
+ No copy is performed if the input is already an `ndarray`.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array.
+ dtype : dtype, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F'}, optional
+ Whether to use row-major ('C') or column-major ('FORTRAN') memory
+ representation. Default is 'C'.
+
+ Returns
+ -------
+ out : MaskedArray
+ MaskedArray interpretation of `a`.
+
+ See Also
+ --------
+ asarray : Similar to `asanyarray`, but does not conserve subclass.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(10.).reshape(2, 5)
+ >>> x
+ array([[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]])
+ >>> np.ma.asanyarray(x)
+ masked_array(
+ data=[[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]],
+ mask=False,
+ fill_value=1e+20)
+ >>> type(np.ma.asanyarray(x))
+ <class 'numpy.ma.core.MaskedArray'>
+
+ """
+ # workaround for #8666, to preserve identity. Ideally the bottom line
+ # would handle this for us.
+ if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
+ return a
+ return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
+
+
+##############################################################################
+# Pickling #
+##############################################################################
+
+
+def fromfile(file, dtype=float, count=-1, sep=''):
+ raise NotImplementedError(
+ "fromfile() not yet implemented for a MaskedArray.")
+
+
+def fromflex(fxarray):
+ """
+ Build a masked array from a suitable flexible-type array.
+
+ The input array has to have a data-type with ``_data`` and ``_mask``
+ fields. This type of array is output by `MaskedArray.toflex`.
+
+ Parameters
+ ----------
+ fxarray : ndarray
+ The structured input array, containing ``_data`` and ``_mask``
+ fields. If present, other fields are discarded.
+
+ Returns
+ -------
+ result : MaskedArray
+ The constructed masked array.
+
+ See Also
+ --------
+ MaskedArray.toflex : Build a flexible-type array from a masked array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
+ >>> rec = x.toflex()
+ >>> rec
+ array([[(0, False), (1, True), (2, False)],
+ [(3, True), (4, False), (5, True)],
+ [(6, False), (7, True), (8, False)]],
+ dtype=[('_data', '<i8'), ('_mask', '?')])
+ >>> x2 = np.ma.fromflex(rec)
+ >>> x2
+ masked_array(
+ data=[[0, --, 2],
+ [--, 4, --],
+ [6, --, 8]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+
+ Extra fields can be present in the structured array but are discarded:
+
+ >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
+ >>> rec2 = np.zeros((2, 2), dtype=dt)
+ >>> rec2
+ array([[(0, False, 0.), (0, False, 0.)],
+ [(0, False, 0.), (0, False, 0.)]],
+ dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])
+ >>> y = np.ma.fromflex(rec2)
+ >>> y
+ masked_array(
+ data=[[0, 0],
+ [0, 0]],
+ mask=[[False, False],
+ [False, False]],
+ fill_value=np.int64(999999),
+ dtype=int32)
+
+ """
+ return masked_array(fxarray['_data'], mask=fxarray['_mask'])
+
+
+class _convert2ma:
+
+ """
+ Convert functions from numpy to numpy.ma.
+
+ Parameters
+ ----------
+ funcname : str
+ Name of the function to transform.
+
+ """
+ __doc__ = None
+
+ def __init__(self, funcname, np_ret, np_ma_ret, params=None):
+ self._func = getattr(np, funcname)
+ self.__doc__ = self.getdoc(np_ret, np_ma_ret)
+ self._extras = params or {}
+
+ def getdoc(self, np_ret, np_ma_ret):
+ "Return the doc of the function (from the doc of the method)."
+ doc = getattr(self._func, '__doc__', None)
+ sig = get_object_signature(self._func)
+ if doc:
+ doc = self._replace_return_type(doc, np_ret, np_ma_ret)
+ # Add the signature of the function at the beginning of the doc
+ if sig:
+ sig = f"{self._func.__name__}{sig}\n"
+ doc = sig + doc
+ return doc
+
+ def _replace_return_type(self, doc, np_ret, np_ma_ret):
+ """
+ Replace documentation of ``np`` function's return type.
+
+ Replaces it with the proper type for the ``np.ma`` function.
+
+ Parameters
+ ----------
+ doc : str
+ The documentation of the ``np`` method.
+ np_ret : str
+ The return type string of the ``np`` method that we want to
+ replace. (e.g. "out : ndarray")
+ np_ma_ret : str
+ The return type string of the ``np.ma`` method.
+ (e.g. "out : MaskedArray")
+ """
+ if np_ret not in doc:
+ raise RuntimeError(
+ f"Failed to replace `{np_ret}` with `{np_ma_ret}`. "
+ f"The documentation string for return type, {np_ret}, is not "
+ f"found in the docstring for `np.{self._func.__name__}`. "
+ f"Fix the docstring for `np.{self._func.__name__}` or "
+ "update the expected string for return type."
+ )
+
+ return doc.replace(np_ret, np_ma_ret)
+
+ def __call__(self, *args, **params):
+ # Find the common parameters to the call and the definition
+ _extras = self._extras
+ common_params = set(params).intersection(_extras)
+ # Drop the common parameters from the call
+ for p in common_params:
+ _extras[p] = params.pop(p)
+ # Get the result
+ result = self._func.__call__(*args, **params).view(MaskedArray)
+ if "fill_value" in common_params:
+ result.fill_value = _extras.get("fill_value", None)
+ if "hardmask" in common_params:
+ result._hardmask = bool(_extras.get("hardmask", False))
+ return result
+
+
+arange = _convert2ma(
+ 'arange',
+ params={'fill_value': None, 'hardmask': False},
+ np_ret='arange : ndarray',
+ np_ma_ret='arange : MaskedArray',
+)
+clip = _convert2ma(
+ 'clip',
+ params={'fill_value': None, 'hardmask': False},
+ np_ret='clipped_array : ndarray',
+ np_ma_ret='clipped_array : MaskedArray',
+)
+empty = _convert2ma(
+ 'empty',
+ params={'fill_value': None, 'hardmask': False},
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+empty_like = _convert2ma(
+ 'empty_like',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+frombuffer = _convert2ma(
+ 'frombuffer',
+ np_ret='out : ndarray',
+ np_ma_ret='out: MaskedArray',
+)
+fromfunction = _convert2ma(
+ 'fromfunction',
+ np_ret='fromfunction : any',
+ np_ma_ret='fromfunction: MaskedArray',
+)
+identity = _convert2ma(
+ 'identity',
+ params={'fill_value': None, 'hardmask': False},
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+indices = _convert2ma(
+ 'indices',
+ params={'fill_value': None, 'hardmask': False},
+ np_ret='grid : one ndarray or tuple of ndarrays',
+ np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays',
+)
+ones = _convert2ma(
+ 'ones',
+ params={'fill_value': None, 'hardmask': False},
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+ones_like = _convert2ma(
+ 'ones_like',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+squeeze = _convert2ma(
+ 'squeeze',
+ params={'fill_value': None, 'hardmask': False},
+ np_ret='squeezed : ndarray',
+ np_ma_ret='squeezed : MaskedArray',
+)
+zeros = _convert2ma(
+ 'zeros',
+ params={'fill_value': None, 'hardmask': False},
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+zeros_like = _convert2ma(
+ 'zeros_like',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+
+
+def append(a, b, axis=None):
+ """Append values to the end of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Values are appended to a copy of this array.
+ b : array_like
+ These values are appended to a copy of `a`. It must be of the
+ correct shape (the same shape as `a`, excluding `axis`). If `axis`
+ is not specified, `b` can be any shape and will be flattened
+ before use.
+ axis : int, optional
+ The axis along which `b` is appended. If `axis` is not given,
+ both `a` and `b` are flattened before use.
+ + Returns + ------- + append : MaskedArray + A copy of `a` with `b` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. If + `axis` is None, the result is a flattened array. + + See Also + -------- + numpy.append : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_values([1, 2, 3], 2) + >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + >>> ma.append(a, b) + masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], + mask=[False, True, False, False, False, False, True, False, + False], + fill_value=999999) + """ + return concatenate([a, b], axis) diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/core.pyi b/.venv/lib/python3.12/site-packages/numpy/ma/core.pyi new file mode 100644 index 0000000..089469d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/core.pyi @@ -0,0 +1,1462 @@ +# pyright: reportIncompatibleMethodOverride=false +# ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 + +from collections.abc import Sequence +from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload + +from _typeshed import Incomplete +from typing_extensions import TypeIs, TypeVar + +import numpy as np +from numpy import ( + _HasDTypeWithRealAndImag, + _ModeKind, + _OrderKACF, + _PartitionKind, + _SortKind, + amax, + amin, + bool_, + bytes_, + character, + complexfloating, + datetime64, + dtype, + dtypes, + expand_dims, + float64, + floating, + generic, + int_, + integer, + intp, + ndarray, + object_, + str_, + timedelta64, +) +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + NDArray, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeBytes_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _DTypeLikeBool, + _IntLike_co, + _ScalarLike_co, + _Shape, + _ShapeLike, +) + +__all__ = [ + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", + "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log2", + "log10", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + 
"masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", +] + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) +_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +# A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` +_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +MaskType = bool_ +nomask: bool_[Literal[False]] + +class MaskedArrayFutureWarning(FutureWarning): ... +class MAError(Exception): ... +class MaskError(MAError): ... + +def default_fill_value(obj): ... +def minimum_fill_value(obj): ... +def maximum_fill_value(obj): ... +def set_fill_value(a, fill_value): ... +def common_fill_value(a, b): ... +@overload +def filled(a: ndarray[_ShapeT_co, _DTypeT_co], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... +@overload +def filled(a: _ArrayLike[_ScalarT_co], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT_co]: ... +@overload +def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Any]: ... +def getdata(a, subok=...): ... +get_data = getdata + +def fix_invalid(a, mask=..., copy=..., fill_value=...): ... + +class _MaskedUFunc: + f: Any + __doc__: Any + __name__: Any + def __init__(self, ufunc): ... + +class _MaskedUnaryOperation(_MaskedUFunc): + fill: Any + domain: Any + def __init__(self, mufunc, fill=..., domain=...): ... + def __call__(self, a, *args, **kwargs): ... + +class _MaskedBinaryOperation(_MaskedUFunc): + fillx: Any + filly: Any + def __init__(self, mbfunc, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... + def reduce(self, target, axis=..., dtype=...): ... + def outer(self, a, b): ... + def accumulate(self, target, axis=...): ... + +class _DomainedBinaryOperation(_MaskedUFunc): + domain: Any + fillx: Any + filly: Any + def __init__(self, dbfunc, domain, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... 
+ +exp: _MaskedUnaryOperation +conjugate: _MaskedUnaryOperation +sin: _MaskedUnaryOperation +cos: _MaskedUnaryOperation +arctan: _MaskedUnaryOperation +arcsinh: _MaskedUnaryOperation +sinh: _MaskedUnaryOperation +cosh: _MaskedUnaryOperation +tanh: _MaskedUnaryOperation +abs: _MaskedUnaryOperation +absolute: _MaskedUnaryOperation +angle: _MaskedUnaryOperation +fabs: _MaskedUnaryOperation +negative: _MaskedUnaryOperation +floor: _MaskedUnaryOperation +ceil: _MaskedUnaryOperation +around: _MaskedUnaryOperation +logical_not: _MaskedUnaryOperation +sqrt: _MaskedUnaryOperation +log: _MaskedUnaryOperation +log2: _MaskedUnaryOperation +log10: _MaskedUnaryOperation +tan: _MaskedUnaryOperation +arcsin: _MaskedUnaryOperation +arccos: _MaskedUnaryOperation +arccosh: _MaskedUnaryOperation +arctanh: _MaskedUnaryOperation + +add: _MaskedBinaryOperation +subtract: _MaskedBinaryOperation +multiply: _MaskedBinaryOperation +arctan2: _MaskedBinaryOperation +equal: _MaskedBinaryOperation +not_equal: _MaskedBinaryOperation +less_equal: _MaskedBinaryOperation +greater_equal: _MaskedBinaryOperation +less: _MaskedBinaryOperation +greater: _MaskedBinaryOperation +logical_and: _MaskedBinaryOperation +def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_or: _MaskedBinaryOperation +def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_xor: _MaskedBinaryOperation +bitwise_and: _MaskedBinaryOperation +bitwise_or: _MaskedBinaryOperation +bitwise_xor: _MaskedBinaryOperation +hypot: _MaskedBinaryOperation + +divide: _DomainedBinaryOperation +true_divide: _DomainedBinaryOperation +floor_divide: _DomainedBinaryOperation +remainder: _DomainedBinaryOperation +fmod: _DomainedBinaryOperation +mod: _DomainedBinaryOperation + +def make_mask_descr(ndtype): ... + +@overload +def getmask(a: _ScalarLike_co) -> bool_: ... +@overload +def getmask(a: MaskedArray[_ShapeT_co, Any]) -> np.ndarray[_ShapeT_co, dtype[bool_]] | bool_: ... +@overload +def getmask(a: ArrayLike) -> NDArray[bool_] | bool_: ... + +get_mask = getmask + +def getmaskarray(arr): ... + +# It's sufficient for `m` to have dtype with type: `type[np.bool_]`, +# which isn't necessarily a ndarray. Please open an issue if this causes issues. +def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... + +def make_mask(m, copy=..., shrink=..., dtype=...): ... +def make_mask_none(newshape, dtype=...): ... +def mask_or(m1, m2, copy=..., shrink=...): ... +def flatten_mask(mask): ... +def masked_where(condition, a, copy=...): ... +def masked_greater(x, value, copy=...): ... +def masked_greater_equal(x, value, copy=...): ... +def masked_less(x, value, copy=...): ... +def masked_less_equal(x, value, copy=...): ... +def masked_not_equal(x, value, copy=...): ... +def masked_equal(x, value, copy=...): ... +def masked_inside(x, v1, v2, copy=...): ... +def masked_outside(x, v1, v2, copy=...): ... +def masked_object(x, value, copy=..., shrink=...): ... +def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... +def masked_invalid(a, copy=...): ... + +class _MaskedPrintOption: + def __init__(self, display): ... + def display(self): ... + def set_display(self, s): ... + def enabled(self): ... + def enable(self, shrink=...): ... + +masked_print_option: _MaskedPrintOption + +def flatten_structured_array(a): ... + +class MaskedIterator: + ma: Any + dataiter: Any + maskiter: Any + def __init__(self, ma): ... + def __iter__(self): ... 
+ def __getitem__(self, indx): ... + def __setitem__(self, index, value): ... + def __next__(self): ... + +class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: Any + def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def view(self, dtype=..., type=..., fill_value=...): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + @property + def shape(self) -> _ShapeT_co: ... + @shape.setter + def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... + @property + def mask(self) -> NDArray[MaskType] | MaskType: ... + @mask.setter + def mask(self, value: _ArrayLikeBool_co, /) -> None: ... + @property + def recordmask(self): ... + @recordmask.setter + def recordmask(self, mask): ... + def harden_mask(self) -> Self: ... + def soften_mask(self) -> Self: ... + @property + def hardmask(self) -> bool: ... + def unshare_mask(self) -> Self: ... + @property + def sharedmask(self) -> bool: ... + def shrink_mask(self) -> Self: ... + @property + def baseclass(self) -> type[NDArray[Any]]: ... + data: Any + @property + def flat(self): ... + @flat.setter + def flat(self, value): ... + @property + def fill_value(self): ... + @fill_value.setter + def fill_value(self, value=...): ... + get_fill_value: Any + set_fill_value: Any + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... + def compress(self, condition, axis=..., out=...): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __add__(self, other): ... + def __radd__(self, other): ... + def __sub__(self, other): ... + def __rsub__(self, other): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __truediv__(self, other): ... + def __rtruediv__(self, other): ... + def __floordiv__(self, other): ... + def __rfloordiv__(self, other): ... + def __pow__(self, other, mod: None = None, /): ... + def __rpow__(self, other, mod: None = None, /): ... + + # Keep in sync with `ndarray.__iadd__` + @overload + def __iadd__( + self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
+ @overload + def __iadd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: MaskedArray[Any, dtype[str_] | dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__isub__` + @overload + def __isub__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__imul__` + @overload + def __imul__( + self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: MaskedArray[Any, dtype[integer] | dtype[character] | dtypes.StringDType], other: _ArrayLikeInt_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__ifloordiv__` + @overload + def __ifloordiv__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__itruediv__` + @overload + def __itruediv__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__( + self: _MaskedArray[complexfloating], + other: _ArrayLikeComplex_co, + /, + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__ipow__` + @overload + def __ipow__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
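+
+ # Illustrative runtime behavior of the in-place operators above (a sketch,
+ # arbitrary values): the unmasked data is updated and masked entries
+ # stay masked.
+ # >>> a = np.ma.array([1.0, 2.0], mask=[False, True])
+ # >>> a += 10
+ # >>> a
+ # masked_array(data=[11.0, --], mask=[False, True], fill_value=1e+20)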
+ + # + @property # type: ignore[misc] + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + get_imag: Any + @property # type: ignore[misc] + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + get_real: Any + + # keep in sync with `np.ma.count` + @overload + def count(self, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... + @overload + def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + def reshape(self, *s, **kwargs): ... + def resize(self, newshape, refcheck=..., order=...): ... + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + def ids(self) -> tuple[int, int]: ... + def iscontiguous(self) -> bool: ... + + @overload + def all( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... + def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + def dot(self, b, out=..., strict=...): ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + def cumsum(self, axis=..., dtype=..., out=...): ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... + product: Any + def cumprod(self, axis=..., dtype=..., out=...): ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + def anom(self, axis=..., dtype=...): ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... 
+ def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def round(self, decimals=..., out=...): ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... + + # Keep in-sync with np.ma.argmin + @overload # type: ignore[override] + def argmin( + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def argmin( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in-sync with np.ma.argmax + @overload # type: ignore[override] + def argmax( + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def argmax( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + def sort( # type: ignore[override] + self, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, + ) -> None: ... + + # + @overload # type: ignore[override] + def min( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _ScalarT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def min( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + @overload # type: ignore[override] + def max( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _ScalarT: ... + @overload + def max( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def max( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def max( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + @overload + def ptp( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] = False, + ) -> _ScalarT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> Any: ... + @overload + def ptp( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> _ArrayT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> _ArrayT: ... + + # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None + ) -> None: ... + @overload + def partition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload + def argpartition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> _MaskedArray[intp]: ... + @overload + def argpartition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> _MaskedArray[intp]: ... + + # Keep in-sync with np.ma.take + @overload + def take( # type: ignore[overload-overlap] + self: _MaskedArray[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = 'raise' + ) -> _ScalarT: ... + @overload + def take( + self: _MaskedArray[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = 'raise', + ) -> _MaskedArray[_ScalarT]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = 'raise', + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = 'raise', + ) -> _ArrayT: ... + + copy: Any + diagonal: Any + flatten: Any + + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + squeeze: Any + + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + / + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # + def toflex(self) -> Incomplete: ... + def torecords(self) -> Incomplete: ... + def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... + def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] + def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... + + # + def __reduce__(self): ... + def __deepcopy__(self, memo=...): ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... 
+ @dtype.setter + def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... + +class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): + def __new__( + self, # pyright: ignore[reportSelfClsParameterName] + data, + mask=..., + dtype=..., + fill_value=..., + hardmask=..., + copy=..., + subok=..., + ): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def __iter__(self): ... + def __len__(self): ... + def filled(self, fill_value=...): ... + def tolist(self): ... + +def isMaskedArray(x): ... +isarray = isMaskedArray +isMA = isMaskedArray + +# 0D float64 array +class MaskedConstant(MaskedArray[_AnyShape, dtype[float64]]): + def __new__(cls): ... + __class__: Any + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def __format__(self, format_spec): ... + def __reduce__(self): ... + def __iop__(self, other): ... + __iadd__: Any + __isub__: Any + __imul__: Any + __ifloordiv__: Any + __itruediv__: Any + __ipow__: Any + def copy(self, *args, **kwargs): ... + def __copy__(self): ... + def __deepcopy__(self, memo): ... + def __setattr__(self, attr, value): ... + +masked: MaskedConstant +masked_singleton: MaskedConstant +masked_array = MaskedArray + +def array( + data, + dtype=..., + copy=..., + order=..., + mask=..., + fill_value=..., + keep_mask=..., + hard_mask=..., + shrink=..., + subok=..., + ndmin=..., +): ... +def is_masked(x: object) -> bool: ... + +class _extrema_operation(_MaskedUFunc): + compare: Any + fill_value_func: Any + def __init__(self, ufunc, compare, fill_value): ... + # NOTE: in practice `b` has a default value, but users should + # explicitly provide a value here as the default is deprecated + def __call__(self, a, b): ... + def reduce(self, target, axis=...): ... + def outer(self, a, b): ... + +@overload +def min( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def max( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... 
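+
+# Illustrative runtime behavior of the extrema above (a sketch, arbitrary
+# values): masked entries are excluded from the reduction.
+# >>> int(np.ma.max(np.ma.array([1, 9, 3], mask=[False, True, False])))
+# 3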
+ +@overload +def ptp( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +class _frommethod: + __name__: Any + __doc__: Any + reversed: Any + def __init__(self, methodname, reversed=...): ... + def getdoc(self): ... + def __call__(self, a, *args, **params): ... + +all: _frommethod +anomalies: _frommethod +anom: _frommethod +any: _frommethod +compress: _frommethod +cumprod: _frommethod +cumsum: _frommethod +copy: _frommethod +diagonal: _frommethod +harden_mask: _frommethod +ids: _frommethod +mean: _frommethod +nonzero: _frommethod +prod: _frommethod +product: _frommethod +ravel: _frommethod +repeat: _frommethod +soften_mask: _frommethod +std: _frommethod +sum: _frommethod +swapaxes: _frommethod +trace: _frommethod +var: _frommethod + +@overload +def count(self: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... +@overload +def count(self: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... +@overload +def count(self: ArrayLike, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... +@overload +def count(self: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + +@overload +def argmin( + self: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmin( + self: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmin( + self: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def argmin( + self: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# +@overload +def argmax( + self: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmax( + self: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmax( + self: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def argmax( + self: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... 
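+
+# Illustrative runtime behavior of the arg-reductions above (a sketch,
+# arbitrary values): masked entries are filled with `fill_value` before the
+# reduction, so by default they can never win.
+# >>> a = np.ma.array([3, 0, 9], mask=[False, True, False])
+# >>> int(np.ma.argmin(a))
+# 0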
+ +minimum: _extrema_operation +maximum: _extrema_operation + +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = 'raise' +) -> _ScalarT: ... +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = 'raise', +) -> _MaskedArray[_ScalarT]: ... +@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = 'raise', +) -> Any: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = 'raise', +) -> _MaskedArray[Any]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = 'raise', +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = 'raise', +) -> _ArrayT: ... + +def power(a, b, third=...): ... +def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... +@overload +def sort( + a: _ArrayT, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, +) -> _ArrayT: ... +@overload +def sort( + a: ArrayLike, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, +) -> NDArray[Any]: ... +@overload +def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... +@overload +def compressed(x: ArrayLike) -> _Array1D[Any]: ... +def concatenate(arrays, axis=...): ... +def diag(v, k=...): ... +def left_shift(a, n): ... +def right_shift(a, n): ... +def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = 'raise') -> None: ... +def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... +def transpose(a, axes=...): ... +def reshape(a, new_shape, order=...): ... +def resize(x, new_shape): ... +def ndim(obj: ArrayLike) -> int: ... +def shape(obj): ... +def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... +def diff(a, /, n=..., axis=..., prepend=..., append=...): ... +def where(condition, x=..., y=...): ... +def choose(indices, choices, out=..., mode=...): ... +def round_(a, decimals=..., out=...): ... +round = round_ + +def inner(a, b): ... +innerproduct = inner + +def outer(a, b): ... +outerproduct = outer + +def correlate(a, v, mode=..., propagate_mask=...): ... +def convolve(a, v, mode=..., propagate_mask=...): ... + +def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... + +def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... + +def asarray(a, dtype=..., order=...): ... +def asanyarray(a, dtype=...): ... +def fromflex(fxarray): ... + +class _convert2ma: + def __init__(self, /, funcname: str, np_ret: str, np_ma_ret: str, params: dict[str, Any] | None = None) -> None: ... + def __call__(self, /, *args: object, **params: object) -> Any: ... + def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... 
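+
+# The _convert2ma factories below wrap the like-named np functions and return
+# MaskedArray views; those constructed with a `params` dict also accept the
+# extra keywords at call time (illustrative sketch, arbitrary values):
+# >>> z = np.ma.zeros(3, fill_value=-1.0)
+# >>> float(z.fill_value)
+# -1.0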
+ +arange: _convert2ma +clip: _convert2ma +empty: _convert2ma +empty_like: _convert2ma +frombuffer: _convert2ma +fromfunction: _convert2ma +identity: _convert2ma +indices: _convert2ma +ones: _convert2ma +ones_like: _convert2ma +squeeze: _convert2ma +zeros: _convert2ma +zeros_like: _convert2ma + +def append(a, b, axis=...): ... +def dot(a, b, strict=..., out=...): ... +def mask_rowcols(a, axis=...): ... diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/extras.py b/.venv/lib/python3.12/site-packages/numpy/ma/extras.py new file mode 100644 index 0000000..094c1e2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/extras.py @@ -0,0 +1,2344 @@ +""" +Masked arrays add-ons. + +A collection of utilities for `numpy.ma`. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +__all__ = [ + 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack', + 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows', + 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d', + 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', + 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows', + 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate', + 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', + ] + +import itertools +import warnings + +import numpy as np +from numpy import array as nxarray +from numpy import ndarray +from numpy.lib._function_base_impl import _ureduce +from numpy.lib._index_tricks_impl import AxisConcatenator +from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple + +from . import core as ma +from .core import ( # noqa: F401 + MAError, + MaskedArray, + add, + array, + asarray, + concatenate, + count, + dot, + filled, + get_masked_subclass, + getdata, + getmask, + getmaskarray, + make_mask_descr, + mask_or, + masked, + masked_array, + nomask, + ones, + sort, + zeros, +) + + +def issequence(seq): + """ + Is seq a sequence (ndarray, list or tuple)? + + """ + return isinstance(seq, (ndarray, tuple, list)) + + +def count_masked(arr, axis=None): + """ + Count the number of masked elements along the given axis. + + Parameters + ---------- + arr : array_like + An array with (possibly) masked elements. + axis : int, optional + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + See Also + -------- + MaskedArray.count : Count non-masked elements. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(9).reshape((3,3)) + >>> a = np.ma.array(a) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> np.ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. + + >>> np.ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> np.ma.count_masked(a, axis=1) + array([0, 2, 1]) + + """ + m = getmaskarray(arr) + return m.sum(axis) + + +def masked_all(shape, dtype=float): + """ + Empty masked array with all elements masked. 
+ + Return an empty masked array of the given shape and dtype, where all the + data are masked. + + Parameters + ---------- + shape : int or tuple of ints + Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. + dtype : dtype, optional + Data type of the output. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + See Also + -------- + masked_all_like : Empty masked array modelled on an existing array. + + Notes + ----- + Unlike other masked array creation functions (e.g. `numpy.ma.zeros`, + `numpy.ma.ones`, `numpy.ma.full`), `masked_all` does not initialize the + values of the array, and may therefore be marginally faster. However, + the values stored in the newly allocated array are arbitrary. For + reproducible behavior, be sure to set each element of the array before + reading. + + Examples + -------- + >>> import numpy as np + >>> np.ma.masked_all((3, 3)) + masked_array( + data=[[--, --, --], + [--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float64) + + The `dtype` parameter defines the underlying data type. + + >>> a = np.ma.masked_all((3, 3)) + >>> a.dtype + dtype('float64') + >>> a = np.ma.masked_all((3, 3), dtype=np.int32) + >>> a.dtype + dtype('int32') + + """ + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, make_mask_descr(dtype))) + return a + + +def masked_all_like(arr): + """ + Empty masked array with the properties of an existing array. + + Return an empty masked array of the same shape and dtype as + the array `arr`, where all the data are masked. + + Parameters + ---------- + arr : ndarray + An array describing the shape and dtype of the required MaskedArray. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + Raises + ------ + AttributeError + If `arr` doesn't have a shape attribute (i.e. not an ndarray) + + See Also + -------- + masked_all : Empty masked array with all elements masked. + + Notes + ----- + Unlike other masked array creation functions (e.g. `numpy.ma.zeros_like`, + `numpy.ma.ones_like`, `numpy.ma.full_like`), `masked_all_like` does not + initialize the values of the array, and may therefore be marginally + faster. However, the values stored in the newly allocated array are + arbitrary. For reproducible behavior, be sure to set each element of the + array before reading. + + Examples + -------- + >>> import numpy as np + >>> arr = np.zeros((2, 3), dtype=np.float32) + >>> arr + array([[0., 0., 0.], + [0., 0., 0.]], dtype=float32) + >>> np.ma.masked_all_like(arr) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=np.float64(1e+20), + dtype=float32) + + The dtype of the masked array matches the dtype of `arr`. + + >>> arr.dtype + dtype('float32') + >>> np.ma.masked_all_like(arr).dtype + dtype('float32') + + """ + a = np.empty_like(arr).view(MaskedArray) + a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) + return a + + +#####-------------------------------------------------------------------------- +#---- --- Standard functions --- +#####-------------------------------------------------------------------------- +class _fromnxfunction: + """ + Defines a wrapper to adapt NumPy functions to masked arrays. + + + An instance of `_fromnxfunction` can be called with the same parameters + as the wrapped NumPy function. The docstring of `newfunc` is adapted from + the wrapped function as well, see `getdoc`. 
+ + This class should not be used directly. Instead, one of its extensions that + provides support for a specific type of input should be used. + + Parameters + ---------- + funcname : str + The name of the function to be adapted. The function should be + in the NumPy namespace (i.e. ``np.funcname``). + + """ + + def __init__(self, funcname): + self.__name__ = funcname + self.__qualname__ = funcname + self.__doc__ = self.getdoc() + + def getdoc(self): + """ + Retrieve the docstring and signature from the function. + + The ``__doc__`` attribute of the function is used as the docstring for + the new masked array version of the function. A note on application + of the function to the mask is appended. + + Parameters + ---------- + None + + """ + npfunc = getattr(np, self.__name__, None) + doc = getattr(npfunc, '__doc__', None) + if doc: + sig = ma.get_object_signature(npfunc) + doc = ma.doc_note(doc, "The function is applied to both the _data " + "and the _mask, if any.") + if sig: + sig = self.__name__ + sig + "\n\n" + return sig + doc + return + + def __call__(self, *args, **params): + pass + + +class _fromnxfunction_single(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single array + argument followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + if isinstance(x, ndarray): + _d = func(x.__array__(), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + else: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_seq(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single sequence + of arrays followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + _d = func(tuple(np.asarray(a) for a in x), *args, **params) + _m = func(tuple(getmaskarray(a) for a in x), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_args(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. The first non-array-like input marks the beginning of the + arguments that are passed verbatim for both the data and mask calls. + Array arguments are processed independently and the results are + returned in a list. If only one array is found, the return value is + just the processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + arrays = [] + args = list(args) + while len(args) > 0 and issequence(args[0]): + arrays.append(args.pop(0)) + res = [] + for x in arrays: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + res.append(masked_array(_d, mask=_m)) + if len(arrays) == 1: + return res[0] + return res + + +class _fromnxfunction_allargs(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. Similar to `_fromnxfunction_args` except that all args + are converted to arrays even if they are not so already. This makes + it possible to process scalars as 1-D arrays. Only keyword arguments + are passed through verbatim for the data and mask calls. Arrays + arguments are processed independently and the results are returned + in a list. 
If only one arg is present, the return value is just the + processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + res = [] + for x in args: + _d = func(np.asarray(x), **params) + _m = func(getmaskarray(x), **params) + res.append(masked_array(_d, mask=_m)) + if len(args) == 1: + return res[0] + return res + + +atleast_1d = _fromnxfunction_allargs('atleast_1d') +atleast_2d = _fromnxfunction_allargs('atleast_2d') +atleast_3d = _fromnxfunction_allargs('atleast_3d') + +vstack = row_stack = _fromnxfunction_seq('vstack') +hstack = _fromnxfunction_seq('hstack') +column_stack = _fromnxfunction_seq('column_stack') +dstack = _fromnxfunction_seq('dstack') +stack = _fromnxfunction_seq('stack') + +hsplit = _fromnxfunction_single('hsplit') + +diagflat = _fromnxfunction_single('diagflat') + + +#####-------------------------------------------------------------------------- +#---- +#####-------------------------------------------------------------------------- +def flatten_inplace(seq): + """Flatten a sequence in place.""" + k = 0 + while (k != len(seq)): + while hasattr(seq[k], '__iter__'): + seq[k:(k + 1)] = seq[k] + k += 1 + return seq + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + (This docstring should be overwritten) + """ + arr = array(arr, copy=False, subok=True) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + ind = [0] * (nd - 1) + i = np.zeros(nd, 'O') + indlist = list(range(nd)) + indlist.remove(axis) + i[axis] = slice(None, None) + outshape = np.asarray(arr.shape).take(indlist) + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + # if res is a number, then we have a smaller output array + asscalar = np.isscalar(res) + if not asscalar: + try: + len(res) + except TypeError: + asscalar = True + # Note: we shouldn't set the dtype of the output from the first result + # so we force the type to object, and build a list of dtypes. 
We'll + # just take the largest, to avoid some downcasting + dtypes = [] + if asscalar: + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) + outarr[tuple(ind)] = res + Ntot = np.prod(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + dtypes.append(asarray(res).dtype) + k += 1 + else: + res = array(res, copy=False, subok=True) + j = i.copy() + j[axis] = ([slice(None, None)] * res.ndim) + j.put(indlist, ind) + Ntot = np.prod(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = res.shape + dtypes.append(asarray(res).dtype) + outshape = flatten_inplace(outshape) + outarr = zeros(outshape, object) + outarr[tuple(flatten_inplace(j.tolist()))] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + j.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(flatten_inplace(j.tolist()))] = res + dtypes.append(asarray(res).dtype) + k += 1 + max_dtypes = np.dtype(np.asarray(dtypes).max()) + if not hasattr(arr, '_mask'): + result = np.asarray(outarr, dtype=max_dtypes) + else: + result = asarray(outarr, dtype=max_dtypes) + result.fill_value = ma.default_fill_value(result) + return result + + +apply_along_axis.__doc__ = np.apply_along_axis.__doc__ + + +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +if apply_over_axes.__doc__ is not None: + apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ + :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ + """ + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = np.ma.masked + >>> a[:,1,:] = np.ma.masked + >>> a + masked_array( + data=[[[0, --, 2, 3], + [--, --, --, --], + [8, 9, 10, 11]], + [[12, --, 14, 15], + [--, --, --, --], + [20, 21, 22, 23]]], + mask=[[[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]], + [[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]]], + fill_value=999999) + >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2]) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + """ + + +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Return the weighted average of array over the given axis. + + Parameters + ---------- + a : array_like + Data to be averaged. + Masked entries are not taken into account in the computation. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. 
The default, + `axis=None`, will average over all of the elements of the input array. + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The array of weights must be the same shape as `a` if no axis is + specified, otherwise the weights must have dimensions and shape + consistent with `a` along the specified axis. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + The calculation is:: + + avg = sum(a * weights) / sum(weights) + + where the sum is over all included elements. + The only constraint on the values of `weights` is that `sum(weights)` + must not be 0. + returned : bool, optional + Flag indicating whether a tuple ``(result, sum of weights)`` + should be returned as output (True), or just the result (False). + Default is False. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + *Note:* `keepdims` will not work with instances of `numpy.matrix` + or other classes whose methods do not support `keepdims`. + + .. versionadded:: 1.23.0 + + Returns + ------- + average, [sum_of_weights] : (tuple of) scalar or MaskedArray + The average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `np.float64` + if `a` is of integer type and floats smaller than `float64`, or the + input data-type, otherwise. If returned, `sum_of_weights` is always + `float64`. + + Raises + ------ + ZeroDivisionError + When all weights along axis are zero. See `numpy.ma.average` for a + version robust to this type of error. + TypeError + When `weights` does not have the same shape as `a`, and `axis=None`. + ValueError + When `weights` does not have dimensions and shape consistent with `a` + along specified `axis`. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) + >>> np.ma.average(a, weights=[3, 1, 0, 0]) + 1.25 + + >>> x = np.ma.arange(6.).reshape(3, 2) + >>> x + masked_array( + data=[[0., 1.], + [2., 3.], + [4., 5.]], + mask=False, + fill_value=1e+20) + >>> data = np.arange(8).reshape((2, 2, 2)) + >>> data + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.ma.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]]) + masked_array(data=[3.4, 4.4], + mask=[False, False], + fill_value=1e+20) + >>> np.ma.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]]) + Traceback (most recent call last): + ... + ValueError: Shape of weights must be consistent + with shape of a along specified axis. + + >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], + ... returned=True) + >>> avg + masked_array(data=[2.6666666666666665, 3.6666666666666665], + mask=[False, False], + fill_value=1e+20) + + With ``keepdims=True``, the following result has shape (3, 1). 
+ + >>> np.ma.average(x, axis=1, keepdims=True) + masked_array( + data=[[0.5], + [2.5], + [4.5]], + mask=False, + fill_value=1e+20) + """ + a = asarray(a) + m = getmask(a) + + if axis is not None: + axis = normalize_axis_tuple(axis, a.ndim, argname="axis") + + if keepdims is np._NoValue: + # Don't pass on the keepdims argument if one wasn't given. + keepdims_kw = {} + else: + keepdims_kw = {'keepdims': keepdims} + + if weights is None: + avg = a.mean(axis, **keepdims_kw) + scl = avg.dtype.type(a.count(axis)) + else: + wgt = asarray(weights) + + if issubclass(a.dtype.type, (np.integer, np.bool)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.shape != tuple(a.shape[ax] for ax in axis): + raise ValueError( + "Shape of weights must be consistent with " + "shape of a along specified axis.") + + # setup wgt to broadcast along axis + wgt = wgt.transpose(np.argsort(axis)) + wgt = wgt.reshape(tuple((s if ax in axis else 1) + for ax, s in enumerate(a.shape))) + + if m is not nomask: + wgt = wgt * (~a.mask) + wgt.mask |= a.mask + + scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) + avg = np.multiply(a, wgt, + dtype=result_dtype).sum(axis, **keepdims_kw) / scl + + if returned: + if scl.shape != avg.shape: + scl = np.broadcast_to(scl, avg.shape).copy() + return avg, scl + else: + return avg + + +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int, optional + Axis along which the medians are computed. The default (None) is + to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True, and the input + is not already an `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + Returns + ------- + median : ndarray + A new array holding the result is returned unless out is + specified, in which case a reference to out is returned. + Return data-type is `float64` for integers and floats smaller than + `float64`, or the input data-type, otherwise. + + See Also + -------- + mean + + Notes + ----- + Given a vector ``V`` with ``N`` non masked values, the median of ``V`` + is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. + ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` + when ``N`` is even. 
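+
+    For example, for ``Vs = [1, 3, 7]`` (``N = 3``) the median is
+    ``Vs[1] = 3``, while for ``Vs = [1, 3, 7, 9]`` (``N = 4``) it is
+    ``(Vs[1] + Vs[2])/2 = (3 + 7)/2 = 5``.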
+ + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) + >>> np.ma.median(x) + 1.5 + + >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + >>> np.ma.median(x) + 2.5 + >>> np.ma.median(x, axis=-1, overwrite_input=True) + masked_array(data=[2.0, 5.0], + mask=[False, False], + fill_value=1e+20) + + """ + if not hasattr(a, 'mask'): + m = np.median(getdata(a, subok=True), axis=axis, + out=out, overwrite_input=overwrite_input, + keepdims=keepdims) + if isinstance(m, np.ndarray) and 1 <= m.ndim: + return masked_array(m, copy=False) + else: + return m + + return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _median(a, axis=None, out=None, overwrite_input=False): + # when an unmasked NaN is present return it, so we need to sort the NaN + # values behind the mask + if np.issubdtype(a.dtype, np.inexact): + fill_value = np.inf + else: + fill_value = None + if overwrite_input: + if axis is None: + asorted = a.ravel() + asorted.sort(fill_value=fill_value) + else: + a.sort(axis=axis, fill_value=fill_value) + asorted = a + else: + asorted = sort(a, axis=axis, fill_value=fill_value) + + if axis is None: + axis = 0 + else: + axis = normalize_axis_index(axis, asorted.ndim) + + if asorted.shape[axis] == 0: + # for empty axis integer indices fail so use slicing to get same result + # as median (which is mean of empty slice = nan) + indexer = [slice(None)] * asorted.ndim + indexer[axis] = slice(0, 0) + indexer = tuple(indexer) + return np.ma.mean(asorted[indexer], axis=axis, out=out) + + if asorted.ndim == 1: + idx, odd = divmod(count(asorted), 2) + mid = asorted[idx + odd - 1:idx + 1] + if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0: + # avoid inf / x = masked + s = mid.sum(out=out) + if not odd: + s = np.true_divide(s, 2., casting='safe', out=out) + s = np.lib._utils_impl._median_nancheck(asorted, s, axis) + else: + s = mid.mean(out=out) + + # if result is masked either the input contained enough + # minimum_fill_value so that it would be the median or all values + # masked + if np.ma.is_masked(s) and not np.all(asorted.mask): + return np.ma.minimum_fill_value(asorted) + return s + + counts = count(asorted, axis=axis, keepdims=True) + h = counts // 2 + + # duplicate high if odd number of elements so mean does nothing + odd = counts % 2 == 1 + l = np.where(odd, h, h - 1) + + lh = np.concatenate([l, h], axis=axis) + + # get low and high median + low_high = np.take_along_axis(asorted, lh, axis=axis) + + def replace_masked(s): + # Replace masked entries with minimum_full_value unless it all values + # are masked. This is required as the sort order of values equal or + # larger than the fill value is undefined and a valid value placed + # elsewhere, e.g. [4, --, inf]. + if np.ma.is_masked(s): + rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask + s.data[rep] = np.ma.minimum_fill_value(asorted) + s.mask[rep] = False + + replace_masked(low_high) + + if np.issubdtype(asorted.dtype, np.inexact): + # avoid inf / x = masked + s = np.ma.sum(low_high, axis=axis, out=out) + np.true_divide(s.data, 2., casting='unsafe', out=s.data) + + s = np.lib._utils_impl._median_nancheck(asorted, s, axis) + else: + s = np.ma.mean(low_high, axis=axis, out=out) + + return s + + +def compress_nd(x, axis=None): + """Suppress slices from multiple dimensions which contain masked values. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. 
If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with `mask` + set to `nomask`. + axis : tuple of ints or int, optional + Which dimensions to suppress slices from can be configured with this + parameter. + - If axis is a tuple of ints, those are the axes to suppress slices from. + - If axis is an int, then that is the only axis to suppress slices from. + - If axis is None, all axis are selected. + + Returns + ------- + compress_array : ndarray + The compressed array. + + Examples + -------- + >>> import numpy as np + >>> arr = [[1, 2], [3, 4]] + >>> mask = [[0, 1], [0, 0]] + >>> x = np.ma.array(arr, mask=mask) + >>> np.ma.compress_nd(x, axis=0) + array([[3, 4]]) + >>> np.ma.compress_nd(x, axis=1) + array([[1], + [3]]) + >>> np.ma.compress_nd(x) + array([[3]]) + + """ + x = asarray(x) + m = getmask(x) + # Set axis to tuple of ints + if axis is None: + axis = tuple(range(x.ndim)) + else: + axis = normalize_axis_tuple(axis, x.ndim) + + # Nothing is masked: return x + if m is nomask or not m.any(): + return x._data + # All is masked: return empty + if m.all(): + return nxarray([]) + # Filter elements through boolean indexing + data = x._data + for ax in axis: + axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) + data = data[(slice(None),) * ax + (~m.any(axis=axes),)] + return data + + +def compress_rowcols(x, axis=None): + """ + Suppress the rows and/or columns of a 2-D array that contain + masked values. + + The suppression behavior is selected with the `axis` parameter. + + - If axis is None, both rows and columns are suppressed. + - If axis is 0, only rows are suppressed. + - If axis is 1 or -1, only columns are suppressed. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. Default is None. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x + masked_array( + data=[[--, 1, 2], + [--, 4, 5], + [6, 7, 8]], + mask=[[ True, False, False], + [ True, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.compress_rowcols(x) + array([[7, 8]]) + >>> np.ma.compress_rowcols(x, 0) + array([[6, 7, 8]]) + >>> np.ma.compress_rowcols(x, 1) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + if asarray(x).ndim != 2: + raise NotImplementedError("compress_rowcols works for 2D arrays only.") + return compress_nd(x, axis=axis) + + +def compress_rows(a): + """ + Suppress whole rows of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see + `compress_rowcols` for details. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + See Also + -------- + compress_rowcols + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... 
[0, 0, 0]]) + >>> np.ma.compress_rows(a) + array([[6, 7, 8]]) + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_rows works for 2D arrays only.") + return compress_rowcols(a, 0) + + +def compress_cols(a): + """ + Suppress whole columns of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see + `compress_rowcols` for details. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + See Also + -------- + compress_rowcols + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> np.ma.compress_cols(a) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_cols works for 2D arrays only.") + return compress_rowcols(a, 1) + + +def mask_rowcols(a, axis=None): + """ + Mask rows and/or columns of a 2D array that contain masked values. + + Mask whole rows and/or columns of a 2D array that contain + masked values. The masking behavior is selected using the + `axis` parameter. + + - If `axis` is None, rows *and* columns are masked. + - If `axis` is 0, only rows are masked. + - If `axis` is 1 or -1, only columns are masked. + + Parameters + ---------- + a : array_like, MaskedArray + The array to mask. If not a MaskedArray instance (or if no array + elements are masked), the result is a MaskedArray with `mask` set + to `nomask` (False). Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. If None, applies to a + flattened version of the array. + + Returns + ------- + a : MaskedArray + A modified version of the input array, masked depending on the value + of the `axis` parameter. + + Raises + ------ + NotImplementedError + If input array `a` is not 2D. + + See Also + -------- + mask_rows : Mask rows of a 2D array that contain masked values. + mask_cols : Mask cols of a 2D array that contain masked values. + masked_where : Mask where a condition is met. + + Notes + ----- + The input array's mask is modified by this function. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> np.ma.mask_rowcols(a) + masked_array( + data=[[0, --, 0], + [--, --, --], + [0, --, 0]], + mask=[[False, True, False], + [ True, True, True], + [False, True, False]], + fill_value=1) + + """ + a = array(a, subok=False) + if a.ndim != 2: + raise NotImplementedError("mask_rowcols works for 2D arrays only.") + m = getmask(a) + # Nothing is masked: return a + if m is nomask or not m.any(): + return a + maskedval = m.nonzero() + a._mask = a._mask.copy() + if not axis: + a[np.unique(maskedval[0])] = masked + if axis in [None, 1, -1]: + a[:, np.unique(maskedval[1])] = masked + return a + + +def mask_rows(a, axis=np._NoValue): + """ + Mask rows of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. 
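+
+    Equivalent, as the implementation shows, to::
+
+        np.ma.mask_rowcols(a, axis=0)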
+ + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + + >>> np.ma.mask_rows(a) + masked_array( + data=[[0, 0, 0], + [--, --, --], + [0, 0, 0]], + mask=[[False, False, False], + [ True, True, True], + [False, False, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 0) + + +def mask_cols(a, axis=np._NoValue): + """ + Mask columns of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> np.ma.mask_cols(a) + masked_array( + data=[[0, --, 0], + [0, --, 0], + [0, --, 0]], + mask=[[False, True, False], + [False, True, False], + [False, True, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 1) + + +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- + +def ediff1d(arr, to_end=None, to_begin=None): + """ + Compute the differences between consecutive elements of an array. + + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account, see `numpy.ediff1d` for details. + + See Also + -------- + numpy.ediff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> arr = np.ma.array([1, 2, 4, 7, 0]) + >>> np.ma.ediff1d(arr) + masked_array(data=[ 1, 2, 3, -7], + mask=False, + fill_value=999999) + + """ + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] + # + if to_begin is not None: + arrays.insert(0, to_begin) + if to_end is not None: + arrays.append(to_end) + # + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) + # + return ed + + +def unique(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). The output array + is always a masked array. See `numpy.unique` for more details. 
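+
+    With ``return_inverse=True``, the unique values can be used to rebuild
+    the input, masked element included (an illustrative sketch; the data
+    stored under the mask may differ)::
+
+        uniq, inv = np.ma.unique(masked_a, return_inverse=True)
+        rebuilt = uniq[inv]   # same unmasked values and mask as masked_a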
+ + See Also + -------- + numpy.unique : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> a = [1, 2, 1000, 2, 3] + >>> mask = [0, 0, 1, 0, 0] + >>> masked_a = np.ma.masked_array(a, mask) + >>> masked_a + masked_array(data=[1, 2, --, 2, 3], + mask=[False, False, True, False, False], + fill_value=999999) + >>> np.ma.unique(masked_a) + masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999) + >>> np.ma.unique(masked_a, return_index=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2])) + >>> np.ma.unique(masked_a, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 3, 1, 2])) + >>> np.ma.unique(masked_a, return_index=True, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2])) + """ + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + + +def intersect1d(ar1, ar2, assume_unique=False): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See `numpy.intersect1d` for more details. + + See Also + -------- + numpy.intersect1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> np.ma.intersect1d(x, y) + masked_array(data=[1, 3, --], + mask=[False, False, True], + fill_value=999999) + + """ + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) + aux.sort() + return aux[:-1][aux[1:] == aux[:-1]] + + +def setxor1d(ar1, ar2, assume_unique=False): + """ + Set exclusive-or of 1-D arrays with unique elements. + + The output is always a masked array. See `numpy.setxor1d` for more details. + + See Also + -------- + numpy.setxor1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([1, 2, 3, 2, 4]) + >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) + >>> np.ma.setxor1d(ar1, ar2) + masked_array(data=[1, 4, 5, 7], + mask=False, + fill_value=999999) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = ma.concatenate((ar1, ar2), axis=None) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of an array is also present in a second + array. + + The output is always a masked array. See `numpy.in1d` for more details. + + We recommend using :func:`isin` instead of `in1d` for new code. + + See Also + -------- + isin : Version of this function that preserves the shape of ar1. + numpy.in1d : Equivalent function for ndarrays. 
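+
+    Notes
+    -----
+    As the implementation below shows, the two arrays are concatenated and
+    sorted with a stable ``mergesort``, so that equal values from `ar1`
+    always precede those from `ar2`; membership is then read off by
+    comparing adjacent elements of the sorted array.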
+ + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) + >>> ar2 = [0, 2] + >>> np.ma.in1d(ar1, ar2) + masked_array(data=[ True, False, True, False, True], + mask=False, + fill_value=True) + + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = ma.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + + +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over + `element` only. + + The output is always a masked array of the same shape as `element`. + See `numpy.isin` for more details. + + See Also + -------- + in1d : Flattened version of this function. + numpy.isin : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) + >>> test_elements = [0, 2] + >>> np.ma.isin(element, test_elements) + masked_array(data=[False, True, False, False, False, False], + mask=False, + fill_value=True) + + """ + element = ma.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def union1d(ar1, ar2): + """ + Union of two arrays. + + The output is always a masked array. See `numpy.union1d` for more details. + + See Also + -------- + numpy.union1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([1, 2, 3, 4]) + >>> ar2 = np.ma.array([3, 4, 5, 6]) + >>> np.ma.union1d(ar1, ar2) + masked_array(data=[1, 2, 3, 4, 5, 6], + mask=False, + fill_value=999999) + + """ + return unique(ma.concatenate((ar1, ar2), axis=None)) + + +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Set difference of 1D arrays with unique elements. + + The output is always a masked array. See `numpy.setdiff1d` for more + details. + + See Also + -------- + numpy.setdiff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) + >>> np.ma.setdiff1d(x, [1, 2]) + masked_array(data=[3, --], + mask=[False, True], + fill_value=999999) + + """ + if assume_unique: + ar1 = ma.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] + + +############################################################################### +# Covariance # +############################################################################### + + +def _covhelper(x, y=None, rowvar=True, allow_masked=True): + """ + Private function for the computation of covariance and correlation + coefficients. 
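+
+    Returns a tuple ``(x, xnotmask, rowvar)``: the demeaned data (with `y`
+    concatenated onto `x` when given), a float array counting unmasked
+    entries for the ``(N - ddof)`` normalization, and `rowvar` normalized
+    to 0 or 1.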
+ + """ + x = ma.array(x, ndmin=2, copy=True, dtype=float) + xmask = ma.getmaskarray(x) + # Quick exit if we can't process masked data + if not allow_masked and xmask.any(): + raise ValueError("Cannot process masked data.") + # + if x.shape[0] == 1: + rowvar = True + # Make sure that rowvar is either 0 or 1 + rowvar = int(bool(rowvar)) + axis = 1 - rowvar + if rowvar: + tup = (slice(None), None) + else: + tup = (None, slice(None)) + # + if y is None: + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(xmask).astype(xnm_dtype) + else: + y = array(y, copy=False, ndmin=2, dtype=float) + ymask = ma.getmaskarray(y) + if not allow_masked and ymask.any(): + raise ValueError("Cannot process masked data.") + if xmask.any() or ymask.any(): + if y.shape == x.shape: + # Define some common mask + common_mask = np.logical_or(xmask, ymask) + if common_mask is not nomask: + xmask = x._mask = y._mask = ymask = common_mask + x._sharedmask = False + y._sharedmask = False + x = ma.concatenate((x, y), axis) + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype( + xnm_dtype + ) + x -= x.mean(axis=rowvar)[tup] + return (x, xnotmask, rowvar) + + +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Estimate the covariance matrix. + + Except for the handling of missing data this function does the same as + `numpy.cov`. For more details and examples, see `numpy.cov`. + + By default, masked values are recognized as such. If `x` and `y` have the + same shape, a common mask is allocated: if ``x[i,j]`` is masked, then + ``y[i,j]`` will also be masked. + Setting `allow_masked` to False will raise an exception if values are + missing in either of the input arrays. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises a `ValueError` exception when some values are missing. + ddof : {None, int}, optional + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. 
The default value is ``None``. + + Raises + ------ + ValueError + Raised if some values are missing and `allow_masked` is False. + + See Also + -------- + numpy.cov + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) + >>> y = np.ma.array([[1, 0], [0, 1]], mask=[0, 0, 1, 1]) + >>> np.ma.cov(x, y) + masked_array( + data=[[--, --, --, --], + [--, --, --, --], + [--, --, --, --], + [--, --, --, --]], + mask=[[ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True]], + fill_value=1e+20, + dtype=float64) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact + result = ma.array(data, mask=mask).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact + result = ma.array(data, mask=mask).squeeze() + return result + + +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, + ddof=np._NoValue): + """ + Return Pearson product-moment correlation coefficients. + + Except for the handling of missing data this function does the same as + `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. Because `bias` is deprecated, this + argument needs to be treated as keyword only to avoid a warning. + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + + See Also + -------- + numpy.corrcoef : Equivalent function in top-level NumPy module. + cov : Estimate the covariance matrix. + + Notes + ----- + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. 
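+
+    The coefficients are derived from the covariance matrix ``C`` as
+    ``R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j])``; the implementation
+    below divides `C` by the outer product of the masked standard
+    deviations ``std = ma.sqrt(ma.diagonal(C))``.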
+ + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) + >>> np.ma.corrcoef(x) + masked_array( + data=[[--, --], + [--, --]], + mask=[[ True, True], + [ True, True]], + fill_value=1e+20, + dtype=float64) + + """ + msg = 'bias and ddof have no effect and are deprecated' + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn(msg, DeprecationWarning, stacklevel=2) + # Estimate the covariance matrix. + corr = cov(x, y, rowvar, allow_masked=allow_masked) + # The non-masked version returns a masked value for a scalar. + try: + std = ma.sqrt(ma.diagonal(corr)) + except ValueError: + return ma.MaskedConstant() + corr /= ma.multiply.outer(std, std) + return corr + +#####-------------------------------------------------------------------------- +#---- --- Concatenation helpers --- +#####-------------------------------------------------------------------------- + +class MAxisConcatenator(AxisConcatenator): + """ + Translate slice objects to concatenation along an axis. + + For documentation on usage, see `mr_class`. + + See Also + -------- + mr_class + + """ + __slots__ = () + + concatenate = staticmethod(concatenate) + + @classmethod + def makemat(cls, arr): + # There used to be a view as np.matrix here, but we may eventually + # deprecate that class. In preparation, we use the unmasked version + # to construct the matrix (with copy=False for backwards compatibility + # with the .view) + data = super().makemat(arr.data, copy=False) + return array(data, mask=arr.mask) + + def __getitem__(self, key): + # matrix builder syntax, like 'a, b; c, d' + if isinstance(key, str): + raise MAError("Unavailable for masked array.") + + return super().__getitem__(key) + + +class mr_class(MAxisConcatenator): + """ + Translate slice objects to concatenation along the first axis. + + This is the masked array version of `r_`. + + See Also + -------- + r_ + + Examples + -------- + >>> import numpy as np + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + masked_array(data=[1, 2, 3, ..., 4, 5, 6], + mask=False, + fill_value=999999) + + """ + __slots__ = () + + def __init__(self): + MAxisConcatenator.__init__(self, 0) + + +mr_ = mr_class() + + +#####-------------------------------------------------------------------------- +#---- Find unmasked data --- +#####-------------------------------------------------------------------------- + +def ndenumerate(a, compressed=True): + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values, + skipping elements that are masked. With `compressed=False`, + `ma.masked` is yielded as the value of masked elements. This + behavior differs from that of `numpy.ndenumerate`, which yields the + value of the underlying data array. + + Notes + ----- + .. versionadded:: 1.23.0 + + Parameters + ---------- + a : array_like + An array with (possibly) masked elements. + compressed : bool, optional + If True (default), masked elements are skipped. + + See Also + -------- + numpy.ndenumerate : Equivalent function ignoring any mask. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(9).reshape((3, 3)) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> for index, x in np.ma.ndenumerate(a): + ... 
print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 1) 4 + (2, 0) 6 + (2, 2) 8 + + >>> for index, x in np.ma.ndenumerate(a, compressed=False): + ... print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 0) -- + (1, 1) 4 + (1, 2) -- + (2, 0) 6 + (2, 1) -- + (2, 2) 8 + """ + for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat): + if not mask: + yield it + elif not compressed: + yield it[0], masked + + +def flatnotmasked_edges(a): + """ + Find the indices of the first and last unmasked values. + + Expects a 1-D `MaskedArray`, returns None if all values are masked. + + Parameters + ---------- + a : array_like + Input 1-D `MaskedArray` + + Returns + ------- + edges : ndarray or None + The indices of first and last non-masked value in the array. + Returns None if all values are masked. + + See Also + -------- + flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 1-D arrays. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_edges(a) + array([0, 9]) + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_edges(a) + array([3, 8]) + + >>> a[:] = np.ma.masked + >>> print(np.ma.flatnotmasked_edges(a)) + None + + """ + m = getmask(a) + if m is nomask or not np.any(m): + return np.array([0, a.size - 1]) + unmasked = np.flatnonzero(~m) + if len(unmasked) > 0: + return unmasked[[0, -1]] + else: + return None + + +def notmasked_edges(a, axis=None): + """ + Find the indices of the first and last unmasked values along an axis. + + If all values are masked, return None. Otherwise, return a list + of two tuples, corresponding to the indices of the first and last + unmasked values respectively. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array. + + Returns + ------- + edges : ndarray or list + An array of start and end indexes if there are any masked data in + the array. If there are no masked data in the array, `edges` is a + list of the first and last index. + + See Also + -------- + flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous + clump_masked, clump_unmasked + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(9).reshape((3, 3)) + >>> m = np.zeros_like(a) + >>> m[1:, 1:] = 1 + + >>> am = np.ma.array(a, mask=m) + >>> np.array(am[~am.mask]) + array([0, 1, 2, 3, 6]) + + >>> np.ma.notmasked_edges(am) + array([0, 6]) + + """ + a = asarray(a) + if axis is None or a.ndim == 1: + return flatnotmasked_edges(a) + m = getmaskarray(a) + idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) + return [tuple(idx[i].min(axis).compressed() for i in range(a.ndim)), + tuple(idx[i].max(axis).compressed() for i in range(a.ndim)), ] + + +def flatnotmasked_contiguous(a): + """ + Find contiguous unmasked data in a masked array. + + Parameters + ---------- + a : array_like + The input array. + + Returns + ------- + slice_list : list + A sorted sequence of `slice` objects (start index, end index). + + See Also + -------- + flatnotmasked_edges, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. 
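+
+    As the implementation below shows, runs of equal mask values are
+    grouped with `itertools.groupby` on the raveled mask, and one slice is
+    emitted per unmasked run.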
+ + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_contiguous(a) + [slice(0, 10, None)] + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_contiguous(a) + [slice(3, 5, None), slice(6, 9, None)] + >>> a[:] = np.ma.masked + >>> np.ma.flatnotmasked_contiguous(a) + [] + + """ + m = getmask(a) + if m is nomask: + return [slice(0, a.size)] + i = 0 + result = [] + for (k, g) in itertools.groupby(m.ravel()): + n = len(list(g)) + if not k: + result.append(slice(i, i + n)) + i += n + return result + + +def notmasked_contiguous(a, axis=None): + """ + Find contiguous unmasked data in a masked array along the given axis. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array, and this + is the same as `flatnotmasked_contiguous`. + + Returns + ------- + endpoints : list + A list of slices (start and end indexes) of unmasked indexes + in the array. + + If the input is 2d and axis is specified, the result is a list of lists. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(12).reshape((3, 4)) + >>> mask = np.zeros_like(a) + >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 + >>> ma = np.ma.array(a, mask=mask) + >>> ma + masked_array( + data=[[0, --, 2, 3], + [--, --, --, 7], + [8, --, --, 11]], + mask=[[False, True, False, False], + [ True, True, True, False], + [False, True, True, False]], + fill_value=999999) + >>> np.array(ma[~ma.mask]) + array([ 0, 2, 3, 7, 8, 11]) + + >>> np.ma.notmasked_contiguous(ma) + [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)] + + >>> np.ma.notmasked_contiguous(ma, axis=0) + [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]] + + >>> np.ma.notmasked_contiguous(ma, axis=1) + [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] + + """ # noqa: E501 + a = asarray(a) + nd = a.ndim + if nd > 2: + raise NotImplementedError("Currently limited to at most 2D array.") + if axis is None or nd == 1: + return flatnotmasked_contiguous(a) + # + result = [] + # + other = (axis + 1) % 2 + idx = [0, 0] + idx[axis] = slice(None, None) + # + for i in range(a.shape[other]): + idx[other] = i + result.append(flatnotmasked_contiguous(a[tuple(idx)])) + return result + + +def _ezclump(mask): + """ + Finds the clumps (groups of data with the same values) for a 1D bool array. + + Returns a series of slices. + """ + if mask.ndim > 1: + mask = mask.ravel() + idx = (mask[1:] ^ mask[:-1]).nonzero() + idx = idx[0] + 1 + + if mask[0]: + if len(idx) == 0: + return [slice(0, mask.size)] + + r = [slice(0, idx[0])] + r.extend((slice(left, right) + for left, right in zip(idx[1:-1:2], idx[2::2]))) + else: + if len(idx) == 0: + return [] + + r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] + + if mask[-1]: + r.append(slice(idx[-1], mask.size)) + return r + + +def clump_unmasked(a): + """ + Return list of slices corresponding to the unmasked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. 
+
+    Returns
+    -------
+    slices : list of slice
+        The list of slices, one for each contiguous region of unmasked
+        elements in `a`.
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+    notmasked_contiguous, clump_masked
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.ma.masked_array(np.arange(10))
+    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+    >>> np.ma.clump_unmasked(a)
+    [slice(3, 6, None), slice(7, 8, None)]
+
+    """
+    mask = getattr(a, '_mask', nomask)
+    if mask is nomask:
+        return [slice(0, a.size)]
+    return _ezclump(~mask)
+
+
+def clump_masked(a):
+    """
+    Returns a list of slices corresponding to the masked clumps of a 1-D array.
+    (A "clump" is defined as a contiguous region of the array).
+
+    Parameters
+    ----------
+    a : ndarray
+        A one-dimensional masked array.
+
+    Returns
+    -------
+    slices : list of slice
+        The list of slices, one for each contiguous region of masked elements
+        in `a`.
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+    notmasked_contiguous, clump_unmasked
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.ma.masked_array(np.arange(10))
+    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+    >>> np.ma.clump_masked(a)
+    [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
+
+    """
+    mask = ma.getmask(a)
+    if mask is nomask:
+        return []
+    return _ezclump(mask)
+
+
+###############################################################################
+#                            Polynomial fit                                   #
+###############################################################################
+
+
+def vander(x, n=None):
+    """
+    Masked values in the input array result in rows of zeros.
+
+    """
+    _vander = np.vander(x, n)
+    m = getmask(x)
+    if m is not nomask:
+        _vander[m] = 0
+    return _vander
+
+
+vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
+    """
+    Any masked values in x are propagated in y, and vice versa.
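+
+    Masked points are simply dropped before delegating to `numpy.polyfit`,
+    roughly (an illustrative sketch of the code below, where ``m`` is the
+    combined mask of `x`, `y` and `w`)::
+
+        not_m = ~m
+        np.polyfit(x[not_m], y[not_m], deg)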
+ + """ + x = asarray(x) + y = asarray(y) + + m = getmask(x) + if y.ndim == 1: + m = mask_or(m, getmask(y)) + elif y.ndim == 2: + my = getmask(mask_rows(y)) + if my is not nomask: + m = mask_or(m, my[:, 0]) + else: + raise TypeError("Expected a 1D or 2D array for y!") + + if w is not None: + w = asarray(w) + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + m = mask_or(m, getmask(w)) + + if m is not nomask: + not_m = ~m + if w is not None: + w = w[not_m] + return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov) + else: + return np.polyfit(x, y, deg, rcond, full, w, cov) + + +polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/extras.pyi b/.venv/lib/python3.12/site-packages/numpy/ma/extras.pyi new file mode 100644 index 0000000..c3f9fcd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/extras.pyi @@ -0,0 +1,134 @@ +from _typeshed import Incomplete + +import numpy as np +from numpy.lib._function_base_impl import average +from numpy.lib._index_tricks_impl import AxisConcatenator + +from .core import MaskedArray, dot + +__all__ = [ + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "corrcoef", + "count_masked", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "in1d", + "intersect1d", + "isin", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "union1d", + "unique", + "vander", + "vstack", +] + +def count_masked(arr, axis=...): ... +def masked_all(shape, dtype=...): ... +def masked_all_like(arr): ... + +class _fromnxfunction: + __name__: Incomplete + __doc__: Incomplete + def __init__(self, funcname) -> None: ... + def getdoc(self): ... + def __call__(self, *args, **params): ... + +class _fromnxfunction_single(_fromnxfunction): + def __call__(self, x, *args, **params): ... + +class _fromnxfunction_seq(_fromnxfunction): + def __call__(self, x, *args, **params): ... + +class _fromnxfunction_allargs(_fromnxfunction): + def __call__(self, *args, **params): ... + +atleast_1d: _fromnxfunction_allargs +atleast_2d: _fromnxfunction_allargs +atleast_3d: _fromnxfunction_allargs + +vstack: _fromnxfunction_seq +row_stack: _fromnxfunction_seq +hstack: _fromnxfunction_seq +column_stack: _fromnxfunction_seq +dstack: _fromnxfunction_seq +stack: _fromnxfunction_seq + +hsplit: _fromnxfunction_single +diagflat: _fromnxfunction_single + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... +def apply_over_axes(func, a, axes): ... +def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... +def compress_nd(x, axis=...): ... +def compress_rowcols(x, axis=...): ... +def compress_rows(a): ... +def compress_cols(a): ... +def mask_rows(a, axis=...): ... +def mask_cols(a, axis=...): ... +def ediff1d(arr, to_end=..., to_begin=...): ... +def unique(ar1, return_index=..., return_inverse=...): ... +def intersect1d(ar1, ar2, assume_unique=...): ... +def setxor1d(ar1, ar2, assume_unique=...): ... 
+def in1d(ar1, ar2, assume_unique=..., invert=...): ...
+def isin(element, test_elements, assume_unique=..., invert=...): ...
+def union1d(ar1, ar2): ...
+def setdiff1d(ar1, ar2, assume_unique=...): ...
+def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+
+class MAxisConcatenator(AxisConcatenator):
+    @staticmethod
+    def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    @classmethod
+    def makemat(cls, arr: Incomplete) -> Incomplete: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleVariableOverride]
+
+class mr_class(MAxisConcatenator):
+    def __init__(self) -> None: ...
+
+mr_: mr_class
+
+def ndenumerate(a, compressed=...): ...
+def flatnotmasked_edges(a): ...
+def notmasked_edges(a, axis=...): ...
+def flatnotmasked_contiguous(a): ...
+def notmasked_contiguous(a, axis=...): ...
+def clump_unmasked(a): ...
+def clump_masked(a): ...
+def vander(x, n=...): ...
+def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ...
+
+#
+def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/mrecords.py b/.venv/lib/python3.12/site-packages/numpy/ma/mrecords.py
new file mode 100644
index 0000000..835f3ce
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ma/mrecords.py
@@ -0,0 +1,773 @@
+""":mod:`numpy.ma.mrecords`
+
+Defines the equivalent of :class:`numpy.recarray` for masked arrays,
+where fields can be accessed as attributes.
+Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
+and the masking of individual fields.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+
+"""
+# We should make sure that no field is called '_mask', 'mask', '_fieldmask',
+# or any other restricted keyword. An idea would be to not bother in the
+# first place, and instead rename any invalid field with a trailing
+# underscore. Maybe we could just overload the parser function?
+
+import warnings
+
+import numpy as np
+import numpy.ma as ma
+
+_byteorderconv = np._core.records._byteorderconv
+
+
+_check_fill_value = ma.core._check_fill_value
+
+
+__all__ = [
+    'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
+    'fromtextfile', 'addfield',
+]
+
+reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
+
+
+def _checknames(descr, names=None):
+    """
+    Checks that the field names in ``descr`` are not reserved keywords.
+
+    If this is the case, a default 'f%i' is substituted. If the argument
+    `names` is not None, updates the field names to valid names.
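+
+    For instance, ``names=['a', '_mask']`` yields the field names
+    ``('a', 'f1')``: ``'_mask'`` is a reserved field name, so the default
+    name ``'f1'`` is used in its place.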
+ + """ + ndescr = len(descr) + default_names = [f'f{i}' for i in range(ndescr)] + if names is None: + new_names = default_names + else: + if isinstance(names, (tuple, list)): + new_names = names + elif isinstance(names, str): + new_names = names.split(',') + else: + raise NameError(f'illegal input names {names!r}') + nnames = len(new_names) + if nnames < ndescr: + new_names += default_names[nnames:] + ndescr = [] + for (n, d, t) in zip(new_names, default_names, descr.descr): + if n in reserved_fields: + if t[0] in reserved_fields: + ndescr.append((d, t[1])) + else: + ndescr.append(t) + else: + ndescr.append((n, t[1])) + return np.dtype(ndescr) + + +def _get_fieldmask(self): + mdescr = [(n, '|b1') for n in self.dtype.names] + fdmask = np.empty(self.shape, dtype=mdescr) + fdmask.flat = tuple([False] * len(mdescr)) + return fdmask + + +class MaskedRecords(ma.MaskedArray): + """ + + Attributes + ---------- + _data : recarray + Underlying data, as a record array. + _mask : boolean array + Mask of the records. A record is masked when all its fields are + masked. + _fieldmask : boolean recarray + Record array of booleans, setting the mask of each individual field + of each record. + _fill_value : record + Filling values for each field. + + """ + + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False, + mask=ma.nomask, hard_mask=False, fill_value=None, keep_mask=True, + copy=False, + **options): + + self = np.recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) + + mdtype = ma.make_mask_descr(self.dtype) + if mask is ma.nomask or not np.size(mask): + if not keep_mask: + self._mask = tuple([False] * len(mdtype)) + else: + mask = np.array(mask, copy=copy) + if mask.shape != self.shape: + (nd, nm) = (self.size, mask.size) + if nm == 1: + mask = np.resize(mask, self.shape) + elif nm == nd: + mask = np.reshape(mask, self.shape) + else: + msg = (f"Mask and data not compatible: data size is {nd}," + " mask size is {nm}.") + raise ma.MAError(msg) + if not keep_mask: + self.__setmask__(mask) + self._sharedmask = True + else: + if mask.dtype == mdtype: + _mask = mask + else: + _mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + self._mask = _mask + return self + + def __array_finalize__(self, obj): + # Make sure we have a _fieldmask by default + _mask = getattr(obj, '_mask', None) + if _mask is None: + objmask = getattr(obj, '_mask', ma.nomask) + _dtype = np.ndarray.__getattribute__(self, 'dtype') + if objmask is ma.nomask: + _mask = ma.make_mask_none(self.shape, dtype=_dtype) + else: + mdescr = ma.make_mask_descr(_dtype) + _mask = np.array([tuple([m] * len(mdescr)) for m in objmask], + dtype=mdescr).view(np.recarray) + # Update some of the attributes + _dict = self.__dict__ + _dict.update(_mask=_mask) + self._update_from(obj) + if _dict['_baseclass'] == np.ndarray: + _dict['_baseclass'] = np.recarray + + @property + def _data(self): + """ + Returns the data as a recarray. + + """ + return np.ndarray.view(self, np.recarray) + + @property + def _fieldmask(self): + """ + Alias to mask. 
+ + """ + return self._mask + + def __len__(self): + """ + Returns the length + + """ + # We have more than one record + if self.ndim: + return len(self._data) + # We have only one record: return the nb of fields + return len(self.dtype) + + def __getattribute__(self, attr): + try: + return object.__getattribute__(self, attr) + except AttributeError: + # attr must be a fieldname + pass + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + # So far, so good + _localdict = np.ndarray.__getattribute__(self, '__dict__') + _data = np.ndarray.view(self, _localdict['_baseclass']) + obj = _data.getfield(*res) + if obj.dtype.names is not None: + raise NotImplementedError("MaskedRecords is currently limited to" + "simple records.") + # Get some special attributes + # Reset the object's mask + hasmasked = False + _mask = _localdict.get('_mask', None) + if _mask is not None: + try: + _mask = _mask[attr] + except IndexError: + # Couldn't find a mask: use the default (nomask) + pass + tp_len = len(_mask.dtype) + hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any() + if (obj.shape or hasmasked): + obj = obj.view(ma.MaskedArray) + obj._baseclass = np.ndarray + obj._isfield = True + obj._mask = _mask + # Reset the field values + _fill_value = _localdict.get('_fill_value', None) + if _fill_value is not None: + try: + obj._fill_value = _fill_value[attr] + except ValueError: + obj._fill_value = None + else: + obj = obj.item() + return obj + + def __setattr__(self, attr, val): + """ + Sets the attribute attr to the value val. + + """ + # Should we call __setmask__ first ? + if attr in ['mask', 'fieldmask']: + self.__setmask__(val) + return + # Create a shortcut (so that we don't have to call getattr all the time) + _localdict = object.__getattribute__(self, '__dict__') + # Check whether we're creating a new field + newattr = attr not in _localdict + try: + # Is attr a generic attribute ? + ret = object.__setattr__(self, attr, val) + except Exception: + # Not a generic attribute: exit if it's not a valid field + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = np.ndarray.__getattribute__(self, '_optinfo') or {} + if not (attr in fielddict or attr in optinfo): + raise + else: + # Get the list of names + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} + # Check the attribute + if attr not in fielddict: + return ret + if newattr: + # We just added this one or this setattr worked on an + # internal attribute. + try: + object.__delattr__(self, attr) + except Exception: + return ret + # Let's try to set the field + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + + if val is ma.masked: + _fill_value = _localdict['_fill_value'] + if _fill_value is not None: + dval = _localdict['_fill_value'][attr] + else: + dval = val + mval = True + else: + dval = ma.filled(val) + mval = ma.getmaskarray(val) + obj = np.ndarray.__getattribute__(self, '_data').setfield(dval, *res) + _localdict['_mask'].__setitem__(attr, mval) + return obj + + def __getitem__(self, indx): + """ + Returns all the fields sharing the same fieldname base. + + The fieldname base is either `_data` or `_mask`. 
+ + """ + _localdict = self.__dict__ + _mask = np.ndarray.__getattribute__(self, '_mask') + _data = np.ndarray.view(self, _localdict['_baseclass']) + # We want a field + if isinstance(indx, str): + # Make sure _sharedmask is True to propagate back to _fieldmask + # Don't use _set_mask, there are some copies being made that + # break propagation Don't force the mask to nomask, that wreaks + # easy masking + obj = _data[indx].view(ma.MaskedArray) + obj._mask = _mask[indx] + obj._sharedmask = True + fval = _localdict['_fill_value'] + if fval is not None: + obj._fill_value = fval[indx] + # Force to masked if the mask is True + if not obj.ndim and obj._mask: + return ma.masked + return obj + # We want some elements. + # First, the data. + obj = np.asarray(_data[indx]).view(mrecarray) + obj._mask = np.asarray(_mask[indx]).view(np.recarray) + return obj + + def __setitem__(self, indx, value): + """ + Sets the given record to value. + + """ + ma.MaskedArray.__setitem__(self, indx, value) + if isinstance(indx, str): + self._mask[indx] = ma.getmaskarray(value) + + def __str__(self): + """ + Calculates the string representation. + + """ + if self.size > 1: + mstr = [f"({','.join([str(i) for i in s])})" + for s in zip(*[getattr(self, f) for f in self.dtype.names])] + return f"[{', '.join(mstr)}]" + else: + mstr = [f"{','.join([str(i) for i in s])}" + for s in zip([getattr(self, f) for f in self.dtype.names])] + return f"({', '.join(mstr)})" + + def __repr__(self): + """ + Calculates the repr representation. + + """ + _names = self.dtype.names + fmt = f"%{max(len(n) for n in _names) + 4}s : %s" + reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] + reprstr.insert(0, 'masked_records(') + reprstr.extend([fmt % (' fill_value', self.fill_value), + ' )']) + return str("\n".join(reprstr)) + + def view(self, dtype=None, type=None): + """ + Returns a view of the mrecarray. + + """ + # OK, basic copy-paste from MaskedArray.view. + if dtype is None: + if type is None: + output = np.ndarray.view(self) + else: + output = np.ndarray.view(self, type) + # Here again. + elif type is None: + try: + if issubclass(dtype, np.ndarray): + output = np.ndarray.view(self, dtype) + else: + output = np.ndarray.view(self, dtype) + # OK, there's the change + except TypeError: + dtype = np.dtype(dtype) + # we need to revert to MaskedArray, but keeping the possibility + # of subclasses (eg, TimeSeriesRecords), so we'll force a type + # set to the first parent + if dtype.fields is None: + basetype = self.__class__.__bases__[0] + output = self.__array__().view(dtype, basetype) + output._update_from(self) + else: + output = np.ndarray.view(self, dtype) + output._fill_value = None + else: + output = np.ndarray.view(self, dtype, type) + # Update the mask, just like in MaskedArray.view + if (getattr(output, '_mask', ma.nomask) is not ma.nomask): + mdtype = ma.make_mask_descr(output.dtype) + output._mask = self._mask.view(mdtype, np.ndarray) + output._mask.shape = output.shape + return output + + def harden_mask(self): + """ + Forces the mask to hard. + + """ + self._hardmask = True + + def soften_mask(self): + """ + Forces the mask to soft + + """ + self._hardmask = False + + def copy(self): + """ + Returns a copy of the masked record. + + """ + copied = self._data.copy().view(type(self)) + copied._mask = self._mask.copy() + return copied + + def tolist(self, fill_value=None): + """ + Return the data portion of the array as a list. + + Data items are converted to the nearest compatible Python type. 
+        Masked values are converted to fill_value.  If fill_value is None,
+        the corresponding entries in the output list will be ``None``.
+
+        """
+        if fill_value is not None:
+            return self.filled(fill_value).tolist()
+        result = np.array(self.filled().tolist(), dtype=object)
+        mask = np.array(self._mask.tolist())
+        result[mask] = None
+        return result.tolist()
+
+    def __getstate__(self):
+        """Return the internal state of the masked array.
+
+        This is for pickling.
+
+        """
+        state = (1,
+                 self.shape,
+                 self.dtype,
+                 self.flags.fnc,
+                 self._data.tobytes(),
+                 self._mask.tobytes(),
+                 self._fill_value,
+                 )
+        return state
+
+    def __setstate__(self, state):
+        """
+        Restore the internal state of the masked array.
+
+        This is for pickling.  ``state`` is typically the output of
+        ``__getstate__``, and is a 7-tuple:
+
+        - a version number (currently 1)
+        - a tuple giving the shape of the data
+        - a typecode for the data
+        - a flag recording the memory layout (``flags.fnc``)
+        - a binary string for the data
+        - a binary string for the mask
+        - the fill value
+
+        """
+        (ver, shp, typ, isf, raw, msk, flv) = state
+        np.ndarray.__setstate__(self, (shp, typ, isf, raw))
+        mdtype = np.dtype([(k, np.bool) for (k, _) in self.dtype.descr])
+        self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
+        self.fill_value = flv
+
+    def __reduce__(self):
+        """
+        Return a 3-tuple for pickling a MaskedArray.
+
+        """
+        return (_mrreconstruct,
+                (self.__class__, self._baseclass, (0,), 'b',),
+                self.__getstate__())
+
+
+def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
+    """
+    Build a new MaskedArray from the information stored in a pickle.
+
+    """
+    _data = np.ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
+    _mask = np.ndarray.__new__(np.ndarray, baseshape, 'b1')
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
+
+mrecarray = MaskedRecords
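+# A minimal round-trip sketch (illustrative, not part of the upstream
+# source): __reduce__/__setstate__ above serialize the data buffer, the
+# per-field mask and the fill value together, so pickling preserves the
+# masked state end to end.
+#
+#     >>> import pickle
+#     >>> import numpy as np
+#     >>> from numpy.ma.mrecords import fromarrays
+#     >>> rec = fromarrays([np.ma.array([1, 2], mask=[0, 1])], names='a')
+#     >>> restored = pickle.loads(pickle.dumps(rec))
+#     >>> restored.a.tolist()
+#     [1, None]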
+ + """ + datalist = [ma.getdata(x) for x in arraylist] + masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist] + _array = np.rec.fromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) + _array._mask.flat = list(zip(*masklist)) + if fill_value is not None: + _array.fill_value = fill_value + return _array + + +def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None, + fill_value=None, mask=ma.nomask): + """ + Creates a MaskedRecords from a list of records. + + Parameters + ---------- + reclist : sequence + A list of records. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None,int}, optional + Number of records. If None, ``shape`` is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + mask : {nomask, sequence}, optional. + External mask to apply on the data. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + # Grab the initial _fieldmask, if needed: + _mask = getattr(reclist, '_mask', None) + # Get the list of records. + if isinstance(reclist, np.ndarray): + # Make sure we don't have some hidden mask + if isinstance(reclist, ma.MaskedArray): + reclist = reclist.filled().view(np.ndarray) + # Grab the initial dtype, just in case + if dtype is None: + dtype = reclist.dtype + reclist = reclist.tolist() + mrec = np.rec.fromrecords(reclist, dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, + aligned=aligned, byteorder=byteorder).view(mrecarray) + # Set the fill_value if needed + if fill_value is not None: + mrec.fill_value = fill_value + # Now, let's deal w/ the mask + if mask is not ma.nomask: + mask = np.asarray(mask) + maskrecordlength = len(mask.dtype) + if maskrecordlength: + mrec._mask.flat = mask + elif mask.ndim == 2: + mrec._mask.flat = [tuple(m) for m in mask] + else: + mrec.__setmask__(mask) + if _mask is not None: + mrec._mask[:] = _mask + return mrec + + +def _guessvartypes(arr): + """ + Tries to guess the dtypes of the str_ ndarray `arr`. + + Guesses by testing element-wise conversion. Returns a list of dtypes. + The array is first converted to ndarray. If the array is 2D, the test + is performed on the first line. An exception is raised if the file is + 3D or more. + + """ + vartypes = [] + arr = np.asarray(arr) + if arr.ndim == 2: + arr = arr[0] + elif arr.ndim > 2: + raise ValueError("The array should be 2D at most!") + # Start the conversion loop. + for f in arr: + try: + int(f) + except (ValueError, TypeError): + try: + float(f) + except (ValueError, TypeError): + try: + complex(f) + except (ValueError, TypeError): + vartypes.append(arr.dtype) + else: + vartypes.append(np.dtype(complex)) + else: + vartypes.append(np.dtype(float)) + else: + vartypes.append(np.dtype(int)) + return vartypes + + +def openfile(fname): + """ + Opens the file handle of file `fname`. 
+ + """ + # A file handle + if hasattr(fname, 'readline'): + return fname + # Try to open the file and guess its type + try: + f = open(fname) + except FileNotFoundError as e: + raise FileNotFoundError(f"No such file: '{fname}'") from e + if f.readline()[:2] != "\\x": + f.seek(0, 0) + return f + f.close() + raise NotImplementedError("Wow, binary file") + + +def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', + varnames=None, vartypes=None, + *, delimitor=np._NoValue): # backwards compatibility + """ + Creates a mrecarray from data stored in the file `filename`. + + Parameters + ---------- + fname : {file name/handle} + Handle of an opened file. + delimiter : {None, string}, optional + Alphanumeric character used to separate columns in the file. + If None, any (group of) white spacestring(s) will be used. + commentchar : {'#', string}, optional + Alphanumeric character used to mark the start of a comment. + missingchar : {'', string}, optional + String indicating missing data, and used to create the masks. + varnames : {None, sequence}, optional + Sequence of the variable names. If None, a list will be created from + the first non empty line of the file. + vartypes : {None, sequence}, optional + Sequence of the variables dtypes. If None, it will be estimated from + the first non-commented line. + + + Ultra simple: the varnames are in the header, one line""" + if delimitor is not np._NoValue: + if delimiter is not None: + raise TypeError("fromtextfile() got multiple values for argument " + "'delimiter'") + # NumPy 1.22.0, 2021-09-23 + warnings.warn("The 'delimitor' keyword argument of " + "numpy.ma.mrecords.fromtextfile() is deprecated " + "since NumPy 1.22.0, use 'delimiter' instead.", + DeprecationWarning, stacklevel=2) + delimiter = delimitor + + # Try to open the file. + ftext = openfile(fname) + + # Get the first non-empty line as the varnames + while True: + line = ftext.readline() + firstline = line[:line.find(commentchar)].strip() + _varnames = firstline.split(delimiter) + if len(_varnames) > 1: + break + if varnames is None: + varnames = _varnames + + # Get the data. + _variables = ma.masked_array([line.strip().split(delimiter) for line in ftext + if line[0] != commentchar and len(line) > 1]) + (_, nfields) = _variables.shape + ftext.close() + + # Try to guess the dtype. + if vartypes is None: + vartypes = _guessvartypes(_variables[0]) + else: + vartypes = [np.dtype(v) for v in vartypes] + if len(vartypes) != nfields: + msg = f"Attempting to {len(vartypes)} dtypes for {nfields} fields!" + msg += " Reverting to default." + warnings.warn(msg, stacklevel=2) + vartypes = _guessvartypes(_variables[0]) + + # Construct the descriptor. + mdescr = list(zip(varnames, vartypes)) + mfillv = [ma.default_fill_value(f) for f in vartypes] + + # Get the data and the mask. + # We just need a list of masked_arrays. It's easier to create it like that: + _mask = (_variables.T == missingchar) + _datalist = [ma.masked_array(a, mask=m, dtype=t, fill_value=f) + for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] + + return fromarrays(_datalist, dtype=mdescr) + + +def addfield(mrecord, newfield, newfieldname=None): + """Adds a new field to the masked record array + + Uses `newfield` as data and `newfieldname` as name. If `newfieldname` + is None, the new field name is set to 'fi', where `i` is the number of + existing fields. 
+ + """ + _data = mrecord._data + _mask = mrecord._mask + if newfieldname is None or newfieldname in reserved_fields: + newfieldname = f'f{len(_data.dtype)}' + newfield = ma.array(newfield) + # Get the new data. + # Create a new empty recarray + newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) + newdata = np.recarray(_data.shape, newdtype) + # Add the existing field + [newdata.setfield(_data.getfield(*f), *f) + for f in _data.dtype.fields.values()] + # Add the new field + newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) + newdata = newdata.view(MaskedRecords) + # Get the new mask + # Create a new empty recarray + newmdtype = np.dtype([(n, np.bool) for n in newdtype.names]) + newmask = np.recarray(_data.shape, newmdtype) + # Add the old masks + [newmask.setfield(_mask.getfield(*f), *f) + for f in _mask.dtype.fields.values()] + # Add the mask of the new field + newmask.setfield(ma.getmaskarray(newfield), + *newmask.dtype.fields[newfieldname]) + newdata._mask = newmask + return newdata diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/mrecords.pyi b/.venv/lib/python3.12/site-packages/numpy/ma/mrecords.pyi new file mode 100644 index 0000000..cae687a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/mrecords.pyi @@ -0,0 +1,96 @@ +from typing import Any, TypeVar + +from numpy import dtype + +from . import MaskedArray + +__all__ = [ + "MaskedRecords", + "mrecarray", + "fromarrays", + "fromrecords", + "fromtextfile", + "addfield", +] + +_ShapeT_co = TypeVar("_ShapeT_co", covariant=True, bound=tuple[int, ...]) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, covariant=True) + +class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co]): + def __new__( + cls, + shape, + dtype=..., + buf=..., + offset=..., + strides=..., + formats=..., + names=..., + titles=..., + byteorder=..., + aligned=..., + mask=..., + hard_mask=..., + fill_value=..., + keep_mask=..., + copy=..., + **options, + ): ... + _mask: Any + _fill_value: Any + @property + def _data(self): ... + @property + def _fieldmask(self): ... + def __array_finalize__(self, obj): ... + def __len__(self): ... + def __getattribute__(self, attr): ... + def __setattr__(self, attr, val): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def view(self, dtype=..., type=...): ... + def harden_mask(self): ... + def soften_mask(self): ... + def copy(self): ... + def tolist(self, fill_value=...): ... + def __reduce__(self): ... + +mrecarray = MaskedRecords + +def fromarrays( + arraylist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., +): ... + +def fromrecords( + reclist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., + mask=..., +): ... + +def fromtextfile( + fname, + delimiter=..., + commentchar=..., + missingchar=..., + varnames=..., + vartypes=..., + # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 + # delimitor=..., +): ... + +def addfield(mrecord, newfield, newfieldname=...): ... 
diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..c092311 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_arrayobject.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_arrayobject.cpython-312.pyc new file mode 100644 index 0000000..dc78204 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_arrayobject.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-312.pyc new file mode 100644 index 0000000..0c928d4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-312.pyc new file mode 100644 index 0000000..bab6c69 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-312.pyc new file mode 100644 index 0000000..3044246 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-312.pyc new file mode 100644 index 0000000..b06c9ae Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-312.pyc new file mode 100644 index 0000000..ee9fefd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-312.pyc new file mode 100644 index 0000000..0ae4af7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-312.pyc new file mode 100644 index 0000000..57db03b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_arrayobject.py b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_arrayobject.py new file mode 100644 index 0000000..2000cea --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_arrayobject.py @@ -0,0 +1,40 @@ +import pytest + +import numpy as np +from numpy.ma import masked_array +from numpy.testing import assert_array_equal + + +def test_matrix_transpose_raises_error_for_1d(): + msg = "matrix transpose with ndim < 2 is undefined" + ma_arr = masked_array(data=[1, 2, 3, 4, 5, 6], + mask=[1, 0, 1, 1, 1, 0]) + with pytest.raises(ValueError, match=msg): + ma_arr.mT + + +def test_matrix_transpose_equals_transpose_2d(): + ma_arr = masked_array(data=[[1, 2, 3], [4, 5, 6]], + mask=[[1, 0, 1], [1, 1, 0]]) + assert_array_equal(ma_arr.T, ma_arr.mT) + + +ARRAY_SHAPES_TO_TEST = ( + (5, 2), + (5, 2, 3), + (5, 2, 3, 4), +) + + +@pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST) +def test_matrix_transpose_equals_swapaxes(shape): + num_of_axes = len(shape) + vec = np.arange(shape[-1]) + arr = np.broadcast_to(vec, shape) + + rng = np.random.default_rng(42) + mask = rng.choice([0, 1], size=shape) + ma_arr = masked_array(data=arr, mask=mask) + + tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) + assert_array_equal(tgt, ma_arr.mT) diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_core.py b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_core.py new file mode 100644 index 0000000..091ba6c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_core.py @@ -0,0 +1,5886 @@ +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +""" +__author__ = "Pierre GF Gerard-Marchant" + +import copy +import itertools +import operator +import pickle +import sys +import textwrap +import warnings +from functools import reduce + +import pytest + +import numpy as np +import numpy._core.fromnumeric as fromnumeric +import numpy._core.umath as umath +import numpy.ma.core +from numpy import ndarray +from numpy._utils import asbytes +from numpy.exceptions import AxisError +from numpy.ma.core import ( + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + angle, + anom, + arange, + arccos, + arccosh, + arcsin, + arctan, + arctan2, + argsort, + array, + asarray, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + default_fill_value, + diag, + divide, + empty, + empty_like, + equal, + exp, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + fromflex, + getmask, + getmaskarray, + greater, + greater_equal, + identity, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + make_mask_descr, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + nomask, + not_equal, + ones, + ones_like, + outer, + power, + product, + put, + putmask, + ravel, + repeat, + reshape, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + take, + tan, + tanh, + transpose, + where, + zeros, + zeros_like, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + 
assert_equal_records, + assert_mask_equal, + assert_not_equal, + fail_if_equal, +) +from numpy.testing import ( + IS_WASM, + assert_raises, + assert_warns, + suppress_warnings, + temppath, +) +from numpy.testing._private.utils import requires_memory + +pi = np.pi + + +suppress_copy_mask_on_assignment = suppress_warnings() +suppress_copy_mask_on_assignment.filter( + numpy.ma.core.MaskedArrayFutureWarning, + "setting an item on a masked array which has a shared mask will not copy") + + +# For parametrized numeric testing +num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] +num_ids = [dt_.char for dt_ in num_dts] + + +class TestMaskedArray: + # Base test class for MaskedArrays. + + def setup_method(self): + # Base data definition. + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1e+20, x) + xm.set_fill_value(1e+20) + self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + + def test_basicattributes(self): + # Tests some basic array attributes. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a.ndim, 1) + assert_equal(b.ndim, 1) + assert_equal(a.size, 3) + assert_equal(b.size, 3) + assert_equal(a.shape, (3,)) + assert_equal(b.shape, (3,)) + + def test_basic0d(self): + # Checks masking a scalar + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0, mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + x = array(0, mask=1) + assert_(x.filled().dtype is x._data.dtype) + + def test_basic1d(self): + # Test of basic array creation and properties in 1 dimension. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_((xm - ym).filled(0).any()) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) + s = x.shape + assert_equal(np.shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(zm.dtype, z.dtype) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) + assert_array_equal(xm, xf) + assert_array_equal(filled(xm, 1.e20), xf) + assert_array_equal(x, xm) + + def test_basic2d(self): + # Test of basic array creation and properties in 2 dimensions. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) + assert_equal(xm, xf) + assert_equal(filled(xm, 1.e20), xf) + assert_equal(x, xm) + + def test_concatenate_basic(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # basic concatenation + assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) + assert_equal(np.concatenate((x, y)), concatenate((x, y))) + assert_equal(np.concatenate((x, y)), concatenate((xm, y))) + assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) + + def test_concatenate_alongaxis(self): + # Tests concatenations. 
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # Concatenation along an axis + s = (3, 4) + x.shape = y.shape = xm.shape = ym.shape = s + assert_equal(xm.mask, np.reshape(m1, s)) + assert_equal(ym.mask, np.reshape(m2, s)) + xmym = concatenate((xm, ym), 1) + assert_equal(np.concatenate((x, y), 1), xmym) + assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) + + x = zeros(2) + y = array(ones(2), mask=[False, True]) + z = concatenate((x, y)) + assert_array_equal(z, [0, 0, 1, 1]) + assert_array_equal(z.mask, [False, False, False, True]) + z = concatenate((y, x)) + assert_array_equal(z, [1, 1, 0, 0]) + assert_array_equal(z.mask, [False, True, False, False]) + + def test_concatenate_flexible(self): + # Tests the concatenation on flexible arrays. + data = masked_array(list(zip(np.random.rand(10), + np.arange(10))), + dtype=[('a', float), ('b', int)]) + + test = concatenate([data[:5], data[5:]]) + assert_equal_records(test, data) + + def test_creation_ndmin(self): + # Check the use of ndmin + x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) + assert_equal(x.shape, (1, 3)) + assert_equal(x._data, [[1, 2, 3]]) + assert_equal(x._mask, [[1, 0, 0]]) + + def test_creation_ndmin_from_maskedarray(self): + # Make sure we're not losing the original mask w/ ndmin + x = array([1, 2, 3]) + x[-1] = masked + xx = array(x, ndmin=2, dtype=float) + assert_equal(x.shape, x._mask.shape) + assert_equal(xx.shape, xx._mask.shape) + + def test_creation_maskcreation(self): + # Tests how masks are initialized at the creation of Maskedarrays. + data = arange(24, dtype=float) + data[[3, 6, 15]] = masked + dma_1 = MaskedArray(data) + assert_equal(dma_1.mask, data.mask) + dma_2 = MaskedArray(dma_1) + assert_equal(dma_2.mask, dma_1.mask) + dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) + fail_if_equal(dma_3.mask, dma_1.mask) + + x = array([1, 2, 3], mask=True) + assert_equal(x._mask, [True, True, True]) + x = array([1, 2, 3], mask=False) + assert_equal(x._mask, [False, False, False]) + y = array([1, 2, 3], mask=x._mask, copy=False) + assert_(np.may_share_memory(x.mask, y.mask)) + y = array([1, 2, 3], mask=x._mask, copy=True) + assert_(not np.may_share_memory(x.mask, y.mask)) + x = array([1, 2, 3], mask=None) + assert_equal(x._mask, [False, False, False]) + + def test_masked_singleton_array_creation_warns(self): + # The first works, but should not (ideally), there may be no way + # to solve this, however, as long as `np.ma.masked` is an ndarray. + np.array(np.ma.masked) + with pytest.warns(UserWarning): + # Tries to create a float array, using `float(np.ma.masked)`. + # We may want to define this is invalid behaviour in the future! + # (requiring np.ma.masked to be a known NumPy scalar probably + # with a DType.) + np.array([3., np.ma.masked]) + + def test_creation_with_list_of_maskedarrays(self): + # Tests creating a masked array from a list of masked arrays. 
+ x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) + + x.mask = nomask + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_(data.mask is nomask) + + def test_creation_with_list_of_maskedarrays_no_bool_cast(self): + # Tests the regression in gh-18551 + masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) + normal_int = np.arange(2) + res = np.ma.asarray([masked_str, normal_int], dtype="U21") + assert_array_equal(res.mask, [[True, False], [False, False]]) + + # The above only failed due a long chain of oddity, try also with + # an object array that cannot be converted to bool always: + class NotBool: + def __bool__(self): + raise ValueError("not a bool!") + masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) + # Check that the NotBool actually fails like we would expect: + with pytest.raises(ValueError, match="not a bool!"): + np.asarray([masked_obj], dtype=bool) + + res = np.ma.asarray([masked_obj, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + + def test_creation_from_ndarray_with_padding(self): + x = np.array([('A', 0)], dtype={'names': ['f0', 'f1'], + 'formats': ['S4', 'i8'], + 'offsets': [0, 8]}) + array(x) # used to fail due to 'V' padding field in x.dtype.descr + + def test_unknown_keyword_parameter(self): + with pytest.raises(TypeError, match="unexpected keyword argument"): + MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. + + def test_asarray(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm.fill_value = -9999 + xm._hardmask = True + xmm = asarray(xm) + assert_equal(xmm._data, xm._data) + assert_equal(xmm._mask, xm._mask) + assert_equal(xmm.fill_value, xm.fill_value) + assert_equal(xmm._hardmask, xm._hardmask) + + def test_asarray_default_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m) + assert_(new_m.flags.c_contiguous) + + def test_asarray_enforce_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m, order='C') + assert_(new_m.flags.c_contiguous) + + def test_fix_invalid(self): + # Checks fix_invalid. + with np.errstate(invalid='ignore'): + data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) + data_fixed = fix_invalid(data) + assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) + assert_equal(data_fixed._mask, [1., 0., 1.]) + + def test_maskedelement(self): + # Test of masked element + x = arange(6) + x[1] = masked + assert_(str(masked) == '--') + assert_(x[1] is masked) + assert_equal(filled(x[1], 0), 0) + + def test_set_element_as_object(self): + # Tests setting elements with object + a = empty(1, dtype=object) + x = (1, 2, 3, 4, 5) + a[0] = x + assert_equal(a[0], x) + assert_(a[0] is x) + + import datetime + dt = datetime.datetime.now() + a[0] = dt + assert_(a[0] is dt) + + def test_indexing(self): + # Tests conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
+ assert_equal(np.sort(x1), sort(x2, endwith=False)) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_equal(x1[2], x2[2]) + assert_equal(x1[2:5], x2[2:5]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[2] = 9 + x2[2] = 9 + assert_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + assert_equal(x1, x2) + x2[1] = masked + assert_equal(x1, x2) + x2[1:3] = masked + assert_equal(x1, x2) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_equal(3.0, x2.fill_value) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_setitem_no_warning(self): + # Setitem shouldn't warn, because the assignment might be masked + # and warning for a masked assignment is weird (see gh-23000) + # (When the value is masked, otherwise a warning would be acceptable + # but is not given currently.) + x = np.ma.arange(60).reshape((6, 10)) + index = (slice(1, 5, 2), [7, 5]) + value = np.ma.masked_all((2, 2)) + value._data[...] = np.inf # not a valid integer... + x[index] = value + # The masked scalar is special cased, but test anyway (it's NaN): + x[...] = np.ma.masked + # Finally, a large value that cannot be cast to the float32 `x` + x = np.ma.arange(3., dtype=np.float32) + value = np.ma.array([2e234, 1, 1], mask=[True, False, False]) + x[...] = value + x[[0, 1, 2]] = value + + @suppress_copy_mask_on_assignment + def test_copy(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_equal(y1._data.__array_interface__, x1.__array_interface__) + assert_(allequal(x1, y1.data)) + assert_equal(y1._mask.__array_interface__, m.__array_interface__) + + y1a = array(y1) + # Default for masked array is not to copy; see gh-10318. 
+ assert_(y1a._data.__array_interface__ == + y1._data.__array_interface__) + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3) + assert_(y2._data.__array_interface__ == x1.__array_interface__) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._data.__array_interface__ != x1.__array_interface__) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_equal(concatenate([x4, x4]), y4) + assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = repeat(x4, 2, axis=0) + assert_equal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert_equal(y5, y7) + y8 = x4.repeat(2, 0) + assert_equal(y5, y8) + + y9 = x4.copy() + assert_equal(y9._data, x4._data) + assert_equal(y9._mask, x4._mask) + + x = masked_array([1, 2, 3], mask=[0, 1, 0]) + # Copy is False by default + y = masked_array(x) + assert_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) + y = masked_array(x, copy=True) + assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + + def test_copy_0d(self): + # gh-9430 + x = np.ma.array(43, mask=True) + xc = x.copy() + assert_equal(xc.mask, True) + + def test_copy_on_python_builtins(self): + # Tests copy works on python builtins (issue#8019) + assert_(isMaskedArray(np.ma.copy([1, 2, 3]))) + assert_(isMaskedArray(np.ma.copy((1, 2, 3)))) + + def test_copy_immutable(self): + # Tests that the copy method is immutable, GitHub issue #5247 + a = np.ma.array([1, 2, 3]) + b = np.ma.array([4, 5, 6]) + a_copy_method = a.copy + b.copy + assert_equal(a_copy_method(), [1, 2, 3]) + + def test_deepcopy(self): + from copy import deepcopy + a = array([0, 1, 2], mask=[False, True, False]) + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + assert_not_equal(id(a._mask), id(copied._mask)) + + copied[1] = 1 + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + copied.mask[1] = False + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + def test_format(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(format(a), "[0 -- 2]") + assert_equal(format(masked), "--") + assert_equal(format(masked, ""), "--") + + # Postponed from PR #15410, perhaps address in the future. 
+ # assert_equal(format(masked, " >5"), " --") + # assert_equal(format(masked, " <5"), "-- ") + + # Expect a FutureWarning for using format_spec with MaskedElement + with assert_warns(FutureWarning): + with_format_string = format(masked, " >5") + assert_equal(with_format_string, "--") + + def test_str_repr(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999)''') + ) + + # arrays with a continuation + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, --, ..., 1997, 1998, 1999], + mask=[False, True, True, ..., False, False, False], + fill_value=999999)''') + ) + + # line-wrapped 1d arrays are correctly aligned + a = np.ma.arange(20) + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19], + mask=False, + fill_value=999999)''') + ) + + # 2d arrays cause wrapping + a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) + a[1, 1] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent(f'''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value={np.array(999999)[()]!r}, + dtype=int8)''') + ) + + # but not it they're a row vector + assert_equal( + repr(a[:1]), + textwrap.dedent(f'''\ + masked_array(data=[[1, 2, 3]], + mask=[[False, False, False]], + fill_value={np.array(999999)[()]!r}, + dtype=int8)''') + ) + + # dtype=int is implied, so not shown + assert_equal( + repr(a.astype(int)), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999)''') + ) + + def test_str_repr_legacy(self): + oldopts = np.get_printoptions() + np.set_printoptions(legacy='1.13') + try: + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' + ' mask = [False True False],\n' + ' fill_value = 999999)\n') + + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' + ' mask = [False True True ..., False False False],\n' + ' fill_value = 999999)\n' + ) + finally: + np.set_printoptions(**oldopts) + + def test_0d_unicode(self): + u = 'caf\xe9' + utype = type(u) + + arr_nomask = np.ma.array(u) + arr_masked = np.ma.array(u, mask=True) + + assert_equal(utype(arr_nomask), u) + assert_equal(utype(arr_masked), '--') + + def test_pickling(self): + # Tests pickling + for dtype in (int, float, str, object): + a = arange(10).astype(dtype) + a.fill_value = 999 + + masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked + True, # Fully masked + False) # Fully unmasked + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for mask in masks: + a.mask = mask + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled._data, a._data) + if dtype in (object, int): + assert_equal(a_pickled.fill_value, 999) + else: + assert_equal(a_pickled.fill_value, dtype(999)) + assert_array_equal(a_pickled.mask, mask) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + x = np.array([(1.0, 2), (3.0, 4)], + dtype=[('x', float), ('y', int)]).view(np.recarray) + a = masked_array(x, mask=[(True, False), (False, 
True)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.recarray)) + + def test_pickling_maskedconstant(self): + # Test pickling MaskedConstant + mc = np.ma.masked + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto)) + assert_equal(mc_pickled._baseclass, mc._baseclass) + assert_equal(mc_pickled._mask, mc._mask) + assert_equal(mc_pickled._data, mc._data) + + def test_pickling_wstructured(self): + # Tests pickling w/ structured array + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + + def test_pickling_keepalignment(self): + # Tests pickling w/ F_CONTIGUOUS arrays + a = arange(10) + a.shape = (-1, 2) + b = a.T + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + test = pickle.loads(pickle.dumps(b, protocol=proto)) + assert_equal(test, b) + + def test_single_element_subscript(self): + # Tests single element subscripts of Maskedarrays. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_topython(self): + # Tests some communication issues with Python. + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + + with suppress_warnings() as sup: + sup.filter(UserWarning, 'Warning: converting a masked element') + assert_(np.isnan(float(array([1], mask=[1])))) + + a = array([1, 2, 3], mask=[1, 0, 0]) + assert_raises(TypeError, lambda: float(a)) + assert_equal(float(a[-1]), 3.) + assert_(np.isnan(float(a[0]))) + assert_raises(TypeError, int, a) + assert_equal(int(a[-1]), 3) + assert_raises(MAError, lambda: int(a[0])) + + def test_oddfeatures_1(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_equal(z.real, x) + assert_equal(z.imag, 10 * x) + assert_equal((z * conjugate(z)).real, 101 * x * x) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_equal(x, z) + + def test_oddfeatures_2(self): + # Tests some more features. 
+ x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + @suppress_copy_mask_on_assignment + def test_oddfeatures_3(self): + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) + + def test_filled_with_object_dtype(self): + a = np.ma.masked_all(1, dtype='O') + assert_equal(a.filled('x')[0], 'x') + + def test_filled_with_flexible_dtype(self): + # Test filled w/ flexible dtype + flexi = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + flexi[0] = masked + assert_equal(flexi.filled(), + np.array([(default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),)], dtype=flexi.dtype)) + flexi[0] = masked + assert_equal(flexi.filled(1), + np.array([(1, '1', 1.)], dtype=flexi.dtype)) + + def test_filled_with_mvoid(self): + # Test filled w/ mvoid + ndtype = [('a', int), ('b', float)] + a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) + # Filled using default + test = a.filled() + assert_equal(tuple(test), (1, default_fill_value(1.))) + # Explicit fill_value + test = a.filled((-1, -1)) + assert_equal(tuple(test), (1, -1)) + # Using predefined filling values + a.fill_value = (-999, -999) + assert_equal(tuple(a.filled()), (1, -999)) + + def test_filled_with_nested_dtype(self): + # Test filled w/ nested dtype + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + # test if mask gets set correctly (see #6760) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2, 2))])) + assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), + ('f1', 'i1', (2, 2))], (2, 2))])) + assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), + ('f1', '?', (2, 2))], (2, 2))])) + + def test_filled_with_f_order(self): + # Test filled w/ F-contiguous array + a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), + mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), + order='F') # this is currently ignored + assert_(a.flags['F_CONTIGUOUS']) + assert_(a.filled(0).flags['F_CONTIGUOUS']) + + def test_optinfo_propagation(self): + # Checks that _optinfo dictionary isn't back-propagated + x = array([1, 2, 3, ], dtype=float) + x._optinfo['info'] = '???' + y = x.copy() + assert_equal(y._optinfo['info'], '???') + y._optinfo['info'] = '!!!' 
+ assert_equal(x._optinfo['info'], '???') + + def test_optinfo_forward_propagation(self): + a = array([1, 2, 2, 4]) + a._optinfo["key"] = "value" + assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0, 0, 2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) + + def test_fancy_printoptions(self): + # Test printing a masked array w/ fancy dtype. + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + # Test 0-d array with multi-dimensional dtype + t_2d0 = masked_array(data=(0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask=(False, [[True, False, True], + [False, False, True]], + False), + dtype="int, (2,3)float, float") + control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" + assert_equal(str(t_2d0), control) + + def test_flatten_structured_array(self): + # Test flatten_structured_array on arrays + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + def test_void0d(self): + # Test creating a mvoid object + ndtype = [('a', int), ('b', int)] + a = np.array([(1, 2,)], dtype=ndtype)[0] + f = mvoid(a) + assert_(isinstance(f, mvoid)) + + a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] + assert_(isinstance(a, mvoid)) + + a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + f = mvoid(a._data[0], a._mask[0]) + assert_(isinstance(f, 
mvoid))
+
+    def test_mvoid_getitem(self):
+        # Test mvoid.__getitem__
+        ndtype = [('a', int), ('b', int)]
+        a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+                         dtype=ndtype)
+        # w/o mask
+        f = a[0]
+        assert_(isinstance(f, mvoid))
+        assert_equal((f[0], f['a']), (1, 1))
+        assert_equal(f['b'], 2)
+        # w/ mask
+        f = a[1]
+        assert_(isinstance(f, mvoid))
+        assert_(f[0] is masked)
+        assert_(f['a'] is masked)
+        assert_equal(f[1], 4)
+
+        # exotic dtype
+        A = masked_array(data=[([0, 1],)],
+                         mask=[([True, False],)],
+                         dtype=[("A", ">i2", (2,))])
+        assert_equal(A[0]["A"], A["A"][0])
+        assert_equal(A[0]["A"], masked_array(data=[0, 1],
+                                             mask=[True, False], dtype=">i2"))
+
+    def test_mvoid_iter(self):
+        # Test iteration on __getitem__
+        ndtype = [('a', int), ('b', int)]
+        a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+                         dtype=ndtype)
+        # w/o mask
+        assert_equal(list(a[0]), [1, 2])
+        # w/ mask
+        assert_equal(list(a[1]), [masked, 4])
+
+    def test_mvoid_print(self):
+        # Test printing a mvoid
+        mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
+        assert_equal(str(mx[0]), "(1, 1)")
+        mx['b'][0] = masked
+        ini_display = masked_print_option._display
+        masked_print_option.set_display("-X-")
+        try:
+            assert_equal(str(mx[0]), "(1, -X-)")
+            assert_equal(repr(mx[0]), "(1, -X-)")
+        finally:
+            masked_print_option.set_display(ini_display)
+
+        # also check if there are object datatypes (see gh-7493)
+        mx = array([(1,), (2,)], dtype=[('a', 'O')])
+        assert_equal(str(mx[0]), "(1,)")
+
+    def test_mvoid_multidim_print(self):
+
+        # regression test for gh-6019
+        t_ma = masked_array(data=[([1, 2, 3],)],
+                            mask=[([False, True, False],)],
+                            fill_value=([999999, 999999, 999999],),
+                            dtype=[('a', '<i4', (3,))])
+        assert_(str(t_ma[0]) == "([1, --, 3],)")
+        assert_(repr(t_ma[0]) == "([1, --, 3],)")
+
+            if len(s) > 1:
+                assert_equal(np.concatenate((x, y), 1),
+                             concatenate((xm, ym), 1))
+                assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
+                assert_equal(np.sum(x, 1), sum(x, 1))
+                assert_equal(np.prod(x, 1), product(x, 1))
+
+    def test_binops_d2D(self):
+        # Test binary operations on 2D data
+        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+        test = a * b
+        control = array([[2., 3.], [2., 2.], [3., 3.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b * a
+        control = array([[2., 3.], [4., 5.], [6., 7.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        a = array([[1.], [2.], [3.]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]],
+                  mask=[[0, 0], [0, 0], [0, 1]])
+        test = a * b
+        control = array([[2, 3], [8, 10], [18, 3]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b * a
+        control = array([[2, 3], [8, 10], [18, 7]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+    def test_domained_binops_d2D(self):
+        # Test domained binary operations on 2D data
+        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+        test = a / b
+        control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b / a
+        control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        a = array([[1.], [2.], [3.]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]],
+                  mask=[[0, 0], [0, 0], [0, 1]])
+        test = a / b
+        control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b / a
+        control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+    def test_noshrinking(self):
+        # Check that we don't shrink a mask when not wanted
+        # Binary operations
+        a = masked_array([1., 2., 3.], mask=[False, False, False],
+                         shrink=False)
+        b = a + 1
+        assert_equal(b.mask, [0, 0, 0])
+        # In place binary operation
+        a += 1
+        assert_equal(a.mask, [0, 0, 0])
+        # Domained binary operation
+        b = a / 1.
+        assert_equal(b.mask, [0, 0, 0])
+        # In place binary operation
+        a /= 1.
+        assert_equal(a.mask, [0, 0, 0])
+
+    def test_ufunc_nomask(self):
+        # check the case ufuncs should set the mask to false
+        m = np.ma.array([1])
+        # check we don't get array([False], dtype=bool)
+        assert_equal(np.true_divide(m, 5).mask.shape, ())
+
+    def test_noshrink_on_creation(self):
+        # Check that the mask is not shrunk on array creation when not wanted
+        a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
+        assert_equal(a.mask, [0, 0, 0])
+
+    def test_mod(self):
+        # Tests mod
+        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+        assert_equal(mod(x, y), mod(xm, ym))
+        test = mod(ym, xm)
+        assert_equal(test, np.mod(ym, xm))
+        assert_equal(test.mask, mask_or(xm.mask, ym.mask))
+        test = mod(xm, ym)
+        assert_equal(test, np.mod(xm, ym))
+        assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
+
+    def test_TakeTransposeInnerOuter(self):
+        # Test of take, transpose, inner, outer products
+        x = arange(24)
+        y = np.arange(24)
+        x[5:6] = masked
+        x = x.reshape(2, 3, 4)
+        y = y.reshape(2, 3, 4)
+        assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
+        assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
+        assert_equal(np.inner(filled(x, 0), filled(y, 0)),
+                     inner(x, y))
+        assert_equal(np.outer(filled(x, 0), filled(y, 0)),
+                     outer(x, y))
+        y = array(['abc', 1, 'def', 2, 3], object)
+        y[2] = masked
+        t = take(y, [0, 3, 4])
+        assert_(t[0] == 'abc')
+        assert_(t[1] == 2)
+        assert_(t[2] == 3)
+
+    def test_imag_real(self):
+        # Check complex
+        xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
+        assert_equal(xx.imag, [10, 2])
+        assert_equal(xx.imag.filled(), [1e+20, 2])
+        assert_equal(xx.imag.dtype, xx._data.imag.dtype)
+        assert_equal(xx.real, [1, 20])
+        assert_equal(xx.real.filled(), [1e+20, 20])
+        assert_equal(xx.real.dtype, xx._data.real.dtype)
+
+    def test_methods_with_output(self):
+        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+        xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+        funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
+
+        for funcname in funclist:
+            npfunc = getattr(np, funcname)
+            xmmeth = getattr(xm, funcname)
+            # An ndarray as explicit input
+            output = np.empty(4, dtype=float)
+            output.fill(-9999)
+            result = npfunc(xm, axis=0, out=output)
+            # ...
the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty(4, dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + assert_(output[0] is masked) + + def test_eq_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] == a) + assert_equal(test.data, [[True, False], [False, False]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_ne_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] != a) + assert_equal(test.data, [[False, True], [True, True]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_eq_ne_structured_with_non_masked(self): + a = array([(1, 1), (2, 2), (3, 4)], + mask=[(0, 1), (0, 0), (1, 1)], dtype='i4,i4') + eq = a == a.data + ne = a.data != a + # Test the obvious. + assert_(np.all(eq)) + assert_(not np.any(ne)) + # Expect the mask set only for items with all fields masked. 
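+        # Comparing the structured mask against an all-True scalar of the
+        # same dtype yields one boolean per record, True only where every
+        # field is masked (here just the last record, masked (1, 1)).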
+        expected_mask = a.mask == np.ones((), a.mask.dtype)
+        assert_array_equal(eq.mask, expected_mask)
+        assert_array_equal(ne.mask, expected_mask)
+        # The masked element compares as not equal, because the
+        # masks did not match.
+        assert_equal(eq.data, [True, True, False])
+        assert_array_equal(eq.data, ~ne.data)
+
+    def test_eq_ne_structured_extra(self):
+        # ensure simple examples are symmetric and make sense.
+        # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465
+        dt = np.dtype('i4,i4')
+        for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt),
+                   mvoid((1, 2), mask=(0, 1), dtype=dt),
+                   mvoid((1, 2), mask=(1, 0), dtype=dt),
+                   mvoid((1, 2), mask=(1, 1), dtype=dt)):
+            ma1 = m1.view(MaskedArray)
+            r1 = ma1.view('2i4')
+            for m2 in (np.array((1, 1), dtype=dt),
+                       mvoid((1, 1), dtype=dt),
+                       mvoid((1, 0), mask=(0, 1), dtype=dt),
+                       mvoid((3, 2), mask=(0, 1), dtype=dt)):
+                ma2 = m2.view(MaskedArray)
+                r2 = ma2.view('2i4')
+                eq_expected = (r1 == r2).all()
+                assert_equal(m1 == m2, eq_expected)
+                assert_equal(m2 == m1, eq_expected)
+                assert_equal(ma1 == m2, eq_expected)
+                assert_equal(m1 == ma2, eq_expected)
+                assert_equal(ma1 == ma2, eq_expected)
+                # Also check it is the same if we do it element by element.
+                el_by_el = [m1[name] == m2[name] for name in dt.names]
+                assert_equal(array(el_by_el, dtype=bool).all(), eq_expected)
+                ne_expected = (r1 != r2).any()
+                assert_equal(m1 != m2, ne_expected)
+                assert_equal(m2 != m1, ne_expected)
+                assert_equal(ma1 != m2, ne_expected)
+                assert_equal(m1 != ma2, ne_expected)
+                assert_equal(ma1 != ma2, ne_expected)
+                el_by_el = [m1[name] != m2[name] for name in dt.names]
+                assert_equal(array(el_by_el, dtype=bool).any(), ne_expected)
+
+    @pytest.mark.parametrize('dt', ['S', 'U'])
+    @pytest.mark.parametrize('fill', [None, 'A'])
+    def test_eq_for_strings(self, dt, fill):
+        # Test equality for string arrays
+        a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
+
+        test = (a == a)
+        assert_equal(test.data, [True, True])
+        assert_equal(test.mask, [False, True])
+        assert_(test.fill_value == True)
+
+        test = (a == a[0])
+        assert_equal(test.data, [True, False])
+        assert_equal(test.mask, [False, True])
+        assert_(test.fill_value == True)
+
+        b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
+        test = (a == b)
+        assert_equal(test.data, [False, False])
+        assert_equal(test.mask, [True, True])
+        assert_(test.fill_value == True)
+
+        test = (a[0] == b)
+        assert_equal(test.data, [False, False])
+        assert_equal(test.mask, [True, False])
+        assert_(test.fill_value == True)
+
+        test = (b == a[0])
+        assert_equal(test.data, [False, False])
+        assert_equal(test.mask, [True, False])
+        assert_(test.fill_value == True)
+
+    @pytest.mark.parametrize('dt', ['S', 'U'])
+    @pytest.mark.parametrize('fill', [None, 'A'])
+    def test_ne_for_strings(self, dt, fill):
+        # Test inequality for string arrays
+        a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
+
+        test = (a != a)
+        assert_equal(test.data, [False, False])
+        assert_equal(test.mask, [False, True])
+        assert_(test.fill_value == True)
+
+        test = (a != a[0])
+        assert_equal(test.data, [False, True])
+        assert_equal(test.mask, [False, True])
+        assert_(test.fill_value == True)
+
+        b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
+        test = (a != b)
+        assert_equal(test.data, [True, True])
+        assert_equal(test.mask, [True, True])
+        assert_(test.fill_value == True)
+
+        test = (a[0] != b)
+        assert_equal(test.data, [True, True])
+        assert_equal(test.mask, [True, False])
+        assert_(test.fill_value ==
True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_eq_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize("op", [operator.eq, operator.lt]) + def test_eq_broadcast_with_unmasked(self, op): + a = array([0, 1], mask=[0, 1]) + b = np.arange(10).reshape(5, 2) + result = op(a, b) + assert_(result.mask.shape == b.shape) + assert_equal(result.mask, np.zeros(b.shape, bool) | a.mask) + + @pytest.mark.parametrize("op", [operator.eq, operator.gt]) + def test_comp_no_mask_not_broadcast(self, op): + # Regression test for failing doctest in MaskedArray.nonzero + # after gh-24556. + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = op(a, 3) + assert_(not result.mask.shape) + assert_(result.mask is nomask) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_ne_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_comparisons_for_numeric(self, op, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = op(a, a) + assert_equal(test.data, op(a._data, a._data)) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = op(a, a[0]) + assert_equal(test.data, op(a._data, a._data[0])) + 
assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = op(a, b) + assert_equal(test.data, op(a._data, b._data)) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = op(a[0], b) + assert_equal(test.data, op(a._data[0], b._data)) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = op(b, a[0]) + assert_equal(test.data, op(b._data, a._data[0])) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + @pytest.mark.parametrize('fill', [None, "N/A"]) + def test_comparisons_strings(self, op, fill): + # See gh-21770, mask propagation is broken for strings (and some other + # cases) so we explicitly test strings here. + # In principle only == and != may need special handling... + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + def test_eq_with_None(self): + # Really, comparisons with None should not be done, but check them + # anyway. Note that pep8 will flag these tests. + # Deprecation is in place for arrays, and when it happens this + # test will fail (and have to be changed accordingly). + + # With partial mask + with suppress_warnings() as sup: + sup.filter(FutureWarning, "Comparison to `None`") + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) # noqa: E711 + + def test_eq_with_scalar(self): + a = array(1) + assert_equal(a == 1, True) + assert_equal(a == 0, False) + assert_equal(a != 1, False) + assert_equal(a != 0, True) + b = array(1, mask=True) + assert_equal(b == 0, masked) + assert_equal(b == 1, masked) + assert_equal(b != 0, masked) + assert_equal(b != 1, masked) + + def test_eq_different_dimensions(self): + m1 = array([1, 1], mask=[0, 1]) + # test comparison with both masked and regular arrays. 
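+        # m1 has shape (2,), so it broadcasts against the (2, 2) operands;
+        # its mask broadcasts the same way, repeating [0, 1] on each row.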
+ for m2 in (array([[0, 1], [1, 2]]), + np.array([[0, 1], [1, 2]])): + test = (m1 == m2) + assert_equal(test.data, [[False, False], + [True, False]]) + assert_equal(test.mask, [[False, True], + [False, True]]) + + def test_numpyarithmetic(self): + # Check that the mask is not back-propagated when using numpy functions + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + +class TestMaskedArrayAttributes: + + def test_keepmask(self): + # Tests the keep mask flag + x = masked_array([1, 2, 3], mask=[1, 0, 0]) + mx = masked_array(x) + assert_equal(mx.mask, x.mask) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) + assert_equal(mx.mask, [0, 1, 0]) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) + assert_equal(mx.mask, [1, 1, 0]) + # We default to true + mx = masked_array(x, mask=[0, 1, 0]) + assert_equal(mx.mask, [1, 1, 0]) + + def test_hardmask(self): + # Test hard_mask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + # We need to copy, to avoid updating d in xh ! + xs = array(d, mask=m, hard_mask=False, copy=True) + xh[[1, 4]] = [10, 40] + xs[[1, 4]] = [10, 40] + assert_equal(xh._data, [0, 10, 2, 3, 4]) + assert_equal(xs._data, [0, 10, 2, 3, 40]) + assert_equal(xs.mask, [0, 0, 0, 1, 0]) + assert_(xh._hardmask) + assert_(not xs._hardmask) + xh[1:4] = [10, 20, 30] + xs[1:4] = [10, 20, 30] + assert_equal(xh._data, [0, 10, 20, 3, 4]) + assert_equal(xs._data, [0, 10, 20, 30, 40]) + assert_equal(xs.mask, nomask) + xh[0] = masked + xs[0] = masked + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, [1, 0, 0, 0, 0]) + xh[:] = 1 + xs[:] = 1 + assert_equal(xh._data, [0, 1, 1, 3, 4]) + assert_equal(xs._data, [1, 1, 1, 1, 1]) + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, nomask) + # Switch to soft mask + xh.soften_mask() + xh[:] = arange(5) + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh.mask, nomask) + # Switch back to hard mask + xh.harden_mask() + xh[xh < 3] = masked + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + xh[filled(xh > 1, False)] = 5 + assert_equal(xh._data, [0, 1, 2, 5, 5]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + + xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) + xh[0] = 0 + assert_equal(xh._data, [[1, 0], [3, 4]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[-1, -1] = 5 + assert_equal(xh._data, [[1, 0], [3, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[filled(xh < 5, False)] = 2 + assert_equal(xh._data, [[1, 2], [2, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + + def test_hardmask_again(self): + # Another test of hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + xh[4:5] = 999 + xh[0:1] = 999 + assert_equal(xh._data, [999, 1, 2, 3, 4]) + + def test_hardmask_oncemore_yay(self): + # OK, yet another test of hardmask + # Make sure that harden_mask/soften_mask//unshare_mask returns self + a = array([1, 2, 3], mask=[1, 0, 0]) + b = a.harden_mask() + assert_equal(a, b) + b[0] = 0 + assert_equal(a, b) + assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) + a = b.soften_mask() + a[0] = 0 + assert_equal(a, b) + 
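+        # soften_mask() also returned self, so a and b alias the same
+        # array and the a[0] = 0 assignment above is visible through b: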
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) + + def test_smallmask(self): + # Checks the behaviour of _smallmask + a = arange(10) + a[1] = masked + a[1] = 1 + assert_equal(a._mask, nomask) + a = arange(10) + a._smallmask = False + a[1] = masked + a[1] = 1 + assert_equal(a._mask, zeros(10)) + + def test_shrink_mask(self): + # Tests .shrink_mask() + a = array([1, 2, 3], mask=[0, 0, 0]) + b = a.shrink_mask() + assert_equal(a, b) + assert_equal(a.mask, nomask) + + # Mask cannot be shrunk on structured types, so is a no-op + a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) + b = a.copy() + a.shrink_mask() + assert_equal(a.mask, b.mask) + + def test_flat(self): + # Test that flat can return all types of items [#4585, #4615] + # test 2-D record array + # ... on structured array w/ masked records + x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], + [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x['a'][0, 1] = masked + x['b'][1, 0] = masked + x['c'][0, 2] = masked + x[-1, -1] = masked + xflat = x.flat + assert_equal(xflat[0], x[0, 0]) + assert_equal(xflat[1], x[0, 1]) + assert_equal(xflat[2], x[0, 2]) + assert_equal(xflat[:3], x[0]) + assert_equal(xflat[3], x[1, 0]) + assert_equal(xflat[4], x[1, 1]) + assert_equal(xflat[5], x[1, 2]) + assert_equal(xflat[3:], x[1]) + assert_equal(xflat[-1], x[-1, -1]) + i = 0 + j = 0 + for xf in xflat: + assert_equal(xf, x[j, i]) + i += 1 + if i >= x.shape[-1]: + i = 0 + j += 1 + + def test_assign_dtype(self): + # check that the mask's dtype is updated when dtype is changed + a = np.zeros(4, dtype='f4,i4') + + m = np.ma.array(a) + m.dtype = np.dtype('f4') + repr(m) # raises? + assert_equal(m.dtype, np.dtype('f4')) + + # check that dtype changes that change shape of mask too much + # are not allowed + def assign(): + m = np.ma.array(a) + m.dtype = np.dtype('f8') + assert_raises(ValueError, assign) + + b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? 
+ assert_equal(b.dtype, np.dtype('f4')) + + # check that nomask is preserved + a = np.zeros(4, dtype='f4') + m = np.ma.array(a) + m.dtype = np.dtype('f4,i4') + assert_equal(m.dtype, np.dtype('f4,i4')) + assert_equal(m._mask, np.ma.nomask) + + +class TestFillingValues: + + def test_check_on_scalar(self): + # Test _check_fill_value set to valid and invalid values + _check_fill_value = np.ma.core._check_fill_value + + fval = _check_fill_value(0, int) + assert_equal(fval, 0) + fval = _check_fill_value(None, int) + assert_equal(fval, default_fill_value(0)) + + fval = _check_fill_value(0, "|S3") + assert_equal(fval, b"0") + fval = _check_fill_value(None, "|S3") + assert_equal(fval, default_fill_value(b"camelot!")) + assert_raises(TypeError, _check_fill_value, 1e+20, int) + assert_raises(TypeError, _check_fill_value, 'stuff', int) + + def test_check_on_fields(self): + # Tests _check_fill_value with records + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('a', int), ('b', float), ('c', "|S3")] + # A check on a list should return a single record + fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # A check on None should output the defaults + fval = _check_fill_value(None, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [default_fill_value(0), + default_fill_value(0.), + asbytes(default_fill_value("0"))]) + #.....Using a structured type as fill_value should work + fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using a flexible type w/ a different type shouldn't matter + # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured + # types by position + fill_val = np.array((-999, -12345678.9, "???"), + dtype=[("A", int), ("B", float), ("C", "|S3")]) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using an object-array shouldn't matter either + fill_val = np.ndarray(shape=(1,), dtype=object) + fill_val[0] = (-999, -12345678.9, b"???") + fval = _check_fill_value(fill_val, object) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # NOTE: This test was never run properly as "fill_value" rather than + # "fill_val" was assigned. Written properly, it fails. 
+ #fill_val = np.array((-999, -12345678.9, "???")) + #fval = _check_fill_value(fill_val, ndtype) + #assert_(isinstance(fval, ndarray)) + #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + #.....One-field-only flexible type should work as well + ndtype = [("a", int)] + fval = _check_fill_value(-999999999, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), (-999999999,)) + + def test_fillvalue_conversion(self): + # Tests the behavior of fill_value during conversion + # We had a tailored comment to make sure special attributes are + # properly dealt with + a = array([b'3', b'4', b'5']) + a._optinfo.update({'comment': "updated!"}) + + b = array(a, dtype=int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + + b = array(a, dtype=float) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0.)) + + b = a.astype(int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + assert_equal(b._optinfo['comment'], "updated!") + + b = a.astype([('a', '|S3')]) + assert_equal(b['a']._data, a._data) + assert_equal(b['a'].fill_value, a.fill_value) + + def test_default_fill_value(self): + # check all calling conventions + f1 = default_fill_value(1.) + f2 = default_fill_value(np.array(1.)) + f3 = default_fill_value(np.array(1.).dtype) + assert_equal(f1, f2) + assert_equal(f1, f3) + + def test_default_fill_value_structured(self): + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + + f1 = default_fill_value(fields) + f2 = default_fill_value(fields.dtype) + expected = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.)), dtype=fields.dtype) + assert_equal(f1, expected) + assert_equal(f2, expected) + + def test_default_fill_value_void(self): + dt = np.dtype([('v', 'V7')]) + f = default_fill_value(dt) + assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) + + def test_fillvalue(self): + # Yet more fun with the fill_value + data = masked_array([1, 2, 3], fill_value=-999) + series = data[[0, 2, 1]] + assert_equal(series._fill_value, data._fill_value) + + mtype = [('f', float), ('s', '|S3')] + x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) + x.fill_value = 999 + assert_equal(x.fill_value.item(), [999., b'999']) + assert_equal(x['f'].fill_value, 999) + assert_equal(x['s'].fill_value, b'999') + + x.fill_value = (9, '???') + assert_equal(x.fill_value.item(), (9, b'???')) + assert_equal(x['f'].fill_value, 9) + assert_equal(x['s'].fill_value, b'???') + + x = array([1, 2, 3.1]) + x.fill_value = 999 + assert_equal(np.asarray(x.fill_value).dtype, float) + assert_equal(x.fill_value, 999.) 
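+        # Internally the scalar fill value is stored as a 0-d array of the
+        # array's dtype, which the final check below makes explicit.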
+        assert_equal(x._fill_value, np.array(999.))
+
+    def test_subarray_fillvalue(self):
+        # gh-10483 test multi-field index fill value
+        fields = array([(1, 1, 1)],
+                       dtype=[('i', int), ('s', '|S8'), ('f', float)])
+        with suppress_warnings() as sup:
+            sup.filter(FutureWarning, "Numpy has detected")
+            subfields = fields[['i', 'f']]
+            assert_equal(tuple(subfields.fill_value), (999999, 1.e+20))
+            # test comparison does not raise:
+            subfields[1:] == subfields[:-1]
+
+    def test_fillvalue_exotic_dtype(self):
+        # Tests yet more exotic flexible dtypes
+        _check_fill_value = np.ma.core._check_fill_value
+        ndtype = [('i', int), ('s', '|S8'), ('f', float)]
+        control = np.array((default_fill_value(0),
+                            default_fill_value('0'),
+                            default_fill_value(0.),),
+                           dtype=ndtype)
+        assert_equal(_check_fill_value(None, ndtype), control)
+        # The shape shouldn't matter
+        ndtype = [('f0', float, (2, 2))]
+        control = np.array((default_fill_value(0.),),
+                           dtype=[('f0', float)]).astype(ndtype)
+        assert_equal(_check_fill_value(None, ndtype), control)
+        control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
+        assert_equal(_check_fill_value(0, ndtype), control)
+
+        ndtype = np.dtype("int, (2,3)float, float")
+        control = np.array((default_fill_value(0),
+                            default_fill_value(0.),
+                            default_fill_value(0.),),
+                           dtype="int, float, float").astype(ndtype)
+        test = _check_fill_value(None, ndtype)
+        assert_equal(test, control)
+        control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
+        assert_equal(_check_fill_value(0, ndtype), control)
+        # but when indexing, fill value should become scalar not tuple
+        # See issue #6723
+        M = masked_array(control)
+        assert_equal(M["f1"].fill_value.ndim, 0)
+
+    def test_fillvalue_datetime_timedelta(self):
+        # Test default fillvalue for datetime64 and timedelta64 types.
+        # See issue #4476, this would return '?' which would cause errors
+        # elsewhere
+
+        for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
+                         "h", "D", "W", "M", "Y"):
+            control = numpy.datetime64("NaT", timecode)
+            test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
+            np.testing.assert_array_equal(test, control)
+            control = numpy.timedelta64("NaT", timecode)
+            test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
+            np.testing.assert_array_equal(test, control)
+
+            m > 0
+
+            # test different unary domains
+            sqrt(m)
+            log(m)
+            tan(m)
+            arcsin(m)
+            arccos(m)
+            arccosh(m)
+
+            # test binary domains
+            divide(m, 2)
+
+            # also check that allclose uses ma ufuncs, to avoid warning
+            allclose(m, 0.5)
+
+    def test_masked_array_underflow(self):
+        x = np.arange(0, 3, 0.1)
+        X = np.ma.array(x)
+        with np.errstate(under="raise"):
+            X2 = X / 2.0
+            np.testing.assert_array_equal(X2, x / 2)
+
+
+class TestMaskedArrayInPlaceArithmetic:
+    # Test MaskedArray in-place arithmetic
+
+    def setup_method(self):
+        x = arange(10)
+        y = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        self.intdata = (x, y, xm)
+        self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
+        self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        self.othertypes = [np.dtype(_).type for _ in self.othertypes]
+        self.uint8data = (
+            x.astype(np.uint8),
+            y.astype(np.uint8),
+            xm.astype(np.uint8)
+        )
+
+    def test_inplace_addition_scalar(self):
+        # Test of inplace additions
+        (x, y, xm) = self.intdata
+        xm[2] = masked
+        x += 1
+        assert_equal(x, y + 1)
+        xm += 1
+        assert_equal(xm, y + 1)
+
+        (x, _, xm) = self.floatdata
+        id1 = x.data.ctypes.data
+        x += 1.
+        assert_(id1 == x.data.ctypes.data)
+        assert_equal(x, y + 1.)
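+    # The ctypes.data check above shows that in-place operators reuse the
+    # existing data buffer rather than reallocating; e.g. (illustrative):
+    #     >>> z = masked_array(np.arange(3.)); addr = z.data.ctypes.data
+    #     >>> z *= 2; addr == z.data.ctypes.data
+    #     True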
+ + def test_inplace_addition_array(self): + # Test of inplace additions + (x, y, xm) = self.intdata + m = xm.mask + a = arange(10, dtype=np.int16) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar(self): + # Test of inplace subtractions + (x, y, xm) = self.intdata + x -= 1 + assert_equal(x, y - 1) + xm -= 1 + assert_equal(xm, y - 1) + + def test_inplace_subtraction_array(self): + # Test of inplace subtractions + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + x *= 2.0 + assert_equal(x, y * 2) + xm *= 2.0 + assert_equal(xm, y * 2) + + def test_inplace_multiplication_array(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_division_scalar_int(self): + # Test of inplace division + (x, y, xm) = self.intdata + x = arange(10) * 2 + xm = arange(10) * 2 + xm[2] = masked + x //= 2 + assert_equal(x, y) + xm //= 2 + assert_equal(xm, y) + + def test_inplace_division_scalar_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + x /= 2.0 + assert_equal(x, y / 2.0) + xm /= arange(10) + assert_equal(xm, ones((10,))) + + def test_inplace_division_array_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x /= a + xm /= a + assert_equal(x, y / a) + assert_equal(xm, y / a) + assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) + + def test_inplace_division_misc(self): + + x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] 
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + + z = xm / ym + assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + def test_datafriendly_add(self): + # Test keeping data w/ (inplace) addition + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_sub(self): + # Test keeping data w/ (inplace) subtraction + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_mul(self): + # Test keeping data w/ (inplace) multiplication + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_div(self): + # Test keeping data w/ (inplace) division + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1 / 2., 2 / 2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1 / 2., 2 / 2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2. 
/ 20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2 / 20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_pow(self): + # Test keeping data w/ (inplace) power + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2. ** 2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2. ** 2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_inplace_addition_scalar_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + xm[2] = masked + x += t(1) + assert_equal(x, y + t(1)) + xm += t(1) + assert_equal(xm, y + t(1)) + + def test_inplace_addition_array_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x -= t(1) + assert_equal(x, y - t(1)) + xm -= t(1) + assert_equal(xm, y - t(1)) + + def test_inplace_subtraction_array_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar_type(self): + # Test of inplace multiplication + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x *= t(2) + assert_equal(x, y * t(2)) + xm *= t(2) + assert_equal(xm, y * t(2)) + + def test_inplace_multiplication_array_type(self): + # Test of inplace 
multiplication + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_floor_division_scalar_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + try: + x //= t(2) + xm //= t(2) + assert_equal(x, y) + assert_equal(xm, y) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_floor_division_array_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + try: + x //= a + xm //= a + assert_equal(x, y // a) + assert_equal(xm, y // a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_division_scalar_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= t(2) + assert_equal(x, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= t(2) + assert_equal(xm, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_division_array_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. 
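+                # Plain ndarrays behave the same way (illustrative):
+                # w = np.arange(3); w /= 2 raises TypeError, because the
+                # float result of true divide cannot be cast back to the
+                # integer operand.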
+ try: + x /= a + assert_equal(x, y / a) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= a + assert_equal(xm, y / a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_pow_type(self): + # Test keeping data w/ (inplace) power + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + # Test pow on scalar + x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) + xx = x ** t(2) + xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) + assert_equal(xx.data, xx_r.data) + assert_equal(xx.mask, xx_r.mask) + # Test ipow on scalar + x **= t(2) + assert_equal(x.data, xx_r.data) + assert_equal(x.mask, xx_r.mask) + + +class TestMaskedArrayMethods: + # Test class for miscellaneous MaskedArrays methods. + def setup_method(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_generic_methods(self): + # Tests some MaskedArray methods. 
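+        # With nothing masked, each method should simply agree with the
+        # corresponding ndarray method applied to the underlying _data.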
+ a = array([1, 3, 2]) + assert_equal(a.any(), a._data.any()) + assert_equal(a.all(), a._data.all()) + assert_equal(a.argmax(), a._data.argmax()) + assert_equal(a.argmin(), a._data.argmin()) + assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) + assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) + assert_equal(a.conj(), a._data.conj()) + assert_equal(a.conjugate(), a._data.conjugate()) + + m = array([[1, 2], [3, 4]]) + assert_equal(m.diagonal(), m._data.diagonal()) + assert_equal(a.sum(), a._data.sum()) + assert_equal(a.take([1, 2]), a._data.take([1, 2])) + assert_equal(m.transpose(), m._data.transpose()) + + def test_allclose(self): + # Tests allclose on arrays + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + assert_(allclose(a, b)) + # Test allclose w/ infs + a[0] = np.inf + assert_(not allclose(a, b)) + b[0] = np.inf + assert_(allclose(a, b)) + # Test allclose w/ masked + a = masked_array(a) + a[-1] = masked + assert_(allclose(a, b, masked_equal=True)) + assert_(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + assert_(allclose(a, 0, masked_equal=True)) + + # Test that the function works for MIN_INT integer typed arrays + a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) + assert_(allclose(a, a)) + + def test_allclose_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, 4]], dtype="m8[ns]") + assert allclose(a, a, atol=0) + assert allclose(a, a, atol=np.timedelta64(1, "ns")) + + def test_allany(self): + # Checks the any/all methods/functions. + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool) + mx = masked_array(x, mask=m) + mxbig = (mx > 0.5) + mxsmall = (mx < 0.5) + + assert_(not mxbig.all()) + assert_(mxbig.any()) + assert_equal(mxbig.all(0), [False, False, True]) + assert_equal(mxbig.all(1), [False, False, True]) + assert_equal(mxbig.any(0), [False, False, True]) + assert_equal(mxbig.any(1), [True, True, True]) + + assert_(not mxsmall.all()) + assert_(mxsmall.any()) + assert_equal(mxsmall.all(0), [True, True, False]) + assert_equal(mxsmall.all(1), [False, False, False]) + assert_equal(mxsmall.any(0), [True, True, False]) + assert_equal(mxsmall.any(1), [True, True, False]) + + def test_allany_oddities(self): + # Some fun with all and any + store = empty((), dtype=bool) + full = array([1, 2, 3], mask=True) + + assert_(full.all() is masked) + full.all(out=store) + assert_(store) + assert_(store._mask, True) + assert_(store is not masked) + + store = empty((), dtype=bool) + assert_(full.any() is masked) + full.any(out=store) + assert_(not store) + assert_(store._mask, True) + assert_(store is not masked) + + def test_argmax_argmin(self): + # Tests argmin & argmax on MaskedArrays. 
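+        # Masked entries cannot win: argmin/argmax fill them with the
+        # dtype's extreme values before searching, so only unmasked
+        # positions are ever returned.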
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + + assert_equal(mx.argmin(), 35) + assert_equal(mX.argmin(), 35) + assert_equal(m2x.argmin(), 4) + assert_equal(m2X.argmin(), 4) + assert_equal(mx.argmax(), 28) + assert_equal(mX.argmax(), 28) + assert_equal(m2x.argmax(), 31) + assert_equal(m2X.argmax(), 31) + + assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) + assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) + assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) + assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) + + assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) + assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) + assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) + assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) + + def test_clip(self): + # Tests clip on MaskedArrays. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) + mx = array(x, mask=m) + clipped = mx.clip(2, 8) + assert_equal(clipped.mask, mx.mask) + assert_equal(clipped._data, x.clip(2, 8)) + assert_equal(clipped._data, mx._data.clip(2, 8)) + + def test_clip_out(self): + # gh-14140 + a = np.arange(10) + m = np.ma.MaskedArray(a, mask=[0, 1] * 5) + m.clip(0, 5, out=m) + assert_equal(m.mask, [0, 1] * 5) + + def test_compress(self): + # test compress + a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) + condition = (a > 1.5) & (a < 3.5) + assert_equal(a.compress(condition), [2., 3.]) + + a[[2, 3]] = masked + b = a.compress(condition) + assert_equal(b._data, [2., 3.]) + assert_equal(b._mask, [0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + condition = (a < 4.) 
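+        # The condition is evaluated on the underlying data, so the masked
+        # entry at index 2 (data 3. < 4.) survives the compression with its
+        # mask intact, as checked below.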
+ b = a.compress(condition) + assert_equal(b._data, [1., 2., 3.]) + assert_equal(b._mask, [0, 0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + a = masked_array([[10, 20, 30], [40, 50, 60]], + mask=[[0, 0, 1], [1, 0, 0]]) + b = a.compress(a.ravel() >= 22) + assert_equal(b._data, [30, 40, 50, 60]) + assert_equal(b._mask, [1, 1, 0, 0]) + + x = np.array([3, 1, 2]) + b = a.compress(x >= 2, axis=1) + assert_equal(b._data, [[10, 30], [40, 60]]) + assert_equal(b._mask, [[0, 1], [1, 0]]) + + def test_compressed(self): + # Tests compressed + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + a[0] = masked + b = a.compressed() + assert_equal(b, [2, 3, 4]) + + def test_empty(self): + # Tests empty/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = empty_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = empty(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check empty_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = empty_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view(masked_array) + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_zeros(self): + # Tests zeros/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = zeros(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = zeros_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check zeros_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = zeros_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_ones(self): + # Tests ones/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = ones(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = ones_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check ones_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = ones_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + @suppress_copy_mask_on_assignment + def test_put(self): + # Tests put. 
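+        # On a soft mask, put() updates data and mask together: writing an
+        # unmasked value clears the mask at that position, while masked
+        # source entries mask the target.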
+ d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_equal(x, [0, 10, 2, -1, 40]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + i = [0, 2, 4, 6] + x.put(i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + put(x, i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + def test_put_nomask(self): + # GitHub issue 6425 + x = zeros(10) + z = array([3., -1.], mask=[False, True]) + + x.put([1, 2], z) + assert_(x[0] is not masked) + assert_equal(x[0], 0) + assert_(x[1] is not masked) + assert_equal(x[1], 3) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_equal(x[3], 0) + + def test_put_hardmask(self): + # Tests put on hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d + 1, mask=m, hard_mask=True, copy=True) + xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) + assert_equal(xh._data, [3, 4, 2, 4, 5]) + + def test_putmask(self): + x = arange(6) + 1 + mx = array(x, mask=[0, 0, 0, 1, 1, 1]) + mask = [0, 0, 1, 0, 0, 1] + # w/o mask, w/o masked values + xx = x.copy() + putmask(xx, mask, 99) + assert_equal(xx, [1, 2, 99, 4, 5, 99]) + # w/ mask, w/o masked values + mxx = mx.copy() + putmask(mxx, mask, 99) + assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) + assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) + # w/o mask, w/ masked values + values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) + xx = x.copy() + putmask(xx, mask, values) + assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) + # w/ mask, w/ masked values + mxx = mx.copy() + putmask(mxx, mask, values) + assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) + # w/ mask, w/ masked values + hardmask + mxx = mx.copy() + mxx.harden_mask() + putmask(mxx, mask, values) + assert_equal(mxx, [1, 2, 30, 4, 5, 60]) + + def test_ravel(self): + # Tests ravel + a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, aravel.shape) + a = array([0, 0], mask=[1, 1]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, a.shape) + # Checks that small_mask is preserved + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) + assert_equal(a.ravel()._mask, [0, 0, 0, 0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2, 2) + ar = a.ravel() + assert_equal(ar._mask, [0, 0, 0, 0]) + assert_equal(ar._data, [1, 2, 3, 4]) + assert_equal(ar.fill_value, -99) + # Test index ordering + assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) + assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) + + @pytest.mark.parametrize("order", "AKCF") + @pytest.mark.parametrize("data_order", "CF") + def test_ravel_order(self, order, data_order): + # Ravelling must ravel mask and data in the same order always to avoid + # misaligning the two in the ravel result. 
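+        # E.g. (illustrative): for a C-ordered 2x2 mask [[1, 0], [0, 0]],
+        # ravel(order='F') must walk columns for mask and data alike,
+        # giving mask [1, 0, 0, 0].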
+ arr = np.ones((5, 10), order=data_order)
+ arr[0, :] = 0
+ mask = np.ones((10, 5), dtype=bool, order=data_order).T
+ mask[0, :] = False
+ x = array(arr, mask=mask)
+ assert x._data.flags.fnc != x._mask.flags.fnc
+ assert (x.filled(0) == 0).all()
+ raveled = x.ravel(order)
+ assert (raveled.filled(0) == 0).all()
+
+ # NOTE: Can be wrong if arr order is neither C nor F and `order="K"`
+ assert_array_equal(arr.ravel(order), x.ravel(order)._data)
+
+ def test_reshape(self):
+ # Tests reshape
+ x = arange(4)
+ x[0] = masked
+ y = x.reshape(2, 2)
+ assert_equal(y.shape, (2, 2,))
+ assert_equal(y._mask.shape, (2, 2,))
+ assert_equal(x.shape, (4,))
+ assert_equal(x._mask.shape, (4,))
+
+ def test_sort(self):
+ # Test sort
+ x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+ sortedx = sort(x)
+ assert_equal(sortedx._data, [1, 2, 3, 4])
+ assert_equal(sortedx._mask, [0, 0, 0, 1])
+
+ sortedx = sort(x, endwith=False)
+ assert_equal(sortedx._data, [4, 1, 2, 3])
+ assert_equal(sortedx._mask, [1, 0, 0, 0])
+
+ x.sort()
+ assert_equal(x._data, [1, 2, 3, 4])
+ assert_equal(x._mask, [0, 0, 0, 1])
+
+ x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+ x.sort(endwith=False)
+ assert_equal(x._data, [4, 1, 2, 3])
+ assert_equal(x._mask, [1, 0, 0, 0])
+
+ x = [1, 4, 2, 3]
+ sortedx = sort(x)
+ # sorting a plain sequence must not produce a MaskedArray
+ assert_(not isinstance(sortedx, MaskedArray))
+
+ x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
+ sortedx = sort(x, endwith=False)
+ assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
+ x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
+ sortedx = sort(x, endwith=False)
+ assert_equal(sortedx._data, [1, 2, -2, -1, 0])
+ assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
+
+ x = array([0, -1], dtype=np.int8)
+ sortedx = sort(x, kind="stable")
+ assert_equal(sortedx, array([-1, 0], dtype=np.int8))
+
+ def test_stable_sort(self):
+ x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
+ expected = array([0, 3, 1, 4, 2, 5])
+ computed = argsort(x, kind='stable')
+ assert_equal(computed, expected)
+
+ def test_argsort_matches_sort(self):
+ x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+ for kwargs in [{},
+ {"endwith": True},
+ {"endwith": False},
+ {"fill_value": 2},
+ {"fill_value": 2, "endwith": True},
+ {"fill_value": 2, "endwith": False}]:
+ sortedx = sort(x, **kwargs)
+ argsortedx = x[argsort(x, **kwargs)]
+ assert_equal(sortedx._data, argsortedx._data)
+ assert_equal(sortedx._mask, argsortedx._mask)
+
+ def test_sort_2d(self):
+ # Check sort of 2D array.
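+ # A sketch of the sorting convention checked here, assuming numpy as np:
+ # with the default endwith=True, masked entries sort to the end of each
+ # lane along the chosen axis.
+ #   >>> a = np.ma.array([3, 1, 2], mask=[1, 0, 0])
+ #   >>> np.ma.sort(a).tolist()
+ #   [1, 2, None]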
+ # 2D array w/o mask + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + # 2D array w/mask + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) + # 3D + a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], + [[1, 2, 3], [7, 8, 9], [4, 5, 6]], + [[7, 8, 9], [1, 2, 3], [4, 5, 6]], + [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) + a[a % 4 == 0] = masked + am = a.copy() + an = a.filled(99) + am.sort(0) + an.sort(0) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(1) + an.sort(1) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(2) + an.sort(2) + assert_equal(am, an) + + def test_sort_flexible(self): + # Test sort on structured dtype. + a = array( + data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + mask_last = array( + data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + mask_first = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], + dtype=[('A', int), ('B', int)]) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) + + test = sort(a, endwith=False) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) + + # Test sort on dtype with subarray (gh-8069) + # Just check that the sort does not error, structured array subarrays + # are treated as byte strings and that leads to differing behavior + # depending on endianness and `endwith`. + dt = np.dtype([('v', int, 2)]) + a = a.view(dt) + test = sort(a) + test = sort(a, endwith=False) + + def test_argsort(self): + # Test argsort + a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) + assert_equal(np.argsort(a), argsort(a)) + + def test_squeeze(self): + # Check squeeze + data = masked_array([[1, 2, 3]]) + assert_equal(data.squeeze(), [1, 2, 3]) + data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) + assert_equal(data.squeeze(), [1, 2, 3]) + assert_equal(data.squeeze()._mask, [1, 1, 1]) + + # normal ndarrays return a view + arr = np.array([[1]]) + arr_sq = arr.squeeze() + assert_equal(arr_sq, 1) + arr_sq[...] = 2 + assert_equal(arr[0, 0], 2) + + # so maskedarrays should too + m_arr = masked_array([[1]], mask=True) + m_arr_sq = m_arr.squeeze() + assert_(m_arr_sq is not np.ma.masked) + assert_equal(m_arr_sq.mask, True) + m_arr_sq[...] = 2 + assert_equal(m_arr[0, 0], 2) + + def test_swapaxes(self): + # Tests swapaxes on MaskedArrays. 
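+ # A sketch, assuming numpy as np: swapaxes permutes the mask together
+ # with the data, so a masked cell stays masked at its swapped position.
+ #   >>> a = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
+ #   >>> a.swapaxes(0, 1).mask.tolist()
+ #   [[False, False], [True, False]]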
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mX = array(x, mask=m).reshape(6, 6) + mXX = mX.reshape(3, 2, 2, 3) + + mXswapped = mX.swapaxes(0, 1) + assert_equal(mXswapped[-1], mX[:, -1]) + + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_take(self): + # Tests take + x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) + assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) + assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) + assert_equal(x.take([[0, 1], [0, 1]]), + masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) + + # assert_equal crashes when passed np.ma.mask + assert_(x[1] is np.ma.masked) + assert_(x.take(1) is np.ma.masked) + + x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) + assert_equal(x.take([0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + assert_equal(take(x, [0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + + def test_take_masked_indices(self): + # Test take w/ masked indices + a = np.array((40, 18, 37, 9, 22)) + indices = np.arange(3)[None, :] + np.arange(5)[:, None] + mindices = array(indices, mask=(indices >= len(a))) + # No mask + test = take(a, mindices, mode='clip') + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 22], + [22, 22, 22]]) + assert_equal(test, ctrl) + # Masked indices + test = take(a, mindices) + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 40], + [22, 40, 40]]) + ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # Masked input + masked indices + a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) + test = take(a, mindices) + ctrl[0, 1] = ctrl[1, 0] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_tolist(self): + # Tests to list + # ... on 1D + x = array(np.arange(12)) + x[[1, -2]] = masked + xlist = x.tolist() + assert_(xlist[1] is None) + assert_(xlist[-2] is None) + # ... on 2D + x.shape = (3, 4) + xlist = x.tolist() + ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] + assert_equal(xlist[0], [0, None, 2, 3]) + assert_equal(xlist[1], [4, 5, 6, 7]) + assert_equal(xlist[2], [8, 9, None, 11]) + assert_equal(xlist, ctrl) + # ... on structured array w/ masked records + x = array(list(zip([1, 2, 3], + [1.1, 2.2, 3.3], + ['one', 'two', 'thr'])), + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x[-1] = masked + assert_equal(x.tolist(), + [(1, 1.1, b'one'), + (2, 2.2, b'two'), + (None, None, None)]) + # ... on structured array w/ masked fields + a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], + dtype=[('a', int), ('b', int)]) + test = a.tolist() + assert_equal(test, [[1, None], [3, 4]]) + # ... 
on mvoid + a = a[0] + test = a.tolist() + assert_equal(test, [1, None]) + + def test_tolist_specialcase(self): + # Test mvoid.tolist: make sure we return a standard Python object + a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) + # w/o mask: each entry is a np.void whose elements are standard Python + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + # w/ mask: each entry is a ma.void whose elements should be + # standard Python + a.mask[0] = (0, 1) + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + + def test_toflex(self): + # Test the conversion to records + data = arange(10) + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = [('i', int), ('s', '|S3'), ('f', float)] + data = array(list(zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))), + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = np.dtype("int, (2,3)float, float") + data = array(list(zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))), + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + # Test the reconstruction of a masked_array from a record + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + + def test_arraymethod(self): + # Test a _arraymethod w/ n argument + marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) + control = masked_array([[1], [2], [3], [4], [5]], + mask=[0, 0, 1, 0, 0]) + assert_equal(marray.T, control) + assert_equal(marray.transpose(), control) + + assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + + def test_arraymethod_0d(self): + # gh-9430 + x = np.ma.array(42, mask=True) + assert_equal(x.T.mask, x.mask) + assert_equal(x.T.data, x.data) + + def test_transpose_view(self): + x = np.ma.array([[1, 2, 3], [4, 5, 6]]) + x[0, 1] = np.ma.masked + xt = x.T + + xt[1, 0] = 10 + xt[0, 1] = np.ma.masked + + assert_equal(x.data, xt.T.data) + assert_equal(x.mask, xt.T.mask) + + def test_diagonal_view(self): + x = np.ma.zeros((3, 3)) + x[0, 0] = 10 + x[1, 1] = np.ma.masked + x[2, 2] = 20 + xd = x.diagonal() + x[1, 1] = 15 + assert_equal(xd.mask, x.diagonal().mask) + assert_equal(xd.data, x.diagonal().data) + + +class TestMaskedArrayMathMethods: + + def setup_method(self): + # Base data definition. 
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_cumsumprod(self): + # Tests cumsum & cumprod on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXcp = mX.cumsum(0) + assert_equal(mXcp._data, mX.filled(0).cumsum(0)) + mXcp = mX.cumsum(1) + assert_equal(mXcp._data, mX.filled(0).cumsum(1)) + + mXcp = mX.cumprod(0) + assert_equal(mXcp._data, mX.filled(1).cumprod(0)) + mXcp = mX.cumprod(1) + assert_equal(mXcp._data, mX.filled(1).cumprod(1)) + + def test_cumsumprod_with_output(self): + # Tests cumsum/cumprod w/ output + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + for funcname in ('cumsum', 'cumprod'): + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty((3, 4), dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + + def test_ptp(self): + # Tests ptp on MaskedArrays. 
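+ # A sketch, assuming numpy as np: ptp on a MaskedArray is the
+ # peak-to-peak range of the unmasked values only, i.e. it matches
+ # np.ptp of the compressed data.
+ #   >>> int(np.ma.array([1, 9, 5], mask=[0, 1, 0]).ptp())
+ #   4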
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), np.ptp(mx.compressed())) + rows = np.zeros(n, float) + cols = np.zeros(m, float) + for k in range(m): + cols[k] = np.ptp(mX[:, k].compressed()) + for k in range(n): + rows[k] = np.ptp(mX[k].compressed()) + assert_equal(mX.ptp(0), cols) + assert_equal(mX.ptp(1), rows) + + def test_add_object(self): + x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) + y = x + 'x' + assert_equal(y[1], 'bx') + assert_(y.mask[0]) + + def test_sum_object(self): + # Test sum on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.sum(), 5) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.sum(axis=0), [5, 7, 9]) + + def test_prod_object(self): + # Test prod on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.prod(), 2 * 3) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.prod(axis=0), [4, 10, 18]) + + def test_meananom_object(self): + # Test mean/anom on object dtype + a = masked_array([1, 2, 3], dtype=object) + assert_equal(a.mean(), 2) + assert_equal(a.anom(), [-1, 0, 1]) + + def test_anom_shape(self): + a = masked_array([1, 2, 3]) + assert_equal(a.anom().shape, a.shape) + a.mask = True + assert_equal(a.anom().shape, a.shape) + assert_(np.ma.is_masked(a.anom())) + + def test_anom(self): + a = masked_array(np.arange(1, 7).reshape(2, 3)) + assert_almost_equal(a.anom(), + [[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]]) + assert_almost_equal(a.anom(axis=0), + [[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) + assert_almost_equal(a.anom(axis=1), + [[-1., 0., 1.], [-1., 0., 1.]]) + a.mask = [[0, 0, 1], [0, 1, 0]] + mval = -99 + assert_almost_equal(a.anom().filled(mval), + [[-2.25, -1.25, mval], [0.75, mval, 2.75]]) + assert_almost_equal(a.anom(axis=0).filled(mval), + [[-1.5, 0.0, mval], [1.5, mval, 0.0]]) + assert_almost_equal(a.anom(axis=1).filled(mval), + [[-0.5, 0.5, mval], [-1.0, mval, 1.0]]) + + def test_trace(self): + # Tests trace on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_almost_equal(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0)) + assert_equal(np.trace(mX), mX.trace()) + + # gh-5560 + arr = np.arange(2 * 4 * 4).reshape(2, 4, 4) + m_arr = np.ma.masked_array(arr, False) + assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) + + def test_dot(self): + # Tests dot on MaskedArrays. 
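+ # A sketch, assuming numpy as np: np.ma.dot treats masked entries as 0,
+ # so the data of the result equals the dot product of the filled inputs.
+ #   >>> a = np.ma.array([1., 2.], mask=[0, 1])
+ #   >>> float(np.ma.dot(a, a))
+ #   1.0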
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + fx = mx.filled(0) + r = mx.dot(mx) + assert_almost_equal(r.filled(0), fx.dot(fx)) + assert_(r.mask is nomask) + + fX = mX.filled(0) + r = mX.dot(mX) + assert_almost_equal(r.filled(0), fX.dot(fX)) + assert_(r.mask[1, 3]) + r1 = empty_like(r) + mX.dot(mX, out=r1) + assert_almost_equal(r, r1) + + mYY = mXX.swapaxes(-1, -2) + fXX, fYY = mXX.filled(0), mYY.filled(0) + r = mXX.dot(mYY) + assert_almost_equal(r.filled(0), fXX.dot(fYY)) + r1 = empty_like(r) + mXX.dot(mYY, out=r1) + assert_almost_equal(r, r1) + + def test_dot_shape_mismatch(self): + # regression test + x = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + y = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + z = masked_array([[0, 1], [3, 3]]) + x.dot(y, out=z) + assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) + assert_almost_equal(z.mask, [[0, 1], [0, 0]]) + + def test_varmean_nomask(self): + # gh-5769 + foo = array([1, 2, 3, 4], dtype='f8') + bar = array([1, 2, 3, 4], dtype='f8') + assert_equal(type(foo.mean()), np.float64) + assert_equal(type(foo.var()), np.float64) + assert (foo.mean() == bar.mean()) is np.bool(True) + + # check array type is preserved and out works + foo = array(np.arange(16).reshape((4, 4)), dtype='f8') + bar = empty(4, dtype='f4') + assert_equal(type(foo.mean(axis=1)), MaskedArray) + assert_equal(type(foo.var(axis=1)), MaskedArray) + assert_(foo.mean(axis=1, out=bar) is bar) + assert_(foo.var(axis=1, out=bar) is bar) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_almost_equal(mX.std(axis=None, ddof=1), + mX.compressed().std(ddof=1)) + assert_almost_equal(mX.var(axis=None, ddof=1), + mX.compressed().var(ddof=1)) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + @suppress_copy_mask_on_assignment + def test_varstd_specialcases(self): + # Test a special case for var + nout = np.array(-1, dtype=float) + mout = array(-1, dtype=float) + + x = array(arange(10), mask=True) + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method() is masked) + assert_(method(0) is masked) + assert_(method(-1) is masked) + # Using a masked array as explicit output + method(out=mout) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout) + assert_(np.isnan(nout)) + + x = array(arange(10), mask=True) + x[-1] = 9 + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method(ddof=1) is masked) + assert_(method(0, ddof=1) is masked) + assert_(method(-1, ddof=1) is masked) + # Using a masked array as explicit output + method(out=mout, ddof=1) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout, ddof=1) + assert_(np.isnan(nout)) + + def test_varstd_ddof(self): + a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 
0, 1], [0, 0, 1]]) + test = a.std(axis=0, ddof=0) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=1) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=2) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [1, 1, 1]) + + def test_diag(self): + # Test diag + x = arange(9).reshape((3, 3)) + x[1, 1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + def test_axis_methods_nomask(self): + # Test the combination nomask & methods w/ axis + a = array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(a.sum(0), [5, 7, 9]) + assert_equal(a.sum(-1), [6, 15]) + assert_equal(a.sum(1), [6, 15]) + + assert_equal(a.prod(0), [4, 10, 18]) + assert_equal(a.prod(-1), [6, 120]) + assert_equal(a.prod(1), [6, 120]) + + assert_equal(a.min(0), [1, 2, 3]) + assert_equal(a.min(-1), [1, 4]) + assert_equal(a.min(1), [1, 4]) + + assert_equal(a.max(0), [4, 5, 6]) + assert_equal(a.max(-1), [3, 6]) + assert_equal(a.max(1), [3, 6]) + + @requires_memory(free_bytes=2 * 10000 * 1000 * 2) + def test_mean_overflow(self): + # Test overflow in masked arrays + # gh-20272 + a = masked_array(np.full((10000, 10000), 65535, dtype=np.uint16), + mask=np.zeros((10000, 10000))) + assert_equal(a.mean(), 65535.0) + + def test_diff_with_prepend(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[3:], value=2) + a_prep = np.ma.masked_equal(x[:3], value=2) + diff1 = np.ma.diff(a, prepend=a_prep, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_append(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[:3], value=2) + a_app = np.ma.masked_equal(x[3:], value=2) + diff1 = np.ma.diff(a, append=a_app, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_dim_0(self): + with pytest.raises( + ValueError, + match="diff requires input that is at least one dimensional" + ): + np.ma.diff(np.array(1)) + + def test_diff_with_n_0(self): + a = np.ma.masked_equal([1, 2, 2, 3, 4, 2, 1, 1], value=2) + diff = np.ma.diff(a, n=0, axis=0) + + assert_(np.ma.allequal(a, diff)) + + +class TestMaskedArrayMathMethodsComplex: + # Test class for miscellaneous MaskedArrays methods. + def setup_method(self): + # Base data definition. 
+ x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, + 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + +class TestMaskedArrayFunctions: + # Test class for miscellaneous functions. + + def setup_method(self): + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + self.info = (xm, ym) + + def test_masked_where_bool(self): + x = [1, 2] + y = masked_where(False, x) + assert_equal(y, [1, 2]) + assert_equal(y[1], 2) + + def test_masked_equal_wlist(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [0, 0, 1]) + mx = masked_not_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [1, 1, 0]) + + def test_masked_equal_fill_value(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx._mask, [0, 0, 1]) + assert_equal(mx.fill_value, 3) + + def test_masked_where_condition(self): + # Tests masking functions. + x = array([1., 2., 3., 4., 5.]) + x[2] = masked + assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) + assert_equal(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2)) + assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) + assert_equal(masked_where(less_equal(x, 2), x), + masked_less_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5]) + + def test_masked_where_oddities(self): + # Tests some generic features. 
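+ # A sketch, assuming numpy as np: masked_where masks x wherever the
+ # condition is True, and masked_greater/masked_less/... are shorthands
+ # for the comparisons exercised below.
+ #   >>> np.ma.masked_where([False, True, False], [1, 2, 3]).tolist()
+ #   [1, None, 3]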
+ atest = ones((10, 10, 10), dtype=float)
+ btest = zeros(atest.shape, MaskType)
+ ctest = masked_where(btest, atest)
+ assert_equal(atest, ctest)
+
+ def test_masked_where_shape_constraint(self):
+ a = arange(10)
+ with assert_raises(IndexError):
+ masked_equal(1, a)
+ test = masked_equal(a, 1)
+ assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
+
+ def test_masked_where_structured(self):
+ # test that masked_where on a structured array sets a structured
+ # mask (see issue #2972)
+ a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
+ with np.errstate(over="ignore"):
+ # NOTE: The float16 "uses" 1e20 as mask fill_value, this leads
+ # to an overflow warning.
+ a = np.ma.masked_where(a["A"] < 5, a)
+ assert_equal(a.mask.dtype.names, a.dtype.names)
+ assert_equal(a["A"], np.ma.masked_array(np.zeros(10), np.ones(10)))
+
+ def test_masked_where_mismatch(self):
+ # gh-4520
+ x = np.arange(10)
+ y = np.arange(5)
+ assert_raises(IndexError, np.ma.masked_where, y > 6, x)
+
+ def test_masked_otherfunctions(self):
+ assert_equal(masked_inside(list(range(5)), 1, 3),
+ [0, 199, 199, 199, 4])
+ assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
+ assert_equal(masked_inside(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 1, 1, 0])
+ assert_equal(masked_outside(array(list(range(5)),
+ mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 0, 0, 1])
+ assert_equal(masked_equal(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 0])
+ assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 1])
+
+ def test_round(self):
+ a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
+ mask=[0, 1, 0, 0, 0])
+ assert_equal(a.round(), [1., 2., 3., 5., 6.])
+ assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
+ assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
+ b = empty_like(a)
+ a.round(out=b)
+ assert_equal(b, [1., 2., 3., 5., 6.])
+
+ x = array([1., 2., 3., 4., 5.])
+ c = array([1, 1, 1, 0, 0])
+ x[2] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ c[0] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ assert_(z[0] is masked)
+ assert_(z[1] is not masked)
+ assert_(z[2] is masked)
+
+ def test_round_with_output(self):
+ # Testing round with an explicit output
+
+ xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+ xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+ # A ndarray as explicit input
+ output = np.empty((3, 4), dtype=float)
+ output.fill(-9999)
+ result = np.round(xm, decimals=2, out=output)
+ # ...
the result should be the given output + assert_(result is output) + assert_equal(result, xm.round(decimals=2, out=output)) + + output = empty((3, 4), dtype=float) + result = xm.round(decimals=2, out=output) + assert_(result is output) + + def test_round_with_scalar(self): + # Testing round with scalar/zero dimension input + # GH issue 2244 + a = array(1.1, mask=[False]) + assert_equal(a.round(), 1) + + a = array(1.1, mask=[True]) + assert_(a.round() is masked) + + a = array(1.1, mask=[False]) + output = np.empty(1, dtype=float) + output.fill(-9999) + a.round(out=output) + assert_equal(output, 1) + + a = array(1.1, mask=[False]) + output = array(-9999., mask=[True]) + a.round(out=output) + assert_equal(output[()], 1) + + a = array(1.1, mask=[True]) + output = array(-9999., mask=[False]) + a.round(out=output) + assert_(output[()] is masked) + + def test_identity(self): + a = identity(5) + assert_(isinstance(a, MaskedArray)) + assert_equal(a, np.identity(5)) + + def test_power(self): + x = -1.1 + assert_almost_equal(power(x, 2.), 1.21) + assert_(power(x, masked) is masked) + x = array([-1.1, -1.1, 1.1, 1.1, 0.]) + b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) + y = power(x, b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + b.mask = nomask + y = power(x, b) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + z = x ** b + assert_equal(z._mask, y._mask) + assert_almost_equal(z, y) + assert_almost_equal(z._data, y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x, y) + assert_almost_equal(x._data, y._data) + + def test_power_with_broadcasting(self): + # Test power w/ broadcasting + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_where(self): + # Test the where function + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + + d = where(xm > 2, xm, -9) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm > 2, -9, ym) + assert_equal(d, [5., 0., 3., 2., -1., -9., + -9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) + d = where(xm > 2, xm, masked) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm <= 2).filled(True)] 
= True + assert_equal(d._mask, tmp) + + with np.errstate(invalid="warn"): + # The fill value is 1e20, it cannot be converted to `int`: + with pytest.warns(RuntimeWarning, match="invalid value"): + ixm = xm.astype(int) + d = where(ixm > 2, ixm, masked) + assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) + assert_equal(d.dtype, ixm.dtype) + + def test_where_object(self): + a = np.array(None) + b = masked_array(None) + r = b.copy() + assert_equal(np.ma.where(True, a, a), r) + assert_equal(np.ma.where(True, b, b), r) + + def test_where_with_masked_choice(self): + x = arange(10) + x[3] = masked + c = x >= 8 + # Set False to masked + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_equal(x, z) + # Set True to masked + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + + def test_where_with_masked_condition(self): + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + x = arange(1, 6) + x[-1] = masked + y = arange(1, 6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_equal(z, zm) + assert_(getmask(zm) is nomask) + assert_equal(zm, [1, 2, 3, 40, 50]) + z = where(c, masked, 1) + assert_equal(z, [99, 99, 99, 1, 1]) + z = where(c, 1, masked) + assert_equal(z, [99, 1, 1, 99, 99]) + + def test_where_type(self): + # Test the type conservation with where + x = np.arange(4, dtype=np.int32) + y = np.arange(4, dtype=np.float32) * 2.2 + test = where(x > 1.5, y, x).dtype + control = np.result_type(np.int32, np.float32) + assert_equal(test, control) + + def test_where_broadcast(self): + # Issue 8599 + x = np.arange(9).reshape(3, 3) + y = np.zeros(3) + core = np.where([1, 0, 1], x, y) + ma = where([1, 0, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured(self): + # Issue 8600 + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + y = np.array((10, 20), dtype=dt) + core = np.where([0, 1, 1], x, y) + ma = np.where([0, 1, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured_masked(self): + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + + ma = where([0, 1, 1], x, masked) + expected = masked_where([1, 0, 0], x) + + assert_equal(ma.dtype, expected.dtype) + assert_equal(ma, expected) + assert_equal(ma.mask, expected.mask) + + def test_masked_invalid_error(self): + a = np.arange(5, dtype=object) + a[3] = np.inf + a[2] = np.nan + with pytest.raises(TypeError, + match="not supported for the input types"): + np.ma.masked_invalid(a) + + def test_masked_invalid_pandas(self): + # getdata() used to be bad for pandas series due to its _data + # attribute. This test is a regression test mainly and may be + # removed if getdata() is adjusted. 
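+ # A sketch, assuming numpy as np: masked_invalid coerces its argument
+ # to an array and masks every NaN/inf entry, which is all the duck-typed
+ # Series below relies on.
+ #   >>> np.ma.masked_invalid([5., np.nan, np.inf]).mask.tolist()
+ #   [False, True, True]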
+ class Series:
+ _data = "nonsense"
+
+ def __array__(self, dtype=None, copy=None):
+ return np.array([5, np.nan, np.inf])
+
+ arr = np.ma.masked_invalid(Series())
+ assert_array_equal(arr._data, np.array(Series()))
+ assert_array_equal(arr._mask, [False, True, True])
+
+ @pytest.mark.parametrize("copy", [True, False])
+ def test_masked_invalid_full_mask(self, copy):
+ # Matplotlib relied on masked_invalid always returning a full mask
+ # (Also astropy projects, but were ok with it gh-22720 and gh-22842)
+ a = np.ma.array([1, 2, 3, 4])
+ assert a._mask is nomask
+ res = np.ma.masked_invalid(a, copy=copy)
+ assert res.mask is not nomask
+ # mask of a should not be mutated
+ assert a.mask is nomask
+ assert np.may_share_memory(a._data, res._data) != copy
+
+ def test_choose(self):
+ # Test choose
+ choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ [20, 21, 22, 23], [30, 31, 32, 33]]
+ chosen = choose([2, 3, 1, 0], choices)
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ chosen = choose([2, 4, 1, 0], choices, mode='clip')
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ chosen = choose([2, 4, 1, 0], choices, mode='wrap')
+ assert_equal(chosen, array([20, 1, 12, 3]))
+ # Check with some masked indices
+ indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
+ chosen = choose(indices_, choices, mode='wrap')
+ assert_equal(chosen, array([99, 1, 12, 99]))
+ assert_equal(chosen.mask, [1, 0, 0, 1])
+ # Check with some masked choices
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
+ [1, 0, 0, 0], [0, 0, 0, 0]])
+ indices_ = [2, 3, 1, 0]
+ chosen = choose(indices_, choices, mode='wrap')
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ assert_equal(chosen.mask, [1, 0, 0, 1])
+
+ def test_choose_with_out(self):
+ # Test choose with an explicit out keyword
+ choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ [20, 21, 22, 23], [30, 31, 32, 33]]
+ store = empty(4, dtype=int)
+ chosen = choose([2, 3, 1, 0], choices, out=store)
+ assert_equal(store, array([20, 31, 12, 3]))
+ assert_(store is chosen)
+ # Check with some masked indices + out
+ store = empty(4, dtype=int)
+ indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
+ chosen = choose(indices_, choices, mode='wrap', out=store)
+ assert_equal(store, array([99, 31, 12, 99]))
+ assert_equal(store.mask, [1, 0, 0, 1])
+ # Check with some masked choices + out in an ndarray!
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + store = empty(4, dtype=int).view(ndarray) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([999999, 31, 12, 999999])) + + def test_reshape(self): + a = arange(10) + a[0] = masked + # Try the default + b = a.reshape((5, 2)) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ arguments as list instead of tuple + b = a.reshape(5, 2) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ order + b = a.reshape((5, 2), order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + # Try w/ order + b = a.reshape(5, 2, order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + + c = np.reshape(a, (2, 5)) + assert_(isinstance(c, MaskedArray)) + assert_equal(c.shape, (2, 5)) + assert_(c[0, 0] is masked) + assert_(c.flags['C']) + + def test_make_mask_descr(self): + # Flexible + ntype = [('a', float), ('b', float)] + test = make_mask_descr(ntype) + assert_equal(test, [('a', bool), ('b', bool)]) + assert_(test is make_mask_descr(test)) + + # Standard w/ shape + ntype = (float, 2) + test = make_mask_descr(ntype) + assert_equal(test, (bool, 2)) + assert_(test is make_mask_descr(test)) + + # Standard standard + ntype = float + test = make_mask_descr(ntype) + assert_equal(test, np.dtype(bool)) + assert_(test is make_mask_descr(test)) + + # Nested + ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] + test = make_mask_descr(ntype) + control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) + assert_equal(test, control) + assert_(test is make_mask_descr(test)) + + # Named+ shape + ntype = [('a', (float, 2))] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([('a', (bool, 2))])) + assert_(test is make_mask_descr(test)) + + # 2 names + ntype = [(('A', 'a'), float)] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([(('A', 'a'), bool)])) + assert_(test is make_mask_descr(test)) + + # nested boolean types should preserve identity + base_type = np.dtype([('a', int, 3)]) + base_mtype = make_mask_descr(base_type) + sub_type = np.dtype([('a', int), ('b', base_mtype)]) + test = make_mask_descr(sub_type) + assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) + assert_(test.fields['b'][0] is base_mtype) + + def test_make_mask(self): + # Test make_mask + # w/ a list as an input + mask = [0, 1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a ndarray as an input + mask = np.array([0, 1], dtype=bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a flexible-type ndarray as an input - use default + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1, 1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', float), ('b', float)] + bdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + # Ensure this 
also works for void + mask = np.array((False, True), dtype='?,?')[()] + assert_(isinstance(mask, np.void)) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test, mask) + assert_(test is not mask) + mask = np.array((0, 1), dtype='i4,i4')[()] + test2 = make_mask(mask, dtype=mask.dtype) + assert_equal(test2, test) + # test that nomask is returned when m is nomask. + bools = [True, False] + dtypes = [MaskType, float] + msgformat = 'copy=%s, shrink=%s, dtype=%s' + for cpy, shr, dt in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) + assert_(res is nomask, msgformat % (cpy, shr, dt)) + + def test_mask_or(self): + # Initialize + mtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using another array w / the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w / a different dtype + othertype = [('A', bool), ('B', bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + # Using nested arrays + dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) + + a = np.array([False, False]) + assert mask_or(a, a) is nomask # gh-27360 + + def test_allequal(self): + x = array([1, 2, 3], mask=[0, 0, 0]) + y = array([1, 2, 3], mask=[1, 0, 0]) + z = array([[1, 2, 3], [4, 5, 6]], mask=[[0, 0, 0], [1, 1, 1]]) + + assert allequal(x, y) + assert not allequal(x, y, fill_value=False) + assert allequal(x, z) + + # test allequal for the same input, with mask=nomask, this test is for + # the scenario raised in https://github.com/numpy/numpy/issues/27201 + assert allequal(x, x) + assert allequal(x, x, fill_value=False) + + assert allequal(y, y) + assert not allequal(y, y, fill_value=False) + + def test_flatten_mask(self): + # Tests flatten mask + # Standard dtype + mask = np.array([0, 0, 1], dtype=bool) + assert_equal(flatten_mask(mask), mask) + # Flexible dtype + mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + data = [(0, (0, 0)), (0, (0, 1))] + mask = np.array(data, dtype=mdtype) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + def test_on_ndarray(self): + # Test functions on ndarrays + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + + def test_compress(self): + # Test compress function on ndarray and masked array + # Address Github #2495. 
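+ # A sketch, assuming numpy as np: note the argument order of the
+ # module-level function, condition first, array second.
+ #   >>> np.ma.compress([True, False, True], np.ma.array([1, 2, 3])).tolist()
+ #   [1, 3]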
+ arr = np.arange(8) + arr.shape = 4, 2 + cond = np.array([True, False, True, True]) + control = arr[[0, 2, 3]] + test = np.ma.compress(cond, arr, axis=0) + assert_equal(test, control) + marr = np.ma.array(arr) + test = np.ma.compress(cond, marr, axis=0) + assert_equal(test, control) + + def test_compressed(self): + # Test ma.compressed function. + # Address gh-4026 + a = np.ma.array([1, 2]) + test = np.ma.compressed(a) + assert_(type(test) is np.ndarray) + + # Test case when input data is ndarray subclass + class A(np.ndarray): + pass + + a = np.ma.array(A(shape=0)) + test = np.ma.compressed(a) + assert_(type(test) is A) + + # Test that compress flattens + test = np.ma.compressed([[1], [2]]) + assert_equal(test.ndim, 1) + test = np.ma.compressed([[[[[1]]]]]) + assert_equal(test.ndim, 1) + + # Test case when input is MaskedArray subclass + class M(MaskedArray): + pass + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test.ndim, 1) + + # with .compressed() overridden + class M(MaskedArray): + def compressed(self): + return 42 + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test, 42) + + def test_convolve(self): + a = masked_equal(np.arange(5), 2) + b = np.array([1, 1]) + + result = masked_equal([0, 1, -1, -1, 7, 4], -1) + test = np.ma.convolve(a, b, mode='full') + assert_equal(test, result) + + test = np.ma.convolve(a, b, mode='same') + assert_equal(test, result[:-1]) + + test = np.ma.convolve(a, b, mode='valid') + assert_equal(test, result[1:-1]) + + result = masked_equal([0, 1, 1, 3, 7, 4], -1) + test = np.ma.convolve(a, b, mode='full', propagate_mask=False) + assert_equal(test, result) + + test = np.ma.convolve(a, b, mode='same', propagate_mask=False) + assert_equal(test, result[:-1]) + + test = np.ma.convolve(a, b, mode='valid', propagate_mask=False) + assert_equal(test, result[1:-1]) + + test = np.ma.convolve([1, 1], [1, 1, 1]) + assert_equal(test, masked_equal([1, 2, 2, 1], -1)) + + a = [1, 1] + b = masked_equal([1, -1, -1, 1], -1) + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) + test = np.ma.convolve(a, b, propagate_mask=True) + assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) + + +class TestMaskedFields: + + def setup_method(self): + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = ['one', 'two', 'three', 'four', 'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mdtype = [('a', bool), ('b', bool), ('c', bool)] + mask = [0, 1, 0, 0, 1] + base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + self.data = {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} + + def test_set_records_masks(self): + base = self.data['base'] + mdtype = self.data['mdtype'] + # Set w/ nomask or masked + base.mask = nomask + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = masked + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ simple boolean + base.mask = False + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = True + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ list + base.mask = [0, 0, 0, 1, 1] + assert_equal_records(base._mask, + np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], + dtype=mdtype)) + + def test_set_record_element(self): + # Check setting an element of a record) + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, 
int) + assert_equal(base_a._data, [3, 2, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'two', b'three', b'four', b'five']) + + def test_set_record_slice(self): + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[:3] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 3, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'pi', b'pi', b'four', b'five']) + + def test_mask_element(self): + "Check record access" + base = self.data['base'] + base[0] = masked + + for n in ('a', 'b', 'c'): + assert_equal(base[n].mask, [1, 1, 0, 0, 1]) + assert_equal(base[n]._data, base._data[n]) + + def test_getmaskarray(self): + # Test getmaskarray on flexible dtype + ndtype = [('a', int), ('b', float)] + test = empty(3, dtype=ndtype) + assert_equal(getmaskarray(test), + np.array([(0, 0), (0, 0), (0, 0)], + dtype=[('a', '|b1'), ('b', '|b1')])) + test[:] = masked + assert_equal(getmaskarray(test), + np.array([(1, 1), (1, 1), (1, 1)], + dtype=[('a', '|b1'), ('b', '|b1')])) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + # Transform globally to simple dtype + test = a.view(float) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + # Transform globally to dty + test = a.view((float, 2)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + + def test_getitem(self): + ndtype = [('a', float), ('b', float)] + a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) + a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), + dtype=[('a', bool), ('b', bool)]) + + def _test_index(i): + assert_equal(type(a[i]), mvoid) + assert_equal_records(a[i]._data, a._data[i]) + assert_equal_records(a[i]._mask, a._mask[i]) + + assert_equal(type(a[i, ...]), MaskedArray) + assert_equal_records(a[i, ...]._data, a._data[i, ...]) + assert_equal_records(a[i, ...]._mask, a._mask[i, ...]) + + _test_index(1) # No mask + _test_index(0) # One element masked + _test_index(-2) # All element masked + + def test_setitem(self): + # Issue 4866: check that one can set individual items in [record][col] + # and [col][record] order + ndtype = np.dtype([('a', float), ('b', int)]) + ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) + ma['a'][1] = 3.0 + assert_equal(ma['a'], np.array([1.0, 3.0])) + ma[1]['a'] = 4.0 + assert_equal(ma['a'], np.array([1.0, 4.0])) + # Issue 2403 + mdtype = np.dtype([('a', bool), ('b', bool)]) + # soft mask + control = np.array([(False, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a[0]['a'] = 2 + assert_equal(a.mask, control) + # hard mask + control = np.array([(True, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a[0]['a'] = 2 + assert_equal(a.mask, control) + + def 
test_setitem_scalar(self): + # 8510 + mask_0d = np.ma.masked_array(1, mask=True) + arr = np.ma.arange(3) + arr[0] = mask_0d + assert_array_equal(arr.mask, [True, False, False]) + + def test_element_len(self): + # check that len() works for mvoid (Github issue #576) + for rec in self.data['base']: + assert_equal(len(rec), len(self.data['ddtype'])) + + +class TestMaskedObjectArray: + + def test_getitem(self): + arr = np.ma.array([None, None]) + for dt in [float, object]: + a0 = np.eye(2).astype(dt) + a1 = np.eye(3).astype(dt) + arr[0] = a0 + arr[1] = a1 + + assert_(arr[0] is a0) + assert_(arr[1] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_(arr[0, ...][()] is a0) + assert_(arr[1, ...][()] is a1) + + arr[0] = np.ma.masked + + assert_(arr[1] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_equal(arr[0, ...].mask, True) + assert_(arr[1, ...][()] is a1) + + # gh-5962 - object arrays of arrays do something special + assert_equal(arr[0].data, a0) + assert_equal(arr[0].mask, True) + assert_equal(arr[0, ...][()].data, a0) + assert_equal(arr[0, ...][()].mask, True) + + def test_nested_ma(self): + + arr = np.ma.array([None, None]) + # set the first object to be an unmasked masked constant. A little fiddly + arr[0, ...] = np.array([np.ma.masked], object)[0, ...] + + # check the above line did what we were aiming for + assert_(arr.data[0] is np.ma.masked) + + # test that getitem returned the value by identity + assert_(arr[0] is np.ma.masked) + + # now mask the masked value! + arr[0] = np.ma.masked + assert_(arr[0] is np.ma.masked) + + +class TestMaskedView: + + def setup_method(self): + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + self.data = (data, a, controlmask) + + def test_view_to_nothing(self): + (data, a, controlmask) = self.data + test = a.view() + assert_(isinstance(test, MaskedArray)) + assert_equal(test._data, a._data) + assert_equal(test._mask, a._mask) + + def test_view_to_type(self): + (data, a, controlmask) = self.data + test = a.view(np.ndarray) + assert_(not isinstance(test, MaskedArray)) + assert_equal(test, a._data) + assert_equal_records(test, data.view(a.dtype).squeeze()) + + def test_view_to_simple_dtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view(float) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + + def test_view_to_flexible_dtype(self): + (data, a, controlmask) = self.data + + test = a.view([('A', float), ('B', float)]) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a']) + assert_equal(test['B'], a['b']) + + test = a[0].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][0]) + assert_equal(test['B'], a['b'][0]) + + test = a[-1].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][-1]) + assert_equal(test['B'], a['b'][-1]) + + def test_view_to_subdtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data) + assert_equal(test.mask, 
controlmask.reshape(-1, 2)) + # View on 1 masked element + test = a[0].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[0]) + assert_equal(test.mask, (1, 0)) + # View on 1 unmasked element + test = a[-1].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[-1]) + + def test_view_to_dtype_and_type(self): + (data, a, controlmask) = self.data + + test = a.view((float, 2), np.recarray) + assert_equal(test, data) + assert_(isinstance(test, np.recarray)) + assert_(not isinstance(test, MaskedArray)) + + +class TestOptionalArgs: + def test_ndarrayfuncs(self): + # test axis arg behaves the same as ndarray (including multiple axes) + + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) + # mask out last element of last dimension + m[:, :, -1] = True + a = np.ma.array(d, mask=m) + + def testaxis(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test axis arg + assert_equal(ma_f(a, axis=1)[..., :-1], numpy_f(d[..., :-1], axis=1)) + assert_equal(ma_f(a, axis=(0, 1))[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1))) + + def testkeepdims(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test keepdims arg + assert_equal(ma_f(a, keepdims=True).shape, + numpy_f(d, keepdims=True).shape) + assert_equal(ma_f(a, keepdims=False).shape, + numpy_f(d, keepdims=False).shape) + + # test both at once + assert_equal(ma_f(a, axis=1, keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0, 1), keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1), keepdims=True)) + + for f in ['sum', 'prod', 'mean', 'var', 'std']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + for f in ['min', 'max']: + testaxis(f, a, d) + + d = (np.arange(24).reshape((2, 3, 4)) % 2 == 0) + a = np.ma.array(d, mask=m) + for f in ['all', 'any']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + def test_count(self): + # test np.ma.count specially + + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) + m[:, 0, :] = True + a = np.ma.array(d, mask=m) + + assert_equal(count(a), 16) + assert_equal(count(a, axis=1), 2 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 4 * ones((4,))) + assert_equal(count(a, keepdims=True), 16 * ones((1, 1, 1))) + assert_equal(count(a, axis=1, keepdims=True), 2 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 4 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 2 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) + assert_raises(AxisError, count, a, axis=3) + + # check the 'nomask' path + a = np.ma.array(d, mask=nomask) + + assert_equal(count(a), 24) + assert_equal(count(a, axis=1), 3 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 6 * ones((4,))) + assert_equal(count(a, keepdims=True), 24 * ones((1, 1, 1))) + assert_equal(np.ndim(count(a, keepdims=True)), 3) + assert_equal(count(a, axis=1, keepdims=True), 3 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 6 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 3 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) + assert_raises(AxisError, count, a, axis=3) + + # check the 'masked' singleton + assert_equal(count(np.ma.masked), 0) + + # check 0-d arrays do not allow axis > 0 + assert_raises(AxisError, count, np.ma.array(1), axis=1) + + +class TestMaskedConstant: + def _do_add_test(self, add): + # sanity check + 
assert_(add(np.ma.masked, 1) is np.ma.masked) + + # now try with a vector + vector = np.array([1, 2, 3]) + result = add(np.ma.masked, vector) + + # lots of things could go wrong here + assert_(result is not np.ma.masked) + assert_(not isinstance(result, np.ma.core.MaskedConstant)) + assert_equal(result.shape, vector.shape) + assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) + + def test_ufunc(self): + self._do_add_test(np.add) + + def test_operator(self): + self._do_add_test(lambda a, b: a + b) + + def test_ctor(self): + m = np.ma.array(np.ma.masked) + + # most importantly, we do not want to create a new MaskedConstant + # instance + assert_(not isinstance(m, np.ma.core.MaskedConstant)) + assert_(m is not np.ma.masked) + + def test_repr(self): + # copies should not exist, but if they do, it should be obvious that + # something is wrong + assert_equal(repr(np.ma.masked), 'masked') + + # create a new instance in a weird way + masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) + assert_not_equal(repr(masked2), 'masked') + + def test_pickle(self): + from io import BytesIO + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(np.ma.masked, f, protocol=proto) + f.seek(0) + res = pickle.load(f) + assert_(res is np.ma.masked) + + def test_copy(self): + # gh-9328 + # copy is a no-op, like it is with np.True_ + assert_equal( + np.ma.masked.copy() is np.ma.masked, + np.True_.copy() is np.True_) + + def test__copy(self): + import copy + assert_( + copy.copy(np.ma.masked) is np.ma.masked) + + def test_deepcopy(self): + import copy + assert_( + copy.deepcopy(np.ma.masked) is np.ma.masked) + + def test_immutable(self): + orig = np.ma.masked + assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) + assert_raises(ValueError, operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.mask, (), False) + + view = np.ma.masked.view(np.ma.MaskedArray) + assert_raises(ValueError, operator.setitem, view, (), 1) + assert_raises(ValueError, operator.setitem, view.data, (), 1) + assert_raises(ValueError, operator.setitem, view.mask, (), False) + + def test_coercion_int(self): + a_i = np.zeros((), int) + assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) + assert_raises(MaskError, int, np.ma.masked) + + def test_coercion_float(self): + a_f = np.zeros((), float) + assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + assert_(np.isnan(a_f[()])) + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_unicode(self): + a_u = np.zeros((), 'U10') + a_u[()] = np.ma.masked + assert_equal(a_u[()], '--') + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_bytes(self): + a_b = np.zeros((), 'S10') + a_b[()] = np.ma.masked + assert_equal(a_b[()], b'--') + + def test_subclass(self): + # https://github.com/astropy/astropy/issues/6645 + class Sub(type(np.ma.masked)): + pass + + a = Sub() + assert_(a is Sub()) + assert_(a is not np.ma.masked) + assert_not_equal(repr(a), 'masked') + + def test_attributes_readonly(self): + assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,)) + assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64) + + +class TestMaskedWhereAliases: + + # TODO: Test masked_object, masked_equal, ... 
+ + def test_masked_values(self): + res = masked_values(np.array([-32768.0]), np.int16(-32768)) + assert_equal(res.mask, [True]) + + res = masked_values(np.inf, np.inf) + assert_equal(res.mask, True) + + res = np.ma.masked_values(np.inf, -np.inf) + assert_equal(res.mask, False) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True) + assert_(res.mask is np.ma.nomask) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False) + assert_equal(res.mask, [False] * 4) + + +def test_masked_array(): + a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) + assert_equal(np.argwhere(a), [[1], [3]]) + +def test_masked_array_no_copy(): + # check nomask array is updated in place + a = np.ma.array([1, 2, 3, 4]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [False, False, True, False]) + # check masked array is updated in place + a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [True, False, True, False]) + # check masked array with masked_invalid is updated in place + a = np.ma.array([np.inf, 1, 2, 3, 4]) + _ = np.ma.masked_invalid(a, copy=False) + assert_array_equal(a.mask, [True, False, False, False, False]) + +def test_append_masked_array(): + a = np.ma.masked_equal([1, 2, 3], value=2) + b = np.ma.masked_equal([4, 3, 2], value=2) + + result = np.ma.append(a, b) + expected_data = [1, 2, 3, 4, 3, 2] + expected_mask = [False, True, False, False, False, True] + assert_array_equal(result.data, expected_data) + assert_array_equal(result.mask, expected_mask) + + a = np.ma.masked_all((2, 2)) + b = np.ma.ones((3, 1)) + + result = np.ma.append(a, b) + expected_data = [1] * 3 + expected_mask = [True] * 4 + [False] * 3 + assert_array_equal(result.data[-3:], expected_data) + assert_array_equal(result.mask, expected_mask) + + result = np.ma.append(a, b, axis=None) + assert_array_equal(result.data[-3:], expected_data) + assert_array_equal(result.mask, expected_mask) + + +def test_append_masked_array_along_axis(): + a = np.ma.masked_equal([1, 2, 3], value=2) + b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + + # When `axis` is specified, `values` must have the correct shape. + assert_raises(ValueError, np.ma.append, a, b, axis=0) + + result = np.ma.append(a[np.newaxis, :], b, axis=0) + expected = np.ma.arange(1, 10) + expected[[1, 6]] = np.ma.masked + expected = expected.reshape((3, 3)) + assert_array_equal(result.data, expected.data) + assert_array_equal(result.mask, expected.mask) + +def test_default_fill_value_complex(): + # regression test for Python 3, where 'unicode' was not defined + assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) + + +def test_ufunc_with_output(): + # check that giving an output argument always returns that output. + # Regression test for gh-8416.
+ x = array([1., 2., 3.], mask=[0, 0, 1]) + y = np.add(x, 1., out=x) + assert_(y is x) + + +def test_ufunc_with_out_varied(): + """ Test that masked arrays are immune to gh-10459 """ + # the mask of the output should not affect the result, however it is passed + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) + expected = array([11, 22, 33], mask=[1, 0, 0]) + + out_pos = out.copy() + res_pos = np.add(a, b, out_pos) + + out_kw = out.copy() + res_kw = np.add(a, b, out=out_kw) + + out_tup = out.copy() + res_tup = np.add(a, b, out=(out_tup,)) + + assert_equal(res_kw.mask, expected.mask) + assert_equal(res_kw.data, expected.data) + assert_equal(res_tup.mask, expected.mask) + assert_equal(res_tup.data, expected.data) + assert_equal(res_pos.mask, expected.mask) + assert_equal(res_pos.data, expected.data) + + +def test_astype_mask_ordering(): + descr = np.dtype([('v', int, 3), ('x', [('y', float)])]) + x = array([ + [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))], + [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr) + x[0]['v'][0] = np.ma.masked + + x_a = x.astype(descr) + assert x_a.dtype.names == np.dtype(descr).names + assert x_a.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a) + + assert_(x is x.astype(x.dtype, copy=False)) + assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray) + + x_f = x.astype(x.dtype, order='F') + assert_(x_f.flags.f_contiguous) + assert_(x_f.mask.flags.f_contiguous) + + # Also test the same indirectly, via np.array + x_a2 = np.array(x, dtype=descr, subok=True) + assert x_a2.dtype.names == np.dtype(descr).names + assert x_a2.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a2) + + assert_(x is np.array(x, dtype=descr, copy=None, subok=True)) + + x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True) + assert_(x_f2.flags.f_contiguous) + assert_(x_f2.mask.flags.f_contiguous) + + +@pytest.mark.parametrize('dt1', num_dts, ids=num_ids) +@pytest.mark.parametrize('dt2', num_dts, ids=num_ids) +@pytest.mark.filterwarnings('ignore::numpy.exceptions.ComplexWarning') +def test_astype_basic(dt1, dt2): + # See gh-12070 + src = np.ma.array(ones(3, dt1), fill_value=1) + dst = src.astype(dt2) + + assert_(src.fill_value == 1) + assert_(src.dtype == dt1) + assert_(src.fill_value.dtype == dt1) + + assert_(dst.fill_value == 1) + assert_(dst.dtype == dt2) + assert_(dst.fill_value.dtype == dt2) + + assert_equal(src, dst) + + +def test_fieldless_void(): + dt = np.dtype([]) # a void dtype with no fields + x = np.empty(4, dt) + + # these arrays contain no values, so there's little to test - but this + # shouldn't crash + mx = np.ma.array(x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + mx = np.ma.array(x, mask=x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + +def test_mask_shape_assignment_does_not_break_masked(): + a = np.ma.masked + b = np.ma.array(1, mask=a.mask) + b.shape = (1,) + assert_equal(a.mask.shape, ()) + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") # noqa: E501 +def test_doc_note(): + def method(self): + """This docstring + + Has multiple lines + + And notes + + Notes + ----- + original note + """ + pass + + expected_doc = """This docstring + +Has multiple lines + +And notes + +Notes +----- +note + +original note""" + + assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc) + + +def test_gh_22556(): + source = 
np.ma.array([0, [0, 1, 2]], dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[1].append('this should not appear in source') + assert len(source[1]) == 3 + + +def test_gh_21022(): + # testing for absence of reported error + source = np.ma.masked_array(data=[-1, -1], mask=True, dtype=np.float64) + axis = np.array(0) + result = np.prod(source, axis=axis, keepdims=False) + result = np.ma.masked_array(result, + mask=np.ones(result.shape, dtype=np.bool)) + array = np.ma.masked_array(data=-1, mask=True, dtype=np.float64) + copy.deepcopy(array) + copy.deepcopy(result) + + +def test_deepcopy_2d_obj(): + source = np.ma.array([[0, "dog"], + [1, 1], + [[1, 2], "cat"]], + mask=[[0, 1], + [0, 0], + [0, 0]], + dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[2, 0].extend(['this should not appear in source', 3]) + assert len(source[2, 0]) == 2 + assert len(deepcopy[2, 0]) == 4 + assert_equal(deepcopy._mask, source._mask) + deepcopy._mask[0, 0] = 1 + assert source._mask[0, 0] == 0 + + +def test_deepcopy_0d_obj(): + source = np.ma.array(0, mask=[0], dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[...] = 17 + assert_equal(source, 0) + assert_equal(deepcopy, 17) + + +def test_uint_fill_value_and_filled(): + # See also gh-27269 + a = np.ma.MaskedArray([1, 1], [True, False], dtype="uint16") + # the fill value should likely not be 999999, but for now guarantee it: + assert a.fill_value == 999999 + # However, its type is uint: + assert a.fill_value.dtype.kind == "u" + # And this ensures things like filled work: + np.testing.assert_array_equal( + a.filled(), np.array([999999, 1]).astype("uint16"), strict=True) diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_deprecations.py b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_deprecations.py new file mode 100644 index 0000000..8cc8b9c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_deprecations.py @@ -0,0 +1,87 @@ +"""Test deprecation and future warnings.
+ +""" +import io +import textwrap + +import pytest + +import numpy as np +from numpy.ma.core import MaskedArrayFutureWarning +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_warns + + +class TestArgsort: + """ gh-8701 """ + def _test_base(self, argsort, cls): + arr_0d = np.array(1).view(cls) + argsort(arr_0d) + + arr_1d = np.array([1, 2, 3]).view(cls) + argsort(arr_1d) + + # argsort has a bad default for >1d arrays + arr_2d = np.array([[1, 2], [3, 4]]).view(cls) + result = assert_warns( + np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) + assert_equal(result, argsort(arr_2d, axis=None)) + + # should be no warnings for explicitly specifying it + argsort(arr_2d, axis=None) + argsort(arr_2d, axis=-1) + + def test_function_ndarray(self): + return self._test_base(np.ma.argsort, np.ndarray) + + def test_function_maskedarray(self): + return self._test_base(np.ma.argsort, np.ma.MaskedArray) + + def test_method(self): + return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) + + +class TestMinimumMaximum: + + def test_axis_default(self): + # NumPy 1.13, 2017-05-06 + + data1d = np.ma.arange(6) + data2d = data1d.reshape(2, 3) + + ma_min = np.ma.minimum.reduce + ma_max = np.ma.maximum.reduce + + # check that the default axis is still None, but warns on 2d arrays + result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + assert_equal(result, ma_max(data2d, axis=None)) + + result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + assert_equal(result, ma_min(data2d, axis=None)) + + # no warnings on 1d, as both new and old defaults are equivalent + result = ma_min(data1d) + assert_equal(result, ma_min(data1d, axis=None)) + assert_equal(result, ma_min(data1d, axis=0)) + + result = ma_max(data1d) + assert_equal(result, ma_max(data1d, axis=None)) + assert_equal(result, ma_max(data1d, axis=0)) + + +class TestFromtextfile: + def test_fromtextfile_delimitor(self): + # NumPy 1.22.0, 2021-09-23 + + textfile = io.StringIO(textwrap.dedent( + """ + A,B,C,D + 'string 1';1;1.0;'mixed column' + 'string 2';2;2.0; + 'string 3';3;3.0;123 + 'string 4';4;4.0;3.14 + """ + )) + + with pytest.warns(DeprecationWarning): + result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_extras.py b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_extras.py new file mode 100644 index 0000000..3d10e83 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_extras.py @@ -0,0 +1,1998 @@ +"""Tests suite for MaskedArray. 
+Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import itertools +import warnings + +import pytest + +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy.ma.core import ( + MaskedArray, + arange, + array, + count, + getmaskarray, + masked, + masked_array, + nomask, + ones, + shape, + zeros, +) +from numpy.ma.extras import ( + _covhelper, + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + compress_nd, + compress_rowcols, + corrcoef, + cov, + diagflat, + dot, + ediff1d, + flatnotmasked_contiguous, + in1d, + intersect1d, + isin, + mask_rowcols, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vstack, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, +) +from numpy.testing import assert_warns, suppress_warnings + + +class TestGeneric: + # + def test_masked_all(self): + # Tests masked_all + # Standard dtype + test = masked_all((2,), dtype=float) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + test = masked_all((2,), dtype=dt) + control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + test = masked_all((2, 2), dtype=dt) + control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], + mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], + dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((1, 1), dtype=dt) + control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) + assert_equal(test, control) + + def test_masked_all_with_object_nested(self): + # Test masked_all works with nested array with dtype of an 'object' + # refers to issue #15895 + my_dtype = np.dtype([('b', ([('c', object)], (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']['c']), 1) + assert_equal(masked_arr['b']['c'].shape, (1, 1)) + assert_equal(masked_arr['b']['c']._fill_value.shape, ()) + + def test_masked_all_with_object(self): + # same as above except that the array is not nested + my_dtype = np.dtype([('b', (object, (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']), 1) + assert_equal(masked_arr['b'].shape, (1, 1)) + assert_equal(masked_arr['b']._fill_value.shape, ()) + + def test_masked_all_like(self): + # Tests masked_all + # Standard dtype + base = array([1, 2], dtype=float) + test = masked_all_like(base) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) 
+ test = masked_all_like(base) + control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + test = masked_all_like(control) + assert_equal(test, control) + + def check_clump(self, f): + for i in range(1, 7): + for j in range(2**i): + k = np.arange(i, dtype=int) + ja = np.full(i, j, dtype=int) + a = masked_array(2**k) + a.mask = (ja & (2**k)) != 0 + s = 0 + for sl in f(a): + s += a.data[sl].sum() + if f == clump_unmasked: + assert_equal(a.compressed().sum(), s) + else: + a.mask = ~a.mask + assert_equal(a.compressed().sum(), s) + + def test_clump_masked(self): + # Test clump_masked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + # + test = clump_masked(a) + control = [slice(0, 3), slice(6, 7), slice(8, 10)] + assert_equal(test, control) + + self.check_clump(clump_masked) + + def test_clump_unmasked(self): + # Test clump_unmasked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + test = clump_unmasked(a) + control = [slice(3, 6), slice(7, 8), ] + assert_equal(test, control) + + self.check_clump(clump_unmasked) + + def test_flatnotmasked_contiguous(self): + # Test flatnotmasked_contiguous + a = arange(10) + # No mask + test = flatnotmasked_contiguous(a) + assert_equal(test, [slice(0, a.size)]) + # mask of all false + a.mask = np.zeros(10, dtype=bool) + assert_equal(test, [slice(0, a.size)]) + # Some mask + a[(a < 3) | (a > 8) | (a == 5)] = masked + test = flatnotmasked_contiguous(a) + assert_equal(test, [slice(3, 5), slice(6, 9)]) + # + a[:] = masked + test = flatnotmasked_contiguous(a) + assert_equal(test, []) + + +class TestAverage: + # Several tests of average. Why so many ? Good point... + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + assert_equal(2.0, average(ott, axis=0)) + assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) + assert_equal(2.0, result) + assert_(wts == 4.0) + ott[:] = masked + assert_equal(average(ott, axis=0).mask, [True]) + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_equal(average(ott, axis=0), [2.0, 0.0]) + assert_equal(average(ott, axis=1).mask[0], [True]) + assert_equal([2., 0.], average(ott, axis=0)) + result, wts = average(ott, axis=0, returned=True) + assert_equal(wts, [1., 0.]) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6, dtype=np.float64) + assert_equal(average(x, axis=0), 2.5) + assert_equal(average(x, axis=0, weights=w1), 2.5) + y = array([arange(6, dtype=np.float64), 2.0 * arange(6)]) + assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) + assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + assert_equal(average(y, None, weights=w2), 20. / 6.) 
+ assert_equal(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.]) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_equal(average(masked_array(x, m1), axis=0), 2.5) + assert_equal(average(masked_array(x, m2), axis=0), 2.5) + assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_equal(average(z, None), 20. / 6.) + assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + assert_equal(average(z, axis=1), [2.5, 5.0]) + assert_equal(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0]) + + def test_testAverage3(self): + # Yet more tests of average! + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False], [True, False]]) + a2da = average(a2d, axis=0) + assert_equal(a2da, [0.5, 3.0]) + a2dma = average(a2dm, axis=0) + assert_equal(a2dma, [1.0, 3.0]) + a2dma = average(a2dm, axis=None) + assert_equal(a2dma, 7. / 3.) + a2dma = average(a2dm, axis=1) + assert_equal(a2dma, [1.5, 4.0]) + + def test_testAverage4(self): + # Test that `keepdims` works with average + x = np.array([2, 3, 4]).reshape(3, 1) + b = np.ma.array(x, mask=[[False], [False], [True]]) + w = np.array([4, 5, 6]).reshape(3, 1) + actual = average(b, weights=w, axis=1, keepdims=True) + desired = masked_array([[2.], [3.], [4.]], [[False], [False], [True]]) + assert_equal(actual, desired) + + def test_weight_and_input_dims_different(self): + # this test mirrors a test for np.average() + # in lib/test/test_function_base.py + y = np.arange(12).reshape(2, 2, 3) + w = np.array([0., 0., 1., .5, .5, 0., 0., .5, .5, 1., 0., 0.])\ + .reshape(2, 2, 3) + + m = np.full((2, 2, 3), False) + yma = np.ma.array(y, mask=m) + subw0 = w[:, :, 0] + + actual = average(yma, axis=(0, 1), weights=subw0) + desired = masked_array([7., 8., 9.], mask=[False, False, False]) + assert_almost_equal(actual, desired) + + m = np.full((2, 2, 3), False) + m[:, :, 0] = True + m[0, 0, 1] = True + yma = np.ma.array(y, mask=m) + actual = average(yma, axis=(0, 1), weights=subw0) + desired = masked_array( + [np.nan, 8., 9.], + mask=[True, False, False]) + assert_almost_equal(actual, desired) + + m = np.full((2, 2, 3), False) + yma = np.ma.array(y, mask=m) + + subw1 = w[1, :, :] + actual = average(yma, axis=(1, 2), weights=subw1) + desired = masked_array([2.25, 8.25], mask=[False, False]) + assert_almost_equal(actual, desired) + + # here the weights have the wrong shape for the specified axes + with pytest.raises( + ValueError, + match="Shape of weights must be consistent with " + "shape of a along specified axis"): + average(yma, axis=(0, 1, 2), weights=subw0) + + with pytest.raises( + ValueError, + match="Shape of weights must be consistent with " + "shape of a along specified axis"): + average(yma, axis=(0, 1), 
weights=subw1) + + # swapping the axes should be same as transposing weights + actual = average(yma, axis=(1, 0), weights=subw0) + desired = average(yma, axis=(0, 1), weights=subw0.T) + assert_almost_equal(actual, desired) + + def test_onintegers_with_mask(self): + # Test average on integers with mask + a = average(array([1, 2])) + assert_equal(a, 1.5) + a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) + assert_equal(a, 1.5) + + def test_complex(self): + # Test with complex data. + # (Regression test for https://github.com/numpy/numpy/issues/2684) + mask = np.array([[0, 0, 0, 1, 0], + [0, 1, 0, 0, 0]], dtype=bool) + a = masked_array([[0, 1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j], + [9j, 0 + 1j, 2 + 3j, 4 + 5j, 7 + 7j]], + mask=mask) + + av = average(a) + expected = np.average(a.compressed()) + assert_almost_equal(av.real, expected.real) + assert_almost_equal(av.imag, expected.imag) + + av0 = average(a, axis=0) + expected0 = average(a.real, axis=0) + average(a.imag, axis=0) * 1j + assert_almost_equal(av0.real, expected0.real) + assert_almost_equal(av0.imag, expected0.imag) + + av1 = average(a, axis=1) + expected1 = average(a.real, axis=1) + average(a.imag, axis=1) * 1j + assert_almost_equal(av1.real, expected1.real) + assert_almost_equal(av1.imag, expected1.imag) + + # Test with the 'weights' argument. + wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], + [1.0, 1.0, 1.0, 1.0, 1.0]]) + wav = average(a, weights=wts) + expected = np.average(a.compressed(), weights=wts[~mask]) + assert_almost_equal(wav.real, expected.real) + assert_almost_equal(wav.imag, expected.imag) + + wav0 = average(a, weights=wts, axis=0) + expected0 = (average(a.real, weights=wts, axis=0) + + average(a.imag, weights=wts, axis=0) * 1j) + assert_almost_equal(wav0.real, expected0.real) + assert_almost_equal(wav0.imag, expected0.imag) + + wav1 = average(a, weights=wts, axis=1) + expected1 = (average(a.real, weights=wts, axis=1) + + average(a.imag, weights=wts, axis=1) * 1j) + assert_almost_equal(wav1.real, expected1.real) + assert_almost_equal(wav1.imag, expected1.imag) + + @pytest.mark.parametrize( + 'x, axis, expected_avg, weights, expected_wavg, expected_wsum', + [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]), + ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]], + [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])], + ) + def test_basic_keepdims(self, x, axis, expected_avg, + weights, expected_wavg, expected_wsum): + avg = np.ma.average(x, axis=axis, keepdims=True) + assert avg.shape == np.shape(expected_avg) + assert_array_equal(avg, expected_avg) + + wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True) + assert wavg.shape == np.shape(expected_wavg) + assert_array_equal(wavg, expected_wavg) + + wavg, wsum = np.ma.average(x, axis=axis, weights=weights, + returned=True, keepdims=True) + assert wavg.shape == np.shape(expected_wavg) + assert_array_equal(wavg, expected_wavg) + assert wsum.shape == np.shape(expected_wsum) + assert_array_equal(wsum, expected_wsum) + + def test_masked_weights(self): + # Test with masked weights. 
+ # (Regression test for https://github.com/numpy/numpy/issues/10438) + a = np.ma.array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]]) + weights_unmasked = masked_array([5, 28, 31], mask=False) + weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0]) + + avg_unmasked = average(a, axis=0, + weights=weights_unmasked, returned=False) + expected_unmasked = np.array([6.0, 5.21875, 6.21875]) + assert_almost_equal(avg_unmasked, expected_unmasked) + + avg_masked = average(a, axis=0, weights=weights_masked, returned=False) + expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678]) + assert_almost_equal(avg_masked, expected_masked) + + # weights should be masked if needed + # depending on the array mask. This is to avoid summing + # masked nan or other values that are not cancelled by a zero + a = np.ma.array([1.0, 2.0, 3.0, 4.0], + mask=[False, False, True, True]) + avg_unmasked = average(a, weights=[1, 1, 1, np.nan]) + + assert_almost_equal(avg_unmasked, 1.5) + + a = np.ma.array([ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 1.0, 2.0, 3.0], + ], mask=[ + [False, True, True, False], + [True, False, True, True], + [True, False, True, False], + ]) + + avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0) + avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5], + mask=[False, True, True, False]) + + assert_almost_equal(avg_masked, avg_expected) + assert_equal(avg_masked.mask, avg_expected.mask) + + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_1d(self): + # Tests mr_ on 1D arrays. + assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) + b = ones(5) + m = [1, 0, 0, 0, 0] + d = masked_array(b, mask=m) + c = mr_[d, 0, 0, d] + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + assert_array_equal(c.mask, mr_[m, 0, 0, m]) + + def test_2d(self): + # Tests mr_ on 2D arrays. + a_1 = np.random.rand(5, 5) + a_2 = np.random.rand(5, 5) + m_1 = np.round(np.random.rand(5, 5), 0) + m_2 = np.round(np.random.rand(5, 5), 0) + b_1 = masked_array(a_1, mask=m_1) + b_2 = masked_array(a_2, mask=m_2) + # append columns + d = mr_['1', b_1, b_2] + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b_1) + assert_array_equal(d[:, 5:], b_2) + assert_array_equal(d.mask, np.r_['1', m_1, m_2]) + d = mr_[b_1, b_2] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b_1) + assert_array_equal(d[5:, :], b_2) + assert_array_equal(d.mask, np.r_[m_1, m_2]) + + def test_masked_constant(self): + actual = mr_[np.ma.masked, 1] + assert_equal(actual.mask, [True, False]) + assert_equal(actual.data[1], 1) + + actual = mr_[[1, 2], np.ma.masked] + assert_equal(actual.mask, [False, False, True]) + assert_equal(actual.data[:2], [1, 2]) + + +class TestNotMasked: + # Tests notmasked_edges and notmasked_contiguous. 
+ + def test_edges(self): + # Tests unmasked_edges + data = masked_array(np.arange(25).reshape(5, 5), + mask=[[0, 0, 1, 0, 0], + [0, 0, 0, 1, 1], + [1, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [1, 1, 1, 0, 0]],) + test = notmasked_edges(data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, 1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) + # + test = notmasked_edges(data.data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data.data, 0) + assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data.data, -1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) + # + data[-2] = masked + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, -1) + assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) + assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) + + def test_contiguous(self): + # Tests notmasked_contiguous + a = masked_array(np.arange(24).reshape(3, 8), + mask=[[0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0]]) + tmp = notmasked_contiguous(a, None) + assert_equal(tmp, [ + slice(0, 4, None), + slice(16, 22, None), + slice(23, 24, None) + ]) + + tmp = notmasked_contiguous(a, 0) + assert_equal(tmp, [ + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(2, 3, None)], + [slice(2, 3, None)], + [], + [slice(2, 3, None)] + ]) + # + tmp = notmasked_contiguous(a, 1) + assert_equal(tmp, [ + [slice(0, 4, None)], + [], + [slice(0, 6, None), slice(7, 8, None)] + ]) + + +class TestCompressFunctions: + + def test_compress_nd(self): + # Tests compress_nd + x = np.array(list(range(3 * 4 * 5))).reshape(3, 4, 5) + m = np.zeros((3, 4, 5)).astype(bool) + m[1, 1, 1] = True + x = array(x, mask=m) + + # axis=None + a = compress_nd(x) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + # axis=0 + a = compress_nd(x, 0) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [45, 46, 47, 48, 49], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + # axis=1 + a = compress_nd(x, 1) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[20, 21, 22, 23, 24], + [30, 31, 32, 33, 34], + [35, 36, 37, 38, 39]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + a2 = compress_nd(x, (1,)) + a3 = compress_nd(x, -2) + a4 = compress_nd(x, (-2,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=2 + a = compress_nd(x, 2) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [25, 27, 28, 29], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (2,)) + a3 = compress_nd(x, -1) + a4 = compress_nd(x, (-1,)) + assert_equal(a, a2) + 
assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 1) + a = compress_nd(x, (0, 1)) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + a2 = compress_nd(x, (0, -2)) + assert_equal(a, a2) + + # axis=(1, 2) + a = compress_nd(x, (1, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (-2, 2)) + a3 = compress_nd(x, (1, -1)) + a4 = compress_nd(x, (-2, -1)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 2) + a = compress_nd(x, (0, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (0, -1)) + assert_equal(a, a2) + + def test_compress_rowcols(self): + # Tests compress_rowcols + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) + assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) + assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[8]]) + assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) + assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_equal(compress_rowcols(x).size, 0) + assert_equal(compress_rowcols(x, 0).size, 0) + assert_equal(compress_rowcols(x, 1).size, 0) + + def test_mask_rowcols(self): + # Tests mask_rowcols. 
+ x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, + [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_(mask_rowcols(x).all() is masked) + assert_(mask_rowcols(x, 0).all() is masked) + assert_(mask_rowcols(x, 1).all() is masked) + assert_(mask_rowcols(x).mask.all()) + assert_(mask_rowcols(x, 0).mask.all()) + assert_(mask_rowcols(x, 1).mask.all()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize(["func", "rowcols_axis"], + [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)]) + def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): + # Test deprecation of the axis argument to `mask_rows` and `mask_cols` + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + with assert_warns(DeprecationWarning): + res = func(x, axis=axis) + assert_equal(res, mask_rowcols(x, rowcols_axis)) + + def test_dot(self): + # Tests dot product + n = np.arange(1, 7) + # + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + assert_equal(c, dot(a, b)) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) + # + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + 
assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 0], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]]) + c = dot(a, b, strict=True) + assert_equal(c.mask, + [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]], + [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, + [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]], + [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = 5. + c = dot(a, b, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(2), mask=[0, 1]) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[1, 0], [0, 0]]) + + def test_dot_returns_maskedarray(self): + # See gh-6611 + a = np.eye(3) + b = array(a) + assert_(type(dot(a, a)) is MaskedArray) + assert_(type(dot(a, b)) is MaskedArray) + assert_(type(dot(b, a)) is MaskedArray) + assert_(type(dot(b, b)) is MaskedArray) + + def test_dot_out(self): + a = array(np.eye(3)) + out = array(np.zeros((3, 3))) + res = dot(a, a, out=out) + assert_(res is out) + assert_equal(a, res) + + +class TestApplyAlongAxis: + # Tests 2D functions + def test_3d(self): + a = arange(12.).reshape(2, 2, 3) + + def myfunc(b): + return b[1] + + xa = apply_along_axis(myfunc, 2, a) + assert_equal(xa, [[1, 4], [7, 10]]) + + # Tests kwargs functions + def test_3d_kwargs(self): + a = arange(12).reshape(2, 2, 3) + + def myfunc(b, offset=0): + return b[1 + offset] + + xa = apply_along_axis(myfunc, 2, a, offset=1) + assert_equal(xa, [[2, 5], [8, 11]]) + + +class TestApplyOverAxes: + # Tests apply_over_axes + def test_basic(self): + a = arange(24).reshape(2, 3, 4) + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[60], [92], [124]]]) + assert_equal(test, ctrl) + a[(a % 2).astype(bool)] = masked + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[28], [44], [60]]]) + assert_equal(test, ctrl) + + +class TestMedian: + def test_pytype(self): + r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) + assert_equal(r, np.inf) + + def 
test_inf(self): + # test that the even-length median computation handles inf and masked values + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]]), axis=-1) + assert_equal(r, np.inf) + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]]), axis=None) + assert_equal(r, np.inf) + # all masked + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]], mask=True), + axis=-1) + assert_equal(r.mask, True) + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]], mask=True), + axis=None) + assert_equal(r.mask, True) + + def test_non_masked(self): + x = np.arange(9) + assert_equal(np.ma.median(x), 4.) + assert_(type(np.ma.median(x)) is not MaskedArray) + x = range(8) + assert_equal(np.ma.median(x), 3.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + x = 5 + assert_equal(np.ma.median(x), 5.) + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = np.arange(9 * 8).reshape(9, 8) + assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) + assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) + assert_(np.ma.median(x, axis=1) is not MaskedArray) + # float + x = np.arange(9 * 8.).reshape(9, 8) + assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) + assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) + assert_(np.ma.median(x, axis=1) is not MaskedArray) + + def test_docstring_examples(self): + "test the examples given in the docstring of ma.median" + x = array(np.arange(8), mask=[0] * 4 + [1] * 4) + assert_equal(np.ma.median(x), 1.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(10).reshape(2, 5), mask=[0] * 6 + [1] * 4) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + ma_x = np.ma.median(x, axis=-1, overwrite_input=True) + assert_equal(ma_x, [2., 5.]) + assert_equal(ma_x.shape, (2,), "shape mismatch") + assert_(type(ma_x) is MaskedArray) + + def test_axis_argument_errors(self): + msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s" + for ndmin in range(5): + for mask in [False, True]: + x = array(1, ndmin=ndmin, mask=mask) + + # Valid axis values should not raise exception + args = itertools.product(range(-ndmin, ndmin), [False, True]) + for axis, over in args: + try: + np.ma.median(x, axis=axis, overwrite_input=over) + except Exception: + raise AssertionError(msg % (mask, ndmin, axis, over)) + + # Invalid axis values should raise exception + args = itertools.product([-(ndmin + 1), ndmin], [False, True]) + for axis, over in args: + try: + np.ma.median(x, axis=axis, overwrite_input=over) + except np.exceptions.AxisError: + pass + else: + raise AssertionError(msg % (mask, ndmin, axis, over)) + + def test_masked_0d(self): + # Check values + x = array(1, mask=False) + assert_equal(np.ma.median(x), 1) + x = array(1, mask=True) + assert_equal(np.ma.median(x), np.ma.masked) + + def test_masked_1d(self): + x = array(np.arange(5), mask=True) + assert_equal(np.ma.median(x), np.ma.masked) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant) + x = array(np.arange(5), mask=False) + assert_equal(np.ma.median(x), 2.)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0, 1, 0, 0, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0, 1, 1, 1, 1]) + assert_equal(np.ma.median(x), 0.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(5), mask=[0, 1, 1, 0, 0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(5.), mask=[0, 1, 1, 0, 0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(6), mask=[0, 1, 1, 1, 1, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(6.), mask=[0, 1, 1, 1, 1, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + + def test_1d_shape_consistency(self): + assert_equal(np.ma.median(array([1, 2, 3], mask=[0, 0, 0])).shape, + np.ma.median(array([1, 2, 3], mask=[0, 1, 0])).shape) + + def test_2d(self): + # Tests median w/ 2D + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) + x[:10] = x[-10:] = masked + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] + idx = np.arange(len(x)) + for i in range(1, p): + np.random.shuffle(idx) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) + assert_equal(median(z), 0) + assert_equal(median(z, axis=0), np.zeros(p)) + assert_equal(median(z.T, axis=1), np.zeros(p)) + + def test_2d_waxis(self): + # Tests median w/ 2D arrays and different axis. + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x), 14.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_(type(np.ma.median(x, axis=0)) is MaskedArray) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_(type(np.ma.median(x, axis=1)) is MaskedArray) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) + + def test_3d(self): + # Tests median w/ 3D + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x.shape = (4, 3, 2) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) + + def test_neg_axis(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x, axis=-1), median(x, axis=1)) + + def test_out_1d(self): + # integer float even odd + for v in (30, 30., 31, 31.): + x = masked_array(np.arange(v)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(())) + r = median(x, out=out) + if v == 30: + assert_equal(out, 14.5) + else: + assert_equal(out, 15.) 
+ assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_out(self): + # integer float even odd + for v in (40, 40., 30, 30.): + x = masked_array(np.arange(v).reshape(10, -1)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(10)) + r = median(x, axis=1, out=out) + if v == 30: + e = masked_array([0.] * 3 + [10, 13, 16, 19] + [0.] * 3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + else: + e = masked_array([0.] * 3 + [13.5, 17.5, 21.5, 25.5] + [0.] * 3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + assert_equal(r, e) + assert_(r is out) + assert_(type(r) is MaskedArray) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, axis): + mask = np.zeros((3, 5, 7, 11), dtype=bool) + # Randomly set some elements to True: + w = np.random.random((4, 200)) * np.array(mask.shape)[:, None] + w = w.astype(np.intp) + mask[tuple(w)] = np.nan + d = masked_array(np.ones(mask.shape), mask=mask) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = masked_array(np.empty(shape_out)) + result = median(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_single_non_masked_value_on_axis(self): + data = [[1., 0.], + [0., 3.], + [0., 0.]] + masked_arr = np.ma.masked_equal(data, 0) + expected = [1., 3.] + assert_array_equal(np.ma.median(masked_arr, axis=0), + expected) + + def test_nan(self): + for mask in (False, np.zeros(6, dtype=bool)): + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm.mask = mask + + # scalar result + r = np.ma.median(dm, axis=None) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + r = np.ma.median(dm.ravel(), axis=0) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + + r = np.ma.median(dm, axis=0) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [1, np.nan, 3]) + r = np.ma.median(dm, axis=1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + r = np.ma.median(dm, axis=-1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm[:, 2] = np.ma.masked + assert_array_equal(np.ma.median(dm, axis=None), np.nan) + assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) + assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) + + def test_out_nan(self): + o = np.ma.masked_array(np.zeros((4,))) + d = np.ma.masked_array(np.ones((3, 4))) + d[2, 1] = np.nan + d[2, 2] = np.ma.masked + assert_equal(np.ma.median(d, 0, out=o), o) + o = np.ma.masked_array(np.zeros((3,))) + assert_equal(np.ma.median(d, 1, out=o), o) + o = np.ma.masked_array(np.zeros(())) + assert_equal(np.ma.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.ma.masked_array(np.arange(24, dtype=float)) + a[::3] = np.ma.masked + a[2] = np.nan + assert_array_equal(np.ma.median(a), np.nan) + assert_array_equal(np.ma.median(a, axis=0), np.nan) + + a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) + a.mask = np.arange(a.size) % 2 == 1 + aorig = a.copy() + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_array_equal(np.ma.median(a), np.nan) + assert_(np.isscalar(np.ma.median(a))) + + # axis0 + b = np.ma.median(aorig, axis=0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.ma.median(a, 0), b) + + # axis1 + b = np.ma.median(aorig, axis=1) + b[1, 3] = np.nan + 
b[1, 2] = np.nan + assert_equal(np.ma.median(a, 1), b) + + # axis02 + b = np.ma.median(aorig, axis=(0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.ma.median(a, (0, 2)), b) + + def test_ambigous_fill(self): + # 255 is max value, used as filler for sort + a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) + a = np.ma.masked_array(a, mask=a == 3) + assert_array_equal(np.ma.median(a, axis=1), 255) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), 255) + + def test_special(self): + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a), inf) + + a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_array_equal(np.ma.median(a, axis=1), inf) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), inf) + + # no mask + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=0), inf) + assert_equal(np.ma.median(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + a = np.ma.masked_array(a, mask=np.isnan(a)) + if inf > 0: + assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.ma.median(a), 4.5) + else: + assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.ma.median(a), -2.5) + assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) + + for i in range(10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=1), inf) + assert_equal(np.ma.median(a, axis=0), + ([np.nan] * i) + [inf] * j) + + def test_empty(self): + # empty arrays + a = np.ma.masked_array(np.array([], dtype=float)) + with suppress_warnings() as w: + w.record(RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # multiple dimensions + a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) + # no axis + with suppress_warnings() as w: + w.record(RuntimeWarning) + warnings.filterwarnings('always', '', RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) + assert_equal(np.ma.median(a, axis=0), b) + assert_equal(np.ma.median(a, axis=1), b) + + # axis 2 + b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.ma.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.ma.masked_array(np.arange(7.)) + assert_(type(np.ma.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.ma.median(o.astype(object))), float) + + +class TestCov: + + def setup_method(self): + self.data = array(np.random.rand(12)) + + def test_covhelper(self): + x = self.data + # Test not mask output type is a float. 
+ assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) + assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) + # Test not mask output is equal after casting to float. + mask = x > 0.5 + assert_array_equal( + _covhelper( + np.ma.masked_array(x, mask), rowvar=True + )[1].astype(bool), + ~mask.reshape(1, -1), + ) + assert_array_equal( + _covhelper( + np.ma.masked_array(x, mask), y=x, rowvar=False + )[1].astype(bool), + np.vstack((~mask, ~mask)), + ) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test cov on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test cov 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.cov(nx), cov(x)) + assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(nx, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + # + try: + cov(x, allow_masked=False) + except ValueError: + pass + # + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), + cov(x, x[::-1], rowvar=False)) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), + cov(x, x[::-1], rowvar=False, bias=True)) + + def test_2d_with_missing(self): + # Test cov on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + valid = np.logical_not(getmaskarray(x)).astype(int) + frac = np.dot(valid, valid.T) + xf = (x - x.mean(1)[:, None]).filled(0) + assert_almost_equal(cov(x), + np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) + assert_almost_equal(cov(x, bias=True), + np.cov(xf, bias=True) * x.shape[1] / frac) + frac = np.dot(valid.T, valid) + xf = (x - x.mean(0)).filled(0) + assert_almost_equal(cov(x, rowvar=False), + (np.cov(xf, rowvar=False) * + (x.shape[0] - 1) / (frac - 1.))) + assert_almost_equal(cov(x, rowvar=False, bias=True), + (np.cov(xf, rowvar=False, bias=True) * + x.shape[0] / frac)) + + +class TestCorrcoef: + + def setup_method(self): + self.data = array(np.random.rand(12)) + self.data2 = array(np.random.rand(12)) + + def test_ddof(self): + # ddof raises DeprecationWarning + x, y = self.data, self.data2 + expected = np.corrcoef(x) + expected2 = np.corrcoef(x, y) + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof has no or negligible effect on the function + assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) + assert_almost_equal(corrcoef(x, ddof=-1), expected) + assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) + assert_almost_equal(corrcoef(x, ddof=3), expected) + assert_almost_equal(corrcoef(x, y, ddof=3), expected2) + + def test_bias(self): + x, y = self.data, self.data2 + expected = np.corrcoef(x) + # bias raises DeprecationWarning + with suppress_warnings() as sup: + 
warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, y, True, False) + assert_warns(DeprecationWarning, corrcoef, x, y, True, True) + assert_warns(DeprecationWarning, corrcoef, x, bias=False) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(x, bias=1), expected) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test corrcoef on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test corrcoef 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.corrcoef(nx), corrcoef(x)) + assert_almost_equal(np.corrcoef(nx, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + try: + corrcoef(x, allow_masked=False) + except ValueError: + pass + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), + corrcoef(x, x[::-1], rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], bias=1)) + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], ddof=2)) + + def test_2d_with_missing(self): + # Test corrcoef on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + + test = corrcoef(x) + control = np.corrcoef(x) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], + control[:-1, :-1]) + + +class TestPolynomial: + # + def test_polyfit(self): + # Tests polyfit + # On ndarrays + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0, 0] = y[-1, -1] = masked + # + (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, + full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, 
s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + w = np.random.rand(10) + 1 + wo = w.copy() + xs = x[1:-1] + ys = y[1:-1] + ws = w[1:-1] + (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) + (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) + assert_equal(w, wo) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + def test_polyfit_with_masked_NaNs(self): + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + + x[0] = np.nan + y[-1, -1] = np.nan + x = x.view(MaskedArray) + y = y.view(MaskedArray) + x[0] = masked + y[-1, -1] = masked + + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + +class TestArraySetOps: + + def test_unique_onlist(self): + # Test unique on list + data = [1, 1, 1, 2, 2, 3] + test = unique(data, return_index=True, return_inverse=True) + assert_(isinstance(test[0], MaskedArray)) + assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) + assert_equal(test[1], [0, 3, 5]) + assert_equal(test[2], [0, 0, 0, 1, 1, 2]) + + def test_unique_onmaskedarray(self): + # Test unique on masked data w/use_mask=True + data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + # + data.fill_value = 3 + data = masked_array(data=[1, 1, 1, 2, 2, 3], + mask=[0, 0, 1, 0, 1, 0], fill_value=3) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + + def test_unique_allmasked(self): + # Test all masked + data = masked_array([1, 1, 1], mask=True) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, ], mask=[True])) + assert_equal(test[1], [0]) + assert_equal(test[2], [0, 0, 0]) + # + # Test masked + data = masked + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array(masked)) + assert_equal(test[1], [0]) + assert_equal(test[2], [0]) + + def test_ediff1d(self): + # Tests mediff1d + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) + test = ediff1d(x) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin(self): + # Test ediff1d w/ to_begin + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_begin=masked) + control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_begin=[1, 2, 3]) + control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) + assert_equal(test, control) + 
assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_toend(self): + # Test ediff1d w/ to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked) + control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3]) + control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin_toend(self): + # Test ediff1d w/ to_begin and to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) + control = array([0, 1, 1, 1, 4, 1, 2, 3], + mask=[1, 1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_ndarray(self): + # Test ediff1d w/ a ndarray + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_intersect1d(self): + # Test intersect1d + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + def test_setxor1d(self): + # Test setxor1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 2, 3]) + b = array([6, 5, 4]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([], [])) + + def test_setxor1d_unique(self): + # Test setxor1d with assume_unique=True + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b, assume_unique=True) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([[1], [8], [2], [3]]) + b = array([[6, 5], [4, 8]]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + + def 
test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. + a = np.arange(24).reshape([2, 3, 4]) + mask = np.zeros([2, 3, 4]) + mask[1, 2, 0] = 1 + a = array(a, mask=mask) + b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], + mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) + ec = zeros((2, 3, 4), dtype=bool) + ec[0, 0, 0] = True + ec[0, 0, 1] = True + ec[0, 2, 3] = True + c = isin(a, b) + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, ec) + # compare results of np.isin to ma.isin + d = np.isin(a, b[~b.mask]) & ~a.mask + assert_array_equal(c, d) + + def test_in1d(self): + # Test in1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, False, True, True]) + # + assert_array_equal([], in1d([], [])) + + def test_in1d_invert(self): + # Test in1d's invert parameter + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + assert_array_equal([], in1d([], [], invert=True)) + + def test_union1d(self): + # Test union1d + a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) + y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) + ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) + z = union1d(x, y) + assert_equal(z, ez) + # + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + # Test setdiff1d + a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + a = array([], np.uint32, mask=[]) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_char_array(self): + # Test setdiff1d_charray + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + +class TestShapeBase: + + def test_atleast_2d(self): + # Test atleast_2d + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = atleast_2d(a) + assert_equal(b.shape, (1, 3)) + assert_equal(b.mask.shape, b.data.shape) + assert_equal(a.shape, (3,)) + assert_equal(a.mask.shape, a.data.shape) + assert_equal(b.mask.shape, b.data.shape) + + def test_shape_scalar(self): + # the atleast and diagflat function should work with scalars + # GitHub issue #3367 + # Additionally, the atleast functions should accept multiple scalars + # correctly + b = atleast_1d(1.0) + assert_equal(b.shape, (1,)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_1d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1,)) + assert_equal(a.mask.shape, a.shape) + 
assert_equal(a.data.shape, a.shape) + + b = atleast_2d(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_2d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_3d(1.0) + assert_equal(b.shape, (1, 1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_3d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = diagflat(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.data.shape) + + +class TestNDEnumerate: + + def test_ndenumerate_nomasked(self): + ordinary = np.arange(6.).reshape((1, 3, 2)) + empty_mask = np.zeros_like(ordinary, dtype=bool) + with_mask = masked_array(ordinary, mask=empty_mask) + assert_equal(list(np.ndenumerate(ordinary)), + list(ndenumerate(ordinary))) + assert_equal(list(ndenumerate(ordinary)), + list(ndenumerate(with_mask))) + assert_equal(list(ndenumerate(with_mask)), + list(ndenumerate(with_mask, compressed=False))) + + def test_ndenumerate_allmasked(self): + a = masked_all(()) + b = masked_all((100,)) + c = masked_all((2, 3, 4)) + assert_equal(list(ndenumerate(a)), []) + assert_equal(list(ndenumerate(b)), []) + assert_equal(list(ndenumerate(b, compressed=False)), + list(zip(np.ndindex((100,)), 100 * [masked]))) + assert_equal(list(ndenumerate(c)), []) + assert_equal(list(ndenumerate(c, compressed=False)), + list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked]))) + + def test_ndenumerate_mixedmasked(self): + a = masked_array(np.arange(12).reshape((3, 4)), + mask=[[1, 1, 1, 1], + [1, 1, 0, 1], + [0, 0, 0, 0]]) + items = [((1, 2), 6), + ((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)] + assert_equal(list(ndenumerate(a)), items) + assert_equal(len(list(ndenumerate(a, compressed=False))), a.size) + for coordinate, value in ndenumerate(a, compressed=False): + assert_equal(a[coordinate], value) + + +class TestStack: + + def test_stack_1d(self): + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = masked_array([9, 8, 7], mask=[1, 0, 0]) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_masks(self): + a = masked_array([0, 1, 2], mask=True) + b = masked_array([9, 8, 7], mask=False) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_nd(self): + # 2D + shp = (3, 2) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, 
c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) + + # 4D + shp = (3, 2, 4, 5,) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_mrecords.py b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_mrecords.py new file mode 100644 index 0000000..0da9151 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_mrecords.py @@ -0,0 +1,497 @@ +"""Tests suite for mrecords. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import pickle + +import numpy as np +import numpy.ma as ma +from numpy._core.records import fromarrays as recfromarrays +from numpy._core.records import fromrecords as recfromrecords +from numpy._core.records import recarray +from numpy.ma import masked, nomask +from numpy.ma.mrecords import ( + MaskedRecords, + addfield, + fromarrays, + fromrecords, + fromtextfile, + mrecarray, +) +from numpy.ma.testutils import ( + assert_, + assert_equal, + assert_equal_records, +) +from numpy.testing import temppath + + +class TestMRecords: + + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = [b'one', b'two', b'three', b'four', b'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + + def test_byview(self): + # Test creation by view + base = self.base + mbase = base.view(mrecarray) + assert_equal(mbase.recordmask, base.recordmask) + assert_equal_records(mbase._mask, base._mask) + assert_(isinstance(mbase._data, recarray)) + assert_equal_records(mbase._data, base._data.view(recarray)) + for field in ('a', 'b', 'c'): + assert_equal(base[field], mbase[field]) + assert_equal_records(mbase.view(mrecarray), mbase) + + def test_get(self): + # Tests fields retrieval + base = self.base.copy() + mbase = base.view(mrecarray) + # As fields.......... + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase, field), mbase[field]) + assert_equal(base[field], mbase[field]) + # as elements ....... + mbase_first = mbase[0] + assert_(isinstance(mbase_first, mrecarray)) + assert_equal(mbase_first.dtype, mbase.dtype) + assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) + # Used to be mask, now it's recordmask + assert_equal(mbase_first.recordmask, nomask) + assert_equal(mbase_first._mask.item(), (False, False, False)) + assert_equal(mbase_first['a'], mbase['a'][0]) + mbase_last = mbase[-1] + assert_(isinstance(mbase_last, mrecarray)) + assert_equal(mbase_last.dtype, mbase.dtype) + assert_equal(mbase_last.tolist(), (None, None, None)) + # Used to be mask, now it's recordmask + assert_equal(mbase_last.recordmask, True) + assert_equal(mbase_last._mask.item(), (True, True, True)) + assert_equal(mbase_last['a'], mbase['a'][-1]) + assert_(mbase_last['a'] is masked) + # as slice .......... 
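+ # Hedged aside: slicing an mrecarray should yield another mrecarray, and
+ # with self.base's mask [0, 1, 0, 0, 1] the first two records give
+ #   mbase[:2].recordmask   # -> [False, True]
+ # which is exactly what the assertions below spell out.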
+ mbase_sl = mbase[:2] + assert_(isinstance(mbase_sl, mrecarray)) + assert_equal(mbase_sl.dtype, mbase.dtype) + # Used to be mask, now it's recordmask + assert_equal(mbase_sl.recordmask, [0, 1]) + assert_equal_records(mbase_sl.mask, + np.array([(False, False, False), + (True, True, True)], + dtype=mbase._mask.dtype)) + assert_equal_records(mbase_sl, base[:2].view(mrecarray)) + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase_sl, field), base[:2][field]) + + def test_set_fields(self): + # Tests setting fields. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase = mbase.copy() + mbase.fill_value = (999999, 1e20, 'N/A') + # Change the data, the mask should be conserved + mbase.a._data[:] = 5 + assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) + assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) + # Change the elements, and the mask will follow + mbase.a = 1 + assert_equal(mbase['a']._data, [1] * 5) + assert_equal(ma.getmaskarray(mbase['a']), [0] * 5) + # Use to be _mask, now it's recordmask + assert_equal(mbase.recordmask, [False] * 5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0), + (0, 1, 1), + (0, 0, 0), + (0, 0, 0), + (0, 1, 1)], + dtype=bool)) + # Set a field to mask ........................ + mbase.c = masked + # Use to be mask, and now it's still mask ! + assert_equal(mbase.c.mask, [1] * 5) + assert_equal(mbase.c.recordmask, [1] * 5) + assert_equal(ma.getmaskarray(mbase['c']), [1] * 5) + assert_equal(ma.getdata(mbase['c']), [b'N/A'] * 5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 1), + (0, 1, 1), + (0, 0, 1), + (0, 0, 1), + (0, 1, 1)], + dtype=bool)) + # Set fields by slices ....................... + mbase = base.view(mrecarray).copy() + mbase.a[3:] = 5 + assert_equal(mbase.a, [1, 2, 3, 5, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) + mbase.b[3:] = masked + assert_equal(mbase.b, base['b']) + assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) + # Set fields globally.......................... + ndtype = [('alpha', '|S1'), ('num', int)] + data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) + rdata = data.view(MaskedRecords) + val = ma.array([10, 20, 30], mask=[1, 0, 0]) + + rdata['num'] = val + assert_equal(rdata.num, val) + assert_equal(rdata.num.mask, [1, 0, 0]) + + def test_set_fields_mask(self): + # Tests setting the mask of a field. + base = self.base.copy() + # This one has already a mask.... + mbase = base.view(mrecarray) + mbase['a'][-2] = masked + assert_equal(mbase.a, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) + # This one has not yet + mbase = fromarrays([np.arange(5), np.random.rand(5)], + dtype=[('a', int), ('b', float)]) + mbase['a'][-2] = masked + assert_equal(mbase.a, [0, 1, 2, 3, 4]) + assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) + + def test_set_mask(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Set the mask to True ....................... + mbase.mask = masked + assert_equal(ma.getmaskarray(mbase['b']), [1] * 5) + assert_equal(mbase['a']._mask, mbase['b']._mask) + assert_equal(mbase['a']._mask, mbase['c']._mask) + assert_equal(mbase._mask.tolist(), + np.array([(1, 1, 1)] * 5, dtype=bool)) + # Delete the mask ............................ 
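+ # Hedged sketch: assigning nomask is expected to clear every field mask
+ # in one go, i.e. afterwards
+ #   ma.getmaskarray(mbase['a'])   # -> [False] * 5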
+ mbase.mask = nomask + assert_equal(ma.getmaskarray(mbase['c']), [0] * 5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0)] * 5, dtype=bool)) + + def test_set_mask_fromarray(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Sets the mask w/ an array + mbase.mask = [1, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) + # Yay, once more ! + mbase.mask = [0, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) + + def test_set_mask_fromfields(self): + mbase = self.base.copy().view(mrecarray) + + nmask = np.array( + [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], + dtype=[('a', bool), ('b', bool), ('c', bool)]) + mbase.mask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + # Reinitialize and redo + mbase.mask = False + mbase.fieldmask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + + def test_set_elements(self): + base = self.base.copy() + # Set an element to mask ..................... + mbase = base.view(mrecarray).copy() + mbase[-2] = masked + assert_equal( + mbase._mask.tolist(), + np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], + dtype=bool)) + # Used to be mask, now it's recordmask! + assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) + # Set slices ................................. + mbase = base.view(mrecarray).copy() + mbase[:2] = (5, 5, 5) + assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'5', b'5', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + + mbase = base.view(mrecarray).copy() + mbase[:2] = masked + assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + + def test_setslices_hardmask(self): + # Tests setting slices w/ hardmask. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + try: + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'5', b'five']) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) + assert_equal(mbase.b._mask, mbase.a._mask) + assert_equal(mbase.b._mask, mbase.c._mask) + except NotImplementedError: + # OK, not implemented yet... 
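+ # Hedged aside: the hard-mask contract probed here is that assignment
+ # never unmasks entries; for a plain 1-D masked array, roughly
+ #   a = ma.array([1, 2], mask=[0, 1]); a.harden_mask()
+ #   a[:] = 9   # -> [9, --]; the masked slot stays masked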
+ pass + except AssertionError: + raise + else: + raise Exception("Flexible hard masks should be supported !") + # Not using a tuple should crash + try: + mbase[-2:] = 3 + except (NotImplementedError, TypeError): + pass + else: + raise TypeError("Should have expected a readable buffer object!") + + def test_hardmask(self): + # Test hardmask + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + assert_(mbase._hardmask) + mbase.mask = nomask + assert_equal_records(mbase._mask, base._mask) + mbase.soften_mask() + assert_(not mbase._hardmask) + mbase.mask = nomask + # So, the mask of a field is no longer set to nomask... + assert_equal_records(mbase._mask, + ma.make_mask_none(base.shape, base.dtype)) + assert_(ma.make_mask(mbase['b']._mask) is nomask) + assert_equal(mbase['a']._mask, mbase['b']._mask) + + def test_pickling(self): + # Test pickling + base = self.base.copy() + mrec = base.view(mrecarray) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + _ = pickle.dumps(mrec, protocol=proto) + mrec_ = pickle.loads(_) + assert_equal(mrec_.dtype, mrec.dtype) + assert_equal_records(mrec_._data, mrec._data) + assert_equal(mrec_._mask, mrec._mask) + assert_equal_records(mrec_._mask, mrec._mask) + + def test_filled(self): + # Test filling the array + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + mrecfilled = mrec.filled() + assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) + assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), + dtype=float)) + assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), + dtype='|S8')) + + def test_tolist(self): + # Test tolist. + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + + assert_equal(mrec.tolist(), + [(1, 1.1, None), (2, 2.2, b'two'), + (None, None, b'three')]) + + def test_withnames(self): + # Test the creation w/ format and names + x = mrecarray(1, formats=float, names='base') + x[0]['base'] = 10 + assert_equal(x['base'][0], 10) + + def test_exotic_formats(self): + # Test that 'exotic' formats are processed properly + easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)]) + easy[0] = masked + assert_equal(easy.filled(1).item(), (1, b'1', 1.)) + + solo = mrecarray(1, dtype=[('f0', ' 1: + assert_(eq(np.concatenate((x, y), 1), + concatenate((xm, ym), 1))) + assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1))) + assert_(eq(np.sum(x, 1), sum(x, 1))) + assert_(eq(np.prod(x, 1), product(x, 1))) + + def test_testCI(self): + # Test of conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
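+ # Hedged sketch: masked entries are assumed to render as '--', so with
+ # x2 = array([1, 2, 4, 3], mask=[1, 0, 0, 0]) one would expect
+ #   str(x2)   # -> '[-- 2 4 3]'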
+ assert_(eq(np.sort(x1), sort(x2, fill_value=0))) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_(eq(x1[2], x2[2])) + assert_(eq(x1[2:5], x2[2:5])) + assert_(eq(x1[:], x2[:])) + assert_(eq(x1[1:], x3[1:])) + x1[2] = 9 + x2[2] = 9 + assert_(eq(x1, x2)) + x1[1:3] = 99 + x2[1:3] = 99 + assert_(eq(x1, x2)) + x2[1] = masked + assert_(eq(x1, x2)) + x2[1:3] = masked + assert_(eq(x1, x2)) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_(eq(x1, x2)) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_(eq(3.0, x2.fill_value)) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_testCopySize(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_(y1._data is not x1) + assert_(allequal(x1, y1._data)) + assert_(y1._mask is m) + + y1a = array(y1, copy=0) + # For copy=False, one might expect that the array would just + # passed on, i.e., that it would be "is" instead of "==". + # See gh-4043 for discussion. + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3, copy=0) + assert_(y2._mask is m3) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask is m3) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._mask is not m) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + assert_(y2a._mask is not m) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_(eq(concatenate([x4, x4]), y4)) + assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) + y6 = repeat(x4, 2, axis=0) + assert_(eq(y5, y6)) + + def test_testPut(self): + # Test of put + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + m2 = m.copy() + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x._mask is m) + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_(eq(x, [0, 10, 2, -1, 40])) + + x = array(d, mask=m2, copy=True) + x.put([0, 1, 2], [-1, 100, 200]) + assert_(x._mask is not m2) + assert_(x[3] is masked) + assert_(x[4] is masked) + assert_(eq(x, [-1, 100, 200, 0, 0])) + + def test_testPut2(self): + # Test of put + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + x[2:4] = z + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + y = x[2:4] + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is 
not masked) + y[:] = z + assert_(y[0] is masked) + assert_(y[1] is not masked) + assert_(eq(y, [10, 40])) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + def test_testMaPut(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + i = np.nonzero(m)[0] + put(ym, i, zm) + assert_(all(take(ym, i, axis=0) == zm)) + + def test_testOddFeatures(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_(eq(z.real, x)) + assert_(eq(z.imag, 10 * x)) + assert_(eq((z * conjugate(z)).real, 101 * x * x)) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_(eq(x, z)) + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_(eq(x, z)) + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + c[0] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) + assert_(eq(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2))) + assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) + assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) + assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) + assert_(eq(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0])) + assert_(eq(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1])) + assert_(eq(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0])) + assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1])) + assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5])) + atest = ones((10, 10, 10), dtype=np.float32) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_(eq(atest, ctest)) + z = choose(c, (-x, x)) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + x = arange(6) + x[5] = masked + y = arange(6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_(eq(z, zm)) + assert_(getmask(zm) is nomask) + assert_(eq(zm, [0, 1, 2, 30, 40, 50])) + z = where(c, 
masked, 1) + assert_(eq(z, [99, 99, 99, 1, 1, 1])) + z = where(c, 1, masked) + assert_(eq(z, [99, 1, 1, 99, 99, 99])) + + def test_testMinMax2(self): + # Test of minimum, maximum. + assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) + assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) + x = arange(5) + y = arange(5) - 2 + x[3] = masked + y[0] = masked + assert_(eq(minimum(x, y), where(less(x, y), x, y))) + assert_(eq(maximum(x, y), where(greater(x, y), x, y))) + assert_(minimum.reduce(x) == 0) + assert_(maximum.reduce(x) == 4) + + def test_testTakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) + assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) + assert_(eq(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y))) + assert_(eq(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y))) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_testInplace(self): + # Test of inplace operations and rich comparisons + y = arange(10) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x += 1 + assert_(eq(x, y + 1)) + xm += 1 + assert_(eq(x, y + 1)) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x -= 1 + assert_(eq(x, y - 1)) + xm -= 1 + assert_(eq(xm, y - 1)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x *= 2.0 + assert_(eq(x, y * 2)) + xm *= 2.0 + assert_(eq(xm, y * 2)) + + x = arange(10) * 2 + xm = arange(10) + xm[2] = masked + x //= 2 + assert_(eq(x, y)) + xm //= 2 + assert_(eq(x, y)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x /= 2.0 + assert_(eq(x, y / 2.0)) + xm /= arange(10) + assert_(eq(xm, ones((10,)))) + + x = arange(10).astype(np.float32) + xm = arange(10) + xm[2] = masked + x += 1. + assert_(eq(x, y + 1.)) + + def test_testPickle(self): + # Test of pickling + x = arange(12) + x[4:10:2] = masked + x = x.reshape(4, 3) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + s = pickle.dumps(x, protocol=proto) + y = pickle.loads(s) + assert_(eq(x, y)) + + def test_testMasked(self): + # Test of masked element + xx = arange(6) + xx[1] = masked + assert_(str(masked) == '--') + assert_(xx[1] is masked) + assert_equal(filled(xx[1], 0), 0) + + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + assert_(eq(2.0, average(ott, axis=0))) + assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) + assert_(eq(2.0, result)) + assert_(wts == 4.0) + ott[:] = masked + assert_(average(ott, axis=0) is masked) + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_(eq(average(ott, axis=0), [2.0, 0.0])) + assert_(average(ott, axis=1)[0] is masked) + assert_(eq([2., 0.], average(ott, axis=0))) + result, wts = average(ott, axis=0, returned=True) + assert_(eq(wts, [1., 0.])) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6) + assert_(allclose(average(x, axis=0), 2.5)) + assert_(allclose(average(x, axis=0, weights=w1), 2.5)) + y = array([arange(6), 2.0 * arange(6)]) + assert_(allclose(average(y, None), + np.add.reduce(np.arange(6)) * 3. 
/ 12.)) + assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0])) + assert_(allclose(average(y, None, weights=w2), 20. / 6.)) + assert_(allclose(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.])) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0])) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) + assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) + assert_(average(masked_array(x, m4), axis=0) is masked) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_(allclose(average(z, None), 20. / 6.)) + assert_(allclose(average(z, axis=0), + [0., 1., 99., 99., 4.0, 7.5])) + assert_(allclose(average(z, axis=1), [2.5, 5.0])) + assert_(allclose(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0])) + + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_(shape(w2) == shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[0, 0], [1, 0]]) + a2da = average(a2d, axis=0) + assert_(eq(a2da, [0.5, 3.0])) + a2dma = average(a2dm, axis=0) + assert_(eq(a2dma, [1.0, 3.0])) + a2dma = average(a2dm, axis=None) + assert_(eq(a2dma, 7. 
/ 3.)) + a2dma = average(a2dm, axis=1) + assert_(eq(a2dma, [1.5, 4.0])) + + def test_testToPython(self): + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + assert_raises(ValueError, bool, array([0, 1])) + assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) + + def test_testScalarArithmetic(self): + xm = array(0, mask=1) + # TODO FIXME: Find out what the following raises a warning in r8247 + with np.errstate(divide='ignore'): + assert_((1 / array(0)).mask) + assert_((1 + xm).mask) + assert_((-xm).mask) + assert_((-xm).mask) + assert_(maximum(xm, xm).mask) + assert_(minimum(xm, xm).mask) + assert_(xm.filled().dtype is xm._data.dtype) + x = array(0, mask=0) + assert_(x.filled() == x._data) + assert_equal(str(xm), str(masked_print_option)) + + def test_testArrayMethods(self): + a = array([1, 3, 2]) + assert_(eq(a.any(), a._data.any())) + assert_(eq(a.all(), a._data.all())) + assert_(eq(a.argmax(), a._data.argmax())) + assert_(eq(a.argmin(), a._data.argmin())) + assert_(eq(a.choose(0, 1, 2, 3, 4), + a._data.choose(0, 1, 2, 3, 4))) + assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) + assert_(eq(a.conj(), a._data.conj())) + assert_(eq(a.conjugate(), a._data.conjugate())) + m = array([[1, 2], [3, 4]]) + assert_(eq(m.diagonal(), m._data.diagonal())) + assert_(eq(a.sum(), a._data.sum())) + assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) + assert_(eq(m.transpose(), m._data.transpose())) + + def test_testArrayAttributes(self): + a = array([1, 3, 2]) + assert_equal(a.ndim, 1) + + def test_testAPI(self): + assert_(not [m for m in dir(np.ndarray) + if m not in dir(MaskedArray) and + not m.startswith('_')]) + + def test_testSingleElementSubscript(self): + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_assignment_by_condition(self): + # Test for gh-18951 + a = array([1, 2, 3, 4], mask=[1, 0, 1, 0]) + c = a >= 3 + a[c] = 5 + assert_(a[2] is masked) + + def test_assignment_by_condition_2(self): + # gh-19721 + a = masked_array([0, 1], mask=[False, False]) + b = masked_array([0, 1], mask=[True, True]) + mask = a < 1 + b[mask] = a[mask] + expected_mask = [False, True] + assert_equal(b.mask, expected_mask) + + +class TestUfuncs: + def setup_method(self): + self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) + + def test_testUfuncRegression(self): + f_invalid_ignore = [ + 'sqrt', 'arctanh', 'arcsin', 'arccos', + 'arccosh', 'arctanh', 'log', 'log10', 'divide', + 'true_divide', 'floor_divide', 'remainder', 'fmod'] + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', + 'sin', 'cos', 'tan', + 'arcsin', 'arccos', 'arctan', + 'sinh', 'cosh', 'tanh', + 'arcsinh', + 'arccosh', + 'arctanh', + 'absolute', 'fabs', 'negative', + 'floor', 'ceil', + 'logical_not', + 'add', 'subtract', 'multiply', + 'divide', 'true_divide', 'floor_divide', + 'remainder', 'fmod', 'hypot', 'arctan2', + 'equal', 'not_equal', 'less_equal', 'greater_equal', + 'less', 'greater', + 'logical_and', 'logical_or', 'logical_xor']: + try: + uf = getattr(umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(np.ma, f) + args = self.d[:uf.nin] + with np.errstate(): + if f in f_invalid_ignore: + np.seterr(invalid='ignore') + if f in ['arctanh', 'log', 'log10']: + 
np.seterr(divide='ignore') + ur = uf(*args) + mr = mf(*args) + assert_(eq(ur.filled(0), mr.filled(0), f)) + assert_(eqmask(ur.mask, mr.mask)) + + def test_reduce(self): + a = self.d[0] + assert_(not alltrue(a, axis=0)) + assert_(sometrue(a, axis=0)) + assert_equal(sum(a[:3], axis=0), 0) + assert_equal(product(a, axis=0), 0) + + def test_minmax(self): + a = arange(1, 13).reshape(3, 4) + amask = masked_where(a < 5, a) + assert_equal(amask.max(), a.max()) + assert_equal(amask.min(), 5) + assert_((amask.max(0) == a.max(0)).all()) + assert_((amask.min(0) == [5, 6, 7, 8]).all()) + assert_(amask.max(1)[0].mask) + assert_(amask.min(1)[0].mask) + + def test_nonzero(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) + assert_(eq(nonzero(x), [0])) + + +class TestArrayMethods: + + def setup_method(self): + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + self.d = (x, X, XX, m, mx, mX, mXX) + + def test_trace(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_(eq(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0))) + + def test_clip(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + clipped = mx.clip(2, 8) + assert_(eq(clipped.mask, mx.mask)) + assert_(eq(clipped._data, x.clip(2, 8))) + assert_(eq(clipped._data, mx._data.clip(2, 8))) + + def test_ptp(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + (n, m) = X.shape + # print(type(mx), mx.compressed()) + # raise Exception() + assert_equal(mx.ptp(), np.ptp(mx.compressed())) + rows = np.zeros(n, np.float64) + cols = np.zeros(m, np.float64) + for k in range(m): + cols[k] = np.ptp(mX[:, k].compressed()) + for k in range(n): + rows[k] = np.ptp(mX[k].compressed()) + assert_(eq(mX.ptp(0), cols)) + assert_(eq(mX.ptp(1), rows)) + + def test_swapaxes(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXswapped = mX.swapaxes(0, 1) + assert_(eq(mXswapped[-1], mX[:, -1])) + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_cumprod(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumprod(0) + assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) + mXcp = mX.cumprod(1) + assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) + + def test_cumsum(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumsum(0) + assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) + mXcp = mX.cumsum(1) + assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) + + def test_varstd(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + assert_(eq(mX.var(axis=None), mX.compressed().var())) + assert_(eq(mX.std(axis=None), mX.compressed().std())) + assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) + assert_(eq(mX.var().shape, X.var().shape)) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + for k in range(6): + assert_(eq(mXvar1[k], mX[k].compressed().var())) + assert_(eq(mXvar0[k], mX[:, k].compressed().var())) + assert_(eq(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std())) + + +def eqmask(m1, 
m2): + if m1 is nomask: + return m2 is nomask + if m2 is nomask: + return m1 is nomask + return (m1 == m2).all() diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_regression.py b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_regression.py new file mode 100644 index 0000000..025387b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_regression.py @@ -0,0 +1,100 @@ +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_array_equal, + suppress_warnings, +) + + +class TestRegression: + def test_masked_array_create(self): + # Ticket #17 + x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], + mask=[0, 0, 0, 1, 1, 1, 0, 0]) + assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) + + def test_masked_array(self): + # Ticket #61 + np.ma.array(1, mask=[1]) + + def test_mem_masked_where(self): + # Ticket #62 + from numpy.ma import MaskType, masked_where + a = np.zeros((1, 1)) + b = np.zeros(a.shape, MaskType) + c = masked_where(b, a) + a - c + + def test_masked_array_multiply(self): + # Ticket #254 + a = np.ma.zeros((4, 1)) + a[2, 0] = np.ma.masked + b = np.zeros((4, 2)) + a * b + b * a + + def test_masked_array_repeat(self): + # Ticket #271 + np.ma.array([1], mask=False).repeat(10) + + def test_masked_array_repr_unicode(self): + # Ticket #1256 + repr(np.ma.array("Unicode")) + + def test_atleast_2d(self): + # Ticket #1559 + a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) + b = np.atleast_2d(a) + assert_(a.mask.ndim == 1) + assert_(b.mask.ndim == 2) + + def test_set_fill_value_unicode_py3(self): + # Ticket #2733 + a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) + a.fill_value = 'X' + assert_(a.fill_value == 'X') + + def test_var_sets_maskedarray_scalar(self): + # Issue gh-2757 + a = np.ma.array(np.arange(5), mask=True) + mout = np.ma.array(-1, dtype=float) + a.var(out=mout) + assert_(mout._data == 0) + + def test_ddof_corrcoef(self): + # See gh-3336 + x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) + y = np.array([2, 2.5, 3.1, 3, 5]) + # this test can be removed after deprecation. + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + r0 = np.ma.corrcoef(x, y, ddof=0) + r1 = np.ma.corrcoef(x, y, ddof=1) + # ddof should not have an effect (it gets cancelled out) + assert_allclose(r0.data, r1.data) + + def test_mask_not_backmangled(self): + # See gh-10314. Test case taken from gh-3140. + a = np.ma.MaskedArray([1., 2.], mask=[False, False]) + assert_(a.mask.shape == (2,)) + b = np.tile(a, (2, 1)) + # Check that the above no longer changes a.shape to (1, 2) + assert_(a.mask.shape == (2,)) + assert_(b.shape == (2, 2)) + assert_(b.mask.shape == (2, 2)) + + def test_empty_list_on_structured(self): + # See gh-12464. Indexing with empty list should give empty result. 
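+ # Hedged sketch: for any masked array a, an empty-list index should
+ # match the empty slice in both shape and dtype, e.g.
+ #   a[[]].shape == a[:0].shape and a[[]].dtype == a.dtype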
+ ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4') + assert_array_equal(ma[[]], ma[:0]) + + def test_masked_array_tobytes_fortran(self): + ma = np.ma.arange(4).reshape((2, 2)) + assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes()) + + def test_structured_array(self): + # see gh-22041 + np.ma.array((1, (b"", b"")), + dtype=[("x", np.int_), + ("y", [("i", np.void), ("j", np.void)])]) diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_subclassing.py b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_subclassing.py new file mode 100644 index 0000000..3364e56 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/ma/tests/test_subclassing.py @@ -0,0 +1,469 @@ +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import numpy as np +from numpy.lib.mixins import NDArrayOperatorsMixin +from numpy.ma.core import ( + MaskedArray, + add, + arange, + array, + asanyarray, + asarray, + divide, + hypot, + log, + masked, + masked_array, + nomask, +) +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises + +# from numpy.ma.core import ( + +def assert_startswith(a, b): + # produces a better error message than assert_(a.startswith(b)) + assert_equal(a[:len(b)], b) + +class SubArray(np.ndarray): + # Defines a generic np.ndarray subclass, that stores some metadata + # in the dictionary `info`. + def __new__(cls, arr, info={}): + x = np.asanyarray(arr).view(cls) + x.info = info.copy() + return x + + def __array_finalize__(self, obj): + super().__array_finalize__(obj) + self.info = getattr(obj, 'info', {}).copy() + + def __add__(self, other): + result = super().__add__(other) + result.info['added'] = result.info.get('added', 0) + 1 + return result + + def __iadd__(self, other): + result = super().__iadd__(other) + result.info['iadded'] = result.info.get('iadded', 0) + 1 + return result + + +subarray = SubArray + + +class SubMaskedArray(MaskedArray): + """Pure subclass of MaskedArray, keeping some info on subclass.""" + def __new__(cls, info=None, **kwargs): + obj = super().__new__(cls, **kwargs) + obj._optinfo['info'] = info + return obj + + +class MSubArray(SubArray, MaskedArray): + + def __new__(cls, data, info={}, mask=nomask): + subarr = SubArray(data, info) + _data = MaskedArray.__new__(cls, data=subarr, mask=mask) + _data.info = subarr.info + return _data + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + + +msubarray = MSubArray + + +# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing +# setting to non-class values (and thus np.ma.core.masked_print_option) +# and overrides __array_wrap__, updating the info dict, to check that this +# doesn't get destroyed by MaskedArray._update_from. But this one also needs +# its own iterator... 
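+# Hedged note: the custom flat iterator below is needed because
+# ndarray.flat hands back plain ndarray items; CSAIterator re-views each
+# item as the original subclass, roughly
+#   out = a.view(np.ndarray).flat[indx]
+#   out.view(type(a))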
+class CSAIterator: + """ + Flat iterator object that uses its own setter/getter + (works around ndarray.flat not propagating subclass setters/getters + see https://github.com/numpy/numpy/issues/4564) + roughly following MaskedIterator + """ + def __init__(self, a): + self._original = a + self._dataiter = a.view(np.ndarray).flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + out = self._dataiter.__getitem__(indx) + if not isinstance(out, np.ndarray): + out = out.__array__() + out = out.view(type(self._original)) + return out + + def __setitem__(self, index, value): + self._dataiter[index] = self._original._validate_input(value) + + def __next__(self): + return next(self._dataiter).__array__().view(type(self._original)) + + +class ComplicatedSubArray(SubArray): + + def __str__(self): + return f'myprefix {self.view(SubArray)} mypostfix' + + def __repr__(self): + # Return a repr that does not start with 'name(' + return f'<{self.__class__.__name__} {self}>' + + def _validate_input(self, value): + if not isinstance(value, ComplicatedSubArray): + raise ValueError("Can only set to MySubArray values") + return value + + def __setitem__(self, item, value): + # validation ensures direct assignment with ndarray or + # masked_print_option will fail + super().__setitem__(item, self._validate_input(value)) + + def __getitem__(self, item): + # ensure getter returns our own class also for scalars + value = super().__getitem__(item) + if not isinstance(value, np.ndarray): # scalar + value = value.__array__().view(ComplicatedSubArray) + return value + + @property + def flat(self): + return CSAIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + def __array_wrap__(self, obj, context=None, return_scalar=False): + obj = super().__array_wrap__(obj, context, return_scalar) + if context is not None and context[0] is np.multiply: + obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 + + return obj + + +class WrappedArray(NDArrayOperatorsMixin): + """ + Wrapping a MaskedArray rather than subclassing to test that + ufunc deferrals are commutative. + See: https://github.com/numpy/numpy/issues/15200) + """ + __slots__ = ('_array', 'attrs') + __array_priority__ = 20 + + def __init__(self, array, **attrs): + self._array = array + self.attrs = attrs + + def __repr__(self): + return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)" + + def __array__(self, dtype=None, copy=None): + return np.asarray(self._array) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if method == '__call__': + inputs = [arg._array if isinstance(arg, self.__class__) else arg + for arg in inputs] + return self.__class__(ufunc(*inputs, **kwargs), **self.attrs) + else: + return NotImplemented + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def setup_method(self): + x = np.arange(5, dtype='float') + mx = msubarray(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_data_subclassing(self): + # Tests whether the subclass is kept. 
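+ # Hedged sketch: wrapping an ndarray subclass in masked_array is
+ # expected to keep the subclass on the data side, e.g.
+ #   masked_array(SubArray(np.arange(3)))._data   # -> a SubArray view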
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xsub = SubArray(x) + xmsub = masked_array(xsub, mask=m) + assert_(isinstance(xmsub, MaskedArray)) + assert_equal(xmsub._data, xsub) + assert_(isinstance(xmsub._data, SubArray)) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, subarray)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), msubarray)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a msubarray + assert_(isinstance(add(mx, mx), msubarray)) + assert_(isinstance(add(mx, x), msubarray)) + # Result should work + assert_equal(add(mx, x), mx + x) + assert_(isinstance(add(mx, mx)._data, subarray)) + assert_(isinstance(add.outer(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, x), msubarray)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), msubarray)) + assert_(isinstance(divide(mx, x), msubarray)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + def test_attributepropagation(self): + x = array(arange(5), mask=[0] + [1] * 4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my + 1) + assert_(isinstance(z, MaskedArray)) + assert_(not isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_equal(z._data.info, {}) + # + z = (ym + 1) + assert_(isinstance(z, MaskedArray)) + assert_(isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_(z._data.info['added'] > 0) + # Test that inplace methods from data get used (gh-4617) + ym += 1 + assert_(isinstance(ym, MaskedArray)) + assert_(isinstance(ym, MSubArray)) + assert_(isinstance(ym._data, SubArray)) + assert_(ym._data.info['iadded'] > 0) + # + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) + # + xsub = subarray(x, info={'name': 'x'}) + mxsub = masked_array(xsub) + assert_(hasattr(mxsub, 'info')) + assert_equal(mxsub.info, xsub.info) + + def test_subclasspreservation(self): + # Checks that masked_array(...,subok=True) preserves the class. 
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xinfo = list(zip(x, m)) + xsub = MSubArray(x, mask=m, info={'xsub': xinfo}) + # + mxsub = masked_array(xsub, subok=False) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = asarray(xsub) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = masked_array(xsub, subok=True) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) + + def test_subclass_items(self): + """test that getter and setter go via baseclass""" + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + # getter should return a ComplicatedSubArray, even for single item + # first check we wrote ComplicatedSubArray correctly + assert_(isinstance(xcsub[1], ComplicatedSubArray)) + assert_(isinstance(xcsub[1, ...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) + + # now that it propagates inside the MaskedArray + assert_(isinstance(mxcsub[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub[1, ...].data, ComplicatedSubArray)) + assert_(mxcsub[0] is masked) + assert_(isinstance(mxcsub[0, ...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) + + # also for flattened version (which goes via MaskedIterator) + assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) + assert_(mxcsub.flat[0] is masked) + assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) + + # setter should only work with ComplicatedSubArray input + # first check we wrote ComplicatedSubArray correctly + assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) + # now that it propagates inside the MaskedArray + assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) + mxcsub[1] = xcsub[4] + mxcsub[1:4] = xcsub[1:4] + # also for flattened version (which goes via MaskedIterator) + assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) + mxcsub.flat[1] = xcsub[4] + mxcsub.flat[1:4] = xcsub[1:4] + + def test_subclass_nomask_items(self): + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub_nomask = masked_array(xcsub) + + assert_(isinstance(mxcsub_nomask[1, ...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0, ...].data, ComplicatedSubArray)) + + assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) + + def test_subclass_repr(self): + """test that repr uses the name of the subclass + and 'array' for np.ndarray""" + x = np.arange(5) + mx = masked_array(x, mask=[True, False, True, False, False]) + assert_startswith(repr(mx), 'masked_array') + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_startswith(repr(mxsub), + f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]') + + def test_subclass_str(self): + """test str with subclass that has overridden str, setitem""" + # first without override + x = np.arange(5) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_equal(str(mxsub), '[-- 1 -- 3 4]') + + xcsub 
= ComplicatedSubArray(x) + assert_raises(ValueError, xcsub.__setitem__, 0, + np.ma.core.masked_print_option) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') + + def test_pure_subclass_info_preservation(self): + # Test that ufuncs and methods conserve extra information consistently; + # see gh-7122. + arr1 = SubMaskedArray('test', data=[1, 2, 3, 4, 5, 6]) + arr2 = SubMaskedArray(data=[0, 1, 2, 3, 4, 5]) + diff1 = np.subtract(arr1, arr2) + assert_('info' in diff1._optinfo) + assert_(diff1._optinfo['info'] == 'test') + diff2 = arr1 - arr2 + assert_('info' in diff2._optinfo) + assert_(diff2._optinfo['info'] == 'test') + + +class ArrayNoInheritance: + """Quantity-like class that does not inherit from ndarray""" + def __init__(self, data, units): + self.magnitude = data + self.units = units + + def __getattr__(self, attr): + return getattr(self.magnitude, attr) + + +def test_array_no_inheritance(): + data_masked = np.ma.array([1, 2, 3], mask=[True, False, True]) + data_masked_units = ArrayNoInheritance(data_masked, 'meters') + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test sharing the mask + data_masked.mask = [True, False, False] + assert_equal(data_masked.mask, new_array.mask) + assert_(new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, copy=True) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test that the mask is not shared when copy=True + data_masked.mask = [True, False, True] + assert_equal([True, False, False], new_array.mask) + assert_(not new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, keep_mask=False) + assert_equal(data_masked.data, new_array.data) + # The change did not affect the original mask + assert_equal(data_masked.mask, [True, False, True]) + # Test that the mask is False and not shared when keep_mask=False + assert_(not new_array.mask) + assert_(not new_array.sharedmask) + + +class TestClassWrapping: + # Test suite for classes that wrap MaskedArrays + + def setup_method(self): + m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) + wm = WrappedArray(m) + self.data = (m, wm) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (m, wm) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(np.log(wm), WrappedArray)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (m, wm) = self.data + # Result should be a WrappedArray + assert_(isinstance(np.add(wm, wm), WrappedArray)) + assert_(isinstance(np.add(m, wm), WrappedArray)) + assert_(isinstance(np.add(wm, m), WrappedArray)) + # add and '+' should call the same ufunc + assert_equal(np.add(m, wm), m + wm) + assert_(isinstance(np.hypot(m, wm), WrappedArray)) + assert_(isinstance(np.hypot(wm, m), WrappedArray)) + # Test domained binary operations + assert_(isinstance(np.divide(wm, m), WrappedArray)) + assert_(isinstance(np.divide(m, wm), WrappedArray)) + assert_equal(np.divide(wm, m) * m, np.divide(m, m) * wm) + # Test broadcasting + m2 = np.stack([m, m]) + assert_(isinstance(np.divide(wm, m2), WrappedArray)) + assert_(isinstance(np.divide(m2, wm), WrappedArray)) + assert_equal(np.divide(m2, 
wm), np.divide(wm, m2))
+
+    def test_mixins_have_slots(self):
+        mixin = NDArrayOperatorsMixin()
+        # Should raise an error
+        assert_raises(AttributeError, mixin.__setattr__, "not_a_real_attr", 1)
+
+        m = np.ma.masked_array([1, 3, 5], mask=[False, True, False])
+        wm = WrappedArray(m)
+        assert_raises(AttributeError, wm.__setattr__, "not_an_attr", 2)
diff --git a/.venv/lib/python3.12/site-packages/numpy/ma/testutils.py b/.venv/lib/python3.12/site-packages/numpy/ma/testutils.py
new file mode 100644
index 0000000..bffcc34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/ma/testutils.py
@@ -0,0 +1,294 @@
+"""Miscellaneous functions for testing masked arrays and subclasses.
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+
+"""
+import operator
+
+import numpy as np
+import numpy._core.umath as umath
+import numpy.testing
+from numpy import ndarray
+from numpy.testing import (  # noqa: F401
+    assert_,
+    assert_allclose,
+    assert_array_almost_equal_nulp,
+    assert_raises,
+    build_err_msg,
+)
+
+from .core import filled, getmask, mask_or, masked, masked_array, nomask
+
+__all__masked = [
+    'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal',
+    'assert_array_approx_equal', 'assert_array_compare',
+    'assert_array_equal', 'assert_array_less', 'assert_close',
+    'assert_equal', 'assert_equal_records', 'assert_mask_equal',
+    'assert_not_equal', 'fail_if_array_equal',
+    ]
+
+# Include some normal test functions to avoid breaking other projects that
+# have mistakenly included them from this file. SciPy is one. That is
+# unfortunate, as some of these functions are not intended to work with
+# masked arrays. But there was no way to tell before.
+from unittest import TestCase  # noqa: F401
+
+__some__from_testing = [
+    'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp',
+    'assert_raises'
+    ]
+
+__all__ = __all__masked + __some__from_testing  # noqa: PLE0605
+
+
+def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
+    """
+    Returns True if all components of a and b are equal to the given
+    tolerances.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal. The relative error rtol should
+    be positive and << 1.0. The absolute error atol comes into play for
+    those elements of b that are very small or zero; it says how small a
+    must also be.
+
+    """
+    m = mask_or(getmask(a), getmask(b))
+    d1 = filled(a)
+    d2 = filled(b)
+    if d1.dtype.char == "O" or d2.dtype.char == "O":
+        return np.equal(d1, d2).ravel()
+    x = filled(
+        masked_array(d1, copy=False, mask=m), fill_value
+    ).astype(np.float64)
+    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64)
+    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
+    return d.ravel()
+
+
+def almost(a, b, decimal=6, fill_value=True):
+    """
+    Returns True if a and b are equal up to decimal places.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal.
+ + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled( + masked_array(d1, copy=False, mask=m), fill_value + ).astype(np.float64) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64) + d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) + return d.ravel() + + +def _assert_equal_on_sequences(actual, desired, err_msg=''): + """ + Asserts the equality of two non-array sequences. + + """ + assert_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + + +def assert_equal_records(a, b): + """ + Asserts that two records are equal. + + Pretty crude for now. + + """ + assert_equal(a.dtype, b.dtype) + for f in a.dtype.names: + (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) + if not (af is masked) and not (bf is masked): + assert_equal(operator.getitem(a, f), operator.getitem(b, f)) + + +def assert_equal(actual, desired, err_msg=''): + """ + Asserts that two items are equal. + + """ + # Case #1: dictionary ..... + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(f"{k} not in {actual}") + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + # Case #2: lists ..... + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + return _assert_equal_on_sequences(actual, desired, err_msg='') + if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): + msg = build_err_msg([actual, desired], err_msg,) + if not desired == actual: + raise AssertionError(msg) + return + # Case #4. arrays or equivalent + if ((actual is masked) and not (desired is masked)) or \ + ((desired is masked) and not (actual is masked)): + msg = build_err_msg([actual, desired], + err_msg, header='', names=('x', 'y')) + raise ValueError(msg) + actual = np.asanyarray(actual) + desired = np.asanyarray(desired) + (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) + if actual_dtype.char == "S" and desired_dtype.char == "S": + return _assert_equal_on_sequences(actual.tolist(), + desired.tolist(), + err_msg='') + return assert_array_equal(actual, desired, err_msg) + + +def fail_if_equal(actual, desired, err_msg='',): + """ + Raises an assertion error if two items are equal. + + """ + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + fail_if_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + fail_if_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + return + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return fail_if_array_equal(actual, desired, err_msg) + msg = build_err_msg([actual, desired], err_msg) + if not desired != actual: + raise AssertionError(msg) + + +assert_not_equal = fail_if_equal + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Asserts that two items are almost equal. 
+
+    The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal).
+
+    """
+    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+        return assert_array_almost_equal(actual, desired, decimal=decimal,
+                                         err_msg=err_msg, verbose=verbose)
+    msg = build_err_msg([actual, desired],
+                        err_msg=err_msg, verbose=verbose)
+    if not round(abs(desired - actual), decimal) == 0:
+        raise AssertionError(msg)
+
+
+assert_close = assert_almost_equal
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+                         fill_value=True):
+    """
+    Asserts that the comparison between two masked arrays is satisfied.
+
+    The comparison is elementwise.
+
+    """
+    # Allocate a common mask and refill
+    m = mask_or(getmask(x), getmask(y))
+    x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
+    y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
+    if ((x is masked) and not (y is masked)) or \
+            ((y is masked) and not (x is masked)):
+        msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
+                            header=header, names=('x', 'y'))
+        raise ValueError(msg)
+    # OK, now run the basic tests on filled versions
+    return np.testing.assert_array_compare(comparison,
+                                           x.filled(fill_value),
+                                           y.filled(fill_value),
+                                           err_msg=err_msg,
+                                           verbose=verbose, header=header)
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Checks the elementwise equality of two masked arrays.
+
+    """
+    assert_array_compare(operator.__eq__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def fail_if_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Raises an assertion error if two masked arrays are not equal elementwise.
+
+    """
+    def compare(x, y):
+        return (not np.all(approx(x, y)))
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to a given number of decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return approx(x, y, rtol=10. ** -decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to a given number of decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return almost(x, y, decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Checks that x is smaller than y elementwise.
+
+    """
+    assert_array_compare(operator.__lt__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not less-ordered')
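[Editor's note] A quick orientation on how these helpers behave (illustrative values, not taken from the diff): both operands are filled through a common mask before comparison, so masked slots never cause a failure, and `fail_if_array_equal` inverts the check:

    import numpy as np
    from numpy.ma.testutils import (
        assert_equal, assert_array_almost_equal, fail_if_array_equal)

    a = np.ma.array([1.0, 2.0, 3.0], mask=[0, 1, 0])
    b = np.ma.array([1.0, 9.0, 3.0], mask=[0, 1, 0])

    assert_equal(a, b)                          # 2.0 vs 9.0 is masked out
    assert_array_almost_equal(a, b + 1e-9, decimal=6)

    try:
        fail_if_array_equal(a, b)               # raises: arrays *are* equal
    except AssertionError:
        print("equal up to the mask, as expected")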
+
+
+def assert_mask_equal(m1, m2, err_msg=''):
+    """
+    Asserts the equality of two masks.
+
+    """
+    if m1 is nomask:
+        assert_(m2 is nomask)
+    if m2 is nomask:
+        assert_(m1 is nomask)
+    assert_array_equal(m1, m2, err_msg=err_msg)
diff --git a/.venv/lib/python3.12/site-packages/numpy/matlib.py b/.venv/lib/python3.12/site-packages/numpy/matlib.py
new file mode 100644
index 0000000..f27d503
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/matlib.py
@@ -0,0 +1,380 @@
+import warnings
+
+# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
+# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning
+warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. "
+              "The matrix subclass is not the recommended way to represent "
+              "matrices or deal with linear algebra (see "
+              "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). "
+              "Please adjust your code to use regular ndarray. ",
+              PendingDeprecationWarning, stacklevel=2)
+
+import numpy as np
+
+# Matlib.py contains all functions in the numpy namespace with a few
+# replacements. See doc/source/reference/routines.matlib.rst for details.
+# Need * as we're copying the numpy namespace.
+from numpy import *  # noqa: F403
+from numpy.matrixlib.defmatrix import asmatrix, matrix
+
+__version__ = np.__version__
+
+__all__ = ['rand', 'randn', 'repmat']
+__all__ += np.__all__
+
+def empty(shape, dtype=None, order='C'):
+    """Return a new matrix of given shape and type, without initializing entries.
+
+    Parameters
+    ----------
+    shape : int or tuple of int
+        Shape of the empty matrix.
+    dtype : data-type, optional
+        Desired output data-type.
+    order : {'C', 'F'}, optional
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+
+    See Also
+    --------
+    numpy.empty : Equivalent array function.
+    matlib.zeros : Return a matrix of zeros.
+    matlib.ones : Return a matrix of ones.
+
+    Notes
+    -----
+    Unlike other matrix creation functions (e.g. `matlib.zeros`,
+    `matlib.ones`), `matlib.empty` does not initialize the values of the
+    matrix, and may therefore be marginally faster. However, the values
+    stored in the newly allocated matrix are arbitrary. For reproducible
+    behavior, be sure to set each element of the matrix before reading.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.empty((2, 2))    # filled with random data
+    matrix([[ 6.76425276e-320,  9.79033856e-307], # random
+            [ 7.39337286e-309,  3.22135945e-309]])
+    >>> np.matlib.empty((2, 2), dtype=int)
+    matrix([[ 6600475,        0], # random
+            [ 6586976, 22740995]])
+
+    """
+    return ndarray.__new__(matrix, shape, dtype, order=order)
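[Editor's note] Since `numpy.matlib` mirrors the array-creation API but always returns 2-D `np.matrix` instances, a short usage sketch may help (standard NumPy; the module itself warns on import):

    import numpy as np
    import numpy.matlib   # emits PendingDeprecationWarning on import

    m = np.matlib.empty((2, 3))   # uninitialized; contents are arbitrary
    print(type(m), m.shape)       # <class 'numpy.matrix'> (2, 3)

    r = np.matlib.empty(3)        # 1-D requests are promoted to (1, N)
    print(r.shape)                # (1, 3)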
+
+def ones(shape, dtype=None, order='C'):
+    """
+    Matrix of ones.
+
+    Return a matrix of given shape and type, filled with ones.
+
+    Parameters
+    ----------
+    shape : {sequence of ints, int}
+        Shape of the matrix.
+    dtype : data-type, optional
+        The desired data-type for the matrix, default is np.float64.
+    order : {'C', 'F'}, optional
+        Whether to store matrix in C- or Fortran-contiguous order,
+        default is 'C'.
+
+    Returns
+    -------
+    out : matrix
+        Matrix of ones of given shape, dtype, and order.
+
+    See Also
+    --------
+    ones : Array of ones.
+    matlib.zeros : Zero matrix.
+
+    Notes
+    -----
+    If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
+    `out` becomes a single row matrix of shape ``(1,N)``.
+
+    Examples
+    --------
+    >>> np.matlib.ones((2,3))
+    matrix([[1., 1., 1.],
+            [1., 1., 1.]])
+
+    >>> np.matlib.ones(2)
+    matrix([[1., 1.]])
+
+    """
+    a = ndarray.__new__(matrix, shape, dtype, order=order)
+    a.fill(1)
+    return a
+
+def zeros(shape, dtype=None, order='C'):
+    """
+    Return a matrix of given shape and type, filled with zeros.
+
+    Parameters
+    ----------
+    shape : int or sequence of ints
+        Shape of the matrix.
+    dtype : data-type, optional
+        The desired data-type for the matrix, default is float.
+    order : {'C', 'F'}, optional
+        Whether to store the result in C- or Fortran-contiguous order,
+        default is 'C'.
+
+    Returns
+    -------
+    out : matrix
+        Zero matrix of given shape, dtype, and order.
+
+    See Also
+    --------
+    numpy.zeros : Equivalent array function.
+    matlib.ones : Return a matrix of ones.
+
+    Notes
+    -----
+    If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
+    `out` becomes a single row matrix of shape ``(1,N)``.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.zeros((2, 3))
+    matrix([[0., 0., 0.],
+            [0., 0., 0.]])
+
+    >>> np.matlib.zeros(2)
+    matrix([[0., 0.]])
+
+    """
+    a = ndarray.__new__(matrix, shape, dtype, order=order)
+    a.fill(0)
+    return a
+
+def identity(n, dtype=None):
+    """
+    Returns the square identity matrix of given size.
+
+    Parameters
+    ----------
+    n : int
+        Size of the returned identity matrix.
+    dtype : data-type, optional
+        Data-type of the output. Defaults to ``float``.
+
+    Returns
+    -------
+    out : matrix
+        `n` x `n` matrix with its main diagonal set to one,
+        and all other elements zero.
+
+    See Also
+    --------
+    numpy.identity : Equivalent array function.
+    matlib.eye : More general matrix identity function.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.identity(3, dtype=int)
+    matrix([[1, 0, 0],
+            [0, 1, 0],
+            [0, 0, 1]])
+
+    """
+    a = array([1] + n * [0], dtype=dtype)
+    b = empty((n, n), dtype=dtype)
+    b.flat = a
+    return b
+
+def eye(n, M=None, k=0, dtype=float, order='C'):
+    """
+    Return a matrix with ones on the diagonal and zeros elsewhere.
+
+    Parameters
+    ----------
+    n : int
+        Number of rows in the output.
+    M : int, optional
+        Number of columns in the output, defaults to `n`.
+    k : int, optional
+        Index of the diagonal: 0 refers to the main diagonal,
+        a positive value refers to an upper diagonal,
+        and a negative value to a lower diagonal.
+    dtype : dtype, optional
+        Data-type of the returned matrix.
+    order : {'C', 'F'}, optional
+        Whether the output should be stored in row-major (C-style) or
+        column-major (Fortran-style) order in memory.
+
+    Returns
+    -------
+    I : matrix
+        A `n` x `M` matrix where all elements are equal to zero,
+        except for the `k`-th diagonal, whose values are equal to one.
+
+    See Also
+    --------
+    numpy.eye : Equivalent array function.
+    identity : Square identity matrix.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.eye(3, k=1, dtype=float)
+    matrix([[0., 1., 0.],
+            [0., 0., 1.],
+            [0., 0., 0.]])
+
+    """
+    return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))
+
+def rand(*args):
+    """
+    Return a matrix of random values with given shape.
+
+    Create a matrix of the given shape and populate it with
+    random samples from a uniform distribution over ``[0, 1)``.
+
+    Parameters
+    ----------
+    \\*args : Arguments
+        Shape of the output.
+        If given as N integers, each integer specifies the size of one
+        dimension.
+        If given as a tuple, this tuple gives the complete shape.
+ + Returns + ------- + out : ndarray + The matrix of random values with shape given by `\\*args`. + + See Also + -------- + randn, numpy.random.RandomState.rand + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.rand(2, 3) + matrix([[0.69646919, 0.28613933, 0.22685145], + [0.55131477, 0.71946897, 0.42310646]]) + >>> np.matlib.rand((2, 3)) + matrix([[0.9807642 , 0.68482974, 0.4809319 ], + [0.39211752, 0.34317802, 0.72904971]]) + + If the first argument is a tuple, other arguments are ignored: + + >>> np.matlib.rand((2, 3), 4) + matrix([[0.43857224, 0.0596779 , 0.39804426], + [0.73799541, 0.18249173, 0.17545176]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.rand(*args)) + +def randn(*args): + """ + Return a random matrix with data from the "standard normal" distribution. + + `randn` generates a matrix filled with random floats sampled from a + univariate "normal" (Gaussian) distribution of mean 0 and variance 1. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + Z : matrix of floats + A matrix of floating-point samples drawn from the standard normal + distribution. + + See Also + -------- + rand, numpy.random.RandomState.randn + + Notes + ----- + For random samples from the normal distribution with mean ``mu`` and + standard deviation ``sigma``, use:: + + sigma * np.matlib.randn(...) + mu + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.randn(1) + matrix([[-1.0856306]]) + >>> np.matlib.randn(1, 2, 3) + matrix([[ 0.99734545, 0.2829785 , -1.50629471], + [-0.57860025, 1.65143654, -2.42667924]]) + + Two-by-four matrix of samples from the normal distribution with + mean 3 and standard deviation 2.5: + + >>> 2.5 * np.matlib.randn((2, 4)) + 3 + matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], + [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.randn(*args)) + +def repmat(a, m, n): + """ + Repeat a 0-D to 2-D array or matrix MxN times. + + Parameters + ---------- + a : array_like + The array or matrix to be repeated. + m, n : int + The number of times `a` is repeated along the first and second axes. + + Returns + ------- + out : ndarray + The result of repeating `a`. 
+ + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/.venv/lib/python3.12/site-packages/numpy/matlib.pyi b/.venv/lib/python3.12/site-packages/numpy/matlib.pyi new file mode 100644 index 0000000..baeadc0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/matlib.pyi @@ -0,0 +1,582 @@ +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +import numpy.typing as npt +from numpy import ( # noqa: F401 + False_, + ScalarType, + True_, + __array_namespace_info__, + __version__, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + angle, + any, + append, + apply_along_axis, + apply_over_axes, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_split, + array_str, + asanyarray, + asarray, + asarray_chkfinite, + ascontiguousarray, + asfortranarray, + asin, + asinh, + asmatrix, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + average, + bartlett, + base_repr, + binary_repr, + bincount, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + blackman, + block, + bmat, + bool, + bool_, + broadcast, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + c_, + can_cast, + cbrt, + cdouble, + ceil, + char, + character, + choose, + clip, + clongdouble, + column_stack, + common_type, + complex64, + complex128, + complex256, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copy, + copysign, + copyto, + core, + corrcoef, + correlate, + cos, + cosh, + count_nonzero, + cov, + cross, + csingle, + ctypeslib, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + delete, + diag, + diag_indices, + diag_indices_from, + diagflat, + diagonal, + diff, + digitize, + divide, + divmod, + dot, + double, + dsplit, + dstack, + dtype, + dtypes, + e, + ediff1d, + einsum, + einsum_path, + emath, + empty_like, + equal, + errstate, + euler_gamma, + exceptions, + exp, + exp2, + expand_dims, + expm1, + extract, + f2py, + fabs, + fft, + fill_diagonal, + finfo, + fix, + flatiter, + flatnonzero, + flexible, + flip, + fliplr, + flipud, + float16, + float32, + float64, + float128, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromregex, + fromstring, + 
full, + full_like, + gcd, + generic, + genfromtxt, + geomspace, + get_include, + get_printoptions, + getbufsize, + geterr, + geterrcall, + gradient, + greater, + greater_equal, + half, + hamming, + hanning, + heaviside, + histogram, + histogram2d, + histogram_bin_edges, + histogramdd, + hsplit, + hstack, + hypot, + i0, + iinfo, + imag, + in1d, + index_exp, + indices, + inexact, + inf, + info, + inner, + insert, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + interp, + intersect1d, + intp, + invert, + is_busday, + isclose, + iscomplex, + iscomplexobj, + isdtype, + isfinite, + isfortran, + isin, + isinf, + isnan, + isnat, + isneginf, + isposinf, + isreal, + isrealobj, + isscalar, + issubdtype, + iterable, + ix_, + kaiser, + kron, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + lib, + linalg, + linspace, + little_endian, + load, + loadtxt, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + ma, + mask_indices, + matmul, + matrix, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + median, + memmap, + meshgrid, + mgrid, + min, + min_scalar_type, + minimum, + mintypecode, + mod, + modf, + moveaxis, + multiply, + nan, + nan_to_num, + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ndarray, + ndenumerate, + ndim, + ndindex, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ogrid, + ones_like, + outer, + packbits, + pad, + partition, + percentile, + permute_dims, + pi, + piecewise, + place, + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polynomial, + polysub, + polyval, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + put_along_axis, + putmask, + quantile, + r_, + rad2deg, + radians, + random, + ravel, + ravel_multi_index, + real, + real_if_close, + rec, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + roots, + rot90, + round, + row_stack, + s_, + save, + savetxt, + savez, + savez_compressed, + sctypeDict, + searchsorted, + select, + set_printoptions, + setbufsize, + setdiff1d, + seterr, + seterrcall, + setxor1d, + shape, + shares_memory, + short, + show_config, + show_runtime, + sign, + signbit, + signedinteger, + sin, + sinc, + single, + sinh, + size, + sort, + sort_complex, + spacing, + split, + sqrt, + square, + squeeze, + stack, + std, + str_, + strings, + subtract, + sum, + swapaxes, + take, + take_along_axis, + tan, + tanh, + tensordot, + test, + testing, + tile, + timedelta64, + trace, + transpose, + trapezoid, + trapz, + tri, + tril, + tril_indices, + tril_indices_from, + trim_zeros, + triu, + triu_indices, + triu_indices_from, + true_divide, + trunc, + typecodes, + typename, + typing, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + unpackbits, + unravel_index, + unsignedinteger, + unstack, + unwrap, + ushort, + vander, + var, + vdot, + vecdot, + vecmat, + vectorize, + void, + vsplit, + vstack, + where, + zeros_like, +) +from numpy._typing import _ArrayLike, _DTypeLike + +__all__ = ["rand", "randn", "repmat"] +__all__ += 
np.__all__ + +### + +_T = TypeVar("_T", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_Order: TypeAlias = Literal["C", "F"] + +### + +# +@overload +def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... +@overload +def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +@overload +def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... + +# +@overload +def eye( + n: int, + M: int | None = None, + k: int = 0, + dtype: type[np.float64] | None = ..., + order: _Order = "C", +) -> _Matrix[np.float64]: ... +@overload +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def rand(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def rand(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def randn(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... +@overload +def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +@overload +def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/__init__.py b/.venv/lib/python3.12/site-packages/numpy/matrixlib/__init__.py new file mode 100644 index 0000000..1ff5cb5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/__init__.py @@ -0,0 +1,12 @@ +"""Sub-package containing the matrix class and related functions. + +""" +from . 
import defmatrix +from .defmatrix import * + +__all__ = defmatrix.__all__ + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/matrixlib/__init__.pyi new file mode 100644 index 0000000..56ae8bf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/__init__.pyi @@ -0,0 +1,5 @@ +from numpy import matrix + +from .defmatrix import asmatrix, bmat + +__all__ = ["matrix", "bmat", "asmatrix"] diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..7efcd96 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc new file mode 100644 index 0000000..79b8dd0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/defmatrix.py b/.venv/lib/python3.12/site-packages/numpy/matrixlib/defmatrix.py new file mode 100644 index 0000000..39b9a93 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/defmatrix.py @@ -0,0 +1,1119 @@ +__all__ = ['matrix', 'bmat', 'asmatrix'] + +import ast +import sys +import warnings + +import numpy._core.numeric as N +from numpy._core.numeric import concatenate, isscalar +from numpy._utils import set_module + +# While not in __all__, matrix_power used to be defined here, so we import +# it for backward compatibility. +from numpy.linalg import matrix_power + + +def _convert_from_string(data): + for char in '[]': + data = data.replace(char, '') + + rows = data.split(';') + newdata = [] + for count, row in enumerate(rows): + trow = row.split(',') + newrow = [] + for col in trow: + temp = col.split() + newrow.extend(map(ast.literal_eval, temp)) + if count == 0: + Ncols = len(newrow) + elif len(newrow) != Ncols: + raise ValueError("Rows not the same size.") + newdata.append(newrow) + return newdata + + +@set_module('numpy') +def asmatrix(data, dtype=None): + """ + Interpret the input as a matrix. + + Unlike `matrix`, `asmatrix` does not make a copy if the input is already + a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. + + Parameters + ---------- + data : array_like + Input data. + dtype : data-type + Data-type of the output matrix. + + Returns + ------- + mat : matrix + `data` interpreted as a matrix. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1, 2], [3, 4]]) + + >>> m = np.asmatrix(x) + + >>> x[0,0] = 5 + + >>> m + matrix([[5, 2], + [3, 4]]) + + """ + return matrix(data, dtype=dtype, copy=False) + + +@set_module('numpy') +class matrix(N.ndarray): + """ + matrix(data, dtype=None, copy=True) + + Returns a matrix from an array-like object, or from a string of data. + + A matrix is a specialized 2-D array that retains its 2-D nature + through operations. It has certain special operators, such as ``*`` + (matrix multiplication) and ``**`` (matrix power). + + .. note:: It is no longer recommended to use this class, even for linear + algebra. Instead use regular arrays. 
The class may be removed + in the future. + + Parameters + ---------- + data : array_like or string + If `data` is a string, it is interpreted as a matrix with commas + or spaces separating columns, and semicolons separating rows. + dtype : data-type + Data-type of the output matrix. + copy : bool + If `data` is already an `ndarray`, then this flag determines + whether the data is copied (the default), or whether a view is + constructed. + + See Also + -------- + array + + Examples + -------- + >>> import numpy as np + >>> a = np.matrix('1 2; 3 4') + >>> a + matrix([[1, 2], + [3, 4]]) + + >>> np.matrix([[1, 2], [3, 4]]) + matrix([[1, 2], + [3, 4]]) + + """ + __array_priority__ = 10.0 + + def __new__(subtype, data, dtype=None, copy=True): + warnings.warn('the matrix subclass is not the recommended way to ' + 'represent matrices or deal with linear algebra (see ' + 'https://docs.scipy.org/doc/numpy/user/' + 'numpy-for-matlab-users.html). ' + 'Please adjust your code to use regular ndarray.', + PendingDeprecationWarning, stacklevel=2) + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: + return new.copy() + else: + return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array + copy = None if not copy else True + arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim + shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = 'C' + if (ndim == 2) and arr.flags.fortran: + order = 'F' + + if not (order or arr.flags.contiguous): + arr = arr.copy() + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=order) + return ret + + def __array_finalize__(self, obj): + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): + return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple(x for x in self.shape if x > 1) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + + def __getitem__(self, index): + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False + + if not isinstance(out, N.ndarray): + return out + + if out.ndim == 0: + return out[()] + if out.ndim == 1: + sh = out.shape[0] + # Determine when we should have a column array + try: + n = len(index) + except Exception: + n = 0 + if n > 1 and isscalar(index[1]): + out.shape = (sh, 1) + else: + out.shape = (1, sh) + return out + + def __mul__(self, other): + if isinstance(other, (N.ndarray, list, tuple)): + # This promotes 1-D vectors to row vectors + return N.dot(self, asmatrix(other)) + if isscalar(other) or not hasattr(other, '__rmul__'): + return N.dot(self, other) + return NotImplemented + + def __rmul__(self, other): + return N.dot(other, self) + + def __imul__(self, other): + self[:] = self * other + return self + + def __pow__(self, other): + return matrix_power(self, other) + + def __ipow__(self, other): + self[:] 
= self ** other + return self + + def __rpow__(self, other): + return NotImplemented + + def _align(self, axis): + """A convenience function for operations that need to preserve axis + orientation. + """ + if axis is None: + return self[0, 0] + elif axis == 0: + return self + elif axis == 1: + return self.transpose() + else: + raise ValueError("unsupported axis") + + def _collapse(self, axis): + """A convenience function for operations that want to collapse + to a scalar like _align, but are using keepdims=True + """ + if axis is None: + return self[0, 0] + else: + return self + + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + """ + Return the matrix as a (possibly nested) list. + + See `ndarray.tolist` for full documentation. + + See Also + -------- + ndarray.tolist + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.tolist() + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + """ + return self.__array__().tolist() + + # To preserve orientation of result... + def sum(self, axis=None, dtype=None, out=None): + """ + Returns the sum of the matrix elements, along the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum + + Notes + ----- + This is the same as `ndarray.sum`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix([[1, 2], [4, 3]]) + >>> x.sum() + 10 + >>> x.sum(axis=1) + matrix([[3], + [7]]) + >>> x.sum(axis=1, dtype='float') + matrix([[3.], + [7.]]) + >>> out = np.zeros((2, 1), dtype='float') + >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + matrix([[3.], + [7.]]) + + """ + return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + # To update docstring from array to matrix... + def squeeze(self, axis=None): + """ + Return a possibly reshaped matrix. + + Refer to `numpy.squeeze` for more documentation. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Selects a subset of the axes of length one in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + + Returns + ------- + squeezed : matrix + The matrix, but as a (1, N) matrix if it had shape (N, 1). + + See Also + -------- + numpy.squeeze : related function + + Notes + ----- + If `m` has a single column then that column is returned + as the single row of a matrix. Otherwise `m` is returned. + The returned matrix is always either `m` itself or a view into `m`. + Supplying an axis keyword argument will not affect the returned matrix + but it may cause an error to be raised. + + Examples + -------- + >>> c = np.matrix([[1], [2]]) + >>> c + matrix([[1], + [2]]) + >>> c.squeeze() + matrix([[1, 2]]) + >>> r = c.T + >>> r + matrix([[1, 2]]) + >>> r.squeeze() + matrix([[1, 2]]) + >>> m = np.matrix([[1, 2], [3, 4]]) + >>> m.squeeze() + matrix([[1, 2], + [3, 4]]) + + """ + return N.ndarray.squeeze(self, axis=axis) + + # To update docstring from array to matrix... + def flatten(self, order='C'): + """ + Return a flattened copy of the matrix. + + All `N` elements of the matrix are placed into a single row. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. 'F' means to + flatten in column-major (Fortran-style) order. 
'A' means to + flatten in column-major order if `m` is Fortran *contiguous* in + memory, row-major order otherwise. 'K' means to flatten `m` in + the order the elements occur in memory. The default is 'C'. + + Returns + ------- + y : matrix + A copy of the matrix, flattened to a `(1, N)` matrix where `N` + is the number of elements in the original matrix. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the matrix. + + Examples + -------- + >>> m = np.matrix([[1,2], [3,4]]) + >>> m.flatten() + matrix([[1, 2, 3, 4]]) + >>> m.flatten('F') + matrix([[1, 3, 2, 4]]) + + """ + return N.ndarray.flatten(self, order=order) + + def mean(self, axis=None, dtype=None, out=None): + """ + Returns the average of the matrix elements along the given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean + + Notes + ----- + Same as `ndarray.mean` except that, where that returns an `ndarray`, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.mean() + 5.5 + >>> x.mean(0) + matrix([[4., 5., 6., 7.]]) + >>> x.mean(1) + matrix([[ 1.5], + [ 5.5], + [ 9.5]]) + + """ + return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def std(self, axis=None, dtype=None, out=None, ddof=0): + """ + Return the standard deviation of the array elements along the given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std + + Notes + ----- + This is the same as `ndarray.std`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.std() + 3.4520525295346629 # may vary + >>> x.std(0) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary + >>> x.std(1) + matrix([[ 1.11803399], + [ 1.11803399], + [ 1.11803399]]) + + """ + return N.ndarray.std(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + """ + Returns the variance of the matrix elements, along the given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var + + Notes + ----- + This is the same as `ndarray.var`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.var() + 11.916666666666666 + >>> x.var(0) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary + >>> x.var(1) + matrix([[1.25], + [1.25], + [1.25]]) + + """ + return N.ndarray.var(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + + Refer to `prod` for full documentation. + + See Also + -------- + prod, ndarray.prod + + Notes + ----- + Same as `ndarray.prod`, except, where that returns an `ndarray`, this + returns a `matrix` object instead. 
+ + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.prod() + 0 + >>> x.prod(0) + matrix([[ 0, 45, 120, 231]]) + >>> x.prod(1) + matrix([[ 0], + [ 840], + [7920]]) + + """ + return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def any(self, axis=None, out=None): + """ + Test whether any array element along a given axis evaluates to True. + + Refer to `numpy.any` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which logical OR is performed + out : ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ + return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) + + def all(self, axis=None, out=None): + """ + Test whether all matrix elements along a given axis evaluate to True. + + Parameters + ---------- + See `numpy.all` for complete descriptions + + See Also + -------- + numpy.all + + Notes + ----- + This is the same as `ndarray.all`, but it returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> y = x[0]; y + matrix([[0, 1, 2, 3]]) + >>> (x == y) + matrix([[ True, True, True, True], + [False, False, False, False], + [False, False, False, False]]) + >>> (x == y).all() + False + >>> (x == y).all(0) + matrix([[False, False, False, False]]) + >>> (x == y).all(1) + matrix([[ True], + [False], + [False]]) + + """ + return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) + + def max(self, axis=None, out=None): + """ + Return the maximum value along an axis. + + Parameters + ---------- + See `amax` for complete descriptions + + See Also + -------- + amax, ndarray.max + + Notes + ----- + This is the same as `ndarray.max`, but returns a `matrix` object + where `ndarray.max` would return an ndarray. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.max() + 11 + >>> x.max(0) + matrix([[ 8, 9, 10, 11]]) + >>> x.max(1) + matrix([[ 3], + [ 7], + [11]]) + + """ + return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) + + def argmax(self, axis=None, out=None): + """ + Indexes of the maximum values along an axis. + + Return the indexes of the first occurrences of the maximum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmax` for complete descriptions + + See Also + -------- + numpy.argmax + + Notes + ----- + This is the same as `ndarray.argmax`, but returns a `matrix` object + where `ndarray.argmax` would return an `ndarray`. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.argmax() + 11 + >>> x.argmax(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmax(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmax(self, axis, out)._align(axis) + + def min(self, axis=None, out=None): + """ + Return the minimum value along an axis. + + Parameters + ---------- + See `amin` for complete descriptions. 
+ + See Also + -------- + amin, ndarray.min + + Notes + ----- + This is the same as `ndarray.min`, but returns a `matrix` object + where `ndarray.min` would return an ndarray. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.min() + -11 + >>> x.min(0) + matrix([[ -8, -9, -10, -11]]) + >>> x.min(1) + matrix([[ -3], + [ -7], + [-11]]) + + """ + return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) + + def argmin(self, axis=None, out=None): + """ + Indexes of the minimum values along an axis. + + Return the indexes of the first occurrences of the minimum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmin` for complete descriptions. + + See Also + -------- + numpy.argmin + + Notes + ----- + This is the same as `ndarray.argmin`, but returns a `matrix` object + where `ndarray.argmin` would return an `ndarray`. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.argmin() + 11 + >>> x.argmin(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmin(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmin(self, axis, out)._align(axis) + + def ptp(self, axis=None, out=None): + """ + Peak-to-peak (maximum - minimum) value along the given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp + + Notes + ----- + Same as `ndarray.ptp`, except, where that would return an `ndarray` object, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.ptp() + 11 + >>> x.ptp(0) + matrix([[8, 8, 8, 8]]) + >>> x.ptp(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ptp(self, axis, out)._align(axis) + + @property + def I(self): # noqa: E743 + """ + Returns the (multiplicative) inverse of invertible `self`. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + If `self` is non-singular, `ret` is such that ``ret * self`` == + ``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return + ``True``. + + Raises + ------ + numpy.linalg.LinAlgError: Singular matrix + If `self` is singular. + + See Also + -------- + linalg.inv + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]'); m + matrix([[1, 2], + [3, 4]]) + >>> m.getI() + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + >>> m.getI() * m + matrix([[ 1., 0.], # may vary + [ 0., 1.]]) + + """ + M, N = self.shape + if M == N: + from numpy.linalg import inv as func + else: + from numpy.linalg import pinv as func + return asmatrix(func(self)) + + @property + def A(self): + """ + Return `self` as an `ndarray` object. + + Equivalent to ``np.asarray(self)``. + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self` as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA() + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + """ + return self.__array__() + + @property + def A1(self): + """ + Return `self` as a flattened `ndarray`. 
+ + Equivalent to ``np.asarray(x).ravel()`` + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self`, 1-D, as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA1() + array([ 0, 1, 2, ..., 9, 10, 11]) + + + """ + return self.__array__().ravel() + + def ravel(self, order='C'): + """ + Return a flattened matrix. + + Refer to `numpy.ravel` for more documentation. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `m` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + ret : matrix + Return the matrix flattened to shape `(1, N)` where `N` + is the number of elements in the original matrix. + A copy is made only if necessary. + + See Also + -------- + matrix.flatten : returns a similar output matrix but always a copy + matrix.flat : a flat iterator on the array. + numpy.ravel : related function which returns an ndarray + + """ + return N.ndarray.ravel(self, order=order) + + @property + def T(self): + """ + Returns the transpose of the matrix. + + Does *not* conjugate! For the complex conjugate transpose, use ``.H``. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + The (non-conjugated) transpose of the matrix. + + See Also + -------- + transpose, getH + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]') + >>> m + matrix([[1, 2], + [3, 4]]) + >>> m.getT() + matrix([[1, 3], + [2, 4]]) + + """ + return self.transpose() + + @property + def H(self): + """ + Returns the (complex) conjugate transpose of `self`. + + Equivalent to ``np.transpose(self)`` if `self` is real-valued. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + complex conjugate transpose of `self` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))) + >>> z = x - 1j*x; z + matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], + [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], + [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) + >>> z.getH() + matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. 
+7.j, 11.+11.j]]) + + """ + if issubclass(self.dtype.type, N.complexfloating): + return self.transpose().conjugate() + else: + return self.transpose() + + # kept for compatibility + getT = T.fget + getA = A.fget + getA1 = A1.fget + getH = H.fget + getI = I.fget + +def _from_string(str, gdict, ldict): + rows = str.split(';') + rowtup = [] + for row in rows: + trow = row.split(',') + newrow = [] + for x in trow: + newrow.extend(x.split()) + trow = newrow + coltup = [] + for col in trow: + col = col.strip() + try: + thismat = ldict[col] + except KeyError: + try: + thismat = gdict[col] + except KeyError as e: + raise NameError(f"name {col!r} is not defined") from None + + coltup.append(thismat) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) + + +@set_module('numpy') +def bmat(obj, ldict=None, gdict=None): + """ + Build a matrix object from a string, nested sequence, or array. + + Parameters + ---------- + obj : str or array_like + Input data. If a string, variables in the current scope may be + referenced by name. + ldict : dict, optional + A dictionary that replaces local operands in current frame. + Ignored if `obj` is not a string or `gdict` is None. + gdict : dict, optional + A dictionary that replaces global operands in current frame. + Ignored if `obj` is not a string. + + Returns + ------- + out : matrix + Returns a matrix object, which is a specialized 2-D array. + + See Also + -------- + block : + A generalization of this function for N-d arrays, that returns normal + ndarrays. + + Examples + -------- + >>> import numpy as np + >>> A = np.asmatrix('1 1; 1 1') + >>> B = np.asmatrix('2 2; 2 2') + >>> C = np.asmatrix('3 4; 5 6') + >>> D = np.asmatrix('7 8; 9 0') + + All the following expressions construct the same block matrix: + + >>> np.bmat([[A, B], [C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat('A,B; C,D') + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + + """ + if isinstance(obj, str): + if gdict is None: + # get previous frame + frame = sys._getframe().f_back + glob_dict = frame.f_globals + loc_dict = frame.f_locals + else: + glob_dict = gdict + loc_dict = ldict + + return matrix(_from_string(obj, glob_dict, loc_dict)) + + if isinstance(obj, (tuple, list)): + # [[A,B],[C,D]] + arr_rows = [] + for row in obj: + if isinstance(row, N.ndarray): # not 2-d + return matrix(concatenate(obj, axis=-1)) + else: + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) + if isinstance(obj, N.ndarray): + return matrix(obj) diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/defmatrix.pyi b/.venv/lib/python3.12/site-packages/numpy/matrixlib/defmatrix.pyi new file mode 100644 index 0000000..ee8f837 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/defmatrix.pyi @@ -0,0 +1,17 @@ +from collections.abc import Mapping, Sequence +from typing import Any + +from numpy import matrix +from numpy._typing import ArrayLike, DTypeLike, NDArray + +__all__ = ["asmatrix", "bmat", "matrix"] + +def bmat( + obj: str | Sequence[ArrayLike] | NDArray[Any], + ldict: Mapping[str, Any] | None = ..., + gdict: Mapping[str, Any] | None = ..., +) -> matrix[tuple[int, int], Any]: ... + +def asmatrix( + data: ArrayLike, dtype: DTypeLike = ... +) -> matrix[tuple[int, int], Any]: ... 
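A minimal sketch of the string form accepted by `bmat`, following the `_from_string` helper above: rows are split on ';', entries on ',' and whitespace, and each name is resolved first against `ldict`, then `gdict`. The block names `A`, `B`, `X`, `Y` below are hypothetical, chosen only for illustration:

    import numpy as np

    A = np.asmatrix('1 2; 3 4')
    B = np.asmatrix('5 6; 7 8')

    # With no dictionaries, bmat resolves names in the caller's frame.
    M = np.bmat('A,B; B,A')
    assert M.shape == (4, 4)

    # Explicit dictionaries bypass the frame lookup; note that gdict must
    # be supplied for ldict to be honored (gdict=None means frame lookup),
    # and each name is tried against ldict before gdict.
    M2 = np.bmat('X,Y; Y,X', ldict={'X': A, 'Y': B}, gdict={})
    assert (M == M2).all()
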
diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..bb87ce7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-312.pyc new file mode 100644 index 0000000..ab51fab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-312.pyc new file mode 100644 index 0000000..9526b45 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-312.pyc new file mode 100644 index 0000000..045e8b1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-312.pyc new file mode 100644 index 0000000..0730084 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-312.pyc new file mode 100644 index 0000000..623b355 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-312.pyc new file mode 100644 index 0000000..9259519 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-312.pyc new file mode 100644 index 0000000..f766ba8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_defmatrix.py b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_defmatrix.py new file mode 100644 
index 0000000..ce23933 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_defmatrix.py @@ -0,0 +1,455 @@ +import collections.abc + +import numpy as np +from numpy import asmatrix, bmat, matrix +from numpy.linalg import matrix_power +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + + +class TestCtor: + def test_basic(self): + A = np.array([[1, 2], [3, 4]]) + mA = matrix(A) + assert_(np.all(mA.A == A)) + + B = bmat("A,A;A,A") + C = bmat([[A, A], [A, A]]) + D = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + assert_(np.all(B.A == D)) + assert_(np.all(C.A == D)) + + E = np.array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(np.all(bmat([A, E]) == AEresult)) + + vec = np.arange(5) + mvec = matrix(vec) + assert_(mvec.shape == (1, 5)) + + def test_exceptions(self): + # Check for ValueError when called with invalid string data. + assert_raises(ValueError, matrix, "invalid") + + def test_bmat_nondefault_str(self): + A = np.array([[1, 2], [3, 4]]) + B = np.array([[5, 6], [7, 8]]) + Aresult = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + mixresult = np.array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) + assert_(np.all(bmat("A,A;A,A") == Aresult)) + assert_(np.all(bmat("A,A;A,A", ldict={'A': B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A': B}) + assert_( + np.all(bmat("A,A;A,A", ldict={'A': A}, gdict={'A': B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A': A, 'B': B}, gdict={'C': B, 'D': A}) + assert_(np.all(b2 == mixresult)) + + +class TestProperties: + def test_sum(self): + """Test whether matrix.sum(axis=1) preserves orientation. + Fails in NumPy <= 0.9.6.2127. 
+ """ + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T + sumall = 30 + assert_array_equal(sum0, M.sum(axis=0)) + assert_array_equal(sum1, M.sum(axis=1)) + assert_equal(sumall, M.sum()) + + assert_array_equal(sum0, np.sum(M, axis=0)) + assert_array_equal(sum1, np.sum(M, axis=1)) + assert_equal(sumall, np.sum(M)) + + def test_prod(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.prod(), 720) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) + + assert_equal(np.prod(x), 720) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) + + y = matrix([0, 1, 3]) + assert_(y.prod() == 0) + + def test_max(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.max(), 6) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) + + assert_equal(np.max(x), 6) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) + + def test_min(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.min(), 1) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) + + assert_equal(np.min(x), 1) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) + + def test_ptp(self): + x = np.arange(4).reshape((2, 2)) + mx = x.view(np.matrix) + assert_(mx.ptp() == 3) + assert_(np.all(mx.ptp(0) == np.array([2, 2]))) + assert_(np.all(mx.ptp(1) == np.array([1, 1]))) + + def test_var(self): + x = np.arange(9).reshape((3, 3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + assert_(np.allclose(linalg.inv(A), mA.I)) + assert_(np.all(np.array(np.transpose(A) == mA.T))) + assert_(np.all(np.array(np.transpose(A) == mA.H))) + assert_(np.all(A == mA.A)) + + B = A + 2j * A + mB = matrix(B) + assert_(np.allclose(linalg.inv(B), mB.I)) + assert_(np.all(np.array(np.transpose(B) == mB.T))) + assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) + + def test_pinv(self): + x = matrix(np.arange(6).reshape(2, 3)) + xpinv = matrix([[-0.77777778, 0.27777778], + [-0.11111111, 0.11111111], + [ 0.55555556, -0.05555556]]) + assert_almost_equal(x.I, xpinv) + + def test_comparisons(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + mB = matrix(A) + 0.1 + assert_(np.all(mB == A + 0.1)) + assert_(np.all(mB == matrix(A + 0.1))) + assert_(not np.any(mB == matrix(A - 0.1))) + assert_(np.all(mA < mB)) + assert_(np.all(mA <= mB)) + assert_(np.all(mA <= mA)) + assert_(not np.any(mA < mA)) + + assert_(not np.any(mB < mA)) + assert_(np.all(mB >= mA)) + assert_(np.all(mB >= mB)) + assert_(not np.any(mB > mB)) + + assert_(np.all(mA == mA)) + assert_(not np.any(mA == mB)) + assert_(np.all(mB != mA)) + + assert_(not np.all(abs(mA) > 0)) + assert_(np.all(abs(mB > 0))) + + def test_asmatrix(self): + A = np.arange(100).reshape(10, 10) + mA = asmatrix(A) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) + + def test_noaxis(self): + A = matrix([[1, 0], [0, 1]]) + assert_(A.sum() == matrix(2)) + assert_(A.mean() == matrix(0.5)) + + def test_repr(self): + A = matrix([[1, 0], [0, 1]]) + assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") + + def test_make_bool_matrix_from_str(self): + A = 
matrix('True; True; False')
+        B = matrix([[True], [True], [False]])
+        assert_array_equal(A, B)
+
+class TestCasting:
+    def test_basic(self):
+        A = np.arange(100).reshape(10, 10)
+        mA = matrix(A)
+
+        mB = mA.copy()
+        O = np.ones((10, 10), np.float64) * 0.1
+        mB = mB + O
+        assert_(mB.dtype.type == np.float64)
+        assert_(np.all(mA != mB))
+        assert_(np.all(mB == mA + 0.1))
+
+        mC = mA.copy()
+        O = np.ones((10, 10), np.complex128)
+        mC = mC * O
+        assert_(mC.dtype.type == np.complex128)
+        assert_(np.all(mA != mB))
+
+
+class TestAlgebra:
+    def test_basic(self):
+        import numpy.linalg as linalg
+
+        A = np.array([[1., 2.], [3., 4.]])
+        mA = matrix(A)
+
+        B = np.identity(2)
+        for i in range(6):
+            assert_(np.allclose((mA ** i).A, B))
+            B = np.dot(B, A)
+
+        Ainv = linalg.inv(A)
+        B = np.identity(2)
+        for i in range(6):
+            assert_(np.allclose((mA ** -i).A, B))
+            B = np.dot(B, Ainv)
+
+        assert_(np.allclose((mA * mA).A, np.dot(A, A)))
+        assert_(np.allclose((mA + mA).A, (A + A)))
+        assert_(np.allclose((3 * mA).A, (3 * A)))
+
+        mA2 = matrix(A)
+        mA2 *= 3
+        assert_(np.allclose(mA2.A, 3 * A))
+
+    def test_pow(self):
+        """Test raising a matrix to an integer power works as expected."""
+        m = matrix("1. 2.; 3. 4.")
+        m2 = m.copy()
+        m2 **= 2
+        mi = m.copy()
+        mi **= -1
+        m4 = m2.copy()
+        m4 **= 2
+        assert_array_almost_equal(m2, m**2)
+        assert_array_almost_equal(m4, np.dot(m2, m2))
+        assert_array_almost_equal(np.dot(mi, m), np.eye(2))
+
+    def test_scalar_type_pow(self):
+        m = matrix([[1, 2], [3, 4]])
+        for scalar_t in [np.int8, np.uint8]:
+            two = scalar_t(2)
+            assert_array_almost_equal(m ** 2, m ** two)
+
+    def test_notimplemented(self):
+        '''Check that 'not implemented' operations produce a failure.'''
+        A = matrix([[1., 2.],
+                    [3., 4.]])
+
+        # __rpow__
+        with assert_raises(TypeError):
+            1.0**A
+
+        # __mul__ with something not a list, ndarray, tuple, or scalar
+        with assert_raises(TypeError):
+            A * object()
+
+
+class TestMatrixReturn:
+    def test_instance_methods(self):
+        a = matrix([1.0], dtype='f8')
+        methodargs = {
+            'astype': ('intc',),
+            'clip': (0.0, 1.0),
+            'compress': ([1],),
+            'repeat': (1,),
+            'reshape': (1,),
+            'swapaxes': (0, 0),
+            'dot': np.array([1.0]),
+            }
+        excluded_methods = [
+            'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield',
+            'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize',
+            'searchsorted', 'setflags', 'setfield', 'sort',
+            'partition', 'argpartition', 'newbyteorder', 'to_device',
+            'take', 'tofile', 'tolist', 'tobytes', 'all', 'any',
+            'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp',
+            'prod', 'std', 'ctypes', 'itemset', 'bitwise_count',
+            ]
+        for attrib in dir(a):
+            if attrib.startswith('_') or attrib in excluded_methods:
+                continue
+            f = getattr(a, attrib)
+            if isinstance(f, collections.abc.Callable):
+                # reset contents of a
+                a.astype('f8')
+                a.fill(1.0)
+                args = methodargs.get(attrib, ())
+                b = f(*args)
+                assert_(type(b) is matrix, f"{attrib}")
+        assert_(type(a.real) is matrix)
+        assert_(type(a.imag) is matrix)
+        c, d = matrix([0.0]).nonzero()
+        assert_(type(c) is np.ndarray)
+        assert_(type(d) is np.ndarray)
+
+
+class TestIndexing:
+    def test_basic(self):
+        x = asmatrix(np.zeros((3, 2), float))
+        y = np.zeros((3, 1), float)
+        y[:, 0] = [0.8, 0.2, 0.3]
+        x[:, 1] = y > 0.5
+        assert_equal(x, [[0, 1], [0, 0], [0, 0]])
+
+
+class TestNewScalarIndexing:
+    a = matrix([[1, 2], [3, 4]])
+
+    def test_dimensions(self):
+        a = self.a
+        x = a[0]
+        assert_equal(x.ndim, 2)
+
+    def test_array_from_matrix_list(self):
+        a = self.a
+        x = np.array([a, a])
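+        # np.array stacks the two 2x2 matrices along a new leading axis,
+        # giving a plain 3-D ndarray (np.array defaults to subok=False,
+        # and a matrix cannot be 3-D in any case).
+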
assert_equal(x.shape, [2, 2, 2]) + + def test_array_to_list(self): + a = self.a + assert_equal(a.tolist(), [[1, 2], [3, 4]]) + + def test_fancy_indexing(self): + a = self.a + x = a[1, [0, 1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1], [0]], [[1, 0], [0, 1]]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[4, 3], [1, 2]])) + + def test_matrix_element(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) + + x = matrix(0) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) + + def test_scalar_indexing(self): + x = asmatrix(np.zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) + + def test_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0, :], [[1, 0]]) + assert_array_equal(x[1, :], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) + + def test_boolean_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, np.array([True, False])], x[:, 0]) + assert_array_equal(x[np.array([True, False, False]), :], x[0, :]) + + def test_list_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0], :], x[::-1, :]) + + +class TestPower: + def test_returntype(self): + a = np.array([[0, 1], [0, 0]]) + assert_(type(matrix_power(a, 2)) is np.ndarray) + a = asmatrix(a) + assert_(type(matrix_power(a, 2)) is matrix) + + def test_list(self): + assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + + +class TestShape: + + a = np.array([[1], [2]]) + m = matrix([[1], [2]]) + + def test_shape(self): + assert_equal(self.a.shape, (2, 1)) + assert_equal(self.m.shape, (2, 1)) + + def test_numpy_ravel(self): + assert_equal(np.ravel(self.a).shape, (2,)) + assert_equal(np.ravel(self.m).shape, (2,)) + + def test_member_ravel(self): + assert_equal(self.a.ravel().shape, (2,)) + assert_equal(self.m.ravel().shape, (1, 2)) + + def test_member_flatten(self): + assert_equal(self.a.flatten().shape, (2,)) + assert_equal(self.m.flatten().shape, (1, 2)) + + def test_numpy_ravel_order(self): + x = np.array([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + + def test_matrix_ravel_order(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) + assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) + + def test_array_memory_sharing(self): + assert_(np.may_share_memory(self.a, self.a.ravel())) + assert_(not np.may_share_memory(self.a, self.a.flatten())) + + def test_matrix_memory_sharing(self): + assert_(np.may_share_memory(self.m, self.m.ravel())) + assert_(not np.may_share_memory(self.m, 
self.m.flatten())) + + def test_expand_dims_matrix(self): + # matrices are always 2d - so expand_dims only makes sense when the + # type is changed away from matrix. + a = np.arange(10).reshape((2, 5)).view(np.matrix) + expanded = np.expand_dims(a, axis=1) + assert_equal(expanded.ndim, 3) + assert_(not isinstance(expanded, np.matrix)) diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_interaction.py b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_interaction.py new file mode 100644 index 0000000..87d133a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_interaction.py @@ -0,0 +1,360 @@ +"""Tests of interaction of matrix with other parts of numpy. + +Note that tests with MaskedArray and linalg are done in separate files. +""" +import textwrap +import warnings + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +def test_fancy_indexing(): + # The matrix class messes with the shape. While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + # 2018-04-29: moved here from core.tests.test_index. + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0, 1, 0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5, 10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + +def test_polynomial_mapdomain(): + # test that polynomial preserved matrix subtype. + # 2018-04-29: moved here from polynomial.tests.polyutils. 
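+    # mapdomain applies the affine map x -> off + scl*x, with scl and off
+    # chosen so dom1 maps onto dom2 (here [0, 4] -> [1, 3], e.g. 4 -> 3);
+    # the point of this test is that the matrix subtype survives that map.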
+ dom1 = [0, 4] + dom2 = [1, 3] + x = np.matrix([dom1, dom1]) + res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) + assert_(isinstance(res, np.matrix)) + + +def test_sort_matrix_none(): + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.sort(a, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_partition_matrix_none(): + # gh-4301 + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.partition(a, 1, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_dot_scalar_and_matrix_of_objects(): + # Ticket #2469 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +def test_inner_scalar_and_matrix(): + # 2018-04-29: moved here from core.tests.test_multiarray + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + + +def test_inner_scalar_and_matrix_of_objects(): + # Ticket #4482 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + # 2018-04-29: moved here from core.tests.test_nditer, given the + # matrix specific shape test. 
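+    # np.matrix carries __array_priority__ = 10.0, which is what lets it
+    # win output allocation over the plain ndarray operand below.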
+ + # matrix vs ndarray + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(type(i.operands[2]) is np.matrix) + assert_(type(i.operands[2]) is not np.ndarray) + assert_equal(i.operands[2].shape, (2, 2)) + + # matrix always wants things to be 2D + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, np.nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + # but if subtypes are disabled, the result can still work + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_(type(i.operands[2]) is np.ndarray) + assert_(type(i.operands[2]) is not np.matrix) + assert_equal(i.operands[2].shape, (1, 2, 2)) + + +def like_function(): + # 2018-04-29: moved here from core.tests.test_numeric + a = np.matrix([[1, 2], [3, 4]]) + for like_function in np.zeros_like, np.ones_like, np.empty_like: + b = like_function(a) + assert_(type(b) is np.matrix) + + c = like_function(a, subok=False) + assert_(type(c) is not np.matrix) + + +def test_array_astype(): + # 2018-04-29: copied here from core.tests.test_api + # subok=True passes through a matrix + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + +def test_stack(): + # 2018-04-29: copied here from core.tests.test_shape_base + # check np.matrix cannot be stacked + m = np.matrix([[1, 2], [3, 4]]) + assert_raises_regex(ValueError, 'shape too large to be a matrix', + np.stack, [m, m]) + + +def test_object_scalar_multiply(): + # Tickets #2469 and #4482 + # 2018-04-29: moved here from core.tests.test_ufunc + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.multiply(arr, 3), desired) + assert_equal(np.multiply(3, arr), desired) + + +def test_nanfunctions_matrices(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in [np.nanmin, np.nanmax]: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + # check that rows of nan are dealt with for subclasses (#4628) + mat[1] = np.nan + for f in [np.nanmin, np.nanmax]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) + and not np.isnan(res[2, 0])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat) + assert_(np.isscalar(res)) + assert_(res != np.nan) + assert_(len(w) == 0) + + +def 
test_nanfunctions_matrices_general(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, + np.nanmean, np.nanvar, np.nanstd): + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + for f in np.nancumsum, np.nancumprod: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3 * 3)) + + +def test_average_matrix(): + # 2018-04-29: moved here from core.tests.test_function_base. + y = np.matrix(np.random.rand(5, 5)) + assert_array_equal(y.mean(0), np.average(y, 0)) + + a = np.matrix([[1, 2], [3, 4]]) + w = np.matrix([[1, 2], [3, 4]]) + + r = np.average(a, axis=0, weights=w) + assert_equal(type(r), np.matrix) + assert_equal(r, [[2.5, 10.0 / 3]]) + + +def test_dot_matrix(): + # Test to make sure matrices give the same answer as ndarrays + # 2018-04-29: moved here from core.tests.test_function_base. + x = np.linspace(0, 5) + y = np.linspace(-5, 0) + mx = np.matrix(x) + my = np.matrix(y) + r = np.dot(x, y) + mr = np.dot(mx, my.T) + assert_almost_equal(mr, r) + + +def test_ediff1d_matrix(): + # 2018-04-29: moved here from core.tests.test_arraysetops. + assert isinstance(np.ediff1d(np.matrix(1)), np.matrix) + assert isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix) + + +def test_apply_along_axis_matrix(): + # this test is particularly malicious because matrix + # refuses to become 1d + # 2018-04-29: moved here from core.tests.test_shape_base. + def double(row): + return row * 2 + + m = np.matrix([[0, 1], [2, 3]]) + expected = np.matrix([[0, 2], [4, 6]]) + + result = np.apply_along_axis(double, 0, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + result = np.apply_along_axis(double, 1, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + +def test_kron_matrix(): + # 2018-04-29: moved here from core.tests.test_shape_base. + a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(np.kron(a, a)), np.ndarray) + assert_equal(type(np.kron(m, m)), np.matrix) + assert_equal(type(np.kron(a, m)), np.matrix) + assert_equal(type(np.kron(m, a)), np.matrix) + + +class TestConcatenatorMatrix: + # 2018-04-29: moved here from core.tests.test_index_tricks. + def test_matrix(self): + a = [1, 2] + b = [3, 4] + + ab_r = np.r_['r', a, b] + ab_c = np.r_['c', a, b] + + assert_equal(type(ab_r), np.matrix) + assert_equal(type(ab_c), np.matrix) + + assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) + assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) + + assert_raises(ValueError, lambda: np.r_['rc', a, b]) + + def test_matrix_scalar(self): + r = np.r_['r', [1, 2], 3] + assert_equal(type(r), np.matrix) + assert_equal(np.array(r), [[1, 2, 3]]) + + def test_matrix_builder(self): + a = np.array([1]) + b = np.array([2]) + c = np.array([3]) + d = np.array([4]) + actual = np.r_['a, b; c, d'] + expected = np.bmat([[a, b], [c, d]]) + + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + +def test_array_equal_error_message_matrix(): + # 2018-04-29: moved here from testing.tests.test_utils. 
+ with pytest.raises(AssertionError) as exc_info: + assert_equal(np.array([1, 2]), np.matrix([1, 2])) + msg = str(exc_info.value) + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + ACTUAL: array([1, 2]) + DESIRED: matrix([[1, 2]])""") + assert_equal(msg, msg_reference) + + +def test_array_almost_equal_matrix(): + # Matrix slicing keeps things 2-D, while array does not necessarily. + # See gh-8452. + # 2018-04-29: moved here from testing.tests.test_utils. + m1 = np.matrix([[1., 2.]]) + m2 = np.matrix([[1., np.nan]]) + m3 = np.matrix([[1., -np.inf]]) + m4 = np.matrix([[np.nan, np.inf]]) + m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) + for assert_func in assert_array_almost_equal, assert_almost_equal: + for m in m1, m2, m3, m4, m5: + assert_func(m, m) + a = np.array(m) + assert_func(a, m) + assert_func(m, a) diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_masked_matrix.py b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_masked_matrix.py new file mode 100644 index 0000000..e6df047 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_masked_matrix.py @@ -0,0 +1,240 @@ +import pickle + +import numpy as np +from numpy.ma.core import ( + MaskedArray, + MaskType, + add, + allequal, + divide, + getmask, + hypot, + log, + masked, + masked_array, + masked_values, + nomask, +) +from numpy.ma.extras import mr_ +from numpy.ma.testutils import assert_, assert_array_equal, assert_equal, assert_raises + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + + +class TestMaskedMatrix: + def test_matrix_indexing(self): + # Tests conversions and indexing + x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) + x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) + x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) + x4 = masked_array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
+ # tests of indexing + assert_(type(x2[1, 0]) is type(x1[1, 0])) + assert_(x1[1, 0] == x2[1, 0]) + assert_(x2[1, 1] is masked) + assert_equal(x1[0, 2], x2[0, 2]) + assert_equal(x1[0, 1:], x2[0, 1:]) + assert_equal(x1[:, 2], x2[:, 2]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[0, 2] = 9 + x2[0, 2] = 9 + assert_equal(x1, x2) + x1[0, 1:] = 99 + x2[0, 1:] = 99 + assert_equal(x1, x2) + x2[0, 1] = masked + assert_equal(x1, x2) + x2[0, 1:] = masked + assert_equal(x1, x2) + x2[0, :] = x1[0, :] + x2[0, 1] = masked + assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) + x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) + assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) + x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) + assert_(allequal(x4[1], masked_array([1, 2, 3]))) + x1 = np.matrix(np.arange(5) * 1.0) + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), + x2.mask)) + assert_equal(3.0, x2.fill_value) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.matrix)) + + def test_count_mean_with_matrix(self): + m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) + + assert_equal(m.count(axis=0).shape, (1, 2)) + assert_equal(m.count(axis=1).shape, (2, 1)) + + # Make sure broadcasting inside mean and var work + assert_equal(m.mean(axis=0), [[2., 3.]]) + assert_equal(m.mean(axis=1), [[1.5], [3.5]]) + + def test_flat(self): + # Test that flat can return items even for matrices [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) + testflat[0] = 9 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, np.array([[1., 0.]])) + assert_equal(b01.mask, np.array([[False, False]])) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + + assert_(not mXbig.all()) + assert_(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + + assert_(not mXsmall.all()) + assert_(mXsmall.any()) + assert_equal(mXsmall.all(0), 
np.matrix([True, True, False]))
+        assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
+        assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
+        assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
+
+    def test_compressed(self):
+        a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
+        b = a.compressed()
+        assert_equal(b, a)
+        assert_(isinstance(b, np.matrix))
+        a[0, 0] = masked
+        b = a.compressed()
+        assert_equal(b, [[2, 3, 4]])
+
+    def test_ravel(self):
+        a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
+        aravel = a.ravel()
+        assert_equal(aravel.shape, (1, 5))
+        assert_equal(aravel._mask.shape, a.shape)
+
+    def test_view(self):
+        # Test view w/ flexible dtype
+        iterator = list(zip(np.arange(10), np.random.rand(10)))
+        data = np.array(iterator)
+        a = masked_array(iterator, dtype=[('a', float), ('b', float)])
+        a.mask[0] = (1, 0)
+        test = a.view((float, 2), np.matrix)
+        assert_equal(test, data)
+        assert_(isinstance(test, np.matrix))
+        assert_(not isinstance(test, MaskedArray))
+
+
+class TestSubclassing:
+    # Test suite for masked subclasses of ndarray.
+
+    def setup_method(self):
+        x = np.arange(5, dtype='float')
+        mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
+        self.data = (x, mx)
+
+    def test_maskedarray_subclassing(self):
+        # Tests subclassing MaskedArray
+        (x, mx) = self.data
+        assert_(isinstance(mx._data, np.matrix))
+
+    def test_masked_unary_operations(self):
+        # Tests masked_unary_operation
+        (x, mx) = self.data
+        with np.errstate(divide='ignore'):
+            assert_(isinstance(log(mx), MMatrix))
+            assert_equal(log(x), np.log(x))
+
+    def test_masked_binary_operations(self):
+        # Tests masked_binary_operation
+        (x, mx) = self.data
+        # Result should be a MMatrix
+        assert_(isinstance(add(mx, mx), MMatrix))
+        assert_(isinstance(add(mx, x), MMatrix))
+        # Result should work
+        assert_equal(add(mx, x), mx + x)
+        assert_(isinstance(add(mx, mx)._data, np.matrix))
+        with assert_raises(TypeError):
+            add.outer(mx, mx)
+        assert_(isinstance(hypot(mx, mx), MMatrix))
+        assert_(isinstance(hypot(mx, x), MMatrix))
+
+    def test_masked_binary_operations2(self):
+        # Tests domained_masked_binary_operation
+        (x, mx) = self.data
+        xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+        assert_(isinstance(divide(mx, mx), MMatrix))
+        assert_(isinstance(divide(mx, x), MMatrix))
+        assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+class TestConcatenator:
+    # Tests for mr_, the equivalent of r_ for masked arrays.
+
+    def test_matrix_builder(self):
+        assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+    def test_matrix(self):
+        # Test consistency with unmasked version. If we ever deprecate
+        # matrix, this test should either still pass, or both actual and
+        # expected should fail to build.
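+        # np.r_['r', ...] builds a 1xN np.matrix row; mr_ wraps the same
+        # result in a masked array, so the outer types and the inner
+        # .data types should both agree.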
+        actual = mr_['r', 1, 2, 3]
+        expected = np.ma.array(np.r_['r', 1, 2, 3])
+        assert_array_equal(actual, expected)
+
+        # outer type is masked array, inner type is matrix
+        assert_equal(type(actual), type(expected))
+        assert_equal(type(actual.data), type(expected.data))
diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py
new file mode 100644
index 0000000..4e63965
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -0,0 +1,105 @@
+""" Test functions for linalg module using the matrix class."""
+import numpy as np
+from numpy.linalg.tests.test_linalg import (
+    CondCases,
+    DetCases,
+    EigCases,
+    EigvalsCases,
+    InvCases,
+    LinalgCase,
+    LinalgTestCase,
+    LstsqCases,
+    PinvCases,
+    SolveCases,
+    SVDCases,
+    _TestNorm2D,
+    _TestNormDoubleBase,
+    _TestNormInt64Base,
+    _TestNormSingleBase,
+    apply_tag,
+)
+from numpy.linalg.tests.test_linalg import TestQR as _TestQR
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+    LinalgCase("0x0_matrix",
+               np.empty((0, 0), dtype=np.double).view(np.matrix),
+               np.empty((0, 1), dtype=np.double).view(np.matrix),
+               tags={'size-0'}),
+    LinalgCase("matrix_b_only",
+               np.array([[1., 2.], [3., 4.]]),
+               np.matrix([2., 1.]).T),
+    LinalgCase("matrix_a_and_b",
+               np.matrix([[1., 2.], [3., 4.]]),
+               np.matrix([2., 1.]).T),
+])
+
+# hermitian test-cases
+CASES += apply_tag('hermitian', [
+    LinalgCase("hmatrix_a_and_b",
+               np.matrix([[1., 2.], [2., 1.]]),
+               None),
+])
+# No need to make generalized or strided cases for matrices.
+
+
+class MatrixTestCase(LinalgTestCase):
+    TEST_CASES = CASES
+
+
+class TestSolveMatrix(SolveCases, MatrixTestCase):
+    pass
+
+
+class TestInvMatrix(InvCases, MatrixTestCase):
+    pass
+
+
+class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
+    pass
+
+
+class TestEigMatrix(EigCases, MatrixTestCase):
+    pass
+
+
+class TestSVDMatrix(SVDCases, MatrixTestCase):
+    pass
+
+
+class TestCondMatrix(CondCases, MatrixTestCase):
+    pass
+
+
+class TestPinvMatrix(PinvCases, MatrixTestCase):
+    pass
+
+
+class TestDetMatrix(DetCases, MatrixTestCase):
+    pass
+
+
+class TestLstsqMatrix(LstsqCases, MatrixTestCase):
+    pass
+
+
+class _TestNorm2DMatrix(_TestNorm2D):
+    array = np.matrix
+
+
+class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
+    pass
+
+
+class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
+    pass
+
+
+class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
+    pass
+
+
+class TestQRMatrix(_TestQR):
+    array = np.matrix
diff --git a/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_multiarray.py b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_multiarray.py
new file mode 100644
index 0000000..2d9d1f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/matrixlib/tests/test_multiarray.py
@@ -0,0 +1,17 @@
+import numpy as np
+from numpy.testing import assert_, assert_array_equal, assert_equal
+
+
+class TestView:
+    def test_type(self):
+        x = np.array([1, 2, 3])
+        assert_(isinstance(x.view(np.matrix), np.matrix))
+
+    def test_keywords(self):
+        x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+        # We must be specific about the endianness here:
+        y = x.view(dtype='<i2', type=np.matrix)
+        assert_array_equal(y, [[513]])
+
+        assert_(isinstance(y, np.matrix))
+        assert_equal(y.dtype, np.dtype('<i2'))
diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__init__.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/__init__.py
new file mode 100644
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/__init__.py
+    >>> from numpy.polynomial import Chebyshev
+    >>> xdata = [1, 2, 3, 4]
+    >>> ydata = [1, 4, 9, 16]
+    >>> c = Chebyshev.fit(xdata, ydata, deg=1)
+
+is preferred over the `chebyshev.chebfit` function from the
+``np.polynomial.chebyshev`` module:: + + >>> from numpy.polynomial.chebyshev import chebfit + >>> c = chebfit(xdata, ydata, deg=1) + +See :doc:`routines.polynomials.classes` for more details. + +Convenience Classes +=================== + +The following lists the various constants and methods common to all of +the classes representing the various kinds of polynomials. In the following, +the term ``Poly`` represents any one of the convenience classes (e.g. +`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.) +while the lowercase ``p`` represents an **instance** of a polynomial class. + +Constants +--------- + +- ``Poly.domain`` -- Default domain +- ``Poly.window`` -- Default window +- ``Poly.basis_name`` -- String used to represent the basis +- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed + +Creation +-------- + +Methods for creating polynomial instances. + +- ``Poly.basis(degree)`` -- Basis polynomial of given degree +- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x`` +- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients + determined by the least-squares fit to the data ``x``, ``y`` +- ``Poly.fromroots(roots)`` -- ``p`` with specified roots +- ``p.copy()`` -- Create a copy of ``p`` + +Conversion +---------- + +Methods for converting a polynomial instance of one kind to another. + +- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` +- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map + between ``domain`` and ``window`` + +Calculus +-------- +- ``p.deriv()`` -- Take the derivative of ``p`` +- ``p.integ()`` -- Integrate ``p`` + +Validation +---------- +- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match +- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match +- ``Poly.has_sametype(p1, p2)`` -- Check if types match +- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match + +Misc +---- +- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain`` +- ``p.mapparms()`` -- Return the parameters for the linear mapping between + ``domain`` and ``window``. +- ``p.roots()`` -- Return the roots of ``p``. +- ``p.trim()`` -- Remove trailing coefficients. +- ``p.cutdeg(degree)`` -- Truncate ``p`` to given degree +- ``p.truncate(size)`` -- Truncate ``p`` to given size + +""" +from .chebyshev import Chebyshev +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial + +__all__ = [ # noqa: F822 + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + + +def set_default_printstyle(style): + """ + Set the default format for the string representation of polynomials. + + Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii' + or 'unicode'. + + Parameters + ---------- + style : str + Format string for default printing style. Must be either 'ascii' or + 'unicode'. + + Notes + ----- + The default format depends on the platform: 'unicode' is used on + Unix-based systems and 'ascii' on Windows. This determination is based on + default font support for the unicode superscript and subscript ranges. 
+ + Examples + -------- + >>> p = np.polynomial.Polynomial([1, 2, 3]) + >>> c = np.polynomial.Chebyshev([1, 2, 3]) + >>> np.polynomial.set_default_printstyle('unicode') + >>> print(p) + 1.0 + 2.0·x + 3.0·x² + >>> print(c) + 1.0 + 2.0·T₁(x) + 3.0·T₂(x) + >>> np.polynomial.set_default_printstyle('ascii') + >>> print(p) + 1.0 + 2.0 x + 3.0 x**2 + >>> print(c) + 1.0 + 2.0 T_1(x) + 3.0 T_2(x) + >>> # Formatting supersedes all class/package-level defaults + >>> print(f"{p:unicode}") + 1.0 + 2.0·x + 3.0·x² + """ + if style not in ('unicode', 'ascii'): + raise ValueError( + f"Unsupported format string '{style}'. Valid options are 'ascii' " + f"and 'unicode'" + ) + _use_unicode = True + if style == 'ascii': + _use_unicode = False + from ._polybase import ABCPolyBase + ABCPolyBase._use_unicode = _use_unicode + + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/__init__.pyi new file mode 100644 index 0000000..6fb0fb5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/__init__.pyi @@ -0,0 +1,25 @@ +from typing import Final, Literal + +from . import chebyshev, hermite, hermite_e, laguerre, legendre, polynomial +from .chebyshev import Chebyshev +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial + +__all__ = [ + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + +def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... 
+ +from numpy._pytesttester import PytestTester as _PytestTester + +test: Final[_PytestTester] diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..46c5c57 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/_polybase.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/_polybase.cpython-312.pyc new file mode 100644 index 0000000..efd4aca Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/_polybase.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-312.pyc new file mode 100644 index 0000000..09ba742 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/hermite.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/hermite.cpython-312.pyc new file mode 100644 index 0000000..9b94bf5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/hermite.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-312.pyc new file mode 100644 index 0000000..dbd433b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-312.pyc new file mode 100644 index 0000000..e078664 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/legendre.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/legendre.cpython-312.pyc new file mode 100644 index 0000000..d0efc66 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/legendre.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-312.pyc new file mode 100644 index 0000000..e359c4c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/polyutils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/polyutils.cpython-312.pyc new file mode 100644 index 0000000..4261d30 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/__pycache__/polyutils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/_polybase.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/_polybase.py new file mode 100644 index 
0000000..f893433 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/_polybase.py @@ -0,0 +1,1191 @@ +""" +Abstract base class for the various polynomial Classes. + +The ABCPolyBase class provides the methods needed to implement the common API +for the various polynomial classes. It operates as a mixin, but uses the +abc module from the stdlib, hence it is only available for Python >= 2.6. + +""" +import abc +import numbers +import os +from collections.abc import Callable + +import numpy as np + +from . import polyutils as pu + +__all__ = ['ABCPolyBase'] + +class ABCPolyBase(abc.ABC): + """An abstract base class for immutable series classes. + + ABCPolyBase provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the + methods listed below. + + Parameters + ---------- + coef : array_like + Series coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where + ``P_i`` is the basis polynomials of degree ``i``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is the derived class domain. + window : (2,) array_like, optional + Window, see domain for its use. The default value is the + derived class window. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + Attributes + ---------- + coef : (N,) ndarray + Series coefficients in order of increasing degree. + domain : (2,) ndarray + Domain that is mapped to window. + window : (2,) ndarray + Window that domain is mapped to. + symbol : str + Symbol representing the independent variable. + + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + """ + + # Not hashable + __hash__ = None + + # Opt out of numpy ufuncs and Python ops with ndarray subclasses. + __array_ufunc__ = None + + # Limit runaway size. T_n^m has degree n*m + maxpower = 100 + + # Unicode character mappings for improved __str__ + _superscript_mapping = str.maketrans({ + "0": "⁰", + "1": "¹", + "2": "²", + "3": "³", + "4": "⁴", + "5": "⁵", + "6": "⁶", + "7": "⁷", + "8": "⁸", + "9": "⁹" + }) + _subscript_mapping = str.maketrans({ + "0": "₀", + "1": "₁", + "2": "₂", + "3": "₃", + "4": "₄", + "5": "₅", + "6": "₆", + "7": "₇", + "8": "₈", + "9": "₉" + }) + # Some fonts don't support full unicode character ranges necessary for + # the full set of superscripts and subscripts, including common/default + # fonts in Windows shells/terminals. Therefore, default to ascii-only + # printing on windows. 
+ _use_unicode = not os.name == 'nt' + + @property + def symbol(self): + return self._symbol + + @property + @abc.abstractmethod + def domain(self): + pass + + @property + @abc.abstractmethod + def window(self): + pass + + @property + @abc.abstractmethod + def basis_name(self): + pass + + @staticmethod + @abc.abstractmethod + def _add(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _sub(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _mul(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _div(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _pow(c, pow, maxpower=None): + pass + + @staticmethod + @abc.abstractmethod + def _val(x, c): + pass + + @staticmethod + @abc.abstractmethod + def _int(c, m, k, lbnd, scl): + pass + + @staticmethod + @abc.abstractmethod + def _der(c, m, scl): + pass + + @staticmethod + @abc.abstractmethod + def _fit(x, y, deg, rcond, full): + pass + + @staticmethod + @abc.abstractmethod + def _line(off, scl): + pass + + @staticmethod + @abc.abstractmethod + def _roots(c): + pass + + @staticmethod + @abc.abstractmethod + def _fromroots(r): + pass + + def has_samecoef(self, other): + """Check if coefficients match. + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + """ + return ( + len(self.coef) == len(other.coef) + and np.all(self.coef == other.coef) + ) + + def has_samedomain(self, other): + """Check if domains match. + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. + + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + Parameters + ---------- + other : object + Class instance. + + Returns + ------- + bool : boolean + True if other is the same class as self, False otherwise. + + """ + return isinstance(other, self.__class__) + + def _get_coefficients(self, other): + """Interpret other as polynomial coefficients. + + The `other` argument is checked to see if it is of the same + class as self with identical domain and window. If so, + return its coefficients, otherwise return `other`. + + Parameters + ---------- + other : anything + Object to be checked. + + Returns + ------- + coef + The coefficients of `other` if it is a compatible instance + of ABCPolyBase, otherwise `other`. + + Raises + ------ + TypeError + When `other` is an incompatible instance of ABCPolyBase.
+ + """ + if isinstance(other, ABCPolyBase): + if not isinstance(other, self.__class__): + raise TypeError("Polynomial types differ") + elif not np.all(self.domain == other.domain): + raise TypeError("Domains differ") + elif not np.all(self.window == other.window): + raise TypeError("Windows differ") + elif self.symbol != other.symbol: + raise ValueError("Polynomial symbols differ") + return other.coef + return other + + def __init__(self, coef, domain=None, window=None, symbol='x'): + [coef] = pu.as_series([coef], trim=False) + self.coef = coef + + if domain is not None: + [domain] = pu.as_series([domain], trim=False) + if len(domain) != 2: + raise ValueError("Domain has wrong number of elements.") + self.domain = domain + + if window is not None: + [window] = pu.as_series([window], trim=False) + if len(window) != 2: + raise ValueError("Window has wrong number of elements.") + self.window = window + + # Validation for symbol + try: + if not symbol.isidentifier(): + raise ValueError( + "Symbol string must be a valid Python identifier" + ) + # If a user passes in something other than a string, the above + # results in an AttributeError. Catch this and raise a more + # informative exception + except AttributeError: + raise TypeError("Symbol must be a non-empty string") + + self._symbol = symbol + + def __repr__(self): + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + name = self.__class__.__name__ + return (f"{name}({coef}, domain={domain}, window={window}, " + f"symbol='{self.symbol}')") + + def __format__(self, fmt_str): + if fmt_str == '': + return self.__str__() + if fmt_str not in ('ascii', 'unicode'): + raise ValueError( + f"Unsupported format string '{fmt_str}' passed to " + f"{self.__class__}.__format__. Valid options are " + f"'ascii' and 'unicode'" + ) + if fmt_str == 'ascii': + return self._generate_string(self._str_term_ascii) + return self._generate_string(self._str_term_unicode) + + def __str__(self): + if self._use_unicode: + return self._generate_string(self._str_term_unicode) + return self._generate_string(self._str_term_ascii) + + def _generate_string(self, term_method): + """ + Generate the full string representation of the polynomial, using + ``term_method`` to generate each polynomial term. + """ + # Get configuration for line breaks + linewidth = np.get_printoptions().get('linewidth', 75) + if linewidth < 1: + linewidth = 1 + out = pu.format_float(self.coef[0]) + + off, scale = self.mapparms() + + scaled_symbol, needs_parens = self._format_term(pu.format_float, + off, scale) + if needs_parens: + scaled_symbol = '(' + scaled_symbol + ')' + + for i, coef in enumerate(self.coef[1:]): + out += " " + power = str(i + 1) + # Polynomial coefficient + # The coefficient array can be an object array with elements that + # will raise a TypeError with >= 0 (e.g. strings or Python + # complex). In this case, represent the coefficient as-is. 
+ try: + if coef >= 0: + next_term = "+ " + pu.format_float(coef, parens=True) + else: + next_term = "- " + pu.format_float(-coef, parens=True) + except TypeError: + next_term = f"+ {coef}" + # Polynomial term + next_term += term_method(power, scaled_symbol) + # Length of the current line with next term added + line_len = len(out.split('\n')[-1]) + len(next_term) + # If not the last term in the polynomial, it will be two + # characters longer due to the +/- with the next term + if i < len(self.coef[1:]) - 1: + line_len += 2 + # Handle linebreaking + if line_len >= linewidth: + next_term = next_term.replace(" ", "\n", 1) + out += next_term + return out + + @classmethod + def _str_term_unicode(cls, i, arg_str): + """ + String representation of single polynomial term using unicode + characters for superscripts and subscripts. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_unicode(cls, i, arg_str)" + ) + return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}" + f"({arg_str})") + + @classmethod + def _str_term_ascii(cls, i, arg_str): + """ + String representation of a single polynomial term using ** and _ to + represent superscripts and subscripts, respectively. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_ascii(cls, i, arg_str)" + ) + return f" {cls.basis_name}_{i}({arg_str})" + + @classmethod + def _repr_latex_term(cls, i, arg_str, needs_parens): + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis name, or override " + "_repr_latex_term(i, arg_str, needs_parens)") + # since we always add parens, we don't care if the expression needs them + return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})" + + @staticmethod + def _repr_latex_scalar(x, parens=False): + # TODO: we're stuck with disabling math formatting until we handle + # exponents in this function + return fr'\text{{{pu.format_float(x, parens=parens)}}}' + + def _format_term(self, scalar_format: Callable, off: float, scale: float): + """ Format a single term in the expansion """ + if off == 0 and scale == 1: + term = self.symbol + needs_parens = False + elif scale == 1: + term = f"{scalar_format(off)} + {self.symbol}" + needs_parens = True + elif off == 0: + term = f"{scalar_format(scale)}{self.symbol}" + needs_parens = True + else: + term = ( + f"{scalar_format(off)} + " + f"{scalar_format(scale)}{self.symbol}" + ) + needs_parens = True + return term, needs_parens + + def _repr_latex_(self): + # get the scaled argument string to the basis functions + off, scale = self.mapparms() + term, needs_parens = self._format_term(self._repr_latex_scalar, + off, scale) + + mute = r"\color{{LightGray}}{{{}}}".format + + parts = [] + for i, c in enumerate(self.coef): + # prevent duplication of + and - signs + if i == 0: + coef_str = f"{self._repr_latex_scalar(c)}" + elif not isinstance(c, numbers.Real): + coef_str = f" + ({self._repr_latex_scalar(c)})" + elif c >= 0: + coef_str = f" + {self._repr_latex_scalar(c, parens=True)}" + else: + coef_str = f" - {self._repr_latex_scalar(-c, parens=True)}" + + # produce the string for the term + term_str = self._repr_latex_term(i, term, needs_parens) + if term_str == '1': + part = coef_str + else: + part = rf"{coef_str}\,{term_str}" + + if c == 0: + part = mute(part) + + parts.append(part) + + if parts: + body = ''.join(parts) + else: + # in case somehow there are no coefficients 
at all + body = '0' + + return rf"${self.symbol} \mapsto {body}$" + + # Pickle and copy + + def __getstate__(self): + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + ret['symbol'] = self.symbol + return ret + + def __setstate__(self, dict): + self.__dict__ = dict + + # Call + + def __call__(self, arg): + arg = pu.mapdomain(arg, self.domain, self.window) + return self._val(arg, self.coef) + + def __iter__(self): + return iter(self.coef) + + def __len__(self): + return len(self.coef) + + # Numeric properties. + + def __neg__(self): + return self.__class__( + -self.coef, self.domain, self.window, self.symbol + ) + + def __pos__(self): + return self + + def __add__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._add(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __sub__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._sub(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __mul__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._mul(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __truediv__(self, other): + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. + # It is hard to see where n would come from, though. + if not isinstance(other, numbers.Number) or isinstance(other, bool): + raise TypeError( + f"unsupported types for true division: " + f"'{type(self)}', '{type(other)}'" + ) + return self.__floordiv__(other) + + def __floordiv__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __mod__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __divmod__(self, other): + othercoef = self._get_coefficients(other) + try: + quo, rem = self._div(self.coef, othercoef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __pow__(self, other): + coef = self._pow(self.coef, other, maxpower=self.maxpower) + res = self.__class__(coef, self.domain, self.window, self.symbol) + return res + + def __radd__(self, other): + try: + coef = self._add(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rsub__(self, other): + try: + coef = self._sub(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rmul__(self, other): + try: + coef = self._mul(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rtruediv__(self, other): + # An instance of ABCPolyBase is not considered a + # Number. 
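# A hedged sketch of the consequence: since the reflected division is
# refused, Python itself raises the TypeError.
#
# >>> import numpy as np
# >>> 1 / np.polynomial.Polynomial([1, 2])
# Traceback (most recent call last):
#     ...
# TypeError: unsupported operand type(s) for /: 'int' and 'Polynomial'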
+ return NotImplemented + + def __rfloordiv__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __rmod__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __rdivmod__(self, other): + try: + quo, rem = self._div(other, self.coef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __eq__(self, other): + res = (isinstance(other, self.__class__) and + np.all(self.domain == other.domain) and + np.all(self.window == other.window) and + (self.coef.shape == other.coef.shape) and + np.all(self.coef == other.coef) and + (self.symbol == other.symbol)) + return res + + def __ne__(self, other): + return not self.__eq__(other) + + # + # Extra methods. + # + + def copy(self): + """Return a copy. + + Returns + ------- + new_series : series + Copy of self. + + """ + return self.__class__(self.coef, self.domain, self.window, self.symbol) + + def degree(self): + """The degree of the series. + + Returns + ------- + degree : int + Degree of the series, one less than the number of coefficients. + + Examples + -------- + + Create a polynomial object for ``1 + 7*x + 4*x**2``: + + >>> np.polynomial.set_default_printstyle("unicode") + >>> poly = np.polynomial.Polynomial([1, 7, 4]) + >>> print(poly) + 1.0 + 7.0·x + 4.0·x² + >>> poly.degree() + 2 + + Note that this method does not check for non-zero coefficients. + You must trim the polynomial to remove any trailing zeroes: + + >>> poly = np.polynomial.Polynomial([1, 7, 0]) + >>> print(poly) + 1.0 + 7.0·x + 0.0·x² + >>> poly.degree() + 2 + >>> poly.trim().degree() + 1 + + """ + return len(self) - 1 + + def cutdeg(self, deg): + """Truncate series to the given degree. + + Reduce the degree of the series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + Parameters + ---------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. + + Returns + ------- + new_series : series + New instance of series with reduced degree. + + """ + return self.truncate(deg + 1) + + def trim(self, tol=0): + """Remove trailing coefficients + + Remove trailing coefficients until a coefficient is reached whose + absolute value is greater than `tol` or the beginning of the series is + reached. If all the coefficients would be removed the series is set + to ``[0]``. A new series instance is returned with the new + coefficients. The current instance remains unchanged. + + Parameters + ---------- + tol : non-negative number + All trailing coefficients less than `tol` will be removed. + + Returns + ------- + new_series : series + New instance of series with trimmed coefficients. + + """ + coef = pu.trimcoef(self.coef, tol) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def truncate(self, size): + """Truncate series to length `size`. + + Reduce the series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small.
+ + Parameters + ---------- + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. + + Returns + ------- + new_series : series + New instance of series with truncated coefficients. + + """ + isize = int(size) + if isize != size or isize < 1: + raise ValueError("size must be a positive integer") + if isize >= len(self.coef): + coef = self.coef + else: + coef = self.coef[:isize] + return self.__class__(coef, self.domain, self.window, self.symbol) + + def convert(self, domain=None, kind=None, window=None): + """Convert series to a different kind and/or domain and/or window. + + Parameters + ---------- + domain : array_like, optional + The domain of the converted series. If the value is None, + the default domain of `kind` is used. + kind : class, optional + The polynomial series type class to which the current instance + should be converted. If kind is None, then the class of the + current instance is used. + window : array_like, optional + The window of the converted series. If the value is None, + the default window of `kind` is used. + + Returns + ------- + new_series : series + The returned class can be of a different type than the current + instance and/or have a different domain and/or different + window. + + Notes + ----- + Conversion between domains and class types can result in + numerically ill-defined series. + + """ + if kind is None: + kind = self.__class__ + if domain is None: + domain = kind.domain + if window is None: + window = kind.window + return self(kind.identity(domain, window=window, symbol=self.symbol)) + + def mapparms(self): + """Return the mapping parameters. + + The returned values define a linear map ``off + scl*x`` that is + applied to the input arguments before the series is evaluated. The + map depends on the ``domain`` and ``window``; if the current + ``domain`` is equal to the ``window`` the resulting map is the + identity. If the coefficients of the series instance are to be + used by themselves outside this class, then the linear function + must be substituted for the ``x`` in the standard representation of + the base polynomials. + + Returns + ------- + off, scl : float or complex + The mapping function is defined by ``off + scl*x``. + + Notes + ----- + If the current domain is the interval ``[l1, r1]`` and the window + is ``[l2, r2]``, then the linear mapping function ``L`` is + defined by the equations:: + + L(l1) = l2 + L(r1) = r2 + + """ + return pu.mapparms(self.domain, self.window) + + def integ(self, m=1, k=[], lbnd=None): + """Integrate. + + Return a series instance that is the definite integral of the + current series. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + k : array_like + Integration constants. The first constant is applied to the + first integration, the second to the second, and so on. The + list of values must be less than or equal to `m` in length and any + missing values are set to zero. + lbnd : scalar + The lower bound of the definite integral. + + Returns + ------- + new_series : series + A new series representing the integral. The domain is the same + as the domain of the integrated series. + + """ + off, scl = self.mapparms() + if lbnd is None: + lbnd = 0 + else: + lbnd = off + scl * lbnd + coef = self._int(self.coef, m, k, lbnd, 1. / scl) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def deriv(self, m=1): + """Differentiate.
+ + Return a series instance that is the derivative of the current + series. + + Parameters + ---------- + m : non-negative int + Find the derivative of order `m`. + + Returns + ------- + new_series : series + A new series representing the derivative. The domain is the same + as the domain of the differentiated series. + + """ + off, scl = self.mapparms() + coef = self._der(self.coef, m, scl) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def roots(self): + """Return the roots of the series polynomial. + + Compute the roots for the series. Note that the accuracy of the + roots decreases the further outside the `domain` they lie. + + Returns + ------- + roots : ndarray + Array containing the roots of the series. + + """ + roots = self._roots(self.coef) + return pu.mapdomain(roots, self.window, self.domain) + + def linspace(self, n=100, domain=None): + """Return x, y values at equally spaced points in domain. + + Returns the x, y values at `n` linearly spaced points across the + domain. Here y is the value of the polynomial at the points x. By + default the domain is the same as that of the series instance. + This method is intended mostly as a plotting aid. + + Parameters + ---------- + n : int, optional + Number of point pairs to return. The default value is 100. + domain : {None, array_like}, optional + If not None, the specified domain is used instead of that of + the calling instance. It should be of the form ``[beg,end]``. + The default is None, in which case the instance domain is used. + + Returns + ------- + x, y : ndarray + x is equal to linspace(self.domain[0], self.domain[1], n) and + y is the series evaluated at each element of x. + + """ + if domain is None: + domain = self.domain + x = np.linspace(domain[0], domain[1], n) + y = self(x) + return x, y + + @classmethod + def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, + window=None, symbol='x'): + """Least squares fit to data. + + Return a series instance that is the least squares fit to the data + `y` sampled at `x`. The domain of the returned instance can be + specified and this will often result in a superior fit with less + chance of ill conditioning. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) + y-coordinates of the M sample points ``(x[i], y[i])``. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + domain : {None, [beg, end], []}, optional + Domain to use for the returned series. If ``None``, + then a minimal domain that covers the points `x` is chosen. If + ``[]`` the class domain is used. The default value was the + class domain in NumPy 1.4 and ``None`` in later versions. + The ``[]`` option was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is ``len(x)*eps``, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining the nature of the return value. When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned.
+ w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have + the same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + window : {[beg, end]}, optional + Window to use for the returned series. The default + value is the default class window. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + A series that represents the least squares fit to the data and + has the domain and window specified in the call. If the + coefficients for the unscaled and unshifted basis polynomials are + of interest, do ``new_series.convert().coef``. + + [resid, rank, sv, rcond] : list + These values are only returned if ``full == True`` + + - resid -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - sv -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + """ + if domain is None: + domain = pu.getdomain(x) + if domain[0] == domain[1]: + domain[0] -= 1 + domain[1] += 1 + elif isinstance(domain, list) and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + xnew = pu.mapdomain(x, domain, window) + res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) + if full: + [coef, status] = res + return ( + cls(coef, domain=domain, window=window, symbol=symbol), status + ) + else: + coef = res + return cls(coef, domain=domain, window=window, symbol=symbol) + + @classmethod + def fromroots(cls, roots, domain=[], window=None, symbol='x'): + """Return series instance that has the specified roots. + + Returns a series representing the product + ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a + list of roots. + + Parameters + ---------- + roots : array_like + List of roots. + domain : {[], None, array_like}, optional + Domain for the resulting series. If None the domain is the + interval from the smallest root to the largest. If [] the + domain is the class domain. The default is []. + window : {None, array_like}, optional + Window for the returned series. If None the class window is + used. The default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + Series with the specified roots. + + """ + [roots] = pu.as_series([roots], trim=False) + if domain is None: + domain = pu.getdomain(roots) + elif isinstance(domain, list) and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + deg = len(roots) + off, scl = pu.mapparms(domain, window) + rnew = off + scl * roots + coef = cls._fromroots(rnew) / scl**deg + return cls(coef, domain=domain, window=window, symbol=symbol) + + @classmethod + def identity(cls, domain=None, window=None, symbol='x'): + """Identity function. + + If ``p`` is the returned series, then ``p(x) == x`` for all + values of x. + + Parameters + ---------- + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None.
+ window : {None, array_like}, optional + If given, the resulting array must be of the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + Series representing the identity. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + off, scl = pu.mapparms(window, domain) + coef = cls._line(off, scl) + return cls(coef, domain, window, symbol) + + @classmethod + def basis(cls, deg, domain=None, window=None, symbol='x'): + """Series basis polynomial of degree `deg`. + + Returns the series representing the basis polynomial of degree `deg`. + + Parameters + ---------- + deg : int + Degree of the basis polynomial for the series. Must be >= 0. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be of the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + A series with the coefficient of the `deg` term set to one and + all others zero. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + ideg = int(deg) + + if ideg != deg or ideg < 0: + raise ValueError("deg must be non-negative integer") + return cls([0] * ideg + [1], domain, window, symbol) + + @classmethod + def cast(cls, series, domain=None, window=None): + """Convert series to series of this class. + + The `series` is expected to be an instance of some polynomial + series of one of the types supported by the numpy.polynomial + module, but could be some other class that supports the convert + method. + + Parameters + ---------- + series : series + The series instance to be converted. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be of the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series of the same kind as the calling class and equal to + `series` when evaluated.
+ + See Also + -------- + convert : similar instance method + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + return series.convert(domain, cls, window) diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/_polybase.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/_polybase.pyi new file mode 100644 index 0000000..6d71a8c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/_polybase.pyi @@ -0,0 +1,285 @@ +import abc +import decimal +import numbers +from collections.abc import Iterator, Mapping, Sequence +from typing import ( + Any, + ClassVar, + Generic, + Literal, + LiteralString, + Self, + SupportsIndex, + TypeAlias, + overload, +) + +from typing_extensions import TypeIs, TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polytypes import ( + _AnyInt, + _Array2, + _ArrayLikeCoef_co, + _ArrayLikeCoefObject_co, + _CoefLike_co, + _CoefSeries, + _Series, + _SeriesLikeCoef_co, + _SeriesLikeInt_co, + _Tuple2, +) + +__all__ = ["ABCPolyBase"] + +_NameCo = TypeVar( + "_NameCo", + bound=LiteralString | None, + covariant=True, + default=LiteralString | None +) +_Other = TypeVar("_Other", bound=ABCPolyBase) + +_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co +_Hundred: TypeAlias = Literal[100] + +class ABCPolyBase(Generic[_NameCo], abc.ABC): + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __array_ufunc__: ClassVar[None] + + maxpower: ClassVar[_Hundred] + _superscript_mapping: ClassVar[Mapping[int, str]] + _subscript_mapping: ClassVar[Mapping[int, str]] + _use_unicode: ClassVar[bool] + + basis_name: _NameCo + coef: _CoefSeries + domain: _Array2[np.inexact | np.object_] + window: _Array2[np.inexact | np.object_] + + _symbol: LiteralString + @property + def symbol(self, /) -> LiteralString: ... + + def __init__( + self, + /, + coef: _SeriesLikeCoef_co, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> None: ... + + @overload + def __call__(self, /, arg: _Other) -> _Other: ... + # TODO: Once `_ShapeT@ndarray` is covariant and bounded (see #26081), + # additionally include 0-d arrays as input types with scalar return type. + @overload + def __call__( + self, + /, + arg: _FloatLike_co | decimal.Decimal | numbers.Real | np.object_, + ) -> np.float64 | np.complex128: ... + @overload + def __call__( + self, + /, + arg: _NumberLike_co | numbers.Complex, + ) -> np.complex128: ... + @overload + def __call__(self, /, arg: _ArrayLikeFloat_co) -> ( + npt.NDArray[np.float64] + | npt.NDArray[np.complex128] + | npt.NDArray[np.object_] + ): ... + @overload + def __call__( + self, + /, + arg: _ArrayLikeComplex_co, + ) -> npt.NDArray[np.complex128] | npt.NDArray[np.object_]: ... + @overload + def __call__( + self, + /, + arg: _ArrayLikeCoefObject_co, + ) -> npt.NDArray[np.object_]: ... + + def __format__(self, fmt_str: str, /) -> str: ... + def __eq__(self, x: object, /) -> bool: ... + def __ne__(self, x: object, /) -> bool: ... + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __add__(self, x: _AnyOther, /) -> Self: ... + def __sub__(self, x: _AnyOther, /) -> Self: ... + def __mul__(self, x: _AnyOther, /) -> Self: ... + def __truediv__(self, x: _AnyOther, /) -> Self: ... + def __floordiv__(self, x: _AnyOther, /) -> Self: ... 
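# The operator stubs above and below all return ``Self``, so arithmetic
# preserves the concrete series type. An illustrative sketch (not part
# of the upstream stub):
#
# >>> from numpy.polynomial import Chebyshev
# >>> type(Chebyshev([1, 2]) + [0, 1]).__name__
# 'Chebyshev'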
+ def __mod__(self, x: _AnyOther, /) -> Self: ... + def __divmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + def __pow__(self, x: _AnyOther, /) -> Self: ... + def __radd__(self, x: _AnyOther, /) -> Self: ... + def __rsub__(self, x: _AnyOther, /) -> Self: ... + def __rmul__(self, x: _AnyOther, /) -> Self: ... + def __rtruediv__(self, x: _AnyOther, /) -> Self: ... + def __rfloordiv__(self, x: _AnyOther, /) -> Self: ... + def __rmod__(self, x: _AnyOther, /) -> Self: ... + def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Iterator[np.inexact | object]: ... + def __getstate__(self, /) -> dict[str, Any]: ... + def __setstate__(self, dict: dict[str, Any], /) -> None: ... + + def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... + def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... + def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... + @overload + def has_sametype(self, /, other: ABCPolyBase) -> TypeIs[Self]: ... + @overload + def has_sametype(self, /, other: object) -> Literal[False]: ... + + def copy(self, /) -> Self: ... + def degree(self, /) -> int: ... + def cutdeg(self, /, deg: _AnyInt) -> Self: ... + def trim(self, /, tol: _FloatLike_co = ...) -> Self: ... + def truncate(self, /, size: _AnyInt) -> Self: ... + + @overload + def convert( + self, + /, + domain: _SeriesLikeCoef_co | None, + kind: type[_Other], + window: _SeriesLikeCoef_co | None = ..., + ) -> _Other: ... + @overload + def convert( + self, + /, + domain: _SeriesLikeCoef_co | None = ..., + *, + kind: type[_Other], + window: _SeriesLikeCoef_co | None = ..., + ) -> _Other: ... + @overload + def convert( + self, + /, + domain: _SeriesLikeCoef_co | None = ..., + kind: None = None, + window: _SeriesLikeCoef_co | None = ..., + ) -> Self: ... + + def mapparms(self, /) -> _Tuple2[Any]: ... + + def integ( + self, + /, + m: SupportsIndex = ..., + k: _CoefLike_co | _SeriesLikeCoef_co = ..., + lbnd: _CoefLike_co | None = ..., + ) -> Self: ... + + def deriv(self, /, m: SupportsIndex = ...) -> Self: ... + + def roots(self, /) -> _CoefSeries: ... + + def linspace( + self, + /, + n: SupportsIndex = ..., + domain: _SeriesLikeCoef_co | None = ..., + ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... + + @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None = ..., + rcond: _FloatLike_co = ..., + full: Literal[False] = ..., + w: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> Self: ... + @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None = ..., + rcond: _FloatLike_co = ..., + *, + full: Literal[True], + w: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... + @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None, + rcond: _FloatLike_co, + full: Literal[True], /, + w: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ...
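# Runtime sketch of the ``full=True`` overloads above: the diagnostics
# list is returned alongside the fitted series (variable names here are
# illustrative only):
#
# >>> import numpy as np
# >>> x = np.linspace(-1, 1, 9)
# >>> series, diag = np.polynomial.Polynomial.fit(x, x**2, 2, full=True)
# >>> len(diag)  # [resid, rank, sv, rcond]
# 4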
+ + @classmethod + def fromroots( + cls, + roots: _ArrayLikeCoef_co, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> Self: ... + + @classmethod + def identity( + cls, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> Self: ... + + @classmethod + def basis( + cls, + deg: _AnyInt, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> Self: ... + + @classmethod + def cast( + cls, + series: ABCPolyBase, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + ) -> Self: ... + + @classmethod + def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... + @staticmethod + def _str_term_ascii(i: str, arg_str: str) -> str: ... + @staticmethod + def _repr_latex_term(i: str, arg_str: str, needs_parens: bool) -> str: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/_polytypes.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/_polytypes.pyi new file mode 100644 index 0000000..241a65b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/_polytypes.pyi @@ -0,0 +1,892 @@ +# ruff: noqa: PYI046, PYI047 + +from collections.abc import Callable, Sequence +from typing import ( + Any, + Literal, + LiteralString, + NoReturn, + Protocol, + Self, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, + type_check_only, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + # array-likes + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ComplexLike_co, + _FloatLike_co, + # scalar-likes + _IntLike_co, + _NestedSequence, + _NumberLike_co, + _SupportsArray, +) + +_T = TypeVar("_T") +_T_contra = TypeVar("_T_contra", contravariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) + +# compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase +@type_check_only +class _SupportsCoefOps(Protocol[_T_contra]): + def __eq__(self, x: object, /) -> bool: ... + def __ne__(self, x: object, /) -> bool: ... + + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + + def __add__(self, x: _T_contra, /) -> Self: ... + def __sub__(self, x: _T_contra, /) -> Self: ... + def __mul__(self, x: _T_contra, /) -> Self: ... + def __pow__(self, x: _T_contra, /) -> Self | float: ... + + def __radd__(self, x: _T_contra, /) -> Self: ... + def __rsub__(self, x: _T_contra, /) -> Self: ... + def __rmul__(self, x: _T_contra, /) -> Self: ... 
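# ``_SupportsCoefOps`` is deliberately permissive: any scalar type
# implementing the operations above can serve as a coefficient. A hedged
# sketch with ``fractions.Fraction`` values in an object array:
#
# >>> import numpy as np
# >>> from fractions import Fraction
# >>> coef = np.array([Fraction(1, 2), Fraction(3, 4)], dtype=object)
# >>> p = np.polynomial.Polynomial(coef)
# >>> (p + p).coef
# array([Fraction(1, 1), Fraction(3, 2)], dtype=object)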
+ +_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_FloatSeries: TypeAlias = _Series[np.floating] +_ComplexSeries: TypeAlias = _Series[np.complexfloating] +_ObjectSeries: TypeAlias = _Series[np.object_] +_CoefSeries: TypeAlias = _Series[np.inexact | np.object_] + +_FloatArray: TypeAlias = npt.NDArray[np.floating] +_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating] +_ObjectArray: TypeAlias = npt.NDArray[np.object_] +_CoefArray: TypeAlias = npt.NDArray[np.inexact | np.object_] + +_Tuple2: TypeAlias = tuple[_T, _T] +_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_ScalarT]] +_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_ScalarT]] + +_AnyInt: TypeAlias = SupportsInt | SupportsIndex + +_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] +_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co + +# The term "series" is used here to refer to 1-d arrays of numeric scalars. +_SeriesLikeBool_co: TypeAlias = ( + _SupportsArray[np.dtype[np.bool]] + | Sequence[bool | np.bool] +) +_SeriesLikeInt_co: TypeAlias = ( + _SupportsArray[np.dtype[np.integer | np.bool]] + | Sequence[_IntLike_co] +) +_SeriesLikeFloat_co: TypeAlias = ( + _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] + | Sequence[_FloatLike_co] +) +_SeriesLikeComplex_co: TypeAlias = ( + _SupportsArray[np.dtype[np.inexact | np.integer | np.bool]] + | Sequence[_ComplexLike_co] +) +_SeriesLikeObject_co: TypeAlias = ( + _SupportsArray[np.dtype[np.object_]] + | Sequence[_CoefObjectLike_co] +) +_SeriesLikeCoef_co: TypeAlias = ( + _SupportsArray[np.dtype[np.number | np.bool | np.object_]] + | Sequence[_CoefLike_co] +) + +_ArrayLikeCoefObject_co: TypeAlias = ( + _CoefObjectLike_co + | _SeriesLikeObject_co + | _NestedSequence[_SeriesLikeObject_co] +) +_ArrayLikeCoef_co: TypeAlias = ( + npt.NDArray[np.number | np.bool | np.object_] + | _ArrayLikeNumber_co + | _ArrayLikeCoefObject_co +) + +_Name_co = TypeVar( + "_Name_co", + bound=LiteralString, + covariant=True, + default=LiteralString +) + +@type_check_only +class _Named(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + +_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_ScalarT]] + +@type_check_only +class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... + @overload + def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... + @overload + def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... + @overload + def __call__( + self, + /, + off: complex, + scl: complex, + ) -> _Line[np.complex128]: ... + @overload + def __call__( + self, + /, + off: _SupportsCoefOps[Any], + scl: _SupportsCoefOps[Any], + ) -> _Line[np.object_]: ... + +@type_check_only +class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c1: _SeriesLikeBool_co, + c2: _SeriesLikeBool_co, + ) -> NoReturn: ... + @overload + def __call__( + self, + /, + c1: _SeriesLikeFloat_co, + c2: _SeriesLikeFloat_co, + ) -> _FloatSeries: ... 
+ @overload + def __call__( + self, + /, + c1: _SeriesLikeComplex_co, + c2: _SeriesLikeComplex_co, + ) -> _ComplexSeries: ... + @overload + def __call__( + self, + /, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, + ) -> _ObjectSeries: ... + +@type_check_only +class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + pow: _IntLike_co, + maxpower: _IntLike_co | None = ..., + ) -> _FloatSeries: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + pow: _IntLike_co, + maxpower: _IntLike_co | None = ..., + ) -> _ComplexSeries: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeCoef_co, + pow: _IntLike_co, + maxpower: _IntLike_co | None = ..., + ) -> _ObjectSeries: ... + +@type_check_only +class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = ..., + scl: _FloatLike_co = ..., + axis: SupportsIndex = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = ..., + scl: _ComplexLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = ..., + scl: _CoefLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ObjectArray: ... + +@type_check_only +class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = ..., + k: _FloatLike_co | _SeriesLikeFloat_co = ..., + lbnd: _FloatLike_co = ..., + scl: _FloatLike_co = ..., + axis: SupportsIndex = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = ..., + k: _ComplexLike_co | _SeriesLikeComplex_co = ..., + lbnd: _ComplexLike_co = ..., + scl: _ComplexLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = ..., + k: _CoefLike_co | _SeriesLikeCoef_co = ..., + lbnd: _CoefLike_co = ..., + scl: _CoefLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ObjectArray: ... + +@type_check_only +class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + r: _FloatLike_co, + tensor: bool = ..., + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + r: _NumberLike_co, + tensor: bool = ..., + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _FloatLike_co | _ArrayLikeFloat_co, + r: _ArrayLikeFloat_co, + tensor: bool = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co | _ArrayLikeComplex_co, + r: _ArrayLikeComplex_co, + tensor: bool = ..., + ) -> _ComplexArray: ... 
+ @overload + def __call__( + self, + /, + x: _CoefLike_co | _ArrayLikeCoef_co, + r: _ArrayLikeCoef_co, + tensor: bool = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + r: _CoefLike_co, + tensor: bool = ..., + ) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + c: _SeriesLikeFloat_co, + tensor: bool = ..., + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + c: _SeriesLikeComplex_co, + tensor: bool = ..., + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + tensor: bool = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + tensor: bool = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + tensor: bool = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + c: _SeriesLikeObject_co, + tensor: bool = ..., + ) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + y: _FloatLike_co, + c: _SeriesLikeFloat_co, + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + y: _NumberLike_co, + c: _SeriesLikeComplex_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + y: _FloatLike_co, + z: _FloatLike_co, + c: _SeriesLikeFloat_co + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + y: _NumberLike_co, + z: _NumberLike_co, + c: _SeriesLikeComplex_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + z: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps[Any]: ... + +_AnyValF: TypeAlias = Callable[ + [npt.ArrayLike, npt.ArrayLike, bool], + _CoefArray, +] + +@type_check_only +class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeFloat_co, + /, + *args: _FloatLike_co, + ) -> np.floating: ... 
+ @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeComplex_co, + /, + *args: _NumberLike_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeFloat_co, + /, + *args: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeComplex_co, + /, + *args: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeObject_co, + /, + *args: _CoefObjectLike_co, + ) -> _SupportsCoefOps[Any]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeCoef_co, + /, + *args: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + +@type_check_only +class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + deg: SupportsIndex, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + deg: SupportsIndex, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + deg: SupportsIndex, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + deg: SupportsIndex, + ) -> _CoefArray: ... + +_AnyDegrees: TypeAlias = Sequence[SupportsIndex] + +@type_check_only +class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _AnyDegrees, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: _AnyDegrees, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: _AnyDegrees, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + y: npt.ArrayLike, + deg: _AnyDegrees, + ) -> _CoefArray: ... + +@type_check_only +class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + deg: _AnyDegrees, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + deg: _AnyDegrees, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + deg: _AnyDegrees, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + y: npt.ArrayLike, + z: npt.ArrayLike, + deg: _AnyDegrees, + ) -> _CoefArray: ... + +# keep in sync with the broadest overload of `._FuncVander` +_AnyFuncVander: TypeAlias = Callable[ + [npt.ArrayLike, SupportsIndex], + _CoefArray, +] + +@type_check_only +class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[ + _ArrayLikeObject_co | _ArrayLikeComplex_co, + ], + degrees: Sequence[SupportsIndex], + ) -> _ObjectArray: ... 
+ @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], + ) -> _CoefArray: ... + +_FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] + +@type_check_only +class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeFloat_co | None = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeFloat_co | None = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeFloat_co | None = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + +@type_check_only +class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + ) -> _Series[np.float64]: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + ) -> _Series[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] + +@type_check_only +class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + ) -> _Companion[np.float64]: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + ) -> _Companion[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... 
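# Runtime counterpart sketch for ``_FuncCompanion``: companion matrices
# come back as square 2-D arrays of shape (deg, deg), matching the
# ``_Companion`` alias above.
#
# >>> import numpy as np
# >>> np.polynomial.chebyshev.chebcompanion([1, 2, 3]).shape
# (2, 2)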
+ +@type_check_only +class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): + def __call__( + self, + /, + deg: SupportsIndex, + ) -> _Tuple2[_Series[np.float64]]: ... + +@type_check_only +class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + ) -> npt.NDArray[np.float64]: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + ) -> npt.NDArray[np.complex128]: ... + @overload + def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... + +@type_check_only +class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): + def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/chebyshev.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/chebyshev.py new file mode 100644 index 0000000..58fce60 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/chebyshev.py @@ -0,0 +1,2003 @@ +""" +==================================================== +Chebyshev Series (:mod:`numpy.polynomial.chebyshev`) +==================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Chebyshev series, including a `Chebyshev` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- + +.. autosummary:: + :toctree: generated/ + + Chebyshev + + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + chebdomain + chebzero + chebone + chebx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + chebadd + chebsub + chebmulx + chebmul + chebdiv + chebpow + chebval + chebval2d + chebval3d + chebgrid2d + chebgrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + chebder + chebint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + chebfromroots + chebroots + chebvander + chebvander2d + chebvander3d + chebgauss + chebweight + chebcompanion + chebfit + chebpts1 + chebpts2 + chebtrim + chebline + cheb2poly + poly2cheb + chebinterpolate + +See also +-------- +`numpy.polynomial` + +Notes +----- +The implementations of multiplication, division, integration, and +differentiation use the algebraic identities [1]_: + +.. math:: + T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ + z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. + +where + +.. math:: x = \\frac{z + z^{-1}}{2}. + +These identities allow a Chebyshev series to be expressed as a finite, +symmetric Laurent series. In this module, this sort of Laurent series +is referred to as a "z-series." + +References +---------- +.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev + Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 + (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) + +""" # noqa: E501 +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . 
import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', + 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', + 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', + 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', + 'chebgauss', 'chebweight', 'chebinterpolate'] + +chebtrim = pu.trimcoef + +# +# A collection of functions for manipulating z-series. These are private +# functions and do minimal error checking. +# + +def _cseries_to_zseries(c): + """Convert Chebyshev series to z-series. + + Convert a Chebyshev series to the equivalent z-series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high + + Returns + ------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + """ + n = c.size + zs = np.zeros(2 * n - 1, dtype=c.dtype) + zs[n - 1:] = c / 2 + return zs + zs[::-1] + + +def _zseries_to_cseries(zs): + """Convert z-series to a Chebyshev series. + + Convert a z series to the equivalent Chebyshev series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + Returns + ------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high. + + """ + n = (zs.size + 1) // 2 + c = zs[n - 1:].copy() + c[1:n] *= 2 + return c + + +def _zseries_mul(z1, z2): + """Multiply two z-series. + + Multiply two z-series to produce a z-series. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D but this is not checked. + + Returns + ------- + product : 1-D ndarray + The product z-series. + + Notes + ----- + This is simply convolution. If symmetric/anti-symmetric z-series are + denoted by S/A then the following rules apply: + + S*S, A*A -> S + S*A, A*S -> A + + """ + return np.convolve(z1, z2) + + +def _zseries_div(z1, z2): + """Divide the first z-series by the second. + + Divide `z1` by `z2` and return the quotient and remainder as z-series. + Warning: this implementation only applies when both z1 and z2 have the + same symmetry, which is sufficient for present purposes. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D and have the same symmetry, but this is not + checked. + + Returns + ------- + + (quotient, remainder) : 1-D ndarrays + Quotient and remainder as z-series. + + Notes + ----- + This is not the same as polynomial division on account of the desired form + of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A + then the following rules apply: + + S/S -> S,S + A/A -> S,A + + The restriction to types of the same symmetry could be fixed but seems like + unneeded generality. There is no natural form for the remainder in the case + where there is no symmetry. 
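+
+    Examples
+    --------
+    Dividing one symmetric z-series by another (illustrative values; per
+    the rules above, S/S yields a symmetric quotient and remainder):
+
+    >>> quo, rem = _zseries_div(np.array([1., 2., 4., 2., 1.]),
+    ...                         np.array([1., 2., 1.]))
+    >>> quo
+    array([1., 0., 1.])
+    >>> rem
+    array([2.])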
+ + """ + z1 = z1.copy() + z2 = z2.copy() + lc1 = len(z1) + lc2 = len(z2) + if lc2 == 1: + z1 /= z2 + return z1, z1[:1] * 0 + elif lc1 < lc2: + return z1[:1] * 0, z1 + else: + dlen = lc1 - lc2 + scl = z2[0] + z2 /= scl + quo = np.empty(dlen + 1, dtype=z1.dtype) + i = 0 + j = dlen + while i < j: + r = z1[i] + quo[i] = z1[i] + quo[dlen - i] = r + tmp = r * z2 + z1[i:i + lc2] -= tmp + z1[j:j + lc2] -= tmp + i += 1 + j -= 1 + r = z1[i] + quo[i] = r + tmp = r * z2 + z1[i:i + lc2] -= tmp + quo /= scl + rem = z1[i + 1:i - 1 + lc2].copy() + return quo, rem + + +def _zseries_der(zs): + """Differentiate a z-series. + + The derivative is with respect to x, not z. This is achieved using the + chain rule and the value of dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to differentiate. + + Returns + ------- + derivative : z-series + The derivative + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + multiplying the value of zs by two also so that the two cancels in the + division. + + """ + n = len(zs) // 2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs *= np.arange(-n, n + 1) * 2 + d, r = _zseries_div(zs, ns) + return d + + +def _zseries_int(zs): + """Integrate a z-series. + + The integral is with respect to x, not z. This is achieved by a change + of variable using dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to integrate + + Returns + ------- + integral : z-series + The indefinite integral + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + dividing the resulting zs by two. + + """ + n = 1 + len(zs) // 2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs = _zseries_mul(zs, ns) + div = np.arange(-n, n + 1) * 2 + zs[:n] /= div[:n] + zs[n + 1:] /= div[n + 1:] + zs[n] = 0 + return zs + +# +# Chebyshev series functions +# + + +def poly2cheb(pol): + """ + Convert a polynomial to a Chebyshev series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Chebyshev series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Chebyshev + series. + + See Also + -------- + cheb2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(range(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> c = p.convert(kind=P.Chebyshev) + >>> c + Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., ... + >>> P.chebyshev.poly2cheb(range(4)) + array([1. , 3.25, 1. , 0.75]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = chebadd(chebmulx(res), pol[i]) + return res + + +def cheb2poly(c): + """ + Convert a Chebyshev series to a polynomial. 
+ + Convert an array representing the coefficients of a Chebyshev series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Chebyshev series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2cheb + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Chebyshev(range(4)) + >>> c + Chebyshev([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.], ... + >>> P.chebyshev.cheb2poly(range(4)) + array([-2., -8., 4., 12.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1) + c1 = polyadd(tmp, polymulx(c1) * 2) + return polyadd(c0, polymulx(c1)) + + +# +# These constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Chebyshev default domain. +chebdomain = np.array([-1., 1.]) + +# Chebyshev coefficients representing zero. +chebzero = np.array([0]) + +# Chebyshev coefficients representing one. +chebone = np.array([1]) + +# Chebyshev coefficients representing the identity x. +chebx = np.array([0, 1]) + + +def chebline(off, scl): + """ + Chebyshev series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Chebyshev series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebline(3,2) + array([3, 2]) + >>> C.chebval(-3, C.chebline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def chebfromroots(roots): + """ + Generate a Chebyshev series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Chebyshev form, where the :math:`r_n` are the roots specified in + `roots`. If a zero has multiplicity n, then it must appear in `roots` + n times. For instance, if 2 is a root of multiplicity three and 3 is a + root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. + The roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Chebyshev form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients.
If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.25, 0. , 0.25]) + >>> j = complex(0,1) + >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([1.5+0.j, 0. +0.j, 0.5+0.j]) + + """ + return pu._fromroots(chebline, chebmul, roots) + + +def chebadd(c1, c2): + """ + Add one Chebyshev series to another. + + Returns the sum of two Chebyshev series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Chebyshev series of their sum. + + See Also + -------- + chebsub, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Chebyshev series + is a Chebyshev series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def chebsub(c1, c2): + """ + Subtract one Chebyshev series from another. + + Returns the difference of two Chebyshev series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their difference. + + See Also + -------- + chebadd, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Chebyshev + series is a Chebyshev series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebsub(c1,c2) + array([-2., 0., 2.]) + >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def chebmulx(c): + """Multiply a Chebyshev series by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + chebadd, chebsub, chebmul, chebdiv, chebpow + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebmulx([1,2,3]) + array([1. , 2.5, 1. 
, 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] + if len(c) > 1: + tmp = c[1:] / 2 + prd[2:] = tmp + prd[0:-2] += tmp + return prd + + +def chebmul(c1, c2): + """ + Multiply one Chebyshev series by another. + + Returns the product of two Chebyshev series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their product. + + See Also + -------- + chebadd, chebsub, chebmulx, chebdiv, chebpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Chebyshev polynomial basis set. Thus, to express + the product as a C-series, it is typically necessary to "reproject" + the product onto said basis set, which typically produces + "unintuitive" (but correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebmul(c1,c2) # multiplication requires "reprojection" + array([ 6.5, 12. , 12. , 4. , 1.5]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + prd = _zseries_mul(z1, z2) + ret = _zseries_to_cseries(prd) + return pu.trimseq(ret) + + +def chebdiv(c1, c2): + """ + Divide one Chebyshev series by another. + + Returns the quotient-with-remainder of two Chebyshev series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Chebyshev series coefficients representing the quotient and + remainder. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebpow + + Notes + ----- + In general, the (polynomial) division of one C-series by another + results in quotient and remainder terms that are not in the Chebyshev + polynomial basis set. Thus, to express these results as C-series, it + is typically necessary to "reproject" the results onto said basis + set, which typically produces "unintuitive" (but correct) results; + see Examples section below.
+ + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> C.chebdiv(c2,c1) # neither "intuitive" + (array([0., 2.]), array([-2., -4.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError # FIXME: add message with details to exception + + # note: this is more efficient than `pu._div(chebmul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1] * 0, c1 + elif lc2 == 1: + return c1 / c2[-1], c1[:1] * 0 + else: + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + quo, rem = _zseries_div(z1, z2) + quo = pu.trimseq(_zseries_to_cseries(quo)) + rem = pu.trimseq(_zseries_to_cseries(rem)) + return quo, rem + + +def chebpow(c, pow, maxpower=16): + """Raise a Chebyshev series to a power. + + Returns the Chebyshev series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high, + i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised. + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16. + + Returns + ------- + coef : ndarray + Chebyshev series of power. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebdiv + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ]) + + """ + # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it + # avoids converting between z and c series repeatedly + + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + zs = _cseries_to_zseries(c) + prd = zs + for i in range(2, power + 1): + prd = np.convolve(prd, zs) + return _zseries_to_cseries(prd) + + +def chebder(c, m=1, scl=1, axis=0): + """ + Differentiate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` + while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + + 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axes correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable.
(Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Chebyshev series of the derivative. + + See Also + -------- + chebint + + Notes + ----- + In general, the result of differentiating a C-series needs to be + "reprojected" onto the C-series basis set. Thus, typically, the + result of this function is "unintuitive," albeit correct; see Examples + section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3,4) + >>> C.chebder(c) + array([14., 12., 24.]) + >>> C.chebder(c,3) + array([96.]) + >>> C.chebder(c,scl=-1) + array([-14., -12., -24.]) + >>> C.chebder(c,2,-1) + array([12., 96.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2 * j) * c[j] + c[j - 2] += (j * c[j]) / (j - 2) + if n > 1: + der[1] = 4 * c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] + represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axes correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be non-negative. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + C-series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + chebder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`.
+ Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a`- perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3) + >>> C.chebint(c) + array([ 0.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,3) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary + 0.00625 ]) + >>> C.chebint(c, k=3) + array([ 3.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,lbnd=-2) + array([ 8.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,scl=-2) + array([-1., 1., -1., -1.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1] / 4 + for j in range(2, n): + tmp[j + 1] = c[j] / (2 * (j + 1)) + tmp[j - 1] -= c[j] / (2 * (j - 1)) + tmp[0] += k[i] - chebval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebval(x, c, tensor=True): + """ + Evaluate a Chebyshev series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. 
+ tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + chebval2d, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + x2 = 2 * x + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + c0 = c[-i] - c1 + c1 = tmp + c1 * x2 + return c0 + c1 * x + + +def chebval2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than 2 the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + chebval, chebgrid2d, chebval3d, chebgrid3d + """ + return pu._valnd(chebval, c, x, y) + + +def chebgrid2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b), + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D.
The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebval3d, chebgrid3d + """ + return pu._gridnd(chebval, c, x, y) + + +def chebval3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebgrid3d + """ + return pu._valnd(chebval, c, x, y, z) + + +def chebgrid3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape.
+ + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional Chebyshev series at points in the + Cartesian product of `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebval3d + """ + return pu._gridnd(chebval, c, x, y, z) + + +def chebvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = T_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Chebyshev polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and + ``chebval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Chebyshev series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the degree of the + corresponding Chebyshev polynomial. The dtype will be the same as + the converted `x`. + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. + v[0] = x * 0 + 1 + if ideg > 0: + x2 = 2 * x + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i - 1] * x2 - v[i - 2] + return np.moveaxis(v, 0, -1) + + +def chebvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Chebyshev polynomials. + + If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Chebyshev + series of the same degrees and sample points.
+ + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + chebvander, chebvander3d, chebval2d, chebval3d + """ + return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg) + + +def chebvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Chebyshev polynomials. + + If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + chebvander, chebvander2d, chebval2d, chebval3d + """ + return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg) + + +def chebfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Chebyshev series to data. + + Return the coefficients of a Chebyshev series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials.
If `deg` is a single integer, + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is ``len(x)*eps``, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Chebyshev coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + chebval : Evaluates a Chebyshev series. + chebvander : Vandermonde matrix of Chebyshev series. + chebweight : Chebyshev weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Chebyshev series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. 
+ + Fits using Chebyshev series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate, splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import chebyshev as C + >>> x = np.linspace(-1, 1, 51) + >>> y = x**3 - x # equals 0.25*T_3(x) - 0.25*T_1(x) exactly + >>> np.round(C.chebfit(x, y, 3), 4) + 0 # +0 turns a possible -0.0 into 0.0 + array([ 0. , -0.25, 0. , 0.25]) + + """ + return pu._fit(chebvander, x, y, deg, rcond, full, w) + + +def chebcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a Chebyshev basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.array([1.] + [np.sqrt(.5)] * (n - 1)) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[0] = np.sqrt(.5) + top[1:] = 1 / 2 + bot[...] = top + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * .5 + return mat + + +def chebroots(c): + """ + Compute the roots of a Chebyshev series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * T_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix. Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Chebyshev series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as cheb + >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots + array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = chebcompanion(c)[::-1, ::-1] + r = la.eigvals(m) + r.sort() + return r + + +def chebinterpolate(func, deg, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the Chebyshev series that interpolates `func` at the Chebyshev + points of the first kind in the interval [-1, 1].
The interpolating + series tends to a minmax approximation to `func` with increasing `deg` + if the function is continuous in the interval. + + Parameters + ---------- + func : function + The function to be approximated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + args : tuple, optional + Extra arguments to be used in the function call. Default is no extra + arguments. + + Returns + ------- + coef : ndarray, shape (deg + 1,) + Chebyshev coefficients of the interpolating series ordered from low to + high. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8) + array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17, + -5.42457905e-02, -2.71387850e-16, 4.51658839e-03, + 2.46716228e-17, -3.79694221e-04, -3.26899002e-16]) + + Notes + ----- + The Chebyshev polynomials used in the interpolation are orthogonal when + sampled at the Chebyshev points of the first kind. If it is desired to + constrain some of the coefficients they can simply be set to the desired + value after the interpolation; no new interpolation or fit is needed. This + is especially useful if it is known a priori that some of the coefficients + are zero. For instance, if the function is even then the coefficients of + the terms of odd degree in the result can be set to zero. + + """ + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int") + if deg < 0: + raise ValueError("expected deg >= 0") + + order = deg + 1 + xcheb = chebpts1(order) + yfunc = func(xcheb, *args) + m = chebvander(xcheb, deg) + c = np.dot(m.T, yfunc) + c[0] /= order + c[1:] /= 0.5 * order + + return c + + +def chebgauss(deg): + """ + Gauss-Chebyshev quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + w : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100; higher degrees may + be problematic. For Gauss-Chebyshev there are closed form solutions for + the sample points and weights. If n = `deg`, then + + .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n)) + + .. math:: w_i = \\pi / n + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + x = np.cos(np.pi * np.arange(1, 2 * ideg, 2) / (2.0 * ideg)) + w = np.ones(ideg) * (np.pi / ideg) + + return x, w + + +def chebweight(x): + """ + The weight function of the Chebyshev polynomials. + + The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of + integration is :math:`[-1, 1]`. The Chebyshev polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = 1. / (np.sqrt(1. + x) * np.sqrt(1.
- x)) + return w + + +def chebpts1(npts): + """ + Chebyshev points of the first kind. + + The Chebyshev points of the first kind are the points ``cos(x)``, + where ``x = [pi*(k + .5)/npts for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the first kind. + + See Also + -------- + chebpts2 + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 1: + raise ValueError("npts must be >= 1") + + x = 0.5 * np.pi / _npts * np.arange(-_npts + 1, _npts + 1, 2) + return np.sin(x) + + +def chebpts2(npts): + """ + Chebyshev points of the second kind. + + The Chebyshev points of the second kind are the points ``cos(x)``, + where ``x = [pi*k/(npts - 1) for k in range(npts)]`` sorted in ascending + order. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the second kind. + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 2: + raise ValueError("npts must be >= 2") + + x = np.linspace(-np.pi, 0, _npts) + return np.cos(x) + + +# +# Chebyshev series class +# + +class Chebyshev(ABCPolyBase): + """A Chebyshev series class. + + The Chebyshev class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Chebyshev coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(chebadd) + _sub = staticmethod(chebsub) + _mul = staticmethod(chebmul) + _div = staticmethod(chebdiv) + _pow = staticmethod(chebpow) + _val = staticmethod(chebval) + _int = staticmethod(chebint) + _der = staticmethod(chebder) + _fit = staticmethod(chebfit) + _line = staticmethod(chebline) + _roots = staticmethod(chebroots) + _fromroots = staticmethod(chebfromroots) + + @classmethod + def interpolate(cls, func, deg, domain=None, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the series that interpolates `func` at the Chebyshev points of + the first kind scaled and shifted to the `domain`. The resulting series + tends to a minmax approximation of `func` when the function is + continuous in the domain. + + Parameters + ---------- + func : function + The function to be interpolated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + domain : {None, [beg, end]}, optional + Domain over which `func` is interpolated. The default is None, in + which case the domain is [-1, 1]. + args : tuple, optional + Extra arguments to be used in the function call. 
Default is no + extra arguments. + + Returns + ------- + polynomial : Chebyshev instance + Interpolating Chebyshev instance. + + Notes + ----- + See `numpy.polynomial.chebinterpolate` for more details. + + """ + if domain is None: + domain = cls.domain + xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args) + coef = chebinterpolate(xfunc, deg) + return cls(coef, domain=domain) + + # Virtual properties + domain = np.array(chebdomain) + window = np.array(chebdomain) + basis_name = 'T' diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/chebyshev.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/chebyshev.pyi new file mode 100644 index 0000000..ec342df --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/chebyshev.pyi @@ -0,0 +1,181 @@ +from collections.abc import Callable, Iterable +from typing import Any, Concatenate, Final, Self, TypeVar, overload +from typing import Literal as L + +import numpy as np +import numpy.typing as npt +from numpy._typing import _IntLike_co + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _CoefSeries, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncPts, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, + _Series, + _SeriesLikeCoef_co, +) +from .polyutils import trimcoef as chebtrim + +__all__ = [ + "chebzero", + "chebone", + "chebx", + "chebdomain", + "chebline", + "chebadd", + "chebsub", + "chebmulx", + "chebmul", + "chebdiv", + "chebpow", + "chebval", + "chebder", + "chebint", + "cheb2poly", + "poly2cheb", + "chebfromroots", + "chebvander", + "chebfit", + "chebtrim", + "chebroots", + "chebpts1", + "chebpts2", + "Chebyshev", + "chebval2d", + "chebval3d", + "chebgrid2d", + "chebgrid3d", + "chebvander2d", + "chebvander3d", + "chebcompanion", + "chebgauss", + "chebweight", + "chebinterpolate", +] + +_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_mul( + z1: npt.NDArray[_NumberOrObjectT], + z2: npt.NDArray[_NumberOrObjectT], +) -> _Series[_NumberOrObjectT]: ... +def _zseries_div( + z1: npt.NDArray[_NumberOrObjectT], + z2: npt.NDArray[_NumberOrObjectT], +) -> _Series[_NumberOrObjectT]: ... +def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... 
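+# Typing sketch (illustrative): given the declarations below, a checker
+# such as mypy infers, e.g.,
+#
+#   r = chebroots([1.0, 2.0])        # -> _Series[np.float64]
+#   q = chebroots([1.0 + 0j, 2.0])   # -> _Series[np.complex128]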
+ +poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] +cheb2poly: _FuncUnOp[L["cheb2poly"]] + +chebdomain: Final[_Array2[np.float64]] +chebzero: Final[_Array1[np.int_]] +chebone: Final[_Array1[np.int_]] +chebx: Final[_Array2[np.int_]] + +chebline: _FuncLine[L["chebline"]] +chebfromroots: _FuncFromRoots[L["chebfromroots"]] +chebadd: _FuncBinOp[L["chebadd"]] +chebsub: _FuncBinOp[L["chebsub"]] +chebmulx: _FuncUnOp[L["chebmulx"]] +chebmul: _FuncBinOp[L["chebmul"]] +chebdiv: _FuncBinOp[L["chebdiv"]] +chebpow: _FuncPow[L["chebpow"]] +chebder: _FuncDer[L["chebder"]] +chebint: _FuncInteg[L["chebint"]] +chebval: _FuncVal[L["chebval"]] +chebval2d: _FuncVal2D[L["chebval2d"]] +chebval3d: _FuncVal3D[L["chebval3d"]] +chebvalfromroots: _FuncValFromRoots[L["chebvalfromroots"]] +chebgrid2d: _FuncVal2D[L["chebgrid2d"]] +chebgrid3d: _FuncVal3D[L["chebgrid3d"]] +chebvander: _FuncVander[L["chebvander"]] +chebvander2d: _FuncVander2D[L["chebvander2d"]] +chebvander3d: _FuncVander3D[L["chebvander3d"]] +chebfit: _FuncFit[L["chebfit"]] +chebcompanion: _FuncCompanion[L["chebcompanion"]] +chebroots: _FuncRoots[L["chebroots"]] +chebgauss: _FuncGauss[L["chebgauss"]] +chebweight: _FuncWeight[L["chebweight"]] +chebpts1: _FuncPts[L["chebpts1"]] +chebpts2: _FuncPts[L["chebpts2"]] + +# keep in sync with `Chebyshev.interpolate` +_RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) +@overload +def chebinterpolate( + func: np.ufunc, + deg: _IntLike_co, + args: tuple[()] = ..., +) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... +@overload +def chebinterpolate( + func: Callable[[npt.NDArray[np.float64]], _RT], + deg: _IntLike_co, + args: tuple[()] = ..., +) -> npt.NDArray[_RT]: ... +@overload +def chebinterpolate( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], + deg: _IntLike_co, + args: Iterable[Any], +) -> npt.NDArray[_RT]: ... + +class Chebyshev(ABCPolyBase[L["T"]]): + @overload + @classmethod + def interpolate( + cls, + func: Callable[[npt.NDArray[np.float64]], _CoefSeries], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None = ..., + args: tuple[()] = ..., + ) -> Self: ... + @overload + @classmethod + def interpolate( + cls, + func: Callable[ + Concatenate[npt.NDArray[np.float64], ...], + _CoefSeries, + ], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None = ..., + *, + args: Iterable[Any], + ) -> Self: ... + @overload + @classmethod + def interpolate( + cls, + func: Callable[ + Concatenate[npt.NDArray[np.float64], ...], + _CoefSeries, + ], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None, + args: Iterable[Any], + ) -> Self: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite.py new file mode 100644 index 0000000..47e1dfc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite.py @@ -0,0 +1,1740 @@ +""" +============================================================== +Hermite Series, "Physicists" (:mod:`numpy.polynomial.hermite`) +============================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite series, including a `Hermite` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Hermite + +Constants +--------- +.. 
autosummary:: + :toctree: generated/ + + hermdomain + hermzero + hermone + hermx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermadd + hermsub + hermmulx + hermmul + hermdiv + hermpow + hermval + hermval2d + hermval3d + hermgrid2d + hermgrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermder + hermint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermfromroots + hermroots + hermvander + hermvander2d + hermvander3d + hermgauss + hermweight + hermcompanion + hermfit + hermtrim + hermline + herm2poly + poly2herm + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] + +hermtrim = pu.trimcoef + + +def poly2herm(pol): + """ + poly2herm(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herm2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import poly2herm + >>> poly2herm(np.arange(4)) + array([1. , 2.75 , 0.5 , 0.375]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermadd(hermmulx(res), pol[i]) + return res + + +def herm2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herm + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import herm2poly + >>> herm2poly([ 1. 
, 2.75 , 0.5 , 0.375])
+    array([0., 1., 2., 3.])
+
+    """
+    from .polynomial import polyadd, polymulx, polysub
+
+    [c] = pu.as_series([c])
+    n = len(c)
+    if n == 1:
+        return c
+    if n == 2:
+        c[1] *= 2
+        return c
+    else:
+        c0 = c[-2]
+        c1 = c[-1]
+        # i is the current degree of c1
+        for i in range(n - 1, 1, -1):
+            tmp = c0
+            c0 = polysub(c[i - 2], c1 * (2 * (i - 1)))
+            c1 = polyadd(tmp, polymulx(c1) * 2)
+        return polyadd(c0, polymulx(c1) * 2)
+
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Hermite
+hermdomain = np.array([-1., 1.])
+
+# Hermite coefficients representing zero.
+hermzero = np.array([0])
+
+# Hermite coefficients representing one.
+hermone = np.array([1])
+
+# Hermite coefficients representing the identity x.
+hermx = np.array([0, 1 / 2])
+
+
+def hermline(off, scl):
+    """
+    Hermite series whose graph is a straight line.
+
+    Parameters
+    ----------
+    off, scl : scalars
+        The specified line is given by ``off + scl*x``.
+
+    Returns
+    -------
+    y : ndarray
+        This module's representation of the Hermite series for
+        ``off + scl*x``.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyline
+    numpy.polynomial.chebyshev.chebline
+    numpy.polynomial.legendre.legline
+    numpy.polynomial.laguerre.lagline
+    numpy.polynomial.hermite_e.hermeline
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite import hermline, hermval
+    >>> hermval(0,hermline(3, 2))
+    3.0
+    >>> hermval(1,hermline(3, 2))
+    5.0
+
+    """
+    if scl != 0:
+        return np.array([off, scl / 2])
+    else:
+        return np.array([off])
+
+
+def hermfromroots(roots):
+    """
+    Generate a Hermite series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in Hermite form, where the :math:`r_n` are the roots specified in `roots`.
+    If a zero has multiplicity n, then it must appear in `roots` n times.
+    For instance, if 2 is a root of multiplicity three and 3 is a root of
+    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+    roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in Hermite form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients. If all roots are real then `out` is a
+        real array; if some of the roots are complex, then `out` is complex
+        even if all the coefficients in the result are real (see Examples
+        below).
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyfromroots
+    numpy.polynomial.legendre.legfromroots
+    numpy.polynomial.laguerre.lagfromroots
+    numpy.polynomial.chebyshev.chebfromroots
+    numpy.polynomial.hermite_e.hermefromroots
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite import hermfromroots, hermval
+    >>> coef = hermfromroots((-1, 0, 1))
+    >>> hermval((-1, 0, 1), coef)
+    array([0., 0., 0.])
+    >>> coef = hermfromroots((-1j, 1j))
+    >>> hermval((-1j, 1j), coef)
+    array([0.+0.j, 0.+0.j])
+
+    """
+    return pu._fromroots(hermline, hermmul, roots)
+
+
+def hermadd(c1, c2):
+    """
+    Add one Hermite series to another.
+
+    Returns the sum of two Hermite series `c1` + `c2`. The arguments
+    are sequences of coefficients ordered from lowest order term to
+    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
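A quick check of the coefficient convention just stated, and of the poly2herm/herm2poly round trip above; a minimal editorial sketch assuming only NumPy:

import numpy as np
from numpy.polynomial import hermite as H

# [1, 2, 3] encodes 1*H_0 + 2*H_1 + 3*H_2; addition is component-wise.
print(H.hermadd([1, 2, 3], [1, 2, 3, 4]))  # [2. 4. 6. 4.]
# The basis conversions invert each other.
assert np.allclose(H.herm2poly(H.poly2herm([0, 1, 2, 3])), [0, 1, 2, 3])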
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermsub, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermadd + >>> hermadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermsub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermadd, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermsub + >>> hermsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermmulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmulx + >>> hermmulx([1, 2, 3]) + array([2. , 6.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] / 2 + for i in range(1, len(c)): + prd[i + 1] = c[i] / 2 + prd[i - 1] += c[i] * i + return prd + + +def hermmul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermadd, hermsub, hermmulx, hermdiv, hermpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. 
Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmul + >>> hermmul([1, 2, 3], [0, 1, 2]) + array([52., 29., 52., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermsub(c[-i] * xs, c1 * (2 * (nd - 1))) + c1 = hermadd(tmp, hermmulx(c1) * 2) + return hermadd(c0, hermmulx(c1) * 2) + + +def hermdiv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermpow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermdiv + >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([2., 2.])) + >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(hermmul, c1, c2) + + +def hermpow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermdiv + + Examples + -------- + >>> from numpy.polynomial.hermite import hermpow + >>> hermpow([1, 2, 3], 2) + array([81., 52., 82., 12., 9.]) + + """ + return pu._pow(hermmul, c, pow, maxpower) + + +def hermder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite series. + + Returns the Hermite series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). 
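Before the derivative details continue, a small consistency sketch for the product, division, and x-multiplication routines above (an editorial addition, NumPy only):

import numpy as np
from numpy.polynomial import hermite as H

a, b = [1, 2, 3], [0, 1, 2]
quo, rem = H.hermdiv(H.hermmul(a, b), b)  # divide the reprojected product
assert np.allclose(quo, a) and np.allclose(rem, 0)
# x*H_2(x) = H_3(x)/2 + 2*H_1(x), matching the hermmulx recursion:
print(H.hermmulx([0, 0, 1]))  # [0.  2.  0.  0.5]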
The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` + while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If `c` is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermder + >>> hermder([ 1. , 0.5, 0.5, 0.5]) + array([1., 2., 3.]) + >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = (2 * j) * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite series. + + Returns the Hermite series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. 
If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Hermite series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermint + >>> hermint([1,2,3]) # integrate once, value 0 at 0. + array([1. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. + array([2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 + array([-2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) + array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] / 2 + for j in range(1, n): + tmp[j + 1] = c[j] / (2 * (j + 1)) + tmp[0] += k[i] - hermval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermval(x, c, tensor=True): + """ + Evaluate an Hermite series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. 
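As an aside on the calculus pair defined above, hermder undoes hermint; a minimal sketch (an editorial addition, NumPy only, with values taken from the docstring examples):

import numpy as np
from numpy.polynomial import hermite as H

ci = H.hermint([1, 2, 3])  # [1., 0.5, 0.5, 0.5]; value 0 at lbnd=0
assert np.allclose(H.hermder(ci), [1, 2, 3])  # differentiation inverts it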
If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermval2d, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval + >>> coef = [1,2,3] + >>> hermval(1, coef) + 11.0 + >>> hermval([[1,2],[3,4]], coef) + array([[ 11., 51.], + [115., 203.]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + x2 = x * 2 + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * (2 * (nd - 1)) + c1 = tmp + c1 * x2 + return c0 + c1 * x2 + + +def hermval2d(x, y, c): + """ + Evaluate a 2-D Hermite series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. 
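The Clenshaw recursion used by hermval above can be cross-checked against an explicit basis expansion; a minimal editorial sketch assuming only NumPy:

import numpy as np
from numpy.polynomial import hermite as H

x = np.linspace(-1, 1, 5)
# H_0 = 1, H_1 = 2x, H_2 = 4x**2 - 2, so [1, 2, 3] evaluates to:
expected = 1 + 2 * (2 * x) + 3 * (4 * x**2 - 2)
assert np.allclose(H.hermval(x, [1, 2, 3]), expected)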
+ + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermval, hermgrid2d, hermval3d, hermgrid3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval2d + >>> x = [1, 2] + >>> y = [4, 5] + >>> c = [[1, 2, 3], [4, 5, 6]] + >>> hermval2d(x, y, c) + array([1035., 2883.]) + + """ + return pu._valnd(hermval, c, x, y) + + +def hermgrid2d(x, y, c): + """ + Evaluate a 2-D Hermite series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermval3d, hermgrid3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgrid2d + >>> x = [1, 2, 3] + >>> y = [4, 5] + >>> c = [[1, 2, 3], [4, 5, 6]] + >>> hermgrid2d(x, y, c) + array([[1035., 1599.], + [1867., 2883.], + [2699., 4167.]]) + + """ + return pu._gridnd(hermval, c, x, y) + + +def hermval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. 
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermval, hermval2d, hermgrid2d, hermgrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite import hermval3d
+    >>> x = [1, 2]
+    >>> y = [4, 5]
+    >>> z = [6, 7]
+    >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
+    >>> hermval3d(x, y, z, c)
+    array([ 40077., 120131.])
+
+    """
+    return pu._valnd(hermval, c, x, y, z)
+
+
+def hermgrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)
+
+    where the points ``(a, b, c)`` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermval, hermval2d, hermgrid2d, hermval3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite import hermgrid3d
+    >>> x = [1, 2]
+    >>> y = [4, 5]
+    >>> z = [6, 7]
+    >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
+    >>> hermgrid3d(x, y, z, c)
+    array([[[ 40077.,  54117.],
+            [ 49293.,  66561.]],
+           [[ 72375.,  97719.],
+            [ 88975., 120131.]]])
+
+    """
+    return pu._gridnd(hermval, c, x, y, z)
+
+
+def hermvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = H_i(x),
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Hermite polynomial.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
+    ``hermval(x, c)`` are the same up to roundoff.
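That equivalence is easy to verify numerically; a small editorial sketch (NumPy only):

import numpy as np
from numpy.polynomial import hermite as H

x = np.linspace(-1, 1, 9)
c = np.array([0.5, -1.0, 0.25, 2.0])        # arbitrary degree-3 coefficients
V = H.hermvander(x, 3)                      # shape (9, 4)
assert np.allclose(V @ c, H.hermval(x, c))  # identical up to roundoff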
This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Hermite series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Hermite polynomial. The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite import hermvander
+    >>> x = np.array([-1, 0, 1])
+    >>> hermvander(x, 3)
+    array([[ 1., -2.,  2.,  4.],
+           [ 1.,  0., -2., -0.],
+           [ 1.,  2.,  2., -4.]])
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        x2 = x * 2
+        v[1] = x2
+        for i in range(2, ideg + 1):
+            v[i] = (v[i - 1] * x2 - v[i - 2] * (2 * (i - 1)))
+    return np.moveaxis(v, 0, -1)
+
+
+def hermvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the degrees of
+    the Hermite polynomials.
+
+    If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Hermite
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermvander, hermvander3d, hermval2d, hermval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite import hermvander2d
+    >>> x = np.array([-1, 0, 1])
+    >>> y = np.array([-1, 0, 1])
+    >>> hermvander2d(x, y, [2, 2])
+    array([[ 1., -2.,  2., -2.,  4., -4.,  2., -4.,  4.],
+           [ 1.,  0., -2.,  0.,  0., -0., -2., -0.,  4.],
+           [ 1.,  2.,  2.,  2.,  4.,  4.,  2.,  4.,  4.]])
+
+    """
+    return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg)
+
+
+def hermvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in
+    `x`, `y`, `z`, then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the degrees of the Hermite polynomials.
+
+    If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Hermite
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermvander, hermvander2d, hermval2d, hermval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite import hermvander3d
+    >>> x = np.array([-1, 0, 1])
+    >>> y = np.array([-1, 0, 1])
+    >>> z = np.array([-1, 0, 1])
+    >>> hermvander3d(x, y, z, [0, 1, 2])
+    array([[ 1., -2.,  2., -2.,  4., -4.],
+           [ 1.,  0., -2.,  0.,  0., -0.],
+           [ 1.,  2.,  2.,  2.,  4.,  4.]])
+
+    """
+    return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg)
+
+
+def hermfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Hermite series to data.
+
+    Return the coefficients of a Hermite series of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller than
+        this relative to the largest singular value will be ignored. The
+        default value is len(x)*eps, where eps is the relative precision of
+        the float type, about 2e-16 in most cases.
+    full : bool, optional
+        Switch determining nature of return value.
When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite_e.hermefit + hermval : Evaluates a Hermite series. + hermvander : Vandermonde matrix of Hermite series. + hermweight : Hermite weight function + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Hermite series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Hermite series are probably most useful when the data can be + approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Hermite + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermweight`. + + References + ---------- + .. 
[1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermfit, hermval + >>> x = np.linspace(-10, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = hermval(x, [1, 2, 3]) + err + >>> hermfit(x, y, 2) + array([1.02294967, 2.00016403, 2.99994614]) # may vary + + """ + return pu._fit(hermvander, x, y, deg, rcond, full, w) + + +def hermcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Hermite basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Examples + -------- + >>> from numpy.polynomial.hermite import hermcompanion + >>> hermcompanion([1, 0, 1]) + array([[0. , 0.35355339], + [0.70710678, 0. ]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-.5 * c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1. / np.sqrt(2. * np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.sqrt(.5 * np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl * c[:-1] / (2.0 * c[-1]) + return mat + + +def hermroots(c): + """ + Compute the roots of a Hermite series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * H_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Hermite series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermroots, hermfromroots + >>> coef = hermfromroots([-1, 0, 1]) + >>> coef + array([0. , 0.25 , 0. 
, 0.125]) + >>> hermroots(coef) + array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-.5 * c[0] / c[1]]) + + # rotated companion matrix reduces error + m = hermcompanion(c)[::-1, ::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_n(x, n): + """ + Evaluate a normalized Hermite polynomial. + + Compute the value of the normalized Hermite polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized Hermite function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard Hermite functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1 / np.sqrt(np.sqrt(np.pi))) + + c0 = 0. + c1 = 1. / np.sqrt(np.sqrt(np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(2. / nd) + nd = nd - 1.0 + return c0 + c1 * x * np.sqrt(2) + + +def hermgauss(deg): + """ + Gauss-Hermite quadrature. + + Computes the sample points and weights for Gauss-Hermite quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`H_n`, and then scaling the results to get + the right value when integrating 1. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgauss + >>> hermgauss(2) + (array([-0.70710678, 0.70710678]), array([0.88622693, 0.88622693])) + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1], dtype=np.float64) + m = hermcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_n(x, ideg) + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2 * ideg) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1 / (fm * fm) + + # for Hermite we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= np.sqrt(np.pi) / w.sum() + + return x, w + + +def hermweight(x): + """ + Weight function of the Hermite polynomials. + + The weight function is :math:`\\exp(-x^2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. 
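The quadrature rule from hermgauss above integrates against exactly this weight; an illustrative editorial sketch (NumPy only), exact here because x**4 has degree at most 2*5 - 1:

import numpy as np
from numpy.polynomial import hermite as H

x, w = H.hermgauss(5)
# integral of x**4 * exp(-x**2) over the real line is 3*sqrt(pi)/4
assert np.isclose((w * x**4).sum(), 3 * np.sqrt(np.pi) / 4)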
The Hermite polynomials are
+    orthogonal, but not normalized, with respect to this weight function.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which the weight function will be computed.
+
+    Returns
+    -------
+    w : ndarray
+        The weight function at `x`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite import hermweight
+    >>> x = np.arange(-2, 2)
+    >>> hermweight(x)
+    array([0.01831564, 0.36787944, 1.        , 0.36787944])
+
+    """
+    w = np.exp(-x**2)
+    return w
+
+
+#
+# Hermite series class
+#
+
+class Hermite(ABCPolyBase):
+    """An Hermite series class.
+
+    The Hermite class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed below.
+
+    Parameters
+    ----------
+    coef : array_like
+        Hermite coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [-1., 1.].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [-1., 1.].
+    symbol : str, optional
+        Symbol used to represent the independent variable in string
+        representations of the polynomial expression, e.g. for printing.
+        The symbol must be a valid Python identifier. Default value is 'x'.
+
+        .. versionadded:: 1.24
+
+    """
+    # Virtual Functions
+    _add = staticmethod(hermadd)
+    _sub = staticmethod(hermsub)
+    _mul = staticmethod(hermmul)
+    _div = staticmethod(hermdiv)
+    _pow = staticmethod(hermpow)
+    _val = staticmethod(hermval)
+    _int = staticmethod(hermint)
+    _der = staticmethod(hermder)
+    _fit = staticmethod(hermfit)
+    _line = staticmethod(hermline)
+    _roots = staticmethod(hermroots)
+    _fromroots = staticmethod(hermfromroots)
+
+    # Virtual properties
+    domain = np.array(hermdomain)
+    window = np.array(hermdomain)
+    basis_name = 'H'
diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite.pyi
new file mode 100644
index 0000000..f7d907c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite.pyi
@@ -0,0 +1,107 @@
+from typing import Any, Final, TypeVar
+from typing import Literal as L
+
+import numpy as np
+
+from ._polybase import ABCPolyBase
+from ._polytypes import (
+    _Array1,
+    _Array2,
+    _FuncBinOp,
+    _FuncCompanion,
+    _FuncDer,
+    _FuncFit,
+    _FuncFromRoots,
+    _FuncGauss,
+    _FuncInteg,
+    _FuncLine,
+    _FuncPoly2Ortho,
+    _FuncPow,
+    _FuncRoots,
+    _FuncUnOp,
+    _FuncVal,
+    _FuncVal2D,
+    _FuncVal3D,
+    _FuncValFromRoots,
+    _FuncVander,
+    _FuncVander2D,
+    _FuncVander3D,
+    _FuncWeight,
+)
+from .polyutils import trimcoef as hermtrim
+
+__all__ = [
+    "hermzero",
+    "hermone",
+    "hermx",
+    "hermdomain",
+    "hermline",
+    "hermadd",
+    "hermsub",
+    "hermmulx",
+    "hermmul",
+    "hermdiv",
+    "hermpow",
+    "hermval",
+    "hermder",
+    "hermint",
+    "herm2poly",
+    "poly2herm",
+    "hermfromroots",
+    "hermvander",
+    "hermfit",
+    "hermtrim",
+    "hermroots",
+    "Hermite",
+    "hermval2d",
+    "hermval3d",
+    "hermgrid2d",
+    "hermgrid3d",
+    "hermvander2d",
+    "hermvander3d",
+    "hermcompanion",
+    "hermgauss",
+    "hermweight",
+]
+
+poly2herm: _FuncPoly2Ortho[L["poly2herm"]]
+herm2poly: _FuncUnOp[L["herm2poly"]]
+
+hermdomain: Final[_Array2[np.float64]]
+hermzero: Final[_Array1[np.int_]]
+hermone: Final[_Array1[np.int_]]
+hermx: Final[_Array2[np.int_]]
+
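For orientation, the runtime values behind the constant declarations above behave as follows (an editorial sketch, not part of the stub file):

import numpy as np
from numpy.polynomial import hermite as H

assert np.array_equal(H.hermdomain, [-1.0, 1.0])
# hermx encodes the identity, since x = H_1(x)/2:
assert H.hermval(0.5, H.hermx) == 0.5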
+hermline: _FuncLine[L["hermline"]] +hermfromroots: _FuncFromRoots[L["hermfromroots"]] +hermadd: _FuncBinOp[L["hermadd"]] +hermsub: _FuncBinOp[L["hermsub"]] +hermmulx: _FuncUnOp[L["hermmulx"]] +hermmul: _FuncBinOp[L["hermmul"]] +hermdiv: _FuncBinOp[L["hermdiv"]] +hermpow: _FuncPow[L["hermpow"]] +hermder: _FuncDer[L["hermder"]] +hermint: _FuncInteg[L["hermint"]] +hermval: _FuncVal[L["hermval"]] +hermval2d: _FuncVal2D[L["hermval2d"]] +hermval3d: _FuncVal3D[L["hermval3d"]] +hermvalfromroots: _FuncValFromRoots[L["hermvalfromroots"]] +hermgrid2d: _FuncVal2D[L["hermgrid2d"]] +hermgrid3d: _FuncVal3D[L["hermgrid3d"]] +hermvander: _FuncVander[L["hermvander"]] +hermvander2d: _FuncVander2D[L["hermvander2d"]] +hermvander3d: _FuncVander3D[L["hermvander3d"]] +hermfit: _FuncFit[L["hermfit"]] +hermcompanion: _FuncCompanion[L["hermcompanion"]] +hermroots: _FuncRoots[L["hermroots"]] + +_ND = TypeVar("_ND", bound=Any) +def _normed_hermite_n( + x: np.ndarray[_ND, np.dtype[np.float64]], + n: int | np.intp, +) -> np.ndarray[_ND, np.dtype[np.float64]]: ... + +hermgauss: _FuncGauss[L["hermgauss"]] +hermweight: _FuncWeight[L["hermweight"]] + +class Hermite(ABCPolyBase[L["H"]]): ... diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite_e.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite_e.py new file mode 100644 index 0000000..d30fc1b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite_e.py @@ -0,0 +1,1642 @@ +""" +=================================================================== +HermiteE Series, "Probabilists" (:mod:`numpy.polynomial.hermite_e`) +=================================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite_e series, including a `HermiteE` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + HermiteE + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermedomain + hermezero + hermeone + hermex + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermeadd + hermesub + hermemulx + hermemul + hermediv + hermepow + hermeval + hermeval2d + hermeval3d + hermegrid2d + hermegrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermeder + hermeint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermefromroots + hermeroots + hermevander + hermevander2d + hermevander3d + hermegauss + hermeweight + hermecompanion + hermefit + hermetrim + hermeline + herme2poly + poly2herme + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] + +hermetrim = pu.trimcoef + + +def poly2herme(pol): + """ + poly2herme(pol) + + Convert a polynomial to a Hermite series. 
+ + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herme2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite_e import poly2herme + >>> poly2herme(np.arange(4)) + array([ 2., 10., 2., 3.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermeadd(hermemulx(res), pol[i]) + return res + + +def herme2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herme + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import herme2poly + >>> herme2poly([ 2., 10., 2., 3.]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1 * (i - 1)) + c1 = polyadd(tmp, polymulx(c1)) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermedomain = np.array([-1., 1.]) + +# Hermite coefficients representing zero. +hermezero = np.array([0]) + +# Hermite coefficients representing one. +hermeone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermex = np.array([0, 1]) + + +def hermeline(off, scl): + """ + Hermite series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. 
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyline
+    numpy.polynomial.chebyshev.chebline
+    numpy.polynomial.legendre.legline
+    numpy.polynomial.laguerre.lagline
+    numpy.polynomial.hermite.hermline
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
+    >>> hermeval(0, hermeline(3, 2))
+    3.0
+    >>> hermeval(1, hermeline(3, 2))
+    5.0
+
+    """
+    if scl != 0:
+        return np.array([off, scl])
+    else:
+        return np.array([off])
+
+
+def hermefromroots(roots):
+    """
+    Generate a HermiteE series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in HermiteE form, where the :math:`r_n` are the roots specified in `roots`.
+    If a zero has multiplicity n, then it must appear in `roots` n times.
+    For instance, if 2 is a root of multiplicity three and 3 is a root of
+    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+    roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in HermiteE form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients.  If all roots are real then `out` is a
+        real array, if some of the roots are complex, then `out` is complex
+        even if all the coefficients in the result are real (see Examples
+        below).
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyfromroots
+    numpy.polynomial.legendre.legfromroots
+    numpy.polynomial.laguerre.lagfromroots
+    numpy.polynomial.hermite.hermfromroots
+    numpy.polynomial.chebyshev.chebfromroots
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
+    >>> coef = hermefromroots((-1, 0, 1))
+    >>> hermeval((-1, 0, 1), coef)
+    array([0., 0., 0.])
+    >>> coef = hermefromroots((-1j, 1j))
+    >>> hermeval((-1j, 1j), coef)
+    array([0.+0.j, 0.+0.j])
+
+    """
+    return pu._fromroots(hermeline, hermemul, roots)
+
+
+def hermeadd(c1, c2):
+    """
+    Add one Hermite series to another.
+
+    Returns the sum of two Hermite series `c1` + `c2`.  The arguments
+    are sequences of coefficients ordered from lowest order term to
+    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the Hermite series of their sum.
+
+    See Also
+    --------
+    hermesub, hermemulx, hermemul, hermediv, hermepow
+
+    Notes
+    -----
+    Unlike multiplication, division, etc., the sum of two Hermite series
+    is a Hermite series (without having to "reproject" the result onto
+    the basis set) so addition, just like that of "standard" polynomials,
+    is simply "component-wise."
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeadd
+    >>> hermeadd([1, 2, 3], [1, 2, 3, 4])
+    array([2., 4., 6., 4.])
+
+    """
+    return pu._add(c1, c2)
+
+
+def hermesub(c1, c2):
+    """
+    Subtract one Hermite series from another.
+
+    Returns the difference of two Hermite series `c1` - `c2`.  The
+    sequences of coefficients are from lowest order term to highest, i.e.,
+    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Hermite series coefficients representing their difference.
+
+    See Also
+    --------
+    hermeadd, hermemulx, hermemul, hermediv, hermepow
+
+    Notes
+    -----
+    Unlike multiplication, division, etc., the difference of two Hermite
+    series is a Hermite series (without having to "reproject" the result
+    onto the basis set) so subtraction, just like that of "standard"
+    polynomials, is simply "component-wise."
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermesub
+    >>> hermesub([1, 2, 3, 4], [1, 2, 3])
+    array([0., 0., 0., 4.])
+
+    """
+    return pu._sub(c1, c2)
+
+
+def hermemulx(c):
+    """Multiply a Hermite series by x.
+
+    Multiply the Hermite series `c` by x, where x is the independent
+    variable.
+
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the result of the multiplication.
+
+    See Also
+    --------
+    hermeadd, hermesub, hermemul, hermediv, hermepow
+
+    Notes
+    -----
+    The multiplication uses the recursion relationship for Hermite
+    polynomials in the form
+
+    .. math::
+
+        xP_i(x) = P_{i + 1}(x) + iP_{i - 1}(x)
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemulx
+    >>> hermemulx([1, 2, 3])
+    array([2., 7., 2., 3.])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    # The zero series needs special treatment
+    if len(c) == 1 and c[0] == 0:
+        return c
+
+    prd = np.empty(len(c) + 1, dtype=c.dtype)
+    prd[0] = c[0] * 0
+    prd[1] = c[0]
+    for i in range(1, len(c)):
+        prd[i + 1] = c[i]
+        prd[i - 1] += c[i] * i
+    return prd
+
+
+def hermemul(c1, c2):
+    """
+    Multiply one Hermite series by another.
+
+    Returns the product of two Hermite series `c1` * `c2`.  The arguments
+    are sequences of coefficients, from lowest order "term" to highest,
+    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Hermite series coefficients representing their product.
+
+    See Also
+    --------
+    hermeadd, hermesub, hermemulx, hermediv, hermepow
+
+    Notes
+    -----
+    In general, the (polynomial) product of two C-series results in terms
+    that are not in the Hermite polynomial basis set.  Thus, to express
+    the product as a Hermite series, it is necessary to "reproject" the
+    product onto said basis set, which may produce "unintuitive" (but
+    correct) results; see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemul
+    >>> hermemul([1, 2, 3], [0, 1, 2])
+    array([14., 15., 28.,  7.,  6.])
+
+    """
+    # s1, s2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+
+    if len(c1) > len(c2):
+        c = c2
+        xs = c1
+    else:
+        c = c1
+        xs = c2
+
+    if len(c) == 1:
+        c0 = c[0] * xs
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0] * xs
+        c1 = c[1] * xs
+    else:
+        nd = len(c)
+        c0 = c[-2] * xs
+        c1 = c[-1] * xs
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            nd = nd - 1
+            c0 = hermesub(c[-i] * xs, c1 * (nd - 1))
+            c1 = hermeadd(tmp, hermemulx(c1))
+    return hermeadd(c0, hermemulx(c1))
+
+
+def hermediv(c1, c2):
+    """
+    Divide one Hermite series by another.
+
+    Returns the quotient-with-remainder of two Hermite series
+    `c1` / `c2`.
The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermepow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermediv + >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 2.])) + + """ + return pu._div(hermemul, c1, c2) + + +def hermepow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermediv + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermepow + >>> hermepow([1, 2, 3], 2) + array([23., 28., 46., 12., 9.]) + + """ + return pu._pow(hermemul, c, pow, maxpower) + + +def hermeder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite_e series. + + Returns the series coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` + while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 + is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermeint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. 
Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeder + >>> hermeder([ 1., 1., 1., 1.]) + array([1., 2., 3.]) + >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + return c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = j * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite_e series. + + Returns the Hermite_e series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Hermite_e series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermeder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. 
Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeint + >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. + array([1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary + >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. + array([2., 1., 1., 1.]) + >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 + array([-1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) + array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j] / (j + 1) + tmp[0] += k[i] - hermeval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeval(x, c, tensor=True): + """ + Evaluate an HermiteE series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. 
If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermeval2d, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeval + >>> coef = [1,2,3] + >>> hermeval(1, coef) + 3.0 + >>> hermeval([[1,2],[3,4]], coef) + array([[ 3., 14.], + [31., 54.]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * (nd - 1) + c1 = tmp + c1 * x + return c0 + c1 * x + + +def hermeval2d(x, y, c): + """ + Evaluate a 2-D HermiteE series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermeval, hermegrid2d, hermeval3d, hermegrid3d + """ + return pu._valnd(hermeval, c, x, y) + + +def hermegrid2d(x, y, c): + """ + Evaluate a 2-D HermiteE series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. 
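+
+    A small shape check of these semantics (illustrative only):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite_e import hermegrid2d
+    >>> c = np.ones((2, 2))
+    >>> hermegrid2d([0, 1], [0, 1, 2], c).shape
+    (2, 3)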
+ + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d, hermegrid3d + """ + return pu._gridnd(hermeval, c, x, y) + + +def hermeval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite_e series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermegrid3d + """ + return pu._valnd(hermeval, c, x, y, z) + + +def hermegrid3d(x, y, z, c): + """ + Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. 
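+
+    For example, as a shape check (illustrative only):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite_e import hermegrid3d
+    >>> c = np.ones((2, 2, 2))
+    >>> hermegrid3d([0, 1], [0, 1, 2], [0, 1, 2, 3], c).shape
+    (2, 3, 4)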
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`.  If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermeval, hermeval2d, hermegrid2d, hermeval3d
+    """
+    return pu._gridnd(hermeval, c, x, y, z)
+
+
+def hermevander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = He_i(x),
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the HermiteE polynomial.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
+    ``hermeval(x, c)`` are the same up to roundoff.  This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of HermiteE series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix.  The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding HermiteE polynomial.  The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite_e import hermevander
+    >>> x = np.array([-1, 0, 1])
+    >>> hermevander(x, 3)
+    array([[ 1., -1.,  0.,  2.],
+           [ 1.,  0., -1., -0.],
+           [ 1.,  1.,  0., -2.]])
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i - 1] * x - v[i - 2] * (i - 1))
+    return np.moveaxis(v, 0, -1)
+
+
+def hermevander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the degrees of
+    the HermiteE polynomials.
+
+    If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
+    up to roundoff.
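+
+    A quick numerical check of that equivalence (illustrative; any small
+    degrees and real sample points would do equally well):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite_e import hermevander2d, hermeval2d
+    >>> x = y = np.array([0.5, -1.0])
+    >>> c = np.arange(6.0).reshape(2, 3)
+    >>> V = hermevander2d(x, y, [1, 2])
+    >>> np.allclose(V @ c.ravel(), hermeval2d(x, y, c))
+    True
+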
+    This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape.  The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex.  Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`.  The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermevander, hermevander3d, hermeval2d, hermeval3d
+    """
+    return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg)
+
+
+def hermevander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``.  If `l`, `m`, `n` are the given degrees in
+    `x`, `y`, `z`, then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``.  The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the degrees of the HermiteE polynomials.
+
+    If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape.  The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex.  Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermevander, hermevander2d, hermeval2d, hermeval3d
+    """
+    return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg)
+
+
+def hermefit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Hermite series to data.
+
+    Return the coefficients of a HermiteE series of degree `deg` that is
+    the least squares fit to the data values `y` given at points `x`. If
+    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
+    multiple fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points.
Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full = False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.laguerre.lagfit + hermeval : Evaluates a Hermite series. + hermevander : pseudo Vandermonde matrix of Hermite series. + hermeweight : HermiteE weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the HermiteE series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` + are the coefficients to be solved for, and the elements of `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. 
The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using HermiteE series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the HermiteE + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermeweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite_e import hermefit, hermeval + >>> x = np.linspace(-10, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = hermeval(x, [1, 2, 3]) + err + >>> hermefit(x, y, 2) + array([1.02284196, 2.00032805, 2.99978457]) # may vary + + """ + return pu._fit(hermevander, x, y, deg, rcond, full, w) + + +def hermecompanion(c): + """ + Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an HermiteE basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of HermiteE series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1. / np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.sqrt(np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl * c[:-1] / c[-1] + return mat + + +def hermeroots(c): + """ + Compute the roots of a HermiteE series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * He_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.chebyshev.chebroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The HermiteE series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. 
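+
+    As a short illustration of the companion-matrix route (sketch only;
+    for a pure basis polynomial the scaled companion matrix is symmetric,
+    which is what makes `numpy.linalg.eigvalsh` applicable):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite_e import hermecompanion
+    >>> m = hermecompanion([0, 0, 0, 1])  # coefficients of He_3
+    >>> np.allclose(m, m.T)
+    True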
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots + >>> coef = hermefromroots([-1, 0, 1]) + >>> coef + array([0., 2., 0., 1.]) + >>> hermeroots(coef) + array([-1., 0., 1.]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = hermecompanion(c)[::-1, ::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_e_n(x, n): + """ + Evaluate a normalized HermiteE polynomial. + + Compute the value of the normalized HermiteE polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized HermiteE function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard HermiteE functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1 / np.sqrt(np.sqrt(2 * np.pi))) + + c0 = 0. + c1 = 1. / np.sqrt(np.sqrt(2 * np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(1. / nd) + nd = nd - 1.0 + return c0 + c1 * x + + +def hermegauss(deg): + """ + Gauss-HermiteE quadrature. + + Computes the sample points and weights for Gauss-HermiteE quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2/2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`He_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1]) + m = hermecompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_e_n(x, ideg) + df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_e_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1 / (fm * fm) + + # for Hermite_e we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= np.sqrt(2 * np.pi) / w.sum() + + return x, w + + +def hermeweight(x): + """Weight function of the Hermite_e polynomials. + + The weight function is :math:`\\exp(-x^2/2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the HermiteE polynomials are + orthogonal, but not normalized, with respect to this weight function. 
+ + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = np.exp(-.5 * x**2) + return w + + +# +# HermiteE series class +# + +class HermiteE(ABCPolyBase): + """An HermiteE series class. + + The HermiteE class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + HermiteE coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermeadd) + _sub = staticmethod(hermesub) + _mul = staticmethod(hermemul) + _div = staticmethod(hermediv) + _pow = staticmethod(hermepow) + _val = staticmethod(hermeval) + _int = staticmethod(hermeint) + _der = staticmethod(hermeder) + _fit = staticmethod(hermefit) + _line = staticmethod(hermeline) + _roots = staticmethod(hermeroots) + _fromroots = staticmethod(hermefromroots) + + # Virtual properties + domain = np.array(hermedomain) + window = np.array(hermedomain) + basis_name = 'He' diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite_e.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite_e.pyi new file mode 100644 index 0000000..e8013e6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/hermite_e.pyi @@ -0,0 +1,107 @@ +from typing import Any, Final, TypeVar +from typing import Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermetrim + +__all__ = [ + "hermezero", + "hermeone", + "hermex", + "hermedomain", + "hermeline", + "hermeadd", + "hermesub", + "hermemulx", + "hermemul", + "hermediv", + "hermepow", + "hermeval", + "hermeder", + "hermeint", + "herme2poly", + "poly2herme", + "hermefromroots", + "hermevander", + "hermefit", + "hermetrim", + "hermeroots", + "HermiteE", + "hermeval2d", + "hermeval3d", + "hermegrid2d", + "hermegrid3d", + "hermevander2d", + "hermevander3d", + "hermecompanion", + "hermegauss", + "hermeweight", +] + +poly2herme: _FuncPoly2Ortho[L["poly2herme"]] +herme2poly: _FuncUnOp[L["herme2poly"]] + +hermedomain: Final[_Array2[np.float64]] +hermezero: Final[_Array1[np.int_]] +hermeone: Final[_Array1[np.int_]] +hermex: Final[_Array2[np.int_]] + +hermeline: _FuncLine[L["hermeline"]] +hermefromroots: _FuncFromRoots[L["hermefromroots"]] +hermeadd: _FuncBinOp[L["hermeadd"]] +hermesub: _FuncBinOp[L["hermesub"]] +hermemulx: _FuncUnOp[L["hermemulx"]] +hermemul: 
_FuncBinOp[L["hermemul"]] +hermediv: _FuncBinOp[L["hermediv"]] +hermepow: _FuncPow[L["hermepow"]] +hermeder: _FuncDer[L["hermeder"]] +hermeint: _FuncInteg[L["hermeint"]] +hermeval: _FuncVal[L["hermeval"]] +hermeval2d: _FuncVal2D[L["hermeval2d"]] +hermeval3d: _FuncVal3D[L["hermeval3d"]] +hermevalfromroots: _FuncValFromRoots[L["hermevalfromroots"]] +hermegrid2d: _FuncVal2D[L["hermegrid2d"]] +hermegrid3d: _FuncVal3D[L["hermegrid3d"]] +hermevander: _FuncVander[L["hermevander"]] +hermevander2d: _FuncVander2D[L["hermevander2d"]] +hermevander3d: _FuncVander3D[L["hermevander3d"]] +hermefit: _FuncFit[L["hermefit"]] +hermecompanion: _FuncCompanion[L["hermecompanion"]] +hermeroots: _FuncRoots[L["hermeroots"]] + +_ND = TypeVar("_ND", bound=Any) +def _normed_hermite_e_n( + x: np.ndarray[_ND, np.dtype[np.float64]], + n: int | np.intp, +) -> np.ndarray[_ND, np.dtype[np.float64]]: ... + +hermegauss: _FuncGauss[L["hermegauss"]] +hermeweight: _FuncWeight[L["hermeweight"]] + +class HermiteE(ABCPolyBase[L["He"]]): ... diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/laguerre.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/laguerre.py new file mode 100644 index 0000000..38eb5a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/laguerre.py @@ -0,0 +1,1675 @@ +""" +================================================== +Laguerre Series (:mod:`numpy.polynomial.laguerre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Laguerre series, including a `Laguerre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Laguerre + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + lagdomain + lagzero + lagone + lagx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + lagadd + lagsub + lagmulx + lagmul + lagdiv + lagpow + lagval + lagval2d + lagval3d + laggrid2d + laggrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + lagder + lagint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + lagfromroots + lagroots + lagvander + lagvander2d + lagvander3d + laggauss + lagweight + lagcompanion + lagfit + lagtrim + lagline + lag2poly + poly2lag + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] + +lagtrim = pu.trimcoef + + +def poly2lag(pol): + """ + poly2lag(pol) + + Convert a polynomial to a Laguerre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Laguerre series, ordered + from lowest to highest degree. 
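+
+    As a quick round-trip sanity check of the conversion described below
+    (illustrative; `lag2poly` inverts it):
+
+    >>> from numpy.polynomial.laguerre import poly2lag, lag2poly
+    >>> lag2poly(poly2lag([0, 1, 2, 3]))
+    array([0., 1., 2., 3.])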
+ + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Laguerre + series. + + See Also + -------- + lag2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import poly2lag + >>> poly2lag(np.arange(4)) + array([ 23., -63., 58., -18.]) + + """ + [pol] = pu.as_series([pol]) + res = 0 + for p in pol[::-1]: + res = lagadd(lagmulx(res), p) + return res + + +def lag2poly(c): + """ + Convert a Laguerre series to a polynomial. + + Convert an array representing the coefficients of a Laguerre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Laguerre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2lag + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lag2poly + >>> lag2poly([ 23., -63., 58., -18.]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, polysub((2 * i - 1) * c1, polymulx(c1)) / i) + return polyadd(c0, polysub(c1, polymulx(c1))) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Laguerre +lagdomain = np.array([0., 1.]) + +# Laguerre coefficients representing zero. +lagzero = np.array([0]) + +# Laguerre coefficients representing one. +lagone = np.array([1]) + +# Laguerre coefficients representing the identity x. +lagx = np.array([1, -1]) + + +def lagline(off, scl): + """ + Laguerre series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Laguerre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagline, lagval + >>> lagval(0,lagline(3, 2)) + 3.0 + >>> lagval(1,lagline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off + scl, -scl]) + else: + return np.array([off]) + + +def lagfromroots(roots): + """ + Generate a Laguerre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Laguerre form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. 
+ For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Laguerre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfromroots, lagval + >>> coef = lagfromroots((-1, 0, 1)) + >>> lagval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = lagfromroots((-1j, 1j)) + >>> lagval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(lagline, lagmul, roots) + + +def lagadd(c1, c2): + """ + Add one Laguerre series to another. + + Returns the sum of two Laguerre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Laguerre series of their sum. + + See Also + -------- + lagsub, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Laguerre series + is a Laguerre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagadd + >>> lagadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def lagsub(c1, c2): + """ + Subtract one Laguerre series from another. + + Returns the difference of two Laguerre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their difference. + + See Also + -------- + lagadd, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Laguerre + series is a Laguerre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagsub + >>> lagsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def lagmulx(c): + """Multiply a Laguerre series by x. + + Multiply the Laguerre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. 
+ + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + + Notes + ----- + The multiplication uses the recursion relationship for Laguerre + polynomials in the form + + .. math:: + + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmulx + >>> lagmulx([1, 2, 3]) + array([-1., -1., 11., -9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] + prd[1] = -c[0] + for i in range(1, len(c)): + prd[i + 1] = -c[i] * (i + 1) + prd[i] += c[i] * (2 * i + 1) + prd[i - 1] -= c[i] * i + return prd + + +def lagmul(c1, c2): + """ + Multiply one Laguerre series by another. + + Returns the product of two Laguerre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their product. + + See Also + -------- + lagadd, lagsub, lagmulx, lagdiv, lagpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Laguerre polynomial basis set. Thus, to express + the product as a Laguerre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmul + >>> lagmul([1, 2, 3], [0, 1, 2]) + array([ 8., -13., 38., -51., 36.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = lagsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = lagadd(tmp, lagsub((2 * nd - 1) * c1, lagmulx(c1)) / nd) + return lagadd(c0, lagsub(c1, lagmulx(c1))) + + +def lagdiv(c1, c2): + """ + Divide one Laguerre series by another. + + Returns the quotient-with-remainder of two Laguerre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Laguerre series coefficients representing the quotient and + remainder. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagpow + + Notes + ----- + In general, the (polynomial) division of one Laguerre series by another + results in quotient and remainder terms that are not in the Laguerre + polynomial basis set. Thus, to express these results as a Laguerre + series, it is necessary to "reproject" the results onto the Laguerre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. 
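+
+    The quotient and remainder always satisfy
+    ``c1 == lagadd(lagmul(quo, c2), rem)`` up to roundoff, which gives a
+    quick numerical check:
+
+    >>> from numpy.polynomial import laguerre as La
+    >>> quo, rem = La.lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
+    >>> La.lagadd(La.lagmul(quo, [0, 1, 2]), rem)
+    array([  8., -13.,  38., -51.,  36.])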
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagdiv + >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(lagmul, c1, c2) + + +def lagpow(c, pow, maxpower=16): + """Raise a Laguerre series to a power. + + Returns the Laguerre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Laguerre series of power. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagdiv + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagpow + >>> lagpow([1, 2, 3], 2) + array([ 14., -16., 56., -72., 54.]) + + """ + return pu._pow(lagmul, c, pow, maxpower) + + +def lagder(c, m=1, scl=1, axis=0): + """ + Differentiate a Laguerre series. + + Returns the Laguerre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Laguerre series of the derivative. + + See Also + -------- + lagint + + Notes + ----- + In general, the result of differentiating a Laguerre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. 
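+
+    The implementation uses the recurrence
+
+    .. math:: L_n'(x) = L_{n-1}'(x) - L_{n-1}(x)
+
+    applied from the highest degree downward: each coefficient ``c[j]``
+    contributes ``-c[j]`` to the derivative term of degree ``j - 1`` and is
+    then folded into ``c[j - 1]``.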
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagder + >>> lagder([ 1., 1., 1., -3.]) + array([1., 2., 3.]) + >>> lagder([ 1., 0., 0., -4., 3.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 1, -1): + der[j - 1] = -c[j] + c[j - 1] += c[j] + der[0] = -c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Laguerre series. + + Returns the Laguerre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Laguerre series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + lagder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. 
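+
+    Term by term, the integration uses the antiderivative relation
+
+    .. math:: \\int L_n(x)\\,dx = L_n(x) - L_{n+1}(x),
+
+    after which the integration constant is chosen so that the result has
+    the requested value at `lbnd`.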
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagint + >>> lagint([1,2,3]) + array([ 1., 1., 1., -3.]) + >>> lagint([1,2,3], m=2) + array([ 1., 0., 0., -4., 3.]) + >>> lagint([1,2,3], k=1) + array([ 2., 1., 1., -3.]) + >>> lagint([1,2,3], lbnd=-1) + array([11.5, 1. , 1. , -3. ]) + >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) + array([ 11.16666667, -5. , -3. , 2. ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] + tmp[1] = -c[0] + for j in range(1, n): + tmp[j] += c[j] + tmp[j + 1] = -c[j] + tmp[0] += k[i] - lagval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagval(x, c, tensor=True): + """ + Evaluate a Laguerre series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. 
+ + See Also + -------- + lagval2d, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval + >>> coef = [1, 2, 3] + >>> lagval(1, coef) + -0.5 + >>> lagval([[1, 2],[3, 4]], coef) + array([[-0.5, -4. ], + [-4.5, -2. ]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1 * (nd - 1)) / nd + c1 = tmp + (c1 * ((2 * nd - 1) - x)) / nd + return c0 + c1 * (1 - x) + + +def lagval2d(x, y, c): + """ + Evaluate a 2-D Laguerre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + lagval, laggrid2d, lagval3d, laggrid3d + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval2d + >>> c = [[1, 2],[3, 4]] + >>> lagval2d(1, 1, c) + 1.0 + """ + return pu._valnd(lagval, c, x, y) + + +def laggrid2d(x, y, c): + """ + Evaluate a 2-D Laguerre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. 
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`.  If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Laguerre series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    lagval, lagval2d, lagval3d, laggrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import laggrid2d
+    >>> c = [[1, 2], [3, 4]]
+    >>> laggrid2d([0, 1], [0, 1], c)
+    array([[10.,  4.],
+           [ 3.,  1.]])
+
+    """
+    return pu._gridnd(lagval, c, x, y)
+
+
+def lagval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Laguerre series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape.  If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagval, lagval2d, laggrid2d, laggrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagval3d
+    >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+    >>> lagval3d(1, 1, 2, c)
+    -1.0
+
+    """
+    return pu._valnd(lagval, c, x, y, z)
+
+
+def laggrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+    where the points ``(a, b, c)`` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`.  If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagval, lagval2d, laggrid2d, lagval3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import laggrid3d
+    >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+    >>> laggrid3d([0, 1], [0, 1], [2, 4], c)
+    array([[[ -4., -44.],
+            [ -2., -18.]],
+           [[ -2., -14.],
+            [ -1.,  -5.]]])
+
+    """
+    return pu._gridnd(lagval, c, x, y, z)
+
+
+def lagvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = L_i(x)
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Laguerre polynomial.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
+    ``lagval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Laguerre series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Laguerre polynomial. The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import lagvander
+    >>> x = np.array([0, 1, 2])
+    >>> lagvander(x, 3)
+    array([[ 1.        ,  1.        ,  1.        ,  1.        ],
+           [ 1.        ,  0.        , -0.5       , -0.66666667],
+           [ 1.        , -1.        , -1.        , -0.33333333]])
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        v[1] = 1 - x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i - 1] * (2 * i - 1 - x) - v[i - 2] * (i - 1)) / i
+    return np.moveaxis(v, 0, -1)
+
+
+def lagvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the degrees of
+    the Laguerre polynomials.
+
+    If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Laguerre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    lagvander, lagvander3d, lagval2d, lagval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import lagvander2d
+    >>> x = np.array([0])
+    >>> y = np.array([2])
+    >>> lagvander2d(x, y, [2, 1])
+    array([[ 1., -1.,  1., -1.,  1., -1.]])
+
+    """
+    return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)
+
+
+def lagvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the degrees of the Laguerre polynomials.
+
+    If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Laguerre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagvander, lagvander2d, lagval2d, lagval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import lagvander3d
+    >>> x = np.array([0])
+    >>> y = np.array([2])
+    >>> z = np.array([0])
+    >>> lagvander3d(x, y, z, [2, 1, 3])
+    array([[ 1.,  1.,  1.,  1., -1., -1., -1., -1.,  1.,  1.,  1.,  1., -1.,
+            -1., -1., -1.,  1.,  1.,  1.,  1., -1., -1., -1., -1.]])
+
+    """
+    return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)
+
+
+def lagfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Laguerre series to data.
+
+    Return the coefficients of a Laguerre series of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
+
+    where ``n`` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller than
+        this relative to the largest singular value will be ignored. The
+        default value is len(x)*eps, where eps is the relative precision of
+        the float type, about 2e-16 in most cases.
+    full : bool, optional
+        Switch determining nature of return value. When it is False (the
+        default) just the coefficients are returned, when True diagnostic
+        information from the singular value decomposition is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the weight ``w[i]`` applies to the unsquared
+        residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+        chosen so that the errors of the products ``w[i]*y[i]`` all have the
+        same variance.  When using inverse-variance weighting, use
+        ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+    Returns
+    -------
+    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+        Laguerre coefficients ordered from low to high. If `y` was 2-D,
+        the coefficients for the data in column *k* of `y` are in column
+        *k*.
+
+    [residuals, rank, singular_values, rcond] : list
+        These values are only returned if ``full == True``
+
+        - residuals -- sum of squared residuals of the least squares fit
+        - rank -- the numerical rank of the scaled Vandermonde matrix
+        - singular_values -- singular values of the scaled Vandermonde matrix
+        - rcond -- value of `rcond`.
+
+        For more details, see `numpy.linalg.lstsq`.
+
+    Warns
+    -----
+    RankWarning
+        The rank of the coefficient matrix in the least-squares fit is
+        deficient. The warning is only raised if ``full == False``.
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + lagval : Evaluates a Laguerre series. + lagvander : pseudo Vandermonde matrix of Laguerre series. + lagweight : Laguerre weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Laguerre series ``p`` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where ``V`` is the weighted pseudo Vandermonde matrix of `x`, ``c`` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of ``V``. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Laguerre series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Laguerre + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `lagweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import lagfit, lagval + >>> x = np.linspace(0, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = lagval(x, [1, 2, 3]) + err + >>> lagfit(x, y, 2) + array([1.00578369, 1.99417356, 2.99827656]) # may vary + + """ + return pu._fit(lagvander, x, y, deg, rcond, full, w) + + +def lagcompanion(c): + """ + Return the companion matrix of c. + + The usual companion matrix of the Laguerre polynomials is already + symmetric when `c` is a basis Laguerre polynomial, so no scaling is + applied. + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagcompanion + >>> lagcompanion([1, 2, 3]) + array([[ 1. , -0.33333333], + [-1. , 4.33333333]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[1 + c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + top = mat.reshape(-1)[1::n + 1] + mid = mat.reshape(-1)[0::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = -np.arange(1, n) + mid[...] = 2. * np.arange(n) + 1. + bot[...] 
= top
+    mat[:, -1] += (c[:-1] / c[-1]) * n
+    return mat
+
+
+def lagroots(c):
+    """
+    Compute the roots of a Laguerre series.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * L_i(x).
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the series. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    The Laguerre series basis polynomials aren't powers of `x` so the
+    results of this function may seem unintuitive.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagroots, lagfromroots
+    >>> coef = lagfromroots([0, 1, 2])
+    >>> coef
+    array([  2.,  -8.,  12.,  -6.])
+    >>> lagroots(coef)
+    array([-4.4408921e-16,  1.0000000e+00,  2.0000000e+00])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) <= 1:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([1 + c[0] / c[1]])
+
+    # rotated companion matrix reduces error
+    m = lagcompanion(c)[::-1, ::-1]
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+def laggauss(deg):
+    """
+    Gauss-Laguerre quadrature.
+
+    Computes the sample points and weights for Gauss-Laguerre quadrature.
+    These sample points and weights will correctly integrate polynomials of
+    degree :math:`2*deg - 1` or less over the interval :math:`[0, \\infty]`
+    with the weight function :math:`f(x) = \\exp(-x)`.
+
+    Parameters
+    ----------
+    deg : int
+        Number of sample points and weights. It must be >= 1.
+
+    Returns
+    -------
+    x : ndarray
+        1-D ndarray containing the sample points.
+    y : ndarray
+        1-D ndarray containing the weights.
+
+    Notes
+    -----
+    The results have only been tested up to degree 100; higher degrees may
+    be problematic. The weights are determined by using the fact that
+
+    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
+
+    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+    is the k'th root of :math:`L_n`, and then scaling the results to get
+    the right value when integrating 1.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import laggauss
+    >>> laggauss(2)
+    (array([0.58578644, 3.41421356]), array([0.85355339, 0.14644661]))
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg <= 0:
+        raise ValueError("deg must be a positive integer")
+
+    # first approximation of roots. We use the fact that the companion
+    # matrix is symmetric in this case in order to obtain better zeros.
+    c = np.array([0] * deg + [1])
+    m = lagcompanion(c)
+    x = la.eigvalsh(m)
+
+    # improve roots by one application of Newton
+    dy = lagval(x, c)
+    df = lagval(x, lagder(c))
+    x -= dy / df
+
+    # compute the weights. We scale the factor to avoid possible numerical
+    # overflow.
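+    # c[1:] drops the leading zero coefficient, so it represents L_{n-1}:
+    # fm is L_{n-1}(x_k) and df is L'_n(x_k), the two factors in the
+    # weight formula from the Notes. Both are rescaled by their largest
+    # magnitude before forming 1/(fm * df).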
+    fm = lagval(x, c[1:])
+    fm /= np.abs(fm).max()
+    df /= np.abs(df).max()
+    w = 1 / (fm * df)
+
+    # scale w to get the right value, 1 in this case
+    w /= w.sum()
+
+    return x, w
+
+
+def lagweight(x):
+    """Weight function of the Laguerre polynomials.
+
+    The weight function is :math:`\\exp(-x)` and the interval of integration
+    is :math:`[0, \\infty]`. The Laguerre polynomials are orthogonal, but not
+    normalized, with respect to this weight function.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which the weight function will be computed.
+
+    Returns
+    -------
+    w : ndarray
+        The weight function at `x`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import lagweight
+    >>> x = np.array([0, 1, 2])
+    >>> lagweight(x)
+    array([1.        , 0.36787944, 0.13533528])
+
+    """
+    w = np.exp(-x)
+    return w
+
+#
+# Laguerre series class
+#
+
+class Laguerre(ABCPolyBase):
+    """A Laguerre series class.
+
+    The Laguerre class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed below.
+
+    Parameters
+    ----------
+    coef : array_like
+        Laguerre coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [0., 1.].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [0., 1.].
+    symbol : str, optional
+        Symbol used to represent the independent variable in string
+        representations of the polynomial expression, e.g. for printing.
+        The symbol must be a valid Python identifier. Default value is 'x'.
+
+    ..
versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(lagadd) + _sub = staticmethod(lagsub) + _mul = staticmethod(lagmul) + _div = staticmethod(lagdiv) + _pow = staticmethod(lagpow) + _val = staticmethod(lagval) + _int = staticmethod(lagint) + _der = staticmethod(lagder) + _fit = staticmethod(lagfit) + _line = staticmethod(lagline) + _roots = staticmethod(lagroots) + _fromroots = staticmethod(lagfromroots) + + # Virtual properties + domain = np.array(lagdomain) + window = np.array(lagdomain) + basis_name = 'L' diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/laguerre.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/laguerre.pyi new file mode 100644 index 0000000..6f67257 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/laguerre.pyi @@ -0,0 +1,100 @@ +from typing import Final +from typing import Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as lagtrim + +__all__ = [ + "lagzero", + "lagone", + "lagx", + "lagdomain", + "lagline", + "lagadd", + "lagsub", + "lagmulx", + "lagmul", + "lagdiv", + "lagpow", + "lagval", + "lagder", + "lagint", + "lag2poly", + "poly2lag", + "lagfromroots", + "lagvander", + "lagfit", + "lagtrim", + "lagroots", + "Laguerre", + "lagval2d", + "lagval3d", + "laggrid2d", + "laggrid3d", + "lagvander2d", + "lagvander3d", + "lagcompanion", + "laggauss", + "lagweight", +] + +poly2lag: _FuncPoly2Ortho[L["poly2lag"]] +lag2poly: _FuncUnOp[L["lag2poly"]] + +lagdomain: Final[_Array2[np.float64]] +lagzero: Final[_Array1[np.int_]] +lagone: Final[_Array1[np.int_]] +lagx: Final[_Array2[np.int_]] + +lagline: _FuncLine[L["lagline"]] +lagfromroots: _FuncFromRoots[L["lagfromroots"]] +lagadd: _FuncBinOp[L["lagadd"]] +lagsub: _FuncBinOp[L["lagsub"]] +lagmulx: _FuncUnOp[L["lagmulx"]] +lagmul: _FuncBinOp[L["lagmul"]] +lagdiv: _FuncBinOp[L["lagdiv"]] +lagpow: _FuncPow[L["lagpow"]] +lagder: _FuncDer[L["lagder"]] +lagint: _FuncInteg[L["lagint"]] +lagval: _FuncVal[L["lagval"]] +lagval2d: _FuncVal2D[L["lagval2d"]] +lagval3d: _FuncVal3D[L["lagval3d"]] +lagvalfromroots: _FuncValFromRoots[L["lagvalfromroots"]] +laggrid2d: _FuncVal2D[L["laggrid2d"]] +laggrid3d: _FuncVal3D[L["laggrid3d"]] +lagvander: _FuncVander[L["lagvander"]] +lagvander2d: _FuncVander2D[L["lagvander2d"]] +lagvander3d: _FuncVander3D[L["lagvander3d"]] +lagfit: _FuncFit[L["lagfit"]] +lagcompanion: _FuncCompanion[L["lagcompanion"]] +lagroots: _FuncRoots[L["lagroots"]] +laggauss: _FuncGauss[L["laggauss"]] +lagweight: _FuncWeight[L["lagweight"]] + +class Laguerre(ABCPolyBase[L["L"]]): ... 
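Taken together, the routines above compose into a fit/evaluate/quadrature
workflow. A minimal doctest-style sketch (the sample grid and coefficients
here are illustrative, not taken from the patch):

>>> import numpy as np
>>> from numpy.polynomial import laguerre as La
>>> x = np.linspace(0, 5, 50)
>>> y = La.lagval(x, [1, 2, 3])            # exact degree-2 series values
>>> coef = La.lagfit(x, y, 2)              # recover the coefficients
>>> np.allclose(coef, [1, 2, 3])
True
>>> nodes, weights = La.laggauss(3)        # Gauss-Laguerre rule
>>> bool(np.isclose(weights.sum(), 1.0))   # integrates exp(-x) over [0, inf)
True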
diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/legendre.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/legendre.py new file mode 100644 index 0000000..b43bdfa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/legendre.py @@ -0,0 +1,1605 @@ +""" +================================================== +Legendre Series (:mod:`numpy.polynomial.legendre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Legendre series, including a `Legendre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Legendre + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + legdomain + legzero + legone + legx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + legadd + legsub + legmulx + legmul + legdiv + legpow + legval + legval2d + legval3d + leggrid2d + leggrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + legder + legint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + legfromroots + legroots + legvander + legvander2d + legvander3d + leggauss + legweight + legcompanion + legfit + legtrim + legline + leg2poly + poly2leg + +See also +-------- +numpy.polynomial + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] + +legtrim = pu.trimcoef + + +def poly2leg(pol): + """ + Convert a polynomial to a Legendre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Legendre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Legendre + series. + + See Also + -------- + leg2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy import polynomial as P + >>> p = P.Polynomial(np.arange(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], ... + >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) + >>> c + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = legadd(legmulx(res), pol[i]) + return res + + +def leg2poly(c): + """ + Convert a Legendre series to a polynomial. 
+ + Convert an array representing the coefficients of a Legendre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Legendre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2leg + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Legendre(range(4)) + >>> c + Legendre([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., ... + >>> P.legendre.leg2poly(range(4)) + array([-1. , -3.5, 3. , 7.5]) + + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, (polymulx(c1) * (2 * i - 1)) / i) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Legendre +legdomain = np.array([-1., 1.]) + +# Legendre coefficients representing zero. +legzero = np.array([0]) + +# Legendre coefficients representing one. +legone = np.array([1]) + +# Legendre coefficients representing the identity x. +legx = np.array([0, 1]) + + +def legline(off, scl): + """ + Legendre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Legendre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legline(3,2) + array([3, 2]) + >>> L.legval(-3, L.legline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def legfromroots(roots): + """ + Generate a Legendre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Legendre form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Legendre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. 
If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.4, 0. , 0.4]) + >>> j = complex(0,1) + >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary + + """ + return pu._fromroots(legline, legmul, roots) + + +def legadd(c1, c2): + """ + Add one Legendre series to another. + + Returns the sum of two Legendre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Legendre series of their sum. + + See Also + -------- + legsub, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Legendre series + is a Legendre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def legsub(c1, c2): + """ + Subtract one Legendre series from another. + + Returns the difference of two Legendre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their difference. + + See Also + -------- + legadd, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Legendre + series is a Legendre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legsub(c1,c2) + array([-2., 0., 2.]) + >>> L.legsub(c2,c1) # -C.legsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def legmulx(c): + """Multiply a Legendre series by x. + + Multiply the Legendre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + legadd, legsub, legmul, legdiv, legpow + + Notes + ----- + The multiplication uses the recursion relationship for Legendre + polynomials in the form + + .. 
math:: + + xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] + for i in range(1, len(c)): + j = i + 1 + k = i - 1 + s = i + j + prd[j] = (c[i] * j) / s + prd[k] += (c[i] * i) / s + return prd + + +def legmul(c1, c2): + """ + Multiply one Legendre series by another. + + Returns the product of two Legendre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their product. + + See Also + -------- + legadd, legsub, legmulx, legdiv, legpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Legendre polynomial basis set. Thus, to express + the product as a Legendre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2) + >>> L.legmul(c1,c2) # multiplication requires "reprojection" + array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = legsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = legadd(tmp, (legmulx(c1) * (2 * nd - 1)) / nd) + return legadd(c0, legmulx(c1)) + + +def legdiv(c1, c2): + """ + Divide one Legendre series by another. + + Returns the quotient-with-remainder of two Legendre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + quo, rem : ndarrays + Of Legendre series coefficients representing the quotient and + remainder. + + See Also + -------- + legadd, legsub, legmulx, legmul, legpow + + Notes + ----- + In general, the (polynomial) division of one Legendre series by another + results in quotient and remainder terms that are not in the Legendre + polynomial basis set. Thus, to express these results as a Legendre + series, it is necessary to "reproject" the results onto the Legendre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. 
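+
+    The quotient and remainder always satisfy
+    ``c1 == legadd(legmul(quo, c2), rem)`` up to roundoff, which gives a
+    quick numerical check:
+
+    >>> from numpy.polynomial import legendre as L
+    >>> quo, rem = L.legdiv((1, 2, 3), (3, 2, 1))
+    >>> L.legadd(L.legmul(quo, (3, 2, 1)), rem)
+    array([1., 2., 3.])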
+ + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> L.legdiv(c2,c1) # neither "intuitive" + (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary + + """ + return pu._div(legmul, c1, c2) + + +def legpow(c, pow, maxpower=16): + """Raise a Legendre series to a power. + + Returns the Legendre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Legendre series of power. + + See Also + -------- + legadd, legsub, legmulx, legmul, legdiv + + """ + return pu._pow(legmul, c, pow, maxpower) + + +def legder(c, m=1, scl=1, axis=0): + """ + Differentiate a Legendre series. + + Returns the Legendre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Legendre series of the derivative. + + See Also + -------- + legint + + Notes + ----- + In general, the result of differentiating a Legendre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. 
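+
+    The implementation uses the recurrence
+
+    .. math:: P_{n+1}'(x) = (2n + 1)P_n(x) + P_{n-1}'(x),
+
+    applied from the highest degree downward.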
+ + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3,4) + >>> L.legder(c) + array([ 6., 9., 20.]) + >>> L.legder(c, 3) + array([60.]) + >>> L.legder(c, scl=-1) + array([ -6., -9., -20.]) + >>> L.legder(c, 2,-1) + array([ 9., 60.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2 * j - 1) * c[j] + c[j - 2] += c[j] + if n > 1: + der[1] = 3 * c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Legendre series. + + Returns the Legendre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Legendre series coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + legder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. 
Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3) + >>> L.legint(c) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, 3) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + >>> L.legint(c, k=3) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, lbnd=-2) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, scl=2) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1] / 3 + for j in range(2, n): + t = c[j] / (2 * j + 1) + tmp[j + 1] = t + tmp[j - 1] -= t + tmp[0] += k[i] - legval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def legval(x, c, tensor=True): + """ + Evaluate a Legendre series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. 
The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + legval2d, leggrid2d, legval3d, leggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * ((nd - 1) / nd) + c1 = tmp + c1 * x * ((2 * nd - 1) / nd) + return c0 + c1 * x + + +def legval2d(x, y, c): + """ + Evaluate a 2-D Legendre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Legendre series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + legval, leggrid2d, legval3d, leggrid3d + """ + return pu._valnd(legval, c, x, y) + + +def leggrid2d(x, y, c): + """ + Evaluate a 2-D Legendre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. 
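The difference between the paired-point and grid evaluators is easiest to see in the output shapes; a minimal sketch, assuming only a standard NumPy install:

    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> c = np.ones((2, 3))   # degree 1 in x, degree 2 in y
    >>> L.legval2d(np.zeros(4), np.zeros(4), c).shape   # paired points
    (4,)
    >>> L.leggrid2d(np.zeros(4), np.zeros(5), c).shape  # full Cartesian grid
    (4, 5)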
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    legval, legval2d, legval3d, leggrid3d
+    """
+    return pu._gridnd(legval, c, x, y)
+
+
+def legval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, leggrid3d
+    """
+    return pu._valnd(legval, c, x, y, z)
+
+
+def leggrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+    where the points ``(a, b, c)`` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
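Equivalently, the grid form agrees with ``legval3d`` applied to an explicitly constructed mesh; a minimal consistency sketch:

    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x, y, z = np.linspace(-1, 1, 3), np.linspace(-1, 1, 4), np.linspace(-1, 1, 5)
    >>> c = np.ones((2, 2, 2))
    >>> xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')  # points of the product grid
    >>> bool(np.allclose(L.leggrid3d(x, y, z, c), L.legval3d(xx, yy, zz, c)))
    True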
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        multi-degree i,j,k are contained in ``c[i,j,k]``. If `c` has
+        dimension greater than three the remaining indices enumerate
+        multiple sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional Legendre series at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, legval3d
+    """
+    return pu._gridnd(legval, c, x, y, z)
+
+
+def legvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = L_i(x)
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Legendre polynomial.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
+    ``legval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Legendre series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Legendre polynomial. The dtype will be the same as
+        the converted `x`.
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    # Use forward recursion to generate the entries. This is not as accurate
+    # as reverse recursion in this application but it is more efficient.
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
+    return np.moveaxis(v, 0, -1)
+
+
+def legvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the degrees of
+    the Legendre polynomials.
+
+    If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    legvander, legvander3d, legval2d, legval3d
+    """
+    return pu._vander_nd_flat((legvander, legvander), (x, y), deg)
+
+
+def legvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the degrees of the Legendre polynomials.
+
+    If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legvander, legvander2d, legval2d, legval3d
+    """
+    return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg)
+
+
+def legfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Legendre series to data.
+
+    Return the coefficients of a Legendre series of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points.
Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Legendre coefficients ordered from low to high. If `y` was + 2-D, the coefficients for the data in column k of `y` are in + column `k`. If `deg` is specified as a list, coefficients for + terms not included in the fit are set equal to zero in the + returned `coef`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + legval : Evaluates a Legendre series. + legvander : Vandermonde matrix of Legendre series. + legweight : Legendre weight function (= 1). + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Legendre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. 
+    This means that the coefficient values may be poorly determined.
+    Using a lower order fit will usually get rid of the warning. The
+    `rcond` parameter can also be set to a value smaller than its
+    default, but the resulting fit may be spurious and have large
+    contributions from roundoff error.
+
+    Fits using Legendre series are usually better conditioned than fits
+    using power series, but much can depend on the distribution of the
+    sample points and the smoothness of the data. If the quality of the fit
+    is inadequate, splines may be a good alternative.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Curve fitting",
+           https://en.wikipedia.org/wiki/Curve_fitting
+
+    """
+    return pu._fit(legvander, x, y, deg, rcond, full, w)
+
+
+def legcompanion(c):
+    """Return the scaled companion matrix of c.
+
+    The basis polynomials are scaled so that the companion matrix is
+    symmetric when `c` is a Legendre basis polynomial. This provides
+    better eigenvalue estimates than the unscaled case and for basis
+    polynomials the eigenvalues are guaranteed to be real if
+    `numpy.linalg.eigvalsh` is used to obtain them.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Legendre series coefficients ordered from low to high
+        degree.
+
+    Returns
+    -------
+    mat : ndarray
+        Scaled companion matrix of dimensions (deg, deg).
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        raise ValueError('Series must have maximum degree of at least 1.')
+    if len(c) == 2:
+        return np.array([[-c[0] / c[1]]])
+
+    n = len(c) - 1
+    mat = np.zeros((n, n), dtype=c.dtype)
+    scl = 1. / np.sqrt(2 * np.arange(n) + 1)
+    top = mat.reshape(-1)[1::n + 1]
+    bot = mat.reshape(-1)[n::n + 1]
+    top[...] = np.arange(1, n) * scl[:n - 1] * scl[1:n]
+    bot[...] = top
+    mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * (n / (2 * n - 1))
+    return mat
+
+
+def legroots(c):
+    """
+    Compute the roots of a Legendre series.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * L_i(x).
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the series. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyroots
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such values.
+    Roots with multiplicity greater than 1 will also show larger errors as
+    the value of the series near such points is relatively insensitive to
+    errors in the roots. Isolated roots near the origin can be improved by
+    a few iterations of Newton's method.
+
+    The Legendre series basis polynomials aren't powers of ``x`` so the
+    results of this function may seem unintuitive.
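Since the ``legfit`` docstring above has no worked example, here is a minimal sketch tying ``legfit`` to ``legroots``: fit noise-free samples of ``x**3 - x`` and recover its roots.

    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x = np.linspace(-1, 1, 51)
    >>> y = x**3 - x                   # exact cubic, no noise
    >>> c = L.legfit(x, y, 3)          # degree-3 Legendre fit
    >>> bool(np.allclose(L.legval(x, c), y))
    True
    >>> bool(np.allclose(np.sort(L.legroots(c)), [-1.0, 0.0, 1.0]))
    True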
+ + Examples + -------- + >>> import numpy.polynomial.legendre as leg + >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots + array([-0.85099543, -0.11407192, 0.51506735]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = legcompanion(c)[::-1, ::-1] + r = la.eigvals(m) + r.sort() + return r + + +def leggauss(deg): + """ + Gauss-Legendre quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1]) + m = legcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = legval(x, c) + df = legval(x, legder(c)) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = legval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1 / (fm * df) + + # for Legendre we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= 2. / w.sum() + + return x, w + + +def legweight(x): + """ + Weight function of the Legendre polynomials. + + The weight function is :math:`1` and the interval of integration is + :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = x * 0.0 + 1.0 + return w + +# +# Legendre series class +# + +class Legendre(ABCPolyBase): + """A Legendre series class. + + The Legendre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Legendre coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. 
+ symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(legadd) + _sub = staticmethod(legsub) + _mul = staticmethod(legmul) + _div = staticmethod(legdiv) + _pow = staticmethod(legpow) + _val = staticmethod(legval) + _int = staticmethod(legint) + _der = staticmethod(legder) + _fit = staticmethod(legfit) + _line = staticmethod(legline) + _roots = staticmethod(legroots) + _fromroots = staticmethod(legfromroots) + + # Virtual properties + domain = np.array(legdomain) + window = np.array(legdomain) + basis_name = 'P' diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/legendre.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/legendre.pyi new file mode 100644 index 0000000..35ea2ff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/legendre.pyi @@ -0,0 +1,100 @@ +from typing import Final +from typing import Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as legtrim + +__all__ = [ + "legzero", + "legone", + "legx", + "legdomain", + "legline", + "legadd", + "legsub", + "legmulx", + "legmul", + "legdiv", + "legpow", + "legval", + "legder", + "legint", + "leg2poly", + "poly2leg", + "legfromroots", + "legvander", + "legfit", + "legtrim", + "legroots", + "Legendre", + "legval2d", + "legval3d", + "leggrid2d", + "leggrid3d", + "legvander2d", + "legvander3d", + "legcompanion", + "leggauss", + "legweight", +] + +poly2leg: _FuncPoly2Ortho[L["poly2leg"]] +leg2poly: _FuncUnOp[L["leg2poly"]] + +legdomain: Final[_Array2[np.float64]] +legzero: Final[_Array1[np.int_]] +legone: Final[_Array1[np.int_]] +legx: Final[_Array2[np.int_]] + +legline: _FuncLine[L["legline"]] +legfromroots: _FuncFromRoots[L["legfromroots"]] +legadd: _FuncBinOp[L["legadd"]] +legsub: _FuncBinOp[L["legsub"]] +legmulx: _FuncUnOp[L["legmulx"]] +legmul: _FuncBinOp[L["legmul"]] +legdiv: _FuncBinOp[L["legdiv"]] +legpow: _FuncPow[L["legpow"]] +legder: _FuncDer[L["legder"]] +legint: _FuncInteg[L["legint"]] +legval: _FuncVal[L["legval"]] +legval2d: _FuncVal2D[L["legval2d"]] +legval3d: _FuncVal3D[L["legval3d"]] +legvalfromroots: _FuncValFromRoots[L["legvalfromroots"]] +leggrid2d: _FuncVal2D[L["leggrid2d"]] +leggrid3d: _FuncVal3D[L["leggrid3d"]] +legvander: _FuncVander[L["legvander"]] +legvander2d: _FuncVander2D[L["legvander2d"]] +legvander3d: _FuncVander3D[L["legvander3d"]] +legfit: _FuncFit[L["legfit"]] +legcompanion: _FuncCompanion[L["legcompanion"]] +legroots: _FuncRoots[L["legroots"]] +leggauss: _FuncGauss[L["leggauss"]] +legweight: _FuncWeight[L["legweight"]] + +class Legendre(ABCPolyBase[L["P"]]): ... 
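Before the next file, a quick sanity check of the ``leggauss`` rule defined in legendre.py above: a 3-point rule integrates polynomials of degree up to 5 exactly, e.g. ``x**4`` over ``[-1, 1]``. A minimal sketch:

    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x, w = L.leggauss(3)
    >>> round(float(np.sum(w * x**4)), 12)   # exact value is 2/5
    0.4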
diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/polynomial.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/polynomial.py new file mode 100644 index 0000000..32b53b7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/polynomial.py @@ -0,0 +1,1616 @@ +""" +================================================= +Power Series (:mod:`numpy.polynomial.polynomial`) +================================================= + +This module provides a number of objects (mostly functions) useful for +dealing with polynomials, including a `Polynomial` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with polynomial objects is in +the docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Polynomial + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + polydomain + polyzero + polyone + polyx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + polyadd + polysub + polymulx + polymul + polydiv + polypow + polyval + polyval2d + polyval3d + polygrid2d + polygrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + polyder + polyint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + polyfromroots + polyroots + polyvalfromroots + polyvander + polyvander2d + polyvander3d + polycompanion + polyfit + polytrim + polyline + +See Also +-------- +`numpy.polynomial` + +""" +__all__ = [ + 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', + 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', + 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', + 'polycompanion'] + +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +polytrim = pu.trimcoef + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Polynomial default domain. +polydomain = np.array([-1., 1.]) + +# Polynomial coefficients representing zero. +polyzero = np.array([0]) + +# Polynomial coefficients representing one. +polyone = np.array([1]) + +# Polynomial coefficients representing the identity x. +polyx = np.array([0, 1]) + +# +# Polynomial series functions +# + + +def polyline(off, scl): + """ + Returns an array representing a linear polynomial. + + Parameters + ---------- + off, scl : scalars + The "y-intercept" and "slope" of the line, respectively. + + Returns + ------- + y : ndarray + This module's representation of the linear polynomial ``off + + scl*x``. + + See Also + -------- + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyline(1, -1) + array([ 1, -1]) + >>> P.polyval(1, P.polyline(1, -1)) # should be 0 + 0.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def polyfromroots(roots): + """ + Generate a monic polynomial with given roots. + + Return the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... 
* (x - r_n), + + where the :math:`r_n` are the roots specified in `roots`. If a zero has + multiplicity n, then it must appear in `roots` n times. For instance, + if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, + then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear + in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * x + ... + x^n + + The coefficient of the last term is 1 for monic polynomials in this + form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of the polynomial's coefficients If all the roots are + real, then `out` is also real, otherwise it is complex. (see + Examples below). + + See Also + -------- + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Notes + ----- + The coefficients are determined by multiplying together linear factors + of the form ``(x - r_i)``, i.e. + + .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) + + where ``n == len(roots) - 1``; note that this implies that ``1`` is always + returned for :math:`a_n`. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x + array([ 0., -1., 0., 1.]) + >>> j = complex(0,1) + >>> P.polyfromroots((-j,j)) # complex returned, though values are real + array([1.+0.j, 0.+0.j, 1.+0.j]) + + """ + return pu._fromroots(polyline, polymul, roots) + + +def polyadd(c1, c2): + """ + Add one polynomial to another. + + Returns the sum of two polynomials `c1` + `c2`. The arguments are + sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + out : ndarray + The coefficient array representing their sum. + + See Also + -------- + polysub, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> sum = P.polyadd(c1,c2); sum + array([4., 4., 4.]) + >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) + 28.0 + + """ + return pu._add(c1, c2) + + +def polysub(c1, c2): + """ + Subtract one polynomial from another. + + Returns the difference of two polynomials `c1` - `c2`. The arguments + are sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of coefficients representing their difference. + + See Also + -------- + polyadd, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polysub(c1,c2) + array([-2., 0., 2.]) + >>> P.polysub(c2, c1) # -P.polysub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def polymulx(c): + """Multiply a polynomial by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to + high. 
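As a cross-check, multiplying by ``x`` with ``polymulx`` must agree with an explicit ``polymul`` against the identity series ``(0, 1)``; a minimal sketch:

    >>> import numpy as np
    >>> from numpy.polynomial import polynomial as P
    >>> bool(np.allclose(P.polymulx((1, 2, 3)), P.polymul((1, 2, 3), (0, 1))))
    True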
+ + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polymulx(c) + array([0., 1., 2., 3.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1:] = c + return prd + + +def polymul(c1, c2): + """ + Multiply one polynomial by another. + + Returns the product of two polynomials `c1` * `c2`. The arguments are + sequences of coefficients, from lowest order term to highest, e.g., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of coefficients representing a polynomial, relative to the + "standard" basis, and ordered from lowest order term to highest. + + Returns + ------- + out : ndarray + Of the coefficients of their product. + + See Also + -------- + polyadd, polysub, polymulx, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polymul(c1, c2) + array([ 3., 8., 14., 8., 3.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + ret = np.convolve(c1, c2) + return pu.trimseq(ret) + + +def polydiv(c1, c2): + """ + Divide one polynomial by another. + + Returns the quotient-with-remainder of two polynomials `c1` / `c2`. + The arguments are sequences of coefficients, from lowest order term + to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + [quo, rem] : ndarrays + Of coefficient series representing the quotient and remainder. + + See Also + -------- + polyadd, polysub, polymulx, polymul, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polydiv(c1, c2) + (array([3.]), array([-8., -4.])) + >>> P.polydiv(c2, c1) + (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError # FIXME: add message with details to exception + + # note: this is more efficient than `pu._div(polymul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1] * 0, c1 + elif lc2 == 1: + return c1 / c2[-1], c1[:1] * 0 + else: + dlen = lc1 - lc2 + scl = c2[-1] + c2 = c2[:-1] / scl + i = dlen + j = lc1 - 1 + while i >= 0: + c1[i:j] -= c2 * c1[j] + i -= 1 + j -= 1 + return c1[j + 1:] / scl, pu.trimseq(c1[:j + 1]) + + +def polypow(c, pow, maxpower=None): + """Raise a polynomial to a power. + + Returns the polynomial `c` raised to the power `pow`. The argument + `c` is a sequence of coefficients ordered from low to high. i.e., + [1,2,3] is the series ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c : array_like + 1-D array of array of series coefficients ordered from low to + high degree. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Power series of power. 
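The synthetic-division loop in ``polydiv`` above can be validated against the defining identity ``c1 == quo*c2 + rem``; a minimal sketch:

    >>> import numpy as np
    >>> from numpy.polynomial import polynomial as P
    >>> quo, rem = P.polydiv((1, 2, 3), (3, 2, 1))
    >>> bool(np.allclose(P.polyadd(P.polymul(quo, (3, 2, 1)), rem), (1, 2, 3)))
    True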
+ + See Also + -------- + polyadd, polysub, polymulx, polymul, polydiv + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polypow([1, 2, 3], 2) + array([ 1., 4., 10., 12., 9.]) + + """ + # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it + # avoids calling `as_series` repeatedly + return pu._pow(np.convolve, c, pow, maxpower) + + +def polyder(c, m=1, scl=1, axis=0): + """ + Differentiate a polynomial. + + Returns the polynomial coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The + argument `c` is an array of coefficients from low to high degree along + each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` + while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is + ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of polynomial coefficients. If c is multidimensional the + different axis correspond to different variables with the degree + in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change + of variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Polynomial coefficients of the derivative. + + See Also + -------- + polyint + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3, 4) + >>> P.polyder(c) # (d/dx)(c) + array([ 2., 6., 12.]) + >>> P.polyder(c, 3) # (d**3/dx**3)(c) + array([24.]) + >>> P.polyder(c, scl=-1) # (d/d(-x))(c) + array([ -2., -6., -12.]) + >>> P.polyder(c, 2, -1) # (d**2/d(-x)**2)(c) + array([ 6., 24.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + cdt = c.dtype + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=cdt) + for j in range(n, 0, -1): + der[j - 1] = j * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a polynomial. + + Returns the polynomial coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients, from low to high degree along each axis, e.g., [1,2,3] + represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] + represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients, ordered from low to high. 
+ m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + polyder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. Why + is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polyint(c) # should return array([0, 1, 1, 1]) + array([0., 1., 1., 1.]) + >>> P.polyint(c, 3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) + array([ 0. , 0. , 0. , 0.16666667, 0.08333333, # may vary + 0.05 ]) + >>> P.polyint(c, k=3) # should return array([3, 1, 1, 1]) + array([3., 1., 1., 1.]) + >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) + array([6., 1., 1., 1.]) + >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) + array([ 0., -2., -2., -2.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype doesn't preserve mask attribute. + c = c + 0.0 + cdt = c.dtype + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + k = list(k) + [0] * (cnt - len(k)) + c = np.moveaxis(c, iaxis, 0) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j] / (j + 1) + tmp[0] += k[i] - polyval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyval(x, c, tensor=True): + """ + Evaluate a polynomial at points x. + + If `c` is of length ``n + 1``, this function returns the value + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. 
If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyval2d, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + The evaluation uses Horner's method. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.polynomial import polyval + >>> polyval(1, [1,2,3]) + 6.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyval(a, [1, 2, 3]) + array([[ 1., 6.], + [17., 34.]]) + >>> coef = np.arange(4).reshape(2, 2) # multidimensional coefficients + >>> coef + array([[0, 1], + [2, 3]]) + >>> polyval([1, 2], coef, tensor=True) + array([[2., 4.], + [4., 7.]]) + >>> polyval([1, 2], coef, tensor=False) + array([2., 7.]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + c0 = c[-1] + x * 0 + for i in range(2, len(c) + 1): + c0 = c[-i] + c0 * x + return c0 + + +def polyvalfromroots(x, r, tensor=True): + """ + Evaluate a polynomial specified by its roots at points x. + + If `r` is of length ``N``, this function returns the value + + .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `r`. + + If `r` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If `r` + is multidimensional, then the shape of the result depends on the value of + `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape; + that is, each polynomial is evaluated at every value of `x`. If `tensor` is + ``False``, the shape will be r.shape[1:]; that is, each polynomial is + evaluated only for the corresponding broadcast value of `x`. Note that + scalars have shape (,). 
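Evaluating directly from roots should match expanding with ``polyfromroots`` first and then calling ``polyval``; a minimal equivalence sketch:

    >>> import numpy as np
    >>> from numpy.polynomial import polynomial as P
    >>> r = [-2.0, 0.5, 3.0]
    >>> x = np.linspace(-1, 1, 7)
    >>> bool(np.allclose(P.polyvalfromroots(x, r), P.polyval(x, P.polyfromroots(r))))
    True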
+ + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `r`. + r : array_like + Array of roots. If `r` is multidimensional the first index is the + root index, while the remaining indices enumerate multiple + polynomials. For instance, in the two dimensional case the roots + of each polynomial may be thought of as stored in the columns of `r`. + tensor : boolean, optional + If True, the shape of the roots array is extended with ones on the + right, one for each dimension of `x`. Scalars have dimension 0 for this + action. The result is that every column of coefficients in `r` is + evaluated for every element of `x`. If False, `x` is broadcast over the + columns of `r` for the evaluation. This keyword is useful when `r` is + multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyroots, polyfromroots, polyval + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyvalfromroots + >>> polyvalfromroots(1, [1, 2, 3]) + 0.0 + >>> a = np.arange(4).reshape(2, 2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyvalfromroots(a, [-1, 0, 1]) + array([[-0., 0.], + [ 6., 24.]]) + >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients + >>> r # each column of r defines one polynomial + array([[-2, -1], + [ 0, 1]]) + >>> b = [-2, 1] + >>> polyvalfromroots(b, r, tensor=True) + array([[-0., 3.], + [ 3., 0.]]) + >>> polyvalfromroots(b, r, tensor=False) + array([-0., 0.]) + + """ + r = np.array(r, ndmin=1, copy=None) + if r.dtype.char in '?bBhHiIlLqQpP': + r = r.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray): + if tensor: + r = r.reshape(r.shape + (1,) * x.ndim) + elif x.ndim >= r.ndim: + raise ValueError("x.ndim must be < r.ndim when tensor == False") + return np.prod(x - r, axis=0) + + +def polyval2d(x, y, c): + """ + Evaluate a 2-D polynomial at points (x, y). + + This function returns the value + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. 
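The 2-D evaluation is just the double sum in the formula above; a minimal check against an explicit loop:

    >>> import numpy as np
    >>> from numpy.polynomial import polynomial as P
    >>> c = np.arange(6.0).reshape(2, 3)   # c[i, j] multiplies x**i * y**j
    >>> x, y = 0.5, -1.5
    >>> explicit = sum(c[i, j] * x**i * y**j for i in range(2) for j in range(3))
    >>> bool(np.isclose(P.polyval2d(x, y, c), explicit))
    True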
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional polynomial at points formed with
+        pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    polyval, polygrid2d, polyval3d, polygrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = ((1, 2, 3), (4, 5, 6))
+    >>> P.polyval2d(1, 1, c)
+    21.0
+
+    """
+    return pu._valnd(polyval, c, x, y)
+
+
+def polygrid2d(x, y, c):
+    """
+    Evaluate a 2-D polynomial on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
+
+    where the points ``(a, b)`` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape + y.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j are contained in ``c[i,j]``. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional polynomial at points in the Cartesian
+        product of `x` and `y`.
+
+    See Also
+    --------
+    polyval, polyval2d, polyval3d, polygrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = ((1, 2, 3), (4, 5, 6))
+    >>> P.polygrid2d([0, 1], [0, 1], c)
+    array([[ 1.,  6.],
+           [ 5., 21.]])
+
+    """
+    return pu._gridnd(polyval, c, x, y)
+
+
+def polyval3d(x, y, z, c):
+    """
+    Evaluate a 3-D polynomial at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``.
If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polygrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9))
+    >>> P.polyval3d(1, 1, 1, c)
+    45.0
+
+    """
+    return pu._valnd(polyval, c, x, y, z)
+
+
+def polygrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
+
+    where the points ``(a, b, c)`` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polyval3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9))
+    >>> P.polygrid3d([0, 1], [0, 1], [0, 1], c)
+    array([[ 1., 13.],
+           [ 6., 51.]])
+
+    """
+    return pu._gridnd(polyval, c, x, y, z)
+
+
+def polyvander(x, deg):
+    """Vandermonde matrix of given degree.
+
+    Returns the Vandermonde matrix of degree `deg` and sample points
+    `x`. The Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = x^i,
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the power of `x`.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
+    ``polyval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of polynomials of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the power of `x`.
+        The dtype will be the same as the converted `x`.
+
+    See Also
+    --------
+    polyvander2d, polyvander3d
+
+    Examples
+    --------
+    The Vandermonde matrix of degree ``deg = 5`` and sample points
+    ``x = [-1, 2, 3]`` contains the element-wise powers of `x`
+    from 0 to 5 as its columns.
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> x, deg = [-1, 2, 3], 5
+    >>> P.polyvander(x=x, deg=deg)
+    array([[  1.,  -1.,   1.,  -1.,   1.,  -1.],
+           [  1.,   2.,   4.,   8.,  16.,  32.],
+           [  1.,   3.,   9.,  27.,  81., 243.]])
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = v[i - 1] * x
+    return np.moveaxis(v, 0, -1)
+
+
+def polyvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j,
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the powers of
+    `x` and `y`.
+
+    If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    polyvander, polyvander3d, polyval2d, polyval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    The 2-D pseudo-Vandermonde matrix of degree ``[1, 2]`` and sample
+    points ``x = [-1, 2]`` and ``y = [1, 3]`` is as follows:
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> x = np.array([-1, 2])
+    >>> y = np.array([1, 3])
+    >>> m, n = 1, 2
+    >>> deg = np.array([m, n])
+    >>> V = P.polyvander2d(x=x, y=y, deg=deg)
+    >>> V
+    array([[ 1.,  1.,  1., -1., -1., -1.],
+           [ 1.,  3.,  9.,  2.,  6., 18.]])
+
+    We can verify the columns for any ``0 <= i <= m`` and ``0 <= j <= n``:
+
+    >>> i, j = 0, 1
+    >>> V[:, (deg[1]+1)*i + j] == x**i * y**j
+    array([ True,  True])
+
+    The (1D) Vandermonde matrix of sample points ``x`` and degree ``m`` is a
+    special case of the (2D) pseudo-Vandermonde matrix with ``y`` points all
+    zero and degree ``[m, 0]``.
+
+    >>> P.polyvander2d(x=x, y=0*x, deg=(m, 0)) == P.polyvander(x=x, deg=m)
+    array([[ True,  True],
+           [ True,  True]])
+
+    """
+    return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg)
+
+
+def polyvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the powers of `x`, `y`, and `z`.
+
+    If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyvander, polyvander2d, polyval2d, polyval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> x = np.asarray([-1, 2, 1])
+    >>> y = np.asarray([1, -2, -3])
+    >>> z = np.asarray([2, 2, 5])
+    >>> l, m, n = [2, 2, 1]
+    >>> deg = [l, m, n]
+    >>> V = P.polyvander3d(x=x, y=y, z=z, deg=deg)
+    >>> V
+    array([[  1.,   2.,   1.,   2.,   1.,   2.,  -1.,  -2.,  -1.,
+             -2.,  -1.,  -2.,   1.,   2.,   1.,   2.,   1.,   2.],
+           [  1.,   2.,  -2.,  -4.,   4.,   8.,   2.,   4.,  -4.,
+             -8.,   8.,  16.,   4.,   8.,  -8., -16.,  16.,  32.],
+           [  1.,   5.,  -3., -15.,   9.,  45.,   1.,   5.,  -3.,
+            -15.,   9.,  45.,   1.,   5.,  -3., -15.,   9.,  45.]])
+
+    We can verify the columns for any ``0 <= i <= l``, ``0 <= j <= m``,
+    and ``0 <= k <= n``
+
+    >>> i, j, k = 2, 1, 0
+    >>> V[:, (m+1)*(n+1)*i + (n+1)*j + k] == x**i * y**j * z**k
+    array([ True,  True,  True])
+
+    """
+    return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least-squares fit of a polynomial to data.
+
+    Return the coefficients of a polynomial of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (`M`,)
+        x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
+ y : array_like, shape (`M`,) or (`M`, `K`) + y-coordinates of the sample points. Several sets of sample points + sharing the same x-coordinates can be (independently) fit with one + call to `polyfit` by passing in for `y` a 2-D array that contains + one data set per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than `rcond`, relative to the largest singular value, will be + ignored. The default value is ``len(x)*eps``, where `eps` is the + relative precision of the platform's float type, about 2e-16 in + most cases. + full : bool, optional + Switch determining the nature of the return value. When ``False`` + (the default) just the coefficients are returned; when ``True``, + diagnostic information from the singular value decomposition (used + to solve the fit's matrix equation) is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) + Polynomial coefficients ordered from low to high. If `y` was 2-D, + the coefficients in column `k` of `coef` represent the polynomial + fit to the data in `y`'s `k`-th column. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Raises + ------ + RankWarning + Raised if the matrix in the least-squares fit is rank deficient. + The warning is only raised if ``full == False``. The warnings can + be turned off by: + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + polyval : Evaluates a polynomial. + polyvander : Vandermonde matrix for powers. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the polynomial `p` that minimizes + the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) over-determined matrix equation: + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. 
+ + If some of the singular values of `V` are so small that they are + neglected (and `full` == ``False``), a `~exceptions.RankWarning` will be + raised. This means that the coefficient values may be poorly determined. + Fitting to a lower order polynomial will usually get rid of the warning + (but may not be what you want, of course; if you have independent + reason(s) for choosing the degree which isn't working, you may have to: + a) reconsider those reasons, and/or b) reconsider the quality of your + data). The `rcond` parameter can also be set to a value smaller than + its default, but the resulting fit may be spurious and have large + contributions from roundoff error. + + Polynomial fits using double precision tend to "fail" at about + (polynomial) degree 20. Fits using Chebyshev or Legendre series are + generally better conditioned, but much can still depend on the + distribution of the sample points and the smoothness of the data. If + the quality of the fit is inadequate, splines may be a good + alternative. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polynomial as P + >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] + >>> rng = np.random.default_rng() + >>> err = rng.normal(size=len(x)) + >>> y = x**3 - x + err # x^3 - x + Gaussian noise + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[1] approx. -1, c[2] should be approx. 0, c[3] approx. 1 + array([ 0.23111996, -1.02785049, -0.2241444 , 1.08405657]) # may vary + >>> stats # note the large SSR, explaining the rather poor results + [array([48.312088]), # may vary + 4, + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), + 1.1324274851176597e-14] + + Same thing without the added noise + + >>> y = x**3 - x + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[1] ~= -1, c[2] should be "very close to 0", c[3] ~= 1 + array([-6.73496154e-17, -1.00000000e+00, 0.00000000e+00, 1.00000000e+00]) + >>> stats # note the minuscule SSR + [array([8.79579319e-31]), + np.int32(4), + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), + 1.1324274851176597e-14] + + """ + return pu._fit(polyvander, x, y, deg, rcond, full, w) + + +def polycompanion(c): + """ + Return the companion matrix of c. + + The companion matrix for power series cannot be made symmetric by + scaling the basis, so this function differs from those for the + orthogonal polynomials. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polycompanion(c) + array([[ 0. , -0.33333333], + [ 1. , -0.66666667]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + bot = mat.reshape(-1)[n::n + 1] + bot[...] = 1 + mat[:, -1] -= c[:-1] / c[-1] + return mat + + +def polyroots(c): + """ + Compute the roots of a polynomial. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * x^i. + + Parameters + ---------- + c : 1-D array_like + 1-D array of polynomial coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the polynomial. If all the roots are real, + then `out` is also real, otherwise it is complex. 
+
+    See Also
+    --------
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the power series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    Examples
+    --------
+    >>> import numpy.polynomial.polynomial as poly
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1)))
+    array([-1.,  0.,  1.])
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
+    dtype('float64')
+    >>> j = complex(0,1)
+    >>> poly.polyroots(poly.polyfromroots((-j,0,j)))
+    array([  0.00000000e+00+0.j,   0.00000000e+00+1.j,   2.77555756e-17-1.j])  # may vary
+
+    """  # noqa: E501
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([-c[0] / c[1]])
+
+    m = polycompanion(c)
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+#
+# polynomial class
+#
+
+class Polynomial(ABCPolyBase):
+    """A power series class.
+
+    The Polynomial class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed below.
+
+    Parameters
+    ----------
+    coef : array_like
+        Polynomial coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` gives ``1 + 2*x + 3*x**2``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [-1., 1.].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [-1., 1.].
+    symbol : str, optional
+        Symbol used to represent the independent variable in string
+        representations of the polynomial expression, e.g. for printing.
+        The symbol must be a valid Python identifier. Default value is 'x'.
+
+    ..
versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(polyadd) + _sub = staticmethod(polysub) + _mul = staticmethod(polymul) + _div = staticmethod(polydiv) + _pow = staticmethod(polypow) + _val = staticmethod(polyval) + _int = staticmethod(polyint) + _der = staticmethod(polyder) + _fit = staticmethod(polyfit) + _line = staticmethod(polyline) + _roots = staticmethod(polyroots) + _fromroots = staticmethod(polyfromroots) + + # Virtual properties + domain = np.array(polydomain) + window = np.array(polydomain) + basis_name = None + + @classmethod + def _str_term_unicode(cls, i, arg_str): + if i == '1': + return f"·{arg_str}" + else: + return f"·{arg_str}{i.translate(cls._superscript_mapping)}" + + @staticmethod + def _str_term_ascii(i, arg_str): + if i == '1': + return f" {arg_str}" + else: + return f" {arg_str}**{i}" + + @staticmethod + def _repr_latex_term(i, arg_str, needs_parens): + if needs_parens: + arg_str = rf"\left({arg_str}\right)" + if i == 0: + return '1' + elif i == 1: + return arg_str + else: + return f"{arg_str}^{{{i}}}" diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/polynomial.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/polynomial.pyi new file mode 100644 index 0000000..b4c7844 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/polynomial.pyi @@ -0,0 +1,89 @@ +from typing import Final +from typing import Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncInteg, + _FuncLine, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, +) +from .polyutils import trimcoef as polytrim + +__all__ = [ + "polyzero", + "polyone", + "polyx", + "polydomain", + "polyline", + "polyadd", + "polysub", + "polymulx", + "polymul", + "polydiv", + "polypow", + "polyval", + "polyvalfromroots", + "polyder", + "polyint", + "polyfromroots", + "polyvander", + "polyfit", + "polytrim", + "polyroots", + "Polynomial", + "polyval2d", + "polyval3d", + "polygrid2d", + "polygrid3d", + "polyvander2d", + "polyvander3d", + "polycompanion", +] + +polydomain: Final[_Array2[np.float64]] +polyzero: Final[_Array1[np.int_]] +polyone: Final[_Array1[np.int_]] +polyx: Final[_Array2[np.int_]] + +polyline: _FuncLine[L["Polyline"]] +polyfromroots: _FuncFromRoots[L["polyfromroots"]] +polyadd: _FuncBinOp[L["polyadd"]] +polysub: _FuncBinOp[L["polysub"]] +polymulx: _FuncUnOp[L["polymulx"]] +polymul: _FuncBinOp[L["polymul"]] +polydiv: _FuncBinOp[L["polydiv"]] +polypow: _FuncPow[L["polypow"]] +polyder: _FuncDer[L["polyder"]] +polyint: _FuncInteg[L["polyint"]] +polyval: _FuncVal[L["polyval"]] +polyval2d: _FuncVal2D[L["polyval2d"]] +polyval3d: _FuncVal3D[L["polyval3d"]] +polyvalfromroots: _FuncValFromRoots[L["polyvalfromroots"]] +polygrid2d: _FuncVal2D[L["polygrid2d"]] +polygrid3d: _FuncVal3D[L["polygrid3d"]] +polyvander: _FuncVander[L["polyvander"]] +polyvander2d: _FuncVander2D[L["polyvander2d"]] +polyvander3d: _FuncVander3D[L["polyvander3d"]] +polyfit: _FuncFit[L["polyfit"]] +polycompanion: _FuncCompanion[L["polycompanion"]] +polyroots: _FuncRoots[L["polyroots"]] + +class Polynomial(ABCPolyBase[None]): ... 
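To see how the functions and the class stubbed above fit together in practice, here is a minimal usage sketch (editor's illustration, not part of the patched files; the sample data is invented and only public NumPy APIs are used):

import numpy as np
from numpy.polynomial import polynomial as P

# Invented sample: noisy observations of 2 + 3*x at five points.
x = np.linspace(-1.0, 1.0, 5)
y = 2.0 + 3.0 * x + np.array([0.01, -0.02, 0.0, 0.02, -0.01])

coef = P.polyfit(x, y, deg=1)        # least-squares line, coefficients low to high
y_hat = P.polyval(x, coef)           # evaluate the fit at the sample points

p = np.polynomial.Polynomial(coef)   # the same coefficients as a Polynomial object
assert np.allclose(p(x), y_hat)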
diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/polyutils.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/polyutils.py
new file mode 100644
index 0000000..18dc0a8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/polyutils.py
@@ -0,0 +1,759 @@
+"""
+Utility classes and functions for the polynomial modules.
+
+This module provides: error and warning objects; a polynomial base class;
+and some routines used in both the `polynomial` and `chebyshev` modules.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   as_series    convert list of array_likes into 1-D arrays of common type.
+   trimseq      remove trailing zeros.
+   trimcoef     remove small trailing coefficients.
+   getdomain    return the domain appropriate for a given set of abscissae.
+   mapdomain    maps points between domains.
+   mapparms     parameters of the linear map between domains.
+
+"""
+import functools
+import operator
+import warnings
+
+import numpy as np
+from numpy._core.multiarray import dragon4_positional, dragon4_scientific
+from numpy.exceptions import RankWarning
+
+__all__ = [
+    'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms',
+    'format_float']
+
+#
+# Helper functions to convert inputs to 1-D arrays
+#
+def trimseq(seq):
+    """Remove trailing zeros from a sequence of Poly series coefficients.
+
+    Parameters
+    ----------
+    seq : sequence
+        Sequence of Poly series coefficients.
+
+    Returns
+    -------
+    series : sequence
+        Subsequence with trailing zeros removed. If the resulting sequence
+        would be empty, return the first element. The returned sequence may
+        or may not be a view.
+
+    Notes
+    -----
+    Do not lose the type info if the sequence contains unknown objects.
+
+    """
+    if len(seq) == 0 or seq[-1] != 0:
+        return seq
+    else:
+        for i in range(len(seq) - 1, -1, -1):
+            if seq[i] != 0:
+                break
+        return seq[:i + 1]
+
+
+def as_series(alist, trim=True):
+    """
+    Return argument as a list of 1-d arrays.
+
+    The returned list contains array(s) of dtype double, complex double, or
+    object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
+    size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
+    of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
+    raises a ValueError if it is not first reshaped into either a 1-d or 2-d
+    array.
+
+    Parameters
+    ----------
+    alist : array_like
+        A 1- or 2-d array_like
+    trim : boolean, optional
+        When True, trailing zeros are removed from the inputs.
+        When False, the inputs are passed through intact.
+
+    Returns
+    -------
+    [a1, a2,...] : list of 1-D arrays
+        A copy of the input data as a list of 1-d arrays.
+
+    Raises
+    ------
+    ValueError
+        Raised when `as_series` cannot convert its input to 1-d arrays, or at
+        least one of the resulting arrays is empty.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial import polyutils as pu
+    >>> a = np.arange(4)
+    >>> pu.as_series(a)
+    [array([0.]), array([1.]), array([2.]), array([3.])]
+    >>> b = np.arange(6).reshape((2,3))
+    >>> pu.as_series(b)
+    [array([0., 1., 2.]), array([3., 4., 5.])]
+
+    >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))
+    [array([1.]), array([0., 1., 2.]), array([0., 1.])]
+
+    >>> pu.as_series([2, [1.1, 0.]])
+    [array([2.]), array([1.1])]
+
+    >>> pu.as_series([2, [1.1, 0.]], trim=False)
+    [array([2.]), array([1.1, 0.
])] + + """ + arrays = [np.array(a, ndmin=1, copy=None) for a in alist] + for a in arrays: + if a.size == 0: + raise ValueError("Coefficient array is empty") + if a.ndim != 1: + raise ValueError("Coefficient array is not 1-d") + if trim: + arrays = [trimseq(a) for a in arrays] + + try: + dtype = np.common_type(*arrays) + except Exception as e: + object_dtype = np.dtypes.ObjectDType() + has_one_object_type = False + ret = [] + for a in arrays: + if a.dtype != object_dtype: + tmp = np.empty(len(a), dtype=object_dtype) + tmp[:] = a[:] + ret.append(tmp) + else: + has_one_object_type = True + ret.append(a.copy()) + if not has_one_object_type: + raise ValueError("Coefficient arrays have no common type") from e + else: + ret = [np.array(a, copy=True, dtype=dtype) for a in arrays] + return ret + + +def trimcoef(c, tol=0): + """ + Remove "small" "trailing" coefficients from a polynomial. + + "Small" means "small in absolute value" and is controlled by the + parameter `tol`; "trailing" means highest order coefficient(s), e.g., in + ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) + both the 3-rd and 4-th order coefficients would be "trimmed." + + Parameters + ---------- + c : array_like + 1-d array of coefficients, ordered from lowest order to highest. + tol : number, optional + Trailing (i.e., highest order) elements with absolute value less + than or equal to `tol` (default value is zero) are removed. + + Returns + ------- + trimmed : ndarray + 1-d array with trailing zeros removed. If the resulting series + would be empty, a series containing a single zero is returned. + + Raises + ------ + ValueError + If `tol` < 0 + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.trimcoef((0,0,3,0,5,0,0)) + array([0., 0., 3., 0., 5.]) + >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + array([0.]) + >>> i = complex(0,1) # works for complex + >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + array([0.0003+0.j , 0.001 -0.001j]) + + """ + if tol < 0: + raise ValueError("tol must be non-negative") + + [c] = as_series([c]) + [ind] = np.nonzero(np.abs(c) > tol) + if len(ind) == 0: + return c[:1] * 0 + else: + return c[:ind[-1] + 1].copy() + +def getdomain(x): + """ + Return a domain suitable for given abscissae. + + Find a domain suitable for a polynomial or Chebyshev series + defined at the values supplied. + + Parameters + ---------- + x : array_like + 1-d array of abscissae whose domain will be determined. + + Returns + ------- + domain : ndarray + 1-d array containing two values. If the inputs are complex, then + the two returned points are the lower left and upper right corners + of the smallest rectangle (aligned with the axes) in the complex + plane containing the points `x`. If the inputs are real, then the + two points are the ends of the smallest interval containing the + points `x`. 
+ + See Also + -------- + mapparms, mapdomain + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polyutils as pu + >>> points = np.arange(4)**2 - 5; points + array([-5, -4, -1, 4]) + >>> pu.getdomain(points) + array([-5., 4.]) + >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle + >>> pu.getdomain(c) + array([-1.-1.j, 1.+1.j]) + + """ + [x] = as_series([x], trim=False) + if x.dtype.char in np.typecodes['Complex']: + rmin, rmax = x.real.min(), x.real.max() + imin, imax = x.imag.min(), x.imag.max() + return np.array((complex(rmin, imin), complex(rmax, imax))) + else: + return np.array((x.min(), x.max())) + +def mapparms(old, new): + """ + Linear map parameters between domains. + + Return the parameters of the linear map ``offset + scale*x`` that maps + `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. + + Parameters + ---------- + old, new : array_like + Domains. Each domain must (successfully) convert to a 1-d array + containing precisely two values. + + Returns + ------- + offset, scale : scalars + The map ``L(x) = offset + scale*x`` maps the first domain to the + second. + + See Also + -------- + getdomain, mapdomain + + Notes + ----- + Also works for complex numbers, and thus can be used to calculate the + parameters required to map any line in the complex plane to any other + line therein. + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.mapparms((-1,1),(-1,1)) + (0.0, 1.0) + >>> pu.mapparms((1,-1),(-1,1)) + (-0.0, -1.0) + >>> i = complex(0,1) + >>> pu.mapparms((-i,-1),(1,i)) + ((1+1j), (1-0j)) + + """ + oldlen = old[1] - old[0] + newlen = new[1] - new[0] + off = (old[1] * new[0] - old[0] * new[1]) / oldlen + scl = newlen / oldlen + return off, scl + +def mapdomain(x, old, new): + """ + Apply linear map to input points. + + The linear map ``offset + scale*x`` that maps the domain `old` to + the domain `new` is applied to the points `x`. + + Parameters + ---------- + x : array_like + Points to be mapped. If `x` is a subtype of ndarray the subtype + will be preserved. + old, new : array_like + The two domains that determine the map. Each must (successfully) + convert to 1-d arrays containing precisely two values. + + Returns + ------- + x_out : ndarray + Array of points of the same shape as `x`, after application of the + linear map between the two domains. + + See Also + -------- + getdomain, mapparms + + Notes + ----- + Effectively, this implements: + + .. math:: + x\\_out = new[0] + m(x - old[0]) + + where + + .. math:: + m = \\frac{new[1]-new[0]}{old[1]-old[0]} + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polyutils as pu + >>> old_domain = (-1,1) + >>> new_domain = (0,2*np.pi) + >>> x = np.linspace(-1,1,6); x + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) + >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out + array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary + 6.28318531]) + >>> x - pu.mapdomain(x_out, new_domain, old_domain) + array([0., 0., 0., 0., 0., 0.]) + + Also works for complex numbers (and thus can be used to map any line in + the complex plane to any other line therein). + + >>> i = complex(0,1) + >>> old = (-1 - i, 1 + i) + >>> new = (-1 + i, 1 - i) + >>> z = np.linspace(old[0], old[1], 6); z + array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. 
+1.j ])
+    >>> new_z = pu.mapdomain(z, old, new); new_z
+    array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j,  0.2-0.2j,  0.6-0.6j,  1.0-1.j ]) # may vary
+
+    """
+    if type(x) not in (int, float, complex) and not isinstance(x, np.generic):
+        x = np.asanyarray(x)
+    off, scl = mapparms(old, new)
+    return off + scl * x
+
+
+def _nth_slice(i, ndim):
+    sl = [np.newaxis] * ndim
+    sl[i] = slice(None)
+    return tuple(sl)
+
+
+def _vander_nd(vander_fs, points, degrees):
+    r"""
+    A generalization of the Vandermonde matrix for N dimensions
+
+    The result is built by combining the results of 1d Vandermonde matrices,
+
+    .. math::
+        W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]}
+
+    where
+
+    .. math::
+        N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\
+        M &= \texttt{points[k].ndim} \\
+        V_k &= \texttt{vander\_fs[k]} \\
+        x_k &= \texttt{points[k]} \\
+        0 \le j_k &\le \texttt{degrees[k]}
+
+    Expanding the one-dimensional :math:`V_k` functions gives:
+
+    .. math::
+        W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])}
+
+    where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along
+    dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`.
+
+    Parameters
+    ----------
+    vander_fs : Sequence[function(array_like, int) -> ndarray]
+        The 1d vander function to use for each axis, such as ``polyvander``
+    points : Sequence[array_like]
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+        This must be the same length as `vander_fs`.
+    degrees : Sequence[int]
+        The maximum degree (inclusive) to use for each axis.
+        This must be the same length as `vander_fs`.
+
+    Returns
+    -------
+    vander_nd : ndarray
+        An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
+    """  # noqa: E501
+    n_dims = len(vander_fs)
+    if n_dims != len(points):
+        raise ValueError(
+            f"Expected {n_dims} dimensions of sample points, got {len(points)}")
+    if n_dims != len(degrees):
+        raise ValueError(
+            f"Expected {n_dims} dimensions of degrees, got {len(degrees)}")
+    if n_dims == 0:
+        raise ValueError("Unable to guess a dtype or shape when no points are given")
+
+    # convert to the same shape and type
+    points = tuple(np.asarray(tuple(points)) + 0.0)
+
+    # produce the vandermonde matrix for each dimension, placing the last
+    # axis of each in an independent trailing axis of the output
+    vander_arrays = (
+        vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)]
+        for i in range(n_dims)
+    )
+
+    # we checked this wasn't empty already, so no `initial` needed
+    return functools.reduce(operator.mul, vander_arrays)
+
+
+def _vander_nd_flat(vander_fs, points, degrees):
+    """
+    Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis
+
+    Used to implement the public ``<type>vander<n>d`` functions.
+    """
+    v = _vander_nd(vander_fs, points, degrees)
+    return v.reshape(v.shape[:-len(degrees)] + (-1,))
+
+
+def _fromroots(line_f, mul_f, roots):
+    """
+    Helper function used to implement the ``<type>fromroots`` functions.
+
+    Parameters
+    ----------
+    line_f : function(float, float) -> ndarray
+        The ``<type>line`` function, such as ``polyline``
+    mul_f : function(array_like, array_like) -> ndarray
+        The ``<type>mul`` function, such as ``polymul``
+    roots
+        See the ``<type>fromroots`` functions for more detail
+    """
+    if len(roots) == 0:
+        return np.ones(1)
+    else:
+        [roots] = as_series([roots], trim=False)
+        roots.sort()
+        p = [line_f(-r, 1) for r in roots]
+        n = len(p)
+        while n > 1:
+            m, r = divmod(n, 2)
+            tmp = [mul_f(p[i], p[i + m]) for i in range(m)]
+            if r:
+                tmp[0] = mul_f(tmp[0], p[-1])
+            p = tmp
+            n = m
+        return p[0]
+
+
+def _valnd(val_f, c, *args):
+    """
+    Helper function used to implement the ``<type>val<n>d`` functions.
+
+    Parameters
+    ----------
+    val_f : function(array_like, array_like, tensor: bool) -> array_like
+        The ``<type>val`` function, such as ``polyval``
+    c, args
+        See the ``<type>val<n>d`` functions for more detail
+    """
+    args = [np.asanyarray(a) for a in args]
+    shape0 = args[0].shape
+    if not all(a.shape == shape0 for a in args[1:]):
+        if len(args) == 3:
+            raise ValueError('x, y, z are incompatible')
+        elif len(args) == 2:
+            raise ValueError('x, y are incompatible')
+        else:
+            raise ValueError('ordinates are incompatible')
+    it = iter(args)
+    x0 = next(it)
+
+    # use tensor on only the first
+    c = val_f(x0, c)
+    for xi in it:
+        c = val_f(xi, c, tensor=False)
+    return c
+
+
+def _gridnd(val_f, c, *args):
+    """
+    Helper function used to implement the ``<type>grid<n>d`` functions.
+
+    Parameters
+    ----------
+    val_f : function(array_like, array_like, tensor: bool) -> array_like
+        The ``<type>val`` function, such as ``polyval``
+    c, args
+        See the ``<type>grid<n>d`` functions for more detail
+    """
+    for xi in args:
+        c = val_f(xi, c)
+    return c
+
+
+def _div(mul_f, c1, c2):
+    """
+    Helper function used to implement the ``<type>div`` functions.
+
+    Implementation uses repeated subtraction of c2 multiplied by the nth basis.
+    For some polynomial types, a more efficient approach may be possible.
+
+    Parameters
+    ----------
+    mul_f : function(array_like, array_like) -> array_like
+        The ``<type>mul`` function, such as ``polymul``
+    c1, c2
+        See the ``<type>div`` functions for more detail
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if c2[-1] == 0:
+        raise ZeroDivisionError  # FIXME: add message with details to exception
+
+    lc1 = len(c1)
+    lc2 = len(c2)
+    if lc1 < lc2:
+        return c1[:1] * 0, c1
+    elif lc2 == 1:
+        return c1 / c2[-1], c1[:1] * 0
+    else:
+        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
+        rem = c1
+        for i in range(lc1 - lc2, - 1, -1):
+            p = mul_f([0] * i + [1], c2)
+            q = rem[-1] / p[-1]
+            rem = rem[:-1] - q * p[:-1]
+            quo[i] = q
+        return quo, trimseq(rem)
+
+
+def _add(c1, c2):
+    """ Helper function used to implement the ``<type>add`` functions. """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if len(c1) > len(c2):
+        c1[:c2.size] += c2
+        ret = c1
+    else:
+        c2[:c1.size] += c1
+        ret = c2
+    return trimseq(ret)
+
+
+def _sub(c1, c2):
+    """ Helper function used to implement the ``<type>sub`` functions. """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if len(c1) > len(c2):
+        c1[:c2.size] -= c2
+        ret = c1
+    else:
+        c2 = -c2
+        c2[:c1.size] += c1
+        ret = c2
+    return trimseq(ret)
+
+
+def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):
+    """
+    Helper function used to implement the ``<type>fit`` functions.
+
+    Parameters
+    ----------
+    vander_f : function(array_like, int) -> ndarray
+        The 1d vander function, such as ``polyvander``
+    x, y, deg, rcond, full, w
+        See the ``<type>fit`` functions for more detail
+    """
+    x = np.asarray(x) + 0.0
+    y = np.asarray(y) + 0.0
+    deg = np.asarray(deg)
+
+    # check arguments.
+    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
+        raise TypeError("deg must be an int or non-empty 1-D array of int")
+    if deg.min() < 0:
+        raise ValueError("expected deg >= 0")
+    if x.ndim != 1:
+        raise TypeError("expected 1D vector for x")
+    if x.size == 0:
+        raise TypeError("expected non-empty vector for x")
+    if y.ndim < 1 or y.ndim > 2:
+        raise TypeError("expected 1D or 2D array for y")
+    if len(x) != len(y):
+        raise TypeError("expected x and y to have same length")
+
+    if deg.ndim == 0:
+        lmax = deg
+        order = lmax + 1
+        van = vander_f(x, lmax)
+    else:
+        deg = np.sort(deg)
+        lmax = deg[-1]
+        order = len(deg)
+        van = vander_f(x, lmax)[:, deg]
+
+    # set up the least squares matrices in transposed form
+    lhs = van.T
+    rhs = y.T
+    if w is not None:
+        w = np.asarray(w) + 0.0
+        if w.ndim != 1:
+            raise TypeError("expected 1D vector for w")
+        if len(x) != len(w):
+            raise TypeError("expected x and w to have same length")
+        # apply weights. Don't use inplace operations as they
+        # can cause problems with NA.
+        lhs = lhs * w
+        rhs = rhs * w
+
+    # set rcond
+    if rcond is None:
+        rcond = len(x) * np.finfo(x.dtype).eps
+
+    # Determine the norms of the design matrix columns.
+    if issubclass(lhs.dtype.type, np.complexfloating):
+        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+    else:
+        scl = np.sqrt(np.square(lhs).sum(1))
+    scl[scl == 0] = 1
+
+    # Solve the least squares problem.
+    c, resids, rank, s = np.linalg.lstsq(lhs.T / scl, rhs.T, rcond)
+    c = (c.T / scl).T
+
+    # Expand c to include non-fitted coefficients which are set to zero
+    if deg.ndim > 0:
+        if c.ndim == 2:
+            cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
+        else:
+            cc = np.zeros(lmax + 1, dtype=c.dtype)
+        cc[deg] = c
+        c = cc
+
+    # warn on rank reduction
+    if rank != order and not full:
+        msg = "The fit may be poorly conditioned"
+        warnings.warn(msg, RankWarning, stacklevel=2)
+
+    if full:
+        return c, [resids, rank, s, rcond]
+    else:
+        return c
+
+
+def _pow(mul_f, c, pow, maxpower):
+    """
+    Helper function used to implement the ``<type>pow`` functions.
+
+    Parameters
+    ----------
+    mul_f : function(array_like, array_like) -> ndarray
+        The ``<type>mul`` function, such as ``polymul``
+    c : array_like
+        1-D array of series coefficients
+    pow, maxpower
+        See the ``<type>pow`` functions for more detail
+    """
+    # c is a trimmed copy
+    [c] = as_series([c])
+    power = int(pow)
+    if power != pow or power < 0:
+        raise ValueError("Power must be a non-negative integer.")
+    elif maxpower is not None and power > maxpower:
+        raise ValueError("Power is too large")
+    elif power == 0:
+        return np.array([1], dtype=c.dtype)
+    elif power == 1:
+        return c
+    else:
+        # This can be made more efficient by using powers of two
+        # in the usual way.
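+        # A repeated-squaring sketch of that idea (editor's illustration in
+        # comment form, not upstream code; it assumes mul_f composes series
+        # exactly as the loop below does):
+        #     result, base, e = np.array([1], dtype=c.dtype), c, power
+        #     while e:
+        #         if e & 1:
+        #             result = mul_f(result, base)
+        #         base = mul_f(base, base)
+        #         e >>= 1
+        # This uses O(log(power)) multiplications instead of power - 1,
+        # though each multiply acts on longer series, so the gain is smaller
+        # than for scalar exponentiation.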
+ prd = c + for i in range(2, power + 1): + prd = mul_f(prd, c) + return prd + + +def _as_int(x, desc): + """ + Like `operator.index`, but emits a custom exception when passed an + incorrect type + + Parameters + ---------- + x : int-like + Value to interpret as an integer + desc : str + description to include in any error message + + Raises + ------ + TypeError : if x is a float or non-numeric + """ + try: + return operator.index(x) + except TypeError as e: + raise TypeError(f"{desc} must be an integer, received {x}") from e + + +def format_float(x, parens=False): + if not np.issubdtype(type(x), np.floating): + return str(x) + + opts = np.get_printoptions() + + if np.isnan(x): + return opts['nanstr'] + elif np.isinf(x): + return opts['infstr'] + + exp_format = False + if x != 0: + a = np.abs(x) + if a >= 1.e8 or a < 10**min(0, -(opts['precision'] - 1) // 2): + exp_format = True + + trim, unique = '0', True + if opts['floatmode'] == 'fixed': + trim, unique = 'k', False + + if exp_format: + s = dragon4_scientific(x, precision=opts['precision'], + unique=unique, trim=trim, + sign=opts['sign'] == '+') + if parens: + s = '(' + s + ')' + else: + s = dragon4_positional(x, precision=opts['precision'], + fractional=True, + unique=unique, trim=trim, + sign=opts['sign'] == '+') + return s diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/polyutils.pyi b/.venv/lib/python3.12/site-packages/numpy/polynomial/polyutils.pyi new file mode 100644 index 0000000..c627e16 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/polyutils.pyi @@ -0,0 +1,423 @@ +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Final, + Literal, + SupportsIndex, + TypeAlias, + TypeVar, + overload, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polytypes import ( + _AnyInt, + _Array2, + _ArrayLikeCoef_co, + _CoefArray, + _CoefLike_co, + _CoefSeries, + _ComplexArray, + _ComplexSeries, + _FloatArray, + _FloatSeries, + _FuncBinOp, + _FuncValND, + _FuncVanderND, + _ObjectArray, + _ObjectSeries, + _SeriesLikeCoef_co, + _SeriesLikeComplex_co, + _SeriesLikeFloat_co, + _SeriesLikeInt_co, + _Tuple2, +) + +__all__: Final[Sequence[str]] = [ + "as_series", + "format_float", + "getdomain", + "mapdomain", + "mapparms", + "trimcoef", + "trimseq", +] + +_AnyLineF: TypeAlias = Callable[ + [_CoefLike_co, _CoefLike_co], + _CoefArray, +] +_AnyMulF: TypeAlias = Callable[ + [npt.ArrayLike, npt.ArrayLike], + _CoefArray, +] +_AnyVanderF: TypeAlias = Callable[ + [npt.ArrayLike, SupportsIndex], + _CoefArray, +] + +@overload +def as_series( + alist: npt.NDArray[np.integer] | _FloatArray, + trim: bool = ..., +) -> list[_FloatSeries]: ... +@overload +def as_series( + alist: _ComplexArray, + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: _ObjectArray, + trim: bool = ..., +) -> list[_ObjectSeries]: ... +@overload +def as_series( # type: ignore[overload-overlap] + alist: Iterable[_FloatArray | npt.NDArray[np.integer]], + trim: bool = ..., +) -> list[_FloatSeries]: ... +@overload +def as_series( + alist: Iterable[_ComplexArray], + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: Iterable[_ObjectArray], + trim: bool = ..., +) -> list[_ObjectSeries]: ... 
+@overload +def as_series( # type: ignore[overload-overlap] + alist: Iterable[_SeriesLikeFloat_co | float], + trim: bool = ..., +) -> list[_FloatSeries]: ... +@overload +def as_series( + alist: Iterable[_SeriesLikeComplex_co | complex], + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: Iterable[_SeriesLikeCoef_co | object], + trim: bool = ..., +) -> list[_ObjectSeries]: ... + +_T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) +def trimseq(seq: _T_seq) -> _T_seq: ... + +@overload +def trimcoef( # type: ignore[overload-overlap] + c: npt.NDArray[np.integer] | _FloatArray, + tol: _FloatLike_co = ..., +) -> _FloatSeries: ... +@overload +def trimcoef( + c: _ComplexArray, + tol: _FloatLike_co = ..., +) -> _ComplexSeries: ... +@overload +def trimcoef( + c: _ObjectArray, + tol: _FloatLike_co = ..., +) -> _ObjectSeries: ... +@overload +def trimcoef( # type: ignore[overload-overlap] + c: _SeriesLikeFloat_co | float, + tol: _FloatLike_co = ..., +) -> _FloatSeries: ... +@overload +def trimcoef( + c: _SeriesLikeComplex_co | complex, + tol: _FloatLike_co = ..., +) -> _ComplexSeries: ... +@overload +def trimcoef( + c: _SeriesLikeCoef_co | object, + tol: _FloatLike_co = ..., +) -> _ObjectSeries: ... + +@overload +def getdomain( # type: ignore[overload-overlap] + x: _FloatArray | npt.NDArray[np.integer], +) -> _Array2[np.float64]: ... +@overload +def getdomain( + x: _ComplexArray, +) -> _Array2[np.complex128]: ... +@overload +def getdomain( + x: _ObjectArray, +) -> _Array2[np.object_]: ... +@overload +def getdomain( # type: ignore[overload-overlap] + x: _SeriesLikeFloat_co | float, +) -> _Array2[np.float64]: ... +@overload +def getdomain( + x: _SeriesLikeComplex_co | complex, +) -> _Array2[np.complex128]: ... +@overload +def getdomain( + x: _SeriesLikeCoef_co | object, +) -> _Array2[np.object_]: ... + +@overload +def mapparms( # type: ignore[overload-overlap] + old: npt.NDArray[np.floating | np.integer], + new: npt.NDArray[np.floating | np.integer], +) -> _Tuple2[np.floating]: ... +@overload +def mapparms( + old: npt.NDArray[np.number], + new: npt.NDArray[np.number], +) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms( + old: npt.NDArray[np.object_ | np.number], + new: npt.NDArray[np.object_ | np.number], +) -> _Tuple2[object]: ... +@overload +def mapparms( # type: ignore[overload-overlap] + old: Sequence[float], + new: Sequence[float], +) -> _Tuple2[float]: ... +@overload +def mapparms( + old: Sequence[complex], + new: Sequence[complex], +) -> _Tuple2[complex]: ... +@overload +def mapparms( + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> _Tuple2[np.floating]: ... +@overload +def mapparms( + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms( + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> _Tuple2[object]: ... + +@overload +def mapdomain( # type: ignore[overload-overlap] + x: _FloatLike_co, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> np.floating: ... +@overload +def mapdomain( + x: _NumberLike_co, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> np.complexfloating: ... +@overload +def mapdomain( # type: ignore[overload-overlap] + x: npt.NDArray[np.floating | np.integer], + old: npt.NDArray[np.floating | np.integer], + new: npt.NDArray[np.floating | np.integer], +) -> _FloatSeries: ... 
+@overload +def mapdomain( + x: npt.NDArray[np.number], + old: npt.NDArray[np.number], + new: npt.NDArray[np.number], +) -> _ComplexSeries: ... +@overload +def mapdomain( + x: npt.NDArray[np.object_ | np.number], + old: npt.NDArray[np.object_ | np.number], + new: npt.NDArray[np.object_ | np.number], +) -> _ObjectSeries: ... +@overload +def mapdomain( # type: ignore[overload-overlap] + x: _SeriesLikeFloat_co, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> _FloatSeries: ... +@overload +def mapdomain( + x: _SeriesLikeComplex_co, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> _ComplexSeries: ... +@overload +def mapdomain( + x: _SeriesLikeCoef_co, + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> _ObjectSeries: ... +@overload +def mapdomain( + x: _CoefLike_co, + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> object: ... + +def _nth_slice( + i: SupportsIndex, + ndim: SupportsIndex, +) -> tuple[slice | None, ...]: ... + +_vander_nd: _FuncVanderND[Literal["_vander_nd"]] +_vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] + +# keep in sync with `._polytypes._FuncFromRoots` +@overload +def _fromroots( # type: ignore[overload-overlap] + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeFloat_co, +) -> _FloatSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeComplex_co, +) -> _ComplexSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeCoef_co, +) -> _ObjectSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeCoef_co, +) -> _CoefSeries: ... + +_valnd: _FuncValND[Literal["_valnd"]] +_gridnd: _FuncValND[Literal["_gridnd"]] + +# keep in sync with `_polytypes._FuncBinOp` +@overload +def _div( # type: ignore[overload-overlap] + mul_f: _AnyMulF, + c1: _SeriesLikeFloat_co, + c2: _SeriesLikeFloat_co, +) -> _Tuple2[_FloatSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeComplex_co, + c2: _SeriesLikeComplex_co, +) -> _Tuple2[_ComplexSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, +) -> _Tuple2[_ObjectSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, +) -> _Tuple2[_CoefSeries]: ... + +_add: Final[_FuncBinOp] +_sub: Final[_FuncBinOp] + +# keep in sync with `_polytypes._FuncPow` +@overload +def _pow( # type: ignore[overload-overlap] + mul_f: _AnyMulF, + c: _SeriesLikeFloat_co, + pow: _AnyInt, + maxpower: _AnyInt | None = ..., +) -> _FloatSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeComplex_co, + pow: _AnyInt, + maxpower: _AnyInt | None = ..., +) -> _ComplexSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: _AnyInt | None = ..., +) -> _ObjectSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: _AnyInt | None = ..., +) -> _CoefSeries: ... + +# keep in sync with `_polytypes._FuncFit` +@overload +def _fit( # type: ignore[overload-overlap] + vander_f: _AnyVanderF, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _SeriesLikeInt_co, + domain: _SeriesLikeFloat_co | None = ..., + rcond: _FloatLike_co | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeFloat_co | None = ..., +) -> _FloatArray: ... 
+@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: _SeriesLikeInt_co, + domain: _SeriesLikeComplex_co | None = ..., + rcond: _FloatLike_co | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeComplex_co | None = ..., +) -> _ComplexArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None = ..., + rcond: _FloatLike_co | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeCoef_co | None = ..., +) -> _CoefArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None, + rcond: _FloatLike_co | None, + full: Literal[True], + /, + w: _SeriesLikeCoef_co | None = ..., +) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None = ..., + rcond: _FloatLike_co | None = ..., + *, + full: Literal[True], + w: _SeriesLikeCoef_co | None = ..., +) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... + +def _as_int(x: SupportsIndex, desc: str) -> int: ... +def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..f0cc006 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-312.pyc new file mode 100644 index 0000000..cf8f747 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-312.pyc new file mode 100644 index 0000000..32aec82 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-312.pyc new file mode 100644 index 0000000..44fd6b0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-312.pyc new file mode 100644 index 0000000..1725154 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-312.pyc new file mode 100644 index 0000000..defff35 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-312.pyc new file mode 100644 index 0000000..a61cdda Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-312.pyc new file mode 100644 index 0000000..81bdbbb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-312.pyc new file mode 100644 index 0000000..5792df5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-312.pyc new file mode 100644 index 0000000..6f369fd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-312.pyc new file mode 100644 index 0000000..b9e8eb7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_chebyshev.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_chebyshev.py new file mode 100644 index 0000000..2cead45 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_chebyshev.py @@ -0,0 +1,623 @@ +"""Tests for chebyshev module. 
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.chebyshev as cheb
+from numpy.polynomial.polynomial import polyval
+from numpy.testing import (
+    assert_,
+    assert_almost_equal,
+    assert_equal,
+    assert_raises,
+)
+
+
+def trim(x):
+    return cheb.chebtrim(x, tol=1e-6)
+
+
+T0 = [1]
+T1 = [0, 1]
+T2 = [-1, 0, 2]
+T3 = [0, -3, 0, 4]
+T4 = [1, 0, -8, 0, 8]
+T5 = [0, 5, 0, -20, 0, 16]
+T6 = [-1, 0, 18, 0, -48, 0, 32]
+T7 = [0, -7, 0, 56, 0, -112, 0, 64]
+T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
+T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
+
+Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
+
+
+class TestPrivate:
+
+    def test__cseries_to_zseries(self):
+        for i in range(5):
+            inp = np.array([2] + [1] * i, np.double)
+            tgt = np.array([.5] * i + [2] + [.5] * i, np.double)
+            res = cheb._cseries_to_zseries(inp)
+            assert_equal(res, tgt)
+
+    def test__zseries_to_cseries(self):
+        for i in range(5):
+            inp = np.array([.5] * i + [2] + [.5] * i, np.double)
+            tgt = np.array([2] + [1] * i, np.double)
+            res = cheb._zseries_to_cseries(inp)
+            assert_equal(res, tgt)
+
+
+class TestConstants:
+
+    def test_chebdomain(self):
+        assert_equal(cheb.chebdomain, [-1, 1])
+
+    def test_chebzero(self):
+        assert_equal(cheb.chebzero, [0])
+
+    def test_chebone(self):
+        assert_equal(cheb.chebone, [1])
+
+    def test_chebx(self):
+        assert_equal(cheb.chebx, [0, 1])
+
+
+class TestArithmetic:
+
+    def test_chebadd(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                tgt = np.zeros(max(i, j) + 1)
+                tgt[i] += 1
+                tgt[j] += 1
+                res = cheb.chebadd([0] * i + [1], [0] * j + [1])
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+    def test_chebsub(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                tgt = np.zeros(max(i, j) + 1)
+                tgt[i] += 1
+                tgt[j] -= 1
+                res = cheb.chebsub([0] * i + [1], [0] * j + [1])
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+    def test_chebmulx(self):
+        assert_equal(cheb.chebmulx([0]), [0])
+        assert_equal(cheb.chebmulx([1]), [0, 1])
+        for i in range(1, 5):
+            ser = [0] * i + [1]
+            tgt = [0] * (i - 1) + [.5, 0, .5]
+            assert_equal(cheb.chebmulx(ser), tgt)
+
+    def test_chebmul(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                tgt = np.zeros(i + j + 1)
+                tgt[i + j] += .5
+                tgt[abs(i - j)] += .5
+                res = cheb.chebmul([0] * i + [1], [0] * j + [1])
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+    def test_chebdiv(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                ci = [0] * i + [1]
+                cj = [0] * j + [1]
+                tgt = cheb.chebadd(ci, cj)
+                quo, rem = cheb.chebdiv(tgt, ci)
+                res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+    def test_chebpow(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                c = np.arange(i + 1)
+                tgt = reduce(cheb.chebmul, [c] * j, np.array([1]))
+                res = cheb.chebpow(c, j)
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+
+class TestEvaluation:
+    # coefficients of 1 + 2*x + 3*x**2
+    c1d = np.array([2.5, 2., 1.5])
+    c2d = np.einsum('i,j->ij', c1d, c1d)
+    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+    # some random values in [-1, 1)
+    x = np.random.random((3, 5)) * 2 - 1
+    y = polyval(x, [1., 2., 3.])
+
+    def test_chebval(self):
+        # check empty input
+        assert_equal(cheb.chebval([], [1]).size, 0)
+
+        # check normal input
+        x = np.linspace(-1, 1)
+        y = [polyval(x, c) for c in Tlist]
+        for i in range(10):
+            msg = f"At i={i}"
+            tgt = y[i]
+            res = cheb.chebval(x, [0] * i + [1])
+
assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(cheb.chebval(x, [1]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) + + def test_chebval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = cheb.chebval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_chebval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = cheb.chebval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_chebgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = cheb.chebgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_chebgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = cheb.chebgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_chebint(self): + # check exceptions + assert_raises(TypeError, cheb.chebint, [0], .5) + assert_raises(ValueError, cheb.chebint, [0], -1) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) + assert_raises(ValueError, cheb.chebint, [0], scl=[0]) + assert_raises(TypeError, cheb.chebint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = cheb.chebint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i]) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(cheb.chebval(-1, chebint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1) + res = cheb.chebint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] 
+ for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k]) + res = cheb.chebint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) + res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) + res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T + res = cheb.chebint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c) for c in c2d]) + res = cheb.chebint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) + res = cheb.chebint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_chebder(self): + # check exceptions + assert_raises(TypeError, cheb.chebder, [0], .5) + assert_raises(ValueError, cheb.chebder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = cheb.chebder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T + res = cheb.chebder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebder(c) for c in c2d]) + res = cheb.chebder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_chebvander(self): + # check for 1d x + x = np.arange(3) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + def test_chebvander2d(self): + # also tests chebval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = cheb.chebvander2d(x1, x2, [1, 2]) + tgt = cheb.chebval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_chebvander3d(self): + # also tests chebval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) + tgt = cheb.chebval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + 
van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3])
+        assert_(van.shape == (1, 5, 24))
+
+
+class TestFitting:
+
+    def test_chebfit(self):
+        def f(x):
+            return x * (x - 1) * (x - 2)
+
+        def f2(x):
+            return x**4 + x**2 + 1
+
+        # Test exceptions
+        assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
+        assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
+        assert_raises(TypeError, cheb.chebfit, [], [1], 0)
+        assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
+        assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
+        assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
+        assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
+        assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])
+        assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,])
+        assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6])
+        assert_raises(TypeError, cheb.chebfit, [1], [1], [])
+
+        # Test fit
+        x = np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = cheb.chebfit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(cheb.chebval(x, coef3), y)
+        coef3 = cheb.chebfit(x, y, [0, 1, 2, 3])
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(cheb.chebval(x, coef3), y)
+        #
+        coef4 = cheb.chebfit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(cheb.chebval(x, coef4), y)
+        coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(cheb.chebval(x, coef4), y)
+        # check things still work if deg is not in strictly increasing order
+        coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(cheb.chebval(x, coef4), y)
+        #
+        coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        y[0::2] = 0
+        wcoef3 = cheb.chebfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares
+        # sum to zero.
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
+        assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
+        # test fitting only even polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = cheb.chebfit(x, y, 4)
+        assert_almost_equal(cheb.chebval(x, coef1), y)
+        coef2 = cheb.chebfit(x, y, [0, 2, 4])
+        assert_almost_equal(cheb.chebval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+
+class TestInterpolate:
+
+    def f(self, x):
+        return x * (x - 1) * (x - 2)
+
+    def test_raises(self):
+        assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
+        assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(-1, 1, 10) + for deg in range(10): + for p in range(deg + 1): + c = cheb.chebinterpolate(powx, deg, (p,)) + assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, cheb.chebcompanion, []) + assert_raises(ValueError, cheb.chebcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(cheb.chebcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = cheb.chebgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = cheb.chebvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.pi + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_chebfromroots(self): + res = cheb.chebfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + tgt = [0] * i + [1] + res = cheb.chebfromroots(roots) * 2**(i - 1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebroots(self): + assert_almost_equal(cheb.chebroots([1]), []) + assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = cheb.chebroots(cheb.chebfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, cheb.chebtrim, coef, -1) + + # Test results + assert_equal(cheb.chebtrim(coef), coef[:-1]) + assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) + assert_equal(cheb.chebtrim(coef, 2), [0]) + + def test_chebline(self): + assert_equal(cheb.chebline(3, 4), [3, 4]) + + def test_cheb2poly(self): + for i in range(10): + assert_almost_equal(cheb.cheb2poly([0] * i + [1]), Tlist[i]) + + def test_poly2cheb(self): + for i in range(10): + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11)[1:-1] + tgt = 1. 
/ (np.sqrt(1 + x) * np.sqrt(1 - x)) + res = cheb.chebweight(x) + assert_almost_equal(res, tgt) + + def test_chebpts1(self): + # test exceptions + assert_raises(ValueError, cheb.chebpts1, 1.5) + assert_raises(ValueError, cheb.chebpts1, 0) + + # test points + tgt = [0] + assert_almost_equal(cheb.chebpts1(1), tgt) + tgt = [-0.70710678118654746, 0.70710678118654746] + assert_almost_equal(cheb.chebpts1(2), tgt) + tgt = [-0.86602540378443871, 0, 0.86602540378443871] + assert_almost_equal(cheb.chebpts1(3), tgt) + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + assert_almost_equal(cheb.chebpts1(4), tgt) + + def test_chebpts2(self): + # test exceptions + assert_raises(ValueError, cheb.chebpts2, 1.5) + assert_raises(ValueError, cheb.chebpts2, 1) + + # test points + tgt = [-1, 1] + assert_almost_equal(cheb.chebpts2(2), tgt) + tgt = [-1, 0, 1] + assert_almost_equal(cheb.chebpts2(3), tgt) + tgt = [-1, -0.5, .5, 1] + assert_almost_equal(cheb.chebpts2(4), tgt) + tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] + assert_almost_equal(cheb.chebpts2(5), tgt) diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_classes.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_classes.py new file mode 100644 index 0000000..d10aafb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_classes.py @@ -0,0 +1,618 @@ +"""Test inter-conversion of different polynomial classes. + +This tests the convert and cast methods of all the polynomial classes. + +""" +import operator as op +from numbers import Number + +import pytest + +import numpy as np +from numpy.exceptions import RankWarning +from numpy.polynomial import ( + Chebyshev, + Hermite, + HermiteE, + Laguerre, + Legendre, + Polynomial, +) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +# +# fixtures +# + +classes = ( + Polynomial, Legendre, Chebyshev, Laguerre, + Hermite, HermiteE + ) +classids = tuple(cls.__name__ for cls in classes) + +@pytest.fixture(params=classes, ids=classids) +def Poly(request): + return request.param + + +# +# helper functions +# +random = np.random.random + + +def assert_poly_almost_equal(p1, p2, msg=""): + try: + assert_(np.all(p1.domain == p2.domain)) + assert_(np.all(p1.window == p2.window)) + assert_almost_equal(p1.coef, p2.coef) + except AssertionError: + msg = f"Result: {p1}\nTarget: {p2}" + raise AssertionError(msg) + + +# +# Test conversion methods that depend on combinations of two classes. 
+# + +Poly1 = Poly +Poly2 = Poly + + +def test_conversion(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 + p2 = p1.convert(kind=Poly2, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +def test_cast(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 + p2 = Poly2.cast(p1, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +# +# test methods that depend on one class +# + + +def test_identity(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + x = np.linspace(d[0], d[1], 11) + p = Poly.identity(domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_almost_equal(p(x), x) + + +def test_basis(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p = Poly.basis(5, domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_equal(p.coef, [0] * 5 + [1]) + + +def test_fromroots(Poly): + # check that requested roots are zeros of a polynomial + # of correct degree, domain, and window. + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def test_bad_conditioned_fit(Poly): + + x = [0., 0., 1.] + y = [1., 2., 3.] + + # check RankWarning is raised + with pytest.warns(RankWarning) as record: + Poly.fit(x, y, 2) + assert record[0].message.args[0] == "The fit may be poorly conditioned" + + +def test_fit(Poly): + + def f(x): + return x * (x - 1) * (x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. 
+ w = np.zeros_like(x) + z = y + random(y.shape) * .25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) + assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) + + +def test_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def test_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def test_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def test_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.sub, p1, Polynomial([0])) + + +def test_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def test_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + 
assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5 * p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def test_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. + p1 = Poly([1, 2, 3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [(), [], {}, False, np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def test_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def test_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, 
p3)
+    quo, rem = divmod(p2, 2)
+    assert_poly_almost_equal(quo, 0.5 * p2)
+    assert_poly_almost_equal(rem, Poly([0]))
+    quo, rem = divmod(2, p2)
+    assert_poly_almost_equal(quo, Poly([0]))
+    assert_poly_almost_equal(rem, Poly([2]))
+    assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
+    assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
+    if Poly is Polynomial:
+        assert_raises(TypeError, divmod, p1, Chebyshev([0]))
+    else:
+        assert_raises(TypeError, divmod, p1, Polynomial([0]))
+
+
+def test_roots(Poly):
+    d = Poly.domain * 1.25 + .25
+    w = Poly.window
+    tgt = np.linspace(d[0], d[1], 5)
+    res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots())
+    assert_almost_equal(res, tgt)
+    # default domain and window
+    res = np.sort(Poly.fromroots(tgt).roots())
+    assert_almost_equal(res, tgt)
+
+
+def test_degree(Poly):
+    p = Poly.basis(5)
+    assert_equal(p.degree(), 5)
+
+
+def test_copy(Poly):
+    p1 = Poly.basis(5)
+    p2 = p1.copy()
+    assert_(p1 == p2)
+    assert_(p1 is not p2)
+    assert_(p1.coef is not p2.coef)
+    assert_(p1.domain is not p2.domain)
+    assert_(p1.window is not p2.window)
+
+
+def test_integ(Poly):
+    P = Polynomial
+    # Check defaults
+    p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+    # Check with k
+    p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
+    p1 = P.cast(p0.integ(k=1))
+    p2 = P.cast(p0.integ(2, k=[1, 1]))
+    assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
+    # Check with lbnd
+    p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
+    p1 = P.cast(p0.integ(lbnd=1))
+    p2 = P.cast(p0.integ(2, lbnd=1))
+    assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
+    # Check scaling
+    d = 2 * Poly.domain
+    p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]), domain=d)
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+
+
+def test_deriv(Poly):
+    # Check that the derivative is the inverse of integration. It is
+    # assumed that the integration has been checked elsewhere.
+ d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p1 = Poly([1, 2, 3], domain=d, window=w) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + # default domain and window + p1 = Poly([1, 2, 3]) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + + +def test_linspace(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p = Poly([1, 2, 3], domain=d, window=w) + # check default domain + xtgt = np.linspace(d[0], d[1], 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + # check specified domain + xtgt = np.linspace(0, 2, 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20, domain=[0, 2]) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + + +def test_pow(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + tgt = Poly([1], domain=d, window=w) + tst = Poly([1, 2, 3], domain=d, window=w) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # default domain and window + tgt = Poly([1]) + tst = Poly([1, 2, 3]) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # check error for invalid powers + assert_raises(ValueError, op.pow, tgt, 1.5) + assert_raises(ValueError, op.pow, tgt, -1) + + +def test_call(Poly): + P = Polynomial + d = Poly.domain + x = np.linspace(d[0], d[1], 11) + + # Check defaults + p = Poly.cast(P([1, 2, 3])) + tgt = 1 + x * (2 + 3 * x) + res = p(x) + assert_almost_equal(res, tgt) + + +def test_call_with_list(Poly): + p = Poly([1, 2, 3]) + x = [-1, 0, 2] + res = p(x) + assert_equal(res, p(np.array(x))) + + +def test_cutdeg(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.cutdeg, .5) + assert_raises(ValueError, p.cutdeg, -1) + assert_equal(len(p.cutdeg(3)), 3) + assert_equal(len(p.cutdeg(2)), 3) + assert_equal(len(p.cutdeg(1)), 2) + assert_equal(len(p.cutdeg(0)), 1) + + +def test_truncate(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.truncate, .5) + assert_raises(ValueError, p.truncate, 0) + assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def test_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def test_mapparms(Poly): + # check with defaults. Should be identity. + d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2 * d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +def test_ufunc_override(Poly): + p = Poly([1, 2, 3]) + x = np.ones(3) + assert_raises(TypeError, np.add, p, x) + assert_raises(TypeError, np.add, x, p) + + +# +# Test class method that only exists for some classes +# + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) + assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) 
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(0, 2, 10) + for deg in range(10): + for t in range(deg + 1): + p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) + assert_almost_equal(p(x), powx(x, t), decimal=11) diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_hermite.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_hermite.py new file mode 100644 index 0000000..8bd3951 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_hermite.py @@ -0,0 +1,558 @@ +"""Tests for hermite module. + +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite as herm +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +H0 = np.array([1]) +H1 = np.array([0, 2]) +H2 = np.array([-2, 0, 4]) +H3 = np.array([0, -12, 0, 8]) +H4 = np.array([12, 0, -48, 0, 16]) +H5 = np.array([0, 120, 0, -160, 0, 32]) +H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) +H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) +H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) +H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) + +Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] + + +def trim(x): + return herm.hermtrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermdomain(self): + assert_equal(herm.hermdomain, [-1, 1]) + + def test_hermzero(self): + assert_equal(herm.hermzero, [0]) + + def test_hermone(self): + assert_equal(herm.hermone, [1]) + + def test_hermx(self): + assert_equal(herm.hermx, [0, .5]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herm.hermadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herm.hermsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermmulx(self): + assert_equal(herm.hermmulx([0]), [0]) + assert_equal(herm.hermmulx([1]), [0, .5]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, .5] + assert_equal(herm.hermmulx(ser), tgt) + + def test_hermmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = herm.hermval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = herm.hermval(self.x, pol2) + pol3 = herm.hermmul(pol1, pol2) + val3 = herm.hermval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_hermdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = herm.hermadd(ci, cj) + quo, rem = herm.hermdiv(tgt, ci) + res = herm.hermadd(herm.hermmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c] * j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + 
+class TestEvaluation:
+    # coefficients of 1 + 2*x + 3*x**2
+    c1d = np.array([2.5, 1., .75])
+    c2d = np.einsum('i,j->ij', c1d, c1d)
+    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+    # some random values in [-1, 1)
+    x = np.random.random((3, 5)) * 2 - 1
+    y = polyval(x, [1., 2., 3.])
+
+    def test_hermval(self):
+        # check empty input
+        assert_equal(herm.hermval([], [1]).size, 0)
+
+        # check normal input
+        x = np.linspace(-1, 1)
+        y = [polyval(x, c) for c in Hlist]
+        for i in range(10):
+            msg = f"At i={i}"
+            tgt = y[i]
+            res = herm.hermval(x, [0] * i + [1])
+            assert_almost_equal(res, tgt, err_msg=msg)
+
+        # check that shape is preserved
+        for i in range(3):
+            dims = [2] * i
+            x = np.zeros(dims)
+            assert_equal(herm.hermval(x, [1]).shape, dims)
+            assert_equal(herm.hermval(x, [1, 0]).shape, dims)
+            assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims)
+
+    def test_hermval2d(self):
+        x1, x2, x3 = self.x
+        y1, y2, y3 = self.y
+
+        # test exceptions
+        assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d)
+
+        # test values
+        tgt = y1 * y2
+        res = herm.hermval2d(x1, x2, self.c2d)
+        assert_almost_equal(res, tgt)
+
+        # test shape
+        z = np.ones((2, 3))
+        res = herm.hermval2d(z, z, self.c2d)
+        assert_(res.shape == (2, 3))
+
+    def test_hermval3d(self):
+        x1, x2, x3 = self.x
+        y1, y2, y3 = self.y
+
+        # test exceptions
+        assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d)
+
+        # test values
+        tgt = y1 * y2 * y3
+        res = herm.hermval3d(x1, x2, x3, self.c3d)
+        assert_almost_equal(res, tgt)
+
+        # test shape
+        z = np.ones((2, 3))
+        res = herm.hermval3d(z, z, z, self.c3d)
+        assert_(res.shape == (2, 3))
+
+    def test_hermgrid2d(self):
+        x1, x2, x3 = self.x
+        y1, y2, y3 = self.y
+
+        # test values
+        tgt = np.einsum('i,j->ij', y1, y2)
+        res = herm.hermgrid2d(x1, x2, self.c2d)
+        assert_almost_equal(res, tgt)
+
+        # test shape
+        z = np.ones((2, 3))
+        res = herm.hermgrid2d(z, z, self.c2d)
+        assert_(res.shape == (2, 3) * 2)
+
+    def test_hermgrid3d(self):
+        x1, x2, x3 = self.x
+        y1, y2, y3 = self.y
+
+        # test values
+        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
+        res = herm.hermgrid3d(x1, x2, x3, self.c3d)
+        assert_almost_equal(res, tgt)
+
+        # test shape
+        z = np.ones((2, 3))
+        res = herm.hermgrid3d(z, z, z, self.c3d)
+        assert_(res.shape == (2, 3) * 3)
+
+
+class TestIntegral:
+
+    def test_hermint(self):
+        # check exceptions
+        assert_raises(TypeError, herm.hermint, [0], .5)
+        assert_raises(ValueError, herm.hermint, [0], -1)
+        assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])
+        assert_raises(ValueError, herm.hermint, [0], lbnd=[0])
+        assert_raises(ValueError, herm.hermint, [0], scl=[0])
+        assert_raises(TypeError, herm.hermint, [0], axis=.5)
+
+        # test integration of zero polynomial
+        for i in range(2, 5):
+            k = [0] * (i - 2) + [1]
+            res = herm.hermint([0], m=i, k=k)
+            assert_almost_equal(res, [0, .5])
+
+        # check single integration with integration constant
+        for i in range(5):
+            scl = i + 1
+            pol = [0] * i + [1]
+            tgt = [i] + [0] * i + [1 / scl]
+            hermpol = herm.poly2herm(pol)
+            hermint = herm.hermint(hermpol, m=1, k=[i])
+            res = herm.herm2poly(hermint)
+            assert_almost_equal(trim(res), trim(tgt))
+
+        # check single integration with integration constant and lbnd
+        for i in range(5):
+            scl = i + 1
+            pol = [0] * i + [1]
+            hermpol = herm.poly2herm(pol)
+            hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)
+            assert_almost_equal(herm.hermval(-1, hermint), i)
+
+        # check single integration with integration constant and scaling
+        for i in range(5):
+            scl = i + 1
+            pol = [0] * i + [1]
+            tgt = [i] + [0]
* i + [2 / scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1) + res = herm.hermint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k]) + res = herm.hermint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) + res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], scl=2) + res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T + res = herm.hermint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c) for c in c2d]) + res = herm.hermint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) + res = herm.hermint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermder(self): + # check exceptions + assert_raises(TypeError, herm.hermder, [0], .5) + assert_raises(ValueError, herm.hermder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = herm.hermder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herm.hermder(herm.hermint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T + res = herm.hermder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermder(c) for c in c2d]) + res = herm.hermder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_hermvander(self): + # check for 1d x + x = np.arange(3) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + def test_hermvander2d(self): + # also tests hermval2d for non-square coefficient array + x1, x2, x3 = self.x + c = 
np.random.random((2, 3))
+        van = herm.hermvander2d(x1, x2, [1, 2])
+        tgt = herm.hermval2d(x1, x2, c)
+        res = np.dot(van, c.flat)
+        assert_almost_equal(res, tgt)
+
+        # check shape
+        van = herm.hermvander2d([x1], [x2], [1, 2])
+        assert_(van.shape == (1, 5, 6))
+
+    def test_hermvander3d(self):
+        # also tests hermval3d for non-square coefficient array
+        x1, x2, x3 = self.x
+        c = np.random.random((2, 3, 4))
+        van = herm.hermvander3d(x1, x2, x3, [1, 2, 3])
+        tgt = herm.hermval3d(x1, x2, x3, c)
+        res = np.dot(van, c.flat)
+        assert_almost_equal(res, tgt)
+
+        # check shape
+        van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3])
+        assert_(van.shape == (1, 5, 24))
+
+
+class TestFitting:
+
+    def test_hermfit(self):
+        def f(x):
+            return x * (x - 1) * (x - 2)
+
+        def f2(x):
+            return x**4 + x**2 + 1
+
+        # Test exceptions
+        assert_raises(ValueError, herm.hermfit, [1], [1], -1)
+        assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
+        assert_raises(TypeError, herm.hermfit, [], [1], 0)
+        assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
+        assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
+        assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
+        assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
+        assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])
+        assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])
+        assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])
+        assert_raises(TypeError, herm.hermfit, [1], [1], [])
+
+        # Test fit
+        x = np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = herm.hermfit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(herm.hermval(x, coef3), y)
+        coef3 = herm.hermfit(x, y, [0, 1, 2, 3])
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(herm.hermval(x, coef3), y)
+        #
+        coef4 = herm.hermfit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herm.hermval(x, coef4), y)
+        coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herm.hermval(x, coef4), y)
+        # check things still work if deg is not in strictly increasing order
+        coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herm.hermval(x, coef4), y)
+        #
+        coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        y[0::2] = 0
+        wcoef3 = herm.hermfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares
+        # sum to zero.
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
+        assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
+        # test fitting only even polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = herm.hermfit(x, y, 4)
+        assert_almost_equal(herm.hermval(x, coef1), y)
+        coef2 = herm.hermfit(x, y, [0, 2, 4])
+        assert_almost_equal(herm.hermval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion:
+
+    def test_raises(self):
+        assert_raises(ValueError, herm.hermcompanion, [])
+        assert_raises(ValueError, herm.hermcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0] * i + [1]
+            assert_(herm.hermcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
+
+
+class TestGauss:
+
+    def test_100(self):
+        x, w = herm.hermgauss(100)
+
+        # test orthogonality. Note that the results need to be normalized,
+        # otherwise the huge values that can arise from fast growing
+        # functions like Laguerre can be very confusing.
+        v = herm.hermvander(x, 99)
+        vv = np.dot(v.T * w, v)
+        vd = 1 / np.sqrt(vv.diagonal())
+        vv = vd[:, None] * vv * vd
+        assert_almost_equal(vv, np.eye(100))
+
+        # check that the integral of 1 is correct
+        tgt = np.sqrt(np.pi)
+        assert_almost_equal(w.sum(), tgt)
+
+
+class TestMisc:
+
+    def test_hermfromroots(self):
+        res = herm.hermfromroots([])
+        assert_almost_equal(trim(res), [1])
+        for i in range(1, 5):
+            roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2])
+            pol = herm.hermfromroots(roots)
+            res = herm.hermval(roots, pol)
+            tgt = 0
+            assert_(len(pol) == i + 1)
+            assert_almost_equal(herm.herm2poly(pol)[-1], 1)
+            assert_almost_equal(res, tgt)
+
+    def test_hermroots(self):
+        assert_almost_equal(herm.hermroots([1]), [])
+        assert_almost_equal(herm.hermroots([1, 1]), [-.5])
+        for i in range(2, 5):
+            tgt = np.linspace(-1, 1, i)
+            res = herm.hermroots(herm.hermfromroots(tgt))
+            assert_almost_equal(trim(res), trim(tgt))
+
+    def test_hermtrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, herm.hermtrim, coef, -1)
+
+        # Test results
+        assert_equal(herm.hermtrim(coef), coef[:-1])
+        assert_equal(herm.hermtrim(coef, 1), coef[:-3])
+        assert_equal(herm.hermtrim(coef, 2), [0])
+
+    def test_hermline(self):
+        assert_equal(herm.hermline(3, 4), [3, 2])
+
+    def test_herm2poly(self):
+        for i in range(10):
+            assert_almost_equal(herm.herm2poly([0] * i + [1]), Hlist[i])
+
+    def test_poly2herm(self):
+        for i in range(10):
+            assert_almost_equal(herm.poly2herm(Hlist[i]), [0] * i + [1])
+
+    def test_weight(self):
+        x = np.linspace(-5, 5, 11)
+        tgt = np.exp(-x**2)
+        res = herm.hermweight(x)
+        assert_almost_equal(res, tgt)
diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_hermite_e.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_hermite_e.py
new file mode 100644
index 0000000..29f34f6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_hermite_e.py
@@ -0,0 +1,559 @@
+"""Tests for hermite_e module.
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.hermite_e as herme
+from numpy.polynomial.polynomial import polyval
+from numpy.testing import (
+    assert_,
+    assert_almost_equal,
+    assert_equal,
+    assert_raises,
+)
+
+He0 = np.array([1])
+He1 = np.array([0, 1])
+He2 = np.array([-1, 0, 1])
+He3 = np.array([0, -3, 0, 1])
+He4 = np.array([3, 0, -6, 0, 1])
+He5 = np.array([0, 15, 0, -10, 0, 1])
+He6 = np.array([-15, 0, 45, 0, -15, 0, 1])
+He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1])
+He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1])
+He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1])
+
+Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
+
+
+def trim(x):
+    return herme.hermetrim(x, tol=1e-6)
+
+
+class TestConstants:
+
+    def test_hermedomain(self):
+        assert_equal(herme.hermedomain, [-1, 1])
+
+    def test_hermezero(self):
+        assert_equal(herme.hermezero, [0])
+
+    def test_hermeone(self):
+        assert_equal(herme.hermeone, [1])
+
+    def test_hermex(self):
+        assert_equal(herme.hermex, [0, 1])
+
+
+class TestArithmetic:
+    x = np.linspace(-3, 3, 100)
+
+    def test_hermeadd(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                tgt = np.zeros(max(i, j) + 1)
+                tgt[i] += 1
+                tgt[j] += 1
+                res = herme.hermeadd([0] * i + [1], [0] * j + [1])
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+    def test_hermesub(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                tgt = np.zeros(max(i, j) + 1)
+                tgt[i] += 1
+                tgt[j] -= 1
+                res = herme.hermesub([0] * i + [1], [0] * j + [1])
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+    def test_hermemulx(self):
+        assert_equal(herme.hermemulx([0]), [0])
+        assert_equal(herme.hermemulx([1]), [0, 1])
+        for i in range(1, 5):
+            ser = [0] * i + [1]
+            tgt = [0] * (i - 1) + [i, 0, 1]
+            assert_equal(herme.hermemulx(ser), tgt)
+
+    def test_hermemul(self):
+        # check values of result
+        for i in range(5):
+            pol1 = [0] * i + [1]
+            val1 = herme.hermeval(self.x, pol1)
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                pol2 = [0] * j + [1]
+                val2 = herme.hermeval(self.x, pol2)
+                pol3 = herme.hermemul(pol1, pol2)
+                val3 = herme.hermeval(self.x, pol3)
+                assert_(len(pol3) == i + j + 1, msg)
+                assert_almost_equal(val3, val1 * val2, err_msg=msg)
+
+    def test_hermediv(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                ci = [0] * i + [1]
+                cj = [0] * j + [1]
+                tgt = herme.hermeadd(ci, cj)
+                quo, rem = herme.hermediv(tgt, ci)
+                res = herme.hermeadd(herme.hermemul(quo, ci), rem)
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+    def test_hermepow(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                c = np.arange(i + 1)
+                tgt = reduce(herme.hermemul, [c] * j, np.array([1]))
+                res = herme.hermepow(c, j)
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+
+class TestEvaluation:
+    # coefficients of 1 + 2*x + 3*x**2
+    c1d = np.array([4., 2., 3.])
+    c2d = np.einsum('i,j->ij', c1d, c1d)
+    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+    # some random values in [-1, 1)
+    x = np.random.random((3, 5)) * 2 - 1
+    y = polyval(x, [1., 2., 3.])
+
+    def test_hermeval(self):
+        # check empty input
+        assert_equal(herme.hermeval([], [1]).size, 0)
+
+        # check normal input
+        x = np.linspace(-1, 1)
+        y = [polyval(x, c) for c in Helist]
+        for i in range(10):
+            msg = f"At i={i}"
+            tgt = y[i]
+            res = herme.hermeval(x, [0] * i + [1])
+            assert_almost_equal(res, tgt, err_msg=msg)
+
+        # check that shape is preserved
+        for i in range(3):
+            dims = [2] * i
+            x =
np.zeros(dims) + assert_equal(herme.hermeval(x, [1]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) + + def test_hermeval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = herme.hermeval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermeval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermeval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herme.hermeval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermeval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermegrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herme.hermegrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermegrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_hermegrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herme.hermegrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermegrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_hermeint(self): + # check exceptions + assert_raises(TypeError, herme.hermeint, [0], .5) + assert_raises(ValueError, herme.hermeint, [0], -1) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) + assert_raises(ValueError, herme.hermeint, [0], scl=[0]) + assert_raises(TypeError, herme.hermeint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = herme.hermeint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i]) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herme.hermeval(-1, hermeint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1) + res = herme.hermeint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = 
herme.hermeint(tgt, m=1, k=[k]) + res = herme.hermeint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) + res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) + res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T + res = herme.hermeint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c) for c in c2d]) + res = herme.hermeint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) + res = herme.hermeint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermeder(self): + # check exceptions + assert_raises(TypeError, herme.hermeder, [0], .5) + assert_raises(ValueError, herme.hermeder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = herme.hermeder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herme.hermeder( + herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T + res = herme.hermeder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeder(c) for c in c2d]) + res = herme.hermeder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_hermevander(self): + # check for 1d x + x = np.arange(3) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + def test_hermevander2d(self): + # also tests hermeval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herme.hermevander2d(x1, x2, [1, 2]) + tgt = herme.hermeval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermevander3d(self): + # also tests hermeval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) + tgt = herme.hermeval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + 
assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermefit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herme.hermefit, [1], [1], -1) + assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) + assert_raises(TypeError, herme.hermefit, [], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) + assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) + assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herme.hermefit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herme.hermefit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + # + coef4 = herme.hermefit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # + coef2d = herme.hermefit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herme.hermefit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) + assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herme.hermefit(x, y, 4) + assert_almost_equal(herme.hermeval(x, coef1), y) + coef2 = herme.hermefit(x, y, [0, 2, 4]) + assert_almost_equal(herme.hermeval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herme.hermecompanion, []) + assert_raises(ValueError, herme.hermecompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(herme.hermecompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = herme.hermegauss(100) + + # test orthogonality. 
Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = herme.hermevander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(2 * np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermefromroots(self): + res = herme.hermefromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = herme.hermefromroots(roots) + res = herme.hermeval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herme.herme2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermeroots(self): + assert_almost_equal(herme.hermeroots([1]), []) + assert_almost_equal(herme.hermeroots([1, 1]), [-1]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herme.hermeroots(herme.hermefromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermetrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herme.hermetrim, coef, -1) + + # Test results + assert_equal(herme.hermetrim(coef), coef[:-1]) + assert_equal(herme.hermetrim(coef, 1), coef[:-3]) + assert_equal(herme.hermetrim(coef, 2), [0]) + + def test_hermeline(self): + assert_equal(herme.hermeline(3, 4), [3, 4]) + + def test_herme2poly(self): + for i in range(10): + assert_almost_equal(herme.herme2poly([0] * i + [1]), Helist[i]) + + def test_poly2herme(self): + for i in range(10): + assert_almost_equal(herme.poly2herme(Helist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-.5 * x**2) + res = herme.hermeweight(x) + assert_almost_equal(res, tgt) diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_laguerre.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_laguerre.py new file mode 100644 index 0000000..6793b78 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_laguerre.py @@ -0,0 +1,540 @@ +"""Tests for laguerre module. 
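+
+Illustrative usage (an editorial addition, not part of the upstream
+docstring): the coefficient vector [0, 1] selects L_1(x) = 1 - x,
+matching the L1 array defined below.
+
+    >>> import numpy.polynomial.laguerre as lag
+    >>> print(lag.lagval(3.0, [0, 1]))  # L_1(3) = 1 - 3
+    -2.0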
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.laguerre as lag +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +L0 = np.array([1]) / 1 +L1 = np.array([1, -1]) / 1 +L2 = np.array([2, -4, 1]) / 2 +L3 = np.array([6, -18, 9, -1]) / 6 +L4 = np.array([24, -96, 72, -16, 1]) / 24 +L5 = np.array([120, -600, 600, -200, 25, -1]) / 120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1]) / 720 + +Llist = [L0, L1, L2, L3, L4, L5, L6] + + +def trim(x): + return lag.lagtrim(x, tol=1e-6) + + +class TestConstants: + + def test_lagdomain(self): + assert_equal(lag.lagdomain, [0, 1]) + + def test_lagzero(self): + assert_equal(lag.lagzero, [0]) + + def test_lagone(self): + assert_equal(lag.lagone, [1]) + + def test_lagx(self): + assert_equal(lag.lagx, [1, -1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_lagadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = lag.lagadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = lag.lagsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagmulx(self): + assert_equal(lag.lagmulx([0]), [0]) + assert_equal(lag.lagmulx([1]), [1, -1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [-i, 2 * i + 1, -(i + 1)] + assert_almost_equal(lag.lagmulx(ser), tgt) + + def test_lagmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = lag.lagval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = lag.lagval(self.x, pol2) + pol3 = lag.lagmul(pol1, pol2) + val3 = lag.lagval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_lagdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = lag.lagadd(ci, cj) + quo, rem = lag.lagdiv(tgt, ci) + res = lag.lagadd(lag.lagmul(quo, ci), rem) + assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c] * j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([9., -14., 6.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_lagval(self): + # check empty input + assert_equal(lag.lagval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(7): + msg = f"At i={i}" + tgt = y[i] + res = lag.lagval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(lag.lagval(x, [1]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) + + def test_lagval2d(self): + x1, x2, x3 = self.x + 
y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = lag.lagval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.lagval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_lagval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = lag.lagval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.lagval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_laggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = lag.laggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.laggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_laggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = lag.laggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.laggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_lagint(self): + # check exceptions + assert_raises(TypeError, lag.lagint, [0], .5) + assert_raises(ValueError, lag.lagint, [0], -1) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) + assert_raises(ValueError, lag.lagint, [0], scl=[0]) + assert_raises(TypeError, lag.lagint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = lag.lagint([0], m=i, k=k) + assert_almost_equal(res, [1, -1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i]) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(lag.lagval(-1, lagint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1) + res = lag.lagint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k]) + res = lag.lagint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) + res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) + 
assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], scl=2) + res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T + res = lag.lagint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c) for c in c2d]) + res = lag.lagint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) + res = lag.lagint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_lagder(self): + # check exceptions + assert_raises(TypeError, lag.lagder, [0], .5) + assert_raises(ValueError, lag.lagder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = lag.lagder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = lag.lagder(lag.lagint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T + res = lag.lagder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagder(c) for c in c2d]) + res = lag.lagder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_lagvander(self): + # check for 1d x + x = np.arange(3) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + def test_lagvander2d(self): + # also tests lagval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = lag.lagvander2d(x1, x2, [1, 2]) + tgt = lag.lagval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_lagvander3d(self): + # also tests lagval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) + tgt = lag.lagval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_lagfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + # Test exceptions + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + assert_raises(TypeError, lag.lagfit, [1, 2], [1], 
0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) + assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, lag.lagfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = lag.lagfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + # + coef4 = lag.lagfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + # + coef2d = lag.lagfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = lag.lagfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) + assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, lag.lagcompanion, []) + assert_raises(ValueError, lag.lagcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(lag.lagcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) + + +class TestGauss: + + def test_100(self): + x, w = lag.laggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
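+        # Editorial illustration (not from upstream): the 1-point
+        # Gauss-Laguerre rule has node 1 and weight 1, and is exact for
+        # polynomials of degree <= 1 against the weight exp(-x) on [0, inf).
+        xg, wg = lag.laggauss(1)
+        assert_almost_equal(xg, [1.])
+        assert_almost_equal(wg, [1.])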
+ v = lag.lagvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 1.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_lagfromroots(self): + res = lag.lagfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = lag.lagfromroots(roots) + res = lag.lagval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(lag.lag2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_lagroots(self): + assert_almost_equal(lag.lagroots([1]), []) + assert_almost_equal(lag.lagroots([0, 1]), [1]) + for i in range(2, 5): + tgt = np.linspace(0, 3, i) + res = lag.lagroots(lag.lagfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, lag.lagtrim, coef, -1) + + # Test results + assert_equal(lag.lagtrim(coef), coef[:-1]) + assert_equal(lag.lagtrim(coef, 1), coef[:-3]) + assert_equal(lag.lagtrim(coef, 2), [0]) + + def test_lagline(self): + assert_equal(lag.lagline(3, 4), [7, -4]) + + def test_lag2poly(self): + for i in range(7): + assert_almost_equal(lag.lag2poly([0] * i + [1]), Llist[i]) + + def test_poly2lag(self): + for i in range(7): + assert_almost_equal(lag.poly2lag(Llist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(0, 10, 11) + tgt = np.exp(-x) + res = lag.lagweight(x) + assert_almost_equal(res, tgt) diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_legendre.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_legendre.py new file mode 100644 index 0000000..d0ed706 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_legendre.py @@ -0,0 +1,571 @@ +"""Tests for legendre module. 
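+
+Illustrative usage (an editorial addition, not part of the upstream
+docstring): the coefficient vector [0, 0, 1] selects
+P_2(x) = (3*x**2 - 1)/2, matching the L2 array defined below.
+
+    >>> import numpy.polynomial.legendre as leg
+    >>> print(leg.legval(2.0, [0, 0, 1]))  # P_2(2) = (3*4 - 1)/2
+    5.5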
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.legendre as leg +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +L0 = np.array([1]) +L1 = np.array([0, 1]) +L2 = np.array([-1, 0, 3]) / 2 +L3 = np.array([0, -3, 0, 5]) / 2 +L4 = np.array([3, 0, -30, 0, 35]) / 8 +L5 = np.array([0, 15, 0, -70, 0, 63]) / 8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231]) / 16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429]) / 16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435]) / 128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155]) / 128 + +Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] + + +def trim(x): + return leg.legtrim(x, tol=1e-6) + + +class TestConstants: + + def test_legdomain(self): + assert_equal(leg.legdomain, [-1, 1]) + + def test_legzero(self): + assert_equal(leg.legzero, [0]) + + def test_legone(self): + assert_equal(leg.legone, [1]) + + def test_legx(self): + assert_equal(leg.legx, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-1, 1, 100) + + def test_legadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = leg.legadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = leg.legsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legmulx(self): + assert_equal(leg.legmulx([0]), [0]) + assert_equal(leg.legmulx([1]), [0, 1]) + for i in range(1, 5): + tmp = 2 * i + 1 + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i / tmp, 0, (i + 1) / tmp] + assert_equal(leg.legmulx(ser), tgt) + + def test_legmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = leg.legval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = leg.legval(self.x, pol2) + pol3 = leg.legmul(pol1, pol2) + val3 = leg.legval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_legdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = leg.legadd(ci, cj) + quo, rem = leg.legdiv(tgt, ci) + res = leg.legadd(leg.legmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c] * j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2., 2., 2.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_legval(self): + # check empty input + assert_equal(leg.legval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = leg.legval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(leg.legval(x, 
[1]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) + + def test_legval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = leg.legval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.legval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_legval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = leg.legval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.legval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_leggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = leg.leggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.leggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_leggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = leg.leggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.leggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_legint(self): + # check exceptions + assert_raises(TypeError, leg.legint, [0], .5) + assert_raises(ValueError, leg.legint, [0], -1) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + assert_raises(ValueError, leg.legint, [0], lbnd=[0]) + assert_raises(ValueError, leg.legint, [0], scl=[0]) + assert_raises(TypeError, leg.legint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = leg.legint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i]) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(leg.legval(-1, legint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], scl=2) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1) + res = leg.legint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k]) + res = leg.legint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = 
[0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) + res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], scl=2) + res = leg.legint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legint(c) for c in c2d.T]).T + res = leg.legint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c) for c in c2d]) + res = leg.legint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) + res = leg.legint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + def test_legint_zerointord(self): + assert_equal(leg.legint((1, 2, 3), 0), (1, 2, 3)) + + +class TestDerivative: + + def test_legder(self): + # check exceptions + assert_raises(TypeError, leg.legder, [0], .5) + assert_raises(ValueError, leg.legder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = leg.legder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = leg.legder(leg.legint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legder(c) for c in c2d.T]).T + res = leg.legder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legder(c) for c in c2d]) + res = leg.legder(c2d, axis=1) + assert_almost_equal(res, tgt) + + def test_legder_orderhigherthancoeff(self): + c = (1, 2, 3, 4) + assert_equal(leg.legder(c, 4), [0]) + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_legvander(self): + # check for 1d x + x = np.arange(3) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + def test_legvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = leg.legvander2d(x1, x2, [1, 2]) + tgt = leg.legval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_legvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) + tgt = leg.legval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_legvander_negdeg(self): 
+ assert_raises(ValueError, leg.legvander, (1, 2, 3), -1) + + +class TestFitting: + + def test_legfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, leg.legfit, [1], [1], -1) + assert_raises(TypeError, leg.legfit, [[1]], [1], 0) + assert_raises(TypeError, leg.legfit, [], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) + assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, leg.legfit, [1], [1], [-1,]) + assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, leg.legfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = leg.legfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + coef3 = leg.legfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + # + coef4 = leg.legfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # + coef2d = leg.legfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = leg.legfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) + assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = leg.legfit(x, y, 4) + assert_almost_equal(leg.legval(x, coef1), y) + coef2 = leg.legfit(x, y, [0, 2, 4]) + assert_almost_equal(leg.legval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, leg.legcompanion, []) + assert_raises(ValueError, leg.legcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(leg.legcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(leg.legcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = leg.leggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
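+        # Editorial illustration (not from upstream): the 1-point
+        # Gauss-Legendre rule has node 0 and weight 2, and is exact for
+        # polynomials of degree <= 1 on [-1, 1].
+        xg, wg = leg.leggauss(1)
+        assert_almost_equal(xg, [0.])
+        assert_almost_equal(wg, [2.])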
+ v = leg.legvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 2.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_legfromroots(self): + res = leg.legfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = leg.legfromroots(roots) + res = leg.legval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(leg.leg2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_legroots(self): + assert_almost_equal(leg.legroots([1]), []) + assert_almost_equal(leg.legroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = leg.legroots(leg.legfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, leg.legtrim, coef, -1) + + # Test results + assert_equal(leg.legtrim(coef), coef[:-1]) + assert_equal(leg.legtrim(coef, 1), coef[:-3]) + assert_equal(leg.legtrim(coef, 2), [0]) + + def test_legline(self): + assert_equal(leg.legline(3, 4), [3, 4]) + + def test_legline_zeroscl(self): + assert_equal(leg.legline(3, 0), [3]) + + def test_leg2poly(self): + for i in range(10): + assert_almost_equal(leg.leg2poly([0] * i + [1]), Llist[i]) + + def test_poly2leg(self): + for i in range(10): + assert_almost_equal(leg.poly2leg(Llist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11) + tgt = 1. + res = leg.legweight(x) + assert_almost_equal(res, tgt) diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_polynomial.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_polynomial.py new file mode 100644 index 0000000..27513fd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_polynomial.py @@ -0,0 +1,669 @@ +"""Tests for polynomial module. 
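+
+Illustrative usage (an editorial addition, not part of the upstream
+docstring): polyval evaluates a power series given low-to-high-order
+coefficients.
+
+    >>> import numpy.polynomial.polynomial as poly
+    >>> print(poly.polyval(2.0, [1., 2., 3.]))  # 1 + 2*2 + 3*4
+    17.0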
+ +""" +import pickle +from copy import deepcopy +from fractions import Fraction +from functools import reduce + +import numpy as np +import numpy.polynomial.polynomial as poly +import numpy.polynomial.polyutils as pu +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) + + +def trim(x): + return poly.polytrim(x, tol=1e-6) + + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestConstants: + + def test_polydomain(self): + assert_equal(poly.polydomain, [-1, 1]) + + def test_polyzero(self): + assert_equal(poly.polyzero, [0]) + + def test_polyone(self): + assert_equal(poly.polyone, [1]) + + def test_polyx(self): + assert_equal(poly.polyx, [0, 1]) + + def test_copy(self): + x = poly.Polynomial([1, 2, 3]) + y = deepcopy(x) + assert_equal(x, y) + + def test_pickle(self): + x = poly.Polynomial([1, 2, 3]) + y = pickle.loads(pickle.dumps(x)) + assert_equal(x, y) + +class TestArithmetic: + + def test_polyadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = poly.polyadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polysub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = poly.polysub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polymulx(self): + assert_equal(poly.polymulx([0]), [0]) + assert_equal(poly.polymulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i + 1) + [1] + assert_equal(poly.polymulx(ser), tgt) + + def test_polymul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += 1 + res = poly.polymul([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polydiv(self): + # check zero division + assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) + + # check scalar division + quo, rem = poly.polydiv([2], [2]) + assert_equal((quo, rem), (1, 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) + + # check rest. 
+ for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1, 2] + cj = [0] * j + [1, 2] + tgt = poly.polyadd(ci, cj) + quo, rem = poly.polydiv(tgt, ci) + res = poly.polyadd(poly.polymul(quo, ci), rem) + assert_equal(res, tgt, err_msg=msg) + + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c] * j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + +class TestFraction: + + def test_Fraction(self): + # assert we can use Polynomials with coefficients of object dtype + f = Fraction(2, 3) + one = Fraction(1, 1) + zero = Fraction(0, 1) + p = poly.Polynomial([f, f], domain=[zero, one], window=[zero, one]) + + x = 2 * p + p ** 2 + assert_equal(x.coef, np.array([Fraction(16, 9), Fraction(20, 9), + Fraction(4, 9)], dtype=object)) + assert_equal(p.domain, [zero, one]) + assert_equal(p.coef.dtype, np.dtypes.ObjectDType()) + assert_(isinstance(p(f), Fraction)) + assert_equal(p(f), Fraction(10, 9)) + p_deriv = poly.Polynomial([Fraction(2, 3)], domain=[zero, one], + window=[zero, one]) + assert_equal(p.deriv(), p_deriv) + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([1., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = poly.polyval(x, [1., 2., 3.]) + + def test_polyval(self): + # check empty input + assert_equal(poly.polyval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(5): + tgt = y[i] + res = poly.polyval(x, [0] * i + [1]) + assert_almost_equal(res, tgt) + tgt = x * (x**2 - 1) + res = poly.polyval(x, [0, -1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(poly.polyval(x, [1]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) + + # check masked arrays are processed correctly + mask = [False, True, False] + mx = np.ma.array([1, 2, 3], mask=mask) + res = np.polyval([7, 5, 3], mx) + assert_array_equal(res.mask, mask) + + # check subtypes of ndarray are preserved + class C(np.ndarray): + pass + + cx = np.array([1, 2, 3]).view(C) + assert_equal(type(np.polyval([2, 3, 4], cx)), C) + + def test_polyvalfromroots(self): + # check exception for broadcasting x values over root array with + # too few dimensions + assert_raises(ValueError, poly.polyvalfromroots, + [1], [1], tensor=False) + + # check empty input + assert_equal(poly.polyvalfromroots([], [1]).size, 0) + assert_(poly.polyvalfromroots([], [1]).shape == (0,)) + + # check empty input + multidimensional roots + assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) + assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) + + # check scalar input + assert_equal(poly.polyvalfromroots(1, 1), 0) + assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) + + # check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(1, 5): + tgt = y[i] + res = poly.polyvalfromroots(x, [0] * i) + assert_almost_equal(res, tgt) + tgt = x * (x - 1) * (x + 1) + res = poly.polyvalfromroots(x, [-1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(poly.polyvalfromroots(x, 
[1]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) + + # check compatibility with factorization + ptest = [15, 2, -16, -2, 1] + r = poly.polyroots(ptest) + x = np.linspace(-1, 1) + assert_almost_equal(poly.polyval(x, ptest), + poly.polyvalfromroots(x, r)) + + # check multidimensional arrays of roots and values + # check tensor=False + rshape = (3, 5) + x = np.arange(-3, 2) + r = np.random.randint(-5, 5, size=rshape) + res = poly.polyvalfromroots(x, r, tensor=False) + tgt = np.empty(r.shape[1:]) + for ii in range(tgt.size): + tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) + assert_equal(res, tgt) + + # check tensor=True + x = np.vstack([x, 2 * x]) + res = poly.polyvalfromroots(x, r, tensor=True) + tgt = np.empty(r.shape[1:] + x.shape) + for ii in range(r.shape[1]): + for jj in range(x.shape[0]): + tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) + assert_equal(res, tgt) + + def test_polyval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = poly.polyval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polyval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_polyval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = poly.polyval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polyval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_polygrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = poly.polygrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polygrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_polygrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = poly.polygrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polygrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_polyint(self): + # check exceptions + assert_raises(TypeError, poly.polyint, [0], .5) + assert_raises(ValueError, poly.polyint, [0], -1) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) + assert_raises(ValueError, poly.polyint, [0], scl=[0]) + assert_raises(TypeError, poly.polyint, [0], axis=.5) + assert_raises(TypeError, poly.polyint, [1, 1], 1.) 
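+        # Editorial illustration (not from upstream): integrating 1 + 2*x
+        # once with integration constant k=3 gives 3 + x + x**2; k is the
+        # value of the antiderivative at lbnd (default 0).
+        assert_almost_equal(poly.polyint([1, 2], k=3), [3, 1, 1])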
+ + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = poly.polyint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + res = poly.polyint(pol, m=1, k=[i]) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + res = poly.polyint(pol, m=1, k=[i], lbnd=-1) + assert_almost_equal(poly.polyval(-1, res), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + res = poly.polyint(pol, m=1, k=[i], scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1) + res = poly.polyint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k]) + res = poly.polyint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) + res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], scl=2) + res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T + res = poly.polyint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c) for c in c2d]) + res = poly.polyint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) + res = poly.polyint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_polyder(self): + # check exceptions + assert_raises(TypeError, poly.polyder, [0], .5) + assert_raises(ValueError, poly.polyder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = poly.polyder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = poly.polyder(poly.polyint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T + res = poly.polyder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyder(c) for c in c2d]) + res = poly.polyder(c2d, axis=1) + 
assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_polyvander(self): + # check for 1d x + x = np.arange(3) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + def test_polyvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = poly.polyvander2d(x1, x2, [1, 2]) + tgt = poly.polyval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_polyvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) + tgt = poly.polyval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_polyvandernegdeg(self): + x = np.arange(3) + assert_raises(ValueError, poly.polyvander, x, -1) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, poly.polycompanion, []) + assert_raises(ValueError, poly.polycompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(poly.polycompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(poly.polycompanion([1, 2])[0, 0] == -.5) + + +class TestMisc: + + def test_polyfromroots(self): + res = poly.polyfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + tgt = Tlist[i] + res = poly.polyfromroots(roots) * 2**(i - 1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyroots(self): + assert_almost_equal(poly.polyroots([1]), []) + assert_almost_equal(poly.polyroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + # Testing for larger root values + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. + assert_almost_equal(res, tgt, 15 - int(np.log10(i))) + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1.01, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. 
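+            # (Editorial note: double precision carries roughly 15-16
+            # significant decimal digits, so a root of magnitude 10**k can
+            # only be recovered to about 15 - k digits.)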
+            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
+
+    def test_polyfit(self):
+        def f(x):
+            return x * (x - 1) * (x - 2)
+
+        def f2(x):
+            return x**4 + x**2 + 1
+
+        # Test exceptions
+        assert_raises(ValueError, poly.polyfit, [1], [1], -1)
+        assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)
+        assert_raises(TypeError, poly.polyfit, [], [1], 0)
+        assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)
+        assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)
+        assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)
+        assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])
+        assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])
+        assert_raises(ValueError, poly.polyfit, [1], [1], [-1,])
+        assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6])
+        assert_raises(TypeError, poly.polyfit, [1], [1], [])
+
+        # Test fit
+        x = np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = poly.polyfit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(poly.polyval(x, coef3), y)
+        coef3 = poly.polyfit(x, y, [0, 1, 2, 3])
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(poly.polyval(x, coef3), y)
+        #
+        coef4 = poly.polyfit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(poly.polyval(x, coef4), y)
+        coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(poly.polyval(x, coef4), y)
+        #
+        coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = poly.polyfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares sum to zero
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
+        assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])
+        # test fitting only even polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = poly.polyfit(x, y, 4)
+        assert_almost_equal(poly.polyval(x, coef1), y)
+        coef2 = poly.polyfit(x, y, [0, 2, 4])
+        assert_almost_equal(poly.polyval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
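[Editorial note, not part of the patch: the weighted-fit idiom exercised above is easy to miss when skimming. This minimal standalone sketch restates what those assertions verify, using only the public `numpy.polynomial.polynomial` API — `polyfit` accepts a scalar degree or an explicit term list, and zero weights exclude points from the fit.]

```python
# Minimal editorial sketch of the weighted polyfit behavior tested above.
import numpy as np
import numpy.polynomial.polynomial as poly

x = np.linspace(0, 2)
y = x * (x - 1) * (x - 2)                      # exact cubic, roots 0, 1, 2

coef = poly.polyfit(x, y, 3)                   # scalar degree...
coef_terms = poly.polyfit(x, y, [0, 1, 2, 3])  # ...or explicit term list
assert np.allclose(coef, coef_terms)
assert np.allclose(poly.polyval(x, coef), y)

# Corrupt the even-indexed samples, then exclude them with zero weights;
# the fit must still match the clean-data coefficients.
w = np.zeros_like(x)
w[1::2] = 1
yw = y.copy()
yw[0::2] = 0
assert np.allclose(poly.polyfit(x, yw, 3, w=w), coef)
```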
+    def test_polytrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, poly.polytrim, coef, -1)
+
+        # Test results
+        assert_equal(poly.polytrim(coef), coef[:-1])
+        assert_equal(poly.polytrim(coef, 1), coef[:-3])
+        assert_equal(poly.polytrim(coef, 2), [0])
+
+    def test_polyline(self):
+        assert_equal(poly.polyline(3, 4), [3, 4])
+
+    def test_polyline_zero(self):
+        assert_equal(poly.polyline(3, 0), [3])
+
+    def test_fit_degenerate_domain(self):
+        p = poly.Polynomial.fit([1], [2], deg=0)
+        assert_equal(p.coef, [2.])
+        p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0)
+        assert_almost_equal(p.coef, [2.05])
+        with assert_warns(pu.RankWarning):
+            p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1)
+
+    def test_result_type(self):
+        w = np.array([-1, 1], dtype=np.float32)
+        p = np.polynomial.Polynomial(w, domain=w, window=w)
+        v = p(2)
+        assert_equal(v.dtype, np.float32)
+
+        arr = np.polydiv(1, np.float32(1))
+        assert_equal(arr[0].dtype, np.float64)
diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_polyutils.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_polyutils.py
new file mode 100644
index 0000000..96e88b9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_polyutils.py
@@ -0,0 +1,128 @@
+"""Tests for polyutils module.
+
+"""
+import numpy as np
+import numpy.polynomial.polyutils as pu
+from numpy.testing import (
+    assert_,
+    assert_almost_equal,
+    assert_equal,
+    assert_raises,
+)
+
+
+class TestMisc:
+
+    def test_trimseq(self):
+        tgt = [1]
+        for num_trailing_zeros in range(5):
+            res = pu.trimseq([1] + [0] * num_trailing_zeros)
+            assert_equal(res, tgt)
+
+    def test_trimseq_empty_input(self):
+        for empty_seq in [[], np.array([], dtype=np.int32)]:
+            assert_equal(pu.trimseq(empty_seq), empty_seq)
+
+    def test_as_series(self):
+        # check exceptions
+        assert_raises(ValueError, pu.as_series, [[]])
+        assert_raises(ValueError, pu.as_series, [[[1, 2]]])
+        assert_raises(ValueError, pu.as_series, [[1], ['a']])
+        # check common types
+        types = ['i', 'd', 'O']
+        for i in range(len(types)):
+            for j in range(i):
+                ci = np.ones(1, types[i])
+                cj = np.ones(1, types[j])
+                [resi, resj] = pu.as_series([ci, cj])
+                assert_(resi.dtype.char == resj.dtype.char)
+                assert_(resj.dtype.char == types[i])
+
+    def test_trimcoef(self):
+        coef = [2, -1, 1, 0]
+        # Test exceptions
+        assert_raises(ValueError, pu.trimcoef, coef, -1)
+        # Test results
+        assert_equal(pu.trimcoef(coef), coef[:-1])
+        assert_equal(pu.trimcoef(coef, 1), coef[:-3])
+        assert_equal(pu.trimcoef(coef, 2), [0])
+
+    def test_vander_nd_exception(self):
+        # n_dims != len(points)
+        assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90])
+        # n_dims != len(degrees)
+        assert_raises(ValueError, pu._vander_nd, (), (), [90.65])
+        # n_dims == 0
+        assert_raises(ValueError, pu._vander_nd, (), (), [])
+
+    def test_div_zerodiv(self):
+        # c2[-1] == 0
+        assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0])
+
+    def test_pow_too_large(self):
+        # power > maxpower
+        assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4)
+
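[Editorial aside, not part of the vendored file: a quick illustration of the `polyutils` helpers that `TestMisc` above exercises; the printed values mirror the assertions.]

```python
# Editorial sketch of the polyutils helpers covered by TestMisc.
import numpy as np
import numpy.polynomial.polyutils as pu

print(pu.trimseq([1, 2, 0, 0]))       # [1, 2]  (trailing zeros dropped)
print(pu.trimcoef([2, -1, 1, 0]))     # [ 2. -1.  1.]
print(pu.trimcoef([2, -1, 1, 0], 2))  # [0.]  (every trailing |coef| <= 2 trimmed)

# as_series promotes a list of coefficient arrays to a common dtype:
a, b = pu.as_series([np.ones(1, 'i'), np.ones(1, 'd')])
print(a.dtype, b.dtype)               # float64 float64
```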
+class TestDomain:
+
+    def test_getdomain(self):
+        # test for real values
+        x = [1, 10, 3, -1]
+        tgt = [-1, 10]
+        res = pu.getdomain(x)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        x = [1 + 1j, 1 - 1j, 0, 2]
+        tgt = [-1j, 2 + 1j]
+        res = pu.getdomain(x)
+        assert_almost_equal(res, tgt)
+
+    def test_mapdomain(self):
+        # test for real values
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = dom2
+        res = pu.mapdomain(dom1, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        dom1 = [0 - 1j, 2 + 1j]
+        dom2 = [-2, 2]
+        tgt = dom2
+        x = dom1
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for multidimensional arrays
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = np.array([dom2, dom2])
+        x = np.array([dom1, dom1])
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test that subtypes are preserved.
+        class MyNDArray(np.ndarray):
+            pass
+
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        x = np.array([dom1, dom1]).view(MyNDArray)
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_(isinstance(res, MyNDArray))
+
+    def test_mapparms(self):
+        # test for real values
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = [1, .5]
+        res = pu.mapparms(dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        dom1 = [0 - 1j, 2 + 1j]
+        dom2 = [-2, 2]
+        tgt = [-1 + 1j, 1 - 1j]
+        res = pu.mapparms(dom1, dom2)
+        assert_almost_equal(res, tgt)
diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_printing.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_printing.py
new file mode 100644
index 0000000..d3735e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_printing.py
@@ -0,0 +1,555 @@
+from decimal import Decimal
+
+# For testing polynomial printing with object arrays
+from fractions import Fraction
+from math import inf, nan
+
+import pytest
+
+import numpy.polynomial as poly
+from numpy._core import arange, array, printoptions
+from numpy.testing import assert_, assert_equal
+
+
+class TestStrUnicodeSuperSubscripts:
+
+    @pytest.fixture(scope='class', autouse=True)
+    def use_unicode(self):
+        poly.set_default_printstyle('unicode')
+
+    @pytest.mark.parametrize(('inp', 'tgt'), (
+        ([1, 2, 3], "1.0 + 2.0·x + 3.0·x²"),
+        ([-1, 0, 3, -1], "-1.0 + 0.0·x + 3.0·x² - 1.0·x³"),
+        (arange(12), ("0.0 + 1.0·x + 2.0·x² + 3.0·x³ + 4.0·x⁴ + 5.0·x⁵ + "
+                      "6.0·x⁶ + 7.0·x⁷ +\n8.0·x⁸ + 9.0·x⁹ + 10.0·x¹⁰ + "
+                      "11.0·x¹¹")),
+    ))
+    def test_polynomial_str(self, inp, tgt):
+        p = poly.Polynomial(inp)
+        res = str(p)
+        assert_equal(res, tgt)
+
+    @pytest.mark.parametrize(('inp', 'tgt'), (
+        ([1, 2, 3], "1.0 + 2.0·T₁(x) + 3.0·T₂(x)"),
+        ([-1, 0, 3, -1], "-1.0 + 0.0·T₁(x) + 3.0·T₂(x) - 1.0·T₃(x)"),
+        (arange(12), ("0.0 + 1.0·T₁(x) + 2.0·T₂(x) + 3.0·T₃(x) + 4.0·T₄(x) + "
+                      "5.0·T₅(x) +\n6.0·T₆(x) + 7.0·T₇(x) + 8.0·T₈(x) + "
+                      "9.0·T₉(x) + 10.0·T₁₀(x) + 11.0·T₁₁(x)")),
+    ))
+    def test_chebyshev_str(self, inp, tgt):
+        res = str(poly.Chebyshev(inp))
+        assert_equal(res, tgt)
+
+    @pytest.mark.parametrize(('inp', 'tgt'), (
+        ([1, 2, 3], "1.0 + 2.0·P₁(x) + 3.0·P₂(x)"),
+        ([-1, 0, 3, -1], "-1.0 + 0.0·P₁(x) + 3.0·P₂(x) - 1.0·P₃(x)"),
+        (arange(12), ("0.0 + 1.0·P₁(x) + 2.0·P₂(x) + 3.0·P₃(x) + 4.0·P₄(x) + "
+                      "5.0·P₅(x) +\n6.0·P₆(x) + 7.0·P₇(x) + 8.0·P₈(x) + "
+                      "9.0·P₉(x) + 10.0·P₁₀(x) + 11.0·P₁₁(x)")),
+    ))
+    def test_legendre_str(self, inp, tgt):
+        res = str(poly.Legendre(inp))
+        assert_equal(res, tgt)
+
+    @pytest.mark.parametrize(('inp', 'tgt'), (
+        ([1, 2, 3], "1.0 + 2.0·H₁(x) + 3.0·H₂(x)"),
+        ([-1, 0, 3, -1], "-1.0 + 0.0·H₁(x) + 
3.0·H₂(x) - 1.0·H₃(x)"), + (arange(12), ("0.0 + 1.0·H₁(x) + 2.0·H₂(x) + 3.0·H₃(x) + 4.0·H₄(x) + " + "5.0·H₅(x) +\n6.0·H₆(x) + 7.0·H₇(x) + 8.0·H₈(x) + " + "9.0·H₉(x) + 10.0·H₁₀(x) + 11.0·H₁₁(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·He₁(x) + 3.0·He₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·He₁(x) + 3.0·He₂(x) - 1.0·He₃(x)"), + (arange(12), ("0.0 + 1.0·He₁(x) + 2.0·He₂(x) + 3.0·He₃(x) + " + "4.0·He₄(x) + 5.0·He₅(x) +\n6.0·He₆(x) + 7.0·He₇(x) + " + "8.0·He₈(x) + 9.0·He₉(x) + 10.0·He₁₀(x) +\n" + "11.0·He₁₁(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·L₁(x) + 3.0·L₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·L₁(x) + 3.0·L₂(x) - 1.0·L₃(x)"), + (arange(12), ("0.0 + 1.0·L₁(x) + 2.0·L₂(x) + 3.0·L₃(x) + 4.0·L₄(x) + " + "5.0·L₅(x) +\n6.0·L₆(x) + 7.0·L₇(x) + 8.0·L₈(x) + " + "9.0·L₉(x) + 10.0·L₁₀(x) + 11.0·L₁₁(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + def test_polynomial_str_domains(self): + res = str(poly.Polynomial([0, 1])) + tgt = '0.0 + 1.0·x' + assert_equal(res, tgt) + + res = str(poly.Polynomial([0, 1], domain=[1, 2])) + tgt = '0.0 + 1.0·(-3.0 + 2.0x)' + assert_equal(res, tgt) + +class TestStrAscii: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 x + 3.0 x**2"), + ([-1, 0, 3, -1], "-1.0 + 0.0 x + 3.0 x**2 - 1.0 x**3"), + (arange(12), ("0.0 + 1.0 x + 2.0 x**2 + 3.0 x**3 + 4.0 x**4 + " + "5.0 x**5 + 6.0 x**6 +\n7.0 x**7 + 8.0 x**8 + " + "9.0 x**9 + 10.0 x**10 + 11.0 x**11")), + )) + def test_polynomial_str(self, inp, tgt): + res = str(poly.Polynomial(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 T_1(x) + 3.0 T_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 T_1(x) + 3.0 T_2(x) - 1.0 T_3(x)"), + (arange(12), ("0.0 + 1.0 T_1(x) + 2.0 T_2(x) + 3.0 T_3(x) + " + "4.0 T_4(x) + 5.0 T_5(x) +\n6.0 T_6(x) + 7.0 T_7(x) + " + "8.0 T_8(x) + 9.0 T_9(x) + 10.0 T_10(x) +\n" + "11.0 T_11(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 P_1(x) + 3.0 P_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 P_1(x) + 3.0 P_2(x) - 1.0 P_3(x)"), + (arange(12), ("0.0 + 1.0 P_1(x) + 2.0 P_2(x) + 3.0 P_3(x) + " + "4.0 P_4(x) + 5.0 P_5(x) +\n6.0 P_6(x) + 7.0 P_7(x) + " + "8.0 P_8(x) + 9.0 P_9(x) + 10.0 P_10(x) +\n" + "11.0 P_11(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 H_1(x) + 3.0 H_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 H_1(x) + 3.0 H_2(x) - 1.0 H_3(x)"), + (arange(12), ("0.0 + 1.0 H_1(x) + 2.0 H_2(x) + 3.0 H_3(x) + " + "4.0 H_4(x) + 5.0 H_5(x) +\n6.0 H_6(x) + 7.0 H_7(x) + " + "8.0 H_8(x) + 9.0 H_9(x) + 10.0 H_10(x) +\n" + "11.0 H_11(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 He_1(x) + 3.0 He_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 He_1(x) + 3.0 He_2(x) - 1.0 He_3(x)"), + (arange(12), ("0.0 + 1.0 He_1(x) + 2.0 He_2(x) + 3.0 
He_3(x) + " + "4.0 He_4(x) +\n5.0 He_5(x) + 6.0 He_6(x) + " + "7.0 He_7(x) + 8.0 He_8(x) + 9.0 He_9(x) +\n" + "10.0 He_10(x) + 11.0 He_11(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 L_1(x) + 3.0 L_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 L_1(x) + 3.0 L_2(x) - 1.0 L_3(x)"), + (arange(12), ("0.0 + 1.0 L_1(x) + 2.0 L_2(x) + 3.0 L_3(x) + " + "4.0 L_4(x) + 5.0 L_5(x) +\n6.0 L_6(x) + 7.0 L_7(x) + " + "8.0 L_8(x) + 9.0 L_9(x) + 10.0 L_10(x) +\n" + "11.0 L_11(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + def test_polynomial_str_domains(self): + res = str(poly.Polynomial([0, 1])) + tgt = '0.0 + 1.0 x' + assert_equal(res, tgt) + + res = str(poly.Polynomial([0, 1], domain=[1, 2])) + tgt = '0.0 + 1.0 (-3.0 + 2.0x)' + assert_equal(res, tgt) + +class TestLinebreaking: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_single_line_one_less(self): + # With 'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 74) + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 123]) + assert_equal(len(str(p)), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 123.0 x**4' + )) + + def test_num_chars_is_linewidth(self): + # len(str(p)) == default linewidth == 75 + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 1234]) + assert_equal(len(str(p)), 75) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 +\n1234.0 x**4' + )) + + def test_first_linebreak_multiline_one_less_than_linewidth(self): + # Multiline str where len(first_line) + len(next_term) == lw - 1 == 74 + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678, 1, 12345678] + ) + assert_equal(len(str(p).split('\n')[0]), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 1.0 x**4 +\n12345678.0 x**5' + )) + + def test_first_linebreak_multiline_on_linewidth(self): + # First line is one character longer than previous test + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678.12, 1, 12345678] + ) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.12 x**3 +\n1.0 x**4 + 12345678.0 x**5' + )) + + @pytest.mark.parametrize(('lw', 'tgt'), ( + (75, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + (45, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 +\n40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 +\n' + '900.0 x**9')), + (132, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + )) + def test_linewidth_printoption(self, lw, tgt): + p = poly.Polynomial( + [0, 10, 200, 3000, 40000, 500000, 600000, 70000, 8000, 900] + ) + with printoptions(linewidth=lw): + assert_equal(str(p), tgt) + for line in str(p).split('\n'): + assert_(len(line) < lw) + + +def test_set_default_printoptions(): + p = poly.Polynomial([1, 2, 3]) + c = poly.Chebyshev([1, 2, 3]) + poly.set_default_printstyle('ascii') + assert_equal(str(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(str(c), "1.0 + 2.0 T_1(x) + 3.0 T_2(x)") + poly.set_default_printstyle('unicode') + assert_equal(str(p), "1.0 + 2.0·x + 
3.0·x²") + assert_equal(str(c), "1.0 + 2.0·T₁(x) + 3.0·T₂(x)") + with pytest.raises(ValueError): + poly.set_default_printstyle('invalid_input') + + +def test_complex_coefficients(): + """Test both numpy and built-in complex.""" + coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j] + # numpy complex + p1 = poly.Polynomial(coefs) + # Python complex + p2 = poly.Polynomial(array(coefs, dtype=object)) + poly.set_default_printstyle('unicode') + assert_equal(str(p1), "1j + (1+1j)·x - (2-2j)·x² + (3+0j)·x³") + assert_equal(str(p2), "1j + (1+1j)·x + (-2+2j)·x² + (3+0j)·x³") + poly.set_default_printstyle('ascii') + assert_equal(str(p1), "1j + (1+1j) x - (2-2j) x**2 + (3+0j) x**3") + assert_equal(str(p2), "1j + (1+1j) x + (-2+2j) x**2 + (3+0j) x**3") + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([Fraction(1, 2), Fraction(3, 4)], dtype=object), ( + "1/2 + 3/4·x" + )), + (array([1, 2, Fraction(5, 7)], dtype=object), ( + "1 + 2·x + 5/7·x²" + )), + (array([Decimal('1.00'), Decimal('2.2'), 3], dtype=object), ( + "1.00 + 2.2·x + 3·x²" + )), +)) +def test_numeric_object_coefficients(coefs, tgt): + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([1, 2, 'f'], dtype=object), '1 + 2·x + f·x²'), + (array([1, 2, [3, 4]], dtype=object), '1 + 2·x + [3, 4]·x²'), +)) +def test_nonnumeric_object_coefficients(coefs, tgt): + """ + Test coef fallback for object arrays of non-numeric coefficients. + """ + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +class TestFormat: + def test_format_unicode(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal(format(p, 'unicode'), "1.0 + 2.0·x + 0.0·x² - 1.0·x³") + + def test_format_ascii(self): + poly.set_default_printstyle('unicode') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal( + format(p, 'ascii'), "1.0 + 2.0 x + 0.0 x**2 - 1.0 x**3" + ) + + def test_empty_formatstr(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 3]) + assert_equal(format(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(f"{p}", "1.0 + 2.0 x + 3.0 x**2") + + def test_bad_formatstr(self): + p = poly.Polynomial([1, 2, 0, -1]) + with pytest.raises(ValueError): + format(p, '.2f') + + +@pytest.mark.parametrize(('poly', 'tgt'), ( + (poly.Polynomial, '1.0 + 2.0·z + 3.0·z²'), + (poly.Chebyshev, '1.0 + 2.0·T₁(z) + 3.0·T₂(z)'), + (poly.Hermite, '1.0 + 2.0·H₁(z) + 3.0·H₂(z)'), + (poly.HermiteE, '1.0 + 2.0·He₁(z) + 3.0·He₂(z)'), + (poly.Laguerre, '1.0 + 2.0·L₁(z) + 3.0·L₂(z)'), + (poly.Legendre, '1.0 + 2.0·P₁(z) + 3.0·P₂(z)'), +)) +def test_symbol(poly, tgt): + p = poly([1, 2, 3], symbol='z') + assert_equal(f"{p:unicode}", tgt) + + +class TestRepr: + def test_polynomial_repr(self): + res = repr(poly.Polynomial([0, 1])) + tgt = ( + "Polynomial([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_chebyshev_repr(self): + res = repr(poly.Chebyshev([0, 1])) + tgt = ( + "Chebyshev([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_legendre_repr(self): + res = repr(poly.Legendre([0, 1])) + tgt = ( + "Legendre([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermite_repr(self): + res = repr(poly.Hermite([0, 1])) + tgt = ( + "Hermite([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def 
test_hermiteE_repr(self): + res = repr(poly.HermiteE([0, 1])) + tgt = ( + "HermiteE([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_laguerre_repr(self): + res = repr(poly.Laguerre([0, 1])) + tgt = ( + "Laguerre([0., 1.], domain=[0., 1.], window=[0., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + +class TestLatexRepr: + """Test the latex repr used by Jupyter""" + + @staticmethod + def as_latex(obj): + # right now we ignore the formatting of scalars in our tests, since + # it makes them too verbose. Ideally, the formatting of scalars will + # be fixed such that tests below continue to pass + obj._repr_latex_scalar = lambda x, parens=False: str(x) + try: + return obj._repr_latex_() + finally: + del obj._repr_latex_scalar + + def test_simple_polynomial(self): + # default input + p = poly.Polynomial([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') # noqa: E501 + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') # noqa: E501 + + def test_basis_func(self): + p = poly.Chebyshev([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') + # affine input - check no surplus parens are added + p = poly.Chebyshev([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') # noqa: E501 + + def test_multichar_basis_func(self): + p = poly.HermiteE([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') + + def test_symbol_basic(self): + # default input + p = poly.Polynomial([1, 2, 3], symbol='z') + assert_equal(self.as_latex(p), + r'$z \mapsto 1.0 + 2.0\,z + 3.0\,z^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + z\right) + 3.0\,' + r'\left(1.0 + z\right)^{2}$' + ), + ) + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(2.0z\right) + 3.0\,' + r'\left(2.0z\right)^{2}$' + ), + ) + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + 2.0z\right) + 3.0\,' + r'\left(1.0 + 2.0z\right)^{2}$' + ), + ) + + def test_numeric_object_coefficients(self): + coefs = array([Fraction(1, 2), Fraction(1)]) + p = poly.Polynomial(coefs) + assert_equal(self.as_latex(p), '$x \\mapsto 1/2 + 1\\,x$') + + +SWITCH_TO_EXP = ( + '1.0 + (1.0e-01) x + (1.0e-02) x**2', + '1.2 + (1.2e-01) x + (1.2e-02) x**2', + '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3', + '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3', + '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4', + '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' + '(1.23457e-04) x**4', + '1.234568 + 0.123457 x + 
0.012346 x**2 + 0.001235 x**3 + ' + '(1.234568e-04) x**4 + (1.234568e-05) x**5', + '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' + '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') + +class TestPrintOptions: + """ + Test the output is properly configured via printoptions. + The exponential notation is enabled automatically when the values + are too small or too large. + """ + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_str(self): + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) + assert_equal(str(p), '0.5 + 0.14285714 x + 14285714.28571429 x**2 ' + '+ (1.42857143e+08) x**3') + + with printoptions(precision=3): + assert_equal(str(p), '0.5 + 0.143 x + 14285714.286 x**2 ' + '+ (1.429e+08) x**3') + + def test_latex(self): + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + ' + r'\text{14285714.28571429}\,x^{2} + ' + r'\text{(1.42857143e+08)}\,x^{3}$') + + with printoptions(precision=3): + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.143}\,x + ' + r'\text{14285714.286}\,x^{2} + \text{(1.429e+08)}\,x^{3}$') + + def test_fixed(self): + p = poly.Polynomial([1 / 2]) + assert_equal(str(p), '0.5') + + with printoptions(floatmode='fixed'): + assert_equal(str(p), '0.50000000') + + with printoptions(floatmode='fixed', precision=4): + assert_equal(str(p), '0.5000') + + def test_switch_to_exp(self): + for i, s in enumerate(SWITCH_TO_EXP): + with printoptions(precision=i): + p = poly.Polynomial([1.23456789 * 10**-i + for i in range(i // 2 + 3)]) + assert str(p).replace('\n', ' ') == s + + def test_non_finite(self): + p = poly.Polynomial([nan, inf]) + assert str(p) == 'nan + inf x' + assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' # noqa: RUF027 + with printoptions(nanstr='NAN', infstr='INF'): + assert str(p) == 'NAN + INF x' + assert p._repr_latex_() == \ + r'$x \mapsto \text{NAN} + \text{INF}\,x$' diff --git a/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_symbol.py b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_symbol.py new file mode 100644 index 0000000..3de9e38 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/polynomial/tests/test_symbol.py @@ -0,0 +1,217 @@ +""" +Tests related to the ``symbol`` attribute of the ABCPolyBase class. +""" + +import pytest + +import numpy.polynomial as poly +from numpy._core import array +from numpy.testing import assert_, assert_equal, assert_raises + + +class TestInit: + """ + Test polynomial creation with symbol kwarg. + """ + c = [1, 2, 3] + + def test_default_symbol(self): + p = poly.Polynomial(self.c) + assert_equal(p.symbol, 'x') + + @pytest.mark.parametrize(('bad_input', 'exception'), ( + ('', ValueError), + ('3', ValueError), + (None, TypeError), + (1, TypeError), + )) + def test_symbol_bad_input(self, bad_input, exception): + with pytest.raises(exception): + p = poly.Polynomial(self.c, symbol=bad_input) + + @pytest.mark.parametrize('symbol', ( + 'x', + 'x_1', + 'A', + 'xyz', + 'β', + )) + def test_valid_symbols(self, symbol): + """ + Values for symbol that should pass input validation. + """ + p = poly.Polynomial(self.c, symbol=symbol) + assert_equal(p.symbol, symbol) + + def test_property(self): + """ + 'symbol' attribute is read only. 
+ """ + p = poly.Polynomial(self.c, symbol='x') + with pytest.raises(AttributeError): + p.symbol = 'z' + + def test_change_symbol(self): + p = poly.Polynomial(self.c, symbol='y') + # Create new polynomial from p with different symbol + pt = poly.Polynomial(p.coef, symbol='t') + assert_equal(pt.symbol, 't') + + +class TestUnaryOperators: + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_neg(self): + n = -self.p + assert_equal(n.symbol, 'z') + + def test_scalarmul(self): + out = self.p * 10 + assert_equal(out.symbol, 'z') + + def test_rscalarmul(self): + out = 10 * self.p + assert_equal(out.symbol, 'z') + + def test_pow(self): + out = self.p ** 3 + assert_equal(out.symbol, 'z') + + +@pytest.mark.parametrize( + 'rhs', + ( + poly.Polynomial([4, 5, 6], symbol='z'), + array([4, 5, 6]), + ), +) +class TestBinaryOperatorsSameSymbol: + """ + Ensure symbol is preserved for numeric operations on polynomials with + the same symbol + """ + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_add(self, rhs): + out = self.p + rhs + assert_equal(out.symbol, 'z') + + def test_sub(self, rhs): + out = self.p - rhs + assert_equal(out.symbol, 'z') + + def test_polymul(self, rhs): + out = self.p * rhs + assert_equal(out.symbol, 'z') + + def test_divmod(self, rhs): + for out in divmod(self.p, rhs): + assert_equal(out.symbol, 'z') + + def test_radd(self, rhs): + out = rhs + self.p + assert_equal(out.symbol, 'z') + + def test_rsub(self, rhs): + out = rhs - self.p + assert_equal(out.symbol, 'z') + + def test_rmul(self, rhs): + out = rhs * self.p + assert_equal(out.symbol, 'z') + + def test_rdivmod(self, rhs): + for out in divmod(rhs, self.p): + assert_equal(out.symbol, 'z') + + +class TestBinaryOperatorsDifferentSymbol: + p = poly.Polynomial([1, 2, 3], symbol='x') + other = poly.Polynomial([4, 5, 6], symbol='y') + ops = (p.__add__, p.__sub__, p.__mul__, p.__floordiv__, p.__mod__) + + @pytest.mark.parametrize('f', ops) + def test_binops_fails(self, f): + assert_raises(ValueError, f, self.other) + + +class TestEquality: + p = poly.Polynomial([1, 2, 3], symbol='x') + + def test_eq(self): + other = poly.Polynomial([1, 2, 3], symbol='x') + assert_(self.p == other) + + def test_neq(self): + other = poly.Polynomial([1, 2, 3], symbol='y') + assert_(not self.p == other) + + +class TestExtraMethods: + """ + Test other methods for manipulating/creating polynomial objects. 
+ """ + p = poly.Polynomial([1, 2, 3, 0], symbol='z') + + def test_copy(self): + other = self.p.copy() + assert_equal(other.symbol, 'z') + + def test_trim(self): + other = self.p.trim() + assert_equal(other.symbol, 'z') + + def test_truncate(self): + other = self.p.truncate(2) + assert_equal(other.symbol, 'z') + + @pytest.mark.parametrize('kwarg', ( + {'domain': [-10, 10]}, + {'window': [-10, 10]}, + {'kind': poly.Chebyshev}, + )) + def test_convert(self, kwarg): + other = self.p.convert(**kwarg) + assert_equal(other.symbol, 'z') + + def test_integ(self): + other = self.p.integ() + assert_equal(other.symbol, 'z') + + def test_deriv(self): + other = self.p.deriv() + assert_equal(other.symbol, 'z') + + +def test_composition(): + p = poly.Polynomial([3, 2, 1], symbol="t") + q = poly.Polynomial([5, 1, 0, -1], symbol="λ_1") + r = p(q) + assert r.symbol == "λ_1" + + +# +# Class methods that result in new polynomial class instances +# + + +def test_fit(): + x, y = (range(10),) * 2 + p = poly.Polynomial.fit(x, y, deg=1, symbol='z') + assert_equal(p.symbol, 'z') + + +def test_froomroots(): + roots = [-2, 2] + p = poly.Polynomial.fromroots(roots, symbol='z') + assert_equal(p.symbol, 'z') + + +def test_identity(): + p = poly.Polynomial.identity(domain=[-1, 1], window=[5, 20], symbol='z') + assert_equal(p.symbol, 'z') + + +def test_basis(): + p = poly.Polynomial.basis(3, symbol='z') + assert_equal(p.symbol, 'z') diff --git a/.venv/lib/python3.12/site-packages/numpy/py.typed b/.venv/lib/python3.12/site-packages/numpy/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/random/LICENSE.md b/.venv/lib/python3.12/site-packages/numpy/random/LICENSE.md new file mode 100644 index 0000000..a6cf1b1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/LICENSE.md @@ -0,0 +1,71 @@ +**This software is dual-licensed under the The University of Illinois/NCSA +Open Source License (NCSA) and The 3-Clause BSD License** + +# NCSA Open Source License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Developed by: Kevin Sheppard (, +) +[http://www.kevinsheppard.com](http://www.kevinsheppard.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimers. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimers in the documentation and/or +other materials provided with the distribution. + +Neither the names of Kevin Sheppard, nor the names of any contributors may be +used to endorse or promote products derived from this Software without specific +prior written permission. + +**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH +THE SOFTWARE.** + + +# 3-Clause BSD License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.** + +# Components + +Many parts of this module have been derived from original sources, +often the algorithm's designer. Component licenses are located with +the component code. diff --git a/.venv/lib/python3.12/site-packages/numpy/random/__init__.pxd b/.venv/lib/python3.12/site-packages/numpy/random/__init__.pxd new file mode 100644 index 0000000..1f90572 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/__init__.pxd @@ -0,0 +1,14 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +from numpy.random.bit_generator cimport BitGenerator, SeedSequence diff --git a/.venv/lib/python3.12/site-packages/numpy/random/__init__.py b/.venv/lib/python3.12/site-packages/numpy/random/__init__.py new file mode 100644 index 0000000..3e21d59 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/__init__.py @@ -0,0 +1,213 @@ +""" +======================== +Random Number Generation +======================== + +Use ``default_rng()`` to create a `Generator` and call its methods. 
+ +=============== ========================================================= +Generator +--------------- --------------------------------------------------------- +Generator Class implementing all of the random number distributions +default_rng Default constructor for ``Generator`` +=============== ========================================================= + +============================================= === +BitGenerator Streams that work with Generator +--------------------------------------------- --- +MT19937 +PCG64 +PCG64DXSM +Philox +SFC64 +============================================= === + +============================================= === +Getting entropy to initialize a BitGenerator +--------------------------------------------- --- +SeedSequence +============================================= === + + +Legacy +------ + +For backwards compatibility with previous versions of numpy before 1.17, the +various aliases to the global `RandomState` methods are left alone and do not +use the new `Generator` API. + +==================== ========================================================= +Utility functions +-------------------- --------------------------------------------------------- +random Uniformly distributed floats over ``[0, 1)`` +bytes Uniformly distributed random bytes. +permutation Randomly permute a sequence / generate a random sequence. +shuffle Randomly permute a sequence in place. +choice Random sample from 1-D array. +==================== ========================================================= + +==================== ========================================================= +Compatibility +functions - removed +in the new API +-------------------- --------------------------------------------------------- +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. +random_integers Uniformly distributed integers in a given range. + (deprecated, use ``integers(..., closed=True)`` instead) +random_sample Alias for `random_sample` +randint Uniformly distributed integers in a given range +seed Seed the legacy random number generator. +==================== ========================================================= + +==================== ========================================================= +Univariate +distributions +-------------------- --------------------------------------------------------- +beta Beta distribution over ``[0, 1]``. +binomial Binomial distribution. +chisquare :math:`\\chi^2` distribution. +exponential Exponential distribution. +f F (Fisher-Snedecor) distribution. +gamma Gamma distribution. +geometric Geometric distribution. +gumbel Gumbel distribution. +hypergeometric Hypergeometric distribution. +laplace Laplace distribution. +logistic Logistic distribution. +lognormal Log-normal distribution. +logseries Logarithmic series distribution. +negative_binomial Negative binomial distribution. +noncentral_chisquare Non-central chi-square distribution. +noncentral_f Non-central F distribution. +normal Normal / Gaussian distribution. +pareto Pareto distribution. +poisson Poisson distribution. +power Power distribution. +rayleigh Rayleigh distribution. +triangular Triangular distribution. +uniform Uniform distribution. +vonmises Von Mises circular distribution. +wald Wald (inverse Gaussian) distribution. +weibull Weibull distribution. +zipf Zipf's distribution over ranked data. 
+__all__ = [
+    'beta',
+    'binomial',
+    'bytes',
+    'chisquare',
+    'choice',
+    'dirichlet',
+    'exponential',
+    'f',
+    'gamma',
+    'geometric',
+    'get_state',
+    'gumbel',
+    'hypergeometric',
+    'laplace',
+    'logistic',
+    'lognormal',
+    'logseries',
+    'multinomial',
+    'multivariate_normal',
+    'negative_binomial',
+    'noncentral_chisquare',
+    'noncentral_f',
+    'normal',
+    'pareto',
+    'permutation',
+    'poisson',
+    'power',
+    'rand',
+    'randint',
+    'randn',
+    'random',
+    'random_integers',
+    'random_sample',
+    'ranf',
+    'rayleigh',
+    'sample',
+    'seed',
+    'set_state',
+    'shuffle',
+    'standard_cauchy',
+    'standard_exponential',
+    'standard_gamma',
+    'standard_normal',
+    'standard_t',
+    'triangular',
+    'uniform',
+    'vonmises',
+    'wald',
+    'weibull',
+    'zipf',
+]
+
+# add these for module-freeze analysis (like PyInstaller)
+from . import _bounded_integers, _common, _pickle
+from ._generator import Generator, default_rng
+from ._mt19937 import MT19937
+from ._pcg64 import PCG64, PCG64DXSM
+from ._philox import Philox
+from ._sfc64 import SFC64
+from .bit_generator import BitGenerator, SeedSequence
+from .mtrand import *
+
+__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
+            'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
+            'BitGenerator']
+
+
+def __RandomState_ctor():
+    """Return a RandomState instance.
+
+    This function exists solely to assist (un)pickling.
+
+    Note that the state of the RandomState returned here is irrelevant, as this
+    function's entire purpose is to return a newly allocated RandomState whose
+    state the pickle can set. Consequently, the RandomState returned by this
+    function is a freshly allocated copy with a seed of 0.
+ + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/.venv/lib/python3.12/site-packages/numpy/random/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/random/__init__.pyi new file mode 100644 index 0000000..e9b9fb5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/__init__.pyi @@ -0,0 +1,124 @@ +from ._generator import Generator, default_rng +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import ( + RandomState, + beta, + binomial, + bytes, + chisquare, + choice, + dirichlet, + exponential, + f, + gamma, + geometric, + get_bit_generator, # noqa: F401 + get_state, + gumbel, + hypergeometric, + laplace, + logistic, + lognormal, + logseries, + multinomial, + multivariate_normal, + negative_binomial, + noncentral_chisquare, + noncentral_f, + normal, + pareto, + permutation, + poisson, + power, + rand, + randint, + randn, + random, + random_integers, + random_sample, + ranf, + rayleigh, + sample, + seed, + set_bit_generator, # noqa: F401 + set_state, + shuffle, + standard_cauchy, + standard_exponential, + standard_gamma, + standard_normal, + standard_t, + triangular, + uniform, + vonmises, + wald, + weibull, + zipf, +) + +__all__ = [ + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", + "Generator", + "RandomState", + "SeedSequence", + "MT19937", + "Philox", + "PCG64", + "PCG64DXSM", + "SFC64", + "default_rng", + "BitGenerator", +] diff --git a/.venv/lib/python3.12/site-packages/numpy/random/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..a2ee112 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/__pycache__/_pickle.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/__pycache__/_pickle.cpython-312.pyc new file mode 100644 index 0000000..7053a59 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/__pycache__/_pickle.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..3fb18fc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.pxd b/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.pxd new file mode 100644 index 0000000..607014c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.pxd @@ -0,0 +1,29 @@ +from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, + int8_t, int16_t, int32_t, int64_t, intptr_t) +import numpy as np +cimport numpy as np +ctypedef np.npy_bool bool_t + +from numpy.random cimport bitgen_t + +cdef inline uint64_t _gen_mask(uint64_t max_val) noexcept nogil: + """Mask generator for use in bounded random numbers""" + # Smallest bit mask >= max + cdef uint64_t mask = max_val + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + +cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.pyi b/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.pyi new file mode 100644 index 0000000..c9c2ef6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_bounded_integers.pyi @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_common.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/_common.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..9efc51a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_common.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_common.pxd b/.venv/lib/python3.12/site-packages/numpy/random/_common.pxd new file mode 100644 index 0000000..0de4456 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_common.pxd @@ -0,0 +1,107 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t + +import numpy as np +cimport numpy as np + +from numpy.random cimport bitgen_t + +cdef double POISSON_LAM_MAX +cdef double LEGACY_POISSON_LAM_MAX +cdef uint64_t MAXSIZE + +cdef enum ConstraintType: + CONS_NONE + CONS_NON_NEGATIVE + CONS_POSITIVE + CONS_POSITIVE_NOT_NAN + CONS_BOUNDED_0_1 + CONS_BOUNDED_GT_0_1 + CONS_BOUNDED_LT_0_1 + CONS_GT_1 + CONS_GTE_1 + CONS_POISSON + LEGACY_CONS_POISSON + LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG + +ctypedef ConstraintType constraint_type + +cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t 
cnt, object method) +cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) +cdef object prepare_cffi(bitgen_t *bitgen) +cdef object prepare_ctypes(bitgen_t *bitgen) +cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 + +cdef extern from "include/aligned_malloc.h": + cdef void *PyArray_realloc_aligned(void *p, size_t n) + cdef void *PyArray_malloc_aligned(size_t n) + cdef void *PyArray_calloc_aligned(size_t n, size_t s) + cdef void PyArray_free_aligned(void *p) + +ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil +ctypedef double (*random_double_0)(void *state) noexcept nogil +ctypedef double (*random_double_1)(void *state, double a) noexcept nogil +ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil +ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil + +ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil +ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil +ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil + +ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil +ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil +ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil +ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil +ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil +ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil + +ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil +ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil + +ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil +ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil + +cdef double kahan_sum(double *darr, np.npy_intp n) noexcept + +cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil: + return (rnd >> 11) * (1.0 / 9007199254740992.0) + +cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object wrap_int(object val, object bits) + +cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size) + +cdef validate_output_shape(iter_shape, np.ndarray output) + +cdef object cont(void *func, void *state, object size, object lock, int narg, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint, + object out) + +cdef object disc(void *func, void *state, object size, object lock, + int narg_double, int narg_int64, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint) + +cdef object cont_f(void *func, bitgen_t *state, object size, object lock, + object a, object a_name, constraint_type a_constraint, + object out) + +cdef object cont_broadcast_3(void *func, void *state, object size, object lock, + 
np.ndarray a_arr, object a_name, constraint_type a_constraint,
+                             np.ndarray b_arr, object b_name, constraint_type b_constraint,
+                             np.ndarray c_arr, object c_name, constraint_type c_constraint)
+
+cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
+                                   np.ndarray a_arr, object a_name, constraint_type a_constraint,
+                                   np.ndarray b_arr, object b_name, constraint_type b_constraint,
+                                   np.ndarray c_arr, object c_name, constraint_type c_constraint)
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_common.pyi b/.venv/lib/python3.12/site-packages/numpy/random/_common.pyi
new file mode 100644
index 0000000..b667fd1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/random/_common.pyi
@@ -0,0 +1,16 @@
+from collections.abc import Callable
+from typing import Any, NamedTuple, TypeAlias
+
+import numpy as np
+
+__all__: list[str] = ["interface"]
+
+_CDataVoidPointer: TypeAlias = Any
+
+class interface(NamedTuple):
+    state_address: int
+    state: _CDataVoidPointer
+    next_uint64: Callable[..., np.uint64]
+    next_uint32: Callable[..., np.uint32]
+    next_double: Callable[..., np.float64]
+    bit_generator: _CDataVoidPointer
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc
new file mode 100644
index 0000000..98c5019
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc
new file mode 100644
index 0000000..49d6df2
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/extending.py b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/extending.py
new file mode 100644
index 0000000..ad4c9ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/extending.py
@@ -0,0 +1,44 @@
+"""
+Use cffi to access any of the underlying C functions from distributions.h
+"""
+import os
+
+import cffi
+
+import numpy as np
+
+from .parse import parse_distributions_h
+
+ffi = cffi.FFI()
+
+inc_dir = os.path.join(np.get_include(), 'numpy')
+
+# Basic numpy types
+ffi.cdef('''
+    typedef intptr_t npy_intp;
+    typedef unsigned char npy_bool;
+
+''')
+
+parse_distributions_h(ffi, inc_dir)
+
+lib = ffi.dlopen(np.random._generator.__file__)
+
+# Compare the distributions.h random_standard_normal_fill to
+# Generator.standard_normal
+bit_gen = np.random.PCG64()
+rng = np.random.Generator(bit_gen)
+state = bit_gen.state
+
+interface = rng.bit_generator.cffi
+n = 100
+vals_cffi = ffi.new('double[%d]' % n)
+lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
+
+# reset the state
+bit_gen.state = state
+
+vals = rng.standard_normal(n)
+
+for i in range(n):
+    assert vals[i] == vals_cffi[i]
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/parse.py b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/parse.py
new file mode 100644
index 0000000..0f80adb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cffi/parse.py
@@ -0,0 +1,53 @@
+import os
+
+
+def
parse_distributions_h(ffi, inc_dir): + """ + Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef + + Read the function declarations without the "#define ..." macros that will + be filled in when loading the library. + """ + + with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid: + s = [] + for line in fid: + # massage the include file + if line.strip().startswith('#'): + continue + s.append(line) + ffi.cdef('\n'.join(s)) + + with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid: + s = [] + in_skip = 0 + ignoring = False + for line in fid: + # check for and remove extern "C" guards + if ignoring: + if line.strip().startswith('#endif'): + ignoring = False + continue + if line.strip().startswith('#ifdef __cplusplus'): + ignoring = True + + # massage the include file + if line.strip().startswith('#'): + continue + + # skip any inlined function definition + # which starts with 'static inline xxx(...) {' + # and ends with a closing '}' + if line.strip().startswith('static inline'): + in_skip += line.count('{') + continue + elif in_skip > 0: + in_skip += line.count('{') + in_skip -= line.count('}') + continue + + # replace defines with their value or remove them + line = line.replace('DECLDIR', '') + line = line.replace('RAND_INT_TYPE', 'int64_t') + s.append(line) + ffi.cdef('\n'.join(s)) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/extending.pyx b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/extending.pyx new file mode 100644 index 0000000..6a0f45e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/extending.pyx @@ -0,0 +1,77 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer + +import numpy as np +cimport numpy as np +cimport cython + +from numpy.random cimport bitgen_t +from numpy.random import PCG64 + +np.import_array() + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniform_mean(Py_ssize_t n): + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + cdef np.ndarray randoms + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n) + # Best practice is to acquire the lock whenever generating random values. + # This prevents other threads from modifying the state. Acquiring the lock + # is only necessary if the GIL is also released, as in this example. 
+ with x.lock, nogil: + for i in range(n): + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + return randoms.mean() + + +# This function is declared nogil so it can be used without the GIL below +cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil: + cdef uint32_t mask, delta, val + mask = delta = ub - lb + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + + val = rng.next_uint32(rng.state) & mask + while val > delta: + val = rng.next_uint32(rng.state) & mask + + return lb + val + + +@cython.boundscheck(False) +@cython.wraparound(False) +def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n): + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef uint32_t[::1] out + cdef const char *capsule_name = "BitGenerator" + + x = PCG64() + out = np.empty(n, dtype=np.uint32) + capsule = x.capsule + + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + + with x.lock, nogil: + for i in range(n): + out[i] = bounded_uint(lb, ub, rng) + return np.asarray(out) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/extending_distributions.pyx b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/extending_distributions.pyx new file mode 100644 index 0000000..e1d1ea6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/extending_distributions.pyx @@ -0,0 +1,118 @@ +#cython: language_level=3 +""" +This file shows how to use a BitGenerator to create a distribution. +""" +import numpy as np +cimport numpy as np +cimport cython +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer +from libc.stdint cimport uint16_t, uint64_t +from numpy.random cimport bitgen_t +from numpy.random import PCG64 +from numpy.random.c_distributions cimport ( + random_standard_uniform_fill, random_standard_uniform_fill_f) + +np.import_array() + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniforms(Py_ssize_t n): + """ + Create an array of `n` uniformly distributed doubles. 
+ A 'real' distribution would want to process the values into + some non-uniform distribution + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + + x = PCG64() + capsule = x.capsule + # Optional check that the capsule is from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='float64') + with x.lock, nogil: + for i in range(n): + # Call the function + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + + return randoms + +# cython example 2 +@cython.boundscheck(False) +@cython.wraparound(False) +def uint10_uniforms(Py_ssize_t n): + """Uniform 10 bit integers stored as 16-bit unsigned integers""" + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef uint16_t[::1] random_values + cdef int bits_remaining + cdef int width = 10 + cdef uint64_t buff, mask = 0x3FF + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='uint16') + # Best practice is to release GIL and acquire the lock + bits_remaining = 0 + with x.lock, nogil: + for i in range(n): + if bits_remaining < width: + buff = rng.next_uint64(rng.state) + random_values[i] = buff & mask + buff >>= width + + randoms = np.asarray(random_values) + return randoms + +# cython example 3 +def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): + """ + Create an array of `n` uniformly distributed doubles via a "fill" function. + + A 'real' distribution would want to process the values into + some non-uniform distribution + + Parameters + ---------- + bit_generator: BitGenerator instance + n: int + Output vector length + dtype: {str, dtype}, optional + Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). 
The + default dtype value is 'd' + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef np.ndarray randoms + + capsule = bit_generator.capsule + # Optional check that the capsule is from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + + _dtype = np.dtype(dtype) + randoms = np.empty(n, dtype=_dtype) + if _dtype == np.float32: + with bit_generator.lock: + random_standard_uniform_fill_f(rng, n, np.PyArray_DATA(randoms)) + elif _dtype == np.float64: + with bit_generator.lock: + random_standard_uniform_fill(rng, n, np.PyArray_DATA(randoms)) + else: + raise TypeError('Unsupported dtype %r for random' % _dtype) + return randoms + diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/meson.build b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/meson.build new file mode 100644 index 0000000..7aa367d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_examples/cython/meson.build @@ -0,0 +1,53 @@ +project('random-build-examples', 'c', 'cpp', 'cython') + +py_mod = import('python') +py3 = py_mod.find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +# Keep synced with pyproject.toml +if not cy.version().version_compare('>=3.0.6') + error('tests require Cython >= 3.0.6') +endif + +base_cython_args = [] +if cy.version().version_compare('>=3.1.0') + base_cython_args += ['-Xfreethreading_compatible=True'] +endif + +_numpy_abs = run_command(py3, ['-c', + 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], + check: true).stdout().strip() + +npymath_path = _numpy_abs / '_core' / 'lib' +npy_include_path = _numpy_abs / '_core' / 'include' +npyrandom_path = _numpy_abs / 'random' / 'lib' +npymath_lib = cc.find_library('npymath', dirs: npymath_path) +npyrandom_lib = cc.find_library('npyrandom', dirs: npyrandom_path) + +py3.extension_module( + 'extending_distributions', + 'extending_distributions.pyx', + install: false, + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, +) +py3.extension_module( + 'extending', + 'extending.pyx', + install: false, + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, +) +py3.extension_module( + 'extending_cpp', + 'extending_distributions.pyx', + install: false, + override_options : ['cython_language=cpp'], + cython_args: base_cython_args + ['--module-name', 'extending_cpp'], + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], +) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc new file mode 100644 index 0000000..5880d0a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc new file mode 100644 index 0000000..dc9e447 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/extending.py b/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/extending.py new file mode 100644 index 0000000..c1d0f4f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/extending.py @@ -0,0 +1,86 @@ +from timeit import timeit + +import numba as nb + +import numpy as np +from numpy.random import PCG64 + +bit_gen = PCG64() +next_d = bit_gen.cffi.next_double +state_addr = bit_gen.cffi.state_address + +def normals(n, state): + out = np.empty(n) + for i in range((n + 1) // 2): + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + while r2 >= 1.0 or r2 == 0.0: + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + f = np.sqrt(-2.0 * np.log(r2) / r2) + out[2 * i] = f * x1 + if 2 * i + 1 < n: + out[2 * i + 1] = f * x2 + return out + + +# Compile using Numba +normalsj = nb.jit(normals, nopython=True) +# Must use state address not state with numba +n = 10000 + +def numbacall(): + return normalsj(n, state_addr) + + +rg = np.random.Generator(PCG64()) + +def numpycall(): + return rg.normal(size=n) + + +# Check that the functions work +r1 = numbacall() +r2 = numpycall() +assert r1.shape == (n,) +assert r1.shape == r2.shape + +t1 = timeit(numbacall, number=1000) +print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms') +t2 = timeit(numpycall, number=1000) +print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms') + +# example 2 + +next_u32 = bit_gen.ctypes.next_uint32 +ctypes_state = bit_gen.ctypes.state + +@nb.jit(nopython=True) +def bounded_uint(lb, ub, state): + mask = delta = ub - lb + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + + val = next_u32(state) & mask + while val > delta: + val = next_u32(state) & mask + + return lb + val + + +print(bounded_uint(323, 2394691, ctypes_state.value)) + + +@nb.jit(nopython=True) +def bounded_uints(lb, ub, n, state): + out = np.empty(n, dtype=np.uint32) + for i in range(n): + out[i] = bounded_uint(lb, ub, state) + + +bounded_uints(323, 2394691, 10000000, ctypes_state.value) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/extending_distributions.py b/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/extending_distributions.py new file mode 100644 index 0000000..d0462e7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_examples/numba/extending_distributions.py @@ -0,0 +1,67 @@ +r""" +Building the required library in this example requires a source distribution +of NumPy or clone of the NumPy git repository since distributions.c is not +included in binary distributions. 
+ +On *nix, execute in numpy/random/src/distributions + +export PYTHON_VERSION=3.8 # Python version +export PYTHON_INCLUDE=#path to Python's include folder, usually \ + ${PYTHON_HOME}/include/python${PYTHON_VERSION}m +export NUMPY_INCLUDE=#path to numpy's include folder, usually \ + ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/_core/include +gcc -shared -o libdistributions.so -fPIC distributions.c \ + -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE} +mv libdistributions.so ../../_examples/numba/ + +On Windows + +rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example +set PYTHON_HOME=c:\Anaconda +set PYTHON_VERSION=38 +cl.exe /LD .\distributions.c -DDLL_EXPORT \ + -I%PYTHON_HOME%\lib\site-packages\numpy\_core\include \ + -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib +move distributions.dll ../../_examples/numba/ +""" +import os + +import numba as nb +from cffi import FFI + +import numpy as np +from numpy.random import PCG64 + +ffi = FFI() +if os.path.exists('./distributions.dll'): + lib = ffi.dlopen('./distributions.dll') +elif os.path.exists('./libdistributions.so'): + lib = ffi.dlopen('./libdistributions.so') +else: + raise RuntimeError('Required DLL/so file was not found.') + +ffi.cdef(""" +double random_standard_normal(void *bitgen_state); +""") +x = PCG64() +xffi = x.cffi +bit_generator = xffi.bit_generator + +random_standard_normal = lib.random_standard_normal + + +def normals(n, bit_generator): + out = np.empty(n) + for i in range(n): + out[i] = random_standard_normal(bit_generator) + return out + + +normalsj = nb.jit(normals, nopython=True) + +# Numba requires a memory address for void * +# Can also get address from x.ctypes.bit_generator.value +bit_generator_address = int(ffi.cast('uintptr_t', bit_generator)) + +norm = normalsj(1000, bit_generator_address) +print(norm[:12]) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_generator.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/_generator.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..a7650b5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_generator.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_generator.pyi b/.venv/lib/python3.12/site-packages/numpy/random/_generator.pyi new file mode 100644 index 0000000..dc78a76 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_generator.pyi @@ -0,0 +1,856 @@ +from collections.abc import Callable +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +from numpy import dtype, float32, float64, int64 +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _BoolCodes, + _DoubleCodes, + _DTypeLike, + _DTypeLikeBool, + _Float32Codes, + _Float64Codes, + _FloatLike_co, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntPCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntPCodes, +) +from numpy.random import BitGenerator, RandomState, SeedSequence + +_IntegerT = TypeVar("_IntegerT", bound=np.integer) + +_DTypeLikeFloat32: TypeAlias = ( + dtype[float32] + | _SupportsDType[dtype[float32]] + | type[float32] + | _Float32Codes + | _SingleCodes +) + +_DTypeLikeFloat64: TypeAlias = ( + dtype[float64] + | _SupportsDType[dtype[float64]] + | type[float] + | type[float64] + | _Float64Codes + | _DoubleCodes +) + +class 
Generator: + def __init__(self, bit_generator: BitGenerator) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> None: ... + def __setstate__(self, state: dict[str, Any] | None) -> None: ... + def __reduce__(self) -> tuple[ + Callable[[BitGenerator], Generator], + tuple[BitGenerator], + None]: ... + @property + def bit_generator(self) -> BitGenerator: ... + def spawn(self, n_children: int) -> list[Generator]: ... + def bytes(self, length: int) -> bytes: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + ) -> NDArray[float64]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + *, + out: NDArray[float64] = ..., + ) -> NDArray[float64]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = ..., + ) -> NDArray[float32]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def permutation(self, x: int, axis: int = ...) -> NDArray[int64]: ... + @overload + def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ... + @overload + def standard_exponential( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + *, + out: NDArray[float64] = ..., + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + *, + method: Literal["zig", "inv"] = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + method: Literal["zig", "inv"] = ..., + out: NDArray[float32] | None = ..., + ) -> NDArray[float32]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def random( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def random( + self, + *, + out: NDArray[float64] = ..., + ) -> NDArray[float64]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + *, + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = ..., + ) -> NDArray[float32]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def beta( + self, + a: _FloatLike_co, + b: _FloatLike_co, + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def beta( + self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... 
+ @overload + def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential(self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...) -> NDArray[float64]: ... + + # + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> np.int64: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: type[bool], + endpoint: bool = False, + ) -> bool: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: type[int], + endpoint: bool = False, + ) -> int: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.bool] | _BoolCodes, + endpoint: bool = False, + ) -> np.bool: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> _IntegerT: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> NDArray[np.int64]: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLikeBool, + endpoint: bool = False, + ) -> NDArray[np.bool]: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> NDArray[_IntegerT]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> np.int8: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> NDArray[np.int8]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> np.uint8: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> NDArray[np.uint8]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> np.int16: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> NDArray[np.int16]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> np.uint16: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> NDArray[np.uint16]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> np.int32: ... 
+ @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> NDArray[np.int32]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> np.uint32: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> NDArray[np.uint32]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> np.uint64: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> NDArray[np.uint64]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> np.intp: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> NDArray[np.intp]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> np.uintp: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> NDArray[np.uintp]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + dtype: DTypeLike = ..., + endpoint: bool = False, + ) -> Any: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: DTypeLike = ..., + endpoint: bool = False, + ) -> NDArray[Any]: ... + + # TODO: Use a TypeVar _T here to get away from Any output? + # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> NDArray[int64]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> NDArray[Any]: ... + @overload + def uniform( + self, + low: _FloatLike_co = ..., + high: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def normal( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... 
# type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: _FloatLike_co, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + *, + out: NDArray[float64] = ..., + ) -> NDArray[float64]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = ..., + ) -> NDArray[float32]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def gamma( + self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def f( + self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def noncentral_f( + self, + dfnum: _FloatLike_co, + dfden: _FloatLike_co, + nonc: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def noncentral_chisquare( + self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> NDArray[float64]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> NDArray[float64]: ... + @overload + def vonmises( + self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... 
+ ) -> NDArray[float64]: ... + @overload + def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + @overload + def laplace( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def gumbel( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def logistic( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def lognormal( + self, + mean: _FloatLike_co = ..., + sigma: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def wald( + self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def wald( + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def triangular( + self, + left: _FloatLike_co, + mode: _FloatLike_co, + right: _FloatLike_co, + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + @overload + def negative_binomial( + self, n: _FloatLike_co, p: _FloatLike_co, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + @overload + def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + @overload + def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... 
+ @overload + def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + @overload + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: _ShapeLike | None = ..., + ) -> NDArray[int64]: ... + @overload + def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + *, + method: Literal["svd", "eigh", "cholesky"] = ..., + ) -> NDArray[float64]: ... + def multinomial( + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + def multivariate_hypergeometric( + self, + colors: _ArrayLikeInt_co, + nsample: int, + size: _ShapeLike | None = ..., + method: Literal["marginals", "count"] = ..., + ) -> NDArray[int64]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + def permuted( + self, x: ArrayLike, *, axis: int | None = ..., out: NDArray[Any] | None = ... + ) -> NDArray[Any]: ... + def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... + +def default_rng( + seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = ... +) -> Generator: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..7435865 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_mt19937.pyi b/.venv/lib/python3.12/site-packages/numpy/random/_mt19937.pyi new file mode 100644 index 0000000..70b2506 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_mt19937.pyi @@ -0,0 +1,25 @@ +from typing import TypedDict, type_check_only + +from numpy import uint32 +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +@type_check_only +class _MT19937Internal(TypedDict): + key: NDArray[uint32] + pos: int + +@type_check_only +class _MT19937State(TypedDict): + bit_generator: str + state: _MT19937Internal + +class MT19937(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... + def jumped(self, jumps: int = ...) -> MT19937: ... + @property + def state(self) -> _MT19937State: ... + @state.setter + def state(self, value: _MT19937State) -> None: ... 
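
The _mt19937.pyi stub above types the generator's state as a dict holding a 624-word uint32 'key' plus an integer 'pos', and declares jumped(), which returns a copy advanced as if 2**128 draws had occurred. A minimal sketch of that API against public numpy.random, not part of the vendored files (the seed is arbitrary):

    from numpy.random import MT19937, Generator

    mt = MT19937(1234)                         # seed chosen arbitrarily
    st = mt.state                              # shaped like _MT19937State above
    assert st['bit_generator'] == 'MT19937'
    assert st['state']['key'].shape == (624,)

    # jumped() leaves `mt` untouched and returns a far-ahead copy --
    # the standard recipe for independent parallel streams.
    streams = [Generator(mt.jumped(j)) for j in range(1, 5)]
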
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..26ec05b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_pcg64.pyi b/.venv/lib/python3.12/site-packages/numpy/random/_pcg64.pyi new file mode 100644 index 0000000..5dc7bb6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_pcg64.pyi @@ -0,0 +1,44 @@ +from typing import TypedDict, type_check_only + +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +@type_check_only +class _PCG64Internal(TypedDict): + state: int + inc: int + +@type_check_only +class _PCG64State(TypedDict): + bit_generator: str + state: _PCG64Internal + has_uint32: int + uinteger: int + +class PCG64(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64: ... + +class PCG64DXSM(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64DXSM: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_philox.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/_philox.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..2cb63d5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_philox.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_philox.pyi b/.venv/lib/python3.12/site-packages/numpy/random/_philox.pyi new file mode 100644 index 0000000..d8895bb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_philox.pyi @@ -0,0 +1,39 @@ +from typing import TypedDict, type_check_only + +from numpy import uint64 +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +@type_check_only +class _PhiloxInternal(TypedDict): + counter: NDArray[uint64] + key: NDArray[uint64] + +@type_check_only +class _PhiloxState(TypedDict): + bit_generator: str + state: _PhiloxInternal + buffer: NDArray[uint64] + buffer_pos: int + has_uint32: int + uinteger: int + +class Philox(BitGenerator): + def __init__( + self, + seed: _ArrayLikeInt_co | SeedSequence | None = ..., + counter: _ArrayLikeInt_co | None = ..., + key: _ArrayLikeInt_co | None = ..., + ) -> None: ... + @property + def state( + self, + ) -> _PhiloxState: ... + @state.setter + def state( + self, + value: _PhiloxState, + ) -> None: ... + def jumped(self, jumps: int = ...) -> Philox: ... + def advance(self, delta: int) -> Philox: ... 
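
PCG64, PCG64DXSM and Philox additionally expose advance(delta), which moves the stream as if delta draws from the core generator had been consumed without producing them; this is the cheap way to carve one seed into non-overlapping blocks. An illustrative sketch using only the public API, not part of the vendored files (seed and block size are arbitrary):

    from numpy.random import PCG64, Generator

    # Give each of four workers its own 2**40-draw block of one logical stream.
    workers = []
    for k in range(4):
        bg = PCG64(20240822)           # same seed for every worker
        bg.advance(k * 2**40)          # advance() mutates bg and returns it
        workers.append(Generator(bg))
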
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_pickle.py b/.venv/lib/python3.12/site-packages/numpy/random/_pickle.py new file mode 100644 index 0000000..05f7232 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_pickle.py @@ -0,0 +1,88 @@ +from ._generator import Generator +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator +from .mtrand import RandomState + +BitGenerators = {'MT19937': MT19937, + 'PCG64': PCG64, + 'PCG64DXSM': PCG64DXSM, + 'Philox': Philox, + 'SFC64': SFC64, + } + + +def __bit_generator_ctor(bit_generator: str | type[BitGenerator] = 'MT19937'): + """ + Pickling helper function that returns a bit generator object + + Parameters + ---------- + bit_generator : type[BitGenerator] or str + BitGenerator class or string containing the name of the BitGenerator + + Returns + ------- + BitGenerator + BitGenerator instance + """ + if isinstance(bit_generator, type): + bit_gen_class = bit_generator + elif bit_generator in BitGenerators: + bit_gen_class = BitGenerators[bit_generator] + else: + raise ValueError( + str(bit_generator) + ' is not a known BitGenerator module.' + ) + + return bit_gen_class() + + +def __generator_ctor(bit_generator_name="MT19937", + bit_generator_ctor=__bit_generator_ctor): + """ + Pickling helper function that returns a Generator object + + Parameters + ---------- + bit_generator_name : str or BitGenerator + String containing the core BitGenerator's name or a + BitGenerator instance + bit_generator_ctor : callable, optional + Callable function that takes bit_generator_name as its only argument + and returns an instantiated bit generator. + + Returns + ------- + rg : Generator + Generator using the named core BitGenerator + """ + if isinstance(bit_generator_name, BitGenerator): + return Generator(bit_generator_name) + # Legacy path that uses a bit generator name and ctor + return Generator(bit_generator_ctor(bit_generator_name)) + + +def __randomstate_ctor(bit_generator_name="MT19937", + bit_generator_ctor=__bit_generator_ctor): + """ + Pickling helper function that returns a legacy RandomState-like object + + Parameters + ---------- + bit_generator_name : str + String containing the core BitGenerator's name + bit_generator_ctor : callable, optional + Callable function that takes bit_generator_name as its only argument + and returns an instantiated bit generator. 
+ + Returns + ------- + rs : RandomState + Legacy RandomState using the named core BitGenerator + """ + if isinstance(bit_generator_name, BitGenerator): + return RandomState(bit_generator_name) + return RandomState(bit_generator_ctor(bit_generator_name)) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_pickle.pyi b/.venv/lib/python3.12/site-packages/numpy/random/_pickle.pyi new file mode 100644 index 0000000..b8b1b7b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_pickle.pyi @@ -0,0 +1,43 @@ +from collections.abc import Callable +from typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only + +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64, PCG64DXSM +from numpy.random._philox import Philox +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import BitGenerator +from numpy.random.mtrand import RandomState + +_T = TypeVar("_T", bound=BitGenerator) + +@type_check_only +class _BitGenerators(TypedDict): + MT19937: type[MT19937] + PCG64: type[PCG64] + PCG64DXSM: type[PCG64DXSM] + Philox: type[Philox] + SFC64: type[SFC64] + +BitGenerators: Final[_BitGenerators] = ... + +@overload +def __bit_generator_ctor(bit_generator: Literal["MT19937"] = "MT19937") -> MT19937: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64"]) -> PCG64: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64DXSM"]) -> PCG64DXSM: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... +@overload +def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... +def __generator_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> Generator: ... +def __randomstate_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> RandomState: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..f52e574 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/_sfc64.pyi b/.venv/lib/python3.12/site-packages/numpy/random/_sfc64.pyi new file mode 100644 index 0000000..a6f0d84 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/_sfc64.pyi @@ -0,0 +1,28 @@ +from typing import TypedDict, type_check_only + +from numpy import uint64 +from numpy._typing import NDArray, _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +@type_check_only +class _SFC64Internal(TypedDict): + state: NDArray[uint64] + +@type_check_only +class _SFC64State(TypedDict): + bit_generator: str + state: _SFC64Internal + has_uint32: int + uinteger: int + +class SFC64(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + @property + def state( + self, + ) -> _SFC64State: ... + @state.setter + def state( + self, + value: _SFC64State, + ) -> None: ... 
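
The _pickle helpers above, together with the state TypedDicts in the surrounding stubs, are what let generators round-trip through pickle with their stream position intact. A quick check using only the public API, not part of the vendored files:

    import pickle
    import numpy as np

    rng = np.random.default_rng(2021)          # seed arbitrary
    rng.standard_normal(3)                     # consume part of the stream
    clone = pickle.loads(pickle.dumps(rng))

    # The clone resumes from the identical state, so the streams agree.
    assert rng.standard_normal() == clone.standard_normal()
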
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..4be63b1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.pxd b/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.pxd new file mode 100644 index 0000000..dfa7d0a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.pxd @@ -0,0 +1,35 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +cdef class BitGenerator(): + cdef readonly object _seed_seq + cdef readonly object lock + cdef bitgen_t _bitgen + cdef readonly object _ctypes + cdef readonly object _cffi + cdef readonly object capsule + + +cdef class SeedSequence(): + cdef readonly object entropy + cdef readonly tuple spawn_key + cdef readonly Py_ssize_t pool_size + cdef readonly object pool + cdef readonly uint32_t n_children_spawned + + cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer, + np.ndarray[np.npy_uint32, ndim=1] entropy_array) + cdef get_assembled_entropy(self) + +cdef class SeedlessSequence(): + pass diff --git a/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.pyi b/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.pyi new file mode 100644 index 0000000..6ce4f4b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/bit_generator.pyi @@ -0,0 +1,124 @@ +import abc +from collections.abc import Callable, Mapping, Sequence +from threading import Lock +from typing import ( + Any, + ClassVar, + Literal, + NamedTuple, + Self, + TypeAlias, + TypedDict, + overload, + type_check_only, +) + +from _typeshed import Incomplete +from typing_extensions import CapsuleType + +import numpy as np +from numpy._typing import ( + NDArray, + _ArrayLikeInt_co, + _DTypeLike, + _ShapeLike, + _UInt32Codes, + _UInt64Codes, +) + +__all__ = ["BitGenerator", "SeedSequence"] + +### + +_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes + +@type_check_only +class _SeedSeqState(TypedDict): + entropy: int | Sequence[int] | None + spawn_key: tuple[int, ...] + pool_size: int + n_children_spawned: int + +@type_check_only +class _Interface(NamedTuple): + state_address: Incomplete + state: Incomplete + next_uint64: Incomplete + next_uint32: Incomplete + next_double: Incomplete + bit_generator: Incomplete + +@type_check_only +class _CythonMixin: + def __setstate_cython__(self, pyx_state: object, /) -> None: ... + def __reduce_cython__(self) -> Any: ... # noqa: ANN401 + +@type_check_only +class _GenerateStateMixin(_CythonMixin): + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +### + +class ISeedSequence(abc.ABC): + @abc.abstractmethod + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +class ISpawnableSeedSequence(ISeedSequence, abc.ABC): + @abc.abstractmethod + def spawn(self, /, n_children: int) -> list[Self]: ... 
+ +class SeedlessSeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + def spawn(self, /, n_children: int) -> list[Self]: ... + +class SeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + __pyx_vtable__: ClassVar[CapsuleType] = ... + + entropy: int | Sequence[int] | None + spawn_key: tuple[int, ...] + pool_size: int + n_children_spawned: int + pool: NDArray[np.uint32] + + def __init__( + self, + /, + entropy: _ArrayLikeInt_co | None = None, + *, + spawn_key: Sequence[int] = (), + pool_size: int = 4, + n_children_spawned: int = ..., + ) -> None: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + @property + def state(self) -> _SeedSeqState: ... + +class BitGenerator(_CythonMixin, abc.ABC): + lock: Lock + @property + def state(self) -> Mapping[str, Any]: ... + @state.setter + def state(self, value: Mapping[str, Any], /) -> None: ... + @property + def seed_seq(self) -> ISeedSequence: ... + @property + def ctypes(self) -> _Interface: ... + @property + def cffi(self) -> _Interface: ... + @property + def capsule(self) -> CapsuleType: ... + + # + def __init__(self, /, seed: _ArrayLikeInt_co | SeedSequence | None = None) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Self], tuple[str], tuple[Mapping[str, Any], ISeedSequence]]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + def _benchmark(self, /, cnt: int, method: str = "uint64") -> None: ... + + # + @overload + def random_raw(self, /, size: None = None, output: Literal[True] = True) -> int: ... + @overload + def random_raw(self, /, size: _ShapeLike, output: Literal[True] = True) -> NDArray[np.uint64]: ... + @overload + def random_raw(self, /, size: _ShapeLike | None, output: Literal[False]) -> None: ... + @overload + def random_raw(self, /, size: _ShapeLike | None = None, *, output: Literal[False]) -> None: ... 
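
Two pieces of bit_generator.pyi above do most of the work in practice: SeedSequence, whose generate_state(n_words) emits well-mixed uint32 words and whose spawn(n_children) derives statistically independent children, and BitGenerator.random_raw, which exposes the generator's native output. A short sketch against the public API, not part of the vendored files (the entropy value is arbitrary):

    from numpy.random import SeedSequence, PCG64, Generator

    ss = SeedSequence(92038471)          # entropy arbitrary
    words = ss.generate_state(4)         # ndarray of 4 mixed uint32 words
    children = ss.spawn(3)               # independent child SeedSequences

    rngs = [Generator(PCG64(child)) for child in children]
    raw = PCG64(ss).random_raw(2)        # two native uint64 outputs
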
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/c_distributions.pxd b/.venv/lib/python3.12/site-packages/numpy/random/c_distributions.pxd new file mode 100644 index 0000000..da790ca --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/c_distributions.pxd @@ -0,0 +1,119 @@ +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 +from numpy cimport npy_intp + +from libc.stdint cimport (uint64_t, int32_t, int64_t) +from numpy.random cimport bitgen_t + +cdef extern from "numpy/random/distributions.h": + + struct s_binomial_t: + int has_binomial + double psave + int64_t nsave + double r + double q + double fm + int64_t m + double p1 + double xm + double xl + double xr + double c + double laml + double lamr + double p2 + double p3 + double p4 + + ctypedef s_binomial_t binomial_t + + float random_standard_uniform_f(bitgen_t *bitgen_state) nogil + double random_standard_uniform(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + + double random_standard_exponential(bitgen_t *bitgen_state) nogil + float random_standard_exponential_f(bitgen_t *bitgen_state) nogil + void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + + double random_standard_normal(bitgen_t* bitgen_state) nogil + float random_standard_normal_f(bitgen_t *bitgen_state) nogil + void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil + void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil + double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil + float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil + + float random_standard_uniform_f(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil + float random_standard_normal_f(bitgen_t* bitgen_state) nogil + float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil + + int64_t random_positive_int64(bitgen_t *bitgen_state) nogil + int32_t random_positive_int32(bitgen_t *bitgen_state) nogil + int64_t random_positive_int(bitgen_t *bitgen_state) nogil + uint64_t random_uint(bitgen_t *bitgen_state) nogil + + double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil + + double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil + float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil + + double random_exponential(bitgen_t *bitgen_state, double scale) nogil + double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil + double random_beta(bitgen_t *bitgen_state, double a, double b) nogil + double random_chisquare(bitgen_t *bitgen_state, double df) nogil + double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil + double random_standard_cauchy(bitgen_t *bitgen_state) nogil + double random_pareto(bitgen_t *bitgen_state, double a) nogil + double random_weibull(bitgen_t *bitgen_state, double a) nogil + double random_power(bitgen_t *bitgen_state, 
double a) nogil + double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil + double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil + double random_standard_t(bitgen_t *bitgen_state, double df) nogil + double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc) nogil + double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc) nogil + double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil + double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil + double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right) nogil + + int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil + int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil + int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil + int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil + int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil + int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, + int64_t sample) nogil + + uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil + + # Generate random uint64 numbers in closed interval [off, off + rng]. 
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state, + uint64_t off, uint64_t rng, + uint64_t mask, bint use_masked) nogil + + void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix, + double *pix, npy_intp d, binomial_t *binomial) nogil + + int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + diff --git a/.venv/lib/python3.12/site-packages/numpy/random/lib/libnpyrandom.a b/.venv/lib/python3.12/site-packages/numpy/random/lib/libnpyrandom.a new file mode 100644 index 0000000..6c180dd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/lib/libnpyrandom.a differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..c51105e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/mtrand.pyi b/.venv/lib/python3.12/site-packages/numpy/random/mtrand.pyi new file mode 100644 index 0000000..54bb146 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/mtrand.pyi @@ -0,0 +1,703 @@ +import builtins +from collections.abc import Callable +from typing import Any, Literal, overload + +import numpy as np +from numpy import ( + dtype, + float64, + int8, + int16, + int32, + int64, + int_, + long, + uint, + uint8, + uint16, + uint32, + uint64, + ulong, +) +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DTypeLikeBool, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _LongCodes, + _ShapeLike, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, + _ULongCodes, +) +from numpy.random.bit_generator import BitGenerator + +class RandomState: + _bit_generator: BitGenerator + def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 + def seed(self, seed: _ArrayLikeFloat_co | None = ...) -> None: ... + @overload + def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... + @overload + def get_state( + self, legacy: Literal[True] = ... + ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... + def set_state( + self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] + ) -> None: ... + @overload + def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def random(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def beta(self, a: float, b: float, size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def beta( + self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + @overload + # Generates long values but stores them in a 64-bit int: + def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: type[bool] = ..., + ) -> bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: type[np.bool] = ..., + ) -> np.bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: type[int] = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 + ) -> uint8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 + ) -> uint16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 + ) -> uint32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 + ) -> uint: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 + ) -> ulong: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 + ) -> uint64: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 + ) -> int8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 + ) -> int16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 + ) -> int32: ...
+ @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 + ) -> int_: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 + ) -> long: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 + ) -> int64: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[long]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: _DTypeLikeBool = ..., + ) -> NDArray[np.bool]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 + ) -> NDArray[int8]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 + ) -> NDArray[int16]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 + ) -> NDArray[int32]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 + ) -> NDArray[int64]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 + ) -> NDArray[uint8]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 + ) -> NDArray[uint16]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 + ) -> NDArray[uint32]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 + ) -> NDArray[uint64]: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 + ) -> NDArray[long]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 + ) -> NDArray[ulong]: ... + def bytes(self, length: int) -> builtins.bytes: ... + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + ) -> NDArray[long]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + ) -> NDArray[Any]: ... + @overload + def uniform( + self, low: float = ..., high: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def rand(self) -> float: ... + @overload + def rand(self, *args: int) -> NDArray[float64]: ... + @overload + def randn(self) -> float: ... + @overload + def randn(self, *args: int) -> NDArray[float64]: ... + @overload + def random_integers( + self, low: int, high: int | None = ..., size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[long]: ... + @overload + def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_normal( # type: ignore[misc] + self, size: _ShapeLike = ... + ) -> NDArray[float64]: ... + @overload + def normal( + self, loc: float = ..., scale: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: float, + size: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def noncentral_f( + self, dfnum: float, dfden: float, nonc: float, size: None = ... + ) -> float: ... 
# type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def noncentral_chisquare( + self, df: float, nonc: float, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> NDArray[float64]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> NDArray[float64]: ... + @overload + def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + @overload + def laplace( + self, loc: float = ..., scale: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def gumbel( + self, loc: float = ..., scale: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def logistic( + self, loc: float = ..., scale: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def lognormal( + self, mean: float = ..., sigma: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... 
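+ # The distribution overloads below follow the same pattern as those above:
+ # size=None selects the scalar return type, while a shape-like size selects
+ # the ndarray return type.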
+ @overload + def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def triangular( + self, left: float, mode: float, right: float, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def binomial( + self, n: int, p: float, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def negative_binomial( + self, n: float, p: float, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def poisson( + self, lam: float = ..., size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: _ShapeLike | None = ..., + ) -> NDArray[long]: ... + @overload + def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + ) -> NDArray[float64]: ... + def multinomial( + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + def shuffle(self, x: ArrayLike) -> None: ... + @overload + def permutation(self, x: int) -> NDArray[long]: ... + @overload + def permutation(self, x: ArrayLike) -> NDArray[Any]: ... 
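Taken together, the overloads above encode the long-standing RandomState calling convention: omitting `size` (or passing None) yields a Python scalar, any shape-like `size` yields an ndarray, and `randint`'s `dtype` argument narrows the integer type of the result. A minimal runtime sketch of that behavior (illustrative only; it assumes NumPy is importable and the variable names are invented here):

    import numpy as np

    rs = np.random.RandomState(12345)

    x = rs.beta(2.0, 5.0)             # size omitted -> scalar float
    v = rs.beta(2.0, 5.0, size=(3,))  # shape given -> ndarray of float64
    m = rs.randint(0, 10, size=(2, 2), dtype=np.uint8)  # dtype narrows result

    assert isinstance(x, float)
    assert v.shape == (3,) and v.dtype == np.float64
    assert m.shape == (2, 2) and m.dtype == np.uint8

The module-level aliases that follow simply re-export these methods from a hidden `_rand` instance, which is why `np.random.seed`, `np.random.random_sample`, and the `sample`/`ranf` wrappers all draw from one global stream.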
+ +_rand: RandomState + +beta = _rand.beta +binomial = _rand.binomial +bytes = _rand.bytes +chisquare = _rand.chisquare +choice = _rand.choice +dirichlet = _rand.dirichlet +exponential = _rand.exponential +f = _rand.f +gamma = _rand.gamma +get_state = _rand.get_state +geometric = _rand.geometric +gumbel = _rand.gumbel +hypergeometric = _rand.hypergeometric +laplace = _rand.laplace +logistic = _rand.logistic +lognormal = _rand.lognormal +logseries = _rand.logseries +multinomial = _rand.multinomial +multivariate_normal = _rand.multivariate_normal +negative_binomial = _rand.negative_binomial +noncentral_chisquare = _rand.noncentral_chisquare +noncentral_f = _rand.noncentral_f +normal = _rand.normal +pareto = _rand.pareto +permutation = _rand.permutation +poisson = _rand.poisson +power = _rand.power +rand = _rand.rand +randint = _rand.randint +randn = _rand.randn +random = _rand.random +random_integers = _rand.random_integers +random_sample = _rand.random_sample +rayleigh = _rand.rayleigh +seed = _rand.seed +set_state = _rand.set_state +shuffle = _rand.shuffle +standard_cauchy = _rand.standard_cauchy +standard_exponential = _rand.standard_exponential +standard_gamma = _rand.standard_gamma +standard_normal = _rand.standard_normal +standard_t = _rand.standard_t +triangular = _rand.triangular +uniform = _rand.uniform +vonmises = _rand.vonmises +wald = _rand.wald +weibull = _rand.weibull +zipf = _rand.zipf +# Two legacy aliases that are trivial wrappers around random_sample +sample = _rand.random_sample +ranf = _rand.random_sample + +def set_bit_generator(bitgen: BitGenerator) -> None: ... + +def get_bit_generator() -> BitGenerator: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..ff19c68 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-312.pyc new file mode 100644 index 0000000..a6c17ee Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-312.pyc new file mode 100644 index 0000000..d9dc433 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc new file mode 100644 index 0000000..5fe19c4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc
b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc new file mode 100644 index 0000000..6c2cbe4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_random.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_random.cpython-312.pyc new file mode 100644 index 0000000..3220f3e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_random.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc new file mode 100644 index 0000000..569cef5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc new file mode 100644 index 0000000..fea121a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_regression.cpython-312.pyc new file mode 100644 index 0000000..f6674e9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_regression.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc new file mode 100644 index 0000000..b5989a2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc new file mode 100644 index 0000000..97fc783 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/__init__.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..0637ee9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/generator_pcg64_np121.pkl.gz b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/generator_pcg64_np121.pkl.gz new file mode 100644 index 0000000..b7ad03d Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/generator_pcg64_np121.pkl.gz differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/generator_pcg64_np126.pkl.gz b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/generator_pcg64_np126.pkl.gz new file mode 100644 index 0000000..6c5130b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/generator_pcg64_np126.pkl.gz differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/mt19937-testset-1.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/mt19937-testset-1.csv new file mode 100644 index 0000000..b97bfa6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/mt19937-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xc816921f +1, 0xb3623c6d +2, 0x5fa391bb +3, 0x40178d9 +4, 0x7dcc9811 +5, 0x548eb8e6 +6, 0x92ba3125 +7, 0x65fde68d +8, 0x2f81ec95 +9, 0xbd94f7a2 +10, 0xdc4d9bcc +11, 0xa672bf13 +12, 0xb41113e +13, 0xec7e0066 +14, 0x50239372 +15, 0xd9d66b1d +16, 0xab72a161 +17, 0xddc2e29f +18, 0x7ea29ab4 +19, 0x80d141ba +20, 0xb1c7edf1 +21, 0x44d29203 +22, 0xe224d98 +23, 0x5b3e9d26 +24, 0x14fd567c +25, 0x27d98c96 +26, 0x838779fc +27, 0x92a138a +28, 0x5d08965b +29, 0x531e0ad6 +30, 0x984ee8f4 +31, 0x1ed78539 +32, 0x32bd6d8d +33, 0xc37c8516 +34, 0x9aef5c6b +35, 0x3aacd139 +36, 0xd96ed154 +37, 0x489cd1ed +38, 0x2cba4b3b +39, 0x76c6ae72 +40, 0x2dae02b9 +41, 0x52ac5fd6 +42, 0xc2b5e265 +43, 0x630e6a28 +44, 0x3f560d5d +45, 0x9315bdf3 +46, 0xf1055aba +47, 0x840e42c6 +48, 0xf2099c6b +49, 0x15ff7696 +50, 0x7948d146 +51, 0x97342961 +52, 0x7a7a21c +53, 0xc66f4fb1 +54, 0x23c4103e +55, 0xd7321f98 +56, 0xeb7efb75 +57, 0xe02490b5 +58, 0x2aa02de +59, 0x8bee0bf7 +60, 0xfc2da059 +61, 0xae835034 +62, 0x678f2075 +63, 0x6d03094b +64, 0x56455e05 +65, 0x18b32373 +66, 0x8ff0356b +67, 0x1fe442fb +68, 0x3f1ab6c3 +69, 0xb6fd21b +70, 0xfc310eb2 +71, 0xb19e9a4d +72, 0x17ddee72 +73, 0xfd534251 +74, 0x9e500564 +75, 0x9013a036 +76, 0xcf08f118 +77, 0x6b6d5969 +78, 0x3ccf1977 +79, 0x7cc11497 +80, 0x651c6ac9 +81, 0x4d6b104b +82, 0x9a28314e +83, 0x14c237be +84, 0x9cfc8d52 +85, 0x2947fad5 +86, 0xd71eff49 +87, 0x5188730e +88, 0x4b894614 +89, 0xf4fa2a34 +90, 0x42f7cc69 +91, 0x4089c9e8 +92, 0xbf0bbfe4 +93, 0x3cea65c +94, 0xc6221207 +95, 0x1bb71a8f +96, 0x54843fe7 +97, 0xbc59de4c +98, 0x79c6ee64 +99, 0x14e57a26 +100, 0x68d88fe +101, 0x2b86ef64 +102, 0x8ffff3c1 +103, 0x5bdd573f +104, 0x85671813 +105, 0xefe32ca2 +106, 0x105ded1e +107, 0x90ca2769 +108, 0xb33963ac +109, 0x363fbbc3 +110, 0x3b3763ae +111, 0x1d50ab88 +112, 0xc9ec01eb +113, 0xc8bbeada +114, 0x5d704692 +115, 0x5fd9e40 +116, 0xe61c125 +117, 0x2fe05792 +118, 0xda8afb72 +119, 0x4cbaa653 +120, 0xdd2243df +121, 0x896fd3f5 +122, 0x5bc23db +123, 0xa1c4e807 +124, 0x57d1a24d +125, 0x66503ddc +126, 0xcf7c0838 +127, 0x19e034fc +128, 0x66807450 +129, 0xfc219b3b +130, 0xe8a843e7 +131, 0x9ce61f08 +132, 0x92b950d6 +133, 0xce955ec4 +134, 0xda0d1f0d +135, 0x960c6250 +136, 0x39552432 +137, 0xde845e84 +138, 0xff3b4b11 +139, 0x5d918e6f +140, 0xbb930df2 +141, 0x7cfb0993 +142, 0x5400e1e9 +143, 0x3bfa0954 +144, 0x7e2605fb +145, 0x11941591 +146, 0x887e6994 +147, 0xdc8bed45 +148, 0x45b3fb50 +149, 0xfbdf8358 +150, 0x41507468 +151, 0x34c87166 +152, 0x17f64d77 +153, 0x3bbaf4f8 +154, 0x4f26f37e +155, 0x4a56ebf2 +156, 0x81100f1 +157, 0x96d94eae +158, 0xca88fda5 +159, 0x2eef3a60 +160, 0x952afbd3 +161, 0x2bec88c7 +162, 0x52335c4b +163, 0x8296db8e +164, 0x4da7d00a +165, 0xc00ac899 +166, 0xadff8c72 +167, 0xbecf26cf +168, 
0x8835c83c +169, 0x1d13c804 +170, 0xaa940ddc +171, 0x68222cfe +172, 0x4569c0e1 +173, 0x29077976 +174, 0x32d4a5af +175, 0xd31fcdef +176, 0xdc60682b +177, 0x7c95c368 +178, 0x75a70213 +179, 0x43021751 +180, 0x5e52e0a6 +181, 0xf7e190b5 +182, 0xee3e4bb +183, 0x2fe3b150 +184, 0xcf419c07 +185, 0x478a4570 +186, 0xe5c3ea50 +187, 0x417f30a8 +188, 0xf0cfdaa0 +189, 0xd1f7f738 +190, 0x2c70fc23 +191, 0x54fc89f9 +192, 0x444dcf01 +193, 0xec2a002d +194, 0xef0c3a88 +195, 0xde21be9 +196, 0x88ab3296 +197, 0x3028897c +198, 0x264b200b +199, 0xd8ae0706 +200, 0x9eef901a +201, 0xbd1b96e0 +202, 0xea71366c +203, 0x1465b694 +204, 0x5a794650 +205, 0x83df52d4 +206, 0x8262413d +207, 0x5bc148c0 +208, 0xe0ecd80c +209, 0x40649571 +210, 0xb4d2ee5f +211, 0xedfd7d09 +212, 0xa082e25f +213, 0xc62992d1 +214, 0xbc7e65ee +215, 0x5499cf8a +216, 0xac28f775 +217, 0x649840fb +218, 0xd4c54805 +219, 0x1d166ba6 +220, 0xbeb1171f +221, 0x45b66703 +222, 0x78c03349 +223, 0x38d2a6ff +224, 0x935cae8b +225, 0x1d07dc3f +226, 0x6c1ed365 +227, 0x579fc585 +228, 0x1320c0ec +229, 0x632757eb +230, 0xd265a397 +231, 0x70e9b6c2 +232, 0xc81e322c +233, 0xa27153cf +234, 0x2118ba19 +235, 0x514ec400 +236, 0x2bd0ecd6 +237, 0xc3e7dae3 +238, 0xfa39355e +239, 0x48f23cc1 +240, 0xbcf75948 +241, 0x53ccc70c +242, 0x75346423 +243, 0x951181e0 +244, 0x348e90df +245, 0x14365d7f +246, 0xfbc95d7a +247, 0xdc98a9e6 +248, 0xed202df7 +249, 0xa59ec913 +250, 0x6b6e9ae2 +251, 0x1697f265 +252, 0x15d322d0 +253, 0xa2e7ee0a +254, 0x88860b7e +255, 0x455d8b9d +256, 0x2f5c59cb +257, 0xac49c9f1 +258, 0xa6a6a039 +259, 0xc057f56b +260, 0xf1ff1208 +261, 0x5eb8dc9d +262, 0xe6702509 +263, 0xe238b0ed +264, 0x5ae32e3d +265, 0xa88ebbdf +266, 0xef885ae7 +267, 0xafa6d49b +268, 0xc94499e0 +269, 0x1a196325 +270, 0x88938da3 +271, 0x14f4345 +272, 0xd8e33637 +273, 0xa3551bd5 +274, 0x73fe35c7 +275, 0x9561e94b +276, 0xd673bf68 +277, 0x16134872 +278, 0x68c42f9f +279, 0xdf7574c8 +280, 0x8809bab9 +281, 0x1432cf69 +282, 0xafb66bf1 +283, 0xc184aa7b +284, 0xedbf2007 +285, 0xbd420ce1 +286, 0x761033a0 +287, 0xff7e351f +288, 0xd6c3780e +289, 0x5844416f +290, 0xc6c0ee1c +291, 0xd2e147db +292, 0x92ac601a +293, 0x393e846b +294, 0x18196cca +295, 0x54a22be +296, 0x32bab1c4 +297, 0x60365183 +298, 0x64fa342 +299, 0xca24a493 +300, 0xd8cc8b83 +301, 0x3faf102b +302, 0x6e09bb58 +303, 0x812f0ea +304, 0x592c95d8 +305, 0xe45ea4c5 +306, 0x23aebf83 +307, 0xbd9691d4 +308, 0xf47b4baa +309, 0x4ac7b487 +310, 0xcce18803 +311, 0x3377556e +312, 0x3ff8e6b6 +313, 0x99d22063 +314, 0x23250bec +315, 0x4e1f9861 +316, 0x8554249b +317, 0x8635c2fc +318, 0xe8426e8a +319, 0x966c29d8 +320, 0x270b6082 +321, 0x3180a8a1 +322, 0xe7e1668b +323, 0x7f868dc +324, 0xcf4c17cf +325, 0xe31de4d1 +326, 0xc8c8aff4 +327, 0xae8db704 +328, 0x3c928cc2 +329, 0xe12cd48 +330, 0xb33ecd04 +331, 0xb93d7cbe +332, 0x49c69d6a +333, 0x7d3bce64 +334, 0x86bc219 +335, 0x8408233b +336, 0x44dc7479 +337, 0xdf80d538 +338, 0xf3db02c3 +339, 0xbbbd31d7 +340, 0x121281f +341, 0x7521e9a3 +342, 0x8859675a +343, 0x75aa6502 +344, 0x430ed15b +345, 0xecf0a28d +346, 0x659774fd +347, 0xd58a2311 +348, 0x512389a9 +349, 0xff65e1ff +350, 0xb6ddf222 +351, 0xe3458895 +352, 0x8b13cd6e +353, 0xd4a22870 +354, 0xe604c50c +355, 0x27f54f26 +356, 0x8f7f422f +357, 0x9735b4cf +358, 0x414072b0 +359, 0x76a1c6d5 +360, 0xa2208c06 +361, 0x83cd0f61 +362, 0x6c4f7ead +363, 0x6553cf76 +364, 0xeffcf44 +365, 0x7f434a3f +366, 0x9dc364bd +367, 0x3cdf52ed +368, 0xad597594 +369, 0x9c3e211b +370, 0x6c04a33f +371, 0x885dafa6 +372, 0xbbdaca71 +373, 0x7ae5dd5c +374, 0x37675644 +375, 0x251853c6 +376, 0x130b086b +377, 0x143fa54b 
+378, 0x54cdc282 +379, 0x9faff5b3 +380, 0x502a5c8b +381, 0xd9524550 +382, 0xae221aa6 +383, 0x55cf759b +384, 0x24782da4 +385, 0xd715d815 +386, 0x250ea09a +387, 0x4e0744ac +388, 0x11e15814 +389, 0xabe5f9df +390, 0xc8146350 +391, 0xfba67d9b +392, 0x2b82e42f +393, 0xd4ea96fc +394, 0x5ffc179e +395, 0x1598bafe +396, 0x7fb6d662 +397, 0x1a12a0db +398, 0x450cee4a +399, 0x85f8e12 +400, 0xce71b594 +401, 0xd4bb1d19 +402, 0x968f379d +403, 0x54cc1d52 +404, 0x467e6066 +405, 0x7da5f9a9 +406, 0x70977034 +407, 0x49e65c4b +408, 0xd08570d1 +409, 0x7acdf60b +410, 0xdffa038b +411, 0x9ce14e4c +412, 0x107cbbf8 +413, 0xdd746ca0 +414, 0xc6370a46 +415, 0xe7f83312 +416, 0x373fa9ce +417, 0xd822a2c6 +418, 0x1d4efea6 +419, 0xc53dcadb +420, 0x9b4e898f +421, 0x71daa6bf +422, 0x7a0bc78b +423, 0xd7b86f50 +424, 0x1b8b3286 +425, 0xcf9425dd +426, 0xd5263220 +427, 0x4ea0b647 +428, 0xc767fe64 +429, 0xcfc5e67 +430, 0xcc6a2942 +431, 0xa51eff00 +432, 0x76092e1b +433, 0xf606e80f +434, 0x824b5e20 +435, 0xebb55e14 +436, 0x783d96a6 +437, 0x10696512 +438, 0x17ee510a +439, 0x3ab70a1f +440, 0xcce6b210 +441, 0x8f72f0fb +442, 0xf0610b41 +443, 0x83d01fb5 +444, 0x6b3de36 +445, 0xe4c2e84f +446, 0x9c43bb15 +447, 0xddf2905 +448, 0x7dd63556 +449, 0x3662ca09 +450, 0xfb81f35b +451, 0xc2c8a72a +452, 0x8e93c37 +453, 0xa93da2d4 +454, 0xa03af8f1 +455, 0x8d75159a +456, 0x15f010b0 +457, 0xa296ab06 +458, 0xe55962ba +459, 0xeae700a9 +460, 0xe388964a +461, 0x917f2bec +462, 0x1c203fea +463, 0x792a01ba +464, 0xa93a80ac +465, 0x9eb8a197 +466, 0x56c0bc73 +467, 0xb8f05799 +468, 0xf429a8c8 +469, 0xb92cee42 +470, 0xf8864ec +471, 0x62f2518a +472, 0x3a7bfa3e +473, 0x12e56e6d +474, 0xd7a18313 +475, 0x41fa3899 +476, 0xa09c4956 +477, 0xebcfd94a +478, 0xc485f90b +479, 0x4391ce40 +480, 0x742a3333 +481, 0xc932f9e5 +482, 0x75c6c263 +483, 0x80937f0 +484, 0xcf21833c +485, 0x16027520 +486, 0xd42e669f +487, 0xb0f01fb7 +488, 0xb35896f1 +489, 0x763737a9 +490, 0x1bb20209 +491, 0x3551f189 +492, 0x56bc2602 +493, 0xb6eacf4 +494, 0x42ec4d11 +495, 0x245cc68 +496, 0xc27ac43b +497, 0x9d903466 +498, 0xce3f0c05 +499, 0xb708c31c +500, 0xc0fd37eb +501, 0x95938b2c +502, 0xf20175a7 +503, 0x4a86ee9b +504, 0xbe039a58 +505, 0xd41cabe7 +506, 0x83bc99ba +507, 0x761d60e1 +508, 0x7737cc2e +509, 0x2b82fc4b +510, 0x375aa401 +511, 0xfe9597a0 +512, 0x5543806a +513, 0x44f31238 +514, 0x7df31538 +515, 0x74cfa770 +516, 0x8755d881 +517, 0x1fde665a +518, 0xda8bf315 +519, 0x973d8e95 +520, 0x72205228 +521, 0x8fe59717 +522, 0x7bb90b34 +523, 0xef6ed945 +524, 0x16fd4a38 +525, 0x5db44de1 +526, 0xf09f93b3 +527, 0xe84824cc +528, 0x945bb50e +529, 0xd0be4aa5 +530, 0x47c277c2 +531, 0xd3800c28 +532, 0xac1c33ec +533, 0xd3dacce +534, 0x811c8387 +535, 0x6761b36 +536, 0x70d3882f +537, 0xd6e62e3a +538, 0xea25daa2 +539, 0xb07f39d1 +540, 0x391d89d7 +541, 0x84b6fb5e +542, 0x3dda3fca +543, 0x229e80a4 +544, 0x3d94a4b7 +545, 0x5d3d576a +546, 0xad7818a0 +547, 0xce23b03a +548, 0x7aa2079c +549, 0x9a6be555 +550, 0x83f3b34a +551, 0x1848f9d9 +552, 0xd8fefc1c +553, 0x48e6ce48 +554, 0x52e55750 +555, 0xf41a71cf +556, 0xba08e259 +557, 0xfaf06a15 +558, 0xeaaac0fb +559, 0x34f90098 +560, 0xb1dfffbb +561, 0x718daec2 +562, 0xab4dda21 +563, 0xd27cc1ee +564, 0x4aafbc4c +565, 0x356dfb4f +566, 0x83fcdfd6 +567, 0x8f0bcde0 +568, 0x4363f844 +569, 0xadc0f4d5 +570, 0x3bde994e +571, 0x3884d452 +572, 0x21876b4a +573, 0x9c985398 +574, 0xca55a226 +575, 0x3a88c583 +576, 0x916dc33c +577, 0x8f67d1d7 +578, 0x3b26a667 +579, 0xe4ddeb4b +580, 0x1a9d8c33 +581, 0x81c9b74f +582, 0x9ed1e9df +583, 0x6e61aecf +584, 0x95e95a5d +585, 0x68864ff5 +586, 0xb8fa5b9 +587, 
0x72b1b3de +588, 0x5e18a86b +589, 0xd7f2337d +590, 0xd70e0925 +591, 0xb573a4c1 +592, 0xc77b3f8a +593, 0x389b20de +594, 0x16cf6afb +595, 0xa39bd275 +596, 0xf491cf01 +597, 0x6f88a802 +598, 0x8510af05 +599, 0xe7cd549a +600, 0x8603179a +601, 0xef43f191 +602, 0xf9b64c60 +603, 0xb00254a7 +604, 0xd7c06a2d +605, 0x17e9380b +606, 0x529e727b +607, 0xaaa8fe0a +608, 0xfb64ff4c +609, 0xcd75af26 +610, 0xfb717c87 +611, 0xa0789899 +612, 0x10391ec9 +613, 0x7e9b40b3 +614, 0x18536554 +615, 0x728c05f7 +616, 0x787dca98 +617, 0xad948d1 +618, 0x44c18def +619, 0x3303f2ec +620, 0xa15acb5 +621, 0xb58d38f4 +622, 0xfe041ef8 +623, 0xd151a956 +624, 0x7b9168e8 +625, 0x5ebeca06 +626, 0x90fe95df +627, 0xf76875aa +628, 0xb2e0d664 +629, 0x2e3253b7 +630, 0x68e34469 +631, 0x1f0c2d89 +632, 0x13a34ac2 +633, 0x5ffeb841 +634, 0xe381e91c +635, 0xb8549a92 +636, 0x3f35cf1 +637, 0xda0f9dcb +638, 0xdd9828a6 +639, 0xe1428f29 +640, 0xf4db80b5 +641, 0xdac30af5 +642, 0x1af1dd17 +643, 0x9a540254 +644, 0xcab68a38 +645, 0x33560361 +646, 0x2fbf3886 +647, 0xbc785923 +648, 0xe081cd10 +649, 0x8e473356 +650, 0xd102c357 +651, 0xeea4fe48 +652, 0x248d3453 +653, 0x1da79ac +654, 0x815a65ff +655, 0x27693e76 +656, 0xb7d5af40 +657, 0x6d245d30 +658, 0x9e06fa8f +659, 0xb0570dcb +660, 0x469f0005 +661, 0x3e0ca132 +662, 0xd89bbf3 +663, 0xd61ccd47 +664, 0x6383878 +665, 0x62b5956 +666, 0x4dc83675 +667, 0x93fd8492 +668, 0x5a0091f5 +669, 0xc9f9bc3 +670, 0xa26e7778 +671, 0xeabf2d01 +672, 0xe612dc06 +673, 0x85d89ff9 +674, 0xd1763179 +675, 0xcb88947b +676, 0x9e8757a5 +677, 0xe100e85c +678, 0x904166eb +679, 0x4996243d +680, 0x4038e1cb +681, 0x2be2c63d +682, 0x77017e81 +683, 0x3b1f556b +684, 0x1c785c77 +685, 0x6869b8bd +686, 0xe1217ed4 +687, 0x4012ab2f +688, 0xc06c0d8e +689, 0x2122eb68 +690, 0xad1783fd +691, 0x5f0c80e3 +692, 0x828f7efa +693, 0x29328399 +694, 0xeadf1087 +695, 0x85dc0037 +696, 0x9691ef26 +697, 0xc0947a53 +698, 0x2a178d2a +699, 0x2a2c7e8f +700, 0x90378380 +701, 0xaad8d326 +702, 0x9cf1c3c8 +703, 0x84eccd44 +704, 0x79e61808 +705, 0x8b3f454e +706, 0x209e6e1 +707, 0x51f88378 +708, 0xc210226f +709, 0xd982adb5 +710, 0x55d44a31 +711, 0x9817d443 +712, 0xa328c626 +713, 0x13455966 +714, 0xb8f681d3 +715, 0x2a3c713b +716, 0xc186959b +717, 0x814a74b0 +718, 0xed7bc90 +719, 0xa88d3d6d +720, 0x88a9f561 +721, 0x73aa1c0a +722, 0xdfeff404 +723, 0xec037e4b +724, 0xa5c209f0 +725, 0xb3a223b4 +726, 0x24ce3709 +727, 0x3184c790 +728, 0xa1398c62 +729, 0x2f92034e +730, 0xbb37a79a +731, 0x605287b4 +732, 0x8faa772c +733, 0x6ce56c1d +734, 0xc035fb4c +735, 0x7cf5b316 +736, 0x6502645 +737, 0xa283d810 +738, 0x778bc2f1 +739, 0xfdf99313 +740, 0x1f513265 +741, 0xbd3837e2 +742, 0x9b84a9a +743, 0x2139ce91 +744, 0x61a8e890 +745, 0xf9ff12db +746, 0xb43d2ea7 +747, 0x88532e61 +748, 0x175a6655 +749, 0x7a6c4f72 +750, 0x6dafc1b7 +751, 0x449b1459 +752, 0x514f654f +753, 0x9a6731e2 +754, 0x8632da43 +755, 0xc81b0422 +756, 0x81fe9005 +757, 0x15b79618 +758, 0xb5fa629f +759, 0x987a474f +760, 0x1c74f54e +761, 0xf9743232 +762, 0xec4b55f +763, 0x87d761e5 +764, 0xd1ad78b7 +765, 0x453d9350 +766, 0xc7a7d85 +767, 0xb2576ff5 +768, 0xcdde49b7 +769, 0x8e1f763e +770, 0x1338583e +771, 0xfd65b9dc +772, 0x4f19c4f4 +773, 0x3a52d73d +774, 0xd3509c4c +775, 0xda24fe31 +776, 0xe2de56ba +777, 0x2db5e540 +778, 0x23172734 +779, 0x4db572f +780, 0xeb941718 +781, 0x84c2649a +782, 0x3b1e5b6a +783, 0x4c9c61b9 +784, 0x3bccd11 +785, 0xb4d7b78e +786, 0x48580ae5 +787, 0xd273ab68 +788, 0x25c11615 +789, 0x470b53f6 +790, 0x329c2068 +791, 0x1693721b +792, 0xf8c9aacf +793, 0x4c3d5693 +794, 0xd778284e +795, 0xae1cb24f +796, 0x3c11d1b3 +797, 
0xddd2b0c0 +798, 0x90269fa7 +799, 0x5666e0a2 +800, 0xf9f195a4 +801, 0x61d78eb2 +802, 0xada5a7c0 +803, 0xaa272fbe +804, 0xba3bae2f +805, 0xd0b70fc2 +806, 0x529f32b +807, 0xda7a3e21 +808, 0x9a776a20 +809, 0xb21f9635 +810, 0xb3acc14e +811, 0xac55f56 +812, 0x29dccf41 +813, 0x32dabdb3 +814, 0xaa032f58 +815, 0xfa406af4 +816, 0xce3c415d +817, 0xb44fb4d9 +818, 0x32248d1c +819, 0x680c6440 +820, 0xae2337b +821, 0x294cb597 +822, 0x5bca48fe +823, 0xaef19f40 +824, 0xad60406 +825, 0x4781f090 +826, 0xfd691ffc +827, 0xb6568268 +828, 0xa56c72cb +829, 0xf8a9e0fc +830, 0x9af4fd02 +831, 0x2cd30932 +832, 0x776cefd7 +833, 0xe31f476e +834, 0x6d94a437 +835, 0xb3cab598 +836, 0xf582d13f +837, 0x3bf8759d +838, 0xc3777dc +839, 0x5e425ea8 +840, 0x1c7ff4ed +841, 0x1c2e97d1 +842, 0xc062d2b4 +843, 0x46dc80e0 +844, 0xbcdb47e6 +845, 0x32282fe0 +846, 0xaba89063 +847, 0x5e94e9bb +848, 0x3e667f78 +849, 0xea6eb21a +850, 0xe56e54e8 +851, 0xa0383510 +852, 0x6768fe2b +853, 0xb53ac3e0 +854, 0x779569a0 +855, 0xeca83c6a +856, 0x24db4d2d +857, 0x4585f696 +858, 0xf84748b2 +859, 0xf6a4dd5b +860, 0x31fb524d +861, 0x67ab39fe +862, 0x5882a899 +863, 0x9a05fcf6 +864, 0x712b5674 +865, 0xe8c6958f +866, 0x4b448bb3 +867, 0x530b9abf +868, 0xb491f491 +869, 0x98352c62 +870, 0x2d0a50e3 +871, 0xeb4384da +872, 0x36246f07 +873, 0xcbc5c1a +874, 0xae24031d +875, 0x44d11ed6 +876, 0xf07f1608 +877, 0xf296aadd +878, 0x3bcfe3be +879, 0x8fa1e7df +880, 0xfd317a6e +881, 0xe4975c44 +882, 0x15205892 +883, 0xa762d4df +884, 0xf1167365 +885, 0x6811cc00 +886, 0x8315f23 +887, 0xe045b4b1 +888, 0xa8496414 +889, 0xbed313ae +890, 0xcdae3ddb +891, 0xa9c22c9 +892, 0x275fab1a +893, 0xedd65fa +894, 0x4c188229 +895, 0x63a83e58 +896, 0x18aa9207 +897, 0xa41f2e78 +898, 0xd9f63653 +899, 0xbe2be73b +900, 0xa3364d39 +901, 0x896d5428 +902, 0xc737539e +903, 0x745a78c6 +904, 0xf0b2b042 +905, 0x510773b4 +906, 0x92ad8e37 +907, 0x27f2f8c4 +908, 0x23704cc8 +909, 0x3d95a77f +910, 0xf08587a4 +911, 0xbd696a25 +912, 0x948924f3 +913, 0x8cddb634 +914, 0xcd2a4910 +915, 0x8e0e300e +916, 0x83815a9b +917, 0x67383510 +918, 0x3c18f0d0 +919, 0xc7a7bccc +920, 0x7cc2d3a2 +921, 0x52eb2eeb +922, 0xe4a257e5 +923, 0xec76160e +924, 0x63f9ad68 +925, 0x36d0bbbf +926, 0x957bc4e4 +927, 0xc9ed90ff +928, 0x4cb6059d +929, 0x2f86eca1 +930, 0x3e3665a3 +931, 0x9b7eb6f4 +932, 0x492e7e18 +933, 0xa098aa51 +934, 0x7eb568b2 +935, 0x3fd639ba +936, 0x7bebcf1 +937, 0x99c844ad +938, 0x43cb5ec7 +939, 0x8dfbbef5 +940, 0x5be413ff +941, 0xd93b976d +942, 0xc1c7a86d +943, 0x1f0e93d0 +944, 0x498204a2 +945, 0xe8fe832a +946, 0x2236bd7 +947, 0x89953769 +948, 0x2acc3491 +949, 0x2c4f22c6 +950, 0xd7996277 +951, 0x3bcdc349 +952, 0xfc286630 +953, 0x5f8909fd +954, 0x242677c0 +955, 0x4cb34104 +956, 0xa6ff8100 +957, 0x39ea47ec +958, 0x9bd54140 +959, 0x7502ffe8 +960, 0x7ebef8ae +961, 0x1ed8abe4 +962, 0xfaba8450 +963, 0xc197b65f +964, 0x19431455 +965, 0xe229c176 +966, 0xeb2967da +967, 0xe0c5dc05 +968, 0xa84e3227 +969, 0x10dd9e0f +970, 0xbdb70b02 +971, 0xce24808a +972, 0x423edab8 +973, 0x194caf71 +974, 0x144f150d +975, 0xf811c2d2 +976, 0xc224ee85 +977, 0x2b217a5b +978, 0xf78a5a79 +979, 0x6554a4b1 +980, 0x769582df +981, 0xf4b2cf93 +982, 0x89648483 +983, 0xb3283a3e +984, 0x82b895db +985, 0x79388ef0 +986, 0x54bc42a6 +987, 0xc4dd39d9 +988, 0x45b33b7d +989, 0x8703b2c1 +990, 0x1cc94806 +991, 0xe0f43e49 +992, 0xcaa7b6bc +993, 0x4f88e9af +994, 0x1477cce5 +995, 0x347dd115 +996, 0x36e335fa +997, 0xb93c9a31 +998, 0xaac3a175 +999, 0x68a19647 diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/mt19937-testset-2.csv 
b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/mt19937-testset-2.csv new file mode 100644 index 0000000..cdb8e47 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/mt19937-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x7ab4ea94 +1, 0x9b561119 +2, 0x4957d02e +3, 0x7dd3fdc2 +4, 0x5affe54 +5, 0x5a01741c +6, 0x8b9e8c1f +7, 0xda5bf11a +8, 0x509226 +9, 0x64e2ea17 +10, 0x82c6dab5 +11, 0xe4302515 +12, 0x8198b873 +13, 0xc3ec9a82 +14, 0x829dff28 +15, 0x5278e44f +16, 0x994a7d2c +17, 0xf1c89398 +18, 0xaf2fddec +19, 0x22abc6ee +20, 0x963dbd43 +21, 0xc29edffb +22, 0x41c1ce07 +23, 0x9c90034d +24, 0x1f17a796 +25, 0x3833caa8 +26, 0xb8795528 +27, 0xebc595a2 +28, 0xf8f5b5dd +29, 0xc2881f72 +30, 0x18e5d3f0 +31, 0x9b19ac7a +32, 0xb9992436 +33, 0xc00052b3 +34, 0xb63f4475 +35, 0x962642d9 +36, 0x63506c10 +37, 0x2be6b127 +38, 0x569bdbc6 +39, 0x7f185e01 +40, 0xebb55f53 +41, 0x1c30198c +42, 0x7c8d75c6 +43, 0xd3f2186b +44, 0xaca5b9b1 +45, 0xbc49ff45 +46, 0xc4a802af +47, 0x2cecd86f +48, 0x8e0da529 +49, 0x1f22b00e +50, 0x4559ea80 +51, 0x60f587d8 +52, 0x7c7460e9 +53, 0x67be0a4a +54, 0x987a0183 +55, 0x7bd30f1 +56, 0xab18c4ac +57, 0xffdbfb64 +58, 0x9ea917f9 +59, 0x1239dab7 +60, 0x38efabeb +61, 0x5da91888 +62, 0x8f49ed62 +63, 0x83f60b1e +64, 0x5950a3fc +65, 0xd8911104 +66, 0x19e8859e +67, 0x1a4d89ec +68, 0x968ca180 +69, 0x9e1b6da3 +70, 0x3d99c2c +71, 0x55f76289 +72, 0x8fa28b9e +73, 0x9fe01d33 +74, 0xdade4e38 +75, 0x1ea04290 +76, 0xa7263313 +77, 0xaafc762e +78, 0x460476d6 +79, 0x31226e12 +80, 0x451d3f05 +81, 0xd0d2764b +82, 0xd06e1ab3 +83, 0x1394e3f4 +84, 0x2fc04ea3 +85, 0x5b8401c +86, 0xebd6c929 +87, 0xe881687c +88, 0x94bdd66a +89, 0xabf85983 +90, 0x223ad12d +91, 0x2aaeeaa3 +92, 0x1f704934 +93, 0x2db2efb6 +94, 0xf49b8dfb +95, 0x5bdbbb9d +96, 0xba0cd0db +97, 0x4ec4674e +98, 0xad0129e +99, 0x7a66129b +100, 0x50d12c5e +101, 0x85b1d335 +102, 0x3efda58a +103, 0xecd886fb +104, 0x8ecadd3d +105, 0x60ebac0f +106, 0x5e10fe79 +107, 0xa84f7e5d +108, 0x43931288 +109, 0xfacf448 +110, 0x4ee01997 +111, 0xcdc0a651 +112, 0x33c87037 +113, 0x8b50fc03 +114, 0xf52aad34 +115, 0xda6cd856 +116, 0x7585bea0 +117, 0xe947c762 +118, 0x4ddff5d8 +119, 0xe0e79b3b +120, 0xb804cf09 +121, 0x84765c44 +122, 0x3ff666b4 +123, 0xe31621ad +124, 0x816f2236 +125, 0x228176bc +126, 0xfdc14904 +127, 0x635f5077 +128, 0x6981a817 +129, 0xfd9a0300 +130, 0xd3fa8a24 +131, 0xd67c1a77 +132, 0x903fe97a +133, 0xf7c4a4d5 +134, 0x109f2058 +135, 0x48ab87fe +136, 0xfd6f1928 +137, 0x707e9452 +138, 0xf327db9e +139, 0x7b80d76d +140, 0xfb6ba193 +141, 0x454a1ad0 +142, 0xe20b51e +143, 0xb774d085 +144, 0x6b1ed574 +145, 0xb1e77de4 +146, 0xe2a83b37 +147, 0x33d3176f +148, 0x2f0ca0fc +149, 0x17f51e2 +150, 0x7c1fbf55 +151, 0xf09e9cd0 +152, 0xe3d9bacd +153, 0x4244db0a +154, 0x876c09fc +155, 0x9db4fc2f +156, 0xd3771d60 +157, 0x25fc6a75 +158, 0xb309915c +159, 0xc50ee027 +160, 0xaa5b7b38 +161, 0x4c650ded +162, 0x1acb2879 +163, 0x50db5887 +164, 0x90054847 +165, 0xfef23e5b +166, 0x2dd7b7d5 +167, 0x990b8c2e +168, 0x6001a601 +169, 0xb5d314c4 +170, 0xfbfb7bf9 +171, 0x1aba997d +172, 0x814e7304 +173, 0x989d956a +174, 0x86d5a29c +175, 0x70a9fa08 +176, 0xc4ccba87 +177, 0x7e9cb366 +178, 0xee18eb0a +179, 0x44f5be58 +180, 0x91d4af2d +181, 0x5ab6e593 +182, 0x9fd6bb4d +183, 0x85894ce +184, 0x728a2401 +185, 0xf006f6d4 +186, 0xd782741e +187, 0x842cd5bd +188, 0xfb5883aa +189, 0x7e5a471 +190, 0x83ff6965 +191, 0xc9675c6b +192, 0xb6ced3c7 +193, 0x3de6425b +194, 0x25e14db4 +195, 0x69ca3dec +196, 0x81342d13 +197, 0xd7cd8417 +198, 0x88d15e69 +199, 0xefba17c9 +200, 0x43d595e6 
+201, 0x89d4cf25 +202, 0x7cae9b9b +203, 0x2242c621 +204, 0x27fc3598 +205, 0x467b1d84 +206, 0xe84d4622 +207, 0xa26bf980 +208, 0x80411010 +209, 0xe2c2bfea +210, 0xbc6ca25a +211, 0x3ddb592a +212, 0xdd46eb9e +213, 0xdfe8f657 +214, 0x2cedc974 +215, 0xf0dc546b +216, 0xd46be68f +217, 0x26d8a5aa +218, 0x76e96ba3 +219, 0x7d5b5353 +220, 0xf532237c +221, 0x6478b79 +222, 0x9b81a5e5 +223, 0x5fc68e5c +224, 0x68436e70 +225, 0x2a0043f9 +226, 0x108d523c +227, 0x7a4c32a3 +228, 0x9c84c742 +229, 0x6f813dae +230, 0xfcc5bbcc +231, 0x215b6f3a +232, 0x84cb321d +233, 0x7913a248 +234, 0xb1e6b585 +235, 0x49376b31 +236, 0x1dc896b0 +237, 0x347051ad +238, 0x5524c042 +239, 0xda0eef9d +240, 0xf2e73342 +241, 0xbeee2f9d +242, 0x7c702874 +243, 0x9eb3bd34 +244, 0x97b09700 +245, 0xcdbab1d4 +246, 0x4a2f6ed1 +247, 0x2047bda5 +248, 0x3ecc7005 +249, 0x8d0d5e67 +250, 0x40876fb5 +251, 0xb5fd2187 +252, 0xe915d8af +253, 0x9a2351c7 +254, 0xccc658ae +255, 0xebb1eddc +256, 0xc4a83671 +257, 0xffb2548f +258, 0xe4fe387a +259, 0x477aaab4 +260, 0x8475a4e4 +261, 0xf8823e46 +262, 0xe4130f71 +263, 0xbdb54482 +264, 0x98fe0462 +265, 0xf36b27b8 +266, 0xed7733da +267, 0x5f428afc +268, 0x43a3a21a +269, 0xf8370b55 +270, 0xfade1de1 +271, 0xd9a038ea +272, 0x3c69af23 +273, 0x24df7dd0 +274, 0xf66d9353 +275, 0x71d811be +276, 0xcc4d024b +277, 0xb8c30bf0 +278, 0x4198509d +279, 0x8b37ba36 +280, 0xa41ae29a +281, 0x8cf7799e +282, 0x5cd0136a +283, 0xa11324ef +284, 0x2f8b6d4b +285, 0x3657cf17 +286, 0x35b6873f +287, 0xee6e5bd7 +288, 0xbeeaa98 +289, 0x9ad3c581 +290, 0xe2376c3f +291, 0x738027cc +292, 0x536ac839 +293, 0xf066227 +294, 0x6c9cb0f9 +295, 0x84082ae6 +296, 0xab38ae9d +297, 0x493eade9 +298, 0xcb630b3a +299, 0x64d44250 +300, 0xe5efb557 +301, 0xea2424d9 +302, 0x11a690ba +303, 0x30a48ae4 +304, 0x58987e53 +305, 0x94ec6076 +306, 0x5d3308fa +307, 0xf1635ebb +308, 0x56a5ab90 +309, 0x2b2f2ee4 +310, 0x6f9e6483 +311, 0x8b93e327 +312, 0xa7ce140b +313, 0x4c8aa42 +314, 0x7657bb3f +315, 0xf250fd75 +316, 0x1edfcb0f +317, 0xdb42ace3 +318, 0xf8147e16 +319, 0xd1992bd +320, 0x64bb14d1 +321, 0x423e724d +322, 0x7b172f7c +323, 0x17171696 +324, 0x4acaf83b +325, 0x7a83527e +326, 0xfc980c60 +327, 0xc8b56bb +328, 0x2453f77f +329, 0x85ad1bf9 +330, 0x62a85dfe +331, 0x48238c4d +332, 0xbb3ec1eb +333, 0x4c1c039c +334, 0x1f37f571 +335, 0x98aecb63 +336, 0xc3b3ddd6 +337, 0xd22dad4 +338, 0xe49671a3 +339, 0xe3baf945 +340, 0xb9e21680 +341, 0xda562856 +342, 0xe8b88ce4 +343, 0x86f88de2 +344, 0x986faf76 +345, 0x6f0025c3 +346, 0x3fe21234 +347, 0xd8d3f729 +348, 0xc2d11c6f +349, 0xd4f9e8f +350, 0xf61a0aa +351, 0xc48bb313 +352, 0xe944e940 +353, 0xf1801b2e +354, 0x253590be +355, 0x981f069d +356, 0x891454d8 +357, 0xa4f824ad +358, 0x6dd2cc48 +359, 0x3018827e +360, 0x3fb329e6 +361, 0x65276517 +362, 0x8d2c0dd2 +363, 0xc965b48e +364, 0x85d14d90 +365, 0x5a51623c +366, 0xa9573d6a +367, 0x82d00edf +368, 0x5ed7ce07 +369, 0x1d946abc +370, 0x24fa567b +371, 0x83ef5ecc +372, 0x9001724a +373, 0xc4fe48f3 +374, 0x1e07c25c +375, 0xf4d5e65e +376, 0xb734f6e9 +377, 0x327a2df8 +378, 0x766d59b7 +379, 0x625e6b61 +380, 0xe82f32d7 +381, 0x1566c638 +382, 0x2e815871 +383, 0x606514aa +384, 0x36b7386e +385, 0xcaa8ce08 +386, 0xb453fe9c +387, 0x48574e23 +388, 0x71f0da06 +389, 0xa8a79463 +390, 0x6b590210 +391, 0x86e989db +392, 0x42899f4f +393, 0x7a654ef9 +394, 0x4c4fe932 +395, 0x77b2fd10 +396, 0xb6b4565c +397, 0xa2e537a3 +398, 0xef5a3dca +399, 0x41235ea8 +400, 0x95c90541 +401, 0x50ad32c4 +402, 0xc1b8e0a4 +403, 0x498e9aab +404, 0xffc965f1 +405, 0x72633485 +406, 0x3a731aef +407, 0x7cfddd0b +408, 0xb04d4129 +409, 0x184fc28e +410, 
0x424369b0 +411, 0xf9ae13a1 +412, 0xaf357c8d +413, 0x7a19228e +414, 0xb46de2a8 +415, 0xeff2ac76 +416, 0xa6c9357b +417, 0x614f19c1 +418, 0x8ee1a53f +419, 0xbe1257b1 +420, 0xf72651fe +421, 0xd347c298 +422, 0x96dd2f23 +423, 0x5bb1d63e +424, 0x32e10887 +425, 0x36a144da +426, 0x9d70e791 +427, 0x5e535a25 +428, 0x214253da +429, 0x2e43dd40 +430, 0xfc0413f4 +431, 0x1f5ea409 +432, 0x1754c126 +433, 0xcdbeebbe +434, 0x1fb44a14 +435, 0xaec7926 +436, 0xb9d9a1e +437, 0x9e4a6577 +438, 0x8b1f04c5 +439, 0x19854e8a +440, 0x531080cd +441, 0xc0cbd73 +442, 0x20399d77 +443, 0x7d8e9ed5 +444, 0x66177598 +445, 0x4d18a5c2 +446, 0xe08ebf58 +447, 0xb1f9c87b +448, 0x66bedb10 +449, 0x26670d21 +450, 0x7a7892da +451, 0x69b69d86 +452, 0xd04f1d1c +453, 0xaf469625 +454, 0x7946b813 +455, 0x1ee596bd +456, 0x7f365d85 +457, 0x795b662b +458, 0x194ad02d +459, 0x5a9649b5 +460, 0x6085e278 +461, 0x2cf54550 +462, 0x9c77ea0b +463, 0x3c6ff8b +464, 0x2141cd34 +465, 0xb90bc671 +466, 0x35037c4b +467, 0xd04c0d76 +468, 0xc75bff8 +469, 0x8f52003b +470, 0xfad3d031 +471, 0x667024bc +472, 0xcb04ea36 +473, 0x3e03d587 +474, 0x2644d3a0 +475, 0xa8fe99ba +476, 0x2b9a55fc +477, 0x45c4d44a +478, 0xd059881 +479, 0xe07fcd20 +480, 0x4e22046c +481, 0x7c2cbf81 +482, 0xbf7f23de +483, 0x69d924c3 +484, 0xe53cd01 +485, 0x3879017c +486, 0xa590e558 +487, 0x263bc076 +488, 0x245465b1 +489, 0x449212c6 +490, 0x249dcb29 +491, 0x703d42d7 +492, 0x140eb9ec +493, 0xc86c5741 +494, 0x7992aa5b +495, 0xb8b76a91 +496, 0x771dac3d +497, 0x4ecd81e3 +498, 0xe5ac30b3 +499, 0xf4d7a5a6 +500, 0xac24b97 +501, 0x63494d78 +502, 0x627ffa89 +503, 0xfa4f330 +504, 0x8098a1aa +505, 0xcc0c61dc +506, 0x34749fa0 +507, 0x7f217822 +508, 0x418d6f15 +509, 0xa4b6e51e +510, 0x1036de68 +511, 0x1436986e +512, 0x44df961d +513, 0x368e4651 +514, 0x6a9e5d8c +515, 0x27d1597e +516, 0xa1926c62 +517, 0x8d1f2b55 +518, 0x5797eb42 +519, 0xa90f9e81 +520, 0x57547b10 +521, 0xdbbcca8e +522, 0x9edd2d86 +523, 0xbb0a7527 +524, 0x7662380c +525, 0xe7c98590 +526, 0x950fbf3f +527, 0xdc2b76b3 +528, 0x8a945102 +529, 0x3f0a1a85 +530, 0xeb215834 +531, 0xc59f2802 +532, 0xe2a4610 +533, 0x8b5a8665 +534, 0x8b2d9933 +535, 0x40a4f0bc +536, 0xaab5bc67 +537, 0x1442a69e +538, 0xdf531193 +539, 0x698d3db4 +540, 0x2d40324e +541, 0x1a25feb2 +542, 0xe8cc898f +543, 0xf12e98f5 +544, 0xc03ad34c +545, 0xf62fceff +546, 0xdd827e1e +547, 0x7d8ccb3b +548, 0xab2d6bc1 +549, 0xc323a124 +550, 0x8184a19a +551, 0xc3c4e934 +552, 0x5487424d +553, 0xd6a81a44 +554, 0x90a8689d +555, 0xe69c4c67 +556, 0xbdae02dd +557, 0x72a18a79 +558, 0x2a88e907 +559, 0x31cf4b5d +560, 0xb157772f +561, 0x206ba601 +562, 0x18529232 +563, 0x7dac90d8 +564, 0x3a5f8a09 +565, 0x9f4b64a3 +566, 0xae373af9 +567, 0x1d79447c +568, 0x2a23684b +569, 0x41fb7ba4 +570, 0x55e4bb9e +571, 0xd7619d3e +572, 0xc04e4dd8 +573, 0x8418d516 +574, 0x2b2ca585 +575, 0xfa8eedf +576, 0x5bafd977 +577, 0x31974fb0 +578, 0x9eb6697b +579, 0xc8be22f5 +580, 0x173b126a +581, 0x8809becf +582, 0x3e41efe1 +583, 0x3d6cbbb8 +584, 0x278c81d8 +585, 0xa6f08434 +586, 0xa0e6601d +587, 0x2fccd88d +588, 0x3cbc8beb +589, 0x5f65d864 +590, 0xa1ff8ddf +591, 0x609dcb7c +592, 0x4a4e1663 +593, 0xeae5531 +594, 0x962a7c85 +595, 0x1e110607 +596, 0x8c5db5d0 +597, 0xc7f2337e +598, 0xc94fcc9c +599, 0xe7f62629 +600, 0x6c9aa9f8 +601, 0x2e27fe0e +602, 0x4d0dae12 +603, 0x9eecf588 +604, 0x977ba3f2 +605, 0xed0a51af +606, 0x3f3ec633 +607, 0xc174b2ec +608, 0x590be8a9 +609, 0x4f630d18 +610, 0xf579e989 +611, 0xe2a55584 +612, 0xee11edcd +613, 0x150a4833 +614, 0xc0a0535c +615, 0xb5e00993 +616, 0xb6435700 +617, 0xa98dbff +618, 0x315716af +619, 0x94395776 
[... rows 620-998 of the preceding 32-bit test-vector file elided: one "+<index>, 0x<hex>" line per raw 32-bit output ...]
+999, 0xad8c191b
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64-testset-1.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64-testset-1.csv
new file mode 100644
index 0000000..0c8271f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0x60d24054e17a0698
[... rows 1-998 elided: one "+<index>, 0x<hex>" line per raw 64-bit output ...]
+999, 0xa5cb380b8de10d10
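The test-vector files added here share one layout: a first line "seed, 0x<hex>" naming the seed, followed by 1000 lines of "<index>, 0x<hex>" holding the bit generator's raw outputs. As a minimal sketch of how such a file could be checked against NumPy's PCG64 bit generator -- assuming, as NumPy's own random tests appear to do, that the vectors come from seeding the generator directly with the header value -- something like the following would work; read_testset is an illustrative helper introduced here, not part of NumPy:

    import numpy as np
    from numpy.random import PCG64

    def read_testset(path):
        # First line: "seed, 0x<hex>"; remaining lines: "<index>, 0x<hex>".
        with open(path) as f:
            seed = int(f.readline().split(",")[1].strip(), 0)
            values = [int(line.split(",")[1].strip(), 0) for line in f]
        return seed, np.array(values, dtype=np.uint64)

    seed, expected = read_testset("pcg64-testset-1.csv")  # seed == 0xdeadbeaf
    # random_raw() exposes the untransformed 64-bit outputs of the
    # underlying generator, which is what these CSV rows record.
    actual = PCG64(seed).random_raw(len(expected))
    assert np.array_equal(actual, expected)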
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64-testset-2.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64-testset-2.csv
new file mode 100644
index 0000000..7c13e31
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0xa30febcfd9c2825f
[... rows 1-998 elided: one "+<index>, 0x<hex>" line per raw 64-bit output ...]
+999, 0x6148329042f743b0
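This second file repeats the same layout with seed 0x0, and the file that follows it, pcg64dxsm-testset-1.csv, carries the analogous vectors for the DXSM output variant. Under the same assumptions as the sketch above, the only change needed to check it would be the bit generator class:

    from numpy.random import PCG64DXSM  # available in NumPy >= 1.21

    seed, expected = read_testset("pcg64dxsm-testset-1.csv")
    assert np.array_equal(PCG64DXSM(seed).random_raw(len(expected)), expected)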
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv
new file mode 100644
index 0000000..39cef05
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xdf1ddcf1e22521fe
[... rows 1-698 elided: one "+<index>, 0x<hex>" line per raw 64-bit output ...]
+699,
0xa7121f15ee2b0d8a +700, 0x4bdaea70d6b34583 +701, 0xe821dc2f310f7a49 +702, 0x4c00a5a68e76f647 +703, 0x331065b064a2d5ea +704, 0xac0c2ce3dc04fa37 +705, 0x56b32b37b8229008 +706, 0xe757cdb51534fcfa +707, 0xd3ff183576b2fad7 +708, 0x179e1f4190f197a7 +709, 0xf874c626a7c9aae5 +710, 0xd58514ffc37c80e4 +711, 0xc65de31d33fa7fd3 +712, 0x6f6637052025769b +713, 0xca1c6bdadb519cc0 +714, 0xd1f3534cde37828a +715, 0xc858c339eee4830a +716, 0x2371eacc215e02f4 +717, 0x84e5022db85bbbe9 +718, 0x5f71c50bba48610e +719, 0xe420192dad9c323f +720, 0x2889342721fca003 +721, 0x83e64f63334f501d +722, 0xac2617172953f2c +723, 0xfa1f78d8433938ff +724, 0x5578382760051462 +725, 0x375d7a2e3b90af16 +726, 0xb93ff44e6c07552d +727, 0xded1d5ad811e818c +728, 0x7cf256b3b29e3a8c +729, 0x78d581b8e7bf95e8 +730, 0x5b69192f2caa6ad3 +731, 0xa9e25855a52de3ce +732, 0x69d8e8fc45cc188d +733, 0x5dd012c139ad347d +734, 0xfcb01c07b77db606 +735, 0x56253e36ab3d1cce +736, 0x1181edbb3ea2192 +737, 0x325bef47ff19a08d +738, 0xd3e231ceb27e5f7 +739, 0x8e819dd2de7956d2 +740, 0x34a9689fe6f84a51 +741, 0x3e4eeb719a9c2927 +742, 0x5c3b3440581d0aaf +743, 0x57caf51897d7c920 +744, 0xec6a458130464b40 +745, 0xe98f044e0da40e9b +746, 0xbe38662020eeb8e7 +747, 0x7b8c407c632724ae +748, 0x16c7cfa97b33a544 +749, 0xd23359e2e978ae5a +750, 0x4fdba458250933dd +751, 0x3c9e0713cfe616ba +752, 0x6f0df87b13163b42 +753, 0xc460902cb852cc97 +754, 0x289df8fefd6b0bce +755, 0x4ac2a2a1c3fb8029 +756, 0x2fc3e24d8b68eef7 +757, 0x34564386a59aab9a +758, 0x31047391ebd67ce4 +759, 0x6c23d070a0564d41 +760, 0xba6387b2b72545f7 +761, 0xcdcf1008058387af +762, 0xc9308fa98db05192 +763, 0xdbdbb5abd01a9d84 +764, 0x937088275c7804ab +765, 0x6f6accfefe34ee81 +766, 0x5c33c74c49cfdb2c +767, 0x5e1a771edfb92bd3 +768, 0x6e89b009069ecae7 +769, 0x34d64e17ec0e8968 +770, 0x841203d0cde0c330 +771, 0x7642cc9d7eb9e9cb +772, 0xca01d2e8c128b97e +773, 0x5b8390617b3304ab +774, 0x52ec4ed10de1eb2d +775, 0xb90f288b9616f237 +776, 0x5bd43cd49617b2e2 +777, 0x1a53e21d25230596 +778, 0x36ccd15207a21cd6 +779, 0xc8263d780618fd3c +780, 0x6eb520598c6ce1cb +781, 0x493c99a3b341564f +782, 0xab999e9c5aa8764f +783, 0xab2fa4ceaba84b +784, 0xbbd2f17e5cb2331b +785, 0xc8b4d377c0cc4e81 +786, 0x31f71a6e165c4b1e +787, 0xd1011e55fb3addaa +788, 0x5f7ec34728dfa59 +789, 0x2aef59e60a84eb0f +790, 0x5dde6f09aec9ad5f +791, 0x968c6cdbc0ef0438 +792, 0x1957133afa15b13a +793, 0xbaf28f27573a64c2 +794, 0xc6f6ddd543ebf862 +795, 0xdd7534315ec9ae1e +796, 0xd2b80cd2758dd3b +797, 0xa38c3da00cc81538 +798, 0x15c95b82d3f9b0f9 +799, 0x6704930287ce2571 +800, 0x9c40cc2f6f4ecb0c +801, 0xc8de91f50b22e94e +802, 0x39272e8fddbfdf0a +803, 0x879e0aa810a117d +804, 0xa312fff4e9e5f3bd +805, 0x10dd747f2835dfec +806, 0xeb8466db7171cdae +807, 0xaa808d87b9ad040a +808, 0xab4d2229a329243a +809, 0x7c622f70d46f789c +810, 0x5d41cef5965b2a8e +811, 0xce97ec4702410d99 +812, 0x5beba2812c91211b +813, 0xf134b46c93a3fec7 +814, 0x76401d5630127226 +815, 0xc55fc9d9eacd4ec1 +816, 0xaec8cefaa12f813f +817, 0x2f845dcfd7b00722 +818, 0x3380ab4c20885921 +819, 0xdb68ad2597691b74 +820, 0x8a7e4951455f563f +821, 0x2372d007ed761c53 +822, 0xcab691907714c4f1 +823, 0x16bc31d6f3abec1a +824, 0x7dff639fbcf1824 +825, 0x6666985fbcff543d +826, 0xb618948e3d8e6d0c +827, 0x77b87837c794e068 +828, 0xcd48288d54fcb5a8 +829, 0x47a773ed6ae30dc3 +830, 0xba85ae44e203c942 +831, 0xa7a7b21791a25b2d +832, 0x4029dd92e63f19e0 +833, 0xc2ad66ab85e7d5aa +834, 0xa0f237c96fdab0db +835, 0xffefb0ab1ca18ed +836, 0x90cb4500785fd7d5 +837, 0xa7dd3120f4876435 +838, 0x53f7872624694300 +839, 0xea111326ff0040d9 +840, 0x5f83cb4cce40c83b +841, 
0x918e04936c3b504d +842, 0x87a8db4c0e15e87c +843, 0x7cff39da6a0dedd0 +844, 0x36f7de2037f85381 +845, 0xd1d8d94022a1e9a7 +846, 0x2c9930127dc33ec9 +847, 0x6cb4719dcd0101c6 +848, 0xc01868cde76935f7 +849, 0x6b86f2ec1ab50143 +850, 0x68af607d8d94ae61 +851, 0xe216c5b95feedf34 +852, 0x4b866bd91efe2e4b +853, 0x4bff79df08f92c99 +854, 0x6ff664ea806acfd1 +855, 0x7fce0b3f9ece39bc +856, 0x29bc90b59cb3db97 +857, 0x833c4b419198607d +858, 0xf3573e36ca4d4768 +859, 0x50d71c0a3c2a3fa8 +860, 0xd754591aea2017e7 +861, 0x3f9126f1ee1ebf3 +862, 0xe775d7f4b1e43de8 +863, 0xe93d51628c263060 +864, 0x83e77f6fb32d6d82 +865, 0x43dd7eef823408e4 +866, 0x1c843c2c90180662 +867, 0xe924dafb9a16066b +868, 0x6af3ee96e7b7fbd9 +869, 0x94d5c4f37befcd1f +870, 0x40ffb04bedef4236 +871, 0x71c17bbc20e553e +872, 0x101f7a0a6208729f +873, 0x5ca34570cf923548 +874, 0x8e3139db2e96e814 +875, 0x3ab96d96263d048d +876, 0x97f3c0bbc6755c3c +877, 0x31fc72daedaef3dc +878, 0x71f8d7855d10789b +879, 0xce6dc97b4662333b +880, 0xfddc2aabd342bc61 +881, 0xefbd4007ff8c7d2e +882, 0xf72cd6c689ef8758 +883, 0x932c8b0c0e755137 +884, 0x94cc4dedd58ff69 +885, 0xde4dfd6890535979 +886, 0xdb00dcd2dcb4a50a +887, 0xb0466240b4548107 +888, 0x9cb9264c7b90d1a3 +889, 0x357e378e9be5766b +890, 0x6e0316ef03367bbf +891, 0x201ea18839544ca +892, 0x803ff3406be5f338 +893, 0xf9d5e82fd4144bb2 +894, 0x1b6b88ca701e9f47 +895, 0xd1fe5ab8e1f89cc0 +896, 0x14171fe176c4bece +897, 0x887948bdef78beaa +898, 0x80449ddc3eb9b977 +899, 0x5f4e1f900fb4bcf3 +900, 0xbe30f8701909f8e2 +901, 0xd1f2a2fb5503306d +902, 0x6b1c77238dc23803 +903, 0x102156a6c9860f66 +904, 0x4cd446e099edf4c1 +905, 0xc79ac6cbc911f33b +906, 0x3ee096ffe3384f1c +907, 0xb58f83b18a306dc7 +908, 0x9f76582141de56b2 +909, 0x9ddfa85e02c13866 +910, 0x4d9a19d4ce90a543 +911, 0xbf81ab39fd17d376 +912, 0x5327e5054c6a74f1 +913, 0xd5062dd31db1a9b7 +914, 0x645853735527edc +915, 0x485393967f91af08 +916, 0xeff9667dcf77ca68 +917, 0xd012313f5fbec464 +918, 0xbeae35bdfae55144 +919, 0x302c41ebac8444a0 +920, 0x9ccdb6c2fe58fba8 +921, 0x567753af68ed23f8 +922, 0xff90f790e43efec3 +923, 0x970cc756fb799696 +924, 0xe59239d1c44915 +925, 0x4d2d189fb3941f05 +926, 0x96f23085db165a9c +927, 0xa1202dec7a37b1a5 +928, 0xc0c1ee74bcd7dc1a +929, 0x9edcf2048b30333a +930, 0xd848588ba7e865fb +931, 0x8d9f0897317cab40 +932, 0x67b96f15e25924fb +933, 0xefc8d8536619ee42 +934, 0xf3f621d22bdde0c2 +935, 0x68610a0de862ae32 +936, 0xa22ca5142de24cbd +937, 0x8815452f4e6b4801 +938, 0x4e9c1b607b2750e5 +939, 0x19b3c09ba6fc9b25 +940, 0x9b2543c8836780ac +941, 0xe702b8f950e56431 +942, 0xb357cc329cac3917 +943, 0x387bf86a17a31e08 +944, 0x9940b983d331b163 +945, 0xf5d89d7fe9095e18 +946, 0x4362682329e5c4d1 +947, 0xd2132573f6ae7b42 +948, 0xc0a5849e23a61606 +949, 0xdadbddf47265bc02 +950, 0x1b96f00339a705f7 +951, 0x94e6642329288913 +952, 0x825ab3f10e6d330b +953, 0x1a1c31ac9d883ea0 +954, 0xb49076b7155c6f47 +955, 0x920cf3085dfe3ccb +956, 0x9743407c9f28e825 +957, 0x6ce8a28622402719 +958, 0xce2fe67e06baf8a6 +959, 0x3a16b34784ecf5e6 +960, 0x140467cc1d162a0c +961, 0x32d4772692ab625 +962, 0xa4f4b28562f43336 +963, 0x885b4335457bd84a +964, 0x499d3ed26c87ad8a +965, 0xc7328bcedb9a545e +966, 0xc6dd76a6cbf5d2b2 +967, 0xba9c22be404ee1aa +968, 0x70e6aee45f23521d +969, 0x61e03a798593c177 +970, 0x171671f809c68213 +971, 0x28d54872fc1d914c +972, 0x43c2fcd9bd098b53 +973, 0x172ad4c4a98b9d37 +974, 0x330860c9460f2516 +975, 0x49547f472df984f4 +976, 0x873b2436d3f0e114 +977, 0x6f99accf4ea050b6 +978, 0x5968ac874ed51613 +979, 0x4939d70d29a3c611 +980, 0x11f381ed28738d3d +981, 0xa97430d36ab3a869 +982, 0xe6fa880801129e22 +983, 
0xf84decbd8f48c913 +984, 0x4425c0ed1e9a82a5 +985, 0x7a1f9485e9929d5a +986, 0xc7c51f155dfce1c6 +987, 0x9619a39501d74f2b +988, 0x7c7035955dbf4c1b +989, 0xc61ee569cf57c2c9 +990, 0x3eaf7c5b0df734e1 +991, 0xe71cb4064d1ede05 +992, 0x356e3cec80e418b2 +993, 0xca04306243a15be6 +994, 0x941cf3881fa18896 +995, 0x30dbb0e819d644e0 +996, 0xaae22c0bef02859a +997, 0x7bd30917bbaa8a94 +998, 0x2672547bc8d7d329 +999, 0x4955c92aaa231578 diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv new file mode 100644 index 0000000..878c5ea --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0xd97e4a147f788a70 +1, 0x8dfa7bce56e3a253 +2, 0x13556ed9f53d3c10 +3, 0x55dbf1c241341e98 +4, 0xa2cd98f722eb0e0a +5, 0x83dfc407203ade8 +6, 0xeaa083df518f030d +7, 0x44968c87e432852b +8, 0x573107b9cb8d9ecc +9, 0x9eedd1da50b9daca +10, 0xb33a6735ca451e3c +11, 0x72830d2b39677262 +12, 0x9da8c512fd0207e8 +13, 0x1fc5c91954a2672b +14, 0xd33479437116e08 +15, 0x9ccdd9390cee46f3 +16, 0x1fd39bb01acd9e76 +17, 0xedc1869a42ff7fe5 +18, 0xbd68ca0b42a6e7e9 +19, 0x620b67df09621b1f +20, 0xfa11d51bd6950221 +21, 0xc8c45b36e7d28d08 +22, 0xe9c91272fbaad777 +23, 0x2dc87a143f220e90 +24, 0x6376a7c82361f49d +25, 0x552c5e434232fe75 +26, 0x468f7f872ac195bc +27, 0x32bed6858125cf89 +28, 0xe4f06111494d09d3 +29, 0xa5c166ffea248b80 +30, 0x4e26605b97064a3f +31, 0xceafd9f6fc5569d +32, 0xb772f2f9eed9e106 +33, 0x672c65e6a93534e2 +34, 0xcdc5e1a28d1bd6a0 +35, 0x1ed9c96daeebd3e3 +36, 0x4d189dcfc0c93c3f +37, 0x50df5a95c62f4b43 +38, 0xcccf4949fa65bbb8 +39, 0x19b8073d53cdc984 +40, 0x6fb40bba35483703 +41, 0xb02de4aef86b515a +42, 0x4d90c63655350310 +43, 0xea44e4089825b16c +44, 0x8d676958b1f9da2b +45, 0x6d313940917ae195 +46, 0x1b1d35a4c1dd19f4 +47, 0x117720f8397337ef +48, 0xcc073cf3ac11eeaa +49, 0x8331ec58a9ff8acb +50, 0xf3dc2a308b6b866f +51, 0x7eba1202663382b6 +52, 0x8269839debeb4e5a +53, 0x87fd3dc0f9181a8e +54, 0xabe62ddd3c925f03 +55, 0x7f56f146944fe8d4 +56, 0xc535972150852068 +57, 0x60b252d453bd3a68 +58, 0x4251f0134634490a +59, 0x338950da210dfeb2 +60, 0xcadfe932971c9471 +61, 0xfb7049457fab470e +62, 0x9bfb8145a4459dff +63, 0x4a89dda3898f9d8a +64, 0x88cc560151483929 +65, 0x277dc820f4b6796e +66, 0x3524bd07ea0afb88 +67, 0x92eb6ffb2bf14311 +68, 0xf6559be0783f3fe9 +69, 0xf0844f9af54af00d +70, 0xdd5e0b59adcef8a +71, 0x4ff7e4f2ab18554c +72, 0x3fa22c8a02634587 +73, 0x1db8e1a9442fe300 +74, 0x40cf15953ad3d3e7 +75, 0x92af15fe1a9f6f0a +76, 0xab4a0e466fb0cfd +77, 0x944f1555a06cca82 +78, 0x10cf48412f1f6066 +79, 0x7f51f9a455f9e8e1 +80, 0x47ee93530f024c7e +81, 0x36cf2f0413e0f6f2 +82, 0xa315e23731969407 +83, 0xd8e2796327cf5f87 +84, 0xa86072696a555c34 +85, 0xee3f0b8804feaab7 +86, 0x41e80dc858f8360b +87, 0x31ec2e9b78f5b29 +88, 0xd397fb9b8561344c +89, 0x28081e724e649b74 +90, 0x5c135fc3fc672348 +91, 0x9a276ca70ce9caa0 +92, 0x9216da059229050a +93, 0xcf7d375ed68007b0 +94, 0xa68ad1963724a770 +95, 0xd4350de8d3b6787c +96, 0xee7d2c2cc275b6d2 +97, 0x71645ec738749735 +98, 0x45abdf8c68d33dbb +99, 0xe71cadb692c705ea +100, 0x60af6f061fd90622 +101, 0x1eabe2072632c99d +102, 0x947dda995a402cb6 +103, 0xbb19f49a3454f3b +104, 0xe6e43e907407758c +105, 0xfe2b67016bd6873a +106, 0x7fdb4dd8ab30a722 +107, 0x39d3265b0ff1a45b +108, 0xed24c0e4fce8d0c2 +109, 0xf6e074f86faf669d +110, 0x9142040df8dc2a79 +111, 0x9682ab16bc939a9c +112, 0x6a4e80c378d971c8 +113, 0x31309c2c7fc2d3d6 +114, 0xb7237ec682993339 +115, 
0x6a30c06bb83dccd9 +116, 0x21c8e9b6d8e7c382 +117, 0x258a24ae6f086a19 +118, 0xb76edb5be7df5c35 +119, 0x3c11d7d5c16e7175 +120, 0xbdfc34c31eff66e1 +121, 0x8af66e44be8bf3a2 +122, 0x3053292e193dec28 +123, 0xd0cc44545b454995 +124, 0x408ac01a9289d56 +125, 0x4e02d34318ec2e85 +126, 0x9413ff3777c6eb6b +127, 0xa3a301f8e37eb3df +128, 0x14e6306bd8d8f9f9 +129, 0xd3ea06ce16c4a653 +130, 0x170abe5429122982 +131, 0x7f9e6fddc6cacb85 +132, 0xa41b93e10a10a4c8 +133, 0x239216f9d5b6d0b5 +134, 0x985fcb6cb4190d98 +135, 0xb45e3e7c68f480c6 +136, 0xc1b2fc2e0446211c +137, 0x4596adb28858c498 +138, 0x2dd706f3458ddc75 +139, 0x29c988c86f75464 +140, 0xac33a65aa679a60 +141, 0xa28fef762d39d938 +142, 0x541e6fa48647f53 +143, 0x27838d56b2649735 +144, 0x8e143d318a796212 +145, 0xaea6097745f586b8 +146, 0x636143330f8ee2e6 +147, 0xc2d05fd8b945b172 +148, 0x6e355f9eb4353055 +149, 0xeb64ca42e8bf282e +150, 0xe8202dfd9da0fe5 +151, 0x7305689c9d790cba +152, 0xf122f8b1bef32970 +153, 0x9562887e38c32ba5 +154, 0xf9cd9be121b738d +155, 0x6238e0c398307913 +156, 0x5f2e79bb07c30f47 +157, 0x8ce8e45c465006e +158, 0x39281fe1e99e2441 +159, 0xafb10c2ca2874fea +160, 0x6e52f91633f83cf +161, 0x8ff12c1ac73c4494 +162, 0xe48608a09365af59 +163, 0xefd9bbc7e76e6a33 +164, 0xbe16a39d5c38ec92 +165, 0x6a6ffbcaf5a2330f +166, 0xdd5d6ac7d998d43d +167, 0x207bf978226d4f11 +168, 0xf8eec56bd2a0f62e +169, 0xa5bccf05dce0d975 +170, 0x93cf3ec1afe457a6 +171, 0x38651466d201f736 +172, 0x3ad21473985c9184 +173, 0xc6407a3bd38c92a6 +174, 0xb1ec42c7afa90a25 +175, 0xbdeca984df8b7dd3 +176, 0xb6926b1d00aa6c55 +177, 0x86141d0022352d49 +178, 0x169316256135ee09 +179, 0xffb1c7767af02a5c +180, 0x502af38ad19f5c91 +181, 0xfbf6cbc080086658 +182, 0x33cf9b219edae501 +183, 0x46e69bebd77b8862 +184, 0xf11e0cc91125d041 +185, 0xb4cd1649f85e078f +186, 0xb49be408db4e952 +187, 0xb0b8db46140cce3c +188, 0xba647f2174012be7 +189, 0x4f0a09e406970ac9 +190, 0xf868c7aec9890a5c +191, 0xde4c8fa7498ea090 +192, 0x872ceb197978c1d4 +193, 0x1eb5cd9c3269b258 +194, 0x3ea189f91724f014 +195, 0x41379656f7746f2c +196, 0x7bd18493aca60e51 +197, 0x5380c23b0cbbf15e +198, 0x920b72835f88246b +199, 0x24d7f734a4548b8e +200, 0x9944edb57e5aa145 +201, 0x4628e136ebb8afe1 +202, 0xb4ee6a776356e2a7 +203, 0x481cbe9744ccf7d7 +204, 0x7e8d67e8b0b995d9 +205, 0xeeacde100af7b47e +206, 0x103da08f2487dab7 +207, 0x6b9890a91d831459 +208, 0xd0c5beae37b572c7 +209, 0xfdccc371ee73fcc +210, 0x65438f0a367a2003 +211, 0x5d23b2c818a7e943 +212, 0x9a8ed45ac04b58b3 +213, 0xdaf3c3f1695dce10 +214, 0x5960eec706fa2bc0 +215, 0x98ca652facb80d40 +216, 0x72970ae5e2194143 +217, 0x18c6374d878c5c94 +218, 0x20fa51f997381900 +219, 0x3af253dba26d6e1d +220, 0x1b23d65db15c7f78 +221, 0x9f53ae976259b0e3 +222, 0x9a6addb28dc92d49 +223, 0x1e085c4accd0a7d7 +224, 0xe9d3f4cc9bad6ce5 +225, 0xe018fad78b5b1059 +226, 0x5ef7682232b4b95 +227, 0xb2242aa649f5de80 +228, 0x8f3e6d8dd99b9e4e +229, 0xb9be6cc22949d62a +230, 0xecbdc7beaa5ff1fe +231, 0xd388db43a855bdf0 +232, 0xd71ee3238852568d +233, 0x85ab3056304c04b5 +234, 0x2ed7ae7ad3cfc3cb +235, 0x781d1b03d40b6c48 +236, 0x7d3c740886657e6d +237, 0x982cfa6828daa6b0 +238, 0x278579599c529464 +239, 0x773adecfae9f0e08 +240, 0x63a243ea4b85c5d7 +241, 0x59940074fc3709e1 +242, 0xc914a2eed58a6363 +243, 0x2602b04274dd724c +244, 0xdf636eb7636c2c42 +245, 0x891a334d0d26c547 +246, 0xde8cd586d499e22d +247, 0x3ea1aa4d9b7035b6 +248, 0xd085cff6f9501523 +249, 0xe82a872f374959e +250, 0x55cb495bbd42cc53 +251, 0x5f42b3226e56ca97 +252, 0xea463f6f203493a3 +253, 0xeef3718e57731737 +254, 0x1bd4f9d62b7f9f3c +255, 0x19284f5e74817511 +256, 0xaf6e842c7450ca87 +257, 
0x1d27d2b08a6b3600 +258, 0xfb4b912b396a52e3 +259, 0x30804d4c5c710121 +260, 0x4907e82564e36338 +261, 0x6441cf3b2900ddb7 +262, 0xd76de6f51988dc66 +263, 0x4f298ef96fd5e6d2 +264, 0x65432960c009f83d +265, 0x65ebed07e1d2e3df +266, 0xf83ee8078febca20 +267, 0x7bb18e9d74fc5b29 +268, 0x597b5fbc2261d91 +269, 0xea4f8ed0732b15b2 +270, 0xba2267f74f458268 +271, 0x3f304acabd746bbb +272, 0x7bd187af85659a82 +273, 0x88e20dbdb7a08ea3 +274, 0x2a2dc948c772fcb4 +275, 0x87784fec2993c867 +276, 0x89163933cd362d4e +277, 0xfd7b24f04302f957 +278, 0x9bdd544405dfb153 +279, 0xddee0fac58ffc611 +280, 0xa8e8993417e71ec1 +281, 0x55e0ab46ff7757af +282, 0x53e7645f08d3d7df +283, 0xbf78e563bc656ba2 +284, 0x1d162253b45ee2de +285, 0x15e2bfefedf29eb4 +286, 0x4e2a4584aa394702 +287, 0xa89fb12b01525897 +288, 0x825bd98f0544e4df +289, 0xfc6c50da6750700 +290, 0xc24aaabde7d28423 +291, 0x79d6f4660fcb19e5 +292, 0xee7d4fb40c8d659f +293, 0x70bc281b462e811d +294, 0x23ed4dc9636519a7 +295, 0xcb7c3f5a5711b935 +296, 0xe73090e0508c5d9d +297, 0xb25a331f375952a6 +298, 0xa64c86e0c04740f6 +299, 0xb8f3ffc8d56ac124 +300, 0x2479266fc5ee6b15 +301, 0x8d5792d27f5ffbcb +302, 0xb064298be946cd52 +303, 0xf0934a98912ffe26 +304, 0xbe805682c6634d98 +305, 0xe0e6e2c010012b4f +306, 0x58c47d475f75976 +307, 0x358c9a6e646b2b4a +308, 0x7e7c4ffca5b17ba7 +309, 0x43585c8c9a24a04c +310, 0x5154ddbcd68d5c2c +311, 0x4a2b062d3742a5e +312, 0xca5691191da2b946 +313, 0x696a542109457466 +314, 0x9eb5d658a5022ba5 +315, 0x8158cf6b599ab8dc +316, 0x1b95391eaa4af4a6 +317, 0x9953e79bd0fc3107 +318, 0x8639690086748123 +319, 0x2d35781c287c6842 +320, 0x393ef0001cd7bc8f +321, 0xe3a61be8c5f2c22a +322, 0x5e4ff21b847cc29b +323, 0x4c9c9389a370eb84 +324, 0xd43a25a8fc3635fa +325, 0xf6790e4a85385508 +326, 0x37edf0c81cb95e1d +327, 0x52db00d6e6e79af8 +328, 0x3b202bceeb7f096 +329, 0x2a164a1c776136bb +330, 0x73e03ee3fd80fd1b +331, 0xd2c58c0746b8d858 +332, 0x2ed2cb0038153d22 +333, 0x98996d0fc8ceeacc +334, 0xa4ed0589936b37f +335, 0x5f61cf41a6d2c172 +336, 0xa6d4afb538c110d7 +337, 0xe85834541baadf1a +338, 0x4c8967107fd49212 +339, 0x49bafb762ab1a8c1 +340, 0x45d540e2a834bf17 +341, 0x1c0ec8b4ed671dac +342, 0x3d503ce2c83fe883 +343, 0x437bfffd95f42022 +344, 0xc82d1e3d5c2bc8d2 +345, 0x7a0a9cbfcb0d3f24 +346, 0xc0a4f00251b7a3be +347, 0xb5be24e74bb6a1c6 +348, 0xa3104b94b57545b1 +349, 0x86de7d0c4b97b361 +350, 0x879c1483f26538a6 +351, 0xd74c87557f6accfb +352, 0x2f9be40dbf0fe8a1 +353, 0x445a93398f608d89 +354, 0x7b3cb8a7211d7fdc +355, 0xe86cc51290d031e7 +356, 0x33ef3594052ad79f +357, 0xc61911d241dbb590 +358, 0x37cccb0c0e3de461 +359, 0xb75259124080b48b +360, 0xd81e8961beb4abe5 +361, 0xf4542deb84a754e +362, 0x6ea036d00385f02e +363, 0xa7b60b0ac3b88681 +364, 0x108a6c36ca30baf5 +365, 0x4a2adc5bbfe2bf07 +366, 0x4079501f892a5342 +367, 0x55e113963c5448f0 +368, 0x8019ff4903b37242 +369, 0x109c6dcdb7ec6618 +370, 0x1239ac50944da450 +371, 0xe1399c7f94c651c1 +372, 0x5a6bbbae388d365a +373, 0x4d72be57b8810929 +374, 0x3f067df24384e1fb +375, 0x4f8b9e0f7f6c7be +376, 0x202492c342a3b08 +377, 0x250753192af93a3 +378, 0xfba1159d9de2cb8e +379, 0xba964497ab05505c +380, 0x1329ec5d8a709dca +381, 0x32927cacb6cd22bb +382, 0x6b4d7db904187d56 +383, 0xe76adccf8e841e02 +384, 0x8c4bf4b6a788202 +385, 0x3013a3b409831651 +386, 0x7427d125c475412f +387, 0x84dcc4bb2bf43202 +388, 0x117526f1101372a5 +389, 0xfe95d64b8984bd72 +390, 0x524e129934cc55c1 +391, 0xc3db4b0418c36d30 +392, 0xe1cb2047e9c19f7a +393, 0xea43d6c8d8982795 +394, 0xe80ac8a37df89ed +395, 0xfecc2104329ed306 +396, 0xa5c38aac9c1d51ea +397, 0x3abe5d1c01e4fe17 +398, 0x717a805d97fcc7ac +399, 
0x94441f8207a1fb78 +400, 0x22d7869c5f002607 +401, 0x349e899f28c3a1b9 +402, 0x5639950cdea92b75 +403, 0x7e08450497c375b +404, 0x94bf898b475d211d +405, 0x75c761a402375104 +406, 0x1930920ec9d2a1e7 +407, 0xb774ba1bc6f6e4e2 +408, 0xf715602412e5d900 +409, 0x87bb995f4a13f0ba +410, 0xa3c787868dfa9c8d +411, 0xa17fd42a5a4f0987 +412, 0x4a9f7d435242b86 +413, 0x240364aff88f8aef +414, 0xe7cd4cf4bf39f144 +415, 0xd030f313ca4c2692 +416, 0xc46696f4e03ec1e9 +417, 0x22c60f1ec21060b3 +418, 0x16c88058fd68986f +419, 0x69ca448e8e6bde3f +420, 0x3466c2cdec218abd +421, 0x837ac4d05e6b117d +422, 0x911210e154690191 +423, 0x9ece851d6fa358b7 +424, 0x42f79cb0c45e7897 +425, 0xbf7583babd7c499b +426, 0x2059fe8031c6e0b9 +427, 0xabbec8fc00f7e51d +428, 0x88809d86a3a256e1 +429, 0xd36056df829fdcb5 +430, 0x515632b6cb914c64 +431, 0xba76d06c2558874 +432, 0x632c54ca4214d253 +433, 0xadec487adf2cb215 +434, 0x521e663e1940513d +435, 0xb1b638b548806694 +436, 0xbe2d5bfbe57d2c72 +437, 0x8b89e7719db02f7 +438, 0x90ba5281c1d56e63 +439, 0x899e1b92fceea102 +440, 0xf90d918e15182fa6 +441, 0x94a489ce96c948c4 +442, 0xad34db453517fcd4 +443, 0xc5264eb2de15930f +444, 0x101b4e6603a21cee +445, 0xef9b6258d6e85fff +446, 0x6075c7d6c048bd7a +447, 0x6f03232c64e438aa +448, 0x18c983d7105ee469 +449, 0x3ffc23f5c1375879 +450, 0xbc1b4a00afb1f9f +451, 0x5afa6b2bb8c6b46e +452, 0xe7fce4af2f2c152a +453, 0x5b00ab5c4b3982c7 +454, 0x2d4b0c9c0eb4bd0c +455, 0x61d926270642f1f2 +456, 0x7219c485c23a2377 +457, 0x7e471c752fecd895 +458, 0x23c4d30a4d17ba1f +459, 0x65cb277fe565ca22 +460, 0xcbb56ed9c701363b +461, 0xfd04ab3a6eba8282 +462, 0x19c9e5c8bab38500 +463, 0xea4c15227676b65b +464, 0x20f3412606c8da6f +465, 0xb06782d3bf61a239 +466, 0xf96e02d5276a9a31 +467, 0x835d256b42aa52a6 +468, 0x25b09151747f39c1 +469, 0x64507386e1103eda +470, 0x51cbc05716ef88e4 +471, 0x998cd9b7989e81cc +472, 0x9d7115416bec28d1 +473, 0xc992ca39de97906b +474, 0xd571e6f7ca598214 +475, 0xafc7fb6ccd9abbf8 +476, 0x88ef456febff7bf4 +477, 0xdbe87ccc55b157d2 +478, 0xaab95e405f8a4f6d +479, 0xad586a385e74af4f +480, 0x23cd15225c8485aa +481, 0x370940bf47900ac7 +482, 0xefd6afda1a4b0ead +483, 0x9cb1a4c90993dd7a +484, 0xff7893e8b2f70b11 +485, 0xb09e1807c0638e8e +486, 0xb10915dcb4978f74 +487, 0x88212ab0051a85eb +488, 0x7af41b76e1ec793f +489, 0x2e5c486406d3fefd +490, 0xebe54eff67f513cc +491, 0xab6c90d0876a79b8 +492, 0x224df82f93fe9089 +493, 0xc51c1ce053dc9cd2 +494, 0x5ef35a4d8a633ee7 +495, 0x4aca033459c2585f +496, 0xd066932c6eefb23d +497, 0x5309768aab9a7591 +498, 0xa2a3e33823df37f9 +499, 0xcec77ff6a359ee9 +500, 0x784dc62d999d3483 +501, 0x84e789fb8acc985d +502, 0xd590237e86aa60f +503, 0x737e2ffe1c8ad600 +504, 0xc019c3a39a99eab8 +505, 0x6a39e9836964c516 +506, 0xe0fe43129535d9da +507, 0xdfc5f603d639d4de +508, 0x7b9a7d048a9c03b6 +509, 0xbb5aa520faa27fdd +510, 0x2a09b4200f398fa2 +511, 0x38cc88107904064e +512, 0xa9a90d0b2d92bb25 +513, 0x9419762f87e987e3 +514, 0x1a52c525153dedcd +515, 0xc26d9973dd65ae99 +516, 0x8e89bd9d0dc6e6a1 +517, 0x2f30868dc01bfb53 +518, 0x20f09d99b46501c4 +519, 0x78b468a563b8f1e9 +520, 0xcccf34b0b6c380c7 +521, 0xf554e7dc815297e6 +522, 0x332a585cfb4a50ef +523, 0xa9fb64a2b6da41d7 +524, 0xdcd2a5a337391ce0 +525, 0x8a9bd3e324c6463d +526, 0x9f4487d725503bdd +527, 0xf72282d82f1d0ff +528, 0x308f4160abb72d42 +529, 0x648de1db3a601b08 +530, 0x36cab5192e7ebd39 +531, 0x7975fbe4ab6a1c66 +532, 0xd515b4d72243864e +533, 0x43a568f8b915e895 +534, 0x15fa9f2057bdb91d +535, 0x7a43858ef7a222dc +536, 0x17b4a9175ac074fe +537, 0xa932c833b8d0f8f8 +538, 0x1d2db93a9a587678 +539, 0x98abd1d146124d27 +540, 0xf0ab0431671740aa +541, 
0xa9d182467540ad33 +542, 0x41c8a6cfc331b7fc +543, 0xa52c6bd0fcd1d228 +544, 0x2773c29a34dc6fa3 +545, 0x3098230746fc1f37 +546, 0xd63311bb4f23fabe +547, 0x6712bf530cd2faec +548, 0x342e8f342e42c4dd +549, 0xfbd83331851cdcad +550, 0xe903be1361bbc34d +551, 0xd94372e5077e3ef9 +552, 0x95aaa234f194bd8 +553, 0x20c0c8fb11e27538 +554, 0xfaf47dc90462b30b +555, 0x8ddc6d144147682a +556, 0xf626833fd926af55 +557, 0x5df93c34290d1793 +558, 0xb06a903e6e9fca5e +559, 0x10c792dc851d77ca +560, 0xd9b1b817b18e56cb +561, 0x3a81730c408eb408 +562, 0x65052c04a8d4b63c +563, 0x3328546598e33742 +564, 0xeca44a13f62d156d +565, 0x69f83d1d86b20170 +566, 0x937764200412027d +567, 0xc57eb1b58df0f191 +568, 0xa1c7d67dce81bc41 +569, 0x8e709c59a6a579ce +570, 0x776a2f5155d46c70 +571, 0xd92906fbbc373aa5 +572, 0xe97ad478a2a98bf6 +573, 0xc296c8819ac815f +574, 0x613ede67ba70e93e +575, 0xe145222498f99cde +576, 0xafcdfa7a3c1cf9bf +577, 0x1c89252176db670d +578, 0xad245eda5c0865ff +579, 0x249463d3053eb917 +580, 0xc9be16d337517c0b +581, 0xefcc82bf67b8f731 +582, 0x1e01577d029e0d00 +583, 0xad9c24b2a4f3d418 +584, 0xed2cceb510db4d0f +585, 0xbddadcdb92400c70 +586, 0x67d6b0476ef82186 +587, 0xbc7662ff7bf19f73 +588, 0x9d94452a729e6e92 +589, 0x6b278d8594f55428 +590, 0x6c4b31cceb1b2109 +591, 0xccc6c3a726701e9 +592, 0x6bc28ece07df8925 +593, 0xc0422b7bf150ccc4 +594, 0xab7158f044e73479 +595, 0xdf3347546d9ed83f +596, 0x3b3235a02c70dff4 +597, 0x2551c49c14ea8d77 +598, 0xee2f7f5bb3cc228e +599, 0x39b87bfe8c882d39 +600, 0x7dd420fad380b51c +601, 0xffe64976af093f96 +602, 0x4a4f48dc6e7eaa5f +603, 0x85f2514d32fdc8cc +604, 0x1ab1215fd7f94801 +605, 0x4cd1200fc795b774 +606, 0xcf8af463a38942ee +607, 0x319caa7ce3022721 +608, 0x8cd9798a76d1aea4 +609, 0x2bd3933ac7afd34e +610, 0x85d4c323403cf811 +611, 0xd7b956d3064efa30 +612, 0x67a078dbf1f13068 +613, 0x665fa6c83e87c290 +614, 0x9333ac2416d2469b +615, 0xdfb1fd21a0094977 +616, 0xa1962a6e2c25f8ff +617, 0x1f3b10a7ed5287cf +618, 0x70641efb3d362713 +619, 0xe527a2cf85d00918 +620, 0x9741e45d3f9890a3 +621, 0x6cb74b5d4d36db4b +622, 0xf24734d622bd2209 +623, 0xadd6d94f78e9d378 +624, 0xc3bbdb59225cca7f +625, 0x5ad36614275b30cd +626, 0x495568dd74eea434 +627, 0xf35de47e0ffe1f2d +628, 0xefa209dca719ab18 +629, 0x844ddcaeb5b99ae8 +630, 0x37449670a1dc7b19 +631, 0x5a4612c166f845c1 +632, 0xe70f7782f2087947 +633, 0x98d484deac365721 +634, 0x705302198cf52457 +635, 0x7135ae0f5b77df41 +636, 0x342ac6e44a9b6fc3 +637, 0x2713fd2a59af5826 +638, 0x6e1a3f90f84efa75 +639, 0x9fb3b4dd446ca040 +640, 0x530044ae91e6bd49 +641, 0xe984c4183974dc3e +642, 0x40c1fa961997d066 +643, 0xb7868250d8c21559 +644, 0x8bc929fa085fd1de +645, 0x7bdb63288dc8733e +646, 0xac4faad24326a468 +647, 0x1c6e799833aea0b1 +648, 0xcc8a749e94f20f36 +649, 0x4e7abfd0443547c5 +650, 0xb661c73bb8caa358 +651, 0x4a800f5728ff2351 +652, 0x8c15e15189b9f7ed +653, 0xab367846b811362c +654, 0x4ba7508f0851ca2a +655, 0xe9af891acbafc356 +656, 0xbdebe183989601f8 +657, 0x4c665ea496afc061 +658, 0x3ca1d14a5f2ed7c +659, 0xfbdff10a1027dd21 +660, 0xdfd28f77c8cff968 +661, 0xc4fbaadf8a3e9c77 +662, 0xdac7e448b218c589 +663, 0xb26390b5befd19e2 +664, 0xd2ef14916c66dba9 +665, 0xfab600284b0ff86b +666, 0xf04a1c229b58dabb +667, 0xc21c45637e452476 +668, 0xd1435966f75e0791 +669, 0xc1f28522eda4a2d0 +670, 0x52332ae8f1222185 +671, 0x81c6c0790c0bf47e +672, 0xfebd215e7d8ffb86 +673, 0x68c5dce55dbe962b +674, 0x231d09cb0d2531d1 +675, 0x3218fba199dbbc6b +676, 0x8f23c535f8ea0bf6 +677, 0x6c228963e1df8bd9 +678, 0x9843c7722ed153e3 +679, 0xd032d99e419bddec +680, 0xe2dca88aa7814cab +681, 0x4d53fb8c6a59cdc2 +682, 0x8fb3abc46157b68b +683, 
0xa3e733087e09b8e +684, 0x6bdc1aee029d6b96 +685, 0x4089667a8906d65b +686, 0x8f3026a52d39dd03 +687, 0x6d2e0ccb567bae84 +688, 0x74bad450199e464 +689, 0xf114fb68a8f300d5 +690, 0xc7a5cc7b374c7d10 +691, 0xf0e93da639b279d1 +692, 0xb9943841ad493166 +693, 0x77a69290455a3664 +694, 0x41530da2ebea054b +695, 0xe8f9fab03ea24abf +696, 0xaa931f0c9f55a57a +697, 0xb4d68a75d56f97ae +698, 0x3d58ff898b6ba297 +699, 0x49d81e08faf5a3f5 +700, 0xfc5207b9f3697f3b +701, 0xa25911abb3cf19b7 +702, 0x6b8908eb67c3a41 +703, 0xd63ef402e2e3fa33 +704, 0x728e75d3f33b14c5 +705, 0x248cb1b8bc6f379a +706, 0x3aa3d6d2b8c72996 +707, 0x49cc50bd2d3d2860 +708, 0xb4e1387647c72075 +709, 0x435a1630a4a81ed3 +710, 0xa5ea13005d2460cf +711, 0xc7a613df37d159ec +712, 0x95721ccc218b857e +713, 0xd4b70d8c86b124d3 +714, 0x2b82bcc4b612d494 +715, 0xaf13062885276050 +716, 0xcbd8fcf571a33d9c +717, 0x3f7f67ca1125fc15 +718, 0xddf4bb45aac81b4c +719, 0x23606da62de9c040 +720, 0xa3a172375666b636 +721, 0x292f87387a6c6c3c +722, 0xd1d10d00c5496fe1 +723, 0x86b0411ce8a25550 +724, 0x38e0487872e33976 +725, 0x363e49f88ddfd42c +726, 0x45bdf1e9f6b66b0a +727, 0x8a6fff3de394f9b5 +728, 0x8502158bb03f6209 +729, 0x22e24d16dba42907 +730, 0x3fe3ba427cc2b779 +731, 0x77144793f66b3d7e +732, 0xcf8912ccb29b8af9 +733, 0xdc856caff2abd670 +734, 0xe6d3ae0b0d9d4c8b +735, 0xb8f5d40e454c539f +736, 0x79ca953114fbc6b7 +737, 0x478d6f4bbfa38837 +738, 0x9babae1a3ffdc340 +739, 0x40edd56802bae613 +740, 0x97a56c2dcccf0641 +741, 0xafc250257f027f8e +742, 0x8da41ef1edf69125 +743, 0x6574b0280ff9d309 +744, 0x197c776151b8f820 +745, 0x6b03e077c9dac3b6 +746, 0x24a40ebbc5c341c5 +747, 0x50e585169a6a1c4b +748, 0x37783a5a6a3e4e02 +749, 0xb3de81ee6fbad647 +750, 0xf4f292f57ca4591e +751, 0x6214e9e7d44d30a +752, 0x5920190c56d21c12 +753, 0x9ac163419b5e0c9b +754, 0xfc2328761ae8ed93 +755, 0xc68f945b545508c6 +756, 0x687c49a17ce0a5e2 +757, 0x276d8f53d30d4ab4 +758, 0x8201804970343ce1 +759, 0x1b5d323cc2e7fb7e +760, 0x6f351ef04fd904b +761, 0x6c793a7d455d5198 +762, 0x46f5d108430ae91f +763, 0xac16a15b2a0cf77f +764, 0xa0d479d9e4122b9d +765, 0x3afd94604307f19 +766, 0x2573ed6d39d38dbf +767, 0xa58e14ba60b4294b +768, 0xe69c1aed5840d156 +769, 0x4cf6fda7f04855c2 +770, 0x2fb65a56ef5f22da +771, 0xf95819434d5dc220 +772, 0x29c65133623dafba +773, 0x8e997bd018467523 +774, 0xfd08ba9d498461a7 +775, 0xdd52243bc78a5592 +776, 0x39c30108f6db88b3 +777, 0x38af8e1894f259b9 +778, 0x97eedf3b4ae5f6de +779, 0x757825add80c5ece +780, 0xf0fdd90ac14edb14 +781, 0xbbb19d4cc8cac6d4 +782, 0x9a82234edfae05e3 +783, 0x704401c61d1edf1c +784, 0x8b0eb481fb3a1fb2 +785, 0xef6f36e7cc06c002 +786, 0x7a208b17e04b8cd7 +787, 0xf20e33d498838fe9 +788, 0xc2bdb22117058326 +789, 0x6ec31939eb4ca543 +790, 0x6f1654838f507a21 +791, 0xc65ab81a955d2b93 +792, 0x40b1420fdd9531b8 +793, 0xe31f221cab9f4f40 +794, 0x798cdd414c1deb7a +795, 0x9c84e9c7d41cd983 +796, 0x63d6b1ae3b60b7fa +797, 0xb42bfdd1a2f78ffa +798, 0x37e431eaccaaa8e9 +799, 0x7508142a0f73eac9 +800, 0x91662a023df5893a +801, 0x59782070e2fe3031 +802, 0xb2acd589a8ce7961 +803, 0xa224743fa877b292 +804, 0xaa5362aa27e6ed9e +805, 0xa394a4e520c0c1c7 +806, 0xe49b16d2018ffb6f +807, 0xb8074b9f2f1e762b +808, 0xcf5f86143d5c23a7 +809, 0xfd838785db987087 +810, 0x31b1889df389aff8 +811, 0x30aaca876a4383b +812, 0x1731bb71c4c38d4f +813, 0x9a83a65395e05458 +814, 0x99cd0c8d67c8f4fc +815, 0xfbd9fdc849b761a5 +816, 0x82c04834fc466889 +817, 0xdeef9d6e715e8c97 +818, 0x549c281c16da6078 +819, 0x2d70661254ad599d +820, 0x57995793a72acac +821, 0xf1727005116183ba +822, 0xa22bb38945285de3 +823, 0x4f2d687fe45131ff +824, 0x5666c87ddbbc981f +825, 
0xbcb4b2d4e7a517d0 +826, 0x5e794dd2e20b785d +827, 0x449ad020149e093c +828, 0x7704ee0412d106f5 +829, 0x83cbdf257b072ac1 +830, 0xae5c4fc9f638b0da +831, 0x7b9e5a64e372ed47 +832, 0x7eddbbb22c2cdf57 +833, 0x3f19ebfa155b08e +834, 0x91d991154dfd7177 +835, 0x611ae74b952d387f +836, 0x3fdf7a335bda36ee +837, 0xdf182433fc7a7c05 +838, 0x62c78598d1f8db0a +839, 0xc3750c69d2c5c1f0 +840, 0xf1318024709efdee +841, 0xaa3fd360d224dc29 +842, 0x62af53b2f307c19 +843, 0xdf527683c58120c2 +844, 0x3281deecc496f93d +845, 0x4f704ad31527ef08 +846, 0x127a14a5e07cfdfc +847, 0x90d0b1f549255c92 +848, 0xbc3406b212c5e1fc +849, 0x4e89f39379dba91d +850, 0x1290ef43c4998e6e +851, 0xecfeb1a1cb1c6e1b +852, 0x2067e90403003bf1 +853, 0x38ae04be30bdbeba +854, 0x8a3537f298baedda +855, 0xd07f3b825cdb2936 +856, 0xea020b5aebae8b45 +857, 0xfcd614ab031132b0 +858, 0x5fb682a4ff2268f5 +859, 0xd1c4662ce65596f4 +860, 0x7026b8270dd0b8dc +861, 0x8101ec4b4beae45a +862, 0xa0e9dc87940610a6 +863, 0x83ec33679d83165b +864, 0x981847ca82e86d41 +865, 0xda84c188a304a0b7 +866, 0x3c37529c5a5bbbb8 +867, 0x34a8491ce3e19a5a +868, 0xd36ad716a2fa6cb8 +869, 0xfd1d1d6a5189a15c +870, 0x9716eb47851e8d8d +871, 0x7dfb13ea3b15c5aa +872, 0xbdf6e707f45113a5 +873, 0xb8118261b04bd097 +874, 0x6191f9895881bec6 +875, 0x7aac257ae11acf9b +876, 0x35a491e1537ff120 +877, 0xe078943432efa71c +878, 0xb3338485dd3dc2b9 +879, 0x456060975d2bb3b5 +880, 0xaddc4c451bdfc44c +881, 0x18bfa7beacf96430 +882, 0x8802ebcaf0f67498 +883, 0xad922a5a825bd780 +884, 0x9fb4587d748f4efa +885, 0xdb2a445136cd5e7 +886, 0xb98b3676ea8e96ac +887, 0xb02d8d244d784878 +888, 0xa1a8442b18860abb +889, 0x6a3029ba1361e5d1 +890, 0xf426d5fac161eb1 +891, 0xfa5ac2b87acecb23 +892, 0xaa659896e50535df +893, 0xf40dd7a3d3c5c8ed +894, 0x3f8367abecb705bc +895, 0x2d60e7525873358f +896, 0xc4a9d3948a0c3937 +897, 0x5ecc04fef6003909 +898, 0x7a865004918cba2 +899, 0x47ae110a678ec10b +900, 0xa0f02f629d91aa67 +901, 0x4848b99e7fac9347 +902, 0xaa858346d63b80ac +903, 0xeb5bf42ee161eeef +904, 0x4d35d723d3c6ba37 +905, 0xdf22ca6ca93b64a7 +906, 0x9d198520f97b25b1 +907, 0x3068415350778efe +908, 0xf3709f2e8793c2fe +909, 0xd1517bac8dd9f16f +910, 0xfb99bccaa15861dc +911, 0xa9ad607d796a2521 +912, 0x55d3793d36bd22e4 +913, 0xf99270d891ff7401 +914, 0x401750a5c4aa8238 +915, 0xd84b3003e6f28309 +916, 0x8a23798b5fa7c98b +917, 0xadd58bbc8f43e399 +918, 0xbd8c741ada62c6a8 +919, 0xbdc6937bc55b49fa +920, 0x4aefa82201b8502 +921, 0x17adf29a717b303 +922, 0xa6ed2197be168f6c +923, 0x1ba47543f4359a95 +924, 0xe34299949ac01ae9 +925, 0x711c76cffc9b62f3 +926, 0xbac259895508a4b7 +927, 0x3c8b3b3626b0d900 +928, 0x1a8d23fbe2ae71bf +929, 0xca984fa3b5a5c3a1 +930, 0xb1986ab7521a9c93 +931, 0xd6b5b2c8d47a75b5 +932, 0xc7f1c4a88afb4957 +933, 0xdeb58033a3acd6cc +934, 0xabe49ddfe1167e67 +935, 0x8d559c10205c06e3 +936, 0xea07a1a7de67a651 +937, 0xcbef60db15b6fef8 +938, 0xbfca142cff280e7 +939, 0x362693eba0732221 +940, 0x7463237e134db103 +941, 0x45574ddb5035e17a +942, 0xfc65e0cb9b94a1aa +943, 0x3154c55f1d86b36d +944, 0x2d93a96dd6ab2d8b +945, 0xbe3bc1d1f2542a25 +946, 0xdd4b541f7385bdaa +947, 0x3b56b919d914e3f8 +948, 0x82fd51468a21895f +949, 0x8988cf120731b916 +950, 0xa06a61db5fb93e32 +951, 0x6ed66c1b36f68623 +952, 0x875ae844d2f01c59 +953, 0x17ccd7ac912e5925 +954, 0x12fe2a66b8e40cb1 +955, 0xf843e5e3923ad791 +956, 0xa17560f2fd4ef48 +957, 0x27a2968191a8ee07 +958, 0xa9aab4d22ff44a3c +959, 0x63cd0dcc3bb083ae +960, 0x7a30b48c6160bf85 +961, 0x956160fb572503b3 +962, 0xc47f6b7546640257 +963, 0xaf4b625f7f49153 +964, 0x2f5c86a790e0c7e8 +965, 0xb52e0610ae07f0b8 +966, 0x38a589292c3d849e +967, 
0xc3e9ef655d30b4ef +968, 0xb5695f765cda998a +969, 0xde5d5e692a028e91 +970, 0x839476721555f72e +971, 0x48b20679b17d9ebf +972, 0xe3d4c6b2c26fb0df +973, 0xce5a9834f0b4e71f +974, 0x533abb253d5d420e +975, 0x9eac5ad9aed34627 +976, 0xc0f2a01ab3c90dbb +977, 0x6528eda93f6a066c +978, 0xc16a1b625e467ade +979, 0x1a4a320fb5e8b098 +980, 0x8819cccd8b4ab32f +981, 0x42daa88531fd0bfd +982, 0xcf732226409be17c +983, 0xfddcdb25ccbf378c +984, 0x9b15b603bf589fc1 +985, 0x2436066b95d366fe +986, 0x8d42eff2e9cbda90 +987, 0x694b2fc8a4e8303c +988, 0x8e207f98aaea3ccd +989, 0x4730d7a620f822d9 +990, 0x468dc9ca30fe2fd4 +991, 0x74b36d8a1c0f031b +992, 0x3c1aac1c488c1a94 +993, 0x19d0101042444585 +994, 0x8ec50c56d0c8adf4 +995, 0x721ec629e4d66394 +996, 0x3ca5ad93abeac4a4 +997, 0xaaebc76e71592623 +998, 0x969cc319e3ed6058 +999, 0xc0a277e3b2bfc3de diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/philox-testset-1.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/philox-testset-1.csv new file mode 100644 index 0000000..e448cbf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/philox-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xedc95200e2bd66a5 +1, 0x581d4e43b7682352 +2, 0x4be7278f5e373eab +3, 0xee47f17991a9e7ea +4, 0x38a7d2ae422f2e2c +5, 0xe2a6730a3b4a8a15 +6, 0x1588b7a841486442 +7, 0x13ad777246700504 +8, 0x14d157e0f5e18204 +9, 0xd87c22a7ee8c13f1 +10, 0x30cc389ce3542ba1 +11, 0xb8a53348955bb2e9 +12, 0xc08802e3c454f74f +13, 0xb444f627671a5780 +14, 0x4b6dd42b29cbf567 +15, 0x6109c7dc0bc5f7d5 +16, 0x85c954715d6b5b1e +17, 0x646178d3d9a3a5d5 +18, 0xebbde42b1cd83465 +19, 0x3d015102f6bc9c1a +20, 0x720fe2ec3798d5fd +21, 0x93120961289ceb2e +22, 0xc9207e960a56fae2 +23, 0xa7f042f31d991b98 +24, 0x5fac117415fae74b +25, 0xd0a970ba8dddc287 +26, 0x84b4e7e51b43106 +27, 0x6ad02bf525ea265f +28, 0xcdc7e5992b36ef8f +29, 0x44d4985209261d60 +30, 0x628c02d50f4b902e +31, 0xc7b1914922d1e76d +32, 0xfde99ff895cba51d +33, 0x175a0be050fa985f +34, 0x47297d3699e03228 +35, 0xccf1e9aeaa3339cd +36, 0x9fdd18ebeeaf15b1 +37, 0x7c94c9ab68747011 +38, 0x612d8ef22c1fa80f +39, 0x13f52b860de89ab5 +40, 0x81f264b8c139c43b +41, 0x8d017ba4ef1e85ba +42, 0x6d0556f46219951e +43, 0x8ee7b85663cf67b6 +44, 0x2432fc707645fe67 +45, 0xaf814046051e5941 +46, 0x4d432a83739ac76f +47, 0x59e5060d0983ccdd +48, 0xdd20e828b83d9b53 +49, 0x1b891800d7385f4c +50, 0x10e86a026c52ff5e +51, 0xb932f11723f7b90c +52, 0xb2413d0a1f3582d0 +53, 0xe7cd4edda65fc6b5 +54, 0x6d3808848d56593b +55, 0x192a727c3c7f47d9 +56, 0x9659d8aea5db8c16 +57, 0x4242c79fe2c77c16 +58, 0x605f90c913827cea +59, 0x53e153c8bfc2138a +60, 0xed2158fbdef5910e +61, 0xae9e6e29d4cb5060 +62, 0x7dd51afaad3b11ce +63, 0x2b9ba533d01a5453 +64, 0x7e0e9cf2b6c72c8 +65, 0x1cc8b3c7747ed147 +66, 0x9b102651e2e11b48 +67, 0x30b0b53cbaac33ea +68, 0x70c28aec39b99b85 +69, 0x5f1417ff536fdb75 +70, 0x3a1d91abd53acf58 +71, 0xba116a1772168259 +72, 0xf5369bc9bd284151 +73, 0x67bf11373bf183ca +74, 0xef0b2d44dbd33dc7 +75, 0xbfd567ee1a2953ed +76, 0x7d373f2579b5e5c6 +77, 0x756eeae7bcdd99be +78, 0x75f16eb9faa56f3b +79, 0x96d55ded2b54b9a5 +80, 0x94495191db692c24 +81, 0x32358bdd56bab38c +82, 0x3f6b64078576579 +83, 0x7177e7948bc064c9 +84, 0x2cbf23f09ba9bc91 +85, 0x9b97cc31c26645f5 +86, 0x5af2d239ff9028b1 +87, 0x316fa920e0332abe +88, 0x46535b7d1cae10a0 +89, 0x21f0a6869298022c +90, 0xf395c623b12deb14 +91, 0x8573995180675aa7 +92, 0xc3076509f4dc42d5 +93, 0x15e11e49760c6066 +94, 0xe8a6d311e67a021d +95, 0x7482f389c883339b +96, 0xda6f881573cba403 +97, 0xb110ffb847e42f07 +98, 0x2c3393140605ccf9 +99, 
0xba1c8ba37d8bdc33 +100, 0x59adf43db7a86fe0 +101, 0xb4fcbf6aa585ca85 +102, 0xd794a93c18033fa6 +103, 0x6e839c01985f9d4 +104, 0x64065bf28222b2c7 +105, 0x6a6359b293fa0640 +106, 0x5ff610969e383e44 +107, 0xa8172c263f05c7f7 +108, 0x62a0172e8bd75d07 +109, 0x7be66e3c453b65ac +110, 0x6a3b8d5a14014292 +111, 0xa2583e6087450020 +112, 0xd5d3ecc480c627d2 +113, 0xa24e83f1eec8a27c +114, 0xa23febd2a99ee75a +115, 0x9a5fbf91c7310366 +116, 0x5b63156932e039b +117, 0x942af3c569908505 +118, 0x89a850f71ab6a912 +119, 0xfeadc803ac132fe9 +120, 0x67bf60e758250f3 +121, 0x533c25103466a697 +122, 0xb7deede3482f9769 +123, 0x325e043b53bba915 +124, 0x9e8d9e7fde132006 +125, 0x6bacc6860bbc436e +126, 0xb3ea0534c42b1c53 +127, 0xb2389334db583172 +128, 0xa74b1bfbf5242ee4 +129, 0x53a487e2dc51d15c +130, 0xe5a3b538d2c7a82e +131, 0x7b6c70bb0c4cadaf +132, 0xae20791b2081df1 +133, 0xc685c12e3c61d32c +134, 0x60110e6b0286e882 +135, 0x49682119c774045c +136, 0x53dc11a3bbd072e +137, 0xbdc87c6e732d9c2d +138, 0xcc4620861ebac8fd +139, 0x7e9c3558759350cc +140, 0x157408dee34891ba +141, 0x9bcad1855b80651b +142, 0xd81b29141d636908 +143, 0x1ed041a9f319c69d +144, 0x805b2f541208b490 +145, 0x484ef3bba2eb7c66 +146, 0xb6b5e37d50a99691 +147, 0xabc26a7d9e97e85f +148, 0xcba2a3cce0417c2f +149, 0xa030dfffd701993c +150, 0x2bf2dc50582ebf33 +151, 0xd9df13dd3eb9993e +152, 0x31ca28b757232ae5 +153, 0x614562a0ccf37263 +154, 0x44d635b01725afbb +155, 0x5ae230bc9ca9cd +156, 0xb23a124eb98705c6 +157, 0x6395675444981b11 +158, 0xd97314c34119f9ca +159, 0x9de61048327dd980 +160, 0x16bac6bded819707 +161, 0xcea3700e3e84b8c7 +162, 0xaa96955e2ee9c408 +163, 0x95361dcc93b5bc99 +164, 0x306921aed3713287 +165, 0x4df87f3130cd302a +166, 0x37c451daeb6a4af5 +167, 0x8dbbe35f911d5cc1 +168, 0x518157ce61cb10f9 +169, 0x669f577aebc7b35b +170, 0x4b0a5824a8786040 +171, 0x519bc3528de379f5 +172, 0x6128012516b54e02 +173, 0x98e4f165e5e6a6dd +174, 0x6404d03618a9b882 +175, 0x15b6aeb3d9cd8dc5 +176, 0x87ed2c1bae83c35b +177, 0x8377fc0252d41278 +178, 0x843f89d257a9ba02 +179, 0xcdda696ea95d0180 +180, 0xcfc4b23a50a89def +181, 0xf37fd270d5e29902 +182, 0xafe14418f76b7efa +183, 0xf984b81577076842 +184, 0xe8c60649ccb5458d +185, 0x3b7be8e50f8ff27b +186, 0xaa7506f25cef1464 +187, 0x5e513da59f106688 +188, 0x3c585e1f21a90d91 +189, 0x1df0e2075af292a +190, 0x29fdd36d4f72795f +191, 0xb162fe6c24cb4741 +192, 0x45073a8c02bd12c4 +193, 0xcbaaa395c2106f34 +194, 0x5db3c4c6011bc21c +195, 0x1b02aac4f752e377 +196, 0xa2dfb583eb7bec5 +197, 0xfe1d728805d34bb1 +198, 0xf647fb78bb4601ec +199, 0xd17be06f0d1f51ef +200, 0x39ec97c26e3d18a0 +201, 0xb7117c6037e142c8 +202, 0xe3a6ce6e6c71a028 +203, 0xe70a265e5db90bb2 +204, 0x24da4480530def1e +205, 0xfd82b28ce11d9a90 +206, 0x5bf61ead55074a1d +207, 0xbe9899c61dec480d +208, 0xae7d66d21e51ec9e +209, 0x384ee62c26a08419 +210, 0x6648dccb7c2f4abf +211, 0xc72aa0c2c708bdc9 +212, 0x205c5946b2b5ba71 +213, 0xd4d8d0b01890a812 +214, 0x56f185493625378d +215, 0x92f8072c81d39bd0 +216, 0xa60b3ceecb3e4979 +217, 0xfcf41d88b63b5896 +218, 0xf5a49aa845c14003 +219, 0xffcc7e99eee1e705 +220, 0xdd98312a7a43b32d +221, 0xa6339bd7730b004 +222, 0xdac7874ba7e30386 +223, 0xadf6f0b0d321c8 +224, 0x126a173ae4ffa39f +225, 0x5c854b137385c1e7 +226, 0x8173d471b1e69c00 +227, 0x23fa34de43581e27 +228, 0x343b373aef4507b1 +229, 0xa482d262b4ea919c +230, 0xf7fbef1b6f7fbba +231, 0xd8ce559487976613 +232, 0xbf3c8dd1e6ebc654 +233, 0xda41ed375451e988 +234, 0xf54906371fd4b9b3 +235, 0x5b6bb41231a04230 +236, 0x866d816482b29c17 +237, 0x11315b96941f27dc +238, 0xff95c79205c47d50 +239, 0x19c4fff96fbdac98 +240, 0xbfb1ae6e4131d0f4 +241, 
0x9d20923f3cdb82c9 +242, 0x282175507c865dff +243, 0xdfd5e58a40fe29be +244, 0xedbd906ff40c8e4f +245, 0x11b04fc82614ccb3 +246, 0xeceb8afda76ae49f +247, 0xa4856913847c2cdf +248, 0x6f1425f15a627f2a +249, 0xdf144ffedf60349e +250, 0x392d7ecfd77cc65f +251, 0x72b8e2531049b2c6 +252, 0x5a7eb2bdb0ec9529 +253, 0xdcfd4306443e78c1 +254, 0x89ad67ed86cd7583 +255, 0x276b06c0779a6c8f +256, 0xb2dbb723196a0ac3 +257, 0x66c86a3b65906016 +258, 0x938348768a730b47 +259, 0x5f5282de938d1a96 +260, 0xa4d4588c4b473b1f +261, 0x8daed5962be4796f +262, 0x9dde8d796985a56e +263, 0x46be06dbd9ed9543 +264, 0xdf98286ceb9c5955 +265, 0xa1da1f52d7a7ca2b +266, 0x5a7f1449f24bbd62 +267, 0x3aedc4e324e525fd +268, 0xced62464cd0154e1 +269, 0x148fc035e7d88ce3 +270, 0x82f8878948f40d4c +271, 0x4c04d9cdd6135c17 +272, 0xdf046948d86b3b93 +273, 0x2f0dec84f403fe40 +274, 0xa61954fb71e63c0d +275, 0x616d8496f00382e8 +276, 0x162c622472746e27 +277, 0x43bcfe48731d2ceb +278, 0xff22432f9ff16d85 +279, 0xc033ed32bb0ad5a4 +280, 0x5d3717cc91c0ce09 +281, 0x7a39a4852d251075 +282, 0x61cd73d71d6e6a6 +283, 0xe37e2ea4783ab1a5 +284, 0x60e1882162579ea8 +285, 0x9258ec33f1a88e00 +286, 0x24b32acf029f0407 +287, 0x1410fc9aea6d3fac +288, 0x6054cf2a3c71d8f7 +289, 0x82f7605157a66183 +290, 0x3b34c1c0dff9eac5 +291, 0xfebe01b6d5c61819 +292, 0x7372187c68b777f2 +293, 0xc6923812cda479f0 +294, 0x386613be41b45156 +295, 0x92cfebe8cc4014b +296, 0x8e13c4595849828b +297, 0x90e47390d412291f +298, 0x6b21a1d93d285138 +299, 0xbf5b1f5922f04b12 +300, 0x21e65d1643b3cb69 +301, 0xf7683b131948ac3c +302, 0xe5d99fc926196ed2 +303, 0x7b138debbec90116 +304, 0x8a2650a75c2c2a5c +305, 0x20689a768f9b347b +306, 0xdfa2900cfb72dc6e +307, 0x98959c3855611cc2 +308, 0x5fdb71b89596cc7c +309, 0x1c14ac5c49568c7b +310, 0x958c4293016091fe +311, 0x7484522eb0087243 +312, 0xc4018dfb34fc190f +313, 0xca638567e9888860 +314, 0x102cd4805f0c0e89 +315, 0xcc3bc438e04548f8 +316, 0xb808944bb56ea5be +317, 0xffd4778dbf945c57 +318, 0xfe42617784c0233b +319, 0x3eccbfeae9b42d3c +320, 0xd9f1b585fd0bfa60 +321, 0x5c063d1b2705d5dd +322, 0x8e8bec3519941b64 +323, 0x9e94c36cbec2a42 +324, 0x1cd19f5b64ffd3ad +325, 0x9632e3aebfc68e66 +326, 0x98960c2d9da4ae45 +327, 0xb76994b1f2bbfc1f +328, 0xca184a737d3971cc +329, 0x964d31b07183adfb +330, 0xe9e0ff351cd276d4 +331, 0xb5747c860b05bbe4 +332, 0x5549ddc3bd3862e2 +333, 0x495496677b27873b +334, 0x53910baa26e3ea18 +335, 0xaa07a07ad0a688d3 +336, 0xbb43bd1f09ecdb1e +337, 0xe2ebc105699dd84 +338, 0x6e815a2729584035 +339, 0x2caab1713b17948a +340, 0x43d39d209fa41c90 +341, 0xfe3e71089d5d1c3a +342, 0xa778646c32f81177 +343, 0x8d42bfb86e6e92d5 +344, 0x175571f70b4fcfbe +345, 0x2a66a6fe10dc3b5b +346, 0xd9545e85235ca709 +347, 0x5642781c77ced48a +348, 0x24facc40b72ccd09 +349, 0xa800fbacce33f6f8 +350, 0x675f58a0ff19fba +351, 0x35aedf57bb5cde1b +352, 0xe5535a6b63f6d068 +353, 0x84dffd0102aaa85d +354, 0x621faad65467aaa7 +355, 0x596ad85b556b112f +356, 0x837545fff8894c7a +357, 0x3d9a4ae1356bc6a6 +358, 0xcd8b7153205d4ad0 +359, 0x98afdd40f1ed09a6 +360, 0xa38b2dc55a5cf87f +361, 0x484aecce2b6838bc +362, 0x6af05c26bdab18d9 +363, 0xf418b7399dcf2e4b +364, 0x1cfa38789b0d2445 +365, 0xfbed23c34166ee67 +366, 0x38e6820039e4912a +367, 0x1fe94911e963591e +368, 0x1291c79aee29ad70 +369, 0x65eccfc89506f963 +370, 0x7d14de3b2f55b1f6 +371, 0x82eb79c36cd2a739 +372, 0x41ffe3b75ea0def5 +373, 0x9eba9156470a51d9 +374, 0xd17c00b981db37d1 +375, 0xf688769a75601aa7 +376, 0xbcf738e9e03d571e +377, 0x14712e56df8f919b +378, 0xab14e227d156e310 +379, 0xf53d193e993e351e +380, 0x857fae46bd312141 +381, 0xc2dd71e41b639966 +382, 0x74f8b987a3d00ad1 +383, 
0x5bce8526dc527981 +384, 0x94910926c172a379 +385, 0x503c45557688a9d5 +386, 0x244d03834e05807f +387, 0x6e014cbab9c7a31f +388, 0xae544c638530facf +389, 0x9b853aaaf9cbc22d +390, 0xfb42ab7024d060ed +391, 0x74cc3fba0dfd7ff2 +392, 0x24ec9e8f62144ad5 +393, 0x72f082954307bbe7 +394, 0x36feda21bbf67577 +395, 0x3222191611b832f1 +396, 0xd0584e81bcac8b0b +397, 0xdce8d793ef75e771 +398, 0x978824c6c2578fc +399, 0x6e8f77503b3c2ee4 +400, 0xc85d2d86fecf5d03 +401, 0x3d35b4a5d4d723c4 +402, 0xd3987dfd4727fff3 +403, 0xd3cde63fb6a31add +404, 0xf6699e86165bdaeb +405, 0x9d60ba158ec364c4 +406, 0x920c3c18b346bfc9 +407, 0x770fd1fdfbc236ca +408, 0x45998cfc5fc12ddd +409, 0xd74a3454e888834b +410, 0xbf2aa68081a4a28f +411, 0xea41b26a6f1da1b3 +412, 0x5560a2d24b9d5903 +413, 0xe3791f652a228d8b +414, 0x365116d3b5a8520c +415, 0xb1b2bd46528f8969 +416, 0xfcfe14943ef16ae7 +417, 0xf4d43425e8a535dc +418, 0xe6cf10a78782a7e0 +419, 0x9c7ac0de46556e3e +420, 0xc667ae0856eed9ef +421, 0x47dbb532e16f9c7e +422, 0xdf4785a5d89ee82e +423, 0xbd014925ce79dbcf +424, 0xea0d663fb58fa5be +425, 0x51af07d5cc3821fb +426, 0x27a1bdcdc4159a9d +427, 0x520c986c59b1e140 +428, 0x50b73fd9bacd5b39 +429, 0xae5240641f51e4f3 +430, 0x71faecc164ed9681 +431, 0xda95aa35529a7ee +432, 0xe25ba29b853c1c6d +433, 0x9871a925cda53735 +434, 0xde481ad8540e114d +435, 0xa2997f540e8abca0 +436, 0xc9683c5035e28185 +437, 0x1082471b57182bac +438, 0xbd3ecf0f0b788988 +439, 0xf479760776fbb342 +440, 0x3730929200d91f44 +441, 0xc1762d79ae72809c +442, 0xfaa0a4c7b1686cb3 +443, 0xd581e6d55afdafcd +444, 0x6cf57bdfba2dcf6d +445, 0xdef79d9fe6a5bcef +446, 0x13ed376e18132bd3 +447, 0xbe67efd72defa2a +448, 0x5acc176c468966ea +449, 0x8b35b626af139187 +450, 0x446de3fac0d973ac +451, 0xe1d49e06dc890317 +452, 0x817bc3fd21fc09b7 +453, 0xb71c3958a13d5579 +454, 0x8746e010f73d7148 +455, 0x1b61c06009922e83 +456, 0xba17e62e6b092316 +457, 0x1375fa23c4db8290 +458, 0x3f071230f51245a6 +459, 0x51c99a086a61cd13 +460, 0x5f0f2ae78589e1fd +461, 0x604834e114bbbc27 +462, 0x5eb2a7a34814e9a9 +463, 0x77a6907f386bf11e +464, 0x99525de2bd407eeb +465, 0xb818348c57b3b98f +466, 0x25f5f9e702fbe78d +467, 0x8f66669e6f884473 +468, 0x1e47d46e2af4f919 +469, 0xf6a19df846476833 +470, 0xff00c67bcd06621f +471, 0xe3dfe069795d72d8 +472, 0x8affc88b2fea4d73 +473, 0x66df747e5f827168 +474, 0xf368ec338d898a0e +475, 0x9e1f1a739c5984a2 +476, 0x46a1c90e1ca32cbc +477, 0xc261bc305ed8d762 +478, 0x754d7949f7da9e72 +479, 0x4c8fbbb14ef47b17 +480, 0xccbdc67a3848d80d +481, 0x3c25e6f58bae751d +482, 0x7078b163b936d9b6 +483, 0x440e27463c134ecf +484, 0x6c83ee39f324db0f +485, 0x27cf901b22aea535 +486, 0x57262dec79a3f366 +487, 0x91db09f1dbb524fb +488, 0xd7436eefba865df2 +489, 0x16c86b0a275a3f43 +490, 0x689493e6681deaa9 +491, 0x7e1dc536c1a9ac42 +492, 0x1145beac3ac7f5cc +493, 0x3d05e211a104b2b0 +494, 0x4f9e77ced3c52f44 +495, 0x53de1369354add72 +496, 0x1fb60f835f47cdeb +497, 0x6ab36f089e40c106 +498, 0xaabffcb0d3d04c7 +499, 0xaa399686d921bd25 +500, 0x2bf8dd8b6d6fa7f0 +501, 0x1ddbf4e124329613 +502, 0x466a740241466a72 +503, 0x98d7381eb68a761 +504, 0x817691510bc4857a +505, 0x8837622c0171fe33 +506, 0xcba078873179ee16 +507, 0x13adad1ab7b75af4 +508, 0x3bac3f502428840c +509, 0xbeb3cce138de9a91 +510, 0x30ef556e40b5f0b4 +511, 0x19c22abdf3bbb108 +512, 0x977e66ea4ddc7cf +513, 0x9f4a505f223d3bf3 +514, 0x6bc3f42ac79ec87b +515, 0x31e77712158d6c23 +516, 0x6d8de4295a28af0d +517, 0xee1807dbda72adb7 +518, 0xda54140179cd038f +519, 0x715aa5cdac38e062 +520, 0x5a7e55e99a22fa16 +521, 0xf190c36aa8edbe4f +522, 0xccadd93a82c1d044 +523, 0x7070e6d5012c3f15 +524, 0x50a83341a26c1ba5 +525, 
0x11bca7cc634142e5 +526, 0x623a0d27867d8b04 +527, 0x75c18acff54fbf6e +528, 0x455ae7d933497a6f +529, 0xf624cf27d030c3d3 +530, 0x7a852716f8758bac +531, 0xe7a497ac1fa2b5b4 +532, 0xf84f097498f57562 +533, 0xc4bb392f87f65943 +534, 0x618e79a5d499fbfb +535, 0xb3c0b61d82b48b8 +536, 0x4750a10815c78ea7 +537, 0x9cf09cca3ddece69 +538, 0x2a69f1c94cc901a2 +539, 0x347a0e446e1ce86d +540, 0xb06f3a5a5ab37bb1 +541, 0x8035bd0713d591db +542, 0x539c9637042c3a1f +543, 0xd7ba4dc6b273cbd7 +544, 0x12f3f99933444f85 +545, 0x4a9517b9783fb9a4 +546, 0x6422b2ea95093bc5 +547, 0x3a5ecff0f996c2a6 +548, 0x31de504efc76a723 +549, 0x7ccb7c5233c21a9f +550, 0xc687d9e6ce4186e8 +551, 0x6e40769d6940376a +552, 0xf51207314f1f7528 +553, 0x67ee3acb190865e3 +554, 0xe08d586270588761 +555, 0xe387fa489af1a75c +556, 0x73414a52d29d8375 +557, 0x671a38191cf2a357 +558, 0xe00fb25b1aa54008 +559, 0x11a0610e22cf549b +560, 0xc90cc865d57c75be +561, 0x90d0863cc15f2b79 +562, 0x8b3e60d32ebcb856 +563, 0xb28cc55af621e04a +564, 0xcf60bd3cb2a5ab1d +565, 0x212cb5d421948f86 +566, 0xee297b96e0a3363f +567, 0x4e9392ff998760d1 +568, 0x61940c8d0105ba3e +569, 0x14ebcbae72a59a16 +570, 0xdf0f39a3d10c02af +571, 0xfc047b2b3c1c549d +572, 0x91718b5b98e3b286 +573, 0x9ea9539b1547d326 +574, 0x7a5a624a89a165e6 +575, 0x145b37dcaa8c4166 +576, 0x63814bbb90e5616c +577, 0xc4bc3ca6c38bb739 +578, 0x853c3a61ddc6626c +579, 0xa7ce8481c433829a +580, 0x8aff426941cc07b +581, 0x2dc3347ca68d8b95 +582, 0xce69f44f349e9917 +583, 0x2fa5cb8aca009b11 +584, 0xf26bb012115d9aca +585, 0xafa01c2f2d27235a +586, 0xabcba21f1b40305e +587, 0xfec20c896c0c1128 +588, 0xc5f7a71ebacadfa0 +589, 0xc8479ad14bab4eef +590, 0xad86ec9a3e7d3dc +591, 0xbbecd65292b915c5 +592, 0xb1f9e28149e67446 +593, 0x708d081c03dad352 +594, 0xaa8a84dbd1de916c +595, 0x9aa3efb29ba9480b +596, 0xd3c63969ff11443e +597, 0x1e9e9ac861315919 +598, 0x4fe227f91e66b41d +599, 0xefc0212d43d253ab +600, 0x98341437727c42d1 +601, 0x5ea85c0fe9008adc +602, 0x7891b15faa808613 +603, 0x32db2d63989aacfd +604, 0xc92f7f28e88fd7bc +605, 0x3513545eb6549475 +606, 0x49abe0082906fbf8 +607, 0xcee1e1a6551e729c +608, 0x38556672b592a28e +609, 0xc3e61409c4ec2d45 +610, 0x96c67ce2995a0fd4 +611, 0x9b9b0cada870293 +612, 0x82d6dd5dada48037 +613, 0xeea4f415299f1706 +614, 0x371107895f152ab3 +615, 0x2f6686159f4396bb +616, 0x61005a2ff3680089 +617, 0x9d2f2cafb595e6b6 +618, 0x4a812a920f011672 +619, 0x317554d3a77385d7 +620, 0x24c01086727eb74b +621, 0xa15ff76d618a3a9e +622, 0x2121bfd983859940 +623, 0x384d11577eea8114 +624, 0xab0f4299f3c44d88 +625, 0x136fd4b07cfa14d9 +626, 0x665fe45cbfaa972a +627, 0x76c5a23398a314e9 +628, 0x5507036357ccda98 +629, 0xd9b8c5ac9dce632b +630, 0x366bc71781da6e27 +631, 0xdd2b2ba1d6be6d15 +632, 0xf33ed0d50ea6f1a6 +633, 0xf05a9b1900174c18 +634, 0x3947e1419e2787cf +635, 0x6c742b1e029637d0 +636, 0x32aba12196a0d2e8 +637, 0x1b94aab2e82e7df +638, 0x68b617db19229d6 +639, 0x6c88a95ac0a33f98 +640, 0xdc9b95fd60c2d23e +641, 0x999e6971d3afc8b3 +642, 0x7071fc6ad8b60129 +643, 0x41a8184ef62485f6 +644, 0xb68e0605c7d5e713 +645, 0x272b961a1d1bbee +646, 0x23f04e76446187b0 +647, 0x999a7a8f6d33f260 +648, 0xdbd6318df4f168d +649, 0x8f5e74c84c40711e +650, 0x8ccc6b04393a19d6 +651, 0xadcd24b782dd8d3d +652, 0x1a966b4f80ef9499 +653, 0xcb6d4f9ff5a280f0 +654, 0x8095ff2b8484018a +655, 0xbfd3389611b8e771 +656, 0x278eb670b7d12d51 +657, 0x31df54ca8d65c20f +658, 0x121c7fb38af6985e +659, 0x84fb94f38fe1d0a +660, 0x15ae8af1a6d48f02 +661, 0x8d51e4a62cba1a28 +662, 0x58e6b6b3ae0f9e42 +663, 0x9365a0a85669cc99 +664, 0xe56e92f65a2106df +665, 0x68fa299c66b428fc +666, 0x55e51bb0b0a832c6 +667, 
0x48b565293f9bc494 +668, 0x73d8132b1cbabb57 +669, 0x9178ac3926c36cbc +670, 0xe2f22c7b28ea5e0f +671, 0x6af45322a99afb12 +672, 0x59072fcb486a46f4 +673, 0x166b717b08d3d8e +674, 0xd4e627a2dfacc4ab +675, 0x33dad6f2921dedaa +676, 0x4b13b806834a6704 +677, 0xe5f7971b398ed54d +678, 0x20bfae65e3e6899b +679, 0x881dab45d2b4fc98 +680, 0x6f248126b5b885be +681, 0x7aeb39e986f9deee +682, 0xf819f9574b8c3a03 +683, 0xff3d93ed6bd9781a +684, 0x3a31e2e24a2f6385 +685, 0x7888a88f8944a5e +686, 0x4faee12f5de95537 +687, 0x7f3e4efccdb2ed67 +688, 0x91e0f2fc12593af5 +689, 0xb5be8a4b886a40d3 +690, 0x998e8288ac3a9b1b +691, 0x85c48fc8b1349e7b +692, 0xf03af25222d8fae5 +693, 0x45467e805b242c2e +694, 0xa2350db793dbebdc +695, 0xfebe5b61d2174553 +696, 0xa9a331f02c54ad0b +697, 0xe94e49a0f905aef3 +698, 0xe54b4c812b55e3da +699, 0xdc454114c6bc0278 +700, 0x99c7765ab476baa2 +701, 0xccd9590e47fdff7c +702, 0xfa2bcae7afd6cb71 +703, 0x2c1bf1a433a6f0f7 +704, 0x53882c62ff0aab28 +705, 0x80ac900f844dacc +706, 0x27ba8eb5c4a44d54 +707, 0x78f3dfb072a46004 +708, 0x34e00e6ec629edce +709, 0x5b88d19b552d1fbd +710, 0xe4df375dc79df432 +711, 0x37446312ff79c3b4 +712, 0xb72256900a95fa6d +713, 0x89f3171fbdff0bfc +714, 0xd37885b048687eba +715, 0xbb033213b283b60e +716, 0xcf10b523ee769030 +717, 0xbf8070b6cfd7bafb +718, 0xb7194da81fd1763b +719, 0xbfc303de88e68d24 +720, 0xb949c7a5aea8a072 +721, 0x844216e7bae90455 +722, 0xf1e7f20840049a33 +723, 0x96e3263ad0cae794 +724, 0x10772d51f6e9ba49 +725, 0xcea24fccae9d23b3 +726, 0xefd378add9dde040 +727, 0xba0c7c5275805976 +728, 0x2e2a04608f64fa8c +729, 0xafb42ec43aa0fa7 +730, 0x30444b84241ac465 +731, 0x19ef384bac4493ab +732, 0xfd1ac615d3ba5ab9 +733, 0x6cc781ba38643aff +734, 0x30ff27ebed875cfd +735, 0xee1a261aca97ae62 +736, 0xc5a92715202bc940 +737, 0x9e6ec76f93c657ff +738, 0x9b9fd55f55191ca5 +739, 0x654b13af008d8f03 +740, 0x1b7f030d9bd0719f +741, 0x6d622e277550cb7f +742, 0x3f8ee6b8830d0538 +743, 0x475462bcd0de190f +744, 0x21380e8a513bdbcd +745, 0x629bf3771b1bd7a4 +746, 0x3b5fd0b62c353709 +747, 0xf95634006ec3867e +748, 0x1be8bb584a6653c2 +749, 0x2e2d3cfa85320ce8 +750, 0x5b904b692252d11d +751, 0x4bfd76631d527990 +752, 0xc019571ca2bec4a0 +753, 0xf2eb730cea4cd751 +754, 0xd4571d709530191a +755, 0x3b5bd947061f5a7d +756, 0x56e2322cd2d1d1c0 +757, 0xa8830a5f62019f83 +758, 0x901d130c1b873cf3 +759, 0xb5dd29b363c61299 +760, 0xbb710bec3a17b26d +761, 0xc0c464daca0f2328 +762, 0x4dc8055df02650f5 +763, 0x3d3cd9bbe8b957af +764, 0xdb79612c2635b828 +765, 0xe25b3a8ad8fa3040 +766, 0xd5875c563cbf236b +767, 0x46861c1c3849c9bc +768, 0xf84bf1a2814dff43 +769, 0x6d8103902e0ad5e6 +770, 0x99f51c9be8af79e5 +771, 0xb0bfa8540ff94a96 +772, 0xaf45109a4e06f7d0 +773, 0x281df3e55aea9bfc +774, 0x6a1155ca8aa40e60 +775, 0x754d32c5de1f5da +776, 0xce1eafb1c6ca916f +777, 0xc4f2185fa8577bd1 +778, 0x4a188e9bdb5501d9 +779, 0xbb14107e99bd5550 +780, 0xf0381d8425ec2962 +781, 0x213dbfffc16ec4f6 +782, 0x7a999c5a28ea65bc +783, 0x23758c2aba7709ff +784, 0xea7e4bb205e93b44 +785, 0x9c5a31e53911c658 +786, 0x7f04d0bbdc689ddc +787, 0xe3ed89ab8d78dcb3 +788, 0x73c38bfb43986210 +789, 0x740c7d787eb8e158 +790, 0x5284fafdfb3fb9ec +791, 0x2e91a58ac1fb1409 +792, 0xb94a600bf0a09af3 +793, 0x533ea4dbe07d81dd +794, 0x48c3f1a736b3c5fd +795, 0x56ae3499fa8720ce +796, 0x526f2def663ca818 +797, 0x2f085759c65665c4 +798, 0xf715f042c69e0db4 +799, 0x110889c399231e60 +800, 0x64584a244866f3a0 +801, 0xf02ec101a39405d3 +802, 0xe73cd5e9a7f17283 +803, 0xfea64869e7028234 +804, 0x97559974ad877891 +805, 0xc8695aba1dc9f2e5 +806, 0x7b62b76ffc2264ec +807, 0xf5e1df172ec5ccd +808, 0xafaeb68765e443bd +809, 
0xd3870eb2e8337623 +810, 0x4f944d684138fb39 +811, 0x6977c575038916ad +812, 0x8ada1a225df95a56 +813, 0xe4044c6c58d15e54 +814, 0x4e5121366681cf2 +815, 0xcf8640b079357b0d +816, 0xcd5b157d44106fa3 +817, 0x9d7a5481279e25a1 +818, 0xe10e9db41fb4b34f +819, 0x1052607be1eadff9 +820, 0x3403d67232fe2265 +821, 0xac9358f498c34afc +822, 0x820172da0dc39c9 +823, 0xe186e91a3b826b6a +824, 0x1a838e2a40284445 +825, 0x1870b617ebd7bce6 +826, 0xcb7cba4424be1ed7 +827, 0x6a2e56e40fdf9041 +828, 0xace93bbe108f97ee +829, 0xfeb9bc74ac41ca08 +830, 0x8cb2d05b0f6a1f51 +831, 0x73792309f3fac0a9 +832, 0x2507343d431308ca +833, 0xd0ea1197be615412 +834, 0xb1870812f1d2fa94 +835, 0x6d067b6935dcd23e +836, 0xaf161014e5492c31 +837, 0xd4be0dce97064be4 +838, 0xf8edfe3fc75c20f1 +839, 0x894751dc442d2d9c +840, 0xb4a95f6a6663456c +841, 0x74e93162e2d805db +842, 0x784bc5f3a7a2f645 +843, 0xd234d7c5b0582ea9 +844, 0x491f28d0ab6cb97c +845, 0xa79419e5cf4336c3 +846, 0x66b00141978c849 +847, 0xa7ddbd64698d563f +848, 0xefc33a4a5d97d4b2 +849, 0x95075514a65aebdc +850, 0x40eca5b3e28cd25e +851, 0x90ec7d00e9c9e35d +852, 0x63e84104d5af417a +853, 0xdaca0ea32df5744 +854, 0x7ed54f2587795881 +855, 0x5a73931760af4ee0 +856, 0x857d1a185a3081ec +857, 0x6eac2aabe67fb463 +858, 0xd1f86155d8bfc55f +859, 0x6d56398f3e7877ef +860, 0x7642f61dfc62bc17 +861, 0x1d76b12843246ffa +862, 0xde7817809b8a31d0 +863, 0xbcca9cd091198f9d +864, 0xf71ca566dddcdfd4 +865, 0xea4386ee8b61d082 +866, 0xe351729d6010bac4 +867, 0xfd685d8a49910dd6 +868, 0xa7a20ea6c686bd3 +869, 0x1cdaf82f4dbd5536 +870, 0xa3da1d1e77dda3e0 +871, 0x4f723b3818ff8b2a +872, 0x1290669eca152469 +873, 0xb54158b52d30651b +874, 0xc06b74f2c7f0fee +875, 0x7d5840bcbf702379 +876, 0x19fa4c1254a82ed +877, 0xcf5ce090ad0b38ea +878, 0xd4edd6ac9437e16d +879, 0xc6ebf25eb623b426 +880, 0xd2b6dbdf00d8fea2 +881, 0x949cf98391cc59e1 +882, 0x380a0c7d0356f7b3 +883, 0x8ffefe32465473bf +884, 0x637b6542d27c861e +885, 0x347d12ffc664ecd9 +886, 0xea66e3a0c75a6b37 +887, 0xc3aff6f34fb537a1 +888, 0x67bdf3579959bf49 +889, 0xa17a348e3a74b723 +890, 0x93c9ef26ddadd569 +891, 0x483909059a5ac0b2 +892, 0x26ec9074b56d5a0d +893, 0x6216000d9a48403a +894, 0x79b43909eab1ec05 +895, 0xe4a8e8d03649e0de +896, 0x1435d666f3ccdc08 +897, 0xb9e22ba902650a0e +898, 0x44dffcccc68b41f8 +899, 0x23e60dcc7a559a17 +900, 0x6fd1735eacd81266 +901, 0xf6bda0745ea20c8e +902, 0x85efcaefe271e07c +903, 0x9be996ee931cef42 +904, 0xe78b41c158611d64 +905, 0xd6201df605839830 +906, 0x702e8e47d2769fd3 +907, 0xb8dcf70e18cf14c +908, 0xac2690bab1bf5c17 +909, 0x92b166b71205d696 +910, 0xb0e73c795fc6df28 +911, 0x4bf2322c8b6b6f0d +912, 0xa842fbe67918cea0 +913, 0xb01a8675d9294e54 +914, 0xfbe3c94f03ca5af2 +915, 0x51a5c089600c441f +916, 0x60f0fd7512d85ded +917, 0xef3113d3bc2cadb0 +918, 0xe1ea128ade300d60 +919, 0xde413b7f8d92d746 +920, 0xfc32c6d43f47c5d8 +921, 0x69d551d8c2b54c68 +922, 0xb9bc68c175777943 +923, 0xb9c79c687f0dae90 +924, 0xd799421ef883c06e +925, 0xbff553ca95a29a3e +926, 0xfc9ffac46bd0aca1 +927, 0x4f6c3a30c80c3e5a +928, 0x8b7245bc6dc4a0a +929, 0xaf4e191a4575ff60 +930, 0x41218c4a76b90f0b +931, 0x986052aa51b8e89b +932, 0x284b464ed5622f9 +933, 0xba6bded912626b40 +934, 0x43cad3ed7443cb5c +935, 0x21641fa95725f328 +936, 0x6d99d6d09d755822 +937, 0x8246dfa2d4838492 +938, 0xd2ee70b9056f4726 +939, 0x87db515a786fbb8b +940, 0x7c63e4c1d7786e7d +941, 0xd1a9d548f10b3e88 +942, 0xa00856475f3b74c9 +943, 0x7f1964ce67148bf4 +944, 0x446650ec71e6018c +945, 0xb1805ca07d1b6345 +946, 0x869c0a1625b7271b +947, 0x79d6da06ce2ecfe2 +948, 0xec7b3cafc5e3c85f +949, 0x1745ce21e39f2c3d +950, 0xd9a0a7af6ee97825 +951, 
0x680e0e52a6e11d5c +952, 0xd86b3f344ff7f4cd +953, 0xab56af117c840b9c +954, 0x5c5404c7e333a10e +955, 0x4f1eb462f35d990d +956, 0xf857605a5644458e +957, 0x3bb87cdf09262f86 +958, 0xd57295baf6da64b +959, 0xb5993f48472f2894 +960, 0x7d1a501608c060b2 +961, 0x45fabe2d0e54adf0 +962, 0xbb41c3806afb4efe +963, 0xbfbc506049424c8 +964, 0xb7dd6b67f2203344 +965, 0x389ce52eff883b81 +966, 0xe259c55c0cf6d000 +967, 0x70fb3e3824f7d213 +968, 0x9f36d5599ed55f4b +969, 0xd14cf6f12f83c4f7 +970, 0x570a09d56aaa0b66 +971, 0x8accafd527f4598 +972, 0xa42d64c62175adfd +973, 0xddb9c6a87b6e1558 +974, 0xd80b6c69fa1cde2a +975, 0x44ebaac10082207b +976, 0xf99be8889552fa1a +977, 0x38253cd4b38b5dc5 +978, 0x85356c8b02675791 +979, 0xbf91677b2ecdcf55 +980, 0x2316cb85e93f366e +981, 0x9abf35954db6b053 +982, 0xf49f7425e086b45a +983, 0x8f5b625e074afde2 +984, 0xe0d614559791b080 +985, 0xbf7b866afab2a525 +986, 0xde89d7e1641a6412 +987, 0x1d10687d8ae5b86f +988, 0x1f034caa0e904cbd +989, 0x2086357aec8a7a2c +990, 0x22dc476b80c56e1e +991, 0xbef5a73cc0e3a493 +992, 0xddfa3829b26ed797 +993, 0x8917a87ec3d4dc78 +994, 0xfeabe390628c365e +995, 0x581b0c4f6fb2d642 +996, 0x1ef8c590adbf5b9a +997, 0x4d8e13aac0cce879 +998, 0xfe38f71e5977fad0 +999, 0x1f83a32d4adfd2ed diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/philox-testset-2.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/philox-testset-2.csv new file mode 100644 index 0000000..69d24c3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/philox-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x399e5b222b82fa9 +1, 0x41fd08c1f00f3bc5 +2, 0x78b8824162ee4d04 +3, 0x176747919e02739d +4, 0xfaa88f002a8d3596 +5, 0x418eb6f592e6c227 +6, 0xef83020b8344dd45 +7, 0x30a74a1a6eaa064b +8, 0x93d43bf97a490c3 +9, 0xe4ba28b442194cc +10, 0xc829083a168a8656 +11, 0x73f45d50f8e22849 +12, 0xf912db57352824cc +13, 0xf524216927b12ada +14, 0x22b7697473b1dfda +15, 0x311e2a936414b39f +16, 0xb905abfdcc425be6 +17, 0x4b14630d031eac9c +18, 0x1cf0c4ae01222bc8 +19, 0xa6c33efc6e82ef3 +20, 0x43b3576937ba0948 +21, 0x1e483d17cdde108a +22, 0x6722784cac11ac88 +23, 0xee87569a48fc45d7 +24, 0xb821dcbe74d18661 +25, 0xa5d1876ef3da1a81 +26, 0xe4121c2af72a483 +27, 0x2d747e355a52cf43 +28, 0x609059957bd03725 +29, 0xc3327244b49e16c5 +30, 0xb5ae6cb000dde769 +31, 0x774315003209017 +32, 0xa2013397ba8db605 +33, 0x73b228945dbcd957 +34, 0x801af7190375d3c0 +35, 0xae6dca29f24c9c67 +36, 0xd1cc0bcb1ca26249 +37, 0x1defa62a5bd853be +38, 0x67c2f5557fa89462 +39, 0xf1729b58122fab02 +40, 0xb67eb71949ec6c42 +41, 0x5456366ec1f8f7d7 +42, 0x44492b32eb7966f5 +43, 0xa801804159f175f1 +44, 0x5a416f23cac70d84 +45, 0x186f55293302303d +46, 0x7339d5d7b6a43639 +47, 0xfc6df38d6a566121 +48, 0xed2fe018f150b39e +49, 0x508e0b04a781fa1b +50, 0x8bee9d50f32eaf50 +51, 0x9870015d37e63cc +52, 0x93c6b12309c14f2d +53, 0xb571cf798abe93ff +54, 0x85c35a297a88ae6e +55, 0x9b1b79afe497a2ae +56, 0x1ca02e5b95d96b8d +57, 0x5bb695a666c0a94a +58, 0x4e3caf9bbab0b208 +59, 0x44a44be1a89f2dc1 +60, 0x4ff37c33445758d1 +61, 0xd0e02875322f35da +62, 0xfd449a91fb92646b +63, 0xbe0b49096b95db4d +64, 0xffa3647cad13ef5d +65, 0x75c127a61acd10c8 +66, 0xd65f697756f5f98e +67, 0x3ced84be93d94434 +68, 0x4da3095c2fc46d68 +69, 0x67564e2a771ee9ac +70, 0x36944775180644a9 +71, 0xf458db1c177cdb60 +72, 0x5b58406dcd034c8 +73, 0x793301a3fdab2a73 +74, 0x1c2a1a16d6db6128 +75, 0xc2dacd4ddddbe56c +76, 0x2e7d15be2301a111 +77, 0xd4f4a6341b3bcd18 +78, 0x3622996bbe6a9e3b +79, 0xaf29aa9a7d6d47da +80, 0x6d7dbb74a4cd68ae +81, 0xc260a17e0f39f841 +82, 0xdee0170f2af66f0d +83, 
0xf84ae780d7b5a06e +84, 0x8326247b73f43c3a +85, 0xd44eef44b4f98b84 +86, 0x3d10aee62ec895e3 +87, 0x4f23fef01bf703b3 +88, 0xf8e50aa57d888df6 +89, 0x7da67411e3bef261 +90, 0x1d00f2769b2f96d7 +91, 0x7ef9a15b7444b84e +92, 0xcfa16436cc2b7e21 +93, 0x29ab8cfac00460ff +94, 0x23613de8608b0e70 +95, 0xb1aa0980625798a8 +96, 0xb9256fd29db7df99 +97, 0xdacf311bf3e7fa18 +98, 0xa013c8f9fada20d8 +99, 0xaf5fd4fe8230fe3e +100, 0xd3d59ca55102bc5c +101, 0x9d08e2aa5242767f +102, 0x40278fe131e83b53 +103, 0x56397d03c7c14c98 +104, 0xe874b77b119359b3 +105, 0x926a1ba4304ab19f +106, 0x1e115d5aa695a91d +107, 0xc6a459df441f2fe3 +108, 0x2ca842bc1b0b3c6a +109, 0x24c804cf8e5eed16 +110, 0x7ca00fc4a4c3ebd3 +111, 0x546af7cecc4a4ba6 +112, 0x8faae1fa18fd6e3 +113, 0x40420b0089641a6a +114, 0x88175a35d9abcb83 +115, 0xf7d746d1b8b1357c +116, 0x7dae771a651be970 +117, 0x2f6485247ee4df84 +118, 0x6883702fab2d8ec5 +119, 0xeb7eea829a67f9a6 +120, 0x60d5880b485562ed +121, 0x7d4ca3d7e41a4e7e +122, 0xbb7fef961ab8de18 +123, 0x3b92452fb810c164 +124, 0x5f4b4755348b338 +125, 0xca45a715a7539806 +126, 0xc33efd9da5399dd +127, 0x593d665a51d4aedd +128, 0x75d6b8636563036b +129, 0x7b57caa55e262082 +130, 0x4ede7427969e0dd5 +131, 0xc3f19b6f78ea00b +132, 0xeea7bab9be2181ea +133, 0x652c45fe9c420c04 +134, 0x14ba9e3d175670ee +135, 0xd2ad156ba6490474 +136, 0x4d65ae41065f614 +137, 0x6ff911c8afa28eb1 +138, 0xedc2b33588f3cb68 +139, 0x437c8bc324666a2f +140, 0x828cee25457a3f0 +141, 0x530c986091f31b9b +142, 0x2f34671e8326ade7 +143, 0x4f686a8f4d77f6da +144, 0xa4c1987083498895 +145, 0xbce5a88b672b0fb1 +146, 0x8476115a9e6a00cc +147, 0x16de18a55dd2c238 +148, 0xdf38cf4c416232bc +149, 0x2cb837924e7559f3 +150, 0xfad4727484e982ed +151, 0x32a55d4b7801e4f +152, 0x8b9ef96804bd10a5 +153, 0xa1fd422c9b5cf2a9 +154, 0xf46ddb122eb7e442 +155, 0x6e3842547afa3b33 +156, 0x863dee1c34afe5c4 +157, 0x6a43a1935b6db171 +158, 0x1060a5c2f8145821 +159, 0xf783ec9ed34c4607 +160, 0x1da4a86bf5f8c0b0 +161, 0x4c7714041ba12af8 +162, 0x580da7010be2f192 +163, 0xad682fe795a7ea7a +164, 0x6687b6cb88a9ed2c +165, 0x3c8d4b175517cd18 +166, 0xe9247c3a524a6b6b +167, 0x337ca9cfaa02658 +168, 0xed95399481c6feec +169, 0x58726a088e606062 +170, 0xfe7588a5b4ee342a +171, 0xee434c7ed146fdee +172, 0xe2ade8b60fdc4ba5 +173, 0xd57e4c155de4eaab +174, 0xdefeae12de1137cb +175, 0xb7a276a241316ac1 +176, 0xeb838b1b1df4ca15 +177, 0x6f78965edea32f6f +178, 0x18bebd264d7a5d53 +179, 0x3641c691d77005ec +180, 0xbe70ed7efea8c24c +181, 0x33047fa8d03ca560 +182, 0x3bed0d2221ff0f87 +183, 0x23083a6ffbcf38a2 +184, 0xc23eb827073d3fa5 +185, 0xc873bb3415e9fb9b +186, 0xa4645179e54147fe +187, 0x2c72fb443f66e207 +188, 0x98084915dd89d8f4 +189, 0x88baa2de12c99037 +190, 0x85c74ab238cb795f +191, 0xe122186469ea3a26 +192, 0x4c3bba99b3249292 +193, 0x85d6845d9a015234 +194, 0x147ddd69c13e6a31 +195, 0x255f4d678c9a570b +196, 0x2d7c0c410bf962b4 +197, 0x58eb7649e0aa16ca +198, 0x9d240bf662fe0783 +199, 0x5f74f6fa32d293cc +200, 0x4928e52f0f79d9b9 +201, 0xe61c2b87146b706d +202, 0xcfcd90d100cf5431 +203, 0xf15ea8138e6aa178 +204, 0x6ab8287024f9a819 +205, 0xed8942593db74e01 +206, 0xefc00e4ec2ae36dd +207, 0xc21429fb9387f334 +208, 0xf9a3389e285a9bce +209, 0xacdee8c43aae49b3 +210, 0xefc382f02ad55c25 +211, 0x1153b50e8d406b72 +212, 0xb00d39ebcc2f89d8 +213, 0xde62f0b9831c8850 +214, 0xc076994662eef6c7 +215, 0x66f08f4752f1e3ef +216, 0x283b90619796249a +217, 0x4e4869bc4227499e +218, 0xb45ad78a49efd7ed +219, 0xffe19aa77abf5f4b +220, 0xfce11a0daf913aef +221, 0x7e4e64450d5cdceb +222, 0xe9621997cfd62762 +223, 0x4d2c9e156868081 +224, 0x4e2d96eb7cc9a08 +225, 0xda74849bba6e3bd3 +226, 
0x6f4621da935e7fde +227, 0xb94b914aa0497259 +228, 0xd50d03e8b8db1563 +229, 0x1a45c1ce5dca422e +230, 0xc8d30d33276f843f +231, 0xb57245774e4176b4 +232, 0x8d36342c05abbbb1 +233, 0x3591ad893ecf9e78 +234, 0x62f4717239ee0ac8 +235, 0x9b71148a1a1d4200 +236, 0x65f8e0f56dd94463 +237, 0x453b1fcfd4fac8c2 +238, 0x4c25e48e54a55865 +239, 0xa866baa05112ace2 +240, 0x7741d3c69c6e79c5 +241, 0x7deb375e8f4f7a8a +242, 0xc242087ede42abd8 +243, 0x2fa9d1d488750c4b +244, 0xe8940137a935d3d3 +245, 0x1dab4918ca24b2f2 +246, 0xe2368c782168fe3e +247, 0x6e8b2d1d73695909 +248, 0x70455ebea268b33e +249, 0x656a919202e28da1 +250, 0x5a5a8935647da999 +251, 0x428c6f77e118c13c +252, 0xa87aee2b675bb083 +253, 0x3873a6412b239969 +254, 0x5f72c1e91cb8a2ee +255, 0xa25af80a1beb5679 +256, 0x1af65d27c7b4abc3 +257, 0x133437060670e067 +258, 0xb1990fa39a97d32e +259, 0x724adc89ae10ed17 +260, 0x3f682a3f2363a240 +261, 0x29198f8dbd343499 +262, 0xdfaeeaa42bc51105 +263, 0x5baff3901b9480c2 +264, 0x3f760a67043e77f5 +265, 0x610fa7aa355a43ba +266, 0x394856ac09c4f7a7 +267, 0x1d9229d058aee82e +268, 0x19c674804c41aeec +269, 0x74cf12372012f4aa +270, 0xa5d89b353fa2f6ca +271, 0x697e4f672ac363dd +272, 0xde6f55ba73df5af9 +273, 0x679cf537510bd68f +274, 0x3dc916114ae9ef7e +275, 0xd7e31a66ec2ee7ba +276, 0xc21bebb968728495 +277, 0xc5e0781414e2adfd +278, 0x71147b5412ddd4bd +279, 0x3b864b410625cca9 +280, 0x433d67c0036cdc6 +281, 0x48083afa0ae20b1b +282, 0x2d80beecd64ac4e8 +283, 0x2a753c27c3a3ee3e +284, 0xb2c5e6afd1fe051a +285, 0xea677930cd66c46b +286, 0x4c3960932f92810a +287, 0xf1b367a9e527eaba +288, 0xb7d92a8a9a69a98e +289, 0x9f9ad3210bd6b453 +290, 0x817f2889db2dcbd8 +291, 0x4270a665ac15813c +292, 0x90b85353bd2be4dd +293, 0x10c0460f7b2d68d +294, 0x11cef32b94f947f5 +295, 0x3cf29ed8e7d477e8 +296, 0x793aaa9bd50599ef +297, 0xbac15d1190014aad +298, 0x987944ae80b5cb13 +299, 0x460aa51f8d57c484 +300, 0xc77df0385f97c2d3 +301, 0x92e743b7293a3822 +302, 0xbc3458bcfbcbb8c0 +303, 0xe277bcf3d04b4ed7 +304, 0xa537ae5cf1c9a31c +305, 0x95eb00d30bd8cfb2 +306, 0x6376361c24e4f2dd +307, 0x374477fe87b9ea8e +308, 0x8210f1a9a039902e +309, 0xe7628f7031321f68 +310, 0x8b8e9c0888fc1d3d +311, 0x306be461fdc9e0ed +312, 0x510009372f9b56f5 +313, 0xa6e6fa486b7a027a +314, 0x9d3f002025203b5a +315, 0x7a46e0e81ecbef86 +316, 0x41e280c611d04df0 +317, 0xedcec10418a99e8a +318, 0x5c27b6327e0b9dbd +319, 0xa81ed2035b509f07 +320, 0x3581e855983a4cc4 +321, 0x4744594b25e9809d +322, 0xc737ac7c27fbd0ed +323, 0x1b523a307045433a +324, 0x8b4ce9171076f1d9 +325, 0x2db02d817cd5eec0 +326, 0x24a1f1229af50288 +327, 0x5550c0dcf583ff16 +328, 0x3587baaa122ec422 +329, 0xf9d3dc894229e510 +330, 0xf3100430d5cf8e87 +331, 0xc31af79862f8e2fb +332, 0xd20582063b9f3537 +333, 0xac5e90ac95fcc7ad +334, 0x107c4c704d5109d4 +335, 0xebc8628906dbfd70 +336, 0x215242776da8c531 +337, 0xa98002f1dcf08b51 +338, 0xbc3bdc07f3b09718 +339, 0x238677062495b512 +340, 0x53b4796f2a3c49e8 +341, 0x6424286467e22f0e +342, 0x14d0952a11a71bac +343, 0x2f97098149b82514 +344, 0x3777f2fdc425ad2 +345, 0xa32f2382938876d4 +346, 0xda8a39a021f20ae3 +347, 0x364361ef0a6ac32c +348, 0x4413eede008ff05a +349, 0x8dda8ace851aa327 +350, 0x4303cabbdcecd1ee +351, 0x2e69f06d74aa549f +352, 0x4797079cd4d9275c +353, 0xc7b1890917e98307 +354, 0x34031b0e822a4b4c +355, 0xfc79f76b566303ea +356, 0x77014adbe255a930 +357, 0xab6c43dd162f3be5 +358, 0xa430041f3463f6b9 +359, 0x5c191a32ada3f84a +360, 0xe8674a0781645a31 +361, 0x3a11cb667b8d0916 +362, 0xaedc73e80c39fd8a +363, 0xfde12c1b42328765 +364, 0x97abb7dcccdc1a0b +365, 0x52475c14d2167bc8 +366, 0x540e8811196d5aff +367, 0xa867e4ccdb2b4b77 +368, 
0x2be04af61e5bcfb9 +369, 0x81b645102bfc5dfd +370, 0x96a52c9a66c6450f +371, 0x632ec2d136889234 +372, 0x4ed530c0b36a6c25 +373, 0x6f4851225546b75 +374, 0x2c065d6ba46a1144 +375, 0xf8a3613ff416551d +376, 0xb5f0fd60e9c971a9 +377, 0x339011a03bb4be65 +378, 0x9439f72b6995ded6 +379, 0xc1b03f3ef3b2292d +380, 0xad12fd221daab3ae +381, 0xf615b770f2cf996f +382, 0x269d0fdcb764172 +383, 0x67837025e8039256 +384, 0x6402831fc823fafa +385, 0x22854146a4abb964 +386, 0x7b5ad9b5a1bad7a8 +387, 0x67170e7beb6ac935 +388, 0xfc2d1e8e24adfaaa +389, 0x7ded4395345ff40d +390, 0x418981760a80dd07 +391, 0xc03bef38022c1d2 +392, 0x3a11850b26eade29 +393, 0xaa56d02c7175c5f4 +394, 0xd83b7917b9bfbff5 +395, 0x3c1df2f8fa6fced3 +396, 0xf3d6e2999c0bb760 +397, 0xc66d683a59a950e3 +398, 0x8e3972a9d73ffabf +399, 0x97720a0443edffd9 +400, 0xa85f5d2fe198444a +401, 0xfc5f0458e1b0de5e +402, 0xe3973f03df632b87 +403, 0xe151073c84c594b3 +404, 0x68eb4e22e7ff8ecf +405, 0x274f36eaed7cae27 +406, 0x3b87b1eb60896b13 +407, 0xbe0b2f831442d70a +408, 0x2782ed7a48a1b328 +409, 0xb3619d890310f704 +410, 0xb03926b11b55921a +411, 0xdb46fc44aa6a0ce4 +412, 0x4b063e2ef2e9453a +413, 0xe1584f1aeec60fb5 +414, 0x7092bd6a879c5a49 +415, 0xb84e1e7c7d52b0e6 +416, 0x29d09ca48db64dfb +417, 0x8f6c4a402066e905 +418, 0x77390795eabc36b +419, 0xcc2dc2e4141cc69f +420, 0x2727f83beb9e3c7c +421, 0x1b29868619331de0 +422, 0xd38c571e192c246f +423, 0x535327479fe37b6f +424, 0xaff9ce5758617eb3 +425, 0x5658539e9288a4e4 +426, 0x8df91d87126c4c6d +427, 0xe931cf8fdba6e255 +428, 0x815dfdf25fbee9e8 +429, 0x5c61f4c7cba91697 +430, 0xdd5f5512fe2313a1 +431, 0x499dd918a92a53cd +432, 0xa7e969d007c97dfd +433, 0xb8d39c6fc81ac0bb +434, 0x1d646983def5746c +435, 0x44d4b3b17432a60c +436, 0x65664232a14db1e3 +437, 0xda8fae6433e7500b +438, 0xbe51b94ff2a3fe94 +439, 0xe9b1bd9a9098ef9f +440, 0xfe47d54176297ef5 +441, 0xb8ab99bc03bb7135 +442, 0xcfad97f608565b38 +443, 0xf05da71f6760d9c1 +444, 0xef8da40a7c70e7b +445, 0xe0465d58dbd5d138 +446, 0xb54a2d70eb1a938 +447, 0xfdd50c905958f2d8 +448, 0x3c41933c90a57d43 +449, 0x678f6d894c6ad0bb +450, 0x403e8f4582274e8 +451, 0x5cbbe975668df6b0 +452, 0x297e6520a7902f03 +453, 0x8f6dded33cd1efd7 +454, 0x8e903c97be8d783b +455, 0x10bd015577e30f77 +456, 0x3fcd69d1c36eab0c +457, 0xb45989f3ca198d3 +458, 0x507655ce02b491a9 +459, 0xa92cf99bb78602ce +460, 0xebfb82055fbc2f0f +461, 0x3334256279289b7a +462, 0xc19d2a0f740ee0ac +463, 0x8bb070dea3934905 +464, 0xa4ab57d3a8d1b3eb +465, 0xfee1b09bcacf7ff4 +466, 0xccc7fb41ceec41fa +467, 0xd4da49094eb5a74d +468, 0xed5c693770af02ed +469, 0x369dabc9bbfaa8e4 +470, 0x7eab9f360d054199 +471, 0xe36dbebf5ee94076 +472, 0xd30840e499b23d7 +473, 0x8678e6cb545015ff +474, 0x3a47932ca0b336e +475, 0xeb7c742b6e93d6fe +476, 0x1404ea51fe5a62a9 +477, 0xa72cd49db978e288 +478, 0xfd7bada020173dcf +479, 0xc9e74fc7abe50054 +480, 0x93197847bb66808d +481, 0x25fd5f053dce5698 +482, 0xe198a9b18cc21f4 +483, 0x5cc27b1689452d5d +484, 0x8b3657af955a98dc +485, 0xc17f7584f54aa1c0 +486, 0xe821b088246b1427 +487, 0x32b5a9f6b45b6fa0 +488, 0x2aef7c315c2bae0c +489, 0xe1af8129846b705a +490, 0x4123b4c091b34614 +491, 0x6999d61ec341c073 +492, 0x14b9a8fcf86831ea +493, 0xfd4cff6548f46c9f +494, 0x350c3b7e6cc8d7d6 +495, 0x202a5047fecafcd5 +496, 0xa82509fe496bb57d +497, 0x835e4b2608b575fe +498, 0xf3abe3da919f54ec +499, 0x8705a21e2c9b8796 +500, 0xfd02d1427005c314 +501, 0xa38458faa637f49b +502, 0x61622f2360e7622a +503, 0xe89335a773c2963b +504, 0x481264b659b0e0d0 +505, 0x1e82ae94ebf62f15 +506, 0x8ea7812de49209d4 +507, 0xff963d764680584 +508, 0x418a68bef717f4af +509, 0x581f0e7621a8ab91 +510, 
0x840337e9a0ec4150 +511, 0x951ef61b344be505 +512, 0xc8b1b899feb61ec2 +513, 0x8b78ca13c56f6ed9 +514, 0x3d2fd793715a946f +515, 0xf1c04fabcd0f4084 +516, 0x92b602614a9a9fcc +517, 0x7991bd7a94a65be7 +518, 0x5dead10b06cad2d7 +519, 0xda7719b33f722f06 +520, 0x9d87a722b7bff71e +521, 0xb038e479071409e9 +522, 0xf4e8bbec48054775 +523, 0x4fec2cd7a28a88ea +524, 0x839e28526aad3e56 +525, 0xd37ec57852a98bf0 +526, 0xdef2cbbe00f3a02d +527, 0x1aecfe01a9e4d801 +528, 0x59018d3c8beaf067 +529, 0x892753e6ac8bf3cd +530, 0xefdd3437023d2d1c +531, 0x447bfbd148c8cb88 +532, 0x282380221bd442b8 +533, 0xfce8658d1347384a +534, 0x60b211a7ec6bfa8 +535, 0xd21729cfcc692974 +536, 0x162087ecd5038a47 +537, 0x2b17000c4bce39d2 +538, 0x3a1f75ff6adcdce0 +539, 0x721a411d312f1a2c +540, 0x9c13b6133f66934d +541, 0xaa975d14978980e5 +542, 0x9403dbd4754203fa +543, 0x588c15762fdd643 +544, 0xdd1290f8d0ada73a +545, 0xd9b77380936103f4 +546, 0xb2e2047a356eb829 +547, 0x7019e5e7f76f7a47 +548, 0x3c29a461f62b001d +549, 0xa07dc6cfab59c116 +550, 0x9b97e278433f8eb +551, 0x6affc714e7236588 +552, 0x36170aeb32911a73 +553, 0x4a665104d364a789 +554, 0x4be01464ec276c9c +555, 0x71bb10271a8b4ecf +556, 0xbf62e1d068bc018 +557, 0xc9ada5db2cbbb413 +558, 0x2bded75e726650e5 +559, 0x33d5a7af2f34385d +560, 0x8179c46661d85657 +561, 0x324ebcfd29267359 +562, 0xac4c9311dc9f9110 +563, 0xc14bb6a52f9f9c0 +564, 0xc430abe15e7fb9db +565, 0xf1cce5c14df91c38 +566, 0x651e3efa2c0750d3 +567, 0x38a33604a8be5c75 +568, 0x7aaf77fe7ff56a49 +569, 0xc0d1cc56bbf27706 +570, 0x887aa47324e156c6 +571, 0x12547c004b085e8d +572, 0xd86a8d6fbbbfd011 +573, 0x57c860188c92d7b4 +574, 0xcd5d3843d361b8ca +575, 0x8f586ef05a9cb3ef +576, 0x174456e1ba6267d5 +577, 0xf5dc302c62fe583c +578, 0xa349442fabcdb71 +579, 0xe5123c1a8b6fd08e +580, 0x80681552aa318593 +581, 0xb295396deaef1e31 +582, 0xabb626e0b900e32b +583, 0xf024db8d3f19c15e +584, 0x1d04bb9548e2fb6c +585, 0xd8ed2b2214936c2b +586, 0x618ca1e430a52bc9 +587, 0xccbca44a6088136b +588, 0xd0481855c8b9ccbe +589, 0x3c92a2fade28bdf7 +590, 0x855e9fefc38c0816 +591, 0x1269bbfe55a7b27c +592, 0x1d6c853d83726d43 +593, 0xc8655511cc7fcafc +594, 0x301503eb125a9b0e +595, 0xb3108e4532016b11 +596, 0xbb7ab6245da9cb3d +597, 0x18004c49116d85eb +598, 0x3480849c20f61129 +599, 0xe28f45157463937b +600, 0x8e85e61060f2ce1 +601, 0x1673da4ec589ba5e +602, 0x74b9a6bd1b194712 +603, 0xed39e147fa8b7601 +604, 0x28ce54019102ca77 +605, 0x42e0347f6d7a2f30 +606, 0xb6a908d1c4814731 +607, 0x16c3435e4e9a126d +608, 0x8880190514c1ad54 +609, 0xfffd86229a6f773c +610, 0x4f2420cdb0aa1a93 +611, 0xf8e1acb4120fc1fa +612, 0x63a8c553ab36a2f2 +613, 0x86b88cf3c0a6a190 +614, 0x44d8b2801622c792 +615, 0xf6eae14e93082ff1 +616, 0xd9ed4f5d1b8fac61 +617, 0x1808ce17f4e1f70 +618, 0x446e83ea336f262f +619, 0xc7c802b04c0917b7 +620, 0x626f45fd64968b73 +621, 0x9ffa540edc9b2c5c +622, 0xa96a1e219e486af8 +623, 0x2bb8963884e887a1 +624, 0xba7f68a5d029e3c4 +625, 0xefc45f44392d9ca0 +626, 0x98d77762503c5eab +627, 0xd89bcf62f2da627c +628, 0xa3cab8347f833151 +629, 0xa095b7595907d5c7 +630, 0x3b3041274286181 +631, 0xb518db8919eb71fa +632, 0x187036c14fdc9a36 +633, 0xd06e28301e696f5d +634, 0xdbc71184e0c56492 +635, 0xfe51e9cae6125bfd +636, 0x3b12d17cd014df24 +637, 0x3b95e4e2c986ac1a +638, 0x29c1cce59fb2dea2 +639, 0x58c05793182a49d6 +640, 0xc016477e330d8c00 +641, 0x79ef335133ada5d +642, 0x168e2cad941203f3 +643, 0xf99d0f219d702ef0 +644, 0x655628068f8f135b +645, 0xdcdea51910ae3f92 +646, 0x8e4505039c567892 +647, 0x91a9ec7e947c89ae +648, 0x8717172530f93949 +649, 0x1c80aba9a440171a +650, 0x9c8f83f6ebe7441e +651, 0x6c05e1efea4aa7f9 +652, 
0x10af696b777c01b +653, 0x5892e9d9a92fc309 +654, 0xd2ba7da71e709432 +655, 0x46378c7c3269a466 +656, 0x942c63dfe18e772c +657, 0x6245cf02ef2476f +658, 0x6f265b2759ea2aea +659, 0x5aa757f17d17f4a6 +660, 0x1ad6a3c44fa09be6 +661, 0xe861af14e7015fb8 +662, 0x86be2e7db388c77 +663, 0x5c7bba32b519e9a0 +664, 0x3feb314850c4437b +665, 0x97955add60cfb45b +666, 0xfdb536230a540bdc +667, 0xdac9d7bf6e58512e +668, 0x4894c00e474e8120 +669, 0xa1918a37739da366 +670, 0xa8097f2096532807 +671, 0x592afe50e6c5e643 +672, 0xd69050ee6dcb33dc +673, 0xa6956b262dd3c561 +674, 0x1a55c815555e63f7 +675, 0x2ec7fd37516de2bb +676, 0x8ec251d9c70e76ba +677, 0x9b76e4abafd2689 +678, 0x9ce3f5c751a57df1 +679, 0x915c4818bf287bc7 +680, 0x2293a0d1fe07c735 +681, 0x7627dcd5d5a66d3d +682, 0xb5e4f92cc49c7138 +683, 0x6fc51298731d268c +684, 0xd19800aa95441f87 +685, 0x14f70f31162fa115 +686, 0x41a3da3752936f59 +687, 0xbec0652be95652ee +688, 0x7aa4bdb1020a290f +689, 0x4382d0d9bee899ef +690, 0xe6d988ae4277d6ff +691, 0xe618088ccb2a32d1 +692, 0x411669dfaa899e90 +693, 0x234e2bf4ba76d9f +694, 0xe109fe4cb7828687 +695, 0x1fb96b5022b0b360 +696, 0x6b24ad76c061a716 +697, 0x7e1781d4d7ecee15 +698, 0xf20c2dbe82ba38ba +699, 0xeda8e8ae1d943655 +700, 0xa58d196e2a77eaec +701, 0x44564765a5995a0b +702, 0x11902fe871ecae21 +703, 0x2ea60279900e675d +704, 0x38427227c18a9a96 +705, 0xe0af01490a1b1b48 +706, 0x826f91997e057824 +707, 0x1e57308e6e50451 +708, 0xb42d469bbbfdc350 +709, 0xb9734cff1109c49b +710, 0x98967559bb9d364f +711, 0xd6be360041907c12 +712, 0xa86a1279122a1e21 +713, 0x26f99a8527bfc698 +714, 0xfa8b85758f28f5d6 +715, 0xe3057429940806ae +716, 0x4bee2d7e84f93b2b +717, 0x948350a76ea506f4 +718, 0xa139154488045e74 +719, 0x8893579ba5e78085 +720, 0x5f21c215c6a9e397 +721, 0x456134f3a59641dc +722, 0x92c0273f8e97a9c6 +723, 0xd2936c9c3f0c6936 +724, 0xcfa4221e752c4735 +725, 0x28cd5a7457355dca +726, 0xecdfdde23d90999f +727, 0x60631b2d494d032b +728, 0xf67289df269a827f +729, 0xcbe8011ef0f5b7ef +730, 0x20eea973c70a84f5 +731, 0xbe1fd200398557ce +732, 0xd2279ee030191bba +733, 0xf2bd4291dedaf819 +734, 0xfc6d167dbe8c402 +735, 0x39ac298da5d0044b +736, 0xceac026f5f561ce +737, 0x10a5b0bdd8ad60e6 +738, 0xdeb3c626df6d4bcb +739, 0x3c128962e77ff6ca +740, 0xc786262e9c67a0e5 +741, 0x4332855b3febcdc0 +742, 0x7bda9724d1c0e020 +743, 0x6a8c93399bc4df22 +744, 0xa9b20100ac707396 +745, 0xa11a3458502c4eb5 +746, 0xb185461c60478941 +747, 0x13131d56195b7ff6 +748, 0x8d55875ddbd4aa1c +749, 0xc09b67425f469aa5 +750, 0x39e33786cc7594c4 +751, 0x75e96db8e4b08b93 +752, 0xda01cd12a3275d1e +753, 0x2c49e7822344fab5 +754, 0x9bd5f10612514ca7 +755, 0x1c801a5c828e7332 +756, 0x29797d3f4f6c7b4c +757, 0xac992715e21e4e53 +758, 0xe40e89ee887ddb37 +759, 0x15189a2b265a783b +760, 0xa854159a52af5c5 +761, 0xb9d8a5a81c12bead +762, 0x3240cdc9d59e2a58 +763, 0x1d0b872234cf8e23 +764, 0xc01224cf6ce12cff +765, 0x2601e9f3905c8663 +766, 0xd4ecf9890168d6b4 +767, 0xa45db796d89bfdd5 +768, 0x9f389406dad64ab4 +769, 0xa5a851adce43ffe3 +770, 0xd0962c41c26e5aa9 +771, 0x8a671679e48510a4 +772, 0xc196dc0924a6bfeb +773, 0x3ead661043b549cb +774, 0x51af4ca737d405ac +775, 0xf4425b5c62275fb6 +776, 0x71e69d1f818c10f5 +777, 0xacaf4af2d3c70162 +778, 0x2e1f1d4fd7524244 +779, 0xe54fdd8f388890e8 +780, 0xfda0d33e84eb2b83 +781, 0x53965c5e392b81da +782, 0x5c92288267263097 +783, 0xcac1b431c878c66c +784, 0x36c0e1cf417241c6 +785, 0x5cc4d9cd1a36bf2c +786, 0x32e4257bb5d3e470 +787, 0x4aecff904adb44fb +788, 0x4d91a8e0d1d60cac +789, 0xa3b478388385b038 +790, 0x48d955f24eba70be +791, 0x310e4deb07f24f68 +792, 0x8853e73b1f30a5a +793, 0x278aee45c2a65c5 +794, 
0xf6932eedbd62fb0b +795, 0xafb95958c82fafad +796, 0x78e807c18616c16c +797, 0xd7abadda7488ed9f +798, 0x2dd72e2572aa2ae6 +799, 0x6ec3791982c2be09 +800, 0x6865bb314fac478f +801, 0xa14dc0ce09000d1a +802, 0xb8081ad134da10f2 +803, 0xc4ac1534aa825ef5 +804, 0xd83aeb48ae2d538f +805, 0x38052027e3074be4 +806, 0xa9833e06ef136582 +807, 0x4f02d790ec9fd78 +808, 0xec2f60bc711c5bdc +809, 0x9253b0d12268e561 +810, 0xa8ac607fdd62c206 +811, 0x895e28ebc920289f +812, 0xe2fd42b154243ac7 +813, 0xc69cac2f776eee19 +814, 0xf4d4ac11db56d0dc +815, 0xa8d37049b9f39833 +816, 0x75abbf8a196c337c +817, 0xb115bb76750d27b8 +818, 0x39426d187839154 +819, 0xd488423e7f38bf83 +820, 0xbb92e0c76ecb6a62 +821, 0x3055a018ce39f4e3 +822, 0xc93fe0e907729bfb +823, 0x65985d17c5863340 +824, 0x2088ae081b2028e1 +825, 0x6e628de873314057 +826, 0x864377cccf573f0e +827, 0xae03f4c9aa63d132 +828, 0xb1db766d6404c66d +829, 0xdce5a22414a374b +830, 0x622155b777819997 +831, 0x69fe96e620371f3c +832, 0xa9c67dbc326d94fc +833, 0x932a84ae5dd43bab +834, 0xe2301a20f6c48c3f +835, 0x795d2e79c6477300 +836, 0xd8e3e631289521e7 +837, 0xae2684979002dfd6 +838, 0xc9c2392377550f89 +839, 0xa1b0c99d508ef7ec +840, 0x593aef3c5a5272ec +841, 0xe32e511a4b7162cd +842, 0xab3b81655f5a2857 +843, 0x1b535e1a0aaf053e +844, 0x5b33f56c1b6a07e2 +845, 0x782dc8cfcac4ef36 +846, 0xb3d4f256eecfd202 +847, 0xf73a6598f58c4f7e +848, 0xd5722189524870ae +849, 0x707878de6b995fc0 +850, 0xc3eb6ba73e3d7e8a +851, 0xca75c017655b75a7 +852, 0x1b29369ea3541e5f +853, 0x352e98858bdb58a3 +854, 0x1e4412d184b6b27d +855, 0x2d375ba0304b2d17 +856, 0x56c30fce69a5d08e +857, 0x6b8c2b0c06584bda +858, 0xde4dfff228c8c91f +859, 0xb7c9edd574e6287f +860, 0xf6078281c9fca2b2 +861, 0xb9b9a51de02a2f1e +862, 0xa411bef31c0103b0 +863, 0xc5facd8fc5e1d7a3 +864, 0x54e631c05ddf7359 +865, 0x815b42b3fd06c474 +866, 0xc9ac07566fda18ec +867, 0xd84ea62957bd8e15 +868, 0x5575f74b5cfd8803 +869, 0x5779a8d460c2e304 +870, 0xfd6e87e264a85587 +871, 0xa1d674daa320b26d +872, 0x2c3c3ec64b35afc4 +873, 0x393a274ff03e6935 +874, 0x1f40ecbac52c50ea +875, 0xc3de64fa324ffc0c +876, 0x56ae828b7f9deb04 +877, 0xe7c1a77b5c1f2cb3 +878, 0xa4c4aab19ea921cc +879, 0xec164c238825822c +880, 0xa6a3304770c03b03 +881, 0x3a63641d5b1e8123 +882, 0x42677be3a54617ef +883, 0xa2680423e3a200c0 +884, 0x8b17cf75f3f37277 +885, 0xe7ce65a49242be3d +886, 0x7f85934271323e4b +887, 0xcfb0f431f79a4fab +888, 0x392e4041a8505b65 +889, 0xd3e5daf0d8b25ea6 +890, 0x9447eff675d80f53 +891, 0xea27a9d53cfaeea8 +892, 0xe3f2335945a83ba +893, 0x8875a43ce216413b +894, 0xe49941f9eabce33e +895, 0x9357c1296683a5b1 +896, 0xf0f16439e81ee701 +897, 0x3181515295ffd79a +898, 0x9d7150fffd169ed8 +899, 0x2d6a1d281e255a72 +900, 0x81bf1286fb3a92b6 +901, 0x566d3079b499e279 +902, 0xc7939ca8f047341 +903, 0xb1f8050e7c2d59f6 +904, 0x605701045e7be192 +905, 0x51b73360e8e31a1c +906, 0x9f4ad54483ba9fe0 +907, 0xd3085b8fcf69d1c8 +908, 0xc3e7475026dc5f0b +909, 0x5800f8554b157354 +910, 0x37dfdf858cfcd963 +911, 0x3a1fce05ce385072 +912, 0xf495c062645c20c3 +913, 0xdcbeec2c3492c773 +914, 0xc38f427589d1d0b4 +915, 0x681ead60216a8184 +916, 0x4bd569c40cc88c41 +917, 0x49b0d442e130b7a2 +918, 0xee349156b7d1fa3f +919, 0x2bde2d2db055135b +920, 0xc6a460d2fbcb2378 +921, 0xd0f170494ff3dbb +922, 0xb294422492528a23 +923, 0xfc95873c854e7b86 +924, 0x6c9c3ad1797bb19c +925, 0xe0c06f2aab65062d +926, 0x58e32ce0f11e3a81 +927, 0xa745fcd729ff5036 +928, 0x599b249b2fc2cdb2 +929, 0x78f23b5b0dd5b082 +930, 0x6de3e957f549ecfc +931, 0x9d0712fa6d878756 +932, 0x9076e8554e4a413a +933, 0xf3185818c0294de8 +934, 0x5de7cdf4b455b9b6 +935, 0xb15f6908ed703f7d +936, 
0x98c654dfedc6818 +937, 0x120502ab0e93ae42 +938, 0x67966a98a58dc120 +939, 0x1caa0fc628989482 +940, 0xd8b2c3cd480a8625 +941, 0x85c70071b3aed671 +942, 0xff385f8473714662 +943, 0xe2868e4bf3773b63 +944, 0x96cf8019b279298e +945, 0x8511cc930bd74800 +946, 0x5312e48fdd55f5ab +947, 0xfcdae564b52df78d +948, 0x9eee48373e652176 +949, 0x953788f6bcbc56b0 +950, 0xd1a3855dbd2f6b37 +951, 0x3ad32acf77f4d1e9 +952, 0x917c7be81b003e30 +953, 0x9ce817da1e2e9dfb +954, 0x2968983db162d44d +955, 0x1e005decef5828ad +956, 0xc38fe59d1aa4f3d5 +957, 0xf357f1710dc02f1d +958, 0x2613912a4c83ec67 +959, 0x832a11470b9a17cb +960, 0x5e85508a611f0dad +961, 0x2781131677f59d56 +962, 0xa82358d7d4b0237f +963, 0xfbf8b3cc030c3af6 +964, 0x68b2f68ac8a55adb +965, 0x3b6fcf353add0ada +966, 0xd1956049bcd15bd5 +967, 0x95b76f31c7f98b6d +968, 0x814b6690df971a84 +969, 0xdcf7959cddd819e4 +970, 0xcf8c72c5d804fc88 +971, 0x56883769c8945a22 +972, 0x1f034652f658cf46 +973, 0x41df1324cda235a1 +974, 0xeccd32524504a054 +975, 0x974e0910a04ec02c +976, 0x72104507b821f6db +977, 0x791f8d089f273044 +978, 0xe0f79a4f567f73c3 +979, 0x52fe5bea3997f024 +980, 0x5f8b9b446494f78 +981, 0xfd9f511947059190 +982, 0x3aea9dac6063bce3 +983, 0xbfdae4dfc24aee60 +984, 0xa82cdbbf0a280318 +985, 0xf460aae18d70aa9d +986, 0x997367cb204a57c4 +987, 0x616e21ab95ba05ef +988, 0x9bfc93bec116769f +989, 0x2b2ee27c37a3fa5b +990, 0xb25c6ed54006ee38 +991, 0xab04d4a5c69e69a5 +992, 0x6d2f6b45f2d8438f +993, 0x4ad2f32afc82f092 +994, 0x513d718908f709c0 +995, 0x5272aadc4fffca51 +996, 0xeb3f87e66156ef5d +997, 0xf8a3d5a46a86ba85 +998, 0xdb4548a86f27abfd +999, 0x57c05f47ff62380d diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64-testset-1.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64-testset-1.csv new file mode 100644 index 0000000..4fffe69 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xa475f55fbb6bc638 +1, 0xb2d594b6c29d971c +2, 0x275bc4ece4484fb1 +3, 0x569be72d9b3492fb +4, 0x89a5bb9b206a670c +5, 0xd951bfa06afdc3f9 +6, 0x7ee2e1029d52a265 +7, 0x12ef1d4de0cb4d4c +8, 0x41658ba8f0ef0280 +9, 0x5b650c82e4fe09c5 +10, 0x638a9f3e30ec4e94 +11, 0x147487fb2ba9233e +12, 0x89ef035603d2d1fb +13, 0xe66ca57a190e6cbe +14, 0x330f673740dd61fc +15, 0xc71d3dce2f8bb34e +16, 0x3c07c39ff150b185 +17, 0x5df952b6cae8f099 +18, 0x9f09f2b1f0ceac80 +19, 0x19598eee2d0c4c67 +20, 0x64e06483702e0ebd +21, 0xda04d1fdb545f7fa +22, 0xf2cf53b61a0c4f9b +23, 0xf0bb724ce196f66e +24, 0x71cefde55d9cf0f +25, 0x6323f62824a20048 +26, 0x1e93604680f14b4e +27, 0xd9d8fad1d4654025 +28, 0xf4ee25af2e76ca08 +29, 0x6af3325896befa98 +30, 0xad9e43abf5e04053 +31, 0xbf930e318ce09de3 +32, 0x61f9583b4f9ffe76 +33, 0x9b69d0b3d5ec8958 +34, 0xa608f250f9b2ca41 +35, 0x6fdba7073dc2bb5d +36, 0xa9d57601efea6d26 +37, 0xc24a88a994954105 +38, 0xc728b1f78d88fe5b +39, 0x88da88c2b083b3b2 +40, 0xa9e27f7303c76cfd +41, 0xc4c24608c29176eb +42, 0x5420b58466b972fd +43, 0xd2018a661b6756c8 +44, 0x7caed83d9573fc7 +45, 0x562a3d81b849a06a +46, 0x16588af120c21f2c +47, 0x658109a7e0eb4837 +48, 0x877aabb14d3822e1 +49, 0x95704c342c3745fe +50, 0xeeb8a0dc81603616 +51, 0x431bf94889290419 +52, 0xe4a9410ab92a5863 +53, 0xbc6be64ea60f12ba +54, 0x328a2da920015063 +55, 0x40f6b3bf8271ae07 +56, 0x4068ff00a0e854f8 +57, 0x1b287572ca13fa78 +58, 0xa11624a600490b99 +59, 0x4a04ef29eb7150fa +60, 0xcc9469ab5ffb739 +61, 0x99a6a9f8d95e782 +62, 0x8e90356573e7a070 +63, 0xa740b8fb415c81c4 +64, 0x47eccef67447f3da +65, 0x2c720afe3a62a49b +66, 0xe2a747f0a43eacf4 +67, 
0xba063a87ab165576 +68, 0xbc1c78ed27feb5a3 +69, 0x285a19fa3974f9d +70, 0x489c61e704f5f0e3 +71, 0xf5ab04f6b03f238b +72, 0x7e25f88138a110dd +73, 0xc3d1cef3d7c1f1d1 +74, 0xc3de6ec64d0d8e00 +75, 0x73682a15b6cc5088 +76, 0x6fecbeb319163dc5 +77, 0x7e100d5defe570a1 +78, 0xad2af9af076dce57 +79, 0x3c65100e23cd3a9a +80, 0x4b442cc6cfe521bb +81, 0xe89dc50f8ab1ef75 +82, 0x8b3c6fdc2496566 +83, 0xdfc50042bc2c308c +84, 0xe39c5f158b33d2b2 +85, 0x92f6adefdfeb0ac +86, 0xdf5808a949c85b3e +87, 0x437384021c9dace9 +88, 0xa7b5ed0d3d67d8f +89, 0xe1408f8b21da3c34 +90, 0xa1bba125c1e80522 +91, 0x7611dc4710385264 +92, 0xb00a46ea84082917 +93, 0x51bf8002ffa87cef +94, 0x9bb81013e9810adc +95, 0xd28f6600013541cd +96, 0xc2ca3b1fa7791c1f +97, 0x47f9ad58f099c82c +98, 0x4d1bb9458469caf9 +99, 0xca0b165b2844257 +100, 0xc3b2e667d075dc66 +101, 0xde22f71136a3dbb1 +102, 0x23b4e3b6f219e4c3 +103, 0x327e0db4c9782f66 +104, 0x9365506a6c7a1807 +105, 0x3e868382dedd3be7 +106, 0xff04fa6534bcaa99 +107, 0x96621a8862995305 +108, 0x81bf39cb5f8e1df7 +109, 0x79b684bb8c37af7a +110, 0xae3bc073c3cde33c +111, 0x7805674112c899ac +112, 0xd95a27995abb20f2 +113, 0x71a503c57b105c40 +114, 0x5ff00d6a73ec8acc +115, 0x12f96391d91e47c2 +116, 0xd55ca097b3bd4947 +117, 0x794d79d20468b04 +118, 0x35d814efb0d7a07d +119, 0xfa9ac9bd0aae76d3 +120, 0xa77b8a3711e175cd +121, 0xe6694fbf421f9489 +122, 0xd8f1756525a1a0aa +123, 0xe38dfa8426277433 +124, 0x16b640c269bbcd44 +125, 0x2a7a5a67ca24cfeb +126, 0x669039c28d5344b4 +127, 0x2a445ee81fd596bb +128, 0x600df94cf25607e0 +129, 0x9358561a7579abff +130, 0xee1d52ea179fc274 +131, 0x21a8b325e89d31be +132, 0x36fc0917486eec0a +133, 0x3d99f40717a6be9f +134, 0x39ac140051ca55ff +135, 0xcef7447c26711575 +136, 0xf22666870eff441d +137, 0x4a53c6134e1c7268 +138, 0xd26de518ad6bdb1b +139, 0x1a736bf75b8b0e55 +140, 0xef1523f4e6bd0219 +141, 0xb287b32fd615ad92 +142, 0x2583d6af5e841dd5 +143, 0x4b9294aae7ca670c +144, 0xf5aa4a84174f3ca9 +145, 0x886300f9e0dc6376 +146, 0x3611401e475ef130 +147, 0x69b56432b367e1ac +148, 0x30c330e9ab36b7c4 +149, 0x1e0e73079a85b8d5 +150, 0x40fdfc7a5bfaecf +151, 0xd7760f3e8e75a085 +152, 0x1cc1891f7f625313 +153, 0xeece1fe6165b4272 +154, 0xe61111b0c166a3c1 +155, 0x2f1201563312f185 +156, 0xfd10e8ecdd2a57cb +157, 0x51cdc8c9dd3a89bf +158, 0xed13cc93938b5496 +159, 0x843816129750526b +160, 0xd09995cd6819ada +161, 0x4601e778d40607df +162, 0xef9df06bd66c2ea0 +163, 0xae0bdecd3db65d69 +164, 0xbb921a3c65a4ae9a +165, 0xd66698ce8e9361be +166, 0xacdc91647b6068f4 +167, 0xe505ef68f2a5c1c0 +168, 0xd6e62fd27c6ab137 +169, 0x6a2ba2c6a4641d86 +170, 0x9c89143715c3b81 +171, 0xe408c4e00362601a +172, 0x986155cbf5d4bd9d +173, 0xb9e6831728c893a7 +174, 0xb985497c3bf88d8c +175, 0xd0d729214b727bec +176, 0x4e557f75fece38a +177, 0x6572067fdfd623ca +178, 0x178d49bb4d5cd794 +179, 0xe6baf59f60445d82 +180, 0x5607d53518e3a8d2 +181, 0xba7931adb6ebbd61 +182, 0xe853576172611329 +183, 0xe945daff96000c44 +184, 0x565b9ba3d952a176 +185, 0xcdb54d4f88c584c8 +186, 0x482a7499bee9b5e5 +187, 0x76560dd0affe825b +188, 0x2a56221faa5ca22c +189, 0x7729be5b361f5a25 +190, 0xd6f2195795764876 +191, 0x59ef7f8f423f18c5 +192, 0x7ebefed6d02adde1 +193, 0xcfec7265329c73e5 +194, 0x4fd8606a5e59881c +195, 0x95860982ae370b73 +196, 0xdecfa33b1f902acc +197, 0xf9b8a57400b7c0a6 +198, 0xd20b822672ec857b +199, 0x4eb81084096c7364 +200, 0xe535c29a44d9b6ad +201, 0xdef8b48ebacb2e29 +202, 0x1063bc2b8ba0e915 +203, 0xe4e837fb53d76d02 +204, 0x4df935db53579fb8 +205, 0xa30a0c8053869a89 +206, 0xe891ee58a388a7b5 +207, 0x17931a0c64b8a985 +208, 0xaf2d350b494ce1b3 +209, 0x2ab9345ffbcfed82 +210, 0x7de3fe628a2592f0 
+211, 0x85cf54fab8b7e79d +212, 0x42d221520edab71b +213, 0x17b695b3af36c233 +214, 0xa4ffe50fe53eb485 +215, 0x1102d242db800e4d +216, 0xc8dc01f0233b3b6 +217, 0x984a030321053d36 +218, 0x27fa8dc7b7112c0e +219, 0xba634dd8294e177f +220, 0xe67ce34b36332eb +221, 0x8f1351e1894fb41a +222, 0xb522a3048761fd30 +223, 0xc350ad9bc6729edc +224, 0xe0ed105bd3c805e1 +225, 0xa14043d2b0825aa7 +226, 0xee7779ce7fc11fdf +227, 0xc0fa8ba23a60ab25 +228, 0xb596d1ce259afbad +229, 0xaa9b8445537fdf62 +230, 0x770ab2c700762e13 +231, 0xe812f1183e40cc1 +232, 0x44bc898e57aefbbd +233, 0xdd8a871df785c996 +234, 0x88836a5e371eb36b +235, 0xb6081c9152623f27 +236, 0x895acbcd6528ca96 +237, 0xfb67e33ddfbed435 +238, 0xaf7af47d323ce26 +239, 0xe354a510c3c39b2d +240, 0x5cacdedda0672ba3 +241, 0xa440d9a2c6c22b09 +242, 0x6395099f48d64304 +243, 0xc11cf04c75f655b5 +244, 0x1c4e054d144ddb30 +245, 0x3e0c2db89d336636 +246, 0x127ecf18a5b0b9a7 +247, 0x3b50551a88ea7a73 +248, 0xbd27003e47f1f684 +249, 0xf32d657782baac9b +250, 0x727f5cabf020bc9 +251, 0x39c1c1c226197dc7 +252, 0x5552c87b35deeb69 +253, 0x64d54067b5ce493f +254, 0x3494b091fe28dda0 +255, 0xdf0278bc85ee2965 +256, 0xdef16fec25efbd66 +257, 0xe2be09f578c4ce28 +258, 0xd27a9271979d3019 +259, 0x427f6fcd71845e3 +260, 0x26b52c5f81ec142b +261, 0x98267efc3986ad46 +262, 0x7bf4165ddb7e4374 +263, 0xd05f7996d7941010 +264, 0x3b3991de97b45f14 +265, 0x9068217fb4f27a30 +266, 0xd8fe295160afc7f3 +267, 0x8a159fab4c3bc06f +268, 0x57855506d19080b6 +269, 0x7636df6b3f2367a4 +270, 0x2844ee3abd1d5ec9 +271, 0xe5788de061f51c16 +272, 0x69e78cc9132a164 +273, 0xacd53cde6d8cd421 +274, 0xb23f3100068e91da +275, 0x4140070a47f53891 +276, 0xe4a422225a96e53a +277, 0xb82a8925a272a2ac +278, 0x7c2f9573590fe3b7 +279, 0xbaf80764db170575 +280, 0x955abffa54358368 +281, 0x355ce7460614a869 +282, 0x3700ede779a4afbf +283, 0x10a6ec01d92d68cd +284, 0x3308f5a0a4c0afef +285, 0x97b892d7601136c9 +286, 0x4955c3b941b8552e +287, 0xca85aa67e941961d +288, 0xb1859ae5db28e9d2 +289, 0x305d072ac1521fbd +290, 0xed52a868996085bb +291, 0x723bfa6a76358852 +292, 0x78d946ecd97c5fb3 +293, 0x39205b30a8e23e79 +294, 0xb927e3d086baadbe +295, 0xa18d6946136e1ff5 +296, 0xdab6f0b51c1eb5ff +297, 0xf0a640bf7a1af60c +298, 0xf0e81db09004d0d4 +299, 0xfe76cebdbe5a4dde +300, 0x2dafe9cc3decc376 +301, 0x4c871fdf1af34205 +302, 0xe79617d0c8fa893b +303, 0xee658aaad3a141f7 +304, 0xfd91aa74863e19f1 +305, 0x841b8f55c103cc22 +306, 0x22766ed65444ad5d +307, 0x56d03d1beca6c17a +308, 0x5fd4c112c92036ae +309, 0x75466ae58a5616dc +310, 0xfbf98b1081e802a9 +311, 0xdc325e957bf6d8f5 +312, 0xb08da7015ebd19b7 +313, 0xf25a9c0944f0c073 +314, 0xf4625bafa0ced718 +315, 0x4349c9e093a9e692 +316, 0x75a9ccd4dd8935cb +317, 0x7e6cf9e539361e91 +318, 0x20fdd22fb6edd475 +319, 0x5973021b57c2311f +320, 0x75392403667edc15 +321, 0xed9b2156ea70d9f1 +322, 0xf40c114db50b64a0 +323, 0xe26bb2c9eef20c62 +324, 0x409c1e3037869f03 +325, 0xcdfd71fdda3b7f91 +326, 0xa0dfae46816777d6 +327, 0xde060a8f61a8deb8 +328, 0x890e082a8b0ca4fc +329, 0xb9f2958eddf2d0db +330, 0xd17c148020d20e30 +331, 0xffdc9cc176fe7201 +332, 0xffb83d925b764c1 +333, 0x817ea639e313da8d +334, 0xa4dd335dd891ca91 +335, 0x1342d25a5e81f488 +336, 0xfa7eb9c3cf466b03 +337, 0xfe0a423d44b185d0 +338, 0x101cfd430ab96049 +339, 0x7b5d3eda9c4504b +340, 0xe20ccc006e0193f1 +341, 0xf54ccddedebc5df0 +342, 0xc0edd142bd58f1db +343, 0x3831f40d378d2430 +344, 0x80132353f0a88289 +345, 0x688f23c419d03ef8 +346, 0x4c6837e697884066 +347, 0x699387bb2e9a3a8f +348, 0x8996f860342448d8 +349, 0xb0f80dff99bfa5cc +350, 0x3e927a7f9ea12c8e +351, 0xd7e498d1e5f9dff3 +352, 0x78ecb97bb3f864cc +353, 
0x3c4ffd069a014d38 +354, 0xf8d5073a1e09b4d4 +355, 0x8717e854f9faef23 +356, 0xfbcc5478d8d0ad7 +357, 0xd3cd8b233ca274ff +358, 0x8bd8f11f79beb265 +359, 0xf64498a832d8fd0e +360, 0xb01bba75112131ec +361, 0x55572445a7869781 +362, 0x7b56622f18cb3d7a +363, 0x7f192c9e075bdb83 +364, 0xd9a112f836b83ff3 +365, 0x68673b37269653dc +366, 0xe46a9433fb6a0879 +367, 0x127d756ca4779001 +368, 0xc1378e8b1e8eab94 +369, 0x1006edb0f51d078c +370, 0xc6dd53961232d926 +371, 0x9a4aeef44038256d +372, 0xd357f4fa652d4f5f +373, 0x59f3d2cc3378598 +374, 0xe76e6207a824a7fc +375, 0x5fc5e33712ceffef +376, 0x77d24aeb0ccb1adc +377, 0x5be4b2826805659e +378, 0x257c69d787e64634 +379, 0x58dd52ca6bc727b1 +380, 0x3ab997767235ea33 +381, 0x986a2a7a966fad14 +382, 0xc900f8b27761dcc4 +383, 0x44991bdb13795700 +384, 0xe5c145a4fe733b2 +385, 0x56f041b56bffe0d3 +386, 0x5779c4fef8067996 +387, 0xa0fe8748e829532d +388, 0x840c1277d78d9dd4 +389, 0x37ebcb315432acbc +390, 0xf4bc8738433ba3be +391, 0x8b122993f2e10062 +392, 0xe1fe8481f2681ed5 +393, 0x8e23f1630d9f494a +394, 0xda24661a01b7d0b3 +395, 0x7a02942a179cee36 +396, 0xf1e08a3c09b71ac +397, 0x3dec2cc7ee0bd8fd +398, 0x1f3e480113d805d4 +399, 0xc061b973ad4e3f2c +400, 0x6bea750f17a66836 +401, 0xbc2add72eac84c25 +402, 0xcff058d3f97934ca +403, 0x54ccc30987778ec2 +404, 0x93449ec1e1469558 +405, 0xe2ff369eb0c6836 +406, 0x41c2df2d63bf8e55 +407, 0xf9302629b6c71be2 +408, 0xdd30376b8e5ab29a +409, 0x12db9e04f911d754 +410, 0x8d03d6cd359f1b97 +411, 0xe15956511abf1cee +412, 0x9b68e10e2c2fd940 +413, 0x2e28de6491c1ce53 +414, 0x52b329b72d0c109d +415, 0xc2c0b115f9da2a60 +416, 0x6ca084105271bbff +417, 0x49b92b8676058c1e +418, 0x767fc92a70f7e5a3 +419, 0x87ba4ed4b65a6aa0 +420, 0xf70b052e0a3975e9 +421, 0x3e925c3306db9eec +422, 0x43253f1d96ac9513 +423, 0xe3e04f1a1ea454c4 +424, 0x763e3f4cc81ba0c8 +425, 0x2a2721ac69265705 +426, 0xdf3b0ac6416ea214 +427, 0xa6a6b57450f3e000 +428, 0xc3d3b1ac7dbfe6ac +429, 0xb66e5e6f7d2e4ec0 +430, 0x43c65296f98f0f04 +431, 0xdb0f6e3ff974d842 +432, 0x3d6b48e02ebb203b +433, 0xd74674ebf09d8f27 +434, 0xbe65243c58fc1200 +435, 0x55eb210a68d42625 +436, 0x87badab097dbe883 +437, 0xada3fda85a53824f +438, 0xef2791e8f48cd37a +439, 0x3fe7fceb927a641a +440, 0xd3bffd3ff031ac78 +441, 0xb94efe03da4d18fb +442, 0x162a0ad8da65ea68 +443, 0x300f234ef5b7e4a6 +444, 0xa2a8b4c77024e4fb +445, 0x5950f095ddd7b109 +446, 0xded66dd2b1bb02ba +447, 0x8ec24b7fa509bcb6 +448, 0x9bede53d924bdad6 +449, 0xa9c3f46423be1930 +450, 0x6dfc90597f8de8b4 +451, 0xb7419ebc65b434f0 +452, 0xa6596949238f58b9 +453, 0x966cbade640829b8 +454, 0x58c74877bdcbf65e +455, 0xaa103b8f89b0c453 +456, 0x219f0a86e41179a4 +457, 0x90f534fc06ddc57f +458, 0x8db7cdd644f1affa +459, 0x38f91de0167127ac +460, 0xdcd2a65e4df43daa +461, 0x3e04f34a7e01f834 +462, 0x5b237eea68007768 +463, 0x7ff4d2b015921768 +464, 0xf786b286549d3d51 +465, 0xaefa053fc2c3884c +466, 0x8e6a8ff381515d36 +467, 0x35b94f3d0a1fce3c +468, 0x165266d19e9abb64 +469, 0x1deb5caa5f9d8076 +470, 0x13ab91290c7cfe9d +471, 0x3651ca9856be3e05 +472, 0xe7b705f6e9cccc19 +473, 0xd6e7f79668c127ed +474, 0xa9faf37154896f92 +475, 0x89fbf190603e0ab1 +476, 0xb34d155a86f942d0 +477, 0xb2d4400a78bfdd76 +478, 0x7c0946aca8cfb3f0 +479, 0x7492771591c9d0e8 +480, 0xd084d95c5ca2eb28 +481, 0xb18d12bd3a6023e +482, 0xea217ed7b864d80b +483, 0xe52f69a755dd5c6f +484, 0x127133993d81c4aa +485, 0xe07188fcf1670bfb +486, 0x178fbfe668e4661d +487, 0x1c9ee14bb0cda154 +488, 0x8d043b96b6668f98 +489, 0xbc858986ec96ca2b +490, 0x7660f779d528b6b7 +491, 0xd448c6a1f74ae1d3 +492, 0x178e122cfc2a6862 +493, 0x236f000abaf2d23b +494, 0x171b27f3f0921915 +495, 
0x4c3ff07652f50a70 +496, 0x18663e5e7d3a66ca +497, 0xb38c97946c750cc9 +498, 0xc5031aae6f78f909 +499, 0x4d1514e2925e95c1 +500, 0x4c2184a741dabfbb +501, 0xfd410364edf77182 +502, 0xc228157f863ee873 +503, 0x9856fdc735cc09fc +504, 0x660496cd1e41d60e +505, 0x2edf1d7e01954c32 +506, 0xd32e94639bdd98cf +507, 0x8e153f48709a77d +508, 0x89357f332d2d6561 +509, 0x1840d512c97085e6 +510, 0x2f18d035c9e26a85 +511, 0x77b88b1448b26d5b +512, 0xc1ca6ef4cdae0799 +513, 0xcc203f9e4508165f +514, 0xeaf762fbc9e0cbbe +515, 0xc070c687f3c4a290 +516, 0xd49ed321068d5c15 +517, 0x84a55eec17ee64ee +518, 0x4d8ee685298a8871 +519, 0x9ff5f17d7e029793 +520, 0x791d7d0d62e46302 +521, 0xab218b9114e22bc6 +522, 0x4902b7ab3f7119a7 +523, 0x694930f2e29b049e +524, 0x1a3c90650848999f +525, 0x79f1b9d8499c932b +526, 0xfacb6d3d55e3c92f +527, 0x8fd8b4f25a5da9f5 +528, 0xd037dcc3a7e62ae7 +529, 0xfecf57300d8f84f4 +530, 0x32079b1e1dc12d48 +531, 0xe5f8f1e62b288f54 +532, 0x97feba3a9c108894 +533, 0xd279a51e1899a9a0 +534, 0xd68eea8e8e363fa8 +535, 0x7394cf2deeca9386 +536, 0x5f70b0c80f1dbf10 +537, 0x8d646916ed40462 +538, 0xd253bb1c8a12bbb6 +539, 0x38f399a821fbd73e +540, 0x947523a26333ac90 +541, 0xb52e90affbc52a37 +542, 0xcf899cd964654da4 +543, 0xdf66ae9cca8d99e7 +544, 0x6051478e57c21b6a +545, 0xffa7dc975af3c1da +546, 0x195c7bff2d1a8f5 +547, 0x64f12b6575cf984d +548, 0x536034cb842cf9e1 +549, 0x180f247ce5bbfad +550, 0x8ced45081b134867 +551, 0x532bbfdf426710f3 +552, 0x4747933e74c4f54d +553, 0x197a890dc4793401 +554, 0x76c7cc2bd42fae2 +555, 0xdabfd67f69675dd0 +556, 0x85c690a68cdb3197 +557, 0xe482cec89ce8f92 +558, 0x20bc9fb7797011b1 +559, 0x76dc85a2185782ad +560, 0x3df37c164422117a +561, 0x99211f5d231e0ab0 +562, 0xef7fd794a0a91f4 +563, 0x419577151915f5fe +564, 0x3ce14a0a7135dae3 +565, 0x389b57598a075d6a +566, 0x8cc2a9d51b5af9aa +567, 0xe80a9beffbd13f13 +568, 0x65e96b22ea8a54d8 +569, 0x79f38c4164138ede +570, 0xd1955846cba03d81 +571, 0x60359fe58e4f26d6 +572, 0x4ea724f585f8d13e +573, 0x316dfdbadc801a3c +574, 0x20aa29b7c6dd66fe +575, 0x65eaf83a6a008caa +576, 0x407000aff1b9e8cb +577, 0xb4d49bfb2b268c40 +578, 0xd4e6fe8a7a0f14a9 +579, 0xe34afef924e8f58e +580, 0xe377b0c891844824 +581, 0x29c2e20c112d30c8 +582, 0x906aad1fe0c18a95 +583, 0x308385f0efbb6474 +584, 0xf23900481bf70445 +585, 0xfdfe3ade7f937a55 +586, 0xf37aae71c33c4f97 +587, 0x1c81e3775a8bed85 +588, 0x7eb5013882ce35ea +589, 0x37a1c1692495818d +590, 0x3f90ae118622a0ba +591, 0x58e4fe6fea29b037 +592, 0xd10ff1d269808825 +593, 0xbce30edb60c21bba +594, 0x123732329afd6fee +595, 0x429b4059f797d840 +596, 0x421166568a8c4be1 +597, 0x88f895c424c1bd7f +598, 0x2adaf7a7b9f781cb +599, 0xa425644b26cb698 +600, 0x8cc44d2486cc5743 +601, 0xdb9f357a33abf6ba +602, 0x1a57c4ea77a4d70c +603, 0x1dea29be75239e44 +604, 0x463141a137121a06 +605, 0x8fecfbbe0b8a9517 +606, 0x92c83984b3566123 +607, 0x3b1c69180ed28665 +608, 0x14a6073425ea8717 +609, 0x71f4c2b3283238d7 +610, 0xb3d491e3152f19f +611, 0x3a0ba3a11ebac5d2 +612, 0xddb4d1dd4c0f54ac +613, 0xdb8f36fe02414035 +614, 0x1cf5df5031b1902c +615, 0x23a20ed12ef95870 +616, 0xf113e573b2dedcbb +617, 0x308e2395cde0a9fa +618, 0xd377a22581c3a7da +619, 0xe0ced97a947a66fb +620, 0xe44f4de9cd754b00 +621, 0x2344943337d9d1bf +622, 0x4b5ae5e2ea6e749c +623, 0x9b8d2e3ef41d1c01 +624, 0x59a5a53ebbd24c6b +625, 0x4f7611bf9e8a06fb +626, 0xea38c7b61361cd06 +627, 0xf125a2bfdd2c0c7 +628, 0x2df8dcb5926b9ebb +629, 0x233e18720cc56988 +630, 0x974c61379b4aa95e +631, 0xc7fe24c1c868910b +632, 0x818fd1affc82a842 +633, 0xcee92a952a26d38e +634, 0x8962f575ebcbf43 +635, 0x7770687e3678c460 +636, 0xdfb1db4ed1298117 +637, 
0xb9db54cb03d434d3 +638, 0x34aebbf2244257ad +639, 0xd836db0cb210c490 +640, 0x935daed7138957cd +641, 0x3cd914b14e7948fd +642, 0xd0472e9ed0a0f7f0 +643, 0xa9df33dca697f75e +644, 0x15e9ea259398721a +645, 0x23eeba0f970abd60 +646, 0x2217fdf8bbe99a12 +647, 0x5ea490a95717b198 +648, 0xf4e2bfc28280b639 +649, 0x9d19916072d6f05c +650, 0x5e0387cab1734c6a +651, 0x93c2c8ac26e5f01e +652, 0xb0d934354d957eb1 +653, 0xee5099a1eef3188c +654, 0x8be0abca8edc1115 +655, 0x989a60845dbf5aa3 +656, 0x181c7ed964eee892 +657, 0x49838ea07481288d +658, 0x17dbc75d66116b2e +659, 0xa4cafb7a87c0117e +660, 0xab2d0ae44cdc2e6e +661, 0xdf802f2457e7da6 +662, 0x4b966c4b9187e124 +663, 0x62de9db6f4811e1a +664, 0x1e20485968bc62 +665, 0xe9ac288265caca94 +666, 0xc5c694d349aa8c1a +667, 0x3d67f2083d9bdf10 +668, 0x9a2468e503085486 +669, 0x9d6acd3dc152d1a3 +670, 0xca951e2aeee8df77 +671, 0x2707371af9cdd7b0 +672, 0x2347ae6a4eb5ecbd +673, 0x16abe5582cb426f +674, 0x523af4ff980bbccb +675, 0xb07a0f043e3694aa +676, 0x14d7c3da81b2de7 +677, 0xf471f1b8ac22305b +678, 0xdb087ffff9e18520 +679, 0x1a352db3574359e8 +680, 0x48d5431502cc7476 +681, 0x7c9b7e7003dfd1bf +682, 0x4f43a48aae987169 +683, 0x9a5d3eb66dedb3e9 +684, 0xa7b331af76a9f817 +685, 0xba440154b118ab2d +686, 0x64d22344ce24c9c6 +687, 0xa22377bd52bd043 +688, 0x9dfa1bb18ca6c5f7 +689, 0xdccf44a92f644c8b +690, 0xf623d0a49fd18145 +691, 0x556d5c37978e28b3 +692, 0xad96e32ce9d2bb8b +693, 0x2e479c120be52798 +694, 0x7501cf871af7b2f7 +695, 0xd02536a5d026a5b8 +696, 0x4b37ff53e76ab5a4 +697, 0xdb3a4039caaeab13 +698, 0x6cbd65e3b700c7be +699, 0x7367abd98761a147 +700, 0xf4f9ba216a35aa77 +701, 0xf88ca25ce921eb86 +702, 0xb211de082ec2cbf2 +703, 0xdd94aa46ec57e12e +704, 0xa967d74ad8210240 +705, 0xdaa1fada8cfa887 +706, 0x85901d081c4488ee +707, 0xcf67f79a699ef06 +708, 0x7f2f1f0de921ee14 +709, 0x28bc61e9d3f2328b +710, 0x3332f2963faf18e5 +711, 0x4167ac71fcf43a6 +712, 0x843c1746b0160b74 +713, 0xd9be80070c578a5e +714, 0xbd7250c9af1473e7 +715, 0x43f78afaa3647899 +716, 0x91c6b5dd715a75a5 +717, 0x29cc66c8a07bfef3 +718, 0x3f5c667311dc22be +719, 0x4f49cd47958260cd +720, 0xbef8be43d920b64e +721, 0x7a892a5f13061d8b +722, 0x9532f40125c819b1 +723, 0x924fca3045f8a564 +724, 0x9b2c6442453b0c20 +725, 0x7e21009085b8e793 +726, 0x9b98c17e17af59d2 +727, 0xba61acb73e3ae89a +728, 0xb9d61a710555c138 +729, 0xc2a425d80978974b +730, 0xa275e13592da7d67 +731, 0xe962103202d9ad0f +732, 0xbdf8367a4d6f33fd +733, 0xe59beb2f8648bdc8 +734, 0xb4c387d8fbc4ac1c +735, 0x5e3f276b63054b75 +736, 0xf27e616aa54d8464 +737, 0x3f271661d1cd7426 +738, 0x43a69dbee7502c78 +739, 0x8066fcea6df059a1 +740, 0x3c10f19409bdc993 +741, 0x6ba6f43fb21f23e0 +742, 0x9e182d70a5bccf09 +743, 0x1520783d2a63a199 +744, 0xba1dcc0c70b9cace +745, 0x1009e1e9b1032d8 +746, 0xf632f6a95fb0315 +747, 0x48e711c7114cbfff +748, 0xef281dcec67debf7 +749, 0x33789894d6abf59b +750, 0x6c8e541fffbe7f9c +751, 0x85417f13b08e0a88 +752, 0x9a581e36d589608f +753, 0x461dca50b1befd35 +754, 0x5a3231680dde6462 +755, 0xcc57acf729780b97 +756, 0x50301efef62e1054 +757, 0x675d042cd4f6bbc9 +758, 0x1652fdd3794384c9 +759, 0x1c93bbeeb763cd4d +760, 0x44b7240c4b105242 +761, 0x4c6af2a1b606ccfb +762, 0x18fc43ece2ec1a40 +763, 0x859a5511aeae8acb +764, 0x2f56826f1996ad2f +765, 0xa8e95ce8bb363bdf +766, 0xf4da396054e50e4b +767, 0x5493865e9895883c +768, 0x768e4c8b332ac0e3 +769, 0x32195d2aa583fca5 +770, 0xf2f353f21266bc15 +771, 0x43cddf1d021307d +772, 0x6031e3aa30300e4a +773, 0x4f1298469ac6088f +774, 0x4b4d450bafac574e +775, 0x23e1cf9c0582a22b +776, 0x2e9036980db49cd0 +777, 0xe4e228b113c411b2 +778, 0x8bddcdb82b51706 +779, 
0xd2a7ea8288593629 +780, 0x67fe90e98fdda61 +781, 0x7b63494dba95717b +782, 0x105625904510d782 +783, 0xdf4aa2242454e50a +784, 0x32541d6cd7d6c7e3 +785, 0x5661fb432591cf3b +786, 0xce920a5ed047bce7 +787, 0xed4178a3c96eea8f +788, 0xe378cd996e39863b +789, 0x169e1fdc8e2b05e1 +790, 0xaee1812ef7149a96 +791, 0x648571c7453d12c5 +792, 0xb7b6bc9328573c43 +793, 0xe7fb969078e270d7 +794, 0xdfc2b1b8985f6e6f +795, 0x862b6527ee39a1aa +796, 0x1ee329aea91d7882 +797, 0x20d25324f2fe704 +798, 0xbfcc47401fc3bbfd +799, 0x1515cdc8d48b2904 +800, 0xbd6eefe86284261c +801, 0x9b1f28e3b35f22ee +802, 0x842a29d35e5aecda +803, 0xf2346109ad370765 +804, 0x24d68add5a71afd9 +805, 0x4a691421613d91e2 +806, 0x60e3058b3c244051 +807, 0x79194905cdaa5de8 +808, 0xe0e2df35c01e8987 +809, 0xe29b78beffbb5e4a +810, 0xcdcdbc020218c19e +811, 0x5ae0af8c16feae43 +812, 0x8109292feeaf14fa +813, 0x34113f7508dfa521 +814, 0xc062ac163f56730a +815, 0xf1660e66ec6d4c4c +816, 0x5966c55f60151c80 +817, 0x3865ae8ec934b17 +818, 0x472a7314afb055ec +819, 0x7a24277309a44a44 +820, 0x556e02dd35d38baa +821, 0x9849611a1bc96ec1 +822, 0xd176f5d5a8eb0843 +823, 0x44db12ec60510030 +824, 0x272e3a06a0030078 +825, 0x7c4764dbefc075ea +826, 0x910712f3735c1183 +827, 0xd49a2da74ae7aff6 +828, 0xcf9b3e6e8f776d71 +829, 0x27789fe3ec481a02 +830, 0x86659f82c6b5912b +831, 0xe044b3dbf339158c +832, 0x99d81f6bb62a37b0 +833, 0x5f5830c246fada9a +834, 0xe68abab1eeb432cb +835, 0x49c5c5ace04e104 +836, 0x1ac3871b3fc6771b +837, 0x773b39f32d070652 +838, 0x9c4138c2ae58b1f3 +839, 0xac41c63d7452ac60 +840, 0x9248826b245359e1 +841, 0x99bba1c7a64f1670 +842, 0xe0dc99ff4ebb92f2 +843, 0x113638652740f87c +844, 0xebf51e94da88cfc +845, 0x5441c344b81b2585 +846, 0xe1e69e0bc2de652a +847, 0xe9ab6d64ae42ed1e +848, 0x879af8730e305f31 +849, 0x36b9ad912c7e00d6 +850, 0x83ef5e9fca853886 +851, 0xda54d48bb20ea974 +852, 0x32c6d93aefa92aa2 +853, 0x4e887b2c3391847d +854, 0x50966e815f42b1b8 +855, 0x53411ac087832837 +856, 0x46f64fef79df4f29 +857, 0xb34aae3924cd272c +858, 0xf5ad455869a0adbe +859, 0x8351ded7144edac8 +860, 0xeb558af089677494 +861, 0x36ed71d69293a8d6 +862, 0x659f90bf5431b254 +863, 0x53349102b7519949 +864, 0x3db83e20b1713610 +865, 0x6d63f96090556254 +866, 0x4cc0467e8f45c645 +867, 0xb8840c4bd5cd4091 +868, 0xbd381463cc93d584 +869, 0x203410d878c2066d +870, 0x2ebea06213cf71c8 +871, 0x598e8fb75e3fceb4 +872, 0xdcca41ceba0fce02 +873, 0x61bf69212b56aae5 +874, 0x97eed7f70c9114fa +875, 0xf46f37a8b7a063f9 +876, 0x66c8f4ffe5bd6efa +877, 0xe43fd6efda2d4e32 +878, 0x12d6c799e5ad01de +879, 0x9ac83e7f8b709360 +880, 0xbbb7bb3c1957513d +881, 0x7f87c08d4b3796b0 +882, 0x9a7d1d74b6aa4a5c +883, 0xa4314530ff741b6f +884, 0x99a80c6b6f15fca8 +885, 0xd2fec81d6d5fc3ce +886, 0x15a98be1cc40cea +887, 0x98693eb7719366f3 +888, 0x36ccdc2a9e9d4de8 +889, 0x3c8208f63d77df25 +890, 0xca2e376e2343df6 +891, 0xcc9b17cbb54420c6 +892, 0x8724c44a64d7dcb8 +893, 0x9d00c6949ff33869 +894, 0xf4f8e584d2699372 +895, 0x88f4748cdd5a2d53 +896, 0xe215072a1205bc6d +897, 0x190934fe6d740442 +898, 0x7fac5c0ab2af106d +899, 0x1b86633a0bd84fa1 +900, 0x1293e54318492dfb +901, 0x433324fd390f34b9 +902, 0x4c5eb2c67a44643b +903, 0x59a6e281c388b0dd +904, 0xe78e03f9c44623b7 +905, 0x91307a93c768fc3d +906, 0xde8867b004d8e3ff +907, 0xdf52c3f57b7c5862 +908, 0x993f3e1d10358a92 +909, 0x9ccb10bc3e18662d +910, 0x45093ce48a114c73 +911, 0xd59d05979d26330a +912, 0x417c0e03300119a9 +913, 0x1c336500f90cde81 +914, 0x1c8ccd29ead9b85b +915, 0xb76baf3e55d4d950 +916, 0x133ad6196c75fd7e +917, 0x34200b0cde7ed560 +918, 0x9c7c3dacb213c8d9 +919, 0xd97563c4fd9bf1b6 +920, 0x5d910e871835b6cb +921, 
0x7d46c4733a16bdf9 +922, 0xe41d73194ddc87b2 +923, 0x7d3d8a0855a465a9 +924, 0x70c2a8b5d3f90c0f +925, 0x9e7565ca5dccfe12 +926, 0x2c0acb4577aa51b1 +927, 0x3d2cd211145b79c7 +928, 0x15a7b17aa6da7732 +929, 0xab44a3730c27d780 +930, 0xf008bd6c802bde3a +931, 0x82ed86ddf3619f77 +932, 0xaabe982ab15c49f9 +933, 0x9bcad8fa6d8e58a4 +934, 0x8f39ed8243718aa1 +935, 0xe9489340e03e3cb6 +936, 0xc722314f5eefb8d0 +937, 0x870e8869a436df59 +938, 0x4dae75b8087a8204 +939, 0xe1d790f6ec6e425b +940, 0xafd39ea1b1d0ed09 +941, 0xdf2c99e464ddf08f +942, 0x74936d859ab9644d +943, 0x3871302164250e73 +944, 0x764b68921e911886 +945, 0x2a1d024b26bb9d66 +946, 0x797fba43918e75b4 +947, 0x62ec6d24ccca335b +948, 0xf4bd8b951762b520 +949, 0x9d450dede9119397 +950, 0x5393a26d10f8c124 +951, 0x6b74769392896b57 +952, 0x7f61dbcc0e328581 +953, 0x64e1df3884d0d94 +954, 0xba77dcdf23738c37 +955, 0xf8e288bc0a177475 +956, 0x4a8abfd1702ecb7d +957, 0x53f22886694736a7 +958, 0x8fc982597ced3e3 +959, 0x1bc46090f820fff7 +960, 0x8bd31f965d02229f +961, 0x65cd0cb29996ee53 +962, 0x702e0f4fcf8c2e9f +963, 0x293b77bff307a9a0 +964, 0x125a986b8b305788 +965, 0x416b0eea428ebf3c +966, 0xeac85421ab0e8469 +967, 0x7f5496095019aa68 +968, 0x1a96d7afbc708e0 +969, 0xb91262e6766e01e1 +970, 0xd0a549cc4ccc6954 +971, 0x75a9a073f50c8a0d +972, 0xae275d2c1c6cd23c +973, 0xcf159b5ec5d28fd4 +974, 0x75d0838ce9b92b +975, 0xd4eddcee6dc4677f +976, 0x6a0a8ad5df6b75b8 +977, 0x6f3fd0ef0f13ecc4 +978, 0xb75a5826c1a8f8a8 +979, 0xd47098bbc7943766 +980, 0x3d4ddd62d5f23dd1 +981, 0x760a904e4583841c +982, 0x2afeb5022b4cf1f +983, 0x66d5f653729f0a13 +984, 0x9a6a5ab62980d30f +985, 0xc332f5643bbf8d5b +986, 0x848fb702e4056a90 +987, 0xa057beaf3f9e8c5f +988, 0x6cc603e4560a6c6a +989, 0xec761811a7b23211 +990, 0xb14aa4090a82aaa5 +991, 0xe29d9d028a5b2dbb +992, 0x5564e53738d68f97 +993, 0xfabca36542eaaf3b +994, 0xb9912fcb782020a2 +995, 0xe865e01b349284fd +996, 0x540b5ff11c5f9274 +997, 0x3463f64e1e7451dc +998, 0xe15d3e2f33b735f8 +999, 0xf5433336eadef6e diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64-testset-2.csv b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64-testset-2.csv new file mode 100644 index 0000000..70aebd5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x91959e5fb96a6332 +1, 0x3c1dd8a25a7e9f21 +2, 0x657bdffc99798d9e +3, 0x1a04de320b19e022 +4, 0x65b92af0e5f3c61c +5, 0x9c84070ce8f743c0 +6, 0xbb10e573693cdb25 +7, 0xd65ea9e76b37fb6b +8, 0x503efd0e76c8ae66 +9, 0xd711dcd04c26d0f +10, 0x12f53f435814ac8c +11, 0xb392cd402cfc82bd +12, 0x461764550e06c889 +13, 0x716a48b3514e6979 +14, 0xdd0a322213c18ad7 +15, 0x6673a8ca0a05c4d7 +16, 0x2992ef333437f844 +17, 0xc4aaf7e8240b2aad +18, 0x6ab0a1af1f41474f +19, 0xb0bae400c226941d +20, 0xe5f80c2eeeab48c6 +21, 0x3832c6a93a4024bf +22, 0x280bd824fabe8368 +23, 0x66b626228321e5ff +24, 0xe0bdfba5325a307e +25, 0x3a5f65c6ef254e05 +26, 0x99ea12503cb02f94 +27, 0x5d01fd2db77d420b +28, 0x6959bf5f36b2368d +29, 0xd856e30c62b5f5be +30, 0xe33233e1d8140e66 +31, 0xb78be619d415fa8d +32, 0x4f943bb2cc63d3b +33, 0x9b1460b290952d81 +34, 0x19205d794826740e +35, 0x64617bd9d7a6a1ff +36, 0x30442124b55ea76a +37, 0xebbbc3b29d0333fc +38, 0x39235a0fe359751c +39, 0xf9629768891121aa +40, 0x32052f53f366e05a +41, 0x60cc5b412c925bc8 +42, 0xf8b7ecda1c0e5a9 +43, 0x195f036e170a2568 +44, 0xfe06d0381a9ca782 +45, 0x919d89e8b88eebbf +46, 0xa47fb30148cf0d43 +47, 0x5c983e99d5f9fd56 +48, 0xe7492cdb6a1d42cd +49, 0xf9cfe5c865b0cfd8 +50, 0x35b653367bbc3b99 +51, 0xb1d92f6f4d4e440b +52, 
0x737e1d5bd87ed9c0 +53, 0x7a880ca1498f8e17 +54, 0x687dae8494f9a3f7 +55, 0x6bae1989f441d5d7 +56, 0x71ad3fa5a9195c2e +57, 0x16b3969779f5d03 +58, 0xd1bce2ac973f15b3 +59, 0xa114b1ee2ce0dcdd +60, 0x270d75c11eb1b8d5 +61, 0xc48ffa087c0a7bc +62, 0xaaf9dc48cda9848d +63, 0x8111cf10ef6e584d +64, 0x6736df6af40ee6f4 +65, 0x1a1a111682fbf98d +66, 0xeb217658e1cb3b5d +67, 0xcaf58a8b79de9dec +68, 0x25d0ffd63c88d7a1 +69, 0x4c498cd871b7f176 +70, 0x4069a6156eb0cf3c +71, 0xdf012f12edcdd867 +72, 0x7734c0ac8edb1689 +73, 0xed6960ac53dbc245 +74, 0x305e20da8868c661 +75, 0x5f0c7a3719956f95 +76, 0x66842bbe3b28895 +77, 0xb608bc9a31eac410 +78, 0xfcb17d5529503abd +79, 0x829ae5cbc29b92ee +80, 0x17f2f0027bc24f3a +81, 0x435926c33d8f44cc +82, 0x3ab899327098dbec +83, 0xaf78573b27f8ead8 +84, 0xa8b334fabcf8dc60 +85, 0xcdf3b366a6a303db +86, 0x8da9379dd62b34c8 +87, 0xb0ba511955f264a7 +88, 0x9d72e21a644f961d +89, 0xfac28382e2e7e710 +90, 0xd457065f048410aa +91, 0x1cae57d952563969 +92, 0x5a160a6223253e03 +93, 0x2c45df736d73c8bd +94, 0x7f651ebc6ad9cec5 +95, 0x77a6be96c7d2e7e7 +96, 0x1721fb1dbfd6546a +97, 0xf73f433ecff3c997 +98, 0xed1e80f680965bfe +99, 0x6705ad67a3003b30 +100, 0xac21134efcadb9f7 +101, 0x4d2ba0a91d456ac +102, 0x59da7b59434eb52b +103, 0x26c1d070fd414b5f +104, 0xed7079ddfce83d9a +105, 0x9277d21f88e0fb7a +106, 0xfae16b9a8d53d282 +107, 0xb08a0e2e405fdf7d +108, 0x2ea20df44229d6ec +109, 0x80e4634cd3612825 +110, 0xbe62e8aeba8f8a1a +111, 0x4981209769c190fb +112, 0xcec96ef14c7e1f65 +113, 0x73fe4457b47e7b53 +114, 0x1d66300677315c31 +115, 0xe26821290498c4cc +116, 0xf6110248fd8fb1c5 +117, 0x30fd7fe32dbd8be3 +118, 0x534ec9b910a2bd72 +119, 0x8f9bfe878bbf7382 +120, 0x4f4eb5295c0c2193 +121, 0xdeb22f03a913be9e +122, 0x40f716f8e2a8886c +123, 0xc65007d0e386cdb1 +124, 0x9bdd26d92b143a14 +125, 0xf644b0b77ea44625 +126, 0x75f5a53f6b01993a +127, 0xfe803e347bf41010 +128, 0x594bff5fa17bc360 +129, 0x3551edfb450373c7 +130, 0x898f9dad433615db +131, 0x923d2406daa26d49 +132, 0x99e07faccbc33426 +133, 0x7389f9ff4470f807 +134, 0xdc2a25957c6df90b +135, 0x33c6d8965ef3053f +136, 0x51a8f07e838f1ab +137, 0x91c5db369380274f +138, 0xc37de65ac56b207e +139, 0xfcc6d2375dde7f14 +140, 0xa4e6418bff505958 +141, 0x4b8b9f78e46953c4 +142, 0x255ab2e0f93cf278 +143, 0xdf650717af3d96ef +144, 0x2caa21cba3aae2b2 +145, 0xce7e46c6f393daa4 +146, 0x1d5b3573f9997ac7 +147, 0x5280c556e850847d +148, 0x32edc31bef920ad7 +149, 0xefaa6b0b08cf2c6 +150, 0x5151c99d97b111c5 +151, 0x35ccf4bf53d17590 +152, 0xa210d7bd8697b385 +153, 0xa9419f95738fbe61 +154, 0xdeccf93a1a4fdc90 +155, 0xd0ea3365b18e7a05 +156, 0x84122df6dcd31b9a +157, 0x33040a2125cea5f5 +158, 0xfe18306a862f6d86 +159, 0xdb97c8392e5c4457 +160, 0xc3e0fa735e80e422 +161, 0x7d106ff36467a0c1 +162, 0xb9825eecc720a76d +163, 0x7fefc6f771647081 +164, 0xf5df3f5b3977bf13 +165, 0x18fb22736d36f1e0 +166, 0xadc4637b4953abfc +167, 0x174e66d3e17974bd +168, 0xf1614c51df4db5db +169, 0x6664ecde5717b293 +170, 0xd5bc5b6839265c26 +171, 0xf6ca9ce1af3f1832 +172, 0xca696789a9d506ea +173, 0x7399c246c8f9d53 +174, 0xadf49049626417e2 +175, 0xbcd84af37d09ab91 +176, 0xbb41c177f3a3fa45 +177, 0x592becc814d55302 +178, 0xa88b4e65f6cfe5f7 +179, 0xa0a55e34ff879426 +180, 0x3c2ea6aa725b42b7 +181, 0x65ac4a407b1f9521 +182, 0xde63d53f7e88b556 +183, 0x18bc76696d015f40 +184, 0xd1363f2cd4c116a8 +185, 0x2fe859be19a48e4a +186, 0x83d6099b1415e656 +187, 0x43f2cbc1a4ee6410 +188, 0xb2eca3d3421c533d +189, 0xc52b98ea3f031f5d +190, 0xfe57eb01da07e9d1 +191, 0xf9377883537a6031 +192, 0x364030c05dac7add +193, 0x6815cb06b35d4404 +194, 0xceae2d4ce31894be +195, 0xc602bcdf6062bf6a +196, 
0xc8e4bd8dcc6062e3 +197, 0x9c29e87b92a1a791 +198, 0x41e626b871ca9651 +199, 0x325c3d1fb8efbcd8 +200, 0x7dbbacf8e3419fb3 +201, 0x3602e72516bb7319 +202, 0x537a008ebd94d24b +203, 0xda7714fc9d4d161d +204, 0x1c8c73700e1b621b +205, 0x2749b80937d6c939 +206, 0x76ee6abac5b14d33 +207, 0xf18d1e92cb6a8b5c +208, 0x6ce9579d9291c721 +209, 0x60523c745a40e58 +210, 0x637f837fcc901757 +211, 0x2ff71b19661dc5b3 +212, 0x393ab586326ad16f +213, 0xa0970ea30fe742b7 +214, 0x570222d7f27fe5ae +215, 0x3b5806d43fd38629 +216, 0x129a0ad7420180c5 +217, 0x1c4726355778d52c +218, 0x7c1459cf77656499 +219, 0xfe038a0932132069 +220, 0x4c4cc317a937483a +221, 0xa333d24067e926ba +222, 0x401d9b6ab37f6ef2 +223, 0x87ad0e491ebe4a2a +224, 0xfc02f312e72d121d +225, 0xfde715b3b99767b2 +226, 0xd111c342ba521c92 +227, 0x83b221b10879c617 +228, 0x6a1bf5c01fdf4277 +229, 0x166bfc0c3f5892ee +230, 0x4608d556d7c57856 +231, 0x8d786857c95ece49 +232, 0x2d357445a1aca4ac +233, 0x79620dae28ecd796 +234, 0x90e715dc0f2201c4 +235, 0x173b68b4c9f4b665 +236, 0x4e14d040ebac4eef +237, 0xbd25960b4b892e +238, 0x911a199db6f1989d +239, 0xfe822d7c601fd2e0 +240, 0x9b4c1d58d8223a69 +241, 0x907c1891283843b0 +242, 0xf4868bf54061c4b2 +243, 0x17f8cd1fc24efd85 +244, 0xd44253f9af14c3aa +245, 0x16d0da0cb911d43c +246, 0x3c6a46615828e79a +247, 0x498591c1138e11a5 +248, 0xcc0f26336d0d6141 +249, 0x4d3ebc873212309a +250, 0x16bad7792d5c2c6a +251, 0x474215a80b2bbd11 +252, 0x7159848abd8492fc +253, 0x359341c50973685f +254, 0x27512ee7bf784a4a +255, 0x45228ea080f70447 +256, 0x880cab616500d50e +257, 0x12fae93f9830d56e +258, 0x6744ee64348d9acd +259, 0x484dada28cd2a828 +260, 0x98491d0729e41863 +261, 0x2f15aac43c2863b0 +262, 0x5727a34d77a1da0f +263, 0xa435cebef6a62eed +264, 0xd211697d57b053b0 +265, 0x65aa757b68bd557 +266, 0xe3a1b7a2d8a3e06a +267, 0x2adf64e67252a7a9 +268, 0xadadcb75cadee276 +269, 0x7934bc57ac8d97bf +270, 0xccff0d0f412e0606 +271, 0x101a82aa3e8f3db9 +272, 0xb0f2498094b4575c +273, 0xba2561d9ef26ed8a +274, 0xfbcd1268fc3febe1 +275, 0x9aa10bb19eb152e0 +276, 0xf496217a601a6d72 +277, 0xe4be1e4f2fa91363 +278, 0x473a602bf3dd68eb +279, 0xfe8ed2a48c26f4b5 +280, 0x20e94b1a00159476 +281, 0x93e1cb1c6af86ec7 +282, 0x4fcba3898f7442ba +283, 0x5150c3a3d94891df +284, 0x91cfce6c85b033ea +285, 0x625e8a832a806491 +286, 0x28c97ba72e3ec0b2 +287, 0x8e172de217c71ea1 +288, 0x926b80216c732639 +289, 0x28b19431a649ae3d +290, 0x57c039a6e95a3795 +291, 0xfbc354182fe52718 +292, 0x819dfd7c7d534cef +293, 0xabb4093a619ed44f +294, 0xe785b7ac6f656745 +295, 0xb647b4588b2f942f +296, 0x64cf870a14c72d27 +297, 0x6d4a4a2a0ba9b37e +298, 0x78bfb0427d7ce6b0 +299, 0x8dcc72b8bfc79ac6 +300, 0x1c14d915d5e76c99 +301, 0xaf48ddea6f096d79 +302, 0x51b39b67aa130d8 +303, 0x1aeeb39d4def06de +304, 0xd678092ffedfdd27 +305, 0x8f54787f325111d3 +306, 0xf2ca2e827beaa6bc +307, 0x339d134099e98545 +308, 0x1f6a8a7b33942e43 +309, 0x952c8065dbef669a +310, 0xe066aeb6690147f7 +311, 0xed25aa92cf58ebb6 +312, 0x7601edce215ef521 +313, 0xed1c5b396abd9434 +314, 0x4fd1e407535de9d5 +315, 0xccc8315a0d4d1441 +316, 0x85753e250bb86976 +317, 0xf232e469378761c3 +318, 0x81d691b8e9aef3c6 +319, 0x224a2f9cab0ad0e +320, 0x978f3d3e50007f4e +321, 0xd3713e6a6c0cbe60 +322, 0xcce8f1eadd41f80d +323, 0x34bda028a97d469 +324, 0x90e242fdf0f59183 +325, 0x4d749754fbc5f092 +326, 0x4399f5b7851cc87b +327, 0xcb921a5f25f6c5d7 +328, 0x120bf5d0162101 +329, 0x1304cc2aa352735a +330, 0xf7236c5d0d5d417b +331, 0xc31b320fc1654306 +332, 0xb468c6b23f3fb4e7 +333, 0xb5985b5bfaca4166 +334, 0x898285a1cd2f8375 +335, 0xa13493da372aa7c9 +336, 0x15c80c09c12634e7 +337, 0x9b765c5cc9d438bd +338, 
0xee7da816a9201dcb +339, 0x92e269f73b5a248e +340, 0xa8086c5de81400ce +341, 0xe0053901853d42be +342, 0x821df32c012f433e +343, 0x17a6d69ca37387c7 +344, 0x2b10044bfba3501f +345, 0x8dfd262afc2e8515 +346, 0xd68c2c7b60226371 +347, 0xe81ac114e4416774 +348, 0x5896d60061ebc471 +349, 0xa996e3147811dbd1 +350, 0xa819c7b80ecb3661 +351, 0x982ad71b38afbc01 +352, 0xab152b65aa17b7fe +353, 0x4582bc282ef187ef +354, 0xab5a17fe8d9bc669 +355, 0x83664fa9cb0284b7 +356, 0x234c4b0091968f52 +357, 0x8ab5f51805688d37 +358, 0xe9e11186e0c53eda +359, 0x10df37ef1de2eccf +360, 0x780f1b0d52db968f +361, 0x50bd4ff292872cd5 +362, 0x51e681c265f5ad0 +363, 0x842c49660a527566 +364, 0x6e56ee026e9eda87 +365, 0x4cf39e40d8c80393 +366, 0x13e466df371f7e1f +367, 0xf2ce1799f38e028e +368, 0x833c8db7adc6ff0e +369, 0xc6e189abc2ec98f +370, 0xafebb3721283fec5 +371, 0xb49bc1eb5cc17bdc +372, 0xf1d02e818f5e4488 +373, 0xe5e9d5b41a1dd815 +374, 0xce8aca6573b1bfe5 +375, 0x9b0a5d70e268b1d5 +376, 0xf3c0503a8358f4de +377, 0x2681605dd755669d +378, 0xea265ca7601efc70 +379, 0xa93747f0a159439f +380, 0x62a86ede78a23e50 +381, 0xac8a18935c3d063c +382, 0x729c0a298f5059f5 +383, 0xbbf195e5b54399f4 +384, 0x38aa9d551f968900 +385, 0x3b3e700c58778caa +386, 0x68e6e33c4443957a +387, 0x7c56fc13eb269815 +388, 0xaf7daca39711804a +389, 0x50fde6d10f9544b3 +390, 0xf3d37159f6f6c03d +391, 0x82d298f5c1a71685 +392, 0x478661ac54c5002c +393, 0x6053768e1a324ae0 +394, 0xde8fb4a7e56707ea +395, 0xaa2809301faa8cf4 +396, 0x690a8d49fedd0722 +397, 0xe17c481b9c217de9 +398, 0x60d1d8a2b57288e3 +399, 0x149adfaadc6b0886 +400, 0xa3c18b6eb79cd5fa +401, 0x5774e3a091af5f58 +402, 0x2acca57ff30e5712 +403, 0x94454d67367c4b0c +404, 0x581b2985ac2df5ca +405, 0x71618e50744f3e70 +406, 0x270a7f3bd9a94ae6 +407, 0x3ef81af9bb36cd7b +408, 0x8a4a2592875254aa +409, 0x704ac6086fbb414a +410, 0xda774d5d3f57414d +411, 0xe20d3358b918ae9e +412, 0x934a6b9f7b91e247 +413, 0xf91649cde87ec42c +414, 0x248cec5f9b6ced30 +415, 0x56791809fd8d64ba +416, 0xf502b2765c1395f +417, 0x6b04ec973d75aa7f +418, 0xb0339f2794bb26f +419, 0x4c524636efbaea49 +420, 0x6bbf3876e9738748 +421, 0xf686524e754e9e24 +422, 0x8dafa05a42d19cd3 +423, 0xc5f069ab2434008e +424, 0x4fd64cc713cba76 +425, 0xdbf93450c881ed5f +426, 0x492e278ebabb59a2 +427, 0x993fddfde4542642 +428, 0xecde68a72c8d4e52 +429, 0xe0760b3074c311fd +430, 0x68dc0e7e06528707 +431, 0x52b50edf49c0fdc7 +432, 0xb2bd4185c138f412 +433, 0x431496d7e1d86f3 +434, 0xa4e605b037e26c44 +435, 0x58236ae1f0aca2b5 +436, 0x26c72c420fc314d8 +437, 0x20134e982ab99a2b +438, 0x544b59b8b211374b +439, 0x1301c42f3a14d993 +440, 0x52a6ea740f763b0f +441, 0xf209d70c2bebf119 +442, 0xac66a4ebc2aa1be +443, 0x683713ed35878788 +444, 0x2b5578acec06b80c +445, 0x86428efa11c45b36 +446, 0xb49010adb17d291e +447, 0x73b686bd8664b6be +448, 0x6d28ebf57b6884cc +449, 0x9712091230ff58d9 +450, 0xc9c91f74c38b286 +451, 0x776310ac41dc008e +452, 0x2f3739df0bf6a88e +453, 0x5792dc62b94db675 +454, 0x5715910d024b06af +455, 0xeb1dd745458da08 +456, 0xfce7b07ccfa851a7 +457, 0xc305f1e983ac368 +458, 0x485aa9519ac00bb0 +459, 0xa5354f6589fb0ea0 +460, 0x32fee02dfdbf4454 +461, 0x4d1ddc304bbefaaa +462, 0x789a270a1737e57e +463, 0x9f3072f4b1ed8156 +464, 0x4de3c00e89058120 +465, 0xb00a02529e0a86fa +466, 0x539f6f0edd845d9a +467, 0x85e578fe15a8c001 +468, 0xa12c8e1a72cce7d8 +469, 0xc6908abbc2b1828 +470, 0xcf70090774cbb38c +471, 0x3b636a6977b45d4a +472, 0xf0a731b220680b57 +473, 0x18973929f51443a8 +474, 0xe93e1fbe7eadabe +475, 0x8233730f0a6dfa02 +476, 0x66e50b6919b0ab74 +477, 0xb1aba87c97fd08a2 +478, 0xd4dffc1fbc117ad6 +479, 0x6f7fa65724b96e6a +480, 
0x4bd5800dee92e0fa +481, 0xe18a959db6256da +482, 0xe53a291bc66df487 +483, 0xb7ec306a08651806 +484, 0x1847a6b80d2821e1 +485, 0xda50391283b14d39 +486, 0xacc4d3cd7cceb97a +487, 0x57f70185165b7bc6 +488, 0x302b6d597c3aaba7 +489, 0xa47f32d037eab51e +490, 0xe1509b4408abc559 +491, 0x4f30a1d7c2934157 +492, 0x2ad03e6c60b650b2 +493, 0x334d9c337b0a9064 +494, 0xc7f442821e7aac12 +495, 0xbcdeb09298694cdd +496, 0xe42402389f8f0fb4 +497, 0xe5de56af539df727 +498, 0x7017f9b2101ee240 +499, 0x1ee5e68d5b10001d +500, 0x436229051836387a +501, 0xcd532d6d6ec38fb7 +502, 0x30a66606fdf38272 +503, 0xfdaa2ab9cf798496 +504, 0x4277b4adec70e7df +505, 0x72cfc30256e0eaef +506, 0x3c3359fd9bd34917 +507, 0xb7aa89598856efb0 +508, 0xf72226f8bf299ef5 +509, 0x258c499275a4356f +510, 0x999a56bfc7f20d76 +511, 0x2b3e7432e20c18b +512, 0x2d1251332f760cb5 +513, 0x7420e0eea62157c5 +514, 0xe85c895aa27cec3d +515, 0x27a0545c7020d57c +516, 0xc68638a65b4fff0d +517, 0xfda473983a4ea747 +518, 0xd19fe65fb4c06062 +519, 0x6b1374e050ee15e4 +520, 0x80065ecd49bc4bef +521, 0x4ee655954bc838de +522, 0xe8fb777504a72299 +523, 0x86b652ea70f4bdde +524, 0xcdc9e0fbde7e4f33 +525, 0x352c0a50cd3ac56 +526, 0x4b8605d368be75dc +527, 0x1ac9ea8129efbc37 +528, 0x470325faa99f39c5 +529, 0x25dd7ef9adccf7a1 +530, 0x5ae2c7a03e965816 +531, 0xf733d2df59dacc7d +532, 0xa05bbf0a8a1a7a70 +533, 0xe8aa3f102846ef5f +534, 0xc9b85ec49ae71789 +535, 0xb904c14ed1cb1936 +536, 0x5ae618230b5f0444 +537, 0x97987fe47b5d7467 +538, 0xabb3aca8865ca761 +539, 0x38bfdf29d4508228 +540, 0x353654f408353330 +541, 0xeb7e92930ae4ef0d +542, 0xec50f1a7ca526b96 +543, 0xd5e2dc08b5697544 +544, 0x24c7fd69d5ec32df +545, 0x6f7e1095568b8620 +546, 0x6ed9c16ca13b3c8 +547, 0xe676ef460002130f +548, 0xa3a01a3992c4b430 +549, 0xe2130406c3b1f202 +550, 0xa8f7263e2aedcd20 +551, 0xc45d71ef2e35f507 +552, 0x37155594021da7ba +553, 0x22dc94f19de73159 +554, 0x7969fc6bffc5443f +555, 0x97def7e44faa6bfe +556, 0x8b940f5e8931d71f +557, 0xd95b1dd3f1a3fdd5 +558, 0x1c83bfdca615701a +559, 0xb7fcb56279ceca6b +560, 0xd84f8950f20dcd0 +561, 0xb03343698de3cbe0 +562, 0xf64565d448d71f71 +563, 0xda52b4676e0ae662 +564, 0xda39c2c05b4ffb91 +565, 0xb35e2560421f6a85 +566, 0x1a7b108d48ac3646 +567, 0xc4e264dc390d79ed +568, 0xa10727dfd9813256 +569, 0x40d23154e720e4f7 +570, 0xd9fa7cd7e313e119 +571, 0xcbf29107859e6013 +572, 0xc357338553d940b7 +573, 0x2641b7ab0bdfcbaa +574, 0xd12f2b6060533ae7 +575, 0xd0435aa626411c56 +576, 0x44af4a488a9cec72 +577, 0xb934232ea8fa5696 +578, 0x760a8b12072b572d +579, 0xfab18f9942cfa9b3 +580, 0x5676834c1fe84d16 +581, 0x9c54e4fddb353236 +582, 0xab49edfc9551f293 +583, 0x567f1fb45a871d +584, 0x32a967c873998834 +585, 0x99240aad380ef8d1 +586, 0x7f66cbd432859a64 +587, 0x4cdc8a4658166822 +588, 0x984e3984a5766492 +589, 0xa3b2d0a3d64d3d94 +590, 0x177f667172f2affc +591, 0xb1a90607a73a303f +592, 0xe600b6c36427f878 +593, 0xf758f9834cb7f466 +594, 0x8ee9fce4a3f36449 +595, 0xcb8f11533e7da347 +596, 0xe7cf647794dabd7c +597, 0xc9d92cfe6110806 +598, 0xea1335fa9145a1ec +599, 0xbc6c29821d094552 +600, 0x37b9d6a858cc8bc3 +601, 0xf24e4c694929893e +602, 0x55d025ce2d7d0004 +603, 0xccdc69acccf4267b +604, 0xc491c04340c222eb +605, 0xba50f75ecec9befb +606, 0x1ec7bd85b8fe3bb9 +607, 0xe4de66498c59ae8a +608, 0x38aa9e912712c889 +609, 0xcee0e43c5cc31566 +610, 0x72b69aa708fc7ed +611, 0xdff70b7f6fa96679 +612, 0xd6d71d82112aadc3 +613, 0x365177892cb78531 +614, 0xa54852b39de4f72c +615, 0x11dd5832bf16dd59 +616, 0x248a0f3369c97097 +617, 0xa14cec0260e26792 +618, 0x3517616ff142bed1 +619, 0x9b693ad39dab7636 +620, 0x739dff825e994434 +621, 0x67711e7356098c9 +622, 
0xa81f8515d2fdf458 +623, 0xdac2908113fe568e +624, 0xe99944ebc6e2806a +625, 0x671728ca5b030975 +626, 0xfdad20edb2b4a789 +627, 0xedc6e466bd0369d2 +628, 0x88b5d469821f7e1b +629, 0x2eabf94049a522a5 +630, 0x247794b7a2f5a8e3 +631, 0x278942bdbe02c649 +632, 0xbe5a9a9196ab99c1 +633, 0x75955060866da1b5 +634, 0xdedcfa149273c0b5 +635, 0xdbeb7a57758f3867 +636, 0x7b9053347a2c8d5a +637, 0xa059b3f2eed338a5 +638, 0x59401a46ded3b79f +639, 0x38044ba56a6d19fb +640, 0x72c7221b4e77e779 +641, 0x526df3491a3a34da +642, 0xc3b31184ba16c0c2 +643, 0xd94c7144488624af +644, 0xcf966ee4dc373f91 +645, 0x62049e65dd416266 +646, 0x7c2adccb925bf8f +647, 0xd5fa5c22ed4ef8e1 +648, 0xd00134ebd11f2cd1 +649, 0xfbdf81767bed3634 +650, 0x62e8cc8ff66b6e26 +651, 0x3a72d6bcd4f2dcf7 +652, 0xf1cd45b1b46a86ed +653, 0x1271f98e0938bb9a +654, 0x82e6927e83dc31fa +655, 0x7b9b0e0acb67b92d +656, 0x6df503e397b2e701 +657, 0x93888f6fb561e0c3 +658, 0x393fb6069a40291 +659, 0x967a7d894cc0754d +660, 0x6e298996ad866333 +661, 0x5ff3cf5559d6ab46 +662, 0xd0d70508c40349f5 +663, 0xc64c66c0dd426b33 +664, 0x8fea340ee35c64dd +665, 0xf9cd381eb3060005 +666, 0xfcc37c2799fc0b11 +667, 0x6a37c91d65b489fa +668, 0x57231000fa0a0c9d +669, 0x55f6e292c6703f9a +670, 0xd0508ffbfa55a7a6 +671, 0x885db543276bdac8 +672, 0xc26dbe6a26b0e704 +673, 0x21f884874ebd709e +674, 0x711f0b6c8f732220 +675, 0x354d0a361eaee195 +676, 0x721344d8d30b006a +677, 0xa0e090a0d3a56f07 +678, 0x16b3d5d823a4952b +679, 0x59d7874bc9eae7b6 +680, 0x9bbb32710076455f +681, 0xd4fb22242ffabafd +682, 0xe1d4ac6770be1d89 +683, 0xb259cedebc73dc8a +684, 0x35faaa3b4246ab69 +685, 0x5d26addefdaee89 +686, 0x8e7ec350da0f3545 +687, 0xd0f316eed9f8fc79 +688, 0x98b2a52c9bf291b2 +689, 0xe4d294a8aca6a314 +690, 0x25bd554e6aa7673c +691, 0xcfde5dcba5be2a6c +692, 0xb5e01fb48d2d2107 +693, 0xe1caf28948028536 +694, 0xd434aa0a26f3ee9b +695, 0xd17723381641b8f6 +696, 0xfe73bd1f3f3768a2 +697, 0x1cc6b1abd08d67e9 +698, 0x247e328371a28de0 +699, 0x502e7942e5a9104a +700, 0x6a030fd242eb4502 +701, 0xa2ffe02744014ce8 +702, 0x59290763b18fe04e +703, 0xcf14241564271436 +704, 0xb0fb73c3c1503aff +705, 0x94e27c622f82137a +706, 0x747a5b406ac3e1f0 +707, 0x9a914e96a732031d +708, 0x59f68c6c8f078835 +709, 0x809d012c73eb4724 +710, 0x5b3c3b73e1b37d74 +711, 0xdde60ef3ba49cdf7 +712, 0x87a14e1f9c761986 +713, 0x4109b960604522af +714, 0x122d0e1ed0eb6bb9 +715, 0xadc0d29e80bfe33 +716, 0xa25b1b44f5fc8e4e +717, 0xbab85d8a9b793f20 +718, 0x825f4cbced0e7d1e +719, 0x2d6ae8807acb37ea +720, 0x8234420adce2e39 +721, 0x4a8ad4da6b804807 +722, 0x1e19f9bc215e5245 +723, 0x1d6f4848a916dd5e +724, 0x9ac40dfcdc2d39cc +725, 0x9f3524e3086155ec +726, 0x861fffc43124b2ef +727, 0xe640e3b756396372 +728, 0x41cb0f0c5e149669 +729, 0xe0bd37e1192e4205 +730, 0x62917d3858f4ce47 +731, 0xa36e7eb4d855820a +732, 0x204b90255a3bf724 +733, 0x66ee83a0175535bc +734, 0x2c14ce7c6b0c1423 +735, 0x85d9495fa514f70d +736, 0x5a4fe45ead874dbc +737, 0xe72248dcb8cfc863 +738, 0xfc21ff2932ed98cd +739, 0xcbba1edd735b5cad +740, 0x91ddc32809679bf5 +741, 0x192cdf2c7631ea1f +742, 0xbbc451ddf2ea286f +743, 0xad9e80cae2397a64 +744, 0x6918f0119b95d0e5 +745, 0xa40379017a27d70a +746, 0x1aaeddb600e61e1 +747, 0x15afd93cbd7adda9 +748, 0x156719bc2b757ff4 +749, 0x13d9a59e2b2df49d +750, 0x9a490986eaddf0a +751, 0xef9a350f0b3eb6b4 +752, 0x5de7f6295ba4fa4d +753, 0x7f37fd087c3fdb49 +754, 0xa9fe3749d6f3f209 +755, 0x50912ac036d9bfb +756, 0x982cb4d726a441f8 +757, 0x8ca8d8af59b872d0 +758, 0x7f8adfb0ceeade8a +759, 0xdad390ec742be44 +760, 0xa637944d0045be5b +761, 0x3569a3b3af807061 +762, 0x9599da8eae14511d +763, 0xc333e8d19589b01a +764, 
0xfb9b524a20b571e1 +765, 0xbd9dc8b37ce5c3e1 +766, 0x142333005fa389ac +767, 0x1368bc37cd5bcce1 +768, 0x16094907ad6ecf73 +769, 0xb32c90dbba4c1130 +770, 0x82761d97c1747dd0 +771, 0x599f9f267ae3444d +772, 0x79ad3382994852e1 +773, 0x2511f06d9ef06e54 +774, 0xb35e6ab7d5bbddae +775, 0xfca9fa83a2988732 +776, 0x7d4350f0394ac3ba +777, 0xa52a9527bb176ea3 +778, 0xb49fa0ceb2aa8353 +779, 0x1f62e504d1468cc0 +780, 0xe1a77bfccce6efc3 +781, 0x776cdff4dc0d6797 +782, 0x56612e39b652c1f2 +783, 0x5f096a29294eda04 +784, 0x7978abc3aabd8b23 +785, 0x79dd875e0485b979 +786, 0x8a98aa4d5735d778 +787, 0xcca43940f69d2388 +788, 0xb2d4b156f144f93a +789, 0xbd528a676e9a862 +790, 0x2a394939c8e7ec5e +791, 0xb1da900c6efe4abc +792, 0x9869af479de4c034 +793, 0x78dbdfb88ac7c1db +794, 0x18cb169143088041 +795, 0xe69e5461c51a3e13 +796, 0x5389fa16ea98183c +797, 0xed7c80d1be1ea520 +798, 0x87246fc359758ced +799, 0xab323eba95fae4ed +800, 0xbc4c0dde7f8a1828 +801, 0xdb739f7955610b1a +802, 0xecd8c68c3434cc +803, 0x138c2eb88c477f44 +804, 0x28a65f96727aae41 +805, 0xdee879f2cf5629d +806, 0x684f0c90ef20070f +807, 0xa24a819ef5621800 +808, 0x8d0054f870e4fdcb +809, 0x99e8c6e695b600b +810, 0x50b705245891f7c3 +811, 0xc02eed3a6e58e51a +812, 0x443d64e95443606c +813, 0xca24959cfbd2d120 +814, 0xe072609ea48815bc +815, 0xbcc715026590315b +816, 0x3e76df24d7aa5938 +817, 0xd8ff04940d9b79ae +818, 0x54474ce790059bcd +819, 0x278390dd6aa70e81 +820, 0xf4df619fe35414e4 +821, 0x757d71270264e615 +822, 0x1e8a373699c11b23 +823, 0xef68c82046e67dd6 +824, 0xe280006599972620 +825, 0x234e095183b0f4d6 +826, 0xe3b7560ed9839749 +827, 0xcd5ec4086572332e +828, 0xc41c0d4aaa279108 +829, 0x4b9cd6126bc16a6d +830, 0x4a7252734f3e3dd0 +831, 0xb3132df156cc103a +832, 0xf9e4abbf7b64464a +833, 0xf936df27fb3c47b7 +834, 0x9142960873f6d71a +835, 0x4ba6aa3235cdb10d +836, 0x3237a2e765ba7766 +837, 0xd62f0b94c8e99e54 +838, 0x26b682f90a3ae41b +839, 0x40ad5e82072b6f81 +840, 0xd0198101f5484000 +841, 0xe4fac60ba11c332 +842, 0x472d0b0a95ef9d38 +843, 0x8512557aec5a3d8f +844, 0xef83169d3efd4de9 +845, 0x53fe89283e7a7676 +846, 0x2f50933053d69fc4 +847, 0x76f5e4362e2e53a2 +848, 0x8676fdccce28874a +849, 0x2737764c1fb1f821 +850, 0x4a6f70afc066ab55 +851, 0x27f8e151e310fca4 +852, 0xd606960ccbe85161 +853, 0xcce51d7ddd270a32 +854, 0xb4235999794875c2 +855, 0x580084e358e884 +856, 0x2159d5e6dc8586d7 +857, 0x87bd54d8599b3ba4 +858, 0x3e9ade6a2181664 +859, 0x5e6e140406d97623 +860, 0x511545d5aa0080a2 +861, 0xf49d78ed219aac57 +862, 0xbece1f9c90b8ea87 +863, 0x1c741cac36a2c514 +864, 0x7453c141047db967 +865, 0xd751832a5037eba2 +866, 0x71370a3f30ada1f7 +867, 0x7c01cf2dcb408631 +868, 0x1052a4fbdccc0fa1 +869, 0x13d525c9df3fb6c +870, 0xa3aa8dbfee760c55 +871, 0xc0288d200f5155cf +872, 0x79f4bcd12af567c3 +873, 0x8160d163bb548755 +874, 0x5cf2995fb69fd2df +875, 0xcc98ed01396639df +876, 0xad95f1d9cfc8256e +877, 0xa3df27d9fbdbfb9d +878, 0x83e5f5dda4d52929 +879, 0x9adc05043009f55b +880, 0xdfe8329dfde1c001 +881, 0x9980ccdd5298e6a2 +882, 0x636a7bd134f6ef56 +883, 0xef5ff780c4be6ba4 +884, 0x290d71dc77a56d16 +885, 0x6d65db9ff58de1e6 +886, 0x944b063b3805a696 +887, 0xce468ca2cce33008 +888, 0x5ba1ccb840f80f48 +889, 0x28ddce36fc9ad268 +890, 0x4f77ef254d507a21 +891, 0xce9b4057fadf3ab +892, 0xb518bc68298730e6 +893, 0xd2eb5b8e2ec665b0 +894, 0xe1583303a4f87344 +895, 0x9d5a0df4fbe1bed5 +896, 0x2ba9bc03ec8cfd07 +897, 0x479ed880a96ca669 +898, 0xcedf96338324771a +899, 0x312f4fc2da41ffaa +900, 0xa0eb9cf23b5e1ed8 +901, 0xf8f88f975dc3f539 +902, 0x4a37e185d0e96e0f +903, 0xf829654a5c0b46f9 +904, 0x3909cca7a7f8c7fb +905, 0x4c2e1d66ceb45105 +906, 
0xaffaa19e1db8af87 +907, 0x9ec498246bd18c76 +908, 0x21d51558edc089da +909, 0xe8984112cd1b1561 +910, 0x7de1d2cf54b0c0e1 +911, 0xa06729aed50bfb9d +912, 0xcf19f733e5db19e1 +913, 0x70edf2624ab777cd +914, 0x46685becad10e078 +915, 0x825e0f6add46785 +916, 0x66d4af3b15f70de4 +917, 0xc676614b0666b21 +918, 0x282a916c864f5cb7 +919, 0x2707283a3f512167 +920, 0x37ff3afda7461623 +921, 0xc767eb1205e4ca86 +922, 0x46b359aecc4ea25b +923, 0x67fbbb797a16dbb1 +924, 0x64fd4ba57122290e +925, 0x8acc2a8ae59d8fac +926, 0x64a49298599acc67 +927, 0xedf00de67177ce30 +928, 0x1ea9d8d7e76d2d2c +929, 0x363fcac323f70eb2 +930, 0x19e6e3ec8a9712eb +931, 0xca541e96b0961f09 +932, 0x4d8fd34c2822ec46 +933, 0x2fdd56a50b32f705 +934, 0xaac2fcf251e3fd3 +935, 0xb0c600299e57045c +936, 0xd951ec589e909e38 +937, 0x4dc8414390cae508 +938, 0x537ef9d5e2321344 +939, 0xa57bc21fd31aa2dc +940, 0xa3a60df564183750 +941, 0xbe69a5ce2e369fb6 +942, 0x7744601f4c053ec8 +943, 0x3838452af42f2612 +944, 0xd4f0dad7115a54e9 +945, 0x629cf68d8009a624 +946, 0x2211c8fa34cb98cb +947, 0x8040b19e2213db83 +948, 0xb2a86d3ba2384fd +949, 0x4b85cec4f93f0dab +950, 0xc8d212d21ea6845d +951, 0x5b271a03a4fe2be0 +952, 0xff4f671319ad8434 +953, 0x8e615a919d5afa96 +954, 0xea7f47c53161160a +955, 0x33273930b13c6efc +956, 0x98eedda27fb59c3c +957, 0x188dc5e92e939677 +958, 0x9dbd0fa0911430f1 +959, 0x5b3dcf3fa75dfd2b +960, 0x3f03846febdb275d +961, 0x20cc24faea9e9cf6 +962, 0x854f3ac66199ff5d +963, 0x31169ac99d341e6f +964, 0xa85daed3c0bc1bbe +965, 0x64633711e71ba5dd +966, 0x530e79978dc73334 +967, 0x636f2ee6e20aef13 +968, 0xf6220f8b6d9a58fb +969, 0x425db8fa32141a7b +970, 0xac7c210f4b02be95 +971, 0x5fe8cfbe197a7754 +972, 0xfff7d40c79420ea +973, 0x5f8bab9ef4697b77 +974, 0xaf6fe54e45b23fe8 +975, 0xce79456ccc70bbce +976, 0x645ef680f48f1c00 +977, 0xa4dfac46e2028595 +978, 0x6bece4c41effc5df +979, 0xd316df886442641f +980, 0xa4f6ff994edd2a6 +981, 0x30281ae3cc49abe4 +982, 0x39acb7b663dea974 +983, 0x5e8829b01a7c06fb +984, 0x87bdb08cf027f13e +985, 0xdfa5ede784e802f6 +986, 0x46d03d55711c38cc +987, 0xa55a961fc9788306 +988, 0xbf09ded495a2e57a +989, 0xcd601b29a639cc16 +990, 0x2193ce026bfd1085 +991, 0x25ba27f3f225be13 +992, 0x6f685be82f64f2fe +993, 0xec8454108229c450 +994, 0x6e79d8d205447a44 +995, 0x9ed7b6a96b9ccd68 +996, 0xae7134b3b7f8ee37 +997, 0x66963de0e5ebcc02 +998, 0x29c8dcd0d17c423f +999, 0xfb8482c827eb90bc diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64_np126.pkl.gz b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64_np126.pkl.gz new file mode 100644 index 0000000..94fbceb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/random/tests/data/sfc64_np126.pkl.gz differ diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_direct.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_direct.py new file mode 100644 index 0000000..6f069e4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_direct.py @@ -0,0 +1,592 @@ +import os +import sys +from os.path import join + +import pytest + +import numpy as np +from numpy.random import ( + MT19937, + PCG64, + PCG64DXSM, + SFC64, + Generator, + Philox, + RandomState, + SeedSequence, + default_rng, +) +from numpy.random._common import interface +from numpy.testing import ( + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) + +try: + import cffi # noqa: F401 + + MISSING_CFFI = False +except ImportError: + MISSING_CFFI = True + +try: + import ctypes # noqa: F401 + + MISSING_CTYPES = False +except ImportError: + 
MISSING_CTYPES = True + +if sys.flags.optimize > 1: + # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 + # cffi cannot succeed + MISSING_CFFI = True + + +pwd = os.path.dirname(os.path.abspath(__file__)) + + +def assert_state_equal(actual, target): + for key in actual: + if isinstance(actual[key], dict): + assert_state_equal(actual[key], target[key]) + elif isinstance(actual[key], np.ndarray): + assert_array_equal(actual[key], target[key]) + else: + assert actual[key] == target[key] + + +def uint32_to_float32(u): + return ((u >> np.uint32(8)) * (1.0 / 2**24)).astype(np.float32) + + +def uniform32_from_uint64(x): + x = np.uint64(x) + upper = np.array(x >> np.uint64(32), dtype=np.uint32) + lower = np.uint64(0xffffffff) + lower = np.array(x & lower, dtype=np.uint32) + joined = np.column_stack([lower, upper]).ravel() + return uint32_to_float32(joined) + + +def uniform32_from_uint53(x): + x = np.uint64(x) >> np.uint64(16) + x = np.uint32(x & np.uint64(0xffffffff)) + return uint32_to_float32(x) + + +def uniform32_from_uint32(x): + return uint32_to_float32(x) + + +def uniform32_from_uint(x, bits): + if bits == 64: + return uniform32_from_uint64(x) + elif bits == 53: + return uniform32_from_uint53(x) + elif bits == 32: + return uniform32_from_uint32(x) + else: + raise NotImplementedError + + +def uniform_from_uint(x, bits): + if bits in (64, 63, 53): + return uniform_from_uint64(x) + elif bits == 32: + return uniform_from_uint32(x) + + +def uniform_from_uint64(x): + return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0) + + +def uniform_from_uint32(x): + out = np.empty(len(x) // 2) + for i in range(0, len(x), 2): + a = x[i] >> 5 + b = x[i + 1] >> 6 + out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0 + return out + + +def uniform_from_dsfmt(x): + return x.view(np.double) - 1.0 + + +def gauss_from_uint(x, n, bits): + if bits in (64, 63): + doubles = uniform_from_uint64(x) + elif bits == 32: + doubles = uniform_from_uint32(x) + else: # bits == 'dsfmt' + doubles = uniform_from_dsfmt(x) + gauss = [] + loc = 0 + x1 = x2 = 0.0 + while len(gauss) < n: + r2 = 2 + while r2 >= 1.0 or r2 == 0.0: + x1 = 2.0 * doubles[loc] - 1.0 + x2 = 2.0 * doubles[loc + 1] - 1.0 + r2 = x1 * x1 + x2 * x2 + loc += 2 + + f = np.sqrt(-2.0 * np.log(r2) / r2) + gauss.append(f * x2) + gauss.append(f * x1) + + return gauss[:n] + + +def test_seedsequence(): + from numpy.random.bit_generator import ( + ISeedSequence, + ISpawnableSeedSequence, + SeedlessSeedSequence, + ) + + s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6) + s1.spawn(10) + s2 = SeedSequence(**s1.state) + assert_equal(s1.state, s2.state) + assert_equal(s1.n_children_spawned, s2.n_children_spawned) + + # The interfaces cannot be instantiated themselves. + assert_raises(TypeError, ISeedSequence) + assert_raises(TypeError, ISpawnableSeedSequence) + dummy = SeedlessSeedSequence() + assert_raises(NotImplementedError, dummy.generate_state, 10) + assert len(dummy.spawn(10)) == 10 + + +def test_generator_spawning(): + """ Test spawning new generators and bit_generators directly.
+ """ + rng = np.random.default_rng() + seq = rng.bit_generator.seed_seq + new_ss = seq.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(5)] + assert [c.spawn_key for c in new_ss] == expected_keys + + new_bgs = rng.bit_generator.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(5, 10)] + assert [bg.seed_seq.spawn_key for bg in new_bgs] == expected_keys + + new_rngs = rng.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(10, 15)] + found_keys = [rng.bit_generator.seed_seq.spawn_key for rng in new_rngs] + assert found_keys == expected_keys + + # Sanity check that streams are actually different: + assert new_rngs[0].uniform() != new_rngs[1].uniform() + + +def test_non_spawnable(): + from numpy.random.bit_generator import ISeedSequence + + class FakeSeedSequence: + def generate_state(self, n_words, dtype=np.uint32): + return np.zeros(n_words, dtype=dtype) + + ISeedSequence.register(FakeSeedSequence) + + rng = np.random.default_rng(FakeSeedSequence()) + + with pytest.raises(TypeError, match="The underlying SeedSequence"): + rng.spawn(5) + + with pytest.raises(TypeError, match="The underlying SeedSequence"): + rng.bit_generator.spawn(5) + + +class Base: + dtype = np.uint64 + data2 = data1 = {} + + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [] + + @classmethod + def _read_csv(cls, filename): + with open(filename) as csv: + seed = csv.readline() + seed = seed.split(',') + seed = [int(s.strip(), 0) for s in seed[1:]] + data = [] + for line in csv: + data.append(int(line.split(',')[-1].strip(), 0)) + return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)} + + def test_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data1['data']) + + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw() + assert_equal(uints, self.data1['data'][0]) + + bit_generator = self.bit_generator(*self.data2['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data2['data']) + + def test_random_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(output=False) + assert uints is None + uints = bit_generator.random_raw(1000, output=False) + assert uints is None + + def test_gauss_inv(self): + n = 25 + rs = RandomState(self.bit_generator(*self.data1['seed'])) + gauss = rs.standard_normal(n) + assert_allclose(gauss, + gauss_from_uint(self.data1['data'], n, self.bits)) + + rs = RandomState(self.bit_generator(*self.data2['seed'])) + gauss = rs.standard_normal(25) + assert_allclose(gauss, + gauss_from_uint(self.data2['data'], n, self.bits)) + + def test_uniform_double(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + def test_uniform_float(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform32_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + 
assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform32_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + def test_repr(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in repr(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs) + + def test_str(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in str(rs) + assert str(self.bit_generator.__name__) in str(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs) + + def test_pickle(self): + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + bitgen_pkl = pickle.dumps(bit_generator) + reloaded = pickle.loads(bitgen_pkl) + reloaded_state = reloaded.state + assert_array_equal(Generator(bit_generator).standard_normal(1000), + Generator(reloaded).standard_normal(1000)) + assert bit_generator is not reloaded + assert_state_equal(reloaded_state, state) + + ss = SeedSequence(100) + aa = pickle.loads(pickle.dumps(ss)) + assert_equal(ss.state, aa.state) + + def test_pickle_preserves_seed_sequence(self): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + ss = bit_generator.seed_seq + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + bit_generator.seed_seq.spawn(10) + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.n_children_spawned, ss_plk.n_children_spawned) + + def test_invalid_state_type(self): + bit_generator = self.bit_generator(*self.data1['seed']) + with pytest.raises(TypeError): + bit_generator.state = {'1'} + + def test_invalid_state_value(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + state['bit_generator'] = 'otherBitGenerator' + with pytest.raises(ValueError): + bit_generator.state = state + + def test_invalid_init_type(self): + bit_generator = self.bit_generator + for st in self.invalid_init_types: + with pytest.raises(TypeError): + bit_generator(*st) + + def test_invalid_init_values(self): + bit_generator = self.bit_generator + for st in self.invalid_init_values: + with pytest.raises((ValueError, OverflowError)): + bit_generator(*st) + + def test_benchmark(self): + bit_generator = self.bit_generator(*self.data1['seed']) + bit_generator._benchmark(1) + bit_generator._benchmark(1, 'double') + with pytest.raises(ValueError): + bit_generator._benchmark(1, 'int32') + + @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available') + def test_cffi(self): + bit_generator = self.bit_generator(*self.data1['seed']) + cffi_interface = bit_generator.cffi + assert isinstance(cffi_interface, interface) + other_cffi_interface = bit_generator.cffi + assert other_cffi_interface is cffi_interface + + @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available') + def test_ctypes(self): + bit_generator = self.bit_generator(*self.data1['seed']) + ctypes_interface = bit_generator.ctypes + assert isinstance(ctypes_interface, interface) + other_ctypes_interface = bit_generator.ctypes + assert other_ctypes_interface is ctypes_interface + + def 
test_getstate(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + alt_state = bit_generator.__getstate__() + assert isinstance(alt_state, tuple) + assert_state_equal(state, alt_state[0]) + assert isinstance(alt_state[1], SeedSequence) + +class TestPhilox(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/philox-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/philox-testset-2.csv')) + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)] + + def test_set_key(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + keyed = self.bit_generator(counter=state['state']['counter'], + key=state['state']['key']) + assert_state_equal(bit_generator.state, keyed.state) + + +class TestPCG64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + def test_advange_large(self): + rs = Generator(self.bit_generator(38219308213743)) + pcg = rs.bit_generator + state = pcg.state["state"] + initial_state = 287608843259529770491897792873167516365 + assert state["state"] == initial_state + pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1))) + state = pcg.state["state"] + advanced_state = 135275564607035429730177404003164635391 + assert state["state"] == advanced_state + + +class TestPCG64DXSM(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + def test_advange_large(self): + rs = Generator(self.bit_generator(38219308213743)) + pcg = rs.bit_generator + state = pcg.state + initial_state = 287608843259529770491897792873167516365 + assert state["state"]["state"] == initial_state + pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1))) + 
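        # [Editorial aside — not part of the vendored numpy file; the seed
        #  value below is illustrative.]
        # The 128-bit LCG underlying PCG64/PCG64DXSM makes advance() arithmetic
        # modulo 2**128, which is exactly what test_advance_symmetry above
        # exercises:
        #     bg = PCG64DXSM(42)
        #     s0 = bg.state
        #     bg.advance(-3); a = bg.random_raw()
        #     bg.state = s0
        #     bg.advance(2**128 - 3); b = bg.random_raw()
        #     assert a == b   # -3 == 2**128 - 3 (mod 2**128)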
state = pcg.state["state"] + advanced_state = 277778083536782149546677086420637664879 + assert state["state"] == advanced_state + + +class TestMT19937(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.bits = 32 + cls.dtype = np.uint32 + cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv')) + cls.seed_error_type = ValueError + cls.invalid_init_types = [] + cls.invalid_init_values = [(-1,)] + + def test_seed_float_array(self): + assert_raises(TypeError, self.bit_generator, np.array([np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([-np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([0, np.pi])) + assert_raises(TypeError, self.bit_generator, [np.pi]) + assert_raises(TypeError, self.bit_generator, [0, np.pi]) + + def test_state_tuple(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + bit_generator = rs.bit_generator + state = bit_generator.state + desired = rs.integers(2 ** 16) + tup = (state['bit_generator'], state['state']['key'], + state['state']['pos']) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + tup = tup + (0, 0.0) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + + +class TestSFC64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/sfc64-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/sfc64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_legacy_pickle(self): + # Pickling format was changed in 2.0.x + import gzip + import pickle + + expected_state = np.array( + [ + 9957867060933711493, + 532597980065565856, + 14769588338631205282, + 13 + ], + dtype=np.uint64 + ) + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz") + with gzip.open(pkl_file) as gz: + sfc = pickle.load(gz) + + assert isinstance(sfc, SFC64) + assert_equal(sfc.state["state"]["state"], expected_state) + + +class TestDefaultRNG: + def test_seed(self): + for args in [(), (None,), (1234,), ([1234, 5678],)]: + rg = default_rng(*args) + assert isinstance(rg.bit_generator, PCG64) + + def test_passthrough(self): + bg = Philox() + rg = default_rng(bg) + assert rg.bit_generator is bg + rg2 = default_rng(rg) + assert rg2 is rg + assert rg2.bit_generator is bg + + def test_coercion_RandomState_Generator(self): + # use default_rng to coerce RandomState to Generator + rs = RandomState(1234) + rg = default_rng(rs) + assert isinstance(rg.bit_generator, MT19937) + assert rg.bit_generator is rs._bit_generator + + # RandomState with a non MT19937 bit generator + _original = np.random.get_bit_generator() + bg = PCG64(12342298) + np.random.set_bit_generator(bg) + rs = np.random.mtrand._rand + rg = default_rng(rs) + assert rg.bit_generator is bg + + # vital to get global state back to original, otherwise + # other tests start to fail. 
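        # [Editorial aside — not part of the vendored numpy file.]
        # The same save/swap/restore pattern as above, made unconditional with
        # try/finally so the global state is restored even if the body raises:
        #     _original = np.random.get_bit_generator()
        #     try:
        #         np.random.set_bit_generator(PCG64(12342298))
        #         ...  # exercise np.random.mtrand._rand
        #     finally:
        #         np.random.set_bit_generator(_original)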
+ np.random.set_bit_generator(_original) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_extending.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_extending.py new file mode 100644 index 0000000..7a079d6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_extending.py @@ -0,0 +1,127 @@ +import os +import shutil +import subprocess +import sys +import sysconfig +import warnings +from importlib.util import module_from_spec, spec_from_file_location + +import pytest + +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM + +try: + import cffi +except ImportError: + cffi = None + +if sys.flags.optimize > 1: + # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 + # cffi cannot succeed + cffi = None + +try: + with warnings.catch_warnings(record=True) as w: + # numba issue gh-4733 + warnings.filterwarnings('always', '', DeprecationWarning) + import numba +except (ImportError, SystemError): + # Certain numpy/numba versions trigger a SystemError due to a numba bug + numba = None + +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + # Note: keep in sync with the one in pyproject.toml + required_version = '3.0.6' + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + + +@pytest.mark.skipif( + IS_EDITABLE, + reason='Editable install cannot find .pxd headers' +) +@pytest.mark.skipif( + sys.platform == "win32" and sys.maxsize < 2**32, + reason="Failing in 32-bit Windows wheel build job, skip for now" +) +@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") +@pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='Meson unable to find MSVC linker on win-arm64') +@pytest.mark.slow +def test_cython(tmp_path): + import glob + # build the examples in a temporary directory + srcdir = os.path.join(os.path.dirname(__file__), '..') + shutil.copytree(srcdir, tmp_path / 'random') + build_dir = tmp_path / 'random' / '_examples' / 'cython' + target_dir = build_dir / "build" + os.makedirs(target_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(build_dir)], + cwd=target_dir, + ) + else: + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(build_dir)], + cwd=target_dir + ) + subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir) + + # gh-16162: make sure numpy's __init__.pxd was used for cython + # not really part of this test, but it is a convenient place to check + + g = glob.glob(str(target_dir / "*" / "extending.pyx.c")) + with open(g[0]) as fid: + txt_to_find = 'NumPy API declarations from "numpy/__init__' + for line in fid: + if txt_to_find in line: + break + else: + assert False, f"Could not find '{txt_to_find}' in C file, wrong pxd used" + # import without adding the directory to sys.path + suffix = sysconfig.get_config_var('EXT_SUFFIX') + + def 
load(modname): + so = (target_dir / modname).with_suffix(suffix) + spec = spec_from_file_location(modname, so) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + # test that the module can be imported + load("extending") + load("extending_cpp") + # actually test the cython c-extension + extending_distributions = load("extending_distributions") + from numpy.random import PCG64 + values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd') + assert values.shape == (10,) + assert values.dtype == np.float64 + +@pytest.mark.skipif(numba is None or cffi is None, + reason="requires numba and cffi") +def test_numba(): + from numpy.random._examples.numba import extending # noqa: F401 + +@pytest.mark.skipif(cffi is None, reason="requires cffi") +def test_cffi(): + from numpy.random._examples.cffi import extending # noqa: F401 diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_generator_mt19937.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_generator_mt19937.py new file mode 100644 index 0000000..d09cbba --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_generator_mt19937.py @@ -0,0 +1,2804 @@ +import hashlib +import os.path +import sys + +import pytest + +import numpy as np +from numpy.exceptions import AxisError +from numpy.linalg import LinAlgError +from numpy.random import MT19937, Generator, RandomState, SeedSequence +from numpy.testing import ( + IS_WASM, + assert_, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + assert_warns, + suppress_warnings, +) + +random = Generator(MT19937()) + +JUMP_TEST_DATA = [ + { + "seed": 0, + "steps": 10, + "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, # noqa: E501 + "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, # noqa: E501 + }, + { + "seed": 384908324, + "steps": 312, + "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, # noqa: E501 + "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, # noqa: E501 + }, + { + "seed": [839438204, 980239840, 859048019, 821], + "steps": 511, + "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, # noqa: E501 + "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, # noqa: E501 + }, +] + + +@pytest.fixture(scope='module', params=[True, False]) +def endpoint(request): + return request.param + + +class TestSeed: + def test_scalar(self): + s = Generator(MT19937(0)) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937(4294967295)) + assert_equal(s.integers(1000), 324) + + def test_array(self): + s = Generator(MT19937(range(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937(np.arange(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937([0])) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937([4294967295])) + assert_equal(s.integers(1000), 324) + + def test_seedsequence(self): + s = MT19937(SeedSequence(0)) + assert_equal(s.random_raw(1), 2058676884) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, MT19937, -0.5) + assert_raises(ValueError, MT19937, -1) + + def test_invalid_array(self): + # seed must be an unsigned integer + 
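        # [Editorial aside — not part of the vendored numpy file.]
        # The error types in these assertions are deliberate: a wrongly *typed*
        # seed (a float, even inside an array) raises TypeError, while a
        # correctly typed but out-of-range *value* raises ValueError, e.g.
        #     MT19937(np.uint32(7))   # fine
        #     MT19937(0.5)            # TypeError: seeds must be integers
        #     MT19937(-1)             # ValueError: seeds must be non-negative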
assert_raises(TypeError, MT19937, [-0.5]) + assert_raises(ValueError, MT19937, [-1]) + assert_raises(ValueError, MT19937, [1, -2, 4294967296]) + + def test_noninstantized_bitgen(self): + assert_raises(ValueError, Generator, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.integers(-5, -1) < -1) + x = random.integers(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random = Generator(MT19937(1432985819)) + non_contig = random.multinomial(100, pvals=pvals) + random = Generator(MT19937(1432985819)) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + random = Generator(MT19937(1432985819)) + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + +class TestMultivariateHypergeometric: + + def setup_method(self): + self.seed = 8675309 + + def test_argument_validation(self): + # Error cases... + + # `colors` must be a 1-d sequence + assert_raises(ValueError, random.multivariate_hypergeometric, + 10, 4) + + # Negative nsample + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], -1) + + # Negative color + assert_raises(ValueError, random.multivariate_hypergeometric, + [-1, 2, 3], 2) + + # nsample exceeds sum(colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], 10) + + # nsample exceeds sum(colors) (edge case of empty colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [], 1) + + # Validation errors associated with very large values in colors. 
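        # [Editorial aside — not part of the vendored numpy file; the bounds
        #  stated here are my reading of the upstream docstring.]
        # Two distinct caps are exercised below: with method='marginals' the
        # total sum(colors) may not exceed 1_000_000_000, so
        #     999999999 + 101 == 1_000_000_100   # over the cap -> ValueError
        # while method='count' uses an internal int64-indexed workspace of
        # length about sum(colors) + 1, which motivates the max_int64_index
        # bound computed below.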
+ assert_raises(ValueError, random.multivariate_hypergeometric, + [999999999, 101], 5, 1, 'marginals') + + int64_info = np.iinfo(np.int64) + max_int64 = int64_info.max + max_int64_index = max_int64 // int64_info.dtype.itemsize + assert_raises(ValueError, random.multivariate_hypergeometric, + [max_int64_index - 100, 101], 5, 1, 'count') + + @pytest.mark.parametrize('method', ['count', 'marginals']) + def test_edge_cases(self, method): + # Set the seed, but in fact, all the results in this test are + # deterministic, so we don't really need this. + random = Generator(MT19937(self.seed)) + + x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([], 0, method=method) + assert_array_equal(x, []) + + x = random.multivariate_hypergeometric([], 0, size=1, method=method) + assert_array_equal(x, np.empty((1, 0), dtype=np.int64)) + + x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method) + assert_array_equal(x, [3, 0, 0]) + + colors = [1, 1, 0, 1, 1] + x = random.multivariate_hypergeometric(colors, sum(colors), + method=method) + assert_array_equal(x, colors) + + x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, + method=method) + assert_array_equal(x, [[3, 4, 5]] * 3) + + # Cases for nsample: + # nsample < 10 + # 10 <= nsample < colors.sum()/2 + # colors.sum()/2 < nsample < colors.sum() - 10 + # colors.sum() - 10 < nsample < colors.sum() + @pytest.mark.parametrize('nsample', [8, 25, 45, 55]) + @pytest.mark.parametrize('method', ['count', 'marginals']) + @pytest.mark.parametrize('size', [5, (2, 3), 150000]) + def test_typical_cases(self, nsample, method, size): + random = Generator(MT19937(self.seed)) + + colors = np.array([10, 5, 20, 25]) + sample = random.multivariate_hypergeometric(colors, nsample, size, + method=method) + if isinstance(size, int): + expected_shape = (size,) + colors.shape + else: + expected_shape = size + colors.shape + assert_equal(sample.shape, expected_shape) + assert_((sample >= 0).all()) + assert_((sample <= colors).all()) + assert_array_equal(sample.sum(axis=-1), + np.full(size, fill_value=nsample, dtype=int)) + if isinstance(size, int) and size >= 100000: + # This sample is large enough to compare its mean to + # the expected values. 
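            # [Editorial aside — not part of the vendored numpy file.]
            # For a multivariate hypergeometric draw of nsample items from
            # color counts K with N = sum(K), the marginal mean is
            #     E[X_i] = nsample * K_i / N
            # which is the reference used just below; e.g. colors=[10, 5, 20, 25]
            # and nsample=55 give E[X_0] = 55 * 10 / 60 ≈ 9.17.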
+ assert_allclose(sample.mean(axis=0), + nsample * colors / colors.sum(), + rtol=1e-3, atol=0.005) + + def test_repeatability1(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5, + method='count') + expected = np.array([[2, 1, 2], + [2, 1, 2], + [1, 1, 3], + [2, 0, 3], + [2, 1, 2]]) + assert_array_equal(sample, expected) + + def test_repeatability2(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 50, + size=5, + method='marginals') + expected = np.array([[ 9, 17, 24], + [ 7, 13, 30], + [ 9, 15, 26], + [ 9, 17, 24], + [12, 14, 24]]) + assert_array_equal(sample, expected) + + def test_repeatability3(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 12, + size=5, + method='marginals') + expected = np.array([[2, 3, 7], + [5, 3, 4], + [2, 5, 5], + [5, 3, 4], + [1, 5, 6]]) + assert_array_equal(sample, expected) + + +class TestSetState: + def setup_method(self): + self.seed = 1234567890 + self.rg = Generator(MT19937(self.seed)) + self.bit_generator = self.rg.bit_generator + self.state = self.bit_generator.state + self.legacy_state = (self.state['bit_generator'], + self.state['state']['key'], + self.state['state']['pos']) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.rg.standard_normal(size=3) + self.bit_generator.state = self.state + new = self.rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.rg.standard_normal() + state = self.bit_generator.state + old = self.rg.standard_normal(size=3) + self.bit_generator.state = state + new = self.rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. 
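        # [Editorial aside — not part of the vendored numpy file.]
        # Generator.negative_binomial documents n as a float > 0 (the Polya
        # generalization of the distribution), so n = 0.5 is a legal parameter
        # and must not be truncated: int(0.5) == 0 would instead be rejected
        # as n <= 0.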
+ self.rg.negative_binomial(0.5, 0.5) + + +class TestIntegers: + rfunc = random.integers + + # valid integer/boolean types + itype = [bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self, endpoint): + assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float) + + def test_bounds_checking(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint, + dtype=dt) + + assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd], [lbnd], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, [0], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd + 1], [ubnd], + endpoint=endpoint, dtype=dt) + + def test_bounds_checking_array(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint) + + assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd] * 2, + [ubnd + 1] * 2, endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [1] * 2, 0, + endpoint=endpoint, dtype=dt) + + def test_rng_zero_and_extremes(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + is_open = not endpoint + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], [tgt + is_open], + size=1000, endpoint=endpoint, dtype=dt), + tgt) + + def test_rng_zero_and_extremes_array(self, endpoint): + size = 1000 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + tgt = ubnd - 1 + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + 
assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + def test_full_range(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_full_range_array(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self, endpoint): + # Don't use fixed seed + random = Generator(MT19937()) + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16, + endpoint=endpoint, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint, + dtype=bool) + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_scalar_array_equiv(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + size = 1000 + random = Generator(MT19937(1234)) + scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + scalar_array = random.integers([lbnd], [ubnd], size=size, + endpoint=endpoint, dtype=dt) + + random = Generator(MT19937(1234)) + array = random.integers([lbnd] * size, [ubnd] * + size, size=size, endpoint=endpoint, dtype=dt) + assert_array_equal(scalar, scalar_array) + assert_array_equal(scalar, array) + + def test_repeatability(self, endpoint): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. 
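        # [Editorial aside — not part of the vendored numpy file.]
        # The digests in `tgt` below can be regenerated the same way the loop
        # computes them, e.g. for int64 on a little-endian machine:
        #     val = Generator(MT19937(1234)).integers(0, 6, size=1000,
        #                                             dtype=np.int64)
        #     hashlib.sha256(val).hexdigest()
        # Big-endian platforms must .byteswap() first, as the code below does.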
+ tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', # noqa: E501 + 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', # noqa: E501 + 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} # noqa: E501 + + for dt in self.itype[1:]: + random = Generator(MT19937(1234)) + + # view as little endian for hash + if sys.byteorder == 'little': + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt) + else: + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt).byteswap() + + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random = Generator(MT19937(1234)) + val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint, + dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + def test_repeatability_broadcasting(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt in (bool, np.bool) else np.iinfo(dt).min + ubnd = 2 if dt in (bool, np.bool) else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # view as little endian for hash + random = Generator(MT19937(1234)) + val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint, + dtype=dt) + + assert_array_equal(val, val_bc) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000, + endpoint=endpoint, dtype=dt) + + assert_array_equal(val, val_bc) + + @pytest.mark.parametrize( + 'bound, expected', + [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612, + 3769704066, 1170797179, 4108474671])), + (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613, + 3769704067, 1170797180, 4108474672])), + (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673, + 1831631863, 1215661561, 3869512430]))] + ) + def test_repeatability_32bit_boundary(self, bound, expected): + for size in [None, len(expected)]: + random = Generator(MT19937(1234)) + x = random.integers(bound, size=size) + assert_equal(x, expected if size is not None else expected[0]) + + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[1622936284, 3620788691, 1659384060], + [1417365545, 760222891, 1909653332], + [3788118662, 660249498, 4092002593]], + [[3625610153, 2979601262, 3844162757], + [ 685800658, 120261497, 2694012896], + [1207779440, 1586594375, 3854335050]], + [[3004074748, 2310761796, 3012642217], + [2067714190, 2786677879, 1363865881], + [ 791663441, 1867303284, 2169727960]], + [[1939603804, 1250951100, 298950036], + [1040128489, 3791912209, 3317053765], + [3155528714, 61360675, 2305155588]], + [[ 817688762, 1335621943, 3288952434], + [1770890872, 1102951817, 1957607470], + [3099996017, 798043451, 48334215]]]) + for size in [None, (5, 3, 3)]: + 
random = Generator(MT19937(12345))
+            x = random.integers([[-1], [0], [1]],
+                                [2**32 - 1, 2**32, 2**32 + 1],
+                                size=size)
+            assert_array_equal(x, desired if size is not None else desired[0])
+
+    def test_int64_uint64_broadcast_exceptions(self, endpoint):
+        configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
+                   np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
+                              (-2**63 - 1, -2**63 - 1))}
+        for dtype in configs:
+            for config in configs[dtype]:
+                low, high = config
+                high = high - endpoint
+                low_a = np.array([[low] * 10])
+                high_a = np.array([high] * 10)
+                assert_raises(ValueError, random.integers, low, high,
+                              endpoint=endpoint, dtype=dtype)
+                assert_raises(ValueError, random.integers, low_a, high,
+                              endpoint=endpoint, dtype=dtype)
+                assert_raises(ValueError, random.integers, low, high_a,
+                              endpoint=endpoint, dtype=dtype)
+                assert_raises(ValueError, random.integers, low_a, high_a,
+                              endpoint=endpoint, dtype=dtype)
+
+                low_o = np.array([[low] * 10], dtype=object)
+                high_o = np.array([high] * 10, dtype=object)
+                assert_raises(ValueError, random.integers, low_o, high,
+                              endpoint=endpoint, dtype=dtype)
+                assert_raises(ValueError, random.integers, low, high_o,
+                              endpoint=endpoint, dtype=dtype)
+                assert_raises(ValueError, random.integers, low_o, high_o,
+                              endpoint=endpoint, dtype=dtype)
+
+    def test_int64_uint64_corner_case(self, endpoint):
+        # When stored in NumPy arrays, `lbnd` is cast
+        # to np.int64, and `ubnd` is cast to np.uint64.
+        # Checking whether `lbnd` >= `ubnd` used to be
+        # done solely via direct comparison, which is incorrect
+        # because when NumPy tries to compare both numbers,
+        # it casts both to np.float64 because there is
+        # no integer superset of np.int64 and np.uint64. However,
+        # `ubnd` is too large to be represented in np.float64,
+        # causing it to be rounded down to np.iinfo(np.int64).max,
+        # leading to a ValueError because `lbnd` now equals
+        # the new `ubnd`.
+
+        dt = np.int64
+        tgt = np.iinfo(np.int64).max
+        lbnd = np.int64(np.iinfo(np.int64).max)
+        ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
+
+        # None of these function calls should
+        # generate a ValueError now.
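+        # (Illustrative aside, not part of the original test: np.float64 has
+        # a 53-bit significand, so 2**63 - 1 is not representable exactly and
+        # rounds up to 2.0**63, colliding with the exact float value of 2**63:
+        #
+        #     float(np.int64(2**63 - 1)) == float(np.uint64(2**63))  # True
+        #
+        # which is why a bound check routed through np.float64 cannot tell
+        # these `lbnd` and `ubnd` apart.)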
+ actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool if dt is bool else dt + + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + + for dt in (bool, int): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert not hasattr(sample, 'dtype') + assert_equal(type(sample), dt) + + def test_respect_dtype_array(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool if dt is bool else dt + + sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint, + dtype=dt) + assert_equal(sample.dtype, dt) + + def test_zero_size(self, endpoint): + # See gh-7203 + for dt in self.itype: + sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt) + assert sample.shape == (3, 0, 4) + assert sample.dtype == dt + assert self.rfunc(0, -10, 0, endpoint=endpoint, + dtype=dt).shape == (0,) + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + + def test_error_byteorder(self): + other_byteord_dt = 'i4' + with pytest.raises(ValueError): + random.integers(0, 200, size=10, dtype=other_byteord_dt) + + # chi2max is the maximum acceptable chi-squared value. + @pytest.mark.slow + @pytest.mark.parametrize('sample_size,high,dtype,chi2max', + [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25 + (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30 + (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25 + (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25 + ]) + def test_integers_small_dtype_chisquared(self, sample_size, high, + dtype, chi2max): + # Regression test for gh-14774. + samples = random.integers(high, size=sample_size, dtype=dtype) + + values, counts = np.unique(samples, return_counts=True) + expected = sample_size / high + chi2 = ((counts - expected)**2 / expected).sum() + assert chi2 < chi2max + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup_method(self): + self.seed = 1234567890 + + def test_integers(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2)) + desired = np.array([[-80, -56], [41, 37], [-83, -16]]) + assert_array_equal(actual, desired) + + def test_integers_masked(self): + # Test masked rejection sampling algorithm to generate array of + # uint32 in an interval. 
+        random = Generator(MT19937(self.seed))
+        actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
+        desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
+        assert_array_equal(actual, desired)
+
+    def test_integers_closed(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
+        desired = np.array([[-80, -56], [41, 38], [-83, -15]])
+        assert_array_equal(actual, desired)
+
+    def test_integers_max_int(self):
+        # Tests whether integers with endpoint=True can generate the
+        # maximum allowed Python int that can be converted
+        # into a C long. Previous implementations of this
+        # method have thrown an OverflowError when attempting
+        # to generate this integer.
+        actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
+                                 endpoint=True)
+
+        desired = np.iinfo('l').max
+        assert_equal(actual, desired)
+
+    def test_random(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.random((3, 2))
+        desired = np.array([[0.096999199829214, 0.707517457682192],
+                            [0.084364834598269, 0.767731206553125],
+                            [0.665069021359413, 0.715487190596693]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+        random = Generator(MT19937(self.seed))
+        actual = random.random()
+        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+    def test_random_float(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.random((3, 2))
+        desired = np.array([[0.0969992 , 0.70751746],  # noqa: E203
+                            [0.08436483, 0.76773121],
+                            [0.66506902, 0.71548719]])
+        assert_array_almost_equal(actual, desired, decimal=7)
+
+    def test_random_float_scalar(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.random(dtype=np.float32)
+        desired = 0.0969992
+        assert_array_almost_equal(actual, desired, decimal=7)
+
+    @pytest.mark.parametrize('dtype, uint_view_type',
+                             [(np.float32, np.uint32),
+                              (np.float64, np.uint64)])
+    def test_random_distribution_of_lsb(self, dtype, uint_view_type):
+        random = Generator(MT19937(self.seed))
+        sample = random.random(100000, dtype=dtype)
+        num_ones_in_lsb = np.count_nonzero(sample.view(uint_view_type) & 1)
+        # The probability of a 1 in the least significant bit is 0.25.
+        # With a sample size of 100000, the probability that num_ones_in_lsb
+        # is outside the following range is less than 5e-11.
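+        # (Worked check of that bound, added as an illustrative aside: with
+        # p = 0.25 and n = 100000 the count is binomial with mean
+        # n * p = 25000 and standard deviation
+        # sqrt(n * 0.25 * 0.75) ~= 137, so the window 24100..25900 below is
+        # roughly +/- 6.6 sigma wide, matching the quoted ~5e-11 tail mass.)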
+ assert 24100 < num_ones_in_lsb < 25900 + + def test_random_unsupported_type(self): + assert_raises(TypeError, random.random, dtype='int32') + + def test_choice_uniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4) + desired = np.array([0, 0, 2, 2], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([0, 1, 0, 1], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False) + desired = np.array([2, 0, 3], dtype=np.int64) + assert_array_equal(actual, desired) + actual = random.choice(4, 4, replace=False, shuffle=False) + desired = np.arange(4, dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([0, 2, 3], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['a', 'a', 'c', 'c']) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_default_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3) + desired = np.array([[0, 1], [0, 1], [4, 5]]) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1) + desired = np.array([[0], [2], [4], [6]]) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = () + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + 
assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random = Generator(MT19937(self.seed)) + non_contig = random.choice(5, 3, p=p[::2]) + random = Generator(MT19937(self.seed)) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_choice_return_type(self): + # gh 9867 + p = np.ones(4) / 4. + actual = random.choice(4, 2) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, replace=False) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p, replace=False) + assert actual.dtype == np.int64 + + def test_choice_large_sample(self): + choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222' + random = Generator(MT19937(self.seed)) + actual = random.choice(10000, 5000, replace=False) + if sys.byteorder != 'little': + actual = actual.byteswap() + res = hashlib.sha256(actual.view(np.int8)).hexdigest() + assert_(choice_hash == res) + + def test_choice_array_size_empty_tuple(self): + random = Generator(MT19937(self.seed)) + assert_array_equal(random.choice([1, 2, 3], size=()), np.array(1), + strict=True) + assert_array_equal(random.choice([[1, 2, 3]], size=()), [1, 2, 3]) + assert_array_equal(random.choice([[1]], size=()), [1], strict=True) + assert_array_equal(random.choice([[1]], size=(), axis=1), [1], + strict=True) + + def test_bytes(self): + random = Generator(MT19937(self.seed)) + actual = random.bytes(10) + desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, (1,)), + ("b", 
np.int32, (1,))])]: + random = Generator(MT19937(self.seed)) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7]) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=1) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=-1) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis_empty(self): + random = Generator(MT19937(self.seed)) + desired = np.array([]).reshape((0, 6)) + for axis in (0, 1): + actual = np.array([]).reshape((0, 6)) + random.shuffle(actual, axis=axis) + assert_array_equal(actual, desired) + + def test_shuffle_axis_nonsquare(self): + y1 = np.arange(20).reshape(2, 10) + y2 = y1.copy() + random = Generator(MT19937(self.seed)) + random.shuffle(y1, axis=1) + random = Generator(MT19937(self.seed)) + random.shuffle(y2.T) + assert_array_equal(y1, y2) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(AxisError, random.shuffle, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(AxisError, random.shuffle, arr, 3) + assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None)) + arr = [[1, 2, 3], [4, 5, 6]] + assert_raises(NotImplementedError, random.shuffle, arr, 1) + + arr = np.array(3) + assert_raises(TypeError, random.shuffle, arr) + arr = np.ones((3, 2)) + assert_raises(AxisError, random.shuffle, arr, 2) + + def test_shuffle_not_writeable(self): + random = Generator(MT19937(self.seed)) + a = np.zeros(5) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.shuffle(a) + + def test_permutation(self): + random = Generator(MT19937(self.seed)) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7] + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + bad_x_str = "abcd" + assert_raises(AxisError, random.permutation, bad_x_str) + + bad_x_float = 1.2 + assert_raises(AxisError, random.permutation, bad_x_float) + + random = Generator(MT19937(self.seed)) + integer_val = 10 + desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6] + + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_permutation_custom_axis(self): + a = np.arange(16).reshape((4, 4)) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=1) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=-1) + 
assert_array_equal(actual, desired) + + def test_permutation_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(AxisError, random.permutation, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(AxisError, random.permutation, arr, 3) + assert_raises(TypeError, random.permutation, arr, slice(1, 2, None)) + + @pytest.mark.parametrize("dtype", [int, object]) + @pytest.mark.parametrize("axis, expected", + [(None, np.array([[3, 7, 0, 9, 10, 11], + [8, 4, 2, 5, 1, 6]])), + (0, np.array([[6, 1, 2, 9, 10, 11], + [0, 7, 8, 3, 4, 5]])), + (1, np.array([[ 5, 3, 4, 0, 2, 1], + [11, 9, 10, 6, 8, 7]]))]) + def test_permuted(self, dtype, axis, expected): + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + random.permuted(x, axis=axis, out=x) + assert_array_equal(x, expected) + + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + y = random.permuted(x, axis=axis) + assert y.dtype == dtype + assert_array_equal(y, expected) + + def test_permuted_with_strides(self): + random = Generator(MT19937(self.seed)) + x0 = np.arange(22).reshape(2, 11) + x1 = x0.copy() + x = x0[:, ::3] + y = random.permuted(x, axis=1, out=x) + expected = np.array([[0, 9, 3, 6], + [14, 20, 11, 17]]) + assert_array_equal(y, expected) + x1[:, ::3] = expected + # Verify that the original x0 was modified in-place as expected. + assert_array_equal(x1, x0) + + def test_permuted_empty(self): + y = random.permuted([]) + assert_array_equal(y, []) + + @pytest.mark.parametrize('outshape', [(2, 3), 5]) + def test_permuted_out_with_wrong_shape(self, outshape): + a = np.array([1, 2, 3]) + out = np.zeros(outshape, dtype=a.dtype) + with pytest.raises(ValueError, match='same shape'): + random.permuted(a, out=out) + + def test_permuted_out_with_wrong_type(self): + out = np.zeros((3, 5), dtype=np.int32) + x = np.ones((3, 5)) + with pytest.raises(TypeError, match='Cannot cast'): + random.permuted(x, axis=1, out=out) + + def test_permuted_not_writeable(self): + x = np.zeros((2, 5)) + x.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.permuted(x, axis=1, out=x) + + def test_beta(self): + random = Generator(MT19937(self.seed)) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.083029353267698e-10, 2.449965303168024e-11], + [2.397085162969853e-02, 3.590779671820755e-08], + [2.830254190078299e-04, 1.744709918330393e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[42, 41], + [42, 48], + [44, 50]]) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456) + desired = 42 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[32.9850547060149, 39.0219480493301], + [56.2006134779419, 57.3474165711485], + [55.4243733880198, 55.4209797925213]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.5439892869558927, 0.45601071304410745], + [0.5588917345860708, 0.4411082654139292 ]], # noqa: E202 + [[0.5632074165063435, 
0.43679258349365657], + [0.54862581112627, 0.45137418887373015]], + [[0.49961831357047226, 0.5003816864295278 ], # noqa: E202 + [0.52374806183482, 0.47625193816517997]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + random = Generator(MT19937(self.seed)) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random = Generator(MT19937(self.seed)) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_dirichlet_small_alpha(self): + eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc. + alpha = eps * np.array([1., 1.0e-3]) + random = Generator(MT19937(self.seed)) + actual = random.dirichlet(alpha, size=(3, 2)) + expected = np.array([ + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]] + ]) + assert_array_almost_equal(actual, expected, decimal=15) + + @pytest.mark.slow + def test_dirichlet_moderately_small_alpha(self): + # Use alpha.max() < 0.1 to trigger stick breaking code path + alpha = np.array([0.02, 0.04, 0.03]) + exact_mean = alpha / alpha.sum() + random = Generator(MT19937(self.seed)) + sample = random.dirichlet(alpha, size=20000000) + sample_mean = sample.mean(axis=0) + assert_allclose(sample_mean, exact_mean, rtol=1e-3) + + # This set of parameters includes inputs with alpha.max() >= 0.1 and + # alpha.max() < 0.1 to exercise both generation methods within the + # dirichlet code. 
+ @pytest.mark.parametrize( + 'alpha', + [[5, 9, 0, 8], + [0.5, 0, 0, 0], + [1, 5, 0, 0, 1.5, 0, 0, 0], + [0.01, 0.03, 0, 0.005], + [1e-5, 0, 0, 0], + [0.002, 0.015, 0, 0, 0.04, 0, 0, 0], + [0.0], + [0, 0, 0]], + ) + def test_dirichlet_multiple_zeros_in_alpha(self, alpha): + alpha = np.array(alpha) + y = random.dirichlet(alpha) + assert_equal(y[alpha == 0], 0.0) + + def test_exponential(self): + random = Generator(MT19937(self.seed)) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[0.098845481066258, 1.560752510746964], + [0.075730916041636, 1.769098974710777], + [1.488602544592235, 2.49684815275751 ]]) # noqa: E202 + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + random = Generator(MT19937(self.seed)) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[0.461720027077085, 1.100441958872451], + [1.100337455217484, 0.91421736740018 ], # noqa: E202 + [0.500811891303113, 0.826802454552058]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random = Generator(MT19937(self.seed)) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[ 5.03850858902096, 7.9228656732049 ], # noqa: E202 + [18.73983605132985, 19.57961681699238], + [18.17897755150825, 18.17653912505234]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random = Generator(MT19937(self.seed)) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[1, 11], + [1, 12], + [11, 17]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random = Generator(MT19937(self.seed)) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[ 4.688397515056245, -0.289514845417841], + [ 4.981176042584683, -0.633224272589149], + [-0.055915275687488, -0.333962478257953]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) 
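+
+    # An illustrative sketch, not part of the original suite: every test in
+    # this class leans on one property, namely that seeding MT19937
+    # identically makes Generator reproduce the exact same stream, which is
+    # what keeps the hard-coded `desired` arrays above valid.
+    def test_seeded_stream_reproducibility_sketch(self):
+        g1 = Generator(MT19937(self.seed))
+        g2 = Generator(MT19937(self.seed))
+        # Two generators built from the same seed yield bit-identical draws.
+        assert_array_equal(g1.integers(-99, 99, size=(3, 2)),
+                           g2.integers(-99, 99, size=(3, 2)))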
+ + def test_hypergeometric(self): + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[ 9, 9], + [ 9, 9], + [10, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random = Generator(MT19937(self.seed)) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.156353949272393, 1.195863024830054], + [-3.435458081645966, 1.656882398925444], + [ 0.924824032467446, 1.251116432209336]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-4.338584631510999, 1.890171436749954], + [-4.64547787337966 , 2.514545562919217], # noqa: E203 + [ 1.495389489198666, 1.967827627577474]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random = Generator(MT19937(self.seed)) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[ 0.0268252166335, 13.9534486483053], + [ 0.1204014788936, 2.2422077497792], + [ 4.2484199496128, 12.0093343977523]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random = Generator(MT19937(self.seed)) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[14, 17], + [3, 18], + [5, 1]]) + assert_array_equal(actual, desired) + + def test_logseries_zero(self): + random = Generator(MT19937(self.seed)) + assert random.logseries(0) == 1 + + @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.]) + def test_logseries_exceptions(self, value): + random = Generator(MT19937(self.seed)) + with np.errstate(invalid="ignore"): + with pytest.raises(ValueError): + random.logseries(value) + with pytest.raises(ValueError): + # contiguous path: + random.logseries(np.array([value] * 10)) + with pytest.raises(ValueError): + # non-contiguous path: + random.logseries(np.array([value] * 10)[::2]) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2))
+        desired = np.array([[[1, 5, 1, 6, 4, 3],
+                             [4, 2, 6, 2, 4, 2]],
+                            [[5, 3, 2, 6, 3, 1],
+                             [4, 4, 0, 2, 3, 7]],
+                            [[6, 3, 1, 5, 3, 2],
+                             [5, 5, 3, 1, 2, 4]]])
+        assert_array_equal(actual, desired)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+    def test_multivariate_normal(self, method):
+        random = Generator(MT19937(self.seed))
+        mean = (.123456789, 10)
+        cov = [[1, 0], [0, 1]]
+        size = (3, 2)
+        actual = random.multivariate_normal(mean, cov, size, method=method)
+        desired = np.array([[[-1.747478062846581, 11.25613495182354 ],  # noqa: E202
+                             [-0.9967333370066214, 10.342002097029821]],
+                            [[ 0.7850019631242964, 11.181113712443013],
+                             [ 0.8901349653255224, 8.873825399642492]],
+                            [[ 0.7130260107430003, 9.551628690083056],
+                             [ 0.7127098726541128, 11.991709234143173]]])
+
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+        # Check for default size, was raising deprecation warning
+        actual = random.multivariate_normal(mean, cov, method=method)
+        desired = np.array([0.233278563284287, 9.424140804347195])
+        assert_array_almost_equal(actual, desired, decimal=15)
+        # Check that non-symmetric covariance input raises an exception
+        # when check_valid='raise' with the default svd method.
+        mean = [0, 0]
+        cov = [[1, 2], [1, 2]]
+        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+                      check_valid='raise')
+
+        # Check that non positive-semidefinite covariance warns with
+        # RuntimeWarning
+        cov = [[1, 2], [2, 1]]
+        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
+                     method='eigh')
+        assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+                      method='cholesky')
+
+        # and that it doesn't warn with RuntimeWarning check_valid='ignore'
+        assert_no_warnings(random.multivariate_normal, mean, cov,
+                           check_valid='ignore')
+
+        # and that it raises a ValueError when check_valid='raise'
+        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+                      check_valid='raise')
+        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+                      check_valid='raise', method='eigh')
+
+        # check degenerate samples from singular covariance matrix
+        cov = [[1, 1], [1, 1]]
+        if method in ('svd', 'eigh'):
+            samples = random.multivariate_normal(mean, cov, size=(3, 2),
+                                                 method=method)
+            assert_array_almost_equal(samples[..., 0], samples[..., 1],
+                                      decimal=6)
+        else:
+            assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+                          method='cholesky')
+
+        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+        with suppress_warnings() as sup:
+            random.multivariate_normal(mean, cov, method=method)
+            w = sup.record(RuntimeWarning)
+            assert len(w) == 0
+
+        mu = np.zeros(2)
+        cov = np.eye(2)
+        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+                      check_valid='other')
+        assert_raises(ValueError, random.multivariate_normal,
+                      np.zeros((2, 1, 1)), cov)
+        assert_raises(ValueError, random.multivariate_normal,
+                      mu, np.empty((3, 2)))
+        assert_raises(ValueError, random.multivariate_normal,
+                      mu, np.eye(3))
+
+    @pytest.mark.parametrize('mean, cov', [([0], [[1 + 1j]]), ([0j], [[1]])])
+    def test_multivariate_normal_disallow_complex(self, mean, cov):
+        random = Generator(MT19937(self.seed))
+        with pytest.raises(TypeError, match="must not be complex"):
+            random.multivariate_normal(mean, cov)
+
+    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+    def
test_multivariate_normal_basic_stats(self, method): + random = Generator(MT19937(self.seed)) + n_s = 1000 + mean = np.array([1, 2]) + cov = np.array([[2, 1], [1, 2]]) + s = random.multivariate_normal(mean, cov, size=(n_s,), method=method) + s_center = s - mean + cov_emp = (s_center.T @ s_center) / (n_s - 1) + # these are pretty loose and are only designed to detect major errors + assert np.all(np.abs(s_center.mean(-2)) < 0.1) + assert np.all(np.abs(cov_emp - cov) < 0.2) + + def test_negative_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[543, 727], + [775, 760], + [600, 674]]) + assert_array_equal(actual, desired) + + def test_negative_binomial_exceptions(self): + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.negative_binomial, 100, np.nan) + assert_raises(ValueError, random.negative_binomial, 100, + [np.nan] * 10) + + def test_negative_binomial_p0_exception(self): + # Verify that p=0 raises an exception. + with assert_raises(ValueError): + x = random.negative_binomial(1, 0) + + def test_negative_binomial_invalid_p_n_combination(self): + # Verify that values of p and n that would result in an overflow + # or infinite loop raise an exception. + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.negative_binomial, 2**62, 0.1) + assert_raises(ValueError, random.negative_binomial, [2**62], [0.1]) + + def test_noncentral_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[ 1.70561552362133, 15.97378184942111], + [13.71483425173724, 20.17859633310629], + [11.3615477156643 , 3.67891108738029]]) # noqa: E203 + assert_array_almost_equal(actual, desired, decimal=14) + + actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04], + [1.14554372041263e+00, 1.38187755933435e-03], + [1.90659181905387e+00, 1.21772577941822e+00]]) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[0.82947954590419, 1.80139670767078], + [6.58720057417794, 7.00491463609814], + [6.31101879073157, 6.30982307753005]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[0.060310671139 , 0.23866058175939], # noqa: E203 + [0.86860246709073, 0.2668510459738 ], # noqa: E202 + [0.23375780078364, 1.88922102885943]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.618412914693162, 2.635726692647081], + [-2.116923463013243, 0.807460983059643], + [ 1.446547137248593, 2.485684213886024]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) 
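+
+    # An illustrative sketch, not part of the original suite, of the
+    # convention the *_0 tests above encode: scale=0 collapses the
+    # distribution to a point mass at loc, while negative zero is rejected
+    # as a negative scale. Assumes only the public Generator API.
+    def test_normal_zero_scale_point_mass_sketch(self):
+        random = Generator(MT19937(self.seed))
+        # loc + 0 * z == loc exactly, for every draw
+        assert_equal(random.normal(loc=3.5, scale=0, size=5),
+                     np.full(5, 3.5))
+        assert_raises(ValueError, random.normal, loc=3.5, scale=-0.)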
+ + def test_pareto(self): + random = Generator(MT19937(self.seed)) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04], + [7.2640150889064703e-01, 3.4650454783825594e+05], + [4.5852344481994740e+04, 6.5851383009539105e+07]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random = Generator(MT19937(self.seed)) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [0, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('int64').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random = Generator(MT19937(self.seed)) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02], + [2.482442984543471e-10, 1.527108843266079e-01], + [8.188283434244285e-02, 3.950547209346948e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random = Generator(MT19937(self.seed)) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[4.19494429102666, 16.66920198906598], + [3.67184544902662, 17.74695521962917], + [16.27935397855501, 21.08355560691792]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) 
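+
+    # An illustrative sketch, not part of the original suite: a complement
+    # to the fixed-seed value tests is comparing a large sample against the
+    # analytic Rayleigh mean, E[X] = scale * sqrt(pi / 2). The tolerance is
+    # deliberately loose.
+    def test_rayleigh_mean_sketch(self):
+        random = Generator(MT19937(self.seed))
+        sample = random.rayleigh(scale=10, size=200000)
+        assert_allclose(sample.mean(), 10 * np.sqrt(np.pi / 2), rtol=1e-2)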
+
+    def test_standard_cauchy(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_cauchy(size=(3, 2))
+        desired = np.array([[-1.489437778266206, -3.275389641569784],
+                            [ 0.560102864910406, -0.680780916282552],
+                            [-1.314912905226277, 0.295852965660225]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_standard_exponential(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_exponential(size=(3, 2), method='inv')
+        desired = np.array([[0.102031839440643, 1.229350298474972],
+                            [0.088137284693098, 1.459859985522667],
+                            [1.093830802293668, 1.256977002164613]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_standard_exponential_type_error(self):
+        assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
+
+    def test_standard_gamma(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(shape=3, size=(3, 2))
+        desired = np.array([[0.62970724056362, 1.22379851271008],
+                            [3.899412530884 , 4.12479964250139],  # noqa: E203
+                            [3.74994102464584, 3.74929307690815]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+    def test_standard_gamma_scalar_float(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(3, dtype=np.float32)
+        desired = 2.9242148399353027
+        assert_array_almost_equal(actual, desired, decimal=6)
+
+    def test_standard_gamma_float(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(shape=3, size=(3, 2))
+        desired = np.array([[0.62971, 1.2238],
+                            [3.89941, 4.1248],
+                            [3.74994, 3.74929]])
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+    def test_standard_gamma_float_out(self):
+        actual = np.zeros((3, 2), dtype=np.float32)
+        random = Generator(MT19937(self.seed))
+        random.standard_gamma(10.0, out=actual, dtype=np.float32)
+        desired = np.array([[10.14987, 7.87012],
+                            [ 9.46284, 12.56832],
+                            [13.82495, 7.81533]], dtype=np.float32)
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+        random = Generator(MT19937(self.seed))
+        random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+    def test_standard_gamma_unknown_type(self):
+        assert_raises(TypeError, random.standard_gamma, 1.,
+                      dtype='int32')
+
+    def test_out_size_mismatch(self):
+        out = np.zeros(10)
+        assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
+                      out=out)
+        assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
+                      out=out)
+
+    def test_standard_gamma_0(self):
+        assert_equal(random.standard_gamma(shape=0), 0)
+        assert_raises(ValueError, random.standard_gamma, shape=-0.)
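+
+    # An illustrative sketch, not part of the original suite, of the out=
+    # contract exercised above: a preallocated buffer is filled in place
+    # with draws of the requested dtype. Assumes only the public Generator
+    # API.
+    def test_standard_gamma_out_fill_sketch(self):
+        buf = np.zeros((3, 2), dtype=np.float32)
+        random = Generator(MT19937(self.seed))
+        random.standard_gamma(10.0, out=buf, dtype=np.float32)
+        # Gamma(10) draws are strictly positive, so every slot was written.
+        assert buf.dtype == np.float32
+        assert np.all(buf > 0)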
+ + def test_standard_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_normal(size=(3, 2)) + desired = np.array([[-1.870934851846581, 1.25613495182354 ], # noqa: E202 + [-1.120190126006621, 0.342002097029821], + [ 0.661545174124296, 1.181113712443012]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_normal_unsupported_type(self): + assert_raises(TypeError, random.standard_normal, dtype=np.int32) + + def test_standard_t(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df=10, size=(3, 2)) + desired = np.array([[-1.484666193042647, 0.30597891831161], + [ 1.056684299648085, -0.407312602088507], + [ 0.130704414281157, -2.038053410490321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + random = Generator(MT19937(self.seed)) + actual = random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[ 7.86664070590917, 13.6313848513185 ], # noqa: E202 + [ 7.68152445215983, 14.36169131136546], + [13.16105603911429, 13.72341621856971]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[2.13306255040998 , 7.816987531021207], # noqa: E203 + [2.015436610109887, 8.377577533009589], + [7.421792588856135, 7.891185744455209]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_uniform_zero_range(self): + func = random.uniform + result = func(1.5, 1.5) + assert_allclose(result, 1.5) + result = func([0.0, np.pi], [0.0, np.pi]) + assert_allclose(result, [0.0, np.pi]) + result = func([[2145.12], [2145.12]], [2145.12, 2145.12]) + assert_allclose(result, 2145.12 + np.zeros((2, 2))) + + def test_uniform_neg_range(self): + func = random.uniform + assert_raises(ValueError, func, 2, 1) + assert_raises(ValueError, func, [1, 2], [1, 1]) + assert_raises(ValueError, func, [[0, 1], [2, 3]], 2) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[ 1.107972248690106, 2.841536476232361], + [ 1.832602376042457, 1.945511926976032], + [-0.260147475776542, 2.058047492231698]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_nan(self): + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + @pytest.mark.parametrize("kappa", [1e4, 1e15]) + def test_vonmises_large_kappa(self, kappa): + random = Generator(MT19937(self.seed)) + rs = RandomState(random.bit_generator) + state = random.bit_generator.state + + random_state_vals = rs.vonmises(0, kappa, size=10) + random.bit_generator.state = state + gen_vals = random.vonmises(0, kappa, size=10) + if kappa < 1e6: + assert_allclose(random_state_vals, gen_vals) + else: + assert np.all(random_state_vals != gen_vals) + + @pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2]) + @pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15]) + def test_vonmises_large_kappa_range(self, mu, kappa): + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu, kappa, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_wald(self): + random = Generator(MT19937(self.seed)) + actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[0.26871721804551, 3.2233942732115 ], # noqa: E202 + [2.20328374987066, 2.40958405189353], + [2.07093587449261, 0.73073890064369]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + random = Generator(MT19937(self.seed)) + actual = random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.138613914769468, 1.306463419753191], + [0.111623365934763, 1.446570494646721], + [1.257145775276011, 1.914247725027957]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random = Generator(MT19937(self.seed)) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) 
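+
+    # An illustrative sketch, not part of the original suite, of the
+    # size-inference rule that TestBroadcast below exercises systematically:
+    # a length-3 parameter array with size omitted behaves the same as the
+    # identical array with an explicit size=3.
+    def test_weibull_broadcast_size_sketch(self):
+        g1 = Generator(MT19937(self.seed))
+        g2 = Generator(MT19937(self.seed))
+        assert_array_equal(g1.weibull([1.23] * 3),
+                           g2.weibull([1.23] * 3, size=3))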
+ + def test_zipf(self): + random = Generator(MT19937(self.seed)) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[ 1, 1], + [ 10, 867], + [354, 2]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095]) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + random = Generator(MT19937(self.seed)) + desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) + + random = Generator(MT19937(self.seed)) + actual = random.normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.normal, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + normal = random.normal + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455]) + + random = Generator(MT19937(self.seed)) + beta = random.beta + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + random = Generator(MT19937(self.seed)) + actual = random.beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + std_gamma = random.standard_gamma + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258]) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + desired = np.array([0.07765056244107, 
7.72951397913186, 0.05786093891763]) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629]) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + random = Generator(MT19937(self.seed)) + desired = np.array([0.04714867120827, 0.1239390327694]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589]) + + random = Generator(MT19937(self.seed)) + actual = random.chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399]) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983]) + + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.standard_t, 
bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326]) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + desired = np.array([0.95905052946317, 0.2383810889437, 1.04988745750013]) + + random = Generator(MT19937(self.seed)) + actual = random.pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807]) + + random = Generator(MT19937(self.seed)) + actual = random.power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202]) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081]) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397]) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276]) + + random = Generator(MT19937(self.seed)) + lognormal = random.lognormal + actual = lognormal(mean 
* 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + random = Generator(MT19937(self.seed)) + actual = random.lognormal(mean, sigma * 3) + assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + desired = np.array( + [1.1597068009872629, + 0.6539188836253857, + 1.1981526554349398] + ) + + random = Generator(MT19937(self.seed)) + actual = random.rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864]) + + random = Generator(MT19937(self.seed)) + actual = random.wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.wald, bad_mean * 3, scale) + assert_raises(ValueError, random.wald, mean * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + actual = random.wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.wald, bad_mean, scale * 3) + assert_raises(ValueError, random.wald, mean, bad_scale * 3) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326]) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + assert_raises(ValueError, triangular, 10., 0., 20.) + assert_raises(ValueError, triangular, 10., 25., 20.) + assert_raises(ValueError, triangular, 10., 10., 10.) 
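A minimal standalone sketch (separate from the vendored test file) of the broadcasting contract the TestBroadcast cases above exercise: one array-valued parameter broadcasts against scalar-like values for the others, and a single invalid entry anywhere in the broadcast arguments invalidates the whole call. The seed is the TestBroadcast class seed; the triangular arguments are illustrative, not values asserted by the tests.

    import numpy as np
    from numpy.random import Generator, MT19937

    rng = Generator(MT19937(123456789))
    # A length-3 `left` broadcasts against scalar `mode` and `right`,
    # so a single call produces three variates.
    samples = rng.triangular([1.0, 1.0, 1.0], 2.0, 3.0)
    assert samples.shape == (3,)

    # Illustrative bad input: one entry with left > mode makes the whole
    # broadcast call raise, as the assert_raises checks above expect.
    try:
        Generator(MT19937(123456789)).triangular([1.0, 1.0, 4.0], 2.0, 3.0)
    except ValueError:
        pass  # expected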
+ + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([0, 0, 1]) + + random = Generator(MT19937(self.seed)) + binom = random.binomial + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([0, 2, 1], dtype=np.int64) + + random = Generator(MT19937(self.seed)) + neg_binom = random.negative_binomial + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + random = Generator(MT19937(self.seed)) + neg_binom = random.negative_binomial + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + + lam = [1] + bad_lam_one = [-1] + desired = np.array([0, 0, 3]) + + random = Generator(MT19937(self.seed)) + max_lam = random._poisson_lam_max + bad_lam_two = [max_lam * 2] + poisson = random.poisson + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + desired = np.array([1, 8, 1]) + + random = Generator(MT19937(self.seed)) + zipf = random.zipf + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([1, 1, 3]) + + random = Generator(MT19937(self.seed)) + geometric = random.geometric + actual = geometric(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geometric, bad_p_one * 3) + assert_raises(ValueError, geometric, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [-1] + bad_nsample_two = [4] + desired = np.array([0, 0, 1]) + + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) # noqa: E501 + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) # noqa: E501 + + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, 
nsample) + assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) # noqa: E501 + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) # noqa: E501 + + random = Generator(MT19937(self.seed)) + hypergeom = random.hypergeometric + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, hypergeom, -1, 10, 20) + assert_raises(ValueError, hypergeom, 10, -1, 20) + assert_raises(ValueError, hypergeom, 10, 10, -1) + assert_raises(ValueError, hypergeom, 10, 10, 25) + + # ValueError for arguments that are too big. + assert_raises(ValueError, hypergeom, 2**30, 10, 20) + assert_raises(ValueError, hypergeom, 999, 2**31, 50) + assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + desired = np.array([1, 1, 1]) + + random = Generator(MT19937(self.seed)) + logseries = random.logseries + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], + [[1, 0, 1, 0, 2, 1], + [7, 2, 2, 1, 4, 4]], + [[0, 2, 0, 1, 2, 0], + [3, 2, 3, 3, 4, 5]]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [1 / 6.] * 6) + desired = np.array([[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [[1 / 6.] * 6] * 2) + desired = np.array([[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([[5], [20]], [[1 / 6.] 
* 6] * 2) + desired = np.array([[[0, 0, 2, 1, 2, 0], + [0, 0, 2, 1, 1, 1]], + [[4, 2, 3, 3, 5, 3], + [7, 2, 2, 1, 4, 4]]], dtype=np.int64) + assert_array_equal(actual, desired) + + @pytest.mark.parametrize("n", [10, + np.array([10, 10]), + np.array([[[10]], [[10]]]) + ] + ) + def test_multinomial_pval_broadcast(self, n): + random = Generator(MT19937(self.seed)) + pvals = np.array([1 / 4] * 4) + actual = random.multinomial(n, pvals) + n_shape = () if isinstance(n, int) else n.shape + expected_shape = n_shape + (4,) + assert actual.shape == expected_shape + pvals = np.vstack([pvals, pvals]) + actual = random.multinomial(n, pvals) + expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + (4,) + assert actual.shape == expected_shape + + pvals = np.vstack([[pvals], [pvals]]) + actual = random.multinomial(n, pvals) + expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + assert actual.shape == expected_shape + (4,) + actual = random.multinomial(n, pvals, size=(3, 2) + expected_shape) + assert actual.shape == (3, 2) + expected_shape + (4,) + + with pytest.raises(ValueError): + # Ensure that size is not broadcast + actual = random.multinomial(n, pvals, size=(1,) * 6) + + def test_invalid_pvals_broadcast(self): + random = Generator(MT19937(self.seed)) + pvals = [[1 / 6] * 6, [1 / 4] * 6] + assert_raises(ValueError, random.multinomial, 1, pvals) + assert_raises(ValueError, random.multinomial, 6, 0.5) + + def test_empty_outputs(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial(np.empty((10, 0, 6), "i8"), [1 / 6] * 6) + assert actual.shape == (10, 0, 6, 6) + actual = random.multinomial(12, np.empty((10, 0, 10))) + assert actual.shape == (10, 0, 10) + actual = random.multinomial(np.empty((3, 0, 7), "i8"), + np.empty((3, 0, 7, 4))) + assert actual.shape == (3, 0, 7, 4) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + def setup_method(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(Generator(MT19937(s)), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(Generator(MT19937(s)), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) + + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup_method(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (random.exponential, random.standard_gamma, + random.chisquare, random.standard_t, + random.pareto, random.weibull, + random.power, random.rayleigh, + random.poisson, random.zipf, + random.geometric, random.logseries) + + probfuncs = (random.geometric, random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (random.uniform, random.normal, + random.beta, random.gamma, + random.f, random.noncentral_chisquare, + random.vonmises, random.laplace, + random.gumbel, random.logistic, + random.lognormal, random.wald, + random.binomial, random.negative_binomial) + + probfuncs = (random.binomial, random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + + def test_integers(self, endpoint): + itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + func = random.integers + high = np.array([1]) + low = np.array([0]) + + for dt in itype: + out = func(low, high, endpoint=endpoint, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + out = func(low[0], high, endpoint=endpoint, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + out = func(low, high[0], endpoint=endpoint, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [random.noncentral_f, random.triangular, + random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) + + +@pytest.mark.parametrize("config", JUMP_TEST_DATA) +def test_jumped(config): + # Each config contains the initial seed, a number of raw steps + # the sha256 hashes of the initial and the final states' keys and + # the position of the initial and the final state. + # These were produced using the original C implementation. 
+ seed = config["seed"] + steps = config["steps"] + + mt19937 = MT19937(seed) + # Burn step + mt19937.random_raw(steps) + key = mt19937.state["state"]["key"] + if sys.byteorder == 'big': + key = key.byteswap() + sha256 = hashlib.sha256(key) + assert mt19937.state["state"]["pos"] == config["initial"]["pos"] + assert sha256.hexdigest() == config["initial"]["key_sha256"] + + jumped = mt19937.jumped() + key = jumped.state["state"]["key"] + if sys.byteorder == 'big': + key = key.byteswap() + sha256 = hashlib.sha256(key) + assert jumped.state["state"]["pos"] == config["jumped"]["pos"] + assert sha256.hexdigest() == config["jumped"]["key_sha256"] + + +def test_broadcast_size_error(): + mu = np.ones(3) + sigma = np.ones((4, 3)) + size = (10, 4, 2) + assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=size) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=(1, 3)) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=(4, 1, 1)) + # 1 arg + shape = np.ones((4, 3)) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=size) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=(3,)) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=3) + # Check out + out = np.empty(size) + with pytest.raises(ValueError): + random.standard_gamma(shape, out=out) + + # 2 arg + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.multinomial([2, 2], [.3, .7], size=(2, 1)) + + # 3 arg + a = random.chisquare(5, size=3) + b = random.chisquare(5, size=(4, 3)) + c = random.chisquare(5, size=(5, 4, 3)) + assert random.noncentral_f(a, b, c).shape == (5, 4, 3) + with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"): + random.noncentral_f(a, b, c, size=(6, 5, 1, 1)) + + +def test_broadcast_size_scalar(): + mu = np.ones(3) + sigma = np.ones(3) + random.normal(mu, sigma, size=3) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=2) + + +def test_ragged_shuffle(): + # GH 18142 + seq = [[], [], 1] + gen = Generator(MT19937(0)) + assert_no_warnings(gen.shuffle, seq) + assert seq == [1, [], []] + + +@pytest.mark.parametrize("high", [-2, [-2]]) +@pytest.mark.parametrize("endpoint", [True, False]) +def test_single_arg_integer_exception(high, endpoint): + # GH 14333 + gen = Generator(MT19937(0)) + msg = 'high < 0' if endpoint else 'high <= 0' + with pytest.raises(ValueError, match=msg): + gen.integers(high, endpoint=endpoint) + msg = 'low > high' if endpoint else 'low >= high' + with pytest.raises(ValueError, match=msg): + gen.integers(-1, high, endpoint=endpoint) + with pytest.raises(ValueError, match=msg): + gen.integers([-1], high, endpoint=endpoint) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +def test_c_contig_req_out(dtype): + # GH 18704 + out = np.empty((2, 3), order="F", dtype=dtype) + shape = [1, 2, 3] + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, dtype=dtype) + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("dist", [random.standard_normal, 
random.random]) +def test_contig_req_out(dist, order, dtype): + # GH 18704 + out = np.empty((2, 3), dtype=dtype, order=order) + variates = dist(out=out, dtype=dtype) + assert variates is out + variates = dist(out=out, dtype=dtype, size=out.shape) + assert variates is out + + +def test_generator_ctor_old_style_pickle(): + rg = np.random.Generator(np.random.PCG64DXSM(0)) + rg.standard_normal(1) + # Directly call reduce which is used in pickling + ctor, (bit_gen, ), _ = rg.__reduce__() + # Simulate unpickling an old pickle that only has the name + assert bit_gen.__class__.__name__ == "PCG64DXSM" + print(ctor) + b = ctor(*("PCG64DXSM",)) + print(b) + b.bit_generator.state = bit_gen.state + state_b = b.bit_generator.state + assert bit_gen.state == state_b + + +def test_pickle_preserves_seed_sequence(): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + rg = np.random.Generator(np.random.PCG64DXSM(20240411)) + ss = rg.bit_generator.seed_seq + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + rg.bit_generator.seed_seq.spawn(10) + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + + +@pytest.mark.parametrize("version", [121, 126]) +def test_legacy_pickle(version): + # Pickling format was changes in 1.22.x and in 2.0.x + import gzip + import pickle + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join( + base_path, "data", f"generator_pcg64_np{version}.pkl.gz" + ) + with gzip.open(pkl_file) as gz: + rg = pickle.load(gz) + state = rg.bit_generator.state['state'] + + assert isinstance(rg, Generator) + assert isinstance(rg.bit_generator, np.random.PCG64) + assert state['state'] == 35399562948360463058890781895381311971 + assert state['inc'] == 87136372517582989555478159403783844777 diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py new file mode 100644 index 0000000..abfacb8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py @@ -0,0 +1,207 @@ +import pytest + +import numpy as np +from numpy.random import MT19937, Generator +from numpy.testing import assert_, assert_array_equal + + +class TestRegression: + + def setup_method(self): + self.mt19937 = Generator(MT19937(121263137472525314065)) + + def test_vonmises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. 
+ for mu in np.linspace(-7., 7., 5): + r = self.mt19937.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems + assert_(self.mt19937.hypergeometric(*args) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + rvsn = self.mt19937.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + mt19937 = Generator(MT19937(12345)) + shuffled = np.array(t, dtype=object) + mt19937.shuffle(shuffled) + expected = np.array([t[2], t[0], t[3], t[1]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom BitGenerator does not call into global state + res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4]) + for i in range(3): + mt19937 = Generator(MT19937(i)) + m = Generator(MT19937(4321)) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + self.mt19937.multivariate_normal([0], [[0]], size=1) + self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) + self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + x = self.mt19937.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') + + def test_beta_very_small_parameters(self): + # gh-24203: beta would hang with very small parameters. + self.mt19937.beta(1e-49, 1e-40) + + def test_beta_ridiculously_small_parameters(self): + # gh-24266: beta would generate nan when the parameters + # were subnormal or a small multiple of the smallest normal. + tiny = np.finfo(1.0).tiny + x = self.mt19937.beta(tiny / 32, tiny / 40, size=50) + assert not np.any(np.isnan(x)) + + def test_beta_expected_zero_frequency(self): + # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta + # would generate too many zeros. 
+ a = 0.0025 + b = 0.0025 + n = 1000000 + x = self.mt19937.beta(a, b, size=n) + nzeros = np.count_nonzero(x == 0) + # beta CDF at x = np.finfo(np.double).smallest_subnormal/2 + # is p = 0.0776169083131899, e.g, + # + # import numpy as np + # from mpmath import mp + # mp.dps = 160 + # x = mp.mpf(np.finfo(np.float64).smallest_subnormal)/2 + # # CDF of the beta distribution at x: + # p = mp.betainc(a, b, x1=0, x2=x, regularized=True) + # n = 1000000 + # exprected_freq = float(n*p) + # + expected_freq = 77616.90831318991 + assert 0.95 * expected_freq < nzeros < 1.05 * expected_freq + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = self.mt19937.choice(a, p=probs) + assert_(c in a) + with pytest.raises(ValueError): + self.mt19937.choice(a, p=probs * 0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + self.mt19937.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + self.mt19937.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + + class N(np.ndarray): + pass + + mt19937 = Generator(MT19937(1)) + orig = np.arange(3).view(N) + perm = mt19937.permutation(orig) + assert_array_equal(perm, np.array([2, 0, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self, dtype=None, copy=None): + return self.a + + mt19937 = Generator(MT19937(1)) + m = M() + perm = mt19937.permutation(m) + assert_array_equal(perm, np.array([4, 1, 3, 0, 2])) + assert_array_equal(m.__array__(), np.arange(5)) + + def test_gamma_0(self): + assert self.mt19937.standard_gamma(0.0) == 0.0 + assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0) + + actual = self.mt19937.standard_gamma([0.0], dtype='float') + expected = np.array([0.], dtype=np.float32) + assert_array_equal(actual, expected) + + def test_geometric_tiny_prob(self): + # Regression test for gh-17007. + # When p = 1e-30, the probability that a sample will exceed 2**63-1 + # is 0.9999999999907766, so we expect the result to be all 2**63-1. + assert_array_equal(self.mt19937.geometric(p=1e-30, size=3), + np.iinfo(np.int64).max) + + def test_zipf_large_parameter(self): + # Regression test for part of gh-9829: a call such as rng.zipf(10000) + # would hang. + n = 8 + sample = self.mt19937.zipf(10000, size=n) + assert_array_equal(sample, np.ones(n, dtype=np.int64)) + + def test_zipf_a_near_1(self): + # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001) + # would hang. + n = 100000 + sample = self.mt19937.zipf(1.0000000000001, size=n) + # Not much of a test, but let's do something more than verify that + # it doesn't hang. 
Certainly for a monotonically decreasing + # discrete distribution truncated to signed 64 bit integers, more + # than half should be less than 2**62. + assert np.count_nonzero(sample < 2**62) > n / 2 diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_random.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_random.py new file mode 100644 index 0000000..d598190 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_random.py @@ -0,0 +1,1757 @@ +import sys +import warnings + +import pytest + +import numpy as np +from numpy import random +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + assert_warns, + suppress_warnings, +) + + +class TestSeed: + def test_scalar(self): + s = np.random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = np.random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = np.random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = np.random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, -0.5) + assert_raises(ValueError, np.random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, [-0.5]) + assert_raises(ValueError, np.random.RandomState, [-1]) + assert_raises(ValueError, np.random.RandomState, [4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, np.random.RandomState, + np.array([], dtype=np.int64)) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. 
+ assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, np.random.multinomial, 1, p, + float(1)) + + def test_multidimensional_pvals(self): + assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]]) + assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]]) + assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]]) + assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]])) + + +class TestSetState: + def setup_method(self): + self.seed = 1234567890 + self.prng = random.RandomState(self.seed) + self.state = self.prng.get_state() + + def test_basic(self): + old = self.prng.tomaxint(16) + self.prng.set_state(self.state) + new = self.prng.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.prng.standard_normal(size=3) + self.prng.set_state(self.state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.prng.standard_normal() + state = self.prng.get_state() + old = self.prng.standard_normal(size=3) + self.prng.set_state(state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.prng.standard_normal(size=16) + self.prng.set_state(old_state) + x2 = self.prng.standard_normal(size=16) + self.prng.set_state(self.state) + x3 = self.prng.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. 
+ self.prng.negative_binomial(0.5, 0.5) + + def test_set_invalid_state(self): + # gh-25402 + with pytest.raises(IndexError): + self.prng.set_state(()) + + +class TestRandint: + + rfunc = np.random.randint + + # valid integer/boolean types + itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + try: + self.rfunc(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + np.random.seed() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + import hashlib + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. 
+ tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 + + for dt in self.itype[1:]: + np.random.seed(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = self.rfunc(0, 6, size=1000, dtype=dt) + else: + val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + np.random.seed(1234) + val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + def test_int64_uint64_corner_case(self): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. However, + # `ubnd` is too large to be represented in np.float64, + # causing it be round down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1) + + # None of these function calls should + # generate a ValueError now. 
+ actual = np.random.randint(lbnd, ubnd, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_equal(sample.dtype, np.dtype(dt)) + + for dt in (bool, int): + # The legacy rng uses "long" as the default integer: + lbnd = 0 if dt is bool else np.iinfo("long").min + ubnd = 2 if dt is bool else np.iinfo("long").max + 1 + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_(not hasattr(sample, 'dtype')) + assert_equal(type(sample), dt) + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup_method(self): + self.seed = 1234567890 + + def test_rand(self): + np.random.seed(self.seed) + actual = np.random.rand(3, 2) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn(self): + np.random.seed(self.seed) + actual = np.random.randn(3, 2) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randint(self): + np.random.seed(self.seed) + actual = np.random.randint(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers(self): + np.random.seed(self.seed) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = np.random.random_integers(-99, 99, size=(3, 2)) + assert_(len(w) == 1) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. 
+ with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = np.random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + assert_(len(w) == 1) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random(self): + np.random.seed(self.seed) + actual = np.random.random((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_choice_uniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False, + p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + np.random.seed(self.seed) + actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = np.random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(np.random.choice(2, replace=True))) + assert_(np.isscalar(np.random.choice(2, replace=False))) + assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) + assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) + assert_(np.isscalar(np.random.choice([1, 2], replace=True))) + assert_(np.random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, replace=True) is a) + + # Check 0-d array + s = () + assert_(not np.isscalar(np.random.choice(2, s, replace=True))) + assert_(not 
np.isscalar(np.random.choice(2, s, replace=False))) + assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True))) + assert_(np.random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(np.random.choice(6, s, replace=True).shape, s) + assert_equal(np.random.choice(6, s, replace=False).shape, s) + assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(np.random.randint(0, -10, size=0).shape, (0,)) + assert_equal(np.random.randint(10, 10, size=0).shape, (0,)) + assert_equal(np.random.choice(0, size=0).shape, (0,)) + assert_equal(np.random.choice([], size=(0,)).shape, (0,)) + assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, np.random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, np.random.choice, a, p=p) + + def test_bytes(self): + np.random.seed(self.seed) + actual = np.random.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object), ("b", np.int32)])]: + np.random.seed(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + np.random.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + np.random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + np.random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + @pytest.mark.parametrize("random", + [np.random, np.random.RandomState(), np.random.default_rng()]) + def test_shuffle_untyped_warning(self, random): + # Create a dict works like a sequence but isn't one + values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6} + with pytest.warns(UserWarning, + match="you are shuffling a 'dict' object") as rec: + random.shuffle(values) + assert "test_random" in rec[0].filename + + @pytest.mark.parametrize("random", + [np.random, np.random.RandomState(), np.random.default_rng()]) + @pytest.mark.parametrize("use_array_like", [True, 
False]) + def test_shuffle_no_object_unpacking(self, random, use_array_like): + class MyArr(np.ndarray): + pass + + items = [ + None, np.array([3]), np.float64(3), np.array(10), np.float64(7) + ] + arr = np.array(items, dtype=object) + item_ids = {id(i) for i in items} + if use_array_like: + arr = arr.view(MyArr) + + # The array was created fine, and did not modify any objects: + assert all(id(i) in item_ids for i in arr) + + if use_array_like and not isinstance(random, np.random.Generator): + # The old API gives incorrect results, but warns about it. + with pytest.warns(UserWarning, + match="Shuffling a one dimensional array.*"): + random.shuffle(arr) + else: + random.shuffle(arr) + assert all(id(i) in item_ids for i in arr) + + def test_shuffle_memoryview(self): + # gh-18273 + # allow graceful handling of memoryviews + # (treat the same as arrays) + np.random.seed(self.seed) + a = np.arange(5).data + np.random.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) + rng = np.random.RandomState(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) + rng = np.random.default_rng(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [4, 1, 0, 3, 2]) + + def test_shuffle_not_writeable(self): + a = np.zeros(3) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + np.random.shuffle(a) + + def test_beta(self): + np.random.seed(self.seed) + actual = np.random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + np.random.seed(self.seed) + actual = np.random.binomial(100, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + def test_chisquare(self): + np.random.seed(self.seed) + actual = np.random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + np.random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, np.random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, np.random.mtrand.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, 
random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_exponential(self): + np.random.seed(self.seed) + actual = np.random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(np.random.exponential(scale=0), 0) + assert_raises(ValueError, np.random.exponential, scale=-0.) + + def test_f(self): + np.random.seed(self.seed) + actual = np.random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + np.random.seed(self.seed) + actual = np.random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(np.random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + np.random.seed(self.seed) + actual = np.random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_gumbel(self): + np.random.seed(self.seed) + actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(np.random.gumbel(scale=0), 0) + assert_raises(ValueError, np.random.gumbel, scale=-0.) + + def test_hypergeometric(self): + np.random.seed(self.seed) + actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = np.random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = np.random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + np.random.seed(self.seed) + actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(np.random.laplace(scale=0), 0) + assert_raises(ValueError, np.random.laplace, scale=-0.) 
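The *_0 tests in this file all pin down the same edge case: scale=0 is accepted and collapses the distribution onto its location, while negative zero is rejected like any other negative scale. A minimal sketch of that contract against the legacy np.random API (the loc value is chosen for illustration only):

    import numpy as np

    # Zero scale is legal and degenerate: every draw equals the location.
    assert np.random.laplace(loc=3.5, scale=0) == 3.5
    assert np.random.gumbel(scale=0) == 0

    # Negative zero counts as negative, mirroring
    # assert_raises(ValueError, np.random.laplace, scale=-0.) above.
    try:
        np.random.laplace(scale=-0.0)
    except ValueError:
        pass  # expected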
+ + def test_logistic(self): + np.random.seed(self.seed) + actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + np.random.seed(self.seed) + actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(np.random.lognormal(sigma=0), 1) + assert_raises(ValueError, np.random.lognormal, sigma=-0.) + + def test_logseries(self): + np.random.seed(self.seed) + actual = np.random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_multinomial(self): + np.random.seed(self.seed) + actual = np.random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + np.random.seed(self.seed) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = np.random.multivariate_normal(mean, cov, size) + desired = np.array([[[1.463620246718631, 11.73759122771936], + [1.622445133300628, 9.771356667546383]], + [[2.154490787682787, 12.170324946056553], + [1.719909438201865, 9.230548443648306]], + [[0.689515026297799, 9.880729819607714], + [-0.023054015651998, 9.201096623542879]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = np.random.multivariate_normal(mean, cov) + desired = np.array([0.895289569463708, 9.17180864067987]) + assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + mean = [0, 0] + cov = [[1, 2], [2, 1]] + assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(np.random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, np.random.multivariate_normal, mean, cov, + check_valid='raise') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with suppress_warnings() as sup: + np.random.multivariate_normal(mean, cov) + w = sup.record(RuntimeWarning) + assert len(w) == 0 + + def test_negative_binomial(self): + np.random.seed(self.seed) + actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + assert_array_equal(actual, desired) + + def test_noncentral_chisquare(self): + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[1.47145377828516666, 
0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + np.random.seed(self.seed) + actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + np.random.seed(self.seed) + actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(np.random.normal(scale=0), 0) + assert_raises(ValueError, np.random.normal, scale=-0.) + + def test_pareto(self): + np.random.seed(self.seed) + actual = np.random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + np.random.seed(self.seed) + actual = np.random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, np.random.poisson, lamneg) + assert_raises(ValueError, np.random.poisson, [lamneg] * 10) + assert_raises(ValueError, np.random.poisson, lambig) + assert_raises(ValueError, np.random.poisson, [lambig] * 10) + + def test_power(self): + np.random.seed(self.seed) + actual = np.random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + np.random.seed(self.seed) + actual = np.random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(np.random.rayleigh(scale=0), 0) + assert_raises(ValueError, np.random.rayleigh, scale=-0.) 
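
The pareto test above compares in units in the last place (nulp) rather than decimal digits, which is the right tolerance when platform rounding differs by a few ulps at large magnitudes. A small self-contained illustration of the same helper (values chosen purely for demonstration):

    import numpy as np

    x = np.array([1.0])
    y = np.nextafter(x, 2.0)   # y is exactly one ulp above x

    # Passes: the difference is within 1 unit in the last place.
    np.testing.assert_array_almost_equal_nulp(x, y, nulp=1)

    # nulp-based tolerance scales with the float spacing at the compared
    # magnitude, unlike a fixed decimal tolerance.
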
+ + def test_standard_cauchy(self): + np.random.seed(self.seed) + actual = np.random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + np.random.seed(self.seed) + actual = np.random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + np.random.seed(self.seed) + actual = np.random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(np.random.standard_gamma(shape=0), 0) + assert_raises(ValueError, np.random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + np.random.seed(self.seed) + actual = np.random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + np.random.seed(self.seed) + actual = np.random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + np.random.seed(self.seed) + actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + np.random.seed(self.seed) + actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = np.random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, np.random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + __index__ = __int__ + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + np.random.seed(self.seed) + actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + np.random.seed(self.seed) + r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + np.testing.assert_(np.isfinite(r).all()) + + def test_wald(self): + np.random.seed(self.seed) + actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + np.random.seed(self.seed) + actual = np.random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + np.random.seed(self.seed) + assert_equal(np.random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, np.random.weibull, a=-0.) 
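
The scalar-exception test works by subclassing ndarray so that scalar conversion raises; any distribution that coerces its arguments to C scalars must let that TypeError propagate instead of swallowing it. The same trick, sketched standalone with the class name reused from the test:

    import numpy as np

    class ThrowingFloat(np.ndarray):
        def __float__(self):
            raise TypeError("refusing scalar conversion")

    bad = np.array(1.0).view(ThrowingFloat)
    try:
        np.random.uniform(bad, bad)   # argument coercion calls __float__
    except TypeError as exc:
        print("propagated:", exc)
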
+ + def test_zipf(self): + np.random.seed(self.seed) + actual = np.random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def setSeed(self): + np.random.seed(self.seed) + + # TODO: Include test for randint once it can broadcast + # Can steal the test written in PR #6938 + + def test_uniform(self): + low = [0] + high = [1] + uniform = np.random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.setSeed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = np.random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.setSeed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.setSeed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = np.random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.setSeed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.setSeed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = np.random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = np.random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = np.random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.setSeed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.setSeed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = np.random.f + desired = np.array([0.80038951638264799, + 
0.86768719635363512, + 2.7251095168386801]) + + self.setSeed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.setSeed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = np.random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.setSeed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.setSeed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = np.random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.setSeed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = np.random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.setSeed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.setSeed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = np.random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.setSeed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = np.random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.setSeed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, 
bad_kappa) + + self.setSeed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = np.random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.setSeed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = np.random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = np.random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = np.random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.setSeed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.setSeed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = np.random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.setSeed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.setSeed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = np.random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.setSeed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.setSeed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = np.random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.setSeed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + self.setSeed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = np.random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.setSeed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + 
assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = np.random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.setSeed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + + self.setSeed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + assert_raises(ValueError, wald, 0.0, 1) + assert_raises(ValueError, wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = np.random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.setSeed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + self.setSeed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + self.setSeed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = np.random.binomial + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.setSeed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = np.random.negative_binomial + desired = np.array([1, 0, 1]) + + self.setSeed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.setSeed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = np.random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + 
bad_lam_two = [max_lam * 2] + poisson = np.random.poisson + desired = np.array([1, 1, 0]) + + self.setSeed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = np.random.zipf + desired = np.array([2, 2, 1]) + + self.setSeed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = np.random.geometric + desired = np.array([2, 2, 2]) + + self.setSeed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = np.random.hypergeometric + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = np.random.logseries + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + def setup_method(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(np.random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(np.random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + 
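
check_function's contract, as exercised by the tests that follow, is that independently seeded RandomState instances produce the same streams whether the draws run concurrently or serially. A minimal standalone version of the same check (names here are illustrative, not part of the suite):

    import numpy as np
    from threading import Thread

    def fill_normal(state, out):
        out[...] = state.normal(size=out.shape)

    seeds = range(4)
    threaded = np.empty((4, 1000))
    serial = np.empty((4, 1000))

    threads = [Thread(target=fill_normal, args=(np.random.RandomState(s), row))
               for s, row in zip(seeds, threaded)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    for s, row in zip(seeds, serial):
        fill_normal(np.random.RandomState(s), row)

    # Exact equality is the general expectation; the suite itself relaxes this
    # to almost-equal only on 32-bit Windows because of x87 precision modes.
    assert np.array_equal(threaded, serial)
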
+    def test_normal(self):
+        def gen_random(state, out):
+            out[...] = state.normal(size=10000)
+        self.check_function(gen_random, sz=(10000,))
+
+    def test_exp(self):
+        def gen_random(state, out):
+            out[...] = state.exponential(scale=np.ones((100, 1000)))
+        self.check_function(gen_random, sz=(100, 1000))
+
+    def test_multinomial(self):
+        def gen_random(state, out):
+            out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
+        self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput:
+    def setup_method(self):
+        self.argOne = np.array([2])
+        self.argTwo = np.array([3])
+        self.argThree = np.array([4])
+        self.tgtShape = (1,)
+
+    def test_one_arg_funcs(self):
+        funcs = (np.random.exponential, np.random.standard_gamma,
+                 np.random.chisquare, np.random.standard_t,
+                 np.random.pareto, np.random.weibull,
+                 np.random.power, np.random.rayleigh,
+                 np.random.poisson, np.random.zipf,
+                 np.random.geometric, np.random.logseries)
+
+        probfuncs = (np.random.geometric, np.random.logseries)
+
+        for func in funcs:
+            if func in probfuncs:  # p < 1.0
+                out = func(np.array([0.5]))
+
+            else:
+                out = func(self.argOne)
+
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_two_arg_funcs(self):
+        funcs = (np.random.uniform, np.random.normal,
+                 np.random.beta, np.random.gamma,
+                 np.random.f, np.random.noncentral_chisquare,
+                 np.random.vonmises, np.random.laplace,
+                 np.random.gumbel, np.random.logistic,
+                 np.random.lognormal, np.random.wald,
+                 np.random.binomial, np.random.negative_binomial)
+
+        probfuncs = (np.random.binomial, np.random.negative_binomial)
+
+        for func in funcs:
+            if func in probfuncs:  # p <= 1
+                argTwo = np.array([0.5])
+
+            else:
+                argTwo = self.argTwo
+
+            out = func(self.argOne, argTwo)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne[0], argTwo)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne, argTwo[0])
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_randint(self):
+        itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
+                 np.int32, np.uint32, np.int64, np.uint64]
+        func = np.random.randint
+        high = np.array([1])
+        low = np.array([0])
+
+        for dt in itype:
+            out = func(low, high, dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(low[0], high, dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(low, high[0], dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_three_arg_funcs(self):
+        funcs = [np.random.noncentral_f, np.random.triangular,
+                 np.random.hypergeometric]
+
+        for func in funcs:
+            out = func(self.argOne, self.argTwo, self.argThree)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne[0], self.argTwo, self.argThree)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne, self.argTwo[0], self.argThree)
+            assert_equal(out.shape, self.tgtShape)
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_randomstate.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_randomstate.py
new file mode 100644
index 0000000..cf44885
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_randomstate.py
@@ -0,0 +1,2130 @@
+import hashlib
+import pickle
+import sys
+import warnings
+
+import pytest
+
+import numpy as np
+from numpy import random
+from numpy.random import MT19937, PCG64
+from numpy.testing import (
+    IS_WASM,
+    assert_,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_equal,
+    assert_no_warnings,
+    assert_raises,
+    assert_warns,
+    suppress_warnings,
+)
+
+INT_FUNCS = {'binomial':
(100.0, 0.6), + 'geometric': (.5,), + 'hypergeometric': (20, 20, 10), + 'logseries': (.5,), + 'multinomial': (20, np.ones(6) / 6.0), + 'negative_binomial': (100, .5), + 'poisson': (10.0,), + 'zipf': (2,), + } + +if np.iinfo(np.long).max < 2**32: + # Windows and some 32-bit platforms, e.g., ARM + INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', # noqa: E501 + 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', # noqa: E501 + 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', # noqa: E501 + 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', # noqa: E501 + 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', # noqa: E501 + 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', # noqa: E501 + 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', # noqa: E501 + 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', # noqa: E501 + } +else: + INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', # noqa: E501 + 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', # noqa: E501 + 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', # noqa: E501 + 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', # noqa: E501 + 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', # noqa: E501 + 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', # noqa: E501 + 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', # noqa: E501 + 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', # noqa: E501 + } + + +@pytest.fixture(scope='module', params=INT_FUNCS) +def int_func(request): + return (request.param, INT_FUNCS[request.param], + INT_FUNC_HASHES[request.param]) + + +@pytest.fixture +def restore_singleton_bitgen(): + """Ensures that the singleton bitgen is restored after a test""" + orig_bitgen = np.random.get_bit_generator() + yield + np.random.set_bit_generator(orig_bitgen) + + +def assert_mt19937_state_equal(a, b): + assert_equal(a['bit_generator'], b['bit_generator']) + assert_array_equal(a['state']['key'], b['state']['key']) + assert_array_equal(a['state']['pos'], b['state']['pos']) + assert_equal(a['has_gauss'], b['has_gauss']) + assert_equal(a['gauss'], b['gauss']) + + +class TestSeed: + def test_scalar(self): + s = random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, -0.5) + assert_raises(ValueError, random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, [-0.5]) + assert_raises(ValueError, random.RandomState, [-1]) + assert_raises(ValueError, random.RandomState, [4294967296]) + assert_raises(ValueError, 
random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, random.RandomState, np.array([], + dtype=np.int64)) + assert_raises(ValueError, random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + def test_cannot_seed(self): + rs = random.RandomState(PCG64(0)) + with assert_raises(TypeError): + rs.seed(1234) + + def test_invalid_initialization(self): + assert_raises(ValueError, random.RandomState, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random.seed(1432985819) + non_contig = random.multinomial(100, pvals=pvals) + random.seed(1432985819) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + def test_multinomial_n_float(self): + # Non-index integer types should gracefully truncate floats + random.multinomial(100.5, [0.2, 0.8]) + +class TestSetState: + def setup_method(self): + self.seed = 1234567890 + self.random_state = random.RandomState(self.seed) + self.state = self.random_state.get_state() + + def test_basic(self): + old = self.random_state.tomaxint(16) + self.random_state.set_state(self.state) + new = self.random_state.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. 
+ old = self.random_state.standard_normal(size=3) + self.random_state.set_state(self.state) + new = self.random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.random_state.standard_normal() + state = self.random_state.get_state() + old = self.random_state.standard_normal(size=3) + self.random_state.set_state(state) + new = self.random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.random_state.standard_normal(size=16) + self.random_state.set_state(old_state) + x2 = self.random_state.standard_normal(size=16) + self.random_state.set_state(self.state) + x3 = self.random_state.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + self.random_state.negative_binomial(0.5, 0.5) + + def test_get_state_warning(self): + rs = random.RandomState(PCG64()) + with suppress_warnings() as sup: + w = sup.record(RuntimeWarning) + state = rs.get_state() + assert_(len(w) == 1) + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' + + def test_invalid_legacy_state_setting(self): + state = self.random_state.get_state() + new_state = ('Unknown', ) + state[1:] + assert_raises(ValueError, self.random_state.set_state, new_state) + assert_raises(TypeError, self.random_state.set_state, + np.array(new_state, dtype=object)) + state = self.random_state.get_state(legacy=False) + del state['bit_generator'] + assert_raises(ValueError, self.random_state.set_state, state) + + def test_pickle(self): + self.random_state.seed(0) + self.random_state.random_sample(100) + self.random_state.standard_normal() + pickled = self.random_state.get_state(legacy=False) + assert_equal(pickled['has_gauss'], 1) + rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + unpickled = rs_unpick.get_state(legacy=False) + assert_mt19937_state_equal(pickled, unpickled) + + def test_state_setting(self): + attr_state = self.random_state.__getstate__() + self.random_state.standard_normal() + self.random_state.__setstate__(attr_state) + state = self.random_state.get_state(legacy=False) + assert_mt19937_state_equal(attr_state, state) + + def test_repr(self): + assert repr(self.random_state).startswith('RandomState(MT19937)') + + +class TestRandint: + + rfunc = random.randint + + # valid integer/boolean types + itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + 
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + try: + self.rfunc(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + random.seed() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 + + for dt in self.itype[1:]: + random.seed(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = self.rfunc(0, 6, size=1000, dtype=dt) + else: + val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random.seed(1234) + val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + @pytest.mark.skipif(np.iinfo('l').max < 2**32, + reason='Cannot test with 32-bit C long') + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[3992670689, 2438360420, 2557845020], + [4107320065, 4142558326, 3216529513], + [1605979228, 2807061240, 665605495]], + [[3211410639, 4128781000, 457175120], + [1712592594, 1282922662, 3081439808], + [3997822960, 2008322436, 1563495165]], + [[1398375547, 4269260146, 115316740], + [3414372578, 3437564012, 2112038651], + [3572980305, 2260248732, 3908238631]], + [[2561372503, 223155946, 3127879445], + [ 441282060, 3514786552, 2148440361], + [1629275283, 3479737011, 3003195987]], + [[ 412181688, 940383289, 3047321305], + [2978368172, 764731833, 2282559898], + [ 105711276, 720447391, 3596512484]]]) + for size in [None, (5, 3, 3)]: + random.seed(12345) + x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def 
test_int64_uint64_corner_case(self): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. However, + # `ubnd` is too large to be represented in np.float64, + # causing it be round down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1) + + # None of these function calls should + # generate a ValueError now. + actual = random.randint(lbnd, ubnd, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_equal(sample.dtype, np.dtype(dt)) + + for dt in (bool, int): + # The legacy random generation forces the use of "long" on this + # branch even when the input is `int` and the default dtype + # for int changed (dtype=int is also the functions default) + op_dtype = "long" if dt is int else "bool" + lbnd = 0 if dt is bool else np.iinfo(op_dtype).min + ubnd = 2 if dt is bool else np.iinfo(op_dtype).max + 1 + + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_(not hasattr(sample, 'dtype')) + assert_equal(type(sample), dt) + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup_method(self): + self.seed = 1234567890 + + def test_rand(self): + random.seed(self.seed) + actual = random.rand(3, 2) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rand_singleton(self): + random.seed(self.seed) + actual = random.rand() + desired = 0.61879477158567997 + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn(self): + random.seed(self.seed) + actual = random.randn(3, 2) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random.seed(self.seed) + actual = random.randn() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_randint(self): + random.seed(self.seed) + actual = random.randint(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers(self): + random.seed(self.seed) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = random.random_integers(-99, 99, size=(3, 2)) + assert_(len(w) == 1) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + random.seed(self.seed) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = random.random_integers(198, size=(3, 2)) + assert_(len(w) == 1) + assert_array_equal(actual, desired + 100) + + def test_tomaxint(self): + random.seed(self.seed) + rs = random.RandomState(self.seed) + actual = rs.tomaxint(size=(3, 2)) + if 
np.iinfo(np.long).max == 2147483647: + desired = np.array([[1328851649, 731237375], + [1270502067, 320041495], + [1908433478, 499156889]], dtype=np.int64) + else: + desired = np.array([[5707374374421908479, 5456764827585442327], + [8196659375100692377, 8224063923314595285], + [4220315081820346526, 7177518203184491332]], + dtype=np.int64) + + assert_equal(actual, desired) + + rs.seed(self.seed) + actual = rs.tomaxint() + assert_equal(actual, desired[0, 0]) + + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + assert_(len(w) == 1) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + typer = np.dtype('l').type + actual = random.random_integers(typer(np.iinfo('l').max), + typer(np.iinfo('l').max)) + assert_(len(w) == 1) + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random_sample(self): + random.seed(self.seed) + actual = random.random_sample((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random.seed(self.seed) + actual = random.random_sample() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_choice_uniform_replace(self): + random.seed(self.seed) + actual = random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random.seed(self.seed) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random.seed(self.seed) + actual = random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random.seed(self.seed) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random.seed(self.seed) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, 
p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = () + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.randint(0, -10, size=0).shape, (0,)) + assert_equal(random.randint(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random.seed(self.seed) + non_contig = random.choice(5, 3, p=p[::2]) + random.seed(self.seed) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_bytes(self): + random.seed(self.seed) + actual = random.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", 
object, (1,)), + ("b", np.int32, (1,))])]: + random.seed(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_invalid_objects(self): + x = np.array(3) + assert_raises(TypeError, random.shuffle, x) + + def test_permutation(self): + random.seed(self.seed) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] + assert_array_equal(actual, desired) + + random.seed(self.seed) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + random.seed(self.seed) + bad_x_str = "abcd" + assert_raises(IndexError, random.permutation, bad_x_str) + + random.seed(self.seed) + bad_x_float = 1.2 + assert_raises(IndexError, random.permutation, bad_x_float) + + integer_val = 10 + desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] + + random.seed(self.seed) + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_beta(self): + random.seed(self.seed) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random.seed(self.seed) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + random.seed(self.seed) + actual = random.binomial(100.123, .456) + desired = 37 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random.seed(self.seed) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + 
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + def test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + random.seed(self.seed) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random.seed(self.seed) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_exponential(self): + random.seed(self.seed) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + random.seed(self.seed) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random.seed(self.seed) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random.seed(self.seed) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random.seed(self.seed) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) 
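+
+    # Editorial note (illustrative, not part of the vendored upstream file):
+    # the recurring `scale=0` / `scale=-0.` pairs above probe both edges of
+    # parameter validation. A plain `scale < 0` check cannot reject -0.0,
+    # because IEEE 754 negative zero compares equal to zero; catching it
+    # presumably requires a sign-bit test, as sketched here:
+    #
+    #     >>> import numpy as np
+    #     >>> -0.0 < 0          # -> False: an ordering test misses -0.0
+    #     >>> np.signbit(-0.0)  # -> True: the sign bit still distinguishes it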
+ + def test_hypergeometric(self): + random.seed(self.seed) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random.seed(self.seed) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + random.seed(self.seed) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random.seed(self.seed) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random.seed(self.seed) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_logseries_zero(self): + assert random.logseries(0) == 1 + + @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.]) + def test_logseries_exceptions(self, value): + with np.errstate(invalid="ignore"): + with pytest.raises(ValueError): + random.logseries(value) + with pytest.raises(ValueError): + # contiguous path: + random.logseries(np.array([value] * 10)) + with pytest.raises(ValueError): + # non-contiguous path: + random.logseries(np.array([value] * 10)[::2]) + + def test_multinomial(self): + random.seed(self.seed) + actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + random.seed(self.seed) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = random.multivariate_normal(mean, cov, size) + desired = np.array([[[1.463620246718631, 11.73759122771936], + [1.622445133300628, 9.771356667546383]], + [[2.154490787682787, 12.170324946056553], + [1.719909438201865, 9.230548443648306]], + [[0.689515026297799, 9.880729819607714], + [-0.023054015651998, 9.201096623542879]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = random.multivariate_normal(mean, cov) + desired = np.array([0.895289569463708, 9.17180864067987]) + assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + mean = [0, 0] + cov = [[1, 2], [2, 1]] + assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with suppress_warnings() as sup: + random.multivariate_normal(mean, cov) + w = sup.record(RuntimeWarning) + assert len(w) == 0 + + mu = np.zeros(2) + cov = np.eye(2) + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='other') + assert_raises(ValueError, random.multivariate_normal, + np.zeros((2, 1, 1)), cov) + assert_raises(ValueError, random.multivariate_normal, + mu, np.empty((3, 2))) + assert_raises(ValueError, random.multivariate_normal, + mu, np.eye(3)) + + def test_negative_binomial(self): + random.seed(self.seed) + actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + assert_array_equal(actual, desired) + + def test_negative_binomial_exceptions(self): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.negative_binomial, 100, np.nan) + assert_raises(ValueError, random.negative_binomial, 100, + [np.nan] * 10) + + def test_noncentral_chisquare(self): + random.seed(self.seed) + actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[1.47145377828516666, 0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + random.seed(self.seed) + actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + random.seed(self.seed) + actual = random.noncentral_f(dfnum=5, 
dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random.seed(self.seed) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random.seed(self.seed) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) + + def test_pareto(self): + random.seed(self.seed) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random.seed(self.seed) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random.seed(self.seed) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random.seed(self.seed) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) 
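+
+    # Editorial note (illustrative, not part of the vendored upstream file):
+    # the nulp tolerance used for test_pareto above compares values in units
+    # of the last place, i.e. how many representable doubles sit between the
+    # two values, so even nulp=30 is a near-machine-precision comparison:
+    #
+    #     >>> import numpy as np
+    #     >>> np.spacing(1.0)  # width of one ulp at 1.0, roughly 2.22e-16
+    #     >>> np.testing.assert_array_almost_equal_nulp(
+    #     ...     1.0, 1.0 + 2 * np.spacing(1.0), nulp=3)  # 2 ulp apart: passes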
+ + def test_standard_cauchy(self): + random.seed(self.seed) + actual = random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + random.seed(self.seed) + actual = random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + random.seed(self.seed) + actual = random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(random.standard_gamma(shape=0), 0) + assert_raises(ValueError, random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + random.seed(self.seed) + actual = random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn_singleton(self): + random.seed(self.seed) + actual = random.randn() + desired = np.array(1.34016345771863121) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + random.seed(self.seed) + actual = random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + random.seed(self.seed) + actual = random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + random.seed(self.seed) + actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + random.seed(self.seed) + actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_large(self): + # guard against changes in RandomState when Generator is fixed + random.seed(self.seed) + actual = random.vonmises(mu=0., kappa=1e7, size=3) + desired = np.array([4.634253748521111e-04, + 3.558873596114509e-04, + -2.337119622577433e-04]) + assert_array_almost_equal(actual, desired, decimal=8) + + def test_vonmises_nan(self): + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + def test_wald(self): + random.seed(self.seed) + actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + random.seed(self.seed) + actual = random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random.seed(self.seed) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) 
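+
+    # Editorial note (a hypothetical sketch, not part of the vendored file):
+    # the ThrowingFloat/ThrowingInteger classes in
+    # test_scalar_exception_propagation work because scalar parameters are
+    # coerced via float()/int(), hooks that an ndarray subclass can override,
+    # so any object with a raising __float__ exercises the same path:
+    #
+    #     >>> import numpy as np
+    #     >>> class BadFloat(np.ndarray):  # hypothetical helper name
+    #     ...     def __float__(self):
+    #     ...         raise TypeError
+    #     >>> bad = np.array(1.0).view(BadFloat)
+    #     >>> np.random.uniform(bad, bad)  # propagates the TypeError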
+ + def test_zipf(self): + random.seed(self.seed) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def set_seed(self): + random.seed(self.seed) + + def test_uniform(self): + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.set_seed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.set_seed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.set_seed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.set_seed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.set_seed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.set_seed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.set_seed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.set_seed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = random.f + desired = np.array([0.80038951638264799, + 0.86768719635363512, + 2.7251095168386801]) + + self.set_seed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, 
desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.set_seed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.set_seed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.set_seed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.set_seed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.set_seed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.set_seed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.set_seed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.set_seed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.set_seed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, random.standard_t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.set_seed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + + 
self.set_seed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.set_seed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.set_seed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.set_seed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.set_seed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.set_seed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.set_seed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.set_seed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.set_seed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.set_seed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) + + self.set_seed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, 
random.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.set_seed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.set_seed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, random.wald, bad_mean * 3, scale) + assert_raises(ValueError, random.wald, mean * 3, bad_scale) + + self.set_seed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + assert_raises(ValueError, wald, 0.0, 1) + assert_raises(ValueError, wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.set_seed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + self.set_seed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + self.set_seed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + assert_raises(ValueError, triangular, 10., 0., 20.) + assert_raises(ValueError, triangular, 10., 25., 20.) + assert_raises(ValueError, triangular, 10., 10., 10.) 
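+
+    # Editorial note (illustrative, not part of the vendored upstream file):
+    # the `param * 3` idiom used throughout TestBroadcast is plain Python
+    # list repetition, so a length-3 argument broadcasts against the
+    # length-1 ones and draws three variates from one seeded stream; that is
+    # why every argument orientation must reproduce the same `desired`
+    # vector:
+    #
+    #     >>> [0.5] * 3                       # -> [0.5, 0.5, 0.5]
+    #     >>> import numpy as np
+    #     >>> np.broadcast_shapes((3,), (1,)) # -> (3,)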
+ + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = random.binomial + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.set_seed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = random.negative_binomial + desired = np.array([1, 0, 1]) + + self.set_seed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.set_seed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + bad_lam_two = [max_lam * 2] + poisson = random.poisson + desired = np.array([1, 1, 0]) + + self.set_seed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = random.zipf + desired = np.array([2, 2, 1]) + + self.set_seed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = random.geometric + desired = np.array([2, 2, 2]) + + self.set_seed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = random.hypergeometric + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.set_seed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.set_seed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, 
hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, hypergeom, -1, 10, 20) + assert_raises(ValueError, hypergeom, 10, -1, 20) + assert_raises(ValueError, hypergeom, 10, 10, 0) + assert_raises(ValueError, hypergeom, 10, 10, 25) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = random.logseries + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + def setup_method(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) + + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup_method(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (random.exponential, random.standard_gamma, + random.chisquare, random.standard_t, + random.pareto, random.weibull, + random.power, random.rayleigh, + random.poisson, random.zipf, + random.geometric, random.logseries) + + probfuncs = (random.geometric, random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (random.uniform, random.normal, + random.beta, random.gamma, + random.f, random.noncentral_chisquare, + random.vonmises, random.laplace, + random.gumbel, random.logistic, + random.lognormal, random.wald, + random.binomial, random.negative_binomial) + + probfuncs = (random.binomial, random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [random.noncentral_f, random.triangular, + random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) + + +# Ensure returned array dtype is correct for platform +def test_integer_dtype(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + actual = f(*args, size=2) + assert_(actual.dtype == np.dtype('l')) + + +def test_integer_repeat(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + val = f(*args, size=1000000) + if sys.byteorder != 'little': + val = val.byteswap() + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(res == sha256) + + +def test_broadcast_size_error(): + # GH-16833 + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) + + +def test_randomstate_ctor_old_style_pickle(): + rs = np.random.RandomState(MT19937(0)) + rs.standard_normal(1) + # Directly call reduce which is used in pickling + ctor, args, state_a = rs.__reduce__() + # Simulate unpickling an old pickle that only has the name + assert args[0].__class__.__name__ == "MT19937" + b = ctor(*("MT19937",)) + b.set_state(state_a) + state_b = b.get_state(legacy=False) + + assert_equal(state_a['bit_generator'], state_b['bit_generator']) + assert_array_equal(state_a['state']['key'], state_b['state']['key']) + assert_array_equal(state_a['state']['pos'], state_b['state']['pos']) + assert_equal(state_a['has_gauss'], state_b['has_gauss']) + assert_equal(state_a['gauss'], state_b['gauss']) + + +def test_hot_swap(restore_singleton_bitgen): + # GH 21808 + def_bg = np.random.default_rng(0) + bg = 
def_bg.bit_generator
+    np.random.set_bit_generator(bg)
+    assert isinstance(np.random.mtrand._rand._bit_generator, type(bg))
+
+    second_bg = np.random.get_bit_generator()
+    assert bg is second_bg
+
+
+def test_seed_alt_bit_gen(restore_singleton_bitgen):
+    # GH 21808
+    bg = PCG64(0)
+    np.random.set_bit_generator(bg)
+    state = np.random.get_state(legacy=False)
+    np.random.seed(1)
+    new_state = np.random.get_state(legacy=False)
+    print(state)
+    print(new_state)
+    assert state["bit_generator"] == "PCG64"
+    assert state["state"]["state"] != new_state["state"]["state"]
+    assert state["state"]["inc"] != new_state["state"]["inc"]
+
+
+def test_state_error_alt_bit_gen(restore_singleton_bitgen):
+    # GH 21808
+    state = np.random.get_state()
+    bg = PCG64(0)
+    np.random.set_bit_generator(bg)
+    with pytest.raises(ValueError, match="state must be for a PCG64"):
+        np.random.set_state(state)
+
+
+def test_swap_worked(restore_singleton_bitgen):
+    # GH 21808
+    np.random.seed(98765)
+    vals = np.random.randint(0, 2 ** 30, 10)
+    bg = PCG64(0)
+    state = bg.state
+    np.random.set_bit_generator(bg)
+    state_direct = np.random.get_state(legacy=False)
+    for field in state:
+        assert state[field] == state_direct[field]
+    np.random.seed(98765)
+    pcg_vals = np.random.randint(0, 2 ** 30, 10)
+    assert not np.all(vals == pcg_vals)
+    new_state = bg.state
+    assert new_state["state"]["state"] != state["state"]["state"]
+    # re-seeding re-derives the increment too, so compare with the old state
+    assert new_state["state"]["inc"] != state["state"]["inc"]
+
+
+def test_swapped_singleton_against_direct(restore_singleton_bitgen):
+    np.random.set_bit_generator(PCG64(98765))
+    singleton_vals = np.random.randint(0, 2 ** 30, 10)
+    rg = np.random.RandomState(PCG64(98765))
+    non_singleton_vals = rg.randint(0, 2 ** 30, 10)
+    assert_equal(non_singleton_vals, singleton_vals)
diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_randomstate_regression.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_randomstate_regression.py
new file mode 100644
index 0000000..6ccc618
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_randomstate_regression.py
@@ -0,0 +1,217 @@
+import sys
+
+import pytest
+
+import numpy as np
+from numpy import random
+from numpy.testing import (
+    assert_,
+    assert_array_equal,
+    assert_raises,
+)
+
+
+class TestRegression:
+
+    def test_VonMises_range(self):
+        # Make sure generated random variables are in [-pi, pi].
+        # Regression test for ticket #986.
+        for mu in np.linspace(-7., 7., 5):
+            r = random.vonmises(mu, 1, 50)
+            assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+    def test_hypergeometric_range(self):
+        # Test for ticket #921
+        assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
+        assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
+
+        # Test for ticket #5623
+        args = [
+            (2**20 - 2, 2**20 - 2, 2**20 - 2),  # Check for 32-bit systems
+        ]
+        is_64bits = sys.maxsize > 2**32
+        if is_64bits and sys.platform != 'win32':
+            # Check for 64-bit systems
+            args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
+        for arg in args:
+            assert_(random.hypergeometric(*arg) > 0)
+
+    def test_logseries_convergence(self):
+        # Test for ticket #923
+        N = 1000
+        random.seed(0)
+        rvsn = random.logseries(0.8, size=N)
+        # these two frequency counts should be close to theoretical
+        # numbers with this large sample
+        # theoretical large N result is 0.49706795
+        freq = np.sum(rvsn == 1) / N
+        msg = f'Frequency was {freq:f}, should be > 0.45'
+        assert_(freq > 0.45, msg)
+        # theoretical large N result is 0.19882718
+        freq = np.sum(rvsn == 2) / N
+        msg = f'Frequency was {freq:f}, should be < 0.23'
+        assert_(freq < 0.23, msg)
+
+    def test_shuffle_mixed_dimension(self):
+        # Test for trac ticket #2074
+        for t in [[1, 2, 3, None],
+                  [(1, 1), (2, 2), (3, 3), None],
+                  [1, (2, 2), (3, 3), None],
+                  [(1, 1), 2, 3, None]]:
+            random.seed(12345)
+            shuffled = list(t)
+            random.shuffle(shuffled)
+            expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
+            assert_array_equal(np.array(shuffled, dtype=object), expected)
+
+    def test_call_within_randomstate(self):
+        # Check that custom RandomState does not call into global state
+        m = random.RandomState()
+        res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
+        for i in range(3):
+            random.seed(i)
+            m.seed(4321)
+            # If m.state is not honored, the result will change
+            assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res)
+
+    def test_multivariate_normal_size_types(self):
+        # Test for multivariate_normal issue with 'size' argument.
+        # Check that the multivariate_normal size argument can be a
+        # numpy integer.
+        random.multivariate_normal([0], [[0]], size=1)
+        random.multivariate_normal([0], [[0]], size=np.int_(1))
+        random.multivariate_normal([0], [[0]], size=np.int64(1))
+
+    def test_beta_small_parameters(self):
+        # Test that beta with small a and b parameters does not produce
+        # NaNs due to roundoff errors causing 0 / 0, gh-5851
+        random.seed(1234567890)
+        x = random.beta(0.0001, 0.0001, size=100)
+        assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
+
+    def test_choice_sum_of_probs_tolerance(self):
+        # The sum of probs should be 1.0 with some tolerance.
+        # For low precision dtypes the tolerance was too tight.
+        # See numpy github issue 6123.
+        random.seed(1234)
+        a = [1, 2, 3]
+        counts = [4, 4, 2]
+        for dt in np.float16, np.float32, np.float64:
+            probs = np.array(counts, dtype=dt) / sum(counts)
+            c = random.choice(a, p=probs)
+            assert_(c in a)
+            assert_raises(ValueError, random.choice, a, p=probs * 0.9)
+
+    def test_shuffle_of_array_of_different_length_strings(self):
+        # Test that permuting an array of different length strings
+        # will not cause a segfault on garbage collection
+        # Tests gh-7710
+        random.seed(1234)
+
+        a = np.array(['a', 'a' * 1000])
+
+        for _ in range(100):
+            random.shuffle(a)
+
+        # Force Garbage Collection - should not segfault.
+        import gc
+        gc.collect()
+
+    def test_shuffle_of_array_of_objects(self):
+        # Test that permuting an array of objects will not cause
+        # a segfault on garbage collection.
+        # See gh-7719
+        random.seed(1234)
+        a = np.array([np.arange(1), np.arange(4)], dtype=object)
+
+        for _ in range(1000):
+            random.shuffle(a)
+
+        # Force Garbage Collection - should not segfault.
+        import gc
+        gc.collect()
+
+    def test_permutation_subclass(self):
+        class N(np.ndarray):
+            pass
+
+        random.seed(1)
+        orig = np.arange(3).view(N)
+        perm = random.permutation(orig)
+        assert_array_equal(perm, np.array([0, 2, 1]))
+        assert_array_equal(orig, np.arange(3).view(N))
+
+        class M:
+            a = np.arange(5)
+
+            def __array__(self, dtype=None, copy=None):
+                return self.a
+
+        random.seed(1)
+        m = M()
+        perm = random.permutation(m)
+        assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
+        assert_array_equal(m.__array__(), np.arange(5))
+
+    def test_warns_byteorder(self):
+        # GH 13159
+        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
+        with pytest.deprecated_call(match='non-native byteorder is not'):
+            random.randint(0, 200, size=10, dtype=other_byteord_dt)
+
+    def test_named_argument_initialization(self):
+        # GH 13669
+        rs1 = np.random.RandomState(123456789)
+        rs2 = np.random.RandomState(seed=123456789)
+        assert rs1.randint(0, 100) == rs2.randint(0, 100)
+
+    def test_choice_return_dtype(self):
+        # GH 9867; the NumPy default integer dtype has long since changed.
+        c = np.random.choice(10, p=[.1] * 10, size=2)
+        assert c.dtype == np.dtype(np.long)
+        c = np.random.choice(10, p=[.1] * 10, replace=False, size=2)
+        assert c.dtype == np.dtype(np.long)
+        c = np.random.choice(10, size=2)
+        assert c.dtype == np.dtype(np.long)
+        c = np.random.choice(10, replace=False, size=2)
+        assert c.dtype == np.dtype(np.long)
+
+    @pytest.mark.skipif(np.iinfo('l').max < 2**32,
+                        reason='Cannot test with 32-bit C long')
+    def test_randint_117(self):
+        # GH 14189
+        random.seed(0)
+        expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
+                             2588848963, 3684848379, 2340255427, 3638918503,
+                             1819583497, 2678185683], dtype='int64')
+        actual = random.randint(2**32, size=10)
+        assert_array_equal(actual, expected)
+
+    def test_p_zero_stream(self):
+        # Regression test for gh-14522. Ensure that future versions
+        # generate the same variates as version 1.16.
+        np.random.seed(12345)
+        assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
+                           [0, 0, 0, 1, 1])
+
+    def test_n_zero_stream(self):
+        # Regression test for gh-14522. Ensure that future versions
+        # generate the same variates as version 1.16.
+ np.random.seed(8675309) + expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) + assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + expected) + + +def test_multinomial_empty(): + # gh-20483 + # Ensure that empty p-vals are correctly handled + assert random.multinomial(10, []).shape == (0,) + assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0) + + +def test_multinomial_1d_pval(): + # gh-20483 + with pytest.raises(TypeError, match="pvals must be a 1-d"): + random.multinomial(10, 0.3) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_regression.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_regression.py new file mode 100644 index 0000000..39b7d8c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_regression.py @@ -0,0 +1,152 @@ +import sys + +import numpy as np +from numpy import random +from numpy.testing import ( + assert_, + assert_array_equal, + assert_raises, +) + + +class TestRegression: + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = random.mtrand.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(np.random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + np.random.seed(0) + rvsn = np.random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + np.random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = np.random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + np.random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. 
+ np.random.multivariate_normal([0], [[0]], size=1) + np.random.multivariate_normal([0], [[0]], size=np.int_(1)) + np.random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + np.random.seed(1234567890) + x = np.random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + np.random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = np.random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, np.random.choice, a, p=probs * 0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + np.random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + np.random.seed(1234) + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + np.random.seed(1) + orig = np.arange(3).view(N) + perm = np.random.permutation(orig) + assert_array_equal(perm, np.array([0, 2, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self, dtype=None, copy=None): + return self.a + + np.random.seed(1) + m = M() + perm = np.random.permutation(m) + assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) + assert_array_equal(m.__array__(), np.arange(5)) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_seed_sequence.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_seed_sequence.py new file mode 100644 index 0000000..87ae4ff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_seed_sequence.py @@ -0,0 +1,79 @@ +import numpy as np +from numpy.random import SeedSequence +from numpy.testing import assert_array_compare, assert_array_equal + + +def test_reference_data(): + """ Check that SeedSequence generates data the same as the C++ reference. 
+ + https://gist.github.com/imneme/540829265469e673d045 + """ + inputs = [ + [3735928559, 195939070, 229505742, 305419896], + [3668361503, 4165561550, 1661411377, 3634257570], + [164546577, 4166754639, 1765190214, 1303880213], + [446610472, 3941463886, 522937693, 1882353782], + [1864922766, 1719732118, 3882010307, 1776744564], + [4141682960, 3310988675, 553637289, 902896340], + [1134851934, 2352871630, 3699409824, 2648159817], + [1240956131, 3107113773, 1283198141, 1924506131], + [2669565031, 579818610, 3042504477, 2774880435], + [2766103236, 2883057919, 4029656435, 862374500], + ] + outputs = [ + [3914649087, 576849849, 3593928901, 2229911004], + [2240804226, 3691353228, 1365957195, 2654016646], + [3562296087, 3191708229, 1147942216, 3726991905], + [1403443605, 3591372999, 1291086759, 441919183], + [1086200464, 2191331643, 560336446, 3658716651], + [3249937430, 2346751812, 847844327, 2996632307], + [2584285912, 4034195531, 3523502488, 169742686], + [959045797, 3875435559, 1886309314, 359682705], + [3978441347, 432478529, 3223635119, 138903045], + [296367413, 4262059219, 13109864, 3283683422], + ] + outputs64 = [ + [2477551240072187391, 9577394838764454085], + [15854241394484835714, 11398914698975566411], + [13708282465491374871, 16007308345579681096], + [15424829579845884309, 1898028439751125927], + [9411697742461147792, 15714068361935982142], + [10079222287618677782, 12870437757549876199], + [17326737873898640088, 729039288628699544], + [16644868984619524261, 1544825456798124994], + [1857481142255628931, 596584038813451439], + [18305404959516669237, 14103312907920476776], + ] + for seed, expected, expected64 in zip(inputs, outputs, outputs64): + expected = np.array(expected, dtype=np.uint32) + ss = SeedSequence(seed) + state = ss.generate_state(len(expected)) + assert_array_equal(state, expected) + state64 = ss.generate_state(len(expected64), dtype=np.uint64) + assert_array_equal(state64, expected64) + + +def test_zero_padding(): + """ Ensure that the implicit zero-padding does not cause problems. + """ + # Ensure that large integers are inserted in little-endian fashion to avoid + # trailing 0s. + ss0 = SeedSequence(42) + ss1 = SeedSequence(42 << 32) + assert_array_compare( + np.not_equal, + ss0.generate_state(4), + ss1.generate_state(4)) + + # Ensure backwards compatibility with the original 0.17 release for small + # integers and no spawn key. + expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988], + dtype=np.uint32) + assert_array_equal(SeedSequence(42).generate_state(4), expected42) + + # Regression test for gh-16539 to ensure that the implicit 0s don't + # conflict with spawn keys. 
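+    # (Clarifying note, added: a spawn key is the tuple that
+    # SeedSequence.spawn() assigns to children, e.g.
+    # SeedSequence(42).spawn(1)[0] carries spawn_key == (0,), and its stream
+    # must differ from the plain SeedSequence(42) stream checked here.)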
+ assert_array_compare( + np.not_equal, + SeedSequence(42, spawn_key=(0,)).generate_state(4), + expected42) diff --git a/.venv/lib/python3.12/site-packages/numpy/random/tests/test_smoke.py b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_smoke.py new file mode 100644 index 0000000..6f07443 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/random/tests/test_smoke.py @@ -0,0 +1,819 @@ +import pickle +from functools import partial + +import pytest + +import numpy as np +from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox +from numpy.testing import assert_, assert_array_equal, assert_equal + + +@pytest.fixture(scope='module', + params=(np.bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64)) +def dtype(request): + return request.param + + +def params_0(f): + val = f() + assert_(np.isscalar(val)) + val = f(10) + assert_(val.shape == (10,)) + val = f((10, 10)) + assert_(val.shape == (10, 10)) + val = f((10, 10, 10)) + assert_(val.shape == (10, 10, 10)) + val = f(size=(5, 5)) + assert_(val.shape == (5, 5)) + + +def params_1(f, bounded=False): + a = 5.0 + b = np.arange(2.0, 12.0) + c = np.arange(2.0, 102.0).reshape((10, 10)) + d = np.arange(2.0, 1002.0).reshape((10, 10, 10)) + e = np.array([2.0, 3.0]) + g = np.arange(2.0, 12.0).reshape((1, 10, 1)) + if bounded: + a = 0.5 + b = b / (1.5 * b.max()) + c = c / (1.5 * c.max()) + d = d / (1.5 * d.max()) + e = e / (1.5 * e.max()) + g = g / (1.5 * g.max()) + + # Scalar + f(a) + # Scalar - size + f(a, size=(10, 10)) + # 1d + f(b) + # 2d + f(c) + # 3d + f(d) + # 1d size + f(b, size=10) + # 2d - size - broadcast + f(e, size=(10, 2)) + # 3d - size + f(g, size=(10, 10, 10)) + + +def comp_state(state1, state2): + identical = True + if isinstance(state1, dict): + for key in state1: + identical &= comp_state(state1[key], state2[key]) + elif type(state1) != type(state2): + identical &= type(state1) == type(state2) + elif (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( + state2, (list, tuple, np.ndarray))): + for s1, s2 in zip(state1, state2): + identical &= comp_state(s1, s2) + else: + identical &= state1 == state2 + return identical + + +def warmup(rg, n=None): + if n is None: + n = 11 + np.random.randint(0, 20) + rg.standard_normal(n) + rg.standard_normal(n) + rg.standard_normal(n, dtype=np.float32) + rg.standard_normal(n, dtype=np.float32) + rg.integers(0, 2 ** 24, n, dtype=np.uint64) + rg.integers(0, 2 ** 48, n, dtype=np.uint64) + rg.standard_gamma(11.0, n) + rg.standard_gamma(11.0, n, dtype=np.float32) + rg.random(n, dtype=np.float64) + rg.random(n, dtype=np.float32) + + +class RNG: + @classmethod + def setup_class(cls): + # Overridden in test classes. 
Place holder to silence IDE noise + cls.bit_generator = PCG64 + cls.advance = None + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + @classmethod + def _extra_setup(cls): + cls.vec_1d = np.arange(2.0, 102.0) + cls.vec_2d = np.arange(2.0, 102.0)[None, :] + cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + cls.seed_error = TypeError + + def _reset_state(self): + self.rg.bit_generator.state = self.initial_state + + def test_init(self): + rg = Generator(self.bit_generator()) + state = rg.bit_generator.state + rg.standard_normal(1) + rg.standard_normal(1) + rg.bit_generator.state = state + new_state = rg.bit_generator.state + assert_(comp_state(state, new_state)) + + def test_advance(self): + state = self.rg.bit_generator.state + if hasattr(self.rg.bit_generator, 'advance'): + self.rg.bit_generator.advance(self.advance) + assert_(not comp_state(state, self.rg.bit_generator.state)) + else: + bitgen_name = self.rg.bit_generator.__class__.__name__ + pytest.skip(f'Advance is not supported by {bitgen_name}') + + def test_jump(self): + state = self.rg.bit_generator.state + if hasattr(self.rg.bit_generator, 'jumped'): + bit_gen2 = self.rg.bit_generator.jumped() + jumped_state = bit_gen2.state + assert_(not comp_state(state, jumped_state)) + self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) + self.rg.bit_generator.state = state + bit_gen3 = self.rg.bit_generator.jumped() + rejumped_state = bit_gen3.state + assert_(comp_state(jumped_state, rejumped_state)) + else: + bitgen_name = self.rg.bit_generator.__class__.__name__ + if bitgen_name not in ('SFC64',): + raise AttributeError(f'no "jumped" in {bitgen_name}') + pytest.skip(f'Jump is not supported by {bitgen_name}') + + def test_uniform(self): + r = self.rg.uniform(-1.0, 0.0, size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + + def test_uniform_array(self): + r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + r = self.rg.uniform(np.array([-1.0] * 10), + np.array([0.0] * 10), size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + + def test_random(self): + assert_(len(self.rg.random(10)) == 10) + params_0(self.rg.random) + + def test_standard_normal_zig(self): + assert_(len(self.rg.standard_normal(10)) == 10) + + def test_standard_normal(self): + assert_(len(self.rg.standard_normal(10)) == 10) + params_0(self.rg.standard_normal) + + def test_standard_gamma(self): + assert_(len(self.rg.standard_gamma(10, 10)) == 10) + assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(self.rg.standard_gamma) + + def test_standard_exponential(self): + assert_(len(self.rg.standard_exponential(10)) == 10) + params_0(self.rg.standard_exponential) + + def test_standard_exponential_float(self): + randoms = self.rg.standard_exponential(10, dtype='float32') + assert_(len(randoms) == 10) + assert randoms.dtype == np.float32 + params_0(partial(self.rg.standard_exponential, dtype='float32')) + + def test_standard_exponential_float_log(self): + randoms = self.rg.standard_exponential(10, dtype='float32', + method='inv') + assert_(len(randoms) == 10) + assert randoms.dtype == np.float32 + params_0(partial(self.rg.standard_exponential, 
dtype='float32', + method='inv')) + + def test_standard_cauchy(self): + assert_(len(self.rg.standard_cauchy(10)) == 10) + params_0(self.rg.standard_cauchy) + + def test_standard_t(self): + assert_(len(self.rg.standard_t(10, 10)) == 10) + params_1(self.rg.standard_t) + + def test_binomial(self): + assert_(self.rg.binomial(10, .5) >= 0) + assert_(self.rg.binomial(1000, .5) >= 0) + + def test_reset_state(self): + state = self.rg.bit_generator.state + int_1 = self.rg.integers(2**31) + self.rg.bit_generator.state = state + int_2 = self.rg.integers(2**31) + assert_(int_1 == int_2) + + def test_entropy_init(self): + rg = Generator(self.bit_generator()) + rg2 = Generator(self.bit_generator()) + assert_(not comp_state(rg.bit_generator.state, + rg2.bit_generator.state)) + + def test_seed(self): + rg = Generator(self.bit_generator(*self.seed)) + rg2 = Generator(self.bit_generator(*self.seed)) + rg.random() + rg2.random() + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_reset_state_gauss(self): + rg = Generator(self.bit_generator(*self.seed)) + rg.standard_normal() + state = rg.bit_generator.state + n1 = rg.standard_normal(size=10) + rg2 = Generator(self.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.standard_normal(size=10) + assert_array_equal(n1, n2) + + def test_reset_state_uint32(self): + rg = Generator(self.bit_generator(*self.seed)) + rg.integers(0, 2 ** 24, 120, dtype=np.uint32) + state = rg.bit_generator.state + n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) + rg2 = Generator(self.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) + assert_array_equal(n1, n2) + + def test_reset_state_float(self): + rg = Generator(self.bit_generator(*self.seed)) + rg.random(dtype='float32') + state = rg.bit_generator.state + n1 = rg.random(size=10, dtype='float32') + rg2 = Generator(self.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.random(size=10, dtype='float32') + assert_((n1 == n2).all()) + + def test_shuffle(self): + original = np.arange(200, 0, -1) + permuted = self.rg.permutation(original) + assert_((original != permuted).any()) + + def test_permutation(self): + original = np.arange(200, 0, -1) + permuted = self.rg.permutation(original) + assert_((original != permuted).any()) + + def test_beta(self): + vals = self.rg.beta(2.0, 2.0, 10) + assert_(len(vals) == 10) + vals = self.rg.beta(np.array([2.0] * 10), 2.0) + assert_(len(vals) == 10) + vals = self.rg.beta(2.0, np.array([2.0] * 10)) + assert_(len(vals) == 10) + vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) + assert_(len(vals) == 10) + vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) + assert_(vals.shape == (10, 10)) + + def test_bytes(self): + vals = self.rg.bytes(10) + assert_(len(vals) == 10) + + def test_chisquare(self): + vals = self.rg.chisquare(2.0, 10) + assert_(len(vals) == 10) + params_1(self.rg.chisquare) + + def test_exponential(self): + vals = self.rg.exponential(2.0, 10) + assert_(len(vals) == 10) + params_1(self.rg.exponential) + + def test_f(self): + vals = self.rg.f(3, 1000, 10) + assert_(len(vals) == 10) + + def test_gamma(self): + vals = self.rg.gamma(3, 2, 10) + assert_(len(vals) == 10) + + def test_geometric(self): + vals = self.rg.geometric(0.5, 10) + assert_(len(vals) == 10) + params_1(self.rg.exponential, bounded=True) + + def test_gumbel(self): + vals = self.rg.gumbel(2.0, 2.0, 10) + assert_(len(vals) == 10) + + def test_laplace(self): + vals = self.rg.laplace(2.0, 2.0, 10) 
+ assert_(len(vals) == 10) + + def test_logitic(self): + vals = self.rg.logistic(2.0, 2.0, 10) + assert_(len(vals) == 10) + + def test_logseries(self): + vals = self.rg.logseries(0.5, 10) + assert_(len(vals) == 10) + + def test_negative_binomial(self): + vals = self.rg.negative_binomial(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_noncentral_chisquare(self): + vals = self.rg.noncentral_chisquare(10, 2, 10) + assert_(len(vals) == 10) + + def test_noncentral_f(self): + vals = self.rg.noncentral_f(3, 1000, 2, 10) + assert_(len(vals) == 10) + vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2) + assert_(len(vals) == 10) + vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2) + assert_(len(vals) == 10) + vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10)) + assert_(len(vals) == 10) + + def test_normal(self): + vals = self.rg.normal(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_pareto(self): + vals = self.rg.pareto(3.0, 10) + assert_(len(vals) == 10) + + def test_poisson(self): + vals = self.rg.poisson(10, 10) + assert_(len(vals) == 10) + vals = self.rg.poisson(np.array([10] * 10)) + assert_(len(vals) == 10) + params_1(self.rg.poisson) + + def test_power(self): + vals = self.rg.power(0.2, 10) + assert_(len(vals) == 10) + + def test_integers(self): + vals = self.rg.integers(10, 20, 10) + assert_(len(vals) == 10) + + def test_rayleigh(self): + vals = self.rg.rayleigh(0.2, 10) + assert_(len(vals) == 10) + params_1(self.rg.rayleigh, bounded=True) + + def test_vonmises(self): + vals = self.rg.vonmises(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_wald(self): + vals = self.rg.wald(1.0, 1.0, 10) + assert_(len(vals) == 10) + + def test_weibull(self): + vals = self.rg.weibull(1.0, 10) + assert_(len(vals) == 10) + + def test_zipf(self): + vals = self.rg.zipf(10, 10) + assert_(len(vals) == 10) + vals = self.rg.zipf(self.vec_1d) + assert_(len(vals) == 100) + vals = self.rg.zipf(self.vec_2d) + assert_(vals.shape == (1, 100)) + vals = self.rg.zipf(self.mat) + assert_(vals.shape == (100, 100)) + + def test_hypergeometric(self): + vals = self.rg.hypergeometric(25, 25, 20) + assert_(np.isscalar(vals)) + vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20) + assert_(vals.shape == (10,)) + + def test_triangular(self): + vals = self.rg.triangular(-5, 0, 5) + assert_(np.isscalar(vals)) + vals = self.rg.triangular(-5, np.array([0] * 10), 5) + assert_(vals.shape == (10,)) + + def test_multivariate_normal(self): + mean = [0, 0] + cov = [[1, 0], [0, 100]] # diagonal covariance + x = self.rg.multivariate_normal(mean, cov, 5000) + assert_(x.shape == (5000, 2)) + x_zig = self.rg.multivariate_normal(mean, cov, 5000) + assert_(x.shape == (5000, 2)) + x_inv = self.rg.multivariate_normal(mean, cov, 5000) + assert_(x.shape == (5000, 2)) + assert_((x_zig != x_inv).any()) + + def test_multinomial(self): + vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3]) + assert_(vals.shape == (2,)) + vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) + assert_(vals.shape == (10, 2)) + + def test_dirichlet(self): + s = self.rg.dirichlet((10, 5, 3), 20) + assert_(s.shape == (20, 3)) + + def test_pickle(self): + pick = pickle.dumps(self.rg) + unpick = pickle.loads(pick) + assert_(type(self.rg) == type(unpick)) + assert_(comp_state(self.rg.bit_generator.state, + unpick.bit_generator.state)) + + pick = pickle.dumps(self.rg) + unpick = pickle.loads(pick) + assert_(type(self.rg) == type(unpick)) + assert_(comp_state(self.rg.bit_generator.state, + unpick.bit_generator.state)) + + def 
test_seed_array(self): + if self.seed_vector_bits is None: + bitgen_name = self.bit_generator.__name__ + pytest.skip(f'Vector seeding is not supported by {bitgen_name}') + + if self.seed_vector_bits == 32: + dtype = np.uint32 + else: + dtype = np.uint64 + seed = np.array([1], dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(1) + state2 = bg.state + assert_(comp_state(state1, state2)) + + seed = np.arange(4, dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = np.arange(1500, dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = 2 ** np.mod(np.arange(1500, dtype=dtype), + self.seed_vector_bits - 1) + 1 + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + def test_uniform_float(self): + rg = Generator(self.bit_generator(12345)) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.random(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.random(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_gamma_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_zig_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_output_fill(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=existing) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size) + assert_equal(direct, existing) + + sized = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=sized, size=sized.shape) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_normal(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_uniform(self): + rg = self.rg + state = 
rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.random(out=existing) + rg.bit_generator.state = state + direct = rg.random(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.random(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.random(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_exponential(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_exponential(out=existing) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_exponential(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma_broadcast(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + mu = np.arange(97.0) + 1.0 + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_fill_error(self): + rg = self.rg + size = (31, 7, 97) + existing = np.empty(size) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_normal(out=existing[::3]) + existing = np.empty(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float64) + + existing = np.zeros(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float64) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32) + existing = np.zeros(size, dtype=np.float64) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3]) + + def test_integers_broadcast(self, dtype): + if dtype == np.bool: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + self._reset_state() + a = self.rg.integers(lower, [upper] * 10, dtype=dtype) + self._reset_state() + b = self.rg.integers([lower] * 10, upper, dtype=dtype) + assert_equal(a, b) + self._reset_state() + c = self.rg.integers(lower, upper, size=10, dtype=dtype) 
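+        # (Added note: scalar bounds with an explicit size= should reproduce
+        # exactly the draws obtained from the broadcast array bounds above.)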
+ assert_equal(a, c) + self._reset_state() + d = self.rg.integers(np.array( + [lower] * 10), np.array([upper], dtype=object), size=10, + dtype=dtype) + assert_equal(a, d) + self._reset_state() + e = self.rg.integers( + np.array([lower] * 10), np.array([upper] * 10), size=10, + dtype=dtype) + assert_equal(a, e) + + self._reset_state() + a = self.rg.integers(0, upper, size=10, dtype=dtype) + self._reset_state() + b = self.rg.integers([upper] * 10, dtype=dtype) + assert_equal(a, b) + + def test_integers_numpy(self, dtype): + high = np.array([1]) + low = np.array([0]) + + out = self.rg.integers(low, high, dtype=dtype) + assert out.shape == (1,) + + out = self.rg.integers(low[0], high, dtype=dtype) + assert out.shape == (1,) + + out = self.rg.integers(low, high[0], dtype=dtype) + assert out.shape == (1,) + + def test_integers_broadcast_errors(self, dtype): + if dtype == np.bool: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + with pytest.raises(ValueError): + self.rg.integers(lower, [upper + 1] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers([0], [0], dtype=dtype) + + +class TestMT19937(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.advance = None + cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 32 + cls._extra_setup() + cls.seed_error = ValueError + + def test_numpy_state(self): + nprg = np.random.RandomState() + nprg.standard_normal(99) + state = nprg.get_state() + self.rg.bit_generator.state = state + state2 = self.rg.bit_generator.state + assert_((state[1] == state2['state']['key']).all()) + assert_(state[2] == state2['state']['pos']) + + +class TestPhilox(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestSFC64(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.advance = None + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 192 + cls._extra_setup() + + +class TestPCG64(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestPCG64DXSM(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestDefaultRNG(RNG): + @classmethod + def setup_class(cls): + # This will duplicate some tests that directly instantiate a fresh + # Generator(), but that's okay. 
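+        # (Added note: np.random.default_rng(seed) is equivalent to
+        # Generator(PCG64(seed)) in current NumPy, which is why the PCG64
+        # expectations below apply.)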
+ cls.bit_generator = PCG64 + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = np.random.default_rng(*cls.seed) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + def test_default_is_pcg64(self): + # In order to change the default BitGenerator, we'll go through + # a deprecation cycle to move to a different function. + assert_(isinstance(self.rg.bit_generator, PCG64)) + + def test_seed(self): + np.random.default_rng() + np.random.default_rng(None) + np.random.default_rng(12345) + np.random.default_rng(0) + np.random.default_rng(43660444402423911716352051725018508569) + np.random.default_rng([43660444402423911716352051725018508569, + 279705150948142787361475340226491943209]) + with pytest.raises(ValueError): + np.random.default_rng(-1) + with pytest.raises(ValueError): + np.random.default_rng([12345, -1]) diff --git a/.venv/lib/python3.12/site-packages/numpy/rec/__init__.py b/.venv/lib/python3.12/site-packages/numpy/rec/__init__.py new file mode 100644 index 0000000..420240c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/rec/__init__.py @@ -0,0 +1,2 @@ +from numpy._core.records import * +from numpy._core.records import __all__, __doc__ diff --git a/.venv/lib/python3.12/site-packages/numpy/rec/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/rec/__init__.pyi new file mode 100644 index 0000000..6a78c66 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/rec/__init__.pyi @@ -0,0 +1,23 @@ +from numpy._core.records import ( + array, + find_duplicate, + format_parser, + fromarrays, + fromfile, + fromrecords, + fromstring, + recarray, + record, +) + +__all__ = [ + "record", + "recarray", + "format_parser", + "fromarrays", + "fromrecords", + "fromstring", + "fromfile", + "array", + "find_duplicate", +] diff --git a/.venv/lib/python3.12/site-packages/numpy/rec/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/rec/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..8aab5df Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/rec/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/strings/__init__.py b/.venv/lib/python3.12/site-packages/numpy/strings/__init__.py new file mode 100644 index 0000000..561dadc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/strings/__init__.py @@ -0,0 +1,2 @@ +from numpy._core.strings import * +from numpy._core.strings import __all__, __doc__ diff --git a/.venv/lib/python3.12/site-packages/numpy/strings/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/strings/__init__.pyi new file mode 100644 index 0000000..b2fb363 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/strings/__init__.pyi @@ -0,0 +1,97 @@ +from numpy._core.strings import ( + add, + capitalize, + center, + count, + decode, + encode, + endswith, + equal, + expandtabs, + find, + greater, + greater_equal, + index, + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + less, + less_equal, + ljust, + lower, + lstrip, + mod, + multiply, + not_equal, + partition, + replace, + rfind, + rindex, + rjust, + rpartition, + rstrip, + slice, + startswith, + str_len, + strip, + swapcase, + title, + translate, + upper, + zfill, +) + +__all__ = [ + "equal", + "not_equal", + "less", + "less_equal", + "greater", + "greater_equal", + "add", + "multiply", + "isalpha", + "isdigit", + "isspace", + "isalnum", + "islower", + "isupper", + 
"istitle", + "isdecimal", + "isnumeric", + "str_len", + "find", + "rfind", + "index", + "rindex", + "count", + "startswith", + "endswith", + "lstrip", + "rstrip", + "strip", + "replace", + "expandtabs", + "center", + "ljust", + "rjust", + "zfill", + "partition", + "rpartition", + "upper", + "lower", + "swapcase", + "capitalize", + "title", + "mod", + "decode", + "encode", + "translate", + "slice", +] diff --git a/.venv/lib/python3.12/site-packages/numpy/strings/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/strings/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..13abdf8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/strings/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/__init__.py b/.venv/lib/python3.12/site-packages/numpy/testing/__init__.py new file mode 100644 index 0000000..fe0c4f2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/__init__.py @@ -0,0 +1,22 @@ +"""Common test support for all numpy test scripts. + +This single module should provide all the common functionality for numpy tests +in a single location, so that test scripts can just import it and work right +away. + +""" +from unittest import TestCase + +from . import _private, overrides +from ._private import extbuild +from ._private.utils import * +from ._private.utils import _assert_valid_refcount, _gen_alignment_data + +__all__ = ( + _private.utils.__all__ + ['TestCase', 'overrides'] +) + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/testing/__init__.pyi new file mode 100644 index 0000000..ba3c9a2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/__init__.pyi @@ -0,0 +1,102 @@ +from unittest import TestCase + +from . 
import overrides +from ._private.utils import ( + HAS_LAPACK64, + HAS_REFCOUNT, + IS_EDITABLE, + IS_INSTALLED, + IS_MUSL, + IS_PYPY, + IS_PYSTON, + IS_WASM, + NOGIL_BUILD, + NUMPY_ROOT, + IgnoreException, + KnownFailureException, + SkipTest, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_raises_regex, + assert_string_equal, + assert_warns, + break_cycles, + build_err_msg, + check_support_sve, + clear_and_catch_warnings, + decorate_methods, + jiffies, + measure, + memusage, + print_assert_equal, + run_threaded, + rundocs, + runstring, + suppress_warnings, + tempdir, + temppath, + verbose, +) + +__all__ = [ + "HAS_LAPACK64", + "HAS_REFCOUNT", + "IS_EDITABLE", + "IS_INSTALLED", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "NOGIL_BUILD", + "NUMPY_ROOT", + "IgnoreException", + "KnownFailureException", + "SkipTest", + "TestCase", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_array_max_ulp", + "assert_equal", + "assert_no_gc_cycles", + "assert_no_warnings", + "assert_raises", + "assert_raises_regex", + "assert_string_equal", + "assert_warns", + "break_cycles", + "build_err_msg", + "check_support_sve", + "clear_and_catch_warnings", + "decorate_methods", + "jiffies", + "measure", + "memusage", + "overrides", + "print_assert_equal", + "run_threaded", + "rundocs", + "runstring", + "suppress_warnings", + "tempdir", + "temppath", + "verbose", +] diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..c5eec57 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/overrides.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/overrides.cpython-312.pyc new file mode 100644 index 0000000..1edf3e9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/overrides.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc new file mode 100644 index 0000000..c60e33b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.py b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-312.pyc new file mode 100644 
index 0000000..fb663ce Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc new file mode 100644 index 0000000..8d5d383 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..2959b53 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__pycache__/utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.py b/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.py new file mode 100644 index 0000000..2a724b7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.py @@ -0,0 +1,250 @@ +""" +Build a c-extension module on-the-fly in tests. +See build_and_import_extensions for usage hints + +""" + +import os +import pathlib +import subprocess +import sys +import sysconfig +import textwrap + +__all__ = ['build_and_import_extension', 'compile_extension_module'] + + +def build_and_import_extension( + modname, functions, *, prologue="", build_dir=None, + include_dirs=None, more_init=""): + """ + Build and imports a c-extension module `modname` from a list of function + fragments `functions`. + + + Parameters + ---------- + functions : list of fragments + Each fragment is a sequence of func_name, calling convention, snippet. + prologue : string + Code to precede the rest, usually extra ``#include`` or ``#define`` + macros. + build_dir : pathlib.Path + Where to build the module, usually a temporary directory + include_dirs : list + Extra directories to find include files when compiling + more_init : string + Code to appear in the module PyMODINIT_FUNC + + Returns + ------- + out: module + The module will have been loaded and is ready for use + + Examples + -------- + >>> functions = [("test_bytes", "METH_O", \"\"\" + if ( !PyBytesCheck(args)) { + Py_RETURN_FALSE; + } + Py_RETURN_TRUE; + \"\"\")] + >>> mod = build_and_import_extension("testme", functions) + >>> assert not mod.test_bytes('abc') + >>> assert mod.test_bytes(b'abc') + """ + if include_dirs is None: + include_dirs = [] + body = prologue + _make_methods(functions, modname) + init = """ + PyObject *mod = PyModule_Create(&moduledef); + #ifdef Py_GIL_DISABLED + PyUnstable_Module_SetGIL(mod, Py_MOD_GIL_NOT_USED); + #endif + """ + if not build_dir: + build_dir = pathlib.Path('.') + if more_init: + init += """#define INITERROR return NULL + """ + init += more_init + init += "\nreturn mod;" + source_string = _make_source(modname, init, body) + mod_so = compile_extension_module( + modname, build_dir, include_dirs, source_string) + import importlib.util + spec = importlib.util.spec_from_file_location(modname, mod_so) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + return foo + + +def compile_extension_module( + name, builddir, include_dirs, + source_string, libraries=None, library_dirs=None): + """ + Build an extension module and return the filename of the resulting + native code file. 
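+
+    A minimal usage sketch (hypothetical values, added for illustration)::
+
+        so_file = compile_extension_module(
+            "demo", pathlib.Path("/tmp/demo_build"),
+            include_dirs=[], source_string=c_source)  # c_source: full C text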
+ + Parameters + ---------- + name : string + name of the module, possibly including dots if it is a module inside a + package. + builddir : pathlib.Path + Where to build the module, usually a temporary directory + include_dirs : list + Extra directories to find include files when compiling + libraries : list + Libraries to link into the extension module + library_dirs: list + Where to find the libraries, ``-L`` passed to the linker + """ + modname = name.split('.')[-1] + dirname = builddir / name + dirname.mkdir(exist_ok=True) + cfile = _convert_str_to_file(source_string, dirname) + include_dirs = include_dirs or [] + libraries = libraries or [] + library_dirs = library_dirs or [] + + return _c_compile( + cfile, outputfilename=dirname / modname, + include_dirs=include_dirs, libraries=libraries, + library_dirs=library_dirs, + ) + + +def _convert_str_to_file(source, dirname): + """Helper function to create a file ``source.c`` in `dirname` that contains + the string in `source`. Returns the file name + """ + filename = dirname / 'source.c' + with filename.open('w') as f: + f.write(str(source)) + return filename + + +def _make_methods(functions, modname): + """ Turns the name, signature, code in functions into complete functions + and lists them in a methods_table. Then turns the methods_table into a + ``PyMethodDef`` structure and returns the resulting code fragment ready + for compilation + """ + methods_table = [] + codes = [] + for funcname, flags, code in functions: + cfuncname = f"{modname}_{funcname}" + if 'METH_KEYWORDS' in flags: + signature = '(PyObject *self, PyObject *args, PyObject *kwargs)' + else: + signature = '(PyObject *self, PyObject *args)' + methods_table.append( + "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags)) + func_code = f""" + static PyObject* {cfuncname}{signature} + {{ + {code} + }} + """ + codes.append(func_code) + + body = "\n".join(codes) + """ + static PyMethodDef methods[] = { + %(methods)s + { NULL } + }; + static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "%(modname)s", /* m_name */ + NULL, /* m_doc */ + -1, /* m_size */ + methods, /* m_methods */ + }; + """ % {'methods': '\n'.join(methods_table), 'modname': modname} + return body + + +def _make_source(name, init, body): + """ Combines the code fragments into source code ready to be compiled + """ + code = """ + #include + + %(body)s + + PyMODINIT_FUNC + PyInit_%(name)s(void) { + %(init)s + } + """ % { + 'name': name, 'init': init, 'body': body, + } + return code + + +def _c_compile(cfile, outputfilename, include_dirs, libraries, + library_dirs): + link_extra = [] + if sys.platform == 'win32': + compile_extra = ["/we4013"] + link_extra.append('/DEBUG') # generate .pdb file + elif sys.platform.startswith('linux'): + compile_extra = [ + "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"] + else: + compile_extra = [] + + return build( + cfile, outputfilename, + compile_extra, link_extra, + include_dirs, libraries, library_dirs) + + +def build(cfile, outputfilename, compile_extra, link_extra, + include_dirs, libraries, library_dirs): + "use meson to build" + + build_dir = cfile.parent / "build" + os.makedirs(build_dir, exist_ok=True) + with open(cfile.parent / "meson.build", "wt") as fid: + link_dirs = ['-L' + d for d in library_dirs] + fid.write(textwrap.dedent(f"""\ + project('foo', 'c') + py = import('python').find_installation(pure: false) + py.extension_module( + '{outputfilename.parts[-1]}', + '{cfile.parts[-1]}', + c_args: {compile_extra}, + link_args: 
{link_dirs}, + include_directories: {include_dirs}, + ) + """)) + native_file_name = cfile.parent / ".mesonpy-native-file.ini" + with open(native_file_name, "wt") as fid: + fid.write(textwrap.dedent(f"""\ + [binaries] + python = '{sys.executable}' + """)) + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", ".."], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", "--vsenv", + "..", f'--native-file={os.fspath(native_file_name)}'], + cwd=build_dir + ) + + so_name = outputfilename.parts[-1] + get_so_suffix() + subprocess.check_call(["meson", "compile"], cwd=build_dir) + os.rename(str(build_dir / so_name), cfile.parent / so_name) + return cfile.parent / so_name + + +def get_so_suffix(): + ret = sysconfig.get_config_var('EXT_SUFFIX') + assert ret + return ret diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.pyi b/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.pyi new file mode 100644 index 0000000..609a45e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.pyi @@ -0,0 +1,25 @@ +import pathlib +import types +from collections.abc import Sequence + +__all__ = ["build_and_import_extension", "compile_extension_module"] + +def build_and_import_extension( + modname: str, + functions: Sequence[tuple[str, str, str]], + *, + prologue: str = "", + build_dir: pathlib.Path | None = None, + include_dirs: Sequence[str] = [], + more_init: str = "", +) -> types.ModuleType: ... + +# +def compile_extension_module( + name: str, + builddir: pathlib.Path, + include_dirs: Sequence[str], + source_string: str, + libraries: Sequence[str] = [], + library_dirs: Sequence[str] = [], +) -> pathlib.Path: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.py b/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.py new file mode 100644 index 0000000..65f4059 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.py @@ -0,0 +1,2759 @@ +""" +Utility function to facilitate testing. 
+ +""" +import concurrent.futures +import contextlib +import gc +import importlib.metadata +import operator +import os +import pathlib +import platform +import pprint +import re +import shutil +import sys +import sysconfig +import threading +import warnings +from functools import partial, wraps +from io import StringIO +from tempfile import mkdtemp, mkstemp +from unittest.case import SkipTest +from warnings import WarningMessage + +import numpy as np +import numpy.linalg._umath_linalg +from numpy import isfinite, isinf, isnan +from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', + 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', + 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', + 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', + ] + + +class KnownFailureException(Exception): + '''Raise this exception to mark a test as a known failing test.''' + pass + + +KnownFailureTest = KnownFailureException # backwards compat +verbose = 0 + +NUMPY_ROOT = pathlib.Path(np.__file__).parent + +try: + np_dist = importlib.metadata.distribution('numpy') +except importlib.metadata.PackageNotFoundError: + IS_INSTALLED = IS_EDITABLE = False +else: + IS_INSTALLED = True + try: + if sys.version_info >= (3, 13): + IS_EDITABLE = np_dist.origin.dir_info.editable + else: + # Backport importlib.metadata.Distribution.origin + import json # noqa: E401 + import types + origin = json.loads( + np_dist.read_text('direct_url.json') or '{}', + object_hook=lambda data: types.SimpleNamespace(**data), + ) + IS_EDITABLE = origin.dir_info.editable + except AttributeError: + IS_EDITABLE = False + + # spin installs numpy directly via meson, instead of using meson-python, and + # runs the module by setting PYTHONPATH. This is problematic because the + # resulting installation lacks the Python metadata (.dist-info), and numpy + # might already be installed on the environment, causing us to find its + # metadata, even though we are not actually loading that package. + # Work around this issue by checking if the numpy root matches. 
+ if not IS_EDITABLE and np_dist.locate_file('numpy') != NUMPY_ROOT: + IS_INSTALLED = False + +IS_WASM = platform.machine() in ["wasm32", "wasm64"] +IS_PYPY = sys.implementation.name == 'pypy' +IS_PYSTON = hasattr(sys, "pyston_version_info") +HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +BLAS_SUPPORTS_FPE = True +if platform.system() == 'Darwin' or platform.machine() == 'arm64': + try: + blas = np.__config__.CONFIG['Build Dependencies']['blas'] + if blas['name'] == 'accelerate': + BLAS_SUPPORTS_FPE = False + except KeyError: + pass + +HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 + +IS_MUSL = False +# alternate way is +# from packaging.tags import sys_tags +# _tags = list(sys_tags()) +# if 'musllinux' in _tags[0].platform: +_v = sysconfig.get_config_var('HOST_GNU_TYPE') or '' +if 'musl' in _v: + IS_MUSL = True + +NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) +IS_64BIT = np.dtype(np.intp).itemsize == 8 + +def assert_(val, msg=''): + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +if os.name == 'nt': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance=None, + inum=-1, format=None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + # (dead link) + # My older explanation for this was that the "AddCounter" process + # forced the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: + format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath((machine, object, instance, None, + inum, counter)) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) +elif sys.platform[:5] == 'linux': + + def memusage(_proc_pid_stat=None): + """ + Return virtual memory size in bytes of the running python. + + """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + try: + with open(_proc_pid_stat) as f: + l = f.readline().split(' ') + return int(l[22]) + except Exception: + return +else: + def memusage(): + """ + Return memory usage of running python. [Not implemented] + + """ + raise NotImplementedError + + +if sys.platform[:5] == 'linux': + def jiffies(_proc_pid_stat=None, _load_time=None): + """ + Return number of jiffies elapsed. 
+ + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + _load_time = _load_time or [] + import time + if not _load_time: + _load_time.append(time.time()) + try: + with open(_proc_pid_stat) as f: + l = f.readline().split(' ') + return int(l[13]) + except Exception: + return int(100 * (time.time() - _load_time[0])) +else: + # os.getpid is not in all platforms available. + # Using time is safe but inaccurate, especially when process + # was suspended or sleeping. + def jiffies(_load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + import time + if not _load_time: + _load_time.append(time.time()) + return int(100 * (time.time() - _load_time[0])) + + +def build_err_msg(arrays, err_msg, header='Items are not equal:', + verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): + msg = ['\n' + header] + err_msg = str(err_msg) + if err_msg: + if err_msg.find('\n') == -1 and len(err_msg) < 79 - len(header): + msg = [msg[0] + ' ' + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + r_func = partial(array_repr, precision=precision) + else: + r_func = repr + + try: + r = r_func(a) + except Exception as exc: + r = f'[repr failed for <{type(a).__name__}>: {exc}]' + if r.count('\n') > 3: + r = '\n'.join(r.splitlines()[:3]) + r += '...' + msg.append(f' {names[i]}: {r}') + return '\n'.join(msg) + + +def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two objects are not equal. + + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + This function handles NaN comparisons as if NaN was a "normal" number. + That is, AssertionError is not raised if both objects have NaNs in the same + positions. This is in contrast to the IEEE standard on NaNs, which says + that NaN compared to anything must return False. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True and either of the `actual` and `desired` arguments is an array, + raise an ``AssertionError`` when either the shape or the data type of + the arguments does not match. If neither argument is an array, this + parameter has no effect. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If actual and desired are not equal. + + See Also + -------- + assert_allclose + assert_array_almost_equal_nulp, + assert_array_max_ulp, + + Notes + ----- + By default, when one of `actual` and `desired` is a scalar and the other is + an array, the function checks that each element of the array is equal to + the scalar. This behaviour can be disabled by setting ``strict==True``. + + Examples + -------- + >>> np.testing.assert_equal([4, 5], [4, 6]) + Traceback (most recent call last): + ... 
+ AssertionError: + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + The following comparison does not raise an exception. There are NaNs + in the inputs, but they are in the same positions. + + >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan]) + + As mentioned in the Notes section, `assert_equal` has special + handling for scalars when one of the arguments is an array. + Here, the test checks that each value in `x` is 3: + + >>> x = np.full((2, 5), fill_value=3) + >>> np.testing.assert_equal(x, 3) + + Use `strict` to raise an AssertionError when comparing a scalar with an + array of a different shape: + + >>> np.testing.assert_equal(x, 3, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (shapes (2, 5), () mismatch) + ACTUAL: array([[3, 3, 3, 3, 3], + [3, 3, 3, 3, 3]]) + DESIRED: array(3) + + The `strict` parameter also ensures that the array data types match: + + >>> x = np.array([2, 2, 2]) + >>> y = np.array([2., 2., 2.], dtype=np.float32) + >>> np.testing.assert_equal(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (dtypes int64, float32 mismatch) + ACTUAL: array([2, 2, 2]) + DESIRED: array([2., 2., 2.], dtype=float32) + """ + __tracebackhide__ = True # Hide traceback for py.test + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', + verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', + verbose) + return + from numpy import imag, iscomplexobj, real + from numpy._core import isscalar, ndarray, signbit + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose, + strict=strict) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except (ValueError, TypeError): + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + try: + isdesnat = isnat(desired) + isactnat = isnat(actual) + dtypes_match = (np.asarray(desired).dtype.type == + np.asarray(actual).dtype.type) + if isdesnat and isactnat: + # If both are NaT (and have the same dtype -- datetime or + # timedelta) they are considered equal. 
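+            # (Added example: assert_equal(np.datetime64("NaT"),
+            # np.datetime64("NaT")) passes, while a datetime64 NaT compared
+            # with a timedelta64 NaT raises, because the dtypes differ.)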
+ if dtypes_match: + return + else: + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + # Inf/nan/negative zero handling + try: + isdesnan = isnan(desired) + isactnan = isnan(actual) + if isdesnan and isactnan: + return # both nan, so equal + + # handle signed zero specially for floats + array_actual = np.asarray(actual) + array_desired = np.asarray(desired) + if (array_actual.dtype.char in 'Mm' or + array_desired.dtype.char in 'Mm'): + # version 1.18 + # until this version, isnan failed for datetime64 and timedelta64. + # Now it succeeds but comparison to scalar with a different type + # emits a DeprecationWarning. + # Avoid that by skipping the next check + raise NotImplementedError('cannot compare to a scalar ' + 'with a different type') + + if desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # Explicitly use __eq__ for comparison, gh-2552 + if not (desired == actual): + raise AssertionError(msg) + + except (DeprecationWarning, FutureWarning) as e: + # this handles the case when the two types are not even comparable + if 'elementwise == comparison' in e.args[0]: + raise AssertionError(msg) + else: + raise + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. + + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of `actual` and `desired` satisfy:: + + abs(desired-actual) < float64(1.5 * 10**(-decimal)) + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. 
+ + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> from numpy.testing import assert_almost_equal + >>> assert_almost_equal(2.3333333333333, 2.33333334) + >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 10 decimals + ACTUAL: 2.3333333333333 + DESIRED: 2.33333334 + + >>> assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 9 decimals + + Mismatched elements: 1 / 2 (50%) + Max absolute difference among violations: 6.66669964e-09 + Max relative difference among violations: 2.85715698e-09 + ACTUAL: array([1. , 2.333333333]) + DESIRED: array([1. , 2.33333334]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy import imag, iscomplexobj, real + from numpy._core import ndarray + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (isfinite(desired) and isfinite(actual)): + if isnan(desired) or isnan(actual): + if not (isnan(desired) and isnan(actual)): + raise AssertionError(_build_err_msg()) + elif not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)): + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual, desired, significant=7, err_msg='', + verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. 
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
+    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
+    ...                                significant=8)
+    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
+    ...                                significant=8)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Items are not equal to 8 significant digits:
+     ACTUAL: 1.234567e-21
+     DESIRED: 1.2345672e-21
+
+    The evaluated condition that raises the exception is
+
+    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
+    True
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+
+    (actual, desired) = map(float, (actual, desired))
+    if desired == actual:
+        return
+    # Normalize the numbers to be in the range (-10.0, 10.0)
+    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+    with np.errstate(invalid='ignore'):
+        scale = 0.5 * (np.abs(desired) + np.abs(actual))
+        scale = np.power(10, np.floor(np.log10(scale)))
+    try:
+        sc_desired = desired / scale
+    except ZeroDivisionError:
+        sc_desired = 0.0
+    try:
+        sc_actual = actual / scale
+    except ZeroDivisionError:
+        sc_actual = 0.0
+    msg = build_err_msg(
+        [actual, desired], err_msg,
+        header='Items are not equal to %d significant digits:' % significant,
+        verbose=verbose)
+    try:
+        # If one of desired/actual is not finite, handle it specially here:
+        # check that both are nan if any is a nan, and test for equality
+        # otherwise
+        if not (isfinite(desired) and isfinite(actual)):
+            if isnan(desired) or isnan(actual):
+                if not (isnan(desired) and isnan(actual)):
+                    raise AssertionError(msg)
+            elif not desired == actual:
+                raise AssertionError(msg)
+            return
+    except (TypeError, NotImplementedError):
+        pass
+    if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1)):
+        raise AssertionError(msg)
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+                         precision=6, equal_nan=True, equal_inf=True,
+                         *, strict=False, names=('ACTUAL', 'DESIRED')):
+    __tracebackhide__ = True  # Hide traceback for py.test
+    from numpy._core import all, array2string, errstate, inf, isnan, max, object_
+
+    x = np.asanyarray(x)
+    y = np.asanyarray(y)
+
+    # original array for output formatting
+    ox, oy = x, y
+
+    def isnumber(x):
+        return x.dtype.char in '?bhilqpBHILQPefdgFDG'
+
+    def istime(x):
+        return x.dtype.char in "Mm"
+
+    def isvstring(x):
+        return x.dtype.char == "T"
+
+    def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
+        """Handling nan/inf.
+
+        Combine results of running func on x and y, checking that they are True
+        at the same locations.
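+
+        The combined flag array (or scalar) that is returned marks the
+        positions the caller then excludes from the value comparison.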
+ + """ + __tracebackhide__ = True # Hide traceback for py.test + + x_id = func(x) + y_id = func(y) + # We include work-arounds here to handle three types of slightly + # pathological ndarray subclasses: + # (1) all() on `masked` array scalars can return masked arrays, so we + # use != True + # (2) __eq__ on some ndarray subclasses returns Python booleans + # instead of element-wise comparisons, so we cast to np.bool() and + # use isinstance(..., bool) checks + # (3) subclasses with bare-bones __array_function__ implementations may + # not implement np.all(), so favor using the .all() method + # We are not committed to supporting such subclasses, but it's nice to + # support them if possible. + if np.bool(x_id == y_id).all() != True: + msg = build_err_msg( + [x, y], + err_msg + '\n%s location mismatch:' + % (hasval), verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + # If there is a scalar, then here we know the array has the same + # flag as it everywhere, so we should return the scalar flag. + if isinstance(x_id, bool) or x_id.ndim == 0: + return np.bool(x_id) + elif isinstance(y_id, bool) or y_id.ndim == 0: + return np.bool(y_id) + else: + return y_id + + try: + if strict: + cond = x.shape == y.shape and x.dtype == y.dtype + else: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + if x.shape != y.shape: + reason = f'\n(shapes {x.shape}, {y.shape} mismatch)' + else: + reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)' + msg = build_err_msg([x, y], + err_msg + + reason, + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + + flagged = np.bool(False) + if isnumber(x) and isnumber(y): + if equal_nan: + flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') + + if equal_inf: + flagged |= func_assert_same_pos(x, y, + func=lambda xy: xy == +inf, + hasval='+inf') + flagged |= func_assert_same_pos(x, y, + func=lambda xy: xy == -inf, + hasval='-inf') + + elif istime(x) and istime(y): + # If one is datetime64 and the other timedelta64 there is no point + if equal_nan and x.dtype.type == y.dtype.type: + flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT") + + elif isvstring(x) and isvstring(y): + dt = x.dtype + if equal_nan and dt == y.dtype and hasattr(dt, 'na_object'): + is_nan = (isinstance(dt.na_object, float) and + np.isnan(dt.na_object)) + bool_errors = 0 + try: + bool(dt.na_object) + except TypeError: + bool_errors = 1 + if is_nan or bool_errors: + # nan-like NA object + flagged = func_assert_same_pos( + x, y, func=isnan, hasval=x.dtype.na_object) + + if flagged.ndim > 0: + x, y = x[~flagged], y[~flagged] + # Only do the comparison if actual values are left + if x.size == 0: + return + elif flagged: + # no sense doing comparison if everything is flagged. + return + + val = comparison(x, y) + invalids = np.logical_not(val) + + if isinstance(val, bool): + cond = val + reduced = array([val]) + else: + reduced = val.ravel() + cond = reduced.all() + + # The below comparison is a hack to ensure that fully masked + # results, for which val.ravel().all() returns np.ma.masked, + # do not trigger a failure (np.ma.masked != True evaluates as + # np.ma.masked, which is falsy). 
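+        # Everything below only runs on failure: count the mismatches and
+        # compute the max absolute/relative differences among the violating
+        # entries for the AssertionError report.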
+ if cond != True: + n_mismatch = reduced.size - reduced.sum(dtype=intp) + n_elements = flagged.size if flagged.ndim != 0 else reduced.size + percent_mismatch = 100 * n_mismatch / n_elements + remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' + f'({percent_mismatch:.3g}%)'] + + with errstate(all='ignore'): + # ignore errors for non-numeric types + with contextlib.suppress(TypeError): + error = abs(x - y) + if np.issubdtype(x.dtype, np.unsignedinteger): + error2 = abs(y - x) + np.minimum(error, error2, out=error) + + reduced_error = error[invalids] + max_abs_error = max(reduced_error) + if getattr(error, 'dtype', object_) == object_: + remarks.append( + 'Max absolute difference among violations: ' + + str(max_abs_error)) + else: + remarks.append( + 'Max absolute difference among violations: ' + + array2string(max_abs_error)) + + # note: this definition of relative error matches that one + # used by assert_allclose (found in np.isclose) + # Filter values where the divisor would be zero + nonzero = np.bool(y != 0) + nonzero_and_invalid = np.logical_and(invalids, nonzero) + + if all(~nonzero_and_invalid): + max_rel_error = array(inf) + else: + nonzero_invalid_error = error[nonzero_and_invalid] + broadcasted_y = np.broadcast_to(y, error.shape) + nonzero_invalid_y = broadcasted_y[nonzero_and_invalid] + max_rel_error = max(nonzero_invalid_error + / abs(nonzero_invalid_y)) + + if getattr(error, 'dtype', object_) == object_: + remarks.append( + 'Max relative difference among violations: ' + + str(max_rel_error)) + else: + remarks.append( + 'Max relative difference among violations: ' + + array2string(max_rel_error)) + err_msg = str(err_msg) + err_msg += '\n' + '\n'.join(remarks) + msg = build_err_msg([ox, oy], err_msg, + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + except ValueError: + import traceback + efmt = traceback.format_exc() + header = f'error during assertion:\n\n{efmt}\n\n{header}' + + msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, + names=names, precision=precision) + raise ValueError(msg) + + +def assert_array_equal(actual, desired, err_msg='', verbose=True, *, + strict=False): + """ + Raises an AssertionError if two array_like objects are not equal. + + Given two array_like objects, check that the shape is equal and all + elements of these objects are equal (but see the Notes for the special + handling of a scalar). An exception is raised at shape mismatch or + conflicting values. In contrast to the standard usage in numpy, NaNs + are compared like numbers, no assertion is raised if both objects have + NaNs in the same positions. + + The usual caution for verifying equality with floating point numbers is + advised. + + .. note:: When either `actual` or `desired` is already an instance of + `numpy.ndarray` and `desired` is not a ``dict``, the behavior of + ``assert_equal(actual, desired)`` is identical to the behavior of this + function. Otherwise, this function performs `np.asanyarray` on the + inputs before comparison, whereas `assert_equal` defines special + comparison rules for common Python types. For example, only + `assert_equal` can be used to compare nested Python lists. In new code, + consider using only `assert_equal`, explicitly converting either + `actual` or `desired` to arrays if the behavior of `assert_array_equal` + is desired. + + Parameters + ---------- + actual : array_like + The actual object to check. + desired : array_like + The desired, expected object. 
+ err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an AssertionError when either the shape or the data + type of the array_like objects does not match. The special + handling for scalars mentioned in the Notes section is disabled. + + .. versionadded:: 1.24.0 + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is array_like, + the function checks that each element of the array_like object is equal to + the scalar. This behaviour can be disabled with the `strict` parameter. + + Examples + -------- + The first assert does not raise an exception: + + >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], + ... [np.exp(0),2.33333, np.nan]) + + Assert fails with numerical imprecision with floats: + + >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan]) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference among violations: 4.4408921e-16 + Max relative difference among violations: 1.41357986e-16 + ACTUAL: array([1. , 3.141593, nan]) + DESIRED: array([1. , 3.141593, nan]) + + Use `assert_allclose` or one of the nulp (number of floating point values) + functions for these cases instead: + + >>> np.testing.assert_allclose([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan], + ... rtol=1e-10, atol=0) + + As mentioned in the Notes section, `assert_array_equal` has special + handling for scalars. Here the test checks that each value in `x` is 3: + + >>> x = np.full((2, 5), fill_value=3) + >>> np.testing.assert_array_equal(x, 3) + + Use `strict` to raise an AssertionError when comparing a scalar with an + array: + + >>> np.testing.assert_array_equal(x, 3, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (shapes (2, 5), () mismatch) + ACTUAL: array([[3, 3, 3, 3, 3], + [3, 3, 3, 3, 3]]) + DESIRED: array(3) + + The `strict` parameter also ensures that the array data types match: + + >>> x = np.array([2, 2, 2]) + >>> y = np.array([2., 2., 2.], dtype=np.float32) + >>> np.testing.assert_array_equal(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (dtypes int64, float32 mismatch) + ACTUAL: array([2, 2, 2]) + DESIRED: array([2., 2., 2.], dtype=float32) + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__eq__, actual, desired, err_msg=err_msg, + verbose=verbose, header='Arrays are not equal', + strict=strict) + + +def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', + verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. 
+ + The test verifies identical shapes and that the elements of ``actual`` and + ``desired`` satisfy:: + + abs(desired-actual) < 1.5 * 10**(-decimal) + + That is a looser test than originally documented, but agrees with what the + actual implementation did up to rounding vagaries. An exception is raised + at shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + actual : array_like + The actual object to check. + desired : array_like + The desired, expected object. + decimal : int, optional + Desired precision, default is 6. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + the first assert does not raise an exception + + >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], + ... [1.0,2.333,np.nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33339,np.nan], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference among violations: 6.e-05 + Max relative difference among violations: 2.57136612e-05 + ACTUAL: array([1. , 2.33333, nan]) + DESIRED: array([1. , 2.33339, nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33333, 5], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + nan location mismatch: + ACTUAL: array([1. , 2.33333, nan]) + DESIRED: array([1. , 2.33333, 5. ]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy._core import number, result_type + from numpy._core.fromnumeric import any as npany + from numpy._core.numerictypes import issubdtype + + def compare(x, y): + try: + if npany(isinf(x)) or npany(isinf(y)): + xinfid = isinf(x) + yinfid = isinf(y) + if not (xinfid == yinfid).all(): + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except (TypeError, NotImplementedError): + pass + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) + y = np.asanyarray(y, dtype) + z = abs(x - y) + + if not issubdtype(z.dtype, number): + z = z.astype(np.float64) # handle object arrays + + return z < 1.5 * 10.0**(-decimal) + + assert_array_compare(compare, actual, desired, err_msg=err_msg, + verbose=verbose, + header=('Arrays are not almost equal to %d decimals' % decimal), + precision=decimal) + + +def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two array_like objects are not ordered by less + than. + + Given two array_like objects `x` and `y`, check that the shape is equal and + all elements of `x` are strictly less than the corresponding elements of + `y` (but see the Notes for the special handling of a scalar). 
An exception + is raised at shape mismatch or values that are not correctly ordered. In + contrast to the standard usage in NumPy, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + x : array_like + The smaller object to check. + y : array_like + The larger object to compare. + err_msg : string + The error message to be printed in case of failure. + verbose : bool + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an AssertionError when either the shape or the data + type of the array_like objects does not match. The special + handling for scalars mentioned in the Notes section is disabled. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If x is not strictly smaller than y, element-wise. + + See Also + -------- + assert_array_equal: tests objects for equality + assert_array_almost_equal: test objects for equality up to precision + + Notes + ----- + When one of `x` and `y` is a scalar and the other is array_like, the + function performs the comparison as though the scalar were broadcasted + to the shape of the array. This behaviour can be disabled with the `strict` + parameter. + + Examples + -------- + The following assertion passes because each finite element of `x` is + strictly less than the corresponding element of `y`, and the NaNs are in + corresponding locations. + + >>> x = [1.0, 1.0, np.nan] + >>> y = [1.1, 2.0, np.nan] + >>> np.testing.assert_array_less(x, y) + + The following assertion fails because the zeroth element of `x` is no + longer strictly less than the zeroth element of `y`. + + >>> y[0] = 1 + >>> np.testing.assert_array_less(x, y) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference among violations: 0. + Max relative difference among violations: 0. + x: array([ 1., 1., nan]) + y: array([ 1., 2., nan]) + + Here, `y` is a scalar, so each element of `x` is compared to `y`, and + the assertion passes. + + >>> x = [1.0, 4.0] + >>> y = 5.0 + >>> np.testing.assert_array_less(x, y) + + However, with ``strict=True``, the assertion will fail because the shapes + do not match. + + >>> np.testing.assert_array_less(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + (shapes (2,), () mismatch) + x: array([1., 4.]) + y: array(5.) + + With ``strict=True``, the assertion also fails if the dtypes of the two + arrays do not match. + + >>> y = [5, 5] + >>> np.testing.assert_array_less(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + (dtypes float64, int64 mismatch) + x: array([1., 4.]) + y: array([5, 5]) + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, + verbose=verbose, + header='Arrays are not strictly ordered `x < y`', + equal_inf=False, + strict=strict, + names=('x', 'y')) + + +def runstring(astr, dict): + exec(astr, dict) + + +def assert_string_equal(actual, desired): + """ + Test if two strings are equal. + + If the given strings are equal, `assert_string_equal` does nothing. + If they are not equal, an AssertionError is raised, and the diff + between the strings is shown. + + Parameters + ---------- + actual : str + The string to test for equality against the expected string. + desired : str + The expected string. 
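+
+    Raises
+    ------
+    AssertionError
+        If `actual` and `desired` are not equal, or if either argument is
+        not a string.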
+
+    Examples
+    --------
+    >>> np.testing.assert_string_equal('abc', 'abc')
+    >>> np.testing.assert_string_equal('abc', 'abcd')
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ...
+    AssertionError: Differences in strings:
+    - abc+ abcd?    +
+
+    """
+    # delay import of difflib to reduce startup time
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import difflib
+
+    if not isinstance(actual, str):
+        raise AssertionError(repr(type(actual)))
+    if not isinstance(desired, str):
+        raise AssertionError(repr(type(desired)))
+    if desired == actual:
+        return
+
+    diff = list(difflib.Differ().compare(actual.splitlines(True),
+                                         desired.splitlines(True)))
+    diff_list = []
+    while diff:
+        d1 = diff.pop(0)
+        if d1.startswith('  '):
+            continue
+        if d1.startswith('- '):
+            l = [d1]
+            d2 = diff.pop(0)
+            if d2.startswith('? '):
+                l.append(d2)
+                d2 = diff.pop(0)
+            if not d2.startswith('+ '):
+                raise AssertionError(repr(d2))
+            l.append(d2)
+            if diff:
+                d3 = diff.pop(0)
+                if d3.startswith('? '):
+                    l.append(d3)
+                else:
+                    diff.insert(0, d3)
+            if d2[2:] == d1[2:]:
+                continue
+            diff_list.extend(l)
+            continue
+        raise AssertionError(repr(d1))
+    if not diff_list:
+        return
+    msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
+    if actual != desired:
+        raise AssertionError(msg)
+
+
+def rundocs(filename=None, raise_on_error=True):
+    """
+    Run doctests found in the given file.
+
+    By default `rundocs` raises an AssertionError on failure.
+
+    Parameters
+    ----------
+    filename : str
+        The path to the file for which the doctests are run.
+    raise_on_error : bool
+        Whether to raise an AssertionError when a doctest fails. Default is
+        True.
+
+    Notes
+    -----
+    The doctests can be run by the user/developer by adding the ``doctests``
+    argument to the ``test()`` call. For example, to run all tests (including
+    doctests) for ``numpy.lib``:
+
+    >>> np.lib.test(doctests=True)  # doctest: +SKIP
+    """
+    import doctest
+
+    from numpy.distutils.misc_util import exec_mod_from_location
+    if filename is None:
+        f = sys._getframe(1)
+        filename = f.f_globals['__file__']
+    name = os.path.splitext(os.path.basename(filename))[0]
+    m = exec_mod_from_location(name, filename)
+
+    tests = doctest.DocTestFinder().find(m)
+    runner = doctest.DocTestRunner(verbose=False)
+
+    msg = []
+    if raise_on_error:
+        out = msg.append
+    else:
+        out = None
+
+    for test in tests:
+        runner.run(test, out=out)
+
+    if runner.failures > 0 and raise_on_error:
+        raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+
+
+def check_support_sve(__cache=[]):
+    """
+    gh-22982
+    """
+
+    if __cache:
+        return __cache[0]
+
+    import subprocess
+    cmd = 'lscpu'
+    try:
+        output = subprocess.run(cmd, capture_output=True, text=True)
+        result = 'sve' in output.stdout
+    except (OSError, subprocess.SubprocessError):
+        result = False
+    __cache.append(result)
+    return __cache[0]
+
+
+#
+# assert_raises and assert_raises_regex are taken from unittest.
+#
+import unittest
+
+
+class _Dummy(unittest.TestCase):
+    def nop(self):
+        pass
+
+
+_d = _Dummy('nop')
+
+
+def assert_raises(*args, **kwargs):
+    """
+    assert_raises(exception_class, callable, *args, **kwargs)
+    assert_raises(exception_class)
+
+    Fail unless an exception of class exception_class is thrown
+    by callable when invoked with arguments args and keyword
+    arguments kwargs. If a different type of exception is
+    thrown, it will not be caught, and the test case will be
+    deemed to have suffered an error, exactly as for an
+    unexpected exception.
+
+    Alternatively, `assert_raises` can be used as a context manager:
+
+    >>> from numpy.testing import assert_raises
+    >>> with assert_raises(ZeroDivisionError):
+    ...     1 / 0
+
+    is equivalent to
+
+    >>> def div(x, y):
+    ...     return x / y
+    >>> assert_raises(ZeroDivisionError, div, 1, 0)
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    return _d.assertRaises(*args, **kwargs)
+
+
+def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
+    """
+    assert_raises_regex(exception_class, expected_regexp, callable, *args,
+                        **kwargs)
+    assert_raises_regex(exception_class, expected_regexp)
+
+    Fail unless an exception of class exception_class and with message that
+    matches expected_regexp is thrown by callable when invoked with arguments
+    args and keyword arguments kwargs.
+
+    Alternatively, can be used as a context manager like `assert_raises`.
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
+
+
+def decorate_methods(cls, decorator, testmatch=None):
+    """
+    Apply a decorator to all methods in a class matching a regular expression.
+
+    The given decorator is applied to all public methods of `cls` that are
+    matched by the regular expression `testmatch`
+    (``testmatch.search(methodname)``). Methods that are private, i.e. start
+    with an underscore, are ignored.
+
+    Parameters
+    ----------
+    cls : class
+        Class whose methods to decorate.
+    decorator : function
+        Decorator to apply to methods.
+    testmatch : compiled regexp or str, optional
+        The regular expression. Default value is None, in which case the
+        nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+        is used.
+        If `testmatch` is a string, it is compiled to a regular expression
+        first.
+
+    """
+    if testmatch is None:
+        testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+    else:
+        testmatch = re.compile(testmatch)
+    cls_attr = cls.__dict__
+
+    # delayed import to reduce startup time
+    from inspect import isfunction
+
+    methods = [_m for _m in cls_attr.values() if isfunction(_m)]
+    for function in methods:
+        try:
+            if hasattr(function, 'compat_func_name'):
+                funcname = function.compat_func_name
+            else:
+                funcname = function.__name__
+        except AttributeError:
+            # not a function
+            continue
+        if testmatch.search(funcname) and not funcname.startswith('_'):
+            setattr(cls, funcname, decorator(function))
+
+
+def measure(code_str, times=1, label=None):
+    """
+    Return elapsed time for executing code in the namespace of the caller.
+
+    The supplied code string is compiled with the Python builtin ``compile``.
+    The precision of the timing is 10 milliseconds. If the code will execute
+    fast on this timescale, it can be executed many times to get reasonable
+    timing accuracy.
+
+    Parameters
+    ----------
+    code_str : str
+        The code to be timed.
+    times : int, optional
+        The number of times the code is executed. Default is 1. The code is
+        only compiled once.
+    label : str, optional
+        A label to identify `code_str` with. This is passed into ``compile``
+        as the second argument (for run-time error messages).
+
+    Returns
+    -------
+    elapsed : float
+        Total elapsed time in seconds for executing `code_str` `times` times.
+ + Examples + -------- + >>> times = 10 + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times) + >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, f'Test name: {label} ', 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01 * elapsed + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. + """ + if not HAS_REFCOUNT: + return True + + import gc + + import numpy as np + + b = np.arange(100 * 100).reshape(100, 100) + c = b + i = 1 + + gc.disable() + try: + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + finally: + gc.enable() + + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, + err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + Given two array_like objects, check that their shapes and all elements + are equal (but see the Notes for the special handling of a scalar). An + exception is raised if the shapes mismatch or any values conflict. In + contrast to the standard usage in numpy, NaNs are compared like numbers, + no assertion is raised if both objects have NaNs in the same positions. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note + that ``allclose`` has different default values). It compares the difference + between `actual` and `desired` to ``atol + rtol * abs(desired)``. + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an ``AssertionError`` when either the shape or the data + type of the arguments does not match. The special handling of scalars + mentioned in the Notes section is disabled. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is + array_like, the function performs the comparison as if the scalar were + broadcasted to the shape of the array. + This behaviour can be disabled with the `strict` parameter. + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0) + + As mentioned in the Notes section, `assert_allclose` has special + handling for scalars. Here, the test checks that the value of `numpy.sin` + is nearly zero at integer multiples of π. + + >>> x = np.arange(3) * np.pi + >>> np.testing.assert_allclose(np.sin(x), 0, atol=1e-15) + + Use `strict` to raise an ``AssertionError`` when comparing an array + with one or more dimensions against a scalar. 
+
+    >>> np.testing.assert_allclose(np.sin(x), 0, atol=1e-15, strict=True)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Not equal to tolerance rtol=1e-07, atol=1e-15
+
+    (shapes (3,), () mismatch)
+     ACTUAL: array([ 0.000000e+00,  1.224647e-16, -2.449294e-16])
+     DESIRED: array(0)
+
+    The `strict` parameter also ensures that the array data types match:
+
+    >>> y = np.zeros(3, dtype=np.float32)
+    >>> np.testing.assert_allclose(np.sin(x), y, atol=1e-15, strict=True)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Not equal to tolerance rtol=1e-07, atol=1e-15
+
+    (dtypes float64, float32 mismatch)
+     ACTUAL: array([ 0.000000e+00,  1.224647e-16, -2.449294e-16])
+     DESIRED: array([0., 0., 0.], dtype=float32)
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+
+    def compare(x, y):
+        return np._core.numeric.isclose(x, y, rtol=rtol, atol=atol,
+                                        equal_nan=equal_nan)
+
+    actual, desired = np.asanyarray(actual), np.asanyarray(desired)
+    header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}'
+    assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
+                         verbose=verbose, header=header, equal_nan=equal_nan,
+                         strict=strict)
+
+
+def assert_array_almost_equal_nulp(x, y, nulp=1):
+    """
+    Compare two arrays relatively to their spacing.
+
+    This is a relatively robust method to compare two arrays whose amplitude
+    is variable.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Input arrays.
+    nulp : int, optional
+        The maximum number of units in the last place for tolerance (see
+        Notes). Default is 1.
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    AssertionError
+        If the spacing between `x` and `y` for one or more elements is larger
+        than `nulp`.
+
+    See Also
+    --------
+    assert_array_max_ulp : Check that all items of arrays differ in at most
+        N Units in the Last Place.
+    spacing : Return the distance between x and the nearest adjacent number.
+
+    Notes
+    -----
+    An assertion is raised if the following condition is not met::
+
+        abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
+
+    Examples
+    --------
+    >>> x = np.array([1., 1e-10, 1e-20])
+    >>> eps = np.finfo(x.dtype).eps
+    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
+
+    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
+    Traceback (most recent call last):
+        ...
+    AssertionError: Arrays are not equal to 1 ULP (max is 2)
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+    ax = np.abs(x)
+    ay = np.abs(y)
+    ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
+    if not np.all(np.abs(x - y) <= ref):
+        if np.iscomplexobj(x) or np.iscomplexobj(y):
+            msg = f"Arrays are not equal to {nulp} ULP"
+        else:
+            max_nulp = np.max(nulp_diff(x, y))
+            msg = f"Arrays are not equal to {nulp} ULP (max is {max_nulp:g})"
+        raise AssertionError(msg)
+
+
+def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
+    """
+    Check that all items of arrays differ in at most N Units in the Last Place.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Input arrays to be compared.
+    maxulp : int, optional
+        The maximum number of units in the last place that elements of `a` and
+        `b` can differ. Default is 1.
+    dtype : dtype, optional
+        Data-type to convert `a` and `b` to if given. Default is None.
+
+    Returns
+    -------
+    ret : ndarray
+        Array containing number of representable floating point numbers between
+        items in `a` and `b`.
+
+    Raises
+    ------
+    AssertionError
+        If one or more elements differ by more than `maxulp`.
+
+    Notes
+    -----
+    For computing the ULP difference, this API does not differentiate between
+    various representations of NAN (the ULP difference between 0x7fc00000 and
+    0xffc00000 is zero).
+
+    See Also
+    --------
+    assert_array_almost_equal_nulp : Compare two arrays relatively to their
+        spacing.
+
+    Examples
+    --------
+    >>> a = np.linspace(0., 1., 100)
+    >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+    ret = nulp_diff(a, b, dtype)
+    if not np.all(ret <= maxulp):
+        raise AssertionError("Arrays are not almost equal up to %g "
+                             "ULP (max difference is %g ULP)" %
+                             (maxulp, np.max(ret)))
+    return ret
+
+
+def nulp_diff(x, y, dtype=None):
+    """For each item in x and y, return the number of representable floating
+    points between them.
+
+    Parameters
+    ----------
+    x : array_like
+        first input array
+    y : array_like
+        second input array
+    dtype : dtype, optional
+        Data-type to convert `x` and `y` to if given. Default is None.
+
+    Returns
+    -------
+    nulp : array_like
+        number of representable floating point numbers between each item in x
+        and y.
+
+    Notes
+    -----
+    For computing the ULP difference, this API does not differentiate between
+    various representations of NAN (the ULP difference between 0x7fc00000 and
+    0xffc00000 is zero).
+
+    Examples
+    --------
+    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
+    # there should be exactly one ULP between 1 and 1 + eps
+    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
+    1.0
+    """
+    import numpy as np
+    if dtype:
+        x = np.asarray(x, dtype=dtype)
+        y = np.asarray(y, dtype=dtype)
+    else:
+        x = np.asarray(x)
+        y = np.asarray(y)
+
+    t = np.common_type(x, y)
+    if np.iscomplexobj(x) or np.iscomplexobj(y):
+        raise NotImplementedError("_nulp not implemented for complex array")
+
+    x = np.array([x], dtype=t)
+    y = np.array([y], dtype=t)
+
+    x[np.isnan(x)] = np.nan
+    y[np.isnan(y)] = np.nan
+
+    if not x.shape == y.shape:
+        raise ValueError(f"Arrays do not have the same shape: {x.shape} - {y.shape}")
+
+    def _diff(rx, ry, vdt):
+        diff = np.asarray(rx - ry, dtype=vdt)
+        return np.abs(diff)
+
+    rx = integer_repr(x)
+    ry = integer_repr(y)
+    return _diff(rx, ry, t)
+
+
+def _integer_repr(x, vdt, comp):
+    # Reinterpret binary representation of the float as sign-magnitude:
+    # take into account the two's complement representation
+    # See also
+    # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+    rx = x.view(vdt)
+    if not (rx.size == 1):
+        rx[rx < 0] = comp - rx[rx < 0]
+    elif rx < 0:
+        rx = comp - rx
+
+    return rx
+
+
+def integer_repr(x):
+    """Return the signed-magnitude interpretation of the binary representation
+    of x."""
+    import numpy as np
+    if x.dtype == np.float16:
+        return _integer_repr(x, np.int16, np.int16(-2**15))
+    elif x.dtype == np.float32:
+        return _integer_repr(x, np.int32, np.int32(-2**31))
+    elif x.dtype == np.float64:
+        return _integer_repr(x, np.int64, np.int64(-2**63))
+    else:
+        raise ValueError(f'Unsupported dtype {x.dtype}')
+
+
+@contextlib.contextmanager
+def _assert_warns_context(warning_class, name=None):
+    __tracebackhide__ = True  # Hide traceback for py.test
+    with suppress_warnings() as sup:
+        l = sup.record(warning_class)
+        yield
+        if not len(l) > 0:
+            name_str = f' when calling {name}' if name is not None else ''
+            raise AssertionError("No warning raised" + name_str)
+
+
+def assert_warns(warning_class, *args, **kwargs):
+    """
+    Fail unless the given callable throws the
specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager:: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable, optional + Callable to test + *args : Arguments + Arguments for `func`. + **kwargs : Kwargs + Keyword arguments for `func`. + + Returns + ------- + The value returned by `func`. + + Examples + -------- + >>> import warnings + >>> def deprecated_func(num): + ... warnings.warn("Please upgrade", DeprecationWarning) + ... return num*num + >>> with np.testing.assert_warns(DeprecationWarning): + ... assert deprecated_func(4) == 16 + >>> # or passing a func + >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) + >>> assert ret == 16 + """ + if not args and not kwargs: + return _assert_warns_context(warning_class) + elif len(args) < 1: + if "match" in kwargs: + raise RuntimeError( + "assert_warns does not use 'match' kwarg, " + "use pytest.warns instead" + ) + raise RuntimeError("assert_warns(...) needs at least one arg") + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError(f'Got warnings{name_str}: {l}') + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager:: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. 
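+
+    Examples
+    --------
+    A minimal sketch of both calling styles (``quiet`` is just a stand-in
+    for a callable that should emit no warnings)::
+
+        def quiet(x):
+            return x + 1
+
+        with assert_no_warnings():
+            quiet(1)
+
+        ret = assert_no_warnings(quiet, 1)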
+
+    """
+    if not args:
+        return _assert_no_warnings_context()
+
+    func = args[0]
+    args = args[1:]
+    with _assert_no_warnings_context(name=func.__name__):
+        return func(*args, **kwargs)
+
+
+def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
+    """
+    generator producing data with different alignment and offsets
+    to test simd vectorization
+
+    Parameters
+    ----------
+    dtype : dtype
+        data type to produce
+    type : string
+        'unary': create data for unary operations, creates one input
+                 array and one output array
+        'binary': create data for binary operations, creates two input
+                 arrays and one output array
+    max_size : integer
+        maximum size of data to produce
+
+    Returns
+    -------
+    if type is 'unary' yields one output array, one input array and a message
+    containing information on the data
+    if type is 'binary' yields one output array, two input arrays and a
+    message containing information on the data
+
+    """
+    ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
+    bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
+    for o in range(3):
+        for s in range(o + 2, max(o + 3, max_size)):
+            if type == 'unary':
+                inp = lambda: arange(s, dtype=dtype)[o:]
+                out = empty((s,), dtype=dtype)[o:]
+                yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
+                d = inp()
+                yield d, d, ufmt % (o, o, s, dtype, 'in place')
+                yield out[1:], inp()[:-1], ufmt % \
+                    (o + 1, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp()[1:], ufmt % \
+                    (o, o + 1, s - 1, dtype, 'out of place')
+                yield inp()[:-1], inp()[1:], ufmt % \
+                    (o, o + 1, s - 1, dtype, 'aliased')
+                yield inp()[1:], inp()[:-1], ufmt % \
+                    (o + 1, o, s - 1, dtype, 'aliased')
+            if type == 'binary':
+                inp1 = lambda: arange(s, dtype=dtype)[o:]
+                inp2 = lambda: arange(s, dtype=dtype)[o:]
+                out = empty((s,), dtype=dtype)[o:]
+                yield out, inp1(), inp2(), bfmt % \
+                    (o, o, o, s, dtype, 'out of place')
+                d = inp1()
+                yield d, d, inp2(), bfmt % \
+                    (o, o, o, s, dtype, 'in place1')
+                d = inp2()
+                yield d, inp1(), d, bfmt % \
+                    (o, o, o, s, dtype, 'in place2')
+                yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+                    (o + 1, o, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+                    (o, o + 1, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+                    (o, o, o + 1, s - 1, dtype, 'out of place')
+                yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+                    (o + 1, o, o, s - 1, dtype, 'aliased')
+                yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+                    (o, o + 1, o, s - 1, dtype, 'aliased')
+                yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+                    (o, o, o + 1, s - 1, dtype, 'aliased')
+
+
+class IgnoreException(Exception):
+    "Ignoring this exception due to disabled feature"
+    pass
+
+
+@contextlib.contextmanager
+def tempdir(*args, **kwargs):
+    """Context manager to provide a temporary test folder.
+
+    All arguments are passed as-is to the underlying tempfile.mkdtemp
+    function.
+
+    """
+    tmpdir = mkdtemp(*args, **kwargs)
+    try:
+        yield tmpdir
+    finally:
+        shutil.rmtree(tmpdir)
+
+
+@contextlib.contextmanager
+def temppath(*args, **kwargs):
+    """Context manager for temporary files.
+
+    Context manager that returns the path to a closed temporary file. Its
+    parameters are the same as for tempfile.mkstemp and are passed directly
+    to that function. The underlying file is removed when the context is
+    exited, so it should be closed at that time.
+
+    Windows does not allow a temporary file to be opened if it is already
+    open, so the underlying file must be closed after opening before it
+    can be opened again.
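+
+    Examples
+    --------
+    A minimal usage sketch; any argument accepted by ``tempfile.mkstemp``
+    (for example ``suffix``) is passed through::
+
+        with temppath(suffix='.txt') as path:
+            with open(path, 'w') as f:
+                f.write('some data')
+        # the file is removed once the block is exited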
+ + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """ Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with np.testing.clear_and_catch_warnings( + ... modules=[np._core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np._core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... # np._core.fromnumeric + """ + class_modules = () + + def __init__(self, record=False, modules=()): + self.modules = set(modules).union(self.class_modules) + self._warnreg_copies = {} + super().__init__(record=record) + + def __enter__(self): + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod_reg = mod.__warningregistry__ + self._warnreg_copies[mod] = mod_reg.copy() + mod_reg.clear() + return super().__enter__() + + def __exit__(self, *exc_info): + super().__exit__(*exc_info) + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod.__warningregistry__.clear() + if mod in self._warnreg_copies: + mod.__warningregistry__.update(self._warnreg_copies[mod]) + + +class suppress_warnings: + """ + Context manager and decorator doing much the same as + ``warnings.catch_warnings``. + + However, it also provides a filter mechanism to work around + https://bugs.python.org/issue4180. + + This bug causes Python before 3.4 to not reliably show warnings again + after they have been ignored once (even within catch_warnings). It + means that no "ignore" filter can be used easily, since following + tests might need to see the warning. Additionally it allows easier + specificity for testing warnings and can be nested. + + Parameters + ---------- + forwarding_rule : str, optional + One of "always", "once", "module", or "location". Analogous to + the usual warnings module filter mode, it is useful to reduce + noise mostly on the outmost level. Unsuppressed and unrecorded + warnings will be forwarded based on this rule. Defaults to "always". 
+        "location" is equivalent to the warnings "default", matching by the
+        exact location the warning originated from.
+
+    Notes
+    -----
+    Filters added inside the context manager will be discarded again
+    when leaving it. Upon entering all filters defined outside a
+    context will be applied automatically.
+
+    When a recording filter is added, matching warnings are stored in the
+    ``log`` attribute as well as in the list returned by ``record``.
+
+    If filters are added and the ``module`` keyword is given, the
+    warning registry of this module will additionally be cleared when
+    applying it, entering the context, or exiting it. This could cause
+    warnings to appear a second time after leaving the context if they
+    were configured to be printed once (default) and were already
+    printed before the context was entered.
+
+    Nesting this context manager will work as expected when the
+    forwarding rule is "always" (default). Unfiltered and unrecorded
+    warnings will be passed out and be matched by the outer level.
+    On the outmost level they will be printed (or caught by another
+    warnings context). The forwarding rule argument can modify this
+    behaviour.
+
+    Like ``catch_warnings`` this context manager is not threadsafe.
+
+    Examples
+    --------
+
+    With a context manager::
+
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Some text")
+            sup.filter(module=np.ma.core)
+            log = sup.record(FutureWarning, "Does this occur?")
+            command_giving_warnings()
+            # The FutureWarning was given once, the filtered warnings were
+            # ignored. All other warnings abide outside settings (may be
+            # printed/error)
+            assert_(len(log) == 1)
+            assert_(len(sup.log) == 1)  # also stored in log attribute
+
+    Or as a decorator::
+
+        sup = np.testing.suppress_warnings()
+        sup.filter(module=np.ma.core)  # module must match exactly
+        @sup
+        def some_function():
+            # do something which causes a warning in np.ma.core
+            pass
+    """
+    def __init__(self, forwarding_rule="always"):
+        self._entered = False
+
+        # Suppressions are either instance-level or defined inside one
+        # with block:
+        self._suppressions = []
+
+        if forwarding_rule not in {"always", "module", "once", "location"}:
+            raise ValueError("unsupported forwarding rule.")
+        self._forwarding_rule = forwarding_rule
+
+    def _clear_registries(self):
+        if hasattr(warnings, "_filters_mutated"):
+            # clearing the registry should not be necessary on new pythons,
+            # instead the filters should be mutated.
+            warnings._filters_mutated()
+            return
+        # Simply clear the registry, this should normally be harmless,
+        # note that on new pythons it would be invalidated anyway.
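+        # Fallback for older interpreters: explicitly drop the
+        # __warningregistry__ entries of every module we touched.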
+ for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. 
+ """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, use_warnmsg=None, **kwargs): + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. 
+ """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func + + +@contextlib.contextmanager +def _assert_no_gc_cycles_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + + # not meaningful to test if there is no refcounting + if not HAS_REFCOUNT: + yield + return + + assert_(gc.isenabled()) + gc.disable() + gc_debug = gc.get_debug() + try: + for i in range(100): + if gc.collect() == 0: + break + else: + raise RuntimeError( + "Unable to fully collect garbage - perhaps a __del__ method " + "is creating more reference cycles?") + + gc.set_debug(gc.DEBUG_SAVEALL) + yield + # gc.collect returns the number of unreachable objects in cycles that + # were found -- we are checking that no cycles were created in the context + n_objects_in_cycles = gc.collect() + objects_in_cycles = gc.garbage[:] + finally: + del gc.garbage[:] + gc.set_debug(gc_debug) + gc.enable() + + if n_objects_in_cycles: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError( + "Reference cycles were found{}: {} objects were collected, " + "of which {} are shown below:{}" + .format( + name_str, + n_objects_in_cycles, + len(objects_in_cycles), + ''.join( + "\n {} object with id={}:\n {}".format( + type(o).__name__, + id(o), + pprint.pformat(o).replace('\n', '\n ') + ) for o in objects_in_cycles + ) + ) + ) + + +def assert_no_gc_cycles(*args, **kwargs): + """ + Fail if the given callable produces any reference cycles. + + If called with all arguments omitted, may be used as a context manager:: + + with assert_no_gc_cycles(): + do_something() + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + Nothing. The result is deliberately discarded to ensure that all cycles + are found. + + """ + if not args: + return _assert_no_gc_cycles_context() + + func = args[0] + args = args[1:] + with _assert_no_gc_cycles_context(name=func.__name__): + func(*args, **kwargs) + + +def break_cycles(): + """ + Break reference cycles by calling gc.collect + Objects can call other objects' methods (for instance, another object's + __del__) inside their own __del__. On PyPy, the interpreter only runs + between calls to gc.collect, so multiple calls are needed to completely + release all cycles. + """ + + gc.collect() + if IS_PYPY: + # a few more, just to make sure all the finalizers are called + gc.collect() + gc.collect() + gc.collect() + gc.collect() + + +def requires_memory(free_bytes): + """Decorator to skip a test if not enough memory is available""" + import pytest + + def decorator(func): + @wraps(func) + def wrapper(*a, **kw): + msg = check_free_memory(free_bytes) + if msg is not None: + pytest.skip(msg) + + try: + return func(*a, **kw) + except MemoryError: + # Probably ran out of memory regardless: don't regard as failure + pytest.xfail("MemoryError raised") + + return wrapper + + return decorator + + +def check_free_memory(free_bytes): + """ + Check whether `free_bytes` amount of memory is currently free. 
+ Returns: None if enough memory available, otherwise error message + """ + env_var = 'NPY_AVAILABLE_MEM' + env_value = os.environ.get(env_var) + if env_value is not None: + try: + mem_free = _parse_size(env_value) + except ValueError as exc: + raise ValueError(f'Invalid environment variable {env_var}: {exc}') + + msg = (f'{free_bytes / 1e9} GB memory required, but environment variable ' + f'NPY_AVAILABLE_MEM={env_value} set') + else: + mem_free = _get_mem_available() + + if mem_free is None: + msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM " + "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run " + "the test.") + mem_free = -1 + else: + free_bytes_gb = free_bytes / 1e9 + mem_free_gb = mem_free / 1e9 + msg = f'{free_bytes_gb} GB memory required, but {mem_free_gb} GB available' + + return msg if mem_free < free_bytes else None + + +def _parse_size(size_str): + """Convert memory size strings ('12 GB' etc.) to float""" + suffixes = {'': 1, 'b': 1, + 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4, + 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4, + 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4} + + pipe_suffixes = "|".join(suffixes.keys()) + + size_re = re.compile(fr'^\s*(\d+|\d+\.\d+)\s*({pipe_suffixes})\s*$', re.I) + + m = size_re.match(size_str.lower()) + if not m or m.group(2) not in suffixes: + raise ValueError(f'value {size_str!r} not a valid size') + return int(float(m.group(1)) * suffixes[m.group(2)]) + + +def _get_mem_available(): + """Return available memory in bytes, or None if unknown.""" + try: + import psutil + return psutil.virtual_memory().available + except (ImportError, AttributeError): + pass + + if sys.platform.startswith('linux'): + info = {} + with open('/proc/meminfo') as f: + for line in f: + p = line.split() + info[p[0].strip(':').lower()] = int(p[1]) * 1024 + + if 'memavailable' in info: + # Linux >= 3.14 + return info['memavailable'] + else: + return info['memfree'] + info['cached'] + + return None + + +def _no_tracing(func): + """ + Decorator to temporarily turn off tracing for the duration of a test. 
+ Needed in tests that check refcounting, otherwise the tracing itself + influences the refcounts + """ + if not hasattr(sys, 'gettrace'): + return func + else: + @wraps(func) + def wrapper(*args, **kwargs): + original_trace = sys.gettrace() + try: + sys.settrace(None) + return func(*args, **kwargs) + finally: + sys.settrace(original_trace) + return wrapper + + +def _get_glibc_version(): + try: + ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] + except Exception: + ver = '0.0' + + return ver + + +_glibcver = _get_glibc_version() +_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) + + +def run_threaded(func, max_workers=8, pass_count=False, + pass_barrier=False, outer_iterations=1, + prepare_args=None): + """Runs a function many times in parallel""" + for _ in range(outer_iterations): + with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) + as tpe): + if prepare_args is None: + args = [] + else: + args = prepare_args() + if pass_barrier: + barrier = threading.Barrier(max_workers) + args.append(barrier) + if pass_count: + all_args = [(func, i, *args) for i in range(max_workers)] + else: + all_args = [(func, *args) for i in range(max_workers)] + try: + futures = [] + for arg in all_args: + futures.append(tpe.submit(*arg)) + except RuntimeError as e: + import pytest + pytest.skip(f"Spawning {max_workers} threads failed with " + f"error {e!r} (likely due to resource limits on the " + "system running the tests)") + finally: + if len(futures) < max_workers and pass_barrier: + barrier.abort() + for f in futures: + f.result() diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.pyi b/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.pyi new file mode 100644 index 0000000..4e3b60a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.pyi @@ -0,0 +1,499 @@ +import ast +import sys +import types +import unittest +import warnings +from collections.abc import Callable, Iterable, Sequence +from contextlib import _GeneratorContextManager +from pathlib import Path +from re import Pattern +from typing import ( + Any, + AnyStr, + ClassVar, + Final, + Generic, + NoReturn, + ParamSpec, + Self, + SupportsIndex, + TypeAlias, + TypeVarTuple, + overload, + type_check_only, +) +from typing import Literal as L +from unittest.case import SkipTest + +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLikeDT64_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, +) + +__all__ = [ # noqa: RUF022 + "IS_EDITABLE", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "NOGIL_BUILD", + "assert_", + "assert_array_almost_equal_nulp", + "assert_raises_regex", + "assert_array_max_ulp", + "assert_warns", + "assert_no_warnings", + "assert_allclose", + "assert_equal", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_equal", + "assert_array_less", + "assert_string_equal", + "assert_array_almost_equal", + "assert_raises", + "build_err_msg", + "decorate_methods", + "jiffies", + "memusage", + "print_assert_equal", + "rundocs", + "runstring", + "verbose", + "measure", + "IgnoreException", + "clear_and_catch_warnings", + "SkipTest", + "KnownFailureException", + "temppath", + "tempdir", + "suppress_warnings", + "assert_array_compare", + "assert_no_gc_cycles", + "break_cycles", + "check_support_sve", 
+ "run_threaded", +] + +### + +_T = TypeVar("_T") +_Ts = TypeVarTuple("_Ts") +_Tss = ParamSpec("_Tss") +_ET = TypeVar("_ET", bound=BaseException, default=BaseException) +_FT = TypeVar("_FT", bound=Callable[..., Any]) +_W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) +_T_or_bool = TypeVar("_T_or_bool", default=bool) + +_StrLike: TypeAlias = str | bytes +_RegexLike: TypeAlias = _StrLike | Pattern[Any] +_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co + +_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] +_WarningSpec: TypeAlias = type[Warning] +_WarnLog: TypeAlias = list[warnings.WarningMessage] +_ToModules: TypeAlias = Iterable[types.ModuleType] + +# Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` +_ComparisonFunc: TypeAlias = Callable[ + [NDArray[Any], NDArray[Any]], + bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], +] + +# Type-check only `clear_and_catch_warnings` subclasses for both values of the +# `record` parameter. Copied from the stdlib `warnings` stubs. +@type_check_only +class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): + def __enter__(self) -> list[warnings.WarningMessage]: ... + +@type_check_only +class _clear_and_catch_warnings_without_records(clear_and_catch_warnings): + def __enter__(self) -> None: ... + +### + +verbose: int = 0 +NUMPY_ROOT: Final[Path] = ... +IS_INSTALLED: Final[bool] = ... +IS_EDITABLE: Final[bool] = ... +IS_MUSL: Final[bool] = ... +IS_PYPY: Final[bool] = ... +IS_PYSTON: Final[bool] = ... +IS_WASM: Final[bool] = ... +HAS_REFCOUNT: Final[bool] = ... +HAS_LAPACK64: Final[bool] = ... +NOGIL_BUILD: Final[bool] = ... + +class KnownFailureException(Exception): ... +class IgnoreException(Exception): ... + +# NOTE: `warnings.catch_warnings` is incorrectly defined as invariant in typeshed +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments] + class_modules: ClassVar[tuple[types.ModuleType, ...]] = () + modules: Final[set[types.ModuleType]] + @overload # record: True + def __init__(self: clear_and_catch_warnings[_WarnLog], /, record: L[True], modules: _ToModules = ()) -> None: ... + @overload # record: False (default) + def __init__(self: clear_and_catch_warnings[None], /, record: L[False] = False, modules: _ToModules = ()) -> None: ... + @overload # record; bool + def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... + +class suppress_warnings: + log: Final[_WarnLog] + def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + def __call__(self, /, func: _FT) -> _FT: ... + + # + def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... + def record(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> _WarnLog: ... + +# Contrary to runtime we can't do `os.name` checks while type checking, +# only `sys.platform` checks +if sys.platform == "win32" or sys.platform == "cygwin": + def memusage(processName: str = ..., instance: int = ...) -> int: ... +elif sys.platform == "linux": + def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> int | None: ... 
+else: + def memusage() -> NoReturn: ... + +if sys.platform == "linux": + def jiffies(_proc_pid_stat: StrOrBytesPath = ..., _load_time: list[float] = []) -> int: ... +else: + def jiffies(_load_time: list[float] = []) -> int: ... + +# +def build_err_msg( + arrays: Iterable[object], + err_msg: object, + header: str = ..., + verbose: bool = ..., + names: Sequence[str] = ..., + precision: SupportsIndex | None = ..., +) -> str: ... + +# +def print_assert_equal(test_string: str, actual: object, desired: object) -> None: ... + +# +def assert_(val: object, msg: str | Callable[[], str] = "") -> None: ... + +# +def assert_equal( + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +def assert_almost_equal( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: int = 7, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +# +def assert_approx_equal( + actual: ConvertibleToFloat, + desired: ConvertibleToFloat, + significant: int = 7, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +# +def assert_array_compare( + comparison: _ComparisonFunc, + x: ArrayLike, + y: ArrayLike, + err_msg: object = "", + verbose: bool = True, + header: str = "", + precision: SupportsIndex = 6, + equal_nan: bool = True, + equal_inf: bool = True, + *, + strict: bool = False, + names: tuple[str, str] = ("ACTUAL", "DESIRED"), +) -> None: ... + +# +def assert_array_equal( + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_array_almost_equal( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: float = 6, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +@overload +def assert_array_less( + x: _ArrayLikeDT64_co, + y: _ArrayLikeDT64_co, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... +@overload +def assert_array_less( + x: _ArrayLikeTD64_co, + y: _ArrayLikeTD64_co, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... +@overload +def assert_array_less( + x: _NumericArrayLike, + y: _NumericArrayLike, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_string_equal(actual: str, desired: str) -> None: ... + +# +@overload +def assert_raises( + exception_class: _ExceptionSpec[_ET], + /, + *, + msg: str | None = None, +) -> unittest.case._AssertRaisesContext[_ET]: ... +@overload +def assert_raises( + exception_class: _ExceptionSpec, + callable: Callable[_Tss, Any], + /, + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> None: ... + +# +@overload +def assert_raises_regex( + exception_class: _ExceptionSpec[_ET], + expected_regexp: _RegexLike, + *, + msg: str | None = None, +) -> unittest.case._AssertRaisesContext[_ET]: ... +@overload +def assert_raises_regex( + exception_class: _ExceptionSpec, + expected_regexp: _RegexLike, + callable: Callable[_Tss, Any], + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> None: ... + +# +@overload +def assert_allclose( + actual: _ArrayLikeTD64_co, + desired: _ArrayLikeTD64_co, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... 
+@overload +def assert_allclose( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_array_almost_equal_nulp( + x: _ArrayLikeNumber_co, + y: _ArrayLikeNumber_co, + nulp: float = 1, +) -> None: ... + +# +def assert_array_max_ulp( + a: _ArrayLikeNumber_co, + b: _ArrayLikeNumber_co, + maxulp: float = 1, + dtype: DTypeLike | None = None, +) -> NDArray[Any]: ... + +# +@overload +def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... +@overload +def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... + +# +@overload +def assert_no_warnings() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... + +# +@overload +def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... + +### + +# +@overload +def tempdir( + suffix: None = None, + prefix: None = None, + dir: None = None, +) -> _GeneratorContextManager[str]: ... +@overload +def tempdir( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... + +# +@overload +def temppath( + suffix: None = None, + prefix: None = None, + dir: None = None, + text: bool = False, +) -> _GeneratorContextManager[str]: ... +@overload +def temppath( + suffix: AnyStr | None, + prefix: AnyStr | None, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... + +# +def check_support_sve(__cache: list[_T_or_bool] = []) -> _T_or_bool: ... # noqa: PYI063 + +# +def decorate_methods( + cls: type, + decorator: Callable[[Callable[..., Any]], Any], + testmatch: _RegexLike | None = None, +) -> None: ... + +# +@overload +def run_threaded( + func: Callable[[], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + prepare_args: None = None, +) -> None: ... +@overload +def run_threaded( + func: Callable[[*_Ts], None], + max_workers: int, + pass_count: bool, + pass_barrier: bool, + outer_iterations: int, + prepare_args: tuple[*_Ts], +) -> None: ... 
+@overload +def run_threaded( + func: Callable[[*_Ts], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + *, + prepare_args: tuple[*_Ts], +) -> None: ... + +# +def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... # noqa: ANN401 +def rundocs(filename: StrPath | None = None, raise_on_error: bool = True) -> None: ... +def measure(code_str: _StrLike | ast.AST, times: int = 1, label: str | None = None) -> float: ... +def break_cycles() -> None: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/overrides.py b/.venv/lib/python3.12/site-packages/numpy/testing/overrides.py new file mode 100644 index 0000000..61771c4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/overrides.py @@ -0,0 +1,84 @@ +"""Tools for testing implementations of __array_function__ and ufunc overrides + + +""" + +import numpy._core.umath as _umath +from numpy import ufunc as _ufunc +from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions + + +def get_overridable_numpy_ufuncs(): + """List all numpy ufuncs overridable via `__array_ufunc__` + + Parameters + ---------- + None + + Returns + ------- + set + A set containing all overridable ufuncs in the public numpy API. + """ + ufuncs = {obj for obj in _umath.__dict__.values() + if isinstance(obj, _ufunc)} + return ufuncs + + +def allows_array_ufunc_override(func): + """Determine if a function can be overridden via `__array_ufunc__` + + Parameters + ---------- + func : callable + Function that may be overridable via `__array_ufunc__` + + Returns + ------- + bool + `True` if `func` is overridable via `__array_ufunc__` and + `False` otherwise. + + Notes + ----- + This function is equivalent to ``isinstance(func, np.ufunc)`` and + will work correctly for ufuncs defined outside of Numpy. + + """ + return isinstance(func, _ufunc) + + +def get_overridable_numpy_array_functions(): + """List all numpy functions overridable via `__array_function__` + + Parameters + ---------- + None + + Returns + ------- + set + A set containing all functions in the public numpy API that are + overridable via `__array_function__`. + + """ + # 'import numpy' doesn't import recfunctions, so make sure it's imported + # so ufuncs defined there show up in the ufunc listing + from numpy.lib import recfunctions # noqa: F401 + return _array_functions.copy() + +def allows_array_function_override(func): + """Determine if a Numpy function can be overridden via `__array_function__` + + Parameters + ---------- + func : callable + Function that may be overridable via `__array_function__` + + Returns + ------- + bool + `True` if `func` is a function in the Numpy API that is + overridable via `__array_function__` and `False` otherwise. + """ + return func in _array_functions diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/overrides.pyi b/.venv/lib/python3.12/site-packages/numpy/testing/overrides.pyi new file mode 100644 index 0000000..3fefc3f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/overrides.pyi @@ -0,0 +1,11 @@ +from collections.abc import Callable, Hashable +from typing import Any + +from typing_extensions import TypeIs + +import numpy as np + +def get_overridable_numpy_ufuncs() -> set[np.ufunc]: ... +def get_overridable_numpy_array_functions() -> set[Callable[..., Any]]: ... +def allows_array_ufunc_override(func: object) -> TypeIs[np.ufunc]: ... +def allows_array_function_override(func: Hashable) -> bool: ... 
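
As a quick orientation for the ``numpy.testing.overrides`` helpers vendored
above, here is a minimal usage sketch (an illustration only, assuming a
standard NumPy install; ``np.add`` and ``np.concatenate`` are just example
functions, not part of the module above):

    import numpy as np
    from numpy.testing.overrides import (
        allows_array_function_override,
        allows_array_ufunc_override,
        get_overridable_numpy_array_functions,
        get_overridable_numpy_ufuncs,
    )

    # ufuncs dispatch through __array_ufunc__ ...
    assert allows_array_ufunc_override(np.add)
    # ... while most other public functions dispatch through
    # __array_function__.
    assert allows_array_function_override(np.concatenate)

    # Both getters return plain sets, so ordinary set operations work.
    ufuncs = get_overridable_numpy_ufuncs()
    funcs = get_overridable_numpy_array_functions()
    print(f"{len(ufuncs)} overridable ufuncs, {len(funcs)} overridable functions")
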
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.py b/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.py new file mode 100755 index 0000000..89f0de3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 +"""Prints type-coercion tables for the built-in NumPy types + +""" +from collections import namedtuple + +import numpy as np +from numpy._core.numerictypes import obj2sctype + + +# Generic object that can be added, but doesn't do anything else +class GenericObject: + def __init__(self, v): + self.v = v + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + dtype = np.dtype('O') + +def print_cancast_table(ntypes): + print('X', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + print(row, end=' ') + for col in ntypes: + if np.can_cast(row, col, "equiv"): + cast = "#" + elif np.can_cast(row, col, "safe"): + cast = "=" + elif np.can_cast(row, col, "same_kind"): + cast = "~" + elif np.can_cast(row, col, "unsafe"): + cast = "." + else: + cast = " " + print(cast, end=' ') + print() + +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, + use_promote_types=False): + print('+', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + if row == 'O': + rowtype = GenericObject + else: + rowtype = obj2sctype(row) + + print(row, end=' ') + for col in ntypes: + if col == 'O': + coltype = GenericObject + else: + coltype = obj2sctype(col) + try: + if firstarray: + rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) + else: + rowvalue = rowtype(inputfirstvalue) + colvalue = coltype(inputsecondvalue) + if use_promote_types: + char = np.promote_types(rowvalue.dtype, colvalue.dtype).char + else: + value = np.add(rowvalue, colvalue) + if isinstance(value, np.ndarray): + char = value.dtype.char + else: + char = np.dtype(type(value)).char + except ValueError: + char = '!' + except OverflowError: + char = '@' + except TypeError: + char = '#' + print(char, end=' ') + print() + + +def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): + """Prints new casts, the values given are default "can-cast" values, not + actual ones. + """ + from numpy._core._multiarray_tests import get_all_cast_information + + cast_table = { + -1: " ", + 0: "#", # No cast (classify as equivalent here) + 1: "#", # equivalent casting + 2: "=", # safe casting + 3: "~", # same-kind casting + 4: ".", # unsafe casting + } + flags_table = { + 0: "▗", 7: "█", + 1: "▚", 2: "▐", 4: "▄", + 3: "▜", 5: "▙", + 6: "▟", + } + + cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"]) + no_cast_info = cast_info(" ", " ", " ") + + casts = get_all_cast_information() + table = {} + dtypes = set() + for cast in casts: + dtypes.add(cast["from"]) + dtypes.add(cast["to"]) + + if cast["from"] not in table: + table[cast["from"]] = {} + to_dict = table[cast["from"]] + + can_cast = cast_table[cast["casting"]] + legacy = "L" if cast["legacy"] else "." + flags = 0 + if cast["requires_pyapi"]: + flags |= 1 + if cast["supports_unaligned"]: + flags |= 2 + if cast["no_floatingpoint_errors"]: + flags |= 4 + + flags = flags_table[flags] + to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags) + + # The np.dtype(x.type) is a bit strange, because dtype classes do + # not expose much yet. 
+ types = np.typecodes["All"]
+
+ def sorter(x):
+ # This is a slightly weird hack to get a table as close as possible
+ # to the one printing all typecodes (while still accommodating
+ # user dtypes).
+ dtype = np.dtype(x.type)
+ try:
+ indx = types.index(dtype.char)
+ except ValueError:
+ indx = np.inf
+ return (indx, dtype.char)
+
+ dtypes = sorted(dtypes, key=sorter)
+
+ def print_table(field="can_cast"):
+ print('X', end=' ')
+ for dt in dtypes:
+ print(np.dtype(dt.type).char, end=' ')
+ print()
+ for from_dt in dtypes:
+ print(np.dtype(from_dt.type).char, end=' ')
+ row = table.get(from_dt, {})
+ for to_dt in dtypes:
+ print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
+ print()
+
+ if can_cast:
+ # Print the actual table:
+ print()
+ print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
+ print()
+ print_table("can_cast")
+
+ if legacy:
+ print()
+ print("L denotes a legacy cast, '.' a non-legacy one.")
+ print()
+ print_table("legacy")
+
+ if flags:
+ print()
+ print(f"{flags_table[0]}: no flags, "
+ f"{flags_table[1]}: PyAPI, "
+ f"{flags_table[2]}: supports unaligned, "
+ f"{flags_table[4]}: no-float-errors")
+ print()
+ print_table("flags")
+
+
+if __name__ == '__main__':
+ print("can cast")
+ print_cancast_table(np.typecodes['All'])
+ print()
+ print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
+ print()
+ print("scalar + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, False)
+ print()
+ print("scalar + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, False)
+ print()
+ print("array + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, True)
+ print()
+ print("array + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, True)
+ print()
+ print("promote_types")
+ print_coercion_table(np.typecodes['All'], 0, 0, False, True)
+ print("New casting type promotion:")
+ print_new_cast_table(can_cast=True, legacy=True, flags=True)
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.pyi b/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.pyi
new file mode 100644
index 0000000..c859305
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.pyi
@@ -0,0 +1,27 @@
+from collections.abc import Iterable
+from typing import ClassVar, Generic, Self
+
+from typing_extensions import TypeVar
+
+import numpy as np
+
+_VT_co = TypeVar("_VT_co", default=object, covariant=True)
+
+# undocumented
+class GenericObject(Generic[_VT_co]):
+ dtype: ClassVar[np.dtype[np.object_]] = ...
+ v: _VT_co
+
+ def __init__(self, /, v: _VT_co) -> None: ...
+ def __add__(self, other: object, /) -> Self: ...
+ def __radd__(self, other: object, /) -> Self: ...
+
+def print_cancast_table(ntypes: Iterable[str]) -> None: ...
+def print_coercion_table(
+ ntypes: Iterable[str],
+ inputfirstvalue: int,
+ inputsecondvalue: int,
+ firstarray: bool,
+ use_promote_types: bool = False,
+) -> None: ...
+def print_new_cast_table(*, can_cast: bool = True, legacy: bool = False, flags: bool = False) -> None: ...
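
The one-character symbols that ``print_cancast_table`` emits map directly
onto ``np.can_cast`` casting modes. A minimal sketch of that mapping
(illustrative only, assuming a standard NumPy install; the dtype character
codes below are just examples):

    import numpy as np

    def cast_symbol(from_char, to_char):
        # Mirrors the if/elif chain in print_cancast_table above:
        # '#' equivalent, '=' safe, '~' same-kind, '.' unsafe, ' ' no cast.
        if np.can_cast(from_char, to_char, "equiv"):
            return "#"
        elif np.can_cast(from_char, to_char, "safe"):
            return "="
        elif np.can_cast(from_char, to_char, "same_kind"):
            return "~"
        elif np.can_cast(from_char, to_char, "unsafe"):
            return "."
        return " "

    print(cast_symbol("i", "d"))  # int -> float64 is safe:          '='
    print(cast_symbol("d", "f"))  # float64 -> float32 is same-kind: '~'
    print(cast_symbol("d", "i"))  # float64 -> int is unsafe:        '.'
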
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/testing/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..bd01e5a
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc
new file mode 100644
index 0000000..99b8502
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/tests/test_utils.py b/.venv/lib/python3.12/site-packages/numpy/testing/tests/test_utils.py
new file mode 100644
index 0000000..fcf2009
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/tests/test_utils.py
@@ -0,0 +1,1917 @@
+import itertools
+import os
+import re
+import sys
+import warnings
+import weakref
+
+import pytest
+
+import numpy as np
+import numpy._core._multiarray_umath as ncu
+from numpy.testing import (
+ HAS_REFCOUNT,
+ assert_,
+ assert_allclose,
+ assert_almost_equal,
+ assert_approx_equal,
+ assert_array_almost_equal,
+ assert_array_almost_equal_nulp,
+ assert_array_equal,
+ assert_array_less,
+ assert_array_max_ulp,
+ assert_equal,
+ assert_no_gc_cycles,
+ assert_no_warnings,
+ assert_raises,
+ assert_string_equal,
+ assert_warns,
+ build_err_msg,
+ clear_and_catch_warnings,
+ suppress_warnings,
+ tempdir,
+ temppath,
+)
+
+
+class _GenericTest:
+
+ def _test_equal(self, a, b):
+ self._assert_func(a, b)
+
+ def _test_not_equal(self, a, b):
+ with assert_raises(AssertionError):
+ self._assert_func(a, b)
+
+ def test_array_rank1_eq(self):
+ """Test that two equal arrays of rank 1 are found equal."""
+ a = np.array([1, 2])
+ b = np.array([1, 2])
+
+ self._test_equal(a, b)
+
+ def test_array_rank1_noteq(self):
+ """Test that two different arrays of rank 1 are found not equal."""
+ a = np.array([1, 2])
+ b = np.array([2, 2])
+
+ self._test_not_equal(a, b)
+
+ def test_array_rank2_eq(self):
+ """Test that two equal arrays of rank 2 are found equal."""
+ a = np.array([[1, 2], [3, 4]])
+ b = np.array([[1, 2], [3, 4]])
+
+ self._test_equal(a, b)
+
+ def test_array_diffshape(self):
+ """Test that two arrays with different shapes are found not equal."""
+ a = np.array([1, 2])
+ b = np.array([[1, 2], [1, 2]])
+
+ self._test_not_equal(a, b)
+
+ def test_objarray(self):
+ """Test object arrays."""
+ a = np.array([1, 1], dtype=object)
+ self._test_equal(a, 1)
+
+ def test_array_likes(self):
+ self._test_equal([1, 2, 3], (1, 2, 3))
+
+
+class TestArrayEqual(_GenericTest):
+
+ def setup_method(self):
+ self._assert_func = assert_array_equal
+
+ def test_generic_rank1(self):
+ """Test rank 1 array for all dtypes."""
+ def foo(t):
+ a = np.empty(2, t)
+ a.fill(1)
+ b = a.copy()
+ c = a.copy()
+ c.fill(0)
+ self._test_equal(a, b)
+ self._test_not_equal(c, b)
+
+ # Test numeric types and object
+ for t in '?bhilqpBHILQPfdgFDG':
+ foo(t)
+
+ # Test strings
+ for t in ['S1', 'U1']:
+ foo(t)
+
+ def test_0_ndim_array(self):
+ x = np.array(473963742225900817127911193656584771)
+ y = np.array(18535119325151578301457182298393896)
+
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msg = str(exc_info.value)
+ assert_('Mismatched elements: 1 / 1 (100%)\n'
+ in msg)
+
+ y = x
+ self._assert_func(x, y)
+
+ x = np.array(4395065348745.5643764887869876)
+ y = np.array(0)
+ expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
+ 'Max absolute difference among violations: '
+ '4.39506535e+12\n'
+ 'Max relative difference among violations: inf\n')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(x, y)
+
+ x = y
+ self._assert_func(x, y)
+
+ def test_generic_rank3(self):
+ """Test rank 3 array for all dtypes."""
+ def foo(t):
+ a = np.empty((4, 2, 3), t)
+ a.fill(1)
+ b = a.copy()
+ c = a.copy()
+ c.fill(0)
+ self._test_equal(a, b)
+ self._test_not_equal(c, b)
+
+ # Test numeric types and object
+ for t in '?bhilqpBHILQPfdgFDG':
+ foo(t)
+
+ # Test strings
+ for t in ['S1', 'U1']:
+ foo(t)
+
+ def test_nan_array(self):
+ """Test arrays with nan values in them."""
+ a = np.array([1, 2, np.nan])
+ b = np.array([1, 2, np.nan])
+
+ self._test_equal(a, b)
+
+ c = np.array([1, 2, 3])
+ self._test_not_equal(c, b)
+
+ def test_string_arrays(self):
+ """Test that equal string arrays compare equal and different ones do not."""
+ a = np.array(['floupi', 'floupa'])
+ b = np.array(['floupi', 'floupa'])
+
+ self._test_equal(a, b)
+
+ c = np.array(['floupipi', 'floupa'])
+
+ self._test_not_equal(c, b)
+
+ def test_recarrays(self):
+ """Test record arrays."""
+ a = np.empty(2, [('floupi', float), ('floupa', float)])
+ a['floupi'] = [1, 2]
+ a['floupa'] = [1, 2]
+ b = a.copy()
+
+ self._test_equal(a, b)
+
+ c = np.empty(2, [('floupipi', float),
+ ('floupi', float), ('floupa', float)])
+ c['floupipi'] = a['floupi'].copy()
+ c['floupa'] = a['floupa'].copy()
+
+ with pytest.raises(TypeError):
+ self._test_not_equal(c, b)
+
+ def test_masked_nan_inf(self):
+ # Regression test for gh-11121
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
+ b = np.array([3., np.nan, 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
+ b = np.array([np.inf, 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+
+ def test_subclass_that_overrides_eq(self):
+ # While we cannot guarantee testing functions will always work for
+ # subclasses, the tests should ideally rely only on subclasses having
+ # comparison operators, not on them being able to store booleans
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+ class MyArray(np.ndarray):
+ def __eq__(self, other):
+ return bool(np.equal(self, other).all())
+
+ def __ne__(self, other):
+ return not self == other
+
+ a = np.array([1., 2.]).view(MyArray)
+ b = np.array([2., 3.]).view(MyArray)
+ assert_(type(a == a) is bool)
+ assert_(a == a)
+ assert_(a != b)
+ self._test_equal(a, a)
+ self._test_not_equal(a, b)
+ self._test_not_equal(b, a)
+
+ expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+ 'Max absolute difference among violations: 1.\n'
+ 'Max relative difference among violations: 0.5')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._test_equal(a, b)
+
+ c = np.array([0., 2.9]).view(MyArray)
+ expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+ 'Max absolute difference among violations: 2.\n'
+ 'Max relative difference among violations: inf')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._test_equal(b, c)
+
+ def test_subclass_that_does_not_implement_npall(self):
+ class MyArray(np.ndarray):
+ def __array_function__(self, *args, **kwargs):
+ return NotImplemented
+
+ a = np.array([1., 2.]).view(MyArray)
+ b = np.array([2., 3.]).view(MyArray)
+ with assert_raises(TypeError):
+ np.all(a)
+ self._test_equal(a, a)
+ self._test_not_equal(a, b)
+ self._test_not_equal(b, a)
+
+ def test_suppress_overflow_warnings(self):
+ # Based on issue #18992
+ with pytest.raises(AssertionError):
+ with np.errstate(all="raise"):
+ np.testing.assert_array_equal(
+ np.array([1, 2, 3], np.float32),
+ np.array([1, 1e-40, 3], np.float32))
+
+ def test_array_vs_scalar_is_equal(self):
+ """Test comparing an array with a scalar when all values are equal."""
+ a = np.array([1., 1., 1.])
+ b = 1.
+
+ self._test_equal(a, b)
+
+ def test_array_vs_array_not_equal(self):
+ """Test comparing two arrays when not all values are equal."""
+ a = np.array([34986, 545676, 439655, 563766])
+ b = np.array([34986, 545676, 439655, 0])
+
+ expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
+ 'Max absolute difference among violations: 563766\n'
+ 'Max relative difference among violations: inf')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(a, b)
+
+ a = np.array([34986, 545676, 439655.2, 563766])
+ expected_msg = ('Mismatched elements: 2 / 4 (50%)\n'
+ 'Max absolute difference among violations: '
+ '563766.\n'
+ 'Max relative difference among violations: '
+ '4.54902139e-07')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(a, b)
+
+ def test_array_vs_scalar_strict(self):
+ """Test comparing an array with a scalar with strict option."""
+ a = np.array([1., 1., 1.])
+ b = 1.
+ + with pytest.raises(AssertionError): + self._assert_func(a, b, strict=True) + + def test_array_vs_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1., 1., 1.]) + b = np.array([1., 1., 1.]) + + self._assert_func(a, b, strict=True) + + def test_array_vs_float_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1, 1, 1]) + b = np.array([1., 1., 1.]) + + with pytest.raises(AssertionError): + self._assert_func(a, b, strict=True) + + +class TestBuildErrorMessage: + + def test_build_err_msg_defaults(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' + '2.00003, 3.00004])') + assert_equal(a, b) + + def test_build_err_msg_no_verbose(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, verbose=False) + b = '\nItems are not equal: There is a mismatch' + assert_equal(a, b) + + def test_build_err_msg_custom_names(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) + b = ('\nItems are not equal: There is a mismatch\n FOO: array([' + '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' + '3.00004])') + assert_equal(a, b) + + def test_build_err_msg_custom_precision(self): + x = np.array([1.000000001, 2.00002, 3.00003]) + y = np.array([1.000000002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, precision=10) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' + '1.000000002, 2.00003 , 3.00004 ])') + assert_equal(a, b) + + +class TestEqual(TestArrayEqual): + + def setup_method(self): + self._assert_func = assert_equal + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_datetime(self): + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "s") + ) + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "m") + ) + + # gh-10081 + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "s") + ) + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "m") + ) + + def test_nat_items(self): + # not a datetime + nadt_no_unit = np.datetime64("NaT") + nadt_s = np.datetime64("NaT", "s") + nadt_d = np.datetime64("NaT", "ns") + # not a timedelta + natd_no_unit = np.timedelta64("NaT") + natd_s = np.timedelta64("NaT", "s") + natd_d = np.timedelta64("NaT", "ns") + + dts = [nadt_no_unit, nadt_s, nadt_d] + tds = [natd_no_unit, natd_s, natd_d] + for a, b in itertools.product(dts, dts): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, tds): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in 
itertools.product(tds, dts): + self._test_not_equal(a, b) + self._test_not_equal(a, [b]) + self._test_not_equal([a], [b]) + self._test_not_equal([a], np.datetime64("2017-01-01", "s")) + self._test_not_equal([b], np.datetime64("2017-01-01", "s")) + self._test_not_equal([a], np.timedelta64(123, "s")) + self._test_not_equal([b], np.timedelta64(123, "s")) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(ncu.PZERO, ncu.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + + def test_object(self): + # gh-12942 + import datetime + a = np.array([datetime.datetime(2000, 1, 1), + datetime.datetime(2000, 1, 2)]) + self._test_not_equal(a, a[::-1]) + + +class TestArrayAlmostEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_array_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.5\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(1.5, 0.0, decimal=0) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.5\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func([1.5], [0.0], decimal=0) + + a = [1.4999999, 0.00003] + b = [1.49999991, 0] + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: 3.e-05\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=7) + + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: 3.e-05\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=7) + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: ' + '1.e-04\n' + 'Max relative difference among violations: ' + '8.10226812e-08') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y, decimal=5) + + def test_array_vs_scalar(self): + a = [5498.42354, 849.54345, 0.00] + b = 5498.42354 + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: 1.') + with 
pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=9) + + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: 5.4722099') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=9) + + a = [5498.42354, 0.00] + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=7) + + b = 0 + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=7) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) + self._assert_func(anan, anan) + assert_raises(AssertionError, + lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, + lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, + lambda: self._assert_func(ainf, anan)) + + def test_inf(self): + a = np.array([[1., 2.], [3., 4.]]) + b = a.copy() + a[0, 0] = np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + b[0, 0] = -np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + + def test_subclass(self): + a = np.array([[1., 2.], [3., 4.]]) + b = np.ma.masked_array([[1., 2.], [0., 4.]], + [[False, False], [True, False]]) + self._assert_func(a, b) + self._assert_func(b, a) + self._assert_func(b, b) + + # Test fully masked as well (see gh-11123). + a = np.ma.MaskedArray(3.5, mask=True) + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.masked + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array([1., 2., 3.]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array(1.) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_2(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
+ class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + return all(self) + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + z = np.array([True, True]).view(MyArray) + all(z) + b = np.array([1., 202]).view(MyArray) + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: 200.\n' + 'Max relative difference among violations: 0.99009') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestAlmostEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, np.inf)) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(-np.inf, np.inf)) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + + def test_error_message(self): + """Check the message is formatted correctly for the decimal value. 
+ Also check the message when input includes inf or nan (gh-12200)"""
+ x = np.array([1.00000000001, 2.00000000002, 3.00003])
+ y = np.array([1.00000000002, 2.00000000003, 3.00004])
+
+ # Test with a different amount of decimal digits
+ expected_msg = ('Mismatched elements: 3 / 3 (100%)\n'
+ 'Max absolute difference among violations: 1.e-05\n'
+ 'Max relative difference among violations: '
+ '3.33328889e-06\n'
+ ' ACTUAL: array([1.00000000001, '
+ '2.00000000002, '
+ '3.00003 ])\n'
+ ' DESIRED: array([1.00000000002, 2.00000000003, '
+ '3.00004 ])')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(x, y, decimal=12)
+
+ # With the default value of decimal digits, only the 3rd element
+ # differs. Note that we only check for the formatting of the arrays
+ # themselves.
+ expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
+ 'Max absolute difference among violations: 1.e-05\n'
+ 'Max relative difference among violations: '
+ '3.33328889e-06\n'
+ ' ACTUAL: array([1. , 2. , 3.00003])\n'
+ ' DESIRED: array([1. , 2. , 3.00004])')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(x, y)
+
+ # Check the error message when input includes inf
+ x = np.array([np.inf, 0])
+ y = np.array([np.inf, 1])
+ expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+ 'Max absolute difference among violations: 1.\n'
+ 'Max relative difference among violations: 1.\n'
+ ' ACTUAL: array([inf, 0.])\n'
+ ' DESIRED: array([inf, 1.])')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(x, y)
+
+ # Check the error message when dividing by zero
+ x = np.array([1, 2])
+ y = np.array([0, 0])
+ expected_msg = ('Mismatched elements: 2 / 2 (100%)\n'
+ 'Max absolute difference among violations: 2\n'
+ 'Max relative difference among violations: inf')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(x, y)
+
+ def test_error_message_2(self):
+ """Check the message is formatted correctly when either x or y
+ is a scalar."""
+ x = 2
+ y = np.ones(20)
+ expected_msg = ('Mismatched elements: 20 / 20 (100%)\n'
+ 'Max absolute difference among violations: 1.\n'
+ 'Max relative difference among violations: 1.')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(x, y)
+
+ y = 2
+ x = np.ones(20)
+ expected_msg = ('Mismatched elements: 20 / 20 (100%)\n'
+ 'Max absolute difference among violations: 1.\n'
+ 'Max relative difference among violations: 0.5')
+ with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+ self._assert_func(x, y)
+
+ def test_subclass_that_cannot_be_bool(self):
+ # While we cannot guarantee testing functions will always work for
+ # subclasses, the tests should ideally rely only on subclasses having
+ # comparison operators, not on them being able to store booleans
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+ class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestApproxEqual: + + def setup_method(self): + self._assert_func = assert_approx_equal + + def test_simple_0d_arrays(self): + x = np.array(1234.22) + y = np.array(1234.23) + + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_simple_items(self): + x = 1234.22 + y = 1234.23 + + self._assert_func(x, y, significant=4) + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_nan_array(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_items(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + +class TestArrayAssertLess: + + def setup_method(self): + self._assert_func = assert_array_less + + def test_simple_arrays(self): + x = np.array([1.1, 2.2]) + y = np.array([1.2, 2.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 2.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + a = np.array([1, 3, 6, 20]) + b = np.array([2, 4, 6, 8]) + + expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Max absolute difference among violations: 12\n' + 'Max relative difference among violations: 1.5') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + def test_rank2(self): + x = np.array([[1.1, 2.2], [3.3, 4.4]]) + y = np.array([[1.2, 2.3], [3.4, 4.5]]) + + self._assert_func(x, y) + expected_msg = ('Mismatched elements: 4 / 4 (100%)\n' + 'Max absolute difference among violations: 0.1\n' + 'Max relative difference among violations: 0.09090909') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = np.array([[1.0, 2.3], [3.4, 4.5]]) + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank3(self): + x = np.ones(shape=(2, 2, 2)) + y = np.ones(shape=(2, 2, 2)) + 1 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y[0, 0, 0] = 0 + expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n' + 'Max absolute difference among violations: 1.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_simple_items(self): + x = 1.1 + y = 2.2 + + self._assert_func(x, 
y) + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.1\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = np.array([2.2, 3.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 3.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_simple_items_and_array(self): + x = np.array([[621.345454, 390.5436, 43.54657, 626.4535], + [54.54, 627.3399, 13., 405.5435], + [543.545, 8.34, 91.543, 333.3]]) + y = 627.34 + self._assert_func(x, y) + + y = 8.339999 + self._assert_func(y, x) + + x = np.array([[3.4536, 2390.5436, 435.54657, 324525.4535], + [5449.54, 999090.54, 130303.54, 405.5435], + [543.545, 8.34, 91.543, 999090.53999]]) + y = 999090.54 + + expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n' + 'Max absolute difference among violations: 0.\n' + 'Max relative difference among violations: 0.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 12 / 12 (100%)\n' + 'Max absolute difference among violations: ' + '999087.0864\n' + 'Max relative difference among violations: ' + '289288.5934676') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + def test_zeroes(self): + x = np.array([546456., 0, 15.455]) + y = np.array(87654.) + + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Max absolute difference among violations: 458802.\n' + 'Max relative difference among violations: 5.23423917') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Max absolute difference among violations: 87654.\n' + 'Max relative difference among violations: ' + '5670.5626011') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = 0 + + expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Max absolute difference among violations: 546456.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Max absolute difference among violations: 0.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + def test_nan_noncompare(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(aone, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_noncompare_array(self): + x = np.array([1.1, 2.2, 3.3]) + anan = np.array(np.nan) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + x = np.array([1.1, 2.2, np.nan]) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + y = np.array([1.0, 2.0, np.nan]) + + self._assert_func(y, x) + assert_raises(AssertionError, lambda: 
self._assert_func(x, y))
+
+    def test_inf_compare(self):
+        aone = np.array(1)
+        ainf = np.array(np.inf)
+
+        self._assert_func(aone, ainf)
+        self._assert_func(-ainf, aone)
+        self._assert_func(-ainf, ainf)
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
+        assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))
+
+    def test_inf_compare_array(self):
+        x = np.array([1.1, 2.2, np.inf])
+        ainf = np.array(np.inf)
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
+        assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
+        self._assert_func(-ainf, x)
+
+    def test_strict(self):
+        """Test the behavior of the `strict` option."""
+        x = np.zeros(3)
+        y = np.ones(())
+        self._assert_func(x, y)
+        with pytest.raises(AssertionError):
+            self._assert_func(x, y, strict=True)
+        y = np.broadcast_to(y, x.shape)
+        self._assert_func(x, y)
+        with pytest.raises(AssertionError):
+            self._assert_func(x, y.astype(np.float32), strict=True)
+
+
+class TestWarns:
+
+    def test_warn(self):
+        def f():
+            warnings.warn("yo")
+            return 3
+
+        before_filters = sys.modules['warnings'].filters[:]
+        assert_equal(assert_warns(UserWarning, f), 3)
+        after_filters = sys.modules['warnings'].filters
+
+        assert_raises(AssertionError, assert_no_warnings, f)
+        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
+
+        # Check that the warnings state is unchanged
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve warnings state")
+
+    def test_context_manager(self):
+
+        before_filters = sys.modules['warnings'].filters[:]
+        with assert_warns(UserWarning):
+            warnings.warn("yo")
+        after_filters = sys.modules['warnings'].filters
+
+        def no_warnings():
+            with assert_no_warnings():
+                warnings.warn("yo")
+
+        assert_raises(AssertionError, no_warnings)
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve warnings state")
+
+    def test_args(self):
+        def f(a=0, b=1):
+            warnings.warn("yo")
+            return a + b
+
+        assert assert_warns(UserWarning, f, b=20) == 20
+
+        with pytest.raises(RuntimeError) as exc:
+            # assert_warns cannot do regexp matching, use pytest.warns
+            with assert_warns(UserWarning, match="A"):
+                warnings.warn("B", UserWarning)
+        assert "assert_warns" in str(exc)
+        assert "pytest.warns" in str(exc)
+
+        with pytest.raises(RuntimeError) as exc:
+            # assert_warns cannot do regexp matching, use pytest.warns
+            with assert_warns(UserWarning, wrong="A"):
+                warnings.warn("B", UserWarning)
+        assert "assert_warns" in str(exc)
+        assert "pytest.warns" not in str(exc)
+
+    def test_warn_wrong_warning(self):
+        def f():
+            warnings.warn("yo", DeprecationWarning)
+
+        failed = False
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", DeprecationWarning)
+            try:
+                # Should raise a DeprecationWarning
+                assert_warns(UserWarning, f)
+                failed = True
+            except DeprecationWarning:
+                pass
+
+        if failed:
+            raise AssertionError("wrong warning caught by assert_warn")
+
+
+class TestAssertAllclose:
+
+    def test_simple(self):
+        x = 1e-3
+        y = 1e-9
+
+        assert_allclose(x, y, atol=1)
+        assert_raises(AssertionError, assert_allclose, x, y)
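+        # assert_allclose passes when |actual - desired| <= atol + rtol*|desired|
+        # (defaults: rtol=1e-7, atol=0), which is why atol=1 passes above while
+        # the default tolerances fail for x=1e-3 vs. y=1e-9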
+ + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: 999999.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(x, y) + + z = 0 + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.e-09\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(y, z) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.e-09\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(z, y) + + a = np.array([x, y, x, y]) + b = np.array([x, y, x, x]) + + assert_allclose(a, b, atol=1) + assert_raises(AssertionError, assert_allclose, a, b) + + b[-1] = y * (1 + 1e-8) + assert_allclose(a, b) + assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9) + + assert_allclose(6, 10, rtol=0.5) + assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5) + + b = np.array([x, y, x, x]) + c = np.array([x, y, x, z]) + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(b, c) + + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(c, b) + + def test_min_int(self): + a = np.array([np.iinfo(np.int_).min], dtype=np.int_) + # Should not raise: + assert_allclose(a, a) + + def test_report_fail_percentage(self): + a = np.array([1, 1, 1, 1]) + b = np.array([1, 1, 1, 2]) + + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Max absolute difference among violations: 1\n' + 'Max relative difference among violations: 0.5') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + def test_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + # Should not raise: + assert_allclose(a, b, equal_nan=True) + + def test_not_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + + def test_equal_nan_default(self): + # Make sure equal_nan default behavior remains unchanged. (All + # of these functions use assert_array_compare under the hood.) + # None of these should raise. 
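+        # (With the defaults, NaNs at matching positions compare equal in all
+        # four helpers, since they share assert_array_compare.)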
+ a = np.array([np.nan]) + b = np.array([np.nan]) + assert_array_equal(a, b) + assert_array_almost_equal(a, b) + assert_array_less(a, b) + assert_allclose(a, b) + + def test_report_max_relative_error(self): + a = np.array([0, 1]) + b = np.array([0, 2]) + + expected_msg = 'Max relative difference among violations: 0.5' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + def test_timedelta(self): + # see gh-18286 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert_allclose(a, a) + + def test_error_message_unsigned(self): + """Check the message is formatted correctly when overflow can occur + (gh21768)""" + # Ensure to test for potential overflow in the case of: + # x - y + # and + # y - x + x = np.asarray([0, 1, 8], dtype='uint8') + y = np.asarray([4, 4, 4], dtype='uint8') + expected_msg = 'Max absolute difference among violations: 4' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(x, y, atol=3) + + def test_strict(self): + """Test the behavior of the `strict` option.""" + x = np.ones(3) + y = np.ones(()) + assert_allclose(x, y) + with pytest.raises(AssertionError): + assert_allclose(x, y, strict=True) + assert_allclose(x, x) + with pytest.raises(AssertionError): + assert_allclose(x, x.astype(np.float32), strict=True) + + +class TestArrayAlmostEqualNulp: + + def test_float64_pass(self): + # The number of units of least precision + # In this case, use a few places above the lowest level (ie nulp=1) + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + # Addition + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + # Subtraction + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float64_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint64(0xffffffff) + nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64) + nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones. + nan1_f64 = nan1_i64.view(np.float64) + nan2_f64 = nan2_i64.view(np.float64) + assert_array_max_ulp(nan1_f64, nan2_f64, 0) + + def test_float32_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float32_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. 
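+        # y is now about 2*nulp spacings below x, outside the nulp tolerance,
+        # so the comparison below must raise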
+ assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float32_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint32(0xffff) + nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32) + nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones. + nan1_f32 = nan1_i32.view(np.float32) + nan2_f32 = nan2_i32.view(np.float32) + assert_array_max_ulp(nan1_f32, nan2_f32, 0) + + def test_float16_pass(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float16_fail(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float16_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint16(0xff) + nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16) + nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones. + nan1_f16 = nan1_i16.view(np.float16) + nan2_f16 = nan2_i16.view(np.float16) + assert_array_max_ulp(nan1_f16, nan2_f16, 0) + + def test_complex128_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + def test_complex128_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x * eps * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. 
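+        # perturbing a single component by ~2*nulp spacings is already enough
+        # to push the complex comparison out of tolerance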
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y * 1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x * 1j, nulp)
+        y = x - x * epsneg * nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y * 1j, nulp)
+
+    def test_complex64_pass(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x * 1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x * eps * nulp / 2.
+        assert_array_almost_equal_nulp(xi, x + y * 1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x * 1j, nulp)
+        y = x + x * eps * nulp / 4.
+        assert_array_almost_equal_nulp(xi, y + y * 1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x * epsneg * nulp / 2.
+        assert_array_almost_equal_nulp(xi, x + y * 1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x * 1j, nulp)
+        y = x - x * epsneg * nulp / 4.
+        assert_array_almost_equal_nulp(xi, y + y * 1j, nulp)
+
+    def test_complex64_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x * 1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x * eps * nulp * 2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y * 1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x * 1j, nulp)
+        y = x + x * eps * nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y * 1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x * epsneg * nulp * 2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y * 1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x * 1j, nulp)
+        y = x - x * epsneg * nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y * 1j, nulp)
+
+
+class TestULP:
+
+    def test_equal(self):
+        x = np.random.randn(10)
+        assert_array_max_ulp(x, x, maxulp=0)
+
+    def test_single(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+        x = np.ones(10).astype(np.float32)
+        x += 0.01 * np.random.randn(10).astype(np.float32)
+        eps = np.finfo(np.float32).eps
+        assert_array_max_ulp(x, x + eps, maxulp=20)
+
+    def test_double(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+        x = np.ones(10).astype(np.float64)
+        x += 0.01 * np.random.randn(10).astype(np.float64)
+        eps = np.finfo(np.float64).eps
+        assert_array_max_ulp(x, x + eps, maxulp=200)
+
+    def test_inf(self):
+        for dt in [np.float32, np.float64]:
+            inf = np.array([np.inf]).astype(dt)
+            big = np.array([np.finfo(dt).max])
+            assert_array_max_ulp(inf, big, maxulp=200)
+
+    def test_nan(self):
+        # Test that nan is 'far' from small, tiny, inf, max and min
+        for dt in [np.float32, np.float64]:
+            if dt == np.float32:
+                maxulp = 1e6
+            else:
+                maxulp = 1e12
+            inf = np.array([np.inf]).astype(dt)
+            nan = np.array([np.nan]).astype(dt)
+            big = np.array([np.finfo(dt).max])
+            tiny = np.array([np.finfo(dt).tiny])
+            zero = np.array([0.0]).astype(dt)
+            nzero = np.array([-0.0]).astype(dt)
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, inf,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, big,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, tiny,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, zero,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, nzero,
+                                                       maxulp=maxulp))
+
+
+class TestStringEqual:
+    def
test_simple(self): + assert_string_equal("hello", "hello") + assert_string_equal("hello\nmultiline", "hello\nmultiline") + + with pytest.raises(AssertionError) as exc_info: + assert_string_equal("foo\nbar", "hello\nbar") + msg = str(exc_info.value) + assert_equal(msg, "Differences in strings:\n- foo\n+ hello") + + assert_raises(AssertionError, + lambda: assert_string_equal("foo", "hello")) + + def test_regex(self): + assert_string_equal("a+*b", "a+*b") + + assert_raises(AssertionError, + lambda: assert_string_equal("aaa", "a+b")) + + +def assert_warn_len_equal(mod, n_in_context): + try: + mod_warns = mod.__warningregistry__ + except AttributeError: + # the lack of a __warningregistry__ + # attribute means that no warning has + # occurred; this can be triggered in + # a parallel test scenario, while in + # a serial test scenario an initial + # warning (and therefore the attribute) + # are always created first + mod_warns = {} + + num_warns = len(mod_warns) + + if 'version' in mod_warns: + # Python adds a 'version' entry to the registry, + # do not count it. + num_warns -= 1 + + assert_equal(num_warns, n_in_context) + + +def test_warn_len_equal_call_scenarios(): + # assert_warn_len_equal is called under + # varying circumstances depending on serial + # vs. parallel test scenarios; this test + # simply aims to probe both code paths and + # check that no assertion is uncaught + + # parallel scenario -- no warning issued yet + class mod: + pass + + mod_inst = mod() + + assert_warn_len_equal(mod=mod_inst, + n_in_context=0) + + # serial test scenario -- the __warningregistry__ + # attribute should be present + class mod: + def __init__(self): + self.__warningregistry__ = {'warning1': 1, + 'warning2': 2} + + mod_inst = mod() + assert_warn_len_equal(mod=mod_inst, + n_in_context=2) + + +def _get_fresh_mod(): + # Get this module, with warning registry empty + my_mod = sys.modules[__name__] + try: + my_mod.__warningregistry__.clear() + except AttributeError: + # will not have a __warningregistry__ unless warning has been + # raised in the module at some point + pass + return my_mod + + +def test_clear_and_catch_warnings(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + # Without specified modules, don't clear warnings during context. + # catch_warnings doesn't make an entry for 'ignore'. 
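+    # (Without `modules`, the filter state is still restored on exit, but no
+    # module __warningregistry__ is cleared on entry.)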
+    with clear_and_catch_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+    # Manually adding two warnings to the registry:
+    my_mod.__warningregistry__ = {'warning1': 1,
+                                  'warning2': 2}
+
+    # Confirm that specifying module keeps old warning, does not add new
+    with clear_and_catch_warnings(modules=[my_mod]):
+        warnings.simplefilter('ignore')
+        warnings.warn('Another warning')
+    assert_warn_len_equal(my_mod, 2)
+
+    # Another warning; with no module spec, the registry is cleared again
+    with clear_and_catch_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Another warning')
+    assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_module():
+    # Initial state of module, no warnings
+    my_mod = _get_fresh_mod()
+    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+
+    def warn_other_module():
+        # Apply along axis is implemented in python; stacklevel=2 means
+        # we end up inside its module, not ours.
+        def warn(arr):
+            warnings.warn("Some warning 2", stacklevel=2)
+            return arr
+        np.apply_along_axis(warn, 0, [0])
+
+    # Test module based warning suppression:
+    assert_warn_len_equal(my_mod, 0)
+    with suppress_warnings() as sup:
+        sup.record(UserWarning)
+        # suppress warning from other module (may have .pyc ending);
+        # this filter will need updating if apply_along_axis is moved
+        sup.filter(module=np.lib._shape_base_impl)
+        warnings.warn("Some warning")
+        warn_other_module()
+    # Check that the suppression did test the file correctly (this module
+    # got filtered)
+    assert_equal(len(sup.log), 1)
+    assert_equal(sup.log[0].message.args[0], "Some warning")
+    assert_warn_len_equal(my_mod, 0)
+    sup = suppress_warnings()
+    # Will have to be changed if apply_along_axis is moved:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    # And test repeat works:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+    # Without specified modules
+    with suppress_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_type():
+    # Initial state of module, no warnings
+    my_mod = _get_fresh_mod()
+    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+
+    # Test type based warning suppression:
+    with suppress_warnings() as sup:
+        sup.filter(UserWarning)
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    sup = suppress_warnings()
+    sup.filter(UserWarning)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    # And test repeat works:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+    # Without specified modules
+    with suppress_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_decorate_no_record():
+    sup = suppress_warnings()
+    sup.filter(UserWarning)
+
+    @sup
+    def warn(category):
+        warnings.warn('Some warning', category)
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+        warn(UserWarning)  # should be suppressed
+        warn(RuntimeWarning)
+        assert_equal(len(w), 1)
+
+
+def test_suppress_warnings_record():
+    sup = suppress_warnings()
+    log1 = sup.record()
+
+    with sup:
+        log2 = sup.record(message='Some other warning 2')
+        sup.filter(message='Some warning')
+        warnings.warn('Some warning')
+        warnings.warn('Some other
warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2), 1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Do it again, with the same context to see if some warnings survived: + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2), 1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Test nested: + with suppress_warnings() as sup: + sup.record() + with suppress_warnings() as sup2: + sup2.record(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + assert_equal(len(sup2.log), 1) + assert_equal(len(sup.log), 1) + + +def test_suppress_warnings_forwarding(): + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. + def warn(arr): + warnings.warn("Some warning", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("always"): + for i in range(2): + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("location"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("module"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("once"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some other warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + +def test_tempdir(): + with tempdir() as tdir: + fpath = os.path.join(tdir, 'tmp') + with open(fpath, 'w'): + pass + assert_(not os.path.isdir(tdir)) + + raised = False + try: + with tempdir() as tdir: + raise ValueError + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isdir(tdir)) + + +def test_temppath(): + with temppath() as fpath: + with open(fpath, 'w'): + pass + assert_(not os.path.isfile(fpath)) + + raised = False + try: + with temppath() as fpath: + raise ValueError + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isfile(fpath)) + + +class my_cacw(clear_and_catch_warnings): + + class_modules = (sys.modules[__name__],) + + +def test_clear_and_catch_warnings_inherit(): + # Test can subclass and add default modules + my_mod = _get_fresh_mod() + with my_cacw(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestAssertNoGcCycles: + """ Test assert_no_gc_cycles """ + + def test_passes(self): + def no_cycle(): + b = [] + b.append([]) + return b + + with assert_no_gc_cycles(): + no_cycle() + + assert_no_gc_cycles(no_cycle) + + def test_asserts(self): + def make_cycle(): + a = [] + a.append(a) + a.append(a) + return a + + with assert_raises(AssertionError): + with assert_no_gc_cycles(): + make_cycle() + + with assert_raises(AssertionError): + 
assert_no_gc_cycles(make_cycle) + + @pytest.mark.slow + def test_fails(self): + """ + Test that in cases where the garbage cannot be collected, we raise an + error, instead of hanging forever trying to clear it. + """ + + class ReferenceCycleInDel: + """ + An object that not only contains a reference cycle, but creates new + cycles whenever it's garbage-collected and its __del__ runs + """ + make_cycle = True + + def __init__(self): + self.cycle = self + + def __del__(self): + # break the current cycle so that `self` can be freed + self.cycle = None + + if ReferenceCycleInDel.make_cycle: + # but create a new one so that the garbage collector (GC) has more + # work to do. + ReferenceCycleInDel() + + try: + w = weakref.ref(ReferenceCycleInDel()) + try: + with assert_raises(RuntimeError): + # this will be unable to get a baseline empty garbage + assert_no_gc_cycles(lambda: None) + except AssertionError: + # the above test is only necessary if the GC actually tried to free + # our object anyway. + if w() is not None: + pytest.skip("GC does not call __del__ on cyclic objects") + raise + + finally: + # make sure that we stop creating reference cycles + ReferenceCycleInDel.make_cycle = False diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..06f9d4b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test__all__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test__all__.cpython-312.pyc new file mode 100644 index 0000000..ee12204 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test__all__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_configtool.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_configtool.cpython-312.pyc new file mode 100644 index 0000000..fd1272e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_configtool.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_ctypeslib.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_ctypeslib.cpython-312.pyc new file mode 100644 index 0000000..61d8ccd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_ctypeslib.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_lazyloading.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_lazyloading.cpython-312.pyc new file mode 100644 index 0000000..cea7387 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_lazyloading.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_matlib.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_matlib.cpython-312.pyc new file mode 100644 index 0000000..be7bde1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_matlib.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_numpy_config.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_numpy_config.cpython-312.pyc new file mode 100644 index 0000000..0064655 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_numpy_config.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_numpy_version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_numpy_version.cpython-312.pyc new file mode 100644 index 0000000..532ac43 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_numpy_version.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_public_api.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_public_api.cpython-312.pyc new file mode 100644 index 0000000..b11a6ee Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_public_api.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_reloading.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_reloading.cpython-312.pyc new file mode 100644 index 0000000..26f499c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_reloading.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_scripts.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_scripts.cpython-312.pyc new file mode 100644 index 0000000..697fcb8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_scripts.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_warnings.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_warnings.cpython-312.pyc new file mode 100644 index 0000000..81495c6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/tests/__pycache__/test_warnings.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test__all__.py b/.venv/lib/python3.12/site-packages/numpy/tests/test__all__.py new file mode 100644 index 0000000..2dc8166 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/tests/test__all__.py @@ -0,0 +1,10 @@ + +import collections + +import numpy as np + + +def test_no_duplicates_in_np__all__(): + # Regression test for gh-10198. 
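+    # collections.Counter maps each name in np.__all__ to its number of
+    # occurrences; any count above 1 is a duplicate export.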
+ dups = {k: v for k, v in collections.Counter(np.__all__).items() if v > 1} + assert len(dups) == 0 diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_configtool.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_configtool.py new file mode 100644 index 0000000..e0b9bb1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_configtool.py @@ -0,0 +1,48 @@ +import importlib +import importlib.metadata +import os +import pathlib +import subprocess + +import pytest + +import numpy as np +import numpy._core.include +import numpy._core.lib.pkgconfig +from numpy.testing import IS_EDITABLE, IS_INSTALLED, IS_WASM, NUMPY_ROOT + +INCLUDE_DIR = NUMPY_ROOT / '_core' / 'include' +PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig' + + +@pytest.mark.skipif(not IS_INSTALLED, reason="`numpy-config` not expected to be installed") +@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") +class TestNumpyConfig: + def check_numpyconfig(self, arg): + p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) + p.check_returncode() + return p.stdout.strip() + + def test_configtool_version(self): + stdout = self.check_numpyconfig('--version') + assert stdout == np.__version__ + + def test_configtool_cflags(self): + stdout = self.check_numpyconfig('--cflags') + assert f'-I{os.fspath(INCLUDE_DIR)}' in stdout + + def test_configtool_pkgconfigdir(self): + stdout = self.check_numpyconfig('--pkgconfigdir') + assert pathlib.Path(stdout) == PKG_CONFIG_DIR + + +@pytest.mark.skipif(not IS_INSTALLED, reason="numpy must be installed to check its entrypoints") +def test_pkg_config_entrypoint(): + (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy') + assert entrypoint.value == numpy._core.lib.pkgconfig.__name__ + + +@pytest.mark.skipif(not IS_INSTALLED, reason="numpy.pc is only available when numpy is installed") +@pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc") +def test_pkg_config_config_exists(): + assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file() diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_ctypeslib.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_ctypeslib.py new file mode 100644 index 0000000..68d3141 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_ctypeslib.py @@ -0,0 +1,377 @@ +import sys +import sysconfig +import weakref +from pathlib import Path + +import pytest + +import numpy as np +from numpy.ctypeslib import as_array, load_library, ndpointer +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises + +try: + import ctypes +except ImportError: + ctypes = None +else: + cdll = None + test_cdll = None + if hasattr(sys, 'gettotalrefcount'): + try: + cdll = load_library( + '_multiarray_umath_d', np._core._multiarray_umath.__file__ + ) + except OSError: + pass + try: + test_cdll = load_library( + '_multiarray_tests', np._core._multiarray_tests.__file__ + ) + except OSError: + pass + if cdll is None: + cdll = load_library( + '_multiarray_umath', np._core._multiarray_umath.__file__) + if test_cdll is None: + test_cdll = load_library( + '_multiarray_tests', np._core._multiarray_tests.__file__ + ) + + c_forward_pointer = test_cdll.forward_pointer + + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available in this python") +@pytest.mark.skipif(sys.platform == 'cygwin', + reason="Known to fail on cygwin") +class TestLoadLibrary: + def test_basic(self): + loader_path = 
np._core._multiarray_umath.__file__
+
+        out1 = load_library('_multiarray_umath', loader_path)
+        out2 = load_library(Path('_multiarray_umath'), loader_path)
+        out3 = load_library('_multiarray_umath', Path(loader_path))
+        out4 = load_library(b'_multiarray_umath', loader_path)
+
+        assert isinstance(out1, ctypes.CDLL)
+        assert out1 is out2 is out3 is out4
+
+    def test_basic2(self):
+        # Regression for #801: load_library with a full library name
+        # (including extension) does not work.
+        try:
+            so_ext = sysconfig.get_config_var('EXT_SUFFIX')
+            load_library(f'_multiarray_umath{so_ext}',
+                         np._core._multiarray_umath.__file__)
+        except ImportError as e:
+            msg = ("ctypes is not available on this python: skipping the test"
+                   " (import error was: %s)" % str(e))
+            print(msg)
+
+
+class TestNdpointer:
+    def test_dtype(self):
+        dt = np.intc
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.array([1], dt)))
+        dt = '<i4'
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.array([1], dt)))
+        dt = np.dtype('>i4')
+        p = ndpointer(dtype=dt)
+        p.from_param(np.array([1], dt))
+        assert_raises(TypeError, p.from_param,
+                      np.array([1], dt.newbyteorder('swap')))
+        dtnames = ['x', 'y']
+        dtformats = [np.intc, np.float64]
+        dtdescr = {'names': dtnames, 'formats': dtformats}
+        dt = np.dtype(dtdescr)
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.zeros((10,), dt)))
+        samedt = np.dtype(dtdescr)
+        p = ndpointer(dtype=samedt)
+        assert_(p.from_param(np.zeros((10,), dt)))
+        dt2 = np.dtype(dtdescr, align=True)
+        if dt.itemsize != dt2.itemsize:
+            assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
+        else:
+            assert_(p.from_param(np.zeros((10,), dt2)))
+
+    def test_ndim(self):
+        p = ndpointer(ndim=0)
+        assert_(p.from_param(np.array(1)))
+        assert_raises(TypeError, p.from_param, np.array([1]))
+        p = ndpointer(ndim=1)
+        assert_raises(TypeError, p.from_param, np.array(1))
+        assert_(p.from_param(np.array([1])))
+        p = ndpointer(ndim=2)
+        assert_(p.from_param(np.array([[1]])))
+
+    def test_shape(self):
+        p = ndpointer(shape=(1, 2))
+        assert_(p.from_param(np.array([[1, 2]])))
+        assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
+        p = ndpointer(shape=())
+        assert_(p.from_param(np.array(1)))
+
+    def test_flags(self):
+        x = np.array([[1, 2], [3, 4]], order='F')
+        p = ndpointer(flags='FORTRAN')
+        assert_(p.from_param(x))
+        p = ndpointer(flags='CONTIGUOUS')
+        assert_raises(TypeError, p.from_param, x)
+        p = ndpointer(flags=x.flags.num)
+        assert_(p.from_param(x))
+        assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
+
+    def test_cache(self):
+        assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
+
+        # shapes are normalized
+        assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
+
+        # 1.12 <= v < 1.16 had a bug that made these fail
+        assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
+        assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available on this python installation")
+class TestNdpointerCFunc:
+    def test_arguments(self):
+        """ Test that arguments are coerced from arrays """
+        c_forward_pointer.restype = ctypes.c_void_p
+        c_forward_pointer.argtypes = (ndpointer(ndim=2),)
+
+        c_forward_pointer(np.zeros((2, 3)))
+        # too many dimensions
+        assert_raises(
+            ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
+
+    @pytest.mark.parametrize(
+        'dt', [
+            float,
+            np.dtype({
+                'formats': ['<i4', '<i4'],
+                'names': ['a', 'b'],
+                'offsets': [0, 2],
+                'itemsize': 6
+            })
+        ], ids=['float', 'overlapping-fields']
+    )
+    def test_return(self, dt):
+        """ Test that return values are coerced to arrays """
+        arr = np.zeros((2, 3), dt)
+        ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
+
+        c_forward_pointer.restype = ptr_type
+        c_forward_pointer.argtypes = (ptr_type,)
+
+        # check that the arrays are equivalent views on the same data
+        arr2 = c_forward_pointer(arr)
+        assert_equal(arr2.dtype, arr.dtype)
+        assert_equal(arr2.shape, arr.shape)
+        assert_equal(
+            arr2.__array_interface__['data'],
+            arr.__array_interface__['data']
+        )
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available on this python installation")
+class TestAsCtypesType:
+    """ Test conversion from dtypes to ctypes types """
+    def test_scalar(self):
+        dt = np.dtype('<u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_le__)
+
+        dt = np.dtype('>u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_be__)
+
+        dt = np.dtype('u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16)
+
+    def test_subarray(self):
+        dt =
np.dtype((np.int32, (2, 3))) + ct = np.ctypeslib.as_ctypes_type(dt) + assert_equal(ct, 2 * (3 * ctypes.c_int32)) + + def test_structure(self): + dt = np.dtype([ + ('a', np.uint16), + ('b', np.uint32), + ]) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Structure)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('b', ctypes.c_uint32), + ]) + + def test_structure_aligned(self): + dt = np.dtype([ + ('a', np.uint16), + ('b', np.uint32), + ], align=True) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Structure)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('', ctypes.c_char * 2), # padding + ('b', ctypes.c_uint32), + ]) + + def test_union(self): + dt = np.dtype({ + 'names': ['a', 'b'], + 'offsets': [0, 0], + 'formats': [np.uint16, np.uint32] + }) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Union)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('b', ctypes.c_uint32), + ]) + + def test_padded_union(self): + dt = np.dtype({ + 'names': ['a', 'b'], + 'offsets': [0, 0], + 'formats': [np.uint16, np.uint32], + 'itemsize': 5, + }) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Union)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('b', ctypes.c_uint32), + ('', ctypes.c_char * 5), # padding + ]) + + def test_overlapping(self): + dt = np.dtype({ + 'names': ['a', 'b'], + 'offsets': [0, 2], + 'formats': [np.uint32, np.uint32] + }) + assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt) diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_lazyloading.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_lazyloading.py new file mode 100644 index 0000000..5f6233f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_lazyloading.py @@ -0,0 +1,38 @@ +import sys +from importlib.util import LazyLoader, find_spec, module_from_spec + +import pytest + + +# Warning raised by _reload_guard() in numpy/__init__.py +@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded") +def test_lazy_load(): + # gh-22045. 
lazyload doesn't import submodule names into the namespace
+    # muck with sys.modules to test the importing system
+    old_numpy = sys.modules.pop("numpy")
+
+    numpy_modules = {}
+    for mod_name, mod in list(sys.modules.items()):
+        if mod_name[:6] == "numpy.":
+            numpy_modules[mod_name] = mod
+            sys.modules.pop(mod_name)
+
+    try:
+        # create lazy load of numpy as np
+        spec = find_spec("numpy")
+        module = module_from_spec(spec)
+        sys.modules["numpy"] = module
+        loader = LazyLoader(spec.loader)
+        loader.exec_module(module)
+        np = module
+
+        # test a subpackage import
+        from numpy.lib import recfunctions  # noqa: F401
+
+        # test triggering the import of the package
+        np.ndarray
+
+    finally:
+        if old_numpy:
+            sys.modules["numpy"] = old_numpy
+            sys.modules.update(numpy_modules)
diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_matlib.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_matlib.py
new file mode 100644
index 0000000..2aac1f2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_matlib.py
@@ -0,0 +1,59 @@
+import numpy as np
+import numpy.matlib
+from numpy.testing import assert_, assert_array_equal, assert_equal
+
+
+def test_empty():
+    x = numpy.matlib.empty((2,))
+    assert_(isinstance(x, np.matrix))
+    assert_equal(x.shape, (1, 2))
+
+def test_ones():
+    assert_array_equal(numpy.matlib.ones((2, 3)),
+                       np.matrix([[ 1.,  1.,  1.],
+                                  [ 1.,  1.,  1.]]))
+
+    assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1.,  1.]]))
+
+def test_zeros():
+    assert_array_equal(numpy.matlib.zeros((2, 3)),
+                       np.matrix([[ 0.,  0.,  0.],
+                                  [ 0.,  0.,  0.]]))
+
+    assert_array_equal(numpy.matlib.zeros(2), np.matrix([[0.,  0.]]))
+
+def test_identity():
+    x = numpy.matlib.identity(2, dtype=int)
+    assert_array_equal(x, np.matrix([[1, 0], [0, 1]]))
+
+def test_eye():
+    xc = numpy.matlib.eye(3, k=1, dtype=int)
+    assert_array_equal(xc, np.matrix([[ 0,  1,  0],
+                                      [ 0,  0,  1],
+                                      [ 0,  0,  0]]))
+    assert xc.flags.c_contiguous
+    assert not xc.flags.f_contiguous
+
+    xf = numpy.matlib.eye(3, 4, dtype=int, order='F')
+    assert_array_equal(xf, np.matrix([[ 1,  0,  0,  0],
+                                      [ 0,  1,  0,  0],
+                                      [ 0,  0,  1,  0]]))
+    assert not xf.flags.c_contiguous
+    assert xf.flags.f_contiguous
+
+def test_rand():
+    x = numpy.matlib.rand(3)
+    # check matrix type, array would have shape (3,)
+    assert_(x.ndim == 2)
+
+def test_randn():
+    x = np.matlib.randn(3)
+    # check matrix type, array would have shape (3,)
+    assert_(x.ndim == 2)
+
+def test_repmat():
+    a1 = np.arange(4)
+    x = numpy.matlib.repmat(a1, 2, 2)
+    y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
+                  [0, 1, 2, 3, 0, 1, 2, 3]])
+    assert_array_equal(x, y)
diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_numpy_config.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_numpy_config.py
new file mode 100644
index 0000000..f01a279
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_numpy_config.py
@@ -0,0 +1,46 @@
+"""
+Check the numpy config is valid.
+""" +from unittest.mock import patch + +import pytest + +import numpy as np + +pytestmark = pytest.mark.skipif( + not hasattr(np.__config__, "_built_with_meson"), + reason="Requires Meson builds", +) + + +class TestNumPyConfigs: + REQUIRED_CONFIG_KEYS = [ + "Compilers", + "Machine Information", + "Python Information", + ] + + @patch("numpy.__config__._check_pyyaml") + def test_pyyaml_not_found(self, mock_yaml_importer): + mock_yaml_importer.side_effect = ModuleNotFoundError() + with pytest.warns(UserWarning): + np.show_config() + + def test_dict_mode(self): + config = np.show_config(mode="dicts") + + assert isinstance(config, dict) + assert all(key in config for key in self.REQUIRED_CONFIG_KEYS), ( + "Required key missing," + " see index of `False` with `REQUIRED_CONFIG_KEYS`" + ) + + def test_invalid_mode(self): + with pytest.raises(AttributeError): + np.show_config(mode="foo") + + def test_warn_to_add_tests(self): + assert len(np.__config__.DisplayModes) == 2, ( + "New mode detected," + " please add UT if applicable and increment this count" + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_numpy_version.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_numpy_version.py new file mode 100644 index 0000000..ea16422 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_numpy_version.py @@ -0,0 +1,54 @@ +""" +Check the numpy version is valid. + +Note that a development version is marked by the presence of 'dev0' or '+' +in the version string, all else is treated as a release. The version string +itself is set from the output of ``git describe`` which relies on tags. + +Examples +-------- + +Valid Development: 1.22.0.dev0 1.22.0.dev0+5-g7999db4df2 1.22.0+5-g7999db4df2 +Valid Release: 1.21.0.rc1, 1.21.0.b1, 1.21.0 +Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a + +Note that a release is determined by the version string, which in turn +is controlled by the result of the ``git describe`` command. +""" +import re + +import numpy as np +from numpy.testing import assert_ + + +def test_valid_numpy_version(): + # Verify that the numpy version is a valid one (no .post suffix or other + # nonsense). See gh-6431 for an issue caused by an invalid version. + version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9])?" + dev_suffix = r"(\.dev[0-9]+(\+git[0-9]+\.[0-9a-f]+)?)?" 
+    res = re.match(version_pattern + dev_suffix + '$', np.__version__)
+
+    assert_(res is not None, np.__version__)
+
+
+def test_short_version():
+    # Check numpy.short_version actually exists
+    if np.version.release:
+        assert_(np.__version__ == np.version.short_version,
+                "short_version mismatch in release version")
+    else:
+        assert_(np.__version__.split("+")[0] == np.version.short_version,
+                "short_version mismatch in development version")
+
+
+def test_version_module():
+    contents = {s for s in dir(np.version) if not s.startswith('_')}
+    expected = {
+        'full_version',
+        'git_revision',
+        'release',
+        'short_version',
+        'version',
+    }
+
+    assert contents == expected
diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_public_api.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_public_api.py
new file mode 100644
index 0000000..a56cd13
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_public_api.py
@@ -0,0 +1,806 @@
+import functools
+import importlib
+import inspect
+import pkgutil
+import subprocess
+import sys
+import sysconfig
+import types
+import warnings
+
+import pytest
+
+import numpy
+import numpy as np
+from numpy.testing import IS_WASM
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+
+def check_dir(module, module_name=None):
+    """Returns a mapping of all objects with the wrong __module__ attribute."""
+    if module_name is None:
+        module_name = module.__name__
+    results = {}
+    for name in dir(module):
+        if name == "core":
+            continue
+        item = getattr(module, name)
+        if (hasattr(item, '__module__') and hasattr(item, '__name__')
+                and item.__module__ != module_name):
+            results[name] = item.__module__ + '.' + item.__name__
+    return results
+
+
+def test_numpy_namespace():
+    # We override dir to not show these members
+    allowlist = {
+        'recarray': 'numpy.rec.recarray',
+    }
+    bad_results = check_dir(np)
+    # pytest gives better error messages with the builtin assert than with
+    # assert_equal
+    assert bad_results == allowlist
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
+@pytest.mark.parametrize('name', ['testing'])
+def test_import_lazy_import(name):
+    """Make sure we can actually use the modules we lazy load.
+
+    While not exported as part of the public API, it was accessible. With the
+    use of __getattr__ and __dir__, this isn't always true: it can even end in
+    infinite recursion.
+
+    This is the only way I found that would force the failure to appear on the
+    badly implemented code.
+
+    We also test for the presence of the lazily imported modules in dir.
+    """
+    exe = (sys.executable, '-c', "import numpy; numpy."
+ name)
+    result = subprocess.check_output(exe)
+    assert not result
+
+    # Make sure they are still in the __dir__
+    assert name in dir(np)
+
+
+def test_dir_testing():
+    """Assert that the output of dir(np) contains no duplicate
+    "testing"/"tester" attributes."""
+    assert len(dir(np)) == len(set(dir(np)))
+
+
+def test_numpy_linalg():
+    bad_results = check_dir(np.linalg)
+    assert bad_results == {}
+
+
+def test_numpy_fft():
+    bad_results = check_dir(np.fft)
+    assert bad_results == {}
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available in this python")
+def test_NPY_NO_EXPORT():
+    cdll = ctypes.CDLL(np._core._multiarray_tests.__file__)
+    # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
+    f = getattr(cdll, 'test_not_exported', None)
+    assert f is None, ("'test_not_exported' is mistakenly exported, "
+                       "NPY_NO_EXPORT does not work")
+
+
+# Historically NumPy has not used leading underscores for private submodules
+# much. This has resulted in lots of things that look like public modules
+# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
+# but were never intended to be public. The PUBLIC_MODULES list contains
+# modules that are either public because they were meant to be, or because they
+# contain public functions/objects that aren't present in any other namespace
+# for whatever reason and therefore should be treated as public.
+#
+# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
+# of underscores) but should not be used. For many of those modules the
+# current status is fine. For others it may make sense to work on making them
+# private, to clean up our public API and avoid confusion.
+PUBLIC_MODULES = ['numpy.' + s for s in [
+    "ctypeslib",
+    "dtypes",
+    "exceptions",
+    "f2py",
+    "fft",
+    "lib",
+    "lib.array_utils",
+    "lib.format",
+    "lib.introspect",
+    "lib.mixins",
+    "lib.npyio",
+    "lib.recfunctions",  # note: still needs cleaning, was forgotten for 2.0
+    "lib.scimath",
+    "lib.stride_tricks",
+    "linalg",
+    "ma",
+    "ma.extras",
+    "ma.mrecords",
+    "polynomial",
+    "polynomial.chebyshev",
+    "polynomial.hermite",
+    "polynomial.hermite_e",
+    "polynomial.laguerre",
+    "polynomial.legendre",
+    "polynomial.polynomial",
+    "random",
+    "strings",
+    "testing",
+    "testing.overrides",
+    "typing",
+    "typing.mypy_plugin",
+    "version",
+]]
+if sys.version_info < (3, 12):
+    PUBLIC_MODULES += [
+        'numpy.' + s for s in [
+            "distutils",
+            "distutils.cpuinfo",
+            "distutils.exec_command",
+            "distutils.misc_util",
+            "distutils.log",
+            "distutils.system_info",
+        ]
+    ]
+
+
+PUBLIC_ALIASED_MODULES = [
+    "numpy.char",
+    "numpy.emath",
+    "numpy.rec",
+]
+
+
+PRIVATE_BUT_PRESENT_MODULES = ['numpy.'
+ s for s in [ + "conftest", + "core", + "core.multiarray", + "core.numeric", + "core.umath", + "core.arrayprint", + "core.defchararray", + "core.einsumfunc", + "core.fromnumeric", + "core.function_base", + "core.getlimits", + "core.numerictypes", + "core.overrides", + "core.records", + "core.shape_base", + "f2py.auxfuncs", + "f2py.capi_maps", + "f2py.cb_rules", + "f2py.cfuncs", + "f2py.common_rules", + "f2py.crackfortran", + "f2py.diagnose", + "f2py.f2py2e", + "f2py.f90mod_rules", + "f2py.func2subr", + "f2py.rules", + "f2py.symbolic", + "f2py.use_rules", + "fft.helper", + "lib.user_array", # note: not in np.lib, but probably should just be deleted + "linalg.lapack_lite", + "linalg.linalg", + "ma.core", + "ma.testutils", + "matlib", + "matrixlib", + "matrixlib.defmatrix", + "polynomial.polyutils", + "random.mtrand", + "random.bit_generator", + "testing.print_coercion_tables", +]] +if sys.version_info < (3, 12): + PRIVATE_BUT_PRESENT_MODULES += [ + 'numpy.' + s for s in [ + "distutils.armccompiler", + "distutils.fujitsuccompiler", + "distutils.ccompiler", + 'distutils.ccompiler_opt', + "distutils.command", + "distutils.command.autodist", + "distutils.command.bdist_rpm", + "distutils.command.build", + "distutils.command.build_clib", + "distutils.command.build_ext", + "distutils.command.build_py", + "distutils.command.build_scripts", + "distutils.command.build_src", + "distutils.command.config", + "distutils.command.config_compiler", + "distutils.command.develop", + "distutils.command.egg_info", + "distutils.command.install", + "distutils.command.install_clib", + "distutils.command.install_data", + "distutils.command.install_headers", + "distutils.command.sdist", + "distutils.conv_template", + "distutils.core", + "distutils.extension", + "distutils.fcompiler", + "distutils.fcompiler.absoft", + "distutils.fcompiler.arm", + "distutils.fcompiler.compaq", + "distutils.fcompiler.environment", + "distutils.fcompiler.g95", + "distutils.fcompiler.gnu", + "distutils.fcompiler.hpux", + "distutils.fcompiler.ibm", + "distutils.fcompiler.intel", + "distutils.fcompiler.lahey", + "distutils.fcompiler.mips", + "distutils.fcompiler.nag", + "distutils.fcompiler.none", + "distutils.fcompiler.pathf95", + "distutils.fcompiler.pg", + "distutils.fcompiler.nv", + "distutils.fcompiler.sun", + "distutils.fcompiler.vast", + "distutils.fcompiler.fujitsu", + "distutils.from_template", + "distutils.intelccompiler", + "distutils.lib2def", + "distutils.line_endings", + "distutils.mingw32ccompiler", + "distutils.msvccompiler", + "distutils.npy_pkg_config", + "distutils.numpy_distribution", + "distutils.pathccompiler", + "distutils.unixccompiler", + ] + ] + + +def is_unexpected(name): + """Check if this needs to be considered.""" + return ( + '._' not in name and '.tests' not in name and '.setup' not in name + and name not in PUBLIC_MODULES + and name not in PUBLIC_ALIASED_MODULES + and name not in PRIVATE_BUT_PRESENT_MODULES + ) + + +if sys.version_info >= (3, 12): + SKIP_LIST = [] +else: + SKIP_LIST = ["numpy.distutils.msvc9compiler"] + + +def test_all_modules_are_expected(): + """ + Test that we don't add anything that looks like a new public module by + accident. Check is based on filenames. + """ + + modnames = [] + for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, + prefix=np.__name__ + '.', + onerror=None): + if is_unexpected(modname) and modname not in SKIP_LIST: + # We have a name that is new. If that's on purpose, add it to + # PUBLIC_MODULES. 
We don't expect to have to add anything to + # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! + modnames.append(modname) + + if modnames: + raise AssertionError(f'Found unexpected modules: {modnames}') + + +# Stuff that clearly shouldn't be in the API and is detected by the next test +# below +SKIP_LIST_2 = [ + 'numpy.lib.math', + 'numpy.matlib.char', + 'numpy.matlib.rec', + 'numpy.matlib.emath', + 'numpy.matlib.exceptions', + 'numpy.matlib.math', + 'numpy.matlib.linalg', + 'numpy.matlib.fft', + 'numpy.matlib.random', + 'numpy.matlib.ctypeslib', + 'numpy.matlib.ma', +] +if sys.version_info < (3, 12): + SKIP_LIST_2 += [ + 'numpy.distutils.log.sys', + 'numpy.distutils.log.logging', + 'numpy.distutils.log.warnings', + ] + + +def test_all_modules_are_expected_2(): + """ + Method checking all objects. The pkgutil-based method in + `test_all_modules_are_expected` does not catch imports into a namespace, + only filenames. So this test is more thorough, and checks this like: + + import .lib.scimath as emath + + To check if something in a module is (effectively) public, one can check if + there's anything in that namespace that's a public function/object but is + not exposed in a higher-level namespace. For example for a `numpy.lib` + submodule:: + + mod = np.lib.mixins + for obj in mod.__all__: + if obj in np.__all__: + continue + elif obj in np.lib.__all__: + continue + + else: + print(obj) + + """ + + def find_unexpected_members(mod_name): + members = [] + module = importlib.import_module(mod_name) + if hasattr(module, '__all__'): + objnames = module.__all__ + else: + objnames = dir(module) + + for objname in objnames: + if not objname.startswith('_'): + fullobjname = mod_name + '.' + objname + if isinstance(getattr(module, objname), types.ModuleType): + if is_unexpected(fullobjname): + if fullobjname not in SKIP_LIST_2: + members.append(fullobjname) + + return members + + unexpected_members = find_unexpected_members("numpy") + for modname in PUBLIC_MODULES: + unexpected_members.extend(find_unexpected_members(modname)) + + if unexpected_members: + raise AssertionError("Found unexpected object(s) that look like " + f"modules: {unexpected_members}") + + +def test_api_importable(): + """ + Check that all submodules listed higher up in this file can be imported + + Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may + simply need to be removed from the list (deprecation may or may not be + needed - apply common sense). 
+ """ + def check_importable(module_name): + try: + importlib.import_module(module_name) + except (ImportError, AttributeError): + return False + + return True + + module_names = [] + for module_name in PUBLIC_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that cannot be " + f"imported: {module_names}") + + for module_name in PUBLIC_ALIASED_MODULES: + try: + eval(module_name) + except AttributeError: + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that were not " + f"found: {module_names}") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', category=DeprecationWarning) + warnings.filterwarnings('always', category=ImportWarning) + for module_name in PRIVATE_BUT_PRESENT_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules that are not really public but looked " + "public and can not be imported: " + f"{module_names}") + + +@pytest.mark.xfail( + sysconfig.get_config_var("Py_DEBUG") not in (None, 0, "0"), + reason=( + "NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, " + "which does not expose the `array_api` entry point. " + "See https://github.com/numpy/numpy/pull/19800" + ), +) +def test_array_api_entry_point(): + """ + Entry point for Array API implementation can be found with importlib and + returns the main numpy namespace. + """ + # For a development install that did not go through meson-python, + # the entrypoint will not have been installed. So ensure this test fails + # only if numpy is inside site-packages. + numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__ + + eps = importlib.metadata.entry_points() + xp_eps = eps.select(group="array_api") + if len(xp_eps) == 0: + if numpy_in_sitepackages: + msg = "No entry points for 'array_api' found" + raise AssertionError(msg) from None + return + + try: + ep = next(ep for ep in xp_eps if ep.name == "numpy") + except StopIteration: + if numpy_in_sitepackages: + msg = "'numpy' not in array_api entry points" + raise AssertionError(msg) from None + return + + if ep.value == 'numpy.array_api': + # Looks like the entrypoint for the current numpy build isn't + # installed, but an older numpy is also installed and hence the + # entrypoint is pointing to the old (no longer existing) location. + # This isn't a problem except for when running tests with `spin` or an + # in-place build. + return + + xp = ep.load() + msg = ( + f"numpy entry point value '{ep.value}' " + "does not point to our Array API implementation" + ) + assert xp is numpy, msg + + +def test_main_namespace_all_dir_coherence(): + """ + Checks if `dir(np)` and `np.__all__` are consistent and return + the same content, excluding exceptions and private members. + """ + def _remove_private_members(member_set): + return {m for m in member_set if not m.startswith('_')} + + def _remove_exceptions(member_set): + return member_set.difference({ + "bool" # included only in __dir__ + }) + + all_members = _remove_private_members(np.__all__) + all_members = _remove_exceptions(all_members) + + dir_members = _remove_private_members(np.__dir__()) + dir_members = _remove_exceptions(dir_members) + + assert all_members == dir_members, ( + "Members that break symmetry: " + f"{all_members.symmetric_difference(dir_members)}" + ) + + +@pytest.mark.filterwarnings( + r"ignore:numpy.core(\.\w+)? 
is deprecated:DeprecationWarning"
+)
+def test_core_shims_coherence():
+    """
+    Check that all "semi-public" members of `numpy._core` are also accessible
+    from `numpy.core` shims.
+    """
+    import numpy.core as core
+
+    for member_name in dir(np._core):
+        # Skip private and test members. Also if a module is aliased,
+        # no need to add it to np.core
+        if (
+            member_name.startswith("_")
+            or member_name in ["tests", "strings"]
+            or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES
+        ):
+            continue
+
+        member = getattr(np._core, member_name)
+
+        # np.core is a shim and all submodules of np.core are shims
+        # but we should be able to import everything in those shims
+        # that are available in the "real" modules in np._core, with
+        # the exception of the namespace packages (__spec__.origin is None),
+        # like numpy._core.include, or numpy._core.lib.pkgconfig.
+        if (
+            inspect.ismodule(member)
+            and member.__spec__ and member.__spec__.origin is not None
+        ):
+            submodule = member
+            submodule_name = member_name
+            for submodule_member_name in dir(submodule):
+                # ignore dunder names
+                if submodule_member_name.startswith("__"):
+                    continue
+                submodule_member = getattr(submodule, submodule_member_name)
+
+                core_submodule = __import__(
+                    f"numpy.core.{submodule_name}",
+                    fromlist=[submodule_member_name]
+                )
+
+                assert submodule_member is getattr(
+                    core_submodule, submodule_member_name
+                )
+
+        else:
+            assert member is getattr(core, member_name)
+
+
+def test_functions_single_location():
+    """
+    Check that each public function is available from one location only.
+
+    The test performs a BFS traversal of NumPy's public API. It flags
+    any function-like object that is accessible from more than one place.
+    """
+    from collections.abc import Callable
+    from typing import Any
+
+    from numpy._core._multiarray_umath import (
+        _ArrayFunctionDispatcher as dispatched_function,
+    )
+
+    visited_modules: set[types.ModuleType] = {np}
+    visited_functions: set[Callable[..., Any]] = set()
+    # Functions often have `__name__` overridden, so we need to keep
+    # track of the locations where functions have been found.
+    functions_original_paths: dict[Callable[..., Any], str] = {}
+
+    # Here we aggregate functions with more than one location.
+    # It must be empty for the test to pass.
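+    # (Each entry has the form (func.__name__, duplicate location, original
+    # location); a hypothetical entry like ("histogram", "numpy.lib", "numpy")
+    # would mean the function leaked into a second public namespace.)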
+ duplicated_functions: list[tuple] = [] + + modules_queue = [np] + + while len(modules_queue) > 0: + + module = modules_queue.pop() + + for member_name in dir(module): + member = getattr(module, member_name) + + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + "numpy._core" not in member.__name__ and # outside _core + # not a legacy or testing module + member_name not in ["f2py", "ma", "testing", "tests"] and + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + + # else check if we got a function-like object + elif ( + inspect.isfunction(member) or + isinstance(member, (dispatched_function, np.ufunc)) + ): + if member in visited_functions: + + # skip main namespace functions with aliases + if ( + member.__name__ in [ + "absolute", # np.abs + "arccos", # np.acos + "arccosh", # np.acosh + "arcsin", # np.asin + "arcsinh", # np.asinh + "arctan", # np.atan + "arctan2", # np.atan2 + "arctanh", # np.atanh + "left_shift", # np.bitwise_left_shift + "right_shift", # np.bitwise_right_shift + "conjugate", # np.conj + "invert", # np.bitwise_not & np.bitwise_invert + "remainder", # np.mod + "divide", # np.true_divide + "concatenate", # np.concat + "power", # np.pow + "transpose", # np.permute_dims + ] and + module.__name__ == "numpy" + ): + continue + # skip trimcoef from numpy.polynomial as it is + # duplicated by design. + if ( + member.__name__ == "trimcoef" and + module.__name__.startswith("numpy.polynomial") + ): + continue + + # skip ufuncs that are exported in np.strings as well + if member.__name__ in ( + "add", + "equal", + "not_equal", + "greater", + "greater_equal", + "less", + "less_equal", + ) and module.__name__ == "numpy.strings": + continue + + # numpy.char reexports all numpy.strings functions for + # backwards-compatibility + if module.__name__ == "numpy.char": + continue + + # function is present in more than one location! 
+ duplicated_functions.append( + (member.__name__, + module.__name__, + functions_original_paths[member]) + ) + else: + visited_functions.add(member) + functions_original_paths[member] = module.__name__ + + del visited_functions, visited_modules, functions_original_paths + + assert len(duplicated_functions) == 0, duplicated_functions + + +def test___module___attribute(): + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + "numpy._core" not in member.__name__ and # outside _core + # not in a skip module list + member_name not in [ + "char", "core", "f2py", "ma", "lapack_lite", "mrecords", + "testing", "tests", "polynomial", "typing", "mtrand", + "bit_generator", + ] and + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + member.__module__ != module.__name__ and + member not in visited_functions + ): + # skip ufuncs that are exported in np.strings as well + if member.__name__ in ( + "add", "equal", "not_equal", "greater", "greater_equal", + "less", "less_equal", + ) and module.__name__ == "numpy.strings": + continue + + # recarray and record are exported in np and np.rec + if ( + (member.__name__ == "recarray" and module.__name__ == "numpy") or + (member.__name__ == "record" and module.__name__ == "numpy.rec") + ): + continue + + # ctypeslib exports ctypes c_long/c_longlong + if ( + member.__name__ in ("c_long", "c_longlong") and + module.__name__ == "numpy.ctypeslib" + ): + continue + + # skip cdef classes + if member.__name__ in ( + "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM", + "Philox", "RandomState", "SFC64", "SeedSequence", + ): + continue + + incorrect_entries.append( + { + "Func": member.__name__, + "actual": member.__module__, + "expected": module.__name__, + } + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries + + +def _check_correct_qualname_and_module(obj) -> bool: + qualname = obj.__qualname__ + name = obj.__name__ + module_name = obj.__module__ + assert name == qualname.split(".")[-1] + + module = sys.modules[module_name] + actual_obj = functools.reduce(getattr, qualname.split("."), module) + return ( + actual_obj is obj or + # `obj` may be a bound method/property of `actual_obj`: + ( + hasattr(actual_obj, "__get__") and hasattr(obj, "__self__") and + actual_obj.__module__ == obj.__module__ and + actual_obj.__qualname__ == qualname + ) + ) + + +def test___qualname___and___module___attribute(): + # NumPy messes with module and name/qualname attributes, but any object + # should be discoverable based on its module and qualname, so test that. + # We do this for anything with a name (ensuring qualname is also set). 
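+    # For example, an object advertising __module__ == "numpy" and
+    # __qualname__ == "ndarray.sum" must be reachable again via
+    # functools.reduce(getattr, ["ndarray", "sum"], sys.modules["numpy"]),
+    # which is what the `_check_correct_qualname_and_module` helper above
+    # verifies.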
+ modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + member_name not in {"tests", "typing"} and # 2024-12: type names don't match + "numpy._core" not in member.__name__ and # outside _core + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + not member_name.startswith("_") and + not _check_correct_qualname_and_module(member) and + member not in visited_functions + ): + incorrect_entries.append( + { + "found_at": f"{module.__name__}:{member_name}", + "advertises": f"{member.__module__}:{member.__qualname__}", + } + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_reloading.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_reloading.py new file mode 100644 index 0000000..c21dc00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_reloading.py @@ -0,0 +1,74 @@ +import pickle +import subprocess +import sys +import textwrap +from importlib import reload + +import pytest + +import numpy.exceptions as ex +from numpy.testing import ( + IS_WASM, + assert_, + assert_equal, + assert_raises, + assert_warns, +) + + +def test_numpy_reloading(): + # gh-7844. Also check that relevant globals retain their identity. + import numpy as np + import numpy._globals + + _NoValue = np._NoValue + VisibleDeprecationWarning = ex.VisibleDeprecationWarning + ModuleDeprecationWarning = ex.ModuleDeprecationWarning + + with assert_warns(UserWarning): + reload(np) + assert_(_NoValue is np._NoValue) + assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) + assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) + + assert_raises(RuntimeError, reload, numpy._globals) + with assert_warns(UserWarning): + reload(np) + assert_(_NoValue is np._NoValue) + assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) + assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) + +def test_novalue(): + import numpy as np + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_equal(repr(np._NoValue), '') + assert_(pickle.loads(pickle.dumps(np._NoValue, + protocol=proto)) is np._NoValue) + + +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") +def test_full_reimport(): + """At the time of writing this, it is *not* truly supported, but + apparently enough users rely on it, for it to be an annoying change + when it started failing previously. + """ + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially, since we also reload the C-modules. 
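+    # (The child interpreter below purges every "numpy" entry from
+    # sys.modules, then imports NumPy again and expects the UserWarning
+    # that a re-initialised NumPy emits.)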
+    code = textwrap.dedent(r"""
+        import sys
+        from pytest import warns
+        import numpy as np
+
+        for k in list(sys.modules.keys()):
+            if "numpy" in k:
+                del sys.modules[k]
+
+        with warns(UserWarning):
+            import numpy as np
+        """)
+    p = subprocess.run([sys.executable, '-c', code], capture_output=True)
+    if p.returncode:
+        raise AssertionError(
+            f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
+        )
diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_scripts.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_scripts.py
new file mode 100644
index 0000000..d8ce958
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_scripts.py
@@ -0,0 +1,49 @@
+""" Test scripts
+
+Test that we can run executable scripts that have been installed with numpy.
+"""
+import os
+import subprocess
+import sys
+from os.path import dirname, isfile
+from os.path import join as pathjoin
+
+import pytest
+
+import numpy as np
+from numpy.testing import IS_WASM, assert_equal
+
+is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
+
+
+def find_f2py_commands():
+    if sys.platform == 'win32':
+        exe_dir = dirname(sys.executable)
+        if exe_dir.endswith('Scripts'):  # virtualenv
+            return [os.path.join(exe_dir, 'f2py')]
+        else:
+            return [os.path.join(exe_dir, "Scripts", 'f2py')]
+    else:
+        # Three scripts are installed in Unix-like systems:
+        # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
+        # if installed with python3.9 the scripts would be named
+        # 'f2py', 'f2py3', and 'f2py3.9'.
+        version = sys.version_info
+        major = str(version.major)
+        minor = str(version.minor)
+        return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]
+
+
+@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
+@pytest.mark.xfail(reason="Test is unreliable")
+@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
+def test_f2py(f2py_cmd):
+    # test that we can run the f2py script
+    stdout = subprocess.check_output([f2py_cmd, '-v'])
+    assert_equal(stdout.strip(), np.__version__.encode('ascii'))
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+def test_pep338():
+    stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
+    assert_equal(stdout.strip(), np.__version__.encode('ascii'))
diff --git a/.venv/lib/python3.12/site-packages/numpy/tests/test_warnings.py b/.venv/lib/python3.12/site-packages/numpy/tests/test_warnings.py
new file mode 100644
index 0000000..560ee61
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/tests/test_warnings.py
@@ -0,0 +1,78 @@
+"""
+Tests which scan for certain occurrences in the code; they may not find
+all of these occurrences but should catch almost all.
+"""
+import ast
+import tokenize
+from pathlib import Path
+
+import pytest
+
+import numpy
+
+
+class ParseCall(ast.NodeVisitor):
+    def __init__(self):
+        self.ls = []
+
+    def visit_Attribute(self, node):
+        ast.NodeVisitor.generic_visit(self, node)
+        self.ls.append(node.attr)
+
+    def visit_Name(self, node):
+        self.ls.append(node.id)
+
+
+class FindFuncs(ast.NodeVisitor):
+    def __init__(self, filename):
+        super().__init__()
+        self.__filename = filename
+
+    def visit_Call(self, node):
+        p = ParseCall()
+        p.visit(node.func)
+        ast.NodeVisitor.generic_visit(self, node)
+
+        if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
+            if node.args[0].value == "ignore":
+                raise AssertionError(
+                    "warnings should have an appropriate stacklevel; "
+                    f"found in {self.__filename} on line {node.lineno}")
+
+        if p.ls[-1] == 'warn' and (
+                len(p.ls) == 1 or p.ls[-2] == 'warnings'):
+
+            if "testing/tests/test_warnings.py" == self.__filename:
+                # This file
+                return
+
+            # See if stacklevel exists:
+            if len(node.args) == 3:
+                return
+            args = {kw.arg for kw in node.keywords}
+            if "stacklevel" in args:
+                return
+            raise AssertionError(
+                "warnings should have an appropriate stacklevel; "
+                f"found in {self.__filename} on line {node.lineno}")
+
+
+@pytest.mark.slow
+def test_warning_calls():
+    # combined "ignore" and stacklevel error
+    base = Path(numpy.__file__).parent
+
+    for path in base.rglob("*.py"):
+        if base / "testing" in path.parents:
+            continue
+        if path == base / "__init__.py":
+            continue
+        if path == base / "random" / "__init__.py":
+            continue
+        if path == base / "conftest.py":
+            continue
+        # use tokenize to auto-detect encoding on systems where no
+        # default encoding is defined (e.g. LANG='C')
+        with tokenize.open(str(path)) as file:
+            tree = ast.parse(file.read())
+            FindFuncs(path).visit(tree)
diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/__init__.py b/.venv/lib/python3.12/site-packages/numpy/typing/__init__.py
new file mode 100644
index 0000000..173c094
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/typing/__init__.py
@@ -0,0 +1,201 @@
+"""
+============================
+Typing (:mod:`numpy.typing`)
+============================
+
+.. versionadded:: 1.20
+
+Large parts of the NumPy API have :pep:`484`-style type annotations. In
+addition a number of type aliases are available to users, most prominently
+the two below:
+
+- `ArrayLike`: objects that can be converted to arrays
+- `DTypeLike`: objects that can be converted to dtypes
+
+.. _typing-extensions: https://pypi.org/project/typing-extensions/
+
+Mypy plugin
+-----------
+
+.. versionadded:: 1.21
+
+.. automodule:: numpy.typing.mypy_plugin
+
+.. currentmodule:: numpy.typing
+
+Differences from the runtime NumPy API
+--------------------------------------
+
+NumPy is very flexible. Trying to describe the full range of
+possibilities statically would result in types that are not very
+helpful. For that reason, the typed NumPy API is often stricter than
+the runtime NumPy API. This section describes some notable
+differences.
+
+ArrayLike
+~~~~~~~~~
+
+The `ArrayLike` type tries to avoid creating object arrays. For
+example,
+
+.. code-block:: python
+
+    >>> np.array(x**2 for x in range(10))
+    array(<generator object <genexpr> at ...>, dtype=object)
+
+is valid NumPy code which will create a 0-dimensional object
+array. Type checkers will complain about the above example when using
+the NumPy types, however. If you really intended to do the above, then
+you can either use a ``# type: ignore`` comment:
+
+.. code-block:: python
+
+    >>> np.array(x**2 for x in range(10))  # type: ignore
+
+or explicitly type the array-like object as `~typing.Any`:
+
+.. code-block:: python
+
+    >>> from typing import Any
+    >>> array_like: Any = (x**2 for x in range(10))
+    >>> np.array(array_like)
+    array(<generator object <genexpr> at ...>, dtype=object)
+
+ndarray
+~~~~~~~
+
+It's possible to mutate the dtype of an array at runtime. For example,
+the following code is valid:
+
+.. code-block:: python
+
+    >>> x = np.array([1, 2])
+    >>> x.dtype = np.bool
+
+This sort of mutation is not allowed by the types. Users who want to
+write statically typed code should instead use the `numpy.ndarray.view`
+method to create a view of the array with a different dtype.
+
+DTypeLike
+~~~~~~~~~
+
+The `DTypeLike` type tries to avoid creation of dtype objects from a
+dictionary of fields like the one below:
+
+.. code-block:: python
+
+    >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)})
+
+Although this is valid NumPy code, the type checker will complain about it,
+since its usage is discouraged.
+Please see :ref:`Data type objects <arrays.dtypes>`.
+
+Number precision
+~~~~~~~~~~~~~~~~
+
+The precision of `numpy.number` subclasses is treated as an invariant generic
+parameter (see :class:`~NBitBase`), simplifying the annotating of processes
+involving precision-based casting.
+
+.. code-block:: python
+
+    >>> from typing import TypeVar
+    >>> import numpy as np
+    >>> import numpy.typing as npt
+
+    >>> T = TypeVar("T", bound=npt.NBitBase)
+    >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]":
+    ...     ...
+
+Consequently, the likes of `~numpy.float16`, `~numpy.float32` and
+`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to
+runtime, they're not necessarily considered as sub-classes.
+
+Timedelta64
+~~~~~~~~~~~
+
+The `~numpy.timedelta64` class is not considered a subclass of
+`~numpy.signedinteger`; the former only inherits from `~numpy.generic`
+during static type checking.
+
+0D arrays
+~~~~~~~~~
+
+At runtime NumPy aggressively casts any passed 0D arrays into their
+corresponding `~numpy.generic` instance. Until the introduction of shape
+typing (see :pep:`646`) it is unfortunately not possible to make the
+necessary distinction between 0D and >0D arrays. While thus not strictly
+correct, all operations that can potentially perform a 0D-array -> scalar
+cast are currently annotated as exclusively returning an `~numpy.ndarray`.
+
+If it is known in advance that an operation *will* perform a
+0D-array -> scalar cast, then one can consider manually remedying the
+situation with either `typing.cast` or a ``# type: ignore`` comment.
+
+Record array dtypes
+~~~~~~~~~~~~~~~~~~~
+
+The dtype of `numpy.recarray`, and the :ref:`routines.array-creation.rec`
+functions in general, can be specified in one of two ways:
+
+* Directly via the ``dtype`` argument.
+* With up to five helper arguments that operate via `numpy.rec.format_parser`:
+  ``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``.
+
+These two approaches are currently typed as being mutually exclusive,
+*i.e.* if ``dtype`` is specified then one may not specify ``formats``.
+While this mutual exclusivity is not (strictly) enforced during runtime,
+combining both dtype specifiers can lead to unexpected or even downright
+buggy behavior.
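+
+As a minimal sketch of the two spellings (equivalent at runtime, but
+mutually exclusive as far as the type checker is concerned), using
+`numpy.rec.fromrecords`:
+
+.. code-block:: python
+
+    >>> import numpy as np
+    >>> r1 = np.rec.fromrecords([(1, 1.5)], dtype=[("a", "i8"), ("b", "f8")])
+    >>> r2 = np.rec.fromrecords([(1, 1.5)], names=["a", "b"],
+    ...                         formats=["i8", "f8"])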
+ +API +--- + +""" +# NOTE: The API section will be appended with additional entries +# further down in this file + +# pyright: reportDeprecated=false + +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] + + +__DIR = __all__ + [k for k in globals() if k.startswith("__") and k.endswith("__")] +__DIR_SET = frozenset(__DIR) + + +def __dir__() -> list[str]: + return __DIR + +def __getattr__(name: str): + if name == "NBitBase": + import warnings + + # Deprecated in NumPy 2.3, 2025-05-01 + warnings.warn( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", + DeprecationWarning, + stacklevel=2, + ) + return NBitBase + + if name in __DIR_SET: + return globals()[name] + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +if __doc__ is not None: + from numpy._typing._add_docstring import _docstrings + __doc__ += _docstrings + __doc__ += '\n.. autoclass:: numpy.typing.NBitBase\n' + del _docstrings + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b646085 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc new file mode 100644 index 0000000..6fc5417 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/mypy_plugin.py b/.venv/lib/python3.12/site-packages/numpy/typing/mypy_plugin.py new file mode 100644 index 0000000..dc1e256 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/mypy_plugin.py @@ -0,0 +1,195 @@ +"""A mypy_ plugin for managing a number of platform-specific annotations. +Its functionality can be split into three distinct parts: + +* Assigning the (platform-dependent) precisions of certain `~numpy.number` + subclasses, including the likes of `~numpy.int_`, `~numpy.intp` and + `~numpy.longlong`. See the documentation on + :ref:`scalar types ` for a comprehensive overview + of the affected classes. Without the plugin the precision of all relevant + classes will be inferred as `~typing.Any`. +* Removing all extended-precision `~numpy.number` subclasses that are + unavailable for the platform in question. Most notably this includes the + likes of `~numpy.float128` and `~numpy.complex256`. Without the plugin *all* + extended-precision types will, as far as mypy is concerned, be available + to all platforms. +* Assigning the (platform-dependent) precision of `~numpy.ctypeslib.c_intp`. + Without the plugin the type will default to `ctypes.c_int64`. + + .. versionadded:: 1.22 + +.. deprecated:: 2.3 + +Examples +-------- +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + +.. _mypy: https://mypy-lang.org/ +.. 
_configuration file: https://mypy.readthedocs.io/en/stable/config_file.html + +""" + +from collections.abc import Callable, Iterable +from typing import TYPE_CHECKING, Final, TypeAlias, cast + +import numpy as np + +__all__: list[str] = [] + + +def _get_precision_dict() -> dict[str, str]: + names = [ + ("_NBitByte", np.byte), + ("_NBitShort", np.short), + ("_NBitIntC", np.intc), + ("_NBitIntP", np.intp), + ("_NBitInt", np.int_), + ("_NBitLong", np.long), + ("_NBitLongLong", np.longlong), + + ("_NBitHalf", np.half), + ("_NBitSingle", np.single), + ("_NBitDouble", np.double), + ("_NBitLongDouble", np.longdouble), + ] + ret: dict[str, str] = {} + for name, typ in names: + n = 8 * np.dtype(typ).itemsize + ret[f"{_MODULE}._nbit.{name}"] = f"{_MODULE}._nbit_base._{n}Bit" + return ret + + +def _get_extended_precision_list() -> list[str]: + extended_names = [ + "float96", + "float128", + "complex192", + "complex256", + ] + return [i for i in extended_names if hasattr(np, i)] + +def _get_c_intp_name() -> str: + # Adapted from `np.core._internal._getintp_ctype` + return { + "i": "c_int", + "l": "c_long", + "q": "c_longlong", + }.get(np.dtype("n").char, "c_long") + + +_MODULE: Final = "numpy._typing" + +#: A dictionary mapping type-aliases in `numpy._typing._nbit` to +#: concrete `numpy.typing.NBitBase` subclasses. +_PRECISION_DICT: Final = _get_precision_dict() + +#: A list with the names of all extended precision `np.number` subclasses. +_EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list() + +#: The name of the ctypes equivalent of `np.intp` +_C_INTP: Final = _get_c_intp_name() + + +try: + if TYPE_CHECKING: + from mypy.typeanal import TypeAnalyser + + import mypy.types + from mypy.build import PRI_MED + from mypy.nodes import ImportFrom, MypyFile, Statement + from mypy.plugin import AnalyzeTypeContext, Plugin + +except ModuleNotFoundError as e: + + def plugin(version: str) -> type: + raise e + +else: + + _HookFunc: TypeAlias = Callable[[AnalyzeTypeContext], mypy.types.Type] + + def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type: + """Replace a type-alias with a concrete ``NBitBase`` subclass.""" + typ, _, api = ctx + name = typ.name.split(".")[-1] + name_new = _PRECISION_DICT[f"{_MODULE}._nbit.{name}"] + return cast("TypeAnalyser", api).named_type(name_new) + + def _index(iterable: Iterable[Statement], id: str) -> int: + """Identify the first ``ImportFrom`` instance the specified `id`.""" + for i, value in enumerate(iterable): + if getattr(value, "id", None) == id: + return i + raise ValueError("Failed to identify a `ImportFrom` instance " + f"with the following id: {id!r}") + + def _override_imports( + file: MypyFile, + module: str, + imports: list[tuple[str, str | None]], + ) -> None: + """Override the first `module`-based import with new `imports`.""" + # Construct a new `from module import y` statement + import_obj = ImportFrom(module, 0, names=imports) + import_obj.is_top_level = True + + # Replace the first `module`-based import statement with `import_obj` + for lst in [file.defs, cast("list[Statement]", file.imports)]: + i = _index(lst, module) + lst[i] = import_obj + + class _NumpyPlugin(Plugin): + """A mypy plugin for handling versus numpy-specific typing tasks.""" + + def get_type_analyze_hook(self, fullname: str) -> _HookFunc | None: + """Set the precision of platform-specific `numpy.number` + subclasses. + + For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`. 
+ """ + if fullname in _PRECISION_DICT: + return _hook + return None + + def get_additional_deps( + self, file: MypyFile + ) -> list[tuple[int, str, int]]: + """Handle all import-based overrides. + + * Import platform-specific extended-precision `numpy.number` + subclasses (*e.g.* `numpy.float96` and `numpy.float128`). + * Import the appropriate `ctypes` equivalent to `numpy.intp`. + + """ + fullname = file.fullname + if fullname == "numpy": + _override_imports( + file, + f"{_MODULE}._extended_precision", + imports=[(v, v) for v in _EXTENDED_PRECISION_LIST], + ) + elif fullname == "numpy.ctypeslib": + _override_imports( + file, + "ctypes", + imports=[(_C_INTP, "_c_intp")], + ) + return [(PRI_MED, fullname, -1)] + + def plugin(version: str) -> type: + import warnings + + plugin = "numpy.typing.mypy_plugin" + # Deprecated 2025-01-10, NumPy 2.3 + warn_msg = ( + f"`{plugin}` is deprecated, and will be removed in a future " + f"release. Please remove `plugins = {plugin}` in your mypy config." + f"(deprecated in NumPy 2.3)" + ) + warnings.warn(warn_msg, DeprecationWarning, stacklevel=3) + + return _NumpyPlugin diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..1ccfe4c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc new file mode 100644 index 0000000..d0fe78e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc new file mode 100644 index 0000000..b25ee90 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc new file mode 100644 index 0000000..2e87de8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arithmetic.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arithmetic.pyi new file mode 100644 index 0000000..e696083 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arithmetic.pyi @@ -0,0 +1,126 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +b_ = np.bool() +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +AR_b: npt.NDArray[np.bool] +AR_u: npt.NDArray[np.uint32] +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.longdouble] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +ANY: Any + +AR_LIKE_b: 
list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_m: list[np.timedelta64] +AR_LIKE_M: list[np.datetime64] + +# Array subtraction + +# NOTE: mypys `NoReturn` errors are, unfortunately, not that great +_1 = AR_b - AR_LIKE_b # type: ignore[var-annotated] +_2 = AR_LIKE_b - AR_b # type: ignore[var-annotated] +AR_i - bytes() # type: ignore[operator] + +AR_f - AR_LIKE_m # type: ignore[operator] +AR_f - AR_LIKE_M # type: ignore[operator] +AR_c - AR_LIKE_m # type: ignore[operator] +AR_c - AR_LIKE_M # type: ignore[operator] + +AR_m - AR_LIKE_f # type: ignore[operator] +AR_M - AR_LIKE_f # type: ignore[operator] +AR_m - AR_LIKE_c # type: ignore[operator] +AR_M - AR_LIKE_c # type: ignore[operator] + +AR_m - AR_LIKE_M # type: ignore[operator] +AR_LIKE_m - AR_M # type: ignore[operator] + +# array floor division + +AR_M // AR_LIKE_b # type: ignore[operator] +AR_M // AR_LIKE_u # type: ignore[operator] +AR_M // AR_LIKE_i # type: ignore[operator] +AR_M // AR_LIKE_f # type: ignore[operator] +AR_M // AR_LIKE_c # type: ignore[operator] +AR_M // AR_LIKE_m # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +AR_b // AR_LIKE_M # type: ignore[operator] +AR_u // AR_LIKE_M # type: ignore[operator] +AR_i // AR_LIKE_M # type: ignore[operator] +AR_f // AR_LIKE_M # type: ignore[operator] +AR_c // AR_LIKE_M # type: ignore[operator] +AR_m // AR_LIKE_M # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +_3 = AR_m // AR_LIKE_b # type: ignore[var-annotated] +AR_m // AR_LIKE_c # type: ignore[operator] + +AR_b // AR_LIKE_m # type: ignore[operator] +AR_u // AR_LIKE_m # type: ignore[operator] +AR_i // AR_LIKE_m # type: ignore[operator] +AR_f // AR_LIKE_m # type: ignore[operator] +AR_c // AR_LIKE_m # type: ignore[operator] + +# regression tests for https://github.com/numpy/numpy/issues/28957 +AR_c // 2 # type: ignore[operator] +AR_c // AR_i # type: ignore[operator] +AR_c // AR_c # type: ignore[operator] + +# Array multiplication + +AR_b *= AR_LIKE_u # type: ignore[arg-type] +AR_b *= AR_LIKE_i # type: ignore[arg-type] +AR_b *= AR_LIKE_f # type: ignore[arg-type] +AR_b *= AR_LIKE_c # type: ignore[arg-type] +AR_b *= AR_LIKE_m # type: ignore[arg-type] + +AR_u *= AR_LIKE_f # type: ignore[arg-type] +AR_u *= AR_LIKE_c # type: ignore[arg-type] +AR_u *= AR_LIKE_m # type: ignore[arg-type] + +AR_i *= AR_LIKE_f # type: ignore[arg-type] +AR_i *= AR_LIKE_c # type: ignore[arg-type] +AR_i *= AR_LIKE_m # type: ignore[arg-type] + +AR_f *= AR_LIKE_c # type: ignore[arg-type] +AR_f *= AR_LIKE_m # type: ignore[arg-type] + +# Array power + +AR_b **= AR_LIKE_b # type: ignore[misc] +AR_b **= AR_LIKE_u # type: ignore[misc] +AR_b **= AR_LIKE_i # type: ignore[misc] +AR_b **= AR_LIKE_f # type: ignore[misc] +AR_b **= AR_LIKE_c # type: ignore[misc] + +AR_u **= AR_LIKE_f # type: ignore[arg-type] +AR_u **= AR_LIKE_c # type: ignore[arg-type] + +AR_i **= AR_LIKE_f # type: ignore[arg-type] +AR_i **= AR_LIKE_c # type: ignore[arg-type] + +AR_f **= AR_LIKE_c # type: ignore[arg-type] + +# Scalars + +b_ - b_ # type: ignore[call-overload] + +dt + dt # type: ignore[operator] +td - dt # type: ignore[operator] +td % 1 # type: ignore[operator] +td / dt # type: ignore[operator] +td % dt # type: ignore[operator] + +-b_ # type: ignore[operator] ++b_ # type: ignore[operator] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_constructors.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_constructors.pyi new file mode 
100644 index 0000000..cadc2ae --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_constructors.pyi @@ -0,0 +1,34 @@ +import numpy as np +import numpy.typing as npt + +a: npt.NDArray[np.float64] +generator = (i for i in range(10)) + +np.require(a, requirements=1) # type: ignore[call-overload] +np.require(a, requirements="TEST") # type: ignore[arg-type] + +np.zeros("test") # type: ignore[arg-type] +np.zeros() # type: ignore[call-overload] + +np.ones("test") # type: ignore[arg-type] +np.ones() # type: ignore[call-overload] + +np.array(0, float, True) # type: ignore[call-overload] + +np.linspace(None, 'bob') # type: ignore[call-overload] +np.linspace(0, 2, num=10.0) # type: ignore[call-overload] +np.linspace(0, 2, endpoint='True') # type: ignore[call-overload] +np.linspace(0, 2, retstep=b'False') # type: ignore[call-overload] +np.linspace(0, 2, dtype=0) # type: ignore[call-overload] +np.linspace(0, 2, axis=None) # type: ignore[call-overload] + +np.logspace(None, 'bob') # type: ignore[call-overload] +np.logspace(0, 2, base=None) # type: ignore[call-overload] + +np.geomspace(None, 'bob') # type: ignore[call-overload] + +np.stack(generator) # type: ignore[call-overload] +np.hstack({1, 2}) # type: ignore[call-overload] +np.vstack(1) # type: ignore[call-overload] + +np.array([1], like=1) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_like.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_like.pyi new file mode 100644 index 0000000..4e37354 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_like.pyi @@ -0,0 +1,15 @@ +import numpy as np +from numpy._typing import ArrayLike + +class A: ... + +x1: ArrayLike = (i for i in range(10)) # type: ignore[assignment] +x2: ArrayLike = A() # type: ignore[assignment] +x3: ArrayLike = {1: "foo", 2: "bar"} # type: ignore[assignment] + +scalar = np.int64(1) +scalar.__array__(dtype=np.float64) # type: ignore[call-overload] +array = np.array([1]) +array.__array__(dtype=np.float64) # type: ignore[call-overload] + +array.setfield(np.eye(1), np.int32, (0, 1)) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_pad.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_pad.pyi new file mode 100644 index 0000000..42e61c8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/array_pad.pyi @@ -0,0 +1,6 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] + +np.pad(AR_i8, 2, mode="bob") # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arrayprint.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arrayprint.pyi new file mode 100644 index 0000000..224a410 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arrayprint.pyi @@ -0,0 +1,16 @@ +from collections.abc import Callable +from typing import Any + +import numpy as np +import numpy.typing as npt + +AR: npt.NDArray[np.float64] +func1: Callable[[Any], str] +func2: Callable[[np.integer], str] + +np.array2string(AR, style=None) # type: ignore[call-overload] +np.array2string(AR, legacy="1.14") # type: ignore[call-overload] +np.array2string(AR, sign="*") # type: ignore[call-overload] +np.array2string(AR, floatmode="default") # type: ignore[call-overload] +np.array2string(AR, formatter={"A": func1}) # type: 
ignore[call-overload] +np.array2string(AR, formatter={"float": func2}) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arrayterator.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arrayterator.pyi new file mode 100644 index 0000000..8d2295a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/arrayterator.pyi @@ -0,0 +1,14 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +ar_iter = np.lib.Arrayterator(AR_i8) + +np.lib.Arrayterator(np.int64()) # type: ignore[arg-type] +ar_iter.shape = (10, 5) # type: ignore[misc] +ar_iter[None] # type: ignore[index] +ar_iter[None, 1] # type: ignore[index] +ar_iter[np.intp()] # type: ignore[index] +ar_iter[np.intp(), ...] # type: ignore[index] +ar_iter[AR_i8] # type: ignore[index] +ar_iter[AR_i8, :] # type: ignore[index] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/bitwise_ops.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/bitwise_ops.pyi new file mode 100644 index 0000000..3538ec7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -0,0 +1,17 @@ +import numpy as np + +i8 = np.int64() +i4 = np.int32() +u8 = np.uint64() +b_ = np.bool() +i = int() + +f8 = np.float64() + +b_ >> f8 # type: ignore[call-overload] +i8 << f8 # type: ignore[call-overload] +i | f8 # type: ignore[operator] +i8 ^ f8 # type: ignore[call-overload] +u8 & f8 # type: ignore[call-overload] +~f8 # type: ignore[operator] +# TODO: Certain mixes like i4 << u8 go to float and thus should fail diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/char.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/char.pyi new file mode 100644 index 0000000..62c4475 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/char.pyi @@ -0,0 +1,65 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] + +np.char.equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.char.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.greater(AR_U, AR_S) # type: ignore[arg-type] +np.char.less(AR_U, AR_S) # type: ignore[arg-type] + +np.char.encode(AR_S) # type: ignore[arg-type] +np.char.decode(AR_U) # type: ignore[arg-type] + +np.char.join(AR_U, b"_") # type: ignore[arg-type] +np.char.join(AR_S, "_") # type: ignore[arg-type] + +np.char.ljust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] +np.char.ljust(AR_S, 5, fillchar="a") # type: ignore[arg-type] +np.char.rjust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] +np.char.rjust(AR_S, 5, fillchar="a") # type: ignore[arg-type] + +np.char.lstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.lstrip(AR_S, chars="a") # type: ignore[arg-type] +np.char.strip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.strip(AR_S, chars="a") # type: ignore[arg-type] +np.char.rstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.rstrip(AR_S, chars="a") # type: ignore[arg-type] + +np.char.partition(AR_U, b"a") # type: ignore[arg-type] +np.char.partition(AR_S, "a") # type: ignore[arg-type] +np.char.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.char.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.char.replace(AR_U, b"_", b"-") # type: ignore[arg-type] +np.char.replace(AR_S, "_", "-") # type: 
ignore[arg-type] + +np.char.split(AR_U, b"_") # type: ignore[arg-type] +np.char.split(AR_S, "_") # type: ignore[arg-type] +np.char.rsplit(AR_U, b"_") # type: ignore[arg-type] +np.char.rsplit(AR_S, "_") # type: ignore[arg-type] + +np.char.count(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.count(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.endswith(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.startswith(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.find(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.find(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rfind(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.isdecimal(AR_S) # type: ignore[arg-type] +np.char.isnumeric(AR_S) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/chararray.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/chararray.pyi new file mode 100644 index 0000000..fb52f73 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/chararray.pyi @@ -0,0 +1,62 @@ +from typing import Any +import numpy as np + +AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] + +AR_S.encode() # type: ignore[misc] +AR_U.decode() # type: ignore[misc] + +AR_U.join(b"_") # type: ignore[arg-type] +AR_S.join("_") # type: ignore[arg-type] + +AR_U.ljust(5, fillchar=b"a") # type: ignore[arg-type] +AR_S.ljust(5, fillchar="a") # type: ignore[arg-type] +AR_U.rjust(5, fillchar=b"a") # type: ignore[arg-type] +AR_S.rjust(5, fillchar="a") # type: ignore[arg-type] + +AR_U.lstrip(chars=b"a") # type: ignore[arg-type] +AR_S.lstrip(chars="a") # type: ignore[arg-type] +AR_U.strip(chars=b"a") # type: ignore[arg-type] +AR_S.strip(chars="a") # type: ignore[arg-type] +AR_U.rstrip(chars=b"a") # type: ignore[arg-type] +AR_S.rstrip(chars="a") # type: ignore[arg-type] + +AR_U.partition(b"a") # type: ignore[arg-type] +AR_S.partition("a") # type: ignore[arg-type] +AR_U.rpartition(b"a") # type: ignore[arg-type] +AR_S.rpartition("a") # type: ignore[arg-type] + +AR_U.replace(b"_", b"-") # type: ignore[arg-type] +AR_S.replace("_", "-") # type: ignore[arg-type] + +AR_U.split(b"_") # type: ignore[arg-type] +AR_S.split("_") # type: ignore[arg-type] +AR_S.split(1) # type: ignore[arg-type] +AR_U.rsplit(b"_") # type: ignore[arg-type] +AR_S.rsplit("_") # type: ignore[arg-type] + +AR_U.count(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.count("a", end=9) # type: ignore[arg-type] + +AR_U.endswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.endswith("a", end=9) # type: ignore[arg-type] +AR_U.startswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.startswith("a", end=9) # type: ignore[arg-type] + +AR_U.find(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.find("a", end=9) # type: ignore[arg-type] +AR_U.rfind(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rfind("a", end=9) # type: ignore[arg-type] + +AR_U.index(b"a", start=[1, 2, 3]) # type: ignore[arg-type] 
+AR_S.index("a", end=9) # type: ignore[arg-type] +AR_U.rindex(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rindex("a", end=9) # type: ignore[arg-type] + +AR_U == AR_S # type: ignore[operator] +AR_U != AR_S # type: ignore[operator] +AR_U >= AR_S # type: ignore[operator] +AR_U <= AR_S # type: ignore[operator] +AR_U > AR_S # type: ignore[operator] +AR_U < AR_S # type: ignore[operator] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/comparisons.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/comparisons.pyi new file mode 100644 index 0000000..3c8a94b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/comparisons.pyi @@ -0,0 +1,27 @@ +import numpy as np +import numpy.typing as npt + +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +AR_f > AR_m # type: ignore[operator] +AR_c > AR_m # type: ignore[operator] + +AR_m > AR_f # type: ignore[operator] +AR_m > AR_c # type: ignore[operator] + +AR_i > AR_M # type: ignore[operator] +AR_f > AR_M # type: ignore[operator] +AR_m > AR_M # type: ignore[operator] + +AR_M > AR_i # type: ignore[operator] +AR_M > AR_f # type: ignore[operator] +AR_M > AR_m # type: ignore[operator] + +AR_i > str() # type: ignore[operator] +AR_i > bytes() # type: ignore[operator] +str() > AR_M # type: ignore[operator] +bytes() > AR_M # type: ignore[operator] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/constants.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/constants.pyi new file mode 100644 index 0000000..10717f6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/constants.pyi @@ -0,0 +1,3 @@ +import numpy as np + +np.little_endian = np.little_endian # type: ignore[misc] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/datasource.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/datasource.pyi new file mode 100644 index 0000000..267b672 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/datasource.pyi @@ -0,0 +1,15 @@ +from pathlib import Path +import numpy as np + +path: Path +d1: np.lib.npyio.DataSource + +d1.abspath(path) # type: ignore[arg-type] +d1.abspath(b"...") # type: ignore[arg-type] + +d1.exists(path) # type: ignore[arg-type] +d1.exists(b"...") # type: ignore[arg-type] + +d1.open(path, "r") # type: ignore[arg-type] +d1.open(b"...", encoding="utf8") # type: ignore[arg-type] +d1.open(None, newline="/n") # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/dtype.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/dtype.pyi new file mode 100644 index 0000000..64a7c3f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/dtype.pyi @@ -0,0 +1,17 @@ +import numpy as np + +class Test1: + not_dtype = np.dtype(float) + +class Test2: + dtype = float + +np.dtype(Test1()) # type: ignore[call-overload] +np.dtype(Test2()) # type: ignore[arg-type] + +np.dtype( # type: ignore[call-overload] + { + "field1": (float, 1), + "field2": (int, 3), + } +) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/einsumfunc.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/einsumfunc.pyi new file mode 100644 index 0000000..982ad98 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/einsumfunc.pyi @@ -0,0 +1,12 @@ +import numpy as np +import numpy.typing as npt + +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_m: npt.NDArray[np.timedelta64] +AR_U: npt.NDArray[np.str_] + +np.einsum("i,i->i", AR_i, AR_m) # type: ignore[arg-type] +np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # type: ignore[arg-type] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # type: ignore[type-var] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/flatiter.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/flatiter.pyi new file mode 100644 index 0000000..06e23fe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/flatiter.pyi @@ -0,0 +1,20 @@ +import numpy as np +import numpy._typing as npt + +class Index: + def __index__(self) -> int: ... + +a: np.flatiter[npt.NDArray[np.float64]] +supports_array: npt._SupportsArray[np.dtype[np.float64]] + +a.base = object() # type: ignore[assignment, misc] +a.coords = object() # type: ignore[assignment, misc] +a.index = object() # type: ignore[assignment, misc] +a.copy(order='C') # type: ignore[call-arg] + +# NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` +# does not accept objects with the `__array__` or `__index__` protocols; +# boolean indexing is just plain broken (gh-17175) +a[np.bool()] # type: ignore[index] +a[Index()] # type: ignore[call-overload] +a[supports_array] # type: ignore[index] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/fromnumeric.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/fromnumeric.pyi new file mode 100644 index 0000000..51ef268 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -0,0 +1,148 @@ +"""Tests for :mod:`numpy._core.fromnumeric`.""" + +import numpy as np +import numpy.typing as npt + +A = np.array(True, ndmin=2, dtype=bool) +A.setflags(write=False) +AR_U: npt.NDArray[np.str_] +AR_M: npt.NDArray[np.datetime64] +AR_f4: npt.NDArray[np.float32] + +a = np.bool(True) + +np.take(a, None) # type: ignore[call-overload] +np.take(a, axis=1.0) # type: ignore[call-overload] +np.take(A, out=1) # type: ignore[call-overload] +np.take(A, mode="bob") # type: ignore[call-overload] + +np.reshape(a, None) # type: ignore[call-overload] +np.reshape(A, 1, order="bob") # type: ignore[call-overload] + +np.choose(a, None) # type: ignore[call-overload] +np.choose(a, out=1.0) # type: ignore[call-overload] +np.choose(A, mode="bob") # type: ignore[call-overload] + +np.repeat(a, None) # type: ignore[call-overload] +np.repeat(A, 1, axis=1.0) # type: ignore[call-overload] + +np.swapaxes(A, None, 1) # type: ignore[call-overload] +np.swapaxes(A, 1, [0]) # type: ignore[call-overload] + +np.transpose(A, axes=1.0) # type: ignore[call-overload] + +np.partition(a, None) # type: ignore[call-overload] +np.partition(a, 0, axis="bob") # type: ignore[call-overload] +np.partition(A, 0, kind="bob") # type: ignore[call-overload] +np.partition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.argpartition(a, None) # type: ignore[arg-type] +np.argpartition(a, 0, axis="bob") # type: ignore[arg-type] +np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.sort(A, axis="bob") # type: ignore[call-overload] +np.sort(A, kind="bob") # 
type: ignore[call-overload] +np.sort(A, order=range(5)) # type: ignore[arg-type] + +np.argsort(A, axis="bob") # type: ignore[arg-type] +np.argsort(A, kind="bob") # type: ignore[arg-type] +np.argsort(A, order=range(5)) # type: ignore[arg-type] + +np.argmax(A, axis="bob") # type: ignore[call-overload] +np.argmax(A, kind="bob") # type: ignore[call-overload] +np.argmax(A, out=AR_f4) # type: ignore[type-var] + +np.argmin(A, axis="bob") # type: ignore[call-overload] +np.argmin(A, kind="bob") # type: ignore[call-overload] +np.argmin(A, out=AR_f4) # type: ignore[type-var] + +np.searchsorted(A[0], 0, side="bob") # type: ignore[call-overload] +np.searchsorted(A[0], 0, sorter=1.0) # type: ignore[call-overload] + +np.resize(A, 1.0) # type: ignore[call-overload] + +np.squeeze(A, 1.0) # type: ignore[call-overload] + +np.diagonal(A, offset=None) # type: ignore[call-overload] +np.diagonal(A, axis1="bob") # type: ignore[call-overload] +np.diagonal(A, axis2=[]) # type: ignore[call-overload] + +np.trace(A, offset=None) # type: ignore[call-overload] +np.trace(A, axis1="bob") # type: ignore[call-overload] +np.trace(A, axis2=[]) # type: ignore[call-overload] + +np.ravel(a, order="bob") # type: ignore[call-overload] + +np.nonzero(0) # type: ignore[arg-type] + +np.compress([True], A, axis=1.0) # type: ignore[call-overload] + +np.clip(a, 1, 2, out=1) # type: ignore[call-overload] + +np.sum(a, axis=1.0) # type: ignore[call-overload] +np.sum(a, keepdims=1.0) # type: ignore[call-overload] +np.sum(a, initial=[1]) # type: ignore[call-overload] + +np.all(a, axis=1.0) # type: ignore[call-overload] +np.all(a, keepdims=1.0) # type: ignore[call-overload] +np.all(a, out=1.0) # type: ignore[call-overload] + +np.any(a, axis=1.0) # type: ignore[call-overload] +np.any(a, keepdims=1.0) # type: ignore[call-overload] +np.any(a, out=1.0) # type: ignore[call-overload] + +np.cumsum(a, axis=1.0) # type: ignore[call-overload] +np.cumsum(a, dtype=1.0) # type: ignore[call-overload] +np.cumsum(a, out=1.0) # type: ignore[call-overload] + +np.ptp(a, axis=1.0) # type: ignore[call-overload] +np.ptp(a, keepdims=1.0) # type: ignore[call-overload] +np.ptp(a, out=1.0) # type: ignore[call-overload] + +np.amax(a, axis=1.0) # type: ignore[call-overload] +np.amax(a, keepdims=1.0) # type: ignore[call-overload] +np.amax(a, out=1.0) # type: ignore[call-overload] +np.amax(a, initial=[1.0]) # type: ignore[call-overload] +np.amax(a, where=[1.0]) # type: ignore[arg-type] + +np.amin(a, axis=1.0) # type: ignore[call-overload] +np.amin(a, keepdims=1.0) # type: ignore[call-overload] +np.amin(a, out=1.0) # type: ignore[call-overload] +np.amin(a, initial=[1.0]) # type: ignore[call-overload] +np.amin(a, where=[1.0]) # type: ignore[arg-type] + +np.prod(a, axis=1.0) # type: ignore[call-overload] +np.prod(a, out=False) # type: ignore[call-overload] +np.prod(a, keepdims=1.0) # type: ignore[call-overload] +np.prod(a, initial=int) # type: ignore[call-overload] +np.prod(a, where=1.0) # type: ignore[call-overload] +np.prod(AR_U) # type: ignore[arg-type] + +np.cumprod(a, axis=1.0) # type: ignore[call-overload] +np.cumprod(a, out=False) # type: ignore[call-overload] +np.cumprod(AR_U) # type: ignore[arg-type] + +np.size(a, axis=1.0) # type: ignore[arg-type] + +np.around(a, decimals=1.0) # type: ignore[call-overload] +np.around(a, out=type) # type: ignore[call-overload] +np.around(AR_U) # type: ignore[arg-type] + +np.mean(a, axis=1.0) # type: ignore[call-overload] +np.mean(a, out=False) # type: ignore[call-overload] +np.mean(a, keepdims=1.0) # type: ignore[call-overload] 
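+# (editor's note) The two `np.mean` calls below fail for a different reason
+# than the keyword mistakes above: string and datetime arrays have no defined
+# mean, so the array argument itself is rejected (`arg-type` rather than
+# `call-overload`).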
+np.mean(AR_U) # type: ignore[arg-type] +np.mean(AR_M) # type: ignore[arg-type] + +np.std(a, axis=1.0) # type: ignore[call-overload] +np.std(a, out=False) # type: ignore[call-overload] +np.std(a, ddof='test') # type: ignore[call-overload] +np.std(a, keepdims=1.0) # type: ignore[call-overload] +np.std(AR_U) # type: ignore[arg-type] + +np.var(a, axis=1.0) # type: ignore[call-overload] +np.var(a, out=False) # type: ignore[call-overload] +np.var(a, ddof='test') # type: ignore[call-overload] +np.var(a, keepdims=1.0) # type: ignore[call-overload] +np.var(AR_U) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/histograms.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/histograms.pyi new file mode 100644 index 0000000..5f78927 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/histograms.pyi @@ -0,0 +1,12 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] + +np.histogram(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] + +np.histogramdd(AR_i8, range=(0, 1)) # type: ignore[arg-type] +np.histogramdd(AR_i8, range=[(0, 1, 2)]) # type: ignore[list-item] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/index_tricks.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/index_tricks.pyi new file mode 100644 index 0000000..8b7b1ae --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/index_tricks.pyi @@ -0,0 +1,14 @@ +import numpy as np + +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] + +np.ndindex([1, 2, 3]) # type: ignore[call-overload] +np.unravel_index(AR_LIKE_f, (1, 2, 3)) # type: ignore[arg-type] +np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # type: ignore[call-overload] +np.mgrid[1] # type: ignore[index] +np.mgrid[...] # type: ignore[index] +np.ogrid[1] # type: ignore[index] +np.ogrid[...] # type: ignore[index] +np.fill_diagonal(AR_LIKE_f, 2) # type: ignore[arg-type] +np.diag_indices(1.0) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_function_base.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_function_base.pyi new file mode 100644 index 0000000..f0bf634 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -0,0 +1,62 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_b_list: list[npt.NDArray[np.bool]] + +def fn_none_i(a: None, /) -> npt.NDArray[Any]: ... +def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ... 
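+# (editor's note) The two helpers above are callback stand-ins for the
+# `np.piecewise` checks further down; several `piecewise` variants remain
+# commented out until mypy's ParamSpec support can express them (see the
+# TODO below).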
+ +np.average(AR_m) # type: ignore[arg-type] +np.select(1, [AR_f8]) # type: ignore[arg-type] +np.angle(AR_m) # type: ignore[arg-type] +np.unwrap(AR_m) # type: ignore[arg-type] +np.unwrap(AR_c16) # type: ignore[arg-type] +np.trim_zeros(1) # type: ignore[arg-type] +np.place(1, [True], 1.5) # type: ignore[arg-type] +np.vectorize(1) # type: ignore[arg-type] +np.place(AR_f8, slice(None), 5) # type: ignore[arg-type] + +np.piecewise(AR_f8, True, [fn_ar_i], 42) # type: ignore[call-overload] +# TODO: enable these once mypy actually supports ParamSpec (released in 2021) +# NOTE: pyright correctly reports errors for these (`reportCallIssue`) +# np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload]s +# np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] +# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] +# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] +# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[call-overload] + +np.interp(AR_f8, AR_c16, AR_f8) # type: ignore[arg-type] +np.interp(AR_c16, AR_f8, AR_f8) # type: ignore[arg-type] +np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # type: ignore[call-overload] +np.interp(AR_f8, AR_f8, AR_O) # type: ignore[arg-type] + +np.cov(AR_m) # type: ignore[arg-type] +np.cov(AR_O) # type: ignore[arg-type] +np.corrcoef(AR_m) # type: ignore[arg-type] +np.corrcoef(AR_O) # type: ignore[arg-type] +np.corrcoef(AR_f8, bias=True) # type: ignore[call-overload] +np.corrcoef(AR_f8, ddof=2) # type: ignore[call-overload] +np.blackman(1j) # type: ignore[arg-type] +np.bartlett(1j) # type: ignore[arg-type] +np.hanning(1j) # type: ignore[arg-type] +np.hamming(1j) # type: ignore[arg-type] +np.hamming(AR_c16) # type: ignore[arg-type] +np.kaiser(1j, 1) # type: ignore[arg-type] +np.sinc(AR_O) # type: ignore[arg-type] +np.median(AR_M) # type: ignore[arg-type] + +np.percentile(AR_f8, 50j) # type: ignore[call-overload] +np.percentile(AR_f8, 50, interpolation="bob") # type: ignore[call-overload] +np.quantile(AR_f8, 0.5j) # type: ignore[call-overload] +np.quantile(AR_f8, 0.5, interpolation="bob") # type: ignore[call-overload] +np.meshgrid(AR_f8, AR_f8, indexing="bob") # type: ignore[call-overload] +np.delete(AR_f8, AR_f8) # type: ignore[arg-type] +np.insert(AR_f8, AR_f8, 1.5) # type: ignore[arg-type] +np.digitize(AR_f8, 1j) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_polynomial.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_polynomial.pyi new file mode 100644 index 0000000..727eb7f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_polynomial.pyi @@ -0,0 +1,29 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] +AR_U: npt.NDArray[np.str_] + +poly_obj: np.poly1d + +np.polymul(AR_f8, AR_U) # type: ignore[arg-type] +np.polydiv(AR_f8, AR_U) # type: ignore[arg-type] + +5**poly_obj # type: ignore[operator] + +np.polyint(AR_U) # type: ignore[arg-type] +np.polyint(AR_f8, m=1j) # type: ignore[call-overload] + +np.polyder(AR_U) # type: ignore[arg-type] +np.polyder(AR_f8, m=1j) # type: ignore[call-overload] + +np.polyfit(AR_O, AR_f8, 1) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # type: ignore[call-overload] +np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, cov="bob") # type: 
ignore[call-overload] + +np.polyval(AR_f8, AR_U) # type: ignore[arg-type] +np.polyadd(AR_f8, AR_U) # type: ignore[arg-type] +np.polysub(AR_f8, AR_U) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_utils.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_utils.pyi new file mode 100644 index 0000000..25af32b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_utils.pyi @@ -0,0 +1,3 @@ +import numpy.lib.array_utils as array_utils + +array_utils.byte_bounds(1) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_version.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_version.pyi new file mode 100644 index 0000000..62011a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/lib_version.pyi @@ -0,0 +1,6 @@ +from numpy.lib import NumpyVersion + +version: NumpyVersion + +NumpyVersion(b"1.8.0") # type: ignore[arg-type] +version >= b"1.8.0" # type: ignore[operator] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/linalg.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/linalg.pyi new file mode 100644 index 0000000..c4695ee --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/linalg.pyi @@ -0,0 +1,48 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_O: npt.NDArray[np.object_] +AR_M: npt.NDArray[np.datetime64] + +np.linalg.tensorsolve(AR_O, AR_O) # type: ignore[arg-type] + +np.linalg.solve(AR_O, AR_O) # type: ignore[arg-type] + +np.linalg.tensorinv(AR_O) # type: ignore[arg-type] + +np.linalg.inv(AR_O) # type: ignore[arg-type] + +np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] + +np.linalg.cholesky(AR_O) # type: ignore[arg-type] + +np.linalg.qr(AR_O) # type: ignore[arg-type] +np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] + +np.linalg.eigvals(AR_O) # type: ignore[arg-type] + +np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] +np.linalg.eigvalsh(AR_O, UPLO="bob") # type: ignore[call-overload] + +np.linalg.eig(AR_O) # type: ignore[arg-type] + +np.linalg.eigh(AR_O) # type: ignore[arg-type] +np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] + +np.linalg.svd(AR_O) # type: ignore[arg-type] + +np.linalg.cond(AR_O) # type: ignore[arg-type] +np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] + +np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] + +np.linalg.pinv(AR_O) # type: ignore[arg-type] + +np.linalg.slogdet(AR_O) # type: ignore[arg-type] + +np.linalg.det(AR_O) # type: ignore[arg-type] + +np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] + +np.linalg.multi_dot([AR_M]) # type: ignore[list-item] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ma.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ma.pyi new file mode 100644 index 0000000..41306b2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ma.pyi @@ -0,0 +1,143 @@ +from typing import TypeAlias, TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _Shape + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] + +MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] +MAR_b: MaskedArray[np.bool] +MAR_c: MaskedArray[np.complex128] +MAR_td64: MaskedArray[np.timedelta64] + 
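+# (editor's note) The `MaskedArray` alias above fixes the shape parameter to
+# `_Shape` and varies only the scalar type; `MAR_1d_f8` is declared
+# separately because the shape- and dtype-assignment checks below need a
+# concrete 1-d shape.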
+AR_b: npt.NDArray[np.bool] + +MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] +MAR_1d_f8.dtype = np.bool # type: ignore[assignment] + +np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.min(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.max(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.max(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.ptp(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.ptp(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmin(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmin(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, axis=(1,)) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmax(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmax(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, axis=(0,)) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.all(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.any(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.sort(axis=(0,1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] +MAR_1d_f8.sort(kind='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(order=lambda: 'cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(endwith='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(fill_value=lambda: 'cabbage') # type: 
ignore[arg-type] +MAR_1d_f8.sort(stable='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(stable=True) # type: ignore[arg-type] + +MAR_1d_f8.take(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.take(out=1) # type: ignore[call-overload] +MAR_1d_f8.take(mode="bob") # type: ignore[call-overload] + +np.ma.take(None) # type: ignore[call-overload] +np.ma.take(axis=1.0) # type: ignore[call-overload] +np.ma.take(out=1) # type: ignore[call-overload] +np.ma.take(mode="bob") # type: ignore[call-overload] + +MAR_1d_f8.partition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.partition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(kind='cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] + +MAR_1d_f8.argpartition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.argpartition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(kind='cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] + +np.ma.ndim(lambda: 'lambda') # type: ignore[arg-type] + +np.ma.size(AR_b, axis='0') # type: ignore[arg-type] + +MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] + +MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] + +np.ma.count(MAR_1d_f8, axis=0.) # type: ignore[call-overload] + +MAR_1d_f8.put(4, 999, mode='flip') # type: ignore[arg-type] + +np.ma.put(MAR_1d_f8, 4, 999, mode='flip') # type: ignore[arg-type] + +np.ma.put([1,1,3], 0, 999) # type: ignore[arg-type] + +np.ma.compressed(lambda: 'compress me') # type: ignore[call-overload] + +np.ma.allequal(MAR_1d_f8, [1,2,3], fill_value=1.5) # type: ignore[arg-type] + +np.ma.allclose(MAR_1d_f8, [1,2,3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1,2,3], rtol='.4') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1,2,3], atol='.5') # type: ignore[arg-type] + +MAR_1d_f8.__setmask__('mask') # type: ignore[arg-type] + +MAR_b *= 2 # type: ignore[arg-type] +MAR_c //= 2 # type: ignore[misc] +MAR_td64 **= 2 # type: ignore[misc] + +MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/memmap.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/memmap.pyi new file mode 100644 index 0000000..3a4fc7d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/memmap.pyi @@ -0,0 +1,5 @@ +import numpy as np + +with open("file.txt", "r") as f: + np.memmap(f) # type: ignore[call-overload] +np.memmap("test.txt", shape=[10, 5]) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/modules.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/modules.pyi new file mode 100644 index 0000000..c12a182 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/modules.pyi @@ -0,0 +1,17 @@ +import numpy as np + +np.testing.bob # type: ignore[attr-defined] +np.bob # type: ignore[attr-defined] + +# Stdlib modules in the namespace by accident +np.warnings # type: ignore[attr-defined] +np.sys # type: ignore[attr-defined] +np.os # type: ignore[attr-defined] +np.math # type: 
ignore[attr-defined] + +# Public sub-modules that are not imported to their parent module by default; +# e.g. one must first execute `import numpy.lib.recfunctions` +np.lib.recfunctions # type: ignore[attr-defined] + +np.__deprecated_attrs__ # type: ignore[attr-defined] +np.__expired_functions__ # type: ignore[attr-defined] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/multiarray.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/multiarray.pyi new file mode 100644 index 0000000..1f9ef68 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/multiarray.pyi @@ -0,0 +1,52 @@ +import numpy as np +import numpy.typing as npt + +i8: np.int64 + +AR_b: npt.NDArray[np.bool] +AR_u1: npt.NDArray[np.uint8] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] + +M: np.datetime64 + +AR_LIKE_f: list[float] + +def func(a: int) -> None: ... + +np.where(AR_b, 1) # type: ignore[call-overload] + +np.can_cast(AR_f8, 1) # type: ignore[arg-type] + +np.vdot(AR_M, AR_M) # type: ignore[arg-type] + +np.copyto(AR_LIKE_f, AR_f8) # type: ignore[arg-type] + +np.putmask(AR_LIKE_f, [True, True, False], 1.5) # type: ignore[arg-type] + +np.packbits(AR_f8) # type: ignore[arg-type] +np.packbits(AR_u1, bitorder=">") # type: ignore[arg-type] + +np.unpackbits(AR_i8) # type: ignore[arg-type] +np.unpackbits(AR_u1, bitorder=">") # type: ignore[arg-type] + +np.shares_memory(1, 1, max_work=i8) # type: ignore[arg-type] +np.may_share_memory(1, 1, max_work=i8) # type: ignore[arg-type] + +np.arange(stop=10) # type: ignore[call-overload] + +np.datetime_data(int) # type: ignore[arg-type] + +np.busday_offset("2012", 10) # type: ignore[call-overload] + +np.datetime_as_string("2012") # type: ignore[call-overload] + +np.char.compare_chararrays("a", b"a", "==", False) # type: ignore[call-overload] + +np.nested_iters([AR_i8, AR_i8]) # type: ignore[call-arg] +np.nested_iters([AR_i8, AR_i8], 0) # type: ignore[arg-type] +np.nested_iters([AR_i8, AR_i8], [0]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ndarray.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ndarray.pyi new file mode 100644 index 0000000..2aeec08 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ndarray.pyi @@ -0,0 +1,11 @@ +import numpy as np + +# Ban setting dtype since mutating the type of the array in place +# makes having ndarray be generic over dtype impossible. Generally +# users should use `ndarray.view` in this situation anyway. See +# +# https://github.com/numpy/numpy-stubs/issues/7 +# +# for more context. +float_array = np.array([1.0]) +float_array.dtype = np.bool # type: ignore[assignment, misc] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ndarray_misc.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ndarray_misc.pyi new file mode 100644 index 0000000..93e1bce --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -0,0 +1,36 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. 
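+(Editor's note: the ``f8.*`` checks below assert that scalar types do not
+provide ``ndarray``-only methods such as ``sort`` or ``put``, and the
+indexing checks assert that float and field indices are rejected on a
+non-structured array.)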
+ +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] +AR_b: npt.NDArray[np.bool] + +ctypes_obj = AR_f8.ctypes + +f8.argpartition(0) # type: ignore[attr-defined] +f8.diagonal() # type: ignore[attr-defined] +f8.dot(1) # type: ignore[attr-defined] +f8.nonzero() # type: ignore[attr-defined] +f8.partition(0) # type: ignore[attr-defined] +f8.put(0, 2) # type: ignore[attr-defined] +f8.setfield(2, np.float64) # type: ignore[attr-defined] +f8.sort() # type: ignore[attr-defined] +f8.trace() # type: ignore[attr-defined] + +AR_M.__complex__() # type: ignore[misc] +AR_b.__index__() # type: ignore[misc] + +AR_f8[1.5] # type: ignore[call-overload] +AR_f8["field_a"] # type: ignore[call-overload] +AR_f8[["field_a", "field_b"]] # type: ignore[index] + +AR_f8.__array_finalize__(object()) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/nditer.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/nditer.pyi new file mode 100644 index 0000000..cb64061 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/nditer.pyi @@ -0,0 +1,8 @@ +import numpy as np + +class Test(np.nditer): ... # type: ignore[misc] + +np.nditer([0, 1], flags=["test"]) # type: ignore[list-item] +np.nditer([0, 1], op_flags=[["test"]]) # type: ignore[list-item] +np.nditer([0, 1], itershape=(1.0,)) # type: ignore[arg-type] +np.nditer([0, 1], buffersize=1.0) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/nested_sequence.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/nested_sequence.pyi new file mode 100644 index 0000000..a28d3df --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -0,0 +1,16 @@ +from collections.abc import Sequence +from numpy._typing import _NestedSequence + +a: Sequence[float] +b: list[complex] +c: tuple[str, ...] +d: int +e: str + +def func(a: _NestedSequence[int]) -> None: ... 
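+# (editor's note) None of the declarations above satisfy
+# `_NestedSequence[int]`: `str` is excluded on purpose, a bare `int` is not a
+# sequence at all, and the element types of the remaining ones do not match
+# `int`, so every call below must fail to type-check.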
+ +reveal_type(func(a)) # type: ignore[arg-type, misc] +reveal_type(func(b)) # type: ignore[arg-type, misc] +reveal_type(func(c)) # type: ignore[arg-type, misc] +reveal_type(func(d)) # type: ignore[arg-type, misc] +reveal_type(func(e)) # type: ignore[arg-type, misc] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/npyio.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/npyio.pyi new file mode 100644 index 0000000..e204566 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/npyio.pyi @@ -0,0 +1,24 @@ +import pathlib +from typing import IO + +import numpy.typing as npt +import numpy as np + +str_path: str +bytes_path: bytes +pathlib_path: pathlib.Path +str_file: IO[str] +AR_i8: npt.NDArray[np.int64] + +np.load(str_file) # type: ignore[arg-type] + +np.save(bytes_path, AR_i8) # type: ignore[call-overload] +np.save(str_path, AR_i8, fix_imports=True) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + +np.savez(bytes_path, AR_i8) # type: ignore[arg-type] + +np.savez_compressed(bytes_path, AR_i8) # type: ignore[arg-type] + +np.loadtxt(bytes_path) # type: ignore[arg-type] + +np.fromregex(bytes_path, ".", np.int64) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/numerictypes.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/numerictypes.pyi new file mode 100644 index 0000000..a1fd47a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/numerictypes.pyi @@ -0,0 +1,5 @@ +import numpy as np + +np.isdtype(1, np.int64) # type: ignore[arg-type] + +np.issubdtype(1, np.int64) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/random.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/random.pyi new file mode 100644 index 0000000..1abf4b7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/random.pyi @@ -0,0 +1,62 @@ +import numpy as np +import numpy.typing as npt + +SEED_FLOAT: float = 457.3 +SEED_ARR_FLOAT: npt.NDArray[np.float64] = np.array([1.0, 2, 3, 4]) +SEED_ARRLIKE_FLOAT: list[float] = [1.0, 2.0, 3.0, 4.0] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_STR: str = "String seeding not allowed" + +# default rng +np.random.default_rng(SEED_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_STR) # type: ignore[arg-type] + +# Seed Sequence +np.random.SeedSequence(SEED_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_SEED_SEQ) # type: ignore[arg-type] +np.random.SeedSequence(SEED_STR) # type: ignore[arg-type] + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence() +seed_seq.spawn(11.5) # type: ignore[arg-type] +seed_seq.generate_state(3.14) # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint8) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint8") # type: ignore[arg-type] +seed_seq.generate_state(3, "u1") # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint16) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint16") # type: ignore[arg-type] +seed_seq.generate_state(3, "u2") # type: ignore[arg-type] +seed_seq.generate_state(3, np.int32) # type: 
ignore[arg-type] +seed_seq.generate_state(3, "int32") # type: ignore[arg-type] +seed_seq.generate_state(3, "i4") # type: ignore[arg-type] + +# Bit Generators +np.random.MT19937(SEED_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_STR) # type: ignore[arg-type] + +np.random.PCG64(SEED_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_STR) # type: ignore[arg-type] + +np.random.Philox(SEED_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_STR) # type: ignore[arg-type] + +np.random.SFC64(SEED_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_STR) # type: ignore[arg-type] + +# Generator +np.random.Generator(None) # type: ignore[arg-type] +np.random.Generator(12333283902830213) # type: ignore[arg-type] +np.random.Generator("OxFEEDF00D") # type: ignore[arg-type] +np.random.Generator([123, 234]) # type: ignore[arg-type] +np.random.Generator(np.array([123, 234], dtype="u4")) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/rec.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/rec.pyi new file mode 100644 index 0000000..c9d43dd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/rec.pyi @@ -0,0 +1,17 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] + +np.rec.fromarrays(1) # type: ignore[call-overload] +np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +np.rec.fromrecords(AR_i8) # type: ignore[arg-type] +np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +np.rec.fromstring("string", dtype=[("f8", "f8")]) # type: ignore[call-overload] +np.rec.fromstring(b"bytes") # type: ignore[call-overload] +np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +with open("test", "r") as f: + np.rec.fromfile(f, dtype=[("f8", "f8")]) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/scalars.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/scalars.pyi new file mode 100644 index 0000000..bfbe912 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/scalars.pyi @@ -0,0 +1,87 @@ +import sys +import numpy as np + +f2: np.float16 +f8: np.float64 +c8: np.complex64 + +# Construction + +np.float32(3j) # type: ignore[arg-type] + +# Technically the following examples are valid NumPy code. But they +# are not considered a best practice, and people who wish to use the +# stubs should instead do +# +# np.array([1.0, 0.0, 0.0], dtype=np.float32) +# np.array([], dtype=np.complex64) +# +# See e.g. the discussion on the mailing list +# +# https://mail.python.org/pipermail/numpy-discussion/2020-April/080566.html +# +# and the issue +# +# https://github.com/numpy/numpy-stubs/issues/41 +# +# for more context. 
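+# (editor's note) At runtime both calls below actually return an `ndarray`,
+# not a scalar, which is why the stubs refuse them: a `np.float32(...)` call
+# that does not produce a `np.float32` would make the annotated return type
+# a lie.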
+np.float32([1.0, 0.0, 0.0]) # type: ignore[arg-type] +np.complex64([]) # type: ignore[call-overload] + +# TODO: protocols (can't check for non-existent protocols w/ __getattr__) + +np.datetime64(0) # type: ignore[call-overload] + +class A: + def __float__(self) -> float: ... + +np.int8(A()) # type: ignore[arg-type] +np.int16(A()) # type: ignore[arg-type] +np.int32(A()) # type: ignore[arg-type] +np.int64(A()) # type: ignore[arg-type] +np.uint8(A()) # type: ignore[arg-type] +np.uint16(A()) # type: ignore[arg-type] +np.uint32(A()) # type: ignore[arg-type] +np.uint64(A()) # type: ignore[arg-type] + +np.void("test") # type: ignore[call-overload] +np.void("test", dtype=None) # type: ignore[call-overload] + +np.generic(1) # type: ignore[abstract] +np.number(1) # type: ignore[abstract] +np.integer(1) # type: ignore[abstract] +np.inexact(1) # type: ignore[abstract] +np.character("test") # type: ignore[abstract] +np.flexible(b"test") # type: ignore[abstract] + +np.float64(value=0.0) # type: ignore[call-arg] +np.int64(value=0) # type: ignore[call-arg] +np.uint64(value=0) # type: ignore[call-arg] +np.complex128(value=0.0j) # type: ignore[call-overload] +np.str_(value='bob') # type: ignore[call-overload] +np.bytes_(value=b'test') # type: ignore[call-overload] +np.void(value=b'test') # type: ignore[call-overload] +np.bool(value=True) # type: ignore[call-overload] +np.datetime64(value="2019") # type: ignore[call-overload] +np.timedelta64(value=0) # type: ignore[call-overload] + +np.bytes_(b"hello", encoding='utf-8') # type: ignore[call-overload] +np.str_("hello", encoding='utf-8') # type: ignore[call-overload] + +f8.item(1) # type: ignore[call-overload] +f8.item((0, 1)) # type: ignore[arg-type] +f8.squeeze(axis=1) # type: ignore[arg-type] +f8.squeeze(axis=(0, 1)) # type: ignore[arg-type] +f8.transpose(1) # type: ignore[arg-type] + +def func(a: np.float32) -> None: ... 
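+# (editor's note) `func` accepts exactly `np.float32`; the stubs model no
+# implicit conversion between float sizes, so both the `float16` and
+# `float64` arguments below are errors.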
+ +func(f2) # type: ignore[arg-type] +func(f8) # type: ignore[arg-type] + +c8.__getnewargs__() # type: ignore[attr-defined] +f2.__getnewargs__() # type: ignore[attr-defined] +f2.hex() # type: ignore[attr-defined] +np.float16.fromhex("0x0.0p+0") # type: ignore[attr-defined] +f2.__trunc__() # type: ignore[attr-defined] +f2.__getformat__("float") # type: ignore[attr-defined] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/shape.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/shape.pyi new file mode 100644 index 0000000..fea0555 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/shape.pyi @@ -0,0 +1,6 @@ +from typing import Any +import numpy as np + +# test bounds of _ShapeT_co + +np.ndarray[tuple[str, str], Any] # type: ignore[type-var] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/shape_base.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/shape_base.pyi new file mode 100644 index 0000000..652b24b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/shape_base.pyi @@ -0,0 +1,8 @@ +import numpy as np + +class DTypeLike: + dtype: np.dtype[np.int_] + +dtype_like: DTypeLike + +np.expand_dims(dtype_like, (5, 10)) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/stride_tricks.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/stride_tricks.pyi new file mode 100644 index 0000000..7f9a26b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/stride_tricks.pyi @@ -0,0 +1,9 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] + +np.lib.stride_tricks.as_strided(AR_f8, shape=8) # type: ignore[call-overload] +np.lib.stride_tricks.as_strided(AR_f8, strides=8) # type: ignore[call-overload] + +np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/strings.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/strings.pyi new file mode 100644 index 0000000..328a521 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/strings.pyi @@ -0,0 +1,52 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] + +np.strings.equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.greater(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.encode(AR_S) # type: ignore[arg-type] +np.strings.decode(AR_U) # type: ignore[arg-type] + +np.strings.lstrip(AR_U, b"a") # type: ignore[arg-type] +np.strings.lstrip(AR_S, "a") # type: ignore[arg-type] +np.strings.strip(AR_U, b"a") # type: ignore[arg-type] +np.strings.strip(AR_S, "a") # type: ignore[arg-type] +np.strings.rstrip(AR_U, b"a") # type: ignore[arg-type] +np.strings.rstrip(AR_S, "a") # type: ignore[arg-type] + +np.strings.partition(AR_U, b"a") # type: ignore[arg-type] +np.strings.partition(AR_S, "a") # type: ignore[arg-type] +np.strings.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.strings.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.strings.count(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] 
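+# (editor's note) As elsewhere in this file, each pair mixes a `str_` array
+# with `bytes` arguments (or a `bytes_` array with `str` arguments); the
+# `start`/`end` positions are fine, it is the string kind that is rejected.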
+np.strings.count(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.endswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.endswith(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.startswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.startswith(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.find(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.find(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.rfind(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.rfind(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.strings.isdecimal(AR_S) # type: ignore[arg-type] +np.strings.isnumeric(AR_S) # type: ignore[arg-type] + +np.strings.replace(AR_U, b"_", b"-", 10) # type: ignore[arg-type] +np.strings.replace(AR_S, "_", "-", 1) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/testing.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/testing.pyi new file mode 100644 index 0000000..517062c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/testing.pyi @@ -0,0 +1,28 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] + +def func(x: object) -> bool: ... + +np.testing.assert_(True, msg=1) # type: ignore[arg-type] +np.testing.build_err_msg(1, "test") # type: ignore[arg-type] +np.testing.assert_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.testing.assert_array_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_less(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_string_equal(b"a", b"a") # type: ignore[arg-type] + +np.testing.assert_raises(expected_exception=TypeError, callable=func) # type: ignore[call-overload] +np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # type: ignore[call-overload] + +np.testing.assert_allclose(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_max_ulp(AR_U, AR_U) # type: ignore[arg-type] + +np.testing.assert_warns(RuntimeWarning, func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func=func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func, y=None) # type: ignore[call-overload] + +np.testing.assert_no_gc_cycles(func=func) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/twodim_base.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/twodim_base.pyi new file mode 100644 index 0000000..d0f2b7a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/twodim_base.pyi @@ -0,0 +1,32 @@ +from typing import Any, TypeVar + +import numpy as np +import numpy.typing as npt + +def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]: ... + +def func2(ar: npt.NDArray[Any], a: float) -> float: ... 
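+# (editor's note) `func1` and `func2` are intentionally unusable as
+# `mask_func` callbacks: one returns a string array, the other a bare float,
+# so the `np.mask_indices` calls at the end of this file must fail.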
+ +AR_b: npt.NDArray[np.bool] +AR_m: npt.NDArray[np.timedelta64] + +AR_LIKE_b: list[bool] + +np.eye(10, M=20.0) # type: ignore[call-overload] +np.eye(10, k=2.5, dtype=int) # type: ignore[call-overload] + +np.diag(AR_b, k=0.5) # type: ignore[call-overload] +np.diagflat(AR_b, k=0.5) # type: ignore[call-overload] + +np.tri(10, M=20.0) # type: ignore[call-overload] +np.tri(10, k=2.5, dtype=int) # type: ignore[call-overload] + +np.tril(AR_b, k=0.5) # type: ignore[call-overload] +np.triu(AR_b, k=0.5) # type: ignore[call-overload] + +np.vander(AR_m) # type: ignore[arg-type] + +np.histogram2d(AR_m) # type: ignore[call-overload] + +np.mask_indices(10, func1) # type: ignore[arg-type] +np.mask_indices(10, func2, 10.5) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/type_check.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/type_check.pyi new file mode 100644 index 0000000..94b6ee4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/type_check.pyi @@ -0,0 +1,13 @@ +import numpy as np +import numpy.typing as npt + +DTYPE_i8: np.dtype[np.int64] + +np.mintypecode(DTYPE_i8) # type: ignore[arg-type] +np.iscomplexobj(DTYPE_i8) # type: ignore[arg-type] +np.isrealobj(DTYPE_i8) # type: ignore[arg-type] + +np.typename(DTYPE_i8) # type: ignore[call-overload] +np.typename("invalid") # type: ignore[call-overload] + +np.common_type(np.timedelta64()) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufunc_config.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufunc_config.pyi new file mode 100644 index 0000000..c67b6a3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufunc_config.pyi @@ -0,0 +1,21 @@ +"""Typing tests for `numpy._core._ufunc_config`.""" + +import numpy as np + +def func1(a: str, b: int, c: float) -> None: ... +def func2(a: str, *, b: int) -> None: ... + +class Write1: + def write1(self, a: str) -> None: ... + +class Write2: + def write(self, a: str, b: str) -> None: ... + +class Write3: + def write(self, *, a: str) -> None: ... 
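+# (editor's note) A valid error handler is either a callable taking two
+# positional arguments (the error type and flag) or an object with a
+# `write(str)`-style method; each definition above breaks that contract in a
+# different way, so every `np.seterrcall` call below is expected to fail.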
+ +np.seterrcall(func1) # type: ignore[arg-type] +np.seterrcall(func2) # type: ignore[arg-type] +np.seterrcall(Write1()) # type: ignore[arg-type] +np.seterrcall(Write2()) # type: ignore[arg-type] +np.seterrcall(Write3()) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufunclike.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufunclike.pyi new file mode 100644 index 0000000..e556e40 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufunclike.pyi @@ -0,0 +1,21 @@ +import numpy as np +import numpy.typing as npt + +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] + +np.fix(AR_c) # type: ignore[arg-type] +np.fix(AR_m) # type: ignore[arg-type] +np.fix(AR_M) # type: ignore[arg-type] + +np.isposinf(AR_c) # type: ignore[arg-type] +np.isposinf(AR_m) # type: ignore[arg-type] +np.isposinf(AR_M) # type: ignore[arg-type] +np.isposinf(AR_O) # type: ignore[arg-type] + +np.isneginf(AR_c) # type: ignore[arg-type] +np.isneginf(AR_m) # type: ignore[arg-type] +np.isneginf(AR_M) # type: ignore[arg-type] +np.isneginf(AR_O) # type: ignore[arg-type] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufuncs.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufuncs.pyi new file mode 100644 index 0000000..1b1628d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/ufuncs.pyi @@ -0,0 +1,17 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] + +np.sin.nin + "foo" # type: ignore[operator] +np.sin(1, foo="bar") # type: ignore[call-overload] + +np.abs(None) # type: ignore[call-overload] + +np.add(1, 1, 1) # type: ignore[call-overload] +np.add(1, 1, axis=0) # type: ignore[call-overload] + +np.matmul(AR_f8, AR_f8, where=True) # type: ignore[call-overload] + +np.frexp(AR_f8, out=None) # type: ignore[call-overload] +np.frexp(AR_f8, out=AR_f8) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi new file mode 100644 index 0000000..8ba34f6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi @@ -0,0 +1,5 @@ +import numpy.exceptions as ex + +ex.AxisError(1.0) # type: ignore[call-overload] +ex.AxisError(1, ndim=2.0) # type: ignore[call-overload] +ex.AxisError(2, msg_prefix=404) # type: ignore[call-overload] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi new file mode 100644 index 0000000..84b5f51 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi @@ -0,0 +1,9 @@ +import numpy as np +from numpy._typing import _96Bit, _128Bit + +from typing import assert_type + +assert_type(np.float96(), np.floating[_96Bit]) +assert_type(np.float128(), np.floating[_128Bit]) +assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) +assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/mypy.ini b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/mypy.ini new file mode 100644 index 0000000..bca2032 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/mypy.ini @@ -0,0 +1,9 @@ +[mypy] +enable_error_code = deprecated, ignore-without-code, truthy-bool +strict_bytes = True +warn_unused_ignores = True +implicit_reexport = False +disallow_any_unimported = True +disallow_any_generics = True +show_absolute_path = True +pretty = True diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-312.pyc new file mode 100644 index 0000000..168bf9b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-312.pyc new file mode 100644 index 0000000..032a3db Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/array_like.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/array_like.cpython-312.pyc new file mode 100644 index 0000000..631a7e0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/array_like.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-312.pyc new file mode 100644 index 0000000..b7fed61 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-312.pyc new file mode 100644 index 0000000..9499aab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-312.pyc new file mode 100644 index 0000000..12367b3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-312.pyc new file mode 100644 index 0000000..4edce0a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/dtype.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/dtype.cpython-312.pyc new file mode 100644 index 0000000..e190854 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/dtype.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-312.pyc new file mode 100644 index 0000000..6642667 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-312.pyc new file mode 100644 index 0000000..ba4ae7f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-312.pyc new file mode 100644 index 0000000..f7edbef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-312.pyc new file mode 100644 index 0000000..fdec271 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-312.pyc new file mode 100644 index 0000000..fac9585 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-312.pyc new file mode 100644 index 0000000..7069fe1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-312.pyc new file mode 100644 index 0000000..afb06f3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/literal.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/literal.cpython-312.pyc new file mode 100644 index 0000000..2fdbd51 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/literal.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ma.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ma.cpython-312.pyc new 
file mode 100644 index 0000000..9578aaf Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ma.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/mod.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/mod.cpython-312.pyc new file mode 100644 index 0000000..ad6cfea Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/mod.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/modules.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/modules.cpython-312.pyc new file mode 100644 index 0000000..b1036ef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/modules.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-312.pyc new file mode 100644 index 0000000..71e3cd6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-312.pyc new file mode 100644 index 0000000..4d8b635 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-312.pyc new file mode 100644 index 0000000..1eea401 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-312.pyc new file mode 100644 index 0000000..6b6f1ab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/nditer.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/nditer.cpython-312.pyc new file mode 100644 index 0000000..42e3cc5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/nditer.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/numeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/numeric.cpython-312.pyc new file mode 100644 index 0000000..c6e185f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/numeric.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-312.pyc new file mode 100644 index 0000000..419dc2f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/random.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/random.cpython-312.pyc new file mode 100644 index 0000000..df27ed5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/random.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-312.pyc new file mode 100644 index 0000000..78ef490 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/scalars.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/scalars.cpython-312.pyc new file mode 100644 index 0000000..f80244a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/scalars.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/shape.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/shape.cpython-312.pyc new file mode 100644 index 0000000..fac4a9e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/shape.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/simple.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/simple.cpython-312.pyc new file mode 100644 index 0000000..4fc8f13 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/simple.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-312.pyc new file mode 100644 index 0000000..ff3b500 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-312.pyc new file mode 100644 index 0000000..14c61c2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-312.pyc new file mode 100644 index 0000000..07c7c97 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-312.pyc new file mode 100644 index 0000000..40f630c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-312.pyc new file mode 100644 index 0000000..6bf947e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arithmetic.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arithmetic.py new file mode 100644 index 0000000..3b2901c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arithmetic.py @@ -0,0 +1,612 @@ +from __future__ import annotations + +from typing import Any, cast +import numpy as np +import numpy.typing as npt +import pytest + +c16 = np.complex128(1) +f8 = np.float64(1) +i8 = np.int64(1) +u8 = np.uint64(1) + +c8 = np.complex64(1) +f4 = np.float32(1) +i4 = np.int32(1) +u4 = np.uint32(1) + +dt = np.datetime64(1, "D") +td = np.timedelta64(1, "D") + +b_ = np.bool(1) + +b = bool(1) +c = complex(1) +f = float(1) +i = int(1) + + +class Object: + def __array__(self, dtype: np.typing.DTypeLike = None, + copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: + ret = np.empty((), dtype=object) + ret[()] = self + return ret + + def __sub__(self, value: Any) -> Object: + return self + + def __rsub__(self, value: Any) -> Object: + return self + + def __floordiv__(self, value: Any) -> Object: + return self + + def __rfloordiv__(self, value: Any) -> Object: + return self + + def __mul__(self, value: Any) -> Object: + return self + + def __rmul__(self, value: Any) -> Object: + return self + + def __pow__(self, value: Any) -> Object: + return self + + def __rpow__(self, value: Any) -> Object: + return self + + +AR_b: npt.NDArray[np.bool] = np.array([True]) +AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) +AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) +AR_f: npt.NDArray[np.float64] = np.array([1.0]) +AR_c: npt.NDArray[np.complex128] = np.array([1j]) +AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) +AR_M: npt.NDArray[np.datetime64] = np.array([np.datetime64(1, "D")]) +AR_O: npt.NDArray[np.object_] = np.array([Object()]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] +AR_LIKE_O = [Object()] + +# Array subtractions + +AR_b - AR_LIKE_u +AR_b - AR_LIKE_i +AR_b - AR_LIKE_f +AR_b - AR_LIKE_c +AR_b - AR_LIKE_m +AR_b - AR_LIKE_O + +AR_LIKE_u - AR_b +AR_LIKE_i - AR_b +AR_LIKE_f - AR_b +AR_LIKE_c - AR_b +AR_LIKE_m - AR_b +AR_LIKE_M - AR_b +AR_LIKE_O - AR_b + +AR_u - AR_LIKE_b +AR_u - AR_LIKE_u +AR_u - AR_LIKE_i +AR_u - AR_LIKE_f +AR_u - AR_LIKE_c +AR_u - AR_LIKE_m +AR_u - AR_LIKE_O + +AR_LIKE_b - AR_u +AR_LIKE_u - AR_u +AR_LIKE_i - AR_u +AR_LIKE_f - AR_u +AR_LIKE_c - AR_u +AR_LIKE_m - AR_u +AR_LIKE_M - AR_u +AR_LIKE_O - AR_u + +AR_i - 
AR_LIKE_b +AR_i - AR_LIKE_u +AR_i - AR_LIKE_i +AR_i - AR_LIKE_f +AR_i - AR_LIKE_c +AR_i - AR_LIKE_m +AR_i - AR_LIKE_O + +AR_LIKE_b - AR_i +AR_LIKE_u - AR_i +AR_LIKE_i - AR_i +AR_LIKE_f - AR_i +AR_LIKE_c - AR_i +AR_LIKE_m - AR_i +AR_LIKE_M - AR_i +AR_LIKE_O - AR_i + +AR_f - AR_LIKE_b +AR_f - AR_LIKE_u +AR_f - AR_LIKE_i +AR_f - AR_LIKE_f +AR_f - AR_LIKE_c +AR_f - AR_LIKE_O + +AR_LIKE_b - AR_f +AR_LIKE_u - AR_f +AR_LIKE_i - AR_f +AR_LIKE_f - AR_f +AR_LIKE_c - AR_f +AR_LIKE_O - AR_f + +AR_c - AR_LIKE_b +AR_c - AR_LIKE_u +AR_c - AR_LIKE_i +AR_c - AR_LIKE_f +AR_c - AR_LIKE_c +AR_c - AR_LIKE_O + +AR_LIKE_b - AR_c +AR_LIKE_u - AR_c +AR_LIKE_i - AR_c +AR_LIKE_f - AR_c +AR_LIKE_c - AR_c +AR_LIKE_O - AR_c + +AR_m - AR_LIKE_b +AR_m - AR_LIKE_u +AR_m - AR_LIKE_i +AR_m - AR_LIKE_m + +AR_LIKE_b - AR_m +AR_LIKE_u - AR_m +AR_LIKE_i - AR_m +AR_LIKE_m - AR_m +AR_LIKE_M - AR_m + +AR_M - AR_LIKE_b +AR_M - AR_LIKE_u +AR_M - AR_LIKE_i +AR_M - AR_LIKE_m +AR_M - AR_LIKE_M + +AR_LIKE_M - AR_M + +AR_O - AR_LIKE_b +AR_O - AR_LIKE_u +AR_O - AR_LIKE_i +AR_O - AR_LIKE_f +AR_O - AR_LIKE_c +AR_O - AR_LIKE_O + +AR_LIKE_b - AR_O +AR_LIKE_u - AR_O +AR_LIKE_i - AR_O +AR_LIKE_f - AR_O +AR_LIKE_c - AR_O +AR_LIKE_O - AR_O + +AR_u += AR_b +AR_u += AR_u +AR_u += 1 # Allowed during runtime as long as the object is 0D and >=0 + +# Array floor division + +AR_b // AR_LIKE_b +AR_b // AR_LIKE_u +AR_b // AR_LIKE_i +AR_b // AR_LIKE_f +AR_b // AR_LIKE_O + +AR_LIKE_b // AR_b +AR_LIKE_u // AR_b +AR_LIKE_i // AR_b +AR_LIKE_f // AR_b +AR_LIKE_O // AR_b + +AR_u // AR_LIKE_b +AR_u // AR_LIKE_u +AR_u // AR_LIKE_i +AR_u // AR_LIKE_f +AR_u // AR_LIKE_O + +AR_LIKE_b // AR_u +AR_LIKE_u // AR_u +AR_LIKE_i // AR_u +AR_LIKE_f // AR_u +AR_LIKE_m // AR_u +AR_LIKE_O // AR_u + +AR_i // AR_LIKE_b +AR_i // AR_LIKE_u +AR_i // AR_LIKE_i +AR_i // AR_LIKE_f +AR_i // AR_LIKE_O + +AR_LIKE_b // AR_i +AR_LIKE_u // AR_i +AR_LIKE_i // AR_i +AR_LIKE_f // AR_i +AR_LIKE_m // AR_i +AR_LIKE_O // AR_i + +AR_f // AR_LIKE_b +AR_f // AR_LIKE_u +AR_f // AR_LIKE_i +AR_f // AR_LIKE_f +AR_f // AR_LIKE_O + +AR_LIKE_b // AR_f +AR_LIKE_u // AR_f +AR_LIKE_i // AR_f +AR_LIKE_f // AR_f +AR_LIKE_m // AR_f +AR_LIKE_O // AR_f + +AR_m // AR_LIKE_u +AR_m // AR_LIKE_i +AR_m // AR_LIKE_f +AR_m // AR_LIKE_m + +AR_LIKE_m // AR_m + +AR_m /= f +AR_m //= f +AR_m /= AR_f +AR_m /= AR_LIKE_f +AR_m //= AR_f +AR_m //= AR_LIKE_f + +AR_O // AR_LIKE_b +AR_O // AR_LIKE_u +AR_O // AR_LIKE_i +AR_O // AR_LIKE_f +AR_O // AR_LIKE_O + +AR_LIKE_b // AR_O +AR_LIKE_u // AR_O +AR_LIKE_i // AR_O +AR_LIKE_f // AR_O +AR_LIKE_O // AR_O + +# Inplace multiplication + +AR_b *= AR_LIKE_b + +AR_u *= AR_LIKE_b +AR_u *= AR_LIKE_u + +AR_i *= AR_LIKE_b +AR_i *= AR_LIKE_u +AR_i *= AR_LIKE_i + +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + +AR_f *= AR_LIKE_b +AR_f *= AR_LIKE_u +AR_f *= AR_LIKE_i +AR_f *= AR_LIKE_f + +AR_c *= AR_LIKE_b +AR_c *= AR_LIKE_u +AR_c *= AR_LIKE_i +AR_c *= AR_LIKE_f +AR_c *= AR_LIKE_c + +AR_m *= AR_LIKE_b +AR_m *= AR_LIKE_u +AR_m *= AR_LIKE_i +AR_m *= AR_LIKE_f + +AR_O *= AR_LIKE_b +AR_O *= AR_LIKE_u +AR_O *= AR_LIKE_i +AR_O *= AR_LIKE_f +AR_O *= AR_LIKE_c +AR_O *= AR_LIKE_O + +# Inplace power + +AR_u **= AR_LIKE_b +AR_u **= AR_LIKE_u + +AR_i **= AR_LIKE_b +AR_i **= AR_LIKE_u +AR_i **= AR_LIKE_i + +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + +AR_f **= AR_LIKE_b +AR_f **= AR_LIKE_u +AR_f **= AR_LIKE_i +AR_f **= AR_LIKE_f + +AR_c **= AR_LIKE_b +AR_c **= AR_LIKE_u +AR_c **= AR_LIKE_i +AR_c **= AR_LIKE_f +AR_c **= AR_LIKE_c + +AR_O **= AR_LIKE_b 
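+# Illustrative aside (editor sketch, not part of the vendored file): the
+# in-place combinations in this block mirror NumPy's runtime rule that the
+# promoted result must cast back to the left operand under "same_kind" --
+# hence `AR_f **= AR_LIKE_c` is absent while `AR_c **= AR_LIKE_c` is present:
+assert not np.can_cast(np.complex128, np.float64, casting="same_kind")
+assert np.can_cast(np.complex128, np.complex128, casting="same_kind")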
+AR_O **= AR_LIKE_u +AR_O **= AR_LIKE_i +AR_O **= AR_LIKE_f +AR_O **= AR_LIKE_c +AR_O **= AR_LIKE_O + +# unary ops + +-c16 +-c8 +-f8 +-f4 +-i8 +-i4 +with pytest.warns(RuntimeWarning): + -u8 + -u4 +-td +-AR_f + ++c16 ++c8 ++f8 ++f4 ++i8 ++i4 ++u8 ++u4 ++td ++AR_f + +abs(c16) +abs(c8) +abs(f8) +abs(f4) +abs(i8) +abs(i4) +abs(u8) +abs(u4) +abs(td) +abs(b_) +abs(AR_f) + +# Time structures + +dt + td +dt + i +dt + i4 +dt + i8 +dt - dt +dt - i +dt - i4 +dt - i8 + +td + td +td + i +td + i4 +td + i8 +td - td +td - i +td - i4 +td - i8 +td / f +td / f4 +td / f8 +td / td +td // td +td % td + + +# boolean + +b_ / b +b_ / b_ +b_ / i +b_ / i8 +b_ / i4 +b_ / u8 +b_ / u4 +b_ / f +b_ / f8 +b_ / f4 +b_ / c +b_ / c16 +b_ / c8 + +b / b_ +b_ / b_ +i / b_ +i8 / b_ +i4 / b_ +u8 / b_ +u4 / b_ +f / b_ +f8 / b_ +f4 / b_ +c / b_ +c16 / b_ +c8 / b_ + +# Complex + +c16 + c16 +c16 + f8 +c16 + i8 +c16 + c8 +c16 + f4 +c16 + i4 +c16 + b_ +c16 + b +c16 + c +c16 + f +c16 + i +c16 + AR_f + +c16 + c16 +f8 + c16 +i8 + c16 +c8 + c16 +f4 + c16 +i4 + c16 +b_ + c16 +b + c16 +c + c16 +f + c16 +i + c16 +AR_f + c16 + +c8 + c16 +c8 + f8 +c8 + i8 +c8 + c8 +c8 + f4 +c8 + i4 +c8 + b_ +c8 + b +c8 + c +c8 + f +c8 + i +c8 + AR_f + +c16 + c8 +f8 + c8 +i8 + c8 +c8 + c8 +f4 + c8 +i4 + c8 +b_ + c8 +b + c8 +c + c8 +f + c8 +i + c8 +AR_f + c8 + +# Float + +f8 + f8 +f8 + i8 +f8 + f4 +f8 + i4 +f8 + b_ +f8 + b +f8 + c +f8 + f +f8 + i +f8 + AR_f + +f8 + f8 +i8 + f8 +f4 + f8 +i4 + f8 +b_ + f8 +b + f8 +c + f8 +f + f8 +i + f8 +AR_f + f8 + +f4 + f8 +f4 + i8 +f4 + f4 +f4 + i4 +f4 + b_ +f4 + b +f4 + c +f4 + f +f4 + i +f4 + AR_f + +f8 + f4 +i8 + f4 +f4 + f4 +i4 + f4 +b_ + f4 +b + f4 +c + f4 +f + f4 +i + f4 +AR_f + f4 + +# Int + +i8 + i8 +i8 + u8 +i8 + i4 +i8 + u4 +i8 + b_ +i8 + b +i8 + c +i8 + f +i8 + i +i8 + AR_f + +u8 + u8 +u8 + i4 +u8 + u4 +u8 + b_ +u8 + b +u8 + c +u8 + f +u8 + i +u8 + AR_f + +i8 + i8 +u8 + i8 +i4 + i8 +u4 + i8 +b_ + i8 +b + i8 +c + i8 +f + i8 +i + i8 +AR_f + i8 + +u8 + u8 +i4 + u8 +u4 + u8 +b_ + u8 +b + u8 +c + u8 +f + u8 +i + u8 +AR_f + u8 + +i4 + i8 +i4 + i4 +i4 + i +i4 + b_ +i4 + b +i4 + AR_f + +u4 + i8 +u4 + i4 +u4 + u8 +u4 + u4 +u4 + i +u4 + b_ +u4 + b +u4 + AR_f + +i8 + i4 +i4 + i4 +i + i4 +b_ + i4 +b + i4 +AR_f + i4 + +i8 + u4 +i4 + u4 +u8 + u4 +u4 + u4 +b_ + u4 +b + u4 +i + u4 +AR_f + u4 diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/array_constructors.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/array_constructors.py new file mode 100644 index 0000000..17b6fab --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/array_constructors.py @@ -0,0 +1,137 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +class Index: + def __index__(self) -> int: + return 0 + + +class SubClass(npt.NDArray[np.float64]): + pass + + +def func(i: int, j: int, **kwargs: Any) -> SubClass: + return B + + +i8 = np.int64(1) + +A = np.array([1]) +B = A.view(SubClass).copy() +B_stack = np.array([[1], [1]]).view(SubClass) +C = [1] + +np.ndarray(Index()) +np.ndarray([Index()]) + +np.array(1, dtype=float) +np.array(1, copy=None) +np.array(1, order='F') +np.array(1, order=None) +np.array(1, subok=True) +np.array(1, ndmin=3) +np.array(1, str, copy=True, order='C', subok=False, ndmin=2) + +np.asarray(A) +np.asarray(B) +np.asarray(C) + +np.asanyarray(A) +np.asanyarray(B) +np.asanyarray(B, dtype=int) +np.asanyarray(C) + +np.ascontiguousarray(A) +np.ascontiguousarray(B) +np.ascontiguousarray(C) + +np.asfortranarray(A) +np.asfortranarray(B) 
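+# Illustrative aside (editor sketch, not part of the vendored file): A is a
+# plain ndarray, B an ndarray-subclass view, and C a bare list, so each
+# converter in this file is exercised against an array, a subclass, and a
+# plain sequence:
+assert isinstance(B, SubClass) and isinstance(np.asfortranarray(C), np.ndarray)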
+np.asfortranarray(C) + +np.require(A) +np.require(B) +np.require(B, dtype=int) +np.require(B, requirements=None) +np.require(B, requirements="E") +np.require(B, requirements=["ENSUREARRAY"]) +np.require(B, requirements={"F", "E"}) +np.require(B, requirements=["C", "OWNDATA"]) +np.require(B, requirements="W") +np.require(B, requirements="A") +np.require(C) + +np.linspace(0, 2) +np.linspace(0.5, [0, 1, 2]) +np.linspace([0, 1, 2], 3) +np.linspace(0j, 2) +np.linspace(0, 2, num=10) +np.linspace(0, 2, endpoint=True) +np.linspace(0, 2, retstep=True) +np.linspace(0j, 2j, retstep=True) +np.linspace(0, 2, dtype=bool) +np.linspace([0, 1], [2, 3], axis=Index()) + +np.logspace(0, 2, base=2) +np.logspace(0, 2, base=2) +np.logspace(0, 2, base=[1j, 2j], num=2) + +np.geomspace(1, 2) + +np.zeros_like(A) +np.zeros_like(C) +np.zeros_like(B) +np.zeros_like(B, dtype=np.int64) + +np.ones_like(A) +np.ones_like(C) +np.ones_like(B) +np.ones_like(B, dtype=np.int64) + +np.empty_like(A) +np.empty_like(C) +np.empty_like(B) +np.empty_like(B, dtype=np.int64) + +np.full_like(A, i8) +np.full_like(C, i8) +np.full_like(B, i8) +np.full_like(B, i8, dtype=np.int64) + +np.ones(1) +np.ones([1, 1, 1]) + +np.full(1, i8) +np.full([1, 1, 1], i8) + +np.indices([1, 2, 3]) +np.indices([1, 2, 3], sparse=True) + +np.fromfunction(func, (3, 5)) + +np.identity(10) + +np.atleast_1d(C) +np.atleast_1d(A) +np.atleast_1d(C, C) +np.atleast_1d(C, A) +np.atleast_1d(A, A) + +np.atleast_2d(C) + +np.atleast_3d(C) + +np.vstack([C, C]) +np.vstack([C, A]) +np.vstack([A, A]) + +np.hstack([C, C]) + +np.stack([C, C]) +np.stack([C, C], axis=0) +np.stack([C, C], out=B_stack) + +np.block([[C, C], [C, C]]) +np.block(A) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/array_like.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/array_like.py new file mode 100644 index 0000000..264ec55 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/array_like.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from numpy._typing import NDArray, ArrayLike, _SupportsArray + +x1: ArrayLike = True +x2: ArrayLike = 5 +x3: ArrayLike = 1.0 +x4: ArrayLike = 1 + 1j +x5: ArrayLike = np.int8(1) +x6: ArrayLike = np.float64(1) +x7: ArrayLike = np.complex128(1) +x8: ArrayLike = np.array([1, 2, 3]) +x9: ArrayLike = [1, 2, 3] +x10: ArrayLike = (1, 2, 3) +x11: ArrayLike = "foo" +x12: ArrayLike = memoryview(b'foo') + + +class A: + def __array__(self, dtype: np.dtype | None = None) -> NDArray[np.float64]: + return np.array([1.0, 2.0, 3.0]) + + +x13: ArrayLike = A() + +scalar: _SupportsArray[np.dtype[np.int64]] = np.int64(1) +scalar.__array__() +array: _SupportsArray[np.dtype[np.int_]] = np.array(1) +array.__array__() + +a: _SupportsArray[np.dtype[np.float64]] = A() +a.__array__() +a.__array__() + +# Escape hatch for when you mean to make something like an object +# array. 
+object_array_scalar: object = (i for i in range(10)) +np.array(object_array_scalar) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arrayprint.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arrayprint.py new file mode 100644 index 0000000..6c704c7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arrayprint.py @@ -0,0 +1,37 @@ +import numpy as np + +AR = np.arange(10) +AR.setflags(write=False) + +with np.printoptions(): + np.set_printoptions( + precision=1, + threshold=2, + edgeitems=3, + linewidth=4, + suppress=False, + nanstr="Bob", + infstr="Bill", + formatter={}, + sign="+", + floatmode="unique", + ) + np.get_printoptions() + str(AR) + + np.array2string( + AR, + max_line_width=5, + precision=2, + suppress_small=True, + separator=";", + prefix="test", + threshold=5, + floatmode="fixed", + suffix="?", + legacy="1.13", + ) + np.format_float_scientific(1, precision=5) + np.format_float_positional(1, trim="k") + np.array_repr(AR) + np.array_str(AR) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arrayterator.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arrayterator.py new file mode 100644 index 0000000..572be5e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/arrayterator.py @@ -0,0 +1,27 @@ + +from __future__ import annotations + +from typing import Any +import numpy as np + +AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) +ar_iter = np.lib.Arrayterator(AR_i8) + +ar_iter.var +ar_iter.buf_size +ar_iter.start +ar_iter.stop +ar_iter.step +ar_iter.shape +ar_iter.flat + +ar_iter.__array__() + +for i in ar_iter: + pass + +ar_iter[0] +ar_iter[...] +ar_iter[:] +ar_iter[0, 0, 0] +ar_iter[..., 0, :] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py new file mode 100644 index 0000000..22a245d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py @@ -0,0 +1,131 @@ +import numpy as np + +i8 = np.int64(1) +u8 = np.uint64(1) + +i4 = np.int32(1) +u4 = np.uint32(1) + +b_ = np.bool(1) + +b = bool(1) +i = int(1) + +AR = np.array([0, 1, 2], dtype=np.int32) +AR.setflags(write=False) + + +i8 << i8 +i8 >> i8 +i8 | i8 +i8 ^ i8 +i8 & i8 + +i << AR +i >> AR +i | AR +i ^ AR +i & AR + +i8 << AR +i8 >> AR +i8 | AR +i8 ^ AR +i8 & AR + +i4 << i4 +i4 >> i4 +i4 | i4 +i4 ^ i4 +i4 & i4 + +i8 << i4 +i8 >> i4 +i8 | i4 +i8 ^ i4 +i8 & i4 + +i8 << i +i8 >> i +i8 | i +i8 ^ i +i8 & i + +i8 << b_ +i8 >> b_ +i8 | b_ +i8 ^ b_ +i8 & b_ + +i8 << b +i8 >> b +i8 | b +i8 ^ b +i8 & b + +u8 << u8 +u8 >> u8 +u8 | u8 +u8 ^ u8 +u8 & u8 + +u4 << u4 +u4 >> u4 +u4 | u4 +u4 ^ u4 +u4 & u4 + +u4 << i4 +u4 >> i4 +u4 | i4 +u4 ^ i4 +u4 & i4 + +u4 << i +u4 >> i +u4 | i +u4 ^ i +u4 & i + +u8 << b_ +u8 >> b_ +u8 | b_ +u8 ^ b_ +u8 & b_ + +u8 << b +u8 >> b +u8 | b +u8 ^ b +u8 & b + +b_ << b_ +b_ >> b_ +b_ | b_ +b_ ^ b_ +b_ & b_ + +b_ << AR +b_ >> AR +b_ | AR +b_ ^ AR +b_ & AR + +b_ << b +b_ >> b +b_ | b +b_ ^ b +b_ & b + +b_ << i +b_ >> i +b_ | i +b_ ^ i +b_ & i + +~i8 +~i4 +~u8 +~u4 +~b_ +~AR diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/comparisons.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/comparisons.py new file mode 100644 index 0000000..a461d8b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/comparisons.py @@ -0,0 
+1,315 @@ +from __future__ import annotations + +from typing import cast, Any +import numpy as np + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool() + +b = bool() +c = complex() +f = float() +i = int() + +SEQ = (0, 1, 2, 3, 4) + +AR_b: np.ndarray[Any, np.dtype[np.bool]] = np.array([True]) +AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) +AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) +AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) +AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1.0j]) +AR_S: np.ndarray[Any, np.dtype[np.bytes_]] = np.array([b"a"], "S") +AR_T = cast(np.ndarray[Any, np.dtypes.StringDType], np.array(["a"], "T")) +AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.array(["a"], "U") +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) +AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) + +# Arrays + +AR_b > AR_b +AR_b > AR_u +AR_b > AR_i +AR_b > AR_f +AR_b > AR_c + +AR_u > AR_b +AR_u > AR_u +AR_u > AR_i +AR_u > AR_f +AR_u > AR_c + +AR_i > AR_b +AR_i > AR_u +AR_i > AR_i +AR_i > AR_f +AR_i > AR_c + +AR_f > AR_b +AR_f > AR_u +AR_f > AR_i +AR_f > AR_f +AR_f > AR_c + +AR_c > AR_b +AR_c > AR_u +AR_c > AR_i +AR_c > AR_f +AR_c > AR_c + +AR_S > AR_S +AR_S > b"" + +AR_T > AR_T +AR_T > AR_U +AR_T > "" + +AR_U > AR_U +AR_U > AR_T +AR_U > "" + +AR_m > AR_b +AR_m > AR_u +AR_m > AR_i +AR_b > AR_m +AR_u > AR_m +AR_i > AR_m + +AR_M > AR_M + +AR_O > AR_O +1 > AR_O +AR_O > 1 + +# Time structures + +dt > dt + +td > td +td > i +td > i4 +td > i8 +td > AR_i +td > SEQ + +# boolean + +b_ > b +b_ > b_ +b_ > i +b_ > i8 +b_ > i4 +b_ > u8 +b_ > u4 +b_ > f +b_ > f8 +b_ > f4 +b_ > c +b_ > c16 +b_ > c8 +b_ > AR_i +b_ > SEQ + +# Complex + +c16 > c16 +c16 > f8 +c16 > i8 +c16 > c8 +c16 > f4 +c16 > i4 +c16 > b_ +c16 > b +c16 > c +c16 > f +c16 > i +c16 > AR_i +c16 > SEQ + +c16 > c16 +f8 > c16 +i8 > c16 +c8 > c16 +f4 > c16 +i4 > c16 +b_ > c16 +b > c16 +c > c16 +f > c16 +i > c16 +AR_i > c16 +SEQ > c16 + +c8 > c16 +c8 > f8 +c8 > i8 +c8 > c8 +c8 > f4 +c8 > i4 +c8 > b_ +c8 > b +c8 > c +c8 > f +c8 > i +c8 > AR_i +c8 > SEQ + +c16 > c8 +f8 > c8 +i8 > c8 +c8 > c8 +f4 > c8 +i4 > c8 +b_ > c8 +b > c8 +c > c8 +f > c8 +i > c8 +AR_i > c8 +SEQ > c8 + +# Float + +f8 > f8 +f8 > i8 +f8 > f4 +f8 > i4 +f8 > b_ +f8 > b +f8 > c +f8 > f +f8 > i +f8 > AR_i +f8 > SEQ + +f8 > f8 +i8 > f8 +f4 > f8 +i4 > f8 +b_ > f8 +b > f8 +c > f8 +f > f8 +i > f8 +AR_i > f8 +SEQ > f8 + +f4 > f8 +f4 > i8 +f4 > f4 +f4 > i4 +f4 > b_ +f4 > b +f4 > c +f4 > f +f4 > i +f4 > AR_i +f4 > SEQ + +f8 > f4 +i8 > f4 +f4 > f4 +i4 > f4 +b_ > f4 +b > f4 +c > f4 +f > f4 +i > f4 +AR_i > f4 +SEQ > f4 + +# Int + +i8 > i8 +i8 > u8 +i8 > i4 +i8 > u4 +i8 > b_ +i8 > b +i8 > c +i8 > f +i8 > i +i8 > AR_i +i8 > SEQ + +u8 > u8 +u8 > i4 +u8 > u4 +u8 > b_ +u8 > b +u8 > c +u8 > f +u8 > i +u8 > AR_i +u8 > SEQ + +i8 > i8 +u8 > i8 +i4 > i8 +u4 > i8 +b_ > i8 +b > i8 +c > i8 +f > i8 +i > i8 +AR_i > i8 +SEQ > i8 + +u8 > u8 +i4 > u8 +u4 > u8 +b_ > u8 +b > u8 +c > u8 +f > u8 +i > u8 +AR_i > u8 +SEQ > u8 + +i4 > i8 +i4 > i4 +i4 > i +i4 > b_ +i4 > b +i4 > AR_i +i4 > SEQ + +u4 > i8 +u4 > i4 +u4 > u8 +u4 > u4 +u4 > i +u4 > b_ +u4 > b +u4 > AR_i +u4 > SEQ + +i8 > i4 +i4 > i4 +i > i4 +b_ > i4 +b > i4 +AR_i > i4 +SEQ > i4 + +i8 > u4 +i4 
> u4 +u8 > u4 +u4 > u4 +b_ > u4 +b > u4 +i > u4 +AR_i > u4 +SEQ > u4 diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/dtype.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/dtype.py new file mode 100644 index 0000000..9f11518 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/dtype.py @@ -0,0 +1,57 @@ +import numpy as np + +dtype_obj = np.dtype(np.str_) +void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)]) + +np.dtype(dtype=np.int64) +np.dtype(int) +np.dtype("int") +np.dtype(None) + +np.dtype((int, 2)) +np.dtype((int, (1,))) + +np.dtype({"names": ["a", "b"], "formats": [int, float]}) +np.dtype({"names": ["a"], "formats": [int], "titles": [object]}) +np.dtype({"names": ["a"], "formats": [int], "titles": [object()]}) + +np.dtype([("name", np.str_, 16), ("grades", np.float64, (2,)), ("age", "int32")]) + +np.dtype( + { + "names": ["a", "b"], + "formats": [int, float], + "itemsize": 9, + "aligned": False, + "titles": ["x", "y"], + "offsets": [0, 1], + } +) + +np.dtype((np.float64, float)) + + +class Test: + dtype = np.dtype(float) + + +np.dtype(Test()) + +# Methods and attributes +dtype_obj.base +dtype_obj.subdtype +dtype_obj.newbyteorder() +dtype_obj.type +dtype_obj.name +dtype_obj.names + +dtype_obj * 0 +dtype_obj * 2 + +0 * dtype_obj +2 * dtype_obj + +void_dtype_obj["f0"] +void_dtype_obj[0] +void_dtype_obj[["f0", "f1"]] +void_dtype_obj[["f0"]] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/einsumfunc.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/einsumfunc.py new file mode 100644 index 0000000..429764e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/einsumfunc.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + +AR_LIKE_b = [True, True, True] +AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)] +AR_LIKE_i = [1, 2, 3] +AR_LIKE_f = [1.0, 2.0, 3.0] +AR_LIKE_c = [1j, 2j, 3j] +AR_LIKE_U = ["1", "2", "3"] + +OUT_f: np.ndarray[Any, np.dtype[np.float64]] = np.empty(3, dtype=np.float64) +OUT_c: np.ndarray[Any, np.dtype[np.complex128]] = np.empty(3, dtype=np.complex128) + +np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b) +np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u) +np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i) +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f) +np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c) +np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i) +np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c) + +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16") +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe") +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, out=OUT_c) +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=int, casting="unsafe", out=OUT_f) + +np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b) +np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u) +np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i) +np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f) +np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c) +np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i) +np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/flatiter.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/flatiter.py new file mode 100644 index 0000000..e64e426 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/flatiter.py @@ -0,0 +1,19 @@ +import numpy as np + +a = 
np.empty((2, 2)).flat + +a.base +a.copy() +a.coords +a.index +iter(a) +next(a) +a[0] +a[[0, 1, 2]] +a[...] +a[:] +a.__array__() +a.__array__(np.dtype(np.float64)) + +b = np.array([1]).flat +a[b] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/fromnumeric.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/fromnumeric.py new file mode 100644 index 0000000..7cc2bcf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/fromnumeric.py @@ -0,0 +1,272 @@ +"""Tests for :mod:`numpy._core.fromnumeric`.""" + +import numpy as np + +A = np.array(True, ndmin=2, dtype=bool) +B = np.array(1.0, ndmin=2, dtype=np.float32) +A.setflags(write=False) +B.setflags(write=False) + +a = np.bool(True) +b = np.float32(1.0) +c = 1.0 +d = np.array(1.0, dtype=np.float32) # writeable + +np.take(a, 0) +np.take(b, 0) +np.take(c, 0) +np.take(A, 0) +np.take(B, 0) +np.take(A, [0]) +np.take(B, [0]) + +np.reshape(a, 1) +np.reshape(b, 1) +np.reshape(c, 1) +np.reshape(A, 1) +np.reshape(B, 1) + +np.choose(a, [True, True]) +np.choose(A, [1.0, 1.0]) + +np.repeat(a, 1) +np.repeat(b, 1) +np.repeat(c, 1) +np.repeat(A, 1) +np.repeat(B, 1) + +np.swapaxes(A, 0, 0) +np.swapaxes(B, 0, 0) + +np.transpose(a) +np.transpose(b) +np.transpose(c) +np.transpose(A) +np.transpose(B) + +np.partition(a, 0, axis=None) +np.partition(b, 0, axis=None) +np.partition(c, 0, axis=None) +np.partition(A, 0) +np.partition(B, 0) + +np.argpartition(a, 0) +np.argpartition(b, 0) +np.argpartition(c, 0) +np.argpartition(A, 0) +np.argpartition(B, 0) + +np.sort(A, 0) +np.sort(B, 0) + +np.argsort(A, 0) +np.argsort(B, 0) + +np.argmax(A) +np.argmax(B) +np.argmax(A, axis=0) +np.argmax(B, axis=0) + +np.argmin(A) +np.argmin(B) +np.argmin(A, axis=0) +np.argmin(B, axis=0) + +np.searchsorted(A[0], 0) +np.searchsorted(B[0], 0) +np.searchsorted(A[0], [0]) +np.searchsorted(B[0], [0]) + +np.resize(a, (5, 5)) +np.resize(b, (5, 5)) +np.resize(c, (5, 5)) +np.resize(A, (5, 5)) +np.resize(B, (5, 5)) + +np.squeeze(a) +np.squeeze(b) +np.squeeze(c) +np.squeeze(A) +np.squeeze(B) + +np.diagonal(A) +np.diagonal(B) + +np.trace(A) +np.trace(B) + +np.ravel(a) +np.ravel(b) +np.ravel(c) +np.ravel(A) +np.ravel(B) + +np.nonzero(A) +np.nonzero(B) + +np.shape(a) +np.shape(b) +np.shape(c) +np.shape(A) +np.shape(B) + +np.compress([True], a) +np.compress([True], b) +np.compress([True], c) +np.compress([True], A) +np.compress([True], B) + +np.clip(a, 0, 1.0) +np.clip(b, -1, 1) +np.clip(a, 0, None) +np.clip(b, None, 1) +np.clip(c, 0, 1) +np.clip(A, 0, 1) +np.clip(B, 0, 1) +np.clip(B, [0, 1], [1, 2]) + +np.sum(a) +np.sum(b) +np.sum(c) +np.sum(A) +np.sum(B) +np.sum(A, axis=0) +np.sum(B, axis=0) + +np.all(a) +np.all(b) +np.all(c) +np.all(A) +np.all(B) +np.all(A, axis=0) +np.all(B, axis=0) +np.all(A, keepdims=True) +np.all(B, keepdims=True) + +np.any(a) +np.any(b) +np.any(c) +np.any(A) +np.any(B) +np.any(A, axis=0) +np.any(B, axis=0) +np.any(A, keepdims=True) +np.any(B, keepdims=True) + +np.cumsum(a) +np.cumsum(b) +np.cumsum(c) +np.cumsum(A) +np.cumsum(B) + +np.cumulative_sum(a) +np.cumulative_sum(b) +np.cumulative_sum(c) +np.cumulative_sum(A, axis=0) +np.cumulative_sum(B, axis=0) + +np.ptp(b) +np.ptp(c) +np.ptp(B) +np.ptp(B, axis=0) +np.ptp(B, keepdims=True) + +np.amax(a) +np.amax(b) +np.amax(c) +np.amax(A) +np.amax(B) +np.amax(A, axis=0) +np.amax(B, axis=0) +np.amax(A, keepdims=True) +np.amax(B, keepdims=True) + +np.amin(a) +np.amin(b) +np.amin(c) +np.amin(A) +np.amin(B) +np.amin(A, axis=0) +np.amin(B, axis=0) +np.amin(A, 
keepdims=True) +np.amin(B, keepdims=True) + +np.prod(a) +np.prod(b) +np.prod(c) +np.prod(A) +np.prod(B) +np.prod(a, dtype=None) +np.prod(A, dtype=None) +np.prod(A, axis=0) +np.prod(B, axis=0) +np.prod(A, keepdims=True) +np.prod(B, keepdims=True) +np.prod(b, out=d) +np.prod(B, out=d) + +np.cumprod(a) +np.cumprod(b) +np.cumprod(c) +np.cumprod(A) +np.cumprod(B) + +np.cumulative_prod(a) +np.cumulative_prod(b) +np.cumulative_prod(c) +np.cumulative_prod(A, axis=0) +np.cumulative_prod(B, axis=0) + +np.ndim(a) +np.ndim(b) +np.ndim(c) +np.ndim(A) +np.ndim(B) + +np.size(a) +np.size(b) +np.size(c) +np.size(A) +np.size(B) + +np.around(a) +np.around(b) +np.around(c) +np.around(A) +np.around(B) + +np.mean(a) +np.mean(b) +np.mean(c) +np.mean(A) +np.mean(B) +np.mean(A, axis=0) +np.mean(B, axis=0) +np.mean(A, keepdims=True) +np.mean(B, keepdims=True) +np.mean(b, out=d) +np.mean(B, out=d) + +np.std(a) +np.std(b) +np.std(c) +np.std(A) +np.std(B) +np.std(A, axis=0) +np.std(B, axis=0) +np.std(A, keepdims=True) +np.std(B, keepdims=True) +np.std(b, out=d) +np.std(B, out=d) + +np.var(a) +np.var(b) +np.var(c) +np.var(A) +np.var(B) +np.var(A, axis=0) +np.var(B, axis=0) +np.var(A, keepdims=True) +np.var(B, keepdims=True) +np.var(b, out=d) +np.var(B, out=d) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/index_tricks.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/index_tricks.py new file mode 100644 index 0000000..dfc4ff2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/index_tricks.py @@ -0,0 +1,60 @@ +from __future__ import annotations +from typing import Any +import numpy as np + +AR_LIKE_b = [[True, True], [True, True]] +AR_LIKE_i = [[1, 2], [3, 4]] +AR_LIKE_f = [[1.0, 2.0], [3.0, 4.0]] +AR_LIKE_U = [["1", "2"], ["3", "4"]] + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] = np.array(AR_LIKE_i, dtype=np.int64) + +np.ndenumerate(AR_i8) +np.ndenumerate(AR_LIKE_f) +np.ndenumerate(AR_LIKE_U) + +next(np.ndenumerate(AR_i8)) +next(np.ndenumerate(AR_LIKE_f)) +next(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndenumerate(AR_i8)) +iter(np.ndenumerate(AR_LIKE_f)) +iter(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndindex(1, 2, 3)) +next(np.ndindex(1, 2, 3)) + +np.unravel_index([22, 41, 37], (7, 6)) +np.unravel_index([31, 41, 13], (7, 6), order='F') +np.unravel_index(1621, (6, 7, 8, 9)) + +np.ravel_multi_index(AR_LIKE_i, (7, 6)) +np.ravel_multi_index(AR_LIKE_i, (7, 6), order='F') +np.ravel_multi_index(AR_LIKE_i, (4, 6), mode='clip') +np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=('clip', 'wrap')) +np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)) + +np.mgrid[1:1:2] +np.mgrid[1:1:2, None:10] + +np.ogrid[1:1:2] +np.ogrid[1:1:2, None:10] + +np.index_exp[0:1] +np.index_exp[0:1, None:3] +np.index_exp[0, 0:1, ..., [0, 1, 3]] + +np.s_[0:1] +np.s_[0:1, None:3] +np.s_[0, 0:1, ..., [0, 1, 3]] + +np.ix_(AR_LIKE_b[0]) +np.ix_(AR_LIKE_i[0], AR_LIKE_f[0]) +np.ix_(AR_i8[0]) + +np.fill_diagonal(AR_i8, 5) + +np.diag_indices(4) +np.diag_indices(2, 3) + +np.diag_indices_from(AR_i8) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_user_array.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_user_array.py new file mode 100644 index 0000000..62b7e85 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_user_array.py @@ -0,0 +1,22 @@ +"""Based on the `if __name__ == "__main__"` test code in `lib/_user_array_impl.py`.""" + +from __future__ import annotations + +import numpy as np +from 
numpy.lib.user_array import container + +N = 10_000 +W = H = int(N**0.5) + +a: np.ndarray[tuple[int, int], np.dtype[np.int32]] +ua: container[tuple[int, int], np.dtype[np.int32]] + +a = np.arange(N, dtype=np.int32).reshape(W, H) +ua = container(a) + +ua_small: container[tuple[int, int], np.dtype[np.int32]] = ua[:3, :5] +ua_small[0, 0] = 10 + +ua_bool: container[tuple[int, int], np.dtype[np.bool]] = ua_small > 1 + +# shape: tuple[int, int] = np.shape(ua) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_utils.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_utils.py new file mode 100644 index 0000000..f9b3381 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_utils.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +from io import StringIO + +import numpy as np +import numpy.lib.array_utils as array_utils + +FILE = StringIO() +AR = np.arange(10, dtype=np.float64) + + +def func(a: int) -> bool: + return True + + +array_utils.byte_bounds(AR) +array_utils.byte_bounds(np.float64()) + +np.info(1, output=FILE) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_version.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_version.py new file mode 100644 index 0000000..f3825ec --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/lib_version.py @@ -0,0 +1,18 @@ +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +version.vstring +version.version +version.major +version.minor +version.bugfix +version.pre_release +version.is_devversion + +version == version +version != version +version < "1.8.0" +version <= version +version > version +version >= "1.8.0" diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/literal.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/literal.py new file mode 100644 index 0000000..c8fa476 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/literal.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from typing import Any, TYPE_CHECKING +from functools import partial + +import pytest +import numpy as np + +if TYPE_CHECKING: + from collections.abc import Callable + +AR = np.array(0) +AR.setflags(write=False) + +KACF = frozenset({None, "K", "A", "C", "F"}) +ACF = frozenset({None, "A", "C", "F"}) +CF = frozenset({None, "C", "F"}) + +order_list: list[tuple[frozenset[str | None], Callable[..., Any]]] = [ + (KACF, AR.tobytes), + (KACF, partial(AR.astype, int)), + (KACF, AR.copy), + (ACF, partial(AR.reshape, 1)), + (KACF, AR.flatten), + (KACF, AR.ravel), + (KACF, partial(np.array, 1)), + # NOTE: __call__ is needed due to mypy bugs (#17620, #17631) + (KACF, partial(np.ndarray.__call__, 1)), + (CF, partial(np.zeros.__call__, 1)), + (CF, partial(np.ones.__call__, 1)), + (CF, partial(np.empty.__call__, 1)), + (CF, partial(np.full, 1, 1)), + (KACF, partial(np.zeros_like, AR)), + (KACF, partial(np.ones_like, AR)), + (KACF, partial(np.empty_like, AR)), + (KACF, partial(np.full_like, AR, 1)), + (KACF, partial(np.add.__call__, 1, 1)), # i.e. 
np.ufunc.__call__ + (ACF, partial(np.reshape, AR, 1)), + (KACF, partial(np.ravel, AR)), + (KACF, partial(np.asarray, 1)), + (KACF, partial(np.asanyarray, 1)), +] + +for order_set, func in order_list: + for order in order_set: + func(order=order) + + invalid_orders = KACF - order_set + for order in invalid_orders: + with pytest.raises(ValueError): + func(order=order) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ma.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ma.py new file mode 100644 index 0000000..e7915a5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ma.py @@ -0,0 +1,174 @@ +from typing import Any, TypeAlias, TypeVar, cast + +import numpy as np +import numpy.typing as npt +from numpy._typing import _Shape + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] + +MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) +MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) +MAR_i: MaskedArray[np.int64] = np.ma.MaskedArray([1]) +MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) +MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) +MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) +MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) +MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) + +AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] + +MAR_f.mask = AR_b +MAR_f.mask = np.False_ + +# Inplace addition + +MAR_b += AR_LIKE_b + +MAR_u += AR_LIKE_b +MAR_u += AR_LIKE_u + +MAR_i += AR_LIKE_b +MAR_i += 2 +MAR_i += AR_LIKE_i + +MAR_f += AR_LIKE_b +MAR_f += 2 +MAR_f += AR_LIKE_u +MAR_f += AR_LIKE_i +MAR_f += AR_LIKE_f + +MAR_c += AR_LIKE_b +MAR_c += AR_LIKE_u +MAR_c += AR_LIKE_i +MAR_c += AR_LIKE_f +MAR_c += AR_LIKE_c + +MAR_td64 += AR_LIKE_b +MAR_td64 += AR_LIKE_u +MAR_td64 += AR_LIKE_i +MAR_td64 += AR_LIKE_m +MAR_M_dt64 += AR_LIKE_b +MAR_M_dt64 += AR_LIKE_u +MAR_M_dt64 += AR_LIKE_i +MAR_M_dt64 += AR_LIKE_m + +MAR_S += b'snakes' +MAR_U += 'snakes' +MAR_T += 'snakes' + +# Inplace subtraction + +MAR_u -= AR_LIKE_b +MAR_u -= AR_LIKE_u + +MAR_i -= AR_LIKE_b +MAR_i -= AR_LIKE_i + +MAR_f -= AR_LIKE_b +MAR_f -= AR_LIKE_u +MAR_f -= AR_LIKE_i +MAR_f -= AR_LIKE_f + +MAR_c -= AR_LIKE_b +MAR_c -= AR_LIKE_u +MAR_c -= AR_LIKE_i +MAR_c -= AR_LIKE_f +MAR_c -= AR_LIKE_c + +MAR_td64 -= AR_LIKE_b +MAR_td64 -= AR_LIKE_u +MAR_td64 -= AR_LIKE_i +MAR_td64 -= AR_LIKE_m +MAR_M_dt64 -= AR_LIKE_b +MAR_M_dt64 -= AR_LIKE_u +MAR_M_dt64 -= AR_LIKE_i +MAR_M_dt64 -= AR_LIKE_m + +# Inplace floor division + +MAR_f //= AR_LIKE_b +MAR_f //= 2 +MAR_f //= AR_LIKE_u +MAR_f //= AR_LIKE_i +MAR_f //= AR_LIKE_f + +MAR_td64 //= AR_LIKE_i + +# Inplace true division + +MAR_f /= AR_LIKE_b +MAR_f /= 2 +MAR_f /= AR_LIKE_u +MAR_f /= AR_LIKE_i +MAR_f /= AR_LIKE_f + +MAR_c /= AR_LIKE_b +MAR_c /= AR_LIKE_u +MAR_c /= AR_LIKE_i +MAR_c /= AR_LIKE_f +MAR_c /= AR_LIKE_c + +MAR_td64 /= AR_LIKE_i + +# Inplace multiplication + +MAR_b *= AR_LIKE_b + +MAR_u *= AR_LIKE_b +MAR_u *= AR_LIKE_u + +MAR_i *= AR_LIKE_b +MAR_i *= 2 
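+# Illustrative aside (editor sketch, not part of the vendored file): in-place
+# arithmetic on a MaskedArray updates the data while the mask is preserved.
+# `_m` is a throwaway local introduced only for this sketch:
+_m = np.ma.MaskedArray([1.0, 2.0], mask=[True, False])
+_m *= 2.0
+assert bool(_m.mask[0]) and float(_m[1]) == 4.0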
+MAR_i *= AR_LIKE_i + +MAR_f *= AR_LIKE_b +MAR_f *= 2 +MAR_f *= AR_LIKE_u +MAR_f *= AR_LIKE_i +MAR_f *= AR_LIKE_f + +MAR_c *= AR_LIKE_b +MAR_c *= AR_LIKE_u +MAR_c *= AR_LIKE_i +MAR_c *= AR_LIKE_f +MAR_c *= AR_LIKE_c + +MAR_td64 *= AR_LIKE_b +MAR_td64 *= AR_LIKE_u +MAR_td64 *= AR_LIKE_i +MAR_td64 *= AR_LIKE_f + +MAR_S *= 2 +MAR_U *= 2 +MAR_T *= 2 + +# Inplace power + +MAR_u **= AR_LIKE_b +MAR_u **= AR_LIKE_u + +MAR_i **= AR_LIKE_b +MAR_i **= AR_LIKE_i + +MAR_f **= AR_LIKE_b +MAR_f **= AR_LIKE_u +MAR_f **= AR_LIKE_i +MAR_f **= AR_LIKE_f + +MAR_c **= AR_LIKE_b +MAR_c **= AR_LIKE_u +MAR_c **= AR_LIKE_i +MAR_c **= AR_LIKE_f +MAR_c **= AR_LIKE_c diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/mod.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/mod.py new file mode 100644 index 0000000..2b7e6cd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/mod.py @@ -0,0 +1,149 @@ +import numpy as np + +f8 = np.float64(1) +i8 = np.int64(1) +u8 = np.uint64(1) + +f4 = np.float32(1) +i4 = np.int32(1) +u4 = np.uint32(1) + +td = np.timedelta64(1, "D") +b_ = np.bool(1) + +b = bool(1) +f = float(1) +i = int(1) + +AR = np.array([1], dtype=np.bool) +AR.setflags(write=False) + +AR2 = np.array([1], dtype=np.timedelta64) +AR2.setflags(write=False) + +# Time structures + +td % td +td % AR2 +AR2 % td + +divmod(td, td) +divmod(td, AR2) +divmod(AR2, td) + +# Bool + +b_ % b +b_ % i +b_ % f +b_ % b_ +b_ % i8 +b_ % u8 +b_ % f8 +b_ % AR + +divmod(b_, b) +divmod(b_, i) +divmod(b_, f) +divmod(b_, b_) +divmod(b_, i8) +divmod(b_, u8) +divmod(b_, f8) +divmod(b_, AR) + +b % b_ +i % b_ +f % b_ +b_ % b_ +i8 % b_ +u8 % b_ +f8 % b_ +AR % b_ + +divmod(b, b_) +divmod(i, b_) +divmod(f, b_) +divmod(b_, b_) +divmod(i8, b_) +divmod(u8, b_) +divmod(f8, b_) +divmod(AR, b_) + +# int + +i8 % b +i8 % i +i8 % f +i8 % i8 +i8 % f8 +i4 % i8 +i4 % f8 +i4 % i4 +i4 % f4 +i8 % AR + +divmod(i8, b) +divmod(i8, i) +divmod(i8, f) +divmod(i8, i8) +divmod(i8, f8) +divmod(i8, i4) +divmod(i8, f4) +divmod(i4, i4) +divmod(i4, f4) +divmod(i8, AR) + +b % i8 +i % i8 +f % i8 +i8 % i8 +f8 % i8 +i8 % i4 +f8 % i4 +i4 % i4 +f4 % i4 +AR % i8 + +divmod(b, i8) +divmod(i, i8) +divmod(f, i8) +divmod(i8, i8) +divmod(f8, i8) +divmod(i4, i8) +divmod(f4, i8) +divmod(i4, i4) +divmod(f4, i4) +divmod(AR, i8) + +# float + +f8 % b +f8 % i +f8 % f +i8 % f4 +f4 % f4 +f8 % AR + +divmod(f8, b) +divmod(f8, i) +divmod(f8, f) +divmod(f8, f8) +divmod(f8, f4) +divmod(f4, f4) +divmod(f8, AR) + +b % f8 +i % f8 +f % f8 +f8 % f8 +f8 % f8 +f4 % f4 +AR % f8 + +divmod(b, f8) +divmod(i, f8) +divmod(f, f8) +divmod(f8, f8) +divmod(f4, f8) +divmod(f4, f4) +divmod(AR, f8) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/modules.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/modules.py new file mode 100644 index 0000000..0c2fd4b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/modules.py @@ -0,0 +1,45 @@ +import numpy as np +from numpy import f2py + +np.char +np.ctypeslib +np.emath +np.fft +np.lib +np.linalg +np.ma +np.matrixlib +np.polynomial +np.random +np.rec +np.strings +np.testing +np.version + +np.lib.format +np.lib.mixins +np.lib.scimath +np.lib.stride_tricks +np.lib.array_utils +np.ma.extras +np.polynomial.chebyshev +np.polynomial.hermite +np.polynomial.hermite_e +np.polynomial.laguerre +np.polynomial.legendre +np.polynomial.polynomial + +np.__path__ +np.__version__ + +np.__all__ +np.char.__all__ +np.ctypeslib.__all__ +np.emath.__all__ 
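+# Illustrative aside (editor sketch, not part of the vendored file): these
+# attribute accesses double as import checks; the same names resolve at
+# runtime. `_st` is a hypothetical alias used only for this sketch:
+from numpy.lib import stride_tricks as _st
+assert hasattr(_st, "sliding_window_view")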
+np.lib.__all__ +np.ma.__all__ +np.random.__all__ +np.rec.__all__ +np.strings.__all__ +np.testing.__all__ +f2py.__all__ diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/multiarray.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/multiarray.py new file mode 100644 index 0000000..26cedfd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/multiarray.py @@ -0,0 +1,76 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] = np.array([1.0]) +AR_i4 = np.array([1], dtype=np.int32) +AR_u1 = np.array([1], dtype=np.uint8) + +AR_LIKE_f = [1.5] +AR_LIKE_i = [1] + +b_f8 = np.broadcast(AR_f8) +b_i4_f8_f8 = np.broadcast(AR_i4, AR_f8, AR_f8) + +next(b_f8) +b_f8.reset() +b_f8.index +b_f8.iters +b_f8.nd +b_f8.ndim +b_f8.numiter +b_f8.shape +b_f8.size + +next(b_i4_f8_f8) +b_i4_f8_f8.reset() +b_i4_f8_f8.ndim +b_i4_f8_f8.index +b_i4_f8_f8.iters +b_i4_f8_f8.nd +b_i4_f8_f8.numiter +b_i4_f8_f8.shape +b_i4_f8_f8.size + +np.inner(AR_f8, AR_i4) + +np.where([True, True, False]) +np.where([True, True, False], 1, 0) + +np.lexsort([0, 1, 2]) + +np.can_cast(np.dtype("i8"), int) +np.can_cast(AR_f8, "f8") +np.can_cast(AR_f8, np.complex128, casting="unsafe") + +np.min_scalar_type([1]) +np.min_scalar_type(AR_f8) + +np.result_type(int, AR_i4) +np.result_type(AR_f8, AR_u1) +np.result_type(AR_f8, np.complex128) + +np.dot(AR_LIKE_f, AR_i4) +np.dot(AR_u1, 1) +np.dot(1.5j, 1) +np.dot(AR_u1, 1, out=AR_f8) + +np.vdot(AR_LIKE_f, AR_i4) +np.vdot(AR_u1, 1) +np.vdot(1.5j, 1) + +np.bincount(AR_i4) + +np.copyto(AR_f8, [1.6]) + +np.putmask(AR_f8, [True], 1.5) + +np.packbits(AR_i4) +np.packbits(AR_u1) + +np.unpackbits(AR_u1) + +np.shares_memory(1, 2) +np.shares_memory(AR_f8, AR_f8, max_work=1) + +np.may_share_memory(1, 2) +np.may_share_memory(AR_f8, AR_f8, max_work=1) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py new file mode 100644 index 0000000..76da1da --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -0,0 +1,87 @@ +import os +import tempfile + +import numpy as np + +nd = np.array([[1, 2], [3, 4]]) +scalar_array = np.array(1) + +# item +scalar_array.item() +nd.item(1) +nd.item(0, 1) +nd.item((0, 1)) + +# tobytes +nd.tobytes() +nd.tobytes("C") +nd.tobytes(None) + +# tofile +if os.name != "nt": + with tempfile.NamedTemporaryFile(suffix=".txt") as tmp: + nd.tofile(tmp.name) + nd.tofile(tmp.name, "") + nd.tofile(tmp.name, sep="") + + nd.tofile(tmp.name, "", "%s") + nd.tofile(tmp.name, format="%s") + + nd.tofile(tmp) + +# dump is pretty simple +# dumps is pretty simple + +# astype +nd.astype("float") +nd.astype(float) + +nd.astype(float, "K") +nd.astype(float, order="K") + +nd.astype(float, "K", "unsafe") +nd.astype(float, casting="unsafe") + +nd.astype(float, "K", "unsafe", True) +nd.astype(float, subok=True) + +nd.astype(float, "K", "unsafe", True, True) +nd.astype(float, copy=True) + +# byteswap +nd.byteswap() +nd.byteswap(True) + +# copy +nd.copy() +nd.copy("C") + +# view +nd.view() +nd.view(np.int64) +nd.view(dtype=np.int64) +nd.view(np.int64, np.matrix) +nd.view(type=np.matrix) + +# getfield +complex_array = np.array([[1 + 1j, 0], [0, 1 - 1j]], dtype=np.complex128) + +complex_array.getfield("float") +complex_array.getfield(float) + +complex_array.getfield("float", 8) +complex_array.getfield(float, offset=8) + +# setflags 
+nd.setflags() + +nd.setflags(True) +nd.setflags(write=True) + +nd.setflags(True, True) +nd.setflags(write=True, align=True) + +nd.setflags(True, True, False) +nd.setflags(write=True, align=True, uic=False) + +# fill is pretty simple diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py new file mode 100644 index 0000000..bb290cd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py @@ -0,0 +1,198 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +from __future__ import annotations + +import operator +from typing import cast, Any + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.float64]): ... +class IntSubClass(npt.NDArray[np.intp]): ... + +i4 = np.int32(1) +A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) +B0 = np.empty((), dtype=np.int32).view(SubClass) +B1 = np.empty((1,), dtype=np.int32).view(SubClass) +B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) +B_int0: IntSubClass = np.empty((), dtype=np.intp).view(IntSubClass) +C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) +D = np.ones(3).view(SubClass) + +ctypes_obj = A.ctypes + +i4.all() +A.all() +A.all(axis=0) +A.all(keepdims=True) +A.all(out=B0) + +i4.any() +A.any() +A.any(axis=0) +A.any(keepdims=True) +A.any(out=B0) + +i4.argmax() +A.argmax() +A.argmax(axis=0) +A.argmax(out=B_int0) + +i4.argmin() +A.argmin() +A.argmin(axis=0) +A.argmin(out=B_int0) + +i4.argsort() +A.argsort() + +i4.choose([()]) +_choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) +C.choose(_choices) +C.choose(_choices, out=D) + +i4.clip(1) +A.clip(1) +A.clip(None, 1) +A.clip(1, out=B2) +A.clip(None, 1, out=B2) + +i4.compress([1]) +A.compress([1]) +A.compress([1], out=B1) + +i4.conj() +A.conj() +B0.conj() + +i4.conjugate() +A.conjugate() +B0.conjugate() + +i4.cumprod() +A.cumprod() +A.cumprod(out=B1) + +i4.cumsum() +A.cumsum() +A.cumsum(out=B1) + +i4.max() +A.max() +A.max(axis=0) +A.max(keepdims=True) +A.max(out=B0) + +i4.mean() +A.mean() +A.mean(axis=0) +A.mean(keepdims=True) +A.mean(out=B0) + +i4.min() +A.min() +A.min(axis=0) +A.min(keepdims=True) +A.min(out=B0) + +i4.prod() +A.prod() +A.prod(axis=0) +A.prod(keepdims=True) +A.prod(out=B0) + +i4.round() +A.round() +A.round(out=B2) + +i4.repeat(1) +A.repeat(1) +B0.repeat(1) + +i4.std() +A.std() +A.std(axis=0) +A.std(keepdims=True) +A.std(out=B0.astype(np.float64)) + +i4.sum() +A.sum() +A.sum(axis=0) +A.sum(keepdims=True) +A.sum(out=B0) + +i4.take(0) +A.take(0) +A.take([0]) +A.take(0, out=B0) +A.take([0], out=B1) + +i4.var() +A.var() +A.var(axis=0) +A.var(keepdims=True) +A.var(out=B0) + +A.argpartition([0]) + +A.diagonal() + +A.dot(1) +A.dot(1, out=B2) + +A.nonzero() + +C.searchsorted(1) + +A.trace() +A.trace(out=B0) + +void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0)) +void.setfield(10, np.float64) + +A.item(0) +C.item(0) + +A.ravel() +C.ravel() + +A.flatten() +C.flatten() + +A.reshape(1) +C.reshape(3) + +int(np.array(1.0, dtype=np.float64)) +int(np.array("1", dtype=np.str_)) + +float(np.array(1.0, dtype=np.float64)) +float(np.array("1", dtype=np.str_)) + +complex(np.array(1.0, dtype=np.float64)) + +operator.index(np.array(1, dtype=np.int64)) + +# this fails on numpy 2.2.1 +# 
https://github.com/scipy/scipy/blob/a755ee77ec47a64849abe42c349936475a6c2f24/scipy/io/arff/tests/test_arffread.py#L41-L44 +A_float = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) +A_void["yop"] = A_float[:, 0] +A_void["yap"] = A_float[:, 1] + +# deprecated + +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_data() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_shape() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_strides() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_as_parameter() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py new file mode 100644 index 0000000..0ca3dff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py @@ -0,0 +1,47 @@ +import numpy as np + +nd1 = np.array([[1, 2], [3, 4]]) + +# reshape +nd1.reshape(4) +nd1.reshape(2, 2) +nd1.reshape((2, 2)) + +nd1.reshape((2, 2), order="C") +nd1.reshape(4, order="C") + +# resize +nd1.resize() +nd1.resize(4) +nd1.resize(2, 2) +nd1.resize((2, 2)) + +nd1.resize((2, 2), refcheck=True) +nd1.resize(4, refcheck=True) + +nd2 = np.array([[1, 2], [3, 4]]) + +# transpose +nd2.transpose() +nd2.transpose(1, 0) +nd2.transpose((1, 0)) + +# swapaxes +nd2.swapaxes(0, 1) + +# flatten +nd2.flatten() +nd2.flatten("C") + +# ravel +nd2.ravel() +nd2.ravel("C") + +# squeeze +nd2.squeeze() + +nd3 = np.array([[1, 2]]) +nd3.squeeze(0) + +nd4 = np.array([[[1, 2]]]) +nd4.squeeze((0, 1)) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/nditer.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/nditer.py new file mode 100644 index 0000000..25a5b44 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/nditer.py @@ -0,0 +1,4 @@ +import numpy as np + +arr = np.array([1]) +np.nditer([arr, None]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/numeric.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/numeric.py new file mode 100644 index 0000000..1eb14cf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/numeric.py @@ -0,0 +1,95 @@ +""" +Tests for :mod:`numpy._core.numeric`. + +Does not include tests which fall under ``array_constructors``. + +""" + +from __future__ import annotations +from typing import cast + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.float64]): ... 
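+# Illustrative aside (editor sketch, not part of the vendored file): ndarray
+# subclasses such as SubClass are not instantiated directly; instances are
+# made with .view(), exactly as C is built below:
+_sub = np.zeros(3).view(SubClass)
+assert isinstance(_sub, SubClass)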
+ + +i8 = np.int64(1) + +A = cast( + np.ndarray[tuple[int, int, int], np.dtype[np.intp]], + np.arange(27).reshape(3, 3, 3), +) +B: list[list[list[int]]] = A.tolist() +C = np.empty((27, 27)).view(SubClass) + +np.count_nonzero(i8) +np.count_nonzero(A) +np.count_nonzero(B) +np.count_nonzero(A, keepdims=True) +np.count_nonzero(A, axis=0) + +np.isfortran(i8) +np.isfortran(A) + +np.argwhere(i8) +np.argwhere(A) + +np.flatnonzero(i8) +np.flatnonzero(A) + +np.correlate(B[0][0], A.ravel(), mode="valid") +np.correlate(A.ravel(), A.ravel(), mode="same") + +np.convolve(B[0][0], A.ravel(), mode="valid") +np.convolve(A.ravel(), A.ravel(), mode="same") + +np.outer(i8, A) +np.outer(B, A) +np.outer(A, A) +np.outer(A, A, out=C) + +np.tensordot(B, A) +np.tensordot(A, A) +np.tensordot(A, A, axes=0) +np.tensordot(A, A, axes=(0, 1)) + +np.isscalar(i8) +np.isscalar(A) +np.isscalar(B) + +np.roll(A, 1) +np.roll(A, (1, 2)) +np.roll(B, 1) + +np.rollaxis(A, 0, 1) + +np.moveaxis(A, 0, 1) +np.moveaxis(A, (0, 1), (1, 2)) + +np.cross(B, A) +np.cross(A, A) + +np.indices([0, 1, 2]) +np.indices([0, 1, 2], sparse=False) +np.indices([0, 1, 2], sparse=True) + +np.binary_repr(1) + +np.base_repr(1) + +np.allclose(i8, A) +np.allclose(B, A) +np.allclose(A, A) + +np.isclose(i8, A) +np.isclose(B, A) +np.isclose(A, A) + +np.array_equal(i8, A) +np.array_equal(B, A) +np.array_equal(A, A) + +np.array_equiv(i8, A) +np.array_equiv(B, A) +np.array_equiv(A, A) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/numerictypes.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/numerictypes.py new file mode 100644 index 0000000..24e1a99 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/numerictypes.py @@ -0,0 +1,17 @@ +import numpy as np + +np.isdtype(np.float64, (np.int64, np.float64)) +np.isdtype(np.int64, "signed integer") + +np.issubdtype("S1", np.bytes_) +np.issubdtype(np.float64, np.float32) + +np.ScalarType +np.ScalarType[0] +np.ScalarType[3] +np.ScalarType[8] +np.ScalarType[10] + +np.typecodes["Character"] +np.typecodes["Complex"] +np.typecodes["All"] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/random.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/random.py new file mode 100644 index 0000000..bce204a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/random.py @@ -0,0 +1,1497 @@ +from __future__ import annotations + +from typing import Any +import numpy as np + +SEED_NONE = None +SEED_INT = 4579435749574957634658964293569 +SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64) +SEED_ARRLIKE: list[int] = [1, 2, 3, 4] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_MT19937: np.random.MT19937 = np.random.MT19937(0) +SEED_PCG64: np.random.PCG64 = np.random.PCG64(0) +SEED_PHILOX: np.random.Philox = np.random.Philox(0) +SEED_SFC64: np.random.SFC64 = np.random.SFC64(0) + +# default rng +np.random.default_rng() +np.random.default_rng(SEED_NONE) +np.random.default_rng(SEED_INT) +np.random.default_rng(SEED_ARR) +np.random.default_rng(SEED_ARRLIKE) +np.random.default_rng(SEED_SEED_SEQ) +np.random.default_rng(SEED_MT19937) +np.random.default_rng(SEED_PCG64) +np.random.default_rng(SEED_PHILOX) +np.random.default_rng(SEED_SFC64) + +# Seed Sequence +np.random.SeedSequence(SEED_NONE) +np.random.SeedSequence(SEED_INT) +np.random.SeedSequence(SEED_ARR) +np.random.SeedSequence(SEED_ARRLIKE) + +# Bit Generators 
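+# (Added note, not from the upstream file: every bit generator below accepts
+# the same seed types as ``default_rng``.  Independent streams are normally
+# derived by spawning a ``SeedSequence`` rather than reusing one seed; a small
+# hedged sketch, with hypothetical names ``_child_a`` and ``_child_b``:)
+_child_a, _child_b = np.random.SeedSequence(0).spawn(2)
+_gen_a = np.random.Generator(np.random.PCG64(_child_a))
+_gen_b = np.random.Generator(np.random.PCG64(_child_b))
+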
+np.random.MT19937(SEED_NONE) +np.random.MT19937(SEED_INT) +np.random.MT19937(SEED_ARR) +np.random.MT19937(SEED_ARRLIKE) +np.random.MT19937(SEED_SEED_SEQ) + +np.random.PCG64(SEED_NONE) +np.random.PCG64(SEED_INT) +np.random.PCG64(SEED_ARR) +np.random.PCG64(SEED_ARRLIKE) +np.random.PCG64(SEED_SEED_SEQ) + +np.random.Philox(SEED_NONE) +np.random.Philox(SEED_INT) +np.random.Philox(SEED_ARR) +np.random.Philox(SEED_ARRLIKE) +np.random.Philox(SEED_SEED_SEQ) + +np.random.SFC64(SEED_NONE) +np.random.SFC64(SEED_INT) +np.random.SFC64(SEED_ARR) +np.random.SFC64(SEED_ARRLIKE) +np.random.SFC64(SEED_SEED_SEQ) + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence(SEED_NONE) +seed_seq.spawn(10) +seed_seq.generate_state(3) +seed_seq.generate_state(3, "u4") +seed_seq.generate_state(3, "uint32") +seed_seq.generate_state(3, "u8") +seed_seq.generate_state(3, "uint64") +seed_seq.generate_state(3, np.uint32) +seed_seq.generate_state(3, np.uint64) + + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1]) +D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5]) +D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) +D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5]) +I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) +I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) +D_arr_like_0p1: list[float] = [0.1] +D_arr_like_0p5: list[float] = [0.5] +D_arr_like_0p9: list[float] = [0.9] +D_arr_like_1p5: list[float] = [1.5] +I_arr_like_10: list[int] = [10] +I_arr_like_20: list[int] = [20] +D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) + +S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) +D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) + +def_gen.standard_normal() +def_gen.standard_normal(dtype=np.float32) +def_gen.standard_normal(dtype="float32") +def_gen.standard_normal(dtype="double") +def_gen.standard_normal(dtype=np.float64) +def_gen.standard_normal(size=None) +def_gen.standard_normal(size=1) +def_gen.standard_normal(size=1, dtype=np.float32) +def_gen.standard_normal(size=1, dtype="f4") +def_gen.standard_normal(size=1, dtype="float32", out=S_out) +def_gen.standard_normal(dtype=np.float32, out=S_out) +def_gen.standard_normal(size=1, dtype=np.float64) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="f8") +def_gen.standard_normal(out=D_out) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="float64", out=D_out) + +def_gen.random() +def_gen.random(dtype=np.float32) +def_gen.random(dtype="float32") +def_gen.random(dtype="double") +def_gen.random(dtype=np.float64) +def_gen.random(size=None) +def_gen.random(size=1) +def_gen.random(size=1, dtype=np.float32) +def_gen.random(size=1, dtype="f4") +def_gen.random(size=1, dtype="float32", out=S_out) +def_gen.random(dtype=np.float32, out=S_out) +def_gen.random(size=1, dtype=np.float64) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="f8") +def_gen.random(out=D_out) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="float64", out=D_out) + +def_gen.standard_cauchy() +def_gen.standard_cauchy(size=None) +def_gen.standard_cauchy(size=1) + +def_gen.standard_exponential() +def_gen.standard_exponential(method="inv") +def_gen.standard_exponential(dtype=np.float32) 
+def_gen.standard_exponential(dtype="float32") +def_gen.standard_exponential(dtype="double") +def_gen.standard_exponential(dtype=np.float64) +def_gen.standard_exponential(size=None) +def_gen.standard_exponential(size=None, method="inv") +def_gen.standard_exponential(size=1, method="inv") +def_gen.standard_exponential(size=1, dtype=np.float32) +def_gen.standard_exponential(size=1, dtype="f4", method="inv") +def_gen.standard_exponential(size=1, dtype="float32", out=S_out) +def_gen.standard_exponential(dtype=np.float32, out=S_out) +def_gen.standard_exponential(size=1, dtype=np.float64, method="inv") +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="f8") +def_gen.standard_exponential(out=D_out) +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="float64", out=D_out) + +def_gen.zipf(1.5) +def_gen.zipf(1.5, size=None) +def_gen.zipf(1.5, size=1) +def_gen.zipf(D_arr_1p5) +def_gen.zipf(D_arr_1p5, size=1) +def_gen.zipf(D_arr_like_1p5) +def_gen.zipf(D_arr_like_1p5, size=1) + +def_gen.weibull(0.5) +def_gen.weibull(0.5, size=None) +def_gen.weibull(0.5, size=1) +def_gen.weibull(D_arr_0p5) +def_gen.weibull(D_arr_0p5, size=1) +def_gen.weibull(D_arr_like_0p5) +def_gen.weibull(D_arr_like_0p5, size=1) + +def_gen.standard_t(0.5) +def_gen.standard_t(0.5, size=None) +def_gen.standard_t(0.5, size=1) +def_gen.standard_t(D_arr_0p5) +def_gen.standard_t(D_arr_0p5, size=1) +def_gen.standard_t(D_arr_like_0p5) +def_gen.standard_t(D_arr_like_0p5, size=1) + +def_gen.poisson(0.5) +def_gen.poisson(0.5, size=None) +def_gen.poisson(0.5, size=1) +def_gen.poisson(D_arr_0p5) +def_gen.poisson(D_arr_0p5, size=1) +def_gen.poisson(D_arr_like_0p5) +def_gen.poisson(D_arr_like_0p5, size=1) + +def_gen.power(0.5) +def_gen.power(0.5, size=None) +def_gen.power(0.5, size=1) +def_gen.power(D_arr_0p5) +def_gen.power(D_arr_0p5, size=1) +def_gen.power(D_arr_like_0p5) +def_gen.power(D_arr_like_0p5, size=1) + +def_gen.pareto(0.5) +def_gen.pareto(0.5, size=None) +def_gen.pareto(0.5, size=1) +def_gen.pareto(D_arr_0p5) +def_gen.pareto(D_arr_0p5, size=1) +def_gen.pareto(D_arr_like_0p5) +def_gen.pareto(D_arr_like_0p5, size=1) + +def_gen.chisquare(0.5) +def_gen.chisquare(0.5, size=None) +def_gen.chisquare(0.5, size=1) +def_gen.chisquare(D_arr_0p5) +def_gen.chisquare(D_arr_0p5, size=1) +def_gen.chisquare(D_arr_like_0p5) +def_gen.chisquare(D_arr_like_0p5, size=1) + +def_gen.exponential(0.5) +def_gen.exponential(0.5, size=None) +def_gen.exponential(0.5, size=1) +def_gen.exponential(D_arr_0p5) +def_gen.exponential(D_arr_0p5, size=1) +def_gen.exponential(D_arr_like_0p5) +def_gen.exponential(D_arr_like_0p5, size=1) + +def_gen.geometric(0.5) +def_gen.geometric(0.5, size=None) +def_gen.geometric(0.5, size=1) +def_gen.geometric(D_arr_0p5) +def_gen.geometric(D_arr_0p5, size=1) +def_gen.geometric(D_arr_like_0p5) +def_gen.geometric(D_arr_like_0p5, size=1) + +def_gen.logseries(0.5) +def_gen.logseries(0.5, size=None) +def_gen.logseries(0.5, size=1) +def_gen.logseries(D_arr_0p5) +def_gen.logseries(D_arr_0p5, size=1) +def_gen.logseries(D_arr_like_0p5) +def_gen.logseries(D_arr_like_0p5, size=1) + +def_gen.rayleigh(0.5) +def_gen.rayleigh(0.5, size=None) +def_gen.rayleigh(0.5, size=1) +def_gen.rayleigh(D_arr_0p5) +def_gen.rayleigh(D_arr_0p5, size=1) +def_gen.rayleigh(D_arr_like_0p5) +def_gen.rayleigh(D_arr_like_0p5, size=1) + +def_gen.standard_gamma(0.5) +def_gen.standard_gamma(0.5, size=None) +def_gen.standard_gamma(0.5, dtype="float32") +def_gen.standard_gamma(0.5, 
size=None, dtype="float32") +def_gen.standard_gamma(0.5, size=1) +def_gen.standard_gamma(D_arr_0p5) +def_gen.standard_gamma(D_arr_0p5, dtype="f4") +def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out) +def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out) +def_gen.standard_gamma(D_arr_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(0.5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64) + +def_gen.vonmises(0.5, 0.5) +def_gen.vonmises(0.5, 0.5, size=None) +def_gen.vonmises(0.5, 0.5, size=1) +def_gen.vonmises(D_arr_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_0p5) +def_gen.vonmises(D_arr_0p5, 0.5, size=1) +def_gen.vonmises(0.5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.wald(0.5, 0.5) +def_gen.wald(0.5, 0.5, size=None) +def_gen.wald(0.5, 0.5, size=1) +def_gen.wald(D_arr_0p5, 0.5) +def_gen.wald(0.5, D_arr_0p5) +def_gen.wald(D_arr_0p5, 0.5, size=1) +def_gen.wald(0.5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, 0.5) +def_gen.wald(0.5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.uniform(0.5, 0.5) +def_gen.uniform(0.5, 0.5, size=None) +def_gen.uniform(0.5, 0.5, size=1) +def_gen.uniform(D_arr_0p5, 0.5) +def_gen.uniform(0.5, D_arr_0p5) +def_gen.uniform(D_arr_0p5, 0.5, size=1) +def_gen.uniform(0.5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, 0.5) +def_gen.uniform(0.5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.beta(0.5, 0.5) +def_gen.beta(0.5, 0.5, size=None) +def_gen.beta(0.5, 0.5, size=1) +def_gen.beta(D_arr_0p5, 0.5) +def_gen.beta(0.5, D_arr_0p5) +def_gen.beta(D_arr_0p5, 0.5, size=1) +def_gen.beta(0.5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, 0.5) +def_gen.beta(0.5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.f(0.5, 0.5) +def_gen.f(0.5, 0.5, size=None) +def_gen.f(0.5, 0.5, size=1) +def_gen.f(D_arr_0p5, 0.5) +def_gen.f(0.5, D_arr_0p5) +def_gen.f(D_arr_0p5, 0.5, size=1) +def_gen.f(0.5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, 0.5) +def_gen.f(0.5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gamma(0.5, 0.5) +def_gen.gamma(0.5, 0.5, size=None) +def_gen.gamma(0.5, 0.5, size=1) +def_gen.gamma(D_arr_0p5, 0.5) +def_gen.gamma(0.5, D_arr_0p5) +def_gen.gamma(D_arr_0p5, 0.5, size=1) +def_gen.gamma(0.5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, 0.5) +def_gen.gamma(0.5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, 
size=1) + +def_gen.gumbel(0.5, 0.5) +def_gen.gumbel(0.5, 0.5, size=None) +def_gen.gumbel(0.5, 0.5, size=1) +def_gen.gumbel(D_arr_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_0p5) +def_gen.gumbel(D_arr_0p5, 0.5, size=1) +def_gen.gumbel(0.5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.laplace(0.5, 0.5) +def_gen.laplace(0.5, 0.5, size=None) +def_gen.laplace(0.5, 0.5, size=1) +def_gen.laplace(D_arr_0p5, 0.5) +def_gen.laplace(0.5, D_arr_0p5) +def_gen.laplace(D_arr_0p5, 0.5, size=1) +def_gen.laplace(0.5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, 0.5) +def_gen.laplace(0.5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.logistic(0.5, 0.5) +def_gen.logistic(0.5, 0.5, size=None) +def_gen.logistic(0.5, 0.5, size=1) +def_gen.logistic(D_arr_0p5, 0.5) +def_gen.logistic(0.5, D_arr_0p5) +def_gen.logistic(D_arr_0p5, 0.5, size=1) +def_gen.logistic(0.5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, 0.5) +def_gen.logistic(0.5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.lognormal(0.5, 0.5) +def_gen.lognormal(0.5, 0.5, size=None) +def_gen.lognormal(0.5, 0.5, size=1) +def_gen.lognormal(D_arr_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_0p5) +def_gen.lognormal(D_arr_0p5, 0.5, size=1) +def_gen.lognormal(0.5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.noncentral_chisquare(0.5, 0.5) +def_gen.noncentral_chisquare(0.5, 0.5, size=None) +def_gen.noncentral_chisquare(0.5, 0.5, size=1) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.normal(0.5, 0.5) +def_gen.normal(0.5, 0.5, size=None) +def_gen.normal(0.5, 0.5, size=1) +def_gen.normal(D_arr_0p5, 0.5) +def_gen.normal(0.5, D_arr_0p5) +def_gen.normal(D_arr_0p5, 0.5, size=1) +def_gen.normal(0.5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, 0.5) +def_gen.normal(0.5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.triangular(0.1, 0.5, 0.9) +def_gen.triangular(0.1, 0.5, 0.9, size=None) +def_gen.triangular(0.1, 0.5, 0.9, size=1) +def_gen.triangular(D_arr_0p1, 0.5, 0.9) +def_gen.triangular(0.1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_0p1, 0.5, 
D_arr_like_0p9, size=1) +def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1) +def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.triangular(0.5, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.noncentral_f(0.1, 0.5, 0.9) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=None) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=1) +def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.binomial(10, 0.5) +def_gen.binomial(10, 0.5, size=None) +def_gen.binomial(10, 0.5, size=1) +def_gen.binomial(I_arr_10, 0.5) +def_gen.binomial(10, D_arr_0p5) +def_gen.binomial(I_arr_10, 0.5, size=1) +def_gen.binomial(10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, 0.5) +def_gen.binomial(10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.negative_binomial(10, 0.5) +def_gen.negative_binomial(10, 0.5, size=None) +def_gen.negative_binomial(10, 0.5, size=1) +def_gen.negative_binomial(I_arr_10, 0.5) +def_gen.negative_binomial(10, D_arr_0p5) +def_gen.negative_binomial(I_arr_10, 0.5, size=1) +def_gen.negative_binomial(10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, 0.5) +def_gen.negative_binomial(10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.hypergeometric(20, 20, 10) +def_gen.hypergeometric(20, 20, 10, size=None) +def_gen.hypergeometric(20, 20, 10, size=1) +def_gen.hypergeometric(I_arr_20, 20, 10) +def_gen.hypergeometric(20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +def_gen.hypergeometric(20, I_arr_20, 10, size=1) +def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10) +def_gen.hypergeometric(20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + +I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64) + +def_gen.integers(0, 100) +def_gen.integers(100) +def_gen.integers([100]) +def_gen.integers(0, [100]) + +I_bool_low: np.ndarray[Any, np.dtype[np.bool]] = np.array([0], dtype=np.bool) +I_bool_low_like: list[int] = [0] +I_bool_high_open: np.ndarray[Any, np.dtype[np.bool]] = np.array([1], dtype=np.bool) +I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool]] = np.array([1], dtype=np.bool) + +def_gen.integers(2, dtype=bool) +def_gen.integers(0, 2, dtype=bool) +def_gen.integers(1, dtype=bool, endpoint=True) +def_gen.integers(0, 1, dtype=bool, 
endpoint=True) +def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True) +def_gen.integers(I_bool_high_open, dtype=bool) +def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool) +def_gen.integers(0, I_bool_high_open, dtype=bool) +def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True) +def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True) +def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True) + +def_gen.integers(2, dtype=np.bool) +def_gen.integers(0, 2, dtype=np.bool) +def_gen.integers(1, dtype=np.bool, endpoint=True) +def_gen.integers(0, 1, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_high_open, dtype=np.bool) +def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool) +def_gen.integers(0, I_bool_high_open, dtype=np.bool) +def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True) +def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True) + +I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) +I_u1_low_like: list[int] = [0] +I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) +I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) + +def_gen.integers(256, dtype="u1") +def_gen.integers(0, 256, dtype="u1") +def_gen.integers(255, dtype="u1", endpoint=True) +def_gen.integers(0, 255, dtype="u1", endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True) +def_gen.integers(I_u1_high_open, dtype="u1") +def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1") +def_gen.integers(0, I_u1_high_open, dtype="u1") +def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True) + +def_gen.integers(256, dtype="uint8") +def_gen.integers(0, 256, dtype="uint8") +def_gen.integers(255, dtype="uint8", endpoint=True) +def_gen.integers(0, 255, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_high_open, dtype="uint8") +def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8") +def_gen.integers(0, I_u1_high_open, dtype="uint8") +def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True) + +def_gen.integers(256, dtype=np.uint8) +def_gen.integers(0, 256, dtype=np.uint8) +def_gen.integers(255, dtype=np.uint8, endpoint=True) +def_gen.integers(0, 255, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_high_open, dtype=np.uint8) +def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8) +def_gen.integers(0, I_u1_high_open, dtype=np.uint8) +def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True) + +I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) +I_u2_low_like: list[int] = [0] +I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) +I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) + 
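+# (Added note, not from the upstream file: ``integers`` draws from the
+# half-open interval [low, high) by default and from the closed interval
+# [low, high] when ``endpoint=True``, which is why each dtype is exercised
+# with both an exclusive bound like 65536 and an inclusive one like 65535.)
+assert int(def_gen.integers(65535, dtype="u2", endpoint=True)) <= 65535  # hedged runtime check
+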
+def_gen.integers(65536, dtype="u2") +def_gen.integers(0, 65536, dtype="u2") +def_gen.integers(65535, dtype="u2", endpoint=True) +def_gen.integers(0, 65535, dtype="u2", endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True) +def_gen.integers(I_u2_high_open, dtype="u2") +def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2") +def_gen.integers(0, I_u2_high_open, dtype="u2") +def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True) + +def_gen.integers(65536, dtype="uint16") +def_gen.integers(0, 65536, dtype="uint16") +def_gen.integers(65535, dtype="uint16", endpoint=True) +def_gen.integers(0, 65535, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_high_open, dtype="uint16") +def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16") +def_gen.integers(0, I_u2_high_open, dtype="uint16") +def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True) + +def_gen.integers(65536, dtype=np.uint16) +def_gen.integers(0, 65536, dtype=np.uint16) +def_gen.integers(65535, dtype=np.uint16, endpoint=True) +def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_high_open, dtype=np.uint16) +def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16) +def_gen.integers(0, I_u2_high_open, dtype=np.uint16) +def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True) + +I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) +I_u4_low_like: list[int] = [0] +I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) +I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) + +def_gen.integers(4294967296, dtype="u4") +def_gen.integers(0, 4294967296, dtype="u4") +def_gen.integers(4294967295, dtype="u4", endpoint=True) +def_gen.integers(0, 4294967295, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4") +def_gen.integers(0, I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True) + +def_gen.integers(4294967296, dtype="uint32") +def_gen.integers(0, 4294967296, dtype="uint32") +def_gen.integers(4294967295, dtype="uint32", endpoint=True) +def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32") +def_gen.integers(0, I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True) + 
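+# (Added note, not from the upstream file: the character code "u4", the name
+# "uint32", and the scalar type ``np.uint32`` all resolve to the same dtype,
+# so the three blocks per integer width differ only in how it is spelled.)
+assert np.dtype("u4") == np.dtype("uint32") == np.dtype(np.uint32)
+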
+def_gen.integers(4294967296, dtype=np.uint32) +def_gen.integers(0, 4294967296, dtype=np.uint32) +def_gen.integers(4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32) +def_gen.integers(0, I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True) + +I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) +I_u8_low_like: list[int] = [0] +I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) + +def_gen.integers(18446744073709551616, dtype="u8") +def_gen.integers(0, 18446744073709551616, dtype="u8") +def_gen.integers(18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8") +def_gen.integers(0, I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True) + +def_gen.integers(18446744073709551616, dtype="uint64") +def_gen.integers(0, 18446744073709551616, dtype="uint64") +def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="uint64") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64") +def_gen.integers(0, I_u8_high_open, dtype="uint64") +def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True) + +def_gen.integers(18446744073709551616, dtype=np.uint64) +def_gen.integers(0, 18446744073709551616, dtype=np.uint64) +def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_high_open, dtype=np.uint64) +def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64) +def_gen.integers(0, I_u8_high_open, dtype=np.uint64) +def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True) + +I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) +I_i1_low_like: list[int] = [-128] +I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) +I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) + +def_gen.integers(128, dtype="i1") +def_gen.integers(-128, 128, dtype="i1") +def_gen.integers(127, 
dtype="i1", endpoint=True) +def_gen.integers(-128, 127, dtype="i1", endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True) +def_gen.integers(I_i1_high_open, dtype="i1") +def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1") +def_gen.integers(-128, I_i1_high_open, dtype="i1") +def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True) + +def_gen.integers(128, dtype="int8") +def_gen.integers(-128, 128, dtype="int8") +def_gen.integers(127, dtype="int8", endpoint=True) +def_gen.integers(-128, 127, dtype="int8", endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True) +def_gen.integers(I_i1_high_open, dtype="int8") +def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8") +def_gen.integers(-128, I_i1_high_open, dtype="int8") +def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True) + +def_gen.integers(128, dtype=np.int8) +def_gen.integers(-128, 128, dtype=np.int8) +def_gen.integers(127, dtype=np.int8, endpoint=True) +def_gen.integers(-128, 127, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_high_open, dtype=np.int8) +def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8) +def_gen.integers(-128, I_i1_high_open, dtype=np.int8) +def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True) + +I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) +I_i2_low_like: list[int] = [-32768] +I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) +I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) + +def_gen.integers(32768, dtype="i2") +def_gen.integers(-32768, 32768, dtype="i2") +def_gen.integers(32767, dtype="i2", endpoint=True) +def_gen.integers(-32768, 32767, dtype="i2", endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True) +def_gen.integers(I_i2_high_open, dtype="i2") +def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2") +def_gen.integers(-32768, I_i2_high_open, dtype="i2") +def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True) + +def_gen.integers(32768, dtype="int16") +def_gen.integers(-32768, 32768, dtype="int16") +def_gen.integers(32767, dtype="int16", endpoint=True) +def_gen.integers(-32768, 32767, dtype="int16", endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True) +def_gen.integers(I_i2_high_open, dtype="int16") +def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16") +def_gen.integers(-32768, I_i2_high_open, dtype="int16") +def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True) + +def_gen.integers(32768, dtype=np.int16) +def_gen.integers(-32768, 32768, dtype=np.int16) +def_gen.integers(32767, dtype=np.int16, endpoint=True) +def_gen.integers(-32768, 32767, 
dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_high_open, dtype=np.int16) +def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16) +def_gen.integers(-32768, I_i2_high_open, dtype=np.int16) +def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True) + +I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) +I_i4_low_like: list[int] = [-2147483648] +I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) +I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) + +def_gen.integers(2147483648, dtype="i4") +def_gen.integers(-2147483648, 2147483648, dtype="i4") +def_gen.integers(2147483647, dtype="i4", endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True) +def_gen.integers(I_i4_high_open, dtype="i4") +def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4") +def_gen.integers(-2147483648, I_i4_high_open, dtype="i4") +def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True) + +def_gen.integers(2147483648, dtype="int32") +def_gen.integers(-2147483648, 2147483648, dtype="int32") +def_gen.integers(2147483647, dtype="int32", endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True) +def_gen.integers(I_i4_high_open, dtype="int32") +def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32") +def_gen.integers(-2147483648, I_i4_high_open, dtype="int32") +def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True) + +def_gen.integers(2147483648, dtype=np.int32) +def_gen.integers(-2147483648, 2147483648, dtype=np.int32) +def_gen.integers(2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_high_open, dtype=np.int32) +def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32) +def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32) +def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True) + +I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) +I_i8_low_like: list[int] = [-9223372036854775808] +I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) +I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) + +def_gen.integers(9223372036854775808, dtype="i8") +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8") +def_gen.integers(9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True) 
+def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(I_i8_high_open, dtype="i8") +def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8") +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8") +def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True) + +def_gen.integers(9223372036854775808, dtype="int64") +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64") +def_gen.integers(9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(I_i8_high_open, dtype="int64") +def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64") +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64") +def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True) + +def_gen.integers(9223372036854775808, dtype=np.int64) +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64) +def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_high_open, dtype=np.int64) +def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64) +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64) +def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True) + + +def_gen.bit_generator + +def_gen.bytes(2) + +def_gen.choice(5) +def_gen.choice(5, 3) +def_gen.choice(5, 3, replace=True) +def_gen.choice(5, 3, p=[1 / 5] * 5) +def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False) + +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])) + +def_gen.dirichlet([0.5, 0.5]) +def_gen.dirichlet(np.array([0.5, 0.5])) +def_gen.dirichlet(np.array([0.5, 0.5]), size=3) + +def_gen.multinomial(20, [1 / 6.0] * 6) +def_gen.multinomial(20, np.array([0.5, 0.5])) +def_gen.multinomial(20, [1 / 6.0] * 6, size=2) +def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2)) +def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2)) + +def_gen.multivariate_hypergeometric([3, 5, 7], 2) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7)) +def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count") +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals") + +def_gen.multivariate_normal([0.0], [[1.0]]) 
+def_gen.multivariate_normal([0.0], np.array([[1.0]])) +def_gen.multivariate_normal(np.array([0.0]), [[1.0]]) +def_gen.multivariate_normal([0.0], np.array([[1.0]])) + +def_gen.permutation(10) +def_gen.permutation([1, 2, 3, 4]) +def_gen.permutation(np.array([1, 2, 3, 4])) +def_gen.permutation(D_2D, axis=1) +def_gen.permuted(D_2D) +def_gen.permuted(D_2D_like) +def_gen.permuted(D_2D, axis=1) +def_gen.permuted(D_2D, out=D_2D) +def_gen.permuted(D_2D_like, out=D_2D) +def_gen.permuted(D_2D_like, out=D_2D) +def_gen.permuted(D_2D, axis=1, out=D_2D) + +def_gen.shuffle(np.arange(10)) +def_gen.shuffle([1, 2, 3, 4, 5]) +def_gen.shuffle(D_2D, axis=1) + +def_gen.__str__() +def_gen.__repr__() +def_gen.__setstate__(dict(def_gen.bit_generator.state)) + +# RandomState +random_st: np.random.RandomState = np.random.RandomState() + +random_st.standard_normal() +random_st.standard_normal(size=None) +random_st.standard_normal(size=1) + +random_st.random() +random_st.random(size=None) +random_st.random(size=1) + +random_st.standard_cauchy() +random_st.standard_cauchy(size=None) +random_st.standard_cauchy(size=1) + +random_st.standard_exponential() +random_st.standard_exponential(size=None) +random_st.standard_exponential(size=1) + +random_st.zipf(1.5) +random_st.zipf(1.5, size=None) +random_st.zipf(1.5, size=1) +random_st.zipf(D_arr_1p5) +random_st.zipf(D_arr_1p5, size=1) +random_st.zipf(D_arr_like_1p5) +random_st.zipf(D_arr_like_1p5, size=1) + +random_st.weibull(0.5) +random_st.weibull(0.5, size=None) +random_st.weibull(0.5, size=1) +random_st.weibull(D_arr_0p5) +random_st.weibull(D_arr_0p5, size=1) +random_st.weibull(D_arr_like_0p5) +random_st.weibull(D_arr_like_0p5, size=1) + +random_st.standard_t(0.5) +random_st.standard_t(0.5, size=None) +random_st.standard_t(0.5, size=1) +random_st.standard_t(D_arr_0p5) +random_st.standard_t(D_arr_0p5, size=1) +random_st.standard_t(D_arr_like_0p5) +random_st.standard_t(D_arr_like_0p5, size=1) + +random_st.poisson(0.5) +random_st.poisson(0.5, size=None) +random_st.poisson(0.5, size=1) +random_st.poisson(D_arr_0p5) +random_st.poisson(D_arr_0p5, size=1) +random_st.poisson(D_arr_like_0p5) +random_st.poisson(D_arr_like_0p5, size=1) + +random_st.power(0.5) +random_st.power(0.5, size=None) +random_st.power(0.5, size=1) +random_st.power(D_arr_0p5) +random_st.power(D_arr_0p5, size=1) +random_st.power(D_arr_like_0p5) +random_st.power(D_arr_like_0p5, size=1) + +random_st.pareto(0.5) +random_st.pareto(0.5, size=None) +random_st.pareto(0.5, size=1) +random_st.pareto(D_arr_0p5) +random_st.pareto(D_arr_0p5, size=1) +random_st.pareto(D_arr_like_0p5) +random_st.pareto(D_arr_like_0p5, size=1) + +random_st.chisquare(0.5) +random_st.chisquare(0.5, size=None) +random_st.chisquare(0.5, size=1) +random_st.chisquare(D_arr_0p5) +random_st.chisquare(D_arr_0p5, size=1) +random_st.chisquare(D_arr_like_0p5) +random_st.chisquare(D_arr_like_0p5, size=1) + +random_st.exponential(0.5) +random_st.exponential(0.5, size=None) +random_st.exponential(0.5, size=1) +random_st.exponential(D_arr_0p5) +random_st.exponential(D_arr_0p5, size=1) +random_st.exponential(D_arr_like_0p5) +random_st.exponential(D_arr_like_0p5, size=1) + +random_st.geometric(0.5) +random_st.geometric(0.5, size=None) +random_st.geometric(0.5, size=1) +random_st.geometric(D_arr_0p5) +random_st.geometric(D_arr_0p5, size=1) +random_st.geometric(D_arr_like_0p5) +random_st.geometric(D_arr_like_0p5, size=1) + +random_st.logseries(0.5) +random_st.logseries(0.5, size=None) +random_st.logseries(0.5, size=1) +random_st.logseries(D_arr_0p5) 
+random_st.logseries(D_arr_0p5, size=1) +random_st.logseries(D_arr_like_0p5) +random_st.logseries(D_arr_like_0p5, size=1) + +random_st.rayleigh(0.5) +random_st.rayleigh(0.5, size=None) +random_st.rayleigh(0.5, size=1) +random_st.rayleigh(D_arr_0p5) +random_st.rayleigh(D_arr_0p5, size=1) +random_st.rayleigh(D_arr_like_0p5) +random_st.rayleigh(D_arr_like_0p5, size=1) + +random_st.standard_gamma(0.5) +random_st.standard_gamma(0.5, size=None) +random_st.standard_gamma(0.5, size=1) +random_st.standard_gamma(D_arr_0p5) +random_st.standard_gamma(D_arr_0p5, size=1) +random_st.standard_gamma(D_arr_like_0p5) +random_st.standard_gamma(D_arr_like_0p5, size=1) +random_st.standard_gamma(D_arr_like_0p5, size=1) + +random_st.vonmises(0.5, 0.5) +random_st.vonmises(0.5, 0.5, size=None) +random_st.vonmises(0.5, 0.5, size=1) +random_st.vonmises(D_arr_0p5, 0.5) +random_st.vonmises(0.5, D_arr_0p5) +random_st.vonmises(D_arr_0p5, 0.5, size=1) +random_st.vonmises(0.5, D_arr_0p5, size=1) +random_st.vonmises(D_arr_like_0p5, 0.5) +random_st.vonmises(0.5, D_arr_like_0p5) +random_st.vonmises(D_arr_0p5, D_arr_0p5) +random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5) +random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1) +random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.wald(0.5, 0.5) +random_st.wald(0.5, 0.5, size=None) +random_st.wald(0.5, 0.5, size=1) +random_st.wald(D_arr_0p5, 0.5) +random_st.wald(0.5, D_arr_0p5) +random_st.wald(D_arr_0p5, 0.5, size=1) +random_st.wald(0.5, D_arr_0p5, size=1) +random_st.wald(D_arr_like_0p5, 0.5) +random_st.wald(0.5, D_arr_like_0p5) +random_st.wald(D_arr_0p5, D_arr_0p5) +random_st.wald(D_arr_like_0p5, D_arr_like_0p5) +random_st.wald(D_arr_0p5, D_arr_0p5, size=1) +random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.uniform(0.5, 0.5) +random_st.uniform(0.5, 0.5, size=None) +random_st.uniform(0.5, 0.5, size=1) +random_st.uniform(D_arr_0p5, 0.5) +random_st.uniform(0.5, D_arr_0p5) +random_st.uniform(D_arr_0p5, 0.5, size=1) +random_st.uniform(0.5, D_arr_0p5, size=1) +random_st.uniform(D_arr_like_0p5, 0.5) +random_st.uniform(0.5, D_arr_like_0p5) +random_st.uniform(D_arr_0p5, D_arr_0p5) +random_st.uniform(D_arr_like_0p5, D_arr_like_0p5) +random_st.uniform(D_arr_0p5, D_arr_0p5, size=1) +random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.beta(0.5, 0.5) +random_st.beta(0.5, 0.5, size=None) +random_st.beta(0.5, 0.5, size=1) +random_st.beta(D_arr_0p5, 0.5) +random_st.beta(0.5, D_arr_0p5) +random_st.beta(D_arr_0p5, 0.5, size=1) +random_st.beta(0.5, D_arr_0p5, size=1) +random_st.beta(D_arr_like_0p5, 0.5) +random_st.beta(0.5, D_arr_like_0p5) +random_st.beta(D_arr_0p5, D_arr_0p5) +random_st.beta(D_arr_like_0p5, D_arr_like_0p5) +random_st.beta(D_arr_0p5, D_arr_0p5, size=1) +random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.f(0.5, 0.5) +random_st.f(0.5, 0.5, size=None) +random_st.f(0.5, 0.5, size=1) +random_st.f(D_arr_0p5, 0.5) +random_st.f(0.5, D_arr_0p5) +random_st.f(D_arr_0p5, 0.5, size=1) +random_st.f(0.5, D_arr_0p5, size=1) +random_st.f(D_arr_like_0p5, 0.5) +random_st.f(0.5, D_arr_like_0p5) +random_st.f(D_arr_0p5, D_arr_0p5) +random_st.f(D_arr_like_0p5, D_arr_like_0p5) +random_st.f(D_arr_0p5, D_arr_0p5, size=1) +random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.gamma(0.5, 0.5) +random_st.gamma(0.5, 0.5, size=None) +random_st.gamma(0.5, 0.5, size=1) +random_st.gamma(D_arr_0p5, 0.5) +random_st.gamma(0.5, D_arr_0p5) +random_st.gamma(D_arr_0p5, 0.5, size=1) +random_st.gamma(0.5, D_arr_0p5, size=1) 
+random_st.gamma(D_arr_like_0p5, 0.5) +random_st.gamma(0.5, D_arr_like_0p5) +random_st.gamma(D_arr_0p5, D_arr_0p5) +random_st.gamma(D_arr_like_0p5, D_arr_like_0p5) +random_st.gamma(D_arr_0p5, D_arr_0p5, size=1) +random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.gumbel(0.5, 0.5) +random_st.gumbel(0.5, 0.5, size=None) +random_st.gumbel(0.5, 0.5, size=1) +random_st.gumbel(D_arr_0p5, 0.5) +random_st.gumbel(0.5, D_arr_0p5) +random_st.gumbel(D_arr_0p5, 0.5, size=1) +random_st.gumbel(0.5, D_arr_0p5, size=1) +random_st.gumbel(D_arr_like_0p5, 0.5) +random_st.gumbel(0.5, D_arr_like_0p5) +random_st.gumbel(D_arr_0p5, D_arr_0p5) +random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5) +random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1) +random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.laplace(0.5, 0.5) +random_st.laplace(0.5, 0.5, size=None) +random_st.laplace(0.5, 0.5, size=1) +random_st.laplace(D_arr_0p5, 0.5) +random_st.laplace(0.5, D_arr_0p5) +random_st.laplace(D_arr_0p5, 0.5, size=1) +random_st.laplace(0.5, D_arr_0p5, size=1) +random_st.laplace(D_arr_like_0p5, 0.5) +random_st.laplace(0.5, D_arr_like_0p5) +random_st.laplace(D_arr_0p5, D_arr_0p5) +random_st.laplace(D_arr_like_0p5, D_arr_like_0p5) +random_st.laplace(D_arr_0p5, D_arr_0p5, size=1) +random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.logistic(0.5, 0.5) +random_st.logistic(0.5, 0.5, size=None) +random_st.logistic(0.5, 0.5, size=1) +random_st.logistic(D_arr_0p5, 0.5) +random_st.logistic(0.5, D_arr_0p5) +random_st.logistic(D_arr_0p5, 0.5, size=1) +random_st.logistic(0.5, D_arr_0p5, size=1) +random_st.logistic(D_arr_like_0p5, 0.5) +random_st.logistic(0.5, D_arr_like_0p5) +random_st.logistic(D_arr_0p5, D_arr_0p5) +random_st.logistic(D_arr_like_0p5, D_arr_like_0p5) +random_st.logistic(D_arr_0p5, D_arr_0p5, size=1) +random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.lognormal(0.5, 0.5) +random_st.lognormal(0.5, 0.5, size=None) +random_st.lognormal(0.5, 0.5, size=1) +random_st.lognormal(D_arr_0p5, 0.5) +random_st.lognormal(0.5, D_arr_0p5) +random_st.lognormal(D_arr_0p5, 0.5, size=1) +random_st.lognormal(0.5, D_arr_0p5, size=1) +random_st.lognormal(D_arr_like_0p5, 0.5) +random_st.lognormal(0.5, D_arr_like_0p5) +random_st.lognormal(D_arr_0p5, D_arr_0p5) +random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5) +random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1) +random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.noncentral_chisquare(0.5, 0.5) +random_st.noncentral_chisquare(0.5, 0.5, size=None) +random_st.noncentral_chisquare(0.5, 0.5, size=1) +random_st.noncentral_chisquare(D_arr_0p5, 0.5) +random_st.noncentral_chisquare(0.5, D_arr_0p5) +random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1) +random_st.noncentral_chisquare(D_arr_like_0p5, 0.5) +random_st.noncentral_chisquare(0.5, D_arr_like_0p5) +random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.normal(0.5, 0.5) +random_st.normal(0.5, 0.5, size=None) +random_st.normal(0.5, 0.5, size=1) +random_st.normal(D_arr_0p5, 0.5) +random_st.normal(0.5, D_arr_0p5) +random_st.normal(D_arr_0p5, 0.5, size=1) +random_st.normal(0.5, D_arr_0p5, size=1) +random_st.normal(D_arr_like_0p5, 0.5) +random_st.normal(0.5, D_arr_like_0p5) +random_st.normal(D_arr_0p5, 
D_arr_0p5) +random_st.normal(D_arr_like_0p5, D_arr_like_0p5) +random_st.normal(D_arr_0p5, D_arr_0p5, size=1) +random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.triangular(0.1, 0.5, 0.9) +random_st.triangular(0.1, 0.5, 0.9, size=None) +random_st.triangular(0.1, 0.5, 0.9, size=1) +random_st.triangular(D_arr_0p1, 0.5, 0.9) +random_st.triangular(0.1, D_arr_0p5, 0.9) +random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +random_st.triangular(0.1, D_arr_0p5, 0.9, size=1) +random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +random_st.triangular(0.5, D_arr_like_0p5, 0.9) +random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9) +random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +random_st.noncentral_f(0.1, 0.5, 0.9) +random_st.noncentral_f(0.1, 0.5, 0.9, size=None) +random_st.noncentral_f(0.1, 0.5, 0.9, size=1) +random_st.noncentral_f(D_arr_0p1, 0.5, 0.9) +random_st.noncentral_f(0.1, D_arr_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +random_st.binomial(10, 0.5) +random_st.binomial(10, 0.5, size=None) +random_st.binomial(10, 0.5, size=1) +random_st.binomial(I_arr_10, 0.5) +random_st.binomial(10, D_arr_0p5) +random_st.binomial(I_arr_10, 0.5, size=1) +random_st.binomial(10, D_arr_0p5, size=1) +random_st.binomial(I_arr_like_10, 0.5) +random_st.binomial(10, D_arr_like_0p5) +random_st.binomial(I_arr_10, D_arr_0p5) +random_st.binomial(I_arr_like_10, D_arr_like_0p5) +random_st.binomial(I_arr_10, D_arr_0p5, size=1) +random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +random_st.negative_binomial(10, 0.5) +random_st.negative_binomial(10, 0.5, size=None) +random_st.negative_binomial(10, 0.5, size=1) +random_st.negative_binomial(I_arr_10, 0.5) +random_st.negative_binomial(10, D_arr_0p5) +random_st.negative_binomial(I_arr_10, 0.5, size=1) +random_st.negative_binomial(10, D_arr_0p5, size=1) +random_st.negative_binomial(I_arr_like_10, 0.5) +random_st.negative_binomial(10, D_arr_like_0p5) +random_st.negative_binomial(I_arr_10, D_arr_0p5) +random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5) +random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1) +random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +random_st.hypergeometric(20, 20, 10) +random_st.hypergeometric(20, 20, 10, size=None) +random_st.hypergeometric(20, 20, 10, size=1) +random_st.hypergeometric(I_arr_20, 20, 10) +random_st.hypergeometric(20, I_arr_20, 10) +random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +random_st.hypergeometric(20, I_arr_20, 10, size=1) +random_st.hypergeometric(I_arr_like_20, 20, I_arr_10) +random_st.hypergeometric(20, I_arr_like_20, 10) +random_st.hypergeometric(I_arr_20, I_arr_20, 10) +random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + +random_st.randint(0, 100) +random_st.randint(100) +random_st.randint([100]) 
+random_st.randint(0, [100]) + +random_st.randint(2, dtype=bool) +random_st.randint(0, 2, dtype=bool) +random_st.randint(I_bool_high_open, dtype=bool) +random_st.randint(I_bool_low, I_bool_high_open, dtype=bool) +random_st.randint(0, I_bool_high_open, dtype=bool) + +random_st.randint(2, dtype=np.bool) +random_st.randint(0, 2, dtype=np.bool) +random_st.randint(I_bool_high_open, dtype=np.bool) +random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool) +random_st.randint(0, I_bool_high_open, dtype=np.bool) + +random_st.randint(256, dtype="u1") +random_st.randint(0, 256, dtype="u1") +random_st.randint(I_u1_high_open, dtype="u1") +random_st.randint(I_u1_low, I_u1_high_open, dtype="u1") +random_st.randint(0, I_u1_high_open, dtype="u1") + +random_st.randint(256, dtype="uint8") +random_st.randint(0, 256, dtype="uint8") +random_st.randint(I_u1_high_open, dtype="uint8") +random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8") +random_st.randint(0, I_u1_high_open, dtype="uint8") + +random_st.randint(256, dtype=np.uint8) +random_st.randint(0, 256, dtype=np.uint8) +random_st.randint(I_u1_high_open, dtype=np.uint8) +random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8) +random_st.randint(0, I_u1_high_open, dtype=np.uint8) + +random_st.randint(65536, dtype="u2") +random_st.randint(0, 65536, dtype="u2") +random_st.randint(I_u2_high_open, dtype="u2") +random_st.randint(I_u2_low, I_u2_high_open, dtype="u2") +random_st.randint(0, I_u2_high_open, dtype="u2") + +random_st.randint(65536, dtype="uint16") +random_st.randint(0, 65536, dtype="uint16") +random_st.randint(I_u2_high_open, dtype="uint16") +random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16") +random_st.randint(0, I_u2_high_open, dtype="uint16") + +random_st.randint(65536, dtype=np.uint16) +random_st.randint(0, 65536, dtype=np.uint16) +random_st.randint(I_u2_high_open, dtype=np.uint16) +random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16) +random_st.randint(0, I_u2_high_open, dtype=np.uint16) + +random_st.randint(4294967296, dtype="u4") +random_st.randint(0, 4294967296, dtype="u4") +random_st.randint(I_u4_high_open, dtype="u4") +random_st.randint(I_u4_low, I_u4_high_open, dtype="u4") +random_st.randint(0, I_u4_high_open, dtype="u4") + +random_st.randint(4294967296, dtype="uint32") +random_st.randint(0, 4294967296, dtype="uint32") +random_st.randint(I_u4_high_open, dtype="uint32") +random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32") +random_st.randint(0, I_u4_high_open, dtype="uint32") + +random_st.randint(4294967296, dtype=np.uint32) +random_st.randint(0, 4294967296, dtype=np.uint32) +random_st.randint(I_u4_high_open, dtype=np.uint32) +random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32) +random_st.randint(0, I_u4_high_open, dtype=np.uint32) + + +random_st.randint(18446744073709551616, dtype="u8") +random_st.randint(0, 18446744073709551616, dtype="u8") +random_st.randint(I_u8_high_open, dtype="u8") +random_st.randint(I_u8_low, I_u8_high_open, dtype="u8") +random_st.randint(0, I_u8_high_open, dtype="u8") + +random_st.randint(18446744073709551616, dtype="uint64") +random_st.randint(0, 18446744073709551616, dtype="uint64") +random_st.randint(I_u8_high_open, dtype="uint64") +random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64") +random_st.randint(0, I_u8_high_open, dtype="uint64") + +random_st.randint(18446744073709551616, dtype=np.uint64) +random_st.randint(0, 18446744073709551616, dtype=np.uint64) +random_st.randint(I_u8_high_open, dtype=np.uint64) +random_st.randint(I_u8_low, I_u8_high_open, 
dtype=np.uint64) +random_st.randint(0, I_u8_high_open, dtype=np.uint64) + +random_st.randint(128, dtype="i1") +random_st.randint(-128, 128, dtype="i1") +random_st.randint(I_i1_high_open, dtype="i1") +random_st.randint(I_i1_low, I_i1_high_open, dtype="i1") +random_st.randint(-128, I_i1_high_open, dtype="i1") + +random_st.randint(128, dtype="int8") +random_st.randint(-128, 128, dtype="int8") +random_st.randint(I_i1_high_open, dtype="int8") +random_st.randint(I_i1_low, I_i1_high_open, dtype="int8") +random_st.randint(-128, I_i1_high_open, dtype="int8") + +random_st.randint(128, dtype=np.int8) +random_st.randint(-128, 128, dtype=np.int8) +random_st.randint(I_i1_high_open, dtype=np.int8) +random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8) +random_st.randint(-128, I_i1_high_open, dtype=np.int8) + +random_st.randint(32768, dtype="i2") +random_st.randint(-32768, 32768, dtype="i2") +random_st.randint(I_i2_high_open, dtype="i2") +random_st.randint(I_i2_low, I_i2_high_open, dtype="i2") +random_st.randint(-32768, I_i2_high_open, dtype="i2") +random_st.randint(32768, dtype="int16") +random_st.randint(-32768, 32768, dtype="int16") +random_st.randint(I_i2_high_open, dtype="int16") +random_st.randint(I_i2_low, I_i2_high_open, dtype="int16") +random_st.randint(-32768, I_i2_high_open, dtype="int16") +random_st.randint(32768, dtype=np.int16) +random_st.randint(-32768, 32768, dtype=np.int16) +random_st.randint(I_i2_high_open, dtype=np.int16) +random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16) +random_st.randint(-32768, I_i2_high_open, dtype=np.int16) + +random_st.randint(2147483648, dtype="i4") +random_st.randint(-2147483648, 2147483648, dtype="i4") +random_st.randint(I_i4_high_open, dtype="i4") +random_st.randint(I_i4_low, I_i4_high_open, dtype="i4") +random_st.randint(-2147483648, I_i4_high_open, dtype="i4") + +random_st.randint(2147483648, dtype="int32") +random_st.randint(-2147483648, 2147483648, dtype="int32") +random_st.randint(I_i4_high_open, dtype="int32") +random_st.randint(I_i4_low, I_i4_high_open, dtype="int32") +random_st.randint(-2147483648, I_i4_high_open, dtype="int32") + +random_st.randint(2147483648, dtype=np.int32) +random_st.randint(-2147483648, 2147483648, dtype=np.int32) +random_st.randint(I_i4_high_open, dtype=np.int32) +random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32) +random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32) + +random_st.randint(9223372036854775808, dtype="i8") +random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8") +random_st.randint(I_i8_high_open, dtype="i8") +random_st.randint(I_i8_low, I_i8_high_open, dtype="i8") +random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8") + +random_st.randint(9223372036854775808, dtype="int64") +random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64") +random_st.randint(I_i8_high_open, dtype="int64") +random_st.randint(I_i8_low, I_i8_high_open, dtype="int64") +random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64") + +random_st.randint(9223372036854775808, dtype=np.int64) +random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64) +random_st.randint(I_i8_high_open, dtype=np.int64) +random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64) +random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64) + +bg: np.random.BitGenerator = random_st._bit_generator + +random_st.bytes(2) + +random_st.choice(5) +random_st.choice(5, 3) +random_st.choice(5, 3, replace=True) +random_st.choice(5, 3, p=[1 / 5] * 5) 
+random_st.choice(5, 3, p=[1 / 5] * 5, replace=False) + +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"]) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])) + +random_st.dirichlet([0.5, 0.5]) +random_st.dirichlet(np.array([0.5, 0.5])) +random_st.dirichlet(np.array([0.5, 0.5]), size=3) + +random_st.multinomial(20, [1 / 6.0] * 6) +random_st.multinomial(20, np.array([0.5, 0.5])) +random_st.multinomial(20, [1 / 6.0] * 6, size=2) + +random_st.multivariate_normal([0.0], [[1.0]]) +random_st.multivariate_normal([0.0], np.array([[1.0]])) +random_st.multivariate_normal(np.array([0.0]), [[1.0]]) +random_st.multivariate_normal([0.0], np.array([[1.0]])) + +random_st.permutation(10) +random_st.permutation([1, 2, 3, 4]) +random_st.permutation(np.array([1, 2, 3, 4])) +random_st.permutation(D_2D) + +random_st.shuffle(np.arange(10)) +random_st.shuffle([1, 2, 3, 4, 5]) +random_st.shuffle(D_2D) + +np.random.RandomState(SEED_PCG64) +np.random.RandomState(0) +np.random.RandomState([0, 1, 2]) +random_st.__str__() +random_st.__repr__() +random_st_state = random_st.__getstate__() +random_st.__setstate__(random_st_state) +random_st.seed() +random_st.seed(1) +random_st.seed([0, 1]) +random_st_get_state = random_st.get_state() +random_st_get_state_legacy = random_st.get_state(legacy=True) +random_st.set_state(random_st_get_state) + +random_st.rand() +random_st.rand(1) +random_st.rand(1, 2) +random_st.randn() +random_st.randn(1) +random_st.randn(1, 2) +random_st.random_sample() +random_st.random_sample(1) +random_st.random_sample(size=(1, 2)) + +random_st.tomaxint() +random_st.tomaxint(1) +random_st.tomaxint((1,)) + +np.random.mtrand.set_bit_generator(SEED_PCG64) +np.random.mtrand.get_bit_generator() diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/recfunctions.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/recfunctions.py new file mode 100644 index 0000000..52a3d78 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/recfunctions.py @@ -0,0 +1,161 @@ +"""These tests are based on the doctests from `numpy/lib/recfunctions.py`.""" + +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib import recfunctions as rfn + + +def test_recursive_fill_fields() -> None: + a: npt.NDArray[np.void] = np.array( + [(1, 10.0), (2, 20.0)], + dtype=[("A", np.int64), ("B", np.float64)], + ) + b = np.zeros((int(3),), dtype=a.dtype) + out = rfn.recursive_fill_fields(a, b) + assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) + + +def test_get_names() -> None: + names: tuple[str | Any, ...] + names = rfn.get_names(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names(adtype) + + +def test_get_names_flat() -> None: + names: tuple[str, ...] 
+ names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names_flat(adtype) + + +def test_flatten_descr() -> None: + ndtype = np.dtype([("a", "<i4"), ("b", [("ba", "<f8"), ("bb", "<i4")])]) + assert_type(rfn.flatten_descr(ndtype), tuple[tuple[str, np.dtype], ...]) + + +def test_get_fieldstructure() -> None: + ndtype = np.dtype([ + ("A", int), + ("B", [("B_A", int), ("B_B", [("B_B_A", int), ("B_B_B", int)])]), + ]) + assert_type(rfn.get_fieldstructure(ndtype), dict[str, list[str]]) + + +def test_merge_arrays() -> None: + assert_type( + rfn.merge_arrays(( + np.ones((int(2),), np.int_), + np.ones((int(3),), np.float64), + )), + np.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_drop_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((int(3),), dtype=ndtype) + + assert_type( + rfn.drop_fields(a, "a"), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.drop_fields(a, "a", asrecarray=True), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.rec_drop_fields(a, "a"), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_rename_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((int(3),), dtype=ndtype) + + assert_type( + rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_repack_fields() -> None: + dt: np.dtype[np.void] = np.dtype("u1, <i8, <f8", align=True) + assert_type(rfn.repack_fields(dt), np.dtype[np.void]) + + +def test_structured_to_unstructured() -> None: + a = np.zeros(4, dtype=[("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + assert_type(rfn.structured_to_unstructured(a), npt.NDArray[Any]) + + +def unstructured_to_structured() -> None: + dt: np.dtype[np.void] = np.dtype([("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + a = np.arange(20, dtype=np.int32).reshape((4, 5)) + assert_type(rfn.unstructured_to_structured(a, dt), npt.NDArray[np.void]) + + +def test_apply_along_fields() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_assign_fields_by_name() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_require_fields() -> None: + a = np.ones(4, dtype=[("a", "i4"), ("b", "f8"), ("c", "u1")]) + assert_type( + rfn.require_fields(a, [("b", "f4"), ("c", "u1")]), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_stack_arrays() -> None: + x = np.zeros((int(2),), np.int32) + assert_type( + rfn.stack_arrays(x), + np.ndarray[tuple[int], np.dtype[np.int32]], + ) + + z = np.ones((int(2),), [("A", "|S3"), ("B", float)]) + zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + assert_type( + rfn.stack_arrays((z, zz)), + np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]], + ) + + +def test_find_duplicates() -> None: + ndtype = np.dtype([("a", int)]) + + a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]]) + assert_type( + rfn.find_duplicates(a, ignoremask=True, return_index=True), + tuple[ + np.ma.MaskedArray[Any, np.dtype[np.void]], + np.ndarray[Any, np.dtype[np.int_]], + ], + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/scalars.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/scalars.py new file mode 100644
index 0000000..655903a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/scalars.py @@ -0,0 +1,248 @@ +import datetime as dt + +import pytest +import numpy as np + +b = np.bool() +b_ = np.bool_() +u8 = np.uint64() +i8 = np.int64() +f8 = np.float64() +c16 = np.complex128() +U = np.str_() +S = np.bytes_() + + +# Construction +class D: + def __index__(self) -> int: + return 0 + + +class C: + def __complex__(self) -> complex: + return 3j + + +class B: + def __int__(self) -> int: + return 4 + + +class A: + def __float__(self) -> float: + return 4.0 + + +np.complex64(3j) +np.complex64(A()) +np.complex64(C()) +np.complex128(3j) +np.complex128(C()) +np.complex128(None) +np.complex64("1.2") +np.complex128(b"2j") + +np.int8(4) +np.int16(3.4) +np.int32(4) +np.int64(-1) +np.uint8(B()) +np.uint32() +np.int32("1") +np.int64(b"2") + +np.float16(A()) +np.float32(16) +np.float64(3.0) +np.float64(None) +np.float32("1") +np.float16(b"2.5") + +np.uint64(D()) +np.float32(D()) +np.complex64(D()) + +np.bytes_(b"hello") +np.bytes_("hello", 'utf-8') +np.bytes_("hello", encoding='utf-8') +np.str_("hello") +np.str_(b"hello", 'utf-8') +np.str_(b"hello", encoding='utf-8') + +# Array-ish semantics +np.int8().real +np.int16().imag +np.int32().data +np.int64().flags + +np.uint8().itemsize * 2 +np.uint16().ndim + 1 +np.uint32().strides +np.uint64().shape + +# Time structures +np.datetime64() +np.datetime64(0, "D") +np.datetime64(0, b"D") +np.datetime64(0, ('ms', 3)) +np.datetime64("2019") +np.datetime64(b"2019") +np.datetime64("2019", "D") +np.datetime64("2019", "us") +np.datetime64("2019", "as") +np.datetime64(np.datetime64()) +np.datetime64(np.datetime64()) +np.datetime64(dt.datetime(2000, 5, 3)) +np.datetime64(dt.datetime(2000, 5, 3), "D") +np.datetime64(dt.datetime(2000, 5, 3), "us") +np.datetime64(dt.datetime(2000, 5, 3), "as") +np.datetime64(dt.date(2000, 5, 3)) +np.datetime64(dt.date(2000, 5, 3), "D") +np.datetime64(dt.date(2000, 5, 3), "us") +np.datetime64(dt.date(2000, 5, 3), "as") +np.datetime64(None) +np.datetime64(None, "D") + +np.timedelta64() +np.timedelta64(0) +np.timedelta64(0, "D") +np.timedelta64(0, ('ms', 3)) +np.timedelta64(0, b"D") +np.timedelta64("3") +np.timedelta64(b"5") +np.timedelta64(np.timedelta64(2)) +np.timedelta64(dt.timedelta(2)) +np.timedelta64(None) +np.timedelta64(None, "D") + +np.void(1) +np.void(np.int64(1)) +np.void(True) +np.void(np.bool(True)) +np.void(b"test") +np.void(np.bytes_("test")) +np.void(object(), [("a", "O"), ("b", "O")]) +np.void(object(), dtype=[("a", "O"), ("b", "O")]) + +# Protocols +i8 = np.int64() +u8 = np.uint64() +f8 = np.float64() +c16 = np.complex128() +b = np.bool() +td = np.timedelta64() +U = np.str_("1") +S = np.bytes_("1") +AR = np.array(1, dtype=np.float64) + +int(i8) +int(u8) +int(f8) +int(b) +int(td) +int(U) +int(S) +int(AR) +with pytest.warns(np.exceptions.ComplexWarning): + int(c16) + +float(i8) +float(u8) +float(f8) +float(b_) +float(td) +float(U) +float(S) +float(AR) +with pytest.warns(np.exceptions.ComplexWarning): + float(c16) + +complex(i8) +complex(u8) +complex(f8) +complex(c16) +complex(b_) +complex(td) +complex(U) +complex(AR) + + +# Misc +c16.dtype +c16.real +c16.imag +c16.real.real +c16.real.imag +c16.ndim +c16.size +c16.itemsize +c16.shape +c16.strides +c16.squeeze() +c16.byteswap() +c16.transpose() + +# Aliases +np.byte() +np.short() +np.intc() +np.intp() +np.int_() +np.longlong() + +np.ubyte() +np.ushort() +np.uintc() +np.uintp() +np.uint() +np.ulonglong() + +np.half() +np.single() +np.double() 
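+# (editor's note -- illustrative sketch, not part of the upstream test file)
+# The constructors above and below exercise the C-named scalar aliases; they
+# are plain aliases, e.g. `np.double is np.float64` evaluates to True.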
+np.longdouble() + +np.csingle() +np.cdouble() +np.clongdouble() + +b.item() +i8.item() +u8.item() +f8.item() +c16.item() +U.item() +S.item() + +b.tolist() +i8.tolist() +u8.tolist() +f8.tolist() +c16.tolist() +U.tolist() +S.tolist() + +b.ravel() +i8.ravel() +u8.ravel() +f8.ravel() +c16.ravel() +U.ravel() +S.ravel() + +b.flatten() +i8.flatten() +u8.flatten() +f8.flatten() +c16.flatten() +U.flatten() +S.flatten() + +b.reshape(1) +i8.reshape(1) +u8.reshape(1) +f8.reshape(1) +c16.reshape(1) +U.reshape(1) +S.reshape(1) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/shape.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/shape.py new file mode 100644 index 0000000..286c8a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/shape.py @@ -0,0 +1,19 @@ +from typing import Any, NamedTuple, cast + +import numpy as np + + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +# Test variance of _ShapeT_co +def accepts_2d(a: np.ndarray[tuple[int, int], Any]) -> None: + return None + + +accepts_2d(np.empty(XYGrid(2, 2))) +accepts_2d(np.zeros(XYGrid(2, 2), dtype=int)) +accepts_2d(np.ones(XYGrid(2, 2), dtype=int)) +accepts_2d(np.full(XYGrid(2, 2), fill_value=5, dtype=int)) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/simple.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/simple.py new file mode 100644 index 0000000..8f44e6e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/simple.py @@ -0,0 +1,168 @@ +"""Simple expression that should pass with mypy.""" +import operator + +import numpy as np +import numpy.typing as npt +from collections.abc import Iterable + +# Basic checks +array = np.array([1, 2]) + + +def ndarray_func(x: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + return x + + +ndarray_func(np.array([1, 2], dtype=np.float64)) +array == 1 +array.dtype == float + +# Dtype construction +np.dtype(float) +np.dtype(np.float64) +np.dtype(None) +np.dtype("float64") +np.dtype(np.dtype(float)) +np.dtype(("U", 10)) +np.dtype((np.int32, (2, 2))) +# Define the arguments on the previous line to prevent bidirectional +# type inference in mypy from broadening the types. +two_tuples_dtype = [("R", "u1"), ("G", "u1"), ("B", "u1")] +np.dtype(two_tuples_dtype) + +three_tuples_dtype = [("R", "u1", 2)] +np.dtype(three_tuples_dtype) + +mixed_tuples_dtype = [("R", "u1"), ("G", np.str_, 1)] +np.dtype(mixed_tuples_dtype) + +shape_tuple_dtype = [("R", "u1", (2, 2))] +np.dtype(shape_tuple_dtype) + +shape_like_dtype = [("R", "u1", (2, 2)), ("G", np.str_, 1)] +np.dtype(shape_like_dtype) + +object_dtype = [("field1", object)] +np.dtype(object_dtype) + +np.dtype((np.int32, (np.int8, 4))) + +# Dtype comparison +np.dtype(float) == float +np.dtype(float) != np.float64 +np.dtype(float) < None +np.dtype(float) <= "float64" +np.dtype(float) > np.dtype(float) +np.dtype(float) >= np.dtype(("U", 10)) + +# Iteration and indexing +def iterable_func(x: Iterable[object]) -> Iterable[object]: + return x + + +iterable_func(array) +list(array) +iter(array) +zip(array, array) +array[1] +array[:] +array[...] 
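+# (editor's note -- illustrative sketch, not part of the upstream test file)
+# `array[:]` and `array[...]` return views of the same buffer, so
+# `array[:].base is array` evaluates to True; the assignments below exercise
+# `__setitem__` with the same index forms.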
+array[:] = 0 + +array_2d = np.ones((3, 3)) +array_2d[:2, :2] +array_2d[:2, :2] = 0 +array_2d[..., 0] +array_2d[..., 0] = 2 +array_2d[-1, -1] = None + +array_obj = np.zeros(1, dtype=np.object_) +array_obj[0] = slice(None) + +# Other special methods +len(array) +str(array) +array_scalar = np.array(1) +int(array_scalar) +float(array_scalar) +complex(array_scalar) +bytes(array_scalar) +operator.index(array_scalar) +bool(array_scalar) + +# comparisons +array < 1 +array <= 1 +array == 1 +array != 1 +array > 1 +array >= 1 +1 < array +1 <= array +1 == array +1 != array +1 > array +1 >= array + +# binary arithmetic +array + 1 +1 + array +array += 1 + +array - 1 +1 - array +array -= 1 + +array * 1 +1 * array +array *= 1 + +nonzero_array = np.array([1, 2]) +array / 1 +1 / nonzero_array +float_array = np.array([1.0, 2.0]) +float_array /= 1 + +array // 1 +1 // nonzero_array +array //= 1 + +array % 1 +1 % nonzero_array +array %= 1 + +divmod(array, 1) +divmod(1, nonzero_array) + +array ** 1 +1 ** array +array **= 1 + +array << 1 +1 << array +array <<= 1 + +array >> 1 +1 >> array +array >>= 1 + +array & 1 +1 & array +array &= 1 + +array ^ 1 +1 ^ array +array ^= 1 + +array | 1 +1 | array +array |= 1 + +# unary arithmetic +-array ++array +abs(array) +~array + +# Other methods +np.array([1, 2]).transpose() diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/simple_py3.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/simple_py3.py new file mode 100644 index 0000000..c05a1ce --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/simple_py3.py @@ -0,0 +1,6 @@ +import numpy as np + +array = np.array([1, 2]) + +# The @ operator is not in python 2 +array @ array diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufunc_config.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufunc_config.py new file mode 100644 index 0000000..778e1b5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufunc_config.py @@ -0,0 +1,64 @@ +"""Typing tests for `numpy._core._ufunc_config`.""" + +import numpy as np + + +def func1(a: str, b: int) -> None: + return None + + +def func2(a: str, b: int, c: float = 1.0) -> None: + return None + + +def func3(a: str, b: int) -> int: + return 0 + + +class Write1: + def write(self, a: str) -> None: + return None + + +class Write2: + def write(self, a: str, b: int = 1) -> None: + return None + + +class Write3: + def write(self, a: str) -> int: + return 0 + + +_err_default = np.geterr() +_bufsize_default = np.getbufsize() +_errcall_default = np.geterrcall() + +try: + np.seterr(all=None) + np.seterr(divide="ignore") + np.seterr(over="warn") + np.seterr(under="call") + np.seterr(invalid="raise") + np.geterr() + + np.setbufsize(4096) + np.getbufsize() + + np.seterrcall(func1) + np.seterrcall(func2) + np.seterrcall(func3) + np.seterrcall(Write1()) + np.seterrcall(Write2()) + np.seterrcall(Write3()) + np.geterrcall() + + with np.errstate(call=func1, all="call"): + pass + with np.errstate(call=Write1(), divide="log", over="log"): + pass + +finally: + np.seterr(**_err_default) + np.setbufsize(_bufsize_default) + np.seterrcall(_errcall_default) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufunclike.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufunclike.py new file mode 100644 index 0000000..f993939 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufunclike.py @@ -0,0 
+1,47 @@ +from __future__ import annotations +from typing import Any +import numpy as np + + +class Object: + def __ceil__(self) -> Object: + return self + + def __floor__(self) -> Object: + return self + + def __ge__(self, value: object) -> bool: + return True + + def __array__(self, dtype: np.typing.DTypeLike | None = None, + copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: + ret = np.empty((), dtype=object) + ret[()] = self + return ret + + +AR_LIKE_b = [True, True, False] +AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)] +AR_LIKE_i = [1, 2, 3] +AR_LIKE_f = [1.0, 2.0, 3.0] +AR_LIKE_O = [Object(), Object(), Object()] +AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5") + +np.fix(AR_LIKE_b) +np.fix(AR_LIKE_u) +np.fix(AR_LIKE_i) +np.fix(AR_LIKE_f) +np.fix(AR_LIKE_O) +np.fix(AR_LIKE_f, out=AR_U) + +np.isposinf(AR_LIKE_b) +np.isposinf(AR_LIKE_u) +np.isposinf(AR_LIKE_i) +np.isposinf(AR_LIKE_f) +np.isposinf(AR_LIKE_f, out=AR_U) + +np.isneginf(AR_LIKE_b) +np.isneginf(AR_LIKE_u) +np.isneginf(AR_LIKE_i) +np.isneginf(AR_LIKE_f) +np.isneginf(AR_LIKE_f, out=AR_U) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufuncs.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufuncs.py new file mode 100644 index 0000000..dbc61bb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/ufuncs.py @@ -0,0 +1,16 @@ +import numpy as np + +np.sin(1) +np.sin([1, 2, 3]) +np.sin(1, out=np.empty(1)) +np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)]) +np.sin(1, signature="D->D") +# NOTE: `np.generic` subclasses are not guaranteed to support addition; +# re-enable this once we can infer the exact return type of `np.sin(...)`. +# +# np.sin(1) + np.sin(1) +np.sin.types[0] +np.sin.__name__ +np.sin.__doc__ + +np.abs(np.array([1])) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py new file mode 100644 index 0000000..c351afb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py @@ -0,0 +1,6 @@ +import numpy.exceptions as ex + +ex.AxisError("test") +ex.AxisError(1, ndim=2) +ex.AxisError(1, ndim=2, msg_prefix="error") +ex.AxisError(1, ndim=2, msg_prefix=None) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi new file mode 100644 index 0000000..5dd78a1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -0,0 +1,720 @@ +import datetime as dt +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy._typing import _32Bit, _64Bit, _128Bit + +b: bool +c: complex +f: float +i: int + +c16: np.complex128 +c8: np.complex64 + +# Can't directly import `np.float128` as it is not available on all platforms +f16: np.floating[_128Bit] +f8: np.float64 +f4: np.float32 + +i8: np.int64 +i4: np.int32 + +u8: np.uint64 +u4: np.uint32 + +b_: np.bool + +M8: np.datetime64 +M8_none: np.datetime64[None] +M8_date: np.datetime64[dt.date] +M8_time: np.datetime64[dt.datetime] +M8_int: np.datetime64[int] +date: dt.date +time: dt.datetime + +m8: np.timedelta64 +m8_none: np.timedelta64[None] +m8_int: np.timedelta64[int] +m8_delta: np.timedelta64[dt.timedelta] +delta: dt.timedelta + +AR_b: npt.NDArray[np.bool] +AR_u:
npt.NDArray[np.uint32] +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_S: npt.NDArray[np.bytes_] +AR_U: npt.NDArray[np.str_] +AR_T: np.ndarray[tuple[Any, ...], np.dtypes.StringDType] +AR_floating: npt.NDArray[np.floating] +AR_number: npt.NDArray[np.number] +AR_Any: npt.NDArray[Any] + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_m: list[np.timedelta64] +AR_LIKE_M: list[np.datetime64] +AR_LIKE_O: list[np.object_] + + +# Array subtraction + +assert_type(AR_number - AR_number, npt.NDArray[np.number]) + +assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.uint32]) +assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_b - AR_LIKE_O, Any) + +assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_b, Any) + +assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.uint32]) +assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_u - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_u, Any) + +assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.int64]) +assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_i - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.int64]) +assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_i, Any) + +assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_f - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_f, 
npt.NDArray[np.float64]) +assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_O - AR_f, Any) + +assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_O - AR_c, Any) + +assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_u - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_i - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_m - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_m, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_m, Any) + +assert_type(AR_M - AR_LIKE_b, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_u, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_i, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_m, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_M, npt.NDArray[np.timedelta64]) +assert_type(AR_M - AR_LIKE_O, Any) + +assert_type(AR_LIKE_M - AR_M, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O - AR_M, Any) + +assert_type(AR_O - AR_LIKE_b, Any) +assert_type(AR_O - AR_LIKE_u, Any) +assert_type(AR_O - AR_LIKE_i, Any) +assert_type(AR_O - AR_LIKE_f, Any) +assert_type(AR_O - AR_LIKE_c, Any) +assert_type(AR_O - AR_LIKE_m, Any) +assert_type(AR_O - AR_LIKE_M, Any) +assert_type(AR_O - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_O, Any) +assert_type(AR_LIKE_u - AR_O, Any) +assert_type(AR_LIKE_i - AR_O, Any) +assert_type(AR_LIKE_f - AR_O, Any) +assert_type(AR_LIKE_c - AR_O, Any) +assert_type(AR_LIKE_m - AR_O, Any) +assert_type(AR_LIKE_M - AR_O, Any) +assert_type(AR_LIKE_O - AR_O, Any) + +# Array "true" division + +assert_type(AR_f / b, npt.NDArray[np.float64]) +assert_type(AR_f / i, npt.NDArray[np.float64]) +assert_type(AR_f / f, npt.NDArray[np.float64]) + +assert_type(b / AR_f, npt.NDArray[np.float64]) +assert_type(i / AR_f, npt.NDArray[np.float64]) +assert_type(f / AR_f, npt.NDArray[np.float64]) + +assert_type(AR_b / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_b, Any) + +assert_type(AR_u / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_u, npt.NDArray[np.float64]) 
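+# (editor's note -- illustrative sketch, not part of the upstream test file)
+# The float64 results above follow NumPy's promotion rule for true division:
+# boolean and integer operands are promoted to the default float, e.g.
+# `(np.array([1], np.uint32) / [2]).dtype` is `dtype('float64')` at runtime.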
+assert_type(AR_u / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_u, Any) + +assert_type(AR_i / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_i, Any) + +assert_type(AR_f / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_f, Any) + +assert_type(AR_m / AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_m, npt.NDArray[np.float64]) +assert_type(AR_m / AR_LIKE_O, Any) + +assert_type(AR_LIKE_m / AR_m, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_m, Any) + +assert_type(AR_O / AR_LIKE_b, Any) +assert_type(AR_O / AR_LIKE_u, Any) +assert_type(AR_O / AR_LIKE_i, Any) +assert_type(AR_O / AR_LIKE_f, Any) +assert_type(AR_O / AR_LIKE_m, Any) +assert_type(AR_O / AR_LIKE_M, Any) +assert_type(AR_O / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_O, Any) +assert_type(AR_LIKE_u / AR_O, Any) +assert_type(AR_LIKE_i / AR_O, Any) +assert_type(AR_LIKE_f / AR_O, Any) +assert_type(AR_LIKE_m / AR_O, Any) +assert_type(AR_LIKE_M / AR_O, Any) +assert_type(AR_LIKE_O / AR_O, Any) + +# Array floor division + +assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) +assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.uint32]) +assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_b // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) +assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating]) +assert_type(AR_LIKE_O // AR_b, Any) + +assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.uint32]) +assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_u // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger]) 
+assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating]) +assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_u, Any) + +assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.int64]) +assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_i // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.int64]) +assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating]) +assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_i, Any) + +assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_f, Any) + +assert_type(AR_m // AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_m, npt.NDArray[np.int64]) +assert_type(AR_m // AR_LIKE_O, Any) + +assert_type(AR_LIKE_m // AR_m, npt.NDArray[np.int64]) +assert_type(AR_LIKE_O // AR_m, Any) + +assert_type(AR_O // AR_LIKE_b, Any) +assert_type(AR_O // AR_LIKE_u, Any) +assert_type(AR_O // AR_LIKE_i, Any) +assert_type(AR_O // AR_LIKE_f, Any) +assert_type(AR_O // AR_LIKE_m, Any) +assert_type(AR_O // AR_LIKE_M, Any) +assert_type(AR_O // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_O, Any) +assert_type(AR_LIKE_u // AR_O, Any) +assert_type(AR_LIKE_i // AR_O, Any) +assert_type(AR_LIKE_f // AR_O, Any) +assert_type(AR_LIKE_m // AR_O, Any) +assert_type(AR_LIKE_M // AR_O, Any) +assert_type(AR_LIKE_O // AR_O, Any) + +# unary ops + +assert_type(-f16, np.floating[_128Bit]) +assert_type(-c16, np.complex128) +assert_type(-c8, np.complex64) +assert_type(-f8, np.float64) +assert_type(-f4, np.float32) +assert_type(-i8, np.int64) +assert_type(-i4, np.int32) +assert_type(-u8, np.uint64) +assert_type(-u4, np.uint32) +assert_type(-m8, np.timedelta64) +assert_type(-m8_none, np.timedelta64[None]) +assert_type(-m8_int, np.timedelta64[int]) +assert_type(-m8_delta, np.timedelta64[dt.timedelta]) +assert_type(-AR_f, npt.NDArray[np.float64]) + +assert_type(+f16, np.floating[_128Bit]) +assert_type(+c16, np.complex128) +assert_type(+c8, np.complex64) +assert_type(+f8, np.float64) +assert_type(+f4, np.float32) +assert_type(+i8, np.int64) +assert_type(+i4, np.int32) +assert_type(+u8, np.uint64) +assert_type(+u4, np.uint32) +assert_type(+m8_none, np.timedelta64[None]) +assert_type(+m8_int, np.timedelta64[int]) +assert_type(+m8_delta, np.timedelta64[dt.timedelta]) +assert_type(+AR_f, npt.NDArray[np.float64]) + +assert_type(abs(f16), np.floating[_128Bit]) +assert_type(abs(c16), np.float64) +assert_type(abs(c8), np.float32) +assert_type(abs(f8), np.float64) +assert_type(abs(f4), np.float32) +assert_type(abs(i8), np.int64) +assert_type(abs(i4), np.int32) +assert_type(abs(u8), np.uint64) +assert_type(abs(u4), 
np.uint32) +assert_type(abs(m8), np.timedelta64) +assert_type(abs(m8_none), np.timedelta64[None]) +assert_type(abs(m8_int), np.timedelta64[int]) +assert_type(abs(m8_delta), np.timedelta64[dt.timedelta]) +assert_type(abs(b_), np.bool) +assert_type(abs(AR_O), npt.NDArray[np.object_]) + +# Time structures + +assert_type(M8 + m8, np.datetime64) +assert_type(M8 + i, np.datetime64) +assert_type(M8 + i8, np.datetime64) +assert_type(M8 - M8, np.timedelta64) +assert_type(M8 - i, np.datetime64) +assert_type(M8 - i8, np.datetime64) + +assert_type(M8_none + m8, np.datetime64[None]) +assert_type(M8_none + i, np.datetime64[None]) +assert_type(M8_none + i8, np.datetime64[None]) +assert_type(M8_none - M8, np.timedelta64[None]) +assert_type(M8_none - m8, np.datetime64[None]) +assert_type(M8_none - i, np.datetime64[None]) +assert_type(M8_none - i8, np.datetime64[None]) + +assert_type(m8 + m8, np.timedelta64) +assert_type(m8 + i, np.timedelta64) +assert_type(m8 + i8, np.timedelta64) +assert_type(m8 - m8, np.timedelta64) +assert_type(m8 - i, np.timedelta64) +assert_type(m8 - i8, np.timedelta64) +assert_type(m8 * f, np.timedelta64) +assert_type(m8 * f4, np.timedelta64) +assert_type(m8 * np.True_, np.timedelta64) +assert_type(m8 / f, np.timedelta64) +assert_type(m8 / f4, np.timedelta64) +assert_type(m8 / m8, np.float64) +assert_type(m8 // m8, np.int64) +assert_type(m8 % m8, np.timedelta64) +assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) + +assert_type(m8_none + m8, np.timedelta64[None]) +assert_type(m8_none + i, np.timedelta64[None]) +assert_type(m8_none + i8, np.timedelta64[None]) +assert_type(m8_none - i, np.timedelta64[None]) +assert_type(m8_none - i8, np.timedelta64[None]) + +assert_type(m8_int + i, np.timedelta64[int]) +assert_type(m8_int + m8_delta, np.timedelta64[int]) +assert_type(m8_int + m8, np.timedelta64[int | None]) +assert_type(m8_int - i, np.timedelta64[int]) +assert_type(m8_int - m8_delta, np.timedelta64[int]) +assert_type(m8_int - m8, np.timedelta64[int | None]) + +assert_type(m8_delta + date, dt.date) +assert_type(m8_delta + time, dt.datetime) +assert_type(m8_delta + delta, dt.timedelta) +assert_type(m8_delta - delta, dt.timedelta) +assert_type(m8_delta / delta, float) +assert_type(m8_delta // delta, int) +assert_type(m8_delta % delta, dt.timedelta) +assert_type(divmod(m8_delta, delta), tuple[int, dt.timedelta]) + +# boolean + +assert_type(b_ / b, np.float64) +assert_type(b_ / b_, np.float64) +assert_type(b_ / i, np.float64) +assert_type(b_ / i8, np.float64) +assert_type(b_ / i4, np.float64) +assert_type(b_ / u8, np.float64) +assert_type(b_ / u4, np.float64) +assert_type(b_ / f, np.float64) +assert_type(b_ / f16, np.floating[_128Bit]) +assert_type(b_ / f8, np.float64) +assert_type(b_ / f4, np.float32) +assert_type(b_ / c, np.complex128) +assert_type(b_ / c16, np.complex128) +assert_type(b_ / c8, np.complex64) + +assert_type(b / b_, np.float64) +assert_type(b_ / b_, np.float64) +assert_type(i / b_, np.float64) +assert_type(i8 / b_, np.float64) +assert_type(i4 / b_, np.float64) +assert_type(u8 / b_, np.float64) +assert_type(u4 / b_, np.float64) +assert_type(f / b_, np.float64) +assert_type(f16 / b_, np.floating[_128Bit]) +assert_type(f8 / b_, np.float64) +assert_type(f4 / b_, np.float32) +assert_type(c / b_, np.complex128) +assert_type(c16 / b_, np.complex128) +assert_type(c8 / b_, np.complex64) + +# Complex + +assert_type(c16 + f16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c16 + c16, np.complex128) +assert_type(c16 + f8, np.complex128) +assert_type(c16 + 
i8, np.complex128) +assert_type(c16 + c8, np.complex128) +assert_type(c16 + f4, np.complex128) +assert_type(c16 + i4, np.complex128) +assert_type(c16 + b_, np.complex128) +assert_type(c16 + b, np.complex128) +assert_type(c16 + c, np.complex128) +assert_type(c16 + f, np.complex128) +assert_type(c16 + AR_f, npt.NDArray[np.complex128]) + +assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c16 + c16, np.complex128) +assert_type(f8 + c16, np.complex128) +assert_type(i8 + c16, np.complex128) +assert_type(c8 + c16, np.complex128 | np.complex64) +assert_type(f4 + c16, np.complex128 | np.complex64) +assert_type(i4 + c16, np.complex128) +assert_type(b_ + c16, np.complex128) +assert_type(b + c16, np.complex128) +assert_type(c + c16, np.complex128) +assert_type(f + c16, np.complex128) +assert_type(AR_f + c16, npt.NDArray[np.complex128]) + +assert_type(c8 + f16, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_128Bit, _128Bit]) +assert_type(c8 + c16, np.complex64 | np.complex128) +assert_type(c8 + f8, np.complex64 | np.complex128) +assert_type(c8 + i8, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_64Bit, _64Bit]) +assert_type(c8 + c8, np.complex64) +assert_type(c8 + f4, np.complex64) +assert_type(c8 + i4, np.complex64) +assert_type(c8 + b_, np.complex64) +assert_type(c8 + b, np.complex64) +assert_type(c8 + c, np.complex64 | np.complex128) +assert_type(c8 + f, np.complex64 | np.complex128) +assert_type(c8 + AR_f, npt.NDArray[np.complexfloating]) + +assert_type(f16 + c8, np.complexfloating[_128Bit, _128Bit] | np.complex64) +assert_type(c16 + c8, np.complex128) +assert_type(f8 + c8, np.complexfloating[_64Bit, _64Bit]) +assert_type(i8 + c8, np.complexfloating[_64Bit, _64Bit] | np.complex64) +assert_type(c8 + c8, np.complex64) +assert_type(f4 + c8, np.complex64) +assert_type(i4 + c8, np.complex64) +assert_type(b_ + c8, np.complex64) +assert_type(b + c8, np.complex64) +assert_type(c + c8, np.complex64 | np.complex128) +assert_type(f + c8, np.complex64 | np.complex128) +assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) + +# Float + +assert_type(f8 + f16, np.float64 | np.floating[_128Bit]) +assert_type(f8 + f8, np.float64) +assert_type(f8 + i8, np.float64) +assert_type(f8 + f4, np.float64) +assert_type(f8 + i4, np.float64) +assert_type(f8 + b_, np.float64) +assert_type(f8 + b, np.float64) +assert_type(f8 + c, np.float64 | np.complex128) +assert_type(f8 + f, np.float64) +assert_type(f8 + AR_f, npt.NDArray[np.float64]) + +assert_type(f16 + f8, np.floating[_128Bit] | np.float64) +assert_type(f8 + f8, np.float64) +assert_type(i8 + f8, np.float64) +assert_type(f4 + f8, np.float32 | np.float64) +assert_type(i4 + f8, np.float64) +assert_type(b_ + f8, np.float64) +assert_type(b + f8, np.float64) +assert_type(c + f8, np.complex128 | np.float64) +assert_type(f + f8, np.float64) +assert_type(AR_f + f8, npt.NDArray[np.float64]) + +assert_type(f4 + f16, np.float32 | np.floating[_128Bit]) +assert_type(f4 + f8, np.float32 | np.float64) +assert_type(f4 + i8, np.float32 | np.floating[_64Bit]) +assert_type(f4 + f4, np.float32) +assert_type(f4 + i4, np.float32) +assert_type(f4 + b_, np.float32) +assert_type(f4 + b, np.float32) +assert_type(f4 + c, np.complex64 | np.complex128) +assert_type(f4 + f, np.float32 | np.float64) +assert_type(f4 + AR_f, npt.NDArray[np.float64]) + +assert_type(f16 + f4, np.floating[_128Bit] | np.float32) +assert_type(f8 + f4, np.float64) +assert_type(i8 + f4, np.floating[_32Bit] | np.floating[_64Bit]) +assert_type(f4 + f4, np.float32)
+assert_type(i4 + f4, np.float32) +assert_type(b_ + f4, np.float32) +assert_type(b + f4, np.float32) +assert_type(c + f4, np.complex64 | np.complex128) +assert_type(f + f4, np.float64 | np.float32) +assert_type(AR_f + f4, npt.NDArray[np.float64]) + +# Int + +assert_type(i8 + i8, np.int64) +assert_type(i8 + u8, Any) +assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 + u4, Any) +assert_type(i8 + b_, np.int64) +assert_type(i8 + b, np.int64) +assert_type(i8 + c, np.complex128) +assert_type(i8 + f, np.float64) +assert_type(i8 + AR_f, npt.NDArray[np.float64]) + +assert_type(u8 + u8, np.uint64) +assert_type(u8 + i4, Any) +assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u8 + b_, np.uint64) +assert_type(u8 + b, np.uint64) +assert_type(u8 + c, np.complex128) +assert_type(u8 + f, np.float64) +assert_type(u8 + AR_f, npt.NDArray[np.float64]) + +assert_type(i8 + i8, np.int64) +assert_type(u8 + i8, Any) +assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(u4 + i8, Any) +assert_type(b_ + i8, np.int64) +assert_type(b + i8, np.int64) +assert_type(c + i8, np.complex128) +assert_type(f + i8, np.float64) +assert_type(AR_f + i8, npt.NDArray[np.float64]) + +assert_type(u8 + u8, np.uint64) +assert_type(i4 + u8, Any) +assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(b_ + u8, np.uint64) +assert_type(b + u8, np.uint64) +assert_type(c + u8, np.complex128) +assert_type(f + u8, np.float64) +assert_type(AR_f + u8, npt.NDArray[np.float64]) + +assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i4, np.int32) +assert_type(i4 + b_, np.int32) +assert_type(i4 + b, np.int32) +assert_type(i4 + AR_f, npt.NDArray[np.float64]) + +assert_type(u4 + i8, Any) +assert_type(u4 + i4, Any) +assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u4 + u4, np.uint32) +assert_type(u4 + b_, np.uint32) +assert_type(u4 + b, np.uint32) +assert_type(u4 + AR_f, npt.NDArray[np.float64]) + +assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i4, np.int32) +assert_type(b_ + i4, np.int32) +assert_type(b + i4, np.int32) +assert_type(AR_f + i4, npt.NDArray[np.float64]) + +assert_type(i8 + u4, Any) +assert_type(i4 + u4, Any) +assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u4 + u4, np.uint32) +assert_type(b_ + u4, np.uint32) +assert_type(b + u4, np.uint32) +assert_type(AR_f + u4, npt.NDArray[np.float64]) + +# Any + +assert_type(AR_Any + 2, npt.NDArray[Any]) + +# regression tests for https://github.com/numpy/numpy/issues/28805 + +assert_type(AR_floating + f, npt.NDArray[np.floating]) +assert_type(AR_floating - f, npt.NDArray[np.floating]) +assert_type(AR_floating * f, npt.NDArray[np.floating]) +assert_type(AR_floating ** f, npt.NDArray[np.floating]) +assert_type(AR_floating / f, npt.NDArray[np.floating]) +assert_type(AR_floating // f, npt.NDArray[np.floating]) +assert_type(AR_floating % f, npt.NDArray[np.floating]) +assert_type(divmod(AR_floating, f), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +assert_type(f + AR_floating, npt.NDArray[np.floating]) +assert_type(f - AR_floating, npt.NDArray[np.floating]) +assert_type(f * AR_floating, npt.NDArray[np.floating]) +assert_type(f ** AR_floating, npt.NDArray[np.floating]) +assert_type(f / AR_floating, npt.NDArray[np.floating]) +assert_type(f // AR_floating, npt.NDArray[np.floating]) 
+assert_type(f % AR_floating, npt.NDArray[np.floating]) +assert_type(divmod(f, AR_floating), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +# character-like + +assert_type(AR_S + b"", npt.NDArray[np.bytes_]) +assert_type(AR_S + [b""], npt.NDArray[np.bytes_]) +assert_type([b""] + AR_S, npt.NDArray[np.bytes_]) +assert_type(AR_S + AR_S, npt.NDArray[np.bytes_]) + +assert_type(AR_U + "", npt.NDArray[np.str_]) +assert_type(AR_U + [""], npt.NDArray[np.str_]) +assert_type("" + AR_U, npt.NDArray[np.str_]) +assert_type([""] + AR_U, npt.NDArray[np.str_]) +assert_type(AR_U + AR_U, npt.NDArray[np.str_]) + +assert_type(AR_T + "", np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + [""], np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type("" + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type([""] + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_U, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_U + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) + +assert_type(AR_S * i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_S` as `list[int]` +assert_type(AR_i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(AR_U * i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_U` as `list[int]` +assert_type(AR_i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(AR_T * i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +# mypy incorrectly infers `AR_LIKE_i * AR_T` as `list[int]` +assert_type(AR_i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/array_api_info.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/array_api_info.pyi new file mode 100644 index 0000000..765f9ef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/array_api_info.pyi @@ -0,0 +1,70 @@ +from typing import Literal, Never, assert_type + +import numpy as np + +info = np.__array_namespace_info__() + +assert_type(info.__module__, Literal["numpy"]) + +assert_type(info.default_device(), Literal["cpu"]) +assert_type(info.devices()[0], Literal["cpu"]) +assert_type(info.devices()[-1], Literal["cpu"]) + +assert_type(info.capabilities()["boolean indexing"], Literal[True]) +assert_type(info.capabilities()["data-dependent shapes"], Literal[True]) + +assert_type(info.default_dtypes()["real floating"], np.dtype[np.float64]) +assert_type(info.default_dtypes()["complex floating"], np.dtype[np.complex128]) +assert_type(info.default_dtypes()["integral"], np.dtype[np.int_]) 
+assert_type(info.default_dtypes()["indexing"], np.dtype[np.intp]) + +assert_type(info.dtypes()["bool"], np.dtype[np.bool]) +assert_type(info.dtypes()["int8"], np.dtype[np.int8]) +assert_type(info.dtypes()["uint8"], np.dtype[np.uint8]) +assert_type(info.dtypes()["float32"], np.dtype[np.float32]) +assert_type(info.dtypes()["complex64"], np.dtype[np.complex64]) + +assert_type(info.dtypes(kind="bool")["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind="signed integer")["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind="unsigned integer")["uint64"], np.dtype[np.uint64]) +assert_type(info.dtypes(kind="integral")["int32"], np.dtype[np.int32]) +assert_type(info.dtypes(kind="integral")["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind="real floating")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="complex floating")["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind="numeric")["int16"], np.dtype[np.int16]) +assert_type(info.dtypes(kind="numeric")["uint16"], np.dtype[np.uint16]) +assert_type(info.dtypes(kind="numeric")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="numeric")["complex128"], np.dtype[np.complex128]) + +assert_type(info.dtypes(kind=()), dict[Never, Never]) + +assert_type(info.dtypes(kind=("bool",))["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind=("signed integer",))["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind=("integral",))["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind=("complex floating",))["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind=("numeric",))["float64"], np.dtype[np.float64]) + +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["int8"], + np.dtype[np.int8], +) +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["uint8"], + np.dtype[np.uint8], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["int16"], + np.dtype[np.int16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["uint16"], + np.dtype[np.uint16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["float32"], + np.dtype[np.float32], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["complex64"], + np.dtype[np.complex64], +) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi new file mode 100644 index 0000000..45cc986 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -0,0 +1,249 @@ +import sys +from collections import deque +from pathlib import Path +from typing import Any, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) + +class SubClass(npt.NDArray[_ScalarT_co]): ... + +i8: np.int64 + +A: npt.NDArray[np.float64] +B: SubClass[np.float64] +C: list[int] +D: SubClass[np.float64 | np.int64] + +mixed_shape: tuple[int, np.int64] + +def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... 
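+# (editor's note -- illustrative sketch, not part of the upstream test file)
+# `SubClass` lets the assertions below distinguish subclass-preserving
+# constructors (`empty_like`, `asanyarray`, `require`) from coercing ones
+# (`asarray`, `ascontiguousarray`), which drop the subtype.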
+ +assert_type(np.empty_like(A), npt.NDArray[np.float64]) +assert_type(np.empty_like(B), SubClass[np.float64]) +assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.array(A), npt.NDArray[np.float64]) +assert_type(np.array(B), npt.NDArray[np.float64]) +assert_type(np.array([1, 1.0]), npt.NDArray[Any]) +assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) +assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.array(A, like=A), npt.NDArray[np.float64]) +assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) +assert_type(np.array(B, subok=True), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) +assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) + +assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) +assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.zeros(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) +assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.concatenate(A), npt.NDArray[np.float64]) +assert_type(np.concatenate([A, A]), Any) # pyright correctly infers this as NDArray[float64] +assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) +assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) +assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) +assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) +assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) + +assert_type(np.asarray(A), npt.NDArray[np.float64]) +assert_type(np.asarray(B), npt.NDArray[np.float64]) +assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.asanyarray(A), npt.NDArray[np.float64]) +assert_type(np.asanyarray(B), SubClass[np.float64]) +assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) +assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) +assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) +assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) +assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asfortranarray(A, dtype='c16'), npt.NDArray[Any]) + 
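+# (editor's note -- illustrative sketch, not part of the upstream test file)
+# In the `fromstring`/`fromfile` text-mode assertions below, omitting `dtype`
+# falls back to the float64 default, e.g. `np.fromstring("1 2", sep=" ")`
+# yields `array([1., 2.])`.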
+assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) +assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) + +assert_type(np.fromfile("test.txt", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), npt.NDArray[Any]) +with open("test.txt") as f: + assert_type(np.fromfile(f, sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(b"test.txt", sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(Path("test.txt"), sep=" "), npt.NDArray[np.float64]) + +assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromiter("12345", float), npt.NDArray[Any]) + +assert_type(np.frombuffer(A), npt.NDArray[np.float64]) +assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) +assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) +assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) +assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) +assert_type(np.arange(start=0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) +assert_type(np.arange(np.timedelta64(0)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(0, np.timedelta64(10)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), np.ndarray[tuple[int], np.dtype[np.datetime64]]) +assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) +assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype]) +assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) + +assert_type(np.require(A), npt.NDArray[np.float64]) +assert_type(np.require(B), SubClass[np.float64]) +assert_type(np.require(B, requirements=None), SubClass[np.float64]) +assert_type(np.require(B, dtype=int), npt.NDArray[Any]) +assert_type(np.require(B, requirements="E"), npt.NDArray[Any]) +assert_type(np.require(B, requirements=["ENSUREARRAY"]), npt.NDArray[Any]) +assert_type(np.require(B, requirements={"F", "E"}), npt.NDArray[Any]) +assert_type(np.require(B, requirements=["C", "OWNDATA"]), SubClass[np.float64]) +assert_type(np.require(B, requirements="W"), SubClass[np.float64]) +assert_type(np.require(B, requirements="A"), SubClass[np.float64]) +assert_type(np.require(C), npt.NDArray[Any]) + +assert_type(np.linspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) +assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.float64], np.float64]) +assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating], np.complexfloating]) +assert_type(np.linspace(0, 10, 
retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) +assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) + +assert_type(np.logspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) + +assert_type(np.geomspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) + +assert_type(np.zeros_like(A), npt.NDArray[np.float64]) +assert_type(np.zeros_like(C), npt.NDArray[Any]) +assert_type(np.zeros_like(A, dtype=float), npt.NDArray[Any]) +assert_type(np.zeros_like(B), SubClass[np.float64]) +assert_type(np.zeros_like(B, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.ones_like(A), npt.NDArray[np.float64]) +assert_type(np.ones_like(C), npt.NDArray[Any]) +assert_type(np.ones_like(A, dtype=float), npt.NDArray[Any]) +assert_type(np.ones_like(B), SubClass[np.float64]) +assert_type(np.ones_like(B, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.full_like(A, i8), npt.NDArray[np.float64]) +assert_type(np.full_like(C, i8), npt.NDArray[Any]) +assert_type(np.full_like(A, i8, dtype=int), npt.NDArray[Any]) +assert_type(np.full_like(B, i8), SubClass[np.float64]) +assert_type(np.full_like(B, i8, dtype=np.int64), npt.NDArray[np.int64]) + +_size: int +_shape_0d: tuple[()] +_shape_1d: tuple[int] +_shape_2d: tuple[int, int] +_shape_nd: tuple[int, ...] +_shape_like: list[int] + +assert_type(np.ones(_shape_0d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.ones(_size), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]]) +assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.ones(_shape_like), npt.NDArray[np.float64]) +assert_type(np.ones(_shape_like, dtype=np.dtypes.Int64DType()), np.ndarray[Any, np.dtypes.Int64DType]) +assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any]) +assert_type(np.ones(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.full(_size, i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.full(_shape_2d, i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.full(_shape_like, i8), npt.NDArray[np.int64]) +assert_type(np.full(_shape_like, 42), npt.NDArray[Any]) +assert_type(np.full(_size, i8, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.full(_size, i8, dtype=float), np.ndarray[tuple[int], np.dtype]) +assert_type(np.full(_shape_like, 42, dtype=float), npt.NDArray[Any]) +assert_type(np.full(_shape_0d, i8, dtype=object), np.ndarray[tuple[()], np.dtype]) + +assert_type(np.indices([1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...]) + +assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) + +assert_type(np.identity(10), npt.NDArray[np.float64]) +assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) + +assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_1d(C), npt.NDArray[Any]) 
+assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(A, A, A), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.atleast_1d(C, C, C), tuple[npt.NDArray[Any], ...]) + +assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_2d(A, A, A), tuple[npt.NDArray[np.float64], ...]) + +assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) + +assert_type(np.vstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.vstack([A, C]), npt.NDArray[Any]) +assert_type(np.vstack([C, C]), npt.NDArray[Any]) + +assert_type(np.hstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) + +assert_type(np.stack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.stack([A, C]), npt.NDArray[Any]) +assert_type(np.stack([C, C]), npt.NDArray[Any]) +assert_type(np.stack([A, A], axis=0), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.stack([A, A], out=B), SubClass[np.float64]) + +assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.block(C), npt.NDArray[Any]) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer + + def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... + + buffer: Buffer + assert_type(create_array(buffer), npt.NDArray[Any]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi new file mode 100644 index 0000000..c5a443d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi @@ -0,0 +1,22 @@ +from collections.abc import Mapping +from typing import Any, SupportsIndex, assert_type + +import numpy as np +import numpy.typing as npt + +def mode_func( + ar: npt.NDArray[np.number], + width: tuple[int, int], + iaxis: SupportsIndex, + kwargs: Mapping[str, Any], +) -> None: ... 
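+
+# `mode_func` has the callable form accepted by `np.pad`'s `mode` parameter;
+# extra keywords such as `a=1, b=2` below are forwarded to it through `kwargs`.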
+ +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_LIKE: list[int] + +assert_type(np.pad(AR_i8, (2, 3), "constant"), npt.NDArray[np.int64]) +assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any]) + +assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) +assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arrayprint.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arrayprint.pyi new file mode 100644 index 0000000..3b339ed --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -0,0 +1,25 @@ +import contextlib +from collections.abc import Callable +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy._core.arrayprint import _FormatOptions + +AR: npt.NDArray[np.int64] +func_float: Callable[[np.floating], str] +func_int: Callable[[np.integer], str] + +assert_type(np.get_printoptions(), _FormatOptions) +assert_type( + np.array2string(AR, formatter={'float_kind': func_float, 'int_kind': func_int}), + str, +) +assert_type(np.format_float_scientific(1.0), str) +assert_type(np.format_float_positional(1), str) +assert_type(np.array_repr(AR), str) +assert_type(np.array_str(AR), str) + +assert_type(np.printoptions(), contextlib._GeneratorContextManager[_FormatOptions]) +with np.printoptions() as dct: + assert_type(dct, _FormatOptions) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi new file mode 100644 index 0000000..7e5ca5c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -0,0 +1,74 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib._arraysetops_impl import ( + UniqueAllResult, + UniqueCountsResult, + UniqueInverseResult, +) + +AR_b: npt.NDArray[np.bool] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] + +AR_LIKE_f8: list[float] + +assert_type(np.ediff1d(AR_b), npt.NDArray[np.int8]) +assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), npt.NDArray[np.int64]) +assert_type(np.ediff1d(AR_M), npt.NDArray[np.timedelta64]) +assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) +assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) + +assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type( + np.intersect1d(AR_f8, AR_f8, return_indices=True), + tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]], +) + +assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isin(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool]) +assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) + +assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) 
+assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unique(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.unique(AR_f8, return_index=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]]) + +assert_type(np.unique_all(AR_f8), UniqueAllResult[np.float64]) +assert_type(np.unique_all(AR_LIKE_f8), UniqueAllResult[Any]) +assert_type(np.unique_counts(AR_f8), UniqueCountsResult[np.float64]) +assert_type(np.unique_counts(AR_LIKE_f8), UniqueCountsResult[Any]) +assert_type(np.unique_inverse(AR_f8), UniqueInverseResult[np.float64]) +assert_type(np.unique_inverse(AR_LIKE_f8), UniqueInverseResult[Any]) +assert_type(np.unique_values(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unique_values(AR_LIKE_f8), npt.NDArray[Any]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi new file mode 100644 index 0000000..470160c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -0,0 +1,27 @@ +from collections.abc import Generator +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +ar_iter = np.lib.Arrayterator(AR_i8) + +assert_type(ar_iter.var, npt.NDArray[np.int64]) +assert_type(ar_iter.buf_size, int | None) +assert_type(ar_iter.start, list[int]) +assert_type(ar_iter.stop, list[int]) +assert_type(ar_iter.step, list[int]) 
+assert_type(ar_iter.shape, tuple[Any, ...]) +assert_type(ar_iter.flat, Generator[np.int64, None, None]) + +assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) + +for i in ar_iter: + assert_type(i, npt.NDArray[np.int64]) + +assert_type(ar_iter[0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[...], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[:], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi new file mode 100644 index 0000000..6c6b561 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -0,0 +1,168 @@ +from typing import Any, TypeAlias, assert_type +from typing import Literal as L + +import numpy as np +import numpy.typing as npt +from numpy._typing import _32Bit, _64Bit + +FalseType: TypeAlias = L[False] +TrueType: TypeAlias = L[True] + +i4: np.int32 +i8: np.int64 + +u4: np.uint32 +u8: np.uint64 + +b_: np.bool[bool] +b0_: np.bool[FalseType] +b1_: np.bool[TrueType] + +b: bool +b0: FalseType +b1: TrueType + +i: int + +AR: npt.NDArray[np.int32] + +assert_type(i8 << i8, np.int64) +assert_type(i8 >> i8, np.int64) +assert_type(i8 | i8, np.int64) +assert_type(i8 ^ i8, np.int64) +assert_type(i8 & i8, np.int64) + +assert_type(i8 << AR, npt.NDArray[np.signedinteger]) +assert_type(i8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(i8 | AR, npt.NDArray[np.signedinteger]) +assert_type(i8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(i8 & AR, npt.NDArray[np.signedinteger]) + +assert_type(i4 << i4, np.int32) +assert_type(i4 >> i4, np.int32) +assert_type(i4 | i4, np.int32) +assert_type(i4 ^ i4, np.int32) +assert_type(i4 & i4, np.int32) + +assert_type(i8 << i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 >> i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 | i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 ^ i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 & i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) + +assert_type(i8 << b_, np.int64) +assert_type(i8 >> b_, np.int64) +assert_type(i8 | b_, np.int64) +assert_type(i8 ^ b_, np.int64) +assert_type(i8 & b_, np.int64) + +assert_type(i8 << b, np.int64) +assert_type(i8 >> b, np.int64) +assert_type(i8 | b, np.int64) +assert_type(i8 ^ b, np.int64) +assert_type(i8 & b, np.int64) + +assert_type(u8 << u8, np.uint64) +assert_type(u8 >> u8, np.uint64) +assert_type(u8 | u8, np.uint64) +assert_type(u8 ^ u8, np.uint64) +assert_type(u8 & u8, np.uint64) + +assert_type(u8 << AR, npt.NDArray[np.signedinteger]) +assert_type(u8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(u8 | AR, npt.NDArray[np.signedinteger]) +assert_type(u8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(u8 & AR, npt.NDArray[np.signedinteger]) + +assert_type(u4 << u4, np.uint32) +assert_type(u4 >> u4, np.uint32) +assert_type(u4 | u4, np.uint32) +assert_type(u4 ^ u4, np.uint32) +assert_type(u4 & u4, np.uint32) + +assert_type(u4 << i4, np.signedinteger) +assert_type(u4 >> i4, np.signedinteger) +assert_type(u4 | i4, np.signedinteger) +assert_type(u4 ^ i4, np.signedinteger) +assert_type(u4 & i4, 
np.signedinteger) + +assert_type(u4 << i, np.signedinteger) +assert_type(u4 >> i, np.signedinteger) +assert_type(u4 | i, np.signedinteger) +assert_type(u4 ^ i, np.signedinteger) +assert_type(u4 & i, np.signedinteger) + +assert_type(u8 << b_, np.uint64) +assert_type(u8 >> b_, np.uint64) +assert_type(u8 | b_, np.uint64) +assert_type(u8 ^ b_, np.uint64) +assert_type(u8 & b_, np.uint64) + +assert_type(u8 << b, np.uint64) +assert_type(u8 >> b, np.uint64) +assert_type(u8 | b, np.uint64) +assert_type(u8 ^ b, np.uint64) +assert_type(u8 & b, np.uint64) + +assert_type(b_ << b_, np.int8) +assert_type(b_ >> b_, np.int8) +assert_type(b_ | b_, np.bool) +assert_type(b_ ^ b_, np.bool) +assert_type(b_ & b_, np.bool) + +assert_type(b_ << AR, npt.NDArray[np.signedinteger]) +assert_type(b_ >> AR, npt.NDArray[np.signedinteger]) +assert_type(b_ | AR, npt.NDArray[np.signedinteger]) +assert_type(b_ ^ AR, npt.NDArray[np.signedinteger]) +assert_type(b_ & AR, npt.NDArray[np.signedinteger]) + +assert_type(b_ << b, np.int8) +assert_type(b_ >> b, np.int8) +assert_type(b_ | b, np.bool) +assert_type(b_ ^ b, np.bool) +assert_type(b_ & b, np.bool) + +assert_type(b_ << i, np.int_) +assert_type(b_ >> i, np.int_) +assert_type(b_ | i, np.bool | np.int_) +assert_type(b_ ^ i, np.bool | np.int_) +assert_type(b_ & i, np.bool | np.int_) + +assert_type(~i8, np.int64) +assert_type(~i4, np.int32) +assert_type(~u8, np.uint64) +assert_type(~u4, np.uint32) +assert_type(~b_, np.bool) +assert_type(~b0_, np.bool[TrueType]) +assert_type(~b1_, np.bool[FalseType]) +assert_type(~AR, npt.NDArray[np.int32]) + +assert_type(b_ | b0_, np.bool) +assert_type(b0_ | b_, np.bool) +assert_type(b_ | b1_, np.bool[TrueType]) +assert_type(b1_ | b_, np.bool[TrueType]) + +assert_type(b_ ^ b0_, np.bool) +assert_type(b0_ ^ b_, np.bool) +assert_type(b_ ^ b1_, np.bool) +assert_type(b1_ ^ b_, np.bool) + +assert_type(b_ & b0_, np.bool[FalseType]) +assert_type(b0_ & b_, np.bool[FalseType]) +assert_type(b_ & b1_, np.bool) +assert_type(b1_ & b_, np.bool) + +assert_type(b0_ | b0_, np.bool[FalseType]) +assert_type(b0_ | b1_, np.bool[TrueType]) +assert_type(b1_ | b0_, np.bool[TrueType]) +assert_type(b1_ | b1_, np.bool[TrueType]) + +assert_type(b0_ ^ b0_, np.bool[FalseType]) +assert_type(b0_ ^ b1_, np.bool[TrueType]) +assert_type(b1_ ^ b0_, np.bool[TrueType]) +assert_type(b1_ ^ b1_, np.bool[FalseType]) + +assert_type(b0_ & b0_, np.bool[FalseType]) +assert_type(b0_ & b1_, np.bool[FalseType]) +assert_type(b1_ & b0_, np.bool[FalseType]) +assert_type(b1_ & b1_, np.bool[TrueType]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/char.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/char.pyi new file mode 100644 index 0000000..5c6af73 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/char.pyi @@ -0,0 +1,224 @@ +from typing import TypeAlias, assert_type + +import numpy as np +import numpy._typing as np_t +import numpy.typing as npt + +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_T: AR_T_alias + +assert_type(np.char.equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) 
+assert_type(np.char.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.greater(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.greater(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.greater(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.less(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.less(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.less(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.multiply(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) +assert_type(np.char.multiply(AR_T, 5), AR_T_alias) + +assert_type(np.char.mod(AR_U, "test"), npt.NDArray[np.str_]) +assert_type(np.char.mod(AR_S, "test"), npt.NDArray[np.bytes_]) +assert_type(np.char.mod(AR_T, "test"), AR_T_alias) + +assert_type(np.char.capitalize(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.capitalize(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.capitalize(AR_T), AR_T_alias) + +assert_type(np.char.center(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) +assert_type(np.char.center(AR_T, 5), AR_T_alias) + +assert_type(np.char.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.char.encode(AR_T), npt.NDArray[np.bytes_]) +assert_type(np.char.decode(AR_S), npt.NDArray[np.str_]) + +assert_type(np.char.expandtabs(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) +assert_type(np.char.expandtabs(AR_T), AR_T_alias) + +assert_type(np.char.join(AR_U, "_"), npt.NDArray[np.str_]) +assert_type(np.char.join(AR_S, [b"_", b""]), npt.NDArray[np.bytes_]) +assert_type(np.char.join(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_T, 5), AR_T_alias) +assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) + +assert_type(np.char.rjust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rjust(AR_T, 5), AR_T_alias) +assert_type(np.char.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) + +assert_type(np.char.lstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.lstrip(AR_T), AR_T_alias) +assert_type(np.char.lstrip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.rstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.rstrip(AR_T), AR_T_alias) +assert_type(np.char.rstrip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.strip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.strip(AR_T), AR_T_alias) +assert_type(np.char.strip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9), 
npt.NDArray[np.int_]) +assert_type(np.char.count(AR_T, AR_T, start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_]) + +assert_type(np.char.partition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.char.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.partition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.char.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rpartition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.char.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) +assert_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) +assert_type(np.char.replace(AR_T, "_", "_"), AR_TU_alias) + +assert_type(np.char.split(AR_U, "_"), npt.NDArray[np.object_]) +assert_type(np.char.split(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.split(AR_T, "_"), npt.NDArray[np.object_]) + +assert_type(np.char.rsplit(AR_U, "_"), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_T, "_"), npt.NDArray[np.object_]) + +assert_type(np.char.splitlines(AR_U), npt.NDArray[np.object_]) +assert_type(np.char.splitlines(AR_S, keepends=[True, True, False]), npt.NDArray[np.object_]) +assert_type(np.char.splitlines(AR_T), npt.NDArray[np.object_]) + +assert_type(np.char.lower(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.lower(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.lower(AR_T), AR_T_alias) + +assert_type(np.char.upper(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.upper(AR_T), AR_T_alias) + +assert_type(np.char.swapcase(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.swapcase(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.swapcase(AR_T), AR_T_alias) + +assert_type(np.char.title(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.title(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.title(AR_T), AR_T_alias) + +assert_type(np.char.zfill(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) +assert_type(np.char.zfill(AR_T, 5), AR_T_alias) + +assert_type(np.char.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.char.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.char.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.char.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.char.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + 
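+# For the StringDType array `AR_T`, results generally reveal `AR_T_alias`;
+# operations that mix in explicit `str` arguments (e.g. `join`, `ljust` and
+# `rjust` with `fillchar`, the `*strip` functions with a `chars` argument,
+# `partition`, `replace`) widen to `AR_TU_alias`, the union with
+# `NDArray[np.str_]`.
+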
+assert_type(np.char.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.isalpha(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isalnum(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isdecimal(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isdecimal(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isdigit(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.islower(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isnumeric(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isnumeric(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isspace(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.istitle(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isupper(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.str_len(AR_U), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_S), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_T), npt.NDArray[np.int_]) + +assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) +assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) +assert_type(np.char.translate(AR_T, ""), AR_T_alias) + +assert_type(np.char.array(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) + +assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, 
unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/chararray.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/chararray.pyi new file mode 100644 index 0000000..b5f4392 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/chararray.pyi @@ -0,0 +1,137 @@ +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +_StrCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] + +AR_U: _StrCharArray +AR_S: _BytesCharArray + +assert_type(AR_U == AR_U, npt.NDArray[np.bool]) +assert_type(AR_S == AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U != AR_U, npt.NDArray[np.bool]) +assert_type(AR_S != AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U >= AR_U, npt.NDArray[np.bool]) +assert_type(AR_S >= AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U <= AR_U, npt.NDArray[np.bool]) +assert_type(AR_S <= AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U > AR_U, npt.NDArray[np.bool]) +assert_type(AR_S > AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U < AR_U, npt.NDArray[np.bool]) +assert_type(AR_S < AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U * 5, _StrCharArray) +assert_type(AR_S * [5], _BytesCharArray) + +assert_type(AR_U % "test", _StrCharArray) +assert_type(AR_S % b"test", _BytesCharArray) + +assert_type(AR_U.capitalize(), _StrCharArray) +assert_type(AR_S.capitalize(), _BytesCharArray) + +assert_type(AR_U.center(5), _StrCharArray) +assert_type(AR_S.center([2, 3, 4], b"a"), _BytesCharArray) + +assert_type(AR_U.encode(), _BytesCharArray) +assert_type(AR_S.decode(), _StrCharArray) + +assert_type(AR_U.expandtabs(), _StrCharArray) +assert_type(AR_S.expandtabs(tabsize=4), _BytesCharArray) + +assert_type(AR_U.join("_"), _StrCharArray) +assert_type(AR_S.join([b"_", b""]), _BytesCharArray) + +assert_type(AR_U.ljust(5), _StrCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rjust(5), _StrCharArray) +assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) + +assert_type(AR_U.lstrip(), _StrCharArray) +assert_type(AR_S.lstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.rstrip(), _StrCharArray) +assert_type(AR_S.rstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.strip(), _StrCharArray) +assert_type(AR_S.strip(chars=b"_"), _BytesCharArray) + +assert_type(AR_U.partition("\n"), _StrCharArray) +assert_type(AR_S.partition([b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rpartition("\n"), _StrCharArray) +assert_type(AR_S.rpartition([b"a", b"b", b"c"]), _BytesCharArray) + +assert_type(AR_U.replace("_", "-"), _StrCharArray) +assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), _BytesCharArray) + +assert_type(AR_U.split("_"), npt.NDArray[np.object_]) +assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(AR_U.rsplit("_"), npt.NDArray[np.object_]) +assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) + +assert_type(AR_U.splitlines(), 
npt.NDArray[np.object_]) +assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_]) + +assert_type(AR_U.swapcase(), _StrCharArray) +assert_type(AR_S.swapcase(), _BytesCharArray) + +assert_type(AR_U.title(), _StrCharArray) +assert_type(AR_S.title(), _BytesCharArray) + +assert_type(AR_U.upper(), _StrCharArray) +assert_type(AR_S.upper(), _BytesCharArray) + +assert_type(AR_U.zfill(5), _StrCharArray) +assert_type(AR_S.zfill([2, 3, 4]), _BytesCharArray) + +assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.endswith("a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(AR_S.endswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(AR_U.startswith("a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(AR_S.startswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) + +assert_type(AR_U.find("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.find([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(AR_U.rfind("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.rfind([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.index("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.index([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(AR_U.rindex("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.rindex([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.isalpha(), npt.NDArray[np.bool]) +assert_type(AR_S.isalpha(), npt.NDArray[np.bool]) + +assert_type(AR_U.isalnum(), npt.NDArray[np.bool]) +assert_type(AR_S.isalnum(), npt.NDArray[np.bool]) + +assert_type(AR_U.isdecimal(), npt.NDArray[np.bool]) +assert_type(AR_S.isdecimal(), npt.NDArray[np.bool]) + +assert_type(AR_U.isdigit(), npt.NDArray[np.bool]) +assert_type(AR_S.isdigit(), npt.NDArray[np.bool]) + +assert_type(AR_U.islower(), npt.NDArray[np.bool]) +assert_type(AR_S.islower(), npt.NDArray[np.bool]) + +assert_type(AR_U.isnumeric(), npt.NDArray[np.bool]) +assert_type(AR_S.isnumeric(), npt.NDArray[np.bool]) + +assert_type(AR_U.isspace(), npt.NDArray[np.bool]) +assert_type(AR_S.isspace(), npt.NDArray[np.bool]) + +assert_type(AR_U.istitle(), npt.NDArray[np.bool]) +assert_type(AR_S.istitle(), npt.NDArray[np.bool]) + +assert_type(AR_U.isupper(), npt.NDArray[np.bool]) +assert_type(AR_S.isupper(), npt.NDArray[np.bool]) + +assert_type(AR_U.__array_finalize__(object()), None) +assert_type(AR_S.__array_finalize__(object()), None) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi new file mode 100644 index 0000000..2165d17 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi @@ -0,0 +1,264 @@ +import decimal +import fractions +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool() + +b = bool() +c = complex() +f = float() +i = int() + +AR = np.array([0], dtype=np.int64) +AR.setflags(write=False) + +SEQ = (0, 1, 2, 3, 4) + +# object-like comparisons + +assert_type(i8 > fractions.Fraction(1, 5), np.bool) +assert_type(i8 > [fractions.Fraction(1, 5)], npt.NDArray[np.bool]) 
+assert_type(i8 > decimal.Decimal("1.5"), np.bool) +assert_type(i8 > [decimal.Decimal("1.5")], npt.NDArray[np.bool]) + +# Time structures + +assert_type(dt > dt, np.bool) + +assert_type(td > td, np.bool) +assert_type(td > i, np.bool) +assert_type(td > i4, np.bool) +assert_type(td > i8, np.bool) + +assert_type(td > AR, npt.NDArray[np.bool]) +assert_type(td > SEQ, npt.NDArray[np.bool]) +assert_type(AR > SEQ, npt.NDArray[np.bool]) +assert_type(AR > td, npt.NDArray[np.bool]) +assert_type(SEQ > td, npt.NDArray[np.bool]) +assert_type(SEQ > AR, npt.NDArray[np.bool]) + +# boolean + +assert_type(b_ > b, np.bool) +assert_type(b_ > b_, np.bool) +assert_type(b_ > i, np.bool) +assert_type(b_ > i8, np.bool) +assert_type(b_ > i4, np.bool) +assert_type(b_ > u8, np.bool) +assert_type(b_ > u4, np.bool) +assert_type(b_ > f, np.bool) +assert_type(b_ > f8, np.bool) +assert_type(b_ > f4, np.bool) +assert_type(b_ > c, np.bool) +assert_type(b_ > c16, np.bool) +assert_type(b_ > c8, np.bool) +assert_type(b_ > AR, npt.NDArray[np.bool]) +assert_type(b_ > SEQ, npt.NDArray[np.bool]) + +# Complex + +assert_type(c16 > c16, np.bool) +assert_type(c16 > f8, np.bool) +assert_type(c16 > i8, np.bool) +assert_type(c16 > c8, np.bool) +assert_type(c16 > f4, np.bool) +assert_type(c16 > i4, np.bool) +assert_type(c16 > b_, np.bool) +assert_type(c16 > b, np.bool) +assert_type(c16 > c, np.bool) +assert_type(c16 > f, np.bool) +assert_type(c16 > i, np.bool) +assert_type(c16 > AR, npt.NDArray[np.bool]) +assert_type(c16 > SEQ, npt.NDArray[np.bool]) + +assert_type(c16 > c16, np.bool) +assert_type(f8 > c16, np.bool) +assert_type(i8 > c16, np.bool) +assert_type(c8 > c16, np.bool) +assert_type(f4 > c16, np.bool) +assert_type(i4 > c16, np.bool) +assert_type(b_ > c16, np.bool) +assert_type(b > c16, np.bool) +assert_type(c > c16, np.bool) +assert_type(f > c16, np.bool) +assert_type(i > c16, np.bool) +assert_type(AR > c16, npt.NDArray[np.bool]) +assert_type(SEQ > c16, npt.NDArray[np.bool]) + +assert_type(c8 > c16, np.bool) +assert_type(c8 > f8, np.bool) +assert_type(c8 > i8, np.bool) +assert_type(c8 > c8, np.bool) +assert_type(c8 > f4, np.bool) +assert_type(c8 > i4, np.bool) +assert_type(c8 > b_, np.bool) +assert_type(c8 > b, np.bool) +assert_type(c8 > c, np.bool) +assert_type(c8 > f, np.bool) +assert_type(c8 > i, np.bool) +assert_type(c8 > AR, npt.NDArray[np.bool]) +assert_type(c8 > SEQ, npt.NDArray[np.bool]) + +assert_type(c16 > c8, np.bool) +assert_type(f8 > c8, np.bool) +assert_type(i8 > c8, np.bool) +assert_type(c8 > c8, np.bool) +assert_type(f4 > c8, np.bool) +assert_type(i4 > c8, np.bool) +assert_type(b_ > c8, np.bool) +assert_type(b > c8, np.bool) +assert_type(c > c8, np.bool) +assert_type(f > c8, np.bool) +assert_type(i > c8, np.bool) +assert_type(AR > c8, npt.NDArray[np.bool]) +assert_type(SEQ > c8, npt.NDArray[np.bool]) + +# Float + +assert_type(f8 > f8, np.bool) +assert_type(f8 > i8, np.bool) +assert_type(f8 > f4, np.bool) +assert_type(f8 > i4, np.bool) +assert_type(f8 > b_, np.bool) +assert_type(f8 > b, np.bool) +assert_type(f8 > c, np.bool) +assert_type(f8 > f, np.bool) +assert_type(f8 > i, np.bool) +assert_type(f8 > AR, npt.NDArray[np.bool]) +assert_type(f8 > SEQ, npt.NDArray[np.bool]) + +assert_type(f8 > f8, np.bool) +assert_type(i8 > f8, np.bool) +assert_type(f4 > f8, np.bool) +assert_type(i4 > f8, np.bool) +assert_type(b_ > f8, np.bool) +assert_type(b > f8, np.bool) +assert_type(c > f8, np.bool) +assert_type(f > f8, np.bool) +assert_type(i > f8, np.bool) +assert_type(AR > f8, npt.NDArray[np.bool]) +assert_type(SEQ > f8, 
npt.NDArray[np.bool]) + +assert_type(f4 > f8, np.bool) +assert_type(f4 > i8, np.bool) +assert_type(f4 > f4, np.bool) +assert_type(f4 > i4, np.bool) +assert_type(f4 > b_, np.bool) +assert_type(f4 > b, np.bool) +assert_type(f4 > c, np.bool) +assert_type(f4 > f, np.bool) +assert_type(f4 > i, np.bool) +assert_type(f4 > AR, npt.NDArray[np.bool]) +assert_type(f4 > SEQ, npt.NDArray[np.bool]) + +assert_type(f8 > f4, np.bool) +assert_type(i8 > f4, np.bool) +assert_type(f4 > f4, np.bool) +assert_type(i4 > f4, np.bool) +assert_type(b_ > f4, np.bool) +assert_type(b > f4, np.bool) +assert_type(c > f4, np.bool) +assert_type(f > f4, np.bool) +assert_type(i > f4, np.bool) +assert_type(AR > f4, npt.NDArray[np.bool]) +assert_type(SEQ > f4, npt.NDArray[np.bool]) + +# Int + +assert_type(i8 > i8, np.bool) +assert_type(i8 > u8, np.bool) +assert_type(i8 > i4, np.bool) +assert_type(i8 > u4, np.bool) +assert_type(i8 > b_, np.bool) +assert_type(i8 > b, np.bool) +assert_type(i8 > c, np.bool) +assert_type(i8 > f, np.bool) +assert_type(i8 > i, np.bool) +assert_type(i8 > AR, npt.NDArray[np.bool]) +assert_type(i8 > SEQ, npt.NDArray[np.bool]) + +assert_type(u8 > u8, np.bool) +assert_type(u8 > i4, np.bool) +assert_type(u8 > u4, np.bool) +assert_type(u8 > b_, np.bool) +assert_type(u8 > b, np.bool) +assert_type(u8 > c, np.bool) +assert_type(u8 > f, np.bool) +assert_type(u8 > i, np.bool) +assert_type(u8 > AR, npt.NDArray[np.bool]) +assert_type(u8 > SEQ, npt.NDArray[np.bool]) + +assert_type(i8 > i8, np.bool) +assert_type(u8 > i8, np.bool) +assert_type(i4 > i8, np.bool) +assert_type(u4 > i8, np.bool) +assert_type(b_ > i8, np.bool) +assert_type(b > i8, np.bool) +assert_type(c > i8, np.bool) +assert_type(f > i8, np.bool) +assert_type(i > i8, np.bool) +assert_type(AR > i8, npt.NDArray[np.bool]) +assert_type(SEQ > i8, npt.NDArray[np.bool]) + +assert_type(u8 > u8, np.bool) +assert_type(i4 > u8, np.bool) +assert_type(u4 > u8, np.bool) +assert_type(b_ > u8, np.bool) +assert_type(b > u8, np.bool) +assert_type(c > u8, np.bool) +assert_type(f > u8, np.bool) +assert_type(i > u8, np.bool) +assert_type(AR > u8, npt.NDArray[np.bool]) +assert_type(SEQ > u8, npt.NDArray[np.bool]) + +assert_type(i4 > i8, np.bool) +assert_type(i4 > i4, np.bool) +assert_type(i4 > i, np.bool) +assert_type(i4 > b_, np.bool) +assert_type(i4 > b, np.bool) +assert_type(i4 > AR, npt.NDArray[np.bool]) +assert_type(i4 > SEQ, npt.NDArray[np.bool]) + +assert_type(u4 > i8, np.bool) +assert_type(u4 > i4, np.bool) +assert_type(u4 > u8, np.bool) +assert_type(u4 > u4, np.bool) +assert_type(u4 > i, np.bool) +assert_type(u4 > b_, np.bool) +assert_type(u4 > b, np.bool) +assert_type(u4 > AR, npt.NDArray[np.bool]) +assert_type(u4 > SEQ, npt.NDArray[np.bool]) + +assert_type(i8 > i4, np.bool) +assert_type(i4 > i4, np.bool) +assert_type(i > i4, np.bool) +assert_type(b_ > i4, np.bool) +assert_type(b > i4, np.bool) +assert_type(AR > i4, npt.NDArray[np.bool]) +assert_type(SEQ > i4, npt.NDArray[np.bool]) + +assert_type(i8 > u4, np.bool) +assert_type(i4 > u4, np.bool) +assert_type(u8 > u4, np.bool) +assert_type(u4 > u4, np.bool) +assert_type(b_ > u4, np.bool) +assert_type(b > u4, np.bool) +assert_type(i > u4, np.bool) +assert_type(AR > u4, npt.NDArray[np.bool]) +assert_type(SEQ > u4, npt.NDArray[np.bool]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/constants.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/constants.pyi new file mode 100644 index 0000000..d4474f4 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/constants.pyi @@ -0,0 +1,14 @@ +from typing import Literal, assert_type + +import numpy as np + +assert_type(np.e, float) +assert_type(np.euler_gamma, float) +assert_type(np.inf, float) +assert_type(np.nan, float) +assert_type(np.pi, float) + +assert_type(np.little_endian, bool) + +assert_type(np.True_, np.bool[Literal[True]]) +assert_type(np.False_, np.bool[Literal[False]]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ctypeslib.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ctypeslib.pyi new file mode 100644 index 0000000..0564d72 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -0,0 +1,81 @@ +import ctypes as ct +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy import ctypeslib + +AR_bool: npt.NDArray[np.bool] +AR_ubyte: npt.NDArray[np.ubyte] +AR_ushort: npt.NDArray[np.ushort] +AR_uintc: npt.NDArray[np.uintc] +AR_ulong: npt.NDArray[np.ulong] +AR_ulonglong: npt.NDArray[np.ulonglong] +AR_byte: npt.NDArray[np.byte] +AR_short: npt.NDArray[np.short] +AR_intc: npt.NDArray[np.intc] +AR_long: npt.NDArray[np.long] +AR_longlong: npt.NDArray[np.longlong] +AR_single: npt.NDArray[np.single] +AR_double: npt.NDArray[np.double] +AR_longdouble: npt.NDArray[np.longdouble] +AR_void: npt.NDArray[np.void] + +pointer: ct._Pointer[Any] + +assert_type(np.ctypeslib.c_intp(), ctypeslib.c_intp) + +assert_type(np.ctypeslib.ndpointer(), type[ctypeslib._ndptr[None]]) +assert_type(np.ctypeslib.ndpointer(dtype=np.float64), type[ctypeslib._ndptr[np.dtype[np.float64]]]) +assert_type(np.ctypeslib.ndpointer(dtype=float), type[ctypeslib._ndptr[np.dtype]]) +assert_type(np.ctypeslib.ndpointer(shape=(10, 3)), type[ctypeslib._ndptr[None]]) +assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._concrete_ndptr[np.dtype[np.int64]]]) +assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype]]) + +assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) +assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) +assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) +assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) +assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) +assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) +assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) + +assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) +assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) +assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) + +assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) +assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) +assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) 
+assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) +assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) +assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) +assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) +assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) +assert_type(np.ctypeslib.as_ctypes(AR_intc), ct.Array[ct.c_int]) +assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) + +assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) +assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) + +assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) +assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) +assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) +assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) +assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) +assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/datasource.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/datasource.pyi new file mode 100644 index 0000000..9f01791 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/datasource.pyi @@ -0,0 +1,23 @@ +from pathlib import Path +from typing import IO, Any, assert_type + +import numpy as np + +path1: Path +path2: str + +d1 = np.lib.npyio.DataSource(path1) +d2 = np.lib.npyio.DataSource(path2) +d3 = np.lib.npyio.DataSource(None) + +assert_type(d1.abspath("..."), str) +assert_type(d2.abspath("..."), str) +assert_type(d3.abspath("..."), str) + +assert_type(d1.exists("..."), bool) +assert_type(d2.exists("..."), bool) +assert_type(d3.exists("..."), bool) + +assert_type(d1.open("...", "r"), IO[Any]) +assert_type(d2.open("...", encoding="utf8"), IO[Any]) +assert_type(d3.open("...", newline="/n"), IO[Any]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/dtype.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/dtype.pyi new file mode 100644 index 0000000..721d270 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/dtype.pyi @@ -0,0 +1,136 @@ +import ctypes as ct +import datetime as dt +from decimal import Decimal +from fractions import Fraction +from typing import Any, Literal, LiteralString, TypeAlias, assert_type + +import numpy as np +from numpy.dtypes import StringDType + +# a combination of likely `object` dtype-like candidates (no `_co`) +_PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta + +dtype_U: np.dtype[np.str_] +dtype_V: np.dtype[np.void] +dtype_i8: np.dtype[np.int64] + +py_int_co: type[int] +py_float_co: type[float] +py_complex_co: type[complex] +py_object: type[_PyObjectLike] +py_character: type[str | bytes] +py_flexible: type[str | bytes | memoryview] + +ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] +ct_number: type[ct.c_uint8 | ct.c_float] +ct_generic: type[ct.c_bool | ct.c_char] 
+
+cs_integer: Literal["u1", "h", ">i4", "q"]
+cs_number: Literal["u1", "f8", ">c16"]
+cs_flex: Literal["S", "|V16", "U32"]
+cs_generic: Literal["H", "U", "h", "|M8[Y]", "?"]
+
+dt_inexact: np.dtype[np.inexact]
+dt_string: StringDType
+
+assert_type(np.dtype(np.float64), np.dtype[np.float64])
+assert_type(np.dtype(np.float64, metadata={"test": "test"}), np.dtype[np.float64])
+assert_type(np.dtype(np.int64), np.dtype[np.int64])
+
+# String aliases
+assert_type(np.dtype("float64"), np.dtype[np.float64])
+assert_type(np.dtype("float32"), np.dtype[np.float32])
+assert_type(np.dtype("int64"), np.dtype[np.int64])
+assert_type(np.dtype("int32"), np.dtype[np.int32])
+assert_type(np.dtype("bool"), np.dtype[np.bool])
+assert_type(np.dtype("bytes"), np.dtype[np.bytes_])
+assert_type(np.dtype("str"), np.dtype[np.str_])
+
+# Python types
+assert_type(np.dtype(bool), np.dtype[np.bool])
+assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool])
+assert_type(np.dtype(int), np.dtype[np.int_ | np.bool])
+assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(py_object), np.dtype[np.object_])
+assert_type(np.dtype(str), np.dtype[np.str_])
+assert_type(np.dtype(bytes), np.dtype[np.bytes_])
+assert_type(np.dtype(py_character), np.dtype[np.character])
+assert_type(np.dtype(memoryview), np.dtype[np.void])
+assert_type(np.dtype(py_flexible), np.dtype[np.flexible])
+
+assert_type(np.dtype(list), np.dtype[np.object_])
+assert_type(np.dtype(dt.datetime), np.dtype[np.object_])
+assert_type(np.dtype(dt.timedelta), np.dtype[np.object_])
+assert_type(np.dtype(Decimal), np.dtype[np.object_])
+assert_type(np.dtype(Fraction), np.dtype[np.object_])
+
+# char-codes
+assert_type(np.dtype("?"), np.dtype[np.bool])
+assert_type(np.dtype("|b1"), np.dtype[np.bool])
+assert_type(np.dtype("u1"), np.dtype[np.uint8])
+assert_type(np.dtype("l"), np.dtype[np.long])
+assert_type(np.dtype("longlong"), np.dtype[np.longlong])
+assert_type(np.dtype(">g"), np.dtype[np.longdouble])
+assert_type(np.dtype(cs_integer), np.dtype[np.integer])
+assert_type(np.dtype(cs_number), np.dtype[np.number])
+assert_type(np.dtype(cs_flex), np.dtype[np.flexible])
+assert_type(np.dtype(cs_generic), np.dtype[np.generic])
+
+# ctypes
+assert_type(np.dtype(ct.c_double), np.dtype[np.double])
+assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong])
+assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32])
+assert_type(np.dtype(ct.c_bool), np.dtype[np.bool])
+assert_type(np.dtype(ct.c_char), np.dtype[np.bytes_])
+assert_type(np.dtype(ct.py_object), np.dtype[np.object_])
+
+# Special case for None
+assert_type(np.dtype(None), np.dtype[np.float64])
+
+# Dtypes of dtypes
+assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64])
+assert_type(np.dtype(dt_inexact), np.dtype[np.inexact])
+
+# Parameterized dtypes
+assert_type(np.dtype("S8"), np.dtype)
+
+# Void
+assert_type(np.dtype(("U", 10)), np.dtype[np.void])
+
+# StringDType
+assert_type(np.dtype(dt_string), StringDType)
+assert_type(np.dtype("T"), StringDType)
+assert_type(np.dtype("=T"), StringDType)
+assert_type(np.dtype("|T"), StringDType)
+
+# Methods and attributes
+assert_type(dtype_U.base, np.dtype)
+assert_type(dtype_U.subdtype, tuple[np.dtype, tuple[Any, ...]] | None)
+assert_type(dtype_U.newbyteorder(), np.dtype[np.str_])
+assert_type(dtype_U.type, type[np.str_])
+assert_type(dtype_U.name, LiteralString) +assert_type(dtype_U.names, tuple[str, ...] | None) + +assert_type(dtype_U * 0, np.dtype[np.str_]) +assert_type(dtype_U * 1, np.dtype[np.str_]) +assert_type(dtype_U * 2, np.dtype[np.str_]) + +assert_type(dtype_i8 * 0, np.dtype[np.void]) +assert_type(dtype_i8 * 1, np.dtype[np.int64]) +assert_type(dtype_i8 * 2, np.dtype[np.void]) + +assert_type(0 * dtype_U, np.dtype[np.str_]) +assert_type(1 * dtype_U, np.dtype[np.str_]) +assert_type(2 * dtype_U, np.dtype[np.str_]) + +assert_type(0 * dtype_i8, np.dtype) +assert_type(1 * dtype_i8, np.dtype) +assert_type(2 * dtype_i8, np.dtype) + +assert_type(dtype_V["f0"], np.dtype) +assert_type(dtype_V[0], np.dtype) +assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) +assert_type(dtype_V[["f0"]], np.dtype[np.void]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi new file mode 100644 index 0000000..cc58f00 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi @@ -0,0 +1,39 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_U: list[str] +AR_o: npt.NDArray[np.object_] + +OUT_f: npt.NDArray[np.float64] + +assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b), Any) +assert_type(np.einsum("i,i->i", AR_o, AR_o), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i), Any) +assert_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), Any) + +assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f), npt.NDArray[np.float64]) +assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f), npt.NDArray[np.float64]) +assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16"), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe"), Any) + +assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), tuple[list[Any], str]) + +assert_type(np.einsum([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), Any) +assert_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/emath.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/emath.pyi new file mode 100644 index 0000000..1d7bff8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/emath.pyi @@ -0,0 +1,54 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] 
+AR_c16: npt.NDArray[np.complex128]
+f8: np.float64
+c16: np.complex128
+
+assert_type(np.emath.sqrt(f8), Any)
+assert_type(np.emath.sqrt(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.sqrt(c16), np.complexfloating)
+assert_type(np.emath.sqrt(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.log(f8), Any)
+assert_type(np.emath.log(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.log(c16), np.complexfloating)
+assert_type(np.emath.log(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.log10(f8), Any)
+assert_type(np.emath.log10(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.log10(c16), np.complexfloating)
+assert_type(np.emath.log10(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.log2(f8), Any)
+assert_type(np.emath.log2(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.log2(c16), np.complexfloating)
+assert_type(np.emath.log2(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.logn(f8, 2), Any)
+assert_type(np.emath.logn(AR_f8, 4), npt.NDArray[Any])
+assert_type(np.emath.logn(f8, 1j), np.complexfloating)
+assert_type(np.emath.logn(AR_c16, 1.5), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.power(f8, 2), Any)
+assert_type(np.emath.power(AR_f8, 4), npt.NDArray[Any])
+assert_type(np.emath.power(f8, 2j), np.complexfloating)
+assert_type(np.emath.power(AR_c16, 1.5), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.arccos(f8), Any)
+assert_type(np.emath.arccos(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.arccos(c16), np.complexfloating)
+assert_type(np.emath.arccos(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.arcsin(f8), Any)
+assert_type(np.emath.arcsin(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.arcsin(c16), np.complexfloating)
+assert_type(np.emath.arcsin(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.arctanh(f8), Any)
+assert_type(np.emath.arctanh(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.arctanh(c16), np.complexfloating)
+assert_type(np.emath.arctanh(AR_c16), npt.NDArray[np.complexfloating])
diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/fft.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/fft.pyi
new file mode 100644
index 0000000..dacd2b8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/fft.pyi
@@ -0,0 +1,37 @@
+from typing import Any, assert_type
+
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_LIKE_f8: list[float]
+
+assert_type(np.fft.fftshift(AR_f8), npt.NDArray[np.float64])
+assert_type(np.fft.fftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any])
+
+assert_type(np.fft.ifftshift(AR_f8), npt.NDArray[np.float64])
+assert_type(np.fft.ifftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any])
+
+assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating])
+assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.fft.rfftfreq(5, AR_f8), npt.NDArray[np.floating])
+assert_type(np.fft.rfftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.fft.fft(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.ifft(AR_f8, axis=1), npt.NDArray[np.complex128])
+assert_type(np.fft.rfft(AR_f8, n=None), npt.NDArray[np.complex128])
+assert_type(np.fft.irfft(AR_f8, norm="ortho"), npt.NDArray[np.float64])
+assert_type(np.fft.hfft(AR_f8, n=2), npt.NDArray[np.float64])
+assert_type(np.fft.ihfft(AR_f8), npt.NDArray[np.complex128])
+
+assert_type(np.fft.fftn(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.ifftn(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.rfftn(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.irfftn(AR_f8), npt.NDArray[np.float64])
+
+assert_type(np.fft.rfft2(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.ifft2(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.fft2(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.irfft2(AR_f8), npt.NDArray[np.float64])
diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi
new file mode 100644
index 0000000..e188d30
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi
@@ -0,0 +1,47 @@
+from typing import Literal, TypeAlias, assert_type
+
+import numpy as np
+import numpy.typing as npt
+
+a: np.flatiter[npt.NDArray[np.str_]]
+a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]]
+
+Size: TypeAlias = Literal[42]
+a_1d_fixed: np.flatiter[np.ndarray[tuple[Size], np.dtype[np.object_]]]
+
+assert_type(a.base, npt.NDArray[np.str_])
+assert_type(a.copy(), npt.NDArray[np.str_])
+assert_type(a.coords, tuple[int, ...])
+assert_type(a.index, int)
+assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]])
+assert_type(next(a), np.str_)
+assert_type(a[0], np.str_)
+assert_type(a[[0, 1, 2]], npt.NDArray[np.str_])
+assert_type(a[...], npt.NDArray[np.str_])
+assert_type(a[:], npt.NDArray[np.str_])
+assert_type(a[(...,)], npt.NDArray[np.str_])
+assert_type(a[(0,)], np.str_)
+
+assert_type(a.__array__(), npt.NDArray[np.str_])
+assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64])
+assert_type(
+    a_1d.__array__(),
+    np.ndarray[tuple[int], np.dtype[np.bytes_]],
+)
+assert_type(
+    a_1d.__array__(np.dtype(np.float64)),
+    np.ndarray[tuple[int], np.dtype[np.float64]],
+)
+assert_type(
+    a_1d_fixed.__array__(),
+    np.ndarray[tuple[Size], np.dtype[np.object_]],
+)
+assert_type(
+    a_1d_fixed.__array__(np.dtype(np.float64)),
+    np.ndarray[tuple[Size], np.dtype[np.float64]],
+)
+
+a[0] = "a"
+a[:5] = "a"
+a[...] = "a"
+a[(...,)] = "a"
diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/fromnumeric.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/fromnumeric.pyi
new file mode 100644
index 0000000..5438e00
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/fromnumeric.pyi
@@ -0,0 +1,347 @@
+"""Tests for :mod:`_core.fromnumeric`."""
+
+from typing import Any, assert_type
+from typing import Literal as L
+
+import numpy as np
+import numpy.typing as npt
+
+class NDArraySubclass(npt.NDArray[np.complex128]): ...
+
+AR_b: npt.NDArray[np.bool]
+AR_f4: npt.NDArray[np.float32]
+AR_c16: npt.NDArray[np.complex128]
+AR_u8: npt.NDArray[np.uint64]
+AR_i8: npt.NDArray[np.int64]
+AR_O: npt.NDArray[np.object_]
+AR_subclass: NDArraySubclass
+AR_m: npt.NDArray[np.timedelta64]
+AR_0d: np.ndarray[tuple[()]]
+AR_1d: np.ndarray[tuple[int]]
+AR_nd: np.ndarray
+
+b: np.bool
+f4: np.float32
+i8: np.int64
+f: float
+
+# integer-dtype subclass for argmin/argmax
+class NDArrayIntSubclass(npt.NDArray[np.intp]): ...
+AR_sub_i: NDArrayIntSubclass + +assert_type(np.take(b, 0), np.bool) +assert_type(np.take(f4, 0), np.float32) +assert_type(np.take(f, 0), Any) +assert_type(np.take(AR_b, 0), np.bool) +assert_type(np.take(AR_f4, 0), np.float32) +assert_type(np.take(AR_b, [0]), npt.NDArray[np.bool]) +assert_type(np.take(AR_f4, [0]), npt.NDArray[np.float32]) +assert_type(np.take([1], [0]), npt.NDArray[Any]) +assert_type(np.take(AR_f4, [0], out=AR_subclass), NDArraySubclass) + +assert_type(np.reshape(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.reshape(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.reshape(f, 1), np.ndarray[tuple[int], np.dtype]) +assert_type(np.reshape(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.reshape(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.choose(1, [True, True]), Any) +assert_type(np.choose([1], [True, True]), npt.NDArray[Any]) +assert_type(np.choose([1], AR_b), npt.NDArray[np.bool]) +assert_type(np.choose([1], AR_b, out=AR_f4), npt.NDArray[np.float32]) + +assert_type(np.repeat(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(b, 1, axis=0), npt.NDArray[np.bool]) +assert_type(np.repeat(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(f, 1), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.repeat(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(AR_f4, 1, axis=0), npt.NDArray[np.float32]) + +# TODO: array_bdd tests for np.put() + +assert_type(np.swapaxes([[0, 1]], 0, 0), npt.NDArray[Any]) +assert_type(np.swapaxes(AR_b, 0, 0), npt.NDArray[np.bool]) +assert_type(np.swapaxes(AR_f4, 0, 0), npt.NDArray[np.float32]) + +assert_type(np.transpose(b), npt.NDArray[np.bool]) +assert_type(np.transpose(f4), npt.NDArray[np.float32]) +assert_type(np.transpose(f), npt.NDArray[Any]) +assert_type(np.transpose(AR_b), npt.NDArray[np.bool]) +assert_type(np.transpose(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.partition(b, 0, axis=None), npt.NDArray[np.bool]) +assert_type(np.partition(f4, 0, axis=None), npt.NDArray[np.float32]) +assert_type(np.partition(f, 0, axis=None), npt.NDArray[Any]) +assert_type(np.partition(AR_b, 0), npt.NDArray[np.bool]) +assert_type(np.partition(AR_f4, 0), npt.NDArray[np.float32]) + +assert_type(np.argpartition(b, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(f4, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(f, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(AR_b, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(AR_f4, 0), npt.NDArray[np.intp]) + +assert_type(np.sort([2, 1], 0), npt.NDArray[Any]) +assert_type(np.sort(AR_b, 0), npt.NDArray[np.bool]) +assert_type(np.sort(AR_f4, 0), npt.NDArray[np.float32]) + +assert_type(np.argsort(AR_b, 0), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_f4, 0), npt.NDArray[np.intp]) + +assert_type(np.argmax(AR_b), np.intp) +assert_type(np.argmax(AR_f4), np.intp) +assert_type(np.argmax(AR_b, axis=0), Any) +assert_type(np.argmax(AR_f4, axis=0), Any) +assert_type(np.argmax(AR_f4, out=AR_sub_i), NDArrayIntSubclass) + +assert_type(np.argmin(AR_b), np.intp) +assert_type(np.argmin(AR_f4), np.intp) +assert_type(np.argmin(AR_b, axis=0), Any) +assert_type(np.argmin(AR_f4, axis=0), Any) +assert_type(np.argmin(AR_f4, out=AR_sub_i), NDArrayIntSubclass) + +assert_type(np.searchsorted(AR_b[0], 0), np.intp) +assert_type(np.searchsorted(AR_f4[0], 0), np.intp) 
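+# an array-like `v` argument selects the overload that returns an index array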
+assert_type(np.searchsorted(AR_b[0], [0]), npt.NDArray[np.intp]) +assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp]) + +assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[int, int], np.dtype]) +assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(np.squeeze(b), np.bool) +assert_type(np.squeeze(f4), np.float32) +assert_type(np.squeeze(f), npt.NDArray[Any]) +assert_type(np.squeeze(AR_b), npt.NDArray[np.bool]) +assert_type(np.squeeze(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.diagonal(AR_b), npt.NDArray[np.bool]) +assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.trace(AR_b), Any) +assert_type(np.trace(AR_f4), Any) +assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | np.bool]]) +assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) + +assert_type(np.shape(b), tuple[()]) +assert_type(np.shape(f), tuple[()]) +assert_type(np.shape([1]), tuple[int]) +assert_type(np.shape([[2]]), tuple[int, int]) +assert_type(np.shape([[[3]]]), tuple[Any, ...]) +assert_type(np.shape(AR_b), tuple[Any, ...]) +assert_type(np.shape(AR_nd), tuple[Any, ...]) +# these fail on mypy, but it works as expected with pyright/pylance +# assert_type(np.shape(AR_0d), tuple[()]) +# assert_type(np.shape(AR_1d), tuple[int]) +# assert_type(np.shape(AR_2d), tuple[int, int]) + +assert_type(np.compress([True], b), npt.NDArray[np.bool]) +assert_type(np.compress([True], f4), npt.NDArray[np.float32]) +assert_type(np.compress([True], f), npt.NDArray[Any]) +assert_type(np.compress([True], AR_b), npt.NDArray[np.bool]) +assert_type(np.compress([True], AR_f4), npt.NDArray[np.float32]) + +assert_type(np.clip(b, 0, 1.0), np.bool) +assert_type(np.clip(f4, -1, 1), np.float32) +assert_type(np.clip(f, 0, 1), Any) +assert_type(np.clip(AR_b, 0, 1), npt.NDArray[np.bool]) +assert_type(np.clip(AR_f4, 0, 1), npt.NDArray[np.float32]) +assert_type(np.clip([0], 0, 1), npt.NDArray[Any]) +assert_type(np.clip(AR_b, 0, 1, out=AR_subclass), NDArraySubclass) + +assert_type(np.sum(b), np.bool) +assert_type(np.sum(f4), np.float32) +assert_type(np.sum(f), Any) +assert_type(np.sum(AR_b), np.bool) +assert_type(np.sum(AR_f4), np.float32) +assert_type(np.sum(AR_b, axis=0), Any) +assert_type(np.sum(AR_f4, axis=0), Any) +assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) +assert_type(np.sum(AR_f4, None, np.float64), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, None, np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) 
+assert_type(np.sum(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) + +assert_type(np.all(b), np.bool) +assert_type(np.all(f4), np.bool) +assert_type(np.all(f), np.bool) +assert_type(np.all(AR_b), np.bool) +assert_type(np.all(AR_f4), np.bool) +assert_type(np.all(AR_b, axis=0), Any) +assert_type(np.all(AR_f4, axis=0), Any) +assert_type(np.all(AR_b, keepdims=True), Any) +assert_type(np.all(AR_f4, keepdims=True), Any) +assert_type(np.all(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.any(b), np.bool) +assert_type(np.any(f4), np.bool) +assert_type(np.any(f), np.bool) +assert_type(np.any(AR_b), np.bool) +assert_type(np.any(AR_f4), np.bool) +assert_type(np.any(AR_b, axis=0), Any) +assert_type(np.any(AR_f4, axis=0), Any) +assert_type(np.any(AR_b, keepdims=True), Any) +assert_type(np.any(AR_f4, keepdims=True), Any) +assert_type(np.any(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumsum(b), npt.NDArray[np.bool]) +assert_type(np.cumsum(f4), npt.NDArray[np.float32]) +assert_type(np.cumsum(f), npt.NDArray[Any]) +assert_type(np.cumsum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumsum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumulative_sum(b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f), npt.NDArray[Any]) +assert_type(np.cumulative_sum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_sum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ptp(b), np.bool) +assert_type(np.ptp(f4), np.float32) +assert_type(np.ptp(f), Any) +assert_type(np.ptp(AR_b), np.bool) +assert_type(np.ptp(AR_f4), np.float32) +assert_type(np.ptp(AR_b, axis=0), Any) +assert_type(np.ptp(AR_f4, axis=0), Any) +assert_type(np.ptp(AR_b, keepdims=True), Any) +assert_type(np.ptp(AR_f4, keepdims=True), Any) +assert_type(np.ptp(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.amax(b), np.bool) +assert_type(np.amax(f4), np.float32) +assert_type(np.amax(f), Any) +assert_type(np.amax(AR_b), np.bool) +assert_type(np.amax(AR_f4), np.float32) +assert_type(np.amax(AR_b, axis=0), Any) +assert_type(np.amax(AR_f4, axis=0), Any) +assert_type(np.amax(AR_b, keepdims=True), Any) +assert_type(np.amax(AR_f4, keepdims=True), Any) +assert_type(np.amax(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.amin(b), np.bool) +assert_type(np.amin(f4), np.float32) +assert_type(np.amin(f), Any) +assert_type(np.amin(AR_b), np.bool) +assert_type(np.amin(AR_f4), np.float32) +assert_type(np.amin(AR_b, axis=0), Any) +assert_type(np.amin(AR_f4, axis=0), Any) +assert_type(np.amin(AR_b, keepdims=True), Any) +assert_type(np.amin(AR_f4, keepdims=True), Any) +assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.prod(AR_b), np.int_) +assert_type(np.prod(AR_u8), np.uint64) +assert_type(np.prod(AR_i8), np.int64) +assert_type(np.prod(AR_f4), np.floating) +assert_type(np.prod(AR_c16), np.complexfloating) +assert_type(np.prod(AR_O), Any) +assert_type(np.prod(AR_f4, axis=0), Any) +assert_type(np.prod(AR_f4, keepdims=True), Any) +assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) 
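+# a builtin `dtype=float` is ambiguous to the stubs (the `float` annotation
+# also admits int and bool), hence the fallback to Any below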
+assert_type(np.prod(AR_f4, dtype=float), Any)
+assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass)
+
+assert_type(np.cumprod(AR_b), npt.NDArray[np.int_])
+assert_type(np.cumprod(AR_u8), npt.NDArray[np.uint64])
+assert_type(np.cumprod(AR_i8), npt.NDArray[np.int64])
+assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating])
+assert_type(np.cumprod(AR_c16), npt.NDArray[np.complexfloating])
+assert_type(np.cumprod(AR_O), npt.NDArray[np.object_])
+assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating])
+assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64])
+assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any])
+assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass)
+
+assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_])
+assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64])
+assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64])
+assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating])
+assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating])
+assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_])
+assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating])
+assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64])
+assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any])
+assert_type(np.cumulative_prod(AR_f4, out=AR_subclass), NDArraySubclass)
+
+assert_type(np.ndim(b), int)
+assert_type(np.ndim(f4), int)
+assert_type(np.ndim(f), int)
+assert_type(np.ndim(AR_b), int)
+assert_type(np.ndim(AR_f4), int)
+
+assert_type(np.size(b), int)
+assert_type(np.size(f4), int)
+assert_type(np.size(f), int)
+assert_type(np.size(AR_b), int)
+assert_type(np.size(AR_f4), int)
+
+assert_type(np.around(b), np.float16)
+assert_type(np.around(f), Any)
+assert_type(np.around(i8), np.int64)
+assert_type(np.around(f4), np.float32)
+assert_type(np.around(AR_b), npt.NDArray[np.float16])
+assert_type(np.around(AR_i8), npt.NDArray[np.int64])
+assert_type(np.around(AR_f4), npt.NDArray[np.float32])
+assert_type(np.around([1.5]), npt.NDArray[Any])
+assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass)
+
+assert_type(np.mean(AR_b), np.floating)
+assert_type(np.mean(AR_i8), np.floating)
+assert_type(np.mean(AR_f4), np.floating)
+assert_type(np.mean(AR_m), np.timedelta64)
+assert_type(np.mean(AR_c16), np.complexfloating)
+assert_type(np.mean(AR_O), Any)
+assert_type(np.mean(AR_f4, axis=0), Any)
+assert_type(np.mean(AR_f4, keepdims=True), Any)
+assert_type(np.mean(AR_f4, dtype=float), Any)
+assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass)
+assert_type(np.mean(AR_f4, dtype=np.float64), np.float64)
+assert_type(np.mean(AR_f4, None, np.float64), np.float64)
+assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=False), np.float64)
+assert_type(np.mean(AR_f4, None, np.float64, keepdims=False), np.float64)
+assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64])
+assert_type(np.mean(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64])
+
+assert_type(np.std(AR_b), np.floating)
+assert_type(np.std(AR_i8), np.floating)
+assert_type(np.std(AR_f4), np.floating)
+assert_type(np.std(AR_c16), np.floating)
+assert_type(np.std(AR_O), Any)
+assert_type(np.std(AR_f4, axis=0), Any)
+assert_type(np.std(AR_f4, keepdims=True), Any)
+assert_type(np.std(AR_f4, dtype=float), Any)
+assert_type(np.std(AR_f4, dtype=np.float64), np.float64)
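+# as elsewhere in this file, an `out=` argument typed as an ndarray subclass
+# makes the annotated return type that subclass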
+assert_type(np.std(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.var(AR_b), np.floating) +assert_type(np.var(AR_i8), np.floating) +assert_type(np.var(AR_f4), np.floating) +assert_type(np.var(AR_c16), np.floating) +assert_type(np.var(AR_O), Any) +assert_type(np.var(AR_f4, axis=0), Any) +assert_type(np.var(AR_f4, keepdims=True), Any) +assert_type(np.var(AR_f4, dtype=float), Any) +assert_type(np.var(AR_f4, dtype=np.float64), np.float64) +assert_type(np.var(AR_f4, out=AR_subclass), NDArraySubclass) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/getlimits.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/getlimits.pyi new file mode 100644 index 0000000..825daba --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/getlimits.pyi @@ -0,0 +1,51 @@ +from typing import Any, LiteralString, assert_type + +import numpy as np +from numpy._typing import _64Bit + +f: float +f8: np.float64 +c8: np.complex64 + +i: int +i8: np.int64 +u4: np.uint32 + +finfo_f8: np.finfo[np.float64] +iinfo_i8: np.iinfo[np.int64] + +assert_type(np.finfo(f), np.finfo[np.float64]) +assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]]) +assert_type(np.finfo(c8), np.finfo[np.float32]) +assert_type(np.finfo('f2'), np.finfo[np.floating]) + +assert_type(finfo_f8.dtype, np.dtype[np.float64]) +assert_type(finfo_f8.bits, int) +assert_type(finfo_f8.eps, np.float64) +assert_type(finfo_f8.epsneg, np.float64) +assert_type(finfo_f8.iexp, int) +assert_type(finfo_f8.machep, int) +assert_type(finfo_f8.max, np.float64) +assert_type(finfo_f8.maxexp, int) +assert_type(finfo_f8.min, np.float64) +assert_type(finfo_f8.minexp, int) +assert_type(finfo_f8.negep, int) +assert_type(finfo_f8.nexp, int) +assert_type(finfo_f8.nmant, int) +assert_type(finfo_f8.precision, int) +assert_type(finfo_f8.resolution, np.float64) +assert_type(finfo_f8.tiny, np.float64) +assert_type(finfo_f8.smallest_normal, np.float64) +assert_type(finfo_f8.smallest_subnormal, np.float64) + +assert_type(np.iinfo(i), np.iinfo[np.int_]) +assert_type(np.iinfo(i8), np.iinfo[np.int64]) +assert_type(np.iinfo(u4), np.iinfo[np.uint32]) +assert_type(np.iinfo('i2'), np.iinfo[Any]) + +assert_type(iinfo_i8.dtype, np.dtype[np.int64]) +assert_type(iinfo_i8.kind, LiteralString) +assert_type(iinfo_i8.bits, int) +assert_type(iinfo_i8.key, LiteralString) +assert_type(iinfo_i8.min, int) +assert_type(iinfo_i8.max, int) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/histograms.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/histograms.pyi new file mode 100644 index 0000000..c1c63d5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/histograms.pyi @@ -0,0 +1,25 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), npt.NDArray[Any]) + +assert_type(np.histogram(AR_i8, bins="auto"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_f8, bins=1, density=True), 
tuple[npt.NDArray[Any], npt.NDArray[Any]]) + +assert_type(np.histogramdd(AR_i8, bins=[1]), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_i8, weights=AR_f8), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_f8, density=True), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/index_tricks.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/index_tricks.pyi new file mode 100644 index 0000000..f6067c3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -0,0 +1,70 @@ +from types import EllipsisType +from typing import Any, Literal, assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_U: list[str] +AR_LIKE_O: list[object] + +AR_i8: npt.NDArray[np.int64] +AR_O: npt.NDArray[np.object_] + +assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) +assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) +assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) +assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[Any]) + +assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[Any, ...], np.int64]) +assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[Any, ...], np.float64]) +assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[Any, ...], np.str_]) +assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[Any, ...], Any]) + +assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) +assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) +assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) +assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[Any]) + +assert_type(np.ndindex(1, 2, 3), np.ndindex) +assert_type(np.ndindex((1, 2, 3)), np.ndindex) +assert_type(iter(np.ndindex(1, 2, 3)), np.ndindex) +assert_type(next(np.ndindex(1, 2, 3)), tuple[Any, ...]) + +assert_type(np.unravel_index([22, 41, 37], (7, 6)), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.unravel_index([31, 41, 13], (7, 6), order="F"), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.unravel_index(1621, (6, 7, 8, 9)), tuple[np.intp, ...]) + +assert_type(np.ravel_multi_index([[1]], (7, 6)), npt.NDArray[np.intp]) +assert_type(np.ravel_multi_index(AR_LIKE_i, (7, 6)), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F"), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip"), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap")), np.intp) +assert_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), np.intp) + +assert_type(np.mgrid[1:1:2], npt.NDArray[Any]) +assert_type(np.mgrid[1:1:2, None:10], npt.NDArray[Any]) + +assert_type(np.ogrid[1:1:2], tuple[npt.NDArray[Any], ...]) +assert_type(np.ogrid[1:1:2, None:10], tuple[npt.NDArray[Any], ...]) + +assert_type(np.index_exp[0:1], tuple[slice[int, int, None]]) +assert_type(np.index_exp[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) + +assert_type(np.s_[0:1], slice[int, int, None]) +assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) 
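+# heterogeneous index expressions keep the precise static type of each element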
+assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) + +assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...]) +assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.ix_(AR_i8), tuple[npt.NDArray[np.int64], ...]) + +assert_type(np.fill_diagonal(AR_i8, 5), None) + +assert_type(np.diag_indices(4), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.diag_indices(2, 3), tuple[npt.NDArray[np.int_], ...]) + +assert_type(np.diag_indices_from(AR_i8), tuple[npt.NDArray[np.int_], ...]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_function_base.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_function_base.pyi new file mode 100644 index 0000000..3ce8d37 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -0,0 +1,213 @@ +from collections.abc import Callable +from fractions import Fraction +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +vectorized_func: np.vectorize + +f8: np.float64 +AR_LIKE_f8: list[float] +AR_LIKE_c16: list[complex] +AR_LIKE_O: list[Fraction] + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_b: npt.NDArray[np.bool] +AR_U: npt.NDArray[np.str_] +CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] + +AR_b_list: list[npt.NDArray[np.bool]] + +def func( + a: npt.NDArray[Any], + posarg: bool = ..., + /, + arg: int = ..., + *, + kwarg: str = ..., +) -> npt.NDArray[Any]: ... + +assert_type(vectorized_func.pyfunc, Callable[..., Any]) +assert_type(vectorized_func.cache, bool) +assert_type(vectorized_func.signature, str | None) +assert_type(vectorized_func.otypes, str | None) +assert_type(vectorized_func.excluded, set[int | str]) +assert_type(vectorized_func.__doc__, str | None) +assert_type(vectorized_func([1]), Any) +assert_type(np.vectorize(int), np.vectorize) +assert_type( + np.vectorize(int, otypes="i", doc="doc", excluded=(), cache=True, signature=None), + np.vectorize, +) + +assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), npt.NDArray[Any]) + +assert_type(np.flip(f8), np.float64) +assert_type(np.flip(1.0), Any) +assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) +assert_type(np.flip(AR_LIKE_f8, axis=0), npt.NDArray[Any]) + +assert_type(np.iterable(1), bool) +assert_type(np.iterable([1]), bool) + +assert_type(np.average(AR_f8), np.floating) +assert_type(np.average(AR_f8, weights=AR_c16), np.complexfloating) +assert_type(np.average(AR_O), Any) +assert_type(np.average(AR_f8, returned=True), tuple[np.floating, np.floating]) +assert_type(np.average(AR_f8, weights=AR_c16, returned=True), tuple[np.complexfloating, np.complexfloating]) +assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) +assert_type(np.average(AR_f8, axis=0), Any) +assert_type(np.average(AR_f8, axis=0, returned=True), tuple[Any, Any]) + +assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) +assert_type(np.asarray_chkfinite(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) 
+assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=''), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=''), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), npt.NDArray[Any]) + +assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) + +assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.copy(AR_U), npt.NDArray[np.str_]) +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright correctly infers `NDArray[str_]` +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(np.gradient(AR_f8, axis=None), Any) +assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any) + +assert_type(np.diff("bob", n=0), str) +assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) + +assert_type(np.interp(1, [1], AR_f8), np.float64) +assert_type(np.interp(1, [1], [1]), np.float64) +assert_type(np.interp(1, [1], AR_c16), np.complex128) +assert_type(np.interp(1, [1], [1j]), np.complex128) # pyright correctly infers `complex128 | float64` +assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64]) +assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64]) +assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128]) +assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128]) # pyright correctly infers `NDArray[complex128 | float64]` + +assert_type(np.angle(f8), np.floating) +assert_type(np.angle(AR_f8), npt.NDArray[np.floating]) +assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating]) +assert_type(np.angle(AR_O), npt.NDArray[np.object_]) + +assert_type(np.unwrap(AR_f8), npt.NDArray[np.floating]) +assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) + +assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complexfloating]) + +assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) +assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) + +assert_type(np.extract(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.extract(AR_i8, AR_LIKE_f8), npt.NDArray[Any]) + +assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) + +assert_type(np.cov(AR_f8, bias=True), npt.NDArray[np.floating]) +assert_type(np.cov(AR_f8, AR_c16, ddof=1), npt.NDArray[np.complexfloating]) +assert_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.cov(AR_f8, fweights=AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.corrcoef(AR_f8, rowvar=True), npt.NDArray[np.floating]) +assert_type(np.corrcoef(AR_f8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.corrcoef(AR_f8, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.corrcoef(AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.blackman(5), npt.NDArray[np.floating]) +assert_type(np.bartlett(6), npt.NDArray[np.floating]) +assert_type(np.hanning(4.5), npt.NDArray[np.floating]) +assert_type(np.hamming(0), npt.NDArray[np.floating]) +assert_type(np.i0(AR_i8), npt.NDArray[np.floating]) +assert_type(np.kaiser(4, 5.9), npt.NDArray[np.floating]) + +assert_type(np.sinc(1.0), np.floating) +assert_type(np.sinc(1j), np.complexfloating) +assert_type(np.sinc(AR_f8), npt.NDArray[np.floating]) +assert_type(np.sinc(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.median(AR_f8, 
keepdims=False), np.floating) +assert_type(np.median(AR_c16, overwrite_input=True), np.complexfloating) +assert_type(np.median(AR_m), np.timedelta64) +assert_type(np.median(AR_O), Any) +assert_type(np.median(AR_f8, keepdims=True), Any) +assert_type(np.median(AR_c16, axis=0), Any) +assert_type(np.median(AR_LIKE_f8, out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.percentile(AR_f8, 50), np.floating) +assert_type(np.percentile(AR_c16, 50), np.complexfloating) +assert_type(np.percentile(AR_m, 50), np.timedelta64) +assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) +assert_type(np.percentile(AR_O, 50), Any) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.floating]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complexfloating]) +assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) +assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), Any) +assert_type(np.percentile(AR_f8, [50], axis=[1]), Any) +assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.quantile(AR_f8, 0.5), np.floating) +assert_type(np.quantile(AR_c16, 0.5), np.complexfloating) +assert_type(np.quantile(AR_m, 0.5), np.timedelta64) +assert_type(np.quantile(AR_M, 0.5, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.5), Any) +assert_type(np.quantile(AR_f8, [0.5]), npt.NDArray[np.floating]) +assert_type(np.quantile(AR_c16, [0.5]), npt.NDArray[np.complexfloating]) +assert_type(np.quantile(AR_m, [0.5]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.5], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.5]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) +assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) +assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.trapezoid(AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_c16, AR_LIKE_f8), np.complex128) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_O), float) +assert_type(np.trapezoid(AR_LIKE_O, AR_LIKE_f8), float) +assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) +assert_type(np.trapezoid(AR_O), float | npt.NDArray[np.object_]) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), float | npt.NDArray[np.object_]) + +assert_type(np.meshgrid(), tuple[()]) +assert_type(np.meshgrid(AR_c16, indexing="ij"), tuple[npt.NDArray[np.complex128]]) +assert_type(np.meshgrid(AR_i8, AR_f8, copy=False), tuple[npt.NDArray[np.int64], npt.NDArray[np.float64]]) +assert_type(np.meshgrid(AR_LIKE_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[np.float64]]) +assert_type(np.meshgrid(AR_LIKE_f8, AR_i8, AR_c16), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any], 
npt.NDArray[Any]]) +assert_type(np.meshgrid(*AR_LIKE_f8), tuple[npt.NDArray[Any], ...]) + +assert_type(np.delete(AR_f8, np.s_[:5]), npt.NDArray[np.float64]) +assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), npt.NDArray[Any]) + +assert_type(np.insert(AR_f8, np.s_[:5], 5), npt.NDArray[np.float64]) +assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), npt.NDArray[Any]) + +assert_type(np.append(AR_f8, 5), npt.NDArray[Any]) +assert_type(np.append(AR_LIKE_f8, 1j, axis=0), npt.NDArray[Any]) + +assert_type(np.digitize(4.5, [1]), np.intp) +assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi new file mode 100644 index 0000000..8b0a9f3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -0,0 +1,144 @@ +from collections.abc import Iterator +from typing import Any, NoReturn, assert_type + +import numpy as np +import numpy.typing as npt + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +poly_obj: np.poly1d + +assert_type(poly_obj.variable, str) +assert_type(poly_obj.order, int) +assert_type(poly_obj.o, int) +assert_type(poly_obj.roots, npt.NDArray[Any]) +assert_type(poly_obj.r, npt.NDArray[Any]) +assert_type(poly_obj.coeffs, npt.NDArray[Any]) +assert_type(poly_obj.c, npt.NDArray[Any]) +assert_type(poly_obj.coef, npt.NDArray[Any]) +assert_type(poly_obj.coefficients, npt.NDArray[Any]) +assert_type(poly_obj.__hash__, None) + +assert_type(poly_obj(1), Any) +assert_type(poly_obj([1]), npt.NDArray[Any]) +assert_type(poly_obj(poly_obj), np.poly1d) + +assert_type(len(poly_obj), int) +assert_type(-poly_obj, np.poly1d) +assert_type(+poly_obj, np.poly1d) + +assert_type(poly_obj * 5, np.poly1d) +assert_type(5 * poly_obj, np.poly1d) +assert_type(poly_obj + 5, np.poly1d) +assert_type(5 + poly_obj, np.poly1d) +assert_type(poly_obj - 5, np.poly1d) +assert_type(5 - poly_obj, np.poly1d) +assert_type(poly_obj**1, np.poly1d) +assert_type(poly_obj**1.0, np.poly1d) +assert_type(poly_obj / 5, np.poly1d) +assert_type(5 / poly_obj, np.poly1d) + +assert_type(poly_obj[0], Any) +poly_obj[0] = 5 +assert_type(iter(poly_obj), Iterator[Any]) +assert_type(poly_obj.deriv(), np.poly1d) +assert_type(poly_obj.integ(), np.poly1d) + +assert_type(np.poly(poly_obj), npt.NDArray[np.floating]) +assert_type(np.poly(AR_f8), npt.NDArray[np.floating]) +assert_type(np.poly(AR_c16), npt.NDArray[np.floating]) + +assert_type(np.polyint(poly_obj), np.poly1d) +assert_type(np.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(np.polyder(poly_obj), np.poly1d) +assert_type(np.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyder(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(np.polyfit(AR_f8, AR_f8, 2), npt.NDArray[np.float64]) +assert_type( + np.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled"), + tuple[ + npt.NDArray[np.float64], + 
npt.NDArray[np.float64], + ], +) +assert_type(np.polyfit(AR_c16, AR_f8, 2), npt.NDArray[np.complex128]) +assert_type( + np.polyfit(AR_f8, AR_c16, 1, full=True), + tuple[ + npt.NDArray[np.complex128], + npt.NDArray[np.float64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.polyfit(AR_u4, AR_c16, 1.0, cov=True), + tuple[ + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) + +assert_type(np.polyval(AR_b, AR_b), npt.NDArray[np.int64]) +assert_type(np.polyval(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyval(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polyadd(poly_obj, AR_i8), np.poly1d) +assert_type(np.polyadd(AR_f8, poly_obj), np.poly1d) +assert_type(np.polyadd(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.polyadd(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyadd(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyadd(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyadd(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) +assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) +assert_type(np.polysub(AR_b, AR_b), NoReturn) +assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polysub(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polysub(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polymul(poly_obj, AR_i8), np.poly1d) +assert_type(np.polymul(AR_f8, poly_obj), np.poly1d) +assert_type(np.polymul(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.polymul(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polymul(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polymul(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polymul(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polymul(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polydiv(poly_obj, AR_i8), tuple[np.poly1d, np.poly1d]) +assert_type(np.polydiv(AR_f8, poly_obj), tuple[np.poly1d, np.poly1d]) +assert_type(np.polydiv(AR_b, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_u4, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_f8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.complexfloating]]) +assert_type(np.polydiv(AR_O, AR_O), tuple[npt.NDArray[Any], npt.NDArray[Any]]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_utils.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_utils.pyi new file mode 100644 index 0000000..c9470e0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -0,0 +1,17 @@ +from io import StringIO +from typing import assert_type + +import numpy as np +import numpy.lib.array_utils as array_utils 
+import numpy.typing as npt + +AR: npt.NDArray[np.float64] +AR_DICT: dict[str, npt.NDArray[np.float64]] +FILE: StringIO + +def func(a: int) -> bool: ... + +assert_type(array_utils.byte_bounds(AR), tuple[int, int]) +assert_type(array_utils.byte_bounds(np.float64()), tuple[int, int]) + +assert_type(np.info(1, output=FILE), None) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_version.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_version.pyi new file mode 100644 index 0000000..0373537 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/lib_version.pyi @@ -0,0 +1,20 @@ +from typing import assert_type + +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +assert_type(version.vstring, str) +assert_type(version.version, str) +assert_type(version.major, int) +assert_type(version.minor, int) +assert_type(version.bugfix, int) +assert_type(version.pre_release, str) +assert_type(version.is_devversion, bool) + +assert_type(version == version, bool) +assert_type(version != version, bool) +assert_type(version < "1.8.0", bool) +assert_type(version <= version, bool) +assert_type(version > version, bool) +assert_type(version >= "1.8.0", bool) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/linalg.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/linalg.pyi new file mode 100644 index 0000000..417fb0d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/linalg.pyi @@ -0,0 +1,132 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.linalg._linalg import ( + EighResult, + EigResult, + QRResult, + SlogdetResult, + SVDResult, +) + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] +AR_m: npt.NDArray[np.timedelta64] +AR_S: npt.NDArray[np.str_] +AR_b: npt.NDArray[np.bool] + +assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) + +assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.outer(AR_c16, AR_c16), 
npt.NDArray[np.complexfloating]) +assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) + +assert_type(np.linalg.qr(AR_i8), QRResult) +assert_type(np.linalg.qr(AR_f8), QRResult) +assert_type(np.linalg.qr(AR_c16), QRResult) + +assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating] | npt.NDArray[np.complexfloating]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating]) + +assert_type(np.linalg.eig(AR_i8), EigResult) +assert_type(np.linalg.eig(AR_f8), EigResult) +assert_type(np.linalg.eig(AR_c16), EigResult) + +assert_type(np.linalg.eigh(AR_i8), EighResult) +assert_type(np.linalg.eigh(AR_f8), EighResult) +assert_type(np.linalg.eigh(AR_c16), EighResult) + +assert_type(np.linalg.svd(AR_i8), SVDResult) +assert_type(np.linalg.svd(AR_f8), SVDResult) +assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) + +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) + +assert_type(np.linalg.matrix_rank(AR_i8), Any) +assert_type(np.linalg.matrix_rank(AR_f8), Any) +assert_type(np.linalg.matrix_rank(AR_c16), Any) + +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) +assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) + +assert_type(np.linalg.det(AR_i8), Any) +assert_type(np.linalg.det(AR_f8), Any) +assert_type(np.linalg.det(AR_c16), Any) + +assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) +assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) +assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) + +assert_type(np.linalg.norm(AR_i8), np.floating) +assert_type(np.linalg.norm(AR_f8), np.floating) +assert_type(np.linalg.norm(AR_c16), np.floating) +assert_type(np.linalg.norm(AR_S), np.floating) +assert_type(np.linalg.norm(AR_f8, axis=0), Any) + +assert_type(np.linalg.matrix_norm(AR_i8), np.floating) +assert_type(np.linalg.matrix_norm(AR_f8), np.floating) +assert_type(np.linalg.matrix_norm(AR_c16), np.floating) +assert_type(np.linalg.matrix_norm(AR_S), np.floating) + +assert_type(np.linalg.vector_norm(AR_i8), np.floating) +assert_type(np.linalg.vector_norm(AR_f8), np.floating) +assert_type(np.linalg.vector_norm(AR_c16), np.floating) +assert_type(np.linalg.vector_norm(AR_S), np.floating) + +assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) +assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) +assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) 
+assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any)
+assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any)
+
+assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger])
+assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating])
+assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.signedinteger])
+assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.floating])
+assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complexfloating])
diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ma.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ma.pyi
new file mode 100644
index 0000000..2c65534
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ma.pyi
@@ -0,0 +1,369 @@
+from typing import Any, Literal, TypeAlias, TypeVar, assert_type
+
+import numpy as np
+from numpy import dtype, generic
+from numpy._typing import NDArray, _AnyShape
+
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]]
+_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]]
+
+class MaskedArraySubclass(MaskedArray[np.complex128]): ...
+
+AR_b: NDArray[np.bool]
+AR_f4: NDArray[np.float32]
+AR_dt64: NDArray[np.datetime64]
+AR_td64: NDArray[np.timedelta64]
+AR_o: NDArray[np.object_]
+
+MAR_c16: MaskedArray[np.complex128]
+MAR_b: MaskedArray[np.bool]
+MAR_f4: MaskedArray[np.float32]
+MAR_f8: MaskedArray[np.float64]
+MAR_i8: MaskedArray[np.int64]
+MAR_dt64: MaskedArray[np.datetime64]
+MAR_td64: MaskedArray[np.timedelta64]
+MAR_o: MaskedArray[np.object_]
+MAR_s: MaskedArray[np.str_]
+MAR_byte: MaskedArray[np.bytes_]
+MAR_V: MaskedArray[np.void]
+
+MAR_subclass: MaskedArraySubclass
+
+MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype]
+MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]
+
+b: np.bool
+f4: np.float32
+f: float
+
+assert_type(MAR_1d.shape, tuple[int])
+
+assert_type(MAR_f4.dtype, np.dtype[np.float32])
+
+assert_type(int(MAR_i8), int)
+assert_type(float(MAR_f4), float)
+
+assert_type(np.ma.min(MAR_b), np.bool)
+assert_type(np.ma.min(MAR_f4), np.float32)
+assert_type(np.ma.min(MAR_b, axis=0), Any)
+assert_type(np.ma.min(MAR_f4, axis=0), Any)
+assert_type(np.ma.min(MAR_b, keepdims=True), Any)
+assert_type(np.ma.min(MAR_f4, keepdims=True), Any)
+assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.min(), np.bool)
+assert_type(MAR_f4.min(), np.float32)
+assert_type(MAR_b.min(axis=0), Any)
+assert_type(MAR_f4.min(axis=0), Any)
+assert_type(MAR_b.min(keepdims=True), Any)
+assert_type(MAR_f4.min(keepdims=True), Any)
+assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(np.ma.max(MAR_b), np.bool)
+assert_type(np.ma.max(MAR_f4), np.float32)
+assert_type(np.ma.max(MAR_b, axis=0), Any)
+assert_type(np.ma.max(MAR_f4, axis=0), Any)
+assert_type(np.ma.max(MAR_b, keepdims=True), Any)
+assert_type(np.ma.max(MAR_f4, keepdims=True), Any)
+assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.max(), np.bool)
+assert_type(MAR_f4.max(), np.float32)
+assert_type(MAR_b.max(axis=0), Any)
+assert_type(MAR_f4.max(axis=0), Any)
+assert_type(MAR_b.max(keepdims=True), Any)
+assert_type(MAR_f4.max(keepdims=True), Any)
+assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(np.ma.ptp(MAR_b), np.bool)
+assert_type(np.ma.ptp(MAR_f4), np.float32)
+assert_type(np.ma.ptp(MAR_b, axis=0), Any)
+assert_type(np.ma.ptp(MAR_f4, axis=0), Any)
+assert_type(np.ma.ptp(MAR_b, keepdims=True), Any)
+assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any)
+assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.ptp(), np.bool)
+assert_type(MAR_f4.ptp(), np.float32)
+assert_type(MAR_b.ptp(axis=0), Any)
+assert_type(MAR_f4.ptp(axis=0), Any)
+assert_type(MAR_b.ptp(keepdims=True), Any)
+assert_type(MAR_f4.ptp(keepdims=True), Any)
+assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.argmin(), np.intp)
+assert_type(MAR_f4.argmin(), np.intp)
+assert_type(MAR_f4.argmin(fill_value=6.28318, keepdims=False), np.intp)
+assert_type(MAR_b.argmin(axis=0), Any)
+assert_type(MAR_f4.argmin(axis=0), Any)
+assert_type(MAR_b.argmin(keepdims=True), Any)
+assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(np.ma.argmin(MAR_b), np.intp)
+assert_type(np.ma.argmin(MAR_f4), np.intp)
+assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp)
+assert_type(np.ma.argmin(MAR_b, axis=0), Any)
+assert_type(np.ma.argmin(MAR_f4, axis=0), Any)
+assert_type(np.ma.argmin(MAR_b, keepdims=True), Any)
+assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.argmax(), np.intp)
+assert_type(MAR_f4.argmax(), np.intp)
+assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp)
+assert_type(MAR_b.argmax(axis=0), Any)
+assert_type(MAR_f4.argmax(axis=0), Any)
+assert_type(MAR_b.argmax(keepdims=True), Any)
+assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(np.ma.argmax(MAR_b), np.intp)
+assert_type(np.ma.argmax(MAR_f4), np.intp)
+assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp)
+assert_type(np.ma.argmax(MAR_b, axis=0), Any)
+assert_type(np.ma.argmax(MAR_f4, axis=0), Any)
+assert_type(np.ma.argmax(MAR_b, keepdims=True), Any)
+assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.all(), np.bool)
+assert_type(MAR_f4.all(), np.bool)
+assert_type(MAR_f4.all(keepdims=False), np.bool)
+assert_type(MAR_b.all(axis=0), np.bool | MaskedArray[np.bool])
+assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool])
+assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool])
+assert_type(MAR_f4.all(axis=0),
np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_b.any(), np.bool) +assert_type(MAR_f4.any(), np.bool) +assert_type(MAR_f4.any(keepdims=False), np.bool) +assert_type(MAR_b.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f4.sort(), None) +assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) + +assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) +assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) + +assert_type(MAR_f8.take(0), np.float64) +assert_type(MAR_1d.take(0), Any) +assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclass) + +assert_type(np.ma.take(f, 0), Any) +assert_type(np.ma.take(f4, 0), np.float32) +assert_type(np.ma.take(MAR_f8, 0), np.float64) +assert_type(np.ma.take(AR_f4, 0), np.float32) +assert_type(np.ma.take(MAR_1d, 0), Any) +assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) +assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take([1], [0]), MaskedArray[Any]) +assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) + +assert_type(MAR_f4.partition(1), None) +assert_type(MAR_V.partition(1, axis=0, kind='introselect', order='K'), None) + +assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedArray[np.intp]) + +assert_type(np.ma.ndim(f4), int) +assert_type(np.ma.ndim(MAR_b), int) +assert_type(np.ma.ndim(AR_f4), int) + +assert_type(np.ma.size(b), int) +assert_type(np.ma.size(MAR_f4, axis=0), int) +assert_type(np.ma.size(AR_f4), int) + +assert_type(np.ma.is_masked(MAR_f4), bool) + +assert_type(MAR_f4.ids(), tuple[int, int]) + +assert_type(MAR_f4.iscontiguous(), bool) + +assert_type(MAR_f4 >= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 >= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o >= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d >= 0, MaskedArray[np.bool]) +assert_type(MAR_s >= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte >= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 > 3, MaskedArray[np.bool]) +assert_type(MAR_i8 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 > AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o > AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d > 0, MaskedArray[np.bool]) 
+assert_type(MAR_s > MAR_s, MaskedArray[np.bool])
+assert_type(MAR_byte > MAR_byte, MaskedArray[np.bool])
+
+assert_type(MAR_f4 <= 3, MaskedArray[np.bool])
+assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool])
+assert_type(MAR_b <= AR_td64, MaskedArray[np.bool])
+assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool])
+assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool])
+assert_type(MAR_o <= AR_o, MaskedArray[np.bool])
+assert_type(MAR_1d <= 0, MaskedArray[np.bool])
+assert_type(MAR_s <= MAR_s, MaskedArray[np.bool])
+assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool])
+
+assert_type(MAR_f4 < 3, MaskedArray[np.bool])
+assert_type(MAR_i8 < AR_td64, MaskedArray[np.bool])
+assert_type(MAR_b < AR_td64, MaskedArray[np.bool])
+assert_type(MAR_td64 < AR_td64, MaskedArray[np.bool])
+assert_type(MAR_dt64 < AR_dt64, MaskedArray[np.bool])
+assert_type(MAR_o < AR_o, MaskedArray[np.bool])
+assert_type(MAR_1d < 0, MaskedArray[np.bool])
+assert_type(MAR_s < MAR_s, MaskedArray[np.bool])
+assert_type(MAR_byte < MAR_byte, MaskedArray[np.bool])
+
+assert_type(MAR_byte.count(), int)
+assert_type(MAR_f4.count(axis=None), int)
+assert_type(MAR_f4.count(axis=0), NDArray[np.int_])
+assert_type(MAR_b.count(axis=(0,1)), NDArray[np.int_])
+assert_type(MAR_o.count(keepdims=True), NDArray[np.int_])
+assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_])
+assert_type(MAR_o.count(None, True), NDArray[np.int_])
+
+assert_type(np.ma.count(MAR_byte), int)
+assert_type(np.ma.count(MAR_byte, axis=None), int)
+assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_])
+assert_type(np.ma.count(MAR_b, axis=(0,1)), NDArray[np.int_])
+assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_])
+assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_])
+assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_])
+
+assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]])
+
+assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]])
+assert_type(np.ma.compressed([[1,2,3]]), np.ndarray[tuple[int], np.dtype])
+
+assert_type(MAR_f4.put([0,4,8], [10,20,30]), None)
+assert_type(MAR_f4.put(4, 999), None)
+assert_type(MAR_f4.put(4, 999, mode='clip'), None)
+
+assert_type(np.ma.put(MAR_f4, [0,4,8], [10,20,30]), None)
+assert_type(np.ma.put(MAR_f4, 4, 999), None)
+assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None)
+
+assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None)
+assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None)
+
+assert_type(MAR_f4.filled(float('nan')), NDArray[np.float32])
+assert_type(MAR_i8.filled(), NDArray[np.int64])
+assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype])
+
+assert_type(np.ma.filled(MAR_f4, float('nan')), NDArray[np.float32])
+assert_type(np.ma.filled([[1,2,3]]), NDArray[Any])
+# PyRight detects this one correctly, but mypy doesn't.
+# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 +assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] + +assert_type(MAR_b.repeat(3), np.ma.MaskedArray[tuple[int], np.dtype[np.bool]]) +assert_type(MAR_2d_f4.repeat(MAR_i8), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=None), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedArray[np.float32]) + +assert_type(np.ma.allequal(AR_f4, MAR_f4), bool) +assert_type(np.ma.allequal(AR_f4, MAR_f4, fill_value=False), bool) + +assert_type(np.ma.allclose(AR_f4, MAR_f4), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) + +assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_1d.ravel(order='A'), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) + +assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) +# PyRight detects this one correctly, but mypy doesn't: +# `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` +assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] +assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | np.bool) +assert_type(np.ma.getmask([1,2]), NDArray[np.bool] | np.bool) +assert_type(np.ma.getmask(np.int64(1)), np.bool) + +assert_type(np.ma.is_mask(MAR_1d), bool) +assert_type(np.ma.is_mask(AR_b), bool) + +def func(x: object) -> None: + if np.ma.is_mask(x): + assert_type(x, NDArray[np.bool]) + else: + assert_type(x, object) + +assert_type(MAR_2d_f4.mT, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_c16.real, MaskedArray[np.float64]) +assert_type(MAR_c16.imag, MaskedArray[np.float64]) + +assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) + +assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) + +assert_type(np.ma.nomask, np.bool[Literal[False]]) +assert_type(np.ma.MaskType, type[np.bool]) + +assert_type(MAR_1d.__setmask__([True, False]), None) +assert_type(MAR_1d.__setmask__(np.False_), None) + +assert_type(MAR_2d_f4.harden_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.harden_mask(), MaskedArray[np.int64]) +assert_type(MAR_2d_f4.soften_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.soften_mask(), MaskedArray[np.int64]) +assert_type(MAR_f4.unshare_mask(), MaskedArray[np.float32]) +assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) + +assert_type(MAR_i8.hardmask, bool) +assert_type(MAR_i8.sharedmask, bool) + +assert_type(MAR_b.transpose(), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_b.T, MaskedArray[np.bool]) +assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) +assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/matrix.pyi 
b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/matrix.pyi new file mode 100644 index 0000000..1a7285d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/matrix.pyi @@ -0,0 +1,73 @@ +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_Shape2D: TypeAlias = tuple[int, int] + +mat: np.matrix[_Shape2D, np.dtype[np.int64]] +ar_f8: npt.NDArray[np.float64] +ar_ip: npt.NDArray[np.intp] + +assert_type(mat * 5, np.matrix[_Shape2D, Any]) +assert_type(5 * mat, np.matrix[_Shape2D, Any]) +mat *= 5 + +assert_type(mat**5, np.matrix[_Shape2D, Any]) +mat **= 5 + +assert_type(mat.sum(), Any) +assert_type(mat.mean(), Any) +assert_type(mat.std(), Any) +assert_type(mat.var(), Any) +assert_type(mat.prod(), Any) +assert_type(mat.any(), np.bool) +assert_type(mat.all(), np.bool) +assert_type(mat.max(), np.int64) +assert_type(mat.min(), np.int64) +assert_type(mat.argmax(), np.intp) +assert_type(mat.argmin(), np.intp) +assert_type(mat.ptp(), np.int64) + +assert_type(mat.sum(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.mean(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.std(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.var(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.prod(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.any(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.min(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.argmax(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.argmin(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.ptp(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) + +assert_type(mat.sum(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.mean(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.std(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.var(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.prod(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.any(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.all(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.max(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.min(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.argmax(out=ar_ip), npt.NDArray[np.intp]) +assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp]) +assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) + +assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.I, np.matrix[_Shape2D, Any]) +assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.A1, npt.NDArray[np.int64]) +assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getI(), np.matrix[_Shape2D, Any]) +assert_type(mat.getA(), np.ndarray[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getA1(), npt.NDArray[np.int64]) +assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]]) + +assert_type(np.bmat(ar_f8), np.matrix[_Shape2D, Any]) +assert_type(np.bmat([[0, 1, 2]]), np.matrix[_Shape2D, Any]) +assert_type(np.bmat("mat"), np.matrix[_Shape2D, Any]) + +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[_Shape2D, Any]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/memmap.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/memmap.pyi new file mode 100644 index 
0000000..f3e20ed --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/memmap.pyi @@ -0,0 +1,19 @@ +from typing import Any, assert_type + +import numpy as np + +memmap_obj: np.memmap[Any, np.dtype[np.str_]] + +assert_type(np.memmap.__array_priority__, float) +assert_type(memmap_obj.__array_priority__, float) +assert_type(memmap_obj.filename, str | None) +assert_type(memmap_obj.offset, int) +assert_type(memmap_obj.mode, str) +assert_type(memmap_obj.flush(), None) + +assert_type(np.memmap("file.txt", offset=5), np.memmap[Any, np.dtype[np.uint8]]) +assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), np.memmap[Any, np.dtype[np.float64]]) +with open("file.txt", "rb") as f: + assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype]) + +assert_type(memmap_obj.__array_finalize__(object()), None) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/mod.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/mod.pyi new file mode 100644 index 0000000..59a6a10 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/mod.pyi @@ -0,0 +1,180 @@ +import datetime as dt +from typing import Literal as L +from typing import assert_type + +import numpy as np +import numpy.typing as npt +from numpy._typing import _64Bit + +f8: np.float64 +i8: np.int64 +u8: np.uint64 + +f4: np.float32 +i4: np.int32 +u4: np.uint32 + +m: np.timedelta64 +m_nat: np.timedelta64[None] +m_int0: np.timedelta64[L[0]] +m_int: np.timedelta64[int] +m_td: np.timedelta64[dt.timedelta] + +b_: np.bool + +b: bool +i: int +f: float + +AR_b: npt.NDArray[np.bool] +AR_m: npt.NDArray[np.timedelta64] + +# Time structures + +assert_type(m % m, np.timedelta64) +assert_type(m % m_nat, np.timedelta64[None]) +assert_type(m % m_int0, np.timedelta64[None]) +assert_type(m % m_int, np.timedelta64[int | None]) +assert_type(m_nat % m, np.timedelta64[None]) +assert_type(m_int % m_nat, np.timedelta64[None]) +assert_type(m_int % m_int0, np.timedelta64[None]) +assert_type(m_int % m_int, np.timedelta64[int | None]) +assert_type(m_int % m_td, np.timedelta64[int | None]) +assert_type(m_td % m_nat, np.timedelta64[None]) +assert_type(m_td % m_int0, np.timedelta64[None]) +assert_type(m_td % m_int, np.timedelta64[int | None]) +assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) + +assert_type(AR_m % m, npt.NDArray[np.timedelta64]) +assert_type(m % AR_m, npt.NDArray[np.timedelta64]) + +assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) +assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) + 
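+# Loosely speaking, `np.timedelta64` is parametrized by the Python type of
+# its unwrapped value, with `None` standing in for NaT. A remainder or
+# divmod with a zero or NaT divisor produces NaT, which is why mixed
+# operands above reveal unions such as `np.timedelta64[int | None]`: the
+# `int` part is the ordinary result, the `None` part covers the NaT case.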
+assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) + +# Bool + +assert_type(b_ % b, np.int8) +assert_type(b_ % i, np.int_) +assert_type(b_ % f, np.float64) +assert_type(b_ % b_, np.int8) +assert_type(b_ % i8, np.int64) +assert_type(b_ % u8, np.uint64) +assert_type(b_ % f8, np.float64) +assert_type(b_ % AR_b, npt.NDArray[np.int8]) + +assert_type(divmod(b_, b), tuple[np.int8, np.int8]) +assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) +assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) +assert_type(b_.__divmod__(u8), tuple[np.uint64, np.uint64]) +assert_type(divmod(b_, f8), tuple[np.float64, np.float64]) +assert_type(divmod(b_, AR_b), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) + +assert_type(b % b_, np.int8) +assert_type(i % b_, np.int_) +assert_type(f % b_, np.float64) +assert_type(b_ % b_, np.int8) +assert_type(i8 % b_, np.int64) +assert_type(u8 % b_, np.uint64) +assert_type(f8 % b_, np.float64) +assert_type(AR_b % b_, npt.NDArray[np.int8]) + +assert_type(divmod(b, b_), tuple[np.int8, np.int8]) +assert_type(divmod(i, b_), tuple[np.int_, np.int_]) +assert_type(divmod(f, b_), tuple[np.float64, np.float64]) +assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) +assert_type(divmod(i8, b_), tuple[np.int64, np.int64]) +assert_type(divmod(u8, b_), tuple[np.uint64, np.uint64]) +assert_type(divmod(f8, b_), tuple[np.float64, np.float64]) +assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) + +# int + +assert_type(i8 % b, np.int64) +assert_type(i8 % i8, np.int64) +assert_type(i8 % f, np.float64 | np.floating[_64Bit]) +assert_type(i8 % f8, np.float64 | np.floating[_64Bit]) +assert_type(i4 % i8, np.int64 | np.int32) +assert_type(i4 % f8, np.float64 | np.float32) +assert_type(i4 % i4, np.int32) +assert_type(i4 % f4, np.float32) +assert_type(i8 % AR_b, npt.NDArray[np.int64]) + +assert_type(divmod(i8, b), tuple[np.int64, np.int64]) +assert_type(divmod(i8, i4), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(i8.__divmod__(f), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) + +assert_type(b % i8, np.int64) +assert_type(f % i8, np.float64 | np.floating[_64Bit]) +assert_type(i8 % i8, np.int64) +assert_type(f8 % i8, np.float64) +assert_type(i8 % i4, np.int64 | np.int32) +assert_type(f8 % i4, np.float64) +assert_type(i4 % i4, np.int32) +assert_type(f4 % i4, np.float32) +assert_type(AR_b % i8, npt.NDArray[np.int64]) + +assert_type(divmod(b, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f8, i8), 
tuple[np.float64, np.float64])
+assert_type(divmod(i4, i8), tuple[np.int64, np.int64] | tuple[np.int32, np.int32])
+assert_type(divmod(i4, i4), tuple[np.int32, np.int32])
+# workarounds for https://github.com/microsoft/pyright/issues/9663
+assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32])
+assert_type(f4.__divmod__(i4), tuple[np.float32, np.float32])
+assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]])
+
+# float
+
+assert_type(f8 % b, np.float64)
+assert_type(f8 % f, np.float64)
+assert_type(i8 % f4, np.floating[_64Bit] | np.float32)
+assert_type(f4 % f4, np.float32)
+assert_type(f8 % AR_b, npt.NDArray[np.float64])
+
+assert_type(divmod(f8, b), tuple[np.float64, np.float64])
+assert_type(divmod(f8, f), tuple[np.float64, np.float64])
+assert_type(divmod(f8, f8), tuple[np.float64, np.float64])
+assert_type(divmod(f8, f4), tuple[np.float64, np.float64])
+assert_type(divmod(f4, f4), tuple[np.float32, np.float32])
+assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]])
+
+assert_type(b % f8, np.float64)
+assert_type(f % f8, np.float64) # pyright: ignore[reportAssertTypeFailure] # pyright incorrectly infers `builtins.float`
+assert_type(f8 % f8, np.float64)
+assert_type(f4 % f4, np.float32)
+assert_type(AR_b % f8, npt.NDArray[np.float64])
+
+assert_type(divmod(b, f8), tuple[np.float64, np.float64])
+assert_type(divmod(f8, f8), tuple[np.float64, np.float64])
+assert_type(divmod(f4, f4), tuple[np.float32, np.float32])
+# workarounds for https://github.com/microsoft/pyright/issues/9663
+assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64])
+assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64])
+assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]])
diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/modules.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/modules.pyi
new file mode 100644
index 0000000..628fb50
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/modules.pyi
@@ -0,0 +1,51 @@
+import types
+from typing import assert_type
+
+import numpy as np
+from numpy import f2py
+
+assert_type(np, types.ModuleType)
+
+assert_type(np.char, types.ModuleType)
+assert_type(np.ctypeslib, types.ModuleType)
+assert_type(np.emath, types.ModuleType)
+assert_type(np.fft, types.ModuleType)
+assert_type(np.lib, types.ModuleType)
+assert_type(np.linalg, types.ModuleType)
+assert_type(np.ma, types.ModuleType)
+assert_type(np.matrixlib, types.ModuleType)
+assert_type(np.polynomial, types.ModuleType)
+assert_type(np.random, types.ModuleType)
+assert_type(np.rec, types.ModuleType)
+assert_type(np.testing, types.ModuleType)
+assert_type(np.version, types.ModuleType)
+assert_type(np.exceptions, types.ModuleType)
+assert_type(np.dtypes, types.ModuleType)
+
+assert_type(np.lib.format, types.ModuleType)
+assert_type(np.lib.mixins, types.ModuleType)
+assert_type(np.lib.scimath, types.ModuleType)
+assert_type(np.lib.stride_tricks, types.ModuleType)
+assert_type(np.ma.extras, types.ModuleType)
+assert_type(np.polynomial.chebyshev, types.ModuleType)
+assert_type(np.polynomial.hermite, types.ModuleType)
+assert_type(np.polynomial.hermite_e, types.ModuleType)
+assert_type(np.polynomial.laguerre, types.ModuleType)
+assert_type(np.polynomial.legendre, types.ModuleType)
+assert_type(np.polynomial.polynomial, types.ModuleType)
+
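+# Each namespace above is only asserted to be *some* module; `types.ModuleType`
+# carries no information about a submodule's own contents, so an attribute
+# access such as `np.fft.fft` is still resolved through that submodule's
+# dedicated stubs rather than through these annotations.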
+assert_type(np.__path__, list[str]) +assert_type(np.__version__, str) +assert_type(np.test, np._pytesttester.PytestTester) +assert_type(np.test.module_name, str) + +assert_type(np.__all__, list[str]) +assert_type(np.char.__all__, list[str]) +assert_type(np.ctypeslib.__all__, list[str]) +assert_type(np.emath.__all__, list[str]) +assert_type(np.lib.__all__, list[str]) +assert_type(np.ma.__all__, list[str]) +assert_type(np.random.__all__, list[str]) +assert_type(np.rec.__all__, list[str]) +assert_type(np.testing.__all__, list[str]) +assert_type(f2py.__all__, list[str]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/multiarray.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/multiarray.pyi new file mode 100644 index 0000000..6ba3fcd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/multiarray.pyi @@ -0,0 +1,194 @@ +import datetime as dt +from typing import Any, Literal, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) + +class SubClass(npt.NDArray[_ScalarT_co]): ... + +subclass: SubClass[np.float64] + +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +AR_LIKE_f: list[float] +AR_LIKE_i: list[int] + +m: np.timedelta64 +M: np.datetime64 + +b_f8 = np.broadcast(AR_f8) +b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) + +nditer_obj: np.nditer + +date_scalar: dt.date +date_seq: list[dt.date] +timedelta_seq: list[dt.timedelta] + +n1: Literal[1] +n2: Literal[2] +n3: Literal[3] + +f8: np.float64 + +def func11(a: int) -> bool: ... +def func21(a: int, b: int) -> int: ... +def func12(a: int) -> tuple[complex, bool]: ... 
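+# func11, func21 and func12 are minimal callables with (nin, nout) arities of
+# (1, 1), (2, 1) and (1, 2); the `np.frompyfunc` assertions further below
+# check how those arities surface as `Literal` types on the resulting ufunc.
+# At runtime such a ufunc always yields object-dtype results, e.g. (a rough
+# sketch, not asserted in this file):
+#
+#     vectorized = np.frompyfunc(func11, 1, 1)
+#     vectorized(np.arange(3))  # object-dtype array of func11's bool results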
+ +assert_type(next(b_f8), tuple[Any, ...]) +assert_type(b_f8.reset(), None) +assert_type(b_f8.index, int) +assert_type(b_f8.iters, tuple[np.flatiter[Any], ...]) +assert_type(b_f8.nd, int) +assert_type(b_f8.ndim, int) +assert_type(b_f8.numiter, int) +assert_type(b_f8.shape, tuple[Any, ...]) +assert_type(b_f8.size, int) + +assert_type(next(b_i8_f8_f8), tuple[Any, ...]) +assert_type(b_i8_f8_f8.reset(), None) +assert_type(b_i8_f8_f8.index, int) +assert_type(b_i8_f8_f8.iters, tuple[np.flatiter[Any], ...]) +assert_type(b_i8_f8_f8.nd, int) +assert_type(b_i8_f8_f8.ndim, int) +assert_type(b_i8_f8_f8.numiter, int) +assert_type(b_i8_f8_f8.shape, tuple[Any, ...]) +assert_type(b_i8_f8_f8.size, int) + +assert_type(np.inner(AR_f8, AR_i8), Any) + +assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) + +assert_type(np.lexsort([0, 1, 2]), Any) + +assert_type(np.can_cast(np.dtype("i8"), int), bool) +assert_type(np.can_cast(AR_f8, "f8"), bool) +assert_type(np.can_cast(AR_f8, np.complex128, casting="unsafe"), bool) + +assert_type(np.min_scalar_type([1]), np.dtype) +assert_type(np.min_scalar_type(AR_f8), np.dtype) + +assert_type(np.result_type(int, [1]), np.dtype) +assert_type(np.result_type(AR_f8, AR_u1), np.dtype) +assert_type(np.result_type(AR_f8, np.complex128), np.dtype) + +assert_type(np.dot(AR_LIKE_f, AR_i8), Any) +assert_type(np.dot(AR_u1, 1), Any) +assert_type(np.dot(1.5j, 1), Any) +assert_type(np.dot(AR_u1, 1, out=AR_f8), npt.NDArray[np.float64]) + +assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating) +assert_type(np.vdot(AR_u1, 1), np.signedinteger) +assert_type(np.vdot(1.5j, 1), np.complexfloating) + +assert_type(np.bincount(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) + +assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) + +assert_type(np.packbits(AR_i8), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1), npt.NDArray[np.uint8]) + +assert_type(np.unpackbits(AR_u1), npt.NDArray[np.uint8]) + +assert_type(np.shares_memory(1, 2), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=1), bool) + +assert_type(np.may_share_memory(1, 2), bool) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=1), bool) + +assert_type(np.promote_types(np.int32, np.int64), np.dtype) +assert_type(np.promote_types("f4", float), np.dtype) + +assert_type(np.frompyfunc(func11, n1, n1).nin, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nargs, Literal[2]) +assert_type(np.frompyfunc(func11, n1, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).identity, None) +assert_type(np.frompyfunc(func11, n1, n1).signature, None) +assert_type(np.frompyfunc(func11, n1, n1)(f8), bool) +assert_type(np.frompyfunc(func11, n1, n1)(AR_f8), bool | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func11, n1, n1).at(AR_f8, AR_i8), None) + +assert_type(np.frompyfunc(func21, n2, n1).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).identity, None) +assert_type(np.frompyfunc(func21, n2, n1).signature, None) +assert_type(np.frompyfunc(func21, n2, n1)(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1)(AR_f8, f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1)(f8, AR_f8), int | 
npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduce(AR_f8, axis=0), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).accumulate(AR_f8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduceat(AR_f8, AR_i8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).outer(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1).outer(AR_f8, f8), int | npt.NDArray[np.object_]) + +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).identity, int) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).signature, None) + +assert_type(np.frompyfunc(func12, n1, n2).nin, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).nout, Literal[2]) +assert_type(np.frompyfunc(func12, n1, n2).nargs, int) +assert_type(np.frompyfunc(func12, n1, n2).ntypes, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).identity, None) +assert_type(np.frompyfunc(func12, n1, n2).signature, None) +assert_type( + np.frompyfunc(func12, n2, n2)(f8, f8), + tuple[complex, complex, *tuple[complex, ...]], +) +assert_type( + np.frompyfunc(func12, n2, n2)(AR_f8, f8), + tuple[ + complex | npt.NDArray[np.object_], + complex | npt.NDArray[np.object_], + *tuple[complex | npt.NDArray[np.object_], ...], + ], +) + +assert_type(np.datetime_data("m8[D]"), tuple[str, int]) +assert_type(np.datetime_data(np.datetime64), tuple[str, int]) +assert_type(np.datetime_data(np.dtype(np.timedelta64)), tuple[str, int]) + +assert_type(np.busday_count("2011-01", "2011-02"), np.int_) +assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) +assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) + +assert_type(np.busday_offset(M, m), np.datetime64) +assert_type(np.busday_offset(date_scalar, m), np.datetime64) +assert_type(np.busday_offset(M, 5), np.datetime64) +assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) +assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) +assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) +assert_type(np.busday_offset(["2011-01"], "2011-02", roll="forward"), npt.NDArray[np.datetime64]) + +assert_type(np.is_busday("2012"), np.bool) +assert_type(np.is_busday(date_scalar), np.bool) +assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool]) + +assert_type(np.datetime_as_string(M), np.str_) +assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) + +assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) +assert_type(np.busdaycalendar(holidays=[M]), np.busdaycalendar) + +assert_type(np.char.compare_chararrays("a", "b", "!=", rstrip=False), npt.NDArray[np.bool]) +assert_type(np.char.compare_chararrays(b"a", b"a", "==", True), npt.NDArray[np.bool]) + +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["c_index"]), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["readonly", "readonly"]]), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_dtypes=np.int_), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], order="C", casting="no"), tuple[np.nditer, ...]) diff --git 
a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi new file mode 100644 index 0000000..3322966 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -0,0 +1,21 @@ +from typing import TypeVar, assert_type + +import numpy as np +import numpy.typing as npt +from numpy._typing import _32Bit, _64Bit + +T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: + return a + b + +i8: np.int64 +i4: np.int32 +f8: np.float64 +f4: np.float32 + +assert_type(add(f8, i8), np.floating[_64Bit]) +assert_type(add(f4, i8), np.floating[_32Bit | _64Bit]) +assert_type(add(f8, i4), np.floating[_32Bit | _64Bit]) +assert_type(add(f4, i4), np.floating[_32Bit]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_assignability.pyi new file mode 100644 index 0000000..d754a94 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -0,0 +1,77 @@ +from typing import Protocol, TypeAlias, TypeVar, assert_type + +import numpy as np +from numpy._typing import _64Bit + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) + +class CanAbs(Protocol[_T_co]): + def __abs__(self, /) -> _T_co: ... + +class CanInvert(Protocol[_T_co]): + def __invert__(self, /) -> _T_co: ... + +class CanNeg(Protocol[_T_co]): + def __neg__(self, /) -> _T_co: ... + +class CanPos(Protocol[_T_co]): + def __pos__(self, /) -> _T_co: ... + +def do_abs(x: CanAbs[_T]) -> _T: ... +def do_invert(x: CanInvert[_T]) -> _T: ... +def do_neg(x: CanNeg[_T]) -> _T: ... +def do_pos(x: CanPos[_T]) -> _T: ... 
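+# Each Can* protocol above captures exactly one dunder, so passing an array
+# through e.g. `do_abs` reveals precisely what `ndarray.__abs__` is declared
+# to return for that dtype. In particular, `__abs__` on a complex array maps
+# to the matching real dtype below, since the magnitude of a complex number
+# is real-valued.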
+ +_Bool_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.bool]] +_UInt8_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.uint8]] +_Int16_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.int16]] +_LongLong_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longlong]] +_Float32_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float32]] +_Float64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] +_Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] +_Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] + +b1_1d: _Bool_1d +u1_1d: _UInt8_1d +i2_1d: _Int16_1d +q_1d: _LongLong_1d +f4_1d: _Float32_1d +f8_1d: _Float64_1d +g_1d: _LongDouble_1d +c8_1d: _Complex64_1d +c16_1d: _Complex128_1d +G_1d: _CLongDouble_1d + +assert_type(do_abs(b1_1d), _Bool_1d) +assert_type(do_abs(u1_1d), _UInt8_1d) +assert_type(do_abs(i2_1d), _Int16_1d) +assert_type(do_abs(q_1d), _LongLong_1d) +assert_type(do_abs(f4_1d), _Float32_1d) +assert_type(do_abs(f8_1d), _Float64_1d) +assert_type(do_abs(g_1d), _LongDouble_1d) + +assert_type(do_abs(c8_1d), _Float32_1d) +# NOTE: Unfortunately it's not possible to have this return a `float64` sctype, see +# https://github.com/python/mypy/issues/14070 +assert_type(do_abs(c16_1d), np.ndarray[tuple[int], np.dtype[np.floating[_64Bit]]]) +assert_type(do_abs(G_1d), _LongDouble_1d) + +assert_type(do_invert(b1_1d), _Bool_1d) +assert_type(do_invert(u1_1d), _UInt8_1d) +assert_type(do_invert(i2_1d), _Int16_1d) +assert_type(do_invert(q_1d), _LongLong_1d) + +assert_type(do_neg(u1_1d), _UInt8_1d) +assert_type(do_neg(i2_1d), _Int16_1d) +assert_type(do_neg(q_1d), _LongLong_1d) +assert_type(do_neg(f4_1d), _Float32_1d) +assert_type(do_neg(c16_1d), _Complex128_1d) + +assert_type(do_pos(u1_1d), _UInt8_1d) +assert_type(do_pos(i2_1d), _Int16_1d) +assert_type(do_pos(q_1d), _LongLong_1d) +assert_type(do_pos(f4_1d), _Float32_1d) +assert_type(do_pos(c16_1d), _Complex128_1d) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi new file mode 100644 index 0000000..bbd4257 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -0,0 +1,85 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +b1_0d: np.ndarray[tuple[()], np.dtype[np.bool]] +u2_1d: np.ndarray[tuple[int], np.dtype[np.uint16]] +i4_2d: np.ndarray[tuple[int, int], np.dtype[np.int32]] +f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +cG_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.clongdouble]] +i0_nd: npt.NDArray[np.int_] +uncertain_dtype: np.int32 | np.float64 | np.str_ + +# item +assert_type(i0_nd.item(), int) +assert_type(i0_nd.item(1), int) +assert_type(i0_nd.item(0, 1), int) +assert_type(i0_nd.item((0, 1)), int) + +assert_type(b1_0d.item(()), bool) +assert_type(u2_1d.item((0,)), int) +assert_type(i4_2d.item(-1, 2), int) +assert_type(f8_3d.item(2, 1, -1), float) +assert_type(cG_4d.item(-0xEd_fed_Deb_a_dead_bee), complex) # c'mon Ed, we talked about this... 
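+# `.item()` maps each scalar dtype to its builtin counterpart (bool, int,
+# float, complex); the flat-index, separate-indices and index-tuple forms
+# above all reveal that same builtin type.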
+ +# tolist +assert_type(b1_0d.tolist(), bool) +assert_type(u2_1d.tolist(), list[int]) +assert_type(i4_2d.tolist(), list[list[int]]) +assert_type(f8_3d.tolist(), list[list[list[float]]]) +assert_type(cG_4d.tolist(), Any) +assert_type(i0_nd.tolist(), Any) + +# regression tests for numpy/numpy#27944 +any_dtype: np.ndarray[Any, Any] +any_sctype: np.ndarray[Any, Any] +assert_type(any_dtype.tolist(), Any) +assert_type(any_sctype.tolist(), Any) + + +# itemset does not return a value +# tobytes is pretty simple +# tofile does not return a value +# dump does not return a value +# dumps is pretty simple + +# astype +assert_type(i0_nd.astype("float"), npt.NDArray[Any]) +assert_type(i0_nd.astype(float), npt.NDArray[Any]) +assert_type(i0_nd.astype(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np.float64]) + +assert_type(np.astype(i0_nd, np.float64), npt.NDArray[np.float64]) + +assert_type(i4_2d.astype(np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(np.astype(i4_2d, np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(f8_3d.astype(np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(np.astype(f8_3d, np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic]]) +assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype]) + +# byteswap +assert_type(i0_nd.byteswap(), npt.NDArray[np.int_]) +assert_type(i0_nd.byteswap(True), npt.NDArray[np.int_]) + +# copy +assert_type(i0_nd.copy(), npt.NDArray[np.int_]) +assert_type(i0_nd.copy("C"), npt.NDArray[np.int_]) + +assert_type(i0_nd.view(), npt.NDArray[np.int_]) +assert_type(i0_nd.view(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.view(float), npt.NDArray[Any]) +assert_type(i0_nd.view(np.float64, np.matrix), np.matrix[tuple[int, int], Any]) + +# getfield +assert_type(i0_nd.getfield("float"), npt.NDArray[Any]) +assert_type(i0_nd.getfield(float), npt.NDArray[Any]) +assert_type(i0_nd.getfield(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.getfield(np.float64, 8), npt.NDArray[np.float64]) + +# setflags does not return a value +# fill does not return a value diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi new file mode 100644 index 0000000..4cbb906 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -0,0 +1,247 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +from collections.abc import Iterator +import ctypes as ct +import operator +from types import ModuleType +from typing import Any, Literal, assert_type + +from typing_extensions import CapsuleType + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.object_]): ... 
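+# `SubClass` exists so the `out=B` assertions below can check that methods
+# taking an `out=` array are declared to return that array's exact ndarray
+# subclass rather than a plain `npt.NDArray`.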
+ +f8: np.float64 +i8: np.int64 +B: SubClass +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_c8: npt.NDArray[np.complex64] +AR_m: npt.NDArray[np.timedelta64] +AR_U: npt.NDArray[np.str_] +AR_V: npt.NDArray[np.void] + +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + +ctypes_obj = AR_f8.ctypes + +assert_type(AR_f8.__dlpack__(), CapsuleType) +assert_type(AR_f8.__dlpack_device__(), tuple[Literal[1], Literal[0]]) + +assert_type(ctypes_obj.data, int) +assert_type(ctypes_obj.shape, ct.Array[np.ctypeslib.c_intp]) +assert_type(ctypes_obj.strides, ct.Array[np.ctypeslib.c_intp]) +assert_type(ctypes_obj._as_parameter_, ct.c_void_p) + +assert_type(ctypes_obj.data_as(ct.c_void_p), ct.c_void_p) +assert_type(ctypes_obj.shape_as(ct.c_longlong), ct.Array[ct.c_longlong]) +assert_type(ctypes_obj.strides_as(ct.c_ubyte), ct.Array[ct.c_ubyte]) + +assert_type(f8.all(), np.bool) +assert_type(AR_f8.all(), np.bool) +assert_type(AR_f8.all(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(out=B), SubClass) + +assert_type(f8.any(), np.bool) +assert_type(AR_f8.any(), np.bool) +assert_type(AR_f8.any(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(out=B), SubClass) + +assert_type(f8.argmax(), np.intp) +assert_type(AR_f8.argmax(), np.intp) +assert_type(AR_f8.argmax(axis=0), Any) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) + +assert_type(f8.argmin(), np.intp) +assert_type(AR_f8.argmin(), np.intp) +assert_type(AR_f8.argmin(axis=0), Any) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) + +assert_type(f8.argsort(), npt.NDArray[Any]) +assert_type(AR_f8.argsort(), npt.NDArray[Any]) + +assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) +assert_type(AR_f8.choose([0]), npt.NDArray[Any]) +assert_type(AR_f8.choose([0], out=B), SubClass) + +assert_type(f8.clip(1), npt.NDArray[Any]) +assert_type(AR_f8.clip(1), npt.NDArray[Any]) +assert_type(AR_f8.clip(None, 1), npt.NDArray[Any]) +assert_type(AR_f8.clip(1, out=B), SubClass) +assert_type(AR_f8.clip(None, 1, out=B), SubClass) + +assert_type(f8.compress([0]), npt.NDArray[Any]) +assert_type(AR_f8.compress([0]), npt.NDArray[Any]) +assert_type(AR_f8.compress([0], out=B), SubClass) + +assert_type(f8.conj(), np.float64) +assert_type(AR_f8.conj(), npt.NDArray[np.float64]) +assert_type(B.conj(), SubClass) + +assert_type(f8.conjugate(), np.float64) +assert_type(AR_f8.conjugate(), npt.NDArray[np.float64]) +assert_type(B.conjugate(), SubClass) + +assert_type(f8.cumprod(), npt.NDArray[Any]) +assert_type(AR_f8.cumprod(), npt.NDArray[Any]) +assert_type(AR_f8.cumprod(out=B), SubClass) + +assert_type(f8.cumsum(), npt.NDArray[Any]) +assert_type(AR_f8.cumsum(), npt.NDArray[Any]) +assert_type(AR_f8.cumsum(out=B), SubClass) + +assert_type(f8.max(), Any) +assert_type(AR_f8.max(), Any) +assert_type(AR_f8.max(axis=0), Any) +assert_type(AR_f8.max(keepdims=True), Any) +assert_type(AR_f8.max(out=B), SubClass) + +assert_type(f8.mean(), Any) +assert_type(AR_f8.mean(), Any) +assert_type(AR_f8.mean(axis=0), Any) +assert_type(AR_f8.mean(keepdims=True), Any) +assert_type(AR_f8.mean(out=B), SubClass) + +assert_type(f8.min(), Any) +assert_type(AR_f8.min(), Any) +assert_type(AR_f8.min(axis=0), Any) 
+assert_type(AR_f8.min(keepdims=True), Any) +assert_type(AR_f8.min(out=B), SubClass) + +assert_type(f8.prod(), Any) +assert_type(AR_f8.prod(), Any) +assert_type(AR_f8.prod(axis=0), Any) +assert_type(AR_f8.prod(keepdims=True), Any) +assert_type(AR_f8.prod(out=B), SubClass) + +assert_type(f8.round(), np.float64) +assert_type(AR_f8.round(), npt.NDArray[np.float64]) +assert_type(AR_f8.round(out=B), SubClass) + +assert_type(f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8.repeat(1, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) +assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) + +assert_type(f8.std(), Any) +assert_type(AR_f8.std(), Any) +assert_type(AR_f8.std(axis=0), Any) +assert_type(AR_f8.std(keepdims=True), Any) +assert_type(AR_f8.std(out=B), SubClass) + +assert_type(f8.sum(), Any) +assert_type(AR_f8.sum(), Any) +assert_type(AR_f8.sum(axis=0), Any) +assert_type(AR_f8.sum(keepdims=True), Any) +assert_type(AR_f8.sum(out=B), SubClass) + +assert_type(f8.take(0), np.float64) +assert_type(AR_f8.take(0), np.float64) +assert_type(AR_f8.take([0]), npt.NDArray[np.float64]) +assert_type(AR_f8.take(0, out=B), SubClass) +assert_type(AR_f8.take([0], out=B), SubClass) + +assert_type(f8.var(), Any) +assert_type(AR_f8.var(), Any) +assert_type(AR_f8.var(axis=0), Any) +assert_type(AR_f8.var(keepdims=True), Any) +assert_type(AR_f8.var(out=B), SubClass) + +assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) + +assert_type(AR_f8.diagonal(), npt.NDArray[np.float64]) + +assert_type(AR_f8.dot(1), npt.NDArray[Any]) +assert_type(AR_f8.dot([1]), Any) +assert_type(AR_f8.dot(1, out=B), SubClass) + +assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) + +assert_type(AR_f8.searchsorted(1), np.intp) +assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) + +assert_type(AR_f8.trace(), Any) +assert_type(AR_f8.trace(out=B), SubClass) + +assert_type(AR_f8.item(), float) +assert_type(AR_U.item(), str) + +assert_type(AR_f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]]) + +assert_type(AR_f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]]) + +assert_type(AR_i8.reshape(None), npt.NDArray[np.int64]) +assert_type(AR_f8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_c8.reshape(2, 3, 4, 5), np.ndarray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(AR_m.reshape(()), np.ndarray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(AR_U.reshape([]), np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(AR_V.reshape((480, 720, 4)), np.ndarray[tuple[int, int, int], np.dtype[np.void]]) + +assert_type(int(AR_f8), int) +assert_type(int(AR_U), int) + +assert_type(float(AR_f8), float) +assert_type(float(AR_U), float) + +assert_type(complex(AR_f8), complex) + +assert_type(operator.index(AR_i8), int) + +assert_type(AR_f8.__array_wrap__(B), npt.NDArray[np.object_]) + +assert_type(AR_V[0], Any) +assert_type(AR_V[0, 0], Any) +assert_type(AR_V[AR_i8], npt.NDArray[np.void]) +assert_type(AR_V[AR_i8, AR_i8], npt.NDArray[np.void]) +assert_type(AR_V[AR_i8, None], npt.NDArray[np.void]) +assert_type(AR_V[0, ...], npt.NDArray[np.void]) +assert_type(AR_V[[0]], npt.NDArray[np.void]) 
+assert_type(AR_V[[0], [0]], npt.NDArray[np.void]) +assert_type(AR_V[:], npt.NDArray[np.void]) +assert_type(AR_V["a"], npt.NDArray[Any]) +assert_type(AR_V[["a", "b"]], npt.NDArray[np.void]) + +assert_type(AR_f8.dump("test_file"), None) +assert_type(AR_f8.dump(b"test_file"), None) +with open("test_file", "wb") as f: + assert_type(AR_f8.dump(f), None) + +assert_type(AR_f8.__array_finalize__(None), None) +assert_type(AR_f8.__array_finalize__(B), None) +assert_type(AR_f8.__array_finalize__(AR_f8), None) + +assert_type(f8.device, Literal["cpu"]) +assert_type(AR_f8.device, Literal["cpu"]) + +assert_type(f8.to_device("cpu"), np.float64) +assert_type(i8.to_device("cpu"), np.int64) +assert_type(AR_f8.to_device("cpu"), npt.NDArray[np.float64]) +assert_type(AR_i8.to_device("cpu"), npt.NDArray[np.int64]) +assert_type(AR_u1.to_device("cpu"), npt.NDArray[np.uint8]) +assert_type(AR_c8.to_device("cpu"), npt.NDArray[np.complex64]) +assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) + +assert_type(f8.__array_namespace__(), ModuleType) +assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi new file mode 100644 index 0000000..4447bb1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -0,0 +1,39 @@ +from typing import assert_type + +import numpy as np +import numpy.typing as npt + +nd: npt.NDArray[np.int64] + +# reshape +assert_type(nd.reshape(None), npt.NDArray[np.int64]) +assert_type(nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) + +assert_type(nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# resize does not return a value + +# transpose +assert_type(nd.transpose(), npt.NDArray[np.int64]) +assert_type(nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) + +# swapaxes +assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) + +# flatten +assert_type(nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# ravel +assert_type(nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# squeeze +assert_type(nd.squeeze(), npt.NDArray[np.int64]) +assert_type(nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(nd.squeeze((0, 2)), npt.NDArray[np.int64]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nditer.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nditer.pyi new file mode 100644 index 0000000..8965f3c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nditer.pyi @@ -0,0 +1,49 @@ +from typing import Any, assert_type + +import numpy as np 
+import numpy.typing as npt + +nditer_obj: np.nditer + +assert_type(np.nditer([0, 1], flags=["c_index"]), np.nditer) +assert_type(np.nditer([0, 1], op_flags=[["readonly", "readonly"]]), np.nditer) +assert_type(np.nditer([0, 1], op_dtypes=np.int_), np.nditer) +assert_type(np.nditer([0, 1], order="C", casting="no"), np.nditer) + +assert_type(nditer_obj.dtypes, tuple[np.dtype, ...]) +assert_type(nditer_obj.finished, bool) +assert_type(nditer_obj.has_delayed_bufalloc, bool) +assert_type(nditer_obj.has_index, bool) +assert_type(nditer_obj.has_multi_index, bool) +assert_type(nditer_obj.index, int) +assert_type(nditer_obj.iterationneedsapi, bool) +assert_type(nditer_obj.iterindex, int) +assert_type(nditer_obj.iterrange, tuple[int, ...]) +assert_type(nditer_obj.itersize, int) +assert_type(nditer_obj.itviews, tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.multi_index, tuple[int, ...]) +assert_type(nditer_obj.ndim, int) +assert_type(nditer_obj.nop, int) +assert_type(nditer_obj.operands, tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.shape, tuple[int, ...]) +assert_type(nditer_obj.value, tuple[npt.NDArray[Any], ...]) + +assert_type(nditer_obj.close(), None) +assert_type(nditer_obj.copy(), np.nditer) +assert_type(nditer_obj.debug_print(), None) +assert_type(nditer_obj.enable_external_loop(), None) +assert_type(nditer_obj.iternext(), bool) +assert_type(nditer_obj.remove_axis(0), None) +assert_type(nditer_obj.remove_multi_index(), None) +assert_type(nditer_obj.reset(), None) + +assert_type(len(nditer_obj), int) +assert_type(iter(nditer_obj), np.nditer) +assert_type(next(nditer_obj), tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.__copy__(), np.nditer) +with nditer_obj as f: + assert_type(f, np.nditer) +assert_type(nditer_obj[0], npt.NDArray[Any]) +assert_type(nditer_obj[:], tuple[npt.NDArray[Any], ...]) +nditer_obj[0] = 0 +nditer_obj[:] = [0, 1] diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nested_sequence.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nested_sequence.pyi new file mode 100644 index 0000000..b4f98b7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/nested_sequence.pyi @@ -0,0 +1,25 @@ +from collections.abc import Sequence +from typing import Any, assert_type + +from numpy._typing import _NestedSequence + +a: Sequence[int] +b: Sequence[Sequence[int]] +c: Sequence[Sequence[Sequence[int]]] +d: Sequence[Sequence[Sequence[Sequence[int]]]] +e: Sequence[bool] +f: tuple[int, ...] +g: list[int] +h: Sequence[Any] + +def func(a: _NestedSequence[int]) -> None: ... 
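+# `_NestedSequence[int]` is a protocol matching sequences nested to any depth
+# whose leaves are compatible with `int` (including `bool`, which type-checks
+# as an `int` subtype); concrete `tuple`/`list` objects and `range` qualify,
+# and `Sequence[Any]` is accepted because `Any` is compatible with any leaf.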
+ +assert_type(func(a), None) +assert_type(func(b), None) +assert_type(func(c), None) +assert_type(func(d), None) +assert_type(func(e), None) +assert_type(func(f), None) +assert_type(func(g), None) +assert_type(func(h), None) +assert_type(func(range(15)), None) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/npyio.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/npyio.pyi new file mode 100644 index 0000000..40da72c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/npyio.pyi @@ -0,0 +1,83 @@ +import pathlib +import re +import zipfile +from collections.abc import Mapping +from typing import IO, Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib._npyio_impl import BagObj + +str_path: str +pathlib_path: pathlib.Path +str_file: IO[str] +bytes_file: IO[bytes] + +npz_file: np.lib.npyio.NpzFile + +AR_i8: npt.NDArray[np.int64] +AR_LIKE_f8: list[float] + +class BytesWriter: + def write(self, data: bytes) -> None: ... + +class BytesReader: + def read(self, n: int = ...) -> bytes: ... + def seek(self, offset: int, whence: int = ...) -> int: ... + +bytes_writer: BytesWriter +bytes_reader: BytesReader + +assert_type(npz_file.zip, zipfile.ZipFile) +assert_type(npz_file.fid, IO[str] | None) +assert_type(npz_file.files, list[str]) +assert_type(npz_file.allow_pickle, bool) +assert_type(npz_file.pickle_kwargs, Mapping[str, Any] | None) +assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile]) +assert_type(npz_file["test"], npt.NDArray[Any]) +assert_type(len(npz_file), int) +with npz_file as f: + assert_type(f, np.lib.npyio.NpzFile) + +assert_type(np.load(bytes_file), Any) +assert_type(np.load(pathlib_path, allow_pickle=True), Any) +assert_type(np.load(str_path, encoding="bytes"), Any) +assert_type(np.load(bytes_reader), Any) + +assert_type(np.save(bytes_file, AR_LIKE_f8), None) +assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None) +assert_type(np.save(str_path, AR_LIKE_f8), None) +assert_type(np.save(bytes_writer, AR_LIKE_f8), None) + +assert_type(np.savez(bytes_file, AR_LIKE_f8), None) +assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None) +assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None) +assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None) + +assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None) +assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None) +assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None) +assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None) + +assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64]) +assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) +assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any]) +assert_type(np.loadtxt(str_file, comments="test"), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64]) +assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64]) + +assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any]) +assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_]) +assert_type(np.fromregex(pathlib_path, "test", np.float64), 
npt.NDArray[np.float64]) +assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64]) + +assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any]) +assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) +assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_file, comments="test"), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_path, delimiter="\n"), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any]) +assert_type(np.genfromtxt(["1", "2", "3"], ndmin=2), npt.NDArray[Any]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/numeric.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/numeric.pyi new file mode 100644 index 0000000..7c1ea89 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/numeric.pyi @@ -0,0 +1,134 @@ +""" +Tests for :mod:`_core.numeric`. + +Does not include tests which fall under ``array_constructors``. + +""" + +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.int64]): ... + +i8: np.int64 + +AR_b: npt.NDArray[np.bool] +AR_u8: npt.NDArray[np.uint64] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_O: npt.NDArray[np.object_] + +B: list[int] +C: SubClass + +assert_type(np.count_nonzero(i8), np.intp) +assert_type(np.count_nonzero(AR_i8), np.intp) +assert_type(np.count_nonzero(B), np.intp) +assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) +assert_type(np.count_nonzero(AR_i8, axis=0), Any) + +assert_type(np.isfortran(i8), bool) +assert_type(np.isfortran(AR_i8), bool) + +assert_type(np.argwhere(i8), npt.NDArray[np.intp]) +assert_type(np.argwhere(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.flatnonzero(i8), npt.NDArray[np.intp]) +assert_type(np.flatnonzero(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.correlate(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) +assert_type(np.correlate(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) +assert_type(np.correlate(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.correlate(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.correlate(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.correlate(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.correlate(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.correlate(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.correlate(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.convolve(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) +assert_type(np.convolve(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) +assert_type(np.convolve(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.convolve(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.convolve(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.convolve(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.convolve(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.convolve(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.outer(i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.outer(B, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.outer(AR_i8, AR_i8, 
out=C), SubClass) +assert_type(np.outer(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.outer(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.outer(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.tensordot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.tensordot(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.isscalar(i8), bool) +assert_type(np.isscalar(AR_i8), bool) +assert_type(np.isscalar(B), bool) + +assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) +assert_type(np.roll(B, 1), npt.NDArray[Any]) + +assert_type(np.rollaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) + +assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) +assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) + +assert_type(np.cross(B, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.cross(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.cross(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.cross(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.cross(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) +assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.indices([0, 1, 2], sparse=True, dtype=np.float64), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.indices([0, 1, 2], dtype=float), npt.NDArray[Any]) +assert_type(np.indices([0, 1, 2], sparse=True, dtype=float), tuple[npt.NDArray[Any], ...]) + +assert_type(np.binary_repr(1), str) + +assert_type(np.base_repr(1), str) + +assert_type(np.allclose(i8, AR_i8), bool) +assert_type(np.allclose(B, AR_i8), bool) +assert_type(np.allclose(AR_i8, AR_i8), bool) + +assert_type(np.isclose(i8, i8), np.bool) +assert_type(np.isclose(i8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(B, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool]) + +assert_type(np.array_equal(i8, AR_i8), bool) +assert_type(np.array_equal(B, AR_i8), bool) +assert_type(np.array_equal(AR_i8, AR_i8), bool) + +assert_type(np.array_equiv(i8, AR_i8), bool) +assert_type(np.array_equiv(B, AR_i8), bool) +assert_type(np.array_equiv(AR_i8, AR_i8), bool) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/numerictypes.pyi 
b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/numerictypes.pyi new file mode 100644 index 0000000..4a3e02c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -0,0 +1,51 @@ +from typing import Literal, assert_type + +import numpy as np + +assert_type( + np.ScalarType, + tuple[ + type[int], + type[float], + type[complex], + type[bool], + type[bytes], + type[str], + type[memoryview], + type[np.bool], + type[np.csingle], + type[np.cdouble], + type[np.clongdouble], + type[np.half], + type[np.single], + type[np.double], + type[np.longdouble], + type[np.byte], + type[np.short], + type[np.intc], + type[np.long], + type[np.longlong], + type[np.timedelta64], + type[np.datetime64], + type[np.object_], + type[np.bytes_], + type[np.str_], + type[np.ubyte], + type[np.ushort], + type[np.uintc], + type[np.ulong], + type[np.ulonglong], + type[np.void], + ], +) +assert_type(np.ScalarType[0], type[int]) +assert_type(np.ScalarType[3], type[bool]) +assert_type(np.ScalarType[8], type[np.csingle]) +assert_type(np.ScalarType[10], type[np.clongdouble]) +assert_type(np.bool_(object()), np.bool) + +assert_type(np.typecodes["Character"], Literal["c"]) +assert_type(np.typecodes["Complex"], Literal["FDG"]) +assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) + +assert_type(np.sctypeDict['uint8'], type[np.generic]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_polybase.pyi new file mode 100644 index 0000000..bb92703 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -0,0 +1,220 @@ +from collections.abc import Sequence +from decimal import Decimal +from fractions import Fraction +from typing import Any, LiteralString, TypeAlias, TypeVar, assert_type +from typing import Literal as L + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +_Ar_x: TypeAlias = npt.NDArray[np.inexact | np.object_] +_Ar_f: TypeAlias = npt.NDArray[np.floating] +_Ar_c: TypeAlias = npt.NDArray[np.complexfloating] +_Ar_O: TypeAlias = npt.NDArray[np.object_] + +_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact | np.object_]] +_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] +_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_BasisName: TypeAlias = L["X"] + +SC_i: np.int_ +SC_i_co: int | np.int_ +SC_f: np.float64 +SC_f_co: float | np.float64 | np.int_ +SC_c: np.complex128 +SC_c_co: complex | np.complex128 +SC_O: Decimal + +AR_i: npt.NDArray[np.int_] +AR_f: npt.NDArray[np.float64] +AR_f_co: npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_c: npt.NDArray[np.complex128] +AR_c_co: npt.NDArray[np.complex128] | npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_O: npt.NDArray[np.object_] +AR_O_co: npt.NDArray[np.object_ | np.number] + +SQ_i: Sequence[int] +SQ_f: Sequence[float] +SQ_c: Sequence[complex] 
+SQ_O: Sequence[Decimal] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev +PS_herm: npp.Hermite +PS_herme: npp.HermiteE +PS_lag: npp.Laguerre +PS_leg: npp.Legendre +PS_all: ( + npp.Polynomial + | npp.Chebyshev + | npp.Hermite + | npp.HermiteE + | npp.Laguerre + | npp.Legendre +) + +# static- and classmethods + +assert_type(type(PS_poly).basis_name, None) +assert_type(type(PS_cheb).basis_name, L['T']) +assert_type(type(PS_herm).basis_name, L['H']) +assert_type(type(PS_herme).basis_name, L['He']) +assert_type(type(PS_lag).basis_name, L['L']) +assert_type(type(PS_leg).basis_name, L['P']) + +assert_type(type(PS_all).__hash__, None) +assert_type(type(PS_all).__array_ufunc__, None) +assert_type(type(PS_all).maxpower, L[100]) + +assert_type(type(PS_poly).fromroots(SC_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(SQ_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(AR_i), npp.Polynomial) +assert_type(type(PS_cheb).fromroots(SC_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(SQ_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(AR_f_co), npp.Chebyshev) +assert_type(type(PS_herm).fromroots(SC_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(SQ_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(AR_c_co), npp.Hermite) +assert_type(type(PS_leg).fromroots(SC_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) + +assert_type(type(PS_poly).identity(), npp.Polynomial) +assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) + +assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) +assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) + +assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) +assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) + +# attributes / properties + +assert_type(PS_all.coef, _Ar_x_n) +assert_type(PS_all.domain, _Ar_x_2) +assert_type(PS_all.window, _Ar_x_2) +assert_type(PS_all.symbol, LiteralString) + +# instance methods + +assert_type(PS_all.has_samecoef(PS_all), bool) +assert_type(PS_all.has_samedomain(PS_all), bool) +assert_type(PS_all.has_samewindow(PS_all), bool) +assert_type(PS_all.has_sametype(PS_all), bool) +assert_type(PS_poly.has_sametype(PS_poly), bool) +assert_type(PS_poly.has_sametype(PS_leg), bool) +assert_type(PS_poly.has_sametype(NotADirectoryError), L[False]) + +assert_type(PS_poly.copy(), npp.Polynomial) +assert_type(PS_cheb.copy(), npp.Chebyshev) +assert_type(PS_herm.copy(), npp.Hermite) +assert_type(PS_herme.copy(), npp.HermiteE) +assert_type(PS_lag.copy(), npp.Laguerre) +assert_type(PS_leg.copy(), npp.Legendre) + +assert_type(PS_leg.cutdeg(), npp.Legendre) +assert_type(PS_leg.trim(), npp.Legendre) +assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) +assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) + +assert_type(PS_all.convert(None, npp.Chebyshev), npp.Chebyshev) +assert_type(PS_all.convert((0, 1), npp.Laguerre), npp.Laguerre) +assert_type(PS_all.convert([0, 1], npp.Hermite, [-1, 1]), npp.Hermite) + +assert_type(PS_all.degree(), int) +assert_type(PS_all.mapparms(), tuple[Any, Any]) + +assert_type(PS_poly.integ(), npp.Polynomial) +assert_type(PS_herme.integ(SC_i_co), npp.HermiteE) +assert_type(PS_lag.integ(SC_i_co, SC_f_co), npp.Laguerre) +assert_type(PS_poly.deriv(), npp.Polynomial) +assert_type(PS_herm.deriv(SC_i_co), npp.Hermite) + +assert_type(PS_poly.roots(), _Ar_x_n) + +assert_type( + PS_poly.linspace(), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type( + 
PS_poly.linspace(9), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type(PS_cheb.fit(AR_c_co, AR_c_co, SC_i_co), npp.Chebyshev) +assert_type(PS_leg.fit(AR_c_co, AR_c_co, AR_i), npp.Legendre) +assert_type(PS_herm.fit(AR_c_co, AR_c_co, SQ_i), npp.Hermite) +assert_type(PS_poly.fit(AR_c_co, SQ_c, SQ_i), npp.Polynomial) +assert_type(PS_lag.fit(SQ_c, SQ_c, SQ_i, full=False), npp.Laguerre) +assert_type( + PS_herme.fit(SQ_c, AR_c_co, SC_i_co, full=True), + tuple[npp.HermiteE, Sequence[np.inexact | np.int32]], +) + +# custom operations + +assert_type(PS_all.__hash__, None) +assert_type(PS_all.__array_ufunc__, None) + +assert_type(str(PS_all), str) +assert_type(repr(PS_all), str) +assert_type(format(PS_all), str) + +assert_type(len(PS_all), int) +assert_type(next(iter(PS_all)), np.inexact | object) + +assert_type(PS_all(SC_f_co), np.float64 | np.complex128) +assert_type(PS_all(SC_c_co), np.complex128) +assert_type(PS_all(Decimal()), np.float64 | np.complex128) +assert_type(PS_all(Fraction()), np.float64 | np.complex128) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) +assert_type(PS_all(PS_poly), npp.Polynomial) + +assert_type(PS_poly == PS_poly, bool) +assert_type(PS_poly != PS_poly, bool) + +assert_type(-PS_poly, npp.Polynomial) +assert_type(+PS_poly, npp.Polynomial) + +assert_type(PS_poly + 5, npp.Polynomial) +assert_type(PS_poly - 5, npp.Polynomial) +assert_type(PS_poly * 5, npp.Polynomial) +assert_type(PS_poly / 5, npp.Polynomial) +assert_type(PS_poly // 5, npp.Polynomial) +assert_type(PS_poly % 5, npp.Polynomial) + +assert_type(PS_poly + PS_leg, npp.Polynomial) +assert_type(PS_poly - PS_leg, npp.Polynomial) +assert_type(PS_poly * PS_leg, npp.Polynomial) +assert_type(PS_poly / PS_leg, npp.Polynomial) +assert_type(PS_poly // PS_leg, npp.Polynomial) +assert_type(PS_poly % PS_leg, npp.Polynomial) + +assert_type(5 + PS_poly, npp.Polynomial) +assert_type(5 - PS_poly, npp.Polynomial) +assert_type(5 * PS_poly, npp.Polynomial) +assert_type(5 / PS_poly, npp.Polynomial) +assert_type(5 // PS_poly, npp.Polynomial) +assert_type(5 % PS_poly, npp.Polynomial) +assert_type(divmod(PS_poly, 5), tuple[npp.Polynomial, npp.Polynomial]) +assert_type(divmod(5, PS_poly), tuple[npp.Polynomial, npp.Polynomial]) + +assert_type(PS_poly**1, npp.Polynomial) +assert_type(PS_poly**1.0, npp.Polynomial) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi new file mode 100644 index 0000000..45522e7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -0,0 +1,219 @@ +from collections.abc import Sequence +from decimal import Decimal +from fractions import Fraction +from typing import Any, TypeAlias, assert_type +from typing import Literal as L + +import numpy as np +import numpy.polynomial.polyutils as pu +import numpy.typing as npt +from numpy.polynomial._polytypes import _Tuple2 + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] 
+_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +num_int: int +num_float: float +num_complex: complex +# will result in an `object_` dtype +num_object: Decimal | Fraction + +sct_int: np.int_ +sct_float: np.float64 +sct_complex: np.complex128 +sct_object: np.object_ # doesn't exist at runtime + +arr_int: npt.NDArray[np.int_] +arr_float: npt.NDArray[np.float64] +arr_complex: npt.NDArray[np.complex128] +arr_object: npt.NDArray[np.object_] + +seq_num_int: Sequence[int] +seq_num_float: Sequence[float] +seq_num_complex: Sequence[complex] +seq_num_object: Sequence[Decimal | Fraction] + +seq_sct_int: Sequence[np.int_] +seq_sct_float: Sequence[np.float64] +seq_sct_complex: Sequence[np.complex128] +seq_sct_object: Sequence[np.object_] + +seq_arr_int: Sequence[npt.NDArray[np.int_]] +seq_arr_float: Sequence[npt.NDArray[np.float64]] +seq_arr_complex: Sequence[npt.NDArray[np.complex128]] +seq_arr_object: Sequence[npt.NDArray[np.object_]] + +seq_seq_num_int: Sequence[Sequence[int]] +seq_seq_num_float: Sequence[Sequence[float]] +seq_seq_num_complex: Sequence[Sequence[complex]] +seq_seq_num_object: Sequence[Sequence[Decimal | Fraction]] + +seq_seq_sct_int: Sequence[Sequence[np.int_]] +seq_seq_sct_float: Sequence[Sequence[np.float64]] +seq_seq_sct_complex: Sequence[Sequence[np.complex128]] +seq_seq_sct_object: Sequence[Sequence[np.object_]] # doesn't exist at runtime + +# as_series + +assert_type(pu.as_series(arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_sct_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_sct_object), list[_ArrObject1D]) + +# trimcoef + +assert_type(pu.trimcoef(num_int), _ArrFloat1D) +assert_type(pu.trimcoef(num_float), _ArrFloat1D) +assert_type(pu.trimcoef(num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) + +assert_type(pu.trimcoef(sct_int), 
_ArrFloat1D) +assert_type(pu.trimcoef(sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(sct_object), _ArrObject1D) + +assert_type(pu.trimcoef(arr_int), _ArrFloat1D) +assert_type(pu.trimcoef(arr_float), _ArrFloat1D) +assert_type(pu.trimcoef(arr_complex), _ArrComplex1D) +assert_type(pu.trimcoef(arr_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_num_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_num_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_sct_object), _ArrObject1D) + +# getdomain + +assert_type(pu.getdomain(num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(num_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(sct_object), _ArrObject1D_2) + +assert_type(pu.getdomain(arr_int), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_float), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(arr_object), _ArrObject1D_2) + +assert_type(pu.getdomain(seq_num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_num_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(seq_num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(seq_sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(seq_sct_object), _ArrObject1D_2) + +# mapparms + +assert_type(pu.mapparms(seq_num_int, seq_num_int), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_int, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object]) + +assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_sct_object, seq_sct_object), _Tuple2[object]) + +assert_type(pu.mapparms(arr_int, arr_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_int, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_object), _Tuple2[object]) +assert_type(pu.mapparms(arr_object, arr_object), 
_Tuple2[object]) + +# mapdomain + +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_int), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_float, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_object), object) +assert_type(pu.mapdomain(num_complex, seq_num_object, seq_num_object), object) +assert_type(pu.mapdomain(num_object, seq_num_object, seq_num_object), object) + +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_object, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_object, seq_num_object, seq_num_object), _ArrObject1D) + +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_object, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_object, seq_sct_object, seq_sct_object), _ArrObject1D) + +assert_type(pu.mapdomain(arr_int, arr_int, arr_int), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_int, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_float, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_complex, arr_object, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_object, arr_object, arr_object), _ArrObject1D) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_series.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_series.pyi 
new file mode 100644 index 0000000..93f0799 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -0,0 +1,138 @@ +from collections.abc import Sequence +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev + +assert_type(npp.polynomial.polyroots(AR_f8), _ArrFloat1D64) +assert_type(npp.polynomial.polyroots(AR_c16), _ArrComplex1D128) +assert_type(npp.polynomial.polyroots(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polyfromroots(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polyfromroots(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyfromroots(AR_O), _ArrObject1D) + +# assert_type(npp.polynomial.polyadd(AR_b, AR_b), NoReturn) +assert_type(npp.polynomial.polyadd(AR_u4, AR_b), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_f8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyadd(AR_O, AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polymulx(AR_u4), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polymulx(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polypow(AR_u4, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_i8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_f8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_c16, 2), _ArrComplex1D) +assert_type(npp.polynomial.polypow(AR_O, 2), _ArrObject1D) + +# assert_type(npp.polynomial.polyder(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyder(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +# assert_type(npp.polynomial.polyint(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval(AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval2d(AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_u4, AR_u4, AR_b), 
npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval2d(AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval3d(AR_b, AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_u4, AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander(AR_O, 3), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander2d(AR_f8, AR_f8, [4, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander2d(AR_c16, AR_c16, [4, 2]), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander2d(AR_O, AR_O, [4, 2]), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander3d(AR_f8, AR_f8, AR_f8, [4, 3, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander3d(AR_c16, AR_c16, AR_c16, [4, 3, 2]), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander3d(AR_O, AR_O, AR_O, [4, 3, 2]), npt.NDArray[np.object_]) + +assert_type( + npp.polynomial.polyfit(AR_f8, AR_f8, 2), + npt.NDArray[np.floating], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[npt.NDArray[np.floating], Sequence[np.inexact | np.int32]], +) +assert_type( + npp.polynomial.polyfit(AR_c16, AR_f8, 2), + npt.NDArray[np.complexfloating], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_c16, 1, full=True)[0], + npt.NDArray[np.complexfloating], +) + +assert_type(npp.chebyshev.chebgauss(2), tuple[_ArrFloat1D64, _ArrFloat1D64]) + +assert_type(npp.chebyshev.chebweight(AR_f8), npt.NDArray[np.float64]) +assert_type(npp.chebyshev.chebweight(AR_c16), npt.NDArray[np.complex128]) +assert_type(npp.chebyshev.chebweight(AR_O), npt.NDArray[np.object_]) + +assert_type(npp.chebyshev.poly2cheb(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.poly2cheb(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.poly2cheb(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.cheb2poly(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.cheb2poly(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.cheb2poly(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.chebpts1(6), _ArrFloat1D64) +assert_type(npp.chebyshev.chebpts2(6), _ArrFloat1D64) + +assert_type( + npp.chebyshev.chebinterpolate(np.tanh, 3), + 
npt.NDArray[np.float64 | np.complex128 | np.object_], +) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/random.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/random.pyi new file mode 100644 index 0000000..e188eb0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/random.pyi @@ -0,0 +1,1546 @@ +import threading +from collections.abc import Sequence +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64 +from numpy.random._philox import Philox +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import SeedlessSeedSequence, SeedSequence + +def_rng = np.random.default_rng() +seed_seq = np.random.SeedSequence() +mt19937 = np.random.MT19937() +pcg64 = np.random.PCG64() +sfc64 = np.random.SFC64() +philox = np.random.Philox() +seedless_seq = SeedlessSeedSequence() + +assert_type(def_rng, Generator) +assert_type(mt19937, MT19937) +assert_type(pcg64, PCG64) +assert_type(sfc64, SFC64) +assert_type(philox, Philox) +assert_type(seed_seq, SeedSequence) +assert_type(seedless_seq, SeedlessSeedSequence) + +mt19937_jumped = mt19937.jumped() +mt19937_jumped3 = mt19937.jumped(3) +mt19937_raw = mt19937.random_raw() +mt19937_raw_arr = mt19937.random_raw(5) + +assert_type(mt19937_jumped, MT19937) +assert_type(mt19937_jumped3, MT19937) +assert_type(mt19937_raw, int) +assert_type(mt19937_raw_arr, npt.NDArray[np.uint64]) +assert_type(mt19937.lock, threading.Lock) + +pcg64_jumped = pcg64.jumped() +pcg64_jumped3 = pcg64.jumped(3) +pcg64_adv = pcg64.advance(3) +pcg64_raw = pcg64.random_raw() +pcg64_raw_arr = pcg64.random_raw(5) + +assert_type(pcg64_jumped, PCG64) +assert_type(pcg64_jumped3, PCG64) +assert_type(pcg64_adv, PCG64) +assert_type(pcg64_raw, int) +assert_type(pcg64_raw_arr, npt.NDArray[np.uint64]) +assert_type(pcg64.lock, threading.Lock) + +philox_jumped = philox.jumped() +philox_jumped3 = philox.jumped(3) +philox_adv = philox.advance(3) +philox_raw = philox.random_raw() +philox_raw_arr = philox.random_raw(5) + +assert_type(philox_jumped, Philox) +assert_type(philox_jumped3, Philox) +assert_type(philox_adv, Philox) +assert_type(philox_raw, int) +assert_type(philox_raw_arr, npt.NDArray[np.uint64]) +assert_type(philox.lock, threading.Lock) + +sfc64_raw = sfc64.random_raw() +sfc64_raw_arr = sfc64.random_raw(5) + +assert_type(sfc64_raw, int) +assert_type(sfc64_raw_arr, npt.NDArray[np.uint64]) +assert_type(sfc64.lock, threading.Lock) + +assert_type(seed_seq.pool, npt.NDArray[np.uint32]) +assert_type(seed_seq.entropy, int | Sequence[int] | None) +assert_type(seed_seq.spawn(1), list[np.random.SeedSequence]) +assert_type(seed_seq.generate_state(8, "uint32"), npt.NDArray[np.uint32 | np.uint64]) +assert_type(seed_seq.generate_state(8, "uint64"), npt.NDArray[np.uint32 | np.uint64]) + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: npt.NDArray[np.float64] = np.array([0.1]) +D_arr_0p5: npt.NDArray[np.float64] = np.array([0.5]) +D_arr_0p9: npt.NDArray[np.float64] = np.array([0.9]) +D_arr_1p5: npt.NDArray[np.float64] = np.array([1.5]) +I_arr_10: npt.NDArray[np.int_] = np.array([10], dtype=np.int_) +I_arr_20: npt.NDArray[np.int_] = np.array([20], dtype=np.int_) +D_arr_like_0p1: list[float] = [0.1] +D_arr_like_0p5: list[float] = [0.5] +D_arr_like_0p9: list[float] = [0.9] +D_arr_like_1p5: list[float] = [1.5] +I_arr_like_10: list[int] = 
[10] +I_arr_like_20: list[int] = [20] +D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: npt.NDArray[np.float64] = np.array(D_2D_like) +S_out: npt.NDArray[np.float32] = np.empty(1, dtype=np.float32) +D_out: npt.NDArray[np.float64] = np.empty(1) + +assert_type(def_gen.standard_normal(), float) +assert_type(def_gen.standard_normal(dtype=np.float32), float) +assert_type(def_gen.standard_normal(dtype="float32"), float) +assert_type(def_gen.standard_normal(dtype="double"), float) +assert_type(def_gen.standard_normal(dtype=np.float64), float) +assert_type(def_gen.standard_normal(size=None), float) +assert_type(def_gen.standard_normal(size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.random(), float) +assert_type(def_gen.random(dtype=np.float32), float) +assert_type(def_gen.random(dtype="float32"), float) +assert_type(def_gen.random(dtype="double"), float) +assert_type(def_gen.random(dtype=np.float64), float) +assert_type(def_gen.random(size=None), float) +assert_type(def_gen.random(size=1), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.random(dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.random(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_cauchy(), float) +assert_type(def_gen.standard_cauchy(size=None), float) +assert_type(def_gen.standard_cauchy(size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_exponential(), float) +assert_type(def_gen.standard_exponential(method="inv"), float) +assert_type(def_gen.standard_exponential(dtype=np.float32), float) +assert_type(def_gen.standard_exponential(dtype="float32"), float) +assert_type(def_gen.standard_exponential(dtype="double"), float) +assert_type(def_gen.standard_exponential(dtype=np.float64), float) +assert_type(def_gen.standard_exponential(size=None), float) +assert_type(def_gen.standard_exponential(size=None, method="inv"), float) +assert_type(def_gen.standard_exponential(size=1, method="inv"), npt.NDArray[np.float64]) 
+assert_type(def_gen.standard_exponential(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.zipf(1.5), int) +assert_type(def_gen.zipf(1.5, size=None), int) +assert_type(def_gen.zipf(1.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_1p5), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_1p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_like_1p5), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.weibull(0.5), float) +assert_type(def_gen.weibull(0.5, size=None), float) +assert_type(def_gen.weibull(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_t(0.5), float) +assert_type(def_gen.standard_t(0.5, size=None), float) +assert_type(def_gen.standard_t(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.poisson(0.5), int) +assert_type(def_gen.poisson(0.5, size=None), int) +assert_type(def_gen.poisson(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.power(0.5), float) +assert_type(def_gen.power(0.5, size=None), float) +assert_type(def_gen.power(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.pareto(0.5), float) +assert_type(def_gen.pareto(0.5, size=None), float) +assert_type(def_gen.pareto(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_like_0p5), 
npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.chisquare(0.5), float) +assert_type(def_gen.chisquare(0.5, size=None), float) +assert_type(def_gen.chisquare(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.exponential(0.5), float) +assert_type(def_gen.exponential(0.5, size=None), float) +assert_type(def_gen.exponential(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.geometric(0.5), int) +assert_type(def_gen.geometric(0.5, size=None), int) +assert_type(def_gen.geometric(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.logseries(0.5), int) +assert_type(def_gen.logseries(0.5, size=None), int) +assert_type(def_gen.logseries(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.rayleigh(0.5), float) +assert_type(def_gen.rayleigh(0.5, size=None), float) +assert_type(def_gen.rayleigh(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_gamma(0.5), float) +assert_type(def_gen.standard_gamma(0.5, size=None), float) +assert_type(def_gen.standard_gamma(0.5, dtype="float32"), float) +assert_type(def_gen.standard_gamma(0.5, size=None, dtype="float32"), float) +assert_type(def_gen.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_0p5, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(0.5, out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, out=D_out), npt.NDArray[np.float64]) 
+assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64), npt.NDArray[np.float64])
+
+assert_type(def_gen.vonmises(0.5, 0.5), float)
+assert_type(def_gen.vonmises(0.5, 0.5, size=None), float)
+assert_type(def_gen.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.wald(0.5, 0.5), float)
+assert_type(def_gen.wald(0.5, 0.5, size=None), float)
+assert_type(def_gen.wald(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.wald(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.uniform(0.5, 0.5), float)
+assert_type(def_gen.uniform(0.5, 0.5, size=None), float)
+assert_type(def_gen.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.beta(0.5, 0.5), float)
+assert_type(def_gen.beta(0.5, 0.5, size=None), float)
+assert_type(def_gen.beta(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.beta(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.f(0.5, 0.5), float)
+assert_type(def_gen.f(0.5, 0.5, size=None), float)
+assert_type(def_gen.f(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.f(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.f(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.gamma(0.5, 0.5), float)
+assert_type(def_gen.gamma(0.5, 0.5, size=None), float)
+assert_type(def_gen.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.gumbel(0.5, 0.5), float)
+assert_type(def_gen.gumbel(0.5, 0.5, size=None), float)
+assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.laplace(0.5, 0.5), float)
+assert_type(def_gen.laplace(0.5, 0.5, size=None), float)
+assert_type(def_gen.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.logistic(0.5, 0.5), float)
+assert_type(def_gen.logistic(0.5, 0.5, size=None), float)
+assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.lognormal(0.5, 0.5), float)
+assert_type(def_gen.lognormal(0.5, 0.5, size=None), float)
+assert_type(def_gen.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.noncentral_chisquare(0.5, 0.5), float)
+assert_type(def_gen.noncentral_chisquare(0.5, 0.5, size=None), float)
+assert_type(def_gen.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.normal(0.5, 0.5), float)
+assert_type(def_gen.normal(0.5, 0.5, size=None), float)
+assert_type(def_gen.normal(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.triangular(0.1, 0.5, 0.9), float)
+assert_type(def_gen.triangular(0.1, 0.5, 0.9, size=None), float)
+assert_type(def_gen.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9), float)
+assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=None), float)
+assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64])
+
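+# Descriptive note: the discrete distributions below follow the same convention
+# as the float draws above — scalar arguments yield a builtin int, while any
+# array-like argument or an explicit size yields npt.NDArray[np.int64].
+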
+assert_type(def_gen.binomial(10, 0.5), int)
+assert_type(def_gen.binomial(10, 0.5, size=None), int)
+assert_type(def_gen.binomial(10, 0.5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(I_arr_10, 0.5), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(10, D_arr_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(I_arr_like_10, 0.5), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(10, D_arr_like_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int64])
+
+assert_type(def_gen.negative_binomial(10, 0.5), int)
+assert_type(def_gen.negative_binomial(10, 0.5, size=None), int)
+assert_type(def_gen.negative_binomial(10, 0.5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(10, D_arr_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int64])
+
+assert_type(def_gen.hypergeometric(20, 20, 10), int)
+assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int)
+assert_type(def_gen.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64])
+
+I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64)
+
+assert_type(def_gen.integers(0, 100), np.int64)
+assert_type(def_gen.integers(100), np.int64)
+assert_type(def_gen.integers([100]), npt.NDArray[np.int64])
+assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64])
+
+I_bool_low: npt.NDArray[np.bool] = np.array([0], dtype=np.bool)
+I_bool_low_like: list[int] = [0]
+I_bool_high_open: npt.NDArray[np.bool] = np.array([1], dtype=np.bool)
+I_bool_high_closed: npt.NDArray[np.bool] = np.array([1], dtype=np.bool)
+
+assert_type(def_gen.integers(2, dtype=bool), bool)
+assert_type(def_gen.integers(0, 2, dtype=bool), bool)
+assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool)
+assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool)
+assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool])
+assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool])
+assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool])
+assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool])
+assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool])
+assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool])
+assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool])
+
+assert_type(def_gen.integers(2, dtype=np.bool), np.bool)
+assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool)
+assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool)
+assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool)
+assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool])
+assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool])
+assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool])
+assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool])
+assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool])
+assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool])
+assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool])
+
+I_u1_low: npt.NDArray[np.uint8] = np.array([0], dtype=np.uint8)
+I_u1_low_like: list[int] = [0]
+I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8)
+I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8)
+
+assert_type(def_gen.integers(256, dtype="u1"), np.uint8)
+assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8)
+assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8)
+assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8)
+assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8])
+
+assert_type(def_gen.integers(256, dtype="uint8"), np.uint8)
+assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8)
+assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8)
+assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8)
+assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8])
+
+assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8)
+assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8)
+assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8)
+assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8)
+assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8])
+assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8])
+
+I_u2_low: npt.NDArray[np.uint16] = np.array([0], dtype=np.uint16)
+I_u2_low_like: list[int] = [0]
+I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16)
+I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16)
+
+assert_type(def_gen.integers(65536, dtype="u2"), np.uint16)
+assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16)
+assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16)
+assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16)
+assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16])
+
+assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16)
+assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16)
+assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16)
+assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16)
+assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16])
+
+assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16)
+assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16)
+assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16)
+assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16)
+assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16])
+assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16])
+
+I_u4_low: npt.NDArray[np.uint32] = np.array([0], dtype=np.uint32)
+I_u4_low_like: list[int] = [0]
+I_u4_high_open: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32)
+I_u4_high_closed: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32)
+
+assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_)
+assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_)
+assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_)
+assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_)
+assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_])
+assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_])
+assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_])
+assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_])
+assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_])
+
+assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32)
+assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32)
+assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32)
+assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32)
+assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32])
+
+assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32)
+assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32)
+assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32)
+assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32)
+assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32])
+
+assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32)
+assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32)
+assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32)
+assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32)
+assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32])
+assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32])
+
+assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint)
+assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint)
+assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint)
+assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint)
+assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint])
+assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint])
+assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint])
+assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint])
+assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint])
+assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint])
+
+I_u8_low: npt.NDArray[np.uint64] = np.array([0], dtype=np.uint64)
+I_u8_low_like: list[int] = [0]
+I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64)
+I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64)
+
+assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64)
+assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64)
+assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64)
+assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64)
+assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64])
+
+assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64)
+assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64)
+assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64)
+assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64)
+assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64])
+
+assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64)
+assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64)
+assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64)
+assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64)
+assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64])
+assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64])
+
+I_i1_low: npt.NDArray[np.int8] = np.array([-128], dtype=np.int8)
+I_i1_low_like: list[int] = [-128]
+I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8)
+I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8)
+
+assert_type(def_gen.integers(128, dtype="i1"), np.int8)
+assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8)
+assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8)
+assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8)
+assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8])
+assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8])
+
+assert_type(def_gen.integers(128, dtype="int8"), np.int8)
+assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8)
+assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8)
+assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8)
+assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8])
+assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8])
+
+assert_type(def_gen.integers(128, dtype=np.int8), np.int8)
+assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8)
+assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8)
+assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8)
+assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8])
+assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8])
+assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8])
+
+I_i2_low: npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16)
+I_i2_low_like: list[int] = [-32768]
+I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16)
+I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16)
+
+assert_type(def_gen.integers(32768, dtype="i2"), np.int16)
+assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16)
+assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16)
+assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16)
+assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16])
+assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16])
+
+assert_type(def_gen.integers(32768, dtype="int16"), np.int16)
+assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16)
+assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16)
+assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16)
+assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16])
+assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16])
+
+assert_type(def_gen.integers(32768, dtype=np.int16), np.int16)
+assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16)
+assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16)
+assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16)
+assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16])
+assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16])
+assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16])
+
+I_i4_low: npt.NDArray[np.int32] = np.array([-2147483648], dtype=np.int32)
+I_i4_low_like: list[int] = [-2147483648]
+I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32)
+I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32)
+
+assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32)
+assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32)
+assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32)
+assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32)
+assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32])
+assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32])
+
+assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32)
+assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32)
+assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32)
+assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32)
+assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32])
+assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32])
+
+assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32)
+assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32)
+assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32)
+assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32)
+assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32])
+assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32])
+assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32])
+
+I_i8_low: npt.NDArray[np.int64] = np.array([-9223372036854775808], dtype=np.int64)
+I_i8_low_like: list[int] = [-9223372036854775808]
+I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64)
+I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64)
+
+assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64)
+assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64)
+assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64)
+assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64)
+assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64])
+assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64])
+
+assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64)
+assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64)
+assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64)
+assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64)
+assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64])
+assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64])
+
+assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64)
+assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64)
+assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64)
+assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64)
+assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64])
+assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64])
+assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64])
+
+assert_type(def_gen.bit_generator, np.random.BitGenerator)
+
+assert_type(def_gen.bytes(2), bytes)
+
+assert_type(def_gen.choice(5), int)
+assert_type(def_gen.choice(5, 3), npt.NDArray[np.int64])
+assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64])
+assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64])
+assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64])
+
+assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any)
+assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any])
+assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any])
+assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any])
+assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any])
+
+assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64])
+assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64])
+assert_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3), npt.NDArray[np.float64])
+
+assert_type(def_gen.multinomial(20, [1 / 6.0] * 6), npt.NDArray[np.int64])
+assert_type(def_gen.multinomial(20, np.array([0.5, 0.5])), npt.NDArray[np.int64])
+assert_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2), npt.NDArray[np.int64])
+assert_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2)), npt.NDArray[np.int64])
+assert_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2)), npt.NDArray[np.int64])
+
+assert_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2), npt.NDArray[np.int64])
+assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2), npt.NDArray[np.int64])
+assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4), npt.NDArray[np.int64])
+assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7)), npt.NDArray[np.int64])
+assert_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count"), npt.NDArray[np.int64])
+assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals"), npt.NDArray[np.int64])
+
+assert_type(def_gen.multivariate_normal([0.0], [[1.0]]), npt.NDArray[np.float64])
+assert_type(def_gen.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64])
+assert_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]]), npt.NDArray[np.float64])
+assert_type(def_gen.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64])
+
+assert_type(def_gen.permutation(10), npt.NDArray[np.int64])
+assert_type(def_gen.permutation([1, 2, 3, 4]), npt.NDArray[Any])
+assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any])
+assert_type(def_gen.permutation(D_2D, axis=1), npt.NDArray[Any])
+assert_type(def_gen.permuted(D_2D), npt.NDArray[Any])
+assert_type(def_gen.permuted(D_2D_like), npt.NDArray[Any])
+assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[Any])
+assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[Any])
+assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any])
+assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any])
+assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[Any])
+
+assert_type(def_gen.shuffle(np.arange(10)), None)
+assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None)
+assert_type(def_gen.shuffle(D_2D, axis=1), None)
+
+assert_type(np.random.Generator(pcg64), np.random.Generator)
+assert_type(def_gen.__str__(), str)
+assert_type(def_gen.__repr__(), str)
+assert_type(def_gen.__setstate__(dict(def_gen.bit_generator.state)), None)
+
+# RandomState
+random_st: np.random.RandomState = np.random.RandomState()
+
+assert_type(random_st.standard_normal(), float)
+assert_type(random_st.standard_normal(size=None), float)
+assert_type(random_st.standard_normal(size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.random(), float)
+assert_type(random_st.random(size=None), float)
+assert_type(random_st.random(size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.standard_cauchy(), float)
+assert_type(random_st.standard_cauchy(size=None), float)
+assert_type(random_st.standard_cauchy(size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.standard_exponential(), float)
+assert_type(random_st.standard_exponential(size=None), float)
+assert_type(random_st.standard_exponential(size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.zipf(1.5), int)
+assert_type(random_st.zipf(1.5, size=None), int)
+assert_type(random_st.zipf(1.5, size=1), npt.NDArray[np.long])
+assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long])
+assert_type(random_st.zipf(D_arr_1p5, size=1), npt.NDArray[np.long])
+assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long])
+assert_type(random_st.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.long])
+
+assert_type(random_st.weibull(0.5), float)
+assert_type(random_st.weibull(0.5, size=None), float)
+assert_type(random_st.weibull(0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.standard_t(0.5), float)
+assert_type(random_st.standard_t(0.5, size=None), float)
+assert_type(random_st.standard_t(0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.poisson(0.5), int)
+assert_type(random_st.poisson(0.5, size=None), int)
+assert_type(random_st.poisson(0.5, size=1), npt.NDArray[np.long])
+assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long])
+assert_type(random_st.poisson(D_arr_0p5, size=1), npt.NDArray[np.long])
+assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long])
+assert_type(random_st.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.long])
+
+assert_type(random_st.power(0.5), float)
+assert_type(random_st.power(0.5, size=None), float)
+assert_type(random_st.power(0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.power(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.pareto(0.5), float)
+assert_type(random_st.pareto(0.5, size=None), float)
+assert_type(random_st.pareto(0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.chisquare(0.5), float)
+assert_type(random_st.chisquare(0.5, size=None), float)
+assert_type(random_st.chisquare(0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.exponential(0.5), float)
+assert_type(random_st.exponential(0.5, size=None), float)
+assert_type(random_st.exponential(0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.geometric(0.5), int)
+assert_type(random_st.geometric(0.5, size=None), int)
+assert_type(random_st.geometric(0.5, size=1), npt.NDArray[np.long])
+assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long])
+assert_type(random_st.geometric(D_arr_0p5, size=1), npt.NDArray[np.long])
+assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long])
+assert_type(random_st.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.long])
+
+assert_type(random_st.logseries(0.5), int)
+assert_type(random_st.logseries(0.5, size=None), int)
+assert_type(random_st.logseries(0.5, size=1), npt.NDArray[np.long])
+assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long])
+assert_type(random_st.logseries(D_arr_0p5, size=1), npt.NDArray[np.long])
+assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long])
+assert_type(random_st.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.long])
+
+assert_type(random_st.rayleigh(0.5), float)
+assert_type(random_st.rayleigh(0.5, size=None), float)
+assert_type(random_st.rayleigh(0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.standard_gamma(0.5), float)
+assert_type(random_st.standard_gamma(0.5, size=None), float)
+assert_type(random_st.standard_gamma(0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.vonmises(0.5, 0.5), float)
+assert_type(random_st.vonmises(0.5, 0.5, size=None), float)
+assert_type(random_st.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.wald(0.5, 0.5), float)
+assert_type(random_st.wald(0.5, 0.5, size=None), float)
+assert_type(random_st.wald(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.uniform(0.5, 0.5), float)
+assert_type(random_st.uniform(0.5, 0.5, size=None), float)
+assert_type(random_st.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.beta(0.5, 0.5), float)
+assert_type(random_st.beta(0.5, 0.5, size=None), float)
+assert_type(random_st.beta(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.f(0.5, 0.5), float)
+assert_type(random_st.f(0.5, 0.5, size=None), float)
+assert_type(random_st.f(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.gamma(0.5, 0.5), float)
+assert_type(random_st.gamma(0.5, 0.5, size=None), float)
+assert_type(random_st.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.gumbel(0.5, 0.5), float)
+assert_type(random_st.gumbel(0.5, 0.5, size=None), float)
+assert_type(random_st.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.laplace(0.5, 0.5), float)
+assert_type(random_st.laplace(0.5, 0.5, size=None), float)
+assert_type(random_st.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.logistic(0.5, 0.5), float)
+assert_type(random_st.logistic(0.5, 0.5, size=None), float)
+assert_type(random_st.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.lognormal(0.5, 0.5), float)
+assert_type(random_st.lognormal(0.5, 0.5, size=None), float)
+assert_type(random_st.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.noncentral_chisquare(0.5, 0.5), float)
+assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=None), float)
+assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.normal(0.5, 0.5), float)
+assert_type(random_st.normal(0.5, 0.5, size=None), float)
+assert_type(random_st.normal(0.5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64])
+assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64])
+assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(random_st.triangular(0.1, 0.5, 0.9), float)
+assert_type(random_st.triangular(0.1, 0.5, 0.9, size=None), float) +assert_type(random_st.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9), float) +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None), float) +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.binomial(10, 0.5), int) +assert_type(random_st.binomial(10, 0.5, size=None), int) +assert_type(random_st.binomial(10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.negative_binomial(10, 0.5), int) +assert_type(random_st.negative_binomial(10, 0.5, size=None), int) +assert_type(random_st.negative_binomial(10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_0p5), 
npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.hypergeometric(20, 20, 10), int) +assert_type(random_st.hypergeometric(20, 20, 10, size=None), int) +assert_type(random_st.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.long]) + +assert_type(random_st.randint(0, 100), int) +assert_type(random_st.randint(100), int) +assert_type(random_st.randint([100]), npt.NDArray[np.long]) +assert_type(random_st.randint(0, [100]), npt.NDArray[np.long]) + +assert_type(random_st.randint(2, dtype=bool), bool) +assert_type(random_st.randint(0, 2, dtype=bool), bool) +assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) + +assert_type(random_st.randint(2, dtype=np.bool), np.bool) +assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) +assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) + +assert_type(random_st.randint(256, dtype="u1"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) 
+assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) + +assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) 
+assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(128, dtype="i1"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) + +assert_type(random_st.randint(128, dtype="int8"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) + +assert_type(random_st.randint(128, dtype=np.int8), np.int8) +assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) + +assert_type(random_st.randint(32768, dtype="i2"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) + +assert_type(random_st.randint(32768, dtype="int16"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) + +assert_type(random_st.randint(32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), 
npt.NDArray[np.int16]) + +assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) + +assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) + +assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) + +assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(random_st._bit_generator, np.random.BitGenerator) + +assert_type(random_st.bytes(2), bytes) + +assert_type(random_st.choice(5), int) +assert_type(random_st.choice(5, 3), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, replace=True), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, p=[1 / 5] * 5, 
replace=False), npt.NDArray[np.long])
+
+assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any)
+assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any])
+assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any])
+assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any])
+assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any])
+
+assert_type(random_st.dirichlet([0.5, 0.5]), npt.NDArray[np.float64])
+assert_type(random_st.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64])
+assert_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3), npt.NDArray[np.float64])
+
+assert_type(random_st.multinomial(20, [1 / 6.0] * 6), npt.NDArray[np.long])
+assert_type(random_st.multinomial(20, np.array([0.5, 0.5])), npt.NDArray[np.long])
+assert_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2), npt.NDArray[np.long])
+
+assert_type(random_st.multivariate_normal([0.0], [[1.0]]), npt.NDArray[np.float64])
+assert_type(random_st.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64])
+assert_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]]), npt.NDArray[np.float64])
+assert_type(random_st.multivariate_normal(np.array([0.0]), np.array([[1.0]])), npt.NDArray[np.float64])
+
+assert_type(random_st.permutation(10), npt.NDArray[np.long])
+assert_type(random_st.permutation([1, 2, 3, 4]), npt.NDArray[Any])
+assert_type(random_st.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any])
+assert_type(random_st.permutation(D_2D), npt.NDArray[Any])
+
+assert_type(random_st.shuffle(np.arange(10)), None)
+assert_type(random_st.shuffle([1, 2, 3, 4, 5]), None)
+assert_type(random_st.shuffle(D_2D), None)
+
+assert_type(np.random.RandomState(pcg64), np.random.RandomState)
+assert_type(np.random.RandomState(0), np.random.RandomState)
+assert_type(np.random.RandomState([0, 1, 2]), np.random.RandomState)
+assert_type(random_st.__str__(), str)
+assert_type(random_st.__repr__(), str)
+random_st_state = random_st.__getstate__()
+assert_type(random_st_state, dict[str, Any])
+assert_type(random_st.__setstate__(random_st_state), None)
+assert_type(random_st.seed(), None)
+assert_type(random_st.seed(1), None)
+assert_type(random_st.seed([0, 1]), None)
+random_st_get_state = random_st.get_state()
+assert_type(random_st_get_state, dict[str, Any] | tuple[str, npt.NDArray[np.uint32], int, int, float])
+random_st_get_state_legacy = random_st.get_state(legacy=True)
+assert_type(random_st_get_state_legacy, dict[str, Any] | tuple[str, npt.NDArray[np.uint32], int, int, float])
+assert_type(random_st.set_state(random_st_get_state), None)
+
+assert_type(random_st.rand(), float)
+assert_type(random_st.rand(1), npt.NDArray[np.float64])
+assert_type(random_st.rand(1, 2), npt.NDArray[np.float64])
+assert_type(random_st.randn(), float)
+assert_type(random_st.randn(1), npt.NDArray[np.float64])
+assert_type(random_st.randn(1, 2), npt.NDArray[np.float64])
+assert_type(random_st.random_sample(), float)
+assert_type(random_st.random_sample(1), npt.NDArray[np.float64])
+assert_type(random_st.random_sample(size=(1, 2)), npt.NDArray[np.float64])
+
+assert_type(random_st.tomaxint(), int)
+assert_type(random_st.tomaxint(1), npt.NDArray[np.int64])
+assert_type(random_st.tomaxint((1,)), npt.NDArray[np.int64])
+
+assert_type(np.random.mtrand.set_bit_generator(pcg64), None)
+assert_type(np.random.mtrand.get_bit_generator(),
np.random.BitGenerator) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/rec.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/rec.pyi new file mode 100644 index 0000000..aacf217 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/rec.pyi @@ -0,0 +1,171 @@ +import io +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]] + +AR_i8: npt.NDArray[np.int64] +REC_AR_V: _RecArray +AR_LIST: list[npt.NDArray[np.int64]] + +record: np.record +file_obj: io.BufferedIOBase + +assert_type(np.rec.format_parser( + formats=[np.float64, np.int64, np.bool], + names=["f8", "i8", "?"], + titles=None, + aligned=True, +), np.rec.format_parser) +assert_type(np.rec.format_parser.dtype, np.dtype[np.void]) + +assert_type(record.field_a, Any) +assert_type(record.field_b, Any) +assert_type(record["field_a"], Any) +assert_type(record["field_b"], Any) +assert_type(record.pprint(), str) +record.field_c = 5 + +assert_type(REC_AR_V.field(0), Any) +assert_type(REC_AR_V.field("field_a"), Any) +assert_type(REC_AR_V.field(0, AR_i8), None) +assert_type(REC_AR_V.field("field_a", AR_i8), None) +assert_type(REC_AR_V["field_a"], npt.NDArray[Any]) +assert_type(REC_AR_V.field_a, Any) +assert_type(REC_AR_V.__array_finalize__(object()), None) + +assert_type( + np.recarray( + shape=(10, 5), + formats=[np.float64, np.int64, np.bool], + order="K", + byteorder="|", + ), + _RecArray, +) + +assert_type( + np.recarray( + shape=(10, 5), + dtype=[("f8", np.float64), ("i8", np.int64)], + strides=(5, 5), + ), + np.recarray, +) + +assert_type(np.rec.fromarrays(AR_LIST), np.recarray) +assert_type( + np.rec.fromarrays(AR_LIST, dtype=np.int64), + np.recarray, +) +assert_type( + np.rec.fromarrays( + AR_LIST, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.fromrecords((1, 1.5)), + _RecArray +) + +assert_type( + np.rec.fromrecords( + [(1, 1.5)], + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + _RecArray, +) + +assert_type( + np.rec.fromrecords( + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.fromstring( + b"(1, 1.5)", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + _RecArray, +) + +assert_type( + np.rec.fromstring( + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.fromfile( + "test_file.txt", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + np.recarray, +) + +assert_type( + np.rec.fromfile( + file_obj, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type(np.rec.array(AR_i8), np.recarray[tuple[Any, ...], np.dtype[np.int64]]) + +assert_type( + np.rec.array([(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)]), + np.recarray, +) + +assert_type( + np.rec.array( + [(1, 1.5)], + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.array( + None, + dtype=np.float64, + shape=(10, 3), + ), + np.recarray, +) + +assert_type( + np.rec.array( + None, + formats=[np.int64, np.float64], + names=["i8", "f8"], + shape=(10, 3), + ), + _RecArray, +) + +assert_type( + np.rec.array(file_obj, dtype=np.float64), + np.recarray, +) + +assert_type( + np.rec.array(file_obj, formats=[np.int64, np.float64], names=["i8", "f8"]), + _RecArray, +) diff --git 
a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/scalars.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/scalars.pyi
new file mode 100644
index 0000000..d7b2777
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/scalars.pyi
@@ -0,0 +1,191 @@
+from typing import Any, Literal, TypeAlias, assert_type
+
+import numpy as np
+
+_1: TypeAlias = Literal[1]
+
+b: np.bool
+u8: np.uint64
+i8: np.int64
+f8: np.float64
+c8: np.complex64
+c16: np.complex128
+m: np.timedelta64
+U: np.str_
+S: np.bytes_
+V: np.void
+O: np.object_ # cannot exist at runtime
+
+array_nd: np.ndarray[Any, Any]
+array_0d: np.ndarray[tuple[()], Any]
+array_2d_2x2: np.ndarray[tuple[Literal[2], Literal[2]], Any]
+
+assert_type(c8.real, np.float32)
+assert_type(c8.imag, np.float32)
+
+assert_type(c8.real.real, np.float32)
+assert_type(c8.real.imag, np.float32)
+
+assert_type(c8.itemsize, int)
+assert_type(c8.shape, tuple[()])
+assert_type(c8.strides, tuple[()])
+
+assert_type(c8.ndim, Literal[0])
+assert_type(c8.size, Literal[1])
+
+assert_type(c8.squeeze(), np.complex64)
+assert_type(c8.byteswap(), np.complex64)
+assert_type(c8.transpose(), np.complex64)
+
+assert_type(c8.dtype, np.dtype[np.complex64])
+
+assert_type(c8.real, np.float32)
+assert_type(c16.imag, np.float64)
+
+assert_type(np.str_('foo'), np.str_)
+
+assert_type(V[0], Any)
+assert_type(V["field1"], Any)
+assert_type(V[["field1", "field2"]], np.void)
+V[0] = 5
+
+# Aliases
+assert_type(np.bool_(), np.bool[Literal[False]])
+assert_type(np.byte(), np.byte)
+assert_type(np.short(), np.short)
+assert_type(np.intc(), np.intc)
+assert_type(np.intp(), np.intp)
+assert_type(np.int_(), np.int_)
+assert_type(np.long(), np.long)
+assert_type(np.longlong(), np.longlong)
+
+assert_type(np.ubyte(), np.ubyte)
+assert_type(np.ushort(), np.ushort)
+assert_type(np.uintc(), np.uintc)
+assert_type(np.uintp(), np.uintp)
+assert_type(np.uint(), np.uint)
+assert_type(np.ulong(), np.ulong)
+assert_type(np.ulonglong(), np.ulonglong)
+
+assert_type(np.half(), np.half)
+assert_type(np.single(), np.single)
+assert_type(np.double(), np.double)
+assert_type(np.longdouble(), np.longdouble)
+
+assert_type(np.csingle(), np.csingle)
+assert_type(np.cdouble(), np.cdouble)
+assert_type(np.clongdouble(), np.clongdouble)
+
+assert_type(b.item(), bool)
+assert_type(i8.item(), int)
+assert_type(u8.item(), int)
+assert_type(f8.item(), float)
+assert_type(c16.item(), complex)
+assert_type(U.item(), str)
+assert_type(S.item(), bytes)
+
+assert_type(b.tolist(), bool)
+assert_type(i8.tolist(), int)
+assert_type(u8.tolist(), int)
+assert_type(f8.tolist(), float)
+assert_type(c16.tolist(), complex)
+assert_type(U.tolist(), str)
+assert_type(S.tolist(), bytes)
+
+assert_type(b.ravel(), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(i8.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]])
+assert_type(u8.ravel(), np.ndarray[tuple[int], np.dtype[np.uint64]])
+assert_type(f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(c16.ravel(), np.ndarray[tuple[int], np.dtype[np.complex128]])
+assert_type(U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]])
+assert_type(S.ravel(), np.ndarray[tuple[int], np.dtype[np.bytes_]])
+
+assert_type(b.flatten(), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(i8.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]])
+assert_type(u8.flatten(), np.ndarray[tuple[int], np.dtype[np.uint64]])
+assert_type(f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(c16.flatten(), np.ndarray[tuple[int], np.dtype[np.complex128]])
+assert_type(U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]])
+assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]])
+
+assert_type(b.reshape(()), np.bool)
+assert_type(i8.reshape([]), np.int64)
+assert_type(b.reshape(1), np.ndarray[tuple[_1], np.dtype[np.bool]])
+assert_type(i8.reshape(-1), np.ndarray[tuple[_1], np.dtype[np.int64]])
+assert_type(u8.reshape(1, 1), np.ndarray[tuple[_1, _1], np.dtype[np.uint64]])
+assert_type(f8.reshape(1, -1), np.ndarray[tuple[_1, _1], np.dtype[np.float64]])
+assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[_1, _1, _1], np.dtype[np.complex128]])
+assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[_1, _1, _1, _1], np.dtype[np.str_]])
+assert_type(
+    S.reshape(1, 1, 1, 1, 1),
+    np.ndarray[
+        # len(shape) >= 5
+        tuple[_1, _1, _1, _1, _1, *tuple[_1, ...]],
+        np.dtype[np.bytes_],
+    ],
+)
+
+assert_type(i8.astype(float), Any)
+assert_type(i8.astype(np.float64), np.float64)
+
+assert_type(i8.view(), np.int64)
+assert_type(i8.view(np.float64), np.float64)
+assert_type(i8.view(float), Any)
+assert_type(i8.view(np.float64, np.ndarray), np.float64)
+
+assert_type(i8.getfield(float), Any)
+assert_type(i8.getfield(np.float64), np.float64)
+assert_type(i8.getfield(np.float64, 8), np.float64)
+
+assert_type(f8.as_integer_ratio(), tuple[int, int])
+assert_type(f8.is_integer(), bool)
+assert_type(f8.__trunc__(), int)
+assert_type(f8.__getformat__("float"), str)
+assert_type(f8.hex(), str)
+assert_type(np.float64.fromhex("0x0.0p+0"), np.float64)
+
+assert_type(f8.__getnewargs__(), tuple[float])
+assert_type(c16.__getnewargs__(), tuple[float, float])
+
+assert_type(i8.numerator, np.int64)
+assert_type(i8.denominator, Literal[1])
+assert_type(u8.numerator, np.uint64)
+assert_type(u8.denominator, Literal[1])
+assert_type(m.numerator, np.timedelta64)
+assert_type(m.denominator, Literal[1])
+
+assert_type(round(i8), int)
+assert_type(round(i8, 3), np.int64)
+assert_type(round(u8), int)
+assert_type(round(u8, 3), np.uint64)
+assert_type(round(f8), int)
+assert_type(round(f8, 3), np.float64)
+
+assert_type(f8.__ceil__(), int)
+assert_type(f8.__floor__(), int)
+
+assert_type(i8.is_integer(), Literal[True])
+
+assert_type(O.real, np.object_)
+assert_type(O.imag, np.object_)
+assert_type(int(O), int)
+assert_type(float(O), float)
+assert_type(complex(O), complex)
+
+# These fail because of a mypy __new__ bug:
+# https://github.com/python/mypy/issues/15182
+# According to the typing spec, the following statements are valid, see
+# https://typing.readthedocs.io/en/latest/spec/constructors.html#new-method
+
+# assert_type(np.object_(), None)
+# assert_type(np.object_(None), None)
+# assert_type(np.object_(array_nd), np.ndarray[Any, np.dtype[np.object_]])
+# assert_type(np.object_([]), npt.NDArray[np.object_])
+# assert_type(np.object_(()), npt.NDArray[np.object_])
+# assert_type(np.object_(range(4)), npt.NDArray[np.object_])
+# assert_type(np.object_(+42), int)
+# assert_type(np.object_(1 / 137), float)
+# assert_type(np.object_('Developers! 
' * (1 << 6)), str) +# assert_type(np.object_(object()), object) +# assert_type(np.object_({False, True, NotADirectoryError}), set[Any]) +# assert_type(np.object_({'spam': 'food', 'ham': 'food'}), dict[str, str]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/shape.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/shape.pyi new file mode 100644 index 0000000..2406a39 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/shape.pyi @@ -0,0 +1,13 @@ +from typing import Any, NamedTuple, assert_type + +import numpy as np + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +arr: np.ndarray[XYGrid, Any] + +# Test shape property matches shape typevar +assert_type(arr.shape, XYGrid) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/shape_base.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/shape_base.pyi new file mode 100644 index 0000000..e409a53 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/shape_base.pyi @@ -0,0 +1,52 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +i8: np.int64 +f8: np.float64 + +AR_b: npt.NDArray[np.bool] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +AR_LIKE_f8: list[float] + +assert_type(np.take_along_axis(AR_f8, AR_i8, axis=1), npt.NDArray[np.float64]) +assert_type(np.take_along_axis(f8, AR_i8, axis=None), npt.NDArray[np.float64]) + +assert_type(np.put_along_axis(AR_f8, AR_i8, "1.0", axis=1), None) + +assert_type(np.expand_dims(AR_i8, 2), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_LIKE_f8, 2), npt.NDArray[Any]) + +assert_type(np.column_stack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.column_stack([AR_LIKE_f8]), npt.NDArray[Any]) + +assert_type(np.dstack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.dstack([AR_LIKE_f8]), npt.NDArray[Any]) + +assert_type(np.array_split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.hsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.vsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.dsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.kron(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating]) + +assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) +assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) + +assert_type(np.unstack(AR_i8, axis=0), tuple[npt.NDArray[np.int64], ...]) +assert_type(np.unstack(AR_LIKE_f8, axis=0), tuple[npt.NDArray[Any], ...]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/stride_tricks.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/stride_tricks.pyi new file mode 100644 index 0000000..8fde9b8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/stride_tricks.pyi @@ -0,0 +1,27 @@ 
+from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_LIKE_f: list[float] +interface_dict: dict[str, Any] + +assert_type(np.lib.stride_tricks.as_strided(AR_f8), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.as_strided(AR_LIKE_f), npt.NDArray[Any]) +assert_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5)), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20]), npt.NDArray[np.float64]) + +assert_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) +assert_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1), npt.NDArray[np.float64]) + +assert_type(np.broadcast_to(AR_f8, 5), npt.NDArray[np.float64]) +assert_type(np.broadcast_to(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) +assert_type(np.broadcast_to(AR_f8, [4, 6], subok=True), npt.NDArray[np.float64]) + +assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[Any, ...]) +assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[Any, ...]) + +assert_type(np.broadcast_arrays(AR_f8, AR_f8), tuple[npt.NDArray[Any], ...]) +assert_type(np.broadcast_arrays(AR_f8, AR_LIKE_f), tuple[npt.NDArray[Any], ...]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/strings.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/strings.pyi new file mode 100644 index 0000000..18bd252 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/strings.pyi @@ -0,0 +1,196 @@ +from typing import TypeAlias, assert_type + +import numpy as np +import numpy._typing as np_t +import numpy.typing as npt + +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_T: AR_T_alias + +assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.greater(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.less(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.less(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.less(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.add(AR_U, AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.add(AR_S, AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.add(AR_T, AR_T), AR_T_alias) + +assert_type(np.strings.multiply(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.multiply(AR_S, [5, 4, 3]), 
npt.NDArray[np.bytes_]) +assert_type(np.strings.multiply(AR_T, 5), AR_T_alias) + +assert_type(np.strings.mod(AR_U, "test"), npt.NDArray[np.str_]) +assert_type(np.strings.mod(AR_S, "test"), npt.NDArray[np.bytes_]) +assert_type(np.strings.mod(AR_T, "test"), AR_T_alias) + +assert_type(np.strings.capitalize(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.capitalize(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.capitalize(AR_T), AR_T_alias) + +assert_type(np.strings.center(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) +assert_type(np.strings.center(AR_T, 5), AR_T_alias) + +assert_type(np.strings.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.strings.encode(AR_T), npt.NDArray[np.bytes_]) +assert_type(np.strings.decode(AR_S), npt.NDArray[np.str_]) + +assert_type(np.strings.expandtabs(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) +assert_type(np.strings.expandtabs(AR_T), AR_T_alias) + +assert_type(np.strings.ljust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.ljust(AR_T, 5), AR_T_alias) +assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) + +assert_type(np.strings.rjust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.rjust(AR_T, 5), AR_T_alias) +assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) + +assert_type(np.strings.lstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.lstrip(AR_T), AR_T_alias) +assert_type(np.strings.lstrip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.rstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.rstrip(AR_T), AR_T_alias) +assert_type(np.strings.rstrip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.strip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.strip(AR_T), AR_T_alias) +assert_type(np.strings.strip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_]) + +assert_type(np.strings.partition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.strings.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.partition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.strings.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.strings.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.rpartition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.strings.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) +assert_type(np.strings.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.replace(AR_T, "_", "_"), AR_TU_alias) + +assert_type(np.strings.lower(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.lower(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.lower(AR_T), AR_T_alias) + +assert_type(np.strings.upper(AR_U), 
npt.NDArray[np.str_]) +assert_type(np.strings.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.upper(AR_T), AR_T_alias) + +assert_type(np.strings.swapcase(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.swapcase(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.swapcase(AR_T), AR_T_alias) + +assert_type(np.strings.title(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.title(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.title(AR_T), AR_T_alias) + +assert_type(np.strings.zfill(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) +assert_type(np.strings.zfill(AR_T, 5), AR_T_alias) + +assert_type(np.strings.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.strings.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.strings.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.strings.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.strings.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.strings.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.strings.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.isalpha(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isalnum(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isdecimal(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isdecimal(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isdigit(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.islower(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isnumeric(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isnumeric(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isspace(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.istitle(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_T), 
npt.NDArray[np.bool]) + +assert_type(np.strings.isupper(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.str_len(AR_U), npt.NDArray[np.int_]) +assert_type(np.strings.str_len(AR_S), npt.NDArray[np.int_]) +assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) + +assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) +assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) +assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/testing.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/testing.pyi new file mode 100644 index 0000000..d70bc97 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/testing.pyi @@ -0,0 +1,198 @@ +import contextlib +import re +import sys +import types +import unittest +import warnings +from collections.abc import Callable +from pathlib import Path +from typing import Any, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] + +bool_obj: bool +suppress_obj: np.testing.suppress_warnings +FT = TypeVar("FT", bound=Callable[..., Any]) + +def func() -> int: ... + +def func2( + x: npt.NDArray[np.number], + y: npt.NDArray[np.number], +) -> npt.NDArray[np.bool]: ... + +assert_type(np.testing.KnownFailureException(), np.testing.KnownFailureException) +assert_type(np.testing.IgnoreException(), np.testing.IgnoreException) + +assert_type( + np.testing.clear_and_catch_warnings(modules=[np.testing]), + np.testing.clear_and_catch_warnings[None], +) +assert_type( + np.testing.clear_and_catch_warnings(True), + np.testing.clear_and_catch_warnings[list[warnings.WarningMessage]], +) +assert_type( + np.testing.clear_and_catch_warnings(False), + np.testing.clear_and_catch_warnings[None], +) +assert_type( + np.testing.clear_and_catch_warnings(bool_obj), + np.testing.clear_and_catch_warnings, +) +assert_type( + np.testing.clear_and_catch_warnings.class_modules, + tuple[types.ModuleType, ...], +) +assert_type( + np.testing.clear_and_catch_warnings.modules, + set[types.ModuleType], +) + +with np.testing.clear_and_catch_warnings(True) as c1: + assert_type(c1, list[warnings.WarningMessage]) +with np.testing.clear_and_catch_warnings() as c2: + assert_type(c2, None) + +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) +assert_type(suppress_obj.filter(RuntimeWarning), None) +assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) +with suppress_obj as c3: + assert_type(c3, np.testing.suppress_warnings) + +assert_type(np.testing.verbose, int) +assert_type(np.testing.IS_PYPY, bool) +assert_type(np.testing.HAS_REFCOUNT, bool) +assert_type(np.testing.HAS_LAPACK64, bool) + +assert_type(np.testing.assert_(1, msg="test"), None) +assert_type(np.testing.assert_(2, msg=lambda: "test"), None) + +if sys.platform == "win32" or sys.platform == "cygwin": + assert_type(np.testing.memusage(), int) +elif sys.platform == "linux": + assert_type(np.testing.memusage(), int | None) + +assert_type(np.testing.jiffies(), int) + 
+assert_type(np.testing.build_err_msg([0, 1, 2], "test"), str) +assert_type(np.testing.build_err_msg(range(2), "test", header="header"), str) +assert_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False), str) +assert_type(np.testing.build_err_msg("abc", "test", names=["x", "y"]), str) +assert_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5), str) + +assert_type(np.testing.assert_equal({1}, {1}), None) +assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) +assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) + +assert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None) + +assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) +assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) +assert_type(np.testing.assert_almost_equal(1, 1.0, verbose=True), None) +assert_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2), None) + +assert_type(np.testing.assert_approx_equal(1.0, 1.1), None) +assert_type(np.testing.assert_approx_equal("1", "2", err_msg="fail"), None) +assert_type(np.testing.assert_approx_equal(1, 1.0, verbose=True), None) +assert_type(np.testing.assert_approx_equal(1, 1.0001, significant=2), None) + +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header"), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, precision=np.int64()), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True), None) + +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True), None) + +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1), None) + +assert_type(np.testing.assert_array_less(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True), None) + +assert_type(np.testing.runstring("1 + 1", {}), Any) +assert_type(np.testing.runstring("int64() + 1", {"int64": np.int64}), Any) + +assert_type(np.testing.assert_string_equal("1", "1"), None) + +assert_type(np.testing.rundocs(), None) +assert_type(np.testing.rundocs("test.py"), None) +assert_type(np.testing.rundocs(Path("test.py"), raise_on_error=True), None) + +def func3(a: int) -> bool: ... + +assert_type( + np.testing.assert_raises(RuntimeWarning), + unittest.case._AssertRaisesContext[RuntimeWarning], +) +assert_type(np.testing.assert_raises(RuntimeWarning, func3, 5), None) + +assert_type( + np.testing.assert_raises_regex(RuntimeWarning, r"test"), + unittest.case._AssertRaisesContext[RuntimeWarning], +) +assert_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5), None) +assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5), None) + +class Test: ... 
+ +def decorate(a: FT) -> FT: + return a + +assert_type(np.testing.decorate_methods(Test, decorate), None) +assert_type(np.testing.decorate_methods(Test, decorate, None), None) +assert_type(np.testing.decorate_methods(Test, decorate, "test"), None) +assert_type(np.testing.decorate_methods(Test, decorate, b"test"), None) +assert_type(np.testing.decorate_methods(Test, decorate, re.compile("test")), None) + +assert_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)"), float) +assert_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5), float) + +assert_type(np.testing.assert_allclose(AR_i8, AR_f8), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err"), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False), None) + +assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), None) + +assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any]) +assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) + +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) + +def func4(a: int, b: str) -> bool: ... + +assert_type(np.testing.assert_no_warnings(), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_no_warnings(func3, 5), bool) +assert_type(np.testing.assert_no_warnings(func4, a=1, b="test"), bool) +assert_type(np.testing.assert_no_warnings(func4, 1, "test"), bool) + +assert_type(np.testing.tempdir("test_dir"), contextlib._GeneratorContextManager[str]) +assert_type(np.testing.tempdir(prefix=b"test"), contextlib._GeneratorContextManager[bytes]) +assert_type(np.testing.tempdir("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str]) + +assert_type(np.testing.temppath("test_dir", text=True), contextlib._GeneratorContextManager[str]) +assert_type(np.testing.temppath(prefix=b"test"), contextlib._GeneratorContextManager[bytes]) +assert_type(np.testing.temppath("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str]) + +assert_type(np.testing.assert_no_gc_cycles(), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_no_gc_cycles(func3, 5), None) + +assert_type(np.testing.break_cycles(), None) + +assert_type(np.testing.TestCase(), unittest.case.TestCase) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/twodim_base.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/twodim_base.pyi new file mode 100644 index 0000000..7e9563a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -0,0 +1,145 @@ +from typing import Any, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... + +def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... 
+ +AR_b: npt.NDArray[np.bool] +AR_u: npt.NDArray[np.uint64] +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_c: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +AR_LIKE_b: list[bool] +AR_LIKE_c: list[complex] + +assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) +assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) + +assert_type(np.flipud(AR_b), npt.NDArray[np.bool]) +assert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any]) + +assert_type(np.eye(10), npt.NDArray[np.float64]) +assert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any]) + +assert_type(np.diag(AR_b), npt.NDArray[np.bool]) +assert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.diagflat(AR_b), npt.NDArray[np.bool]) +assert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.tri(10), npt.NDArray[np.float64]) +assert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any]) + +assert_type(np.tril(AR_b), npt.NDArray[np.bool]) +assert_type(np.tril(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.triu(AR_b), npt.NDArray[np.bool]) +assert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.vander(AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.vander(AR_u), npt.NDArray[np.signedinteger]) +assert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger]) +assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating]) +assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating]) +assert_type(np.vander(AR_O), npt.NDArray[np.object_]) + +assert_type( + np.histogram2d(AR_LIKE_c, AR_LIKE_c), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128 | np.float64], + npt.NDArray[np.complex128 | np.float64], + ], +) +assert_type( + np.histogram2d(AR_i, AR_b), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_f, AR_i), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_i, AR_f), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_f, AR_c, bins=8), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_f, bins=(8, 5)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_i, bins=AR_u), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.bool | np.complex128], + npt.NDArray[np.bool | np.complex128], + ], +) + +assert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) + +assert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.tril_indices_from(AR_b), 
tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.triu_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/type_check.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/type_check.pyi new file mode 100644 index 0000000..df95da7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/type_check.pyi @@ -0,0 +1,67 @@ +from typing import Any, Literal, assert_type + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +f: float + +# NOTE: Avoid importing the platform specific `np.float128` type +AR_i8: npt.NDArray[np.int64] +AR_i4: npt.NDArray[np.int32] +AR_f2: npt.NDArray[np.float16] +AR_f8: npt.NDArray[np.float64] +AR_f16: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] +AR_c16: npt.NDArray[np.complex128] + +AR_LIKE_f: list[float] + +class ComplexObj: + real: slice + imag: slice + +assert_type(np.mintypecode(["f8"], typeset="qfQF"), str) + +assert_type(np.real(ComplexObj()), slice) +assert_type(np.real(AR_f8), npt.NDArray[np.float64]) +assert_type(np.real(AR_c16), npt.NDArray[np.float64]) +assert_type(np.real(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.imag(ComplexObj()), slice) +assert_type(np.imag(AR_f8), npt.NDArray[np.float64]) +assert_type(np.imag(AR_c16), npt.NDArray[np.float64]) +assert_type(np.imag(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.iscomplex(f8), np.bool) +assert_type(np.iscomplex(AR_f8), npt.NDArray[np.bool]) +assert_type(np.iscomplex(AR_LIKE_f), npt.NDArray[np.bool]) + +assert_type(np.isreal(f8), np.bool) +assert_type(np.isreal(AR_f8), npt.NDArray[np.bool]) +assert_type(np.isreal(AR_LIKE_f), npt.NDArray[np.bool]) + +assert_type(np.iscomplexobj(f8), bool) +assert_type(np.isrealobj(f8), bool) + +assert_type(np.nan_to_num(f8), np.float64) +assert_type(np.nan_to_num(f, copy=True), Any) +assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64]) +assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any]) + +assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64]) +assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) +assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64]) +assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.typename("h"), Literal["short"]) +assert_type(np.typename("B"), Literal["unsigned char"]) +assert_type(np.typename("V"), Literal["void"]) +assert_type(np.typename("S1"), Literal["character"]) + +assert_type(np.common_type(AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f2), type[np.float16]) +assert_type(np.common_type(AR_f2, AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f16, AR_i4), type[np.longdouble]) +assert_type(np.common_type(AR_c8, AR_f2), type[np.complex64]) +assert_type(np.common_type(AR_f2, AR_c8, AR_i4), type[np.complexfloating]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufunc_config.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufunc_config.pyi new file mode 100644 index 0000000..7485075 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -0,0 +1,30 @@ +"""Typing tests for `_core._ufunc_config`.""" + +from collections.abc import Callable +from typing import Any, assert_type + +from _typeshed import SupportsWrite + +import numpy as np 
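+
+# NOTE (editorial sketch, not part of the upstream stub): the `_ErrDict`
+# returned by `np.seterr` below is the *previous* error-handling state, so the
+# usual runtime pattern is save/restore, or the `np.errstate` context manager:
+#
+#     old = np.seterr(divide="ignore")
+#     try:
+#         ...  # code that may divide by zero
+#     finally:
+#         np.seterr(**old)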
+ +def func(a: str, b: int) -> None: ... + +class Write: + def write(self, value: str) -> None: ... + +assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict) +assert_type(np.geterr(), np._core._ufunc_config._ErrDict) + +assert_type(np.setbufsize(4096), int) +assert_type(np.getbufsize(), int) + +assert_type(np.seterrcall(func), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.geterrcall(), Callable[[str, int], Any] | SupportsWrite[str] | None) + +assert_type(np.errstate(call=func, all="call"), np.errstate) +assert_type(np.errstate(call=Write(), divide="log", over="log"), np.errstate) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufunclike.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufunclike.pyi new file mode 100644 index 0000000..a0ede60 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -0,0 +1,31 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_O: list[np.object_] + +AR_U: npt.NDArray[np.str_] + +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) +assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) +assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) + +assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_i), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_f), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) + +assert_type(np.isneginf(AR_LIKE_b), npt.NDArray[np.bool]) +assert_type(np.isneginf(AR_LIKE_u), npt.NDArray[np.bool]) +assert_type(np.isneginf(AR_LIKE_i), npt.NDArray[np.bool]) +assert_type(np.isneginf(AR_LIKE_f), npt.NDArray[np.bool]) +assert_type(np.isneginf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufuncs.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufuncs.pyi new file mode 100644 index 0000000..93a8bfb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -0,0 +1,123 @@ +from typing import Any, Literal, NoReturn, assert_type + +import numpy as np +import numpy.typing as npt + +i8: np.int64 +f8: np.float64 +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] + +assert_type(np.absolute.__doc__, str) +assert_type(np.absolute.types, list[str]) + +assert_type(np.absolute.__name__, Literal["absolute"]) +assert_type(np.absolute.__qualname__, Literal["absolute"]) +assert_type(np.absolute.ntypes, Literal[20]) +assert_type(np.absolute.identity, None) +assert_type(np.absolute.nin, Literal[1]) +assert_type(np.absolute.nin, Literal[1]) +assert_type(np.absolute.nout, Literal[1]) +assert_type(np.absolute.nargs, Literal[2]) 
+assert_type(np.absolute.signature, None) +assert_type(np.absolute(f8), Any) +assert_type(np.absolute(AR_f8), npt.NDArray[Any]) +assert_type(np.absolute.at(AR_f8, AR_i8), None) + +assert_type(np.add.__name__, Literal["add"]) +assert_type(np.add.__qualname__, Literal["add"]) +assert_type(np.add.ntypes, Literal[22]) +assert_type(np.add.identity, Literal[0]) +assert_type(np.add.nin, Literal[2]) +assert_type(np.add.nout, Literal[1]) +assert_type(np.add.nargs, Literal[3]) +assert_type(np.add.signature, None) +assert_type(np.add(f8, f8), Any) +assert_type(np.add(AR_f8, f8), npt.NDArray[Any]) +assert_type(np.add.at(AR_f8, AR_i8, f8), None) +assert_type(np.add.reduce(AR_f8, axis=0), Any) +assert_type(np.add.accumulate(AR_f8), npt.NDArray[Any]) +assert_type(np.add.reduceat(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.add.outer(f8, f8), Any) +assert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any]) + +assert_type(np.frexp.__name__, Literal["frexp"]) +assert_type(np.frexp.__qualname__, Literal["frexp"]) +assert_type(np.frexp.ntypes, Literal[4]) +assert_type(np.frexp.identity, None) +assert_type(np.frexp.nin, Literal[1]) +assert_type(np.frexp.nout, Literal[2]) +assert_type(np.frexp.nargs, Literal[3]) +assert_type(np.frexp.signature, None) +assert_type(np.frexp(f8), tuple[Any, Any]) +assert_type(np.frexp(AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) + +assert_type(np.divmod.__name__, Literal["divmod"]) +assert_type(np.divmod.__qualname__, Literal["divmod"]) +assert_type(np.divmod.ntypes, Literal[15]) +assert_type(np.divmod.identity, None) +assert_type(np.divmod.nin, Literal[2]) +assert_type(np.divmod.nout, Literal[2]) +assert_type(np.divmod.nargs, Literal[4]) +assert_type(np.divmod.signature, None) +assert_type(np.divmod(f8, f8), tuple[Any, Any]) +assert_type(np.divmod(AR_f8, f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) + +assert_type(np.matmul.__name__, Literal["matmul"]) +assert_type(np.matmul.__qualname__, Literal["matmul"]) +assert_type(np.matmul.ntypes, Literal[19]) +assert_type(np.matmul.identity, None) +assert_type(np.matmul.nin, Literal[2]) +assert_type(np.matmul.nout, Literal[1]) +assert_type(np.matmul.nargs, Literal[3]) +assert_type(np.matmul.signature, Literal["(n?,k),(k,m?)->(n?,m?)"]) +assert_type(np.matmul.identity, None) +assert_type(np.matmul(AR_f8, AR_f8), Any) +assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any) + +assert_type(np.vecdot.__name__, Literal["vecdot"]) +assert_type(np.vecdot.__qualname__, Literal["vecdot"]) +assert_type(np.vecdot.ntypes, Literal[19]) +assert_type(np.vecdot.identity, None) +assert_type(np.vecdot.nin, Literal[2]) +assert_type(np.vecdot.nout, Literal[1]) +assert_type(np.vecdot.nargs, Literal[3]) +assert_type(np.vecdot.signature, Literal["(n),(n)->()"]) +assert_type(np.vecdot.identity, None) +assert_type(np.vecdot(AR_f8, AR_f8), Any) + +assert_type(np.bitwise_count.__name__, Literal["bitwise_count"]) +assert_type(np.bitwise_count.__qualname__, Literal["bitwise_count"]) +assert_type(np.bitwise_count.ntypes, Literal[11]) +assert_type(np.bitwise_count.identity, None) +assert_type(np.bitwise_count.nin, Literal[1]) +assert_type(np.bitwise_count.nout, Literal[1]) +assert_type(np.bitwise_count.nargs, Literal[2]) +assert_type(np.bitwise_count.signature, None) +assert_type(np.bitwise_count.identity, None) +assert_type(np.bitwise_count(i8), Any) +assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) + +assert_type(np.absolute.outer(), NoReturn) +assert_type(np.frexp.outer(), NoReturn) +assert_type(np.divmod.outer(), NoReturn) 
+assert_type(np.matmul.outer(), NoReturn) + +assert_type(np.absolute.reduceat(), NoReturn) +assert_type(np.frexp.reduceat(), NoReturn) +assert_type(np.divmod.reduceat(), NoReturn) +assert_type(np.matmul.reduceat(), NoReturn) + +assert_type(np.absolute.reduce(), NoReturn) +assert_type(np.frexp.reduce(), NoReturn) +assert_type(np.divmod.reduce(), NoReturn) +assert_type(np.matmul.reduce(), NoReturn) + +assert_type(np.absolute.accumulate(), NoReturn) +assert_type(np.frexp.accumulate(), NoReturn) +assert_type(np.divmod.accumulate(), NoReturn) +assert_type(np.matmul.accumulate(), NoReturn) + +assert_type(np.frexp.at(), NoReturn) +assert_type(np.divmod.at(), NoReturn) +assert_type(np.matmul.at(), NoReturn) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi new file mode 100644 index 0000000..f756a8e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi @@ -0,0 +1,11 @@ +from typing import assert_type + +import numpy.exceptions as ex + +assert_type(ex.ModuleDeprecationWarning(), ex.ModuleDeprecationWarning) +assert_type(ex.VisibleDeprecationWarning(), ex.VisibleDeprecationWarning) +assert_type(ex.ComplexWarning(), ex.ComplexWarning) +assert_type(ex.RankWarning(), ex.RankWarning) +assert_type(ex.TooHardError(), ex.TooHardError) +assert_type(ex.AxisError("test"), ex.AxisError) +assert_type(ex.AxisError(5, 1), ex.AxisError) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_isfile.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_isfile.py new file mode 100644 index 0000000..f72122f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_isfile.py @@ -0,0 +1,32 @@ +import os +import sys +from pathlib import Path + +import numpy as np +from numpy.testing import assert_ + +ROOT = Path(np.__file__).parents[0] +FILES = [ + ROOT / "py.typed", + ROOT / "__init__.pyi", + ROOT / "ctypeslib" / "__init__.pyi", + ROOT / "_core" / "__init__.pyi", + ROOT / "f2py" / "__init__.pyi", + ROOT / "fft" / "__init__.pyi", + ROOT / "lib" / "__init__.pyi", + ROOT / "linalg" / "__init__.pyi", + ROOT / "ma" / "__init__.pyi", + ROOT / "matrixlib" / "__init__.pyi", + ROOT / "polynomial" / "__init__.pyi", + ROOT / "random" / "__init__.pyi", + ROOT / "testing" / "__init__.pyi", +] +if sys.version_info < (3, 12): + FILES += [ROOT / "distutils" / "__init__.pyi"] + + +class TestIsFile: + def test_isfile(self): + """Test if all ``.pyi`` files are properly installed.""" + for file in FILES: + assert_(os.path.isfile(file)) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_runtime.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_runtime.py new file mode 100644 index 0000000..2369521 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_runtime.py @@ -0,0 +1,102 @@ +"""Test the runtime usage of `numpy.typing`.""" + +from typing import ( + Any, + NamedTuple, + Union, # pyright: ignore[reportDeprecated] + get_args, + get_origin, + get_type_hints, +) + +import pytest + +import numpy as np +import numpy._typing as _npt +import numpy.typing as npt + + +class TypeTup(NamedTuple): + typ: type + args: tuple[type, ...] 
+ origin: type | None + + +NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) + +TYPES = { + "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), + "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), + "NBitBase": TypeTup(npt.NBitBase, (), None), + "NDArray": NDArrayTup, +} + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_args(name: type, tup: TypeTup) -> None: + """Test `typing.get_args`.""" + typ, ref = tup.typ, tup.args + out = get_args(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_origin(name: type, tup: TypeTup) -> None: + """Test `typing.get_origin`.""" + typ, ref = tup.typ, tup.origin + out = get_origin(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints`.""" + typ = tup.typ + + def func(a: typ) -> None: pass + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints_str(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints` with string-representation of types.""" + typ_str, typ = f"npt.{name}", tup.typ + + def func(a: typ_str) -> None: pass + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +def test_keys() -> None: + """Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced.""" + keys = TYPES.keys() + ref = set(npt.__all__) + assert keys == ref + + +PROTOCOLS: dict[str, tuple[type[Any], object]] = { + "_SupportsDType": (_npt._SupportsDType, np.int64(1)), + "_SupportsArray": (_npt._SupportsArray, np.arange(10)), + "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), + "_NestedSequence": (_npt._NestedSequence, [1]), +} + + +@pytest.mark.parametrize("cls,obj", PROTOCOLS.values(), ids=PROTOCOLS.keys()) +class TestRuntimeProtocol: + def test_isinstance(self, cls: type[Any], obj: object) -> None: + assert isinstance(obj, cls) + assert not isinstance(None, cls) + + def test_issubclass(self, cls: type[Any], obj: object) -> None: + if cls is _npt._SupportsDType: + pytest.xfail( + "Protocols with non-method members don't support issubclass()" + ) + assert issubclass(type(obj), cls) + assert not issubclass(type(None), cls) diff --git a/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_typing.py b/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_typing.py new file mode 100644 index 0000000..ca4cf37 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/typing/tests/test_typing.py @@ -0,0 +1,205 @@ +import importlib.util +import os +import re +import shutil +import textwrap +from collections import defaultdict +from typing import TYPE_CHECKING + +import pytest + +# Only trigger a full `mypy` run if this environment variable is set +# Note that these tests tend to take over a minute even on a macOS M1 CPU, +# and more than that in CI. 
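+# A typical opt-in invocation (editorial example, mirroring the fixture
+# docstring further below) is:
+#
+#     NPY_RUN_MYPY_IN_TESTSUITE=1 pytest numpy/typing/tests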
+_run_mypy_env = os.environ.get("NPY_RUN_MYPY_IN_TESTSUITE", "")
+# Treat an unset variable, "0", or "false" (any case) as disabled:
+RUN_MYPY = _run_mypy_env.lower() not in ('0', '', 'false')
+
+# Skips all functions in this file
+pytestmark = pytest.mark.skipif(
+    not RUN_MYPY,
+    reason="`NPY_RUN_MYPY_IN_TESTSUITE` not set"
+)
+
+
+try:
+    from mypy import api
+except ImportError:
+    NO_MYPY = True
+else:
+    NO_MYPY = False
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+    # We need this as annotation, but it's located in a private namespace.
+    # As a compromise, do *not* import it during runtime
+    from _pytest.mark.structures import ParameterSet
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+PASS_DIR = os.path.join(DATA_DIR, "pass")
+FAIL_DIR = os.path.join(DATA_DIR, "fail")
+REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
+MISC_DIR = os.path.join(DATA_DIR, "misc")
+MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
+CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
+
+#: A dictionary with file names as keys and lists of the mypy stdout as values.
+#: To be populated by `run_mypy`.
+OUTPUT_MYPY: defaultdict[str, list[str]] = defaultdict(list)
+
+
+def _key_func(key: str) -> str:
+    """Split at the first occurrence of the ``:`` character.
+
+    Windows drive-letters (*e.g.* ``C:``) are ignored herein.
+    """
+    drive, tail = os.path.splitdrive(key)
+    return os.path.join(drive, tail.split(":", 1)[0])
+
+
+def _strip_filename(msg: str) -> tuple[int, str]:
+    """Strip the filename and line number from a mypy message."""
+    _, tail = os.path.splitdrive(msg)
+    _, lineno, msg = tail.split(":", 2)
+    return int(lineno), msg.strip()
+
+
+def strip_func(match: re.Match[str]) -> str:
+    """`re.sub` helper function for stripping module names."""
+    return match.groups()[1]
+
+
+@pytest.fixture(scope="module", autouse=True)
+def run_mypy() -> None:
+    """Clear the cache and run mypy before running any of the typing tests.
+
+    The mypy results are cached in `OUTPUT_MYPY` for further use.
+
+    The cache refresh can be skipped using
+
+    NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests
+    """
+    if (
+        os.path.isdir(CACHE_DIR)
+        and os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", "1") not in ("0", "", "false")
+    ):
+        shutil.rmtree(CACHE_DIR)
+
+    split_pattern = re.compile(r"(\s+)?\^(\~+)?")
+    for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
+        # Run mypy
+        stdout, stderr, exit_code = api.run([
+            "--config-file",
+            MYPY_INI,
+            "--cache-dir",
+            CACHE_DIR,
+            directory,
+        ])
+        if stderr:
+            pytest.fail(f"Unexpected mypy standard error\n\n{stderr}", False)
+        elif exit_code not in {0, 1}:
+            pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}", False)
+
+        str_concat = ""
+        filename: str | None = None
+        for i in stdout.split("\n"):
+            if "note:" in i:
+                continue
+            if filename is None:
+                filename = _key_func(i)
+
+            str_concat += f"{i}\n"
+            if split_pattern.match(i) is not None:
+                OUTPUT_MYPY[filename].append(str_concat)
+                str_concat = ""
+                filename = None
+
+
+def get_test_cases(*directories: str) -> "Iterator[ParameterSet]":
+    for directory in directories:
+        for root, _, files in os.walk(directory):
+            for fname in files:
+                short_fname, ext = os.path.splitext(fname)
+                if ext not in (".pyi", ".py"):
+                    continue
+
+                fullpath = os.path.join(root, fname)
+                yield pytest.param(fullpath, id=short_fname)
+
+
+_FAIL_INDENT = " " * 4
+_FAIL_SEP = "\n" + "_" * 79 + "\n\n"
+
+_FAIL_MSG_REVEAL = """{}:{} - reveal mismatch:
+
+{}"""
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(PASS_DIR, FAIL_DIR))
+def test_pass(path: str) -> None:
+    # Alias `OUTPUT_MYPY` so that it appears in the local namespace
+    output_mypy = OUTPUT_MYPY
+
+    if path not in output_mypy:
+        return
+
+    relpath = os.path.relpath(path)
+
+    # collect any reported errors, and clean up the output
+    messages = []
+    for message in output_mypy[path]:
+        lineno, content = _strip_filename(message)
+        content = content.removeprefix("error:").lstrip()
+        messages.append(f"{relpath}:{lineno} - {content}")
+
+    if messages:
+        pytest.fail("\n".join(messages), pytrace=False)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
+def test_reveal(path: str) -> None:
+    """Validate that mypy correctly infers the return-types of
+    the expressions in `path`.
+ """ + __tracebackhide__ = True + + output_mypy = OUTPUT_MYPY + if path not in output_mypy: + return + + relpath = os.path.relpath(path) + + # collect any reported errors, and clean up the output + failures = [] + for error_line in output_mypy[path]: + lineno, error_msg = _strip_filename(error_line) + error_msg = textwrap.indent(error_msg, _FAIL_INDENT) + reason = _FAIL_MSG_REVEAL.format(relpath, lineno, error_msg) + failures.append(reason) + + if failures: + reasons = _FAIL_SEP.join(failures) + pytest.fail(reasons, pytrace=False) + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) +def test_code_runs(path: str) -> None: + """Validate that the code in `path` properly during runtime.""" + path_without_extension, _ = os.path.splitext(path) + dirname, filename = path.split(os.sep)[-2:] + + spec = importlib.util.spec_from_file_location( + f"{dirname}.{filename}", path + ) + assert spec is not None + assert spec.loader is not None + + test_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(test_module) diff --git a/.venv/lib/python3.12/site-packages/numpy/version.py b/.venv/lib/python3.12/site-packages/numpy/version.py new file mode 100644 index 0000000..07b7230 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/version.py @@ -0,0 +1,11 @@ + +""" +Module to expose more detailed version info for the installed `numpy` +""" +version = "2.3.2" +__version__ = version +full_version = version + +git_revision = "bc5e4f811db9487a9ea1618ffb77a33b3919bb8e" +release = 'dev' not in version and '+' not in version +short_version = version.split("+")[0] diff --git a/.venv/lib/python3.12/site-packages/numpy/version.pyi b/.venv/lib/python3.12/site-packages/numpy/version.pyi new file mode 100644 index 0000000..113cde3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/version.pyi @@ -0,0 +1,18 @@ +from typing import Final, LiteralString + +__all__ = ( + '__version__', + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', +) + +version: Final[LiteralString] +__version__: Final[LiteralString] +full_version: Final[LiteralString] + +git_revision: Final[LiteralString] +release: Final[bool] +short_version: Final[LiteralString] diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/METADATA b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/METADATA new file mode 100644 index 0000000..2cf904b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/METADATA @@ -0,0 +1,112 @@ +Metadata-Version: 2.4 +Name: pip +Version: 25.2 +Summary: The PyPA recommended tool for installing Python packages. 
+Author-email: The pip developers +License-Expression: MIT +Project-URL: Homepage, https://pip.pypa.io/ +Project-URL: Documentation, https://pip.pypa.io +Project-URL: Source, https://github.com/pypa/pip +Project-URL: Changelog, https://pip.pypa.io/en/stable/news/ +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Topic :: Software Development :: Build Tools +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: AUTHORS.txt +License-File: LICENSE.txt +License-File: src/pip/_vendor/msgpack/COPYING +License-File: src/pip/_vendor/cachecontrol/LICENSE.txt +License-File: src/pip/_vendor/certifi/LICENSE +License-File: src/pip/_vendor/dependency_groups/LICENSE.txt +License-File: src/pip/_vendor/distlib/LICENSE.txt +License-File: src/pip/_vendor/distro/LICENSE +License-File: src/pip/_vendor/idna/LICENSE.md +License-File: src/pip/_vendor/packaging/LICENSE +License-File: src/pip/_vendor/packaging/LICENSE.APACHE +License-File: src/pip/_vendor/packaging/LICENSE.BSD +License-File: src/pip/_vendor/pkg_resources/LICENSE +License-File: src/pip/_vendor/platformdirs/LICENSE +License-File: src/pip/_vendor/pygments/LICENSE +License-File: src/pip/_vendor/pyproject_hooks/LICENSE +License-File: src/pip/_vendor/requests/LICENSE +License-File: src/pip/_vendor/resolvelib/LICENSE +License-File: src/pip/_vendor/rich/LICENSE +License-File: src/pip/_vendor/tomli/LICENSE +License-File: src/pip/_vendor/tomli/LICENSE-HEADER +License-File: src/pip/_vendor/tomli_w/LICENSE +License-File: src/pip/_vendor/truststore/LICENSE +License-File: src/pip/_vendor/urllib3/LICENSE.txt +Dynamic: license-file + +pip - The Python Package Installer +================================== + +.. |pypi-version| image:: https://img.shields.io/pypi/v/pip.svg + :target: https://pypi.org/project/pip/ + :alt: PyPI + +.. |python-versions| image:: https://img.shields.io/pypi/pyversions/pip + :target: https://pypi.org/project/pip + :alt: PyPI - Python Version + +.. |docs-badge| image:: https://readthedocs.org/projects/pip/badge/?version=latest + :target: https://pip.pypa.io/en/latest + :alt: Documentation + +|pypi-version| |python-versions| |docs-badge| + +pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes. + +Please take a look at our documentation for how to install and use pip: + +* `Installation`_ +* `Usage`_ + +We release updates regularly, with a new version every 3 months. 
Find more details in our documentation: + +* `Release notes`_ +* `Release process`_ + +If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms: + +* `Issue tracking`_ +* `Discourse channel`_ +* `User IRC`_ + +If you want to get involved head over to GitHub to get the source code, look at our development documentation and feel free to jump on the developer mailing lists and chat rooms: + +* `GitHub page`_ +* `Development documentation`_ +* `Development IRC`_ + +Code of Conduct +--------------- + +Everyone interacting in the pip project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_. + +.. _package installer: https://packaging.python.org/guides/tool-recommendations/ +.. _Python Package Index: https://pypi.org +.. _Installation: https://pip.pypa.io/en/stable/installation/ +.. _Usage: https://pip.pypa.io/en/stable/ +.. _Release notes: https://pip.pypa.io/en/stable/news.html +.. _Release process: https://pip.pypa.io/en/latest/development/release-process/ +.. _GitHub page: https://github.com/pypa/pip +.. _Development documentation: https://pip.pypa.io/en/latest/development +.. _Issue tracking: https://github.com/pypa/pip/issues +.. _Discourse channel: https://discuss.python.org/c/packaging +.. _User IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa +.. _Development IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa-dev +.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/RECORD b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/RECORD new file mode 100644 index 0000000..eb69035 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/RECORD @@ -0,0 +1,453 @@ +pip/__init__.py,sha256=_lgs5Mfp0t7AGtI7sTVwxKWquz_vahWWH9kKO1cJusA,353 +pip/__main__.py,sha256=WzbhHXTbSE6gBY19mNN9m4s5o_365LOvTYSgqgbdBhE,854 +pip/__pip-runner__.py,sha256=JOoEZTwrtv7jRaXBkgSQKAE04yNyfFmGHxqpHiGHvL0,1450 +pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286 +pip/_internal/__init__.py,sha256=S7i9Dn9aSZS0MG-2Wrve3dV9TImPzvQn5jjhp9t_uf0,511 +pip/_internal/build_env.py,sha256=5_-O5G0QtCeFKHAezqAz0IZ5_O7qCYZ-Bxddu9idpYs,11566 +pip/_internal/cache.py,sha256=rWXvsuFY7GgPERhiNp3yZHFGKdvDvkZRn2n4mXn8lAg,10364 +pip/_internal/configuration.py,sha256=BomQ3pC84J6zCXDQpND_OmFY7mubE9cMBZVohNUzMvY,14587 +pip/_internal/exceptions.py,sha256=bRTEn6Jlw-vxHjixtiu0K79jXJu0X5FVC0L62Bdb0KI,28974 +pip/_internal/main.py,sha256=1cHqjsfFCrMFf3B5twzocxTJUdHMLoXUpy5lJoFqUi8,338 +pip/_internal/pyproject.py,sha256=T72qz0buaY0s-mgrXLUU1sONf4Rk97dJu0qpniBCGU8,7233 +pip/_internal/self_outdated_check.py,sha256=wfikZmcGVIrnepL413-0rnQ3jeOA0unOKPle0Ovwoho,8326 +pip/_internal/wheel_builder.py,sha256=PDF2w6pxeec5skP3gLIrR_VrJrAO4pHRpUnkNbJ8R5Q,11225 +pip/_internal/cli/__init__.py,sha256=Iqg_tKA771XuMO1P4t_sDHnSKPzkUb9D0DqunAmw_ko,131 +pip/_internal/cli/autocompletion.py,sha256=ZG2cM03nlcNrs-WG_SFTW46isx9s2Go5lUD_8-iv70o,7193 +pip/_internal/cli/base_command.py,sha256=1Nx919JRFlgURLis9XYJwtbyEEjRJa_NdHwM6iBkZvY,8716 +pip/_internal/cli/cmdoptions.py,sha256=99bW3xpXJ8Mr59OGbG7d7vN7CGPLPwi1nTmE3eMc8lE,32032 +pip/_internal/cli/command_context.py,sha256=kmu3EWZbfBega1oDamnGJTA_UaejhIQNuMj2CVmMXu0,817 +pip/_internal/cli/index_command.py,sha256=AHk6eSqboaxTXbG3v9mBrVd0dCK1MtW4w3PVudnj0WE,5717 +pip/_internal/cli/main.py,sha256=K9PtpRdg6uBrVKk8S2VZ14fAN0kP-cnA1o-FtJCN_OQ,2815 
+pip/_internal/cli/main_parser.py,sha256=UugPD-hF1WtNQdow_WWduDLUH1DvElpc7EeUWjUkcNo,4329 +pip/_internal/cli/parser.py,sha256=sAOebBa5_0rRAlmBy8myMPFHQZb-bbCH1xpIJDaXaLs,10928 +pip/_internal/cli/progress_bars.py,sha256=nRTWNof-FjHfvirvECXIh7T7eAynTUVPTyHENfpbWiU,4668 +pip/_internal/cli/req_command.py,sha256=pXT_jAwcmO9PjJ1-Pl1VevBgr5pnxHsyPZUBzAfqQg8,13081 +pip/_internal/cli/spinners.py,sha256=EJzZIZNyUtJljp3-WjcsyIrqxW-HUsfWzhuW84n_Tqw,7362 +pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116 +pip/_internal/commands/__init__.py,sha256=aNeCbQurGWihfhQq7BqaLXHqWDQ0i3I04OS7kxK6plQ,4026 +pip/_internal/commands/cache.py,sha256=OrrLS6EJEha_55yPa9fTaOaonw-VpH4_lVhjxuOTChQ,8230 +pip/_internal/commands/check.py,sha256=hVFBQezQ3zj4EydoWbFQj_afPUppMt7r9JPAlY22U6Y,2244 +pip/_internal/commands/completion.py,sha256=MDwhTOBjlM4WEbOhgbhrWnlDm710i4FMjop3RBXXXCc,4530 +pip/_internal/commands/configuration.py,sha256=6gNOGrVWnOLU15zUnAiNuOMhf76RRIZvCdVD0degPRk,10105 +pip/_internal/commands/debug.py,sha256=_8IqM8Fx1_lY2STu_qspr63tufF7zyFJCyYAXtxz0N4,6805 +pip/_internal/commands/download.py,sha256=GGBSxhORV0mrad8STgORfvjRGh1qS2plvpXFNbxgcps,5249 +pip/_internal/commands/freeze.py,sha256=fxoW8AAc-bAqB_fXdNq2VnZ3JfWkFMg-bR6LcdDVO7A,3099 +pip/_internal/commands/hash.py,sha256=GO9pRN3wXC2kQaovK57TaLYBMc3IltOH92O6QEw6YE0,1679 +pip/_internal/commands/help.py,sha256=Bz3LcjNQXkz4Cu__pL4CZ86o4-HNLZj1NZWdlJhjuu0,1108 +pip/_internal/commands/index.py,sha256=8GMBVI5NvhRRHBSUq27YxDIE02DpvdJ_6qiBFgGd1co,5243 +pip/_internal/commands/inspect.py,sha256=ogm4UT7LRo8bIQcWUS1IiA25QdD4VHLa7JaPAodDttM,3177 +pip/_internal/commands/install.py,sha256=AIO-Um49e3GqMOk2SmHCt45A0W0-YhKXcpr5okjHh80,30080 +pip/_internal/commands/list.py,sha256=I4ZH604E5gpcROxEXA7eyaNEFhXx3VFVqvpscz_Ps_A,13514 +pip/_internal/commands/lock.py,sha256=SxXGnU2EH-TG-fs9fuZHAuMOH15Lzy1K7e_KJ4vtSr0,5917 +pip/_internal/commands/search.py,sha256=zbMsX_YASj6kXA6XIBgTDv0bGK51xG-CV3IynZJcE-c,5782 +pip/_internal/commands/show.py,sha256=oLVJIfKWmDKm0SsQGEi3pozNiqrXjTras_fbBSYKpBA,8066 +pip/_internal/commands/uninstall.py,sha256=CsOihqvb6ZA6O67L70oXeoLHeOfNzMM88H9g-9aocgw,3868 +pip/_internal/commands/wheel.py,sha256=vcNgnhxwilocRQJEBdWQvJ8qvO0RfWmz9LwrUBadvYE,6322 +pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858 +pip/_internal/distributions/base.py,sha256=l-OTCAIs25lsapejA6IYpPZxSM5-BET4sdZDkql8jiY,1830 +pip/_internal/distributions/installed.py,sha256=kgIEE_1NzjZxLBSC-v5s64uOFZlVEt3aPrjTtL6x2XY,929 +pip/_internal/distributions/sdist.py,sha256=gYgrzI0lstHJRKweOdL_m6LtqDxtfuo_yCL9HjmH-xw,6952 +pip/_internal/distributions/wheel.py,sha256=_HbG0OehF8dwj4UX-xV__tXLwgPus9OjMEf2NTRqBbE,1364 +pip/_internal/index/__init__.py,sha256=tzwMH_fhQeubwMqHdSivasg1cRgTSbNg2CiMVnzMmyU,29 +pip/_internal/index/collector.py,sha256=PCB3thVWRiSBowGtpv1elIPFc-GvEqhZiNgZD7b0vBc,16185 +pip/_internal/index/package_finder.py,sha256=Kd2FmWJFAcEg2Sy2pTGQX8WPFC8B9U_3b1VK7i8sx9o,38827 +pip/_internal/index/sources.py,sha256=nXJkOjhLy-O2FsrKU9RIqCOqgY2PsoKWybtZjjRgqU0,8639 +pip/_internal/locations/__init__.py,sha256=2SADX0Gr9BIpx19AO7Feq89nOmBQGEbl1IWjBpnaE9E,14185 +pip/_internal/locations/_distutils.py,sha256=jpFj4V00rD9IR3vA9TqrGkwcdNVFc58LsChZavge9JY,5975 +pip/_internal/locations/_sysconfig.py,sha256=NhcEi1_25w9cTTcH4RyOjD4UHW6Ijks0uKy1PL1_j_8,7716 +pip/_internal/locations/base.py,sha256=AImjYJWxOtDkc0KKc6Y4Gz677cg91caMA4L94B9FZEg,2550 +pip/_internal/metadata/__init__.py,sha256=a19B00IsX25khBt8oWWy6_kKgjCM4zeh-FqYteCOFwA,5714 
+pip/_internal/metadata/_json.py,sha256=hNvnMHOXLAyNlzirWhPL9Nx2CvCqa1iRma6Osq1YfV8,2711 +pip/_internal/metadata/base.py,sha256=BGuMenlcQT8i7j9iclrfdC3vSwgvhr8gjn955cCy16s,25420 +pip/_internal/metadata/pkg_resources.py,sha256=NO76ZrfR2-LKJTyaXrmQoGhmJMArALvacrlZHViSDT8,10544 +pip/_internal/metadata/importlib/__init__.py,sha256=jUUidoxnHcfITHHaAWG1G2i5fdBYklv_uJcjo2x7VYE,135 +pip/_internal/metadata/importlib/_compat.py,sha256=sneVh4_6WxQZK4ljdl3ylVuP-q0ttSqbgl9mWt0HnOg,2804 +pip/_internal/metadata/importlib/_dists.py,sha256=aOUgCX_AtA8B-lWLu-HfXA-ivMJWNxHPAohVcpQj2g4,8259 +pip/_internal/metadata/importlib/_envs.py,sha256=H3qVLXVh4LWvrPvu_ekXf3dfbtwnlhNJQP2pxXpccfU,5333 +pip/_internal/models/__init__.py,sha256=AjmCEBxX_MH9f_jVjIGNCFJKYCYeSEe18yyvNx4uRKQ,62 +pip/_internal/models/candidate.py,sha256=zzgFRuw_kWPjKpGw7LC0ZUMD2CQ2EberUIYs8izjdCA,753 +pip/_internal/models/direct_url.py,sha256=4NMWacu_QzPPWREC1te7v6Wfv-2HkI4tvSJF-CBgLh4,6555 +pip/_internal/models/format_control.py,sha256=PwemYG1L27BM0f1KP61rm24wShENFyxqlD1TWu34alc,2471 +pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030 +pip/_internal/models/installation_report.py,sha256=cqfWJ93ThCxjcacqSWryOCD2XtIn1CZrgzZxAv5FQZ0,2839 +pip/_internal/models/link.py,sha256=h4lI8MaDA7DTIxIJ_NgDE64EE4h1sX3AB23Y1Qu8W-k,21793 +pip/_internal/models/pylock.py,sha256=Vmaa71gOSV0ZYzRgWiIm4KwVbClaahMcuvKCkP_ZznA,6211 +pip/_internal/models/scheme.py,sha256=PakmHJM3e8OOWSZFtfz1Az7f1meONJnkGuQxFlt3wBE,575 +pip/_internal/models/search_scope.py,sha256=1hxU2IVsAaLZVjp0CbzJbYaYzCxv72_Qbg3JL0qhXo0,4507 +pip/_internal/models/selection_prefs.py,sha256=lgYyo4W8lb22wsYx2ElBBB0cvSNlBVgucwBzL43dfzE,2016 +pip/_internal/models/target_python.py,sha256=I0eFS-eia3kwhrOvgsphFZtNAB2IwXZ9Sr9fp6IjBP4,4243 +pip/_internal/models/wheel.py,sha256=xWO0K-YanSL3OfMZUN1gHlEUV0YJOVYMQ4wrwmA9Zis,5526 +pip/_internal/network/__init__.py,sha256=FMy06P__y6jMjUc8z3ZcQdKF-pmZ2zM14_vBeHPGhUI,49 +pip/_internal/network/auth.py,sha256=uAwRGAYnVtgNSZm4HMC3BMACkgA7ku4m8wupiX6LpK8,20681 +pip/_internal/network/cache.py,sha256=u5LtQbnCzMU6vFT0eUUcYe6iKiwALMqrn1jUZj4CqaY,5302 +pip/_internal/network/download.py,sha256=HgsFvTkPDdgg0zUehose_J-542-9R0FpyipRw5BhxAM,12682 +pip/_internal/network/lazy_wheel.py,sha256=5H7_s-_AK6br1K5NFcyUsiocK9E6vwrJY5kzwjMv0EM,7651 +pip/_internal/network/session.py,sha256=eE-VUIJGU9YeeaVy7tVAvMRWigMsyuAMpxkjlbptbjo,19188 +pip/_internal/network/utils.py,sha256=ACsXd1msqNCidHVXsu7LHUSr8NgaypcOKQ4KG-Z_wJM,4091 +pip/_internal/network/xmlrpc.py,sha256=_-Rnk3vOff8uF9hAGmT6SLALflY1gMBcbGwS12fb_Y4,1830 +pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/check.py,sha256=yC2XWth6iehGGE_fj7XRJLjVKBsTIG3ZoWRkFi3rOwc,5894 +pip/_internal/operations/freeze.py,sha256=PDdY-y_ZtZZJLAKcaWPIGRKAGW7DXR48f0aMRU0j7BA,9854 +pip/_internal/operations/prepare.py,sha256=gdbQUAjxLqqz8P2KFem2OEJ_6V3aTuozQkiBLfCCVAU,28613 +pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/build/build_tracker.py,sha256=W3b5cmkMWPaE6QIwfzsTayJo7-OlxFHWDxfPuax1KcE,4771 +pip/_internal/operations/build/metadata.py,sha256=INHaeiRfOiLYCXApfDNRo9Cw2xI4VwTc0KItvfdfOjk,1421 +pip/_internal/operations/build/metadata_editable.py,sha256=oWudMsnjy4loO_Jy7g4N9nxsnaEX_iDlVRgCy7pu1rs,1509 +pip/_internal/operations/build/metadata_legacy.py,sha256=wv8cFA0wTqF62Jlm9QwloYZsofOyQ7sWBBmvCcVvn1k,2189 
+pip/_internal/operations/build/wheel.py,sha256=toBJE5atKJGsSckY5ztVAQpRss25f049f-nJaiJsiD8,1080 +pip/_internal/operations/build/wheel_editable.py,sha256=Djx7hZqhejgYUcdQD5p4mn7AVFN3VQ7bOSs7b90TtOI,1422 +pip/_internal/operations/build/wheel_legacy.py,sha256=LaHpG4_s6415iLjuTb6seO3JIJ7097yQUIthcmpqaOY,3616 +pip/_internal/operations/install/__init__.py,sha256=ak-UETcQPKlFZaWoYKWu5QVXbpFBvg0sXc3i0O4vSYY,50 +pip/_internal/operations/install/editable_legacy.py,sha256=zS6Hm-9SHcrVuA_9Irc9Qc8CIoFLDMZiSvi5rizZe34,1311 +pip/_internal/operations/install/wheel.py,sha256=8aepxxAFmnzZFtcMCv-1I4T_maEkQd4hXZztYWE4yR0,27956 +pip/_internal/req/__init__.py,sha256=vM0bWAl3By5ALRIXWmKdkMca18KQfbGXVJZm3Igpwmg,3122 +pip/_internal/req/constructors.py,sha256=QlLjBIg5xPxyQx_hGGB7Fz527ruAXC6nbVMoGfXzNnM,18320 +pip/_internal/req/req_dependency_group.py,sha256=0yEQCUaO5Bza66Y3D5o9JRf0qII5QgCRugn1x5aRivA,2618 +pip/_internal/req/req_file.py,sha256=j_1PcBDO0aKb8pjjRMsZQrwts5YNrKsrHjPyV56XoHo,20161 +pip/_internal/req/req_install.py,sha256=N4LxV9LHimgle5zEkNFk9WSfi51KXvK9y73mvYl9uig,35718 +pip/_internal/req/req_set.py,sha256=awkqIXnYA4Prmsj0Qb3zhqdbYUmXd-1o0P-KZ3mvRQs,2828 +pip/_internal/req/req_uninstall.py,sha256=dCmOHt-9RaJBq921L4tMH3PmIBDetGplnbjRKXmGt00,24099 +pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/base.py,sha256=RIsqSP79olPdOgtPKW-oOQ364ICVopehA6RfGkRfe2s,577 +pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/legacy/resolver.py,sha256=bwUqE66etz2bcPabqxed18-iyqqb-kx3Er2aT6GeUJY,24060 +pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/resolvelib/base.py,sha256=_AoP0ZWlaSct8CRDn2ol3CbNn4zDtnh_0zQGjXASDKI,5047 +pip/_internal/resolution/resolvelib/candidates.py,sha256=dblosDcMumdT075Y8D1yXiJrWF_wDgIGe_8hUQIneTw,20208 +pip/_internal/resolution/resolvelib/factory.py,sha256=XdCpLq59oVIHMMekc2cgD7imcVD00_ioTnu9_k3iq3E,32577 +pip/_internal/resolution/resolvelib/found_candidates.py,sha256=8bZYDCZLXSdLHy_s1o5f4r15HmKvqFUhzBUQOF21Lr4,6018 +pip/_internal/resolution/resolvelib/provider.py,sha256=1TJC2o7F02aTMaoqa0Ayz45PrE-pQ5XshgGUmweESkk,11144 +pip/_internal/resolution/resolvelib/reporter.py,sha256=atiTh1bnKc-KfG841nRw3dk0WbOhr_L8luJHcY0JoSs,3270 +pip/_internal/resolution/resolvelib/requirements.py,sha256=z0gXmWfo03ynOnhF8kpj5SycgroerDhQV0VWzmAKAfg,8076 +pip/_internal/resolution/resolvelib/resolver.py,sha256=f0fmYzB9G4wWxssfj4qerDi_F_8vjAaYPbGLb7N7tQw,13617 +pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/utils/_jaraco_text.py,sha256=M15uUPIh5NpP1tdUGBxRau6q1ZAEtI8-XyLEETscFfE,3350 +pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015 +pip/_internal/utils/appdirs.py,sha256=LrzDPZMKVh0rubtCx9vu3XlZbLCSug6VSj4Qsvt66BA,1681 +pip/_internal/utils/compat.py,sha256=C9LHXJAKkwAH8Hn3nPkz9EYK3rqPBeO_IXkOG2zzsdQ,2514 +pip/_internal/utils/compatibility_tags.py,sha256=DiNSLqpuruXUamGQwOJ2WZByDGLTGaXi9O-Xf8fOi34,6630 +pip/_internal/utils/datetime.py,sha256=Gt29Ml4ToPSM88j54iu43WKtrU9A-moP4QmMiiqzedU,241 +pip/_internal/utils/deprecation.py,sha256=HVhvyO5qiRFcG88PhZlp_87qdKQNwPTUIIHWtsTR2yI,3696 +pip/_internal/utils/direct_url_helpers.py,sha256=ttKv4GMUqlRwPPog9_CUopy6SDgoxVILzeBJzgfn2tg,3200 +pip/_internal/utils/egg_link.py,sha256=YWfsrbmfcrfWgqQYy6OuIjsyb9IfL1q_2v4zsms1WjI,2459 
+pip/_internal/utils/entrypoints.py,sha256=uPjAyShKObdotjQjJUzprQ6r3xQvDIZwUYfHHqZ7Dok,3324 +pip/_internal/utils/filesystem.py,sha256=du4lOOlTOBq22V5kf0yXMydo1KU2Cs-cMtYs3bEk13c,4988 +pip/_internal/utils/filetypes.py,sha256=sEMa38qaqjvx1Zid3OCAUja31BOBU-USuSMPBvU3yjo,689 +pip/_internal/utils/glibc.py,sha256=sEh8RJJLYSdRvTqAO4THVPPA-YSDVLD4SI9So-bxX1U,3726 +pip/_internal/utils/hashes.py,sha256=d32UI1en8nyqZzdZQvxUVdfeBoe4ADWx7HtrIM4-XQ4,4998 +pip/_internal/utils/logging.py,sha256=RtRe7Vp0COC4UBewYdfKicXjCTmHXpDZHdReTzJvB78,12108 +pip/_internal/utils/misc.py,sha256=1jEpqjfqYmQ6K3D4_O8xXSPn8aEfH2uMOlNM7KPvSrg,23374 +pip/_internal/utils/packaging.py,sha256=s5tpUmFumwV0H9JSTzryrIY4JwQM8paGt7Sm7eNwt2Y,1601 +pip/_internal/utils/retry.py,sha256=83wReEB2rcntMZ5VLd7ascaYSjn_kLdlQCqxILxWkPM,1461 +pip/_internal/utils/setuptools_build.py,sha256=0RK4K07qpaBgnYm50_f5d5VdedYPk1fl9QNKaOa60XM,4499 +pip/_internal/utils/subprocess.py,sha256=r4-Ba_Yc3uZXQpi0K4pZFsCT_QqdSvtF3XJ-204QWaA,8983 +pip/_internal/utils/temp_dir.py,sha256=D9c8D7WOProOO8GGDqpBeVSj10NGFmunG0o2TodjjIU,9307 +pip/_internal/utils/unpacking.py,sha256=4tY2D-sbtBt2Lcws98Em3MPAPdHnXSBXnpSU0LoGUZQ,11939 +pip/_internal/utils/urls.py,sha256=aF_eg9ul5d8bMCxfSSSxQcfs-OpJdbStYqZHoy2K1RE,1601 +pip/_internal/utils/virtualenv.py,sha256=mX-UPyw1MPxhwUxKhbqWWX70J6PHXAJjVVrRnG0h9mc,3455 +pip/_internal/utils/wheel.py,sha256=YdRuj6MicG-Q9Mg03FbUv1WTLam6Lc7AgijY4voVyis,4468 +pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596 +pip/_internal/vcs/bazaar.py,sha256=3W1eHjkYx2vc6boeb2NBh4I_rlGAXM-vrzfNhLm1Rxg,3734 +pip/_internal/vcs/git.py,sha256=TTeqDuzS-_BFSNuUStVWmE2nGDpKuvUhBBJk_CCQXV0,19144 +pip/_internal/vcs/mercurial.py,sha256=w1ZJWLKqNP1onEjkfjlwBVnMqPZNSIER8ayjQcnTq4w,5575 +pip/_internal/vcs/subversion.py,sha256=uUgdPvxmvEB8Qwtjr0Hc0XgFjbiNi5cbvI4vARLOJXo,11787 +pip/_internal/vcs/versioncontrol.py,sha256=d-v1mcLxofg2FaIqBrV-e-ZcjOgQhS0oxXpki1v1yXs,22502 +pip/_vendor/__init__.py,sha256=WzusPTGWIMeQQWSVJ0h2rafGkVTa9WKJ2HT-2-EoZrU,4907 +pip/_vendor/vendor.txt,sha256=fawq8T1XFfBhs4rjjSl4fUA3Px9P2mtG2evqqPyhbhc,343 +pip/_vendor/cachecontrol/__init__.py,sha256=BF2n5OeQz1QW2xSey2LxfNCtwbjnTadXdIH2toqJecg,677 +pip/_vendor/cachecontrol/_cmd.py,sha256=iist2EpzJvDVIhMAxXq8iFnTBsiZAd6iplxfmNboNyk,1737 +pip/_vendor/cachecontrol/adapter.py,sha256=8y6rTPXOzVHmDKCW5CR9sivLVuDv-cpdGcZYdRWNaPw,6599 +pip/_vendor/cachecontrol/cache.py,sha256=OXwv7Fn2AwnKNiahJHnjtvaKLndvVLv_-zO-ltlV9qI,1953 +pip/_vendor/cachecontrol/controller.py,sha256=cx0Hl8xLZgUuXuy78Gih9AYjCtqurmYjVJxyA4yWt7w,19101 +pip/_vendor/cachecontrol/filewrapper.py,sha256=2ktXNPE0KqnyzF24aOsKCA58HQq1xeC6l2g6_zwjghc,4291 +pip/_vendor/cachecontrol/heuristics.py,sha256=gqMXU8w0gQuEQiSdu3Yg-0vd9kW7nrWKbLca75rheGE,4881 +pip/_vendor/cachecontrol/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/cachecontrol/serialize.py,sha256=HQd2IllQ05HzPkVLMXTF2uX5mjEQjDBkxCqUJUODpZk,5163 +pip/_vendor/cachecontrol/wrapper.py,sha256=hsGc7g8QGQTT-4f8tgz3AM5qwScg6FO0BSdLSRdEvpU,1417 +pip/_vendor/cachecontrol/caches/__init__.py,sha256=dtrrroK5BnADR1GWjCZ19aZ0tFsMfvFBtLQQU1sp_ag,303 +pip/_vendor/cachecontrol/caches/file_cache.py,sha256=d8upFmy_zwaCmlbWEVBlLXFddt8Zw8c5SFpxeOZsdfw,4117 +pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=9rmqwtYu_ljVkW6_oLqbC7EaX_a8YT_yLuna-eS0dgo,1386 +pip/_vendor/certifi/__init__.py,sha256=dvNDUdAXp-hzoYcf09PXzLmHIlPfypaBQPFp5esDXyo,94 +pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255 
+pip/_vendor/certifi/cacert.pem,sha256=lm3cYJxEv9SoAx4Atc3NiT1D92d965vcID6H39f_93o,290057 +pip/_vendor/certifi/core.py,sha256=gu_ECVI1m3Rq0ytpsNE61hgQGcKaOAt9Rs9G8KsTCOI,3442 +pip/_vendor/certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/dependency_groups/__init__.py,sha256=C3OFu0NGwDzQ4LOmmSOFPsRSvkbBn-mdd4j_5YqJw-s,250 +pip/_vendor/dependency_groups/__main__.py,sha256=UNTM7P5mfVtT7wDi9kOTXWgV3fu3e8bTrt1Qp1jvjKo,1709 +pip/_vendor/dependency_groups/_implementation.py,sha256=Gqb2DlQELRakeHlKf6QtQSW0M-bcEomxHw4JsvID1ls,8041 +pip/_vendor/dependency_groups/_lint_dependency_groups.py,sha256=yp-DDqKXtbkDTNa0ifa-FmOA8ra24lPZEXftW-R5AuI,1710 +pip/_vendor/dependency_groups/_pip_wrapper.py,sha256=nuVW_w_ntVxpE26ELEvngMY0N04sFLsijXRyZZROFG8,1865 +pip/_vendor/dependency_groups/_toml_compat.py,sha256=BHnXnFacm3DeolsA35GjI6qkDApvua-1F20kv3BfZWE,285 +pip/_vendor/dependency_groups/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/distlib/__init__.py,sha256=Deo3uo98aUyIfdKJNqofeSEFWwDzrV2QeGLXLsgq0Ag,625 +pip/_vendor/distlib/compat.py,sha256=2jRSjRI4o-vlXeTK2BCGIUhkc6e9ZGhSsacRM5oseTw,41467 +pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820 +pip/_vendor/distlib/scripts.py,sha256=Qvp76E9Jc3IgyYubnpqI9fS7eseGOe4FjpeVKqKt9Iw,18612 +pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792 +pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784 +pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032 +pip/_vendor/distlib/util.py,sha256=vMPGvsS4j9hF6Y9k3Tyom1aaHLb0rFmZAEyzeAdel9w,66682 +pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648 +pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448 +pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888 +pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981 +pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64 +pip/_vendor/distro/distro.py,sha256=XqbefacAhDT4zr_trnbA15eY8vdK4GTghgmvUGrEM_4,49430 +pip/_vendor/distro/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868 +pip/_vendor/idna/codec.py,sha256=PEew3ItwzjW4hymbasnty2N2OXvNcgHB-JjrBuxHPYY,3422 +pip/_vendor/idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316 +pip/_vendor/idna/core.py,sha256=YJYyAMnwiQEPjVC4-Fqu_p4CJ6yKKuDGmppBNQNQpFs,13239 +pip/_vendor/idna/idnadata.py,sha256=W30GcIGvtOWYwAjZj4ZjuouUutC6ffgNuyjJy7fZ-lo,78306 +pip/_vendor/idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898 +pip/_vendor/idna/package_data.py,sha256=q59S3OXsc5VI8j6vSD0sGBMyk6zZ4vWFREE88yCJYKs,21 +pip/_vendor/idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/idna/uts46data.py,sha256=rt90K9J40gUSwppDPCrhjgi5AA6pWM65dEGRSf6rIhM,239289 +pip/_vendor/msgpack/__init__.py,sha256=q2T5PRsITVpeytgS0WiSqok9IY8V_aFiC8VVlXTCTgA,1109 +pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081 +pip/_vendor/msgpack/ext.py,sha256=kteJv03n9tYzd5oo3xYopVTo4vRaAxonBQQJhXohZZo,5726 +pip/_vendor/msgpack/fallback.py,sha256=0g1Pzp0vtmBEmJ5w9F3s_-JMVURP8RS4G1cc5TRaAsI,32390 +pip/_vendor/packaging/__init__.py,sha256=_0cDiPVf2S-bNfVmZguxxzmrIYWlyASxpqph4qsJWUc,494 
+pip/_vendor/packaging/_elffile.py,sha256=UkrbDtW7aeq3qqoAfU16ojyHZ1xsTvGke_WqMTKAKd0,3286 +pip/_vendor/packaging/_manylinux.py,sha256=t4y_-dTOcfr36gLY-ztiOpxxJFGO2ikC11HgfysGxiM,9596 +pip/_vendor/packaging/_musllinux.py,sha256=p9ZqNYiOItGee8KcZFeHF_YcdhVwGHdK6r-8lgixvGQ,2694 +pip/_vendor/packaging/_parser.py,sha256=gYfnj0pRHflVc4RHZit13KNTyN9iiVcU2RUCGi22BwM,10221 +pip/_vendor/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 +pip/_vendor/packaging/_tokenizer.py,sha256=OYzt7qKxylOAJ-q0XyK1qAycyPRYLfMPdGQKRXkZWyI,5310 +pip/_vendor/packaging/markers.py,sha256=P0we27jm1xUzgGMJxBjtUFCIWeBxTsMeJTOJ6chZmAY,12049 +pip/_vendor/packaging/metadata.py,sha256=8IZErqQQnNm53dZZuYq4FGU4_dpyinMeH1QFBIWIkfE,34739 +pip/_vendor/packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/packaging/requirements.py,sha256=gYyRSAdbrIyKDY66ugIDUQjRMvxkH2ALioTmX3tnL6o,2947 +pip/_vendor/packaging/specifiers.py,sha256=yc9D_MycJEmwUpZvcs1OZL9HfiNFmyw0RZaeHRNHkPw,40079 +pip/_vendor/packaging/tags.py,sha256=41s97W9Zatrq2Ed7Rc3qeBDaHe8pKKvYq2mGjwahfXk,22745 +pip/_vendor/packaging/utils.py,sha256=0F3Hh9OFuRgrhTgGZUl5K22Fv1YP2tZl1z_2gO6kJiA,5050 +pip/_vendor/packaging/version.py,sha256=oiHqzTUv_p12hpjgsLDVcaF5hT7pDaSOViUNMD4GTW0,16688 +pip/_vendor/packaging/licenses/__init__.py,sha256=3bx-gryo4sRv5LsrwApouy65VIs3u6irSORJzALkrzU,5727 +pip/_vendor/packaging/licenses/_spdx.py,sha256=oAm1ztPFwlsmCKe7lAAsv_OIOfS1cWDu9bNBkeu-2ns,48398 +pip/_vendor/pkg_resources/__init__.py,sha256=vbTJ0_ruUgGxQjlEqsruFmiNPVyh2t9q-zyTDT053xI,124451 +pip/_vendor/platformdirs/__init__.py,sha256=UfeSHWl8AeTtbOBOoHAxK4dODOWkZtfy-m_i7cWdJ8c,22344 +pip/_vendor/platformdirs/__main__.py,sha256=jBJ8zb7Mpx5ebcqF83xrpO94MaeCpNGHVf9cvDN2JLg,1505 +pip/_vendor/platformdirs/android.py,sha256=r0DshVBf-RO1jXJGX8C4Til7F1XWt-bkdWMgmvEiaYg,9013 +pip/_vendor/platformdirs/api.py,sha256=U9EzI3EYxcXWUCtIGRllqrcN99i2LSY1mq2-GtsUwEQ,9277 +pip/_vendor/platformdirs/macos.py,sha256=UlbyFZ8Rzu3xndCqQEHrfsYTeHwYdFap1Ioz-yxveT4,6154 +pip/_vendor/platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/platformdirs/unix.py,sha256=WZmkUA--L3JNRGmz32s35YfoD3ica6xKIPdCV_HhLcs,10458 +pip/_vendor/platformdirs/version.py,sha256=ddN3EcUPfer7CbqmyFNmg03R3u-qDn32T_fLsx25-Ck,511 +pip/_vendor/platformdirs/windows.py,sha256=IFpiohUBwxPtCzlyKwNtxyW4Jk8haa6W8o59mfrDXVo,10125 +pip/_vendor/pygments/__init__.py,sha256=8uNqJCCwXqbEx5aSsBr0FykUQOBDKBihO5mPqiw1aqo,2983 +pip/_vendor/pygments/__main__.py,sha256=WrndpSe6i1ckX_SQ1KaxD9CTKGzD0EuCOFxcbwFpoLU,353 +pip/_vendor/pygments/console.py,sha256=AagDWqwea2yBWf10KC9ptBgMpMjxKp8yABAmh-NQOVk,1718 +pip/_vendor/pygments/filter.py,sha256=YLtpTnZiu07nY3oK9nfR6E9Y1FBHhP5PX8gvkJWcfag,1910 +pip/_vendor/pygments/formatter.py,sha256=KZQMmyo_xkOIkQG8g66LYEkBh1bx7a0HyGCBcvhI9Ew,4390 +pip/_vendor/pygments/lexer.py,sha256=_kBrOJ_NT5Tl0IVM0rA9c8eysP6_yrlGzEQI0eVYB-A,35349 +pip/_vendor/pygments/modeline.py,sha256=K5eSkR8GS1r5OkXXTHOcV0aM_6xpk9eWNEIAW-OOJ2g,1005 +pip/_vendor/pygments/plugin.py,sha256=tPx0rJCTIZ9ioRgLNYG4pifCbAwTRUZddvLw-NfAk2w,1891 +pip/_vendor/pygments/regexopt.py,sha256=wXaP9Gjp_hKAdnICqoDkRxAOQJSc4v3X6mcxx3z-TNs,3072 +pip/_vendor/pygments/scanner.py,sha256=nNcETRR1tRuiTaHmHSTTECVYFPcLf6mDZu1e4u91A9E,3092 +pip/_vendor/pygments/sphinxext.py,sha256=5x7Zh9YlU6ISJ31dMwduiaanb5dWZnKg3MyEQsseNnQ,7981 +pip/_vendor/pygments/style.py,sha256=PlOZqlsnTVd58RGy50vkA2cXQ_lP5bF5EGMEBTno6DA,6420 
+pip/_vendor/pygments/token.py,sha256=WbdWGhYm_Vosb0DDxW9lHNPgITXfWTsQmHt6cy9RbcM,6226 +pip/_vendor/pygments/unistring.py,sha256=al-_rBemRuGvinsrM6atNsHTmJ6DUbw24q2O2Ru1cBc,63208 +pip/_vendor/pygments/util.py,sha256=oRtSpiAo5jM9ulntkvVbgXUdiAW57jnuYGB7t9fYuhc,10031 +pip/_vendor/pygments/filters/__init__.py,sha256=4U4jtA0X3iP83uQnB9-TI-HDSw8E8y8zMYHa0UjbbaI,40392 +pip/_vendor/pygments/formatters/__init__.py,sha256=KTwBmnXlaopJhQDOemVHYHskiDghuq-08YtP6xPNJPg,5385 +pip/_vendor/pygments/formatters/_mapping.py,sha256=1Cw37FuQlNacnxRKmtlPX4nyLoX9_ttko5ZwscNUZZ4,4176 +pip/_vendor/pygments/lexers/__init__.py,sha256=wbIME35GH7bI1B9rNPJFqWT-ij_RApZDYPUlZycaLzA,12115 +pip/_vendor/pygments/lexers/_mapping.py,sha256=l4tCXM8e9aPC2BD6sjIr0deT-J-z5tHgCwL-p1fS0PE,77602 +pip/_vendor/pygments/lexers/python.py,sha256=vxjn1cOHclIKJKxoyiBsQTY65GHbkZtZRuKQ2AVCKaw,53853 +pip/_vendor/pygments/styles/__init__.py,sha256=x9ebctfyvCAFpMTlMJ5YxwcNYBzjgq6zJaKkNm78r4M,2042 +pip/_vendor/pygments/styles/_mapping.py,sha256=6lovFUE29tz6EsV3XYY4hgozJ7q1JL7cfO3UOlgnS8w,3312 +pip/_vendor/pyproject_hooks/__init__.py,sha256=cPB_a9LXz5xvsRbX1o2qyAdjLatZJdQ_Lc5McNX-X7Y,691 +pip/_vendor/pyproject_hooks/_impl.py,sha256=jY-raxnmyRyB57ruAitrJRUzEexuAhGTpgMygqx67Z4,14936 +pip/_vendor/pyproject_hooks/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=MJNPpfIxcO-FghxpBbxkG1rFiQf6HOUbV4U5mq0HFns,557 +pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=qcXMhmx__MIJq10gGHW3mA4Tl8dy8YzHMccwnNoKlw0,12216 +pip/_vendor/requests/__init__.py,sha256=HlB_HzhrzGtfD_aaYUwUh1zWXLZ75_YCLyit75d0Vz8,5057 +pip/_vendor/requests/__version__.py,sha256=FDq681Y3EvBjdDp5UqplMZ28uTTYlM_Jib0sAV-NpXc,435 +pip/_vendor/requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495 +pip/_vendor/requests/adapters.py,sha256=J7VeVxKBvawbtlX2DERVo05J9BXTcWYLMHNd1Baa-bk,27607 +pip/_vendor/requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449 +pip/_vendor/requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186 +pip/_vendor/requests/certs.py,sha256=kHDlkK_beuHXeMPc5jta2wgl8gdKeUWt5f2nTDVrvt8,441 +pip/_vendor/requests/compat.py,sha256=QfbmdTFiZzjSHMXiMrd4joCRU6RabtQ9zIcPoVaHIus,1822 +pip/_vendor/requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590 +pip/_vendor/requests/exceptions.py,sha256=D1wqzYWne1mS2rU43tP9CeN1G7QAy7eqL9o1god6Ejw,4272 +pip/_vendor/requests/help.py,sha256=hRKaf9u0G7fdwrqMHtF3oG16RKktRf6KiwtSq2Fo1_0,3813 +pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 +pip/_vendor/requests/models.py,sha256=taljlg6vJ4b-xMu2TaMNFFkaiwMex_VsEQ6qUTN3wzY,35575 +pip/_vendor/requests/packages.py,sha256=_ZQDCJTJ8SP3kVWunSqBsRZNPzj2c1WFVqbdr08pz3U,1057 +pip/_vendor/requests/sessions.py,sha256=ykTI8UWGSltOfH07HKollH7kTBGw4WhiBVaQGmckTw4,30495 +pip/_vendor/requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322 +pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 +pip/_vendor/requests/utils.py,sha256=WS3wHSQaaEfceu1syiFo5jf4e_CWKUTep_IabOVI_J0,33225 +pip/_vendor/resolvelib/__init__.py,sha256=T0LcAr9Sfai6ZXanpwavdHEea-Anw2QZGx16zd3lMKY,541 +pip/_vendor/resolvelib/providers.py,sha256=pIWJbIdJJ9GFtNbtwTH0Ia43Vj6hYCEJj2DOLue15FM,8914 +pip/_vendor/resolvelib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/resolvelib/reporters.py,sha256=pNJf4nFxLpAeKxlBUi2GEj0a2Ij1nikY0UabTKXesT4,2037 
+pip/_vendor/resolvelib/structs.py,sha256=pu-EJiR2IBITr2SQeNPRa0rXhjlStfmO_GEgAhr3004,6420 +pip/_vendor/resolvelib/resolvers/__init__.py,sha256=728M3EvmnPbVXS7ExXlv2kMu6b7wEsoPutEfl-uVk_I,640 +pip/_vendor/resolvelib/resolvers/abstract.py,sha256=jZOBVigE4PUub9i3F-bTvBwaIXX8S9EU3CGASBvFqEU,1558 +pip/_vendor/resolvelib/resolvers/criterion.py,sha256=lcmZGv5sKHOnFD_RzZwvlGSj19MeA-5rCMpdf2Sgw7Y,1768 +pip/_vendor/resolvelib/resolvers/exceptions.py,sha256=ln_jaQtgLlRUSFY627yiHG2gD7AgaXzRKaElFVh7fDQ,1768 +pip/_vendor/resolvelib/resolvers/resolution.py,sha256=qU64VKtN-HxoFynmnI6oP8aMPzaiKZtb_1hASH3HTHI,23994 +pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090 +pip/_vendor/rich/__main__.py,sha256=e_aVC-tDzarWQW9SuZMuCgBr6ODV_iDNV2Wh2xkxOlw,7896 +pip/_vendor/rich/_cell_widths.py,sha256=fbmeyetEdHjzE_Vx2l1uK7tnPOhMs2X1lJfO3vsKDpA,10209 +pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235 +pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064 +pip/_vendor/rich/_export_format.py,sha256=RI08pSrm5tBSzPMvnbTqbD9WIalaOoN5d4M1RTmLq1Y,2128 +pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265 +pip/_vendor/rich/_fileno.py,sha256=HWZxP5C2ajMbHryvAQZseflVfQoGzsKOHzKGsLD8ynQ,799 +pip/_vendor/rich/_inspect.py,sha256=ROT0PLC2GMWialWZkqJIjmYq7INRijQQkoSokWTaAiI,9656 +pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225 +pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236 +pip/_vendor/rich/_null_file.py,sha256=ADGKp1yt-k70FMKV6tnqCqecB-rSJzp-WQsD7LPL-kg,1394 +pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063 +pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423 +pip/_vendor/rich/_ratio.py,sha256=IOtl78sQCYZsmHyxhe45krkb68u9xVz7zFsXVJD-b2Y,5325 +pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919 +pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351 +pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417 +pip/_vendor/rich/_win32_console.py,sha256=BSaDRIMwBLITn_m0mTRLPqME5q-quGdSMuYMpYeYJwc,22755 +pip/_vendor/rich/_windows.py,sha256=aBwaD_S56SbgopIvayVmpk0Y28uwY2C5Bab1wl3Bp-I,1925 +pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783 +pip/_vendor/rich/_wrap.py,sha256=FlSsom5EX0LVkA3KWy34yHnCfLtqX-ZIepXKh-70rpc,3404 +pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890 +pip/_vendor/rich/align.py,sha256=dg-7uY0ukMLLlUEsBDRLva22_sQgIJD4BK0dmZHFHug,10324 +pip/_vendor/rich/ansi.py,sha256=Avs1LHbSdcyOvDOdpELZUoULcBiYewY76eNBp6uFBhs,6921 +pip/_vendor/rich/bar.py,sha256=ldbVHOzKJOnflVNuv1xS7g6dLX2E3wMnXkdPbpzJTcs,3263 +pip/_vendor/rich/box.py,sha256=kmavBc_dn73L_g_8vxWSwYJD2uzBXOUFTtJOfpbczcM,10686 +pip/_vendor/rich/cells.py,sha256=KrQkj5-LghCCpJLSNQIyAZjndc4bnEqOEmi5YuZ9UCY,5130 +pip/_vendor/rich/color.py,sha256=3HSULVDj7qQkXUdFWv78JOiSZzfy5y1nkcYhna296V0,18211 +pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054 +pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131 +pip/_vendor/rich/console.py,sha256=t9azZpmRMVU5cphVBZSShNsmBxd2-IAWcTTlhor-E1s,100849 +pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288 +pip/_vendor/rich/containers.py,sha256=c_56TxcedGYqDepHBMTuZdUIijitAQgnox-Qde0Z1qo,5502 
+pip/_vendor/rich/control.py,sha256=EUTSUFLQbxY6Zmo_sdM-5Ls323vIHTBfN8TPulqeHUY,6487 +pip/_vendor/rich/default_styles.py,sha256=khQFqqaoDs3bprMqWpHw8nO5UpG2DN6QtuTd6LzZwYc,8257 +pip/_vendor/rich/diagnose.py,sha256=fJl1TItRn19gGwouqTg-8zPUW3YqQBqGltrfPQs1H9w,1025 +pip/_vendor/rich/emoji.py,sha256=Wd4bQubZdSy6-PyrRQNuMHtn2VkljK9uPZPVlu2cmx0,2367 +pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642 +pip/_vendor/rich/file_proxy.py,sha256=Tl9THMDZ-Pk5Wm8sI1gGg_U5DhusmxD-FZ0fUbcU0W0,1683 +pip/_vendor/rich/filesize.py,sha256=_iz9lIpRgvW7MNSeCZnLg-HwzbP4GETg543WqD8SFs0,2484 +pip/_vendor/rich/highlighter.py,sha256=G_sn-8DKjM1sEjLG_oc4ovkWmiUpWvj8bXi0yed2LnY,9586 +pip/_vendor/rich/json.py,sha256=vVEoKdawoJRjAFayPwXkMBPLy7RSTs-f44wSQDR2nJ0,5031 +pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252 +pip/_vendor/rich/layout.py,sha256=ajkSFAtEVv9EFTcFs-w4uZfft7nEXhNzL7ZVdgrT5rI,14004 +pip/_vendor/rich/live.py,sha256=tF3ukAAJZ_N2ZbGclqZ-iwLoIoZ8f0HHUz79jAyJqj8,15180 +pip/_vendor/rich/live_render.py,sha256=It_39YdzrBm8o3LL0kaGorPFg-BfZWAcrBjLjFokbx4,3521 +pip/_vendor/rich/logging.py,sha256=5KaPPSMP9FxcXPBcKM4cGd_zW78PMgf-YbMVnvfSw0o,12468 +pip/_vendor/rich/markup.py,sha256=3euGKP5s41NCQwaSjTnJxus5iZMHjxpIM0W6fCxra38,8451 +pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305 +pip/_vendor/rich/padding.py,sha256=KVEI3tOwo9sgK1YNSuH__M1_jUWmLZwRVV_KmOtVzyM,4908 +pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828 +pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396 +pip/_vendor/rich/panel.py,sha256=9sQl00hPIqH5G2gALQo4NepFwpP0k9wT-s_gOms5pIc,11157 +pip/_vendor/rich/pretty.py,sha256=gy3S72u4FRg2ytoo7N1ZDWDIvB4unbzd5iUGdgm-8fc,36391 +pip/_vendor/rich/progress.py,sha256=CUc2lkU-X59mVdGfjMCBkZeiGPL3uxdONjhNJF2T7wY,60408 +pip/_vendor/rich/progress_bar.py,sha256=mZTPpJUwcfcdgQCTTz3kyY-fc79ddLwtx6Ghhxfo064,8162 +pip/_vendor/rich/prompt.py,sha256=l0RhQU-0UVTV9e08xW1BbIj0Jq2IXyChX4lC0lFNzt4,12447 +pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391 +pip/_vendor/rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166 +pip/_vendor/rich/repr.py,sha256=5MZJZmONgC6kud-QW-_m1okXwL2aR6u6y-pUcUCJz28,4431 +pip/_vendor/rich/rule.py,sha256=0fNaS_aERa3UMRc3T5WMpN_sumtDxfaor2y3of1ftBk,4602 +pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843 +pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591 +pip/_vendor/rich/segment.py,sha256=otnKeKGEV-WRlQVosfJVeFDcDxAKHpvJ_hLzSu5lumM,24743 +pip/_vendor/rich/spinner.py,sha256=onIhpKlljRHppTZasxO8kXgtYyCHUkpSgKglRJ3o51g,4214 +pip/_vendor/rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424 +pip/_vendor/rich/style.py,sha256=xpj4uMBZMtuNuNomfUiamigl3p1sDvTCZwrG1tcTVeg,27059 +pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258 +pip/_vendor/rich/syntax.py,sha256=eDKIRwl--eZ0Lwo2da2RRtfutXGavrJO61Cl5OkS59U,36371 +pip/_vendor/rich/table.py,sha256=ZmT7V7MMCOYKw7TGY9SZLyYDf6JdM-WVf07FdVuVhTI,40049 +pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370 +pip/_vendor/rich/text.py,sha256=AO7JPCz6-gaN1thVLXMBntEmDPVYFgFNG1oM61_sanU,47552 +pip/_vendor/rich/theme.py,sha256=oNyhXhGagtDlbDye3tVu3esWOWk0vNkuxFw-_unlaK0,3771 
+pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102 +pip/_vendor/rich/traceback.py,sha256=c0WmB_L04_UfZbLaoH982_U_s7eosxKMUiAVmDPdRYU,35861 +pip/_vendor/rich/tree.py,sha256=yWnQ6rAvRGJ3qZGqBrxS2SW2TKBTNrP0SdY8QxOFPuw,9451 +pip/_vendor/tomli/__init__.py,sha256=PhNw_eyLgdn7McJ6nrAN8yIm3dXC75vr1sVGVVwDSpA,314 +pip/_vendor/tomli/_parser.py,sha256=9w8LG0jB7fwmZZWB0vVXbeejDHcl4ANIJxB2scEnDlA,25591 +pip/_vendor/tomli/_re.py,sha256=sh4sBDRgO94KJZwNIrgdcyV_qQast50YvzOAUGpRDKA,3171 +pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254 +pip/_vendor/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +pip/_vendor/tomli_w/__init__.py,sha256=0F8yDtXx3Uunhm874KrAcP76srsM98y7WyHQwCulZbo,169 +pip/_vendor/tomli_w/_writer.py,sha256=dsifFS2xYf1i76mmRyfz9y125xC7Z_HQ845ZKhJsYXs,6961 +pip/_vendor/tomli_w/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +pip/_vendor/truststore/__init__.py,sha256=2wRSVijjRzPLVXUzWqvdZLNsEOhDfopKLd2EKAYLwKU,1320 +pip/_vendor/truststore/_api.py,sha256=af8gEZG_vhsudia9vz4es3Vh8xAqhzQz4Cbjs6_rxus,11234 +pip/_vendor/truststore/_macos.py,sha256=nZlLkOmszUE0g6ryRwBVGY5COzPyudcsiJtDWarM5LQ,20503 +pip/_vendor/truststore/_openssl.py,sha256=LLUZ7ZGaio-i5dpKKjKCSeSufmn6T8pi9lDcFnvSyq0,2324 +pip/_vendor/truststore/_ssl_constants.py,sha256=NUD4fVKdSD02ri7-db0tnO0VqLP9aHuzmStcW7tAl08,1130 +pip/_vendor/truststore/_windows.py,sha256=rAHyKYD8M7t-bXfG8VgOVa3TpfhVhbt4rZQlO45YuP8,17993 +pip/_vendor/truststore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333 +pip/_vendor/urllib3/_collections.py,sha256=pyASJJhW7wdOpqJj9QJA8FyGRfr8E8uUUhqUvhF0728,11372 +pip/_vendor/urllib3/_version.py,sha256=t9wGB6ooOTXXgiY66K1m6BZS1CJyXHAU8EoWDTe6Shk,64 +pip/_vendor/urllib3/connection.py,sha256=ttIA909BrbTUzwkqEe_TzZVh4JOOj7g61Ysei2mrwGg,20314 +pip/_vendor/urllib3/connectionpool.py,sha256=e2eiAwNbFNCKxj4bwDKNK-w7HIdSz3OmMxU_TIt-evQ,40408 +pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 +pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 +pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 +pip/_vendor/urllib3/poolmanager.py,sha256=aWyhXRtNO4JUnCSVVqKTKQd8EXTvUm1VN9pgs2bcONo,19990 +pip/_vendor/urllib3/request.py,sha256=YTWFNr7QIwh7E1W9dde9LM77v2VWTJ5V78XuTTw7D1A,6691 +pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641 +pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 +pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036 +pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528 +pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081 +pip/_vendor/urllib3/contrib/securetransport.py,sha256=Fef1IIUUFHqpevzXiDPbIGkDKchY2FVKeVeLGR1Qq3g,34446 +pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097 +pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632 
+pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922 +pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665 +pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 +pip/_vendor/urllib3/packages/backports/weakref_finalize.py,sha256=tRCal5OAhNSRyb0DhHp-38AtIlCsRP8BxF3NX-6rqIA,5343 +pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 +pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901 +pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605 +pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 +pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997 +pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510 +pip/_vendor/urllib3/util/retry.py,sha256=6ENvOZ8PBDzh8kgixpql9lIrb2dxH-k7ZmBanJF2Ng4,22050 +pip/_vendor/urllib3/util/ssl_.py,sha256=QDuuTxPSCj1rYtZ4xpD7Ux-r20TD50aHyqKyhQ7Bq4A,17460 +pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758 +pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895 +pip/_vendor/urllib3/util/timeout.py,sha256=cwq4dMk87mJHSBktK1miYJ-85G-3T3RmT20v7SFCpno,10168 +pip/_vendor/urllib3/util/url.py,sha256=lCAE7M5myA8EDdW0sJuyyZhVB9K_j38ljWhHAnFaWoE,14296 +pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403 +pip-25.2.dist-info/licenses/AUTHORS.txt,sha256=ioEfMGkkizcTcIPdvjh-kYymO1E9I9dvzHjLUlKS5m8,11385 +pip-25.2.dist-info/licenses/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093 +pip-25.2.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558 +pip-25.2.dist-info/licenses/src/pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 +pip-25.2.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099 +pip-25.2.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 +pip-25.2.dist-info/licenses/src/pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 +pip-25.2.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md,sha256=pZ8LDvNjWHQQmkRhykT_enDVBpboFHZ7-vch1Mmw2w8,1541 +pip-25.2.dist-info/licenses/src/pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614 +pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 +pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 +pip-25.2.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 +pip-25.2.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 
+pip-25.2.dist-info/licenses/src/pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 +pip-25.2.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081 +pip-25.2.dist-info/licenses/src/pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 +pip-25.2.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751 +pip-25.2.dist-info/licenses/src/pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056 +pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE-HEADER,sha256=l86TMJBaFy3ehw7gNh2Jvrlbo70PRUV5aqkajAGkNTE,121 +pip-25.2.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +pip-25.2.dist-info/licenses/src/pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086 +pip-25.2.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 +pip-25.2.dist-info/METADATA,sha256=l6OtFNcf2JFKOPJK9bMaH5-usFrqZqSvQNl51tetIp8,4744 +pip-25.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +pip-25.2.dist-info/entry_points.txt,sha256=eeIjuzfnfR2PrhbjnbzFU6MnSS70kZLxwaHHq6M-bD0,87 +pip-25.2.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-25.2.dist-info/RECORD,, diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/WHEEL new file mode 100644 index 0000000..e7fa31b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/entry_points.txt b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/entry_points.txt new file mode 100644 index 0000000..25fcf7e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +pip = pip._internal.cli.main:main +pip3 = pip._internal.cli.main:main diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/AUTHORS.txt b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/AUTHORS.txt new file mode 100644 index 0000000..c8a7c68 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/AUTHORS.txt @@ -0,0 +1,833 @@ +@Switch01 +A_Rog +Aakanksha Agrawal +Abhinav Sagar +ABHYUDAY PRATAP SINGH +abs51295 +AceGentile +Adam Chainz +Adam Tse +Adam Turner +Adam Wentz +admin +Adolfo Ochagavía +Adrien Morison +Agus +ahayrapetyan +Ahilya +AinsworthK +Akash Srivastava +Alan Yee +Albert Tugushev +Albert-Guan +albertg +Alberto Sottile +Aleks Bunin +Ales Erjavec +Alethea Flowers +Alex Gaynor +Alex Grönholm +Alex Hedges +Alex Loosley +Alex Morega +Alex Stachowiak +Alexander Regueiro +Alexander Shtyrov +Alexandre Conrad +Alexey Popravka +Aleš Erjavec +Alli +Ami Fischman +Ananya Maiti +Anatoly Techtonik +Anders Kaseorg +Andre Aguiar +Andreas Lutro +Andrei Geacar +Andrew Gaul +Andrew Shymanel +Andrey Bienkowski +Andrey Bulgakov +Andrés Delfino +Andy Freeland +Andy Kluger +Ani Hayrapetyan +Aniruddha Basak +Anish Tambe +Anrs Hu +Anthony Sottile +Antoine Musso +Anton Ovchinnikov +Anton Patrushev +Anton Zelenov +Antonio 
Alvarado Hernandez +Antony Lee +Antti Kaihola +Anubhav Patel +Anudit Nagar +Anuj Godase +AQNOUCH Mohammed +AraHaan +arena +arenasys +Arindam Choudhury +Armin Ronacher +Arnon Yaari +Artem +Arun Babu Neelicattu +Ashley Manton +Ashwin Ramaswami +atse +Atsushi Odagiri +Avinash Karhana +Avner Cohen +Awit (Ah-Wit) Ghirmai +Baptiste Mispelon +Barney Gale +barneygale +Bartek Ogryczak +Bastian Venthur +Ben Bodenmiller +Ben Darnell +Ben Hoyt +Ben Mares +Ben Rosser +Bence Nagy +Benjamin Peterson +Benjamin VanEvery +Benoit Pierre +Berker Peksag +Bernard +Bernard Tyers +Bernardo B. Marques +Bernhard M. Wiedemann +Bertil Hatt +Bhavam Vidyarthi +Blazej Michalik +Bogdan Opanchuk +BorisZZZ +Brad Erickson +Bradley Ayers +Bradley Reynolds +Branch Vincent +Brandon L. Reiss +Brandt Bucher +Brannon Dorsey +Brett Randall +Brett Rosen +Brian Cristante +Brian Rosner +briantracy +BrownTruck +Bruno Oliveira +Bruno Renié +Bruno S +Bstrdsmkr +Buck Golemon +burrows +Bussonnier Matthias +bwoodsend +c22 +Caleb Brown +Caleb Martinez +Calvin Smith +Carl Meyer +Carlos Liam +Carol Willing +Carter Thayer +Cass +Chandrasekhar Atina +Charlie Marsh +charwick +Chih-Hsuan Yen +Chris Brinker +Chris Hunt +Chris Jerdonek +Chris Kuehl +Chris Markiewicz +Chris McDonough +Chris Pawley +Chris Pryer +Chris Wolfe +Christian Clauss +Christian Heimes +Christian Oudard +Christoph Reiter +Christopher Hunt +Christopher Snyder +chrysle +cjc7373 +Clark Boylan +Claudio Jolowicz +Clay McClure +Cody +Cody Soyland +Colin Watson +Collin Anderson +Connor Osborn +Cooper Lees +Cooper Ry Lees +Cory Benfield +Cory Wright +Craig Kerstiens +Cristian Sorinel +Cristina +Cristina Muñoz +ctg123 +Curtis Doty +cytolentino +Daan De Meyer +Dale +Damian +Damian Quiroga +Damian Shaw +Dan Black +Dan Savilonis +Dan Sully +Dane Hillard +daniel +Daniel Collins +Daniel Hahler +Daniel Holth +Daniel Jost +Daniel Katz +Daniel Shaulov +Daniele Esposti +Daniele Nicolodi +Daniele Procida +Daniil Konovalenko +Danny Hermes +Danny McClanahan +Darren Kavanagh +Dav Clark +Dave Abrahams +Dave Jones +David Aguilar +David Black +David Bordeynik +David Caro +David D Lowe +David Evans +David Hewitt +David Linke +David Poggi +David Poznik +David Pursehouse +David Runge +David Tucker +David Wales +Davidovich +ddelange +Deepak Sharma +Deepyaman Datta +Denis Roussel (ACSONE) +Denise Yu +dependabot[bot] +derwolfe +Desetude +developer +Devesh Kumar +Devesh Kumar Singh +devsagul +Diego Caraballo +Diego Ramirez +DiegoCaraballo +Dimitri Merejkowsky +Dimitri Papadopoulos +Dimitri Papadopoulos Orfanos +Dirk Stolle +Dmitry Gladkov +Dmitry Volodin +Domen Kožar +Dominic Davis-Foster +Donald Stufft +Dongweiming +doron zarhi +Dos Moonen +Douglas Thor +DrFeathers +Dustin Ingram +Dustin Rodrigues +Dwayne Bailey +Ed Morley +Edgar Ramírez +Edgar Ramírez Mondragón +Ee Durbin +Efflam Lemaillet +efflamlemaillet +Eitan Adler +ekristina +elainechan +Eli Schwartz +Elisha Hollander +Ellen Marie Dash +Emil Burzo +Emil Styrke +Emmanuel Arias +Endoh Takanao +enoch +Erdinc Mutlu +Eric Cousineau +Eric Gillingham +Eric Hanchrow +Eric Hopper +Erik M. 
Bray +Erik Rose +Erwin Janssen +Eugene Vereshchagin +everdimension +Federico +Felipe Peter +Felix Yan +fiber-space +Filip Kokosiński +Filipe Laíns +Finn Womack +finnagin +Flavio Amurrio +Florian Briand +Florian Rathgeber +Francesco +Francesco Montesano +Fredrik Orderud +Fredrik Roubert +Frost Ming +Gabriel Curio +Gabriel de Perthuis +Garry Polley +gavin +gdanielson +Gene Wood +Geoffrey Sneddon +George Margaritis +George Song +Georgi Valkov +Georgy Pchelkin +ghost +Giftlin Rajaiah +gizmoguy1 +gkdoc +Godefroid Chapelle +Gopinath M +GOTO Hayato +gousaiyang +gpiks +Greg Roodt +Greg Ward +Guilherme Espada +Guillaume Seguin +gutsytechster +Guy Rozendorn +Guy Tuval +gzpan123 +Hanjun Kim +Hari Charan +Harsh Vardhan +harupy +Harutaka Kawamura +hauntsaninja +Henrich Hartzer +Henry Schreiner +Herbert Pfennig +Holly Stotelmyer +Honnix +Hsiaoming Yang +Hugo Lopes Tavares +Hugo van Kemenade +Hugues Bruant +Hynek Schlawack +iamsrp-deshaw +Ian Bicking +Ian Cordasco +Ian Lee +Ian Stapleton Cordasco +Ian Wienand +Igor Kuzmitshov +Igor Sobreira +Ikko Ashimine +Ilan Schnell +Illia Volochii +Ilya Baryshev +Inada Naoki +Ionel Cristian Mărieș +Ionel Maries Cristian +Itamar Turner-Trauring +iTrooz +Ivan Pozdeev +J. Nick Koston +Jacob Kim +Jacob Walls +Jaime Sanz +Jake Lishman +jakirkham +Jakub Kuczys +Jakub Stasiak +Jakub Vysoky +Jakub Wilk +James Cleveland +James Curtin +James Firth +James Gerity +James Polley +Jan Pokorný +Jannis Leidel +Jarek Potiuk +jarondl +Jason Curtis +Jason R. Coombs +JasonMo +JasonMo1 +Jay Graves +Jean Abou Samra +Jean-Christophe Fillion-Robin +Jeff Barber +Jeff Dairiki +Jeff Widman +Jelmer Vernooij +jenix21 +Jeremy Fleischman +Jeremy Stanley +Jeremy Zafran +Jesse Rittner +Jiashuo Li +Jim Fisher +Jim Garrison +Jinzhe Zeng +Jiun Bae +Jivan Amara +Joa +Joe Bylund +Joe Michelini +Johannes Altmanninger +John Paton +John Sirois +John T. 
Wodder II +John-Scott Atlakson +johnthagen +Jon Banafato +Jon Dufresne +Jon Parise +Jonas Nockert +Jonathan Herbert +Joonatan Partanen +Joost Molenaar +Jorge Niedbalski +Joseph Bylund +Joseph Long +Josh Bronson +Josh Cannon +Josh Hansen +Josh Schneier +Joshua +JoshuaPerdue +Juan Luis Cano Rodríguez +Juanjo Bazán +Judah Rand +Julian Berman +Julian Gethmann +Julien Demoor +July Tikhonov +Jussi Kukkonen +Justin van Heek +jwg4 +Jyrki Pulliainen +Kai Chen +Kai Mueller +Kamal Bin Mustafa +Karolina Surma +kasium +kaustav haldar +keanemind +Keith Maxwell +Kelsey Hightower +Kenneth Belitzky +Kenneth Reitz +Kevin Burke +Kevin Carter +Kevin Frommelt +Kevin R Patterson +Kexuan Sun +Kit Randel +Klaas van Schelven +KOLANICH +konstin +kpinc +Krishan Bhasin +Krishna Oza +Kumar McMillan +Kuntal Majumder +Kurt McKee +Kyle Persohn +lakshmanaram +Laszlo Kiss-Kollar +Laurent Bristiel +Laurent LAPORTE +Laurie O +Laurie Opperman +layday +Leon Sasson +Lev Givon +Lincoln de Sousa +Lipis +lorddavidiii +Loren Carvalho +Lucas Cimon +Ludovic Gasc +Luis Medel +Lukas Geiger +Lukas Juhrich +Luke Macken +Luo Jiebin +luojiebin +luz.paz +László Kiss Kollár +M00nL1ght +Malcolm Smith +Marc Abramowitz +Marc Tamlyn +Marcus Smith +Mariatta +Mark Kohler +Mark McLoughlin +Mark Williams +Markus Hametner +Martey Dodoo +Martin Fischer +Martin Häcker +Martin Pavlasek +Masaki +Masklinn +Matej Stuchlik +Mathew Jennings +Mathieu Bridon +Mathieu Kniewallner +Matt Bacchi +Matt Good +Matt Maker +Matt Robenolt +Matt Wozniski +matthew +Matthew Einhorn +Matthew Feickert +Matthew Gilliard +Matthew Hughes +Matthew Iversen +Matthew Treinish +Matthew Trumbell +Matthew Willson +Matthias Bussonnier +mattip +Maurits van Rees +Max W Chase +Maxim Kurnikov +Maxime Rouyrre +mayeut +mbaluna +Md Sujauddin Sekh +mdebi +memoselyk +meowmeowcat +Michael +Michael Aquilina +Michael E. Karpeles +Michael Klich +Michael Mintz +Michael Williamson +michaelpacer +Michał Górny +Mickaël Schoentgen +Miguel Araujo Perez +Mihir Singh +Mike +Mike Hendricks +Min RK +MinRK +Miro Hrončok +Monica Baluna +montefra +Monty Taylor +morotti +mrKazzila +Muha Ajjan +Nadav Wexler +Nahuel Ambrosini +Nate Coraor +Nate Prewitt +Nathan Houghton +Nathaniel J. 
Smith +Nehal J Wani +Neil Botelho +Nguyễn Gia Phong +Nicholas Serra +Nick Coghlan +Nick Stenning +Nick Timkovich +Nicolas Bock +Nicole Harris +Nikhil Benesch +Nikhil Ladha +Nikita Chepanov +Nikolay Korolev +Nipunn Koorapati +Nitesh Sharma +Niyas Sait +Noah +Noah Gorny +Nowell Strite +NtaleGrey +nucccc +nvdv +OBITORASU +Ofek Lev +ofrinevo +Oleg Burnaev +Oliver Freund +Oliver Jeeves +Oliver Mannion +Oliver Tonnhofer +Olivier Girardot +Olivier Grisel +Ollie Rutherfurd +OMOTO Kenji +Omry Yadan +onlinejudge95 +Oren Held +Oscar Benjamin +Oz N Tiram +Pachwenko +Patrick Dubroy +Patrick Jenkins +Patrick Lawson +patricktokeeffe +Patrik Kopkan +Paul Ganssle +Paul Kehrer +Paul Moore +Paul Nasrat +Paul Oswald +Paul van der Linden +Paulus Schoutsen +Pavel Safronov +Pavithra Eswaramoorthy +Pawel Jasinski +Paweł Szramowski +Pekka Klärck +Peter Gessler +Peter Lisák +Peter Shen +Peter Waller +Petr Viktorin +petr-tik +Phaneendra Chiruvella +Phil Elson +Phil Freo +Phil Pennock +Phil Whelan +Philip Jägenstedt +Philip Molloy +Philippe Ombredanne +Pi Delport +Pierre-Yves Rofes +Pieter Degroote +pip +Prabakaran Kumaresshan +Prabhjyotsing Surjit Singh Sodhi +Prabhu Marappan +Pradyun Gedam +Prashant Sharma +Pratik Mallya +pre-commit-ci[bot] +Preet Thakkar +Preston Holmes +Przemek Wrzos +Pulkit Goyal +q0w +Qiangning Hong +Qiming Xu +qraqras +Quentin Lee +Quentin Pradet +R. David Murray +Rafael Caricio +Ralf Schmitt +Ran Benita +Randy Döring +Razzi Abuissa +rdb +Reece Dunham +Remi Rampin +Rene Dudfield +Riccardo Magliocchetti +Riccardo Schirone +Richard Jones +Richard Si +Ricky Ng-Adam +Rishi +rmorotti +RobberPhex +Robert Collins +Robert McGibbon +Robert Pollak +Robert T. McGibbon +robin elisha robinson +Rodney, Tiara +Roey Berman +Rohan Jain +Roman Bogorodskiy +Roman Donchenko +Romuald Brunet +ronaudinho +Ronny Pfannschmidt +Rory McCann +Ross Brattain +Roy Wellington Ⅳ +Ruairidh MacLeod +Russell Keith-Magee +Ryan Shepherd +Ryan Wooden +ryneeverett +Ryuma Asai +S. Guliaev +Sachi King +Salvatore Rinchiera +sandeepkiran-js +Sander Van Balen +Savio Jomton +schlamar +Scott Kitterman +Sean +seanj +Sebastian Jordan +Sebastian Schaetz +Segev Finer +SeongSoo Cho +Sepehr Rasouli +sepehrrasooli +Sergey Vasilyev +Seth Michael Larson +Seth Woodworth +Shahar Epstein +Shantanu +shenxianpeng +shireenrao +Shivansh-007 +Shixian Sheng +Shlomi Fish +Shovan Maity +Simeon Visser +Simon Cross +Simon Pichugin +sinoroc +sinscary +snook92 +socketubs +Sorin Sbarnea +Srinivas Nyayapati +Srishti Hegde +Stavros Korokithakis +Stefan Scherfke +Stefano Rivera +Stephan Erb +Stephen Payne +Stephen Rosen +stepshal +Steve (Gadget) Barnes +Steve Barnes +Steve Dower +Steve Kowalik +Steven Myint +Steven Silvester +stonebig +studioj +Stéphane Bidoul +Stéphane Bidoul (ACSONE) +Stéphane Klein +Sumana Harihareswara +Surbhi Sharma +Sviatoslav Sydorenko +Sviatoslav Sydorenko (Святослав Сидоренко) +Swat009 +Sylvain +Takayuki SHIMIZUKAWA +Taneli Hukkinen +tbeswick +Thiago +Thijs Triemstra +Thomas Fenzl +Thomas Grainger +Thomas Guettler +Thomas Johansson +Thomas Kluyver +Thomas Smith +Thomas VINCENT +Tim D. 
Smith +Tim Gates +Tim Harder +Tim Heap +tim smith +tinruufu +Tobias Hermann +Tom Forbes +Tom Freudenheim +Tom V +Tomas Hrnciar +Tomas Orsava +Tomer Chachamu +Tommi Enenkel | AnB +Tomáš Hrnčiar +Tony Beswick +Tony Narlock +Tony Zhaocheng Tan +TonyBeswick +toonarmycaptain +Toshio Kuratomi +toxinu +Travis Swicegood +Tushar Sadhwani +Tzu-ping Chung +Valentin Haenel +Victor Stinner +victorvpaulo +Vikram - Google +Viktor Szépe +Ville Skyttä +Vinay Sajip +Vincent Philippon +Vinicyus Macedo +Vipul Kumar +Vitaly Babiy +Vladimir Fokow +Vladimir Rutsky +W. Trevor King +Wil Tan +Wilfred Hughes +William Edwards +William ML Leslie +William T Olson +William Woodruff +Wilson Mo +wim glenn +Winson Luk +Wolfgang Maier +Wu Zhenyu +XAMES3 +Xavier Fernandez +Xianpeng Shen +xoviat +xtreak +YAMAMOTO Takashi +Yen Chi Hsuan +Yeray Diaz Diaz +Yoval P +Yu Jian +Yuan Jing Vincent Yan +Yuki Kobayashi +Yusuke Hayashi +zackzack38 +Zearin +Zhiping Deng +ziebam +Zvezdan Petkovic +Łukasz Langa +Роман Донченко +Семён Марьясин diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/LICENSE.txt b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000..8e7b65e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008-present The pip developers (see AUTHORS.txt file) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt new file mode 100644 index 0000000..d8b3b56 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt @@ -0,0 +1,13 @@ +Copyright 2012-2021 Eric Larson + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
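A brief aside on the RECORD rows earlier in this patch (the pip-25.2.dist-info/RECORD file): each row is path,hash,size, and per the wheel spec the hash token is the urlsafe-base64 encoding of the file's SHA-256 digest with the trailing '=' padding stripped. The Python sketch below recomputes such a token; it is an editorial illustration, not code from this patch. The zero-byte py.typed entries (size 0) give a self-contained check, since hashing empty input must reproduce their shared digest.

import base64
import hashlib

def record_hash(data: bytes) -> str:
    # Wheel RECORD hash token: urlsafe base64 of the SHA-256 digest,
    # with the trailing '=' padding stripped.
    digest = hashlib.sha256(data).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Every zero-byte file in the RECORD hunk carries this same token:
assert record_hash(b"") == "sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"

Installers use these rows both to verify file integrity and to know exactly which files to remove on uninstall.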
diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/certifi/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/certifi/LICENSE new file mode 100644 index 0000000..62b076c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/certifi/LICENSE @@ -0,0 +1,20 @@ +This package contains a modified version of ca-bundle.crt: + +ca-bundle.crt -- Bundle of CA Root Certificates + +This is a bundle of X.509 certificates of public Certificate Authorities +(CA). These were automatically extracted from Mozilla's root certificates +file (certdata.txt). This file can be found in the mozilla source tree: +https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt +It contains the certificates in PEM format and therefore +can be directly used with curl / libcurl / php_curl, or with +an Apache+mod_ssl webserver for SSL client authentication. +Just configure this file as the SSLCACertificateFile.# + +***** BEGIN LICENSE BLOCK ***** +This Source Code Form is subject to the terms of the Mozilla Public License, +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain +one at http://mozilla.org/MPL/2.0/. + +***** END LICENSE BLOCK ***** +@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt new file mode 100644 index 0000000..b9723b8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) 2024-present Stephen Rosen + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt new file mode 100644 index 0000000..c31ac56 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt @@ -0,0 +1,284 @@ +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. 
Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations (now Zope +Corporation, see http://www.zope.com). In 2001, the Python Software +Foundation (PSF, see http://www.python.org/psf/) was formed, a +non-profit organization created specifically to own Python-related +Intellectual Property. Zope Corporation is a sponsoring member of +the PSF. + +All Python releases are Open Source (see http://www.opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? (1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.2 2.1.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2.1 2.2 2002 PSF yes + 2.2.2 2.2.1 2002 PSF yes + 2.2.3 2.2.2 2003 PSF yes + 2.3 2.2.2 2002-2003 PSF yes + 2.3.1 2.3 2002-2003 PSF yes + 2.3.2 2.3.1 2002-2003 PSF yes + 2.3.3 2.3.2 2002-2003 PSF yes + 2.3.4 2.3.3 2004 PSF yes + 2.3.5 2.3.4 2005 PSF yes + 2.4 2.3 2004 PSF yes + 2.4.1 2.4 2005 PSF yes + 2.4.2 2.4.1 2005 PSF yes + 2.4.3 2.4.2 2006 PSF yes + 2.4.4 2.4.3 2006 PSF yes + 2.5 2.4 2006 PSF yes + 2.5.1 2.5 2007 PSF yes + 2.5.2 2.5.1 2008 PSF yes + 2.5.3 2.5.2 2008 PSF yes + 2.6 2.5 2008 PSF yes + 2.6.1 2.6 2008 PSF yes + 2.6.2 2.6.1 2009 PSF yes + 2.6.3 2.6.2 2009 PSF yes + 2.6.4 2.6.3 2009 PSF yes + 2.6.5 2.6.4 2010 PSF yes + 3.0 2.6 2008 PSF yes + 3.0.1 3.0 2009 PSF yes + 3.1 3.0.1 2009 PSF yes + 3.1.1 3.1 2009 PSF yes + 3.1.2 3.1 2010 PSF yes + 3.2 3.1 2010 PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 +Python Software Foundation; All Rights Reserved" are retained in Python alone or +in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. 
BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the Internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the Internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. 
CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/distro/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/distro/LICENSE new file mode 100644 index 0000000..e06d208 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/distro/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md new file mode 100644 index 0000000..19b6b45 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md @@ -0,0 +1,31 @@ +BSD 3-Clause License + +Copyright (c) 2013-2024, Kim Davies and contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/msgpack/COPYING b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/msgpack/COPYING new file mode 100644 index 0000000..f067af3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/msgpack/COPYING @@ -0,0 +1,14 @@ +Copyright (C) 2008-2011 INADA Naoki + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE new file mode 100644 index 0000000..6f62d44 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE @@ -0,0 +1,3 @@ +This software is made available under the terms of *either* of the licenses +found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made +under the terms of *both* these licenses. 
diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE new file mode 100644 index 0000000..f433b1a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD new file mode 100644 index 0000000..42ce7b7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD @@ -0,0 +1,23 @@ +Copyright (c) Donald Stufft and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE new file mode 100644 index 0000000..1bb5a44 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE @@ -0,0 +1,17 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE new file mode 100644 index 0000000..f35fed9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2010-202x The platformdirs developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pygments/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pygments/LICENSE new file mode 100644 index 0000000..446a1a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pygments/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2006-2022 by the respective authors (see AUTHORS file). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE new file mode 100644 index 0000000..b0ae9db --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Thomas Kluyver + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/requests/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/requests/LICENSE new file mode 100644 index 0000000..67db858 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/requests/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE new file mode 100644 index 0000000..b907776 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2018, Tzu-ping Chung + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/rich/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/rich/LICENSE new file mode 100644 index 0000000..4415505 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/rich/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2020 Will McGugan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE new file mode 100644 index 0000000..e859590 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Taneli Hukkinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE-HEADER b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE-HEADER new file mode 100644 index 0000000..aba78dd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli/LICENSE-HEADER @@ -0,0 +1,3 @@ +SPDX-License-Identifier: MIT +SPDX-FileCopyrightText: 2021 Taneli Hukkinen +Licensed to PSF under a Contributor Agreement. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE new file mode 100644 index 0000000..e859590 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Taneli Hukkinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/truststore/LICENSE b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/truststore/LICENSE new file mode 100644 index 0000000..7ec568c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/truststore/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2022 Seth Michael Larson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt new file mode 100644 index 0000000..429a176 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2008-2020 Andrey Petrov and contributors (see CONTRIBUTORS.txt) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/top_level.txt b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/top_level.txt
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip-25.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+pip
diff --git a/.venv/lib/python3.12/site-packages/pip/__init__.py b/.venv/lib/python3.12/site-packages/pip/__init__.py
new file mode 100644
index 0000000..c81f381
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/__init__.py
@@ -0,0 +1,13 @@
+from __future__ import annotations
+
+__version__ = "25.2"
+
+
+def main(args: list[str] | None = None) -> int:
+    """This is an internal API only meant for use by pip's own console scripts.
+
+    For additional details, see https://github.com/pypa/pip/issues/7498.
+    """
+    from pip._internal.utils.entrypoints import _wrapper
+
+    return _wrapper(args)
diff --git a/.venv/lib/python3.12/site-packages/pip/__main__.py b/.venv/lib/python3.12/site-packages/pip/__main__.py
new file mode 100644
index 0000000..5991326
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/__main__.py
@@ -0,0 +1,24 @@
+import os
+import sys
+
+# Remove '' and the current working directory from the first entry
+# of sys.path, if present, to avoid using the current directory
+# in pip commands check, freeze, install, list and show,
+# when invoked as python -m pip
+if sys.path[0] in ("", os.getcwd()):
+    sys.path.pop(0)
+
+# If we are running from a wheel, add the wheel to sys.path
+# This allows the usage python pip-*.whl/pip install pip-*.whl
+if __package__ == "":
+    # __file__ is pip-*.whl/pip/__main__.py
+    # first dirname call strips off '/__main__.py', second strips off '/pip'
+    # Resulting path is the name of the wheel itself
+    # Add that to sys.path so we can import pip
+    path = os.path.dirname(os.path.dirname(__file__))
+    sys.path.insert(0, path)
+
+if __name__ == "__main__":
+    from pip._internal.cli.main import main as _main
+
+    sys.exit(_main())
diff --git a/.venv/lib/python3.12/site-packages/pip/__pip-runner__.py b/.venv/lib/python3.12/site-packages/pip/__pip-runner__.py
new file mode 100644
index 0000000..d6be157
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/__pip-runner__.py
@@ -0,0 +1,50 @@
+"""Execute exactly this copy of pip, within a different environment.
+
+This file is named as it is, to ensure that this module can't be imported via
+an import statement.
+"""
+
+# /!\ This version compatibility check section must be Python 2 compatible. /!\
+
+import sys
+
+# Copied from pyproject.toml
+PYTHON_REQUIRES = (3, 9)
+
+
+def version_str(version):  # type: ignore
+    return ".".join(str(v) for v in version)
+
+
+if sys.version_info[:2] < PYTHON_REQUIRES:
+    raise SystemExit(
+        "This version of pip does not support python {} (requires >={}).".format(
+            version_str(sys.version_info[:2]), version_str(PYTHON_REQUIRES)
+        )
+    )
+
+# From here on, we can use Python 3 features, but the syntax must remain
+# Python 2 compatible.
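The two-step layout above (a Python 2 parseable guard first, the real logic after) is what lets an unsupported interpreter fail with the explicit SystemExit message instead of a SyntaxError. A minimal sketch of the same pattern, with a hypothetical tool name standing in for pip:

    import sys

    REQUIRED = (3, 9)  # hypothetical minimum, mirroring PYTHON_REQUIRES above


    def dotted(version):
        # No f-strings or other modern syntax before the check, so even a
        # Python 2 interpreter can parse this far and reach the message.
        return ".".join(str(v) for v in version)


    if sys.version_info[:2] < REQUIRED:
        raise SystemExit(
            "mytool requires Python >= {} (found {}).".format(
                dotted(REQUIRED), dotted(sys.version_info[:2])
            )
        )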
+
+import runpy  # noqa: E402
+from importlib.machinery import PathFinder  # noqa: E402
+from os.path import dirname  # noqa: E402
+
+PIP_SOURCES_ROOT = dirname(dirname(__file__))
+
+
+class PipImportRedirectingFinder:
+    @classmethod
+    def find_spec(self, fullname, path=None, target=None):  # type: ignore
+        if fullname != "pip":
+            return None
+
+        spec = PathFinder.find_spec(fullname, [PIP_SOURCES_ROOT], target)
+        assert spec, (PIP_SOURCES_ROOT, fullname)
+        return spec
+
+
+sys.meta_path.insert(0, PipImportRedirectingFinder())
+
+assert __name__ == "__main__", "Cannot run __pip-runner__.py as a non-main module"
+runpy.run_module("pip", run_name="__main__", alter_sys=True)
diff --git a/.venv/lib/python3.12/site-packages/pip/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..8748f6f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/__pycache__/__main__.cpython-312.pyc
new file mode 100644
index 0000000..f3f4e96
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/__pycache__/__main__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/__pycache__/__pip-runner__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/__pycache__/__pip-runner__.cpython-312.pyc
new file mode 100644
index 0000000..ef9f041
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/__pycache__/__pip-runner__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/__init__.py
new file mode 100644
index 0000000..24d0baf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/__init__.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from pip._internal.utils import _log
+
+# init_logging() must be called before any call to logging.getLogger()
+# which happens at import of most modules.
+_log.init_logging()
+
+
+def main(args: list[str] | None = None) -> int:
+    """This is preserved for old console scripts that may still be referencing
+    it.
+
+    For additional details, see https://github.com/pypa/pip/issues/7498.
+ """ + from pip._internal.utils.entrypoints import _wrapper + + return _wrapper(args) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..04b4974 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/build_env.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/build_env.cpython-312.pyc new file mode 100644 index 0000000..51cd002 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/build_env.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/cache.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/cache.cpython-312.pyc new file mode 100644 index 0000000..9dfc6e5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/cache.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/configuration.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/configuration.cpython-312.pyc new file mode 100644 index 0000000..a962b41 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/configuration.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..e073972 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/main.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/main.cpython-312.pyc new file mode 100644 index 0000000..22accc3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/main.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/pyproject.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/pyproject.cpython-312.pyc new file mode 100644 index 0000000..841a865 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/pyproject.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-312.pyc new file mode 100644 index 0000000..beab0ea Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-312.pyc new file mode 100644 index 0000000..210a9bc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/build_env.py b/.venv/lib/python3.12/site-packages/pip/_internal/build_env.py new file mode 100644 index 0000000..3a246a1 --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/pip/_internal/build_env.py @@ -0,0 +1,349 @@ +"""Build Environment used for isolation during sdist building""" + +from __future__ import annotations + +import logging +import os +import pathlib +import site +import sys +import textwrap +from collections import OrderedDict +from collections.abc import Iterable +from types import TracebackType +from typing import TYPE_CHECKING, Protocol + +from pip._vendor.packaging.version import Version + +from pip import __file__ as pip_location +from pip._internal.cli.spinners import open_spinner +from pip._internal.locations import get_platlib, get_purelib, get_scheme +from pip._internal.metadata import get_default_environment, get_environment +from pip._internal.utils.logging import VERBOSE +from pip._internal.utils.packaging import get_requirement +from pip._internal.utils.subprocess import call_subprocess +from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds + +if TYPE_CHECKING: + from pip._internal.index.package_finder import PackageFinder + from pip._internal.req.req_install import InstallRequirement + +logger = logging.getLogger(__name__) + + +def _dedup(a: str, b: str) -> tuple[str] | tuple[str, str]: + return (a, b) if a != b else (a,) + + +class _Prefix: + def __init__(self, path: str) -> None: + self.path = path + self.setup = False + scheme = get_scheme("", prefix=path) + self.bin_dir = scheme.scripts + self.lib_dirs = _dedup(scheme.purelib, scheme.platlib) + + +def get_runnable_pip() -> str: + """Get a file to pass to a Python executable, to run the currently-running pip. + + This is used to run a pip subprocess, for installing requirements into the build + environment. + """ + source = pathlib.Path(pip_location).resolve().parent + + if not source.is_dir(): + # This would happen if someone is using pip from inside a zip file. In that + # case, we can use that directly. + return str(source) + + return os.fsdecode(source / "__pip-runner__.py") + + +def _get_system_sitepackages() -> set[str]: + """Get system site packages + + Usually from site.getsitepackages, + but fallback on `get_purelib()/get_platlib()` if unavailable + (e.g. in a virtualenv created by virtualenv<20) + + Returns normalized set of strings. + """ + if hasattr(site, "getsitepackages"): + system_sites = site.getsitepackages() + else: + # virtualenv < 20 overwrites site.py without getsitepackages + # fallback on get_purelib/get_platlib. + # this is known to miss things, but shouldn't in the cases + # where getsitepackages() has been removed (inside a virtualenv) + system_sites = [get_purelib(), get_platlib()] + return {os.path.normcase(path) for path in system_sites} + + +class BuildEnvironmentInstaller(Protocol): + """ + Interface for installing build dependencies into an isolated build + environment. + """ + + def install( + self, + requirements: Iterable[str], + prefix: _Prefix, + *, + kind: str, + for_req: InstallRequirement | None, + ) -> None: ... + + +class SubprocessBuildEnvironmentInstaller: + """ + Install build dependencies by calling pip in a subprocess. 
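+
+    Concretely, this re-invokes the currently-running pip (located via
+    get_runnable_pip()) under sys.executable, replaying the finder's options
+    (index URLs, find-links, proxy, certificates, pre-release and binary
+    preferences) as command-line flags, so the child process resolves build
+    requirements the same way the parent would.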
+ """ + + def __init__(self, finder: PackageFinder) -> None: + self.finder = finder + + def install( + self, + requirements: Iterable[str], + prefix: _Prefix, + *, + kind: str, + for_req: InstallRequirement | None, + ) -> None: + finder = self.finder + args: list[str] = [ + sys.executable, + get_runnable_pip(), + "install", + "--ignore-installed", + "--no-user", + "--prefix", + prefix.path, + "--no-warn-script-location", + "--disable-pip-version-check", + # As the build environment is ephemeral, it's wasteful to + # pre-compile everything, especially as not every Python + # module will be used/compiled in most cases. + "--no-compile", + # The prefix specified two lines above, thus + # target from config file or env var should be ignored + "--target", + "", + ] + if logger.getEffectiveLevel() <= logging.DEBUG: + args.append("-vv") + elif logger.getEffectiveLevel() <= VERBOSE: + args.append("-v") + for format_control in ("no_binary", "only_binary"): + formats = getattr(finder.format_control, format_control) + args.extend( + ( + "--" + format_control.replace("_", "-"), + ",".join(sorted(formats or {":none:"})), + ) + ) + + index_urls = finder.index_urls + if index_urls: + args.extend(["-i", index_urls[0]]) + for extra_index in index_urls[1:]: + args.extend(["--extra-index-url", extra_index]) + else: + args.append("--no-index") + for link in finder.find_links: + args.extend(["--find-links", link]) + + if finder.proxy: + args.extend(["--proxy", finder.proxy]) + for host in finder.trusted_hosts: + args.extend(["--trusted-host", host]) + if finder.custom_cert: + args.extend(["--cert", finder.custom_cert]) + if finder.client_cert: + args.extend(["--client-cert", finder.client_cert]) + if finder.allow_all_prereleases: + args.append("--pre") + if finder.prefer_binary: + args.append("--prefer-binary") + args.append("--") + args.extend(requirements) + with open_spinner(f"Installing {kind}") as spinner: + call_subprocess( + args, + command_desc=f"pip subprocess to install {kind}", + spinner=spinner, + ) + + +class BuildEnvironment: + """Creates and manages an isolated environment to install build deps""" + + def __init__(self, installer: BuildEnvironmentInstaller) -> None: + self.installer = installer + temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True) + + self._prefixes = OrderedDict( + (name, _Prefix(os.path.join(temp_dir.path, name))) + for name in ("normal", "overlay") + ) + + self._bin_dirs: list[str] = [] + self._lib_dirs: list[str] = [] + for prefix in reversed(list(self._prefixes.values())): + self._bin_dirs.append(prefix.bin_dir) + self._lib_dirs.extend(prefix.lib_dirs) + + # Customize site to: + # - ensure .pth files are honored + # - prevent access to system site packages + system_sites = _get_system_sitepackages() + + self._site_dir = os.path.join(temp_dir.path, "site") + if not os.path.exists(self._site_dir): + os.mkdir(self._site_dir) + with open( + os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8" + ) as fp: + fp.write( + textwrap.dedent( + """ + import os, site, sys + + # First, drop system-sites related paths. + original_sys_path = sys.path[:] + known_paths = set() + for path in {system_sites!r}: + site.addsitedir(path, known_paths=known_paths) + system_paths = set( + os.path.normcase(path) + for path in sys.path[len(original_sys_path):] + ) + original_sys_path = [ + path for path in original_sys_path + if os.path.normcase(path) not in system_paths + ] + sys.path = original_sys_path + + # Second, add lib directories. 
+                    # ensuring .pth files are processed.
+                    for path in {lib_dirs!r}:
+                        assert path not in sys.path
+                        site.addsitedir(path)
+                    """
+                ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)
+            )
+
+    def __enter__(self) -> None:
+        self._save_env = {
+            name: os.environ.get(name, None)
+            for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH")
+        }
+
+        path = self._bin_dirs[:]
+        old_path = self._save_env["PATH"]
+        if old_path:
+            path.extend(old_path.split(os.pathsep))
+
+        pythonpath = [self._site_dir]
+
+        os.environ.update(
+            {
+                "PATH": os.pathsep.join(path),
+                "PYTHONNOUSERSITE": "1",
+                "PYTHONPATH": os.pathsep.join(pythonpath),
+            }
+        )
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        for varname, old_value in self._save_env.items():
+            if old_value is None:
+                os.environ.pop(varname, None)
+            else:
+                os.environ[varname] = old_value
+
+    def check_requirements(
+        self, reqs: Iterable[str]
+    ) -> tuple[set[tuple[str, str]], set[str]]:
+        """Return 2 sets:
+        - conflicting requirements: set of (installed, wanted) reqs tuples
+        - missing requirements: set of reqs
+        """
+        missing = set()
+        conflicting = set()
+        if reqs:
+            env = (
+                get_environment(self._lib_dirs)
+                if hasattr(self, "_lib_dirs")
+                else get_default_environment()
+            )
+            for req_str in reqs:
+                req = get_requirement(req_str)
+                # We're explicitly evaluating with an empty extra value, since build
+                # environments are not provided any mechanism to select specific extras.
+                if req.marker is not None and not req.marker.evaluate({"extra": ""}):
+                    continue
+                dist = env.get_distribution(req.name)
+                if not dist:
+                    missing.add(req_str)
+                    continue
+                if isinstance(dist.version, Version):
+                    installed_req_str = f"{req.name}=={dist.version}"
+                else:
+                    installed_req_str = f"{req.name}==={dist.version}"
+                if not req.specifier.contains(dist.version, prereleases=True):
+                    conflicting.add((installed_req_str, req_str))
+                # FIXME: Consider direct URL?
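+        # conflicting pairs each installed requirement string (pinned with ==
+        # for PEP 440 versions, === otherwise, so the exact text still matches)
+        # with the requirement that wanted something else; missing holds the
+        # requirement strings that matched no installed distribution.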
+        return conflicting, missing
+
+    def install_requirements(
+        self,
+        requirements: Iterable[str],
+        prefix_as_string: str,
+        *,
+        kind: str,
+        for_req: InstallRequirement | None = None,
+    ) -> None:
+        prefix = self._prefixes[prefix_as_string]
+        assert not prefix.setup
+        prefix.setup = True
+        if not requirements:
+            return
+        self.installer.install(requirements, prefix, kind=kind, for_req=for_req)
+
+
+class NoOpBuildEnvironment(BuildEnvironment):
+    """A no-op drop-in replacement for BuildEnvironment"""
+
+    def __init__(self) -> None:
+        pass
+
+    def __enter__(self) -> None:
+        pass
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        pass
+
+    def cleanup(self) -> None:
+        pass
+
+    def install_requirements(
+        self,
+        requirements: Iterable[str],
+        prefix_as_string: str,
+        *,
+        kind: str,
+        for_req: InstallRequirement | None = None,
+    ) -> None:
+        raise NotImplementedError()
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cache.py b/.venv/lib/python3.12/site-packages/pip/_internal/cache.py
new file mode 100644
index 0000000..ce98f28
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/cache.py
@@ -0,0 +1,291 @@
+"""Cache Management"""
+
+from __future__ import annotations
+
+import hashlib
+import json
+import logging
+import os
+from pathlib import Path
+from typing import Any
+
+from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.exceptions import InvalidWheelFilename
+from pip._internal.models.direct_url import DirectUrl
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
+from pip._internal.utils.urls import path_to_url
+
+logger = logging.getLogger(__name__)
+
+ORIGIN_JSON_NAME = "origin.json"
+
+
+def _hash_dict(d: dict[str, str]) -> str:
+    """Return a stable sha224 of a dictionary."""
+    s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
+    return hashlib.sha224(s.encode("ascii")).hexdigest()
+
+
+class Cache:
+    """An abstract class - provides cache directories for data from links
+
+    :param cache_dir: The root of the cache.
+    """
+
+    def __init__(self, cache_dir: str) -> None:
+        super().__init__()
+        assert not cache_dir or os.path.isabs(cache_dir)
+        self.cache_dir = cache_dir or None
+
+    def _get_cache_path_parts(self, link: Link) -> list[str]:
+        """Get the parts of the path that must be os.path.join()ed with cache_dir"""
+
+        # We want to generate a URL to use as our cache key; we don't want to
+        # just reuse the URL because it might have other items in the fragment
+        # and we don't care about those.
+        key_parts = {"url": link.url_without_fragment}
+        if link.hash_name is not None and link.hash is not None:
+            key_parts[link.hash_name] = link.hash
+        if link.subdirectory_fragment:
+            key_parts["subdirectory"] = link.subdirectory_fragment
+
+        # Include interpreter name, major and minor version in cache key
+        # to cope with ill-behaved sdists that build a different wheel
+        # depending on the python version their setup.py is being run on,
+        # and don't encode the difference in compatibility tags.
+ # https://github.com/pypa/pip/issues/7296 + key_parts["interpreter_name"] = interpreter_name() + key_parts["interpreter_version"] = interpreter_version() + + # Encode our key url with sha224, we'll use this because it has similar + # security properties to sha256, but with a shorter total output (and + # thus less secure). However the differences don't make a lot of + # difference for our use case here. + hashed = _hash_dict(key_parts) + + # We want to nest the directories some to prevent having a ton of top + # level directories where we might run out of sub directories on some + # FS. + parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] + + return parts + + def _get_candidates(self, link: Link, canonical_package_name: str) -> list[Any]: + can_not_cache = not self.cache_dir or not canonical_package_name or not link + if can_not_cache: + return [] + + path = self.get_path_for_link(link) + if os.path.isdir(path): + return [(candidate, path) for candidate in os.listdir(path)] + return [] + + def get_path_for_link(self, link: Link) -> str: + """Return a directory to store cached items in for link.""" + raise NotImplementedError() + + def get( + self, + link: Link, + package_name: str | None, + supported_tags: list[Tag], + ) -> Link: + """Returns a link to a cached item if it exists, otherwise returns the + passed link. + """ + raise NotImplementedError() + + +class SimpleWheelCache(Cache): + """A cache of wheels for future installs.""" + + def __init__(self, cache_dir: str) -> None: + super().__init__(cache_dir) + + def get_path_for_link(self, link: Link) -> str: + """Return a directory to store cached wheels for link + + Because there are M wheels for any one sdist, we provide a directory + to cache them in, and then consult that directory when looking up + cache hits. + + We only insert things into the cache if they have plausible version + numbers, so that we don't contaminate the cache with things that were + not unique. E.g. ./package might have dozens of installs done for it + and build a version of 0.0...and if we built and cached a wheel, we'd + end up using the same wheel even if the source has been edited. + + :param link: The link of the sdist for which this will cache wheels. 
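+
+        Illustrative example (hypothetical hash values): the sha224 of the
+        cache key is split 2/2/2/50, so the returned path has the shape
+        ``<cache_dir>/wheels/1a/2b/3c/<remaining 50 hex digits>/``.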
+ """ + parts = self._get_cache_path_parts(link) + assert self.cache_dir + # Store wheels within the root cache_dir + return os.path.join(self.cache_dir, "wheels", *parts) + + def get( + self, + link: Link, + package_name: str | None, + supported_tags: list[Tag], + ) -> Link: + candidates = [] + + if not package_name: + return link + + canonical_package_name = canonicalize_name(package_name) + for wheel_name, wheel_dir in self._get_candidates(link, canonical_package_name): + try: + wheel = Wheel(wheel_name) + except InvalidWheelFilename: + continue + if canonicalize_name(wheel.name) != canonical_package_name: + logger.debug( + "Ignoring cached wheel %s for %s as it " + "does not match the expected distribution name %s.", + wheel_name, + link, + package_name, + ) + continue + if not wheel.supported(supported_tags): + # Built for a different python/arch/etc + continue + candidates.append( + ( + wheel.support_index_min(supported_tags), + wheel_name, + wheel_dir, + ) + ) + + if not candidates: + return link + + _, wheel_name, wheel_dir = min(candidates) + return Link(path_to_url(os.path.join(wheel_dir, wheel_name))) + + +class EphemWheelCache(SimpleWheelCache): + """A SimpleWheelCache that creates it's own temporary cache directory""" + + def __init__(self) -> None: + self._temp_dir = TempDirectory( + kind=tempdir_kinds.EPHEM_WHEEL_CACHE, + globally_managed=True, + ) + + super().__init__(self._temp_dir.path) + + +class CacheEntry: + def __init__( + self, + link: Link, + persistent: bool, + ): + self.link = link + self.persistent = persistent + self.origin: DirectUrl | None = None + origin_direct_url_path = Path(self.link.file_path).parent / ORIGIN_JSON_NAME + if origin_direct_url_path.exists(): + try: + self.origin = DirectUrl.from_json( + origin_direct_url_path.read_text(encoding="utf-8") + ) + except Exception as e: + logger.warning( + "Ignoring invalid cache entry origin file %s for %s (%s)", + origin_direct_url_path, + link.filename, + e, + ) + + +class WheelCache(Cache): + """Wraps EphemWheelCache and SimpleWheelCache into a single Cache + + This Cache allows for gracefully degradation, using the ephem wheel cache + when a certain link is not found in the simple wheel cache first. + """ + + def __init__(self, cache_dir: str) -> None: + super().__init__(cache_dir) + self._wheel_cache = SimpleWheelCache(cache_dir) + self._ephem_cache = EphemWheelCache() + + def get_path_for_link(self, link: Link) -> str: + return self._wheel_cache.get_path_for_link(link) + + def get_ephem_path_for_link(self, link: Link) -> str: + return self._ephem_cache.get_path_for_link(link) + + def get( + self, + link: Link, + package_name: str | None, + supported_tags: list[Tag], + ) -> Link: + cache_entry = self.get_cache_entry(link, package_name, supported_tags) + if cache_entry is None: + return link + return cache_entry.link + + def get_cache_entry( + self, + link: Link, + package_name: str | None, + supported_tags: list[Tag], + ) -> CacheEntry | None: + """Returns a CacheEntry with a link to a cached item if it exists or + None. The cache entry indicates if the item was found in the persistent + or ephemeral cache. 
+ """ + retval = self._wheel_cache.get( + link=link, + package_name=package_name, + supported_tags=supported_tags, + ) + if retval is not link: + return CacheEntry(retval, persistent=True) + + retval = self._ephem_cache.get( + link=link, + package_name=package_name, + supported_tags=supported_tags, + ) + if retval is not link: + return CacheEntry(retval, persistent=False) + + return None + + @staticmethod + def record_download_origin(cache_dir: str, download_info: DirectUrl) -> None: + origin_path = Path(cache_dir) / ORIGIN_JSON_NAME + if origin_path.exists(): + try: + origin = DirectUrl.from_json(origin_path.read_text(encoding="utf-8")) + except Exception as e: + logger.warning( + "Could not read origin file %s in cache entry (%s). " + "Will attempt to overwrite it.", + origin_path, + e, + ) + else: + # TODO: use DirectUrl.equivalent when + # https://github.com/pypa/pip/pull/10564 is merged. + if origin.url != download_info.url: + logger.warning( + "Origin URL %s in cache entry %s does not match download URL " + "%s. This is likely a pip bug or a cache corruption issue. " + "Will overwrite it with the new value.", + origin.url, + cache_dir, + download_info.url, + ) + origin_path.write_text(download_info.to_json(), encoding="utf-8") diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__init__.py new file mode 100644 index 0000000..5fcddf5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__init__.py @@ -0,0 +1,3 @@ +"""Subpackage containing all of pip's command line interface related code""" + +# This file intentionally does not import submodules diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..6cbd814 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-312.pyc new file mode 100644 index 0000000..f22ec62 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-312.pyc new file mode 100644 index 0000000..8862251 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-312.pyc new file mode 100644 index 0000000..c010091 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-312.pyc new file mode 100644 index 0000000..eb2b34b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-312.pyc 
differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-312.pyc new file mode 100644 index 0000000..b00b7ef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/main.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/main.cpython-312.pyc new file mode 100644 index 0000000..bdd079f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/main.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-312.pyc new file mode 100644 index 0000000..bc94a08 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/parser.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/parser.cpython-312.pyc new file mode 100644 index 0000000..5ac6b9b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/parser.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-312.pyc new file mode 100644 index 0000000..0916bef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-312.pyc new file mode 100644 index 0000000..76d7dad Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-312.pyc new file mode 100644 index 0000000..d524861 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-312.pyc new file mode 100644 index 0000000..915bc40 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/autocompletion.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/autocompletion.py new file mode 100644 index 0000000..f22cd11 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/autocompletion.py @@ -0,0 +1,184 @@ +"""Logic that powers autocompletion installed by ``pip completion``.""" + +from __future__ import annotations + +import optparse +import os +import sys +from collections.abc import Iterable +from itertools import chain +from typing import Any + +from 
pip._internal.cli.main_parser import create_main_parser
+from pip._internal.commands import commands_dict, create_command
+from pip._internal.metadata import get_default_environment
+
+
+def autocomplete() -> None:
+    """Entry point for completion of main and subcommand options."""
+    # Don't complete if user hasn't sourced bash_completion file.
+    if "PIP_AUTO_COMPLETE" not in os.environ:
+        return
+    # Don't complete if autocompletion environment variables
+    # are not present
+    if not os.environ.get("COMP_WORDS") or not os.environ.get("COMP_CWORD"):
+        return
+    cwords = os.environ["COMP_WORDS"].split()[1:]
+    cword = int(os.environ["COMP_CWORD"])
+    try:
+        current = cwords[cword - 1]
+    except IndexError:
+        current = ""
+
+    parser = create_main_parser()
+    subcommands = list(commands_dict)
+    options = []
+
+    # subcommand
+    subcommand_name: str | None = None
+    for word in cwords:
+        if word in subcommands:
+            subcommand_name = word
+            break
+    # subcommand options
+    if subcommand_name is not None:
+        # special case: 'help' subcommand has no options
+        if subcommand_name == "help":
+            sys.exit(1)
+        # special case: list locally installed dists for show and uninstall
+        should_list_installed = not current.startswith("-") and subcommand_name in [
+            "show",
+            "uninstall",
+        ]
+        if should_list_installed:
+            env = get_default_environment()
+            lc = current.lower()
+            installed = [
+                dist.canonical_name
+                for dist in env.iter_installed_distributions(local_only=True)
+                if dist.canonical_name.startswith(lc)
+                and dist.canonical_name not in cwords[1:]
+            ]
+            # if there are no dists installed, fall back to option completion
+            if installed:
+                for dist in installed:
+                    print(dist)
+                sys.exit(1)
+
+        should_list_installables = (
+            not current.startswith("-") and subcommand_name == "install"
+        )
+        if should_list_installables:
+            for path in auto_complete_paths(current, "path"):
+                print(path)
+            sys.exit(1)
+
+        subcommand = create_command(subcommand_name)
+
+        for opt in subcommand.parser.option_list_all:
+            if opt.help != optparse.SUPPRESS_HELP:
+                options += [
+                    (opt_str, opt.nargs) for opt_str in opt._long_opts + opt._short_opts
+                ]
+
+        # filter out previously specified options from available options
+        prev_opts = [x.split("=")[0] for x in cwords[1 : cword - 1]]
+        options = [(x, v) for (x, v) in options if x not in prev_opts]
+        # filter options by current input
+        options = [(k, v) for k, v in options if k.startswith(current)]
+        # get completion type given cwords and available subcommand options
+        completion_type = get_path_completion_type(
+            cwords,
+            cword,
+            subcommand.parser.option_list_all,
+        )
+        # get completion files and directories if ``completion_type`` is
+        # ``<file>``, ``<dir>`` or ``<path>``
+        if completion_type:
+            paths = auto_complete_paths(current, completion_type)
+            options = [(path, 0) for path in paths]
+        for option in options:
+            opt_label = option[0]
+            # append '=' to options which require args
+            if option[1] and option[0][:2] == "--":
+                opt_label += "="
+            print(opt_label)
+
+        # Complete sub-commands (unless one is already given).
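+        # Illustrative: a subcommand whose handler_map() returned a
+        # hypothetical {"versions": handler} would complete "ver" to
+        # "versions" here.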
+ if not any(name in cwords for name in subcommand.handler_map()): + for handler_name in subcommand.handler_map(): + if handler_name.startswith(current): + print(handler_name) + else: + # show main parser options only when necessary + + opts = [i.option_list for i in parser.option_groups] + opts.append(parser.option_list) + flattened_opts = chain.from_iterable(opts) + if current.startswith("-"): + for opt in flattened_opts: + if opt.help != optparse.SUPPRESS_HELP: + subcommands += opt._long_opts + opt._short_opts + else: + # get completion type given cwords and all available options + completion_type = get_path_completion_type(cwords, cword, flattened_opts) + if completion_type: + subcommands = list(auto_complete_paths(current, completion_type)) + + print(" ".join([x for x in subcommands if x.startswith(current)])) + sys.exit(1) + + +def get_path_completion_type( + cwords: list[str], cword: int, opts: Iterable[Any] +) -> str | None: + """Get the type of path completion (``file``, ``dir``, ``path`` or None) + + :param cwords: same as the environmental variable ``COMP_WORDS`` + :param cword: same as the environmental variable ``COMP_CWORD`` + :param opts: The available options to check + :return: path completion type (``file``, ``dir``, ``path`` or None) + """ + if cword < 2 or not cwords[cword - 2].startswith("-"): + return None + for opt in opts: + if opt.help == optparse.SUPPRESS_HELP: + continue + for o in str(opt).split("/"): + if cwords[cword - 2].split("=")[0] == o: + if not opt.metavar or any( + x in ("path", "file", "dir") for x in opt.metavar.split("/") + ): + return opt.metavar + return None + + +def auto_complete_paths(current: str, completion_type: str) -> Iterable[str]: + """If ``completion_type`` is ``file`` or ``path``, list all regular files + and directories starting with ``current``; otherwise only list directories + starting with ``current``. 
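+
+    Illustrative example (hypothetical paths): ``current="./sr"`` with
+    ``completion_type="dir"`` may yield ``./src/``; directories are yielded
+    with a trailing separator.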
+
+    :param current: The word to be completed
+    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
+    :return: A generator of regular files and/or directories
+    """
+    directory, filename = os.path.split(current)
+    current_path = os.path.abspath(directory)
+    # Don't complete paths if they can't be accessed
+    if not os.access(current_path, os.R_OK):
+        return
+    filename = os.path.normcase(filename)
+    # list all files that start with ``filename``
+    file_list = (
+        x for x in os.listdir(current_path) if os.path.normcase(x).startswith(filename)
+    )
+    for f in file_list:
+        opt = os.path.join(current_path, f)
+        comp_file = os.path.normcase(os.path.join(directory, f))
+        # complete regular files when there is not ``<dir>`` after option
+        # complete directories when there is ``<file>``, ``<path>`` or
+        # ``<dir>`` after option
+        if completion_type != "dir" and os.path.isfile(opt):
+            yield comp_file
+        elif os.path.isdir(opt):
+            yield os.path.join(comp_file, "")
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/base_command.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/base_command.py
new file mode 100644
index 0000000..7acc29c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/base_command.py
@@ -0,0 +1,244 @@
+"""Base Command class, and related routines"""
+
+from __future__ import annotations
+
+import logging
+import logging.config
+import optparse
+import os
+import sys
+import traceback
+from optparse import Values
+from typing import Callable
+
+from pip._vendor.rich import reconfigure
+from pip._vendor.rich import traceback as rich_traceback
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.command_context import CommandContextMixIn
+from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
+from pip._internal.cli.status_codes import (
+    ERROR,
+    PREVIOUS_BUILD_DIR_ERROR,
+    UNKNOWN_ERROR,
+    VIRTUALENV_NOT_FOUND,
+)
+from pip._internal.exceptions import (
+    BadCommand,
+    CommandError,
+    DiagnosticPipError,
+    InstallationError,
+    NetworkConnectionError,
+    PreviousBuildDirError,
+)
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
+from pip._internal.utils.misc import get_prog, normalize_path
+from pip._internal.utils.temp_dir import TempDirectoryTypeRegistry as TempDirRegistry
+from pip._internal.utils.temp_dir import global_tempdir_manager, tempdir_registry
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+__all__ = ["Command"]
+
+logger = logging.getLogger(__name__)
+
+
+class Command(CommandContextMixIn):
+    usage: str = ""
+    ignore_require_venv: bool = False
+
+    def __init__(self, name: str, summary: str, isolated: bool = False) -> None:
+        super().__init__()
+
+        self.name = name
+        self.summary = summary
+        self.parser = ConfigOptionParser(
+            usage=self.usage,
+            prog=f"{get_prog()} {name}",
+            formatter=UpdatingDefaultsHelpFormatter(),
+            add_help_option=False,
+            name=name,
+            description=self.__doc__,
+            isolated=isolated,
+        )
+
+        self.tempdir_registry: TempDirRegistry | None = None
+
+        # Commands should add options to this option group
+        optgroup_name = f"{self.name.capitalize()} Options"
+        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
+
+        # Add the general options
+        gen_opts = cmdoptions.make_option_group(
+            cmdoptions.general_group,
+            self.parser,
+        )
+        self.parser.add_option_group(gen_opts)
+
+        self.add_options()
+
+    def add_options(self) -> None:
+        pass
+
+    def
handle_pip_version_check(self, options: Values) -> None: + """ + This is a no-op so that commands by default do not do the pip version + check. + """ + # Make sure we do the pip version check if the index_group options + # are present. + assert not hasattr(options, "no_index") + + def run(self, options: Values, args: list[str]) -> int: + raise NotImplementedError + + def _run_wrapper(self, level_number: int, options: Values, args: list[str]) -> int: + def _inner_run() -> int: + try: + return self.run(options, args) + finally: + self.handle_pip_version_check(options) + + if options.debug_mode: + rich_traceback.install(show_locals=True) + return _inner_run() + + try: + status = _inner_run() + assert isinstance(status, int) + return status + except DiagnosticPipError as exc: + logger.error("%s", exc, extra={"rich": True}) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except PreviousBuildDirError as exc: + logger.critical(str(exc)) + logger.debug("Exception information:", exc_info=True) + + return PREVIOUS_BUILD_DIR_ERROR + except ( + InstallationError, + BadCommand, + NetworkConnectionError, + ) as exc: + logger.critical(str(exc)) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except CommandError as exc: + logger.critical("%s", exc) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except BrokenStdoutLoggingError: + # Bypass our logger and write any remaining messages to + # stderr because stdout no longer works. + print("ERROR: Pipe to stdout was broken", file=sys.stderr) + if level_number <= logging.DEBUG: + traceback.print_exc(file=sys.stderr) + + return ERROR + except KeyboardInterrupt: + logger.critical("Operation cancelled by user") + logger.debug("Exception information:", exc_info=True) + + return ERROR + except BaseException: + logger.critical("Exception:", exc_info=True) + + return UNKNOWN_ERROR + + def parse_args(self, args: list[str]) -> tuple[Values, list[str]]: + # factored out for testability + return self.parser.parse_args(args) + + def main(self, args: list[str]) -> int: + try: + with self.main_context(): + return self._main(args) + finally: + logging.shutdown() + + def _main(self, args: list[str]) -> int: + # We must initialize this before the tempdir manager, otherwise the + # configuration would not be accessible by the time we clean up the + # tempdir manager. + self.tempdir_registry = self.enter_context(tempdir_registry()) + # Intentionally set as early as possible so globally-managed temporary + # directories are available to the rest of the code. + self.enter_context(global_tempdir_manager()) + + options, args = self.parse_args(args) + + # Set verbosity so that it can be used elsewhere. + self.verbosity = options.verbose - options.quiet + if options.debug_mode: + self.verbosity = 2 + + if hasattr(options, "progress_bar") and options.progress_bar == "auto": + options.progress_bar = "on" if self.verbosity >= 0 else "off" + + reconfigure(no_color=options.no_color) + level_number = setup_logging( + verbosity=self.verbosity, + no_color=options.no_color, + user_log_file=options.log, + ) + + always_enabled_features = set(options.features_enabled) & set( + cmdoptions.ALWAYS_ENABLED_FEATURES + ) + if always_enabled_features: + logger.warning( + "The following features are always enabled: %s. ", + ", ".join(sorted(always_enabled_features)), + ) + + # Make sure that the --python argument isn't specified after the + # subcommand. 
We can tell, because if --python was specified, + # we should only reach this point if we're running in the created + # subprocess, which has the _PIP_RUNNING_IN_SUBPROCESS environment + # variable set. + if options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ: + logger.critical( + "The --python option must be placed before the pip subcommand name" + ) + sys.exit(ERROR) + + # TODO: Try to get these passing down from the command? + # without resorting to os.environ to hold these. + # This also affects isolated builds and it should. + + if options.no_input: + os.environ["PIP_NO_INPUT"] = "1" + + if options.exists_action: + os.environ["PIP_EXISTS_ACTION"] = " ".join(options.exists_action) + + if options.require_venv and not self.ignore_require_venv: + # If a venv is required check if it can really be found + if not running_under_virtualenv(): + logger.critical("Could not find an activated virtualenv (required).") + sys.exit(VIRTUALENV_NOT_FOUND) + + if options.cache_dir: + options.cache_dir = normalize_path(options.cache_dir) + if not check_path_owner(options.cache_dir): + logger.warning( + "The directory '%s' or its parent directory is not owned " + "or is not writable by the current user. The cache " + "has been disabled. Check the permissions and owner of " + "that directory. If executing pip with sudo, you should " + "use sudo's -H flag.", + options.cache_dir, + ) + options.cache_dir = None + + return self._run_wrapper(level_number, options, args) + + def handler_map(self) -> dict[str, Callable[[Values, list[str]], None]]: + """ + map of names to handler actions for commands with sub-actions + """ + return {} diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/cmdoptions.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/cmdoptions.py new file mode 100644 index 0000000..3519dad --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/cmdoptions.py @@ -0,0 +1,1138 @@ +""" +shared options and groups + +The principle here is to define options once, but *not* instantiate them +globally. One reason being that options with action='append' can carry state +between parses. pip parses general options twice internally, and shouldn't +pass on state. To be consistent, all options will follow this design. +""" + +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False +from __future__ import annotations + +import importlib.util +import logging +import os +import pathlib +import textwrap +from functools import partial +from optparse import SUPPRESS_HELP, Option, OptionGroup, OptionParser, Values +from textwrap import dedent +from typing import Any, Callable + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.cli.parser import ConfigOptionParser +from pip._internal.exceptions import CommandError +from pip._internal.locations import USER_CACHE_DIR, get_src_prefix +from pip._internal.models.format_control import FormatControl +from pip._internal.models.index import PyPI +from pip._internal.models.target_python import TargetPython +from pip._internal.utils.hashes import STRONG_HASHES +from pip._internal.utils.misc import strtobool + +logger = logging.getLogger(__name__) + + +def raise_option_error(parser: OptionParser, option: Option, msg: str) -> None: + """ + Raise an option parsing error using parser.error(). + + Args: + parser: an OptionParser instance. + option: an Option instance. + msg: the error text. 
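+
+    Illustrative (hypothetical message): raise_option_error(parser,
+    option=option, msg="invalid value") reports the error via parser.error()
+    and exits rather than returning.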
+ """ + msg = f"{option} error: {msg}" + msg = textwrap.fill(" ".join(msg.split())) + parser.error(msg) + + +def make_option_group(group: dict[str, Any], parser: ConfigOptionParser) -> OptionGroup: + """ + Return an OptionGroup object + group -- assumed to be dict with 'name' and 'options' keys + parser -- an optparse Parser + """ + option_group = OptionGroup(parser, group["name"]) + for option in group["options"]: + option_group.add_option(option()) + return option_group + + +def check_dist_restriction(options: Values, check_target: bool = False) -> None: + """Function for determining if custom platform options are allowed. + + :param options: The OptionParser options. + :param check_target: Whether or not to check if --target is being used. + """ + dist_restriction_set = any( + [ + options.python_version, + options.platforms, + options.abis, + options.implementation, + ] + ) + + binary_only = FormatControl(set(), {":all:"}) + sdist_dependencies_allowed = ( + options.format_control != binary_only and not options.ignore_dependencies + ) + + # Installations or downloads using dist restrictions must not combine + # source distributions and dist-specific wheels, as they are not + # guaranteed to be locally compatible. + if dist_restriction_set and sdist_dependencies_allowed: + raise CommandError( + "When restricting platform and interpreter constraints using " + "--python-version, --platform, --abi, or --implementation, " + "either --no-deps must be set, or --only-binary=:all: must be " + "set and --no-binary must not be set (or must be set to " + ":none:)." + ) + + if check_target: + if not options.dry_run and dist_restriction_set and not options.target_dir: + raise CommandError( + "Can not use any platform or abi specific options unless " + "installing via '--target' or using '--dry-run'" + ) + + +def _path_option_check(option: Option, opt: str, value: str) -> str: + return os.path.expanduser(value) + + +def _package_name_option_check(option: Option, opt: str, value: str) -> str: + return canonicalize_name(value) + + +class PipOption(Option): + TYPES = Option.TYPES + ("path", "package_name") + TYPE_CHECKER = Option.TYPE_CHECKER.copy() + TYPE_CHECKER["package_name"] = _package_name_option_check + TYPE_CHECKER["path"] = _path_option_check + + +########### +# options # +########### + +help_: Callable[..., Option] = partial( + Option, + "-h", + "--help", + dest="help", + action="help", + help="Show help.", +) + +debug_mode: Callable[..., Option] = partial( + Option, + "--debug", + dest="debug_mode", + action="store_true", + default=False, + help=( + "Let unhandled exceptions propagate outside the main subroutine, " + "instead of logging them to stderr." + ), +) + +isolated_mode: Callable[..., Option] = partial( + Option, + "--isolated", + dest="isolated_mode", + action="store_true", + default=False, + help=( + "Run pip in an isolated mode, ignoring environment variables and user " + "configuration." + ), +) + +require_virtualenv: Callable[..., Option] = partial( + Option, + "--require-virtualenv", + "--require-venv", + dest="require_venv", + action="store_true", + default=False, + help=( + "Allow pip to only run in a virtual environment; " + "exit with an error otherwise." 
+ ), +) + +override_externally_managed: Callable[..., Option] = partial( + Option, + "--break-system-packages", + dest="override_externally_managed", + action="store_true", + help="Allow pip to modify an EXTERNALLY-MANAGED Python installation", +) + +python: Callable[..., Option] = partial( + Option, + "--python", + dest="python", + help="Run pip with the specified Python interpreter.", +) + +verbose: Callable[..., Option] = partial( + Option, + "-v", + "--verbose", + dest="verbose", + action="count", + default=0, + help="Give more output. Option is additive, and can be used up to 3 times.", +) + +no_color: Callable[..., Option] = partial( + Option, + "--no-color", + dest="no_color", + action="store_true", + default=False, + help="Suppress colored output.", +) + +version: Callable[..., Option] = partial( + Option, + "-V", + "--version", + dest="version", + action="store_true", + help="Show version and exit.", +) + +quiet: Callable[..., Option] = partial( + Option, + "-q", + "--quiet", + dest="quiet", + action="count", + default=0, + help=( + "Give less output. Option is additive, and can be used up to 3" + " times (corresponding to WARNING, ERROR, and CRITICAL logging" + " levels)." + ), +) + +progress_bar: Callable[..., Option] = partial( + Option, + "--progress-bar", + dest="progress_bar", + type="choice", + choices=["auto", "on", "off", "raw"], + default="auto", + help=( + "Specify whether the progress bar should be used. In 'auto'" + " mode, --quiet will suppress all progress bars." + " [auto, on, off, raw] (default: auto)" + ), +) + +log: Callable[..., Option] = partial( + PipOption, + "--log", + "--log-file", + "--local-log", + dest="log", + metavar="path", + type="path", + help="Path to a verbose appending log.", +) + +no_input: Callable[..., Option] = partial( + Option, + # Don't ask for input + "--no-input", + dest="no_input", + action="store_true", + default=False, + help="Disable prompting for input.", +) + +keyring_provider: Callable[..., Option] = partial( + Option, + "--keyring-provider", + dest="keyring_provider", + choices=["auto", "disabled", "import", "subprocess"], + default="auto", + help=( + "Enable the credential lookup via the keyring library if user input is allowed." + " Specify which mechanism to use [auto, disabled, import, subprocess]." + " (default: %default)" + ), +) + +proxy: Callable[..., Option] = partial( + Option, + "--proxy", + dest="proxy", + type="str", + default="", + help="Specify a proxy in the form scheme://[user:passwd@]proxy.server:port.", +) + +retries: Callable[..., Option] = partial( + Option, + "--retries", + dest="retries", + type="int", + default=5, + help="Maximum attempts to establish a new HTTP connection. (default: %default)", +) + +resume_retries: Callable[..., Option] = partial( + Option, + "--resume-retries", + dest="resume_retries", + type="int", + default=5, + help="Maximum attempts to resume or restart an incomplete download. 
" + "(default: %default)", +) + +timeout: Callable[..., Option] = partial( + Option, + "--timeout", + "--default-timeout", + metavar="sec", + dest="timeout", + type="float", + default=15, + help="Set the socket timeout (default %default seconds).", +) + + +def exists_action() -> Option: + return Option( + # Option when path already exist + "--exists-action", + dest="exists_action", + type="choice", + choices=["s", "i", "w", "b", "a"], + default=[], + action="append", + metavar="action", + help="Default action when a path already exists: " + "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.", + ) + + +cert: Callable[..., Option] = partial( + PipOption, + "--cert", + dest="cert", + type="path", + metavar="path", + help=( + "Path to PEM-encoded CA certificate bundle. " + "If provided, overrides the default. " + "See 'SSL Certificate Verification' in pip documentation " + "for more information." + ), +) + +client_cert: Callable[..., Option] = partial( + PipOption, + "--client-cert", + dest="client_cert", + type="path", + default=None, + metavar="path", + help="Path to SSL client certificate, a single file containing the " + "private key and the certificate in PEM format.", +) + +index_url: Callable[..., Option] = partial( + Option, + "-i", + "--index-url", + "--pypi-url", + dest="index_url", + metavar="URL", + default=PyPI.simple_url, + help="Base URL of the Python Package Index (default %default). " + "This should point to a repository compliant with PEP 503 " + "(the simple repository API) or a local directory laid out " + "in the same format.", +) + + +def extra_index_url() -> Option: + return Option( + "--extra-index-url", + dest="extra_index_urls", + metavar="URL", + action="append", + default=[], + help="Extra URLs of package indexes to use in addition to " + "--index-url. Should follow the same rules as " + "--index-url.", + ) + + +no_index: Callable[..., Option] = partial( + Option, + "--no-index", + dest="no_index", + action="store_true", + default=False, + help="Ignore package index (only looking at --find-links URLs instead).", +) + + +def find_links() -> Option: + return Option( + "-f", + "--find-links", + dest="find_links", + action="append", + default=[], + metavar="url", + help="If a URL or path to an html file, then parse for links to " + "archives such as sdist (.tar.gz) or wheel (.whl) files. " + "If a local path or file:// URL that's a directory, " + "then look for archives in the directory listing. " + "Links to VCS project URLs are not supported.", + ) + + +def trusted_host() -> Option: + return Option( + "--trusted-host", + dest="trusted_hosts", + action="append", + metavar="HOSTNAME", + default=[], + help="Mark this host or host:port pair as trusted, even though it " + "does not have valid or any HTTPS.", + ) + + +def constraints() -> Option: + return Option( + "-c", + "--constraint", + dest="constraints", + action="append", + default=[], + metavar="file", + help="Constrain versions using the given constraints file. " + "This option can be used multiple times.", + ) + + +def requirements() -> Option: + return Option( + "-r", + "--requirement", + dest="requirements", + action="append", + default=[], + metavar="file", + help="Install from the given requirements file. " + "This option can be used multiple times.", + ) + + +def editable() -> Option: + return Option( + "-e", + "--editable", + dest="editables", + action="append", + default=[], + metavar="path/url", + help=( + "Install a project in editable mode (i.e. 
setuptools "
+            '"develop mode") from a local project path or a VCS url.'
+        ),
+    )
+
+
+def _handle_src(option: Option, opt_str: str, value: str, parser: OptionParser) -> None:
+    value = os.path.abspath(value)
+    setattr(parser.values, option.dest, value)
+
+
+src: Callable[..., Option] = partial(
+    PipOption,
+    "--src",
+    "--source",
+    "--source-dir",
+    "--source-directory",
+    dest="src_dir",
+    type="path",
+    metavar="dir",
+    default=get_src_prefix(),
+    action="callback",
+    callback=_handle_src,
+    help="Directory to check out editable projects into. "
+    'The default in a virtualenv is "<venv path>/src". '
+    'The default for global installs is "<current dir>/src".',
+)
+
+
+def _get_format_control(values: Values, option: Option) -> Any:
+    """Get a format_control object."""
+    return getattr(values, option.dest)
+
+
+def _handle_no_binary(
+    option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+    existing = _get_format_control(parser.values, option)
+    FormatControl.handle_mutual_excludes(
+        value,
+        existing.no_binary,
+        existing.only_binary,
+    )
+
+
+def _handle_only_binary(
+    option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+    existing = _get_format_control(parser.values, option)
+    FormatControl.handle_mutual_excludes(
+        value,
+        existing.only_binary,
+        existing.no_binary,
+    )
+
+
+def no_binary() -> Option:
+    format_control = FormatControl(set(), set())
+    return Option(
+        "--no-binary",
+        dest="format_control",
+        action="callback",
+        callback=_handle_no_binary,
+        type="str",
+        default=format_control,
+        help="Do not use binary packages. Can be supplied multiple times, and "
+        'each time adds to the existing value. Accepts either ":all:" to '
+        'disable all binary packages, ":none:" to empty the set (notice '
+        "the colons), or one or more package names with commas between "
+        "them (no colons). Note that some packages are tricky to compile "
+        "and may fail to install when this option is used on them.",
+    )
+
+
+def only_binary() -> Option:
+    format_control = FormatControl(set(), set())
+    return Option(
+        "--only-binary",
+        dest="format_control",
+        action="callback",
+        callback=_handle_only_binary,
+        type="str",
+        default=format_control,
+        help="Do not use source packages. Can be supplied multiple times, and "
+        'each time adds to the existing value. Accepts either ":all:" to '
+        'disable all source packages, ":none:" to empty the set, or one '
+        "or more package names with commas between them. Packages "
+        "without binary distributions will fail to install when this "
+        "option is used on them.",
+    )
+
+
+platforms: Callable[..., Option] = partial(
+    Option,
+    "--platform",
+    dest="platforms",
+    metavar="platform",
+    action="append",
+    default=None,
+    help=(
+        "Only use wheels compatible with <platform>. Defaults to the "
+        "platform of the running system. Use this option multiple times to "
+        "specify multiple platforms supported by the target interpreter."
+    ),
+)
+
+
+# This was made a separate function for unit-testing purposes.
+def _convert_python_version(value: str) -> tuple[tuple[int, ...], str | None]:
+    """
+    Convert a version string like "3", "37", or "3.7.3" into a tuple of ints.
+
+    :return: A 2-tuple (version_info, error_msg), where `error_msg` is
+        non-None if and only if there was a parsing error.
+    """
+    if not value:
+        # The empty string is the same as not providing a value.
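+        # Illustrative inputs handled below: "37" -> ((3, 7), None),
+        # "3.7.3" -> ((3, 7, 3), None), "1.2.3.4" -> ((), error message).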
+        return (None, None)
+
+    parts = value.split(".")
+    if len(parts) > 3:
+        return ((), "at most three version parts are allowed")
+
+    if len(parts) == 1:
+        # Then we are in the case of "3" or "37".
+        value = parts[0]
+        if len(value) > 1:
+            parts = [value[0], value[1:]]
+
+    try:
+        version_info = tuple(int(part) for part in parts)
+    except ValueError:
+        return ((), "each version part must be an integer")
+
+    return (version_info, None)
+
+
+def _handle_python_version(
+    option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+    """
+    Handle a provided --python-version value.
+    """
+    version_info, error_msg = _convert_python_version(value)
+    if error_msg is not None:
+        msg = f"invalid --python-version value: {value!r}: {error_msg}"
+        raise_option_error(parser, option=option, msg=msg)
+
+    parser.values.python_version = version_info
+
+
+python_version: Callable[..., Option] = partial(
+    Option,
+    "--python-version",
+    dest="python_version",
+    metavar="python_version",
+    action="callback",
+    callback=_handle_python_version,
+    type="str",
+    default=None,
+    help=dedent(
+        """\
+    The Python interpreter version to use for wheel and "Requires-Python"
+    compatibility checks. Defaults to a version derived from the running
+    interpreter. The version can be specified using up to three dot-separated
+    integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor
+    version can also be given as a string without dots (e.g. "37" for 3.7.0).
+    """
+    ),
+)
+
+
+implementation: Callable[..., Option] = partial(
+    Option,
+    "--implementation",
+    dest="implementation",
+    metavar="implementation",
+    default=None,
+    help=(
+        "Only use wheels compatible with Python "
+        "implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
+        " or 'ip'. If not specified, then the current "
+        "interpreter implementation is used. Use 'py' to force "
+        "implementation-agnostic wheels."
+    ),
+)
+
+
+abis: Callable[..., Option] = partial(
+    Option,
+    "--abi",
+    dest="abis",
+    metavar="abi",
+    action="append",
+    default=None,
+    help=(
+        "Only use wheels compatible with Python abi <abi>, e.g. 'pypy_41'. "
+        "If not specified, then the current interpreter abi tag is used. "
+        "Use this option multiple times to specify multiple abis supported "
+        "by the target interpreter. Generally you will need to specify "
+        "--implementation, --platform, and --python-version when using this "
+        "option."
+    ),
+)
+
+
+def add_target_python_options(cmd_opts: OptionGroup) -> None:
+    cmd_opts.add_option(platforms())
+    cmd_opts.add_option(python_version())
+    cmd_opts.add_option(implementation())
+    cmd_opts.add_option(abis())
+
+
+def make_target_python(options: Values) -> TargetPython:
+    target_python = TargetPython(
+        platforms=options.platforms,
+        py_version_info=options.python_version,
+        abis=options.abis,
+        implementation=options.implementation,
+    )
+
+    return target_python
+
+
+def prefer_binary() -> Option:
+    return Option(
+        "--prefer-binary",
+        dest="prefer_binary",
+        action="store_true",
+        default=False,
+        help=(
+            "Prefer binary packages over source packages, even if the "
+            "source packages are newer."
+        ),
+    )
+
+
+cache_dir: Callable[..., Option] = partial(
+    PipOption,
+    "--cache-dir",
+    dest="cache_dir",
+    default=USER_CACHE_DIR,
+    metavar="dir",
+    type="path",
+    help="Store the cache data in <dir>.",
+)
+
+
+def _handle_no_cache_dir(
+    option: Option, opt: str, value: str, parser: OptionParser
+) -> None:
+    """
+    Process a value provided for the --no-cache-dir option.
+
+    This is an optparse.Option callback for the --no-cache-dir option.
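+
+    Illustrative: PIP_NO_CACHE_DIR=true and PIP_NO_CACHE_DIR=false both end
+    up disabling the cache; see the compatibility note below.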
+ """ + # The value argument will be None if --no-cache-dir is passed via the + # command-line, since the option doesn't accept arguments. However, + # the value can be non-None if the option is triggered e.g. by an + # environment variable, like PIP_NO_CACHE_DIR=true. + if value is not None: + # Then parse the string value to get argument error-checking. + try: + strtobool(value) + except ValueError as exc: + raise_option_error(parser, option=option, msg=str(exc)) + + # Originally, setting PIP_NO_CACHE_DIR to a value that strtobool() + # converted to 0 (like "false" or "no") caused cache_dir to be disabled + # rather than enabled (logic would say the latter). Thus, we disable + # the cache directory not just on values that parse to True, but (for + # backwards compatibility reasons) also on values that parse to False. + # In other words, always set it to False if the option is provided in + # some (valid) form. + parser.values.cache_dir = False + + +no_cache: Callable[..., Option] = partial( + Option, + "--no-cache-dir", + dest="cache_dir", + action="callback", + callback=_handle_no_cache_dir, + help="Disable the cache.", +) + +no_deps: Callable[..., Option] = partial( + Option, + "--no-deps", + "--no-dependencies", + dest="ignore_dependencies", + action="store_true", + default=False, + help="Don't install package dependencies.", +) + + +def _handle_dependency_group( + option: Option, opt: str, value: str, parser: OptionParser +) -> None: + """ + Process a value provided for the --group option. + + Splits on the rightmost ":", and validates that the path (if present) ends + in `pyproject.toml`. Defaults the path to `pyproject.toml` when one is not given. + + `:` cannot appear in dependency group names, so this is a safe and simple parse. + + This is an optparse.Option callback for the dependency_groups option. + """ + path, sep, groupname = value.rpartition(":") + if not sep: + path = "pyproject.toml" + else: + # check for 'pyproject.toml' filenames using pathlib + if pathlib.PurePath(path).name != "pyproject.toml": + msg = "group paths use 'pyproject.toml' filenames" + raise_option_error(parser, option=option, msg=msg) + + parser.values.dependency_groups.append((path, groupname)) + + +dependency_groups: Callable[..., Option] = partial( + Option, + "--group", + dest="dependency_groups", + default=[], + type=str, + action="callback", + callback=_handle_dependency_group, + metavar="[path:]group", + help='Install a named dependency-group from a "pyproject.toml" file. ' + 'If a path is given, the name of the file must be "pyproject.toml". ' + 'Defaults to using "pyproject.toml" in the current directory.', +) + +ignore_requires_python: Callable[..., Option] = partial( + Option, + "--ignore-requires-python", + dest="ignore_requires_python", + action="store_true", + help="Ignore the Requires-Python information.", +) + +no_build_isolation: Callable[..., Option] = partial( + Option, + "--no-build-isolation", + dest="build_isolation", + action="store_false", + default=True, + help="Disable isolation when building a modern source distribution. 
" + "Build dependencies specified by PEP 518 must be already installed " + "if this option is used.", +) + +check_build_deps: Callable[..., Option] = partial( + Option, + "--check-build-dependencies", + dest="check_build_deps", + action="store_true", + default=False, + help="Check the build dependencies when PEP517 is used.", +) + + +def _handle_no_use_pep517( + option: Option, opt: str, value: str, parser: OptionParser +) -> None: + """ + Process a value provided for the --no-use-pep517 option. + + This is an optparse.Option callback for the no_use_pep517 option. + """ + # Since --no-use-pep517 doesn't accept arguments, the value argument + # will be None if --no-use-pep517 is passed via the command-line. + # However, the value can be non-None if the option is triggered e.g. + # by an environment variable, for example "PIP_NO_USE_PEP517=true". + if value is not None: + msg = """A value was passed for --no-use-pep517, + probably using either the PIP_NO_USE_PEP517 environment variable + or the "no-use-pep517" config file option. Use an appropriate value + of the PIP_USE_PEP517 environment variable or the "use-pep517" + config file option instead. + """ + raise_option_error(parser, option=option, msg=msg) + + # If user doesn't wish to use pep517, we check if setuptools is installed + # and raise error if it is not. + packages = ("setuptools",) + if not all(importlib.util.find_spec(package) for package in packages): + msg = ( + f"It is not possible to use --no-use-pep517 " + f"without {' and '.join(packages)} installed." + ) + raise_option_error(parser, option=option, msg=msg) + + # Otherwise, --no-use-pep517 was passed via the command-line. + parser.values.use_pep517 = False + + +use_pep517: Any = partial( + Option, + "--use-pep517", + dest="use_pep517", + action="store_true", + default=None, + help="Use PEP 517 for building source distributions " + "(use --no-use-pep517 to force legacy behaviour).", +) + +no_use_pep517: Any = partial( + Option, + "--no-use-pep517", + dest="use_pep517", + action="callback", + callback=_handle_no_use_pep517, + default=None, + help=SUPPRESS_HELP, +) + + +def _handle_config_settings( + option: Option, opt_str: str, value: str, parser: OptionParser +) -> None: + key, sep, val = value.partition("=") + if sep != "=": + parser.error(f"Arguments to {opt_str} must be of the form KEY=VAL") + dest = getattr(parser.values, option.dest) + if dest is None: + dest = {} + setattr(parser.values, option.dest, dest) + if key in dest: + if isinstance(dest[key], list): + dest[key].append(val) + else: + dest[key] = [dest[key], val] + else: + dest[key] = val + + +config_settings: Callable[..., Option] = partial( + Option, + "-C", + "--config-settings", + dest="config_settings", + type=str, + action="callback", + callback=_handle_config_settings, + metavar="settings", + help="Configuration settings to be passed to the PEP 517 build backend. " + "Settings take the form KEY=VALUE. 
Use multiple --config-settings options " + "to pass multiple keys to the backend.", +) + +build_options: Callable[..., Option] = partial( + Option, + "--build-option", + dest="build_options", + metavar="options", + action="append", + help="Extra arguments to be supplied to 'setup.py bdist_wheel'.", +) + +global_options: Callable[..., Option] = partial( + Option, + "--global-option", + dest="global_options", + action="append", + metavar="options", + help="Extra global options to be supplied to the setup.py " + "call before the install or bdist_wheel command.", +) + +no_clean: Callable[..., Option] = partial( + Option, + "--no-clean", + action="store_true", + default=False, + help="Don't clean up build directories.", +) + +pre: Callable[..., Option] = partial( + Option, + "--pre", + action="store_true", + default=False, + help="Include pre-release and development versions. By default, " + "pip only finds stable versions.", +) + +json: Callable[..., Option] = partial( + Option, + "--json", + action="store_true", + default=False, + help="Output data in a machine-readable JSON format.", +) + +disable_pip_version_check: Callable[..., Option] = partial( + Option, + "--disable-pip-version-check", + dest="disable_pip_version_check", + action="store_true", + default=False, + help="Don't periodically check PyPI to determine whether a new version " + "of pip is available for download. Implied with --no-index.", +) + +root_user_action: Callable[..., Option] = partial( + Option, + "--root-user-action", + dest="root_user_action", + default="warn", + choices=["warn", "ignore"], + help="Action if pip is run as a root user [warn, ignore] (default: warn)", +) + + +def _handle_merge_hash( + option: Option, opt_str: str, value: str, parser: OptionParser +) -> None: + """Given a value spelled "algo:digest", append the digest to a list + pointed to in a dict by the algo name.""" + if not parser.values.hashes: + parser.values.hashes = {} + try: + algo, digest = value.split(":", 1) + except ValueError: + parser.error( + f"Arguments to {opt_str} must be a hash name " + "followed by a value, like --hash=sha256:" + "abcde..." + ) + if algo not in STRONG_HASHES: + parser.error( + "Allowed hash algorithms for {} are {}.".format( + opt_str, ", ".join(STRONG_HASHES) + ) + ) + parser.values.hashes.setdefault(algo, []).append(digest) + + +hash: Callable[..., Option] = partial( + Option, + "--hash", + # Hash values eventually end up in InstallRequirement.hashes due to + # __dict__ copying in process_line(). + dest="hashes", + action="callback", + callback=_handle_merge_hash, + type="string", + help="Verify that the package's archive matches this " + "hash before installing. Example: --hash=sha256:abcdef...", +) + + +require_hashes: Callable[..., Option] = partial( + Option, + "--require-hashes", + dest="require_hashes", + action="store_true", + default=False, + help="Require a hash to check each requirement against, for " + "repeatable installs. 
This option is implied when any package in a " + "requirements file has a --hash option.", +) + + +list_path: Callable[..., Option] = partial( + PipOption, + "--path", + dest="path", + type="path", + action="append", + help="Restrict to the specified installation path for listing " + "packages (can be used multiple times).", +) + + +def check_list_path_option(options: Values) -> None: + if options.path and (options.user or options.local): + raise CommandError("Cannot combine '--path' with '--user' or '--local'") + + +list_exclude: Callable[..., Option] = partial( + PipOption, + "--exclude", + dest="excludes", + action="append", + metavar="package", + type="package_name", + help="Exclude specified package from the output", +) + + +no_python_version_warning: Callable[..., Option] = partial( + Option, + "--no-python-version-warning", + dest="no_python_version_warning", + action="store_true", + default=False, + help=SUPPRESS_HELP, # No-op, a hold-over from the Python 2->3 transition. +) + + +# Features that are now always on. A warning is printed if they are used. +ALWAYS_ENABLED_FEATURES = [ + "truststore", # always on since 24.2 + "no-binary-enable-wheel-cache", # always on since 23.1 +] + +use_new_feature: Callable[..., Option] = partial( + Option, + "--use-feature", + dest="features_enabled", + metavar="feature", + action="append", + default=[], + choices=[ + "fast-deps", + ] + + ALWAYS_ENABLED_FEATURES, + help="Enable new functionality, that may be backward incompatible.", +) + +use_deprecated_feature: Callable[..., Option] = partial( + Option, + "--use-deprecated", + dest="deprecated_features_enabled", + metavar="feature", + action="append", + default=[], + choices=[ + "legacy-resolver", + "legacy-certs", + ], + help=("Enable deprecated functionality, that will be removed in the future."), +) + +########## +# groups # +########## + +general_group: dict[str, Any] = { + "name": "General Options", + "options": [ + help_, + debug_mode, + isolated_mode, + require_virtualenv, + python, + verbose, + version, + quiet, + log, + no_input, + keyring_provider, + proxy, + retries, + timeout, + exists_action, + trusted_host, + cert, + client_cert, + cache_dir, + no_cache, + disable_pip_version_check, + no_color, + no_python_version_warning, + use_new_feature, + use_deprecated_feature, + resume_retries, + ], +} + +index_group: dict[str, Any] = { + "name": "Package Index Options", + "options": [ + index_url, + extra_index_url, + no_index, + find_links, + ], +} diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/command_context.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/command_context.py new file mode 100644 index 0000000..9c167bd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/command_context.py @@ -0,0 +1,28 @@ +from collections.abc import Generator +from contextlib import AbstractContextManager, ExitStack, contextmanager +from typing import TypeVar + +_T = TypeVar("_T", covariant=True) + + +class CommandContextMixIn: + def __init__(self) -> None: + super().__init__() + self._in_main_context = False + self._main_context = ExitStack() + + @contextmanager + def main_context(self) -> Generator[None, None, None]: + assert not self._in_main_context + + self._in_main_context = True + try: + with self._main_context: + yield + finally: + self._in_main_context = False + + def enter_context(self, context_provider: AbstractContextManager[_T]) -> _T: + assert self._in_main_context + + return self._main_context.enter_context(context_provider) diff --git 
a/.venv/lib/python3.12/site-packages/pip/_internal/cli/index_command.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/index_command.py new file mode 100644 index 0000000..f6a82c8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/index_command.py @@ -0,0 +1,175 @@ +""" +Contains command classes which may interact with an index / the network. + +Unlike its sister module, req_command, this module still uses lazy imports +so commands which don't always hit the network (e.g. list w/o --outdated or +--uptodate) don't need to waste time importing PipSession and friends. +""" + +from __future__ import annotations + +import logging +import os +import sys +from functools import lru_cache +from optparse import Values +from typing import TYPE_CHECKING + +from pip._vendor import certifi + +from pip._internal.cli.base_command import Command +from pip._internal.cli.command_context import CommandContextMixIn + +if TYPE_CHECKING: + from ssl import SSLContext + + from pip._internal.network.session import PipSession + +logger = logging.getLogger(__name__) + + +@lru_cache +def _create_truststore_ssl_context() -> SSLContext | None: + if sys.version_info < (3, 10): + logger.debug("Disabling truststore because Python version isn't 3.10+") + return None + + try: + import ssl + except ImportError: + logger.warning("Disabling truststore since ssl support is missing") + return None + + try: + from pip._vendor import truststore + except ImportError: + logger.warning("Disabling truststore because platform isn't supported") + return None + + ctx = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + ctx.load_verify_locations(certifi.where()) + return ctx + + +class SessionCommandMixin(CommandContextMixIn): + """ + A class mixin for command classes needing _build_session(). 
+ """ + + def __init__(self) -> None: + super().__init__() + self._session: PipSession | None = None + + @classmethod + def _get_index_urls(cls, options: Values) -> list[str] | None: + """Return a list of index urls from user-provided options.""" + index_urls = [] + if not getattr(options, "no_index", False): + url = getattr(options, "index_url", None) + if url: + index_urls.append(url) + urls = getattr(options, "extra_index_urls", None) + if urls: + index_urls.extend(urls) + # Return None rather than an empty list + return index_urls or None + + def get_default_session(self, options: Values) -> PipSession: + """Get a default-managed session.""" + if self._session is None: + self._session = self.enter_context(self._build_session(options)) + # there's no type annotation on requests.Session, so it's + # automatically ContextManager[Any] and self._session becomes Any, + # then https://github.com/python/mypy/issues/7696 kicks in + assert self._session is not None + return self._session + + def _build_session( + self, + options: Values, + retries: int | None = None, + timeout: int | None = None, + ) -> PipSession: + from pip._internal.network.session import PipSession + + cache_dir = options.cache_dir + assert not cache_dir or os.path.isabs(cache_dir) + + if "legacy-certs" not in options.deprecated_features_enabled: + ssl_context = _create_truststore_ssl_context() + else: + ssl_context = None + + session = PipSession( + cache=os.path.join(cache_dir, "http-v2") if cache_dir else None, + retries=retries if retries is not None else options.retries, + trusted_hosts=options.trusted_hosts, + index_urls=self._get_index_urls(options), + ssl_context=ssl_context, + ) + + # Handle custom ca-bundles from the user + if options.cert: + session.verify = options.cert + + # Handle SSL client certificate + if options.client_cert: + session.cert = options.client_cert + + # Handle timeouts + if options.timeout or timeout: + session.timeout = timeout if timeout is not None else options.timeout + + # Handle configured proxies + if options.proxy: + session.proxies = { + "http": options.proxy, + "https": options.proxy, + } + session.trust_env = False + session.pip_proxy = options.proxy + + # Determine if we can prompt the user for authentication or not + session.auth.prompting = not options.no_input + session.auth.keyring_provider = options.keyring_provider + + return session + + +def _pip_self_version_check(session: PipSession, options: Values) -> None: + from pip._internal.self_outdated_check import pip_self_version_check as check + + check(session, options) + + +class IndexGroupCommand(Command, SessionCommandMixin): + """ + Abstract base class for commands with the index_group options. + + This also corresponds to the commands that permit the pip version check. + """ + + def handle_pip_version_check(self, options: Values) -> None: + """ + Do the pip version check if not disabled. + + This overrides the default behavior of not doing the check. + """ + # Make sure the index_group options are present. + assert hasattr(options, "no_index") + + if options.disable_pip_version_check or options.no_index: + return + + try: + # Otherwise, check if we're using the latest version of pip available. 
+ session = self._build_session( + options, + retries=0, + timeout=min(5, options.timeout), + ) + with session: + _pip_self_version_check(session, options) + except Exception: + logger.warning("There was an error checking the latest version of pip.") + logger.debug("See below for error", exc_info=True) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/main.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/main.py new file mode 100644 index 0000000..9a161fd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/main.py @@ -0,0 +1,80 @@ +"""Primary application entrypoint.""" + +from __future__ import annotations + +import locale +import logging +import os +import sys +import warnings + +from pip._internal.cli.autocompletion import autocomplete +from pip._internal.cli.main_parser import parse_command +from pip._internal.commands import create_command +from pip._internal.exceptions import PipError +from pip._internal.utils import deprecation + +logger = logging.getLogger(__name__) + + +# Do not import and use main() directly! Using it directly is actively +# discouraged by pip's maintainers. The name, location and behavior of +# this function is subject to change, so calling it directly is not +# portable across different pip versions. + +# In addition, running pip in-process is unsupported and unsafe. This is +# elaborated in detail at +# https://pip.pypa.io/en/stable/user_guide/#using-pip-from-your-program. +# That document also provides suggestions that should work for nearly +# all users that are considering importing and using main() directly. + +# However, we know that certain users will still want to invoke pip +# in-process. If you understand and accept the implications of using pip +# in an unsupported manner, the best approach is to use runpy to avoid +# depending on the exact location of this entry point. + +# The following example shows how to use runpy to invoke pip in that +# case: +# +# sys.argv = ["pip", your, args, here] +# runpy.run_module("pip", run_name="__main__") +# +# Note that this will exit the process after running, unlike a direct +# call to main. As it is not safe to do any processing after calling +# main, this should not be an issue in practice. 
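+# As a concrete sketch of that runpy approach (illustrative only, not part
+# of pip itself; "requests" is just an example argument, and the exit
+# status surfaces as the SystemExit raised by pip's __main__):
+#
+#     import runpy
+#     import sys
+#
+#     sys.argv = ["pip", "install", "requests"]
+#     try:
+#         runpy.run_module("pip", run_name="__main__")
+#     except SystemExit as exc:
+#         print(f"pip exited with status {exc.code}")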
+ + +def main(args: list[str] | None = None) -> int: + if args is None: + args = sys.argv[1:] + + # Suppress the pkg_resources deprecation warning + # Note - we use a module of .*pkg_resources to cover + # the normal case (pip._vendor.pkg_resources) and the + # devendored case (a bare pkg_resources) + warnings.filterwarnings( + action="ignore", category=DeprecationWarning, module=".*pkg_resources" + ) + + # Configure our deprecation warnings to be sent through loggers + deprecation.install_warning_logger() + + autocomplete() + + try: + cmd_name, cmd_args = parse_command(args) + except PipError as exc: + sys.stderr.write(f"ERROR: {exc}") + sys.stderr.write(os.linesep) + sys.exit(1) + + # Needed for locale.getpreferredencoding(False) to work + # in pip._internal.utils.encoding.auto_decode + try: + locale.setlocale(locale.LC_ALL, "") + except locale.Error as e: + # setlocale can apparently crash if the locale is uninitialized + logger.debug("Ignoring error %s when setting locale", e) + command = create_command(cmd_name, isolated=("--isolated" in cmd_args)) + + return command.main(cmd_args) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/main_parser.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/main_parser.py new file mode 100644 index 0000000..5ce9f5a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/main_parser.py @@ -0,0 +1,134 @@ +"""A single place for constructing and exposing the main parser""" + +from __future__ import annotations + +import os +import subprocess +import sys + +from pip._internal.build_env import get_runnable_pip +from pip._internal.cli import cmdoptions +from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter +from pip._internal.commands import commands_dict, get_similar_commands +from pip._internal.exceptions import CommandError +from pip._internal.utils.misc import get_pip_version, get_prog + +__all__ = ["create_main_parser", "parse_command"] + + +def create_main_parser() -> ConfigOptionParser: + """Creates and returns the main parser for pip's CLI""" + + parser = ConfigOptionParser( + usage="\n%prog <command> [options]", + add_help_option=False, + formatter=UpdatingDefaultsHelpFormatter(), + name="global", + prog=get_prog(), + ) + parser.disable_interspersed_args() + + parser.version = get_pip_version() + + # add the general options + gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) + parser.add_option_group(gen_opts) + + # so the help formatter knows + parser.main = True # type: ignore + + # create command listing for description + description = [""] + [ + f"{name:27} {command_info.summary}" + for name, command_info in commands_dict.items() + ] + parser.description = "\n".join(description) + + return parser + + +def identify_python_interpreter(python: str) -> str | None: + # If the named file exists, use it. + # If it's a directory, assume it's a virtual environment and + # look for the environment's Python executable. + if os.path.exists(python): + if os.path.isdir(python): + # bin/python for Unix, Scripts/python.exe for Windows + # Try both in case of odd cases like cygwin. 
+ for exe in ("bin/python", "Scripts/python.exe"): + py = os.path.join(python, exe) + if os.path.exists(py): + return py + else: + return python + + # Could not find the interpreter specified + return None + + +def parse_command(args: list[str]) -> tuple[str, list[str]]: + parser = create_main_parser() + + # Note: parser calls disable_interspersed_args(), so the result of this + # call is to split the initial args into the general options before the + # subcommand and everything else. + # For example: + # args: ['--timeout=5', 'install', '--user', 'INITools'] + # general_options: ['--timeout=5'] + # args_else: ['install', '--user', 'INITools'] + general_options, args_else = parser.parse_args(args) + + # --python + if general_options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ: + # Re-invoke pip using the specified Python interpreter + interpreter = identify_python_interpreter(general_options.python) + if interpreter is None: + raise CommandError( + f"Could not locate Python interpreter {general_options.python}" + ) + + pip_cmd = [ + interpreter, + get_runnable_pip(), + ] + pip_cmd.extend(args) + + # Set a flag so the child doesn't re-invoke itself, causing + # an infinite loop. + os.environ["_PIP_RUNNING_IN_SUBPROCESS"] = "1" + returncode = 0 + try: + proc = subprocess.run(pip_cmd) + returncode = proc.returncode + except (subprocess.SubprocessError, OSError) as exc: + raise CommandError(f"Failed to run pip under {interpreter}: {exc}") + sys.exit(returncode) + + # --version + if general_options.version: + sys.stdout.write(parser.version) + sys.stdout.write(os.linesep) + sys.exit() + + # pip || pip help -> print_help() + if not args_else or (args_else[0] == "help" and len(args_else) == 1): + parser.print_help() + sys.exit() + + # the subcommand name + cmd_name = args_else[0] + + if cmd_name not in commands_dict: + guess = get_similar_commands(cmd_name) + + msg = [f'unknown command "{cmd_name}"'] + if guess: + msg.append(f'maybe you meant "{guess}"') + + raise CommandError(" - ".join(msg)) + + # all the args without the subcommand + cmd_args = args[:] + cmd_args.remove(cmd_name) + + return cmd_name, cmd_args diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/parser.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/parser.py new file mode 100644 index 0000000..f8b8ac4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/parser.py @@ -0,0 +1,298 @@ +"""Base option parser setup""" + +from __future__ import annotations + +import logging +import optparse +import shutil +import sys +import textwrap +from collections.abc import Generator +from contextlib import suppress +from typing import Any, NoReturn + +from pip._internal.cli.status_codes import UNKNOWN_ERROR +from pip._internal.configuration import Configuration, ConfigurationError +from pip._internal.utils.misc import redact_auth_from_url, strtobool + +logger = logging.getLogger(__name__) + + +class PrettyHelpFormatter(optparse.IndentedHelpFormatter): + """A prettier/less verbose help formatter for optparse.""" + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # help position must be aligned with __init__.parseopts.description + kwargs["max_help_position"] = 30 + kwargs["indent_increment"] = 1 + kwargs["width"] = shutil.get_terminal_size()[0] - 2 + super().__init__(*args, **kwargs) + + def format_option_strings(self, option: optparse.Option) -> str: + return self._format_option_strings(option) + + def _format_option_strings( + self, option: optparse.Option, mvarfmt: str = " 
<{}>", optsep: str = ", " + ) -> str: + """ + Return a comma-separated list of option strings and metavars. + + :param option: tuple of (short opt, long opt), e.g.: ('-f', '--format') + :param mvarfmt: metavar format string + :param optsep: separator + """ + opts = [] + + if option._short_opts: + opts.append(option._short_opts[0]) + if option._long_opts: + opts.append(option._long_opts[0]) + if len(opts) > 1: + opts.insert(1, optsep) + + if option.takes_value(): + assert option.dest is not None + metavar = option.metavar or option.dest.lower() + opts.append(mvarfmt.format(metavar.lower())) + + return "".join(opts) + + def format_heading(self, heading: str) -> str: + if heading == "Options": + return "" + return heading + ":\n" + + def format_usage(self, usage: str) -> str: + """ + Ensure there is only one newline between usage and the first heading + if there is no description. + """ + msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " ")) + return msg + + def format_description(self, description: str | None) -> str: + # leave full control over description to us + if description: + if hasattr(self.parser, "main"): + label = "Commands" + else: + label = "Description" + # some doc strings have initial newlines, some don't + description = description.lstrip("\n") + # some doc strings have final newlines and spaces, some don't + description = description.rstrip() + # dedent, then reindent + description = self.indent_lines(textwrap.dedent(description), " ") + description = f"{label}:\n{description}\n" + return description + else: + return "" + + def format_epilog(self, epilog: str | None) -> str: + # leave full control over epilog to us + if epilog: + return epilog + else: + return "" + + def indent_lines(self, text: str, indent: str) -> str: + new_lines = [indent + line for line in text.split("\n")] + return "\n".join(new_lines) + + +class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): + """Custom help formatter for use in ConfigOptionParser. + + This updates the defaults before expanding them, allowing + them to show up correctly in the help listing. 
+ + Also redacts auth from URL-type options. + """ + + def expand_default(self, option: optparse.Option) -> str: + default_values = None + if self.parser is not None: + assert isinstance(self.parser, ConfigOptionParser) + self.parser._update_defaults(self.parser.defaults) + assert option.dest is not None + default_values = self.parser.defaults.get(option.dest) + help_text = super().expand_default(option) + + if default_values and option.metavar == "URL": + if isinstance(default_values, str): + default_values = [default_values] + + # If it's not a list, we should abort and just return the help text + if not isinstance(default_values, list): + default_values = [] + + for val in default_values: + help_text = help_text.replace(val, redact_auth_from_url(val)) + + return help_text + + +class CustomOptionParser(optparse.OptionParser): + def insert_option_group( + self, idx: int, *args: Any, **kwargs: Any + ) -> optparse.OptionGroup: + """Insert an OptionGroup at a given position.""" + group = self.add_option_group(*args, **kwargs) + + self.option_groups.pop() + self.option_groups.insert(idx, group) + + return group + + @property + def option_list_all(self) -> list[optparse.Option]: + """Get a list of all options, including those in option groups.""" + res = self.option_list[:] + for i in self.option_groups: + res.extend(i.option_list) + + return res + + +class ConfigOptionParser(CustomOptionParser): + """Custom option parser which updates its defaults by checking the + configuration files and environment variables""" + + def __init__( + self, + *args: Any, + name: str, + isolated: bool = False, + **kwargs: Any, + ) -> None: + self.name = name + self.config = Configuration(isolated) + + assert self.name + super().__init__(*args, **kwargs) + + def check_default(self, option: optparse.Option, key: str, val: Any) -> Any: + try: + return option.check_value(key, val) + except optparse.OptionValueError as exc: + print(f"An error occurred during configuration: {exc}") + sys.exit(3) + + def _get_ordered_configuration_items( + self, + ) -> Generator[tuple[str, Any], None, None]: + # Configuration gives keys in an unordered manner. Order them. + override_order = ["global", self.name, ":env:"] + + # Pool the options into different groups + section_items: dict[str, list[tuple[str, Any]]] = { + name: [] for name in override_order + } + + for _, value in self.config.items(): # noqa: PERF102 + for section_key, val in value.items(): + # ignore empty values + if not val: + logger.debug( + "Ignoring configuration key '%s' as its value is empty.", + section_key, + ) + continue + + section, key = section_key.split(".", 1) + if section in override_order: + section_items[section].append((key, val)) + + # Yield each group in their override order + for section in override_order: + yield from section_items[section] + + def _update_defaults(self, defaults: dict[str, Any]) -> dict[str, Any]: + """Updates the given defaults with values from the config files and + the environ. Does a little special handling for certain types of + options (lists).""" + + # Accumulate complex default state. + self.values = optparse.Values(self.defaults) + late_eval = set() + # Then set the options with those values + for key, val in self._get_ordered_configuration_items(): + # '--' because configuration supports only long names + option = self.get_option("--" + key) + + # Ignore options not present in this parser. E.g. non-globals put + # in [global] by users that want them to apply to all applicable + # commands. 
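+ # For instance (an illustrative pip.conf, not a shipped default):
+ #
+ # [global]
+ # timeout = 60
+ # ignore-installed = true
+ #
+ # "ignore-installed" maps to --ignore-installed, which only some
+ # commands define; for the rest, get_option() returns None and the
+ # key is skipped here.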
+ if option is None: + continue + + assert option.dest is not None + + if option.action in ("store_true", "store_false"): + try: + val = strtobool(val) + except ValueError: + self.error( + f"{val} is not a valid value for {key} option, " + "please specify a boolean value like yes/no, " + "true/false or 1/0 instead." + ) + elif option.action == "count": + with suppress(ValueError): + val = strtobool(val) + with suppress(ValueError): + val = int(val) + if not isinstance(val, int) or val < 0: + self.error( + f"{val} is not a valid value for {key} option, " + "please instead specify either a non-negative integer " + "or a boolean value like yes/no or false/true " + "which is equivalent to 1/0." + ) + elif option.action == "append": + val = val.split() + val = [self.check_default(option, key, v) for v in val] + elif option.action == "callback": + assert option.callback is not None + late_eval.add(option.dest) + opt_str = option.get_opt_string() + val = option.convert_value(opt_str, val) + # From take_action + args = option.callback_args or () + kwargs = option.callback_kwargs or {} + option.callback(option, opt_str, val, self, *args, **kwargs) + else: + val = self.check_default(option, key, val) + + defaults[option.dest] = val + + for key in late_eval: + defaults[key] = getattr(self.values, key) + self.values = None + return defaults + + def get_default_values(self) -> optparse.Values: + """Overriding to make updating the defaults after instantiation of + the option parser possible, _update_defaults() does the dirty work.""" + if not self.process_default_values: + # Old, pre-Optik 1.5 behaviour. + return optparse.Values(self.defaults) + + # Load the configuration, or error out in case of an error + try: + self.config.load() + except ConfigurationError as err: + self.exit(UNKNOWN_ERROR, str(err)) + + defaults = self._update_defaults(self.defaults.copy()) # ours + for option in self._get_all_options(): + assert option.dest is not None + default = defaults.get(option.dest) + if isinstance(default, str): + opt_str = option.get_opt_string() + defaults[option.dest] = option.check_value(opt_str, default) + return optparse.Values(defaults) + + def error(self, msg: str) -> NoReturn: + self.print_usage(sys.stderr) + self.exit(UNKNOWN_ERROR, f"{msg}\n") diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/progress_bars.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/progress_bars.py new file mode 100644 index 0000000..af1bb6a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/progress_bars.py @@ -0,0 +1,151 @@ +from __future__ import annotations + +import functools +import sys +from collections.abc import Generator, Iterable, Iterator +from typing import Callable, Literal, TypeVar + +from pip._vendor.rich.progress import ( + BarColumn, + DownloadColumn, + FileSizeColumn, + MofNCompleteColumn, + Progress, + ProgressColumn, + SpinnerColumn, + TextColumn, + TimeElapsedColumn, + TimeRemainingColumn, + TransferSpeedColumn, +) + +from pip._internal.cli.spinners import RateLimiter +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.logging import get_console, get_indentation + +T = TypeVar("T") +ProgressRenderer = Callable[[Iterable[T]], Iterator[T]] +BarType = Literal["on", "off", "raw"] + + +def _rich_download_progress_bar( + iterable: Iterable[bytes], + *, + bar_type: BarType, + size: int | None, + initial_progress: int | None = None, +) -> Generator[bytes, None, None]: + assert bar_type == "on", "This should only be used in the 
default mode." + + if not size: + total = float("inf") + columns: tuple[ProgressColumn, ...] = ( + TextColumn("[progress.description]{task.description}"), + SpinnerColumn("line", speed=1.5), + FileSizeColumn(), + TransferSpeedColumn(), + TimeElapsedColumn(), + ) + else: + total = size + columns = ( + TextColumn("[progress.description]{task.description}"), + BarColumn(), + DownloadColumn(), + TransferSpeedColumn(), + TextColumn("{task.fields[time_description]}"), + TimeRemainingColumn(elapsed_when_finished=True), + ) + + progress = Progress(*columns, refresh_per_second=5) + task_id = progress.add_task( + " " * (get_indentation() + 2), total=total, time_description="eta" + ) + if initial_progress is not None: + progress.update(task_id, advance=initial_progress) + with progress: + for chunk in iterable: + yield chunk + progress.update(task_id, advance=len(chunk)) + progress.update(task_id, time_description="") + + +def _rich_install_progress_bar( + iterable: Iterable[InstallRequirement], *, total: int +) -> Iterator[InstallRequirement]: + columns = ( + TextColumn("{task.fields[indent]}"), + BarColumn(), + MofNCompleteColumn(), + TextColumn("{task.description}"), + ) + console = get_console() + + bar = Progress(*columns, refresh_per_second=6, console=console, transient=True) + # Hiding the progress bar at initialization forces a refresh cycle to occur + # until the bar appears, avoiding very short flashes. + task = bar.add_task("", total=total, indent=" " * get_indentation(), visible=False) + with bar: + for req in iterable: + bar.update(task, description=rf"\[{req.name}]", visible=True) + yield req + bar.advance(task) + + +def _raw_progress_bar( + iterable: Iterable[bytes], + *, + size: int | None, + initial_progress: int | None = None, +) -> Generator[bytes, None, None]: + def write_progress(current: int, total: int) -> None: + sys.stdout.write(f"Progress {current} of {total}\n") + sys.stdout.flush() + + current = initial_progress or 0 + total = size or 0 + rate_limiter = RateLimiter(0.25) + + write_progress(current, total) + for chunk in iterable: + current += len(chunk) + if rate_limiter.ready() or current == total: + write_progress(current, total) + rate_limiter.reset() + yield chunk + + +def get_download_progress_renderer( + *, bar_type: BarType, size: int | None = None, initial_progress: int | None = None +) -> ProgressRenderer[bytes]: + """Get an object that can be used to render the download progress. + + Returns a callable that takes an iterable to "wrap". + """ + if bar_type == "on": + return functools.partial( + _rich_download_progress_bar, + bar_type=bar_type, + size=size, + initial_progress=initial_progress, + ) + elif bar_type == "raw": + return functools.partial( + _raw_progress_bar, + size=size, + initial_progress=initial_progress, + ) + else: + return iter # no-op, when passed an iterator + + +def get_install_progress_renderer( + *, bar_type: BarType, total: int +) -> ProgressRenderer[InstallRequirement]: + """Get an object that can be used to render the install progress. + Returns a callable that takes an iterable to "wrap". 
+ """ + if bar_type == "on": + return functools.partial(_rich_install_progress_bar, total=total) + else: + return iter diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/req_command.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/req_command.py new file mode 100644 index 0000000..dc1328f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/req_command.py @@ -0,0 +1,351 @@ +"""Contains the RequirementCommand base class. + +This class is in a separate module so the commands that do not always +need PackageFinder capability don't unnecessarily import the +PackageFinder machinery and all its vendored dependencies, etc. +""" + +from __future__ import annotations + +import logging +from functools import partial +from optparse import Values +from typing import Any + +from pip._internal.build_env import SubprocessBuildEnvironmentInstaller +from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.index_command import IndexGroupCommand +from pip._internal.cli.index_command import SessionCommandMixin as SessionCommandMixin +from pip._internal.exceptions import CommandError, PreviousBuildDirError +from pip._internal.index.collector import LinkCollector +from pip._internal.index.package_finder import PackageFinder +from pip._internal.models.selection_prefs import SelectionPreferences +from pip._internal.models.target_python import TargetPython +from pip._internal.network.session import PipSession +from pip._internal.operations.build.build_tracker import BuildTracker +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req.constructors import ( + install_req_from_editable, + install_req_from_line, + install_req_from_parsed_requirement, + install_req_from_req_string, +) +from pip._internal.req.req_dependency_group import parse_dependency_groups +from pip._internal.req.req_file import parse_requirements +from pip._internal.req.req_install import InstallRequirement +from pip._internal.resolution.base import BaseResolver +from pip._internal.utils.temp_dir import ( + TempDirectory, + TempDirectoryTypeRegistry, + tempdir_kinds, +) + +logger = logging.getLogger(__name__) + + +KEEPABLE_TEMPDIR_TYPES = [ + tempdir_kinds.BUILD_ENV, + tempdir_kinds.EPHEM_WHEEL_CACHE, + tempdir_kinds.REQ_BUILD, +] + + +def with_cleanup(func: Any) -> Any: + """Decorator for common logic related to managing temporary + directories. + """ + + def configure_tempdir_registry(registry: TempDirectoryTypeRegistry) -> None: + for t in KEEPABLE_TEMPDIR_TYPES: + registry.set_delete(t, False) + + def wrapper( + self: RequirementCommand, options: Values, args: list[Any] + ) -> int | None: + assert self.tempdir_registry is not None + if options.no_clean: + configure_tempdir_registry(self.tempdir_registry) + + try: + return func(self, options, args) + except PreviousBuildDirError: + # This kind of conflict can occur when the user passes an explicit + # build directory with a pre-existing folder. In that case we do + # not want to accidentally remove it. 
+ configure_tempdir_registry(self.tempdir_registry) + raise + + return wrapper + + +class RequirementCommand(IndexGroupCommand): + def __init__(self, *args: Any, **kw: Any) -> None: + super().__init__(*args, **kw) + + self.cmd_opts.add_option(cmdoptions.dependency_groups()) + self.cmd_opts.add_option(cmdoptions.no_clean()) + + @staticmethod + def determine_resolver_variant(options: Values) -> str: + """Determines which resolver should be used, based on the given options.""" + if "legacy-resolver" in options.deprecated_features_enabled: + return "legacy" + + return "resolvelib" + + @classmethod + def make_requirement_preparer( + cls, + temp_build_dir: TempDirectory, + options: Values, + build_tracker: BuildTracker, + session: PipSession, + finder: PackageFinder, + use_user_site: bool, + download_dir: str | None = None, + verbosity: int = 0, + ) -> RequirementPreparer: + """ + Create a RequirementPreparer instance for the given parameters. + """ + temp_build_dir_path = temp_build_dir.path + assert temp_build_dir_path is not None + legacy_resolver = False + + resolver_variant = cls.determine_resolver_variant(options) + if resolver_variant == "resolvelib": + lazy_wheel = "fast-deps" in options.features_enabled + if lazy_wheel: + logger.warning( + "pip is using lazily downloaded wheels using HTTP " + "range requests to obtain dependency information. " + "This experimental feature is enabled through " + "--use-feature=fast-deps and it is not ready for " + "production." + ) + else: + legacy_resolver = True + lazy_wheel = False + if "fast-deps" in options.features_enabled: + logger.warning( + "fast-deps has no effect when used with the legacy resolver." + ) + + return RequirementPreparer( + build_dir=temp_build_dir_path, + src_dir=options.src_dir, + download_dir=download_dir, + build_isolation=options.build_isolation, + build_isolation_installer=SubprocessBuildEnvironmentInstaller(finder), + check_build_deps=options.check_build_deps, + build_tracker=build_tracker, + session=session, + progress_bar=options.progress_bar, + finder=finder, + require_hashes=options.require_hashes, + use_user_site=use_user_site, + lazy_wheel=lazy_wheel, + verbosity=verbosity, + legacy_resolver=legacy_resolver, + resume_retries=options.resume_retries, + ) + + @classmethod + def make_resolver( + cls, + preparer: RequirementPreparer, + finder: PackageFinder, + options: Values, + wheel_cache: WheelCache | None = None, + use_user_site: bool = False, + ignore_installed: bool = True, + ignore_requires_python: bool = False, + force_reinstall: bool = False, + upgrade_strategy: str = "to-satisfy-only", + use_pep517: bool | None = None, + py_version_info: tuple[int, ...] | None = None, + ) -> BaseResolver: + """ + Create a Resolver instance for the given parameters. + """ + make_install_req = partial( + install_req_from_req_string, + isolated=options.isolated_mode, + use_pep517=use_pep517, + ) + resolver_variant = cls.determine_resolver_variant(options) + # The long import name and duplicated invocation are needed to convince + # Mypy into correctly typechecking. Otherwise it would complain about the + # "Resolver" class being redefined. 
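+ # (For example, "pip install --use-deprecated=legacy-resolver ..."
+ # makes determine_resolver_variant() return "legacy", selecting the
+ # second branch below; everything else resolves with resolvelib.)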
+ if resolver_variant == "resolvelib": + import pip._internal.resolution.resolvelib.resolver + + return pip._internal.resolution.resolvelib.resolver.Resolver( + preparer=preparer, + finder=finder, + wheel_cache=wheel_cache, + make_install_req=make_install_req, + use_user_site=use_user_site, + ignore_dependencies=options.ignore_dependencies, + ignore_installed=ignore_installed, + ignore_requires_python=ignore_requires_python, + force_reinstall=force_reinstall, + upgrade_strategy=upgrade_strategy, + py_version_info=py_version_info, + ) + import pip._internal.resolution.legacy.resolver + + return pip._internal.resolution.legacy.resolver.Resolver( + preparer=preparer, + finder=finder, + wheel_cache=wheel_cache, + make_install_req=make_install_req, + use_user_site=use_user_site, + ignore_dependencies=options.ignore_dependencies, + ignore_installed=ignore_installed, + ignore_requires_python=ignore_requires_python, + force_reinstall=force_reinstall, + upgrade_strategy=upgrade_strategy, + py_version_info=py_version_info, + ) + + def get_requirements( + self, + args: list[str], + options: Values, + finder: PackageFinder, + session: PipSession, + ) -> list[InstallRequirement]: + """ + Parse command-line arguments into the corresponding requirements. + """ + requirements: list[InstallRequirement] = [] + for filename in options.constraints: + for parsed_req in parse_requirements( + filename, + constraint=True, + finder=finder, + options=options, + session=session, + ): + req_to_add = install_req_from_parsed_requirement( + parsed_req, + isolated=options.isolated_mode, + user_supplied=False, + ) + requirements.append(req_to_add) + + for req in args: + req_to_add = install_req_from_line( + req, + comes_from=None, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + user_supplied=True, + config_settings=getattr(options, "config_settings", None), + ) + requirements.append(req_to_add) + + if options.dependency_groups: + for req in parse_dependency_groups(options.dependency_groups): + req_to_add = install_req_from_req_string( + req, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + user_supplied=True, + ) + requirements.append(req_to_add) + + for req in options.editables: + req_to_add = install_req_from_editable( + req, + user_supplied=True, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + config_settings=getattr(options, "config_settings", None), + ) + requirements.append(req_to_add) + + # NOTE: options.require_hashes may be set if --require-hashes is True + for filename in options.requirements: + for parsed_req in parse_requirements( + filename, finder=finder, options=options, session=session + ): + req_to_add = install_req_from_parsed_requirement( + parsed_req, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + user_supplied=True, + config_settings=( + parsed_req.options.get("config_settings") + if parsed_req.options + else None + ), + ) + requirements.append(req_to_add) + + # If any requirement has hash options, enable hash checking. 
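+ # (e.g. a requirements-file line like
+ # "SomeProject==1.0 --hash=sha256:abcdef..."; one such line puts the
+ # entire run into hash-checking mode, matching the --require-hashes
+ # help text. "SomeProject" is illustrative.)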
+ if any(req.has_hash_options for req in requirements): + options.require_hashes = True + + if not ( + args + or options.editables + or options.requirements + or options.dependency_groups + ): + opts = {"name": self.name} + if options.find_links: + raise CommandError( + "You must give at least one requirement to {name} " + '(maybe you meant "pip {name} {links}"?)'.format( + **dict(opts, links=" ".join(options.find_links)) + ) + ) + else: + raise CommandError( + "You must give at least one requirement to {name} " + '(see "pip help {name}")'.format(**opts) + ) + + return requirements + + @staticmethod + def trace_basic_info(finder: PackageFinder) -> None: + """ + Trace basic information about the provided objects. + """ + # Display where finder is looking for packages + search_scope = finder.search_scope + locations = search_scope.get_formatted_locations() + if locations: + logger.info(locations) + + def _build_package_finder( + self, + options: Values, + session: PipSession, + target_python: TargetPython | None = None, + ignore_requires_python: bool | None = None, + ) -> PackageFinder: + """ + Create a package finder appropriate to this requirement command. + + :param ignore_requires_python: Whether to ignore incompatible + "Requires-Python" values in links. Defaults to False. + """ + link_collector = LinkCollector.create(session, options=options) + selection_prefs = SelectionPreferences( + allow_yanked=True, + format_control=options.format_control, + allow_all_prereleases=options.pre, + prefer_binary=options.prefer_binary, + ignore_requires_python=ignore_requires_python, + ) + + return PackageFinder.create( + link_collector=link_collector, + selection_prefs=selection_prefs, + target_python=target_python, + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/spinners.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/spinners.py new file mode 100644 index 0000000..58aad28 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/spinners.py @@ -0,0 +1,235 @@ +from __future__ import annotations + +import contextlib +import itertools +import logging +import sys +import time +from collections.abc import Generator +from typing import IO, Final + +from pip._vendor.rich.console import ( + Console, + ConsoleOptions, + RenderableType, + RenderResult, +) +from pip._vendor.rich.live import Live +from pip._vendor.rich.measure import Measurement +from pip._vendor.rich.text import Text + +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.logging import get_console, get_indentation + +logger = logging.getLogger(__name__) + +SPINNER_CHARS: Final = r"-\|/" +SPINS_PER_SECOND: Final = 8 + + +class SpinnerInterface: + def spin(self) -> None: + raise NotImplementedError() + + def finish(self, final_status: str) -> None: + raise NotImplementedError() + + +class InteractiveSpinner(SpinnerInterface): + def __init__( + self, + message: str, + file: IO[str] | None = None, + spin_chars: str = SPINNER_CHARS, + # Empirically, 8 updates/second looks nice + min_update_interval_seconds: float = 1 / SPINS_PER_SECOND, + ): + self._message = message + if file is None: + file = sys.stdout + self._file = file + self._rate_limiter = RateLimiter(min_update_interval_seconds) + self._finished = False + + self._spin_cycle = itertools.cycle(spin_chars) + + self._file.write(" " * get_indentation() + self._message + " ... 
") + self._width = 0 + + def _write(self, status: str) -> None: + assert not self._finished + # Erase what we wrote before by backspacing to the beginning, writing + # spaces to overwrite the old text, and then backspacing again + backup = "\b" * self._width + self._file.write(backup + " " * self._width + backup) + # Now we have a blank slate to add our status + self._file.write(status) + self._width = len(status) + self._file.flush() + self._rate_limiter.reset() + + def spin(self) -> None: + if self._finished: + return + if not self._rate_limiter.ready(): + return + self._write(next(self._spin_cycle)) + + def finish(self, final_status: str) -> None: + if self._finished: + return + self._write(final_status) + self._file.write("\n") + self._file.flush() + self._finished = True + + +# Used for dumb terminals, non-interactive installs (no tty), etc. +# We still print updates occasionally (once every 60 seconds by default) to +# act as a keep-alive for systems like Travis-CI that take lack-of-output as +# an indication that a task has frozen. +class NonInteractiveSpinner(SpinnerInterface): + def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None: + self._message = message + self._finished = False + self._rate_limiter = RateLimiter(min_update_interval_seconds) + self._update("started") + + def _update(self, status: str) -> None: + assert not self._finished + self._rate_limiter.reset() + logger.info("%s: %s", self._message, status) + + def spin(self) -> None: + if self._finished: + return + if not self._rate_limiter.ready(): + return + self._update("still running...") + + def finish(self, final_status: str) -> None: + if self._finished: + return + self._update(f"finished with status '{final_status}'") + self._finished = True + + +class RateLimiter: + def __init__(self, min_update_interval_seconds: float) -> None: + self._min_update_interval_seconds = min_update_interval_seconds + self._last_update: float = 0 + + def ready(self) -> bool: + now = time.time() + delta = now - self._last_update + return delta >= self._min_update_interval_seconds + + def reset(self) -> None: + self._last_update = time.time() + + +@contextlib.contextmanager +def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]: + # Interactive spinner goes directly to sys.stdout rather than being routed + # through the logging system, but it acts like it has level INFO, + # i.e. it's only displayed if we're at level INFO or better. + # Non-interactive spinner goes through the logging system, so it is always + # in sync with logging configuration. + if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: + spinner: SpinnerInterface = InteractiveSpinner(message) + else: + spinner = NonInteractiveSpinner(message) + try: + with hidden_cursor(sys.stdout): + yield spinner + except KeyboardInterrupt: + spinner.finish("canceled") + raise + except Exception: + spinner.finish("error") + raise + else: + spinner.finish("done") + + +class _PipRichSpinner: + """ + Custom rich spinner that matches the style of the legacy spinners. + + (*) Updates will be handled in a background thread by a rich live panel + which will call render() automatically at the appropriate time. 
+ """ + + def __init__(self, label: str) -> None: + self.label = label + self._spin_cycle = itertools.cycle(SPINNER_CHARS) + self._spinner_text = "" + self._finished = False + self._indent = get_indentation() * " " + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + yield self.render() + + def __rich_measure__( + self, console: Console, options: ConsoleOptions + ) -> Measurement: + text = self.render() + return Measurement.get(console, options, text) + + def render(self) -> RenderableType: + if not self._finished: + self._spinner_text = next(self._spin_cycle) + + return Text.assemble(self._indent, self.label, " ... ", self._spinner_text) + + def finish(self, status: str) -> None: + """Stop spinning and set a final status message.""" + self._spinner_text = status + self._finished = True + + +@contextlib.contextmanager +def open_rich_spinner(label: str, console: Console | None = None) -> Generator[None]: + if not logger.isEnabledFor(logging.INFO): + # Don't show spinner if --quiet is given. + yield + return + + console = console or get_console() + spinner = _PipRichSpinner(label) + with Live(spinner, refresh_per_second=SPINS_PER_SECOND, console=console): + try: + yield + except KeyboardInterrupt: + spinner.finish("canceled") + raise + except Exception: + spinner.finish("error") + raise + else: + spinner.finish("done") + + +HIDE_CURSOR = "\x1b[?25l" +SHOW_CURSOR = "\x1b[?25h" + + +@contextlib.contextmanager +def hidden_cursor(file: IO[str]) -> Generator[None, None, None]: + # The Windows terminal does not support the hide/show cursor ANSI codes, + # even via colorama. So don't even try. + if WINDOWS: + yield + # We don't want to clutter the output with control characters if we're + # writing to a file, or if the user is running with --quiet. + # See https://github.com/pypa/pip/issues/3418 + elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO: + yield + else: + file.write(HIDE_CURSOR) + try: + yield + finally: + file.write(SHOW_CURSOR) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/cli/status_codes.py b/.venv/lib/python3.12/site-packages/pip/_internal/cli/status_codes.py new file mode 100644 index 0000000..5e29502 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/cli/status_codes.py @@ -0,0 +1,6 @@ +SUCCESS = 0 +ERROR = 1 +UNKNOWN_ERROR = 2 +VIRTUALENV_NOT_FOUND = 3 +PREVIOUS_BUILD_DIR_ERROR = 4 +NO_MATCHES_FOUND = 23 diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__init__.py new file mode 100644 index 0000000..bedeca9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__init__.py @@ -0,0 +1,139 @@ +""" +Package containing all pip commands +""" + +from __future__ import annotations + +import importlib +from collections import namedtuple +from typing import Any + +from pip._internal.cli.base_command import Command + +CommandInfo = namedtuple("CommandInfo", "module_path, class_name, summary") + +# This dictionary does a bunch of heavy lifting for help output: +# - Enables avoiding additional (costly) imports for presenting `--help`. +# - The ordering matters for help display. +# +# Even though the module path starts with the same "pip._internal.commands" +# prefix, the full path makes testing easier (specifically when modifying +# `commands_dict` in test setup / teardown). 
+commands_dict: dict[str, CommandInfo] = { + "install": CommandInfo( + "pip._internal.commands.install", + "InstallCommand", + "Install packages.", + ), + "lock": CommandInfo( + "pip._internal.commands.lock", + "LockCommand", + "Generate a lock file.", + ), + "download": CommandInfo( + "pip._internal.commands.download", + "DownloadCommand", + "Download packages.", + ), + "uninstall": CommandInfo( + "pip._internal.commands.uninstall", + "UninstallCommand", + "Uninstall packages.", + ), + "freeze": CommandInfo( + "pip._internal.commands.freeze", + "FreezeCommand", + "Output installed packages in requirements format.", + ), + "inspect": CommandInfo( + "pip._internal.commands.inspect", + "InspectCommand", + "Inspect the python environment.", + ), + "list": CommandInfo( + "pip._internal.commands.list", + "ListCommand", + "List installed packages.", + ), + "show": CommandInfo( + "pip._internal.commands.show", + "ShowCommand", + "Show information about installed packages.", + ), + "check": CommandInfo( + "pip._internal.commands.check", + "CheckCommand", + "Verify installed packages have compatible dependencies.", + ), + "config": CommandInfo( + "pip._internal.commands.configuration", + "ConfigurationCommand", + "Manage local and global configuration.", + ), + "search": CommandInfo( + "pip._internal.commands.search", + "SearchCommand", + "Search PyPI for packages.", + ), + "cache": CommandInfo( + "pip._internal.commands.cache", + "CacheCommand", + "Inspect and manage pip's wheel cache.", + ), + "index": CommandInfo( + "pip._internal.commands.index", + "IndexCommand", + "Inspect information available from package indexes.", + ), + "wheel": CommandInfo( + "pip._internal.commands.wheel", + "WheelCommand", + "Build wheels from your requirements.", + ), + "hash": CommandInfo( + "pip._internal.commands.hash", + "HashCommand", + "Compute hashes of package archives.", + ), + "completion": CommandInfo( + "pip._internal.commands.completion", + "CompletionCommand", + "A helper command used for command completion.", + ), + "debug": CommandInfo( + "pip._internal.commands.debug", + "DebugCommand", + "Show information useful for debugging.", + ), + "help": CommandInfo( + "pip._internal.commands.help", + "HelpCommand", + "Show help for commands.", + ), +} + + +def create_command(name: str, **kwargs: Any) -> Command: + """ + Create an instance of the Command class with the given name. 
+ """ + module_path, class_name, summary = commands_dict[name] + module = importlib.import_module(module_path) + command_class = getattr(module, class_name) + command = command_class(name=name, summary=summary, **kwargs) + + return command + + +def get_similar_commands(name: str) -> str | None: + """Command name auto-correct.""" + from difflib import get_close_matches + + name = name.lower() + + close_commands = get_close_matches(name, commands_dict.keys()) + + if close_commands: + return close_commands[0] + else: + return None diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..07e1bbd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/cache.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/cache.cpython-312.pyc new file mode 100644 index 0000000..6a13625 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/cache.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/check.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/check.cpython-312.pyc new file mode 100644 index 0000000..b4bae4e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/check.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/completion.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/completion.cpython-312.pyc new file mode 100644 index 0000000..3e24215 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/completion.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-312.pyc new file mode 100644 index 0000000..ae934cb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/debug.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/debug.cpython-312.pyc new file mode 100644 index 0000000..db13988 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/debug.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/download.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/download.cpython-312.pyc new file mode 100644 index 0000000..80b4859 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/download.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-312.pyc new file mode 100644 index 0000000..1f0f6b4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-312.pyc 
differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/hash.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/hash.cpython-312.pyc new file mode 100644 index 0000000..115f066 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/hash.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/help.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/help.cpython-312.pyc new file mode 100644 index 0000000..c3c8f8e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/help.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/index.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/index.cpython-312.pyc new file mode 100644 index 0000000..6197314 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/index.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-312.pyc new file mode 100644 index 0000000..e5a5d50 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/install.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/install.cpython-312.pyc new file mode 100644 index 0000000..d746dfe Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/install.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/list.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/list.cpython-312.pyc new file mode 100644 index 0000000..259f98a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/list.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/lock.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/lock.cpython-312.pyc new file mode 100644 index 0000000..d9ebbcb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/lock.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/search.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/search.cpython-312.pyc new file mode 100644 index 0000000..ecfc404 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/search.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/show.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/show.cpython-312.pyc new file mode 100644 index 0000000..4d73a1b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/show.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-312.pyc 
new file mode 100644 index 0000000..4e6c440 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-312.pyc new file mode 100644 index 0000000..4ea7c9d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/cache.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/cache.py new file mode 100644 index 0000000..c8e7aed --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/cache.py @@ -0,0 +1,231 @@ +import os +import textwrap +from optparse import Values +from typing import Callable + +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import ERROR, SUCCESS +from pip._internal.exceptions import CommandError, PipError +from pip._internal.utils import filesystem +from pip._internal.utils.logging import getLogger +from pip._internal.utils.misc import format_size + +logger = getLogger(__name__) + + +class CacheCommand(Command): + """ + Inspect and manage pip's wheel cache. + + Subcommands: + + - dir: Show the cache directory. + - info: Show information about the cache. + - list: List filenames of packages stored in the cache. + - remove: Remove one or more packages from the cache. + - purge: Remove all items from the cache. + + ``<pattern>`` can be a glob expression or a package name. + """ + + ignore_require_venv = True + usage = """ + %prog dir + %prog info + %prog list [<pattern>] [--format=[human, abspath]] + %prog remove <pattern> + %prog purge + """ + + def add_options(self) -> None: + self.cmd_opts.add_option( + "--format", + action="store", + dest="list_format", + default="human", + choices=("human", "abspath"), + help="Select the output format among: human (default) or abspath", + ) + + self.parser.insert_option_group(0, self.cmd_opts) + + def handler_map(self) -> dict[str, Callable[[Values, list[str]], None]]: + return { + "dir": self.get_cache_dir, + "info": self.get_cache_info, + "list": self.list_cache_items, + "remove": self.remove_cache_items, + "purge": self.purge_cache, + } + + def run(self, options: Values, args: list[str]) -> int: + handler_map = self.handler_map() + + if not options.cache_dir: + logger.error("pip cache commands cannot function since cache is disabled.") + return ERROR + + # Determine action + if not args or args[0] not in handler_map: + logger.error( + "Need an action (%s) to perform.", + ", ".join(sorted(handler_map)), + ) + return ERROR + + action = args[0] + + # Error handling happens here, not in the action-handlers. 
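+ # (e.g. "pip cache remove numpy" reaches this point with
+ # action == "remove" and args[1:] == ["numpy"]; "numpy" is just an
+ # illustrative pattern.)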
+ try: + handler_map[action](options, args[1:]) + except PipError as e: + logger.error(e.args[0]) + return ERROR + + return SUCCESS + + def get_cache_dir(self, options: Values, args: list[str]) -> None: + if args: + raise CommandError("Too many arguments") + + logger.info(options.cache_dir) + + def get_cache_info(self, options: Values, args: list[str]) -> None: + if args: + raise CommandError("Too many arguments") + + num_http_files = len(self._find_http_files(options)) + num_packages = len(self._find_wheels(options, "*")) + + http_cache_location = self._cache_dir(options, "http-v2") + old_http_cache_location = self._cache_dir(options, "http") + wheels_cache_location = self._cache_dir(options, "wheels") + http_cache_size = filesystem.format_size( + filesystem.directory_size(http_cache_location) + + filesystem.directory_size(old_http_cache_location) + ) + wheels_cache_size = filesystem.format_directory_size(wheels_cache_location) + + message = ( + textwrap.dedent( + """ + Package index page cache location (pip v23.3+): {http_cache_location} + Package index page cache location (older pips): {old_http_cache_location} + Package index page cache size: {http_cache_size} + Number of HTTP files: {num_http_files} + Locally built wheels location: {wheels_cache_location} + Locally built wheels size: {wheels_cache_size} + Number of locally built wheels: {package_count} + """ # noqa: E501 + ) + .format( + http_cache_location=http_cache_location, + old_http_cache_location=old_http_cache_location, + http_cache_size=http_cache_size, + num_http_files=num_http_files, + wheels_cache_location=wheels_cache_location, + package_count=num_packages, + wheels_cache_size=wheels_cache_size, + ) + .strip() + ) + + logger.info(message) + + def list_cache_items(self, options: Values, args: list[str]) -> None: + if len(args) > 1: + raise CommandError("Too many arguments") + + if args: + pattern = args[0] + else: + pattern = "*" + + files = self._find_wheels(options, pattern) + if options.list_format == "human": + self.format_for_human(files) + else: + self.format_for_abspath(files) + + def format_for_human(self, files: list[str]) -> None: + if not files: + logger.info("No locally built wheels cached.") + return + + results = [] + for filename in files: + wheel = os.path.basename(filename) + size = filesystem.format_file_size(filename) + results.append(f" - {wheel} ({size})") + logger.info("Cache contents:\n") + logger.info("\n".join(sorted(results))) + + def format_for_abspath(self, files: list[str]) -> None: + if files: + logger.info("\n".join(sorted(files))) + + def remove_cache_items(self, options: Values, args: list[str]) -> None: + if len(args) > 1: + raise CommandError("Too many arguments") + + if not args: + raise CommandError("Please provide a pattern") + + files = self._find_wheels(options, args[0]) + + no_matching_msg = "No matching packages" + if args[0] == "*": + # Only fetch http files if no specific pattern given + files += self._find_http_files(options) + else: + # Add the pattern to the log message + no_matching_msg += f' for pattern "{args[0]}"' + + if not files: + logger.warning(no_matching_msg) + + bytes_removed = 0 + for filename in files: + bytes_removed += os.stat(filename).st_size + os.unlink(filename) + logger.verbose("Removed %s", filename) + logger.info("Files removed: %s (%s)", len(files), format_size(bytes_removed)) + + def purge_cache(self, options: Values, args: list[str]) -> None: + if args: + raise CommandError("Too many arguments") + + return self.remove_cache_items(options, ["*"]) + + 
def _cache_dir(self, options: Values, subdir: str) -> str: + return os.path.join(options.cache_dir, subdir) + + def _find_http_files(self, options: Values) -> list[str]: + old_http_dir = self._cache_dir(options, "http") + new_http_dir = self._cache_dir(options, "http-v2") + return filesystem.find_files(old_http_dir, "*") + filesystem.find_files( + new_http_dir, "*" + ) + + def _find_wheels(self, options: Values, pattern: str) -> list[str]: + wheel_dir = self._cache_dir(options, "wheels") + + # The wheel filename format, as specified in PEP 427, is: + # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl + # + # Additionally, non-alphanumeric values in the distribution are + # normalized to underscores (_), meaning hyphens can never occur + # before `-{version}`. + # + # Given that information: + # - If the pattern we're given contains a hyphen (-), the user is + # providing at least the version. Thus, we can just append `*.whl` + # to match the rest of it. + # - If the pattern we're given doesn't contain a hyphen (-), the + # user is only providing the name. Thus, we append `-*.whl` to + # match the hyphen before the version, followed by anything else. + # + # PEP 427: https://www.python.org/dev/peps/pep-0427/ + pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl") + + return filesystem.find_files(wheel_dir, pattern) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/check.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/check.py new file mode 100644 index 0000000..516757e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/check.py @@ -0,0 +1,66 @@ +import logging +from optparse import Values + +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import ERROR, SUCCESS +from pip._internal.metadata import get_default_environment +from pip._internal.operations.check import ( + check_package_set, + check_unsupported, + create_package_set_from_installed, +) +from pip._internal.utils.compatibility_tags import get_supported +from pip._internal.utils.misc import write_output + +logger = logging.getLogger(__name__) + + +class CheckCommand(Command): + """Verify installed packages have compatible dependencies.""" + + ignore_require_venv = True + usage = """ + %prog [options]""" + + def run(self, options: Values, args: list[str]) -> int: + package_set, parsing_probs = create_package_set_from_installed() + missing, conflicting = check_package_set(package_set) + unsupported = list( + check_unsupported( + get_default_environment().iter_installed_distributions(), + get_supported(), + ) + ) + + for project_name in missing: + version = package_set[project_name].version + for dependency in missing[project_name]: + write_output( + "%s %s requires %s, which is not installed.", + project_name, + version, + dependency[0], + ) + + for project_name in conflicting: + version = package_set[project_name].version + for dep_name, dep_version, req in conflicting[project_name]: + write_output( + "%s %s has requirement %s, but you have %s %s.", + project_name, + version, + req, + dep_name, + dep_version, + ) + for package in unsupported: + write_output( + "%s %s is not supported on this platform", + package.raw_name, + package.version, + ) + if missing or conflicting or parsing_probs or unsupported: + return ERROR + else: + write_output("No broken requirements found.") + return SUCCESS diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/completion.py 
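Aside: the hyphen rule in _find_wheels above can be sanity-checked in isolation with the standard library. A minimal sketch, using an illustrative cached wheel filename (not taken from the patch):

    import fnmatch

    def wheel_glob(pattern: str) -> str:
        # Mirrors _find_wheels: a hyphen means the version is already
        # (partially) given, so "*.whl" suffices; otherwise "-*.whl"
        # bridges the name/version separator of the PEP 427 format.
        return pattern + ("*.whl" if "-" in pattern else "-*.whl")

    cached = "requests-2.32.3-py3-none-any.whl"  # illustrative filename
    print(fnmatch.fnmatch(cached, wheel_glob("requests")))       # True
    print(fnmatch.fnmatch(cached, wheel_glob("requests-2.32")))  # True
    print(fnmatch.fnmatch(cached, wheel_glob("requests-3")))     # False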
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/completion.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/completion.py
new file mode 100644
index 0000000..6d9597b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/completion.py
@@ -0,0 +1,135 @@
+import sys
+import textwrap
+from optparse import Values
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.utils.misc import get_prog
+
+BASE_COMPLETION = """
+# pip {shell} completion start{script}# pip {shell} completion end
+"""
+
+COMPLETION_SCRIPTS = {
+    "bash": """
+        _pip_completion()
+        {{
+            COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\
+                           COMP_CWORD=$COMP_CWORD \\
+                           PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )
+        }}
+        complete -o default -F _pip_completion {prog}
+    """,
+    "zsh": """
+        #compdef -P pip[0-9.]#
+        __pip() {{
+          compadd $( COMP_WORDS="$words[*]" \\
+                     COMP_CWORD=$((CURRENT-1)) \\
+                     PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )
+        }}
+        if [[ $zsh_eval_context[-1] == loadautofunc ]]; then
+          # autoload from fpath, call function directly
+          __pip "$@"
+        else
+          # eval/source/. command, register function for later
+          compdef __pip -P 'pip[0-9.]#'
+        fi
+    """,
+    "fish": """
+        function __fish_complete_pip
+            set -lx COMP_WORDS \\
+                (commandline --current-process --tokenize --cut-at-cursor) \\
+                (commandline --current-token --cut-at-cursor)
+            set -lx COMP_CWORD (math (count $COMP_WORDS) - 1)
+            set -lx PIP_AUTO_COMPLETE 1
+            set -l completions
+            if string match -q '2.*' $version
+                set completions (eval $COMP_WORDS[1])
+            else
+                set completions ($COMP_WORDS[1])
+            end
+            string split \\  -- $completions
+        end
+        complete -fa "(__fish_complete_pip)" -c {prog}
+    """,
+    "powershell": """
+        if ((Test-Path Function:\\TabExpansion) -and -not `
+            (Test-Path Function:\\_pip_completeBackup)) {{
+            Rename-Item Function:\\TabExpansion _pip_completeBackup
+        }}
+        function TabExpansion($line, $lastWord) {{
+            $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()
+            if ($lastBlock.StartsWith("{prog} ")) {{
+                $Env:COMP_WORDS=$lastBlock
+                $Env:COMP_CWORD=$lastBlock.Split().Length - 1
+                $Env:PIP_AUTO_COMPLETE=1
+                (& {prog}).Split()
+                Remove-Item Env:COMP_WORDS
+                Remove-Item Env:COMP_CWORD
+                Remove-Item Env:PIP_AUTO_COMPLETE
+            }}
+            elseif (Test-Path Function:\\_pip_completeBackup) {{
+                # Fall back on existing tab expansion
+                _pip_completeBackup $line $lastWord
+            }}
+        }}
+    """,
+}
+
+
+class CompletionCommand(Command):
+    """A helper command to be used for command completion."""
+
+    ignore_require_venv = True
+
+    def add_options(self) -> None:
+        self.cmd_opts.add_option(
+            "--bash",
+            "-b",
+            action="store_const",
+            const="bash",
+            dest="shell",
+            help="Emit completion code for bash",
+        )
+        self.cmd_opts.add_option(
+            "--zsh",
+            "-z",
+            action="store_const",
+            const="zsh",
+            dest="shell",
+            help="Emit completion code for zsh",
+        )
+        self.cmd_opts.add_option(
+            "--fish",
+            "-f",
+            action="store_const",
+            const="fish",
+            dest="shell",
+            help="Emit completion code for fish",
+        )
+        self.cmd_opts.add_option(
+            "--powershell",
+            "-p",
+            action="store_const",
+            const="powershell",
+            dest="shell",
+            help="Emit completion code for powershell",
+        )
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options: Values, args: list[str]) -> int:
+        """Prints the completion code of the given shell"""
+        shells = COMPLETION_SCRIPTS.keys()
+        shell_options = ["--" + shell for shell in sorted(shells)]
+        if options.shell in shells:
+            script = textwrap.dedent(
+                COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog())
+            )
+            print(BASE_COMPLETION.format(script=script, shell=options.shell))
+            return SUCCESS
+        else:
+            sys.stderr.write(
+                "ERROR: You must pass {}\n".format(" or ".join(shell_options))
+            )
+            return SUCCESS
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/configuration.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/configuration.py
new file mode 100644
index 0000000..7bcea04
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/configuration.py
@@ -0,0 +1,288 @@
+from __future__ import annotations
+
+import logging
+import os
+import subprocess
+from optparse import Values
+from typing import Any, Callable
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.configuration import (
+    Configuration,
+    Kind,
+    get_configuration_files,
+    kinds,
+)
+from pip._internal.exceptions import PipError
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import get_prog, write_output
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigurationCommand(Command):
+    """
+    Manage local and global configuration.
+
+    Subcommands:
+
+    - list: List the active configuration (or from the file specified)
+    - edit: Edit the configuration file in an editor
+    - get: Get the value associated with command.option
+    - set: Set the command.option=value
+    - unset: Unset the value associated with command.option
+    - debug: List the configuration files and values defined under them
+
+    Configuration keys should be dot separated command and option name,
+    with the special prefix "global" affecting any command. For example,
+    "pip config set global.index-url https://example.org/" would configure
+    the index url for all commands, but "pip config set download.timeout 10"
+    would configure a 10 second timeout only for "pip download" commands.
+
+    If none of --user, --global and --site are passed, a virtual
+    environment configuration file is used if one is active and the file
+    exists. Otherwise, all modifications happen to the user file by
+    default.
+    """
+
+    ignore_require_venv = True
+    usage = """
+        %prog [<file-option>] list
+        %prog [<file-option>] [--editor <editor-path>] edit
+
+        %prog [<file-option>] get command.option
+        %prog [<file-option>] set command.option value
+        %prog [<file-option>] unset command.option
+        %prog [<file-option>] debug
+    """

+    def add_options(self) -> None:
+        self.cmd_opts.add_option(
+            "--editor",
+            dest="editor",
+            action="store",
+            default=None,
+            help=(
+                "Editor to use to edit the file. Uses VISUAL or EDITOR "
+                "environment variables if not provided."
+            ),
+        )
+
+        self.cmd_opts.add_option(
+            "--global",
+            dest="global_file",
+            action="store_true",
+            default=False,
+            help="Use the system-wide configuration file only",
+        )
+
+        self.cmd_opts.add_option(
+            "--user",
+            dest="user_file",
+            action="store_true",
+            default=False,
+            help="Use the user configuration file only",
+        )
+
+        self.cmd_opts.add_option(
+            "--site",
+            dest="site_file",
+            action="store_true",
+            default=False,
+            help="Use the current environment configuration file only",
+        )
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def handler_map(self) -> dict[str, Callable[[Values, list[str]], None]]:
+        return {
+            "list": self.list_values,
+            "edit": self.open_in_editor,
+            "get": self.get_name,
+            "set": self.set_name_value,
+            "unset": self.unset_name,
+            "debug": self.list_config_values,
+        }
+
+    def run(self, options: Values, args: list[str]) -> int:
+        handler_map = self.handler_map()
+
+        # Determine action
+        if not args or args[0] not in handler_map:
+            logger.error(
+                "Need an action (%s) to perform.",
+                ", ".join(sorted(handler_map)),
+            )
+            return ERROR
+
+        action = args[0]
+
+        # Determine which configuration files are to be loaded
+        #    Depends on whether the command is modifying.
+        try:
+            load_only = self._determine_file(
+                options, need_value=(action in ["get", "set", "unset", "edit"])
+            )
+        except PipError as e:
+            logger.error(e.args[0])
+            return ERROR
+
+        # Load a new configuration
+        self.configuration = Configuration(
+            isolated=options.isolated_mode, load_only=load_only
+        )
+        self.configuration.load()
+
+        # Error handling happens here, not in the action-handlers.
+        try:
+            handler_map[action](options, args[1:])
+        except PipError as e:
+            logger.error(e.args[0])
+            return ERROR
+
+        return SUCCESS
+
+    def _determine_file(self, options: Values, need_value: bool) -> Kind | None:
+        file_options = [
+            key
+            for key, value in (
+                (kinds.USER, options.user_file),
+                (kinds.GLOBAL, options.global_file),
+                (kinds.SITE, options.site_file),
+            )
+            if value
+        ]
+
+        if not file_options:
+            if not need_value:
+                return None
+            # Default to user, unless there's a site file.
+            elif any(
+                os.path.exists(site_config_file)
+                for site_config_file in get_configuration_files()[kinds.SITE]
+            ):
+                return kinds.SITE
+            else:
+                return kinds.USER
+        elif len(file_options) == 1:
+            return file_options[0]
+
+        raise PipError(
+            "Need exactly one file to operate upon "
+            "(--user, --site, --global) to perform."
+        )
+
+    def list_values(self, options: Values, args: list[str]) -> None:
+        self._get_n_args(args, "list", n=0)
+
+        for key, value in sorted(self.configuration.items()):
+            write_output("%s=%r", key, value)
+
+    def get_name(self, options: Values, args: list[str]) -> None:
+        key = self._get_n_args(args, "get [name]", n=1)
+        value = self.configuration.get_value(key)
+
+        write_output("%s", value)
+
+    def set_name_value(self, options: Values, args: list[str]) -> None:
+        key, value = self._get_n_args(args, "set [name] [value]", n=2)
+        self.configuration.set_value(key, value)
+
+        self._save_configuration()
+
+    def unset_name(self, options: Values, args: list[str]) -> None:
+        key = self._get_n_args(args, "unset [name]", n=1)
+        self.configuration.unset_value(key)
+
+        self._save_configuration()
+
+    def list_config_values(self, options: Values, args: list[str]) -> None:
+        """List config key-value pairs across different config files"""
+        self._get_n_args(args, "debug", n=0)
+
+        self.print_env_var_values()
+        # Iterate over config files and print if they exist, and the
+        # key-value pairs present in them if they do
+        for variant, files in sorted(self.configuration.iter_config_files()):
+            write_output("%s:", variant)
+            for fname in files:
+                with indent_log():
+                    file_exists = os.path.exists(fname)
+                    write_output("%s, exists: %r", fname, file_exists)
+                    if file_exists:
+                        self.print_config_file_values(variant, fname)
+
+    def print_config_file_values(self, variant: Kind, fname: str) -> None:
+        """Get key-value pairs from the file of a variant"""
+        for name, value in self.configuration.get_values_in_config(variant).items():
+            with indent_log():
+                if name == fname:
+                    for confname, confvalue in value.items():
+                        write_output("%s: %s", confname, confvalue)
+
+    def print_env_var_values(self) -> None:
+        """Get key-values pairs present as environment variables"""
+        write_output("%s:", "env_var")
+        with indent_log():
+            for key, value in sorted(self.configuration.get_environ_vars()):
+                env_var = f"PIP_{key.upper()}"
+                write_output("%s=%r", env_var, value)
+
+    def open_in_editor(self, options: Values, args: list[str]) -> None:
+        editor = self._determine_editor(options)
+
+        fname = self.configuration.get_file_to_edit()
+        if fname is None:
+            raise PipError("Could not determine appropriate file.")
+        elif '"' in fname:
+            # This shouldn't happen, unless we see a username like that.
+            # If that happens, we'd appreciate a pull request fixing this.
+            raise PipError(
+                f'Can not open an editor for a file name containing "\n{fname}'
+            )
+
+        try:
+            subprocess.check_call(f'{editor} "{fname}"', shell=True)
+        except FileNotFoundError as e:
+            if not e.filename:
+                e.filename = editor
+            raise
+        except subprocess.CalledProcessError as e:
+            raise PipError(f"Editor Subprocess exited with exit code {e.returncode}")
+
+    def _get_n_args(self, args: list[str], example: str, n: int) -> Any:
+        """Helper to make sure the command got the right number of arguments"""
+        if len(args) != n:
+            msg = (
+                f"Got unexpected number of arguments, expected {n}. "
+                f'(example: "{get_prog()} config {example}")'
+            )
+            raise PipError(msg)
+
+        if n == 1:
+            return args[0]
+        else:
+            return args
+
+    def _save_configuration(self) -> None:
+        # We successfully ran a modifying command. Need to save the
+        # configuration.
+        try:
+            self.configuration.save()
+        except Exception:
+            logger.exception(
+                "Unable to save configuration. Please report this as a bug."
+            )
+            raise PipError("Internal Error.")
+
+    def _determine_editor(self, options: Values) -> str:
+        if options.editor is not None:
+            return options.editor
+        elif "VISUAL" in os.environ:
+            return os.environ["VISUAL"]
+        elif "EDITOR" in os.environ:
+            return os.environ["EDITOR"]
+        else:
+            raise PipError("Could not determine editor to use.")
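Aside: the dotted-key convention in the ConfigurationCommand docstring above boils down to a two-level lookup. A toy model of that precedence, not pip's actual resolution code:

    def effective_value(config: dict[str, str], command: str, option: str) -> str | None:
        # A command-specific key wins over the "global" pseudo-command.
        return config.get(f"{command}.{option}", config.get(f"global.{option}"))

    cfg = {"global.timeout": "15", "download.timeout": "10"}  # illustrative values
    print(effective_value(cfg, "download", "timeout"))  # 10
    print(effective_value(cfg, "install", "timeout"))   # 15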
+ """ + for module_name, expected_version in vendor_txt_versions.items(): + extra_message = "" + actual_version = get_vendor_version_from_module(module_name) + if not actual_version: + extra_message = ( + " (Unable to locate actual module version, using" + " vendor.txt specified version)" + ) + actual_version = expected_version + elif parse_version(actual_version) != parse_version(expected_version): + extra_message = ( + " (CONFLICT: vendor.txt suggests version should" + f" be {expected_version})" + ) + logger.info("%s==%s%s", module_name, actual_version, extra_message) + + +def show_vendor_versions() -> None: + logger.info("vendored library versions:") + + vendor_txt_versions = create_vendor_txt_map() + with indent_log(): + show_actual_vendor_versions(vendor_txt_versions) + + +def show_tags(options: Values) -> None: + tag_limit = 10 + + target_python = make_target_python(options) + tags = target_python.get_sorted_tags() + + # Display the target options that were explicitly provided. + formatted_target = target_python.format_given() + suffix = "" + if formatted_target: + suffix = f" (target: {formatted_target})" + + msg = f"Compatible tags: {len(tags)}{suffix}" + logger.info(msg) + + if options.verbose < 1 and len(tags) > tag_limit: + tags_limited = True + tags = tags[:tag_limit] + else: + tags_limited = False + + with indent_log(): + for tag in tags: + logger.info(str(tag)) + + if tags_limited: + msg = f"...\n[First {tag_limit} tags shown. Pass --verbose to show all.]" + logger.info(msg) + + +def ca_bundle_info(config: Configuration) -> str: + levels = {key.split(".", 1)[0] for key, _ in config.items()} + if not levels: + return "Not specified" + + levels_that_override_global = ["install", "wheel", "download"] + global_overriding_level = [ + level for level in levels if level in levels_that_override_global + ] + if not global_overriding_level: + return "global" + + if "global" in levels: + levels.remove("global") + return ", ".join(levels) + + +class DebugCommand(Command): + """ + Display debug information. + """ + + usage = """ + %prog """ + ignore_require_venv = True + + def add_options(self) -> None: + cmdoptions.add_target_python_options(self.cmd_opts) + self.parser.insert_option_group(0, self.cmd_opts) + self.parser.config.load() + + def run(self, options: Values, args: list[str]) -> int: + logger.warning( + "This command is only meant for debugging. " + "Do not use this with automation for parsing and getting these " + "details, since the output and options of this command may " + "change without notice." 
+ ) + show_value("pip version", get_pip_version()) + show_value("sys.version", sys.version) + show_value("sys.executable", sys.executable) + show_value("sys.getdefaultencoding", sys.getdefaultencoding()) + show_value("sys.getfilesystemencoding", sys.getfilesystemencoding()) + show_value( + "locale.getpreferredencoding", + locale.getpreferredencoding(), + ) + show_value("sys.platform", sys.platform) + show_sys_implementation() + + show_value("'cert' config value", ca_bundle_info(self.parser.config)) + show_value("REQUESTS_CA_BUNDLE", os.environ.get("REQUESTS_CA_BUNDLE")) + show_value("CURL_CA_BUNDLE", os.environ.get("CURL_CA_BUNDLE")) + show_value("pip._vendor.certifi.where()", where()) + show_value("pip._vendor.DEBUNDLED", pip._vendor.DEBUNDLED) + + show_vendor_versions() + + show_tags(options) + + return SUCCESS diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/download.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/download.py new file mode 100644 index 0000000..900fb40 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/download.py @@ -0,0 +1,145 @@ +import logging +import os +from optparse import Values + +from pip._internal.cli import cmdoptions +from pip._internal.cli.cmdoptions import make_target_python +from pip._internal.cli.req_command import RequirementCommand, with_cleanup +from pip._internal.cli.status_codes import SUCCESS +from pip._internal.operations.build.build_tracker import get_build_tracker +from pip._internal.req.req_install import check_legacy_setup_py_options +from pip._internal.utils.misc import ensure_dir, normalize_path, write_output +from pip._internal.utils.temp_dir import TempDirectory + +logger = logging.getLogger(__name__) + + +class DownloadCommand(RequirementCommand): + """ + Download packages from: + + - PyPI (and other indexes) using requirement specifiers. + - VCS project urls. + - Local project directories. + - Local or remote source archives. + + pip also supports downloading from "requirements files", which provide + an easy way to specify a whole environment to be downloaded. + """ + + usage = """ + %prog [options] [package-index-options] ... + %prog [options] -r [package-index-options] ... + %prog [options] ... + %prog [options] ... 
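Aside: create_vendor_txt_map in debug.py above reduces vendor.txt to a name -> version dict with two string operations. The same logic run against a fabricated sample (the package versions are invented for illustration):

    sample = """\
    CacheControl==0.14.3
    requests==2.32.4
    # a comment line without the separator is skipped
    urllib3==1.26.20  # text after the first space is cut off
    """
    lines = [ln.strip().split(" ", 1)[0] for ln in sample.splitlines() if "==" in ln]
    print(dict(line.split("==", 1) for line in lines))
    # {'CacheControl': '0.14.3', 'requests': '2.32.4', 'urllib3': '1.26.20'}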
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/download.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/download.py
new file mode 100644
index 0000000..900fb40
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/download.py
@@ -0,0 +1,145 @@
+import logging
+import os
+from optparse import Values
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.cmdoptions import make_target_python
+from pip._internal.cli.req_command import RequirementCommand, with_cleanup
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.operations.build.build_tracker import get_build_tracker
+from pip._internal.req.req_install import check_legacy_setup_py_options
+from pip._internal.utils.misc import ensure_dir, normalize_path, write_output
+from pip._internal.utils.temp_dir import TempDirectory
+
+logger = logging.getLogger(__name__)
+
+
+class DownloadCommand(RequirementCommand):
+    """
+    Download packages from:
+
+    - PyPI (and other indexes) using requirement specifiers.
+    - VCS project urls.
+    - Local project directories.
+    - Local or remote source archives.
+
+    pip also supports downloading from "requirements files", which provide
+    an easy way to specify a whole environment to be downloaded.
+    """
+
+    usage = """
+      %prog [options] <requirement specifier> [package-index-options] ...
+      %prog [options] -r <requirements file> [package-index-options] ...
+      %prog [options] <vcs project url> ...
+      %prog [options] <local project path> ...
+      %prog [options] <archive url/path> ..."""
+
+    def add_options(self) -> None:
+        self.cmd_opts.add_option(cmdoptions.constraints())
+        self.cmd_opts.add_option(cmdoptions.requirements())
+        self.cmd_opts.add_option(cmdoptions.no_deps())
+        self.cmd_opts.add_option(cmdoptions.global_options())
+        self.cmd_opts.add_option(cmdoptions.no_binary())
+        self.cmd_opts.add_option(cmdoptions.only_binary())
+        self.cmd_opts.add_option(cmdoptions.prefer_binary())
+        self.cmd_opts.add_option(cmdoptions.src())
+        self.cmd_opts.add_option(cmdoptions.pre())
+        self.cmd_opts.add_option(cmdoptions.require_hashes())
+        self.cmd_opts.add_option(cmdoptions.progress_bar())
+        self.cmd_opts.add_option(cmdoptions.no_build_isolation())
+        self.cmd_opts.add_option(cmdoptions.use_pep517())
+        self.cmd_opts.add_option(cmdoptions.no_use_pep517())
+        self.cmd_opts.add_option(cmdoptions.check_build_deps())
+        self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
+
+        self.cmd_opts.add_option(
+            "-d",
+            "--dest",
+            "--destination-dir",
+            "--destination-directory",
+            dest="download_dir",
+            metavar="dir",
+            default=os.curdir,
+            help="Download packages into <dir>.",
+        )
+
+        cmdoptions.add_target_python_options(self.cmd_opts)
+
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group,
+            self.parser,
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    @with_cleanup
+    def run(self, options: Values, args: list[str]) -> int:
+        options.ignore_installed = True
+        # editable doesn't really make sense for `pip download`, but the bowels
+        # of the RequirementSet code require that property.
+        options.editables = []
+
+        cmdoptions.check_dist_restriction(options)
+
+        options.download_dir = normalize_path(options.download_dir)
+        ensure_dir(options.download_dir)
+
+        session = self.get_default_session(options)
+
+        target_python = make_target_python(options)
+        finder = self._build_package_finder(
+            options=options,
+            session=session,
+            target_python=target_python,
+            ignore_requires_python=options.ignore_requires_python,
+        )
+
+        build_tracker = self.enter_context(get_build_tracker())
+
+        directory = TempDirectory(
+            delete=not options.no_clean,
+            kind="download",
+            globally_managed=True,
+        )
+
+        reqs = self.get_requirements(args, options, finder, session)
+        check_legacy_setup_py_options(options, reqs)
+
+        preparer = self.make_requirement_preparer(
+            temp_build_dir=directory,
+            options=options,
+            build_tracker=build_tracker,
+            session=session,
+            finder=finder,
+            download_dir=options.download_dir,
+            use_user_site=False,
+            verbosity=self.verbosity,
+        )
+
+        resolver = self.make_resolver(
+            preparer=preparer,
+            finder=finder,
+            options=options,
+            ignore_requires_python=options.ignore_requires_python,
+            use_pep517=options.use_pep517,
+            py_version_info=options.python_version,
+        )
+
+        self.trace_basic_info(finder)
+
+        requirement_set = resolver.resolve(reqs, check_supported_wheels=True)
+
+        downloaded: list[str] = []
+        for req in requirement_set.requirements.values():
+            if req.satisfied_by is None:
+                assert req.name is not None
+                preparer.save_linked_requirement(req)
+                downloaded.append(req.name)
+
+        preparer.prepare_linked_requirements_more(requirement_set.requirements.values())
+
+        if downloaded:
+            write_output("Successfully downloaded %s", " ".join(downloaded))
+
+        return SUCCESS
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/freeze.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/freeze.py
new file mode 100644
index 0000000..7794857
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/freeze.py
@@ -0,0 +1,107 @@
+import sys
+from optparse import Values
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.operations.freeze import freeze
+from pip._internal.utils.compat import stdlib_pkgs
+
+
+def _should_suppress_build_backends() -> bool:
+    return sys.version_info < (3, 12)
+
+
+def _dev_pkgs() -> set[str]:
+    pkgs = {"pip"}
+
+    if _should_suppress_build_backends():
+        pkgs |= {"setuptools", "distribute", "wheel"}
+
+    return pkgs
+
+
+class FreezeCommand(Command):
+    """
+    Output installed packages in requirements format.
+
+    Packages are listed in a case-insensitive sorted order.
+    """
+
+    ignore_require_venv = True
+    usage = """
+      %prog [options]"""
+
+    def add_options(self) -> None:
+        self.cmd_opts.add_option(
+            "-r",
+            "--requirement",
+            dest="requirements",
+            action="append",
+            default=[],
+            metavar="file",
+            help=(
+                "Use the order in the given requirements file and its "
+                "comments when generating output. This option can be "
+                "used multiple times."
+            ),
+        )
+        self.cmd_opts.add_option(
+            "-l",
+            "--local",
+            dest="local",
+            action="store_true",
+            default=False,
+            help=(
+                "If in a virtualenv that has global access, do not output "
+                "globally-installed packages."
+            ),
+        )
+        self.cmd_opts.add_option(
+            "--user",
+            dest="user",
+            action="store_true",
+            default=False,
+            help="Only output packages installed in user-site.",
+        )
+        self.cmd_opts.add_option(cmdoptions.list_path())
+        self.cmd_opts.add_option(
+            "--all",
+            dest="freeze_all",
+            action="store_true",
+            help=(
+                "Do not skip these packages in the output:"
+                " {}".format(", ".join(_dev_pkgs()))
+            ),
+        )
+        self.cmd_opts.add_option(
+            "--exclude-editable",
+            dest="exclude_editable",
+            action="store_true",
+            help="Exclude editable package from output.",
+        )
+        self.cmd_opts.add_option(cmdoptions.list_exclude())
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options: Values, args: list[str]) -> int:
+        skip = set(stdlib_pkgs)
+        if not options.freeze_all:
+            skip.update(_dev_pkgs())
+
+        if options.excludes:
+            skip.update(options.excludes)
+
+        cmdoptions.check_list_path_option(options)
+
+        for line in freeze(
+            requirement=options.requirements,
+            local_only=options.local,
+            user_only=options.user,
+            paths=options.path,
+            isolated=options.isolated_mode,
+            skip=skip,
+            exclude_editable=options.exclude_editable,
+        ):
+            sys.stdout.write(line + "\n")
+        return SUCCESS
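Aside: the skip set assembled in FreezeCommand.run above is three set unions. A sketch with stand-in values (stdlib_pkgs and the options object are simulated here, not imported from pip):

    stdlib_pkgs = {"python", "wsgiref", "argparse"}          # stand-in
    dev_pkgs = {"pip", "setuptools", "distribute", "wheel"}  # pre-3.12 _dev_pkgs()

    freeze_all, excludes = False, {"example-pkg"}            # hypothetical options
    skip = set(stdlib_pkgs)
    if not freeze_all:
        skip |= dev_pkgs
    if excludes:
        skip |= excludes
    print(sorted(skip))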
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/hash.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/hash.py
new file mode 100644
index 0000000..271a4c9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/hash.py
@@ -0,0 +1,58 @@
+import hashlib
+import logging
+import sys
+from optparse import Values
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
+from pip._internal.utils.misc import read_chunks, write_output
+
+logger = logging.getLogger(__name__)
+
+
+class HashCommand(Command):
+    """
+    Compute a hash of a local package archive.
+
+    These can be used with --hash in a requirements file to do repeatable
+    installs.
+    """
+
+    usage = "%prog [options] <file> ..."
+    ignore_require_venv = True
+
+    def add_options(self) -> None:
+        self.cmd_opts.add_option(
+            "-a",
+            "--algorithm",
+            dest="algorithm",
+            choices=STRONG_HASHES,
+            action="store",
+            default=FAVORITE_HASH,
+            help="The hash algorithm to use: one of {}".format(
+                ", ".join(STRONG_HASHES)
+            ),
+        )
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options: Values, args: list[str]) -> int:
+        if not args:
+            self.parser.print_usage(sys.stderr)
+            return ERROR
+
+        algorithm = options.algorithm
+        for path in args:
+            write_output(
+                "%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm)
+            )
+        return SUCCESS
+
+
+def _hash_of_file(path: str, algorithm: str) -> str:
+    """Return the hash digest of a file."""
+    with open(path, "rb") as archive:
+        hash = hashlib.new(algorithm)
+        for chunk in read_chunks(archive):
+            hash.update(chunk)
+    return hash.hexdigest()
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/help.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/help.py
new file mode 100644
index 0000000..2ae658f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/help.py
@@ -0,0 +1,40 @@
+from optparse import Values
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.exceptions import CommandError
+
+
+class HelpCommand(Command):
+    """Show help for commands"""
+
+    usage = """
+      %prog <command>"""
+    ignore_require_venv = True
+
+    def run(self, options: Values, args: list[str]) -> int:
+        from pip._internal.commands import (
+            commands_dict,
+            create_command,
+            get_similar_commands,
+        )
+
+        try:
+            # 'pip help' with no args is handled by pip.__init__.parseopt()
+            cmd_name = args[0]  # the command we need help for
+        except IndexError:
+            return SUCCESS
+
+        if cmd_name not in commands_dict:
+            guess = get_similar_commands(cmd_name)
+
+            msg = [f'unknown command "{cmd_name}"']
+            if guess:
+                msg.append(f'maybe you meant "{guess}"')
+
+            raise CommandError(" - ".join(msg))
+
+        command = create_command(cmd_name)
+        command.parser.print_help()
+
+        return SUCCESS
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/index.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/index.py
new file mode 100644
index 0000000..ecac998
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/index.py
@@ -0,0 +1,159 @@
+from __future__ import annotations
+
+import json
+import logging
+from collections.abc import Iterable
+from optparse import Values
+from typing import Any, Callable
+
+from pip._vendor.packaging.version import Version
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.req_command import IndexGroupCommand
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.commands.search import (
+    get_installed_distribution,
+    print_dist_installation_info,
+)
+from pip._internal.exceptions import CommandError, DistributionNotFound, PipError
+from pip._internal.index.collector import LinkCollector
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.models.selection_prefs import SelectionPreferences
+from pip._internal.models.target_python import TargetPython
+from pip._internal.network.session import PipSession
+from pip._internal.utils.misc import write_output
+
+logger = logging.getLogger(__name__)
+
+
+class IndexCommand(IndexGroupCommand):
+    """
+    Inspect information available from package indexes.
+    """
+
+    ignore_require_venv = True
+    usage = """
+        %prog versions <package>
+    """
+
+    def add_options(self) -> None:
+        cmdoptions.add_target_python_options(self.cmd_opts)
+
+        self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
+        self.cmd_opts.add_option(cmdoptions.pre())
+        self.cmd_opts.add_option(cmdoptions.json())
+        self.cmd_opts.add_option(cmdoptions.no_binary())
+        self.cmd_opts.add_option(cmdoptions.only_binary())
+
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group,
+            self.parser,
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def handler_map(self) -> dict[str, Callable[[Values, list[str]], None]]:
+        return {
+            "versions": self.get_available_package_versions,
+        }
+
+    def run(self, options: Values, args: list[str]) -> int:
+        handler_map = self.handler_map()
+
+        # Determine action
+        if not args or args[0] not in handler_map:
+            logger.error(
+                "Need an action (%s) to perform.",
+                ", ".join(sorted(handler_map)),
+            )
+            return ERROR
+
+        action = args[0]
+
+        # Error handling happens here, not in the action-handlers.
+        try:
+            handler_map[action](options, args[1:])
+        except PipError as e:
+            logger.error(e.args[0])
+            return ERROR
+
+        return SUCCESS
+
+    def _build_package_finder(
+        self,
+        options: Values,
+        session: PipSession,
+        target_python: TargetPython | None = None,
+        ignore_requires_python: bool | None = None,
+    ) -> PackageFinder:
+        """
+        Create a package finder appropriate to the index command.
+        """
+        link_collector = LinkCollector.create(session, options=options)
+
+        # Pass allow_yanked=False to ignore yanked versions.
+        selection_prefs = SelectionPreferences(
+            allow_yanked=False,
+            allow_all_prereleases=options.pre,
+            ignore_requires_python=ignore_requires_python,
+        )
+
+        return PackageFinder.create(
+            link_collector=link_collector,
+            selection_prefs=selection_prefs,
+            target_python=target_python,
+        )
+
+    def get_available_package_versions(self, options: Values, args: list[Any]) -> None:
+        if len(args) != 1:
+            raise CommandError("You need to specify exactly one argument")
+
+        target_python = cmdoptions.make_target_python(options)
+        query = args[0]
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(
+                options=options,
+                session=session,
+                target_python=target_python,
+                ignore_requires_python=options.ignore_requires_python,
+            )
+
+            versions: Iterable[Version] = (
+                candidate.version for candidate in finder.find_all_candidates(query)
+            )
+
+            if not options.pre:
+                # Remove prereleases
+                versions = (
+                    version for version in versions if not version.is_prerelease
+                )
+            versions = set(versions)
+
+            if not versions:
+                raise DistributionNotFound(
+                    f"No matching distribution found for {query}"
+                )
+
+            formatted_versions = [str(ver) for ver in sorted(versions, reverse=True)]
+            latest = formatted_versions[0]
+
+        dist = get_installed_distribution(query)
+
+        if options.json:
+            structured_output = {
+                "name": query,
+                "versions": formatted_versions,
+                "latest": latest,
+            }
+
+            if dist is not None:
+                structured_output["installed_version"] = str(dist.version)
+
+            write_output(json.dumps(structured_output))
+
+        else:
+            write_output(f"{query} ({latest})")
+            write_output("Available versions: {}".format(", ".join(formatted_versions)))
+            print_dist_installation_info(latest, dist)
/dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/inspect.py @@ -0,0 +1,92 @@ +import logging +from optparse import Values +from typing import Any + +from pip._vendor.packaging.markers import default_environment +from pip._vendor.rich import print_json + +from pip import __version__ +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import SUCCESS +from pip._internal.metadata import BaseDistribution, get_environment +from pip._internal.utils.compat import stdlib_pkgs +from pip._internal.utils.urls import path_to_url + +logger = logging.getLogger(__name__) + + +class InspectCommand(Command): + """ + Inspect the content of a Python environment and produce a report in JSON format. + """ + + ignore_require_venv = True + usage = """ + %prog [options]""" + + def add_options(self) -> None: + self.cmd_opts.add_option( + "--local", + action="store_true", + default=False, + help=( + "If in a virtualenv that has global access, do not list " + "globally-installed packages." + ), + ) + self.cmd_opts.add_option( + "--user", + dest="user", + action="store_true", + default=False, + help="Only output packages installed in user-site.", + ) + self.cmd_opts.add_option(cmdoptions.list_path()) + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options: Values, args: list[str]) -> int: + cmdoptions.check_list_path_option(options) + dists = get_environment(options.path).iter_installed_distributions( + local_only=options.local, + user_only=options.user, + skip=set(stdlib_pkgs), + ) + output = { + "version": "1", + "pip_version": __version__, + "installed": [self._dist_to_dict(dist) for dist in dists], + "environment": default_environment(), + # TODO tags? scheme? + } + print_json(data=output) + return SUCCESS + + def _dist_to_dict(self, dist: BaseDistribution) -> dict[str, Any]: + res: dict[str, Any] = { + "metadata": dist.metadata_dict, + "metadata_location": dist.info_location, + } + # direct_url. Note that we don't have download_info (as in the installation + # report) since it is not recorded in installed metadata. + direct_url = dist.direct_url + if direct_url is not None: + res["direct_url"] = direct_url.to_dict() + else: + # Emulate direct_url for legacy editable installs. + editable_project_location = dist.editable_project_location + if editable_project_location is not None: + res["direct_url"] = { + "url": path_to_url(editable_project_location), + "dir_info": { + "editable": True, + }, + } + # installer + installer = dist.installer + if dist.installer: + res["installer"] = installer + # requested + if dist.installed_with_dist_info: + res["requested"] = dist.requested + return res diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/install.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/install.py new file mode 100644 index 0000000..1ef7a0f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/install.py @@ -0,0 +1,798 @@ +from __future__ import annotations + +import errno +import json +import operator +import os +import shutil +import site +from optparse import SUPPRESS_HELP, Values +from pathlib import Path + +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.requests.exceptions import InvalidProxyURL +from pip._vendor.rich import print_json + +# Eagerly import self_outdated_check to avoid crashes. 
Otherwise, +# this module would be imported *after* pip was replaced, resulting +# in crashes if the new self_outdated_check module was incompatible +# with the rest of pip that's already imported, or allowing a +# wheel to execute arbitrary code on install by replacing +# self_outdated_check. +import pip._internal.self_outdated_check # noqa: F401 +from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.cmdoptions import make_target_python +from pip._internal.cli.req_command import ( + RequirementCommand, + with_cleanup, +) +from pip._internal.cli.status_codes import ERROR, SUCCESS +from pip._internal.exceptions import ( + CommandError, + InstallationError, + InstallWheelBuildError, +) +from pip._internal.locations import get_scheme +from pip._internal.metadata import get_environment +from pip._internal.models.installation_report import InstallationReport +from pip._internal.operations.build.build_tracker import get_build_tracker +from pip._internal.operations.check import ConflictDetails, check_install_conflicts +from pip._internal.req import install_given_reqs +from pip._internal.req.req_install import ( + InstallRequirement, + check_legacy_setup_py_options, +) +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.filesystem import test_writable_dir +from pip._internal.utils.logging import getLogger +from pip._internal.utils.misc import ( + check_externally_managed, + ensure_dir, + get_pip_version, + protect_pip_from_modification_on_windows, + warn_if_run_as_root, + write_output, +) +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.virtualenv import ( + running_under_virtualenv, + virtualenv_no_global, +) +from pip._internal.wheel_builder import build, should_build_for_install_command + +logger = getLogger(__name__) + + +class InstallCommand(RequirementCommand): + """ + Install packages from: + + - PyPI (and other indexes) using requirement specifiers. + - VCS project urls. + - Local project directories. + - Local or remote source archives. + + pip also supports installing from "requirements files", which provide + an easy way to specify a whole environment to be installed. + """ + + usage = """ + %prog [options] [package-index-options] ... + %prog [options] -r [package-index-options] ... + %prog [options] [-e] ... + %prog [options] [-e] ... + %prog [options] ...""" + + def add_options(self) -> None: + self.cmd_opts.add_option(cmdoptions.requirements()) + self.cmd_opts.add_option(cmdoptions.constraints()) + self.cmd_opts.add_option(cmdoptions.no_deps()) + self.cmd_opts.add_option(cmdoptions.pre()) + + self.cmd_opts.add_option(cmdoptions.editable()) + self.cmd_opts.add_option( + "--dry-run", + action="store_true", + dest="dry_run", + default=False, + help=( + "Don't actually install anything, just print what would be. " + "Can be used in combination with --ignore-installed " + "to 'resolve' the requirements." + ), + ) + self.cmd_opts.add_option( + "-t", + "--target", + dest="target_dir", + metavar="dir", + default=None, + help=( + "Install packages into . " + "By default this will not replace existing files/folders in " + ". Use --upgrade to replace existing packages in " + "with new versions." + ), + ) + cmdoptions.add_target_python_options(self.cmd_opts) + + self.cmd_opts.add_option( + "--user", + dest="use_user_site", + action="store_true", + help=( + "Install to the Python user install directory for your " + "platform. Typically ~/.local/, or %APPDATA%\\Python on " + "Windows. 
(See the Python documentation for site.USER_BASE " + "for full details.)" + ), + ) + self.cmd_opts.add_option( + "--no-user", + dest="use_user_site", + action="store_false", + help=SUPPRESS_HELP, + ) + self.cmd_opts.add_option( + "--root", + dest="root_path", + metavar="dir", + default=None, + help="Install everything relative to this alternate root directory.", + ) + self.cmd_opts.add_option( + "--prefix", + dest="prefix_path", + metavar="dir", + default=None, + help=( + "Installation prefix where lib, bin and other top-level " + "folders are placed. Note that the resulting installation may " + "contain scripts and other resources which reference the " + "Python interpreter of pip, and not that of ``--prefix``. " + "See also the ``--python`` option if the intention is to " + "install packages into another (possibly pip-free) " + "environment." + ), + ) + + self.cmd_opts.add_option(cmdoptions.src()) + + self.cmd_opts.add_option( + "-U", + "--upgrade", + dest="upgrade", + action="store_true", + help=( + "Upgrade all specified packages to the newest available " + "version. The handling of dependencies depends on the " + "upgrade-strategy used." + ), + ) + + self.cmd_opts.add_option( + "--upgrade-strategy", + dest="upgrade_strategy", + default="only-if-needed", + choices=["only-if-needed", "eager"], + help=( + "Determines how dependency upgrading should be handled " + "[default: %default]. " + '"eager" - dependencies are upgraded regardless of ' + "whether the currently installed version satisfies the " + "requirements of the upgraded package(s). " + '"only-if-needed" - are upgraded only when they do not ' + "satisfy the requirements of the upgraded package(s)." + ), + ) + + self.cmd_opts.add_option( + "--force-reinstall", + dest="force_reinstall", + action="store_true", + help="Reinstall all packages even if they are already up-to-date.", + ) + + self.cmd_opts.add_option( + "-I", + "--ignore-installed", + dest="ignore_installed", + action="store_true", + help=( + "Ignore the installed packages, overwriting them. " + "This can break your system if the existing package " + "is of a different version or was installed " + "with a different package manager!" 
+ ), + ) + + self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) + self.cmd_opts.add_option(cmdoptions.no_build_isolation()) + self.cmd_opts.add_option(cmdoptions.use_pep517()) + self.cmd_opts.add_option(cmdoptions.no_use_pep517()) + self.cmd_opts.add_option(cmdoptions.check_build_deps()) + self.cmd_opts.add_option(cmdoptions.override_externally_managed()) + + self.cmd_opts.add_option(cmdoptions.config_settings()) + self.cmd_opts.add_option(cmdoptions.global_options()) + + self.cmd_opts.add_option( + "--compile", + action="store_true", + dest="compile", + default=True, + help="Compile Python source files to bytecode", + ) + + self.cmd_opts.add_option( + "--no-compile", + action="store_false", + dest="compile", + help="Do not compile Python source files to bytecode", + ) + + self.cmd_opts.add_option( + "--no-warn-script-location", + action="store_false", + dest="warn_script_location", + default=True, + help="Do not warn when installing scripts outside PATH", + ) + self.cmd_opts.add_option( + "--no-warn-conflicts", + action="store_false", + dest="warn_about_conflicts", + default=True, + help="Do not warn about broken dependencies", + ) + self.cmd_opts.add_option(cmdoptions.no_binary()) + self.cmd_opts.add_option(cmdoptions.only_binary()) + self.cmd_opts.add_option(cmdoptions.prefer_binary()) + self.cmd_opts.add_option(cmdoptions.require_hashes()) + self.cmd_opts.add_option(cmdoptions.progress_bar()) + self.cmd_opts.add_option(cmdoptions.root_user_action()) + + index_opts = cmdoptions.make_option_group( + cmdoptions.index_group, + self.parser, + ) + + self.parser.insert_option_group(0, index_opts) + self.parser.insert_option_group(0, self.cmd_opts) + + self.cmd_opts.add_option( + "--report", + dest="json_report_file", + metavar="file", + default=None, + help=( + "Generate a JSON file describing what pip did to install " + "the provided requirements. " + "Can be used in combination with --dry-run and --ignore-installed " + "to 'resolve' the requirements. " + "When - is used as file name it writes to stdout. " + "When writing to stdout, please combine with the --quiet option " + "to avoid mixing pip logging output with JSON output." + ), + ) + + @with_cleanup + def run(self, options: Values, args: list[str]) -> int: + if options.use_user_site and options.target_dir is not None: + raise CommandError("Can not combine '--user' and '--target'") + + # Check whether the environment we're installing into is externally + # managed, as specified in PEP 668. Specifying --root, --target, or + # --prefix disables the check, since there's no reliable way to locate + # the EXTERNALLY-MANAGED file for those cases. An exception is also + # made specifically for "--dry-run --report" for convenience. 
+ installing_into_current_environment = ( + not (options.dry_run and options.json_report_file) + and options.root_path is None + and options.target_dir is None + and options.prefix_path is None + ) + if ( + installing_into_current_environment + and not options.override_externally_managed + ): + check_externally_managed() + + upgrade_strategy = "to-satisfy-only" + if options.upgrade: + upgrade_strategy = options.upgrade_strategy + + cmdoptions.check_dist_restriction(options, check_target=True) + + logger.verbose("Using %s", get_pip_version()) + options.use_user_site = decide_user_install( + options.use_user_site, + prefix_path=options.prefix_path, + target_dir=options.target_dir, + root_path=options.root_path, + isolated_mode=options.isolated_mode, + ) + + target_temp_dir: TempDirectory | None = None + target_temp_dir_path: str | None = None + if options.target_dir: + options.ignore_installed = True + options.target_dir = os.path.abspath(options.target_dir) + if ( + # fmt: off + os.path.exists(options.target_dir) and + not os.path.isdir(options.target_dir) + # fmt: on + ): + raise CommandError( + "Target path exists but is not a directory, will not continue." + ) + + # Create a target directory for using with the target option + target_temp_dir = TempDirectory(kind="target") + target_temp_dir_path = target_temp_dir.path + self.enter_context(target_temp_dir) + + global_options = options.global_options or [] + + session = self.get_default_session(options) + + target_python = make_target_python(options) + finder = self._build_package_finder( + options=options, + session=session, + target_python=target_python, + ignore_requires_python=options.ignore_requires_python, + ) + build_tracker = self.enter_context(get_build_tracker()) + + directory = TempDirectory( + delete=not options.no_clean, + kind="install", + globally_managed=True, + ) + + try: + reqs = self.get_requirements(args, options, finder, session) + check_legacy_setup_py_options(options, reqs) + + wheel_cache = WheelCache(options.cache_dir) + + # Only when installing is it permitted to use PEP 660. + # In other circumstances (pip wheel, pip download) we generate + # regular (i.e. non editable) metadata and wheels. 
+ for req in reqs: + req.permit_editable_wheels = True + + preparer = self.make_requirement_preparer( + temp_build_dir=directory, + options=options, + build_tracker=build_tracker, + session=session, + finder=finder, + use_user_site=options.use_user_site, + verbosity=self.verbosity, + ) + resolver = self.make_resolver( + preparer=preparer, + finder=finder, + options=options, + wheel_cache=wheel_cache, + use_user_site=options.use_user_site, + ignore_installed=options.ignore_installed, + ignore_requires_python=options.ignore_requires_python, + force_reinstall=options.force_reinstall, + upgrade_strategy=upgrade_strategy, + use_pep517=options.use_pep517, + py_version_info=options.python_version, + ) + + self.trace_basic_info(finder) + + requirement_set = resolver.resolve( + reqs, check_supported_wheels=not options.target_dir + ) + + if options.json_report_file: + report = InstallationReport(requirement_set.requirements_to_install) + if options.json_report_file == "-": + print_json(data=report.to_dict()) + else: + with open(options.json_report_file, "w", encoding="utf-8") as f: + json.dump(report.to_dict(), f, indent=2, ensure_ascii=False) + + if options.dry_run: + would_install_items = sorted( + (r.metadata["name"], r.metadata["version"]) + for r in requirement_set.requirements_to_install + ) + if would_install_items: + write_output( + "Would install %s", + " ".join("-".join(item) for item in would_install_items), + ) + return SUCCESS + + try: + pip_req = requirement_set.get_requirement("pip") + except KeyError: + modifying_pip = False + else: + # If we're not replacing an already installed pip, + # we're not modifying it. + modifying_pip = pip_req.satisfied_by is None + protect_pip_from_modification_on_windows(modifying_pip=modifying_pip) + + reqs_to_build = [ + r + for r in requirement_set.requirements_to_install + if should_build_for_install_command(r) + ] + + _, build_failures = build( + reqs_to_build, + wheel_cache=wheel_cache, + verify=True, + build_options=[], + global_options=global_options, + ) + + if build_failures: + raise InstallWheelBuildError(build_failures) + + to_install = resolver.get_installation_order(requirement_set) + + # Check for conflicts in the package set we're installing. + conflicts: ConflictDetails | None = None + should_warn_about_conflicts = ( + not options.ignore_dependencies and options.warn_about_conflicts + ) + if should_warn_about_conflicts: + conflicts = self._determine_conflicts(to_install) + + # Don't warn about script install locations if + # --target or --prefix has been specified + warn_script_location = options.warn_script_location + if options.target_dir or options.prefix_path: + warn_script_location = False + + installed = install_given_reqs( + to_install, + global_options, + root=options.root_path, + home=target_temp_dir_path, + prefix=options.prefix_path, + warn_script_location=warn_script_location, + use_user_site=options.use_user_site, + pycompile=options.compile, + progress_bar=options.progress_bar, + ) + + lib_locations = get_lib_location_guesses( + user=options.use_user_site, + home=target_temp_dir_path, + root=options.root_path, + prefix=options.prefix_path, + isolated=options.isolated_mode, + ) + env = get_environment(lib_locations) + + # Display a summary of installed packages, with extra care to + # display a package name as it was requested by the user. 
+            installed.sort(key=operator.attrgetter("name"))
+            summary = []
+            installed_versions = {}
+            for distribution in env.iter_all_distributions():
+                installed_versions[distribution.canonical_name] = distribution.version
+            for package in installed:
+                display_name = package.name
+                version = installed_versions.get(canonicalize_name(display_name), None)
+                if version:
+                    text = f"{display_name}-{version}"
+                else:
+                    text = display_name
+                summary.append(text)
+
+            if conflicts is not None:
+                self._warn_about_conflicts(
+                    conflicts,
+                    resolver_variant=self.determine_resolver_variant(options),
+                )
+
+            installed_desc = " ".join(summary)
+            if installed_desc:
+                write_output(
+                    "Successfully installed %s",
+                    installed_desc,
+                )
+        except OSError as error:
+            show_traceback = self.verbosity >= 1
+
+            message = create_os_error_message(
+                error,
+                show_traceback,
+                options.use_user_site,
+            )
+            logger.error(message, exc_info=show_traceback)
+
+            return ERROR
+
+        if options.target_dir:
+            assert target_temp_dir
+            self._handle_target_dir(
+                options.target_dir, target_temp_dir, options.upgrade
+            )
+        if options.root_user_action == "warn":
+            warn_if_run_as_root()
+        return SUCCESS
+
+    def _handle_target_dir(
+        self, target_dir: str, target_temp_dir: TempDirectory, upgrade: bool
+    ) -> None:
+        ensure_dir(target_dir)
+
+        # Checking both purelib and platlib directories for installed
+        # packages to be moved to the target directory
+        lib_dir_list = []
+
+        scheme = get_scheme("", home=target_temp_dir.path)
+        purelib_dir = scheme.purelib
+        platlib_dir = scheme.platlib
+        data_dir = scheme.data
+
+        if os.path.exists(purelib_dir):
+            lib_dir_list.append(purelib_dir)
+        if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
+            lib_dir_list.append(platlib_dir)
+        if os.path.exists(data_dir):
+            lib_dir_list.append(data_dir)
+
+        for lib_dir in lib_dir_list:
+            for item in os.listdir(lib_dir):
+                if lib_dir == data_dir:
+                    ddir = os.path.join(data_dir, item)
+                    if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
+                        continue
+                target_item_dir = os.path.join(target_dir, item)
+                if os.path.exists(target_item_dir):
+                    if not upgrade:
+                        logger.warning(
+                            "Target directory %s already exists. Specify "
+                            "--upgrade to force replacement.",
+                            target_item_dir,
+                        )
+                        continue
+                    if os.path.islink(target_item_dir):
+                        logger.warning(
+                            "Target directory %s already exists and is "
+                            "a link. pip will not automatically replace "
+                            "links; please remove the link if replacement "
+                            "is desired.",
+                            target_item_dir,
+                        )
+                        continue
+                    if os.path.isdir(target_item_dir):
+                        shutil.rmtree(target_item_dir)
+                    else:
+                        os.remove(target_item_dir)
+
+                shutil.move(os.path.join(lib_dir, item), target_item_dir)
+
+    def _determine_conflicts(
+        self, to_install: list[InstallRequirement]
+    ) -> ConflictDetails | None:
+        try:
+            return check_install_conflicts(to_install)
+        except Exception:
+            logger.exception(
+                "Error while checking for conflicts. Please file an issue on "
+                "pip's issue tracker: https://github.com/pypa/pip/issues/new"
+            )
+            return None
+
+    def _warn_about_conflicts(
+        self, conflict_details: ConflictDetails, resolver_variant: str
+    ) -> None:
+        package_set, (missing, conflicting) = conflict_details
+        if not missing and not conflicting:
+            return
+
+        parts: list[str] = []
+        if resolver_variant == "legacy":
+            parts.append(
+                "pip's legacy dependency resolver does not consider dependency "
+                "conflicts when selecting packages. 
This behaviour is the " + "source of the following dependency conflicts." + ) + else: + assert resolver_variant == "resolvelib" + parts.append( + "pip's dependency resolver does not currently take into account " + "all the packages that are installed. This behaviour is the " + "source of the following dependency conflicts." + ) + + # NOTE: There is some duplication here, with commands/check.py + for project_name in missing: + version = package_set[project_name][0] + for dependency in missing[project_name]: + message = ( + f"{project_name} {version} requires {dependency[1]}, " + "which is not installed." + ) + parts.append(message) + + for project_name in conflicting: + version = package_set[project_name][0] + for dep_name, dep_version, req in conflicting[project_name]: + message = ( + "{name} {version} requires {requirement}, but {you} have " + "{dep_name} {dep_version} which is incompatible." + ).format( + name=project_name, + version=version, + requirement=req, + dep_name=dep_name, + dep_version=dep_version, + you=("you" if resolver_variant == "resolvelib" else "you'll"), + ) + parts.append(message) + + logger.critical("\n".join(parts)) + + +def get_lib_location_guesses( + user: bool = False, + home: str | None = None, + root: str | None = None, + isolated: bool = False, + prefix: str | None = None, +) -> list[str]: + scheme = get_scheme( + "", + user=user, + home=home, + root=root, + isolated=isolated, + prefix=prefix, + ) + return [scheme.purelib, scheme.platlib] + + +def site_packages_writable(root: str | None, isolated: bool) -> bool: + return all( + test_writable_dir(d) + for d in set(get_lib_location_guesses(root=root, isolated=isolated)) + ) + + +def decide_user_install( + use_user_site: bool | None, + prefix_path: str | None = None, + target_dir: str | None = None, + root_path: str | None = None, + isolated_mode: bool = False, +) -> bool: + """Determine whether to do a user install based on the input options. + + If use_user_site is False, no additional checks are done. + If use_user_site is True, it is checked for compatibility with other + options. + If use_user_site is None, the default behaviour depends on the environment, + which is provided by the other arguments. + """ + # In some cases (config from tox), use_user_site can be set to an integer + # rather than a bool, which 'use_user_site is False' wouldn't catch. + if (use_user_site is not None) and (not use_user_site): + logger.debug("Non-user install by explicit request") + return False + + if use_user_site: + if prefix_path: + raise CommandError( + "Can not combine '--user' and '--prefix' as they imply " + "different installation locations" + ) + if virtualenv_no_global(): + raise InstallationError( + "Can not perform a '--user' install. User site-packages " + "are not visible in this virtualenv." + ) + logger.debug("User install by explicit request") + return True + + # If we are here, user installs have not been explicitly requested/avoided + assert use_user_site is None + + # user install incompatible with --prefix/--target + if prefix_path or target_dir: + logger.debug("Non-user install due to --prefix or --target option") + return False + + # If user installs are not enabled, choose a non-user install + if not site.ENABLE_USER_SITE: + logger.debug("Non-user install because user site-packages disabled") + return False + + # If we have permission for a non-user install, do that, + # otherwise do a user install. 
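+    # e.g. `pip install requests` on a system Python whose site-packages is
+    # not writable by the current user falls through to the user-install
+    # branch below, while the same command inside a virtualenv (writable
+    # site-packages) stays a normal install.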
+    if site_packages_writable(root=root_path, isolated=isolated_mode):
+        logger.debug("Non-user install because site-packages writeable")
+        return False
+
+    logger.info(
+        "Defaulting to user installation because normal site-packages "
+        "is not writeable"
+    )
+    return True
+
+
+def create_os_error_message(
+    error: OSError, show_traceback: bool, using_user_site: bool
+) -> str:
+    """Format an error message for an OSError
+
+    It may occur anytime during the execution of the install command.
+    """
+    parts = []
+
+    # Mention the error if we are not going to show a traceback
+    parts.append("Could not install packages due to an OSError")
+    if not show_traceback:
+        parts.append(": ")
+        parts.append(str(error))
+    else:
+        parts.append(".")
+
+    # Split the error indication from a helper message (if any)
+    parts[-1] += "\n"
+
+    # Suggest useful actions to the user:
+    # (1) using user site-packages or (2) verifying the permissions
+    if error.errno == errno.EACCES:
+        user_option_part = "Consider using the `--user` option"
+        permissions_part = "Check the permissions"
+
+        if not running_under_virtualenv() and not using_user_site:
+            parts.extend(
+                [
+                    user_option_part,
+                    " or ",
+                    permissions_part.lower(),
+                ]
+            )
+        else:
+            parts.append(permissions_part)
+        parts.append(".\n")
+
+    # Suggest checking "pip config debug" in case of an invalid proxy
+    if type(error) is InvalidProxyURL:
+        parts.append(
+            'Consider checking your local proxy configuration with "pip config debug"'
+        )
+        parts.append(".\n")
+
+    # On Windows, errors like EINVAL or ENOENT may occur
+    # if a file or folder name exceeds 255 characters,
+    # or if the full path exceeds 260 characters and long path support isn't enabled.
+    # This condition checks for such cases and adds a hint to the error output.
+
+    if WINDOWS and error.errno in (errno.EINVAL, errno.ENOENT) and error.filename:
+        if any(len(part) > 255 for part in Path(error.filename).parts):
+            parts.append(
+                "HINT: This error might be caused by a file or folder name exceeding "
+                "255 characters, which is a Windows limitation even if long paths "
+                "are enabled.\n "
+            )
+        if len(error.filename) > 260:
+            parts.append(
+                "HINT: This error might have occurred since "
+                "this system does not have Windows Long Path "
+                "support enabled. 
You can find information on " + "how to enable this at " + "https://pip.pypa.io/warnings/enable-long-paths\n" + ) + return "".join(parts).strip() + "\n" diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/list.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/list.py new file mode 100644 index 0000000..ad27e45 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/list.py @@ -0,0 +1,400 @@ +from __future__ import annotations + +import json +import logging +from collections.abc import Generator, Sequence +from email.parser import Parser +from optparse import Values +from typing import TYPE_CHECKING, cast + +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.version import InvalidVersion, Version + +from pip._internal.cli import cmdoptions +from pip._internal.cli.index_command import IndexGroupCommand +from pip._internal.cli.status_codes import SUCCESS +from pip._internal.exceptions import CommandError +from pip._internal.metadata import BaseDistribution, get_environment +from pip._internal.models.selection_prefs import SelectionPreferences +from pip._internal.utils.compat import stdlib_pkgs +from pip._internal.utils.misc import tabulate, write_output + +if TYPE_CHECKING: + from pip._internal.index.package_finder import PackageFinder + from pip._internal.network.session import PipSession + + class _DistWithLatestInfo(BaseDistribution): + """Give the distribution object a couple of extra fields. + + These will be populated during ``get_outdated()``. This is dirty but + makes the rest of the code much cleaner. + """ + + latest_version: Version + latest_filetype: str + + _ProcessedDists = Sequence[_DistWithLatestInfo] + + +logger = logging.getLogger(__name__) + + +class ListCommand(IndexGroupCommand): + """ + List installed packages, including editables. + + Packages are listed in a case-insensitive sorted order. + """ + + ignore_require_venv = True + usage = """ + %prog [options]""" + + def add_options(self) -> None: + self.cmd_opts.add_option( + "-o", + "--outdated", + action="store_true", + default=False, + help="List outdated packages", + ) + self.cmd_opts.add_option( + "-u", + "--uptodate", + action="store_true", + default=False, + help="List uptodate packages", + ) + self.cmd_opts.add_option( + "-e", + "--editable", + action="store_true", + default=False, + help="List editable projects.", + ) + self.cmd_opts.add_option( + "-l", + "--local", + action="store_true", + default=False, + help=( + "If in a virtualenv that has global access, do not list " + "globally-installed packages." + ), + ) + self.cmd_opts.add_option( + "--user", + dest="user", + action="store_true", + default=False, + help="Only output packages installed in user-site.", + ) + self.cmd_opts.add_option(cmdoptions.list_path()) + self.cmd_opts.add_option( + "--pre", + action="store_true", + default=False, + help=( + "Include pre-release and development versions. By default, " + "pip only finds stable versions." + ), + ) + + self.cmd_opts.add_option( + "--format", + action="store", + dest="list_format", + default="columns", + choices=("columns", "freeze", "json"), + help=( + "Select the output format among: columns (default), freeze, or json. " + "The 'freeze' format cannot be used with the --outdated option." 
+            ),
+        )
+
+        self.cmd_opts.add_option(
+            "--not-required",
+            action="store_true",
+            dest="not_required",
+            help="List packages that are not dependencies of installed packages.",
+        )
+
+        self.cmd_opts.add_option(
+            "--exclude-editable",
+            action="store_false",
+            dest="include_editable",
+            help="Exclude editable packages from output.",
+        )
+        self.cmd_opts.add_option(
+            "--include-editable",
+            action="store_true",
+            dest="include_editable",
+            help="Include editable packages in output.",
+            default=True,
+        )
+        self.cmd_opts.add_option(cmdoptions.list_exclude())
+        index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def handle_pip_version_check(self, options: Values) -> None:
+        if options.outdated or options.uptodate:
+            super().handle_pip_version_check(options)
+
+    def _build_package_finder(
+        self, options: Values, session: PipSession
+    ) -> PackageFinder:
+        """
+        Create a package finder appropriate to this list command.
+        """
+        # Lazy import the heavy index modules as most list invocations won't need them.
+        from pip._internal.index.collector import LinkCollector
+        from pip._internal.index.package_finder import PackageFinder
+
+        link_collector = LinkCollector.create(session, options=options)
+
+        # Pass allow_yanked=False to ignore yanked versions.
+        selection_prefs = SelectionPreferences(
+            allow_yanked=False,
+            allow_all_prereleases=options.pre,
+        )
+
+        return PackageFinder.create(
+            link_collector=link_collector,
+            selection_prefs=selection_prefs,
+        )
+
+    def run(self, options: Values, args: list[str]) -> int:
+        if options.outdated and options.uptodate:
+            raise CommandError("Options --outdated and --uptodate cannot be combined.")
+
+        if options.outdated and options.list_format == "freeze":
+            raise CommandError(
+                "List format 'freeze' cannot be used with the --outdated option."
+            )
+
+        cmdoptions.check_list_path_option(options)
+
+        skip = set(stdlib_pkgs)
+        if options.excludes:
+            skip.update(canonicalize_name(n) for n in options.excludes)
+
+        packages: _ProcessedDists = [
+            cast("_DistWithLatestInfo", d)
+            for d in get_environment(options.path).iter_installed_distributions(
+                local_only=options.local,
+                user_only=options.user,
+                editables_only=options.editable,
+                include_editables=options.include_editable,
+                skip=skip,
+            )
+        ]
+
+        # get_not_required must be called first in order to find and
+        # filter out all dependencies correctly. Otherwise a package
+        # can't be identified as a requirement because some parent packages
+        # could be filtered out before.
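+        # e.g. with only requests (and its dependencies) installed,
+        # `pip list --not-required` keeps requests but drops urllib3, idna,
+        # certifi and charset_normalizer, since requests depends on them.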
+ if options.not_required: + packages = self.get_not_required(packages, options) + + if options.outdated: + packages = self.get_outdated(packages, options) + elif options.uptodate: + packages = self.get_uptodate(packages, options) + + self.output_package_listing(packages, options) + return SUCCESS + + def get_outdated( + self, packages: _ProcessedDists, options: Values + ) -> _ProcessedDists: + return [ + dist + for dist in self.iter_packages_latest_infos(packages, options) + if dist.latest_version > dist.version + ] + + def get_uptodate( + self, packages: _ProcessedDists, options: Values + ) -> _ProcessedDists: + return [ + dist + for dist in self.iter_packages_latest_infos(packages, options) + if dist.latest_version == dist.version + ] + + def get_not_required( + self, packages: _ProcessedDists, options: Values + ) -> _ProcessedDists: + dep_keys = { + canonicalize_name(dep.name) + for dist in packages + for dep in (dist.iter_dependencies() or ()) + } + + # Create a set to remove duplicate packages, and cast it to a list + # to keep the return type consistent with get_outdated and + # get_uptodate + return list({pkg for pkg in packages if pkg.canonical_name not in dep_keys}) + + def iter_packages_latest_infos( + self, packages: _ProcessedDists, options: Values + ) -> Generator[_DistWithLatestInfo, None, None]: + with self._build_session(options) as session: + finder = self._build_package_finder(options, session) + + def latest_info( + dist: _DistWithLatestInfo, + ) -> _DistWithLatestInfo | None: + all_candidates = finder.find_all_candidates(dist.canonical_name) + if not options.pre: + # Remove prereleases + all_candidates = [ + candidate + for candidate in all_candidates + if not candidate.version.is_prerelease + ] + + evaluator = finder.make_candidate_evaluator( + project_name=dist.canonical_name, + ) + best_candidate = evaluator.sort_best_candidate(all_candidates) + if best_candidate is None: + return None + + remote_version = best_candidate.version + if best_candidate.link.is_wheel: + typ = "wheel" + else: + typ = "sdist" + dist.latest_version = remote_version + dist.latest_filetype = typ + return dist + + for dist in map(latest_info, packages): + if dist is not None: + yield dist + + def output_package_listing( + self, packages: _ProcessedDists, options: Values + ) -> None: + packages = sorted( + packages, + key=lambda dist: dist.canonical_name, + ) + if options.list_format == "columns" and packages: + data, header = format_for_columns(packages, options) + self.output_package_listing_columns(data, header) + elif options.list_format == "freeze": + for dist in packages: + try: + req_string = f"{dist.raw_name}=={dist.version}" + except InvalidVersion: + req_string = f"{dist.raw_name}==={dist.raw_version}" + if options.verbose >= 1: + write_output("%s (%s)", req_string, dist.location) + else: + write_output(req_string) + elif options.list_format == "json": + write_output(format_for_json(packages, options)) + + def output_package_listing_columns( + self, data: list[list[str]], header: list[str] + ) -> None: + # insert the header first: we need to know the size of column names + if len(data) > 0: + data.insert(0, header) + + pkg_strings, sizes = tabulate(data) + + # Create and add a separator. 
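+        # e.g. with header ["Package", "Version"] the output looks roughly
+        # like:
+        #   Package Version
+        #   ------- -------
+        #   pip     25.0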
+ if len(data) > 0: + pkg_strings.insert(1, " ".join("-" * x for x in sizes)) + + for val in pkg_strings: + write_output(val) + + +def format_for_columns( + pkgs: _ProcessedDists, options: Values +) -> tuple[list[list[str]], list[str]]: + """ + Convert the package data into something usable + by output_package_listing_columns. + """ + header = ["Package", "Version"] + + running_outdated = options.outdated + if running_outdated: + header.extend(["Latest", "Type"]) + + def wheel_build_tag(dist: BaseDistribution) -> str | None: + try: + wheel_file = dist.read_text("WHEEL") + except FileNotFoundError: + return None + return Parser().parsestr(wheel_file).get("Build") + + build_tags = [wheel_build_tag(p) for p in pkgs] + has_build_tags = any(build_tags) + if has_build_tags: + header.append("Build") + + if options.verbose >= 1: + header.append("Location") + if options.verbose >= 1: + header.append("Installer") + + has_editables = any(x.editable for x in pkgs) + if has_editables: + header.append("Editable project location") + + data = [] + for i, proj in enumerate(pkgs): + # if we're working on the 'outdated' list, separate out the + # latest_version and type + row = [proj.raw_name, proj.raw_version] + + if running_outdated: + row.append(str(proj.latest_version)) + row.append(proj.latest_filetype) + + if has_build_tags: + row.append(build_tags[i] or "") + + if has_editables: + row.append(proj.editable_project_location or "") + + if options.verbose >= 1: + row.append(proj.location or "") + if options.verbose >= 1: + row.append(proj.installer) + + data.append(row) + + return data, header + + +def format_for_json(packages: _ProcessedDists, options: Values) -> str: + data = [] + for dist in packages: + try: + version = str(dist.version) + except InvalidVersion: + version = dist.raw_version + info = { + "name": dist.raw_name, + "version": version, + } + if options.verbose >= 1: + info["location"] = dist.location or "" + info["installer"] = dist.installer + if options.outdated: + info["latest_version"] = str(dist.latest_version) + info["latest_filetype"] = dist.latest_filetype + editable_project_location = dist.editable_project_location + if editable_project_location: + info["editable_project_location"] = editable_project_location + data.append(info) + return json.dumps(data) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/lock.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/lock.py new file mode 100644 index 0000000..e4a978d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/lock.py @@ -0,0 +1,170 @@ +import sys +from optparse import Values +from pathlib import Path + +from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.req_command import ( + RequirementCommand, + with_cleanup, +) +from pip._internal.cli.status_codes import SUCCESS +from pip._internal.models.pylock import Pylock, is_valid_pylock_file_name +from pip._internal.operations.build.build_tracker import get_build_tracker +from pip._internal.req.req_install import ( + check_legacy_setup_py_options, +) +from pip._internal.utils.logging import getLogger +from pip._internal.utils.misc import ( + get_pip_version, +) +from pip._internal.utils.temp_dir import TempDirectory + +logger = getLogger(__name__) + + +class LockCommand(RequirementCommand): + """ + EXPERIMENTAL - Lock packages and their dependencies from: + + - PyPI (and other indexes) using requirement specifiers. + - VCS project urls. + - Local project directories. 
+ - Local or remote source archives. + + pip also supports locking from "requirements files", which provide an easy + way to specify a whole environment to be installed. + + The generated lock file is only guaranteed to be valid for the current + python version and platform. + """ + + usage = """ + %prog [options] [-e] ... + %prog [options] [package-index-options] ... + %prog [options] -r [package-index-options] ... + %prog [options] ...""" + + def add_options(self) -> None: + self.cmd_opts.add_option( + cmdoptions.PipOption( + "--output", + "-o", + dest="output_file", + metavar="path", + type="path", + default="pylock.toml", + help="Lock file name (default=pylock.toml). Use - for stdout.", + ) + ) + self.cmd_opts.add_option(cmdoptions.requirements()) + self.cmd_opts.add_option(cmdoptions.constraints()) + self.cmd_opts.add_option(cmdoptions.no_deps()) + self.cmd_opts.add_option(cmdoptions.pre()) + + self.cmd_opts.add_option(cmdoptions.editable()) + + self.cmd_opts.add_option(cmdoptions.src()) + + self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) + self.cmd_opts.add_option(cmdoptions.no_build_isolation()) + self.cmd_opts.add_option(cmdoptions.use_pep517()) + self.cmd_opts.add_option(cmdoptions.no_use_pep517()) + self.cmd_opts.add_option(cmdoptions.check_build_deps()) + + self.cmd_opts.add_option(cmdoptions.config_settings()) + + self.cmd_opts.add_option(cmdoptions.no_binary()) + self.cmd_opts.add_option(cmdoptions.only_binary()) + self.cmd_opts.add_option(cmdoptions.prefer_binary()) + self.cmd_opts.add_option(cmdoptions.require_hashes()) + self.cmd_opts.add_option(cmdoptions.progress_bar()) + + index_opts = cmdoptions.make_option_group( + cmdoptions.index_group, + self.parser, + ) + + self.parser.insert_option_group(0, index_opts) + self.parser.insert_option_group(0, self.cmd_opts) + + @with_cleanup + def run(self, options: Values, args: list[str]) -> int: + logger.verbose("Using %s", get_pip_version()) + + logger.warning( + "pip lock is currently an experimental command. " + "It may be removed/changed in a future release " + "without prior warning." + ) + + session = self.get_default_session(options) + + finder = self._build_package_finder( + options=options, + session=session, + ignore_requires_python=options.ignore_requires_python, + ) + build_tracker = self.enter_context(get_build_tracker()) + + directory = TempDirectory( + delete=not options.no_clean, + kind="install", + globally_managed=True, + ) + + reqs = self.get_requirements(args, options, finder, session) + check_legacy_setup_py_options(options, reqs) + + wheel_cache = WheelCache(options.cache_dir) + + # Only when installing is it permitted to use PEP 660. + # In other circumstances (pip wheel, pip download) we generate + # regular (i.e. non editable) metadata and wheels. 
+ for req in reqs: + req.permit_editable_wheels = True + + preparer = self.make_requirement_preparer( + temp_build_dir=directory, + options=options, + build_tracker=build_tracker, + session=session, + finder=finder, + use_user_site=False, + verbosity=self.verbosity, + ) + resolver = self.make_resolver( + preparer=preparer, + finder=finder, + options=options, + wheel_cache=wheel_cache, + use_user_site=False, + ignore_installed=True, + ignore_requires_python=options.ignore_requires_python, + upgrade_strategy="to-satisfy-only", + use_pep517=options.use_pep517, + ) + + self.trace_basic_info(finder) + + requirement_set = resolver.resolve(reqs, check_supported_wheels=True) + + if options.output_file == "-": + base_dir = Path.cwd() + else: + output_file_path = Path(options.output_file) + if not is_valid_pylock_file_name(output_file_path): + logger.warning( + "%s is not a valid lock file name.", + output_file_path, + ) + base_dir = output_file_path.parent + pylock_toml = Pylock.from_install_requirements( + requirement_set.requirements.values(), base_dir=base_dir + ).as_toml() + if options.output_file == "-": + sys.stdout.write(pylock_toml) + else: + output_file_path.write_text(pylock_toml, encoding="utf-8") + + return SUCCESS diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/search.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/search.py new file mode 100644 index 0000000..b8dbc27 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/search.py @@ -0,0 +1,178 @@ +from __future__ import annotations + +import logging +import shutil +import sys +import textwrap +import xmlrpc.client +from collections import OrderedDict +from optparse import Values +from typing import TypedDict + +from pip._vendor.packaging.version import parse as parse_version + +from pip._internal.cli.base_command import Command +from pip._internal.cli.req_command import SessionCommandMixin +from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS +from pip._internal.exceptions import CommandError +from pip._internal.metadata import get_default_environment +from pip._internal.metadata.base import BaseDistribution +from pip._internal.models.index import PyPI +from pip._internal.network.xmlrpc import PipXmlrpcTransport +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import write_output + + +class TransformedHit(TypedDict): + name: str + summary: str + versions: list[str] + + +logger = logging.getLogger(__name__) + + +class SearchCommand(Command, SessionCommandMixin): + """Search for PyPI packages whose name or summary contains .""" + + usage = """ + %prog [options] """ + ignore_require_venv = True + + def add_options(self) -> None: + self.cmd_opts.add_option( + "-i", + "--index", + dest="index", + metavar="URL", + default=PyPI.pypi_url, + help="Base URL of Python Package Index (default %default)", + ) + + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options: Values, args: list[str]) -> int: + if not args: + raise CommandError("Missing required argument (search query).") + query = args + pypi_hits = self.search(query, options) + hits = transform_hits(pypi_hits) + + terminal_width = None + if sys.stdout.isatty(): + terminal_width = shutil.get_terminal_size()[0] + + print_results(hits, terminal_width=terminal_width) + if pypi_hits: + return SUCCESS + return NO_MATCHES_FOUND + + def search(self, query: list[str], options: Values) -> list[dict[str, str]]: + index_url = options.index + + session = 
self.get_default_session(options) + + transport = PipXmlrpcTransport(index_url, session) + pypi = xmlrpc.client.ServerProxy(index_url, transport) + try: + hits = pypi.search({"name": query, "summary": query}, "or") + except xmlrpc.client.Fault as fault: + message = ( + f"XMLRPC request failed [code: {fault.faultCode}]\n{fault.faultString}" + ) + raise CommandError(message) + assert isinstance(hits, list) + return hits + + +def transform_hits(hits: list[dict[str, str]]) -> list[TransformedHit]: + """ + The list from pypi is really a list of versions. We want a list of + packages with the list of versions stored inline. This converts the + list from pypi into one we can use. + """ + packages: dict[str, TransformedHit] = OrderedDict() + for hit in hits: + name = hit["name"] + summary = hit["summary"] + version = hit["version"] + + if name not in packages.keys(): + packages[name] = { + "name": name, + "summary": summary, + "versions": [version], + } + else: + packages[name]["versions"].append(version) + + # if this is the highest version, replace summary and score + if version == highest_version(packages[name]["versions"]): + packages[name]["summary"] = summary + + return list(packages.values()) + + +def print_dist_installation_info(latest: str, dist: BaseDistribution | None) -> None: + if dist is not None: + with indent_log(): + if dist.version == latest: + write_output("INSTALLED: %s (latest)", dist.version) + else: + write_output("INSTALLED: %s", dist.version) + if parse_version(latest).pre: + write_output( + "LATEST: %s (pre-release; install" + " with `pip install --pre`)", + latest, + ) + else: + write_output("LATEST: %s", latest) + + +def get_installed_distribution(name: str) -> BaseDistribution | None: + env = get_default_environment() + return env.get_distribution(name) + + +def print_results( + hits: list[TransformedHit], + name_column_width: int | None = None, + terminal_width: int | None = None, +) -> None: + if not hits: + return + if name_column_width is None: + name_column_width = ( + max( + [ + len(hit["name"]) + len(highest_version(hit.get("versions", ["-"]))) + for hit in hits + ] + ) + + 4 + ) + + for hit in hits: + name = hit["name"] + summary = hit["summary"] or "" + latest = highest_version(hit.get("versions", ["-"])) + if terminal_width is not None: + target_width = terminal_width - name_column_width - 5 + if target_width > 10: + # wrap and indent summary to fit terminal + summary_lines = textwrap.wrap(summary, target_width) + summary = ("\n" + " " * (name_column_width + 3)).join(summary_lines) + + name_latest = f"{name} ({latest})" + line = f"{name_latest:{name_column_width}} - {summary}" + try: + write_output(line) + dist = get_installed_distribution(name) + print_dist_installation_info(latest, dist) + except UnicodeEncodeError: + pass + + +def highest_version(versions: list[str]) -> str: + return max(versions, key=parse_version) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/show.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/show.py new file mode 100644 index 0000000..f9fcfa6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/show.py @@ -0,0 +1,231 @@ +from __future__ import annotations + +import logging +import string +from collections.abc import Generator, Iterable, Iterator +from optparse import Values +from typing import NamedTuple + +from pip._vendor.packaging.requirements import InvalidRequirement +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.cli.base_command 
import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.metadata import BaseDistribution, get_default_environment
+from pip._internal.utils.misc import write_output
+
+logger = logging.getLogger(__name__)
+
+
+def normalize_project_url_label(label: str) -> str:
+    # This logic is from PEP 753 (Well-known Project URLs in Metadata).
+    chars_to_remove = string.punctuation + string.whitespace
+    removal_map = str.maketrans("", "", chars_to_remove)
+    return label.translate(removal_map).lower()
+
+
+class ShowCommand(Command):
+    """
+    Show information about one or more installed packages.
+
+    The output is in RFC-compliant mail header format.
+    """
+
+    usage = """
+      %prog [options] <package> ..."""
+    ignore_require_venv = True
+
+    def add_options(self) -> None:
+        self.cmd_opts.add_option(
+            "-f",
+            "--files",
+            dest="files",
+            action="store_true",
+            default=False,
+            help="Show the full list of installed files for each package.",
+        )
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options: Values, args: list[str]) -> int:
+        if not args:
+            logger.warning("ERROR: Please provide a package name or names.")
+            return ERROR
+        query = args
+
+        results = search_packages_info(query)
+        if not print_results(
+            results, list_files=options.files, verbose=options.verbose
+        ):
+            return ERROR
+        return SUCCESS
+
+
+class _PackageInfo(NamedTuple):
+    name: str
+    version: str
+    location: str
+    editable_project_location: str | None
+    requires: list[str]
+    required_by: list[str]
+    installer: str
+    metadata_version: str
+    classifiers: list[str]
+    summary: str
+    homepage: str
+    project_urls: list[str]
+    author: str
+    author_email: str
+    license: str
+    license_expression: str
+    entry_points: list[str]
+    files: list[str] | None
+
+
+def search_packages_info(query: list[str]) -> Generator[_PackageInfo, None, None]:
+    """
+    Gather details from installed distributions. Print distribution name,
+    version, location, and installed files. Installed files require a
+    pip-generated 'installed-files.txt' in the distribution's '.egg-info'
+    directory.
+    """
+    env = get_default_environment()
+
+    installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()}
+    query_names = [canonicalize_name(name) for name in query]
+    missing = sorted(
+        [name for name, pkg in zip(query, query_names) if pkg not in installed]
+    )
+    if missing:
+        logger.warning("Package(s) not found: %s", ", ".join(missing))
+
+    def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:
+        return (
+            dist.metadata["Name"] or "UNKNOWN"
+            for dist in installed.values()
+            if current_dist.canonical_name
+            in {canonicalize_name(d.name) for d in dist.iter_dependencies()}
+        )
+
+    for query_name in query_names:
+        try:
+            dist = installed[query_name]
+        except KeyError:
+            continue
+
+        try:
+            requires = sorted(
+                # Avoid duplicates in requirements (e.g. due to environment markers).
+ {req.name for req in dist.iter_dependencies()}, + key=str.lower, + ) + except InvalidRequirement: + requires = sorted(dist.iter_raw_dependencies(), key=str.lower) + + try: + required_by = sorted(_get_requiring_packages(dist), key=str.lower) + except InvalidRequirement: + required_by = ["#N/A"] + + try: + entry_points_text = dist.read_text("entry_points.txt") + entry_points = entry_points_text.splitlines(keepends=False) + except FileNotFoundError: + entry_points = [] + + files_iter = dist.iter_declared_entries() + if files_iter is None: + files: list[str] | None = None + else: + files = sorted(files_iter) + + metadata = dist.metadata + + project_urls = metadata.get_all("Project-URL", []) + homepage = metadata.get("Home-page", "") + if not homepage: + # It's common that there is a "homepage" Project-URL, but Home-page + # remains unset (especially as PEP 621 doesn't surface the field). + for url in project_urls: + url_label, url = url.split(",", maxsplit=1) + normalized_label = normalize_project_url_label(url_label) + if normalized_label == "homepage": + homepage = url.strip() + break + + yield _PackageInfo( + name=dist.raw_name, + version=dist.raw_version, + location=dist.location or "", + editable_project_location=dist.editable_project_location, + requires=requires, + required_by=required_by, + installer=dist.installer, + metadata_version=dist.metadata_version or "", + classifiers=metadata.get_all("Classifier", []), + summary=metadata.get("Summary", ""), + homepage=homepage, + project_urls=project_urls, + author=metadata.get("Author", ""), + author_email=metadata.get("Author-email", ""), + license=metadata.get("License", ""), + license_expression=metadata.get("License-Expression", ""), + entry_points=entry_points, + files=files, + ) + + +def print_results( + distributions: Iterable[_PackageInfo], + list_files: bool, + verbose: bool, +) -> bool: + """ + Print the information from installed distributions found. 
+ """ + results_printed = False + for i, dist in enumerate(distributions): + results_printed = True + if i > 0: + write_output("---") + + metadata_version_tuple = tuple(map(int, dist.metadata_version.split("."))) + + write_output("Name: %s", dist.name) + write_output("Version: %s", dist.version) + write_output("Summary: %s", dist.summary) + write_output("Home-page: %s", dist.homepage) + write_output("Author: %s", dist.author) + write_output("Author-email: %s", dist.author_email) + if metadata_version_tuple >= (2, 4) and dist.license_expression: + write_output("License-Expression: %s", dist.license_expression) + else: + write_output("License: %s", dist.license) + write_output("Location: %s", dist.location) + if dist.editable_project_location is not None: + write_output( + "Editable project location: %s", dist.editable_project_location + ) + write_output("Requires: %s", ", ".join(dist.requires)) + write_output("Required-by: %s", ", ".join(dist.required_by)) + + if verbose: + write_output("Metadata-Version: %s", dist.metadata_version) + write_output("Installer: %s", dist.installer) + write_output("Classifiers:") + for classifier in dist.classifiers: + write_output(" %s", classifier) + write_output("Entry-points:") + for entry in dist.entry_points: + write_output(" %s", entry.strip()) + write_output("Project-URLs:") + for project_url in dist.project_urls: + write_output(" %s", project_url) + if list_files: + write_output("Files:") + if dist.files is None: + write_output("Cannot locate RECORD or installed-files.txt") + else: + for line in dist.files: + write_output(" %s", line.strip()) + return results_printed diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/uninstall.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/uninstall.py new file mode 100644 index 0000000..9c4f031 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/uninstall.py @@ -0,0 +1,113 @@ +import logging +from optparse import Values + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import Command +from pip._internal.cli.index_command import SessionCommandMixin +from pip._internal.cli.status_codes import SUCCESS +from pip._internal.exceptions import InstallationError +from pip._internal.req import parse_requirements +from pip._internal.req.constructors import ( + install_req_from_line, + install_req_from_parsed_requirement, +) +from pip._internal.utils.misc import ( + check_externally_managed, + protect_pip_from_modification_on_windows, + warn_if_run_as_root, +) + +logger = logging.getLogger(__name__) + + +class UninstallCommand(Command, SessionCommandMixin): + """ + Uninstall packages. + + pip is able to uninstall most installed packages. Known exceptions are: + + - Pure distutils packages installed with ``python setup.py install``, which + leave behind no metadata to determine what files were installed. + - Script wrappers installed by ``python setup.py develop``. + """ + + usage = """ + %prog [options] ... + %prog [options] -r ...""" + + def add_options(self) -> None: + self.cmd_opts.add_option( + "-r", + "--requirement", + dest="requirements", + action="append", + default=[], + metavar="file", + help=( + "Uninstall all the packages listed in the given requirements " + "file. This option can be used multiple times." 
+            ),
+        )
+        self.cmd_opts.add_option(
+            "-y",
+            "--yes",
+            dest="yes",
+            action="store_true",
+            help="Don't ask for confirmation of uninstall deletions.",
+        )
+        self.cmd_opts.add_option(cmdoptions.root_user_action())
+        self.cmd_opts.add_option(cmdoptions.override_externally_managed())
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options: Values, args: list[str]) -> int:
+        session = self.get_default_session(options)
+
+        reqs_to_uninstall = {}
+        for name in args:
+            req = install_req_from_line(
+                name,
+                isolated=options.isolated_mode,
+            )
+            if req.name:
+                reqs_to_uninstall[canonicalize_name(req.name)] = req
+            else:
+                logger.warning(
+                    "Invalid requirement: %r ignored -"
+                    " the uninstall command expects named"
+                    " requirements.",
+                    name,
+                )
+        for filename in options.requirements:
+            for parsed_req in parse_requirements(
+                filename, options=options, session=session
+            ):
+                req = install_req_from_parsed_requirement(
+                    parsed_req, isolated=options.isolated_mode
+                )
+                if req.name:
+                    reqs_to_uninstall[canonicalize_name(req.name)] = req
+        if not reqs_to_uninstall:
+            raise InstallationError(
+                f"You must give at least one requirement to {self.name} (see "
+                f'"pip help {self.name}")'
+            )
+
+        if not options.override_externally_managed:
+            check_externally_managed()
+
+        protect_pip_from_modification_on_windows(
+            modifying_pip="pip" in reqs_to_uninstall
+        )
+
+        for req in reqs_to_uninstall.values():
+            uninstall_pathset = req.uninstall(
+                auto_confirm=options.yes,
+                verbose=self.verbosity > 0,
+            )
+            if uninstall_pathset:
+                uninstall_pathset.commit()
+        if options.root_user_action == "warn":
+            warn_if_run_as_root()
+        return SUCCESS
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/commands/wheel.py b/.venv/lib/python3.12/site-packages/pip/_internal/commands/wheel.py
new file mode 100644
index 0000000..61be254
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/commands/wheel.py
@@ -0,0 +1,181 @@
+import logging
+import os
+import shutil
+from optparse import Values
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.req_command import RequirementCommand, with_cleanup
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.exceptions import CommandError
+from pip._internal.operations.build.build_tracker import get_build_tracker
+from pip._internal.req.req_install import (
+    InstallRequirement,
+    check_legacy_setup_py_options,
+)
+from pip._internal.utils.misc import ensure_dir, normalize_path
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.wheel_builder import build
+
+logger = logging.getLogger(__name__)
+
+
+class WheelCommand(RequirementCommand):
+    """
+    Build Wheel archives for your requirements and dependencies.
+
+    Wheel is a built-package format, and offers the advantage of not
+    recompiling your software during every install. For more details, see the
+    wheel docs: https://wheel.readthedocs.io/en/latest/
+
+    'pip wheel' uses the build system interface as described here:
+    https://pip.pypa.io/en/stable/reference/build-system/
+
+    """
+
+    usage = """
+      %prog [options] <requirement specifier> ...
+      %prog [options] -r <requirements file> ...
+      %prog [options] [-e] <vcs project url> ...
+      %prog [options] [-e] <local project path> ...
+      %prog [options] <archive url/path> ..."""

+    def add_options(self) -> None:
+        self.cmd_opts.add_option(
+            "-w",
+            "--wheel-dir",
+            dest="wheel_dir",
+            metavar="dir",
+            default=os.curdir,
+            help=(
+                "Build wheels into <dir>, where the default is the "
+                "current working directory."
+ ), + ) + self.cmd_opts.add_option(cmdoptions.no_binary()) + self.cmd_opts.add_option(cmdoptions.only_binary()) + self.cmd_opts.add_option(cmdoptions.prefer_binary()) + self.cmd_opts.add_option(cmdoptions.no_build_isolation()) + self.cmd_opts.add_option(cmdoptions.use_pep517()) + self.cmd_opts.add_option(cmdoptions.no_use_pep517()) + self.cmd_opts.add_option(cmdoptions.check_build_deps()) + self.cmd_opts.add_option(cmdoptions.constraints()) + self.cmd_opts.add_option(cmdoptions.editable()) + self.cmd_opts.add_option(cmdoptions.requirements()) + self.cmd_opts.add_option(cmdoptions.src()) + self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) + self.cmd_opts.add_option(cmdoptions.no_deps()) + self.cmd_opts.add_option(cmdoptions.progress_bar()) + + self.cmd_opts.add_option( + "--no-verify", + dest="no_verify", + action="store_true", + default=False, + help="Don't verify if built wheel is valid.", + ) + + self.cmd_opts.add_option(cmdoptions.config_settings()) + self.cmd_opts.add_option(cmdoptions.build_options()) + self.cmd_opts.add_option(cmdoptions.global_options()) + + self.cmd_opts.add_option( + "--pre", + action="store_true", + default=False, + help=( + "Include pre-release and development versions. By default, " + "pip only finds stable versions." + ), + ) + + self.cmd_opts.add_option(cmdoptions.require_hashes()) + + index_opts = cmdoptions.make_option_group( + cmdoptions.index_group, + self.parser, + ) + + self.parser.insert_option_group(0, index_opts) + self.parser.insert_option_group(0, self.cmd_opts) + + @with_cleanup + def run(self, options: Values, args: list[str]) -> int: + session = self.get_default_session(options) + + finder = self._build_package_finder(options, session) + + options.wheel_dir = normalize_path(options.wheel_dir) + ensure_dir(options.wheel_dir) + + build_tracker = self.enter_context(get_build_tracker()) + + directory = TempDirectory( + delete=not options.no_clean, + kind="wheel", + globally_managed=True, + ) + + reqs = self.get_requirements(args, options, finder, session) + check_legacy_setup_py_options(options, reqs) + + wheel_cache = WheelCache(options.cache_dir) + + preparer = self.make_requirement_preparer( + temp_build_dir=directory, + options=options, + build_tracker=build_tracker, + session=session, + finder=finder, + download_dir=options.wheel_dir, + use_user_site=False, + verbosity=self.verbosity, + ) + + resolver = self.make_resolver( + preparer=preparer, + finder=finder, + options=options, + wheel_cache=wheel_cache, + ignore_requires_python=options.ignore_requires_python, + use_pep517=options.use_pep517, + ) + + self.trace_basic_info(finder) + + requirement_set = resolver.resolve(reqs, check_supported_wheels=True) + + reqs_to_build: list[InstallRequirement] = [] + for req in requirement_set.requirements.values(): + if req.is_wheel: + preparer.save_linked_requirement(req) + else: + reqs_to_build.append(req) + + preparer.prepare_linked_requirements_more(requirement_set.requirements.values()) + + # build wheels + build_successes, build_failures = build( + reqs_to_build, + wheel_cache=wheel_cache, + verify=(not options.no_verify), + build_options=options.build_options or [], + global_options=options.global_options or [], + ) + for req in build_successes: + assert req.link and req.link.is_wheel + assert req.local_file_path + # copy from cache to target directory + try: + shutil.copy(req.local_file_path, options.wheel_dir) + except OSError as e: + logger.warning( + "Building wheel for %s failed: %s", + req.name, + e, + ) + 
build_failures.append(req)
+        if len(build_failures) != 0:
+            raise CommandError("Failed to build one or more wheels")
+
+        return SUCCESS
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/configuration.py b/.venv/lib/python3.12/site-packages/pip/_internal/configuration.py
new file mode 100644
index 0000000..f581a2c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/configuration.py
@@ -0,0 +1,397 @@
+"""Configuration management setup
+
+Some terminology:
+- name
+  As written in config files.
+- value
+  Value associated with a name.
+- key
+  Name combined with its section (section.name).
+- variant
+  A single word describing where the configuration key-value pair came from.
+"""
+
+from __future__ import annotations
+
+import configparser
+import locale
+import os
+import sys
+from collections.abc import Iterable
+from typing import Any, NewType
+
+from pip._internal.exceptions import (
+    ConfigurationError,
+    ConfigurationFileCouldNotBeLoaded,
+)
+from pip._internal.utils import appdirs
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.logging import getLogger
+from pip._internal.utils.misc import ensure_dir, enum
+
+RawConfigParser = configparser.RawConfigParser  # Shorthand
+Kind = NewType("Kind", str)
+
+CONFIG_BASENAME = "pip.ini" if WINDOWS else "pip.conf"
+ENV_NAMES_IGNORED = "version", "help"
+
+# The kinds of configurations there are.
+kinds = enum(
+    USER="user",  # User Specific
+    GLOBAL="global",  # System Wide
+    SITE="site",  # [Virtual] Environment Specific
+    ENV="env",  # from PIP_CONFIG_FILE
+    ENV_VAR="env-var",  # from Environment Variables
+)
+OVERRIDE_ORDER = kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
+VALID_LOAD_ONLY = kinds.USER, kinds.GLOBAL, kinds.SITE
+
+logger = getLogger(__name__)
+
+
+# NOTE: Maybe use the optionxform attribute to normalize key names.
+def _normalize_name(name: str) -> str:
+    """Make a name consistent regardless of source (environment or file)"""
+    name = name.lower().replace("_", "-")
+    if name.startswith("--"):
+        name = name[2:]  # only prefer long opts
+    return name
+
+
+def _disassemble_key(name: str) -> list[str]:
+    if "." not in name:
+        error_message = (
+            "Key does not contain dot separated section and key. "
+            f"Perhaps you wanted to use 'global.{name}' instead?"
+        )
+        raise ConfigurationError(error_message)
+    return name.split(".", 1)
+
+
+def get_configuration_files() -> dict[Kind, list[str]]:
+    global_config_files = [
+        os.path.join(path, CONFIG_BASENAME) for path in appdirs.site_config_dirs("pip")
+    ]
+
+    site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME)
+    legacy_config_file = os.path.join(
+        os.path.expanduser("~"),
+        "pip" if WINDOWS else ".pip",
+        CONFIG_BASENAME,
+    )
+    new_config_file = os.path.join(appdirs.user_config_dir("pip"), CONFIG_BASENAME)
+    return {
+        kinds.GLOBAL: global_config_files,
+        kinds.SITE: [site_config_file],
+        kinds.USER: [legacy_config_file, new_config_file],
+    }
+
+
+class Configuration:
+    """Handles management of configuration.
+
+    Provides an interface to accessing and managing configuration files.
+
+    This class provides an API that takes "section.key-name" style keys and
+    stores the value associated with it as "key-name" under the section
+    "section".
+
+    This allows for a clean interface wherein both the section and the
+    key-name are preserved in an easy-to-manage form in the configuration
+    files, and the stored data stays clean.
+ """ + + def __init__(self, isolated: bool, load_only: Kind | None = None) -> None: + super().__init__() + + if load_only is not None and load_only not in VALID_LOAD_ONLY: + raise ConfigurationError( + "Got invalid value for load_only - should be one of {}".format( + ", ".join(map(repr, VALID_LOAD_ONLY)) + ) + ) + self.isolated = isolated + self.load_only = load_only + + # Because we keep track of where we got the data from + self._parsers: dict[Kind, list[tuple[str, RawConfigParser]]] = { + variant: [] for variant in OVERRIDE_ORDER + } + self._config: dict[Kind, dict[str, dict[str, Any]]] = { + variant: {} for variant in OVERRIDE_ORDER + } + self._modified_parsers: list[tuple[str, RawConfigParser]] = [] + + def load(self) -> None: + """Loads configuration from configuration files and environment""" + self._load_config_files() + if not self.isolated: + self._load_environment_vars() + + def get_file_to_edit(self) -> str | None: + """Returns the file with highest priority in configuration""" + assert self.load_only is not None, "Need to be specified a file to be editing" + + try: + return self._get_parser_to_modify()[0] + except IndexError: + return None + + def items(self) -> Iterable[tuple[str, Any]]: + """Returns key-value pairs like dict.items() representing the loaded + configuration + """ + return self._dictionary.items() + + def get_value(self, key: str) -> Any: + """Get a value from the configuration.""" + orig_key = key + key = _normalize_name(key) + try: + clean_config: dict[str, Any] = {} + for file_values in self._dictionary.values(): + clean_config.update(file_values) + return clean_config[key] + except KeyError: + # disassembling triggers a more useful error message than simply + # "No such key" in the case that the key isn't in the form command.option + _disassemble_key(key) + raise ConfigurationError(f"No such key - {orig_key}") + + def set_value(self, key: str, value: Any) -> None: + """Modify a value in the configuration.""" + key = _normalize_name(key) + self._ensure_have_load_only() + + assert self.load_only + fname, parser = self._get_parser_to_modify() + + if parser is not None: + section, name = _disassemble_key(key) + + # Modify the parser and the configuration + if not parser.has_section(section): + parser.add_section(section) + parser.set(section, name, value) + + self._config[self.load_only].setdefault(fname, {}) + self._config[self.load_only][fname][key] = value + self._mark_as_modified(fname, parser) + + def unset_value(self, key: str) -> None: + """Unset a value in the configuration.""" + orig_key = key + key = _normalize_name(key) + self._ensure_have_load_only() + + assert self.load_only + fname, parser = self._get_parser_to_modify() + + if ( + key not in self._config[self.load_only][fname] + and key not in self._config[self.load_only] + ): + raise ConfigurationError(f"No such key - {orig_key}") + + if parser is not None: + section, name = _disassemble_key(key) + if not ( + parser.has_section(section) and parser.remove_option(section, name) + ): + # The option was not removed. + raise ConfigurationError( + "Fatal Internal error [id=1]. Please report as a bug." + ) + + # The section may be empty after the option was removed. 
+            if not parser.items(section):
+                parser.remove_section(section)
+            self._mark_as_modified(fname, parser)
+        try:
+            del self._config[self.load_only][fname][key]
+        except KeyError:
+            del self._config[self.load_only][key]
+
+    def save(self) -> None:
+        """Save the current in-memory state."""
+        self._ensure_have_load_only()
+
+        for fname, parser in self._modified_parsers:
+            logger.info("Writing to %s", fname)
+
+            # Ensure directory exists.
+            ensure_dir(os.path.dirname(fname))
+
+            # Ensure the directory's permissions allow writing the file.
+            try:
+                with open(fname, "w") as f:
+                    parser.write(f)
+            except OSError as error:
+                raise ConfigurationError(
+                    f"An error occurred while writing to the configuration file "
+                    f"{fname}: {error}"
+                )
+
+    #
+    # Private routines
+    #
+
+    def _ensure_have_load_only(self) -> None:
+        if self.load_only is None:
+            raise ConfigurationError("Need a specific file to modify.")
+        logger.debug("Will be working with %s variant only", self.load_only)
+
+    @property
+    def _dictionary(self) -> dict[str, dict[str, Any]]:
+        """A dictionary representing the loaded configuration."""
+        # NOTE: Dictionaries are not populated if not loaded. So, conditionals
+        # are not needed here.
+        retval = {}
+
+        for variant in OVERRIDE_ORDER:
+            retval.update(self._config[variant])
+
+        return retval
+
+    def _load_config_files(self) -> None:
+        """Loads configuration from configuration files"""
+        config_files = dict(self.iter_config_files())
+        if config_files[kinds.ENV][0:1] == [os.devnull]:
+            logger.debug(
+                "Skipping loading configuration files due to "
+                "environment's PIP_CONFIG_FILE being os.devnull"
+            )
+            return
+
+        for variant, files in config_files.items():
+            for fname in files:
+                # If there's a specific variant set in `load_only`, load only
+                # that variant, not the others.
+                if self.load_only is not None and variant != self.load_only:
+                    logger.debug("Skipping file '%s' (variant: %s)", fname, variant)
+                    continue
+
+                parser = self._load_file(variant, fname)
+
+                # Keeping track of the parsers used
+                self._parsers[variant].append((fname, parser))
+
+    def _load_file(self, variant: Kind, fname: str) -> RawConfigParser:
+        logger.verbose("For variant '%s', will try loading '%s'", variant, fname)
+        parser = self._construct_parser(fname)
+
+        for section in parser.sections():
+            items = parser.items(section)
+            self._config[variant].setdefault(fname, {})
+            self._config[variant][fname].update(self._normalized_keys(section, items))
+
+        return parser
+
+    def _construct_parser(self, fname: str) -> RawConfigParser:
+        parser = configparser.RawConfigParser()
+        # If there is no such file, don't bother reading it but create the
+        # parser anyway, to hold the data.
+        # Doing this is useful when modifying and saving files, where we don't
+        # need to construct a parser.
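+        # e.g. `pip config set global.timeout 60` may target a user config
+        # file that does not exist yet; the empty parser constructed above is
+        # then filled in and written out by save().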
+ if os.path.exists(fname): + locale_encoding = locale.getpreferredencoding(False) + try: + parser.read(fname, encoding=locale_encoding) + except UnicodeDecodeError: + # See https://github.com/pypa/pip/issues/4963 + raise ConfigurationFileCouldNotBeLoaded( + reason=f"contains invalid {locale_encoding} characters", + fname=fname, + ) + except configparser.Error as error: + # See https://github.com/pypa/pip/issues/4893 + raise ConfigurationFileCouldNotBeLoaded(error=error) + return parser + + def _load_environment_vars(self) -> None: + """Loads configuration from environment variables""" + self._config[kinds.ENV_VAR].setdefault(":env:", {}) + self._config[kinds.ENV_VAR][":env:"].update( + self._normalized_keys(":env:", self.get_environ_vars()) + ) + + def _normalized_keys( + self, section: str, items: Iterable[tuple[str, Any]] + ) -> dict[str, Any]: + """Normalizes items to construct a dictionary with normalized keys. + + This routine is where the names become keys and are made the same + regardless of source - configuration files or environment. + """ + normalized = {} + for name, val in items: + key = section + "." + _normalize_name(name) + normalized[key] = val + return normalized + + def get_environ_vars(self) -> Iterable[tuple[str, str]]: + """Returns a generator with all environmental vars with prefix PIP_""" + for key, val in os.environ.items(): + if key.startswith("PIP_"): + name = key[4:].lower() + if name not in ENV_NAMES_IGNORED: + yield name, val + + # XXX: This is patched in the tests. + def iter_config_files(self) -> Iterable[tuple[Kind, list[str]]]: + """Yields variant and configuration files associated with it. + + This should be treated like items of a dictionary. The order + here doesn't affect what gets overridden. That is controlled + by OVERRIDE_ORDER. However this does control the order they are + displayed to the user. It's probably most ergonomic to display + things in the same order as OVERRIDE_ORDER + """ + # SMELL: Move the conditions out of this function + + env_config_file = os.environ.get("PIP_CONFIG_FILE", None) + config_files = get_configuration_files() + + yield kinds.GLOBAL, config_files[kinds.GLOBAL] + + # per-user config is not loaded when env_config_file exists + should_load_user_config = not self.isolated and not ( + env_config_file and os.path.exists(env_config_file) + ) + if should_load_user_config: + # The legacy config file is overridden by the new config file + yield kinds.USER, config_files[kinds.USER] + + # virtualenv config + yield kinds.SITE, config_files[kinds.SITE] + + if env_config_file is not None: + yield kinds.ENV, [env_config_file] + else: + yield kinds.ENV, [] + + def get_values_in_config(self, variant: Kind) -> dict[str, Any]: + """Get values present in a config file""" + return self._config[variant] + + def _get_parser_to_modify(self) -> tuple[str, RawConfigParser]: + # Determine which parser to modify + assert self.load_only + parsers = self._parsers[self.load_only] + if not parsers: + # This should not happen if everything works correctly. + raise ConfigurationError( + "Fatal Internal error [id=2]. Please report as a bug." + ) + + # Use the highest priority parser. + return parsers[-1] + + # XXX: This is patched in the tests. 
+ def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None: + file_parser_tuple = (fname, parser) + if file_parser_tuple not in self._modified_parsers: + self._modified_parsers.append(file_parser_tuple) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._dictionary!r})" diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__init__.py new file mode 100644 index 0000000..9a89a83 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__init__.py @@ -0,0 +1,21 @@ +from pip._internal.distributions.base import AbstractDistribution +from pip._internal.distributions.sdist import SourceDistribution +from pip._internal.distributions.wheel import WheelDistribution +from pip._internal.req.req_install import InstallRequirement + + +def make_distribution_for_install_requirement( + install_req: InstallRequirement, +) -> AbstractDistribution: + """Returns a Distribution for the given InstallRequirement""" + # Editable requirements will always be source distributions. They use the + # legacy logic until we create a modern standard for them. + if install_req.editable: + return SourceDistribution(install_req) + + # If it's a wheel, it's a WheelDistribution + if install_req.is_wheel: + return WheelDistribution(install_req) + + # Otherwise, a SourceDistribution + return SourceDistribution(install_req) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..d26ae6f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/base.cpython-312.pyc new file mode 100644 index 0000000..5b06531 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-312.pyc new file mode 100644 index 0000000..8b9bb31 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-312.pyc new file mode 100644 index 0000000..21c2dec Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-312.pyc new file mode 100644 index 0000000..864a5b0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/base.py 
b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/base.py new file mode 100644 index 0000000..ea61f35 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/base.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +import abc +from typing import TYPE_CHECKING + +from pip._internal.metadata.base import BaseDistribution +from pip._internal.req import InstallRequirement + +if TYPE_CHECKING: + from pip._internal.build_env import BuildEnvironmentInstaller + + +class AbstractDistribution(metaclass=abc.ABCMeta): + """A base class for handling installable artifacts. + + The requirements for anything installable are as follows: + + - we must be able to determine the requirement name + (or we can't correctly handle the non-upgrade case). + + - for packages with setup requirements, we must also be able + to determine their requirements without installing additional + packages (for the same reason as run-time dependencies) + + - we must be able to create a Distribution object exposing the + above metadata. + + - if we need to do work in the build tracker, we must be able to generate a unique + string to identify the requirement in the build tracker. + """ + + def __init__(self, req: InstallRequirement) -> None: + super().__init__() + self.req = req + + @abc.abstractproperty + def build_tracker_id(self) -> str | None: + """A string that uniquely identifies this requirement to the build tracker. + + If None, then this dist has no work to do in the build tracker, and + ``.prepare_distribution_metadata()`` will not be called.""" + raise NotImplementedError() + + @abc.abstractmethod + def get_metadata_distribution(self) -> BaseDistribution: + raise NotImplementedError() + + @abc.abstractmethod + def prepare_distribution_metadata( + self, + build_env_installer: BuildEnvironmentInstaller, + build_isolation: bool, + check_build_deps: bool, + ) -> None: + raise NotImplementedError() diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/installed.py b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/installed.py new file mode 100644 index 0000000..b6a67df --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/installed.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pip._internal.distributions.base import AbstractDistribution +from pip._internal.metadata import BaseDistribution + +if TYPE_CHECKING: + from pip._internal.build_env import BuildEnvironmentInstaller + + +class InstalledDistribution(AbstractDistribution): + """Represents an installed package. + + This does not need any preparation as the required information has already + been computed. 
+ """ + + @property + def build_tracker_id(self) -> str | None: + return None + + def get_metadata_distribution(self) -> BaseDistribution: + assert self.req.satisfied_by is not None, "not actually installed" + return self.req.satisfied_by + + def prepare_distribution_metadata( + self, + build_env_installer: BuildEnvironmentInstaller, + build_isolation: bool, + check_build_deps: bool, + ) -> None: + pass diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/sdist.py b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/sdist.py new file mode 100644 index 0000000..e2821f8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/sdist.py @@ -0,0 +1,165 @@ +from __future__ import annotations + +import logging +from collections.abc import Iterable +from typing import TYPE_CHECKING + +from pip._internal.build_env import BuildEnvironment +from pip._internal.distributions.base import AbstractDistribution +from pip._internal.exceptions import InstallationError +from pip._internal.metadata import BaseDistribution +from pip._internal.utils.subprocess import runner_with_spinner_message + +if TYPE_CHECKING: + from pip._internal.build_env import BuildEnvironmentInstaller + +logger = logging.getLogger(__name__) + + +class SourceDistribution(AbstractDistribution): + """Represents a source distribution. + + The preparation step for these needs metadata for the packages to be + generated, either using PEP 517 or using the legacy `setup.py egg_info`. + """ + + @property + def build_tracker_id(self) -> str | None: + """Identify this requirement uniquely by its link.""" + assert self.req.link + return self.req.link.url_without_fragment + + def get_metadata_distribution(self) -> BaseDistribution: + return self.req.get_dist() + + def prepare_distribution_metadata( + self, + build_env_installer: BuildEnvironmentInstaller, + build_isolation: bool, + check_build_deps: bool, + ) -> None: + # Load pyproject.toml, to determine whether PEP 517 is to be used + self.req.load_pyproject_toml() + + # Set up the build isolation, if this requirement should be isolated + should_isolate = self.req.use_pep517 and build_isolation + if should_isolate: + # Setup an isolated environment and install the build backend static + # requirements in it. + self._prepare_build_backend(build_env_installer) + # Check that if the requirement is editable, it either supports PEP 660 or + # has a setup.py or a setup.cfg. This cannot be done earlier because we need + # to setup the build backend to verify it supports build_editable, nor can + # it be done later, because we want to avoid installing build requirements + # needlessly. Doing it here also works around setuptools generating + # UNKNOWN.egg-info when running get_requires_for_build_wheel on a directory + # without setup.py nor setup.cfg. + self.req.isolated_editable_sanity_check() + # Install the dynamic build requirements. 
+ self._install_build_reqs(build_env_installer) + # Check if the current environment provides build dependencies + should_check_deps = self.req.use_pep517 and check_build_deps + if should_check_deps: + pyproject_requires = self.req.pyproject_requires + assert pyproject_requires is not None + conflicting, missing = self.req.build_env.check_requirements( + pyproject_requires + ) + if conflicting: + self._raise_conflicts("the backend dependencies", conflicting) + if missing: + self._raise_missing_reqs(missing) + self.req.prepare_metadata() + + def _prepare_build_backend( + self, build_env_installer: BuildEnvironmentInstaller + ) -> None: + # Isolate in a BuildEnvironment and install the build-time + # requirements. + pyproject_requires = self.req.pyproject_requires + assert pyproject_requires is not None + + self.req.build_env = BuildEnvironment(build_env_installer) + self.req.build_env.install_requirements( + pyproject_requires, "overlay", kind="build dependencies", for_req=self.req + ) + conflicting, missing = self.req.build_env.check_requirements( + self.req.requirements_to_check + ) + if conflicting: + self._raise_conflicts("PEP 517/518 supported requirements", conflicting) + if missing: + logger.warning( + "Missing build requirements in pyproject.toml for %s.", + self.req, + ) + logger.warning( + "The project does not specify a build backend, and " + "pip cannot fall back to setuptools without %s.", + " and ".join(map(repr, sorted(missing))), + ) + + def _get_build_requires_wheel(self) -> Iterable[str]: + with self.req.build_env: + runner = runner_with_spinner_message("Getting requirements to build wheel") + backend = self.req.pep517_backend + assert backend is not None + with backend.subprocess_runner(runner): + return backend.get_requires_for_build_wheel() + + def _get_build_requires_editable(self) -> Iterable[str]: + with self.req.build_env: + runner = runner_with_spinner_message( + "Getting requirements to build editable" + ) + backend = self.req.pep517_backend + assert backend is not None + with backend.subprocess_runner(runner): + return backend.get_requires_for_build_editable() + + def _install_build_reqs( + self, build_env_installer: BuildEnvironmentInstaller + ) -> None: + # Install any extra build dependencies that the backend requests. + # This must be done in a second pass, as the pyproject.toml + # dependencies must be installed before we can call the backend. + if ( + self.req.editable + and self.req.permit_editable_wheels + and self.req.supports_pyproject_editable + ): + build_reqs = self._get_build_requires_editable() + else: + build_reqs = self._get_build_requires_wheel() + conflicting, missing = self.req.build_env.check_requirements(build_reqs) + if conflicting: + self._raise_conflicts("the backend dependencies", conflicting) + self.req.build_env.install_requirements( + missing, "normal", kind="backend dependencies", for_req=self.req + ) + + def _raise_conflicts( + self, conflicting_with: str, conflicting_reqs: set[tuple[str, str]] + ) -> None: + format_string = ( + "Some build dependencies for {requirement} " + "conflict with {conflicting_with}: {description}." 
+        )
+        error_message = format_string.format(
+            requirement=self.req,
+            conflicting_with=conflicting_with,
+            description=", ".join(
+                f"{installed} is incompatible with {wanted}"
+                for installed, wanted in sorted(conflicting_reqs)
+            ),
+        )
+        raise InstallationError(error_message)
+
+    def _raise_missing_reqs(self, missing: set[str]) -> None:
+        format_string = (
+            "Some build dependencies for {requirement} are missing: {missing}."
+        )
+        error_message = format_string.format(
+            requirement=self.req, missing=", ".join(map(repr, sorted(missing)))
+        )
+        raise InstallationError(error_message)
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/distributions/wheel.py b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/wheel.py
new file mode 100644
index 0000000..ee12bfa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/distributions/wheel.py
@@ -0,0 +1,44 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.metadata import (
+    BaseDistribution,
+    FilesystemWheel,
+    get_wheel_distribution,
+)
+
+if TYPE_CHECKING:
+    from pip._internal.build_env import BuildEnvironmentInstaller
+
+
+class WheelDistribution(AbstractDistribution):
+    """Represents a wheel distribution.
+
+    This does not need any preparation as wheels can be directly unpacked.
+    """
+
+    @property
+    def build_tracker_id(self) -> str | None:
+        return None
+
+    def get_metadata_distribution(self) -> BaseDistribution:
+        """Loads the metadata from the wheel file into memory and returns a
+        Distribution that uses it, not relying on the wheel file or
+        requirement.
+        """
+        assert self.req.local_file_path, "Set as part of preparation during download"
+        assert self.req.name, "Wheels are never unnamed"
+        wheel = FilesystemWheel(self.req.local_file_path)
+        return get_wheel_distribution(wheel, canonicalize_name(self.req.name))
+
+    def prepare_distribution_metadata(
+        self,
+        build_env_installer: BuildEnvironmentInstaller,
+        build_isolation: bool,
+        check_build_deps: bool,
+    ) -> None:
+        pass
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/exceptions.py b/.venv/lib/python3.12/site-packages/pip/_internal/exceptions.py
new file mode 100644
index 0000000..98f9549
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/exceptions.py
@@ -0,0 +1,881 @@
+"""Exceptions used throughout the package.
+
+This module MUST NOT try to import from anything within `pip._internal` to
+operate. This is expected to be importable from any/all files within the
+subpackage and, thus, should not depend on them.
+""" + +from __future__ import annotations + +import configparser +import contextlib +import locale +import logging +import pathlib +import re +import sys +from collections.abc import Iterator +from itertools import chain, groupby, repeat +from typing import TYPE_CHECKING, Literal + +from pip._vendor.packaging.requirements import InvalidRequirement +from pip._vendor.packaging.version import InvalidVersion +from pip._vendor.rich.console import Console, ConsoleOptions, RenderResult +from pip._vendor.rich.markup import escape +from pip._vendor.rich.text import Text + +if TYPE_CHECKING: + from hashlib import _Hash + + from pip._vendor.requests.models import Request, Response + + from pip._internal.metadata import BaseDistribution + from pip._internal.network.download import _FileDownload + from pip._internal.req.req_install import InstallRequirement + +logger = logging.getLogger(__name__) + + +# +# Scaffolding +# +def _is_kebab_case(s: str) -> bool: + return re.match(r"^[a-z]+(-[a-z]+)*$", s) is not None + + +def _prefix_with_indent( + s: Text | str, + console: Console, + *, + prefix: str, + indent: str, +) -> Text: + if isinstance(s, Text): + text = s + else: + text = console.render_str(s) + + return console.render_str(prefix, overflow="ignore") + console.render_str( + f"\n{indent}", overflow="ignore" + ).join(text.split(allow_blank=True)) + + +class PipError(Exception): + """The base pip error.""" + + +class DiagnosticPipError(PipError): + """An error, that presents diagnostic information to the user. + + This contains a bunch of logic, to enable pretty presentation of our error + messages. Each error gets a unique reference. Each error can also include + additional context, a hint and/or a note -- which are presented with the + main error message in a consistent style. + + This is adapted from the error output styling in `sphinx-theme-builder`. + """ + + reference: str + + def __init__( + self, + *, + kind: Literal["error", "warning"] = "error", + reference: str | None = None, + message: str | Text, + context: str | Text | None, + hint_stmt: str | Text | None, + note_stmt: str | Text | None = None, + link: str | None = None, + ) -> None: + # Ensure a proper reference is provided. + if reference is None: + assert hasattr(self, "reference"), "error reference not provided!" + reference = self.reference + assert _is_kebab_case(reference), "error reference must be kebab-case!" + + self.kind = kind + self.reference = reference + + self.message = message + self.context = context + + self.note_stmt = note_stmt + self.hint_stmt = hint_stmt + + self.link = link + + super().__init__(f"<{self.__class__.__name__}: {self.reference}>") + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__}(" + f"reference={self.reference!r}, " + f"message={self.message!r}, " + f"context={self.context!r}, " + f"note_stmt={self.note_stmt!r}, " + f"hint_stmt={self.hint_stmt!r}" + ")>" + ) + + def __rich_console__( + self, + console: Console, + options: ConsoleOptions, + ) -> RenderResult: + colour = "red" if self.kind == "error" else "yellow" + + yield f"[{colour} bold]{self.kind}[/]: [bold]{self.reference}[/]" + yield "" + + if not options.ascii_only: + # Present the main message, with relevant context indented. 
+            if self.context is not None:
+                yield _prefix_with_indent(
+                    self.message,
+                    console,
+                    prefix=f"[{colour}]×[/] ",
+                    indent=f"[{colour}]│[/] ",
+                )
+                yield _prefix_with_indent(
+                    self.context,
+                    console,
+                    prefix=f"[{colour}]╰─>[/] ",
+                    indent=f"[{colour}]   [/] ",
+                )
+            else:
+                yield _prefix_with_indent(
+                    self.message,
+                    console,
+                    prefix="[red]×[/] ",
+                    indent="  ",
+                )
+        else:
+            yield self.message
+            if self.context is not None:
+                yield ""
+                yield self.context
+
+        if self.note_stmt is not None or self.hint_stmt is not None:
+            yield ""
+
+        if self.note_stmt is not None:
+            yield _prefix_with_indent(
+                self.note_stmt,
+                console,
+                prefix="[magenta bold]note[/]: ",
+                indent="      ",
+            )
+        if self.hint_stmt is not None:
+            yield _prefix_with_indent(
+                self.hint_stmt,
+                console,
+                prefix="[cyan bold]hint[/]: ",
+                indent="      ",
+            )
+
+        if self.link is not None:
+            yield ""
+            yield f"Link: {self.link}"
+
+
+#
+# Actual Errors
+#
+class ConfigurationError(PipError):
+    """General exception in configuration"""
+
+
+class InstallationError(PipError):
+    """General exception during installation"""
+
+
+class MissingPyProjectBuildRequires(DiagnosticPipError):
+    """Raised when pyproject.toml has `build-system`, but no `build-system.requires`."""
+
+    reference = "missing-pyproject-build-system-requires"
+
+    def __init__(self, *, package: str) -> None:
+        super().__init__(
+            message=f"Can not process {escape(package)}",
+            context=Text(
+                "This package has an invalid pyproject.toml file.\n"
+                "The [build-system] table is missing the mandatory `requires` key."
+            ),
+            note_stmt="This is an issue with the package mentioned above, not pip.",
+            hint_stmt=Text("See PEP 518 for the detailed specification."),
+        )
+
+
+class InvalidPyProjectBuildRequires(DiagnosticPipError):
+    """Raised when pyproject.toml has an invalid `build-system.requires`."""
+
+    reference = "invalid-pyproject-build-system-requires"
+
+    def __init__(self, *, package: str, reason: str) -> None:
+        super().__init__(
+            message=f"Can not process {escape(package)}",
+            context=Text(
+                "This package has an invalid `build-system.requires` key in "
+                f"pyproject.toml.\n{reason}"
+            ),
+            note_stmt="This is an issue with the package mentioned above, not pip.",
+            hint_stmt=Text("See PEP 518 for the detailed specification."),
+        )
+
+
+class NoneMetadataError(PipError):
+    """Raised when accessing a Distribution's "METADATA" or "PKG-INFO".
+
+    This signifies an inconsistency, when the Distribution claims to have
+    the metadata file (if not, raise ``FileNotFoundError`` instead), but is
+    not actually able to produce its content. This may be due to permission
+    errors.
+    """
+
+    def __init__(
+        self,
+        dist: BaseDistribution,
+        metadata_name: str,
+    ) -> None:
+        """
+        :param dist: A Distribution object.
+        :param metadata_name: The name of the metadata being accessed
+            (can be "METADATA" or "PKG-INFO").
+        """
+        self.dist = dist
+        self.metadata_name = metadata_name
+
+    def __str__(self) -> str:
+        # Use `dist` in the error message because its stringification
+        # includes more information, like the version and location.
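+        # (Editor's note, illustrative) e.g. "None METADATA metadata found for
+        # distribution: somepkg 1.0 (/tmp/site-packages)"; the exact wording of
+        # the distribution part depends on BaseDistribution's __str__.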
+ return f"None {self.metadata_name} metadata found for distribution: {self.dist}" + + +class UserInstallationInvalid(InstallationError): + """A --user install is requested on an environment without user site.""" + + def __str__(self) -> str: + return "User base directory is not specified" + + +class InvalidSchemeCombination(InstallationError): + def __str__(self) -> str: + before = ", ".join(str(a) for a in self.args[:-1]) + return f"Cannot set {before} and {self.args[-1]} together" + + +class DistributionNotFound(InstallationError): + """Raised when a distribution cannot be found to satisfy a requirement""" + + +class RequirementsFileParseError(InstallationError): + """Raised when a general error occurs parsing a requirements file line.""" + + +class BestVersionAlreadyInstalled(PipError): + """Raised when the most up-to-date version of a package is already + installed.""" + + +class BadCommand(PipError): + """Raised when virtualenv or a command is not found""" + + +class CommandError(PipError): + """Raised when there is an error in command-line arguments""" + + +class PreviousBuildDirError(PipError): + """Raised when there's a previous conflicting build directory""" + + +class NetworkConnectionError(PipError): + """HTTP connection error""" + + def __init__( + self, + error_msg: str, + response: Response | None = None, + request: Request | None = None, + ) -> None: + """ + Initialize NetworkConnectionError with `request` and `response` + objects. + """ + self.response = response + self.request = request + self.error_msg = error_msg + if ( + self.response is not None + and not self.request + and hasattr(response, "request") + ): + self.request = self.response.request + super().__init__(error_msg, response, request) + + def __str__(self) -> str: + return str(self.error_msg) + + +class InvalidWheelFilename(InstallationError): + """Invalid wheel filename.""" + + +class UnsupportedWheel(InstallationError): + """Unsupported wheel.""" + + +class InvalidWheel(InstallationError): + """Invalid (e.g. corrupt) wheel.""" + + def __init__(self, location: str, name: str): + self.location = location + self.name = name + + def __str__(self) -> str: + return f"Wheel '{self.name}' located at {self.location} is invalid." + + +class MetadataInconsistent(InstallationError): + """Built metadata contains inconsistent information. + + This is raised when the metadata contains values (e.g. name and version) + that do not match the information previously obtained from sdist filename, + user-supplied ``#egg=`` value, or an install requirement name. 
+ """ + + def __init__( + self, ireq: InstallRequirement, field: str, f_val: str, m_val: str + ) -> None: + self.ireq = ireq + self.field = field + self.f_val = f_val + self.m_val = m_val + + def __str__(self) -> str: + return ( + f"Requested {self.ireq} has inconsistent {self.field}: " + f"expected {self.f_val!r}, but metadata has {self.m_val!r}" + ) + + +class MetadataInvalid(InstallationError): + """Metadata is invalid.""" + + def __init__(self, ireq: InstallRequirement, error: str) -> None: + self.ireq = ireq + self.error = error + + def __str__(self) -> str: + return f"Requested {self.ireq} has invalid metadata: {self.error}" + + +class InstallationSubprocessError(DiagnosticPipError, InstallationError): + """A subprocess call failed.""" + + reference = "subprocess-exited-with-error" + + def __init__( + self, + *, + command_description: str, + exit_code: int, + output_lines: list[str] | None, + ) -> None: + if output_lines is None: + output_prompt = Text("See above for output.") + else: + output_prompt = ( + Text.from_markup(f"[red][{len(output_lines)} lines of output][/]\n") + + Text("".join(output_lines)) + + Text.from_markup(R"[red]\[end of output][/]") + ) + + super().__init__( + message=( + f"[green]{escape(command_description)}[/] did not run successfully.\n" + f"exit code: {exit_code}" + ), + context=output_prompt, + hint_stmt=None, + note_stmt=( + "This error originates from a subprocess, and is likely not a " + "problem with pip." + ), + ) + + self.command_description = command_description + self.exit_code = exit_code + + def __str__(self) -> str: + return f"{self.command_description} exited with {self.exit_code}" + + +class MetadataGenerationFailed(InstallationSubprocessError, InstallationError): + reference = "metadata-generation-failed" + + def __init__( + self, + *, + package_details: str, + ) -> None: + super(InstallationSubprocessError, self).__init__( + message="Encountered error while generating package metadata.", + context=escape(package_details), + hint_stmt="See above for details.", + note_stmt="This is an issue with the package mentioned above, not pip.", + ) + + def __str__(self) -> str: + return "metadata generation failed" + + +class HashErrors(InstallationError): + """Multiple HashError instances rolled into one for reporting""" + + def __init__(self) -> None: + self.errors: list[HashError] = [] + + def append(self, error: HashError) -> None: + self.errors.append(error) + + def __str__(self) -> str: + lines = [] + self.errors.sort(key=lambda e: e.order) + for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__): + lines.append(cls.head) + lines.extend(e.body() for e in errors_of_cls) + if lines: + return "\n".join(lines) + return "" + + def __bool__(self) -> bool: + return bool(self.errors) + + +class HashError(InstallationError): + """ + A failure to verify a package against known-good hashes + + :cvar order: An int sorting hash exception classes by difficulty of + recovery (lower being harder), so the user doesn't bother fretting + about unpinned packages when he has deeper issues, like VCS + dependencies, to deal with. Also keeps error reports in a + deterministic order. + :cvar head: A section heading for display above potentially many + exceptions of this kind + :ivar req: The InstallRequirement that triggered this error. This is + pasted on after the exception is instantiated, because it's not + typically available earlier. 
+ + """ + + req: InstallRequirement | None = None + head = "" + order: int = -1 + + def body(self) -> str: + """Return a summary of me for display under the heading. + + This default implementation simply prints a description of the + triggering requirement. + + :param req: The InstallRequirement that provoked this error, with + its link already populated by the resolver's _populate_link(). + + """ + return f" {self._requirement_name()}" + + def __str__(self) -> str: + return f"{self.head}\n{self.body()}" + + def _requirement_name(self) -> str: + """Return a description of the requirement that triggered me. + + This default implementation returns long description of the req, with + line numbers + + """ + return str(self.req) if self.req else "unknown package" + + +class VcsHashUnsupported(HashError): + """A hash was provided for a version-control-system-based requirement, but + we don't have a method for hashing those.""" + + order = 0 + head = ( + "Can't verify hashes for these requirements because we don't " + "have a way to hash version control repositories:" + ) + + +class DirectoryUrlHashUnsupported(HashError): + """A hash was provided for a version-control-system-based requirement, but + we don't have a method for hashing those.""" + + order = 1 + head = ( + "Can't verify hashes for these file:// requirements because they " + "point to directories:" + ) + + +class HashMissing(HashError): + """A hash was needed for a requirement but is absent.""" + + order = 2 + head = ( + "Hashes are required in --require-hashes mode, but they are " + "missing from some requirements. Here is a list of those " + "requirements along with the hashes their downloaded archives " + "actually had. Add lines like these to your requirements files to " + "prevent tampering. (If you did not enable --require-hashes " + "manually, note that it turns on automatically when any package " + "has a hash.)" + ) + + def __init__(self, gotten_hash: str) -> None: + """ + :param gotten_hash: The hash of the (possibly malicious) archive we + just downloaded + """ + self.gotten_hash = gotten_hash + + def body(self) -> str: + # Dodge circular import. + from pip._internal.utils.hashes import FAVORITE_HASH + + package = None + if self.req: + # In the case of URL-based requirements, display the original URL + # seen in the requirements file rather than the package name, + # so the output can be directly copied into the requirements file. + package = ( + self.req.original_link + if self.req.is_direct + # In case someone feeds something downright stupid + # to InstallRequirement's constructor. + else getattr(self.req, "req", None) + ) + return " {} --hash={}:{}".format( + package or "unknown package", FAVORITE_HASH, self.gotten_hash + ) + + +class HashUnpinned(HashError): + """A requirement had a hash specified but was not pinned to a specific + version.""" + + order = 3 + head = ( + "In --require-hashes mode, all requirements must have their " + "versions pinned with ==. These do not:" + ) + + +class HashMismatch(HashError): + """ + Distribution file hash values don't match. + + :ivar package_name: The name of the package that triggered the hash + mismatch. Feel free to write to this after the exception is raise to + improve its error message. + + """ + + order = 4 + head = ( + "THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS " + "FILE. If you have updated the package versions, please update " + "the hashes. Otherwise, examine the package contents carefully; " + "someone may have tampered with them." 
+ ) + + def __init__(self, allowed: dict[str, list[str]], gots: dict[str, _Hash]) -> None: + """ + :param allowed: A dict of algorithm names pointing to lists of allowed + hex digests + :param gots: A dict of algorithm names pointing to hashes we + actually got from the files under suspicion + """ + self.allowed = allowed + self.gots = gots + + def body(self) -> str: + return f" {self._requirement_name()}:\n{self._hash_comparison()}" + + def _hash_comparison(self) -> str: + """ + Return a comparison of actual and expected hash values. + + Example:: + + Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde + or 123451234512345123451234512345123451234512345 + Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef + + """ + + def hash_then_or(hash_name: str) -> chain[str]: + # For now, all the decent hashes have 6-char names, so we can get + # away with hard-coding space literals. + return chain([hash_name], repeat(" or")) + + lines: list[str] = [] + for hash_name, expecteds in self.allowed.items(): + prefix = hash_then_or(hash_name) + lines.extend((f" Expected {next(prefix)} {e}") for e in expecteds) + lines.append( + f" Got {self.gots[hash_name].hexdigest()}\n" + ) + return "\n".join(lines) + + +class UnsupportedPythonVersion(InstallationError): + """Unsupported python version according to Requires-Python package + metadata.""" + + +class ConfigurationFileCouldNotBeLoaded(ConfigurationError): + """When there are errors while loading a configuration file""" + + def __init__( + self, + reason: str = "could not be loaded", + fname: str | None = None, + error: configparser.Error | None = None, + ) -> None: + super().__init__(error) + self.reason = reason + self.fname = fname + self.error = error + + def __str__(self) -> str: + if self.fname is not None: + message_part = f" in {self.fname}." + else: + assert self.error is not None + message_part = f".\n{self.error}\n" + return f"Configuration file {self.reason}{message_part}" + + +_DEFAULT_EXTERNALLY_MANAGED_ERROR = f"""\ +The Python environment under {sys.prefix} is managed externally, and may not be +manipulated by the user. Please use specific tooling from the distributor of +the Python installation to interact with this environment instead. +""" + + +class ExternallyManagedEnvironment(DiagnosticPipError): + """The current environment is externally managed. + + This is raised when the current environment is externally managed, as + defined by `PEP 668`_. The ``EXTERNALLY-MANAGED`` configuration is checked + and displayed when the error is bubbled up to the user. + + :param error: The error message read from ``EXTERNALLY-MANAGED``. + """ + + reference = "externally-managed-environment" + + def __init__(self, error: str | None) -> None: + if error is None: + context = Text(_DEFAULT_EXTERNALLY_MANAGED_ERROR) + else: + context = Text(error) + super().__init__( + message="This environment is externally managed", + context=context, + note_stmt=( + "If you believe this is a mistake, please contact your " + "Python installation or OS distribution provider. " + "You can override this, at the risk of breaking your Python " + "installation or OS, by passing --break-system-packages." + ), + hint_stmt=Text("See PEP 668 for the detailed specification."), + ) + + @staticmethod + def _iter_externally_managed_error_keys() -> Iterator[str]: + # LC_MESSAGES is in POSIX, but not the C standard. 
The most common + # platform that does not implement this category is Windows, where + # using other categories for console message localization is equally + # unreliable, so we fall back to the locale-less vendor message. This + # can always be re-evaluated when a vendor proposes a new alternative. + try: + category = locale.LC_MESSAGES + except AttributeError: + lang: str | None = None + else: + lang, _ = locale.getlocale(category) + if lang is not None: + yield f"Error-{lang}" + for sep in ("-", "_"): + before, found, _ = lang.partition(sep) + if not found: + continue + yield f"Error-{before}" + yield "Error" + + @classmethod + def from_config( + cls, + config: pathlib.Path | str, + ) -> ExternallyManagedEnvironment: + parser = configparser.ConfigParser(interpolation=None) + try: + parser.read(config, encoding="utf-8") + section = parser["externally-managed"] + for key in cls._iter_externally_managed_error_keys(): + with contextlib.suppress(KeyError): + return cls(section[key]) + except KeyError: + pass + except (OSError, UnicodeDecodeError, configparser.ParsingError): + from pip._internal.utils._log import VERBOSE + + exc_info = logger.isEnabledFor(VERBOSE) + logger.warning("Failed to read %s", config, exc_info=exc_info) + return cls(None) + + +class UninstallMissingRecord(DiagnosticPipError): + reference = "uninstall-no-record-file" + + def __init__(self, *, distribution: BaseDistribution) -> None: + installer = distribution.installer + if not installer or installer == "pip": + dep = f"{distribution.raw_name}=={distribution.version}" + hint = Text.assemble( + "You might be able to recover from this via: ", + (f"pip install --force-reinstall --no-deps {dep}", "green"), + ) + else: + hint = Text( + f"The package was installed by {installer}. " + "You should check if it can uninstall the package." + ) + + super().__init__( + message=Text(f"Cannot uninstall {distribution}"), + context=( + "The package's contents are unknown: " + f"no RECORD file was found for {distribution.raw_name}." + ), + hint_stmt=hint, + ) + + +class LegacyDistutilsInstall(DiagnosticPipError): + reference = "uninstall-distutils-installed-package" + + def __init__(self, *, distribution: BaseDistribution) -> None: + super().__init__( + message=Text(f"Cannot uninstall {distribution}"), + context=( + "It is a distutils installed project and thus we cannot accurately " + "determine which files belong to it which would lead to only a partial " + "uninstall." + ), + hint_stmt=None, + ) + + +class InvalidInstalledPackage(DiagnosticPipError): + reference = "invalid-installed-package" + + def __init__( + self, + *, + dist: BaseDistribution, + invalid_exc: InvalidRequirement | InvalidVersion, + ) -> None: + installed_location = dist.installed_location + + if isinstance(invalid_exc, InvalidRequirement): + invalid_type = "requirement" + else: + invalid_type = "version" + + super().__init__( + message=Text( + f"Cannot process installed package {dist} " + + (f"in {installed_location!r} " if installed_location else "") + + f"because it has an invalid {invalid_type}:\n{invalid_exc.args[0]}" + ), + context=( + "Starting with pip 24.1, packages with invalid " + f"{invalid_type}s can not be processed." 
+ ), + hint_stmt="To proceed this package must be uninstalled.", + ) + + +class IncompleteDownloadError(DiagnosticPipError): + """Raised when the downloader receives fewer bytes than advertised + in the Content-Length header.""" + + reference = "incomplete-download" + + def __init__(self, download: _FileDownload) -> None: + # Dodge circular import. + from pip._internal.utils.misc import format_size + + assert download.size is not None + download_status = ( + f"{format_size(download.bytes_received)}/{format_size(download.size)}" + ) + if download.reattempts: + retry_status = f"after {download.reattempts + 1} attempts " + hint = "Use --resume-retries to configure resume attempt limit." + else: + # Download retrying is not enabled. + retry_status = "" + hint = "Consider using --resume-retries to enable download resumption." + message = Text( + f"Download failed {retry_status}because not enough bytes " + f"were received ({download_status})" + ) + + super().__init__( + message=message, + context=f"URL: {download.link.redacted_url}", + hint_stmt=hint, + note_stmt="This is an issue with network connectivity, not pip.", + ) + + +class ResolutionTooDeepError(DiagnosticPipError): + """Raised when the dependency resolver exceeds the maximum recursion depth.""" + + reference = "resolution-too-deep" + + def __init__(self) -> None: + super().__init__( + message="Dependency resolution exceeded maximum depth", + context=( + "Pip cannot resolve the current dependencies as the dependency graph " + "is too complex for pip to solve efficiently." + ), + hint_stmt=( + "Try adding lower bounds to constrain your dependencies, " + "for example: 'package>=2.0.0' instead of just 'package'. " + ), + link="https://pip.pypa.io/en/stable/topics/dependency-resolution/#handling-resolution-too-deep-errors", + ) + + +class InstallWheelBuildError(DiagnosticPipError): + reference = "failed-wheel-build-for-install" + + def __init__(self, failed: list[InstallRequirement]) -> None: + super().__init__( + message=( + "Failed to build installable wheels for some " + "pyproject.toml based projects" + ), + context=", ".join(r.name for r in failed), # type: ignore + hint_stmt=None, + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/index/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/index/__init__.py new file mode 100644 index 0000000..197dd75 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/index/__init__.py @@ -0,0 +1 @@ +"""Index interaction code""" diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..5324b3d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/collector.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/collector.cpython-312.pyc new file mode 100644 index 0000000..91c8ad0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/collector.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-312.pyc new file mode 100644 index 0000000..518b085 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/sources.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/sources.cpython-312.pyc new file mode 100644 index 0000000..a089b51 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/index/__pycache__/sources.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/index/collector.py b/.venv/lib/python3.12/site-packages/pip/_internal/index/collector.py new file mode 100644 index 0000000..00d66da --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/index/collector.py @@ -0,0 +1,489 @@ +""" +The main purpose of this module is to expose LinkCollector.collect_sources(). +""" + +from __future__ import annotations + +import collections +import email.message +import functools +import itertools +import json +import logging +import os +import urllib.parse +import urllib.request +from collections.abc import Iterable, MutableMapping, Sequence +from dataclasses import dataclass +from html.parser import HTMLParser +from optparse import Values +from typing import ( + Callable, + NamedTuple, + Protocol, +) + +from pip._vendor import requests +from pip._vendor.requests import Response +from pip._vendor.requests.exceptions import RetryError, SSLError + +from pip._internal.exceptions import NetworkConnectionError +from pip._internal.models.link import Link +from pip._internal.models.search_scope import SearchScope +from pip._internal.network.session import PipSession +from pip._internal.network.utils import raise_for_status +from pip._internal.utils.filetypes import is_archive_file +from pip._internal.utils.misc import redact_auth_from_url +from pip._internal.vcs import vcs + +from .sources import CandidatesFromPage, LinkSource, build_source + +logger = logging.getLogger(__name__) + +ResponseHeaders = MutableMapping[str, str] + + +def _match_vcs_scheme(url: str) -> str | None: + """Look for VCS schemes in the URL. + + Returns the matched VCS scheme, or None if there's no match. + """ + for scheme in vcs.schemes: + if url.lower().startswith(scheme) and url[len(scheme)] in "+:": + return scheme + return None + + +class _NotAPIContent(Exception): + def __init__(self, content_type: str, request_desc: str) -> None: + super().__init__(content_type, request_desc) + self.content_type = content_type + self.request_desc = request_desc + + +def _ensure_api_header(response: Response) -> None: + """ + Check the Content-Type header to ensure the response contains a Simple + API Response. + + Raises `_NotAPIContent` if the content type is not a valid content-type. + """ + content_type = response.headers.get("Content-Type", "Unknown") + + content_type_l = content_type.lower() + if content_type_l.startswith( + ( + "text/html", + "application/vnd.pypi.simple.v1+html", + "application/vnd.pypi.simple.v1+json", + ) + ): + return + + raise _NotAPIContent(content_type, response.request.method) + + +class _NotHTTP(Exception): + pass + + +def _ensure_api_response(url: str, session: PipSession) -> None: + """ + Send a HEAD request to the URL, and ensure the response contains a simple + API Response. + + Raises `_NotHTTP` if the URL is not available for a HEAD request, or + `_NotAPIContent` if the content type is not a valid content type. 
+ """ + scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) + if scheme not in {"http", "https"}: + raise _NotHTTP() + + resp = session.head(url, allow_redirects=True) + raise_for_status(resp) + + _ensure_api_header(resp) + + +def _get_simple_response(url: str, session: PipSession) -> Response: + """Access an Simple API response with GET, and return the response. + + This consists of three parts: + + 1. If the URL looks suspiciously like an archive, send a HEAD first to + check the Content-Type is HTML or Simple API, to avoid downloading a + large file. Raise `_NotHTTP` if the content type cannot be determined, or + `_NotAPIContent` if it is not HTML or a Simple API. + 2. Actually perform the request. Raise HTTP exceptions on network failures. + 3. Check the Content-Type header to make sure we got a Simple API response, + and raise `_NotAPIContent` otherwise. + """ + if is_archive_file(Link(url).filename): + _ensure_api_response(url, session=session) + + logger.debug("Getting page %s", redact_auth_from_url(url)) + + resp = session.get( + url, + headers={ + "Accept": ", ".join( + [ + "application/vnd.pypi.simple.v1+json", + "application/vnd.pypi.simple.v1+html; q=0.1", + "text/html; q=0.01", + ] + ), + # We don't want to blindly returned cached data for + # /simple/, because authors generally expecting that + # twine upload && pip install will function, but if + # they've done a pip install in the last ~10 minutes + # it won't. Thus by setting this to zero we will not + # blindly use any cached data, however the benefit of + # using max-age=0 instead of no-cache, is that we will + # still support conditional requests, so we will still + # minimize traffic sent in cases where the page hasn't + # changed at all, we will just always incur the round + # trip for the conditional GET now instead of only + # once per 10 minutes. + # For more information, please see pypa/pip#5670. + "Cache-Control": "max-age=0", + }, + ) + raise_for_status(resp) + + # The check for archives above only works if the url ends with + # something that looks like an archive. However that is not a + # requirement of an url. Unless we issue a HEAD request on every + # url we cannot know ahead of time for sure if something is a + # Simple API response or not. However we can check after we've + # downloaded it. + _ensure_api_header(resp) + + logger.debug( + "Fetched page %s as %s", + redact_auth_from_url(url), + resp.headers.get("Content-Type", "Unknown"), + ) + + return resp + + +def _get_encoding_from_headers(headers: ResponseHeaders) -> str | None: + """Determine if we have any encoding information in our headers.""" + if headers and "Content-Type" in headers: + m = email.message.Message() + m["content-type"] = headers["Content-Type"] + charset = m.get_param("charset") + if charset: + return str(charset) + return None + + +class CacheablePageContent: + def __init__(self, page: IndexContent) -> None: + assert page.cache_link_parsing + self.page = page + + def __eq__(self, other: object) -> bool: + return isinstance(other, type(self)) and self.page.url == other.page.url + + def __hash__(self) -> int: + return hash(self.page.url) + + +class ParseLinks(Protocol): + def __call__(self, page: IndexContent) -> Iterable[Link]: ... + + +def with_cached_index_content(fn: ParseLinks) -> ParseLinks: + """ + Given a function that parses an Iterable[Link] from an IndexContent, cache the + function's result (keyed by CacheablePageContent), unless the IndexContent + `page` has `page.cache_link_parsing == False`. 
+ """ + + @functools.cache + def wrapper(cacheable_page: CacheablePageContent) -> list[Link]: + return list(fn(cacheable_page.page)) + + @functools.wraps(fn) + def wrapper_wrapper(page: IndexContent) -> list[Link]: + if page.cache_link_parsing: + return wrapper(CacheablePageContent(page)) + return list(fn(page)) + + return wrapper_wrapper + + +@with_cached_index_content +def parse_links(page: IndexContent) -> Iterable[Link]: + """ + Parse a Simple API's Index Content, and yield its anchor elements as Link objects. + """ + + content_type_l = page.content_type.lower() + if content_type_l.startswith("application/vnd.pypi.simple.v1+json"): + data = json.loads(page.content) + for file in data.get("files", []): + link = Link.from_json(file, page.url) + if link is None: + continue + yield link + return + + parser = HTMLLinkParser(page.url) + encoding = page.encoding or "utf-8" + parser.feed(page.content.decode(encoding)) + + url = page.url + base_url = parser.base_url or url + for anchor in parser.anchors: + link = Link.from_element(anchor, page_url=url, base_url=base_url) + if link is None: + continue + yield link + + +@dataclass(frozen=True) +class IndexContent: + """Represents one response (or page), along with its URL. + + :param encoding: the encoding to decode the given content. + :param url: the URL from which the HTML was downloaded. + :param cache_link_parsing: whether links parsed from this page's url + should be cached. PyPI index urls should + have this set to False, for example. + """ + + content: bytes + content_type: str + encoding: str | None + url: str + cache_link_parsing: bool = True + + def __str__(self) -> str: + return redact_auth_from_url(self.url) + + +class HTMLLinkParser(HTMLParser): + """ + HTMLParser that keeps the first base HREF and a list of all anchor + elements' attributes. + """ + + def __init__(self, url: str) -> None: + super().__init__(convert_charrefs=True) + + self.url: str = url + self.base_url: str | None = None + self.anchors: list[dict[str, str | None]] = [] + + def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None: + if tag == "base" and self.base_url is None: + href = self.get_href(attrs) + if href is not None: + self.base_url = href + elif tag == "a": + self.anchors.append(dict(attrs)) + + def get_href(self, attrs: list[tuple[str, str | None]]) -> str | None: + for name, value in attrs: + if name == "href": + return value + return None + + +def _handle_get_simple_fail( + link: Link, + reason: str | Exception, + meth: Callable[..., None] | None = None, +) -> None: + if meth is None: + meth = logger.debug + meth("Could not fetch URL %s: %s - skipping", link, reason) + + +def _make_index_content( + response: Response, cache_link_parsing: bool = True +) -> IndexContent: + encoding = _get_encoding_from_headers(response.headers) + return IndexContent( + response.content, + response.headers["Content-Type"], + encoding=encoding, + url=response.url, + cache_link_parsing=cache_link_parsing, + ) + + +def _get_index_content(link: Link, *, session: PipSession) -> IndexContent | None: + url = link.url.split("#", 1)[0] + + # Check for VCS schemes that do not support lookup as web pages. 
+ vcs_scheme = _match_vcs_scheme(url) + if vcs_scheme: + logger.warning( + "Cannot look at %s URL %s because it does not support lookup as web pages.", + vcs_scheme, + link, + ) + return None + + # Tack index.html onto file:// URLs that point to directories + scheme, _, path, _, _, _ = urllib.parse.urlparse(url) + if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)): + # add trailing slash if not present so urljoin doesn't trim + # final segment + if not url.endswith("/"): + url += "/" + # TODO: In the future, it would be nice if pip supported PEP 691 + # style responses in the file:// URLs, however there's no + # standard file extension for application/vnd.pypi.simple.v1+json + # so we'll need to come up with something on our own. + url = urllib.parse.urljoin(url, "index.html") + logger.debug(" file: URL is directory, getting %s", url) + + try: + resp = _get_simple_response(url, session=session) + except _NotHTTP: + logger.warning( + "Skipping page %s because it looks like an archive, and cannot " + "be checked by a HTTP HEAD request.", + link, + ) + except _NotAPIContent as exc: + logger.warning( + "Skipping page %s because the %s request got Content-Type: %s. " + "The only supported Content-Types are application/vnd.pypi.simple.v1+json, " + "application/vnd.pypi.simple.v1+html, and text/html", + link, + exc.request_desc, + exc.content_type, + ) + except NetworkConnectionError as exc: + _handle_get_simple_fail(link, exc) + except RetryError as exc: + _handle_get_simple_fail(link, exc) + except SSLError as exc: + reason = "There was a problem confirming the ssl certificate: " + reason += str(exc) + _handle_get_simple_fail(link, reason, meth=logger.info) + except requests.ConnectionError as exc: + _handle_get_simple_fail(link, f"connection error: {exc}") + except requests.Timeout: + _handle_get_simple_fail(link, "timed out") + else: + return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing) + return None + + +class CollectedSources(NamedTuple): + find_links: Sequence[LinkSource | None] + index_urls: Sequence[LinkSource | None] + + +class LinkCollector: + """ + Responsible for collecting Link objects from all configured locations, + making network requests as needed. + + The class's main method is its collect_sources() method. + """ + + def __init__( + self, + session: PipSession, + search_scope: SearchScope, + ) -> None: + self.search_scope = search_scope + self.session = session + + @classmethod + def create( + cls, + session: PipSession, + options: Values, + suppress_no_index: bool = False, + ) -> LinkCollector: + """ + :param session: The Session to use to make requests. + :param suppress_no_index: Whether to ignore the --no-index option + when constructing the SearchScope object. + """ + index_urls = [options.index_url] + options.extra_index_urls + if options.no_index and not suppress_no_index: + logger.debug( + "Ignoring indexes: %s", + ",".join(redact_auth_from_url(url) for url in index_urls), + ) + index_urls = [] + + # Make sure find_links is a list before passing to create(). 
+ find_links = options.find_links or [] + + search_scope = SearchScope.create( + find_links=find_links, + index_urls=index_urls, + no_index=options.no_index, + ) + link_collector = LinkCollector( + session=session, + search_scope=search_scope, + ) + return link_collector + + @property + def find_links(self) -> list[str]: + return self.search_scope.find_links + + def fetch_response(self, location: Link) -> IndexContent | None: + """ + Fetch an HTML page containing package links. + """ + return _get_index_content(location, session=self.session) + + def collect_sources( + self, + project_name: str, + candidates_from_page: CandidatesFromPage, + ) -> CollectedSources: + # The OrderedDict calls deduplicate sources by URL. + index_url_sources = collections.OrderedDict( + build_source( + loc, + candidates_from_page=candidates_from_page, + page_validator=self.session.is_secure_origin, + expand_dir=False, + cache_link_parsing=False, + project_name=project_name, + ) + for loc in self.search_scope.get_index_urls_locations(project_name) + ).values() + find_links_sources = collections.OrderedDict( + build_source( + loc, + candidates_from_page=candidates_from_page, + page_validator=self.session.is_secure_origin, + expand_dir=True, + cache_link_parsing=True, + project_name=project_name, + ) + for loc in self.find_links + ).values() + + if logger.isEnabledFor(logging.DEBUG): + lines = [ + f"* {s.link}" + for s in itertools.chain(find_links_sources, index_url_sources) + if s is not None and s.link is not None + ] + lines = [ + f"{len(lines)} location(s) to search " + f"for versions of {project_name}:" + ] + lines + logger.debug("\n".join(lines)) + + return CollectedSources( + find_links=list(find_links_sources), + index_urls=list(index_url_sources), + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/index/package_finder.py b/.venv/lib/python3.12/site-packages/pip/_internal/index/package_finder.py new file mode 100644 index 0000000..bc523cd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/index/package_finder.py @@ -0,0 +1,1059 @@ +"""Routines related to PyPI, indexes""" + +from __future__ import annotations + +import enum +import functools +import itertools +import logging +import re +from collections.abc import Iterable +from dataclasses import dataclass +from typing import ( + TYPE_CHECKING, + Optional, + Union, +) + +from pip._vendor.packaging import specifiers +from pip._vendor.packaging.tags import Tag +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.version import InvalidVersion, _BaseVersion +from pip._vendor.packaging.version import parse as parse_version + +from pip._internal.exceptions import ( + BestVersionAlreadyInstalled, + DistributionNotFound, + InvalidWheelFilename, + UnsupportedWheel, +) +from pip._internal.index.collector import LinkCollector, parse_links +from pip._internal.models.candidate import InstallationCandidate +from pip._internal.models.format_control import FormatControl +from pip._internal.models.link import Link +from pip._internal.models.search_scope import SearchScope +from pip._internal.models.selection_prefs import SelectionPreferences +from pip._internal.models.target_python import TargetPython +from pip._internal.models.wheel import Wheel +from pip._internal.req import InstallRequirement +from pip._internal.utils._log import getLogger +from pip._internal.utils.filetypes import WHEEL_EXTENSION +from pip._internal.utils.hashes import Hashes +from pip._internal.utils.logging import indent_log +from 
pip._internal.utils.misc import build_netloc +from pip._internal.utils.packaging import check_requires_python +from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS + +if TYPE_CHECKING: + from typing_extensions import TypeGuard + +__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"] + + +logger = getLogger(__name__) + +BuildTag = Union[tuple[()], tuple[int, str]] +CandidateSortingKey = tuple[int, int, int, _BaseVersion, Optional[int], BuildTag] + + +def _check_link_requires_python( + link: Link, + version_info: tuple[int, int, int], + ignore_requires_python: bool = False, +) -> bool: + """ + Return whether the given Python version is compatible with a link's + "Requires-Python" value. + + :param version_info: A 3-tuple of ints representing the Python + major-minor-micro version to check. + :param ignore_requires_python: Whether to ignore the "Requires-Python" + value if the given Python version isn't compatible. + """ + try: + is_compatible = check_requires_python( + link.requires_python, + version_info=version_info, + ) + except specifiers.InvalidSpecifier: + logger.debug( + "Ignoring invalid Requires-Python (%r) for link: %s", + link.requires_python, + link, + ) + else: + if not is_compatible: + version = ".".join(map(str, version_info)) + if not ignore_requires_python: + logger.verbose( + "Link requires a different Python (%s not in: %r): %s", + version, + link.requires_python, + link, + ) + return False + + logger.debug( + "Ignoring failed Requires-Python check (%s not in: %r) for link: %s", + version, + link.requires_python, + link, + ) + + return True + + +class LinkType(enum.Enum): + candidate = enum.auto() + different_project = enum.auto() + yanked = enum.auto() + format_unsupported = enum.auto() + format_invalid = enum.auto() + platform_mismatch = enum.auto() + requires_python_mismatch = enum.auto() + + +class LinkEvaluator: + """ + Responsible for evaluating links for a particular project. + """ + + _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$") + + # Don't include an allow_yanked default value to make sure each call + # site considers whether yanked releases are allowed. This also causes + # that decision to be made explicit in the calling code, which helps + # people when reading the code. + def __init__( + self, + project_name: str, + canonical_name: str, + formats: frozenset[str], + target_python: TargetPython, + allow_yanked: bool, + ignore_requires_python: bool | None = None, + ) -> None: + """ + :param project_name: The user supplied package name. + :param canonical_name: The canonical package name. + :param formats: The formats allowed for this package. Should be a set + with 'binary' or 'source' or both in it. + :param target_python: The target Python interpreter to use when + evaluating link compatibility. This is used, for example, to + check wheel compatibility, as well as when checking the Python + version, e.g. the Python version embedded in a link filename + (or egg fragment) and against an HTML link's optional PEP 503 + "data-requires-python" attribute. + :param allow_yanked: Whether files marked as yanked (in the sense + of PEP 592) are permitted to be candidates for install. + :param ignore_requires_python: Whether to ignore incompatible + PEP 503 "data-requires-python" values in HTML links. Defaults + to False. 
+ """ + if ignore_requires_python is None: + ignore_requires_python = False + + self._allow_yanked = allow_yanked + self._canonical_name = canonical_name + self._ignore_requires_python = ignore_requires_python + self._formats = formats + self._target_python = target_python + + self.project_name = project_name + + def evaluate_link(self, link: Link) -> tuple[LinkType, str]: + """ + Determine whether a link is a candidate for installation. + + :return: A tuple (result, detail), where *result* is an enum + representing whether the evaluation found a candidate, or the reason + why one is not found. If a candidate is found, *detail* will be the + candidate's version string; if one is not found, it contains the + reason the link fails to qualify. + """ + version = None + if link.is_yanked and not self._allow_yanked: + reason = link.yanked_reason or "" + return (LinkType.yanked, f"yanked for reason: {reason}") + + if link.egg_fragment: + egg_info = link.egg_fragment + ext = link.ext + else: + egg_info, ext = link.splitext() + if not ext: + return (LinkType.format_unsupported, "not a file") + if ext not in SUPPORTED_EXTENSIONS: + return ( + LinkType.format_unsupported, + f"unsupported archive format: {ext}", + ) + if "binary" not in self._formats and ext == WHEEL_EXTENSION: + reason = f"No binaries permitted for {self.project_name}" + return (LinkType.format_unsupported, reason) + if "macosx10" in link.path and ext == ".zip": + return (LinkType.format_unsupported, "macosx10 one") + if ext == WHEEL_EXTENSION: + try: + wheel = Wheel(link.filename) + except InvalidWheelFilename: + return ( + LinkType.format_invalid, + "invalid wheel filename", + ) + if canonicalize_name(wheel.name) != self._canonical_name: + reason = f"wrong project name (not {self.project_name})" + return (LinkType.different_project, reason) + + supported_tags = self._target_python.get_unsorted_tags() + if not wheel.supported(supported_tags): + # Include the wheel's tags in the reason string to + # simplify troubleshooting compatibility issues. + file_tags = ", ".join(wheel.get_formatted_file_tags()) + reason = ( + f"none of the wheel's tags ({file_tags}) are compatible " + f"(run pip debug --verbose to show compatible tags)" + ) + return (LinkType.platform_mismatch, reason) + + version = wheel.version + + # This should be up by the self.ok_binary check, but see issue 2700. 
+ if "source" not in self._formats and ext != WHEEL_EXTENSION: + reason = f"No sources permitted for {self.project_name}" + return (LinkType.format_unsupported, reason) + + if not version: + version = _extract_version_from_fragment( + egg_info, + self._canonical_name, + ) + if not version: + reason = f"Missing project version for {self.project_name}" + return (LinkType.format_invalid, reason) + + match = self._py_version_re.search(version) + if match: + version = version[: match.start()] + py_version = match.group(1) + if py_version != self._target_python.py_version: + return ( + LinkType.platform_mismatch, + "Python version is incorrect", + ) + + supports_python = _check_link_requires_python( + link, + version_info=self._target_python.py_version_info, + ignore_requires_python=self._ignore_requires_python, + ) + if not supports_python: + requires_python = link.requires_python + if requires_python: + + def get_version_sort_key(v: str) -> tuple[int, ...]: + return tuple(int(s) for s in v.split(".") if s.isdigit()) + + requires_python = ",".join( + sorted( + (str(s) for s in specifiers.SpecifierSet(requires_python)), + key=get_version_sort_key, + ) + ) + reason = f"{version} Requires-Python {requires_python}" + return (LinkType.requires_python_mismatch, reason) + + logger.debug("Found link %s, version: %s", link, version) + + return (LinkType.candidate, version) + + +def filter_unallowed_hashes( + candidates: list[InstallationCandidate], + hashes: Hashes | None, + project_name: str, +) -> list[InstallationCandidate]: + """ + Filter out candidates whose hashes aren't allowed, and return a new + list of candidates. + + If at least one candidate has an allowed hash, then all candidates with + either an allowed hash or no hash specified are returned. Otherwise, + the given candidates are returned. + + Including the candidates with no hash specified when there is a match + allows a warning to be logged if there is a more preferred candidate + with no hash specified. Returning all candidates in the case of no + matches lets pip report the hash of the candidate that would otherwise + have been installed (e.g. permitting the user to more easily update + their requirements file with the desired hash). + """ + if not hashes: + logger.debug( + "Given no hashes to check %s links for project %r: " + "discarding no candidates", + len(candidates), + project_name, + ) + # Make sure we're not returning back the given value. + return list(candidates) + + matches_or_no_digest = [] + # Collect the non-matches for logging purposes. + non_matches = [] + match_count = 0 + for candidate in candidates: + link = candidate.link + if not link.has_hash: + pass + elif link.is_hash_allowed(hashes=hashes): + match_count += 1 + else: + non_matches.append(candidate) + continue + + matches_or_no_digest.append(candidate) + + if match_count: + filtered = matches_or_no_digest + else: + # Make sure we're not returning back the given value. 
+ filtered = list(candidates) + + if len(filtered) == len(candidates): + discard_message = "discarding no candidates" + else: + discard_message = "discarding {} non-matches:\n {}".format( + len(non_matches), + "\n ".join(str(candidate.link) for candidate in non_matches), + ) + + logger.debug( + "Checked %s links for project %r against %s hashes " + "(%s matches, %s no digest): %s", + len(candidates), + project_name, + hashes.digest_count, + match_count, + len(matches_or_no_digest) - match_count, + discard_message, + ) + + return filtered + + +@dataclass +class CandidatePreferences: + """ + Encapsulates some of the preferences for filtering and sorting + InstallationCandidate objects. + """ + + prefer_binary: bool = False + allow_all_prereleases: bool = False + + +@dataclass(frozen=True) +class BestCandidateResult: + """A collection of candidates, returned by `PackageFinder.find_best_candidate`. + + This class is only intended to be instantiated by CandidateEvaluator's + `compute_best_candidate()` method. + + :param all_candidates: A sequence of all available candidates found. + :param applicable_candidates: The applicable candidates. + :param best_candidate: The most preferred candidate found, or None + if no applicable candidates were found. + """ + + all_candidates: list[InstallationCandidate] + applicable_candidates: list[InstallationCandidate] + best_candidate: InstallationCandidate | None + + def __post_init__(self) -> None: + assert set(self.applicable_candidates) <= set(self.all_candidates) + + if self.best_candidate is None: + assert not self.applicable_candidates + else: + assert self.best_candidate in self.applicable_candidates + + +class CandidateEvaluator: + """ + Responsible for filtering and sorting candidates for installation based + on what tags are valid. + """ + + @classmethod + def create( + cls, + project_name: str, + target_python: TargetPython | None = None, + prefer_binary: bool = False, + allow_all_prereleases: bool = False, + specifier: specifiers.BaseSpecifier | None = None, + hashes: Hashes | None = None, + ) -> CandidateEvaluator: + """Create a CandidateEvaluator object. + + :param target_python: The target Python interpreter to use when + checking compatibility. If None (the default), a TargetPython + object will be constructed from the running Python. + :param specifier: An optional object implementing `filter` + (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable + versions. + :param hashes: An optional collection of allowed hashes. + """ + if target_python is None: + target_python = TargetPython() + if specifier is None: + specifier = specifiers.SpecifierSet() + + supported_tags = target_python.get_sorted_tags() + + return cls( + project_name=project_name, + supported_tags=supported_tags, + specifier=specifier, + prefer_binary=prefer_binary, + allow_all_prereleases=allow_all_prereleases, + hashes=hashes, + ) + + def __init__( + self, + project_name: str, + supported_tags: list[Tag], + specifier: specifiers.BaseSpecifier, + prefer_binary: bool = False, + allow_all_prereleases: bool = False, + hashes: Hashes | None = None, + ) -> None: + """ + :param supported_tags: The PEP 425 tags supported by the target + Python in order of preference (most preferred first). 
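+            For example (illustrative): if this lists
+            ``cp312-cp312-manylinux_2_28_x86_64`` before ``py3-none-any``,
+            wheels matching the former sort as more preferred; each tag's
+            list index doubles as its priority in the mapping built below.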
+ """ + self._allow_all_prereleases = allow_all_prereleases + self._hashes = hashes + self._prefer_binary = prefer_binary + self._project_name = project_name + self._specifier = specifier + self._supported_tags = supported_tags + # Since the index of the tag in the _supported_tags list is used + # as a priority, precompute a map from tag to index/priority to be + # used in wheel.find_most_preferred_tag. + self._wheel_tag_preferences = { + tag: idx for idx, tag in enumerate(supported_tags) + } + + def get_applicable_candidates( + self, + candidates: list[InstallationCandidate], + ) -> list[InstallationCandidate]: + """ + Return the applicable candidates from a list of candidates. + """ + # Using None infers from the specifier instead. + allow_prereleases = self._allow_all_prereleases or None + specifier = self._specifier + + # We turn the version object into a str here because otherwise + # when we're debundled but setuptools isn't, Python will see + # packaging.version.Version and + # pkg_resources._vendor.packaging.version.Version as different + # types. This way we'll use a str as a common data interchange + # format. If we stop using the pkg_resources provided specifier + # and start using our own, we can drop the cast to str(). + candidates_and_versions = [(c, str(c.version)) for c in candidates] + versions = set( + specifier.filter( + (v for _, v in candidates_and_versions), + prereleases=allow_prereleases, + ) + ) + + applicable_candidates = [c for c, v in candidates_and_versions if v in versions] + filtered_applicable_candidates = filter_unallowed_hashes( + candidates=applicable_candidates, + hashes=self._hashes, + project_name=self._project_name, + ) + + return sorted(filtered_applicable_candidates, key=self._sort_key) + + def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey: + """ + Function to pass as the `key` argument to a call to sorted() to sort + InstallationCandidates by preference. + + Returns a tuple such that tuples sorting as greater using Python's + default comparison operator are more preferred. + + The preference is as follows: + + First and foremost, candidates with allowed (matching) hashes are + always preferred over candidates without matching hashes. This is + because e.g. if the only candidate with an allowed hash is yanked, + we still want to use that candidate. + + Second, excepting hash considerations, candidates that have been + yanked (in the sense of PEP 592) are always less preferred than + candidates that haven't been yanked. Then: + + If not finding wheels, they are sorted by version only. + If finding wheels, then the sort order is by version, then: + 1. existing installs + 2. wheels ordered via Wheel.support_index_min(self._supported_tags) + 3. source archives + If prefer_binary was set, then all wheels are sorted above sources. + + Note: it was considered to embed this logic into the Link + comparison operators, but then different sdist links + with the same version, would have to be considered equal + """ + valid_tags = self._supported_tags + support_num = len(valid_tags) + build_tag: BuildTag = () + binary_preference = 0 + link = candidate.link + if link.is_wheel: + # can raise InvalidWheelFilename + wheel = Wheel(link.filename) + try: + pri = -( + wheel.find_most_preferred_tag( + valid_tags, self._wheel_tag_preferences + ) + ) + except ValueError: + raise UnsupportedWheel( + f"{wheel.filename} is not a supported wheel for this platform. It " + "can't be sorted." 
+ ) + if self._prefer_binary: + binary_preference = 1 + build_tag = wheel.build_tag + else: # sdist + pri = -(support_num) + has_allowed_hash = int(link.is_hash_allowed(self._hashes)) + yank_value = -1 * int(link.is_yanked) # -1 for yanked. + return ( + has_allowed_hash, + yank_value, + binary_preference, + candidate.version, + pri, + build_tag, + ) + + def sort_best_candidate( + self, + candidates: list[InstallationCandidate], + ) -> InstallationCandidate | None: + """ + Return the best candidate per the instance's sort order, or None if + no candidate is acceptable. + """ + if not candidates: + return None + best_candidate = max(candidates, key=self._sort_key) + return best_candidate + + def compute_best_candidate( + self, + candidates: list[InstallationCandidate], + ) -> BestCandidateResult: + """ + Compute and return a `BestCandidateResult` instance. + """ + applicable_candidates = self.get_applicable_candidates(candidates) + + best_candidate = self.sort_best_candidate(applicable_candidates) + + return BestCandidateResult( + candidates, + applicable_candidates=applicable_candidates, + best_candidate=best_candidate, + ) + + +class PackageFinder: + """This finds packages. + + This is meant to match easy_install's technique for looking for + packages, by reading pages and looking for appropriate links. + """ + + def __init__( + self, + link_collector: LinkCollector, + target_python: TargetPython, + allow_yanked: bool, + format_control: FormatControl | None = None, + candidate_prefs: CandidatePreferences | None = None, + ignore_requires_python: bool | None = None, + ) -> None: + """ + This constructor is primarily meant to be used by the create() class + method and from tests. + + :param format_control: A FormatControl object, used to control + the selection of source packages / binary packages when consulting + the index and links. + :param candidate_prefs: Options to use when creating a + CandidateEvaluator object. + """ + if candidate_prefs is None: + candidate_prefs = CandidatePreferences() + + format_control = format_control or FormatControl(set(), set()) + + self._allow_yanked = allow_yanked + self._candidate_prefs = candidate_prefs + self._ignore_requires_python = ignore_requires_python + self._link_collector = link_collector + self._target_python = target_python + + self.format_control = format_control + + # These are boring links that have already been logged somehow. + self._logged_links: set[tuple[Link, LinkType, str]] = set() + + # Cache of the result of finding candidates + self._all_candidates: dict[str, list[InstallationCandidate]] = {} + self._best_candidates: dict[ + tuple[str, specifiers.BaseSpecifier | None, Hashes | None], + BestCandidateResult, + ] = {} + + # Don't include an allow_yanked default value to make sure each call + # site considers whether yanked releases are allowed. This also causes + # that decision to be made explicit in the calling code, which helps + # people when reading the code. + @classmethod + def create( + cls, + link_collector: LinkCollector, + selection_prefs: SelectionPreferences, + target_python: TargetPython | None = None, + ) -> PackageFinder: + """Create a PackageFinder. + + :param selection_prefs: The candidate selection preferences, as a + SelectionPreferences object. + :param target_python: The target Python interpreter to use when + checking compatibility. If None (the default), a TargetPython + object will be constructed from the running Python. 
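+
+        A minimal, illustrative call (``link_collector`` is assumed to have
+        been built elsewhere, e.g. via ``LinkCollector.create()``)::
+
+            finder = PackageFinder.create(
+                link_collector=link_collector,
+                selection_prefs=SelectionPreferences(allow_yanked=False),
+            )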
+ """ + if target_python is None: + target_python = TargetPython() + + candidate_prefs = CandidatePreferences( + prefer_binary=selection_prefs.prefer_binary, + allow_all_prereleases=selection_prefs.allow_all_prereleases, + ) + + return cls( + candidate_prefs=candidate_prefs, + link_collector=link_collector, + target_python=target_python, + allow_yanked=selection_prefs.allow_yanked, + format_control=selection_prefs.format_control, + ignore_requires_python=selection_prefs.ignore_requires_python, + ) + + @property + def target_python(self) -> TargetPython: + return self._target_python + + @property + def search_scope(self) -> SearchScope: + return self._link_collector.search_scope + + @search_scope.setter + def search_scope(self, search_scope: SearchScope) -> None: + self._link_collector.search_scope = search_scope + + @property + def find_links(self) -> list[str]: + return self._link_collector.find_links + + @property + def index_urls(self) -> list[str]: + return self.search_scope.index_urls + + @property + def proxy(self) -> str | None: + return self._link_collector.session.pip_proxy + + @property + def trusted_hosts(self) -> Iterable[str]: + for host_port in self._link_collector.session.pip_trusted_origins: + yield build_netloc(*host_port) + + @property + def custom_cert(self) -> str | None: + # session.verify is either a boolean (use default bundle/no SSL + # verification) or a string path to a custom CA bundle to use. We only + # care about the latter. + verify = self._link_collector.session.verify + return verify if isinstance(verify, str) else None + + @property + def client_cert(self) -> str | None: + cert = self._link_collector.session.cert + assert not isinstance(cert, tuple), "pip only supports PEM client certs" + return cert + + @property + def allow_all_prereleases(self) -> bool: + return self._candidate_prefs.allow_all_prereleases + + def set_allow_all_prereleases(self) -> None: + self._candidate_prefs.allow_all_prereleases = True + + @property + def prefer_binary(self) -> bool: + return self._candidate_prefs.prefer_binary + + def set_prefer_binary(self) -> None: + self._candidate_prefs.prefer_binary = True + + def requires_python_skipped_reasons(self) -> list[str]: + reasons = { + detail + for _, result, detail in self._logged_links + if result == LinkType.requires_python_mismatch + } + return sorted(reasons) + + def make_link_evaluator(self, project_name: str) -> LinkEvaluator: + canonical_name = canonicalize_name(project_name) + formats = self.format_control.get_allowed_formats(canonical_name) + + return LinkEvaluator( + project_name=project_name, + canonical_name=canonical_name, + formats=formats, + target_python=self._target_python, + allow_yanked=self._allow_yanked, + ignore_requires_python=self._ignore_requires_python, + ) + + def _sort_links(self, links: Iterable[Link]) -> list[Link]: + """ + Returns elements of links in order, non-egg links first, egg links + second, while eliminating duplicates + """ + eggs, no_eggs = [], [] + seen: set[Link] = set() + for link in links: + if link not in seen: + seen.add(link) + if link.egg_fragment: + eggs.append(link) + else: + no_eggs.append(link) + return no_eggs + eggs + + def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None: + entry = (link, result, detail) + if entry not in self._logged_links: + # Put the link at the end so the reason is more visible and because + # the link string is usually very long. 
+ logger.debug("Skipping link: %s: %s", detail, link) + self._logged_links.add(entry) + + def get_install_candidate( + self, link_evaluator: LinkEvaluator, link: Link + ) -> InstallationCandidate | None: + """ + If the link is a candidate for install, convert it to an + InstallationCandidate and return it. Otherwise, return None. + """ + result, detail = link_evaluator.evaluate_link(link) + if result != LinkType.candidate: + self._log_skipped_link(link, result, detail) + return None + + try: + return InstallationCandidate( + name=link_evaluator.project_name, + link=link, + version=detail, + ) + except InvalidVersion: + return None + + def evaluate_links( + self, link_evaluator: LinkEvaluator, links: Iterable[Link] + ) -> list[InstallationCandidate]: + """ + Convert links that are candidates to InstallationCandidate objects. + """ + candidates = [] + for link in self._sort_links(links): + candidate = self.get_install_candidate(link_evaluator, link) + if candidate is not None: + candidates.append(candidate) + + return candidates + + def process_project_url( + self, project_url: Link, link_evaluator: LinkEvaluator + ) -> list[InstallationCandidate]: + logger.debug( + "Fetching project page and analyzing links: %s", + project_url, + ) + index_response = self._link_collector.fetch_response(project_url) + if index_response is None: + return [] + + page_links = list(parse_links(index_response)) + + with indent_log(): + package_links = self.evaluate_links( + link_evaluator, + links=page_links, + ) + + return package_links + + def find_all_candidates(self, project_name: str) -> list[InstallationCandidate]: + """Find all available InstallationCandidate for project_name + + This checks index_urls and find_links. + All versions found are returned as an InstallationCandidate list. + + See LinkEvaluator.evaluate_link() for details on which files + are accepted. 
+ """ + if project_name in self._all_candidates: + return self._all_candidates[project_name] + + link_evaluator = self.make_link_evaluator(project_name) + + collected_sources = self._link_collector.collect_sources( + project_name=project_name, + candidates_from_page=functools.partial( + self.process_project_url, + link_evaluator=link_evaluator, + ), + ) + + page_candidates_it = itertools.chain.from_iterable( + source.page_candidates() + for sources in collected_sources + for source in sources + if source is not None + ) + page_candidates = list(page_candidates_it) + + file_links_it = itertools.chain.from_iterable( + source.file_links() + for sources in collected_sources + for source in sources + if source is not None + ) + file_candidates = self.evaluate_links( + link_evaluator, + sorted(file_links_it, reverse=True), + ) + + if logger.isEnabledFor(logging.DEBUG) and file_candidates: + paths = [] + for candidate in file_candidates: + assert candidate.link.url # we need to have a URL + try: + paths.append(candidate.link.file_path) + except Exception: + paths.append(candidate.link.url) # it's not a local file + + logger.debug("Local files found: %s", ", ".join(paths)) + + # This is an intentional priority ordering + self._all_candidates[project_name] = file_candidates + page_candidates + + return self._all_candidates[project_name] + + def make_candidate_evaluator( + self, + project_name: str, + specifier: specifiers.BaseSpecifier | None = None, + hashes: Hashes | None = None, + ) -> CandidateEvaluator: + """Create a CandidateEvaluator object to use.""" + candidate_prefs = self._candidate_prefs + return CandidateEvaluator.create( + project_name=project_name, + target_python=self._target_python, + prefer_binary=candidate_prefs.prefer_binary, + allow_all_prereleases=candidate_prefs.allow_all_prereleases, + specifier=specifier, + hashes=hashes, + ) + + def find_best_candidate( + self, + project_name: str, + specifier: specifiers.BaseSpecifier | None = None, + hashes: Hashes | None = None, + ) -> BestCandidateResult: + """Find matches for the given project and specifier. + + :param specifier: An optional object implementing `filter` + (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable + versions. + + :return: A `BestCandidateResult` instance. 
+ """ + if (project_name, specifier, hashes) in self._best_candidates: + return self._best_candidates[project_name, specifier, hashes] + + candidates = self.find_all_candidates(project_name) + candidate_evaluator = self.make_candidate_evaluator( + project_name=project_name, + specifier=specifier, + hashes=hashes, + ) + self._best_candidates[project_name, specifier, hashes] = ( + candidate_evaluator.compute_best_candidate(candidates) + ) + + return self._best_candidates[project_name, specifier, hashes] + + def find_requirement( + self, req: InstallRequirement, upgrade: bool + ) -> InstallationCandidate | None: + """Try to find a Link matching req + + Expects req, an InstallRequirement and upgrade, a boolean + Returns a InstallationCandidate if found, + Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise + """ + name = req.name + assert name is not None, "find_requirement() called with no name" + + hashes = req.hashes(trust_internet=False) + best_candidate_result = self.find_best_candidate( + name, + specifier=req.specifier, + hashes=hashes, + ) + best_candidate = best_candidate_result.best_candidate + + installed_version: _BaseVersion | None = None + if req.satisfied_by is not None: + installed_version = req.satisfied_by.version + + def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str: + # This repeated parse_version and str() conversion is needed to + # handle different vendoring sources from pip and pkg_resources. + # If we stop using the pkg_resources provided specifier and start + # using our own, we can drop the cast to str(). + return ( + ", ".join( + sorted( + {str(c.version) for c in cand_iter}, + key=parse_version, + ) + ) + or "none" + ) + + if installed_version is None and best_candidate is None: + logger.critical( + "Could not find a version that satisfies the requirement %s " + "(from versions: %s)", + req, + _format_versions(best_candidate_result.all_candidates), + ) + + raise DistributionNotFound(f"No matching distribution found for {req}") + + def _should_install_candidate( + candidate: InstallationCandidate | None, + ) -> TypeGuard[InstallationCandidate]: + if installed_version is None: + return True + if best_candidate is None: + return False + return best_candidate.version > installed_version + + if not upgrade and installed_version is not None: + if _should_install_candidate(best_candidate): + logger.debug( + "Existing installed version (%s) satisfies requirement " + "(most up-to-date version is %s)", + installed_version, + best_candidate.version, + ) + else: + logger.debug( + "Existing installed version (%s) is most up-to-date and " + "satisfies requirement", + installed_version, + ) + return None + + if _should_install_candidate(best_candidate): + logger.debug( + "Using version %s (newest of versions: %s)", + best_candidate.version, + _format_versions(best_candidate_result.applicable_candidates), + ) + return best_candidate + + # We have an existing version, and its the best version + logger.debug( + "Installed version (%s) is most up-to-date (past versions: %s)", + installed_version, + _format_versions(best_candidate_result.applicable_candidates), + ) + raise BestVersionAlreadyInstalled + + +def _find_name_version_sep(fragment: str, canonical_name: str) -> int: + """Find the separator's index based on the package's canonical name. + + :param fragment: A + filename "fragment" (stem) or + egg fragment. + :param canonical_name: The package's canonical name. 
+ + This function is needed since the canonicalized name does not necessarily + have the same length as the egg info's name part. An example:: + + >>> fragment = 'foo__bar-1.0' + >>> canonical_name = 'foo-bar' + >>> _find_name_version_sep(fragment, canonical_name) + 8 + """ + # Project name and version must be separated by one single dash. Find all + # occurrences of dashes; if the string in front of it matches the canonical + # name, this is the one separating the name and version parts. + for i, c in enumerate(fragment): + if c != "-": + continue + if canonicalize_name(fragment[:i]) == canonical_name: + return i + raise ValueError(f"{fragment} does not match {canonical_name}") + + +def _extract_version_from_fragment(fragment: str, canonical_name: str) -> str | None: + """Parse the version string from a + filename + "fragment" (stem) or egg fragment. + + :param fragment: The string to parse. E.g. foo-2.1 + :param canonical_name: The canonicalized name of the package this + belongs to. + """ + try: + version_start = _find_name_version_sep(fragment, canonical_name) + 1 + except ValueError: + return None + version = fragment[version_start:] + if not version: + return None + return version diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/index/sources.py b/.venv/lib/python3.12/site-packages/pip/_internal/index/sources.py new file mode 100644 index 0000000..c67c4d7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/index/sources.py @@ -0,0 +1,287 @@ +from __future__ import annotations + +import logging +import mimetypes +import os +from collections import defaultdict +from collections.abc import Iterable +from typing import Callable + +from pip._vendor.packaging.utils import ( + InvalidSdistFilename, + InvalidWheelFilename, + canonicalize_name, + parse_sdist_filename, + parse_wheel_filename, +) + +from pip._internal.models.candidate import InstallationCandidate +from pip._internal.models.link import Link +from pip._internal.utils.urls import path_to_url, url_to_path +from pip._internal.vcs import is_url + +logger = logging.getLogger(__name__) + +FoundCandidates = Iterable[InstallationCandidate] +FoundLinks = Iterable[Link] +CandidatesFromPage = Callable[[Link], Iterable[InstallationCandidate]] +PageValidator = Callable[[Link], bool] + + +class LinkSource: + @property + def link(self) -> Link | None: + """Returns the underlying link, if there's one.""" + raise NotImplementedError() + + def page_candidates(self) -> FoundCandidates: + """Candidates found by parsing an archive listing HTML file.""" + raise NotImplementedError() + + def file_links(self) -> FoundLinks: + """Links found by specifying archives directly.""" + raise NotImplementedError() + + +def _is_html_file(file_url: str) -> bool: + return mimetypes.guess_type(file_url, strict=False)[0] == "text/html" + + +class _FlatDirectoryToUrls: + """Scans directory and caches results""" + + def __init__(self, path: str) -> None: + self._path = path + self._page_candidates: list[str] = [] + self._project_name_to_urls: dict[str, list[str]] = defaultdict(list) + self._scanned_directory = False + + def _scan_directory(self) -> None: + """Scans directory once and populates both page_candidates + and project_name_to_urls at the same time + """ + for entry in os.scandir(self._path): + url = path_to_url(entry.path) + if _is_html_file(url): + self._page_candidates.append(url) + continue + + # File must have a valid wheel or sdist name, + # otherwise not worth considering as a package + try: + project_filename = 
parse_wheel_filename(entry.name)[0] + except InvalidWheelFilename: + try: + project_filename = parse_sdist_filename(entry.name)[0] + except InvalidSdistFilename: + continue + + self._project_name_to_urls[project_filename].append(url) + self._scanned_directory = True + + @property + def page_candidates(self) -> list[str]: + if not self._scanned_directory: + self._scan_directory() + + return self._page_candidates + + @property + def project_name_to_urls(self) -> dict[str, list[str]]: + if not self._scanned_directory: + self._scan_directory() + + return self._project_name_to_urls + + +class _FlatDirectorySource(LinkSource): + """Link source specified by ``--find-links=``. + + This looks the content of the directory, and returns: + + * ``page_candidates``: Links listed on each HTML file in the directory. + * ``file_candidates``: Archives in the directory. + """ + + _paths_to_urls: dict[str, _FlatDirectoryToUrls] = {} + + def __init__( + self, + candidates_from_page: CandidatesFromPage, + path: str, + project_name: str, + ) -> None: + self._candidates_from_page = candidates_from_page + self._project_name = canonicalize_name(project_name) + + # Get existing instance of _FlatDirectoryToUrls if it exists + if path in self._paths_to_urls: + self._path_to_urls = self._paths_to_urls[path] + else: + self._path_to_urls = _FlatDirectoryToUrls(path=path) + self._paths_to_urls[path] = self._path_to_urls + + @property + def link(self) -> Link | None: + return None + + def page_candidates(self) -> FoundCandidates: + for url in self._path_to_urls.page_candidates: + yield from self._candidates_from_page(Link(url)) + + def file_links(self) -> FoundLinks: + for url in self._path_to_urls.project_name_to_urls[self._project_name]: + yield Link(url) + + +class _LocalFileSource(LinkSource): + """``--find-links=`` or ``--[extra-]index-url=``. + + If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to + the option, it is converted to a URL first. This returns: + + * ``page_candidates``: Links listed on an HTML file. + * ``file_candidates``: The non-HTML file. + """ + + def __init__( + self, + candidates_from_page: CandidatesFromPage, + link: Link, + ) -> None: + self._candidates_from_page = candidates_from_page + self._link = link + + @property + def link(self) -> Link | None: + return self._link + + def page_candidates(self) -> FoundCandidates: + if not _is_html_file(self._link.url): + return + yield from self._candidates_from_page(self._link) + + def file_links(self) -> FoundLinks: + if _is_html_file(self._link.url): + return + yield self._link + + +class _RemoteFileSource(LinkSource): + """``--find-links=`` or ``--[extra-]index-url=``. + + This returns: + + * ``page_candidates``: Links listed on an HTML file. + * ``file_candidates``: The non-HTML file. + """ + + def __init__( + self, + candidates_from_page: CandidatesFromPage, + page_validator: PageValidator, + link: Link, + ) -> None: + self._candidates_from_page = candidates_from_page + self._page_validator = page_validator + self._link = link + + @property + def link(self) -> Link | None: + return self._link + + def page_candidates(self) -> FoundCandidates: + if not self._page_validator(self._link): + return + yield from self._candidates_from_page(self._link) + + def file_links(self) -> FoundLinks: + yield self._link + + +class _IndexDirectorySource(LinkSource): + """``--[extra-]index-url=``. + + This is treated like a remote URL; ``candidates_from_page`` contains logic + for this by appending ``index.html`` to the link. 
+ """ + + def __init__( + self, + candidates_from_page: CandidatesFromPage, + link: Link, + ) -> None: + self._candidates_from_page = candidates_from_page + self._link = link + + @property + def link(self) -> Link | None: + return self._link + + def page_candidates(self) -> FoundCandidates: + yield from self._candidates_from_page(self._link) + + def file_links(self) -> FoundLinks: + return () + + +def build_source( + location: str, + *, + candidates_from_page: CandidatesFromPage, + page_validator: PageValidator, + expand_dir: bool, + cache_link_parsing: bool, + project_name: str, +) -> tuple[str | None, LinkSource | None]: + path: str | None = None + url: str | None = None + if os.path.exists(location): # Is a local path. + url = path_to_url(location) + path = location + elif location.startswith("file:"): # A file: URL. + url = location + path = url_to_path(location) + elif is_url(location): + url = location + + if url is None: + msg = ( + "Location '%s' is ignored: " + "it is either a non-existing path or lacks a specific scheme." + ) + logger.warning(msg, location) + return (None, None) + + if path is None: + source: LinkSource = _RemoteFileSource( + candidates_from_page=candidates_from_page, + page_validator=page_validator, + link=Link(url, cache_link_parsing=cache_link_parsing), + ) + return (url, source) + + if os.path.isdir(path): + if expand_dir: + source = _FlatDirectorySource( + candidates_from_page=candidates_from_page, + path=path, + project_name=project_name, + ) + else: + source = _IndexDirectorySource( + candidates_from_page=candidates_from_page, + link=Link(url, cache_link_parsing=cache_link_parsing), + ) + return (url, source) + elif os.path.isfile(path): + source = _LocalFileSource( + candidates_from_page=candidates_from_page, + link=Link(url, cache_link_parsing=cache_link_parsing), + ) + return (url, source) + logger.warning( + "Location '%s' is ignored: it is neither a file nor a directory.", + location, + ) + return (url, None) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/locations/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__init__.py new file mode 100644 index 0000000..9f2c4fe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__init__.py @@ -0,0 +1,441 @@ +from __future__ import annotations + +import functools +import logging +import os +import pathlib +import sys +import sysconfig +from typing import Any + +from pip._internal.models.scheme import SCHEME_KEYS, Scheme +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.deprecation import deprecated +from pip._internal.utils.virtualenv import running_under_virtualenv + +from . import _sysconfig +from .base import ( + USER_CACHE_DIR, + get_major_minor_version, + get_src_prefix, + is_osx_framework, + site_packages, + user_site, +) + +__all__ = [ + "USER_CACHE_DIR", + "get_bin_prefix", + "get_bin_user", + "get_major_minor_version", + "get_platlib", + "get_purelib", + "get_scheme", + "get_src_prefix", + "site_packages", + "user_site", +] + + +logger = logging.getLogger(__name__) + + +_PLATLIBDIR: str = getattr(sys, "platlibdir", "lib") + +_USE_SYSCONFIG_DEFAULT = sys.version_info >= (3, 10) + + +def _should_use_sysconfig() -> bool: + """This function determines the value of _USE_SYSCONFIG. + + By default, pip uses sysconfig on Python 3.10+. 
+    But Python distributors can override this decision by setting:
+        sysconfig._PIP_USE_SYSCONFIG = True / False
+    Rationale in https://github.com/pypa/pip/issues/10647
+
+    This is a function for testability, but should be constant during any one
+    run.
+    """
+    return bool(getattr(sysconfig, "_PIP_USE_SYSCONFIG", _USE_SYSCONFIG_DEFAULT))
+
+
+_USE_SYSCONFIG = _should_use_sysconfig()
+
+if not _USE_SYSCONFIG:
+    # Import distutils lazily to avoid deprecation warnings,
+    # but import it soon enough that it is in memory and available during
+    # a pip reinstall.
+    from . import _distutils
+
+# Be noisy about incompatibilities if this platform "should" be using
+# sysconfig, but is explicitly opting out and using distutils instead.
+if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG:
+    _MISMATCH_LEVEL = logging.WARNING
+else:
+    _MISMATCH_LEVEL = logging.DEBUG
+
+
+def _looks_like_bpo_44860() -> bool:
+    """The resolution to bpo-44860 will change this incorrect platlib.
+
+    See <https://bugs.python.org/issue44860>.
+    """
+    from distutils.command.install import INSTALL_SCHEMES
+
+    try:
+        unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"]
+    except KeyError:
+        return False
+    return unix_user_platlib == "$usersite"
+
+
+def _looks_like_red_hat_patched_platlib_purelib(scheme: dict[str, str]) -> bool:
+    platlib = scheme["platlib"]
+    if "/$platlibdir/" in platlib:
+        platlib = platlib.replace("/$platlibdir/", f"/{_PLATLIBDIR}/")
+    if "/lib64/" not in platlib:
+        return False
+    unpatched = platlib.replace("/lib64/", "/lib/")
+    return unpatched.replace("$platbase/", "$base/") == scheme["purelib"]
+
+
+@functools.cache
+def _looks_like_red_hat_lib() -> bool:
+    """Red Hat patches platlib in unix_prefix and unix_home, but not purelib.
+
+    This is the only way I can see to tell a Red Hat-patched Python.
+    """
+    from distutils.command.install import INSTALL_SCHEMES
+
+    return all(
+        k in INSTALL_SCHEMES
+        and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k])
+        for k in ("unix_prefix", "unix_home")
+    )
+
+
+@functools.cache
+def _looks_like_debian_scheme() -> bool:
+    """Debian adds two additional schemes."""
+    from distutils.command.install import INSTALL_SCHEMES
+
+    return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES
+
+
+@functools.cache
+def _looks_like_red_hat_scheme() -> bool:
+    """Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.
+
+    Red Hat's ``00251-change-user-install-location.patch`` changes the install
+    command's ``prefix`` and ``exec_prefix`` to append ``"/local"``. This is
+    (fortunately?) done quite unconditionally, so we create a default command
+    object without any configuration to detect this.
+    """
+    from distutils.command.install import install
+    from distutils.dist import Distribution
+
+    cmd: Any = install(Distribution())
+    cmd.finalize_options()
+    return (
+        cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local"
+        and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local"
+    )
+
+
+@functools.cache
+def _looks_like_slackware_scheme() -> bool:
+    """Slackware patches sysconfig but fails to patch distutils and site.
+
+    Slackware changes sysconfig's user scheme to use ``"lib64"`` for the lib
+    path, but does not do the same to the site module.
+    """
+    if user_site is None:  # User-site not available.
+        return False
+    try:
+        paths = sysconfig.get_paths(scheme="posix_user", expand=False)
+    except KeyError:  # User-site not available.
+ return False + return "/lib64/" in paths["purelib"] and "/lib64/" not in user_site + + +@functools.cache +def _looks_like_msys2_mingw_scheme() -> bool: + """MSYS2 patches distutils and sysconfig to use a UNIX-like scheme. + + However, MSYS2 incorrectly patches sysconfig ``nt`` scheme. The fix is + likely going to be included in their 3.10 release, so we ignore the warning. + See msys2/MINGW-packages#9319. + + MSYS2 MINGW's patch uses lowercase ``"lib"`` instead of the usual uppercase, + and is missing the final ``"site-packages"``. + """ + paths = sysconfig.get_paths("nt", expand=False) + return all( + "Lib" not in p and "lib" in p and not p.endswith("site-packages") + for p in (paths[key] for key in ("platlib", "purelib")) + ) + + +@functools.cache +def _warn_mismatched(old: pathlib.Path, new: pathlib.Path, *, key: str) -> None: + issue_url = "https://github.com/pypa/pip/issues/10151" + message = ( + "Value for %s does not match. Please report this to <%s>" + "\ndistutils: %s" + "\nsysconfig: %s" + ) + logger.log(_MISMATCH_LEVEL, message, key, issue_url, old, new) + + +def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool: + if old == new: + return False + _warn_mismatched(old, new, key=key) + return True + + +@functools.cache +def _log_context( + *, + user: bool = False, + home: str | None = None, + root: str | None = None, + prefix: str | None = None, +) -> None: + parts = [ + "Additional context:", + "user = %r", + "home = %r", + "root = %r", + "prefix = %r", + ] + + logger.log(_MISMATCH_LEVEL, "\n".join(parts), user, home, root, prefix) + + +def get_scheme( + dist_name: str, + user: bool = False, + home: str | None = None, + root: str | None = None, + isolated: bool = False, + prefix: str | None = None, +) -> Scheme: + new = _sysconfig.get_scheme( + dist_name, + user=user, + home=home, + root=root, + isolated=isolated, + prefix=prefix, + ) + if _USE_SYSCONFIG: + return new + + old = _distutils.get_scheme( + dist_name, + user=user, + home=home, + root=root, + isolated=isolated, + prefix=prefix, + ) + + warning_contexts = [] + for k in SCHEME_KEYS: + old_v = pathlib.Path(getattr(old, k)) + new_v = pathlib.Path(getattr(new, k)) + + if old_v == new_v: + continue + + # distutils incorrectly put PyPy packages under ``site-packages/python`` + # in the ``posix_home`` scheme, but PyPy devs said they expect the + # directory name to be ``pypy`` instead. So we treat this as a bug fix + # and not warn about it. See bpo-43307 and python/cpython#24628. + skip_pypy_special_case = ( + sys.implementation.name == "pypy" + and home is not None + and k in ("platlib", "purelib") + and old_v.parent == new_v.parent + and old_v.name.startswith("python") + and new_v.name.startswith("pypy") + ) + if skip_pypy_special_case: + continue + + # sysconfig's ``osx_framework_user`` does not include ``pythonX.Y`` in + # the ``include`` value, but distutils's ``headers`` does. We'll let + # CPython decide whether this is a bug or feature. See bpo-43948. + skip_osx_framework_user_special_case = ( + user + and is_osx_framework() + and k == "headers" + and old_v.parent.parent == new_v.parent + and old_v.parent.name.startswith("python") + ) + if skip_osx_framework_user_special_case: + continue + + # On Red Hat and derived Linux distributions, distutils is patched to + # use "lib64" instead of "lib" for platlib. 
+ if k == "platlib" and _looks_like_red_hat_lib(): + continue + + # On Python 3.9+, sysconfig's posix_user scheme sets platlib against + # sys.platlibdir, but distutils's unix_user incorrectly continues + # using the same $usersite for both platlib and purelib. This creates a + # mismatch when sys.platlibdir is not "lib". + skip_bpo_44860 = ( + user + and k == "platlib" + and not WINDOWS + and _PLATLIBDIR != "lib" + and _looks_like_bpo_44860() + ) + if skip_bpo_44860: + continue + + # Slackware incorrectly patches posix_user to use lib64 instead of lib, + # but not usersite to match the location. + skip_slackware_user_scheme = ( + user + and k in ("platlib", "purelib") + and not WINDOWS + and _looks_like_slackware_scheme() + ) + if skip_slackware_user_scheme: + continue + + # Both Debian and Red Hat patch Python to place the system site under + # /usr/local instead of /usr. Debian also places lib in dist-packages + # instead of site-packages, but the /usr/local check should cover it. + skip_linux_system_special_case = ( + not (user or home or prefix or running_under_virtualenv()) + and old_v.parts[1:3] == ("usr", "local") + and len(new_v.parts) > 1 + and new_v.parts[1] == "usr" + and (len(new_v.parts) < 3 or new_v.parts[2] != "local") + and (_looks_like_red_hat_scheme() or _looks_like_debian_scheme()) + ) + if skip_linux_system_special_case: + continue + + # MSYS2 MINGW's sysconfig patch does not include the "site-packages" + # part of the path. This is incorrect and will be fixed in MSYS. + skip_msys2_mingw_bug = ( + WINDOWS and k in ("platlib", "purelib") and _looks_like_msys2_mingw_scheme() + ) + if skip_msys2_mingw_bug: + continue + + # CPython's POSIX install script invokes pip (via ensurepip) against the + # interpreter located in the source tree, not the install site. This + # triggers special logic in sysconfig that's not present in distutils. + # https://github.com/python/cpython/blob/8c21941ddaf/Lib/sysconfig.py#L178-L194 + skip_cpython_build = ( + sysconfig.is_python_build(check_home=True) + and not WINDOWS + and k in ("headers", "include", "platinclude") + ) + if skip_cpython_build: + continue + + warning_contexts.append((old_v, new_v, f"scheme.{k}")) + + if not warning_contexts: + return old + + # Check if this path mismatch is caused by distutils config files. Those + # files will no longer work once we switch to sysconfig, so this raises a + # deprecation message for them. + default_old = _distutils.distutils_scheme( + dist_name, + user, + home, + root, + isolated, + prefix, + ignore_config_files=True, + ) + if any(default_old[k] != getattr(old, k) for k in SCHEME_KEYS): + deprecated( + reason=( + "Configuring installation scheme with distutils config files " + "is deprecated and will no longer work in the near future. If you " + "are using a Homebrew or Linuxbrew Python, please see discussion " + "at https://github.com/Homebrew/homebrew-core/issues/76621" + ), + replacement=None, + gone_in=None, + ) + return old + + # Post warnings about this mismatch so user can report them back. 
+ for old_v, new_v, key in warning_contexts: + _warn_mismatched(old_v, new_v, key=key) + _log_context(user=user, home=home, root=root, prefix=prefix) + + return old + + +def get_bin_prefix() -> str: + new = _sysconfig.get_bin_prefix() + if _USE_SYSCONFIG: + return new + + old = _distutils.get_bin_prefix() + if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"): + _log_context() + return old + + +def get_bin_user() -> str: + return _sysconfig.get_scheme("", user=True).scripts + + +def _looks_like_deb_system_dist_packages(value: str) -> bool: + """Check if the value is Debian's APT-controlled dist-packages. + + Debian's ``distutils.sysconfig.get_python_lib()`` implementation returns the + default package path controlled by APT, but does not patch ``sysconfig`` to + do the same. This is similar to the bug worked around in ``get_scheme()``, + but here the default is ``deb_system`` instead of ``unix_local``. Ultimately + we can't do anything about this Debian bug, and this detection allows us to + skip the warning when needed. + """ + if not _looks_like_debian_scheme(): + return False + if value == "/usr/lib/python3/dist-packages": + return True + return False + + +def get_purelib() -> str: + """Return the default pure-Python lib location.""" + new = _sysconfig.get_purelib() + if _USE_SYSCONFIG: + return new + + old = _distutils.get_purelib() + if _looks_like_deb_system_dist_packages(old): + return old + if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="purelib"): + _log_context() + return old + + +def get_platlib() -> str: + """Return the default platform-shared lib location.""" + new = _sysconfig.get_platlib() + if _USE_SYSCONFIG: + return new + + from . import _distutils + + old = _distutils.get_platlib() + if _looks_like_deb_system_dist_packages(old): + return old + if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="platlib"): + _log_context() + return old diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..3a1fe94 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-312.pyc new file mode 100644 index 0000000..cec1b60 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-312.pyc new file mode 100644 index 0000000..96ee798 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/base.cpython-312.pyc new file mode 100644 index 0000000..f811b66 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/locations/__pycache__/base.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/pip/_internal/locations/_distutils.py b/.venv/lib/python3.12/site-packages/pip/_internal/locations/_distutils.py new file mode 100644 index 0000000..28c066b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/locations/_distutils.py @@ -0,0 +1,173 @@ +"""Locations where we look for configs, install stuff, etc""" + +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False + +# If pip's going to use distutils, it should not be using the copy that setuptools +# might have injected into the environment. This is done by removing the injected +# shim, if it's injected. +# +# See https://github.com/pypa/pip/issues/8761 for the original discussion and +# rationale for why this is done within pip. +from __future__ import annotations + +try: + __import__("_distutils_hack").remove_shim() +except (ImportError, AttributeError): + pass + +import logging +import os +import sys +from distutils.cmd import Command as DistutilsCommand +from distutils.command.install import SCHEME_KEYS +from distutils.command.install import install as distutils_install_command +from distutils.sysconfig import get_python_lib + +from pip._internal.models.scheme import Scheme +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.virtualenv import running_under_virtualenv + +from .base import get_major_minor_version + +logger = logging.getLogger(__name__) + + +def distutils_scheme( + dist_name: str, + user: bool = False, + home: str | None = None, + root: str | None = None, + isolated: bool = False, + prefix: str | None = None, + *, + ignore_config_files: bool = False, +) -> dict[str, str]: + """ + Return a distutils install scheme + """ + from distutils.dist import Distribution + + dist_args: dict[str, str | list[str]] = {"name": dist_name} + if isolated: + dist_args["script_args"] = ["--no-user-cfg"] + + d = Distribution(dist_args) + if not ignore_config_files: + try: + d.parse_config_files() + except UnicodeDecodeError: + paths = d.find_config_files() + logger.warning( + "Ignore distutils configs in %s due to encoding errors.", + ", ".join(os.path.basename(p) for p in paths), + ) + obj: DistutilsCommand | None = None + obj = d.get_command_obj("install", create=True) + assert obj is not None + i: distutils_install_command = obj + # NOTE: setting user or home has the side-effect of creating the home dir + # or user base for installations during finalize_options() + # ideally, we'd prefer a scheme class that has no side-effects. + assert not (user and prefix), f"user={user} prefix={prefix}" + assert not (home and prefix), f"home={home} prefix={prefix}" + i.user = user or i.user + if user or home: + i.prefix = "" + i.prefix = prefix or i.prefix + i.home = home or i.home + i.root = root or i.root + i.finalize_options() + + scheme: dict[str, str] = {} + for key in SCHEME_KEYS: + scheme[key] = getattr(i, "install_" + key) + + # install_lib specified in setup.cfg should install *everything* + # into there (i.e. it takes precedence over both purelib and + # platlib). 
Note, i.install_lib is *always* set after + # finalize_options(); we only want to override here if the user + # has explicitly requested it hence going back to the config + if "install_lib" in d.get_option_dict("install"): + scheme.update({"purelib": i.install_lib, "platlib": i.install_lib}) + + if running_under_virtualenv(): + if home: + prefix = home + elif user: + prefix = i.install_userbase + else: + prefix = i.prefix + scheme["headers"] = os.path.join( + prefix, + "include", + "site", + f"python{get_major_minor_version()}", + dist_name, + ) + + if root is not None: + path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1] + scheme["headers"] = os.path.join(root, path_no_drive[1:]) + + return scheme + + +def get_scheme( + dist_name: str, + user: bool = False, + home: str | None = None, + root: str | None = None, + isolated: bool = False, + prefix: str | None = None, +) -> Scheme: + """ + Get the "scheme" corresponding to the input parameters. The distutils + documentation provides the context for the available schemes: + https://docs.python.org/3/install/index.html#alternate-installation + + :param dist_name: the name of the package to retrieve the scheme for, used + in the headers scheme path + :param user: indicates to use the "user" scheme + :param home: indicates to use the "home" scheme and provides the base + directory for the same + :param root: root under which other directories are re-based + :param isolated: equivalent to --no-user-cfg, i.e. do not consider + ~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for + scheme paths + :param prefix: indicates to use the "prefix" scheme and provides the + base directory for the same + """ + scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix) + return Scheme( + platlib=scheme["platlib"], + purelib=scheme["purelib"], + headers=scheme["headers"], + scripts=scheme["scripts"], + data=scheme["data"], + ) + + +def get_bin_prefix() -> str: + # XXX: In old virtualenv versions, sys.prefix can contain '..' components, + # so we need to call normpath to eliminate them. + prefix = os.path.normpath(sys.prefix) + if WINDOWS: + bin_py = os.path.join(prefix, "Scripts") + # buildout uses 'bin' on Windows too? + if not os.path.exists(bin_py): + bin_py = os.path.join(prefix, "bin") + return bin_py + # Forcing to use /usr/local/bin for standard macOS framework installs + # Also log to ~/Library/Logs/ for use with the Console.app log viewer + if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/": + return "/usr/local/bin" + return os.path.join(prefix, "bin") + + +def get_purelib() -> str: + return get_python_lib(plat_specific=False) + + +def get_platlib() -> str: + return get_python_lib(plat_specific=True) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/locations/_sysconfig.py b/.venv/lib/python3.12/site-packages/pip/_internal/locations/_sysconfig.py new file mode 100644 index 0000000..d4a448e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/locations/_sysconfig.py @@ -0,0 +1,215 @@ +from __future__ import annotations + +import logging +import os +import sys +import sysconfig + +from pip._internal.exceptions import InvalidSchemeCombination, UserInstallationInvalid +from pip._internal.models.scheme import SCHEME_KEYS, Scheme +from pip._internal.utils.virtualenv import running_under_virtualenv + +from .base import change_root, get_major_minor_version, is_osx_framework + +logger = logging.getLogger(__name__) + + +# Notes on _infer_* functions. 
+# Unfortunately ``get_default_scheme()`` didn't exist before 3.10, so there's no
+# way to ask things like "what is the '_prefix' scheme on this platform". These
+# functions try to answer that with some heuristics while accounting for ad-hoc
+# platforms not covered by CPython's default sysconfig implementation. If the
+# ad-hoc implementation does not fully implement sysconfig, we'll fall back to
+# a POSIX scheme.
+
+_AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())
+
+_PREFERRED_SCHEME_API = getattr(sysconfig, "get_preferred_scheme", None)
+
+
+def _should_use_osx_framework_prefix() -> bool:
+    """Check for Apple's ``osx_framework_library`` scheme.
+
+    Python distributed by Apple's Command Line Tools has this special scheme
+    that's used when:
+
+    * This is a framework build.
+    * We are installing into the system prefix.
+
+    This does not account for ``pip install --prefix`` (which also means we're
+    not installing to the system prefix), which should use ``posix_prefix``, but
+    logic here means ``_infer_prefix()`` outputs ``osx_framework_library``. But
+    since ``prefix`` is not available for ``sysconfig.get_default_scheme()``,
+    which is the stdlib replacement for ``_infer_prefix()``, presumably Apple
+    wouldn't be able to magically switch between ``osx_framework_library`` and
+    ``posix_prefix``. ``_infer_prefix()`` returning ``osx_framework_library``
+    means its behavior is consistent whether we use the stdlib implementation
+    or our own, and we deal with this special case in ``get_scheme()`` instead.
+    """
+    return (
+        "osx_framework_library" in _AVAILABLE_SCHEMES
+        and not running_under_virtualenv()
+        and is_osx_framework()
+    )
+
+
+def _infer_prefix() -> str:
+    """Try to find a prefix scheme for the current platform.
+
+    This tries:
+
+    * A special ``osx_framework_library`` for Python distributed by Apple's
+      Command Line Tools, when not running in a virtual environment.
+    * Implementation + OS, used by PyPy on Windows (``pypy_nt``).
+    * Implementation without OS, used by PyPy on POSIX (``pypy``).
+    * OS + "prefix", used by CPython on POSIX (``posix_prefix``).
+    * Just the OS name, used by CPython on Windows (``nt``).
+
+    If none of the above works, fall back to ``posix_prefix``.
+    """
+    if _PREFERRED_SCHEME_API:
+        return _PREFERRED_SCHEME_API("prefix")
+    if _should_use_osx_framework_prefix():
+        return "osx_framework_library"
+    implementation_suffixed = f"{sys.implementation.name}_{os.name}"
+    if implementation_suffixed in _AVAILABLE_SCHEMES:
+        return implementation_suffixed
+    if sys.implementation.name in _AVAILABLE_SCHEMES:
+        return sys.implementation.name
+    suffixed = f"{os.name}_prefix"
+    if suffixed in _AVAILABLE_SCHEMES:
+        return suffixed
+    if os.name in _AVAILABLE_SCHEMES:  # On Windows, prefix is just called "nt".
+        return os.name
+    return "posix_prefix"
+
+
+def _infer_user() -> str:
+    """Try to find a user scheme for the current platform."""
+    if _PREFERRED_SCHEME_API:
+        return _PREFERRED_SCHEME_API("user")
+    if is_osx_framework() and not running_under_virtualenv():
+        suffixed = "osx_framework_user"
+    else:
+        suffixed = f"{os.name}_user"
+    if suffixed in _AVAILABLE_SCHEMES:
+        return suffixed
+    if "posix_user" not in _AVAILABLE_SCHEMES:  # User scheme unavailable.
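+        # Neither an OS-specific user scheme nor the generic POSIX one
+        # exists, so there is no valid location for a user install.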
+ raise UserInstallationInvalid() + return "posix_user" + + +def _infer_home() -> str: + """Try to find a home for the current platform.""" + if _PREFERRED_SCHEME_API: + return _PREFERRED_SCHEME_API("home") + suffixed = f"{os.name}_home" + if suffixed in _AVAILABLE_SCHEMES: + return suffixed + return "posix_home" + + +# Update these keys if the user sets a custom home. +_HOME_KEYS = [ + "installed_base", + "base", + "installed_platbase", + "platbase", + "prefix", + "exec_prefix", +] +if sysconfig.get_config_var("userbase") is not None: + _HOME_KEYS.append("userbase") + + +def get_scheme( + dist_name: str, + user: bool = False, + home: str | None = None, + root: str | None = None, + isolated: bool = False, + prefix: str | None = None, +) -> Scheme: + """ + Get the "scheme" corresponding to the input parameters. + + :param dist_name: the name of the package to retrieve the scheme for, used + in the headers scheme path + :param user: indicates to use the "user" scheme + :param home: indicates to use the "home" scheme + :param root: root under which other directories are re-based + :param isolated: ignored, but kept for distutils compatibility (where + this controls whether the user-site pydistutils.cfg is honored) + :param prefix: indicates to use the "prefix" scheme and provides the + base directory for the same + """ + if user and prefix: + raise InvalidSchemeCombination("--user", "--prefix") + if home and prefix: + raise InvalidSchemeCombination("--home", "--prefix") + + if home is not None: + scheme_name = _infer_home() + elif user: + scheme_name = _infer_user() + else: + scheme_name = _infer_prefix() + + # Special case: When installing into a custom prefix, use posix_prefix + # instead of osx_framework_library. See _should_use_osx_framework_prefix() + # docstring for details. + if prefix is not None and scheme_name == "osx_framework_library": + scheme_name = "posix_prefix" + + if home is not None: + variables = {k: home for k in _HOME_KEYS} + elif prefix is not None: + variables = {k: prefix for k in _HOME_KEYS} + else: + variables = {} + + paths = sysconfig.get_paths(scheme=scheme_name, vars=variables) + + # Logic here is very arbitrary, we're doing it for compatibility, don't ask. + # 1. Pip historically uses a special header path in virtual environments. + # 2. If the distribution name is not known, distutils uses 'UNKNOWN'. We + # only do the same when not running in a virtual environment because + # pip's historical header path logic (see point 1) did not do this. + if running_under_virtualenv(): + if user: + base = variables.get("userbase", sys.prefix) + else: + base = variables.get("base", sys.prefix) + python_xy = f"python{get_major_minor_version()}" + paths["include"] = os.path.join(base, "include", "site", python_xy) + elif not dist_name: + dist_name = "UNKNOWN" + + scheme = Scheme( + platlib=paths["platlib"], + purelib=paths["purelib"], + headers=os.path.join(paths["include"], dist_name), + scripts=paths["scripts"], + data=paths["data"], + ) + if root is not None: + converted_keys = {} + for key in SCHEME_KEYS: + converted_keys[key] = change_root(root, getattr(scheme, key)) + scheme = Scheme(**converted_keys) + return scheme + + +def get_bin_prefix() -> str: + # Forcing to use /usr/local/bin for standard macOS framework installs. 
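+    # This mirrors the distutils-based get_bin_prefix() earlier in this diff;
+    # both backends special-case Apple's /System/Library/ framework prefix.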
+ if sys.platform[:6] == "darwin" and sys.prefix[:16] == "/System/Library/": + return "/usr/local/bin" + return sysconfig.get_paths()["scripts"] + + +def get_purelib() -> str: + return sysconfig.get_paths()["purelib"] + + +def get_platlib() -> str: + return sysconfig.get_paths()["platlib"] diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/locations/base.py b/.venv/lib/python3.12/site-packages/pip/_internal/locations/base.py new file mode 100644 index 0000000..17cd0e8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/locations/base.py @@ -0,0 +1,82 @@ +from __future__ import annotations + +import functools +import os +import site +import sys +import sysconfig + +from pip._internal.exceptions import InstallationError +from pip._internal.utils import appdirs +from pip._internal.utils.virtualenv import running_under_virtualenv + +# Application Directories +USER_CACHE_DIR = appdirs.user_cache_dir("pip") + +# FIXME doesn't account for venv linked to global site-packages +site_packages: str = sysconfig.get_path("purelib") + + +def get_major_minor_version() -> str: + """ + Return the major-minor version of the current Python as a string, e.g. + "3.7" or "3.10". + """ + return "{}.{}".format(*sys.version_info) + + +def change_root(new_root: str, pathname: str) -> str: + """Return 'pathname' with 'new_root' prepended. + + If 'pathname' is relative, this is equivalent to os.path.join(new_root, pathname). + Otherwise, it requires making 'pathname' relative and then joining the + two, which is tricky on DOS/Windows and Mac OS. + + This is borrowed from Python's standard library's distutils module. + """ + if os.name == "posix": + if not os.path.isabs(pathname): + return os.path.join(new_root, pathname) + else: + return os.path.join(new_root, pathname[1:]) + + elif os.name == "nt": + (drive, path) = os.path.splitdrive(pathname) + if path[0] == "\\": + path = path[1:] + return os.path.join(new_root, path) + + else: + raise InstallationError( + f"Unknown platform: {os.name}\n" + "Can not change root path prefix on unknown platform." + ) + + +def get_src_prefix() -> str: + if running_under_virtualenv(): + src_prefix = os.path.join(sys.prefix, "src") + else: + # FIXME: keep src in cwd for now (it is not a temporary folder) + try: + src_prefix = os.path.join(os.getcwd(), "src") + except OSError: + # In case the current working directory has been renamed or deleted + sys.exit("The folder you are executing pip from can no longer be found.") + + # under macOS + virtualenv sys.prefix is not properly resolved + # it is something like /path/to/python/bin/.. + return os.path.abspath(src_prefix) + + +try: + # Use getusersitepackages if this is present, as it ensures that the + # value is initialised properly. + user_site: str | None = site.getusersitepackages() +except AttributeError: + user_site = site.USER_SITE + + +@functools.cache +def is_osx_framework() -> bool: + return bool(sysconfig.get_config_var("PYTHONFRAMEWORK")) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/main.py b/.venv/lib/python3.12/site-packages/pip/_internal/main.py new file mode 100644 index 0000000..ec52c4e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/main.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + +def main(args: list[str] | None = None) -> int: + """This is preserved for old console scripts that may still be referencing + it. + + For additional details, see https://github.com/pypa/pip/issues/7498. 
+    """
+    from pip._internal.utils.entrypoints import _wrapper
+
+    return _wrapper(args)
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__init__.py
new file mode 100644
index 0000000..927e375
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__init__.py
@@ -0,0 +1,164 @@
+from __future__ import annotations
+
+import contextlib
+import functools
+import os
+import sys
+from typing import Literal, Protocol, cast
+
+from pip._internal.utils.deprecation import deprecated
+from pip._internal.utils.misc import strtobool
+
+from .base import BaseDistribution, BaseEnvironment, FilesystemWheel, MemoryWheel, Wheel
+
+__all__ = [
+    "BaseDistribution",
+    "BaseEnvironment",
+    "FilesystemWheel",
+    "MemoryWheel",
+    "Wheel",
+    "get_default_environment",
+    "get_environment",
+    "get_wheel_distribution",
+    "select_backend",
+]
+
+
+def _should_use_importlib_metadata() -> bool:
+    """Whether to use the ``importlib.metadata`` or ``pkg_resources`` backend.
+
+    By default, pip uses ``importlib.metadata`` on Python 3.11+, and
+    ``pkg_resources`` otherwise. Up to Python 3.13, this can be
+    overridden in a couple of ways:
+
+    * If the environment variable ``_PIP_USE_IMPORTLIB_METADATA`` is set, it
+      dictates whether ``importlib.metadata`` is used, for Python <3.14.
+    * On Python 3.11, 3.12 and 3.13, Python distributors can patch
+      ``importlib.metadata`` to add a global constant
+      ``_PIP_USE_IMPORTLIB_METADATA = False``. This makes pip use
+      ``pkg_resources`` (unless the user set the aforementioned environment
+      variable to *True*).
+
+    On Python 3.14+, the ``pkg_resources`` backend cannot be used.
+    """
+    if sys.version_info >= (3, 14):
+        # On Python >=3.14 we only support importlib.metadata.
+        return True
+    with contextlib.suppress(KeyError, ValueError):
+        # On Python <3.14, if the environment variable is set, we obey what it says.
+        return bool(strtobool(os.environ["_PIP_USE_IMPORTLIB_METADATA"]))
+    if sys.version_info < (3, 11):
+        # On Python <3.11, we always use pkg_resources, unless the environment
+        # variable was set.
+        return False
+    # On Python 3.11, 3.12 and 3.13, we check if the global constant is set.
+    import importlib.metadata
+
+    return bool(getattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA", True))
+
+
+def _emit_pkg_resources_deprecation_if_needed() -> None:
+    if sys.version_info < (3, 11):
+        # All pip versions supporting Python<=3.11 will support pkg_resources,
+        # and pkg_resources is the default for these, so let's not bother users.
+        return
+
+    import importlib.metadata
+
+    if hasattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA"):
+        # The Python distributor has set the global constant, so we don't
+        # warn, since it is not a user decision.
+        return
+
+    # The user has decided to use pkg_resources, so we warn.
+    deprecated(
+        reason="Using the pkg_resources metadata backend is deprecated.",
+        replacement=(
+            "to use the default importlib.metadata backend, "
+            "by unsetting the _PIP_USE_IMPORTLIB_METADATA environment variable"
+        ),
+        gone_in="26.3",
+        issue=13317,
+    )
+
+
+class Backend(Protocol):
+    NAME: Literal["importlib", "pkg_resources"]
+    Distribution: type[BaseDistribution]
+    Environment: type[BaseEnvironment]
+
+
+@functools.cache
+def select_backend() -> Backend:
+    if _should_use_importlib_metadata():
+        from . import importlib
+
+        return cast(Backend, importlib)
+
+    _emit_pkg_resources_deprecation_if_needed()
+
+    from .
import pkg_resources + + return cast(Backend, pkg_resources) + + +def get_default_environment() -> BaseEnvironment: + """Get the default representation for the current environment. + + This returns an Environment instance from the chosen backend. The default + Environment instance should be built from ``sys.path`` and may use caching + to share instance state across calls. + """ + return select_backend().Environment.default() + + +def get_environment(paths: list[str] | None) -> BaseEnvironment: + """Get a representation of the environment specified by ``paths``. + + This returns an Environment instance from the chosen backend based on the + given import paths. The backend must build a fresh instance representing + the state of installed distributions when this function is called. + """ + return select_backend().Environment.from_paths(paths) + + +def get_directory_distribution(directory: str) -> BaseDistribution: + """Get the distribution metadata representation in the specified directory. + + This returns a Distribution instance from the chosen backend based on + the given on-disk ``.dist-info`` directory. + """ + return select_backend().Distribution.from_directory(directory) + + +def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution: + """Get the representation of the specified wheel's distribution metadata. + + This returns a Distribution instance from the chosen backend based on + the given wheel's ``.dist-info`` directory. + + :param canonical_name: Normalized project name of the given wheel. + """ + return select_backend().Distribution.from_wheel(wheel, canonical_name) + + +def get_metadata_distribution( + metadata_contents: bytes, + filename: str, + canonical_name: str, +) -> BaseDistribution: + """Get the dist representation of the specified METADATA file contents. + + This returns a Distribution instance from the chosen backend sourced from the data + in `metadata_contents`. + + :param metadata_contents: Contents of a METADATA file within a dist, or one served + via PEP 658. + :param filename: Filename for the dist this metadata represents. + :param canonical_name: Normalized project name of the given dist. 
+ """ + return select_backend().Distribution.from_metadata_file_contents( + metadata_contents, + filename, + canonical_name, + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..91409d8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-312.pyc new file mode 100644 index 0000000..a9bcb1d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/base.cpython-312.pyc new file mode 100644 index 0000000..41618c5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-312.pyc new file mode 100644 index 0000000..f5fbd96 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/_json.py b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/_json.py new file mode 100644 index 0000000..b39ac05 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/_json.py @@ -0,0 +1,87 @@ +# Extracted from https://github.com/pfmoore/pkg_metadata +from __future__ import annotations + +from email.header import Header, decode_header, make_header +from email.message import Message +from typing import Any, cast + +METADATA_FIELDS = [ + # Name, Multiple-Use + ("Metadata-Version", False), + ("Name", False), + ("Version", False), + ("Dynamic", True), + ("Platform", True), + ("Supported-Platform", True), + ("Summary", False), + ("Description", False), + ("Description-Content-Type", False), + ("Keywords", False), + ("Home-page", False), + ("Download-URL", False), + ("Author", False), + ("Author-email", False), + ("Maintainer", False), + ("Maintainer-email", False), + ("License", False), + ("License-Expression", False), + ("License-File", True), + ("Classifier", True), + ("Requires-Dist", True), + ("Requires-Python", False), + ("Requires-External", True), + ("Project-URL", True), + ("Provides-Extra", True), + ("Provides-Dist", True), + ("Obsoletes-Dist", True), +] + + +def json_name(field: str) -> str: + return field.lower().replace("-", "_") + + +def msg_to_json(msg: Message) -> dict[str, Any]: + """Convert a Message object into a JSON-compatible dictionary.""" + + def sanitise_header(h: Header | str) -> str: + if isinstance(h, Header): + chunks = [] + for bytes, encoding in decode_header(h): + if encoding == "unknown-8bit": + try: + # See if UTF-8 works + bytes.decode("utf-8") + encoding = "utf-8" + except UnicodeDecodeError: + # If not, latin1 at least won't fail + encoding = "latin1" + chunks.append((bytes, encoding)) + return str(make_header(chunks)) + return str(h) + + result 
= {} + for field, multi in METADATA_FIELDS: + if field not in msg: + continue + key = json_name(field) + if multi: + value: str | list[str] = [ + sanitise_header(v) for v in msg.get_all(field) # type: ignore + ] + else: + value = sanitise_header(msg.get(field)) # type: ignore + if key == "keywords": + # Accept both comma-separated and space-separated + # forms, for better compatibility with old data. + if "," in value: + value = [v.strip() for v in value.split(",")] + else: + value = value.split() + result[key] = value + + payload = cast(str, msg.get_payload()) + if payload: + result["description"] = payload + + return result diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/base.py b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/base.py new file mode 100644 index 0000000..230e114 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/base.py @@ -0,0 +1,685 @@ +from __future__ import annotations + +import csv +import email.message +import functools +import json +import logging +import pathlib +import re +import zipfile +from collections.abc import Collection, Container, Iterable, Iterator +from typing import ( + IO, + Any, + NamedTuple, + Protocol, + Union, +) + +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import Version + +from pip._internal.exceptions import NoneMetadataError +from pip._internal.locations import site_packages, user_site +from pip._internal.models.direct_url import ( + DIRECT_URL_METADATA_NAME, + DirectUrl, + DirectUrlValidationError, +) +from pip._internal.utils.compat import stdlib_pkgs # TODO: Move definition here. +from pip._internal.utils.egg_link import egg_link_path_from_sys_path +from pip._internal.utils.misc import is_local, normalize_path +from pip._internal.utils.urls import url_to_path + +from ._json import msg_to_json + +InfoPath = Union[str, pathlib.PurePath] + +logger = logging.getLogger(__name__) + + +class BaseEntryPoint(Protocol): + @property + def name(self) -> str: + raise NotImplementedError() + + @property + def value(self) -> str: + raise NotImplementedError() + + @property + def group(self) -> str: + raise NotImplementedError() + + +def _convert_installed_files_path( + entry: tuple[str, ...], + info: tuple[str, ...], +) -> str: + """Convert a legacy installed-files.txt path into modern RECORD path. + + The legacy format stores paths relative to the info directory, while the + modern format stores paths relative to the package root, e.g. the + site-packages directory. + + :param entry: Path parts of the installed-files.txt entry. + :param info: Path parts of the egg-info directory relative to package root. + :returns: The converted entry. + + For best compatibility with symlinks, this does not use ``abspath()`` or + ``Path.resolve()``, but tries to work with path parts: + + 1. While ``entry`` starts with ``..``, remove the equal amounts of parts + from ``info``; if ``info`` is empty, start appending ``..`` instead. + 2. Join the two directly. 
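+
+    A hypothetical example (POSIX separators): ``entry=("..", "pkg", "a.py")``
+    with ``info=("pkg.egg-info",)`` yields ``pkg/a.py``, while
+    ``entry=("..", "..", "x.py")`` with the same ``info`` yields ``../x.py``.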
+ """ + while entry and entry[0] == "..": + if not info or info[-1] == "..": + info += ("..",) + else: + info = info[:-1] + entry = entry[1:] + return str(pathlib.Path(*info, *entry)) + + +class RequiresEntry(NamedTuple): + requirement: str + extra: str + marker: str + + +class BaseDistribution(Protocol): + @classmethod + def from_directory(cls, directory: str) -> BaseDistribution: + """Load the distribution from a metadata directory. + + :param directory: Path to a metadata directory, e.g. ``.dist-info``. + """ + raise NotImplementedError() + + @classmethod + def from_metadata_file_contents( + cls, + metadata_contents: bytes, + filename: str, + project_name: str, + ) -> BaseDistribution: + """Load the distribution from the contents of a METADATA file. + + This is used to implement PEP 658 by generating a "shallow" dist object that can + be used for resolution without downloading or building the actual dist yet. + + :param metadata_contents: The contents of a METADATA file. + :param filename: File name for the dist with this metadata. + :param project_name: Name of the project this dist represents. + """ + raise NotImplementedError() + + @classmethod + def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: + """Load the distribution from a given wheel. + + :param wheel: A concrete wheel definition. + :param name: File name of the wheel. + + :raises InvalidWheel: Whenever loading of the wheel causes a + :py:exc:`zipfile.BadZipFile` exception to be thrown. + :raises UnsupportedWheel: If the wheel is a valid zip, but malformed + internally. + """ + raise NotImplementedError() + + def __repr__(self) -> str: + return f"{self.raw_name} {self.raw_version} ({self.location})" + + def __str__(self) -> str: + return f"{self.raw_name} {self.raw_version}" + + @property + def location(self) -> str | None: + """Where the distribution is loaded from. + + A string value is not necessarily a filesystem path, since distributions + can be loaded from other sources, e.g. arbitrary zip archives. ``None`` + means the distribution is created in-memory. + + Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If + this is a symbolic link, we want to preserve the relative path between + it and files in the distribution. + """ + raise NotImplementedError() + + @property + def editable_project_location(self) -> str | None: + """The project location for editable distributions. + + This is the directory where pyproject.toml or setup.py is located. + None if the distribution is not installed in editable mode. + """ + # TODO: this property is relatively costly to compute, memoize it ? + direct_url = self.direct_url + if direct_url: + if direct_url.is_local_editable(): + return url_to_path(direct_url.url) + else: + # Search for an .egg-link file by walking sys.path, as it was + # done before by dist_is_editable(). + egg_link_path = egg_link_path_from_sys_path(self.raw_name) + if egg_link_path: + # TODO: get project location from second line of egg_link file + # (https://github.com/pypa/pip/issues/10243) + return self.location + return None + + @property + def installed_location(self) -> str | None: + """The distribution's "installed" location. + + This should generally be a ``site-packages`` directory. This is + usually ``dist.location``, except for legacy develop-installed packages, + where ``dist.location`` is the source code location, and this is where + the ``.egg-link`` file is. + + The returned location is normalized (in particular, with symlinks removed). 
+        """
+        raise NotImplementedError()
+
+    @property
+    def info_location(self) -> str | None:
+        """Location of the .[egg|dist]-info directory or file.
+
+        Similarly to ``location``, a string value is not necessarily a
+        filesystem path. ``None`` means the distribution is created in-memory.
+
+        For a modern .dist-info installation on disk, this should be something
+        like ``{location}/{raw_name}-{version}.dist-info``.
+
+        Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
+        this is a symbolic link, we want to preserve the relative path between
+        it and other files in the distribution.
+        """
+        raise NotImplementedError()
+
+    @property
+    def installed_by_distutils(self) -> bool:
+        """Whether this distribution is installed with the legacy distutils format.
+
+        A distribution installed with "raw" distutils not patched by setuptools
+        uses one single file at ``info_location`` to store metadata. We need to
+        treat this specially on uninstallation.
+        """
+        info_location = self.info_location
+        if not info_location:
+            return False
+        return pathlib.Path(info_location).is_file()
+
+    @property
+    def installed_as_egg(self) -> bool:
+        """Whether this distribution is installed as an egg.
+
+        This usually indicates the distribution was installed by (older versions
+        of) easy_install.
+        """
+        location = self.location
+        if not location:
+            return False
+        # XXX if the distribution is a zipped egg, location has a trailing /
+        # so we resort to pathlib.Path to check the suffix in a reliable way.
+        return pathlib.Path(location).suffix == ".egg"
+
+    @property
+    def installed_with_setuptools_egg_info(self) -> bool:
+        """Whether this distribution is installed with the ``.egg-info`` format.
+
+        This usually indicates the distribution was installed with setuptools
+        with an old pip version or with ``single-version-externally-managed``.
+
+        Note that this ensures the metadata store is a directory. distutils can
+        also install an ``.egg-info``, but as a file, not a directory. This
+        property is *False* for that case. Also see ``installed_by_distutils``.
+        """
+        info_location = self.info_location
+        if not info_location:
+            return False
+        if not info_location.endswith(".egg-info"):
+            return False
+        return pathlib.Path(info_location).is_dir()
+
+    @property
+    def installed_with_dist_info(self) -> bool:
+        """Whether this distribution is installed with the "modern format".
+
+        This indicates a "modern" installation, e.g. storing metadata in the
+        ``.dist-info`` directory. This applies to installations made by
+        setuptools (but through pip, not directly), or anything using the
+        standardized build backend interface (PEP 517).
+        """
+        info_location = self.info_location
+        if not info_location:
+            return False
+        if not info_location.endswith(".dist-info"):
+            return False
+        return pathlib.Path(info_location).is_dir()
+
+    @property
+    def canonical_name(self) -> NormalizedName:
+        raise NotImplementedError()
+
+    @property
+    def version(self) -> Version:
+        raise NotImplementedError()
+
+    @property
+    def raw_version(self) -> str:
+        raise NotImplementedError()
+
+    @property
+    def setuptools_filename(self) -> str:
+        """Convert a project name to its setuptools-compatible filename.
+
+        This is a copy of ``pkg_resources.to_filename()`` for compatibility.
+        """
+        return self.raw_name.replace("-", "_")
+
+    @property
+    def direct_url(self) -> DirectUrl | None:
+        """Obtain a DirectUrl from this distribution.
+
+        Returns None if the distribution has no `direct_url.json` metadata,
+        or if `direct_url.json` is invalid.
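+
+        A hypothetical editable install's ``direct_url.json`` might contain
+        ``{"url": "file:///src/example", "dir_info": {"editable": true}}``
+        (the PEP 610 format).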
+ """ + try: + content = self.read_text(DIRECT_URL_METADATA_NAME) + except FileNotFoundError: + return None + try: + return DirectUrl.from_json(content) + except ( + UnicodeDecodeError, + json.JSONDecodeError, + DirectUrlValidationError, + ) as e: + logger.warning( + "Error parsing %s for %s: %s", + DIRECT_URL_METADATA_NAME, + self.canonical_name, + e, + ) + return None + + @property + def installer(self) -> str: + try: + installer_text = self.read_text("INSTALLER") + except (OSError, ValueError, NoneMetadataError): + return "" # Fail silently if the installer file cannot be read. + for line in installer_text.splitlines(): + cleaned_line = line.strip() + if cleaned_line: + return cleaned_line + return "" + + @property + def requested(self) -> bool: + return self.is_file("REQUESTED") + + @property + def editable(self) -> bool: + return bool(self.editable_project_location) + + @property + def local(self) -> bool: + """If distribution is installed in the current virtual environment. + + Always True if we're not in a virtualenv. + """ + if self.installed_location is None: + return False + return is_local(self.installed_location) + + @property + def in_usersite(self) -> bool: + if self.installed_location is None or user_site is None: + return False + return self.installed_location.startswith(normalize_path(user_site)) + + @property + def in_site_packages(self) -> bool: + if self.installed_location is None or site_packages is None: + return False + return self.installed_location.startswith(normalize_path(site_packages)) + + def is_file(self, path: InfoPath) -> bool: + """Check whether an entry in the info directory is a file.""" + raise NotImplementedError() + + def iter_distutils_script_names(self) -> Iterator[str]: + """Find distutils 'scripts' entries metadata. + + If 'scripts' is supplied in ``setup.py``, distutils records those in the + installed distribution's ``scripts`` directory, a file for each script. + """ + raise NotImplementedError() + + def read_text(self, path: InfoPath) -> str: + """Read a file in the info directory. + + :raise FileNotFoundError: If ``path`` does not exist in the directory. + :raise NoneMetadataError: If ``path`` exists in the info directory, but + cannot be read. + """ + raise NotImplementedError() + + def iter_entry_points(self) -> Iterable[BaseEntryPoint]: + raise NotImplementedError() + + def _metadata_impl(self) -> email.message.Message: + raise NotImplementedError() + + @functools.cached_property + def metadata(self) -> email.message.Message: + """Metadata of distribution parsed from e.g. METADATA or PKG-INFO. + + This should return an empty message if the metadata file is unavailable. + + :raises NoneMetadataError: If the metadata file is available, but does + not contain valid metadata. + """ + metadata = self._metadata_impl() + self._add_egg_info_requires(metadata) + return metadata + + @property + def metadata_dict(self) -> dict[str, Any]: + """PEP 566 compliant JSON-serializable representation of METADATA or PKG-INFO. + + This should return an empty dict if the metadata file is unavailable. + + :raises NoneMetadataError: If the metadata file is available, but does + not contain valid metadata. 
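+
+        A hypothetical result: ``{"metadata_version": "2.1", "name": "example",
+        "version": "1.0", "requires_dist": ["requests>=2"]}``; multiple-use
+        fields become lists, and keys are lowercased with dashes turned into
+        underscores.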
+ """ + return msg_to_json(self.metadata) + + @property + def metadata_version(self) -> str | None: + """Value of "Metadata-Version:" in distribution metadata, if available.""" + return self.metadata.get("Metadata-Version") + + @property + def raw_name(self) -> str: + """Value of "Name:" in distribution metadata.""" + # The metadata should NEVER be missing the Name: key, but if it somehow + # does, fall back to the known canonical name. + return self.metadata.get("Name", self.canonical_name) + + @property + def requires_python(self) -> SpecifierSet: + """Value of "Requires-Python:" in distribution metadata. + + If the key does not exist or contains an invalid value, an empty + SpecifierSet should be returned. + """ + value = self.metadata.get("Requires-Python") + if value is None: + return SpecifierSet() + try: + # Convert to str to satisfy the type checker; this can be a Header object. + spec = SpecifierSet(str(value)) + except InvalidSpecifier as e: + message = "Package %r has an invalid Requires-Python: %s" + logger.warning(message, self.raw_name, e) + return SpecifierSet() + return spec + + def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: + """Dependencies of this distribution. + + For modern .dist-info distributions, this is the collection of + "Requires-Dist:" entries in distribution metadata. + """ + raise NotImplementedError() + + def iter_raw_dependencies(self) -> Iterable[str]: + """Raw Requires-Dist metadata.""" + return self.metadata.get_all("Requires-Dist", []) + + def iter_provided_extras(self) -> Iterable[NormalizedName]: + """Extras provided by this distribution. + + For modern .dist-info distributions, this is the collection of + "Provides-Extra:" entries in distribution metadata. + + The return value of this function is expected to be normalised names, + per PEP 685, with the returned value being handled appropriately by + `iter_dependencies`. + """ + raise NotImplementedError() + + def _iter_declared_entries_from_record(self) -> Iterator[str] | None: + try: + text = self.read_text("RECORD") + except FileNotFoundError: + return None + # This extra Path-str cast normalizes entries. + return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines())) + + def _iter_declared_entries_from_legacy(self) -> Iterator[str] | None: + try: + text = self.read_text("installed-files.txt") + except FileNotFoundError: + return None + paths = (p for p in text.splitlines(keepends=False) if p) + root = self.location + info = self.info_location + if root is None or info is None: + return paths + try: + info_rel = pathlib.Path(info).relative_to(root) + except ValueError: # info is not relative to root. + return paths + if not info_rel.parts: # info *is* root. + return paths + return ( + _convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts) + for p in paths + ) + + def iter_declared_entries(self) -> Iterator[str] | None: + """Iterate through file entries declared in this distribution. + + For modern .dist-info distributions, this is the files listed in the + ``RECORD`` metadata file. For legacy setuptools distributions, this + comes from ``installed-files.txt``, with entries normalized to be + compatible with the format used by ``RECORD``. + + :return: An iterator for listed entries, or None if the distribution + contains neither ``RECORD`` nor ``installed-files.txt``. 
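+
+        A hypothetical sequence for a dist-info install:
+        ``example/__init__.py``, ``example-1.0.dist-info/RECORD``, ... with
+        entries relative to the site-packages root.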
+        """
+        return (
+            self._iter_declared_entries_from_record()
+            or self._iter_declared_entries_from_legacy()
+        )
+
+    def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]:
+        """Parse a ``requires.txt`` in an egg-info directory.
+
+        This is an INI-ish format where an egg-info stores dependencies. A
+        section name describes an extra, and optionally environment markers,
+        while each entry is an arbitrary string (not a key-value pair)
+        representing a dependency as a requirement string (no markers).
+
+        There is a construct in ``importlib.metadata`` called ``Sectioned`` that
+        does mostly the same, but the format is currently considered private.
+        """
+        try:
+            content = self.read_text("requires.txt")
+        except FileNotFoundError:
+            return
+        extra = marker = ""  # Section-less entries don't have markers.
+        for line in content.splitlines():
+            line = line.strip()
+            if not line or line.startswith("#"):  # Comment; ignored.
+                continue
+            if line.startswith("[") and line.endswith("]"):  # A section header.
+                extra, _, marker = line.strip("[]").partition(":")
+                continue
+            yield RequiresEntry(requirement=line, extra=extra, marker=marker)
+
+    def _iter_egg_info_extras(self) -> Iterable[str]:
+        """Get extras from the egg-info directory."""
+        known_extras = {""}
+        for entry in self._iter_requires_txt_entries():
+            extra = canonicalize_name(entry.extra)
+            if extra in known_extras:
+                continue
+            known_extras.add(extra)
+            yield extra
+
+    def _iter_egg_info_dependencies(self) -> Iterable[str]:
+        """Get distribution dependencies from the egg-info directory.
+
+        To ease parsing, this converts a legacy dependency entry into a PEP 508
+        requirement string. Like ``_iter_requires_txt_entries()``, there is code
+        in ``importlib.metadata`` that does mostly the same, but does not do
+        exactly what we need.
+
+        Namely, ``importlib.metadata`` does not normalize the extra name before
+        putting it into the requirement string, which causes marker comparison
+        to fail because the dist-info format does normalize. This is consistent
+        in all currently available PEP 517 backends, although not standardized.
+        """
+        for entry in self._iter_requires_txt_entries():
+            extra = canonicalize_name(entry.extra)
+            if extra and entry.marker:
+                marker = f'({entry.marker}) and extra == "{extra}"'
+            elif extra:
+                marker = f'extra == "{extra}"'
+            elif entry.marker:
+                marker = entry.marker
+            else:
+                marker = ""
+            if marker:
+                yield f"{entry.requirement} ; {marker}"
+            else:
+                yield entry.requirement
+
+    def _add_egg_info_requires(self, metadata: email.message.Message) -> None:
+        """Add egg-info requires.txt information to the metadata."""
+        if not metadata.get_all("Requires-Dist"):
+            for dep in self._iter_egg_info_dependencies():
+                metadata["Requires-Dist"] = dep
+        if not metadata.get_all("Provides-Extra"):
+            for extra in self._iter_egg_info_extras():
+                metadata["Provides-Extra"] = extra
+
+
+class BaseEnvironment:
+    """An environment containing distributions to introspect."""
+
+    @classmethod
+    def default(cls) -> BaseEnvironment:
+        raise NotImplementedError()
+
+    @classmethod
+    def from_paths(cls, paths: list[str] | None) -> BaseEnvironment:
+        raise NotImplementedError()
+
+    def get_distribution(self, name: str) -> BaseDistribution | None:
+        """Given a requirement name, return the installed distribution.
+
+        The name may not be normalized. The implementation must canonicalize
+        it for lookup.
+        """
+        raise NotImplementedError()
+
+    def _iter_distributions(self) -> Iterator[BaseDistribution]:
+        """Iterate through installed distributions.
+
+        This function should be implemented by subclasses, but never called
+        directly. Use the public ``iter_all_distributions()`` instead, which
+        implements additional logic to make sure the distributions are valid.
+        """
+        raise NotImplementedError()
+
+    def iter_all_distributions(self) -> Iterator[BaseDistribution]:
+        """Iterate through all installed distributions without any filtering."""
+        for dist in self._iter_distributions():
+            # Make sure the distribution actually comes from a valid Python
+            # packaging distribution. Pip's AdjacentTempDirectory leaves folders
+            # e.g. ``~atplotlib.dist-info`` if cleanup was interrupted. The
+            # valid project name pattern is taken from PEP 508.
+            project_name_valid = re.match(
+                r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$",
+                dist.canonical_name,
+                flags=re.IGNORECASE,
+            )
+            if not project_name_valid:
+                logger.warning(
+                    "Ignoring invalid distribution %s (%s)",
+                    dist.canonical_name,
+                    dist.location,
+                )
+                continue
+            yield dist
+
+    def iter_installed_distributions(
+        self,
+        local_only: bool = True,
+        skip: Container[str] = stdlib_pkgs,
+        include_editables: bool = True,
+        editables_only: bool = False,
+        user_only: bool = False,
+    ) -> Iterator[BaseDistribution]:
+        """Return an iterator of installed distributions.
+
+        This is based on ``iter_all_distributions()`` with additional filtering
+        options. Note that ``iter_installed_distributions()`` without arguments
+        is *not* equal to ``iter_all_distributions()``, since some of the
+        configurations exclude packages by default.
+
+        :param local_only: If True (default), only return installations
+            local to the current virtualenv, if in a virtualenv.
+        :param skip: An iterable of canonicalized project names to ignore;
+            defaults to ``stdlib_pkgs``.
+        :param include_editables: If False, don't report editables.
+        :param editables_only: If True, only report editables.
+        :param user_only: If True, only report installations in the user
+            site directory.
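+
+        A hypothetical call: ``env.iter_installed_distributions(user_only=True)``
+        yields only distributions whose ``in_usersite`` is true, after the
+        default local-only and skip filters are applied.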
+ """ + it = self.iter_all_distributions() + if local_only: + it = (d for d in it if d.local) + if not include_editables: + it = (d for d in it if not d.editable) + if editables_only: + it = (d for d in it if d.editable) + if user_only: + it = (d for d in it if d.in_usersite) + return (d for d in it if d.canonical_name not in skip) + + +class Wheel(Protocol): + location: str + + def as_zipfile(self) -> zipfile.ZipFile: + raise NotImplementedError() + + +class FilesystemWheel(Wheel): + def __init__(self, location: str) -> None: + self.location = location + + def as_zipfile(self) -> zipfile.ZipFile: + return zipfile.ZipFile(self.location, allowZip64=True) + + +class MemoryWheel(Wheel): + def __init__(self, location: str, stream: IO[bytes]) -> None: + self.location = location + self.stream = stream + + def as_zipfile(self) -> zipfile.ZipFile: + return zipfile.ZipFile(self.stream, allowZip64=True) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__init__.py new file mode 100644 index 0000000..a779138 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__init__.py @@ -0,0 +1,6 @@ +from ._dists import Distribution +from ._envs import Environment + +__all__ = ["NAME", "Distribution", "Environment"] + +NAME = "importlib" diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..fd919e1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-312.pyc new file mode 100644 index 0000000..23d92ac Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-312.pyc new file mode 100644 index 0000000..e2b454d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-312.pyc new file mode 100644 index 0000000..429b704 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_compat.py b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_compat.py new file mode 100644 index 0000000..7de614d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_compat.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import importlib.metadata +import os +from typing import Any, Protocol, cast + +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name + + +class 
BadMetadata(ValueError):
+    def __init__(self, dist: importlib.metadata.Distribution, *, reason: str) -> None:
+        self.dist = dist
+        self.reason = reason
+
+    def __str__(self) -> str:
+        return f"Bad metadata in {self.dist} ({self.reason})"
+
+
+class BasePath(Protocol):
+    """A protocol that various path objects conform to.
+
+    This exists because importlib.metadata uses both ``pathlib.Path`` and
+    ``zipfile.Path``, and we need a common base for type hints (Union does not
+    work well since ``zipfile.Path`` is too new for our linter setup).
+
+    This is not meant to be exhaustive; it only contains things that are
+    present in both classes *that we need*.
+    """
+
+    @property
+    def name(self) -> str:
+        raise NotImplementedError()
+
+    @property
+    def parent(self) -> BasePath:
+        raise NotImplementedError()
+
+
+def get_info_location(d: importlib.metadata.Distribution) -> BasePath | None:
+    """Find the path to the distribution's metadata directory.
+
+    HACK: This relies on importlib.metadata's private ``_path`` attribute. Not
+    all distributions exist on disk, so importlib.metadata is correct to not
+    expose the attribute as public. But pip's code base is old and not as clean,
+    so we do this to avoid having to rewrite too many things. Hopefully we can
+    eliminate this some day.
+    """
+    return getattr(d, "_path", None)
+
+
+def parse_name_and_version_from_info_directory(
+    dist: importlib.metadata.Distribution,
+) -> tuple[str | None, str | None]:
+    """Get a name and version from the metadata directory name.
+
+    This is much faster than reading distribution metadata.
+    """
+    info_location = get_info_location(dist)
+    if info_location is None:
+        return None, None
+
+    stem, suffix = os.path.splitext(info_location.name)
+    if suffix == ".dist-info":
+        name, sep, version = stem.partition("-")
+        if sep:
+            return name, version
+
+    if suffix == ".egg-info":
+        name = stem.split("-", 1)[0]
+        return name, None
+
+    return None, None
+
+
+def get_dist_canonical_name(dist: importlib.metadata.Distribution) -> NormalizedName:
+    """Get the distribution's normalized name.
+
+    The ``name`` attribute is only available in Python 3.10 or later. We are
+    targeting exactly that, but Mypy does not know this.
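+
+    For example, a distribution named ``Foo_Bar`` canonicalizes to ``foo-bar``.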
+    """
+    if name := parse_name_and_version_from_info_directory(dist)[0]:
+        return canonicalize_name(name)
+
+    name = cast(Any, dist).name
+    if not isinstance(name, str):
+        raise BadMetadata(dist, reason="invalid metadata entry 'name'")
+    return canonicalize_name(name)
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_dists.py b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_dists.py
new file mode 100644
index 0000000..97363af
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_dists.py
@@ -0,0 +1,223 @@
+from __future__ import annotations
+
+import email.message
+import importlib.metadata
+import pathlib
+import zipfile
+from collections.abc import Collection, Iterable, Iterator, Mapping, Sequence
+from os import PathLike
+from typing import (
+    cast,
+)
+
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+from pip._vendor.packaging.version import Version
+from pip._vendor.packaging.version import parse as parse_version
+
+from pip._internal.exceptions import InvalidWheel, UnsupportedWheel
+from pip._internal.metadata.base import (
+    BaseDistribution,
+    BaseEntryPoint,
+    InfoPath,
+    Wheel,
+)
+from pip._internal.utils.misc import normalize_path
+from pip._internal.utils.packaging import get_requirement
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file
+
+from ._compat import (
+    BasePath,
+    get_dist_canonical_name,
+    parse_name_and_version_from_info_directory,
+)
+
+
+class WheelDistribution(importlib.metadata.Distribution):
+    """An ``importlib.metadata.Distribution`` read from a wheel.
+
+    Although ``importlib.metadata.PathDistribution`` accepts ``zipfile.Path``,
+    its implementation is too "lazy" for pip's needs (we can't keep the ZipFile
+    handle open for the entire lifetime of the distribution object).
+
+    This implementation eagerly reads the entire metadata directory into
+    memory instead, and operates from that.
+    """
+
+    def __init__(
+        self,
+        files: Mapping[pathlib.PurePosixPath, bytes],
+        info_location: pathlib.PurePosixPath,
+    ) -> None:
+        self._files = files
+        self.info_location = info_location
+
+    @classmethod
+    def from_zipfile(
+        cls,
+        zf: zipfile.ZipFile,
+        name: str,
+        location: str,
+    ) -> WheelDistribution:
+        info_dir, _ = parse_wheel(zf, name)
+        paths = (
+            (name, pathlib.PurePosixPath(name.split("/", 1)[-1]))
+            for name in zf.namelist()
+            if name.startswith(f"{info_dir}/")
+        )
+        files = {
+            relpath: read_wheel_metadata_file(zf, fullpath)
+            for fullpath, relpath in paths
+        }
+        info_location = pathlib.PurePosixPath(location, info_dir)
+        return cls(files, info_location)
+
+    def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]:
+        # Only allow iterating through the metadata directory.
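+        # self._files is keyed by paths relative to the metadata directory,
+        # so the membership test below only accepts paths found in there.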
+ if pathlib.PurePosixPath(str(path)) in self._files: + return iter(self._files) + raise FileNotFoundError(path) + + def read_text(self, filename: str) -> str | None: + try: + data = self._files[pathlib.PurePosixPath(filename)] + except KeyError: + return None + try: + text = data.decode("utf-8") + except UnicodeDecodeError as e: + wheel = self.info_location.parent + error = f"Error decoding metadata for {wheel}: {e} in {filename} file" + raise UnsupportedWheel(error) + return text + + def locate_file(self, path: str | PathLike[str]) -> pathlib.Path: + # This method doesn't make sense for our in-memory wheel, but the API + # requires us to define it. + raise NotImplementedError + + +class Distribution(BaseDistribution): + def __init__( + self, + dist: importlib.metadata.Distribution, + info_location: BasePath | None, + installed_location: BasePath | None, + ) -> None: + self._dist = dist + self._info_location = info_location + self._installed_location = installed_location + + @classmethod + def from_directory(cls, directory: str) -> BaseDistribution: + info_location = pathlib.Path(directory) + dist = importlib.metadata.Distribution.at(info_location) + return cls(dist, info_location, info_location.parent) + + @classmethod + def from_metadata_file_contents( + cls, + metadata_contents: bytes, + filename: str, + project_name: str, + ) -> BaseDistribution: + # Generate temp dir to contain the metadata file, and write the file contents. + temp_dir = pathlib.Path( + TempDirectory(kind="metadata", globally_managed=True).path + ) + metadata_path = temp_dir / "METADATA" + metadata_path.write_bytes(metadata_contents) + # Construct dist pointing to the newly created directory. + dist = importlib.metadata.Distribution.at(metadata_path.parent) + return cls(dist, metadata_path.parent, None) + + @classmethod + def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: + try: + with wheel.as_zipfile() as zf: + dist = WheelDistribution.from_zipfile(zf, name, wheel.location) + except zipfile.BadZipFile as e: + raise InvalidWheel(wheel.location, name) from e + return cls(dist, dist.info_location, pathlib.PurePosixPath(wheel.location)) + + @property + def location(self) -> str | None: + if self._info_location is None: + return None + return str(self._info_location.parent) + + @property + def info_location(self) -> str | None: + if self._info_location is None: + return None + return str(self._info_location) + + @property + def installed_location(self) -> str | None: + if self._installed_location is None: + return None + return normalize_path(str(self._installed_location)) + + @property + def canonical_name(self) -> NormalizedName: + return get_dist_canonical_name(self._dist) + + @property + def version(self) -> Version: + if version := parse_name_and_version_from_info_directory(self._dist)[1]: + return parse_version(version) + return parse_version(self._dist.version) + + @property + def raw_version(self) -> str: + return self._dist.version + + def is_file(self, path: InfoPath) -> bool: + return self._dist.read_text(str(path)) is not None + + def iter_distutils_script_names(self) -> Iterator[str]: + # A distutils installation is always "flat" (not in e.g. egg form), so + # if this distribution's info location is NOT a pathlib.Path (but e.g. + # zipfile.Path), it can never contain any distutils scripts. 
+        if not isinstance(self._info_location, pathlib.Path):
+            return
+        for child in self._info_location.joinpath("scripts").iterdir():
+            yield child.name
+
+    def read_text(self, path: InfoPath) -> str:
+        content = self._dist.read_text(str(path))
+        if content is None:
+            raise FileNotFoundError(path)
+        return content
+
+    def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
+        # importlib.metadata's EntryPoint structure satisfies BaseEntryPoint.
+        return self._dist.entry_points
+
+    def _metadata_impl(self) -> email.message.Message:
+        # From Python 3.10+, importlib.metadata declares PackageMetadata as the
+        # return type. This protocol is unfortunately a disaster now and misses
+        # a ton of fields that we need, including get() and get_payload(). We
+        # rely on the implementation that the object is actually a Message now,
+        # until upstream can improve the protocol. (python/cpython#94952)
+        return cast(email.message.Message, self._dist.metadata)
+
+    def iter_provided_extras(self) -> Iterable[NormalizedName]:
+        return [
+            canonicalize_name(extra)
+            for extra in self.metadata.get_all("Provides-Extra", [])
+        ]
+
+    def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
+        contexts: Sequence[dict[str, str]] = [{"extra": e} for e in extras]
+        for req_string in self.metadata.get_all("Requires-Dist", []):
+            # strip() because email.message.Message.get_all() may return a leading \n
+            # in case a long header was wrapped.
+            req = get_requirement(req_string.strip())
+            if not req.marker:
+                yield req
+            elif not extras and req.marker.evaluate({"extra": ""}):
+                yield req
+            elif any(req.marker.evaluate(context) for context in contexts):
+                yield req
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_envs.py b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_envs.py
new file mode 100644
index 0000000..71a73b7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/importlib/_envs.py
@@ -0,0 +1,143 @@
+from __future__ import annotations
+
+import importlib.metadata
+import logging
+import os
+import pathlib
+import sys
+import zipfile
+from collections.abc import Iterator, Sequence
+from typing import Optional
+
+from pip._vendor.packaging.utils import (
+    InvalidWheelFilename,
+    NormalizedName,
+    canonicalize_name,
+    parse_wheel_filename,
+)
+
+from pip._internal.metadata.base import BaseDistribution, BaseEnvironment
+from pip._internal.utils.filetypes import WHEEL_EXTENSION
+
+from ._compat import BadMetadata, BasePath, get_dist_canonical_name, get_info_location
+from ._dists import Distribution
+
+logger = logging.getLogger(__name__)
+
+
+def _looks_like_wheel(location: str) -> bool:
+    if not location.endswith(WHEEL_EXTENSION):
+        return False
+    if not os.path.isfile(location):
+        return False
+    try:
+        parse_wheel_filename(os.path.basename(location))
+    except InvalidWheelFilename:
+        return False
+    return zipfile.is_zipfile(location)
+
+
+class _DistributionFinder:
+    """Finder to locate distributions.
+
+    The main purpose of this class is to memoize found distributions' names, so
+    only one distribution is returned for each package name. A lot of pip code
+    assumes this (because it is setuptools's behavior), and not doing the same
+    can potentially cause a distribution in a lower precedence path to override
+    a higher precedence one if the caller is not careful.
+
+    Eventually we probably want to make it possible to see lower precedence
+    installations as well. It's a useful feature, after all.
+ """ + + FoundResult = tuple[importlib.metadata.Distribution, Optional[BasePath]] + + def __init__(self) -> None: + self._found_names: set[NormalizedName] = set() + + def _find_impl(self, location: str) -> Iterator[FoundResult]: + """Find distributions in a location.""" + # Skip looking inside a wheel. Since a package inside a wheel is not + # always valid (due to .data directories etc.), its .dist-info entry + # should not be considered an installed distribution. + if _looks_like_wheel(location): + return + # To know exactly where we find a distribution, we have to feed in the + # paths one by one, instead of dumping the list to importlib.metadata. + for dist in importlib.metadata.distributions(path=[location]): + info_location = get_info_location(dist) + try: + name = get_dist_canonical_name(dist) + except BadMetadata as e: + logger.warning("Skipping %s due to %s", info_location, e.reason) + continue + if name in self._found_names: + continue + self._found_names.add(name) + yield dist, info_location + + def find(self, location: str) -> Iterator[BaseDistribution]: + """Find distributions in a location. + + The path can be either a directory, or a ZIP archive. + """ + for dist, info_location in self._find_impl(location): + if info_location is None: + installed_location: BasePath | None = None + else: + installed_location = info_location.parent + yield Distribution(dist, info_location, installed_location) + + def find_legacy_editables(self, location: str) -> Iterator[BaseDistribution]: + """Read location in egg-link files and return distributions in there. + + The path should be a directory; otherwise this returns nothing. This + follows how setuptools does this for compatibility. The first non-empty + line in the egg-link is read as a path (resolved against the egg-link's + containing directory if relative). Distributions found at that linked + location are returned. 
+ """ + path = pathlib.Path(location) + if not path.is_dir(): + return + for child in path.iterdir(): + if child.suffix != ".egg-link": + continue + with child.open() as f: + lines = (line.strip() for line in f) + target_rel = next((line for line in lines if line), "") + if not target_rel: + continue + target_location = str(path.joinpath(target_rel)) + for dist, info_location in self._find_impl(target_location): + yield Distribution(dist, info_location, path) + + +class Environment(BaseEnvironment): + def __init__(self, paths: Sequence[str]) -> None: + self._paths = paths + + @classmethod + def default(cls) -> BaseEnvironment: + return cls(sys.path) + + @classmethod + def from_paths(cls, paths: list[str] | None) -> BaseEnvironment: + if paths is None: + return cls(sys.path) + return cls(paths) + + def _iter_distributions(self) -> Iterator[BaseDistribution]: + finder = _DistributionFinder() + for location in self._paths: + yield from finder.find(location) + yield from finder.find_legacy_editables(location) + + def get_distribution(self, name: str) -> BaseDistribution | None: + canonical_name = canonicalize_name(name) + matches = ( + distribution + for distribution in self.iter_all_distributions() + if distribution.canonical_name == canonical_name + ) + return next(matches, None) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/metadata/pkg_resources.py b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/pkg_resources.py new file mode 100644 index 0000000..89fce8b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/metadata/pkg_resources.py @@ -0,0 +1,298 @@ +from __future__ import annotations + +import email.message +import email.parser +import logging +import os +import zipfile +from collections.abc import Collection, Iterable, Iterator, Mapping +from typing import ( + NamedTuple, +) + +from pip._vendor import pkg_resources +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import Version +from pip._vendor.packaging.version import parse as parse_version + +from pip._internal.exceptions import InvalidWheel, NoneMetadataError, UnsupportedWheel +from pip._internal.utils.egg_link import egg_link_path_from_location +from pip._internal.utils.misc import display_path, normalize_path +from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file + +from .base import ( + BaseDistribution, + BaseEntryPoint, + BaseEnvironment, + InfoPath, + Wheel, +) + +__all__ = ["NAME", "Distribution", "Environment"] + +logger = logging.getLogger(__name__) + +NAME = "pkg_resources" + + +class EntryPoint(NamedTuple): + name: str + value: str + group: str + + +class InMemoryMetadata: + """IMetadataProvider that reads metadata files from a dictionary. + + This also maps metadata decoding exceptions to our internal exception type. + """ + + def __init__(self, metadata: Mapping[str, bytes], wheel_name: str) -> None: + self._metadata = metadata + self._wheel_name = wheel_name + + def has_metadata(self, name: str) -> bool: + return name in self._metadata + + def get_metadata(self, name: str) -> str: + try: + return self._metadata[name].decode() + except UnicodeDecodeError as e: + # Augment the default error with the origin of the file. 
+ raise UnsupportedWheel( + f"Error decoding metadata for {self._wheel_name}: {e} in {name} file" + ) + + def get_metadata_lines(self, name: str) -> Iterable[str]: + return pkg_resources.yield_lines(self.get_metadata(name)) + + def metadata_isdir(self, name: str) -> bool: + return False + + def metadata_listdir(self, name: str) -> list[str]: + return [] + + def run_script(self, script_name: str, namespace: str) -> None: + pass + + +class Distribution(BaseDistribution): + def __init__(self, dist: pkg_resources.Distribution) -> None: + self._dist = dist + # This is populated lazily, to avoid loading metadata for all possible + # distributions eagerly. + self.__extra_mapping: Mapping[NormalizedName, str] | None = None + + @property + def _extra_mapping(self) -> Mapping[NormalizedName, str]: + if self.__extra_mapping is None: + self.__extra_mapping = { + canonicalize_name(extra): extra for extra in self._dist.extras + } + + return self.__extra_mapping + + @classmethod + def from_directory(cls, directory: str) -> BaseDistribution: + dist_dir = directory.rstrip(os.sep) + + # Build a PathMetadata object, from path to metadata. :wink: + base_dir, dist_dir_name = os.path.split(dist_dir) + metadata = pkg_resources.PathMetadata(base_dir, dist_dir) + + # Determine the correct Distribution object type. + if dist_dir.endswith(".egg-info"): + dist_cls = pkg_resources.Distribution + dist_name = os.path.splitext(dist_dir_name)[0] + else: + assert dist_dir.endswith(".dist-info") + dist_cls = pkg_resources.DistInfoDistribution + dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0] + + dist = dist_cls(base_dir, project_name=dist_name, metadata=metadata) + return cls(dist) + + @classmethod + def from_metadata_file_contents( + cls, + metadata_contents: bytes, + filename: str, + project_name: str, + ) -> BaseDistribution: + metadata_dict = { + "METADATA": metadata_contents, + } + dist = pkg_resources.DistInfoDistribution( + location=filename, + metadata=InMemoryMetadata(metadata_dict, filename), + project_name=project_name, + ) + return cls(dist) + + @classmethod + def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: + try: + with wheel.as_zipfile() as zf: + info_dir, _ = parse_wheel(zf, name) + metadata_dict = { + path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path) + for path in zf.namelist() + if path.startswith(f"{info_dir}/") + } + except zipfile.BadZipFile as e: + raise InvalidWheel(wheel.location, name) from e + except UnsupportedWheel as e: + raise UnsupportedWheel(f"{name} has an invalid wheel, {e}") + dist = pkg_resources.DistInfoDistribution( + location=wheel.location, + metadata=InMemoryMetadata(metadata_dict, wheel.location), + project_name=name, + ) + return cls(dist) + + @property + def location(self) -> str | None: + return self._dist.location + + @property + def installed_location(self) -> str | None: + egg_link = egg_link_path_from_location(self.raw_name) + if egg_link: + location = egg_link + elif self.location: + location = self.location + else: + return None + return normalize_path(location) + + @property + def info_location(self) -> str | None: + return self._dist.egg_info + + @property + def installed_by_distutils(self) -> bool: + # A distutils-installed distribution is provided by FileMetadata. This + # provider has a "path" attribute not present anywhere else. Not the + # best introspection logic, but pip has been doing this for a long time. 
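The directory-name handling in `from_directory()` above is worth seeing in isolation; here is a self-contained sketch (the function name and example paths are invented).

```python
# Standalone sketch of the info-directory parsing in from_directory() above:
# an .egg-info stem is the project name itself, while a .dist-info stem is
# "name-version". Function name and example paths are invented.
import os

def project_name_from_info_dir(dist_dir: str) -> str:
    _, dist_dir_name = os.path.split(dist_dir.rstrip(os.sep))
    stem = os.path.splitext(dist_dir_name)[0]
    if dist_dir.endswith(".egg-info"):
        return stem
    assert dist_dir.endswith(".dist-info")
    return stem.split("-")[0]

print(project_name_from_info_dir("/sp/requests-2.32.3.dist-info"))  # requests
print(project_name_from_info_dir("/sp/requests.egg-info"))          # requests
```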
+ try: + return bool(self._dist._provider.path) + except AttributeError: + return False + + @property + def canonical_name(self) -> NormalizedName: + return canonicalize_name(self._dist.project_name) + + @property + def version(self) -> Version: + return parse_version(self._dist.version) + + @property + def raw_version(self) -> str: + return self._dist.version + + def is_file(self, path: InfoPath) -> bool: + return self._dist.has_metadata(str(path)) + + def iter_distutils_script_names(self) -> Iterator[str]: + yield from self._dist.metadata_listdir("scripts") + + def read_text(self, path: InfoPath) -> str: + name = str(path) + if not self._dist.has_metadata(name): + raise FileNotFoundError(name) + content = self._dist.get_metadata(name) + if content is None: + raise NoneMetadataError(self, name) + return content + + def iter_entry_points(self) -> Iterable[BaseEntryPoint]: + for group, entries in self._dist.get_entry_map().items(): + for name, entry_point in entries.items(): + name, _, value = str(entry_point).partition("=") + yield EntryPoint(name=name.strip(), value=value.strip(), group=group) + + def _metadata_impl(self) -> email.message.Message: + """ + :raises NoneMetadataError: if the distribution reports `has_metadata()` + True but `get_metadata()` returns None. + """ + if isinstance(self._dist, pkg_resources.DistInfoDistribution): + metadata_name = "METADATA" + else: + metadata_name = "PKG-INFO" + try: + metadata = self.read_text(metadata_name) + except FileNotFoundError: + if self.location: + displaying_path = display_path(self.location) + else: + displaying_path = repr(self.location) + logger.warning("No metadata found in %s", displaying_path) + metadata = "" + feed_parser = email.parser.FeedParser() + feed_parser.feed(metadata) + return feed_parser.close() + + def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: + if extras: + relevant_extras = set(self._extra_mapping) & set( + map(canonicalize_name, extras) + ) + extras = [self._extra_mapping[extra] for extra in relevant_extras] + return self._dist.requires(extras) + + def iter_provided_extras(self) -> Iterable[NormalizedName]: + return self._extra_mapping.keys() + + +class Environment(BaseEnvironment): + def __init__(self, ws: pkg_resources.WorkingSet) -> None: + self._ws = ws + + @classmethod + def default(cls) -> BaseEnvironment: + return cls(pkg_resources.working_set) + + @classmethod + def from_paths(cls, paths: list[str] | None) -> BaseEnvironment: + return cls(pkg_resources.WorkingSet(paths)) + + def _iter_distributions(self) -> Iterator[BaseDistribution]: + for dist in self._ws: + yield Distribution(dist) + + def _search_distribution(self, name: str) -> BaseDistribution | None: + """Find a distribution matching the ``name`` in the environment. + + This searches from *all* distributions available in the environment, to + match the behavior of ``pkg_resources.get_distribution()``. + """ + canonical_name = canonicalize_name(name) + for dist in self.iter_all_distributions(): + if dist.canonical_name == canonical_name: + return dist + return None + + def get_distribution(self, name: str) -> BaseDistribution | None: + # Search the distribution by looking through the working set. + dist = self._search_distribution(name) + if dist: + return dist + + # If distribution could not be found, call working_set.require to + # update the working set, and try to find the distribution again. + # This might happen for e.g. 
when you install a package twice, once
+        # using setup.py develop and again using setup.py install. Now when
+        # running pip uninstall twice, the package gets removed from the
+        # working set in the first uninstall, so we have to populate the
+        # working set again so that pip knows about it and the package gets
+        # picked up and is successfully uninstalled the second time too.
+        try:
+            # We didn't pass in any version specifiers, so this can never
+            # raise pkg_resources.VersionConflict.
+            self._ws.require(name)
+        except pkg_resources.DistributionNotFound:
+            return None
+        return self._search_distribution(name)
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/__init__.py
new file mode 100644
index 0000000..7b1fc29
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/__init__.py
@@ -0,0 +1 @@
+"""A package that contains models that represent entities."""
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..12195f0
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/candidate.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/candidate.cpython-312.pyc
new file mode 100644
index 0000000..292fb3b
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/candidate.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-312.pyc
new file mode 100644
index 0000000..6768dc0
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/format_control.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/format_control.cpython-312.pyc
new file mode 100644
index 0000000..e41cff1
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/format_control.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/index.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/index.cpython-312.pyc
new file mode 100644
index 0000000..2dbc455
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/index.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-312.pyc
new file mode 100644
index 0000000..62250d3
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/link.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/link.cpython-312.pyc
new file mode 100644
index 0000000..e70fa92
Binary files /dev/null and
b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/link.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/pylock.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/pylock.cpython-312.pyc new file mode 100644 index 0000000..c1dc971 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/pylock.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/scheme.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/scheme.cpython-312.pyc new file mode 100644 index 0000000..ebe5b7a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/scheme.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-312.pyc new file mode 100644 index 0000000..7902204 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-312.pyc new file mode 100644 index 0000000..7f54684 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/target_python.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/target_python.cpython-312.pyc new file mode 100644 index 0000000..7feb851 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/target_python.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/wheel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/wheel.cpython-312.pyc new file mode 100644 index 0000000..110a398 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/models/__pycache__/wheel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/candidate.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/candidate.py new file mode 100644 index 0000000..f27f283 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/candidate.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass + +from pip._vendor.packaging.version import Version +from pip._vendor.packaging.version import parse as parse_version + +from pip._internal.models.link import Link + + +@dataclass(frozen=True) +class InstallationCandidate: + """Represents a potential "candidate" for installation.""" + + __slots__ = ["name", "version", "link"] + + name: str + version: Version + link: Link + + def __init__(self, name: str, version: str, link: Link) -> None: + object.__setattr__(self, "name", name) + object.__setattr__(self, "version", parse_version(version)) + object.__setattr__(self, "link", link) + + def __str__(self) -> str: + return f"{self.name!r} candidate (version {self.version} at {self.link})" diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/direct_url.py 
b/.venv/lib/python3.12/site-packages/pip/_internal/models/direct_url.py new file mode 100644 index 0000000..aefc670 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/direct_url.py @@ -0,0 +1,227 @@ +"""PEP 610""" + +from __future__ import annotations + +import json +import re +import urllib.parse +from collections.abc import Iterable +from dataclasses import dataclass +from typing import Any, ClassVar, TypeVar, Union + +__all__ = [ + "DirectUrl", + "DirectUrlValidationError", + "DirInfo", + "ArchiveInfo", + "VcsInfo", +] + +T = TypeVar("T") + +DIRECT_URL_METADATA_NAME = "direct_url.json" +ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$") + + +class DirectUrlValidationError(Exception): + pass + + +def _get( + d: dict[str, Any], expected_type: type[T], key: str, default: T | None = None +) -> T | None: + """Get value from dictionary and verify expected type.""" + if key not in d: + return default + value = d[key] + if not isinstance(value, expected_type): + raise DirectUrlValidationError( + f"{value!r} has unexpected type for {key} (expected {expected_type})" + ) + return value + + +def _get_required( + d: dict[str, Any], expected_type: type[T], key: str, default: T | None = None +) -> T: + value = _get(d, expected_type, key, default) + if value is None: + raise DirectUrlValidationError(f"{key} must have a value") + return value + + +def _exactly_one_of(infos: Iterable[InfoType | None]) -> InfoType: + infos = [info for info in infos if info is not None] + if not infos: + raise DirectUrlValidationError( + "missing one of archive_info, dir_info, vcs_info" + ) + if len(infos) > 1: + raise DirectUrlValidationError( + "more than one of archive_info, dir_info, vcs_info" + ) + assert infos[0] is not None + return infos[0] + + +def _filter_none(**kwargs: Any) -> dict[str, Any]: + """Make dict excluding None values.""" + return {k: v for k, v in kwargs.items() if v is not None} + + +@dataclass +class VcsInfo: + name: ClassVar = "vcs_info" + + vcs: str + commit_id: str + requested_revision: str | None = None + + @classmethod + def _from_dict(cls, d: dict[str, Any] | None) -> VcsInfo | None: + if d is None: + return None + return cls( + vcs=_get_required(d, str, "vcs"), + commit_id=_get_required(d, str, "commit_id"), + requested_revision=_get(d, str, "requested_revision"), + ) + + def _to_dict(self) -> dict[str, Any]: + return _filter_none( + vcs=self.vcs, + requested_revision=self.requested_revision, + commit_id=self.commit_id, + ) + + +class ArchiveInfo: + name = "archive_info" + + def __init__( + self, + hash: str | None = None, + hashes: dict[str, str] | None = None, + ) -> None: + # set hashes before hash, since the hash setter will further populate hashes + self.hashes = hashes + self.hash = hash + + @property + def hash(self) -> str | None: + return self._hash + + @hash.setter + def hash(self, value: str | None) -> None: + if value is not None: + # Auto-populate the hashes key to upgrade to the new format automatically. + # We don't back-populate the legacy hash key from hashes. 
+ try: + hash_name, hash_value = value.split("=", 1) + except ValueError: + raise DirectUrlValidationError( + f"invalid archive_info.hash format: {value!r}" + ) + if self.hashes is None: + self.hashes = {hash_name: hash_value} + elif hash_name not in self.hashes: + self.hashes = self.hashes.copy() + self.hashes[hash_name] = hash_value + self._hash = value + + @classmethod + def _from_dict(cls, d: dict[str, Any] | None) -> ArchiveInfo | None: + if d is None: + return None + return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes")) + + def _to_dict(self) -> dict[str, Any]: + return _filter_none(hash=self.hash, hashes=self.hashes) + + +@dataclass +class DirInfo: + name: ClassVar = "dir_info" + + editable: bool = False + + @classmethod + def _from_dict(cls, d: dict[str, Any] | None) -> DirInfo | None: + if d is None: + return None + return cls(editable=_get_required(d, bool, "editable", default=False)) + + def _to_dict(self) -> dict[str, Any]: + return _filter_none(editable=self.editable or None) + + +InfoType = Union[ArchiveInfo, DirInfo, VcsInfo] + + +@dataclass +class DirectUrl: + url: str + info: InfoType + subdirectory: str | None = None + + def _remove_auth_from_netloc(self, netloc: str) -> str: + if "@" not in netloc: + return netloc + user_pass, netloc_no_user_pass = netloc.split("@", 1) + if ( + isinstance(self.info, VcsInfo) + and self.info.vcs == "git" + and user_pass == "git" + ): + return netloc + if ENV_VAR_RE.match(user_pass): + return netloc + return netloc_no_user_pass + + @property + def redacted_url(self) -> str: + """url with user:password part removed unless it is formed with + environment variables as specified in PEP 610, or it is ``git`` + in the case of a git URL. + """ + purl = urllib.parse.urlsplit(self.url) + netloc = self._remove_auth_from_netloc(purl.netloc) + surl = urllib.parse.urlunsplit( + (purl.scheme, netloc, purl.path, purl.query, purl.fragment) + ) + return surl + + def validate(self) -> None: + self.from_dict(self.to_dict()) + + @classmethod + def from_dict(cls, d: dict[str, Any]) -> DirectUrl: + return DirectUrl( + url=_get_required(d, str, "url"), + subdirectory=_get(d, str, "subdirectory"), + info=_exactly_one_of( + [ + ArchiveInfo._from_dict(_get(d, dict, "archive_info")), + DirInfo._from_dict(_get(d, dict, "dir_info")), + VcsInfo._from_dict(_get(d, dict, "vcs_info")), + ] + ), + ) + + def to_dict(self) -> dict[str, Any]: + res = _filter_none( + url=self.redacted_url, + subdirectory=self.subdirectory, + ) + res[self.info.name] = self.info._to_dict() + return res + + @classmethod + def from_json(cls, s: str) -> DirectUrl: + return cls.from_dict(json.loads(s)) + + def to_json(self) -> str: + return json.dumps(self.to_dict(), sort_keys=True) + + def is_local_editable(self) -> bool: + return isinstance(self.info, DirInfo) and self.info.editable diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/format_control.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/format_control.py new file mode 100644 index 0000000..9f07e3f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/format_control.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.exceptions import CommandError + + +class FormatControl: + """Helper for managing formats from which a package can be installed.""" + + __slots__ = ["no_binary", "only_binary"] + + def __init__( + self, + no_binary: set[str] | None = None, + only_binary: set[str] | None = 
None,
+    ) -> None:
+        if no_binary is None:
+            no_binary = set()
+        if only_binary is None:
+            only_binary = set()
+
+        self.no_binary = no_binary
+        self.only_binary = only_binary
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, self.__class__):
+            return NotImplemented
+
+        if self.__slots__ != other.__slots__:
+            return False
+
+        return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({self.no_binary}, {self.only_binary})"
+
+    @staticmethod
+    def handle_mutual_excludes(value: str, target: set[str], other: set[str]) -> None:
+        if value.startswith("-"):
+            raise CommandError(
+                "--no-binary / --only-binary option requires 1 argument."
+            )
+        new = value.split(",")
+        while ":all:" in new:
+            other.clear()
+            target.clear()
+            target.add(":all:")
+            del new[: new.index(":all:") + 1]
+        # Without a :none:, we want to discard everything, as :all: covers it
+        if ":none:" not in new:
+            return
+        for name in new:
+            if name == ":none:":
+                target.clear()
+                continue
+            name = canonicalize_name(name)
+            other.discard(name)
+            target.add(name)
+
+    def get_allowed_formats(self, canonical_name: str) -> frozenset[str]:
+        result = {"binary", "source"}
+        if canonical_name in self.only_binary:
+            result.discard("source")
+        elif canonical_name in self.no_binary:
+            result.discard("binary")
+        elif ":all:" in self.only_binary:
+            result.discard("source")
+        elif ":all:" in self.no_binary:
+            result.discard("binary")
+        return frozenset(result)
+
+    def disallow_binaries(self) -> None:
+        self.handle_mutual_excludes(
+            ":all:",
+            self.no_binary,
+            self.only_binary,
+        )
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/index.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/index.py
new file mode 100644
index 0000000..b94c325
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/index.py
@@ -0,0 +1,28 @@
+import urllib.parse
+
+
+class PackageIndex:
+    """Represents a Package Index and provides easier access to endpoints"""
+
+    __slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"]
+
+    def __init__(self, url: str, file_storage_domain: str) -> None:
+        super().__init__()
+        self.url = url
+        self.netloc = urllib.parse.urlsplit(url).netloc
+        self.simple_url = self._url_for_path("simple")
+        self.pypi_url = self._url_for_path("pypi")
+
+        # This is part of a temporary hack used to block installs of PyPI
+        # packages which depend on external URLs; it is only necessary until
+        # PyPI can block such packages itself.
+        self.file_storage_domain = file_storage_domain
+
+    def _url_for_path(self, path: str) -> str:
+        return urllib.parse.urljoin(self.url, path)
+
+
+PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org")
+TestPyPI = PackageIndex(
+    "https://test.pypi.org/", file_storage_domain="test-files.pythonhosted.org"
+)
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/installation_report.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/installation_report.py
new file mode 100644
index 0000000..3e8e968
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/installation_report.py
@@ -0,0 +1,57 @@
+from collections.abc import Sequence
+from typing import Any
+
+from pip._vendor.packaging.markers import default_environment
+
+from pip import __version__
+from pip._internal.req.req_install import InstallRequirement
+
+
+class InstallationReport:
+    def __init__(self, install_requirements:
Sequence[InstallRequirement]): + self._install_requirements = install_requirements + + @classmethod + def _install_req_to_dict(cls, ireq: InstallRequirement) -> dict[str, Any]: + assert ireq.download_info, f"No download_info for {ireq}" + res = { + # PEP 610 json for the download URL. download_info.archive_info.hashes may + # be absent when the requirement was installed from the wheel cache + # and the cache entry was populated by an older pip version that did not + # record origin.json. + "download_info": ireq.download_info.to_dict(), + # is_direct is true if the requirement was a direct URL reference (which + # includes editable requirements), and false if the requirement was + # downloaded from a PEP 503 index or --find-links. + "is_direct": ireq.is_direct, + # is_yanked is true if the requirement was yanked from the index, but + # was still selected by pip to conform to PEP 592. + "is_yanked": ireq.link.is_yanked if ireq.link else False, + # requested is true if the requirement was specified by the user (aka + # top level requirement), and false if it was installed as a dependency of a + # requirement. https://peps.python.org/pep-0376/#requested + "requested": ireq.user_supplied, + # PEP 566 json encoding for metadata + # https://www.python.org/dev/peps/pep-0566/#json-compatible-metadata + "metadata": ireq.get_dist().metadata_dict, + } + if ireq.user_supplied and ireq.extras: + # For top level requirements, the list of requested extras, if any. + res["requested_extras"] = sorted(ireq.extras) + return res + + def to_dict(self) -> dict[str, Any]: + return { + "version": "1", + "pip_version": __version__, + "install": [ + self._install_req_to_dict(ireq) for ireq in self._install_requirements + ], + # https://peps.python.org/pep-0508/#environment-markers + # TODO: currently, the resolver uses the default environment to evaluate + # environment markers, so that is what we report here. In the future, it + # should also take into account options such as --python-version or + # --platform, perhaps under the form of an environment_override field? + # https://github.com/pypa/pip/issues/11198 + "environment": default_environment(), + } diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/link.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/link.py new file mode 100644 index 0000000..2e2c0f8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/link.py @@ -0,0 +1,613 @@ +from __future__ import annotations + +import functools +import itertools +import logging +import os +import posixpath +import re +import urllib.parse +from collections.abc import Mapping +from dataclasses import dataclass +from typing import ( + TYPE_CHECKING, + Any, + NamedTuple, +) + +from pip._internal.utils.deprecation import deprecated +from pip._internal.utils.filetypes import WHEEL_EXTENSION +from pip._internal.utils.hashes import Hashes +from pip._internal.utils.misc import ( + pairwise, + redact_auth_from_url, + split_auth_from_netloc, + splitext, +) +from pip._internal.utils.urls import path_to_url, url_to_path + +if TYPE_CHECKING: + from pip._internal.index.collector import IndexContent + +logger = logging.getLogger(__name__) + + +# Order matters, earlier hashes have a precedence over later hashes for what +# we will pick to use. +_SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5") + + +@dataclass(frozen=True) +class LinkHash: + """Links to content may have embedded hash values. This class parses those. 
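For orientation, this is roughly the JSON shape that `InstallationReport.to_dict()` above produces; all values below are made up and the hash digest is elided.

```python
# Illustrative (not authoritative) shape of the v1 installation report built
# by to_dict() above. All values are made up; the digest is elided.
report = {
    "version": "1",
    "pip_version": "25.2",
    "install": [
        {
            "download_info": {
                "url": "https://files.example.com/example-1.0-py3-none-any.whl",
                "archive_info": {"hashes": {"sha256": "..."}},
            },
            "is_direct": False,
            "is_yanked": False,
            "requested": True,
            "metadata": {"name": "example", "version": "1.0"},
        }
    ],
    "environment": {"python_version": "3.12"},
}
```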
+ + `name` must be any member of `_SUPPORTED_HASHES`. + + This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to + be JSON-serializable to conform to PEP 610, this class contains the logic for + parsing a hash name and value for correctness, and then checking whether that hash + conforms to a schema with `.is_hash_allowed()`.""" + + name: str + value: str + + _hash_url_fragment_re = re.compile( + # NB: we do not validate that the second group (.*) is a valid hex + # digest. Instead, we simply keep that string in this class, and then check it + # against Hashes when hash-checking is needed. This is easier to debug than + # proactively discarding an invalid hex digest, as we handle incorrect hashes + # and malformed hashes in the same place. + r"[#&]({choices})=([^&]*)".format( + choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES) + ), + ) + + def __post_init__(self) -> None: + assert self.name in _SUPPORTED_HASHES + + @classmethod + @functools.cache + def find_hash_url_fragment(cls, url: str) -> LinkHash | None: + """Search a string for a checksum algorithm name and encoded output value.""" + match = cls._hash_url_fragment_re.search(url) + if match is None: + return None + name, value = match.groups() + return cls(name=name, value=value) + + def as_dict(self) -> dict[str, str]: + return {self.name: self.value} + + def as_hashes(self) -> Hashes: + """Return a Hashes instance which checks only for the current hash.""" + return Hashes({self.name: [self.value]}) + + def is_hash_allowed(self, hashes: Hashes | None) -> bool: + """ + Return True if the current hash is allowed by `hashes`. + """ + if hashes is None: + return False + return hashes.is_hash_allowed(self.name, hex_digest=self.value) + + +@dataclass(frozen=True) +class MetadataFile: + """Information about a core metadata file associated with a distribution.""" + + hashes: dict[str, str] | None + + def __post_init__(self) -> None: + if self.hashes is not None: + assert all(name in _SUPPORTED_HASHES for name in self.hashes) + + +def supported_hashes(hashes: dict[str, str] | None) -> dict[str, str] | None: + # Remove any unsupported hash types from the mapping. If this leaves no + # supported hashes, return None + if hashes is None: + return None + hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES} + if not hashes: + return None + return hashes + + +def _clean_url_path_part(part: str) -> str: + """ + Clean a "part" of a URL path (i.e. after splitting on "@" characters). + """ + # We unquote prior to quoting to make sure nothing is double quoted. + return urllib.parse.quote(urllib.parse.unquote(part)) + + +def _clean_file_url_path(part: str) -> str: + """ + Clean the first part of a URL path that corresponds to a local + filesystem path (i.e. the first part after splitting on "@" characters). + """ + # We unquote prior to quoting to make sure nothing is double quoted. + # Also, on Windows the path part might contain a drive letter which + # should not be quoted. On Linux where drive letters do not + # exist, the colon should be quoted. We rely on urllib.request + # to do the right thing here. + ret = urllib.request.pathname2url(urllib.request.url2pathname(part)) + if ret.startswith("///"): + # Remove any URL authority section, leaving only the URL path. 
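The URL-fragment hash parsing that `LinkHash.find_hash_url_fragment()` above implements reduces to a single regex; a small demonstration with an invented URL:

```python
# Small demonstration of URL-fragment hash extraction as implemented by
# LinkHash.find_hash_url_fragment() above. The URL is invented.
import re

SUPPORTED = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5")
pattern = re.compile(r"[#&]({})=([^&]*)".format("|".join(SUPPORTED)))

url = "https://example.com/pkg-1.0.tar.gz#sha256=deadbeef"
match = pattern.search(url)
if match:
    name, value = match.groups()
    print(name, value)  # sha256 deadbeef
```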
+ ret = ret.removeprefix("//") + return ret + + +# percent-encoded: / +_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE) + + +def _clean_url_path(path: str, is_local_path: bool) -> str: + """ + Clean the path portion of a URL. + """ + if is_local_path: + clean_func = _clean_file_url_path + else: + clean_func = _clean_url_path_part + + # Split on the reserved characters prior to cleaning so that + # revision strings in VCS URLs are properly preserved. + parts = _reserved_chars_re.split(path) + + cleaned_parts = [] + for to_clean, reserved in pairwise(itertools.chain(parts, [""])): + cleaned_parts.append(clean_func(to_clean)) + # Normalize %xx escapes (e.g. %2f -> %2F) + cleaned_parts.append(reserved.upper()) + + return "".join(cleaned_parts) + + +def _ensure_quoted_url(url: str) -> str: + """ + Make sure a link is fully quoted. + For example, if ' ' occurs in the URL, it will be replaced with "%20", + and without double-quoting other characters. + """ + # Split the URL into parts according to the general structure + # `scheme://netloc/path?query#fragment`. + result = urllib.parse.urlsplit(url) + # If the netloc is empty, then the URL refers to a local filesystem path. + is_local_path = not result.netloc + path = _clean_url_path(result.path, is_local_path=is_local_path) + # Temporarily replace scheme with file to ensure the URL generated by + # urlunsplit() contains an empty netloc (file://) as per RFC 1738. + ret = urllib.parse.urlunsplit(result._replace(scheme="file", path=path)) + ret = result.scheme + ret[4:] # Restore original scheme. + return ret + + +def _absolute_link_url(base_url: str, url: str) -> str: + """ + A faster implementation of urllib.parse.urljoin with a shortcut + for absolute http/https URLs. + """ + if url.startswith(("https://", "http://")): + return url + else: + return urllib.parse.urljoin(base_url, url) + + +@functools.total_ordering +class Link: + """Represents a parsed link from a Package Index's simple URL""" + + __slots__ = [ + "_parsed_url", + "_url", + "_path", + "_hashes", + "comes_from", + "requires_python", + "yanked_reason", + "metadata_file_data", + "cache_link_parsing", + "egg_fragment", + ] + + def __init__( + self, + url: str, + comes_from: str | IndexContent | None = None, + requires_python: str | None = None, + yanked_reason: str | None = None, + metadata_file_data: MetadataFile | None = None, + cache_link_parsing: bool = True, + hashes: Mapping[str, str] | None = None, + ) -> None: + """ + :param url: url of the resource pointed to (href of the link) + :param comes_from: instance of IndexContent where the link was found, + or string. + :param requires_python: String containing the `Requires-Python` + metadata field, specified in PEP 345. This may be specified by + a data-requires-python attribute in the HTML link tag, as + described in PEP 503. + :param yanked_reason: the reason the file has been yanked, if the + file has been yanked, or None if the file hasn't been yanked. + This is the value of the "data-yanked" attribute, if present, in + a simple repository HTML link. If the file has been yanked but + no reason was provided, this should be the empty string. See + PEP 592 for more information and the specification. + :param metadata_file_data: the metadata attached to the file, or None if + no such metadata is provided. This argument, if not None, indicates + that a separate metadata file exists, and also optionally supplies + hashes for that file. 
+        :param cache_link_parsing: A flag that is used elsewhere to determine
+            whether resources retrieved from this link should be cached. PyPI
+            URLs should generally have this set to False, for example.
+        :param hashes: A mapping of hash names to digests to allow us to
+            determine the validity of a download.
+        """
+
+        # The comes_from, requires_python, and metadata_file_data arguments are
+        # only used by classmethods of this class, and are not used in client
+        # code directly.
+
+        # url can be a UNC Windows share
+        if url.startswith("\\\\"):
+            url = path_to_url(url)
+
+        self._parsed_url = urllib.parse.urlsplit(url)
+        # Store the url as a private attribute to prevent accidentally
+        # trying to set a new value.
+        self._url = url
+        # The .path property is hot, so calculate its value ahead of time.
+        self._path = urllib.parse.unquote(self._parsed_url.path)
+
+        link_hash = LinkHash.find_hash_url_fragment(url)
+        hashes_from_link = {} if link_hash is None else link_hash.as_dict()
+        if hashes is None:
+            self._hashes = hashes_from_link
+        else:
+            self._hashes = {**hashes, **hashes_from_link}
+
+        self.comes_from = comes_from
+        self.requires_python = requires_python if requires_python else None
+        self.yanked_reason = yanked_reason
+        self.metadata_file_data = metadata_file_data
+
+        self.cache_link_parsing = cache_link_parsing
+        self.egg_fragment = self._egg_fragment()
+
+    @classmethod
+    def from_json(
+        cls,
+        file_data: dict[str, Any],
+        page_url: str,
+    ) -> Link | None:
+        """
+        Convert a PyPI JSON document from a simple repository page into a Link.
+        """
+        file_url = file_data.get("url")
+        if file_url is None:
+            return None
+
+        url = _ensure_quoted_url(_absolute_link_url(page_url, file_url))
+        pyrequire = file_data.get("requires-python")
+        yanked_reason = file_data.get("yanked")
+        hashes = file_data.get("hashes", {})
+
+        # PEP 714: Indexes must use the name core-metadata, but
+        # clients should support the old name as a fallback for compatibility.
+        metadata_info = file_data.get("core-metadata")
+        if metadata_info is None:
+            metadata_info = file_data.get("dist-info-metadata")
+
+        # The metadata info value may be a boolean, or a dict of hashes.
+        if isinstance(metadata_info, dict):
+            # The file exists, and hashes have been supplied
+            metadata_file_data = MetadataFile(supported_hashes(metadata_info))
+        elif metadata_info:
+            # The file exists, but there are no hashes
+            metadata_file_data = MetadataFile(None)
+        else:
+            # False or not present: the file does not exist
+            metadata_file_data = None
+
+        # The Link.yanked_reason expects an empty string instead of a boolean.
+        if yanked_reason and not isinstance(yanked_reason, str):
+            yanked_reason = ""
+        # The Link.yanked_reason expects None instead of False.
+        elif not yanked_reason:
+            yanked_reason = None
+
+        return cls(
+            url,
+            comes_from=page_url,
+            requires_python=pyrequire,
+            yanked_reason=yanked_reason,
+            hashes=hashes,
+            metadata_file_data=metadata_file_data,
+        )
+
+    @classmethod
+    def from_element(
+        cls,
+        anchor_attribs: dict[str, str | None],
+        page_url: str,
+        base_url: str,
+    ) -> Link | None:
+        """
+        Convert an anchor element's attributes in a simple repository page to a Link.
+ """ + href = anchor_attribs.get("href") + if not href: + return None + + url = _ensure_quoted_url(_absolute_link_url(base_url, href)) + pyrequire = anchor_attribs.get("data-requires-python") + yanked_reason = anchor_attribs.get("data-yanked") + + # PEP 714: Indexes must use the name data-core-metadata, but + # clients should support the old name as a fallback for compatibility. + metadata_info = anchor_attribs.get("data-core-metadata") + if metadata_info is None: + metadata_info = anchor_attribs.get("data-dist-info-metadata") + # The metadata info value may be the string "true", or a string of + # the form "hashname=hashval" + if metadata_info == "true": + # The file exists, but there are no hashes + metadata_file_data = MetadataFile(None) + elif metadata_info is None: + # The file does not exist + metadata_file_data = None + else: + # The file exists, and hashes have been supplied + hashname, sep, hashval = metadata_info.partition("=") + if sep == "=": + metadata_file_data = MetadataFile(supported_hashes({hashname: hashval})) + else: + # Error - data is wrong. Treat as no hashes supplied. + logger.debug( + "Index returned invalid data-dist-info-metadata value: %s", + metadata_info, + ) + metadata_file_data = MetadataFile(None) + + return cls( + url, + comes_from=page_url, + requires_python=pyrequire, + yanked_reason=yanked_reason, + metadata_file_data=metadata_file_data, + ) + + def __str__(self) -> str: + if self.requires_python: + rp = f" (requires-python:{self.requires_python})" + else: + rp = "" + if self.comes_from: + return f"{self.redacted_url} (from {self.comes_from}){rp}" + else: + return self.redacted_url + + def __repr__(self) -> str: + return f"" + + def __hash__(self) -> int: + return hash(self.url) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Link): + return NotImplemented + return self.url == other.url + + def __lt__(self, other: Any) -> bool: + if not isinstance(other, Link): + return NotImplemented + return self.url < other.url + + @property + def url(self) -> str: + return self._url + + @property + def redacted_url(self) -> str: + return redact_auth_from_url(self.url) + + @property + def filename(self) -> str: + path = self.path.rstrip("/") + name = posixpath.basename(path) + if not name: + # Make sure we don't leak auth information if the netloc + # includes a username and password. + netloc, user_pass = split_auth_from_netloc(self.netloc) + return netloc + + name = urllib.parse.unquote(name) + assert name, f"URL {self._url!r} produced no filename" + return name + + @property + def file_path(self) -> str: + return url_to_path(self.url) + + @property + def scheme(self) -> str: + return self._parsed_url.scheme + + @property + def netloc(self) -> str: + """ + This can contain auth information. + """ + return self._parsed_url.netloc + + @property + def path(self) -> str: + return self._path + + def splitext(self) -> tuple[str, str]: + return splitext(posixpath.basename(self.path.rstrip("/"))) + + @property + def ext(self) -> str: + return self.splitext()[1] + + @property + def url_without_fragment(self) -> str: + scheme, netloc, path, query, fragment = self._parsed_url + return urllib.parse.urlunsplit((scheme, netloc, path, query, "")) + + _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)") + + # Per PEP 508. 
+    _project_name_re = re.compile(
+        r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
+    )
+
+    def _egg_fragment(self) -> str | None:
+        match = self._egg_fragment_re.search(self._url)
+        if not match:
+            return None
+
+        # An egg fragment looks like a PEP 508 project name, along with
+        # an optional extras specifier. Anything else is invalid.
+        project_name = match.group(1)
+        if not self._project_name_re.match(project_name):
+            deprecated(
+                reason=f"{self} contains an egg fragment with a non-PEP 508 name.",
+                replacement="to use the req @ url syntax, and remove the egg fragment",
+                gone_in="25.3",
+                issue=13157,
+            )
+
+        return project_name
+
+    _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")
+
+    @property
+    def subdirectory_fragment(self) -> str | None:
+        match = self._subdirectory_fragment_re.search(self._url)
+        if not match:
+            return None
+        return match.group(1)
+
+    def metadata_link(self) -> Link | None:
+        """Return a link to the associated core metadata file (if any)."""
+        if self.metadata_file_data is None:
+            return None
+        metadata_url = f"{self.url_without_fragment}.metadata"
+        if self.metadata_file_data.hashes is None:
+            return Link(metadata_url)
+        return Link(metadata_url, hashes=self.metadata_file_data.hashes)
+
+    def as_hashes(self) -> Hashes:
+        return Hashes({k: [v] for k, v in self._hashes.items()})
+
+    @property
+    def hash(self) -> str | None:
+        return next(iter(self._hashes.values()), None)
+
+    @property
+    def hash_name(self) -> str | None:
+        return next(iter(self._hashes), None)
+
+    @property
+    def show_url(self) -> str:
+        return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])
+
+    @property
+    def is_file(self) -> bool:
+        return self.scheme == "file"
+
+    def is_existing_dir(self) -> bool:
+        return self.is_file and os.path.isdir(self.file_path)
+
+    @property
+    def is_wheel(self) -> bool:
+        return self.ext == WHEEL_EXTENSION
+
+    @property
+    def is_vcs(self) -> bool:
+        from pip._internal.vcs import vcs
+
+        return self.scheme in vcs.all_schemes
+
+    @property
+    def is_yanked(self) -> bool:
+        return self.yanked_reason is not None
+
+    @property
+    def has_hash(self) -> bool:
+        return bool(self._hashes)
+
+    def is_hash_allowed(self, hashes: Hashes | None) -> bool:
+        """
+        Return True if the link has a hash and it is allowed by `hashes`.
+        """
+        if hashes is None:
+            return False
+        return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items())
+
+
+class _CleanResult(NamedTuple):
+    """Convert link for equivalency check.
+
+    This is used in the resolver to check whether two URL-specified requirements
+    likely point to the same distribution and can be considered equivalent. This
+    equivalency logic avoids comparing URLs literally, which can be too strict
+    (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpected by users.
+
+    Currently this does three things:
+
+    1. Drop the basic auth part. This is technically wrong since a server can
+       serve different content based on auth, but if it does that, it is even
+       impossible to guarantee two URLs without auth are equivalent, since
+       the user can input different auth information when prompted. So the
+       practical solution is to assume the auth doesn't affect the response.
+    2. Parse the query to avoid the ordering issue. Note that the ordering under
+       the same key in the query is NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are
+       still considered different.
+    3. Explicitly drop most of the fragment part, except ``subdirectory=`` and
+       hash values, since it should have no impact on the downloaded content. Note
+       that this drops the "egg=" part historically used to denote the requested
+       project (and extras), which is wrong in the strictest sense, but too many
+       people supply it inconsistently, causing superfluous resolution conflicts,
+       so we choose to also ignore them.
+    """
+
+    parsed: urllib.parse.SplitResult
+    query: dict[str, list[str]]
+    subdirectory: str
+    hashes: dict[str, str]
+
+
+def _clean_link(link: Link) -> _CleanResult:
+    parsed = link._parsed_url
+    netloc = parsed.netloc.rsplit("@", 1)[-1]
+    # According to RFC 8089, an empty host in file: means localhost.
+    if parsed.scheme == "file" and not netloc:
+        netloc = "localhost"
+    fragment = urllib.parse.parse_qs(parsed.fragment)
+    if "egg" in fragment:
+        logger.debug("Ignoring egg= fragment in %s", link)
+    try:
+        # If there are multiple subdirectory values, use the first one.
+        # This matches the behavior of Link.subdirectory_fragment.
+        subdirectory = fragment["subdirectory"][0]
+    except (IndexError, KeyError):
+        subdirectory = ""
+    # If there are multiple hash values under the same algorithm, use the
+    # first one. This matches the behavior of Link.hash_value.
+    hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment}
+    return _CleanResult(
+        parsed=parsed._replace(netloc=netloc, query="", fragment=""),
+        query=urllib.parse.parse_qs(parsed.query),
+        subdirectory=subdirectory,
+        hashes=hashes,
+    )
+
+
+@functools.cache
+def links_equivalent(link1: Link, link2: Link) -> bool:
+    return _clean_link(link1) == _clean_link(link2)
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/pylock.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/pylock.py
new file mode 100644
index 0000000..1b6b8c1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/pylock.py
@@ -0,0 +1,188 @@
+from __future__ import annotations
+
+import dataclasses
+import re
+from collections.abc import Iterable
+from dataclasses import dataclass
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from pip._vendor import tomli_w
+
+from pip._internal.models.direct_url import ArchiveInfo, DirInfo, VcsInfo
+from pip._internal.models.link import Link
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.urls import url_to_path
+
+if TYPE_CHECKING:
+    from typing_extensions import Self
+
+PYLOCK_FILE_NAME_RE = re.compile(r"^pylock\.([^.]+)\.toml$")
+
+
+def is_valid_pylock_file_name(path: Path) -> bool:
+    return path.name == "pylock.toml" or bool(re.match(PYLOCK_FILE_NAME_RE, path.name))
+
+
+def _toml_dict_factory(data: list[tuple[str, Any]]) -> dict[str, Any]:
+    return {key.replace("_", "-"): value for key, value in data if value is not None}
+
+
+@dataclass
+class PackageVcs:
+    type: str
+    url: str | None
+    # (not supported) path: Optional[str]
+    requested_revision: str | None
+    commit_id: str
+    subdirectory: str | None
+
+
+@dataclass
+class PackageDirectory:
+    path: str
+    editable: bool | None
+    subdirectory: str | None
+
+
+@dataclass
+class PackageArchive:
+    url: str | None
+    # (not supported) path: Optional[str]
+    # (not supported) size: Optional[int]
+    # (not supported) upload_time: Optional[datetime]
+    hashes: dict[str, str]
+    subdirectory: str | None
+
+
+@dataclass
+class PackageSdist:
+    name: str
+    # (not supported) upload_time: Optional[datetime]
+    url: str | None
+    # (not supported) path: Optional[str]
+ # (not supported) size: Optional[int] + hashes: dict[str, str] + + +@dataclass +class PackageWheel: + name: str + # (not supported) upload_time: Optional[datetime] + url: str | None + # (not supported) path: Optional[str] + # (not supported) size: Optional[int] + hashes: dict[str, str] + + +@dataclass +class Package: + name: str + version: str | None = None + # (not supported) marker: Optional[str] + # (not supported) requires_python: Optional[str] + # (not supported) dependencies + vcs: PackageVcs | None = None + directory: PackageDirectory | None = None + archive: PackageArchive | None = None + # (not supported) index: Optional[str] + sdist: PackageSdist | None = None + wheels: list[PackageWheel] | None = None + # (not supported) attestation_identities: Optional[List[Dict[str, Any]]] + # (not supported) tool: Optional[Dict[str, Any]] + + @classmethod + def from_install_requirement(cls, ireq: InstallRequirement, base_dir: Path) -> Self: + base_dir = base_dir.resolve() + dist = ireq.get_dist() + download_info = ireq.download_info + assert download_info + package = cls(name=dist.canonical_name) + if ireq.is_direct: + if isinstance(download_info.info, VcsInfo): + package.vcs = PackageVcs( + type=download_info.info.vcs, + url=download_info.url, + requested_revision=download_info.info.requested_revision, + commit_id=download_info.info.commit_id, + subdirectory=download_info.subdirectory, + ) + elif isinstance(download_info.info, DirInfo): + package.directory = PackageDirectory( + path=( + Path(url_to_path(download_info.url)) + .resolve() + .relative_to(base_dir) + .as_posix() + ), + editable=( + download_info.info.editable + if download_info.info.editable + else None + ), + subdirectory=download_info.subdirectory, + ) + elif isinstance(download_info.info, ArchiveInfo): + if not download_info.info.hashes: + raise NotImplementedError() + package.archive = PackageArchive( + url=download_info.url, + hashes=download_info.info.hashes, + subdirectory=download_info.subdirectory, + ) + else: + # should never happen + raise NotImplementedError() + else: + package.version = str(dist.version) + if isinstance(download_info.info, ArchiveInfo): + if not download_info.info.hashes: + raise NotImplementedError() + link = Link(download_info.url) + if link.is_wheel: + package.wheels = [ + PackageWheel( + name=link.filename, + url=download_info.url, + hashes=download_info.info.hashes, + ) + ] + else: + package.sdist = PackageSdist( + name=link.filename, + url=download_info.url, + hashes=download_info.info.hashes, + ) + else: + # should never happen + raise NotImplementedError() + return package + + +@dataclass +class Pylock: + lock_version: str = "1.0" + # (not supported) environments: Optional[List[str]] + # (not supported) requires_python: Optional[str] + # (not supported) extras: List[str] = [] + # (not supported) dependency_groups: List[str] = [] + created_by: str = "pip" + packages: list[Package] = dataclasses.field(default_factory=list) + # (not supported) tool: Optional[Dict[str, Any]] + + def as_toml(self) -> str: + return tomli_w.dumps(dataclasses.asdict(self, dict_factory=_toml_dict_factory)) + + @classmethod + def from_install_requirements( + cls, install_requirements: Iterable[InstallRequirement], base_dir: Path + ) -> Self: + return cls( + packages=sorted( + ( + Package.from_install_requirement(ireq, base_dir) + for ireq in install_requirements + ), + key=lambda p: p.name, + ) + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/scheme.py 
b/.venv/lib/python3.12/site-packages/pip/_internal/models/scheme.py
new file mode 100644
index 0000000..06a9a55
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/scheme.py
@@ -0,0 +1,25 @@
+"""
+For types associated with installation schemes.
+
+For a general overview of available schemes and their context, see
+https://docs.python.org/3/install/index.html#alternate-installation.
+"""
+
+from dataclasses import dataclass
+
+SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"]
+
+
+@dataclass(frozen=True)
+class Scheme:
+    """A Scheme holds paths which are used as the base directories for
+    artifacts associated with a Python package.
+    """
+
+    __slots__ = SCHEME_KEYS
+
+    platlib: str
+    purelib: str
+    headers: str
+    scripts: str
+    data: str
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/search_scope.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/search_scope.py
new file mode 100644
index 0000000..136163c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/search_scope.py
@@ -0,0 +1,126 @@
+import itertools
+import logging
+import os
+import posixpath
+import urllib.parse
+from dataclasses import dataclass
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.models.index import PyPI
+from pip._internal.utils.compat import has_tls
+from pip._internal.utils.misc import normalize_path, redact_auth_from_url
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass(frozen=True)
+class SearchScope:
+    """
+    Encapsulates the locations that pip is configured to search.
+    """
+
+    __slots__ = ["find_links", "index_urls", "no_index"]
+
+    find_links: list[str]
+    index_urls: list[str]
+    no_index: bool
+
+    @classmethod
+    def create(
+        cls,
+        find_links: list[str],
+        index_urls: list[str],
+        no_index: bool,
+    ) -> "SearchScope":
+        """
+        Create a SearchScope object after normalizing the `find_links`.
+        """
+        # Build find_links. If an argument starts with ~, it may be
+        # a local file relative to a home directory. So try normalizing
+        # it and if it exists, use the normalized version.
+        # This is deliberately conservative - it might be fine just to
+        # blindly normalize anything starting with a ~...
+        built_find_links: list[str] = []
+        for link in find_links:
+            if link.startswith("~"):
+                new_link = normalize_path(link)
+                if os.path.exists(new_link):
+                    link = new_link
+            built_find_links.append(link)
+
+        # If we don't have TLS enabled, then WARN if anyplace we're looking
+        # relies on TLS.
+        if not has_tls():
+            for link in itertools.chain(index_urls, built_find_links):
+                parsed = urllib.parse.urlparse(link)
+                if parsed.scheme == "https":
+                    logger.warning(
+                        "pip is configured with locations that require "
+                        "TLS/SSL, however the ssl module in Python is not "
+                        "available."
+                    )
+                    break
+
+        return cls(
+            find_links=built_find_links,
+            index_urls=index_urls,
+            no_index=no_index,
+        )
+
+    def get_formatted_locations(self) -> str:
+        lines = []
+        redacted_index_urls = []
+        if self.index_urls and self.index_urls != [PyPI.simple_url]:
+            for url in self.index_urls:
+                redacted_index_url = redact_auth_from_url(url)
+
+                # Parse the URL
+                purl = urllib.parse.urlsplit(redacted_index_url)
+
+                # A URL is generally invalid if both scheme and netloc are
+                # missing; there are issues with Python's URL parsing, so this
+                # test is a bit crude. See bpo-20271, bpo-23505. Python doesn't
+                # always parse invalid URLs correctly; ideally it would raise
+                # exceptions for malformed URLs.
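The crude validity test described in the comment above comes down to how `urlsplit` treats a bare host; a quick demonstration:

```python
# A bare host with no scheme parses with BOTH scheme and netloc empty, which
# is exactly the condition the warning below keys on.
import urllib.parse

for url in ("https://pypi.org/simple", "pypi.org/simple"):
    purl = urllib.parse.urlsplit(url)
    print(url, "->", "ok" if (purl.scheme or purl.netloc) else "looks invalid")
# https://pypi.org/simple -> ok
# pypi.org/simple -> looks invalid
```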
Python doesn't + # always parse invalid URLs correctly - it should raise + # exceptions for malformed URLs + if not purl.scheme and not purl.netloc: + logger.warning( + 'The index url "%s" seems invalid, please provide a scheme.', + redacted_index_url, + ) + + redacted_index_urls.append(redacted_index_url) + + lines.append( + "Looking in indexes: {}".format(", ".join(redacted_index_urls)) + ) + + if self.find_links: + lines.append( + "Looking in links: {}".format( + ", ".join(redact_auth_from_url(url) for url in self.find_links) + ) + ) + return "\n".join(lines) + + def get_index_urls_locations(self, project_name: str) -> list[str]: + """Returns the locations found via self.index_urls + + Checks the url_name on the main (first in the list) index and + use this url_name to produce all locations + """ + + def mkurl_pypi_url(url: str) -> str: + loc = posixpath.join( + url, urllib.parse.quote(canonicalize_name(project_name)) + ) + # For maximum compatibility with easy_install, ensure the path + # ends in a trailing slash. Although this isn't in the spec + # (and PyPI can handle it without the slash) some other index + # implementations might break if they relied on easy_install's + # behavior. + if not loc.endswith("/"): + loc = loc + "/" + return loc + + return [mkurl_pypi_url(url) for url in self.index_urls] diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/selection_prefs.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/selection_prefs.py new file mode 100644 index 0000000..8d5b42d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/selection_prefs.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +from pip._internal.models.format_control import FormatControl + + +# TODO: This needs Python 3.10's improved slots support for dataclasses +# to be converted into a dataclass. +class SelectionPreferences: + """ + Encapsulates the candidate selection preferences for downloading + and installing files. + """ + + __slots__ = [ + "allow_yanked", + "allow_all_prereleases", + "format_control", + "prefer_binary", + "ignore_requires_python", + ] + + # Don't include an allow_yanked default value to make sure each call + # site considers whether yanked releases are allowed. This also causes + # that decision to be made explicit in the calling code, which helps + # people when reading the code. + def __init__( + self, + allow_yanked: bool, + allow_all_prereleases: bool = False, + format_control: FormatControl | None = None, + prefer_binary: bool = False, + ignore_requires_python: bool | None = None, + ) -> None: + """Create a SelectionPreferences object. + + :param allow_yanked: Whether files marked as yanked (in the sense + of PEP 592) are permitted to be candidates for install. + :param format_control: A FormatControl object or None. Used to control + the selection of source packages / binary packages when consulting + the index and links. + :param prefer_binary: Whether to prefer an old, but valid, binary + dist over a new source dist. + :param ignore_requires_python: Whether to ignore incompatible + "Requires-Python" values in links. Defaults to False. 
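+ + An illustrative (not upstream) usage: resolver-facing code might construct + SelectionPreferences(allow_yanked=False, prefer_binary=True) to skip + yanked files while favouring wheels over source distributions.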
+ """ + if ignore_requires_python is None: + ignore_requires_python = False + + self.allow_yanked = allow_yanked + self.allow_all_prereleases = allow_all_prereleases + self.format_control = format_control + self.prefer_binary = prefer_binary + self.ignore_requires_python = ignore_requires_python diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/target_python.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/target_python.py new file mode 100644 index 0000000..8c38392 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/target_python.py @@ -0,0 +1,122 @@ +from __future__ import annotations + +import sys + +from pip._vendor.packaging.tags import Tag + +from pip._internal.utils.compatibility_tags import get_supported, version_info_to_nodot +from pip._internal.utils.misc import normalize_version_info + + +class TargetPython: + """ + Encapsulates the properties of a Python interpreter one is targeting + for a package install, download, etc. + """ + + __slots__ = [ + "_given_py_version_info", + "abis", + "implementation", + "platforms", + "py_version", + "py_version_info", + "_valid_tags", + "_valid_tags_set", + ] + + def __init__( + self, + platforms: list[str] | None = None, + py_version_info: tuple[int, ...] | None = None, + abis: list[str] | None = None, + implementation: str | None = None, + ) -> None: + """ + :param platforms: A list of strings or None. If None, searches for + packages that are supported by the current system. Otherwise, will + find packages that can be built on the platforms passed in. These + packages will only be downloaded for distribution: they will + not be built locally. + :param py_version_info: An optional tuple of ints representing the + Python version information to use (e.g. `sys.version_info[:3]`). + This can have length 1, 2, or 3 when provided. + :param abis: A list of strings or None. This is passed to + compatibility_tags.py's get_supported() function as is. + :param implementation: A string or None. This is passed to + compatibility_tags.py's get_supported() function as is. + """ + # Store the given py_version_info for when we call get_supported(). + self._given_py_version_info = py_version_info + + if py_version_info is None: + py_version_info = sys.version_info[:3] + else: + py_version_info = normalize_version_info(py_version_info) + + py_version = ".".join(map(str, py_version_info[:2])) + + self.abis = abis + self.implementation = implementation + self.platforms = platforms + self.py_version = py_version + self.py_version_info = py_version_info + + # This is used to cache the return value of get_(un)sorted_tags. + self._valid_tags: list[Tag] | None = None + self._valid_tags_set: set[Tag] | None = None + + def format_given(self) -> str: + """ + Format the given, non-None attributes for display. + """ + display_version = None + if self._given_py_version_info is not None: + display_version = ".".join( + str(part) for part in self._given_py_version_info + ) + + key_values = [ + ("platforms", self.platforms), + ("version_info", display_version), + ("abis", self.abis), + ("implementation", self.implementation), + ] + return " ".join( + f"{key}={value!r}" for key, value in key_values if value is not None + ) + + def get_sorted_tags(self) -> list[Tag]: + """ + Return the supported PEP 425 tags to check wheel candidates against. + + The tags are returned in order of preference (most preferred first). 
+ """ + if self._valid_tags is None: + # Pass versions=None if no py_version_info was given since + # versions=None uses special default logic. + py_version_info = self._given_py_version_info + if py_version_info is None: + version = None + else: + version = version_info_to_nodot(py_version_info) + + tags = get_supported( + version=version, + platforms=self.platforms, + abis=self.abis, + impl=self.implementation, + ) + self._valid_tags = tags + + return self._valid_tags + + def get_unsorted_tags(self) -> set[Tag]: + """Exactly the same as get_sorted_tags, but returns a set. + + This is important for performance. + """ + if self._valid_tags_set is None: + self._valid_tags_set = set(self.get_sorted_tags()) + + return self._valid_tags_set diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/models/wheel.py b/.venv/lib/python3.12/site-packages/pip/_internal/models/wheel.py new file mode 100644 index 0000000..60e97cb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/models/wheel.py @@ -0,0 +1,141 @@ +"""Represents a wheel file and provides access to the various parts of the +name that have meaning. +""" + +from __future__ import annotations + +import re +from collections.abc import Iterable + +from pip._vendor.packaging.tags import Tag +from pip._vendor.packaging.utils import BuildTag, parse_wheel_filename +from pip._vendor.packaging.utils import ( + InvalidWheelFilename as _PackagingInvalidWheelFilename, +) + +from pip._internal.exceptions import InvalidWheelFilename +from pip._internal.utils.deprecation import deprecated + + +class Wheel: + """A wheel file""" + + legacy_wheel_file_re = re.compile( + r"""^(?P(?P[^\s-]+?)-(?P[^\s-]*?)) + ((-(?P\d[^-]*?))?-(?P[^\s-]+?)-(?P[^\s-]+?)-(?P[^\s-]+?) + \.whl|\.dist-info)$""", + re.VERBOSE, + ) + + def __init__(self, filename: str) -> None: + self.filename = filename + + # To make mypy happy specify type hints that can come from either + # parse_wheel_filename or the legacy_wheel_file_re match. + self.name: str + self._build_tag: BuildTag | None = None + + try: + wheel_info = parse_wheel_filename(filename) + self.name, _version, self._build_tag, self.file_tags = wheel_info + self.version = str(_version) + except _PackagingInvalidWheelFilename as e: + # Check if the wheel filename is in the legacy format + legacy_wheel_info = self.legacy_wheel_file_re.match(filename) + if not legacy_wheel_info: + raise InvalidWheelFilename(e.args[0]) from None + + deprecated( + reason=( + f"Wheel filename {filename!r} is not correctly normalised. 
" + "Future versions of pip will raise the following error:\n" + f"{e.args[0]}\n\n" + ), + replacement=( + "to rename the wheel to use a correctly normalised " + "name (this may require updating the version in " + "the project metadata)" + ), + gone_in="25.3", + issue=12938, + ) + + self.name = legacy_wheel_info.group("name").replace("_", "-") + self.version = legacy_wheel_info.group("ver").replace("_", "-") + + # Generate the file tags from the legacy wheel filename + pyversions = legacy_wheel_info.group("pyver").split(".") + abis = legacy_wheel_info.group("abi").split(".") + plats = legacy_wheel_info.group("plat").split(".") + self.file_tags = frozenset( + Tag(interpreter=py, abi=abi, platform=plat) + for py in pyversions + for abi in abis + for plat in plats + ) + + @property + def build_tag(self) -> BuildTag: + if self._build_tag is not None: + return self._build_tag + + # Parse the build tag from the legacy wheel filename + legacy_wheel_info = self.legacy_wheel_file_re.match(self.filename) + assert legacy_wheel_info is not None, "guaranteed by filename validation" + build_tag = legacy_wheel_info.group("build") + match = re.match(r"^(\d+)(.*)$", build_tag) + assert match is not None, "guaranteed by filename validation" + build_tag_groups = match.groups() + self._build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) + + return self._build_tag + + def get_formatted_file_tags(self) -> list[str]: + """Return the wheel's tags as a sorted list of strings.""" + return sorted(str(tag) for tag in self.file_tags) + + def support_index_min(self, tags: list[Tag]) -> int: + """Return the lowest index that one of the wheel's file_tag combinations + achieves in the given list of supported tags. + + For example, if there are 8 supported tags and one of the file tags + is first in the list, then return 0. + + :param tags: the PEP 425 tags to check the wheel against, in order + with most preferred first. + + :raises ValueError: If none of the wheel's file tags match one of + the supported tags. + """ + try: + return next(i for i, t in enumerate(tags) if t in self.file_tags) + except StopIteration: + raise ValueError() + + def find_most_preferred_tag( + self, tags: list[Tag], tag_to_priority: dict[Tag, int] + ) -> int: + """Return the priority of the most preferred tag that one of the wheel's file + tag combinations achieves in the given list of supported tags using the given + tag_to_priority mapping, where lower priorities are more-preferred. + + This is used in place of support_index_min in some cases in order to avoid + an expensive linear scan of a large list of tags. + + :param tags: the PEP 425 tags to check the wheel against. + :param tag_to_priority: a mapping from tag to priority of that tag, where + lower is more preferred. + + :raises ValueError: If none of the wheel's file tags match one of + the supported tags. + """ + return min( + tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority + ) + + def supported(self, tags: Iterable[Tag]) -> bool: + """Return whether the wheel is compatible with one of the given tags. + + :param tags: the PEP 425 tags to check the wheel against. 
+ """ + return not self.file_tags.isdisjoint(tags) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/network/__init__.py new file mode 100644 index 0000000..0ae1f56 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/network/__init__.py @@ -0,0 +1 @@ +"""Contains purely network-related utilities.""" diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..3904853 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/auth.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/auth.cpython-312.pyc new file mode 100644 index 0000000..8f542de Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/auth.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/cache.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/cache.cpython-312.pyc new file mode 100644 index 0000000..ac25fb1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/cache.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/download.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/download.cpython-312.pyc new file mode 100644 index 0000000..96c005e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/download.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-312.pyc new file mode 100644 index 0000000..dde708e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/session.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/session.cpython-312.pyc new file mode 100644 index 0000000..370b00b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/session.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..4b91f34 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-312.pyc new file mode 100644 index 0000000..8326a1a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/auth.py b/.venv/lib/python3.12/site-packages/pip/_internal/network/auth.py new file mode 100644 
index 0000000..a42f702 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/network/auth.py @@ -0,0 +1,564 @@ +"""Network Authentication Helpers + +Contains interface (MultiDomainBasicAuth) and associated glue code for +providing credentials in the context of network requests. +""" + +from __future__ import annotations + +import logging +import os +import shutil +import subprocess +import sysconfig +import typing +import urllib.parse +from abc import ABC, abstractmethod +from functools import cache +from os.path import commonprefix +from pathlib import Path +from typing import Any, NamedTuple + +from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth +from pip._vendor.requests.models import Request, Response +from pip._vendor.requests.utils import get_netrc_auth + +from pip._internal.utils.logging import getLogger +from pip._internal.utils.misc import ( + ask, + ask_input, + ask_password, + remove_auth_from_url, + split_auth_netloc_from_url, +) +from pip._internal.vcs.versioncontrol import AuthInfo + +logger = getLogger(__name__) + +KEYRING_DISABLED = False + + +class Credentials(NamedTuple): + url: str + username: str + password: str + + +class KeyRingBaseProvider(ABC): + """Keyring base provider interface""" + + has_keyring: bool + + @abstractmethod + def get_auth_info(self, url: str, username: str | None) -> AuthInfo | None: ... + + @abstractmethod + def save_auth_info(self, url: str, username: str, password: str) -> None: ... + + +class KeyRingNullProvider(KeyRingBaseProvider): + """Keyring null provider""" + + has_keyring = False + + def get_auth_info(self, url: str, username: str | None) -> AuthInfo | None: + return None + + def save_auth_info(self, url: str, username: str, password: str) -> None: + return None + + +class KeyRingPythonProvider(KeyRingBaseProvider): + """Keyring interface which uses locally imported `keyring`""" + + has_keyring = True + + def __init__(self) -> None: + import keyring + + self.keyring = keyring + + def get_auth_info(self, url: str, username: str | None) -> AuthInfo | None: + # Support keyring's get_credential interface which supports getting + # credentials without a username. This is only available for + # keyring>=15.2.0. + if hasattr(self.keyring, "get_credential"): + logger.debug("Getting credentials from keyring for %s", url) + cred = self.keyring.get_credential(url, username) + if cred is not None: + return cred.username, cred.password + return None + + if username is not None: + logger.debug("Getting password from keyring for %s", url) + password = self.keyring.get_password(url, username) + if password: + return username, password + return None + + def save_auth_info(self, url: str, username: str, password: str) -> None: + self.keyring.set_password(url, username, password) + + +class KeyRingCliProvider(KeyRingBaseProvider): + """Provider which uses `keyring` cli + + Instead of calling the keyring package installed alongside pip + we call keyring on the command line which will enable pip to + use whichever installation of keyring is available first in + PATH.
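+ + The subprocess protocol mirrors the keyring CLI (assumed to be on + PATH): "keyring get <service> <username>" prints the password on + stdout, and "keyring set <service> <username>" reads it from stdin.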
+ """ + + has_keyring = True + + def __init__(self, cmd: str) -> None: + self.keyring = cmd + + def get_auth_info(self, url: str, username: str | None) -> AuthInfo | None: + # This is the default implementation of keyring.get_credential + # https://github.com/jaraco/keyring/blob/97689324abcf01bd1793d49063e7ca01e03d7d07/keyring/backend.py#L134-L139 + if username is not None: + password = self._get_password(url, username) + if password is not None: + return username, password + return None + + def save_auth_info(self, url: str, username: str, password: str) -> None: + return self._set_password(url, username, password) + + def _get_password(self, service_name: str, username: str) -> str | None: + """Mirror the implementation of keyring.get_password using cli""" + if self.keyring is None: + return None + + cmd = [self.keyring, "get", service_name, username] + env = os.environ.copy() + env["PYTHONIOENCODING"] = "utf-8" + res = subprocess.run( + cmd, + stdin=subprocess.DEVNULL, + stdout=subprocess.PIPE, + env=env, + ) + if res.returncode: + return None + return res.stdout.decode("utf-8").strip(os.linesep) + + def _set_password(self, service_name: str, username: str, password: str) -> None: + """Mirror the implementation of keyring.set_password using cli""" + if self.keyring is None: + return None + env = os.environ.copy() + env["PYTHONIOENCODING"] = "utf-8" + subprocess.run( + [self.keyring, "set", service_name, username], + input=f"{password}{os.linesep}".encode(), + env=env, + check=True, + ) + return None + + +@cache +def get_keyring_provider(provider: str) -> KeyRingBaseProvider: + logger.verbose("Keyring provider requested: %s", provider) + + # keyring has previously failed and been disabled + if KEYRING_DISABLED: + provider = "disabled" + if provider in ["import", "auto"]: + try: + impl = KeyRingPythonProvider() + logger.verbose("Keyring provider set: import") + return impl + except ImportError: + pass + except Exception as exc: + # In the event of an unexpected exception + # we should warn the user + msg = "Installed copy of keyring fails with exception %s" + if provider == "auto": + msg = msg + ", trying to find a keyring executable as a fallback" + logger.warning(msg, exc, exc_info=logger.isEnabledFor(logging.DEBUG)) + if provider in ["subprocess", "auto"]: + cli = shutil.which("keyring") + if cli and cli.startswith(sysconfig.get_path("scripts")): + # all code within this function is stolen from shutil.which implementation + @typing.no_type_check + def PATH_as_shutil_which_determines_it() -> str: + path = os.environ.get("PATH", None) + if path is None: + try: + path = os.confstr("CS_PATH") + except (AttributeError, ValueError): + # os.confstr() or CS_PATH is not available + path = os.defpath + # bpo-35755: Don't use os.defpath if the PATH environment variable is + # set to an empty string + + return path + + scripts = Path(sysconfig.get_path("scripts")) + + paths = [] + for path in PATH_as_shutil_which_determines_it().split(os.pathsep): + p = Path(path) + try: + if not p.samefile(scripts): + paths.append(path) + except FileNotFoundError: + pass + + path = os.pathsep.join(paths) + + cli = shutil.which("keyring", path=path) + + if cli: + logger.verbose("Keyring provider set: subprocess with executable %s", cli) + return KeyRingCliProvider(cli) + + logger.verbose("Keyring provider set: disabled") + return KeyRingNullProvider() + + +class MultiDomainBasicAuth(AuthBase): + def __init__( + self, + prompting: bool = True, + index_urls: list[str] | None = None, + keyring_provider: str = "auto", 
+ ) -> None: + self.prompting = prompting + self.index_urls = index_urls + self.keyring_provider = keyring_provider + self.passwords: dict[str, AuthInfo] = {} + # When the user is prompted to enter credentials and keyring is + # available, we will offer to save them. If the user accepts, + # this value is set to the credentials they entered. After the + # request authenticates, the caller should call + # ``save_credentials`` to save these. + self._credentials_to_save: Credentials | None = None + + @property + def keyring_provider(self) -> KeyRingBaseProvider: + return get_keyring_provider(self._keyring_provider) + + @keyring_provider.setter + def keyring_provider(self, provider: str) -> None: + # The free function get_keyring_provider has been decorated with + # functools.cache. If an exception occurs in get_keyring_auth that + # cache will be cleared and keyring disabled, take that into account + # if you want to remove this indirection. + self._keyring_provider = provider + + @property + def use_keyring(self) -> bool: + # We won't use keyring when --no-input is passed unless + # a specific provider is requested because it might require + # user interaction + return self.prompting or self._keyring_provider not in ["auto", "disabled"] + + def _get_keyring_auth( + self, + url: str | None, + username: str | None, + ) -> AuthInfo | None: + """Return the tuple auth for a given url from keyring.""" + # Do nothing if no url was provided + if not url: + return None + + try: + return self.keyring_provider.get_auth_info(url, username) + except Exception as exc: + # Log the full exception (with stacktrace) at debug, so it'll only + # show up when running in verbose mode. + logger.debug("Keyring is skipped due to an exception", exc_info=True) + # Always log a shortened version of the exception. + logger.warning( + "Keyring is skipped due to an exception: %s", + str(exc), + ) + global KEYRING_DISABLED + KEYRING_DISABLED = True + get_keyring_provider.cache_clear() + return None + + def _get_index_url(self, url: str) -> str | None: + """Return the original index URL matching the requested URL. + + Cached or dynamically generated credentials may work against + the original index URL rather than just the netloc. + + The provided url should have had its username and password + removed already. If the original index url had credentials then + they will be included in the return value. + + Returns None if no matching index was found, or if --no-index + was specified by the user. + """ + if not url or not self.index_urls: + return None + + url = remove_auth_from_url(url).rstrip("/") + "/" + parsed_url = urllib.parse.urlsplit(url) + + candidates = [] + + for index in self.index_urls: + index = index.rstrip("/") + "/" + parsed_index = urllib.parse.urlsplit(remove_auth_from_url(index)) + if parsed_url == parsed_index: + return index + + if parsed_url.netloc != parsed_index.netloc: + continue + + candidate = urllib.parse.urlsplit(index) + candidates.append(candidate) + + if not candidates: + return None + + candidates.sort( + reverse=True, + key=lambda candidate: commonprefix( + [ + parsed_url.path, + candidate.path, + ] + ).rfind("/"), + ) + + return urllib.parse.urlunsplit(candidates[0]) + + def _get_new_credentials( + self, + original_url: str, + *, + allow_netrc: bool = True, + allow_keyring: bool = False, + ) -> AuthInfo: + """Find and return credentials for the specified URL.""" + # Split the credentials and netloc from the url. 
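+ # Illustratively, "https://user:pass@example.com/simple" splits into + # "https://example.com/simple", "example.com" and the pair ("user", "pass").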
+ url, netloc, url_user_password = split_auth_netloc_from_url( + original_url, + ) + + # Start with the credentials embedded in the url + username, password = url_user_password + if username is not None and password is not None: + logger.debug("Found credentials in url for %s", netloc) + return url_user_password + + # Find a matching index url for this request + index_url = self._get_index_url(url) + if index_url: + # Split the credentials from the url. + index_info = split_auth_netloc_from_url(index_url) + if index_info: + index_url, _, index_url_user_password = index_info + logger.debug("Found index url %s", index_url) + + # If an index URL was found, try its embedded credentials + if index_url and index_url_user_password[0] is not None: + username, password = index_url_user_password + if username is not None and password is not None: + logger.debug("Found credentials in index url for %s", netloc) + return index_url_user_password + + # Get creds from netrc if we still don't have them + if allow_netrc: + netrc_auth = get_netrc_auth(original_url) + if netrc_auth: + logger.debug("Found credentials in netrc for %s", netloc) + return netrc_auth + + # If we don't have a password and keyring is available, use it. + if allow_keyring: + # The index url is more specific than the netloc, so try it first + # fmt: off + kr_auth = ( + self._get_keyring_auth(index_url, username) or + self._get_keyring_auth(netloc, username) + ) + # fmt: on + if kr_auth: + logger.debug("Found credentials in keyring for %s", netloc) + return kr_auth + + return username, password + + def _get_url_and_credentials( + self, original_url: str + ) -> tuple[str, str | None, str | None]: + """Return the credentials to use for the provided URL. + + If allowed, netrc and keyring may be used to obtain the + correct credentials. + + Returns (url_without_credentials, username, password). Note + that even if the original URL contains credentials, this + function may return a different username and password. + """ + url, netloc, _ = split_auth_netloc_from_url(original_url) + + # Try to get credentials from original url + username, password = self._get_new_credentials(original_url) + + # If credentials not found, use any stored credentials for this netloc. + # Do this if either the username or the password is missing. + # This accounts for the situation in which the user has specified + # the username in the index url, but the password comes from keyring. + if (username is None or password is None) and netloc in self.passwords: + un, pw = self.passwords[netloc] + # It is possible that the cached credentials are for a different username, + # in which case the cache should be ignored. + if username is None or username == un: + username, password = un, pw + + if username is not None or password is not None: + # Convert the username and password if they're None, so that + # this netloc will show up as "cached" in the conditional above. + # Further, HTTPBasicAuth doesn't accept None, so it makes sense to + # cache the value that is going to be used. + username = username or "" + password = password or "" + + # Store any acquired credentials. 
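+ # Cached per netloc, so credentials obtained once (say, for + # "pypi.example.org") are reused by every later request to that host + # within this session.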
+ self.passwords[netloc] = (username, password) + + assert ( + # Credentials were found + (username is not None and password is not None) + # Credentials were not found + or (username is None and password is None) + ), f"Could not load credentials from url: {original_url}" + + return url, username, password + + def __call__(self, req: Request) -> Request: + # Get credentials for this request + url, username, password = self._get_url_and_credentials(req.url) + + # Set the url of the request to the url without any credentials + req.url = url + + if username is not None and password is not None: + # Send the basic auth with this request + req = HTTPBasicAuth(username, password)(req) + + # Attach a hook to handle 401 responses + req.register_hook("response", self.handle_401) + + return req + + # Factored out to allow for easy patching in tests + def _prompt_for_password(self, netloc: str) -> tuple[str | None, str | None, bool]: + username = ask_input(f"User for {netloc}: ") if self.prompting else None + if not username: + return None, None, False + if self.use_keyring: + auth = self._get_keyring_auth(netloc, username) + if auth and auth[0] is not None and auth[1] is not None: + return auth[0], auth[1], False + password = ask_password("Password: ") + return username, password, True + + # Factored out to allow for easy patching in tests + def _should_save_password_to_keyring(self) -> bool: + if ( + not self.prompting + or not self.use_keyring + or not self.keyring_provider.has_keyring + ): + return False + return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y" + + def handle_401(self, resp: Response, **kwargs: Any) -> Response: + # We only care about 401 responses, anything else we want to just + # pass through the actual response + if resp.status_code != 401: + return resp + + username, password = None, None + + # Query the keyring for credentials: + if self.use_keyring: + username, password = self._get_new_credentials( + resp.url, + allow_netrc=False, + allow_keyring=True, + ) + + # We are not able to prompt the user so simply return the response + if not self.prompting and not username and not password: + return resp + + parsed = urllib.parse.urlparse(resp.url) + + # Prompt the user for a new username and password + save = False + if not username and not password: + username, password, save = self._prompt_for_password(parsed.netloc) + + # Store the new username and password to use for future requests + self._credentials_to_save = None + if username is not None and password is not None: + self.passwords[parsed.netloc] = (username, password) + + # Prompt to save the password to keyring + if save and self._should_save_password_to_keyring(): + self._credentials_to_save = Credentials( + url=parsed.netloc, + username=username, + password=password, + ) + + # Consume content and release the original connection to allow our new + # request to reuse the same one. + # The result of the assignment isn't used, it's just needed to consume + # the content. + _ = resp.content + resp.raw.release_conn() + + # Add our new username and password to the request + req = HTTPBasicAuth(username or "", password or "")(resp.request) + req.register_hook("response", self.warn_on_401) + + # On successful request, save the credentials that were used to + # keyring. (Note that if the user responded "no" above, this member + # is not set and nothing will be saved.) 
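+ # The retried request thus carries two response hooks, firing in + # registration order: warn_on_401 first, then (optionally) save_credentials.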
+ if self._credentials_to_save: + req.register_hook("response", self.save_credentials) + + # Send our new request + new_resp = resp.connection.send(req, **kwargs) + new_resp.history.append(resp) + + return new_resp + + def warn_on_401(self, resp: Response, **kwargs: Any) -> None: + """Response callback to warn about incorrect credentials.""" + if resp.status_code == 401: + logger.warning( + "401 Error, Credentials not correct for %s", + resp.request.url, + ) + + def save_credentials(self, resp: Response, **kwargs: Any) -> None: + """Response callback to save credentials on success.""" + assert ( + self.keyring_provider.has_keyring + ), "should never reach here without keyring" + + creds = self._credentials_to_save + self._credentials_to_save = None + if creds and resp.status_code < 400: + try: + logger.info("Saving credentials to keyring") + self.keyring_provider.save_auth_info( + creds.url, creds.username, creds.password + ) + except Exception: + logger.exception("Failed to save credentials") diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/cache.py b/.venv/lib/python3.12/site-packages/pip/_internal/network/cache.py new file mode 100644 index 0000000..0c5961c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/network/cache.py @@ -0,0 +1,133 @@ +"""HTTP cache implementation.""" + +from __future__ import annotations + +import os +import shutil +from collections.abc import Generator +from contextlib import contextmanager +from datetime import datetime +from typing import Any, BinaryIO, Callable + +from pip._vendor.cachecontrol.cache import SeparateBodyBaseCache +from pip._vendor.cachecontrol.caches import SeparateBodyFileCache +from pip._vendor.requests.models import Response + +from pip._internal.utils.filesystem import adjacent_tmp_file, replace +from pip._internal.utils.misc import ensure_dir + + +def is_from_cache(response: Response) -> bool: + return getattr(response, "from_cache", False) + + +@contextmanager +def suppressed_cache_errors() -> Generator[None, None, None]: + """If we can't access the cache then we can just skip caching and process + requests as if caching wasn't enabled. + """ + try: + yield + except OSError: + pass + + +class SafeFileCache(SeparateBodyBaseCache): + """ + A file-based cache which is safe to use even when the target directory may + not be accessible or writable. + + There is a race condition when two processes try to write and/or read the + same entry at the same time, since each entry consists of two separate + files (https://github.com/psf/cachecontrol/issues/324). We therefore have + additional logic that makes sure that both files are present before + returning an entry; this fixes the read side of the race condition. + + For the write side, we assume that the server will only ever return the + same data for the same URL, which ought to be the case for files pip is + downloading. PyPI does not have a mechanism to swap out a wheel for + another wheel, for example. If this assumption is not true, the + CacheControl issue will need to be fixed. + """ + + def __init__(self, directory: str) -> None: + assert directory is not None, "Cache directory must not be None." + super().__init__() + self.directory = directory + + def _get_cache_path(self, name: str) -> str: + # From cachecontrol.caches.file_cache.FileCache._fn, brought into our + # class for backwards-compatibility and to avoid using a non-public + # method.
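+ # Illustrative layout: a key hashed to "abcde123..." lands at + # "<directory>/a/b/c/d/e/abcde123...", fanning entries across five + # directory levels so no single directory grows too large.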
+ hashed = SeparateBodyFileCache.encode(name) + parts = list(hashed[:5]) + [hashed] + return os.path.join(self.directory, *parts) + + def get(self, key: str) -> bytes | None: + # The cache entry is only valid if both metadata and body exist. + metadata_path = self._get_cache_path(key) + body_path = metadata_path + ".body" + if not (os.path.exists(metadata_path) and os.path.exists(body_path)): + return None + with suppressed_cache_errors(): + with open(metadata_path, "rb") as f: + return f.read() + + def _write_to_file(self, path: str, writer_func: Callable[[BinaryIO], Any]) -> None: + """Common file writing logic with proper permissions and atomic replacement.""" + with suppressed_cache_errors(): + ensure_dir(os.path.dirname(path)) + + with adjacent_tmp_file(path) as f: + writer_func(f) + # Inherit the read/write permissions of the cache directory + # to enable multi-user cache use-cases. + mode = ( + os.stat(self.directory).st_mode + & 0o666 # select read/write permissions of cache directory + | 0o600 # set owner read/write permissions + ) + # Change permissions only if there is no risk of following a symlink. + if os.chmod in os.supports_fd: + os.chmod(f.fileno(), mode) + elif os.chmod in os.supports_follow_symlinks: + os.chmod(f.name, mode, follow_symlinks=False) + + replace(f.name, path) + + def _write(self, path: str, data: bytes) -> None: + self._write_to_file(path, lambda f: f.write(data)) + + def _write_from_io(self, path: str, source_file: BinaryIO) -> None: + self._write_to_file(path, lambda f: shutil.copyfileobj(source_file, f)) + + def set( + self, key: str, value: bytes, expires: int | datetime | None = None + ) -> None: + path = self._get_cache_path(key) + self._write(path, value) + + def delete(self, key: str) -> None: + path = self._get_cache_path(key) + with suppressed_cache_errors(): + os.remove(path) + with suppressed_cache_errors(): + os.remove(path + ".body") + + def get_body(self, key: str) -> BinaryIO | None: + # The cache entry is only valid if both metadata and body exist. 
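+ # Each entry is a sibling pair: "<hash>" holds the serialized + # headers/metadata and "<hash>.body" the raw payload; requiring both + # avoids returning a half-written entry.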
+ metadata_path = self._get_cache_path(key) + body_path = metadata_path + ".body" + if not (os.path.exists(metadata_path) and os.path.exists(body_path)): + return None + with suppressed_cache_errors(): + return open(body_path, "rb") + + def set_body(self, key: str, body: bytes) -> None: + path = self._get_cache_path(key) + ".body" + self._write(path, body) + + def set_body_from_io(self, key: str, body_file: BinaryIO) -> None: + """Set the body of the cache entry from a file object.""" + path = self._get_cache_path(key) + ".body" + self._write_from_io(path, body_file) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/download.py b/.venv/lib/python3.12/site-packages/pip/_internal/network/download.py new file mode 100644 index 0000000..9881cc2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/network/download.py @@ -0,0 +1,342 @@ +"""Download files with progress indicators.""" + +from __future__ import annotations + +import email.message +import logging +import mimetypes +import os +from collections.abc import Iterable, Mapping +from dataclasses import dataclass +from http import HTTPStatus +from typing import BinaryIO + +from pip._vendor.requests import PreparedRequest +from pip._vendor.requests.models import Response +from pip._vendor.urllib3 import HTTPResponse as URLlib3Response +from pip._vendor.urllib3._collections import HTTPHeaderDict +from pip._vendor.urllib3.exceptions import ReadTimeoutError + +from pip._internal.cli.progress_bars import BarType, get_download_progress_renderer +from pip._internal.exceptions import IncompleteDownloadError, NetworkConnectionError +from pip._internal.models.index import PyPI +from pip._internal.models.link import Link +from pip._internal.network.cache import SafeFileCache, is_from_cache +from pip._internal.network.session import CacheControlAdapter, PipSession +from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks +from pip._internal.utils.misc import format_size, redact_auth_from_url, splitext + +logger = logging.getLogger(__name__) + + +def _get_http_response_size(resp: Response) -> int | None: + try: + return int(resp.headers["content-length"]) + except (ValueError, KeyError, TypeError): + return None + + +def _get_http_response_etag_or_last_modified(resp: Response) -> str | None: + """ + Return either the ETag or Last-Modified header (or None if neither exists). + The return value can be used in an If-Range header. 
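+ + With If-Range, a server honours the Range header only while this + validator still matches and otherwise replies 200 with the full file, + so a resumed download cannot silently mix bytes from two versions.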
+ """ + return resp.headers.get("etag", resp.headers.get("last-modified")) + + +def _log_download( + resp: Response, + link: Link, + progress_bar: BarType, + total_length: int | None, + range_start: int | None = 0, +) -> Iterable[bytes]: + if link.netloc == PyPI.file_storage_domain: + url = link.show_url + else: + url = link.url_without_fragment + + logged_url = redact_auth_from_url(url) + + if total_length: + if range_start: + logged_url = ( + f"{logged_url} ({format_size(range_start)}/{format_size(total_length)})" + ) + else: + logged_url = f"{logged_url} ({format_size(total_length)})" + + if is_from_cache(resp): + logger.info("Using cached %s", logged_url) + elif range_start: + logger.info("Resuming download %s", logged_url) + else: + logger.info("Downloading %s", logged_url) + + if logger.getEffectiveLevel() > logging.INFO: + show_progress = False + elif is_from_cache(resp): + show_progress = False + elif not total_length: + show_progress = True + elif total_length > (512 * 1024): + show_progress = True + else: + show_progress = False + + chunks = response_chunks(resp) + + if not show_progress: + return chunks + + renderer = get_download_progress_renderer( + bar_type=progress_bar, size=total_length, initial_progress=range_start + ) + return renderer(chunks) + + +def sanitize_content_filename(filename: str) -> str: + """ + Sanitize the "filename" value from a Content-Disposition header. + """ + return os.path.basename(filename) + + +def parse_content_disposition(content_disposition: str, default_filename: str) -> str: + """ + Parse the "filename" value from a Content-Disposition header, and + return the default filename if the result is empty. + """ + m = email.message.Message() + m["content-type"] = content_disposition + filename = m.get_param("filename") + if filename: + # We need to sanitize the filename to prevent directory traversal + # in case the filename contains ".." path parts. + filename = sanitize_content_filename(str(filename)) + return filename or default_filename + + +def _get_http_response_filename(resp: Response, link: Link) -> str: + """Get an ideal filename from the given HTTP response, falling back to + the link filename if not provided. 
+ """ + filename = link.filename # fallback + # Have a look at the Content-Disposition header for a better guess + content_disposition = resp.headers.get("content-disposition") + if content_disposition: + filename = parse_content_disposition(content_disposition, filename) + ext: str | None = splitext(filename)[1] + if not ext: + ext = mimetypes.guess_extension(resp.headers.get("content-type", "")) + if ext: + filename += ext + if not ext and link.url != resp.url: + ext = os.path.splitext(resp.url)[1] + if ext: + filename += ext + return filename + + +@dataclass +class _FileDownload: + """Stores the state of a single link download.""" + + link: Link + output_file: BinaryIO + size: int | None + bytes_received: int = 0 + reattempts: int = 0 + + def is_incomplete(self) -> bool: + return bool(self.size is not None and self.bytes_received < self.size) + + def write_chunk(self, data: bytes) -> None: + self.bytes_received += len(data) + self.output_file.write(data) + + def reset_file(self) -> None: + """Delete any saved data and reset progress to zero.""" + self.output_file.seek(0) + self.output_file.truncate() + self.bytes_received = 0 + + +class Downloader: + def __init__( + self, + session: PipSession, + progress_bar: BarType, + resume_retries: int, + ) -> None: + assert ( + resume_retries >= 0 + ), "Number of max resume retries must be bigger or equal to zero" + self._session = session + self._progress_bar = progress_bar + self._resume_retries = resume_retries + + def batch( + self, links: Iterable[Link], location: str + ) -> Iterable[tuple[Link, tuple[str, str]]]: + """Convenience method to download multiple links.""" + for link in links: + filepath, content_type = self(link, location) + yield link, (filepath, content_type) + + def __call__(self, link: Link, location: str) -> tuple[str, str]: + """Download a link and save it under location.""" + resp = self._http_get(link) + download_size = _get_http_response_size(resp) + + filepath = os.path.join(location, _get_http_response_filename(resp, link)) + with open(filepath, "wb") as content_file: + download = _FileDownload(link, content_file, download_size) + self._process_response(download, resp) + if download.is_incomplete(): + self._attempt_resumes_or_redownloads(download, resp) + + content_type = resp.headers.get("Content-Type", "") + return filepath, content_type + + def _process_response(self, download: _FileDownload, resp: Response) -> None: + """Download and save chunks from a response.""" + chunks = _log_download( + resp, + download.link, + self._progress_bar, + download.size, + range_start=download.bytes_received, + ) + try: + for chunk in chunks: + download.write_chunk(chunk) + except ReadTimeoutError as e: + # If the download size is not known, then give up downloading the file. 
+ if download.size is None: + raise e + + logger.warning("Connection timed out while downloading.") + + def _attempt_resumes_or_redownloads( + self, download: _FileDownload, first_resp: Response + ) -> None: + """Attempt to resume/restart the download if connection was dropped.""" + + while download.reattempts < self._resume_retries and download.is_incomplete(): + assert download.size is not None + download.reattempts += 1 + logger.warning( + "Attempting to resume incomplete download (%s/%s, attempt %d)", + format_size(download.bytes_received), + format_size(download.size), + download.reattempts, + ) + + try: + resume_resp = self._http_get_resume(download, should_match=first_resp) + # Fallback: if the server responded with 200 (i.e., the file has + # since been modified or range requests are unsupported) or any + # other unexpected status, restart the download from the beginning. + must_restart = resume_resp.status_code != HTTPStatus.PARTIAL_CONTENT + if must_restart: + download.reset_file() + download.size = _get_http_response_size(resume_resp) + first_resp = resume_resp + + self._process_response(download, resume_resp) + except (ConnectionError, ReadTimeoutError, OSError): + continue + + # No more resume attempts. Raise an error if the download is still incomplete. + if download.is_incomplete(): + os.remove(download.output_file.name) + raise IncompleteDownloadError(download) + + # If we successfully completed the download via resume, manually cache it + # as a complete response to enable future caching + if download.reattempts > 0: + self._cache_resumed_download(download, first_resp) + + def _cache_resumed_download( + self, download: _FileDownload, original_response: Response + ) -> None: + """ + Manually cache a file that was successfully downloaded via resume retries. + + cachecontrol doesn't cache 206 (Partial Content) responses, since they + are not complete files. This method manually adds the final file to the + cache as though it was downloaded in a single request, so that future + requests can use the cache. + """ + url = download.link.url_without_fragment + adapter = self._session.get_adapter(url) + + # Check if the adapter is the CacheControlAdapter (i.e. caching is enabled) + if not isinstance(adapter, CacheControlAdapter): + logger.debug( + "Skipping resume download caching: no cache controller for %s", url + ) + return + + # Check SafeFileCache is being used + assert isinstance( + adapter.cache, SafeFileCache + ), "separate body cache not in use!" + + synthetic_request = PreparedRequest() + synthetic_request.prepare(method="GET", url=url, headers={}) + + synthetic_response_headers = HTTPHeaderDict() + for key, value in original_response.headers.items(): + if key.lower() not in ["content-range", "content-length"]: + synthetic_response_headers[key] = value + synthetic_response_headers["content-length"] = str(download.size) + + synthetic_response = URLlib3Response( + body="", + headers=synthetic_response_headers, + status=200, + preload_content=False, + ) + + # Save metadata and then stream the file contents to cache. 
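+ # The metadata blob is serialized against an empty body (b"") and the + # payload is then streamed in via set_body_from_io, so the completed + # file never has to be held in memory.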
+ cache_url = adapter.controller.cache_url(url) + metadata_blob = adapter.controller.serializer.dumps( + synthetic_request, synthetic_response, b"" + ) + adapter.cache.set(cache_url, metadata_blob) + download.output_file.flush() + with open(download.output_file.name, "rb") as f: + adapter.cache.set_body_from_io(cache_url, f) + + logger.debug( + "Cached resumed download as complete response for future use: %s", url + ) + + def _http_get_resume( + self, download: _FileDownload, should_match: Response + ) -> Response: + """Issue a HTTP range request to resume the download.""" + # To better understand the download resumption logic, see the mdn web docs: + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/Range_requests + headers = HEADERS.copy() + headers["Range"] = f"bytes={download.bytes_received}-" + # If possible, use a conditional range request to avoid corrupted + # downloads caused by the remote file changing in-between. + if identifier := _get_http_response_etag_or_last_modified(should_match): + headers["If-Range"] = identifier + return self._http_get(download.link, headers) + + def _http_get(self, link: Link, headers: Mapping[str, str] = HEADERS) -> Response: + target_url = link.url_without_fragment + try: + resp = self._session.get(target_url, headers=headers, stream=True) + raise_for_status(resp) + except NetworkConnectionError as e: + assert e.response is not None + logger.critical( + "HTTP error %s while getting %s", e.response.status_code, link + ) + raise + return resp diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/lazy_wheel.py b/.venv/lib/python3.12/site-packages/pip/_internal/network/lazy_wheel.py new file mode 100644 index 0000000..ac3ebe6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/network/lazy_wheel.py @@ -0,0 +1,213 @@ +"""Lazy ZIP over HTTP""" + +from __future__ import annotations + +__all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"] + +from bisect import bisect_left, bisect_right +from collections.abc import Generator +from contextlib import contextmanager +from tempfile import NamedTemporaryFile +from typing import Any +from zipfile import BadZipFile, ZipFile + +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response + +from pip._internal.metadata import BaseDistribution, MemoryWheel, get_wheel_distribution +from pip._internal.network.session import PipSession +from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks + + +class HTTPRangeRequestUnsupported(Exception): + pass + + +def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution: + """Return a distribution object from the given wheel URL. + + This uses HTTP range requests to only fetch the portion of the wheel + containing metadata, just enough for the object to be constructed. + If such requests are not supported, HTTPRangeRequestUnsupported + is raised. + """ + with LazyZipOverHTTP(url, session) as zf: + # For read-only ZIP files, ZipFile only needs methods read, + # seek, seekable and tell, not the whole IO protocol. + wheel = MemoryWheel(zf.name, zf) # type: ignore + # After context manager exit, wheel.name + # is an invalid file by intention. + return get_wheel_distribution(wheel, canonicalize_name(name)) + + +class LazyZipOverHTTP: + """File-like object mapped to a ZIP file over HTTP. + + This uses HTTP range requests to lazily fetch the file's content, + which is supposed to be fed to ZipFile. 
If such requests are not + supported by the server, raise HTTPRangeRequestUnsupported + during initialization. + """ + + def __init__( + self, url: str, session: PipSession, chunk_size: int = CONTENT_CHUNK_SIZE + ) -> None: + head = session.head(url, headers=HEADERS) + raise_for_status(head) + assert head.status_code == 200 + self._session, self._url, self._chunk_size = session, url, chunk_size + self._length = int(head.headers["Content-Length"]) + self._file = NamedTemporaryFile() + self.truncate(self._length) + self._left: list[int] = [] + self._right: list[int] = [] + if "bytes" not in head.headers.get("Accept-Ranges", "none"): + raise HTTPRangeRequestUnsupported("range request is not supported") + self._check_zip() + + @property + def mode(self) -> str: + """Opening mode, which is always rb.""" + return "rb" + + @property + def name(self) -> str: + """Path to the underlying file.""" + return self._file.name + + def seekable(self) -> bool: + """Return whether random access is supported, which is True.""" + return True + + def close(self) -> None: + """Close the file.""" + self._file.close() + + @property + def closed(self) -> bool: + """Whether the file is closed.""" + return self._file.closed + + def read(self, size: int = -1) -> bytes: + """Read up to size bytes from the object and return them. + + As a convenience, if size is unspecified or -1, + all bytes until EOF are returned. Fewer than + size bytes may be returned if EOF is reached. + """ + download_size = max(size, self._chunk_size) + start, length = self.tell(), self._length + stop = length if size < 0 else min(start + download_size, length) + start = max(0, stop - download_size) + self._download(start, stop - 1) + return self._file.read(size) + + def readable(self) -> bool: + """Return whether the file is readable, which is True.""" + return True + + def seek(self, offset: int, whence: int = 0) -> int: + """Change stream position and return the new absolute position. + + Seek to offset relative position indicated by whence: + * 0: Start of stream (the default). pos should be >= 0; + * 1: Current position - pos may be negative; + * 2: End of stream - pos usually negative. + """ + return self._file.seek(offset, whence) + + def tell(self) -> int: + """Return the current position.""" + return self._file.tell() + + def truncate(self, size: int | None = None) -> int: + """Resize the stream to the given size in bytes. + + If size is unspecified resize to the current position. + The current stream position isn't changed. + + Return the new file size. + """ + return self._file.truncate(size) + + def writable(self) -> bool: + """Return False.""" + return False + + def __enter__(self) -> LazyZipOverHTTP: + self._file.__enter__() + return self + + def __exit__(self, *exc: Any) -> None: + self._file.__exit__(*exc) + + @contextmanager + def _stay(self) -> Generator[None, None, None]: + """Return a context manager keeping the position. + + At the end of the block, seek back to original position. + """ + pos = self.tell() + try: + yield + finally: + self.seek(pos) + + def _check_zip(self) -> None: + """Check and download until the file is a valid ZIP.""" + end = self._length - 1 + for start in reversed(range(0, end, self._chunk_size)): + self._download(start, end) + with self._stay(): + try: + # For read-only ZIP files, ZipFile only needs + # methods read, seek, seekable and tell. 
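+ # Constructing ZipFile(self) forces a read of the end-of-central- + # directory record; each BadZipFile failure widens the downloaded + # tail by one more chunk before trying again.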
+ ZipFile(self) + except BadZipFile: + pass + else: + break + + def _stream_response( + self, start: int, end: int, base_headers: dict[str, str] = HEADERS + ) -> Response: + """Return HTTP response to a range request from start to end.""" + headers = base_headers.copy() + headers["Range"] = f"bytes={start}-{end}" + # TODO: Get range requests to be correctly cached + headers["Cache-Control"] = "no-cache" + return self._session.get(self._url, headers=headers, stream=True) + + def _merge( + self, start: int, end: int, left: int, right: int + ) -> Generator[tuple[int, int], None, None]: + """Return a generator of intervals to be fetched. + + Args: + start (int): Start of needed interval + end (int): End of needed interval + left (int): Index of first overlapping downloaded data + right (int): Index after last overlapping downloaded data + """ + lslice, rslice = self._left[left:right], self._right[left:right] + i = start = min([start] + lslice[:1]) + end = max([end] + rslice[-1:]) + for j, k in zip(lslice, rslice): + if j > i: + yield i, j - 1 + i = k + 1 + if i <= end: + yield i, end + self._left[left:right], self._right[left:right] = [start], [end] + + def _download(self, start: int, end: int) -> None: + """Download bytes from start to end inclusively.""" + with self._stay(): + left = bisect_left(self._right, start) + right = bisect_right(self._left, end) + for start, end in self._merge(start, end, left, right): + response = self._stream_response(start, end) + response.raise_for_status() + self.seek(start) + for chunk in response_chunks(response, self._chunk_size): + self._file.write(chunk) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/session.py b/.venv/lib/python3.12/site-packages/pip/_internal/network/session.py new file mode 100644 index 0000000..a1f9444 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/network/session.py @@ -0,0 +1,528 @@ +"""PipSession and supporting code, containing all pip-specific +network request configuration and behavior. +""" + +from __future__ import annotations + +import email.utils +import functools +import io +import ipaddress +import json +import logging +import mimetypes +import os +import platform +import shutil +import subprocess +import sys +import urllib.parse +import warnings +from collections.abc import Generator, Mapping, Sequence +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Union, +) + +from pip._vendor import requests, urllib3 +from pip._vendor.cachecontrol import CacheControlAdapter as _BaseCacheControlAdapter +from pip._vendor.requests.adapters import DEFAULT_POOLBLOCK, BaseAdapter +from pip._vendor.requests.adapters import HTTPAdapter as _BaseHTTPAdapter +from pip._vendor.requests.models import PreparedRequest, Response +from pip._vendor.requests.structures import CaseInsensitiveDict +from pip._vendor.urllib3.connectionpool import ConnectionPool +from pip._vendor.urllib3.exceptions import InsecureRequestWarning + +from pip import __version__ +from pip._internal.metadata import get_default_environment +from pip._internal.models.link import Link +from pip._internal.network.auth import MultiDomainBasicAuth +from pip._internal.network.cache import SafeFileCache + +# Import ssl from compat so the initial import occurs in only one place. 
+from pip._internal.utils.compat import has_tls +from pip._internal.utils.glibc import libc_ver +from pip._internal.utils.misc import build_url_from_netloc, parse_netloc +from pip._internal.utils.urls import url_to_path + +if TYPE_CHECKING: + from ssl import SSLContext + + from pip._vendor.urllib3.poolmanager import PoolManager + from pip._vendor.urllib3.proxymanager import ProxyManager + + +logger = logging.getLogger(__name__) + +SecureOrigin = tuple[str, str, Optional[Union[int, str]]] + + +# Ignore warning raised when using --trusted-host. +warnings.filterwarnings("ignore", category=InsecureRequestWarning) + + +SECURE_ORIGINS: list[SecureOrigin] = [ + # protocol, hostname, port + # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) + ("https", "*", "*"), + ("*", "localhost", "*"), + ("*", "127.0.0.0/8", "*"), + ("*", "::1/128", "*"), + ("file", "*", None), + # ssh is always secure. + ("ssh", "*", "*"), +] + + +# These are environment variables present when running under various +# CI systems. For each variable, some CI systems that use the variable +# are indicated. The collection was chosen so that for each of a number +# of popular systems, at least one of the environment variables is used. +# This list is used to provide some indication of and lower bound for +# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive. +# For more background, see: https://github.com/pypa/pip/issues/5499 +CI_ENVIRONMENT_VARIABLES = ( + # Azure Pipelines + "BUILD_BUILDID", + # Jenkins + "BUILD_ID", + # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI + "CI", + # Explicit environment variable. + "PIP_IS_CI", +) + + +def looks_like_ci() -> bool: + """ + Return whether it looks like pip is running under CI. + """ + # We don't use the method of checking for a tty (e.g. using isatty()) + # because some CI systems mimic a tty (e.g. Travis CI). Thus that + # method doesn't provide definitive information in either direction. + return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES) + + +@functools.lru_cache(maxsize=1) +def user_agent() -> str: + """ + Return a string representing the user agent. 
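+
+    Illustrative shape of the result (exact fields vary by platform):
+    pip/<version> {"ci":null,"cpu":"x86_64","implementation":{...},...}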
+ """ + data: dict[str, Any] = { + "installer": {"name": "pip", "version": __version__}, + "python": platform.python_version(), + "implementation": { + "name": platform.python_implementation(), + }, + } + + if data["implementation"]["name"] == "CPython": + data["implementation"]["version"] = platform.python_version() + elif data["implementation"]["name"] == "PyPy": + pypy_version_info = sys.pypy_version_info # type: ignore + if pypy_version_info.releaselevel == "final": + pypy_version_info = pypy_version_info[:3] + data["implementation"]["version"] = ".".join( + [str(x) for x in pypy_version_info] + ) + elif data["implementation"]["name"] == "Jython": + # Complete Guess + data["implementation"]["version"] = platform.python_version() + elif data["implementation"]["name"] == "IronPython": + # Complete Guess + data["implementation"]["version"] = platform.python_version() + + if sys.platform.startswith("linux"): + from pip._vendor import distro + + linux_distribution = distro.name(), distro.version(), distro.codename() + distro_infos: dict[str, Any] = dict( + filter( + lambda x: x[1], + zip(["name", "version", "id"], linux_distribution), + ) + ) + libc = dict( + filter( + lambda x: x[1], + zip(["lib", "version"], libc_ver()), + ) + ) + if libc: + distro_infos["libc"] = libc + if distro_infos: + data["distro"] = distro_infos + + if sys.platform.startswith("darwin") and platform.mac_ver()[0]: + data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]} + + if platform.system(): + data.setdefault("system", {})["name"] = platform.system() + + if platform.release(): + data.setdefault("system", {})["release"] = platform.release() + + if platform.machine(): + data["cpu"] = platform.machine() + + if has_tls(): + import _ssl as ssl + + data["openssl_version"] = ssl.OPENSSL_VERSION + + setuptools_dist = get_default_environment().get_distribution("setuptools") + if setuptools_dist is not None: + data["setuptools_version"] = str(setuptools_dist.version) + + if shutil.which("rustc") is not None: + # If for any reason `rustc --version` fails, silently ignore it + try: + rustc_output = subprocess.check_output( + ["rustc", "--version"], stderr=subprocess.STDOUT, timeout=0.5 + ) + except Exception: + pass + else: + if rustc_output.startswith(b"rustc "): + # The format of `rustc --version` is: + # `b'rustc 1.52.1 (9bc8c42bb 2021-05-09)\n'` + # We extract just the middle (1.52.1) part + data["rustc_version"] = rustc_output.split(b" ")[1].decode() + + # Use None rather than False so as not to give the impression that + # pip knows it is not being run under CI. Rather, it is a null or + # inconclusive result. Also, we include some value rather than no + # value to make it easier to know that the check has been run. 
+ data["ci"] = True if looks_like_ci() else None + + user_data = os.environ.get("PIP_USER_AGENT_USER_DATA") + if user_data is not None: + data["user_data"] = user_data + + return "{data[installer][name]}/{data[installer][version]} {json}".format( + data=data, + json=json.dumps(data, separators=(",", ":"), sort_keys=True), + ) + + +class LocalFSAdapter(BaseAdapter): + def send( + self, + request: PreparedRequest, + stream: bool = False, + timeout: float | tuple[float, float] | None = None, + verify: bool | str = True, + cert: str | tuple[str, str] | None = None, + proxies: Mapping[str, str] | None = None, + ) -> Response: + pathname = url_to_path(request.url) + + resp = Response() + resp.status_code = 200 + resp.url = request.url + + try: + stats = os.stat(pathname) + except OSError as exc: + # format the exception raised as a io.BytesIO object, + # to return a better error message: + resp.status_code = 404 + resp.reason = type(exc).__name__ + resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode()) + else: + modified = email.utils.formatdate(stats.st_mtime, usegmt=True) + content_type = mimetypes.guess_type(pathname)[0] or "text/plain" + resp.headers = CaseInsensitiveDict( + { + "Content-Type": content_type, + "Content-Length": stats.st_size, + "Last-Modified": modified, + } + ) + + resp.raw = open(pathname, "rb") + resp.close = resp.raw.close + + return resp + + def close(self) -> None: + pass + + +class _SSLContextAdapterMixin: + """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters. + + The additional argument is forwarded directly to the pool manager. This allows us + to dynamically decide what SSL store to use at runtime, which is used to implement + the optional ``truststore`` backend. + """ + + def __init__( + self, + *, + ssl_context: SSLContext | None = None, + **kwargs: Any, + ) -> None: + self._ssl_context = ssl_context + super().__init__(**kwargs) + + def init_poolmanager( + self, + connections: int, + maxsize: int, + block: bool = DEFAULT_POOLBLOCK, + **pool_kwargs: Any, + ) -> PoolManager: + if self._ssl_context is not None: + pool_kwargs.setdefault("ssl_context", self._ssl_context) + return super().init_poolmanager( # type: ignore[misc] + connections=connections, + maxsize=maxsize, + block=block, + **pool_kwargs, + ) + + def proxy_manager_for(self, proxy: str, **proxy_kwargs: Any) -> ProxyManager: + # Proxy manager replaces the pool manager, so inject our SSL + # context here too. 
https://github.com/pypa/pip/issues/13288 + if self._ssl_context is not None: + proxy_kwargs.setdefault("ssl_context", self._ssl_context) + return super().proxy_manager_for(proxy, **proxy_kwargs) # type: ignore[misc] + + +class HTTPAdapter(_SSLContextAdapterMixin, _BaseHTTPAdapter): + pass + + +class CacheControlAdapter(_SSLContextAdapterMixin, _BaseCacheControlAdapter): + pass + + +class InsecureHTTPAdapter(HTTPAdapter): + def cert_verify( + self, + conn: ConnectionPool, + url: str, + verify: bool | str, + cert: str | tuple[str, str] | None, + ) -> None: + super().cert_verify(conn=conn, url=url, verify=False, cert=cert) + + +class InsecureCacheControlAdapter(CacheControlAdapter): + def cert_verify( + self, + conn: ConnectionPool, + url: str, + verify: bool | str, + cert: str | tuple[str, str] | None, + ) -> None: + super().cert_verify(conn=conn, url=url, verify=False, cert=cert) + + +class PipSession(requests.Session): + timeout: int | None = None + + def __init__( + self, + *args: Any, + retries: int = 0, + cache: str | None = None, + trusted_hosts: Sequence[str] = (), + index_urls: list[str] | None = None, + ssl_context: SSLContext | None = None, + **kwargs: Any, + ) -> None: + """ + :param trusted_hosts: Domains not to emit warnings for when not using + HTTPS. + """ + super().__init__(*args, **kwargs) + + # Namespace the attribute with "pip_" just in case to prevent + # possible conflicts with the base class. + self.pip_trusted_origins: list[tuple[str, int | None]] = [] + self.pip_proxy = None + + # Attach our User Agent to the request + self.headers["User-Agent"] = user_agent() + + # Attach our Authentication handler to the session + self.auth = MultiDomainBasicAuth(index_urls=index_urls) + + # Create our urllib3.Retry instance which will allow us to customize + # how we handle retries. + retries = urllib3.Retry( + # Set the total number of retries that a particular request can + # have. + total=retries, + # A 503 error from PyPI typically means that the Fastly -> Origin + # connection got interrupted in some way. A 503 error in general + # is typically considered a transient error so we'll go ahead and + # retry it. + # A 500 may indicate transient error in Amazon S3 + # A 502 may be a transient error from a CDN like CloudFlare or CloudFront + # A 520 or 527 - may indicate transient error in CloudFlare + status_forcelist=[500, 502, 503, 520, 527], + # Add a small amount of back off between failed requests in + # order to prevent hammering the service. + backoff_factor=0.25, + ) # type: ignore + + # Our Insecure HTTPAdapter disables HTTPS validation. It does not + # support caching so we'll use it for all http:// URLs. + # If caching is disabled, we will also use it for + # https:// hosts that we've marked as ignoring + # TLS errors for (trusted-hosts). + insecure_adapter = InsecureHTTPAdapter(max_retries=retries) + + # We want to _only_ cache responses on securely fetched origins or when + # the host is specified as trusted. We do this because + # we can't validate the response of an insecurely/untrusted fetched + # origin, and we don't want someone to be able to poison the cache and + # require manual eviction from the cache to fix it. 
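+        # Summary (added note): with caching enabled, responses are cached
+        # for verified HTTPS origins and for explicitly trusted hosts;
+        # plain http:// traffic always uses the non-caching adapter.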
+ if cache: + secure_adapter = CacheControlAdapter( + cache=SafeFileCache(cache), + max_retries=retries, + ssl_context=ssl_context, + ) + self._trusted_host_adapter = InsecureCacheControlAdapter( + cache=SafeFileCache(cache), + max_retries=retries, + ) + else: + secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context) + self._trusted_host_adapter = insecure_adapter + + self.mount("https://", secure_adapter) + self.mount("http://", insecure_adapter) + + # Enable file:// urls + self.mount("file://", LocalFSAdapter()) + + for host in trusted_hosts: + self.add_trusted_host(host, suppress_logging=True) + + def update_index_urls(self, new_index_urls: list[str]) -> None: + """ + :param new_index_urls: New index urls to update the authentication + handler with. + """ + self.auth.index_urls = new_index_urls + + def add_trusted_host( + self, host: str, source: str | None = None, suppress_logging: bool = False + ) -> None: + """ + :param host: It is okay to provide a host that has previously been + added. + :param source: An optional source string, for logging where the host + string came from. + """ + if not suppress_logging: + msg = f"adding trusted host: {host!r}" + if source is not None: + msg += f" (from {source})" + logger.info(msg) + + parsed_host, parsed_port = parse_netloc(host) + if parsed_host is None: + raise ValueError(f"Trusted host URL must include a host part: {host!r}") + if (parsed_host, parsed_port) not in self.pip_trusted_origins: + self.pip_trusted_origins.append((parsed_host, parsed_port)) + + self.mount( + build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter + ) + self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter) + if not parsed_port: + self.mount( + build_url_from_netloc(host, scheme="http") + ":", + self._trusted_host_adapter, + ) + # Mount wildcard ports for the same host. + self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter) + + def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]: + yield from SECURE_ORIGINS + for host, port in self.pip_trusted_origins: + yield ("*", host, "*" if port is None else port) + + def is_secure_origin(self, location: Link) -> bool: + # Determine if this url used a secure transport mechanism + parsed = urllib.parse.urlparse(str(location)) + origin_protocol, origin_host, origin_port = ( + parsed.scheme, + parsed.hostname, + parsed.port, + ) + + # The protocol to use to see if the protocol matches. + # Don't count the repository type as part of the protocol: in + # cases such as "git+ssh", only use "ssh". (I.e., Only verify against + # the last scheme.) + origin_protocol = origin_protocol.rsplit("+", 1)[-1] + + # Determine if our origin is a secure origin by looking through our + # hardcoded list of secure origins, as well as any additional ones + # configured on this PackageFinder instance. + for secure_origin in self.iter_secure_origins(): + secure_protocol, secure_host, secure_port = secure_origin + if origin_protocol != secure_protocol and secure_protocol != "*": + continue + + try: + addr = ipaddress.ip_address(origin_host or "") + network = ipaddress.ip_network(secure_host) + except ValueError: + # We don't have both a valid address or a valid network, so + # we'll check this origin against hostnames. + if ( + origin_host + and origin_host.lower() != secure_host.lower() + and secure_host != "*" + ): + continue + else: + # We have a valid address and network, so see if the address + # is contained within the network. 
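+                # For example, origin host 127.0.0.5 falls inside the
+                # built-in secure network 127.0.0.0/8 and passes here.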
+ if addr not in network: + continue + + # Check to see if the port matches. + if ( + origin_port != secure_port + and secure_port != "*" + and secure_port is not None + ): + continue + + # If we've gotten here, then this origin matches the current + # secure origin and we should return True + return True + + # If we've gotten to this point, then the origin isn't secure and we + # will not accept it as a valid location to search. We will however + # log a warning that we are ignoring it. + logger.warning( + "The repository located at %s is not a trusted or secure host and " + "is being ignored. If this repository is available via HTTPS we " + "recommend you use HTTPS instead, otherwise you may silence " + "this warning and allow it anyway with '--trusted-host %s'.", + origin_host, + origin_host, + ) + + return False + + def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response: + # Allow setting a default timeout on a session + kwargs.setdefault("timeout", self.timeout) + # Allow setting a default proxies on a session + kwargs.setdefault("proxies", self.proxies) + + # Dispatch the actual request + return super().request(method, url, *args, **kwargs) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/utils.py b/.venv/lib/python3.12/site-packages/pip/_internal/network/utils.py new file mode 100644 index 0000000..74d3111 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/network/utils.py @@ -0,0 +1,98 @@ +from collections.abc import Generator + +from pip._vendor.requests.models import Response + +from pip._internal.exceptions import NetworkConnectionError + +# The following comments and HTTP headers were originally added by +# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03. +# +# We use Accept-Encoding: identity here because requests defaults to +# accepting compressed responses. This breaks in a variety of ways +# depending on how the server is configured. +# - Some servers will notice that the file isn't a compressible file +# and will leave the file alone and with an empty Content-Encoding +# - Some servers will notice that the file is already compressed and +# will leave the file alone, adding a Content-Encoding: gzip header +# - Some servers won't notice anything at all and will take a file +# that's already been compressed and compress it again, and set +# the Content-Encoding: gzip header +# By setting this to request only the identity encoding we're hoping +# to eliminate the third case. Hopefully there does not exist a server +# which when given a file will notice it is already compressed and that +# you're not asking for a compressed file and will then decompress it +# before sending because if that's the case I don't think it'll ever be +# possible to make this work. +HEADERS: dict[str, str] = {"Accept-Encoding": "identity"} + +DOWNLOAD_CHUNK_SIZE = 256 * 1024 + + +def raise_for_status(resp: Response) -> None: + http_error_msg = "" + if isinstance(resp.reason, bytes): + # We attempt to decode utf-8 first because some servers + # choose to localize their reason strings. If the string + # isn't utf-8, we fall back to iso-8859-1 for all other + # encodings. 
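+        # (Illustrative: b"P\xe9rim\xe9" is not valid utf-8 but decodes
+        # under iso-8859-1 as "Périmé".)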
+ try: + reason = resp.reason.decode("utf-8") + except UnicodeDecodeError: + reason = resp.reason.decode("iso-8859-1") + else: + reason = resp.reason + + if 400 <= resp.status_code < 500: + http_error_msg = ( + f"{resp.status_code} Client Error: {reason} for url: {resp.url}" + ) + + elif 500 <= resp.status_code < 600: + http_error_msg = ( + f"{resp.status_code} Server Error: {reason} for url: {resp.url}" + ) + + if http_error_msg: + raise NetworkConnectionError(http_error_msg, response=resp) + + +def response_chunks( + response: Response, chunk_size: int = DOWNLOAD_CHUNK_SIZE +) -> Generator[bytes, None, None]: + """Given a requests Response, provide the data chunks.""" + try: + # Special case for urllib3. + for chunk in response.raw.stream( + chunk_size, + # We use decode_content=False here because we don't + # want urllib3 to mess with the raw bytes we get + # from the server. If we decompress inside of + # urllib3 then we cannot verify the checksum + # because the checksum will be of the compressed + # file. This breakage will only occur if the + # server adds a Content-Encoding header, which + # depends on how the server was configured: + # - Some servers will notice that the file isn't a + # compressible file and will leave the file alone + # and with an empty Content-Encoding + # - Some servers will notice that the file is + # already compressed and will leave the file + # alone and will add a Content-Encoding: gzip + # header + # - Some servers won't notice anything at all and + # will take a file that's already been compressed + # and compress it again and set the + # Content-Encoding: gzip header + # + # By setting this not to decode automatically we + # hope to eliminate problems with the second case. + decode_content=False, + ): + yield chunk + except AttributeError: + # Standard file-like object. + while True: + chunk = response.raw.read(chunk_size) + if not chunk: + break + yield chunk diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/network/xmlrpc.py b/.venv/lib/python3.12/site-packages/pip/_internal/network/xmlrpc.py new file mode 100644 index 0000000..f4bddb4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/network/xmlrpc.py @@ -0,0 +1,61 @@ +"""xmlrpclib.Transport implementation""" + +import logging +import urllib.parse +import xmlrpc.client +from typing import TYPE_CHECKING + +from pip._internal.exceptions import NetworkConnectionError +from pip._internal.network.session import PipSession +from pip._internal.network.utils import raise_for_status + +if TYPE_CHECKING: + from xmlrpc.client import _HostType, _Marshallable + + from _typeshed import SizedBuffer + +logger = logging.getLogger(__name__) + + +class PipXmlrpcTransport(xmlrpc.client.Transport): + """Provide a `xmlrpclib.Transport` implementation via a `PipSession` + object. 
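+
+    Usage sketch (illustrative, not from this file): pass an instance as
+    the ``transport`` argument of ``xmlrpc.client.ServerProxy`` so that
+    XML-RPC calls reuse pip's session (retries, proxies, authentication)::
+
+        ServerProxy(index_url, transport=PipXmlrpcTransport(index_url, session))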
+ """ + + def __init__( + self, index_url: str, session: PipSession, use_datetime: bool = False + ) -> None: + super().__init__(use_datetime) + index_parts = urllib.parse.urlparse(index_url) + self._scheme = index_parts.scheme + self._session = session + + def request( + self, + host: "_HostType", + handler: str, + request_body: "SizedBuffer", + verbose: bool = False, + ) -> tuple["_Marshallable", ...]: + assert isinstance(host, str) + parts = (self._scheme, host, handler, None, None, None) + url = urllib.parse.urlunparse(parts) + try: + headers = {"Content-Type": "text/xml"} + response = self._session.post( + url, + data=request_body, + headers=headers, + stream=True, + ) + raise_for_status(response) + self.verbose = verbose + return self.parse_response(response.raw) + except NetworkConnectionError as exc: + assert exc.response + logger.critical( + "HTTP error %s while getting %s", + exc.response.status_code, + url, + ) + raise diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b2df156 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/check.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/check.cpython-312.pyc new file mode 100644 index 0000000..49a75c3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/check.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-312.pyc new file mode 100644 index 0000000..7c0fd76 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-312.pyc new file mode 100644 index 0000000..58f0a19 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..3ea5ee9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-312.pyc new file mode 100644 index 
0000000..10f9c54 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-312.pyc new file mode 100644 index 0000000..5eef4b0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-312.pyc new file mode 100644 index 0000000..60258c8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-312.pyc new file mode 100644 index 0000000..659e722 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-312.pyc new file mode 100644 index 0000000..a1107e2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-312.pyc new file mode 100644 index 0000000..c92bf45 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-312.pyc new file mode 100644 index 0000000..71e8349 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/build_tracker.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/build_tracker.py new file mode 100644 index 0000000..cbb4e4f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/build_tracker.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +import contextlib +import hashlib +import logging +import os +from collections.abc import Generator +from types import TracebackType + +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.temp_dir import TempDirectory + +logger = logging.getLogger(__name__) + + +@contextlib.contextmanager +def update_env_context_manager(**changes: str) -> Generator[None, None, None]: + target = os.environ + + # Save values from the target and change them. 
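+    # A unique sentinel object distinguishes "variable was not set at all"
+    # from "variable was set to an empty string" when restoring below.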
+ non_existent_marker = object() + saved_values: dict[str, object | str] = {} + for name, new_value in changes.items(): + try: + saved_values[name] = target[name] + except KeyError: + saved_values[name] = non_existent_marker + target[name] = new_value + + try: + yield + finally: + # Restore original values in the target. + for name, original_value in saved_values.items(): + if original_value is non_existent_marker: + del target[name] + else: + assert isinstance(original_value, str) # for mypy + target[name] = original_value + + +@contextlib.contextmanager +def get_build_tracker() -> Generator[BuildTracker, None, None]: + root = os.environ.get("PIP_BUILD_TRACKER") + with contextlib.ExitStack() as ctx: + if root is None: + root = ctx.enter_context(TempDirectory(kind="build-tracker")).path + ctx.enter_context(update_env_context_manager(PIP_BUILD_TRACKER=root)) + logger.debug("Initialized build tracking at %s", root) + + with BuildTracker(root) as tracker: + yield tracker + + +class TrackerId(str): + """Uniquely identifying string provided to the build tracker.""" + + +class BuildTracker: + """Ensure that an sdist cannot request itself as a setup requirement. + + When an sdist is prepared, it identifies its setup requirements in the + context of ``BuildTracker.track()``. If a requirement shows up recursively, this + raises an exception. + + This stops fork bombs embedded in malicious packages.""" + + def __init__(self, root: str) -> None: + self._root = root + self._entries: dict[TrackerId, InstallRequirement] = {} + logger.debug("Created build tracker: %s", self._root) + + def __enter__(self) -> BuildTracker: + logger.debug("Entered build tracker: %s", self._root) + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.cleanup() + + def _entry_path(self, key: TrackerId) -> str: + hashed = hashlib.sha224(key.encode()).hexdigest() + return os.path.join(self._root, hashed) + + def add(self, req: InstallRequirement, key: TrackerId) -> None: + """Add an InstallRequirement to build tracking.""" + + # Get the file to write information about this requirement. + entry_path = self._entry_path(key) + + # Try reading from the file. If it exists and can be read from, a build + # is already in progress, so a LookupError is raised. + try: + with open(entry_path) as fp: + contents = fp.read() + except FileNotFoundError: + pass + else: + message = f"{req.link} is already being built: {contents}" + raise LookupError(message) + + # If we're here, req should really not be building already. + assert key not in self._entries + + # Start tracking this requirement. + with open(entry_path, "w", encoding="utf-8") as fp: + fp.write(str(req)) + self._entries[key] = req + + logger.debug("Added %s to build tracker %r", req, self._root) + + def remove(self, req: InstallRequirement, key: TrackerId) -> None: + """Remove an InstallRequirement from build tracking.""" + + # Delete the created file and the corresponding entry. + os.unlink(self._entry_path(key)) + del self._entries[key] + + logger.debug("Removed %s from build tracker %r", req, self._root) + + def cleanup(self) -> None: + for key, req in list(self._entries.items()): + self.remove(req, key) + + logger.debug("Removed build tracker: %r", self._root) + + @contextlib.contextmanager + def track(self, req: InstallRequirement, key: str) -> Generator[None, None, None]: + """Ensure that `key` cannot install itself as a setup requirement. 
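+
+        Note (added summary): add() registers the requirement on entry and
+        remove() unregisters it on exit, so a recursive attempt to build
+        the same key raises inside add().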
+ + :raises LookupError: If `key` was already provided in a parent invocation of + the context introduced by this method.""" + tracker_id = TrackerId(key) + self.add(req, tracker_id) + yield + self.remove(req, tracker_id) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata.py new file mode 100644 index 0000000..a546809 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata.py @@ -0,0 +1,38 @@ +"""Metadata generation logic for source distributions.""" + +import os + +from pip._vendor.pyproject_hooks import BuildBackendHookCaller + +from pip._internal.build_env import BuildEnvironment +from pip._internal.exceptions import ( + InstallationSubprocessError, + MetadataGenerationFailed, +) +from pip._internal.utils.subprocess import runner_with_spinner_message +from pip._internal.utils.temp_dir import TempDirectory + + +def generate_metadata( + build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str +) -> str: + """Generate metadata using mechanisms described in PEP 517. + + Returns the generated metadata directory. + """ + metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True) + + metadata_dir = metadata_tmpdir.path + + with build_env: + # Note that BuildBackendHookCaller implements a fallback for + # prepare_metadata_for_build_wheel, so we don't have to + # consider the possibility that this hook doesn't exist. + runner = runner_with_spinner_message("Preparing metadata (pyproject.toml)") + with backend.subprocess_runner(runner): + try: + distinfo_dir = backend.prepare_metadata_for_build_wheel(metadata_dir) + except InstallationSubprocessError as error: + raise MetadataGenerationFailed(package_details=details) from error + + return os.path.join(metadata_dir, distinfo_dir) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata_editable.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata_editable.py new file mode 100644 index 0000000..27ecd7d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata_editable.py @@ -0,0 +1,41 @@ +"""Metadata generation logic for source distributions.""" + +import os + +from pip._vendor.pyproject_hooks import BuildBackendHookCaller + +from pip._internal.build_env import BuildEnvironment +from pip._internal.exceptions import ( + InstallationSubprocessError, + MetadataGenerationFailed, +) +from pip._internal.utils.subprocess import runner_with_spinner_message +from pip._internal.utils.temp_dir import TempDirectory + + +def generate_editable_metadata( + build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str +) -> str: + """Generate metadata using mechanisms described in PEP 660. + + Returns the generated metadata directory. + """ + metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True) + + metadata_dir = metadata_tmpdir.path + + with build_env: + # Note that BuildBackendHookCaller implements a fallback for + # prepare_metadata_for_build_wheel/editable, so we don't have to + # consider the possibility that this hook doesn't exist. 
+ runner = runner_with_spinner_message( + "Preparing editable metadata (pyproject.toml)" + ) + with backend.subprocess_runner(runner): + try: + distinfo_dir = backend.prepare_metadata_for_build_editable(metadata_dir) + except InstallationSubprocessError as error: + raise MetadataGenerationFailed(package_details=details) from error + + assert distinfo_dir is not None + return os.path.join(metadata_dir, distinfo_dir) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata_legacy.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata_legacy.py new file mode 100644 index 0000000..e385b5d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/metadata_legacy.py @@ -0,0 +1,73 @@ +"""Metadata generation logic for legacy source distributions.""" + +import logging +import os + +from pip._internal.build_env import BuildEnvironment +from pip._internal.cli.spinners import open_spinner +from pip._internal.exceptions import ( + InstallationError, + InstallationSubprocessError, + MetadataGenerationFailed, +) +from pip._internal.utils.setuptools_build import make_setuptools_egg_info_args +from pip._internal.utils.subprocess import call_subprocess +from pip._internal.utils.temp_dir import TempDirectory + +logger = logging.getLogger(__name__) + + +def _find_egg_info(directory: str) -> str: + """Find an .egg-info subdirectory in `directory`.""" + filenames = [f for f in os.listdir(directory) if f.endswith(".egg-info")] + + if not filenames: + raise InstallationError(f"No .egg-info directory found in {directory}") + + if len(filenames) > 1: + raise InstallationError( + f"More than one .egg-info directory found in {directory}" + ) + + return os.path.join(directory, filenames[0]) + + +def generate_metadata( + build_env: BuildEnvironment, + setup_py_path: str, + source_dir: str, + isolated: bool, + details: str, +) -> str: + """Generate metadata using setup.py-based defacto mechanisms. + + Returns the generated metadata directory. + """ + logger.debug( + "Running setup.py (path:%s) egg_info for package %s", + setup_py_path, + details, + ) + + egg_info_dir = TempDirectory(kind="pip-egg-info", globally_managed=True).path + + args = make_setuptools_egg_info_args( + setup_py_path, + egg_info_dir=egg_info_dir, + no_user_config=isolated, + ) + + with build_env: + with open_spinner("Preparing metadata (setup.py)") as spinner: + try: + call_subprocess( + args, + cwd=source_dir, + command_desc="python setup.py egg_info", + spinner=spinner, + ) + except InstallationSubprocessError as error: + raise MetadataGenerationFailed(package_details=details) from error + + # Return the .egg-info directory. + return _find_egg_info(egg_info_dir) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel.py new file mode 100644 index 0000000..5e404c6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +import logging +import os + +from pip._vendor.pyproject_hooks import BuildBackendHookCaller + +from pip._internal.utils.subprocess import runner_with_spinner_message + +logger = logging.getLogger(__name__) + + +def build_wheel_pep517( + name: str, + backend: BuildBackendHookCaller, + metadata_directory: str, + tempd: str, +) -> str | None: + """Build one InstallRequirement using the PEP 517 build process. + + Returns path to wheel if successfully built. 
Otherwise, returns None. + """ + assert metadata_directory is not None + try: + logger.debug("Destination directory: %s", tempd) + + runner = runner_with_spinner_message( + f"Building wheel for {name} (pyproject.toml)" + ) + with backend.subprocess_runner(runner): + wheel_name = backend.build_wheel( + tempd, + metadata_directory=metadata_directory, + ) + except Exception: + logger.error("Failed building wheel for %s", name) + return None + return os.path.join(tempd, wheel_name) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel_editable.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel_editable.py new file mode 100644 index 0000000..521bd55 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel_editable.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +import logging +import os + +from pip._vendor.pyproject_hooks import BuildBackendHookCaller, HookMissing + +from pip._internal.utils.subprocess import runner_with_spinner_message + +logger = logging.getLogger(__name__) + + +def build_wheel_editable( + name: str, + backend: BuildBackendHookCaller, + metadata_directory: str, + tempd: str, +) -> str | None: + """Build one InstallRequirement using the PEP 660 build process. + + Returns path to wheel if successfully built. Otherwise, returns None. + """ + assert metadata_directory is not None + try: + logger.debug("Destination directory: %s", tempd) + + runner = runner_with_spinner_message( + f"Building editable for {name} (pyproject.toml)" + ) + with backend.subprocess_runner(runner): + try: + wheel_name = backend.build_editable( + tempd, + metadata_directory=metadata_directory, + ) + except HookMissing as e: + logger.error( + "Cannot build editable %s because the build " + "backend does not have the %s hook", + name, + e, + ) + return None + except Exception: + logger.error("Failed building editable for %s", name) + return None + return os.path.join(tempd, wheel_name) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel_legacy.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel_legacy.py new file mode 100644 index 0000000..02ef8e5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/build/wheel_legacy.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +import logging +import os.path + +from pip._internal.cli.spinners import open_spinner +from pip._internal.utils.deprecation import deprecated +from pip._internal.utils.setuptools_build import make_setuptools_bdist_wheel_args +from pip._internal.utils.subprocess import call_subprocess, format_command_args + +logger = logging.getLogger(__name__) + + +def format_command_result( + command_args: list[str], + command_output: str, +) -> str: + """Format command information for logging.""" + command_desc = format_command_args(command_args) + text = f"Command arguments: {command_desc}\n" + + if not command_output: + text += "Command output: None" + elif logger.getEffectiveLevel() > logging.DEBUG: + text += "Command output: [use --verbose to show]" + else: + if not command_output.endswith("\n"): + command_output += "\n" + text += f"Command output:\n{command_output}" + + return text + + +def get_legacy_build_wheel_path( + names: list[str], + temp_dir: str, + name: str, + command_args: list[str], + command_output: str, +) -> str | None: + """Return the path to the wheel in the temporary build directory.""" + # Sort for determinism. 
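+    # (With multiple wheels present, "choosing first" below would otherwise
+    # depend on arbitrary os.listdir() ordering.)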
+ names = sorted(names) + if not names: + msg = f"Legacy build of wheel for {name!r} created no files.\n" + msg += format_command_result(command_args, command_output) + logger.warning(msg) + return None + + if len(names) > 1: + msg = ( + f"Legacy build of wheel for {name!r} created more than one file.\n" + f"Filenames (choosing first): {names}\n" + ) + msg += format_command_result(command_args, command_output) + logger.warning(msg) + + return os.path.join(temp_dir, names[0]) + + +def build_wheel_legacy( + name: str, + setup_py_path: str, + source_dir: str, + global_options: list[str], + build_options: list[str], + tempd: str, +) -> str | None: + """Build one unpacked package using the "legacy" build process. + + Returns path to wheel if successfully built. Otherwise, returns None. + """ + deprecated( + reason=( + f"Building {name!r} using the legacy setup.py bdist_wheel mechanism, " + "which will be removed in a future version." + ), + replacement=( + "to use the standardized build interface by " + "setting the `--use-pep517` option, " + "(possibly combined with `--no-build-isolation`), " + f"or adding a `pyproject.toml` file to the source tree of {name!r}" + ), + gone_in="25.3", + issue=6334, + ) + + wheel_args = make_setuptools_bdist_wheel_args( + setup_py_path, + global_options=global_options, + build_options=build_options, + destination_dir=tempd, + ) + + spin_message = f"Building wheel for {name} (setup.py)" + with open_spinner(spin_message) as spinner: + logger.debug("Destination directory: %s", tempd) + + try: + output = call_subprocess( + wheel_args, + command_desc="python setup.py bdist_wheel", + cwd=source_dir, + spinner=spinner, + ) + except Exception: + spinner.finish("error") + logger.error("Failed building wheel for %s", name) + return None + + names = os.listdir(tempd) + wheel_path = get_legacy_build_wheel_path( + names=names, + temp_dir=tempd, + name=name, + command_args=wheel_args, + command_output=output, + ) + return wheel_path diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/check.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/check.py new file mode 100644 index 0000000..2d71fa5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/check.py @@ -0,0 +1,175 @@ +"""Validation of dependencies of packages""" + +from __future__ import annotations + +import logging +from collections.abc import Generator, Iterable +from contextlib import suppress +from email.parser import Parser +from functools import reduce +from typing import ( + Callable, + NamedTuple, +) + +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.tags import Tag, parse_tag +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import Version + +from pip._internal.distributions import make_distribution_for_install_requirement +from pip._internal.metadata import get_default_environment +from pip._internal.metadata.base import BaseDistribution +from pip._internal.req.req_install import InstallRequirement + +logger = logging.getLogger(__name__) + + +class PackageDetails(NamedTuple): + version: Version + dependencies: list[Requirement] + + +# Shorthands +PackageSet = dict[NormalizedName, PackageDetails] +Missing = tuple[NormalizedName, Requirement] +Conflicting = tuple[NormalizedName, Version, Requirement] + +MissingDict = dict[NormalizedName, list[Missing]] +ConflictingDict = dict[NormalizedName, list[Conflicting]] +CheckResult = tuple[MissingDict, 
ConflictingDict] +ConflictDetails = tuple[PackageSet, CheckResult] + + +def create_package_set_from_installed() -> tuple[PackageSet, bool]: + """Converts a list of distributions into a PackageSet.""" + package_set = {} + problems = False + env = get_default_environment() + for dist in env.iter_installed_distributions(local_only=False, skip=()): + name = dist.canonical_name + try: + dependencies = list(dist.iter_dependencies()) + package_set[name] = PackageDetails(dist.version, dependencies) + except (OSError, ValueError) as e: + # Don't crash on unreadable or broken metadata. + logger.warning("Error parsing dependencies of %s: %s", name, e) + problems = True + return package_set, problems + + +def check_package_set( + package_set: PackageSet, should_ignore: Callable[[str], bool] | None = None +) -> CheckResult: + """Check if a package set is consistent + + If should_ignore is passed, it should be a callable that takes a + package name and returns a boolean. + """ + + missing = {} + conflicting = {} + + for package_name, package_detail in package_set.items(): + # Info about dependencies of package_name + missing_deps: set[Missing] = set() + conflicting_deps: set[Conflicting] = set() + + if should_ignore and should_ignore(package_name): + continue + + for req in package_detail.dependencies: + name = canonicalize_name(req.name) + + # Check if it's missing + if name not in package_set: + missed = True + if req.marker is not None: + missed = req.marker.evaluate({"extra": ""}) + if missed: + missing_deps.add((name, req)) + continue + + # Check if there's a conflict + version = package_set[name].version + if not req.specifier.contains(version, prereleases=True): + conflicting_deps.add((name, version, req)) + + if missing_deps: + missing[package_name] = sorted(missing_deps, key=str) + if conflicting_deps: + conflicting[package_name] = sorted(conflicting_deps, key=str) + + return missing, conflicting + + +def check_install_conflicts(to_install: list[InstallRequirement]) -> ConflictDetails: + """For checking if the dependency graph would be consistent after \ + installing given requirements + """ + # Start from the current state + package_set, _ = create_package_set_from_installed() + # Install packages + would_be_installed = _simulate_installation_of(to_install, package_set) + + # Only warn about directly-dependent packages; create a whitelist of them + whitelist = _create_whitelist(would_be_installed, package_set) + + return ( + package_set, + check_package_set( + package_set, should_ignore=lambda name: name not in whitelist + ), + ) + + +def check_unsupported( + packages: Iterable[BaseDistribution], + supported_tags: Iterable[Tag], +) -> Generator[BaseDistribution, None, None]: + for p in packages: + with suppress(FileNotFoundError): + wheel_file = p.read_text("WHEEL") + wheel_tags: frozenset[Tag] = reduce( + frozenset.union, + map(parse_tag, Parser().parsestr(wheel_file).get_all("Tag", [])), + frozenset(), + ) + if wheel_tags.isdisjoint(supported_tags): + yield p + + +def _simulate_installation_of( + to_install: list[InstallRequirement], package_set: PackageSet +) -> set[NormalizedName]: + """Computes the version of packages after installing to_install.""" + # Keep track of packages that were installed + installed = set() + + # Modify it as installing requirement_set would (assuming no errors) + for inst_req in to_install: + abstract_dist = make_distribution_for_install_requirement(inst_req) + dist = abstract_dist.get_metadata_distribution() + name = dist.canonical_name + package_set[name] = 
PackageDetails(dist.version, list(dist.iter_dependencies())) + + installed.add(name) + + return installed + + +def _create_whitelist( + would_be_installed: set[NormalizedName], package_set: PackageSet +) -> set[NormalizedName]: + packages_affected = set(would_be_installed) + + for package_name in package_set: + if package_name in packages_affected: + continue + + for req in package_set[package_name].dependencies: + if canonicalize_name(req.name) in packages_affected: + packages_affected.add(package_name) + break + + return packages_affected diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/freeze.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/freeze.py new file mode 100644 index 0000000..486a833 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/freeze.py @@ -0,0 +1,259 @@ +from __future__ import annotations + +import collections +import logging +import os +from collections.abc import Container, Generator, Iterable +from dataclasses import dataclass, field +from typing import NamedTuple + +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import InvalidVersion + +from pip._internal.exceptions import BadCommand, InstallationError +from pip._internal.metadata import BaseDistribution, get_environment +from pip._internal.req.constructors import ( + install_req_from_editable, + install_req_from_line, +) +from pip._internal.req.req_file import COMMENT_RE +from pip._internal.utils.direct_url_helpers import direct_url_as_pep440_direct_reference + +logger = logging.getLogger(__name__) + + +class _EditableInfo(NamedTuple): + requirement: str + comments: list[str] + + +def freeze( + requirement: list[str] | None = None, + local_only: bool = False, + user_only: bool = False, + paths: list[str] | None = None, + isolated: bool = False, + exclude_editable: bool = False, + skip: Container[str] = (), +) -> Generator[str, None, None]: + installations: dict[str, FrozenRequirement] = {} + + dists = get_environment(paths).iter_installed_distributions( + local_only=local_only, + skip=(), + user_only=user_only, + ) + for dist in dists: + req = FrozenRequirement.from_dist(dist) + if exclude_editable and req.editable: + continue + installations[req.canonical_name] = req + + if requirement: + # the options that don't get turned into an InstallRequirement + # should only be emitted once, even if the same option is in multiple + # requirements files, so we need to keep track of what has been emitted + # so that we don't emit it again if it's seen again + emitted_options: set[str] = set() + # keep track of which files a requirement is in so that we can + # give an accurate warning if a requirement appears multiple times. 
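+        # (req_files maps each requirement name to the requirement files in
+        # which it was seen.)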
+ req_files: dict[str, list[str]] = collections.defaultdict(list) + for req_file_path in requirement: + with open(req_file_path) as req_file: + for line in req_file: + if ( + not line.strip() + or line.strip().startswith("#") + or line.startswith( + ( + "-r", + "--requirement", + "-f", + "--find-links", + "-i", + "--index-url", + "--pre", + "--trusted-host", + "--process-dependency-links", + "--extra-index-url", + "--use-feature", + ) + ) + ): + line = line.rstrip() + if line not in emitted_options: + emitted_options.add(line) + yield line + continue + + if line.startswith(("-e", "--editable")): + if line.startswith("-e"): + line = line[2:].strip() + else: + line = line[len("--editable") :].strip().lstrip("=") + line_req = install_req_from_editable( + line, + isolated=isolated, + ) + else: + line_req = install_req_from_line( + COMMENT_RE.sub("", line).strip(), + isolated=isolated, + ) + + if not line_req.name: + logger.info( + "Skipping line in requirement file [%s] because " + "it's not clear what it would install: %s", + req_file_path, + line.strip(), + ) + logger.info( + " (add #egg=PackageName to the URL to avoid" + " this warning)" + ) + else: + line_req_canonical_name = canonicalize_name(line_req.name) + if line_req_canonical_name not in installations: + # either it's not installed, or it is installed + # but has been processed already + if not req_files[line_req.name]: + logger.warning( + "Requirement file [%s] contains %s, but " + "package %r is not installed", + req_file_path, + COMMENT_RE.sub("", line).strip(), + line_req.name, + ) + else: + req_files[line_req.name].append(req_file_path) + else: + yield str(installations[line_req_canonical_name]).rstrip() + del installations[line_req_canonical_name] + req_files[line_req.name].append(req_file_path) + + # Warn about requirements that were included multiple times (in a + # single requirements file or in different requirements files). + for name, files in req_files.items(): + if len(files) > 1: + logger.warning( + "Requirement %s included multiple times [%s]", + name, + ", ".join(sorted(set(files))), + ) + + yield ("## The following requirements were added by pip freeze:") + for installation in sorted(installations.values(), key=lambda x: x.name.lower()): + if installation.canonical_name not in skip: + yield str(installation).rstrip() + + +def _format_as_name_version(dist: BaseDistribution) -> str: + try: + dist_version = dist.version + except InvalidVersion: + # legacy version + return f"{dist.raw_name}==={dist.raw_version}" + else: + return f"{dist.raw_name}=={dist_version}" + + +def _get_editable_info(dist: BaseDistribution) -> _EditableInfo: + """ + Compute and return values (req, comments) for use in + FrozenRequirement.from_dist(). 
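+
+    Roughly (added summary): for a recognized VCS checkout this yields an
+    editable requirement string such as ``git+https://...#egg=name``;
+    otherwise the local project path is returned together with an
+    explanatory comment.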
+ """ + editable_project_location = dist.editable_project_location + assert editable_project_location + location = os.path.normcase(os.path.abspath(editable_project_location)) + + from pip._internal.vcs import RemoteNotFoundError, RemoteNotValidError, vcs + + vcs_backend = vcs.get_backend_for_dir(location) + + if vcs_backend is None: + display = _format_as_name_version(dist) + logger.debug( + 'No VCS found for editable requirement "%s" in: %r', + display, + location, + ) + return _EditableInfo( + requirement=location, + comments=[f"# Editable install with no version control ({display})"], + ) + + vcs_name = type(vcs_backend).__name__ + + try: + req = vcs_backend.get_src_requirement(location, dist.raw_name) + except RemoteNotFoundError: + display = _format_as_name_version(dist) + return _EditableInfo( + requirement=location, + comments=[f"# Editable {vcs_name} install with no remote ({display})"], + ) + except RemoteNotValidError as ex: + display = _format_as_name_version(dist) + return _EditableInfo( + requirement=location, + comments=[ + f"# Editable {vcs_name} install ({display}) with either a deleted " + f"local remote or invalid URI:", + f"# '{ex.url}'", + ], + ) + except BadCommand: + logger.warning( + "cannot determine version of editable source in %s " + "(%s command not found in path)", + location, + vcs_backend.name, + ) + return _EditableInfo(requirement=location, comments=[]) + except InstallationError as exc: + logger.warning("Error when trying to get requirement for VCS system %s", exc) + else: + return _EditableInfo(requirement=req, comments=[]) + + logger.warning("Could not determine repository location of %s", location) + + return _EditableInfo( + requirement=location, + comments=["## !! Could not determine repository location"], + ) + + +@dataclass(frozen=True) +class FrozenRequirement: + name: str + req: str + editable: bool + comments: Iterable[str] = field(default_factory=tuple) + + @property + def canonical_name(self) -> NormalizedName: + return canonicalize_name(self.name) + + @classmethod + def from_dist(cls, dist: BaseDistribution) -> FrozenRequirement: + editable = dist.editable + if editable: + req, comments = _get_editable_info(dist) + else: + comments = [] + direct_url = dist.direct_url + if direct_url: + # if PEP 610 metadata is present, use it + req = direct_url_as_pep440_direct_reference(direct_url, dist.raw_name) + else: + # name==version requirement + req = _format_as_name_version(dist) + + return cls(dist.raw_name, req, editable, comments=comments) + + def __str__(self) -> str: + req = self.req + if self.editable: + req = f"-e {req}" + return "\n".join(list(self.comments) + [str(req)]) + "\n" diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__init__.py new file mode 100644 index 0000000..2645a4a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__init__.py @@ -0,0 +1 @@ +"""For modules related to installing packages.""" diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b15e75b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/editable_legacy.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/editable_legacy.cpython-312.pyc new file mode 100644 index 0000000..5fad6f5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/editable_legacy.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-312.pyc new file mode 100644 index 0000000..aa3e46c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/editable_legacy.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/editable_legacy.py new file mode 100644 index 0000000..0603d3d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/editable_legacy.py @@ -0,0 +1,48 @@ +"""Legacy editable installation process, i.e. `setup.py develop`.""" + +from __future__ import annotations + +import logging +from collections.abc import Sequence + +from pip._internal.build_env import BuildEnvironment +from pip._internal.utils.logging import indent_log +from pip._internal.utils.setuptools_build import make_setuptools_develop_args +from pip._internal.utils.subprocess import call_subprocess + +logger = logging.getLogger(__name__) + + +def install_editable( + *, + global_options: Sequence[str], + prefix: str | None, + home: str | None, + use_user_site: bool, + name: str, + setup_py_path: str, + isolated: bool, + build_env: BuildEnvironment, + unpacked_source_directory: str, +) -> None: + """Install a package in editable mode. Most arguments are pass-through + to setuptools. 
+ """ + logger.info("Running setup.py develop for %s", name) + + args = make_setuptools_develop_args( + setup_py_path, + global_options=global_options, + no_user_config=isolated, + prefix=prefix, + home=home, + use_user_site=use_user_site, + ) + + with indent_log(): + with build_env: + call_subprocess( + args, + command_desc="python setup.py develop", + cwd=unpacked_source_directory, + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/wheel.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/wheel.py new file mode 100644 index 0000000..2724f15 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/install/wheel.py @@ -0,0 +1,746 @@ +"""Support for installing and building the "wheel" binary package format.""" + +from __future__ import annotations + +import collections +import compileall +import contextlib +import csv +import importlib +import logging +import os.path +import re +import shutil +import sys +import textwrap +import warnings +from base64 import urlsafe_b64encode +from collections.abc import Generator, Iterable, Iterator, Sequence +from email.message import Message +from itertools import chain, filterfalse, starmap +from typing import ( + IO, + Any, + BinaryIO, + Callable, + NewType, + Protocol, + Union, + cast, +) +from zipfile import ZipFile, ZipInfo + +from pip._vendor.distlib.scripts import ScriptMaker +from pip._vendor.distlib.util import get_export_entry +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.exceptions import InstallationError +from pip._internal.locations import get_major_minor_version +from pip._internal.metadata import ( + BaseDistribution, + FilesystemWheel, + get_wheel_distribution, +) +from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl +from pip._internal.models.scheme import SCHEME_KEYS, Scheme +from pip._internal.utils.filesystem import adjacent_tmp_file, replace +from pip._internal.utils.misc import StreamWrapper, ensure_dir, hash_file, partition +from pip._internal.utils.unpacking import ( + current_umask, + is_within_directory, + set_extracted_file_to_default_mode_plus_executable, + zip_item_is_executable, +) +from pip._internal.utils.wheel import parse_wheel + + +class File(Protocol): + src_record_path: RecordPath + dest_path: str + changed: bool + + def save(self) -> None: + pass + + +logger = logging.getLogger(__name__) + +RecordPath = NewType("RecordPath", str) +InstalledCSVRow = tuple[RecordPath, str, Union[int, str]] + + +def rehash(path: str, blocksize: int = 1 << 20) -> tuple[str, str]: + """Return (encoded_digest, length) for path using hashlib.sha256()""" + h, length = hash_file(path, blocksize) + digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=") + return (digest, str(length)) + + +def csv_io_kwargs(mode: str) -> dict[str, Any]: + """Return keyword arguments to properly open a CSV file + in the given mode. + """ + return {"mode": mode, "newline": "", "encoding": "utf-8"} + + +def fix_script(path: str) -> bool: + """Replace #!python with #!/path/to/python + Return True if file was changed. + """ + # XXX RECORD hashes will need to be updated + assert os.path.isfile(path) + + with open(path, "rb") as script: + firstline = script.readline() + if not firstline.startswith(b"#!python"): + return False + exename = sys.executable.encode(sys.getfilesystemencoding()) + firstline = b"#!" 
+ exename + os.linesep.encode("ascii") + rest = script.read() + with open(path, "wb") as script: + script.write(firstline) + script.write(rest) + return True + + +def wheel_root_is_purelib(metadata: Message) -> bool: + return metadata.get("Root-Is-Purelib", "").lower() == "true" + + +def get_entrypoints(dist: BaseDistribution) -> tuple[dict[str, str], dict[str, str]]: + console_scripts = {} + gui_scripts = {} + for entry_point in dist.iter_entry_points(): + if entry_point.group == "console_scripts": + console_scripts[entry_point.name] = entry_point.value + elif entry_point.group == "gui_scripts": + gui_scripts[entry_point.name] = entry_point.value + return console_scripts, gui_scripts + + +def message_about_scripts_not_on_PATH(scripts: Sequence[str]) -> str | None: + """Determine if any scripts are not on PATH and format a warning. + Returns a warning message if one or more scripts are not on PATH, + otherwise None. + """ + if not scripts: + return None + + # Group scripts by the path they were installed in + grouped_by_dir: dict[str, set[str]] = collections.defaultdict(set) + for destfile in scripts: + parent_dir = os.path.dirname(destfile) + script_name = os.path.basename(destfile) + grouped_by_dir[parent_dir].add(script_name) + + # We don't want to warn for directories that are on PATH. + not_warn_dirs = [ + os.path.normcase(os.path.normpath(i)).rstrip(os.sep) + for i in os.environ.get("PATH", "").split(os.pathsep) + ] + # If an executable sits with sys.executable, we don't warn for it. + # This covers the case of venv invocations without activating the venv. + not_warn_dirs.append( + os.path.normcase(os.path.normpath(os.path.dirname(sys.executable))) + ) + warn_for: dict[str, set[str]] = { + parent_dir: scripts + for parent_dir, scripts in grouped_by_dir.items() + if os.path.normcase(os.path.normpath(parent_dir)) not in not_warn_dirs + } + if not warn_for: + return None + + # Format a message + msg_lines = [] + for parent_dir, dir_scripts in warn_for.items(): + sorted_scripts: list[str] = sorted(dir_scripts) + if len(sorted_scripts) == 1: + start_text = f"script {sorted_scripts[0]} is" + else: + start_text = "scripts {} are".format( + ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1] + ) + + msg_lines.append( + f"The {start_text} installed in '{parent_dir}' which is not on PATH." + ) + + last_line_fmt = ( + "Consider adding {} to PATH or, if you prefer " + "to suppress this warning, use --no-warn-script-location." + ) + if len(msg_lines) == 1: + msg_lines.append(last_line_fmt.format("this directory")) + else: + msg_lines.append(last_line_fmt.format("these directories")) + + # Add a note if any directory starts with ~ + warn_for_tilde = any( + i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i + ) + if warn_for_tilde: + tilde_warning_msg = ( + "NOTE: The current PATH contains path(s) starting with `~`, " + "which may not be expanded by all applications." + ) + msg_lines.append(tilde_warning_msg) + + # Returns the formatted multiline message + return "\n".join(msg_lines) + + +def _normalized_outrows( + outrows: Iterable[InstalledCSVRow], +) -> list[tuple[str, str, str]]: + """Normalize the given rows of a RECORD file. + + Items in each row are converted into str. Rows are then sorted to make + the value more predictable for tests. + + Each row is a 3-tuple (path, hash, size) and corresponds to a record of + a RECORD file (see PEP 376 and PEP 427 for details). 
For the rows + passed to this function, the size can be an integer as an int or string, + or the empty string. + """ + # Normally, there should only be one row per path, in which case the + # second and third elements don't come into play when sorting. + # However, in cases in the wild where a path might happen to occur twice, + # we don't want the sort operation to trigger an error (but still want + # determinism). Since the third element can be an int or string, we + # coerce each element to a string to avoid a TypeError in this case. + # For additional background, see-- + # https://github.com/pypa/pip/issues/5868 + return sorted( + (record_path, hash_, str(size)) for record_path, hash_, size in outrows + ) + + +def _record_to_fs_path(record_path: RecordPath, lib_dir: str) -> str: + return os.path.join(lib_dir, record_path) + + +def _fs_to_record_path(path: str, lib_dir: str) -> RecordPath: + # On Windows, do not handle relative paths if they belong to different + # logical disks + if os.path.splitdrive(path)[0].lower() == os.path.splitdrive(lib_dir)[0].lower(): + path = os.path.relpath(path, lib_dir) + + path = path.replace(os.path.sep, "/") + return cast("RecordPath", path) + + +def get_csv_rows_for_installed( + old_csv_rows: list[list[str]], + installed: dict[RecordPath, RecordPath], + changed: set[RecordPath], + generated: list[str], + lib_dir: str, +) -> list[InstalledCSVRow]: + """ + :param installed: A map from archive RECORD path to installation RECORD + path. + """ + installed_rows: list[InstalledCSVRow] = [] + for row in old_csv_rows: + if len(row) > 3: + logger.warning("RECORD line has more than three elements: %s", row) + old_record_path = cast("RecordPath", row[0]) + new_record_path = installed.pop(old_record_path, old_record_path) + if new_record_path in changed: + digest, length = rehash(_record_to_fs_path(new_record_path, lib_dir)) + else: + digest = row[1] if len(row) > 1 else "" + length = row[2] if len(row) > 2 else "" + installed_rows.append((new_record_path, digest, length)) + for f in generated: + path = _fs_to_record_path(f, lib_dir) + digest, length = rehash(f) + installed_rows.append((path, digest, length)) + return installed_rows + [ + (installed_record_path, "", "") for installed_record_path in installed.values() + ] + + +def get_console_script_specs(console: dict[str, str]) -> list[str]: + """ + Given the mapping from entrypoint name to callable, return the relevant + console script specs. + """ + # Don't mutate caller's version + console = console.copy() + + scripts_to_generate = [] + + # Special case pip and setuptools to generate versioned wrappers + # + # The issue is that some projects (specifically, pip and setuptools) use + # code in setup.py to create "versioned" entry points - pip2.7 on Python + # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into + # the wheel metadata at build time, and so if the wheel is installed with + # a *different* version of Python the entry points will be wrong. The + # correct fix for this is to enhance the metadata to be able to describe + # such versioned entry points. + # Currently, projects using versioned entry points will either have + # incorrect versioned entry points, or they will not be able to distribute + # "universal" wheels (i.e., they will need a wheel per Python version). + # + # Because setuptools and pip are bundled with _ensurepip and virtualenv, + # we need to use universal wheels. 
As a workaround, we + # override the versioned entry points in the wheel and generate the + # correct ones. + # + # To add the level of hack in this section of code, in order to support + # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment + # variable which will control which version scripts get installed. + # + # ENSUREPIP_OPTIONS=altinstall + # - Only pipX.Y and easy_install-X.Y will be generated and installed + # ENSUREPIP_OPTIONS=install + # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note + # that this option is technically if ENSUREPIP_OPTIONS is set and is + # not altinstall + # DEFAULT + # - The default behavior is to install pip, pipX, pipX.Y, easy_install + # and easy_install-X.Y. + pip_script = console.pop("pip", None) + if pip_script: + if "ENSUREPIP_OPTIONS" not in os.environ: + scripts_to_generate.append("pip = " + pip_script) + + if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall": + scripts_to_generate.append(f"pip{sys.version_info[0]} = {pip_script}") + + scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}") + # Delete any other versioned pip entry points + pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)] + for k in pip_ep: + del console[k] + easy_install_script = console.pop("easy_install", None) + if easy_install_script: + if "ENSUREPIP_OPTIONS" not in os.environ: + scripts_to_generate.append("easy_install = " + easy_install_script) + + scripts_to_generate.append( + f"easy_install-{get_major_minor_version()} = {easy_install_script}" + ) + # Delete any other versioned easy_install entry points + easy_install_ep = [ + k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k) + ] + for k in easy_install_ep: + del console[k] + + # Generate the console entry points specified in the wheel + scripts_to_generate.extend(starmap("{} = {}".format, console.items())) + + return scripts_to_generate + + +class ZipBackedFile: + def __init__( + self, src_record_path: RecordPath, dest_path: str, zip_file: ZipFile + ) -> None: + self.src_record_path = src_record_path + self.dest_path = dest_path + self._zip_file = zip_file + self.changed = False + + def _getinfo(self) -> ZipInfo: + return self._zip_file.getinfo(self.src_record_path) + + def save(self) -> None: + # When we open the output file below, any existing file is truncated + # before we start writing the new contents. This is fine in most + # cases, but can cause a segfault if pip has loaded a shared + # object (e.g. from pyopenssl through its vendored urllib3) + # Since the shared object is mmap'd an attempt to call a + # symbol in it will then cause a segfault. Unlinking the file + # allows writing of new contents while allowing the process to + # continue to use the old copy. + if os.path.exists(self.dest_path): + os.unlink(self.dest_path) + + zipinfo = self._getinfo() + + # optimization: the file is created by open(), + # skip the decompression when there is 0 bytes to decompress. 
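+ # (The copy below caps the block size at 1 MiB so large members are streamed from the zip rather than read into memory in one go.)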
+ with open(self.dest_path, "wb") as dest: + if zipinfo.file_size > 0: + with self._zip_file.open(zipinfo) as f: + blocksize = min(zipinfo.file_size, 1024 * 1024) + shutil.copyfileobj(f, dest, blocksize) + + if zip_item_is_executable(zipinfo): + set_extracted_file_to_default_mode_plus_executable(self.dest_path) + + +class ScriptFile: + def __init__(self, file: File) -> None: + self._file = file + self.src_record_path = self._file.src_record_path + self.dest_path = self._file.dest_path + self.changed = False + + def save(self) -> None: + self._file.save() + self.changed = fix_script(self.dest_path) + + +class MissingCallableSuffix(InstallationError): + def __init__(self, entry_point: str) -> None: + super().__init__( + f"Invalid script entry point: {entry_point} - A callable " + "suffix is required. See https://packaging.python.org/" + "specifications/entry-points/#use-for-scripts for more " + "information." + ) + + +def _raise_for_invalid_entrypoint(specification: str) -> None: + entry = get_export_entry(specification) + if entry is not None and entry.suffix is None: + raise MissingCallableSuffix(str(entry)) + + +class PipScriptMaker(ScriptMaker): + # Override distlib's default script template with one that + # doesn't import `re` module, allowing scripts to load faster. + script_template = textwrap.dedent( + """\ + import sys + from %(module)s import %(import_name)s + if __name__ == '__main__': + if sys.argv[0].endswith('.exe'): + sys.argv[0] = sys.argv[0][:-4] + sys.exit(%(func)s()) +""" + ) + + def make( + self, specification: str, options: dict[str, Any] | None = None + ) -> list[str]: + _raise_for_invalid_entrypoint(specification) + return super().make(specification, options) + + +def _install_wheel( # noqa: C901, PLR0915 function is too long + name: str, + wheel_zip: ZipFile, + wheel_path: str, + scheme: Scheme, + pycompile: bool = True, + warn_script_location: bool = True, + direct_url: DirectUrl | None = None, + requested: bool = False, +) -> None: + """Install a wheel. + + :param name: Name of the project to install + :param wheel_zip: open ZipFile for wheel being installed + :param scheme: Distutils scheme dictating the install directories + :param req_description: String used in place of the requirement, for + logging + :param pycompile: Whether to byte-compile installed Python files + :param warn_script_location: Whether to check that scripts are installed + into a directory on PATH + :raises UnsupportedWheel: + * when the directory holds an unpacked wheel with incompatible + Wheel-Version + * when the .dist-info dir does not match the wheel + """ + info_dir, metadata = parse_wheel(wheel_zip, name) + + if wheel_root_is_purelib(metadata): + lib_dir = scheme.purelib + else: + lib_dir = scheme.platlib + + # Record details of the files moved + # installed = files copied from the wheel to the destination + # changed = files changed while installing (scripts #! 
line typically) + # generated = files newly generated during the install (script wrappers) + installed: dict[RecordPath, RecordPath] = {} + changed: set[RecordPath] = set() + generated: list[str] = [] + + def record_installed( + srcfile: RecordPath, destfile: str, modified: bool = False + ) -> None: + """Map archive RECORD paths to installation RECORD paths.""" + newpath = _fs_to_record_path(destfile, lib_dir) + installed[srcfile] = newpath + if modified: + changed.add(newpath) + + def is_dir_path(path: RecordPath) -> bool: + return path.endswith("/") + + def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None: + if not is_within_directory(dest_dir_path, target_path): + message = ( + "The wheel {!r} has a file {!r} trying to install" + " outside the target directory {!r}" + ) + raise InstallationError( + message.format(wheel_path, target_path, dest_dir_path) + ) + + def root_scheme_file_maker( + zip_file: ZipFile, dest: str + ) -> Callable[[RecordPath], File]: + def make_root_scheme_file(record_path: RecordPath) -> File: + normed_path = os.path.normpath(record_path) + dest_path = os.path.join(dest, normed_path) + assert_no_path_traversal(dest, dest_path) + return ZipBackedFile(record_path, dest_path, zip_file) + + return make_root_scheme_file + + def data_scheme_file_maker( + zip_file: ZipFile, scheme: Scheme + ) -> Callable[[RecordPath], File]: + scheme_paths = {key: getattr(scheme, key) for key in SCHEME_KEYS} + + def make_data_scheme_file(record_path: RecordPath) -> File: + normed_path = os.path.normpath(record_path) + try: + _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2) + except ValueError: + message = ( + f"Unexpected file in {wheel_path}: {record_path!r}. .data directory" + " contents should be named like: '<scheme key>/<path>'." + ) + raise InstallationError(message) + + try: + scheme_path = scheme_paths[scheme_key] + except KeyError: + valid_scheme_keys = ", ".join(sorted(scheme_paths)) + message = ( + f"Unknown scheme key used in {wheel_path}: {scheme_key} " + f"(for file {record_path!r}).
.data directory contents " + f"should be in subdirectories named with a valid scheme " + f"key ({valid_scheme_keys})" + ) + raise InstallationError(message) + + dest_path = os.path.join(scheme_path, dest_subpath) + assert_no_path_traversal(scheme_path, dest_path) + return ZipBackedFile(record_path, dest_path, zip_file) + + return make_data_scheme_file + + def is_data_scheme_path(path: RecordPath) -> bool: + return path.split("/", 1)[0].endswith(".data") + + paths = cast(list[RecordPath], wheel_zip.namelist()) + file_paths = filterfalse(is_dir_path, paths) + root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path, file_paths) + + make_root_scheme_file = root_scheme_file_maker(wheel_zip, lib_dir) + files: Iterator[File] = map(make_root_scheme_file, root_scheme_paths) + + def is_script_scheme_path(path: RecordPath) -> bool: + parts = path.split("/", 2) + return len(parts) > 2 and parts[0].endswith(".data") and parts[1] == "scripts" + + other_scheme_paths, script_scheme_paths = partition( + is_script_scheme_path, data_scheme_paths + ) + + make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme) + other_scheme_files = map(make_data_scheme_file, other_scheme_paths) + files = chain(files, other_scheme_files) + + # Get the defined entry points + distribution = get_wheel_distribution( + FilesystemWheel(wheel_path), + canonicalize_name(name), + ) + console, gui = get_entrypoints(distribution) + + def is_entrypoint_wrapper(file: File) -> bool: + # EP, EP.exe and EP-script.py are scripts generated for + # entry point EP by setuptools + path = file.dest_path + name = os.path.basename(path) + if name.lower().endswith(".exe"): + matchname = name[:-4] + elif name.lower().endswith("-script.py"): + matchname = name[:-10] + elif name.lower().endswith(".pya"): + matchname = name[:-4] + else: + matchname = name + # Ignore setuptools-generated scripts + return matchname in console or matchname in gui + + script_scheme_files: Iterator[File] = map( + make_data_scheme_file, script_scheme_paths + ) + script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files) + script_scheme_files = map(ScriptFile, script_scheme_files) + files = chain(files, script_scheme_files) + + existing_parents = set() + for file in files: + # directory creation is lazy and after file filtering + # to ensure we don't install empty dirs; empty dirs can't be + # uninstalled. + parent_dir = os.path.dirname(file.dest_path) + if parent_dir not in existing_parents: + ensure_dir(parent_dir) + existing_parents.add(parent_dir) + file.save() + record_installed(file.src_record_path, file.dest_path, file.changed) + + def pyc_source_file_paths() -> Generator[str, None, None]: + # We de-duplicate installation paths, since there can be overlap (e.g. + # file in .data maps to same location as file in wheel root). + # Sorting installation paths makes it easier to reproduce and debug + # issues related to permissions on existing files. 
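+ # Anything that is not an existing, regular .py file under lib_dir is filtered out by the checks below.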
+ for installed_path in sorted(set(installed.values())): + full_installed_path = os.path.join(lib_dir, installed_path) + if not os.path.isfile(full_installed_path): + continue + if not full_installed_path.endswith(".py"): + continue + yield full_installed_path + + def pyc_output_path(path: str) -> str: + """Return the path the pyc file would have been written to.""" + return importlib.util.cache_from_source(path) + + # Compile all of the pyc files for the installed files + if pycompile: + with contextlib.redirect_stdout( + StreamWrapper.from_stream(sys.stdout) + ) as stdout: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + for path in pyc_source_file_paths(): + success = compileall.compile_file(path, force=True, quiet=True) + if success: + pyc_path = pyc_output_path(path) + assert os.path.exists(pyc_path) + pyc_record_path = cast( + "RecordPath", pyc_path.replace(os.path.sep, "/") + ) + record_installed(pyc_record_path, pyc_path) + logger.debug(stdout.getvalue()) + + maker = PipScriptMaker(None, scheme.scripts) + + # Ensure old scripts are overwritten. + # See https://github.com/pypa/pip/issues/1800 + maker.clobber = True + + # Ensure we don't generate any variants for scripts because this is almost + # never what somebody wants. + # See https://bitbucket.org/pypa/distlib/issue/35/ + maker.variants = {""} + + # This is required because otherwise distlib creates scripts that are not + # executable. + # See https://bitbucket.org/pypa/distlib/issue/32/ + maker.set_mode = True + + # Generate the console and GUI entry points specified in the wheel + scripts_to_generate = get_console_script_specs(console) + + gui_scripts_to_generate = list(starmap("{} = {}".format, gui.items())) + + generated_console_scripts = maker.make_multiple(scripts_to_generate) + generated.extend(generated_console_scripts) + + generated.extend(maker.make_multiple(gui_scripts_to_generate, {"gui": True})) + + if warn_script_location: + msg = message_about_scripts_not_on_PATH(generated_console_scripts) + if msg is not None: + logger.warning(msg) + + generated_file_mode = 0o666 & ~current_umask() + + @contextlib.contextmanager + def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]: + with adjacent_tmp_file(path, **kwargs) as f: + yield f + os.chmod(f.name, generated_file_mode) + replace(f.name, path) + + dest_info_dir = os.path.join(lib_dir, info_dir) + + # Record pip as the installer + installer_path = os.path.join(dest_info_dir, "INSTALLER") + with _generate_file(installer_path) as installer_file: + installer_file.write(b"pip\n") + generated.append(installer_path) + + # Record the PEP 610 direct URL reference + if direct_url is not None: + direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME) + with _generate_file(direct_url_path) as direct_url_file: + direct_url_file.write(direct_url.to_json().encode("utf-8")) + generated.append(direct_url_path) + + # Record the REQUESTED file + if requested: + requested_path = os.path.join(dest_info_dir, "REQUESTED") + with open(requested_path, "wb"): + pass + generated.append(requested_path) + + record_text = distribution.read_text("RECORD") + record_rows = list(csv.reader(record_text.splitlines())) + + rows = get_csv_rows_for_installed( + record_rows, + installed=installed, + changed=changed, + generated=generated, + lib_dir=lib_dir, + ) + + # Record details of all files installed + record_path = os.path.join(dest_info_dir, "RECORD") + + with _generate_file(record_path, **csv_io_kwargs("w")) as record_file: + # 
Explicitly cast to typing.IO[str] as a workaround for the mypy error: + # "writer" has incompatible type "BinaryIO"; expected "_Writer" + writer = csv.writer(cast("IO[str]", record_file)) + writer.writerows(_normalized_outrows(rows)) + + +@contextlib.contextmanager +def req_error_context(req_description: str) -> Generator[None, None, None]: + try: + yield + except InstallationError as e: + message = f"For req: {req_description}. {e.args[0]}" + raise InstallationError(message) from e + + +def install_wheel( + name: str, + wheel_path: str, + scheme: Scheme, + req_description: str, + pycompile: bool = True, + warn_script_location: bool = True, + direct_url: DirectUrl | None = None, + requested: bool = False, +) -> None: + with ZipFile(wheel_path, allowZip64=True) as z: + with req_error_context(req_description): + _install_wheel( + name=name, + wheel_zip=z, + wheel_path=wheel_path, + scheme=scheme, + pycompile=pycompile, + warn_script_location=warn_script_location, + direct_url=direct_url, + requested=requested, + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/operations/prepare.py b/.venv/lib/python3.12/site-packages/pip/_internal/operations/prepare.py new file mode 100644 index 0000000..00b1a33 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/operations/prepare.py @@ -0,0 +1,742 @@ +"""Prepares a distribution for installation""" + +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False +from __future__ import annotations + +import mimetypes +import os +import shutil +from collections.abc import Iterable +from dataclasses import dataclass +from pathlib import Path +from typing import TYPE_CHECKING + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.build_env import BuildEnvironmentInstaller +from pip._internal.distributions import make_distribution_for_install_requirement +from pip._internal.distributions.installed import InstalledDistribution +from pip._internal.exceptions import ( + DirectoryUrlHashUnsupported, + HashMismatch, + HashUnpinned, + InstallationError, + MetadataInconsistent, + NetworkConnectionError, + VcsHashUnsupported, +) +from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution, get_metadata_distribution +from pip._internal.models.direct_url import ArchiveInfo +from pip._internal.models.link import Link +from pip._internal.models.wheel import Wheel +from pip._internal.network.download import Downloader +from pip._internal.network.lazy_wheel import ( + HTTPRangeRequestUnsupported, + dist_from_wheel_url, +) +from pip._internal.network.session import PipSession +from pip._internal.operations.build.build_tracker import BuildTracker +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils._log import getLogger +from pip._internal.utils.direct_url_helpers import ( + direct_url_for_editable, + direct_url_from_link, +) +from pip._internal.utils.hashes import Hashes, MissingHashes +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + display_path, + hash_file, + hide_url, + redact_auth_from_requirement, +) +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.unpacking import unpack_file +from pip._internal.vcs import vcs + +if TYPE_CHECKING: + from pip._internal.cli.progress_bars import BarType + +logger = getLogger(__name__) + + +def _get_prepared_distribution( + req: InstallRequirement, + build_tracker: 
BuildTracker, + build_env_installer: BuildEnvironmentInstaller, + build_isolation: bool, + check_build_deps: bool, +) -> BaseDistribution: + """Prepare a distribution for installation.""" + abstract_dist = make_distribution_for_install_requirement(req) + tracker_id = abstract_dist.build_tracker_id + if tracker_id is not None: + with build_tracker.track(req, tracker_id): + abstract_dist.prepare_distribution_metadata( + build_env_installer, build_isolation, check_build_deps + ) + return abstract_dist.get_metadata_distribution() + + +def unpack_vcs_link(link: Link, location: str, verbosity: int) -> None: + vcs_backend = vcs.get_backend_for_scheme(link.scheme) + assert vcs_backend is not None + vcs_backend.unpack(location, url=hide_url(link.url), verbosity=verbosity) + + +@dataclass +class File: + path: str + content_type: str | None = None + + def __post_init__(self) -> None: + if self.content_type is None: + # Try to guess the file's MIME type. If the system MIME tables + # can't be loaded, give up. + try: + self.content_type = mimetypes.guess_type(self.path)[0] + except OSError: + pass + + +def get_http_url( + link: Link, + download: Downloader, + download_dir: str | None = None, + hashes: Hashes | None = None, +) -> File: + temp_dir = TempDirectory(kind="unpack", globally_managed=True) + # If a download dir is specified, is the file already downloaded there? + already_downloaded_path = None + if download_dir: + already_downloaded_path = _check_download_dir(link, download_dir, hashes) + + if already_downloaded_path: + from_path = already_downloaded_path + content_type = None + else: + # let's download to a tmp dir + from_path, content_type = download(link, temp_dir.path) + if hashes: + hashes.check_against_path(from_path) + + return File(from_path, content_type) + + +def get_file_url( + link: Link, download_dir: str | None = None, hashes: Hashes | None = None +) -> File: + """Get file and optionally check its hash.""" + # If a download dir is specified, is the file already there and valid? + already_downloaded_path = None + if download_dir: + already_downloaded_path = _check_download_dir(link, download_dir, hashes) + + if already_downloaded_path: + from_path = already_downloaded_path + else: + from_path = link.file_path + + # If --require-hashes is off, `hashes` is either empty, the + # link's embedded hash, or MissingHashes; it is required to + # match. If --require-hashes is on, we are satisfied by any + # hash in `hashes` matching: a URL-based or an option-based + # one; no internet-sourced hash will be in `hashes`. + if hashes: + hashes.check_against_path(from_path) + return File(from_path, None) + + +def unpack_url( + link: Link, + location: str, + download: Downloader, + verbosity: int, + download_dir: str | None = None, + hashes: Hashes | None = None, +) -> File | None: + """Unpack link into location, downloading if required. + + :param hashes: A Hashes object, one of whose embedded hashes must match, + or HashMismatch will be raised. If the Hashes is empty, no matches are + required, and unhashable types of requirements (like VCS ones, which + would ordinarily raise HashUnsupported) are allowed. 
+ """ + # non-editable vcs urls + if link.is_vcs: + unpack_vcs_link(link, location, verbosity=verbosity) + return None + + assert not link.is_existing_dir() + + # file urls + if link.is_file: + file = get_file_url(link, download_dir, hashes=hashes) + + # http urls + else: + file = get_http_url( + link, + download, + download_dir, + hashes=hashes, + ) + + # unpack the archive to the build dir location. even when only downloading + # archives, they have to be unpacked to parse dependencies, except wheels + if not link.is_wheel: + unpack_file(file.path, location, file.content_type) + + return file + + +def _check_download_dir( + link: Link, + download_dir: str, + hashes: Hashes | None, + warn_on_hash_mismatch: bool = True, +) -> str | None: + """Check download_dir for previously downloaded file with correct hash + If a correct file is found return its path else None + """ + download_path = os.path.join(download_dir, link.filename) + + if not os.path.exists(download_path): + return None + + # If already downloaded, does its hash match? + logger.info("File was already downloaded %s", download_path) + if hashes: + try: + hashes.check_against_path(download_path) + except HashMismatch: + if warn_on_hash_mismatch: + logger.warning( + "Previously-downloaded file %s has bad hash. Re-downloading.", + download_path, + ) + os.unlink(download_path) + return None + return download_path + + +class RequirementPreparer: + """Prepares a Requirement""" + + def __init__( # noqa: PLR0913 (too many parameters) + self, + *, + build_dir: str, + download_dir: str | None, + src_dir: str, + build_isolation: bool, + build_isolation_installer: BuildEnvironmentInstaller, + check_build_deps: bool, + build_tracker: BuildTracker, + session: PipSession, + progress_bar: BarType, + finder: PackageFinder, + require_hashes: bool, + use_user_site: bool, + lazy_wheel: bool, + verbosity: int, + legacy_resolver: bool, + resume_retries: int, + ) -> None: + super().__init__() + + self.src_dir = src_dir + self.build_dir = build_dir + self.build_tracker = build_tracker + self._session = session + self._download = Downloader(session, progress_bar, resume_retries) + self.finder = finder + + # Where still-packed archives should be written to. If None, they are + # not saved, and are deleted immediately after unpacking. + self.download_dir = download_dir + + # Is build isolation allowed? + self.build_isolation = build_isolation + self.build_env_installer = build_isolation_installer + + # Should check build dependencies? + self.check_build_deps = check_build_deps + + # Should hash-checking be required? + self.require_hashes = require_hashes + + # Should install in user site-packages? + self.use_user_site = use_user_site + + # Should wheels be downloaded lazily? + self.use_lazy_wheel = lazy_wheel + + # How verbose should underlying tooling be? + self.verbosity = verbosity + + # Are we using the legacy resolver? + self.legacy_resolver = legacy_resolver + + # Memoized downloaded files, as mapping of url: path. 
+ self._downloaded: dict[str, str] = {} + + # Previous "header" printed for a link-based InstallRequirement + self._previous_requirement_header = ("", "") + + def _log_preparing_link(self, req: InstallRequirement) -> None: + """Provide context for the requirement being prepared.""" + if req.link.is_file and not req.is_wheel_from_cache: + message = "Processing %s" + information = str(display_path(req.link.file_path)) + else: + message = "Collecting %s" + information = redact_auth_from_requirement(req.req) if req.req else str(req) + + # If we used req.req, inject requirement source if available (this + # would already be included if we used req directly) + if req.req and req.comes_from: + if isinstance(req.comes_from, str): + comes_from: str | None = req.comes_from + else: + comes_from = req.comes_from.from_path() + if comes_from: + information += f" (from {comes_from})" + + if (message, information) != self._previous_requirement_header: + self._previous_requirement_header = (message, information) + logger.info(message, information) + + if req.is_wheel_from_cache: + with indent_log(): + logger.info("Using cached %s", req.link.filename) + + def _ensure_link_req_src_dir( + self, req: InstallRequirement, parallel_builds: bool + ) -> None: + """Ensure source_dir of a linked InstallRequirement.""" + # Since source_dir is only set for editable requirements. + if req.link.is_wheel: + # We don't need to unpack wheels, so no need for a source + # directory. + return + assert req.source_dir is None + if req.link.is_existing_dir(): + # build local directories in-tree + req.source_dir = req.link.file_path + return + + # We always delete unpacked sdists after pip runs. + req.ensure_has_source_dir( + self.build_dir, + autodelete=True, + parallel_builds=parallel_builds, + ) + req.ensure_pristine_source_checkout() + + def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes: + # By the time this is called, the requirement's link should have + # been checked so we can tell what kind of requirements req is + # and raise some more informative errors than otherwise. + # (For example, we can raise VcsHashUnsupported for a VCS URL + # rather than HashMissing.) + if not self.require_hashes: + return req.hashes(trust_internet=True) + + # We could check these first 2 conditions inside unpack_url + # and save repetition of conditions, but then we would + # report less-useful error messages for unhashable + # requirements, complaining that there's no hash provided. + if req.link.is_vcs: + raise VcsHashUnsupported() + if req.link.is_existing_dir(): + raise DirectoryUrlHashUnsupported() + + # Unpinned packages are asking for trouble when a new version + # is uploaded. This isn't a security check, but it saves users + # a surprising hash mismatch in the future. + # file:/// URLs aren't pinnable, so don't complain about them + # not being pinned. + if not req.is_direct and not req.is_pinned: + raise HashUnpinned() + + # If known-good hashes are missing for this requirement, + # shim it with a facade object that will provoke hash + # computation and then raise a HashMissing exception + # showing the user what the hash should be. 
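+ # An empty Hashes is falsy, so the `or` below falls through to MissingHashes when no known-good hashes are available.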
+ return req.hashes(trust_internet=False) or MissingHashes() + + def _fetch_metadata_only( + self, + req: InstallRequirement, + ) -> BaseDistribution | None: + if self.legacy_resolver: + logger.debug( + "Metadata-only fetching is not used in the legacy resolver", + ) + return None + if self.require_hashes: + logger.debug( + "Metadata-only fetching is not used as hash checking is required", + ) + return None + # Try PEP 658 metadata first, then fall back to lazy wheel if unavailable. + return self._fetch_metadata_using_link_data_attr( + req + ) or self._fetch_metadata_using_lazy_wheel(req.link) + + def _fetch_metadata_using_link_data_attr( + self, + req: InstallRequirement, + ) -> BaseDistribution | None: + """Fetch metadata from the data-dist-info-metadata attribute, if possible.""" + # (1) Get the link to the metadata file, if provided by the backend. + metadata_link = req.link.metadata_link() + if metadata_link is None: + return None + assert req.req is not None + logger.verbose( + "Obtaining dependency information for %s from %s", + req.req, + metadata_link, + ) + # (2) Download the contents of the METADATA file, separate from the dist itself. + metadata_file = get_http_url( + metadata_link, + self._download, + hashes=metadata_link.as_hashes(), + ) + with open(metadata_file.path, "rb") as f: + metadata_contents = f.read() + # (3) Generate a dist just from those file contents. + metadata_dist = get_metadata_distribution( + metadata_contents, + req.link.filename, + req.req.name, + ) + # (4) Ensure the Name: field from the METADATA file matches the name from the + # install requirement. + # + # NB: raw_name will fall back to the name from the install requirement if + # the Name: field is not present, but it's noted in the raw_name docstring + # that that should NEVER happen anyway. + if canonicalize_name(metadata_dist.raw_name) != canonicalize_name(req.req.name): + raise MetadataInconsistent( + req, "Name", req.req.name, metadata_dist.raw_name + ) + return metadata_dist + + def _fetch_metadata_using_lazy_wheel( + self, + link: Link, + ) -> BaseDistribution | None: + """Fetch metadata using lazy wheel, if possible.""" + # --use-feature=fast-deps must be provided. + if not self.use_lazy_wheel: + return None + if link.is_file or not link.is_wheel: + logger.debug( + "Lazy wheel is not used as %r does not point to a remote wheel", + link, + ) + return None + + wheel = Wheel(link.filename) + name = canonicalize_name(wheel.name) + logger.info( + "Obtaining dependency information from %s %s", + name, + wheel.version, + ) + url = link.url.split("#", 1)[0] + try: + return dist_from_wheel_url(name, url, self._session) + except HTTPRangeRequestUnsupported: + logger.debug("%s does not support range requests", url) + return None + + def _complete_partial_requirements( + self, + partially_downloaded_reqs: Iterable[InstallRequirement], + parallel_builds: bool = False, + ) -> None: + """Download any requirements which were only fetched by metadata.""" + # Download to a temporary directory. These will be copied over as + # needed for downstream 'download', 'wheel', and 'install' commands. + temp_dir = TempDirectory(kind="unpack", globally_managed=True).path + + # Map each link to the requirement that owns it. This allows us to set + # `req.local_file_path` on the appropriate requirement after passing + # all the links at once into BatchDownloader. 
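+ # (This assumes each link belongs to exactly one requirement, so the mapping can recover the owner of every file the batch download reports back.)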
+ links_to_fully_download: dict[Link, InstallRequirement] = {} + for req in partially_downloaded_reqs: + assert req.link + links_to_fully_download[req.link] = req + + batch_download = self._download.batch(links_to_fully_download.keys(), temp_dir) + for link, (filepath, _) in batch_download: + logger.debug("Downloading link %s to %s", link, filepath) + req = links_to_fully_download[link] + # Record the downloaded file path so wheel reqs can extract a Distribution + # in .get_dist(). + req.local_file_path = filepath + # Record that the file is downloaded so we don't do it again in + # _prepare_linked_requirement(). + self._downloaded[req.link.url] = filepath + + # If this is an sdist, we need to unpack it after downloading, but the + # .source_dir won't be set up until we are in _prepare_linked_requirement(). + # Add the downloaded archive to the install requirement to unpack after + # preparing the source dir. + if not req.is_wheel: + req.needs_unpacked_archive(Path(filepath)) + + # This step is necessary to ensure all lazy wheels are processed + # successfully by the 'download', 'wheel', and 'install' commands. + for req in partially_downloaded_reqs: + self._prepare_linked_requirement(req, parallel_builds) + + def prepare_linked_requirement( + self, req: InstallRequirement, parallel_builds: bool = False + ) -> BaseDistribution: + """Prepare a requirement to be obtained from req.link.""" + assert req.link + self._log_preparing_link(req) + with indent_log(): + # Check if the relevant file is already available + # in the download directory + file_path = None + if self.download_dir is not None and req.link.is_wheel: + hashes = self._get_linked_req_hashes(req) + file_path = _check_download_dir( + req.link, + self.download_dir, + hashes, + # When a locally built wheel has been found in cache, we don't warn + # about re-downloading when the already downloaded wheel hash does + # not match. This is because the hash must be checked against the + # original link, not the cached link. In that case the already + # downloaded file will be removed and re-fetched from cache (which + # implies a hash check against the cache entry's origin.json). + warn_on_hash_mismatch=not req.is_wheel_from_cache, + ) + + if file_path is not None: + # The file is already available, so mark it as downloaded + self._downloaded[req.link.url] = file_path + else: + # The file is not available, attempt to fetch only metadata + metadata_dist = self._fetch_metadata_only(req) + if metadata_dist is not None: + req.needs_more_preparation = True + return metadata_dist + + # None of the optimizations worked, fully prepare the requirement + return self._prepare_linked_requirement(req, parallel_builds) + + def prepare_linked_requirements_more( + self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False + ) -> None: + """Prepare linked requirements more, if needed.""" + reqs = [req for req in reqs if req.needs_more_preparation] + for req in reqs: + # Determine if any of these requirements were already downloaded. + if self.download_dir is not None and req.link.is_wheel: + hashes = self._get_linked_req_hashes(req) + file_path = _check_download_dir(req.link, self.download_dir, hashes) + if file_path is not None: + self._downloaded[req.link.url] = file_path + req.needs_more_preparation = False + + # Prepare requirements we found were already downloaded for some + # reason. The other downloads will be completed separately.
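+ # The still-missing requirements are gathered below and handed to _complete_partial_requirements() as a single batch.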
+ partially_downloaded_reqs: list[InstallRequirement] = [] + for req in reqs: + if req.needs_more_preparation: + partially_downloaded_reqs.append(req) + else: + self._prepare_linked_requirement(req, parallel_builds) + + # TODO: separate this part out from RequirementPreparer when the v1 + # resolver can be removed! + self._complete_partial_requirements( + partially_downloaded_reqs, + parallel_builds=parallel_builds, + ) + + def _prepare_linked_requirement( + self, req: InstallRequirement, parallel_builds: bool + ) -> BaseDistribution: + assert req.link + link = req.link + + hashes = self._get_linked_req_hashes(req) + + if hashes and req.is_wheel_from_cache: + assert req.download_info is not None + assert link.is_wheel + assert link.is_file + # We need to verify hashes, and we have found the requirement in the cache + # of locally built wheels. + if ( + isinstance(req.download_info.info, ArchiveInfo) + and req.download_info.info.hashes + and hashes.has_one_of(req.download_info.info.hashes) + ): + # At this point we know the requirement was built from a hashable source + # artifact, and we verified that the cache entry's hash of the original + # artifact matches one of the hashes we expect. We don't verify hashes + # against the cached wheel, because the wheel is not the original. + hashes = None + else: + logger.warning( + "The hashes of the source archive found in cache entry " + "don't match, ignoring cached built wheel " + "and re-downloading source." + ) + req.link = req.cached_wheel_source_link + link = req.link + + self._ensure_link_req_src_dir(req, parallel_builds) + + if link.is_existing_dir(): + local_file = None + elif link.url not in self._downloaded: + try: + local_file = unpack_url( + link, + req.source_dir, + self._download, + self.verbosity, + self.download_dir, + hashes, + ) + except NetworkConnectionError as exc: + raise InstallationError( + f"Could not install requirement {req} because of HTTP " + f"error {exc} for URL {link}" + ) + else: + file_path = self._downloaded[link.url] + if hashes: + hashes.check_against_path(file_path) + local_file = File(file_path, content_type=None) + + # If download_info is set, we got it from the wheel cache. + if req.download_info is None: + # Editables don't go through this function (see + # prepare_editable_requirement). + assert not req.editable + req.download_info = direct_url_from_link(link, req.source_dir) + # Make sure we have a hash in download_info. If we got it as part of the + # URL, it will have been verified and we can rely on it. Otherwise we + # compute it from the downloaded file. + # FIXME: https://github.com/pypa/pip/issues/11943 + if ( + isinstance(req.download_info.info, ArchiveInfo) + and not req.download_info.info.hashes + and local_file + ): + hash = hash_file(local_file.path)[0].hexdigest() + # We populate info.hash for backward compatibility. + # This will automatically populate info.hashes. + req.download_info.info.hash = f"sha256={hash}" + + # For use in later processing, + # preserve the file path on the requirement. + if local_file: + req.local_file_path = local_file.path + + dist = _get_prepared_distribution( + req, + self.build_tracker, + self.build_env_installer, + self.build_isolation, + self.check_build_deps, + ) + return dist + + def save_linked_requirement(self, req: InstallRequirement) -> None: + assert self.download_dir is not None + assert req.link is not None + link = req.link + if link.is_vcs or (link.is_existing_dir() and req.editable): + # Make a .zip of the source_dir we already created. 
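+ # (req.archive() prompts before overwriting if a zip of the same name already exists in download_dir.)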
+ req.archive(self.download_dir) + return + + if link.is_existing_dir(): + logger.debug( + "Not copying link to destination directory " + "since it is a directory: %s", + link, + ) + return + if req.local_file_path is None: + # No distribution was downloaded for this requirement. + return + + download_location = os.path.join(self.download_dir, link.filename) + if not os.path.exists(download_location): + shutil.copy(req.local_file_path, download_location) + download_path = display_path(download_location) + logger.info("Saved %s", download_path) + + def prepare_editable_requirement( + self, + req: InstallRequirement, + ) -> BaseDistribution: + """Prepare an editable requirement.""" + assert req.editable, "cannot prepare a non-editable req as editable" + + logger.info("Obtaining %s", req) + + with indent_log(): + if self.require_hashes: + raise InstallationError( + f"The editable requirement {req} cannot be installed when " + "requiring hashes, because there is no single file to " + "hash." + ) + req.ensure_has_source_dir(self.src_dir) + req.update_editable() + assert req.source_dir + req.download_info = direct_url_for_editable(req.unpacked_source_directory) + + dist = _get_prepared_distribution( + req, + self.build_tracker, + self.build_env_installer, + self.build_isolation, + self.check_build_deps, + ) + + req.check_if_exists(self.use_user_site) + + return dist + + def prepare_installed_requirement( + self, + req: InstallRequirement, + skip_reason: str, + ) -> BaseDistribution: + """Prepare an already-installed requirement.""" + assert req.satisfied_by, "req should have been satisfied but isn't" + assert skip_reason is not None, ( + "did not get skip reason skipped but req.satisfied_by " + f"is set to {req.satisfied_by}" + ) + logger.info( + "Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version + ) + with indent_log(): + if self.require_hashes: + logger.debug( + "Since it is already installed, we are trusting this " + "package without checking its hash. To ensure a " + "completely repeatable environment, install into an " + "empty virtualenv." + ) + return InstalledDistribution(req).get_metadata_distribution() diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/pyproject.py b/.venv/lib/python3.12/site-packages/pip/_internal/pyproject.py new file mode 100644 index 0000000..5c08b60 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/pyproject.py @@ -0,0 +1,182 @@ +from __future__ import annotations + +import importlib.util +import os +from collections import namedtuple +from typing import Any + +from pip._vendor.packaging.requirements import InvalidRequirement + +from pip._internal.exceptions import ( + InstallationError, + InvalidPyProjectBuildRequires, + MissingPyProjectBuildRequires, +) +from pip._internal.utils.compat import tomllib +from pip._internal.utils.packaging import get_requirement + + +def _is_list_of_str(obj: Any) -> bool: + return isinstance(obj, list) and all(isinstance(item, str) for item in obj) + + +def make_pyproject_path(unpacked_source_directory: str) -> str: + return os.path.join(unpacked_source_directory, "pyproject.toml") + + +BuildSystemDetails = namedtuple( + "BuildSystemDetails", ["requires", "backend", "check", "backend_path"] +) + + +def load_pyproject_toml( + use_pep517: bool | None, pyproject_toml: str, setup_py: str, req_name: str +) -> BuildSystemDetails | None: + """Load the pyproject.toml file. + + Parameters: + use_pep517 - Has the user requested PEP 517 processing? 
None + means the user hasn't explicitly specified. + pyproject_toml - Location of the project's pyproject.toml file + setup_py - Location of the project's setup.py file + req_name - The name of the requirement we're processing (for + error reporting) + + Returns: + None if we should use the legacy code path, otherwise a tuple + ( + requirements from pyproject.toml, + name of PEP 517 backend, + requirements we should check are installed after setting + up the build environment + directory paths to import the backend from (backend-path), + relative to the project root. + ) + """ + has_pyproject = os.path.isfile(pyproject_toml) + has_setup = os.path.isfile(setup_py) + + if not has_pyproject and not has_setup: + raise InstallationError( + f"{req_name} does not appear to be a Python project: " + f"neither 'setup.py' nor 'pyproject.toml' found." + ) + + if has_pyproject: + with open(pyproject_toml, encoding="utf-8") as f: + pp_toml = tomllib.loads(f.read()) + build_system = pp_toml.get("build-system") + else: + build_system = None + + # The following cases must use PEP 517 + # We check for use_pep517 being non-None and falsy because that means + # the user explicitly requested --no-use-pep517. The value 0 as + # opposed to False can occur when the value is provided via an + # environment variable or config file option (due to the quirk of + # strtobool() returning an integer in pip's configuration code). + if has_pyproject and not has_setup: + if use_pep517 is not None and not use_pep517: + raise InstallationError( + "Disabling PEP 517 processing is invalid: " + "project does not have a setup.py" + ) + use_pep517 = True + elif build_system and "build-backend" in build_system: + if use_pep517 is not None and not use_pep517: + raise InstallationError( + "Disabling PEP 517 processing is invalid: " + "project specifies a build backend of {} " + "in pyproject.toml".format(build_system["build-backend"]) + ) + use_pep517 = True + + # If we haven't worked out whether to use PEP 517 yet, + # and the user hasn't explicitly stated a preference, + # we do so if the project has a pyproject.toml file + # or if we cannot import setuptools or wheels. + + # We fallback to PEP 517 when without setuptools or without the wheel package, + # so setuptools can be installed as a default build backend. + # For more info see: + # https://discuss.python.org/t/pip-without-setuptools-could-the-experience-be-improved/11810/9 + # https://github.com/pypa/pip/issues/8559 + elif use_pep517 is None: + use_pep517 = ( + has_pyproject + or not importlib.util.find_spec("setuptools") + or not importlib.util.find_spec("wheel") + ) + + # At this point, we know whether we're going to use PEP 517. + assert use_pep517 is not None + + # If we're using the legacy code path, there is nothing further + # for us to do here. + if not use_pep517: + return None + + if build_system is None: + # Either the user has a pyproject.toml with no build-system + # section, or the user has no pyproject.toml, but has opted in + # explicitly via --use-pep517. + # In the absence of any explicit backend specification, we + # assume the setuptools backend that most closely emulates the + # traditional direct setup.py execution, and require wheel and + # a version of setuptools that supports that backend. + + build_system = { + "requires": ["setuptools>=40.8.0"], + "build-backend": "setuptools.build_meta:__legacy__", + } + + # If we're using PEP 517, we have build system information (either + # from pyproject.toml, or defaulted by the code above). 
+ # Note that at this point, we do not know if the user has actually + # specified a backend, though. + assert build_system is not None + + # Ensure that the build-system section in pyproject.toml conforms + # to PEP 518. + + # Specifying the build-system table but not the requires key is invalid + if "requires" not in build_system: + raise MissingPyProjectBuildRequires(package=req_name) + + # Error out if requires is not a list of strings + requires = build_system["requires"] + if not _is_list_of_str(requires): + raise InvalidPyProjectBuildRequires( + package=req_name, + reason="It is not a list of strings.", + ) + + # Each requirement must be valid as per PEP 508 + for requirement in requires: + try: + get_requirement(requirement) + except InvalidRequirement as error: + raise InvalidPyProjectBuildRequires( + package=req_name, + reason=f"It contains an invalid requirement: {requirement!r}", + ) from error + + backend = build_system.get("build-backend") + backend_path = build_system.get("backend-path", []) + check: list[str] = [] + if backend is None: + # If the user didn't specify a backend, we assume they want to use + # the setuptools backend. But we can't be sure they have included + # a version of setuptools which supplies the backend. So we + # make a note to check that this requirement is present once + # we have set up the environment. + # This is quite a lot of work to check for a very specific case. But + # the problem is, that case is potentially quite common - projects that + # adopted PEP 518 early for the ability to specify requirements to + # execute setup.py, but never considered needing to mention the build + # tools themselves. The original PEP 518 code had a similar check (but + # implemented in a different way). + backend = "setuptools.build_meta:__legacy__" + check = ["setuptools>=40.8.0"] + + return BuildSystemDetails(requires, backend, check, backend_path) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/req/__init__.py new file mode 100644 index 0000000..e5050ee --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/req/__init__.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import collections +import logging +from collections.abc import Generator, Sequence +from dataclasses import dataclass + +from pip._internal.cli.progress_bars import BarType, get_install_progress_renderer +from pip._internal.utils.logging import indent_log + +from .req_file import parse_requirements +from .req_install import InstallRequirement +from .req_set import RequirementSet + +__all__ = [ + "RequirementSet", + "InstallRequirement", + "parse_requirements", + "install_given_reqs", +] + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class InstallationResult: + name: str + + +def _validate_requirements( + requirements: list[InstallRequirement], +) -> Generator[tuple[str, InstallRequirement], None, None]: + for req in requirements: + assert req.name, f"invalid to-be-installed requirement: {req}" + yield req.name, req + + +def install_given_reqs( + requirements: list[InstallRequirement], + global_options: Sequence[str], + root: str | None, + home: str | None, + prefix: str | None, + warn_script_location: bool, + use_user_site: bool, + pycompile: bool, + progress_bar: BarType, +) -> list[InstallationResult]: + """ + Install everything in the given list. 
+ + (to be called after having downloaded and unpacked the packages) + """ + to_install = collections.OrderedDict(_validate_requirements(requirements)) + + if to_install: + logger.info( + "Installing collected packages: %s", + ", ".join(to_install.keys()), + ) + + installed = [] + + show_progress = logger.isEnabledFor(logging.INFO) and len(to_install) > 1 + + items = iter(to_install.values()) + if show_progress: + renderer = get_install_progress_renderer( + bar_type=progress_bar, total=len(to_install) + ) + items = renderer(items) + + with indent_log(): + for requirement in items: + req_name = requirement.name + assert req_name is not None + if requirement.should_reinstall: + logger.info("Attempting uninstall: %s", req_name) + with indent_log(): + uninstalled_pathset = requirement.uninstall(auto_confirm=True) + else: + uninstalled_pathset = None + + try: + requirement.install( + global_options, + root=root, + home=home, + prefix=prefix, + warn_script_location=warn_script_location, + use_user_site=use_user_site, + pycompile=pycompile, + ) + except Exception: + # if install did not succeed, rollback previous uninstall + if uninstalled_pathset and not requirement.install_succeeded: + uninstalled_pathset.rollback() + raise + else: + if uninstalled_pathset and requirement.install_succeeded: + uninstalled_pathset.commit() + + installed.append(InstallationResult(req_name)) + + return installed diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..c022fa7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/constructors.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/constructors.cpython-312.pyc new file mode 100644 index 0000000..33a1f2e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/constructors.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-312.pyc new file mode 100644 index 0000000..479fbef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_file.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_file.cpython-312.pyc new file mode 100644 index 0000000..e23d6f1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_file.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_install.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_install.cpython-312.pyc new file mode 100644 index 0000000..a8b5e51 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_install.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_set.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_set.cpython-312.pyc new file mode 100644 index 0000000..d619cf6 Binary files /dev/null 
and b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_set.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-312.pyc
new file mode 100644
index 0000000..45caeb0
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/constructors.py b/.venv/lib/python3.12/site-packages/pip/_internal/req/constructors.py
new file mode 100644
index 0000000..056e7e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/req/constructors.py
@@ -0,0 +1,562 @@
+"""Backing implementation for InstallRequirement's various constructors
+
+The idea here is that these formed a major chunk of InstallRequirement's
+size, so moving them and the support code dedicated to them outside of
+that class helps keep the rest of the code understandable.
+
+These are meant to be used elsewhere within pip to create instances of
+InstallRequirement.
+"""
+
+from __future__ import annotations
+
+import copy
+import logging
+import os
+import re
+from collections.abc import Collection
+from dataclasses import dataclass
+
+from pip._vendor.packaging.markers import Marker
+from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
+from pip._vendor.packaging.specifiers import Specifier
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.models.index import PyPI, TestPyPI
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.req.req_file import ParsedRequirement
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.filetypes import is_archive_file
+from pip._internal.utils.misc import is_installable_dir
+from pip._internal.utils.packaging import get_requirement
+from pip._internal.utils.urls import path_to_url
+from pip._internal.vcs import is_url, vcs
+
+__all__ = [
+    "install_req_from_editable",
+    "install_req_from_line",
+    "parse_editable",
+]
+
+logger = logging.getLogger(__name__)
+operators = Specifier._operators.keys()
+
+
+def _strip_extras(path: str) -> tuple[str, str | None]:
+    m = re.match(r"^(.+)(\[[^\]]+\])$", path)
+    extras = None
+    if m:
+        path_no_extras = m.group(1)
+        extras = m.group(2)
+    else:
+        path_no_extras = path
+
+    return path_no_extras, extras
+
+
+def convert_extras(extras: str | None) -> set[str]:
+    if not extras:
+        return set()
+    return get_requirement("placeholder" + extras.lower()).extras
+
+
+def _set_requirement_extras(req: Requirement, new_extras: set[str]) -> Requirement:
+    """
+    Returns a new requirement based on the given one, with the supplied extras. If the
+    given requirement already has extras those are replaced (or dropped if no new extras
+    are given).
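+
+    For example, given "requests[security]" and new_extras={"socks"}, the
+    result is a requirement for "requests[socks]"; an empty new_extras set
+    drops the extras entirely.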
+ """ + match: re.Match[str] | None = re.fullmatch( + # see https://peps.python.org/pep-0508/#complete-grammar + r"([\w\t .-]+)(\[[^\]]*\])?(.*)", + str(req), + flags=re.ASCII, + ) + # ireq.req is a valid requirement so the regex should always match + assert ( + match is not None + ), f"regex match on requirement {req} failed, this should never happen" + pre: str | None = match.group(1) + post: str | None = match.group(3) + assert ( + pre is not None and post is not None + ), f"regex group selection for requirement {req} failed, this should never happen" + extras: str = "[{}]".format(",".join(sorted(new_extras)) if new_extras else "") + return get_requirement(f"{pre}{extras}{post}") + + +def parse_editable(editable_req: str) -> tuple[str | None, str, set[str]]: + """Parses an editable requirement into: + - a requirement name + - an URL + - extras + - editable options + Accepted requirements: + svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir + .[some_extra] + """ + + url = editable_req + + # If a file path is specified with extras, strip off the extras. + url_no_extras, extras = _strip_extras(url) + + if os.path.isdir(url_no_extras): + # Treating it as code that has already been checked out + url_no_extras = path_to_url(url_no_extras) + + if url_no_extras.lower().startswith("file:"): + package_name = Link(url_no_extras).egg_fragment + if extras: + return ( + package_name, + url_no_extras, + get_requirement("placeholder" + extras.lower()).extras, + ) + else: + return package_name, url_no_extras, set() + + for version_control in vcs: + if url.lower().startswith(f"{version_control}:"): + url = f"{version_control}+{url}" + break + + link = Link(url) + + if not link.is_vcs: + backends = ", ".join(vcs.all_schemes) + raise InstallationError( + f"{editable_req} is not a valid editable requirement. " + f"It should either be a path to a local project or a VCS URL " + f"(beginning with {backends})." + ) + + package_name = link.egg_fragment + if not package_name: + raise InstallationError( + f"Could not detect requirement name for '{editable_req}', " + "please specify one with #egg=your_package_name" + ) + return package_name, url, set() + + +def check_first_requirement_in_file(filename: str) -> None: + """Check if file is parsable as a requirements file. + + This is heavily based on ``pkg_resources.parse_requirements``, but + simplified to just check the first meaningful line. + + :raises InvalidRequirement: If the first meaningful line cannot be parsed + as an requirement. + """ + with open(filename, encoding="utf-8", errors="ignore") as f: + # Create a steppable iterator, so we can handle \-continuations. + lines = ( + line + for line in (line.strip() for line in f) + if line and not line.startswith("#") # Skip blank lines/comments. + ) + + for line in lines: + # Drop comments -- a hash without a space may be in a URL. + if " #" in line: + line = line[: line.find(" #")] + # If there is a line continuation, drop it, and append the next line. + if line.endswith("\\"): + line = line[:-2].strip() + next(lines, "") + get_requirement(line) + return + + +def deduce_helpful_msg(req: str) -> str: + """Returns helpful msg in case requirements file does not exist, + or cannot be parsed. + + :params req: Requirements file path + """ + if not os.path.exists(req): + return f" File '{req}' does not exist." + msg = " The path does exist. " + # Try to parse and check if it is a requirements file. 
+ try: + check_first_requirement_in_file(req) + except InvalidRequirement: + logger.debug("Cannot parse '%s' as requirements file", req) + else: + msg += ( + f"The argument you provided " + f"({req}) appears to be a" + f" requirements file. If that is the" + f" case, use the '-r' flag to install" + f" the packages specified within it." + ) + return msg + + +@dataclass(frozen=True) +class RequirementParts: + requirement: Requirement | None + link: Link | None + markers: Marker | None + extras: set[str] + + +def parse_req_from_editable(editable_req: str) -> RequirementParts: + name, url, extras_override = parse_editable(editable_req) + + if name is not None: + try: + req: Requirement | None = get_requirement(name) + except InvalidRequirement as exc: + raise InstallationError(f"Invalid requirement: {name!r}: {exc}") + else: + req = None + + link = Link(url) + + return RequirementParts(req, link, None, extras_override) + + +# ---- The actual constructors follow ---- + + +def install_req_from_editable( + editable_req: str, + comes_from: InstallRequirement | str | None = None, + *, + use_pep517: bool | None = None, + isolated: bool = False, + global_options: list[str] | None = None, + hash_options: dict[str, list[str]] | None = None, + constraint: bool = False, + user_supplied: bool = False, + permit_editable_wheels: bool = False, + config_settings: dict[str, str | list[str]] | None = None, +) -> InstallRequirement: + parts = parse_req_from_editable(editable_req) + + return InstallRequirement( + parts.requirement, + comes_from=comes_from, + user_supplied=user_supplied, + editable=True, + permit_editable_wheels=permit_editable_wheels, + link=parts.link, + constraint=constraint, + use_pep517=use_pep517, + isolated=isolated, + global_options=global_options, + hash_options=hash_options, + config_settings=config_settings, + extras=parts.extras, + ) + + +def _looks_like_path(name: str) -> bool: + """Checks whether the string "looks like" a path on the filesystem. + + This does not check whether the target actually exists, only judge from the + appearance. + + Returns true if any of the following conditions is true: + * a path separator is found (either os.path.sep or os.path.altsep); + * a dot is found (which represents the current directory). + """ + if os.path.sep in name: + return True + if os.path.altsep is not None and os.path.altsep in name: + return True + if name.startswith("."): + return True + return False + + +def _get_url_from_path(path: str, name: str) -> str | None: + """ + First, it checks whether a provided path is an installable directory. If it + is, returns the path. + + If false, check if the path is an archive file (such as a .whl). + The function checks if the path is a file. If false, if the path has + an @, it will treat it as a PEP 440 URL requirement and return the path. + """ + if _looks_like_path(name) and os.path.isdir(path): + if is_installable_dir(path): + return path_to_url(path) + # TODO: The is_installable_dir test here might not be necessary + # now that it is done in load_pyproject_toml too. + raise InstallationError( + f"Directory {name!r} is not installable. Neither 'setup.py' " + "nor 'pyproject.toml' found." + ) + if not is_archive_file(path): + return None + if os.path.isfile(path): + return path_to_url(path) + urlreq_parts = name.split("@", 1) + if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]): + # If the path contains '@' and the part before it does not look + # like a path, try to treat it as a PEP 440 URL req instead. 
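+        # (e.g. "pip @ https://example.com/archive.zip": the part before the
+        # '@' is not path-like, so requirement parsing should handle it.)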
+ return None + logger.warning( + "Requirement %r looks like a filename, but the file does not exist", + name, + ) + return path_to_url(path) + + +def parse_req_from_line(name: str, line_source: str | None) -> RequirementParts: + if is_url(name): + marker_sep = "; " + else: + marker_sep = ";" + if marker_sep in name: + name, markers_as_string = name.split(marker_sep, 1) + markers_as_string = markers_as_string.strip() + if not markers_as_string: + markers = None + else: + markers = Marker(markers_as_string) + else: + markers = None + name = name.strip() + req_as_string = None + path = os.path.normpath(os.path.abspath(name)) + link = None + extras_as_string = None + + if is_url(name): + link = Link(name) + else: + p, extras_as_string = _strip_extras(path) + url = _get_url_from_path(p, name) + if url is not None: + link = Link(url) + + # it's a local file, dir, or url + if link: + # Handle relative file URLs + if link.scheme == "file" and re.search(r"\.\./", link.url): + link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path)))) + # wheel file + if link.is_wheel: + wheel = Wheel(link.filename) # can raise InvalidWheelFilename + req_as_string = f"{wheel.name}=={wheel.version}" + else: + # set the req to the egg fragment. when it's not there, this + # will become an 'unnamed' requirement + req_as_string = link.egg_fragment + + # a requirement specifier + else: + req_as_string = name + + extras = convert_extras(extras_as_string) + + def with_source(text: str) -> str: + if not line_source: + return text + return f"{text} (from {line_source})" + + def _parse_req_string(req_as_string: str) -> Requirement: + try: + return get_requirement(req_as_string) + except InvalidRequirement as exc: + if os.path.sep in req_as_string: + add_msg = "It looks like a path." + add_msg += deduce_helpful_msg(req_as_string) + elif "=" in req_as_string and not any( + op in req_as_string for op in operators + ): + add_msg = "= is not a valid operator. Did you mean == ?" + else: + add_msg = "" + msg = with_source(f"Invalid requirement: {req_as_string!r}: {exc}") + if add_msg: + msg += f"\nHint: {add_msg}" + raise InstallationError(msg) + + if req_as_string is not None: + req: Requirement | None = _parse_req_string(req_as_string) + else: + req = None + + return RequirementParts(req, link, markers, extras) + + +def install_req_from_line( + name: str, + comes_from: str | InstallRequirement | None = None, + *, + use_pep517: bool | None = None, + isolated: bool = False, + global_options: list[str] | None = None, + hash_options: dict[str, list[str]] | None = None, + constraint: bool = False, + line_source: str | None = None, + user_supplied: bool = False, + config_settings: dict[str, str | list[str]] | None = None, +) -> InstallRequirement: + """Creates an InstallRequirement from a name, which might be a + requirement, directory containing 'setup.py', filename, or URL. + + :param line_source: An optional string describing where the line is from, + for logging purposes in case of an error. 
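+
+    For example, ``name`` may be a specifier such as "requests>=2.0", a local
+    path such as "./downloads/pkg.whl" or ".", or a direct URL to an sdist or
+    wheel archive.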
+ """ + parts = parse_req_from_line(name, line_source) + + return InstallRequirement( + parts.requirement, + comes_from, + link=parts.link, + markers=parts.markers, + use_pep517=use_pep517, + isolated=isolated, + global_options=global_options, + hash_options=hash_options, + config_settings=config_settings, + constraint=constraint, + extras=parts.extras, + user_supplied=user_supplied, + ) + + +def install_req_from_req_string( + req_string: str, + comes_from: InstallRequirement | None = None, + isolated: bool = False, + use_pep517: bool | None = None, + user_supplied: bool = False, +) -> InstallRequirement: + try: + req = get_requirement(req_string) + except InvalidRequirement as exc: + raise InstallationError(f"Invalid requirement: {req_string!r}: {exc}") + + domains_not_allowed = [ + PyPI.file_storage_domain, + TestPyPI.file_storage_domain, + ] + if ( + req.url + and comes_from + and comes_from.link + and comes_from.link.netloc in domains_not_allowed + ): + # Explicitly disallow pypi packages that depend on external urls + raise InstallationError( + "Packages installed from PyPI cannot depend on packages " + "which are not also hosted on PyPI.\n" + f"{comes_from.name} depends on {req} " + ) + + return InstallRequirement( + req, + comes_from, + isolated=isolated, + use_pep517=use_pep517, + user_supplied=user_supplied, + ) + + +def install_req_from_parsed_requirement( + parsed_req: ParsedRequirement, + isolated: bool = False, + use_pep517: bool | None = None, + user_supplied: bool = False, + config_settings: dict[str, str | list[str]] | None = None, +) -> InstallRequirement: + if parsed_req.is_editable: + req = install_req_from_editable( + parsed_req.requirement, + comes_from=parsed_req.comes_from, + use_pep517=use_pep517, + constraint=parsed_req.constraint, + isolated=isolated, + user_supplied=user_supplied, + config_settings=config_settings, + ) + + else: + req = install_req_from_line( + parsed_req.requirement, + comes_from=parsed_req.comes_from, + use_pep517=use_pep517, + isolated=isolated, + global_options=( + parsed_req.options.get("global_options", []) + if parsed_req.options + else [] + ), + hash_options=( + parsed_req.options.get("hashes", {}) if parsed_req.options else {} + ), + constraint=parsed_req.constraint, + line_source=parsed_req.line_source, + user_supplied=user_supplied, + config_settings=config_settings, + ) + return req + + +def install_req_from_link_and_ireq( + link: Link, ireq: InstallRequirement +) -> InstallRequirement: + return InstallRequirement( + req=ireq.req, + comes_from=ireq.comes_from, + editable=ireq.editable, + link=link, + markers=ireq.markers, + use_pep517=ireq.use_pep517, + isolated=ireq.isolated, + global_options=ireq.global_options, + hash_options=ireq.hash_options, + config_settings=ireq.config_settings, + user_supplied=ireq.user_supplied, + ) + + +def install_req_drop_extras(ireq: InstallRequirement) -> InstallRequirement: + """ + Creates a new InstallationRequirement using the given template but without + any extras. Sets the original requirement as the new one's parent + (comes_from). 
+ """ + return InstallRequirement( + req=( + _set_requirement_extras(ireq.req, set()) if ireq.req is not None else None + ), + comes_from=ireq, + editable=ireq.editable, + link=ireq.link, + markers=ireq.markers, + use_pep517=ireq.use_pep517, + isolated=ireq.isolated, + global_options=ireq.global_options, + hash_options=ireq.hash_options, + constraint=ireq.constraint, + extras=[], + config_settings=ireq.config_settings, + user_supplied=ireq.user_supplied, + permit_editable_wheels=ireq.permit_editable_wheels, + ) + + +def install_req_extend_extras( + ireq: InstallRequirement, + extras: Collection[str], +) -> InstallRequirement: + """ + Returns a copy of an installation requirement with some additional extras. + Makes a shallow copy of the ireq object. + """ + result = copy.copy(ireq) + result.extras = {*ireq.extras, *extras} + result.req = ( + _set_requirement_extras(ireq.req, result.extras) + if ireq.req is not None + else None + ) + return result diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/req_dependency_group.py b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_dependency_group.py new file mode 100644 index 0000000..396ac1b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_dependency_group.py @@ -0,0 +1,75 @@ +from collections.abc import Iterable, Iterator +from typing import Any + +from pip._vendor.dependency_groups import DependencyGroupResolver + +from pip._internal.exceptions import InstallationError +from pip._internal.utils.compat import tomllib + + +def parse_dependency_groups(groups: list[tuple[str, str]]) -> list[str]: + """ + Parse dependency groups data as provided via the CLI, in a `[path:]group` syntax. + + Raises InstallationErrors if anything goes wrong. + """ + resolvers = _build_resolvers(path for (path, _) in groups) + return list(_resolve_all_groups(resolvers, groups)) + + +def _resolve_all_groups( + resolvers: dict[str, DependencyGroupResolver], groups: list[tuple[str, str]] +) -> Iterator[str]: + """ + Run all resolution, converting any error from `DependencyGroupResolver` into + an InstallationError. + """ + for path, groupname in groups: + resolver = resolvers[path] + try: + yield from (str(req) for req in resolver.resolve(groupname)) + except (ValueError, TypeError, LookupError) as e: + raise InstallationError( + f"[dependency-groups] resolution failed for '{groupname}' " + f"from '{path}': {e}" + ) from e + + +def _build_resolvers(paths: Iterable[str]) -> dict[str, Any]: + resolvers = {} + for path in paths: + if path in resolvers: + continue + + pyproject = _load_pyproject(path) + if "dependency-groups" not in pyproject: + raise InstallationError( + f"[dependency-groups] table was missing from '{path}'. " + "Cannot resolve '--group' option." + ) + raw_dependency_groups = pyproject["dependency-groups"] + if not isinstance(raw_dependency_groups, dict): + raise InstallationError( + f"[dependency-groups] table was malformed in {path}. " + "Cannot resolve '--group' option." + ) + + resolvers[path] = DependencyGroupResolver(raw_dependency_groups) + return resolvers + + +def _load_pyproject(path: str) -> dict[str, Any]: + """ + This helper loads a pyproject.toml as TOML. + + It raises an InstallationError if the operation fails. + """ + try: + with open(path, "rb") as fp: + return tomllib.load(fp) + except FileNotFoundError: + raise InstallationError(f"{path} not found. 
Cannot resolve '--group' option.") + except tomllib.TOMLDecodeError as e: + raise InstallationError(f"Error parsing {path}: {e}") from e + except OSError as e: + raise InstallationError(f"Error reading {path}: {e}") from e diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/req_file.py b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_file.py new file mode 100644 index 0000000..0aad0a3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_file.py @@ -0,0 +1,620 @@ +""" +Requirements file parsing +""" + +from __future__ import annotations + +import codecs +import locale +import logging +import optparse +import os +import re +import shlex +import sys +import urllib.parse +from collections.abc import Generator, Iterable +from dataclasses import dataclass +from optparse import Values +from typing import ( + TYPE_CHECKING, + Any, + Callable, + NoReturn, +) + +from pip._internal.cli import cmdoptions +from pip._internal.exceptions import InstallationError, RequirementsFileParseError +from pip._internal.models.search_scope import SearchScope + +if TYPE_CHECKING: + from pip._internal.index.package_finder import PackageFinder + from pip._internal.network.session import PipSession + +__all__ = ["parse_requirements"] + +ReqFileLines = Iterable[tuple[int, str]] + +LineParser = Callable[[str], tuple[str, Values]] + +SCHEME_RE = re.compile(r"^(http|https|file):", re.I) +COMMENT_RE = re.compile(r"(^|\s+)#.*$") + +# Matches environment variable-style values in '${MY_VARIABLE_1}' with the +# variable name consisting of only uppercase letters, digits or the '_' +# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1, +# 2013 Edition. +ENV_VAR_RE = re.compile(r"(?P\$\{(?P[A-Z0-9_]+)\})") + +SUPPORTED_OPTIONS: list[Callable[..., optparse.Option]] = [ + cmdoptions.index_url, + cmdoptions.extra_index_url, + cmdoptions.no_index, + cmdoptions.constraints, + cmdoptions.requirements, + cmdoptions.editable, + cmdoptions.find_links, + cmdoptions.no_binary, + cmdoptions.only_binary, + cmdoptions.prefer_binary, + cmdoptions.require_hashes, + cmdoptions.pre, + cmdoptions.trusted_host, + cmdoptions.use_new_feature, +] + +# options to be passed to requirements +SUPPORTED_OPTIONS_REQ: list[Callable[..., optparse.Option]] = [ + cmdoptions.global_options, + cmdoptions.hash, + cmdoptions.config_settings, +] + +SUPPORTED_OPTIONS_EDITABLE_REQ: list[Callable[..., optparse.Option]] = [ + cmdoptions.config_settings, +] + + +# the 'dest' string values +SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ] +SUPPORTED_OPTIONS_EDITABLE_REQ_DEST = [ + str(o().dest) for o in SUPPORTED_OPTIONS_EDITABLE_REQ +] + +# order of BOMS is important: codecs.BOM_UTF16_LE is a prefix of codecs.BOM_UTF32_LE +# so data.startswith(BOM_UTF16_LE) would be true for UTF32_LE data +BOMS: list[tuple[bytes, str]] = [ + (codecs.BOM_UTF8, "utf-8"), + (codecs.BOM_UTF32, "utf-32"), + (codecs.BOM_UTF32_BE, "utf-32-be"), + (codecs.BOM_UTF32_LE, "utf-32-le"), + (codecs.BOM_UTF16, "utf-16"), + (codecs.BOM_UTF16_BE, "utf-16-be"), + (codecs.BOM_UTF16_LE, "utf-16-le"), +] + +PEP263_ENCODING_RE = re.compile(rb"coding[:=]\s*([-\w.]+)") +DEFAULT_ENCODING = "utf-8" + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class ParsedRequirement: + # TODO: replace this with slots=True when dropping Python 3.9 support. 
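+    # (dataclass(slots=True) only exists on Python 3.10+, hence the manual
+    # __slots__ declaration below.)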
+ __slots__ = ( + "requirement", + "is_editable", + "comes_from", + "constraint", + "options", + "line_source", + ) + + requirement: str + is_editable: bool + comes_from: str + constraint: bool + options: dict[str, Any] | None + line_source: str | None + + +@dataclass(frozen=True) +class ParsedLine: + __slots__ = ("filename", "lineno", "args", "opts", "constraint") + + filename: str + lineno: int + args: str + opts: Values + constraint: bool + + @property + def is_editable(self) -> bool: + return bool(self.opts.editables) + + @property + def requirement(self) -> str | None: + if self.args: + return self.args + elif self.is_editable: + # We don't support multiple -e on one line + return self.opts.editables[0] + return None + + +def parse_requirements( + filename: str, + session: PipSession, + finder: PackageFinder | None = None, + options: optparse.Values | None = None, + constraint: bool = False, +) -> Generator[ParsedRequirement, None, None]: + """Parse a requirements file and yield ParsedRequirement instances. + + :param filename: Path or url of requirements file. + :param session: PipSession instance. + :param finder: Instance of pip.index.PackageFinder. + :param options: cli options. + :param constraint: If true, parsing a constraint file rather than + requirements file. + """ + line_parser = get_line_parser(finder) + parser = RequirementsFileParser(session, line_parser) + + for parsed_line in parser.parse(filename, constraint): + parsed_req = handle_line( + parsed_line, options=options, finder=finder, session=session + ) + if parsed_req is not None: + yield parsed_req + + +def preprocess(content: str) -> ReqFileLines: + """Split, filter, and join lines, and return a line iterator + + :param content: the content of the requirements file + """ + lines_enum: ReqFileLines = enumerate(content.splitlines(), start=1) + lines_enum = join_lines(lines_enum) + lines_enum = ignore_comments(lines_enum) + lines_enum = expand_env_variables(lines_enum) + return lines_enum + + +def handle_requirement_line( + line: ParsedLine, + options: optparse.Values | None = None, +) -> ParsedRequirement: + # preserve for the nested code path + line_comes_from = "{} {} (line {})".format( + "-c" if line.constraint else "-r", + line.filename, + line.lineno, + ) + + assert line.requirement is not None + + # get the options that apply to requirements + if line.is_editable: + supported_dest = SUPPORTED_OPTIONS_EDITABLE_REQ_DEST + else: + supported_dest = SUPPORTED_OPTIONS_REQ_DEST + req_options = {} + for dest in supported_dest: + if dest in line.opts.__dict__ and line.opts.__dict__[dest]: + req_options[dest] = line.opts.__dict__[dest] + + line_source = f"line {line.lineno} of {line.filename}" + return ParsedRequirement( + requirement=line.requirement, + is_editable=line.is_editable, + comes_from=line_comes_from, + constraint=line.constraint, + options=req_options, + line_source=line_source, + ) + + +def handle_option_line( + opts: Values, + filename: str, + lineno: int, + finder: PackageFinder | None = None, + options: optparse.Values | None = None, + session: PipSession | None = None, +) -> None: + if opts.hashes: + logger.warning( + "%s line %s has --hash but no requirement, and will be ignored.", + filename, + lineno, + ) + + if options: + # percolate options upward + if opts.require_hashes: + options.require_hashes = opts.require_hashes + if opts.features_enabled: + options.features_enabled.extend( + f for f in opts.features_enabled if f not in options.features_enabled + ) + + # set finder options + if finder: + 
find_links = finder.find_links
+        index_urls = finder.index_urls
+        no_index = finder.search_scope.no_index
+        if opts.no_index is True:
+            no_index = True
+            index_urls = []
+        if opts.index_url and not no_index:
+            index_urls = [opts.index_url]
+        if opts.extra_index_urls and not no_index:
+            index_urls.extend(opts.extra_index_urls)
+        if opts.find_links:
+            # FIXME: it would be nice to keep track of the source
+            # of the find_links: support a find-links local path
+            # relative to a requirements file.
+            value = opts.find_links[0]
+            req_dir = os.path.dirname(os.path.abspath(filename))
+            relative_to_reqs_file = os.path.join(req_dir, value)
+            if os.path.exists(relative_to_reqs_file):
+                value = relative_to_reqs_file
+            find_links.append(value)
+
+        if session:
+            # We need to update the auth urls in session
+            session.update_index_urls(index_urls)
+
+        search_scope = SearchScope(
+            find_links=find_links,
+            index_urls=index_urls,
+            no_index=no_index,
+        )
+        finder.search_scope = search_scope
+
+        if opts.pre:
+            finder.set_allow_all_prereleases()
+
+        if opts.prefer_binary:
+            finder.set_prefer_binary()
+
+    if session:
+        for host in opts.trusted_hosts or []:
+            source = f"line {lineno} of {filename}"
+            session.add_trusted_host(host, source=source)
+
+
+def handle_line(
+    line: ParsedLine,
+    options: optparse.Values | None = None,
+    finder: PackageFinder | None = None,
+    session: PipSession | None = None,
+) -> ParsedRequirement | None:
+    """Handle a single parsed requirements line; this can result in
+    creating/yielding requirements, or updating the finder.
+
+    :param line: The parsed line to be processed.
+    :param options: CLI options.
+    :param finder: The finder - updated by non-requirement lines.
+    :param session: The session - updated by non-requirement lines.
+
+    Returns a ParsedRequirement object if the line is a requirement line,
+    otherwise returns None.
+
+    For lines that contain requirements, the only options that have an effect
+    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
+    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
+    ignored.
+
+    For lines that do not contain requirements, the only options that have an
+    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
+    be present, but are ignored. These lines may contain multiple options
+    (although our docs imply only one is supported), and all are parsed and
+    affect the finder.
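+
+    For example, a line consisting solely of "--index-url https://example.org/simple"
+    yields no requirement but updates the finder's search scope.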
+ """ + + if line.requirement is not None: + parsed_req = handle_requirement_line(line, options) + return parsed_req + else: + handle_option_line( + line.opts, + line.filename, + line.lineno, + finder, + options, + session, + ) + return None + + +class RequirementsFileParser: + def __init__( + self, + session: PipSession, + line_parser: LineParser, + ) -> None: + self._session = session + self._line_parser = line_parser + + def parse( + self, filename: str, constraint: bool + ) -> Generator[ParsedLine, None, None]: + """Parse a given file, yielding parsed lines.""" + yield from self._parse_and_recurse( + filename, constraint, [{os.path.abspath(filename): None}] + ) + + def _parse_and_recurse( + self, + filename: str, + constraint: bool, + parsed_files_stack: list[dict[str, str | None]], + ) -> Generator[ParsedLine, None, None]: + for line in self._parse_file(filename, constraint): + if line.requirement is None and ( + line.opts.requirements or line.opts.constraints + ): + # parse a nested requirements file + if line.opts.requirements: + req_path = line.opts.requirements[0] + nested_constraint = False + else: + req_path = line.opts.constraints[0] + nested_constraint = True + + # original file is over http + if SCHEME_RE.search(filename): + # do a url join so relative paths work + req_path = urllib.parse.urljoin(filename, req_path) + # original file and nested file are paths + elif not SCHEME_RE.search(req_path): + # do a join so relative paths work + # and then abspath so that we can identify recursive references + req_path = os.path.abspath( + os.path.join( + os.path.dirname(filename), + req_path, + ) + ) + parsed_files = parsed_files_stack[0] + if req_path in parsed_files: + initial_file = parsed_files[req_path] + tail = ( + f" and again in {initial_file}" + if initial_file is not None + else "" + ) + raise RequirementsFileParseError( + f"{req_path} recursively references itself in {filename}{tail}" + ) + # Keeping a track where was each file first included in + new_parsed_files = parsed_files.copy() + new_parsed_files[req_path] = filename + yield from self._parse_and_recurse( + req_path, nested_constraint, [new_parsed_files, *parsed_files_stack] + ) + else: + yield line + + def _parse_file( + self, filename: str, constraint: bool + ) -> Generator[ParsedLine, None, None]: + _, content = get_file_content(filename, self._session) + + lines_enum = preprocess(content) + + for line_number, line in lines_enum: + try: + args_str, opts = self._line_parser(line) + except OptionParsingError as e: + # add offending line + msg = f"Invalid requirement: {line}\n{e.msg}" + raise RequirementsFileParseError(msg) + + yield ParsedLine( + filename, + line_number, + args_str, + opts, + constraint, + ) + + +def get_line_parser(finder: PackageFinder | None) -> LineParser: + def parse_line(line: str) -> tuple[str, Values]: + # Build new parser for each line since it accumulates appendable + # options. + parser = build_parser() + defaults = parser.get_default_values() + defaults.index_url = None + if finder: + defaults.format_control = finder.format_control + + args_str, options_str = break_args_options(line) + + try: + options = shlex.split(options_str) + except ValueError as e: + raise OptionParsingError(f"Could not split options: {options_str}") from e + + opts, _ = parser.parse_args(options, defaults) + + return args_str, opts + + return parse_line + + +def break_args_options(line: str) -> tuple[str, str]: + """Break up the line into an args and options string. 
We only want to shlex + (and then optparse) the options, not the args. args can contain markers + which are corrupted by shlex. + """ + tokens = line.split(" ") + args = [] + options = tokens[:] + for token in tokens: + if token.startswith(("-", "--")): + break + else: + args.append(token) + options.pop(0) + return " ".join(args), " ".join(options) + + +class OptionParsingError(Exception): + def __init__(self, msg: str) -> None: + self.msg = msg + + +def build_parser() -> optparse.OptionParser: + """ + Return a parser for parsing requirement lines + """ + parser = optparse.OptionParser(add_help_option=False) + + option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ + for option_factory in option_factories: + option = option_factory() + parser.add_option(option) + + # By default optparse sys.exits on parsing errors. We want to wrap + # that in our own exception. + def parser_exit(self: Any, msg: str) -> NoReturn: + raise OptionParsingError(msg) + + # NOTE: mypy disallows assigning to a method + # https://github.com/python/mypy/issues/2427 + parser.exit = parser_exit # type: ignore + + return parser + + +def join_lines(lines_enum: ReqFileLines) -> ReqFileLines: + """Joins a line ending in '\' with the previous line (except when following + comments). The joined line takes on the index of the first line. + """ + primary_line_number = None + new_line: list[str] = [] + for line_number, line in lines_enum: + if not line.endswith("\\") or COMMENT_RE.match(line): + if COMMENT_RE.match(line): + # this ensures comments are always matched later + line = " " + line + if new_line: + new_line.append(line) + assert primary_line_number is not None + yield primary_line_number, "".join(new_line) + new_line = [] + else: + yield line_number, line + else: + if not new_line: + primary_line_number = line_number + new_line.append(line.strip("\\")) + + # last line contains \ + if new_line: + assert primary_line_number is not None + yield primary_line_number, "".join(new_line) + + # TODO: handle space after '\'. + + +def ignore_comments(lines_enum: ReqFileLines) -> ReqFileLines: + """ + Strips comments and filter empty lines. + """ + for line_number, line in lines_enum: + line = COMMENT_RE.sub("", line) + line = line.strip() + if line: + yield line_number, line + + +def expand_env_variables(lines_enum: ReqFileLines) -> ReqFileLines: + """Replace all environment variables that can be retrieved via `os.getenv`. + + The only allowed format for environment variables defined in the + requirement file is `${MY_VARIABLE_1}` to ensure two things: + + 1. Strings that contain a `$` aren't accidentally (partially) expanded. + 2. Ensure consistency across platforms for requirement files. + + These points are the result of a discussion on the `github pull + request #3514 `_. + + Valid characters in variable names follow the `POSIX standard + `_ and are limited + to uppercase letter, digits and the `_` (underscore). + """ + for line_number, line in lines_enum: + for env_var, var_name in ENV_VAR_RE.findall(line): + value = os.getenv(var_name) + if not value: + continue + + line = line.replace(env_var, value) + + yield line_number, line + + +def get_file_content(url: str, session: PipSession) -> tuple[str, str]: + """Gets the content of a file; it may be a filename, file: URL, or + http: URL. Returns (location, content). Content is unicode. + Respects # -*- coding: declarations on the retrieved files. + + :param url: File path or url. + :param session: PipSession instance. 
+ """ + scheme = urllib.parse.urlsplit(url).scheme + # Pip has special support for file:// URLs (LocalFSAdapter). + if scheme in ["http", "https", "file"]: + # Delay importing heavy network modules until absolutely necessary. + from pip._internal.network.utils import raise_for_status + + resp = session.get(url) + raise_for_status(resp) + return resp.url, resp.text + + # Assume this is a bare path. + try: + with open(url, "rb") as f: + raw_content = f.read() + except OSError as exc: + raise InstallationError(f"Could not open requirements file: {exc}") + + content = _decode_req_file(raw_content, url) + + return url, content + + +def _decode_req_file(data: bytes, url: str) -> str: + for bom, encoding in BOMS: + if data.startswith(bom): + return data[len(bom) :].decode(encoding) + + for line in data.split(b"\n")[:2]: + if line[0:1] == b"#": + result = PEP263_ENCODING_RE.search(line) + if result is not None: + encoding = result.groups()[0].decode("ascii") + return data.decode(encoding) + + try: + return data.decode(DEFAULT_ENCODING) + except UnicodeDecodeError: + locale_encoding = locale.getpreferredencoding(False) or sys.getdefaultencoding() + logging.warning( + "unable to decode data from %s with default encoding %s, " + "falling back to encoding from locale: %s. " + "If this is intentional you should specify the encoding with a " + "PEP-263 style comment, e.g. '# -*- coding: %s -*-'", + url, + DEFAULT_ENCODING, + locale_encoding, + locale_encoding, + ) + return data.decode(locale_encoding) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/req_install.py b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_install.py new file mode 100644 index 0000000..c9f6bff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_install.py @@ -0,0 +1,937 @@ +from __future__ import annotations + +import functools +import logging +import os +import shutil +import sys +import uuid +import zipfile +from collections.abc import Collection, Iterable, Sequence +from optparse import Values +from pathlib import Path +from typing import Any + +from pip._vendor.packaging.markers import Marker +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.specifiers import SpecifierSet +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.version import Version +from pip._vendor.packaging.version import parse as parse_version +from pip._vendor.pyproject_hooks import BuildBackendHookCaller + +from pip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment +from pip._internal.exceptions import InstallationError, PreviousBuildDirError +from pip._internal.locations import get_scheme +from pip._internal.metadata import ( + BaseDistribution, + get_default_environment, + get_directory_distribution, + get_wheel_distribution, +) +from pip._internal.metadata.base import FilesystemWheel +from pip._internal.models.direct_url import DirectUrl +from pip._internal.models.link import Link +from pip._internal.operations.build.metadata import generate_metadata +from pip._internal.operations.build.metadata_editable import generate_editable_metadata +from pip._internal.operations.build.metadata_legacy import ( + generate_metadata as generate_metadata_legacy, +) +from pip._internal.operations.install.editable_legacy import ( + install_editable as install_editable_legacy, +) +from pip._internal.operations.install.wheel import install_wheel +from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path +from 
pip._internal.req.req_uninstall import UninstallPathSet +from pip._internal.utils.deprecation import deprecated +from pip._internal.utils.hashes import Hashes +from pip._internal.utils.misc import ( + ConfiguredBuildBackendHookCaller, + ask_path_exists, + backup_dir, + display_path, + hide_url, + is_installable_dir, + redact_auth_from_requirement, + redact_auth_from_url, +) +from pip._internal.utils.packaging import get_requirement +from pip._internal.utils.subprocess import runner_with_spinner_message +from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds +from pip._internal.utils.unpacking import unpack_file +from pip._internal.utils.virtualenv import running_under_virtualenv +from pip._internal.vcs import vcs + +logger = logging.getLogger(__name__) + + +class InstallRequirement: + """ + Represents something that may be installed later on, may have information + about where to fetch the relevant requirement and also contains logic for + installing the said requirement. + """ + + def __init__( + self, + req: Requirement | None, + comes_from: str | InstallRequirement | None, + editable: bool = False, + link: Link | None = None, + markers: Marker | None = None, + use_pep517: bool | None = None, + isolated: bool = False, + *, + global_options: list[str] | None = None, + hash_options: dict[str, list[str]] | None = None, + config_settings: dict[str, str | list[str]] | None = None, + constraint: bool = False, + extras: Collection[str] = (), + user_supplied: bool = False, + permit_editable_wheels: bool = False, + ) -> None: + assert req is None or isinstance(req, Requirement), req + self.req = req + self.comes_from = comes_from + self.constraint = constraint + self.editable = editable + self.permit_editable_wheels = permit_editable_wheels + + # source_dir is the local directory where the linked requirement is + # located, or unpacked. In case unpacking is needed, creating and + # populating source_dir is done by the RequirementPreparer. Note this + # is not necessarily the directory where pyproject.toml or setup.py is + # located - that one is obtained via unpacked_source_directory. + self.source_dir: str | None = None + if self.editable: + assert link + if link.is_file: + self.source_dir = os.path.normpath(os.path.abspath(link.file_path)) + + # original_link is the direct URL that was provided by the user for the + # requirement, either directly or via a constraints file. + if link is None and req and req.url: + # PEP 508 URL requirement + link = Link(req.url) + self.link = self.original_link = link + + # When this InstallRequirement is a wheel obtained from the cache of locally + # built wheels, this is the source link corresponding to the cache entry, which + # was used to download and build the cached wheel. + self.cached_wheel_source_link: Link | None = None + + # Information about the location of the artifact that was downloaded . This + # property is guaranteed to be set in resolver results. + self.download_info: DirectUrl | None = None + + # Path to any downloaded or already-existing package. + self.local_file_path: str | None = None + if self.link and self.link.is_file: + self.local_file_path = self.link.file_path + + if extras: + self.extras = extras + elif req: + self.extras = req.extras + else: + self.extras = set() + if markers is None and req: + markers = req.marker + self.markers = markers + + # This holds the Distribution object if this requirement is already installed. 
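+        # (populated by check_if_exists() below.)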
+ self.satisfied_by: BaseDistribution | None = None + # Whether the installation process should try to uninstall an existing + # distribution before installing this requirement. + self.should_reinstall = False + # Temporary build location + self._temp_build_dir: TempDirectory | None = None + # Set to True after successful installation + self.install_succeeded: bool | None = None + # Supplied options + self.global_options = global_options if global_options else [] + self.hash_options = hash_options if hash_options else {} + self.config_settings = config_settings + # Set to True after successful preparation of this requirement + self.prepared = False + # User supplied requirement are explicitly requested for installation + # by the user via CLI arguments or requirements files, as opposed to, + # e.g. dependencies, extras or constraints. + self.user_supplied = user_supplied + + self.isolated = isolated + self.build_env: BuildEnvironment = NoOpBuildEnvironment() + + # For PEP 517, the directory where we request the project metadata + # gets stored. We need this to pass to build_wheel, so the backend + # can ensure that the wheel matches the metadata (see the PEP for + # details). + self.metadata_directory: str | None = None + + # The static build requirements (from pyproject.toml) + self.pyproject_requires: list[str] | None = None + + # Build requirements that we will check are available + self.requirements_to_check: list[str] = [] + + # The PEP 517 backend we should use to build the project + self.pep517_backend: BuildBackendHookCaller | None = None + + # Are we using PEP 517 for this requirement? + # After pyproject.toml has been loaded, the only valid values are True + # and False. Before loading, None is valid (meaning "use the default"). + # Setting an explicit value before loading pyproject.toml is supported, + # but after loading this flag should be treated as read only. + self.use_pep517 = use_pep517 + + # If config settings are provided, enforce PEP 517. + if self.config_settings: + if self.use_pep517 is False: + logger.warning( + "--no-use-pep517 ignored for %s " + "because --config-settings are specified.", + self, + ) + self.use_pep517 = True + + # This requirement needs more preparation before it can be built + self.needs_more_preparation = False + + # This requirement needs to be unpacked before it can be installed. 
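+        # (set via needs_unpacked_archive() and consumed by
+        # ensure_pristine_source_checkout().)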
+ self._archive_source: Path | None = None + + def __str__(self) -> str: + if self.req: + s = redact_auth_from_requirement(self.req) + if self.link: + s += f" from {redact_auth_from_url(self.link.url)}" + elif self.link: + s = redact_auth_from_url(self.link.url) + else: + s = "" + if self.satisfied_by is not None: + if self.satisfied_by.location is not None: + location = display_path(self.satisfied_by.location) + else: + location = "" + s += f" in {location}" + if self.comes_from: + if isinstance(self.comes_from, str): + comes_from: str | None = self.comes_from + else: + comes_from = self.comes_from.from_path() + if comes_from: + s += f" (from {comes_from})" + return s + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} object: " + f"{str(self)} editable={self.editable!r}>" + ) + + def format_debug(self) -> str: + """An un-tested helper for getting state, for debugging.""" + attributes = vars(self) + names = sorted(attributes) + + state = (f"{attr}={attributes[attr]!r}" for attr in sorted(names)) + return "<{name} object: {{{state}}}>".format( + name=self.__class__.__name__, + state=", ".join(state), + ) + + # Things that are valid for all kinds of requirements? + @property + def name(self) -> str | None: + if self.req is None: + return None + return self.req.name + + @functools.cached_property + def supports_pyproject_editable(self) -> bool: + if not self.use_pep517: + return False + assert self.pep517_backend + with self.build_env: + runner = runner_with_spinner_message( + "Checking if build backend supports build_editable" + ) + with self.pep517_backend.subprocess_runner(runner): + return "build_editable" in self.pep517_backend._supported_features() + + @property + def specifier(self) -> SpecifierSet: + assert self.req is not None + return self.req.specifier + + @property + def is_direct(self) -> bool: + """Whether this requirement was specified as a direct URL.""" + return self.original_link is not None + + @property + def is_pinned(self) -> bool: + """Return whether I am pinned to an exact version. + + For example, some-package==1.2 is pinned; some-package>1.2 is not. + """ + assert self.req is not None + specifiers = self.req.specifier + return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="} + + def match_markers(self, extras_requested: Iterable[str] | None = None) -> bool: + if not extras_requested: + # Provide an extra to safely evaluate the markers + # without matching any extra + extras_requested = ("",) + if self.markers is not None: + return any( + self.markers.evaluate({"extra": extra}) for extra in extras_requested + ) + else: + return True + + @property + def has_hash_options(self) -> bool: + """Return whether any known-good hashes are specified as options. + + These activate --require-hashes mode; hashes specified as part of a + URL do not. + + """ + return bool(self.hash_options) + + def hashes(self, trust_internet: bool = True) -> Hashes: + """Return a hash-comparer that considers my option- and URL-based + hashes to be known-good. + + Hashes in URLs--ones embedded in the requirements file, not ones + downloaded from an index server--are almost peers with ones from + flags. They satisfy --require-hashes (whether it was implicitly or + explicitly activated) but do not activate it. md5 and sha224 are not + allowed in flags, which should nudge people toward good algos. We + always OR all hashes together, even ones from URLs. + + :param trust_internet: Whether to trust URL-based (#md5=...) 
hashes + downloaded from the internet, as by populate_link() + + """ + good_hashes = self.hash_options.copy() + if trust_internet: + link = self.link + elif self.is_direct and self.user_supplied: + link = self.original_link + else: + link = None + if link and link.hash: + assert link.hash_name is not None + good_hashes.setdefault(link.hash_name, []).append(link.hash) + return Hashes(good_hashes) + + def from_path(self) -> str | None: + """Format a nice indicator to show where this "comes from" """ + if self.req is None: + return None + s = str(self.req) + if self.comes_from: + comes_from: str | None + if isinstance(self.comes_from, str): + comes_from = self.comes_from + else: + comes_from = self.comes_from.from_path() + if comes_from: + s += "->" + comes_from + return s + + def ensure_build_location( + self, build_dir: str, autodelete: bool, parallel_builds: bool + ) -> str: + assert build_dir is not None + if self._temp_build_dir is not None: + assert self._temp_build_dir.path + return self._temp_build_dir.path + if self.req is None: + # Some systems have /tmp as a symlink which confuses custom + # builds (such as numpy). Thus, we ensure that the real path + # is returned. + self._temp_build_dir = TempDirectory( + kind=tempdir_kinds.REQ_BUILD, globally_managed=True + ) + + return self._temp_build_dir.path + + # This is the only remaining place where we manually determine the path + # for the temporary directory. It is only needed for editables where + # it is the value of the --src option. + + # When parallel builds are enabled, add a UUID to the build directory + # name so multiple builds do not interfere with each other. + dir_name: str = canonicalize_name(self.req.name) + if parallel_builds: + dir_name = f"{dir_name}_{uuid.uuid4().hex}" + + # FIXME: Is there a better place to create the build_dir? (hg and bzr + # need this) + if not os.path.exists(build_dir): + logger.debug("Creating directory %s", build_dir) + os.makedirs(build_dir) + actual_build_dir = os.path.join(build_dir, dir_name) + # `None` indicates that we respect the globally-configured deletion + # settings, which is what we actually want when auto-deleting. + delete_arg = None if autodelete else False + return TempDirectory( + path=actual_build_dir, + delete=delete_arg, + kind=tempdir_kinds.REQ_BUILD, + globally_managed=True, + ).path + + def _set_requirement(self) -> None: + """Set requirement after generating metadata.""" + assert self.req is None + assert self.metadata is not None + assert self.source_dir is not None + + # Construct a Requirement object from the generated metadata + if isinstance(parse_version(self.metadata["Version"]), Version): + op = "==" + else: + op = "===" + + self.req = get_requirement( + "".join( + [ + self.metadata["Name"], + op, + self.metadata["Version"], + ] + ) + ) + + def warn_on_mismatching_name(self) -> None: + assert self.req is not None + metadata_name = canonicalize_name(self.metadata["Name"]) + if canonicalize_name(self.req.name) == metadata_name: + # Everything is fine. + return + + # If we're here, there's a mismatch. Log a warning about it. + logger.warning( + "Generating metadata for package %s " + "produced metadata for project name %s. Fix your " + "#egg=%s fragments.", + self.name, + metadata_name, + self.name, + ) + self.req = get_requirement(metadata_name) + + def check_if_exists(self, use_user_site: bool) -> None: + """Find an installed distribution that satisfies or conflicts + with this requirement, and set self.satisfied_by or + self.should_reinstall appropriately. 
+ """ + if self.req is None: + return + existing_dist = get_default_environment().get_distribution(self.req.name) + if not existing_dist: + return + + version_compatible = self.req.specifier.contains( + existing_dist.version, + prereleases=True, + ) + if not version_compatible: + self.satisfied_by = None + if use_user_site: + if existing_dist.in_usersite: + self.should_reinstall = True + elif running_under_virtualenv() and existing_dist.in_site_packages: + raise InstallationError( + f"Will not install to the user site because it will " + f"lack sys.path precedence to {existing_dist.raw_name} " + f"in {existing_dist.location}" + ) + else: + self.should_reinstall = True + else: + if self.editable: + self.should_reinstall = True + # when installing editables, nothing pre-existing should ever + # satisfy + self.satisfied_by = None + else: + self.satisfied_by = existing_dist + + # Things valid for wheels + @property + def is_wheel(self) -> bool: + if not self.link: + return False + return self.link.is_wheel + + @property + def is_wheel_from_cache(self) -> bool: + # When True, it means that this InstallRequirement is a local wheel file in the + # cache of locally built wheels. + return self.cached_wheel_source_link is not None + + # Things valid for sdists + @property + def unpacked_source_directory(self) -> str: + assert self.source_dir, f"No source dir for {self}" + return os.path.join( + self.source_dir, self.link and self.link.subdirectory_fragment or "" + ) + + @property + def setup_py_path(self) -> str: + assert self.source_dir, f"No source dir for {self}" + setup_py = os.path.join(self.unpacked_source_directory, "setup.py") + + return setup_py + + @property + def setup_cfg_path(self) -> str: + assert self.source_dir, f"No source dir for {self}" + setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg") + + return setup_cfg + + @property + def pyproject_toml_path(self) -> str: + assert self.source_dir, f"No source dir for {self}" + return make_pyproject_path(self.unpacked_source_directory) + + def load_pyproject_toml(self) -> None: + """Load the pyproject.toml file. + + After calling this routine, all of the attributes related to PEP 517 + processing for this requirement have been set. In particular, the + use_pep517 attribute can be used to determine whether we should + follow the PEP 517 or legacy (setup.py) code path. + """ + pyproject_toml_data = load_pyproject_toml( + self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self) + ) + + if pyproject_toml_data is None: + assert not self.config_settings + self.use_pep517 = False + return + + self.use_pep517 = True + requires, backend, check, backend_path = pyproject_toml_data + self.requirements_to_check = check + self.pyproject_requires = requires + self.pep517_backend = ConfiguredBuildBackendHookCaller( + self, + self.unpacked_source_directory, + backend, + backend_path=backend_path, + ) + + def isolated_editable_sanity_check(self) -> None: + """Check that an editable requirement if valid for use with PEP 517/518. + + This verifies that an editable that has a pyproject.toml either supports PEP 660 + or as a setup.py or a setup.cfg + """ + if ( + self.editable + and self.use_pep517 + and not self.supports_pyproject_editable + and not os.path.isfile(self.setup_py_path) + and not os.path.isfile(self.setup_cfg_path) + ): + raise InstallationError( + f"Project {self} has a 'pyproject.toml' and its build " + f"backend is missing the 'build_editable' hook. 
Since it does not " + f"have a 'setup.py' nor a 'setup.cfg', " + f"it cannot be installed in editable mode. " + f"Consider using a build backend that supports PEP 660." + ) + + def prepare_metadata(self) -> None: + """Ensure that project metadata is available. + + Under PEP 517 and PEP 660, call the backend hook to prepare the metadata. + Under legacy processing, call setup.py egg-info. + """ + assert self.source_dir, f"No source dir for {self}" + details = self.name or f"from {self.link}" + + if self.use_pep517: + assert self.pep517_backend is not None + if ( + self.editable + and self.permit_editable_wheels + and self.supports_pyproject_editable + ): + self.metadata_directory = generate_editable_metadata( + build_env=self.build_env, + backend=self.pep517_backend, + details=details, + ) + else: + self.metadata_directory = generate_metadata( + build_env=self.build_env, + backend=self.pep517_backend, + details=details, + ) + else: + self.metadata_directory = generate_metadata_legacy( + build_env=self.build_env, + setup_py_path=self.setup_py_path, + source_dir=self.unpacked_source_directory, + isolated=self.isolated, + details=details, + ) + + # Act on the newly generated metadata, based on the name and version. + if not self.name: + self._set_requirement() + else: + self.warn_on_mismatching_name() + + self.assert_source_matches_version() + + @property + def metadata(self) -> Any: + if not hasattr(self, "_metadata"): + self._metadata = self.get_dist().metadata + + return self._metadata + + def get_dist(self) -> BaseDistribution: + if self.metadata_directory: + return get_directory_distribution(self.metadata_directory) + elif self.local_file_path and self.is_wheel: + assert self.req is not None + return get_wheel_distribution( + FilesystemWheel(self.local_file_path), + canonicalize_name(self.req.name), + ) + raise AssertionError( + f"InstallRequirement {self} has no metadata directory and no wheel: " + f"can't make a distribution." + ) + + def assert_source_matches_version(self) -> None: + assert self.source_dir, f"No source dir for {self}" + version = self.metadata["version"] + if self.req and self.req.specifier and version not in self.req.specifier: + logger.warning( + "Requested %s, but installing version %s", + self, + version, + ) + else: + logger.debug( + "Source in %s has version %s, which satisfies requirement %s", + display_path(self.source_dir), + version, + self, + ) + + # For both source distributions and editables + def ensure_has_source_dir( + self, + parent_dir: str, + autodelete: bool = False, + parallel_builds: bool = False, + ) -> None: + """Ensure that a source_dir is set. + + This will create a temporary build dir if the name of the requirement + isn't known yet. + + :param parent_dir: The ideal pip parent_dir for the source_dir. + Generally src_dir for editables and build_dir for sdists. + :return: self.source_dir + """ + if self.source_dir is None: + self.source_dir = self.ensure_build_location( + parent_dir, + autodelete=autodelete, + parallel_builds=parallel_builds, + ) + + def needs_unpacked_archive(self, archive_source: Path) -> None: + assert self._archive_source is None + self._archive_source = archive_source + + def ensure_pristine_source_checkout(self) -> None: + """Ensure the source directory has not yet been built in.""" + assert self.source_dir is not None + if self._archive_source is not None: + unpack_file(str(self._archive_source), self.source_dir) + elif is_installable_dir(self.source_dir): + # If a checkout exists, it's unwise to keep going. 
+            # version inconsistencies are logged later, but do not fail
+            # the installation.
+            raise PreviousBuildDirError(
+                f"pip can't proceed with requirements '{self}' due to a "
+                f"pre-existing build directory ({self.source_dir}). This is likely "
+                "due to a previous installation that failed. pip is "
+                "being responsible and not assuming it can delete this. "
+                "Please delete it and try again."
+            )
+
+    # For editable installations
+    def update_editable(self) -> None:
+        if not self.link:
+            logger.debug(
+                "Cannot update repository at %s; repository location is unknown",
+                self.source_dir,
+            )
+            return
+        assert self.editable
+        assert self.source_dir
+        if self.link.scheme == "file":
+            # Static paths don't get updated
+            return
+        vcs_backend = vcs.get_backend_for_scheme(self.link.scheme)
+        # Editable requirements are validated in Requirement constructors.
+        # So here, if it's neither a path nor a valid VCS URL, it's a bug.
+        assert vcs_backend, f"Unsupported VCS URL {self.link.url}"
+        hidden_url = hide_url(self.link.url)
+        vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0)
+
+    # Top-level Actions
+    def uninstall(
+        self, auto_confirm: bool = False, verbose: bool = False
+    ) -> UninstallPathSet | None:
+        """
+        Uninstall the distribution currently satisfying this requirement.
+
+        Prompts before removing or modifying files unless
+        ``auto_confirm`` is True.
+
+        Refuses to delete or modify files outside of ``sys.prefix`` -
+        thus uninstallation within a virtual environment can only
+        modify that virtual environment, even if the virtualenv is
+        linked to global site-packages.
+
+        """
+        assert self.req
+        dist = get_default_environment().get_distribution(self.req.name)
+        if not dist:
+            logger.warning("Skipping %s as it is not installed.", self.name)
+            return None
+        logger.info("Found existing installation: %s", dist)
+
+        uninstalled_pathset = UninstallPathSet.from_dist(dist)
+        uninstalled_pathset.remove(auto_confirm, verbose)
+        return uninstalled_pathset
+
+    def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str:
+        def _clean_zip_name(name: str, prefix: str) -> str:
+            assert name.startswith(
+                prefix + os.path.sep
+            ), f"name {name!r} doesn't start with prefix {prefix!r}"
+            name = name[len(prefix) + 1 :]
+            name = name.replace(os.path.sep, "/")
+            return name
+
+        assert self.req is not None
+        path = os.path.join(parentdir, path)
+        name = _clean_zip_name(path, rootdir)
+        return self.req.name + "/" + name
+
+    def archive(self, build_dir: str | None) -> None:
+        """Saves archive to provided build_dir.
+
+        Used for saving downloaded VCS requirements as part of `pip download`.
+        """
+        assert self.source_dir
+        if build_dir is None:
+            return
+
+        create_archive = True
+        archive_name = "{}-{}.zip".format(self.name, self.metadata["version"])
+        archive_path = os.path.join(build_dir, archive_name)
+
+        if os.path.exists(archive_path):
+            response = ask_path_exists(
+                f"The file {display_path(archive_path)} exists.
(i)gnore, (w)ipe, " + "(b)ackup, (a)bort ", + ("i", "w", "b", "a"), + ) + if response == "i": + create_archive = False + elif response == "w": + logger.warning("Deleting %s", display_path(archive_path)) + os.remove(archive_path) + elif response == "b": + dest_file = backup_dir(archive_path) + logger.warning( + "Backing up %s to %s", + display_path(archive_path), + display_path(dest_file), + ) + shutil.move(archive_path, dest_file) + elif response == "a": + sys.exit(-1) + + if not create_archive: + return + + zip_output = zipfile.ZipFile( + archive_path, + "w", + zipfile.ZIP_DEFLATED, + allowZip64=True, + ) + with zip_output: + dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory)) + for dirpath, dirnames, filenames in os.walk(dir): + for dirname in dirnames: + dir_arcname = self._get_archive_name( + dirname, + parentdir=dirpath, + rootdir=dir, + ) + zipdir = zipfile.ZipInfo(dir_arcname + "/") + zipdir.external_attr = 0x1ED << 16 # 0o755 + zip_output.writestr(zipdir, "") + for filename in filenames: + file_arcname = self._get_archive_name( + filename, + parentdir=dirpath, + rootdir=dir, + ) + filename = os.path.join(dirpath, filename) + zip_output.write(filename, file_arcname) + + logger.info("Saved %s", display_path(archive_path)) + + def install( + self, + global_options: Sequence[str] | None = None, + root: str | None = None, + home: str | None = None, + prefix: str | None = None, + warn_script_location: bool = True, + use_user_site: bool = False, + pycompile: bool = True, + ) -> None: + assert self.req is not None + scheme = get_scheme( + self.req.name, + user=use_user_site, + home=home, + root=root, + isolated=self.isolated, + prefix=prefix, + ) + + if self.editable and not self.is_wheel: + deprecated( + reason=( + f"Legacy editable install of {self} (setup.py develop) " + "is deprecated." + ), + replacement=( + "to add a pyproject.toml or enable --use-pep517, " + "and use setuptools >= 64. " + "If the resulting installation is not behaving as expected, " + "try using --config-settings editable_mode=compat. " + "Please consult the setuptools documentation for more information" + ), + gone_in="25.3", + issue=11457, + ) + if self.config_settings: + logger.warning( + "--config-settings ignored for legacy editable install of %s. 
" + "Consider upgrading to a version of setuptools " + "that supports PEP 660 (>= 64).", + self, + ) + install_editable_legacy( + global_options=global_options if global_options is not None else [], + prefix=prefix, + home=home, + use_user_site=use_user_site, + name=self.req.name, + setup_py_path=self.setup_py_path, + isolated=self.isolated, + build_env=self.build_env, + unpacked_source_directory=self.unpacked_source_directory, + ) + self.install_succeeded = True + return + + assert self.is_wheel + assert self.local_file_path + + install_wheel( + self.req.name, + self.local_file_path, + scheme=scheme, + req_description=str(self.req), + pycompile=pycompile, + warn_script_location=warn_script_location, + direct_url=self.download_info if self.is_direct else None, + requested=self.user_supplied, + ) + self.install_succeeded = True + + +def check_invalid_constraint_type(req: InstallRequirement) -> str: + # Check for unsupported forms + problem = "" + if not req.name: + problem = "Unnamed requirements are not allowed as constraints" + elif req.editable: + problem = "Editable requirements are not allowed as constraints" + elif req.extras: + problem = "Constraints cannot have extras" + + if problem: + deprecated( + reason=( + "Constraints are only allowed to take the form of a package " + "name and a version specifier. Other forms were originally " + "permitted as an accident of the implementation, but were " + "undocumented. The new implementation of the resolver no " + "longer supports these forms." + ), + replacement="replacing the constraint with a requirement", + # No plan yet for when the new resolver becomes default + gone_in=None, + issue=8210, + ) + + return problem + + +def _has_option(options: Values, reqs: list[InstallRequirement], option: str) -> bool: + if getattr(options, option, None): + return True + for req in reqs: + if getattr(req, option, None): + return True + return False + + +def check_legacy_setup_py_options( + options: Values, + reqs: list[InstallRequirement], +) -> None: + has_build_options = _has_option(options, reqs, "build_options") + has_global_options = _has_option(options, reqs, "global_options") + if has_build_options or has_global_options: + deprecated( + reason="--build-option and --global-option are deprecated.", + issue=11859, + replacement="to use --config-settings", + gone_in="25.3", + ) + logger.warning( + "Implying --no-binary=:all: due to the presence of " + "--build-option / --global-option. 
" + ) + options.format_control.disallow_binaries() diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/req_set.py b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_set.py new file mode 100644 index 0000000..3451b24 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_set.py @@ -0,0 +1,81 @@ +import logging +from collections import OrderedDict + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.req.req_install import InstallRequirement + +logger = logging.getLogger(__name__) + + +class RequirementSet: + def __init__(self, check_supported_wheels: bool = True) -> None: + """Create a RequirementSet.""" + + self.requirements: dict[str, InstallRequirement] = OrderedDict() + self.check_supported_wheels = check_supported_wheels + + self.unnamed_requirements: list[InstallRequirement] = [] + + def __str__(self) -> str: + requirements = sorted( + (req for req in self.requirements.values() if not req.comes_from), + key=lambda req: canonicalize_name(req.name or ""), + ) + return " ".join(str(req.req) for req in requirements) + + def __repr__(self) -> str: + requirements = sorted( + self.requirements.values(), + key=lambda req: canonicalize_name(req.name or ""), + ) + + format_string = "<{classname} object; {count} requirement(s): {reqs}>" + return format_string.format( + classname=self.__class__.__name__, + count=len(requirements), + reqs=", ".join(str(req.req) for req in requirements), + ) + + def add_unnamed_requirement(self, install_req: InstallRequirement) -> None: + assert not install_req.name + self.unnamed_requirements.append(install_req) + + def add_named_requirement(self, install_req: InstallRequirement) -> None: + assert install_req.name + + project_name = canonicalize_name(install_req.name) + self.requirements[project_name] = install_req + + def has_requirement(self, name: str) -> bool: + project_name = canonicalize_name(name) + + return ( + project_name in self.requirements + and not self.requirements[project_name].constraint + ) + + def get_requirement(self, name: str) -> InstallRequirement: + project_name = canonicalize_name(name) + + if project_name in self.requirements: + return self.requirements[project_name] + + raise KeyError(f"No project with the name {name!r}") + + @property + def all_requirements(self) -> list[InstallRequirement]: + return self.unnamed_requirements + list(self.requirements.values()) + + @property + def requirements_to_install(self) -> list[InstallRequirement]: + """Return the list of requirements that need to be installed. + + TODO remove this property together with the legacy resolver, since the new + resolver only returns requirements that need to be installed. 
+ """ + return [ + install_req + for install_req in self.all_requirements + if not install_req.constraint and not install_req.satisfied_by + ] diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/req/req_uninstall.py b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_uninstall.py new file mode 100644 index 0000000..3f3dde2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/req/req_uninstall.py @@ -0,0 +1,639 @@ +from __future__ import annotations + +import functools +import os +import sys +import sysconfig +from collections.abc import Generator, Iterable +from importlib.util import cache_from_source +from typing import Any, Callable + +from pip._internal.exceptions import LegacyDistutilsInstall, UninstallMissingRecord +from pip._internal.locations import get_bin_prefix, get_bin_user +from pip._internal.metadata import BaseDistribution +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.egg_link import egg_link_path_from_location +from pip._internal.utils.logging import getLogger, indent_log +from pip._internal.utils.misc import ask, normalize_path, renames, rmtree +from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory +from pip._internal.utils.virtualenv import running_under_virtualenv + +logger = getLogger(__name__) + + +def _script_names( + bin_dir: str, script_name: str, is_gui: bool +) -> Generator[str, None, None]: + """Create the fully qualified name of the files created by + {console,gui}_scripts for the given ``dist``. + Returns the list of file names + """ + exe_name = os.path.join(bin_dir, script_name) + yield exe_name + if not WINDOWS: + return + yield f"{exe_name}.exe" + yield f"{exe_name}.exe.manifest" + if is_gui: + yield f"{exe_name}-script.pyw" + else: + yield f"{exe_name}-script.py" + + +def _unique( + fn: Callable[..., Generator[Any, None, None]], +) -> Callable[..., Generator[Any, None, None]]: + @functools.wraps(fn) + def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]: + seen: set[Any] = set() + for item in fn(*args, **kw): + if item not in seen: + seen.add(item) + yield item + + return unique + + +@_unique +def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]: + """ + Yield all the uninstallation paths for dist based on RECORD-without-.py[co] + + Yield paths to all the files in RECORD. For each .py file in RECORD, add + the .pyc and .pyo in the same directory. + + UninstallPathSet.add() takes care of the __pycache__ .py[co]. + + If RECORD is not found, raises an error, + with possible information from the INSTALLER file. + + https://packaging.python.org/specifications/recording-installed-packages/ + """ + location = dist.location + assert location is not None, "not installed" + + entries = dist.iter_declared_entries() + if entries is None: + raise UninstallMissingRecord(distribution=dist) + + for entry in entries: + path = os.path.join(location, entry) + yield path + if path.endswith(".py"): + dn, fn = os.path.split(path) + base = fn[:-3] + path = os.path.join(dn, base + ".pyc") + yield path + path = os.path.join(dn, base + ".pyo") + yield path + + +def compact(paths: Iterable[str]) -> set[str]: + """Compact a path set to contain the minimal number of paths + necessary to contain all paths in the set. 
If /a/path/ and + /a/path/to/a/file.txt are both in the set, leave only the + shorter path.""" + + sep = os.path.sep + short_paths: set[str] = set() + for path in sorted(paths, key=len): + should_skip = any( + path.startswith(shortpath.rstrip("*")) + and path[len(shortpath.rstrip("*").rstrip(sep))] == sep + for shortpath in short_paths + ) + if not should_skip: + short_paths.add(path) + return short_paths + + +def compress_for_rename(paths: Iterable[str]) -> set[str]: + """Returns a set containing the paths that need to be renamed. + + This set may include directories when the original sequence of paths + included every file on disk. + """ + case_map = {os.path.normcase(p): p for p in paths} + remaining = set(case_map) + unchecked = sorted({os.path.split(p)[0] for p in case_map.values()}, key=len) + wildcards: set[str] = set() + + def norm_join(*a: str) -> str: + return os.path.normcase(os.path.join(*a)) + + for root in unchecked: + if any(os.path.normcase(root).startswith(w) for w in wildcards): + # This directory has already been handled. + continue + + all_files: set[str] = set() + all_subdirs: set[str] = set() + for dirname, subdirs, files in os.walk(root): + all_subdirs.update(norm_join(root, dirname, d) for d in subdirs) + all_files.update(norm_join(root, dirname, f) for f in files) + # If all the files we found are in our remaining set of files to + # remove, then remove them from the latter set and add a wildcard + # for the directory. + if not (all_files - remaining): + remaining.difference_update(all_files) + wildcards.add(root + os.sep) + + return set(map(case_map.__getitem__, remaining)) | wildcards + + +def compress_for_output_listing(paths: Iterable[str]) -> tuple[set[str], set[str]]: + """Returns a tuple of 2 sets of which paths to display to user + + The first set contains paths that would be deleted. Files of a package + are not added and the top-level directory of the package has a '*' added + at the end - to signify that all it's contents are removed. + + The second set contains files that would have been skipped in the above + folders. + """ + + will_remove = set(paths) + will_skip = set() + + # Determine folders and files + folders = set() + files = set() + for path in will_remove: + if path.endswith(".pyc"): + continue + if path.endswith("__init__.py") or ".dist-info" in path: + folders.add(os.path.dirname(path)) + files.add(path) + + _normcased_files = set(map(os.path.normcase, files)) + + folders = compact(folders) + + # This walks the tree using os.walk to not miss extra folders + # that might get added. + for folder in folders: + for dirpath, _, dirfiles in os.walk(folder): + for fname in dirfiles: + if fname.endswith(".pyc"): + continue + + file_ = os.path.join(dirpath, fname) + if ( + os.path.isfile(file_) + and os.path.normcase(file_) not in _normcased_files + ): + # We are skipping this file. Add it to the set. + will_skip.add(file_) + + will_remove = files | {os.path.join(folder, "*") for folder in folders} + + return will_remove, will_skip + + +class StashedUninstallPathSet: + """A set of file rename operations to stash files while + tentatively uninstalling them.""" + + def __init__(self) -> None: + # Mapping from source file root to [Adjacent]TempDirectory + # for files under that directory. + self._save_dirs: dict[str, TempDirectory] = {} + # (old path, new path) tuples for each move that may need + # to be undone. + self._moves: list[tuple[str, str]] = [] + + def _get_directory_stash(self, path: str) -> str: + """Stashes a directory. 
+ + Directories are stashed adjacent to their original location if + possible, or else moved/copied into the user's temp dir.""" + + try: + save_dir: TempDirectory = AdjacentTempDirectory(path) + except OSError: + save_dir = TempDirectory(kind="uninstall") + self._save_dirs[os.path.normcase(path)] = save_dir + + return save_dir.path + + def _get_file_stash(self, path: str) -> str: + """Stashes a file. + + If no root has been provided, one will be created for the directory + in the user's temp directory.""" + path = os.path.normcase(path) + head, old_head = os.path.dirname(path), None + save_dir = None + + while head != old_head: + try: + save_dir = self._save_dirs[head] + break + except KeyError: + pass + head, old_head = os.path.dirname(head), head + else: + # Did not find any suitable root + head = os.path.dirname(path) + save_dir = TempDirectory(kind="uninstall") + self._save_dirs[head] = save_dir + + relpath = os.path.relpath(path, head) + if relpath and relpath != os.path.curdir: + return os.path.join(save_dir.path, relpath) + return save_dir.path + + def stash(self, path: str) -> str: + """Stashes the directory or file and returns its new location. + Handle symlinks as files to avoid modifying the symlink targets. + """ + path_is_dir = os.path.isdir(path) and not os.path.islink(path) + if path_is_dir: + new_path = self._get_directory_stash(path) + else: + new_path = self._get_file_stash(path) + + self._moves.append((path, new_path)) + if path_is_dir and os.path.isdir(new_path): + # If we're moving a directory, we need to + # remove the destination first or else it will be + # moved to inside the existing directory. + # We just created new_path ourselves, so it will + # be removable. + os.rmdir(new_path) + renames(path, new_path) + return new_path + + def commit(self) -> None: + """Commits the uninstall by removing stashed files.""" + for save_dir in self._save_dirs.values(): + save_dir.cleanup() + self._moves = [] + self._save_dirs = {} + + def rollback(self) -> None: + """Undoes the uninstall by moving stashed files back.""" + for p in self._moves: + logger.info("Moving to %s\n from %s", *p) + + for new_path, path in self._moves: + try: + logger.debug("Replacing %s from %s", new_path, path) + if os.path.isfile(new_path) or os.path.islink(new_path): + os.unlink(new_path) + elif os.path.isdir(new_path): + rmtree(new_path) + renames(path, new_path) + except OSError as ex: + logger.error("Failed to restore %s", new_path) + logger.debug("Exception: %s", ex) + + self.commit() + + @property + def can_rollback(self) -> bool: + return bool(self._moves) + + +class UninstallPathSet: + """A set of file paths to be removed in the uninstallation of a + requirement.""" + + def __init__(self, dist: BaseDistribution) -> None: + self._paths: set[str] = set() + self._refuse: set[str] = set() + self._pth: dict[str, UninstallPthEntries] = {} + self._dist = dist + self._moved_paths = StashedUninstallPathSet() + # Create local cache of normalize_path results. Creating an UninstallPathSet + # can result in hundreds/thousands of redundant calls to normalize_path with + # the same args, which hurts performance. + self._normalize_path_cached = functools.lru_cache(normalize_path) + + def _permitted(self, path: str) -> bool: + """ + Return True if the given path is one we are permitted to + remove/modify, False otherwise. 
+ + """ + # aka is_local, but caching normalized sys.prefix + if not running_under_virtualenv(): + return True + return path.startswith(self._normalize_path_cached(sys.prefix)) + + def add(self, path: str) -> None: + head, tail = os.path.split(path) + + # we normalize the head to resolve parent directory symlinks, but not + # the tail, since we only want to uninstall symlinks, not their targets + path = os.path.join(self._normalize_path_cached(head), os.path.normcase(tail)) + + if not os.path.exists(path): + return + if self._permitted(path): + self._paths.add(path) + else: + self._refuse.add(path) + + # __pycache__ files can show up after 'installed-files.txt' is created, + # due to imports + if os.path.splitext(path)[1] == ".py": + self.add(cache_from_source(path)) + + def add_pth(self, pth_file: str, entry: str) -> None: + pth_file = self._normalize_path_cached(pth_file) + if self._permitted(pth_file): + if pth_file not in self._pth: + self._pth[pth_file] = UninstallPthEntries(pth_file) + self._pth[pth_file].add(entry) + else: + self._refuse.add(pth_file) + + def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None: + """Remove paths in ``self._paths`` with confirmation (unless + ``auto_confirm`` is True).""" + + if not self._paths: + logger.info( + "Can't uninstall '%s'. No files were found to uninstall.", + self._dist.raw_name, + ) + return + + dist_name_version = f"{self._dist.raw_name}-{self._dist.raw_version}" + logger.info("Uninstalling %s:", dist_name_version) + + with indent_log(): + if auto_confirm or self._allowed_to_proceed(verbose): + moved = self._moved_paths + + for_rename = compress_for_rename(self._paths) + + for path in sorted(compact(for_rename)): + moved.stash(path) + logger.verbose("Removing file or directory %s", path) + + for pth in self._pth.values(): + pth.remove() + + logger.info("Successfully uninstalled %s", dist_name_version) + + def _allowed_to_proceed(self, verbose: bool) -> bool: + """Display which files would be deleted and prompt for confirmation""" + + def _display(msg: str, paths: Iterable[str]) -> None: + if not paths: + return + + logger.info(msg) + with indent_log(): + for path in sorted(compact(paths)): + logger.info(path) + + if not verbose: + will_remove, will_skip = compress_for_output_listing(self._paths) + else: + # In verbose mode, display all the files that are going to be + # deleted. + will_remove = set(self._paths) + will_skip = set() + + _display("Would remove:", will_remove) + _display("Would not remove (might be manually added):", will_skip) + _display("Would not remove (outside of prefix):", self._refuse) + if verbose: + _display("Will actually move:", compress_for_rename(self._paths)) + + return ask("Proceed (Y/n)? 
", ("y", "n", "")) != "n" + + def rollback(self) -> None: + """Rollback the changes previously made by remove().""" + if not self._moved_paths.can_rollback: + logger.error( + "Can't roll back %s; was not uninstalled", + self._dist.raw_name, + ) + return + logger.info("Rolling back uninstall of %s", self._dist.raw_name) + self._moved_paths.rollback() + for pth in self._pth.values(): + pth.rollback() + + def commit(self) -> None: + """Remove temporary save dir: rollback will no longer be possible.""" + self._moved_paths.commit() + + @classmethod + def from_dist(cls, dist: BaseDistribution) -> UninstallPathSet: + dist_location = dist.location + info_location = dist.info_location + if dist_location is None: + logger.info( + "Not uninstalling %s since it is not installed", + dist.canonical_name, + ) + return cls(dist) + + normalized_dist_location = normalize_path(dist_location) + if not dist.local: + logger.info( + "Not uninstalling %s at %s, outside environment %s", + dist.canonical_name, + normalized_dist_location, + sys.prefix, + ) + return cls(dist) + + if normalized_dist_location in { + p + for p in {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")} + if p + }: + logger.info( + "Not uninstalling %s at %s, as it is in the standard library.", + dist.canonical_name, + normalized_dist_location, + ) + return cls(dist) + + paths_to_remove = cls(dist) + develop_egg_link = egg_link_path_from_location(dist.raw_name) + + # Distribution is installed with metadata in a "flat" .egg-info + # directory. This means it is not a modern .dist-info installation, an + # egg, or legacy editable. + setuptools_flat_installation = ( + dist.installed_with_setuptools_egg_info + and info_location is not None + and os.path.exists(info_location) + # If dist is editable and the location points to a ``.egg-info``, + # we are in fact in the legacy editable case. + and not info_location.endswith(f"{dist.setuptools_filename}.egg-info") + ) + + # Uninstall cases order do matter as in the case of 2 installs of the + # same package, pip needs to uninstall the currently detected version + if setuptools_flat_installation: + if info_location is not None: + paths_to_remove.add(info_location) + installed_files = dist.iter_declared_entries() + if installed_files is not None: + for installed_file in installed_files: + paths_to_remove.add(os.path.join(dist_location, installed_file)) + # FIXME: need a test for this elif block + # occurs with --single-version-externally-managed/--record outside + # of pip + elif dist.is_file("top_level.txt"): + try: + namespace_packages = dist.read_text("namespace_packages.txt") + except FileNotFoundError: + namespaces = [] + else: + namespaces = namespace_packages.splitlines(keepends=False) + for top_level_pkg in [ + p + for p in dist.read_text("top_level.txt").splitlines() + if p and p not in namespaces + ]: + path = os.path.join(dist_location, top_level_pkg) + paths_to_remove.add(path) + paths_to_remove.add(f"{path}.py") + paths_to_remove.add(f"{path}.pyc") + paths_to_remove.add(f"{path}.pyo") + + elif dist.installed_by_distutils: + raise LegacyDistutilsInstall(distribution=dist) + + elif dist.installed_as_egg: + # package installed by easy_install + # We cannot match on dist.egg_name because it can slightly vary + # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg + # XXX We use normalized_dist_location because dist_location my contain + # a trailing / if the distribution is a zipped egg + # (which is not a directory). 
+            paths_to_remove.add(normalized_dist_location)
+            easy_install_egg = os.path.split(normalized_dist_location)[1]
+            easy_install_pth = os.path.join(
+                os.path.dirname(normalized_dist_location),
+                "easy-install.pth",
+            )
+            paths_to_remove.add_pth(easy_install_pth, "./" + easy_install_egg)
+
+        elif dist.installed_with_dist_info:
+            for path in uninstallation_paths(dist):
+                paths_to_remove.add(path)
+
+        elif develop_egg_link:
+            # PEP 660 modern editable is handled in the ``.dist-info`` case
+            # above, so this only covers the setuptools-style editable.
+            with open(develop_egg_link) as fh:
+                link_pointer = os.path.normcase(fh.readline().strip())
+                normalized_link_pointer = paths_to_remove._normalize_path_cached(
+                    link_pointer
+                )
+            assert os.path.samefile(
+                normalized_link_pointer, normalized_dist_location
+            ), (
+                f"Egg-link {develop_egg_link} (to {link_pointer}) does not match "
+                f"installed location of {dist.raw_name} (at {dist_location})"
+            )
+            paths_to_remove.add(develop_egg_link)
+            easy_install_pth = os.path.join(
+                os.path.dirname(develop_egg_link), "easy-install.pth"
+            )
+            paths_to_remove.add_pth(easy_install_pth, dist_location)
+
+        else:
+            logger.debug(
+                "Not sure how to uninstall: %s - Check: %s",
+                dist,
+                dist_location,
+            )
+
+        if dist.in_usersite:
+            bin_dir = get_bin_user()
+        else:
+            bin_dir = get_bin_prefix()
+
+        # find distutils scripts= scripts
+        try:
+            for script in dist.iter_distutils_script_names():
+                paths_to_remove.add(os.path.join(bin_dir, script))
+                if WINDOWS:
+                    paths_to_remove.add(os.path.join(bin_dir, f"{script}.bat"))
+        except (FileNotFoundError, NotADirectoryError):
+            pass
+
+        # find console_scripts and gui_scripts
+        def iter_scripts_to_remove(
+            dist: BaseDistribution,
+            bin_dir: str,
+        ) -> Generator[str, None, None]:
+            for entry_point in dist.iter_entry_points():
+                if entry_point.group == "console_scripts":
+                    yield from _script_names(bin_dir, entry_point.name, False)
+                elif entry_point.group == "gui_scripts":
+                    yield from _script_names(bin_dir, entry_point.name, True)
+
+        for s in iter_scripts_to_remove(dist, bin_dir):
+            paths_to_remove.add(s)
+
+        return paths_to_remove
+
+
+class UninstallPthEntries:
+    def __init__(self, pth_file: str) -> None:
+        self.file = pth_file
+        self.entries: set[str] = set()
+        self._saved_lines: list[bytes] | None = None
+
+    def add(self, entry: str) -> None:
+        entry = os.path.normcase(entry)
+        # On Windows, os.path.normcase converts the entry to use
+        # backslashes. This is correct for entries that describe absolute
+        # paths outside of site-packages, but all the others use forward
+        # slashes.
+        # os.path.splitdrive is used instead of os.path.isabs because isabs
+        # treats non-absolute paths with drive letter markings like c:foo\bar
+        # as absolute paths. It also does not recognize UNC paths if they don't
+        # have more than "\\server\share". Valid examples: "\\server\share\" or
+        # "\\server\share\folder".
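+        # Illustrative sketch of the distinction drawn above (example values,
+        # evaluated with ntpath on Windows):
+        #   os.path.splitdrive(r"C:\tools\x")       -> ("C:", r"\tools\x")
+        #   os.path.splitdrive(r"c:relative\x")     -> ("c:", r"relative\x")
+        #   os.path.splitdrive(r"\\server\share\x") -> (r"\\server\share", r"\x")
+        #   os.path.splitdrive(r"relative\x")       -> ("", r"relative\x")
+        # Only the last kind (empty drive component) is rewritten to forward
+        # slashes by the code below.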
+ if WINDOWS and not os.path.splitdrive(entry)[0]: + entry = entry.replace("\\", "/") + self.entries.add(entry) + + def remove(self) -> None: + logger.verbose("Removing pth entries from %s:", self.file) + + # If the file doesn't exist, log a warning and return + if not os.path.isfile(self.file): + logger.warning("Cannot remove entries from nonexistent file %s", self.file) + return + with open(self.file, "rb") as fh: + # windows uses '\r\n' with py3k, but uses '\n' with py2.x + lines = fh.readlines() + self._saved_lines = lines + if any(b"\r\n" in line for line in lines): + endline = "\r\n" + else: + endline = "\n" + # handle missing trailing newline + if lines and not lines[-1].endswith(endline.encode("utf-8")): + lines[-1] = lines[-1] + endline.encode("utf-8") + for entry in self.entries: + try: + logger.verbose("Removing entry: %s", entry) + lines.remove((entry + endline).encode("utf-8")) + except ValueError: + pass + with open(self.file, "wb") as fh: + fh.writelines(lines) + + def rollback(self) -> bool: + if self._saved_lines is None: + logger.error("Cannot roll back changes to %s, none were made", self.file) + return False + logger.debug("Rolling %s back to previous state", self.file) + with open(self.file, "wb") as fh: + fh.writelines(self._saved_lines) + return True diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..8b91b8c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/__pycache__/base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/__pycache__/base.cpython-312.pyc new file mode 100644 index 0000000..de8df07 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/__pycache__/base.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/base.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/base.py new file mode 100644 index 0000000..5ec4d96 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/base.py @@ -0,0 +1,20 @@ +from typing import Callable, Optional + +from pip._internal.req.req_install import InstallRequirement +from pip._internal.req.req_set import RequirementSet + +InstallRequirementProvider = Callable[ + [str, Optional[InstallRequirement]], InstallRequirement +] + + +class BaseResolver: + def resolve( + self, root_reqs: list[InstallRequirement], check_supported_wheels: bool + ) -> RequirementSet: + raise NotImplementedError() + + def get_installation_order( + self, req_set: RequirementSet + ) -> list[InstallRequirement]: + raise NotImplementedError() diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-312.pyc new 
file mode 100644 index 0000000..ab1dd72 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-312.pyc new file mode 100644 index 0000000..44f77d3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/resolver.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/resolver.py new file mode 100644 index 0000000..33a4fdc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/legacy/resolver.py @@ -0,0 +1,598 @@ +"""Dependency Resolution + +The dependency resolution in pip is performed as follows: + +for top-level requirements: + a. only one spec allowed per project, regardless of conflicts or not. + otherwise a "double requirement" exception is raised + b. they override sub-dependency requirements. +for sub-dependencies + a. "first found, wins" (where the order is breadth first) +""" + +from __future__ import annotations + +import logging +import sys +from collections import defaultdict +from collections.abc import Iterable +from itertools import chain +from typing import Optional + +from pip._vendor.packaging import specifiers +from pip._vendor.packaging.requirements import Requirement + +from pip._internal.cache import WheelCache +from pip._internal.exceptions import ( + BestVersionAlreadyInstalled, + DistributionNotFound, + HashError, + HashErrors, + InstallationError, + NoneMetadataError, + UnsupportedPythonVersion, +) +from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution +from pip._internal.models.link import Link +from pip._internal.models.wheel import Wheel +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req.req_install import ( + InstallRequirement, + check_invalid_constraint_type, +) +from pip._internal.req.req_set import RequirementSet +from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider +from pip._internal.utils import compatibility_tags +from pip._internal.utils.compatibility_tags import get_supported +from pip._internal.utils.direct_url_helpers import direct_url_from_link +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import normalize_version_info +from pip._internal.utils.packaging import check_requires_python + +logger = logging.getLogger(__name__) + +DiscoveredDependencies = defaultdict[Optional[str], list[InstallRequirement]] + + +def _check_dist_requires_python( + dist: BaseDistribution, + version_info: tuple[int, int, int], + ignore_requires_python: bool = False, +) -> None: + """ + Check whether the given Python version is compatible with a distribution's + "Requires-Python" value. + + :param version_info: A 3-tuple of ints representing the Python + major-minor-micro version to check. + :param ignore_requires_python: Whether to ignore the "Requires-Python" + value if the given Python version isn't compatible. + + :raises UnsupportedPythonVersion: When the given Python version isn't + compatible. 
+ """ + # This idiosyncratically converts the SpecifierSet to str and let + # check_requires_python then parse it again into SpecifierSet. But this + # is the legacy resolver so I'm just not going to bother refactoring. + try: + requires_python = str(dist.requires_python) + except FileNotFoundError as e: + raise NoneMetadataError(dist, str(e)) + try: + is_compatible = check_requires_python( + requires_python, + version_info=version_info, + ) + except specifiers.InvalidSpecifier as exc: + logger.warning( + "Package %r has an invalid Requires-Python: %s", dist.raw_name, exc + ) + return + + if is_compatible: + return + + version = ".".join(map(str, version_info)) + if ignore_requires_python: + logger.debug( + "Ignoring failed Requires-Python check for package %r: %s not in %r", + dist.raw_name, + version, + requires_python, + ) + return + + raise UnsupportedPythonVersion( + f"Package {dist.raw_name!r} requires a different Python: " + f"{version} not in {requires_python!r}" + ) + + +class Resolver(BaseResolver): + """Resolves which packages need to be installed/uninstalled to perform \ + the requested operation without breaking the requirements of any package. + """ + + _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} + + def __init__( + self, + preparer: RequirementPreparer, + finder: PackageFinder, + wheel_cache: WheelCache | None, + make_install_req: InstallRequirementProvider, + use_user_site: bool, + ignore_dependencies: bool, + ignore_installed: bool, + ignore_requires_python: bool, + force_reinstall: bool, + upgrade_strategy: str, + py_version_info: tuple[int, ...] | None = None, + ) -> None: + super().__init__() + assert upgrade_strategy in self._allowed_strategies + + if py_version_info is None: + py_version_info = sys.version_info[:3] + else: + py_version_info = normalize_version_info(py_version_info) + + self._py_version_info = py_version_info + + self.preparer = preparer + self.finder = finder + self.wheel_cache = wheel_cache + + self.upgrade_strategy = upgrade_strategy + self.force_reinstall = force_reinstall + self.ignore_dependencies = ignore_dependencies + self.ignore_installed = ignore_installed + self.ignore_requires_python = ignore_requires_python + self.use_user_site = use_user_site + self._make_install_req = make_install_req + + self._discovered_dependencies: DiscoveredDependencies = defaultdict(list) + + def resolve( + self, root_reqs: list[InstallRequirement], check_supported_wheels: bool + ) -> RequirementSet: + """Resolve what operations need to be done + + As a side-effect of this method, the packages (and their dependencies) + are downloaded, unpacked and prepared for installation. This + preparation is done by ``pip.operations.prepare``. + + Once PyPI has static dependency metadata available, it would be + possible to move the preparation to become a step separated from + dependency resolution. + """ + requirement_set = RequirementSet(check_supported_wheels=check_supported_wheels) + for req in root_reqs: + if req.constraint: + check_invalid_constraint_type(req) + self._add_requirement_to_set(requirement_set, req) + + # Actually prepare the files, and collect any exceptions. Most hash + # exceptions cannot be checked ahead of time, because + # _populate_link() needs to be called before we can make decisions + # based on link type. 
+ discovered_reqs: list[InstallRequirement] = [] + hash_errors = HashErrors() + for req in chain(requirement_set.all_requirements, discovered_reqs): + try: + discovered_reqs.extend(self._resolve_one(requirement_set, req)) + except HashError as exc: + exc.req = req + hash_errors.append(exc) + + if hash_errors: + raise hash_errors + + return requirement_set + + def _add_requirement_to_set( + self, + requirement_set: RequirementSet, + install_req: InstallRequirement, + parent_req_name: str | None = None, + extras_requested: Iterable[str] | None = None, + ) -> tuple[list[InstallRequirement], InstallRequirement | None]: + """Add install_req as a requirement to install. + + :param parent_req_name: The name of the requirement that needed this + added. The name is used because when multiple unnamed requirements + resolve to the same name, we could otherwise end up with dependency + links that point outside the Requirements set. parent_req must + already be added. Note that None implies that this is a user + supplied requirement, vs an inferred one. + :param extras_requested: an iterable of extras used to evaluate the + environment markers. + :return: Additional requirements to scan. That is either [] if + the requirement is not applicable, or [install_req] if the + requirement is applicable and has just been added. + """ + # If the markers do not match, ignore this requirement. + if not install_req.match_markers(extras_requested): + logger.info( + "Ignoring %s: markers '%s' don't match your environment", + install_req.name, + install_req.markers, + ) + return [], None + + # If the wheel is not supported, raise an error. + # Should check this after filtering out based on environment markers to + # allow specifying different wheels based on the environment/OS, in a + # single requirements file. + if install_req.link and install_req.link.is_wheel: + wheel = Wheel(install_req.link.filename) + tags = compatibility_tags.get_supported() + if requirement_set.check_supported_wheels and not wheel.supported(tags): + raise InstallationError( + f"{wheel.filename} is not a supported wheel on this platform." + ) + + # This next bit is really a sanity check. + assert ( + not install_req.user_supplied or parent_req_name is None + ), "a user supplied req shouldn't have a parent" + + # Unnamed requirements are scanned again and the requirement won't be + # added as a dependency until after scanning. + if not install_req.name: + requirement_set.add_unnamed_requirement(install_req) + return [install_req], None + + try: + existing_req: InstallRequirement | None = requirement_set.get_requirement( + install_req.name + ) + except KeyError: + existing_req = None + + has_conflicting_requirement = ( + parent_req_name is None + and existing_req + and not existing_req.constraint + and existing_req.extras == install_req.extras + and existing_req.req + and install_req.req + and existing_req.req.specifier != install_req.req.specifier + ) + if has_conflicting_requirement: + raise InstallationError( + f"Double requirement given: {install_req} " + f"(already in {existing_req}, name={install_req.name!r})" + ) + + # When no existing requirement exists, add the requirement as a + # dependency and it will be scanned again after. + if not existing_req: + requirement_set.add_named_requirement(install_req) + # We'd want to rescan this requirement later + return [install_req], install_req + + # Assume there's no need to scan, and that we've already + # encountered this for scanning. 
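+        # Illustrative summary (one reading of the branches below): both
+        # requirements are named and an existing entry was found. If the
+        # incoming req is itself a constraint, or the existing entry is a
+        # real (non-constraint) requirement, there is nothing new to scan.
+        # Only the remaining case -- existing entry is a constraint, incoming
+        # req is a real requirement -- falls through and promotes the
+        # constraint into an installable requirement.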
+ if install_req.constraint or not existing_req.constraint: + return [], existing_req + + does_not_satisfy_constraint = install_req.link and not ( + existing_req.link and install_req.link.path == existing_req.link.path + ) + if does_not_satisfy_constraint: + raise InstallationError( + f"Could not satisfy constraints for '{install_req.name}': " + "installation from path or url cannot be " + "constrained to a version" + ) + # If we're now installing a constraint, mark the existing + # object for real installation. + existing_req.constraint = False + # If we're now installing a user supplied requirement, + # mark the existing object as such. + if install_req.user_supplied: + existing_req.user_supplied = True + existing_req.extras = tuple( + sorted(set(existing_req.extras) | set(install_req.extras)) + ) + logger.debug( + "Setting %s extras to: %s", + existing_req, + existing_req.extras, + ) + # Return the existing requirement for addition to the parent and + # scanning again. + return [existing_req], existing_req + + def _is_upgrade_allowed(self, req: InstallRequirement) -> bool: + if self.upgrade_strategy == "to-satisfy-only": + return False + elif self.upgrade_strategy == "eager": + return True + else: + assert self.upgrade_strategy == "only-if-needed" + return req.user_supplied or req.constraint + + def _set_req_to_reinstall(self, req: InstallRequirement) -> None: + """ + Set a requirement to be installed. + """ + # Don't uninstall the conflict if doing a user install and the + # conflict is not a user install. + assert req.satisfied_by is not None + if not self.use_user_site or req.satisfied_by.in_usersite: + req.should_reinstall = True + req.satisfied_by = None + + def _check_skip_installed(self, req_to_install: InstallRequirement) -> str | None: + """Check if req_to_install should be skipped. + + This will check if the req is installed, and whether we should upgrade + or reinstall it, taking into account all the relevant user options. + + After calling this req_to_install will only have satisfied_by set to + None if the req_to_install is to be upgraded/reinstalled etc. Any + other value will be a dist recording the current thing installed that + satisfies the requirement. + + Note that for vcs urls and the like we can't assess skipping in this + routine - we simply identify that we need to pull the thing down, + then later on it is pulled down and introspected to assess upgrade/ + reinstalls etc. + + :return: A text reason for why it was skipped, or None. + """ + if self.ignore_installed: + return None + + req_to_install.check_if_exists(self.use_user_site) + if not req_to_install.satisfied_by: + return None + + if self.force_reinstall: + self._set_req_to_reinstall(req_to_install) + return None + + if not self._is_upgrade_allowed(req_to_install): + if self.upgrade_strategy == "only-if-needed": + return "already satisfied, skipping upgrade" + return "already satisfied" + + # Check for the possibility of an upgrade. For link-based + # requirements we have to pull the tree down and inspect to assess + # the version #, so it's handled way down. + if not req_to_install.link: + try: + self.finder.find_requirement(req_to_install, upgrade=True) + except BestVersionAlreadyInstalled: + # Then the best version is installed. + return "already up-to-date" + except DistributionNotFound: + # No distribution found, so we squash the error. It will + # be raised later when we re-try later to do the install. + # Why don't we just raise here? 
+ pass + + self._set_req_to_reinstall(req_to_install) + return None + + def _find_requirement_link(self, req: InstallRequirement) -> Link | None: + upgrade = self._is_upgrade_allowed(req) + best_candidate = self.finder.find_requirement(req, upgrade) + if not best_candidate: + return None + + # Log a warning per PEP 592 if necessary before returning. + link = best_candidate.link + if link.is_yanked: + reason = link.yanked_reason or "" + msg = ( + # Mark this as a unicode string to prevent + # "UnicodeEncodeError: 'ascii' codec can't encode character" + # in Python 2 when the reason contains non-ascii characters. + "The candidate selected for download or install is a " + f"yanked version: {best_candidate}\n" + f"Reason for being yanked: {reason}" + ) + logger.warning(msg) + + return link + + def _populate_link(self, req: InstallRequirement) -> None: + """Ensure that if a link can be found for this, that it is found. + + Note that req.link may still be None - if the requirement is already + installed and not needed to be upgraded based on the return value of + _is_upgrade_allowed(). + + If preparer.require_hashes is True, don't use the wheel cache, because + cached wheels, always built locally, have different hashes than the + files downloaded from the index server and thus throw false hash + mismatches. Furthermore, cached wheels at present have undeterministic + contents due to file modification times. + """ + if req.link is None: + req.link = self._find_requirement_link(req) + + if self.wheel_cache is None or self.preparer.require_hashes: + return + + assert req.link is not None, "_find_requirement_link unexpectedly returned None" + cache_entry = self.wheel_cache.get_cache_entry( + link=req.link, + package_name=req.name, + supported_tags=get_supported(), + ) + if cache_entry is not None: + logger.debug("Using cached wheel link: %s", cache_entry.link) + if req.link is req.original_link and cache_entry.persistent: + req.cached_wheel_source_link = req.link + if cache_entry.origin is not None: + req.download_info = cache_entry.origin + else: + # Legacy cache entry that does not have origin.json. + # download_info may miss the archive_info.hashes field. + req.download_info = direct_url_from_link( + req.link, link_is_in_wheel_cache=cache_entry.persistent + ) + req.link = cache_entry.link + + def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution: + """Takes a InstallRequirement and returns a single AbstractDist \ + representing a prepared variant of the same. + """ + if req.editable: + return self.preparer.prepare_editable_requirement(req) + + # satisfied_by is only evaluated by calling _check_skip_installed, + # so it must be None here. + assert req.satisfied_by is None + skip_reason = self._check_skip_installed(req) + + if req.satisfied_by: + return self.preparer.prepare_installed_requirement(req, skip_reason) + + # We eagerly populate the link, since that's our "legacy" behavior. + self._populate_link(req) + dist = self.preparer.prepare_linked_requirement(req) + + # NOTE + # The following portion is for determining if a certain package is + # going to be re-installed/upgraded or not and reporting to the user. + # This should probably get cleaned up in a future refactor. 
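+        # Illustrative aside (assuming the defaults: no --force-reinstall and
+        # --ignore-installed unset): for a requirement found to be already
+        # installed once its link is resolved,
+        #   upgrade_strategy == "to-satisfy-only" with a remote link
+        #       -> keep the installed version and just report it;
+        #   "only-if-needed" / "eager", or a local file:// link
+        #       -> mark it for reinstall via _set_req_to_reinstall().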
+
+        # req.req is only available after unpack for URL packages; we
+        # repeat check_if_exists so that upgrades trigger an uninstall (#14)
+        if not self.ignore_installed:
+            req.check_if_exists(self.use_user_site)
+
+        if req.satisfied_by:
+            should_modify = (
+                self.upgrade_strategy != "to-satisfy-only"
+                or self.force_reinstall
+                or self.ignore_installed
+                or req.link.scheme == "file"
+            )
+            if should_modify:
+                self._set_req_to_reinstall(req)
+            else:
+                logger.info(
+                    "Requirement already satisfied (use --upgrade to upgrade): %s",
+                    req,
+                )
+        return dist
+
+    def _resolve_one(
+        self,
+        requirement_set: RequirementSet,
+        req_to_install: InstallRequirement,
+    ) -> list[InstallRequirement]:
+        """Prepare a single requirement.
+
+        :return: A list of additional InstallRequirements to also install.
+        """
+        # Tell user what we are doing for this requirement:
+        # obtain (editable), skipping, processing (local url), collecting
+        # (remote url or package name)
+        if req_to_install.constraint or req_to_install.prepared:
+            return []
+
+        req_to_install.prepared = True
+
+        # Parse and return dependencies
+        dist = self._get_dist_for(req_to_install)
+        # This will raise UnsupportedPythonVersion if the given Python
+        # version isn't compatible with the distribution's Requires-Python.
+        _check_dist_requires_python(
+            dist,
+            version_info=self._py_version_info,
+            ignore_requires_python=self.ignore_requires_python,
+        )
+
+        more_reqs: list[InstallRequirement] = []
+
+        def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None:
+            # This idiosyncratically converts the Requirement to str and lets
+            # make_install_req parse it again into a Requirement. But this is
+            # the legacy resolver, so I'm just not going to bother refactoring.
+            sub_install_req = self._make_install_req(str(subreq), req_to_install)
+            parent_req_name = req_to_install.name
+            to_scan_again, add_to_parent = self._add_requirement_to_set(
+                requirement_set,
+                sub_install_req,
+                parent_req_name=parent_req_name,
+                extras_requested=extras_requested,
+            )
+            if parent_req_name and add_to_parent:
+                self._discovered_dependencies[parent_req_name].append(add_to_parent)
+            more_reqs.extend(to_scan_again)
+
+        with indent_log():
+            # We add req_to_install before its dependencies, so that we
+            # can refer to it when adding dependencies.
+            assert req_to_install.name is not None
+            if not requirement_set.has_requirement(req_to_install.name):
+                # 'unnamed' requirements will get added here
+                # 'unnamed' requirements can only come from being directly
+                # provided by the user.
+                assert req_to_install.user_supplied
+                self._add_requirement_to_set(
+                    requirement_set, req_to_install, parent_req_name=None
+                )
+
+            if not self.ignore_dependencies:
+                if req_to_install.extras:
+                    logger.debug(
+                        "Installing extra requirements: %r",
+                        ",".join(req_to_install.extras),
+                    )
+                missing_requested = sorted(
+                    set(req_to_install.extras) - set(dist.iter_provided_extras())
+                )
+                for missing in missing_requested:
+                    logger.warning(
+                        "%s %s does not provide the extra '%s'",
+                        dist.raw_name,
+                        dist.version,
+                        missing,
+                    )
+
+                available_requested = sorted(
+                    set(dist.iter_provided_extras()) & set(req_to_install.extras)
+                )
+                for subreq in dist.iter_dependencies(available_requested):
+                    add_req(subreq, extras_requested=available_requested)
+
+        return more_reqs
+
+    def get_installation_order(
+        self, req_set: RequirementSet
+    ) -> list[InstallRequirement]:
+        """Create the installation order.
+
+        The installation order is topological - requirements are installed
+        before the requiring thing. We break cycles at an arbitrary point,
+        and make no other guarantees.
+        """
+        # The current implementation, which we may change at any point,
+        # installs the user-specified things in the order given, except when
+        # dependencies must come earlier to achieve topological order.
+        order = []
+        ordered_reqs: set[InstallRequirement] = set()
+
+        def schedule(req: InstallRequirement) -> None:
+            if req.satisfied_by or req in ordered_reqs:
+                return
+            if req.constraint:
+                return
+            ordered_reqs.add(req)
+            for dep in self._discovered_dependencies[req.name]:
+                schedule(dep)
+            order.append(req)
+
+        for install_req in req_set.requirements.values():
+            schedule(install_req)
+        return order
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..7612ccd
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-312.pyc
new file mode 100644
index 0000000..57c5ec9
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-312.pyc
new file mode 100644
index 0000000..201e917
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-312.pyc
new file mode 100644
index 0000000..c5a31ea
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-312.pyc
new file mode 100644
index 0000000..e91bf3c
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-312.pyc
new file mode 100644
index 0000000..750d9e1
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-312.pyc
new file mode 100644
index 0000000..a35ab5e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-312.pyc
new file mode 100644
index 0000000..6637414
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-312.pyc
new file mode 100644
index 0000000..406d3cf
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/base.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/base.py
new file mode 100644
index 0000000..03877b6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/base.py
@@ -0,0 +1,142 @@
+from __future__ import annotations
+
+from collections.abc import Iterable
+from dataclasses import dataclass
+from typing import Optional
+
+from pip._vendor.packaging.specifiers import SpecifierSet
+from pip._vendor.packaging.utils import NormalizedName
+from pip._vendor.packaging.version import Version
+
+from pip._internal.models.link import Link, links_equivalent
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.hashes import Hashes
+
+CandidateLookup = tuple[Optional["Candidate"], Optional[InstallRequirement]]
+
+
+def format_name(project: NormalizedName, extras: frozenset[NormalizedName]) -> str:
+    if not extras:
+        return project
+    extras_expr = ",".join(sorted(extras))
+    return f"{project}[{extras_expr}]"
+
+
+@dataclass(frozen=True)
+class Constraint:
+    specifier: SpecifierSet
+    hashes: Hashes
+    links: frozenset[Link]
+
+    @classmethod
+    def empty(cls) -> Constraint:
+        return Constraint(SpecifierSet(), Hashes(), frozenset())
+
+    @classmethod
+    def from_ireq(cls, ireq: InstallRequirement) -> Constraint:
+        links = frozenset([ireq.link]) if ireq.link else frozenset()
+        return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links)
+
+    def __bool__(self) -> bool:
+        return bool(self.specifier) or bool(self.hashes) or bool(self.links)
+
+    def __and__(self, other: InstallRequirement) -> Constraint:
+        if not isinstance(other, InstallRequirement):
+            return NotImplemented
+        specifier = self.specifier & other.specifier
+        hashes = self.hashes & other.hashes(trust_internet=False)
+        links = self.links
+        if other.link:
+            links = links.union([other.link])
+        return Constraint(specifier, hashes, links)
+
+    def is_satisfied_by(self, candidate: Candidate) -> bool:
+        # Reject if there are any mismatched URL constraints on this package.
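+        # Any links gathered from constraint ireqs must match the
+        # candidate's source_link (see _match_link() below); one mismatch
+        # rejects the candidate outright.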
+ if self.links and not all(_match_link(link, candidate) for link in self.links): + return False + # We can safely always allow prereleases here since PackageFinder + # already implements the prerelease logic, and would have filtered out + # prerelease candidates if the user does not expect them. + return self.specifier.contains(candidate.version, prereleases=True) + + +class Requirement: + @property + def project_name(self) -> NormalizedName: + """The "project name" of a requirement. + + This is different from ``name`` if this requirement contains extras, + in which case ``name`` would contain the ``[...]`` part, while this + refers to the name of the project. + """ + raise NotImplementedError("Subclass should override") + + @property + def name(self) -> str: + """The name identifying this requirement in the resolver. + + This is different from ``project_name`` if this requirement contains + extras, where ``project_name`` would not contain the ``[...]`` part. + """ + raise NotImplementedError("Subclass should override") + + def is_satisfied_by(self, candidate: Candidate) -> bool: + return False + + def get_candidate_lookup(self) -> CandidateLookup: + raise NotImplementedError("Subclass should override") + + def format_for_error(self) -> str: + raise NotImplementedError("Subclass should override") + + +def _match_link(link: Link, candidate: Candidate) -> bool: + if candidate.source_link: + return links_equivalent(link, candidate.source_link) + return False + + +class Candidate: + @property + def project_name(self) -> NormalizedName: + """The "project name" of the candidate. + + This is different from ``name`` if this candidate contains extras, + in which case ``name`` would contain the ``[...]`` part, while this + refers to the name of the project. + """ + raise NotImplementedError("Override in subclass") + + @property + def name(self) -> str: + """The name identifying this candidate in the resolver. + + This is different from ``project_name`` if this candidate contains + extras, where ``project_name`` would not contain the ``[...]`` part. 
+ """ + raise NotImplementedError("Override in subclass") + + @property + def version(self) -> Version: + raise NotImplementedError("Override in subclass") + + @property + def is_installed(self) -> bool: + raise NotImplementedError("Override in subclass") + + @property + def is_editable(self) -> bool: + raise NotImplementedError("Override in subclass") + + @property + def source_link(self) -> Link | None: + raise NotImplementedError("Override in subclass") + + def iter_dependencies(self, with_requires: bool) -> Iterable[Requirement | None]: + raise NotImplementedError("Override in subclass") + + def get_install_requirement(self) -> InstallRequirement | None: + raise NotImplementedError("Override in subclass") + + def format_for_error(self) -> str: + raise NotImplementedError("Subclass should override") diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/candidates.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/candidates.py new file mode 100644 index 0000000..a831534 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/candidates.py @@ -0,0 +1,582 @@ +from __future__ import annotations + +import logging +import sys +from collections.abc import Iterable +from typing import TYPE_CHECKING, Any, Union, cast + +from pip._vendor.packaging.requirements import InvalidRequirement +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import Version + +from pip._internal.exceptions import ( + HashError, + InstallationSubprocessError, + InvalidInstalledPackage, + MetadataInconsistent, + MetadataInvalid, +) +from pip._internal.metadata import BaseDistribution +from pip._internal.models.link import Link, links_equivalent +from pip._internal.models.wheel import Wheel +from pip._internal.req.constructors import ( + install_req_from_editable, + install_req_from_line, +) +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.direct_url_helpers import direct_url_from_link +from pip._internal.utils.misc import normalize_version_info + +from .base import Candidate, Requirement, format_name + +if TYPE_CHECKING: + from .factory import Factory + +logger = logging.getLogger(__name__) + +BaseCandidate = Union[ + "AlreadyInstalledCandidate", + "EditableCandidate", + "LinkCandidate", +] + +# Avoid conflicting with the PyPI package "Python". 
+REQUIRES_PYTHON_IDENTIFIER = cast(NormalizedName, "<Python from Requires-Python>")
+
+
+def as_base_candidate(candidate: Candidate) -> BaseCandidate | None:
+    """The runtime version of BaseCandidate."""
+    base_candidate_classes = (
+        AlreadyInstalledCandidate,
+        EditableCandidate,
+        LinkCandidate,
+    )
+    if isinstance(candidate, base_candidate_classes):
+        return candidate
+    return None
+
+
+def make_install_req_from_link(
+    link: Link, template: InstallRequirement
+) -> InstallRequirement:
+    assert not template.editable, "template is editable"
+    if template.req:
+        line = str(template.req)
+    else:
+        line = link.url
+    ireq = install_req_from_line(
+        line,
+        user_supplied=template.user_supplied,
+        comes_from=template.comes_from,
+        use_pep517=template.use_pep517,
+        isolated=template.isolated,
+        constraint=template.constraint,
+        global_options=template.global_options,
+        hash_options=template.hash_options,
+        config_settings=template.config_settings,
+    )
+    ireq.original_link = template.original_link
+    ireq.link = link
+    ireq.extras = template.extras
+    return ireq
+
+
+def make_install_req_from_editable(
+    link: Link, template: InstallRequirement
+) -> InstallRequirement:
+    assert template.editable, "template not editable"
+    ireq = install_req_from_editable(
+        link.url,
+        user_supplied=template.user_supplied,
+        comes_from=template.comes_from,
+        use_pep517=template.use_pep517,
+        isolated=template.isolated,
+        constraint=template.constraint,
+        permit_editable_wheels=template.permit_editable_wheels,
+        global_options=template.global_options,
+        hash_options=template.hash_options,
+        config_settings=template.config_settings,
+    )
+    ireq.extras = template.extras
+    return ireq
+
+
+def _make_install_req_from_dist(
+    dist: BaseDistribution, template: InstallRequirement
+) -> InstallRequirement:
+    if template.req:
+        line = str(template.req)
+    elif template.link:
+        line = f"{dist.canonical_name} @ {template.link.url}"
+    else:
+        line = f"{dist.canonical_name}=={dist.version}"
+    ireq = install_req_from_line(
+        line,
+        user_supplied=template.user_supplied,
+        comes_from=template.comes_from,
+        use_pep517=template.use_pep517,
+        isolated=template.isolated,
+        constraint=template.constraint,
+        global_options=template.global_options,
+        hash_options=template.hash_options,
+        config_settings=template.config_settings,
+    )
+    ireq.satisfied_by = dist
+    return ireq
+
+
+class _InstallRequirementBackedCandidate(Candidate):
+    """A candidate backed by an ``InstallRequirement``.
+
+    This represents a package request where the target is not already in the
+    environment, so it needs to be fetched and installed. The backing
+    ``InstallRequirement`` is responsible for most of the leg work; this
+    class exposes appropriate information to the resolver.
+
+    :param link: The link passed to the ``InstallRequirement``. The backing
+        ``InstallRequirement`` will use this link to fetch the distribution.
+    :param source_link: The link this candidate "originates" from. This is
+        different from ``link`` when the link is found in the wheel cache.
+        ``link`` would point to the wheel cache, while this points to the
+        found remote link (e.g. from pypi.org).
+ """ + + dist: BaseDistribution + is_installed = False + + def __init__( + self, + link: Link, + source_link: Link, + ireq: InstallRequirement, + factory: Factory, + name: NormalizedName | None = None, + version: Version | None = None, + ) -> None: + self._link = link + self._source_link = source_link + self._factory = factory + self._ireq = ireq + self._name = name + self._version = version + self.dist = self._prepare() + self._hash: int | None = None + + def __str__(self) -> str: + return f"{self.name} {self.version}" + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self._link)!r})" + + def __hash__(self) -> int: + if self._hash is not None: + return self._hash + + self._hash = hash((self.__class__, self._link)) + return self._hash + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return links_equivalent(self._link, other._link) + return False + + @property + def source_link(self) -> Link | None: + return self._source_link + + @property + def project_name(self) -> NormalizedName: + """The normalised name of the project the candidate refers to""" + if self._name is None: + self._name = self.dist.canonical_name + return self._name + + @property + def name(self) -> str: + return self.project_name + + @property + def version(self) -> Version: + if self._version is None: + self._version = self.dist.version + return self._version + + def format_for_error(self) -> str: + return ( + f"{self.name} {self.version} " + f"(from {self._link.file_path if self._link.is_file else self._link})" + ) + + def _prepare_distribution(self) -> BaseDistribution: + raise NotImplementedError("Override in subclass") + + def _check_metadata_consistency(self, dist: BaseDistribution) -> None: + """Check for consistency of project name and version of dist.""" + if self._name is not None and self._name != dist.canonical_name: + raise MetadataInconsistent( + self._ireq, + "name", + self._name, + dist.canonical_name, + ) + if self._version is not None and self._version != dist.version: + raise MetadataInconsistent( + self._ireq, + "version", + str(self._version), + str(dist.version), + ) + # check dependencies are valid + # TODO performance: this means we iterate the dependencies at least twice, + # we may want to cache parsed Requires-Dist + try: + list(dist.iter_dependencies(list(dist.iter_provided_extras()))) + except InvalidRequirement as e: + raise MetadataInvalid(self._ireq, str(e)) + + def _prepare(self) -> BaseDistribution: + try: + dist = self._prepare_distribution() + except HashError as e: + # Provide HashError the underlying ireq that caused it. This + # provides context for the resulting error message to show the + # offending line to the user. + e.req = self._ireq + raise + except InstallationSubprocessError as exc: + # The output has been presented already, so don't duplicate it. + exc.context = "See above for output." + raise + + self._check_metadata_consistency(dist) + return dist + + def iter_dependencies(self, with_requires: bool) -> Iterable[Requirement | None]: + # Emit the Requires-Python requirement first to fail fast on + # unsupported candidates and avoid pointless downloads/preparation. 
+ yield self._factory.make_requires_python_requirement(self.dist.requires_python) + requires = self.dist.iter_dependencies() if with_requires else () + for r in requires: + yield from self._factory.make_requirements_from_spec(str(r), self._ireq) + + def get_install_requirement(self) -> InstallRequirement | None: + return self._ireq + + +class LinkCandidate(_InstallRequirementBackedCandidate): + is_editable = False + + def __init__( + self, + link: Link, + template: InstallRequirement, + factory: Factory, + name: NormalizedName | None = None, + version: Version | None = None, + ) -> None: + source_link = link + cache_entry = factory.get_wheel_cache_entry(source_link, name) + if cache_entry is not None: + logger.debug("Using cached wheel link: %s", cache_entry.link) + link = cache_entry.link + ireq = make_install_req_from_link(link, template) + assert ireq.link == link + if ireq.link.is_wheel and not ireq.link.is_file: + wheel = Wheel(ireq.link.filename) + wheel_name = canonicalize_name(wheel.name) + assert name == wheel_name, f"{name!r} != {wheel_name!r} for wheel" + # Version may not be present for PEP 508 direct URLs + if version is not None: + wheel_version = Version(wheel.version) + assert ( + version == wheel_version + ), f"{version!r} != {wheel_version!r} for wheel {name}" + + if cache_entry is not None: + assert ireq.link.is_wheel + assert ireq.link.is_file + if cache_entry.persistent and template.link is template.original_link: + ireq.cached_wheel_source_link = source_link + if cache_entry.origin is not None: + ireq.download_info = cache_entry.origin + else: + # Legacy cache entry that does not have origin.json. + # download_info may miss the archive_info.hashes field. + ireq.download_info = direct_url_from_link( + source_link, link_is_in_wheel_cache=cache_entry.persistent + ) + + super().__init__( + link=link, + source_link=source_link, + ireq=ireq, + factory=factory, + name=name, + version=version, + ) + + def _prepare_distribution(self) -> BaseDistribution: + preparer = self._factory.preparer + return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True) + + +class EditableCandidate(_InstallRequirementBackedCandidate): + is_editable = True + + def __init__( + self, + link: Link, + template: InstallRequirement, + factory: Factory, + name: NormalizedName | None = None, + version: Version | None = None, + ) -> None: + super().__init__( + link=link, + source_link=link, + ireq=make_install_req_from_editable(link, template), + factory=factory, + name=name, + version=version, + ) + + def _prepare_distribution(self) -> BaseDistribution: + return self._factory.preparer.prepare_editable_requirement(self._ireq) + + +class AlreadyInstalledCandidate(Candidate): + is_installed = True + source_link = None + + def __init__( + self, + dist: BaseDistribution, + template: InstallRequirement, + factory: Factory, + ) -> None: + self.dist = dist + self._ireq = _make_install_req_from_dist(dist, template) + self._factory = factory + self._version = None + + # This is just logging some messages, so we can do it eagerly. + # The returned dist would be exactly the same as self.dist because we + # set satisfied_by in _make_install_req_from_dist. + # TODO: Supply reason based on force_reinstall and upgrade_strategy. 
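+        # Instances are created and cached per canonical name in
+        # Factory._make_candidate_from_dist().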
+ skip_reason = "already satisfied" + factory.preparer.prepare_installed_requirement(self._ireq, skip_reason) + + def __str__(self) -> str: + return str(self.dist) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.dist!r})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AlreadyInstalledCandidate): + return NotImplemented + return self.name == other.name and self.version == other.version + + def __hash__(self) -> int: + return hash((self.name, self.version)) + + @property + def project_name(self) -> NormalizedName: + return self.dist.canonical_name + + @property + def name(self) -> str: + return self.project_name + + @property + def version(self) -> Version: + if self._version is None: + self._version = self.dist.version + return self._version + + @property + def is_editable(self) -> bool: + return self.dist.editable + + def format_for_error(self) -> str: + return f"{self.name} {self.version} (Installed)" + + def iter_dependencies(self, with_requires: bool) -> Iterable[Requirement | None]: + if not with_requires: + return + + try: + for r in self.dist.iter_dependencies(): + yield from self._factory.make_requirements_from_spec(str(r), self._ireq) + except InvalidRequirement as exc: + raise InvalidInstalledPackage(dist=self.dist, invalid_exc=exc) from None + + def get_install_requirement(self) -> InstallRequirement | None: + return None + + +class ExtrasCandidate(Candidate): + """A candidate that has 'extras', indicating additional dependencies. + + Requirements can be for a project with dependencies, something like + foo[extra]. The extras don't affect the project/version being installed + directly, but indicate that we need additional dependencies. We model that + by having an artificial ExtrasCandidate that wraps the "base" candidate. + + The ExtrasCandidate differs from the base in the following ways: + + 1. It has a unique name, of the form foo[extra]. This causes the resolver + to treat it as a separate node in the dependency graph. + 2. When we're getting the candidate's dependencies, + a) We specify that we want the extra dependencies as well. + b) We add a dependency on the base candidate. + See below for why this is needed. + 3. We return None for the underlying InstallRequirement, as the base + candidate will provide it, and we don't want to end up with duplicates. + + The dependency on the base candidate is needed so that the resolver can't + decide that it should recommend foo[extra1] version 1.0 and foo[extra2] + version 2.0. Having those candidates depend on foo=1.0 and foo=2.0 + respectively forces the resolver to recognise that this is a conflict. + """ + + def __init__( + self, + base: BaseCandidate, + extras: frozenset[str], + *, + comes_from: InstallRequirement | None = None, + ) -> None: + """ + :param comes_from: the InstallRequirement that led to this candidate if it + differs from the base's InstallRequirement. This will often be the + case in the sense that this candidate's requirement has the extras + while the base's does not. Unlike the InstallRequirement backed + candidates, this requirement is used solely for reporting purposes, + it does not do any leg work. 
+ """ + self.base = base + self.extras = frozenset(canonicalize_name(e) for e in extras) + self._comes_from = comes_from if comes_from is not None else self.base._ireq + + def __str__(self) -> str: + name, rest = str(self.base).split(" ", 1) + return "{}[{}] {}".format(name, ",".join(self.extras), rest) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(base={self.base!r}, extras={self.extras!r})" + + def __hash__(self) -> int: + return hash((self.base, self.extras)) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self.base == other.base and self.extras == other.extras + return False + + @property + def project_name(self) -> NormalizedName: + return self.base.project_name + + @property + def name(self) -> str: + """The normalised name of the project the candidate refers to""" + return format_name(self.base.project_name, self.extras) + + @property + def version(self) -> Version: + return self.base.version + + def format_for_error(self) -> str: + return "{} [{}]".format( + self.base.format_for_error(), ", ".join(sorted(self.extras)) + ) + + @property + def is_installed(self) -> bool: + return self.base.is_installed + + @property + def is_editable(self) -> bool: + return self.base.is_editable + + @property + def source_link(self) -> Link | None: + return self.base.source_link + + def iter_dependencies(self, with_requires: bool) -> Iterable[Requirement | None]: + factory = self.base._factory + + # Add a dependency on the exact base + # (See note 2b in the class docstring) + yield factory.make_requirement_from_candidate(self.base) + if not with_requires: + return + + # The user may have specified extras that the candidate doesn't + # support. We ignore any unsupported extras here. + valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras()) + invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras()) + for extra in sorted(invalid_extras): + logger.warning( + "%s %s does not provide the extra '%s'", + self.base.name, + self.version, + extra, + ) + + for r in self.base.dist.iter_dependencies(valid_extras): + yield from factory.make_requirements_from_spec( + str(r), + self._comes_from, + valid_extras, + ) + + def get_install_requirement(self) -> InstallRequirement | None: + # We don't return anything here, because we always + # depend on the base candidate, and we'll get the + # install requirement from that. + return None + + +class RequiresPythonCandidate(Candidate): + is_installed = False + source_link = None + + def __init__(self, py_version_info: tuple[int, ...] | None) -> None: + if py_version_info is not None: + version_info = normalize_version_info(py_version_info) + else: + version_info = sys.version_info[:3] + self._version = Version(".".join(str(c) for c in version_info)) + + # We don't need to implement __eq__() and __ne__() since there is always + # only one RequiresPythonCandidate in a resolution, i.e. the host Python. + # The built-in object.__eq__() and object.__ne__() do exactly what we want. 
+ + def __str__(self) -> str: + return f"Python {self._version}" + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._version!r})" + + @property + def project_name(self) -> NormalizedName: + return REQUIRES_PYTHON_IDENTIFIER + + @property + def name(self) -> str: + return REQUIRES_PYTHON_IDENTIFIER + + @property + def version(self) -> Version: + return self._version + + def format_for_error(self) -> str: + return f"Python {self.version}" + + def iter_dependencies(self, with_requires: bool) -> Iterable[Requirement | None]: + return () + + def get_install_requirement(self) -> InstallRequirement | None: + return None diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/factory.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/factory.py new file mode 100644 index 0000000..f23e4cd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/factory.py @@ -0,0 +1,814 @@ +from __future__ import annotations + +import contextlib +import functools +import logging +from collections.abc import Iterable, Iterator, Mapping, Sequence +from typing import ( + TYPE_CHECKING, + Callable, + NamedTuple, + Protocol, + TypeVar, + cast, +) + +from pip._vendor.packaging.requirements import InvalidRequirement +from pip._vendor.packaging.specifiers import SpecifierSet +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import InvalidVersion, Version +from pip._vendor.resolvelib import ResolutionImpossible + +from pip._internal.cache import CacheEntry, WheelCache +from pip._internal.exceptions import ( + DistributionNotFound, + InstallationError, + InvalidInstalledPackage, + MetadataInconsistent, + MetadataInvalid, + UnsupportedPythonVersion, + UnsupportedWheel, +) +from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution, get_default_environment +from pip._internal.models.link import Link +from pip._internal.models.wheel import Wheel +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req.constructors import ( + install_req_drop_extras, + install_req_from_link_and_ireq, +) +from pip._internal.req.req_install import ( + InstallRequirement, + check_invalid_constraint_type, +) +from pip._internal.resolution.base import InstallRequirementProvider +from pip._internal.utils.compatibility_tags import get_supported +from pip._internal.utils.hashes import Hashes +from pip._internal.utils.packaging import get_requirement +from pip._internal.utils.virtualenv import running_under_virtualenv + +from .base import Candidate, Constraint, Requirement +from .candidates import ( + AlreadyInstalledCandidate, + BaseCandidate, + EditableCandidate, + ExtrasCandidate, + LinkCandidate, + RequiresPythonCandidate, + as_base_candidate, +) +from .found_candidates import FoundCandidates, IndexCandidateInfo +from .requirements import ( + ExplicitRequirement, + RequiresPythonRequirement, + SpecifierRequirement, + SpecifierWithoutExtrasRequirement, + UnsatisfiableRequirement, +) + +if TYPE_CHECKING: + + class ConflictCause(Protocol): + requirement: RequiresPythonRequirement + parent: Candidate + + +logger = logging.getLogger(__name__) + +C = TypeVar("C") +Cache = dict[Link, C] + + +class CollectedRootRequirements(NamedTuple): + requirements: list[Requirement] + constraints: dict[str, Constraint] + user_requested: dict[str, int] + + +class Factory: + def __init__( + self, + finder: 
PackageFinder, + preparer: RequirementPreparer, + make_install_req: InstallRequirementProvider, + wheel_cache: WheelCache | None, + use_user_site: bool, + force_reinstall: bool, + ignore_installed: bool, + ignore_requires_python: bool, + py_version_info: tuple[int, ...] | None = None, + ) -> None: + self._finder = finder + self.preparer = preparer + self._wheel_cache = wheel_cache + self._python_candidate = RequiresPythonCandidate(py_version_info) + self._make_install_req_from_spec = make_install_req + self._use_user_site = use_user_site + self._force_reinstall = force_reinstall + self._ignore_requires_python = ignore_requires_python + + self._build_failures: Cache[InstallationError] = {} + self._link_candidate_cache: Cache[LinkCandidate] = {} + self._editable_candidate_cache: Cache[EditableCandidate] = {} + self._installed_candidate_cache: dict[str, AlreadyInstalledCandidate] = {} + self._extras_candidate_cache: dict[ + tuple[int, frozenset[NormalizedName]], ExtrasCandidate + ] = {} + self._supported_tags_cache = get_supported() + + if not ignore_installed: + env = get_default_environment() + self._installed_dists = { + dist.canonical_name: dist + for dist in env.iter_installed_distributions(local_only=False) + } + else: + self._installed_dists = {} + + @property + def force_reinstall(self) -> bool: + return self._force_reinstall + + def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None: + if not link.is_wheel: + return + wheel = Wheel(link.filename) + if wheel.supported(self._finder.target_python.get_unsorted_tags()): + return + msg = f"{link.filename} is not a supported wheel on this platform." + raise UnsupportedWheel(msg) + + def _make_extras_candidate( + self, + base: BaseCandidate, + extras: frozenset[str], + *, + comes_from: InstallRequirement | None = None, + ) -> ExtrasCandidate: + cache_key = (id(base), frozenset(canonicalize_name(e) for e in extras)) + try: + candidate = self._extras_candidate_cache[cache_key] + except KeyError: + candidate = ExtrasCandidate(base, extras, comes_from=comes_from) + self._extras_candidate_cache[cache_key] = candidate + return candidate + + def _make_candidate_from_dist( + self, + dist: BaseDistribution, + extras: frozenset[str], + template: InstallRequirement, + ) -> Candidate: + try: + base = self._installed_candidate_cache[dist.canonical_name] + except KeyError: + base = AlreadyInstalledCandidate(dist, template, factory=self) + self._installed_candidate_cache[dist.canonical_name] = base + if not extras: + return base + return self._make_extras_candidate(base, extras, comes_from=template) + + def _make_candidate_from_link( + self, + link: Link, + extras: frozenset[str], + template: InstallRequirement, + name: NormalizedName | None, + version: Version | None, + ) -> Candidate | None: + base: BaseCandidate | None = self._make_base_candidate_from_link( + link, template, name, version + ) + if not extras or base is None: + return base + return self._make_extras_candidate(base, extras, comes_from=template) + + def _make_base_candidate_from_link( + self, + link: Link, + template: InstallRequirement, + name: NormalizedName | None, + version: Version | None, + ) -> BaseCandidate | None: + # TODO: Check already installed candidate, and use it if the link and + # editable flag match. + + if link in self._build_failures: + # We already tried this candidate before, and it does not build. + # Don't bother trying again. 
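+            # For unnamed URL requirements the stored exception is re-raised
+            # in _make_requirements_from_install_req() below.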
+ return None + + if template.editable: + if link not in self._editable_candidate_cache: + try: + self._editable_candidate_cache[link] = EditableCandidate( + link, + template, + factory=self, + name=name, + version=version, + ) + except (MetadataInconsistent, MetadataInvalid) as e: + logger.info( + "Discarding [blue underline]%s[/]: [yellow]%s[reset]", + link, + e, + extra={"markup": True}, + ) + self._build_failures[link] = e + return None + + return self._editable_candidate_cache[link] + else: + if link not in self._link_candidate_cache: + try: + self._link_candidate_cache[link] = LinkCandidate( + link, + template, + factory=self, + name=name, + version=version, + ) + except MetadataInconsistent as e: + logger.info( + "Discarding [blue underline]%s[/]: [yellow]%s[reset]", + link, + e, + extra={"markup": True}, + ) + self._build_failures[link] = e + return None + return self._link_candidate_cache[link] + + def _iter_found_candidates( + self, + ireqs: Sequence[InstallRequirement], + specifier: SpecifierSet, + hashes: Hashes, + prefers_installed: bool, + incompatible_ids: set[int], + ) -> Iterable[Candidate]: + if not ireqs: + return () + + # The InstallRequirement implementation requires us to give it a + # "template". Here we just choose the first requirement to represent + # all of them. + # Hopefully the Project model can correct this mismatch in the future. + template = ireqs[0] + assert template.req, "Candidates found on index must be PEP 508" + name = canonicalize_name(template.req.name) + + extras: frozenset[str] = frozenset() + for ireq in ireqs: + assert ireq.req, "Candidates found on index must be PEP 508" + specifier &= ireq.req.specifier + hashes &= ireq.hashes(trust_internet=False) + extras |= frozenset(ireq.extras) + + def _get_installed_candidate() -> Candidate | None: + """Get the candidate for the currently-installed version.""" + # If --force-reinstall is set, we want the version from the index + # instead, so we "pretend" there is nothing installed. + if self._force_reinstall: + return None + try: + installed_dist = self._installed_dists[name] + except KeyError: + return None + + try: + # Don't use the installed distribution if its version + # does not fit the current dependency graph. + if not specifier.contains(installed_dist.version, prereleases=True): + return None + except InvalidVersion as e: + raise InvalidInstalledPackage(dist=installed_dist, invalid_exc=e) + + candidate = self._make_candidate_from_dist( + dist=installed_dist, + extras=extras, + template=template, + ) + # The candidate is a known incompatibility. Don't use it. + if id(candidate) in incompatible_ids: + return None + return candidate + + def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]: + result = self._finder.find_best_candidate( + project_name=name, + specifier=specifier, + hashes=hashes, + ) + icans = result.applicable_candidates + + # PEP 592: Yanked releases are ignored unless the specifier + # explicitly pins a version (via '==' or '===') that can be + # solely satisfied by a yanked release. + all_yanked = all(ican.link.is_yanked for ican in icans) + + def is_pinned(specifier: SpecifierSet) -> bool: + for sp in specifier: + if sp.operator == "===": + return True + if sp.operator != "==": + continue + if sp.version.endswith(".*"): + continue + return True + return False + + pinned = is_pinned(specifier) + + # PackageFinder returns earlier versions first, so we reverse. 
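+            # For example, is_pinned(SpecifierSet("==1.2.3")) is True, while
+            # SpecifierSet("==1.2.*") and SpecifierSet(">=1.2") are not
+            # pins, so their yanked candidates stay hidden.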
+ for ican in reversed(icans): + if not (all_yanked and pinned) and ican.link.is_yanked: + continue + func = functools.partial( + self._make_candidate_from_link, + link=ican.link, + extras=extras, + template=template, + name=name, + version=ican.version, + ) + yield ican.version, func + + return FoundCandidates( + iter_index_candidate_infos, + _get_installed_candidate(), + prefers_installed, + incompatible_ids, + ) + + def _iter_explicit_candidates_from_base( + self, + base_requirements: Iterable[Requirement], + extras: frozenset[str], + ) -> Iterator[Candidate]: + """Produce explicit candidates from the base given an extra-ed package. + + :param base_requirements: Requirements known to the resolver. The + requirements are guaranteed to not have extras. + :param extras: The extras to inject into the explicit requirements' + candidates. + """ + for req in base_requirements: + lookup_cand, _ = req.get_candidate_lookup() + if lookup_cand is None: # Not explicit. + continue + # We've stripped extras from the identifier, and should always + # get a BaseCandidate here, unless there's a bug elsewhere. + base_cand = as_base_candidate(lookup_cand) + assert base_cand is not None, "no extras here" + yield self._make_extras_candidate(base_cand, extras) + + def _iter_candidates_from_constraints( + self, + identifier: str, + constraint: Constraint, + template: InstallRequirement, + ) -> Iterator[Candidate]: + """Produce explicit candidates from constraints. + + This creates "fake" InstallRequirement objects that are basically clones + of what "should" be the template, but with original_link set to link. + """ + for link in constraint.links: + self._fail_if_link_is_unsupported_wheel(link) + candidate = self._make_base_candidate_from_link( + link, + template=install_req_from_link_and_ireq(link, template), + name=canonicalize_name(identifier), + version=None, + ) + if candidate: + yield candidate + + def find_candidates( + self, + identifier: str, + requirements: Mapping[str, Iterable[Requirement]], + incompatibilities: Mapping[str, Iterator[Candidate]], + constraint: Constraint, + prefers_installed: bool, + is_satisfied_by: Callable[[Requirement, Candidate], bool], + ) -> Iterable[Candidate]: + # Collect basic lookup information from the requirements. + explicit_candidates: set[Candidate] = set() + ireqs: list[InstallRequirement] = [] + for req in requirements[identifier]: + cand, ireq = req.get_candidate_lookup() + if cand is not None: + explicit_candidates.add(cand) + if ireq is not None: + ireqs.append(ireq) + + # If the current identifier contains extras, add requires and explicit + # candidates from entries from extra-less identifier. + with contextlib.suppress(InvalidRequirement): + parsed_requirement = get_requirement(identifier) + if parsed_requirement.name != identifier: + explicit_candidates.update( + self._iter_explicit_candidates_from_base( + requirements.get(parsed_requirement.name, ()), + frozenset(parsed_requirement.extras), + ), + ) + for req in requirements.get(parsed_requirement.name, []): + _, ireq = req.get_candidate_lookup() + if ireq is not None: + ireqs.append(ireq) + + # Add explicit candidates from constraints. We only do this if there are + # known ireqs, which represent requirements not already explicit. If + # there are no ireqs, we're constraining already-explicit requirements, + # which is handled later when we return the explicit candidates. 
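+        # For example, a constraints-file line like
+        # "pkg @ file:///tmp/pkg-1.0-py3-none-any.whl" surfaces here as an
+        # explicit candidate, whereas a plain "pkg==1.0" constraint only
+        # narrows constraint.specifier.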
+ if ireqs: + try: + explicit_candidates.update( + self._iter_candidates_from_constraints( + identifier, + constraint, + template=ireqs[0], + ), + ) + except UnsupportedWheel: + # If we're constrained to install a wheel incompatible with the + # target architecture, no candidates will ever be valid. + return () + + # Since we cache all the candidates, incompatibility identification + # can be made quicker by comparing only the id() values. + incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())} + + # If none of the requirements want an explicit candidate, we can ask + # the finder for candidates. + if not explicit_candidates: + return self._iter_found_candidates( + ireqs, + constraint.specifier, + constraint.hashes, + prefers_installed, + incompat_ids, + ) + + return ( + c + for c in explicit_candidates + if id(c) not in incompat_ids + and constraint.is_satisfied_by(c) + and all(is_satisfied_by(req, c) for req in requirements[identifier]) + ) + + def _make_requirements_from_install_req( + self, ireq: InstallRequirement, requested_extras: Iterable[str] + ) -> Iterator[Requirement]: + """ + Returns requirement objects associated with the given InstallRequirement. In + most cases this will be a single object but the following special cases exist: + - the InstallRequirement has markers that do not apply -> result is empty + - the InstallRequirement has both a constraint (or link) and extras + -> result is split in two requirement objects: one with the constraint + (or link) and one with the extra. This allows centralized constraint + handling for the base, resulting in fewer candidate rejections. + """ + if not ireq.match_markers(requested_extras): + logger.info( + "Ignoring %s: markers '%s' don't match your environment", + ireq.name, + ireq.markers, + ) + elif not ireq.link: + if ireq.extras and ireq.req is not None and ireq.req.specifier: + yield SpecifierWithoutExtrasRequirement(ireq) + yield SpecifierRequirement(ireq) + else: + self._fail_if_link_is_unsupported_wheel(ireq.link) + # Always make the link candidate for the base requirement to make it + # available to `find_candidates` for explicit candidate lookup for any + # set of extras. + # The extras are required separately via a second requirement. + cand = self._make_base_candidate_from_link( + ireq.link, + template=install_req_drop_extras(ireq) if ireq.extras else ireq, + name=canonicalize_name(ireq.name) if ireq.name else None, + version=None, + ) + if cand is None: + # There's no way we can satisfy a URL requirement if the underlying + # candidate fails to build. An unnamed URL must be user-supplied, so + # we fail eagerly. If the URL is named, an unsatisfiable requirement + # can make the resolver do the right thing, either backtrack (and + # maybe find some other requirement that's buildable) or raise a + # ResolutionImpossible eventually. 
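+                # For example, a broken unnamed requirement like
+                # "./unbuildable-dir/" fails immediately, while a named
+                # "pkg @ file:///unbuildable.whl" yields an
+                # UnsatisfiableRequirement so the resolver may backtrack.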
+                if not ireq.name:
+                    raise self._build_failures[ireq.link]
+                yield UnsatisfiableRequirement(canonicalize_name(ireq.name))
+            else:
+                # require the base from the link
+                yield self.make_requirement_from_candidate(cand)
+                if ireq.extras:
+                    # require the extras on top of the base candidate
+                    yield self.make_requirement_from_candidate(
+                        self._make_extras_candidate(cand, frozenset(ireq.extras))
+                    )
+
+    def collect_root_requirements(
+        self, root_ireqs: list[InstallRequirement]
+    ) -> CollectedRootRequirements:
+        collected = CollectedRootRequirements([], {}, {})
+        for i, ireq in enumerate(root_ireqs):
+            if ireq.constraint:
+                # Ensure we only accept valid constraints
+                problem = check_invalid_constraint_type(ireq)
+                if problem:
+                    raise InstallationError(problem)
+                if not ireq.match_markers():
+                    continue
+                assert ireq.name, "Constraint must be named"
+                name = canonicalize_name(ireq.name)
+                if name in collected.constraints:
+                    collected.constraints[name] &= ireq
+                else:
+                    collected.constraints[name] = Constraint.from_ireq(ireq)
+            else:
+                reqs = list(
+                    self._make_requirements_from_install_req(
+                        ireq,
+                        requested_extras=(),
+                    )
+                )
+                if not reqs:
+                    continue
+                template = reqs[0]
+                if ireq.user_supplied and template.name not in collected.user_requested:
+                    collected.user_requested[template.name] = i
+                collected.requirements.extend(reqs)
+        # Put requirements with extras at the end of the root requires. This does
+        # not affect resolvelib's picking preference but it does affect its initial
+        # criteria population: by putting extras at the end we enable the candidate
+        # finder to present a smaller set of candidates to resolvelib, already
+        # taking into account any non-transient constraints on the associated base.
+        # This means resolvelib will have fewer candidates to visit and reject.
+        # Python's list sort is stable, meaning relative order is kept for objects
+        # with the same key.
+        collected.requirements.sort(key=lambda r: r.name != r.project_name)
+        return collected
+
+    def make_requirement_from_candidate(
+        self, candidate: Candidate
+    ) -> ExplicitRequirement:
+        return ExplicitRequirement(candidate)
+
+    def make_requirements_from_spec(
+        self,
+        specifier: str,
+        comes_from: InstallRequirement | None,
+        requested_extras: Iterable[str] = (),
+    ) -> Iterator[Requirement]:
+        """
+        Returns requirement objects associated with the given specifier. In most
+        cases this will be a single object but the following special cases exist:
+            - the specifier has markers that do not apply -> result is empty
+            - the specifier has both a constraint and extras -> result is split
+              in two requirement objects: one with the constraint and one with the
+              extra. This allows centralized constraint handling for the base,
+              resulting in fewer candidate rejections.
+        """
+        ireq = self._make_install_req_from_spec(specifier, comes_from)
+        return self._make_requirements_from_install_req(ireq, requested_extras)
+
+    def make_requires_python_requirement(
+        self,
+        specifier: SpecifierSet,
+    ) -> Requirement | None:
+        if self._ignore_requires_python:
+            return None
+        # Don't bother creating a dependency for an empty Requires-Python.
+        if not str(specifier):
+            return None
+        return RequiresPythonRequirement(specifier, self._python_candidate)
+
+    def get_wheel_cache_entry(self, link: Link, name: str | None) -> CacheEntry | None:
+        """Look up the link in the wheel cache.
+
+        If ``preparer.require_hashes`` is True, don't use the wheel cache,
+        because cached wheels, always built locally, have different hashes
+        than the files downloaded from the index server and thus throw false
+        hash mismatches. Furthermore, cached wheels at present have
+        nondeterministic contents due to file modification times.
+        """
+        if self._wheel_cache is None:
+            return None
+        return self._wheel_cache.get_cache_entry(
+            link=link,
+            package_name=name,
+            supported_tags=self._supported_tags_cache,
+        )
+
+    def get_dist_to_uninstall(self, candidate: Candidate) -> BaseDistribution | None:
+        # TODO: Are there more cases this needs to return True? Editable?
+        dist = self._installed_dists.get(candidate.project_name)
+        if dist is None:  # Not installed, no uninstallation required.
+            return None
+
+        # We're installing into the global site. The current installation must
+        # be uninstalled, no matter whether it's in the global or user site,
+        # because the user site installation has precedence over the global one.
+        if not self._use_user_site:
+            return dist
+
+        # We're installing into the user site. Remove the user site installation.
+        if dist.in_usersite:
+            return dist
+
+        # We're installing into the user site, but the installed incompatible
+        # package is in the global site. We can't uninstall that, and it would
+        # let the new user installation "shadow" it. But shadowing won't work
+        # in virtual environments, so we error out.
+        if running_under_virtualenv() and dist.in_site_packages:
+            message = (
+                f"Will not install to the user site because it will lack "
+                f"sys.path precedence to {dist.raw_name} in {dist.location}"
+            )
+            raise InstallationError(message)
+        return None
+
+    def _report_requires_python_error(
+        self, causes: Sequence[ConflictCause]
+    ) -> UnsupportedPythonVersion:
+        assert causes, "Requires-Python error reported with no cause"
+
+        version = self._python_candidate.version
+
+        if len(causes) == 1:
+            specifier = str(causes[0].requirement.specifier)
+            message = (
+                f"Package {causes[0].parent.name!r} requires a different "
+                f"Python: {version} not in {specifier!r}"
+            )
+            return UnsupportedPythonVersion(message)
+
+        message = f"Packages require a different Python. {version} not in:"
+        for cause in causes:
+            package = cause.parent.format_for_error()
+            specifier = str(cause.requirement.specifier)
+            message += f"\n{specifier!r} (required by {package})"
+        return UnsupportedPythonVersion(message)
+
+    def _report_single_requirement_conflict(
+        self, req: Requirement, parent: Candidate | None
+    ) -> DistributionNotFound:
+        if parent is None:
+            req_disp = str(req)
+        else:
+            req_disp = f"{req} (from {parent.name})"
+
+        cands = self._finder.find_all_candidates(req.project_name)
+        skipped_by_requires_python = self._finder.requires_python_skipped_reasons()
+
+        versions_set: set[Version] = set()
+        yanked_versions_set: set[Version] = set()
+        for c in cands:
+            is_yanked = c.link.is_yanked if c.link else False
+            if is_yanked:
+                yanked_versions_set.add(c.version)
+            else:
+                versions_set.add(c.version)
+
+        versions = [str(v) for v in sorted(versions_set)]
+        yanked_versions = [str(v) for v in sorted(yanked_versions_set)]
+
+        if yanked_versions:
+            # Saying "version X is yanked" isn't entirely accurate.
+ # https://github.com/pypa/pip/issues/11745#issuecomment-1402805842 + logger.critical( + "Ignored the following yanked versions: %s", + ", ".join(yanked_versions) or "none", + ) + if skipped_by_requires_python: + logger.critical( + "Ignored the following versions that require a different python " + "version: %s", + "; ".join(skipped_by_requires_python) or "none", + ) + logger.critical( + "Could not find a version that satisfies the requirement %s " + "(from versions: %s)", + req_disp, + ", ".join(versions) or "none", + ) + if str(req) == "requirements.txt": + logger.info( + "HINT: You are attempting to install a package literally " + 'named "requirements.txt" (which cannot exist). Consider ' + "using the '-r' flag to install the packages listed in " + "requirements.txt" + ) + + return DistributionNotFound(f"No matching distribution found for {req}") + + def get_installation_error( + self, + e: ResolutionImpossible[Requirement, Candidate], + constraints: dict[str, Constraint], + ) -> InstallationError: + assert e.causes, "Installation error reported with no cause" + + # If one of the things we can't solve is "we need Python X.Y", + # that is what we report. + requires_python_causes = [ + cause + for cause in e.causes + if isinstance(cause.requirement, RequiresPythonRequirement) + and not cause.requirement.is_satisfied_by(self._python_candidate) + ] + if requires_python_causes: + # The comprehension above makes sure all Requirement instances are + # RequiresPythonRequirement, so let's cast for convenience. + return self._report_requires_python_error( + cast("Sequence[ConflictCause]", requires_python_causes), + ) + + # Otherwise, we have a set of causes which can't all be satisfied + # at once. + + # The simplest case is when we have *one* cause that can't be + # satisfied. We just report that case. + if len(e.causes) == 1: + req, parent = next(iter(e.causes)) + if req.name not in constraints: + return self._report_single_requirement_conflict(req, parent) + + # OK, we now have a list of requirements that can't all be + # satisfied at once. + + # A couple of formatting helpers + def text_join(parts: list[str]) -> str: + if len(parts) == 1: + return parts[0] + + return ", ".join(parts[:-1]) + " and " + parts[-1] + + def describe_trigger(parent: Candidate) -> str: + ireq = parent.get_install_requirement() + if not ireq or not ireq.comes_from: + return f"{parent.name}=={parent.version}" + if isinstance(ireq.comes_from, InstallRequirement): + return str(ireq.comes_from.name) + return str(ireq.comes_from) + + triggers = set() + for req, parent in e.causes: + if parent is None: + # This is a root requirement, so we can report it directly + trigger = req.format_for_error() + else: + trigger = describe_trigger(parent) + triggers.add(trigger) + + if triggers: + info = text_join(sorted(triggers)) + else: + info = "the requested packages" + + msg = ( + f"Cannot install {info} because these package versions " + "have conflicting dependencies." 
+ ) + logger.critical(msg) + msg = "\nThe conflict is caused by:" + + relevant_constraints = set() + for req, parent in e.causes: + if req.name in constraints: + relevant_constraints.add(req.name) + msg = msg + "\n " + if parent: + msg = msg + f"{parent.name} {parent.version} depends on " + else: + msg = msg + "The user requested " + msg = msg + req.format_for_error() + for key in relevant_constraints: + spec = constraints[key].specifier + msg += f"\n The user requested (constraint) {key}{spec}" + + msg = ( + msg + + "\n\n" + + "To fix this you could try to:\n" + + "1. loosen the range of package versions you've specified\n" + + "2. remove package versions to allow pip to attempt to solve " + + "the dependency conflict\n" + ) + + logger.info(msg) + + return DistributionNotFound( + "ResolutionImpossible: for help visit " + "https://pip.pypa.io/en/latest/topics/dependency-resolution/" + "#dealing-with-dependency-conflicts" + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py new file mode 100644 index 0000000..f60653d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py @@ -0,0 +1,166 @@ +"""Utilities to lazily create and visit candidates found. + +Creating and visiting a candidate is a *very* costly operation. It involves +fetching, extracting, potentially building modules from source, and verifying +distribution metadata. It is therefore crucial for performance to keep +everything here lazy all the way down, so we only touch candidates that we +absolutely need, and not "download the world" when we only need one version of +something. +""" + +from __future__ import annotations + +import logging +from collections.abc import Iterator, Sequence +from typing import Any, Callable, Optional + +from pip._vendor.packaging.version import _BaseVersion + +from pip._internal.exceptions import MetadataInvalid + +from .base import Candidate + +logger = logging.getLogger(__name__) + +IndexCandidateInfo = tuple[_BaseVersion, Callable[[], Optional[Candidate]]] + + +def _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]: + """Iterator for ``FoundCandidates``. + + This iterator is used when the package is not already installed. Candidates + from index come later in their normal ordering. + """ + versions_found: set[_BaseVersion] = set() + for version, func in infos: + if version in versions_found: + continue + try: + candidate = func() + except MetadataInvalid as e: + logger.warning( + "Ignoring version %s of %s since it has invalid metadata:\n" + "%s\n" + "Please use pip<24.1 if you need to use this version.", + version, + e.ireq.name, + e, + ) + # Mark version as found to avoid trying other candidates with the same + # version, since they most likely have invalid metadata as well. + versions_found.add(version) + else: + if candidate is None: + continue + yield candidate + versions_found.add(version) + + +def _iter_built_with_prepended( + installed: Candidate, infos: Iterator[IndexCandidateInfo] +) -> Iterator[Candidate]: + """Iterator for ``FoundCandidates``. + + This iterator is used when the resolver prefers the already-installed + candidate and NOT to upgrade. The installed candidate is therefore + always yielded first, and candidates from index come later in their + normal ordering, except skipped when the version is already installed. 
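+
+    For example (with made-up versions), if 2.0 is installed and the index
+    offers 3.0, 2.0 and 1.0, this yields the installed 2.0 first, then 3.0
+    and 1.0; the index's own 2.0 is skipped as already seen.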
+    """
+    yield installed
+    versions_found: set[_BaseVersion] = {installed.version}
+    for version, func in infos:
+        if version in versions_found:
+            continue
+        candidate = func()
+        if candidate is None:
+            continue
+        yield candidate
+        versions_found.add(version)
+
+
+def _iter_built_with_inserted(
+    installed: Candidate, infos: Iterator[IndexCandidateInfo]
+) -> Iterator[Candidate]:
+    """Iterator for ``FoundCandidates``.
+
+    This iterator is used when the resolver prefers to upgrade an
+    already-installed package. Candidates from the index are returned in their
+    normal ordering, except replaced when the version is already installed.
+
+    The implementation iterates through and yields other candidates, inserting
+    the installed candidate exactly once before we start yielding older or
+    equivalent candidates, or after all other candidates if they are all newer.
+    """
+    versions_found: set[_BaseVersion] = set()
+    for version, func in infos:
+        if version in versions_found:
+            continue
+        # If the installed candidate is better, yield it first.
+        if installed.version >= version:
+            yield installed
+            versions_found.add(installed.version)
+        candidate = func()
+        if candidate is None:
+            continue
+        yield candidate
+        versions_found.add(version)
+
+    # If the installed candidate is older than all other candidates.
+    if installed.version not in versions_found:
+        yield installed
+
+
+class FoundCandidates(Sequence[Candidate]):
+    """A lazy sequence to provide candidates to the resolver.
+
+    The intended usage is to return this from `find_matches()` so the resolver
+    can iterate through the sequence multiple times, but only access the index
+    page when remote packages are actually needed. This improves performance
+    when suitable candidates are already installed on disk.
+    """
+
+    def __init__(
+        self,
+        get_infos: Callable[[], Iterator[IndexCandidateInfo]],
+        installed: Candidate | None,
+        prefers_installed: bool,
+        incompatible_ids: set[int],
+    ):
+        self._get_infos = get_infos
+        self._installed = installed
+        self._prefers_installed = prefers_installed
+        self._incompatible_ids = incompatible_ids
+        self._bool: bool | None = None
+
+    def __getitem__(self, index: Any) -> Any:
+        # Implemented to satisfy the ABC check. This is not needed by the
+        # resolver, and should not be used by the provider either (for
+        # performance reasons).
+        raise NotImplementedError("don't do this")
+
+    def __iter__(self) -> Iterator[Candidate]:
+        infos = self._get_infos()
+        if not self._installed:
+            iterator = _iter_built(infos)
+        elif self._prefers_installed:
+            iterator = _iter_built_with_prepended(self._installed, infos)
+        else:
+            iterator = _iter_built_with_inserted(self._installed, infos)
+        return (c for c in iterator if id(c) not in self._incompatible_ids)
+
+    def __len__(self) -> int:
+        # Implemented to satisfy the ABC check. This is not needed by the
+        # resolver, and should not be used by the provider either (for
+        # performance reasons).
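+        # For example (illustrative usage), a caller can safely evaluate
+        # `bool(candidates)` or iterate with `for c in candidates: ...`,
+        # but `len(candidates)` and `candidates[0]` both raise, by design.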
+ raise NotImplementedError("don't do this") + + def __bool__(self) -> bool: + if self._bool is not None: + return self._bool + + if self._prefers_installed and self._installed: + self._bool = True + return True + + self._bool = any(self) + return self._bool diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/provider.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/provider.py new file mode 100644 index 0000000..40d6115 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/provider.py @@ -0,0 +1,276 @@ +from __future__ import annotations + +import math +from collections.abc import Iterable, Iterator, Mapping, Sequence +from functools import cache +from typing import ( + TYPE_CHECKING, + TypeVar, +) + +from pip._vendor.resolvelib.providers import AbstractProvider + +from pip._internal.req.req_install import InstallRequirement + +from .base import Candidate, Constraint, Requirement +from .candidates import REQUIRES_PYTHON_IDENTIFIER +from .factory import Factory +from .requirements import ExplicitRequirement + +if TYPE_CHECKING: + from pip._vendor.resolvelib.providers import Preference + from pip._vendor.resolvelib.resolvers import RequirementInformation + + PreferenceInformation = RequirementInformation[Requirement, Candidate] + + _ProviderBase = AbstractProvider[Requirement, Candidate, str] +else: + _ProviderBase = AbstractProvider + +# Notes on the relationship between the provider, the factory, and the +# candidate and requirement classes. +# +# The provider is a direct implementation of the resolvelib class. Its role +# is to deliver the API that resolvelib expects. +# +# Rather than work with completely abstract "requirement" and "candidate" +# concepts as resolvelib does, pip has concrete classes implementing these two +# ideas. The API of Requirement and Candidate objects are defined in the base +# classes, but essentially map fairly directly to the equivalent provider +# methods. In particular, `find_matches` and `is_satisfied_by` are +# requirement methods, and `get_dependencies` is a candidate method. +# +# The factory is the interface to pip's internal mechanisms. It is stateless, +# and is created by the resolver and held as a property of the provider. It is +# responsible for creating Requirement and Candidate objects, and provides +# services to those objects (access to pip's finder and preparer). + + +D = TypeVar("D") +V = TypeVar("V") + + +def _get_with_identifier( + mapping: Mapping[str, V], + identifier: str, + default: D, +) -> D | V: + """Get item from a package name lookup mapping with a resolver identifier. + + This extra logic is needed when the target mapping is keyed by package + name, which cannot be directly looked up with an identifier (which may + contain requested extras). Additional logic is added to also look up a value + by "cleaning up" the extras from the identifier. + """ + if identifier in mapping: + return mapping[identifier] + # HACK: Theoretically we should check whether this identifier is a valid + # "NAME[EXTRAS]" format, and parse out the name part with packaging or + # some regular expression. But since pip's resolver only spits out three + # kinds of identifiers: normalized PEP 503 names, normalized names plus + # extras, and Requires-Python, we can cheat a bit here. 
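+    # For example (illustrative values): "idna".partition("[") returns
+    # ("idna", "", ""), while "requests[socks]".partition("[") returns
+    # ("requests", "[", "socks]"), letting us fall back to the base name.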
+ name, open_bracket, _ = identifier.partition("[") + if open_bracket and name in mapping: + return mapping[name] + return default + + +class PipProvider(_ProviderBase): + """Pip's provider implementation for resolvelib. + + :params constraints: A mapping of constraints specified by the user. Keys + are canonicalized project names. + :params ignore_dependencies: Whether the user specified ``--no-deps``. + :params upgrade_strategy: The user-specified upgrade strategy. + :params user_requested: A set of canonicalized package names that the user + supplied for pip to install/upgrade. + """ + + def __init__( + self, + factory: Factory, + constraints: dict[str, Constraint], + ignore_dependencies: bool, + upgrade_strategy: str, + user_requested: dict[str, int], + ) -> None: + self._factory = factory + self._constraints = constraints + self._ignore_dependencies = ignore_dependencies + self._upgrade_strategy = upgrade_strategy + self._user_requested = user_requested + + def identify(self, requirement_or_candidate: Requirement | Candidate) -> str: + return requirement_or_candidate.name + + def narrow_requirement_selection( + self, + identifiers: Iterable[str], + resolutions: Mapping[str, Candidate], + candidates: Mapping[str, Iterator[Candidate]], + information: Mapping[str, Iterator[PreferenceInformation]], + backtrack_causes: Sequence[PreferenceInformation], + ) -> Iterable[str]: + """Produce a subset of identifiers that should be considered before others. + + Currently pip narrows the following selection: + * Requires-Python, if present is always returned by itself + * Backtrack causes are considered next because they can be identified + in linear time here, whereas because get_preference() is called + for each identifier, it would be quadratic to check for them there. + Further, the current backtrack causes likely need to be resolved + before other requirements as a resolution can't be found while + there is a conflict. + """ + backtrack_identifiers = set() + for info in backtrack_causes: + backtrack_identifiers.add(info.requirement.name) + if info.parent is not None: + backtrack_identifiers.add(info.parent.name) + + current_backtrack_causes = [] + for identifier in identifiers: + # Requires-Python has only one candidate and the check is basically + # free, so we always do it first to avoid needless work if it fails. + # This skips calling get_preference() for all other identifiers. + if identifier == REQUIRES_PYTHON_IDENTIFIER: + return [identifier] + + # Check if this identifier is a backtrack cause + if identifier in backtrack_identifiers: + current_backtrack_causes.append(identifier) + continue + + if current_backtrack_causes: + return current_backtrack_causes + + return identifiers + + def get_preference( + self, + identifier: str, + resolutions: Mapping[str, Candidate], + candidates: Mapping[str, Iterator[Candidate]], + information: Mapping[str, Iterable[PreferenceInformation]], + backtrack_causes: Sequence[PreferenceInformation], + ) -> Preference: + """Produce a sort key for given requirement based on preference. + + The lower the return value is, the more preferred this group of + arguments is. + + Currently pip considers the following in order: + + * Any requirement that is "direct", e.g., points to an explicit URL. + * Any requirement that is "pinned", i.e., contains the operator ``===`` + or ``==`` without a wildcard. + * Any requirement that imposes an upper version limit, i.e., contains the + operator ``<``, ``<=``, ``~=``, or ``==`` with a wildcard. 
Because + pip prioritizes the latest version, preferring explicit upper bounds + can rule out infeasible candidates sooner. This does not imply that + upper bounds are good practice; they can make dependency management + and resolution harder. + * Order user-specified requirements as they are specified, placing + other requirements afterward. + * Any "non-free" requirement, i.e., one that contains at least one + operator, such as ``>=`` or ``!=``. + * Alphabetical order for consistency (aids debuggability). + """ + try: + next(iter(information[identifier])) + except StopIteration: + # There is no information for this identifier, so there's no known + # candidates. + has_information = False + else: + has_information = True + + if not has_information: + direct = False + ireqs: tuple[InstallRequirement | None, ...] = () + else: + # Go through the information and for each requirement, + # check if it's explicit (e.g., a direct link) and get the + # InstallRequirement (the second element) from get_candidate_lookup() + directs, ireqs = zip( + *( + (isinstance(r, ExplicitRequirement), r.get_candidate_lookup()[1]) + for r, _ in information[identifier] + ) + ) + direct = any(directs) + + operators: list[tuple[str, str]] = [ + (specifier.operator, specifier.version) + for specifier_set in (ireq.specifier for ireq in ireqs if ireq) + for specifier in specifier_set + ] + + pinned = any(((op[:2] == "==") and ("*" not in ver)) for op, ver in operators) + upper_bounded = any( + ((op in ("<", "<=", "~=")) or (op == "==" and "*" in ver)) + for op, ver in operators + ) + unfree = bool(operators) + requested_order = self._user_requested.get(identifier, math.inf) + + return ( + not direct, + not pinned, + not upper_bounded, + requested_order, + not unfree, + identifier, + ) + + def find_matches( + self, + identifier: str, + requirements: Mapping[str, Iterator[Requirement]], + incompatibilities: Mapping[str, Iterator[Candidate]], + ) -> Iterable[Candidate]: + def _eligible_for_upgrade(identifier: str) -> bool: + """Are upgrades allowed for this project? + + This checks the upgrade strategy, and whether the project was one + that the user specified in the command line, in order to decide + whether we should upgrade if there's a newer version available. + + (Note that we don't need access to the `--upgrade` flag, because + an upgrade strategy of "to-satisfy-only" means that `--upgrade` + was not specified). + """ + if self._upgrade_strategy == "eager": + return True + elif self._upgrade_strategy == "only-if-needed": + user_order = _get_with_identifier( + self._user_requested, + identifier, + default=None, + ) + return user_order is not None + return False + + constraint = _get_with_identifier( + self._constraints, + identifier, + default=Constraint.empty(), + ) + return self._factory.find_candidates( + identifier=identifier, + requirements=requirements, + constraint=constraint, + prefers_installed=(not _eligible_for_upgrade(identifier)), + incompatibilities=incompatibilities, + is_satisfied_by=self.is_satisfied_by, + ) + + @staticmethod + @cache + def is_satisfied_by(requirement: Requirement, candidate: Candidate) -> bool: + return requirement.is_satisfied_by(candidate) + + def get_dependencies(self, candidate: Candidate) -> Iterable[Requirement]: + with_requires = not self._ignore_dependencies + # iter_dependencies() can perform nontrivial work so delay until needed. 
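+        # iter_dependencies() may also yield None entries, e.g. for
+        # requirements whose environment markers do not apply; the generator
+        # below filters those out.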
+ return (r for r in candidate.iter_dependencies(with_requires) if r is not None) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/reporter.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/reporter.py new file mode 100644 index 0000000..e694132 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/reporter.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from collections import defaultdict +from logging import getLogger +from typing import Any + +from pip._vendor.resolvelib.reporters import BaseReporter + +from .base import Candidate, Requirement + +logger = getLogger(__name__) + + +class PipReporter(BaseReporter[Requirement, Candidate, str]): + def __init__(self) -> None: + self.reject_count_by_package: defaultdict[str, int] = defaultdict(int) + + self._messages_at_reject_count = { + 1: ( + "pip is looking at multiple versions of {package_name} to " + "determine which version is compatible with other " + "requirements. This could take a while." + ), + 8: ( + "pip is still looking at multiple versions of {package_name} to " + "determine which version is compatible with other " + "requirements. This could take a while." + ), + 13: ( + "This is taking longer than usual. You might need to provide " + "the dependency resolver with stricter constraints to reduce " + "runtime. See https://pip.pypa.io/warnings/backtracking for " + "guidance. If you want to abort this run, press Ctrl + C." + ), + } + + def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None: + self.reject_count_by_package[candidate.name] += 1 + + count = self.reject_count_by_package[candidate.name] + if count not in self._messages_at_reject_count: + return + + message = self._messages_at_reject_count[count] + logger.info("INFO: %s", message.format(package_name=candidate.name)) + + msg = "Will try a different candidate, due to conflict:" + for req_info in criterion.information: + req, parent = req_info.requirement, req_info.parent + # Inspired by Factory.get_installation_error + msg += "\n " + if parent: + msg += f"{parent.name} {parent.version} depends on " + else: + msg += "The user requested " + msg += req.format_for_error() + logger.debug(msg) + + +class PipDebuggingReporter(BaseReporter[Requirement, Candidate, str]): + """A reporter that does an info log for every event it sees.""" + + def starting(self) -> None: + logger.info("Reporter.starting()") + + def starting_round(self, index: int) -> None: + logger.info("Reporter.starting_round(%r)", index) + + def ending_round(self, index: int, state: Any) -> None: + logger.info("Reporter.ending_round(%r, state)", index) + logger.debug("Reporter.ending_round(%r, %r)", index, state) + + def ending(self, state: Any) -> None: + logger.info("Reporter.ending(%r)", state) + + def adding_requirement( + self, requirement: Requirement, parent: Candidate | None + ) -> None: + logger.info("Reporter.adding_requirement(%r, %r)", requirement, parent) + + def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None: + logger.info("Reporter.rejecting_candidate(%r, %r)", criterion, candidate) + + def pinning(self, candidate: Candidate) -> None: + logger.info("Reporter.pinning(%r)", candidate) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/requirements.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/requirements.py new file mode 100644 index 0000000..447e36b --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/requirements.py @@ -0,0 +1,247 @@ +from __future__ import annotations + +from typing import Any + +from pip._vendor.packaging.specifiers import SpecifierSet +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name + +from pip._internal.req.constructors import install_req_drop_extras +from pip._internal.req.req_install import InstallRequirement + +from .base import Candidate, CandidateLookup, Requirement, format_name + + +class ExplicitRequirement(Requirement): + def __init__(self, candidate: Candidate) -> None: + self.candidate = candidate + + def __str__(self) -> str: + return str(self.candidate) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.candidate!r})" + + def __hash__(self) -> int: + return hash(self.candidate) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, ExplicitRequirement): + return False + return self.candidate == other.candidate + + @property + def project_name(self) -> NormalizedName: + # No need to canonicalize - the candidate did this + return self.candidate.project_name + + @property + def name(self) -> str: + # No need to canonicalize - the candidate did this + return self.candidate.name + + def format_for_error(self) -> str: + return self.candidate.format_for_error() + + def get_candidate_lookup(self) -> CandidateLookup: + return self.candidate, None + + def is_satisfied_by(self, candidate: Candidate) -> bool: + return candidate == self.candidate + + +class SpecifierRequirement(Requirement): + def __init__(self, ireq: InstallRequirement) -> None: + assert ireq.link is None, "This is a link, not a specifier" + self._ireq = ireq + self._equal_cache: str | None = None + self._hash: int | None = None + self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) + + @property + def _equal(self) -> str: + if self._equal_cache is not None: + return self._equal_cache + + self._equal_cache = str(self._ireq) + return self._equal_cache + + def __str__(self) -> str: + return str(self._ireq.req) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self._ireq.req)!r})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, SpecifierRequirement): + return NotImplemented + return self._equal == other._equal + + def __hash__(self) -> int: + if self._hash is not None: + return self._hash + + self._hash = hash(self._equal) + return self._hash + + @property + def project_name(self) -> NormalizedName: + assert self._ireq.req, "Specifier-backed ireq is always PEP 508" + return canonicalize_name(self._ireq.req.name) + + @property + def name(self) -> str: + return format_name(self.project_name, self._extras) + + def format_for_error(self) -> str: + # Convert comma-separated specifiers into "A, B, ..., F and G" + # This makes the specifier a bit more "human readable", without + # risking a change in meaning. (Hopefully! 
Not all edge cases have + # been checked) + parts = [s.strip() for s in str(self).split(",")] + if len(parts) == 0: + return "" + elif len(parts) == 1: + return parts[0] + + return ", ".join(parts[:-1]) + " and " + parts[-1] + + def get_candidate_lookup(self) -> CandidateLookup: + return None, self._ireq + + def is_satisfied_by(self, candidate: Candidate) -> bool: + assert candidate.name == self.name, ( + f"Internal issue: Candidate is not for this requirement " + f"{candidate.name} vs {self.name}" + ) + # We can safely always allow prereleases here since PackageFinder + # already implements the prerelease logic, and would have filtered out + # prerelease candidates if the user does not expect them. + assert self._ireq.req, "Specifier-backed ireq is always PEP 508" + spec = self._ireq.req.specifier + return spec.contains(candidate.version, prereleases=True) + + +class SpecifierWithoutExtrasRequirement(SpecifierRequirement): + """ + Requirement backed by an install requirement on a base package. + Trims extras from its install requirement if there are any. + """ + + def __init__(self, ireq: InstallRequirement) -> None: + assert ireq.link is None, "This is a link, not a specifier" + self._ireq = install_req_drop_extras(ireq) + self._equal_cache: str | None = None + self._hash: int | None = None + self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) + + @property + def _equal(self) -> str: + if self._equal_cache is not None: + return self._equal_cache + + self._equal_cache = str(self._ireq) + return self._equal_cache + + def __eq__(self, other: object) -> bool: + if not isinstance(other, SpecifierWithoutExtrasRequirement): + return NotImplemented + return self._equal == other._equal + + def __hash__(self) -> int: + if self._hash is not None: + return self._hash + + self._hash = hash(self._equal) + return self._hash + + +class RequiresPythonRequirement(Requirement): + """A requirement representing Requires-Python metadata.""" + + def __init__(self, specifier: SpecifierSet, match: Candidate) -> None: + self.specifier = specifier + self._specifier_string = str(specifier) # for faster __eq__ + self._hash: int | None = None + self._candidate = match + + def __str__(self) -> str: + return f"Python {self.specifier}" + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self.specifier)!r})" + + def __hash__(self) -> int: + if self._hash is not None: + return self._hash + + self._hash = hash((self._specifier_string, self._candidate)) + return self._hash + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, RequiresPythonRequirement): + return False + return ( + self._specifier_string == other._specifier_string + and self._candidate == other._candidate + ) + + @property + def project_name(self) -> NormalizedName: + return self._candidate.project_name + + @property + def name(self) -> str: + return self._candidate.name + + def format_for_error(self) -> str: + return str(self) + + def get_candidate_lookup(self) -> CandidateLookup: + if self.specifier.contains(self._candidate.version, prereleases=True): + return self._candidate, None + return None, None + + def is_satisfied_by(self, candidate: Candidate) -> bool: + assert candidate.name == self._candidate.name, "Not Python candidate" + # We can safely always allow prereleases here since PackageFinder + # already implements the prerelease logic, and would have filtered out + # prerelease candidates if the user does not expect them. 
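+        # For example, SpecifierSet(">=3.8") contains "3.12.1" either way,
+        # but would reject a prerelease such as "3.13.0rc1" unless
+        # prereleases=True is passed, as it is here.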
+ return self.specifier.contains(candidate.version, prereleases=True) + + +class UnsatisfiableRequirement(Requirement): + """A requirement that cannot be satisfied.""" + + def __init__(self, name: NormalizedName) -> None: + self._name = name + + def __str__(self) -> str: + return f"{self._name} (unavailable)" + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self._name)!r})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, UnsatisfiableRequirement): + return NotImplemented + return self._name == other._name + + def __hash__(self) -> int: + return hash(self._name) + + @property + def project_name(self) -> NormalizedName: + return self._name + + @property + def name(self) -> str: + return self._name + + def format_for_error(self) -> str: + return str(self) + + def get_candidate_lookup(self) -> CandidateLookup: + return None, None + + def is_satisfied_by(self, candidate: Candidate) -> bool: + return False diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/resolver.py b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/resolver.py new file mode 100644 index 0000000..1ba70c2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/resolution/resolvelib/resolver.py @@ -0,0 +1,336 @@ +from __future__ import annotations + +import contextlib +import functools +import logging +import os +from typing import TYPE_CHECKING, cast + +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible, ResolutionTooDeep +from pip._vendor.resolvelib import Resolver as RLResolver +from pip._vendor.resolvelib.structs import DirectedGraph + +from pip._internal.cache import WheelCache +from pip._internal.exceptions import ResolutionTooDeepError +from pip._internal.index.package_finder import PackageFinder +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req.constructors import install_req_extend_extras +from pip._internal.req.req_install import InstallRequirement +from pip._internal.req.req_set import RequirementSet +from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider +from pip._internal.resolution.resolvelib.provider import PipProvider +from pip._internal.resolution.resolvelib.reporter import ( + PipDebuggingReporter, + PipReporter, +) +from pip._internal.utils.packaging import get_requirement + +from .base import Candidate, Requirement +from .factory import Factory + +if TYPE_CHECKING: + from pip._vendor.resolvelib.resolvers import Result as RLResult + + Result = RLResult[Requirement, Candidate, str] + + +logger = logging.getLogger(__name__) + + +class Resolver(BaseResolver): + _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} + + def __init__( + self, + preparer: RequirementPreparer, + finder: PackageFinder, + wheel_cache: WheelCache | None, + make_install_req: InstallRequirementProvider, + use_user_site: bool, + ignore_dependencies: bool, + ignore_installed: bool, + ignore_requires_python: bool, + force_reinstall: bool, + upgrade_strategy: str, + py_version_info: tuple[int, ...] 
| None = None, + ): + super().__init__() + assert upgrade_strategy in self._allowed_strategies + + self.factory = Factory( + finder=finder, + preparer=preparer, + make_install_req=make_install_req, + wheel_cache=wheel_cache, + use_user_site=use_user_site, + force_reinstall=force_reinstall, + ignore_installed=ignore_installed, + ignore_requires_python=ignore_requires_python, + py_version_info=py_version_info, + ) + self.ignore_dependencies = ignore_dependencies + self.upgrade_strategy = upgrade_strategy + self._result: Result | None = None + + def resolve( + self, root_reqs: list[InstallRequirement], check_supported_wheels: bool + ) -> RequirementSet: + collected = self.factory.collect_root_requirements(root_reqs) + provider = PipProvider( + factory=self.factory, + constraints=collected.constraints, + ignore_dependencies=self.ignore_dependencies, + upgrade_strategy=self.upgrade_strategy, + user_requested=collected.user_requested, + ) + if "PIP_RESOLVER_DEBUG" in os.environ: + reporter: BaseReporter[Requirement, Candidate, str] = PipDebuggingReporter() + else: + reporter = PipReporter() + resolver: RLResolver[Requirement, Candidate, str] = RLResolver( + provider, + reporter, + ) + + try: + limit_how_complex_resolution_can_be = 200000 + result = self._result = resolver.resolve( + collected.requirements, max_rounds=limit_how_complex_resolution_can_be + ) + + except ResolutionImpossible as e: + error = self.factory.get_installation_error( + cast("ResolutionImpossible[Requirement, Candidate]", e), + collected.constraints, + ) + raise error from e + except ResolutionTooDeep: + raise ResolutionTooDeepError from None + + req_set = RequirementSet(check_supported_wheels=check_supported_wheels) + # process candidates with extras last to ensure their base equivalent is + # already in the req_set if appropriate. + # Python's sort is stable so using a binary key function keeps relative order + # within both subsets. + for candidate in sorted( + result.mapping.values(), key=lambda c: c.name != c.project_name + ): + ireq = candidate.get_install_requirement() + if ireq is None: + if candidate.name != candidate.project_name: + # extend existing req's extras + with contextlib.suppress(KeyError): + req = req_set.get_requirement(candidate.project_name) + req_set.add_named_requirement( + install_req_extend_extras( + req, get_requirement(candidate.name).extras + ) + ) + continue + + # Check if there is already an installation under the same name, + # and set a flag for later stages to uninstall it, if needed. + installed_dist = self.factory.get_dist_to_uninstall(candidate) + if installed_dist is None: + # There is no existing installation -- nothing to uninstall. + ireq.should_reinstall = False + elif self.factory.force_reinstall: + # The --force-reinstall flag is set -- reinstall. + ireq.should_reinstall = True + elif installed_dist.version != candidate.version: + # The installation is different in version -- reinstall. + ireq.should_reinstall = True + elif candidate.is_editable or installed_dist.editable: + # The incoming distribution is editable, or different in + # editable-ness to installation -- reinstall. + ireq.should_reinstall = True + elif candidate.source_link and candidate.source_link.is_file: + # The incoming distribution is under file:// + if candidate.source_link.is_wheel: + # is a local wheel -- do nothing. + logger.info( + "%s is already installed with the same version as the " + "provided wheel. 
Use --force-reinstall to force an "
+                        "installation of the wheel.",
+                        ireq.name,
+                    )
+                    continue
+
+                # is a local sdist or path -- reinstall
+                ireq.should_reinstall = True
+            else:
+                continue
+
+            link = candidate.source_link
+            if link and link.is_yanked:
+                # The reason for yanking can contain non-ASCII characters.
+                msg = (
+                    "The candidate selected for download or install is a "
+                    "yanked version: {name!r} candidate (version {version} "
+                    "at {link})\nReason for being yanked: {reason}"
+                ).format(
+                    name=candidate.name,
+                    version=candidate.version,
+                    link=link,
+                    reason=link.yanked_reason or "",
+                )
+                logger.warning(msg)
+
+            req_set.add_named_requirement(ireq)
+
+        reqs = req_set.all_requirements
+        self.factory.preparer.prepare_linked_requirements_more(reqs)
+        for req in reqs:
+            req.prepared = True
+            req.needs_more_preparation = False
+        return req_set
+
+    def get_installation_order(
+        self, req_set: RequirementSet
+    ) -> list[InstallRequirement]:
+        """Get order for installation of requirements in RequirementSet.
+
+        The returned list contains a requirement before another that depends on
+        it. This helps ensure that the environment is kept consistent as they
+        get installed one-by-one.
+
+        The current implementation creates a topological ordering of the
+        dependency graph, giving more weight to packages with fewer
+        or no dependencies, while breaking any cycles in the graph at
+        arbitrary points. We make no guarantees about where the cycle
+        would be broken, other than that it *would* be broken.
+        """
+        assert self._result is not None, "must call resolve() first"
+
+        if not req_set.requirements:
+            # Nothing is left to install, so we do not need an order.
+            return []
+
+        graph = self._result.graph
+        weights = get_topological_weights(graph, set(req_set.requirements.keys()))
+
+        sorted_items = sorted(
+            req_set.requirements.items(),
+            key=functools.partial(_req_set_item_sorter, weights=weights),
+            reverse=True,
+        )
+        return [ireq for _, ireq in sorted_items]
+
+
+def get_topological_weights(
+    graph: DirectedGraph[str | None], requirement_keys: set[str]
+) -> dict[str | None, int]:
+    """Assign weights to each node based on how "deep" they are.
+
+    This implementation may change at any point in the future without prior
+    notice.
+
+    We first simplify the dependency graph by pruning any leaves and giving them
+    the highest weight: a package without any dependencies should be installed
+    first. This is done again and again in the same way, giving ever less weight
+    to the newly found leaves. The loop stops when no leaves are left: all
+    remaining packages have at least one dependency left in the graph.
+
+    Then we continue with the remaining graph, taking the length of the
+    longest path from the root to each node, ignoring any path that contains
+    a single node twice (i.e. cycles). This is done through a depth-first
+    search of the graph, while keeping track of the path to the node.
+
+    A cycle in the graph would result in a node being revisited while it is
+    still on its own path. In this case, we take no action, which helps ensure
+    we don't get stuck in the cycle.
+
+    When assigning weight, the longer path (i.e. larger length) is preferred.
+
+    We are only interested in the weights of packages that are in the
+    requirement_keys.
+    """
+    path: set[str | None] = set()
+    weights: dict[str | None, list[int]] = {}
+
+    def visit(node: str | None) -> None:
+        if node in path:
+            # We hit a cycle, so we'll break it here.
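+            # For example, with a dependency cycle A -> B -> A, the second
+            # visit to A happens while A is still in `path`, so we stop
+            # descending on this branch instead of recursing forever.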
+ return + + # The walk is exponential and for pathologically connected graphs (which + # are the ones most likely to contain cycles in the first place) it can + # take until the heat-death of the universe. To counter this we limit + # the number of attempts to visit (i.e. traverse through) any given + # node. We choose a value here which gives decent enough coverage for + # fairly well behaved graphs, and still limits the walk complexity to be + # linear in nature. + cur_weights = weights.get(node, []) + if len(cur_weights) >= 5: + return + + # Time to visit the children! + path.add(node) + for child in graph.iter_children(node): + visit(child) + path.remove(node) + + if node not in requirement_keys: + return + + cur_weights.append(len(path)) + weights[node] = cur_weights + + # Simplify the graph, pruning leaves that have no dependencies. This is + # needed for large graphs (say over 200 packages) because the `visit` + # function is slower for large/densely connected graphs, taking minutes. + # See https://github.com/pypa/pip/issues/10557 + # We repeat the pruning step until we have no more leaves to remove. + while True: + leaves = set() + for key in graph: + if key is None: + continue + for _child in graph.iter_children(key): + # This means we have at least one child + break + else: + # No child. + leaves.add(key) + if not leaves: + # We are done simplifying. + break + # Calculate the weight for the leaves. + weight = len(graph) - 1 + for leaf in leaves: + if leaf not in requirement_keys: + continue + weights[leaf] = [weight] + # Remove the leaves from the graph, making it simpler. + for leaf in leaves: + graph.remove(leaf) + + # Visit the remaining graph, this will only have nodes to handle if the + # graph had a cycle in it, which the pruning step above could not handle. + # `None` is guaranteed to be the root node by resolvelib. + visit(None) + + # Sanity check: all requirement keys should be in the weights, + # and no other keys should be in the weights. + difference = set(weights.keys()).difference(requirement_keys) + assert not difference, difference + + # Now give back all the weights, choosing the largest ones from what we + # accumulated. + return {node: max(wgts) for (node, wgts) in weights.items()} + + +def _req_set_item_sorter( + item: tuple[str, InstallRequirement], + weights: dict[str | None, int], +) -> tuple[int, str]: + """Key function used to sort install requirements for installation. + + Based on the "weight" mapping calculated in ``get_installation_order()``. + The canonical package name is returned as the second member as a tie- + breaker to ensure the result is predictable, which is useful in tests. 
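+
+    For example (with made-up values), the item ("Requests", ireq) together
+    with weights {"requests": 3} produces the sort key (3, "requests").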
+ """ + name = canonicalize_name(item[0]) + return weights[name], name diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/self_outdated_check.py b/.venv/lib/python3.12/site-packages/pip/_internal/self_outdated_check.py new file mode 100644 index 0000000..ca507f1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/self_outdated_check.py @@ -0,0 +1,254 @@ +from __future__ import annotations + +import datetime +import functools +import hashlib +import json +import logging +import optparse +import os.path +import sys +from dataclasses import dataclass +from typing import Any, Callable + +from pip._vendor.packaging.version import Version +from pip._vendor.packaging.version import parse as parse_version +from pip._vendor.rich.console import Group +from pip._vendor.rich.markup import escape +from pip._vendor.rich.text import Text + +from pip._internal.index.collector import LinkCollector +from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import get_default_environment +from pip._internal.models.selection_prefs import SelectionPreferences +from pip._internal.network.session import PipSession +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.entrypoints import ( + get_best_invocation_for_this_pip, + get_best_invocation_for_this_python, +) +from pip._internal.utils.filesystem import adjacent_tmp_file, check_path_owner, replace +from pip._internal.utils.misc import ( + ExternallyManagedEnvironment, + check_externally_managed, + ensure_dir, +) + +_WEEK = datetime.timedelta(days=7) + +logger = logging.getLogger(__name__) + + +def _get_statefile_name(key: str) -> str: + key_bytes = key.encode() + name = hashlib.sha224(key_bytes).hexdigest() + return name + + +def _convert_date(isodate: str) -> datetime.datetime: + """Convert an ISO format string to a date. + + Handles the format 2020-01-22T14:24:01Z (trailing Z) + which is not supported by older versions of fromisoformat. + """ + return datetime.datetime.fromisoformat(isodate.replace("Z", "+00:00")) + + +class SelfCheckState: + def __init__(self, cache_dir: str) -> None: + self._state: dict[str, Any] = {} + self._statefile_path = None + + # Try to load the existing state + if cache_dir: + self._statefile_path = os.path.join( + cache_dir, "selfcheck", _get_statefile_name(self.key) + ) + try: + with open(self._statefile_path, encoding="utf-8") as statefile: + self._state = json.load(statefile) + except (OSError, ValueError, KeyError): + # Explicitly suppressing exceptions, since we don't want to + # error out if the cache file is invalid. + pass + + @property + def key(self) -> str: + return sys.prefix + + def get(self, current_time: datetime.datetime) -> str | None: + """Check if we have a not-outdated version loaded already.""" + if not self._state: + return None + + if "last_check" not in self._state: + return None + + if "pypi_version" not in self._state: + return None + + # Determine if we need to refresh the state + last_check = _convert_date(self._state["last_check"]) + time_since_last_check = current_time - last_check + if time_since_last_check > _WEEK: + return None + + return self._state["pypi_version"] + + def set(self, pypi_version: str, current_time: datetime.datetime) -> None: + # If we do not have a path to cache in, don't bother saving. 
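+        # The file written below is a small JSON object of the form
+        # {"key": ..., "last_check": ..., "pypi_version": ...}, keyed by
+        # sys.prefix so each environment tracks its own state.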
+ if not self._statefile_path: + return + + # Check to make sure that we own the directory + if not check_path_owner(os.path.dirname(self._statefile_path)): + return + + # Now that we've ensured the directory is owned by this user, we'll go + # ahead and make sure that all our directories are created. + ensure_dir(os.path.dirname(self._statefile_path)) + + state = { + # Include the key so it's easy to tell which pip wrote the + # file. + "key": self.key, + "last_check": current_time.isoformat(), + "pypi_version": pypi_version, + } + + text = json.dumps(state, sort_keys=True, separators=(",", ":")) + + with adjacent_tmp_file(self._statefile_path) as f: + f.write(text.encode()) + + try: + # Since we have a prefix-specific state file, we can just + # overwrite whatever is there, no need to check. + replace(f.name, self._statefile_path) + except OSError: + # Best effort. + pass + + +@dataclass +class UpgradePrompt: + old: str + new: str + + def __rich__(self) -> Group: + if WINDOWS: + pip_cmd = f"{get_best_invocation_for_this_python()} -m pip" + else: + pip_cmd = get_best_invocation_for_this_pip() + + notice = "[bold][[reset][blue]notice[reset][bold]][reset]" + return Group( + Text(), + Text.from_markup( + f"{notice} A new release of pip is available: " + f"[red]{self.old}[reset] -> [green]{self.new}[reset]" + ), + Text.from_markup( + f"{notice} To update, run: " + f"[green]{escape(pip_cmd)} install --upgrade pip" + ), + ) + + +def was_installed_by_pip(pkg: str) -> bool: + """Checks whether pkg was installed by pip + + This is used not to display the upgrade message when pip is in fact + installed by system package manager, such as dnf on Fedora. + """ + dist = get_default_environment().get_distribution(pkg) + return dist is not None and "pip" == dist.installer + + +def _get_current_remote_pip_version( + session: PipSession, options: optparse.Values +) -> str | None: + # Lets use PackageFinder to see what the latest pip version is + link_collector = LinkCollector.create( + session, + options=options, + suppress_no_index=True, + ) + + # Pass allow_yanked=False so we don't suggest upgrading to a + # yanked version. + selection_prefs = SelectionPreferences( + allow_yanked=False, + allow_all_prereleases=False, # Explicitly set to False + ) + + finder = PackageFinder.create( + link_collector=link_collector, + selection_prefs=selection_prefs, + ) + best_candidate = finder.find_best_candidate("pip").best_candidate + if best_candidate is None: + return None + + return str(best_candidate.version) + + +def _self_version_check_logic( + *, + state: SelfCheckState, + current_time: datetime.datetime, + local_version: Version, + get_remote_version: Callable[[], str | None], +) -> UpgradePrompt | None: + remote_version_str = state.get(current_time) + if remote_version_str is None: + remote_version_str = get_remote_version() + if remote_version_str is None: + logger.debug("No remote pip version found") + return None + state.set(remote_version_str, current_time) + + remote_version = parse_version(remote_version_str) + logger.debug("Remote version of pip: %s", remote_version) + logger.debug("Local version of pip: %s", local_version) + + pip_installed_by_pip = was_installed_by_pip("pip") + logger.debug("Was pip installed by pip? %s", pip_installed_by_pip) + if not pip_installed_by_pip: + return None # Only suggest upgrade if pip is installed by pip. 
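+    # For example, a local development build "25.3.dev0" compares older than
+    # a remote "25.3" but shares its base version, so the second clause below
+    # keeps the prompt from firing for in-development builds.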
+ + local_version_is_older = ( + local_version < remote_version + and local_version.base_version != remote_version.base_version + ) + if local_version_is_older: + return UpgradePrompt(old=str(local_version), new=remote_version_str) + + return None + + +def pip_self_version_check(session: PipSession, options: optparse.Values) -> None: + """Check for an update for pip. + + Limit the frequency of checks to once per week. State is stored either in + the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix + of the pip script path. + """ + installed_dist = get_default_environment().get_distribution("pip") + if not installed_dist: + return + try: + check_externally_managed() + except ExternallyManagedEnvironment: + return + + upgrade_prompt = _self_version_check_logic( + state=SelfCheckState(cache_dir=options.cache_dir), + current_time=datetime.datetime.now(datetime.timezone.utc), + local_version=installed_dist.version, + get_remote_version=functools.partial( + _get_current_remote_pip_version, session, options + ), + ) + if upgrade_prompt is not None: + logger.warning("%s", upgrade_prompt, extra={"rich": True}) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..051da94 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-312.pyc new file mode 100644 index 0000000..b22b08b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_log.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_log.cpython-312.pyc new file mode 100644 index 0000000..cdfc789 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_log.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-312.pyc new file mode 100644 index 0000000..a396743 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compat.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compat.cpython-312.pyc new file mode 100644 index 0000000..743f1dd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compat.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-312.pyc new file mode 100644 index 0000000..a24a6b9 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-312.pyc new file mode 100644 index 0000000..0ce10b1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-312.pyc new file mode 100644 index 0000000..3039ee2 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-312.pyc new file mode 100644 index 0000000..e45349e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-312.pyc new file mode 100644 index 0000000..1309c7f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-312.pyc new file mode 100644 index 0000000..44705dc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-312.pyc new file mode 100644 index 0000000..b49bf2e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-312.pyc new file mode 100644 index 0000000..14739ce Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-312.pyc new file mode 100644 index 0000000..fec8739 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-312.pyc new file mode 100644 index 0000000..3a1972f Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-312.pyc differ diff --git 
a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/logging.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/logging.cpython-312.pyc new file mode 100644 index 0000000..b5c1f86 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/logging.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/misc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/misc.cpython-312.pyc new file mode 100644 index 0000000..72ee6af Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/misc.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-312.pyc new file mode 100644 index 0000000..940b84b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/retry.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/retry.cpython-312.pyc new file mode 100644 index 0000000..1fcb418 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/retry.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-312.pyc new file mode 100644 index 0000000..66b2a60 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-312.pyc new file mode 100644 index 0000000..a835dfc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-312.pyc new file mode 100644 index 0000000..be2f7f7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-312.pyc new file mode 100644 index 0000000..7b2ba8a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/urls.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/urls.cpython-312.pyc new file mode 100644 index 0000000..3ec9d0c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/urls.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-312.pyc new file mode 100644 
index 0000000..a9bb599 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-312.pyc new file mode 100644 index 0000000..c266044 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/_jaraco_text.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/_jaraco_text.py new file mode 100644 index 0000000..6ccf53b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/_jaraco_text.py @@ -0,0 +1,109 @@ +"""Functions brought over from jaraco.text. + +These functions are not supposed to be used within `pip._internal`. These are +helper functions brought over from `jaraco.text` to enable vendoring newer +copies of `pkg_resources` without having to vendor `jaraco.text` and its entire +dependency cone; something that our vendoring setup is not currently capable of +handling. + +License reproduced from original source below: + +Copyright Jason R. Coombs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. +""" + +import functools +import itertools + + +def _nonblank(str): + return str and not str.startswith("#") + + +@functools.singledispatch +def yield_lines(iterable): + r""" + Yield valid lines of a string or iterable. + + >>> list(yield_lines('')) + [] + >>> list(yield_lines(['foo', 'bar'])) + ['foo', 'bar'] + >>> list(yield_lines('foo\nbar')) + ['foo', 'bar'] + >>> list(yield_lines('\nfoo\n#bar\nbaz #comment')) + ['foo', 'baz #comment'] + >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n'])) + ['foo', 'bar', 'baz', 'bing'] + """ + return itertools.chain.from_iterable(map(yield_lines, iterable)) + + +@yield_lines.register(str) +def _(text): + return filter(_nonblank, map(str.strip, text.splitlines())) + + +def drop_comment(line): + """ + Drop comments. + + >>> drop_comment('foo # bar') + 'foo' + + A hash without a space may be in a URL. + + >>> drop_comment('http://example.com/foo#bar') + 'http://example.com/foo#bar' + """ + return line.partition(" #")[0] + + +def join_continuation(lines): + r""" + Join lines continued by a trailing backslash. 
+ + >>> list(join_continuation(['foo \\', 'bar', 'baz'])) + ['foobar', 'baz'] + >>> list(join_continuation(['foo \\', 'bar', 'baz'])) + ['foobar', 'baz'] + >>> list(join_continuation(['foo \\', 'bar \\', 'baz'])) + ['foobarbaz'] + + Not sure why, but... + The character preceding the backslash is also elided. + + >>> list(join_continuation(['goo\\', 'dly'])) + ['godly'] + + A terrible idea, but... + If no line is available to continue, suppress the lines. + + >>> list(join_continuation(['foo', 'bar\\', 'baz\\'])) + ['foo'] + """ + lines = iter(lines) + for item in lines: + while item.endswith("\\"): + try: + item = item[:-2].strip() + next(lines) + except StopIteration: + return + yield item diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/_log.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/_log.py new file mode 100644 index 0000000..92c4c6a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/_log.py @@ -0,0 +1,38 @@ +"""Customize logging + +Defines custom logger class for the `logger.verbose(...)` method. + +init_logging() must be called before any other modules that call logging.getLogger. +""" + +import logging +from typing import Any, cast + +# custom log level for `--verbose` output +# between DEBUG and INFO +VERBOSE = 15 + + +class VerboseLogger(logging.Logger): + """Custom Logger, defining a verbose log-level + + VERBOSE is between INFO and DEBUG. + """ + + def verbose(self, msg: str, *args: Any, **kwargs: Any) -> None: + return self.log(VERBOSE, msg, *args, **kwargs) + + +def getLogger(name: str) -> VerboseLogger: + """logging.getLogger, but ensures our VerboseLogger class is returned""" + return cast(VerboseLogger, logging.getLogger(name)) + + +def init_logging() -> None: + """Register our VerboseLogger and VERBOSE log level. + + Should be called before any calls to getLogger(), + i.e. in pip._internal.__init__ + """ + logging.setLoggerClass(VerboseLogger) + logging.addLevelName(VERBOSE, "VERBOSE") diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/appdirs.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/appdirs.py new file mode 100644 index 0000000..4152528 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/appdirs.py @@ -0,0 +1,52 @@ +""" +This code wraps the vendored appdirs module to so the return values are +compatible for the current pip code base. + +The intention is to rewrite current usages gradually, keeping the tests pass, +and eventually drop this after all usages are changed. +""" + +import os +import sys + +from pip._vendor import platformdirs as _appdirs + + +def user_cache_dir(appname: str) -> str: + return _appdirs.user_cache_dir(appname, appauthor=False) + + +def _macos_user_config_dir(appname: str, roaming: bool = True) -> str: + # Use ~/Application Support/pip, if the directory exists. + path = _appdirs.user_data_dir(appname, appauthor=False, roaming=roaming) + if os.path.isdir(path): + return path + + # Use a Linux-like ~/.config/pip, by default. 
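The `_log` module added above introduces a VERBOSE level (15) that sits between DEBUG (10) and INFO (20). A minimal usage sketch, assuming pip's internals are importable as vendored in this patch; the logger name "demo" is illustrative:

import logging

from pip._internal.utils._log import VERBOSE, getLogger, init_logging

init_logging()                       # registers VerboseLogger and the VERBOSE level name
logging.basicConfig(level=VERBOSE)   # let VERBOSE records through the root handler
logger = getLogger("demo")           # created after init_logging(), so it is a VerboseLogger
logger.verbose("shown at -v")        # logs at level 15, between DEBUG and INFO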
+ linux_like_path = "~/.config/" + if appname: + linux_like_path = os.path.join(linux_like_path, appname) + + return os.path.expanduser(linux_like_path) + + +def user_config_dir(appname: str, roaming: bool = True) -> str: + if sys.platform == "darwin": + return _macos_user_config_dir(appname, roaming) + + return _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming) + + +# for the discussion regarding site_config_dir locations +# see +def site_config_dirs(appname: str) -> list[str]: + if sys.platform == "darwin": + dirval = _appdirs.site_data_dir(appname, appauthor=False, multipath=True) + return dirval.split(os.pathsep) + + dirval = _appdirs.site_config_dir(appname, appauthor=False, multipath=True) + if sys.platform == "win32": + return [dirval] + + # Unix-y system. Look in /etc as well. + return dirval.split(os.pathsep) + ["/etc"] diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/compat.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/compat.py new file mode 100644 index 0000000..324789f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/compat.py @@ -0,0 +1,85 @@ +"""Stuff that differs in different Python versions and platform +distributions.""" + +import importlib.resources +import logging +import os +import sys +from typing import IO + +__all__ = ["get_path_uid", "stdlib_pkgs", "tomllib", "WINDOWS"] + + +logger = logging.getLogger(__name__) + + +def has_tls() -> bool: + try: + import _ssl # noqa: F401 # ignore unused + + return True + except ImportError: + pass + + from pip._vendor.urllib3.util import IS_PYOPENSSL + + return IS_PYOPENSSL + + +def get_path_uid(path: str) -> int: + """ + Return path's uid. + + Does not follow symlinks: + https://github.com/pypa/pip/pull/935#discussion_r5307003 + + Placed this function in compat due to differences on AIX and + Jython, that should eventually go away. + + :raises OSError: When path is a symlink or can't be read. + """ + if hasattr(os, "O_NOFOLLOW"): + fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) + file_uid = os.fstat(fd).st_uid + os.close(fd) + else: # AIX and Jython + # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW + if not os.path.islink(path): + # older versions of Jython don't have `os.fstat` + file_uid = os.stat(path).st_uid + else: + # raise OSError for parity with os.O_NOFOLLOW above + raise OSError(f"{path} is a symlink; Will not return uid for symlinks") + return file_uid + + +# The importlib.resources.open_text function was deprecated in 3.11 with suggested +# replacement we use below. +if sys.version_info < (3, 11): + open_text_resource = importlib.resources.open_text +else: + + def open_text_resource( + package: str, resource: str, encoding: str = "utf-8", errors: str = "strict" + ) -> IO[str]: + return (importlib.resources.files(package) / resource).open( + "r", encoding=encoding, errors=errors + ) + + +if sys.version_info >= (3, 11): + import tomllib +else: + from pip._vendor import tomli as tomllib + + +# packages in the stdlib that may have installation metadata, but should not be +# considered 'installed'. 
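The appdirs wrapper above normalizes platformdirs' return values for pip's use. A minimal sketch of what it yields; the paths in the comments are typical Linux defaults, not guaranteed values:

from pip._internal.utils.appdirs import (
    site_config_dirs,
    user_cache_dir,
    user_config_dir,
)

print(user_cache_dir("pip"))    # e.g. /home/user/.cache/pip
print(user_config_dir("pip"))   # e.g. /home/user/.config/pip
print(site_config_dirs("pip"))  # e.g. ['/etc/xdg/pip', '/etc'] on Linux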
this theoretically could be determined based on +# dist.location (py27:`sysconfig.get_paths()['stdlib']`, +# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may +# make this ineffective, so hard-coding +stdlib_pkgs = {"python", "wsgiref", "argparse"} + + +# windows detection, covers cpython and ironpython +WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt") diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/compatibility_tags.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/compatibility_tags.py new file mode 100644 index 0000000..6d98171 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/compatibility_tags.py @@ -0,0 +1,201 @@ +"""Generate and work with PEP 425 Compatibility Tags.""" + +from __future__ import annotations + +import re + +from pip._vendor.packaging.tags import ( + PythonVersion, + Tag, + android_platforms, + compatible_tags, + cpython_tags, + generic_tags, + interpreter_name, + interpreter_version, + ios_platforms, + mac_platforms, +) + +_apple_arch_pat = re.compile(r"(.+)_(\d+)_(\d+)_(.+)") + + +def version_info_to_nodot(version_info: tuple[int, ...]) -> str: + # Only use up to the first two numbers. + return "".join(map(str, version_info[:2])) + + +def _mac_platforms(arch: str) -> list[str]: + match = _apple_arch_pat.match(arch) + if match: + name, major, minor, actual_arch = match.groups() + mac_version = (int(major), int(minor)) + arches = [ + # Since we have always only checked that the platform starts + # with "macosx", for backwards-compatibility we extract the + # actual prefix provided by the user in case they provided + # something like "macosxcustom_". It may be good to remove + # this as undocumented or deprecate it in the future. + "{}_{}".format(name, arch[len("macosx_") :]) + for arch in mac_platforms(mac_version, actual_arch) + ] + else: + # arch pattern didn't match (?!) + arches = [arch] + return arches + + +def _ios_platforms(arch: str) -> list[str]: + match = _apple_arch_pat.match(arch) + if match: + name, major, minor, actual_multiarch = match.groups() + ios_version = (int(major), int(minor)) + arches = [ + # Since we have always only checked that the platform starts + # with "ios", for backwards-compatibility we extract the + # actual prefix provided by the user in case they provided + # something like "ioscustom_". It may be good to remove + # this as undocumented or deprecate it in the future. + "{}_{}".format(name, arch[len("ios_") :]) + for arch in ios_platforms(ios_version, actual_multiarch) + ] + else: + # arch pattern didn't match (?!) + arches = [arch] + return arches + + +def _android_platforms(arch: str) -> list[str]: + match = re.fullmatch(r"android_(\d+)_(.+)", arch) + if match: + api_level, abi = match.groups() + return list(android_platforms(int(api_level), abi)) + else: + # arch pattern didn't match (?!) + return [arch] + + +def _custom_manylinux_platforms(arch: str) -> list[str]: + arches = [arch] + arch_prefix, arch_sep, arch_suffix = arch.partition("_") + if arch_prefix == "manylinux2014": + # manylinux1/manylinux2010 wheels run on most manylinux2014 systems + # with the exception of wheels depending on ncurses. 
PEP 599 states + # manylinux1/manylinux2010 wheels should be considered + # manylinux2014 wheels: + # https://www.python.org/dev/peps/pep-0599/#backwards-compatibility-with-manylinux2010-wheels + if arch_suffix in {"i686", "x86_64"}: + arches.append("manylinux2010" + arch_sep + arch_suffix) + arches.append("manylinux1" + arch_sep + arch_suffix) + elif arch_prefix == "manylinux2010": + # manylinux1 wheels run on most manylinux2010 systems with the + # exception of wheels depending on ncurses. PEP 571 states + # manylinux1 wheels should be considered manylinux2010 wheels: + # https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels + arches.append("manylinux1" + arch_sep + arch_suffix) + return arches + + +def _get_custom_platforms(arch: str) -> list[str]: + arch_prefix, arch_sep, arch_suffix = arch.partition("_") + if arch.startswith("macosx"): + arches = _mac_platforms(arch) + elif arch.startswith("ios"): + arches = _ios_platforms(arch) + elif arch_prefix == "android": + arches = _android_platforms(arch) + elif arch_prefix in ["manylinux2014", "manylinux2010"]: + arches = _custom_manylinux_platforms(arch) + else: + arches = [arch] + return arches + + +def _expand_allowed_platforms(platforms: list[str] | None) -> list[str] | None: + if not platforms: + return None + + seen = set() + result = [] + + for p in platforms: + if p in seen: + continue + additions = [c for c in _get_custom_platforms(p) if c not in seen] + seen.update(additions) + result.extend(additions) + + return result + + +def _get_python_version(version: str) -> PythonVersion: + if len(version) > 1: + return int(version[0]), int(version[1:]) + else: + return (int(version[0]),) + + +def _get_custom_interpreter( + implementation: str | None = None, version: str | None = None +) -> str: + if implementation is None: + implementation = interpreter_name() + if version is None: + version = interpreter_version() + return f"{implementation}{version}" + + +def get_supported( + version: str | None = None, + platforms: list[str] | None = None, + impl: str | None = None, + abis: list[str] | None = None, +) -> list[Tag]: + """Return a list of supported tags for each version specified in + `versions`. + + :param version: a string version, of the form "33" or "32", + or None. The version will be assumed to support our ABI. + :param platform: specify a list of platforms you want valid + tags for, or None. If None, use the local system platform. + :param impl: specify the exact implementation you want valid + tags for, or None. If None, use the local interpreter impl. + :param abis: specify a list of abis you want valid + tags for, or None. If None, use the local interpreter abi. 
+ """ + supported: list[Tag] = [] + + python_version: PythonVersion | None = None + if version is not None: + python_version = _get_python_version(version) + + interpreter = _get_custom_interpreter(impl, version) + + platforms = _expand_allowed_platforms(platforms) + + is_cpython = (impl or interpreter_name()) == "cp" + if is_cpython: + supported.extend( + cpython_tags( + python_version=python_version, + abis=abis, + platforms=platforms, + ) + ) + else: + supported.extend( + generic_tags( + interpreter=interpreter, + abis=abis, + platforms=platforms, + ) + ) + supported.extend( + compatible_tags( + python_version=python_version, + interpreter=interpreter, + platforms=platforms, + ) + ) + + return supported diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/datetime.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/datetime.py new file mode 100644 index 0000000..776e498 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/datetime.py @@ -0,0 +1,10 @@ +"""For when pip wants to check the date or time.""" + +import datetime + + +def today_is_later_than(year: int, month: int, day: int) -> bool: + today = datetime.date.today() + given = datetime.date(year, month, day) + + return today > given diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/deprecation.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/deprecation.py new file mode 100644 index 0000000..96e7783 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/deprecation.py @@ -0,0 +1,126 @@ +""" +A module that implements tooling to enable easy warnings about deprecations. +""" + +from __future__ import annotations + +import logging +import warnings +from typing import Any, TextIO + +from pip._vendor.packaging.version import parse + +from pip import __version__ as current_version # NOTE: tests patch this name. + +DEPRECATION_MSG_PREFIX = "DEPRECATION: " + + +class PipDeprecationWarning(Warning): + pass + + +_original_showwarning: Any = None + + +# Warnings <-> Logging Integration +def _showwarning( + message: Warning | str, + category: type[Warning], + filename: str, + lineno: int, + file: TextIO | None = None, + line: str | None = None, +) -> None: + if file is not None: + if _original_showwarning is not None: + _original_showwarning(message, category, filename, lineno, file, line) + elif issubclass(category, PipDeprecationWarning): + # We use a specially named logger which will handle all of the + # deprecation messages for pip. + logger = logging.getLogger("pip._internal.deprecations") + logger.warning(message) + else: + _original_showwarning(message, category, filename, lineno, file, line) + + +def install_warning_logger() -> None: + # Enable our Deprecation Warnings + warnings.simplefilter("default", PipDeprecationWarning, append=True) + + global _original_showwarning + + if _original_showwarning is None: + _original_showwarning = warnings.showwarning + warnings.showwarning = _showwarning + + +def deprecated( + *, + reason: str, + replacement: str | None, + gone_in: str | None, + feature_flag: str | None = None, + issue: int | None = None, +) -> None: + """Helper to deprecate existing functionality. + + reason: + Textual reason shown to the user about why this functionality has + been deprecated. Should be a complete sentence. + replacement: + Textual suggestion shown to the user about what alternative + functionality they can use. + gone_in: + The version of pip does this functionality should get removed in. 
+ Raises an error if pip's current version is greater than or equal to + this. + feature_flag: + Command-line flag of the form --use-feature={feature_flag} for testing + upcoming functionality. + issue: + Issue number on the tracker that would serve as a useful place for + users to find related discussion and provide feedback. + """ + + # Determine whether or not the feature is already gone in this version. + is_gone = gone_in is not None and parse(current_version) >= parse(gone_in) + + message_parts = [ + (reason, f"{DEPRECATION_MSG_PREFIX}{{}}"), + ( + gone_in, + ( + "pip {} will enforce this behaviour change." + if not is_gone + else "Since pip {}, this is no longer supported." + ), + ), + ( + replacement, + "A possible replacement is {}.", + ), + ( + feature_flag, + ( + "You can use the flag --use-feature={} to test the upcoming behaviour." + if not is_gone + else None + ), + ), + ( + issue, + "Discussion can be found at https://github.com/pypa/pip/issues/{}", + ), + ] + + message = " ".join( + format_str.format(value) + for value, format_str in message_parts + if format_str is not None and value is not None + ) + + # Raise as an error if this behaviour is deprecated. + if is_gone: + raise PipDeprecationWarning(message) + + warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/direct_url_helpers.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/direct_url_helpers.py new file mode 100644 index 0000000..3cbc1e7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/direct_url_helpers.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +from pip._internal.models.direct_url import ArchiveInfo, DirectUrl, DirInfo, VcsInfo +from pip._internal.models.link import Link +from pip._internal.utils.urls import path_to_url +from pip._internal.vcs import vcs + + +def direct_url_as_pep440_direct_reference(direct_url: DirectUrl, name: str) -> str: + """Convert a DirectUrl to a pip requirement string.""" + direct_url.validate() # if invalid, this is a pip bug + requirement = name + " @ " + fragments = [] + if isinstance(direct_url.info, VcsInfo): + requirement += ( + f"{direct_url.info.vcs}+{direct_url.url}@{direct_url.info.commit_id}" + ) + elif isinstance(direct_url.info, ArchiveInfo): + requirement += direct_url.url + if direct_url.info.hash: + fragments.append(direct_url.info.hash) + else: + assert isinstance(direct_url.info, DirInfo) + requirement += direct_url.url + if direct_url.subdirectory: + fragments.append("subdirectory=" + direct_url.subdirectory) + if fragments: + requirement += "#" + "&".join(fragments) + return requirement + + +def direct_url_for_editable(source_dir: str) -> DirectUrl: + return DirectUrl( + url=path_to_url(source_dir), + info=DirInfo(editable=True), + ) + + +def direct_url_from_link( + link: Link, source_dir: str | None = None, link_is_in_wheel_cache: bool = False +) -> DirectUrl: + if link.is_vcs: + vcs_backend = vcs.get_backend_for_scheme(link.scheme) + assert vcs_backend + url, requested_revision, _ = vcs_backend.get_url_rev_and_auth( + link.url_without_fragment + ) + # For VCS links, we need to find out and add commit_id. + if link_is_in_wheel_cache: + # If the requested VCS link corresponds to a cached + # wheel, it means the requested revision was an + # immutable commit hash, otherwise it would not have + # been cached. In that case we don't have a source_dir + # with the VCS checkout. 
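A minimal sketch of how the `deprecated()` helper above assembles its message; the reason and replacement strings are illustrative, and `gone_in=None` keeps it a warning rather than a hard error:

import warnings

from pip._internal.utils.deprecation import deprecated

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    deprecated(
        reason="Example behaviour is deprecated.",        # illustrative text
        replacement="the hypothetical --new-flag option",  # illustrative text
        gone_in=None,  # None: warn now, never raise
    )
print(caught[0].message)
# DEPRECATION: Example behaviour is deprecated. A possible replacement is
# the hypothetical --new-flag option.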
+ assert requested_revision + commit_id = requested_revision + else: + # If the wheel was not in cache, it means we have + # had to checkout from VCS to build and we have a source_dir + # which we can inspect to find out the commit id. + assert source_dir + commit_id = vcs_backend.get_revision(source_dir) + return DirectUrl( + url=url, + info=VcsInfo( + vcs=vcs_backend.name, + commit_id=commit_id, + requested_revision=requested_revision, + ), + subdirectory=link.subdirectory_fragment, + ) + elif link.is_existing_dir(): + return DirectUrl( + url=link.url_without_fragment, + info=DirInfo(), + subdirectory=link.subdirectory_fragment, + ) + else: + hash = None + hash_name = link.hash_name + if hash_name: + hash = f"{hash_name}={link.hash}" + return DirectUrl( + url=link.url_without_fragment, + info=ArchiveInfo(hash=hash), + subdirectory=link.subdirectory_fragment, + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/egg_link.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/egg_link.py new file mode 100644 index 0000000..dc85a58 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/egg_link.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import os +import re +import sys + +from pip._internal.locations import site_packages, user_site +from pip._internal.utils.virtualenv import ( + running_under_virtualenv, + virtualenv_no_global, +) + +__all__ = [ + "egg_link_path_from_sys_path", + "egg_link_path_from_location", +] + + +def _egg_link_names(raw_name: str) -> list[str]: + """ + Convert a Name metadata value to a .egg-link name, by applying + the same substitution as pkg_resources's safe_name function. + Note: we cannot use canonicalize_name because it has a different logic. + + We also look for the raw name (without normalization) as setuptools 69 changed + the way it names .egg-link files (https://github.com/pypa/setuptools/issues/4167). + """ + return [ + re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link", + f"{raw_name}.egg-link", + ] + + +def egg_link_path_from_sys_path(raw_name: str) -> str | None: + """ + Look for a .egg-link file for project name, by walking sys.path. + """ + egg_link_names = _egg_link_names(raw_name) + for path_item in sys.path: + for egg_link_name in egg_link_names: + egg_link = os.path.join(path_item, egg_link_name) + if os.path.isfile(egg_link): + return egg_link + return None + + +def egg_link_path_from_location(raw_name: str) -> str | None: + """ + Return the path for the .egg-link file if it exists, otherwise, None. + + There's 3 scenarios: + 1) not in a virtualenv + try to find in site.USER_SITE, then site_packages + 2) in a no-global virtualenv + try to find in site_packages + 3) in a yes-global virtualenv + try to find in site_packages, then site.USER_SITE + (don't look in global location) + + For #1 and #3, there could be odd cases, where there's an egg-link in 2 + locations. + + This method will just return the first one found. 
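A minimal sketch of the direct-URL helpers above, turning a local editable checkout into a PEP 440 direct reference; the path and project name are illustrative, and the printed form assumes a POSIX filesystem:

from pip._internal.utils.direct_url_helpers import (
    direct_url_as_pep440_direct_reference,
    direct_url_for_editable,
)

du = direct_url_for_editable("/src/example")              # illustrative path
print(direct_url_as_pep440_direct_reference(du, "example"))
# example @ file:///src/example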
+ """ + sites: list[str] = [] + if running_under_virtualenv(): + sites.append(site_packages) + if not virtualenv_no_global() and user_site: + sites.append(user_site) + else: + if user_site: + sites.append(user_site) + sites.append(site_packages) + + egg_link_names = _egg_link_names(raw_name) + for site in sites: + for egg_link_name in egg_link_names: + egglink = os.path.join(site, egg_link_name) + if os.path.isfile(egglink): + return egglink + return None diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/entrypoints.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/entrypoints.py new file mode 100644 index 0000000..e3a150e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/entrypoints.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import itertools +import os +import shutil +import sys + +from pip._internal.cli.main import main +from pip._internal.utils.compat import WINDOWS + +_EXECUTABLE_NAMES = [ + "pip", + f"pip{sys.version_info.major}", + f"pip{sys.version_info.major}.{sys.version_info.minor}", +] +if WINDOWS: + _allowed_extensions = {"", ".exe"} + _EXECUTABLE_NAMES = [ + "".join(parts) + for parts in itertools.product(_EXECUTABLE_NAMES, _allowed_extensions) + ] + + +def _wrapper(args: list[str] | None = None) -> int: + """Central wrapper for all old entrypoints. + + Historically pip has had several entrypoints defined. Because of issues + arising from PATH, sys.path, multiple Pythons, their interactions, and most + of them having a pip installed, users suffer every time an entrypoint gets + moved. + + To alleviate this pain, and provide a mechanism for warning users and + directing them to an appropriate place for help, we now define all of + our old entrypoints as wrappers for the current one. + """ + sys.stderr.write( + "WARNING: pip is being invoked by an old script wrapper. This will " + "fail in a future version of pip.\n" + "Please see https://github.com/pypa/pip/issues/5599 for advice on " + "fixing the underlying issue.\n" + "To avoid this problem you can invoke Python with '-m pip' instead of " + "running pip directly.\n" + ) + return main(args) + + +def get_best_invocation_for_this_pip() -> str: + """Try to figure out the best way to invoke pip in the current environment.""" + binary_directory = "Scripts" if WINDOWS else "bin" + binary_prefix = os.path.join(sys.prefix, binary_directory) + + # Try to use pip[X[.Y]] names, if those executables for this environment are + # the first on PATH with that name. + path_parts = os.path.normcase(os.environ.get("PATH", "")).split(os.pathsep) + exe_are_in_PATH = os.path.normcase(binary_prefix) in path_parts + if exe_are_in_PATH: + for exe_name in _EXECUTABLE_NAMES: + found_executable = shutil.which(exe_name) + binary_executable = os.path.join(binary_prefix, exe_name) + if ( + found_executable + and os.path.exists(binary_executable) + and os.path.samefile( + found_executable, + binary_executable, + ) + ): + return exe_name + + # Use the `-m` invocation, if there's no "nice" invocation. + return f"{get_best_invocation_for_this_python()} -m pip" + + +def get_best_invocation_for_this_python() -> str: + """Try to figure out the best way to invoke the current Python.""" + exe = sys.executable + exe_name = os.path.basename(exe) + + # Try to use the basename, if it's the first executable. 
+ found_executable = shutil.which(exe_name) + # Virtual environments often symlink to their parent Python binaries, but we don't + # want to treat the Python binaries as equivalent when the environment's Python is + # not on PATH (not activated). Thus, we don't follow symlinks. + if found_executable and os.path.samestat(os.lstat(found_executable), os.lstat(exe)): + return exe_name + + # Use the full executable name, because we couldn't find something simpler. + return exe diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/filesystem.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/filesystem.py new file mode 100644 index 0000000..d7c0524 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/filesystem.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +import fnmatch +import os +import os.path +import random +import sys +from collections.abc import Generator +from contextlib import contextmanager +from tempfile import NamedTemporaryFile +from typing import Any, BinaryIO, cast + +from pip._internal.utils.compat import get_path_uid +from pip._internal.utils.misc import format_size +from pip._internal.utils.retry import retry + + +def check_path_owner(path: str) -> bool: + # If we don't have a way to check the effective uid of this process, then + # we'll just assume that we own the directory. + if sys.platform == "win32" or not hasattr(os, "geteuid"): + return True + + assert os.path.isabs(path) + + previous = None + while path != previous: + if os.path.lexists(path): + # Check if path is writable by current user. + if os.geteuid() == 0: + # Special handling for root user in order to handle properly + # cases where users use sudo without -H flag. + try: + path_uid = get_path_uid(path) + except OSError: + return False + return path_uid == 0 + else: + return os.access(path, os.W_OK) + else: + previous, path = path, os.path.dirname(path) + return False # assume we don't own the path + + +@contextmanager +def adjacent_tmp_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]: + """Return a file-like object pointing to a tmp file next to path. + + The file is created securely and is ensured to be written to disk + after the context reaches its end. + + kwargs will be passed to tempfile.NamedTemporaryFile to control + the way the temporary file will be opened. + """ + with NamedTemporaryFile( + delete=False, + dir=os.path.dirname(path), + prefix=os.path.basename(path), + suffix=".tmp", + **kwargs, + ) as f: + result = cast(BinaryIO, f) + try: + yield result + finally: + result.flush() + os.fsync(result.fileno()) + + +replace = retry(stop_after_delay=1, wait=0.25)(os.replace) + + +# test_writable_dir and _test_writable_dir_win are copied from Flit, +# with the author's agreement to also place them under pip's license. +def test_writable_dir(path: str) -> bool: + """Check if a directory is writable. + + Uses os.access() on POSIX, tries creating files on Windows. + """ + # If the directory doesn't exist, find the closest parent that does. 
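Together, `adjacent_tmp_file()` and the retrying `replace` above support a write-then-rename pattern for near-atomic file updates. A minimal sketch; "example.txt" is an illustrative target path:

import os

from pip._internal.utils.filesystem import adjacent_tmp_file, replace

target = os.path.abspath("example.txt")  # illustrative target path
with adjacent_tmp_file(target) as f:
    f.write(b"new contents\n")           # flushed and fsync'd when the context exits
    tmp_path = f.name
replace(tmp_path, target)                # os.replace, retried briefly (helps on Windows)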
+ while not os.path.isdir(path): + parent = os.path.dirname(path) + if parent == path: + break # Should never get here, but infinite loops are bad + path = parent + + if os.name == "posix": + return os.access(path, os.W_OK) + + return _test_writable_dir_win(path) + + +def _test_writable_dir_win(path: str) -> bool: + # os.access doesn't work on Windows: http://bugs.python.org/issue2528 + # and we can't use tempfile: http://bugs.python.org/issue22107 + basename = "accesstest_deleteme_fishfingers_custard_" + alphabet = "abcdefghijklmnopqrstuvwxyz0123456789" + for _ in range(10): + name = basename + "".join(random.choice(alphabet) for _ in range(6)) + file = os.path.join(path, name) + try: + fd = os.open(file, os.O_RDWR | os.O_CREAT | os.O_EXCL) + except FileExistsError: + pass + except PermissionError: + # This could be because there's a directory with the same name. + # But it's highly unlikely there's a directory called that, + # so we'll assume it's because the parent dir is not writable. + # This could as well be because the parent dir is not readable, + # due to non-privileged user access. + return False + else: + os.close(fd) + os.unlink(file) + return True + + # This should never be reached + raise OSError("Unexpected condition testing for writable directory") + + +def find_files(path: str, pattern: str) -> list[str]: + """Returns a list of absolute paths of files beneath path, recursively, + with filenames which match the UNIX-style shell glob pattern.""" + result: list[str] = [] + for root, _, files in os.walk(path): + matches = fnmatch.filter(files, pattern) + result.extend(os.path.join(root, f) for f in matches) + return result + + +def file_size(path: str) -> int | float: + # If it's a symlink, return 0. + if os.path.islink(path): + return 0 + return os.path.getsize(path) + + +def format_file_size(path: str) -> str: + return format_size(file_size(path)) + + +def directory_size(path: str) -> int | float: + size = 0.0 + for root, _dirs, files in os.walk(path): + for filename in files: + file_path = os.path.join(root, filename) + size += file_size(file_path) + return size + + +def format_directory_size(path: str) -> str: + return format_size(directory_size(path)) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/filetypes.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/filetypes.py new file mode 100644 index 0000000..2b8baad --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/filetypes.py @@ -0,0 +1,24 @@ +"""Filetype information.""" + +from pip._internal.utils.misc import splitext + +WHEEL_EXTENSION = ".whl" +BZ2_EXTENSIONS: tuple[str, ...] = (".tar.bz2", ".tbz") +XZ_EXTENSIONS: tuple[str, ...] = ( + ".tar.xz", + ".txz", + ".tlz", + ".tar.lz", + ".tar.lzma", +) +ZIP_EXTENSIONS: tuple[str, ...] = (".zip", WHEEL_EXTENSION) +TAR_EXTENSIONS: tuple[str, ...] 
= (".tar.gz", ".tgz", ".tar") +ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS + + +def is_archive_file(name: str) -> bool: + """Return True if `name` is a considered as an archive file.""" + ext = splitext(name)[1].lower() + if ext in ARCHIVE_EXTENSIONS: + return True + return False diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/glibc.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/glibc.py new file mode 100644 index 0000000..2cb3013 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/glibc.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import os +import sys + + +def glibc_version_string() -> str | None: + "Returns glibc version string, or None if not using glibc." + return glibc_version_string_confstr() or glibc_version_string_ctypes() + + +def glibc_version_string_confstr() -> str | None: + "Primary implementation of glibc_version_string using os.confstr." + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module: + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183 + if sys.platform == "win32": + return None + try: + gnu_libc_version = os.confstr("CS_GNU_LIBC_VERSION") + if gnu_libc_version is None: + return None + # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17": + _, version = gnu_libc_version.split() + except (AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def glibc_version_string_ctypes() -> str | None: + "Fallback implementation of glibc_version_string using ctypes." + + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can't proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +# platform.libc_ver regularly returns completely nonsensical glibc +# versions. E.g. 
on my computer, platform says: +# +# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.7') +# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.9') +# +# But the truth is: +# +# ~$ ldd --version +# ldd (Debian GLIBC 2.22-11) 2.22 +# +# This is unfortunate, because it means that the linehaul data on libc +# versions that was generated by pip 8.1.2 and earlier is useless and +# misleading. Solution: instead of using platform, use our code that actually +# works. +def libc_ver() -> tuple[str, str]: + """Try to determine the glibc version + + Returns a tuple of strings (lib, version) which default to empty strings + in case the lookup fails. + """ + glibc_version = glibc_version_string() + if glibc_version is None: + return ("", "") + else: + return ("glibc", glibc_version) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/hashes.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/hashes.py new file mode 100644 index 0000000..3d8c125 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/hashes.py @@ -0,0 +1,150 @@ +from __future__ import annotations + +import hashlib +from collections.abc import Iterable +from typing import TYPE_CHECKING, BinaryIO, NoReturn + +from pip._internal.exceptions import HashMismatch, HashMissing, InstallationError +from pip._internal.utils.misc import read_chunks + +if TYPE_CHECKING: + from hashlib import _Hash + + +# The recommended hash algo of the moment. Change this whenever the state of +# the art changes; it won't hurt backward compatibility. +FAVORITE_HASH = "sha256" + + +# Names of hashlib algorithms allowed by the --hash option and ``pip hash`` +# Currently, those are the ones at least as collision-resistant as sha256. +STRONG_HASHES = ["sha256", "sha384", "sha512"] + + +class Hashes: + """A wrapper that builds multiple hashes at once and checks them against + known-good values + + """ + + def __init__(self, hashes: dict[str, list[str]] | None = None) -> None: + """ + :param hashes: A dict of algorithm names pointing to lists of allowed + hex digests + """ + allowed = {} + if hashes is not None: + for alg, keys in hashes.items(): + # Make sure values are always sorted (to ease equality checks) + allowed[alg] = [k.lower() for k in sorted(keys)] + self._allowed = allowed + + def __and__(self, other: Hashes) -> Hashes: + if not isinstance(other, Hashes): + return NotImplemented + + # If either of the Hashes object is entirely empty (i.e. no hash + # specified at all), all hashes from the other object are allowed. + if not other: + return self + if not self: + return other + + # Otherwise only hashes that present in both objects are allowed. + new = {} + for alg, values in other._allowed.items(): + if alg not in self._allowed: + continue + new[alg] = [v for v in values if v in self._allowed[alg]] + return Hashes(new) + + @property + def digest_count(self) -> int: + return sum(len(digests) for digests in self._allowed.values()) + + def is_hash_allowed(self, hash_name: str, hex_digest: str) -> bool: + """Return whether the given hex digest is allowed.""" + return hex_digest in self._allowed.get(hash_name, []) + + def check_against_chunks(self, chunks: Iterable[bytes]) -> None: + """Check good hashes against ones built from iterable of chunks of + data. + + Raise HashMismatch if none match. 
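Looking back at the glibc module completed above: a minimal sketch of probing the runtime C library; it prints None and ("", "") on musl, Windows, or macOS:

from pip._internal.utils.glibc import glibc_version_string, libc_ver

print(glibc_version_string())  # e.g. "2.31" on a glibc-based Linux, else None
print(libc_ver())              # e.g. ("glibc", "2.31"), or ("", "") if unknown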
+ + """ + gots = {} + for hash_name in self._allowed.keys(): + try: + gots[hash_name] = hashlib.new(hash_name) + except (ValueError, TypeError): + raise InstallationError(f"Unknown hash name: {hash_name}") + + for chunk in chunks: + for hash in gots.values(): + hash.update(chunk) + + for hash_name, got in gots.items(): + if got.hexdigest() in self._allowed[hash_name]: + return + self._raise(gots) + + def _raise(self, gots: dict[str, _Hash]) -> NoReturn: + raise HashMismatch(self._allowed, gots) + + def check_against_file(self, file: BinaryIO) -> None: + """Check good hashes against a file-like object + + Raise HashMismatch if none match. + + """ + return self.check_against_chunks(read_chunks(file)) + + def check_against_path(self, path: str) -> None: + with open(path, "rb") as file: + return self.check_against_file(file) + + def has_one_of(self, hashes: dict[str, str]) -> bool: + """Return whether any of the given hashes are allowed.""" + for hash_name, hex_digest in hashes.items(): + if self.is_hash_allowed(hash_name, hex_digest): + return True + return False + + def __bool__(self) -> bool: + """Return whether I know any known-good hashes.""" + return bool(self._allowed) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Hashes): + return NotImplemented + return self._allowed == other._allowed + + def __hash__(self) -> int: + return hash( + ",".join( + sorted( + ":".join((alg, digest)) + for alg, digest_list in self._allowed.items() + for digest in digest_list + ) + ) + ) + + +class MissingHashes(Hashes): + """A workalike for Hashes used when we're missing a hash for a requirement + + It computes the actual hash of the requirement and raises a HashMissing + exception showing it to the user. + + """ + + def __init__(self) -> None: + """Don't offer the ``hashes`` kwarg.""" + # Pass our favorite hash in to generate a "gotten hash". With the + # empty list, it will never match, so an error will always raise. + super().__init__(hashes={FAVORITE_HASH: []}) + + def _raise(self, gots: dict[str, _Hash]) -> NoReturn: + raise HashMissing(gots[FAVORITE_HASH].hexdigest()) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/logging.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/logging.py new file mode 100644 index 0000000..5cdbeb7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/logging.py @@ -0,0 +1,364 @@ +from __future__ import annotations + +import contextlib +import errno +import logging +import logging.handlers +import os +import sys +import threading +from collections.abc import Generator +from dataclasses import dataclass +from io import TextIOWrapper +from logging import Filter +from typing import Any, ClassVar + +from pip._vendor.rich.console import ( + Console, + ConsoleOptions, + ConsoleRenderable, + RenderableType, + RenderResult, + RichCast, +) +from pip._vendor.rich.highlighter import NullHighlighter +from pip._vendor.rich.logging import RichHandler +from pip._vendor.rich.segment import Segment +from pip._vendor.rich.style import Style + +from pip._internal.utils._log import VERBOSE, getLogger +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX +from pip._internal.utils.misc import ensure_dir + +_log_state = threading.local() +_stdout_console = None +_stderr_console = None +subprocess_logger = getLogger("pip.subprocessor") + + +class BrokenStdoutLoggingError(Exception): + """ + Raised if BrokenPipeError occurs for the stdout stream while logging. 
+ """ + + +def _is_broken_pipe_error(exc_class: type[BaseException], exc: BaseException) -> bool: + if exc_class is BrokenPipeError: + return True + + # On Windows, a broken pipe can show up as EINVAL rather than EPIPE: + # https://bugs.python.org/issue19612 + # https://bugs.python.org/issue30418 + if not WINDOWS: + return False + + return isinstance(exc, OSError) and exc.errno in (errno.EINVAL, errno.EPIPE) + + +@contextlib.contextmanager +def indent_log(num: int = 2) -> Generator[None, None, None]: + """ + A context manager which will cause the log output to be indented for any + log messages emitted inside it. + """ + # For thread-safety + _log_state.indentation = get_indentation() + _log_state.indentation += num + try: + yield + finally: + _log_state.indentation -= num + + +def get_indentation() -> int: + return getattr(_log_state, "indentation", 0) + + +class IndentingFormatter(logging.Formatter): + default_time_format = "%Y-%m-%dT%H:%M:%S" + + def __init__( + self, + *args: Any, + add_timestamp: bool = False, + **kwargs: Any, + ) -> None: + """ + A logging.Formatter that obeys the indent_log() context manager. + + :param add_timestamp: A bool indicating output lines should be prefixed + with their record's timestamp. + """ + self.add_timestamp = add_timestamp + super().__init__(*args, **kwargs) + + def get_message_start(self, formatted: str, levelno: int) -> str: + """ + Return the start of the formatted log message (not counting the + prefix to add to each line). + """ + if levelno < logging.WARNING: + return "" + if formatted.startswith(DEPRECATION_MSG_PREFIX): + # Then the message already has a prefix. We don't want it to + # look like "WARNING: DEPRECATION: ...." + return "" + if levelno < logging.ERROR: + return "WARNING: " + + return "ERROR: " + + def format(self, record: logging.LogRecord) -> str: + """ + Calls the standard formatter, but will indent all of the log message + lines by our current indentation level. + """ + formatted = super().format(record) + message_start = self.get_message_start(formatted, record.levelno) + formatted = message_start + formatted + + prefix = "" + if self.add_timestamp: + prefix = f"{self.formatTime(record)} " + prefix += " " * get_indentation() + formatted = "".join([prefix + line for line in formatted.splitlines(True)]) + return formatted + + +@dataclass +class IndentedRenderable: + renderable: RenderableType + indent: int + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + segments = console.render(self.renderable, options) + lines = Segment.split_lines(segments) + for line in lines: + yield Segment(" " * self.indent) + yield from line + yield Segment("\n") + + +class PipConsole(Console): + def on_broken_pipe(self) -> None: + # Reraise the original exception, rich 13.8.0+ exits by default + # instead, preventing our handler from firing. + raise BrokenPipeError() from None + + +def get_console(*, stderr: bool = False) -> Console: + if stderr: + assert _stderr_console is not None, "stderr rich console is missing!" + return _stderr_console + else: + assert _stdout_console is not None, "stdout rich console is missing!" + return _stdout_console + + +class RichPipStreamHandler(RichHandler): + KEYWORDS: ClassVar[list[str] | None] = [] + + def __init__(self, console: Console) -> None: + super().__init__( + console=console, + show_time=False, + show_level=False, + show_path=False, + highlighter=NullHighlighter(), + ) + + # Our custom override on Rich's logger, to make things work as we need them to. 
+ def emit(self, record: logging.LogRecord) -> None: + style: Style | None = None + + # If we are given a diagnostic error to present, present it with indentation. + if getattr(record, "rich", False): + assert isinstance(record.args, tuple) + (rich_renderable,) = record.args + assert isinstance( + rich_renderable, (ConsoleRenderable, RichCast, str) + ), f"{rich_renderable} is not rich-console-renderable" + + renderable: RenderableType = IndentedRenderable( + rich_renderable, indent=get_indentation() + ) + else: + message = self.format(record) + renderable = self.render_message(record, message) + if record.levelno is not None: + if record.levelno >= logging.ERROR: + style = Style(color="red") + elif record.levelno >= logging.WARNING: + style = Style(color="yellow") + + try: + self.console.print(renderable, overflow="ignore", crop=False, style=style) + except Exception: + self.handleError(record) + + def handleError(self, record: logging.LogRecord) -> None: + """Called when logging is unable to log some output.""" + + exc_class, exc = sys.exc_info()[:2] + # If a broken pipe occurred while calling write() or flush() on the + # stdout stream in logging's Handler.emit(), then raise our special + # exception so we can handle it in main() instead of logging the + # broken pipe error and continuing. + if ( + exc_class + and exc + and self.console.file is sys.stdout + and _is_broken_pipe_error(exc_class, exc) + ): + raise BrokenStdoutLoggingError() + + return super().handleError(record) + + +class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): + def _open(self) -> TextIOWrapper: + ensure_dir(os.path.dirname(self.baseFilename)) + return super()._open() + + +class MaxLevelFilter(Filter): + def __init__(self, level: int) -> None: + self.level = level + + def filter(self, record: logging.LogRecord) -> bool: + return record.levelno < self.level + + +class ExcludeLoggerFilter(Filter): + """ + A logging Filter that excludes records from a logger (or its children). + """ + + def filter(self, record: logging.LogRecord) -> bool: + # The base Filter class allows only records from a logger (or its + # children). + return not super().filter(record) + + +def setup_logging(verbosity: int, no_color: bool, user_log_file: str | None) -> int: + """Configures and sets up all of the logging + + Returns the requested logging level, as its integer value. + """ + + # Determine the level to be logging at. + if verbosity >= 2: + level_number = logging.DEBUG + elif verbosity == 1: + level_number = VERBOSE + elif verbosity == -1: + level_number = logging.WARNING + elif verbosity == -2: + level_number = logging.ERROR + elif verbosity <= -3: + level_number = logging.CRITICAL + else: + level_number = logging.INFO + + level = logging.getLevelName(level_number) + + # The "root" logger should match the "console" level *unless* we also need + # to log to a user log file. + include_user_log = user_log_file is not None + if include_user_log: + additional_log_file = user_log_file + root_level = "DEBUG" + else: + additional_log_file = "/dev/null" + root_level = level + + # Disable any logging besides WARNING unless we have DEBUG level logging + # enabled for vendored libraries. 
+ vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG" + + # Shorthands for clarity + handler_classes = { + "stream": "pip._internal.utils.logging.RichPipStreamHandler", + "file": "pip._internal.utils.logging.BetterRotatingFileHandler", + } + handlers = ["console", "console_errors", "console_subprocess"] + ( + ["user_log"] if include_user_log else [] + ) + global _stdout_console, stderr_console + _stdout_console = PipConsole(file=sys.stdout, no_color=no_color, soft_wrap=True) + _stderr_console = PipConsole(file=sys.stderr, no_color=no_color, soft_wrap=True) + + logging.config.dictConfig( + { + "version": 1, + "disable_existing_loggers": False, + "filters": { + "exclude_warnings": { + "()": "pip._internal.utils.logging.MaxLevelFilter", + "level": logging.WARNING, + }, + "restrict_to_subprocess": { + "()": "logging.Filter", + "name": subprocess_logger.name, + }, + "exclude_subprocess": { + "()": "pip._internal.utils.logging.ExcludeLoggerFilter", + "name": subprocess_logger.name, + }, + }, + "formatters": { + "indent": { + "()": IndentingFormatter, + "format": "%(message)s", + }, + "indent_with_timestamp": { + "()": IndentingFormatter, + "format": "%(message)s", + "add_timestamp": True, + }, + }, + "handlers": { + "console": { + "level": level, + "class": handler_classes["stream"], + "console": _stdout_console, + "filters": ["exclude_subprocess", "exclude_warnings"], + "formatter": "indent", + }, + "console_errors": { + "level": "WARNING", + "class": handler_classes["stream"], + "console": _stderr_console, + "filters": ["exclude_subprocess"], + "formatter": "indent", + }, + # A handler responsible for logging to the console messages + # from the "subprocessor" logger. + "console_subprocess": { + "level": level, + "class": handler_classes["stream"], + "console": _stderr_console, + "filters": ["restrict_to_subprocess"], + "formatter": "indent", + }, + "user_log": { + "level": "DEBUG", + "class": handler_classes["file"], + "filename": additional_log_file, + "encoding": "utf-8", + "delay": True, + "formatter": "indent_with_timestamp", + }, + }, + "root": { + "level": root_level, + "handlers": handlers, + }, + "loggers": {"pip._vendor": {"level": vendored_log_level}}, + } + ) + + return level_number diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/misc.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/misc.py new file mode 100644 index 0000000..3a28e84 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/misc.py @@ -0,0 +1,765 @@ +from __future__ import annotations + +import errno +import getpass +import hashlib +import logging +import os +import posixpath +import shutil +import stat +import sys +import sysconfig +import urllib.parse +from collections.abc import Generator, Iterable, Iterator, Mapping, Sequence +from dataclasses import dataclass +from functools import partial +from io import StringIO +from itertools import filterfalse, tee, zip_longest +from pathlib import Path +from types import FunctionType, TracebackType +from typing import ( + Any, + BinaryIO, + Callable, + Optional, + TextIO, + TypeVar, + cast, +) + +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.pyproject_hooks import BuildBackendHookCaller + +from pip import __version__ +from pip._internal.exceptions import CommandError, ExternallyManagedEnvironment +from pip._internal.locations import get_major_minor_version +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.retry import retry +from 
pip._internal.utils.virtualenv import running_under_virtualenv + +__all__ = [ + "rmtree", + "display_path", + "backup_dir", + "ask", + "splitext", + "format_size", + "is_installable_dir", + "normalize_path", + "renames", + "get_prog", + "ensure_dir", + "remove_auth_from_url", + "check_externally_managed", + "ConfiguredBuildBackendHookCaller", +] + +logger = logging.getLogger(__name__) + +T = TypeVar("T") +ExcInfo = tuple[type[BaseException], BaseException, TracebackType] +VersionInfo = tuple[int, int, int] +NetlocTuple = tuple[str, tuple[Optional[str], Optional[str]]] +OnExc = Callable[[FunctionType, Path, BaseException], Any] +OnErr = Callable[[FunctionType, Path, ExcInfo], Any] + +FILE_CHUNK_SIZE = 1024 * 1024 + + +def get_pip_version() -> str: + pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..") + pip_pkg_dir = os.path.abspath(pip_pkg_dir) + + return f"pip {__version__} from {pip_pkg_dir} (python {get_major_minor_version()})" + + +def normalize_version_info(py_version_info: tuple[int, ...]) -> tuple[int, int, int]: + """ + Convert a tuple of ints representing a Python version to one of length + three. + + :param py_version_info: a tuple of ints representing a Python version, + or None to specify no version. The tuple can have any length. + + :return: a tuple of length three if `py_version_info` is non-None. + Otherwise, return `py_version_info` unchanged (i.e. None). + """ + if len(py_version_info) < 3: + py_version_info += (3 - len(py_version_info)) * (0,) + elif len(py_version_info) > 3: + py_version_info = py_version_info[:3] + + return cast("VersionInfo", py_version_info) + + +def ensure_dir(path: str) -> None: + """os.path.makedirs without EEXIST.""" + try: + os.makedirs(path) + except OSError as e: + # Windows can raise spurious ENOTEMPTY errors. See #6426. + if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY: + raise + + +def get_prog() -> str: + try: + prog = os.path.basename(sys.argv[0]) + if prog in ("__main__.py", "-c"): + return f"{sys.executable} -m pip" + else: + return prog + except (AttributeError, TypeError, IndexError): + pass + return "pip" + + +# Retry every half second for up to 3 seconds +@retry(stop_after_delay=3, wait=0.5) +def rmtree(dir: str, ignore_errors: bool = False, onexc: OnExc | None = None) -> None: + if ignore_errors: + onexc = _onerror_ignore + if onexc is None: + onexc = _onerror_reraise + handler: OnErr = partial(rmtree_errorhandler, onexc=onexc) + if sys.version_info >= (3, 12): + # See https://docs.python.org/3.12/whatsnew/3.12.html#shutil. + shutil.rmtree(dir, onexc=handler) # type: ignore + else: + shutil.rmtree(dir, onerror=handler) # type: ignore + + +def _onerror_ignore(*_args: Any) -> None: + pass + + +def _onerror_reraise(*_args: Any) -> None: + raise # noqa: PLE0704 - Bare exception used to reraise existing exception + + +def rmtree_errorhandler( + func: FunctionType, + path: Path, + exc_info: ExcInfo | BaseException, + *, + onexc: OnExc = _onerror_reraise, +) -> None: + """ + `rmtree` error handler to 'force' a file remove (i.e. like `rm -f`). + + * If a file is readonly then it's write flag is set and operation is + retried. + + * `onerror` is the original callback from `rmtree(... onerror=onerror)` + that is chained at the end if the "rm -f" still fails. 
+ """ + try: + st_mode = os.stat(path).st_mode + except OSError: + # it's equivalent to os.path.exists + return + + if not st_mode & stat.S_IWRITE: + # convert to read/write + try: + os.chmod(path, st_mode | stat.S_IWRITE) + except OSError: + pass + else: + # use the original function to repeat the operation + try: + func(path) + return + except OSError: + pass + + if not isinstance(exc_info, BaseException): + _, exc_info, _ = exc_info + onexc(func, path, exc_info) + + +def display_path(path: str) -> str: + """Gives the display value for a given path, making it relative to cwd + if possible.""" + path = os.path.normcase(os.path.abspath(path)) + if path.startswith(os.getcwd() + os.path.sep): + path = "." + path[len(os.getcwd()) :] + return path + + +def backup_dir(dir: str, ext: str = ".bak") -> str: + """Figure out the name of a directory to back up the given dir to + (adding .bak, .bak2, etc)""" + n = 1 + extension = ext + while os.path.exists(dir + extension): + n += 1 + extension = ext + str(n) + return dir + extension + + +def ask_path_exists(message: str, options: Iterable[str]) -> str: + for action in os.environ.get("PIP_EXISTS_ACTION", "").split(): + if action in options: + return action + return ask(message, options) + + +def _check_no_input(message: str) -> None: + """Raise an error if no input is allowed.""" + if os.environ.get("PIP_NO_INPUT"): + raise Exception( + f"No input was expected ($PIP_NO_INPUT set); question: {message}" + ) + + +def ask(message: str, options: Iterable[str]) -> str: + """Ask the message interactively, with the given possible responses""" + while 1: + _check_no_input(message) + response = input(message) + response = response.strip().lower() + if response not in options: + print( + "Your response ({!r}) was not one of the expected responses: " + "{}".format(response, ", ".join(options)) + ) + else: + return response + + +def ask_input(message: str) -> str: + """Ask for input interactively.""" + _check_no_input(message) + return input(message) + + +def ask_password(message: str) -> str: + """Ask for a password interactively.""" + _check_no_input(message) + return getpass.getpass(message) + + +def strtobool(val: str) -> int: + """Convert a string representation of truth to true (1) or false (0). + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif val in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError(f"invalid truth value {val!r}") + + +def format_size(bytes: float) -> str: + if bytes > 1000 * 1000: + return f"{bytes / 1000.0 / 1000:.1f} MB" + elif bytes > 10 * 1000: + return f"{int(bytes / 1000)} kB" + elif bytes > 1000: + return f"{bytes / 1000.0:.1f} kB" + else: + return f"{int(bytes)} bytes" + + +def tabulate(rows: Iterable[Iterable[Any]]) -> tuple[list[str], list[int]]: + """Return a list of formatted rows and a list of column sizes. + + For example:: + + >>> tabulate([['foobar', 2000], [0xdeadbeef]]) + (['foobar 2000', '3735928559'], [10, 4]) + """ + rows = [tuple(map(str, row)) for row in rows] + sizes = [max(map(len, col)) for col in zip_longest(*rows, fillvalue="")] + table = [" ".join(map(str.ljust, row, sizes)).rstrip() for row in rows] + return table, sizes + + +def is_installable_dir(path: str) -> bool: + """Is path is a directory containing pyproject.toml or setup.py? 
+ + If pyproject.toml exists, this is a PEP 517 project. Otherwise we look for + a legacy setuptools layout by identifying setup.py. We don't check for the + setup.cfg because using it without setup.py is only available for PEP 517 + projects, which are already covered by the pyproject.toml check. + """ + if not os.path.isdir(path): + return False + if os.path.isfile(os.path.join(path, "pyproject.toml")): + return True + if os.path.isfile(os.path.join(path, "setup.py")): + return True + return False + + +def read_chunks( + file: BinaryIO, size: int = FILE_CHUNK_SIZE +) -> Generator[bytes, None, None]: + """Yield pieces of data from a file-like object until EOF.""" + while True: + chunk = file.read(size) + if not chunk: + break + yield chunk + + +def normalize_path(path: str, resolve_symlinks: bool = True) -> str: + """ + Convert a path to its canonical, case-normalized, absolute version. + + """ + path = os.path.expanduser(path) + if resolve_symlinks: + path = os.path.realpath(path) + else: + path = os.path.abspath(path) + return os.path.normcase(path) + + +def splitext(path: str) -> tuple[str, str]: + """Like os.path.splitext, but take off .tar too""" + base, ext = posixpath.splitext(path) + if base.lower().endswith(".tar"): + ext = base[-4:] + ext + base = base[:-4] + return base, ext + + +def renames(old: str, new: str) -> None: + """Like os.renames(), but handles renaming across devices.""" + # Implementation borrowed from os.renames(). + head, tail = os.path.split(new) + if head and tail and not os.path.exists(head): + os.makedirs(head) + + shutil.move(old, new) + + head, tail = os.path.split(old) + if head and tail: + try: + os.removedirs(head) + except OSError: + pass + + +def is_local(path: str) -> bool: + """ + Return True if path is within sys.prefix, if we're running in a virtualenv. + + If we're not in a virtualenv, all paths are considered "local." + + Caution: this function assumes the head of path has been normalized + with normalize_path. + """ + if not running_under_virtualenv(): + return True + return path.startswith(normalize_path(sys.prefix)) + + +def write_output(msg: Any, *args: Any) -> None: + logger.info(msg, *args) + + +class StreamWrapper(StringIO): + orig_stream: TextIO + + @classmethod + def from_stream(cls, orig_stream: TextIO) -> StreamWrapper: + ret = cls() + ret.orig_stream = orig_stream + return ret + + # compileall.compile_dir() needs stdout.encoding to print to stdout + # type ignore is because TextIOBase.encoding is writeable + @property + def encoding(self) -> str: # type: ignore + return self.orig_stream.encoding + + +# Simulates an enum +def enum(*sequential: Any, **named: Any) -> type[Any]: + enums = dict(zip(sequential, range(len(sequential))), **named) + reverse = {value: key for key, value in enums.items()} + enums["reverse_mapping"] = reverse + return type("Enum", (), enums) + + +def build_netloc(host: str, port: int | None) -> str: + """ + Build a netloc from a host-port pair + """ + if port is None: + return host + if ":" in host: + # Only wrap host with square brackets when it is IPv6 + host = f"[{host}]" + return f"{host}:{port}" + + +def build_url_from_netloc(netloc: str, scheme: str = "https") -> str: + """ + Build a full URL from a netloc. + """ + if netloc.count(":") >= 2 and "@" not in netloc and "[" not in netloc: + # It must be a bare IPv6 address, so wrap it with brackets. 
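+        # Illustrative example: the bare address "2001:db8::1" becomes
+        # "[2001:db8::1]", so the final URL is "https://[2001:db8::1]".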
+ netloc = f"[{netloc}]" + return f"{scheme}://{netloc}" + + +def parse_netloc(netloc: str) -> tuple[str | None, int | None]: + """ + Return the host-port pair from a netloc. + """ + url = build_url_from_netloc(netloc) + parsed = urllib.parse.urlparse(url) + return parsed.hostname, parsed.port + + +def split_auth_from_netloc(netloc: str) -> NetlocTuple: + """ + Parse out and remove the auth information from a netloc. + + Returns: (netloc, (username, password)). + """ + if "@" not in netloc: + return netloc, (None, None) + + # Split from the right because that's how urllib.parse.urlsplit() + # behaves if more than one @ is present (which can be checked using + # the password attribute of urlsplit()'s return value). + auth, netloc = netloc.rsplit("@", 1) + pw: str | None = None + if ":" in auth: + # Split from the left because that's how urllib.parse.urlsplit() + # behaves if more than one : is present (which again can be checked + # using the password attribute of the return value) + user, pw = auth.split(":", 1) + else: + user, pw = auth, None + + user = urllib.parse.unquote(user) + if pw is not None: + pw = urllib.parse.unquote(pw) + + return netloc, (user, pw) + + +def redact_netloc(netloc: str) -> str: + """ + Replace the sensitive data in a netloc with "****", if it exists. + + For example: + - "user:pass@example.com" returns "user:****@example.com" + - "accesstoken@example.com" returns "****@example.com" + """ + netloc, (user, password) = split_auth_from_netloc(netloc) + if user is None: + return netloc + if password is None: + user = "****" + password = "" + else: + user = urllib.parse.quote(user) + password = ":****" + return f"{user}{password}@{netloc}" + + +def _transform_url( + url: str, transform_netloc: Callable[[str], tuple[Any, ...]] +) -> tuple[str, NetlocTuple]: + """Transform and replace netloc in a url. + + transform_netloc is a function taking the netloc and returning a + tuple. The first element of this tuple is the new netloc. The + entire tuple is returned. + + Returns a tuple containing the transformed url as item 0 and the + original tuple returned by transform_netloc as item 1. + """ + purl = urllib.parse.urlsplit(url) + netloc_tuple = transform_netloc(purl.netloc) + # stripped url + url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment) + surl = urllib.parse.urlunsplit(url_pieces) + return surl, cast("NetlocTuple", netloc_tuple) + + +def _get_netloc(netloc: str) -> NetlocTuple: + return split_auth_from_netloc(netloc) + + +def _redact_netloc(netloc: str) -> tuple[str]: + return (redact_netloc(netloc),) + + +def split_auth_netloc_from_url( + url: str, +) -> tuple[str, str, tuple[str | None, str | None]]: + """ + Parse a url into separate netloc, auth, and url with no auth. + + Returns: (url_without_auth, netloc, (username, password)) + """ + url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc) + return url_without_auth, netloc, auth + + +def remove_auth_from_url(url: str) -> str: + """Return a copy of url with 'username:password@' removed.""" + # username/pass params are passed to subversion through flags + # and are not recognized in the url. 
+    return _transform_url(url, _get_netloc)[0]
+
+
+def redact_auth_from_url(url: str) -> str:
+    """Replace the password in a given url with ****."""
+    return _transform_url(url, _redact_netloc)[0]
+
+
+def redact_auth_from_requirement(req: Requirement) -> str:
+    """Replace the password in a given requirement url with ****."""
+    if not req.url:
+        return str(req)
+    return str(req).replace(req.url, redact_auth_from_url(req.url))
+
+
+@dataclass(frozen=True)
+class HiddenText:
+    secret: str
+    redacted: str
+
+    def __repr__(self) -> str:
+        return f"<HiddenText {self.redacted!r}>"
+
+    def __str__(self) -> str:
+        return self.redacted
+
+    # This is useful for testing.
+    def __eq__(self, other: Any) -> bool:
+        if type(self) is not type(other):
+            return False
+
+        # The string being used for redaction doesn't also have to match,
+        # just the raw, original string.
+        return self.secret == other.secret
+
+
+def hide_value(value: str) -> HiddenText:
+    return HiddenText(value, redacted="****")
+
+
+def hide_url(url: str) -> HiddenText:
+    redacted = redact_auth_from_url(url)
+    return HiddenText(url, redacted=redacted)
+
+
+def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None:
+    """Protection of pip.exe from modification on Windows.
+
+    On Windows, any operation modifying pip should be run as:
+        python -m pip ...
+    """
+    pip_names = [
+        "pip",
+        f"pip{sys.version_info.major}",
+        f"pip{sys.version_info.major}.{sys.version_info.minor}",
+    ]
+
+    # See https://github.com/pypa/pip/issues/1299 for more discussion
+    should_show_use_python_msg = (
+        modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names
+    )
+
+    if should_show_use_python_msg:
+        new_command = [sys.executable, "-m", "pip"] + sys.argv[1:]
+        raise CommandError(
+            "To modify pip, please run the following command:\n{}".format(
+                " ".join(new_command)
+            )
+        )
+
+
+def check_externally_managed() -> None:
+    """Check whether the current environment is externally managed.
+
+    If the ``EXTERNALLY-MANAGED`` config file is found, the current environment
+    is considered externally managed, and an ExternallyManagedEnvironment is
+    raised.
+    """
+    if running_under_virtualenv():
+        return
+    marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED")
+    if not os.path.isfile(marker):
+        return
+    raise ExternallyManagedEnvironment.from_config(marker)
+
+
+def is_console_interactive() -> bool:
+    """Is this console interactive?"""
+    return sys.stdin is not None and sys.stdin.isatty()
+
+
+def hash_file(path: str, blocksize: int = 1 << 20) -> tuple[Any, int]:
+    """Return (hash, length) for path using hashlib.sha256()"""
+
+    h = hashlib.sha256()
+    length = 0
+    with open(path, "rb") as f:
+        for block in read_chunks(f, size=blocksize):
+            length += len(block)
+            h.update(block)
+    return h, length
+
+
+def pairwise(iterable: Iterable[Any]) -> Iterator[tuple[Any, Any]]:
+    """
+    Return paired elements.
+
+    For example:
+        s -> (s0, s1), (s2, s3), (s4, s5), ...
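+
+    Illustrative sketch of the odd-length case (zip_longest pads the final
+    pair with None):
+
+        pairwise([1, 2, 3]) -> (1, 2), (3, None)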
+ """ + iterable = iter(iterable) + return zip_longest(iterable, iterable) + + +def partition( + pred: Callable[[T], bool], iterable: Iterable[T] +) -> tuple[Iterable[T], Iterable[T]]: + """ + Use a predicate to partition entries into false entries and true entries, + like + + partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 + """ + t1, t2 = tee(iterable) + return filterfalse(pred, t1), filter(pred, t2) + + +class ConfiguredBuildBackendHookCaller(BuildBackendHookCaller): + def __init__( + self, + config_holder: Any, + source_dir: str, + build_backend: str, + backend_path: str | None = None, + runner: Callable[..., None] | None = None, + python_executable: str | None = None, + ): + super().__init__( + source_dir, build_backend, backend_path, runner, python_executable + ) + self.config_holder = config_holder + + def build_wheel( + self, + wheel_directory: str, + config_settings: Mapping[str, Any] | None = None, + metadata_directory: str | None = None, + ) -> str: + cs = self.config_holder.config_settings + return super().build_wheel( + wheel_directory, config_settings=cs, metadata_directory=metadata_directory + ) + + def build_sdist( + self, + sdist_directory: str, + config_settings: Mapping[str, Any] | None = None, + ) -> str: + cs = self.config_holder.config_settings + return super().build_sdist(sdist_directory, config_settings=cs) + + def build_editable( + self, + wheel_directory: str, + config_settings: Mapping[str, Any] | None = None, + metadata_directory: str | None = None, + ) -> str: + cs = self.config_holder.config_settings + return super().build_editable( + wheel_directory, config_settings=cs, metadata_directory=metadata_directory + ) + + def get_requires_for_build_wheel( + self, config_settings: Mapping[str, Any] | None = None + ) -> Sequence[str]: + cs = self.config_holder.config_settings + return super().get_requires_for_build_wheel(config_settings=cs) + + def get_requires_for_build_sdist( + self, config_settings: Mapping[str, Any] | None = None + ) -> Sequence[str]: + cs = self.config_holder.config_settings + return super().get_requires_for_build_sdist(config_settings=cs) + + def get_requires_for_build_editable( + self, config_settings: Mapping[str, Any] | None = None + ) -> Sequence[str]: + cs = self.config_holder.config_settings + return super().get_requires_for_build_editable(config_settings=cs) + + def prepare_metadata_for_build_wheel( + self, + metadata_directory: str, + config_settings: Mapping[str, Any] | None = None, + _allow_fallback: bool = True, + ) -> str: + cs = self.config_holder.config_settings + return super().prepare_metadata_for_build_wheel( + metadata_directory=metadata_directory, + config_settings=cs, + _allow_fallback=_allow_fallback, + ) + + def prepare_metadata_for_build_editable( + self, + metadata_directory: str, + config_settings: Mapping[str, Any] | None = None, + _allow_fallback: bool = True, + ) -> str | None: + cs = self.config_holder.config_settings + return super().prepare_metadata_for_build_editable( + metadata_directory=metadata_directory, + config_settings=cs, + _allow_fallback=_allow_fallback, + ) + + +def warn_if_run_as_root() -> None: + """Output a warning for sudo users on Unix. + + In a virtual environment, sudo pip still writes to virtualenv. + On Windows, users may run pip as Administrator without issues. + This warning only applies to Unix root users outside of virtualenv. + """ + if running_under_virtualenv(): + return + if not hasattr(os, "getuid"): + return + # On Windows, there are no "system managed" Python packages. 
Installing as + # Administrator via pip is the correct way of updating system environments. + # + # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform + # checks: https://mypy.readthedocs.io/en/stable/common_issues.html + if sys.platform == "win32" or sys.platform == "cygwin": + return + + if os.getuid() != 0: + return + + logger.warning( + "Running pip as the 'root' user can result in broken permissions and " + "conflicting behaviour with the system package manager, possibly " + "rendering your system unusable. " + "It is recommended to use a virtual environment instead: " + "https://pip.pypa.io/warnings/venv. " + "Use the --root-user-action option if you know what you are doing and " + "want to suppress this warning." + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/packaging.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/packaging.py new file mode 100644 index 0000000..3cbc049 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/packaging.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +import functools +import logging + +from pip._vendor.packaging import specifiers, version +from pip._vendor.packaging.requirements import Requirement + +logger = logging.getLogger(__name__) + + +@functools.lru_cache(maxsize=32) +def check_requires_python( + requires_python: str | None, version_info: tuple[int, ...] +) -> bool: + """ + Check if the given Python version matches a "Requires-Python" specifier. + + :param version_info: A 3-tuple of ints representing a Python + major-minor-micro version to check (e.g. `sys.version_info[:3]`). + + :return: `True` if the given Python version satisfies the requirement. + Otherwise, return `False`. + + :raises InvalidSpecifier: If `requires_python` has an invalid format. + """ + if requires_python is None: + # The package provides no information + return True + requires_python_specifier = specifiers.SpecifierSet(requires_python) + + python_version = version.parse(".".join(map(str, version_info))) + return python_version in requires_python_specifier + + +@functools.lru_cache(maxsize=10000) +def get_requirement(req_string: str) -> Requirement: + """Construct a packaging.Requirement object with caching""" + # Parsing requirement strings is expensive, and is also expected to happen + # with a low diversity of different arguments (at least relative the number + # constructed). This method adds a cache to requirement object creation to + # minimize repeated parsing of the same string to construct equivalent + # Requirement objects. + return Requirement(req_string) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/retry.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/retry.py new file mode 100644 index 0000000..27d3b6e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/retry.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import functools +from time import perf_counter, sleep +from typing import TYPE_CHECKING, Callable, TypeVar + +if TYPE_CHECKING: + from typing_extensions import ParamSpec + + T = TypeVar("T") + P = ParamSpec("P") + + +def retry( + wait: float, stop_after_delay: float +) -> Callable[[Callable[P, T]], Callable[P, T]]: + """Decorator to automatically retry a function on error. + + If the function raises, the function is recalled with the same arguments + until it returns or the time limit is reached. When the time limit is + surpassed, the last exception raised is reraised. 
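+
+    Illustrative usage sketch (this mirrors how ``rmtree`` in misc.py
+    applies the decorator; ``flaky_operation`` is a hypothetical name):
+
+        @retry(stop_after_delay=3, wait=0.5)
+        def flaky_operation() -> None:
+            ...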
+
+    :param wait: The time to wait after an error before retrying, in seconds.
+    :param stop_after_delay: The time limit after which retries will cease,
+        in seconds.
+    """
+
+    def wrapper(func: Callable[P, T]) -> Callable[P, T]:
+
+        @functools.wraps(func)
+        def retry_wrapped(*args: P.args, **kwargs: P.kwargs) -> T:
+            # The performance counter is monotonic on all platforms we care
+            # about and has much better resolution than time.monotonic().
+            start_time = perf_counter()
+            while True:
+                try:
+                    return func(*args, **kwargs)
+                except Exception:
+                    if perf_counter() - start_time > stop_after_delay:
+                        raise
+                    sleep(wait)
+
+        return retry_wrapped
+
+    return wrapper
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/setuptools_build.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/setuptools_build.py
new file mode 100644
index 0000000..1b8c7a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/setuptools_build.py
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+import sys
+import textwrap
+from collections.abc import Sequence
+
+# Shim to wrap setup.py invocation with setuptools
+# Note that __file__ is handled via two {!r} *and* %r, to ensure that paths on
+# Windows are correctly handled (it should be "C:\\Users" not "C:\Users").
+_SETUPTOOLS_SHIM = textwrap.dedent(
+    """
+    exec(compile('''
+    # This is <pip-setuptools-caller> -- a caller that pip uses to run setup.py
+    #
+    # - It imports setuptools before invoking setup.py, to enable projects that directly
+    #   import from `distutils.core` to work with newer packaging standards.
+    # - It provides a clear error message when setuptools is not installed.
+    # - It sets `sys.argv[0]` to the underlying `setup.py`, when invoking `setup.py` so
+    #   setuptools doesn't think the script is `-c`. This avoids the following warning:
+    #     manifest_maker: standard file '-c' not found.
+    # - It generates a shim setup.py, for handling setup.cfg-only projects.
+    import os, sys, tokenize, traceback
+
+    try:
+        import setuptools
+    except ImportError:
+        print(
+            "ERROR: Can not execute `setup.py` since setuptools failed to import in "
+            "the build environment with exception:",
+            file=sys.stderr,
+        )
+        traceback.print_exc()
+        sys.exit(1)
+
+    __file__ = %r
+    sys.argv[0] = __file__
+
+    if os.path.exists(__file__):
+        filename = __file__
+        with tokenize.open(__file__) as f:
+            setup_py_code = f.read()
+    else:
+        filename = "<auto-generated setuptools caller>"
+        setup_py_code = "from setuptools import setup; setup()"
+
+    exec(compile(setup_py_code, filename, "exec"))
+    ''' % ({!r},), "<pip-setuptools-caller>", "exec"))
+    """
+).rstrip()
+
+
+def make_setuptools_shim_args(
+    setup_py_path: str,
+    global_options: Sequence[str] | None = None,
+    no_user_config: bool = False,
+    unbuffered_output: bool = False,
+) -> list[str]:
+    """
+    Get setuptools command arguments with shim wrapped setup file invocation.
+
+    :param setup_py_path: The path to setup.py to be wrapped.
+    :param global_options: Additional global options.
+    :param no_user_config: If True, disables personal user configuration.
+    :param unbuffered_output: If True, adds the unbuffered switch to the
+        argument list.
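+
+    Illustrative result (assuming a hypothetical setup.py path with
+    ``unbuffered_output`` and ``no_user_config`` enabled)::
+
+        [sys.executable, "-u", "-c", <shim source>, "--no-user-cfg"]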
+ """ + args = [sys.executable] + if unbuffered_output: + args += ["-u"] + args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)] + if global_options: + args += global_options + if no_user_config: + args += ["--no-user-cfg"] + return args + + +def make_setuptools_bdist_wheel_args( + setup_py_path: str, + global_options: Sequence[str], + build_options: Sequence[str], + destination_dir: str, +) -> list[str]: + # NOTE: Eventually, we'd want to also -S to the flags here, when we're + # isolating. Currently, it breaks Python in virtualenvs, because it + # relies on site.py to find parts of the standard library outside the + # virtualenv. + args = make_setuptools_shim_args( + setup_py_path, global_options=global_options, unbuffered_output=True + ) + args += ["bdist_wheel", "-d", destination_dir] + args += build_options + return args + + +def make_setuptools_clean_args( + setup_py_path: str, + global_options: Sequence[str], +) -> list[str]: + args = make_setuptools_shim_args( + setup_py_path, global_options=global_options, unbuffered_output=True + ) + args += ["clean", "--all"] + return args + + +def make_setuptools_develop_args( + setup_py_path: str, + *, + global_options: Sequence[str], + no_user_config: bool, + prefix: str | None, + home: str | None, + use_user_site: bool, +) -> list[str]: + assert not (use_user_site and prefix) + + args = make_setuptools_shim_args( + setup_py_path, + global_options=global_options, + no_user_config=no_user_config, + ) + + args += ["develop", "--no-deps"] + + if prefix: + args += ["--prefix", prefix] + if home is not None: + args += ["--install-dir", home] + + if use_user_site: + args += ["--user", "--prefix="] + + return args + + +def make_setuptools_egg_info_args( + setup_py_path: str, + egg_info_dir: str | None, + no_user_config: bool, +) -> list[str]: + args = make_setuptools_shim_args(setup_py_path, no_user_config=no_user_config) + + args += ["egg_info"] + + if egg_info_dir: + args += ["--egg-base", egg_info_dir] + + return args diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/subprocess.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/subprocess.py new file mode 100644 index 0000000..3e7b83f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/subprocess.py @@ -0,0 +1,248 @@ +from __future__ import annotations + +import logging +import os +import shlex +import subprocess +from collections.abc import Iterable, Mapping +from typing import Any, Callable, Literal, Union + +from pip._vendor.rich.markup import escape + +from pip._internal.cli.spinners import SpinnerInterface, open_spinner +from pip._internal.exceptions import InstallationSubprocessError +from pip._internal.utils.logging import VERBOSE, subprocess_logger +from pip._internal.utils.misc import HiddenText + +CommandArgs = list[Union[str, HiddenText]] + + +def make_command(*args: str | HiddenText | CommandArgs) -> CommandArgs: + """ + Create a CommandArgs object. + """ + command_args: CommandArgs = [] + for arg in args: + # Check for list instead of CommandArgs since CommandArgs is + # only known during type-checking. + if isinstance(arg, list): + command_args.extend(arg) + else: + # Otherwise, arg is str or HiddenText. + command_args.append(arg) + + return command_args + + +def format_command_args(args: list[str] | CommandArgs) -> str: + """ + Format command arguments for display. + """ + # For HiddenText arguments, display the redacted form by calling str(). 
+ # Also, we don't apply str() to arguments that aren't HiddenText since + # this can trigger a UnicodeDecodeError in Python 2 if the argument + # has type unicode and includes a non-ascii character. (The type + # checker doesn't ensure the annotations are correct in all cases.) + return " ".join( + shlex.quote(str(arg)) if isinstance(arg, HiddenText) else shlex.quote(arg) + for arg in args + ) + + +def reveal_command_args(args: list[str] | CommandArgs) -> list[str]: + """ + Return the arguments in their raw, unredacted form. + """ + return [arg.secret if isinstance(arg, HiddenText) else arg for arg in args] + + +def call_subprocess( + cmd: list[str] | CommandArgs, + show_stdout: bool = False, + cwd: str | None = None, + on_returncode: Literal["raise", "warn", "ignore"] = "raise", + extra_ok_returncodes: Iterable[int] | None = None, + extra_environ: Mapping[str, Any] | None = None, + unset_environ: Iterable[str] | None = None, + spinner: SpinnerInterface | None = None, + log_failed_cmd: bool | None = True, + stdout_only: bool | None = False, + *, + command_desc: str, +) -> str: + """ + Args: + show_stdout: if true, use INFO to log the subprocess's stderr and + stdout streams. Otherwise, use DEBUG. Defaults to False. + extra_ok_returncodes: an iterable of integer return codes that are + acceptable, in addition to 0. Defaults to None, which means []. + unset_environ: an iterable of environment variable names to unset + prior to calling subprocess.Popen(). + log_failed_cmd: if false, failed commands are not logged, only raised. + stdout_only: if true, return only stdout, else return both. When true, + logging of both stdout and stderr occurs when the subprocess has + terminated, else logging occurs as subprocess output is produced. + """ + if extra_ok_returncodes is None: + extra_ok_returncodes = [] + if unset_environ is None: + unset_environ = [] + # Most places in pip use show_stdout=False. What this means is-- + # + # - We connect the child's output (combined stderr and stdout) to a + # single pipe, which we read. + # - We log this output to stderr at DEBUG level as it is received. + # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't + # requested), then we show a spinner so the user can still see the + # subprocess is in progress. + # - If the subprocess exits with an error, we log the output to stderr + # at ERROR level if it hasn't already been displayed to the console + # (e.g. if --verbose logging wasn't enabled). This way we don't log + # the output to the console twice. + # + # If show_stdout=True, then the above is still done, but with DEBUG + # replaced by INFO. + if show_stdout: + # Then log the subprocess output at INFO level. + log_subprocess: Callable[..., None] = subprocess_logger.info + used_level = logging.INFO + else: + # Then log the subprocess output using VERBOSE. This also ensures + # it will be logged to the log file (aka user_log), if enabled. + log_subprocess = subprocess_logger.verbose + used_level = VERBOSE + + # Whether the subprocess will be visible in the console. + showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level + + # Only use the spinner if we're not showing the subprocess output + # and we have a spinner. 
+ use_spinner = not showing_subprocess and spinner is not None + + log_subprocess("Running command %s", command_desc) + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + for name in unset_environ: + env.pop(name, None) + try: + proc = subprocess.Popen( + # Convert HiddenText objects to the underlying str. + reveal_command_args(cmd), + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT if not stdout_only else subprocess.PIPE, + cwd=cwd, + env=env, + errors="backslashreplace", + ) + except Exception as exc: + if log_failed_cmd: + subprocess_logger.critical( + "Error %s while executing command %s", + exc, + command_desc, + ) + raise + all_output = [] + if not stdout_only: + assert proc.stdout + assert proc.stdin + proc.stdin.close() + # In this mode, stdout and stderr are in the same pipe. + while True: + line: str = proc.stdout.readline() + if not line: + break + line = line.rstrip() + all_output.append(line + "\n") + + # Show the line immediately. + log_subprocess(line) + # Update the spinner. + if use_spinner: + assert spinner + spinner.spin() + try: + proc.wait() + finally: + if proc.stdout: + proc.stdout.close() + output = "".join(all_output) + else: + # In this mode, stdout and stderr are in different pipes. + # We must use communicate() which is the only safe way to read both. + out, err = proc.communicate() + # log line by line to preserve pip log indenting + for out_line in out.splitlines(): + log_subprocess(out_line) + all_output.append(out) + for err_line in err.splitlines(): + log_subprocess(err_line) + all_output.append(err) + output = out + + proc_had_error = proc.returncode and proc.returncode not in extra_ok_returncodes + if use_spinner: + assert spinner + if proc_had_error: + spinner.finish("error") + else: + spinner.finish("done") + if proc_had_error: + if on_returncode == "raise": + error = InstallationSubprocessError( + command_description=command_desc, + exit_code=proc.returncode, + output_lines=all_output if not showing_subprocess else None, + ) + if log_failed_cmd: + subprocess_logger.error("%s", error, extra={"rich": True}) + subprocess_logger.verbose( + "[bold magenta]full command[/]: [blue]%s[/]", + escape(format_command_args(cmd)), + extra={"markup": True}, + ) + subprocess_logger.verbose( + "[bold magenta]cwd[/]: %s", + escape(cwd or "[inherit]"), + extra={"markup": True}, + ) + + raise error + elif on_returncode == "warn": + subprocess_logger.warning( + 'Command "%s" had error code %s in %s', + command_desc, + proc.returncode, + cwd, + ) + elif on_returncode == "ignore": + pass + else: + raise ValueError(f"Invalid value: on_returncode={on_returncode!r}") + return output + + +def runner_with_spinner_message(message: str) -> Callable[..., None]: + """Provide a subprocess_runner that shows a spinner message. + + Intended for use with for BuildBackendHookCaller. Thus, the runner has + an API that matches what's expected by BuildBackendHookCaller.subprocess_runner. 
+ """ + + def runner( + cmd: list[str], + cwd: str | None = None, + extra_environ: Mapping[str, Any] | None = None, + ) -> None: + with open_spinner(message) as spinner: + call_subprocess( + cmd, + command_desc=message, + cwd=cwd, + extra_environ=extra_environ, + spinner=spinner, + ) + + return runner diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/temp_dir.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/temp_dir.py new file mode 100644 index 0000000..a9afa76 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/temp_dir.py @@ -0,0 +1,294 @@ +from __future__ import annotations + +import errno +import itertools +import logging +import os.path +import tempfile +import traceback +from collections.abc import Generator +from contextlib import ExitStack, contextmanager +from pathlib import Path +from typing import ( + Any, + Callable, + TypeVar, +) + +from pip._internal.utils.misc import enum, rmtree + +logger = logging.getLogger(__name__) + +_T = TypeVar("_T", bound="TempDirectory") + + +# Kinds of temporary directories. Only needed for ones that are +# globally-managed. +tempdir_kinds = enum( + BUILD_ENV="build-env", + EPHEM_WHEEL_CACHE="ephem-wheel-cache", + REQ_BUILD="req-build", +) + + +_tempdir_manager: ExitStack | None = None + + +@contextmanager +def global_tempdir_manager() -> Generator[None, None, None]: + global _tempdir_manager + with ExitStack() as stack: + old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack + try: + yield + finally: + _tempdir_manager = old_tempdir_manager + + +class TempDirectoryTypeRegistry: + """Manages temp directory behavior""" + + def __init__(self) -> None: + self._should_delete: dict[str, bool] = {} + + def set_delete(self, kind: str, value: bool) -> None: + """Indicate whether a TempDirectory of the given kind should be + auto-deleted. + """ + self._should_delete[kind] = value + + def get_delete(self, kind: str) -> bool: + """Get configured auto-delete flag for a given TempDirectory type, + default True. + """ + return self._should_delete.get(kind, True) + + +_tempdir_registry: TempDirectoryTypeRegistry | None = None + + +@contextmanager +def tempdir_registry() -> Generator[TempDirectoryTypeRegistry, None, None]: + """Provides a scoped global tempdir registry that can be used to dictate + whether directories should be deleted. + """ + global _tempdir_registry + old_tempdir_registry = _tempdir_registry + _tempdir_registry = TempDirectoryTypeRegistry() + try: + yield _tempdir_registry + finally: + _tempdir_registry = old_tempdir_registry + + +class _Default: + pass + + +_default = _Default() + + +class TempDirectory: + """Helper class that owns and cleans up a temporary directory. + + This class can be used as a context manager or as an OO representation of a + temporary directory. + + Attributes: + path + Location to the created temporary directory + delete + Whether the directory should be deleted when exiting + (when used as a contextmanager) + + Methods: + cleanup() + Deletes the temporary directory + + When used as a context manager, if the delete attribute is True, on + exiting the context the temporary directory is deleted. + """ + + def __init__( + self, + path: str | None = None, + delete: bool | None | _Default = _default, + kind: str = "temp", + globally_managed: bool = False, + ignore_cleanup_errors: bool = True, + ): + super().__init__() + + if delete is _default: + if path is not None: + # If we were given an explicit directory, resolve delete option + # now. 
+ delete = False + else: + # Otherwise, we wait until cleanup and see what + # tempdir_registry says. + delete = None + + # The only time we specify path is in for editables where it + # is the value of the --src option. + if path is None: + path = self._create(kind) + + self._path = path + self._deleted = False + self.delete = delete + self.kind = kind + self.ignore_cleanup_errors = ignore_cleanup_errors + + if globally_managed: + assert _tempdir_manager is not None + _tempdir_manager.enter_context(self) + + @property + def path(self) -> str: + assert not self._deleted, f"Attempted to access deleted path: {self._path}" + return self._path + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.path!r}>" + + def __enter__(self: _T) -> _T: + return self + + def __exit__(self, exc: Any, value: Any, tb: Any) -> None: + if self.delete is not None: + delete = self.delete + elif _tempdir_registry: + delete = _tempdir_registry.get_delete(self.kind) + else: + delete = True + + if delete: + self.cleanup() + + def _create(self, kind: str) -> str: + """Create a temporary directory and store its path in self.path""" + # We realpath here because some systems have their default tmpdir + # symlinked to another directory. This tends to confuse build + # scripts, so we canonicalize the path by traversing potential + # symlinks here. + path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-")) + logger.debug("Created temporary directory: %s", path) + return path + + def cleanup(self) -> None: + """Remove the temporary directory created and reset state""" + self._deleted = True + if not os.path.exists(self._path): + return + + errors: list[BaseException] = [] + + def onerror( + func: Callable[..., Any], + path: Path, + exc_val: BaseException, + ) -> None: + """Log a warning for a `rmtree` error and continue""" + formatted_exc = "\n".join( + traceback.format_exception_only(type(exc_val), exc_val) + ) + formatted_exc = formatted_exc.rstrip() # remove trailing new line + if func in (os.unlink, os.remove, os.rmdir): + logger.debug( + "Failed to remove a temporary file '%s' due to %s.\n", + path, + formatted_exc, + ) + else: + logger.debug("%s failed with %s.", func.__qualname__, formatted_exc) + errors.append(exc_val) + + if self.ignore_cleanup_errors: + try: + # first try with @retry; retrying to handle ephemeral errors + rmtree(self._path, ignore_errors=False) + except OSError: + # last pass ignore/log all errors + rmtree(self._path, onexc=onerror) + if errors: + logger.warning( + "Failed to remove contents in a temporary directory '%s'.\n" + "You can safely remove it manually.", + self._path, + ) + else: + rmtree(self._path) + + +class AdjacentTempDirectory(TempDirectory): + """Helper class that creates a temporary directory adjacent to a real one. + + Attributes: + original + The original directory to create a temp directory for. + path + After calling create() or entering, contains the full + path to the temporary directory. + delete + Whether the directory should be deleted when exiting + (when used as a contextmanager) + + """ + + # The characters that may be used to name the temp directory + # We always prepend a ~ and then rotate through these until + # a usable name is found. 
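+    # Illustrative: for "foo", candidates look like "~oo", "~-o", "~~o", ...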
+    # pkg_resources raises a different error for .dist-info folder
+    # with leading '-' and invalid metadata
+    LEADING_CHARS = "-~.=%0123456789"
+
+    def __init__(self, original: str, delete: bool | None = None) -> None:
+        self.original = original.rstrip("/\\")
+        super().__init__(delete=delete)
+
+    @classmethod
+    def _generate_names(cls, name: str) -> Generator[str, None, None]:
+        """Generates a series of temporary names.
+
+        The algorithm replaces the leading characters in the name
+        with ones that are valid filesystem characters, but are not
+        valid package names (for both Python and pip definitions of
+        package).
+        """
+        for i in range(1, len(name)):
+            for candidate in itertools.combinations_with_replacement(
+                cls.LEADING_CHARS, i - 1
+            ):
+                new_name = "~" + "".join(candidate) + name[i:]
+                if new_name != name:
+                    yield new_name
+
+        # If we make it this far, we will have to make a longer name
+        for i in range(len(cls.LEADING_CHARS)):
+            for candidate in itertools.combinations_with_replacement(
+                cls.LEADING_CHARS, i
+            ):
+                new_name = "~" + "".join(candidate) + name
+                if new_name != name:
+                    yield new_name
+
+    def _create(self, kind: str) -> str:
+        root, name = os.path.split(self.original)
+        for candidate in self._generate_names(name):
+            path = os.path.join(root, candidate)
+            try:
+                os.mkdir(path)
+            except OSError as ex:
+                # Continue if the name exists already
+                if ex.errno != errno.EEXIST:
+                    raise
+            else:
+                path = os.path.realpath(path)
+                break
+        else:
+            # Final fallback on the default behavior.
+            path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
+
+        logger.debug("Created temporary directory: %s", path)
+        return path
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/unpacking.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/unpacking.py
new file mode 100644
index 0000000..0ad3129
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/unpacking.py
@@ -0,0 +1,337 @@
+"""Utilities related to archives."""
+
+from __future__ import annotations
+
+import logging
+import os
+import shutil
+import stat
+import sys
+import tarfile
+import zipfile
+from collections.abc import Iterable
+from zipfile import ZipInfo
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.utils.filetypes import (
+    BZ2_EXTENSIONS,
+    TAR_EXTENSIONS,
+    XZ_EXTENSIONS,
+    ZIP_EXTENSIONS,
+)
+from pip._internal.utils.misc import ensure_dir
+
+logger = logging.getLogger(__name__)
+
+
+SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
+
+try:
+    import bz2  # noqa
+
+    SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
+except ImportError:
+    logger.debug("bz2 module is not available")
+
+try:
+    # lzma may be missing if Python was built without liblzma
+    import lzma  # noqa
+
+    SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
+except ImportError:
+    logger.debug("lzma module is not available")
+
+
+def current_umask() -> int:
+    """Get the current umask which involves having to set it temporarily."""
+    mask = os.umask(0)
+    os.umask(mask)
+    return mask
+
+
+def split_leading_dir(path: str) -> list[str]:
+    path = path.lstrip("/").lstrip("\\")
+    if "/" in path and (
+        ("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path
+    ):
+        return path.split("/", 1)
+    elif "\\" in path:
+        return path.split("\\", 1)
+    else:
+        return [path, ""]
+
+
+def has_leading_dir(paths: Iterable[str]) -> bool:
+    """Returns true if all the paths have the same leading path name
+    (i.e., everything is in one subdirectory in an archive)"""
+    common_prefix = None
+    for path in paths:
+        prefix, rest = split_leading_dir(path)
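+        # An entry at the archive root has no leading directory, so the
+        # archive cannot be wrapped in a single top-level folder.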
+ if not prefix: + return False + elif common_prefix is None: + common_prefix = prefix + elif prefix != common_prefix: + return False + return True + + +def is_within_directory(directory: str, target: str) -> bool: + """ + Return true if the absolute path of target is within the directory + """ + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + + prefix = os.path.commonprefix([abs_directory, abs_target]) + return prefix == abs_directory + + +def _get_default_mode_plus_executable() -> int: + return 0o777 & ~current_umask() | 0o111 + + +def set_extracted_file_to_default_mode_plus_executable(path: str) -> None: + """ + Make file present at path have execute for user/group/world + (chmod +x) is no-op on windows per python docs + """ + os.chmod(path, _get_default_mode_plus_executable()) + + +def zip_item_is_executable(info: ZipInfo) -> bool: + mode = info.external_attr >> 16 + # if mode and regular file and any execute permissions for + # user/group/world? + return bool(mode and stat.S_ISREG(mode) and mode & 0o111) + + +def unzip_file(filename: str, location: str, flatten: bool = True) -> None: + """ + Unzip the file (with path `filename`) to the destination `location`. All + files are written based on system defaults and umask (i.e. permissions are + not preserved), except that regular file members with any execute + permissions (user, group, or world) have "chmod +x" applied after being + written. Note that for windows, any execute changes using os.chmod are + no-ops per the python docs. + """ + ensure_dir(location) + zipfp = open(filename, "rb") + try: + zip = zipfile.ZipFile(zipfp, allowZip64=True) + leading = has_leading_dir(zip.namelist()) and flatten + for info in zip.infolist(): + name = info.filename + fn = name + if leading: + fn = split_leading_dir(name)[1] + fn = os.path.join(location, fn) + dir = os.path.dirname(fn) + if not is_within_directory(location, fn): + message = ( + "The zip file ({}) has a file ({}) trying to install " + "outside target directory ({})" + ) + raise InstallationError(message.format(filename, fn, location)) + if fn.endswith(("/", "\\")): + # A directory + ensure_dir(fn) + else: + ensure_dir(dir) + # Don't use read() to avoid allocating an arbitrarily large + # chunk of memory for the file's content + fp = zip.open(name) + try: + with open(fn, "wb") as destfp: + shutil.copyfileobj(fp, destfp) + finally: + fp.close() + if zip_item_is_executable(info): + set_extracted_file_to_default_mode_plus_executable(fn) + finally: + zipfp.close() + + +def untar_file(filename: str, location: str) -> None: + """ + Untar the file (with path `filename`) to the destination `location`. + All files are written based on system defaults and umask (i.e. permissions + are not preserved), except that regular file members with any execute + permissions (user, group, or world) have "chmod +x" applied on top of the + default. Note that for windows, any execute changes using os.chmod are + no-ops per the python docs. 
+ """ + ensure_dir(location) + if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"): + mode = "r:gz" + elif filename.lower().endswith(BZ2_EXTENSIONS): + mode = "r:bz2" + elif filename.lower().endswith(XZ_EXTENSIONS): + mode = "r:xz" + elif filename.lower().endswith(".tar"): + mode = "r" + else: + logger.warning( + "Cannot determine compression type for file %s", + filename, + ) + mode = "r:*" + + tar = tarfile.open(filename, mode, encoding="utf-8") # type: ignore + try: + leading = has_leading_dir([member.name for member in tar.getmembers()]) + + # PEP 706 added `tarfile.data_filter`, and made some other changes to + # Python's tarfile module (see below). The features were backported to + # security releases. + try: + data_filter = tarfile.data_filter + except AttributeError: + _untar_without_filter(filename, location, tar, leading) + else: + default_mode_plus_executable = _get_default_mode_plus_executable() + + if leading: + # Strip the leading directory from all files in the archive, + # including hardlink targets (which are relative to the + # unpack location). + for member in tar.getmembers(): + name_lead, name_rest = split_leading_dir(member.name) + member.name = name_rest + if member.islnk(): + lnk_lead, lnk_rest = split_leading_dir(member.linkname) + if lnk_lead == name_lead: + member.linkname = lnk_rest + + def pip_filter(member: tarfile.TarInfo, path: str) -> tarfile.TarInfo: + orig_mode = member.mode + try: + try: + member = data_filter(member, location) + except tarfile.LinkOutsideDestinationError: + if sys.version_info[:3] in { + (3, 9, 17), + (3, 10, 12), + (3, 11, 4), + }: + # The tarfile filter in specific Python versions + # raises LinkOutsideDestinationError on valid input + # (https://github.com/python/cpython/issues/107845) + # Ignore the error there, but do use the + # more lax `tar_filter` + member = tarfile.tar_filter(member, location) + else: + raise + except tarfile.TarError as exc: + message = "Invalid member in the tar file {}: {}" + # Filter error messages mention the member name. + # No need to add it here. + raise InstallationError( + message.format( + filename, + exc, + ) + ) + if member.isfile() and orig_mode & 0o111: + member.mode = default_mode_plus_executable + else: + # See PEP 706 note above. + # The PEP changed this from `int` to `Optional[int]`, + # where None means "use the default". Mypy doesn't + # know this yet. 
+ member.mode = None # type: ignore [assignment] + return member + + tar.extractall(location, filter=pip_filter) + + finally: + tar.close() + + +def _untar_without_filter( + filename: str, + location: str, + tar: tarfile.TarFile, + leading: bool, +) -> None: + """Fallback for Python without tarfile.data_filter""" + for member in tar.getmembers(): + fn = member.name + if leading: + fn = split_leading_dir(fn)[1] + path = os.path.join(location, fn) + if not is_within_directory(location, path): + message = ( + "The tar file ({}) has a file ({}) trying to install " + "outside target directory ({})" + ) + raise InstallationError(message.format(filename, path, location)) + if member.isdir(): + ensure_dir(path) + elif member.issym(): + try: + tar._extract_member(member, path) + except Exception as exc: + # Some corrupt tar files seem to produce this + # (specifically bad symlinks) + logger.warning( + "In the tar file %s the member %s is invalid: %s", + filename, + member.name, + exc, + ) + continue + else: + try: + fp = tar.extractfile(member) + except (KeyError, AttributeError) as exc: + # Some corrupt tar files seem to produce this + # (specifically bad symlinks) + logger.warning( + "In the tar file %s the member %s is invalid: %s", + filename, + member.name, + exc, + ) + continue + ensure_dir(os.path.dirname(path)) + assert fp is not None + with open(path, "wb") as destfp: + shutil.copyfileobj(fp, destfp) + fp.close() + # Update the timestamp (useful for cython compiled files) + tar.utime(member, path) + # member have any execute permissions for user/group/world? + if member.mode & 0o111: + set_extracted_file_to_default_mode_plus_executable(path) + + +def unpack_file( + filename: str, + location: str, + content_type: str | None = None, +) -> None: + filename = os.path.realpath(filename) + if ( + content_type == "application/zip" + or filename.lower().endswith(ZIP_EXTENSIONS) + or zipfile.is_zipfile(filename) + ): + unzip_file(filename, location, flatten=not filename.endswith(".whl")) + elif ( + content_type == "application/x-gzip" + or tarfile.is_tarfile(filename) + or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS) + ): + untar_file(filename, location) + else: + # FIXME: handle? + # FIXME: magic signatures? + logger.critical( + "Cannot unpack file %s (downloaded from %s, content-type: %s); " + "cannot detect archive format", + filename, + location, + content_type, + ) + raise InstallationError(f"Cannot determine archive format of {location}") diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/urls.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/urls.py new file mode 100644 index 0000000..e951a5e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/urls.py @@ -0,0 +1,55 @@ +import os +import string +import urllib.parse +import urllib.request + +from .compat import WINDOWS + + +def path_to_url(path: str) -> str: + """ + Convert a path to a file: URL. The path will be made absolute and have + quoted path parts. + """ + path = os.path.normpath(os.path.abspath(path)) + url = urllib.parse.urljoin("file://", urllib.request.pathname2url(path)) + return url + + +def url_to_path(url: str) -> str: + """ + Convert a file: URL to a path. + """ + assert url.startswith( + "file:" + ), f"You can only turn file: urls into filenames (not {url!r})" + + _, netloc, path, _, _ = urllib.parse.urlsplit(url) + + if not netloc or netloc == "localhost": + # According to RFC 8089, same as empty authority. 
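+        # Illustrative: "file:///tmp/x" and "file://localhost/tmp/x" both
+        # resolve to the local path "/tmp/x".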
+ netloc = "" + elif WINDOWS: + # If we have a UNC path, prepend UNC share notation. + netloc = "\\\\" + netloc + else: + raise ValueError( + f"non-local file URIs are not supported on this platform: {url!r}" + ) + + path = urllib.request.url2pathname(netloc + path) + + # On Windows, urlsplit parses the path as something like "/C:/Users/foo". + # This creates issues for path-related functions like io.open(), so we try + # to detect and strip the leading slash. + if ( + WINDOWS + and not netloc # Not UNC. + and len(path) >= 3 + and path[0] == "/" # Leading slash to strip. + and path[1] in string.ascii_letters # Drive letter. + and path[2:4] in (":", ":/") # Colon + end of string, or colon + absolute path. + ): + path = path[1:] + + return path diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/virtualenv.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/virtualenv.py new file mode 100644 index 0000000..b1742a3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/virtualenv.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import logging +import os +import re +import site +import sys + +logger = logging.getLogger(__name__) +_INCLUDE_SYSTEM_SITE_PACKAGES_REGEX = re.compile( + r"include-system-site-packages\s*=\s*(?Ptrue|false)" +) + + +def _running_under_venv() -> bool: + """Checks if sys.base_prefix and sys.prefix match. + + This handles PEP 405 compliant virtual environments. + """ + return sys.prefix != getattr(sys, "base_prefix", sys.prefix) + + +def _running_under_legacy_virtualenv() -> bool: + """Checks if sys.real_prefix is set. + + This handles virtual environments created with pypa's virtualenv. + """ + # pypa/virtualenv case + return hasattr(sys, "real_prefix") + + +def running_under_virtualenv() -> bool: + """True if we're running inside a virtual environment, False otherwise.""" + return _running_under_venv() or _running_under_legacy_virtualenv() + + +def _get_pyvenv_cfg_lines() -> list[str] | None: + """Reads {sys.prefix}/pyvenv.cfg and returns its contents as list of lines + + Returns None, if it could not read/access the file. + """ + pyvenv_cfg_file = os.path.join(sys.prefix, "pyvenv.cfg") + try: + # Although PEP 405 does not specify, the built-in venv module always + # writes with UTF-8. (pypa/pip#8717) + with open(pyvenv_cfg_file, encoding="utf-8") as f: + return f.read().splitlines() # avoids trailing newlines + except OSError: + return None + + +def _no_global_under_venv() -> bool: + """Check `{sys.prefix}/pyvenv.cfg` for system site-packages inclusion + + PEP 405 specifies that when system site-packages are not supposed to be + visible from a virtual environment, `pyvenv.cfg` must contain the following + line: + + include-system-site-packages = false + + Additionally, log a warning if accessing the file fails. + """ + cfg_lines = _get_pyvenv_cfg_lines() + if cfg_lines is None: + # We're not in a "sane" venv, so assume there is no system + # site-packages access (since that's PEP 405's default state). + logger.warning( + "Could not access 'pyvenv.cfg' despite a virtual environment " + "being active. Assuming global site-packages is not accessible " + "in this environment." 
+ ) + return True + + for line in cfg_lines: + match = _INCLUDE_SYSTEM_SITE_PACKAGES_REGEX.match(line) + if match is not None and match.group("value") == "false": + return True + return False + + +def _no_global_under_legacy_virtualenv() -> bool: + """Check if "no-global-site-packages.txt" exists beside site.py + + This mirrors logic in pypa/virtualenv for determining whether system + site-packages are visible in the virtual environment. + """ + site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) + no_global_site_packages_file = os.path.join( + site_mod_dir, + "no-global-site-packages.txt", + ) + return os.path.exists(no_global_site_packages_file) + + +def virtualenv_no_global() -> bool: + """Returns a boolean, whether running in venv with no system site-packages.""" + # PEP 405 compliance needs to be checked first since virtualenv >=20 would + # return True for both checks, but is only able to use the PEP 405 config. + if _running_under_venv(): + return _no_global_under_venv() + + if _running_under_legacy_virtualenv(): + return _no_global_under_legacy_virtualenv() + + return False diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/utils/wheel.py b/.venv/lib/python3.12/site-packages/pip/_internal/utils/wheel.py new file mode 100644 index 0000000..789e736 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/utils/wheel.py @@ -0,0 +1,132 @@ +"""Support functions for working with wheel files.""" + +import logging +from email.message import Message +from email.parser import Parser +from zipfile import BadZipFile, ZipFile + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.exceptions import UnsupportedWheel + +VERSION_COMPATIBLE = (1, 0) + + +logger = logging.getLogger(__name__) + + +def parse_wheel(wheel_zip: ZipFile, name: str) -> tuple[str, Message]: + """Extract information from the provided wheel, ensuring it meets basic + standards. + + Returns the name of the .dist-info directory and the parsed WHEEL metadata. + """ + try: + info_dir = wheel_dist_info_dir(wheel_zip, name) + metadata = wheel_metadata(wheel_zip, info_dir) + version = wheel_version(metadata) + except UnsupportedWheel as e: + raise UnsupportedWheel(f"{name} has an invalid wheel, {e}") + + check_compatibility(version, name) + + return info_dir, metadata + + +def wheel_dist_info_dir(source: ZipFile, name: str) -> str: + """Returns the name of the contained .dist-info directory. + + Raises AssertionError or UnsupportedWheel if not found, >1 found, or + it doesn't match the provided name. 
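+
+    Illustrative: for name "pip", a directory such as "pip-25.0.dist-info"
+    (a hypothetical version) would be accepted.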
+ """ + # Zip file path separators must be / + subdirs = {p.split("/", 1)[0] for p in source.namelist()} + + info_dirs = [s for s in subdirs if s.endswith(".dist-info")] + + if not info_dirs: + raise UnsupportedWheel(".dist-info directory not found") + + if len(info_dirs) > 1: + raise UnsupportedWheel( + "multiple .dist-info directories found: {}".format(", ".join(info_dirs)) + ) + + info_dir = info_dirs[0] + + info_dir_name = canonicalize_name(info_dir) + canonical_name = canonicalize_name(name) + if not info_dir_name.startswith(canonical_name): + raise UnsupportedWheel( + f".dist-info directory {info_dir!r} does not start with {canonical_name!r}" + ) + + return info_dir + + +def read_wheel_metadata_file(source: ZipFile, path: str) -> bytes: + try: + return source.read(path) + # BadZipFile for general corruption, KeyError for missing entry, + # and RuntimeError for password-protected files + except (BadZipFile, KeyError, RuntimeError) as e: + raise UnsupportedWheel(f"could not read {path!r} file: {e!r}") + + +def wheel_metadata(source: ZipFile, dist_info_dir: str) -> Message: + """Return the WHEEL metadata of an extracted wheel, if possible. + Otherwise, raise UnsupportedWheel. + """ + path = f"{dist_info_dir}/WHEEL" + # Zip file path separators must be / + wheel_contents = read_wheel_metadata_file(source, path) + + try: + wheel_text = wheel_contents.decode() + except UnicodeDecodeError as e: + raise UnsupportedWheel(f"error decoding {path!r}: {e!r}") + + # FeedParser (used by Parser) does not raise any exceptions. The returned + # message may have .defects populated, but for backwards-compatibility we + # currently ignore them. + return Parser().parsestr(wheel_text) + + +def wheel_version(wheel_data: Message) -> tuple[int, ...]: + """Given WHEEL metadata, return the parsed Wheel-Version. + Otherwise, raise UnsupportedWheel. + """ + version_text = wheel_data["Wheel-Version"] + if version_text is None: + raise UnsupportedWheel("WHEEL is missing Wheel-Version") + + version = version_text.strip() + + try: + return tuple(map(int, version.split("."))) + except ValueError: + raise UnsupportedWheel(f"invalid Wheel-Version: {version!r}") + + +def check_compatibility(version: tuple[int, ...], name: str) -> None: + """Raises errors or warns if called with an incompatible Wheel-Version. + + pip should refuse to install a Wheel-Version that's a major series + ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when + installing a version only minor version ahead (e.g 1.2 > 1.1). + + version: a 2-tuple representing a Wheel-Version (Major, Minor) + name: name of wheel or package to raise exception about + + :raises UnsupportedWheel: when an incompatible Wheel-Version is given + """ + if version[0] > VERSION_COMPATIBLE[0]: + raise UnsupportedWheel( + "{}'s Wheel-Version ({}) is not compatible with this version " + "of pip".format(name, ".".join(map(str, version))) + ) + elif version > VERSION_COMPATIBLE: + logger.warning( + "Installing from a newer Wheel-Version (%s)", + ".".join(map(str, version)), + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__init__.py b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__init__.py new file mode 100644 index 0000000..b6beddb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__init__.py @@ -0,0 +1,15 @@ +# Expose a limited set of classes and functions so callers outside of +# the vcs package don't need to import deeper than `pip._internal.vcs`. 
+# (The test directory may still need to import from a vcs sub-package.) +# Import all vcs modules to register each VCS in the VcsSupport object. +import pip._internal.vcs.bazaar +import pip._internal.vcs.git +import pip._internal.vcs.mercurial +import pip._internal.vcs.subversion # noqa: F401 +from pip._internal.vcs.versioncontrol import ( # noqa: F401 + RemoteNotFoundError, + RemoteNotValidError, + is_url, + make_vcs_requirement_url, + vcs, +) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..cc6ef28 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-312.pyc new file mode 100644 index 0000000..257c705 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/git.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/git.cpython-312.pyc new file mode 100644 index 0000000..fd34023 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/git.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-312.pyc new file mode 100644 index 0000000..64fe676 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-312.pyc new file mode 100644 index 0000000..c6b939c Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-312.pyc new file mode 100644 index 0000000..264b403 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/bazaar.py b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/bazaar.py new file mode 100644 index 0000000..3a8a21e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/bazaar.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import logging + +from pip._internal.utils.misc import HiddenText, display_path +from pip._internal.utils.subprocess import make_command +from pip._internal.utils.urls import path_to_url +from pip._internal.vcs.versioncontrol import ( + AuthInfo, + RemoteNotFoundError, + RevOptions, + VersionControl, + vcs, +) + +logger = logging.getLogger(__name__) + + +class Bazaar(VersionControl): + name = "bzr" + dirname = ".bzr" + repo_name = "branch" + schemes = ( + "bzr+http", + "bzr+https", + "bzr+ssh", + "bzr+sftp", + "bzr+ftp", + "bzr+lp", + "bzr+file", + ) + + @staticmethod + def 
get_base_rev_args(rev: str) -> list[str]: + return ["-r", rev] + + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: + rev_display = rev_options.to_display() + logger.info( + "Checking out %s%s to %s", + url, + rev_display, + display_path(dest), + ) + if verbosity <= 0: + flags = ["--quiet"] + elif verbosity == 1: + flags = [] + else: + flags = [f"-{'v'*verbosity}"] + cmd_args = make_command( + "checkout", "--lightweight", *flags, rev_options.to_args(), url, dest + ) + self.run_command(cmd_args) + + def switch( + self, + dest: str, + url: HiddenText, + rev_options: RevOptions, + verbosity: int = 0, + ) -> None: + self.run_command(make_command("switch", url), cwd=dest) + + def update( + self, + dest: str, + url: HiddenText, + rev_options: RevOptions, + verbosity: int = 0, + ) -> None: + flags = [] + + if verbosity <= 0: + flags.append("-q") + + output = self.run_command( + make_command("info"), show_stdout=False, stdout_only=True, cwd=dest + ) + if output.startswith("Standalone "): + # Older versions of pip used to create standalone branches. + # Convert the standalone branch to a checkout by calling "bzr bind". + cmd_args = make_command("bind", *flags, url) + self.run_command(cmd_args, cwd=dest) + + cmd_args = make_command("update", *flags, rev_options.to_args()) + self.run_command(cmd_args, cwd=dest) + + @classmethod + def get_url_rev_and_auth(cls, url: str) -> tuple[str, str | None, AuthInfo]: + # hotfix the URL scheme after removing bzr+ from bzr+ssh:// re-add it + url, rev, user_pass = super().get_url_rev_and_auth(url) + if url.startswith("ssh://"): + url = "bzr+" + url + return url, rev, user_pass + + @classmethod + def get_remote_url(cls, location: str) -> str: + urls = cls.run_command( + ["info"], show_stdout=False, stdout_only=True, cwd=location + ) + for line in urls.splitlines(): + line = line.strip() + for x in ("checkout of branch: ", "parent branch: "): + if line.startswith(x): + repo = line.split(x)[1] + if cls._is_local_repository(repo): + return path_to_url(repo) + return repo + raise RemoteNotFoundError + + @classmethod + def get_revision(cls, location: str) -> str: + revision = cls.run_command( + ["revno"], + show_stdout=False, + stdout_only=True, + cwd=location, + ) + return revision.splitlines()[-1] + + @classmethod + def is_commit_id_equal(cls, dest: str, name: str | None) -> bool: + """Always assume the versions don't match""" + return False + + +vcs.register(Bazaar) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/git.py b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/git.py new file mode 100644 index 0000000..1769da7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/git.py @@ -0,0 +1,571 @@ +from __future__ import annotations + +import logging +import os.path +import pathlib +import re +import urllib.parse +import urllib.request +from dataclasses import replace +from typing import Any + +from pip._internal.exceptions import BadCommand, InstallationError +from pip._internal.utils.misc import HiddenText, display_path, hide_url +from pip._internal.utils.subprocess import make_command +from pip._internal.vcs.versioncontrol import ( + AuthInfo, + RemoteNotFoundError, + RemoteNotValidError, + RevOptions, + VersionControl, + find_path_to_project_root_from_repo_root, + vcs, +) + +urlsplit = urllib.parse.urlsplit +urlunsplit = urllib.parse.urlunsplit + + +logger = logging.getLogger(__name__) + + +GIT_VERSION_REGEX = re.compile( + r"^git version " # Prefix. + r"(\d+)" # Major. 
+ r"\.(\d+)" # Dot, minor. + r"(?:\.(\d+))?" # Optional dot, patch. + r".*$" # Suffix, including any pre- and post-release segments we don't care about. +) + +HASH_REGEX = re.compile("^[a-fA-F0-9]{40}$") + +# SCP (Secure copy protocol) shorthand. e.g. 'git@example.com:foo/bar.git' +SCP_REGEX = re.compile( + r"""^ + # Optional user, e.g. 'git@' + (\w+@)? + # Server, e.g. 'github.com'. + ([^/:]+): + # The server-side path. e.g. 'user/project.git'. Must start with an + # alphanumeric character so as not to be confusable with a Windows paths + # like 'C:/foo/bar' or 'C:\foo\bar'. + (\w[^:]*) + $""", + re.VERBOSE, +) + + +def looks_like_hash(sha: str) -> bool: + return bool(HASH_REGEX.match(sha)) + + +class Git(VersionControl): + name = "git" + dirname = ".git" + repo_name = "clone" + schemes = ( + "git+http", + "git+https", + "git+ssh", + "git+git", + "git+file", + ) + # Prevent the user's environment variables from interfering with pip: + # https://github.com/pypa/pip/issues/1130 + unset_environ = ("GIT_DIR", "GIT_WORK_TREE") + default_arg_rev = "HEAD" + + @staticmethod + def get_base_rev_args(rev: str) -> list[str]: + return [rev] + + @classmethod + def run_command(cls, *args: Any, **kwargs: Any) -> str: + if os.environ.get("PIP_NO_INPUT"): + extra_environ = kwargs.get("extra_environ", {}) + extra_environ["GIT_TERMINAL_PROMPT"] = "0" + extra_environ["GIT_SSH_COMMAND"] = "ssh -oBatchMode=yes" + kwargs["extra_environ"] = extra_environ + return super().run_command(*args, **kwargs) + + def is_immutable_rev_checkout(self, url: str, dest: str) -> bool: + _, rev_options = self.get_url_rev_options(hide_url(url)) + if not rev_options.rev: + return False + if not self.is_commit_id_equal(dest, rev_options.rev): + # the current commit is different from rev, + # which means rev was something else than a commit hash + return False + # return False in the rare case rev is both a commit hash + # and a tag or a branch; we don't want to cache in that case + # because that branch/tag could point to something else in the future + is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0]) + return not is_tag_or_branch + + def get_git_version(self) -> tuple[int, ...]: + version = self.run_command( + ["version"], + command_desc="git version", + show_stdout=False, + stdout_only=True, + ) + match = GIT_VERSION_REGEX.match(version) + if not match: + logger.warning("Can't parse git version: %s", version) + return () + return (int(match.group(1)), int(match.group(2))) + + @classmethod + def get_current_branch(cls, location: str) -> str | None: + """ + Return the current branch, or None if HEAD isn't at a branch + (e.g. detached HEAD). + """ + # git-symbolic-ref exits with empty stdout if "HEAD" is a detached + # HEAD rather than a symbolic ref. In addition, the -q causes the + # command to exit with status code 1 instead of 128 in this case + # and to suppress the message to stderr. + args = ["symbolic-ref", "-q", "HEAD"] + output = cls.run_command( + args, + extra_ok_returncodes=(1,), + show_stdout=False, + stdout_only=True, + cwd=location, + ) + ref = output.strip() + + if ref.startswith("refs/heads/"): + return ref[len("refs/heads/") :] + + return None + + @classmethod + def get_revision_sha(cls, dest: str, rev: str) -> tuple[str | None, bool]: + """ + Return (sha_or_none, is_branch), where sha_or_none is a commit hash + if the revision names a remote branch or tag, otherwise None. + + Args: + dest: the repository directory. + rev: the revision name. + """ + # Pass rev to pre-filter the list. 
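+        # Editor's note (illustrative, hypothetical output): `git show-ref v1.0`
+        # prints lines like "<sha> refs/tags/v1.0" and
+        # "<sha> refs/remotes/origin/v1.0"; the loop below maps each ref name
+        # to its SHA so the branch and tag forms of `rev` can be looked up.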
+ output = cls.run_command( + ["show-ref", rev], + cwd=dest, + show_stdout=False, + stdout_only=True, + on_returncode="ignore", + ) + refs = {} + # NOTE: We do not use splitlines here since that would split on other + # unicode separators, which can be maliciously used to install a + # different revision. + for line in output.strip().split("\n"): + line = line.rstrip("\r") + if not line: + continue + try: + ref_sha, ref_name = line.split(" ", maxsplit=2) + except ValueError: + # Include the offending line to simplify troubleshooting if + # this error ever occurs. + raise ValueError(f"unexpected show-ref line: {line!r}") + + refs[ref_name] = ref_sha + + branch_ref = f"refs/remotes/origin/{rev}" + tag_ref = f"refs/tags/{rev}" + + sha = refs.get(branch_ref) + if sha is not None: + return (sha, True) + + sha = refs.get(tag_ref) + + return (sha, False) + + @classmethod + def _should_fetch(cls, dest: str, rev: str) -> bool: + """ + Return true if rev is a ref or is a commit that we don't have locally. + + Branches and tags are not considered in this method because they are + assumed to be always available locally (which is a normal outcome of + ``git clone`` and ``git fetch --tags``). + """ + if rev.startswith("refs/"): + # Always fetch remote refs. + return True + + if not looks_like_hash(rev): + # Git fetch would fail with abbreviated commits. + return False + + if cls.has_commit(dest, rev): + # Don't fetch if we have the commit locally. + return False + + return True + + @classmethod + def resolve_revision( + cls, dest: str, url: HiddenText, rev_options: RevOptions + ) -> RevOptions: + """ + Resolve a revision to a new RevOptions object with the SHA1 of the + branch, tag, or ref if found. + + Args: + rev_options: a RevOptions object. + """ + rev = rev_options.arg_rev + # The arg_rev property's implementation for Git ensures that the + # rev return value is always non-None. + assert rev is not None + + sha, is_branch = cls.get_revision_sha(dest, rev) + + if sha is not None: + rev_options = rev_options.make_new(sha) + rev_options = replace(rev_options, branch_name=(rev if is_branch else None)) + + return rev_options + + # Do not show a warning for the common case of something that has + # the form of a Git commit hash. + if not looks_like_hash(rev): + logger.info( + "Did not find branch or tag '%s', assuming revision or ref.", + rev, + ) + + if not cls._should_fetch(dest, rev): + return rev_options + + # fetch the requested revision + cls.run_command( + make_command("fetch", "-q", url, rev_options.to_args()), + cwd=dest, + ) + # Change the revision to the SHA of the ref we fetched + sha = cls.get_revision(dest, rev="FETCH_HEAD") + rev_options = rev_options.make_new(sha) + + return rev_options + + @classmethod + def is_commit_id_equal(cls, dest: str, name: str | None) -> bool: + """ + Return whether the current commit hash equals the given name. + + Args: + dest: the repository directory. + name: a string name. + """ + if not name: + # Then avoid an unnecessary subprocess call. + return False + + return cls.get_revision(dest) == name + + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: + rev_display = rev_options.to_display() + logger.info("Cloning %s%s to %s", url, rev_display, display_path(dest)) + if verbosity <= 0: + flags: tuple[str, ...] 
= ("--quiet",) + elif verbosity == 1: + flags = () + else: + flags = ("--verbose", "--progress") + if self.get_git_version() >= (2, 17): + # Git added support for partial clone in 2.17 + # https://git-scm.com/docs/partial-clone + # Speeds up cloning by functioning without a complete copy of repository + self.run_command( + make_command( + "clone", + "--filter=blob:none", + *flags, + url, + dest, + ) + ) + else: + self.run_command(make_command("clone", *flags, url, dest)) + + if rev_options.rev: + # Then a specific revision was requested. + rev_options = self.resolve_revision(dest, url, rev_options) + branch_name = getattr(rev_options, "branch_name", None) + logger.debug("Rev options %s, branch_name %s", rev_options, branch_name) + if branch_name is None: + # Only do a checkout if the current commit id doesn't match + # the requested revision. + if not self.is_commit_id_equal(dest, rev_options.rev): + cmd_args = make_command( + "checkout", + "-q", + rev_options.to_args(), + ) + self.run_command(cmd_args, cwd=dest) + elif self.get_current_branch(dest) != branch_name: + # Then a specific branch was requested, and that branch + # is not yet checked out. + track_branch = f"origin/{branch_name}" + cmd_args = [ + "checkout", + "-b", + branch_name, + "--track", + track_branch, + ] + self.run_command(cmd_args, cwd=dest) + else: + sha = self.get_revision(dest) + rev_options = rev_options.make_new(sha) + + logger.info("Resolved %s to commit %s", url, rev_options.rev) + + #: repo may contain submodules + self.update_submodules(dest, verbosity=verbosity) + + def switch( + self, + dest: str, + url: HiddenText, + rev_options: RevOptions, + verbosity: int = 0, + ) -> None: + self.run_command( + make_command("config", "remote.origin.url", url), + cwd=dest, + ) + + extra_flags = [] + + if verbosity <= 0: + extra_flags.append("-q") + + cmd_args = make_command("checkout", *extra_flags, rev_options.to_args()) + self.run_command(cmd_args, cwd=dest) + + self.update_submodules(dest, verbosity=verbosity) + + def update( + self, + dest: str, + url: HiddenText, + rev_options: RevOptions, + verbosity: int = 0, + ) -> None: + extra_flags = [] + + if verbosity <= 0: + extra_flags.append("-q") + + # First fetch changes from the default remote + if self.get_git_version() >= (1, 9): + # fetch tags in addition to everything else + self.run_command(["fetch", "--tags", *extra_flags], cwd=dest) + else: + self.run_command(["fetch", *extra_flags], cwd=dest) + # Then reset to wanted revision (maybe even origin/master) + rev_options = self.resolve_revision(dest, url, rev_options) + cmd_args = make_command( + "reset", + "--hard", + *extra_flags, + rev_options.to_args(), + ) + self.run_command(cmd_args, cwd=dest) + #: update submodules + self.update_submodules(dest, verbosity=verbosity) + + @classmethod + def get_remote_url(cls, location: str) -> str: + """ + Return URL of the first remote encountered. + + Raises RemoteNotFoundError if the repository does not have a remote + url configured. + """ + # We need to pass 1 for extra_ok_returncodes since the command + # exits with return code 1 if there are no matching lines. 
+        stdout = cls.run_command(
+            ["config", "--get-regexp", r"remote\..*\.url"],
+            extra_ok_returncodes=(1,),
+            show_stdout=False,
+            stdout_only=True,
+            cwd=location,
+        )
+        remotes = stdout.splitlines()
+        try:
+            found_remote = remotes[0]
+        except IndexError:
+            raise RemoteNotFoundError
+
+        for remote in remotes:
+            if remote.startswith("remote.origin.url "):
+                found_remote = remote
+                break
+        url = found_remote.split(" ")[1]
+        return cls._git_remote_to_pip_url(url.strip())
+
+    @staticmethod
+    def _git_remote_to_pip_url(url: str) -> str:
+        """
+        Convert a remote url from what git uses to what pip accepts.
+
+        There are 3 legal forms **url** may take:
+
+            1. A fully qualified url: ssh://git@example.com/foo/bar.git
+            2. A local project.git folder: /path/to/bare/repository.git
+            3. SCP shorthand for form 1: git@example.com:foo/bar.git
+
+        Form 1 is output as-is. Form 2 must be converted to a URI and form 3
+        must be converted to form 1.
+
+        See the corresponding test test_git_remote_url_to_pip() for examples of
+        sample inputs/outputs.
+        """
+        if re.match(r"\w+://", url):
+            # This is already valid. Pass it through as-is.
+            return url
+        if os.path.exists(url):
+            # A local bare remote (git clone --mirror).
+            # Needs a file:// prefix.
+            return pathlib.PurePath(url).as_uri()
+        scp_match = SCP_REGEX.match(url)
+        if scp_match:
+            # Add an ssh:// prefix and replace the ':' with a '/'.
+            return scp_match.expand(r"ssh://\1\2/\3")
+        # Otherwise, bail out.
+        raise RemoteNotValidError(url)
+
+    @classmethod
+    def has_commit(cls, location: str, rev: str) -> bool:
+        """
+        Check if rev is a commit that is available in the local repository.
+        """
+        try:
+            cls.run_command(
+                ["rev-parse", "-q", "--verify", "sha^" + rev],
+                cwd=location,
+                log_failed_cmd=False,
+            )
+        except InstallationError:
+            return False
+        else:
+            return True
+
+    @classmethod
+    def get_revision(cls, location: str, rev: str | None = None) -> str:
+        if rev is None:
+            rev = "HEAD"
+        current_rev = cls.run_command(
+            ["rev-parse", rev],
+            show_stdout=False,
+            stdout_only=True,
+            cwd=location,
+        )
+        return current_rev.strip()
+
+    @classmethod
+    def get_subdirectory(cls, location: str) -> str | None:
+        """
+        Return the path to Python project root, relative to the repo root.
+        Return None if the project root is in the repo root.
+        """
+        # find the repo root
+        git_dir = cls.run_command(
+            ["rev-parse", "--git-dir"],
+            show_stdout=False,
+            stdout_only=True,
+            cwd=location,
+        ).strip()
+        if not os.path.isabs(git_dir):
+            git_dir = os.path.join(location, git_dir)
+        repo_root = os.path.abspath(os.path.join(git_dir, ".."))
+        return find_path_to_project_root_from_repo_root(location, repo_root)
+
+    @classmethod
+    def get_url_rev_and_auth(cls, url: str) -> tuple[str, str | None, AuthInfo]:
+        """
+        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
+        That's required because although they use SSH they sometimes don't
+        work with a ssh:// scheme (e.g. GitHub). But we need a scheme for
+        parsing. Hence we remove it again afterwards and return it as a stub.
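+
+        Editor's illustration (hypothetical URL): 'git+git@github.com:org/repo.git@v1.0'
+        is temporarily rewritten to 'git+ssh://git@github.com:org/repo.git@v1.0'
+        so the base class can parse it; the 'ssh://' prefix is then stripped
+        from the returned URL and the rev 'v1.0' is split out separately.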
+ """ + # Works around an apparent Git bug + # (see https://article.gmane.org/gmane.comp.version-control.git/146500) + scheme, netloc, path, query, fragment = urlsplit(url) + if scheme.endswith("file"): + initial_slashes = path[: -len(path.lstrip("/"))] + newpath = initial_slashes + urllib.request.url2pathname(path).replace( + "\\", "/" + ).lstrip("/") + after_plus = scheme.find("+") + 1 + url = scheme[:after_plus] + urlunsplit( + (scheme[after_plus:], netloc, newpath, query, fragment), + ) + + if "://" not in url: + assert "file:" not in url + url = url.replace("git+", "git+ssh://") + url, rev, user_pass = super().get_url_rev_and_auth(url) + url = url.replace("ssh://", "") + else: + url, rev, user_pass = super().get_url_rev_and_auth(url) + + return url, rev, user_pass + + @classmethod + def update_submodules(cls, location: str, verbosity: int = 0) -> None: + argv = ["submodule", "update", "--init", "--recursive"] + + if verbosity <= 0: + argv.append("-q") + + if not os.path.exists(os.path.join(location, ".gitmodules")): + return + cls.run_command( + argv, + cwd=location, + ) + + @classmethod + def get_repository_root(cls, location: str) -> str | None: + loc = super().get_repository_root(location) + if loc: + return loc + try: + r = cls.run_command( + ["rev-parse", "--show-toplevel"], + cwd=location, + show_stdout=False, + stdout_only=True, + on_returncode="raise", + log_failed_cmd=False, + ) + except BadCommand: + logger.debug( + "could not determine if %s is under git control " + "because git is not available", + location, + ) + return None + except InstallationError: + return None + return os.path.normpath(r.rstrip("\r\n")) + + @staticmethod + def should_add_vcs_url_prefix(repo_url: str) -> bool: + """In either https or ssh form, requirements must be prefixed with git+.""" + return True + + +vcs.register(Git) diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/mercurial.py b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/mercurial.py new file mode 100644 index 0000000..c875803 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/mercurial.py @@ -0,0 +1,186 @@ +from __future__ import annotations + +import configparser +import logging +import os + +from pip._internal.exceptions import BadCommand, InstallationError +from pip._internal.utils.misc import HiddenText, display_path +from pip._internal.utils.subprocess import make_command +from pip._internal.utils.urls import path_to_url +from pip._internal.vcs.versioncontrol import ( + RevOptions, + VersionControl, + find_path_to_project_root_from_repo_root, + vcs, +) + +logger = logging.getLogger(__name__) + + +class Mercurial(VersionControl): + name = "hg" + dirname = ".hg" + repo_name = "clone" + schemes = ( + "hg+file", + "hg+http", + "hg+https", + "hg+ssh", + "hg+static-http", + ) + + @staticmethod + def get_base_rev_args(rev: str) -> list[str]: + return [f"--rev={rev}"] + + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: + rev_display = rev_options.to_display() + logger.info( + "Cloning hg %s%s to %s", + url, + rev_display, + display_path(dest), + ) + if verbosity <= 0: + flags: tuple[str, ...] 
= ("--quiet",)
+        elif verbosity == 1:
+            flags = ()
+        elif verbosity == 2:
+            flags = ("--verbose",)
+        else:
+            flags = ("--verbose", "--debug")
+        self.run_command(make_command("clone", "--noupdate", *flags, url, dest))
+        self.run_command(
+            make_command("update", *flags, rev_options.to_args()),
+            cwd=dest,
+        )
+
+    def switch(
+        self,
+        dest: str,
+        url: HiddenText,
+        rev_options: RevOptions,
+        verbosity: int = 0,
+    ) -> None:
+        extra_flags = []
+        repo_config = os.path.join(dest, self.dirname, "hgrc")
+        config = configparser.RawConfigParser()
+
+        if verbosity <= 0:
+            extra_flags.append("-q")
+
+        try:
+            config.read(repo_config)
+            config.set("paths", "default", url.secret)
+            with open(repo_config, "w") as config_file:
+                config.write(config_file)
+        except (OSError, configparser.NoSectionError) as exc:
+            logger.warning("Could not switch Mercurial repository to %s: %s", url, exc)
+        else:
+            cmd_args = make_command("update", *extra_flags, rev_options.to_args())
+            self.run_command(cmd_args, cwd=dest)
+
+    def update(
+        self,
+        dest: str,
+        url: HiddenText,
+        rev_options: RevOptions,
+        verbosity: int = 0,
+    ) -> None:
+        extra_flags = []
+
+        if verbosity <= 0:
+            extra_flags.append("-q")
+
+        self.run_command(["pull", *extra_flags], cwd=dest)
+        cmd_args = make_command("update", *extra_flags, rev_options.to_args())
+        self.run_command(cmd_args, cwd=dest)
+
+    @classmethod
+    def get_remote_url(cls, location: str) -> str:
+        url = cls.run_command(
+            ["showconfig", "paths.default"],
+            show_stdout=False,
+            stdout_only=True,
+            cwd=location,
+        ).strip()
+        if cls._is_local_repository(url):
+            url = path_to_url(url)
+        return url.strip()
+
+    @classmethod
+    def get_revision(cls, location: str) -> str:
+        """
+        Return the repository-local changeset revision number, as a string.
+        """
+        current_revision = cls.run_command(
+            ["parents", "--template={rev}"],
+            show_stdout=False,
+            stdout_only=True,
+            cwd=location,
+        ).strip()
+        return current_revision
+
+    @classmethod
+    def get_requirement_revision(cls, location: str) -> str:
+        """
+        Return the changeset identification hash, as a 40-character
+        hexadecimal string.
+        """
+        current_rev_hash = cls.run_command(
+            ["parents", "--template={node}"],
+            show_stdout=False,
+            stdout_only=True,
+            cwd=location,
+        ).strip()
+        return current_rev_hash
+
+    @classmethod
+    def is_commit_id_equal(cls, dest: str, name: str | None) -> bool:
+        """Always assume the versions don't match"""
+        return False
+
+    @classmethod
+    def get_subdirectory(cls, location: str) -> str | None:
+        """
+        Return the path to Python project root, relative to the repo root.
+        Return None if the project root is in the repo root.
+        """
+        # find the repo root
+        repo_root = cls.run_command(
+            ["root"], show_stdout=False, stdout_only=True, cwd=location
+        ).strip()
+        if not os.path.isabs(repo_root):
+            repo_root = os.path.abspath(os.path.join(location, repo_root))
+        return find_path_to_project_root_from_repo_root(location, repo_root)
+
+    @classmethod
+    def get_repository_root(cls, location: str) -> str | None:
+        loc = super().get_repository_root(location)
+        if loc:
+            return loc
+        try:
+            r = cls.run_command(
+                ["root"],
+                cwd=location,
+                show_stdout=False,
+                stdout_only=True,
+                on_returncode="raise",
+                log_failed_cmd=False,
+            )
+        except BadCommand:
+            logger.debug(
+                "could not determine if %s is under hg control "
+                "because hg is not available",
+                location,
+            )
+            return None
+        except InstallationError:
+            return None
+        return os.path.normpath(r.rstrip("\r\n"))
+
+
+vcs.register(Mercurial)
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/subversion.py b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/subversion.py
new file mode 100644
index 0000000..579f428
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/subversion.py
@@ -0,0 +1,335 @@
+from __future__ import annotations
+
+import logging
+import os
+import re
+
+from pip._internal.utils.misc import (
+    HiddenText,
+    display_path,
+    is_console_interactive,
+    is_installable_dir,
+    split_auth_from_netloc,
+)
+from pip._internal.utils.subprocess import CommandArgs, make_command
+from pip._internal.vcs.versioncontrol import (
+    AuthInfo,
+    RemoteNotFoundError,
+    RevOptions,
+    VersionControl,
+    vcs,
+)
+
+logger = logging.getLogger(__name__)
+
+_svn_xml_url_re = re.compile('url="([^"]+)"')
+_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
+_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
+_svn_info_xml_url_re = re.compile(r"<url>(.*)</url>")
+
+
+class Subversion(VersionControl):
+    name = "svn"
+    dirname = ".svn"
+    repo_name = "checkout"
+    schemes = ("svn+ssh", "svn+http", "svn+https", "svn+svn", "svn+file")
+
+    @classmethod
+    def should_add_vcs_url_prefix(cls, remote_url: str) -> bool:
+        return True
+
+    @staticmethod
+    def get_base_rev_args(rev: str) -> list[str]:
+        return ["-r", rev]
+
+    @classmethod
+    def get_revision(cls, location: str) -> str:
+        """
+        Return the maximum revision for all files under a given location
+        """
+        # Note: taken from setuptools.command.egg_info
+        revision = 0
+
+        for base, dirs, _ in os.walk(location):
+            if cls.dirname not in dirs:
+                dirs[:] = []
+                continue  # no sense walking uncontrolled subdirs
+            dirs.remove(cls.dirname)
+            entries_fn = os.path.join(base, cls.dirname, "entries")
+            if not os.path.exists(entries_fn):
+                # FIXME: should we warn?
+                continue
+
+            dirurl, localrev = cls._get_svn_url_rev(base)
+
+            if base == location:
+                assert dirurl is not None
+                base = dirurl + "/"  # save the root url
+            elif not dirurl or not dirurl.startswith(base):
+                dirs[:] = []
+                continue  # not part of the same svn tree, skip it
+            revision = max(revision, localrev)
+        return str(revision)
+
+    @classmethod
+    def get_netloc_and_auth(
+        cls, netloc: str, scheme: str
+    ) -> tuple[str, tuple[str | None, str | None]]:
+        """
+        This override allows the auth information to be passed to svn via the
+        --username and --password options instead of via the URL.
+        """
+        if scheme == "ssh":
+            # The --username and --password options can't be used for
+            # svn+ssh URLs, so keep the auth information in the URL.
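+            # Editor's note (hypothetical netloc): 'user:pass@svn.example.com'
+            # is returned unchanged here, whereas the non-ssh branch below
+            # would split it into ('svn.example.com', ('user', 'pass')).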
+            return super().get_netloc_and_auth(netloc, scheme)
+
+        return split_auth_from_netloc(netloc)
+
+    @classmethod
+    def get_url_rev_and_auth(cls, url: str) -> tuple[str, str | None, AuthInfo]:
+        # hotfix the URL scheme after removing svn+ from svn+ssh:// re-add it
+        url, rev, user_pass = super().get_url_rev_and_auth(url)
+        if url.startswith("ssh://"):
+            url = "svn+" + url
+        return url, rev, user_pass
+
+    @staticmethod
+    def make_rev_args(username: str | None, password: HiddenText | None) -> CommandArgs:
+        extra_args: CommandArgs = []
+        if username:
+            extra_args += ["--username", username]
+        if password:
+            extra_args += ["--password", password]
+
+        return extra_args
+
+    @classmethod
+    def get_remote_url(cls, location: str) -> str:
+        # In cases where the source is in a subdirectory, we have to look up in
+        # the location until we find a valid project root.
+        orig_location = location
+        while not is_installable_dir(location):
+            last_location = location
+            location = os.path.dirname(location)
+            if location == last_location:
+                # We've traversed up to the root of the filesystem without
+                # finding a Python project.
+                logger.warning(
+                    "Could not find Python project for directory %s (tried all "
+                    "parent directories)",
+                    orig_location,
+                )
+                raise RemoteNotFoundError
+
+        url, _rev = cls._get_svn_url_rev(location)
+        if url is None:
+            raise RemoteNotFoundError
+
+        return url
+
+    @classmethod
+    def _get_svn_url_rev(cls, location: str) -> tuple[str | None, int]:
+        from pip._internal.exceptions import InstallationError
+
+        entries_path = os.path.join(location, cls.dirname, "entries")
+        if os.path.exists(entries_path):
+            with open(entries_path) as f:
+                data = f.read()
+        else:  # subversion >= 1.7 does not have the 'entries' file
+            data = ""
+
+        url = None
+        if data.startswith(("8", "9", "10")):
+            entries = list(map(str.splitlines, data.split("\n\x0c\n")))
+            del entries[0][0]  # get rid of the '8'
+            url = entries[0][3]
+            revs = [int(d[9]) for d in entries if len(d) > 9 and d[9]] + [0]
+        elif data.startswith("<?xml"):
+            match = _svn_xml_url_re.search(data)
+            if not match:
+                raise ValueError(f"Badly formatted data: {data!r}")
+            url = match.group(1)  # get repository URL
+            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
+        else:
+            try:
+                # subversion >= 1.7
+                # Note that using get_remote_call_options is not necessary here
+                # because `svn info` is being run against a local directory.
+                # We don't need to worry about making sure interactive mode
+                # is being used to prompt for passwords, because passwords
+                # are only potentially needed for remote server requests.
+                xml = cls.run_command(
+                    ["info", "--xml", location],
+                    show_stdout=False,
+                    stdout_only=True,
+                )
+                match = _svn_info_xml_url_re.search(xml)
+                assert match is not None
+                url = match.group(1)
+                revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
+            except InstallationError:
+                url, revs = None, []
+
+        if revs:
+            rev = max(revs)
+        else:
+            rev = 0
+
+        return url, rev
+
+    @classmethod
+    def is_commit_id_equal(cls, dest: str, name: str | None) -> bool:
+        """Always assume the versions don't match"""
+        return False
+
+    def __init__(self, use_interactive: bool | None = None) -> None:
+        if use_interactive is None:
+            use_interactive = is_console_interactive()
+        self.use_interactive = use_interactive
+
+        # This member is used to cache the fetched version of the current
+        # ``svn`` client.
+        # Special value definitions:
+        #   None: Not evaluated yet.
+        #   Empty tuple: Could not parse version.
+        self._vcs_version: tuple[int, ...] | None = None
+
+        super().__init__()
+
+    def call_vcs_version(self) -> tuple[int, ...]:
+        """Query the version of the currently installed Subversion client.
+ + :return: A tuple containing the parts of the version information or + ``()`` if the version returned from ``svn`` could not be parsed. + :raises: BadCommand: If ``svn`` is not installed. + """ + # Example versions: + # svn, version 1.10.3 (r1842928) + # compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0 + # svn, version 1.7.14 (r1542130) + # compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu + # svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0) + # compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2 + version_prefix = "svn, version " + version = self.run_command(["--version"], show_stdout=False, stdout_only=True) + if not version.startswith(version_prefix): + return () + + version = version[len(version_prefix) :].split()[0] + version_list = version.partition("-")[0].split(".") + try: + parsed_version = tuple(map(int, version_list)) + except ValueError: + return () + + return parsed_version + + def get_vcs_version(self) -> tuple[int, ...]: + """Return the version of the currently installed Subversion client. + + If the version of the Subversion client has already been queried, + a cached value will be used. + + :return: A tuple containing the parts of the version information or + ``()`` if the version returned from ``svn`` could not be parsed. + :raises: BadCommand: If ``svn`` is not installed. + """ + if self._vcs_version is not None: + # Use cached version, if available. + # If parsing the version failed previously (empty tuple), + # do not attempt to parse it again. + return self._vcs_version + + vcs_version = self.call_vcs_version() + self._vcs_version = vcs_version + return vcs_version + + def get_remote_call_options(self) -> CommandArgs: + """Return options to be used on calls to Subversion that contact the server. + + These options are applicable for the following ``svn`` subcommands used + in this class. + + - checkout + - switch + - update + + :return: A list of command line arguments to pass to ``svn``. + """ + if not self.use_interactive: + # --non-interactive switch is available since Subversion 0.14.4. + # Subversion < 1.8 runs in interactive mode by default. + return ["--non-interactive"] + + svn_version = self.get_vcs_version() + # By default, Subversion >= 1.8 runs in non-interactive mode if + # stdin is not a TTY. Since that is how pip invokes SVN, in + # call_subprocess(), pip must pass --force-interactive to ensure + # the user can be prompted for a password, if required. + # SVN added the --force-interactive option in SVN 1.8. Since + # e.g. RHEL/CentOS 7, which is supported until 2024, ships with + # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip + # can't safely add the option if the SVN version is < 1.8 (or unknown). 
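+        # Editor's sketch of the resulting behaviour: with use_interactive
+        # set, SVN 1.8+ yields ["--force-interactive"], while SVN 1.7 or an
+        # unparsable version (empty tuple) yields no extra options.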
+        if svn_version >= (1, 8):
+            return ["--force-interactive"]
+
+        return []
+
+    def fetch_new(
+        self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
+    ) -> None:
+        rev_display = rev_options.to_display()
+        logger.info(
+            "Checking out %s%s to %s",
+            url,
+            rev_display,
+            display_path(dest),
+        )
+        if verbosity <= 0:
+            flags = ["--quiet"]
+        else:
+            flags = []
+        cmd_args = make_command(
+            "checkout",
+            *flags,
+            self.get_remote_call_options(),
+            rev_options.to_args(),
+            url,
+            dest,
+        )
+        self.run_command(cmd_args)
+
+    def switch(
+        self,
+        dest: str,
+        url: HiddenText,
+        rev_options: RevOptions,
+        verbosity: int = 0,
+    ) -> None:
+        cmd_args = make_command(
+            "switch",
+            self.get_remote_call_options(),
+            rev_options.to_args(),
+            url,
+            dest,
+        )
+        self.run_command(cmd_args)
+
+    def update(
+        self,
+        dest: str,
+        url: HiddenText,
+        rev_options: RevOptions,
+        verbosity: int = 0,
+    ) -> None:
+        cmd_args = make_command(
+            "update",
+            self.get_remote_call_options(),
+            rev_options.to_args(),
+            dest,
+        )
+        self.run_command(cmd_args)
+
+
+vcs.register(Subversion)
diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/vcs/versioncontrol.py b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/versioncontrol.py
new file mode 100644
index 0000000..4e91ccd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_internal/vcs/versioncontrol.py
@@ -0,0 +1,693 @@
+"""Handles all VCS (version control) support"""
+
+from __future__ import annotations
+
+import logging
+import os
+import shutil
+import sys
+import urllib.parse
+from collections.abc import Iterable, Iterator, Mapping
+from dataclasses import dataclass, field
+from typing import (
+    Any,
+    Literal,
+    Optional,
+)
+
+from pip._internal.cli.spinners import SpinnerInterface
+from pip._internal.exceptions import BadCommand, InstallationError
+from pip._internal.utils.misc import (
+    HiddenText,
+    ask_path_exists,
+    backup_dir,
+    display_path,
+    hide_url,
+    hide_value,
+    is_installable_dir,
+    rmtree,
+)
+from pip._internal.utils.subprocess import (
+    CommandArgs,
+    call_subprocess,
+    format_command_args,
+    make_command,
+)
+
+__all__ = ["vcs"]
+
+
+logger = logging.getLogger(__name__)
+
+AuthInfo = tuple[Optional[str], Optional[str]]
+
+
+def is_url(name: str) -> bool:
+    """
+    Return true if the name looks like a URL.
+    """
+    scheme = urllib.parse.urlsplit(name).scheme
+    if not scheme:
+        return False
+    return scheme in ["http", "https", "file", "ftp"] + vcs.all_schemes
+
+
+def make_vcs_requirement_url(
+    repo_url: str, rev: str, project_name: str, subdir: str | None = None
+) -> str:
+    """
+    Return the URL for a VCS requirement.
+
+    Args:
+      repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
+      project_name: the (unescaped) project name.
+    """
+    egg_project_name = project_name.replace("-", "_")
+    req = f"{repo_url}@{rev}#egg={egg_project_name}"
+    if subdir:
+        req += f"&subdirectory={subdir}"
+
+    return req
+
+
+def find_path_to_project_root_from_repo_root(
+    location: str, repo_root: str
+) -> str | None:
+    """
+    Find the Python project's root by searching up the filesystem from
+    `location`. Return the path to project root relative to `repo_root`.
+    Return None if the project root is `repo_root`, or cannot be found.
+    """
+    # find project root.
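+    # Editor's walk-through (hypothetical paths): for location /repo/src/pkg
+    # and repo_root /repo, the loop climbs until it hits the first directory
+    # containing a pyproject.toml or setup.py; if that is /repo/src, the
+    # function returns "src", and if it is /repo itself, it returns None.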
+    orig_location = location
+    while not is_installable_dir(location):
+        last_location = location
+        location = os.path.dirname(location)
+        if location == last_location:
+            # We've traversed up to the root of the filesystem without
+            # finding a Python project.
+            logger.warning(
+                "Could not find a Python project for directory %s (tried all "
+                "parent directories)",
+                orig_location,
+            )
+            return None
+
+    if os.path.samefile(repo_root, location):
+        return None
+
+    return os.path.relpath(location, repo_root)
+
+
+class RemoteNotFoundError(Exception):
+    pass
+
+
+class RemoteNotValidError(Exception):
+    def __init__(self, url: str):
+        super().__init__(url)
+        self.url = url
+
+
+@dataclass(frozen=True)
+class RevOptions:
+    """
+    Encapsulates a VCS-specific revision to install, along with any VCS
+    install options.
+
+    Args:
+      vc_class: a VersionControl subclass.
+      rev: the name of the revision to install.
+      extra_args: a list of extra options.
+    """
+
+    vc_class: type[VersionControl]
+    rev: str | None = None
+    extra_args: CommandArgs = field(default_factory=list)
+    branch_name: str | None = None
+
+    def __repr__(self) -> str:
+        return f"<RevOptions {self.vc_class.name}: rev={self.rev!r}>"
+
+    @property
+    def arg_rev(self) -> str | None:
+        if self.rev is None:
+            return self.vc_class.default_arg_rev
+
+        return self.rev
+
+    def to_args(self) -> CommandArgs:
+        """
+        Return the VCS-specific command arguments.
+        """
+        args: CommandArgs = []
+        rev = self.arg_rev
+        if rev is not None:
+            args += self.vc_class.get_base_rev_args(rev)
+        args += self.extra_args
+
+        return args
+
+    def to_display(self) -> str:
+        if not self.rev:
+            return ""
+
+        return f" (to revision {self.rev})"
+
+    def make_new(self, rev: str) -> RevOptions:
+        """
+        Make a copy of the current instance, but with a new rev.
+
+        Args:
+          rev: the name of the revision for the new object.
+        """
+        return self.vc_class.make_rev_options(rev, extra_args=self.extra_args)
+
+
+class VcsSupport:
+    _registry: dict[str, VersionControl] = {}
+    schemes = ["ssh", "git", "hg", "bzr", "sftp", "svn"]
+
+    def __init__(self) -> None:
+        # Register more schemes with urlparse for various version control
+        # systems
+        urllib.parse.uses_netloc.extend(self.schemes)
+        super().__init__()
+
+    def __iter__(self) -> Iterator[str]:
+        return self._registry.__iter__()
+
+    @property
+    def backends(self) -> list[VersionControl]:
+        return list(self._registry.values())
+
+    @property
+    def dirnames(self) -> list[str]:
+        return [backend.dirname for backend in self.backends]
+
+    @property
+    def all_schemes(self) -> list[str]:
+        schemes: list[str] = []
+        for backend in self.backends:
+            schemes.extend(backend.schemes)
+        return schemes
+
+    def register(self, cls: type[VersionControl]) -> None:
+        if not hasattr(cls, "name"):
+            logger.warning("Cannot register VCS %s", cls.__name__)
+            return
+        if cls.name not in self._registry:
+            self._registry[cls.name] = cls()
+            logger.debug("Registered VCS backend: %s", cls.name)
+
+    def unregister(self, name: str) -> None:
+        if name in self._registry:
+            del self._registry[name]
+
+    def get_backend_for_dir(self, location: str) -> VersionControl | None:
+        """
+        Return a VersionControl object if a repository of that type is found
+        at the given directory.
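+
+        For example (editor's illustration), a Git checkout nested inside a
+        Subversion working copy resolves to the Git backend, because the
+        innermost repository root wins.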
+ """ + vcs_backends = {} + for vcs_backend in self._registry.values(): + repo_path = vcs_backend.get_repository_root(location) + if not repo_path: + continue + logger.debug("Determine that %s uses VCS: %s", location, vcs_backend.name) + vcs_backends[repo_path] = vcs_backend + + if not vcs_backends: + return None + + # Choose the VCS in the inner-most directory. Since all repository + # roots found here would be either `location` or one of its + # parents, the longest path should have the most path components, + # i.e. the backend representing the inner-most repository. + inner_most_repo_path = max(vcs_backends, key=len) + return vcs_backends[inner_most_repo_path] + + def get_backend_for_scheme(self, scheme: str) -> VersionControl | None: + """ + Return a VersionControl object or None. + """ + for vcs_backend in self._registry.values(): + if scheme in vcs_backend.schemes: + return vcs_backend + return None + + def get_backend(self, name: str) -> VersionControl | None: + """ + Return a VersionControl object or None. + """ + name = name.lower() + return self._registry.get(name) + + +vcs = VcsSupport() + + +class VersionControl: + name = "" + dirname = "" + repo_name = "" + # List of supported schemes for this Version Control + schemes: tuple[str, ...] = () + # Iterable of environment variable names to pass to call_subprocess(). + unset_environ: tuple[str, ...] = () + default_arg_rev: str | None = None + + @classmethod + def should_add_vcs_url_prefix(cls, remote_url: str) -> bool: + """ + Return whether the vcs prefix (e.g. "git+") should be added to a + repository's remote url when used in a requirement. + """ + return not remote_url.lower().startswith(f"{cls.name}:") + + @classmethod + def get_subdirectory(cls, location: str) -> str | None: + """ + Return the path to Python project root, relative to the repo root. + Return None if the project root is in the repo root. + """ + return None + + @classmethod + def get_requirement_revision(cls, repo_dir: str) -> str: + """ + Return the revision string that should be used in a requirement. + """ + return cls.get_revision(repo_dir) + + @classmethod + def get_src_requirement(cls, repo_dir: str, project_name: str) -> str: + """ + Return the requirement string to use to redownload the files + currently at the given repository directory. + + Args: + project_name: the (unescaped) project name. + + The return value has a form similar to the following: + + {repository_url}@{revision}#egg={project_name} + """ + repo_url = cls.get_remote_url(repo_dir) + + if cls.should_add_vcs_url_prefix(repo_url): + repo_url = f"{cls.name}+{repo_url}" + + revision = cls.get_requirement_revision(repo_dir) + subdir = cls.get_subdirectory(repo_dir) + req = make_vcs_requirement_url(repo_url, revision, project_name, subdir=subdir) + + return req + + @staticmethod + def get_base_rev_args(rev: str) -> list[str]: + """ + Return the base revision arguments for a vcs command. + + Args: + rev: the name of a revision to install. Cannot be None. + """ + raise NotImplementedError + + def is_immutable_rev_checkout(self, url: str, dest: str) -> bool: + """ + Return true if the commit hash checked out at dest matches + the revision in url. + + Always return False, if the VCS does not support immutable commit + hashes. + + This method does not check if there are local uncommitted changes + in dest after checkout, as pip currently has no use case for that. 
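+
+        For example (editor's illustration), the Git subclass reports True
+        only when the URL pins a full commit hash that is already checked
+        out and is not also a branch or tag name.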
+        """
+        return False
+
+    @classmethod
+    def make_rev_options(
+        cls, rev: str | None = None, extra_args: CommandArgs | None = None
+    ) -> RevOptions:
+        """
+        Return a RevOptions object.
+
+        Args:
+          rev: the name of a revision to install.
+          extra_args: a list of extra options.
+        """
+        return RevOptions(cls, rev, extra_args=extra_args or [])
+
+    @classmethod
+    def _is_local_repository(cls, repo: str) -> bool:
+        """
+        posix absolute paths start with os.path.sep,
+        win32 ones start with drive (like c:\\folder)
+        """
+        drive, tail = os.path.splitdrive(repo)
+        return repo.startswith(os.path.sep) or bool(drive)
+
+    @classmethod
+    def get_netloc_and_auth(
+        cls, netloc: str, scheme: str
+    ) -> tuple[str, tuple[str | None, str | None]]:
+        """
+        Parse the repository URL's netloc, and return the new netloc to use
+        along with auth information.
+
+        Args:
+          netloc: the original repository URL netloc.
+          scheme: the repository URL's scheme without the vcs prefix.
+
+        This is mainly for the Subversion class to override, so that auth
+        information can be provided via the --username and --password options
+        instead of through the URL. For other subclasses like Git without
+        such an option, auth information must stay in the URL.
+
+        Returns: (netloc, (username, password)).
+        """
+        return netloc, (None, None)
+
+    @classmethod
+    def get_url_rev_and_auth(cls, url: str) -> tuple[str, str | None, AuthInfo]:
+        """
+        Parse the repository URL to use, and return the URL, revision,
+        and auth info to use.
+
+        Returns: (url, rev, (username, password)).
+        """
+        scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
+        if "+" not in scheme:
+            raise ValueError(
+                f"Sorry, {url!r} is a malformed VCS url. "
+                "The format is <vcs>+<protocol>://<url>, "
+                "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
+            )
+        # Remove the vcs prefix.
+        scheme = scheme.split("+", 1)[1]
+        netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme)
+        rev = None
+        if "@" in path:
+            path, rev = path.rsplit("@", 1)
+            if not rev:
+                raise InstallationError(
+                    f"The URL {url!r} has an empty revision (after @) "
+                    "which is not supported. Include a revision after @ "
+                    "or remove @ from the URL."
+                )
+        url = urllib.parse.urlunsplit((scheme, netloc, path, query, ""))
+        return url, rev, user_pass
+
+    @staticmethod
+    def make_rev_args(username: str | None, password: HiddenText | None) -> CommandArgs:
+        """
+        Return the RevOptions "extra arguments" to use in obtain().
+        """
+        return []
+
+    def get_url_rev_options(self, url: HiddenText) -> tuple[HiddenText, RevOptions]:
+        """
+        Return the URL and RevOptions object to use in obtain(),
+        as a tuple (url, rev_options).
+        """
+        secret_url, rev, user_pass = self.get_url_rev_and_auth(url.secret)
+        username, secret_password = user_pass
+        password: HiddenText | None = None
+        if secret_password is not None:
+            password = hide_value(secret_password)
+        extra_args = self.make_rev_args(username, password)
+        rev_options = self.make_rev_options(rev, extra_args=extra_args)
+
+        return hide_url(secret_url), rev_options
+
+    @staticmethod
+    def normalize_url(url: str) -> str:
+        """
+        Normalize a URL for comparison by unquoting it and removing any
+        trailing slash.
+        """
+        return urllib.parse.unquote(url).rstrip("/")
+
+    @classmethod
+    def compare_urls(cls, url1: str, url2: str) -> bool:
+        """
+        Compare two repo URLs for identity, ignoring incidental differences.
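+
+        For example (editor's illustration), "https://example.com/repo/" and
+        "https://example.com/repo" compare equal after unquoting and
+        stripping the trailing slash.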
+ """ + return cls.normalize_url(url1) == cls.normalize_url(url2) + + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: + """ + Fetch a revision from a repository, in the case that this is the + first fetch from the repository. + + Args: + dest: the directory to fetch the repository to. + rev_options: a RevOptions object. + verbosity: verbosity level. + """ + raise NotImplementedError + + def switch( + self, + dest: str, + url: HiddenText, + rev_options: RevOptions, + verbosity: int = 0, + ) -> None: + """ + Switch the repo at ``dest`` to point to ``URL``. + + Args: + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def update( + self, + dest: str, + url: HiddenText, + rev_options: RevOptions, + verbosity: int = 0, + ) -> None: + """ + Update an already-existing repo to the given ``rev_options``. + + Args: + rev_options: a RevOptions object. + """ + raise NotImplementedError + + @classmethod + def is_commit_id_equal(cls, dest: str, name: str | None) -> bool: + """ + Return whether the id of the current commit equals the given name. + + Args: + dest: the repository directory. + name: a string name. + """ + raise NotImplementedError + + def obtain(self, dest: str, url: HiddenText, verbosity: int) -> None: + """ + Install or update in editable mode the package represented by this + VersionControl object. + + :param dest: the repository directory in which to install or update. + :param url: the repository URL starting with a vcs prefix. + :param verbosity: verbosity level. + """ + url, rev_options = self.get_url_rev_options(url) + + if not os.path.exists(dest): + self.fetch_new(dest, url, rev_options, verbosity=verbosity) + return + + rev_display = rev_options.to_display() + if self.is_repository_directory(dest): + existing_url = self.get_remote_url(dest) + if self.compare_urls(existing_url, url.secret): + logger.debug( + "%s in %s exists, and has correct URL (%s)", + self.repo_name.title(), + display_path(dest), + url, + ) + if not self.is_commit_id_equal(dest, rev_options.rev): + logger.info( + "Updating %s %s%s", + display_path(dest), + self.repo_name, + rev_display, + ) + self.update(dest, url, rev_options, verbosity=verbosity) + else: + logger.info("Skipping because already up-to-date.") + return + + logger.warning( + "%s %s in %s exists with URL %s", + self.name, + self.repo_name, + display_path(dest), + existing_url, + ) + prompt = ("(s)witch, (i)gnore, (w)ipe, (b)ackup ", ("s", "i", "w", "b")) + else: + logger.warning( + "Directory %s already exists, and is not a %s %s.", + dest, + self.name, + self.repo_name, + ) + # https://github.com/python/mypy/issues/1174 + prompt = ("(i)gnore, (w)ipe, (b)ackup ", ("i", "w", "b")) # type: ignore + + logger.warning( + "The plan is to install the %s repository %s", + self.name, + url, + ) + response = ask_path_exists(f"What to do? {prompt[0]}", prompt[1]) + + if response == "a": + sys.exit(-1) + + if response == "w": + logger.warning("Deleting %s", display_path(dest)) + rmtree(dest) + self.fetch_new(dest, url, rev_options, verbosity=verbosity) + return + + if response == "b": + dest_dir = backup_dir(dest) + logger.warning("Backing up %s to %s", display_path(dest), dest_dir) + shutil.move(dest, dest_dir) + self.fetch_new(dest, url, rev_options, verbosity=verbosity) + return + + # Do nothing if the response is "i". 
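+        # Editor's note: the wipe and backup branches returned above and an
+        # abort response exits, so only "i" (do nothing) and "s" (switch)
+        # fall through to here.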
+ if response == "s": + logger.info( + "Switching %s %s to %s%s", + self.repo_name, + display_path(dest), + url, + rev_display, + ) + self.switch(dest, url, rev_options, verbosity=verbosity) + + def unpack(self, location: str, url: HiddenText, verbosity: int) -> None: + """ + Clean up current location and download the url repository + (and vcs infos) into location + + :param url: the repository URL starting with a vcs prefix. + :param verbosity: verbosity level. + """ + if os.path.exists(location): + rmtree(location) + self.obtain(location, url=url, verbosity=verbosity) + + @classmethod + def get_remote_url(cls, location: str) -> str: + """ + Return the url used at location + + Raises RemoteNotFoundError if the repository does not have a remote + url configured. + """ + raise NotImplementedError + + @classmethod + def get_revision(cls, location: str) -> str: + """ + Return the current commit id of the files at the given location. + """ + raise NotImplementedError + + @classmethod + def run_command( + cls, + cmd: list[str] | CommandArgs, + show_stdout: bool = True, + cwd: str | None = None, + on_returncode: Literal["raise", "warn", "ignore"] = "raise", + extra_ok_returncodes: Iterable[int] | None = None, + command_desc: str | None = None, + extra_environ: Mapping[str, Any] | None = None, + spinner: SpinnerInterface | None = None, + log_failed_cmd: bool = True, + stdout_only: bool = False, + ) -> str: + """ + Run a VCS subcommand + This is simply a wrapper around call_subprocess that adds the VCS + command name, and checks that the VCS is available + """ + cmd = make_command(cls.name, *cmd) + if command_desc is None: + command_desc = format_command_args(cmd) + try: + return call_subprocess( + cmd, + show_stdout, + cwd, + on_returncode=on_returncode, + extra_ok_returncodes=extra_ok_returncodes, + command_desc=command_desc, + extra_environ=extra_environ, + unset_environ=cls.unset_environ, + spinner=spinner, + log_failed_cmd=log_failed_cmd, + stdout_only=stdout_only, + ) + except NotADirectoryError: + raise BadCommand(f"Cannot find command {cls.name!r} - invalid PATH") + except FileNotFoundError: + # errno.ENOENT = no such file or directory + # In other words, the VCS executable isn't available + raise BadCommand( + f"Cannot find command {cls.name!r} - do you have " + f"{cls.name!r} installed and in your PATH?" + ) + except PermissionError: + # errno.EACCES = Permission denied + # This error occurs, for instance, when the command is installed + # only for another user. So, the current user don't have + # permission to call the other user command. + raise BadCommand( + f"No permission to execute {cls.name!r} - install it " + f"locally, globally (ask admin), or check your PATH. " + f"See possible solutions at " + f"https://pip.pypa.io/en/latest/reference/pip_freeze/" + f"#fixing-permission-denied." + ) + + @classmethod + def is_repository_directory(cls, path: str) -> bool: + """ + Return whether a directory path is a repository directory. + """ + logger.debug("Checking in %s for %s (%s)...", path, cls.dirname, cls.name) + return os.path.exists(os.path.join(path, cls.dirname)) + + @classmethod + def get_repository_root(cls, location: str) -> str | None: + """ + Return the "root" (top-level) directory controlled by the vcs, + or `None` if the directory is not in any. + + It is meant to be overridden to implement smarter detection + mechanisms for specific vcs. + + This can do more than is_repository_directory() alone. For + example, the Git override checks that Git is actually available. 
+ """ + if cls.is_repository_directory(location): + return location + return None diff --git a/.venv/lib/python3.12/site-packages/pip/_internal/wheel_builder.py b/.venv/lib/python3.12/site-packages/pip/_internal/wheel_builder.py new file mode 100644 index 0000000..beed02f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_internal/wheel_builder.py @@ -0,0 +1,334 @@ +"""Orchestrator for building wheels from InstallRequirements.""" + +from __future__ import annotations + +import logging +import os.path +import re +import shutil +from collections.abc import Iterable + +from pip._vendor.packaging.utils import canonicalize_name, canonicalize_version +from pip._vendor.packaging.version import InvalidVersion, Version + +from pip._internal.cache import WheelCache +from pip._internal.exceptions import InvalidWheelFilename, UnsupportedWheel +from pip._internal.metadata import FilesystemWheel, get_wheel_distribution +from pip._internal.models.link import Link +from pip._internal.models.wheel import Wheel +from pip._internal.operations.build.wheel import build_wheel_pep517 +from pip._internal.operations.build.wheel_editable import build_wheel_editable +from pip._internal.operations.build.wheel_legacy import build_wheel_legacy +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ensure_dir, hash_file +from pip._internal.utils.setuptools_build import make_setuptools_clean_args +from pip._internal.utils.subprocess import call_subprocess +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.urls import path_to_url +from pip._internal.vcs import vcs + +logger = logging.getLogger(__name__) + +_egg_info_re = re.compile(r"([a-z0-9_.]+)-([a-z0-9_.!+-]+)", re.IGNORECASE) + +BuildResult = tuple[list[InstallRequirement], list[InstallRequirement]] + + +def _contains_egg_info(s: str) -> bool: + """Determine whether the string looks like an egg_info. + + :param s: The string to parse. E.g. foo-2.1 + """ + return bool(_egg_info_re.search(s)) + + +def _should_build( + req: InstallRequirement, +) -> bool: + """Return whether an InstallRequirement should be built into a wheel.""" + assert not req.constraint + + if req.is_wheel: + return False + + assert req.source_dir + + if req.editable: + # we only build PEP 660 editable requirements + return req.supports_pyproject_editable + + return True + + +def should_build_for_install_command( + req: InstallRequirement, +) -> bool: + return _should_build(req) + + +def _should_cache( + req: InstallRequirement, +) -> bool | None: + """ + Return whether a built InstallRequirement can be stored in the persistent + wheel cache, assuming the wheel cache is available, and _should_build() + has determined a wheel needs to be built. + """ + if req.editable or not req.source_dir: + # never cache editable requirements + return False + + if req.link and req.link.is_vcs: + # VCS checkout. Do not cache + # unless it points to an immutable commit hash. + assert not req.editable + assert req.source_dir + vcs_backend = vcs.get_backend_for_scheme(req.link.scheme) + assert vcs_backend + if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir): + return True + return False + + assert req.link + base, ext = req.link.splitext() + if _contains_egg_info(base): + return True + + # Otherwise, do not cache. 
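+    # Editor's illustration (hypothetical links): "foo-2.1.tar.gz" is
+    # cacheable because "foo-2.1" parses as name-version, while a plain
+    # "foo.zip" has no version part and is rebuilt each time.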
+ return False + + +def _get_cache_dir( + req: InstallRequirement, + wheel_cache: WheelCache, +) -> str: + """Return the persistent or temporary cache directory where the built + wheel need to be stored. + """ + cache_available = bool(wheel_cache.cache_dir) + assert req.link + if cache_available and _should_cache(req): + cache_dir = wheel_cache.get_path_for_link(req.link) + else: + cache_dir = wheel_cache.get_ephem_path_for_link(req.link) + return cache_dir + + +def _verify_one(req: InstallRequirement, wheel_path: str) -> None: + canonical_name = canonicalize_name(req.name or "") + w = Wheel(os.path.basename(wheel_path)) + if canonicalize_name(w.name) != canonical_name: + raise InvalidWheelFilename( + f"Wheel has unexpected file name: expected {canonical_name!r}, " + f"got {w.name!r}", + ) + dist = get_wheel_distribution(FilesystemWheel(wheel_path), canonical_name) + dist_verstr = str(dist.version) + if canonicalize_version(dist_verstr) != canonicalize_version(w.version): + raise InvalidWheelFilename( + f"Wheel has unexpected file name: expected {dist_verstr!r}, " + f"got {w.version!r}", + ) + metadata_version_value = dist.metadata_version + if metadata_version_value is None: + raise UnsupportedWheel("Missing Metadata-Version") + try: + metadata_version = Version(metadata_version_value) + except InvalidVersion: + msg = f"Invalid Metadata-Version: {metadata_version_value}" + raise UnsupportedWheel(msg) + if metadata_version >= Version("1.2") and not isinstance(dist.version, Version): + raise UnsupportedWheel( + f"Metadata 1.2 mandates PEP 440 version, but {dist_verstr!r} is not" + ) + + +def _build_one( + req: InstallRequirement, + output_dir: str, + verify: bool, + build_options: list[str], + global_options: list[str], + editable: bool, +) -> str | None: + """Build one wheel. + + :return: The filename of the built wheel, or None if the build failed. 
+ """ + artifact = "editable" if editable else "wheel" + try: + ensure_dir(output_dir) + except OSError as e: + logger.warning( + "Building %s for %s failed: %s", + artifact, + req.name, + e, + ) + return None + + # Install build deps into temporary directory (PEP 518) + with req.build_env: + wheel_path = _build_one_inside_env( + req, output_dir, build_options, global_options, editable + ) + if wheel_path and verify: + try: + _verify_one(req, wheel_path) + except (InvalidWheelFilename, UnsupportedWheel) as e: + logger.warning("Built %s for %s is invalid: %s", artifact, req.name, e) + return None + return wheel_path + + +def _build_one_inside_env( + req: InstallRequirement, + output_dir: str, + build_options: list[str], + global_options: list[str], + editable: bool, +) -> str | None: + with TempDirectory(kind="wheel") as temp_dir: + assert req.name + if req.use_pep517: + assert req.metadata_directory + assert req.pep517_backend + if global_options: + logger.warning( + "Ignoring --global-option when building %s using PEP 517", req.name + ) + if build_options: + logger.warning( + "Ignoring --build-option when building %s using PEP 517", req.name + ) + if editable: + wheel_path = build_wheel_editable( + name=req.name, + backend=req.pep517_backend, + metadata_directory=req.metadata_directory, + tempd=temp_dir.path, + ) + else: + wheel_path = build_wheel_pep517( + name=req.name, + backend=req.pep517_backend, + metadata_directory=req.metadata_directory, + tempd=temp_dir.path, + ) + else: + wheel_path = build_wheel_legacy( + name=req.name, + setup_py_path=req.setup_py_path, + source_dir=req.unpacked_source_directory, + global_options=global_options, + build_options=build_options, + tempd=temp_dir.path, + ) + + if wheel_path is not None: + wheel_name = os.path.basename(wheel_path) + dest_path = os.path.join(output_dir, wheel_name) + try: + wheel_hash, length = hash_file(wheel_path) + shutil.move(wheel_path, dest_path) + logger.info( + "Created wheel for %s: filename=%s size=%d sha256=%s", + req.name, + wheel_name, + length, + wheel_hash.hexdigest(), + ) + logger.info("Stored in directory: %s", output_dir) + return dest_path + except Exception as e: + logger.warning( + "Building wheel for %s failed: %s", + req.name, + e, + ) + # Ignore return, we can't do anything else useful. + if not req.use_pep517: + _clean_one_legacy(req, global_options) + return None + + +def _clean_one_legacy(req: InstallRequirement, global_options: list[str]) -> bool: + clean_args = make_setuptools_clean_args( + req.setup_py_path, + global_options=global_options, + ) + + logger.info("Running setup.py clean for %s", req.name) + try: + call_subprocess( + clean_args, command_desc="python setup.py clean", cwd=req.source_dir + ) + return True + except Exception: + logger.error("Failed cleaning build dir for %s", req.name) + return False + + +def build( + requirements: Iterable[InstallRequirement], + wheel_cache: WheelCache, + verify: bool, + build_options: list[str], + global_options: list[str], +) -> BuildResult: + """Build wheels. + + :return: The list of InstallRequirement that succeeded to build and + the list of InstallRequirement that failed to build. + """ + if not requirements: + return [], [] + + # Build the wheels. 
+    logger.info(
+        "Building wheels for collected packages: %s",
+        ", ".join(req.name for req in requirements),  # type: ignore
+    )
+
+    with indent_log():
+        build_successes, build_failures = [], []
+        for req in requirements:
+            assert req.name
+            cache_dir = _get_cache_dir(req, wheel_cache)
+            wheel_file = _build_one(
+                req,
+                cache_dir,
+                verify,
+                build_options,
+                global_options,
+                req.editable and req.permit_editable_wheels,
+            )
+            if wheel_file:
+                # Record the download origin in the cache
+                if req.download_info is not None:
+                    # download_info is guaranteed to be set because when we build an
+                    # InstallRequirement it has been through the preparer before, but
+                    # let's be cautious.
+                    wheel_cache.record_download_origin(cache_dir, req.download_info)
+                # Update the link for this.
+                req.link = Link(path_to_url(wheel_file))
+                req.local_file_path = req.link.file_path
+                assert req.link.is_wheel
+                build_successes.append(req)
+            else:
+                build_failures.append(req)
+
+    # notify success/failure
+    if build_successes:
+        logger.info(
+            "Successfully built %s",
+            " ".join([req.name for req in build_successes]),  # type: ignore
+        )
+    if build_failures:
+        logger.info(
+            "Failed to build %s",
+            " ".join([req.name for req in build_failures]),  # type: ignore
+        )
+    # Return the requirements that were built successfully and those that failed
+    return build_successes, build_failures
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/__init__.py
new file mode 100644
index 0000000..34ccb99
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/__init__.py
@@ -0,0 +1,117 @@
+"""
+pip._vendor is for vendoring dependencies of pip to prevent needing pip to
+depend on something external.
+
+Files inside of pip._vendor should be considered immutable and should only be
+updated to versions from upstream.
+"""
+from __future__ import absolute_import
+
+import glob
+import os.path
+import sys
+
+# Downstream redistributors which have debundled our dependencies should also
+# patch this value to be true. This will trigger the additional patching
+# to cause things like "six" to be available as pip.
+DEBUNDLED = False
+
+# By default, look in this directory for a bunch of .whl files which we will
+# add to the beginning of sys.path before attempting to import anything. This
+# is done to support downstream re-distributors like Debian and Fedora who
+# wish to create their own Wheels for our dependencies to aid in debundling.
+WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
+
+
+# Define a small helper function to alias our vendored modules to the real ones
+# if the vendored ones do not exist. The idea for this was taken from
+# https://github.com/kennethreitz/requests/pull/2567.
+def vendored(modulename):
+    vendored_name = "{0}.{1}".format(__name__, modulename)
+
+    try:
+        __import__(modulename, globals(), locals(), level=0)
+    except ImportError:
+        # We can just silently allow import failures to pass here. If we
+        # got to this point it means that ``import pip._vendor.whatever``
+        # failed and so did ``import whatever``. Since we're importing this
+        # upfront in an attempt to alias imports, not erroring here will
+        # just mean we get a regular import error whenever pip *actually*
+        # tries to import one of these modules to use it, which actually
+        # gives us a better error message than we would have otherwise
+        # gotten.
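+        # Illustration (assuming a debundled install where the top-level
+        # module does import): after vendored("six"), both
+        #   import pip._vendor.six
+        #   import six
+        # yield the same module object, thanks to the sys.modules aliasing
+        # in the else branch below.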
+        pass
+    else:
+        sys.modules[vendored_name] = sys.modules[modulename]
+        base, head = vendored_name.rsplit(".", 1)
+        setattr(sys.modules[base], head, sys.modules[modulename])
+
+
+# If we're operating in a debundled setup, then we want to go ahead and trigger
+# the aliasing of our vendored libraries as well as looking for wheels to add
+# to our sys.path. This will typically cause all of this code to be a no-op;
+# however, downstream redistributors can enable it in a consistent way across
+# all platforms.
+if DEBUNDLED:
+    # Actually look inside of WHEEL_DIR to find .whl files and add them to the
+    # front of our sys.path.
+    sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
+
+    # Actually alias all of our vendored dependencies.
+    vendored("cachecontrol")
+    vendored("certifi")
+    vendored("dependency_groups")
+    vendored("distlib")
+    vendored("distro")
+    vendored("packaging")
+    vendored("packaging.version")
+    vendored("packaging.specifiers")
+    vendored("pkg_resources")
+    vendored("platformdirs")
+    vendored("progress")
+    vendored("pyproject_hooks")
+    vendored("requests")
+    vendored("requests.exceptions")
+    vendored("requests.packages")
+    vendored("requests.packages.urllib3")
+    vendored("requests.packages.urllib3._collections")
+    vendored("requests.packages.urllib3.connection")
+    vendored("requests.packages.urllib3.connectionpool")
+    vendored("requests.packages.urllib3.contrib")
+    vendored("requests.packages.urllib3.contrib.ntlmpool")
+    vendored("requests.packages.urllib3.contrib.pyopenssl")
+    vendored("requests.packages.urllib3.exceptions")
+    vendored("requests.packages.urllib3.fields")
+    vendored("requests.packages.urllib3.filepost")
+    vendored("requests.packages.urllib3.packages")
+    vendored("requests.packages.urllib3.packages.ordered_dict")
+    vendored("requests.packages.urllib3.packages.six")
+    vendored("requests.packages.urllib3.packages.ssl_match_hostname")
+    vendored("requests.packages.urllib3.packages.ssl_match_hostname."
+ "_implementation") + vendored("requests.packages.urllib3.poolmanager") + vendored("requests.packages.urllib3.request") + vendored("requests.packages.urllib3.response") + vendored("requests.packages.urllib3.util") + vendored("requests.packages.urllib3.util.connection") + vendored("requests.packages.urllib3.util.request") + vendored("requests.packages.urllib3.util.response") + vendored("requests.packages.urllib3.util.retry") + vendored("requests.packages.urllib3.util.ssl_") + vendored("requests.packages.urllib3.util.timeout") + vendored("requests.packages.urllib3.util.url") + vendored("resolvelib") + vendored("rich") + vendored("rich.console") + vendored("rich.highlighter") + vendored("rich.logging") + vendored("rich.markup") + vendored("rich.progress") + vendored("rich.segment") + vendored("rich.style") + vendored("rich.text") + vendored("rich.traceback") + if sys.version_info < (3, 11): + vendored("tomli") + vendored("truststore") + vendored("urllib3") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..f842438 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__init__.py new file mode 100644 index 0000000..67888db --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__init__.py @@ -0,0 +1,29 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + +"""CacheControl import Interface. + +Make it easy to import from cachecontrol without long namespaces. 
+""" + +__author__ = "Eric Larson" +__email__ = "eric@ionrock.org" +__version__ = "0.14.3" + +from pip._vendor.cachecontrol.adapter import CacheControlAdapter +from pip._vendor.cachecontrol.controller import CacheController +from pip._vendor.cachecontrol.wrapper import CacheControl + +__all__ = [ + "__author__", + "__email__", + "__version__", + "CacheControlAdapter", + "CacheController", + "CacheControl", +] + +import logging + +logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..2cc0fab Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-312.pyc new file mode 100644 index 0000000..e227345 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-312.pyc new file mode 100644 index 0000000..1f37c07 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-312.pyc new file mode 100644 index 0000000..8a8c823 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-312.pyc new file mode 100644 index 0000000..c558fb3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-312.pyc new file mode 100644 index 0000000..4b76d39 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-312.pyc new file mode 100644 index 0000000..81922da Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-312.pyc new file mode 100644 index 0000000..93e57b6 Binary files /dev/null and 
b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-312.pyc new file mode 100644 index 0000000..608e6c7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/_cmd.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/_cmd.py new file mode 100644 index 0000000..2c84208 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/_cmd.py @@ -0,0 +1,70 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import logging +from argparse import ArgumentParser +from typing import TYPE_CHECKING + +from pip._vendor import requests + +from pip._vendor.cachecontrol.adapter import CacheControlAdapter +from pip._vendor.cachecontrol.cache import DictCache +from pip._vendor.cachecontrol.controller import logger + +if TYPE_CHECKING: + from argparse import Namespace + + from pip._vendor.cachecontrol.controller import CacheController + + +def setup_logging() -> None: + logger.setLevel(logging.DEBUG) + handler = logging.StreamHandler() + logger.addHandler(handler) + + +def get_session() -> requests.Session: + adapter = CacheControlAdapter( + DictCache(), cache_etags=True, serializer=None, heuristic=None + ) + sess = requests.Session() + sess.mount("http://", adapter) + sess.mount("https://", adapter) + + sess.cache_controller = adapter.controller # type: ignore[attr-defined] + return sess + + +def get_args() -> Namespace: + parser = ArgumentParser() + parser.add_argument("url", help="The URL to try and cache") + return parser.parse_args() + + +def main() -> None: + args = get_args() + sess = get_session() + + # Make a request to get a response + resp = sess.get(args.url) + + # Turn on logging + setup_logging() + + # try setting the cache + cache_controller: CacheController = ( + sess.cache_controller # type: ignore[attr-defined] + ) + cache_controller.cache_response(resp.request, resp.raw) + + # Now try to get it + if cache_controller.cached_request(resp.request): + print("Cached!") + else: + print("Not cached :(") + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/adapter.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/adapter.py new file mode 100644 index 0000000..18084d1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/adapter.py @@ -0,0 +1,168 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import functools +import types +import weakref +import zlib +from typing import TYPE_CHECKING, Any, Collection, Mapping + +from pip._vendor.requests.adapters import HTTPAdapter + +from pip._vendor.cachecontrol.cache import DictCache +from pip._vendor.cachecontrol.controller import PERMANENT_REDIRECT_STATUSES, CacheController +from pip._vendor.cachecontrol.filewrapper import CallbackFileWrapper + +if TYPE_CHECKING: + from pip._vendor.requests import PreparedRequest, Response + from pip._vendor.urllib3 import HTTPResponse + + from pip._vendor.cachecontrol.cache import BaseCache + from pip._vendor.cachecontrol.heuristics import 
BaseHeuristic + from pip._vendor.cachecontrol.serialize import Serializer + + +class CacheControlAdapter(HTTPAdapter): + invalidating_methods = {"PUT", "PATCH", "DELETE"} + + def __init__( + self, + cache: BaseCache | None = None, + cache_etags: bool = True, + controller_class: type[CacheController] | None = None, + serializer: Serializer | None = None, + heuristic: BaseHeuristic | None = None, + cacheable_methods: Collection[str] | None = None, + *args: Any, + **kw: Any, + ) -> None: + super().__init__(*args, **kw) + self.cache = DictCache() if cache is None else cache + self.heuristic = heuristic + self.cacheable_methods = cacheable_methods or ("GET",) + + controller_factory = controller_class or CacheController + self.controller = controller_factory( + self.cache, cache_etags=cache_etags, serializer=serializer + ) + + def send( + self, + request: PreparedRequest, + stream: bool = False, + timeout: None | float | tuple[float, float] | tuple[float, None] = None, + verify: bool | str = True, + cert: (None | bytes | str | tuple[bytes | str, bytes | str]) = None, + proxies: Mapping[str, str] | None = None, + cacheable_methods: Collection[str] | None = None, + ) -> Response: + """ + Send a request. Use the request information to see if it + exists in the cache and cache the response if we need to and can. + """ + cacheable = cacheable_methods or self.cacheable_methods + if request.method in cacheable: + try: + cached_response = self.controller.cached_request(request) + except zlib.error: + cached_response = None + if cached_response: + return self.build_response(request, cached_response, from_cache=True) + + # check for etags and add headers if appropriate + request.headers.update(self.controller.conditional_headers(request)) + + resp = super().send(request, stream, timeout, verify, cert, proxies) + + return resp + + def build_response( # type: ignore[override] + self, + request: PreparedRequest, + response: HTTPResponse, + from_cache: bool = False, + cacheable_methods: Collection[str] | None = None, + ) -> Response: + """ + Build a response by making a request or using the cache. + + This will end up calling send and returning a potentially + cached response + """ + cacheable = cacheable_methods or self.cacheable_methods + if not from_cache and request.method in cacheable: + # Check for any heuristics that might update headers + # before trying to cache. + if self.heuristic: + response = self.heuristic.apply(response) + + # apply any expiration heuristics + if response.status == 304: + # We must have sent an ETag request. This could mean + # that we've been expired already or that we simply + # have an etag. In either case, we want to try and + # update the cache if that is the case. + cached_response = self.controller.update_cached_response( + request, response + ) + + if cached_response is not response: + from_cache = True + + # We are done with the server response, read a + # possible response body (compliant servers will + # not return one, but we cannot be 100% sure) and + # release the connection back to the pool. + response.read(decode_content=False) + response.release_conn() + + response = cached_response + + # We always cache the 301 responses + elif int(response.status) in PERMANENT_REDIRECT_STATUSES: + self.controller.cache_response(request, response) + else: + # Wrap the response file with a wrapper that will cache the + # response when the stream has been consumed. 
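+                # Hedged paraphrase of the mechanism: reads from the live
+                # response are teed into a temporary file, and when the
+                # stream is exhausted the wrapper invokes
+                # controller.cache_response(request, <response>, body) with
+                # the accumulated bytes, so streamed bodies get cached
+                # without being held in memory first.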
+ response._fp = CallbackFileWrapper( # type: ignore[assignment] + response._fp, # type: ignore[arg-type] + functools.partial( + self.controller.cache_response, request, weakref.ref(response) + ), + ) + if response.chunked: + super_update_chunk_length = response.__class__._update_chunk_length + + def _update_chunk_length( + weak_self: weakref.ReferenceType[HTTPResponse], + ) -> None: + self = weak_self() + if self is None: + return + + super_update_chunk_length(self) + if self.chunk_left == 0: + self._fp._close() # type: ignore[union-attr] + + response._update_chunk_length = functools.partial( # type: ignore[method-assign] + _update_chunk_length, weakref.ref(response) + ) + + resp: Response = super().build_response(request, response) + + # See if we should invalidate the cache. + if request.method in self.invalidating_methods and resp.ok: + assert request.url is not None + cache_url = self.controller.cache_url(request.url) + self.cache.delete(cache_url) + + # Give the request a from_cache attr to let people use it + resp.from_cache = from_cache # type: ignore[attr-defined] + + return resp + + def close(self) -> None: + self.cache.close() + super().close() # type: ignore[no-untyped-call] diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/cache.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/cache.py new file mode 100644 index 0000000..91598e9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/cache.py @@ -0,0 +1,75 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + +""" +The cache object API for implementing caches. The default is a thread +safe in-memory dictionary. +""" + +from __future__ import annotations + +from threading import Lock +from typing import IO, TYPE_CHECKING, MutableMapping + +if TYPE_CHECKING: + from datetime import datetime + + +class BaseCache: + def get(self, key: str) -> bytes | None: + raise NotImplementedError() + + def set( + self, key: str, value: bytes, expires: int | datetime | None = None + ) -> None: + raise NotImplementedError() + + def delete(self, key: str) -> None: + raise NotImplementedError() + + def close(self) -> None: + pass + + +class DictCache(BaseCache): + def __init__(self, init_dict: MutableMapping[str, bytes] | None = None) -> None: + self.lock = Lock() + self.data = init_dict or {} + + def get(self, key: str) -> bytes | None: + return self.data.get(key, None) + + def set( + self, key: str, value: bytes, expires: int | datetime | None = None + ) -> None: + with self.lock: + self.data.update({key: value}) + + def delete(self, key: str) -> None: + with self.lock: + if key in self.data: + self.data.pop(key) + + +class SeparateBodyBaseCache(BaseCache): + """ + In this variant, the body is not stored mixed in with the metadata, but is + passed in (as a bytes-like object) in a separate call to ``set_body()``. + + That is, the expected interaction pattern is:: + + cache.set(key, serialized_metadata) + cache.set_body(key) + + Similarly, the body should be loaded separately via ``get_body()``. + """ + + def set_body(self, key: str, body: bytes) -> None: + raise NotImplementedError() + + def get_body(self, key: str) -> IO[bytes] | None: + """ + Return the body as file-like object. 
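+
+        A hedged round-trip sketch for the split layout (note that
+        ``set_body()`` takes the body bytes as its second argument)::
+
+            cache.set(key, serialized_metadata)
+            cache.set_body(key, body_bytes)
+            fh = cache.get_body(key)  # file-like object, or None if absent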
+ """ + raise NotImplementedError() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__init__.py new file mode 100644 index 0000000..24ff469 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__init__.py @@ -0,0 +1,8 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + +from pip._vendor.cachecontrol.caches.file_cache import FileCache, SeparateBodyFileCache +from pip._vendor.cachecontrol.caches.redis_cache import RedisCache + +__all__ = ["FileCache", "SeparateBodyFileCache", "RedisCache"] diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..5bd2362 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-312.pyc new file mode 100644 index 0000000..5af9006 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-312.pyc new file mode 100644 index 0000000..3d41202 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py new file mode 100644 index 0000000..45c632c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py @@ -0,0 +1,145 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import hashlib +import os +import tempfile +from textwrap import dedent +from typing import IO, TYPE_CHECKING +from pathlib import Path + +from pip._vendor.cachecontrol.cache import BaseCache, SeparateBodyBaseCache +from pip._vendor.cachecontrol.controller import CacheController + +if TYPE_CHECKING: + from datetime import datetime + + from filelock import BaseFileLock + + +class _FileCacheMixin: + """Shared implementation for both FileCache variants.""" + + def __init__( + self, + directory: str | Path, + forever: bool = False, + filemode: int = 0o0600, + dirmode: int = 0o0700, + lock_class: type[BaseFileLock] | None = None, + ) -> None: + try: + if lock_class is None: + from filelock import FileLock + + lock_class = FileLock + except ImportError: + notice = dedent( + """ + NOTE: In order to use the FileCache you must have + filelock installed. 
You can install it via pip: + pip install cachecontrol[filecache] + """ + ) + raise ImportError(notice) + + self.directory = directory + self.forever = forever + self.filemode = filemode + self.dirmode = dirmode + self.lock_class = lock_class + + @staticmethod + def encode(x: str) -> str: + return hashlib.sha224(x.encode()).hexdigest() + + def _fn(self, name: str) -> str: + # NOTE: This method should not change as some may depend on it. + # See: https://github.com/ionrock/cachecontrol/issues/63 + hashed = self.encode(name) + parts = list(hashed[:5]) + [hashed] + return os.path.join(self.directory, *parts) + + def get(self, key: str) -> bytes | None: + name = self._fn(key) + try: + with open(name, "rb") as fh: + return fh.read() + + except FileNotFoundError: + return None + + def set( + self, key: str, value: bytes, expires: int | datetime | None = None + ) -> None: + name = self._fn(key) + self._write(name, value) + + def _write(self, path: str, data: bytes) -> None: + """ + Safely write the data to the given path. + """ + # Make sure the directory exists + dirname = os.path.dirname(path) + os.makedirs(dirname, self.dirmode, exist_ok=True) + + with self.lock_class(path + ".lock"): + # Write our actual file + (fd, name) = tempfile.mkstemp(dir=dirname) + try: + os.write(fd, data) + finally: + os.close(fd) + os.chmod(name, self.filemode) + os.replace(name, path) + + def _delete(self, key: str, suffix: str) -> None: + name = self._fn(key) + suffix + if not self.forever: + try: + os.remove(name) + except FileNotFoundError: + pass + + +class FileCache(_FileCacheMixin, BaseCache): + """ + Traditional FileCache: body is stored in memory, so not suitable for large + downloads. + """ + + def delete(self, key: str) -> None: + self._delete(key, "") + + +class SeparateBodyFileCache(_FileCacheMixin, SeparateBodyBaseCache): + """ + Memory-efficient FileCache: body is stored in a separate file, reducing + peak memory usage. + """ + + def get_body(self, key: str) -> IO[bytes] | None: + name = self._fn(key) + ".body" + try: + return open(name, "rb") + except FileNotFoundError: + return None + + def set_body(self, key: str, body: bytes) -> None: + name = self._fn(key) + ".body" + self._write(name, body) + + def delete(self, key: str) -> None: + self._delete(key, "") + self._delete(key, ".body") + + +def url_to_file_path(url: str, filecache: FileCache) -> str: + """Return the file cache path based on the URL. + + This does not ensure the file exists! 
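+
+    A hedged illustration (digest abbreviated, not a real value)::
+
+        fc = FileCache("/tmp/http-cache")
+        url_to_file_path("https://example.com/pkg", fc)
+        # -> "/tmp/http-cache/d/e/a/d/b/deadb..." (sha224 of the normalized
+        #    URL, sharded into one directory per leading hex character)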
+ """ + key = CacheController.cache_url(url) + return filecache._fn(key) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py new file mode 100644 index 0000000..f4f68c4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py @@ -0,0 +1,48 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + + +from datetime import datetime, timezone +from typing import TYPE_CHECKING + +from pip._vendor.cachecontrol.cache import BaseCache + +if TYPE_CHECKING: + from redis import Redis + + +class RedisCache(BaseCache): + def __init__(self, conn: Redis[bytes]) -> None: + self.conn = conn + + def get(self, key: str) -> bytes | None: + return self.conn.get(key) + + def set( + self, key: str, value: bytes, expires: int | datetime | None = None + ) -> None: + if not expires: + self.conn.set(key, value) + elif isinstance(expires, datetime): + now_utc = datetime.now(timezone.utc) + if expires.tzinfo is None: + now_utc = now_utc.replace(tzinfo=None) + delta = expires - now_utc + self.conn.setex(key, int(delta.total_seconds()), value) + else: + self.conn.setex(key, expires, value) + + def delete(self, key: str) -> None: + self.conn.delete(key) + + def clear(self) -> None: + """Helper for clearing all the keys in a database. Use with + caution!""" + for key in self.conn.keys(): + self.conn.delete(key) + + def close(self) -> None: + """Redis uses connection pooling, no need to close the connection.""" + pass diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/controller.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/controller.py new file mode 100644 index 0000000..d92d991 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/controller.py @@ -0,0 +1,511 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + +""" +The httplib2 algorithms ported for use with requests. +""" + +from __future__ import annotations + +import calendar +import logging +import re +import time +import weakref +from email.utils import parsedate_tz +from typing import TYPE_CHECKING, Collection, Mapping + +from pip._vendor.requests.structures import CaseInsensitiveDict + +from pip._vendor.cachecontrol.cache import DictCache, SeparateBodyBaseCache +from pip._vendor.cachecontrol.serialize import Serializer + +if TYPE_CHECKING: + from typing import Literal + + from pip._vendor.requests import PreparedRequest + from pip._vendor.urllib3 import HTTPResponse + + from pip._vendor.cachecontrol.cache import BaseCache + +logger = logging.getLogger(__name__) + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + +PERMANENT_REDIRECT_STATUSES = (301, 308) + + +def parse_uri(uri: str) -> tuple[str, str, str, str, str]: + """Parses a URI using the regex given in Appendix B of RFC 3986. 
+ + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + match = URI.match(uri) + assert match is not None + groups = match.groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + + +class CacheController: + """An interface to see if request should cached or not.""" + + def __init__( + self, + cache: BaseCache | None = None, + cache_etags: bool = True, + serializer: Serializer | None = None, + status_codes: Collection[int] | None = None, + ): + self.cache = DictCache() if cache is None else cache + self.cache_etags = cache_etags + self.serializer = serializer or Serializer() + self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308) + + @classmethod + def _urlnorm(cls, uri: str) -> str: + """Normalize the URL to create a safe key for the cache""" + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise Exception("Only absolute URIs are allowed. uri = %s" % uri) + + scheme = scheme.lower() + authority = authority.lower() + + if not path: + path = "/" + + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. + request_uri = query and "?".join([path, query]) or path + defrag_uri = scheme + "://" + authority + request_uri + + return defrag_uri + + @classmethod + def cache_url(cls, uri: str) -> str: + return cls._urlnorm(uri) + + def parse_cache_control(self, headers: Mapping[str, str]) -> dict[str, int | None]: + known_directives = { + # https://tools.ietf.org/html/rfc7234#section-5.2 + "max-age": (int, True), + "max-stale": (int, False), + "min-fresh": (int, True), + "no-cache": (None, False), + "no-store": (None, False), + "no-transform": (None, False), + "only-if-cached": (None, False), + "must-revalidate": (None, False), + "public": (None, False), + "private": (None, False), + "proxy-revalidate": (None, False), + "s-maxage": (int, True), + } + + cc_headers = headers.get("cache-control", headers.get("Cache-Control", "")) + + retval: dict[str, int | None] = {} + + for cc_directive in cc_headers.split(","): + if not cc_directive.strip(): + continue + + parts = cc_directive.split("=", 1) + directive = parts[0].strip() + + try: + typ, required = known_directives[directive] + except KeyError: + logger.debug("Ignoring unknown cache-control directive: %s", directive) + continue + + if not typ or not required: + retval[directive] = None + if typ: + try: + retval[directive] = typ(parts[1].strip()) + except IndexError: + if required: + logger.debug( + "Missing value for cache-control " "directive: %s", + directive, + ) + except ValueError: + logger.debug( + "Invalid value for cache-control directive " "%s, must be %s", + directive, + typ.__name__, + ) + + return retval + + def _load_from_cache(self, request: PreparedRequest) -> HTTPResponse | None: + """ + Load a cached response, or return None if it's not available. + """ + # We do not support caching of partial content: so if the request contains a + # Range header then we don't want to load anything from the cache. 
+ if "Range" in request.headers: + return None + + cache_url = request.url + assert cache_url is not None + cache_data = self.cache.get(cache_url) + if cache_data is None: + logger.debug("No cache entry available") + return None + + if isinstance(self.cache, SeparateBodyBaseCache): + body_file = self.cache.get_body(cache_url) + else: + body_file = None + + result = self.serializer.loads(request, cache_data, body_file) + if result is None: + logger.warning("Cache entry deserialization failed, entry ignored") + return result + + def cached_request(self, request: PreparedRequest) -> HTTPResponse | Literal[False]: + """ + Return a cached response if it exists in the cache, otherwise + return False. + """ + assert request.url is not None + cache_url = self.cache_url(request.url) + logger.debug('Looking up "%s" in the cache', cache_url) + cc = self.parse_cache_control(request.headers) + + # Bail out if the request insists on fresh data + if "no-cache" in cc: + logger.debug('Request header has "no-cache", cache bypassed') + return False + + if "max-age" in cc and cc["max-age"] == 0: + logger.debug('Request header has "max_age" as 0, cache bypassed') + return False + + # Check whether we can load the response from the cache: + resp = self._load_from_cache(request) + if not resp: + return False + + # If we have a cached permanent redirect, return it immediately. We + # don't need to test our response for other headers b/c it is + # intrinsically "cacheable" as it is Permanent. + # + # See: + # https://tools.ietf.org/html/rfc7231#section-6.4.2 + # + # Client can try to refresh the value by repeating the request + # with cache busting headers as usual (ie no-cache). + if int(resp.status) in PERMANENT_REDIRECT_STATUSES: + msg = ( + "Returning cached permanent redirect response " + "(ignoring date and etag information)" + ) + logger.debug(msg) + return resp + + headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(resp.headers) + if not headers or "date" not in headers: + if "etag" not in headers: + # Without date or etag, the cached response can never be used + # and should be deleted. + logger.debug("Purging cached response: no date or etag") + self.cache.delete(cache_url) + logger.debug("Ignoring cached response: no date") + return False + + now = time.time() + time_tuple = parsedate_tz(headers["date"]) + assert time_tuple is not None + date = calendar.timegm(time_tuple[:6]) + current_age = max(0, now - date) + logger.debug("Current age based on date: %i", current_age) + + # TODO: There is an assumption that the result will be a + # urllib3 response object. This may not be best since we + # could probably avoid instantiating or constructing the + # response until we know we need it. + resp_cc = self.parse_cache_control(headers) + + # determine freshness + freshness_lifetime = 0 + + # Check the max-age pragma in the cache control header + max_age = resp_cc.get("max-age") + if max_age is not None: + freshness_lifetime = max_age + logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime) + + # If there isn't a max-age, check for an expires header + elif "expires" in headers: + expires = parsedate_tz(headers["expires"]) + if expires is not None: + expire_time = calendar.timegm(expires[:6]) - date + freshness_lifetime = max(0, expire_time) + logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) + + # Determine if we are setting freshness limit in the + # request. Note, this overrides what was in the response. 
+ max_age = cc.get("max-age") + if max_age is not None: + freshness_lifetime = max_age + logger.debug( + "Freshness lifetime from request max-age: %i", freshness_lifetime + ) + + min_fresh = cc.get("min-fresh") + if min_fresh is not None: + # adjust our current age by our min fresh + current_age += min_fresh + logger.debug("Adjusted current age from min-fresh: %i", current_age) + + # Return entry if it is fresh enough + if freshness_lifetime > current_age: + logger.debug('The response is "fresh", returning cached response') + logger.debug("%i > %i", freshness_lifetime, current_age) + return resp + + # we're not fresh. If we don't have an Etag, clear it out + if "etag" not in headers: + logger.debug('The cached response is "stale" with no etag, purging') + self.cache.delete(cache_url) + + # return the original handler + return False + + def conditional_headers(self, request: PreparedRequest) -> dict[str, str]: + resp = self._load_from_cache(request) + new_headers = {} + + if resp: + headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(resp.headers) + + if "etag" in headers: + new_headers["If-None-Match"] = headers["ETag"] + + if "last-modified" in headers: + new_headers["If-Modified-Since"] = headers["Last-Modified"] + + return new_headers + + def _cache_set( + self, + cache_url: str, + request: PreparedRequest, + response: HTTPResponse, + body: bytes | None = None, + expires_time: int | None = None, + ) -> None: + """ + Store the data in the cache. + """ + if isinstance(self.cache, SeparateBodyBaseCache): + # We pass in the body separately; just put a placeholder empty + # string in the metadata. + self.cache.set( + cache_url, + self.serializer.dumps(request, response, b""), + expires=expires_time, + ) + # body is None can happen when, for example, we're only updating + # headers, as is the case in update_cached_response(). + if body is not None: + self.cache.set_body(cache_url, body) + else: + self.cache.set( + cache_url, + self.serializer.dumps(request, response, body), + expires=expires_time, + ) + + def cache_response( + self, + request: PreparedRequest, + response_or_ref: HTTPResponse | weakref.ReferenceType[HTTPResponse], + body: bytes | None = None, + status_codes: Collection[int] | None = None, + ) -> None: + """ + Algorithm for caching requests. + + This assumes a requests Response object. + """ + if isinstance(response_or_ref, weakref.ReferenceType): + response = response_or_ref() + if response is None: + # The weakref can be None only in case the user used streamed request + # and did not consume or close it, and holds no reference to requests.Response. + # In such case, we don't want to cache the response. + return + else: + response = response_or_ref + + # From httplib2: Don't cache 206's since we aren't going to + # handle byte range requests + cacheable_status_codes = status_codes or self.cacheable_status_codes + if response.status not in cacheable_status_codes: + logger.debug( + "Status code %s not in %s", response.status, cacheable_status_codes + ) + return + + response_headers: CaseInsensitiveDict[str] = CaseInsensitiveDict( + response.headers + ) + + if "date" in response_headers: + time_tuple = parsedate_tz(response_headers["date"]) + assert time_tuple is not None + date = calendar.timegm(time_tuple[:6]) + else: + date = 0 + + # If we've been given a body, our response has a Content-Length, that + # Content-Length is valid then we can check to see if the body we've + # been given matches the expected size, and if it doesn't we'll just + # skip trying to cache it. 
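+        # e.g. (illustrative): "Content-Length: 1000" alongside
+        # len(body) == 512 indicates a truncated download and is skipped.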
+ if ( + body is not None + and "content-length" in response_headers + and response_headers["content-length"].isdigit() + and int(response_headers["content-length"]) != len(body) + ): + return + + cc_req = self.parse_cache_control(request.headers) + cc = self.parse_cache_control(response_headers) + + assert request.url is not None + cache_url = self.cache_url(request.url) + logger.debug('Updating cache with response from "%s"', cache_url) + + # Delete it from the cache if we happen to have it stored there + no_store = False + if "no-store" in cc: + no_store = True + logger.debug('Response header has "no-store"') + if "no-store" in cc_req: + no_store = True + logger.debug('Request header has "no-store"') + if no_store and self.cache.get(cache_url): + logger.debug('Purging existing cache entry to honor "no-store"') + self.cache.delete(cache_url) + if no_store: + return + + # https://tools.ietf.org/html/rfc7234#section-4.1: + # A Vary header field-value of "*" always fails to match. + # Storing such a response leads to a deserialization warning + # during cache lookup and is not allowed to ever be served, + # so storing it can be avoided. + if "*" in response_headers.get("vary", ""): + logger.debug('Response header has "Vary: *"') + return + + # If we've been given an etag, then keep the response + if self.cache_etags and "etag" in response_headers: + expires_time = 0 + if response_headers.get("expires"): + expires = parsedate_tz(response_headers["expires"]) + if expires is not None: + expires_time = calendar.timegm(expires[:6]) - date + + expires_time = max(expires_time, 14 * 86400) + + logger.debug(f"etag object cached for {expires_time} seconds") + logger.debug("Caching due to etag") + self._cache_set(cache_url, request, response, body, expires_time) + + # Add to the cache any permanent redirects. We do this before looking + # that the Date headers. + elif int(response.status) in PERMANENT_REDIRECT_STATUSES: + logger.debug("Caching permanent redirect") + self._cache_set(cache_url, request, response, b"") + + # Add to the cache if the response headers demand it. If there + # is no date header then we can't do anything about expiring + # the cache. + elif "date" in response_headers: + time_tuple = parsedate_tz(response_headers["date"]) + assert time_tuple is not None + date = calendar.timegm(time_tuple[:6]) + # cache when there is a max-age > 0 + max_age = cc.get("max-age") + if max_age is not None and max_age > 0: + logger.debug("Caching b/c date exists and max-age > 0") + expires_time = max_age + self._cache_set( + cache_url, + request, + response, + body, + expires_time, + ) + + # If the request can expire, it means we should cache it + # in the meantime. + elif "expires" in response_headers: + if response_headers["expires"]: + expires = parsedate_tz(response_headers["expires"]) + if expires is not None: + expires_time = calendar.timegm(expires[:6]) - date + else: + expires_time = None + + logger.debug( + "Caching b/c of expires header. expires in {} seconds".format( + expires_time + ) + ) + self._cache_set( + cache_url, + request, + response, + body, + expires_time, + ) + + def update_cached_response( + self, request: PreparedRequest, response: HTTPResponse + ) -> HTTPResponse: + """On a 304 we will get a new set of headers that we want to + update our cached value with, assuming we have one. + + This should only ever be called when we've sent an ETag and + gotten a 304 as the response. 
+ """ + assert request.url is not None + cache_url = self.cache_url(request.url) + cached_response = self._load_from_cache(request) + + if not cached_response: + # we didn't have a cached response + return response + + # Lets update our headers with the headers from the new request: + # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 + # + # The server isn't supposed to send headers that would make + # the cached body invalid. But... just in case, we'll be sure + # to strip out ones we know that might be problmatic due to + # typical assumptions. + excluded_headers = ["content-length"] + + cached_response.headers.update( + { + k: v + for k, v in response.headers.items() + if k.lower() not in excluded_headers + } + ) + + # we want a 200 b/c we have content via the cache + cached_response.status = 200 + + # update our cache + self._cache_set(cache_url, request, cached_response) + + return cached_response diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/filewrapper.py new file mode 100644 index 0000000..37d2fa5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/filewrapper.py @@ -0,0 +1,119 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import mmap +from tempfile import NamedTemporaryFile +from typing import TYPE_CHECKING, Any, Callable + +if TYPE_CHECKING: + from http.client import HTTPResponse + + +class CallbackFileWrapper: + """ + Small wrapper around a fp object which will tee everything read into a + buffer, and when that file is closed it will execute a callback with the + contents of that buffer. + + All attributes are proxied to the underlying file object. + + This class uses members with a double underscore (__) leading prefix so as + not to accidentally shadow an attribute. + + The data is stored in a temporary file until it is all available. As long + as the temporary files directory is disk-based (sometimes it's a + memory-backed-``tmpfs`` on Linux), data will be unloaded to disk if memory + pressure is high. For small files the disk usually won't be used at all, + it'll all be in the filesystem memory cache, so there should be no + performance impact. + """ + + def __init__( + self, fp: HTTPResponse, callback: Callable[[bytes], None] | None + ) -> None: + self.__buf = NamedTemporaryFile("rb+", delete=True) + self.__fp = fp + self.__callback = callback + + def __getattr__(self, name: str) -> Any: + # The vagaries of garbage collection means that self.__fp is + # not always set. By using __getattribute__ and the private + # name[0] allows looking up the attribute value and raising an + # AttributeError when it doesn't exist. This stop things from + # infinitely recursing calls to getattr in the case where + # self.__fp hasn't been set. + # + # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers + fp = self.__getattribute__("_CallbackFileWrapper__fp") + return getattr(fp, name) + + def __is_fp_closed(self) -> bool: + try: + return self.__fp.fp is None + + except AttributeError: + pass + + try: + closed: bool = self.__fp.closed + return closed + + except AttributeError: + pass + + # We just don't cache it then. + # TODO: Add some logging here... 
+        return False
+
+    def _close(self) -> None:
+        if self.__callback:
+            if self.__buf.tell() == 0:
+                # Empty file:
+                result = b""
+            else:
+                # Return the data without actually loading it into memory,
+                # relying on Python's buffer API and mmap(). mmap() just gives
+                # a view directly into the filesystem's memory cache, so it
+                # doesn't result in duplicate memory use.
+                self.__buf.seek(0, 0)
+                result = memoryview(
+                    mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ)
+                )
+            self.__callback(result)
+
+        # We assign this to None here, because otherwise we can get into
+        # really tricky problems where the CPython interpreter deadlocks
+        # because the callback is holding a reference to something which
+        # has a __del__ method. Setting this to None breaks the cycle
+        # and allows the garbage collector to do its thing normally.
+        self.__callback = None
+
+        # Closing the temporary file releases memory and frees disk space.
+        # Important when caching big files.
+        self.__buf.close()
+
+    def read(self, amt: int | None = None) -> bytes:
+        data: bytes = self.__fp.read(amt)
+        if data:
+            # We may be dealing with b'', a sign that things are over:
+            # it's passed e.g. after we've already closed self.__buf.
+            self.__buf.write(data)
+            if self.__is_fp_closed():
+                self._close()
+
+        return data
+
+    def _safe_read(self, amt: int) -> bytes:
+        data: bytes = self.__fp._safe_read(amt)  # type: ignore[attr-defined]
+        if amt == 2 and data == b"\r\n":
+            # urllib executes this read to toss the CRLF at the end
+            # of the chunk.
+            return data
+
+        self.__buf.write(data)
+        if self.__is_fp_closed():
+            self._close()
+
+        return data
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/heuristics.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/heuristics.py
new file mode 100644
index 0000000..b778c4f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/heuristics.py
@@ -0,0 +1,157 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import calendar
+import time
+from datetime import datetime, timedelta, timezone
+from email.utils import formatdate, parsedate, parsedate_tz
+from typing import TYPE_CHECKING, Any, Mapping
+
+if TYPE_CHECKING:
+    from pip._vendor.urllib3 import HTTPResponse
+
+TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
+
+
+def expire_after(delta: timedelta, date: datetime | None = None) -> datetime:
+    date = date or datetime.now(timezone.utc)
+    return date + delta
+
+
+def datetime_to_header(dt: datetime) -> str:
+    return formatdate(calendar.timegm(dt.timetuple()))
+
+
+class BaseHeuristic:
+    def warning(self, response: HTTPResponse) -> str | None:
+        """
+        Return a valid 1xx warning header value describing the cache
+        adjustments.
+
+        The response is provided to allow warnings like 113
+        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
+        to explicitly say the response is over 24 hours old.
+        """
+        return '110 - "Response is Stale"'
+
+    def update_headers(self, response: HTTPResponse) -> dict[str, str]:
+        """Update the response headers with any new headers.
+
+        NOTE: This SHOULD always include some Warning header to
+        signify that the response was cached by the client, not
+        by way of the provided headers.
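+
+        A hedged sketch of a subclass (fixed one-hour freshness; the name is
+        illustrative, not part of this module)::
+
+            class OneHourFresh(BaseHeuristic):
+                def update_headers(self, response):
+                    return {"cache-control": "public, max-age=3600"}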
+ """ + return {} + + def apply(self, response: HTTPResponse) -> HTTPResponse: + updated_headers = self.update_headers(response) + + if updated_headers: + response.headers.update(updated_headers) + warning_header_value = self.warning(response) + if warning_header_value is not None: + response.headers.update({"Warning": warning_header_value}) + + return response + + +class OneDayCache(BaseHeuristic): + """ + Cache the response by providing an expires 1 day in the + future. + """ + + def update_headers(self, response: HTTPResponse) -> dict[str, str]: + headers = {} + + if "expires" not in response.headers: + date = parsedate(response.headers["date"]) + expires = expire_after( + timedelta(days=1), + date=datetime(*date[:6], tzinfo=timezone.utc), # type: ignore[index,misc] + ) + headers["expires"] = datetime_to_header(expires) + headers["cache-control"] = "public" + return headers + + +class ExpiresAfter(BaseHeuristic): + """ + Cache **all** requests for a defined time period. + """ + + def __init__(self, **kw: Any) -> None: + self.delta = timedelta(**kw) + + def update_headers(self, response: HTTPResponse) -> dict[str, str]: + expires = expire_after(self.delta) + return {"expires": datetime_to_header(expires), "cache-control": "public"} + + def warning(self, response: HTTPResponse) -> str | None: + tmpl = "110 - Automatically cached for %s. Response might be stale" + return tmpl % self.delta + + +class LastModified(BaseHeuristic): + """ + If there is no Expires header already, fall back on Last-Modified + using the heuristic from + http://tools.ietf.org/html/rfc7234#section-4.2.2 + to calculate a reasonable value. + + Firefox also does something like this per + https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ + http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397 + Unlike mozilla we limit this to 24-hr. 
+    """
+
+    cacheable_by_default_statuses = {
+        200,
+        203,
+        204,
+        206,
+        300,
+        301,
+        404,
+        405,
+        410,
+        414,
+        501,
+    }
+
+    def update_headers(self, resp: HTTPResponse) -> dict[str, str]:
+        headers: Mapping[str, str] = resp.headers
+
+        if "expires" in headers:
+            return {}
+
+        if "cache-control" in headers and headers["cache-control"] != "public":
+            return {}
+
+        if resp.status not in self.cacheable_by_default_statuses:
+            return {}
+
+        if "date" not in headers or "last-modified" not in headers:
+            return {}
+
+        time_tuple = parsedate_tz(headers["date"])
+        assert time_tuple is not None
+        date = calendar.timegm(time_tuple[:6])
+        last_modified = parsedate(headers["last-modified"])
+        if last_modified is None:
+            return {}
+
+        now = time.time()
+        current_age = max(0, now - date)
+        delta = date - calendar.timegm(last_modified)
+        freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
+        if freshness_lifetime <= current_age:
+            return {}
+
+        expires = date + freshness_lifetime
+        return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))}
+
+    def warning(self, resp: HTTPResponse) -> str | None:
+        return None
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/py.typed
new file mode 100644
index 0000000..e69de29
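For reference, a minimal sketch of plugging a custom heuristic into a requests session, assuming the standalone cachecontrol and requests distributions (the unvendored upstream of the module above); the class name TenMinuteCache and the mounted prefix are illustrative, not part of this patch:

# Sketch only: assumes the standalone `cachecontrol` + `requests` packages.
from datetime import timedelta

import requests
from cachecontrol import CacheControlAdapter
from cachecontrol.heuristics import BaseHeuristic, datetime_to_header, expire_after


class TenMinuteCache(BaseHeuristic):
    """Treat every response as cacheable and fresh for ten minutes."""

    def update_headers(self, response):
        # Overwrite the freshness information so the cache controller
        # considers the response fresh for the next ten minutes.
        expires = expire_after(timedelta(minutes=10))
        return {"expires": datetime_to_header(expires), "cache-control": "public"}


sess = requests.Session()
sess.mount("https://", CacheControlAdapter(heuristic=TenMinuteCache()))

apply() then merges these headers into each response and, because update_headers() returned something, stamps the Warning header from warning() onto it as well.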
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/serialize.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/serialize.py
new file mode 100644
index 0000000..a49487a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/serialize.py
@@ -0,0 +1,146 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import io
+from typing import IO, TYPE_CHECKING, Any, Mapping, cast
+
+from pip._vendor import msgpack
+from pip._vendor.requests.structures import CaseInsensitiveDict
+from pip._vendor.urllib3 import HTTPResponse
+
+if TYPE_CHECKING:
+    from pip._vendor.requests import PreparedRequest
+
+
+class Serializer:
+    serde_version = "4"
+
+    def dumps(
+        self,
+        request: PreparedRequest,
+        response: HTTPResponse,
+        body: bytes | None = None,
+    ) -> bytes:
+        response_headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(
+            response.headers
+        )
+
+        if body is None:
+            # When a body isn't passed in, we'll read the response. We
+            # also update the response with a new file handle to be
+            # sure it acts as though it was never read.
+            body = response.read(decode_content=False)
+            response._fp = io.BytesIO(body)  # type: ignore[assignment]
+            response.length_remaining = len(body)
+
+        data = {
+            "response": {
+                "body": body,  # Empty bytestring if body is stored separately
+                "headers": {str(k): str(v) for k, v in response.headers.items()},
+                "status": response.status,
+                "version": response.version,
+                "reason": str(response.reason),
+                "decode_content": response.decode_content,
+            }
+        }
+
+        # Construct our vary headers
+        data["vary"] = {}
+        if "vary" in response_headers:
+            varied_headers = response_headers["vary"].split(",")
+            for header in varied_headers:
+                header = str(header).strip()
+                header_value = request.headers.get(header, None)
+                if header_value is not None:
+                    header_value = str(header_value)
+                data["vary"][header] = header_value
+
+        return b",".join([f"cc={self.serde_version}".encode(), self.serialize(data)])
+
+    def serialize(self, data: dict[str, Any]) -> bytes:
+        return cast(bytes, msgpack.dumps(data, use_bin_type=True))
+
+    def loads(
+        self,
+        request: PreparedRequest,
+        data: bytes,
+        body_file: IO[bytes] | None = None,
+    ) -> HTTPResponse | None:
+        # Short-circuit if we've been given an empty set of data
+        if not data:
+            return None
+
+        # Previous versions of this library supported other serialization
+        # formats, but these have all been removed.
+        if not data.startswith(f"cc={self.serde_version},".encode()):
+            return None
+
+        data = data[5:]
+        return self._loads_v4(request, data, body_file)
+
+    def prepare_response(
+        self,
+        request: PreparedRequest,
+        cached: Mapping[str, Any],
+        body_file: IO[bytes] | None = None,
+    ) -> HTTPResponse | None:
+        """Verify our vary headers match and construct a real urllib3
+        HTTPResponse object.
+        """
+        # Special case the '*' Vary value as it means we cannot actually
+        # determine if the cached response is suitable for this request.
+        # This case is also handled in the controller code when creating
+        # a cache entry, but is left here for backwards compatibility.
+        if "*" in cached.get("vary", {}):
+            return None
+
+        # Ensure that the Vary headers for the cached response match our
+        # request
+        for header, value in cached.get("vary", {}).items():
+            if request.headers.get(header, None) != value:
+                return None
+
+        body_raw = cached["response"].pop("body")
+
+        headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(
+            data=cached["response"]["headers"]
+        )
+        if headers.get("transfer-encoding", "") == "chunked":
+            headers.pop("transfer-encoding")
+
+        cached["response"]["headers"] = headers
+
+        try:
+            body: IO[bytes]
+            if body_file is None:
+                body = io.BytesIO(body_raw)
+            else:
+                body = body_file
+        except TypeError:
+            # This can happen if cachecontrol serialized to v1 format (pickle)
+            # using Python 2. A Python 2 str(byte string) will be unpickled as
+            # a Python 3 str (unicode string), which will cause the above to
+            # fail with:
+            #
+            #     TypeError: 'str' does not support the buffer interface
+            body = io.BytesIO(body_raw.encode("utf8"))
+
+        # Discard any `strict` parameter serialized by older versions of cachecontrol.
+        cached["response"].pop("strict", None)
+
+        return HTTPResponse(body=body, preload_content=False, **cached["response"])
+
+    def _loads_v4(
+        self,
+        request: PreparedRequest,
+        data: bytes,
+        body_file: IO[bytes] | None = None,
+    ) -> HTTPResponse | None:
+        try:
+            cached = msgpack.loads(data, raw=False)
+        except ValueError:
+            return None
+
+        return self.prepare_response(request, cached, body_file)
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/wrapper.py b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/wrapper.py
new file mode 100644
index 0000000..f618bc3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/cachecontrol/wrapper.py
@@ -0,0 +1,43 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Collection
+
+from pip._vendor.cachecontrol.adapter import CacheControlAdapter
+from pip._vendor.cachecontrol.cache import DictCache
+
+if TYPE_CHECKING:
+    from pip._vendor import requests
+
+    from pip._vendor.cachecontrol.cache import BaseCache
+    from pip._vendor.cachecontrol.controller import CacheController
+    from pip._vendor.cachecontrol.heuristics import BaseHeuristic
+    from pip._vendor.cachecontrol.serialize import Serializer
+
+
+def CacheControl(
+    sess: requests.Session,
+    cache: BaseCache | None = None,
+    cache_etags: bool = True,
+    serializer: Serializer | None = None,
+    heuristic: BaseHeuristic | None = None,
+    controller_class: type[CacheController] | None = None,
+    adapter_class: type[CacheControlAdapter] | None = None,
+    cacheable_methods: Collection[str] | None = None,
+) -> requests.Session:
+    cache = DictCache() if cache is None else cache
+    adapter_class = adapter_class or CacheControlAdapter
+    adapter = adapter_class(
+        cache,
+        cache_etags=cache_etags,
+        serializer=serializer,
+        heuristic=heuristic,
+        controller_class=controller_class,
+        cacheable_methods=cacheable_methods,
+    )
+    sess.mount("http://", adapter)
+    sess.mount("https://", adapter)
+
+    return sess
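And the wrapper in use, a short sketch again assuming the standalone cachecontrol and requests packages; the URL is a placeholder:

# Sketch only: assumes the standalone `cachecontrol` + `requests` packages.
import requests
from cachecontrol import CacheControl

# Wrap a plain requests session; per the wrapper above, an in-memory
# DictCache is used when no cache argument is supplied.
sess = CacheControl(requests.Session())

resp = sess.get("https://example.com/")  # first request goes to the network
resp = sess.get("https://example.com/")  # may now be answered from the cache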
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__init__.py
new file mode 100644
index 0000000..e837049
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__init__.py
@@ -0,0 +1,4 @@
+from .core import contents, where
+
+__all__ = ["contents", "where"]
+__version__ = "2025.07.14"
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__main__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__main__.py
new file mode 100644
index 0000000..0037634
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__main__.py
@@ -0,0 +1,12 @@
+import argparse
+
+from pip._vendor.certifi import contents, where
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--contents", action="store_true")
+args = parser.parse_args()
+
+if args.contents:
+    print(contents())
+else:
+    print(where())
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..b3d2c6f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-312.pyc
new file mode 100644
index 0000000..2c86682
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-312.pyc
new file mode 100644
index 0000000..1aa30de
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/cacert.pem b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/cacert.pem
new file mode 100644
index 0000000..64c05d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/cacert.pem
@@ -0,0 +1,4778 @@
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint:
85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ 
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 
10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep 
++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SecureTrust CA O=SecureTrust Corporation +# Subject: CN=SecureTrust CA O=SecureTrust Corporation +# Label: "SecureTrust CA" +# Serial: 17199774589125277788362757014266862032 +# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 +# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 +# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO 
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw 
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny 
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +# Issuer: O=certSIGN OU=certSIGN ROOT 
CA +# Subject: O=certSIGN OU=certSIGN ROOT CA +# Label: "certSIGN ROOT CA" +# Serial: 35210227249154 +# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17 +# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b +# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. 
OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny" +# Serial: 80544274841616 +# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 +# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 +# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
+# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + 
+# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. +# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA 
OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- 
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ 
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw 
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. +# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 
9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT 
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx 
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ 
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 
8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q 
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg 
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO 
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G 
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT 
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc 
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN 
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. 
+# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc 
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 2 O=Amazon
+# Subject: CN=Amazon Root CA 2 O=Amazon
+# Label: "Amazon Root CA 2"
+# Serial: 143266982885963551818349160658925006970653239
+# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
+# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
+# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
+gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
+W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
+1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
+8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
+2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
+z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
+8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
+mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
+7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
+0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
+UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
+LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
+k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
+7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
+btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
+urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
+n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
+9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
+4PsJYGw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 3 O=Amazon
+# Subject: CN=Amazon Root CA 3 O=Amazon
+# Label: "Amazon Root CA 3"
+# Serial: 143266986699090766294700635381230934788665930
+# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
+# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
+# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
+ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
+ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
+BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
+YyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 4 O=Amazon
+# Subject: CN=Amazon Root CA 4 O=Amazon
+# Label: "Amazon Root CA 4"
+# Serial: 143266989758080763974105200630763877849284878
+# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
+# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
+# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
+9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
+M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
+MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
+CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
+1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
+# Serial: 1
+# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
+# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
+# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
+-----BEGIN CERTIFICATE-----
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
+bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
+KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
+BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
+dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
+IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
+QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
+TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
+LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
+a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
+LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
+YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
+iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
+AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
+V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
+AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
+IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
+lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
+8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
+lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Label: "GDCA TrustAUTH R5 ROOT"
+# Serial: 9009899650740120186
+# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4
+# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4
+# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93
+-----BEGIN CERTIFICATE-----
+MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE
+BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
+IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0
+MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV
+BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w
+HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj
+Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj
+TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u
+KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj
+qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm
+MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12
+ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP
+zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk
+L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC
+jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA
+HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC
+AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
+p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm
+DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5
+COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry
+L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf
+JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg
+IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io
+2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV
+09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq
+T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
+MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Label: "SSL.com Root Certification Authority RSA"
+# Serial: 8875640296558310041
+# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29
+# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb
+# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69
+-----BEGIN CERTIFICATE-----
+MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
+BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
+DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
+OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
+bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
+xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
+qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
+C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
+6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
+/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
+YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
+JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
+US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
+ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
+M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
+A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
+cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
+Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
+PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
+q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
+cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
+a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
+H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
+K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
+nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
+oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
+Ic2wBlX7Jz9TkHCpBB5XJ7k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com Root Certification Authority ECC"
+# Serial: 8495723813297216424
+# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e
+# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a
+# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65
+-----BEGIN CERTIFICATE-----
+MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
+WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
+b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
+b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
+7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
+CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq +M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC 
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx
+NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv
+bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA
+VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku
+WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX
+5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ
+ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg
+h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Label: "GlobalSign Root CA - R6"
+# Serial: 1417766617973444989252670301619537
+# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae
+# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1
+# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg
+MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx
+MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET
+MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI
+xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k
+ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD
+aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw
+LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw
+1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX
+k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2
+SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h
+bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n
+WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY
+rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce
+MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu
+bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN
+nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt
+Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61
+55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj
+vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf
+cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz
+oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp
+nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs
+pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v
+JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R
+8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4
+5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GC CA"
+# Serial: 44084345621038548146064804565436152554
+# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23
+# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31
+# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d
+-----BEGIN CERTIFICATE-----
+MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw
+CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91
+bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg
+Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ
+BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu
+ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS
+b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni
+eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W
+p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T
+rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV
+57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg
+Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Global G2 Root O=UniTrust
+# Subject: CN=UCA Global G2 Root O=UniTrust
+# Label: "UCA Global G2 Root"
+# Serial: 124779693093741543919145257850076631279
+# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8
+# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a
+# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH
+bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x
+CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds
+b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr
+b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9
+kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm
+VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R
+VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc
+C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj
+tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY
+D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv
+j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl
+NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6
+iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP
+O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV
+ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj
+L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5
+1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl
+1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU
+b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV
+PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj
+y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb
+EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg
+DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI
++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS +sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi +c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO 
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L +6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB +lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v +faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign Root CA - G1" +# Serial: 235931866688319308814040 +# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac +# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c +# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67 +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD +VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU +ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH +MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO +MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv +Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz +f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO +8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq +d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM +tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt +Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB +o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x +PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM +wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d +GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH +6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby 
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx
+iN66zB+Afko=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign ECC Root CA - G3"
+# Serial: 287880440101571086945156
+# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40
+# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1
+# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b
+-----BEGIN CERTIFICATE-----
+MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG
+EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo
+bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g
+RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ
+TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s
+b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0
+WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS
+fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB
+zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq
+hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB
+CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD
++JbNR6iC8hZVdyR+EhCVBCyj
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign Root CA - C1"
+# Serial: 825510296613316004955058
+# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68
+# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01
+# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG
+A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg
+SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v
+dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ
+BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ
+HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH
+3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH
+GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c
+xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1
+aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq
+TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87
+/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4
+kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG
+YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT
++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo
+WXzhriKi4gp6D/piq1JM4fHfyr6DDUI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign ECC Root CA - C3"
+# Serial: 582948710642506000014504
+# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5
+# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66
+# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3
+-----BEGIN CERTIFICATE-----
+MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG
+EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx
+IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND
+IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci
+MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti
+sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O
+BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c
+3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J
+0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Label: "Hongkong Post Root CA 3"
+# Serial: 46170865288971385588281144162979347873371282084
+# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0
+# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02
+# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6
+-----BEGIN CERTIFICATE-----
+MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL
+BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ
+SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n
+a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5
+NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT
+CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u
+Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO
+dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI
+VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV
+9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY
+2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY
+vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt
+bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb
+x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+
+l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK
+TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj
+Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e
+i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw
+DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG
+7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk
+MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr
+gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk
+GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS
+3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm
+Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+
+l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c
+JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP
+L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG +mpv0 +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft ECC Root Certificate Authority 2017" +# Serial: 136839042543790627607696632466672567020 +# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67 +# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5 +# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02 +-----BEGIN CERTIFICATE----- +MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD +VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw +MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV +UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy +b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR +ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb +hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3 +FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV +L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB +iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M= +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft RSA Root Certificate Authority 2017" +# Serial: 40975477897264996090493496164228220339 +# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47 +# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74 +# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0 +-----BEGIN CERTIFICATE----- +MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl +MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw +NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG +EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N +aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ +Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0 +ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1 +HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm +gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ +jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc +aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG +YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6 +W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K +UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH ++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q +W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC +LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC 
+gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6 +tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh +SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2 +TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3 +pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR +xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp +GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9 +dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN +AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB +RA+GsCyRxj3qrg+E +-----END CERTIFICATE----- + +# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Label: "e-Szigno Root CA 2017" +# Serial: 411379200276854331539784714 +# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98 +# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1 +# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99 +-----BEGIN CERTIFICATE----- +MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV +BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk +LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv +b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ +BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg +THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v +IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv +xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H +Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB +eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo +jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ ++efcMQ== +-----END CERTIFICATE----- + +# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Label: "certSIGN Root CA G2" +# Serial: 313609486401300475190 +# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7 +# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32 +# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV +BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g +Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ +BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ +R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF +dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw +vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ +uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp +n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs +cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW +xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P +rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF +DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx +DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy +LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C +eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ 
+d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq +kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC +b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl +qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0 +OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c +NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk +ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO +pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj +03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk +PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE +1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX +QRBdJ3NghVdJIgc= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global Certification Authority" +# Serial: 1846098327275375458322922162 +# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e +# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5 +# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8 +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw +CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x +ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1 +c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx +OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI +SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn +swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu +7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8 +1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW +80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP +JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l +RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw +hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10 +coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc +BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n +twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud +DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W +0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe +uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q +lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB +aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE +sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT +MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe +qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh +VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8 +h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9 +EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK +yeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. 
+# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P256 Certification Authority" +# Serial: 4151900041497450638097112925 +# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54 +# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf +# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4 +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG +SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN +FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w +DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw +CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh +DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P384 Certification Authority" +# Serial: 2704997926503831671788816187 +# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6 +# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2 +# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97 +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB +BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ +j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF +1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G +A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3 +AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC +MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu +Sw== +-----END CERTIFICATE----- + +# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. +# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. 
+# Label: "NAVER Global Root Certification Authority" +# Serial: 9013692873798656336226253319739695165984492813 +# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b +# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1 +# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65 +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM +BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG +T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx +CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD +b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA +iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH +38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE +HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz +kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP +szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq +vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf +nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG +YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo +0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a +CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K +AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I +36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN +qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj +cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm ++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL +hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe +lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7 +p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8 +piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR +LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX +5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO +dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul +9XXeifdy +-----END CERTIFICATE----- + +# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS" +# Serial: 131542671362353147877283741781055151509 +# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb +# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a +# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw +CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw +FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S +Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5 +MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL +DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS +QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH 
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK +Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu +SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC +MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy +v+c= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Label: "GlobalSign Root R46" +# Serial: 1552617688466950547958867513931858518042577 +# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef +# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90 +# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA +MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD +VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy +MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt +c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ +OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG +vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud +316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo +0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE +y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF +zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE ++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN +I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs +x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa +ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC +4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4 +7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti +2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk +pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF +FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt +rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk +ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5 +u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP +4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6 +N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3 +vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Label: "GlobalSign Root E46" +# Serial: 1552617690338932563915843282459653771421763 +# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f +# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84 +# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58 +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx +CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD +ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw +MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex 
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq +R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd +yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ +7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8 ++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Label: "GLOBALTRUST 2020" +# Serial: 109160994242082918454945253 +# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8 +# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2 +# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG +A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw +FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx +MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u +aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b +RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z +YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3 +QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw +yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+ +BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ +SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH +r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0 +4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me +dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw +q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2 +nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu +H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC +XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd +6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf ++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi +kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7 +wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB +TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C +MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn +4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I +aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy +qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Label: "ANF Secure Server Root CA" +# Serial: 996390341000653745 +# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96 +# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74 +# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99 +-----BEGIN CERTIFICATE----- 
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV
+BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk
+YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV
+BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN
+MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF
+UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD
+VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v
+dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj
+cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q
+yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH
+2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX
+H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL
+zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR
+p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz
+W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/
+SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn
+LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3
+n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B
+u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj
+o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
+AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L
+9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej
+rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK
+pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0
+vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq
+OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ
+/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9
+2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI
++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2
+MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo
+tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum EC-384 CA"
+# Serial: 160250656287871593594747141429395092468
+# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1
+# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed
+# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6
+-----BEGIN CERTIFICATE-----
+MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw
+CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw
+JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT
+EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0
+WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT
+LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX
+BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE
+KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm
+Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8
+EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J
+UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn
+nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Root CA"
+# Serial: 40870380103424195783807378461123655149
+# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29
+# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5
+# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd
+-----BEGIN CERTIFICATE-----
+MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6
+MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu
+MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV
+BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw
+MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg
+U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo
+b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ
+n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q
+p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq
+NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF
+8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3
+HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa
+mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi
+7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF
+ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P
+qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ
+v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6
+Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1
+vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD
+ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4
+WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo
+zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR
+5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ
+GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq
+0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D
+P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM
+qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP
+0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf
+E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb
+-----END CERTIFICATE-----
+
+# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
+# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
+# Label: "TunTrust Root CA"
+# Serial: 108534058042236574382096126452369648152337120275
+# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4
+# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb
+# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41
+-----BEGIN CERTIFICATE-----
+MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL
+BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg
+Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv
+b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG
+EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u
+IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ
+n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd
+2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF
+VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ
+GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF
+li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU
+r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2
+eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb
+MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg
+jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB
+7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW
+5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE
+ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0
+90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z
+xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu
+QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4
+FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH
+22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP
+xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn
+dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5
+Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b
+nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ
+CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH
+u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj
+d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o=
+-----END CERTIFICATE-----
+
+# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Label: "HARICA TLS RSA Root CA 2021"
+# Serial: 76817823531813593706434026085292783742
+# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91
+# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d
+# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d
+-----BEGIN CERTIFICATE-----
+MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs
+MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg
+Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL
+MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl
+YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv
+b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l
+mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE
+4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv
+a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M
+pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw
+Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b
+LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY
+AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB
+AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq
+E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr
+W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ
+CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE
+AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU
+X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3
+f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja
+H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP
+JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P
+zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt
+jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0
+/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT
+BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79
+aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW
+xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU
+63ZTGI0RmLo=
+-----END CERTIFICATE-----
+
+# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Label: "HARICA TLS ECC Root CA 2021"
+# Serial: 137515985548005187474074462014555733966
+# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0
+# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48
+# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01
+-----BEGIN CERTIFICATE-----
+MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw
+CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh
+cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v
+dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG
+A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj
+aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg
+Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7
+KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y
+STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD
+AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw
+SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN
+nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 1977337328857672817
+# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3
+# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe
+# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1
+MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc
+tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd
+IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j
+b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC
+AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw
+ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m
+iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF
+Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ
+hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P
+Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE
+EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV
+1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t
+CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR
+5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw
+f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9
+ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK
+GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV
+-----END CERTIFICATE-----
+
+# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
+# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
+# Label: "vTrus ECC Root CA" +# Serial: 630369271402956006249506845124680065938238527194 +# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85 +# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1 +# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3 +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw +RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY +BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz +MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u +LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0 +v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd +e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw +V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA +AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG +GJTO +-----END CERTIFICATE----- + +# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Label: "vTrus Root CA" +# Serial: 387574501246983434957692974888460947164905180485 +# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc +# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7 +# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87 +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL +BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x +FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx +MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s +THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc +IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU +AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+ +GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9 +8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH +flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt +J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim +0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN +pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ +UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW +OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB +AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet +8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j +bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM +Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv +TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS +S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr +I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9 +b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB +UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P +Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven +sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s= +-----END CERTIFICATE----- + +# 
Issuer: CN=ISRG Root X2 O=Internet Security Research Group +# Subject: CN=ISRG Root X2 O=Internet Security Research Group +# Label: "ISRG Root X2" +# Serial: 87493402998870891108772069816698636114 +# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5 +# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af +# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70 +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw +CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg +R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 +MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT +ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW ++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 +ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI +zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW +tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 +/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Label: "HiPKI Root CA - G1" +# Serial: 60966262342023497858655262305426234976 +# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3 +# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60 +# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa +Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3 +YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw +qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv +Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6 +lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz +Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ +KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK +FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj +HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr +y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ +/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM +a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6 +fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG +SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc +SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza +ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc +XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg +iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho +L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF +Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr 
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+ +vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU +YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 159662223612894884239637590694 +# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc +# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28 +# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ ++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 159662320309726417404178440727 +# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40 +# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a +# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb +C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ 
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 159662449406622349769042896298 +# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc +# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94 +# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu +MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV ++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 159662495401136852707857743206 +# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73 +# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46 +# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi 
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 159662532700760215368942768210 +# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8 +# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47 +# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR +HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj +# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj +# Label: "Telia Root CA v2" +# Serial: 7288924052977061235122729490515358 +# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48 +# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd +# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx +CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE +AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1 +NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ +MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq +AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9 +vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9 +lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD +n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT +7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o +6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC +TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6 +WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R +DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI +pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj +YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy +rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi +0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM 
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS +SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K +TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF +6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er +3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt +Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT +VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW +ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA +rBPuUBQemMc= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 1 2020" +# Serial: 165870826978392376648679885835942448534 +# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed +# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67 +# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44 +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5 +NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS +zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0 +QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/ +VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW +wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV +dWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 1 2020" +# Serial: 126288379621884218666039612629459926992 +# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e +# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07 +# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5 +NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC +/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD +wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3 +OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA 
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb +gfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS ECC P384 Root G5" +# Serial: 13129116028163249804115411775095713523 +# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed +# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee +# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05 +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp +Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2 +MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ +bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS +7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp +0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS +B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49 +BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ +LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4 +DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS RSA4096 Root G5" +# Serial: 11930366277458970227240571539258396554 +# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1 +# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35 +# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75 +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN +MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT +HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN +NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs +IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+ +ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0 +2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp +wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM +pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD +nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po +sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx +Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd +Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX +KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe +XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL +tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv +TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN +AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H +PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF +O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ +REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik +AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv 
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+ +p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw +MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF +qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK +ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root R1 O=Certainly +# Subject: CN=Certainly Root R1 O=Certainly +# Label: "Certainly Root R1" +# Serial: 188833316161142517227353805653483829216 +# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12 +# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af +# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw +PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy +dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9 +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0 +YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2 +1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT +vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed +aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0 +1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5 +r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5 +cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ +wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ +6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA +2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH +Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR +eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u +d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr +PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi +1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd +rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di +taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7 +lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj +yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn +Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy +yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n +wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6 +OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root E1 O=Certainly +# Subject: CN=Certainly Root E1 O=Certainly +# Label: "Certainly Root E1" +# Serial: 8168531406727139161245376702891150584 +# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9 +# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b +# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2 +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw +CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu +bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ +BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s +eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK 
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2 +QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4 +hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm +ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG +BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Label: "Security Communication ECC RootCA1" +# Serial: 15446673492073852651 +# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86 +# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41 +# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11 +-----BEGIN CERTIFICATE----- +MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT +AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD +VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx +NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT +HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5 +IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl +dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK +ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu +9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O +be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k= +-----END CERTIFICATE----- + +# Issuer: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY +# Subject: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY +# Label: "BJCA Global Root CA1" +# Serial: 113562791157148395269083148143378328608 +# MD5 Fingerprint: 42:32:99:76:43:33:36:24:35:07:82:9b:28:f9:d0:90 +# SHA1 Fingerprint: d5:ec:8d:7b:4c:ba:79:f4:e7:e8:cb:9d:6b:ae:77:83:10:03:21:6a +# SHA256 Fingerprint: f3:89:6f:88:fe:7c:0a:88:27:66:a7:fa:6a:d2:74:9f:b5:7a:7f:3e:98:fb:76:9c:1f:a7:b0:9c:2c:44:d5:ae +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIQVW9l47TZkGobCdFsPsBsIDANBgkqhkiG9w0BAQsFADBU +MQswCQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRI +T1JJVFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0ExMB4XDTE5MTIxOTAz +MTYxN1oXDTQ0MTIxMjAzMTYxN1owVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJF +SUpJTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2Jh +bCBSb290IENBMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPFmCL3Z +xRVhy4QEQaVpN3cdwbB7+sN3SJATcmTRuHyQNZ0YeYjjlwE8R4HyDqKYDZ4/N+AZ +spDyRhySsTphzvq3Rp4Dhtczbu33RYx2N95ulpH3134rhxfVizXuhJFyV9xgw8O5 +58dnJCNPYwpj9mZ9S1WnP3hkSWkSl+BMDdMJoDIwOvqfwPKcxRIqLhy1BDPapDgR +at7GGPZHOiJBhyL8xIkoVNiMpTAK+BcWyqw3/XmnkRd4OJmtWO2y3syJfQOcs4ll +5+M7sSKGjwZteAf9kRJ/sGsciQ35uMt0WwfCyPQ10WRjeulumijWML3mG90Vr4Tq +nMfK9Q7q8l0ph49pczm+LiRvRSGsxdRpJQaDrXpIhRMsDQa4bHlW/KNnMoH1V6XK +V0Jp6VwkYe/iMBhORJhVb3rCk9gZtt58R4oRTklH2yiUAguUSiz5EtBP6DF+bHq/ +pj+bOT0CFqMYs2esWz8sgytnOYFcuX6U1WTdno9uruh8W7TXakdI136z1C2OVnZO +z2nxbkRs1CTqjSShGL+9V/6pmTW12xB3uD1IutbB5/EjPtffhZ0nPNRAvQoMvfXn +jSXWgXSHRtQpdaJCbPdzied9v3pKH9MiyRVVz99vfFXQpIsHETdfg6YmV6YBW37+ +WGgHqel62bno/1Afq8K0wM7o6v0PvY1NuLxxAgMBAAGjQjBAMB0GA1UdDgQWBBTF +7+3M2I0hxkjk49cULqcWk+WYATAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAUoKsITQfI/Ki2Pm4rzc2IInRNwPWaZ+4 
+YRC6ojGYWUfo0Q0lHhVBDOAqVdVXUsv45Mdpox1NcQJeXyFFYEhcCY5JEMEE3Kli +awLwQ8hOnThJdMkycFRtwUf8jrQ2ntScvd0g1lPJGKm1Vrl2i5VnZu69mP6u775u ++2D2/VnGKhs/I0qUJDAnyIm860Qkmss9vk/Ves6OF8tiwdneHg56/0OGNFK8YT88 +X7vZdrRTvJez/opMEi4r89fO4aL/3Xtw+zuhTaRjAv04l5U/BXCga99igUOLtFkN +SoxUnMW7gZ/NfaXvCyUeOiDbHPwfmGcCCtRzRBPbUYQaVQNW4AB+dAb/OMRyHdOo +P2gxXdMJxy6MW2Pg6Nwe0uxhHvLe5e/2mXZgLR6UcnHGCyoyx5JO1UbXHfmpGQrI ++pXObSOYqgs4rZpWDW+N8TEAiMEXnM0ZNjX+VVOg4DwzX5Ze4jLp3zO7Bkqp2IRz +znfSxqxx4VyjHQy7Ct9f4qNx2No3WqB4K/TUfet27fJhcKVlmtOJNBir+3I+17Q9 +eVzYH6Eze9mCUAyTF6ps3MKCuwJXNq+YJyo5UOGwifUll35HaBC07HPKs5fRJNz2 +YqAo07WjuGS3iGJCz51TzZm+ZGiPTx4SSPfSKcOYKMryMguTjClPPGAyzQWWYezy +r/6zcCwupvI= +-----END CERTIFICATE----- + +# Issuer: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY +# Subject: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY +# Label: "BJCA Global Root CA2" +# Serial: 58605626836079930195615843123109055211 +# MD5 Fingerprint: 5e:0a:f6:47:5f:a6:14:e8:11:01:95:3f:4d:01:eb:3c +# SHA1 Fingerprint: f4:27:86:eb:6e:b8:6d:88:31:67:02:fb:ba:66:a4:53:00:aa:7a:a6 +# SHA256 Fingerprint: 57:4d:f6:93:1e:27:80:39:66:7b:72:0a:fd:c1:60:0f:c2:7e:b6:6d:d3:09:29:79:fb:73:85:64:87:21:28:82 +-----BEGIN CERTIFICATE----- +MIICJTCCAaugAwIBAgIQLBcIfWQqwP6FGFkGz7RK6zAKBggqhkjOPQQDAzBUMQsw +CQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRIT1JJ +VFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0EyMB4XDTE5MTIxOTAzMTgy +MVoXDTQ0MTIxMjAzMTgyMVowVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJFSUpJ +TkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2JhbCBS +b290IENBMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABJ3LgJGNU2e1uVCxA/jlSR9B +IgmwUVJY1is0j8USRhTFiy8shP8sbqjV8QnjAyEUxEM9fMEsxEtqSs3ph+B99iK+ ++kpRuDCK/eHeGBIK9ke35xe/J4rUQUyWPGCWwf0VHKNCMEAwHQYDVR0OBBYEFNJK +sVF/BvDRgh9Obl+rg/xI1LCRMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMAoGCCqGSM49BAMDA2gAMGUCMBq8W9f+qdJUDkpd0m2xQNz0Q9XSSpkZElaA +94M04TVOSG0ED1cxMDAtsaqdAzjbBgIxAMvMh1PLet8gUXOQwKhbYdDFUDn9hf7B +43j4ptZLvZuHjw/l1lOWqzzIQNph91Oj9w== +-----END CERTIFICATE----- + +# Issuer: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited +# Subject: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited +# Label: "Sectigo Public Server Authentication Root E46" +# Serial: 88989738453351742415770396670917916916 +# MD5 Fingerprint: 28:23:f8:b2:98:5c:37:16:3b:3e:46:13:4e:b0:b3:01 +# SHA1 Fingerprint: ec:8a:39:6c:40:f0:2e:bc:42:75:d4:9f:ab:1c:1a:5b:67:be:d2:9a +# SHA256 Fingerprint: c9:0f:26:f0:fb:1b:40:18:b2:22:27:51:9b:5c:a2:b5:3e:2c:a5:b3:be:5c:f1:8e:fe:1b:ef:47:38:0c:53:83 +-----BEGIN CERTIFICATE----- +MIICOjCCAcGgAwIBAgIQQvLM2htpN0RfFf51KBC49DAKBggqhkjOPQQDAzBfMQsw +CQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1T +ZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwHhcN +MjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEYMBYG +A1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBT +ZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAR2+pmpbiDt+dd34wc7qNs9Xzjoq1WmVk/WSOrsfy2qw7LFeeyZYX8QeccC +WvkEN/U0NSt3zn8gj1KjAIns1aeibVvjS5KToID1AZTc8GgHHs3u/iVStSBDHBv+ +6xnOQ6OjQjBAMB0GA1UdDgQWBBTRItpMWfFLXyY4qp3W7usNw/upYTAOBgNVHQ8B +Af8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNnADBkAjAn7qRa +qCG76UeXlImldCBteU/IvZNeWBj7LRoAasm4PdCkT0RHlAFWovgzJQxC36oCMB3q +4S6ILuH5px0CMk7yn2xVdOOurvulGu7t0vzCAxHrRVxgED1cf5kDW21USAGKcw== +-----END CERTIFICATE----- + +# Issuer: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited +# Subject: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited +# 
Label: "Sectigo Public Server Authentication Root R46" +# Serial: 156256931880233212765902055439220583700 +# MD5 Fingerprint: 32:10:09:52:00:d5:7e:6c:43:df:15:c0:b1:16:93:e5 +# SHA1 Fingerprint: ad:98:f9:f3:e4:7d:75:3b:65:d4:82:b3:a4:52:17:bb:6e:f5:e4:38 +# SHA256 Fingerprint: 7b:b6:47:a6:2a:ee:ac:88:bf:25:7a:a5:22:d0:1f:fe:a3:95:e0:ab:45:c7:3f:93:f6:56:54:ec:38:f2:5a:06 +-----BEGIN CERTIFICATE----- +MIIFijCCA3KgAwIBAgIQdY39i658BwD6qSWn4cetFDANBgkqhkiG9w0BAQwFADBf +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQD +Ey1TZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYw +HhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEY +MBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1Ymxp +YyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCTvtU2UnXYASOgHEdCSe5jtrch/cSV1UgrJnwUUxDa +ef0rty2k1Cz66jLdScK5vQ9IPXtamFSvnl0xdE8H/FAh3aTPaE8bEmNtJZlMKpnz +SDBh+oF8HqcIStw+KxwfGExxqjWMrfhu6DtK2eWUAtaJhBOqbchPM8xQljeSM9xf +iOefVNlI8JhD1mb9nxc4Q8UBUQvX4yMPFF1bFOdLvt30yNoDN9HWOaEhUTCDsG3X +ME6WW5HwcCSrv0WBZEMNvSE6Lzzpng3LILVCJ8zab5vuZDCQOc2TZYEhMbUjUDM3 +IuM47fgxMMxF/mL50V0yeUKH32rMVhlATc6qu/m1dkmU8Sf4kaWD5QazYw6A3OAS +VYCmO2a0OYctyPDQ0RTp5A1NDvZdV3LFOxxHVp3i1fuBYYzMTYCQNFu31xR13NgE +SJ/AwSiItOkcyqex8Va3e0lMWeUgFaiEAin6OJRpmkkGj80feRQXEgyDet4fsZfu ++Zd4KKTIRJLpfSYFplhym3kT2BFfrsU4YjRosoYwjviQYZ4ybPUHNs2iTG7sijbt +8uaZFURww3y8nDnAtOFr94MlI1fZEoDlSfB1D++N6xybVCi0ITz8fAr/73trdf+L +HaAZBav6+CuBQug4urv7qv094PPK306Xlynt8xhW6aWWrL3DkJiy4Pmi1KZHQ3xt +zwIDAQABo0IwQDAdBgNVHQ4EFgQUVnNYZJX5khqwEioEYnmhQBWIIUkwDgYDVR0P +AQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAC9c +mTz8Bl6MlC5w6tIyMY208FHVvArzZJ8HXtXBc2hkeqK5Duj5XYUtqDdFqij0lgVQ +YKlJfp/imTYpE0RHap1VIDzYm/EDMrraQKFz6oOht0SmDpkBm+S8f74TlH7Kph52 +gDY9hAaLMyZlbcp+nv4fjFg4exqDsQ+8FxG75gbMY/qB8oFM2gsQa6H61SilzwZA +Fv97fRheORKkU55+MkIQpiGRqRxOF3yEvJ+M0ejf5lG5Nkc/kLnHvALcWxxPDkjB +JYOcCj+esQMzEhonrPcibCTRAUH4WAP+JWgiH5paPHxsnnVI84HxZmduTILA7rpX +DhjvLpr3Etiga+kFpaHpaPi8TD8SHkXoUsCjvxInebnMMTzD9joiFgOgyY9mpFui +TdaBJQbpdqQACj7LzTWb4OE4y2BThihCQRxEV+ioratF4yUQvNs+ZUH7G6aXD+u5 +dHn5HrwdVw1Hr8Mvn4dGp+smWg9WY7ViYG4A++MnESLn/pmPNPW56MORcr3Ywx65 +LvKRRFHQV80MNNVIIb/bE/FmJUNS0nAiNs2fxBx1IK1jcmMGDw4nztJqDby1ORrp +0XZ60Vzk50lJLVU3aPAaOpg+VBeHVOmmJ1CJeyAvP/+/oYtKR5j/K3tJPsMpRmAY +QqszKbrAKbkTidOIijlBO8n9pu0f9GBj39ItVQGL +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation +# Subject: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation +# Label: "SSL.com TLS RSA Root CA 2022" +# Serial: 148535279242832292258835760425842727825 +# MD5 Fingerprint: d8:4e:c6:59:30:d8:fe:a0:d6:7a:5a:2c:2c:69:78:da +# SHA1 Fingerprint: ec:2c:83:40:72:af:26:95:10:ff:0e:f2:03:ee:31:70:f6:78:9d:ca +# SHA256 Fingerprint: 8f:af:7d:2e:2c:b4:70:9b:b8:e0:b3:36:66:bf:75:a5:dd:45:b5:de:48:0f:8e:a8:d4:bf:e6:be:bc:17:f2:ed +-----BEGIN CERTIFICATE----- +MIIFiTCCA3GgAwIBAgIQb77arXO9CEDii02+1PdbkTANBgkqhkiG9w0BAQsFADBO +MQswCQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQD +DBxTU0wuY29tIFRMUyBSU0EgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzQyMloX +DTQ2MDgxOTE2MzQyMVowTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jw +b3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgUlNBIFJvb3QgQ0EgMjAyMjCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANCkCXJPQIgSYT41I57u9nTP +L3tYPc48DRAokC+X94xI2KDYJbFMsBFMF3NQ0CJKY7uB0ylu1bUJPiYYf7ISf5OY +t6/wNr/y7hienDtSxUcZXXTzZGbVXcdotL8bHAajvI9AI7YexoS9UcQbOcGV0ins +S657Lb85/bRi3pZ7QcacoOAGcvvwB5cJOYF0r/c0WRFXCsJbwST0MXMwgsadugL3 
+PnxEX4MN8/HdIGkWCVDi1FW24IBydm5MR7d1VVm0U3TZlMZBrViKMWYPHqIbKUBO +L9975hYsLfy/7PO0+r4Y9ptJ1O4Fbtk085zx7AGL0SDGD6C1vBdOSHtRwvzpXGk3 +R2azaPgVKPC506QVzFpPulJwoxJF3ca6TvvC0PeoUidtbnm1jPx7jMEWTO6Af77w +dr5BUxIzrlo4QqvXDz5BjXYHMtWrifZOZ9mxQnUjbvPNQrL8VfVThxc7wDNY8VLS ++YCk8OjwO4s4zKTGkH8PnP2L0aPP2oOnaclQNtVcBdIKQXTbYxE3waWglksejBYS +d66UNHsef8JmAOSqg+qKkK3ONkRN0VHpvB/zagX9wHQfJRlAUW7qglFA35u5CCoG +AtUjHBPW6dvbxrB6y3snm/vg1UYk7RBLY0ulBY+6uB0rpvqR4pJSvezrZ5dtmi2f +gTIFZzL7SAg/2SW4BCUvAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAU+y437uOEeicuzRk1sTN8/9REQrkwHQYDVR0OBBYEFPsuN+7jhHonLs0Z +NbEzfP/UREK5MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAjYlt +hEUY8U+zoO9opMAdrDC8Z2awms22qyIZZtM7QbUQnRC6cm4pJCAcAZli05bg4vsM +QtfhWsSWTVTNj8pDU/0quOr4ZcoBwq1gaAafORpR2eCNJvkLTqVTJXojpBzOCBvf +R4iyrT7gJ4eLSYwfqUdYe5byiB0YrrPRpgqU+tvT5TgKa3kSM/tKWTcWQA673vWJ +DPFs0/dRa1419dvAJuoSc06pkZCmF8NsLzjUo3KUQyxi4U5cMj29TH0ZR6LDSeeW +P4+a0zvkEdiLA9z2tmBVGKaBUfPhqBVq6+AL8BQx1rmMRTqoENjwuSfr98t67wVy +lrXEj5ZzxOhWc5y8aVFjvO9nHEMaX3cZHxj4HCUp+UmZKbaSPaKDN7EgkaibMOlq +bLQjk2UEqxHzDh1TJElTHaE/nUiSEeJ9DU/1172iWD54nR4fK/4huxoTtrEoZP2w +AgDHbICivRZQIA9ygV/MlP+7mea6kMvq+cYMwq7FGc4zoWtcu358NFcXrfA/rs3q +r5nsLFR+jM4uElZI7xc7P0peYNLcdDa8pUNjyw9bowJWCZ4kLOGGgYz+qxcs+sji +Mho6/4UIyYOf8kpIEFR3N+2ivEC+5BB09+Rbu7nzifmPQdjH5FCQNYA+HLhNkNPU +98OwoX6EyneSMSy4kLGCenROmxMmtNVQZlR4rmA= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation +# Subject: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation +# Label: "SSL.com TLS ECC Root CA 2022" +# Serial: 26605119622390491762507526719404364228 +# MD5 Fingerprint: 99:d7:5c:f1:51:36:cc:e9:ce:d9:19:2e:77:71:56:c5 +# SHA1 Fingerprint: 9f:5f:d9:1a:54:6d:f5:0c:71:f0:ee:7a:bd:17:49:98:84:73:e2:39 +# SHA256 Fingerprint: c3:2f:fd:9f:46:f9:36:d1:6c:36:73:99:09:59:43:4b:9a:d6:0a:af:bb:9e:7c:f3:36:54:f1:44:cc:1b:a1:43 +-----BEGIN CERTIFICATE----- +MIICOjCCAcCgAwIBAgIQFAP1q/s3ixdAW+JDsqXRxDAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxT +U0wuY29tIFRMUyBFQ0MgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzM0OFoXDTQ2 +MDgxOTE2MzM0N1owTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jwb3Jh +dGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgRUNDIFJvb3QgQ0EgMjAyMjB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABEUpNXP6wrgjzhR9qLFNoFs27iosU8NgCTWyJGYm +acCzldZdkkAZDsalE3D07xJRKF3nzL35PIXBz5SQySvOkkJYWWf9lCcQZIxPBLFN +SeR7T5v15wj4A4j3p8OSSxlUgaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSME +GDAWgBSJjy+j6CugFFR781a4Jl9nOAuc0DAdBgNVHQ4EFgQUiY8vo+groBRUe/NW +uCZfZzgLnNAwDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMDA2gAMGUCMFXjIlbp +15IkWE8elDIPDAI2wv2sdDJO4fscgIijzPvX6yv/N33w7deedWo1dlJF4AIxAMeN +b0Igj762TVntd00pxCAgRWSGOlDGxK0tk/UYfXLtqc/ErFc2KAhl3zx5Zn6g6g== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos +# Subject: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos +# Label: "Atos TrustedRoot Root CA ECC TLS 2021" +# Serial: 81873346711060652204712539181482831616 +# MD5 Fingerprint: 16:9f:ad:f1:70:ad:79:d6:ed:29:b4:d1:c5:79:70:a8 +# SHA1 Fingerprint: 9e:bc:75:10:42:b3:02:f3:81:f4:f7:30:62:d4:8f:c3:a7:51:b2:dd +# SHA256 Fingerprint: b2:fa:e5:3e:14:cc:d7:ab:92:12:06:47:01:ae:27:9c:1d:89:88:fa:cb:77:5f:a8:a0:08:91:4e:66:39:88:a8 +-----BEGIN CERTIFICATE----- +MIICFTCCAZugAwIBAgIQPZg7pmY9kGP3fiZXOATvADAKBggqhkjOPQQDAzBMMS4w +LAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgRUNDIFRMUyAyMDIxMQ0w +CwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTI2MjNaFw00MTA0 +MTcwOTI2MjJaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBDQSBF 
+Q0MgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMHYwEAYHKoZI +zj0CAQYFK4EEACIDYgAEloZYKDcKZ9Cg3iQZGeHkBQcfl+3oZIK59sRxUM6KDP/X +tXa7oWyTbIOiaG6l2b4siJVBzV3dscqDY4PMwL502eCdpO5KTlbgmClBk1IQ1SQ4 +AjJn8ZQSb+/Xxd4u/RmAo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR2 +KCXWfeBmmnoJsmo7jjPXNtNPojAOBgNVHQ8BAf8EBAMCAYYwCgYIKoZIzj0EAwMD +aAAwZQIwW5kp85wxtolrbNa9d+F851F+uDrNozZffPc8dz7kUK2o59JZDCaOMDtu +CCrCp1rIAjEAmeMM56PDr9NJLkaCI2ZdyQAUEv049OGYa3cpetskz2VAv9LcjBHo +9H1/IISpQuQo +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos +# Subject: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos +# Label: "Atos TrustedRoot Root CA RSA TLS 2021" +# Serial: 111436099570196163832749341232207667876 +# MD5 Fingerprint: d4:d3:46:b8:9a:c0:9c:76:5d:9e:3a:c3:b9:99:31:d2 +# SHA1 Fingerprint: 18:52:3b:0d:06:37:e4:d6:3a:df:23:e4:98:fb:5b:16:fb:86:74:48 +# SHA256 Fingerprint: 81:a9:08:8e:a5:9f:b3:64:c5:48:a6:f8:55:59:09:9b:6f:04:05:ef:bf:18:e5:32:4e:c9:f4:57:ba:00:11:2f +-----BEGIN CERTIFICATE----- +MIIFZDCCA0ygAwIBAgIQU9XP5hmTC/srBRLYwiqipDANBgkqhkiG9w0BAQwFADBM +MS4wLAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgUlNBIFRMUyAyMDIx +MQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTIxMTBaFw00 +MTA0MTcwOTIxMDlaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBD +QSBSU0EgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtoAOxHm9BYx9sKOdTSJNy/BBl01Z +4NH+VoyX8te9j2y3I49f1cTYQcvyAh5x5en2XssIKl4w8i1mx4QbZFc4nXUtVsYv +Ye+W/CBGvevUez8/fEc4BKkbqlLfEzfTFRVOvV98r61jx3ncCHvVoOX3W3WsgFWZ +kmGbzSoXfduP9LVq6hdKZChmFSlsAvFr1bqjM9xaZ6cF4r9lthawEO3NUDPJcFDs +GY6wx/J0W2tExn2WuZgIWWbeKQGb9Cpt0xU6kGpn8bRrZtkh68rZYnxGEFzedUln +nkL5/nWpo63/dgpnQOPF943HhZpZnmKaau1Fh5hnstVKPNe0OwANwI8f4UDErmwh +3El+fsqyjW22v5MvoVw+j8rtgI5Y4dtXz4U2OLJxpAmMkokIiEjxQGMYsluMWuPD +0xeqqxmjLBvk1cbiZnrXghmmOxYsL3GHX0WelXOTwkKBIROW1527k2gV+p2kHYzy +geBYBr3JtuP2iV2J+axEoctr+hbxx1A9JNr3w+SH1VbxT5Aw+kUJWdo0zuATHAR8 +ANSbhqRAvNncTFd+rrcztl524WWLZt+NyteYr842mIycg5kDcPOvdO3GDjbnvezB +c6eUWsuSZIKmAMFwoW4sKeFYV+xafJlrJaSQOoD0IJ2azsct+bJLKZWD6TWNp0lI +pw9MGZHQ9b8Q4HECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +dEmZ0f+0emhFdcN+tNzMzjkz2ggwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +DAUAA4ICAQAjQ1MkYlxt/T7Cz1UAbMVWiLkO3TriJQ2VSpfKgInuKs1l+NsW4AmS +4BjHeJi78+xCUvuppILXTdiK/ORO/auQxDh1MoSf/7OwKwIzNsAQkG8dnK/haZPs +o0UvFJ/1TCplQ3IM98P4lYsU84UgYt1UU90s3BiVaU+DR3BAM1h3Egyi61IxHkzJ +qM7F78PRreBrAwA0JrRUITWXAdxfG/F851X6LWh3e9NpzNMOa7pNdkTWwhWaJuyw +xfW70Xp0wmzNxbVe9kzmWy2B27O3Opee7c9GslA9hGCZcbUztVdF5kJHdWoOsAgM +rr3e97sPWD2PAzHoPYJQyi9eDF20l74gNAf0xBLh7tew2VktafcxBPTy+av5EzH4 +AXcOPUIjJsyacmdRIXrMPIWo6iFqO9taPKU0nprALN+AnCng33eU0aKAQv9qTFsR +0PXNor6uzFFcw9VUewyu1rkGd4Di7wcaaMxZUa1+XGdrudviB0JbuAEFWDlN5LuY +o7Ey7Nmj1m+UI/87tyll5gfp77YZ6ufCOB0yiJA8EytuzO+rdwY0d4RPcuSBhPm5 +dDTedk+SKlOxJTnbPP/lPqYO5Wue/9vsL3SD3460s6neFE3/MaNFcyT6lSnMEpcE +oji2jbDwN/zIIX8/syQbPYtuzE2wFg2WHYMfRsCbvUOZ58SWLs5fyQ== +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia Global Root CA G3 O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia Global Root CA G3 O=TrustAsia Technologies, Inc. 
+# Label: "TrustAsia Global Root CA G3" +# Serial: 576386314500428537169965010905813481816650257167 +# MD5 Fingerprint: 30:42:1b:b7:bb:81:75:35:e4:16:4f:53:d2:94:de:04 +# SHA1 Fingerprint: 63:cf:b6:c1:27:2b:56:e4:88:8e:1c:23:9a:b6:2e:81:47:24:c3:c7 +# SHA256 Fingerprint: e0:d3:22:6a:eb:11:63:c2:e4:8f:f9:be:3b:50:b4:c6:43:1b:e7:bb:1e:ac:c5:c3:6b:5d:5e:c5:09:03:9a:08 +-----BEGIN CERTIFICATE----- +MIIFpTCCA42gAwIBAgIUZPYOZXdhaqs7tOqFhLuxibhxkw8wDQYJKoZIhvcNAQEM +BQAwWjELMAkGA1UEBhMCQ04xJTAjBgNVBAoMHFRydXN0QXNpYSBUZWNobm9sb2dp +ZXMsIEluYy4xJDAiBgNVBAMMG1RydXN0QXNpYSBHbG9iYWwgUm9vdCBDQSBHMzAe +Fw0yMTA1MjAwMjEwMTlaFw00NjA1MTkwMjEwMTlaMFoxCzAJBgNVBAYTAkNOMSUw +IwYDVQQKDBxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMSQwIgYDVQQDDBtU +cnVzdEFzaWEgR2xvYmFsIFJvb3QgQ0EgRzMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDAMYJhkuSUGwoqZdC+BqmHO1ES6nBBruL7dOoKjbmzTNyPtxNS +T1QY4SxzlZHFZjtqz6xjbYdT8PfxObegQ2OwxANdV6nnRM7EoYNl9lA+sX4WuDqK +AtCWHwDNBSHvBm3dIZwZQ0WhxeiAysKtQGIXBsaqvPPW5vxQfmZCHzyLpnl5hkA1 +nyDvP+uLRx+PjsXUjrYsyUQE49RDdT/VP68czH5GX6zfZBCK70bwkPAPLfSIC7Ep +qq+FqklYqL9joDiR5rPmd2jE+SoZhLsO4fWvieylL1AgdB4SQXMeJNnKziyhWTXA +yB1GJ2Faj/lN03J5Zh6fFZAhLf3ti1ZwA0pJPn9pMRJpxx5cynoTi+jm9WAPzJMs +hH/x/Gr8m0ed262IPfN2dTPXS6TIi/n1Q1hPy8gDVI+lhXgEGvNz8teHHUGf59gX +zhqcD0r83ERoVGjiQTz+LISGNzzNPy+i2+f3VANfWdP3kXjHi3dqFuVJhZBFcnAv +kV34PmVACxmZySYgWmjBNb9Pp1Hx2BErW+Canig7CjoKH8GB5S7wprlppYiU5msT +f9FkPz2ccEblooV7WIQn3MSAPmeamseaMQ4w7OYXQJXZRe0Blqq/DPNL0WP3E1jA +uPP6Z92bfW1K/zJMtSU7/xxnD4UiWQWRkUF3gdCFTIcQcf+eQxuulXUtgQIDAQAB +o2MwYTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEDk5PIj7zjKsK5Xf/Ih +MBY027ySMB0GA1UdDgQWBBRA5OTyI+84yrCuV3/yITAWNNu8kjAOBgNVHQ8BAf8E +BAMCAQYwDQYJKoZIhvcNAQEMBQADggIBACY7UeFNOPMyGLS0XuFlXsSUT9SnYaP4 +wM8zAQLpw6o1D/GUE3d3NZ4tVlFEbuHGLige/9rsR82XRBf34EzC4Xx8MnpmyFq2 +XFNFV1pF1AWZLy4jVe5jaN/TG3inEpQGAHUNcoTpLrxaatXeL1nHo+zSh2bbt1S1 +JKv0Q3jbSwTEb93mPmY+KfJLaHEih6D4sTNjduMNhXJEIlU/HHzp/LgV6FL6qj6j +ITk1dImmasI5+njPtqzn59ZW/yOSLlALqbUHM/Q4X6RJpstlcHboCoWASzY9M/eV +VHUl2qzEc4Jl6VL1XP04lQJqaTDFHApXB64ipCz5xUG3uOyfT0gA+QEEVcys+TIx +xHWVBqB/0Y0n3bOppHKH/lmLmnp0Ft0WpWIp6zqW3IunaFnT63eROfjXy9mPX1on +AX1daBli2MjN9LdyR75bl87yraKZk62Uy5P2EgmVtqvXO9A/EcswFi55gORngS1d +7XB4tmBZrOFdRWOPyN9yaFvqHbgB8X7754qz41SgOAngPN5C8sLtLpvzHzW2Ntjj +gKGLzZlkD8Kqq7HK9W+eQ42EVJmzbsASZthwEPEGNTNDqJwuuhQxzhB/HIbjj9LV ++Hfsm6vxL2PZQl/gZ4FkkfGXL/xuJvYz+NO1+MRiqzFRJQJ6+N1rZdVtTTDIZbpo +FGWsJwt0ivKH +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia Global Root CA G4 O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia Global Root CA G4 O=TrustAsia Technologies, Inc. 
+# Label: "TrustAsia Global Root CA G4" +# Serial: 451799571007117016466790293371524403291602933463 +# MD5 Fingerprint: 54:dd:b2:d7:5f:d8:3e:ed:7c:e0:0b:2e:cc:ed:eb:eb +# SHA1 Fingerprint: 57:73:a5:61:5d:80:b2:e6:ac:38:82:fc:68:07:31:ac:9f:b5:92:5a +# SHA256 Fingerprint: be:4b:56:cb:50:56:c0:13:6a:52:6d:f4:44:50:8d:aa:36:a0:b5:4f:42:e4:ac:38:f7:2a:f4:70:e4:79:65:4c +-----BEGIN CERTIFICATE----- +MIICVTCCAdygAwIBAgIUTyNkuI6XY57GU4HBdk7LKnQV1tcwCgYIKoZIzj0EAwMw +WjELMAkGA1UEBhMCQ04xJTAjBgNVBAoMHFRydXN0QXNpYSBUZWNobm9sb2dpZXMs +IEluYy4xJDAiBgNVBAMMG1RydXN0QXNpYSBHbG9iYWwgUm9vdCBDQSBHNDAeFw0y +MTA1MjAwMjEwMjJaFw00NjA1MTkwMjEwMjJaMFoxCzAJBgNVBAYTAkNOMSUwIwYD +VQQKDBxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMSQwIgYDVQQDDBtUcnVz +dEFzaWEgR2xvYmFsIFJvb3QgQ0EgRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATx +s8045CVD5d4ZCbuBeaIVXxVjAd7Cq92zphtnS4CDr5nLrBfbK5bKfFJV4hrhPVbw +LxYI+hW8m7tH5j/uqOFMjPXTNvk4XatwmkcN4oFBButJ+bAp3TPsUKV/eSm4IJij +YzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUpbtKl86zK3+kMd6Xg1mD +pm9xy94wHQYDVR0OBBYEFKW7SpfOsyt/pDHel4NZg6ZvccveMA4GA1UdDwEB/wQE +AwIBBjAKBggqhkjOPQQDAwNnADBkAjBe8usGzEkxn0AAbbd+NvBNEU/zy4k6LHiR +UKNbwMp1JvK/kF0LgoxgKJ/GcJpo5PECMFxYDlZ2z1jD1xCMuo6u47xkdUfFVZDj +/bpV6wfEU6s3qe4hsiFbYI89MvHVI5TWWA== +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust ECC Root-01 O=CommScope +# Subject: CN=CommScope Public Trust ECC Root-01 O=CommScope +# Label: "CommScope Public Trust ECC Root-01" +# Serial: 385011430473757362783587124273108818652468453534 +# MD5 Fingerprint: 3a:40:a7:fc:03:8c:9c:38:79:2f:3a:a2:6c:b6:0a:16 +# SHA1 Fingerprint: 07:86:c0:d8:dd:8e:c0:80:98:06:98:d0:58:7a:ef:de:a6:cc:a2:5d +# SHA256 Fingerprint: 11:43:7c:da:7b:b4:5e:41:36:5f:45:b3:9a:38:98:6b:0d:e0:0d:ef:34:8e:0c:7b:b0:87:36:33:80:0b:c3:8b +-----BEGIN CERTIFICATE----- +MIICHTCCAaOgAwIBAgIUQ3CCd89NXTTxyq4yLzf39H91oJ4wCgYIKoZIzj0EAwMw +TjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwiQ29t +bVNjb3BlIFB1YmxpYyBUcnVzdCBFQ0MgUm9vdC0wMTAeFw0yMTA0MjgxNzM1NDNa +Fw00NjA0MjgxNzM1NDJaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21tU2Nv +cGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgRUNDIFJvb3QtMDEw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAARLNumuV16ocNfQj3Rid8NeeqrltqLxeP0C +flfdkXmcbLlSiFS8LwS+uM32ENEp7LXQoMPwiXAZu1FlxUOcw5tjnSCDPgYLpkJE +hRGnSjot6dZoL0hOUysHP029uax3OVejQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSOB2LAUN3GGQYARnQE9/OufXVNMDAKBggq +hkjOPQQDAwNoADBlAjEAnDPfQeMjqEI2Jpc1XHvr20v4qotzVRVcrHgpD7oh2MSg +2NED3W3ROT3Ek2DS43KyAjB8xX6I01D1HiXo+k515liWpDVfG2XqYZpwI7UNo5uS +Um9poIyNStDuiw7LR47QjRE= +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust ECC Root-02 O=CommScope +# Subject: CN=CommScope Public Trust ECC Root-02 O=CommScope +# Label: "CommScope Public Trust ECC Root-02" +# Serial: 234015080301808452132356021271193974922492992893 +# MD5 Fingerprint: 59:b0:44:d5:65:4d:b8:5c:55:19:92:02:b6:d1:94:b2 +# SHA1 Fingerprint: 3c:3f:ef:57:0f:fe:65:93:86:9e:a0:fe:b0:f6:ed:8e:d1:13:c7:e5 +# SHA256 Fingerprint: 2f:fb:7f:81:3b:bb:b3:c8:9a:b4:e8:16:2d:0f:16:d7:15:09:a8:30:cc:9d:73:c2:62:e5:14:08:75:d1:ad:4a +-----BEGIN CERTIFICATE----- +MIICHDCCAaOgAwIBAgIUKP2ZYEFHpgE6yhR7H+/5aAiDXX0wCgYIKoZIzj0EAwMw +TjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwiQ29t +bVNjb3BlIFB1YmxpYyBUcnVzdCBFQ0MgUm9vdC0wMjAeFw0yMTA0MjgxNzQ0NTRa +Fw00NjA0MjgxNzQ0NTNaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21tU2Nv +cGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgRUNDIFJvb3QtMDIw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAR4MIHoYx7l63FRD/cHB8o5mXxO1Q/MMDAL +j2aTPs+9xYa9+bG3tD60B8jzljHz7aRP+KNOjSkVWLjVb3/ubCK1sK9IRQq9qEmU 
+v4RDsNuESgMjGWdqb8FuvAY5N9GIIvejQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTmGHX/72DehKT1RsfeSlXjMjZ59TAKBggq +hkjOPQQDAwNnADBkAjAmc0l6tqvmSfR9Uj/UQQSugEODZXW5hYA4O9Zv5JOGq4/n +ich/m35rChJVYaoR4HkCMHfoMXGsPHED1oQmHhS48zs73u1Z/GtMMH9ZzkXpc2AV +mkzw5l4lIhVtwodZ0LKOag== +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust RSA Root-01 O=CommScope +# Subject: CN=CommScope Public Trust RSA Root-01 O=CommScope +# Label: "CommScope Public Trust RSA Root-01" +# Serial: 354030733275608256394402989253558293562031411421 +# MD5 Fingerprint: 0e:b4:15:bc:87:63:5d:5d:02:73:d4:26:38:68:73:d8 +# SHA1 Fingerprint: 6d:0a:5f:f7:b4:23:06:b4:85:b3:b7:97:64:fc:ac:75:f5:33:f2:93 +# SHA256 Fingerprint: 02:bd:f9:6e:2a:45:dd:9b:f1:8f:c7:e1:db:df:21:a0:37:9b:a3:c9:c2:61:03:44:cf:d8:d6:06:fe:c1:ed:81 +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIUPgNJgXUWdDGOTKvVxZAplsU5EN0wDQYJKoZIhvcNAQEL +BQAwTjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwi +Q29tbVNjb3BlIFB1YmxpYyBUcnVzdCBSU0EgUm9vdC0wMTAeFw0yMTA0MjgxNjQ1 +NTRaFw00NjA0MjgxNjQ1NTNaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21t +U2NvcGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgUlNBIFJvb3Qt +MDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwSGWjDR1C45FtnYSk +YZYSwu3D2iM0GXb26v1VWvZVAVMP8syMl0+5UMuzAURWlv2bKOx7dAvnQmtVzslh +suitQDy6uUEKBU8bJoWPQ7VAtYXR1HHcg0Hz9kXHgKKEUJdGzqAMxGBWBB0HW0al +DrJLpA6lfO741GIDuZNqihS4cPgugkY4Iw50x2tBt9Apo52AsH53k2NC+zSDO3Oj +WiE260f6GBfZumbCk6SP/F2krfxQapWsvCQz0b2If4b19bJzKo98rwjyGpg/qYFl +P8GMicWWMJoKz/TUyDTtnS+8jTiGU+6Xn6myY5QXjQ/cZip8UlF1y5mO6D1cv547 +KI2DAg+pn3LiLCuz3GaXAEDQpFSOm117RTYm1nJD68/A6g3czhLmfTifBSeolz7p +UcZsBSjBAg/pGG3svZwG1KdJ9FQFa2ww8esD1eo9anbCyxooSU1/ZOD6K9pzg4H/ +kQO9lLvkuI6cMmPNn7togbGEW682v3fuHX/3SZtS7NJ3Wn2RnU3COS3kuoL4b/JO +Hg9O5j9ZpSPcPYeoKFgo0fEbNttPxP/hjFtyjMcmAyejOQoBqsCyMWCDIqFPEgkB +Ea801M/XrmLTBQe0MXXgDW1XT2mH+VepuhX2yFJtocucH+X8eKg1mp9BFM6ltM6U +CBwJrVbl2rZJmkrqYxhTnCwuwwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUN12mmnQywsL5x6YVEFm45P3luG0wDQYJ +KoZIhvcNAQELBQADggIBAK+nz97/4L1CjU3lIpbfaOp9TSp90K09FlxD533Ahuh6 +NWPxzIHIxgvoLlI1pKZJkGNRrDSsBTtXAOnTYtPZKdVUvhwQkZyybf5Z/Xn36lbQ +nmhUQo8mUuJM3y+Xpi/SB5io82BdS5pYV4jvguX6r2yBS5KPQJqTRlnLX3gWsWc+ +QgvfKNmwrZggvkN80V4aCRckjXtdlemrwWCrWxhkgPut4AZ9HcpZuPN4KWfGVh2v +trV0KnahP/t1MJ+UXjulYPPLXAziDslg+MkfFoom3ecnf+slpoq9uC02EJqxWE2a +aE9gVOX2RhOOiKy8IUISrcZKiX2bwdgt6ZYD9KJ0DLwAHb/WNyVntHKLr4W96ioD +j8z7PEQkguIBpQtZtjSNMgsSDesnwv1B10A8ckYpwIzqug/xBpMu95yo9GA+o/E4 +Xo4TwbM6l4c/ksp4qRyv0LAbJh6+cOx69TOY6lz/KwsETkPdY34Op054A5U+1C0w +lREQKC6/oAI+/15Z0wUOlV9TRe9rh9VIzRamloPh37MG88EU26fsHItdkJANclHn +YfkUyq+Dj7+vsQpZXdxc1+SWrVtgHdqul7I52Qb1dgAT+GhMIbA1xNxVssnBQVoc +icCMb3SgazNNtQEo/a2tiRc7ppqEvOuM6sRxJKi6KfkIsidWNTJf6jn7MZrVGczw +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust RSA Root-02 O=CommScope +# Subject: CN=CommScope Public Trust RSA Root-02 O=CommScope +# Label: "CommScope Public Trust RSA Root-02" +# Serial: 480062499834624527752716769107743131258796508494 +# MD5 Fingerprint: e1:29:f9:62:7b:76:e2:96:6d:f3:d4:d7:0f:ae:1f:aa +# SHA1 Fingerprint: ea:b0:e2:52:1b:89:93:4c:11:68:f2:d8:9a:ac:22:4c:a3:8a:57:ae +# SHA256 Fingerprint: ff:e9:43:d7:93:42:4b:4f:7c:44:0c:1c:3d:64:8d:53:63:f3:4b:82:dc:87:aa:7a:9f:11:8f:c5:de:e1:01:f1 +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIUVBa/O345lXGN0aoApYYNK496BU4wDQYJKoZIhvcNAQEL +BQAwTjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwi +Q29tbVNjb3BlIFB1YmxpYyBUcnVzdCBSU0EgUm9vdC0wMjAeFw0yMTA0MjgxNzE2 
+NDNaFw00NjA0MjgxNzE2NDJaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21t +U2NvcGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgUlNBIFJvb3Qt +MDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDh+g77aAASyE3VrCLE +NQE7xVTlWXZjpX/rwcRqmL0yjReA61260WI9JSMZNRTpf4mnG2I81lDnNJUDMrG0 +kyI9p+Kx7eZ7Ti6Hmw0zdQreqjXnfuU2mKKuJZ6VszKWpCtYHu8//mI0SFHRtI1C +rWDaSWqVcN3SAOLMV2MCe5bdSZdbkk6V0/nLKR8YSvgBKtJjCW4k6YnS5cciTNxz +hkcAqg2Ijq6FfUrpuzNPDlJwnZXjfG2WWy09X6GDRl224yW4fKcZgBzqZUPckXk2 +LHR88mcGyYnJ27/aaL8j7dxrrSiDeS/sOKUNNwFnJ5rpM9kzXzehxfCrPfp4sOcs +n/Y+n2Dg70jpkEUeBVF4GiwSLFworA2iI540jwXmojPOEXcT1A6kHkIfhs1w/tku +FT0du7jyU1fbzMZ0KZwYszZ1OC4PVKH4kh+Jlk+71O6d6Ts2QrUKOyrUZHk2EOH5 +kQMreyBUzQ0ZGshBMjTRsJnhkB4BQDa1t/qp5Xd1pCKBXbCL5CcSD1SIxtuFdOa3 +wNemKfrb3vOTlycEVS8KbzfFPROvCgCpLIscgSjX74Yxqa7ybrjKaixUR9gqiC6v +wQcQeKwRoi9C8DfF8rhW3Q5iLc4tVn5V8qdE9isy9COoR+jUKgF4z2rDN6ieZdIs +5fq6M8EGRPbmz6UNp2YINIos8wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUR9DnsSL/nSz12Vdgs7GxcJXvYXowDQYJ +KoZIhvcNAQELBQADggIBAIZpsU0v6Z9PIpNojuQhmaPORVMbc0RTAIFhzTHjCLqB +KCh6krm2qMhDnscTJk3C2OVVnJJdUNjCK9v+5qiXz1I6JMNlZFxHMaNlNRPDk7n3 ++VGXu6TwYofF1gbTl4MgqX67tiHCpQ2EAOHyJxCDut0DgdXdaMNmEMjRdrSzbyme +APnCKfWxkxlSaRosTKCL4BWaMS/TiJVZbuXEs1DIFAhKm4sTg7GkcrI7djNB3Nyq +pgdvHSQSn8h2vS/ZjvQs7rfSOBAkNlEv41xdgSGn2rtO/+YHqP65DSdsu3BaVXoT +6fEqSWnHX4dXTEN5bTpl6TBcQe7rd6VzEojov32u5cSoHw2OHG1QAk8mGEPej1WF +sQs3BWDJVTkSBKEqz3EWnzZRSb9wO55nnPt7eck5HHisd5FUmrh1CoFSl+NmYWvt +PjgelmFV4ZFUjO2MJB+ByRCac5krFk5yAD9UG/iNuovnFNa2RU9g7Jauwy8CTl2d +lklyALKrdVwPaFsdZcJfMw8eD/A7hvWwTruc9+olBdytoptLFwG+Qt81IR2tq670 +v64fG9PiO/yzcnMcmyiQiRM9HcEARwmWmjgb3bHPDcK0RPOWlc4yOo80nOAXx17O +rg3bhzjlP1v9mxnhMUF6cKojawHhRUzNlM47ni3niAIi9G7oyOzWPPO5std3eqx7 +-----END CERTIFICATE----- + +# Issuer: CN=Telekom Security TLS ECC Root 2020 O=Deutsche Telekom Security GmbH +# Subject: CN=Telekom Security TLS ECC Root 2020 O=Deutsche Telekom Security GmbH +# Label: "Telekom Security TLS ECC Root 2020" +# Serial: 72082518505882327255703894282316633856 +# MD5 Fingerprint: c1:ab:fe:6a:10:2c:03:8d:bc:1c:22:32:c0:85:a7:fd +# SHA1 Fingerprint: c0:f8:96:c5:a9:3b:01:06:21:07:da:18:42:48:bc:e9:9d:88:d5:ec +# SHA256 Fingerprint: 57:8a:f4:de:d0:85:3f:4e:59:98:db:4a:ea:f9:cb:ea:8d:94:5f:60:b6:20:a3:8d:1a:3c:13:b2:bc:7b:a8:e1 +-----BEGIN CERTIFICATE----- +MIICQjCCAcmgAwIBAgIQNjqWjMlcsljN0AFdxeVXADAKBggqhkjOPQQDAzBjMQsw +CQYDVQQGEwJERTEnMCUGA1UECgweRGV1dHNjaGUgVGVsZWtvbSBTZWN1cml0eSBH +bWJIMSswKQYDVQQDDCJUZWxla29tIFNlY3VyaXR5IFRMUyBFQ0MgUm9vdCAyMDIw +MB4XDTIwMDgyNTA3NDgyMFoXDTQ1MDgyNTIzNTk1OVowYzELMAkGA1UEBhMCREUx +JzAlBgNVBAoMHkRldXRzY2hlIFRlbGVrb20gU2VjdXJpdHkgR21iSDErMCkGA1UE +AwwiVGVsZWtvbSBTZWN1cml0eSBUTFMgRUNDIFJvb3QgMjAyMDB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABM6//leov9Wq9xCazbzREaK9Z0LMkOsVGJDZos0MKiXrPk/O +tdKPD/M12kOLAoC+b1EkHQ9rK8qfwm9QMuU3ILYg/4gND21Ju9sGpIeQkpT0CdDP +f8iAC8GXs7s1J8nCG6NCMEAwHQYDVR0OBBYEFONyzG6VmUex5rNhTNHLq+O6zd6f +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2cA +MGQCMHVSi7ekEE+uShCLsoRbQuHmKjYC2qBuGT8lv9pZMo7k+5Dck2TOrbRBR2Di +z6fLHgIwN0GMZt9Ba9aDAEH9L1r3ULRn0SyocddDypwnJJGDSA3PzfdUga/sf+Rn +27iQ7t0l +-----END CERTIFICATE----- + +# Issuer: CN=Telekom Security TLS RSA Root 2023 O=Deutsche Telekom Security GmbH +# Subject: CN=Telekom Security TLS RSA Root 2023 O=Deutsche Telekom Security GmbH +# Label: "Telekom Security TLS RSA Root 2023" +# Serial: 44676229530606711399881795178081572759 +# MD5 Fingerprint: bf:5b:eb:54:40:cd:48:71:c4:20:8d:7d:de:0a:42:f2 +# SHA1 Fingerprint: 54:d3:ac:b3:bd:57:56:f6:85:9d:ce:e5:c3:21:e2:d4:ad:83:d0:93 +# 
SHA256 Fingerprint: ef:c6:5c:ad:bb:59:ad:b6:ef:e8:4d:a2:23:11:b3:56:24:b7:1b:3b:1e:a0:da:8b:66:55:17:4e:c8:97:86:46 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIQIZxULej27HF3+k7ow3BXlzANBgkqhkiG9w0BAQwFADBj +MQswCQYDVQQGEwJERTEnMCUGA1UECgweRGV1dHNjaGUgVGVsZWtvbSBTZWN1cml0 +eSBHbWJIMSswKQYDVQQDDCJUZWxla29tIFNlY3VyaXR5IFRMUyBSU0EgUm9vdCAy +MDIzMB4XDTIzMDMyODEyMTY0NVoXDTQ4MDMyNzIzNTk1OVowYzELMAkGA1UEBhMC +REUxJzAlBgNVBAoMHkRldXRzY2hlIFRlbGVrb20gU2VjdXJpdHkgR21iSDErMCkG +A1UEAwwiVGVsZWtvbSBTZWN1cml0eSBUTFMgUlNBIFJvb3QgMjAyMzCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAO01oYGA88tKaVvC+1GDrib94W7zgRJ9 +cUD/h3VCKSHtgVIs3xLBGYSJwb3FKNXVS2xE1kzbB5ZKVXrKNoIENqil/Cf2SfHV +cp6R+SPWcHu79ZvB7JPPGeplfohwoHP89v+1VmLhc2o0mD6CuKyVU/QBoCcHcqMA +U6DksquDOFczJZSfvkgdmOGjup5czQRxUX11eKvzWarE4GC+j4NSuHUaQTXtvPM6 +Y+mpFEXX5lLRbtLevOP1Czvm4MS9Q2QTps70mDdsipWol8hHD/BeEIvnHRz+sTug +BTNoBUGCwQMrAcjnj02r6LX2zWtEtefdi+zqJbQAIldNsLGyMcEWzv/9FIS3R/qy +8XDe24tsNlikfLMR0cN3f1+2JeANxdKz+bi4d9s3cXFH42AYTyS2dTd4uaNir73J +co4vzLuu2+QVUhkHM/tqty1LkCiCc/4YizWN26cEar7qwU02OxY2kTLvtkCJkUPg +8qKrBC7m8kwOFjQgrIfBLX7JZkcXFBGk8/ehJImr2BrIoVyxo/eMbcgByU/J7MT8 +rFEz0ciD0cmfHdRHNCk+y7AO+oMLKFjlKdw/fKifybYKu6boRhYPluV75Gp6SG12 +mAWl3G0eQh5C2hrgUve1g8Aae3g1LDj1H/1Joy7SWWO/gLCMk3PLNaaZlSJhZQNg ++y+TS/qanIA7AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtqeX +gj10hZv3PJ+TmpV5dVKMbUcwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS2 +p5eCPXSFm/c8n5OalXl1UoxtRzANBgkqhkiG9w0BAQwFAAOCAgEAqMxhpr51nhVQ +pGv7qHBFfLp+sVr8WyP6Cnf4mHGCDG3gXkaqk/QeoMPhk9tLrbKmXauw1GLLXrtm +9S3ul0A8Yute1hTWjOKWi0FpkzXmuZlrYrShF2Y0pmtjxrlO8iLpWA1WQdH6DErw +M807u20hOq6OcrXDSvvpfeWxm4bu4uB9tPcy/SKE8YXJN3nptT+/XOR0so8RYgDd +GGah2XsjX/GO1WfoVNpbOms2b/mBsTNHM3dA+VKq3dSDz4V4mZqTuXNnQkYRIer+ +CqkbGmVps4+uFrb2S1ayLfmlyOw7YqPta9BO1UAJpB+Y1zqlklkg5LB9zVtzaL1t +xKITDmcZuI1CfmwMmm6gJC3VRRvcxAIU/oVbZZfKTpBQCHpCNfnqwmbU+AGuHrS+ +w6jv/naaoqYfRvaE7fzbzsQCzndILIyy7MMAo+wsVRjBfhnu4S/yrYObnqsZ38aK +L4x35bcF7DvB7L6Gs4a8wPfc5+pbrrLMtTWGS9DiP7bY+A4A7l3j941Y/8+LN+lj +X273CXE2whJdV/LItM3z7gLfEdxquVeEHVlNjM7IDiPCtyaaEBRx/pOyiriA8A4Q +ntOoUAw3gi/q4Iqd4Sw5/7W0cwDk90imc6y/st53BIe0o82bNSQ3+pCTE4FCxpgm +dTdmQRCsu/WU48IxK63nI1bMNSWSs1A= +-----END CERTIFICATE----- + +# Issuer: CN=FIRMAPROFESIONAL CA ROOT-A WEB O=Firmaprofesional SA +# Subject: CN=FIRMAPROFESIONAL CA ROOT-A WEB O=Firmaprofesional SA +# Label: "FIRMAPROFESIONAL CA ROOT-A WEB" +# Serial: 65916896770016886708751106294915943533 +# MD5 Fingerprint: 82:b2:ad:45:00:82:b0:66:63:f8:5f:c3:67:4e:ce:a3 +# SHA1 Fingerprint: a8:31:11:74:a6:14:15:0d:ca:77:dd:0e:e4:0c:5d:58:fc:a0:72:a5 +# SHA256 Fingerprint: be:f2:56:da:f2:6e:9c:69:bd:ec:16:02:35:97:98:f3:ca:f7:18:21:a0:3e:01:82:57:c5:3c:65:61:7f:3d:4a +-----BEGIN CERTIFICATE----- +MIICejCCAgCgAwIBAgIQMZch7a+JQn81QYehZ1ZMbTAKBggqhkjOPQQDAzBuMQsw +CQYDVQQGEwJFUzEcMBoGA1UECgwTRmlybWFwcm9mZXNpb25hbCBTQTEYMBYGA1UE +YQwPVkFURVMtQTYyNjM0MDY4MScwJQYDVQQDDB5GSVJNQVBST0ZFU0lPTkFMIENB +IFJPT1QtQSBXRUIwHhcNMjIwNDA2MDkwMTM2WhcNNDcwMzMxMDkwMTM2WjBuMQsw +CQYDVQQGEwJFUzEcMBoGA1UECgwTRmlybWFwcm9mZXNpb25hbCBTQTEYMBYGA1UE +YQwPVkFURVMtQTYyNjM0MDY4MScwJQYDVQQDDB5GSVJNQVBST0ZFU0lPTkFMIENB +IFJPT1QtQSBXRUIwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARHU+osEaR3xyrq89Zf +e9MEkVz6iMYiuYMQYneEMy3pA4jU4DP37XcsSmDq5G+tbbT4TIqk5B/K6k84Si6C +cyvHZpsKjECcfIr28jlgst7L7Ljkb+qbXbdTkBgyVcUgt5SjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAUk+FDY1w8ndYn81LsF7Kpryz3dvgwHQYDVR0O +BBYEFJPhQ2NcPJ3WJ/NS7Beyqa8s93b4MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjO +PQQDAwNoADBlAjAdfKR7w4l1M+E7qUW/Runpod3JIha3RxEL2Jq68cgLcFBTApFw 
+hVmpHqTm6iMxoAACMQD94vizrxa5HnPEluPBMBnYfubDl94cT7iJLzPrSA8Z94dG +XSaQpYXFuXqUPoeovQA= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA CYBER Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA CYBER Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA CYBER Root CA" +# Serial: 85076849864375384482682434040119489222 +# MD5 Fingerprint: 0b:33:a0:97:52:95:d4:a9:fd:bb:db:6e:a3:55:5b:51 +# SHA1 Fingerprint: f6:b1:1c:1a:83:38:e9:7b:db:b3:a8:c8:33:24:e0:2d:9c:7f:26:66 +# SHA256 Fingerprint: 3f:63:bb:28:14:be:17:4e:c8:b6:43:9c:f0:8d:6d:56:f0:b7:c4:05:88:3a:56:48:a3:34:42:4d:6b:3e:c5:58 +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIQQAE0jMIAAAAAAAAAATzyxjANBgkqhkiG9w0BAQwFADBQ +MQswCQYDVQQGEwJUVzESMBAGA1UEChMJVEFJV0FOLUNBMRAwDgYDVQQLEwdSb290 +IENBMRswGQYDVQQDExJUV0NBIENZQkVSIFJvb3QgQ0EwHhcNMjIxMTIyMDY1NDI5 +WhcNNDcxMTIyMTU1OTU5WjBQMQswCQYDVQQGEwJUVzESMBAGA1UEChMJVEFJV0FO +LUNBMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJUV0NBIENZQkVSIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDG+Moe2Qkgfh1sTs6P +40czRJzHyWmqOlt47nDSkvgEs1JSHWdyKKHfi12VCv7qze33Kc7wb3+szT3vsxxF +avcokPFhV8UMxKNQXd7UtcsZyoC5dc4pztKFIuwCY8xEMCDa6pFbVuYdHNWdZsc/ +34bKS1PE2Y2yHer43CdTo0fhYcx9tbD47nORxc5zb87uEB8aBs/pJ2DFTxnk684i +JkXXYJndzk834H/nY62wuFm40AZoNWDTNq5xQwTxaWV4fPMf88oon1oglWa0zbfu +j3ikRRjpJi+NmykosaS3Om251Bw4ckVYsV7r8Cibt4LK/c/WMw+f+5eesRycnupf +Xtuq3VTpMCEobY5583WSjCb+3MX2w7DfRFlDo7YDKPYIMKoNM+HvnKkHIuNZW0CP +2oi3aQiotyMuRAlZN1vH4xfyIutuOVLF3lSnmMlLIJXcRolftBL5hSmO68gnFSDA +S9TMfAxsNAwmmyYxpjyn9tnQS6Jk/zuZQXLB4HCX8SS7K8R0IrGsayIyJNN4KsDA +oS/xUgXJP+92ZuJF2A09rZXIx4kmyA+upwMu+8Ff+iDhcK2wZSA3M2Cw1a/XDBzC +kHDXShi8fgGwsOsVHkQGzaRP6AzRwyAQ4VRlnrZR0Bp2a0JaWHY06rc3Ga4udfmW +5cFZ95RXKSWNOkyrTZpB0F8mAwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBSdhWEUfMFib5do5E83QOGt4A1WNzAd +BgNVHQ4EFgQUnYVhFHzBYm+XaORPN0DhreANVjcwDQYJKoZIhvcNAQEMBQADggIB +AGSPesRiDrWIzLjHhg6hShbNcAu3p4ULs3a2D6f/CIsLJc+o1IN1KriWiLb73y0t +tGlTITVX1olNc79pj3CjYcya2x6a4CD4bLubIp1dhDGaLIrdaqHXKGnK/nZVekZn +68xDiBaiA9a5F/gZbG0jAn/xX9AKKSM70aoK7akXJlQKTcKlTfjF/biBzysseKNn +TKkHmvPfXvt89YnNdJdhEGoHK4Fa0o635yDRIG4kqIQnoVesqlVYL9zZyvpoBJ7t +RCT5dEA7IzOrg1oYJkK2bVS1FmAwbLGg+LhBoF1JSdJlBTrq/p1hvIbZv97Tujqx +f36SNI7JAG7cmL3c7IAFrQI932XtCwP39xaEBDG6k5TY8hL4iuO/Qq+n1M0RFxbI +Qh0UqEL20kCGoE8jypZFVmAGzbdVAaYBlGX+bgUJurSkquLvWL69J1bY73NxW0Qz +8ppy6rBePm6pUlvscG21h483XjyMnM7k8M4MZ0HMzvaAq07MTFb1wWFZk7Q+ptq4 +NxKfKjLji7gh7MMrZQzvIt6IKTtM1/r+t+FHvpw+PoP7UV31aPcuIYXcv/Fa4nzX +xeSDwWrruoBa3lwtcHb4yOWHh8qgnaHlIhInD0Q9HWzq1MKLL295q39QpsQZp6F6 +t5b5wR9iWqJDB0BeJsas7a5wFsWqynKKTbDPAYsDP27X +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA12 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA12 O=Cybertrust Japan Co., Ltd. 
+# Label: "SecureSign Root CA12" +# Serial: 587887345431707215246142177076162061960426065942 +# MD5 Fingerprint: c6:89:ca:64:42:9b:62:08:49:0b:1e:7f:e9:07:3d:e8 +# SHA1 Fingerprint: 7a:22:1e:3d:de:1b:06:ac:9e:c8:47:70:16:8e:3c:e5:f7:6b:06:f4 +# SHA256 Fingerprint: 3f:03:4b:b5:70:4d:44:b2:d0:85:45:a0:20:57:de:93:eb:f3:90:5f:ce:72:1a:cb:c7:30:c0:6d:da:ee:90:4e +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUZvnHwa/swlG07VOX5uaCwysckBYwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28u +LCBMdGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExMjAeFw0yMDA0MDgw +NTM2NDZaFw00MDA0MDgwNTM2NDZaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpD +eWJlcnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBS +b290IENBMTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6OcE3emhF +KxS06+QT61d1I02PJC0W6K6OyX2kVzsqdiUzg2zqMoqUm048luT9Ub+ZyZN+v/mt +p7JIKwccJ/VMvHASd6SFVLX9kHrko+RRWAPNEHl57muTH2SOa2SroxPjcf59q5zd +J1M3s6oYwlkm7Fsf0uZlfO+TvdhYXAvA42VvPMfKWeP+bl+sg779XSVOKik71gur +FzJ4pOE+lEa+Ym6b3kaosRbnhW70CEBFEaCeVESE99g2zvVQR9wsMJvuwPWW0v4J +hscGWa5Pro4RmHvzC1KqYiaqId+OJTN5lxZJjfU+1UefNzFJM3IFTQy2VYzxV4+K +h9GtxRESOaCtAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBRXNPN0zwRL1SXm8UC2LEzZLemgrTANBgkqhkiG9w0BAQsF +AAOCAQEAPrvbFxbS8hQBICw4g0utvsqFepq2m2um4fylOqyttCg6r9cBg0krY6Ld +mmQOmFxv3Y67ilQiLUoT865AQ9tPkbeGGuwAtEGBpE/6aouIs3YIcipJQMPTw4WJ +mBClnW8Zt7vPemVV2zfrPIpyMpcemik+rY3moxtt9XUa5rBouVui7mlHJzWhhpmA +8zNL4WukJsPvdFlseqJkth5Ew1DgDzk9qTPxpfPSvWKErI4cqc1avTc7bgoitPQV +55FYxTpE05Uo2cBl6XLK0A+9H7MV2anjpEcJnuDLN/v9vZfVvhgaaaI5gdka9at/ +yOPiZwud9AzqVN/Ssq+xIvEg37xEHA== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA14 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA14 O=Cybertrust Japan Co., Ltd. +# Label: "SecureSign Root CA14" +# Serial: 575790784512929437950770173562378038616896959179 +# MD5 Fingerprint: 71:0d:72:fa:92:19:65:5e:89:04:ac:16:33:f0:bc:d5 +# SHA1 Fingerprint: dd:50:c0:f7:79:b3:64:2e:74:a2:b8:9d:9f:d3:40:dd:bb:f0:f2:4f +# SHA256 Fingerprint: 4b:00:9c:10:34:49:4f:9a:b5:6b:ba:3b:a1:d6:27:31:fc:4d:20:d8:95:5a:dc:ec:10:a9:25:60:72:61:e3:38 +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIUZNtaDCBO6Ncpd8hQJ6JaJ90t8sswDQYJKoZIhvcNAQEM +BQAwUTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28u +LCBMdGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExNDAeFw0yMDA0MDgw +NzA2MTlaFw00NTA0MDgwNzA2MTlaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpD +eWJlcnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBS +b290IENBMTQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDF0nqh1oq/ +FjHQmNE6lPxauG4iwWL3pwon71D2LrGeaBLwbCRjOfHw3xDG3rdSINVSW0KZnvOg +vlIfX8xnbacuUKLBl422+JX1sLrcneC+y9/3OPJH9aaakpUqYllQC6KxNedlsmGy +6pJxaeQp8E+BgQQ8sqVb1MWoWWd7VRxJq3qdwudzTe/NCcLEVxLbAQ4jeQkHO6Lo +/IrPj8BGJJw4J+CDnRugv3gVEOuGTgpa/d/aLIJ+7sr2KeH6caH3iGicnPCNvg9J +kdjqOvn90Ghx2+m1K06Ckm9mH+Dw3EzsytHqunQG+bOEkJTRX45zGRBdAuVwpcAQ +0BB8b8VYSbSwbprafZX1zNoCr7gsfXmPvkPx+SgojQlD+Ajda8iLLCSxjVIHvXib +y8posqTdDEx5YMaZ0ZPxMBoH064iwurO8YQJzOAUbn8/ftKChazcqRZOhaBgy/ac +18izju3Gm5h1DVXoX+WViwKkrkMpKBGk5hIwAUt1ax5mnXkvpXYvHUC0bcl9eQjs +0Wq2XSqypWa9a4X0dFbD9ed1Uigspf9mR6XU/v6eVL9lfgHWMI+lNpyiUBzuOIAB +SMbHdPTGrMNASRZhdCyvjG817XsYAFs2PJxQDcqSMxDxJklt33UkN4Ii1+iW/RVL +ApY+B3KVfqs9TC7XyvDf4Fg/LS8EmjijAQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUBpOjCl4oaTeqYR3r6/wtbyPk +86AwDQYJKoZIhvcNAQEMBQADggIBAJaAcgkGfpzMkwQWu6A6jZJOtxEaCnFxEM0E +rX+lRVAQZk5KQaID2RFPeje5S+LGjzJmdSX7684/AykmjbgWHfYfM25I5uj4V7Ib +ed87hwriZLoAymzvftAj63iP/2SbNDefNWWipAA9EiOWWF3KY4fGoweITedpdopT 
+zfFP7ELyk+OZpDc8h7hi2/DsHzc/N19DzFGdtfCXwreFamgLRB7lUe6TzktuhsHS +DCRZNhqfLJGP4xjblJUK7ZGqDpncllPjYYPGFrojutzdfhrGe0K22VoF3Jpf1d+4 +2kd92jjbrDnVHmtsKheMYc2xbXIBw8MgAGJoFjHVdqqGuw6qnsb58Nn4DSEC5MUo +FlkRudlpcyqSeLiSV5sI8jrlL5WwWLdrIBRtFO8KvH7YVdiI2i/6GaX7i+B/OfVy +K4XELKzvGUWSTLNhB9xNH27SgRNcmvMSZ4PPmz+Ln52kuaiWA3rF7iDeM9ovnhp6 +dB7h7sxaOgTdsxoEqBRjrLdHEoOabPXm6RUVkRqEGQ6UROcSjiVbgGcZ3GOTEAtl +Lor6CZpO2oYofaphNdgOpygau1LgePhsumywbrmHXumZNTfxPWQrqaA0k89jL9WB +365jJ6UeTo3cKXhZ+PmhIIynJkBugnLNeLLIjzwec+fBH7/PzqUqm9tEZDKgu39c +JRNItX+S +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA15 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA15 O=Cybertrust Japan Co., Ltd. +# Label: "SecureSign Root CA15" +# Serial: 126083514594751269499665114766174399806381178503 +# MD5 Fingerprint: 13:30:fc:c4:62:a6:a9:de:b5:c1:68:af:b5:d2:31:47 +# SHA1 Fingerprint: cb:ba:83:c8:c1:5a:5d:f1:f9:73:6f:ca:d7:ef:28:13:06:4a:07:7d +# SHA256 Fingerprint: e7:78:f0:f0:95:fe:84:37:29:cd:1a:00:82:17:9e:53:14:a9:c2:91:44:28:05:e1:fb:1d:8f:b6:b8:88:6c:3a +-----BEGIN CERTIFICATE----- +MIICIzCCAamgAwIBAgIUFhXHw9hJp75pDIqI7fBw+d23PocwCgYIKoZIzj0EAwMw +UTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28uLCBM +dGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExNTAeFw0yMDA0MDgwODMy +NTZaFw00NTA0MDgwODMyNTZaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpDeWJl +cnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBSb290 +IENBMTUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQLUHSNZDKZmbPSYAi4Io5GdCx4 +wCtELW1fHcmuS1Iggz24FG1Th2CeX2yF2wYUleDHKP+dX+Sq8bOLbe1PL0vJSpSR +ZHX+AezB2Ot6lHhWGENfa4HL9rzatAy2KZMIaY+jQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTrQciu/NWeUUj1vYv0hyCTQSvT +9DAKBggqhkjOPQQDAwNoADBlAjEA2S6Jfl5OpBEHvVnCB96rMjhTKkZEBhd6zlHp +4P9mLQlO4E/0BdGF9jVg3PVys0Z9AjBEmEYagoUeYWmJSwdLZrWeqrqgHkHZAXQ6 +bkU6iYAZezKYVWOr62Nuk22rGwlgMU4= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 2 2023 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 2 2023 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 2 2023" +# Serial: 153168538924886464690566649552453098598 +# MD5 Fingerprint: e1:09:ed:d3:60:d4:56:1b:47:1f:b7:0c:5f:1b:5f:85 +# SHA1 Fingerprint: 2d:b0:70:ee:71:94:af:69:68:17:db:79:ce:58:9f:a0:6b:96:f7:87 +# SHA256 Fingerprint: 05:52:e6:f8:3f:df:65:e8:fa:96:70:e6:66:df:28:a4:e2:13:40:b5:10:cb:e5:25:66:f9:7c:4f:b9:4b:2b:d1 +-----BEGIN CERTIFICATE----- +MIIFqTCCA5GgAwIBAgIQczswBEhb2U14LnNLyaHcZjANBgkqhkiG9w0BAQ0FADBI +MQswCQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlE +LVRSVVNUIEJSIFJvb3QgQ0EgMiAyMDIzMB4XDTIzMDUwOTA4NTYzMVoXDTM4MDUw +OTA4NTYzMFowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEi +MCAGA1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDIgMjAyMzCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAK7/CVmRgApKaOYkP7in5Mg6CjoWzckjYaCTcfKr +i3OPoGdlYNJUa2NRb0kz4HIHE304zQaSBylSa053bATTlfrdTIzZXcFhfUvnKLNE +gXtRr90zsWh81k5M/itoucpmacTsXld/9w3HnDY25QdgrMBM6ghs7wZ8T1soegj8 +k12b9py0i4a6Ibn08OhZWiihNIQaJZG2tY/vsvmA+vk9PBFy2OMvhnbFeSzBqZCT +Rphny4NqoFAjpzv2gTng7fC5v2Xx2Mt6++9zA84A9H3X4F07ZrjcjrqDy4d2A/wl +2ecjbwb9Z/Pg/4S8R7+1FhhGaRTMBffb00msa8yr5LULQyReS2tNZ9/WtT5PeB+U +cSTq3nD88ZP+npNa5JRal1QMNXtfbO4AHyTsA7oC9Xb0n9Sa7YUsOCIvx9gvdhFP +/Wxc6PWOJ4d/GUohR5AdeY0cW/jPSoXk7bNbjb7EZChdQcRurDhaTyN0dKkSw/bS +uREVMweR2Ds3OmMwBtHFIjYoYiMQ4EbMl6zWK11kJNXuHA7e+whadSr2Y23OC0K+ +0bpwHJwh5Q8xaRfX/Aq03u2AnMuStIv13lmiWAmlY0cL4UEyNEHZmrHZqLAbWt4N +DfTisl01gLmB1IRpkQLLddCNxbU9CZEJjxShFHR5PtbJFR2kWVki3PaKRT08EtY+ +XTIvAgMBAAGjgY4wgYswDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUZ5Dw1t61 
+GNVGKX5cq/ieCLxklRAwDgYDVR0PAQH/BAQDAgEGMEkGA1UdHwRCMEAwPqA8oDqG +OGh0dHA6Ly9jcmwuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3RfYnJfcm9vdF9jYV8y +XzIwMjMuY3JsMA0GCSqGSIb3DQEBDQUAA4ICAQA097N3U9swFrktpSHxQCF16+tI +FoE9c+CeJyrrd6kTpGoKWloUMz1oH4Guaf2Mn2VsNELZLdB/eBaxOqwjMa1ef67n +riv6uvw8l5VAk1/DLQOj7aRvU9f6QA4w9QAgLABMjDu0ox+2v5Eyq6+SmNMW5tTR +VFxDWy6u71cqqLRvpO8NVhTaIasgdp4D/Ca4nj8+AybmTNudX0KEPUUDAxxZiMrc +LmEkWqTqJwtzEr5SswrPMhfiHocaFpVIbVrg0M8JkiZmkdijYQ6qgYF/6FKC0ULn +4B0Y+qSFNueG4A3rvNTJ1jxD8V1Jbn6Bm2m1iWKPiFLY1/4nwSPFyysCu7Ff/vtD +hQNGvl3GyiEm/9cCnnRK3PgTFbGBVzbLZVzRHTF36SXDw7IyN9XxmAnkbWOACKsG +koHU6XCPpz+y7YaMgmo1yEJagtFSGkUPFaUA8JR7ZSdXOUPPfH/mvTWze/EZTN46 +ls/pdu4D58JDUjxqgejBWoC9EV2Ta/vH5mQ/u2kc6d0li690yVRAysuTEwrt+2aS +Ecr1wPrYg1UDfNPFIkZ1cGt5SAYqgpq/5usWDiJFAbzdNpQ0qTUmiteXue4Icr80 +knCDgKs4qllo3UCkGJCy89UDyibK79XH4I9TjvAA46jtn/mtd+ArY0+ew+43u3gJ +hJ65bvspmZDogNOfJA== +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia TLS ECC Root CA O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia TLS ECC Root CA O=TrustAsia Technologies, Inc. +# Label: "TrustAsia TLS ECC Root CA" +# Serial: 310892014698942880364840003424242768478804666567 +# MD5 Fingerprint: 09:48:04:77:d2:fc:65:93:71:66:b1:11:95:4f:06:8c +# SHA1 Fingerprint: b5:ec:39:f3:a1:66:37:ae:c3:05:94:57:e2:be:11:be:b7:a1:7f:36 +# SHA256 Fingerprint: c0:07:6b:9e:f0:53:1f:b1:a6:56:d6:7c:4e:be:97:cd:5d:ba:a4:1e:f4:45:98:ac:c2:48:98:78:c9:2d:87:11 +-----BEGIN CERTIFICATE----- +MIICMTCCAbegAwIBAgIUNnThTXxlE8msg1UloD5Sfi9QaMcwCgYIKoZIzj0EAwMw +WDELMAkGA1UEBhMCQ04xJTAjBgNVBAoTHFRydXN0QXNpYSBUZWNobm9sb2dpZXMs +IEluYy4xIjAgBgNVBAMTGVRydXN0QXNpYSBUTFMgRUNDIFJvb3QgQ0EwHhcNMjQw +NTE1MDU0MTU2WhcNNDQwNTE1MDU0MTU1WjBYMQswCQYDVQQGEwJDTjElMCMGA1UE +ChMcVHJ1c3RBc2lhIFRlY2hub2xvZ2llcywgSW5jLjEiMCAGA1UEAxMZVHJ1c3RB +c2lhIFRMUyBFQ0MgUm9vdCBDQTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLh/pVs/ +AT598IhtrimY4ZtcU5nb9wj/1WrgjstEpvDBjL1P1M7UiFPoXlfXTr4sP/MSpwDp +guMqWzJ8S5sUKZ74LYO1644xST0mYekdcouJtgq7nDM1D9rs3qlKH8kzsaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQULIVTu7FDzTLqnqOH/qKYqKaT6RAw +DgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2gAMGUCMFRH18MtYYZI9HlaVQ01 +L18N9mdsd0AaRuf4aFtOJx24mH1/k78ITcTaRTChD15KeAIxAKORh/IRM4PDwYqR +OkwrULG9IpRdNYlzg8WbGf60oenUoWa2AaU2+dhoYSi3dOGiMQ== +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia TLS RSA Root CA O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia TLS RSA Root CA O=TrustAsia Technologies, Inc. 
+# Label: "TrustAsia TLS RSA Root CA" +# Serial: 160405846464868906657516898462547310235378010780 +# MD5 Fingerprint: 3b:9e:c3:86:0f:34:3c:6b:c5:46:c4:8e:1d:e7:19:12 +# SHA1 Fingerprint: a5:46:50:c5:62:ea:95:9a:1a:a7:04:6f:17:58:c7:29:53:3d:03:fa +# SHA256 Fingerprint: 06:c0:8d:7d:af:d8:76:97:1e:b1:12:4f:e6:7f:84:7e:c0:c7:a1:58:d3:ea:53:cb:e9:40:e2:ea:97:91:f4:c3 +-----BEGIN CERTIFICATE----- +MIIFgDCCA2igAwIBAgIUHBjYz+VTPyI1RlNUJDxsR9FcSpwwDQYJKoZIhvcNAQEM +BQAwWDELMAkGA1UEBhMCQ04xJTAjBgNVBAoTHFRydXN0QXNpYSBUZWNobm9sb2dp +ZXMsIEluYy4xIjAgBgNVBAMTGVRydXN0QXNpYSBUTFMgUlNBIFJvb3QgQ0EwHhcN +MjQwNTE1MDU0MTU3WhcNNDQwNTE1MDU0MTU2WjBYMQswCQYDVQQGEwJDTjElMCMG +A1UEChMcVHJ1c3RBc2lhIFRlY2hub2xvZ2llcywgSW5jLjEiMCAGA1UEAxMZVHJ1 +c3RBc2lhIFRMUyBSU0EgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC +AgoCggIBAMMWuBtqpERz5dZO9LnPWwvB0ZqB9WOwj0PBuwhaGnrhB3YmH49pVr7+ +NmDQDIPNlOrnxS1cLwUWAp4KqC/lYCZUlviYQB2srp10Zy9U+5RjmOMmSoPGlbYJ +Q1DNDX3eRA5gEk9bNb2/mThtfWza4mhzH/kxpRkQcwUqwzIZheo0qt1CHjCNP561 +HmHVb70AcnKtEj+qpklz8oYVlQwQX1Fkzv93uMltrOXVmPGZLmzjyUT5tUMnCE32 +ft5EebuyjBza00tsLtbDeLdM1aTk2tyKjg7/D8OmYCYozza/+lcK7Fs/6TAWe8Tb +xNRkoDD75f0dcZLdKY9BWN4ArTr9PXwaqLEX8E40eFgl1oUh63kd0Nyrz2I8sMeX +i9bQn9P+PN7F4/w6g3CEIR0JwqH8uyghZVNgepBtljhb//HXeltt08lwSUq6HTrQ +UNoyIBnkiz/r1RYmNzz7dZ6wB3C4FGB33PYPXFIKvF1tjVEK2sUYyJtt3LCDs3+j +TnhMmCWr8n4uIF6CFabW2I+s5c0yhsj55NqJ4js+k8UTav/H9xj8Z7XvGCxUq0DT +bE3txci3OE9kxJRMT6DNrqXGJyV1J23G2pyOsAWZ1SgRxSHUuPzHlqtKZFlhaxP8 +S8ySpg+kUb8OWJDZgoM5pl+z+m6Ss80zDoWo8SnTq1mt1tve1CuBAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLgHkXlcBvRG/XtZylomkadFK/hT +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQwFAAOCAgEAIZtqBSBdGBanEqT3 +Rz/NyjuujsCCztxIJXgXbODgcMTWltnZ9r96nBO7U5WS/8+S4PPFJzVXqDuiGev4 +iqME3mmL5Dw8veWv0BIb5Ylrc5tvJQJLkIKvQMKtuppgJFqBTQUYo+IzeXoLH5Pt +7DlK9RME7I10nYEKqG/odv6LTytpEoYKNDbdgptvT+Bz3Ul/KD7JO6NXBNiT2Twp +2xIQaOHEibgGIOcberyxk2GaGUARtWqFVwHxtlotJnMnlvm5P1vQiJ3koP26TpUJ +g3933FEFlJ0gcXax7PqJtZwuhfG5WyRasQmr2soaB82G39tp27RIGAAtvKLEiUUj +pQ7hRGU+isFqMB3iYPg6qocJQrmBktwliJiJ8Xw18WLK7nn4GS/+X/jbh87qqA8M +pugLoDzga5SYnH+tBuYc6kIQX+ImFTw3OffXvO645e8D7r0i+yiGNFjEWn9hongP +XvPKnbwbPKfILfanIhHKA9jnZwqKDss1jjQ52MjqjZ9k4DewbNfFj8GQYSbbJIwe +SsCI3zWQzj8C9GRh3sfIB5XeMhg6j6JCQCTl1jNdfK7vsU1P1FeQNWrcrgSXSYk0 +ly4wBOeY99sLAZDBHwo/+ML+TvrbmnNzFrwFuHnYWa8G5z9nODmxfKuU4CkUpijy +323imttUQ/hHWKNddBWcwauwxzQ= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 2 2023 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 2 2023 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 2 2023" +# Serial: 139766439402180512324132425437959641711 +# MD5 Fingerprint: 96:b4:78:09:f0:09:cb:77:eb:bb:1b:4d:6f:36:bc:b6 +# SHA1 Fingerprint: a5:5b:d8:47:6c:8f:19:f7:4c:f4:6d:6b:b6:c2:79:82:22:df:54:8b +# SHA256 Fingerprint: 8e:82:21:b2:e7:d4:00:78:36:a1:67:2f:0d:cc:29:9c:33:bc:07:d3:16:f1:32:fa:1a:20:6d:58:71:50:f1:ce +-----BEGIN CERTIFICATE----- +MIIFqTCCA5GgAwIBAgIQaSYJfoBLTKCnjHhiU19abzANBgkqhkiG9w0BAQ0FADBI +MQswCQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlE +LVRSVVNUIEVWIFJvb3QgQ0EgMiAyMDIzMB4XDTIzMDUwOTA5MTAzM1oXDTM4MDUw +OTA5MTAzMlowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEi +MCAGA1UEAxMZRC1UUlVTVCBFViBSb290IENBIDIgMjAyMzCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANiOo4mAC7JXUtypU0w3uX9jFxPvp1sjW2l1sJkK +F8GLxNuo4MwxusLyzV3pt/gdr2rElYfXR8mV2IIEUD2BCP/kPbOx1sWy/YgJ25yE +7CUXFId/MHibaljJtnMoPDT3mfd/06b4HEV8rSyMlD/YZxBTfiLNTiVR8CUkNRFe +EMbsh2aJgWi6zCudR3Mfvc2RpHJqnKIbGKBv7FD0fUDCqDDPvXPIEysQEx6Lmqg6 +lHPTGGkKSv/BAQP/eX+1SH977ugpbzZMlWGG2Pmic4ruri+W7mjNPU0oQvlFKzIb 
+RlUWaqZLKfm7lVa/Rh3sHZMdwGWyH6FDrlaeoLGPaxK3YG14C8qKXO0elg6DpkiV +jTujIcSuWMYAsoS0I6SWhjW42J7YrDRJmGOVxcttSEfi8i4YHtAxq9107PncjLgc +jmgjutDzUNzPZY9zOjLHfP7KgiJPvo5iR2blzYfi6NUPGJ/lBHJLRjwQ8kTCZFZx +TnXonMkmdMV9WdEKWw9t/p51HBjGGjp82A0EzM23RWV6sY+4roRIPrN6TagD4uJ+ +ARZZaBhDM7DS3LAaQzXupdqpRlyuhoFBAUp0JuyfBr/CBTdkdXgpaP3F9ev+R/nk +hbDhezGdpn9yo7nELC7MmVcOIQxFAZRl62UJxmMiCzNJkkg8/M3OsD6Onov4/knF +NXJHAgMBAAGjgY4wgYswDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUqvyREBuH +kV8Wub9PS5FeAByxMoAwDgYDVR0PAQH/BAQDAgEGMEkGA1UdHwRCMEAwPqA8oDqG +OGh0dHA6Ly9jcmwuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3RfZXZfcm9vdF9jYV8y +XzIwMjMuY3JsMA0GCSqGSIb3DQEBDQUAA4ICAQCTy6UfmRHsmg1fLBWTxj++EI14 +QvBukEdHjqOSMo1wj/Zbjb6JzkcBahsgIIlbyIIQbODnmaprxiqgYzWRaoUlrRc4 +pZt+UPJ26oUFKidBK7GB0aL2QHWpDsvxVUjY7NHss+jOFKE17MJeNRqrphYBBo7q +3C+jisosketSjl8MmxfPy3MHGcRqwnNU73xDUmPBEcrCRbH0O1P1aa4846XerOhU +t7KR/aypH/KH5BfGSah82ApB9PI+53c0BFLd6IHyTS9URZ0V4U/M5d40VxDJI3IX +cI1QcB9WbMy5/zpaT2N6w25lBx2Eof+pDGOJbbJAiDnXH3dotfyc1dZnaVuodNv8 +ifYbMvekJKZ2t0dT741Jj6m2g1qllpBFYfXeA08mD6iL8AOWsKwV0HFaanuU5nCT +2vFp4LJiTZ6P/4mdm13NRemUAiKN4DV/6PEEeXFsVIP4M7kFMhtYVRFP0OUnR3Hs +7dpn1mKmS00PaaLJvOwiS5THaJQXfuKOKD62xur1NGyfN4gHONuGcfrNlUhDbqNP +gofXNJhuS5N5YHVpD/Aa1VP6IQzCP+k/HxiMkl14p3ZnGbuy6n/pcAlWVqOwDAst +Nl7F6cTVg8uGF5csbBNvh1qvSaYd2804BC5f4ko1Di1L+KIkBI3Y4WNeApI02phh +XBxvWHZks/wCuPWdCg== +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign RSA TLS Root CA 2022 - 1 O=SwissSign AG +# Subject: CN=SwissSign RSA TLS Root CA 2022 - 1 O=SwissSign AG +# Label: "SwissSign RSA TLS Root CA 2022 - 1" +# Serial: 388078645722908516278762308316089881486363258315 +# MD5 Fingerprint: 16:2e:e4:19:76:81:85:ba:8e:91:58:f1:15:ef:72:39 +# SHA1 Fingerprint: 81:34:0a:be:4c:cd:ce:cc:e7:7d:cc:8a:d4:57:e2:45:a0:77:5d:ce +# SHA256 Fingerprint: 19:31:44:f4:31:e0:fd:db:74:07:17:d4:de:92:6a:57:11:33:88:4b:43:60:d3:0e:27:29:13:cb:e6:60:ce:41 +-----BEGIN CERTIFICATE----- +MIIFkzCCA3ugAwIBAgIUQ/oMX04bgBhE79G0TzUfRPSA7cswDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzErMCkGA1UE +AxMiU3dpc3NTaWduIFJTQSBUTFMgUm9vdCBDQSAyMDIyIC0gMTAeFw0yMjA2MDgx +MTA4MjJaFw00NzA2MDgxMTA4MjJaMFExCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxT +d2lzc1NpZ24gQUcxKzApBgNVBAMTIlN3aXNzU2lnbiBSU0EgVExTIFJvb3QgQ0Eg +MjAyMiAtIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDLKmjiC8NX +vDVjvHClO/OMPE5Xlm7DTjak9gLKHqquuN6orx122ro10JFwB9+zBvKK8i5VUXu7 +LCTLf5ImgKO0lPaCoaTo+nUdWfMHamFk4saMla+ju45vVs9xzF6BYQ1t8qsCLqSX +5XH8irCRIFucdFJtrhUnWXjyCcplDn/L9Ovn3KlMd/YrFgSVrpxxpT8q2kFC5zyE +EPThPYxr4iuRR1VPuFa+Rd4iUU1OKNlfGUEGjw5NBuBwQCMBauTLE5tzrE0USJIt +/m2n+IdreXXhvhCxqohAWVTXz8TQm0SzOGlkjIHRI36qOTw7D59Ke4LKa2/KIj4x +0LDQKhySio/YGZxH5D4MucLNvkEM+KRHBdvBFzA4OmnczcNpI/2aDwLOEGrOyvi5 +KaM2iYauC8BPY7kGWUleDsFpswrzd34unYyzJ5jSmY0lpx+Gs6ZUcDj8fV3oT4MM +0ZPlEuRU2j7yrTrePjxF8CgPBrnh25d7mUWe3f6VWQQvdT/TromZhqwUtKiE+shd +OxtYk8EXlFXIC+OCeYSf8wCENO7cMdWP8vpPlkwGqnj73mSiI80fPsWMvDdUDrta +clXvyFu1cvh43zcgTFeRc5JzrBh3Q4IgaezprClG5QtO+DdziZaKHG29777YtvTK +wP1H8K4LWCDFyB02rpeNUIMmJCn3nTsPBQIDAQABo2MwYTAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBRvjmKLk0Ow4UD2p8P98Q+4 +DxU4pTAdBgNVHQ4EFgQUb45ii5NDsOFA9qfD/fEPuA8VOKUwDQYJKoZIhvcNAQEL +BQADggIBAKwsKUF9+lz1GpUYvyypiqkkVHX1uECry6gkUSsYP2OprphWKwVDIqO3 +10aewCoSPY6WlkDfDDOLazeROpW7OSltwAJsipQLBwJNGD77+3v1dj2b9l4wBlgz +Hqp41eZUBDqyggmNzhYzWUUo8aWjlw5DI/0LIICQ/+Mmz7hkkeUFjxOgdg3XNwwQ +iJb0Pr6VvfHDffCjw3lHC1ySFWPtUnWK50Zpy1FVCypM9fJkT6lc/2cyjlUtMoIc +gC9qkfjLvH4YoiaoLqNTKIftV+Vlek4ASltOU8liNr3CjlvrzG4ngRhZi0Rjn9UM +ZfQpZX+RLOV/fuiJz48gy20HQhFRJjKKLjpHE7iNvUcNCfAWpO2Whi4Z2L6MOuhF 
+LhG6rlrnub+xzI/goP+4s9GFe3lmozm1O2bYQL7Pt2eLSMkZJVX8vY3PXtpOpvJp +zv1/THfQwUY1mFwjmwJFQ5Ra3bxHrSL+ul4vkSkphnsh3m5kt8sNjzdbowhq6/Td +Ao9QAwKxuDdollDruF/UKIqlIgyKhPBZLtU30WHlQnNYKoH3dtvi4k0NX/a3vgW0 +rk4N3hY9A4GzJl5LuEsAz/+MF7psYC0nhzck5npgL7XTgwSqT0N1osGDsieYK7EO +gLrAhV5Cud+xYJHT6xh+cHiudoO+cVrQkOPKwRYlZ0rwtnu64ZzZ +-----END CERTIFICATE----- diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/core.py b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/core.py new file mode 100644 index 0000000..2f2f7e0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/core.py @@ -0,0 +1,83 @@ +""" +certifi.py +~~~~~~~~~~ + +This module returns the installation location of cacert.pem or its contents. +""" +import sys +import atexit + +def exit_cacert_ctx() -> None: + _CACERT_CTX.__exit__(None, None, None) # type: ignore[union-attr] + + +if sys.version_info >= (3, 11): + + from importlib.resources import as_file, files + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the file + # in cases where we're inside of a zipimport situation until someone + # actually calls where(), but we don't want to re-extract the file + # on every call of where(), so we'll do it once then store it in a + # global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you to + # manage the cleanup of this file, so it doesn't actually return a + # path, it returns a context manager that will give you the path + # when you enter it and will do any cleanup when you leave it. In + # the common case of not needing a temporary file, it will just + # return the file system location and the __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. + _CACERT_CTX = as_file(files("pip._vendor.certifi").joinpath("cacert.pem")) + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + atexit.register(exit_cacert_ctx) + + return _CACERT_PATH + + def contents() -> str: + return files("pip._vendor.certifi").joinpath("cacert.pem").read_text(encoding="ascii") + +else: + + from importlib.resources import path as get_path, read_text + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the + # file in cases where we're inside of a zipimport situation until + # someone actually calls where(), but we don't want to re-extract + # the file on every call of where(), so we'll do it once then store + # it in a global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you + # to manage the cleanup of this file, so it doesn't actually + # return a path, it returns a context manager that will give + # you the path when you enter it and will do any cleanup when + # you leave it. In the common case of not needing a temporary + # file, it will just return the file system location and the + # __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. 
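+            # Here get_path() (importlib.resources.path) plays the same role as
+            # the files()/as_file() pair in the 3.11+ branch above; it is the
+            # older API for obtaining a real filesystem path to package data.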
+ _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem") + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + atexit.register(exit_cacert_ctx) + + return _CACERT_PATH + + def contents() -> str: + return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/certifi/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__init__.py new file mode 100644 index 0000000..9fec202 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__init__.py @@ -0,0 +1,13 @@ +from ._implementation import ( + CyclicDependencyError, + DependencyGroupInclude, + DependencyGroupResolver, + resolve, +) + +__all__ = ( + "CyclicDependencyError", + "DependencyGroupInclude", + "DependencyGroupResolver", + "resolve", +) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__main__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__main__.py new file mode 100644 index 0000000..48ebb0d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__main__.py @@ -0,0 +1,65 @@ +import argparse +import sys + +from ._implementation import resolve +from ._toml_compat import tomllib + + +def main() -> None: + if tomllib is None: + print( + "Usage error: dependency-groups CLI requires tomli or Python 3.11+", + file=sys.stderr, + ) + raise SystemExit(2) + + parser = argparse.ArgumentParser( + description=( + "A dependency-groups CLI. Prints out a resolved group, newline-delimited." + ) + ) + parser.add_argument( + "GROUP_NAME", nargs="*", help="The dependency group(s) to resolve." + ) + parser.add_argument( + "-f", + "--pyproject-file", + default="pyproject.toml", + help="The pyproject.toml file. Defaults to trying in the current directory.", + ) + parser.add_argument( + "-o", + "--output", + help="An output file. 
Defaults to stdout.", + ) + parser.add_argument( + "-l", + "--list", + action="store_true", + help="List the available dependency groups", + ) + args = parser.parse_args() + + with open(args.pyproject_file, "rb") as fp: + pyproject = tomllib.load(fp) + + dependency_groups_raw = pyproject.get("dependency-groups", {}) + + if args.list: + print(*dependency_groups_raw.keys()) + return + if not args.GROUP_NAME: + print("A GROUP_NAME is required", file=sys.stderr) + raise SystemExit(3) + + content = "\n".join(resolve(dependency_groups_raw, *args.GROUP_NAME)) + + if args.output is None or args.output == "-": + print(content) + else: + with open(args.output, "w", encoding="utf-8") as fp: + print(content, file=fp) + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..95091da Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..dbe877a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-312.pyc new file mode 100644 index 0000000..1c31334 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-312.pyc new file mode 100644 index 0000000..b54783e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-312.pyc new file mode 100644 index 0000000..f3a8897 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-312.pyc new file mode 100644 index 0000000..528d177 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_implementation.py b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_implementation.py new file mode 100644 index 0000000..64e314a --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_implementation.py @@ -0,0 +1,209 @@ +from __future__ import annotations + +import dataclasses +import re +from collections.abc import Mapping + +from pip._vendor.packaging.requirements import Requirement + + +def _normalize_name(name: str) -> str: + return re.sub(r"[-_.]+", "-", name).lower() + + +def _normalize_group_names( + dependency_groups: Mapping[str, str | Mapping[str, str]], +) -> Mapping[str, str | Mapping[str, str]]: + original_names: dict[str, list[str]] = {} + normalized_groups = {} + + for group_name, value in dependency_groups.items(): + normed_group_name = _normalize_name(group_name) + original_names.setdefault(normed_group_name, []).append(group_name) + normalized_groups[normed_group_name] = value + + errors = [] + for normed_name, names in original_names.items(): + if len(names) > 1: + errors.append(f"{normed_name} ({', '.join(names)})") + if errors: + raise ValueError(f"Duplicate dependency group names: {', '.join(errors)}") + + return normalized_groups + + +@dataclasses.dataclass +class DependencyGroupInclude: + include_group: str + + +class CyclicDependencyError(ValueError): + """ + An error representing the detection of a cycle. + """ + + def __init__(self, requested_group: str, group: str, include_group: str) -> None: + self.requested_group = requested_group + self.group = group + self.include_group = include_group + + if include_group == group: + reason = f"{group} includes itself" + else: + reason = f"{include_group} -> {group}, {group} -> {include_group}" + super().__init__( + "Cyclic dependency group include while resolving " + f"{requested_group}: {reason}" + ) + + +class DependencyGroupResolver: + """ + A resolver for Dependency Group data. + + This class handles caching, name normalization, cycle detection, and other + parsing requirements. There are only two public methods for exploring the data: + ``lookup()`` and ``resolve()``. + + :param dependency_groups: A mapping, as provided via pyproject + ``[dependency-groups]``. + """ + + def __init__( + self, + dependency_groups: Mapping[str, str | Mapping[str, str]], + ) -> None: + if not isinstance(dependency_groups, Mapping): + raise TypeError("Dependency Groups table is not a mapping") + self.dependency_groups = _normalize_group_names(dependency_groups) + # a map of group names to parsed data + self._parsed_groups: dict[ + str, tuple[Requirement | DependencyGroupInclude, ...] + ] = {} + # a map of group names to their ancestors, used for cycle detection + self._include_graph_ancestors: dict[str, tuple[str, ...]] = {} + # a cache of completed resolutions to Requirement lists + self._resolve_cache: dict[str, tuple[Requirement, ...]] = {} + + def lookup(self, group: str) -> tuple[Requirement | DependencyGroupInclude, ...]: + """ + Lookup a group name, returning the parsed dependency data for that group. + This will not resolve includes. + + :param group: the name of the group to lookup + + :raises ValueError: if the data does not appear to be valid dependency group + data + :raises TypeError: if the data is not a string + :raises LookupError: if group name is absent + :raises packaging.requirements.InvalidRequirement: if a specifier is not valid + """ + if not isinstance(group, str): + raise TypeError("Dependency group name is not a str") + group = _normalize_name(group) + return self._parse_group(group) + + def resolve(self, group: str) -> tuple[Requirement, ...]: + """ + Resolve a dependency group to a list of requirements. 
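+
+        For example (illustrative data), resolving ``"all"`` against
+        ``{"test": ["pytest"], "all": [{"include-group": "test"}]}`` follows
+        the include and yields the single requirement ``pytest``.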
+ + :param group: the name of the group to resolve + + :raises TypeError: if the inputs appear to be the wrong types + :raises ValueError: if the data does not appear to be valid dependency group + data + :raises LookupError: if group name is absent + :raises packaging.requirements.InvalidRequirement: if a specifier is not valid + """ + if not isinstance(group, str): + raise TypeError("Dependency group name is not a str") + group = _normalize_name(group) + return self._resolve(group, group) + + def _parse_group( + self, group: str + ) -> tuple[Requirement | DependencyGroupInclude, ...]: + # short circuit -- never do the work twice + if group in self._parsed_groups: + return self._parsed_groups[group] + + if group not in self.dependency_groups: + raise LookupError(f"Dependency group '{group}' not found") + + raw_group = self.dependency_groups[group] + if not isinstance(raw_group, list): + raise TypeError(f"Dependency group '{group}' is not a list") + + elements: list[Requirement | DependencyGroupInclude] = [] + for item in raw_group: + if isinstance(item, str): + # packaging.requirements.Requirement parsing ensures that this is a + # valid PEP 508 Dependency Specifier + # raises InvalidRequirement on failure + elements.append(Requirement(item)) + elif isinstance(item, dict): + if tuple(item.keys()) != ("include-group",): + raise ValueError(f"Invalid dependency group item: {item}") + + include_group = next(iter(item.values())) + elements.append(DependencyGroupInclude(include_group=include_group)) + else: + raise ValueError(f"Invalid dependency group item: {item}") + + self._parsed_groups[group] = tuple(elements) + return self._parsed_groups[group] + + def _resolve(self, group: str, requested_group: str) -> tuple[Requirement, ...]: + """ + This is a helper for cached resolution to strings. + + :param group: The name of the group to resolve. + :param requested_group: The group which was used in the original, user-facing + request. + """ + if group in self._resolve_cache: + return self._resolve_cache[group] + + parsed = self._parse_group(group) + + resolved_group = [] + for item in parsed: + if isinstance(item, Requirement): + resolved_group.append(item) + elif isinstance(item, DependencyGroupInclude): + include_group = _normalize_name(item.include_group) + if include_group in self._include_graph_ancestors.get(group, ()): + raise CyclicDependencyError( + requested_group, group, item.include_group + ) + self._include_graph_ancestors[include_group] = ( + *self._include_graph_ancestors.get(group, ()), + group, + ) + resolved_group.extend(self._resolve(include_group, requested_group)) + else: # unreachable + raise NotImplementedError( + f"Invalid dependency group item after parse: {item}" + ) + + self._resolve_cache[group] = tuple(resolved_group) + return self._resolve_cache[group] + + +def resolve( + dependency_groups: Mapping[str, str | Mapping[str, str]], /, *groups: str +) -> tuple[str, ...]: + """ + Resolve a dependency group to a tuple of requirements, as strings. 
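+
+    For example (illustrative data),
+    ``resolve({"test": ["pytest", "coverage"]}, "test")`` returns
+    ``("pytest", "coverage")``.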
+ + :param dependency_groups: the parsed contents of the ``[dependency-groups]`` table + from ``pyproject.toml`` + :param groups: the name of the group(s) to resolve + + :raises TypeError: if the inputs appear to be the wrong types + :raises ValueError: if the data does not appear to be valid dependency group data + :raises LookupError: if group name is absent + :raises packaging.requirements.InvalidRequirement: if a specifier is not valid + """ + resolver = DependencyGroupResolver(dependency_groups) + return tuple(str(r) for group in groups for r in resolver.resolve(group)) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py new file mode 100644 index 0000000..09454bd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +import argparse +import sys + +from ._implementation import DependencyGroupResolver +from ._toml_compat import tomllib + + +def main(*, argv: list[str] | None = None) -> None: + if tomllib is None: + print( + "Usage error: dependency-groups CLI requires tomli or Python 3.11+", + file=sys.stderr, + ) + raise SystemExit(2) + + parser = argparse.ArgumentParser( + description=( + "Lint Dependency Groups for validity. " + "This will eagerly load and check all of your Dependency Groups." + ) + ) + parser.add_argument( + "-f", + "--pyproject-file", + default="pyproject.toml", + help="The pyproject.toml file. Defaults to trying in the current directory.", + ) + args = parser.parse_args(argv if argv is not None else sys.argv[1:]) + + with open(args.pyproject_file, "rb") as fp: + pyproject = tomllib.load(fp) + dependency_groups_raw = pyproject.get("dependency-groups", {}) + + errors: list[str] = [] + try: + resolver = DependencyGroupResolver(dependency_groups_raw) + except (ValueError, TypeError) as e: + errors.append(f"{type(e).__name__}: {e}") + else: + for groupname in resolver.dependency_groups: + try: + resolver.resolve(groupname) + except (LookupError, ValueError, TypeError) as e: + errors.append(f"{type(e).__name__}: {e}") + + if errors: + print("errors encountered while examining dependency groups:") + for msg in errors: + print(f" {msg}") + sys.exit(1) + else: + print("ok") + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py new file mode 100644 index 0000000..f86d896 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import argparse +import subprocess +import sys + +from ._implementation import DependencyGroupResolver +from ._toml_compat import tomllib + + +def _invoke_pip(deps: list[str]) -> None: + subprocess.check_call([sys.executable, "-m", "pip", "install", *deps]) + + +def main(*, argv: list[str] | None = None) -> None: + if tomllib is None: + print( + "Usage error: dependency-groups CLI requires tomli or Python 3.11+", + file=sys.stderr, + ) + raise SystemExit(2) + + parser = argparse.ArgumentParser(description="Install Dependency Groups.") + parser.add_argument( + "DEPENDENCY_GROUP", nargs="+", help="The dependency groups to install." 
+ ) + parser.add_argument( + "-f", + "--pyproject-file", + default="pyproject.toml", + help="The pyproject.toml file. Defaults to trying in the current directory.", + ) + args = parser.parse_args(argv if argv is not None else sys.argv[1:]) + + with open(args.pyproject_file, "rb") as fp: + pyproject = tomllib.load(fp) + dependency_groups_raw = pyproject.get("dependency-groups", {}) + + errors: list[str] = [] + resolved: list[str] = [] + try: + resolver = DependencyGroupResolver(dependency_groups_raw) + except (ValueError, TypeError) as e: + errors.append(f"{type(e).__name__}: {e}") + else: + for groupname in args.DEPENDENCY_GROUP: + try: + resolved.extend(str(r) for r in resolver.resolve(groupname)) + except (LookupError, ValueError, TypeError) as e: + errors.append(f"{type(e).__name__}: {e}") + + if errors: + print("errors encountered while examining dependency groups:") + for msg in errors: + print(f" {msg}") + sys.exit(1) + + _invoke_pip(resolved) + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_toml_compat.py b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_toml_compat.py new file mode 100644 index 0000000..8d6f921 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/_toml_compat.py @@ -0,0 +1,9 @@ +try: + import tomllib +except ImportError: + try: + from pip._vendor import tomli as tomllib # type: ignore[no-redef, unused-ignore] + except ModuleNotFoundError: # pragma: no cover + tomllib = None # type: ignore[assignment, unused-ignore] + +__all__ = ("tomllib",) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/dependency_groups/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__init__.py new file mode 100644 index 0000000..4e82943 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2024 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +import logging + +__version__ = '0.4.0' + + +class DistlibException(Exception): + pass + + +try: + from logging import NullHandler +except ImportError: # pragma: no cover + + class NullHandler(logging.Handler): + + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +logger = logging.getLogger(__name__) +logger.addHandler(NullHandler()) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..758cdec Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-312.pyc new file mode 100644 index 0000000..0ddaf46 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-312.pyc new file mode 100644 index 0000000..76e19a1 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-312.pyc new file mode 100644 index 0000000..d585ae8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-312.pyc new file mode 100644 index 0000000..9ca8758 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/compat.py b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/compat.py new file mode 100644 index 0000000..ca561dd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/compat.py @@ -0,0 +1,1137 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +from __future__ import absolute_import + +import os +import re +import shutil +import sys + +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +if sys.version_info[0] < 3: # pragma: no cover + from StringIO import StringIO + string_types = basestring, + text_type = unicode + from types import FileType as file_type + import __builtin__ as builtins + import ConfigParser as configparser + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit + from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, + pathname2url, ContentTooShortError, splittype) + + def quote(s): + if isinstance(s, unicode): + s = s.encode('utf-8') + return _quote(s) + + import urllib2 + from urllib2 import (Request, urlopen, URLError, HTTPError, + HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPHandler, + HTTPRedirectHandler, build_opener) + if ssl: + from urllib2 import HTTPSHandler + import httplib + import xmlrpclib + import Queue as queue + from HTMLParser import HTMLParser + import htmlentitydefs + raw_input = raw_input + from itertools import ifilter as filter + from itertools import ifilterfalse as filterfalse + + # Leaving this around for now, in case it needs resurrecting in some way + # _userprog = None + # def splituser(host): + # """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + # global _userprog + # if _userprog is None: + # import re + # _userprog = re.compile('^(.*)@(.*)$') + + # match = _userprog.match(host) + # if match: return match.group(1, 2) + # return None, host + +else: # pragma: no cover + from io import StringIO + string_types = str, + text_type = str + from io import TextIOWrapper as file_type + import builtins + import configparser + from urllib.parse import (urlparse, urlunparse, urljoin, quote, unquote, + urlsplit, urlunsplit, splittype) + from urllib.request import (urlopen, urlretrieve, Request, url2pathname, + pathname2url, HTTPBasicAuthHandler, + HTTPPasswordMgr, HTTPHandler, + HTTPRedirectHandler, build_opener) + if ssl: + from urllib.request import HTTPSHandler + from urllib.error import HTTPError, URLError, ContentTooShortError + import http.client as httplib + import urllib.request as urllib2 + import xmlrpc.client as xmlrpclib + import queue + from html.parser import HTMLParser + import html.entities as htmlentitydefs + raw_input = input + from itertools import filterfalse + filter = filter + +try: + from ssl import match_hostname, CertificateError +except ImportError: # pragma: no cover + + class CertificateError(ValueError): + pass + + def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + parts = dn.split('.') + leftmost, remainder = parts[0], parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. 
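+        # (Illustration: '*.example.com' matches 'a.example.com' but neither
+        # 'example.com' nor 'a.b.example.com', because '[^.]+' below spans
+        # exactly one dotless label.)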
+ if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. + if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" % + (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" % + (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + +try: + from types import SimpleNamespace as Container +except ImportError: # pragma: no cover + + class Container(object): + """ + A generic container for when multiple values need to be returned + """ + + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +try: + from shutil import which +except ImportError: # pragma: no cover + # Implementation from Python 3.3 + def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + + # Check that a given file can be accessed with the correct mode. + # Additionally check that `file` is not a directory, as on Windows + # directories pass the os.access check. + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to the + # current directory, e.g. 
./script + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + # The current directory takes precedence on Windows. + if os.curdir not in path: + path.insert(0, os.curdir) + + # PATHEXT is necessary to check on Windows. + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + # See if the given file matches any of the expected path extensions. + # This will allow us to short circuit when given "python.exe". + # If it does match, only test that one, otherwise we have to try + # others. + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if normdir not in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None + + +# ZipFile is a context manager in 2.7, but not in 2.6 + +from zipfile import ZipFile as BaseZipFile + +if hasattr(BaseZipFile, '__enter__'): # pragma: no cover + ZipFile = BaseZipFile +else: # pragma: no cover + from zipfile import ZipExtFile as BaseZipExtFile + + class ZipExtFile(BaseZipExtFile): + + def __init__(self, base): + self.__dict__.update(base.__dict__) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + class ZipFile(BaseZipFile): + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + def open(self, *args, **kwargs): + base = BaseZipFile.open(self, *args, **kwargs) + return ZipExtFile(base) + + +try: + from platform import python_implementation +except ImportError: # pragma: no cover + + def python_implementation(): + """Return a string identifying the Python implementation.""" + if 'PyPy' in sys.version: + return 'PyPy' + if os.name == 'java': + return 'Jython' + if sys.version.startswith('IronPython'): + return 'IronPython' + return 'CPython' + + +import sysconfig + +try: + callable = callable +except NameError: # pragma: no cover + from collections.abc import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode + fsdecode = os.fsdecode +except AttributeError: # pragma: no cover + # Issue #99: on some systems (e.g. containerised), + # sys.getfilesystemencoding() returns None, and we need a real value, + # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and + # sys.getfilesystemencoding(): the return value is "the user’s preference + # according to the result of nl_langinfo(CODESET), or None if the + # nl_langinfo(CODESET) failed." 
+ _fsencoding = sys.getfilesystemencoding() or 'utf-8' + if _fsencoding == 'mbcs': + _fserrors = 'strict' + else: + _fserrors = 'surrogateescape' + + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, text_type): + return filename.encode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + + def fsdecode(filename): + if isinstance(filename, text_type): + return filename + elif isinstance(filename, bytes): + return filename.decode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + + +try: + from tokenize import detect_encoding +except ImportError: # pragma: no cover + from codecs import BOM_UTF8, lookup + + cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") + + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + + def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + """ + try: + filename = readline.__self__.name + except AttributeError: + filename = None + bom_found = False + encoding = None + default = 'utf-8' + + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def find_cookie(line): + try: + # Decode as UTF-8. Either the line is an encoding declaration, + # in which case it should be pure ASCII, or it must be UTF-8 + # per default encoding. 
+ line_string = line.decode('utf-8') + except UnicodeDecodeError: + msg = "invalid or missing encoding declaration" + if filename is not None: + msg = '{} for {!r}'.format(msg, filename) + raise SyntaxError(msg) + + matches = cookie_re.findall(line_string) + if not matches: + return None + encoding = _get_normal_name(matches[0]) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + if filename is None: + msg = "unknown encoding: " + encoding + else: + msg = "unknown encoding for {!r}: {}".format( + filename, encoding) + raise SyntaxError(msg) + + if bom_found: + if codec.name != 'utf-8': + # This behaviour mimics the Python interpreter + if filename is None: + msg = 'encoding problem: utf-8' + else: + msg = 'encoding problem for {!r}: utf-8'.format( + filename) + raise SyntaxError(msg) + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default, [] + + encoding = find_cookie(first) + if encoding: + return encoding, [first] + + second = read_or_stop() + if not second: + return default, [first] + + encoding = find_cookie(second) + if encoding: + return encoding, [first, second] + + return default, [first, second] + + +# For converting & <-> & etc. +try: + from html import escape +except ImportError: + from cgi import escape +if sys.version_info[:2] < (3, 4): + unescape = HTMLParser().unescape +else: + from html import unescape + +try: + from collections import ChainMap +except ImportError: # pragma: no cover + from collections import MutableMapping + + try: + from reprlib import recursive_repr as _recursive_repr + except ImportError: + + def _recursive_repr(fillvalue='...'): + ''' + Decorator to make a repr function return fillvalue for a recursive + call + ''' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, + '__annotations__', {}) + return wrapper + + return decorating_function + + class ChainMap(MutableMapping): + ''' + A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. 
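+
+            For example (illustrative), ``ChainMap({'a': 1}, {'a': 2})['a']``
+            returns ``1``, because lookups search the underlying maps left to
+            right.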
+ + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[ + key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__( + key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union( + *self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError( + 'Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError( + 'Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + + +try: + from importlib.util import cache_from_source # Python >= 3.4 +except ImportError: # pragma: no cover + + def cache_from_source(path, debug_override=None): + assert path.endswith('.py') + if debug_override is None: + debug_override = __debug__ + if debug_override: + suffix = 'c' + else: + suffix = 'o' + return path + suffix + + +try: + from collections import OrderedDict +except ImportError: # pragma: no cover + # {{{ http://code.activestate.com/recipes/576693/ (r9) + # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. + # Passes Python2.7's test suite and incorporates all the latest updates. + try: + from thread import get_ident as _get_ident + except ImportError: + from dummy_thread import get_ident as _get_ident + + try: + from _abcoll import KeysView, ValuesView, ItemsView + except ImportError: + pass + + class OrderedDict(dict): + 'Dictionary that remembers insertion order' + + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as for regular dictionaries. 
+ + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % + len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
+ + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args), )) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running=None): + 'od.__repr__() <==> repr(od)' + if not _repr_running: + _repr_running = {} + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__, ) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items, ), inst_dict) + return self.__class__, (items, ) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. 
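+            For example (editorial addition), two OrderedDicts built from
+            [(1, 2), (3, 4)] and [(3, 4), (1, 2)] compare unequal to each
+            other, yet each compares equal to the plain dict {1: 2, 3: 4}.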
+ + ''' + if isinstance(other, OrderedDict): + return len(self) == len( + other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) + + +try: + from logging.config import BaseConfigurator, valid_ident +except ImportError: # pragma: no cover + IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) + + def valid_ident(s): + m = IDENTIFIER.match(s) + if not m: + raise ValueError('Not a valid Python identifier: %r' % s) + return True + + # The ConvertingXXX classes are wrappers around standard Python containers, + # and they serve to convert any suitable values in the container. The + # conversion converts base dicts, lists and tuples to their wrapped + # equivalents, whereas strings which match a conversion format are converted + # appropriately. + # + # Each wrapper should have a configurator attribute holding the actual + # configurator to use for conversion. + + class ConvertingDict(dict): + """A converting dictionary wrapper.""" + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + result = self.configurator.convert(value) + # If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def get(self, key, default=None): + value = dict.get(self, key, default) + result = self.configurator.convert(value) + # If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def pop(self, key, default=None): + value = dict.pop(self, key, default) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + class ConvertingList(list): + """A converting list wrapper.""" + + def __getitem__(self, key): + value = list.__getitem__(self, key) + result = self.configurator.convert(value) + # If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def pop(self, idx=-1): + value = list.pop(self, idx) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + return result + + class ConvertingTuple(tuple): + """A converting tuple wrapper.""" + + def __getitem__(self, key): + value = tuple.__getitem__(self, key) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + class BaseConfigurator(object): + """ + The configurator base class which defines some useful defaults. 
+ """ + + CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') + + WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') + DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') + INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') + DIGIT_PATTERN = re.compile(r'^\d+$') + + value_converters = { + 'ext': 'ext_convert', + 'cfg': 'cfg_convert', + } + + # We might want to use a different one, e.g. importlib + importer = staticmethod(__import__) + + def __init__(self, config): + self.config = ConvertingDict(config) + self.config.configurator = self + + def resolve(self, s): + """ + Resolve strings to objects using standard import and attribute + syntax. + """ + name = s.split('.') + used = name.pop(0) + try: + found = self.importer(used) + for frag in name: + used += '.' + frag + try: + found = getattr(found, frag) + except AttributeError: + self.importer(used) + found = getattr(found, frag) + return found + except ImportError: + e, tb = sys.exc_info()[1:] + v = ValueError('Cannot resolve %r: %s' % (s, e)) + v.__cause__, v.__traceback__ = e, tb + raise v + + def ext_convert(self, value): + """Default converter for the ext:// protocol.""" + return self.resolve(value) + + def cfg_convert(self, value): + """Default converter for the cfg:// protocol.""" + rest = value + m = self.WORD_PATTERN.match(rest) + if m is None: + raise ValueError("Unable to convert %r" % value) + else: + rest = rest[m.end():] + d = self.config[m.groups()[0]] + while rest: + m = self.DOT_PATTERN.match(rest) + if m: + d = d[m.groups()[0]] + else: + m = self.INDEX_PATTERN.match(rest) + if m: + idx = m.groups()[0] + if not self.DIGIT_PATTERN.match(idx): + d = d[idx] + else: + try: + n = int( + idx + ) # try as number first (most likely) + d = d[n] + except TypeError: + d = d[idx] + if m: + rest = rest[m.end():] + else: + raise ValueError('Unable to convert ' + '%r at %r' % (value, rest)) + # rest should be empty + return d + + def convert(self, value): + """ + Convert values to an appropriate type. dicts, lists and tuples are + replaced by their converting alternatives. Strings are checked to + see if they have a conversion format and are converted if they do. 
+ """ + if not isinstance(value, ConvertingDict) and isinstance( + value, dict): + value = ConvertingDict(value) + value.configurator = self + elif not isinstance(value, ConvertingList) and isinstance( + value, list): + value = ConvertingList(value) + value.configurator = self + elif not isinstance(value, ConvertingTuple) and isinstance(value, tuple): + value = ConvertingTuple(value) + value.configurator = self + elif isinstance(value, string_types): + m = self.CONVERT_PATTERN.match(value) + if m: + d = m.groupdict() + prefix = d['prefix'] + converter = self.value_converters.get(prefix, None) + if converter: + suffix = d['suffix'] + converter = getattr(self, converter) + value = converter(suffix) + return value + + def configure_custom(self, config): + """Configure an object with a user-supplied factory.""" + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) + result = c(**kwargs) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def as_tuple(self, value): + """Utility function which converts lists to tuples.""" + if isinstance(value, list): + value = tuple(value) + return value diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/resources.py b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/resources.py new file mode 100644 index 0000000..fef52aa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/resources.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import unicode_literals + +import bisect +import io +import logging +import os +import pkgutil +import sys +import types +import zipimport + +from . import DistlibException +from .util import cached_property, get_cache_base, Cache + +logger = logging.getLogger(__name__) + + +cache = None # created when needed + + +class ResourceCache(Cache): + def __init__(self, base=None): + if base is None: + # Use native string to avoid issues on 2.x: see Python #20140. + base = os.path.join(get_cache_base(), str('resource-cache')) + super(ResourceCache, self).__init__(base) + + def is_stale(self, resource, path): + """ + Is the cache stale for the given resource? + + :param resource: The :class:`Resource` being cached. + :param path: The path of the resource in the cache. + :return: True if the cache is stale. + """ + # Cache invalidation is a hard problem :-) + return True + + def get(self, resource): + """ + Get a resource into the cache, + + :param resource: A :class:`Resource` instance. + :return: The pathname of the resource in the cache. 
+ """ + prefix, path = resource.finder.get_cache_info(resource) + if prefix is None: + result = path + else: + result = os.path.join(self.base, self.prefix_to_dir(prefix), path) + dirname = os.path.dirname(result) + if not os.path.isdir(dirname): + os.makedirs(dirname) + if not os.path.exists(result): + stale = True + else: + stale = self.is_stale(resource, path) + if stale: + # write the bytes of the resource to the cache location + with open(result, 'wb') as f: + f.write(resource.bytes) + return result + + +class ResourceBase(object): + def __init__(self, finder, name): + self.finder = finder + self.name = name + + +class Resource(ResourceBase): + """ + A class representing an in-package resource, such as a data file. This is + not normally instantiated by user code, but rather by a + :class:`ResourceFinder` which manages the resource. + """ + is_container = False # Backwards compatibility + + def as_stream(self): + """ + Get the resource as a stream. + + This is not a property to make it obvious that it returns a new stream + each time. + """ + return self.finder.get_stream(self) + + @cached_property + def file_path(self): + global cache + if cache is None: + cache = ResourceCache() + return cache.get(self) + + @cached_property + def bytes(self): + return self.finder.get_bytes(self) + + @cached_property + def size(self): + return self.finder.get_size(self) + + +class ResourceContainer(ResourceBase): + is_container = True # Backwards compatibility + + @cached_property + def resources(self): + return self.finder.get_resources(self) + + +class ResourceFinder(object): + """ + Resource finder for file system resources. + """ + + if sys.platform.startswith('java'): + skipped_extensions = ('.pyc', '.pyo', '.class') + else: + skipped_extensions = ('.pyc', '.pyo') + + def __init__(self, module): + self.module = module + self.loader = getattr(module, '__loader__', None) + self.base = os.path.dirname(getattr(module, '__file__', '')) + + def _adjust_path(self, path): + return os.path.realpath(path) + + def _make_path(self, resource_name): + # Issue #50: need to preserve type of path on Python 2.x + # like os.path._get_sep + if isinstance(resource_name, bytes): # should only happen on 2.x + sep = b'/' + else: + sep = '/' + parts = resource_name.split(sep) + parts.insert(0, self.base) + result = os.path.join(*parts) + return self._adjust_path(result) + + def _find(self, path): + return os.path.exists(path) + + def get_cache_info(self, resource): + return None, resource.path + + def find(self, resource_name): + path = self._make_path(resource_name) + if not self._find(path): + result = None + else: + if self._is_directory(path): + result = ResourceContainer(self, resource_name) + else: + result = Resource(self, resource_name) + result.path = path + return result + + def get_stream(self, resource): + return open(resource.path, 'rb') + + def get_bytes(self, resource): + with open(resource.path, 'rb') as f: + return f.read() + + def get_size(self, resource): + return os.path.getsize(resource.path) + + def get_resources(self, resource): + def allowed(f): + return (f != '__pycache__' and not + f.endswith(self.skipped_extensions)) + return set([f for f in os.listdir(resource.path) if allowed(f)]) + + def is_container(self, resource): + return self._is_directory(resource.path) + + _is_directory = staticmethod(os.path.isdir) + + def iterator(self, resource_name): + resource = self.find(resource_name) + if resource is not None: + todo = [resource] + while todo: + resource = todo.pop(0) + yield resource + if 
resource.is_container: + rname = resource.name + for name in resource.resources: + if not rname: + new_name = name + else: + new_name = '/'.join([rname, name]) + child = self.find(new_name) + if child.is_container: + todo.append(child) + else: + yield child + + +class ZipResourceFinder(ResourceFinder): + """ + Resource finder for resources in .zip files. + """ + def __init__(self, module): + super(ZipResourceFinder, self).__init__(module) + archive = self.loader.archive + self.prefix_len = 1 + len(archive) + # PyPy doesn't have a _files attr on zipimporter, and you can't set one + if hasattr(self.loader, '_files'): + self._files = self.loader._files + else: + self._files = zipimport._zip_directory_cache[archive] + self.index = sorted(self._files) + + def _adjust_path(self, path): + return path + + def _find(self, path): + path = path[self.prefix_len:] + if path in self._files: + result = True + else: + if path and path[-1] != os.sep: + path = path + os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + if not result: + logger.debug('_find failed: %r %r', path, self.loader.prefix) + else: + logger.debug('_find worked: %r %r', path, self.loader.prefix) + return result + + def get_cache_info(self, resource): + prefix = self.loader.archive + path = resource.path[1 + len(prefix):] + return prefix, path + + def get_bytes(self, resource): + return self.loader.get_data(resource.path) + + def get_stream(self, resource): + return io.BytesIO(self.get_bytes(resource)) + + def get_size(self, resource): + path = resource.path[self.prefix_len:] + return self._files[path][3] + + def get_resources(self, resource): + path = resource.path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + plen = len(path) + result = set() + i = bisect.bisect(self.index, path) + while i < len(self.index): + if not self.index[i].startswith(path): + break + s = self.index[i][plen:] + result.add(s.split(os.sep, 1)[0]) # only immediate children + i += 1 + return result + + def _is_directory(self, path): + path = path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + return result + + +_finder_registry = { + type(None): ResourceFinder, + zipimport.zipimporter: ZipResourceFinder +} + +try: + # In Python 3.6, _frozen_importlib -> _frozen_importlib_external + try: + import _frozen_importlib_external as _fi + except ImportError: + import _frozen_importlib as _fi + _finder_registry[_fi.SourceFileLoader] = ResourceFinder + _finder_registry[_fi.FileFinder] = ResourceFinder + # See issue #146 + _finder_registry[_fi.SourcelessFileLoader] = ResourceFinder + del _fi +except (ImportError, AttributeError): + pass + + +def register_finder(loader, finder_maker): + _finder_registry[type(loader)] = finder_maker + + +_finder_cache = {} + + +def finder(package): + """ + Return a resource finder for a package. + :param package: The name of the package. + :return: A :class:`ResourceFinder` instance for the package. 
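+
+    For example (editorial addition), the result is a ZipResourceFinder when
+    the package was imported via zipimport and a plain ResourceFinder when it
+    lives on the file system; instances are cached per package name.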
+ """ + if package in _finder_cache: + result = _finder_cache[package] + else: + if package not in sys.modules: + __import__(package) + module = sys.modules[package] + path = getattr(module, '__path__', None) + if path is None: + raise DistlibException('You cannot get a finder for a module, ' + 'only for a package') + loader = getattr(module, '__loader__', None) + finder_maker = _finder_registry.get(type(loader)) + if finder_maker is None: + raise DistlibException('Unable to locate finder for %r' % package) + result = finder_maker(module) + _finder_cache[package] = result + return result + + +_dummy_module = types.ModuleType(str('__dummy__')) + + +def finder_for_path(path): + """ + Return a resource finder for a path, which should represent a container. + + :param path: The path. + :return: A :class:`ResourceFinder` instance for the path. + """ + result = None + # calls any path hooks, gets importer into cache + pkgutil.get_importer(path) + loader = sys.path_importer_cache.get(path) + finder = _finder_registry.get(type(loader)) + if finder: + module = _dummy_module + module.__file__ = os.path.join(path, '') + module.__loader__ = loader + result = finder(module) + return result diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/scripts.py b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/scripts.py new file mode 100644 index 0000000..195dc3f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/scripts.py @@ -0,0 +1,447 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2023 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from io import BytesIO +import logging +import os +import re +import struct +import sys +import time +from zipfile import ZipInfo + +from .compat import sysconfig, detect_encoding, ZipFile +from .resources import finder +from .util import (FileOperator, get_export_entry, convert_path, get_executable, get_platform, in_venv) + +logger = logging.getLogger(__name__) + +_DEFAULT_MANIFEST = ''' + + + + + + + + + + + + +'''.strip() + +# check if Python is called on the first line with this expression +FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') +SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*- +import re +import sys +if __name__ == '__main__': + from %(module)s import %(import_name)s + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(%(func)s()) +''' + +# Pre-fetch the contents of all executable wrapper stubs. +# This is to address https://github.com/pypa/pip/issues/12666. +# When updating pip, we rename the old pip in place before installing the +# new version. If we try to fetch a wrapper *after* that rename, the finder +# machinery will be confused as the package is no longer available at the +# location where it was imported from. So we load everything into memory in +# advance. 
+ +if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): + # Issue 31: don't hardcode an absolute package name, but + # determine it relative to the current package + DISTLIB_PACKAGE = __name__.rsplit('.', 1)[0] + + WRAPPERS = { + r.name: r.bytes + for r in finder(DISTLIB_PACKAGE).iterator("") + if r.name.endswith(".exe") + } + + +def enquote_executable(executable): + if ' ' in executable: + # make sure we quote only the executable in case of env + # for example /usr/bin/env "/dir with spaces/bin/jython" + # instead of "/usr/bin/env /dir with spaces/bin/jython" + # otherwise whole + if executable.startswith('/usr/bin/env '): + env, _executable = executable.split(' ', 1) + if ' ' in _executable and not _executable.startswith('"'): + executable = '%s "%s"' % (env, _executable) + else: + if not executable.startswith('"'): + executable = '"%s"' % executable + return executable + + +# Keep the old name around (for now), as there is at least one project using it! +_enquote_executable = enquote_executable + + +class ScriptMaker(object): + """ + A class to copy or create scripts from source scripts or callable + specifications. + """ + script_template = SCRIPT_TEMPLATE + + executable = None # for shebangs + + def __init__(self, source_dir, target_dir, add_launchers=True, dry_run=False, fileop=None): + self.source_dir = source_dir + self.target_dir = target_dir + self.add_launchers = add_launchers + self.force = False + self.clobber = False + # It only makes sense to set mode bits on POSIX. + self.set_mode = (os.name == 'posix') or (os.name == 'java' and os._name == 'posix') + self.variants = set(('', 'X.Y')) + self._fileop = fileop or FileOperator(dry_run) + + self._is_nt = os.name == 'nt' or (os.name == 'java' and os._name == 'nt') + self.version_info = sys.version_info + + def _get_alternate_executable(self, executable, options): + if options.get('gui', False) and self._is_nt: # pragma: no cover + dn, fn = os.path.split(executable) + fn = fn.replace('python', 'pythonw') + executable = os.path.join(dn, fn) + return executable + + if sys.platform.startswith('java'): # pragma: no cover + + def _is_shell(self, executable): + """ + Determine if the specified executable is a script + (contains a #! line) + """ + try: + with open(executable) as fp: + return fp.read(2) == '#!' + except (OSError, IOError): + logger.warning('Failed to open %s', executable) + return False + + def _fix_jython_executable(self, executable): + if self._is_shell(executable): + # Workaround for Jython is not needed on Linux systems. + import java + + if java.lang.System.getProperty('os.name') == 'Linux': + return executable + elif executable.lower().endswith('jython.exe'): + # Use wrapper exe for Jython on Windows + return executable + return '/usr/bin/env %s' % executable + + def _build_shebang(self, executable, post_interp): + """ + Build a shebang line. In the simple case (on Windows, or a shebang line + which is not too long or contains spaces) use a simple formulation for + the shebang. Otherwise, use /bin/sh as the executable, with a contrived + shebang which allows the script to run either under Python or sh, using + suitable quoting. Thanks to Harald Nordgren for his input. 
+ + See also: http://www.in-ulm.de/~mascheck/various/shebang/#length + https://hg.mozilla.org/mozilla-central/file/tip/mach + """ + if os.name != 'posix': + simple_shebang = True + elif getattr(sys, "cross_compiling", False): + # In a cross-compiling environment, the shebang will likely be a + # script; this *must* be invoked with the "safe" version of the + # shebang, or else using os.exec() to run the entry script will + # fail, raising "OSError 8 [Errno 8] Exec format error". + simple_shebang = False + else: + # Add 3 for '#!' prefix and newline suffix. + shebang_length = len(executable) + len(post_interp) + 3 + if sys.platform == 'darwin': + max_shebang_length = 512 + else: + max_shebang_length = 127 + simple_shebang = ((b' ' not in executable) and (shebang_length <= max_shebang_length)) + + if simple_shebang: + result = b'#!' + executable + post_interp + b'\n' + else: + result = b'#!/bin/sh\n' + result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' + result += b"' '''\n" + return result + + def _get_shebang(self, encoding, post_interp=b'', options=None): + enquote = True + if self.executable: + executable = self.executable + enquote = False # assume this will be taken care of + elif not sysconfig.is_python_build(): + executable = get_executable() + elif in_venv(): # pragma: no cover + executable = os.path.join(sysconfig.get_path('scripts'), 'python%s' % sysconfig.get_config_var('EXE')) + else: # pragma: no cover + if os.name == 'nt': + # for Python builds from source on Windows, no Python executables with + # a version suffix are created, so we use python.exe + executable = os.path.join(sysconfig.get_config_var('BINDIR'), + 'python%s' % (sysconfig.get_config_var('EXE'))) + else: + executable = os.path.join( + sysconfig.get_config_var('BINDIR'), + 'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE'))) + if options: + executable = self._get_alternate_executable(executable, options) + + if sys.platform.startswith('java'): # pragma: no cover + executable = self._fix_jython_executable(executable) + + # Normalise case for Windows - COMMENTED OUT + # executable = os.path.normcase(executable) + # N.B. The normalising operation above has been commented out: See + # issue #124. Although paths in Windows are generally case-insensitive, + # they aren't always. For example, a path containing a ẞ (which is a + # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a + # LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by + # Windows as equivalent in path names. + + # If the user didn't specify an executable, it may be necessary to + # cater for executable paths with spaces (not uncommon on Windows) + if enquote: + executable = enquote_executable(executable) + # Issue #51: don't use fsencode, since we later try to + # check that the shebang is decodable using utf-8. + executable = executable.encode('utf-8') + # in case of IronPython, play safe and enable frames support + if (sys.platform == 'cli' and '-X:Frames' not in post_interp and + '-X:FullFrames' not in post_interp): # pragma: no cover + post_interp += b' -X:Frames' + shebang = self._build_shebang(executable, post_interp) + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. 
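+        # Editorial illustration: for a short POSIX interpreter path, the
+        # result of _build_shebang() above is just b'#!/usr/bin/python3\n';
+        # for an over-long path (or when cross-compiling) it is the sh/python
+        # polyglot
+        #
+        #     #!/bin/sh
+        #     '''exec' /very/long/prefix/bin/python3 "$0" "$@"
+        #     ' '''
+        #
+        # which sh runs via exec while Python parses it as a string literal.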
+ try: + shebang.decode('utf-8') + except UnicodeDecodeError: # pragma: no cover + raise ValueError('The shebang (%r) is not decodable from utf-8' % shebang) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. + if encoding != 'utf-8': + try: + shebang.decode(encoding) + except UnicodeDecodeError: # pragma: no cover + raise ValueError('The shebang (%r) is not decodable ' + 'from the script encoding (%r)' % (shebang, encoding)) + return shebang + + def _get_script_text(self, entry): + return self.script_template % dict( + module=entry.prefix, import_name=entry.suffix.split('.')[0], func=entry.suffix) + + manifest = _DEFAULT_MANIFEST + + def get_manifest(self, exename): + base = os.path.basename(exename) + return self.manifest % base + + def _write_script(self, names, shebang, script_bytes, filenames, ext): + use_launcher = self.add_launchers and self._is_nt + if not use_launcher: + script_bytes = shebang + script_bytes + else: # pragma: no cover + if ext == 'py': + launcher = self._get_launcher('t') + else: + launcher = self._get_launcher('w') + stream = BytesIO() + with ZipFile(stream, 'w') as zf: + source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH') + if source_date_epoch: + date_time = time.gmtime(int(source_date_epoch))[:6] + zinfo = ZipInfo(filename='__main__.py', date_time=date_time) + zf.writestr(zinfo, script_bytes) + else: + zf.writestr('__main__.py', script_bytes) + zip_data = stream.getvalue() + script_bytes = launcher + shebang + zip_data + for name in names: + outname = os.path.join(self.target_dir, name) + if use_launcher: # pragma: no cover + n, e = os.path.splitext(outname) + if e.startswith('.py'): + outname = n + outname = '%s.exe' % outname + try: + self._fileop.write_binary_file(outname, script_bytes) + except Exception: + # Failed writing an executable - it might be in use. + logger.warning('Failed to write executable - trying to ' + 'use .deleteme logic') + dfname = '%s.deleteme' % outname + if os.path.exists(dfname): + os.remove(dfname) # Not allowed to fail here + os.rename(outname, dfname) # nor here + self._fileop.write_binary_file(outname, script_bytes) + logger.debug('Able to replace executable using ' + '.deleteme logic') + try: + os.remove(dfname) + except Exception: + pass # still in use - ignore error + else: + if self._is_nt and not outname.endswith('.' 
+ ext): # pragma: no cover + outname = '%s.%s' % (outname, ext) + if os.path.exists(outname) and not self.clobber: + logger.warning('Skipping existing file %s', outname) + continue + self._fileop.write_binary_file(outname, script_bytes) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + + variant_separator = '-' + + def get_script_filenames(self, name): + result = set() + if '' in self.variants: + result.add(name) + if 'X' in self.variants: + result.add('%s%s' % (name, self.version_info[0])) + if 'X.Y' in self.variants: + result.add('%s%s%s.%s' % (name, self.variant_separator, self.version_info[0], self.version_info[1])) + return result + + def _make_script(self, entry, filenames, options=None): + post_interp = b'' + if options: + args = options.get('interpreter_args', []) + if args: + args = ' %s' % ' '.join(args) + post_interp = args.encode('utf-8') + shebang = self._get_shebang('utf-8', post_interp, options=options) + script = self._get_script_text(entry).encode('utf-8') + scriptnames = self.get_script_filenames(entry.name) + if options and options.get('gui', False): + ext = 'pyw' + else: + ext = 'py' + self._write_script(scriptnames, shebang, script, filenames, ext) + + def _copy_script(self, script, filenames): + adjust = False + script = os.path.join(self.source_dir, convert_path(script)) + outname = os.path.join(self.target_dir, os.path.basename(script)) + if not self.force and not self._fileop.newer(script, outname): + logger.debug('not copying %s (up-to-date)', script) + return + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we can read the + # script. + try: + f = open(script, 'rb') + except IOError: # pragma: no cover + if not self.dry_run: + raise + f = None + else: + first_line = f.readline() + if not first_line: # pragma: no cover + logger.warning('%s is an empty file (skipping)', script) + return + + match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if not adjust: + if f: + f.close() + self._fileop.copy_file(script, outname) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + else: + logger.info('copying and adjusting %s -> %s', script, self.target_dir) + if not self._fileop.dry_run: + encoding, lines = detect_encoding(f.readline) + f.seek(0) + shebang = self._get_shebang(encoding, post_interp) + if b'pythonw' in first_line: # pragma: no cover + ext = 'pyw' + else: + ext = 'py' + n = os.path.basename(outname) + self._write_script([n], shebang, f.read(), filenames, ext) + if f: + f.close() + + @property + def dry_run(self): + return self._fileop.dry_run + + @dry_run.setter + def dry_run(self, value): + self._fileop.dry_run = value + + if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover + # Executable launcher support. + # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ + + def _get_launcher(self, kind): + if struct.calcsize('P') == 8: # 64-bit + bits = '64' + else: + bits = '32' + platform_suffix = '-arm' if get_platform() == 'win-arm64' else '' + name = '%s%s%s.exe' % (kind, bits, platform_suffix) + if name not in WRAPPERS: + msg = ('Unable to find resource %s in package %s' % + (name, DISTLIB_PACKAGE)) + raise ValueError(msg) + return WRAPPERS[name] + + # Public API follows + + def make(self, specification, options=None): + """ + Make a script. 
+
+        :param specification: The specification, which is either a valid export
+                              entry specification (to make a script from a
+                              callable) or a filename (to make a script by
+                              copying from a source location).
+        :param options: A dictionary of options controlling script generation.
+        :return: A list of all absolute pathnames written to.
+        """
+        filenames = []
+        entry = get_export_entry(specification)
+        if entry is None:
+            self._copy_script(specification, filenames)
+        else:
+            self._make_script(entry, filenames, options=options)
+        return filenames
+
+    def make_multiple(self, specifications, options=None):
+        """
+        Take a list of specifications and make scripts from them.
+
+        :param specifications: A list of specifications.
+        :return: A list of all absolute pathnames written to.
+        """
+        filenames = []
+        for specification in specifications:
+            filenames.extend(self.make(specification, options))
+        return filenames
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t32.exe b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t32.exe
new file mode 100644
index 0000000..52154f0
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t32.exe differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t64-arm.exe b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t64-arm.exe
new file mode 100644
index 0000000..e1ab8f8
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t64-arm.exe differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t64.exe b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t64.exe
new file mode 100644
index 0000000..e8bebdb
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/t64.exe differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/util.py b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/util.py
new file mode 100644
index 0000000..0d5bd7a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/util.py
@@ -0,0 +1,1984 @@
+#
+# Copyright (C) 2012-2023 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import codecs
+from collections import deque
+import contextlib
+import csv
+from glob import iglob as std_iglob
+import io
+import json
+import logging
+import os
+import py_compile
+import re
+import socket
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+import subprocess
+import sys
+import tarfile
+import tempfile
+import textwrap
+
+try:
+    import threading
+except ImportError:  # pragma: no cover
+    import dummy_threading as threading
+import time
+
+from . import DistlibException
+from .compat import (string_types, text_type, shutil, raw_input, StringIO, cache_from_source, urlopen, urljoin, httplib,
+                     xmlrpclib, HTTPHandler, BaseConfigurator, valid_ident, Container, configparser, URLError, ZipFile,
+                     fsdecode, unquote, urlparse)
+
+logger = logging.getLogger(__name__)
+
+#
+# Requirement parsing code as per PEP 508
+#
+
+IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
+VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
+COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
+MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
+OR = re.compile(r'^or\b\s*')
+AND = re.compile(r'^and\b\s*')
+NON_SPACE = re.compile(r'(\S+)\s*')
+STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
+
+
+def parse_marker(marker_string):
+    """
+    Parse a marker string and return a dictionary containing a marker expression.
+ + The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in + the expression grammar, or strings. A string contained in quotes is to be + interpreted as a literal string, and a string not contained in quotes is a + variable (such as os_name). + """ + + def marker_var(remaining): + # either identifier, or literal string + m = IDENTIFIER.match(remaining) + if m: + result = m.groups()[0] + remaining = remaining[m.end():] + elif not remaining: + raise SyntaxError('unexpected end of input') + else: + q = remaining[0] + if q not in '\'"': + raise SyntaxError('invalid expression: %s' % remaining) + oq = '\'"'.replace(q, '') + remaining = remaining[1:] + parts = [q] + while remaining: + # either a string chunk, or oq, or q to terminate + if remaining[0] == q: + break + elif remaining[0] == oq: + parts.append(oq) + remaining = remaining[1:] + else: + m = STRING_CHUNK.match(remaining) + if not m: + raise SyntaxError('error in string literal: %s' % remaining) + parts.append(m.groups()[0]) + remaining = remaining[m.end():] + else: + s = ''.join(parts) + raise SyntaxError('unterminated string: %s' % s) + parts.append(q) + result = ''.join(parts) + remaining = remaining[1:].lstrip() # skip past closing quote + return result, remaining + + def marker_expr(remaining): + if remaining and remaining[0] == '(': + result, remaining = marker(remaining[1:].lstrip()) + if remaining[0] != ')': + raise SyntaxError('unterminated parenthesis: %s' % remaining) + remaining = remaining[1:].lstrip() + else: + lhs, remaining = marker_var(remaining) + while remaining: + m = MARKER_OP.match(remaining) + if not m: + break + op = m.groups()[0] + remaining = remaining[m.end():] + rhs, remaining = marker_var(remaining) + lhs = {'op': op, 'lhs': lhs, 'rhs': rhs} + result = lhs + return result, remaining + + def marker_and(remaining): + lhs, remaining = marker_expr(remaining) + while remaining: + m = AND.match(remaining) + if not m: + break + remaining = remaining[m.end():] + rhs, remaining = marker_expr(remaining) + lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs} + return lhs, remaining + + def marker(remaining): + lhs, remaining = marker_and(remaining) + while remaining: + m = OR.match(remaining) + if not m: + break + remaining = remaining[m.end():] + rhs, remaining = marker_and(remaining) + lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs} + return lhs, remaining + + return marker(marker_string) + + +def parse_requirement(req): + """ + Parse a requirement passed in as a string. Return a Container + whose attributes contain the various parts of the requirement. 
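+
+    For example (editorial addition), parsing
+    "requests[security] (>= 2.8.1); python_version < '3'" yields name
+    'requests', extras ['security'], constraints [('>=', '2.8.1')] and a
+    marker expression comparing python_version with '3'.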
+ """ + remaining = req.strip() + if not remaining or remaining.startswith('#'): + return None + m = IDENTIFIER.match(remaining) + if not m: + raise SyntaxError('name expected: %s' % remaining) + distname = m.groups()[0] + remaining = remaining[m.end():] + extras = mark_expr = versions = uri = None + if remaining and remaining[0] == '[': + i = remaining.find(']', 1) + if i < 0: + raise SyntaxError('unterminated extra: %s' % remaining) + s = remaining[1:i] + remaining = remaining[i + 1:].lstrip() + extras = [] + while s: + m = IDENTIFIER.match(s) + if not m: + raise SyntaxError('malformed extra: %s' % s) + extras.append(m.groups()[0]) + s = s[m.end():] + if not s: + break + if s[0] != ',': + raise SyntaxError('comma expected in extras: %s' % s) + s = s[1:].lstrip() + if not extras: + extras = None + if remaining: + if remaining[0] == '@': + # it's a URI + remaining = remaining[1:].lstrip() + m = NON_SPACE.match(remaining) + if not m: + raise SyntaxError('invalid URI: %s' % remaining) + uri = m.groups()[0] + t = urlparse(uri) + # there are issues with Python and URL parsing, so this test + # is a bit crude. See bpo-20271, bpo-23505. Python doesn't + # always parse invalid URLs correctly - it should raise + # exceptions for malformed URLs + if not (t.scheme and t.netloc): + raise SyntaxError('Invalid URL: %s' % uri) + remaining = remaining[m.end():].lstrip() + else: + + def get_versions(ver_remaining): + """ + Return a list of operator, version tuples if any are + specified, else None. + """ + m = COMPARE_OP.match(ver_remaining) + versions = None + if m: + versions = [] + while True: + op = m.groups()[0] + ver_remaining = ver_remaining[m.end():] + m = VERSION_IDENTIFIER.match(ver_remaining) + if not m: + raise SyntaxError('invalid version: %s' % ver_remaining) + v = m.groups()[0] + versions.append((op, v)) + ver_remaining = ver_remaining[m.end():] + if not ver_remaining or ver_remaining[0] != ',': + break + ver_remaining = ver_remaining[1:].lstrip() + # Some packages have a trailing comma which would break things + # See issue #148 + if not ver_remaining: + break + m = COMPARE_OP.match(ver_remaining) + if not m: + raise SyntaxError('invalid constraint: %s' % ver_remaining) + if not versions: + versions = None + return versions, ver_remaining + + if remaining[0] != '(': + versions, remaining = get_versions(remaining) + else: + i = remaining.find(')', 1) + if i < 0: + raise SyntaxError('unterminated parenthesis: %s' % remaining) + s = remaining[1:i] + remaining = remaining[i + 1:].lstrip() + # As a special diversion from PEP 508, allow a version number + # a.b.c in parentheses as a synonym for ~= a.b.c (because this + # is allowed in earlier PEPs) + if COMPARE_OP.match(s): + versions, _ = get_versions(s) + else: + m = VERSION_IDENTIFIER.match(s) + if not m: + raise SyntaxError('invalid constraint: %s' % s) + v = m.groups()[0] + s = s[m.end():].lstrip() + if s: + raise SyntaxError('invalid constraint: %s' % s) + versions = [('~=', v)] + + if remaining: + if remaining[0] != ';': + raise SyntaxError('invalid requirement: %s' % remaining) + remaining = remaining[1:].lstrip() + + mark_expr, remaining = parse_marker(remaining) + + if remaining and remaining[0] != '#': + raise SyntaxError('unexpected trailing data: %s' % remaining) + + if not versions: + rs = distname + else: + rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions])) + return Container(name=distname, extras=extras, constraints=versions, marker=mark_expr, url=uri, requirement=rs) + + +def 
get_resources_dests(resources_root, rules): + """Find destinations for resources files""" + + def get_rel_path(root, path): + # normalizes and returns a lstripped-/-separated path + root = root.replace(os.path.sep, '/') + path = path.replace(os.path.sep, '/') + assert path.startswith(root) + return path[len(root):].lstrip('/') + + destinations = {} + for base, suffix, dest in rules: + prefix = os.path.join(resources_root, base) + for abs_base in iglob(prefix): + abs_glob = os.path.join(abs_base, suffix) + for abs_path in iglob(abs_glob): + resource_file = get_rel_path(resources_root, abs_path) + if dest is None: # remove the entry if it was here + destinations.pop(resource_file, None) + else: + rel_path = get_rel_path(abs_base, abs_path) + rel_dest = dest.replace(os.path.sep, '/').rstrip('/') + destinations[resource_file] = rel_dest + '/' + rel_path + return destinations + + +def in_venv(): + if hasattr(sys, 'real_prefix'): + # virtualenv venvs + result = True + else: + # PEP 405 venvs + result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) + return result + + +def get_executable(): + # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as + # changes to the stub launcher mean that sys.executable always points + # to the stub on OS X + # if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' + # in os.environ): + # result = os.environ['__PYVENV_LAUNCHER__'] + # else: + # result = sys.executable + # return result + # Avoid normcasing: see issue #143 + # result = os.path.normcase(sys.executable) + result = sys.executable + if not isinstance(result, text_type): + result = fsdecode(result) + return result + + +def proceed(prompt, allowed_chars, error_prompt=None, default=None): + p = prompt + while True: + s = raw_input(p) + p = prompt + if not s and default: + s = default + if s: + c = s[0].lower() + if c in allowed_chars: + break + if error_prompt: + p = '%c: %s\n%s' % (c, error_prompt, prompt) + return c + + +def extract_by_key(d, keys): + if isinstance(keys, string_types): + keys = keys.split() + result = {} + for key in keys: + if key in d: + result[key] = d[key] + return result + + +def read_exports(stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + # Try to load as JSON, falling back on legacy format + data = stream.read() + stream = StringIO(data) + try: + jdata = json.load(stream) + result = jdata['extensions']['python.exports']['exports'] + for group, entries in result.items(): + for k, v in entries.items(): + s = '%s = %s' % (k, v) + entry = get_export_entry(s) + assert entry is not None + entries[k] = entry + return result + except Exception: + stream.seek(0, 0) + + def read_stream(cp, stream): + if hasattr(cp, 'read_file'): + cp.read_file(stream) + else: + cp.readfp(stream) + + cp = configparser.ConfigParser() + try: + read_stream(cp, stream) + except configparser.MissingSectionHeaderError: + stream.close() + data = textwrap.dedent(data) + stream = StringIO(data) + read_stream(cp, stream) + + result = {} + for key in cp.sections(): + result[key] = entries = {} + for name, value in cp.items(key): + s = '%s = %s' % (name, value) + entry = get_export_entry(s) + assert entry is not None + # entry.dist = self + entries[name] = entry + return result + + +def write_exports(exports, stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getwriter('utf-8')(stream) + cp = configparser.ConfigParser() + for k, v in exports.items(): + # TODO check k, v for valid values + 
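+        # Editorial illustration: an ExportEntry named 'foo' with prefix
+        # 'pkg.mod', suffix 'main' and flags ['gui'] is written below as
+        #
+        #     foo = pkg.mod:main [gui]
+        #
+        # i.e. the same export-entry syntax that get_export_entry() parses.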
cp.add_section(k) + for entry in v.values(): + if entry.suffix is None: + s = entry.prefix + else: + s = '%s:%s' % (entry.prefix, entry.suffix) + if entry.flags: + s = '%s [%s]' % (s, ', '.join(entry.flags)) + cp.set(k, entry.name, s) + cp.write(stream) + + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + + +@contextlib.contextmanager +def chdir(d): + cwd = os.getcwd() + try: + os.chdir(d) + yield + finally: + os.chdir(cwd) + + +@contextlib.contextmanager +def socket_timeout(seconds=15): + cto = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(seconds) + yield + finally: + socket.setdefaulttimeout(cto) + + +class cached_property(object): + + def __init__(self, func): + self.func = func + # for attr in ('__name__', '__module__', '__doc__'): + # setattr(self, attr, getattr(func, attr, None)) + + def __get__(self, obj, cls=None): + if obj is None: + return self + value = self.func(obj) + object.__setattr__(obj, self.func.__name__, value) + # obj.__dict__[self.func.__name__] = value = self.func(obj) + return value + + +def convert_path(pathname): + """Return 'pathname' as a name that will work on the native filesystem. + + The path is split on '/' and put back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + """ + if os.sep == '/': + return pathname + if not pathname: + return pathname + if pathname[0] == '/': + raise ValueError("path '%s' cannot be absolute" % pathname) + if pathname[-1] == '/': + raise ValueError("path '%s' cannot end with '/'" % pathname) + + paths = pathname.split('/') + while os.curdir in paths: + paths.remove(os.curdir) + if not paths: + return os.curdir + return os.path.join(*paths) + + +class FileOperator(object): + + def __init__(self, dry_run=False): + self.dry_run = dry_run + self.ensured = set() + self._init_record() + + def _init_record(self): + self.record = False + self.files_written = set() + self.dirs_created = set() + + def record_as_written(self, path): + if self.record: + self.files_written.add(path) + + def newer(self, source, target): + """Tell if the target is newer than the source. + + Returns true if 'source' exists and is more recently modified than + 'target', or if 'source' exists and 'target' doesn't. + + Returns false if both exist and 'target' is the same age or younger + than 'source'. Raise PackagingFileError if 'source' does not exist. + + Note that this test is not very accurate: files created in the same + second will have the same "age". + """ + if not os.path.exists(source): + raise DistlibException("file '%r' does not exist" % os.path.abspath(source)) + if not os.path.exists(target): + return True + + return os.stat(source).st_mtime > os.stat(target).st_mtime + + def copy_file(self, infile, outfile, check=True): + """Copy a file respecting dry-run and force flags. 
+ """ + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying %s to %s', infile, outfile) + if not self.dry_run: + msg = None + if check: + if os.path.islink(outfile): + msg = '%s is a symlink' % outfile + elif os.path.exists(outfile) and not os.path.isfile(outfile): + msg = '%s is a non-regular file' % outfile + if msg: + raise ValueError(msg + ' which would be overwritten') + shutil.copyfile(infile, outfile) + self.record_as_written(outfile) + + def copy_stream(self, instream, outfile, encoding=None): + assert not os.path.isdir(outfile) + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying stream %s to %s', instream, outfile) + if not self.dry_run: + if encoding is None: + outstream = open(outfile, 'wb') + else: + outstream = codecs.open(outfile, 'w', encoding=encoding) + try: + shutil.copyfileobj(instream, outstream) + finally: + outstream.close() + self.record_as_written(outfile) + + def write_binary_file(self, path, data): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + if os.path.exists(path): + os.remove(path) + with open(path, 'wb') as f: + f.write(data) + self.record_as_written(path) + + def write_text_file(self, path, data, encoding): + self.write_binary_file(path, data.encode(encoding)) + + def set_mode(self, bits, mask, files): + if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'): + # Set the executable bits (owner, group, and world) on + # all the files specified. + for f in files: + if self.dry_run: + logger.info("changing mode of %s", f) + else: + mode = (os.stat(f).st_mode | bits) & mask + logger.info("changing mode of %s to %o", f, mode) + os.chmod(f, mode) + + set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) + + def ensure_dir(self, path): + path = os.path.abspath(path) + if path not in self.ensured and not os.path.exists(path): + self.ensured.add(path) + d, f = os.path.split(path) + self.ensure_dir(d) + logger.info('Creating %s' % path) + if not self.dry_run: + os.mkdir(path) + if self.record: + self.dirs_created.add(path) + + def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False): + dpath = cache_from_source(path, not optimize) + logger.info('Byte-compiling %s to %s', path, dpath) + if not self.dry_run: + if force or self.newer(path, dpath): + if not prefix: + diagpath = None + else: + assert path.startswith(prefix) + diagpath = path[len(prefix):] + compile_kwargs = {} + if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'): + if not isinstance(hashed_invalidation, py_compile.PycInvalidationMode): + hashed_invalidation = py_compile.PycInvalidationMode.CHECKED_HASH + compile_kwargs['invalidation_mode'] = hashed_invalidation + py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error + self.record_as_written(dpath) + return dpath + + def ensure_removed(self, path): + if os.path.exists(path): + if os.path.isdir(path) and not os.path.islink(path): + logger.debug('Removing directory tree at %s', path) + if not self.dry_run: + shutil.rmtree(path) + if self.record: + if path in self.dirs_created: + self.dirs_created.remove(path) + else: + if os.path.islink(path): + s = 'link' + else: + s = 'file' + logger.debug('Removing %s %s', s, path) + if not self.dry_run: + os.remove(path) + if self.record: + if path in self.files_written: + self.files_written.remove(path) + + def is_writable(self, path): + result = False + while not result: + if os.path.exists(path): + result = os.access(path, os.W_OK) + break + parent = 
os.path.dirname(path)
+            if parent == path:
+                break
+            path = parent
+        return result
+
+    def commit(self):
+        """
+        Commit recorded changes, turn off recording, return
+        changes.
+        """
+        assert self.record
+        result = self.files_written, self.dirs_created
+        self._init_record()
+        return result
+
+    def rollback(self):
+        if not self.dry_run:
+            for f in list(self.files_written):
+                if os.path.exists(f):
+                    os.remove(f)
+            # dirs should all be empty now, except perhaps for
+            # __pycache__ subdirs
+            # reverse so that subdirs appear before their parents
+            dirs = sorted(self.dirs_created, reverse=True)
+            for d in dirs:
+                flist = os.listdir(d)
+                if flist:
+                    assert flist == ['__pycache__']
+                    sd = os.path.join(d, flist[0])
+                    os.rmdir(sd)
+                os.rmdir(d)  # should fail if non-empty
+        self._init_record()
+
+
+def resolve(module_name, dotted_path):
+    if module_name in sys.modules:
+        mod = sys.modules[module_name]
+    else:
+        mod = __import__(module_name)
+    if dotted_path is None:
+        result = mod
+    else:
+        parts = dotted_path.split('.')
+        result = getattr(mod, parts.pop(0))
+        for p in parts:
+            result = getattr(result, p)
+    return result
+
+
+class ExportEntry(object):
+
+    def __init__(self, name, prefix, suffix, flags):
+        self.name = name
+        self.prefix = prefix
+        self.suffix = suffix
+        self.flags = flags
+
+    @cached_property
+    def value(self):
+        return resolve(self.prefix, self.suffix)
+
+    def __repr__(self):  # pragma: no cover
+        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, self.suffix, self.flags)
+
+    def __eq__(self, other):
+        if not isinstance(other, ExportEntry):
+            result = False
+        else:
+            result = (self.name == other.name and self.prefix == other.prefix and self.suffix == other.suffix and
+                      self.flags == other.flags)
+        return result
+
+    __hash__ = object.__hash__
+
+
+ENTRY_RE = re.compile(
+    r'''(?P<name>([^\[]\S*))
+    \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
+    \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
+    ''', re.VERBOSE)
+
+
+def get_export_entry(specification):
+    m = ENTRY_RE.search(specification)
+    if not m:
+        result = None
+        if '[' in specification or ']' in specification:
+            raise DistlibException("Invalid specification "
+                                   "'%s'" % specification)
+    else:
+        d = m.groupdict()
+        name = d['name']
+        path = d['callable']
+        colons = path.count(':')
+        if colons == 0:
+            prefix, suffix = path, None
+        else:
+            if colons != 1:
+                raise DistlibException("Invalid specification "
+                                       "'%s'" % specification)
+            prefix, suffix = path.split(':')
+        flags = d['flags']
+        if flags is None:
+            if '[' in specification or ']' in specification:
+                raise DistlibException("Invalid specification "
+                                       "'%s'" % specification)
+            flags = []
+        else:
+            flags = [f.strip() for f in flags.split(',')]
+        result = ExportEntry(name, prefix, suffix, flags)
+    return result
+
+
+def get_cache_base(suffix=None):
+    """
+    Return the default base location for distlib caches. If the directory does
+    not exist, it is created. Use the suffix provided for the base directory,
+    and default to '.distlib' if it isn't provided.
+
+    On Windows, if LOCALAPPDATA is defined in the environment, then it is
+    assumed to be a directory, and will be the parent directory of the result.
+    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
+    directory - using os.path.expanduser('~') - will be the parent directory
+    of the result.
+
+    The result is just the directory '.distlib' in the parent directory as
+    determined above, or with the name specified with ``suffix``.
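+
+    For example (editorial addition), on POSIX with a home directory of
+    /home/user, get_cache_base() returns '/home/user/.distlib' and
+    get_cache_base('.mycache') returns '/home/user/.mycache'.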
+ """ + if suffix is None: + suffix = '.distlib' + if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: + result = os.path.expandvars('$localappdata') + else: + # Assume posix, or old Windows + result = os.path.expanduser('~') + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if os.path.isdir(result): + usable = os.access(result, os.W_OK) + if not usable: + logger.warning('Directory exists but is not writable: %s', result) + else: + try: + os.makedirs(result) + usable = True + except OSError: + logger.warning('Unable to create %s', result, exc_info=True) + usable = False + if not usable: + result = tempfile.mkdtemp() + logger.warning('Default location unusable, using %s', result) + return os.path.join(result, suffix) + + +def path_to_cache_dir(path, use_abspath=True): + """ + Convert an absolute path to a directory name for use in a cache. + + The algorithm used is: + + #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. + #. Any occurrence of ``os.sep`` is replaced with ``'--'``. + #. ``'.cache'`` is appended. + """ + d, p = os.path.splitdrive(os.path.abspath(path) if use_abspath else path) + if d: + d = d.replace(':', '---') + p = p.replace(os.sep, '--') + return d + p + '.cache' + + +def ensure_slash(s): + if not s.endswith('/'): + return s + '/' + return s + + +def parse_credentials(netloc): + username = password = None + if '@' in netloc: + prefix, netloc = netloc.rsplit('@', 1) + if ':' not in prefix: + username = prefix + else: + username, password = prefix.split(':', 1) + if username: + username = unquote(username) + if password: + password = unquote(password) + return username, password, netloc + + +def get_process_umask(): + result = os.umask(0o22) + os.umask(result) + return result + + +def is_string_sequence(seq): + result = True + i = None + for i, s in enumerate(seq): + if not isinstance(s, string_types): + result = False + break + assert i is not None + return result + + +PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' + '([a-z0-9_.+-]+)', re.I) +PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') + + +def split_filename(filename, project_name=None): + """ + Extract name, version, python version from a filename (no extension) + + Return name, version, pyver or None + """ + result = None + pyver = None + filename = unquote(filename).replace(' ', '-') + m = PYTHON_VERSION.search(filename) + if m: + pyver = m.group(1) + filename = filename[:m.start()] + if project_name and len(filename) > len(project_name) + 1: + m = re.match(re.escape(project_name) + r'\b', filename) + if m: + n = m.end() + result = filename[:n], filename[n + 1:], pyver + if result is None: + m = PROJECT_NAME_AND_VERSION.match(filename) + if m: + result = m.group(1), m.group(3), pyver + return result + + +# Allow spaces in name because of legacy dists like "Twisted Core" +NAME_VERSION_RE = re.compile(r'(?P[\w .-]+)\s*' + r'\(\s*(?P[^\s)]+)\)$') + + +def parse_name_and_version(p): + """ + A utility method used to get name and version from a string. + + From e.g. a Provides-Dist value. + + :param p: A value in a form 'foo (1.0)' + :return: The name and version as a tuple. 
+ """ + m = NAME_VERSION_RE.match(p) + if not m: + raise DistlibException('Ill-formed name/version string: \'%s\'' % p) + d = m.groupdict() + return d['name'].strip().lower(), d['ver'] + + +def get_extras(requested, available): + result = set() + requested = set(requested or []) + available = set(available or []) + if '*' in requested: + requested.remove('*') + result |= available + for r in requested: + if r == '-': + result.add(r) + elif r.startswith('-'): + unwanted = r[1:] + if unwanted not in available: + logger.warning('undeclared extra: %s' % unwanted) + if unwanted in result: + result.remove(unwanted) + else: + if r not in available: + logger.warning('undeclared extra: %s' % r) + result.add(r) + return result + + +# +# Extended metadata functionality +# + + +def _get_external_data(url): + result = {} + try: + # urlopen might fail if it runs into redirections, + # because of Python issue #13696. Fixed in locators + # using a custom redirect handler. + resp = urlopen(url) + headers = resp.info() + ct = headers.get('Content-Type') + if not ct.startswith('application/json'): + logger.debug('Unexpected response for JSON request: %s', ct) + else: + reader = codecs.getreader('utf-8')(resp) + # data = reader.read().decode('utf-8') + # result = json.loads(data) + result = json.load(reader) + except Exception as e: + logger.exception('Failed to get external data for %s: %s', url, e) + return result + + +_external_data_base_url = 'https://www.red-dove.com/pypi/projects/' + + +def get_project_data(name): + url = '%s/%s/project.json' % (name[0].upper(), name) + url = urljoin(_external_data_base_url, url) + result = _get_external_data(url) + return result + + +def get_package_data(name, version): + url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) + url = urljoin(_external_data_base_url, url) + return _get_external_data(url) + + +class Cache(object): + """ + A class implementing a cache for resources that need to live in the file system + e.g. shared libraries. This class was moved from resources to here because it + could be used by other modules, e.g. the wheel module. + """ + + def __init__(self, base): + """ + Initialise an instance. + + :param base: The base directory where the cache should be located. + """ + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if not os.path.isdir(base): # pragma: no cover + os.makedirs(base) + if (os.stat(base).st_mode & 0o77) != 0: + logger.warning('Directory \'%s\' is not private', base) + self.base = os.path.abspath(os.path.normpath(base)) + + def prefix_to_dir(self, prefix, use_abspath=True): + """ + Converts a resource prefix to a directory name in the cache. + """ + return path_to_cache_dir(prefix, use_abspath=use_abspath) + + def clear(self): + """ + Clear the cache. + """ + not_removed = [] + for fn in os.listdir(self.base): + fn = os.path.join(self.base, fn) + try: + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + except Exception: + not_removed.append(fn) + return not_removed + + +class EventMixin(object): + """ + A very simple publish/subscribe system. + """ + + def __init__(self): + self._subscribers = {} + + def add(self, event, subscriber, append=True): + """ + Add a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be added (and called when the + event is published). 
+ :param append: Whether to append or prepend the subscriber to an + existing subscriber list for the event. + """ + subs = self._subscribers + if event not in subs: + subs[event] = deque([subscriber]) + else: + sq = subs[event] + if append: + sq.append(subscriber) + else: + sq.appendleft(subscriber) + + def remove(self, event, subscriber): + """ + Remove a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be removed. + """ + subs = self._subscribers + if event not in subs: + raise ValueError('No subscribers: %r' % event) + subs[event].remove(subscriber) + + def get_subscribers(self, event): + """ + Return an iterator for the subscribers for an event. + :param event: The event to return subscribers for. + """ + return iter(self._subscribers.get(event, ())) + + def publish(self, event, *args, **kwargs): + """ + Publish a event and return a list of values returned by its + subscribers. + + :param event: The event to publish. + :param args: The positional arguments to pass to the event's + subscribers. + :param kwargs: The keyword arguments to pass to the event's + subscribers. + """ + result = [] + for subscriber in self.get_subscribers(event): + try: + value = subscriber(event, *args, **kwargs) + except Exception: + logger.exception('Exception during event publication') + value = None + result.append(value) + logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result) + return result + + +# +# Simple sequencing +# +class Sequencer(object): + + def __init__(self): + self._preds = {} + self._succs = {} + self._nodes = set() # nodes with no preds/succs + + def add_node(self, node): + self._nodes.add(node) + + def remove_node(self, node, edges=False): + if node in self._nodes: + self._nodes.remove(node) + if edges: + for p in set(self._preds.get(node, ())): + self.remove(p, node) + for s in set(self._succs.get(node, ())): + self.remove(node, s) + # Remove empties + for k, v in list(self._preds.items()): + if not v: + del self._preds[k] + for k, v in list(self._succs.items()): + if not v: + del self._succs[k] + + def add(self, pred, succ): + assert pred != succ + self._preds.setdefault(succ, set()).add(pred) + self._succs.setdefault(pred, set()).add(succ) + + def remove(self, pred, succ): + assert pred != succ + try: + preds = self._preds[succ] + succs = self._succs[pred] + except KeyError: # pragma: no cover + raise ValueError('%r not a successor of anything' % succ) + try: + preds.remove(pred) + succs.remove(succ) + except KeyError: # pragma: no cover + raise ValueError('%r not a successor of %r' % (succ, pred)) + + def is_step(self, step): + return (step in self._preds or step in self._succs or step in self._nodes) + + def get_steps(self, final): + if not self.is_step(final): + raise ValueError('Unknown: %r' % final) + result = [] + todo = [] + seen = set() + todo.append(final) + while todo: + step = todo.pop(0) + if step in seen: + # if a step was already seen, + # move it to the end (so it will appear earlier + # when reversed on return) ... 
but not for the + # final step, as that would be confusing for + # users + if step != final: + result.remove(step) + result.append(step) + else: + seen.add(step) + result.append(step) + preds = self._preds.get(step, ()) + todo.extend(preds) + return reversed(result) + + @property + def strong_connections(self): + # http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + index_counter = [0] + stack = [] + lowlinks = {} + index = {} + result = [] + + graph = self._succs + + def strongconnect(node): + # set the depth index for this node to the smallest unused index + index[node] = index_counter[0] + lowlinks[node] = index_counter[0] + index_counter[0] += 1 + stack.append(node) + + # Consider successors + try: + successors = graph[node] + except Exception: + successors = [] + for successor in successors: + if successor not in lowlinks: + # Successor has not yet been visited + strongconnect(successor) + lowlinks[node] = min(lowlinks[node], lowlinks[successor]) + elif successor in stack: + # the successor is in the stack and hence in the current + # strongly connected component (SCC) + lowlinks[node] = min(lowlinks[node], index[successor]) + + # If `node` is a root node, pop the stack and generate an SCC + if lowlinks[node] == index[node]: + connected_component = [] + + while True: + successor = stack.pop() + connected_component.append(successor) + if successor == node: + break + component = tuple(connected_component) + # storing the result + result.append(component) + + for node in graph: + if node not in lowlinks: + strongconnect(node) + + return result + + @property + def dot(self): + result = ['digraph G {'] + for succ in self._preds: + preds = self._preds[succ] + for pred in preds: + result.append(' %s -> %s;' % (pred, succ)) + for node in self._nodes: + result.append(' %s;' % node) + result.append('}') + return '\n'.join(result) + + +# +# Unarchiving functionality for zip, tar, tgz, tbz, whl +# + +ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz', '.whl') + + +def unarchive(archive_filename, dest_dir, format=None, check=True): + + def check_path(path): + if not isinstance(path, text_type): + path = path.decode('utf-8') + p = os.path.abspath(os.path.join(dest_dir, path)) + if not p.startswith(dest_dir) or p[plen] != os.sep: + raise ValueError('path outside destination: %r' % p) + + dest_dir = os.path.abspath(dest_dir) + plen = len(dest_dir) + archive = None + if format is None: + if archive_filename.endswith(('.zip', '.whl')): + format = 'zip' + elif archive_filename.endswith(('.tar.gz', '.tgz')): + format = 'tgz' + mode = 'r:gz' + elif archive_filename.endswith(('.tar.bz2', '.tbz')): + format = 'tbz' + mode = 'r:bz2' + elif archive_filename.endswith('.tar'): + format = 'tar' + mode = 'r' + else: # pragma: no cover + raise ValueError('Unknown format for %r' % archive_filename) + try: + if format == 'zip': + archive = ZipFile(archive_filename, 'r') + if check: + names = archive.namelist() + for name in names: + check_path(name) + else: + archive = tarfile.open(archive_filename, mode) + if check: + names = archive.getnames() + for name in names: + check_path(name) + if format != 'zip' and sys.version_info[0] < 3: + # See Python issue 17153. If the dest path contains Unicode, + # tarfile extraction fails on Python 2.x if a member path name + # contains non-ASCII characters - it leads to an implicit + # bytes -> unicode conversion using ASCII to decode. 
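+ # Decoding each member name to text up front, as below, avoids + # triggering that implicit ASCII decode during extraction.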
+ for tarinfo in archive.getmembers(): + if not isinstance(tarinfo.name, text_type): + tarinfo.name = tarinfo.name.decode('utf-8') + + # Limit extraction of dangerous items, if this Python + # allows it easily. If not, just trust the input. + # See: https://docs.python.org/3/library/tarfile.html#extraction-filters + def extraction_filter(member, path): + """Run tarfile.tar_filter, but raise the expected ValueError""" + # This is only called if the current Python has tarfile filters + try: + return tarfile.tar_filter(member, path) + except tarfile.FilterError as exc: + raise ValueError(str(exc)) + + archive.extraction_filter = extraction_filter + + archive.extractall(dest_dir) + + finally: + if archive: + archive.close() + + +def zip_dir(directory): + """zip a directory tree into a BytesIO object""" + result = io.BytesIO() + dlen = len(directory) + with ZipFile(result, "w") as zf: + for root, dirs, files in os.walk(directory): + for name in files: + full = os.path.join(root, name) + rel = root[dlen:] + dest = os.path.join(rel, name) + zf.write(full, dest) + return result + + +# +# Simple progress bar +# + +UNITS = ('', 'K', 'M', 'G', 'T', 'P') + + +class Progress(object): + unknown = 'UNKNOWN' + + def __init__(self, minval=0, maxval=100): + assert maxval is None or maxval >= minval + self.min = self.cur = minval + self.max = maxval + self.started = None + self.elapsed = 0 + self.done = False + + def update(self, curval): + assert self.min <= curval + assert self.max is None or curval <= self.max + self.cur = curval + now = time.time() + if self.started is None: + self.started = now + else: + self.elapsed = now - self.started + + def increment(self, incr): + assert incr >= 0 + self.update(self.cur + incr) + + def start(self): + self.update(self.min) + return self + + def stop(self): + if self.max is not None: + self.update(self.max) + self.done = True + + @property + def maximum(self): + return self.unknown if self.max is None else self.max + + @property + def percentage(self): + if self.done: + result = '100 %' + elif self.max is None: + result = ' ?? %' + else: + v = 100.0 * (self.cur - self.min) / (self.max - self.min) + result = '%3d %%' % v + return result + + def format_duration(self, duration): + if (duration <= 0) and self.max is None or self.cur == self.min: + result = '??:??:??' 
+ # elif duration < 1: + # result = '--:--:--' + else: + result = time.strftime('%H:%M:%S', time.gmtime(duration)) + return result + + @property + def ETA(self): + if self.done: + prefix = 'Done' + t = self.elapsed + # import pdb; pdb.set_trace() + else: + prefix = 'ETA ' + if self.max is None: + t = -1 + elif self.elapsed == 0 or (self.cur == self.min): + t = 0 + else: + # import pdb; pdb.set_trace() + t = float(self.max - self.min) + t /= self.cur - self.min + t = (t - 1) * self.elapsed + return '%s: %s' % (prefix, self.format_duration(t)) + + @property + def speed(self): + if self.elapsed == 0: + result = 0.0 + else: + result = (self.cur - self.min) / self.elapsed + for unit in UNITS: + if result < 1000: + break + result /= 1000.0 + return '%d %sB/s' % (result, unit) + + +# +# Glob functionality +# + +RICH_GLOB = re.compile(r'\{([^}]*)\}') +_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') +_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') + + +def iglob(path_glob): + """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" + if _CHECK_RECURSIVE_GLOB.search(path_glob): + msg = """invalid glob %r: recursive glob "**" must be used alone""" + raise ValueError(msg % path_glob) + if _CHECK_MISMATCH_SET.search(path_glob): + msg = """invalid glob %r: mismatching set marker '{' or '}'""" + raise ValueError(msg % path_glob) + return _iglob(path_glob) + + +def _iglob(path_glob): + rich_path_glob = RICH_GLOB.split(path_glob, 1) + if len(rich_path_glob) > 1: + assert len(rich_path_glob) == 3, rich_path_glob + prefix, set, suffix = rich_path_glob + for item in set.split(','): + for path in _iglob(''.join((prefix, item, suffix))): + yield path + else: + if '**' not in path_glob: + for item in std_iglob(path_glob): + yield item + else: + prefix, radical = path_glob.split('**', 1) + if prefix == '': + prefix = '.' 
+ if radical == '': + radical = '*' + else: + # we support both + radical = radical.lstrip('/') + radical = radical.lstrip('\\') + for path, dir, files in os.walk(prefix): + path = os.path.normpath(path) + for fn in _iglob(os.path.join(path, radical)): + yield fn + + +if ssl: + from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, CertificateError) + + # + # HTTPSConnection which verifies certificates/matches domains + # + + class HTTPSConnection(httplib.HTTPSConnection): + ca_certs = None # set this to the path to the certs file (.pem) + check_domain = True # only used if ca_certs is not None + + # noinspection PyPropertyAccess + def connect(self): + sock = socket.create_connection((self.host, self.port), self.timeout) + if getattr(self, '_tunnel_host', False): + self.sock = sock + self._tunnel() + + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + if hasattr(ssl, 'OP_NO_SSLv2'): + context.options |= ssl.OP_NO_SSLv2 + if getattr(self, 'cert_file', None): + context.load_cert_chain(self.cert_file, self.key_file) + kwargs = {} + if self.ca_certs: + context.verify_mode = ssl.CERT_REQUIRED + context.load_verify_locations(cafile=self.ca_certs) + if getattr(ssl, 'HAS_SNI', False): + kwargs['server_hostname'] = self.host + + self.sock = context.wrap_socket(sock, **kwargs) + if self.ca_certs and self.check_domain: + try: + match_hostname(self.sock.getpeercert(), self.host) + logger.debug('Host verified: %s', self.host) + except CertificateError: # pragma: no cover + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + + class HTTPSHandler(BaseHTTPSHandler): + + def __init__(self, ca_certs, check_domain=True): + BaseHTTPSHandler.__init__(self) + self.ca_certs = ca_certs + self.check_domain = check_domain + + def _conn_maker(self, *args, **kwargs): + """ + This is called to create a connection instance. Normally you'd + pass a connection class to do_open, but it doesn't actually check for + a class, and just expects a callable. As long as we behave just as a + constructor would have, we should be OK. If it ever changes so that + we *must* pass a class, we'll create an UnsafeHTTPSConnection class + which just sets check_domain to False in the class definition, and + choose which one to pass to do_open. + """ + result = HTTPSConnection(*args, **kwargs) + if self.ca_certs: + result.ca_certs = self.ca_certs + result.check_domain = self.check_domain + return result + + def https_open(self, req): + try: + return self.do_open(self._conn_maker, req) + except URLError as e: + if 'certificate verify failed' in str(e.reason): + raise CertificateError('Unable to verify server certificate ' + 'for %s' % req.host) + else: + raise + + # + # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- + # Middle proxy using HTTP listens on port 443, or an index mistakenly serves + # HTML containing a http://xyz link when it should be https://xyz), + # you can use the following handler class, which does not allow HTTP traffic. + # + # It works by inheriting from HTTPHandler - so build_opener won't add a + # handler for HTTP itself. 
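+ # + # A wiring sketch (illustrative; assumes Python 3 and a hypothetical + # CA bundle path): + # + # from urllib.request import build_opener + # opener = build_opener(HTTPSOnlyHandler(ca_certs='/path/to/ca.pem')) + # opener.open('https://example.com/') # a plain http:// URL raises URLError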
+ # + class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): + + def http_open(self, req): + raise URLError('Unexpected HTTP request on what should be a secure ' + 'connection: %s' % req) + + +# +# XML-RPC with timeouts +# +class Transport(xmlrpclib.Transport): + + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.Transport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, x509 = self.get_host_info(host) + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + return self._connection[1] + + +if ssl: + + class SafeTransport(xmlrpclib.SafeTransport): + + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.SafeTransport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, kwargs = self.get_host_info(host) + if not kwargs: + kwargs = {} + kwargs['timeout'] = self.timeout + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, **kwargs) + return self._connection[1] + + +class ServerProxy(xmlrpclib.ServerProxy): + + def __init__(self, uri, **kwargs): + self.timeout = timeout = kwargs.pop('timeout', None) + # The above classes only come into play if a timeout + # is specified + if timeout is not None: + # scheme = splittype(uri) # deprecated as of Python 3.8 + scheme = urlparse(uri)[0] + use_datetime = kwargs.get('use_datetime', 0) + if scheme == 'https': + tcls = SafeTransport + else: + tcls = Transport + kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) + self.transport = t + xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) + + +# +# CSV functionality. This is provided because on 2.x, the csv module can't +# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. +# + + +def _csv_open(fn, mode, **kwargs): + if sys.version_info[0] < 3: + mode += 'b' + else: + kwargs['newline'] = '' + # Python 3 determines encoding from locale. 
Force 'utf-8' + # file encoding to match other forced utf-8 encoding + kwargs['encoding'] = 'utf-8' + return open(fn, mode, **kwargs) + + +class CSVBase(object): + defaults = { + 'delimiter': str(','), # The strs are used because we need native + 'quotechar': str('"'), # str in the csv API (2.x won't take + 'lineterminator': str('\n') # Unicode) + } + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.stream.close() + + +class CSVReader(CSVBase): + + def __init__(self, **kwargs): + if 'stream' in kwargs: + stream = kwargs['stream'] + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + self.stream = stream + else: + self.stream = _csv_open(kwargs['path'], 'r') + self.reader = csv.reader(self.stream, **self.defaults) + + def __iter__(self): + return self + + def next(self): + result = next(self.reader) + if sys.version_info[0] < 3: + for i, item in enumerate(result): + if not isinstance(item, text_type): + result[i] = item.decode('utf-8') + return result + + __next__ = next + + +class CSVWriter(CSVBase): + + def __init__(self, fn, **kwargs): + self.stream = _csv_open(fn, 'w') + self.writer = csv.writer(self.stream, **self.defaults) + + def writerow(self, row): + if sys.version_info[0] < 3: + r = [] + for item in row: + if isinstance(item, text_type): + item = item.encode('utf-8') + r.append(item) + row = r + self.writer.writerow(row) + + +# +# Configurator functionality +# + + +class Configurator(BaseConfigurator): + + value_converters = dict(BaseConfigurator.value_converters) + value_converters['inc'] = 'inc_convert' + + def __init__(self, config, base=None): + super(Configurator, self).__init__(config) + self.base = base or os.getcwd() + + def configure_custom(self, config): + + def convert(o): + if isinstance(o, (list, tuple)): + result = type(o)([convert(i) for i in o]) + elif isinstance(o, dict): + if '()' in o: + result = self.configure_custom(o) + else: + result = {} + for k in o: + result[k] = convert(o[k]) + else: + result = self.convert(o) + return result + + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + args = config.pop('[]', ()) + if args: + args = tuple([convert(o) for o in args]) + items = [(k, convert(config[k])) for k in config if valid_ident(k)] + kwargs = dict(items) + result = c(*args, **kwargs) + if props: + for n, v in props.items(): + setattr(result, n, convert(v)) + return result + + def __getitem__(self, key): + result = self.config[key] + if isinstance(result, dict) and '()' in result: + self.config[key] = result = self.configure_custom(result) + return result + + def inc_convert(self, value): + """Default converter for the inc:// protocol.""" + if not os.path.isabs(value): + value = os.path.join(self.base, value) + with codecs.open(value, 'r', encoding='utf-8') as f: + result = json.load(f) + return result + + +class SubprocessMixin(object): + """ + Mixin for running subprocesses and capturing their output + """ + + def __init__(self, verbose=False, progress=None): + self.verbose = verbose + self.progress = progress + + def reader(self, stream, context): + """ + Read lines from a subprocess' output stream and either pass to a progress + callable (if specified) or write progress information to sys.stderr. 
+ """ + progress = self.progress + verbose = self.verbose + while True: + s = stream.readline() + if not s: + break + if progress is not None: + progress(s, context) + else: + if not verbose: + sys.stderr.write('.') + else: + sys.stderr.write(s.decode('utf-8')) + sys.stderr.flush() + stream.close() + + def run_command(self, cmd, **kwargs): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) + t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) + t1.start() + t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) + t2.start() + p.wait() + t1.join() + t2.join() + if self.progress is not None: + self.progress('done.', 'main') + elif self.verbose: + sys.stderr.write('done.\n') + return p + + +def normalize_name(name): + """Normalize a python package name a la PEP 503""" + # https://www.python.org/dev/peps/pep-0503/#normalized-names + return re.sub('[-_.]+', '-', name).lower() + + +# def _get_pypirc_command(): +# """ +# Get the distutils command for interacting with PyPI configurations. +# :return: the command. +# """ +# from distutils.core import Distribution +# from distutils.config import PyPIRCCommand +# d = Distribution() +# return PyPIRCCommand(d) + + +class PyPIRCFile(object): + + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' + DEFAULT_REALM = 'pypi' + + def __init__(self, fn=None, url=None): + if fn is None: + fn = os.path.join(os.path.expanduser('~'), '.pypirc') + self.filename = fn + self.url = url + + def read(self): + result = {} + + if os.path.exists(self.filename): + repository = self.url or self.DEFAULT_REPOSITORY + + config = configparser.RawConfigParser() + config.read(self.filename) + sections = config.sections() + if 'distutils' in sections: + # let's get the list of servers + index_servers = config.get('distutils', 'index-servers') + _servers = [server.strip() for server in index_servers.split('\n') if server.strip() != ''] + if _servers == []: + # nothing set, let's try to get the default pypi + if 'pypi' in sections: + _servers = ['pypi'] + else: + for server in _servers: + result = {'server': server} + result['username'] = config.get(server, 'username') + + # optional params + for key, default in (('repository', self.DEFAULT_REPOSITORY), ('realm', self.DEFAULT_REALM), + ('password', None)): + if config.has_option(server, key): + result[key] = config.get(server, key) + else: + result[key] = default + + # work around people having "repository" for the "pypi" + # section of their config set to the HTTP (rather than + # HTTPS) URL + if (server == 'pypi' and repository in (self.DEFAULT_REPOSITORY, 'pypi')): + result['repository'] = self.DEFAULT_REPOSITORY + elif (result['server'] != repository and result['repository'] != repository): + result = {} + elif 'server-login' in sections: + # old format + server = 'server-login' + if config.has_option(server, 'repository'): + repository = config.get(server, 'repository') + else: + repository = self.DEFAULT_REPOSITORY + result = { + 'username': config.get(server, 'username'), + 'password': config.get(server, 'password'), + 'repository': repository, + 'server': server, + 'realm': self.DEFAULT_REALM + } + return result + + def update(self, username, password): + # import pdb; pdb.set_trace() + config = configparser.RawConfigParser() + fn = self.filename + config.read(fn) + if not config.has_section('pypi'): + config.add_section('pypi') + config.set('pypi', 'username', username) + config.set('pypi', 'password', password) + with open(fn, 'w') as f: + config.write(f) 
+ + +def _load_pypirc(index): + """ + Read the PyPI access configuration as supported by distutils. + """ + return PyPIRCFile(url=index.url).read() + + +def _store_pypirc(index): + PyPIRCFile().update(index.username, index.password) + + +# +# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor +# tweaks +# + + +def get_host_platform(): + """Return a string that identifies the current platform. This is used mainly to + distinguish platform-specific build directories and platform-specific built + distributions. Typically includes the OS name and version and the + architecture (as supplied by 'os.uname()'), although the exact information + included depends on the OS; eg. on Linux, the kernel version isn't + particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + """ + if os.name == 'nt': + if 'amd64' in sys.version.lower(): + return 'win-amd64' + if '(arm)' in sys.version.lower(): + return 'win-arm32' + if '(arm64)' in sys.version.lower(): + return 'win-arm64' + return sys.platform + + # Set for cross builds explicitly + if "_PYTHON_HOST_PLATFORM" in os.environ: + return os.environ["_PYTHON_HOST_PLATFORM"] + + if os.name != 'posix' or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha, + # Mac OS is M68k or PPC, etc. + return sys.platform + + # Try to distinguish various flavours of Unix + + (osname, host, release, version, machine) = os.uname() + + # Convert the OS name to lowercase, remove '/' characters, and translate + # spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_').replace('/', '-') + + if osname[:5] == 'linux': + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return "%s-%s" % (osname, machine) + + elif osname[:5] == 'sunos': + if release[0] >= '5': # SunOS 5 == Solaris 2 + osname = 'solaris' + release = '%d.%s' % (int(release[0]) - 3, release[2:]) + # We can't use 'platform.architecture()[0]' because a + # bootstrap problem. We use a dict to get an error + # if some suspicious happens. 
+ bitness = {2147483647: '32bit', 9223372036854775807: '64bit'} + machine += '.%s' % bitness[sys.maxsize] + # fall through to standard osname-release-machine representation + elif osname[:3] == 'aix': + from _aix_support import aix_platform + return aix_platform() + elif osname[:6] == 'cygwin': + osname = 'cygwin' + rel_re = re.compile(r'[\d.]+', re.ASCII) + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == 'darwin': + import _osx_support + try: + from distutils import sysconfig + except ImportError: + import sysconfig + osname, release, machine = _osx_support.get_platform_osx(sysconfig.get_config_vars(), osname, release, machine) + + return '%s-%s-%s' % (osname, release, machine) + + +_TARGET_TO_PLAT = { + 'x86': 'win32', + 'x64': 'win-amd64', + 'arm': 'win-arm32', +} + + +def get_platform(): + if os.name != 'nt': + return get_host_platform() + cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH') + if cross_compilation_target not in _TARGET_TO_PLAT: + return get_host_platform() + return _TARGET_TO_PLAT[cross_compilation_target] diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w32.exe b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w32.exe new file mode 100644 index 0000000..4ee2d3a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w32.exe differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w64-arm.exe b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w64-arm.exe new file mode 100644 index 0000000..951d581 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w64-arm.exe differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w64.exe b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w64.exe new file mode 100644 index 0000000..5763076 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distlib/w64.exe differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__init__.py new file mode 100644 index 0000000..7686fe8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__init__.py @@ -0,0 +1,54 @@ +from .distro import ( + NORMALIZED_DISTRO_ID, + NORMALIZED_LSB_ID, + NORMALIZED_OS_ID, + LinuxDistribution, + __version__, + build_number, + codename, + distro_release_attr, + distro_release_info, + id, + info, + like, + linux_distribution, + lsb_release_attr, + lsb_release_info, + major_version, + minor_version, + name, + os_release_attr, + os_release_info, + uname_attr, + uname_info, + version, + version_parts, +) + +__all__ = [ + "NORMALIZED_DISTRO_ID", + "NORMALIZED_LSB_ID", + "NORMALIZED_OS_ID", + "LinuxDistribution", + "build_number", + "codename", + "distro_release_attr", + "distro_release_info", + "id", + "info", + "like", + "linux_distribution", + "lsb_release_attr", + "lsb_release_info", + "major_version", + "minor_version", + "name", + "os_release_attr", + "os_release_info", + "uname_attr", + "uname_info", + "version", + "version_parts", +] + +__version__ = __version__ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__main__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__main__.py new file mode 100644 index 0000000..0c01d5b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__main__.py @@ -0,0 +1,4 @@ +from .distro import main + +if __name__ == "__main__": + main() diff --git 
a/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..9700151 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 0000000..a58a1d9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-312.pyc new file mode 100644 index 0000000..a53a78d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distro/distro.py b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/distro.py new file mode 100644 index 0000000..78ccdfa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/distro.py @@ -0,0 +1,1403 @@ +#!/usr/bin/env python +# Copyright 2015-2021 Nir Cohen +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The ``distro`` package (``distro`` stands for Linux Distribution) provides +information about the Linux distribution it runs on, such as a reliable +machine-readable distro ID, or version information. + +It is the recommended replacement for Python's original +:py:func:`platform.linux_distribution` function, but it provides much more +functionality. An alternative implementation became necessary because Python +3.5 deprecated this function, and Python 3.8 removed it altogether. Its +predecessor function :py:func:`platform.dist` was already deprecated since +Python 2.6 and removed in Python 3.8. Still, there are many cases in which +access to OS distribution information is needed. See `Python issue 1322 +`_ for more information. 
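+ +A minimal usage sketch (values shown are illustrative; actual output depends +on the host):: + + >>> import distro # doctest: +SKIP + >>> distro.id(), distro.version() # doctest: +SKIP + ('ubuntu', '22.04')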
+""" + +import argparse +import json +import logging +import os +import re +import shlex +import subprocess +import sys +import warnings +from typing import ( + Any, + Callable, + Dict, + Iterable, + Optional, + Sequence, + TextIO, + Tuple, + Type, +) + +try: + from typing import TypedDict +except ImportError: + # Python 3.7 + TypedDict = dict + +__version__ = "1.9.0" + + +class VersionDict(TypedDict): + major: str + minor: str + build_number: str + + +class InfoDict(TypedDict): + id: str + version: str + version_parts: VersionDict + like: str + codename: str + + +_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") +_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib") +_OS_RELEASE_BASENAME = "os-release" + +#: Translation table for normalizing the "ID" attribute defined in os-release +#: files, for use by the :func:`distro.id` method. +#: +#: * Key: Value as defined in the os-release file, translated to lower case, +#: with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_OS_ID = { + "ol": "oracle", # Oracle Linux + "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap +} + +#: Translation table for normalizing the "Distributor ID" attribute returned by +#: the lsb_release command, for use by the :func:`distro.id` method. +#: +#: * Key: Value as returned by the lsb_release command, translated to lower +#: case, with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_LSB_ID = { + "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4 + "enterpriseenterpriseserver": "oracle", # Oracle Linux 5 + "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation + "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server + "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode +} + +#: Translation table for normalizing the distro ID derived from the file name +#: of distro release files, for use by the :func:`distro.id` method. +#: +#: * Key: Value as derived from the file name of a distro release file, +#: translated to lower case, with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_DISTRO_ID = { + "redhat": "rhel", # RHEL 6.x, 7.x +} + +# Pattern for content of distro release file (reversed) +_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( + r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)" +) + +# Pattern for base file name of distro release file +_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") + +# Base file names to be looked up for if _UNIXCONFDIR is not readable. +_DISTRO_RELEASE_BASENAMES = [ + "SuSE-release", + "altlinux-release", + "arch-release", + "base-release", + "centos-release", + "fedora-release", + "gentoo-release", + "mageia-release", + "mandrake-release", + "mandriva-release", + "mandrivalinux-release", + "manjaro-release", + "oracle-release", + "redhat-release", + "rocky-release", + "sl-release", + "slackware-version", +] + +# Base file names to be ignored when searching for distro release file +_DISTRO_RELEASE_IGNORE_BASENAMES = ( + "debian_version", + "lsb-release", + "oem-release", + _OS_RELEASE_BASENAME, + "system-release", + "plesk-release", + "iredmail-release", + "board-release", + "ec2_version", +) + + +def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]: + """ + .. deprecated:: 1.6.0 + + :func:`distro.linux_distribution()` is deprecated. It should only be + used as a compatibility shim with Python's + :py:func:`platform.linux_distribution()`. 
Please use :func:`distro.id`, + :func:`distro.version` and :func:`distro.name` instead. + + Return information about the current OS distribution as a tuple + ``(id_name, version, codename)`` with items as follows: + + * ``id_name``: If *full_distribution_name* is false, the result of + :func:`distro.id`. Otherwise, the result of :func:`distro.name`. + + * ``version``: The result of :func:`distro.version`. + + * ``codename``: The extra item (usually in parentheses) after the + os-release version number, or the result of :func:`distro.codename`. + + The interface of this function is compatible with the original + :py:func:`platform.linux_distribution` function, supporting a subset of + its parameters. + + The data it returns may not exactly be the same, because it uses more data + sources than the original function, and that may lead to different data if + the OS distribution is not consistent across multiple data sources it + provides (there are indeed such distributions ...). + + Another reason for differences is the fact that the :func:`distro.id` + method normalizes the distro ID string to a reliable machine-readable value + for a number of popular OS distributions. + """ + warnings.warn( + "distro.linux_distribution() is deprecated. It should only be used as a " + "compatibility shim with Python's platform.linux_distribution(). Please use " + "distro.id(), distro.version() and distro.name() instead.", + DeprecationWarning, + stacklevel=2, + ) + return _distro.linux_distribution(full_distribution_name) + + +def id() -> str: + """ + Return the distro ID of the current distribution, as a + machine-readable string. + + For a number of OS distributions, the returned distro ID value is + *reliable*, in the sense that it is documented and that it does not change + across releases of the distribution. + + This package maintains the following reliable distro ID values: + + ============== ========================================= + Distro ID Distribution + ============== ========================================= + "ubuntu" Ubuntu + "debian" Debian + "rhel" RedHat Enterprise Linux + "centos" CentOS + "fedora" Fedora + "sles" SUSE Linux Enterprise Server + "opensuse" openSUSE + "amzn" Amazon Linux + "arch" Arch Linux + "buildroot" Buildroot + "cloudlinux" CloudLinux OS + "exherbo" Exherbo Linux + "gentoo" GenToo Linux + "ibm_powerkvm" IBM PowerKVM + "kvmibm" KVM for IBM z Systems + "linuxmint" Linux Mint + "mageia" Mageia + "mandriva" Mandriva Linux + "parallels" Parallels + "pidora" Pidora + "raspbian" Raspbian + "oracle" Oracle Linux (and Oracle Enterprise Linux) + "scientific" Scientific Linux + "slackware" Slackware + "xenserver" XenServer + "openbsd" OpenBSD + "netbsd" NetBSD + "freebsd" FreeBSD + "midnightbsd" MidnightBSD + "rocky" Rocky Linux + "aix" AIX + "guix" Guix System + "altlinux" ALT Linux + ============== ========================================= + + If you have a need to get distros for reliable IDs added into this set, + or if you find that the :func:`distro.id` function returns a different + distro ID for one of the listed distros, please create an issue in the + `distro issue tracker`_. + + **Lookup hierarchy and transformations:** + + First, the ID is obtained from the following sources, in the specified + order. 
The first available and non-empty value is used: + + * the value of the "ID" attribute of the os-release file, + + * the value of the "Distributor ID" attribute returned by the lsb_release + command, + + * the first part of the file name of the distro release file, + + The so determined ID value then passes the following transformations, + before it is returned by this method: + + * it is translated to lower case, + + * blanks (which should not be there anyway) are translated to underscores, + + * a normalization of the ID is performed, based upon + `normalization tables`_. The purpose of this normalization is to ensure + that the ID is as reliable as possible, even across incompatible changes + in the OS distributions. A common reason for an incompatible change is + the addition of an os-release file, or the addition of the lsb_release + command, with ID values that differ from what was previously determined + from the distro release file name. + """ + return _distro.id() + + +def name(pretty: bool = False) -> str: + """ + Return the name of the current OS distribution, as a human-readable + string. + + If *pretty* is false, the name is returned without version or codename. + (e.g. "CentOS Linux") + + If *pretty* is true, the version and codename are appended. + (e.g. "CentOS Linux 7.1.1503 (Core)") + + **Lookup hierarchy:** + + The name is obtained from the following sources, in the specified order. + The first available and non-empty value is used: + + * If *pretty* is false: + + - the value of the "NAME" attribute of the os-release file, + + - the value of the "Distributor ID" attribute returned by the lsb_release + command, + + - the value of the "<name>" field of the distro release file. + + * If *pretty* is true: + + - the value of the "PRETTY_NAME" attribute of the os-release file, + + - the value of the "Description" attribute returned by the lsb_release + command, + + - the value of the "<name>" field of the distro release file, appended + with the value of the pretty version ("<version_id>" and "<codename>" + fields) of the distro release file, if available. + """ + return _distro.name(pretty) + + +def version(pretty: bool = False, best: bool = False) -> str: + """ + Return the version of the current OS distribution, as a human-readable + string. + + If *pretty* is false, the version is returned without codename (e.g. + "7.0"). + + If *pretty* is true, the codename in parenthesis is appended, if the + codename is non-empty (e.g. "7.0 (Maipo)"). + + Some distributions provide version numbers with different precisions in + the different sources of distribution information. Examining the different + sources in a fixed priority order does not always yield the most precise + version (e.g. for Debian 8.2, or CentOS 7.1). + + Some other distributions may not provide this kind of information. In these + cases, an empty string would be returned. This behavior can be observed + with rolling release distributions (e.g. Arch Linux). + + The *best* parameter can be used to control the approach for the returned + version: + + If *best* is false, the first non-empty version number in priority order of + the examined sources is returned. + + If *best* is true, the most precise version number out of all examined + sources is returned. + + **Lookup hierarchy:** + + In all cases, the version number is obtained from the following sources.
+ If *best* is false, this order represents the priority order: + + * the value of the "VERSION_ID" attribute of the os-release file, + * the value of the "Release" attribute returned by the lsb_release + command, + * the version number parsed from the "<version_id>" field of the first line + of the distro release file, + * the version number parsed from the "PRETTY_NAME" attribute of the + os-release file, if it follows the format of the distro release files. + * the version number parsed from the "Description" attribute returned by + the lsb_release command, if it follows the format of the distro release + files. + """ + return _distro.version(pretty, best) + + +def version_parts(best: bool = False) -> Tuple[str, str, str]: + """ + Return the version of the current OS distribution as a tuple + ``(major, minor, build_number)`` with items as follows: + + * ``major``: The result of :func:`distro.major_version`. + + * ``minor``: The result of :func:`distro.minor_version`. + + * ``build_number``: The result of :func:`distro.build_number`. + + For a description of the *best* parameter, see the :func:`distro.version` + method. + """ + return _distro.version_parts(best) + + +def major_version(best: bool = False) -> str: + """ + Return the major version of the current OS distribution, as a string, + if provided. + Otherwise, the empty string is returned. The major version is the first + part of the dot-separated version string. + + For a description of the *best* parameter, see the :func:`distro.version` + method. + """ + return _distro.major_version(best) + + +def minor_version(best: bool = False) -> str: + """ + Return the minor version of the current OS distribution, as a string, + if provided. + Otherwise, the empty string is returned. The minor version is the second + part of the dot-separated version string. + + For a description of the *best* parameter, see the :func:`distro.version` + method. + """ + return _distro.minor_version(best) + + +def build_number(best: bool = False) -> str: + """ + Return the build number of the current OS distribution, as a string, + if provided. + Otherwise, the empty string is returned. The build number is the third part + of the dot-separated version string. + + For a description of the *best* parameter, see the :func:`distro.version` + method. + """ + return _distro.build_number(best) + + +def like() -> str: + """ + Return a space-separated list of distro IDs of distributions that are + closely related to the current OS distribution in regards to packaging + and programming interfaces, for example distributions the current + distribution is a derivative from. + + **Lookup hierarchy:** + + This information item is only provided by the os-release file. + For details, see the description of the "ID_LIKE" attribute in the + `os-release man page + <https://www.freedesktop.org/software/systemd/man/os-release.html>`_. + """ + return _distro.like() + + +def codename() -> str: + """ + Return the codename for the release of the current OS distribution, + as a string. + + If the distribution does not have a codename, an empty string is returned. + + Note that the returned codename is not always really a codename. For + example, openSUSE returns "x86_64". This function does not handle such + cases in any special way and just returns the string it finds, if any. + + **Lookup hierarchy:** + + * the codename within the "VERSION" attribute of the os-release file, if + provided, + + * the value of the "Codename" attribute returned by the lsb_release + command, + + * the value of the "<codename>" field of the distro release file.
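+ + For example (illustrative; the codename shown is hypothetical):: + + >>> distro.codename() # doctest: +SKIP + 'jammy'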
+ """ + return _distro.codename() + + +def info(pretty: bool = False, best: bool = False) -> InfoDict: + """ + Return certain machine-readable information items about the current OS + distribution in a dictionary, as shown in the following example: + + .. sourcecode:: python + + { + 'id': 'rhel', + 'version': '7.0', + 'version_parts': { + 'major': '7', + 'minor': '0', + 'build_number': '' + }, + 'like': 'fedora', + 'codename': 'Maipo' + } + + The dictionary structure and keys are always the same, regardless of which + information items are available in the underlying data sources. The values + for the various keys are as follows: + + * ``id``: The result of :func:`distro.id`. + + * ``version``: The result of :func:`distro.version`. + + * ``version_parts -> major``: The result of :func:`distro.major_version`. + + * ``version_parts -> minor``: The result of :func:`distro.minor_version`. + + * ``version_parts -> build_number``: The result of + :func:`distro.build_number`. + + * ``like``: The result of :func:`distro.like`. + + * ``codename``: The result of :func:`distro.codename`. + + For a description of the *pretty* and *best* parameters, see the + :func:`distro.version` method. + """ + return _distro.info(pretty, best) + + +def os_release_info() -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information items + from the os-release file data source of the current OS distribution. + + See `os-release file`_ for details about these information items. + """ + return _distro.os_release_info() + + +def lsb_release_info() -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information items + from the lsb_release command data source of the current OS distribution. + + See `lsb_release command output`_ for details about these information + items. + """ + return _distro.lsb_release_info() + + +def distro_release_info() -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information items + from the distro release file data source of the current OS distribution. + + See `distro release file`_ for details about these information items. + """ + return _distro.distro_release_info() + + +def uname_info() -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information items + from the distro release file data source of the current OS distribution. + """ + return _distro.uname_info() + + +def os_release_attr(attribute: str) -> str: + """ + Return a single named information item from the os-release file data source + of the current OS distribution. + + Parameters: + + * ``attribute`` (string): Key of the information item. + + Returns: + + * (string): Value of the information item, if the item exists. + The empty string, if the item does not exist. + + See `os-release file`_ for details about these information items. + """ + return _distro.os_release_attr(attribute) + + +def lsb_release_attr(attribute: str) -> str: + """ + Return a single named information item from the lsb_release command output + data source of the current OS distribution. + + Parameters: + + * ``attribute`` (string): Key of the information item. + + Returns: + + * (string): Value of the information item, if the item exists. + The empty string, if the item does not exist. + + See `lsb_release command output`_ for details about these information + items. 
+ """ + return _distro.lsb_release_attr(attribute) + + +def distro_release_attr(attribute: str) -> str: + """ + Return a single named information item from the distro release file + data source of the current OS distribution. + + Parameters: + + * ``attribute`` (string): Key of the information item. + + Returns: + + * (string): Value of the information item, if the item exists. + The empty string, if the item does not exist. + + See `distro release file`_ for details about these information items. + """ + return _distro.distro_release_attr(attribute) + + +def uname_attr(attribute: str) -> str: + """ + Return a single named information item from the distro release file + data source of the current OS distribution. + + Parameters: + + * ``attribute`` (string): Key of the information item. + + Returns: + + * (string): Value of the information item, if the item exists. + The empty string, if the item does not exist. + """ + return _distro.uname_attr(attribute) + + +try: + from functools import cached_property +except ImportError: + # Python < 3.8 + class cached_property: # type: ignore + """A version of @property which caches the value. On access, it calls the + underlying function and sets the value in `__dict__` so future accesses + will not re-call the property. + """ + + def __init__(self, f: Callable[[Any], Any]) -> None: + self._fname = f.__name__ + self._f = f + + def __get__(self, obj: Any, owner: Type[Any]) -> Any: + assert obj is not None, f"call {self._fname} on an instance" + ret = obj.__dict__[self._fname] = self._f(obj) + return ret + + +class LinuxDistribution: + """ + Provides information about a OS distribution. + + This package creates a private module-global instance of this class with + default initialization arguments, that is used by the + `consolidated accessor functions`_ and `single source accessor functions`_. + By using default initialization arguments, that module-global instance + returns data about the current OS distribution (i.e. the distro this + package runs on). + + Normally, it is not necessary to create additional instances of this class. + However, in situations where control is needed over the exact data sources + that are used, instances of this class can be created with a specific + distro release file, or a specific os-release file, or without invoking the + lsb_release command. + """ + + def __init__( + self, + include_lsb: Optional[bool] = None, + os_release_file: str = "", + distro_release_file: str = "", + include_uname: Optional[bool] = None, + root_dir: Optional[str] = None, + include_oslevel: Optional[bool] = None, + ) -> None: + """ + The initialization method of this class gathers information from the + available data sources, and stores that in private instance attributes. + Subsequent access to the information items uses these private instance + attributes, so that the data sources are read only once. + + Parameters: + + * ``include_lsb`` (bool): Controls whether the + `lsb_release command output`_ is included as a data source. + + If the lsb_release command is not available in the program execution + path, the data source for the lsb_release command will be empty. + + * ``os_release_file`` (string): The path name of the + `os-release file`_ that is to be used as a data source. + + An empty string (the default) will cause the default path name to + be used (see `os-release file`_ for details). + + If the specified or defaulted os-release file does not exist, the + data source for the os-release file will be empty. 
+ + * ``distro_release_file`` (string): The path name of the + `distro release file`_ that is to be used as a data source. + + An empty string (the default) will cause a default search algorithm + to be used (see `distro release file`_ for details). + + If the specified distro release file does not exist, or if no default + distro release file can be found, the data source for the distro + release file will be empty. + + * ``include_uname`` (bool): Controls whether uname command output is + included as a data source. If the uname command is not available in + the program execution path the data source for the uname command will + be empty. + + * ``root_dir`` (string): The absolute path to the root directory to use + to find distro-related information files. Note that ``include_*`` + parameters must not be enabled in combination with ``root_dir``. + + * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command + output is included as a data source. If the oslevel command is not + available in the program execution path the data source will be + empty. + + Public instance attributes: + + * ``os_release_file`` (string): The path name of the + `os-release file`_ that is actually used as a data source. The + empty string if no distro release file is used as a data source. + + * ``distro_release_file`` (string): The path name of the + `distro release file`_ that is actually used as a data source. The + empty string if no distro release file is used as a data source. + + * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter. + This controls whether the lsb information will be loaded. + + * ``include_uname`` (bool): The result of the ``include_uname`` + parameter. This controls whether the uname information will + be loaded. + + * ``include_oslevel`` (bool): The result of the ``include_oslevel`` + parameter. This controls whether (AIX) oslevel information will be + loaded. + + * ``root_dir`` (string): The result of the ``root_dir`` parameter. + The absolute path to the root directory to use to find distro-related + information files. + + Raises: + + * :py:exc:`ValueError`: Initialization parameters combination is not + supported. + + * :py:exc:`OSError`: Some I/O issue with an os-release file or distro + release file. + + * :py:exc:`UnicodeError`: A data source has unexpected characters or + uses an unexpected encoding. + """ + self.root_dir = root_dir + self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR + self.usr_lib_dir = ( + os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR + ) + + if os_release_file: + self.os_release_file = os_release_file + else: + etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME) + usr_lib_os_release_file = os.path.join( + self.usr_lib_dir, _OS_RELEASE_BASENAME + ) + + # NOTE: The idea is to respect order **and** have it set + # at all times for API backwards compatibility. 
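+ # Prefer the etc_dir file; fall back to the usr_lib_dir file only + # when the etc_dir file is missing but the usr_lib_dir one exists, + # so os_release_file is always assigned either way.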
+ if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile( + usr_lib_os_release_file + ): + self.os_release_file = etc_dir_os_release_file + else: + self.os_release_file = usr_lib_os_release_file + + self.distro_release_file = distro_release_file or "" # updated later + + is_root_dir_defined = root_dir is not None + if is_root_dir_defined and (include_lsb or include_uname or include_oslevel): + raise ValueError( + "Including subprocess data sources from specific root_dir is disallowed" + " to prevent false information" + ) + self.include_lsb = ( + include_lsb if include_lsb is not None else not is_root_dir_defined + ) + self.include_uname = ( + include_uname if include_uname is not None else not is_root_dir_defined + ) + self.include_oslevel = ( + include_oslevel if include_oslevel is not None else not is_root_dir_defined + ) + + def __repr__(self) -> str: + """Return repr of all info""" + return ( + "LinuxDistribution(" + "os_release_file={self.os_release_file!r}, " + "distro_release_file={self.distro_release_file!r}, " + "include_lsb={self.include_lsb!r}, " + "include_uname={self.include_uname!r}, " + "include_oslevel={self.include_oslevel!r}, " + "root_dir={self.root_dir!r}, " + "_os_release_info={self._os_release_info!r}, " + "_lsb_release_info={self._lsb_release_info!r}, " + "_distro_release_info={self._distro_release_info!r}, " + "_uname_info={self._uname_info!r}, " + "_oslevel_info={self._oslevel_info!r})".format(self=self) + ) + + def linux_distribution( + self, full_distribution_name: bool = True + ) -> Tuple[str, str, str]: + """ + Return information about the OS distribution that is compatible + with Python's :func:`platform.linux_distribution`, supporting a subset + of its parameters. + + For details, see :func:`distro.linux_distribution`. + """ + return ( + self.name() if full_distribution_name else self.id(), + self.version(), + self._os_release_info.get("release_codename") or self.codename(), + ) + + def id(self) -> str: + """Return the distro ID of the OS distribution, as a string. + + For details, see :func:`distro.id`. + """ + + def normalize(distro_id: str, table: Dict[str, str]) -> str: + distro_id = distro_id.lower().replace(" ", "_") + return table.get(distro_id, distro_id) + + distro_id = self.os_release_attr("id") + if distro_id: + return normalize(distro_id, NORMALIZED_OS_ID) + + distro_id = self.lsb_release_attr("distributor_id") + if distro_id: + return normalize(distro_id, NORMALIZED_LSB_ID) + + distro_id = self.distro_release_attr("id") + if distro_id: + return normalize(distro_id, NORMALIZED_DISTRO_ID) + + distro_id = self.uname_attr("id") + if distro_id: + return normalize(distro_id, NORMALIZED_DISTRO_ID) + + return "" + + def name(self, pretty: bool = False) -> str: + """ + Return the name of the OS distribution, as a string. + + For details, see :func:`distro.name`. + """ + name = ( + self.os_release_attr("name") + or self.lsb_release_attr("distributor_id") + or self.distro_release_attr("name") + or self.uname_attr("name") + ) + if pretty: + name = self.os_release_attr("pretty_name") or self.lsb_release_attr( + "description" + ) + if not name: + name = self.distro_release_attr("name") or self.uname_attr("name") + version = self.version(pretty=True) + if version: + name = f"{name} {version}" + return name or "" + + def version(self, pretty: bool = False, best: bool = False) -> str: + """ + Return the version of the OS distribution, as a string. + + For details, see :func:`distro.version`. 
+ """ + versions = [ + self.os_release_attr("version_id"), + self.lsb_release_attr("release"), + self.distro_release_attr("version_id"), + self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( + "version_id", "" + ), + self._parse_distro_release_content( + self.lsb_release_attr("description") + ).get("version_id", ""), + self.uname_attr("release"), + ] + if self.uname_attr("id").startswith("aix"): + # On AIX platforms, prefer oslevel command output. + versions.insert(0, self.oslevel_info()) + elif self.id() == "debian" or "debian" in self.like().split(): + # On Debian-like, add debian_version file content to candidates list. + versions.append(self._debian_version) + version = "" + if best: + # This algorithm uses the last version in priority order that has + # the best precision. If the versions are not in conflict, that + # does not matter; otherwise, using the last one instead of the + # first one might be considered a surprise. + for v in versions: + if v.count(".") > version.count(".") or version == "": + version = v + else: + for v in versions: + if v != "": + version = v + break + if pretty and version and self.codename(): + version = f"{version} ({self.codename()})" + return version + + def version_parts(self, best: bool = False) -> Tuple[str, str, str]: + """ + Return the version of the OS distribution, as a tuple of version + numbers. + + For details, see :func:`distro.version_parts`. + """ + version_str = self.version(best=best) + if version_str: + version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?") + matches = version_regex.match(version_str) + if matches: + major, minor, build_number = matches.groups() + return major, minor or "", build_number or "" + return "", "", "" + + def major_version(self, best: bool = False) -> str: + """ + Return the major version number of the current distribution. + + For details, see :func:`distro.major_version`. + """ + return self.version_parts(best)[0] + + def minor_version(self, best: bool = False) -> str: + """ + Return the minor version number of the current distribution. + + For details, see :func:`distro.minor_version`. + """ + return self.version_parts(best)[1] + + def build_number(self, best: bool = False) -> str: + """ + Return the build number of the current distribution. + + For details, see :func:`distro.build_number`. + """ + return self.version_parts(best)[2] + + def like(self) -> str: + """ + Return the IDs of distributions that are like the OS distribution. + + For details, see :func:`distro.like`. + """ + return self.os_release_attr("id_like") or "" + + def codename(self) -> str: + """ + Return the codename of the OS distribution. + + For details, see :func:`distro.codename`. + """ + try: + # Handle os_release specially since distros might purposefully set + # this to empty string to have no codename + return self._os_release_info["codename"] + except KeyError: + return ( + self.lsb_release_attr("codename") + or self.distro_release_attr("codename") + or "" + ) + + def info(self, pretty: bool = False, best: bool = False) -> InfoDict: + """ + Return certain machine-readable information about the OS + distribution. + + For details, see :func:`distro.info`. 
+ """ + return InfoDict( + id=self.id(), + version=self.version(pretty, best), + version_parts=VersionDict( + major=self.major_version(best), + minor=self.minor_version(best), + build_number=self.build_number(best), + ), + like=self.like(), + codename=self.codename(), + ) + + def os_release_info(self) -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information + items from the os-release file data source of the OS distribution. + + For details, see :func:`distro.os_release_info`. + """ + return self._os_release_info + + def lsb_release_info(self) -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information + items from the lsb_release command data source of the OS + distribution. + + For details, see :func:`distro.lsb_release_info`. + """ + return self._lsb_release_info + + def distro_release_info(self) -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information + items from the distro release file data source of the OS + distribution. + + For details, see :func:`distro.distro_release_info`. + """ + return self._distro_release_info + + def uname_info(self) -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information + items from the uname command data source of the OS distribution. + + For details, see :func:`distro.uname_info`. + """ + return self._uname_info + + def oslevel_info(self) -> str: + """ + Return AIX' oslevel command output. + """ + return self._oslevel_info + + def os_release_attr(self, attribute: str) -> str: + """ + Return a single named information item from the os-release file data + source of the OS distribution. + + For details, see :func:`distro.os_release_attr`. + """ + return self._os_release_info.get(attribute, "") + + def lsb_release_attr(self, attribute: str) -> str: + """ + Return a single named information item from the lsb_release command + output data source of the OS distribution. + + For details, see :func:`distro.lsb_release_attr`. + """ + return self._lsb_release_info.get(attribute, "") + + def distro_release_attr(self, attribute: str) -> str: + """ + Return a single named information item from the distro release file + data source of the OS distribution. + + For details, see :func:`distro.distro_release_attr`. + """ + return self._distro_release_info.get(attribute, "") + + def uname_attr(self, attribute: str) -> str: + """ + Return a single named information item from the uname command + output data source of the OS distribution. + + For details, see :func:`distro.uname_attr`. + """ + return self._uname_info.get(attribute, "") + + @cached_property + def _os_release_info(self) -> Dict[str, str]: + """ + Get the information items from the specified os-release file. + + Returns: + A dictionary containing all information items. + """ + if os.path.isfile(self.os_release_file): + with open(self.os_release_file, encoding="utf-8") as release_file: + return self._parse_os_release_content(release_file) + return {} + + @staticmethod + def _parse_os_release_content(lines: TextIO) -> Dict[str, str]: + """ + Parse the lines of an os-release file. + + Parameters: + + * lines: Iterable through the lines in the os-release file. + Each line must be a unicode string or a UTF-8 encoded byte + string. + + Returns: + A dictionary containing all information items. 
+ """ + props = {} + lexer = shlex.shlex(lines, posix=True) + lexer.whitespace_split = True + + tokens = list(lexer) + for token in tokens: + # At this point, all shell-like parsing has been done (i.e. + # comments processed, quotes and backslash escape sequences + # processed, multi-line values assembled, trailing newlines + # stripped, etc.), so the tokens are now either: + # * variable assignments: var=value + # * commands or their arguments (not allowed in os-release) + # Ignore any tokens that are not variable assignments + if "=" in token: + k, v = token.split("=", 1) + props[k.lower()] = v + + if "version" in props: + # extract release codename (if any) from version attribute + match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"]) + if match: + release_codename = match.group(1) or match.group(2) + props["codename"] = props["release_codename"] = release_codename + + if "version_codename" in props: + # os-release added a version_codename field. Use that in + # preference to anything else Note that some distros purposefully + # do not have code names. They should be setting + # version_codename="" + props["codename"] = props["version_codename"] + elif "ubuntu_codename" in props: + # Same as above but a non-standard field name used on older Ubuntus + props["codename"] = props["ubuntu_codename"] + + return props + + @cached_property + def _lsb_release_info(self) -> Dict[str, str]: + """ + Get the information items from the lsb_release command output. + + Returns: + A dictionary containing all information items. + """ + if not self.include_lsb: + return {} + try: + cmd = ("lsb_release", "-a") + stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) + # Command not found or lsb_release returned error + except (OSError, subprocess.CalledProcessError): + return {} + content = self._to_str(stdout).splitlines() + return self._parse_lsb_release_content(content) + + @staticmethod + def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]: + """ + Parse the output of the lsb_release command. + + Parameters: + + * lines: Iterable through the lines of the lsb_release output. + Each line must be a unicode string or a UTF-8 encoded byte + string. + + Returns: + A dictionary containing all information items. + """ + props = {} + for line in lines: + kv = line.strip("\n").split(":", 1) + if len(kv) != 2: + # Ignore lines without colon. 
+ continue + k, v = kv + props.update({k.replace(" ", "_").lower(): v.strip()}) + return props + + @cached_property + def _uname_info(self) -> Dict[str, str]: + if not self.include_uname: + return {} + try: + cmd = ("uname", "-rs") + stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) + except OSError: + return {} + content = self._to_str(stdout).splitlines() + return self._parse_uname_content(content) + + @cached_property + def _oslevel_info(self) -> str: + if not self.include_oslevel: + return "" + try: + stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL) + except (OSError, subprocess.CalledProcessError): + return "" + return self._to_str(stdout).strip() + + @cached_property + def _debian_version(self) -> str: + try: + with open( + os.path.join(self.etc_dir, "debian_version"), encoding="ascii" + ) as fp: + return fp.readline().rstrip() + except FileNotFoundError: + return "" + + @staticmethod + def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]: + if not lines: + return {} + props = {} + match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip()) + if match: + name, version = match.groups() + + # This is to prevent the Linux kernel version from + # appearing as the 'best' version on otherwise + # identifiable distributions. + if name == "Linux": + return {} + props["id"] = name.lower() + props["name"] = name + props["release"] = version + return props + + @staticmethod + def _to_str(bytestring: bytes) -> str: + encoding = sys.getfilesystemencoding() + return bytestring.decode(encoding) + + @cached_property + def _distro_release_info(self) -> Dict[str, str]: + """ + Get the information items from the specified distro release file. + + Returns: + A dictionary containing all information items. + """ + if self.distro_release_file: + # If it was specified, we use it and parse what we can, even if + # its file name or content does not match the expected pattern. + distro_info = self._parse_distro_release_file(self.distro_release_file) + basename = os.path.basename(self.distro_release_file) + # The file name pattern for user-specified distro release files + # is somewhat more tolerant (compared to when searching for the + # file), because we want to use what was specified as best as + # possible. + match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) + else: + try: + basenames = [ + basename + for basename in os.listdir(self.etc_dir) + if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES + and os.path.isfile(os.path.join(self.etc_dir, basename)) + ] + # We sort for repeatability in cases where there are multiple + # distro specific files; e.g. CentOS, Oracle, Enterprise all + # containing `redhat-release` on top of their own. + basenames.sort() + except OSError: + # This may occur when /etc is not readable but we can't be + # sure about the *-release files. Check common entries of + # /etc for information. If they turn out to not be there the + # error is handled in `_parse_distro_release_file()`. + basenames = _DISTRO_RELEASE_BASENAMES + for basename in basenames: + match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) + if match is None: + continue + filepath = os.path.join(self.etc_dir, basename) + distro_info = self._parse_distro_release_file(filepath) + # The name is always present if the pattern matches. + if "name" not in distro_info: + continue + self.distro_release_file = filepath + break + else: # the loop didn't "break": no candidate. 
+ return {} + + if match is not None: + distro_info["id"] = match.group(1) + + # CloudLinux < 7: manually enrich info with proper id. + if "cloudlinux" in distro_info.get("name", "").lower(): + distro_info["id"] = "cloudlinux" + + return distro_info + + def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]: + """ + Parse a distro release file. + + Parameters: + + * filepath: Path name of the distro release file. + + Returns: + A dictionary containing all information items. + """ + try: + with open(filepath, encoding="utf-8") as fp: + # Only parse the first line. For instance, on SLES there + # are multiple lines. We don't want them... + return self._parse_distro_release_content(fp.readline()) + except OSError: + # Ignore not being able to read a specific, seemingly version + # related file. + # See https://github.com/python-distro/distro/issues/162 + return {} + + @staticmethod + def _parse_distro_release_content(line: str) -> Dict[str, str]: + """ + Parse a line from a distro release file. + + Parameters: + * line: Line from the distro release file. Must be a unicode string + or a UTF-8 encoded byte string. + + Returns: + A dictionary containing all information items. + """ + matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) + distro_info = {} + if matches: + # regexp ensures non-None + distro_info["name"] = matches.group(3)[::-1] + if matches.group(2): + distro_info["version_id"] = matches.group(2)[::-1] + if matches.group(1): + distro_info["codename"] = matches.group(1)[::-1] + elif line: + distro_info["name"] = line.strip() + return distro_info + + +_distro = LinuxDistribution() + + +def main() -> None: + logger = logging.getLogger(__name__) + logger.setLevel(logging.DEBUG) + logger.addHandler(logging.StreamHandler(sys.stdout)) + + parser = argparse.ArgumentParser(description="OS distro info tool") + parser.add_argument( + "--json", "-j", help="Output in machine readable format", action="store_true" + ) + + parser.add_argument( + "--root-dir", + "-r", + type=str, + dest="root_dir", + help="Path to the root filesystem directory (defaults to /)", + ) + + args = parser.parse_args() + + if args.root_dir: + dist = LinuxDistribution( + include_lsb=False, + include_uname=False, + include_oslevel=False, + root_dir=args.root_dir, + ) + else: + dist = _distro + + if args.json: + logger.info(json.dumps(dist.info(), indent=4, sort_keys=True)) + else: + logger.info("Name: %s", dist.name(pretty=True)) + distribution_version = dist.version(pretty=True) + logger.info("Version: %s", distribution_version) + distribution_codename = dist.codename() + logger.info("Codename: %s", distribution_codename) + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/distro/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/distro/py.typed new file mode 100644 index 0000000..e69de29
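The distro module vendored above is consumed almost entirely through its module-level accessor functions, which all delegate to the shared _distro instance created at import time. A minimal usage sketch follows; it assumes the standalone distro package from PyPI (the copy in this commit is pip's private vendored copy and is not meant to be imported directly), and /mnt/image is a hypothetical mount point:

import distro  # standalone PyPI package, not pip._vendor.distro

# Consolidated accessors, all backed by the module-global LinuxDistribution.
print(distro.id())                # e.g. "ubuntu"
print(distro.name(pretty=True))   # e.g. "Ubuntu 22.04 LTS"
print(distro.version(best=True))  # most precise version any data source offers

# For a mounted image, root_dir confines lookups to that tree; the
# constructor rejects enabling the subprocess sources (lsb_release, uname,
# oslevel) together with root_dir, since those would describe the host.
d = distro.LinuxDistribution(root_dir="/mnt/image")  # hypothetical path
print(d.info())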
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__init__.py new file mode 100644 index 0000000..cfdc030 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__init__.py @@ -0,0 +1,45 @@ +from .core import ( + IDNABidiError, + IDNAError, + InvalidCodepoint, + InvalidCodepointContext, + alabel, + check_bidi, + check_hyphen_ok, + check_initial_combiner, + check_label, + check_nfc, + decode, + encode, + ulabel, + uts46_remap, + valid_contextj, + valid_contexto, + valid_label_length, + valid_string_length, +) +from .intranges import intranges_contain +from .package_data import __version__ + +__all__ = [ + "__version__", + "IDNABidiError", + "IDNAError", + "InvalidCodepoint", + "InvalidCodepointContext", + "alabel", + "check_bidi", + "check_hyphen_ok", + "check_initial_combiner", + "check_label", + "check_nfc", + "decode", + "encode", + "intranges_contain", + "ulabel", + "uts46_remap", + "valid_contextj", + "valid_contexto", + "valid_label_length", + "valid_string_length", +]
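The names re-exported above make up the package's public surface: whole-domain conversion goes through encode/decode, while alabel/ulabel handle individual labels. A minimal sketch of the round trip, assuming the standalone idna package from PyPI rather than this vendored copy:

import idna

# U-label -> A-label: Punycode with the "xn--" ACE prefix, per IDNA 2008.
ace = idna.encode("bücher.example")   # b'xn--bcher-kva.example'
assert idna.decode(ace) == "bücher.example"

# Per-label helpers from the export list above.
print(idna.alabel("bücher"))          # b'xn--bcher-kva'
print(idna.ulabel(b"xn--bcher-kva"))  # 'bücher'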
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..dd8c2bb Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-312.pyc new file mode 100644 index 0000000..040bccc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-312.pyc new file mode 100644 index 0000000..4356954 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/core.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/core.cpython-312.pyc new file mode 100644 index 0000000..0629676 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/core.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-312.pyc new file mode 100644 index 0000000..0cead2a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-312.pyc new file mode 100644 index 0000000..2a7e19a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-312.pyc new file mode 100644 index 0000000..6bdb1cd Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-312.pyc new file mode 100644 index 0000000..6d67b25 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/codec.py b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/codec.py new file mode 100644 index 0000000..913abfd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/codec.py @@ -0,0 +1,122 @@ +import codecs +import re +from typing import Any, Optional, Tuple + +from .core import IDNAError, alabel, decode, encode, ulabel + +_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]") + + +class Codec(codecs.Codec): + def encode(self, data: str, errors: str = "strict") -> Tuple[bytes, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return b"", 0 + + return encode(data), len(data) + + def decode(self, data: bytes, errors: str = "strict") -> Tuple[str, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return "", 0 + + return decode(data), len(data) + + +class IncrementalEncoder(codecs.BufferedIncrementalEncoder): + def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[bytes, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return b"", 0 + + labels = _unicode_dots_re.split(data) + trailing_dot = b"" + if labels: + if not labels[-1]: + trailing_dot = b"." + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = b"." + + result = [] + size = 0 + for label in labels: + result.append(alabel(label)) + if size: + size += 1 + size += len(label) + + # Join with U+002E + result_bytes = b".".join(result) + trailing_dot + size += len(trailing_dot) + return result_bytes, size + + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def _buffer_decode(self, data: Any, errors: str, final: bool) -> Tuple[str, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return ("", 0) + + if not isinstance(data, str): + data = str(data, "ascii") + + labels = _unicode_dots_re.split(data) + trailing_dot = "" + if labels: + if not labels[-1]: + trailing_dot = "." + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = "."
+ + result = [] + size = 0 + for label in labels: + result.append(ulabel(label)) + if size: + size += 1 + size += len(label) + + result_str = ".".join(result) + trailing_dot + size += len(trailing_dot) + return (result_str, size) + + +class StreamWriter(Codec, codecs.StreamWriter): + pass + + +class StreamReader(Codec, codecs.StreamReader): + pass + + +def search_function(name: str) -> Optional[codecs.CodecInfo]: + if name != "idna2008": + return None + return codecs.CodecInfo( + name=name, + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) + + +codecs.register(search_function) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/compat.py b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/compat.py new file mode 100644 index 0000000..1df9f2a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/compat.py @@ -0,0 +1,15 @@ +from typing import Any, Union + +from .core import decode, encode + + +def ToASCII(label: str) -> bytes: + return encode(label) + + +def ToUnicode(label: Union[bytes, bytearray]) -> str: + return decode(label) + + +def nameprep(s: Any) -> None: + raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/core.py b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/core.py new file mode 100644 index 0000000..9115f12 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/core.py @@ -0,0 +1,437 @@ +import bisect +import re +import unicodedata +from typing import Optional, Union + +from . import idnadata +from .intranges import intranges_contain + +_virama_combining_class = 9 +_alabel_prefix = b"xn--" +_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]") + + +class IDNAError(UnicodeError): + """Base exception for all IDNA-encoding related problems""" + + pass + + +class IDNABidiError(IDNAError): + """Exception when bidirectional requirements are not satisfied""" + + pass + + +class InvalidCodepoint(IDNAError): + """Exception when a disallowed or unallocated codepoint is used""" + + pass + + +class InvalidCodepointContext(IDNAError): + """Exception when the codepoint is not valid in the context it is used""" + + pass + + +def _combining_class(cp: int) -> int: + v = unicodedata.combining(chr(cp)) + if v == 0: + if not unicodedata.name(chr(cp)): + raise ValueError("Unknown character in unicodedata") + return v + + +def _is_script(cp: str, script: str) -> bool: + return intranges_contain(ord(cp), idnadata.scripts[script]) + + +def _punycode(s: str) -> bytes: + return s.encode("punycode") + + +def _unot(s: int) -> str: + return "U+{:04X}".format(s) + + +def valid_label_length(label: Union[bytes, str]) -> bool: + if len(label) > 63: + return False + return True + + +def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool: + if len(label) > (254 if trailing_dot else 253): + return False + return True + + +def check_bidi(label: str, check_ltr: bool = False) -> bool: + # Bidi rules should only be applied if string contains RTL characters + bidi_label = False + for idx, cp in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + if direction == "": + # String likely comes from a newer version of Unicode + raise IDNABidiError("Unknown directionality in label {} at position {}".format(repr(label), idx)) + if direction in ["R", "AL", "AN"]: + bidi_label = True + if not 
bidi_label and not check_ltr: + return True + + # Bidi rule 1 + direction = unicodedata.bidirectional(label[0]) + if direction in ["R", "AL"]: + rtl = True + elif direction == "L": + rtl = False + else: + raise IDNABidiError("First codepoint in label {} must be directionality L, R or AL".format(repr(label))) + + valid_ending = False + number_type: Optional[str] = None + for idx, cp in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + + if rtl: + # Bidi rule 2 + if direction not in [ + "R", + "AL", + "AN", + "EN", + "ES", + "CS", + "ET", + "ON", + "BN", + "NSM", + ]: + raise IDNABidiError("Invalid direction for codepoint at position {} in a right-to-left label".format(idx)) + # Bidi rule 3 + if direction in ["R", "AL", "EN", "AN"]: + valid_ending = True + elif direction != "NSM": + valid_ending = False + # Bidi rule 4 + if direction in ["AN", "EN"]: + if not number_type: + number_type = direction + else: + if number_type != direction: + raise IDNABidiError("Can not mix numeral types in a right-to-left label") + else: + # Bidi rule 5 + if direction not in ["L", "EN", "ES", "CS", "ET", "ON", "BN", "NSM"]: + raise IDNABidiError("Invalid direction for codepoint at position {} in a left-to-right label".format(idx)) + # Bidi rule 6 + if direction in ["L", "EN"]: + valid_ending = True + elif direction != "NSM": + valid_ending = False + + if not valid_ending: + raise IDNABidiError("Label ends with illegal codepoint directionality") + + return True + + +def check_initial_combiner(label: str) -> bool: + if unicodedata.category(label[0])[0] == "M": + raise IDNAError("Label begins with an illegal combining character") + return True + + +def check_hyphen_ok(label: str) -> bool: + if label[2:4] == "--": + raise IDNAError("Label has disallowed hyphens in 3rd and 4th position") + if label[0] == "-" or label[-1] == "-": + raise IDNAError("Label must not start or end with a hyphen") + return True + + +def check_nfc(label: str) -> None: + if unicodedata.normalize("NFC", label) != label: + raise IDNAError("Label must be in Normalization Form C") + + +def valid_contextj(label: str, pos: int) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x200C: + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + + ok = False + for i in range(pos - 1, -1, -1): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord("T"): + continue + elif joining_type in [ord("L"), ord("D")]: + ok = True + break + else: + break + + if not ok: + return False + + ok = False + for i in range(pos + 1, len(label)): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord("T"): + continue + elif joining_type in [ord("R"), ord("D")]: + ok = True + break + else: + break + return ok + + if cp_value == 0x200D: + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + return False + + else: + return False + + +def valid_contexto(label: str, pos: int, exception: bool = False) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x00B7: + if 0 < pos < len(label) - 1: + if ord(label[pos - 1]) == 0x006C and ord(label[pos + 1]) == 0x006C: + return True + return False + + elif cp_value == 0x0375: + if pos < len(label) - 1 and len(label) > 1: + return _is_script(label[pos + 1], "Greek") + return False + + elif cp_value == 0x05F3 or cp_value == 0x05F4: + if pos > 0: + return _is_script(label[pos - 1], "Hebrew") + return False + + elif cp_value == 0x30FB: + for cp in label: + if cp == 
"\u30fb": + continue + if _is_script(cp, "Hiragana") or _is_script(cp, "Katakana") or _is_script(cp, "Han"): + return True + return False + + elif 0x660 <= cp_value <= 0x669: + for cp in label: + if 0x6F0 <= ord(cp) <= 0x06F9: + return False + return True + + elif 0x6F0 <= cp_value <= 0x6F9: + for cp in label: + if 0x660 <= ord(cp) <= 0x0669: + return False + return True + + return False + + +def check_label(label: Union[str, bytes, bytearray]) -> None: + if isinstance(label, (bytes, bytearray)): + label = label.decode("utf-8") + if len(label) == 0: + raise IDNAError("Empty Label") + + check_nfc(label) + check_hyphen_ok(label) + check_initial_combiner(label) + + for pos, cp in enumerate(label): + cp_value = ord(cp) + if intranges_contain(cp_value, idnadata.codepoint_classes["PVALID"]): + continue + elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTJ"]): + try: + if not valid_contextj(label, pos): + raise InvalidCodepointContext( + "Joiner {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label)) + ) + except ValueError: + raise IDNAError( + "Unknown codepoint adjacent to joiner {} at position {} in {}".format( + _unot(cp_value), pos + 1, repr(label) + ) + ) + elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTO"]): + if not valid_contexto(label, pos): + raise InvalidCodepointContext( + "Codepoint {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label)) + ) + else: + raise InvalidCodepoint( + "Codepoint {} at position {} of {} not allowed".format(_unot(cp_value), pos + 1, repr(label)) + ) + + check_bidi(label) + + +def alabel(label: str) -> bytes: + try: + label_bytes = label.encode("ascii") + ulabel(label_bytes) + if not valid_label_length(label_bytes): + raise IDNAError("Label too long") + return label_bytes + except UnicodeEncodeError: + pass + + check_label(label) + label_bytes = _alabel_prefix + _punycode(label) + + if not valid_label_length(label_bytes): + raise IDNAError("Label too long") + + return label_bytes + + +def ulabel(label: Union[str, bytes, bytearray]) -> str: + if not isinstance(label, (bytes, bytearray)): + try: + label_bytes = label.encode("ascii") + except UnicodeEncodeError: + check_label(label) + return label + else: + label_bytes = label + + label_bytes = label_bytes.lower() + if label_bytes.startswith(_alabel_prefix): + label_bytes = label_bytes[len(_alabel_prefix) :] + if not label_bytes: + raise IDNAError("Malformed A-label, no Punycode eligible content found") + if label_bytes.decode("ascii")[-1] == "-": + raise IDNAError("A-label must not end with a hyphen") + else: + check_label(label_bytes) + return label_bytes.decode("ascii") + + try: + label = label_bytes.decode("punycode") + except UnicodeError: + raise IDNAError("Invalid A-label") + check_label(label) + return label + + +def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str: + """Re-map the characters in the string according to UTS46 processing.""" + from .uts46data import uts46data + + output = "" + + for pos, char in enumerate(domain): + code_point = ord(char) + try: + uts46row = uts46data[code_point if code_point < 256 else bisect.bisect_left(uts46data, (code_point, "Z")) - 1] + status = uts46row[1] + replacement: Optional[str] = None + if len(uts46row) == 3: + replacement = uts46row[2] + if ( + status == "V" + or (status == "D" and not transitional) + or (status == "3" and not std3_rules and replacement is None) + ): + output += char + elif replacement is not None and ( + status == 
"M" or (status == "3" and not std3_rules) or (status == "D" and transitional) + ): + output += replacement + elif status != "I": + raise IndexError() + except IndexError: + raise InvalidCodepoint( + "Codepoint {} not allowed at position {} in {}".format(_unot(code_point), pos + 1, repr(domain)) + ) + + return unicodedata.normalize("NFC", output) + + +def encode( + s: Union[str, bytes, bytearray], + strict: bool = False, + uts46: bool = False, + std3_rules: bool = False, + transitional: bool = False, +) -> bytes: + if not isinstance(s, str): + try: + s = str(s, "ascii") + except UnicodeDecodeError: + raise IDNAError("should pass a unicode string to the function rather than a byte string.") + if uts46: + s = uts46_remap(s, std3_rules, transitional) + trailing_dot = False + result = [] + if strict: + labels = s.split(".") + else: + labels = _unicode_dots_re.split(s) + if not labels or labels == [""]: + raise IDNAError("Empty domain") + if labels[-1] == "": + del labels[-1] + trailing_dot = True + for label in labels: + s = alabel(label) + if s: + result.append(s) + else: + raise IDNAError("Empty label") + if trailing_dot: + result.append(b"") + s = b".".join(result) + if not valid_string_length(s, trailing_dot): + raise IDNAError("Domain too long") + return s + + +def decode( + s: Union[str, bytes, bytearray], + strict: bool = False, + uts46: bool = False, + std3_rules: bool = False, +) -> str: + try: + if not isinstance(s, str): + s = str(s, "ascii") + except UnicodeDecodeError: + raise IDNAError("Invalid ASCII in A-label") + if uts46: + s = uts46_remap(s, std3_rules, False) + trailing_dot = False + result = [] + if not strict: + labels = _unicode_dots_re.split(s) + else: + labels = s.split(".") + if not labels or labels == [""]: + raise IDNAError("Empty domain") + if not labels[-1]: + del labels[-1] + trailing_dot = True + for label in labels: + s = ulabel(label) + if s: + result.append(s) + else: + raise IDNAError("Empty label") + if trailing_dot: + result.append("") + return ".".join(result) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/idnadata.py b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/idnadata.py new file mode 100644 index 0000000..4be6004 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/idnadata.py @@ -0,0 +1,4243 @@ +# This file is automatically generated by tools/idna-data + +__version__ = "15.1.0" +scripts = { + "Greek": ( + 0x37000000374, + 0x37500000378, + 0x37A0000037E, + 0x37F00000380, + 0x38400000385, + 0x38600000387, + 0x3880000038B, + 0x38C0000038D, + 0x38E000003A2, + 0x3A3000003E2, + 0x3F000000400, + 0x1D2600001D2B, + 0x1D5D00001D62, + 0x1D6600001D6B, + 0x1DBF00001DC0, + 0x1F0000001F16, + 0x1F1800001F1E, + 0x1F2000001F46, + 0x1F4800001F4E, + 0x1F5000001F58, + 0x1F5900001F5A, + 0x1F5B00001F5C, + 0x1F5D00001F5E, + 0x1F5F00001F7E, + 0x1F8000001FB5, + 0x1FB600001FC5, + 0x1FC600001FD4, + 0x1FD600001FDC, + 0x1FDD00001FF0, + 0x1FF200001FF5, + 0x1FF600001FFF, + 0x212600002127, + 0xAB650000AB66, + 0x101400001018F, + 0x101A0000101A1, + 0x1D2000001D246, + ), + "Han": ( + 0x2E8000002E9A, + 0x2E9B00002EF4, + 0x2F0000002FD6, + 0x300500003006, + 0x300700003008, + 0x30210000302A, + 0x30380000303C, + 0x340000004DC0, + 0x4E000000A000, + 0xF9000000FA6E, + 0xFA700000FADA, + 0x16FE200016FE4, + 0x16FF000016FF2, + 0x200000002A6E0, + 0x2A7000002B73A, + 0x2B7400002B81E, + 0x2B8200002CEA2, + 0x2CEB00002EBE1, + 0x2EBF00002EE5E, + 0x2F8000002FA1E, + 0x300000003134B, + 0x31350000323B0, + ), + "Hebrew": ( + 0x591000005C8, + 0x5D0000005EB, + 
0x5EF000005F5, + 0xFB1D0000FB37, + 0xFB380000FB3D, + 0xFB3E0000FB3F, + 0xFB400000FB42, + 0xFB430000FB45, + 0xFB460000FB50, + ), + "Hiragana": ( + 0x304100003097, + 0x309D000030A0, + 0x1B0010001B120, + 0x1B1320001B133, + 0x1B1500001B153, + 0x1F2000001F201, + ), + "Katakana": ( + 0x30A1000030FB, + 0x30FD00003100, + 0x31F000003200, + 0x32D0000032FF, + 0x330000003358, + 0xFF660000FF70, + 0xFF710000FF9E, + 0x1AFF00001AFF4, + 0x1AFF50001AFFC, + 0x1AFFD0001AFFF, + 0x1B0000001B001, + 0x1B1200001B123, + 0x1B1550001B156, + 0x1B1640001B168, + ), +} +joining_types = { + 0xAD: 84, + 0x300: 84, + 0x301: 84, + 0x302: 84, + 0x303: 84, + 0x304: 84, + 0x305: 84, + 0x306: 84, + 0x307: 84, + 0x308: 84, + 0x309: 84, + 0x30A: 84, + 0x30B: 84, + 0x30C: 84, + 0x30D: 84, + 0x30E: 84, + 0x30F: 84, + 0x310: 84, + 0x311: 84, + 0x312: 84, + 0x313: 84, + 0x314: 84, + 0x315: 84, + 0x316: 84, + 0x317: 84, + 0x318: 84, + 0x319: 84, + 0x31A: 84, + 0x31B: 84, + 0x31C: 84, + 0x31D: 84, + 0x31E: 84, + 0x31F: 84, + 0x320: 84, + 0x321: 84, + 0x322: 84, + 0x323: 84, + 0x324: 84, + 0x325: 84, + 0x326: 84, + 0x327: 84, + 0x328: 84, + 0x329: 84, + 0x32A: 84, + 0x32B: 84, + 0x32C: 84, + 0x32D: 84, + 0x32E: 84, + 0x32F: 84, + 0x330: 84, + 0x331: 84, + 0x332: 84, + 0x333: 84, + 0x334: 84, + 0x335: 84, + 0x336: 84, + 0x337: 84, + 0x338: 84, + 0x339: 84, + 0x33A: 84, + 0x33B: 84, + 0x33C: 84, + 0x33D: 84, + 0x33E: 84, + 0x33F: 84, + 0x340: 84, + 0x341: 84, + 0x342: 84, + 0x343: 84, + 0x344: 84, + 0x345: 84, + 0x346: 84, + 0x347: 84, + 0x348: 84, + 0x349: 84, + 0x34A: 84, + 0x34B: 84, + 0x34C: 84, + 0x34D: 84, + 0x34E: 84, + 0x34F: 84, + 0x350: 84, + 0x351: 84, + 0x352: 84, + 0x353: 84, + 0x354: 84, + 0x355: 84, + 0x356: 84, + 0x357: 84, + 0x358: 84, + 0x359: 84, + 0x35A: 84, + 0x35B: 84, + 0x35C: 84, + 0x35D: 84, + 0x35E: 84, + 0x35F: 84, + 0x360: 84, + 0x361: 84, + 0x362: 84, + 0x363: 84, + 0x364: 84, + 0x365: 84, + 0x366: 84, + 0x367: 84, + 0x368: 84, + 0x369: 84, + 0x36A: 84, + 0x36B: 84, + 0x36C: 84, + 0x36D: 84, + 0x36E: 84, + 0x36F: 84, + 0x483: 84, + 0x484: 84, + 0x485: 84, + 0x486: 84, + 0x487: 84, + 0x488: 84, + 0x489: 84, + 0x591: 84, + 0x592: 84, + 0x593: 84, + 0x594: 84, + 0x595: 84, + 0x596: 84, + 0x597: 84, + 0x598: 84, + 0x599: 84, + 0x59A: 84, + 0x59B: 84, + 0x59C: 84, + 0x59D: 84, + 0x59E: 84, + 0x59F: 84, + 0x5A0: 84, + 0x5A1: 84, + 0x5A2: 84, + 0x5A3: 84, + 0x5A4: 84, + 0x5A5: 84, + 0x5A6: 84, + 0x5A7: 84, + 0x5A8: 84, + 0x5A9: 84, + 0x5AA: 84, + 0x5AB: 84, + 0x5AC: 84, + 0x5AD: 84, + 0x5AE: 84, + 0x5AF: 84, + 0x5B0: 84, + 0x5B1: 84, + 0x5B2: 84, + 0x5B3: 84, + 0x5B4: 84, + 0x5B5: 84, + 0x5B6: 84, + 0x5B7: 84, + 0x5B8: 84, + 0x5B9: 84, + 0x5BA: 84, + 0x5BB: 84, + 0x5BC: 84, + 0x5BD: 84, + 0x5BF: 84, + 0x5C1: 84, + 0x5C2: 84, + 0x5C4: 84, + 0x5C5: 84, + 0x5C7: 84, + 0x610: 84, + 0x611: 84, + 0x612: 84, + 0x613: 84, + 0x614: 84, + 0x615: 84, + 0x616: 84, + 0x617: 84, + 0x618: 84, + 0x619: 84, + 0x61A: 84, + 0x61C: 84, + 0x620: 68, + 0x622: 82, + 0x623: 82, + 0x624: 82, + 0x625: 82, + 0x626: 68, + 0x627: 82, + 0x628: 68, + 0x629: 82, + 0x62A: 68, + 0x62B: 68, + 0x62C: 68, + 0x62D: 68, + 0x62E: 68, + 0x62F: 82, + 0x630: 82, + 0x631: 82, + 0x632: 82, + 0x633: 68, + 0x634: 68, + 0x635: 68, + 0x636: 68, + 0x637: 68, + 0x638: 68, + 0x639: 68, + 0x63A: 68, + 0x63B: 68, + 0x63C: 68, + 0x63D: 68, + 0x63E: 68, + 0x63F: 68, + 0x640: 67, + 0x641: 68, + 0x642: 68, + 0x643: 68, + 0x644: 68, + 0x645: 68, + 0x646: 68, + 0x647: 68, + 0x648: 82, + 0x649: 68, + 0x64A: 68, + 0x64B: 84, + 0x64C: 84, + 0x64D: 84, + 0x64E: 84, + 0x64F: 84, + 
0x650: 84, + 0x651: 84, + 0x652: 84, + 0x653: 84, + 0x654: 84, + 0x655: 84, + 0x656: 84, + 0x657: 84, + 0x658: 84, + 0x659: 84, + 0x65A: 84, + 0x65B: 84, + 0x65C: 84, + 0x65D: 84, + 0x65E: 84, + 0x65F: 84, + 0x66E: 68, + 0x66F: 68, + 0x670: 84, + 0x671: 82, + 0x672: 82, + 0x673: 82, + 0x675: 82, + 0x676: 82, + 0x677: 82, + 0x678: 68, + 0x679: 68, + 0x67A: 68, + 0x67B: 68, + 0x67C: 68, + 0x67D: 68, + 0x67E: 68, + 0x67F: 68, + 0x680: 68, + 0x681: 68, + 0x682: 68, + 0x683: 68, + 0x684: 68, + 0x685: 68, + 0x686: 68, + 0x687: 68, + 0x688: 82, + 0x689: 82, + 0x68A: 82, + 0x68B: 82, + 0x68C: 82, + 0x68D: 82, + 0x68E: 82, + 0x68F: 82, + 0x690: 82, + 0x691: 82, + 0x692: 82, + 0x693: 82, + 0x694: 82, + 0x695: 82, + 0x696: 82, + 0x697: 82, + 0x698: 82, + 0x699: 82, + 0x69A: 68, + 0x69B: 68, + 0x69C: 68, + 0x69D: 68, + 0x69E: 68, + 0x69F: 68, + 0x6A0: 68, + 0x6A1: 68, + 0x6A2: 68, + 0x6A3: 68, + 0x6A4: 68, + 0x6A5: 68, + 0x6A6: 68, + 0x6A7: 68, + 0x6A8: 68, + 0x6A9: 68, + 0x6AA: 68, + 0x6AB: 68, + 0x6AC: 68, + 0x6AD: 68, + 0x6AE: 68, + 0x6AF: 68, + 0x6B0: 68, + 0x6B1: 68, + 0x6B2: 68, + 0x6B3: 68, + 0x6B4: 68, + 0x6B5: 68, + 0x6B6: 68, + 0x6B7: 68, + 0x6B8: 68, + 0x6B9: 68, + 0x6BA: 68, + 0x6BB: 68, + 0x6BC: 68, + 0x6BD: 68, + 0x6BE: 68, + 0x6BF: 68, + 0x6C0: 82, + 0x6C1: 68, + 0x6C2: 68, + 0x6C3: 82, + 0x6C4: 82, + 0x6C5: 82, + 0x6C6: 82, + 0x6C7: 82, + 0x6C8: 82, + 0x6C9: 82, + 0x6CA: 82, + 0x6CB: 82, + 0x6CC: 68, + 0x6CD: 82, + 0x6CE: 68, + 0x6CF: 82, + 0x6D0: 68, + 0x6D1: 68, + 0x6D2: 82, + 0x6D3: 82, + 0x6D5: 82, + 0x6D6: 84, + 0x6D7: 84, + 0x6D8: 84, + 0x6D9: 84, + 0x6DA: 84, + 0x6DB: 84, + 0x6DC: 84, + 0x6DF: 84, + 0x6E0: 84, + 0x6E1: 84, + 0x6E2: 84, + 0x6E3: 84, + 0x6E4: 84, + 0x6E7: 84, + 0x6E8: 84, + 0x6EA: 84, + 0x6EB: 84, + 0x6EC: 84, + 0x6ED: 84, + 0x6EE: 82, + 0x6EF: 82, + 0x6FA: 68, + 0x6FB: 68, + 0x6FC: 68, + 0x6FF: 68, + 0x70F: 84, + 0x710: 82, + 0x711: 84, + 0x712: 68, + 0x713: 68, + 0x714: 68, + 0x715: 82, + 0x716: 82, + 0x717: 82, + 0x718: 82, + 0x719: 82, + 0x71A: 68, + 0x71B: 68, + 0x71C: 68, + 0x71D: 68, + 0x71E: 82, + 0x71F: 68, + 0x720: 68, + 0x721: 68, + 0x722: 68, + 0x723: 68, + 0x724: 68, + 0x725: 68, + 0x726: 68, + 0x727: 68, + 0x728: 82, + 0x729: 68, + 0x72A: 82, + 0x72B: 68, + 0x72C: 82, + 0x72D: 68, + 0x72E: 68, + 0x72F: 82, + 0x730: 84, + 0x731: 84, + 0x732: 84, + 0x733: 84, + 0x734: 84, + 0x735: 84, + 0x736: 84, + 0x737: 84, + 0x738: 84, + 0x739: 84, + 0x73A: 84, + 0x73B: 84, + 0x73C: 84, + 0x73D: 84, + 0x73E: 84, + 0x73F: 84, + 0x740: 84, + 0x741: 84, + 0x742: 84, + 0x743: 84, + 0x744: 84, + 0x745: 84, + 0x746: 84, + 0x747: 84, + 0x748: 84, + 0x749: 84, + 0x74A: 84, + 0x74D: 82, + 0x74E: 68, + 0x74F: 68, + 0x750: 68, + 0x751: 68, + 0x752: 68, + 0x753: 68, + 0x754: 68, + 0x755: 68, + 0x756: 68, + 0x757: 68, + 0x758: 68, + 0x759: 82, + 0x75A: 82, + 0x75B: 82, + 0x75C: 68, + 0x75D: 68, + 0x75E: 68, + 0x75F: 68, + 0x760: 68, + 0x761: 68, + 0x762: 68, + 0x763: 68, + 0x764: 68, + 0x765: 68, + 0x766: 68, + 0x767: 68, + 0x768: 68, + 0x769: 68, + 0x76A: 68, + 0x76B: 82, + 0x76C: 82, + 0x76D: 68, + 0x76E: 68, + 0x76F: 68, + 0x770: 68, + 0x771: 82, + 0x772: 68, + 0x773: 82, + 0x774: 82, + 0x775: 68, + 0x776: 68, + 0x777: 68, + 0x778: 82, + 0x779: 82, + 0x77A: 68, + 0x77B: 68, + 0x77C: 68, + 0x77D: 68, + 0x77E: 68, + 0x77F: 68, + 0x7A6: 84, + 0x7A7: 84, + 0x7A8: 84, + 0x7A9: 84, + 0x7AA: 84, + 0x7AB: 84, + 0x7AC: 84, + 0x7AD: 84, + 0x7AE: 84, + 0x7AF: 84, + 0x7B0: 84, + 0x7CA: 68, + 0x7CB: 68, + 0x7CC: 68, + 0x7CD: 68, + 0x7CE: 68, + 0x7CF: 68, + 0x7D0: 68, + 0x7D1: 68, + 
0x7D2: 68, + 0x7D3: 68, + 0x7D4: 68, + 0x7D5: 68, + 0x7D6: 68, + 0x7D7: 68, + 0x7D8: 68, + 0x7D9: 68, + 0x7DA: 68, + 0x7DB: 68, + 0x7DC: 68, + 0x7DD: 68, + 0x7DE: 68, + 0x7DF: 68, + 0x7E0: 68, + 0x7E1: 68, + 0x7E2: 68, + 0x7E3: 68, + 0x7E4: 68, + 0x7E5: 68, + 0x7E6: 68, + 0x7E7: 68, + 0x7E8: 68, + 0x7E9: 68, + 0x7EA: 68, + 0x7EB: 84, + 0x7EC: 84, + 0x7ED: 84, + 0x7EE: 84, + 0x7EF: 84, + 0x7F0: 84, + 0x7F1: 84, + 0x7F2: 84, + 0x7F3: 84, + 0x7FA: 67, + 0x7FD: 84, + 0x816: 84, + 0x817: 84, + 0x818: 84, + 0x819: 84, + 0x81B: 84, + 0x81C: 84, + 0x81D: 84, + 0x81E: 84, + 0x81F: 84, + 0x820: 84, + 0x821: 84, + 0x822: 84, + 0x823: 84, + 0x825: 84, + 0x826: 84, + 0x827: 84, + 0x829: 84, + 0x82A: 84, + 0x82B: 84, + 0x82C: 84, + 0x82D: 84, + 0x840: 82, + 0x841: 68, + 0x842: 68, + 0x843: 68, + 0x844: 68, + 0x845: 68, + 0x846: 82, + 0x847: 82, + 0x848: 68, + 0x849: 82, + 0x84A: 68, + 0x84B: 68, + 0x84C: 68, + 0x84D: 68, + 0x84E: 68, + 0x84F: 68, + 0x850: 68, + 0x851: 68, + 0x852: 68, + 0x853: 68, + 0x854: 82, + 0x855: 68, + 0x856: 82, + 0x857: 82, + 0x858: 82, + 0x859: 84, + 0x85A: 84, + 0x85B: 84, + 0x860: 68, + 0x862: 68, + 0x863: 68, + 0x864: 68, + 0x865: 68, + 0x867: 82, + 0x868: 68, + 0x869: 82, + 0x86A: 82, + 0x870: 82, + 0x871: 82, + 0x872: 82, + 0x873: 82, + 0x874: 82, + 0x875: 82, + 0x876: 82, + 0x877: 82, + 0x878: 82, + 0x879: 82, + 0x87A: 82, + 0x87B: 82, + 0x87C: 82, + 0x87D: 82, + 0x87E: 82, + 0x87F: 82, + 0x880: 82, + 0x881: 82, + 0x882: 82, + 0x883: 67, + 0x884: 67, + 0x885: 67, + 0x886: 68, + 0x889: 68, + 0x88A: 68, + 0x88B: 68, + 0x88C: 68, + 0x88D: 68, + 0x88E: 82, + 0x898: 84, + 0x899: 84, + 0x89A: 84, + 0x89B: 84, + 0x89C: 84, + 0x89D: 84, + 0x89E: 84, + 0x89F: 84, + 0x8A0: 68, + 0x8A1: 68, + 0x8A2: 68, + 0x8A3: 68, + 0x8A4: 68, + 0x8A5: 68, + 0x8A6: 68, + 0x8A7: 68, + 0x8A8: 68, + 0x8A9: 68, + 0x8AA: 82, + 0x8AB: 82, + 0x8AC: 82, + 0x8AE: 82, + 0x8AF: 68, + 0x8B0: 68, + 0x8B1: 82, + 0x8B2: 82, + 0x8B3: 68, + 0x8B4: 68, + 0x8B5: 68, + 0x8B6: 68, + 0x8B7: 68, + 0x8B8: 68, + 0x8B9: 82, + 0x8BA: 68, + 0x8BB: 68, + 0x8BC: 68, + 0x8BD: 68, + 0x8BE: 68, + 0x8BF: 68, + 0x8C0: 68, + 0x8C1: 68, + 0x8C2: 68, + 0x8C3: 68, + 0x8C4: 68, + 0x8C5: 68, + 0x8C6: 68, + 0x8C7: 68, + 0x8C8: 68, + 0x8CA: 84, + 0x8CB: 84, + 0x8CC: 84, + 0x8CD: 84, + 0x8CE: 84, + 0x8CF: 84, + 0x8D0: 84, + 0x8D1: 84, + 0x8D2: 84, + 0x8D3: 84, + 0x8D4: 84, + 0x8D5: 84, + 0x8D6: 84, + 0x8D7: 84, + 0x8D8: 84, + 0x8D9: 84, + 0x8DA: 84, + 0x8DB: 84, + 0x8DC: 84, + 0x8DD: 84, + 0x8DE: 84, + 0x8DF: 84, + 0x8E0: 84, + 0x8E1: 84, + 0x8E3: 84, + 0x8E4: 84, + 0x8E5: 84, + 0x8E6: 84, + 0x8E7: 84, + 0x8E8: 84, + 0x8E9: 84, + 0x8EA: 84, + 0x8EB: 84, + 0x8EC: 84, + 0x8ED: 84, + 0x8EE: 84, + 0x8EF: 84, + 0x8F0: 84, + 0x8F1: 84, + 0x8F2: 84, + 0x8F3: 84, + 0x8F4: 84, + 0x8F5: 84, + 0x8F6: 84, + 0x8F7: 84, + 0x8F8: 84, + 0x8F9: 84, + 0x8FA: 84, + 0x8FB: 84, + 0x8FC: 84, + 0x8FD: 84, + 0x8FE: 84, + 0x8FF: 84, + 0x900: 84, + 0x901: 84, + 0x902: 84, + 0x93A: 84, + 0x93C: 84, + 0x941: 84, + 0x942: 84, + 0x943: 84, + 0x944: 84, + 0x945: 84, + 0x946: 84, + 0x947: 84, + 0x948: 84, + 0x94D: 84, + 0x951: 84, + 0x952: 84, + 0x953: 84, + 0x954: 84, + 0x955: 84, + 0x956: 84, + 0x957: 84, + 0x962: 84, + 0x963: 84, + 0x981: 84, + 0x9BC: 84, + 0x9C1: 84, + 0x9C2: 84, + 0x9C3: 84, + 0x9C4: 84, + 0x9CD: 84, + 0x9E2: 84, + 0x9E3: 84, + 0x9FE: 84, + 0xA01: 84, + 0xA02: 84, + 0xA3C: 84, + 0xA41: 84, + 0xA42: 84, + 0xA47: 84, + 0xA48: 84, + 0xA4B: 84, + 0xA4C: 84, + 0xA4D: 84, + 0xA51: 84, + 0xA70: 84, + 0xA71: 84, + 0xA75: 84, + 0xA81: 84, + 0xA82: 84, + 
0xABC: 84, + 0xAC1: 84, + 0xAC2: 84, + 0xAC3: 84, + 0xAC4: 84, + 0xAC5: 84, + 0xAC7: 84, + 0xAC8: 84, + 0xACD: 84, + 0xAE2: 84, + 0xAE3: 84, + 0xAFA: 84, + 0xAFB: 84, + 0xAFC: 84, + 0xAFD: 84, + 0xAFE: 84, + 0xAFF: 84, + 0xB01: 84, + 0xB3C: 84, + 0xB3F: 84, + 0xB41: 84, + 0xB42: 84, + 0xB43: 84, + 0xB44: 84, + 0xB4D: 84, + 0xB55: 84, + 0xB56: 84, + 0xB62: 84, + 0xB63: 84, + 0xB82: 84, + 0xBC0: 84, + 0xBCD: 84, + 0xC00: 84, + 0xC04: 84, + 0xC3C: 84, + 0xC3E: 84, + 0xC3F: 84, + 0xC40: 84, + 0xC46: 84, + 0xC47: 84, + 0xC48: 84, + 0xC4A: 84, + 0xC4B: 84, + 0xC4C: 84, + 0xC4D: 84, + 0xC55: 84, + 0xC56: 84, + 0xC62: 84, + 0xC63: 84, + 0xC81: 84, + 0xCBC: 84, + 0xCBF: 84, + 0xCC6: 84, + 0xCCC: 84, + 0xCCD: 84, + 0xCE2: 84, + 0xCE3: 84, + 0xD00: 84, + 0xD01: 84, + 0xD3B: 84, + 0xD3C: 84, + 0xD41: 84, + 0xD42: 84, + 0xD43: 84, + 0xD44: 84, + 0xD4D: 84, + 0xD62: 84, + 0xD63: 84, + 0xD81: 84, + 0xDCA: 84, + 0xDD2: 84, + 0xDD3: 84, + 0xDD4: 84, + 0xDD6: 84, + 0xE31: 84, + 0xE34: 84, + 0xE35: 84, + 0xE36: 84, + 0xE37: 84, + 0xE38: 84, + 0xE39: 84, + 0xE3A: 84, + 0xE47: 84, + 0xE48: 84, + 0xE49: 84, + 0xE4A: 84, + 0xE4B: 84, + 0xE4C: 84, + 0xE4D: 84, + 0xE4E: 84, + 0xEB1: 84, + 0xEB4: 84, + 0xEB5: 84, + 0xEB6: 84, + 0xEB7: 84, + 0xEB8: 84, + 0xEB9: 84, + 0xEBA: 84, + 0xEBB: 84, + 0xEBC: 84, + 0xEC8: 84, + 0xEC9: 84, + 0xECA: 84, + 0xECB: 84, + 0xECC: 84, + 0xECD: 84, + 0xECE: 84, + 0xF18: 84, + 0xF19: 84, + 0xF35: 84, + 0xF37: 84, + 0xF39: 84, + 0xF71: 84, + 0xF72: 84, + 0xF73: 84, + 0xF74: 84, + 0xF75: 84, + 0xF76: 84, + 0xF77: 84, + 0xF78: 84, + 0xF79: 84, + 0xF7A: 84, + 0xF7B: 84, + 0xF7C: 84, + 0xF7D: 84, + 0xF7E: 84, + 0xF80: 84, + 0xF81: 84, + 0xF82: 84, + 0xF83: 84, + 0xF84: 84, + 0xF86: 84, + 0xF87: 84, + 0xF8D: 84, + 0xF8E: 84, + 0xF8F: 84, + 0xF90: 84, + 0xF91: 84, + 0xF92: 84, + 0xF93: 84, + 0xF94: 84, + 0xF95: 84, + 0xF96: 84, + 0xF97: 84, + 0xF99: 84, + 0xF9A: 84, + 0xF9B: 84, + 0xF9C: 84, + 0xF9D: 84, + 0xF9E: 84, + 0xF9F: 84, + 0xFA0: 84, + 0xFA1: 84, + 0xFA2: 84, + 0xFA3: 84, + 0xFA4: 84, + 0xFA5: 84, + 0xFA6: 84, + 0xFA7: 84, + 0xFA8: 84, + 0xFA9: 84, + 0xFAA: 84, + 0xFAB: 84, + 0xFAC: 84, + 0xFAD: 84, + 0xFAE: 84, + 0xFAF: 84, + 0xFB0: 84, + 0xFB1: 84, + 0xFB2: 84, + 0xFB3: 84, + 0xFB4: 84, + 0xFB5: 84, + 0xFB6: 84, + 0xFB7: 84, + 0xFB8: 84, + 0xFB9: 84, + 0xFBA: 84, + 0xFBB: 84, + 0xFBC: 84, + 0xFC6: 84, + 0x102D: 84, + 0x102E: 84, + 0x102F: 84, + 0x1030: 84, + 0x1032: 84, + 0x1033: 84, + 0x1034: 84, + 0x1035: 84, + 0x1036: 84, + 0x1037: 84, + 0x1039: 84, + 0x103A: 84, + 0x103D: 84, + 0x103E: 84, + 0x1058: 84, + 0x1059: 84, + 0x105E: 84, + 0x105F: 84, + 0x1060: 84, + 0x1071: 84, + 0x1072: 84, + 0x1073: 84, + 0x1074: 84, + 0x1082: 84, + 0x1085: 84, + 0x1086: 84, + 0x108D: 84, + 0x109D: 84, + 0x135D: 84, + 0x135E: 84, + 0x135F: 84, + 0x1712: 84, + 0x1713: 84, + 0x1714: 84, + 0x1732: 84, + 0x1733: 84, + 0x1752: 84, + 0x1753: 84, + 0x1772: 84, + 0x1773: 84, + 0x17B4: 84, + 0x17B5: 84, + 0x17B7: 84, + 0x17B8: 84, + 0x17B9: 84, + 0x17BA: 84, + 0x17BB: 84, + 0x17BC: 84, + 0x17BD: 84, + 0x17C6: 84, + 0x17C9: 84, + 0x17CA: 84, + 0x17CB: 84, + 0x17CC: 84, + 0x17CD: 84, + 0x17CE: 84, + 0x17CF: 84, + 0x17D0: 84, + 0x17D1: 84, + 0x17D2: 84, + 0x17D3: 84, + 0x17DD: 84, + 0x1807: 68, + 0x180A: 67, + 0x180B: 84, + 0x180C: 84, + 0x180D: 84, + 0x180F: 84, + 0x1820: 68, + 0x1821: 68, + 0x1822: 68, + 0x1823: 68, + 0x1824: 68, + 0x1825: 68, + 0x1826: 68, + 0x1827: 68, + 0x1828: 68, + 0x1829: 68, + 0x182A: 68, + 0x182B: 68, + 0x182C: 68, + 0x182D: 68, + 0x182E: 68, + 0x182F: 68, + 0x1830: 68, + 0x1831: 68, 
+ 0x1832: 68, + 0x1833: 68, + 0x1834: 68, + 0x1835: 68, + 0x1836: 68, + 0x1837: 68, + 0x1838: 68, + 0x1839: 68, + 0x183A: 68, + 0x183B: 68, + 0x183C: 68, + 0x183D: 68, + 0x183E: 68, + 0x183F: 68, + 0x1840: 68, + 0x1841: 68, + 0x1842: 68, + 0x1843: 68, + 0x1844: 68, + 0x1845: 68, + 0x1846: 68, + 0x1847: 68, + 0x1848: 68, + 0x1849: 68, + 0x184A: 68, + 0x184B: 68, + 0x184C: 68, + 0x184D: 68, + 0x184E: 68, + 0x184F: 68, + 0x1850: 68, + 0x1851: 68, + 0x1852: 68, + 0x1853: 68, + 0x1854: 68, + 0x1855: 68, + 0x1856: 68, + 0x1857: 68, + 0x1858: 68, + 0x1859: 68, + 0x185A: 68, + 0x185B: 68, + 0x185C: 68, + 0x185D: 68, + 0x185E: 68, + 0x185F: 68, + 0x1860: 68, + 0x1861: 68, + 0x1862: 68, + 0x1863: 68, + 0x1864: 68, + 0x1865: 68, + 0x1866: 68, + 0x1867: 68, + 0x1868: 68, + 0x1869: 68, + 0x186A: 68, + 0x186B: 68, + 0x186C: 68, + 0x186D: 68, + 0x186E: 68, + 0x186F: 68, + 0x1870: 68, + 0x1871: 68, + 0x1872: 68, + 0x1873: 68, + 0x1874: 68, + 0x1875: 68, + 0x1876: 68, + 0x1877: 68, + 0x1878: 68, + 0x1885: 84, + 0x1886: 84, + 0x1887: 68, + 0x1888: 68, + 0x1889: 68, + 0x188A: 68, + 0x188B: 68, + 0x188C: 68, + 0x188D: 68, + 0x188E: 68, + 0x188F: 68, + 0x1890: 68, + 0x1891: 68, + 0x1892: 68, + 0x1893: 68, + 0x1894: 68, + 0x1895: 68, + 0x1896: 68, + 0x1897: 68, + 0x1898: 68, + 0x1899: 68, + 0x189A: 68, + 0x189B: 68, + 0x189C: 68, + 0x189D: 68, + 0x189E: 68, + 0x189F: 68, + 0x18A0: 68, + 0x18A1: 68, + 0x18A2: 68, + 0x18A3: 68, + 0x18A4: 68, + 0x18A5: 68, + 0x18A6: 68, + 0x18A7: 68, + 0x18A8: 68, + 0x18A9: 84, + 0x18AA: 68, + 0x1920: 84, + 0x1921: 84, + 0x1922: 84, + 0x1927: 84, + 0x1928: 84, + 0x1932: 84, + 0x1939: 84, + 0x193A: 84, + 0x193B: 84, + 0x1A17: 84, + 0x1A18: 84, + 0x1A1B: 84, + 0x1A56: 84, + 0x1A58: 84, + 0x1A59: 84, + 0x1A5A: 84, + 0x1A5B: 84, + 0x1A5C: 84, + 0x1A5D: 84, + 0x1A5E: 84, + 0x1A60: 84, + 0x1A62: 84, + 0x1A65: 84, + 0x1A66: 84, + 0x1A67: 84, + 0x1A68: 84, + 0x1A69: 84, + 0x1A6A: 84, + 0x1A6B: 84, + 0x1A6C: 84, + 0x1A73: 84, + 0x1A74: 84, + 0x1A75: 84, + 0x1A76: 84, + 0x1A77: 84, + 0x1A78: 84, + 0x1A79: 84, + 0x1A7A: 84, + 0x1A7B: 84, + 0x1A7C: 84, + 0x1A7F: 84, + 0x1AB0: 84, + 0x1AB1: 84, + 0x1AB2: 84, + 0x1AB3: 84, + 0x1AB4: 84, + 0x1AB5: 84, + 0x1AB6: 84, + 0x1AB7: 84, + 0x1AB8: 84, + 0x1AB9: 84, + 0x1ABA: 84, + 0x1ABB: 84, + 0x1ABC: 84, + 0x1ABD: 84, + 0x1ABE: 84, + 0x1ABF: 84, + 0x1AC0: 84, + 0x1AC1: 84, + 0x1AC2: 84, + 0x1AC3: 84, + 0x1AC4: 84, + 0x1AC5: 84, + 0x1AC6: 84, + 0x1AC7: 84, + 0x1AC8: 84, + 0x1AC9: 84, + 0x1ACA: 84, + 0x1ACB: 84, + 0x1ACC: 84, + 0x1ACD: 84, + 0x1ACE: 84, + 0x1B00: 84, + 0x1B01: 84, + 0x1B02: 84, + 0x1B03: 84, + 0x1B34: 84, + 0x1B36: 84, + 0x1B37: 84, + 0x1B38: 84, + 0x1B39: 84, + 0x1B3A: 84, + 0x1B3C: 84, + 0x1B42: 84, + 0x1B6B: 84, + 0x1B6C: 84, + 0x1B6D: 84, + 0x1B6E: 84, + 0x1B6F: 84, + 0x1B70: 84, + 0x1B71: 84, + 0x1B72: 84, + 0x1B73: 84, + 0x1B80: 84, + 0x1B81: 84, + 0x1BA2: 84, + 0x1BA3: 84, + 0x1BA4: 84, + 0x1BA5: 84, + 0x1BA8: 84, + 0x1BA9: 84, + 0x1BAB: 84, + 0x1BAC: 84, + 0x1BAD: 84, + 0x1BE6: 84, + 0x1BE8: 84, + 0x1BE9: 84, + 0x1BED: 84, + 0x1BEF: 84, + 0x1BF0: 84, + 0x1BF1: 84, + 0x1C2C: 84, + 0x1C2D: 84, + 0x1C2E: 84, + 0x1C2F: 84, + 0x1C30: 84, + 0x1C31: 84, + 0x1C32: 84, + 0x1C33: 84, + 0x1C36: 84, + 0x1C37: 84, + 0x1CD0: 84, + 0x1CD1: 84, + 0x1CD2: 84, + 0x1CD4: 84, + 0x1CD5: 84, + 0x1CD6: 84, + 0x1CD7: 84, + 0x1CD8: 84, + 0x1CD9: 84, + 0x1CDA: 84, + 0x1CDB: 84, + 0x1CDC: 84, + 0x1CDD: 84, + 0x1CDE: 84, + 0x1CDF: 84, + 0x1CE0: 84, + 0x1CE2: 84, + 0x1CE3: 84, + 0x1CE4: 84, + 0x1CE5: 84, + 0x1CE6: 84, + 0x1CE7: 84, + 0x1CE8: 84, + 0x1CED: 
84, + 0x1CF4: 84, + 0x1CF8: 84, + 0x1CF9: 84, + 0x1DC0: 84, + 0x1DC1: 84, + 0x1DC2: 84, + 0x1DC3: 84, + 0x1DC4: 84, + 0x1DC5: 84, + 0x1DC6: 84, + 0x1DC7: 84, + 0x1DC8: 84, + 0x1DC9: 84, + 0x1DCA: 84, + 0x1DCB: 84, + 0x1DCC: 84, + 0x1DCD: 84, + 0x1DCE: 84, + 0x1DCF: 84, + 0x1DD0: 84, + 0x1DD1: 84, + 0x1DD2: 84, + 0x1DD3: 84, + 0x1DD4: 84, + 0x1DD5: 84, + 0x1DD6: 84, + 0x1DD7: 84, + 0x1DD8: 84, + 0x1DD9: 84, + 0x1DDA: 84, + 0x1DDB: 84, + 0x1DDC: 84, + 0x1DDD: 84, + 0x1DDE: 84, + 0x1DDF: 84, + 0x1DE0: 84, + 0x1DE1: 84, + 0x1DE2: 84, + 0x1DE3: 84, + 0x1DE4: 84, + 0x1DE5: 84, + 0x1DE6: 84, + 0x1DE7: 84, + 0x1DE8: 84, + 0x1DE9: 84, + 0x1DEA: 84, + 0x1DEB: 84, + 0x1DEC: 84, + 0x1DED: 84, + 0x1DEE: 84, + 0x1DEF: 84, + 0x1DF0: 84, + 0x1DF1: 84, + 0x1DF2: 84, + 0x1DF3: 84, + 0x1DF4: 84, + 0x1DF5: 84, + 0x1DF6: 84, + 0x1DF7: 84, + 0x1DF8: 84, + 0x1DF9: 84, + 0x1DFA: 84, + 0x1DFB: 84, + 0x1DFC: 84, + 0x1DFD: 84, + 0x1DFE: 84, + 0x1DFF: 84, + 0x200B: 84, + 0x200D: 67, + 0x200E: 84, + 0x200F: 84, + 0x202A: 84, + 0x202B: 84, + 0x202C: 84, + 0x202D: 84, + 0x202E: 84, + 0x2060: 84, + 0x2061: 84, + 0x2062: 84, + 0x2063: 84, + 0x2064: 84, + 0x206A: 84, + 0x206B: 84, + 0x206C: 84, + 0x206D: 84, + 0x206E: 84, + 0x206F: 84, + 0x20D0: 84, + 0x20D1: 84, + 0x20D2: 84, + 0x20D3: 84, + 0x20D4: 84, + 0x20D5: 84, + 0x20D6: 84, + 0x20D7: 84, + 0x20D8: 84, + 0x20D9: 84, + 0x20DA: 84, + 0x20DB: 84, + 0x20DC: 84, + 0x20DD: 84, + 0x20DE: 84, + 0x20DF: 84, + 0x20E0: 84, + 0x20E1: 84, + 0x20E2: 84, + 0x20E3: 84, + 0x20E4: 84, + 0x20E5: 84, + 0x20E6: 84, + 0x20E7: 84, + 0x20E8: 84, + 0x20E9: 84, + 0x20EA: 84, + 0x20EB: 84, + 0x20EC: 84, + 0x20ED: 84, + 0x20EE: 84, + 0x20EF: 84, + 0x20F0: 84, + 0x2CEF: 84, + 0x2CF0: 84, + 0x2CF1: 84, + 0x2D7F: 84, + 0x2DE0: 84, + 0x2DE1: 84, + 0x2DE2: 84, + 0x2DE3: 84, + 0x2DE4: 84, + 0x2DE5: 84, + 0x2DE6: 84, + 0x2DE7: 84, + 0x2DE8: 84, + 0x2DE9: 84, + 0x2DEA: 84, + 0x2DEB: 84, + 0x2DEC: 84, + 0x2DED: 84, + 0x2DEE: 84, + 0x2DEF: 84, + 0x2DF0: 84, + 0x2DF1: 84, + 0x2DF2: 84, + 0x2DF3: 84, + 0x2DF4: 84, + 0x2DF5: 84, + 0x2DF6: 84, + 0x2DF7: 84, + 0x2DF8: 84, + 0x2DF9: 84, + 0x2DFA: 84, + 0x2DFB: 84, + 0x2DFC: 84, + 0x2DFD: 84, + 0x2DFE: 84, + 0x2DFF: 84, + 0x302A: 84, + 0x302B: 84, + 0x302C: 84, + 0x302D: 84, + 0x3099: 84, + 0x309A: 84, + 0xA66F: 84, + 0xA670: 84, + 0xA671: 84, + 0xA672: 84, + 0xA674: 84, + 0xA675: 84, + 0xA676: 84, + 0xA677: 84, + 0xA678: 84, + 0xA679: 84, + 0xA67A: 84, + 0xA67B: 84, + 0xA67C: 84, + 0xA67D: 84, + 0xA69E: 84, + 0xA69F: 84, + 0xA6F0: 84, + 0xA6F1: 84, + 0xA802: 84, + 0xA806: 84, + 0xA80B: 84, + 0xA825: 84, + 0xA826: 84, + 0xA82C: 84, + 0xA840: 68, + 0xA841: 68, + 0xA842: 68, + 0xA843: 68, + 0xA844: 68, + 0xA845: 68, + 0xA846: 68, + 0xA847: 68, + 0xA848: 68, + 0xA849: 68, + 0xA84A: 68, + 0xA84B: 68, + 0xA84C: 68, + 0xA84D: 68, + 0xA84E: 68, + 0xA84F: 68, + 0xA850: 68, + 0xA851: 68, + 0xA852: 68, + 0xA853: 68, + 0xA854: 68, + 0xA855: 68, + 0xA856: 68, + 0xA857: 68, + 0xA858: 68, + 0xA859: 68, + 0xA85A: 68, + 0xA85B: 68, + 0xA85C: 68, + 0xA85D: 68, + 0xA85E: 68, + 0xA85F: 68, + 0xA860: 68, + 0xA861: 68, + 0xA862: 68, + 0xA863: 68, + 0xA864: 68, + 0xA865: 68, + 0xA866: 68, + 0xA867: 68, + 0xA868: 68, + 0xA869: 68, + 0xA86A: 68, + 0xA86B: 68, + 0xA86C: 68, + 0xA86D: 68, + 0xA86E: 68, + 0xA86F: 68, + 0xA870: 68, + 0xA871: 68, + 0xA872: 76, + 0xA8C4: 84, + 0xA8C5: 84, + 0xA8E0: 84, + 0xA8E1: 84, + 0xA8E2: 84, + 0xA8E3: 84, + 0xA8E4: 84, + 0xA8E5: 84, + 0xA8E6: 84, + 0xA8E7: 84, + 0xA8E8: 84, + 0xA8E9: 84, + 0xA8EA: 84, + 0xA8EB: 84, + 0xA8EC: 84, + 0xA8ED: 84, + 
0xA8EE: 84, + 0xA8EF: 84, + 0xA8F0: 84, + 0xA8F1: 84, + 0xA8FF: 84, + 0xA926: 84, + 0xA927: 84, + 0xA928: 84, + 0xA929: 84, + 0xA92A: 84, + 0xA92B: 84, + 0xA92C: 84, + 0xA92D: 84, + 0xA947: 84, + 0xA948: 84, + 0xA949: 84, + 0xA94A: 84, + 0xA94B: 84, + 0xA94C: 84, + 0xA94D: 84, + 0xA94E: 84, + 0xA94F: 84, + 0xA950: 84, + 0xA951: 84, + 0xA980: 84, + 0xA981: 84, + 0xA982: 84, + 0xA9B3: 84, + 0xA9B6: 84, + 0xA9B7: 84, + 0xA9B8: 84, + 0xA9B9: 84, + 0xA9BC: 84, + 0xA9BD: 84, + 0xA9E5: 84, + 0xAA29: 84, + 0xAA2A: 84, + 0xAA2B: 84, + 0xAA2C: 84, + 0xAA2D: 84, + 0xAA2E: 84, + 0xAA31: 84, + 0xAA32: 84, + 0xAA35: 84, + 0xAA36: 84, + 0xAA43: 84, + 0xAA4C: 84, + 0xAA7C: 84, + 0xAAB0: 84, + 0xAAB2: 84, + 0xAAB3: 84, + 0xAAB4: 84, + 0xAAB7: 84, + 0xAAB8: 84, + 0xAABE: 84, + 0xAABF: 84, + 0xAAC1: 84, + 0xAAEC: 84, + 0xAAED: 84, + 0xAAF6: 84, + 0xABE5: 84, + 0xABE8: 84, + 0xABED: 84, + 0xFB1E: 84, + 0xFE00: 84, + 0xFE01: 84, + 0xFE02: 84, + 0xFE03: 84, + 0xFE04: 84, + 0xFE05: 84, + 0xFE06: 84, + 0xFE07: 84, + 0xFE08: 84, + 0xFE09: 84, + 0xFE0A: 84, + 0xFE0B: 84, + 0xFE0C: 84, + 0xFE0D: 84, + 0xFE0E: 84, + 0xFE0F: 84, + 0xFE20: 84, + 0xFE21: 84, + 0xFE22: 84, + 0xFE23: 84, + 0xFE24: 84, + 0xFE25: 84, + 0xFE26: 84, + 0xFE27: 84, + 0xFE28: 84, + 0xFE29: 84, + 0xFE2A: 84, + 0xFE2B: 84, + 0xFE2C: 84, + 0xFE2D: 84, + 0xFE2E: 84, + 0xFE2F: 84, + 0xFEFF: 84, + 0xFFF9: 84, + 0xFFFA: 84, + 0xFFFB: 84, + 0x101FD: 84, + 0x102E0: 84, + 0x10376: 84, + 0x10377: 84, + 0x10378: 84, + 0x10379: 84, + 0x1037A: 84, + 0x10A01: 84, + 0x10A02: 84, + 0x10A03: 84, + 0x10A05: 84, + 0x10A06: 84, + 0x10A0C: 84, + 0x10A0D: 84, + 0x10A0E: 84, + 0x10A0F: 84, + 0x10A38: 84, + 0x10A39: 84, + 0x10A3A: 84, + 0x10A3F: 84, + 0x10AC0: 68, + 0x10AC1: 68, + 0x10AC2: 68, + 0x10AC3: 68, + 0x10AC4: 68, + 0x10AC5: 82, + 0x10AC7: 82, + 0x10AC9: 82, + 0x10ACA: 82, + 0x10ACD: 76, + 0x10ACE: 82, + 0x10ACF: 82, + 0x10AD0: 82, + 0x10AD1: 82, + 0x10AD2: 82, + 0x10AD3: 68, + 0x10AD4: 68, + 0x10AD5: 68, + 0x10AD6: 68, + 0x10AD7: 76, + 0x10AD8: 68, + 0x10AD9: 68, + 0x10ADA: 68, + 0x10ADB: 68, + 0x10ADC: 68, + 0x10ADD: 82, + 0x10ADE: 68, + 0x10ADF: 68, + 0x10AE0: 68, + 0x10AE1: 82, + 0x10AE4: 82, + 0x10AE5: 84, + 0x10AE6: 84, + 0x10AEB: 68, + 0x10AEC: 68, + 0x10AED: 68, + 0x10AEE: 68, + 0x10AEF: 82, + 0x10B80: 68, + 0x10B81: 82, + 0x10B82: 68, + 0x10B83: 82, + 0x10B84: 82, + 0x10B85: 82, + 0x10B86: 68, + 0x10B87: 68, + 0x10B88: 68, + 0x10B89: 82, + 0x10B8A: 68, + 0x10B8B: 68, + 0x10B8C: 82, + 0x10B8D: 68, + 0x10B8E: 82, + 0x10B8F: 82, + 0x10B90: 68, + 0x10B91: 82, + 0x10BA9: 82, + 0x10BAA: 82, + 0x10BAB: 82, + 0x10BAC: 82, + 0x10BAD: 68, + 0x10BAE: 68, + 0x10D00: 76, + 0x10D01: 68, + 0x10D02: 68, + 0x10D03: 68, + 0x10D04: 68, + 0x10D05: 68, + 0x10D06: 68, + 0x10D07: 68, + 0x10D08: 68, + 0x10D09: 68, + 0x10D0A: 68, + 0x10D0B: 68, + 0x10D0C: 68, + 0x10D0D: 68, + 0x10D0E: 68, + 0x10D0F: 68, + 0x10D10: 68, + 0x10D11: 68, + 0x10D12: 68, + 0x10D13: 68, + 0x10D14: 68, + 0x10D15: 68, + 0x10D16: 68, + 0x10D17: 68, + 0x10D18: 68, + 0x10D19: 68, + 0x10D1A: 68, + 0x10D1B: 68, + 0x10D1C: 68, + 0x10D1D: 68, + 0x10D1E: 68, + 0x10D1F: 68, + 0x10D20: 68, + 0x10D21: 68, + 0x10D22: 82, + 0x10D23: 68, + 0x10D24: 84, + 0x10D25: 84, + 0x10D26: 84, + 0x10D27: 84, + 0x10EAB: 84, + 0x10EAC: 84, + 0x10EFD: 84, + 0x10EFE: 84, + 0x10EFF: 84, + 0x10F30: 68, + 0x10F31: 68, + 0x10F32: 68, + 0x10F33: 82, + 0x10F34: 68, + 0x10F35: 68, + 0x10F36: 68, + 0x10F37: 68, + 0x10F38: 68, + 0x10F39: 68, + 0x10F3A: 68, + 0x10F3B: 68, + 0x10F3C: 68, + 0x10F3D: 68, + 0x10F3E: 68, + 0x10F3F: 68, + 0x10F40: 
68, + 0x10F41: 68, + 0x10F42: 68, + 0x10F43: 68, + 0x10F44: 68, + 0x10F46: 84, + 0x10F47: 84, + 0x10F48: 84, + 0x10F49: 84, + 0x10F4A: 84, + 0x10F4B: 84, + 0x10F4C: 84, + 0x10F4D: 84, + 0x10F4E: 84, + 0x10F4F: 84, + 0x10F50: 84, + 0x10F51: 68, + 0x10F52: 68, + 0x10F53: 68, + 0x10F54: 82, + 0x10F70: 68, + 0x10F71: 68, + 0x10F72: 68, + 0x10F73: 68, + 0x10F74: 82, + 0x10F75: 82, + 0x10F76: 68, + 0x10F77: 68, + 0x10F78: 68, + 0x10F79: 68, + 0x10F7A: 68, + 0x10F7B: 68, + 0x10F7C: 68, + 0x10F7D: 68, + 0x10F7E: 68, + 0x10F7F: 68, + 0x10F80: 68, + 0x10F81: 68, + 0x10F82: 84, + 0x10F83: 84, + 0x10F84: 84, + 0x10F85: 84, + 0x10FB0: 68, + 0x10FB2: 68, + 0x10FB3: 68, + 0x10FB4: 82, + 0x10FB5: 82, + 0x10FB6: 82, + 0x10FB8: 68, + 0x10FB9: 82, + 0x10FBA: 82, + 0x10FBB: 68, + 0x10FBC: 68, + 0x10FBD: 82, + 0x10FBE: 68, + 0x10FBF: 68, + 0x10FC1: 68, + 0x10FC2: 82, + 0x10FC3: 82, + 0x10FC4: 68, + 0x10FC9: 82, + 0x10FCA: 68, + 0x10FCB: 76, + 0x11001: 84, + 0x11038: 84, + 0x11039: 84, + 0x1103A: 84, + 0x1103B: 84, + 0x1103C: 84, + 0x1103D: 84, + 0x1103E: 84, + 0x1103F: 84, + 0x11040: 84, + 0x11041: 84, + 0x11042: 84, + 0x11043: 84, + 0x11044: 84, + 0x11045: 84, + 0x11046: 84, + 0x11070: 84, + 0x11073: 84, + 0x11074: 84, + 0x1107F: 84, + 0x11080: 84, + 0x11081: 84, + 0x110B3: 84, + 0x110B4: 84, + 0x110B5: 84, + 0x110B6: 84, + 0x110B9: 84, + 0x110BA: 84, + 0x110C2: 84, + 0x11100: 84, + 0x11101: 84, + 0x11102: 84, + 0x11127: 84, + 0x11128: 84, + 0x11129: 84, + 0x1112A: 84, + 0x1112B: 84, + 0x1112D: 84, + 0x1112E: 84, + 0x1112F: 84, + 0x11130: 84, + 0x11131: 84, + 0x11132: 84, + 0x11133: 84, + 0x11134: 84, + 0x11173: 84, + 0x11180: 84, + 0x11181: 84, + 0x111B6: 84, + 0x111B7: 84, + 0x111B8: 84, + 0x111B9: 84, + 0x111BA: 84, + 0x111BB: 84, + 0x111BC: 84, + 0x111BD: 84, + 0x111BE: 84, + 0x111C9: 84, + 0x111CA: 84, + 0x111CB: 84, + 0x111CC: 84, + 0x111CF: 84, + 0x1122F: 84, + 0x11230: 84, + 0x11231: 84, + 0x11234: 84, + 0x11236: 84, + 0x11237: 84, + 0x1123E: 84, + 0x11241: 84, + 0x112DF: 84, + 0x112E3: 84, + 0x112E4: 84, + 0x112E5: 84, + 0x112E6: 84, + 0x112E7: 84, + 0x112E8: 84, + 0x112E9: 84, + 0x112EA: 84, + 0x11300: 84, + 0x11301: 84, + 0x1133B: 84, + 0x1133C: 84, + 0x11340: 84, + 0x11366: 84, + 0x11367: 84, + 0x11368: 84, + 0x11369: 84, + 0x1136A: 84, + 0x1136B: 84, + 0x1136C: 84, + 0x11370: 84, + 0x11371: 84, + 0x11372: 84, + 0x11373: 84, + 0x11374: 84, + 0x11438: 84, + 0x11439: 84, + 0x1143A: 84, + 0x1143B: 84, + 0x1143C: 84, + 0x1143D: 84, + 0x1143E: 84, + 0x1143F: 84, + 0x11442: 84, + 0x11443: 84, + 0x11444: 84, + 0x11446: 84, + 0x1145E: 84, + 0x114B3: 84, + 0x114B4: 84, + 0x114B5: 84, + 0x114B6: 84, + 0x114B7: 84, + 0x114B8: 84, + 0x114BA: 84, + 0x114BF: 84, + 0x114C0: 84, + 0x114C2: 84, + 0x114C3: 84, + 0x115B2: 84, + 0x115B3: 84, + 0x115B4: 84, + 0x115B5: 84, + 0x115BC: 84, + 0x115BD: 84, + 0x115BF: 84, + 0x115C0: 84, + 0x115DC: 84, + 0x115DD: 84, + 0x11633: 84, + 0x11634: 84, + 0x11635: 84, + 0x11636: 84, + 0x11637: 84, + 0x11638: 84, + 0x11639: 84, + 0x1163A: 84, + 0x1163D: 84, + 0x1163F: 84, + 0x11640: 84, + 0x116AB: 84, + 0x116AD: 84, + 0x116B0: 84, + 0x116B1: 84, + 0x116B2: 84, + 0x116B3: 84, + 0x116B4: 84, + 0x116B5: 84, + 0x116B7: 84, + 0x1171D: 84, + 0x1171E: 84, + 0x1171F: 84, + 0x11722: 84, + 0x11723: 84, + 0x11724: 84, + 0x11725: 84, + 0x11727: 84, + 0x11728: 84, + 0x11729: 84, + 0x1172A: 84, + 0x1172B: 84, + 0x1182F: 84, + 0x11830: 84, + 0x11831: 84, + 0x11832: 84, + 0x11833: 84, + 0x11834: 84, + 0x11835: 84, + 0x11836: 84, + 0x11837: 84, + 0x11839: 84, + 0x1183A: 84, + 0x1193B: 84, + 0x1193C: 
84, + 0x1193E: 84, + 0x11943: 84, + 0x119D4: 84, + 0x119D5: 84, + 0x119D6: 84, + 0x119D7: 84, + 0x119DA: 84, + 0x119DB: 84, + 0x119E0: 84, + 0x11A01: 84, + 0x11A02: 84, + 0x11A03: 84, + 0x11A04: 84, + 0x11A05: 84, + 0x11A06: 84, + 0x11A07: 84, + 0x11A08: 84, + 0x11A09: 84, + 0x11A0A: 84, + 0x11A33: 84, + 0x11A34: 84, + 0x11A35: 84, + 0x11A36: 84, + 0x11A37: 84, + 0x11A38: 84, + 0x11A3B: 84, + 0x11A3C: 84, + 0x11A3D: 84, + 0x11A3E: 84, + 0x11A47: 84, + 0x11A51: 84, + 0x11A52: 84, + 0x11A53: 84, + 0x11A54: 84, + 0x11A55: 84, + 0x11A56: 84, + 0x11A59: 84, + 0x11A5A: 84, + 0x11A5B: 84, + 0x11A8A: 84, + 0x11A8B: 84, + 0x11A8C: 84, + 0x11A8D: 84, + 0x11A8E: 84, + 0x11A8F: 84, + 0x11A90: 84, + 0x11A91: 84, + 0x11A92: 84, + 0x11A93: 84, + 0x11A94: 84, + 0x11A95: 84, + 0x11A96: 84, + 0x11A98: 84, + 0x11A99: 84, + 0x11C30: 84, + 0x11C31: 84, + 0x11C32: 84, + 0x11C33: 84, + 0x11C34: 84, + 0x11C35: 84, + 0x11C36: 84, + 0x11C38: 84, + 0x11C39: 84, + 0x11C3A: 84, + 0x11C3B: 84, + 0x11C3C: 84, + 0x11C3D: 84, + 0x11C3F: 84, + 0x11C92: 84, + 0x11C93: 84, + 0x11C94: 84, + 0x11C95: 84, + 0x11C96: 84, + 0x11C97: 84, + 0x11C98: 84, + 0x11C99: 84, + 0x11C9A: 84, + 0x11C9B: 84, + 0x11C9C: 84, + 0x11C9D: 84, + 0x11C9E: 84, + 0x11C9F: 84, + 0x11CA0: 84, + 0x11CA1: 84, + 0x11CA2: 84, + 0x11CA3: 84, + 0x11CA4: 84, + 0x11CA5: 84, + 0x11CA6: 84, + 0x11CA7: 84, + 0x11CAA: 84, + 0x11CAB: 84, + 0x11CAC: 84, + 0x11CAD: 84, + 0x11CAE: 84, + 0x11CAF: 84, + 0x11CB0: 84, + 0x11CB2: 84, + 0x11CB3: 84, + 0x11CB5: 84, + 0x11CB6: 84, + 0x11D31: 84, + 0x11D32: 84, + 0x11D33: 84, + 0x11D34: 84, + 0x11D35: 84, + 0x11D36: 84, + 0x11D3A: 84, + 0x11D3C: 84, + 0x11D3D: 84, + 0x11D3F: 84, + 0x11D40: 84, + 0x11D41: 84, + 0x11D42: 84, + 0x11D43: 84, + 0x11D44: 84, + 0x11D45: 84, + 0x11D47: 84, + 0x11D90: 84, + 0x11D91: 84, + 0x11D95: 84, + 0x11D97: 84, + 0x11EF3: 84, + 0x11EF4: 84, + 0x11F00: 84, + 0x11F01: 84, + 0x11F36: 84, + 0x11F37: 84, + 0x11F38: 84, + 0x11F39: 84, + 0x11F3A: 84, + 0x11F40: 84, + 0x11F42: 84, + 0x13430: 84, + 0x13431: 84, + 0x13432: 84, + 0x13433: 84, + 0x13434: 84, + 0x13435: 84, + 0x13436: 84, + 0x13437: 84, + 0x13438: 84, + 0x13439: 84, + 0x1343A: 84, + 0x1343B: 84, + 0x1343C: 84, + 0x1343D: 84, + 0x1343E: 84, + 0x1343F: 84, + 0x13440: 84, + 0x13447: 84, + 0x13448: 84, + 0x13449: 84, + 0x1344A: 84, + 0x1344B: 84, + 0x1344C: 84, + 0x1344D: 84, + 0x1344E: 84, + 0x1344F: 84, + 0x13450: 84, + 0x13451: 84, + 0x13452: 84, + 0x13453: 84, + 0x13454: 84, + 0x13455: 84, + 0x16AF0: 84, + 0x16AF1: 84, + 0x16AF2: 84, + 0x16AF3: 84, + 0x16AF4: 84, + 0x16B30: 84, + 0x16B31: 84, + 0x16B32: 84, + 0x16B33: 84, + 0x16B34: 84, + 0x16B35: 84, + 0x16B36: 84, + 0x16F4F: 84, + 0x16F8F: 84, + 0x16F90: 84, + 0x16F91: 84, + 0x16F92: 84, + 0x16FE4: 84, + 0x1BC9D: 84, + 0x1BC9E: 84, + 0x1BCA0: 84, + 0x1BCA1: 84, + 0x1BCA2: 84, + 0x1BCA3: 84, + 0x1CF00: 84, + 0x1CF01: 84, + 0x1CF02: 84, + 0x1CF03: 84, + 0x1CF04: 84, + 0x1CF05: 84, + 0x1CF06: 84, + 0x1CF07: 84, + 0x1CF08: 84, + 0x1CF09: 84, + 0x1CF0A: 84, + 0x1CF0B: 84, + 0x1CF0C: 84, + 0x1CF0D: 84, + 0x1CF0E: 84, + 0x1CF0F: 84, + 0x1CF10: 84, + 0x1CF11: 84, + 0x1CF12: 84, + 0x1CF13: 84, + 0x1CF14: 84, + 0x1CF15: 84, + 0x1CF16: 84, + 0x1CF17: 84, + 0x1CF18: 84, + 0x1CF19: 84, + 0x1CF1A: 84, + 0x1CF1B: 84, + 0x1CF1C: 84, + 0x1CF1D: 84, + 0x1CF1E: 84, + 0x1CF1F: 84, + 0x1CF20: 84, + 0x1CF21: 84, + 0x1CF22: 84, + 0x1CF23: 84, + 0x1CF24: 84, + 0x1CF25: 84, + 0x1CF26: 84, + 0x1CF27: 84, + 0x1CF28: 84, + 0x1CF29: 84, + 0x1CF2A: 84, + 0x1CF2B: 84, + 0x1CF2C: 84, + 0x1CF2D: 84, + 0x1CF30: 84, + 0x1CF31: 
84, + 0x1CF32: 84, + 0x1CF33: 84, + 0x1CF34: 84, + 0x1CF35: 84, + 0x1CF36: 84, + 0x1CF37: 84, + 0x1CF38: 84, + 0x1CF39: 84, + 0x1CF3A: 84, + 0x1CF3B: 84, + 0x1CF3C: 84, + 0x1CF3D: 84, + 0x1CF3E: 84, + 0x1CF3F: 84, + 0x1CF40: 84, + 0x1CF41: 84, + 0x1CF42: 84, + 0x1CF43: 84, + 0x1CF44: 84, + 0x1CF45: 84, + 0x1CF46: 84, + 0x1D167: 84, + 0x1D168: 84, + 0x1D169: 84, + 0x1D173: 84, + 0x1D174: 84, + 0x1D175: 84, + 0x1D176: 84, + 0x1D177: 84, + 0x1D178: 84, + 0x1D179: 84, + 0x1D17A: 84, + 0x1D17B: 84, + 0x1D17C: 84, + 0x1D17D: 84, + 0x1D17E: 84, + 0x1D17F: 84, + 0x1D180: 84, + 0x1D181: 84, + 0x1D182: 84, + 0x1D185: 84, + 0x1D186: 84, + 0x1D187: 84, + 0x1D188: 84, + 0x1D189: 84, + 0x1D18A: 84, + 0x1D18B: 84, + 0x1D1AA: 84, + 0x1D1AB: 84, + 0x1D1AC: 84, + 0x1D1AD: 84, + 0x1D242: 84, + 0x1D243: 84, + 0x1D244: 84, + 0x1DA00: 84, + 0x1DA01: 84, + 0x1DA02: 84, + 0x1DA03: 84, + 0x1DA04: 84, + 0x1DA05: 84, + 0x1DA06: 84, + 0x1DA07: 84, + 0x1DA08: 84, + 0x1DA09: 84, + 0x1DA0A: 84, + 0x1DA0B: 84, + 0x1DA0C: 84, + 0x1DA0D: 84, + 0x1DA0E: 84, + 0x1DA0F: 84, + 0x1DA10: 84, + 0x1DA11: 84, + 0x1DA12: 84, + 0x1DA13: 84, + 0x1DA14: 84, + 0x1DA15: 84, + 0x1DA16: 84, + 0x1DA17: 84, + 0x1DA18: 84, + 0x1DA19: 84, + 0x1DA1A: 84, + 0x1DA1B: 84, + 0x1DA1C: 84, + 0x1DA1D: 84, + 0x1DA1E: 84, + 0x1DA1F: 84, + 0x1DA20: 84, + 0x1DA21: 84, + 0x1DA22: 84, + 0x1DA23: 84, + 0x1DA24: 84, + 0x1DA25: 84, + 0x1DA26: 84, + 0x1DA27: 84, + 0x1DA28: 84, + 0x1DA29: 84, + 0x1DA2A: 84, + 0x1DA2B: 84, + 0x1DA2C: 84, + 0x1DA2D: 84, + 0x1DA2E: 84, + 0x1DA2F: 84, + 0x1DA30: 84, + 0x1DA31: 84, + 0x1DA32: 84, + 0x1DA33: 84, + 0x1DA34: 84, + 0x1DA35: 84, + 0x1DA36: 84, + 0x1DA3B: 84, + 0x1DA3C: 84, + 0x1DA3D: 84, + 0x1DA3E: 84, + 0x1DA3F: 84, + 0x1DA40: 84, + 0x1DA41: 84, + 0x1DA42: 84, + 0x1DA43: 84, + 0x1DA44: 84, + 0x1DA45: 84, + 0x1DA46: 84, + 0x1DA47: 84, + 0x1DA48: 84, + 0x1DA49: 84, + 0x1DA4A: 84, + 0x1DA4B: 84, + 0x1DA4C: 84, + 0x1DA4D: 84, + 0x1DA4E: 84, + 0x1DA4F: 84, + 0x1DA50: 84, + 0x1DA51: 84, + 0x1DA52: 84, + 0x1DA53: 84, + 0x1DA54: 84, + 0x1DA55: 84, + 0x1DA56: 84, + 0x1DA57: 84, + 0x1DA58: 84, + 0x1DA59: 84, + 0x1DA5A: 84, + 0x1DA5B: 84, + 0x1DA5C: 84, + 0x1DA5D: 84, + 0x1DA5E: 84, + 0x1DA5F: 84, + 0x1DA60: 84, + 0x1DA61: 84, + 0x1DA62: 84, + 0x1DA63: 84, + 0x1DA64: 84, + 0x1DA65: 84, + 0x1DA66: 84, + 0x1DA67: 84, + 0x1DA68: 84, + 0x1DA69: 84, + 0x1DA6A: 84, + 0x1DA6B: 84, + 0x1DA6C: 84, + 0x1DA75: 84, + 0x1DA84: 84, + 0x1DA9B: 84, + 0x1DA9C: 84, + 0x1DA9D: 84, + 0x1DA9E: 84, + 0x1DA9F: 84, + 0x1DAA1: 84, + 0x1DAA2: 84, + 0x1DAA3: 84, + 0x1DAA4: 84, + 0x1DAA5: 84, + 0x1DAA6: 84, + 0x1DAA7: 84, + 0x1DAA8: 84, + 0x1DAA9: 84, + 0x1DAAA: 84, + 0x1DAAB: 84, + 0x1DAAC: 84, + 0x1DAAD: 84, + 0x1DAAE: 84, + 0x1DAAF: 84, + 0x1E000: 84, + 0x1E001: 84, + 0x1E002: 84, + 0x1E003: 84, + 0x1E004: 84, + 0x1E005: 84, + 0x1E006: 84, + 0x1E008: 84, + 0x1E009: 84, + 0x1E00A: 84, + 0x1E00B: 84, + 0x1E00C: 84, + 0x1E00D: 84, + 0x1E00E: 84, + 0x1E00F: 84, + 0x1E010: 84, + 0x1E011: 84, + 0x1E012: 84, + 0x1E013: 84, + 0x1E014: 84, + 0x1E015: 84, + 0x1E016: 84, + 0x1E017: 84, + 0x1E018: 84, + 0x1E01B: 84, + 0x1E01C: 84, + 0x1E01D: 84, + 0x1E01E: 84, + 0x1E01F: 84, + 0x1E020: 84, + 0x1E021: 84, + 0x1E023: 84, + 0x1E024: 84, + 0x1E026: 84, + 0x1E027: 84, + 0x1E028: 84, + 0x1E029: 84, + 0x1E02A: 84, + 0x1E08F: 84, + 0x1E130: 84, + 0x1E131: 84, + 0x1E132: 84, + 0x1E133: 84, + 0x1E134: 84, + 0x1E135: 84, + 0x1E136: 84, + 0x1E2AE: 84, + 0x1E2EC: 84, + 0x1E2ED: 84, + 0x1E2EE: 84, + 0x1E2EF: 84, + 0x1E4EC: 84, + 0x1E4ED: 84, + 0x1E4EE: 84, + 0x1E4EF: 84, + 0x1E8D0: 
84, + 0x1E8D1: 84, + 0x1E8D2: 84, + 0x1E8D3: 84, + 0x1E8D4: 84, + 0x1E8D5: 84, + 0x1E8D6: 84, + 0x1E900: 68, + 0x1E901: 68, + 0x1E902: 68, + 0x1E903: 68, + 0x1E904: 68, + 0x1E905: 68, + 0x1E906: 68, + 0x1E907: 68, + 0x1E908: 68, + 0x1E909: 68, + 0x1E90A: 68, + 0x1E90B: 68, + 0x1E90C: 68, + 0x1E90D: 68, + 0x1E90E: 68, + 0x1E90F: 68, + 0x1E910: 68, + 0x1E911: 68, + 0x1E912: 68, + 0x1E913: 68, + 0x1E914: 68, + 0x1E915: 68, + 0x1E916: 68, + 0x1E917: 68, + 0x1E918: 68, + 0x1E919: 68, + 0x1E91A: 68, + 0x1E91B: 68, + 0x1E91C: 68, + 0x1E91D: 68, + 0x1E91E: 68, + 0x1E91F: 68, + 0x1E920: 68, + 0x1E921: 68, + 0x1E922: 68, + 0x1E923: 68, + 0x1E924: 68, + 0x1E925: 68, + 0x1E926: 68, + 0x1E927: 68, + 0x1E928: 68, + 0x1E929: 68, + 0x1E92A: 68, + 0x1E92B: 68, + 0x1E92C: 68, + 0x1E92D: 68, + 0x1E92E: 68, + 0x1E92F: 68, + 0x1E930: 68, + 0x1E931: 68, + 0x1E932: 68, + 0x1E933: 68, + 0x1E934: 68, + 0x1E935: 68, + 0x1E936: 68, + 0x1E937: 68, + 0x1E938: 68, + 0x1E939: 68, + 0x1E93A: 68, + 0x1E93B: 68, + 0x1E93C: 68, + 0x1E93D: 68, + 0x1E93E: 68, + 0x1E93F: 68, + 0x1E940: 68, + 0x1E941: 68, + 0x1E942: 68, + 0x1E943: 68, + 0x1E944: 84, + 0x1E945: 84, + 0x1E946: 84, + 0x1E947: 84, + 0x1E948: 84, + 0x1E949: 84, + 0x1E94A: 84, + 0x1E94B: 84, + 0xE0001: 84, + 0xE0020: 84, + 0xE0021: 84, + 0xE0022: 84, + 0xE0023: 84, + 0xE0024: 84, + 0xE0025: 84, + 0xE0026: 84, + 0xE0027: 84, + 0xE0028: 84, + 0xE0029: 84, + 0xE002A: 84, + 0xE002B: 84, + 0xE002C: 84, + 0xE002D: 84, + 0xE002E: 84, + 0xE002F: 84, + 0xE0030: 84, + 0xE0031: 84, + 0xE0032: 84, + 0xE0033: 84, + 0xE0034: 84, + 0xE0035: 84, + 0xE0036: 84, + 0xE0037: 84, + 0xE0038: 84, + 0xE0039: 84, + 0xE003A: 84, + 0xE003B: 84, + 0xE003C: 84, + 0xE003D: 84, + 0xE003E: 84, + 0xE003F: 84, + 0xE0040: 84, + 0xE0041: 84, + 0xE0042: 84, + 0xE0043: 84, + 0xE0044: 84, + 0xE0045: 84, + 0xE0046: 84, + 0xE0047: 84, + 0xE0048: 84, + 0xE0049: 84, + 0xE004A: 84, + 0xE004B: 84, + 0xE004C: 84, + 0xE004D: 84, + 0xE004E: 84, + 0xE004F: 84, + 0xE0050: 84, + 0xE0051: 84, + 0xE0052: 84, + 0xE0053: 84, + 0xE0054: 84, + 0xE0055: 84, + 0xE0056: 84, + 0xE0057: 84, + 0xE0058: 84, + 0xE0059: 84, + 0xE005A: 84, + 0xE005B: 84, + 0xE005C: 84, + 0xE005D: 84, + 0xE005E: 84, + 0xE005F: 84, + 0xE0060: 84, + 0xE0061: 84, + 0xE0062: 84, + 0xE0063: 84, + 0xE0064: 84, + 0xE0065: 84, + 0xE0066: 84, + 0xE0067: 84, + 0xE0068: 84, + 0xE0069: 84, + 0xE006A: 84, + 0xE006B: 84, + 0xE006C: 84, + 0xE006D: 84, + 0xE006E: 84, + 0xE006F: 84, + 0xE0070: 84, + 0xE0071: 84, + 0xE0072: 84, + 0xE0073: 84, + 0xE0074: 84, + 0xE0075: 84, + 0xE0076: 84, + 0xE0077: 84, + 0xE0078: 84, + 0xE0079: 84, + 0xE007A: 84, + 0xE007B: 84, + 0xE007C: 84, + 0xE007D: 84, + 0xE007E: 84, + 0xE007F: 84, + 0xE0100: 84, + 0xE0101: 84, + 0xE0102: 84, + 0xE0103: 84, + 0xE0104: 84, + 0xE0105: 84, + 0xE0106: 84, + 0xE0107: 84, + 0xE0108: 84, + 0xE0109: 84, + 0xE010A: 84, + 0xE010B: 84, + 0xE010C: 84, + 0xE010D: 84, + 0xE010E: 84, + 0xE010F: 84, + 0xE0110: 84, + 0xE0111: 84, + 0xE0112: 84, + 0xE0113: 84, + 0xE0114: 84, + 0xE0115: 84, + 0xE0116: 84, + 0xE0117: 84, + 0xE0118: 84, + 0xE0119: 84, + 0xE011A: 84, + 0xE011B: 84, + 0xE011C: 84, + 0xE011D: 84, + 0xE011E: 84, + 0xE011F: 84, + 0xE0120: 84, + 0xE0121: 84, + 0xE0122: 84, + 0xE0123: 84, + 0xE0124: 84, + 0xE0125: 84, + 0xE0126: 84, + 0xE0127: 84, + 0xE0128: 84, + 0xE0129: 84, + 0xE012A: 84, + 0xE012B: 84, + 0xE012C: 84, + 0xE012D: 84, + 0xE012E: 84, + 0xE012F: 84, + 0xE0130: 84, + 0xE0131: 84, + 0xE0132: 84, + 0xE0133: 84, + 0xE0134: 84, + 0xE0135: 84, + 0xE0136: 84, + 0xE0137: 84, + 0xE0138: 84, + 0xE0139: 
84, + 0xE013A: 84, + 0xE013B: 84, + 0xE013C: 84, + 0xE013D: 84, + 0xE013E: 84, + 0xE013F: 84, + 0xE0140: 84, + 0xE0141: 84, + 0xE0142: 84, + 0xE0143: 84, + 0xE0144: 84, + 0xE0145: 84, + 0xE0146: 84, + 0xE0147: 84, + 0xE0148: 84, + 0xE0149: 84, + 0xE014A: 84, + 0xE014B: 84, + 0xE014C: 84, + 0xE014D: 84, + 0xE014E: 84, + 0xE014F: 84, + 0xE0150: 84, + 0xE0151: 84, + 0xE0152: 84, + 0xE0153: 84, + 0xE0154: 84, + 0xE0155: 84, + 0xE0156: 84, + 0xE0157: 84, + 0xE0158: 84, + 0xE0159: 84, + 0xE015A: 84, + 0xE015B: 84, + 0xE015C: 84, + 0xE015D: 84, + 0xE015E: 84, + 0xE015F: 84, + 0xE0160: 84, + 0xE0161: 84, + 0xE0162: 84, + 0xE0163: 84, + 0xE0164: 84, + 0xE0165: 84, + 0xE0166: 84, + 0xE0167: 84, + 0xE0168: 84, + 0xE0169: 84, + 0xE016A: 84, + 0xE016B: 84, + 0xE016C: 84, + 0xE016D: 84, + 0xE016E: 84, + 0xE016F: 84, + 0xE0170: 84, + 0xE0171: 84, + 0xE0172: 84, + 0xE0173: 84, + 0xE0174: 84, + 0xE0175: 84, + 0xE0176: 84, + 0xE0177: 84, + 0xE0178: 84, + 0xE0179: 84, + 0xE017A: 84, + 0xE017B: 84, + 0xE017C: 84, + 0xE017D: 84, + 0xE017E: 84, + 0xE017F: 84, + 0xE0180: 84, + 0xE0181: 84, + 0xE0182: 84, + 0xE0183: 84, + 0xE0184: 84, + 0xE0185: 84, + 0xE0186: 84, + 0xE0187: 84, + 0xE0188: 84, + 0xE0189: 84, + 0xE018A: 84, + 0xE018B: 84, + 0xE018C: 84, + 0xE018D: 84, + 0xE018E: 84, + 0xE018F: 84, + 0xE0190: 84, + 0xE0191: 84, + 0xE0192: 84, + 0xE0193: 84, + 0xE0194: 84, + 0xE0195: 84, + 0xE0196: 84, + 0xE0197: 84, + 0xE0198: 84, + 0xE0199: 84, + 0xE019A: 84, + 0xE019B: 84, + 0xE019C: 84, + 0xE019D: 84, + 0xE019E: 84, + 0xE019F: 84, + 0xE01A0: 84, + 0xE01A1: 84, + 0xE01A2: 84, + 0xE01A3: 84, + 0xE01A4: 84, + 0xE01A5: 84, + 0xE01A6: 84, + 0xE01A7: 84, + 0xE01A8: 84, + 0xE01A9: 84, + 0xE01AA: 84, + 0xE01AB: 84, + 0xE01AC: 84, + 0xE01AD: 84, + 0xE01AE: 84, + 0xE01AF: 84, + 0xE01B0: 84, + 0xE01B1: 84, + 0xE01B2: 84, + 0xE01B3: 84, + 0xE01B4: 84, + 0xE01B5: 84, + 0xE01B6: 84, + 0xE01B7: 84, + 0xE01B8: 84, + 0xE01B9: 84, + 0xE01BA: 84, + 0xE01BB: 84, + 0xE01BC: 84, + 0xE01BD: 84, + 0xE01BE: 84, + 0xE01BF: 84, + 0xE01C0: 84, + 0xE01C1: 84, + 0xE01C2: 84, + 0xE01C3: 84, + 0xE01C4: 84, + 0xE01C5: 84, + 0xE01C6: 84, + 0xE01C7: 84, + 0xE01C8: 84, + 0xE01C9: 84, + 0xE01CA: 84, + 0xE01CB: 84, + 0xE01CC: 84, + 0xE01CD: 84, + 0xE01CE: 84, + 0xE01CF: 84, + 0xE01D0: 84, + 0xE01D1: 84, + 0xE01D2: 84, + 0xE01D3: 84, + 0xE01D4: 84, + 0xE01D5: 84, + 0xE01D6: 84, + 0xE01D7: 84, + 0xE01D8: 84, + 0xE01D9: 84, + 0xE01DA: 84, + 0xE01DB: 84, + 0xE01DC: 84, + 0xE01DD: 84, + 0xE01DE: 84, + 0xE01DF: 84, + 0xE01E0: 84, + 0xE01E1: 84, + 0xE01E2: 84, + 0xE01E3: 84, + 0xE01E4: 84, + 0xE01E5: 84, + 0xE01E6: 84, + 0xE01E7: 84, + 0xE01E8: 84, + 0xE01E9: 84, + 0xE01EA: 84, + 0xE01EB: 84, + 0xE01EC: 84, + 0xE01ED: 84, + 0xE01EE: 84, + 0xE01EF: 84, +} +codepoint_classes = { + "PVALID": ( + 0x2D0000002E, + 0x300000003A, + 0x610000007B, + 0xDF000000F7, + 0xF800000100, + 0x10100000102, + 0x10300000104, + 0x10500000106, + 0x10700000108, + 0x1090000010A, + 0x10B0000010C, + 0x10D0000010E, + 0x10F00000110, + 0x11100000112, + 0x11300000114, + 0x11500000116, + 0x11700000118, + 0x1190000011A, + 0x11B0000011C, + 0x11D0000011E, + 0x11F00000120, + 0x12100000122, + 0x12300000124, + 0x12500000126, + 0x12700000128, + 0x1290000012A, + 0x12B0000012C, + 0x12D0000012E, + 0x12F00000130, + 0x13100000132, + 0x13500000136, + 0x13700000139, + 0x13A0000013B, + 0x13C0000013D, + 0x13E0000013F, + 0x14200000143, + 0x14400000145, + 0x14600000147, + 0x14800000149, + 0x14B0000014C, + 0x14D0000014E, + 0x14F00000150, + 0x15100000152, + 0x15300000154, + 0x15500000156, + 0x15700000158, + 
0x1590000015A, + 0x15B0000015C, + 0x15D0000015E, + 0x15F00000160, + 0x16100000162, + 0x16300000164, + 0x16500000166, + 0x16700000168, + 0x1690000016A, + 0x16B0000016C, + 0x16D0000016E, + 0x16F00000170, + 0x17100000172, + 0x17300000174, + 0x17500000176, + 0x17700000178, + 0x17A0000017B, + 0x17C0000017D, + 0x17E0000017F, + 0x18000000181, + 0x18300000184, + 0x18500000186, + 0x18800000189, + 0x18C0000018E, + 0x19200000193, + 0x19500000196, + 0x1990000019C, + 0x19E0000019F, + 0x1A1000001A2, + 0x1A3000001A4, + 0x1A5000001A6, + 0x1A8000001A9, + 0x1AA000001AC, + 0x1AD000001AE, + 0x1B0000001B1, + 0x1B4000001B5, + 0x1B6000001B7, + 0x1B9000001BC, + 0x1BD000001C4, + 0x1CE000001CF, + 0x1D0000001D1, + 0x1D2000001D3, + 0x1D4000001D5, + 0x1D6000001D7, + 0x1D8000001D9, + 0x1DA000001DB, + 0x1DC000001DE, + 0x1DF000001E0, + 0x1E1000001E2, + 0x1E3000001E4, + 0x1E5000001E6, + 0x1E7000001E8, + 0x1E9000001EA, + 0x1EB000001EC, + 0x1ED000001EE, + 0x1EF000001F1, + 0x1F5000001F6, + 0x1F9000001FA, + 0x1FB000001FC, + 0x1FD000001FE, + 0x1FF00000200, + 0x20100000202, + 0x20300000204, + 0x20500000206, + 0x20700000208, + 0x2090000020A, + 0x20B0000020C, + 0x20D0000020E, + 0x20F00000210, + 0x21100000212, + 0x21300000214, + 0x21500000216, + 0x21700000218, + 0x2190000021A, + 0x21B0000021C, + 0x21D0000021E, + 0x21F00000220, + 0x22100000222, + 0x22300000224, + 0x22500000226, + 0x22700000228, + 0x2290000022A, + 0x22B0000022C, + 0x22D0000022E, + 0x22F00000230, + 0x23100000232, + 0x2330000023A, + 0x23C0000023D, + 0x23F00000241, + 0x24200000243, + 0x24700000248, + 0x2490000024A, + 0x24B0000024C, + 0x24D0000024E, + 0x24F000002B0, + 0x2B9000002C2, + 0x2C6000002D2, + 0x2EC000002ED, + 0x2EE000002EF, + 0x30000000340, + 0x34200000343, + 0x3460000034F, + 0x35000000370, + 0x37100000372, + 0x37300000374, + 0x37700000378, + 0x37B0000037E, + 0x39000000391, + 0x3AC000003CF, + 0x3D7000003D8, + 0x3D9000003DA, + 0x3DB000003DC, + 0x3DD000003DE, + 0x3DF000003E0, + 0x3E1000003E2, + 0x3E3000003E4, + 0x3E5000003E6, + 0x3E7000003E8, + 0x3E9000003EA, + 0x3EB000003EC, + 0x3ED000003EE, + 0x3EF000003F0, + 0x3F3000003F4, + 0x3F8000003F9, + 0x3FB000003FD, + 0x43000000460, + 0x46100000462, + 0x46300000464, + 0x46500000466, + 0x46700000468, + 0x4690000046A, + 0x46B0000046C, + 0x46D0000046E, + 0x46F00000470, + 0x47100000472, + 0x47300000474, + 0x47500000476, + 0x47700000478, + 0x4790000047A, + 0x47B0000047C, + 0x47D0000047E, + 0x47F00000480, + 0x48100000482, + 0x48300000488, + 0x48B0000048C, + 0x48D0000048E, + 0x48F00000490, + 0x49100000492, + 0x49300000494, + 0x49500000496, + 0x49700000498, + 0x4990000049A, + 0x49B0000049C, + 0x49D0000049E, + 0x49F000004A0, + 0x4A1000004A2, + 0x4A3000004A4, + 0x4A5000004A6, + 0x4A7000004A8, + 0x4A9000004AA, + 0x4AB000004AC, + 0x4AD000004AE, + 0x4AF000004B0, + 0x4B1000004B2, + 0x4B3000004B4, + 0x4B5000004B6, + 0x4B7000004B8, + 0x4B9000004BA, + 0x4BB000004BC, + 0x4BD000004BE, + 0x4BF000004C0, + 0x4C2000004C3, + 0x4C4000004C5, + 0x4C6000004C7, + 0x4C8000004C9, + 0x4CA000004CB, + 0x4CC000004CD, + 0x4CE000004D0, + 0x4D1000004D2, + 0x4D3000004D4, + 0x4D5000004D6, + 0x4D7000004D8, + 0x4D9000004DA, + 0x4DB000004DC, + 0x4DD000004DE, + 0x4DF000004E0, + 0x4E1000004E2, + 0x4E3000004E4, + 0x4E5000004E6, + 0x4E7000004E8, + 0x4E9000004EA, + 0x4EB000004EC, + 0x4ED000004EE, + 0x4EF000004F0, + 0x4F1000004F2, + 0x4F3000004F4, + 0x4F5000004F6, + 0x4F7000004F8, + 0x4F9000004FA, + 0x4FB000004FC, + 0x4FD000004FE, + 0x4FF00000500, + 0x50100000502, + 0x50300000504, + 0x50500000506, + 0x50700000508, + 0x5090000050A, + 0x50B0000050C, + 0x50D0000050E, + 
0x50F00000510, + 0x51100000512, + 0x51300000514, + 0x51500000516, + 0x51700000518, + 0x5190000051A, + 0x51B0000051C, + 0x51D0000051E, + 0x51F00000520, + 0x52100000522, + 0x52300000524, + 0x52500000526, + 0x52700000528, + 0x5290000052A, + 0x52B0000052C, + 0x52D0000052E, + 0x52F00000530, + 0x5590000055A, + 0x56000000587, + 0x58800000589, + 0x591000005BE, + 0x5BF000005C0, + 0x5C1000005C3, + 0x5C4000005C6, + 0x5C7000005C8, + 0x5D0000005EB, + 0x5EF000005F3, + 0x6100000061B, + 0x62000000640, + 0x64100000660, + 0x66E00000675, + 0x679000006D4, + 0x6D5000006DD, + 0x6DF000006E9, + 0x6EA000006F0, + 0x6FA00000700, + 0x7100000074B, + 0x74D000007B2, + 0x7C0000007F6, + 0x7FD000007FE, + 0x8000000082E, + 0x8400000085C, + 0x8600000086B, + 0x87000000888, + 0x8890000088F, + 0x898000008E2, + 0x8E300000958, + 0x96000000964, + 0x96600000970, + 0x97100000984, + 0x9850000098D, + 0x98F00000991, + 0x993000009A9, + 0x9AA000009B1, + 0x9B2000009B3, + 0x9B6000009BA, + 0x9BC000009C5, + 0x9C7000009C9, + 0x9CB000009CF, + 0x9D7000009D8, + 0x9E0000009E4, + 0x9E6000009F2, + 0x9FC000009FD, + 0x9FE000009FF, + 0xA0100000A04, + 0xA0500000A0B, + 0xA0F00000A11, + 0xA1300000A29, + 0xA2A00000A31, + 0xA3200000A33, + 0xA3500000A36, + 0xA3800000A3A, + 0xA3C00000A3D, + 0xA3E00000A43, + 0xA4700000A49, + 0xA4B00000A4E, + 0xA5100000A52, + 0xA5C00000A5D, + 0xA6600000A76, + 0xA8100000A84, + 0xA8500000A8E, + 0xA8F00000A92, + 0xA9300000AA9, + 0xAAA00000AB1, + 0xAB200000AB4, + 0xAB500000ABA, + 0xABC00000AC6, + 0xAC700000ACA, + 0xACB00000ACE, + 0xAD000000AD1, + 0xAE000000AE4, + 0xAE600000AF0, + 0xAF900000B00, + 0xB0100000B04, + 0xB0500000B0D, + 0xB0F00000B11, + 0xB1300000B29, + 0xB2A00000B31, + 0xB3200000B34, + 0xB3500000B3A, + 0xB3C00000B45, + 0xB4700000B49, + 0xB4B00000B4E, + 0xB5500000B58, + 0xB5F00000B64, + 0xB6600000B70, + 0xB7100000B72, + 0xB8200000B84, + 0xB8500000B8B, + 0xB8E00000B91, + 0xB9200000B96, + 0xB9900000B9B, + 0xB9C00000B9D, + 0xB9E00000BA0, + 0xBA300000BA5, + 0xBA800000BAB, + 0xBAE00000BBA, + 0xBBE00000BC3, + 0xBC600000BC9, + 0xBCA00000BCE, + 0xBD000000BD1, + 0xBD700000BD8, + 0xBE600000BF0, + 0xC0000000C0D, + 0xC0E00000C11, + 0xC1200000C29, + 0xC2A00000C3A, + 0xC3C00000C45, + 0xC4600000C49, + 0xC4A00000C4E, + 0xC5500000C57, + 0xC5800000C5B, + 0xC5D00000C5E, + 0xC6000000C64, + 0xC6600000C70, + 0xC8000000C84, + 0xC8500000C8D, + 0xC8E00000C91, + 0xC9200000CA9, + 0xCAA00000CB4, + 0xCB500000CBA, + 0xCBC00000CC5, + 0xCC600000CC9, + 0xCCA00000CCE, + 0xCD500000CD7, + 0xCDD00000CDF, + 0xCE000000CE4, + 0xCE600000CF0, + 0xCF100000CF4, + 0xD0000000D0D, + 0xD0E00000D11, + 0xD1200000D45, + 0xD4600000D49, + 0xD4A00000D4F, + 0xD5400000D58, + 0xD5F00000D64, + 0xD6600000D70, + 0xD7A00000D80, + 0xD8100000D84, + 0xD8500000D97, + 0xD9A00000DB2, + 0xDB300000DBC, + 0xDBD00000DBE, + 0xDC000000DC7, + 0xDCA00000DCB, + 0xDCF00000DD5, + 0xDD600000DD7, + 0xDD800000DE0, + 0xDE600000DF0, + 0xDF200000DF4, + 0xE0100000E33, + 0xE3400000E3B, + 0xE4000000E4F, + 0xE5000000E5A, + 0xE8100000E83, + 0xE8400000E85, + 0xE8600000E8B, + 0xE8C00000EA4, + 0xEA500000EA6, + 0xEA700000EB3, + 0xEB400000EBE, + 0xEC000000EC5, + 0xEC600000EC7, + 0xEC800000ECF, + 0xED000000EDA, + 0xEDE00000EE0, + 0xF0000000F01, + 0xF0B00000F0C, + 0xF1800000F1A, + 0xF2000000F2A, + 0xF3500000F36, + 0xF3700000F38, + 0xF3900000F3A, + 0xF3E00000F43, + 0xF4400000F48, + 0xF4900000F4D, + 0xF4E00000F52, + 0xF5300000F57, + 0xF5800000F5C, + 0xF5D00000F69, + 0xF6A00000F6D, + 0xF7100000F73, + 0xF7400000F75, + 0xF7A00000F81, + 0xF8200000F85, + 0xF8600000F93, + 0xF9400000F98, + 0xF9900000F9D, + 0xF9E00000FA2, + 
0xFA300000FA7, + 0xFA800000FAC, + 0xFAD00000FB9, + 0xFBA00000FBD, + 0xFC600000FC7, + 0x10000000104A, + 0x10500000109E, + 0x10D0000010FB, + 0x10FD00001100, + 0x120000001249, + 0x124A0000124E, + 0x125000001257, + 0x125800001259, + 0x125A0000125E, + 0x126000001289, + 0x128A0000128E, + 0x1290000012B1, + 0x12B2000012B6, + 0x12B8000012BF, + 0x12C0000012C1, + 0x12C2000012C6, + 0x12C8000012D7, + 0x12D800001311, + 0x131200001316, + 0x13180000135B, + 0x135D00001360, + 0x138000001390, + 0x13A0000013F6, + 0x14010000166D, + 0x166F00001680, + 0x16810000169B, + 0x16A0000016EB, + 0x16F1000016F9, + 0x170000001716, + 0x171F00001735, + 0x174000001754, + 0x17600000176D, + 0x176E00001771, + 0x177200001774, + 0x1780000017B4, + 0x17B6000017D4, + 0x17D7000017D8, + 0x17DC000017DE, + 0x17E0000017EA, + 0x18100000181A, + 0x182000001879, + 0x1880000018AB, + 0x18B0000018F6, + 0x19000000191F, + 0x19200000192C, + 0x19300000193C, + 0x19460000196E, + 0x197000001975, + 0x1980000019AC, + 0x19B0000019CA, + 0x19D0000019DA, + 0x1A0000001A1C, + 0x1A2000001A5F, + 0x1A6000001A7D, + 0x1A7F00001A8A, + 0x1A9000001A9A, + 0x1AA700001AA8, + 0x1AB000001ABE, + 0x1ABF00001ACF, + 0x1B0000001B4D, + 0x1B5000001B5A, + 0x1B6B00001B74, + 0x1B8000001BF4, + 0x1C0000001C38, + 0x1C4000001C4A, + 0x1C4D00001C7E, + 0x1CD000001CD3, + 0x1CD400001CFB, + 0x1D0000001D2C, + 0x1D2F00001D30, + 0x1D3B00001D3C, + 0x1D4E00001D4F, + 0x1D6B00001D78, + 0x1D7900001D9B, + 0x1DC000001E00, + 0x1E0100001E02, + 0x1E0300001E04, + 0x1E0500001E06, + 0x1E0700001E08, + 0x1E0900001E0A, + 0x1E0B00001E0C, + 0x1E0D00001E0E, + 0x1E0F00001E10, + 0x1E1100001E12, + 0x1E1300001E14, + 0x1E1500001E16, + 0x1E1700001E18, + 0x1E1900001E1A, + 0x1E1B00001E1C, + 0x1E1D00001E1E, + 0x1E1F00001E20, + 0x1E2100001E22, + 0x1E2300001E24, + 0x1E2500001E26, + 0x1E2700001E28, + 0x1E2900001E2A, + 0x1E2B00001E2C, + 0x1E2D00001E2E, + 0x1E2F00001E30, + 0x1E3100001E32, + 0x1E3300001E34, + 0x1E3500001E36, + 0x1E3700001E38, + 0x1E3900001E3A, + 0x1E3B00001E3C, + 0x1E3D00001E3E, + 0x1E3F00001E40, + 0x1E4100001E42, + 0x1E4300001E44, + 0x1E4500001E46, + 0x1E4700001E48, + 0x1E4900001E4A, + 0x1E4B00001E4C, + 0x1E4D00001E4E, + 0x1E4F00001E50, + 0x1E5100001E52, + 0x1E5300001E54, + 0x1E5500001E56, + 0x1E5700001E58, + 0x1E5900001E5A, + 0x1E5B00001E5C, + 0x1E5D00001E5E, + 0x1E5F00001E60, + 0x1E6100001E62, + 0x1E6300001E64, + 0x1E6500001E66, + 0x1E6700001E68, + 0x1E6900001E6A, + 0x1E6B00001E6C, + 0x1E6D00001E6E, + 0x1E6F00001E70, + 0x1E7100001E72, + 0x1E7300001E74, + 0x1E7500001E76, + 0x1E7700001E78, + 0x1E7900001E7A, + 0x1E7B00001E7C, + 0x1E7D00001E7E, + 0x1E7F00001E80, + 0x1E8100001E82, + 0x1E8300001E84, + 0x1E8500001E86, + 0x1E8700001E88, + 0x1E8900001E8A, + 0x1E8B00001E8C, + 0x1E8D00001E8E, + 0x1E8F00001E90, + 0x1E9100001E92, + 0x1E9300001E94, + 0x1E9500001E9A, + 0x1E9C00001E9E, + 0x1E9F00001EA0, + 0x1EA100001EA2, + 0x1EA300001EA4, + 0x1EA500001EA6, + 0x1EA700001EA8, + 0x1EA900001EAA, + 0x1EAB00001EAC, + 0x1EAD00001EAE, + 0x1EAF00001EB0, + 0x1EB100001EB2, + 0x1EB300001EB4, + 0x1EB500001EB6, + 0x1EB700001EB8, + 0x1EB900001EBA, + 0x1EBB00001EBC, + 0x1EBD00001EBE, + 0x1EBF00001EC0, + 0x1EC100001EC2, + 0x1EC300001EC4, + 0x1EC500001EC6, + 0x1EC700001EC8, + 0x1EC900001ECA, + 0x1ECB00001ECC, + 0x1ECD00001ECE, + 0x1ECF00001ED0, + 0x1ED100001ED2, + 0x1ED300001ED4, + 0x1ED500001ED6, + 0x1ED700001ED8, + 0x1ED900001EDA, + 0x1EDB00001EDC, + 0x1EDD00001EDE, + 0x1EDF00001EE0, + 0x1EE100001EE2, + 0x1EE300001EE4, + 0x1EE500001EE6, + 0x1EE700001EE8, + 0x1EE900001EEA, + 0x1EEB00001EEC, + 0x1EED00001EEE, + 0x1EEF00001EF0, + 
0x1EF100001EF2, + 0x1EF300001EF4, + 0x1EF500001EF6, + 0x1EF700001EF8, + 0x1EF900001EFA, + 0x1EFB00001EFC, + 0x1EFD00001EFE, + 0x1EFF00001F08, + 0x1F1000001F16, + 0x1F2000001F28, + 0x1F3000001F38, + 0x1F4000001F46, + 0x1F5000001F58, + 0x1F6000001F68, + 0x1F7000001F71, + 0x1F7200001F73, + 0x1F7400001F75, + 0x1F7600001F77, + 0x1F7800001F79, + 0x1F7A00001F7B, + 0x1F7C00001F7D, + 0x1FB000001FB2, + 0x1FB600001FB7, + 0x1FC600001FC7, + 0x1FD000001FD3, + 0x1FD600001FD8, + 0x1FE000001FE3, + 0x1FE400001FE8, + 0x1FF600001FF7, + 0x214E0000214F, + 0x218400002185, + 0x2C3000002C60, + 0x2C6100002C62, + 0x2C6500002C67, + 0x2C6800002C69, + 0x2C6A00002C6B, + 0x2C6C00002C6D, + 0x2C7100002C72, + 0x2C7300002C75, + 0x2C7600002C7C, + 0x2C8100002C82, + 0x2C8300002C84, + 0x2C8500002C86, + 0x2C8700002C88, + 0x2C8900002C8A, + 0x2C8B00002C8C, + 0x2C8D00002C8E, + 0x2C8F00002C90, + 0x2C9100002C92, + 0x2C9300002C94, + 0x2C9500002C96, + 0x2C9700002C98, + 0x2C9900002C9A, + 0x2C9B00002C9C, + 0x2C9D00002C9E, + 0x2C9F00002CA0, + 0x2CA100002CA2, + 0x2CA300002CA4, + 0x2CA500002CA6, + 0x2CA700002CA8, + 0x2CA900002CAA, + 0x2CAB00002CAC, + 0x2CAD00002CAE, + 0x2CAF00002CB0, + 0x2CB100002CB2, + 0x2CB300002CB4, + 0x2CB500002CB6, + 0x2CB700002CB8, + 0x2CB900002CBA, + 0x2CBB00002CBC, + 0x2CBD00002CBE, + 0x2CBF00002CC0, + 0x2CC100002CC2, + 0x2CC300002CC4, + 0x2CC500002CC6, + 0x2CC700002CC8, + 0x2CC900002CCA, + 0x2CCB00002CCC, + 0x2CCD00002CCE, + 0x2CCF00002CD0, + 0x2CD100002CD2, + 0x2CD300002CD4, + 0x2CD500002CD6, + 0x2CD700002CD8, + 0x2CD900002CDA, + 0x2CDB00002CDC, + 0x2CDD00002CDE, + 0x2CDF00002CE0, + 0x2CE100002CE2, + 0x2CE300002CE5, + 0x2CEC00002CED, + 0x2CEE00002CF2, + 0x2CF300002CF4, + 0x2D0000002D26, + 0x2D2700002D28, + 0x2D2D00002D2E, + 0x2D3000002D68, + 0x2D7F00002D97, + 0x2DA000002DA7, + 0x2DA800002DAF, + 0x2DB000002DB7, + 0x2DB800002DBF, + 0x2DC000002DC7, + 0x2DC800002DCF, + 0x2DD000002DD7, + 0x2DD800002DDF, + 0x2DE000002E00, + 0x2E2F00002E30, + 0x300500003008, + 0x302A0000302E, + 0x303C0000303D, + 0x304100003097, + 0x30990000309B, + 0x309D0000309F, + 0x30A1000030FB, + 0x30FC000030FF, + 0x310500003130, + 0x31A0000031C0, + 0x31F000003200, + 0x340000004DC0, + 0x4E000000A48D, + 0xA4D00000A4FE, + 0xA5000000A60D, + 0xA6100000A62C, + 0xA6410000A642, + 0xA6430000A644, + 0xA6450000A646, + 0xA6470000A648, + 0xA6490000A64A, + 0xA64B0000A64C, + 0xA64D0000A64E, + 0xA64F0000A650, + 0xA6510000A652, + 0xA6530000A654, + 0xA6550000A656, + 0xA6570000A658, + 0xA6590000A65A, + 0xA65B0000A65C, + 0xA65D0000A65E, + 0xA65F0000A660, + 0xA6610000A662, + 0xA6630000A664, + 0xA6650000A666, + 0xA6670000A668, + 0xA6690000A66A, + 0xA66B0000A66C, + 0xA66D0000A670, + 0xA6740000A67E, + 0xA67F0000A680, + 0xA6810000A682, + 0xA6830000A684, + 0xA6850000A686, + 0xA6870000A688, + 0xA6890000A68A, + 0xA68B0000A68C, + 0xA68D0000A68E, + 0xA68F0000A690, + 0xA6910000A692, + 0xA6930000A694, + 0xA6950000A696, + 0xA6970000A698, + 0xA6990000A69A, + 0xA69B0000A69C, + 0xA69E0000A6E6, + 0xA6F00000A6F2, + 0xA7170000A720, + 0xA7230000A724, + 0xA7250000A726, + 0xA7270000A728, + 0xA7290000A72A, + 0xA72B0000A72C, + 0xA72D0000A72E, + 0xA72F0000A732, + 0xA7330000A734, + 0xA7350000A736, + 0xA7370000A738, + 0xA7390000A73A, + 0xA73B0000A73C, + 0xA73D0000A73E, + 0xA73F0000A740, + 0xA7410000A742, + 0xA7430000A744, + 0xA7450000A746, + 0xA7470000A748, + 0xA7490000A74A, + 0xA74B0000A74C, + 0xA74D0000A74E, + 0xA74F0000A750, + 0xA7510000A752, + 0xA7530000A754, + 0xA7550000A756, + 0xA7570000A758, + 0xA7590000A75A, + 0xA75B0000A75C, + 0xA75D0000A75E, + 0xA75F0000A760, + 0xA7610000A762, + 
0xA7630000A764, + 0xA7650000A766, + 0xA7670000A768, + 0xA7690000A76A, + 0xA76B0000A76C, + 0xA76D0000A76E, + 0xA76F0000A770, + 0xA7710000A779, + 0xA77A0000A77B, + 0xA77C0000A77D, + 0xA77F0000A780, + 0xA7810000A782, + 0xA7830000A784, + 0xA7850000A786, + 0xA7870000A789, + 0xA78C0000A78D, + 0xA78E0000A790, + 0xA7910000A792, + 0xA7930000A796, + 0xA7970000A798, + 0xA7990000A79A, + 0xA79B0000A79C, + 0xA79D0000A79E, + 0xA79F0000A7A0, + 0xA7A10000A7A2, + 0xA7A30000A7A4, + 0xA7A50000A7A6, + 0xA7A70000A7A8, + 0xA7A90000A7AA, + 0xA7AF0000A7B0, + 0xA7B50000A7B6, + 0xA7B70000A7B8, + 0xA7B90000A7BA, + 0xA7BB0000A7BC, + 0xA7BD0000A7BE, + 0xA7BF0000A7C0, + 0xA7C10000A7C2, + 0xA7C30000A7C4, + 0xA7C80000A7C9, + 0xA7CA0000A7CB, + 0xA7D10000A7D2, + 0xA7D30000A7D4, + 0xA7D50000A7D6, + 0xA7D70000A7D8, + 0xA7D90000A7DA, + 0xA7F60000A7F8, + 0xA7FA0000A828, + 0xA82C0000A82D, + 0xA8400000A874, + 0xA8800000A8C6, + 0xA8D00000A8DA, + 0xA8E00000A8F8, + 0xA8FB0000A8FC, + 0xA8FD0000A92E, + 0xA9300000A954, + 0xA9800000A9C1, + 0xA9CF0000A9DA, + 0xA9E00000A9FF, + 0xAA000000AA37, + 0xAA400000AA4E, + 0xAA500000AA5A, + 0xAA600000AA77, + 0xAA7A0000AAC3, + 0xAADB0000AADE, + 0xAAE00000AAF0, + 0xAAF20000AAF7, + 0xAB010000AB07, + 0xAB090000AB0F, + 0xAB110000AB17, + 0xAB200000AB27, + 0xAB280000AB2F, + 0xAB300000AB5B, + 0xAB600000AB69, + 0xABC00000ABEB, + 0xABEC0000ABEE, + 0xABF00000ABFA, + 0xAC000000D7A4, + 0xFA0E0000FA10, + 0xFA110000FA12, + 0xFA130000FA15, + 0xFA1F0000FA20, + 0xFA210000FA22, + 0xFA230000FA25, + 0xFA270000FA2A, + 0xFB1E0000FB1F, + 0xFE200000FE30, + 0xFE730000FE74, + 0x100000001000C, + 0x1000D00010027, + 0x100280001003B, + 0x1003C0001003E, + 0x1003F0001004E, + 0x100500001005E, + 0x10080000100FB, + 0x101FD000101FE, + 0x102800001029D, + 0x102A0000102D1, + 0x102E0000102E1, + 0x1030000010320, + 0x1032D00010341, + 0x103420001034A, + 0x103500001037B, + 0x103800001039E, + 0x103A0000103C4, + 0x103C8000103D0, + 0x104280001049E, + 0x104A0000104AA, + 0x104D8000104FC, + 0x1050000010528, + 0x1053000010564, + 0x10597000105A2, + 0x105A3000105B2, + 0x105B3000105BA, + 0x105BB000105BD, + 0x1060000010737, + 0x1074000010756, + 0x1076000010768, + 0x1078000010781, + 0x1080000010806, + 0x1080800010809, + 0x1080A00010836, + 0x1083700010839, + 0x1083C0001083D, + 0x1083F00010856, + 0x1086000010877, + 0x108800001089F, + 0x108E0000108F3, + 0x108F4000108F6, + 0x1090000010916, + 0x109200001093A, + 0x10980000109B8, + 0x109BE000109C0, + 0x10A0000010A04, + 0x10A0500010A07, + 0x10A0C00010A14, + 0x10A1500010A18, + 0x10A1900010A36, + 0x10A3800010A3B, + 0x10A3F00010A40, + 0x10A6000010A7D, + 0x10A8000010A9D, + 0x10AC000010AC8, + 0x10AC900010AE7, + 0x10B0000010B36, + 0x10B4000010B56, + 0x10B6000010B73, + 0x10B8000010B92, + 0x10C0000010C49, + 0x10CC000010CF3, + 0x10D0000010D28, + 0x10D3000010D3A, + 0x10E8000010EAA, + 0x10EAB00010EAD, + 0x10EB000010EB2, + 0x10EFD00010F1D, + 0x10F2700010F28, + 0x10F3000010F51, + 0x10F7000010F86, + 0x10FB000010FC5, + 0x10FE000010FF7, + 0x1100000011047, + 0x1106600011076, + 0x1107F000110BB, + 0x110C2000110C3, + 0x110D0000110E9, + 0x110F0000110FA, + 0x1110000011135, + 0x1113600011140, + 0x1114400011148, + 0x1115000011174, + 0x1117600011177, + 0x11180000111C5, + 0x111C9000111CD, + 0x111CE000111DB, + 0x111DC000111DD, + 0x1120000011212, + 0x1121300011238, + 0x1123E00011242, + 0x1128000011287, + 0x1128800011289, + 0x1128A0001128E, + 0x1128F0001129E, + 0x1129F000112A9, + 0x112B0000112EB, + 0x112F0000112FA, + 0x1130000011304, + 0x113050001130D, + 0x1130F00011311, + 0x1131300011329, + 0x1132A00011331, + 0x1133200011334, + 
0x113350001133A, + 0x1133B00011345, + 0x1134700011349, + 0x1134B0001134E, + 0x1135000011351, + 0x1135700011358, + 0x1135D00011364, + 0x113660001136D, + 0x1137000011375, + 0x114000001144B, + 0x114500001145A, + 0x1145E00011462, + 0x11480000114C6, + 0x114C7000114C8, + 0x114D0000114DA, + 0x11580000115B6, + 0x115B8000115C1, + 0x115D8000115DE, + 0x1160000011641, + 0x1164400011645, + 0x116500001165A, + 0x11680000116B9, + 0x116C0000116CA, + 0x117000001171B, + 0x1171D0001172C, + 0x117300001173A, + 0x1174000011747, + 0x118000001183B, + 0x118C0000118EA, + 0x118FF00011907, + 0x119090001190A, + 0x1190C00011914, + 0x1191500011917, + 0x1191800011936, + 0x1193700011939, + 0x1193B00011944, + 0x119500001195A, + 0x119A0000119A8, + 0x119AA000119D8, + 0x119DA000119E2, + 0x119E3000119E5, + 0x11A0000011A3F, + 0x11A4700011A48, + 0x11A5000011A9A, + 0x11A9D00011A9E, + 0x11AB000011AF9, + 0x11C0000011C09, + 0x11C0A00011C37, + 0x11C3800011C41, + 0x11C5000011C5A, + 0x11C7200011C90, + 0x11C9200011CA8, + 0x11CA900011CB7, + 0x11D0000011D07, + 0x11D0800011D0A, + 0x11D0B00011D37, + 0x11D3A00011D3B, + 0x11D3C00011D3E, + 0x11D3F00011D48, + 0x11D5000011D5A, + 0x11D6000011D66, + 0x11D6700011D69, + 0x11D6A00011D8F, + 0x11D9000011D92, + 0x11D9300011D99, + 0x11DA000011DAA, + 0x11EE000011EF7, + 0x11F0000011F11, + 0x11F1200011F3B, + 0x11F3E00011F43, + 0x11F5000011F5A, + 0x11FB000011FB1, + 0x120000001239A, + 0x1248000012544, + 0x12F9000012FF1, + 0x1300000013430, + 0x1344000013456, + 0x1440000014647, + 0x1680000016A39, + 0x16A4000016A5F, + 0x16A6000016A6A, + 0x16A7000016ABF, + 0x16AC000016ACA, + 0x16AD000016AEE, + 0x16AF000016AF5, + 0x16B0000016B37, + 0x16B4000016B44, + 0x16B5000016B5A, + 0x16B6300016B78, + 0x16B7D00016B90, + 0x16E6000016E80, + 0x16F0000016F4B, + 0x16F4F00016F88, + 0x16F8F00016FA0, + 0x16FE000016FE2, + 0x16FE300016FE5, + 0x16FF000016FF2, + 0x17000000187F8, + 0x1880000018CD6, + 0x18D0000018D09, + 0x1AFF00001AFF4, + 0x1AFF50001AFFC, + 0x1AFFD0001AFFF, + 0x1B0000001B123, + 0x1B1320001B133, + 0x1B1500001B153, + 0x1B1550001B156, + 0x1B1640001B168, + 0x1B1700001B2FC, + 0x1BC000001BC6B, + 0x1BC700001BC7D, + 0x1BC800001BC89, + 0x1BC900001BC9A, + 0x1BC9D0001BC9F, + 0x1CF000001CF2E, + 0x1CF300001CF47, + 0x1DA000001DA37, + 0x1DA3B0001DA6D, + 0x1DA750001DA76, + 0x1DA840001DA85, + 0x1DA9B0001DAA0, + 0x1DAA10001DAB0, + 0x1DF000001DF1F, + 0x1DF250001DF2B, + 0x1E0000001E007, + 0x1E0080001E019, + 0x1E01B0001E022, + 0x1E0230001E025, + 0x1E0260001E02B, + 0x1E08F0001E090, + 0x1E1000001E12D, + 0x1E1300001E13E, + 0x1E1400001E14A, + 0x1E14E0001E14F, + 0x1E2900001E2AF, + 0x1E2C00001E2FA, + 0x1E4D00001E4FA, + 0x1E7E00001E7E7, + 0x1E7E80001E7EC, + 0x1E7ED0001E7EF, + 0x1E7F00001E7FF, + 0x1E8000001E8C5, + 0x1E8D00001E8D7, + 0x1E9220001E94C, + 0x1E9500001E95A, + 0x200000002A6E0, + 0x2A7000002B73A, + 0x2B7400002B81E, + 0x2B8200002CEA2, + 0x2CEB00002EBE1, + 0x2EBF00002EE5E, + 0x300000003134B, + 0x31350000323B0, + ), + "CONTEXTJ": (0x200C0000200E,), + "CONTEXTO": ( + 0xB7000000B8, + 0x37500000376, + 0x5F3000005F5, + 0x6600000066A, + 0x6F0000006FA, + 0x30FB000030FC, + ), +} diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/intranges.py b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/intranges.py new file mode 100644 index 0000000..7bfaa8d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/intranges.py @@ -0,0 +1,57 @@ +""" +Given a list of integers, made up of (hopefully) a small number of long runs +of consecutive integers, compute a representation of the form +((start1, end1), (start2, end2) ...). 
Then answer the question "was x present
+in the original list?" in time O(log(# runs)).
+"""
+
+import bisect
+from typing import List, Tuple
+
+
+def intranges_from_list(list_: List[int]) -> Tuple[int, ...]:
+    """Represent a list of integers as a sequence of ranges:
+    ((start_0, end_0), (start_1, end_1), ...), such that the original
+    integers are exactly those x such that start_i <= x < end_i for some i.
+
+    Ranges are encoded as single integers (start << 32 | end), not as tuples.
+    """
+
+    sorted_list = sorted(list_)
+    ranges = []
+    last_write = -1
+    for i in range(len(sorted_list)):
+        if i + 1 < len(sorted_list):
+            if sorted_list[i] == sorted_list[i + 1] - 1:
+                continue
+        current_range = sorted_list[last_write + 1 : i + 1]
+        ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
+        last_write = i
+
+    return tuple(ranges)
+
+
+def _encode_range(start: int, end: int) -> int:
+    return (start << 32) | end
+
+
+def _decode_range(r: int) -> Tuple[int, int]:
+    return (r >> 32), (r & ((1 << 32) - 1))
+
+
+def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool:
+    """Determine if `int_` falls into one of the ranges in `ranges`."""
+    tuple_ = _encode_range(int_, 0)
+    pos = bisect.bisect_left(ranges, tuple_)
+    # we could be immediately ahead of a tuple (start, end)
+    # with start < int_ <= end
+    if pos > 0:
+        left, right = _decode_range(ranges[pos - 1])
+        if left <= int_ < right:
+            return True
+    # or we could be immediately behind a tuple (int_, end)
+    if pos < len(ranges):
+        left, _ = _decode_range(ranges[pos])
+        if left == int_:
+            return True
+    return False
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/package_data.py b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/package_data.py
new file mode 100644
index 0000000..514ff7e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/package_data.py
@@ -0,0 +1 @@
+__version__ = "3.10"
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/idna/uts46data.py b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/uts46data.py
new file mode 100644
index 0000000..eb89432
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/idna/uts46data.py
@@ -0,0 +1,8681 @@
+# This file is automatically generated by tools/idna-data
+# vim: set fileencoding=utf-8 :
+
+from typing import List, Tuple, Union
+
+"""IDNA Mapping Table from UTS46."""
+
+
+__version__ = "15.1.0"
+
+
+def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
+        (0x0, "3"),
+        (0x1, "3"),
+        (0x2, "3"),
+        (0x3, "3"),
+        (0x4, "3"),
+        (0x5, "3"),
+        (0x6, "3"),
+        (0x7, "3"),
+        (0x8, "3"),
+        (0x9, "3"),
+        (0xA, "3"),
+        (0xB, "3"),
+        (0xC, "3"),
+        (0xD, "3"),
+        (0xE, "3"),
+        (0xF, "3"),
+        (0x10, "3"),
+        (0x11, "3"),
+        (0x12, "3"),
+        (0x13, "3"),
+        (0x14, "3"),
+        (0x15, "3"),
+        (0x16, "3"),
+        (0x17, "3"),
+        (0x18, "3"),
+        (0x19, "3"),
+        (0x1A, "3"),
+        (0x1B, "3"),
+        (0x1C, "3"),
+        (0x1D, "3"),
+        (0x1E, "3"),
+        (0x1F, "3"),
+        (0x20, "3"),
+        (0x21, "3"),
+        (0x22, "3"),
+        (0x23, "3"),
+        (0x24, "3"),
+        (0x25, "3"),
+        (0x26, "3"),
+        (0x27, "3"),
+        (0x28, "3"),
+        (0x29, "3"),
+        (0x2A, "3"),
+        (0x2B, "3"),
+        (0x2C, "3"),
+        (0x2D, "V"),
+        (0x2E, "V"),
+        (0x2F, "3"),
+        (0x30, "V"),
+        (0x31, "V"),
+        (0x32, "V"),
+        (0x33, "V"),
+        (0x34, "V"),
+        (0x35, "V"),
+        (0x36, "V"),
+        (0x37, "V"),
+        (0x38, "V"),
+        (0x39, "V"),
+        (0x3A, "3"),
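A minimal usage sketch of the intranges helpers above. This is not part of the vendored files: it assumes the standalone package import path idna.intranges (this patch vendors the same module as pip._vendor.idna.intranges), and example_points is a made-up input, not data from these tables.

# Sketch only, assuming the standalone idna package is installed;
# example_points is an invented codepoint list.
from idna.intranges import intranges_from_list, intranges_contain

example_points = [0x61, 0x62, 0x63, 0x7A]    # one run 'a'..'c', plus a lone 'z'
ranges = intranges_from_list(example_points)
# -> (0x6100000064, 0x7A0000007B): each run is packed as (start << 32) | end
#    with an exclusive end, so the table is a flat, sorted tuple of ints.
assert intranges_contain(0x62, ranges)       # 0x62 lies in [0x61, 0x64)
assert not intranges_contain(0x64, ranges)   # 'd': the end bound is exclusive

Packing each run into a single integer is what lets tables such as codepoint_classes above be stored as sorted int tuples and searched with bisect in O(log n). By a similar convention, the values in the joining_types table appear to be ord() of the Unicode Joining_Type letter: 67 = "C", 68 = "D", 76 = "L", 82 = "R", 84 = "T".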
(0x3B, "3"), + (0x3C, "3"), + (0x3D, "3"), + (0x3E, "3"), + (0x3F, "3"), + (0x40, "3"), + (0x41, "M", "a"), + (0x42, "M", "b"), + (0x43, "M", "c"), + (0x44, "M", "d"), + (0x45, "M", "e"), + (0x46, "M", "f"), + (0x47, "M", "g"), + (0x48, "M", "h"), + (0x49, "M", "i"), + (0x4A, "M", "j"), + (0x4B, "M", "k"), + (0x4C, "M", "l"), + (0x4D, "M", "m"), + (0x4E, "M", "n"), + (0x4F, "M", "o"), + (0x50, "M", "p"), + (0x51, "M", "q"), + (0x52, "M", "r"), + (0x53, "M", "s"), + (0x54, "M", "t"), + (0x55, "M", "u"), + (0x56, "M", "v"), + (0x57, "M", "w"), + (0x58, "M", "x"), + (0x59, "M", "y"), + (0x5A, "M", "z"), + (0x5B, "3"), + (0x5C, "3"), + (0x5D, "3"), + (0x5E, "3"), + (0x5F, "3"), + (0x60, "3"), + (0x61, "V"), + (0x62, "V"), + (0x63, "V"), + ] + + +def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x64, "V"), + (0x65, "V"), + (0x66, "V"), + (0x67, "V"), + (0x68, "V"), + (0x69, "V"), + (0x6A, "V"), + (0x6B, "V"), + (0x6C, "V"), + (0x6D, "V"), + (0x6E, "V"), + (0x6F, "V"), + (0x70, "V"), + (0x71, "V"), + (0x72, "V"), + (0x73, "V"), + (0x74, "V"), + (0x75, "V"), + (0x76, "V"), + (0x77, "V"), + (0x78, "V"), + (0x79, "V"), + (0x7A, "V"), + (0x7B, "3"), + (0x7C, "3"), + (0x7D, "3"), + (0x7E, "3"), + (0x7F, "3"), + (0x80, "X"), + (0x81, "X"), + (0x82, "X"), + (0x83, "X"), + (0x84, "X"), + (0x85, "X"), + (0x86, "X"), + (0x87, "X"), + (0x88, "X"), + (0x89, "X"), + (0x8A, "X"), + (0x8B, "X"), + (0x8C, "X"), + (0x8D, "X"), + (0x8E, "X"), + (0x8F, "X"), + (0x90, "X"), + (0x91, "X"), + (0x92, "X"), + (0x93, "X"), + (0x94, "X"), + (0x95, "X"), + (0x96, "X"), + (0x97, "X"), + (0x98, "X"), + (0x99, "X"), + (0x9A, "X"), + (0x9B, "X"), + (0x9C, "X"), + (0x9D, "X"), + (0x9E, "X"), + (0x9F, "X"), + (0xA0, "3", " "), + (0xA1, "V"), + (0xA2, "V"), + (0xA3, "V"), + (0xA4, "V"), + (0xA5, "V"), + (0xA6, "V"), + (0xA7, "V"), + (0xA8, "3", " ̈"), + (0xA9, "V"), + (0xAA, "M", "a"), + (0xAB, "V"), + (0xAC, "V"), + (0xAD, "I"), + (0xAE, "V"), + (0xAF, "3", " ̄"), + (0xB0, "V"), + (0xB1, "V"), + (0xB2, "M", "2"), + (0xB3, "M", "3"), + (0xB4, "3", " ́"), + (0xB5, "M", "μ"), + (0xB6, "V"), + (0xB7, "V"), + (0xB8, "3", " ̧"), + (0xB9, "M", "1"), + (0xBA, "M", "o"), + (0xBB, "V"), + (0xBC, "M", "1⁄4"), + (0xBD, "M", "1⁄2"), + (0xBE, "M", "3⁄4"), + (0xBF, "V"), + (0xC0, "M", "à"), + (0xC1, "M", "á"), + (0xC2, "M", "â"), + (0xC3, "M", "ã"), + (0xC4, "M", "ä"), + (0xC5, "M", "å"), + (0xC6, "M", "æ"), + (0xC7, "M", "ç"), + ] + + +def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC8, "M", "è"), + (0xC9, "M", "é"), + (0xCA, "M", "ê"), + (0xCB, "M", "ë"), + (0xCC, "M", "ì"), + (0xCD, "M", "í"), + (0xCE, "M", "î"), + (0xCF, "M", "ï"), + (0xD0, "M", "ð"), + (0xD1, "M", "ñ"), + (0xD2, "M", "ò"), + (0xD3, "M", "ó"), + (0xD4, "M", "ô"), + (0xD5, "M", "õ"), + (0xD6, "M", "ö"), + (0xD7, "V"), + (0xD8, "M", "ø"), + (0xD9, "M", "ù"), + (0xDA, "M", "ú"), + (0xDB, "M", "û"), + (0xDC, "M", "ü"), + (0xDD, "M", "ý"), + (0xDE, "M", "þ"), + (0xDF, "D", "ss"), + (0xE0, "V"), + (0xE1, "V"), + (0xE2, "V"), + (0xE3, "V"), + (0xE4, "V"), + (0xE5, "V"), + (0xE6, "V"), + (0xE7, "V"), + (0xE8, "V"), + (0xE9, "V"), + (0xEA, "V"), + (0xEB, "V"), + (0xEC, "V"), + (0xED, "V"), + (0xEE, "V"), + (0xEF, "V"), + (0xF0, "V"), + (0xF1, "V"), + (0xF2, "V"), + (0xF3, "V"), + (0xF4, "V"), + (0xF5, "V"), + (0xF6, "V"), + (0xF7, "V"), + (0xF8, "V"), + (0xF9, "V"), + (0xFA, "V"), + (0xFB, "V"), + (0xFC, "V"), + (0xFD, "V"), + (0xFE, "V"), + (0xFF, "V"), + (0x100, "M", "ā"), + (0x101, "V"), + (0x102, "M", "ă"), + 
(0x103, "V"), + (0x104, "M", "ą"), + (0x105, "V"), + (0x106, "M", "ć"), + (0x107, "V"), + (0x108, "M", "ĉ"), + (0x109, "V"), + (0x10A, "M", "ċ"), + (0x10B, "V"), + (0x10C, "M", "č"), + (0x10D, "V"), + (0x10E, "M", "ď"), + (0x10F, "V"), + (0x110, "M", "đ"), + (0x111, "V"), + (0x112, "M", "ē"), + (0x113, "V"), + (0x114, "M", "ĕ"), + (0x115, "V"), + (0x116, "M", "ė"), + (0x117, "V"), + (0x118, "M", "ę"), + (0x119, "V"), + (0x11A, "M", "ě"), + (0x11B, "V"), + (0x11C, "M", "ĝ"), + (0x11D, "V"), + (0x11E, "M", "ğ"), + (0x11F, "V"), + (0x120, "M", "ġ"), + (0x121, "V"), + (0x122, "M", "ģ"), + (0x123, "V"), + (0x124, "M", "ĥ"), + (0x125, "V"), + (0x126, "M", "ħ"), + (0x127, "V"), + (0x128, "M", "ĩ"), + (0x129, "V"), + (0x12A, "M", "ī"), + (0x12B, "V"), + ] + + +def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x12C, "M", "ĭ"), + (0x12D, "V"), + (0x12E, "M", "į"), + (0x12F, "V"), + (0x130, "M", "i̇"), + (0x131, "V"), + (0x132, "M", "ij"), + (0x134, "M", "ĵ"), + (0x135, "V"), + (0x136, "M", "ķ"), + (0x137, "V"), + (0x139, "M", "ĺ"), + (0x13A, "V"), + (0x13B, "M", "ļ"), + (0x13C, "V"), + (0x13D, "M", "ľ"), + (0x13E, "V"), + (0x13F, "M", "l·"), + (0x141, "M", "ł"), + (0x142, "V"), + (0x143, "M", "ń"), + (0x144, "V"), + (0x145, "M", "ņ"), + (0x146, "V"), + (0x147, "M", "ň"), + (0x148, "V"), + (0x149, "M", "ʼn"), + (0x14A, "M", "ŋ"), + (0x14B, "V"), + (0x14C, "M", "ō"), + (0x14D, "V"), + (0x14E, "M", "ŏ"), + (0x14F, "V"), + (0x150, "M", "ő"), + (0x151, "V"), + (0x152, "M", "œ"), + (0x153, "V"), + (0x154, "M", "ŕ"), + (0x155, "V"), + (0x156, "M", "ŗ"), + (0x157, "V"), + (0x158, "M", "ř"), + (0x159, "V"), + (0x15A, "M", "ś"), + (0x15B, "V"), + (0x15C, "M", "ŝ"), + (0x15D, "V"), + (0x15E, "M", "ş"), + (0x15F, "V"), + (0x160, "M", "š"), + (0x161, "V"), + (0x162, "M", "ţ"), + (0x163, "V"), + (0x164, "M", "ť"), + (0x165, "V"), + (0x166, "M", "ŧ"), + (0x167, "V"), + (0x168, "M", "ũ"), + (0x169, "V"), + (0x16A, "M", "ū"), + (0x16B, "V"), + (0x16C, "M", "ŭ"), + (0x16D, "V"), + (0x16E, "M", "ů"), + (0x16F, "V"), + (0x170, "M", "ű"), + (0x171, "V"), + (0x172, "M", "ų"), + (0x173, "V"), + (0x174, "M", "ŵ"), + (0x175, "V"), + (0x176, "M", "ŷ"), + (0x177, "V"), + (0x178, "M", "ÿ"), + (0x179, "M", "ź"), + (0x17A, "V"), + (0x17B, "M", "ż"), + (0x17C, "V"), + (0x17D, "M", "ž"), + (0x17E, "V"), + (0x17F, "M", "s"), + (0x180, "V"), + (0x181, "M", "ɓ"), + (0x182, "M", "ƃ"), + (0x183, "V"), + (0x184, "M", "ƅ"), + (0x185, "V"), + (0x186, "M", "ɔ"), + (0x187, "M", "ƈ"), + (0x188, "V"), + (0x189, "M", "ɖ"), + (0x18A, "M", "ɗ"), + (0x18B, "M", "ƌ"), + (0x18C, "V"), + (0x18E, "M", "ǝ"), + (0x18F, "M", "ə"), + (0x190, "M", "ɛ"), + (0x191, "M", "ƒ"), + (0x192, "V"), + (0x193, "M", "ɠ"), + ] + + +def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x194, "M", "ɣ"), + (0x195, "V"), + (0x196, "M", "ɩ"), + (0x197, "M", "ɨ"), + (0x198, "M", "ƙ"), + (0x199, "V"), + (0x19C, "M", "ɯ"), + (0x19D, "M", "ɲ"), + (0x19E, "V"), + (0x19F, "M", "ɵ"), + (0x1A0, "M", "ơ"), + (0x1A1, "V"), + (0x1A2, "M", "ƣ"), + (0x1A3, "V"), + (0x1A4, "M", "ƥ"), + (0x1A5, "V"), + (0x1A6, "M", "ʀ"), + (0x1A7, "M", "ƨ"), + (0x1A8, "V"), + (0x1A9, "M", "ʃ"), + (0x1AA, "V"), + (0x1AC, "M", "ƭ"), + (0x1AD, "V"), + (0x1AE, "M", "ʈ"), + (0x1AF, "M", "ư"), + (0x1B0, "V"), + (0x1B1, "M", "ʊ"), + (0x1B2, "M", "ʋ"), + (0x1B3, "M", "ƴ"), + (0x1B4, "V"), + (0x1B5, "M", "ƶ"), + (0x1B6, "V"), + (0x1B7, "M", "ʒ"), + (0x1B8, "M", "ƹ"), + (0x1B9, "V"), + (0x1BC, "M", "ƽ"), + (0x1BD, "V"), + (0x1C4, "M", "dž"), + (0x1C7, 
"M", "lj"), + (0x1CA, "M", "nj"), + (0x1CD, "M", "ǎ"), + (0x1CE, "V"), + (0x1CF, "M", "ǐ"), + (0x1D0, "V"), + (0x1D1, "M", "ǒ"), + (0x1D2, "V"), + (0x1D3, "M", "ǔ"), + (0x1D4, "V"), + (0x1D5, "M", "ǖ"), + (0x1D6, "V"), + (0x1D7, "M", "ǘ"), + (0x1D8, "V"), + (0x1D9, "M", "ǚ"), + (0x1DA, "V"), + (0x1DB, "M", "ǜ"), + (0x1DC, "V"), + (0x1DE, "M", "ǟ"), + (0x1DF, "V"), + (0x1E0, "M", "ǡ"), + (0x1E1, "V"), + (0x1E2, "M", "ǣ"), + (0x1E3, "V"), + (0x1E4, "M", "ǥ"), + (0x1E5, "V"), + (0x1E6, "M", "ǧ"), + (0x1E7, "V"), + (0x1E8, "M", "ǩ"), + (0x1E9, "V"), + (0x1EA, "M", "ǫ"), + (0x1EB, "V"), + (0x1EC, "M", "ǭ"), + (0x1ED, "V"), + (0x1EE, "M", "ǯ"), + (0x1EF, "V"), + (0x1F1, "M", "dz"), + (0x1F4, "M", "ǵ"), + (0x1F5, "V"), + (0x1F6, "M", "ƕ"), + (0x1F7, "M", "ƿ"), + (0x1F8, "M", "ǹ"), + (0x1F9, "V"), + (0x1FA, "M", "ǻ"), + (0x1FB, "V"), + (0x1FC, "M", "ǽ"), + (0x1FD, "V"), + (0x1FE, "M", "ǿ"), + (0x1FF, "V"), + (0x200, "M", "ȁ"), + (0x201, "V"), + (0x202, "M", "ȃ"), + (0x203, "V"), + (0x204, "M", "ȅ"), + (0x205, "V"), + (0x206, "M", "ȇ"), + (0x207, "V"), + (0x208, "M", "ȉ"), + (0x209, "V"), + (0x20A, "M", "ȋ"), + (0x20B, "V"), + (0x20C, "M", "ȍ"), + ] + + +def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x20D, "V"), + (0x20E, "M", "ȏ"), + (0x20F, "V"), + (0x210, "M", "ȑ"), + (0x211, "V"), + (0x212, "M", "ȓ"), + (0x213, "V"), + (0x214, "M", "ȕ"), + (0x215, "V"), + (0x216, "M", "ȗ"), + (0x217, "V"), + (0x218, "M", "ș"), + (0x219, "V"), + (0x21A, "M", "ț"), + (0x21B, "V"), + (0x21C, "M", "ȝ"), + (0x21D, "V"), + (0x21E, "M", "ȟ"), + (0x21F, "V"), + (0x220, "M", "ƞ"), + (0x221, "V"), + (0x222, "M", "ȣ"), + (0x223, "V"), + (0x224, "M", "ȥ"), + (0x225, "V"), + (0x226, "M", "ȧ"), + (0x227, "V"), + (0x228, "M", "ȩ"), + (0x229, "V"), + (0x22A, "M", "ȫ"), + (0x22B, "V"), + (0x22C, "M", "ȭ"), + (0x22D, "V"), + (0x22E, "M", "ȯ"), + (0x22F, "V"), + (0x230, "M", "ȱ"), + (0x231, "V"), + (0x232, "M", "ȳ"), + (0x233, "V"), + (0x23A, "M", "ⱥ"), + (0x23B, "M", "ȼ"), + (0x23C, "V"), + (0x23D, "M", "ƚ"), + (0x23E, "M", "ⱦ"), + (0x23F, "V"), + (0x241, "M", "ɂ"), + (0x242, "V"), + (0x243, "M", "ƀ"), + (0x244, "M", "ʉ"), + (0x245, "M", "ʌ"), + (0x246, "M", "ɇ"), + (0x247, "V"), + (0x248, "M", "ɉ"), + (0x249, "V"), + (0x24A, "M", "ɋ"), + (0x24B, "V"), + (0x24C, "M", "ɍ"), + (0x24D, "V"), + (0x24E, "M", "ɏ"), + (0x24F, "V"), + (0x2B0, "M", "h"), + (0x2B1, "M", "ɦ"), + (0x2B2, "M", "j"), + (0x2B3, "M", "r"), + (0x2B4, "M", "ɹ"), + (0x2B5, "M", "ɻ"), + (0x2B6, "M", "ʁ"), + (0x2B7, "M", "w"), + (0x2B8, "M", "y"), + (0x2B9, "V"), + (0x2D8, "3", " ̆"), + (0x2D9, "3", " ̇"), + (0x2DA, "3", " ̊"), + (0x2DB, "3", " ̨"), + (0x2DC, "3", " ̃"), + (0x2DD, "3", " ̋"), + (0x2DE, "V"), + (0x2E0, "M", "ɣ"), + (0x2E1, "M", "l"), + (0x2E2, "M", "s"), + (0x2E3, "M", "x"), + (0x2E4, "M", "ʕ"), + (0x2E5, "V"), + (0x340, "M", "̀"), + (0x341, "M", "́"), + (0x342, "V"), + (0x343, "M", "̓"), + (0x344, "M", "̈́"), + (0x345, "M", "ι"), + (0x346, "V"), + (0x34F, "I"), + (0x350, "V"), + (0x370, "M", "ͱ"), + (0x371, "V"), + (0x372, "M", "ͳ"), + (0x373, "V"), + (0x374, "M", "ʹ"), + (0x375, "V"), + (0x376, "M", "ͷ"), + (0x377, "V"), + ] + + +def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x378, "X"), + (0x37A, "3", " ι"), + (0x37B, "V"), + (0x37E, "3", ";"), + (0x37F, "M", "ϳ"), + (0x380, "X"), + (0x384, "3", " ́"), + (0x385, "3", " ̈́"), + (0x386, "M", "ά"), + (0x387, "M", "·"), + (0x388, "M", "έ"), + (0x389, "M", "ή"), + (0x38A, "M", "ί"), + (0x38B, "X"), + (0x38C, "M", "ό"), + (0x38D, 
"X"), + (0x38E, "M", "ύ"), + (0x38F, "M", "ώ"), + (0x390, "V"), + (0x391, "M", "α"), + (0x392, "M", "β"), + (0x393, "M", "γ"), + (0x394, "M", "δ"), + (0x395, "M", "ε"), + (0x396, "M", "ζ"), + (0x397, "M", "η"), + (0x398, "M", "θ"), + (0x399, "M", "ι"), + (0x39A, "M", "κ"), + (0x39B, "M", "λ"), + (0x39C, "M", "μ"), + (0x39D, "M", "ν"), + (0x39E, "M", "ξ"), + (0x39F, "M", "ο"), + (0x3A0, "M", "π"), + (0x3A1, "M", "ρ"), + (0x3A2, "X"), + (0x3A3, "M", "σ"), + (0x3A4, "M", "τ"), + (0x3A5, "M", "υ"), + (0x3A6, "M", "φ"), + (0x3A7, "M", "χ"), + (0x3A8, "M", "ψ"), + (0x3A9, "M", "ω"), + (0x3AA, "M", "ϊ"), + (0x3AB, "M", "ϋ"), + (0x3AC, "V"), + (0x3C2, "D", "σ"), + (0x3C3, "V"), + (0x3CF, "M", "ϗ"), + (0x3D0, "M", "β"), + (0x3D1, "M", "θ"), + (0x3D2, "M", "υ"), + (0x3D3, "M", "ύ"), + (0x3D4, "M", "ϋ"), + (0x3D5, "M", "φ"), + (0x3D6, "M", "π"), + (0x3D7, "V"), + (0x3D8, "M", "ϙ"), + (0x3D9, "V"), + (0x3DA, "M", "ϛ"), + (0x3DB, "V"), + (0x3DC, "M", "ϝ"), + (0x3DD, "V"), + (0x3DE, "M", "ϟ"), + (0x3DF, "V"), + (0x3E0, "M", "ϡ"), + (0x3E1, "V"), + (0x3E2, "M", "ϣ"), + (0x3E3, "V"), + (0x3E4, "M", "ϥ"), + (0x3E5, "V"), + (0x3E6, "M", "ϧ"), + (0x3E7, "V"), + (0x3E8, "M", "ϩ"), + (0x3E9, "V"), + (0x3EA, "M", "ϫ"), + (0x3EB, "V"), + (0x3EC, "M", "ϭ"), + (0x3ED, "V"), + (0x3EE, "M", "ϯ"), + (0x3EF, "V"), + (0x3F0, "M", "κ"), + (0x3F1, "M", "ρ"), + (0x3F2, "M", "σ"), + (0x3F3, "V"), + (0x3F4, "M", "θ"), + (0x3F5, "M", "ε"), + (0x3F6, "V"), + (0x3F7, "M", "ϸ"), + (0x3F8, "V"), + (0x3F9, "M", "σ"), + (0x3FA, "M", "ϻ"), + (0x3FB, "V"), + (0x3FD, "M", "ͻ"), + (0x3FE, "M", "ͼ"), + (0x3FF, "M", "ͽ"), + (0x400, "M", "ѐ"), + (0x401, "M", "ё"), + (0x402, "M", "ђ"), + ] + + +def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x403, "M", "ѓ"), + (0x404, "M", "є"), + (0x405, "M", "ѕ"), + (0x406, "M", "і"), + (0x407, "M", "ї"), + (0x408, "M", "ј"), + (0x409, "M", "љ"), + (0x40A, "M", "њ"), + (0x40B, "M", "ћ"), + (0x40C, "M", "ќ"), + (0x40D, "M", "ѝ"), + (0x40E, "M", "ў"), + (0x40F, "M", "џ"), + (0x410, "M", "а"), + (0x411, "M", "б"), + (0x412, "M", "в"), + (0x413, "M", "г"), + (0x414, "M", "д"), + (0x415, "M", "е"), + (0x416, "M", "ж"), + (0x417, "M", "з"), + (0x418, "M", "и"), + (0x419, "M", "й"), + (0x41A, "M", "к"), + (0x41B, "M", "л"), + (0x41C, "M", "м"), + (0x41D, "M", "н"), + (0x41E, "M", "о"), + (0x41F, "M", "п"), + (0x420, "M", "р"), + (0x421, "M", "с"), + (0x422, "M", "т"), + (0x423, "M", "у"), + (0x424, "M", "ф"), + (0x425, "M", "х"), + (0x426, "M", "ц"), + (0x427, "M", "ч"), + (0x428, "M", "ш"), + (0x429, "M", "щ"), + (0x42A, "M", "ъ"), + (0x42B, "M", "ы"), + (0x42C, "M", "ь"), + (0x42D, "M", "э"), + (0x42E, "M", "ю"), + (0x42F, "M", "я"), + (0x430, "V"), + (0x460, "M", "ѡ"), + (0x461, "V"), + (0x462, "M", "ѣ"), + (0x463, "V"), + (0x464, "M", "ѥ"), + (0x465, "V"), + (0x466, "M", "ѧ"), + (0x467, "V"), + (0x468, "M", "ѩ"), + (0x469, "V"), + (0x46A, "M", "ѫ"), + (0x46B, "V"), + (0x46C, "M", "ѭ"), + (0x46D, "V"), + (0x46E, "M", "ѯ"), + (0x46F, "V"), + (0x470, "M", "ѱ"), + (0x471, "V"), + (0x472, "M", "ѳ"), + (0x473, "V"), + (0x474, "M", "ѵ"), + (0x475, "V"), + (0x476, "M", "ѷ"), + (0x477, "V"), + (0x478, "M", "ѹ"), + (0x479, "V"), + (0x47A, "M", "ѻ"), + (0x47B, "V"), + (0x47C, "M", "ѽ"), + (0x47D, "V"), + (0x47E, "M", "ѿ"), + (0x47F, "V"), + (0x480, "M", "ҁ"), + (0x481, "V"), + (0x48A, "M", "ҋ"), + (0x48B, "V"), + (0x48C, "M", "ҍ"), + (0x48D, "V"), + (0x48E, "M", "ҏ"), + (0x48F, "V"), + (0x490, "M", "ґ"), + (0x491, "V"), + (0x492, "M", "ғ"), + (0x493, "V"), + (0x494, "M", "ҕ"), + 
(0x495, "V"), + (0x496, "M", "җ"), + (0x497, "V"), + (0x498, "M", "ҙ"), + (0x499, "V"), + (0x49A, "M", "қ"), + (0x49B, "V"), + (0x49C, "M", "ҝ"), + (0x49D, "V"), + ] + + +def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x49E, "M", "ҟ"), + (0x49F, "V"), + (0x4A0, "M", "ҡ"), + (0x4A1, "V"), + (0x4A2, "M", "ң"), + (0x4A3, "V"), + (0x4A4, "M", "ҥ"), + (0x4A5, "V"), + (0x4A6, "M", "ҧ"), + (0x4A7, "V"), + (0x4A8, "M", "ҩ"), + (0x4A9, "V"), + (0x4AA, "M", "ҫ"), + (0x4AB, "V"), + (0x4AC, "M", "ҭ"), + (0x4AD, "V"), + (0x4AE, "M", "ү"), + (0x4AF, "V"), + (0x4B0, "M", "ұ"), + (0x4B1, "V"), + (0x4B2, "M", "ҳ"), + (0x4B3, "V"), + (0x4B4, "M", "ҵ"), + (0x4B5, "V"), + (0x4B6, "M", "ҷ"), + (0x4B7, "V"), + (0x4B8, "M", "ҹ"), + (0x4B9, "V"), + (0x4BA, "M", "һ"), + (0x4BB, "V"), + (0x4BC, "M", "ҽ"), + (0x4BD, "V"), + (0x4BE, "M", "ҿ"), + (0x4BF, "V"), + (0x4C0, "X"), + (0x4C1, "M", "ӂ"), + (0x4C2, "V"), + (0x4C3, "M", "ӄ"), + (0x4C4, "V"), + (0x4C5, "M", "ӆ"), + (0x4C6, "V"), + (0x4C7, "M", "ӈ"), + (0x4C8, "V"), + (0x4C9, "M", "ӊ"), + (0x4CA, "V"), + (0x4CB, "M", "ӌ"), + (0x4CC, "V"), + (0x4CD, "M", "ӎ"), + (0x4CE, "V"), + (0x4D0, "M", "ӑ"), + (0x4D1, "V"), + (0x4D2, "M", "ӓ"), + (0x4D3, "V"), + (0x4D4, "M", "ӕ"), + (0x4D5, "V"), + (0x4D6, "M", "ӗ"), + (0x4D7, "V"), + (0x4D8, "M", "ә"), + (0x4D9, "V"), + (0x4DA, "M", "ӛ"), + (0x4DB, "V"), + (0x4DC, "M", "ӝ"), + (0x4DD, "V"), + (0x4DE, "M", "ӟ"), + (0x4DF, "V"), + (0x4E0, "M", "ӡ"), + (0x4E1, "V"), + (0x4E2, "M", "ӣ"), + (0x4E3, "V"), + (0x4E4, "M", "ӥ"), + (0x4E5, "V"), + (0x4E6, "M", "ӧ"), + (0x4E7, "V"), + (0x4E8, "M", "ө"), + (0x4E9, "V"), + (0x4EA, "M", "ӫ"), + (0x4EB, "V"), + (0x4EC, "M", "ӭ"), + (0x4ED, "V"), + (0x4EE, "M", "ӯ"), + (0x4EF, "V"), + (0x4F0, "M", "ӱ"), + (0x4F1, "V"), + (0x4F2, "M", "ӳ"), + (0x4F3, "V"), + (0x4F4, "M", "ӵ"), + (0x4F5, "V"), + (0x4F6, "M", "ӷ"), + (0x4F7, "V"), + (0x4F8, "M", "ӹ"), + (0x4F9, "V"), + (0x4FA, "M", "ӻ"), + (0x4FB, "V"), + (0x4FC, "M", "ӽ"), + (0x4FD, "V"), + (0x4FE, "M", "ӿ"), + (0x4FF, "V"), + (0x500, "M", "ԁ"), + (0x501, "V"), + (0x502, "M", "ԃ"), + ] + + +def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x503, "V"), + (0x504, "M", "ԅ"), + (0x505, "V"), + (0x506, "M", "ԇ"), + (0x507, "V"), + (0x508, "M", "ԉ"), + (0x509, "V"), + (0x50A, "M", "ԋ"), + (0x50B, "V"), + (0x50C, "M", "ԍ"), + (0x50D, "V"), + (0x50E, "M", "ԏ"), + (0x50F, "V"), + (0x510, "M", "ԑ"), + (0x511, "V"), + (0x512, "M", "ԓ"), + (0x513, "V"), + (0x514, "M", "ԕ"), + (0x515, "V"), + (0x516, "M", "ԗ"), + (0x517, "V"), + (0x518, "M", "ԙ"), + (0x519, "V"), + (0x51A, "M", "ԛ"), + (0x51B, "V"), + (0x51C, "M", "ԝ"), + (0x51D, "V"), + (0x51E, "M", "ԟ"), + (0x51F, "V"), + (0x520, "M", "ԡ"), + (0x521, "V"), + (0x522, "M", "ԣ"), + (0x523, "V"), + (0x524, "M", "ԥ"), + (0x525, "V"), + (0x526, "M", "ԧ"), + (0x527, "V"), + (0x528, "M", "ԩ"), + (0x529, "V"), + (0x52A, "M", "ԫ"), + (0x52B, "V"), + (0x52C, "M", "ԭ"), + (0x52D, "V"), + (0x52E, "M", "ԯ"), + (0x52F, "V"), + (0x530, "X"), + (0x531, "M", "ա"), + (0x532, "M", "բ"), + (0x533, "M", "գ"), + (0x534, "M", "դ"), + (0x535, "M", "ե"), + (0x536, "M", "զ"), + (0x537, "M", "է"), + (0x538, "M", "ը"), + (0x539, "M", "թ"), + (0x53A, "M", "ժ"), + (0x53B, "M", "ի"), + (0x53C, "M", "լ"), + (0x53D, "M", "խ"), + (0x53E, "M", "ծ"), + (0x53F, "M", "կ"), + (0x540, "M", "հ"), + (0x541, "M", "ձ"), + (0x542, "M", "ղ"), + (0x543, "M", "ճ"), + (0x544, "M", "մ"), + (0x545, "M", "յ"), + (0x546, "M", "ն"), + (0x547, "M", "շ"), + (0x548, "M", "ո"), + (0x549, "M", 
"չ"), + (0x54A, "M", "պ"), + (0x54B, "M", "ջ"), + (0x54C, "M", "ռ"), + (0x54D, "M", "ս"), + (0x54E, "M", "վ"), + (0x54F, "M", "տ"), + (0x550, "M", "ր"), + (0x551, "M", "ց"), + (0x552, "M", "ւ"), + (0x553, "M", "փ"), + (0x554, "M", "ք"), + (0x555, "M", "օ"), + (0x556, "M", "ֆ"), + (0x557, "X"), + (0x559, "V"), + (0x587, "M", "եւ"), + (0x588, "V"), + (0x58B, "X"), + (0x58D, "V"), + (0x590, "X"), + (0x591, "V"), + (0x5C8, "X"), + (0x5D0, "V"), + (0x5EB, "X"), + (0x5EF, "V"), + (0x5F5, "X"), + (0x606, "V"), + (0x61C, "X"), + (0x61D, "V"), + ] + + +def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x675, "M", "اٴ"), + (0x676, "M", "وٴ"), + (0x677, "M", "ۇٴ"), + (0x678, "M", "يٴ"), + (0x679, "V"), + (0x6DD, "X"), + (0x6DE, "V"), + (0x70E, "X"), + (0x710, "V"), + (0x74B, "X"), + (0x74D, "V"), + (0x7B2, "X"), + (0x7C0, "V"), + (0x7FB, "X"), + (0x7FD, "V"), + (0x82E, "X"), + (0x830, "V"), + (0x83F, "X"), + (0x840, "V"), + (0x85C, "X"), + (0x85E, "V"), + (0x85F, "X"), + (0x860, "V"), + (0x86B, "X"), + (0x870, "V"), + (0x88F, "X"), + (0x898, "V"), + (0x8E2, "X"), + (0x8E3, "V"), + (0x958, "M", "क़"), + (0x959, "M", "ख़"), + (0x95A, "M", "ग़"), + (0x95B, "M", "ज़"), + (0x95C, "M", "ड़"), + (0x95D, "M", "ढ़"), + (0x95E, "M", "फ़"), + (0x95F, "M", "य़"), + (0x960, "V"), + (0x984, "X"), + (0x985, "V"), + (0x98D, "X"), + (0x98F, "V"), + (0x991, "X"), + (0x993, "V"), + (0x9A9, "X"), + (0x9AA, "V"), + (0x9B1, "X"), + (0x9B2, "V"), + (0x9B3, "X"), + (0x9B6, "V"), + (0x9BA, "X"), + (0x9BC, "V"), + (0x9C5, "X"), + (0x9C7, "V"), + (0x9C9, "X"), + (0x9CB, "V"), + (0x9CF, "X"), + (0x9D7, "V"), + (0x9D8, "X"), + (0x9DC, "M", "ড়"), + (0x9DD, "M", "ঢ়"), + (0x9DE, "X"), + (0x9DF, "M", "য়"), + (0x9E0, "V"), + (0x9E4, "X"), + (0x9E6, "V"), + (0x9FF, "X"), + (0xA01, "V"), + (0xA04, "X"), + (0xA05, "V"), + (0xA0B, "X"), + (0xA0F, "V"), + (0xA11, "X"), + (0xA13, "V"), + (0xA29, "X"), + (0xA2A, "V"), + (0xA31, "X"), + (0xA32, "V"), + (0xA33, "M", "ਲ਼"), + (0xA34, "X"), + (0xA35, "V"), + (0xA36, "M", "ਸ਼"), + (0xA37, "X"), + (0xA38, "V"), + (0xA3A, "X"), + (0xA3C, "V"), + (0xA3D, "X"), + (0xA3E, "V"), + (0xA43, "X"), + (0xA47, "V"), + (0xA49, "X"), + (0xA4B, "V"), + (0xA4E, "X"), + (0xA51, "V"), + (0xA52, "X"), + (0xA59, "M", "ਖ਼"), + (0xA5A, "M", "ਗ਼"), + (0xA5B, "M", "ਜ਼"), + (0xA5C, "V"), + (0xA5D, "X"), + ] + + +def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA5E, "M", "ਫ਼"), + (0xA5F, "X"), + (0xA66, "V"), + (0xA77, "X"), + (0xA81, "V"), + (0xA84, "X"), + (0xA85, "V"), + (0xA8E, "X"), + (0xA8F, "V"), + (0xA92, "X"), + (0xA93, "V"), + (0xAA9, "X"), + (0xAAA, "V"), + (0xAB1, "X"), + (0xAB2, "V"), + (0xAB4, "X"), + (0xAB5, "V"), + (0xABA, "X"), + (0xABC, "V"), + (0xAC6, "X"), + (0xAC7, "V"), + (0xACA, "X"), + (0xACB, "V"), + (0xACE, "X"), + (0xAD0, "V"), + (0xAD1, "X"), + (0xAE0, "V"), + (0xAE4, "X"), + (0xAE6, "V"), + (0xAF2, "X"), + (0xAF9, "V"), + (0xB00, "X"), + (0xB01, "V"), + (0xB04, "X"), + (0xB05, "V"), + (0xB0D, "X"), + (0xB0F, "V"), + (0xB11, "X"), + (0xB13, "V"), + (0xB29, "X"), + (0xB2A, "V"), + (0xB31, "X"), + (0xB32, "V"), + (0xB34, "X"), + (0xB35, "V"), + (0xB3A, "X"), + (0xB3C, "V"), + (0xB45, "X"), + (0xB47, "V"), + (0xB49, "X"), + (0xB4B, "V"), + (0xB4E, "X"), + (0xB55, "V"), + (0xB58, "X"), + (0xB5C, "M", "ଡ଼"), + (0xB5D, "M", "ଢ଼"), + (0xB5E, "X"), + (0xB5F, "V"), + (0xB64, "X"), + (0xB66, "V"), + (0xB78, "X"), + (0xB82, "V"), + (0xB84, "X"), + (0xB85, "V"), + (0xB8B, "X"), + (0xB8E, "V"), + (0xB91, "X"), + (0xB92, "V"), + (0xB96, 
"X"), + (0xB99, "V"), + (0xB9B, "X"), + (0xB9C, "V"), + (0xB9D, "X"), + (0xB9E, "V"), + (0xBA0, "X"), + (0xBA3, "V"), + (0xBA5, "X"), + (0xBA8, "V"), + (0xBAB, "X"), + (0xBAE, "V"), + (0xBBA, "X"), + (0xBBE, "V"), + (0xBC3, "X"), + (0xBC6, "V"), + (0xBC9, "X"), + (0xBCA, "V"), + (0xBCE, "X"), + (0xBD0, "V"), + (0xBD1, "X"), + (0xBD7, "V"), + (0xBD8, "X"), + (0xBE6, "V"), + (0xBFB, "X"), + (0xC00, "V"), + (0xC0D, "X"), + (0xC0E, "V"), + (0xC11, "X"), + (0xC12, "V"), + (0xC29, "X"), + (0xC2A, "V"), + ] + + +def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC3A, "X"), + (0xC3C, "V"), + (0xC45, "X"), + (0xC46, "V"), + (0xC49, "X"), + (0xC4A, "V"), + (0xC4E, "X"), + (0xC55, "V"), + (0xC57, "X"), + (0xC58, "V"), + (0xC5B, "X"), + (0xC5D, "V"), + (0xC5E, "X"), + (0xC60, "V"), + (0xC64, "X"), + (0xC66, "V"), + (0xC70, "X"), + (0xC77, "V"), + (0xC8D, "X"), + (0xC8E, "V"), + (0xC91, "X"), + (0xC92, "V"), + (0xCA9, "X"), + (0xCAA, "V"), + (0xCB4, "X"), + (0xCB5, "V"), + (0xCBA, "X"), + (0xCBC, "V"), + (0xCC5, "X"), + (0xCC6, "V"), + (0xCC9, "X"), + (0xCCA, "V"), + (0xCCE, "X"), + (0xCD5, "V"), + (0xCD7, "X"), + (0xCDD, "V"), + (0xCDF, "X"), + (0xCE0, "V"), + (0xCE4, "X"), + (0xCE6, "V"), + (0xCF0, "X"), + (0xCF1, "V"), + (0xCF4, "X"), + (0xD00, "V"), + (0xD0D, "X"), + (0xD0E, "V"), + (0xD11, "X"), + (0xD12, "V"), + (0xD45, "X"), + (0xD46, "V"), + (0xD49, "X"), + (0xD4A, "V"), + (0xD50, "X"), + (0xD54, "V"), + (0xD64, "X"), + (0xD66, "V"), + (0xD80, "X"), + (0xD81, "V"), + (0xD84, "X"), + (0xD85, "V"), + (0xD97, "X"), + (0xD9A, "V"), + (0xDB2, "X"), + (0xDB3, "V"), + (0xDBC, "X"), + (0xDBD, "V"), + (0xDBE, "X"), + (0xDC0, "V"), + (0xDC7, "X"), + (0xDCA, "V"), + (0xDCB, "X"), + (0xDCF, "V"), + (0xDD5, "X"), + (0xDD6, "V"), + (0xDD7, "X"), + (0xDD8, "V"), + (0xDE0, "X"), + (0xDE6, "V"), + (0xDF0, "X"), + (0xDF2, "V"), + (0xDF5, "X"), + (0xE01, "V"), + (0xE33, "M", "ํา"), + (0xE34, "V"), + (0xE3B, "X"), + (0xE3F, "V"), + (0xE5C, "X"), + (0xE81, "V"), + (0xE83, "X"), + (0xE84, "V"), + (0xE85, "X"), + (0xE86, "V"), + (0xE8B, "X"), + (0xE8C, "V"), + (0xEA4, "X"), + (0xEA5, "V"), + (0xEA6, "X"), + (0xEA7, "V"), + (0xEB3, "M", "ໍາ"), + (0xEB4, "V"), + ] + + +def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xEBE, "X"), + (0xEC0, "V"), + (0xEC5, "X"), + (0xEC6, "V"), + (0xEC7, "X"), + (0xEC8, "V"), + (0xECF, "X"), + (0xED0, "V"), + (0xEDA, "X"), + (0xEDC, "M", "ຫນ"), + (0xEDD, "M", "ຫມ"), + (0xEDE, "V"), + (0xEE0, "X"), + (0xF00, "V"), + (0xF0C, "M", "་"), + (0xF0D, "V"), + (0xF43, "M", "གྷ"), + (0xF44, "V"), + (0xF48, "X"), + (0xF49, "V"), + (0xF4D, "M", "ཌྷ"), + (0xF4E, "V"), + (0xF52, "M", "དྷ"), + (0xF53, "V"), + (0xF57, "M", "བྷ"), + (0xF58, "V"), + (0xF5C, "M", "ཛྷ"), + (0xF5D, "V"), + (0xF69, "M", "ཀྵ"), + (0xF6A, "V"), + (0xF6D, "X"), + (0xF71, "V"), + (0xF73, "M", "ཱི"), + (0xF74, "V"), + (0xF75, "M", "ཱུ"), + (0xF76, "M", "ྲྀ"), + (0xF77, "M", "ྲཱྀ"), + (0xF78, "M", "ླྀ"), + (0xF79, "M", "ླཱྀ"), + (0xF7A, "V"), + (0xF81, "M", "ཱྀ"), + (0xF82, "V"), + (0xF93, "M", "ྒྷ"), + (0xF94, "V"), + (0xF98, "X"), + (0xF99, "V"), + (0xF9D, "M", "ྜྷ"), + (0xF9E, "V"), + (0xFA2, "M", "ྡྷ"), + (0xFA3, "V"), + (0xFA7, "M", "ྦྷ"), + (0xFA8, "V"), + (0xFAC, "M", "ྫྷ"), + (0xFAD, "V"), + (0xFB9, "M", "ྐྵ"), + (0xFBA, "V"), + (0xFBD, "X"), + (0xFBE, "V"), + (0xFCD, "X"), + (0xFCE, "V"), + (0xFDB, "X"), + (0x1000, "V"), + (0x10A0, "X"), + (0x10C7, "M", "ⴧ"), + (0x10C8, "X"), + (0x10CD, "M", "ⴭ"), + (0x10CE, "X"), + (0x10D0, "V"), + (0x10FC, "M", "ნ"), + 
(0x10FD, "V"), + (0x115F, "X"), + (0x1161, "V"), + (0x1249, "X"), + (0x124A, "V"), + (0x124E, "X"), + (0x1250, "V"), + (0x1257, "X"), + (0x1258, "V"), + (0x1259, "X"), + (0x125A, "V"), + (0x125E, "X"), + (0x1260, "V"), + (0x1289, "X"), + (0x128A, "V"), + (0x128E, "X"), + (0x1290, "V"), + (0x12B1, "X"), + (0x12B2, "V"), + (0x12B6, "X"), + (0x12B8, "V"), + (0x12BF, "X"), + (0x12C0, "V"), + (0x12C1, "X"), + (0x12C2, "V"), + (0x12C6, "X"), + (0x12C8, "V"), + (0x12D7, "X"), + (0x12D8, "V"), + (0x1311, "X"), + (0x1312, "V"), + ] + + +def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1316, "X"), + (0x1318, "V"), + (0x135B, "X"), + (0x135D, "V"), + (0x137D, "X"), + (0x1380, "V"), + (0x139A, "X"), + (0x13A0, "V"), + (0x13F6, "X"), + (0x13F8, "M", "Ᏸ"), + (0x13F9, "M", "Ᏹ"), + (0x13FA, "M", "Ᏺ"), + (0x13FB, "M", "Ᏻ"), + (0x13FC, "M", "Ᏼ"), + (0x13FD, "M", "Ᏽ"), + (0x13FE, "X"), + (0x1400, "V"), + (0x1680, "X"), + (0x1681, "V"), + (0x169D, "X"), + (0x16A0, "V"), + (0x16F9, "X"), + (0x1700, "V"), + (0x1716, "X"), + (0x171F, "V"), + (0x1737, "X"), + (0x1740, "V"), + (0x1754, "X"), + (0x1760, "V"), + (0x176D, "X"), + (0x176E, "V"), + (0x1771, "X"), + (0x1772, "V"), + (0x1774, "X"), + (0x1780, "V"), + (0x17B4, "X"), + (0x17B6, "V"), + (0x17DE, "X"), + (0x17E0, "V"), + (0x17EA, "X"), + (0x17F0, "V"), + (0x17FA, "X"), + (0x1800, "V"), + (0x1806, "X"), + (0x1807, "V"), + (0x180B, "I"), + (0x180E, "X"), + (0x180F, "I"), + (0x1810, "V"), + (0x181A, "X"), + (0x1820, "V"), + (0x1879, "X"), + (0x1880, "V"), + (0x18AB, "X"), + (0x18B0, "V"), + (0x18F6, "X"), + (0x1900, "V"), + (0x191F, "X"), + (0x1920, "V"), + (0x192C, "X"), + (0x1930, "V"), + (0x193C, "X"), + (0x1940, "V"), + (0x1941, "X"), + (0x1944, "V"), + (0x196E, "X"), + (0x1970, "V"), + (0x1975, "X"), + (0x1980, "V"), + (0x19AC, "X"), + (0x19B0, "V"), + (0x19CA, "X"), + (0x19D0, "V"), + (0x19DB, "X"), + (0x19DE, "V"), + (0x1A1C, "X"), + (0x1A1E, "V"), + (0x1A5F, "X"), + (0x1A60, "V"), + (0x1A7D, "X"), + (0x1A7F, "V"), + (0x1A8A, "X"), + (0x1A90, "V"), + (0x1A9A, "X"), + (0x1AA0, "V"), + (0x1AAE, "X"), + (0x1AB0, "V"), + (0x1ACF, "X"), + (0x1B00, "V"), + (0x1B4D, "X"), + (0x1B50, "V"), + (0x1B7F, "X"), + (0x1B80, "V"), + (0x1BF4, "X"), + (0x1BFC, "V"), + (0x1C38, "X"), + (0x1C3B, "V"), + (0x1C4A, "X"), + (0x1C4D, "V"), + (0x1C80, "M", "в"), + ] + + +def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1C81, "M", "д"), + (0x1C82, "M", "о"), + (0x1C83, "M", "с"), + (0x1C84, "M", "т"), + (0x1C86, "M", "ъ"), + (0x1C87, "M", "ѣ"), + (0x1C88, "M", "ꙋ"), + (0x1C89, "X"), + (0x1C90, "M", "ა"), + (0x1C91, "M", "ბ"), + (0x1C92, "M", "გ"), + (0x1C93, "M", "დ"), + (0x1C94, "M", "ე"), + (0x1C95, "M", "ვ"), + (0x1C96, "M", "ზ"), + (0x1C97, "M", "თ"), + (0x1C98, "M", "ი"), + (0x1C99, "M", "კ"), + (0x1C9A, "M", "ლ"), + (0x1C9B, "M", "მ"), + (0x1C9C, "M", "ნ"), + (0x1C9D, "M", "ო"), + (0x1C9E, "M", "პ"), + (0x1C9F, "M", "ჟ"), + (0x1CA0, "M", "რ"), + (0x1CA1, "M", "ს"), + (0x1CA2, "M", "ტ"), + (0x1CA3, "M", "უ"), + (0x1CA4, "M", "ფ"), + (0x1CA5, "M", "ქ"), + (0x1CA6, "M", "ღ"), + (0x1CA7, "M", "ყ"), + (0x1CA8, "M", "შ"), + (0x1CA9, "M", "ჩ"), + (0x1CAA, "M", "ც"), + (0x1CAB, "M", "ძ"), + (0x1CAC, "M", "წ"), + (0x1CAD, "M", "ჭ"), + (0x1CAE, "M", "ხ"), + (0x1CAF, "M", "ჯ"), + (0x1CB0, "M", "ჰ"), + (0x1CB1, "M", "ჱ"), + (0x1CB2, "M", "ჲ"), + (0x1CB3, "M", "ჳ"), + (0x1CB4, "M", "ჴ"), + (0x1CB5, "M", "ჵ"), + (0x1CB6, "M", "ჶ"), + (0x1CB7, "M", "ჷ"), + (0x1CB8, "M", "ჸ"), + (0x1CB9, "M", "ჹ"), + (0x1CBA, "M", "ჺ"), 
+ (0x1CBB, "X"), + (0x1CBD, "M", "ჽ"), + (0x1CBE, "M", "ჾ"), + (0x1CBF, "M", "ჿ"), + (0x1CC0, "V"), + (0x1CC8, "X"), + (0x1CD0, "V"), + (0x1CFB, "X"), + (0x1D00, "V"), + (0x1D2C, "M", "a"), + (0x1D2D, "M", "æ"), + (0x1D2E, "M", "b"), + (0x1D2F, "V"), + (0x1D30, "M", "d"), + (0x1D31, "M", "e"), + (0x1D32, "M", "ǝ"), + (0x1D33, "M", "g"), + (0x1D34, "M", "h"), + (0x1D35, "M", "i"), + (0x1D36, "M", "j"), + (0x1D37, "M", "k"), + (0x1D38, "M", "l"), + (0x1D39, "M", "m"), + (0x1D3A, "M", "n"), + (0x1D3B, "V"), + (0x1D3C, "M", "o"), + (0x1D3D, "M", "ȣ"), + (0x1D3E, "M", "p"), + (0x1D3F, "M", "r"), + (0x1D40, "M", "t"), + (0x1D41, "M", "u"), + (0x1D42, "M", "w"), + (0x1D43, "M", "a"), + (0x1D44, "M", "ɐ"), + (0x1D45, "M", "ɑ"), + (0x1D46, "M", "ᴂ"), + (0x1D47, "M", "b"), + (0x1D48, "M", "d"), + (0x1D49, "M", "e"), + (0x1D4A, "M", "ə"), + (0x1D4B, "M", "ɛ"), + (0x1D4C, "M", "ɜ"), + (0x1D4D, "M", "g"), + (0x1D4E, "V"), + (0x1D4F, "M", "k"), + (0x1D50, "M", "m"), + (0x1D51, "M", "ŋ"), + (0x1D52, "M", "o"), + (0x1D53, "M", "ɔ"), + ] + + +def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D54, "M", "ᴖ"), + (0x1D55, "M", "ᴗ"), + (0x1D56, "M", "p"), + (0x1D57, "M", "t"), + (0x1D58, "M", "u"), + (0x1D59, "M", "ᴝ"), + (0x1D5A, "M", "ɯ"), + (0x1D5B, "M", "v"), + (0x1D5C, "M", "ᴥ"), + (0x1D5D, "M", "β"), + (0x1D5E, "M", "γ"), + (0x1D5F, "M", "δ"), + (0x1D60, "M", "φ"), + (0x1D61, "M", "χ"), + (0x1D62, "M", "i"), + (0x1D63, "M", "r"), + (0x1D64, "M", "u"), + (0x1D65, "M", "v"), + (0x1D66, "M", "β"), + (0x1D67, "M", "γ"), + (0x1D68, "M", "ρ"), + (0x1D69, "M", "φ"), + (0x1D6A, "M", "χ"), + (0x1D6B, "V"), + (0x1D78, "M", "н"), + (0x1D79, "V"), + (0x1D9B, "M", "ɒ"), + (0x1D9C, "M", "c"), + (0x1D9D, "M", "ɕ"), + (0x1D9E, "M", "ð"), + (0x1D9F, "M", "ɜ"), + (0x1DA0, "M", "f"), + (0x1DA1, "M", "ɟ"), + (0x1DA2, "M", "ɡ"), + (0x1DA3, "M", "ɥ"), + (0x1DA4, "M", "ɨ"), + (0x1DA5, "M", "ɩ"), + (0x1DA6, "M", "ɪ"), + (0x1DA7, "M", "ᵻ"), + (0x1DA8, "M", "ʝ"), + (0x1DA9, "M", "ɭ"), + (0x1DAA, "M", "ᶅ"), + (0x1DAB, "M", "ʟ"), + (0x1DAC, "M", "ɱ"), + (0x1DAD, "M", "ɰ"), + (0x1DAE, "M", "ɲ"), + (0x1DAF, "M", "ɳ"), + (0x1DB0, "M", "ɴ"), + (0x1DB1, "M", "ɵ"), + (0x1DB2, "M", "ɸ"), + (0x1DB3, "M", "ʂ"), + (0x1DB4, "M", "ʃ"), + (0x1DB5, "M", "ƫ"), + (0x1DB6, "M", "ʉ"), + (0x1DB7, "M", "ʊ"), + (0x1DB8, "M", "ᴜ"), + (0x1DB9, "M", "ʋ"), + (0x1DBA, "M", "ʌ"), + (0x1DBB, "M", "z"), + (0x1DBC, "M", "ʐ"), + (0x1DBD, "M", "ʑ"), + (0x1DBE, "M", "ʒ"), + (0x1DBF, "M", "θ"), + (0x1DC0, "V"), + (0x1E00, "M", "ḁ"), + (0x1E01, "V"), + (0x1E02, "M", "ḃ"), + (0x1E03, "V"), + (0x1E04, "M", "ḅ"), + (0x1E05, "V"), + (0x1E06, "M", "ḇ"), + (0x1E07, "V"), + (0x1E08, "M", "ḉ"), + (0x1E09, "V"), + (0x1E0A, "M", "ḋ"), + (0x1E0B, "V"), + (0x1E0C, "M", "ḍ"), + (0x1E0D, "V"), + (0x1E0E, "M", "ḏ"), + (0x1E0F, "V"), + (0x1E10, "M", "ḑ"), + (0x1E11, "V"), + (0x1E12, "M", "ḓ"), + (0x1E13, "V"), + (0x1E14, "M", "ḕ"), + (0x1E15, "V"), + (0x1E16, "M", "ḗ"), + (0x1E17, "V"), + (0x1E18, "M", "ḙ"), + (0x1E19, "V"), + (0x1E1A, "M", "ḛ"), + (0x1E1B, "V"), + (0x1E1C, "M", "ḝ"), + (0x1E1D, "V"), + (0x1E1E, "M", "ḟ"), + (0x1E1F, "V"), + (0x1E20, "M", "ḡ"), + (0x1E21, "V"), + (0x1E22, "M", "ḣ"), + (0x1E23, "V"), + ] + + +def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E24, "M", "ḥ"), + (0x1E25, "V"), + (0x1E26, "M", "ḧ"), + (0x1E27, "V"), + (0x1E28, "M", "ḩ"), + (0x1E29, "V"), + (0x1E2A, "M", "ḫ"), + (0x1E2B, "V"), + (0x1E2C, "M", "ḭ"), + (0x1E2D, "V"), + (0x1E2E, "M", "ḯ"), + (0x1E2F, "V"), + (0x1E30, 
"M", "ḱ"), + (0x1E31, "V"), + (0x1E32, "M", "ḳ"), + (0x1E33, "V"), + (0x1E34, "M", "ḵ"), + (0x1E35, "V"), + (0x1E36, "M", "ḷ"), + (0x1E37, "V"), + (0x1E38, "M", "ḹ"), + (0x1E39, "V"), + (0x1E3A, "M", "ḻ"), + (0x1E3B, "V"), + (0x1E3C, "M", "ḽ"), + (0x1E3D, "V"), + (0x1E3E, "M", "ḿ"), + (0x1E3F, "V"), + (0x1E40, "M", "ṁ"), + (0x1E41, "V"), + (0x1E42, "M", "ṃ"), + (0x1E43, "V"), + (0x1E44, "M", "ṅ"), + (0x1E45, "V"), + (0x1E46, "M", "ṇ"), + (0x1E47, "V"), + (0x1E48, "M", "ṉ"), + (0x1E49, "V"), + (0x1E4A, "M", "ṋ"), + (0x1E4B, "V"), + (0x1E4C, "M", "ṍ"), + (0x1E4D, "V"), + (0x1E4E, "M", "ṏ"), + (0x1E4F, "V"), + (0x1E50, "M", "ṑ"), + (0x1E51, "V"), + (0x1E52, "M", "ṓ"), + (0x1E53, "V"), + (0x1E54, "M", "ṕ"), + (0x1E55, "V"), + (0x1E56, "M", "ṗ"), + (0x1E57, "V"), + (0x1E58, "M", "ṙ"), + (0x1E59, "V"), + (0x1E5A, "M", "ṛ"), + (0x1E5B, "V"), + (0x1E5C, "M", "ṝ"), + (0x1E5D, "V"), + (0x1E5E, "M", "ṟ"), + (0x1E5F, "V"), + (0x1E60, "M", "ṡ"), + (0x1E61, "V"), + (0x1E62, "M", "ṣ"), + (0x1E63, "V"), + (0x1E64, "M", "ṥ"), + (0x1E65, "V"), + (0x1E66, "M", "ṧ"), + (0x1E67, "V"), + (0x1E68, "M", "ṩ"), + (0x1E69, "V"), + (0x1E6A, "M", "ṫ"), + (0x1E6B, "V"), + (0x1E6C, "M", "ṭ"), + (0x1E6D, "V"), + (0x1E6E, "M", "ṯ"), + (0x1E6F, "V"), + (0x1E70, "M", "ṱ"), + (0x1E71, "V"), + (0x1E72, "M", "ṳ"), + (0x1E73, "V"), + (0x1E74, "M", "ṵ"), + (0x1E75, "V"), + (0x1E76, "M", "ṷ"), + (0x1E77, "V"), + (0x1E78, "M", "ṹ"), + (0x1E79, "V"), + (0x1E7A, "M", "ṻ"), + (0x1E7B, "V"), + (0x1E7C, "M", "ṽ"), + (0x1E7D, "V"), + (0x1E7E, "M", "ṿ"), + (0x1E7F, "V"), + (0x1E80, "M", "ẁ"), + (0x1E81, "V"), + (0x1E82, "M", "ẃ"), + (0x1E83, "V"), + (0x1E84, "M", "ẅ"), + (0x1E85, "V"), + (0x1E86, "M", "ẇ"), + (0x1E87, "V"), + ] + + +def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E88, "M", "ẉ"), + (0x1E89, "V"), + (0x1E8A, "M", "ẋ"), + (0x1E8B, "V"), + (0x1E8C, "M", "ẍ"), + (0x1E8D, "V"), + (0x1E8E, "M", "ẏ"), + (0x1E8F, "V"), + (0x1E90, "M", "ẑ"), + (0x1E91, "V"), + (0x1E92, "M", "ẓ"), + (0x1E93, "V"), + (0x1E94, "M", "ẕ"), + (0x1E95, "V"), + (0x1E9A, "M", "aʾ"), + (0x1E9B, "M", "ṡ"), + (0x1E9C, "V"), + (0x1E9E, "M", "ß"), + (0x1E9F, "V"), + (0x1EA0, "M", "ạ"), + (0x1EA1, "V"), + (0x1EA2, "M", "ả"), + (0x1EA3, "V"), + (0x1EA4, "M", "ấ"), + (0x1EA5, "V"), + (0x1EA6, "M", "ầ"), + (0x1EA7, "V"), + (0x1EA8, "M", "ẩ"), + (0x1EA9, "V"), + (0x1EAA, "M", "ẫ"), + (0x1EAB, "V"), + (0x1EAC, "M", "ậ"), + (0x1EAD, "V"), + (0x1EAE, "M", "ắ"), + (0x1EAF, "V"), + (0x1EB0, "M", "ằ"), + (0x1EB1, "V"), + (0x1EB2, "M", "ẳ"), + (0x1EB3, "V"), + (0x1EB4, "M", "ẵ"), + (0x1EB5, "V"), + (0x1EB6, "M", "ặ"), + (0x1EB7, "V"), + (0x1EB8, "M", "ẹ"), + (0x1EB9, "V"), + (0x1EBA, "M", "ẻ"), + (0x1EBB, "V"), + (0x1EBC, "M", "ẽ"), + (0x1EBD, "V"), + (0x1EBE, "M", "ế"), + (0x1EBF, "V"), + (0x1EC0, "M", "ề"), + (0x1EC1, "V"), + (0x1EC2, "M", "ể"), + (0x1EC3, "V"), + (0x1EC4, "M", "ễ"), + (0x1EC5, "V"), + (0x1EC6, "M", "ệ"), + (0x1EC7, "V"), + (0x1EC8, "M", "ỉ"), + (0x1EC9, "V"), + (0x1ECA, "M", "ị"), + (0x1ECB, "V"), + (0x1ECC, "M", "ọ"), + (0x1ECD, "V"), + (0x1ECE, "M", "ỏ"), + (0x1ECF, "V"), + (0x1ED0, "M", "ố"), + (0x1ED1, "V"), + (0x1ED2, "M", "ồ"), + (0x1ED3, "V"), + (0x1ED4, "M", "ổ"), + (0x1ED5, "V"), + (0x1ED6, "M", "ỗ"), + (0x1ED7, "V"), + (0x1ED8, "M", "ộ"), + (0x1ED9, "V"), + (0x1EDA, "M", "ớ"), + (0x1EDB, "V"), + (0x1EDC, "M", "ờ"), + (0x1EDD, "V"), + (0x1EDE, "M", "ở"), + (0x1EDF, "V"), + (0x1EE0, "M", "ỡ"), + (0x1EE1, "V"), + (0x1EE2, "M", "ợ"), + (0x1EE3, "V"), + (0x1EE4, "M", "ụ"), + (0x1EE5, "V"), + (0x1EE6, "M", "ủ"), 
+ (0x1EE7, "V"), + (0x1EE8, "M", "ứ"), + (0x1EE9, "V"), + (0x1EEA, "M", "ừ"), + (0x1EEB, "V"), + (0x1EEC, "M", "ử"), + (0x1EED, "V"), + (0x1EEE, "M", "ữ"), + (0x1EEF, "V"), + (0x1EF0, "M", "ự"), + ] + + +def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EF1, "V"), + (0x1EF2, "M", "ỳ"), + (0x1EF3, "V"), + (0x1EF4, "M", "ỵ"), + (0x1EF5, "V"), + (0x1EF6, "M", "ỷ"), + (0x1EF7, "V"), + (0x1EF8, "M", "ỹ"), + (0x1EF9, "V"), + (0x1EFA, "M", "ỻ"), + (0x1EFB, "V"), + (0x1EFC, "M", "ỽ"), + (0x1EFD, "V"), + (0x1EFE, "M", "ỿ"), + (0x1EFF, "V"), + (0x1F08, "M", "ἀ"), + (0x1F09, "M", "ἁ"), + (0x1F0A, "M", "ἂ"), + (0x1F0B, "M", "ἃ"), + (0x1F0C, "M", "ἄ"), + (0x1F0D, "M", "ἅ"), + (0x1F0E, "M", "ἆ"), + (0x1F0F, "M", "ἇ"), + (0x1F10, "V"), + (0x1F16, "X"), + (0x1F18, "M", "ἐ"), + (0x1F19, "M", "ἑ"), + (0x1F1A, "M", "ἒ"), + (0x1F1B, "M", "ἓ"), + (0x1F1C, "M", "ἔ"), + (0x1F1D, "M", "ἕ"), + (0x1F1E, "X"), + (0x1F20, "V"), + (0x1F28, "M", "ἠ"), + (0x1F29, "M", "ἡ"), + (0x1F2A, "M", "ἢ"), + (0x1F2B, "M", "ἣ"), + (0x1F2C, "M", "ἤ"), + (0x1F2D, "M", "ἥ"), + (0x1F2E, "M", "ἦ"), + (0x1F2F, "M", "ἧ"), + (0x1F30, "V"), + (0x1F38, "M", "ἰ"), + (0x1F39, "M", "ἱ"), + (0x1F3A, "M", "ἲ"), + (0x1F3B, "M", "ἳ"), + (0x1F3C, "M", "ἴ"), + (0x1F3D, "M", "ἵ"), + (0x1F3E, "M", "ἶ"), + (0x1F3F, "M", "ἷ"), + (0x1F40, "V"), + (0x1F46, "X"), + (0x1F48, "M", "ὀ"), + (0x1F49, "M", "ὁ"), + (0x1F4A, "M", "ὂ"), + (0x1F4B, "M", "ὃ"), + (0x1F4C, "M", "ὄ"), + (0x1F4D, "M", "ὅ"), + (0x1F4E, "X"), + (0x1F50, "V"), + (0x1F58, "X"), + (0x1F59, "M", "ὑ"), + (0x1F5A, "X"), + (0x1F5B, "M", "ὓ"), + (0x1F5C, "X"), + (0x1F5D, "M", "ὕ"), + (0x1F5E, "X"), + (0x1F5F, "M", "ὗ"), + (0x1F60, "V"), + (0x1F68, "M", "ὠ"), + (0x1F69, "M", "ὡ"), + (0x1F6A, "M", "ὢ"), + (0x1F6B, "M", "ὣ"), + (0x1F6C, "M", "ὤ"), + (0x1F6D, "M", "ὥ"), + (0x1F6E, "M", "ὦ"), + (0x1F6F, "M", "ὧ"), + (0x1F70, "V"), + (0x1F71, "M", "ά"), + (0x1F72, "V"), + (0x1F73, "M", "έ"), + (0x1F74, "V"), + (0x1F75, "M", "ή"), + (0x1F76, "V"), + (0x1F77, "M", "ί"), + (0x1F78, "V"), + (0x1F79, "M", "ό"), + (0x1F7A, "V"), + (0x1F7B, "M", "ύ"), + (0x1F7C, "V"), + (0x1F7D, "M", "ώ"), + (0x1F7E, "X"), + (0x1F80, "M", "ἀι"), + (0x1F81, "M", "ἁι"), + (0x1F82, "M", "ἂι"), + (0x1F83, "M", "ἃι"), + (0x1F84, "M", "ἄι"), + (0x1F85, "M", "ἅι"), + (0x1F86, "M", "ἆι"), + (0x1F87, "M", "ἇι"), + ] + + +def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F88, "M", "ἀι"), + (0x1F89, "M", "ἁι"), + (0x1F8A, "M", "ἂι"), + (0x1F8B, "M", "ἃι"), + (0x1F8C, "M", "ἄι"), + (0x1F8D, "M", "ἅι"), + (0x1F8E, "M", "ἆι"), + (0x1F8F, "M", "ἇι"), + (0x1F90, "M", "ἠι"), + (0x1F91, "M", "ἡι"), + (0x1F92, "M", "ἢι"), + (0x1F93, "M", "ἣι"), + (0x1F94, "M", "ἤι"), + (0x1F95, "M", "ἥι"), + (0x1F96, "M", "ἦι"), + (0x1F97, "M", "ἧι"), + (0x1F98, "M", "ἠι"), + (0x1F99, "M", "ἡι"), + (0x1F9A, "M", "ἢι"), + (0x1F9B, "M", "ἣι"), + (0x1F9C, "M", "ἤι"), + (0x1F9D, "M", "ἥι"), + (0x1F9E, "M", "ἦι"), + (0x1F9F, "M", "ἧι"), + (0x1FA0, "M", "ὠι"), + (0x1FA1, "M", "ὡι"), + (0x1FA2, "M", "ὢι"), + (0x1FA3, "M", "ὣι"), + (0x1FA4, "M", "ὤι"), + (0x1FA5, "M", "ὥι"), + (0x1FA6, "M", "ὦι"), + (0x1FA7, "M", "ὧι"), + (0x1FA8, "M", "ὠι"), + (0x1FA9, "M", "ὡι"), + (0x1FAA, "M", "ὢι"), + (0x1FAB, "M", "ὣι"), + (0x1FAC, "M", "ὤι"), + (0x1FAD, "M", "ὥι"), + (0x1FAE, "M", "ὦι"), + (0x1FAF, "M", "ὧι"), + (0x1FB0, "V"), + (0x1FB2, "M", "ὰι"), + (0x1FB3, "M", "αι"), + (0x1FB4, "M", "άι"), + (0x1FB5, "X"), + (0x1FB6, "V"), + (0x1FB7, "M", "ᾶι"), + (0x1FB8, "M", "ᾰ"), + (0x1FB9, "M", "ᾱ"), + (0x1FBA, "M", 
"ὰ"), + (0x1FBB, "M", "ά"), + (0x1FBC, "M", "αι"), + (0x1FBD, "3", " ̓"), + (0x1FBE, "M", "ι"), + (0x1FBF, "3", " ̓"), + (0x1FC0, "3", " ͂"), + (0x1FC1, "3", " ̈͂"), + (0x1FC2, "M", "ὴι"), + (0x1FC3, "M", "ηι"), + (0x1FC4, "M", "ήι"), + (0x1FC5, "X"), + (0x1FC6, "V"), + (0x1FC7, "M", "ῆι"), + (0x1FC8, "M", "ὲ"), + (0x1FC9, "M", "έ"), + (0x1FCA, "M", "ὴ"), + (0x1FCB, "M", "ή"), + (0x1FCC, "M", "ηι"), + (0x1FCD, "3", " ̓̀"), + (0x1FCE, "3", " ̓́"), + (0x1FCF, "3", " ̓͂"), + (0x1FD0, "V"), + (0x1FD3, "M", "ΐ"), + (0x1FD4, "X"), + (0x1FD6, "V"), + (0x1FD8, "M", "ῐ"), + (0x1FD9, "M", "ῑ"), + (0x1FDA, "M", "ὶ"), + (0x1FDB, "M", "ί"), + (0x1FDC, "X"), + (0x1FDD, "3", " ̔̀"), + (0x1FDE, "3", " ̔́"), + (0x1FDF, "3", " ̔͂"), + (0x1FE0, "V"), + (0x1FE3, "M", "ΰ"), + (0x1FE4, "V"), + (0x1FE8, "M", "ῠ"), + (0x1FE9, "M", "ῡ"), + (0x1FEA, "M", "ὺ"), + (0x1FEB, "M", "ύ"), + (0x1FEC, "M", "ῥ"), + (0x1FED, "3", " ̈̀"), + (0x1FEE, "3", " ̈́"), + (0x1FEF, "3", "`"), + (0x1FF0, "X"), + (0x1FF2, "M", "ὼι"), + (0x1FF3, "M", "ωι"), + (0x1FF4, "M", "ώι"), + (0x1FF5, "X"), + (0x1FF6, "V"), + ] + + +def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1FF7, "M", "ῶι"), + (0x1FF8, "M", "ὸ"), + (0x1FF9, "M", "ό"), + (0x1FFA, "M", "ὼ"), + (0x1FFB, "M", "ώ"), + (0x1FFC, "M", "ωι"), + (0x1FFD, "3", " ́"), + (0x1FFE, "3", " ̔"), + (0x1FFF, "X"), + (0x2000, "3", " "), + (0x200B, "I"), + (0x200C, "D", ""), + (0x200E, "X"), + (0x2010, "V"), + (0x2011, "M", "‐"), + (0x2012, "V"), + (0x2017, "3", " ̳"), + (0x2018, "V"), + (0x2024, "X"), + (0x2027, "V"), + (0x2028, "X"), + (0x202F, "3", " "), + (0x2030, "V"), + (0x2033, "M", "′′"), + (0x2034, "M", "′′′"), + (0x2035, "V"), + (0x2036, "M", "‵‵"), + (0x2037, "M", "‵‵‵"), + (0x2038, "V"), + (0x203C, "3", "!!"), + (0x203D, "V"), + (0x203E, "3", " ̅"), + (0x203F, "V"), + (0x2047, "3", "??"), + (0x2048, "3", "?!"), + (0x2049, "3", "!?"), + (0x204A, "V"), + (0x2057, "M", "′′′′"), + (0x2058, "V"), + (0x205F, "3", " "), + (0x2060, "I"), + (0x2061, "X"), + (0x2064, "I"), + (0x2065, "X"), + (0x2070, "M", "0"), + (0x2071, "M", "i"), + (0x2072, "X"), + (0x2074, "M", "4"), + (0x2075, "M", "5"), + (0x2076, "M", "6"), + (0x2077, "M", "7"), + (0x2078, "M", "8"), + (0x2079, "M", "9"), + (0x207A, "3", "+"), + (0x207B, "M", "−"), + (0x207C, "3", "="), + (0x207D, "3", "("), + (0x207E, "3", ")"), + (0x207F, "M", "n"), + (0x2080, "M", "0"), + (0x2081, "M", "1"), + (0x2082, "M", "2"), + (0x2083, "M", "3"), + (0x2084, "M", "4"), + (0x2085, "M", "5"), + (0x2086, "M", "6"), + (0x2087, "M", "7"), + (0x2088, "M", "8"), + (0x2089, "M", "9"), + (0x208A, "3", "+"), + (0x208B, "M", "−"), + (0x208C, "3", "="), + (0x208D, "3", "("), + (0x208E, "3", ")"), + (0x208F, "X"), + (0x2090, "M", "a"), + (0x2091, "M", "e"), + (0x2092, "M", "o"), + (0x2093, "M", "x"), + (0x2094, "M", "ə"), + (0x2095, "M", "h"), + (0x2096, "M", "k"), + (0x2097, "M", "l"), + (0x2098, "M", "m"), + (0x2099, "M", "n"), + (0x209A, "M", "p"), + (0x209B, "M", "s"), + (0x209C, "M", "t"), + (0x209D, "X"), + (0x20A0, "V"), + (0x20A8, "M", "rs"), + (0x20A9, "V"), + (0x20C1, "X"), + (0x20D0, "V"), + (0x20F1, "X"), + (0x2100, "3", "a/c"), + (0x2101, "3", "a/s"), + (0x2102, "M", "c"), + (0x2103, "M", "°c"), + (0x2104, "V"), + ] + + +def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2105, "3", "c/o"), + (0x2106, "3", "c/u"), + (0x2107, "M", "ɛ"), + (0x2108, "V"), + (0x2109, "M", "°f"), + (0x210A, "M", "g"), + (0x210B, "M", "h"), + (0x210F, "M", "ħ"), + (0x2110, "M", "i"), + (0x2112, "M", 
"l"), + (0x2114, "V"), + (0x2115, "M", "n"), + (0x2116, "M", "no"), + (0x2117, "V"), + (0x2119, "M", "p"), + (0x211A, "M", "q"), + (0x211B, "M", "r"), + (0x211E, "V"), + (0x2120, "M", "sm"), + (0x2121, "M", "tel"), + (0x2122, "M", "tm"), + (0x2123, "V"), + (0x2124, "M", "z"), + (0x2125, "V"), + (0x2126, "M", "ω"), + (0x2127, "V"), + (0x2128, "M", "z"), + (0x2129, "V"), + (0x212A, "M", "k"), + (0x212B, "M", "å"), + (0x212C, "M", "b"), + (0x212D, "M", "c"), + (0x212E, "V"), + (0x212F, "M", "e"), + (0x2131, "M", "f"), + (0x2132, "X"), + (0x2133, "M", "m"), + (0x2134, "M", "o"), + (0x2135, "M", "א"), + (0x2136, "M", "ב"), + (0x2137, "M", "ג"), + (0x2138, "M", "ד"), + (0x2139, "M", "i"), + (0x213A, "V"), + (0x213B, "M", "fax"), + (0x213C, "M", "π"), + (0x213D, "M", "γ"), + (0x213F, "M", "π"), + (0x2140, "M", "∑"), + (0x2141, "V"), + (0x2145, "M", "d"), + (0x2147, "M", "e"), + (0x2148, "M", "i"), + (0x2149, "M", "j"), + (0x214A, "V"), + (0x2150, "M", "1⁄7"), + (0x2151, "M", "1⁄9"), + (0x2152, "M", "1⁄10"), + (0x2153, "M", "1⁄3"), + (0x2154, "M", "2⁄3"), + (0x2155, "M", "1⁄5"), + (0x2156, "M", "2⁄5"), + (0x2157, "M", "3⁄5"), + (0x2158, "M", "4⁄5"), + (0x2159, "M", "1⁄6"), + (0x215A, "M", "5⁄6"), + (0x215B, "M", "1⁄8"), + (0x215C, "M", "3⁄8"), + (0x215D, "M", "5⁄8"), + (0x215E, "M", "7⁄8"), + (0x215F, "M", "1⁄"), + (0x2160, "M", "i"), + (0x2161, "M", "ii"), + (0x2162, "M", "iii"), + (0x2163, "M", "iv"), + (0x2164, "M", "v"), + (0x2165, "M", "vi"), + (0x2166, "M", "vii"), + (0x2167, "M", "viii"), + (0x2168, "M", "ix"), + (0x2169, "M", "x"), + (0x216A, "M", "xi"), + (0x216B, "M", "xii"), + (0x216C, "M", "l"), + (0x216D, "M", "c"), + (0x216E, "M", "d"), + (0x216F, "M", "m"), + (0x2170, "M", "i"), + (0x2171, "M", "ii"), + (0x2172, "M", "iii"), + (0x2173, "M", "iv"), + (0x2174, "M", "v"), + (0x2175, "M", "vi"), + (0x2176, "M", "vii"), + (0x2177, "M", "viii"), + (0x2178, "M", "ix"), + (0x2179, "M", "x"), + (0x217A, "M", "xi"), + (0x217B, "M", "xii"), + (0x217C, "M", "l"), + ] + + +def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x217D, "M", "c"), + (0x217E, "M", "d"), + (0x217F, "M", "m"), + (0x2180, "V"), + (0x2183, "X"), + (0x2184, "V"), + (0x2189, "M", "0⁄3"), + (0x218A, "V"), + (0x218C, "X"), + (0x2190, "V"), + (0x222C, "M", "∫∫"), + (0x222D, "M", "∫∫∫"), + (0x222E, "V"), + (0x222F, "M", "∮∮"), + (0x2230, "M", "∮∮∮"), + (0x2231, "V"), + (0x2329, "M", "〈"), + (0x232A, "M", "〉"), + (0x232B, "V"), + (0x2427, "X"), + (0x2440, "V"), + (0x244B, "X"), + (0x2460, "M", "1"), + (0x2461, "M", "2"), + (0x2462, "M", "3"), + (0x2463, "M", "4"), + (0x2464, "M", "5"), + (0x2465, "M", "6"), + (0x2466, "M", "7"), + (0x2467, "M", "8"), + (0x2468, "M", "9"), + (0x2469, "M", "10"), + (0x246A, "M", "11"), + (0x246B, "M", "12"), + (0x246C, "M", "13"), + (0x246D, "M", "14"), + (0x246E, "M", "15"), + (0x246F, "M", "16"), + (0x2470, "M", "17"), + (0x2471, "M", "18"), + (0x2472, "M", "19"), + (0x2473, "M", "20"), + (0x2474, "3", "(1)"), + (0x2475, "3", "(2)"), + (0x2476, "3", "(3)"), + (0x2477, "3", "(4)"), + (0x2478, "3", "(5)"), + (0x2479, "3", "(6)"), + (0x247A, "3", "(7)"), + (0x247B, "3", "(8)"), + (0x247C, "3", "(9)"), + (0x247D, "3", "(10)"), + (0x247E, "3", "(11)"), + (0x247F, "3", "(12)"), + (0x2480, "3", "(13)"), + (0x2481, "3", "(14)"), + (0x2482, "3", "(15)"), + (0x2483, "3", "(16)"), + (0x2484, "3", "(17)"), + (0x2485, "3", "(18)"), + (0x2486, "3", "(19)"), + (0x2487, "3", "(20)"), + (0x2488, "X"), + (0x249C, "3", "(a)"), + (0x249D, "3", "(b)"), + (0x249E, "3", "(c)"), + 
(0x249F, "3", "(d)"), + (0x24A0, "3", "(e)"), + (0x24A1, "3", "(f)"), + (0x24A2, "3", "(g)"), + (0x24A3, "3", "(h)"), + (0x24A4, "3", "(i)"), + (0x24A5, "3", "(j)"), + (0x24A6, "3", "(k)"), + (0x24A7, "3", "(l)"), + (0x24A8, "3", "(m)"), + (0x24A9, "3", "(n)"), + (0x24AA, "3", "(o)"), + (0x24AB, "3", "(p)"), + (0x24AC, "3", "(q)"), + (0x24AD, "3", "(r)"), + (0x24AE, "3", "(s)"), + (0x24AF, "3", "(t)"), + (0x24B0, "3", "(u)"), + (0x24B1, "3", "(v)"), + (0x24B2, "3", "(w)"), + (0x24B3, "3", "(x)"), + (0x24B4, "3", "(y)"), + (0x24B5, "3", "(z)"), + (0x24B6, "M", "a"), + (0x24B7, "M", "b"), + (0x24B8, "M", "c"), + (0x24B9, "M", "d"), + (0x24BA, "M", "e"), + (0x24BB, "M", "f"), + (0x24BC, "M", "g"), + (0x24BD, "M", "h"), + (0x24BE, "M", "i"), + (0x24BF, "M", "j"), + (0x24C0, "M", "k"), + ] + + +def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x24C1, "M", "l"), + (0x24C2, "M", "m"), + (0x24C3, "M", "n"), + (0x24C4, "M", "o"), + (0x24C5, "M", "p"), + (0x24C6, "M", "q"), + (0x24C7, "M", "r"), + (0x24C8, "M", "s"), + (0x24C9, "M", "t"), + (0x24CA, "M", "u"), + (0x24CB, "M", "v"), + (0x24CC, "M", "w"), + (0x24CD, "M", "x"), + (0x24CE, "M", "y"), + (0x24CF, "M", "z"), + (0x24D0, "M", "a"), + (0x24D1, "M", "b"), + (0x24D2, "M", "c"), + (0x24D3, "M", "d"), + (0x24D4, "M", "e"), + (0x24D5, "M", "f"), + (0x24D6, "M", "g"), + (0x24D7, "M", "h"), + (0x24D8, "M", "i"), + (0x24D9, "M", "j"), + (0x24DA, "M", "k"), + (0x24DB, "M", "l"), + (0x24DC, "M", "m"), + (0x24DD, "M", "n"), + (0x24DE, "M", "o"), + (0x24DF, "M", "p"), + (0x24E0, "M", "q"), + (0x24E1, "M", "r"), + (0x24E2, "M", "s"), + (0x24E3, "M", "t"), + (0x24E4, "M", "u"), + (0x24E5, "M", "v"), + (0x24E6, "M", "w"), + (0x24E7, "M", "x"), + (0x24E8, "M", "y"), + (0x24E9, "M", "z"), + (0x24EA, "M", "0"), + (0x24EB, "V"), + (0x2A0C, "M", "∫∫∫∫"), + (0x2A0D, "V"), + (0x2A74, "3", "::="), + (0x2A75, "3", "=="), + (0x2A76, "3", "==="), + (0x2A77, "V"), + (0x2ADC, "M", "⫝̸"), + (0x2ADD, "V"), + (0x2B74, "X"), + (0x2B76, "V"), + (0x2B96, "X"), + (0x2B97, "V"), + (0x2C00, "M", "ⰰ"), + (0x2C01, "M", "ⰱ"), + (0x2C02, "M", "ⰲ"), + (0x2C03, "M", "ⰳ"), + (0x2C04, "M", "ⰴ"), + (0x2C05, "M", "ⰵ"), + (0x2C06, "M", "ⰶ"), + (0x2C07, "M", "ⰷ"), + (0x2C08, "M", "ⰸ"), + (0x2C09, "M", "ⰹ"), + (0x2C0A, "M", "ⰺ"), + (0x2C0B, "M", "ⰻ"), + (0x2C0C, "M", "ⰼ"), + (0x2C0D, "M", "ⰽ"), + (0x2C0E, "M", "ⰾ"), + (0x2C0F, "M", "ⰿ"), + (0x2C10, "M", "ⱀ"), + (0x2C11, "M", "ⱁ"), + (0x2C12, "M", "ⱂ"), + (0x2C13, "M", "ⱃ"), + (0x2C14, "M", "ⱄ"), + (0x2C15, "M", "ⱅ"), + (0x2C16, "M", "ⱆ"), + (0x2C17, "M", "ⱇ"), + (0x2C18, "M", "ⱈ"), + (0x2C19, "M", "ⱉ"), + (0x2C1A, "M", "ⱊ"), + (0x2C1B, "M", "ⱋ"), + (0x2C1C, "M", "ⱌ"), + (0x2C1D, "M", "ⱍ"), + (0x2C1E, "M", "ⱎ"), + (0x2C1F, "M", "ⱏ"), + (0x2C20, "M", "ⱐ"), + (0x2C21, "M", "ⱑ"), + (0x2C22, "M", "ⱒ"), + (0x2C23, "M", "ⱓ"), + (0x2C24, "M", "ⱔ"), + (0x2C25, "M", "ⱕ"), + (0x2C26, "M", "ⱖ"), + (0x2C27, "M", "ⱗ"), + (0x2C28, "M", "ⱘ"), + (0x2C29, "M", "ⱙ"), + (0x2C2A, "M", "ⱚ"), + (0x2C2B, "M", "ⱛ"), + (0x2C2C, "M", "ⱜ"), + ] + + +def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2C2D, "M", "ⱝ"), + (0x2C2E, "M", "ⱞ"), + (0x2C2F, "M", "ⱟ"), + (0x2C30, "V"), + (0x2C60, "M", "ⱡ"), + (0x2C61, "V"), + (0x2C62, "M", "ɫ"), + (0x2C63, "M", "ᵽ"), + (0x2C64, "M", "ɽ"), + (0x2C65, "V"), + (0x2C67, "M", "ⱨ"), + (0x2C68, "V"), + (0x2C69, "M", "ⱪ"), + (0x2C6A, "V"), + (0x2C6B, "M", "ⱬ"), + (0x2C6C, "V"), + (0x2C6D, "M", "ɑ"), + (0x2C6E, "M", "ɱ"), + (0x2C6F, "M", "ɐ"), + (0x2C70, "M", "ɒ"), + 
(0x2C71, "V"), + (0x2C72, "M", "ⱳ"), + (0x2C73, "V"), + (0x2C75, "M", "ⱶ"), + (0x2C76, "V"), + (0x2C7C, "M", "j"), + (0x2C7D, "M", "v"), + (0x2C7E, "M", "ȿ"), + (0x2C7F, "M", "ɀ"), + (0x2C80, "M", "ⲁ"), + (0x2C81, "V"), + (0x2C82, "M", "ⲃ"), + (0x2C83, "V"), + (0x2C84, "M", "ⲅ"), + (0x2C85, "V"), + (0x2C86, "M", "ⲇ"), + (0x2C87, "V"), + (0x2C88, "M", "ⲉ"), + (0x2C89, "V"), + (0x2C8A, "M", "ⲋ"), + (0x2C8B, "V"), + (0x2C8C, "M", "ⲍ"), + (0x2C8D, "V"), + (0x2C8E, "M", "ⲏ"), + (0x2C8F, "V"), + (0x2C90, "M", "ⲑ"), + (0x2C91, "V"), + (0x2C92, "M", "ⲓ"), + (0x2C93, "V"), + (0x2C94, "M", "ⲕ"), + (0x2C95, "V"), + (0x2C96, "M", "ⲗ"), + (0x2C97, "V"), + (0x2C98, "M", "ⲙ"), + (0x2C99, "V"), + (0x2C9A, "M", "ⲛ"), + (0x2C9B, "V"), + (0x2C9C, "M", "ⲝ"), + (0x2C9D, "V"), + (0x2C9E, "M", "ⲟ"), + (0x2C9F, "V"), + (0x2CA0, "M", "ⲡ"), + (0x2CA1, "V"), + (0x2CA2, "M", "ⲣ"), + (0x2CA3, "V"), + (0x2CA4, "M", "ⲥ"), + (0x2CA5, "V"), + (0x2CA6, "M", "ⲧ"), + (0x2CA7, "V"), + (0x2CA8, "M", "ⲩ"), + (0x2CA9, "V"), + (0x2CAA, "M", "ⲫ"), + (0x2CAB, "V"), + (0x2CAC, "M", "ⲭ"), + (0x2CAD, "V"), + (0x2CAE, "M", "ⲯ"), + (0x2CAF, "V"), + (0x2CB0, "M", "ⲱ"), + (0x2CB1, "V"), + (0x2CB2, "M", "ⲳ"), + (0x2CB3, "V"), + (0x2CB4, "M", "ⲵ"), + (0x2CB5, "V"), + (0x2CB6, "M", "ⲷ"), + (0x2CB7, "V"), + (0x2CB8, "M", "ⲹ"), + (0x2CB9, "V"), + (0x2CBA, "M", "ⲻ"), + (0x2CBB, "V"), + (0x2CBC, "M", "ⲽ"), + (0x2CBD, "V"), + (0x2CBE, "M", "ⲿ"), + (0x2CBF, "V"), + (0x2CC0, "M", "ⳁ"), + (0x2CC1, "V"), + (0x2CC2, "M", "ⳃ"), + (0x2CC3, "V"), + (0x2CC4, "M", "ⳅ"), + (0x2CC5, "V"), + (0x2CC6, "M", "ⳇ"), + ] + + +def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2CC7, "V"), + (0x2CC8, "M", "ⳉ"), + (0x2CC9, "V"), + (0x2CCA, "M", "ⳋ"), + (0x2CCB, "V"), + (0x2CCC, "M", "ⳍ"), + (0x2CCD, "V"), + (0x2CCE, "M", "ⳏ"), + (0x2CCF, "V"), + (0x2CD0, "M", "ⳑ"), + (0x2CD1, "V"), + (0x2CD2, "M", "ⳓ"), + (0x2CD3, "V"), + (0x2CD4, "M", "ⳕ"), + (0x2CD5, "V"), + (0x2CD6, "M", "ⳗ"), + (0x2CD7, "V"), + (0x2CD8, "M", "ⳙ"), + (0x2CD9, "V"), + (0x2CDA, "M", "ⳛ"), + (0x2CDB, "V"), + (0x2CDC, "M", "ⳝ"), + (0x2CDD, "V"), + (0x2CDE, "M", "ⳟ"), + (0x2CDF, "V"), + (0x2CE0, "M", "ⳡ"), + (0x2CE1, "V"), + (0x2CE2, "M", "ⳣ"), + (0x2CE3, "V"), + (0x2CEB, "M", "ⳬ"), + (0x2CEC, "V"), + (0x2CED, "M", "ⳮ"), + (0x2CEE, "V"), + (0x2CF2, "M", "ⳳ"), + (0x2CF3, "V"), + (0x2CF4, "X"), + (0x2CF9, "V"), + (0x2D26, "X"), + (0x2D27, "V"), + (0x2D28, "X"), + (0x2D2D, "V"), + (0x2D2E, "X"), + (0x2D30, "V"), + (0x2D68, "X"), + (0x2D6F, "M", "ⵡ"), + (0x2D70, "V"), + (0x2D71, "X"), + (0x2D7F, "V"), + (0x2D97, "X"), + (0x2DA0, "V"), + (0x2DA7, "X"), + (0x2DA8, "V"), + (0x2DAF, "X"), + (0x2DB0, "V"), + (0x2DB7, "X"), + (0x2DB8, "V"), + (0x2DBF, "X"), + (0x2DC0, "V"), + (0x2DC7, "X"), + (0x2DC8, "V"), + (0x2DCF, "X"), + (0x2DD0, "V"), + (0x2DD7, "X"), + (0x2DD8, "V"), + (0x2DDF, "X"), + (0x2DE0, "V"), + (0x2E5E, "X"), + (0x2E80, "V"), + (0x2E9A, "X"), + (0x2E9B, "V"), + (0x2E9F, "M", "母"), + (0x2EA0, "V"), + (0x2EF3, "M", "龟"), + (0x2EF4, "X"), + (0x2F00, "M", "一"), + (0x2F01, "M", "丨"), + (0x2F02, "M", "丶"), + (0x2F03, "M", "丿"), + (0x2F04, "M", "乙"), + (0x2F05, "M", "亅"), + (0x2F06, "M", "二"), + (0x2F07, "M", "亠"), + (0x2F08, "M", "人"), + (0x2F09, "M", "儿"), + (0x2F0A, "M", "入"), + (0x2F0B, "M", "八"), + (0x2F0C, "M", "冂"), + (0x2F0D, "M", "冖"), + (0x2F0E, "M", "冫"), + (0x2F0F, "M", "几"), + (0x2F10, "M", "凵"), + (0x2F11, "M", "刀"), + (0x2F12, "M", "力"), + (0x2F13, "M", "勹"), + (0x2F14, "M", "匕"), + (0x2F15, "M", "匚"), + (0x2F16, "M", "匸"), + (0x2F17, "M", "十"), + (0x2F18, 
"M", "卜"), + (0x2F19, "M", "卩"), + ] + + +def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F1A, "M", "厂"), + (0x2F1B, "M", "厶"), + (0x2F1C, "M", "又"), + (0x2F1D, "M", "口"), + (0x2F1E, "M", "囗"), + (0x2F1F, "M", "土"), + (0x2F20, "M", "士"), + (0x2F21, "M", "夂"), + (0x2F22, "M", "夊"), + (0x2F23, "M", "夕"), + (0x2F24, "M", "大"), + (0x2F25, "M", "女"), + (0x2F26, "M", "子"), + (0x2F27, "M", "宀"), + (0x2F28, "M", "寸"), + (0x2F29, "M", "小"), + (0x2F2A, "M", "尢"), + (0x2F2B, "M", "尸"), + (0x2F2C, "M", "屮"), + (0x2F2D, "M", "山"), + (0x2F2E, "M", "巛"), + (0x2F2F, "M", "工"), + (0x2F30, "M", "己"), + (0x2F31, "M", "巾"), + (0x2F32, "M", "干"), + (0x2F33, "M", "幺"), + (0x2F34, "M", "广"), + (0x2F35, "M", "廴"), + (0x2F36, "M", "廾"), + (0x2F37, "M", "弋"), + (0x2F38, "M", "弓"), + (0x2F39, "M", "彐"), + (0x2F3A, "M", "彡"), + (0x2F3B, "M", "彳"), + (0x2F3C, "M", "心"), + (0x2F3D, "M", "戈"), + (0x2F3E, "M", "戶"), + (0x2F3F, "M", "手"), + (0x2F40, "M", "支"), + (0x2F41, "M", "攴"), + (0x2F42, "M", "文"), + (0x2F43, "M", "斗"), + (0x2F44, "M", "斤"), + (0x2F45, "M", "方"), + (0x2F46, "M", "无"), + (0x2F47, "M", "日"), + (0x2F48, "M", "曰"), + (0x2F49, "M", "月"), + (0x2F4A, "M", "木"), + (0x2F4B, "M", "欠"), + (0x2F4C, "M", "止"), + (0x2F4D, "M", "歹"), + (0x2F4E, "M", "殳"), + (0x2F4F, "M", "毋"), + (0x2F50, "M", "比"), + (0x2F51, "M", "毛"), + (0x2F52, "M", "氏"), + (0x2F53, "M", "气"), + (0x2F54, "M", "水"), + (0x2F55, "M", "火"), + (0x2F56, "M", "爪"), + (0x2F57, "M", "父"), + (0x2F58, "M", "爻"), + (0x2F59, "M", "爿"), + (0x2F5A, "M", "片"), + (0x2F5B, "M", "牙"), + (0x2F5C, "M", "牛"), + (0x2F5D, "M", "犬"), + (0x2F5E, "M", "玄"), + (0x2F5F, "M", "玉"), + (0x2F60, "M", "瓜"), + (0x2F61, "M", "瓦"), + (0x2F62, "M", "甘"), + (0x2F63, "M", "生"), + (0x2F64, "M", "用"), + (0x2F65, "M", "田"), + (0x2F66, "M", "疋"), + (0x2F67, "M", "疒"), + (0x2F68, "M", "癶"), + (0x2F69, "M", "白"), + (0x2F6A, "M", "皮"), + (0x2F6B, "M", "皿"), + (0x2F6C, "M", "目"), + (0x2F6D, "M", "矛"), + (0x2F6E, "M", "矢"), + (0x2F6F, "M", "石"), + (0x2F70, "M", "示"), + (0x2F71, "M", "禸"), + (0x2F72, "M", "禾"), + (0x2F73, "M", "穴"), + (0x2F74, "M", "立"), + (0x2F75, "M", "竹"), + (0x2F76, "M", "米"), + (0x2F77, "M", "糸"), + (0x2F78, "M", "缶"), + (0x2F79, "M", "网"), + (0x2F7A, "M", "羊"), + (0x2F7B, "M", "羽"), + (0x2F7C, "M", "老"), + (0x2F7D, "M", "而"), + ] + + +def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F7E, "M", "耒"), + (0x2F7F, "M", "耳"), + (0x2F80, "M", "聿"), + (0x2F81, "M", "肉"), + (0x2F82, "M", "臣"), + (0x2F83, "M", "自"), + (0x2F84, "M", "至"), + (0x2F85, "M", "臼"), + (0x2F86, "M", "舌"), + (0x2F87, "M", "舛"), + (0x2F88, "M", "舟"), + (0x2F89, "M", "艮"), + (0x2F8A, "M", "色"), + (0x2F8B, "M", "艸"), + (0x2F8C, "M", "虍"), + (0x2F8D, "M", "虫"), + (0x2F8E, "M", "血"), + (0x2F8F, "M", "行"), + (0x2F90, "M", "衣"), + (0x2F91, "M", "襾"), + (0x2F92, "M", "見"), + (0x2F93, "M", "角"), + (0x2F94, "M", "言"), + (0x2F95, "M", "谷"), + (0x2F96, "M", "豆"), + (0x2F97, "M", "豕"), + (0x2F98, "M", "豸"), + (0x2F99, "M", "貝"), + (0x2F9A, "M", "赤"), + (0x2F9B, "M", "走"), + (0x2F9C, "M", "足"), + (0x2F9D, "M", "身"), + (0x2F9E, "M", "車"), + (0x2F9F, "M", "辛"), + (0x2FA0, "M", "辰"), + (0x2FA1, "M", "辵"), + (0x2FA2, "M", "邑"), + (0x2FA3, "M", "酉"), + (0x2FA4, "M", "釆"), + (0x2FA5, "M", "里"), + (0x2FA6, "M", "金"), + (0x2FA7, "M", "長"), + (0x2FA8, "M", "門"), + (0x2FA9, "M", "阜"), + (0x2FAA, "M", "隶"), + (0x2FAB, "M", "隹"), + (0x2FAC, "M", "雨"), + (0x2FAD, "M", "靑"), + (0x2FAE, "M", "非"), + (0x2FAF, "M", "面"), + (0x2FB0, "M", "革"), + (0x2FB1, "M", "韋"), 
+ (0x2FB2, "M", "韭"), + (0x2FB3, "M", "音"), + (0x2FB4, "M", "頁"), + (0x2FB5, "M", "風"), + (0x2FB6, "M", "飛"), + (0x2FB7, "M", "食"), + (0x2FB8, "M", "首"), + (0x2FB9, "M", "香"), + (0x2FBA, "M", "馬"), + (0x2FBB, "M", "骨"), + (0x2FBC, "M", "高"), + (0x2FBD, "M", "髟"), + (0x2FBE, "M", "鬥"), + (0x2FBF, "M", "鬯"), + (0x2FC0, "M", "鬲"), + (0x2FC1, "M", "鬼"), + (0x2FC2, "M", "魚"), + (0x2FC3, "M", "鳥"), + (0x2FC4, "M", "鹵"), + (0x2FC5, "M", "鹿"), + (0x2FC6, "M", "麥"), + (0x2FC7, "M", "麻"), + (0x2FC8, "M", "黃"), + (0x2FC9, "M", "黍"), + (0x2FCA, "M", "黑"), + (0x2FCB, "M", "黹"), + (0x2FCC, "M", "黽"), + (0x2FCD, "M", "鼎"), + (0x2FCE, "M", "鼓"), + (0x2FCF, "M", "鼠"), + (0x2FD0, "M", "鼻"), + (0x2FD1, "M", "齊"), + (0x2FD2, "M", "齒"), + (0x2FD3, "M", "龍"), + (0x2FD4, "M", "龜"), + (0x2FD5, "M", "龠"), + (0x2FD6, "X"), + (0x3000, "3", " "), + (0x3001, "V"), + (0x3002, "M", "."), + (0x3003, "V"), + (0x3036, "M", "〒"), + (0x3037, "V"), + (0x3038, "M", "十"), + (0x3039, "M", "卄"), + (0x303A, "M", "卅"), + (0x303B, "V"), + (0x3040, "X"), + ] + + +def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3041, "V"), + (0x3097, "X"), + (0x3099, "V"), + (0x309B, "3", " ゙"), + (0x309C, "3", " ゚"), + (0x309D, "V"), + (0x309F, "M", "より"), + (0x30A0, "V"), + (0x30FF, "M", "コト"), + (0x3100, "X"), + (0x3105, "V"), + (0x3130, "X"), + (0x3131, "M", "ᄀ"), + (0x3132, "M", "ᄁ"), + (0x3133, "M", "ᆪ"), + (0x3134, "M", "ᄂ"), + (0x3135, "M", "ᆬ"), + (0x3136, "M", "ᆭ"), + (0x3137, "M", "ᄃ"), + (0x3138, "M", "ᄄ"), + (0x3139, "M", "ᄅ"), + (0x313A, "M", "ᆰ"), + (0x313B, "M", "ᆱ"), + (0x313C, "M", "ᆲ"), + (0x313D, "M", "ᆳ"), + (0x313E, "M", "ᆴ"), + (0x313F, "M", "ᆵ"), + (0x3140, "M", "ᄚ"), + (0x3141, "M", "ᄆ"), + (0x3142, "M", "ᄇ"), + (0x3143, "M", "ᄈ"), + (0x3144, "M", "ᄡ"), + (0x3145, "M", "ᄉ"), + (0x3146, "M", "ᄊ"), + (0x3147, "M", "ᄋ"), + (0x3148, "M", "ᄌ"), + (0x3149, "M", "ᄍ"), + (0x314A, "M", "ᄎ"), + (0x314B, "M", "ᄏ"), + (0x314C, "M", "ᄐ"), + (0x314D, "M", "ᄑ"), + (0x314E, "M", "ᄒ"), + (0x314F, "M", "ᅡ"), + (0x3150, "M", "ᅢ"), + (0x3151, "M", "ᅣ"), + (0x3152, "M", "ᅤ"), + (0x3153, "M", "ᅥ"), + (0x3154, "M", "ᅦ"), + (0x3155, "M", "ᅧ"), + (0x3156, "M", "ᅨ"), + (0x3157, "M", "ᅩ"), + (0x3158, "M", "ᅪ"), + (0x3159, "M", "ᅫ"), + (0x315A, "M", "ᅬ"), + (0x315B, "M", "ᅭ"), + (0x315C, "M", "ᅮ"), + (0x315D, "M", "ᅯ"), + (0x315E, "M", "ᅰ"), + (0x315F, "M", "ᅱ"), + (0x3160, "M", "ᅲ"), + (0x3161, "M", "ᅳ"), + (0x3162, "M", "ᅴ"), + (0x3163, "M", "ᅵ"), + (0x3164, "X"), + (0x3165, "M", "ᄔ"), + (0x3166, "M", "ᄕ"), + (0x3167, "M", "ᇇ"), + (0x3168, "M", "ᇈ"), + (0x3169, "M", "ᇌ"), + (0x316A, "M", "ᇎ"), + (0x316B, "M", "ᇓ"), + (0x316C, "M", "ᇗ"), + (0x316D, "M", "ᇙ"), + (0x316E, "M", "ᄜ"), + (0x316F, "M", "ᇝ"), + (0x3170, "M", "ᇟ"), + (0x3171, "M", "ᄝ"), + (0x3172, "M", "ᄞ"), + (0x3173, "M", "ᄠ"), + (0x3174, "M", "ᄢ"), + (0x3175, "M", "ᄣ"), + (0x3176, "M", "ᄧ"), + (0x3177, "M", "ᄩ"), + (0x3178, "M", "ᄫ"), + (0x3179, "M", "ᄬ"), + (0x317A, "M", "ᄭ"), + (0x317B, "M", "ᄮ"), + (0x317C, "M", "ᄯ"), + (0x317D, "M", "ᄲ"), + (0x317E, "M", "ᄶ"), + (0x317F, "M", "ᅀ"), + (0x3180, "M", "ᅇ"), + (0x3181, "M", "ᅌ"), + (0x3182, "M", "ᇱ"), + (0x3183, "M", "ᇲ"), + (0x3184, "M", "ᅗ"), + (0x3185, "M", "ᅘ"), + (0x3186, "M", "ᅙ"), + (0x3187, "M", "ᆄ"), + (0x3188, "M", "ᆅ"), + ] + + +def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3189, "M", "ᆈ"), + (0x318A, "M", "ᆑ"), + (0x318B, "M", "ᆒ"), + (0x318C, "M", "ᆔ"), + (0x318D, "M", "ᆞ"), + (0x318E, "M", "ᆡ"), + (0x318F, "X"), + (0x3190, "V"), + (0x3192, "M", "一"), + 
(0x3193, "M", "二"), + (0x3194, "M", "三"), + (0x3195, "M", "四"), + (0x3196, "M", "上"), + (0x3197, "M", "中"), + (0x3198, "M", "下"), + (0x3199, "M", "甲"), + (0x319A, "M", "乙"), + (0x319B, "M", "丙"), + (0x319C, "M", "丁"), + (0x319D, "M", "天"), + (0x319E, "M", "地"), + (0x319F, "M", "人"), + (0x31A0, "V"), + (0x31E4, "X"), + (0x31F0, "V"), + (0x3200, "3", "(ᄀ)"), + (0x3201, "3", "(ᄂ)"), + (0x3202, "3", "(ᄃ)"), + (0x3203, "3", "(ᄅ)"), + (0x3204, "3", "(ᄆ)"), + (0x3205, "3", "(ᄇ)"), + (0x3206, "3", "(ᄉ)"), + (0x3207, "3", "(ᄋ)"), + (0x3208, "3", "(ᄌ)"), + (0x3209, "3", "(ᄎ)"), + (0x320A, "3", "(ᄏ)"), + (0x320B, "3", "(ᄐ)"), + (0x320C, "3", "(ᄑ)"), + (0x320D, "3", "(ᄒ)"), + (0x320E, "3", "(가)"), + (0x320F, "3", "(나)"), + (0x3210, "3", "(다)"), + (0x3211, "3", "(라)"), + (0x3212, "3", "(마)"), + (0x3213, "3", "(바)"), + (0x3214, "3", "(사)"), + (0x3215, "3", "(아)"), + (0x3216, "3", "(자)"), + (0x3217, "3", "(차)"), + (0x3218, "3", "(카)"), + (0x3219, "3", "(타)"), + (0x321A, "3", "(파)"), + (0x321B, "3", "(하)"), + (0x321C, "3", "(주)"), + (0x321D, "3", "(오전)"), + (0x321E, "3", "(오후)"), + (0x321F, "X"), + (0x3220, "3", "(一)"), + (0x3221, "3", "(二)"), + (0x3222, "3", "(三)"), + (0x3223, "3", "(四)"), + (0x3224, "3", "(五)"), + (0x3225, "3", "(六)"), + (0x3226, "3", "(七)"), + (0x3227, "3", "(八)"), + (0x3228, "3", "(九)"), + (0x3229, "3", "(十)"), + (0x322A, "3", "(月)"), + (0x322B, "3", "(火)"), + (0x322C, "3", "(水)"), + (0x322D, "3", "(木)"), + (0x322E, "3", "(金)"), + (0x322F, "3", "(土)"), + (0x3230, "3", "(日)"), + (0x3231, "3", "(株)"), + (0x3232, "3", "(有)"), + (0x3233, "3", "(社)"), + (0x3234, "3", "(名)"), + (0x3235, "3", "(特)"), + (0x3236, "3", "(財)"), + (0x3237, "3", "(祝)"), + (0x3238, "3", "(労)"), + (0x3239, "3", "(代)"), + (0x323A, "3", "(呼)"), + (0x323B, "3", "(学)"), + (0x323C, "3", "(監)"), + (0x323D, "3", "(企)"), + (0x323E, "3", "(資)"), + (0x323F, "3", "(協)"), + (0x3240, "3", "(祭)"), + (0x3241, "3", "(休)"), + (0x3242, "3", "(自)"), + (0x3243, "3", "(至)"), + (0x3244, "M", "問"), + (0x3245, "M", "幼"), + (0x3246, "M", "文"), + (0x3247, "M", "箏"), + (0x3248, "V"), + (0x3250, "M", "pte"), + (0x3251, "M", "21"), + ] + + +def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3252, "M", "22"), + (0x3253, "M", "23"), + (0x3254, "M", "24"), + (0x3255, "M", "25"), + (0x3256, "M", "26"), + (0x3257, "M", "27"), + (0x3258, "M", "28"), + (0x3259, "M", "29"), + (0x325A, "M", "30"), + (0x325B, "M", "31"), + (0x325C, "M", "32"), + (0x325D, "M", "33"), + (0x325E, "M", "34"), + (0x325F, "M", "35"), + (0x3260, "M", "ᄀ"), + (0x3261, "M", "ᄂ"), + (0x3262, "M", "ᄃ"), + (0x3263, "M", "ᄅ"), + (0x3264, "M", "ᄆ"), + (0x3265, "M", "ᄇ"), + (0x3266, "M", "ᄉ"), + (0x3267, "M", "ᄋ"), + (0x3268, "M", "ᄌ"), + (0x3269, "M", "ᄎ"), + (0x326A, "M", "ᄏ"), + (0x326B, "M", "ᄐ"), + (0x326C, "M", "ᄑ"), + (0x326D, "M", "ᄒ"), + (0x326E, "M", "가"), + (0x326F, "M", "나"), + (0x3270, "M", "다"), + (0x3271, "M", "라"), + (0x3272, "M", "마"), + (0x3273, "M", "바"), + (0x3274, "M", "사"), + (0x3275, "M", "아"), + (0x3276, "M", "자"), + (0x3277, "M", "차"), + (0x3278, "M", "카"), + (0x3279, "M", "타"), + (0x327A, "M", "파"), + (0x327B, "M", "하"), + (0x327C, "M", "참고"), + (0x327D, "M", "주의"), + (0x327E, "M", "우"), + (0x327F, "V"), + (0x3280, "M", "一"), + (0x3281, "M", "二"), + (0x3282, "M", "三"), + (0x3283, "M", "四"), + (0x3284, "M", "五"), + (0x3285, "M", "六"), + (0x3286, "M", "七"), + (0x3287, "M", "八"), + (0x3288, "M", "九"), + (0x3289, "M", "十"), + (0x328A, "M", "月"), + (0x328B, "M", "火"), + (0x328C, "M", "水"), + (0x328D, "M", "木"), + (0x328E, "M", 
"金"), + (0x328F, "M", "土"), + (0x3290, "M", "日"), + (0x3291, "M", "株"), + (0x3292, "M", "有"), + (0x3293, "M", "社"), + (0x3294, "M", "名"), + (0x3295, "M", "特"), + (0x3296, "M", "財"), + (0x3297, "M", "祝"), + (0x3298, "M", "労"), + (0x3299, "M", "秘"), + (0x329A, "M", "男"), + (0x329B, "M", "女"), + (0x329C, "M", "適"), + (0x329D, "M", "優"), + (0x329E, "M", "印"), + (0x329F, "M", "注"), + (0x32A0, "M", "項"), + (0x32A1, "M", "休"), + (0x32A2, "M", "写"), + (0x32A3, "M", "正"), + (0x32A4, "M", "上"), + (0x32A5, "M", "中"), + (0x32A6, "M", "下"), + (0x32A7, "M", "左"), + (0x32A8, "M", "右"), + (0x32A9, "M", "医"), + (0x32AA, "M", "宗"), + (0x32AB, "M", "学"), + (0x32AC, "M", "監"), + (0x32AD, "M", "企"), + (0x32AE, "M", "資"), + (0x32AF, "M", "協"), + (0x32B0, "M", "夜"), + (0x32B1, "M", "36"), + (0x32B2, "M", "37"), + (0x32B3, "M", "38"), + (0x32B4, "M", "39"), + (0x32B5, "M", "40"), + ] + + +def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x32B6, "M", "41"), + (0x32B7, "M", "42"), + (0x32B8, "M", "43"), + (0x32B9, "M", "44"), + (0x32BA, "M", "45"), + (0x32BB, "M", "46"), + (0x32BC, "M", "47"), + (0x32BD, "M", "48"), + (0x32BE, "M", "49"), + (0x32BF, "M", "50"), + (0x32C0, "M", "1月"), + (0x32C1, "M", "2月"), + (0x32C2, "M", "3月"), + (0x32C3, "M", "4月"), + (0x32C4, "M", "5月"), + (0x32C5, "M", "6月"), + (0x32C6, "M", "7月"), + (0x32C7, "M", "8月"), + (0x32C8, "M", "9月"), + (0x32C9, "M", "10月"), + (0x32CA, "M", "11月"), + (0x32CB, "M", "12月"), + (0x32CC, "M", "hg"), + (0x32CD, "M", "erg"), + (0x32CE, "M", "ev"), + (0x32CF, "M", "ltd"), + (0x32D0, "M", "ア"), + (0x32D1, "M", "イ"), + (0x32D2, "M", "ウ"), + (0x32D3, "M", "エ"), + (0x32D4, "M", "オ"), + (0x32D5, "M", "カ"), + (0x32D6, "M", "キ"), + (0x32D7, "M", "ク"), + (0x32D8, "M", "ケ"), + (0x32D9, "M", "コ"), + (0x32DA, "M", "サ"), + (0x32DB, "M", "シ"), + (0x32DC, "M", "ス"), + (0x32DD, "M", "セ"), + (0x32DE, "M", "ソ"), + (0x32DF, "M", "タ"), + (0x32E0, "M", "チ"), + (0x32E1, "M", "ツ"), + (0x32E2, "M", "テ"), + (0x32E3, "M", "ト"), + (0x32E4, "M", "ナ"), + (0x32E5, "M", "ニ"), + (0x32E6, "M", "ヌ"), + (0x32E7, "M", "ネ"), + (0x32E8, "M", "ノ"), + (0x32E9, "M", "ハ"), + (0x32EA, "M", "ヒ"), + (0x32EB, "M", "フ"), + (0x32EC, "M", "ヘ"), + (0x32ED, "M", "ホ"), + (0x32EE, "M", "マ"), + (0x32EF, "M", "ミ"), + (0x32F0, "M", "ム"), + (0x32F1, "M", "メ"), + (0x32F2, "M", "モ"), + (0x32F3, "M", "ヤ"), + (0x32F4, "M", "ユ"), + (0x32F5, "M", "ヨ"), + (0x32F6, "M", "ラ"), + (0x32F7, "M", "リ"), + (0x32F8, "M", "ル"), + (0x32F9, "M", "レ"), + (0x32FA, "M", "ロ"), + (0x32FB, "M", "ワ"), + (0x32FC, "M", "ヰ"), + (0x32FD, "M", "ヱ"), + (0x32FE, "M", "ヲ"), + (0x32FF, "M", "令和"), + (0x3300, "M", "アパート"), + (0x3301, "M", "アルファ"), + (0x3302, "M", "アンペア"), + (0x3303, "M", "アール"), + (0x3304, "M", "イニング"), + (0x3305, "M", "インチ"), + (0x3306, "M", "ウォン"), + (0x3307, "M", "エスクード"), + (0x3308, "M", "エーカー"), + (0x3309, "M", "オンス"), + (0x330A, "M", "オーム"), + (0x330B, "M", "カイリ"), + (0x330C, "M", "カラット"), + (0x330D, "M", "カロリー"), + (0x330E, "M", "ガロン"), + (0x330F, "M", "ガンマ"), + (0x3310, "M", "ギガ"), + (0x3311, "M", "ギニー"), + (0x3312, "M", "キュリー"), + (0x3313, "M", "ギルダー"), + (0x3314, "M", "キロ"), + (0x3315, "M", "キログラム"), + (0x3316, "M", "キロメートル"), + (0x3317, "M", "キロワット"), + (0x3318, "M", "グラム"), + (0x3319, "M", "グラムトン"), + ] + + +def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x331A, "M", "クルゼイロ"), + (0x331B, "M", "クローネ"), + (0x331C, "M", "ケース"), + (0x331D, "M", "コルナ"), + (0x331E, "M", "コーポ"), + (0x331F, "M", "サイクル"), + (0x3320, "M", "サンチーム"), + (0x3321, "M", "シリング"), + 
(0x3322, "M", "センチ"), + (0x3323, "M", "セント"), + (0x3324, "M", "ダース"), + (0x3325, "M", "デシ"), + (0x3326, "M", "ドル"), + (0x3327, "M", "トン"), + (0x3328, "M", "ナノ"), + (0x3329, "M", "ノット"), + (0x332A, "M", "ハイツ"), + (0x332B, "M", "パーセント"), + (0x332C, "M", "パーツ"), + (0x332D, "M", "バーレル"), + (0x332E, "M", "ピアストル"), + (0x332F, "M", "ピクル"), + (0x3330, "M", "ピコ"), + (0x3331, "M", "ビル"), + (0x3332, "M", "ファラッド"), + (0x3333, "M", "フィート"), + (0x3334, "M", "ブッシェル"), + (0x3335, "M", "フラン"), + (0x3336, "M", "ヘクタール"), + (0x3337, "M", "ペソ"), + (0x3338, "M", "ペニヒ"), + (0x3339, "M", "ヘルツ"), + (0x333A, "M", "ペンス"), + (0x333B, "M", "ページ"), + (0x333C, "M", "ベータ"), + (0x333D, "M", "ポイント"), + (0x333E, "M", "ボルト"), + (0x333F, "M", "ホン"), + (0x3340, "M", "ポンド"), + (0x3341, "M", "ホール"), + (0x3342, "M", "ホーン"), + (0x3343, "M", "マイクロ"), + (0x3344, "M", "マイル"), + (0x3345, "M", "マッハ"), + (0x3346, "M", "マルク"), + (0x3347, "M", "マンション"), + (0x3348, "M", "ミクロン"), + (0x3349, "M", "ミリ"), + (0x334A, "M", "ミリバール"), + (0x334B, "M", "メガ"), + (0x334C, "M", "メガトン"), + (0x334D, "M", "メートル"), + (0x334E, "M", "ヤード"), + (0x334F, "M", "ヤール"), + (0x3350, "M", "ユアン"), + (0x3351, "M", "リットル"), + (0x3352, "M", "リラ"), + (0x3353, "M", "ルピー"), + (0x3354, "M", "ルーブル"), + (0x3355, "M", "レム"), + (0x3356, "M", "レントゲン"), + (0x3357, "M", "ワット"), + (0x3358, "M", "0点"), + (0x3359, "M", "1点"), + (0x335A, "M", "2点"), + (0x335B, "M", "3点"), + (0x335C, "M", "4点"), + (0x335D, "M", "5点"), + (0x335E, "M", "6点"), + (0x335F, "M", "7点"), + (0x3360, "M", "8点"), + (0x3361, "M", "9点"), + (0x3362, "M", "10点"), + (0x3363, "M", "11点"), + (0x3364, "M", "12点"), + (0x3365, "M", "13点"), + (0x3366, "M", "14点"), + (0x3367, "M", "15点"), + (0x3368, "M", "16点"), + (0x3369, "M", "17点"), + (0x336A, "M", "18点"), + (0x336B, "M", "19点"), + (0x336C, "M", "20点"), + (0x336D, "M", "21点"), + (0x336E, "M", "22点"), + (0x336F, "M", "23点"), + (0x3370, "M", "24点"), + (0x3371, "M", "hpa"), + (0x3372, "M", "da"), + (0x3373, "M", "au"), + (0x3374, "M", "bar"), + (0x3375, "M", "ov"), + (0x3376, "M", "pc"), + (0x3377, "M", "dm"), + (0x3378, "M", "dm2"), + (0x3379, "M", "dm3"), + (0x337A, "M", "iu"), + (0x337B, "M", "平成"), + (0x337C, "M", "昭和"), + (0x337D, "M", "大正"), + ] + + +def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x337E, "M", "明治"), + (0x337F, "M", "株式会社"), + (0x3380, "M", "pa"), + (0x3381, "M", "na"), + (0x3382, "M", "μa"), + (0x3383, "M", "ma"), + (0x3384, "M", "ka"), + (0x3385, "M", "kb"), + (0x3386, "M", "mb"), + (0x3387, "M", "gb"), + (0x3388, "M", "cal"), + (0x3389, "M", "kcal"), + (0x338A, "M", "pf"), + (0x338B, "M", "nf"), + (0x338C, "M", "μf"), + (0x338D, "M", "μg"), + (0x338E, "M", "mg"), + (0x338F, "M", "kg"), + (0x3390, "M", "hz"), + (0x3391, "M", "khz"), + (0x3392, "M", "mhz"), + (0x3393, "M", "ghz"), + (0x3394, "M", "thz"), + (0x3395, "M", "μl"), + (0x3396, "M", "ml"), + (0x3397, "M", "dl"), + (0x3398, "M", "kl"), + (0x3399, "M", "fm"), + (0x339A, "M", "nm"), + (0x339B, "M", "μm"), + (0x339C, "M", "mm"), + (0x339D, "M", "cm"), + (0x339E, "M", "km"), + (0x339F, "M", "mm2"), + (0x33A0, "M", "cm2"), + (0x33A1, "M", "m2"), + (0x33A2, "M", "km2"), + (0x33A3, "M", "mm3"), + (0x33A4, "M", "cm3"), + (0x33A5, "M", "m3"), + (0x33A6, "M", "km3"), + (0x33A7, "M", "m∕s"), + (0x33A8, "M", "m∕s2"), + (0x33A9, "M", "pa"), + (0x33AA, "M", "kpa"), + (0x33AB, "M", "mpa"), + (0x33AC, "M", "gpa"), + (0x33AD, "M", "rad"), + (0x33AE, "M", "rad∕s"), + (0x33AF, "M", "rad∕s2"), + (0x33B0, "M", "ps"), + (0x33B1, "M", "ns"), + (0x33B2, "M", "μs"), + (0x33B3, "M", 
"ms"), + (0x33B4, "M", "pv"), + (0x33B5, "M", "nv"), + (0x33B6, "M", "μv"), + (0x33B7, "M", "mv"), + (0x33B8, "M", "kv"), + (0x33B9, "M", "mv"), + (0x33BA, "M", "pw"), + (0x33BB, "M", "nw"), + (0x33BC, "M", "μw"), + (0x33BD, "M", "mw"), + (0x33BE, "M", "kw"), + (0x33BF, "M", "mw"), + (0x33C0, "M", "kω"), + (0x33C1, "M", "mω"), + (0x33C2, "X"), + (0x33C3, "M", "bq"), + (0x33C4, "M", "cc"), + (0x33C5, "M", "cd"), + (0x33C6, "M", "c∕kg"), + (0x33C7, "X"), + (0x33C8, "M", "db"), + (0x33C9, "M", "gy"), + (0x33CA, "M", "ha"), + (0x33CB, "M", "hp"), + (0x33CC, "M", "in"), + (0x33CD, "M", "kk"), + (0x33CE, "M", "km"), + (0x33CF, "M", "kt"), + (0x33D0, "M", "lm"), + (0x33D1, "M", "ln"), + (0x33D2, "M", "log"), + (0x33D3, "M", "lx"), + (0x33D4, "M", "mb"), + (0x33D5, "M", "mil"), + (0x33D6, "M", "mol"), + (0x33D7, "M", "ph"), + (0x33D8, "X"), + (0x33D9, "M", "ppm"), + (0x33DA, "M", "pr"), + (0x33DB, "M", "sr"), + (0x33DC, "M", "sv"), + (0x33DD, "M", "wb"), + (0x33DE, "M", "v∕m"), + (0x33DF, "M", "a∕m"), + (0x33E0, "M", "1日"), + (0x33E1, "M", "2日"), + ] + + +def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x33E2, "M", "3日"), + (0x33E3, "M", "4日"), + (0x33E4, "M", "5日"), + (0x33E5, "M", "6日"), + (0x33E6, "M", "7日"), + (0x33E7, "M", "8日"), + (0x33E8, "M", "9日"), + (0x33E9, "M", "10日"), + (0x33EA, "M", "11日"), + (0x33EB, "M", "12日"), + (0x33EC, "M", "13日"), + (0x33ED, "M", "14日"), + (0x33EE, "M", "15日"), + (0x33EF, "M", "16日"), + (0x33F0, "M", "17日"), + (0x33F1, "M", "18日"), + (0x33F2, "M", "19日"), + (0x33F3, "M", "20日"), + (0x33F4, "M", "21日"), + (0x33F5, "M", "22日"), + (0x33F6, "M", "23日"), + (0x33F7, "M", "24日"), + (0x33F8, "M", "25日"), + (0x33F9, "M", "26日"), + (0x33FA, "M", "27日"), + (0x33FB, "M", "28日"), + (0x33FC, "M", "29日"), + (0x33FD, "M", "30日"), + (0x33FE, "M", "31日"), + (0x33FF, "M", "gal"), + (0x3400, "V"), + (0xA48D, "X"), + (0xA490, "V"), + (0xA4C7, "X"), + (0xA4D0, "V"), + (0xA62C, "X"), + (0xA640, "M", "ꙁ"), + (0xA641, "V"), + (0xA642, "M", "ꙃ"), + (0xA643, "V"), + (0xA644, "M", "ꙅ"), + (0xA645, "V"), + (0xA646, "M", "ꙇ"), + (0xA647, "V"), + (0xA648, "M", "ꙉ"), + (0xA649, "V"), + (0xA64A, "M", "ꙋ"), + (0xA64B, "V"), + (0xA64C, "M", "ꙍ"), + (0xA64D, "V"), + (0xA64E, "M", "ꙏ"), + (0xA64F, "V"), + (0xA650, "M", "ꙑ"), + (0xA651, "V"), + (0xA652, "M", "ꙓ"), + (0xA653, "V"), + (0xA654, "M", "ꙕ"), + (0xA655, "V"), + (0xA656, "M", "ꙗ"), + (0xA657, "V"), + (0xA658, "M", "ꙙ"), + (0xA659, "V"), + (0xA65A, "M", "ꙛ"), + (0xA65B, "V"), + (0xA65C, "M", "ꙝ"), + (0xA65D, "V"), + (0xA65E, "M", "ꙟ"), + (0xA65F, "V"), + (0xA660, "M", "ꙡ"), + (0xA661, "V"), + (0xA662, "M", "ꙣ"), + (0xA663, "V"), + (0xA664, "M", "ꙥ"), + (0xA665, "V"), + (0xA666, "M", "ꙧ"), + (0xA667, "V"), + (0xA668, "M", "ꙩ"), + (0xA669, "V"), + (0xA66A, "M", "ꙫ"), + (0xA66B, "V"), + (0xA66C, "M", "ꙭ"), + (0xA66D, "V"), + (0xA680, "M", "ꚁ"), + (0xA681, "V"), + (0xA682, "M", "ꚃ"), + (0xA683, "V"), + (0xA684, "M", "ꚅ"), + (0xA685, "V"), + (0xA686, "M", "ꚇ"), + (0xA687, "V"), + (0xA688, "M", "ꚉ"), + (0xA689, "V"), + (0xA68A, "M", "ꚋ"), + (0xA68B, "V"), + (0xA68C, "M", "ꚍ"), + (0xA68D, "V"), + (0xA68E, "M", "ꚏ"), + (0xA68F, "V"), + (0xA690, "M", "ꚑ"), + (0xA691, "V"), + ] + + +def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA692, "M", "ꚓ"), + (0xA693, "V"), + (0xA694, "M", "ꚕ"), + (0xA695, "V"), + (0xA696, "M", "ꚗ"), + (0xA697, "V"), + (0xA698, "M", "ꚙ"), + (0xA699, "V"), + (0xA69A, "M", "ꚛ"), + (0xA69B, "V"), + (0xA69C, "M", "ъ"), + (0xA69D, "M", "ь"), + (0xA69E, "V"), + 
(0xA6F8, "X"), + (0xA700, "V"), + (0xA722, "M", "ꜣ"), + (0xA723, "V"), + (0xA724, "M", "ꜥ"), + (0xA725, "V"), + (0xA726, "M", "ꜧ"), + (0xA727, "V"), + (0xA728, "M", "ꜩ"), + (0xA729, "V"), + (0xA72A, "M", "ꜫ"), + (0xA72B, "V"), + (0xA72C, "M", "ꜭ"), + (0xA72D, "V"), + (0xA72E, "M", "ꜯ"), + (0xA72F, "V"), + (0xA732, "M", "ꜳ"), + (0xA733, "V"), + (0xA734, "M", "ꜵ"), + (0xA735, "V"), + (0xA736, "M", "ꜷ"), + (0xA737, "V"), + (0xA738, "M", "ꜹ"), + (0xA739, "V"), + (0xA73A, "M", "ꜻ"), + (0xA73B, "V"), + (0xA73C, "M", "ꜽ"), + (0xA73D, "V"), + (0xA73E, "M", "ꜿ"), + (0xA73F, "V"), + (0xA740, "M", "ꝁ"), + (0xA741, "V"), + (0xA742, "M", "ꝃ"), + (0xA743, "V"), + (0xA744, "M", "ꝅ"), + (0xA745, "V"), + (0xA746, "M", "ꝇ"), + (0xA747, "V"), + (0xA748, "M", "ꝉ"), + (0xA749, "V"), + (0xA74A, "M", "ꝋ"), + (0xA74B, "V"), + (0xA74C, "M", "ꝍ"), + (0xA74D, "V"), + (0xA74E, "M", "ꝏ"), + (0xA74F, "V"), + (0xA750, "M", "ꝑ"), + (0xA751, "V"), + (0xA752, "M", "ꝓ"), + (0xA753, "V"), + (0xA754, "M", "ꝕ"), + (0xA755, "V"), + (0xA756, "M", "ꝗ"), + (0xA757, "V"), + (0xA758, "M", "ꝙ"), + (0xA759, "V"), + (0xA75A, "M", "ꝛ"), + (0xA75B, "V"), + (0xA75C, "M", "ꝝ"), + (0xA75D, "V"), + (0xA75E, "M", "ꝟ"), + (0xA75F, "V"), + (0xA760, "M", "ꝡ"), + (0xA761, "V"), + (0xA762, "M", "ꝣ"), + (0xA763, "V"), + (0xA764, "M", "ꝥ"), + (0xA765, "V"), + (0xA766, "M", "ꝧ"), + (0xA767, "V"), + (0xA768, "M", "ꝩ"), + (0xA769, "V"), + (0xA76A, "M", "ꝫ"), + (0xA76B, "V"), + (0xA76C, "M", "ꝭ"), + (0xA76D, "V"), + (0xA76E, "M", "ꝯ"), + (0xA76F, "V"), + (0xA770, "M", "ꝯ"), + (0xA771, "V"), + (0xA779, "M", "ꝺ"), + (0xA77A, "V"), + (0xA77B, "M", "ꝼ"), + (0xA77C, "V"), + (0xA77D, "M", "ᵹ"), + (0xA77E, "M", "ꝿ"), + (0xA77F, "V"), + ] + + +def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA780, "M", "ꞁ"), + (0xA781, "V"), + (0xA782, "M", "ꞃ"), + (0xA783, "V"), + (0xA784, "M", "ꞅ"), + (0xA785, "V"), + (0xA786, "M", "ꞇ"), + (0xA787, "V"), + (0xA78B, "M", "ꞌ"), + (0xA78C, "V"), + (0xA78D, "M", "ɥ"), + (0xA78E, "V"), + (0xA790, "M", "ꞑ"), + (0xA791, "V"), + (0xA792, "M", "ꞓ"), + (0xA793, "V"), + (0xA796, "M", "ꞗ"), + (0xA797, "V"), + (0xA798, "M", "ꞙ"), + (0xA799, "V"), + (0xA79A, "M", "ꞛ"), + (0xA79B, "V"), + (0xA79C, "M", "ꞝ"), + (0xA79D, "V"), + (0xA79E, "M", "ꞟ"), + (0xA79F, "V"), + (0xA7A0, "M", "ꞡ"), + (0xA7A1, "V"), + (0xA7A2, "M", "ꞣ"), + (0xA7A3, "V"), + (0xA7A4, "M", "ꞥ"), + (0xA7A5, "V"), + (0xA7A6, "M", "ꞧ"), + (0xA7A7, "V"), + (0xA7A8, "M", "ꞩ"), + (0xA7A9, "V"), + (0xA7AA, "M", "ɦ"), + (0xA7AB, "M", "ɜ"), + (0xA7AC, "M", "ɡ"), + (0xA7AD, "M", "ɬ"), + (0xA7AE, "M", "ɪ"), + (0xA7AF, "V"), + (0xA7B0, "M", "ʞ"), + (0xA7B1, "M", "ʇ"), + (0xA7B2, "M", "ʝ"), + (0xA7B3, "M", "ꭓ"), + (0xA7B4, "M", "ꞵ"), + (0xA7B5, "V"), + (0xA7B6, "M", "ꞷ"), + (0xA7B7, "V"), + (0xA7B8, "M", "ꞹ"), + (0xA7B9, "V"), + (0xA7BA, "M", "ꞻ"), + (0xA7BB, "V"), + (0xA7BC, "M", "ꞽ"), + (0xA7BD, "V"), + (0xA7BE, "M", "ꞿ"), + (0xA7BF, "V"), + (0xA7C0, "M", "ꟁ"), + (0xA7C1, "V"), + (0xA7C2, "M", "ꟃ"), + (0xA7C3, "V"), + (0xA7C4, "M", "ꞔ"), + (0xA7C5, "M", "ʂ"), + (0xA7C6, "M", "ᶎ"), + (0xA7C7, "M", "ꟈ"), + (0xA7C8, "V"), + (0xA7C9, "M", "ꟊ"), + (0xA7CA, "V"), + (0xA7CB, "X"), + (0xA7D0, "M", "ꟑ"), + (0xA7D1, "V"), + (0xA7D2, "X"), + (0xA7D3, "V"), + (0xA7D4, "X"), + (0xA7D5, "V"), + (0xA7D6, "M", "ꟗ"), + (0xA7D7, "V"), + (0xA7D8, "M", "ꟙ"), + (0xA7D9, "V"), + (0xA7DA, "X"), + (0xA7F2, "M", "c"), + (0xA7F3, "M", "f"), + (0xA7F4, "M", "q"), + (0xA7F5, "M", "ꟶ"), + (0xA7F6, "V"), + (0xA7F8, "M", "ħ"), + (0xA7F9, "M", "œ"), + (0xA7FA, "V"), + (0xA82D, 
"X"), + (0xA830, "V"), + (0xA83A, "X"), + (0xA840, "V"), + (0xA878, "X"), + (0xA880, "V"), + (0xA8C6, "X"), + (0xA8CE, "V"), + (0xA8DA, "X"), + (0xA8E0, "V"), + (0xA954, "X"), + ] + + +def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA95F, "V"), + (0xA97D, "X"), + (0xA980, "V"), + (0xA9CE, "X"), + (0xA9CF, "V"), + (0xA9DA, "X"), + (0xA9DE, "V"), + (0xA9FF, "X"), + (0xAA00, "V"), + (0xAA37, "X"), + (0xAA40, "V"), + (0xAA4E, "X"), + (0xAA50, "V"), + (0xAA5A, "X"), + (0xAA5C, "V"), + (0xAAC3, "X"), + (0xAADB, "V"), + (0xAAF7, "X"), + (0xAB01, "V"), + (0xAB07, "X"), + (0xAB09, "V"), + (0xAB0F, "X"), + (0xAB11, "V"), + (0xAB17, "X"), + (0xAB20, "V"), + (0xAB27, "X"), + (0xAB28, "V"), + (0xAB2F, "X"), + (0xAB30, "V"), + (0xAB5C, "M", "ꜧ"), + (0xAB5D, "M", "ꬷ"), + (0xAB5E, "M", "ɫ"), + (0xAB5F, "M", "ꭒ"), + (0xAB60, "V"), + (0xAB69, "M", "ʍ"), + (0xAB6A, "V"), + (0xAB6C, "X"), + (0xAB70, "M", "Ꭰ"), + (0xAB71, "M", "Ꭱ"), + (0xAB72, "M", "Ꭲ"), + (0xAB73, "M", "Ꭳ"), + (0xAB74, "M", "Ꭴ"), + (0xAB75, "M", "Ꭵ"), + (0xAB76, "M", "Ꭶ"), + (0xAB77, "M", "Ꭷ"), + (0xAB78, "M", "Ꭸ"), + (0xAB79, "M", "Ꭹ"), + (0xAB7A, "M", "Ꭺ"), + (0xAB7B, "M", "Ꭻ"), + (0xAB7C, "M", "Ꭼ"), + (0xAB7D, "M", "Ꭽ"), + (0xAB7E, "M", "Ꭾ"), + (0xAB7F, "M", "Ꭿ"), + (0xAB80, "M", "Ꮀ"), + (0xAB81, "M", "Ꮁ"), + (0xAB82, "M", "Ꮂ"), + (0xAB83, "M", "Ꮃ"), + (0xAB84, "M", "Ꮄ"), + (0xAB85, "M", "Ꮅ"), + (0xAB86, "M", "Ꮆ"), + (0xAB87, "M", "Ꮇ"), + (0xAB88, "M", "Ꮈ"), + (0xAB89, "M", "Ꮉ"), + (0xAB8A, "M", "Ꮊ"), + (0xAB8B, "M", "Ꮋ"), + (0xAB8C, "M", "Ꮌ"), + (0xAB8D, "M", "Ꮍ"), + (0xAB8E, "M", "Ꮎ"), + (0xAB8F, "M", "Ꮏ"), + (0xAB90, "M", "Ꮐ"), + (0xAB91, "M", "Ꮑ"), + (0xAB92, "M", "Ꮒ"), + (0xAB93, "M", "Ꮓ"), + (0xAB94, "M", "Ꮔ"), + (0xAB95, "M", "Ꮕ"), + (0xAB96, "M", "Ꮖ"), + (0xAB97, "M", "Ꮗ"), + (0xAB98, "M", "Ꮘ"), + (0xAB99, "M", "Ꮙ"), + (0xAB9A, "M", "Ꮚ"), + (0xAB9B, "M", "Ꮛ"), + (0xAB9C, "M", "Ꮜ"), + (0xAB9D, "M", "Ꮝ"), + (0xAB9E, "M", "Ꮞ"), + (0xAB9F, "M", "Ꮟ"), + (0xABA0, "M", "Ꮠ"), + (0xABA1, "M", "Ꮡ"), + (0xABA2, "M", "Ꮢ"), + (0xABA3, "M", "Ꮣ"), + (0xABA4, "M", "Ꮤ"), + (0xABA5, "M", "Ꮥ"), + (0xABA6, "M", "Ꮦ"), + (0xABA7, "M", "Ꮧ"), + (0xABA8, "M", "Ꮨ"), + (0xABA9, "M", "Ꮩ"), + (0xABAA, "M", "Ꮪ"), + (0xABAB, "M", "Ꮫ"), + (0xABAC, "M", "Ꮬ"), + (0xABAD, "M", "Ꮭ"), + (0xABAE, "M", "Ꮮ"), + ] + + +def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xABAF, "M", "Ꮯ"), + (0xABB0, "M", "Ꮰ"), + (0xABB1, "M", "Ꮱ"), + (0xABB2, "M", "Ꮲ"), + (0xABB3, "M", "Ꮳ"), + (0xABB4, "M", "Ꮴ"), + (0xABB5, "M", "Ꮵ"), + (0xABB6, "M", "Ꮶ"), + (0xABB7, "M", "Ꮷ"), + (0xABB8, "M", "Ꮸ"), + (0xABB9, "M", "Ꮹ"), + (0xABBA, "M", "Ꮺ"), + (0xABBB, "M", "Ꮻ"), + (0xABBC, "M", "Ꮼ"), + (0xABBD, "M", "Ꮽ"), + (0xABBE, "M", "Ꮾ"), + (0xABBF, "M", "Ꮿ"), + (0xABC0, "V"), + (0xABEE, "X"), + (0xABF0, "V"), + (0xABFA, "X"), + (0xAC00, "V"), + (0xD7A4, "X"), + (0xD7B0, "V"), + (0xD7C7, "X"), + (0xD7CB, "V"), + (0xD7FC, "X"), + (0xF900, "M", "豈"), + (0xF901, "M", "更"), + (0xF902, "M", "車"), + (0xF903, "M", "賈"), + (0xF904, "M", "滑"), + (0xF905, "M", "串"), + (0xF906, "M", "句"), + (0xF907, "M", "龜"), + (0xF909, "M", "契"), + (0xF90A, "M", "金"), + (0xF90B, "M", "喇"), + (0xF90C, "M", "奈"), + (0xF90D, "M", "懶"), + (0xF90E, "M", "癩"), + (0xF90F, "M", "羅"), + (0xF910, "M", "蘿"), + (0xF911, "M", "螺"), + (0xF912, "M", "裸"), + (0xF913, "M", "邏"), + (0xF914, "M", "樂"), + (0xF915, "M", "洛"), + (0xF916, "M", "烙"), + (0xF917, "M", "珞"), + (0xF918, "M", "落"), + (0xF919, "M", "酪"), + (0xF91A, "M", "駱"), + (0xF91B, "M", "亂"), + (0xF91C, "M", "卵"), 
+ (0xF91D, "M", "欄"), + (0xF91E, "M", "爛"), + (0xF91F, "M", "蘭"), + (0xF920, "M", "鸞"), + (0xF921, "M", "嵐"), + (0xF922, "M", "濫"), + (0xF923, "M", "藍"), + (0xF924, "M", "襤"), + (0xF925, "M", "拉"), + (0xF926, "M", "臘"), + (0xF927, "M", "蠟"), + (0xF928, "M", "廊"), + (0xF929, "M", "朗"), + (0xF92A, "M", "浪"), + (0xF92B, "M", "狼"), + (0xF92C, "M", "郎"), + (0xF92D, "M", "來"), + (0xF92E, "M", "冷"), + (0xF92F, "M", "勞"), + (0xF930, "M", "擄"), + (0xF931, "M", "櫓"), + (0xF932, "M", "爐"), + (0xF933, "M", "盧"), + (0xF934, "M", "老"), + (0xF935, "M", "蘆"), + (0xF936, "M", "虜"), + (0xF937, "M", "路"), + (0xF938, "M", "露"), + (0xF939, "M", "魯"), + (0xF93A, "M", "鷺"), + (0xF93B, "M", "碌"), + (0xF93C, "M", "祿"), + (0xF93D, "M", "綠"), + (0xF93E, "M", "菉"), + (0xF93F, "M", "錄"), + (0xF940, "M", "鹿"), + (0xF941, "M", "論"), + (0xF942, "M", "壟"), + (0xF943, "M", "弄"), + (0xF944, "M", "籠"), + (0xF945, "M", "聾"), + (0xF946, "M", "牢"), + (0xF947, "M", "磊"), + (0xF948, "M", "賂"), + (0xF949, "M", "雷"), + ] + + +def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xF94A, "M", "壘"), + (0xF94B, "M", "屢"), + (0xF94C, "M", "樓"), + (0xF94D, "M", "淚"), + (0xF94E, "M", "漏"), + (0xF94F, "M", "累"), + (0xF950, "M", "縷"), + (0xF951, "M", "陋"), + (0xF952, "M", "勒"), + (0xF953, "M", "肋"), + (0xF954, "M", "凜"), + (0xF955, "M", "凌"), + (0xF956, "M", "稜"), + (0xF957, "M", "綾"), + (0xF958, "M", "菱"), + (0xF959, "M", "陵"), + (0xF95A, "M", "讀"), + (0xF95B, "M", "拏"), + (0xF95C, "M", "樂"), + (0xF95D, "M", "諾"), + (0xF95E, "M", "丹"), + (0xF95F, "M", "寧"), + (0xF960, "M", "怒"), + (0xF961, "M", "率"), + (0xF962, "M", "異"), + (0xF963, "M", "北"), + (0xF964, "M", "磻"), + (0xF965, "M", "便"), + (0xF966, "M", "復"), + (0xF967, "M", "不"), + (0xF968, "M", "泌"), + (0xF969, "M", "數"), + (0xF96A, "M", "索"), + (0xF96B, "M", "參"), + (0xF96C, "M", "塞"), + (0xF96D, "M", "省"), + (0xF96E, "M", "葉"), + (0xF96F, "M", "說"), + (0xF970, "M", "殺"), + (0xF971, "M", "辰"), + (0xF972, "M", "沈"), + (0xF973, "M", "拾"), + (0xF974, "M", "若"), + (0xF975, "M", "掠"), + (0xF976, "M", "略"), + (0xF977, "M", "亮"), + (0xF978, "M", "兩"), + (0xF979, "M", "凉"), + (0xF97A, "M", "梁"), + (0xF97B, "M", "糧"), + (0xF97C, "M", "良"), + (0xF97D, "M", "諒"), + (0xF97E, "M", "量"), + (0xF97F, "M", "勵"), + (0xF980, "M", "呂"), + (0xF981, "M", "女"), + (0xF982, "M", "廬"), + (0xF983, "M", "旅"), + (0xF984, "M", "濾"), + (0xF985, "M", "礪"), + (0xF986, "M", "閭"), + (0xF987, "M", "驪"), + (0xF988, "M", "麗"), + (0xF989, "M", "黎"), + (0xF98A, "M", "力"), + (0xF98B, "M", "曆"), + (0xF98C, "M", "歷"), + (0xF98D, "M", "轢"), + (0xF98E, "M", "年"), + (0xF98F, "M", "憐"), + (0xF990, "M", "戀"), + (0xF991, "M", "撚"), + (0xF992, "M", "漣"), + (0xF993, "M", "煉"), + (0xF994, "M", "璉"), + (0xF995, "M", "秊"), + (0xF996, "M", "練"), + (0xF997, "M", "聯"), + (0xF998, "M", "輦"), + (0xF999, "M", "蓮"), + (0xF99A, "M", "連"), + (0xF99B, "M", "鍊"), + (0xF99C, "M", "列"), + (0xF99D, "M", "劣"), + (0xF99E, "M", "咽"), + (0xF99F, "M", "烈"), + (0xF9A0, "M", "裂"), + (0xF9A1, "M", "說"), + (0xF9A2, "M", "廉"), + (0xF9A3, "M", "念"), + (0xF9A4, "M", "捻"), + (0xF9A5, "M", "殮"), + (0xF9A6, "M", "簾"), + (0xF9A7, "M", "獵"), + (0xF9A8, "M", "令"), + (0xF9A9, "M", "囹"), + (0xF9AA, "M", "寧"), + (0xF9AB, "M", "嶺"), + (0xF9AC, "M", "怜"), + (0xF9AD, "M", "玲"), + ] + + +def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xF9AE, "M", "瑩"), + (0xF9AF, "M", "羚"), + (0xF9B0, "M", "聆"), + (0xF9B1, "M", "鈴"), + (0xF9B2, "M", "零"), + (0xF9B3, "M", "靈"), + (0xF9B4, "M", "領"), + (0xF9B5, "M", "例"), + (0xF9B6, 
"M", "禮"), + (0xF9B7, "M", "醴"), + (0xF9B8, "M", "隸"), + (0xF9B9, "M", "惡"), + (0xF9BA, "M", "了"), + (0xF9BB, "M", "僚"), + (0xF9BC, "M", "寮"), + (0xF9BD, "M", "尿"), + (0xF9BE, "M", "料"), + (0xF9BF, "M", "樂"), + (0xF9C0, "M", "燎"), + (0xF9C1, "M", "療"), + (0xF9C2, "M", "蓼"), + (0xF9C3, "M", "遼"), + (0xF9C4, "M", "龍"), + (0xF9C5, "M", "暈"), + (0xF9C6, "M", "阮"), + (0xF9C7, "M", "劉"), + (0xF9C8, "M", "杻"), + (0xF9C9, "M", "柳"), + (0xF9CA, "M", "流"), + (0xF9CB, "M", "溜"), + (0xF9CC, "M", "琉"), + (0xF9CD, "M", "留"), + (0xF9CE, "M", "硫"), + (0xF9CF, "M", "紐"), + (0xF9D0, "M", "類"), + (0xF9D1, "M", "六"), + (0xF9D2, "M", "戮"), + (0xF9D3, "M", "陸"), + (0xF9D4, "M", "倫"), + (0xF9D5, "M", "崙"), + (0xF9D6, "M", "淪"), + (0xF9D7, "M", "輪"), + (0xF9D8, "M", "律"), + (0xF9D9, "M", "慄"), + (0xF9DA, "M", "栗"), + (0xF9DB, "M", "率"), + (0xF9DC, "M", "隆"), + (0xF9DD, "M", "利"), + (0xF9DE, "M", "吏"), + (0xF9DF, "M", "履"), + (0xF9E0, "M", "易"), + (0xF9E1, "M", "李"), + (0xF9E2, "M", "梨"), + (0xF9E3, "M", "泥"), + (0xF9E4, "M", "理"), + (0xF9E5, "M", "痢"), + (0xF9E6, "M", "罹"), + (0xF9E7, "M", "裏"), + (0xF9E8, "M", "裡"), + (0xF9E9, "M", "里"), + (0xF9EA, "M", "離"), + (0xF9EB, "M", "匿"), + (0xF9EC, "M", "溺"), + (0xF9ED, "M", "吝"), + (0xF9EE, "M", "燐"), + (0xF9EF, "M", "璘"), + (0xF9F0, "M", "藺"), + (0xF9F1, "M", "隣"), + (0xF9F2, "M", "鱗"), + (0xF9F3, "M", "麟"), + (0xF9F4, "M", "林"), + (0xF9F5, "M", "淋"), + (0xF9F6, "M", "臨"), + (0xF9F7, "M", "立"), + (0xF9F8, "M", "笠"), + (0xF9F9, "M", "粒"), + (0xF9FA, "M", "狀"), + (0xF9FB, "M", "炙"), + (0xF9FC, "M", "識"), + (0xF9FD, "M", "什"), + (0xF9FE, "M", "茶"), + (0xF9FF, "M", "刺"), + (0xFA00, "M", "切"), + (0xFA01, "M", "度"), + (0xFA02, "M", "拓"), + (0xFA03, "M", "糖"), + (0xFA04, "M", "宅"), + (0xFA05, "M", "洞"), + (0xFA06, "M", "暴"), + (0xFA07, "M", "輻"), + (0xFA08, "M", "行"), + (0xFA09, "M", "降"), + (0xFA0A, "M", "見"), + (0xFA0B, "M", "廓"), + (0xFA0C, "M", "兀"), + (0xFA0D, "M", "嗀"), + (0xFA0E, "V"), + (0xFA10, "M", "塚"), + (0xFA11, "V"), + (0xFA12, "M", "晴"), + ] + + +def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFA13, "V"), + (0xFA15, "M", "凞"), + (0xFA16, "M", "猪"), + (0xFA17, "M", "益"), + (0xFA18, "M", "礼"), + (0xFA19, "M", "神"), + (0xFA1A, "M", "祥"), + (0xFA1B, "M", "福"), + (0xFA1C, "M", "靖"), + (0xFA1D, "M", "精"), + (0xFA1E, "M", "羽"), + (0xFA1F, "V"), + (0xFA20, "M", "蘒"), + (0xFA21, "V"), + (0xFA22, "M", "諸"), + (0xFA23, "V"), + (0xFA25, "M", "逸"), + (0xFA26, "M", "都"), + (0xFA27, "V"), + (0xFA2A, "M", "飯"), + (0xFA2B, "M", "飼"), + (0xFA2C, "M", "館"), + (0xFA2D, "M", "鶴"), + (0xFA2E, "M", "郞"), + (0xFA2F, "M", "隷"), + (0xFA30, "M", "侮"), + (0xFA31, "M", "僧"), + (0xFA32, "M", "免"), + (0xFA33, "M", "勉"), + (0xFA34, "M", "勤"), + (0xFA35, "M", "卑"), + (0xFA36, "M", "喝"), + (0xFA37, "M", "嘆"), + (0xFA38, "M", "器"), + (0xFA39, "M", "塀"), + (0xFA3A, "M", "墨"), + (0xFA3B, "M", "層"), + (0xFA3C, "M", "屮"), + (0xFA3D, "M", "悔"), + (0xFA3E, "M", "慨"), + (0xFA3F, "M", "憎"), + (0xFA40, "M", "懲"), + (0xFA41, "M", "敏"), + (0xFA42, "M", "既"), + (0xFA43, "M", "暑"), + (0xFA44, "M", "梅"), + (0xFA45, "M", "海"), + (0xFA46, "M", "渚"), + (0xFA47, "M", "漢"), + (0xFA48, "M", "煮"), + (0xFA49, "M", "爫"), + (0xFA4A, "M", "琢"), + (0xFA4B, "M", "碑"), + (0xFA4C, "M", "社"), + (0xFA4D, "M", "祉"), + (0xFA4E, "M", "祈"), + (0xFA4F, "M", "祐"), + (0xFA50, "M", "祖"), + (0xFA51, "M", "祝"), + (0xFA52, "M", "禍"), + (0xFA53, "M", "禎"), + (0xFA54, "M", "穀"), + (0xFA55, "M", "突"), + (0xFA56, "M", "節"), + (0xFA57, "M", "練"), + (0xFA58, "M", "縉"), + (0xFA59, "M", "繁"), + (0xFA5A, 
"M", "署"), + (0xFA5B, "M", "者"), + (0xFA5C, "M", "臭"), + (0xFA5D, "M", "艹"), + (0xFA5F, "M", "著"), + (0xFA60, "M", "褐"), + (0xFA61, "M", "視"), + (0xFA62, "M", "謁"), + (0xFA63, "M", "謹"), + (0xFA64, "M", "賓"), + (0xFA65, "M", "贈"), + (0xFA66, "M", "辶"), + (0xFA67, "M", "逸"), + (0xFA68, "M", "難"), + (0xFA69, "M", "響"), + (0xFA6A, "M", "頻"), + (0xFA6B, "M", "恵"), + (0xFA6C, "M", "𤋮"), + (0xFA6D, "M", "舘"), + (0xFA6E, "X"), + (0xFA70, "M", "並"), + (0xFA71, "M", "况"), + (0xFA72, "M", "全"), + (0xFA73, "M", "侀"), + (0xFA74, "M", "充"), + (0xFA75, "M", "冀"), + (0xFA76, "M", "勇"), + (0xFA77, "M", "勺"), + (0xFA78, "M", "喝"), + (0xFA79, "M", "啕"), + (0xFA7A, "M", "喙"), + (0xFA7B, "M", "嗢"), + (0xFA7C, "M", "塚"), + ] + + +def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFA7D, "M", "墳"), + (0xFA7E, "M", "奄"), + (0xFA7F, "M", "奔"), + (0xFA80, "M", "婢"), + (0xFA81, "M", "嬨"), + (0xFA82, "M", "廒"), + (0xFA83, "M", "廙"), + (0xFA84, "M", "彩"), + (0xFA85, "M", "徭"), + (0xFA86, "M", "惘"), + (0xFA87, "M", "慎"), + (0xFA88, "M", "愈"), + (0xFA89, "M", "憎"), + (0xFA8A, "M", "慠"), + (0xFA8B, "M", "懲"), + (0xFA8C, "M", "戴"), + (0xFA8D, "M", "揄"), + (0xFA8E, "M", "搜"), + (0xFA8F, "M", "摒"), + (0xFA90, "M", "敖"), + (0xFA91, "M", "晴"), + (0xFA92, "M", "朗"), + (0xFA93, "M", "望"), + (0xFA94, "M", "杖"), + (0xFA95, "M", "歹"), + (0xFA96, "M", "殺"), + (0xFA97, "M", "流"), + (0xFA98, "M", "滛"), + (0xFA99, "M", "滋"), + (0xFA9A, "M", "漢"), + (0xFA9B, "M", "瀞"), + (0xFA9C, "M", "煮"), + (0xFA9D, "M", "瞧"), + (0xFA9E, "M", "爵"), + (0xFA9F, "M", "犯"), + (0xFAA0, "M", "猪"), + (0xFAA1, "M", "瑱"), + (0xFAA2, "M", "甆"), + (0xFAA3, "M", "画"), + (0xFAA4, "M", "瘝"), + (0xFAA5, "M", "瘟"), + (0xFAA6, "M", "益"), + (0xFAA7, "M", "盛"), + (0xFAA8, "M", "直"), + (0xFAA9, "M", "睊"), + (0xFAAA, "M", "着"), + (0xFAAB, "M", "磌"), + (0xFAAC, "M", "窱"), + (0xFAAD, "M", "節"), + (0xFAAE, "M", "类"), + (0xFAAF, "M", "絛"), + (0xFAB0, "M", "練"), + (0xFAB1, "M", "缾"), + (0xFAB2, "M", "者"), + (0xFAB3, "M", "荒"), + (0xFAB4, "M", "華"), + (0xFAB5, "M", "蝹"), + (0xFAB6, "M", "襁"), + (0xFAB7, "M", "覆"), + (0xFAB8, "M", "視"), + (0xFAB9, "M", "調"), + (0xFABA, "M", "諸"), + (0xFABB, "M", "請"), + (0xFABC, "M", "謁"), + (0xFABD, "M", "諾"), + (0xFABE, "M", "諭"), + (0xFABF, "M", "謹"), + (0xFAC0, "M", "變"), + (0xFAC1, "M", "贈"), + (0xFAC2, "M", "輸"), + (0xFAC3, "M", "遲"), + (0xFAC4, "M", "醙"), + (0xFAC5, "M", "鉶"), + (0xFAC6, "M", "陼"), + (0xFAC7, "M", "難"), + (0xFAC8, "M", "靖"), + (0xFAC9, "M", "韛"), + (0xFACA, "M", "響"), + (0xFACB, "M", "頋"), + (0xFACC, "M", "頻"), + (0xFACD, "M", "鬒"), + (0xFACE, "M", "龜"), + (0xFACF, "M", "𢡊"), + (0xFAD0, "M", "𢡄"), + (0xFAD1, "M", "𣏕"), + (0xFAD2, "M", "㮝"), + (0xFAD3, "M", "䀘"), + (0xFAD4, "M", "䀹"), + (0xFAD5, "M", "𥉉"), + (0xFAD6, "M", "𥳐"), + (0xFAD7, "M", "𧻓"), + (0xFAD8, "M", "齃"), + (0xFAD9, "M", "龎"), + (0xFADA, "X"), + (0xFB00, "M", "ff"), + (0xFB01, "M", "fi"), + (0xFB02, "M", "fl"), + (0xFB03, "M", "ffi"), + (0xFB04, "M", "ffl"), + (0xFB05, "M", "st"), + ] + + +def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFB07, "X"), + (0xFB13, "M", "մն"), + (0xFB14, "M", "մե"), + (0xFB15, "M", "մի"), + (0xFB16, "M", "վն"), + (0xFB17, "M", "մխ"), + (0xFB18, "X"), + (0xFB1D, "M", "יִ"), + (0xFB1E, "V"), + (0xFB1F, "M", "ײַ"), + (0xFB20, "M", "ע"), + (0xFB21, "M", "א"), + (0xFB22, "M", "ד"), + (0xFB23, "M", "ה"), + (0xFB24, "M", "כ"), + (0xFB25, "M", "ל"), + (0xFB26, "M", "ם"), + (0xFB27, "M", "ר"), + (0xFB28, "M", "ת"), + (0xFB29, "3", "+"), + (0xFB2A, "M", "שׁ"), + 
(0xFB2B, "M", "שׂ"), + (0xFB2C, "M", "שּׁ"), + (0xFB2D, "M", "שּׂ"), + (0xFB2E, "M", "אַ"), + (0xFB2F, "M", "אָ"), + (0xFB30, "M", "אּ"), + (0xFB31, "M", "בּ"), + (0xFB32, "M", "גּ"), + (0xFB33, "M", "דּ"), + (0xFB34, "M", "הּ"), + (0xFB35, "M", "וּ"), + (0xFB36, "M", "זּ"), + (0xFB37, "X"), + (0xFB38, "M", "טּ"), + (0xFB39, "M", "יּ"), + (0xFB3A, "M", "ךּ"), + (0xFB3B, "M", "כּ"), + (0xFB3C, "M", "לּ"), + (0xFB3D, "X"), + (0xFB3E, "M", "מּ"), + (0xFB3F, "X"), + (0xFB40, "M", "נּ"), + (0xFB41, "M", "סּ"), + (0xFB42, "X"), + (0xFB43, "M", "ףּ"), + (0xFB44, "M", "פּ"), + (0xFB45, "X"), + (0xFB46, "M", "צּ"), + (0xFB47, "M", "קּ"), + (0xFB48, "M", "רּ"), + (0xFB49, "M", "שּ"), + (0xFB4A, "M", "תּ"), + (0xFB4B, "M", "וֹ"), + (0xFB4C, "M", "בֿ"), + (0xFB4D, "M", "כֿ"), + (0xFB4E, "M", "פֿ"), + (0xFB4F, "M", "אל"), + (0xFB50, "M", "ٱ"), + (0xFB52, "M", "ٻ"), + (0xFB56, "M", "پ"), + (0xFB5A, "M", "ڀ"), + (0xFB5E, "M", "ٺ"), + (0xFB62, "M", "ٿ"), + (0xFB66, "M", "ٹ"), + (0xFB6A, "M", "ڤ"), + (0xFB6E, "M", "ڦ"), + (0xFB72, "M", "ڄ"), + (0xFB76, "M", "ڃ"), + (0xFB7A, "M", "چ"), + (0xFB7E, "M", "ڇ"), + (0xFB82, "M", "ڍ"), + (0xFB84, "M", "ڌ"), + (0xFB86, "M", "ڎ"), + (0xFB88, "M", "ڈ"), + (0xFB8A, "M", "ژ"), + (0xFB8C, "M", "ڑ"), + (0xFB8E, "M", "ک"), + (0xFB92, "M", "گ"), + (0xFB96, "M", "ڳ"), + (0xFB9A, "M", "ڱ"), + (0xFB9E, "M", "ں"), + (0xFBA0, "M", "ڻ"), + (0xFBA4, "M", "ۀ"), + (0xFBA6, "M", "ہ"), + (0xFBAA, "M", "ھ"), + (0xFBAE, "M", "ے"), + (0xFBB0, "M", "ۓ"), + (0xFBB2, "V"), + (0xFBC3, "X"), + (0xFBD3, "M", "ڭ"), + (0xFBD7, "M", "ۇ"), + (0xFBD9, "M", "ۆ"), + (0xFBDB, "M", "ۈ"), + (0xFBDD, "M", "ۇٴ"), + (0xFBDE, "M", "ۋ"), + (0xFBE0, "M", "ۅ"), + (0xFBE2, "M", "ۉ"), + (0xFBE4, "M", "ې"), + (0xFBE8, "M", "ى"), + ] + + +def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFBEA, "M", "ئا"), + (0xFBEC, "M", "ئە"), + (0xFBEE, "M", "ئو"), + (0xFBF0, "M", "ئۇ"), + (0xFBF2, "M", "ئۆ"), + (0xFBF4, "M", "ئۈ"), + (0xFBF6, "M", "ئې"), + (0xFBF9, "M", "ئى"), + (0xFBFC, "M", "ی"), + (0xFC00, "M", "ئج"), + (0xFC01, "M", "ئح"), + (0xFC02, "M", "ئم"), + (0xFC03, "M", "ئى"), + (0xFC04, "M", "ئي"), + (0xFC05, "M", "بج"), + (0xFC06, "M", "بح"), + (0xFC07, "M", "بخ"), + (0xFC08, "M", "بم"), + (0xFC09, "M", "بى"), + (0xFC0A, "M", "بي"), + (0xFC0B, "M", "تج"), + (0xFC0C, "M", "تح"), + (0xFC0D, "M", "تخ"), + (0xFC0E, "M", "تم"), + (0xFC0F, "M", "تى"), + (0xFC10, "M", "تي"), + (0xFC11, "M", "ثج"), + (0xFC12, "M", "ثم"), + (0xFC13, "M", "ثى"), + (0xFC14, "M", "ثي"), + (0xFC15, "M", "جح"), + (0xFC16, "M", "جم"), + (0xFC17, "M", "حج"), + (0xFC18, "M", "حم"), + (0xFC19, "M", "خج"), + (0xFC1A, "M", "خح"), + (0xFC1B, "M", "خم"), + (0xFC1C, "M", "سج"), + (0xFC1D, "M", "سح"), + (0xFC1E, "M", "سخ"), + (0xFC1F, "M", "سم"), + (0xFC20, "M", "صح"), + (0xFC21, "M", "صم"), + (0xFC22, "M", "ضج"), + (0xFC23, "M", "ضح"), + (0xFC24, "M", "ضخ"), + (0xFC25, "M", "ضم"), + (0xFC26, "M", "طح"), + (0xFC27, "M", "طم"), + (0xFC28, "M", "ظم"), + (0xFC29, "M", "عج"), + (0xFC2A, "M", "عم"), + (0xFC2B, "M", "غج"), + (0xFC2C, "M", "غم"), + (0xFC2D, "M", "فج"), + (0xFC2E, "M", "فح"), + (0xFC2F, "M", "فخ"), + (0xFC30, "M", "فم"), + (0xFC31, "M", "فى"), + (0xFC32, "M", "في"), + (0xFC33, "M", "قح"), + (0xFC34, "M", "قم"), + (0xFC35, "M", "قى"), + (0xFC36, "M", "قي"), + (0xFC37, "M", "كا"), + (0xFC38, "M", "كج"), + (0xFC39, "M", "كح"), + (0xFC3A, "M", "كخ"), + (0xFC3B, "M", "كل"), + (0xFC3C, "M", "كم"), + (0xFC3D, "M", "كى"), + (0xFC3E, "M", "كي"), + (0xFC3F, "M", "لج"), + (0xFC40, "M", "لح"), + (0xFC41, "M", "لخ"), + 
(0xFC42, "M", "لم"), + (0xFC43, "M", "لى"), + (0xFC44, "M", "لي"), + (0xFC45, "M", "مج"), + (0xFC46, "M", "مح"), + (0xFC47, "M", "مخ"), + (0xFC48, "M", "مم"), + (0xFC49, "M", "مى"), + (0xFC4A, "M", "مي"), + (0xFC4B, "M", "نج"), + (0xFC4C, "M", "نح"), + (0xFC4D, "M", "نخ"), + (0xFC4E, "M", "نم"), + (0xFC4F, "M", "نى"), + (0xFC50, "M", "ني"), + (0xFC51, "M", "هج"), + (0xFC52, "M", "هم"), + (0xFC53, "M", "هى"), + (0xFC54, "M", "هي"), + (0xFC55, "M", "يج"), + (0xFC56, "M", "يح"), + (0xFC57, "M", "يخ"), + (0xFC58, "M", "يم"), + (0xFC59, "M", "يى"), + (0xFC5A, "M", "يي"), + ] + + +def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFC5B, "M", "ذٰ"), + (0xFC5C, "M", "رٰ"), + (0xFC5D, "M", "ىٰ"), + (0xFC5E, "3", " ٌّ"), + (0xFC5F, "3", " ٍّ"), + (0xFC60, "3", " َّ"), + (0xFC61, "3", " ُّ"), + (0xFC62, "3", " ِّ"), + (0xFC63, "3", " ّٰ"), + (0xFC64, "M", "ئر"), + (0xFC65, "M", "ئز"), + (0xFC66, "M", "ئم"), + (0xFC67, "M", "ئن"), + (0xFC68, "M", "ئى"), + (0xFC69, "M", "ئي"), + (0xFC6A, "M", "بر"), + (0xFC6B, "M", "بز"), + (0xFC6C, "M", "بم"), + (0xFC6D, "M", "بن"), + (0xFC6E, "M", "بى"), + (0xFC6F, "M", "بي"), + (0xFC70, "M", "تر"), + (0xFC71, "M", "تز"), + (0xFC72, "M", "تم"), + (0xFC73, "M", "تن"), + (0xFC74, "M", "تى"), + (0xFC75, "M", "تي"), + (0xFC76, "M", "ثر"), + (0xFC77, "M", "ثز"), + (0xFC78, "M", "ثم"), + (0xFC79, "M", "ثن"), + (0xFC7A, "M", "ثى"), + (0xFC7B, "M", "ثي"), + (0xFC7C, "M", "فى"), + (0xFC7D, "M", "في"), + (0xFC7E, "M", "قى"), + (0xFC7F, "M", "قي"), + (0xFC80, "M", "كا"), + (0xFC81, "M", "كل"), + (0xFC82, "M", "كم"), + (0xFC83, "M", "كى"), + (0xFC84, "M", "كي"), + (0xFC85, "M", "لم"), + (0xFC86, "M", "لى"), + (0xFC87, "M", "لي"), + (0xFC88, "M", "ما"), + (0xFC89, "M", "مم"), + (0xFC8A, "M", "نر"), + (0xFC8B, "M", "نز"), + (0xFC8C, "M", "نم"), + (0xFC8D, "M", "نن"), + (0xFC8E, "M", "نى"), + (0xFC8F, "M", "ني"), + (0xFC90, "M", "ىٰ"), + (0xFC91, "M", "ير"), + (0xFC92, "M", "يز"), + (0xFC93, "M", "يم"), + (0xFC94, "M", "ين"), + (0xFC95, "M", "يى"), + (0xFC96, "M", "يي"), + (0xFC97, "M", "ئج"), + (0xFC98, "M", "ئح"), + (0xFC99, "M", "ئخ"), + (0xFC9A, "M", "ئم"), + (0xFC9B, "M", "ئه"), + (0xFC9C, "M", "بج"), + (0xFC9D, "M", "بح"), + (0xFC9E, "M", "بخ"), + (0xFC9F, "M", "بم"), + (0xFCA0, "M", "به"), + (0xFCA1, "M", "تج"), + (0xFCA2, "M", "تح"), + (0xFCA3, "M", "تخ"), + (0xFCA4, "M", "تم"), + (0xFCA5, "M", "ته"), + (0xFCA6, "M", "ثم"), + (0xFCA7, "M", "جح"), + (0xFCA8, "M", "جم"), + (0xFCA9, "M", "حج"), + (0xFCAA, "M", "حم"), + (0xFCAB, "M", "خج"), + (0xFCAC, "M", "خم"), + (0xFCAD, "M", "سج"), + (0xFCAE, "M", "سح"), + (0xFCAF, "M", "سخ"), + (0xFCB0, "M", "سم"), + (0xFCB1, "M", "صح"), + (0xFCB2, "M", "صخ"), + (0xFCB3, "M", "صم"), + (0xFCB4, "M", "ضج"), + (0xFCB5, "M", "ضح"), + (0xFCB6, "M", "ضخ"), + (0xFCB7, "M", "ضم"), + (0xFCB8, "M", "طح"), + (0xFCB9, "M", "ظم"), + (0xFCBA, "M", "عج"), + (0xFCBB, "M", "عم"), + (0xFCBC, "M", "غج"), + (0xFCBD, "M", "غم"), + (0xFCBE, "M", "فج"), + ] + + +def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFCBF, "M", "فح"), + (0xFCC0, "M", "فخ"), + (0xFCC1, "M", "فم"), + (0xFCC2, "M", "قح"), + (0xFCC3, "M", "قم"), + (0xFCC4, "M", "كج"), + (0xFCC5, "M", "كح"), + (0xFCC6, "M", "كخ"), + (0xFCC7, "M", "كل"), + (0xFCC8, "M", "كم"), + (0xFCC9, "M", "لج"), + (0xFCCA, "M", "لح"), + (0xFCCB, "M", "لخ"), + (0xFCCC, "M", "لم"), + (0xFCCD, "M", "له"), + (0xFCCE, "M", "مج"), + (0xFCCF, "M", "مح"), + (0xFCD0, "M", "مخ"), + (0xFCD1, "M", "مم"), + (0xFCD2, "M", "نج"), + (0xFCD3, "M", "نح"), + (0xFCD4, 
"M", "نخ"), + (0xFCD5, "M", "نم"), + (0xFCD6, "M", "نه"), + (0xFCD7, "M", "هج"), + (0xFCD8, "M", "هم"), + (0xFCD9, "M", "هٰ"), + (0xFCDA, "M", "يج"), + (0xFCDB, "M", "يح"), + (0xFCDC, "M", "يخ"), + (0xFCDD, "M", "يم"), + (0xFCDE, "M", "يه"), + (0xFCDF, "M", "ئم"), + (0xFCE0, "M", "ئه"), + (0xFCE1, "M", "بم"), + (0xFCE2, "M", "به"), + (0xFCE3, "M", "تم"), + (0xFCE4, "M", "ته"), + (0xFCE5, "M", "ثم"), + (0xFCE6, "M", "ثه"), + (0xFCE7, "M", "سم"), + (0xFCE8, "M", "سه"), + (0xFCE9, "M", "شم"), + (0xFCEA, "M", "شه"), + (0xFCEB, "M", "كل"), + (0xFCEC, "M", "كم"), + (0xFCED, "M", "لم"), + (0xFCEE, "M", "نم"), + (0xFCEF, "M", "نه"), + (0xFCF0, "M", "يم"), + (0xFCF1, "M", "يه"), + (0xFCF2, "M", "ـَّ"), + (0xFCF3, "M", "ـُّ"), + (0xFCF4, "M", "ـِّ"), + (0xFCF5, "M", "طى"), + (0xFCF6, "M", "طي"), + (0xFCF7, "M", "عى"), + (0xFCF8, "M", "عي"), + (0xFCF9, "M", "غى"), + (0xFCFA, "M", "غي"), + (0xFCFB, "M", "سى"), + (0xFCFC, "M", "سي"), + (0xFCFD, "M", "شى"), + (0xFCFE, "M", "شي"), + (0xFCFF, "M", "حى"), + (0xFD00, "M", "حي"), + (0xFD01, "M", "جى"), + (0xFD02, "M", "جي"), + (0xFD03, "M", "خى"), + (0xFD04, "M", "خي"), + (0xFD05, "M", "صى"), + (0xFD06, "M", "صي"), + (0xFD07, "M", "ضى"), + (0xFD08, "M", "ضي"), + (0xFD09, "M", "شج"), + (0xFD0A, "M", "شح"), + (0xFD0B, "M", "شخ"), + (0xFD0C, "M", "شم"), + (0xFD0D, "M", "شر"), + (0xFD0E, "M", "سر"), + (0xFD0F, "M", "صر"), + (0xFD10, "M", "ضر"), + (0xFD11, "M", "طى"), + (0xFD12, "M", "طي"), + (0xFD13, "M", "عى"), + (0xFD14, "M", "عي"), + (0xFD15, "M", "غى"), + (0xFD16, "M", "غي"), + (0xFD17, "M", "سى"), + (0xFD18, "M", "سي"), + (0xFD19, "M", "شى"), + (0xFD1A, "M", "شي"), + (0xFD1B, "M", "حى"), + (0xFD1C, "M", "حي"), + (0xFD1D, "M", "جى"), + (0xFD1E, "M", "جي"), + (0xFD1F, "M", "خى"), + (0xFD20, "M", "خي"), + (0xFD21, "M", "صى"), + (0xFD22, "M", "صي"), + ] + + +def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFD23, "M", "ضى"), + (0xFD24, "M", "ضي"), + (0xFD25, "M", "شج"), + (0xFD26, "M", "شح"), + (0xFD27, "M", "شخ"), + (0xFD28, "M", "شم"), + (0xFD29, "M", "شر"), + (0xFD2A, "M", "سر"), + (0xFD2B, "M", "صر"), + (0xFD2C, "M", "ضر"), + (0xFD2D, "M", "شج"), + (0xFD2E, "M", "شح"), + (0xFD2F, "M", "شخ"), + (0xFD30, "M", "شم"), + (0xFD31, "M", "سه"), + (0xFD32, "M", "شه"), + (0xFD33, "M", "طم"), + (0xFD34, "M", "سج"), + (0xFD35, "M", "سح"), + (0xFD36, "M", "سخ"), + (0xFD37, "M", "شج"), + (0xFD38, "M", "شح"), + (0xFD39, "M", "شخ"), + (0xFD3A, "M", "طم"), + (0xFD3B, "M", "ظم"), + (0xFD3C, "M", "اً"), + (0xFD3E, "V"), + (0xFD50, "M", "تجم"), + (0xFD51, "M", "تحج"), + (0xFD53, "M", "تحم"), + (0xFD54, "M", "تخم"), + (0xFD55, "M", "تمج"), + (0xFD56, "M", "تمح"), + (0xFD57, "M", "تمخ"), + (0xFD58, "M", "جمح"), + (0xFD5A, "M", "حمي"), + (0xFD5B, "M", "حمى"), + (0xFD5C, "M", "سحج"), + (0xFD5D, "M", "سجح"), + (0xFD5E, "M", "سجى"), + (0xFD5F, "M", "سمح"), + (0xFD61, "M", "سمج"), + (0xFD62, "M", "سمم"), + (0xFD64, "M", "صحح"), + (0xFD66, "M", "صمم"), + (0xFD67, "M", "شحم"), + (0xFD69, "M", "شجي"), + (0xFD6A, "M", "شمخ"), + (0xFD6C, "M", "شمم"), + (0xFD6E, "M", "ضحى"), + (0xFD6F, "M", "ضخم"), + (0xFD71, "M", "طمح"), + (0xFD73, "M", "طمم"), + (0xFD74, "M", "طمي"), + (0xFD75, "M", "عجم"), + (0xFD76, "M", "عمم"), + (0xFD78, "M", "عمى"), + (0xFD79, "M", "غمم"), + (0xFD7A, "M", "غمي"), + (0xFD7B, "M", "غمى"), + (0xFD7C, "M", "فخم"), + (0xFD7E, "M", "قمح"), + (0xFD7F, "M", "قمم"), + (0xFD80, "M", "لحم"), + (0xFD81, "M", "لحي"), + (0xFD82, "M", "لحى"), + (0xFD83, "M", "لجج"), + (0xFD85, "M", "لخم"), + (0xFD87, "M", "لمح"), + (0xFD89, "M", "محج"), + 
(0xFD8A, "M", "محم"), + (0xFD8B, "M", "محي"), + (0xFD8C, "M", "مجح"), + (0xFD8D, "M", "مجم"), + (0xFD8E, "M", "مخج"), + (0xFD8F, "M", "مخم"), + (0xFD90, "X"), + (0xFD92, "M", "مجخ"), + (0xFD93, "M", "همج"), + (0xFD94, "M", "همم"), + (0xFD95, "M", "نحم"), + (0xFD96, "M", "نحى"), + (0xFD97, "M", "نجم"), + (0xFD99, "M", "نجى"), + (0xFD9A, "M", "نمي"), + (0xFD9B, "M", "نمى"), + (0xFD9C, "M", "يمم"), + (0xFD9E, "M", "بخي"), + (0xFD9F, "M", "تجي"), + (0xFDA0, "M", "تجى"), + (0xFDA1, "M", "تخي"), + (0xFDA2, "M", "تخى"), + (0xFDA3, "M", "تمي"), + (0xFDA4, "M", "تمى"), + (0xFDA5, "M", "جمي"), + (0xFDA6, "M", "جحى"), + (0xFDA7, "M", "جمى"), + (0xFDA8, "M", "سخى"), + (0xFDA9, "M", "صحي"), + (0xFDAA, "M", "شحي"), + ] + + +def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFDAB, "M", "ضحي"), + (0xFDAC, "M", "لجي"), + (0xFDAD, "M", "لمي"), + (0xFDAE, "M", "يحي"), + (0xFDAF, "M", "يجي"), + (0xFDB0, "M", "يمي"), + (0xFDB1, "M", "ممي"), + (0xFDB2, "M", "قمي"), + (0xFDB3, "M", "نحي"), + (0xFDB4, "M", "قمح"), + (0xFDB5, "M", "لحم"), + (0xFDB6, "M", "عمي"), + (0xFDB7, "M", "كمي"), + (0xFDB8, "M", "نجح"), + (0xFDB9, "M", "مخي"), + (0xFDBA, "M", "لجم"), + (0xFDBB, "M", "كمم"), + (0xFDBC, "M", "لجم"), + (0xFDBD, "M", "نجح"), + (0xFDBE, "M", "جحي"), + (0xFDBF, "M", "حجي"), + (0xFDC0, "M", "مجي"), + (0xFDC1, "M", "فمي"), + (0xFDC2, "M", "بحي"), + (0xFDC3, "M", "كمم"), + (0xFDC4, "M", "عجم"), + (0xFDC5, "M", "صمم"), + (0xFDC6, "M", "سخي"), + (0xFDC7, "M", "نجي"), + (0xFDC8, "X"), + (0xFDCF, "V"), + (0xFDD0, "X"), + (0xFDF0, "M", "صلے"), + (0xFDF1, "M", "قلے"), + (0xFDF2, "M", "الله"), + (0xFDF3, "M", "اكبر"), + (0xFDF4, "M", "محمد"), + (0xFDF5, "M", "صلعم"), + (0xFDF6, "M", "رسول"), + (0xFDF7, "M", "عليه"), + (0xFDF8, "M", "وسلم"), + (0xFDF9, "M", "صلى"), + (0xFDFA, "3", "صلى الله عليه وسلم"), + (0xFDFB, "3", "جل جلاله"), + (0xFDFC, "M", "ریال"), + (0xFDFD, "V"), + (0xFE00, "I"), + (0xFE10, "3", ","), + (0xFE11, "M", "、"), + (0xFE12, "X"), + (0xFE13, "3", ":"), + (0xFE14, "3", ";"), + (0xFE15, "3", "!"), + (0xFE16, "3", "?"), + (0xFE17, "M", "〖"), + (0xFE18, "M", "〗"), + (0xFE19, "X"), + (0xFE20, "V"), + (0xFE30, "X"), + (0xFE31, "M", "—"), + (0xFE32, "M", "–"), + (0xFE33, "3", "_"), + (0xFE35, "3", "("), + (0xFE36, "3", ")"), + (0xFE37, "3", "{"), + (0xFE38, "3", "}"), + (0xFE39, "M", "〔"), + (0xFE3A, "M", "〕"), + (0xFE3B, "M", "【"), + (0xFE3C, "M", "】"), + (0xFE3D, "M", "《"), + (0xFE3E, "M", "》"), + (0xFE3F, "M", "〈"), + (0xFE40, "M", "〉"), + (0xFE41, "M", "「"), + (0xFE42, "M", "」"), + (0xFE43, "M", "『"), + (0xFE44, "M", "』"), + (0xFE45, "V"), + (0xFE47, "3", "["), + (0xFE48, "3", "]"), + (0xFE49, "3", " ̅"), + (0xFE4D, "3", "_"), + (0xFE50, "3", ","), + (0xFE51, "M", "、"), + (0xFE52, "X"), + (0xFE54, "3", ";"), + (0xFE55, "3", ":"), + (0xFE56, "3", "?"), + (0xFE57, "3", "!"), + (0xFE58, "M", "—"), + (0xFE59, "3", "("), + (0xFE5A, "3", ")"), + (0xFE5B, "3", "{"), + (0xFE5C, "3", "}"), + (0xFE5D, "M", "〔"), + (0xFE5E, "M", "〕"), + (0xFE5F, "3", "#"), + (0xFE60, "3", "&"), + (0xFE61, "3", "*"), + ] + + +def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFE62, "3", "+"), + (0xFE63, "M", "-"), + (0xFE64, "3", "<"), + (0xFE65, "3", ">"), + (0xFE66, "3", "="), + (0xFE67, "X"), + (0xFE68, "3", "\\"), + (0xFE69, "3", "$"), + (0xFE6A, "3", "%"), + (0xFE6B, "3", "@"), + (0xFE6C, "X"), + (0xFE70, "3", " ً"), + (0xFE71, "M", "ـً"), + (0xFE72, "3", " ٌ"), + (0xFE73, "V"), + (0xFE74, "3", " ٍ"), + (0xFE75, "X"), + (0xFE76, "3", " َ"), + (0xFE77, "M", "ـَ"), + 
(0xFE78, "3", " ُ"), + (0xFE79, "M", "ـُ"), + (0xFE7A, "3", " ِ"), + (0xFE7B, "M", "ـِ"), + (0xFE7C, "3", " ّ"), + (0xFE7D, "M", "ـّ"), + (0xFE7E, "3", " ْ"), + (0xFE7F, "M", "ـْ"), + (0xFE80, "M", "ء"), + (0xFE81, "M", "آ"), + (0xFE83, "M", "أ"), + (0xFE85, "M", "ؤ"), + (0xFE87, "M", "إ"), + (0xFE89, "M", "ئ"), + (0xFE8D, "M", "ا"), + (0xFE8F, "M", "ب"), + (0xFE93, "M", "ة"), + (0xFE95, "M", "ت"), + (0xFE99, "M", "ث"), + (0xFE9D, "M", "ج"), + (0xFEA1, "M", "ح"), + (0xFEA5, "M", "خ"), + (0xFEA9, "M", "د"), + (0xFEAB, "M", "ذ"), + (0xFEAD, "M", "ر"), + (0xFEAF, "M", "ز"), + (0xFEB1, "M", "س"), + (0xFEB5, "M", "ش"), + (0xFEB9, "M", "ص"), + (0xFEBD, "M", "ض"), + (0xFEC1, "M", "ط"), + (0xFEC5, "M", "ظ"), + (0xFEC9, "M", "ع"), + (0xFECD, "M", "غ"), + (0xFED1, "M", "ف"), + (0xFED5, "M", "ق"), + (0xFED9, "M", "ك"), + (0xFEDD, "M", "ل"), + (0xFEE1, "M", "م"), + (0xFEE5, "M", "ن"), + (0xFEE9, "M", "ه"), + (0xFEED, "M", "و"), + (0xFEEF, "M", "ى"), + (0xFEF1, "M", "ي"), + (0xFEF5, "M", "لآ"), + (0xFEF7, "M", "لأ"), + (0xFEF9, "M", "لإ"), + (0xFEFB, "M", "لا"), + (0xFEFD, "X"), + (0xFEFF, "I"), + (0xFF00, "X"), + (0xFF01, "3", "!"), + (0xFF02, "3", '"'), + (0xFF03, "3", "#"), + (0xFF04, "3", "$"), + (0xFF05, "3", "%"), + (0xFF06, "3", "&"), + (0xFF07, "3", "'"), + (0xFF08, "3", "("), + (0xFF09, "3", ")"), + (0xFF0A, "3", "*"), + (0xFF0B, "3", "+"), + (0xFF0C, "3", ","), + (0xFF0D, "M", "-"), + (0xFF0E, "M", "."), + (0xFF0F, "3", "/"), + (0xFF10, "M", "0"), + (0xFF11, "M", "1"), + (0xFF12, "M", "2"), + (0xFF13, "M", "3"), + (0xFF14, "M", "4"), + (0xFF15, "M", "5"), + (0xFF16, "M", "6"), + (0xFF17, "M", "7"), + (0xFF18, "M", "8"), + (0xFF19, "M", "9"), + (0xFF1A, "3", ":"), + (0xFF1B, "3", ";"), + (0xFF1C, "3", "<"), + (0xFF1D, "3", "="), + (0xFF1E, "3", ">"), + ] + + +def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFF1F, "3", "?"), + (0xFF20, "3", "@"), + (0xFF21, "M", "a"), + (0xFF22, "M", "b"), + (0xFF23, "M", "c"), + (0xFF24, "M", "d"), + (0xFF25, "M", "e"), + (0xFF26, "M", "f"), + (0xFF27, "M", "g"), + (0xFF28, "M", "h"), + (0xFF29, "M", "i"), + (0xFF2A, "M", "j"), + (0xFF2B, "M", "k"), + (0xFF2C, "M", "l"), + (0xFF2D, "M", "m"), + (0xFF2E, "M", "n"), + (0xFF2F, "M", "o"), + (0xFF30, "M", "p"), + (0xFF31, "M", "q"), + (0xFF32, "M", "r"), + (0xFF33, "M", "s"), + (0xFF34, "M", "t"), + (0xFF35, "M", "u"), + (0xFF36, "M", "v"), + (0xFF37, "M", "w"), + (0xFF38, "M", "x"), + (0xFF39, "M", "y"), + (0xFF3A, "M", "z"), + (0xFF3B, "3", "["), + (0xFF3C, "3", "\\"), + (0xFF3D, "3", "]"), + (0xFF3E, "3", "^"), + (0xFF3F, "3", "_"), + (0xFF40, "3", "`"), + (0xFF41, "M", "a"), + (0xFF42, "M", "b"), + (0xFF43, "M", "c"), + (0xFF44, "M", "d"), + (0xFF45, "M", "e"), + (0xFF46, "M", "f"), + (0xFF47, "M", "g"), + (0xFF48, "M", "h"), + (0xFF49, "M", "i"), + (0xFF4A, "M", "j"), + (0xFF4B, "M", "k"), + (0xFF4C, "M", "l"), + (0xFF4D, "M", "m"), + (0xFF4E, "M", "n"), + (0xFF4F, "M", "o"), + (0xFF50, "M", "p"), + (0xFF51, "M", "q"), + (0xFF52, "M", "r"), + (0xFF53, "M", "s"), + (0xFF54, "M", "t"), + (0xFF55, "M", "u"), + (0xFF56, "M", "v"), + (0xFF57, "M", "w"), + (0xFF58, "M", "x"), + (0xFF59, "M", "y"), + (0xFF5A, "M", "z"), + (0xFF5B, "3", "{"), + (0xFF5C, "3", "|"), + (0xFF5D, "3", "}"), + (0xFF5E, "3", "~"), + (0xFF5F, "M", "⦅"), + (0xFF60, "M", "⦆"), + (0xFF61, "M", "."), + (0xFF62, "M", "「"), + (0xFF63, "M", "」"), + (0xFF64, "M", "、"), + (0xFF65, "M", "・"), + (0xFF66, "M", "ヲ"), + (0xFF67, "M", "ァ"), + (0xFF68, "M", "ィ"), + (0xFF69, "M", "ゥ"), + (0xFF6A, "M", "ェ"), + (0xFF6B, "M", 
"ォ"), + (0xFF6C, "M", "ャ"), + (0xFF6D, "M", "ュ"), + (0xFF6E, "M", "ョ"), + (0xFF6F, "M", "ッ"), + (0xFF70, "M", "ー"), + (0xFF71, "M", "ア"), + (0xFF72, "M", "イ"), + (0xFF73, "M", "ウ"), + (0xFF74, "M", "エ"), + (0xFF75, "M", "オ"), + (0xFF76, "M", "カ"), + (0xFF77, "M", "キ"), + (0xFF78, "M", "ク"), + (0xFF79, "M", "ケ"), + (0xFF7A, "M", "コ"), + (0xFF7B, "M", "サ"), + (0xFF7C, "M", "シ"), + (0xFF7D, "M", "ス"), + (0xFF7E, "M", "セ"), + (0xFF7F, "M", "ソ"), + (0xFF80, "M", "タ"), + (0xFF81, "M", "チ"), + (0xFF82, "M", "ツ"), + ] + + +def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFF83, "M", "テ"), + (0xFF84, "M", "ト"), + (0xFF85, "M", "ナ"), + (0xFF86, "M", "ニ"), + (0xFF87, "M", "ヌ"), + (0xFF88, "M", "ネ"), + (0xFF89, "M", "ノ"), + (0xFF8A, "M", "ハ"), + (0xFF8B, "M", "ヒ"), + (0xFF8C, "M", "フ"), + (0xFF8D, "M", "ヘ"), + (0xFF8E, "M", "ホ"), + (0xFF8F, "M", "マ"), + (0xFF90, "M", "ミ"), + (0xFF91, "M", "ム"), + (0xFF92, "M", "メ"), + (0xFF93, "M", "モ"), + (0xFF94, "M", "ヤ"), + (0xFF95, "M", "ユ"), + (0xFF96, "M", "ヨ"), + (0xFF97, "M", "ラ"), + (0xFF98, "M", "リ"), + (0xFF99, "M", "ル"), + (0xFF9A, "M", "レ"), + (0xFF9B, "M", "ロ"), + (0xFF9C, "M", "ワ"), + (0xFF9D, "M", "ン"), + (0xFF9E, "M", "゙"), + (0xFF9F, "M", "゚"), + (0xFFA0, "X"), + (0xFFA1, "M", "ᄀ"), + (0xFFA2, "M", "ᄁ"), + (0xFFA3, "M", "ᆪ"), + (0xFFA4, "M", "ᄂ"), + (0xFFA5, "M", "ᆬ"), + (0xFFA6, "M", "ᆭ"), + (0xFFA7, "M", "ᄃ"), + (0xFFA8, "M", "ᄄ"), + (0xFFA9, "M", "ᄅ"), + (0xFFAA, "M", "ᆰ"), + (0xFFAB, "M", "ᆱ"), + (0xFFAC, "M", "ᆲ"), + (0xFFAD, "M", "ᆳ"), + (0xFFAE, "M", "ᆴ"), + (0xFFAF, "M", "ᆵ"), + (0xFFB0, "M", "ᄚ"), + (0xFFB1, "M", "ᄆ"), + (0xFFB2, "M", "ᄇ"), + (0xFFB3, "M", "ᄈ"), + (0xFFB4, "M", "ᄡ"), + (0xFFB5, "M", "ᄉ"), + (0xFFB6, "M", "ᄊ"), + (0xFFB7, "M", "ᄋ"), + (0xFFB8, "M", "ᄌ"), + (0xFFB9, "M", "ᄍ"), + (0xFFBA, "M", "ᄎ"), + (0xFFBB, "M", "ᄏ"), + (0xFFBC, "M", "ᄐ"), + (0xFFBD, "M", "ᄑ"), + (0xFFBE, "M", "ᄒ"), + (0xFFBF, "X"), + (0xFFC2, "M", "ᅡ"), + (0xFFC3, "M", "ᅢ"), + (0xFFC4, "M", "ᅣ"), + (0xFFC5, "M", "ᅤ"), + (0xFFC6, "M", "ᅥ"), + (0xFFC7, "M", "ᅦ"), + (0xFFC8, "X"), + (0xFFCA, "M", "ᅧ"), + (0xFFCB, "M", "ᅨ"), + (0xFFCC, "M", "ᅩ"), + (0xFFCD, "M", "ᅪ"), + (0xFFCE, "M", "ᅫ"), + (0xFFCF, "M", "ᅬ"), + (0xFFD0, "X"), + (0xFFD2, "M", "ᅭ"), + (0xFFD3, "M", "ᅮ"), + (0xFFD4, "M", "ᅯ"), + (0xFFD5, "M", "ᅰ"), + (0xFFD6, "M", "ᅱ"), + (0xFFD7, "M", "ᅲ"), + (0xFFD8, "X"), + (0xFFDA, "M", "ᅳ"), + (0xFFDB, "M", "ᅴ"), + (0xFFDC, "M", "ᅵ"), + (0xFFDD, "X"), + (0xFFE0, "M", "¢"), + (0xFFE1, "M", "£"), + (0xFFE2, "M", "¬"), + (0xFFE3, "3", " ̄"), + (0xFFE4, "M", "¦"), + (0xFFE5, "M", "¥"), + (0xFFE6, "M", "₩"), + (0xFFE7, "X"), + (0xFFE8, "M", "│"), + (0xFFE9, "M", "←"), + (0xFFEA, "M", "↑"), + (0xFFEB, "M", "→"), + (0xFFEC, "M", "↓"), + (0xFFED, "M", "■"), + ] + + +def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFFEE, "M", "○"), + (0xFFEF, "X"), + (0x10000, "V"), + (0x1000C, "X"), + (0x1000D, "V"), + (0x10027, "X"), + (0x10028, "V"), + (0x1003B, "X"), + (0x1003C, "V"), + (0x1003E, "X"), + (0x1003F, "V"), + (0x1004E, "X"), + (0x10050, "V"), + (0x1005E, "X"), + (0x10080, "V"), + (0x100FB, "X"), + (0x10100, "V"), + (0x10103, "X"), + (0x10107, "V"), + (0x10134, "X"), + (0x10137, "V"), + (0x1018F, "X"), + (0x10190, "V"), + (0x1019D, "X"), + (0x101A0, "V"), + (0x101A1, "X"), + (0x101D0, "V"), + (0x101FE, "X"), + (0x10280, "V"), + (0x1029D, "X"), + (0x102A0, "V"), + (0x102D1, "X"), + (0x102E0, "V"), + (0x102FC, "X"), + (0x10300, "V"), + (0x10324, "X"), + (0x1032D, "V"), + (0x1034B, "X"), + (0x10350, 
"V"), + (0x1037B, "X"), + (0x10380, "V"), + (0x1039E, "X"), + (0x1039F, "V"), + (0x103C4, "X"), + (0x103C8, "V"), + (0x103D6, "X"), + (0x10400, "M", "𐐨"), + (0x10401, "M", "𐐩"), + (0x10402, "M", "𐐪"), + (0x10403, "M", "𐐫"), + (0x10404, "M", "𐐬"), + (0x10405, "M", "𐐭"), + (0x10406, "M", "𐐮"), + (0x10407, "M", "𐐯"), + (0x10408, "M", "𐐰"), + (0x10409, "M", "𐐱"), + (0x1040A, "M", "𐐲"), + (0x1040B, "M", "𐐳"), + (0x1040C, "M", "𐐴"), + (0x1040D, "M", "𐐵"), + (0x1040E, "M", "𐐶"), + (0x1040F, "M", "𐐷"), + (0x10410, "M", "𐐸"), + (0x10411, "M", "𐐹"), + (0x10412, "M", "𐐺"), + (0x10413, "M", "𐐻"), + (0x10414, "M", "𐐼"), + (0x10415, "M", "𐐽"), + (0x10416, "M", "𐐾"), + (0x10417, "M", "𐐿"), + (0x10418, "M", "𐑀"), + (0x10419, "M", "𐑁"), + (0x1041A, "M", "𐑂"), + (0x1041B, "M", "𐑃"), + (0x1041C, "M", "𐑄"), + (0x1041D, "M", "𐑅"), + (0x1041E, "M", "𐑆"), + (0x1041F, "M", "𐑇"), + (0x10420, "M", "𐑈"), + (0x10421, "M", "𐑉"), + (0x10422, "M", "𐑊"), + (0x10423, "M", "𐑋"), + (0x10424, "M", "𐑌"), + (0x10425, "M", "𐑍"), + (0x10426, "M", "𐑎"), + (0x10427, "M", "𐑏"), + (0x10428, "V"), + (0x1049E, "X"), + (0x104A0, "V"), + (0x104AA, "X"), + (0x104B0, "M", "𐓘"), + (0x104B1, "M", "𐓙"), + (0x104B2, "M", "𐓚"), + (0x104B3, "M", "𐓛"), + (0x104B4, "M", "𐓜"), + (0x104B5, "M", "𐓝"), + (0x104B6, "M", "𐓞"), + (0x104B7, "M", "𐓟"), + (0x104B8, "M", "𐓠"), + (0x104B9, "M", "𐓡"), + ] + + +def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x104BA, "M", "𐓢"), + (0x104BB, "M", "𐓣"), + (0x104BC, "M", "𐓤"), + (0x104BD, "M", "𐓥"), + (0x104BE, "M", "𐓦"), + (0x104BF, "M", "𐓧"), + (0x104C0, "M", "𐓨"), + (0x104C1, "M", "𐓩"), + (0x104C2, "M", "𐓪"), + (0x104C3, "M", "𐓫"), + (0x104C4, "M", "𐓬"), + (0x104C5, "M", "𐓭"), + (0x104C6, "M", "𐓮"), + (0x104C7, "M", "𐓯"), + (0x104C8, "M", "𐓰"), + (0x104C9, "M", "𐓱"), + (0x104CA, "M", "𐓲"), + (0x104CB, "M", "𐓳"), + (0x104CC, "M", "𐓴"), + (0x104CD, "M", "𐓵"), + (0x104CE, "M", "𐓶"), + (0x104CF, "M", "𐓷"), + (0x104D0, "M", "𐓸"), + (0x104D1, "M", "𐓹"), + (0x104D2, "M", "𐓺"), + (0x104D3, "M", "𐓻"), + (0x104D4, "X"), + (0x104D8, "V"), + (0x104FC, "X"), + (0x10500, "V"), + (0x10528, "X"), + (0x10530, "V"), + (0x10564, "X"), + (0x1056F, "V"), + (0x10570, "M", "𐖗"), + (0x10571, "M", "𐖘"), + (0x10572, "M", "𐖙"), + (0x10573, "M", "𐖚"), + (0x10574, "M", "𐖛"), + (0x10575, "M", "𐖜"), + (0x10576, "M", "𐖝"), + (0x10577, "M", "𐖞"), + (0x10578, "M", "𐖟"), + (0x10579, "M", "𐖠"), + (0x1057A, "M", "𐖡"), + (0x1057B, "X"), + (0x1057C, "M", "𐖣"), + (0x1057D, "M", "𐖤"), + (0x1057E, "M", "𐖥"), + (0x1057F, "M", "𐖦"), + (0x10580, "M", "𐖧"), + (0x10581, "M", "𐖨"), + (0x10582, "M", "𐖩"), + (0x10583, "M", "𐖪"), + (0x10584, "M", "𐖫"), + (0x10585, "M", "𐖬"), + (0x10586, "M", "𐖭"), + (0x10587, "M", "𐖮"), + (0x10588, "M", "𐖯"), + (0x10589, "M", "𐖰"), + (0x1058A, "M", "𐖱"), + (0x1058B, "X"), + (0x1058C, "M", "𐖳"), + (0x1058D, "M", "𐖴"), + (0x1058E, "M", "𐖵"), + (0x1058F, "M", "𐖶"), + (0x10590, "M", "𐖷"), + (0x10591, "M", "𐖸"), + (0x10592, "M", "𐖹"), + (0x10593, "X"), + (0x10594, "M", "𐖻"), + (0x10595, "M", "𐖼"), + (0x10596, "X"), + (0x10597, "V"), + (0x105A2, "X"), + (0x105A3, "V"), + (0x105B2, "X"), + (0x105B3, "V"), + (0x105BA, "X"), + (0x105BB, "V"), + (0x105BD, "X"), + (0x10600, "V"), + (0x10737, "X"), + (0x10740, "V"), + (0x10756, "X"), + (0x10760, "V"), + (0x10768, "X"), + (0x10780, "V"), + (0x10781, "M", "ː"), + (0x10782, "M", "ˑ"), + (0x10783, "M", "æ"), + (0x10784, "M", "ʙ"), + (0x10785, "M", "ɓ"), + (0x10786, "X"), + (0x10787, "M", "ʣ"), + (0x10788, "M", "ꭦ"), + (0x10789, "M", "ʥ"), + (0x1078A, "M", 
"ʤ"), + (0x1078B, "M", "ɖ"), + (0x1078C, "M", "ɗ"), + ] + + +def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1078D, "M", "ᶑ"), + (0x1078E, "M", "ɘ"), + (0x1078F, "M", "ɞ"), + (0x10790, "M", "ʩ"), + (0x10791, "M", "ɤ"), + (0x10792, "M", "ɢ"), + (0x10793, "M", "ɠ"), + (0x10794, "M", "ʛ"), + (0x10795, "M", "ħ"), + (0x10796, "M", "ʜ"), + (0x10797, "M", "ɧ"), + (0x10798, "M", "ʄ"), + (0x10799, "M", "ʪ"), + (0x1079A, "M", "ʫ"), + (0x1079B, "M", "ɬ"), + (0x1079C, "M", "𝼄"), + (0x1079D, "M", "ꞎ"), + (0x1079E, "M", "ɮ"), + (0x1079F, "M", "𝼅"), + (0x107A0, "M", "ʎ"), + (0x107A1, "M", "𝼆"), + (0x107A2, "M", "ø"), + (0x107A3, "M", "ɶ"), + (0x107A4, "M", "ɷ"), + (0x107A5, "M", "q"), + (0x107A6, "M", "ɺ"), + (0x107A7, "M", "𝼈"), + (0x107A8, "M", "ɽ"), + (0x107A9, "M", "ɾ"), + (0x107AA, "M", "ʀ"), + (0x107AB, "M", "ʨ"), + (0x107AC, "M", "ʦ"), + (0x107AD, "M", "ꭧ"), + (0x107AE, "M", "ʧ"), + (0x107AF, "M", "ʈ"), + (0x107B0, "M", "ⱱ"), + (0x107B1, "X"), + (0x107B2, "M", "ʏ"), + (0x107B3, "M", "ʡ"), + (0x107B4, "M", "ʢ"), + (0x107B5, "M", "ʘ"), + (0x107B6, "M", "ǀ"), + (0x107B7, "M", "ǁ"), + (0x107B8, "M", "ǂ"), + (0x107B9, "M", "𝼊"), + (0x107BA, "M", "𝼞"), + (0x107BB, "X"), + (0x10800, "V"), + (0x10806, "X"), + (0x10808, "V"), + (0x10809, "X"), + (0x1080A, "V"), + (0x10836, "X"), + (0x10837, "V"), + (0x10839, "X"), + (0x1083C, "V"), + (0x1083D, "X"), + (0x1083F, "V"), + (0x10856, "X"), + (0x10857, "V"), + (0x1089F, "X"), + (0x108A7, "V"), + (0x108B0, "X"), + (0x108E0, "V"), + (0x108F3, "X"), + (0x108F4, "V"), + (0x108F6, "X"), + (0x108FB, "V"), + (0x1091C, "X"), + (0x1091F, "V"), + (0x1093A, "X"), + (0x1093F, "V"), + (0x10940, "X"), + (0x10980, "V"), + (0x109B8, "X"), + (0x109BC, "V"), + (0x109D0, "X"), + (0x109D2, "V"), + (0x10A04, "X"), + (0x10A05, "V"), + (0x10A07, "X"), + (0x10A0C, "V"), + (0x10A14, "X"), + (0x10A15, "V"), + (0x10A18, "X"), + (0x10A19, "V"), + (0x10A36, "X"), + (0x10A38, "V"), + (0x10A3B, "X"), + (0x10A3F, "V"), + (0x10A49, "X"), + (0x10A50, "V"), + (0x10A59, "X"), + (0x10A60, "V"), + (0x10AA0, "X"), + (0x10AC0, "V"), + (0x10AE7, "X"), + (0x10AEB, "V"), + (0x10AF7, "X"), + (0x10B00, "V"), + ] + + +def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x10B36, "X"), + (0x10B39, "V"), + (0x10B56, "X"), + (0x10B58, "V"), + (0x10B73, "X"), + (0x10B78, "V"), + (0x10B92, "X"), + (0x10B99, "V"), + (0x10B9D, "X"), + (0x10BA9, "V"), + (0x10BB0, "X"), + (0x10C00, "V"), + (0x10C49, "X"), + (0x10C80, "M", "𐳀"), + (0x10C81, "M", "𐳁"), + (0x10C82, "M", "𐳂"), + (0x10C83, "M", "𐳃"), + (0x10C84, "M", "𐳄"), + (0x10C85, "M", "𐳅"), + (0x10C86, "M", "𐳆"), + (0x10C87, "M", "𐳇"), + (0x10C88, "M", "𐳈"), + (0x10C89, "M", "𐳉"), + (0x10C8A, "M", "𐳊"), + (0x10C8B, "M", "𐳋"), + (0x10C8C, "M", "𐳌"), + (0x10C8D, "M", "𐳍"), + (0x10C8E, "M", "𐳎"), + (0x10C8F, "M", "𐳏"), + (0x10C90, "M", "𐳐"), + (0x10C91, "M", "𐳑"), + (0x10C92, "M", "𐳒"), + (0x10C93, "M", "𐳓"), + (0x10C94, "M", "𐳔"), + (0x10C95, "M", "𐳕"), + (0x10C96, "M", "𐳖"), + (0x10C97, "M", "𐳗"), + (0x10C98, "M", "𐳘"), + (0x10C99, "M", "𐳙"), + (0x10C9A, "M", "𐳚"), + (0x10C9B, "M", "𐳛"), + (0x10C9C, "M", "𐳜"), + (0x10C9D, "M", "𐳝"), + (0x10C9E, "M", "𐳞"), + (0x10C9F, "M", "𐳟"), + (0x10CA0, "M", "𐳠"), + (0x10CA1, "M", "𐳡"), + (0x10CA2, "M", "𐳢"), + (0x10CA3, "M", "𐳣"), + (0x10CA4, "M", "𐳤"), + (0x10CA5, "M", "𐳥"), + (0x10CA6, "M", "𐳦"), + (0x10CA7, "M", "𐳧"), + (0x10CA8, "M", "𐳨"), + (0x10CA9, "M", "𐳩"), + (0x10CAA, "M", "𐳪"), + (0x10CAB, "M", "𐳫"), + (0x10CAC, "M", "𐳬"), + (0x10CAD, "M", "𐳭"), + 
(0x10CAE, "M", "𐳮"), + (0x10CAF, "M", "𐳯"), + (0x10CB0, "M", "𐳰"), + (0x10CB1, "M", "𐳱"), + (0x10CB2, "M", "𐳲"), + (0x10CB3, "X"), + (0x10CC0, "V"), + (0x10CF3, "X"), + (0x10CFA, "V"), + (0x10D28, "X"), + (0x10D30, "V"), + (0x10D3A, "X"), + (0x10E60, "V"), + (0x10E7F, "X"), + (0x10E80, "V"), + (0x10EAA, "X"), + (0x10EAB, "V"), + (0x10EAE, "X"), + (0x10EB0, "V"), + (0x10EB2, "X"), + (0x10EFD, "V"), + (0x10F28, "X"), + (0x10F30, "V"), + (0x10F5A, "X"), + (0x10F70, "V"), + (0x10F8A, "X"), + (0x10FB0, "V"), + (0x10FCC, "X"), + (0x10FE0, "V"), + (0x10FF7, "X"), + (0x11000, "V"), + (0x1104E, "X"), + (0x11052, "V"), + (0x11076, "X"), + (0x1107F, "V"), + (0x110BD, "X"), + (0x110BE, "V"), + (0x110C3, "X"), + (0x110D0, "V"), + (0x110E9, "X"), + (0x110F0, "V"), + ] + + +def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x110FA, "X"), + (0x11100, "V"), + (0x11135, "X"), + (0x11136, "V"), + (0x11148, "X"), + (0x11150, "V"), + (0x11177, "X"), + (0x11180, "V"), + (0x111E0, "X"), + (0x111E1, "V"), + (0x111F5, "X"), + (0x11200, "V"), + (0x11212, "X"), + (0x11213, "V"), + (0x11242, "X"), + (0x11280, "V"), + (0x11287, "X"), + (0x11288, "V"), + (0x11289, "X"), + (0x1128A, "V"), + (0x1128E, "X"), + (0x1128F, "V"), + (0x1129E, "X"), + (0x1129F, "V"), + (0x112AA, "X"), + (0x112B0, "V"), + (0x112EB, "X"), + (0x112F0, "V"), + (0x112FA, "X"), + (0x11300, "V"), + (0x11304, "X"), + (0x11305, "V"), + (0x1130D, "X"), + (0x1130F, "V"), + (0x11311, "X"), + (0x11313, "V"), + (0x11329, "X"), + (0x1132A, "V"), + (0x11331, "X"), + (0x11332, "V"), + (0x11334, "X"), + (0x11335, "V"), + (0x1133A, "X"), + (0x1133B, "V"), + (0x11345, "X"), + (0x11347, "V"), + (0x11349, "X"), + (0x1134B, "V"), + (0x1134E, "X"), + (0x11350, "V"), + (0x11351, "X"), + (0x11357, "V"), + (0x11358, "X"), + (0x1135D, "V"), + (0x11364, "X"), + (0x11366, "V"), + (0x1136D, "X"), + (0x11370, "V"), + (0x11375, "X"), + (0x11400, "V"), + (0x1145C, "X"), + (0x1145D, "V"), + (0x11462, "X"), + (0x11480, "V"), + (0x114C8, "X"), + (0x114D0, "V"), + (0x114DA, "X"), + (0x11580, "V"), + (0x115B6, "X"), + (0x115B8, "V"), + (0x115DE, "X"), + (0x11600, "V"), + (0x11645, "X"), + (0x11650, "V"), + (0x1165A, "X"), + (0x11660, "V"), + (0x1166D, "X"), + (0x11680, "V"), + (0x116BA, "X"), + (0x116C0, "V"), + (0x116CA, "X"), + (0x11700, "V"), + (0x1171B, "X"), + (0x1171D, "V"), + (0x1172C, "X"), + (0x11730, "V"), + (0x11747, "X"), + (0x11800, "V"), + (0x1183C, "X"), + (0x118A0, "M", "𑣀"), + (0x118A1, "M", "𑣁"), + (0x118A2, "M", "𑣂"), + (0x118A3, "M", "𑣃"), + (0x118A4, "M", "𑣄"), + (0x118A5, "M", "𑣅"), + (0x118A6, "M", "𑣆"), + (0x118A7, "M", "𑣇"), + (0x118A8, "M", "𑣈"), + (0x118A9, "M", "𑣉"), + (0x118AA, "M", "𑣊"), + ] + + +def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x118AB, "M", "𑣋"), + (0x118AC, "M", "𑣌"), + (0x118AD, "M", "𑣍"), + (0x118AE, "M", "𑣎"), + (0x118AF, "M", "𑣏"), + (0x118B0, "M", "𑣐"), + (0x118B1, "M", "𑣑"), + (0x118B2, "M", "𑣒"), + (0x118B3, "M", "𑣓"), + (0x118B4, "M", "𑣔"), + (0x118B5, "M", "𑣕"), + (0x118B6, "M", "𑣖"), + (0x118B7, "M", "𑣗"), + (0x118B8, "M", "𑣘"), + (0x118B9, "M", "𑣙"), + (0x118BA, "M", "𑣚"), + (0x118BB, "M", "𑣛"), + (0x118BC, "M", "𑣜"), + (0x118BD, "M", "𑣝"), + (0x118BE, "M", "𑣞"), + (0x118BF, "M", "𑣟"), + (0x118C0, "V"), + (0x118F3, "X"), + (0x118FF, "V"), + (0x11907, "X"), + (0x11909, "V"), + (0x1190A, "X"), + (0x1190C, "V"), + (0x11914, "X"), + (0x11915, "V"), + (0x11917, "X"), + (0x11918, "V"), + (0x11936, "X"), + (0x11937, "V"), + (0x11939, "X"), + (0x1193B, "V"), + 
(0x11947, "X"), + (0x11950, "V"), + (0x1195A, "X"), + (0x119A0, "V"), + (0x119A8, "X"), + (0x119AA, "V"), + (0x119D8, "X"), + (0x119DA, "V"), + (0x119E5, "X"), + (0x11A00, "V"), + (0x11A48, "X"), + (0x11A50, "V"), + (0x11AA3, "X"), + (0x11AB0, "V"), + (0x11AF9, "X"), + (0x11B00, "V"), + (0x11B0A, "X"), + (0x11C00, "V"), + (0x11C09, "X"), + (0x11C0A, "V"), + (0x11C37, "X"), + (0x11C38, "V"), + (0x11C46, "X"), + (0x11C50, "V"), + (0x11C6D, "X"), + (0x11C70, "V"), + (0x11C90, "X"), + (0x11C92, "V"), + (0x11CA8, "X"), + (0x11CA9, "V"), + (0x11CB7, "X"), + (0x11D00, "V"), + (0x11D07, "X"), + (0x11D08, "V"), + (0x11D0A, "X"), + (0x11D0B, "V"), + (0x11D37, "X"), + (0x11D3A, "V"), + (0x11D3B, "X"), + (0x11D3C, "V"), + (0x11D3E, "X"), + (0x11D3F, "V"), + (0x11D48, "X"), + (0x11D50, "V"), + (0x11D5A, "X"), + (0x11D60, "V"), + (0x11D66, "X"), + (0x11D67, "V"), + (0x11D69, "X"), + (0x11D6A, "V"), + (0x11D8F, "X"), + (0x11D90, "V"), + (0x11D92, "X"), + (0x11D93, "V"), + (0x11D99, "X"), + (0x11DA0, "V"), + (0x11DAA, "X"), + (0x11EE0, "V"), + (0x11EF9, "X"), + (0x11F00, "V"), + (0x11F11, "X"), + (0x11F12, "V"), + (0x11F3B, "X"), + (0x11F3E, "V"), + ] + + +def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x11F5A, "X"), + (0x11FB0, "V"), + (0x11FB1, "X"), + (0x11FC0, "V"), + (0x11FF2, "X"), + (0x11FFF, "V"), + (0x1239A, "X"), + (0x12400, "V"), + (0x1246F, "X"), + (0x12470, "V"), + (0x12475, "X"), + (0x12480, "V"), + (0x12544, "X"), + (0x12F90, "V"), + (0x12FF3, "X"), + (0x13000, "V"), + (0x13430, "X"), + (0x13440, "V"), + (0x13456, "X"), + (0x14400, "V"), + (0x14647, "X"), + (0x16800, "V"), + (0x16A39, "X"), + (0x16A40, "V"), + (0x16A5F, "X"), + (0x16A60, "V"), + (0x16A6A, "X"), + (0x16A6E, "V"), + (0x16ABF, "X"), + (0x16AC0, "V"), + (0x16ACA, "X"), + (0x16AD0, "V"), + (0x16AEE, "X"), + (0x16AF0, "V"), + (0x16AF6, "X"), + (0x16B00, "V"), + (0x16B46, "X"), + (0x16B50, "V"), + (0x16B5A, "X"), + (0x16B5B, "V"), + (0x16B62, "X"), + (0x16B63, "V"), + (0x16B78, "X"), + (0x16B7D, "V"), + (0x16B90, "X"), + (0x16E40, "M", "𖹠"), + (0x16E41, "M", "𖹡"), + (0x16E42, "M", "𖹢"), + (0x16E43, "M", "𖹣"), + (0x16E44, "M", "𖹤"), + (0x16E45, "M", "𖹥"), + (0x16E46, "M", "𖹦"), + (0x16E47, "M", "𖹧"), + (0x16E48, "M", "𖹨"), + (0x16E49, "M", "𖹩"), + (0x16E4A, "M", "𖹪"), + (0x16E4B, "M", "𖹫"), + (0x16E4C, "M", "𖹬"), + (0x16E4D, "M", "𖹭"), + (0x16E4E, "M", "𖹮"), + (0x16E4F, "M", "𖹯"), + (0x16E50, "M", "𖹰"), + (0x16E51, "M", "𖹱"), + (0x16E52, "M", "𖹲"), + (0x16E53, "M", "𖹳"), + (0x16E54, "M", "𖹴"), + (0x16E55, "M", "𖹵"), + (0x16E56, "M", "𖹶"), + (0x16E57, "M", "𖹷"), + (0x16E58, "M", "𖹸"), + (0x16E59, "M", "𖹹"), + (0x16E5A, "M", "𖹺"), + (0x16E5B, "M", "𖹻"), + (0x16E5C, "M", "𖹼"), + (0x16E5D, "M", "𖹽"), + (0x16E5E, "M", "𖹾"), + (0x16E5F, "M", "𖹿"), + (0x16E60, "V"), + (0x16E9B, "X"), + (0x16F00, "V"), + (0x16F4B, "X"), + (0x16F4F, "V"), + (0x16F88, "X"), + (0x16F8F, "V"), + (0x16FA0, "X"), + (0x16FE0, "V"), + (0x16FE5, "X"), + (0x16FF0, "V"), + (0x16FF2, "X"), + (0x17000, "V"), + (0x187F8, "X"), + (0x18800, "V"), + (0x18CD6, "X"), + (0x18D00, "V"), + (0x18D09, "X"), + (0x1AFF0, "V"), + (0x1AFF4, "X"), + (0x1AFF5, "V"), + (0x1AFFC, "X"), + (0x1AFFD, "V"), + ] + + +def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1AFFF, "X"), + (0x1B000, "V"), + (0x1B123, "X"), + (0x1B132, "V"), + (0x1B133, "X"), + (0x1B150, "V"), + (0x1B153, "X"), + (0x1B155, "V"), + (0x1B156, "X"), + (0x1B164, "V"), + (0x1B168, "X"), + (0x1B170, "V"), + (0x1B2FC, "X"), + (0x1BC00, "V"), + (0x1BC6B, 
"X"), + (0x1BC70, "V"), + (0x1BC7D, "X"), + (0x1BC80, "V"), + (0x1BC89, "X"), + (0x1BC90, "V"), + (0x1BC9A, "X"), + (0x1BC9C, "V"), + (0x1BCA0, "I"), + (0x1BCA4, "X"), + (0x1CF00, "V"), + (0x1CF2E, "X"), + (0x1CF30, "V"), + (0x1CF47, "X"), + (0x1CF50, "V"), + (0x1CFC4, "X"), + (0x1D000, "V"), + (0x1D0F6, "X"), + (0x1D100, "V"), + (0x1D127, "X"), + (0x1D129, "V"), + (0x1D15E, "M", "𝅗𝅥"), + (0x1D15F, "M", "𝅘𝅥"), + (0x1D160, "M", "𝅘𝅥𝅮"), + (0x1D161, "M", "𝅘𝅥𝅯"), + (0x1D162, "M", "𝅘𝅥𝅰"), + (0x1D163, "M", "𝅘𝅥𝅱"), + (0x1D164, "M", "𝅘𝅥𝅲"), + (0x1D165, "V"), + (0x1D173, "X"), + (0x1D17B, "V"), + (0x1D1BB, "M", "𝆹𝅥"), + (0x1D1BC, "M", "𝆺𝅥"), + (0x1D1BD, "M", "𝆹𝅥𝅮"), + (0x1D1BE, "M", "𝆺𝅥𝅮"), + (0x1D1BF, "M", "𝆹𝅥𝅯"), + (0x1D1C0, "M", "𝆺𝅥𝅯"), + (0x1D1C1, "V"), + (0x1D1EB, "X"), + (0x1D200, "V"), + (0x1D246, "X"), + (0x1D2C0, "V"), + (0x1D2D4, "X"), + (0x1D2E0, "V"), + (0x1D2F4, "X"), + (0x1D300, "V"), + (0x1D357, "X"), + (0x1D360, "V"), + (0x1D379, "X"), + (0x1D400, "M", "a"), + (0x1D401, "M", "b"), + (0x1D402, "M", "c"), + (0x1D403, "M", "d"), + (0x1D404, "M", "e"), + (0x1D405, "M", "f"), + (0x1D406, "M", "g"), + (0x1D407, "M", "h"), + (0x1D408, "M", "i"), + (0x1D409, "M", "j"), + (0x1D40A, "M", "k"), + (0x1D40B, "M", "l"), + (0x1D40C, "M", "m"), + (0x1D40D, "M", "n"), + (0x1D40E, "M", "o"), + (0x1D40F, "M", "p"), + (0x1D410, "M", "q"), + (0x1D411, "M", "r"), + (0x1D412, "M", "s"), + (0x1D413, "M", "t"), + (0x1D414, "M", "u"), + (0x1D415, "M", "v"), + (0x1D416, "M", "w"), + (0x1D417, "M", "x"), + (0x1D418, "M", "y"), + (0x1D419, "M", "z"), + (0x1D41A, "M", "a"), + (0x1D41B, "M", "b"), + (0x1D41C, "M", "c"), + (0x1D41D, "M", "d"), + (0x1D41E, "M", "e"), + (0x1D41F, "M", "f"), + (0x1D420, "M", "g"), + (0x1D421, "M", "h"), + (0x1D422, "M", "i"), + (0x1D423, "M", "j"), + (0x1D424, "M", "k"), + ] + + +def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D425, "M", "l"), + (0x1D426, "M", "m"), + (0x1D427, "M", "n"), + (0x1D428, "M", "o"), + (0x1D429, "M", "p"), + (0x1D42A, "M", "q"), + (0x1D42B, "M", "r"), + (0x1D42C, "M", "s"), + (0x1D42D, "M", "t"), + (0x1D42E, "M", "u"), + (0x1D42F, "M", "v"), + (0x1D430, "M", "w"), + (0x1D431, "M", "x"), + (0x1D432, "M", "y"), + (0x1D433, "M", "z"), + (0x1D434, "M", "a"), + (0x1D435, "M", "b"), + (0x1D436, "M", "c"), + (0x1D437, "M", "d"), + (0x1D438, "M", "e"), + (0x1D439, "M", "f"), + (0x1D43A, "M", "g"), + (0x1D43B, "M", "h"), + (0x1D43C, "M", "i"), + (0x1D43D, "M", "j"), + (0x1D43E, "M", "k"), + (0x1D43F, "M", "l"), + (0x1D440, "M", "m"), + (0x1D441, "M", "n"), + (0x1D442, "M", "o"), + (0x1D443, "M", "p"), + (0x1D444, "M", "q"), + (0x1D445, "M", "r"), + (0x1D446, "M", "s"), + (0x1D447, "M", "t"), + (0x1D448, "M", "u"), + (0x1D449, "M", "v"), + (0x1D44A, "M", "w"), + (0x1D44B, "M", "x"), + (0x1D44C, "M", "y"), + (0x1D44D, "M", "z"), + (0x1D44E, "M", "a"), + (0x1D44F, "M", "b"), + (0x1D450, "M", "c"), + (0x1D451, "M", "d"), + (0x1D452, "M", "e"), + (0x1D453, "M", "f"), + (0x1D454, "M", "g"), + (0x1D455, "X"), + (0x1D456, "M", "i"), + (0x1D457, "M", "j"), + (0x1D458, "M", "k"), + (0x1D459, "M", "l"), + (0x1D45A, "M", "m"), + (0x1D45B, "M", "n"), + (0x1D45C, "M", "o"), + (0x1D45D, "M", "p"), + (0x1D45E, "M", "q"), + (0x1D45F, "M", "r"), + (0x1D460, "M", "s"), + (0x1D461, "M", "t"), + (0x1D462, "M", "u"), + (0x1D463, "M", "v"), + (0x1D464, "M", "w"), + (0x1D465, "M", "x"), + (0x1D466, "M", "y"), + (0x1D467, "M", "z"), + (0x1D468, "M", "a"), + (0x1D469, "M", "b"), + (0x1D46A, "M", "c"), + (0x1D46B, "M", "d"), + (0x1D46C, "M", "e"), + 
(0x1D46D, "M", "f"), + (0x1D46E, "M", "g"), + (0x1D46F, "M", "h"), + (0x1D470, "M", "i"), + (0x1D471, "M", "j"), + (0x1D472, "M", "k"), + (0x1D473, "M", "l"), + (0x1D474, "M", "m"), + (0x1D475, "M", "n"), + (0x1D476, "M", "o"), + (0x1D477, "M", "p"), + (0x1D478, "M", "q"), + (0x1D479, "M", "r"), + (0x1D47A, "M", "s"), + (0x1D47B, "M", "t"), + (0x1D47C, "M", "u"), + (0x1D47D, "M", "v"), + (0x1D47E, "M", "w"), + (0x1D47F, "M", "x"), + (0x1D480, "M", "y"), + (0x1D481, "M", "z"), + (0x1D482, "M", "a"), + (0x1D483, "M", "b"), + (0x1D484, "M", "c"), + (0x1D485, "M", "d"), + (0x1D486, "M", "e"), + (0x1D487, "M", "f"), + (0x1D488, "M", "g"), + ] + + +def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D489, "M", "h"), + (0x1D48A, "M", "i"), + (0x1D48B, "M", "j"), + (0x1D48C, "M", "k"), + (0x1D48D, "M", "l"), + (0x1D48E, "M", "m"), + (0x1D48F, "M", "n"), + (0x1D490, "M", "o"), + (0x1D491, "M", "p"), + (0x1D492, "M", "q"), + (0x1D493, "M", "r"), + (0x1D494, "M", "s"), + (0x1D495, "M", "t"), + (0x1D496, "M", "u"), + (0x1D497, "M", "v"), + (0x1D498, "M", "w"), + (0x1D499, "M", "x"), + (0x1D49A, "M", "y"), + (0x1D49B, "M", "z"), + (0x1D49C, "M", "a"), + (0x1D49D, "X"), + (0x1D49E, "M", "c"), + (0x1D49F, "M", "d"), + (0x1D4A0, "X"), + (0x1D4A2, "M", "g"), + (0x1D4A3, "X"), + (0x1D4A5, "M", "j"), + (0x1D4A6, "M", "k"), + (0x1D4A7, "X"), + (0x1D4A9, "M", "n"), + (0x1D4AA, "M", "o"), + (0x1D4AB, "M", "p"), + (0x1D4AC, "M", "q"), + (0x1D4AD, "X"), + (0x1D4AE, "M", "s"), + (0x1D4AF, "M", "t"), + (0x1D4B0, "M", "u"), + (0x1D4B1, "M", "v"), + (0x1D4B2, "M", "w"), + (0x1D4B3, "M", "x"), + (0x1D4B4, "M", "y"), + (0x1D4B5, "M", "z"), + (0x1D4B6, "M", "a"), + (0x1D4B7, "M", "b"), + (0x1D4B8, "M", "c"), + (0x1D4B9, "M", "d"), + (0x1D4BA, "X"), + (0x1D4BB, "M", "f"), + (0x1D4BC, "X"), + (0x1D4BD, "M", "h"), + (0x1D4BE, "M", "i"), + (0x1D4BF, "M", "j"), + (0x1D4C0, "M", "k"), + (0x1D4C1, "M", "l"), + (0x1D4C2, "M", "m"), + (0x1D4C3, "M", "n"), + (0x1D4C4, "X"), + (0x1D4C5, "M", "p"), + (0x1D4C6, "M", "q"), + (0x1D4C7, "M", "r"), + (0x1D4C8, "M", "s"), + (0x1D4C9, "M", "t"), + (0x1D4CA, "M", "u"), + (0x1D4CB, "M", "v"), + (0x1D4CC, "M", "w"), + (0x1D4CD, "M", "x"), + (0x1D4CE, "M", "y"), + (0x1D4CF, "M", "z"), + (0x1D4D0, "M", "a"), + (0x1D4D1, "M", "b"), + (0x1D4D2, "M", "c"), + (0x1D4D3, "M", "d"), + (0x1D4D4, "M", "e"), + (0x1D4D5, "M", "f"), + (0x1D4D6, "M", "g"), + (0x1D4D7, "M", "h"), + (0x1D4D8, "M", "i"), + (0x1D4D9, "M", "j"), + (0x1D4DA, "M", "k"), + (0x1D4DB, "M", "l"), + (0x1D4DC, "M", "m"), + (0x1D4DD, "M", "n"), + (0x1D4DE, "M", "o"), + (0x1D4DF, "M", "p"), + (0x1D4E0, "M", "q"), + (0x1D4E1, "M", "r"), + (0x1D4E2, "M", "s"), + (0x1D4E3, "M", "t"), + (0x1D4E4, "M", "u"), + (0x1D4E5, "M", "v"), + (0x1D4E6, "M", "w"), + (0x1D4E7, "M", "x"), + (0x1D4E8, "M", "y"), + (0x1D4E9, "M", "z"), + (0x1D4EA, "M", "a"), + (0x1D4EB, "M", "b"), + (0x1D4EC, "M", "c"), + (0x1D4ED, "M", "d"), + (0x1D4EE, "M", "e"), + (0x1D4EF, "M", "f"), + ] + + +def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D4F0, "M", "g"), + (0x1D4F1, "M", "h"), + (0x1D4F2, "M", "i"), + (0x1D4F3, "M", "j"), + (0x1D4F4, "M", "k"), + (0x1D4F5, "M", "l"), + (0x1D4F6, "M", "m"), + (0x1D4F7, "M", "n"), + (0x1D4F8, "M", "o"), + (0x1D4F9, "M", "p"), + (0x1D4FA, "M", "q"), + (0x1D4FB, "M", "r"), + (0x1D4FC, "M", "s"), + (0x1D4FD, "M", "t"), + (0x1D4FE, "M", "u"), + (0x1D4FF, "M", "v"), + (0x1D500, "M", "w"), + (0x1D501, "M", "x"), + (0x1D502, "M", "y"), + (0x1D503, "M", "z"), + (0x1D504, 
"M", "a"), + (0x1D505, "M", "b"), + (0x1D506, "X"), + (0x1D507, "M", "d"), + (0x1D508, "M", "e"), + (0x1D509, "M", "f"), + (0x1D50A, "M", "g"), + (0x1D50B, "X"), + (0x1D50D, "M", "j"), + (0x1D50E, "M", "k"), + (0x1D50F, "M", "l"), + (0x1D510, "M", "m"), + (0x1D511, "M", "n"), + (0x1D512, "M", "o"), + (0x1D513, "M", "p"), + (0x1D514, "M", "q"), + (0x1D515, "X"), + (0x1D516, "M", "s"), + (0x1D517, "M", "t"), + (0x1D518, "M", "u"), + (0x1D519, "M", "v"), + (0x1D51A, "M", "w"), + (0x1D51B, "M", "x"), + (0x1D51C, "M", "y"), + (0x1D51D, "X"), + (0x1D51E, "M", "a"), + (0x1D51F, "M", "b"), + (0x1D520, "M", "c"), + (0x1D521, "M", "d"), + (0x1D522, "M", "e"), + (0x1D523, "M", "f"), + (0x1D524, "M", "g"), + (0x1D525, "M", "h"), + (0x1D526, "M", "i"), + (0x1D527, "M", "j"), + (0x1D528, "M", "k"), + (0x1D529, "M", "l"), + (0x1D52A, "M", "m"), + (0x1D52B, "M", "n"), + (0x1D52C, "M", "o"), + (0x1D52D, "M", "p"), + (0x1D52E, "M", "q"), + (0x1D52F, "M", "r"), + (0x1D530, "M", "s"), + (0x1D531, "M", "t"), + (0x1D532, "M", "u"), + (0x1D533, "M", "v"), + (0x1D534, "M", "w"), + (0x1D535, "M", "x"), + (0x1D536, "M", "y"), + (0x1D537, "M", "z"), + (0x1D538, "M", "a"), + (0x1D539, "M", "b"), + (0x1D53A, "X"), + (0x1D53B, "M", "d"), + (0x1D53C, "M", "e"), + (0x1D53D, "M", "f"), + (0x1D53E, "M", "g"), + (0x1D53F, "X"), + (0x1D540, "M", "i"), + (0x1D541, "M", "j"), + (0x1D542, "M", "k"), + (0x1D543, "M", "l"), + (0x1D544, "M", "m"), + (0x1D545, "X"), + (0x1D546, "M", "o"), + (0x1D547, "X"), + (0x1D54A, "M", "s"), + (0x1D54B, "M", "t"), + (0x1D54C, "M", "u"), + (0x1D54D, "M", "v"), + (0x1D54E, "M", "w"), + (0x1D54F, "M", "x"), + (0x1D550, "M", "y"), + (0x1D551, "X"), + (0x1D552, "M", "a"), + (0x1D553, "M", "b"), + (0x1D554, "M", "c"), + (0x1D555, "M", "d"), + (0x1D556, "M", "e"), + ] + + +def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D557, "M", "f"), + (0x1D558, "M", "g"), + (0x1D559, "M", "h"), + (0x1D55A, "M", "i"), + (0x1D55B, "M", "j"), + (0x1D55C, "M", "k"), + (0x1D55D, "M", "l"), + (0x1D55E, "M", "m"), + (0x1D55F, "M", "n"), + (0x1D560, "M", "o"), + (0x1D561, "M", "p"), + (0x1D562, "M", "q"), + (0x1D563, "M", "r"), + (0x1D564, "M", "s"), + (0x1D565, "M", "t"), + (0x1D566, "M", "u"), + (0x1D567, "M", "v"), + (0x1D568, "M", "w"), + (0x1D569, "M", "x"), + (0x1D56A, "M", "y"), + (0x1D56B, "M", "z"), + (0x1D56C, "M", "a"), + (0x1D56D, "M", "b"), + (0x1D56E, "M", "c"), + (0x1D56F, "M", "d"), + (0x1D570, "M", "e"), + (0x1D571, "M", "f"), + (0x1D572, "M", "g"), + (0x1D573, "M", "h"), + (0x1D574, "M", "i"), + (0x1D575, "M", "j"), + (0x1D576, "M", "k"), + (0x1D577, "M", "l"), + (0x1D578, "M", "m"), + (0x1D579, "M", "n"), + (0x1D57A, "M", "o"), + (0x1D57B, "M", "p"), + (0x1D57C, "M", "q"), + (0x1D57D, "M", "r"), + (0x1D57E, "M", "s"), + (0x1D57F, "M", "t"), + (0x1D580, "M", "u"), + (0x1D581, "M", "v"), + (0x1D582, "M", "w"), + (0x1D583, "M", "x"), + (0x1D584, "M", "y"), + (0x1D585, "M", "z"), + (0x1D586, "M", "a"), + (0x1D587, "M", "b"), + (0x1D588, "M", "c"), + (0x1D589, "M", "d"), + (0x1D58A, "M", "e"), + (0x1D58B, "M", "f"), + (0x1D58C, "M", "g"), + (0x1D58D, "M", "h"), + (0x1D58E, "M", "i"), + (0x1D58F, "M", "j"), + (0x1D590, "M", "k"), + (0x1D591, "M", "l"), + (0x1D592, "M", "m"), + (0x1D593, "M", "n"), + (0x1D594, "M", "o"), + (0x1D595, "M", "p"), + (0x1D596, "M", "q"), + (0x1D597, "M", "r"), + (0x1D598, "M", "s"), + (0x1D599, "M", "t"), + (0x1D59A, "M", "u"), + (0x1D59B, "M", "v"), + (0x1D59C, "M", "w"), + (0x1D59D, "M", "x"), + (0x1D59E, "M", "y"), + (0x1D59F, "M", "z"), + 
(0x1D5A0, "M", "a"), + (0x1D5A1, "M", "b"), + (0x1D5A2, "M", "c"), + (0x1D5A3, "M", "d"), + (0x1D5A4, "M", "e"), + (0x1D5A5, "M", "f"), + (0x1D5A6, "M", "g"), + (0x1D5A7, "M", "h"), + (0x1D5A8, "M", "i"), + (0x1D5A9, "M", "j"), + (0x1D5AA, "M", "k"), + (0x1D5AB, "M", "l"), + (0x1D5AC, "M", "m"), + (0x1D5AD, "M", "n"), + (0x1D5AE, "M", "o"), + (0x1D5AF, "M", "p"), + (0x1D5B0, "M", "q"), + (0x1D5B1, "M", "r"), + (0x1D5B2, "M", "s"), + (0x1D5B3, "M", "t"), + (0x1D5B4, "M", "u"), + (0x1D5B5, "M", "v"), + (0x1D5B6, "M", "w"), + (0x1D5B7, "M", "x"), + (0x1D5B8, "M", "y"), + (0x1D5B9, "M", "z"), + (0x1D5BA, "M", "a"), + ] + + +def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D5BB, "M", "b"), + (0x1D5BC, "M", "c"), + (0x1D5BD, "M", "d"), + (0x1D5BE, "M", "e"), + (0x1D5BF, "M", "f"), + (0x1D5C0, "M", "g"), + (0x1D5C1, "M", "h"), + (0x1D5C2, "M", "i"), + (0x1D5C3, "M", "j"), + (0x1D5C4, "M", "k"), + (0x1D5C5, "M", "l"), + (0x1D5C6, "M", "m"), + (0x1D5C7, "M", "n"), + (0x1D5C8, "M", "o"), + (0x1D5C9, "M", "p"), + (0x1D5CA, "M", "q"), + (0x1D5CB, "M", "r"), + (0x1D5CC, "M", "s"), + (0x1D5CD, "M", "t"), + (0x1D5CE, "M", "u"), + (0x1D5CF, "M", "v"), + (0x1D5D0, "M", "w"), + (0x1D5D1, "M", "x"), + (0x1D5D2, "M", "y"), + (0x1D5D3, "M", "z"), + (0x1D5D4, "M", "a"), + (0x1D5D5, "M", "b"), + (0x1D5D6, "M", "c"), + (0x1D5D7, "M", "d"), + (0x1D5D8, "M", "e"), + (0x1D5D9, "M", "f"), + (0x1D5DA, "M", "g"), + (0x1D5DB, "M", "h"), + (0x1D5DC, "M", "i"), + (0x1D5DD, "M", "j"), + (0x1D5DE, "M", "k"), + (0x1D5DF, "M", "l"), + (0x1D5E0, "M", "m"), + (0x1D5E1, "M", "n"), + (0x1D5E2, "M", "o"), + (0x1D5E3, "M", "p"), + (0x1D5E4, "M", "q"), + (0x1D5E5, "M", "r"), + (0x1D5E6, "M", "s"), + (0x1D5E7, "M", "t"), + (0x1D5E8, "M", "u"), + (0x1D5E9, "M", "v"), + (0x1D5EA, "M", "w"), + (0x1D5EB, "M", "x"), + (0x1D5EC, "M", "y"), + (0x1D5ED, "M", "z"), + (0x1D5EE, "M", "a"), + (0x1D5EF, "M", "b"), + (0x1D5F0, "M", "c"), + (0x1D5F1, "M", "d"), + (0x1D5F2, "M", "e"), + (0x1D5F3, "M", "f"), + (0x1D5F4, "M", "g"), + (0x1D5F5, "M", "h"), + (0x1D5F6, "M", "i"), + (0x1D5F7, "M", "j"), + (0x1D5F8, "M", "k"), + (0x1D5F9, "M", "l"), + (0x1D5FA, "M", "m"), + (0x1D5FB, "M", "n"), + (0x1D5FC, "M", "o"), + (0x1D5FD, "M", "p"), + (0x1D5FE, "M", "q"), + (0x1D5FF, "M", "r"), + (0x1D600, "M", "s"), + (0x1D601, "M", "t"), + (0x1D602, "M", "u"), + (0x1D603, "M", "v"), + (0x1D604, "M", "w"), + (0x1D605, "M", "x"), + (0x1D606, "M", "y"), + (0x1D607, "M", "z"), + (0x1D608, "M", "a"), + (0x1D609, "M", "b"), + (0x1D60A, "M", "c"), + (0x1D60B, "M", "d"), + (0x1D60C, "M", "e"), + (0x1D60D, "M", "f"), + (0x1D60E, "M", "g"), + (0x1D60F, "M", "h"), + (0x1D610, "M", "i"), + (0x1D611, "M", "j"), + (0x1D612, "M", "k"), + (0x1D613, "M", "l"), + (0x1D614, "M", "m"), + (0x1D615, "M", "n"), + (0x1D616, "M", "o"), + (0x1D617, "M", "p"), + (0x1D618, "M", "q"), + (0x1D619, "M", "r"), + (0x1D61A, "M", "s"), + (0x1D61B, "M", "t"), + (0x1D61C, "M", "u"), + (0x1D61D, "M", "v"), + (0x1D61E, "M", "w"), + ] + + +def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D61F, "M", "x"), + (0x1D620, "M", "y"), + (0x1D621, "M", "z"), + (0x1D622, "M", "a"), + (0x1D623, "M", "b"), + (0x1D624, "M", "c"), + (0x1D625, "M", "d"), + (0x1D626, "M", "e"), + (0x1D627, "M", "f"), + (0x1D628, "M", "g"), + (0x1D629, "M", "h"), + (0x1D62A, "M", "i"), + (0x1D62B, "M", "j"), + (0x1D62C, "M", "k"), + (0x1D62D, "M", "l"), + (0x1D62E, "M", "m"), + (0x1D62F, "M", "n"), + (0x1D630, "M", "o"), + (0x1D631, "M", "p"), + (0x1D632, "M", 
"q"), + (0x1D633, "M", "r"), + (0x1D634, "M", "s"), + (0x1D635, "M", "t"), + (0x1D636, "M", "u"), + (0x1D637, "M", "v"), + (0x1D638, "M", "w"), + (0x1D639, "M", "x"), + (0x1D63A, "M", "y"), + (0x1D63B, "M", "z"), + (0x1D63C, "M", "a"), + (0x1D63D, "M", "b"), + (0x1D63E, "M", "c"), + (0x1D63F, "M", "d"), + (0x1D640, "M", "e"), + (0x1D641, "M", "f"), + (0x1D642, "M", "g"), + (0x1D643, "M", "h"), + (0x1D644, "M", "i"), + (0x1D645, "M", "j"), + (0x1D646, "M", "k"), + (0x1D647, "M", "l"), + (0x1D648, "M", "m"), + (0x1D649, "M", "n"), + (0x1D64A, "M", "o"), + (0x1D64B, "M", "p"), + (0x1D64C, "M", "q"), + (0x1D64D, "M", "r"), + (0x1D64E, "M", "s"), + (0x1D64F, "M", "t"), + (0x1D650, "M", "u"), + (0x1D651, "M", "v"), + (0x1D652, "M", "w"), + (0x1D653, "M", "x"), + (0x1D654, "M", "y"), + (0x1D655, "M", "z"), + (0x1D656, "M", "a"), + (0x1D657, "M", "b"), + (0x1D658, "M", "c"), + (0x1D659, "M", "d"), + (0x1D65A, "M", "e"), + (0x1D65B, "M", "f"), + (0x1D65C, "M", "g"), + (0x1D65D, "M", "h"), + (0x1D65E, "M", "i"), + (0x1D65F, "M", "j"), + (0x1D660, "M", "k"), + (0x1D661, "M", "l"), + (0x1D662, "M", "m"), + (0x1D663, "M", "n"), + (0x1D664, "M", "o"), + (0x1D665, "M", "p"), + (0x1D666, "M", "q"), + (0x1D667, "M", "r"), + (0x1D668, "M", "s"), + (0x1D669, "M", "t"), + (0x1D66A, "M", "u"), + (0x1D66B, "M", "v"), + (0x1D66C, "M", "w"), + (0x1D66D, "M", "x"), + (0x1D66E, "M", "y"), + (0x1D66F, "M", "z"), + (0x1D670, "M", "a"), + (0x1D671, "M", "b"), + (0x1D672, "M", "c"), + (0x1D673, "M", "d"), + (0x1D674, "M", "e"), + (0x1D675, "M", "f"), + (0x1D676, "M", "g"), + (0x1D677, "M", "h"), + (0x1D678, "M", "i"), + (0x1D679, "M", "j"), + (0x1D67A, "M", "k"), + (0x1D67B, "M", "l"), + (0x1D67C, "M", "m"), + (0x1D67D, "M", "n"), + (0x1D67E, "M", "o"), + (0x1D67F, "M", "p"), + (0x1D680, "M", "q"), + (0x1D681, "M", "r"), + (0x1D682, "M", "s"), + ] + + +def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D683, "M", "t"), + (0x1D684, "M", "u"), + (0x1D685, "M", "v"), + (0x1D686, "M", "w"), + (0x1D687, "M", "x"), + (0x1D688, "M", "y"), + (0x1D689, "M", "z"), + (0x1D68A, "M", "a"), + (0x1D68B, "M", "b"), + (0x1D68C, "M", "c"), + (0x1D68D, "M", "d"), + (0x1D68E, "M", "e"), + (0x1D68F, "M", "f"), + (0x1D690, "M", "g"), + (0x1D691, "M", "h"), + (0x1D692, "M", "i"), + (0x1D693, "M", "j"), + (0x1D694, "M", "k"), + (0x1D695, "M", "l"), + (0x1D696, "M", "m"), + (0x1D697, "M", "n"), + (0x1D698, "M", "o"), + (0x1D699, "M", "p"), + (0x1D69A, "M", "q"), + (0x1D69B, "M", "r"), + (0x1D69C, "M", "s"), + (0x1D69D, "M", "t"), + (0x1D69E, "M", "u"), + (0x1D69F, "M", "v"), + (0x1D6A0, "M", "w"), + (0x1D6A1, "M", "x"), + (0x1D6A2, "M", "y"), + (0x1D6A3, "M", "z"), + (0x1D6A4, "M", "ı"), + (0x1D6A5, "M", "ȷ"), + (0x1D6A6, "X"), + (0x1D6A8, "M", "α"), + (0x1D6A9, "M", "β"), + (0x1D6AA, "M", "γ"), + (0x1D6AB, "M", "δ"), + (0x1D6AC, "M", "ε"), + (0x1D6AD, "M", "ζ"), + (0x1D6AE, "M", "η"), + (0x1D6AF, "M", "θ"), + (0x1D6B0, "M", "ι"), + (0x1D6B1, "M", "κ"), + (0x1D6B2, "M", "λ"), + (0x1D6B3, "M", "μ"), + (0x1D6B4, "M", "ν"), + (0x1D6B5, "M", "ξ"), + (0x1D6B6, "M", "ο"), + (0x1D6B7, "M", "π"), + (0x1D6B8, "M", "ρ"), + (0x1D6B9, "M", "θ"), + (0x1D6BA, "M", "σ"), + (0x1D6BB, "M", "τ"), + (0x1D6BC, "M", "υ"), + (0x1D6BD, "M", "φ"), + (0x1D6BE, "M", "χ"), + (0x1D6BF, "M", "ψ"), + (0x1D6C0, "M", "ω"), + (0x1D6C1, "M", "∇"), + (0x1D6C2, "M", "α"), + (0x1D6C3, "M", "β"), + (0x1D6C4, "M", "γ"), + (0x1D6C5, "M", "δ"), + (0x1D6C6, "M", "ε"), + (0x1D6C7, "M", "ζ"), + (0x1D6C8, "M", "η"), + (0x1D6C9, "M", "θ"), + (0x1D6CA, 
"M", "ι"), + (0x1D6CB, "M", "κ"), + (0x1D6CC, "M", "λ"), + (0x1D6CD, "M", "μ"), + (0x1D6CE, "M", "ν"), + (0x1D6CF, "M", "ξ"), + (0x1D6D0, "M", "ο"), + (0x1D6D1, "M", "π"), + (0x1D6D2, "M", "ρ"), + (0x1D6D3, "M", "σ"), + (0x1D6D5, "M", "τ"), + (0x1D6D6, "M", "υ"), + (0x1D6D7, "M", "φ"), + (0x1D6D8, "M", "χ"), + (0x1D6D9, "M", "ψ"), + (0x1D6DA, "M", "ω"), + (0x1D6DB, "M", "∂"), + (0x1D6DC, "M", "ε"), + (0x1D6DD, "M", "θ"), + (0x1D6DE, "M", "κ"), + (0x1D6DF, "M", "φ"), + (0x1D6E0, "M", "ρ"), + (0x1D6E1, "M", "π"), + (0x1D6E2, "M", "α"), + (0x1D6E3, "M", "β"), + (0x1D6E4, "M", "γ"), + (0x1D6E5, "M", "δ"), + (0x1D6E6, "M", "ε"), + (0x1D6E7, "M", "ζ"), + (0x1D6E8, "M", "η"), + ] + + +def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D6E9, "M", "θ"), + (0x1D6EA, "M", "ι"), + (0x1D6EB, "M", "κ"), + (0x1D6EC, "M", "λ"), + (0x1D6ED, "M", "μ"), + (0x1D6EE, "M", "ν"), + (0x1D6EF, "M", "ξ"), + (0x1D6F0, "M", "ο"), + (0x1D6F1, "M", "π"), + (0x1D6F2, "M", "ρ"), + (0x1D6F3, "M", "θ"), + (0x1D6F4, "M", "σ"), + (0x1D6F5, "M", "τ"), + (0x1D6F6, "M", "υ"), + (0x1D6F7, "M", "φ"), + (0x1D6F8, "M", "χ"), + (0x1D6F9, "M", "ψ"), + (0x1D6FA, "M", "ω"), + (0x1D6FB, "M", "∇"), + (0x1D6FC, "M", "α"), + (0x1D6FD, "M", "β"), + (0x1D6FE, "M", "γ"), + (0x1D6FF, "M", "δ"), + (0x1D700, "M", "ε"), + (0x1D701, "M", "ζ"), + (0x1D702, "M", "η"), + (0x1D703, "M", "θ"), + (0x1D704, "M", "ι"), + (0x1D705, "M", "κ"), + (0x1D706, "M", "λ"), + (0x1D707, "M", "μ"), + (0x1D708, "M", "ν"), + (0x1D709, "M", "ξ"), + (0x1D70A, "M", "ο"), + (0x1D70B, "M", "π"), + (0x1D70C, "M", "ρ"), + (0x1D70D, "M", "σ"), + (0x1D70F, "M", "τ"), + (0x1D710, "M", "υ"), + (0x1D711, "M", "φ"), + (0x1D712, "M", "χ"), + (0x1D713, "M", "ψ"), + (0x1D714, "M", "ω"), + (0x1D715, "M", "∂"), + (0x1D716, "M", "ε"), + (0x1D717, "M", "θ"), + (0x1D718, "M", "κ"), + (0x1D719, "M", "φ"), + (0x1D71A, "M", "ρ"), + (0x1D71B, "M", "π"), + (0x1D71C, "M", "α"), + (0x1D71D, "M", "β"), + (0x1D71E, "M", "γ"), + (0x1D71F, "M", "δ"), + (0x1D720, "M", "ε"), + (0x1D721, "M", "ζ"), + (0x1D722, "M", "η"), + (0x1D723, "M", "θ"), + (0x1D724, "M", "ι"), + (0x1D725, "M", "κ"), + (0x1D726, "M", "λ"), + (0x1D727, "M", "μ"), + (0x1D728, "M", "ν"), + (0x1D729, "M", "ξ"), + (0x1D72A, "M", "ο"), + (0x1D72B, "M", "π"), + (0x1D72C, "M", "ρ"), + (0x1D72D, "M", "θ"), + (0x1D72E, "M", "σ"), + (0x1D72F, "M", "τ"), + (0x1D730, "M", "υ"), + (0x1D731, "M", "φ"), + (0x1D732, "M", "χ"), + (0x1D733, "M", "ψ"), + (0x1D734, "M", "ω"), + (0x1D735, "M", "∇"), + (0x1D736, "M", "α"), + (0x1D737, "M", "β"), + (0x1D738, "M", "γ"), + (0x1D739, "M", "δ"), + (0x1D73A, "M", "ε"), + (0x1D73B, "M", "ζ"), + (0x1D73C, "M", "η"), + (0x1D73D, "M", "θ"), + (0x1D73E, "M", "ι"), + (0x1D73F, "M", "κ"), + (0x1D740, "M", "λ"), + (0x1D741, "M", "μ"), + (0x1D742, "M", "ν"), + (0x1D743, "M", "ξ"), + (0x1D744, "M", "ο"), + (0x1D745, "M", "π"), + (0x1D746, "M", "ρ"), + (0x1D747, "M", "σ"), + (0x1D749, "M", "τ"), + (0x1D74A, "M", "υ"), + (0x1D74B, "M", "φ"), + (0x1D74C, "M", "χ"), + (0x1D74D, "M", "ψ"), + (0x1D74E, "M", "ω"), + ] + + +def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D74F, "M", "∂"), + (0x1D750, "M", "ε"), + (0x1D751, "M", "θ"), + (0x1D752, "M", "κ"), + (0x1D753, "M", "φ"), + (0x1D754, "M", "ρ"), + (0x1D755, "M", "π"), + (0x1D756, "M", "α"), + (0x1D757, "M", "β"), + (0x1D758, "M", "γ"), + (0x1D759, "M", "δ"), + (0x1D75A, "M", "ε"), + (0x1D75B, "M", "ζ"), + (0x1D75C, "M", "η"), + (0x1D75D, "M", "θ"), + (0x1D75E, "M", "ι"), + (0x1D75F, "M", "κ"), + 
(0x1D760, "M", "λ"), + (0x1D761, "M", "μ"), + (0x1D762, "M", "ν"), + (0x1D763, "M", "ξ"), + (0x1D764, "M", "ο"), + (0x1D765, "M", "π"), + (0x1D766, "M", "ρ"), + (0x1D767, "M", "θ"), + (0x1D768, "M", "σ"), + (0x1D769, "M", "τ"), + (0x1D76A, "M", "υ"), + (0x1D76B, "M", "φ"), + (0x1D76C, "M", "χ"), + (0x1D76D, "M", "ψ"), + (0x1D76E, "M", "ω"), + (0x1D76F, "M", "∇"), + (0x1D770, "M", "α"), + (0x1D771, "M", "β"), + (0x1D772, "M", "γ"), + (0x1D773, "M", "δ"), + (0x1D774, "M", "ε"), + (0x1D775, "M", "ζ"), + (0x1D776, "M", "η"), + (0x1D777, "M", "θ"), + (0x1D778, "M", "ι"), + (0x1D779, "M", "κ"), + (0x1D77A, "M", "λ"), + (0x1D77B, "M", "μ"), + (0x1D77C, "M", "ν"), + (0x1D77D, "M", "ξ"), + (0x1D77E, "M", "ο"), + (0x1D77F, "M", "π"), + (0x1D780, "M", "ρ"), + (0x1D781, "M", "σ"), + (0x1D783, "M", "τ"), + (0x1D784, "M", "υ"), + (0x1D785, "M", "φ"), + (0x1D786, "M", "χ"), + (0x1D787, "M", "ψ"), + (0x1D788, "M", "ω"), + (0x1D789, "M", "∂"), + (0x1D78A, "M", "ε"), + (0x1D78B, "M", "θ"), + (0x1D78C, "M", "κ"), + (0x1D78D, "M", "φ"), + (0x1D78E, "M", "ρ"), + (0x1D78F, "M", "π"), + (0x1D790, "M", "α"), + (0x1D791, "M", "β"), + (0x1D792, "M", "γ"), + (0x1D793, "M", "δ"), + (0x1D794, "M", "ε"), + (0x1D795, "M", "ζ"), + (0x1D796, "M", "η"), + (0x1D797, "M", "θ"), + (0x1D798, "M", "ι"), + (0x1D799, "M", "κ"), + (0x1D79A, "M", "λ"), + (0x1D79B, "M", "μ"), + (0x1D79C, "M", "ν"), + (0x1D79D, "M", "ξ"), + (0x1D79E, "M", "ο"), + (0x1D79F, "M", "π"), + (0x1D7A0, "M", "ρ"), + (0x1D7A1, "M", "θ"), + (0x1D7A2, "M", "σ"), + (0x1D7A3, "M", "τ"), + (0x1D7A4, "M", "υ"), + (0x1D7A5, "M", "φ"), + (0x1D7A6, "M", "χ"), + (0x1D7A7, "M", "ψ"), + (0x1D7A8, "M", "ω"), + (0x1D7A9, "M", "∇"), + (0x1D7AA, "M", "α"), + (0x1D7AB, "M", "β"), + (0x1D7AC, "M", "γ"), + (0x1D7AD, "M", "δ"), + (0x1D7AE, "M", "ε"), + (0x1D7AF, "M", "ζ"), + (0x1D7B0, "M", "η"), + (0x1D7B1, "M", "θ"), + (0x1D7B2, "M", "ι"), + (0x1D7B3, "M", "κ"), + ] + + +def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D7B4, "M", "λ"), + (0x1D7B5, "M", "μ"), + (0x1D7B6, "M", "ν"), + (0x1D7B7, "M", "ξ"), + (0x1D7B8, "M", "ο"), + (0x1D7B9, "M", "π"), + (0x1D7BA, "M", "ρ"), + (0x1D7BB, "M", "σ"), + (0x1D7BD, "M", "τ"), + (0x1D7BE, "M", "υ"), + (0x1D7BF, "M", "φ"), + (0x1D7C0, "M", "χ"), + (0x1D7C1, "M", "ψ"), + (0x1D7C2, "M", "ω"), + (0x1D7C3, "M", "∂"), + (0x1D7C4, "M", "ε"), + (0x1D7C5, "M", "θ"), + (0x1D7C6, "M", "κ"), + (0x1D7C7, "M", "φ"), + (0x1D7C8, "M", "ρ"), + (0x1D7C9, "M", "π"), + (0x1D7CA, "M", "ϝ"), + (0x1D7CC, "X"), + (0x1D7CE, "M", "0"), + (0x1D7CF, "M", "1"), + (0x1D7D0, "M", "2"), + (0x1D7D1, "M", "3"), + (0x1D7D2, "M", "4"), + (0x1D7D3, "M", "5"), + (0x1D7D4, "M", "6"), + (0x1D7D5, "M", "7"), + (0x1D7D6, "M", "8"), + (0x1D7D7, "M", "9"), + (0x1D7D8, "M", "0"), + (0x1D7D9, "M", "1"), + (0x1D7DA, "M", "2"), + (0x1D7DB, "M", "3"), + (0x1D7DC, "M", "4"), + (0x1D7DD, "M", "5"), + (0x1D7DE, "M", "6"), + (0x1D7DF, "M", "7"), + (0x1D7E0, "M", "8"), + (0x1D7E1, "M", "9"), + (0x1D7E2, "M", "0"), + (0x1D7E3, "M", "1"), + (0x1D7E4, "M", "2"), + (0x1D7E5, "M", "3"), + (0x1D7E6, "M", "4"), + (0x1D7E7, "M", "5"), + (0x1D7E8, "M", "6"), + (0x1D7E9, "M", "7"), + (0x1D7EA, "M", "8"), + (0x1D7EB, "M", "9"), + (0x1D7EC, "M", "0"), + (0x1D7ED, "M", "1"), + (0x1D7EE, "M", "2"), + (0x1D7EF, "M", "3"), + (0x1D7F0, "M", "4"), + (0x1D7F1, "M", "5"), + (0x1D7F2, "M", "6"), + (0x1D7F3, "M", "7"), + (0x1D7F4, "M", "8"), + (0x1D7F5, "M", "9"), + (0x1D7F6, "M", "0"), + (0x1D7F7, "M", "1"), + (0x1D7F8, "M", "2"), + (0x1D7F9, "M", "3"), + (0x1D7FA, "M", "4"), 
+ (0x1D7FB, "M", "5"), + (0x1D7FC, "M", "6"), + (0x1D7FD, "M", "7"), + (0x1D7FE, "M", "8"), + (0x1D7FF, "M", "9"), + (0x1D800, "V"), + (0x1DA8C, "X"), + (0x1DA9B, "V"), + (0x1DAA0, "X"), + (0x1DAA1, "V"), + (0x1DAB0, "X"), + (0x1DF00, "V"), + (0x1DF1F, "X"), + (0x1DF25, "V"), + (0x1DF2B, "X"), + (0x1E000, "V"), + (0x1E007, "X"), + (0x1E008, "V"), + (0x1E019, "X"), + (0x1E01B, "V"), + (0x1E022, "X"), + (0x1E023, "V"), + (0x1E025, "X"), + (0x1E026, "V"), + (0x1E02B, "X"), + (0x1E030, "M", "а"), + (0x1E031, "M", "б"), + (0x1E032, "M", "в"), + (0x1E033, "M", "г"), + (0x1E034, "M", "д"), + (0x1E035, "M", "е"), + (0x1E036, "M", "ж"), + ] + + +def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E037, "M", "з"), + (0x1E038, "M", "и"), + (0x1E039, "M", "к"), + (0x1E03A, "M", "л"), + (0x1E03B, "M", "м"), + (0x1E03C, "M", "о"), + (0x1E03D, "M", "п"), + (0x1E03E, "M", "р"), + (0x1E03F, "M", "с"), + (0x1E040, "M", "т"), + (0x1E041, "M", "у"), + (0x1E042, "M", "ф"), + (0x1E043, "M", "х"), + (0x1E044, "M", "ц"), + (0x1E045, "M", "ч"), + (0x1E046, "M", "ш"), + (0x1E047, "M", "ы"), + (0x1E048, "M", "э"), + (0x1E049, "M", "ю"), + (0x1E04A, "M", "ꚉ"), + (0x1E04B, "M", "ә"), + (0x1E04C, "M", "і"), + (0x1E04D, "M", "ј"), + (0x1E04E, "M", "ө"), + (0x1E04F, "M", "ү"), + (0x1E050, "M", "ӏ"), + (0x1E051, "M", "а"), + (0x1E052, "M", "б"), + (0x1E053, "M", "в"), + (0x1E054, "M", "г"), + (0x1E055, "M", "д"), + (0x1E056, "M", "е"), + (0x1E057, "M", "ж"), + (0x1E058, "M", "з"), + (0x1E059, "M", "и"), + (0x1E05A, "M", "к"), + (0x1E05B, "M", "л"), + (0x1E05C, "M", "о"), + (0x1E05D, "M", "п"), + (0x1E05E, "M", "с"), + (0x1E05F, "M", "у"), + (0x1E060, "M", "ф"), + (0x1E061, "M", "х"), + (0x1E062, "M", "ц"), + (0x1E063, "M", "ч"), + (0x1E064, "M", "ш"), + (0x1E065, "M", "ъ"), + (0x1E066, "M", "ы"), + (0x1E067, "M", "ґ"), + (0x1E068, "M", "і"), + (0x1E069, "M", "ѕ"), + (0x1E06A, "M", "џ"), + (0x1E06B, "M", "ҫ"), + (0x1E06C, "M", "ꙑ"), + (0x1E06D, "M", "ұ"), + (0x1E06E, "X"), + (0x1E08F, "V"), + (0x1E090, "X"), + (0x1E100, "V"), + (0x1E12D, "X"), + (0x1E130, "V"), + (0x1E13E, "X"), + (0x1E140, "V"), + (0x1E14A, "X"), + (0x1E14E, "V"), + (0x1E150, "X"), + (0x1E290, "V"), + (0x1E2AF, "X"), + (0x1E2C0, "V"), + (0x1E2FA, "X"), + (0x1E2FF, "V"), + (0x1E300, "X"), + (0x1E4D0, "V"), + (0x1E4FA, "X"), + (0x1E7E0, "V"), + (0x1E7E7, "X"), + (0x1E7E8, "V"), + (0x1E7EC, "X"), + (0x1E7ED, "V"), + (0x1E7EF, "X"), + (0x1E7F0, "V"), + (0x1E7FF, "X"), + (0x1E800, "V"), + (0x1E8C5, "X"), + (0x1E8C7, "V"), + (0x1E8D7, "X"), + (0x1E900, "M", "𞤢"), + (0x1E901, "M", "𞤣"), + (0x1E902, "M", "𞤤"), + (0x1E903, "M", "𞤥"), + (0x1E904, "M", "𞤦"), + (0x1E905, "M", "𞤧"), + (0x1E906, "M", "𞤨"), + (0x1E907, "M", "𞤩"), + (0x1E908, "M", "𞤪"), + (0x1E909, "M", "𞤫"), + (0x1E90A, "M", "𞤬"), + (0x1E90B, "M", "𞤭"), + (0x1E90C, "M", "𞤮"), + (0x1E90D, "M", "𞤯"), + ] + + +def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E90E, "M", "𞤰"), + (0x1E90F, "M", "𞤱"), + (0x1E910, "M", "𞤲"), + (0x1E911, "M", "𞤳"), + (0x1E912, "M", "𞤴"), + (0x1E913, "M", "𞤵"), + (0x1E914, "M", "𞤶"), + (0x1E915, "M", "𞤷"), + (0x1E916, "M", "𞤸"), + (0x1E917, "M", "𞤹"), + (0x1E918, "M", "𞤺"), + (0x1E919, "M", "𞤻"), + (0x1E91A, "M", "𞤼"), + (0x1E91B, "M", "𞤽"), + (0x1E91C, "M", "𞤾"), + (0x1E91D, "M", "𞤿"), + (0x1E91E, "M", "𞥀"), + (0x1E91F, "M", "𞥁"), + (0x1E920, "M", "𞥂"), + (0x1E921, "M", "𞥃"), + (0x1E922, "V"), + (0x1E94C, "X"), + (0x1E950, "V"), + (0x1E95A, "X"), + (0x1E95E, "V"), + (0x1E960, "X"), + (0x1EC71, "V"), + 
(0x1ECB5, "X"), + (0x1ED01, "V"), + (0x1ED3E, "X"), + (0x1EE00, "M", "ا"), + (0x1EE01, "M", "ب"), + (0x1EE02, "M", "ج"), + (0x1EE03, "M", "د"), + (0x1EE04, "X"), + (0x1EE05, "M", "و"), + (0x1EE06, "M", "ز"), + (0x1EE07, "M", "ح"), + (0x1EE08, "M", "ط"), + (0x1EE09, "M", "ي"), + (0x1EE0A, "M", "ك"), + (0x1EE0B, "M", "ل"), + (0x1EE0C, "M", "م"), + (0x1EE0D, "M", "ن"), + (0x1EE0E, "M", "س"), + (0x1EE0F, "M", "ع"), + (0x1EE10, "M", "ف"), + (0x1EE11, "M", "ص"), + (0x1EE12, "M", "ق"), + (0x1EE13, "M", "ر"), + (0x1EE14, "M", "ش"), + (0x1EE15, "M", "ت"), + (0x1EE16, "M", "ث"), + (0x1EE17, "M", "خ"), + (0x1EE18, "M", "ذ"), + (0x1EE19, "M", "ض"), + (0x1EE1A, "M", "ظ"), + (0x1EE1B, "M", "غ"), + (0x1EE1C, "M", "ٮ"), + (0x1EE1D, "M", "ں"), + (0x1EE1E, "M", "ڡ"), + (0x1EE1F, "M", "ٯ"), + (0x1EE20, "X"), + (0x1EE21, "M", "ب"), + (0x1EE22, "M", "ج"), + (0x1EE23, "X"), + (0x1EE24, "M", "ه"), + (0x1EE25, "X"), + (0x1EE27, "M", "ح"), + (0x1EE28, "X"), + (0x1EE29, "M", "ي"), + (0x1EE2A, "M", "ك"), + (0x1EE2B, "M", "ل"), + (0x1EE2C, "M", "م"), + (0x1EE2D, "M", "ن"), + (0x1EE2E, "M", "س"), + (0x1EE2F, "M", "ع"), + (0x1EE30, "M", "ف"), + (0x1EE31, "M", "ص"), + (0x1EE32, "M", "ق"), + (0x1EE33, "X"), + (0x1EE34, "M", "ش"), + (0x1EE35, "M", "ت"), + (0x1EE36, "M", "ث"), + (0x1EE37, "M", "خ"), + (0x1EE38, "X"), + (0x1EE39, "M", "ض"), + (0x1EE3A, "X"), + (0x1EE3B, "M", "غ"), + (0x1EE3C, "X"), + (0x1EE42, "M", "ج"), + (0x1EE43, "X"), + (0x1EE47, "M", "ح"), + (0x1EE48, "X"), + (0x1EE49, "M", "ي"), + (0x1EE4A, "X"), + (0x1EE4B, "M", "ل"), + (0x1EE4C, "X"), + (0x1EE4D, "M", "ن"), + (0x1EE4E, "M", "س"), + ] + + +def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EE4F, "M", "ع"), + (0x1EE50, "X"), + (0x1EE51, "M", "ص"), + (0x1EE52, "M", "ق"), + (0x1EE53, "X"), + (0x1EE54, "M", "ش"), + (0x1EE55, "X"), + (0x1EE57, "M", "خ"), + (0x1EE58, "X"), + (0x1EE59, "M", "ض"), + (0x1EE5A, "X"), + (0x1EE5B, "M", "غ"), + (0x1EE5C, "X"), + (0x1EE5D, "M", "ں"), + (0x1EE5E, "X"), + (0x1EE5F, "M", "ٯ"), + (0x1EE60, "X"), + (0x1EE61, "M", "ب"), + (0x1EE62, "M", "ج"), + (0x1EE63, "X"), + (0x1EE64, "M", "ه"), + (0x1EE65, "X"), + (0x1EE67, "M", "ح"), + (0x1EE68, "M", "ط"), + (0x1EE69, "M", "ي"), + (0x1EE6A, "M", "ك"), + (0x1EE6B, "X"), + (0x1EE6C, "M", "م"), + (0x1EE6D, "M", "ن"), + (0x1EE6E, "M", "س"), + (0x1EE6F, "M", "ع"), + (0x1EE70, "M", "ف"), + (0x1EE71, "M", "ص"), + (0x1EE72, "M", "ق"), + (0x1EE73, "X"), + (0x1EE74, "M", "ش"), + (0x1EE75, "M", "ت"), + (0x1EE76, "M", "ث"), + (0x1EE77, "M", "خ"), + (0x1EE78, "X"), + (0x1EE79, "M", "ض"), + (0x1EE7A, "M", "ظ"), + (0x1EE7B, "M", "غ"), + (0x1EE7C, "M", "ٮ"), + (0x1EE7D, "X"), + (0x1EE7E, "M", "ڡ"), + (0x1EE7F, "X"), + (0x1EE80, "M", "ا"), + (0x1EE81, "M", "ب"), + (0x1EE82, "M", "ج"), + (0x1EE83, "M", "د"), + (0x1EE84, "M", "ه"), + (0x1EE85, "M", "و"), + (0x1EE86, "M", "ز"), + (0x1EE87, "M", "ح"), + (0x1EE88, "M", "ط"), + (0x1EE89, "M", "ي"), + (0x1EE8A, "X"), + (0x1EE8B, "M", "ل"), + (0x1EE8C, "M", "م"), + (0x1EE8D, "M", "ن"), + (0x1EE8E, "M", "س"), + (0x1EE8F, "M", "ع"), + (0x1EE90, "M", "ف"), + (0x1EE91, "M", "ص"), + (0x1EE92, "M", "ق"), + (0x1EE93, "M", "ر"), + (0x1EE94, "M", "ش"), + (0x1EE95, "M", "ت"), + (0x1EE96, "M", "ث"), + (0x1EE97, "M", "خ"), + (0x1EE98, "M", "ذ"), + (0x1EE99, "M", "ض"), + (0x1EE9A, "M", "ظ"), + (0x1EE9B, "M", "غ"), + (0x1EE9C, "X"), + (0x1EEA1, "M", "ب"), + (0x1EEA2, "M", "ج"), + (0x1EEA3, "M", "د"), + (0x1EEA4, "X"), + (0x1EEA5, "M", "و"), + (0x1EEA6, "M", "ز"), + (0x1EEA7, "M", "ح"), + (0x1EEA8, "M", "ط"), + (0x1EEA9, "M", "ي"), + 
(0x1EEAA, "X"), + (0x1EEAB, "M", "ل"), + (0x1EEAC, "M", "م"), + (0x1EEAD, "M", "ن"), + (0x1EEAE, "M", "س"), + (0x1EEAF, "M", "ع"), + (0x1EEB0, "M", "ف"), + (0x1EEB1, "M", "ص"), + (0x1EEB2, "M", "ق"), + (0x1EEB3, "M", "ر"), + (0x1EEB4, "M", "ش"), + (0x1EEB5, "M", "ت"), + (0x1EEB6, "M", "ث"), + (0x1EEB7, "M", "خ"), + (0x1EEB8, "M", "ذ"), + ] + + +def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EEB9, "M", "ض"), + (0x1EEBA, "M", "ظ"), + (0x1EEBB, "M", "غ"), + (0x1EEBC, "X"), + (0x1EEF0, "V"), + (0x1EEF2, "X"), + (0x1F000, "V"), + (0x1F02C, "X"), + (0x1F030, "V"), + (0x1F094, "X"), + (0x1F0A0, "V"), + (0x1F0AF, "X"), + (0x1F0B1, "V"), + (0x1F0C0, "X"), + (0x1F0C1, "V"), + (0x1F0D0, "X"), + (0x1F0D1, "V"), + (0x1F0F6, "X"), + (0x1F101, "3", "0,"), + (0x1F102, "3", "1,"), + (0x1F103, "3", "2,"), + (0x1F104, "3", "3,"), + (0x1F105, "3", "4,"), + (0x1F106, "3", "5,"), + (0x1F107, "3", "6,"), + (0x1F108, "3", "7,"), + (0x1F109, "3", "8,"), + (0x1F10A, "3", "9,"), + (0x1F10B, "V"), + (0x1F110, "3", "(a)"), + (0x1F111, "3", "(b)"), + (0x1F112, "3", "(c)"), + (0x1F113, "3", "(d)"), + (0x1F114, "3", "(e)"), + (0x1F115, "3", "(f)"), + (0x1F116, "3", "(g)"), + (0x1F117, "3", "(h)"), + (0x1F118, "3", "(i)"), + (0x1F119, "3", "(j)"), + (0x1F11A, "3", "(k)"), + (0x1F11B, "3", "(l)"), + (0x1F11C, "3", "(m)"), + (0x1F11D, "3", "(n)"), + (0x1F11E, "3", "(o)"), + (0x1F11F, "3", "(p)"), + (0x1F120, "3", "(q)"), + (0x1F121, "3", "(r)"), + (0x1F122, "3", "(s)"), + (0x1F123, "3", "(t)"), + (0x1F124, "3", "(u)"), + (0x1F125, "3", "(v)"), + (0x1F126, "3", "(w)"), + (0x1F127, "3", "(x)"), + (0x1F128, "3", "(y)"), + (0x1F129, "3", "(z)"), + (0x1F12A, "M", "〔s〕"), + (0x1F12B, "M", "c"), + (0x1F12C, "M", "r"), + (0x1F12D, "M", "cd"), + (0x1F12E, "M", "wz"), + (0x1F12F, "V"), + (0x1F130, "M", "a"), + (0x1F131, "M", "b"), + (0x1F132, "M", "c"), + (0x1F133, "M", "d"), + (0x1F134, "M", "e"), + (0x1F135, "M", "f"), + (0x1F136, "M", "g"), + (0x1F137, "M", "h"), + (0x1F138, "M", "i"), + (0x1F139, "M", "j"), + (0x1F13A, "M", "k"), + (0x1F13B, "M", "l"), + (0x1F13C, "M", "m"), + (0x1F13D, "M", "n"), + (0x1F13E, "M", "o"), + (0x1F13F, "M", "p"), + (0x1F140, "M", "q"), + (0x1F141, "M", "r"), + (0x1F142, "M", "s"), + (0x1F143, "M", "t"), + (0x1F144, "M", "u"), + (0x1F145, "M", "v"), + (0x1F146, "M", "w"), + (0x1F147, "M", "x"), + (0x1F148, "M", "y"), + (0x1F149, "M", "z"), + (0x1F14A, "M", "hv"), + (0x1F14B, "M", "mv"), + (0x1F14C, "M", "sd"), + (0x1F14D, "M", "ss"), + (0x1F14E, "M", "ppv"), + (0x1F14F, "M", "wc"), + (0x1F150, "V"), + (0x1F16A, "M", "mc"), + (0x1F16B, "M", "md"), + (0x1F16C, "M", "mr"), + (0x1F16D, "V"), + (0x1F190, "M", "dj"), + (0x1F191, "V"), + ] + + +def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F1AE, "X"), + (0x1F1E6, "V"), + (0x1F200, "M", "ほか"), + (0x1F201, "M", "ココ"), + (0x1F202, "M", "サ"), + (0x1F203, "X"), + (0x1F210, "M", "手"), + (0x1F211, "M", "字"), + (0x1F212, "M", "双"), + (0x1F213, "M", "デ"), + (0x1F214, "M", "二"), + (0x1F215, "M", "多"), + (0x1F216, "M", "解"), + (0x1F217, "M", "天"), + (0x1F218, "M", "交"), + (0x1F219, "M", "映"), + (0x1F21A, "M", "無"), + (0x1F21B, "M", "料"), + (0x1F21C, "M", "前"), + (0x1F21D, "M", "後"), + (0x1F21E, "M", "再"), + (0x1F21F, "M", "新"), + (0x1F220, "M", "初"), + (0x1F221, "M", "終"), + (0x1F222, "M", "生"), + (0x1F223, "M", "販"), + (0x1F224, "M", "声"), + (0x1F225, "M", "吹"), + (0x1F226, "M", "演"), + (0x1F227, "M", "投"), + (0x1F228, "M", "捕"), + (0x1F229, "M", "一"), + (0x1F22A, "M", "三"), + (0x1F22B, 
"M", "遊"), + (0x1F22C, "M", "左"), + (0x1F22D, "M", "中"), + (0x1F22E, "M", "右"), + (0x1F22F, "M", "指"), + (0x1F230, "M", "走"), + (0x1F231, "M", "打"), + (0x1F232, "M", "禁"), + (0x1F233, "M", "空"), + (0x1F234, "M", "合"), + (0x1F235, "M", "満"), + (0x1F236, "M", "有"), + (0x1F237, "M", "月"), + (0x1F238, "M", "申"), + (0x1F239, "M", "割"), + (0x1F23A, "M", "営"), + (0x1F23B, "M", "配"), + (0x1F23C, "X"), + (0x1F240, "M", "〔本〕"), + (0x1F241, "M", "〔三〕"), + (0x1F242, "M", "〔二〕"), + (0x1F243, "M", "〔安〕"), + (0x1F244, "M", "〔点〕"), + (0x1F245, "M", "〔打〕"), + (0x1F246, "M", "〔盗〕"), + (0x1F247, "M", "〔勝〕"), + (0x1F248, "M", "〔敗〕"), + (0x1F249, "X"), + (0x1F250, "M", "得"), + (0x1F251, "M", "可"), + (0x1F252, "X"), + (0x1F260, "V"), + (0x1F266, "X"), + (0x1F300, "V"), + (0x1F6D8, "X"), + (0x1F6DC, "V"), + (0x1F6ED, "X"), + (0x1F6F0, "V"), + (0x1F6FD, "X"), + (0x1F700, "V"), + (0x1F777, "X"), + (0x1F77B, "V"), + (0x1F7DA, "X"), + (0x1F7E0, "V"), + (0x1F7EC, "X"), + (0x1F7F0, "V"), + (0x1F7F1, "X"), + (0x1F800, "V"), + (0x1F80C, "X"), + (0x1F810, "V"), + (0x1F848, "X"), + (0x1F850, "V"), + (0x1F85A, "X"), + (0x1F860, "V"), + (0x1F888, "X"), + (0x1F890, "V"), + (0x1F8AE, "X"), + (0x1F8B0, "V"), + (0x1F8B2, "X"), + (0x1F900, "V"), + (0x1FA54, "X"), + (0x1FA60, "V"), + (0x1FA6E, "X"), + (0x1FA70, "V"), + (0x1FA7D, "X"), + (0x1FA80, "V"), + (0x1FA89, "X"), + ] + + +def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1FA90, "V"), + (0x1FABE, "X"), + (0x1FABF, "V"), + (0x1FAC6, "X"), + (0x1FACE, "V"), + (0x1FADC, "X"), + (0x1FAE0, "V"), + (0x1FAE9, "X"), + (0x1FAF0, "V"), + (0x1FAF9, "X"), + (0x1FB00, "V"), + (0x1FB93, "X"), + (0x1FB94, "V"), + (0x1FBCB, "X"), + (0x1FBF0, "M", "0"), + (0x1FBF1, "M", "1"), + (0x1FBF2, "M", "2"), + (0x1FBF3, "M", "3"), + (0x1FBF4, "M", "4"), + (0x1FBF5, "M", "5"), + (0x1FBF6, "M", "6"), + (0x1FBF7, "M", "7"), + (0x1FBF8, "M", "8"), + (0x1FBF9, "M", "9"), + (0x1FBFA, "X"), + (0x20000, "V"), + (0x2A6E0, "X"), + (0x2A700, "V"), + (0x2B73A, "X"), + (0x2B740, "V"), + (0x2B81E, "X"), + (0x2B820, "V"), + (0x2CEA2, "X"), + (0x2CEB0, "V"), + (0x2EBE1, "X"), + (0x2EBF0, "V"), + (0x2EE5E, "X"), + (0x2F800, "M", "丽"), + (0x2F801, "M", "丸"), + (0x2F802, "M", "乁"), + (0x2F803, "M", "𠄢"), + (0x2F804, "M", "你"), + (0x2F805, "M", "侮"), + (0x2F806, "M", "侻"), + (0x2F807, "M", "倂"), + (0x2F808, "M", "偺"), + (0x2F809, "M", "備"), + (0x2F80A, "M", "僧"), + (0x2F80B, "M", "像"), + (0x2F80C, "M", "㒞"), + (0x2F80D, "M", "𠘺"), + (0x2F80E, "M", "免"), + (0x2F80F, "M", "兔"), + (0x2F810, "M", "兤"), + (0x2F811, "M", "具"), + (0x2F812, "M", "𠔜"), + (0x2F813, "M", "㒹"), + (0x2F814, "M", "內"), + (0x2F815, "M", "再"), + (0x2F816, "M", "𠕋"), + (0x2F817, "M", "冗"), + (0x2F818, "M", "冤"), + (0x2F819, "M", "仌"), + (0x2F81A, "M", "冬"), + (0x2F81B, "M", "况"), + (0x2F81C, "M", "𩇟"), + (0x2F81D, "M", "凵"), + (0x2F81E, "M", "刃"), + (0x2F81F, "M", "㓟"), + (0x2F820, "M", "刻"), + (0x2F821, "M", "剆"), + (0x2F822, "M", "割"), + (0x2F823, "M", "剷"), + (0x2F824, "M", "㔕"), + (0x2F825, "M", "勇"), + (0x2F826, "M", "勉"), + (0x2F827, "M", "勤"), + (0x2F828, "M", "勺"), + (0x2F829, "M", "包"), + (0x2F82A, "M", "匆"), + (0x2F82B, "M", "北"), + (0x2F82C, "M", "卉"), + (0x2F82D, "M", "卑"), + (0x2F82E, "M", "博"), + (0x2F82F, "M", "即"), + (0x2F830, "M", "卽"), + (0x2F831, "M", "卿"), + (0x2F834, "M", "𠨬"), + (0x2F835, "M", "灰"), + (0x2F836, "M", "及"), + (0x2F837, "M", "叟"), + (0x2F838, "M", "𠭣"), + (0x2F839, "M", "叫"), + (0x2F83A, "M", "叱"), + (0x2F83B, "M", "吆"), + (0x2F83C, "M", "咞"), + (0x2F83D, "M", "吸"), + (0x2F83E, "M", 
"呈"), + (0x2F83F, "M", "周"), + (0x2F840, "M", "咢"), + ] + + +def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F841, "M", "哶"), + (0x2F842, "M", "唐"), + (0x2F843, "M", "啓"), + (0x2F844, "M", "啣"), + (0x2F845, "M", "善"), + (0x2F847, "M", "喙"), + (0x2F848, "M", "喫"), + (0x2F849, "M", "喳"), + (0x2F84A, "M", "嗂"), + (0x2F84B, "M", "圖"), + (0x2F84C, "M", "嘆"), + (0x2F84D, "M", "圗"), + (0x2F84E, "M", "噑"), + (0x2F84F, "M", "噴"), + (0x2F850, "M", "切"), + (0x2F851, "M", "壮"), + (0x2F852, "M", "城"), + (0x2F853, "M", "埴"), + (0x2F854, "M", "堍"), + (0x2F855, "M", "型"), + (0x2F856, "M", "堲"), + (0x2F857, "M", "報"), + (0x2F858, "M", "墬"), + (0x2F859, "M", "𡓤"), + (0x2F85A, "M", "売"), + (0x2F85B, "M", "壷"), + (0x2F85C, "M", "夆"), + (0x2F85D, "M", "多"), + (0x2F85E, "M", "夢"), + (0x2F85F, "M", "奢"), + (0x2F860, "M", "𡚨"), + (0x2F861, "M", "𡛪"), + (0x2F862, "M", "姬"), + (0x2F863, "M", "娛"), + (0x2F864, "M", "娧"), + (0x2F865, "M", "姘"), + (0x2F866, "M", "婦"), + (0x2F867, "M", "㛮"), + (0x2F868, "X"), + (0x2F869, "M", "嬈"), + (0x2F86A, "M", "嬾"), + (0x2F86C, "M", "𡧈"), + (0x2F86D, "M", "寃"), + (0x2F86E, "M", "寘"), + (0x2F86F, "M", "寧"), + (0x2F870, "M", "寳"), + (0x2F871, "M", "𡬘"), + (0x2F872, "M", "寿"), + (0x2F873, "M", "将"), + (0x2F874, "X"), + (0x2F875, "M", "尢"), + (0x2F876, "M", "㞁"), + (0x2F877, "M", "屠"), + (0x2F878, "M", "屮"), + (0x2F879, "M", "峀"), + (0x2F87A, "M", "岍"), + (0x2F87B, "M", "𡷤"), + (0x2F87C, "M", "嵃"), + (0x2F87D, "M", "𡷦"), + (0x2F87E, "M", "嵮"), + (0x2F87F, "M", "嵫"), + (0x2F880, "M", "嵼"), + (0x2F881, "M", "巡"), + (0x2F882, "M", "巢"), + (0x2F883, "M", "㠯"), + (0x2F884, "M", "巽"), + (0x2F885, "M", "帨"), + (0x2F886, "M", "帽"), + (0x2F887, "M", "幩"), + (0x2F888, "M", "㡢"), + (0x2F889, "M", "𢆃"), + (0x2F88A, "M", "㡼"), + (0x2F88B, "M", "庰"), + (0x2F88C, "M", "庳"), + (0x2F88D, "M", "庶"), + (0x2F88E, "M", "廊"), + (0x2F88F, "M", "𪎒"), + (0x2F890, "M", "廾"), + (0x2F891, "M", "𢌱"), + (0x2F893, "M", "舁"), + (0x2F894, "M", "弢"), + (0x2F896, "M", "㣇"), + (0x2F897, "M", "𣊸"), + (0x2F898, "M", "𦇚"), + (0x2F899, "M", "形"), + (0x2F89A, "M", "彫"), + (0x2F89B, "M", "㣣"), + (0x2F89C, "M", "徚"), + (0x2F89D, "M", "忍"), + (0x2F89E, "M", "志"), + (0x2F89F, "M", "忹"), + (0x2F8A0, "M", "悁"), + (0x2F8A1, "M", "㤺"), + (0x2F8A2, "M", "㤜"), + (0x2F8A3, "M", "悔"), + (0x2F8A4, "M", "𢛔"), + (0x2F8A5, "M", "惇"), + (0x2F8A6, "M", "慈"), + (0x2F8A7, "M", "慌"), + (0x2F8A8, "M", "慎"), + ] + + +def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F8A9, "M", "慌"), + (0x2F8AA, "M", "慺"), + (0x2F8AB, "M", "憎"), + (0x2F8AC, "M", "憲"), + (0x2F8AD, "M", "憤"), + (0x2F8AE, "M", "憯"), + (0x2F8AF, "M", "懞"), + (0x2F8B0, "M", "懲"), + (0x2F8B1, "M", "懶"), + (0x2F8B2, "M", "成"), + (0x2F8B3, "M", "戛"), + (0x2F8B4, "M", "扝"), + (0x2F8B5, "M", "抱"), + (0x2F8B6, "M", "拔"), + (0x2F8B7, "M", "捐"), + (0x2F8B8, "M", "𢬌"), + (0x2F8B9, "M", "挽"), + (0x2F8BA, "M", "拼"), + (0x2F8BB, "M", "捨"), + (0x2F8BC, "M", "掃"), + (0x2F8BD, "M", "揤"), + (0x2F8BE, "M", "𢯱"), + (0x2F8BF, "M", "搢"), + (0x2F8C0, "M", "揅"), + (0x2F8C1, "M", "掩"), + (0x2F8C2, "M", "㨮"), + (0x2F8C3, "M", "摩"), + (0x2F8C4, "M", "摾"), + (0x2F8C5, "M", "撝"), + (0x2F8C6, "M", "摷"), + (0x2F8C7, "M", "㩬"), + (0x2F8C8, "M", "敏"), + (0x2F8C9, "M", "敬"), + (0x2F8CA, "M", "𣀊"), + (0x2F8CB, "M", "旣"), + (0x2F8CC, "M", "書"), + (0x2F8CD, "M", "晉"), + (0x2F8CE, "M", "㬙"), + (0x2F8CF, "M", "暑"), + (0x2F8D0, "M", "㬈"), + (0x2F8D1, "M", "㫤"), + (0x2F8D2, "M", "冒"), + (0x2F8D3, "M", "冕"), + (0x2F8D4, "M", "最"), + (0x2F8D5, "M", "暜"), 
+ (0x2F8D6, "M", "肭"), + (0x2F8D7, "M", "䏙"), + (0x2F8D8, "M", "朗"), + (0x2F8D9, "M", "望"), + (0x2F8DA, "M", "朡"), + (0x2F8DB, "M", "杞"), + (0x2F8DC, "M", "杓"), + (0x2F8DD, "M", "𣏃"), + (0x2F8DE, "M", "㭉"), + (0x2F8DF, "M", "柺"), + (0x2F8E0, "M", "枅"), + (0x2F8E1, "M", "桒"), + (0x2F8E2, "M", "梅"), + (0x2F8E3, "M", "𣑭"), + (0x2F8E4, "M", "梎"), + (0x2F8E5, "M", "栟"), + (0x2F8E6, "M", "椔"), + (0x2F8E7, "M", "㮝"), + (0x2F8E8, "M", "楂"), + (0x2F8E9, "M", "榣"), + (0x2F8EA, "M", "槪"), + (0x2F8EB, "M", "檨"), + (0x2F8EC, "M", "𣚣"), + (0x2F8ED, "M", "櫛"), + (0x2F8EE, "M", "㰘"), + (0x2F8EF, "M", "次"), + (0x2F8F0, "M", "𣢧"), + (0x2F8F1, "M", "歔"), + (0x2F8F2, "M", "㱎"), + (0x2F8F3, "M", "歲"), + (0x2F8F4, "M", "殟"), + (0x2F8F5, "M", "殺"), + (0x2F8F6, "M", "殻"), + (0x2F8F7, "M", "𣪍"), + (0x2F8F8, "M", "𡴋"), + (0x2F8F9, "M", "𣫺"), + (0x2F8FA, "M", "汎"), + (0x2F8FB, "M", "𣲼"), + (0x2F8FC, "M", "沿"), + (0x2F8FD, "M", "泍"), + (0x2F8FE, "M", "汧"), + (0x2F8FF, "M", "洖"), + (0x2F900, "M", "派"), + (0x2F901, "M", "海"), + (0x2F902, "M", "流"), + (0x2F903, "M", "浩"), + (0x2F904, "M", "浸"), + (0x2F905, "M", "涅"), + (0x2F906, "M", "𣴞"), + (0x2F907, "M", "洴"), + (0x2F908, "M", "港"), + (0x2F909, "M", "湮"), + (0x2F90A, "M", "㴳"), + (0x2F90B, "M", "滋"), + (0x2F90C, "M", "滇"), + ] + + +def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F90D, "M", "𣻑"), + (0x2F90E, "M", "淹"), + (0x2F90F, "M", "潮"), + (0x2F910, "M", "𣽞"), + (0x2F911, "M", "𣾎"), + (0x2F912, "M", "濆"), + (0x2F913, "M", "瀹"), + (0x2F914, "M", "瀞"), + (0x2F915, "M", "瀛"), + (0x2F916, "M", "㶖"), + (0x2F917, "M", "灊"), + (0x2F918, "M", "災"), + (0x2F919, "M", "灷"), + (0x2F91A, "M", "炭"), + (0x2F91B, "M", "𠔥"), + (0x2F91C, "M", "煅"), + (0x2F91D, "M", "𤉣"), + (0x2F91E, "M", "熜"), + (0x2F91F, "X"), + (0x2F920, "M", "爨"), + (0x2F921, "M", "爵"), + (0x2F922, "M", "牐"), + (0x2F923, "M", "𤘈"), + (0x2F924, "M", "犀"), + (0x2F925, "M", "犕"), + (0x2F926, "M", "𤜵"), + (0x2F927, "M", "𤠔"), + (0x2F928, "M", "獺"), + (0x2F929, "M", "王"), + (0x2F92A, "M", "㺬"), + (0x2F92B, "M", "玥"), + (0x2F92C, "M", "㺸"), + (0x2F92E, "M", "瑇"), + (0x2F92F, "M", "瑜"), + (0x2F930, "M", "瑱"), + (0x2F931, "M", "璅"), + (0x2F932, "M", "瓊"), + (0x2F933, "M", "㼛"), + (0x2F934, "M", "甤"), + (0x2F935, "M", "𤰶"), + (0x2F936, "M", "甾"), + (0x2F937, "M", "𤲒"), + (0x2F938, "M", "異"), + (0x2F939, "M", "𢆟"), + (0x2F93A, "M", "瘐"), + (0x2F93B, "M", "𤾡"), + (0x2F93C, "M", "𤾸"), + (0x2F93D, "M", "𥁄"), + (0x2F93E, "M", "㿼"), + (0x2F93F, "M", "䀈"), + (0x2F940, "M", "直"), + (0x2F941, "M", "𥃳"), + (0x2F942, "M", "𥃲"), + (0x2F943, "M", "𥄙"), + (0x2F944, "M", "𥄳"), + (0x2F945, "M", "眞"), + (0x2F946, "M", "真"), + (0x2F948, "M", "睊"), + (0x2F949, "M", "䀹"), + (0x2F94A, "M", "瞋"), + (0x2F94B, "M", "䁆"), + (0x2F94C, "M", "䂖"), + (0x2F94D, "M", "𥐝"), + (0x2F94E, "M", "硎"), + (0x2F94F, "M", "碌"), + (0x2F950, "M", "磌"), + (0x2F951, "M", "䃣"), + (0x2F952, "M", "𥘦"), + (0x2F953, "M", "祖"), + (0x2F954, "M", "𥚚"), + (0x2F955, "M", "𥛅"), + (0x2F956, "M", "福"), + (0x2F957, "M", "秫"), + (0x2F958, "M", "䄯"), + (0x2F959, "M", "穀"), + (0x2F95A, "M", "穊"), + (0x2F95B, "M", "穏"), + (0x2F95C, "M", "𥥼"), + (0x2F95D, "M", "𥪧"), + (0x2F95F, "X"), + (0x2F960, "M", "䈂"), + (0x2F961, "M", "𥮫"), + (0x2F962, "M", "篆"), + (0x2F963, "M", "築"), + (0x2F964, "M", "䈧"), + (0x2F965, "M", "𥲀"), + (0x2F966, "M", "糒"), + (0x2F967, "M", "䊠"), + (0x2F968, "M", "糨"), + (0x2F969, "M", "糣"), + (0x2F96A, "M", "紀"), + (0x2F96B, "M", "𥾆"), + (0x2F96C, "M", "絣"), + (0x2F96D, "M", "䌁"), + (0x2F96E, "M", "緇"), + (0x2F96F, "M", "縂"), + 
(0x2F970, "M", "繅"), + (0x2F971, "M", "䌴"), + (0x2F972, "M", "𦈨"), + (0x2F973, "M", "𦉇"), + ] + + +def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F974, "M", "䍙"), + (0x2F975, "M", "𦋙"), + (0x2F976, "M", "罺"), + (0x2F977, "M", "𦌾"), + (0x2F978, "M", "羕"), + (0x2F979, "M", "翺"), + (0x2F97A, "M", "者"), + (0x2F97B, "M", "𦓚"), + (0x2F97C, "M", "𦔣"), + (0x2F97D, "M", "聠"), + (0x2F97E, "M", "𦖨"), + (0x2F97F, "M", "聰"), + (0x2F980, "M", "𣍟"), + (0x2F981, "M", "䏕"), + (0x2F982, "M", "育"), + (0x2F983, "M", "脃"), + (0x2F984, "M", "䐋"), + (0x2F985, "M", "脾"), + (0x2F986, "M", "媵"), + (0x2F987, "M", "𦞧"), + (0x2F988, "M", "𦞵"), + (0x2F989, "M", "𣎓"), + (0x2F98A, "M", "𣎜"), + (0x2F98B, "M", "舁"), + (0x2F98C, "M", "舄"), + (0x2F98D, "M", "辞"), + (0x2F98E, "M", "䑫"), + (0x2F98F, "M", "芑"), + (0x2F990, "M", "芋"), + (0x2F991, "M", "芝"), + (0x2F992, "M", "劳"), + (0x2F993, "M", "花"), + (0x2F994, "M", "芳"), + (0x2F995, "M", "芽"), + (0x2F996, "M", "苦"), + (0x2F997, "M", "𦬼"), + (0x2F998, "M", "若"), + (0x2F999, "M", "茝"), + (0x2F99A, "M", "荣"), + (0x2F99B, "M", "莭"), + (0x2F99C, "M", "茣"), + (0x2F99D, "M", "莽"), + (0x2F99E, "M", "菧"), + (0x2F99F, "M", "著"), + (0x2F9A0, "M", "荓"), + (0x2F9A1, "M", "菊"), + (0x2F9A2, "M", "菌"), + (0x2F9A3, "M", "菜"), + (0x2F9A4, "M", "𦰶"), + (0x2F9A5, "M", "𦵫"), + (0x2F9A6, "M", "𦳕"), + (0x2F9A7, "M", "䔫"), + (0x2F9A8, "M", "蓱"), + (0x2F9A9, "M", "蓳"), + (0x2F9AA, "M", "蔖"), + (0x2F9AB, "M", "𧏊"), + (0x2F9AC, "M", "蕤"), + (0x2F9AD, "M", "𦼬"), + (0x2F9AE, "M", "䕝"), + (0x2F9AF, "M", "䕡"), + (0x2F9B0, "M", "𦾱"), + (0x2F9B1, "M", "𧃒"), + (0x2F9B2, "M", "䕫"), + (0x2F9B3, "M", "虐"), + (0x2F9B4, "M", "虜"), + (0x2F9B5, "M", "虧"), + (0x2F9B6, "M", "虩"), + (0x2F9B7, "M", "蚩"), + (0x2F9B8, "M", "蚈"), + (0x2F9B9, "M", "蜎"), + (0x2F9BA, "M", "蛢"), + (0x2F9BB, "M", "蝹"), + (0x2F9BC, "M", "蜨"), + (0x2F9BD, "M", "蝫"), + (0x2F9BE, "M", "螆"), + (0x2F9BF, "X"), + (0x2F9C0, "M", "蟡"), + (0x2F9C1, "M", "蠁"), + (0x2F9C2, "M", "䗹"), + (0x2F9C3, "M", "衠"), + (0x2F9C4, "M", "衣"), + (0x2F9C5, "M", "𧙧"), + (0x2F9C6, "M", "裗"), + (0x2F9C7, "M", "裞"), + (0x2F9C8, "M", "䘵"), + (0x2F9C9, "M", "裺"), + (0x2F9CA, "M", "㒻"), + (0x2F9CB, "M", "𧢮"), + (0x2F9CC, "M", "𧥦"), + (0x2F9CD, "M", "䚾"), + (0x2F9CE, "M", "䛇"), + (0x2F9CF, "M", "誠"), + (0x2F9D0, "M", "諭"), + (0x2F9D1, "M", "變"), + (0x2F9D2, "M", "豕"), + (0x2F9D3, "M", "𧲨"), + (0x2F9D4, "M", "貫"), + (0x2F9D5, "M", "賁"), + (0x2F9D6, "M", "贛"), + (0x2F9D7, "M", "起"), + ] + + +def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F9D8, "M", "𧼯"), + (0x2F9D9, "M", "𠠄"), + (0x2F9DA, "M", "跋"), + (0x2F9DB, "M", "趼"), + (0x2F9DC, "M", "跰"), + (0x2F9DD, "M", "𠣞"), + (0x2F9DE, "M", "軔"), + (0x2F9DF, "M", "輸"), + (0x2F9E0, "M", "𨗒"), + (0x2F9E1, "M", "𨗭"), + (0x2F9E2, "M", "邔"), + (0x2F9E3, "M", "郱"), + (0x2F9E4, "M", "鄑"), + (0x2F9E5, "M", "𨜮"), + (0x2F9E6, "M", "鄛"), + (0x2F9E7, "M", "鈸"), + (0x2F9E8, "M", "鋗"), + (0x2F9E9, "M", "鋘"), + (0x2F9EA, "M", "鉼"), + (0x2F9EB, "M", "鏹"), + (0x2F9EC, "M", "鐕"), + (0x2F9ED, "M", "𨯺"), + (0x2F9EE, "M", "開"), + (0x2F9EF, "M", "䦕"), + (0x2F9F0, "M", "閷"), + (0x2F9F1, "M", "𨵷"), + (0x2F9F2, "M", "䧦"), + (0x2F9F3, "M", "雃"), + (0x2F9F4, "M", "嶲"), + (0x2F9F5, "M", "霣"), + (0x2F9F6, "M", "𩅅"), + (0x2F9F7, "M", "𩈚"), + (0x2F9F8, "M", "䩮"), + (0x2F9F9, "M", "䩶"), + (0x2F9FA, "M", "韠"), + (0x2F9FB, "M", "𩐊"), + (0x2F9FC, "M", "䪲"), + (0x2F9FD, "M", "𩒖"), + (0x2F9FE, "M", "頋"), + (0x2FA00, "M", "頩"), + (0x2FA01, "M", "𩖶"), + (0x2FA02, "M", "飢"), + (0x2FA03, "M", "䬳"), + 
(0x2FA04, "M", "餩"), + (0x2FA05, "M", "馧"), + (0x2FA06, "M", "駂"), + (0x2FA07, "M", "駾"), + (0x2FA08, "M", "䯎"), + (0x2FA09, "M", "𩬰"), + (0x2FA0A, "M", "鬒"), + (0x2FA0B, "M", "鱀"), + (0x2FA0C, "M", "鳽"), + (0x2FA0D, "M", "䳎"), + (0x2FA0E, "M", "䳭"), + (0x2FA0F, "M", "鵧"), + (0x2FA10, "M", "𪃎"), + (0x2FA11, "M", "䳸"), + (0x2FA12, "M", "𪄅"), + (0x2FA13, "M", "𪈎"), + (0x2FA14, "M", "𪊑"), + (0x2FA15, "M", "麻"), + (0x2FA16, "M", "䵖"), + (0x2FA17, "M", "黹"), + (0x2FA18, "M", "黾"), + (0x2FA19, "M", "鼅"), + (0x2FA1A, "M", "鼏"), + (0x2FA1B, "M", "鼖"), + (0x2FA1C, "M", "鼻"), + (0x2FA1D, "M", "𪘀"), + (0x2FA1E, "X"), + (0x30000, "V"), + (0x3134B, "X"), + (0x31350, "V"), + (0x323B0, "X"), + (0xE0100, "I"), + (0xE01F0, "X"), + ] + + +uts46data = tuple( + _seg_0() + + _seg_1() + + _seg_2() + + _seg_3() + + _seg_4() + + _seg_5() + + _seg_6() + + _seg_7() + + _seg_8() + + _seg_9() + + _seg_10() + + _seg_11() + + _seg_12() + + _seg_13() + + _seg_14() + + _seg_15() + + _seg_16() + + _seg_17() + + _seg_18() + + _seg_19() + + _seg_20() + + _seg_21() + + _seg_22() + + _seg_23() + + _seg_24() + + _seg_25() + + _seg_26() + + _seg_27() + + _seg_28() + + _seg_29() + + _seg_30() + + _seg_31() + + _seg_32() + + _seg_33() + + _seg_34() + + _seg_35() + + _seg_36() + + _seg_37() + + _seg_38() + + _seg_39() + + _seg_40() + + _seg_41() + + _seg_42() + + _seg_43() + + _seg_44() + + _seg_45() + + _seg_46() + + _seg_47() + + _seg_48() + + _seg_49() + + _seg_50() + + _seg_51() + + _seg_52() + + _seg_53() + + _seg_54() + + _seg_55() + + _seg_56() + + _seg_57() + + _seg_58() + + _seg_59() + + _seg_60() + + _seg_61() + + _seg_62() + + _seg_63() + + _seg_64() + + _seg_65() + + _seg_66() + + _seg_67() + + _seg_68() + + _seg_69() + + _seg_70() + + _seg_71() + + _seg_72() + + _seg_73() + + _seg_74() + + _seg_75() + + _seg_76() + + _seg_77() + + _seg_78() + + _seg_79() + + _seg_80() + + _seg_81() +) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__init__.py new file mode 100644 index 0000000..ad68271 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__init__.py @@ -0,0 +1,55 @@ +# ruff: noqa: F401 +import os + +from .exceptions import * # noqa: F403 +from .ext import ExtType, Timestamp + +version = (1, 1, 1) +__version__ = "1.1.1" + + +if os.environ.get("MSGPACK_PUREPYTHON"): + from .fallback import Packer, Unpacker, unpackb +else: + try: + from ._cmsgpack import Packer, Unpacker, unpackb + except ImportError: + from .fallback import Packer, Unpacker, unpackb + + +def pack(o, stream, **kwargs): + """ + Pack object `o` and write it to `stream` + + See :class:`Packer` for options. + """ + packer = Packer(**kwargs) + stream.write(packer.pack(o)) + + +def packb(o, **kwargs): + """ + Pack object `o` and return packed bytes + + See :class:`Packer` for options. + """ + return Packer(**kwargs).pack(o) + + +def unpack(stream, **kwargs): + """ + Unpack an object from `stream`. + + Raises `ExtraData` when `stream` contains extra bytes. + See :class:`Unpacker` for options. + """ + data = stream.read() + return unpackb(data, **kwargs) + + +# alias for compatibility to simplejson/marshal/pickle. 
+load = unpack +loads = unpackb + +dump = pack +dumps = packb diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..8e916d4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..faa472b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-312.pyc new file mode 100644 index 0000000..2eac8ce Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-312.pyc new file mode 100644 index 0000000..9f8c169 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/exceptions.py b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/exceptions.py new file mode 100644 index 0000000..d6d2615 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/exceptions.py @@ -0,0 +1,48 @@ +class UnpackException(Exception): + """Base class for some exceptions raised while unpacking. + + NOTE: unpack may raise exceptions other than subclasses of + UnpackException. If you want to catch all errors, catch + Exception instead. + """ + + +class BufferFull(UnpackException): + pass + + +class OutOfData(UnpackException): + pass + + +class FormatError(ValueError, UnpackException): + """Invalid msgpack format""" + + +class StackError(ValueError, UnpackException): + """Too deeply nested""" + + +# Deprecated. Use ValueError instead +UnpackValueError = ValueError + + +class ExtraData(UnpackValueError): + """ExtraData is raised when there is trailing data. + + This exception is raised only by one-shot (not streaming) + unpack. + """ + + def __init__(self, unpacked, extra): + self.unpacked = unpacked + self.extra = extra + + def __str__(self): + return "unpack(b) received extra data." + + +# Deprecated. Use Exception instead to catch all exceptions during packing.
+PackException = Exception +PackValueError = ValueError +PackOverflowError = OverflowError diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/ext.py b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/ext.py new file mode 100644 index 0000000..9694819 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/ext.py @@ -0,0 +1,170 @@ +import datetime +import struct +from collections import namedtuple + + +class ExtType(namedtuple("ExtType", "code data")): + """ExtType represents the ext type in msgpack.""" + + def __new__(cls, code, data): + if not isinstance(code, int): + raise TypeError("code must be int") + if not isinstance(data, bytes): + raise TypeError("data must be bytes") + if not 0 <= code <= 127: + raise ValueError("code must be 0~127") + return super().__new__(cls, code, data) + + +class Timestamp: + """Timestamp represents the Timestamp extension type in msgpack. + + When built with Cython, msgpack uses C methods to pack and unpack `Timestamp`. + When using pure-Python msgpack, :func:`to_bytes` and :func:`from_bytes` are used to pack and + unpack `Timestamp`. + + This class is immutable: Do not override seconds and nanoseconds. + """ + + __slots__ = ["seconds", "nanoseconds"] + + def __init__(self, seconds, nanoseconds=0): + """Initialize a Timestamp object. + + :param int seconds: + Number of seconds since the UNIX epoch (00:00:00 UTC Jan 1 1970, minus leap seconds). + May be negative. + + :param int nanoseconds: + Number of nanoseconds to add to `seconds` to get fractional time. + Maximum is 999_999_999. Default is 0. + + Note: Negative times (before the UNIX epoch) are represented as negative seconds plus positive nanoseconds. + """ + if not isinstance(seconds, int): + raise TypeError("seconds must be an integer") + if not isinstance(nanoseconds, int): + raise TypeError("nanoseconds must be an integer") + if not (0 <= nanoseconds < 10**9): + raise ValueError("nanoseconds must be a non-negative integer less than 1000000000.") + self.seconds = seconds + self.nanoseconds = nanoseconds + + def __repr__(self): + """String representation of Timestamp.""" + return f"Timestamp(seconds={self.seconds}, nanoseconds={self.nanoseconds})" + + def __eq__(self, other): + """Check for equality with another Timestamp object""" + if type(other) is self.__class__: + return self.seconds == other.seconds and self.nanoseconds == other.nanoseconds + return False + + def __ne__(self, other): + """not-equals method (see :func:`__eq__()`)""" + return not self.__eq__(other) + + def __hash__(self): + return hash((self.seconds, self.nanoseconds)) + + @staticmethod + def from_bytes(b): + """Unpack bytes into a `Timestamp` object. + + Used for pure-Python msgpack unpacking. + + :param b: Payload from msgpack ext message with code -1 + :type b: bytes + + :returns: Timestamp object unpacked from msgpack ext payload + :rtype: Timestamp + """ + if len(b) == 4: + seconds = struct.unpack("!L", b)[0] + nanoseconds = 0 + elif len(b) == 8: + data64 = struct.unpack("!Q", b)[0] + seconds = data64 & 0x00000003FFFFFFFF + nanoseconds = data64 >> 34 + elif len(b) == 12: + nanoseconds, seconds = struct.unpack("!Iq", b) + else: + raise ValueError( + "Timestamp type can only be created from 32, 64, or 96-bit byte objects" + ) + return Timestamp(seconds, nanoseconds) + + def to_bytes(self): + """Pack this Timestamp object into bytes. + + Used for pure-Python msgpack packing.
+ + :returns data: Payload for EXT message with code -1 (timestamp type) + :rtype: bytes + """ + if (self.seconds >> 34) == 0: # seconds is non-negative and fits in 34 bits + data64 = self.nanoseconds << 34 | self.seconds + if data64 & 0xFFFFFFFF00000000 == 0: + # nanoseconds is zero and seconds < 2**32, so timestamp 32 + data = struct.pack("!L", data64) + else: + # timestamp 64 + data = struct.pack("!Q", data64) + else: + # timestamp 96 + data = struct.pack("!Iq", self.nanoseconds, self.seconds) + return data + + @staticmethod + def from_unix(unix_sec): + """Create a Timestamp from a posix timestamp in seconds. + + :param unix_sec: Posix timestamp in seconds. + :type unix_sec: int or float + """ + seconds = int(unix_sec // 1) + nanoseconds = int((unix_sec % 1) * 10**9) + return Timestamp(seconds, nanoseconds) + + def to_unix(self): + """Get the timestamp as a floating-point value. + + :returns: posix timestamp + :rtype: float + """ + return self.seconds + self.nanoseconds / 1e9 + + @staticmethod + def from_unix_nano(unix_ns): + """Create a Timestamp from a posix timestamp in nanoseconds. + + :param int unix_ns: Posix timestamp in nanoseconds. + :rtype: Timestamp + """ + return Timestamp(*divmod(unix_ns, 10**9)) + + def to_unix_nano(self): + """Get the timestamp as a unixtime in nanoseconds. + + :returns: posix timestamp in nanoseconds + :rtype: int + """ + return self.seconds * 10**9 + self.nanoseconds + + def to_datetime(self): + """Get the timestamp as a UTC datetime. + + :rtype: `datetime.datetime` + """ + utc = datetime.timezone.utc + return datetime.datetime.fromtimestamp(0, utc) + datetime.timedelta( + seconds=self.seconds, microseconds=self.nanoseconds // 1000 + ) + + @staticmethod + def from_datetime(dt): + """Create a Timestamp from a datetime with tzinfo.
+
+        :rtype: Timestamp
+        """
+        return Timestamp(seconds=int(dt.timestamp()), nanoseconds=dt.microsecond * 1000)
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/fallback.py b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/fallback.py
new file mode 100644
index 0000000..b02e47c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/msgpack/fallback.py
@@ -0,0 +1,929 @@
+"""Fallback pure Python implementation of msgpack"""
+
+import struct
+import sys
+from datetime import datetime as _DateTime
+
+if hasattr(sys, "pypy_version_info"):
+    from __pypy__ import newlist_hint
+    from __pypy__.builders import BytesBuilder
+
+    _USING_STRINGBUILDER = True
+
+    class BytesIO:
+        def __init__(self, s=b""):
+            if s:
+                self.builder = BytesBuilder(len(s))
+                self.builder.append(s)
+            else:
+                self.builder = BytesBuilder()
+
+        def write(self, s):
+            if isinstance(s, memoryview):
+                s = s.tobytes()
+            elif isinstance(s, bytearray):
+                s = bytes(s)
+            self.builder.append(s)
+
+        def getvalue(self):
+            return self.builder.build()
+
+else:
+    from io import BytesIO
+
+    _USING_STRINGBUILDER = False
+
+    def newlist_hint(size):
+        return []
+
+
+from .exceptions import BufferFull, ExtraData, FormatError, OutOfData, StackError
+from .ext import ExtType, Timestamp
+
+EX_SKIP = 0
+EX_CONSTRUCT = 1
+EX_READ_ARRAY_HEADER = 2
+EX_READ_MAP_HEADER = 3
+
+TYPE_IMMEDIATE = 0
+TYPE_ARRAY = 1
+TYPE_MAP = 2
+TYPE_RAW = 3
+TYPE_BIN = 4
+TYPE_EXT = 5
+
+DEFAULT_RECURSE_LIMIT = 511
+
+
+def _check_type_strict(obj, t, type=type, tuple=tuple):
+    if type(t) is tuple:
+        return type(obj) in t
+    else:
+        return type(obj) is t
+
+
+def _get_data_from_buffer(obj):
+    view = memoryview(obj)
+    if view.itemsize != 1:
+        raise ValueError("cannot unpack from multi-byte object")
+    return view
+
+
+def unpackb(packed, **kwargs):
+    """
+    Unpack an object from `packed`.
+
+    Raises ``ExtraData`` when *packed* contains extra bytes.
+    Raises ``ValueError`` when *packed* is incomplete.
+    Raises ``FormatError`` when *packed* is not valid msgpack.
+    Raises ``StackError`` when *packed* contains too deeply nested data.
+    Other exceptions can be raised during unpacking.
+
+    See :class:`Unpacker` for options.
+    """
+    unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs)
+    unpacker.feed(packed)
+    try:
+        ret = unpacker._unpack()
+    except OutOfData:
+        raise ValueError("Unpack failed: incomplete input")
+    except RecursionError:
+        raise StackError
+    if unpacker._got_extradata():
+        raise ExtraData(ret, unpacker._get_extradata())
+    return ret
+
+
+_NO_FORMAT_USED = ""
+_MSGPACK_HEADERS = {
+    0xC4: (1, _NO_FORMAT_USED, TYPE_BIN),
+    0xC5: (2, ">H", TYPE_BIN),
+    0xC6: (4, ">I", TYPE_BIN),
+    0xC7: (2, "Bb", TYPE_EXT),
+    0xC8: (3, ">Hb", TYPE_EXT),
+    0xC9: (5, ">Ib", TYPE_EXT),
+    0xCA: (4, ">f"),
+    0xCB: (8, ">d"),
+    0xCC: (1, _NO_FORMAT_USED),
+    0xCD: (2, ">H"),
+    0xCE: (4, ">I"),
+    0xCF: (8, ">Q"),
+    0xD0: (1, "b"),
+    0xD1: (2, ">h"),
+    0xD2: (4, ">i"),
+    0xD3: (8, ">q"),
+    0xD4: (1, "b1s", TYPE_EXT),
+    0xD5: (2, "b2s", TYPE_EXT),
+    0xD6: (4, "b4s", TYPE_EXT),
+    0xD7: (8, "b8s", TYPE_EXT),
+    0xD8: (16, "b16s", TYPE_EXT),
+    0xD9: (1, _NO_FORMAT_USED, TYPE_RAW),
+    0xDA: (2, ">H", TYPE_RAW),
+    0xDB: (4, ">I", TYPE_RAW),
+    0xDC: (2, ">H", TYPE_ARRAY),
+    0xDD: (4, ">I", TYPE_ARRAY),
+    0xDE: (2, ">H", TYPE_MAP),
+    0xDF: (4, ">I", TYPE_MAP),
+}
+
+
+class Unpacker:
+    """Streaming unpacker.
+
+    Arguments:
+
+    :param file_like:
+        File-like object having `.read(n)` method.
+        If specified, unpacker reads serialized data from it and `.feed()` is not usable.
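+
+        A small round-trip illustration (editor's note, not upstream
+        documentation; the bytes are a hand-assembled msgpack fixarray)::
+
+            import io
+            buf = io.BytesIO(bytes([0x93, 1, 2, 3]))  # fixarray [1, 2, 3]
+            unpacker = Unpacker(buf)
+            assert next(unpacker) == [1, 2, 3]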
+
+    :param int read_size:
+        Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)
+
+    :param bool use_list:
+        If true, unpack msgpack array to Python list.
+        Otherwise, unpack to Python tuple. (default: True)
+
+    :param bool raw:
+        If true, unpack msgpack raw to Python bytes.
+        Otherwise, unpack to Python str by decoding with UTF-8 encoding (default).
+
+    :param int timestamp:
+        Control how timestamp type is unpacked:
+
+            0 - Timestamp
+            1 - float (Seconds from the EPOCH)
+            2 - int (Nanoseconds from the EPOCH)
+            3 - datetime.datetime (UTC).
+
+    :param bool strict_map_key:
+        If true (default), only str or bytes are accepted for map (dict) keys.
+
+    :param object_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a dict argument after unpacking msgpack map.
+        (See also simplejson)
+
+    :param object_pairs_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
+        (See also simplejson)
+
+    :param str unicode_errors:
+        The error handler for decoding unicode. (default: 'strict')
+        This option should be used only when you have msgpack data which
+        contains invalid UTF-8 strings.
+
+    :param int max_buffer_size:
+        Limits the size of data waiting to be unpacked. 0 means 2**31-1.
+        The default value is 100*1024*1024 (100MiB).
+        Raises `BufferFull` exception when it is insufficient.
+        You should set this parameter when unpacking data from an untrusted source.
+
+    :param int max_str_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max length of str. (default: max_buffer_size)
+
+    :param int max_bin_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max length of bin. (default: max_buffer_size)
+
+    :param int max_array_len:
+        Limits max length of array.
+        (default: max_buffer_size)
+
+    :param int max_map_len:
+        Limits max length of map.
+        (default: max_buffer_size//2)
+
+    :param int max_ext_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max size of ext type. (default: max_buffer_size)
+
+    Example of streaming deserialization from a file-like object::
+
+        unpacker = Unpacker(file_like)
+        for o in unpacker:
+            process(o)
+
+    Example of streaming deserialization from a socket::
+
+        unpacker = Unpacker()
+        while True:
+            buf = sock.recv(1024**2)
+            if not buf:
+                break
+            unpacker.feed(buf)
+            for o in unpacker:
+                process(o)
+
+    Raises ``ExtraData`` when *packed* contains extra bytes.
+    Raises ``OutOfData`` when *packed* is incomplete.
+    Raises ``FormatError`` when *packed* is not valid msgpack.
+    Raises ``StackError`` when *packed* contains too deeply nested data.
+    Other exceptions can be raised during unpacking.
+    """
+
+    def __init__(
+        self,
+        file_like=None,
+        *,
+        read_size=0,
+        use_list=True,
+        raw=False,
+        timestamp=0,
+        strict_map_key=True,
+        object_hook=None,
+        object_pairs_hook=None,
+        list_hook=None,
+        unicode_errors=None,
+        max_buffer_size=100 * 1024 * 1024,
+        ext_hook=ExtType,
+        max_str_len=-1,
+        max_bin_len=-1,
+        max_array_len=-1,
+        max_map_len=-1,
+        max_ext_len=-1,
+    ):
+        if unicode_errors is None:
+            unicode_errors = "strict"
+
+        if file_like is None:
+            self._feeding = True
+        else:
+            if not callable(file_like.read):
+                raise TypeError("`file_like.read` must be callable")
+            self.file_like = file_like
+            self._feeding = False
+
+        #: array of bytes fed.
+        self._buffer = bytearray()
+        #: The position we are currently reading.
+        self._buff_i = 0
+
+        # When Unpacker is used as an iterable, between the calls to next(),
+        # the buffer is not "consumed" completely, for efficiency's sake.
+ # Instead, it is done sloppily. To make sure we raise BufferFull at + # the correct moments, we have to keep track of how sloppy we were. + # Furthermore, when the buffer is incomplete (that is: in the case + # we raise an OutOfData) we need to rollback the buffer to the correct + # state, which _buf_checkpoint records. + self._buf_checkpoint = 0 + + if not max_buffer_size: + max_buffer_size = 2**31 - 1 + if max_str_len == -1: + max_str_len = max_buffer_size + if max_bin_len == -1: + max_bin_len = max_buffer_size + if max_array_len == -1: + max_array_len = max_buffer_size + if max_map_len == -1: + max_map_len = max_buffer_size // 2 + if max_ext_len == -1: + max_ext_len = max_buffer_size + + self._max_buffer_size = max_buffer_size + if read_size > self._max_buffer_size: + raise ValueError("read_size must be smaller than max_buffer_size") + self._read_size = read_size or min(self._max_buffer_size, 16 * 1024) + self._raw = bool(raw) + self._strict_map_key = bool(strict_map_key) + self._unicode_errors = unicode_errors + self._use_list = use_list + if not (0 <= timestamp <= 3): + raise ValueError("timestamp must be 0..3") + self._timestamp = timestamp + self._list_hook = list_hook + self._object_hook = object_hook + self._object_pairs_hook = object_pairs_hook + self._ext_hook = ext_hook + self._max_str_len = max_str_len + self._max_bin_len = max_bin_len + self._max_array_len = max_array_len + self._max_map_len = max_map_len + self._max_ext_len = max_ext_len + self._stream_offset = 0 + + if list_hook is not None and not callable(list_hook): + raise TypeError("`list_hook` is not callable") + if object_hook is not None and not callable(object_hook): + raise TypeError("`object_hook` is not callable") + if object_pairs_hook is not None and not callable(object_pairs_hook): + raise TypeError("`object_pairs_hook` is not callable") + if object_hook is not None and object_pairs_hook is not None: + raise TypeError("object_pairs_hook and object_hook are mutually exclusive") + if not callable(ext_hook): + raise TypeError("`ext_hook` is not callable") + + def feed(self, next_bytes): + assert self._feeding + view = _get_data_from_buffer(next_bytes) + if len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size: + raise BufferFull + + # Strip buffer before checkpoint before reading file. + if self._buf_checkpoint > 0: + del self._buffer[: self._buf_checkpoint] + self._buff_i -= self._buf_checkpoint + self._buf_checkpoint = 0 + + # Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython + self._buffer.extend(view) + view.release() + + def _consume(self): + """Gets rid of the used parts of the buffer.""" + self._stream_offset += self._buff_i - self._buf_checkpoint + self._buf_checkpoint = self._buff_i + + def _got_extradata(self): + return self._buff_i < len(self._buffer) + + def _get_extradata(self): + return self._buffer[self._buff_i :] + + def read_bytes(self, n): + ret = self._read(n, raise_outofdata=False) + self._consume() + return ret + + def _read(self, n, raise_outofdata=True): + # (int) -> bytearray + self._reserve(n, raise_outofdata=raise_outofdata) + i = self._buff_i + ret = self._buffer[i : i + n] + self._buff_i = i + len(ret) + return ret + + def _reserve(self, n, raise_outofdata=True): + remain_bytes = len(self._buffer) - self._buff_i - n + + # Fast path: buffer has n bytes already + if remain_bytes >= 0: + return + + if self._feeding: + self._buff_i = self._buf_checkpoint + raise OutOfData + + # Strip buffer before checkpoint before reading file. 
+ if self._buf_checkpoint > 0: + del self._buffer[: self._buf_checkpoint] + self._buff_i -= self._buf_checkpoint + self._buf_checkpoint = 0 + + # Read from file + remain_bytes = -remain_bytes + if remain_bytes + len(self._buffer) > self._max_buffer_size: + raise BufferFull + while remain_bytes > 0: + to_read_bytes = max(self._read_size, remain_bytes) + read_data = self.file_like.read(to_read_bytes) + if not read_data: + break + assert isinstance(read_data, bytes) + self._buffer += read_data + remain_bytes -= len(read_data) + + if len(self._buffer) < n + self._buff_i and raise_outofdata: + self._buff_i = 0 # rollback + raise OutOfData + + def _read_header(self): + typ = TYPE_IMMEDIATE + n = 0 + obj = None + self._reserve(1) + b = self._buffer[self._buff_i] + self._buff_i += 1 + if b & 0b10000000 == 0: + obj = b + elif b & 0b11100000 == 0b11100000: + obj = -1 - (b ^ 0xFF) + elif b & 0b11100000 == 0b10100000: + n = b & 0b00011111 + typ = TYPE_RAW + if n > self._max_str_len: + raise ValueError(f"{n} exceeds max_str_len({self._max_str_len})") + obj = self._read(n) + elif b & 0b11110000 == 0b10010000: + n = b & 0b00001111 + typ = TYPE_ARRAY + if n > self._max_array_len: + raise ValueError(f"{n} exceeds max_array_len({self._max_array_len})") + elif b & 0b11110000 == 0b10000000: + n = b & 0b00001111 + typ = TYPE_MAP + if n > self._max_map_len: + raise ValueError(f"{n} exceeds max_map_len({self._max_map_len})") + elif b == 0xC0: + obj = None + elif b == 0xC2: + obj = False + elif b == 0xC3: + obj = True + elif 0xC4 <= b <= 0xC6: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + n = struct.unpack_from(fmt, self._buffer, self._buff_i)[0] + else: + n = self._buffer[self._buff_i] + self._buff_i += size + if n > self._max_bin_len: + raise ValueError(f"{n} exceeds max_bin_len({self._max_bin_len})") + obj = self._read(n) + elif 0xC7 <= b <= 0xC9: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + L, n = struct.unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size + if L > self._max_ext_len: + raise ValueError(f"{L} exceeds max_ext_len({self._max_ext_len})") + obj = self._read(L) + elif 0xCA <= b <= 0xD3: + size, fmt = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + obj = struct.unpack_from(fmt, self._buffer, self._buff_i)[0] + else: + obj = self._buffer[self._buff_i] + self._buff_i += size + elif 0xD4 <= b <= 0xD8: + size, fmt, typ = _MSGPACK_HEADERS[b] + if self._max_ext_len < size: + raise ValueError(f"{size} exceeds max_ext_len({self._max_ext_len})") + self._reserve(size + 1) + n, obj = struct.unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size + 1 + elif 0xD9 <= b <= 0xDB: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i) + else: + n = self._buffer[self._buff_i] + self._buff_i += size + if n > self._max_str_len: + raise ValueError(f"{n} exceeds max_str_len({self._max_str_len})") + obj = self._read(n) + elif 0xDC <= b <= 0xDD: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size + if n > self._max_array_len: + raise ValueError(f"{n} exceeds max_array_len({self._max_array_len})") + elif 0xDE <= b <= 0xDF: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size + if n > self._max_map_len: + raise ValueError(f"{n} exceeds 
max_map_len({self._max_map_len})")
+        else:
+            raise FormatError("Unknown header: 0x%x" % b)
+        return typ, n, obj
+
+    def _unpack(self, execute=EX_CONSTRUCT):
+        typ, n, obj = self._read_header()
+
+        if execute == EX_READ_ARRAY_HEADER:
+            if typ != TYPE_ARRAY:
+                raise ValueError("Expected array")
+            return n
+        if execute == EX_READ_MAP_HEADER:
+            if typ != TYPE_MAP:
+                raise ValueError("Expected map")
+            return n
+        # TODO should we eliminate the recursion?
+        if typ == TYPE_ARRAY:
+            if execute == EX_SKIP:
+                for i in range(n):
+                    # TODO check whether we need to call `list_hook`
+                    self._unpack(EX_SKIP)
+                return
+            ret = newlist_hint(n)
+            for i in range(n):
+                ret.append(self._unpack(EX_CONSTRUCT))
+            if self._list_hook is not None:
+                ret = self._list_hook(ret)
+            # TODO is the interaction between `list_hook` and `use_list` ok?
+            return ret if self._use_list else tuple(ret)
+        if typ == TYPE_MAP:
+            if execute == EX_SKIP:
+                for i in range(n):
+                    # TODO check whether we need to call hooks
+                    self._unpack(EX_SKIP)
+                    self._unpack(EX_SKIP)
+                return
+            if self._object_pairs_hook is not None:
+                ret = self._object_pairs_hook(
+                    (self._unpack(EX_CONSTRUCT), self._unpack(EX_CONSTRUCT)) for _ in range(n)
+                )
+            else:
+                ret = {}
+                for _ in range(n):
+                    key = self._unpack(EX_CONSTRUCT)
+                    if self._strict_map_key and type(key) not in (str, bytes):
+                        raise ValueError("%s is not allowed for map key" % str(type(key)))
+                    if isinstance(key, str):
+                        key = sys.intern(key)
+                    ret[key] = self._unpack(EX_CONSTRUCT)
+                if self._object_hook is not None:
+                    ret = self._object_hook(ret)
+            return ret
+        if execute == EX_SKIP:
+            return
+        if typ == TYPE_RAW:
+            if self._raw:
+                obj = bytes(obj)
+            else:
+                obj = obj.decode("utf_8", self._unicode_errors)
+            return obj
+        if typ == TYPE_BIN:
+            return bytes(obj)
+        if typ == TYPE_EXT:
+            if n == -1:  # timestamp
+                ts = Timestamp.from_bytes(bytes(obj))
+                if self._timestamp == 1:
+                    return ts.to_unix()
+                elif self._timestamp == 2:
+                    return ts.to_unix_nano()
+                elif self._timestamp == 3:
+                    return ts.to_datetime()
+                else:
+                    return ts
+            else:
+                return self._ext_hook(n, bytes(obj))
+        assert typ == TYPE_IMMEDIATE
+        return obj
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        try:
+            ret = self._unpack(EX_CONSTRUCT)
+            self._consume()
+            return ret
+        except OutOfData:
+            self._consume()
+            raise StopIteration
+        except RecursionError:
+            raise StackError
+
+    next = __next__
+
+    def skip(self):
+        self._unpack(EX_SKIP)
+        self._consume()
+
+    def unpack(self):
+        try:
+            ret = self._unpack(EX_CONSTRUCT)
+        except RecursionError:
+            raise StackError
+        self._consume()
+        return ret
+
+    def read_array_header(self):
+        ret = self._unpack(EX_READ_ARRAY_HEADER)
+        self._consume()
+        return ret
+
+    def read_map_header(self):
+        ret = self._unpack(EX_READ_MAP_HEADER)
+        self._consume()
+        return ret
+
+    def tell(self):
+        return self._stream_offset
+
+
+class Packer:
+    """
+    MessagePack Packer
+
+    Usage::
+
+        packer = Packer()
+        astream.write(packer.pack(a))
+        astream.write(packer.pack(b))
+
+    Packer's constructor has some keyword arguments:
+
+    :param default:
+        When specified, it should be callable.
+        Convert a user type to a builtin type that Packer supports.
+        See also simplejson's documentation.
+
+    :param bool use_single_float:
+        Use single precision float type for float. (default: False)
+
+    :param bool autoreset:
+        Reset buffer after each pack and return its content as `bytes`. (default: True).
+        If set to false, use `bytes()` to get the content and `.reset()` to clear the buffer.
+
+    :param bool use_bin_type:
+        Use bin type introduced in msgpack spec 2.0 for bytes.
+        It also enables str8 type for unicode. (default: True)
+
+    :param bool strict_types:
+        If set to true, types will be checked to be exact. Derived classes
+        from serializable types will not be serialized and will be
+        treated as an unsupported type and forwarded to default.
+        Additionally, tuples will not be serialized as lists.
+        This is useful when trying to implement accurate serialization
+        for Python types.
+
+    :param bool datetime:
+        If set to true, datetime with tzinfo is packed into Timestamp type.
+        Note that the tzinfo is stripped in the timestamp.
+        You can get UTC datetime with the `timestamp=3` option of the Unpacker.
+
+    :param str unicode_errors:
+        The error handler for encoding unicode. (default: 'strict')
+        DO NOT USE THIS!! This option is kept for very specific usage.
+
+    :param int buf_size:
+        Internal buffer size. This option is used only by the C implementation.
+    """
+
+    def __init__(
+        self,
+        *,
+        default=None,
+        use_single_float=False,
+        autoreset=True,
+        use_bin_type=True,
+        strict_types=False,
+        datetime=False,
+        unicode_errors=None,
+        buf_size=None,
+    ):
+        self._strict_types = strict_types
+        self._use_float = use_single_float
+        self._autoreset = autoreset
+        self._use_bin_type = use_bin_type
+        self._buffer = BytesIO()
+        self._datetime = bool(datetime)
+        self._unicode_errors = unicode_errors or "strict"
+        if default is not None and not callable(default):
+            raise TypeError("default must be callable")
+        self._default = default
+
+    def _pack(
+        self,
+        obj,
+        nest_limit=DEFAULT_RECURSE_LIMIT,
+        check=isinstance,
+        check_type_strict=_check_type_strict,
+    ):
+        default_used = False
+        if self._strict_types:
+            check = check_type_strict
+            list_types = list
+        else:
+            list_types = (list, tuple)
+        while True:
+            if nest_limit < 0:
+                raise ValueError("recursion limit exceeded")
+            if obj is None:
+                return self._buffer.write(b"\xc0")
+            if check(obj, bool):
+                if obj:
+                    return self._buffer.write(b"\xc3")
+                return self._buffer.write(b"\xc2")
+            if check(obj, int):
+                if 0 <= obj < 0x80:
+                    return self._buffer.write(struct.pack("B", obj))
+                if -0x20 <= obj < 0:
+                    return self._buffer.write(struct.pack("b", obj))
+                if 0x80 <= obj <= 0xFF:
+                    return self._buffer.write(struct.pack("BB", 0xCC, obj))
+                if -0x80 <= obj < 0:
+                    return self._buffer.write(struct.pack(">Bb", 0xD0, obj))
+                if 0xFF < obj <= 0xFFFF:
+                    return self._buffer.write(struct.pack(">BH", 0xCD, obj))
+                if -0x8000 <= obj < -0x80:
+                    return self._buffer.write(struct.pack(">Bh", 0xD1, obj))
+                if 0xFFFF < obj <= 0xFFFFFFFF:
+                    return self._buffer.write(struct.pack(">BI", 0xCE, obj))
+                if -0x80000000 <= obj < -0x8000:
+                    return self._buffer.write(struct.pack(">Bi", 0xD2, obj))
+                if 0xFFFFFFFF < obj <= 0xFFFFFFFFFFFFFFFF:
+                    return self._buffer.write(struct.pack(">BQ", 0xCF, obj))
+                if -0x8000000000000000 <= obj < -0x80000000:
+                    return self._buffer.write(struct.pack(">Bq", 0xD3, obj))
+                if not default_used and self._default is not None:
+                    obj = self._default(obj)
+                    default_used = True
+                    continue
+                raise OverflowError("Integer value out of range")
+            if check(obj, (bytes, bytearray)):
+                n = len(obj)
+                if n >= 2**32:
+                    raise ValueError("%s is too large" % type(obj).__name__)
+                self._pack_bin_header(n)
+                return self._buffer.write(obj)
+            if check(obj, str):
+                obj = obj.encode("utf-8", self._unicode_errors)
+                n = len(obj)
+                if n >= 2**32:
+                    raise ValueError("String is too large")
+                self._pack_raw_header(n)
+                return self._buffer.write(obj)
+            if check(obj, memoryview):
+
n = obj.nbytes + if n >= 2**32: + raise ValueError("Memoryview is too large") + self._pack_bin_header(n) + return self._buffer.write(obj) + if check(obj, float): + if self._use_float: + return self._buffer.write(struct.pack(">Bf", 0xCA, obj)) + return self._buffer.write(struct.pack(">Bd", 0xCB, obj)) + if check(obj, (ExtType, Timestamp)): + if check(obj, Timestamp): + code = -1 + data = obj.to_bytes() + else: + code = obj.code + data = obj.data + assert isinstance(code, int) + assert isinstance(data, bytes) + L = len(data) + if L == 1: + self._buffer.write(b"\xd4") + elif L == 2: + self._buffer.write(b"\xd5") + elif L == 4: + self._buffer.write(b"\xd6") + elif L == 8: + self._buffer.write(b"\xd7") + elif L == 16: + self._buffer.write(b"\xd8") + elif L <= 0xFF: + self._buffer.write(struct.pack(">BB", 0xC7, L)) + elif L <= 0xFFFF: + self._buffer.write(struct.pack(">BH", 0xC8, L)) + else: + self._buffer.write(struct.pack(">BI", 0xC9, L)) + self._buffer.write(struct.pack("b", code)) + self._buffer.write(data) + return + if check(obj, list_types): + n = len(obj) + self._pack_array_header(n) + for i in range(n): + self._pack(obj[i], nest_limit - 1) + return + if check(obj, dict): + return self._pack_map_pairs(len(obj), obj.items(), nest_limit - 1) + + if self._datetime and check(obj, _DateTime) and obj.tzinfo is not None: + obj = Timestamp.from_datetime(obj) + default_used = 1 + continue + + if not default_used and self._default is not None: + obj = self._default(obj) + default_used = 1 + continue + + if self._datetime and check(obj, _DateTime): + raise ValueError(f"Cannot serialize {obj!r} where tzinfo=None") + + raise TypeError(f"Cannot serialize {obj!r}") + + def pack(self, obj): + try: + self._pack(obj) + except: + self._buffer = BytesIO() # force reset + raise + if self._autoreset: + ret = self._buffer.getvalue() + self._buffer = BytesIO() + return ret + + def pack_map_pairs(self, pairs): + self._pack_map_pairs(len(pairs), pairs) + if self._autoreset: + ret = self._buffer.getvalue() + self._buffer = BytesIO() + return ret + + def pack_array_header(self, n): + if n >= 2**32: + raise ValueError + self._pack_array_header(n) + if self._autoreset: + ret = self._buffer.getvalue() + self._buffer = BytesIO() + return ret + + def pack_map_header(self, n): + if n >= 2**32: + raise ValueError + self._pack_map_header(n) + if self._autoreset: + ret = self._buffer.getvalue() + self._buffer = BytesIO() + return ret + + def pack_ext_type(self, typecode, data): + if not isinstance(typecode, int): + raise TypeError("typecode must have int type.") + if not 0 <= typecode <= 127: + raise ValueError("typecode should be 0-127") + if not isinstance(data, bytes): + raise TypeError("data must have bytes type") + L = len(data) + if L > 0xFFFFFFFF: + raise ValueError("Too large data") + if L == 1: + self._buffer.write(b"\xd4") + elif L == 2: + self._buffer.write(b"\xd5") + elif L == 4: + self._buffer.write(b"\xd6") + elif L == 8: + self._buffer.write(b"\xd7") + elif L == 16: + self._buffer.write(b"\xd8") + elif L <= 0xFF: + self._buffer.write(b"\xc7" + struct.pack("B", L)) + elif L <= 0xFFFF: + self._buffer.write(b"\xc8" + struct.pack(">H", L)) + else: + self._buffer.write(b"\xc9" + struct.pack(">I", L)) + self._buffer.write(struct.pack("B", typecode)) + self._buffer.write(data) + + def _pack_array_header(self, n): + if n <= 0x0F: + return self._buffer.write(struct.pack("B", 0x90 + n)) + if n <= 0xFFFF: + return self._buffer.write(struct.pack(">BH", 0xDC, n)) + if n <= 0xFFFFFFFF: + return 
self._buffer.write(struct.pack(">BI", 0xDD, n)) + raise ValueError("Array is too large") + + def _pack_map_header(self, n): + if n <= 0x0F: + return self._buffer.write(struct.pack("B", 0x80 + n)) + if n <= 0xFFFF: + return self._buffer.write(struct.pack(">BH", 0xDE, n)) + if n <= 0xFFFFFFFF: + return self._buffer.write(struct.pack(">BI", 0xDF, n)) + raise ValueError("Dict is too large") + + def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): + self._pack_map_header(n) + for k, v in pairs: + self._pack(k, nest_limit - 1) + self._pack(v, nest_limit - 1) + + def _pack_raw_header(self, n): + if n <= 0x1F: + self._buffer.write(struct.pack("B", 0xA0 + n)) + elif self._use_bin_type and n <= 0xFF: + self._buffer.write(struct.pack(">BB", 0xD9, n)) + elif n <= 0xFFFF: + self._buffer.write(struct.pack(">BH", 0xDA, n)) + elif n <= 0xFFFFFFFF: + self._buffer.write(struct.pack(">BI", 0xDB, n)) + else: + raise ValueError("Raw is too large") + + def _pack_bin_header(self, n): + if not self._use_bin_type: + return self._pack_raw_header(n) + elif n <= 0xFF: + return self._buffer.write(struct.pack(">BB", 0xC4, n)) + elif n <= 0xFFFF: + return self._buffer.write(struct.pack(">BH", 0xC5, n)) + elif n <= 0xFFFFFFFF: + return self._buffer.write(struct.pack(">BI", 0xC6, n)) + else: + raise ValueError("Bin is too large") + + def bytes(self): + """Return internal buffer contents as bytes object""" + return self._buffer.getvalue() + + def reset(self): + """Reset internal buffer. + + This method is useful only when autoreset=False. + """ + self._buffer = BytesIO() + + def getbuffer(self): + """Return view of internal buffer.""" + if _USING_STRINGBUILDER: + return memoryview(self.bytes()) + else: + return self._buffer.getbuffer() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__init__.py new file mode 100644 index 0000000..d45c22c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__init__.py @@ -0,0 +1,15 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "25.0" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = f"2014 {__author__}" diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..b574cb6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-312.pyc new file mode 100644 index 0000000..7e09cef Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-312.pyc new file mode 100644 index 0000000..014a509 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-312.pyc new file mode 100644 index 0000000..ae66d2e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-312.pyc new file mode 100644 index 0000000..8e152b4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-312.pyc new file mode 100644 index 0000000..671fc4b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-312.pyc new file mode 100644 index 0000000..9d9a533 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-312.pyc new file mode 100644 index 0000000..927738d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-312.pyc new file mode 100644 index 0000000..37df4e8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-312.pyc new file mode 100644 index 0000000..813dfa3 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-312.pyc new file mode 100644 index 0000000..1239368 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-312.pyc new file mode 100644 index 0000000..798beaf Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..ee782a8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-312.pyc new file mode 100644 index 0000000..9dcbe82 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_elffile.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_elffile.py new file mode 100644 index 0000000..7a5afc3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_elffile.py @@ -0,0 +1,109 @@ +""" +ELF file parser. + +This provides a class ``ELFFile`` that parses an ELF executable in a similar +interface to ``ZipFile``. Only the read interface is implemented. + +Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca +ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html +""" + +from __future__ import annotations + +import enum +import os +import struct +from typing import IO + + +class ELFInvalid(ValueError): + pass + + +class EIClass(enum.IntEnum): + C32 = 1 + C64 = 2 + + +class EIData(enum.IntEnum): + Lsb = 1 + Msb = 2 + + +class EMachine(enum.IntEnum): + I386 = 3 + S390 = 22 + Arm = 40 + X8664 = 62 + AArc64 = 183 + + +class ELFFile: + """ + Representation of an ELF executable. 
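+
+    Editor's sketch of typical use (the path is illustrative, not taken
+    from upstream docs)::
+
+        with open("/usr/bin/python3", "rb") as f:
+            elf = ELFFile(f)
+            print(elf.machine, elf.interpreter)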
+    """
+
+    def __init__(self, f: IO[bytes]) -> None:
+        self._f = f
+
+        try:
+            ident = self._read("16B")
+        except struct.error as e:
+            raise ELFInvalid("unable to parse identification") from e
+        magic = bytes(ident[:4])
+        if magic != b"\x7fELF":
+            raise ELFInvalid(f"invalid magic: {magic!r}")
+
+        self.capacity = ident[4]  # Format for program header (bitness).
+        self.encoding = ident[5]  # Data structure encoding (endianness).
+
+        try:
+            # e_fmt: Format for program header.
+            # p_fmt: Format for section header.
+            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+            e_fmt, self._p_fmt, self._p_idx = {
+                (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)),  # 32-bit LSB.
+                (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
+                (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)),  # 64-bit LSB.
+                (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
+            }[(self.capacity, self.encoding)]
+        except KeyError as e:
+            raise ELFInvalid(
+                f"unrecognized capacity ({self.capacity}) or encoding ({self.encoding})"
+            ) from e
+
+        try:
+            (
+                _,
+                self.machine,  # Architecture type.
+                _,
+                _,
+                self._e_phoff,  # Offset of program header.
+                _,
+                self.flags,  # Processor-specific flags.
+                _,
+                self._e_phentsize,  # Size of section.
+                self._e_phnum,  # Number of sections.
+            ) = self._read(e_fmt)
+        except struct.error as e:
+            raise ELFInvalid("unable to parse machine and section information") from e
+
+    def _read(self, fmt: str) -> tuple[int, ...]:
+        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
+
+    @property
+    def interpreter(self) -> str | None:
+        """
+        The path recorded in the ``PT_INTERP`` section header.
+        """
+        for index in range(self._e_phnum):
+            self._f.seek(self._e_phoff + self._e_phentsize * index)
+            try:
+                data = self._read(self._p_fmt)
+            except struct.error:
+                continue
+            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
+                continue
+            self._f.seek(data[self._p_idx[1]])
+            return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
+        return None
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_manylinux.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_manylinux.py
new file mode 100644
index 0000000..95f5576
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_manylinux.py
@@ -0,0 +1,262 @@
+from __future__ import annotations
+
+import collections
+import contextlib
+import functools
+import os
+import re
+import sys
+import warnings
+from typing import Generator, Iterator, NamedTuple, Sequence
+
+from ._elffile import EIClass, EIData, ELFFile, EMachine
+
+EF_ARM_ABIMASK = 0xFF000000
+EF_ARM_ABI_VER5 = 0x05000000
+EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+
+# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
+# as the type for `path` until then.
+@contextlib.contextmanager +def _parse_elf(path: str) -> Generator[ELFFile | None, None, None]: + try: + with open(path, "rb") as f: + yield ELFFile(f) + except (OSError, TypeError, ValueError): + yield None + + +def _is_linux_armhf(executable: str) -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.Arm + and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 + and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD + ) + + +def _is_linux_i686(executable: str) -> bool: + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.I386 + ) + + +def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: + if "armv7l" in archs: + return _is_linux_armhf(executable) + if "i686" in archs: + return _is_linux_i686(executable) + allowed_archs = { + "x86_64", + "aarch64", + "ppc64", + "ppc64le", + "s390x", + "loongarch64", + "riscv64", + } + return any(arch in allowed_archs for arch in archs) + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. +# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> str | None: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # Should be a string like "glibc 2.17". + version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.rsplit() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> str | None: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. 
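+    #
+    # Editor's illustration (values are hypothetical): on a glibc system
+    # the lookup below roughly amounts to:
+    #
+    #     libc = ctypes.CDLL(None)
+    #     fn = libc.gnu_get_libc_version
+    #     fn.restype = ctypes.c_char_p
+    #     fn()  # e.g. b"2.31", decoded to str below when bytes come back
+    #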
+    try:
+        process_namespace = ctypes.CDLL(None)
+    except OSError:
+        return None
+
+    try:
+        gnu_get_libc_version = process_namespace.gnu_get_libc_version
+    except AttributeError:
+        # Symbol doesn't exist -> therefore, we are not linked to
+        # glibc.
+        return None
+
+    # Call gnu_get_libc_version, which returns a string like "2.5"
+    gnu_get_libc_version.restype = ctypes.c_char_p
+    version_str: str = gnu_get_libc_version()
+    # py2 / py3 compatibility:
+    if not isinstance(version_str, str):
+        version_str = version_str.decode("ascii")
+
+    return version_str
+
+
+def _glibc_version_string() -> str | None:
+    """Returns glibc version string, or None if not using glibc."""
+    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _parse_glibc_version(version_str: str) -> tuple[int, int]:
+    """Parse glibc version.
+
+    We use a regexp instead of str.split because we want to discard any
+    random junk that might come after the minor version -- this might happen
+    in patched/forked versions of glibc (e.g. Linaro's version of glibc
+    uses version strings like "2.20-2014.11"). See gh-3588.
+    """
+    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+    if not m:
+        warnings.warn(
+            f"Expected glibc version with 2 components major.minor, got: {version_str}",
+            RuntimeWarning,
+            stacklevel=2,
+        )
+        return -1, -1
+    return int(m.group("major")), int(m.group("minor"))
+
+
+@functools.lru_cache
+def _get_glibc_version() -> tuple[int, int]:
+    version_str = _glibc_version_string()
+    if version_str is None:
+        return (-1, -1)
+    return _parse_glibc_version(version_str)
+
+
+# From PEP 513, PEP 600
+def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
+    sys_glibc = _get_glibc_version()
+    if sys_glibc < version:
+        return False
+    # Check for presence of _manylinux module.
+    try:
+        import _manylinux
+    except ImportError:
+        return True
+    if hasattr(_manylinux, "manylinux_compatible"):
+        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+        if result is not None:
+            return bool(result)
+        return True
+    if version == _GLibCVersion(2, 5):
+        if hasattr(_manylinux, "manylinux1_compatible"):
+            return bool(_manylinux.manylinux1_compatible)
+    if version == _GLibCVersion(2, 12):
+        if hasattr(_manylinux, "manylinux2010_compatible"):
+            return bool(_manylinux.manylinux2010_compatible)
+    if version == _GLibCVersion(2, 17):
+        if hasattr(_manylinux, "manylinux2014_compatible"):
+            return bool(_manylinux.manylinux2014_compatible)
+    return True
+
+
+_LEGACY_MANYLINUX_MAP = {
+    # CentOS 7 w/ glibc 2.17 (PEP 599)
+    (2, 17): "manylinux2014",
+    # CentOS 6 w/ glibc 2.12 (PEP 571)
+    (2, 12): "manylinux2010",
+    # CentOS 5 w/ glibc 2.5 (PEP 513)
+    (2, 5): "manylinux1",
+}
+
+
+def platform_tags(archs: Sequence[str]) -> Iterator[str]:
+    """Generate manylinux tags compatible with the current platform.
+
+    :param archs: Sequence of compatible architectures.
+        The first one shall be the closest to the actual architecture and be the part of
+        platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
+        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
+        be manylinux-compatible.
+
+    :returns: An iterator of compatible manylinux tags.
+    """
+    if not _have_compatible_abi(sys.executable, archs):
+        return
+    # Oldest glibc to be supported regardless of architecture is (2, 17).
+    too_old_glibc2 = _GLibCVersion(2, 16)
+    if set(archs) & {"x86_64", "i686"}:
+        # On x86/i686 also oldest glibc to be supported is (2, 5).
+ too_old_glibc2 = _GLibCVersion(2, 4) + current_glibc = _GLibCVersion(*_get_glibc_version()) + glibc_max_list = [current_glibc] + # We can assume compatibility across glibc major versions. + # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 + # + # Build a list of maximum glibc versions so that we can + # output the canonical list of all glibc from current_glibc + # down to too_old_glibc2, including all intermediary versions. + for glibc_major in range(current_glibc.major - 1, 1, -1): + glibc_minor = _LAST_GLIBC_MINOR[glibc_major] + glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) + for arch in archs: + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(arch, glibc_version): + yield f"{tag}_{arch}" + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. + if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(arch, glibc_version): + yield f"{legacy_tag}_{arch}" diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_musllinux.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_musllinux.py new file mode 100644 index 0000000..d2bf30b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_musllinux.py @@ -0,0 +1,85 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. +""" + +from __future__ import annotations + +import functools +import re +import subprocess +import sys +from typing import Iterator, NamedTuple, Sequence + +from ._elffile import ELFFile + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> _MuslVersion | None: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache +def _get_musl_version(executable: str) -> _MuslVersion | None: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be musllinux-compatible. 
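+
+    For example (editor's illustration), with musl 1.2 on the host and
+    ``archs=["x86_64"]`` this yields ``musllinux_1_2_x86_64``,
+    ``musllinux_1_1_x86_64`` and ``musllinux_1_0_x86_64``, in that order.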
+ + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. + return + for arch in archs: + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_parser.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_parser.py new file mode 100644 index 0000000..0007c0a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_parser.py @@ -0,0 +1,353 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains EBNF-inspired grammar representing +the implementation. +""" + +from __future__ import annotations + +import ast +from typing import NamedTuple, Sequence, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +MarkerAtom = Union[MarkerItem, Sequence["MarkerAtom"]] +MarkerList = Sequence[Union["MarkerList", MarkerAtom, str]] + + +class ParsedRequirement(NamedTuple): + name: str + url: str + extras: list[str] + specifier: str + marker: MarkerList | None + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> tuple[str, str, MarkerList | None]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? 
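+
+    Editor's illustration of the two alternatives (the name and URL are
+    made up):
+
+        name @ https://example.com/pkg-1.0.whl ; python_version >= "3.9"
+        name >=1.0,<2.0 ; python_version >= "3.9"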
+ """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? + """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> list[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? + """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> list[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: list[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? 
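+
+    e.g. ``>=1.0, !=1.5.*, <2.0`` (editor's example; the ``.*`` suffix is
+    only valid with ``==``/``!=``, as enforced below)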
+ """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: + retval = _parse_marker(tokenizer) + tokenizer.expect("END", expected="end of marker expression") + return retval + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? + """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? 
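+
+    e.g. ``python_version >= "3.8"`` parses to
+    ``(Variable("python_version"), Op(">="), Value("3.8"))`` (editor's example)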
+ """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if env_var in ("platform_python_implementation", "python_implementation"): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_structures.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_structures.py new file mode 100644 index 0000000..90a6465 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
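+
+# Editor's note: the two singletons defined below are extreme sentinel values
+# used in version-comparison keys. Every comparison result is hard-coded, so,
+# e.g., sorted([Infinity, 1, NegativeInfinity]) gives
+# [NegativeInfinity, 1, Infinity] regardless of the other operands' types.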
+ + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_tokenizer.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_tokenizer.py new file mode 100644 index 0000000..d28a9b6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/_tokenizer.py @@ -0,0 +1,195 @@ +from __future__ import annotations + +import contextlib +import re +from dataclasses import dataclass +from typing import Iterator, NoReturn + +from .specifiers import Specifier + + +@dataclass +class Token: + name: str + text: str + position: int + + +class ParserSyntaxError(Exception): + """The provided source text could not be parsed correctly.""" + + def __init__( + self, + message: str, + *, + source: str, + span: tuple[int, int], + ) -> None: + self.span = span + self.message = message + self.source = source + + super().__init__() + + def __str__(self) -> str: + marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" + return "\n ".join([self.message, self.source, marker]) + + +DEFAULT_RULES: dict[str, str | re.Pattern[str]] = { + "LEFT_PARENTHESIS": r"\(", + "RIGHT_PARENTHESIS": r"\)", + "LEFT_BRACKET": r"\[", + "RIGHT_BRACKET": r"\]", + "SEMICOLON": r";", + "COMMA": r",", + "QUOTED_STRING": re.compile( + r""" + ( + ('[^']*') + | + ("[^"]*") + ) + """, + re.VERBOSE, + ), + "OP": r"(===|==|~=|!=|<=|>=|<|>)", + "BOOLOP": r"\b(or|and)\b", + "IN": r"\bin\b", + "NOT": r"\bnot\b", + "VARIABLE": re.compile( + r""" + \b( + python_version + |python_full_version + |os[._]name + |sys[._]platform + |platform_(release|system) + |platform[._](version|machine|python_implementation) + |python_implementation + |implementation_(name|version) + |extras? + |dependency_groups + )\b + """, + re.VERBOSE, + ), + "SPECIFIER": re.compile( + Specifier._operator_regex_str + Specifier._version_regex_str, + re.VERBOSE | re.IGNORECASE, + ), + "AT": r"\@", + "URL": r"[^ \t]+", + "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", + "VERSION_PREFIX_TRAIL": r"\.\*", + "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", + "WS": r"[ \t]+", + "END": r"$", +} + + +class Tokenizer: + """Context-sensitive token parsing. + + Provides methods to examine the input stream to check whether the next token + matches. 
+ """ + + def __init__( + self, + source: str, + *, + rules: dict[str, str | re.Pattern[str]], + ) -> None: + self.source = source + self.rules: dict[str, re.Pattern[str]] = { + name: re.compile(pattern) for name, pattern in rules.items() + } + self.next_token: Token | None = None + self.position = 0 + + def consume(self, name: str) -> None: + """Move beyond provided token name, if at current position.""" + if self.check(name): + self.read() + + def check(self, name: str, *, peek: bool = False) -> bool: + """Check whether the next token has the provided name. + + By default, if the check succeeds, the token *must* be read before + another check. If `peek` is set to `True`, the token is not loaded and + would need to be checked again. + """ + assert self.next_token is None, ( + f"Cannot check for {name!r}, already have {self.next_token!r}" + ) + assert name in self.rules, f"Unknown token name: {name!r}" + + expression = self.rules[name] + + match = expression.match(self.source, self.position) + if match is None: + return False + if not peek: + self.next_token = Token(name, match[0], self.position) + return True + + def expect(self, name: str, *, expected: str) -> Token: + """Expect a certain token name next, failing with a syntax error otherwise. + + The token is *not* read. + """ + if not self.check(name): + raise self.raise_syntax_error(f"Expected {expected}") + return self.read() + + def read(self) -> Token: + """Consume the next token and return it.""" + token = self.next_token + assert token is not None + + self.position += len(token.text) + self.next_token = None + + return token + + def raise_syntax_error( + self, + message: str, + *, + span_start: int | None = None, + span_end: int | None = None, + ) -> NoReturn: + """Raise ParserSyntaxError at the given position.""" + span = ( + self.position if span_start is None else span_start, + self.position if span_end is None else span_end, + ) + raise ParserSyntaxError( + message, + source=self.source, + span=span, + ) + + @contextlib.contextmanager + def enclosing_tokens( + self, open_token: str, close_token: str, *, around: str + ) -> Iterator[None]: + if self.check(open_token): + open_position = self.position + self.read() + else: + open_position = None + + yield + + if open_position is None: + return + + if not self.check(close_token): + self.raise_syntax_error( + f"Expected matching {close_token} for {open_token}, after {around}", + span_start=open_position, + ) + + self.read() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__init__.py new file mode 100644 index 0000000..031f277 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__init__.py @@ -0,0 +1,145 @@ +####################################################################################### +# +# Adapted from: +# https://github.com/pypa/hatch/blob/5352e44/backend/src/hatchling/licenses/parse.py +# +# MIT License +# +# Copyright (c) 2017-present Ofek Lev +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this +# software and associated documentation files (the "Software"), to deal in the Software +# without restriction, including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and 
this permission notice shall be included in all copies +# or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF +# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# +# With additional allowance of arbitrary `LicenseRef-` identifiers, not just +# `LicenseRef-Public-Domain` and `LicenseRef-Proprietary`. +# +####################################################################################### +from __future__ import annotations + +import re +from typing import NewType, cast + +from pip._vendor.packaging.licenses._spdx import EXCEPTIONS, LICENSES + +__all__ = [ + "InvalidLicenseExpression", + "NormalizedLicenseExpression", + "canonicalize_license_expression", +] + +license_ref_allowed = re.compile("^[A-Za-z0-9.-]*$") + +NormalizedLicenseExpression = NewType("NormalizedLicenseExpression", str) + + +class InvalidLicenseExpression(ValueError): + """Raised when a license-expression string is invalid + + >>> canonicalize_license_expression("invalid") + Traceback (most recent call last): + ... + packaging.licenses.InvalidLicenseExpression: Invalid license expression: 'invalid' + """ + + +def canonicalize_license_expression( + raw_license_expression: str, +) -> NormalizedLicenseExpression: + if not raw_license_expression: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) + + # Pad any parentheses so tokenization can be achieved by merely splitting on + # whitespace. + license_expression = raw_license_expression.replace("(", " ( ").replace(")", " ) ") + licenseref_prefix = "LicenseRef-" + license_refs = { + ref.lower(): "LicenseRef-" + ref[len(licenseref_prefix) :] + for ref in license_expression.split() + if ref.lower().startswith(licenseref_prefix.lower()) + } + + # Normalize to lower case so we can look up licenses/exceptions + # and so boolean operators are Python-compatible. + license_expression = license_expression.lower() + + tokens = license_expression.split() + + # Rather than implementing boolean logic, we create an expression that Python can + # parse. Everything that is not involved with the grammar itself is treated as + # `False` and the expression should evaluate as such. + python_tokens = [] + for token in tokens: + if token not in {"or", "and", "with", "(", ")"}: + python_tokens.append("False") + elif token == "with": + python_tokens.append("or") + elif token == "(" and python_tokens and python_tokens[-1] not in {"or", "and"}: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) + else: + python_tokens.append(token) + + python_expression = " ".join(python_tokens) + try: + invalid = eval(python_expression, globals(), locals()) + except Exception: + invalid = True + + if invalid is not False: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) from None + + # Take a final pass to check for unknown licenses/exceptions. 
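+    # For example, "mit or apache-2.0 with llvm-exception" normalizes to
+    # "MIT OR Apache-2.0 WITH LLVM-exception". A doctest-style sketch
+    # (illustrative only; unknown identifiers raise InvalidLicenseExpression):
+    #
+    #     >>> canonicalize_license_expression("mit or apache-2.0")
+    #     'MIT OR Apache-2.0'
+    #     >>> canonicalize_license_expression("mit and unknown-license")
+    #     Traceback (most recent call last):
+    #       ...
+    #     InvalidLicenseExpression: Unknown license: 'unknown-license'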
+ normalized_tokens = [] + for token in tokens: + if token in {"or", "and", "with", "(", ")"}: + normalized_tokens.append(token.upper()) + continue + + if normalized_tokens and normalized_tokens[-1] == "WITH": + if token not in EXCEPTIONS: + message = f"Unknown license exception: {token!r}" + raise InvalidLicenseExpression(message) + + normalized_tokens.append(EXCEPTIONS[token]["id"]) + else: + if token.endswith("+"): + final_token = token[:-1] + suffix = "+" + else: + final_token = token + suffix = "" + + if final_token.startswith("licenseref-"): + if not license_ref_allowed.match(final_token): + message = f"Invalid licenseref: {final_token!r}" + raise InvalidLicenseExpression(message) + normalized_tokens.append(license_refs[final_token] + suffix) + else: + if final_token not in LICENSES: + message = f"Unknown license: {final_token!r}" + raise InvalidLicenseExpression(message) + normalized_tokens.append(LICENSES[final_token]["id"] + suffix) + + normalized_expression = " ".join(normalized_tokens) + + return cast( + NormalizedLicenseExpression, + normalized_expression.replace("( ", "(").replace(" )", ")"), + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..fbefe10 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-312.pyc new file mode 100644 index 0000000..952cfb5 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/_spdx.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/_spdx.py new file mode 100644 index 0000000..eac2227 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/licenses/_spdx.py @@ -0,0 +1,759 @@ + +from __future__ import annotations + +from typing import TypedDict + +class SPDXLicense(TypedDict): + id: str + deprecated: bool + +class SPDXException(TypedDict): + id: str + deprecated: bool + + +VERSION = '3.25.0' + +LICENSES: dict[str, SPDXLicense] = { + '0bsd': {'id': '0BSD', 'deprecated': False}, + '3d-slicer-1.0': {'id': '3D-Slicer-1.0', 'deprecated': False}, + 'aal': {'id': 'AAL', 'deprecated': False}, + 'abstyles': {'id': 'Abstyles', 'deprecated': False}, + 'adacore-doc': {'id': 'AdaCore-doc', 'deprecated': False}, + 'adobe-2006': {'id': 'Adobe-2006', 'deprecated': False}, + 'adobe-display-postscript': {'id': 'Adobe-Display-PostScript', 'deprecated': False}, + 'adobe-glyph': {'id': 'Adobe-Glyph', 'deprecated': False}, + 'adobe-utopia': {'id': 'Adobe-Utopia', 'deprecated': False}, + 'adsl': {'id': 'ADSL', 'deprecated': False}, + 'afl-1.1': {'id': 'AFL-1.1', 'deprecated': False}, + 'afl-1.2': {'id': 'AFL-1.2', 'deprecated': False}, + 'afl-2.0': {'id': 'AFL-2.0', 'deprecated': False}, + 'afl-2.1': {'id': 'AFL-2.1', 'deprecated': False}, + 'afl-3.0': {'id': 'AFL-3.0', 'deprecated': False}, + 'afmparse': {'id': 'Afmparse', 'deprecated': False}, + 'agpl-1.0': {'id': 'AGPL-1.0', 'deprecated': True}, + 'agpl-1.0-only': {'id': 'AGPL-1.0-only', 'deprecated': False}, + 
'agpl-1.0-or-later': {'id': 'AGPL-1.0-or-later', 'deprecated': False}, + 'agpl-3.0': {'id': 'AGPL-3.0', 'deprecated': True}, + 'agpl-3.0-only': {'id': 'AGPL-3.0-only', 'deprecated': False}, + 'agpl-3.0-or-later': {'id': 'AGPL-3.0-or-later', 'deprecated': False}, + 'aladdin': {'id': 'Aladdin', 'deprecated': False}, + 'amd-newlib': {'id': 'AMD-newlib', 'deprecated': False}, + 'amdplpa': {'id': 'AMDPLPA', 'deprecated': False}, + 'aml': {'id': 'AML', 'deprecated': False}, + 'aml-glslang': {'id': 'AML-glslang', 'deprecated': False}, + 'ampas': {'id': 'AMPAS', 'deprecated': False}, + 'antlr-pd': {'id': 'ANTLR-PD', 'deprecated': False}, + 'antlr-pd-fallback': {'id': 'ANTLR-PD-fallback', 'deprecated': False}, + 'any-osi': {'id': 'any-OSI', 'deprecated': False}, + 'apache-1.0': {'id': 'Apache-1.0', 'deprecated': False}, + 'apache-1.1': {'id': 'Apache-1.1', 'deprecated': False}, + 'apache-2.0': {'id': 'Apache-2.0', 'deprecated': False}, + 'apafml': {'id': 'APAFML', 'deprecated': False}, + 'apl-1.0': {'id': 'APL-1.0', 'deprecated': False}, + 'app-s2p': {'id': 'App-s2p', 'deprecated': False}, + 'apsl-1.0': {'id': 'APSL-1.0', 'deprecated': False}, + 'apsl-1.1': {'id': 'APSL-1.1', 'deprecated': False}, + 'apsl-1.2': {'id': 'APSL-1.2', 'deprecated': False}, + 'apsl-2.0': {'id': 'APSL-2.0', 'deprecated': False}, + 'arphic-1999': {'id': 'Arphic-1999', 'deprecated': False}, + 'artistic-1.0': {'id': 'Artistic-1.0', 'deprecated': False}, + 'artistic-1.0-cl8': {'id': 'Artistic-1.0-cl8', 'deprecated': False}, + 'artistic-1.0-perl': {'id': 'Artistic-1.0-Perl', 'deprecated': False}, + 'artistic-2.0': {'id': 'Artistic-2.0', 'deprecated': False}, + 'aswf-digital-assets-1.0': {'id': 'ASWF-Digital-Assets-1.0', 'deprecated': False}, + 'aswf-digital-assets-1.1': {'id': 'ASWF-Digital-Assets-1.1', 'deprecated': False}, + 'baekmuk': {'id': 'Baekmuk', 'deprecated': False}, + 'bahyph': {'id': 'Bahyph', 'deprecated': False}, + 'barr': {'id': 'Barr', 'deprecated': False}, + 'bcrypt-solar-designer': {'id': 'bcrypt-Solar-Designer', 'deprecated': False}, + 'beerware': {'id': 'Beerware', 'deprecated': False}, + 'bitstream-charter': {'id': 'Bitstream-Charter', 'deprecated': False}, + 'bitstream-vera': {'id': 'Bitstream-Vera', 'deprecated': False}, + 'bittorrent-1.0': {'id': 'BitTorrent-1.0', 'deprecated': False}, + 'bittorrent-1.1': {'id': 'BitTorrent-1.1', 'deprecated': False}, + 'blessing': {'id': 'blessing', 'deprecated': False}, + 'blueoak-1.0.0': {'id': 'BlueOak-1.0.0', 'deprecated': False}, + 'boehm-gc': {'id': 'Boehm-GC', 'deprecated': False}, + 'borceux': {'id': 'Borceux', 'deprecated': False}, + 'brian-gladman-2-clause': {'id': 'Brian-Gladman-2-Clause', 'deprecated': False}, + 'brian-gladman-3-clause': {'id': 'Brian-Gladman-3-Clause', 'deprecated': False}, + 'bsd-1-clause': {'id': 'BSD-1-Clause', 'deprecated': False}, + 'bsd-2-clause': {'id': 'BSD-2-Clause', 'deprecated': False}, + 'bsd-2-clause-darwin': {'id': 'BSD-2-Clause-Darwin', 'deprecated': False}, + 'bsd-2-clause-first-lines': {'id': 'BSD-2-Clause-first-lines', 'deprecated': False}, + 'bsd-2-clause-freebsd': {'id': 'BSD-2-Clause-FreeBSD', 'deprecated': True}, + 'bsd-2-clause-netbsd': {'id': 'BSD-2-Clause-NetBSD', 'deprecated': True}, + 'bsd-2-clause-patent': {'id': 'BSD-2-Clause-Patent', 'deprecated': False}, + 'bsd-2-clause-views': {'id': 'BSD-2-Clause-Views', 'deprecated': False}, + 'bsd-3-clause': {'id': 'BSD-3-Clause', 'deprecated': False}, + 'bsd-3-clause-acpica': {'id': 'BSD-3-Clause-acpica', 'deprecated': False}, + 'bsd-3-clause-attribution': {'id': 
'BSD-3-Clause-Attribution', 'deprecated': False}, + 'bsd-3-clause-clear': {'id': 'BSD-3-Clause-Clear', 'deprecated': False}, + 'bsd-3-clause-flex': {'id': 'BSD-3-Clause-flex', 'deprecated': False}, + 'bsd-3-clause-hp': {'id': 'BSD-3-Clause-HP', 'deprecated': False}, + 'bsd-3-clause-lbnl': {'id': 'BSD-3-Clause-LBNL', 'deprecated': False}, + 'bsd-3-clause-modification': {'id': 'BSD-3-Clause-Modification', 'deprecated': False}, + 'bsd-3-clause-no-military-license': {'id': 'BSD-3-Clause-No-Military-License', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-license': {'id': 'BSD-3-Clause-No-Nuclear-License', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-license-2014': {'id': 'BSD-3-Clause-No-Nuclear-License-2014', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-warranty': {'id': 'BSD-3-Clause-No-Nuclear-Warranty', 'deprecated': False}, + 'bsd-3-clause-open-mpi': {'id': 'BSD-3-Clause-Open-MPI', 'deprecated': False}, + 'bsd-3-clause-sun': {'id': 'BSD-3-Clause-Sun', 'deprecated': False}, + 'bsd-4-clause': {'id': 'BSD-4-Clause', 'deprecated': False}, + 'bsd-4-clause-shortened': {'id': 'BSD-4-Clause-Shortened', 'deprecated': False}, + 'bsd-4-clause-uc': {'id': 'BSD-4-Clause-UC', 'deprecated': False}, + 'bsd-4.3reno': {'id': 'BSD-4.3RENO', 'deprecated': False}, + 'bsd-4.3tahoe': {'id': 'BSD-4.3TAHOE', 'deprecated': False}, + 'bsd-advertising-acknowledgement': {'id': 'BSD-Advertising-Acknowledgement', 'deprecated': False}, + 'bsd-attribution-hpnd-disclaimer': {'id': 'BSD-Attribution-HPND-disclaimer', 'deprecated': False}, + 'bsd-inferno-nettverk': {'id': 'BSD-Inferno-Nettverk', 'deprecated': False}, + 'bsd-protection': {'id': 'BSD-Protection', 'deprecated': False}, + 'bsd-source-beginning-file': {'id': 'BSD-Source-beginning-file', 'deprecated': False}, + 'bsd-source-code': {'id': 'BSD-Source-Code', 'deprecated': False}, + 'bsd-systemics': {'id': 'BSD-Systemics', 'deprecated': False}, + 'bsd-systemics-w3works': {'id': 'BSD-Systemics-W3Works', 'deprecated': False}, + 'bsl-1.0': {'id': 'BSL-1.0', 'deprecated': False}, + 'busl-1.1': {'id': 'BUSL-1.1', 'deprecated': False}, + 'bzip2-1.0.5': {'id': 'bzip2-1.0.5', 'deprecated': True}, + 'bzip2-1.0.6': {'id': 'bzip2-1.0.6', 'deprecated': False}, + 'c-uda-1.0': {'id': 'C-UDA-1.0', 'deprecated': False}, + 'cal-1.0': {'id': 'CAL-1.0', 'deprecated': False}, + 'cal-1.0-combined-work-exception': {'id': 'CAL-1.0-Combined-Work-Exception', 'deprecated': False}, + 'caldera': {'id': 'Caldera', 'deprecated': False}, + 'caldera-no-preamble': {'id': 'Caldera-no-preamble', 'deprecated': False}, + 'catharon': {'id': 'Catharon', 'deprecated': False}, + 'catosl-1.1': {'id': 'CATOSL-1.1', 'deprecated': False}, + 'cc-by-1.0': {'id': 'CC-BY-1.0', 'deprecated': False}, + 'cc-by-2.0': {'id': 'CC-BY-2.0', 'deprecated': False}, + 'cc-by-2.5': {'id': 'CC-BY-2.5', 'deprecated': False}, + 'cc-by-2.5-au': {'id': 'CC-BY-2.5-AU', 'deprecated': False}, + 'cc-by-3.0': {'id': 'CC-BY-3.0', 'deprecated': False}, + 'cc-by-3.0-at': {'id': 'CC-BY-3.0-AT', 'deprecated': False}, + 'cc-by-3.0-au': {'id': 'CC-BY-3.0-AU', 'deprecated': False}, + 'cc-by-3.0-de': {'id': 'CC-BY-3.0-DE', 'deprecated': False}, + 'cc-by-3.0-igo': {'id': 'CC-BY-3.0-IGO', 'deprecated': False}, + 'cc-by-3.0-nl': {'id': 'CC-BY-3.0-NL', 'deprecated': False}, + 'cc-by-3.0-us': {'id': 'CC-BY-3.0-US', 'deprecated': False}, + 'cc-by-4.0': {'id': 'CC-BY-4.0', 'deprecated': False}, + 'cc-by-nc-1.0': {'id': 'CC-BY-NC-1.0', 'deprecated': False}, + 'cc-by-nc-2.0': {'id': 'CC-BY-NC-2.0', 'deprecated': False}, + 'cc-by-nc-2.5': {'id': 
'CC-BY-NC-2.5', 'deprecated': False}, + 'cc-by-nc-3.0': {'id': 'CC-BY-NC-3.0', 'deprecated': False}, + 'cc-by-nc-3.0-de': {'id': 'CC-BY-NC-3.0-DE', 'deprecated': False}, + 'cc-by-nc-4.0': {'id': 'CC-BY-NC-4.0', 'deprecated': False}, + 'cc-by-nc-nd-1.0': {'id': 'CC-BY-NC-ND-1.0', 'deprecated': False}, + 'cc-by-nc-nd-2.0': {'id': 'CC-BY-NC-ND-2.0', 'deprecated': False}, + 'cc-by-nc-nd-2.5': {'id': 'CC-BY-NC-ND-2.5', 'deprecated': False}, + 'cc-by-nc-nd-3.0': {'id': 'CC-BY-NC-ND-3.0', 'deprecated': False}, + 'cc-by-nc-nd-3.0-de': {'id': 'CC-BY-NC-ND-3.0-DE', 'deprecated': False}, + 'cc-by-nc-nd-3.0-igo': {'id': 'CC-BY-NC-ND-3.0-IGO', 'deprecated': False}, + 'cc-by-nc-nd-4.0': {'id': 'CC-BY-NC-ND-4.0', 'deprecated': False}, + 'cc-by-nc-sa-1.0': {'id': 'CC-BY-NC-SA-1.0', 'deprecated': False}, + 'cc-by-nc-sa-2.0': {'id': 'CC-BY-NC-SA-2.0', 'deprecated': False}, + 'cc-by-nc-sa-2.0-de': {'id': 'CC-BY-NC-SA-2.0-DE', 'deprecated': False}, + 'cc-by-nc-sa-2.0-fr': {'id': 'CC-BY-NC-SA-2.0-FR', 'deprecated': False}, + 'cc-by-nc-sa-2.0-uk': {'id': 'CC-BY-NC-SA-2.0-UK', 'deprecated': False}, + 'cc-by-nc-sa-2.5': {'id': 'CC-BY-NC-SA-2.5', 'deprecated': False}, + 'cc-by-nc-sa-3.0': {'id': 'CC-BY-NC-SA-3.0', 'deprecated': False}, + 'cc-by-nc-sa-3.0-de': {'id': 'CC-BY-NC-SA-3.0-DE', 'deprecated': False}, + 'cc-by-nc-sa-3.0-igo': {'id': 'CC-BY-NC-SA-3.0-IGO', 'deprecated': False}, + 'cc-by-nc-sa-4.0': {'id': 'CC-BY-NC-SA-4.0', 'deprecated': False}, + 'cc-by-nd-1.0': {'id': 'CC-BY-ND-1.0', 'deprecated': False}, + 'cc-by-nd-2.0': {'id': 'CC-BY-ND-2.0', 'deprecated': False}, + 'cc-by-nd-2.5': {'id': 'CC-BY-ND-2.5', 'deprecated': False}, + 'cc-by-nd-3.0': {'id': 'CC-BY-ND-3.0', 'deprecated': False}, + 'cc-by-nd-3.0-de': {'id': 'CC-BY-ND-3.0-DE', 'deprecated': False}, + 'cc-by-nd-4.0': {'id': 'CC-BY-ND-4.0', 'deprecated': False}, + 'cc-by-sa-1.0': {'id': 'CC-BY-SA-1.0', 'deprecated': False}, + 'cc-by-sa-2.0': {'id': 'CC-BY-SA-2.0', 'deprecated': False}, + 'cc-by-sa-2.0-uk': {'id': 'CC-BY-SA-2.0-UK', 'deprecated': False}, + 'cc-by-sa-2.1-jp': {'id': 'CC-BY-SA-2.1-JP', 'deprecated': False}, + 'cc-by-sa-2.5': {'id': 'CC-BY-SA-2.5', 'deprecated': False}, + 'cc-by-sa-3.0': {'id': 'CC-BY-SA-3.0', 'deprecated': False}, + 'cc-by-sa-3.0-at': {'id': 'CC-BY-SA-3.0-AT', 'deprecated': False}, + 'cc-by-sa-3.0-de': {'id': 'CC-BY-SA-3.0-DE', 'deprecated': False}, + 'cc-by-sa-3.0-igo': {'id': 'CC-BY-SA-3.0-IGO', 'deprecated': False}, + 'cc-by-sa-4.0': {'id': 'CC-BY-SA-4.0', 'deprecated': False}, + 'cc-pddc': {'id': 'CC-PDDC', 'deprecated': False}, + 'cc0-1.0': {'id': 'CC0-1.0', 'deprecated': False}, + 'cddl-1.0': {'id': 'CDDL-1.0', 'deprecated': False}, + 'cddl-1.1': {'id': 'CDDL-1.1', 'deprecated': False}, + 'cdl-1.0': {'id': 'CDL-1.0', 'deprecated': False}, + 'cdla-permissive-1.0': {'id': 'CDLA-Permissive-1.0', 'deprecated': False}, + 'cdla-permissive-2.0': {'id': 'CDLA-Permissive-2.0', 'deprecated': False}, + 'cdla-sharing-1.0': {'id': 'CDLA-Sharing-1.0', 'deprecated': False}, + 'cecill-1.0': {'id': 'CECILL-1.0', 'deprecated': False}, + 'cecill-1.1': {'id': 'CECILL-1.1', 'deprecated': False}, + 'cecill-2.0': {'id': 'CECILL-2.0', 'deprecated': False}, + 'cecill-2.1': {'id': 'CECILL-2.1', 'deprecated': False}, + 'cecill-b': {'id': 'CECILL-B', 'deprecated': False}, + 'cecill-c': {'id': 'CECILL-C', 'deprecated': False}, + 'cern-ohl-1.1': {'id': 'CERN-OHL-1.1', 'deprecated': False}, + 'cern-ohl-1.2': {'id': 'CERN-OHL-1.2', 'deprecated': False}, + 'cern-ohl-p-2.0': {'id': 'CERN-OHL-P-2.0', 'deprecated': False}, + 'cern-ohl-s-2.0': 
{'id': 'CERN-OHL-S-2.0', 'deprecated': False}, + 'cern-ohl-w-2.0': {'id': 'CERN-OHL-W-2.0', 'deprecated': False}, + 'cfitsio': {'id': 'CFITSIO', 'deprecated': False}, + 'check-cvs': {'id': 'check-cvs', 'deprecated': False}, + 'checkmk': {'id': 'checkmk', 'deprecated': False}, + 'clartistic': {'id': 'ClArtistic', 'deprecated': False}, + 'clips': {'id': 'Clips', 'deprecated': False}, + 'cmu-mach': {'id': 'CMU-Mach', 'deprecated': False}, + 'cmu-mach-nodoc': {'id': 'CMU-Mach-nodoc', 'deprecated': False}, + 'cnri-jython': {'id': 'CNRI-Jython', 'deprecated': False}, + 'cnri-python': {'id': 'CNRI-Python', 'deprecated': False}, + 'cnri-python-gpl-compatible': {'id': 'CNRI-Python-GPL-Compatible', 'deprecated': False}, + 'coil-1.0': {'id': 'COIL-1.0', 'deprecated': False}, + 'community-spec-1.0': {'id': 'Community-Spec-1.0', 'deprecated': False}, + 'condor-1.1': {'id': 'Condor-1.1', 'deprecated': False}, + 'copyleft-next-0.3.0': {'id': 'copyleft-next-0.3.0', 'deprecated': False}, + 'copyleft-next-0.3.1': {'id': 'copyleft-next-0.3.1', 'deprecated': False}, + 'cornell-lossless-jpeg': {'id': 'Cornell-Lossless-JPEG', 'deprecated': False}, + 'cpal-1.0': {'id': 'CPAL-1.0', 'deprecated': False}, + 'cpl-1.0': {'id': 'CPL-1.0', 'deprecated': False}, + 'cpol-1.02': {'id': 'CPOL-1.02', 'deprecated': False}, + 'cronyx': {'id': 'Cronyx', 'deprecated': False}, + 'crossword': {'id': 'Crossword', 'deprecated': False}, + 'crystalstacker': {'id': 'CrystalStacker', 'deprecated': False}, + 'cua-opl-1.0': {'id': 'CUA-OPL-1.0', 'deprecated': False}, + 'cube': {'id': 'Cube', 'deprecated': False}, + 'curl': {'id': 'curl', 'deprecated': False}, + 'cve-tou': {'id': 'cve-tou', 'deprecated': False}, + 'd-fsl-1.0': {'id': 'D-FSL-1.0', 'deprecated': False}, + 'dec-3-clause': {'id': 'DEC-3-Clause', 'deprecated': False}, + 'diffmark': {'id': 'diffmark', 'deprecated': False}, + 'dl-de-by-2.0': {'id': 'DL-DE-BY-2.0', 'deprecated': False}, + 'dl-de-zero-2.0': {'id': 'DL-DE-ZERO-2.0', 'deprecated': False}, + 'doc': {'id': 'DOC', 'deprecated': False}, + 'docbook-schema': {'id': 'DocBook-Schema', 'deprecated': False}, + 'docbook-xml': {'id': 'DocBook-XML', 'deprecated': False}, + 'dotseqn': {'id': 'Dotseqn', 'deprecated': False}, + 'drl-1.0': {'id': 'DRL-1.0', 'deprecated': False}, + 'drl-1.1': {'id': 'DRL-1.1', 'deprecated': False}, + 'dsdp': {'id': 'DSDP', 'deprecated': False}, + 'dtoa': {'id': 'dtoa', 'deprecated': False}, + 'dvipdfm': {'id': 'dvipdfm', 'deprecated': False}, + 'ecl-1.0': {'id': 'ECL-1.0', 'deprecated': False}, + 'ecl-2.0': {'id': 'ECL-2.0', 'deprecated': False}, + 'ecos-2.0': {'id': 'eCos-2.0', 'deprecated': True}, + 'efl-1.0': {'id': 'EFL-1.0', 'deprecated': False}, + 'efl-2.0': {'id': 'EFL-2.0', 'deprecated': False}, + 'egenix': {'id': 'eGenix', 'deprecated': False}, + 'elastic-2.0': {'id': 'Elastic-2.0', 'deprecated': False}, + 'entessa': {'id': 'Entessa', 'deprecated': False}, + 'epics': {'id': 'EPICS', 'deprecated': False}, + 'epl-1.0': {'id': 'EPL-1.0', 'deprecated': False}, + 'epl-2.0': {'id': 'EPL-2.0', 'deprecated': False}, + 'erlpl-1.1': {'id': 'ErlPL-1.1', 'deprecated': False}, + 'etalab-2.0': {'id': 'etalab-2.0', 'deprecated': False}, + 'eudatagrid': {'id': 'EUDatagrid', 'deprecated': False}, + 'eupl-1.0': {'id': 'EUPL-1.0', 'deprecated': False}, + 'eupl-1.1': {'id': 'EUPL-1.1', 'deprecated': False}, + 'eupl-1.2': {'id': 'EUPL-1.2', 'deprecated': False}, + 'eurosym': {'id': 'Eurosym', 'deprecated': False}, + 'fair': {'id': 'Fair', 'deprecated': False}, + 'fbm': {'id': 'FBM', 'deprecated': False}, + 
'fdk-aac': {'id': 'FDK-AAC', 'deprecated': False}, + 'ferguson-twofish': {'id': 'Ferguson-Twofish', 'deprecated': False}, + 'frameworx-1.0': {'id': 'Frameworx-1.0', 'deprecated': False}, + 'freebsd-doc': {'id': 'FreeBSD-DOC', 'deprecated': False}, + 'freeimage': {'id': 'FreeImage', 'deprecated': False}, + 'fsfap': {'id': 'FSFAP', 'deprecated': False}, + 'fsfap-no-warranty-disclaimer': {'id': 'FSFAP-no-warranty-disclaimer', 'deprecated': False}, + 'fsful': {'id': 'FSFUL', 'deprecated': False}, + 'fsfullr': {'id': 'FSFULLR', 'deprecated': False}, + 'fsfullrwd': {'id': 'FSFULLRWD', 'deprecated': False}, + 'ftl': {'id': 'FTL', 'deprecated': False}, + 'furuseth': {'id': 'Furuseth', 'deprecated': False}, + 'fwlw': {'id': 'fwlw', 'deprecated': False}, + 'gcr-docs': {'id': 'GCR-docs', 'deprecated': False}, + 'gd': {'id': 'GD', 'deprecated': False}, + 'gfdl-1.1': {'id': 'GFDL-1.1', 'deprecated': True}, + 'gfdl-1.1-invariants-only': {'id': 'GFDL-1.1-invariants-only', 'deprecated': False}, + 'gfdl-1.1-invariants-or-later': {'id': 'GFDL-1.1-invariants-or-later', 'deprecated': False}, + 'gfdl-1.1-no-invariants-only': {'id': 'GFDL-1.1-no-invariants-only', 'deprecated': False}, + 'gfdl-1.1-no-invariants-or-later': {'id': 'GFDL-1.1-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.1-only': {'id': 'GFDL-1.1-only', 'deprecated': False}, + 'gfdl-1.1-or-later': {'id': 'GFDL-1.1-or-later', 'deprecated': False}, + 'gfdl-1.2': {'id': 'GFDL-1.2', 'deprecated': True}, + 'gfdl-1.2-invariants-only': {'id': 'GFDL-1.2-invariants-only', 'deprecated': False}, + 'gfdl-1.2-invariants-or-later': {'id': 'GFDL-1.2-invariants-or-later', 'deprecated': False}, + 'gfdl-1.2-no-invariants-only': {'id': 'GFDL-1.2-no-invariants-only', 'deprecated': False}, + 'gfdl-1.2-no-invariants-or-later': {'id': 'GFDL-1.2-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.2-only': {'id': 'GFDL-1.2-only', 'deprecated': False}, + 'gfdl-1.2-or-later': {'id': 'GFDL-1.2-or-later', 'deprecated': False}, + 'gfdl-1.3': {'id': 'GFDL-1.3', 'deprecated': True}, + 'gfdl-1.3-invariants-only': {'id': 'GFDL-1.3-invariants-only', 'deprecated': False}, + 'gfdl-1.3-invariants-or-later': {'id': 'GFDL-1.3-invariants-or-later', 'deprecated': False}, + 'gfdl-1.3-no-invariants-only': {'id': 'GFDL-1.3-no-invariants-only', 'deprecated': False}, + 'gfdl-1.3-no-invariants-or-later': {'id': 'GFDL-1.3-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.3-only': {'id': 'GFDL-1.3-only', 'deprecated': False}, + 'gfdl-1.3-or-later': {'id': 'GFDL-1.3-or-later', 'deprecated': False}, + 'giftware': {'id': 'Giftware', 'deprecated': False}, + 'gl2ps': {'id': 'GL2PS', 'deprecated': False}, + 'glide': {'id': 'Glide', 'deprecated': False}, + 'glulxe': {'id': 'Glulxe', 'deprecated': False}, + 'glwtpl': {'id': 'GLWTPL', 'deprecated': False}, + 'gnuplot': {'id': 'gnuplot', 'deprecated': False}, + 'gpl-1.0': {'id': 'GPL-1.0', 'deprecated': True}, + 'gpl-1.0+': {'id': 'GPL-1.0+', 'deprecated': True}, + 'gpl-1.0-only': {'id': 'GPL-1.0-only', 'deprecated': False}, + 'gpl-1.0-or-later': {'id': 'GPL-1.0-or-later', 'deprecated': False}, + 'gpl-2.0': {'id': 'GPL-2.0', 'deprecated': True}, + 'gpl-2.0+': {'id': 'GPL-2.0+', 'deprecated': True}, + 'gpl-2.0-only': {'id': 'GPL-2.0-only', 'deprecated': False}, + 'gpl-2.0-or-later': {'id': 'GPL-2.0-or-later', 'deprecated': False}, + 'gpl-2.0-with-autoconf-exception': {'id': 'GPL-2.0-with-autoconf-exception', 'deprecated': True}, + 'gpl-2.0-with-bison-exception': {'id': 'GPL-2.0-with-bison-exception', 'deprecated': True}, + 
'gpl-2.0-with-classpath-exception': {'id': 'GPL-2.0-with-classpath-exception', 'deprecated': True}, + 'gpl-2.0-with-font-exception': {'id': 'GPL-2.0-with-font-exception', 'deprecated': True}, + 'gpl-2.0-with-gcc-exception': {'id': 'GPL-2.0-with-GCC-exception', 'deprecated': True}, + 'gpl-3.0': {'id': 'GPL-3.0', 'deprecated': True}, + 'gpl-3.0+': {'id': 'GPL-3.0+', 'deprecated': True}, + 'gpl-3.0-only': {'id': 'GPL-3.0-only', 'deprecated': False}, + 'gpl-3.0-or-later': {'id': 'GPL-3.0-or-later', 'deprecated': False}, + 'gpl-3.0-with-autoconf-exception': {'id': 'GPL-3.0-with-autoconf-exception', 'deprecated': True}, + 'gpl-3.0-with-gcc-exception': {'id': 'GPL-3.0-with-GCC-exception', 'deprecated': True}, + 'graphics-gems': {'id': 'Graphics-Gems', 'deprecated': False}, + 'gsoap-1.3b': {'id': 'gSOAP-1.3b', 'deprecated': False}, + 'gtkbook': {'id': 'gtkbook', 'deprecated': False}, + 'gutmann': {'id': 'Gutmann', 'deprecated': False}, + 'haskellreport': {'id': 'HaskellReport', 'deprecated': False}, + 'hdparm': {'id': 'hdparm', 'deprecated': False}, + 'hidapi': {'id': 'HIDAPI', 'deprecated': False}, + 'hippocratic-2.1': {'id': 'Hippocratic-2.1', 'deprecated': False}, + 'hp-1986': {'id': 'HP-1986', 'deprecated': False}, + 'hp-1989': {'id': 'HP-1989', 'deprecated': False}, + 'hpnd': {'id': 'HPND', 'deprecated': False}, + 'hpnd-dec': {'id': 'HPND-DEC', 'deprecated': False}, + 'hpnd-doc': {'id': 'HPND-doc', 'deprecated': False}, + 'hpnd-doc-sell': {'id': 'HPND-doc-sell', 'deprecated': False}, + 'hpnd-export-us': {'id': 'HPND-export-US', 'deprecated': False}, + 'hpnd-export-us-acknowledgement': {'id': 'HPND-export-US-acknowledgement', 'deprecated': False}, + 'hpnd-export-us-modify': {'id': 'HPND-export-US-modify', 'deprecated': False}, + 'hpnd-export2-us': {'id': 'HPND-export2-US', 'deprecated': False}, + 'hpnd-fenneberg-livingston': {'id': 'HPND-Fenneberg-Livingston', 'deprecated': False}, + 'hpnd-inria-imag': {'id': 'HPND-INRIA-IMAG', 'deprecated': False}, + 'hpnd-intel': {'id': 'HPND-Intel', 'deprecated': False}, + 'hpnd-kevlin-henney': {'id': 'HPND-Kevlin-Henney', 'deprecated': False}, + 'hpnd-markus-kuhn': {'id': 'HPND-Markus-Kuhn', 'deprecated': False}, + 'hpnd-merchantability-variant': {'id': 'HPND-merchantability-variant', 'deprecated': False}, + 'hpnd-mit-disclaimer': {'id': 'HPND-MIT-disclaimer', 'deprecated': False}, + 'hpnd-netrek': {'id': 'HPND-Netrek', 'deprecated': False}, + 'hpnd-pbmplus': {'id': 'HPND-Pbmplus', 'deprecated': False}, + 'hpnd-sell-mit-disclaimer-xserver': {'id': 'HPND-sell-MIT-disclaimer-xserver', 'deprecated': False}, + 'hpnd-sell-regexpr': {'id': 'HPND-sell-regexpr', 'deprecated': False}, + 'hpnd-sell-variant': {'id': 'HPND-sell-variant', 'deprecated': False}, + 'hpnd-sell-variant-mit-disclaimer': {'id': 'HPND-sell-variant-MIT-disclaimer', 'deprecated': False}, + 'hpnd-sell-variant-mit-disclaimer-rev': {'id': 'HPND-sell-variant-MIT-disclaimer-rev', 'deprecated': False}, + 'hpnd-uc': {'id': 'HPND-UC', 'deprecated': False}, + 'hpnd-uc-export-us': {'id': 'HPND-UC-export-US', 'deprecated': False}, + 'htmltidy': {'id': 'HTMLTIDY', 'deprecated': False}, + 'ibm-pibs': {'id': 'IBM-pibs', 'deprecated': False}, + 'icu': {'id': 'ICU', 'deprecated': False}, + 'iec-code-components-eula': {'id': 'IEC-Code-Components-EULA', 'deprecated': False}, + 'ijg': {'id': 'IJG', 'deprecated': False}, + 'ijg-short': {'id': 'IJG-short', 'deprecated': False}, + 'imagemagick': {'id': 'ImageMagick', 'deprecated': False}, + 'imatix': {'id': 'iMatix', 'deprecated': False}, + 'imlib2': {'id': 'Imlib2', 
'deprecated': False}, + 'info-zip': {'id': 'Info-ZIP', 'deprecated': False}, + 'inner-net-2.0': {'id': 'Inner-Net-2.0', 'deprecated': False}, + 'intel': {'id': 'Intel', 'deprecated': False}, + 'intel-acpi': {'id': 'Intel-ACPI', 'deprecated': False}, + 'interbase-1.0': {'id': 'Interbase-1.0', 'deprecated': False}, + 'ipa': {'id': 'IPA', 'deprecated': False}, + 'ipl-1.0': {'id': 'IPL-1.0', 'deprecated': False}, + 'isc': {'id': 'ISC', 'deprecated': False}, + 'isc-veillard': {'id': 'ISC-Veillard', 'deprecated': False}, + 'jam': {'id': 'Jam', 'deprecated': False}, + 'jasper-2.0': {'id': 'JasPer-2.0', 'deprecated': False}, + 'jpl-image': {'id': 'JPL-image', 'deprecated': False}, + 'jpnic': {'id': 'JPNIC', 'deprecated': False}, + 'json': {'id': 'JSON', 'deprecated': False}, + 'kastrup': {'id': 'Kastrup', 'deprecated': False}, + 'kazlib': {'id': 'Kazlib', 'deprecated': False}, + 'knuth-ctan': {'id': 'Knuth-CTAN', 'deprecated': False}, + 'lal-1.2': {'id': 'LAL-1.2', 'deprecated': False}, + 'lal-1.3': {'id': 'LAL-1.3', 'deprecated': False}, + 'latex2e': {'id': 'Latex2e', 'deprecated': False}, + 'latex2e-translated-notice': {'id': 'Latex2e-translated-notice', 'deprecated': False}, + 'leptonica': {'id': 'Leptonica', 'deprecated': False}, + 'lgpl-2.0': {'id': 'LGPL-2.0', 'deprecated': True}, + 'lgpl-2.0+': {'id': 'LGPL-2.0+', 'deprecated': True}, + 'lgpl-2.0-only': {'id': 'LGPL-2.0-only', 'deprecated': False}, + 'lgpl-2.0-or-later': {'id': 'LGPL-2.0-or-later', 'deprecated': False}, + 'lgpl-2.1': {'id': 'LGPL-2.1', 'deprecated': True}, + 'lgpl-2.1+': {'id': 'LGPL-2.1+', 'deprecated': True}, + 'lgpl-2.1-only': {'id': 'LGPL-2.1-only', 'deprecated': False}, + 'lgpl-2.1-or-later': {'id': 'LGPL-2.1-or-later', 'deprecated': False}, + 'lgpl-3.0': {'id': 'LGPL-3.0', 'deprecated': True}, + 'lgpl-3.0+': {'id': 'LGPL-3.0+', 'deprecated': True}, + 'lgpl-3.0-only': {'id': 'LGPL-3.0-only', 'deprecated': False}, + 'lgpl-3.0-or-later': {'id': 'LGPL-3.0-or-later', 'deprecated': False}, + 'lgpllr': {'id': 'LGPLLR', 'deprecated': False}, + 'libpng': {'id': 'Libpng', 'deprecated': False}, + 'libpng-2.0': {'id': 'libpng-2.0', 'deprecated': False}, + 'libselinux-1.0': {'id': 'libselinux-1.0', 'deprecated': False}, + 'libtiff': {'id': 'libtiff', 'deprecated': False}, + 'libutil-david-nugent': {'id': 'libutil-David-Nugent', 'deprecated': False}, + 'liliq-p-1.1': {'id': 'LiLiQ-P-1.1', 'deprecated': False}, + 'liliq-r-1.1': {'id': 'LiLiQ-R-1.1', 'deprecated': False}, + 'liliq-rplus-1.1': {'id': 'LiLiQ-Rplus-1.1', 'deprecated': False}, + 'linux-man-pages-1-para': {'id': 'Linux-man-pages-1-para', 'deprecated': False}, + 'linux-man-pages-copyleft': {'id': 'Linux-man-pages-copyleft', 'deprecated': False}, + 'linux-man-pages-copyleft-2-para': {'id': 'Linux-man-pages-copyleft-2-para', 'deprecated': False}, + 'linux-man-pages-copyleft-var': {'id': 'Linux-man-pages-copyleft-var', 'deprecated': False}, + 'linux-openib': {'id': 'Linux-OpenIB', 'deprecated': False}, + 'loop': {'id': 'LOOP', 'deprecated': False}, + 'lpd-document': {'id': 'LPD-document', 'deprecated': False}, + 'lpl-1.0': {'id': 'LPL-1.0', 'deprecated': False}, + 'lpl-1.02': {'id': 'LPL-1.02', 'deprecated': False}, + 'lppl-1.0': {'id': 'LPPL-1.0', 'deprecated': False}, + 'lppl-1.1': {'id': 'LPPL-1.1', 'deprecated': False}, + 'lppl-1.2': {'id': 'LPPL-1.2', 'deprecated': False}, + 'lppl-1.3a': {'id': 'LPPL-1.3a', 'deprecated': False}, + 'lppl-1.3c': {'id': 'LPPL-1.3c', 'deprecated': False}, + 'lsof': {'id': 'lsof', 'deprecated': False}, + 'lucida-bitmap-fonts': {'id': 
'Lucida-Bitmap-Fonts', 'deprecated': False}, + 'lzma-sdk-9.11-to-9.20': {'id': 'LZMA-SDK-9.11-to-9.20', 'deprecated': False}, + 'lzma-sdk-9.22': {'id': 'LZMA-SDK-9.22', 'deprecated': False}, + 'mackerras-3-clause': {'id': 'Mackerras-3-Clause', 'deprecated': False}, + 'mackerras-3-clause-acknowledgment': {'id': 'Mackerras-3-Clause-acknowledgment', 'deprecated': False}, + 'magaz': {'id': 'magaz', 'deprecated': False}, + 'mailprio': {'id': 'mailprio', 'deprecated': False}, + 'makeindex': {'id': 'MakeIndex', 'deprecated': False}, + 'martin-birgmeier': {'id': 'Martin-Birgmeier', 'deprecated': False}, + 'mcphee-slideshow': {'id': 'McPhee-slideshow', 'deprecated': False}, + 'metamail': {'id': 'metamail', 'deprecated': False}, + 'minpack': {'id': 'Minpack', 'deprecated': False}, + 'miros': {'id': 'MirOS', 'deprecated': False}, + 'mit': {'id': 'MIT', 'deprecated': False}, + 'mit-0': {'id': 'MIT-0', 'deprecated': False}, + 'mit-advertising': {'id': 'MIT-advertising', 'deprecated': False}, + 'mit-cmu': {'id': 'MIT-CMU', 'deprecated': False}, + 'mit-enna': {'id': 'MIT-enna', 'deprecated': False}, + 'mit-feh': {'id': 'MIT-feh', 'deprecated': False}, + 'mit-festival': {'id': 'MIT-Festival', 'deprecated': False}, + 'mit-khronos-old': {'id': 'MIT-Khronos-old', 'deprecated': False}, + 'mit-modern-variant': {'id': 'MIT-Modern-Variant', 'deprecated': False}, + 'mit-open-group': {'id': 'MIT-open-group', 'deprecated': False}, + 'mit-testregex': {'id': 'MIT-testregex', 'deprecated': False}, + 'mit-wu': {'id': 'MIT-Wu', 'deprecated': False}, + 'mitnfa': {'id': 'MITNFA', 'deprecated': False}, + 'mmixware': {'id': 'MMIXware', 'deprecated': False}, + 'motosoto': {'id': 'Motosoto', 'deprecated': False}, + 'mpeg-ssg': {'id': 'MPEG-SSG', 'deprecated': False}, + 'mpi-permissive': {'id': 'mpi-permissive', 'deprecated': False}, + 'mpich2': {'id': 'mpich2', 'deprecated': False}, + 'mpl-1.0': {'id': 'MPL-1.0', 'deprecated': False}, + 'mpl-1.1': {'id': 'MPL-1.1', 'deprecated': False}, + 'mpl-2.0': {'id': 'MPL-2.0', 'deprecated': False}, + 'mpl-2.0-no-copyleft-exception': {'id': 'MPL-2.0-no-copyleft-exception', 'deprecated': False}, + 'mplus': {'id': 'mplus', 'deprecated': False}, + 'ms-lpl': {'id': 'MS-LPL', 'deprecated': False}, + 'ms-pl': {'id': 'MS-PL', 'deprecated': False}, + 'ms-rl': {'id': 'MS-RL', 'deprecated': False}, + 'mtll': {'id': 'MTLL', 'deprecated': False}, + 'mulanpsl-1.0': {'id': 'MulanPSL-1.0', 'deprecated': False}, + 'mulanpsl-2.0': {'id': 'MulanPSL-2.0', 'deprecated': False}, + 'multics': {'id': 'Multics', 'deprecated': False}, + 'mup': {'id': 'Mup', 'deprecated': False}, + 'naist-2003': {'id': 'NAIST-2003', 'deprecated': False}, + 'nasa-1.3': {'id': 'NASA-1.3', 'deprecated': False}, + 'naumen': {'id': 'Naumen', 'deprecated': False}, + 'nbpl-1.0': {'id': 'NBPL-1.0', 'deprecated': False}, + 'ncbi-pd': {'id': 'NCBI-PD', 'deprecated': False}, + 'ncgl-uk-2.0': {'id': 'NCGL-UK-2.0', 'deprecated': False}, + 'ncl': {'id': 'NCL', 'deprecated': False}, + 'ncsa': {'id': 'NCSA', 'deprecated': False}, + 'net-snmp': {'id': 'Net-SNMP', 'deprecated': True}, + 'netcdf': {'id': 'NetCDF', 'deprecated': False}, + 'newsletr': {'id': 'Newsletr', 'deprecated': False}, + 'ngpl': {'id': 'NGPL', 'deprecated': False}, + 'nicta-1.0': {'id': 'NICTA-1.0', 'deprecated': False}, + 'nist-pd': {'id': 'NIST-PD', 'deprecated': False}, + 'nist-pd-fallback': {'id': 'NIST-PD-fallback', 'deprecated': False}, + 'nist-software': {'id': 'NIST-Software', 'deprecated': False}, + 'nlod-1.0': {'id': 'NLOD-1.0', 'deprecated': False}, + 'nlod-2.0': 
{'id': 'NLOD-2.0', 'deprecated': False}, + 'nlpl': {'id': 'NLPL', 'deprecated': False}, + 'nokia': {'id': 'Nokia', 'deprecated': False}, + 'nosl': {'id': 'NOSL', 'deprecated': False}, + 'noweb': {'id': 'Noweb', 'deprecated': False}, + 'npl-1.0': {'id': 'NPL-1.0', 'deprecated': False}, + 'npl-1.1': {'id': 'NPL-1.1', 'deprecated': False}, + 'nposl-3.0': {'id': 'NPOSL-3.0', 'deprecated': False}, + 'nrl': {'id': 'NRL', 'deprecated': False}, + 'ntp': {'id': 'NTP', 'deprecated': False}, + 'ntp-0': {'id': 'NTP-0', 'deprecated': False}, + 'nunit': {'id': 'Nunit', 'deprecated': True}, + 'o-uda-1.0': {'id': 'O-UDA-1.0', 'deprecated': False}, + 'oar': {'id': 'OAR', 'deprecated': False}, + 'occt-pl': {'id': 'OCCT-PL', 'deprecated': False}, + 'oclc-2.0': {'id': 'OCLC-2.0', 'deprecated': False}, + 'odbl-1.0': {'id': 'ODbL-1.0', 'deprecated': False}, + 'odc-by-1.0': {'id': 'ODC-By-1.0', 'deprecated': False}, + 'offis': {'id': 'OFFIS', 'deprecated': False}, + 'ofl-1.0': {'id': 'OFL-1.0', 'deprecated': False}, + 'ofl-1.0-no-rfn': {'id': 'OFL-1.0-no-RFN', 'deprecated': False}, + 'ofl-1.0-rfn': {'id': 'OFL-1.0-RFN', 'deprecated': False}, + 'ofl-1.1': {'id': 'OFL-1.1', 'deprecated': False}, + 'ofl-1.1-no-rfn': {'id': 'OFL-1.1-no-RFN', 'deprecated': False}, + 'ofl-1.1-rfn': {'id': 'OFL-1.1-RFN', 'deprecated': False}, + 'ogc-1.0': {'id': 'OGC-1.0', 'deprecated': False}, + 'ogdl-taiwan-1.0': {'id': 'OGDL-Taiwan-1.0', 'deprecated': False}, + 'ogl-canada-2.0': {'id': 'OGL-Canada-2.0', 'deprecated': False}, + 'ogl-uk-1.0': {'id': 'OGL-UK-1.0', 'deprecated': False}, + 'ogl-uk-2.0': {'id': 'OGL-UK-2.0', 'deprecated': False}, + 'ogl-uk-3.0': {'id': 'OGL-UK-3.0', 'deprecated': False}, + 'ogtsl': {'id': 'OGTSL', 'deprecated': False}, + 'oldap-1.1': {'id': 'OLDAP-1.1', 'deprecated': False}, + 'oldap-1.2': {'id': 'OLDAP-1.2', 'deprecated': False}, + 'oldap-1.3': {'id': 'OLDAP-1.3', 'deprecated': False}, + 'oldap-1.4': {'id': 'OLDAP-1.4', 'deprecated': False}, + 'oldap-2.0': {'id': 'OLDAP-2.0', 'deprecated': False}, + 'oldap-2.0.1': {'id': 'OLDAP-2.0.1', 'deprecated': False}, + 'oldap-2.1': {'id': 'OLDAP-2.1', 'deprecated': False}, + 'oldap-2.2': {'id': 'OLDAP-2.2', 'deprecated': False}, + 'oldap-2.2.1': {'id': 'OLDAP-2.2.1', 'deprecated': False}, + 'oldap-2.2.2': {'id': 'OLDAP-2.2.2', 'deprecated': False}, + 'oldap-2.3': {'id': 'OLDAP-2.3', 'deprecated': False}, + 'oldap-2.4': {'id': 'OLDAP-2.4', 'deprecated': False}, + 'oldap-2.5': {'id': 'OLDAP-2.5', 'deprecated': False}, + 'oldap-2.6': {'id': 'OLDAP-2.6', 'deprecated': False}, + 'oldap-2.7': {'id': 'OLDAP-2.7', 'deprecated': False}, + 'oldap-2.8': {'id': 'OLDAP-2.8', 'deprecated': False}, + 'olfl-1.3': {'id': 'OLFL-1.3', 'deprecated': False}, + 'oml': {'id': 'OML', 'deprecated': False}, + 'openpbs-2.3': {'id': 'OpenPBS-2.3', 'deprecated': False}, + 'openssl': {'id': 'OpenSSL', 'deprecated': False}, + 'openssl-standalone': {'id': 'OpenSSL-standalone', 'deprecated': False}, + 'openvision': {'id': 'OpenVision', 'deprecated': False}, + 'opl-1.0': {'id': 'OPL-1.0', 'deprecated': False}, + 'opl-uk-3.0': {'id': 'OPL-UK-3.0', 'deprecated': False}, + 'opubl-1.0': {'id': 'OPUBL-1.0', 'deprecated': False}, + 'oset-pl-2.1': {'id': 'OSET-PL-2.1', 'deprecated': False}, + 'osl-1.0': {'id': 'OSL-1.0', 'deprecated': False}, + 'osl-1.1': {'id': 'OSL-1.1', 'deprecated': False}, + 'osl-2.0': {'id': 'OSL-2.0', 'deprecated': False}, + 'osl-2.1': {'id': 'OSL-2.1', 'deprecated': False}, + 'osl-3.0': {'id': 'OSL-3.0', 'deprecated': False}, + 'padl': {'id': 'PADL', 'deprecated': False}, + 
'parity-6.0.0': {'id': 'Parity-6.0.0', 'deprecated': False}, + 'parity-7.0.0': {'id': 'Parity-7.0.0', 'deprecated': False}, + 'pddl-1.0': {'id': 'PDDL-1.0', 'deprecated': False}, + 'php-3.0': {'id': 'PHP-3.0', 'deprecated': False}, + 'php-3.01': {'id': 'PHP-3.01', 'deprecated': False}, + 'pixar': {'id': 'Pixar', 'deprecated': False}, + 'pkgconf': {'id': 'pkgconf', 'deprecated': False}, + 'plexus': {'id': 'Plexus', 'deprecated': False}, + 'pnmstitch': {'id': 'pnmstitch', 'deprecated': False}, + 'polyform-noncommercial-1.0.0': {'id': 'PolyForm-Noncommercial-1.0.0', 'deprecated': False}, + 'polyform-small-business-1.0.0': {'id': 'PolyForm-Small-Business-1.0.0', 'deprecated': False}, + 'postgresql': {'id': 'PostgreSQL', 'deprecated': False}, + 'ppl': {'id': 'PPL', 'deprecated': False}, + 'psf-2.0': {'id': 'PSF-2.0', 'deprecated': False}, + 'psfrag': {'id': 'psfrag', 'deprecated': False}, + 'psutils': {'id': 'psutils', 'deprecated': False}, + 'python-2.0': {'id': 'Python-2.0', 'deprecated': False}, + 'python-2.0.1': {'id': 'Python-2.0.1', 'deprecated': False}, + 'python-ldap': {'id': 'python-ldap', 'deprecated': False}, + 'qhull': {'id': 'Qhull', 'deprecated': False}, + 'qpl-1.0': {'id': 'QPL-1.0', 'deprecated': False}, + 'qpl-1.0-inria-2004': {'id': 'QPL-1.0-INRIA-2004', 'deprecated': False}, + 'radvd': {'id': 'radvd', 'deprecated': False}, + 'rdisc': {'id': 'Rdisc', 'deprecated': False}, + 'rhecos-1.1': {'id': 'RHeCos-1.1', 'deprecated': False}, + 'rpl-1.1': {'id': 'RPL-1.1', 'deprecated': False}, + 'rpl-1.5': {'id': 'RPL-1.5', 'deprecated': False}, + 'rpsl-1.0': {'id': 'RPSL-1.0', 'deprecated': False}, + 'rsa-md': {'id': 'RSA-MD', 'deprecated': False}, + 'rscpl': {'id': 'RSCPL', 'deprecated': False}, + 'ruby': {'id': 'Ruby', 'deprecated': False}, + 'ruby-pty': {'id': 'Ruby-pty', 'deprecated': False}, + 'sax-pd': {'id': 'SAX-PD', 'deprecated': False}, + 'sax-pd-2.0': {'id': 'SAX-PD-2.0', 'deprecated': False}, + 'saxpath': {'id': 'Saxpath', 'deprecated': False}, + 'scea': {'id': 'SCEA', 'deprecated': False}, + 'schemereport': {'id': 'SchemeReport', 'deprecated': False}, + 'sendmail': {'id': 'Sendmail', 'deprecated': False}, + 'sendmail-8.23': {'id': 'Sendmail-8.23', 'deprecated': False}, + 'sgi-b-1.0': {'id': 'SGI-B-1.0', 'deprecated': False}, + 'sgi-b-1.1': {'id': 'SGI-B-1.1', 'deprecated': False}, + 'sgi-b-2.0': {'id': 'SGI-B-2.0', 'deprecated': False}, + 'sgi-opengl': {'id': 'SGI-OpenGL', 'deprecated': False}, + 'sgp4': {'id': 'SGP4', 'deprecated': False}, + 'shl-0.5': {'id': 'SHL-0.5', 'deprecated': False}, + 'shl-0.51': {'id': 'SHL-0.51', 'deprecated': False}, + 'simpl-2.0': {'id': 'SimPL-2.0', 'deprecated': False}, + 'sissl': {'id': 'SISSL', 'deprecated': False}, + 'sissl-1.2': {'id': 'SISSL-1.2', 'deprecated': False}, + 'sl': {'id': 'SL', 'deprecated': False}, + 'sleepycat': {'id': 'Sleepycat', 'deprecated': False}, + 'smlnj': {'id': 'SMLNJ', 'deprecated': False}, + 'smppl': {'id': 'SMPPL', 'deprecated': False}, + 'snia': {'id': 'SNIA', 'deprecated': False}, + 'snprintf': {'id': 'snprintf', 'deprecated': False}, + 'softsurfer': {'id': 'softSurfer', 'deprecated': False}, + 'soundex': {'id': 'Soundex', 'deprecated': False}, + 'spencer-86': {'id': 'Spencer-86', 'deprecated': False}, + 'spencer-94': {'id': 'Spencer-94', 'deprecated': False}, + 'spencer-99': {'id': 'Spencer-99', 'deprecated': False}, + 'spl-1.0': {'id': 'SPL-1.0', 'deprecated': False}, + 'ssh-keyscan': {'id': 'ssh-keyscan', 'deprecated': False}, + 'ssh-openssh': {'id': 'SSH-OpenSSH', 'deprecated': False}, + 'ssh-short': 
{'id': 'SSH-short', 'deprecated': False}, + 'ssleay-standalone': {'id': 'SSLeay-standalone', 'deprecated': False}, + 'sspl-1.0': {'id': 'SSPL-1.0', 'deprecated': False}, + 'standardml-nj': {'id': 'StandardML-NJ', 'deprecated': True}, + 'sugarcrm-1.1.3': {'id': 'SugarCRM-1.1.3', 'deprecated': False}, + 'sun-ppp': {'id': 'Sun-PPP', 'deprecated': False}, + 'sun-ppp-2000': {'id': 'Sun-PPP-2000', 'deprecated': False}, + 'sunpro': {'id': 'SunPro', 'deprecated': False}, + 'swl': {'id': 'SWL', 'deprecated': False}, + 'swrule': {'id': 'swrule', 'deprecated': False}, + 'symlinks': {'id': 'Symlinks', 'deprecated': False}, + 'tapr-ohl-1.0': {'id': 'TAPR-OHL-1.0', 'deprecated': False}, + 'tcl': {'id': 'TCL', 'deprecated': False}, + 'tcp-wrappers': {'id': 'TCP-wrappers', 'deprecated': False}, + 'termreadkey': {'id': 'TermReadKey', 'deprecated': False}, + 'tgppl-1.0': {'id': 'TGPPL-1.0', 'deprecated': False}, + 'threeparttable': {'id': 'threeparttable', 'deprecated': False}, + 'tmate': {'id': 'TMate', 'deprecated': False}, + 'torque-1.1': {'id': 'TORQUE-1.1', 'deprecated': False}, + 'tosl': {'id': 'TOSL', 'deprecated': False}, + 'tpdl': {'id': 'TPDL', 'deprecated': False}, + 'tpl-1.0': {'id': 'TPL-1.0', 'deprecated': False}, + 'ttwl': {'id': 'TTWL', 'deprecated': False}, + 'ttyp0': {'id': 'TTYP0', 'deprecated': False}, + 'tu-berlin-1.0': {'id': 'TU-Berlin-1.0', 'deprecated': False}, + 'tu-berlin-2.0': {'id': 'TU-Berlin-2.0', 'deprecated': False}, + 'ubuntu-font-1.0': {'id': 'Ubuntu-font-1.0', 'deprecated': False}, + 'ucar': {'id': 'UCAR', 'deprecated': False}, + 'ucl-1.0': {'id': 'UCL-1.0', 'deprecated': False}, + 'ulem': {'id': 'ulem', 'deprecated': False}, + 'umich-merit': {'id': 'UMich-Merit', 'deprecated': False}, + 'unicode-3.0': {'id': 'Unicode-3.0', 'deprecated': False}, + 'unicode-dfs-2015': {'id': 'Unicode-DFS-2015', 'deprecated': False}, + 'unicode-dfs-2016': {'id': 'Unicode-DFS-2016', 'deprecated': False}, + 'unicode-tou': {'id': 'Unicode-TOU', 'deprecated': False}, + 'unixcrypt': {'id': 'UnixCrypt', 'deprecated': False}, + 'unlicense': {'id': 'Unlicense', 'deprecated': False}, + 'upl-1.0': {'id': 'UPL-1.0', 'deprecated': False}, + 'urt-rle': {'id': 'URT-RLE', 'deprecated': False}, + 'vim': {'id': 'Vim', 'deprecated': False}, + 'vostrom': {'id': 'VOSTROM', 'deprecated': False}, + 'vsl-1.0': {'id': 'VSL-1.0', 'deprecated': False}, + 'w3c': {'id': 'W3C', 'deprecated': False}, + 'w3c-19980720': {'id': 'W3C-19980720', 'deprecated': False}, + 'w3c-20150513': {'id': 'W3C-20150513', 'deprecated': False}, + 'w3m': {'id': 'w3m', 'deprecated': False}, + 'watcom-1.0': {'id': 'Watcom-1.0', 'deprecated': False}, + 'widget-workshop': {'id': 'Widget-Workshop', 'deprecated': False}, + 'wsuipa': {'id': 'Wsuipa', 'deprecated': False}, + 'wtfpl': {'id': 'WTFPL', 'deprecated': False}, + 'wxwindows': {'id': 'wxWindows', 'deprecated': True}, + 'x11': {'id': 'X11', 'deprecated': False}, + 'x11-distribute-modifications-variant': {'id': 'X11-distribute-modifications-variant', 'deprecated': False}, + 'x11-swapped': {'id': 'X11-swapped', 'deprecated': False}, + 'xdebug-1.03': {'id': 'Xdebug-1.03', 'deprecated': False}, + 'xerox': {'id': 'Xerox', 'deprecated': False}, + 'xfig': {'id': 'Xfig', 'deprecated': False}, + 'xfree86-1.1': {'id': 'XFree86-1.1', 'deprecated': False}, + 'xinetd': {'id': 'xinetd', 'deprecated': False}, + 'xkeyboard-config-zinoviev': {'id': 'xkeyboard-config-Zinoviev', 'deprecated': False}, + 'xlock': {'id': 'xlock', 'deprecated': False}, + 'xnet': {'id': 'Xnet', 'deprecated': False}, + 'xpp': {'id': 
'xpp', 'deprecated': False}, + 'xskat': {'id': 'XSkat', 'deprecated': False}, + 'xzoom': {'id': 'xzoom', 'deprecated': False}, + 'ypl-1.0': {'id': 'YPL-1.0', 'deprecated': False}, + 'ypl-1.1': {'id': 'YPL-1.1', 'deprecated': False}, + 'zed': {'id': 'Zed', 'deprecated': False}, + 'zeeff': {'id': 'Zeeff', 'deprecated': False}, + 'zend-2.0': {'id': 'Zend-2.0', 'deprecated': False}, + 'zimbra-1.3': {'id': 'Zimbra-1.3', 'deprecated': False}, + 'zimbra-1.4': {'id': 'Zimbra-1.4', 'deprecated': False}, + 'zlib': {'id': 'Zlib', 'deprecated': False}, + 'zlib-acknowledgement': {'id': 'zlib-acknowledgement', 'deprecated': False}, + 'zpl-1.1': {'id': 'ZPL-1.1', 'deprecated': False}, + 'zpl-2.0': {'id': 'ZPL-2.0', 'deprecated': False}, + 'zpl-2.1': {'id': 'ZPL-2.1', 'deprecated': False}, +} + +EXCEPTIONS: dict[str, SPDXException] = { + '389-exception': {'id': '389-exception', 'deprecated': False}, + 'asterisk-exception': {'id': 'Asterisk-exception', 'deprecated': False}, + 'asterisk-linking-protocols-exception': {'id': 'Asterisk-linking-protocols-exception', 'deprecated': False}, + 'autoconf-exception-2.0': {'id': 'Autoconf-exception-2.0', 'deprecated': False}, + 'autoconf-exception-3.0': {'id': 'Autoconf-exception-3.0', 'deprecated': False}, + 'autoconf-exception-generic': {'id': 'Autoconf-exception-generic', 'deprecated': False}, + 'autoconf-exception-generic-3.0': {'id': 'Autoconf-exception-generic-3.0', 'deprecated': False}, + 'autoconf-exception-macro': {'id': 'Autoconf-exception-macro', 'deprecated': False}, + 'bison-exception-1.24': {'id': 'Bison-exception-1.24', 'deprecated': False}, + 'bison-exception-2.2': {'id': 'Bison-exception-2.2', 'deprecated': False}, + 'bootloader-exception': {'id': 'Bootloader-exception', 'deprecated': False}, + 'classpath-exception-2.0': {'id': 'Classpath-exception-2.0', 'deprecated': False}, + 'clisp-exception-2.0': {'id': 'CLISP-exception-2.0', 'deprecated': False}, + 'cryptsetup-openssl-exception': {'id': 'cryptsetup-OpenSSL-exception', 'deprecated': False}, + 'digirule-foss-exception': {'id': 'DigiRule-FOSS-exception', 'deprecated': False}, + 'ecos-exception-2.0': {'id': 'eCos-exception-2.0', 'deprecated': False}, + 'erlang-otp-linking-exception': {'id': 'erlang-otp-linking-exception', 'deprecated': False}, + 'fawkes-runtime-exception': {'id': 'Fawkes-Runtime-exception', 'deprecated': False}, + 'fltk-exception': {'id': 'FLTK-exception', 'deprecated': False}, + 'fmt-exception': {'id': 'fmt-exception', 'deprecated': False}, + 'font-exception-2.0': {'id': 'Font-exception-2.0', 'deprecated': False}, + 'freertos-exception-2.0': {'id': 'freertos-exception-2.0', 'deprecated': False}, + 'gcc-exception-2.0': {'id': 'GCC-exception-2.0', 'deprecated': False}, + 'gcc-exception-2.0-note': {'id': 'GCC-exception-2.0-note', 'deprecated': False}, + 'gcc-exception-3.1': {'id': 'GCC-exception-3.1', 'deprecated': False}, + 'gmsh-exception': {'id': 'Gmsh-exception', 'deprecated': False}, + 'gnat-exception': {'id': 'GNAT-exception', 'deprecated': False}, + 'gnome-examples-exception': {'id': 'GNOME-examples-exception', 'deprecated': False}, + 'gnu-compiler-exception': {'id': 'GNU-compiler-exception', 'deprecated': False}, + 'gnu-javamail-exception': {'id': 'gnu-javamail-exception', 'deprecated': False}, + 'gpl-3.0-interface-exception': {'id': 'GPL-3.0-interface-exception', 'deprecated': False}, + 'gpl-3.0-linking-exception': {'id': 'GPL-3.0-linking-exception', 'deprecated': False}, + 'gpl-3.0-linking-source-exception': {'id': 'GPL-3.0-linking-source-exception', 'deprecated': False}, + 
'gpl-cc-1.0': {'id': 'GPL-CC-1.0', 'deprecated': False}, + 'gstreamer-exception-2005': {'id': 'GStreamer-exception-2005', 'deprecated': False}, + 'gstreamer-exception-2008': {'id': 'GStreamer-exception-2008', 'deprecated': False}, + 'i2p-gpl-java-exception': {'id': 'i2p-gpl-java-exception', 'deprecated': False}, + 'kicad-libraries-exception': {'id': 'KiCad-libraries-exception', 'deprecated': False}, + 'lgpl-3.0-linking-exception': {'id': 'LGPL-3.0-linking-exception', 'deprecated': False}, + 'libpri-openh323-exception': {'id': 'libpri-OpenH323-exception', 'deprecated': False}, + 'libtool-exception': {'id': 'Libtool-exception', 'deprecated': False}, + 'linux-syscall-note': {'id': 'Linux-syscall-note', 'deprecated': False}, + 'llgpl': {'id': 'LLGPL', 'deprecated': False}, + 'llvm-exception': {'id': 'LLVM-exception', 'deprecated': False}, + 'lzma-exception': {'id': 'LZMA-exception', 'deprecated': False}, + 'mif-exception': {'id': 'mif-exception', 'deprecated': False}, + 'nokia-qt-exception-1.1': {'id': 'Nokia-Qt-exception-1.1', 'deprecated': True}, + 'ocaml-lgpl-linking-exception': {'id': 'OCaml-LGPL-linking-exception', 'deprecated': False}, + 'occt-exception-1.0': {'id': 'OCCT-exception-1.0', 'deprecated': False}, + 'openjdk-assembly-exception-1.0': {'id': 'OpenJDK-assembly-exception-1.0', 'deprecated': False}, + 'openvpn-openssl-exception': {'id': 'openvpn-openssl-exception', 'deprecated': False}, + 'pcre2-exception': {'id': 'PCRE2-exception', 'deprecated': False}, + 'ps-or-pdf-font-exception-20170817': {'id': 'PS-or-PDF-font-exception-20170817', 'deprecated': False}, + 'qpl-1.0-inria-2004-exception': {'id': 'QPL-1.0-INRIA-2004-exception', 'deprecated': False}, + 'qt-gpl-exception-1.0': {'id': 'Qt-GPL-exception-1.0', 'deprecated': False}, + 'qt-lgpl-exception-1.1': {'id': 'Qt-LGPL-exception-1.1', 'deprecated': False}, + 'qwt-exception-1.0': {'id': 'Qwt-exception-1.0', 'deprecated': False}, + 'romic-exception': {'id': 'romic-exception', 'deprecated': False}, + 'rrdtool-floss-exception-2.0': {'id': 'RRDtool-FLOSS-exception-2.0', 'deprecated': False}, + 'sane-exception': {'id': 'SANE-exception', 'deprecated': False}, + 'shl-2.0': {'id': 'SHL-2.0', 'deprecated': False}, + 'shl-2.1': {'id': 'SHL-2.1', 'deprecated': False}, + 'stunnel-exception': {'id': 'stunnel-exception', 'deprecated': False}, + 'swi-exception': {'id': 'SWI-exception', 'deprecated': False}, + 'swift-exception': {'id': 'Swift-exception', 'deprecated': False}, + 'texinfo-exception': {'id': 'Texinfo-exception', 'deprecated': False}, + 'u-boot-exception-2.0': {'id': 'u-boot-exception-2.0', 'deprecated': False}, + 'ubdl-exception': {'id': 'UBDL-exception', 'deprecated': False}, + 'universal-foss-exception-1.0': {'id': 'Universal-FOSS-exception-1.0', 'deprecated': False}, + 'vsftpd-openssl-exception': {'id': 'vsftpd-openssl-exception', 'deprecated': False}, + 'wxwindows-exception-3.1': {'id': 'WxWindows-exception-3.1', 'deprecated': False}, + 'x11vnc-openssl-exception': {'id': 'x11vnc-openssl-exception', 'deprecated': False}, +} diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/markers.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/markers.py new file mode 100644 index 0000000..e7cea57 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/markers.py @@ -0,0 +1,362 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
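+
+# A minimal usage sketch of this module's public surface (``Marker`` is
+# defined later in this module; outputs are illustrative and depend on the
+# running interpreter):
+#
+#     >>> from pip._vendor.packaging.markers import Marker
+#     >>> m = Marker('python_version >= "3.8" and os_name == "posix"')
+#     >>> m.evaluate()  # against the running interpreter's environment
+#     True
+#     >>> m.evaluate({"python_version": "3.7"})  # overrides merge with defaults
+#     False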
+
+from __future__ import annotations
+
+import operator
+import os
+import platform
+import sys
+from typing import AbstractSet, Any, Callable, Literal, TypedDict, Union, cast
+
+from ._parser import MarkerAtom, MarkerList, Op, Value, Variable
+from ._parser import parse_marker as _parse_marker
+from ._tokenizer import ParserSyntaxError
+from .specifiers import InvalidSpecifier, Specifier
+from .utils import canonicalize_name
+
+__all__ = [
+    "EvaluateContext",
+    "InvalidMarker",
+    "Marker",
+    "UndefinedComparison",
+    "UndefinedEnvironmentName",
+    "default_environment",
+]
+
+Operator = Callable[[str, Union[str, AbstractSet[str]]], bool]
+EvaluateContext = Literal["metadata", "lock_file", "requirement"]
+MARKERS_ALLOWING_SET = {"extras", "dependency_groups"}
+
+
+class InvalidMarker(ValueError):
+    """
+    An invalid marker was found, users should refer to PEP 508.
+    """
+
+
+class UndefinedComparison(ValueError):
+    """
+    An invalid operation was attempted on a value that doesn't support it.
+    """
+
+
+class UndefinedEnvironmentName(ValueError):
+    """
+    An attempt was made to use a name that does not exist inside of the
+    environment.
+    """
+
+
+class Environment(TypedDict):
+    implementation_name: str
+    """The implementation's identifier, e.g. ``'cpython'``."""
+
+    implementation_version: str
+    """
+    The implementation's version, e.g. ``'3.13.0a2'`` for CPython 3.13.0a2, or
+    ``'7.3.13'`` for PyPy3.10 v7.3.13.
+    """
+
+    os_name: str
+    """
+    The value of :py:data:`os.name`. The name of the operating system dependent module
+    imported, e.g. ``'posix'``.
+    """
+
+    platform_machine: str
+    """
+    Returns the machine type, e.g. ``'i386'``.
+
+    An empty string if the value cannot be determined.
+    """
+
+    platform_release: str
+    """
+    The system's release, e.g. ``'2.2.0'`` or ``'NT'``.
+
+    An empty string if the value cannot be determined.
+    """
+
+    platform_system: str
+    """
+    The system/OS name, e.g. ``'Linux'``, ``'Windows'`` or ``'Java'``.
+
+    An empty string if the value cannot be determined.
+    """
+
+    platform_version: str
+    """
+    The system's release version, e.g. ``'#3 on degas'``.
+
+    An empty string if the value cannot be determined.
+    """
+
+    python_full_version: str
+    """
+    The Python version as string ``'major.minor.patchlevel'``.
+
+    Note that unlike the Python :py:data:`sys.version`, this value will always include
+    the patchlevel (it defaults to 0).
+    """
+
+    platform_python_implementation: str
+    """
+    A string identifying the Python implementation, e.g. ``'CPython'``.
+    """
+
+    python_version: str
+    """The Python version as string ``'major.minor'``."""
+
+    sys_platform: str
+    """
+    This string contains a platform identifier that can be used to append
+    platform-specific components to :py:data:`sys.path`, for instance.
+
+    For Unix systems, except on Linux and AIX, this is the lowercased OS name as
+    returned by ``uname -s`` with the first part of the version as returned by
+    ``uname -r`` appended, e.g. ``'sunos5'`` or ``'freebsd8'``, at the time when Python
+    was built.
+    """
+
+
+def _normalize_extra_values(results: Any) -> Any:
+    """
+    Normalize extra values.
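+
+    For example (an illustrative sketch): a parsed marker like
+    ``extra == "Foo_Bar"`` is rewritten so the comparison uses the PEP 503
+    normalized form, i.e. ``extra == "foo-bar"`` via :func:`canonicalize_name`.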
+ """ + if isinstance(results[0], tuple): + lhs, op, rhs = results[0] + if isinstance(lhs, Variable) and lhs.value == "extra": + normalized_extra = canonicalize_name(rhs.value) + rhs = Value(normalized_extra) + elif isinstance(rhs, Variable) and rhs.value == "extra": + normalized_extra = canonicalize_name(lhs.value) + lhs = Value(normalized_extra) + results[0] = lhs, op, rhs + return results + + +def _format_marker( + marker: list[str] | MarkerAtom | str, first: bool | None = True +) -> str: + assert isinstance(marker, (list, tuple, str)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. + if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str | AbstractSet[str]) -> bool: + if isinstance(rhs, str): + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs, prereleases=True) + + oper: Operator | None = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +def _normalize( + lhs: str, rhs: str | AbstractSet[str], key: str +) -> tuple[str, str | AbstractSet[str]]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + assert isinstance(rhs, str), "extra value must be a string" + return (canonicalize_name(lhs), canonicalize_name(rhs)) + if key in MARKERS_ALLOWING_SET: + if isinstance(rhs, str): # pragma: no cover + return (canonicalize_name(lhs), canonicalize_name(rhs)) + else: + return (canonicalize_name(lhs), {canonicalize_name(v) for v in rhs}) + + # other environment markers don't have such standards + return lhs, rhs + + +def _evaluate_markers( + markers: MarkerList, environment: dict[str, str | AbstractSet[str]] +) -> bool: + groups: list[list[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + environment_key = lhs.value + lhs_value = environment[environment_key] + rhs_value = rhs.value + else: + lhs_value = lhs.value + environment_key = rhs.value + rhs_value = environment[environment_key] + assert isinstance(lhs_value, str), "lhs must be a string" + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + 
+                groups.append([])
+
+    return any(all(item) for item in groups)
+
+
+def format_full_version(info: sys._version_info) -> str:
+    version = f"{info.major}.{info.minor}.{info.micro}"
+    kind = info.releaselevel
+    if kind != "final":
+        version += kind[0] + str(info.serial)
+    return version
+
+
+def default_environment() -> Environment:
+    iver = format_full_version(sys.implementation.version)
+    implementation_name = sys.implementation.name
+    return {
+        "implementation_name": implementation_name,
+        "implementation_version": iver,
+        "os_name": os.name,
+        "platform_machine": platform.machine(),
+        "platform_release": platform.release(),
+        "platform_system": platform.system(),
+        "platform_version": platform.version(),
+        "python_full_version": platform.python_version(),
+        "platform_python_implementation": platform.python_implementation(),
+        "python_version": ".".join(platform.python_version_tuple()[:2]),
+        "sys_platform": sys.platform,
+    }
+
+
+class Marker:
+    def __init__(self, marker: str) -> None:
+        # Note: We create a Marker object without calling this constructor in
+        #       packaging.requirements.Requirement. If any additional logic is
+        #       added here, make sure to mirror/adapt Requirement.
+        try:
+            self._markers = _normalize_extra_values(_parse_marker(marker))
+            # The attribute `_markers` can be described in terms of a recursive type:
+            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
+            #
+            # For example, the following expression:
+            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
+            #
+            # is parsed into:
+            # [
+            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
+            #     'and',
+            #     [
+            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
+            #         'or',
+            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
+            #     ]
+            # ]
+        except ParserSyntaxError as e:
+            raise InvalidMarker(str(e)) from e
+
+    def __str__(self) -> str:
+        return _format_marker(self._markers)
+
+    def __repr__(self) -> str:
+        return f"<Marker('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash((self.__class__.__name__, str(self)))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Marker):
+            return NotImplemented
+
+        return str(self) == str(other)
+
+    def evaluate(
+        self,
+        environment: dict[str, str] | None = None,
+        context: EvaluateContext = "metadata",
+    ) -> bool:
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment. The *context* parameter specifies what
+        context the markers are being evaluated for, which influences what markers
+        are considered valid. Acceptable values are "metadata" (for core metadata;
+        default), "lock_file", and "requirement" (i.e. all other situations).
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = cast(
+            "dict[str, str | AbstractSet[str]]", default_environment()
+        )
+        if context == "lock_file":
+            current_environment.update(
+                extras=frozenset(), dependency_groups=frozenset()
+            )
+        elif context == "metadata":
+            current_environment["extra"] = ""
+        if environment is not None:
+            current_environment.update(environment)
+            # The API used to allow setting extra to None. We need to handle this
+            # case for backwards compatibility.
+ if "extra" in current_environment and current_environment["extra"] is None: + current_environment["extra"] = "" + + return _evaluate_markers( + self._markers, _repair_python_full_version(current_environment) + ) + + +def _repair_python_full_version( + env: dict[str, str | AbstractSet[str]], +) -> dict[str, str | AbstractSet[str]]: + """ + Work around platform.python_version() returning something that is not PEP 440 + compliant for non-tagged Python builds. + """ + python_full_version = cast(str, env["python_full_version"]) + if python_full_version.endswith("+"): + env["python_full_version"] = f"{python_full_version}local" + return env diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/metadata.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/metadata.py new file mode 100644 index 0000000..3bd8602 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/metadata.py @@ -0,0 +1,862 @@ +from __future__ import annotations + +import email.feedparser +import email.header +import email.message +import email.parser +import email.policy +import pathlib +import sys +import typing +from typing import ( + Any, + Callable, + Generic, + Literal, + TypedDict, + cast, +) + +from . import licenses, requirements, specifiers, utils +from . import version as version_module +from .licenses import NormalizedLicenseExpression + +T = typing.TypeVar("T") + + +if sys.version_info >= (3, 11): # pragma: no cover + ExceptionGroup = ExceptionGroup +else: # pragma: no cover + + class ExceptionGroup(Exception): + """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11. + + If :external:exc:`ExceptionGroup` is already defined by Python itself, + that version is used instead. + """ + + message: str + exceptions: list[Exception] + + def __init__(self, message: str, exceptions: list[Exception]) -> None: + self.message = message + self.exceptions = exceptions + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" + + +class InvalidMetadata(ValueError): + """A metadata field contains invalid data.""" + + field: str + """The name of the field that contains invalid data.""" + + def __init__(self, field: str, message: str) -> None: + self.field = field + super().__init__(message) + + +# The RawMetadata class attempts to make as few assumptions about the underlying +# serialization formats as possible. The idea is that as long as a serialization +# formats offer some very basic primitives in *some* way then we can support +# serializing to and from that format. +class RawMetadata(TypedDict, total=False): + """A dictionary of raw core metadata. + + Each field in core metadata maps to a key of this dictionary (when data is + provided). The key is lower-case and underscores are used instead of dashes + compared to the equivalent core metadata field. Any core metadata field that + can be specified multiple times or can hold multiple values in a single + field have a key with a plural name. See :class:`Metadata` whose attributes + match the keys of this dictionary. + + Core metadata fields that can be specified multiple times are stored as a + list or dict depending on which is appropriate for the field. Any fields + which hold multiple values in a single field are stored as a list. 
+ + """ + + # Metadata 1.0 - PEP 241 + metadata_version: str + name: str + version: str + platforms: list[str] + summary: str + description: str + keywords: list[str] + home_page: str + author: str + author_email: str + license: str + + # Metadata 1.1 - PEP 314 + supported_platforms: list[str] + download_url: str + classifiers: list[str] + requires: list[str] + provides: list[str] + obsoletes: list[str] + + # Metadata 1.2 - PEP 345 + maintainer: str + maintainer_email: str + requires_dist: list[str] + provides_dist: list[str] + obsoletes_dist: list[str] + requires_python: str + requires_external: list[str] + project_urls: dict[str, str] + + # Metadata 2.0 + # PEP 426 attempted to completely revamp the metadata format + # but got stuck without ever being able to build consensus on + # it and ultimately ended up withdrawn. + # + # However, a number of tools had started emitting METADATA with + # `2.0` Metadata-Version, so for historical reasons, this version + # was skipped. + + # Metadata 2.1 - PEP 566 + description_content_type: str + provides_extra: list[str] + + # Metadata 2.2 - PEP 643 + dynamic: list[str] + + # Metadata 2.3 - PEP 685 + # No new fields were added in PEP 685, just some edge case were + # tightened up to provide better interoptability. + + # Metadata 2.4 - PEP 639 + license_expression: str + license_files: list[str] + + +_STRING_FIELDS = { + "author", + "author_email", + "description", + "description_content_type", + "download_url", + "home_page", + "license", + "license_expression", + "maintainer", + "maintainer_email", + "metadata_version", + "name", + "requires_python", + "summary", + "version", +} + +_LIST_FIELDS = { + "classifiers", + "dynamic", + "license_files", + "obsoletes", + "obsoletes_dist", + "platforms", + "provides", + "provides_dist", + "provides_extra", + "requires", + "requires_dist", + "requires_external", + "supported_platforms", +} + +_DICT_FIELDS = { + "project_urls", +} + + +def _parse_keywords(data: str) -> list[str]: + """Split a string of comma-separated keywords into a list of keywords.""" + return [k.strip() for k in data.split(",")] + + +def _parse_project_urls(data: list[str]) -> dict[str, str]: + """Parse a list of label/URL string pairings separated by a comma.""" + urls = {} + for pair in data: + # Our logic is slightly tricky here as we want to try and do + # *something* reasonable with malformed data. + # + # The main thing that we have to worry about, is data that does + # not have a ',' at all to split the label from the Value. There + # isn't a singular right answer here, and we will fail validation + # later on (if the caller is validating) so it doesn't *really* + # matter, but since the missing value has to be an empty str + # and our return value is dict[str, str], if we let the key + # be the missing value, then they'd have multiple '' values that + # overwrite each other in a accumulating dict. + # + # The other potentional issue is that it's possible to have the + # same label multiple times in the metadata, with no solid "right" + # answer with what to do in that case. As such, we'll do the only + # thing we can, which is treat the field as unparseable and add it + # to our list of unparsed fields. + parts = [p.strip() for p in pair.split(",", 1)] + parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items + + # TODO: The spec doesn't say anything about if the keys should be + # considered case sensitive or not... 
logically they should + # be case-preserving and case-insensitive, but doing that + # would open up more cases where we might have duplicate + # entries. + label, url = parts + if label in urls: + # The label already exists in our set of urls, so this field + # is unparseable, and we can just add the whole thing to our + # unparseable data and stop processing it. + raise KeyError("duplicate labels in project urls") + urls[label] = url + + return urls + + +def _get_payload(msg: email.message.Message, source: bytes | str) -> str: + """Get the body of the message.""" + # If our source is a str, then our caller has managed encodings for us, + # and we don't need to deal with it. + if isinstance(source, str): + payload = msg.get_payload() + assert isinstance(payload, str) + return payload + # If our source is a bytes, then we're managing the encoding and we need + # to deal with it. + else: + bpayload = msg.get_payload(decode=True) + assert isinstance(bpayload, bytes) + try: + return bpayload.decode("utf8", "strict") + except UnicodeDecodeError as exc: + raise ValueError("payload in an invalid encoding") from exc + + +# The various parse_FORMAT functions here are intended to be as lenient as +# possible in their parsing, while still returning a correctly typed +# RawMetadata. +# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with. +# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "license-expression": "license_expression", + "license-file": "license_files", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} +_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} + + +def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]: + """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. 
Finally,
+    any fields that are expected to appear only once but are repeated are
+    included in this dict.
+
+    """
+    raw: dict[str, str | list[str] | dict[str, str]] = {}
+    unparsed: dict[str, list[str]] = {}
+
+    if isinstance(data, str):
+        parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
+    else:
+        parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)
+
+    # We have to wrap parsed.keys() in a set, because in the case of multiple
+    # values for a key (a list), the key will appear multiple times in the
+    # list of keys, but we're avoiding that by using get_all().
+    for name in frozenset(parsed.keys()):
+        # Header names in RFC are case insensitive, so we'll normalize to all
+        # lower case to make comparisons easier.
+        name = name.lower()
+
+        # We use get_all() here, even for fields that aren't multiple use,
+        # because otherwise someone could have e.g. two Name fields, and we
+        # would just silently ignore it rather than doing something about it.
+        headers = parsed.get_all(name) or []
+
+        # The way the email module works when parsing bytes is that it
+        # unconditionally decodes the bytes as ascii using the surrogateescape
+        # handler. When you pull that data back out (such as with get_all() ),
+        # it looks to see if the str has any surrogate escapes, and if it does
+        # it wraps it in a Header object instead of returning the string.
+        #
+        # As such, we'll look for those Header objects, and fix up the encoding.
+        value = []
+        # Flag if we have run into any issues processing the headers, thus
+        # signalling that the data belongs in 'unparsed'.
+        valid_encoding = True
+        for h in headers:
+            # It's unclear if this can return more types than just a Header or
+            # a str, so we'll just assert here to make sure.
+            assert isinstance(h, (email.header.Header, str))
+
+            # If it's a header object, we need to do our little dance to get
+            # the real data out of it. In cases where there is invalid data
+            # we're going to end up with mojibake, but there's no obvious, good
+            # way around that without reimplementing parts of the Header object
+            # ourselves.
+            #
+            # That should be fine since, if mojibake happens, this key is
+            # going into the unparsed dict anyways.
+            if isinstance(h, email.header.Header):
+                # The Header object stores its data as chunks, and each chunk
+                # can be independently encoded, so we'll need to check each
+                # of them.
+                chunks: list[tuple[bytes, str | None]] = []
+                for bin, encoding in email.header.decode_header(h):
+                    try:
+                        bin.decode("utf8", "strict")
+                    except UnicodeDecodeError:
+                        # Enable mojibake.
+                        encoding = "latin1"
+                        valid_encoding = False
+                    else:
+                        encoding = "utf8"
+                    chunks.append((bin, encoding))
+
+                # Turn our chunks back into a Header object, then let that
+                # Header object do the right thing to turn them into a
+                # string for us.
+                value.append(str(email.header.make_header(chunks)))
+            # This is already a string, so just add it.
+            else:
+                value.append(h)
+
+        # We've processed all of our values to get them into a list of str,
+        # but we may have mojibake data, in which case this is an unparsed
+        # field.
+        if not valid_encoding:
+            unparsed[name] = value
+            continue
+
+        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
+        if raw_name is None:
+            # This is a bit of a weird situation, we've encountered a key that
+            # we don't know what it means, so we don't know whether it's meant
+            # to be a list or not.
+            #
+            # Since we can't really tell one way or another, we'll just leave it
+            # as a list, even though it may be a single item list, because that's
+            # what makes the most sense for email headers.
+            unparsed[name] = value
+            continue
+
+        # If this is one of our string fields, then we'll check to see if our
+        # value is a list of a single item. If it is then we'll assume that
+        # it was emitted as a single string, and unwrap the str from inside
+        # the list.
+        #
+        # If it's any other kind of data, then we haven't the faintest clue
+        # what we should parse it as, and we have to just add it to our list
+        # of unparsed stuff.
+        if raw_name in _STRING_FIELDS and len(value) == 1:
+            raw[raw_name] = value[0]
+        # If this is one of our list of string fields, then we can just assign
+        # the value, since email *only* has strings, and our get_all() call
+        # above ensures that this is a list.
+        elif raw_name in _LIST_FIELDS:
+            raw[raw_name] = value
+        # Special Case: Keywords
+        # The keywords field is implemented in the metadata spec as a str,
+        # but it conceptually is a list of strings, and is serialized using
+        # ", ".join(keywords), so we'll do some light data massaging to turn
+        # this into what it logically is.
+        elif raw_name == "keywords" and len(value) == 1:
+            raw[raw_name] = _parse_keywords(value[0])
+        # Special Case: Project-URL
+        # The project urls is implemented in the metadata spec as a list of
+        # specially-formatted strings that represent a key and a value, which
+        # is fundamentally a mapping, however the email format doesn't support
+        # mappings in a sane way, so it was crammed into a list of strings
+        # instead.
+        #
+        # We will do a little light data massaging to turn this into a map as
+        # it logically should be.
+        elif raw_name == "project_urls":
+            try:
+                raw[raw_name] = _parse_project_urls(value)
+            except KeyError:
+                unparsed[name] = value
+        # Nothing that we've done has managed to parse this, so we'll just
+        # throw it in our unparseable data and move on.
+        else:
+            unparsed[name] = value
+
+    # We need to support getting the Description from the message payload in
+    # addition to getting it from the headers. This does mean, though, there
+    # is the possibility of it being set both ways, in which case we put both
+    # in 'unparsed' since we don't know which is right.
+    try:
+        payload = _get_payload(parsed, data)
+    except ValueError:
+        unparsed.setdefault("description", []).append(
+            parsed.get_payload(decode=isinstance(data, bytes))  # type: ignore[call-overload]
+        )
+    else:
+        if payload:
+            # Check to see if we've already got a description, if so then both
+            # it and this body move to unparseable.
+            if "description" in raw:
+                description_header = cast(str, raw.pop("description"))
+                unparsed.setdefault("description", []).extend(
+                    [description_header, payload]
+                )
+            elif "description" in unparsed:
+                unparsed["description"].append(payload)
+            else:
+                raw["description"] = payload
+
+    # We need to cast our `raw` to a metadata, because a TypedDict only supports
+    # literal key names, but we're computing our key names on purpose. By the
+    # way this function is implemented, our `TypedDict` can only have valid key
+    # names.
+    return cast(RawMetadata, raw), unparsed
+
+
+_NOT_FOUND = object()
+
+
+# Keep the two values in sync.
+_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"]
+_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"]
+
+_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])
+
+
+class _Validator(Generic[T]):
+    """Validate a metadata field.
+
+    All _process_*() methods correspond to a core metadata field. The method is
+    called with the field's raw value. If the raw value is valid it is returned
+    in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field).
+    If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause
+    as appropriate).
+    """
+
+    name: str
+    raw_name: str
+    added: _MetadataVersion
+
+    def __init__(
+        self,
+        *,
+        added: _MetadataVersion = "1.0",
+    ) -> None:
+        self.added = added
+
+    def __set_name__(self, _owner: Metadata, name: str) -> None:
+        self.name = name
+        self.raw_name = _RAW_TO_EMAIL_MAPPING[name]
+
+    def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T:
+        # With Python 3.8, the caching can be replaced with functools.cached_property().
+        # No need to check the cache as attribute lookup will resolve into the
+        # instance's __dict__ before __get__ is called.
+        cache = instance.__dict__
+        value = instance._raw.get(self.name)
+
+        # To make the _process_* methods easier, we'll check if the value is None
+        # and if this field is NOT a required attribute, and if both of those
+        # things are true, we'll skip the converter. This will mean that the
+        # converters never have to deal with the None union.
+        if self.name in _REQUIRED_ATTRS or value is not None:
+            try:
+                converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}")
+            except AttributeError:
+                pass
+            else:
+                value = converter(value)
+
+        cache[self.name] = value
+        try:
+            del instance._raw[self.name]  # type: ignore[misc]
+        except KeyError:
+            pass
+
+        return cast(T, value)
+
+    def _invalid_metadata(
+        self, msg: str, cause: Exception | None = None
+    ) -> InvalidMetadata:
+        exc = InvalidMetadata(
+            self.raw_name, msg.format_map({"field": repr(self.raw_name)})
+        )
+        exc.__cause__ = cause
+        return exc
+
+    def _process_metadata_version(self, value: str) -> _MetadataVersion:
+        # Implicitly makes Metadata-Version required.
+        if value not in _VALID_METADATA_VERSIONS:
+            raise self._invalid_metadata(f"{value!r} is not a valid metadata version")
+        return cast(_MetadataVersion, value)
+
+    def _process_name(self, value: str) -> str:
+        if not value:
+            raise self._invalid_metadata("{field} is a required field")
+        # Validate the name as a side-effect.
+        try:
+            utils.canonicalize_name(value, validate=True)
+        except utils.InvalidName as exc:
+            raise self._invalid_metadata(
+                f"{value!r} is invalid for {{field}}", cause=exc
+            ) from exc
+        else:
+            return value
+
+    def _process_version(self, value: str) -> version_module.Version:
+        if not value:
+            raise self._invalid_metadata("{field} is a required field")
+        try:
+            return version_module.parse(value)
+        except version_module.InvalidVersion as exc:
+            raise self._invalid_metadata(
+                f"{value!r} is invalid for {{field}}", cause=exc
+            ) from exc
+
+    def _process_summary(self, value: str) -> str:
+        """Check the field contains no newlines."""
+        if "\n" in value:
+            raise self._invalid_metadata("{field} must be a single line")
+        return value
+
+    def _process_description_content_type(self, value: str) -> str:
+        content_types = {"text/plain", "text/x-rst", "text/markdown"}
+        message = email.message.EmailMessage()
+        message["content-type"] = value
+
+        content_type, parameters = (
+            # Defaults to `text/plain` if parsing failed.
+            message.get_content_type().lower(),
+            message["content-type"].params,
+        )
+        # Check if content-type is valid or defaulted to `text/plain` and thus was
+        # not parseable.
+        if content_type not in content_types or content_type not in value.lower():
+            raise self._invalid_metadata(
+                f"{{field}} must be one of {list(content_types)}, not {value!r}"
+            )
+
+        charset = parameters.get("charset", "UTF-8")
+        if charset != "UTF-8":
+            raise self._invalid_metadata(
+                f"{{field}} can only specify the UTF-8 charset, not {charset!r}"
+            )
+
+        markdown_variants = {"GFM", "CommonMark"}
+        variant = parameters.get("variant", "GFM")  # Use an acceptable default.
+        if content_type == "text/markdown" and variant not in markdown_variants:
+            raise self._invalid_metadata(
+                f"valid Markdown variants for {{field}} are {list(markdown_variants)}, "
+                f"not {variant!r}",
+            )
+        return value
+
+    def _process_dynamic(self, value: list[str]) -> list[str]:
+        for dynamic_field in map(str.lower, value):
+            if dynamic_field in {"name", "version", "metadata-version"}:
+                raise self._invalid_metadata(
+                    f"{dynamic_field!r} is not allowed as a dynamic field"
+                )
+            elif dynamic_field not in _EMAIL_TO_RAW_MAPPING:
+                raise self._invalid_metadata(
+                    f"{dynamic_field!r} is not a valid dynamic field"
+                )
+        return list(map(str.lower, value))
+
+    def _process_provides_extra(
+        self,
+        value: list[str],
+    ) -> list[utils.NormalizedName]:
+        normalized_names = []
+        try:
+            for name in value:
+                normalized_names.append(utils.canonicalize_name(name, validate=True))
+        except utils.InvalidName as exc:
+            raise self._invalid_metadata(
+                f"{name!r} is invalid for {{field}}", cause=exc
+            ) from exc
+        else:
+            return normalized_names
+
+    def _process_requires_python(self, value: str) -> specifiers.SpecifierSet:
+        try:
+            return specifiers.SpecifierSet(value)
+        except specifiers.InvalidSpecifier as exc:
+            raise self._invalid_metadata(
+                f"{value!r} is invalid for {{field}}", cause=exc
+            ) from exc
+
+    def _process_requires_dist(
+        self,
+        value: list[str],
+    ) -> list[requirements.Requirement]:
+        reqs = []
+        try:
+            for req in value:
+                reqs.append(requirements.Requirement(req))
+        except requirements.InvalidRequirement as exc:
+            raise self._invalid_metadata(
+                f"{req!r} is invalid for {{field}}", cause=exc
+            ) from exc
+        else:
+            return reqs
+
+    def _process_license_expression(
+        self, value: str
+    ) -> NormalizedLicenseExpression | None:
+        try:
+            return licenses.canonicalize_license_expression(value)
+        except ValueError as exc:
+            raise
self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + + def _process_license_files(self, value: list[str]) -> list[str]: + paths = [] + for path in value: + if ".." in path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, " + "parent directory indicators are not allowed" + ) + if "*" in path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, paths must be resolved" + ) + if ( + pathlib.PurePosixPath(path).is_absolute() + or pathlib.PureWindowsPath(path).is_absolute() + ): + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, paths must be relative" + ) + if pathlib.PureWindowsPath(path).as_posix() != path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, paths must use '/' delimiter" + ) + paths.append(path) + return paths + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). + """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata: + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. + + if validate: + exceptions: list[Exception] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). + fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. + try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + f"{field} introduced in metadata version " + f"{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata: + """Parse metadata from email headers. + + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. 
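+
+        A minimal sketch of intended use (``PKG_INFO`` below stands for raw
+        ``METADATA``/``PKG-INFO`` text and is hypothetical)::
+
+            meta = Metadata.from_email(PKG_INFO)
+            print(meta.name, meta.version)
+            for req in meta.requires_dist or []:
+                print(req)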
+ """ + raw, unparsed = parse_email(data) + + if validate: + exceptions: list[Exception] = [] + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + raise ExceptionGroup( + "invalid or unparsed metadata", exc_group.exceptions + ) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + # `name` is not normalized/typed to NormalizedName so as to provide access to + # the original/raw name. + name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[list[str] | None] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[list[str] | None] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[str | None] = _Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` (validated)""" + keywords: _Validator[list[str] | None] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[str | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[str | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: _Validator[str | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer-email`""" + license: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-license`""" + license_expression: _Validator[NormalizedLicenseExpression | None] = _Validator( + added="2.4" + ) + """:external:ref:`core-metadata-license-expression`""" + license_files: _Validator[list[str] | None] = _Validator(added="2.4") + """:external:ref:`core-metadata-license-file`""" + classifiers: _Validator[list[str] | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator( + added="1.2" + ) + 
""":external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. + requires_external: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. + provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[list[str] | None] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[list[str] | None] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[list[str] | None] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/requirements.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/requirements.py new file mode 100644 index 0000000..4e068c9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/requirements.py @@ -0,0 +1,91 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import annotations + +from typing import Any, Iterator + +from ._parser import parse_requirement as _parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet +from .utils import canonicalize_name + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? 
+
+    def __init__(self, requirement_string: str) -> None:
+        try:
+            parsed = _parse_requirement(requirement_string)
+        except ParserSyntaxError as e:
+            raise InvalidRequirement(str(e)) from e
+
+        self.name: str = parsed.name
+        self.url: str | None = parsed.url or None
+        self.extras: set[str] = set(parsed.extras or [])
+        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
+        self.marker: Marker | None = None
+        if parsed.marker is not None:
+            self.marker = Marker.__new__(Marker)
+            self.marker._markers = _normalize_extra_values(parsed.marker)
+
+    def _iter_parts(self, name: str) -> Iterator[str]:
+        yield name
+
+        if self.extras:
+            formatted_extras = ",".join(sorted(self.extras))
+            yield f"[{formatted_extras}]"
+
+        if self.specifier:
+            yield str(self.specifier)
+
+        if self.url:
+            yield f"@ {self.url}"
+            if self.marker:
+                yield " "
+
+        if self.marker:
+            yield f"; {self.marker}"
+
+    def __str__(self) -> str:
+        return "".join(self._iter_parts(self.name))
+
+    def __repr__(self) -> str:
+        return f"<Requirement('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash(
+            (
+                self.__class__.__name__,
+                *self._iter_parts(canonicalize_name(self.name)),
+            )
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Requirement):
+            return NotImplemented
+
+        return (
+            canonicalize_name(self.name) == canonicalize_name(other.name)
+            and self.extras == other.extras
+            and self.specifier == other.specifier
+            and self.url == other.url
+            and self.marker == other.marker
+        )
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/specifiers.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000..47c3929
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/specifiers.py
@@ -0,0 +1,1019 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from pip._vendor.packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
+    from pip._vendor.packaging.version import Version
+"""
+
+from __future__ import annotations
+
+import abc
+import itertools
+import re
+from typing import Callable, Iterable, Iterator, TypeVar, Union
+
+from .utils import canonicalize_version
+from .version import Version
+
+UnparsedVersion = Union[Version, str]
+UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
+CallableOperator = Callable[[Version, str], bool]
+
+
+def _coerce_version(version: UnparsedVersion) -> Version:
+    if not isinstance(version, Version):
+        version = Version(version)
+    return version
+
+
+class InvalidSpecifier(ValueError):
+    """
+    Raised when attempting to create a :class:`Specifier` with a specifier
+    string that is invalid.
+
+    >>> Specifier("lolwat")
+    Traceback (most recent call last):
+        ...
+    packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
+    """
+
+
+class BaseSpecifier(metaclass=abc.ABCMeta):
+    @abc.abstractmethod
+    def __str__(self) -> str:
+        """
+        Returns the str representation of this Specifier-like object. This
+        should be representative of the Specifier itself.
+        """
+
+    @abc.abstractmethod
+    def __hash__(self) -> int:
+        """
+        Returns a hash value for this Specifier-like object.
+        """
+
+    @abc.abstractmethod
+    def __eq__(self, other: object) -> bool:
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are equal.
+
+        :param other: The other object to check against.
+ """ + + @property + @abc.abstractmethod + def prereleases(self) -> bool | None: + """Whether or not pre-releases as a whole are allowed. + + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """Setter for :attr:`prereleases`. + + :param value: The value to set. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: bool | None = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None + ) -> Iterator[UnparsedVersionVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. + + .. tip:: + + It is generally not required to instantiate this manually. You should instead + prefer to work with :class:`SpecifierSet` instead, which can parse + comma-separated version specifiers (which is what package metadata contains). + """ + + _operator_regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s;)]* # The arbitrary version can be just about anything, + # we match everything except for whitespace, a + # semi-colon for marker support, and a closing paren + # since versions can be enclosed in them. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + + # You cannot use a wild card and a pre-release, post-release, a dev or + # local version together so group them with a | and make them optional. + (?: + \.\* # Wild card syntax of .* + | + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. 
+ (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + def __init__(self, spec: str = "", prereleases: bool | None = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: {spec!r}") + + self._spec: tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 + @property # type: ignore[override] + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "===", ">", "<"]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. + + >>> Specifier('>=1.0.0') + =1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") + True + >>> (Specifier("==1.2.3", prereleases=False) == + ... 
Specifier("==1.2.3", prereleases=True)) + True + >>> Specifier("==1.2.3") == "==1.2.3" + True + >>> Specifier("==1.2.3") == Specifier("==1.2.4") + False + >>> Specifier("==1.2.3") == Specifier("~=1.2.3") + False + """ + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _compare_compatible(self, prospective: Version, spec: str) -> bool: + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. + prefix = _version_join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + def _compare_equal(self, prospective: Version, spec: str) -> bool: + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + normalized_prospective = canonicalize_version( + prospective.public, strip_trailing_zero=False + ) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) + # Split the spec out by bangs and dots, and pretend that there is + # an implicit dot in between a release segment and a pre-release segment. + split_spec = _version_split(normalized_spec) + + # Split the prospective version out by bangs and dots, and pretend + # that there is an implicit dot in between a release segment and + # a pre-release segment. + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = padded_prospective[: len(split_spec)] + + return shortened_prospective == split_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. 
+        return Version(prospective.public) <= Version(spec)
+
+    def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) >= Version(spec)
+
+    def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is less than the spec
+        # version. If it's not, we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective < spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a pre-release version, we do not accept pre-release
+        # versions for the version mentioned in the specifier (e.g. <3.1 should
+        # not match 3.1.dev0, but should match 3.0.dev0).
+        if not spec.is_prerelease and prospective.is_prerelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that the prospective version is both
+        # less than the spec version *and* it's not a pre-release of the same
+        # version in the spec.
+        return True
+
+    def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is greater than the spec
+        # version. If it's not, we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective > spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+        if not spec.is_postrelease and prospective.is_postrelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # Ensure that we do not allow a local version of the version mentioned
+        # in the specifier, which is technically greater than, to match.
+        if prospective.local is not None:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that the prospective version is both
+        # greater than the spec version *and* it's not a pre-release of the
+        # same version in the spec.
+        return True
+
+    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
+        return str(prospective).lower() == str(spec).lower()
+
+    def __contains__(self, item: str | Version) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in Specifier(">=1.2.3")
+        True
+        >>> Version("1.2.3") in Specifier(">=1.2.3")
+        True
+        >>> "1.0.0" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(self, item: UnparsedVersion, prereleases: bool | None = None) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this Specifier. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> Specifier(">=1.2.3").contains("1.2.3")
+        True
+        >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
+        True
+        >>> Specifier(">=1.2.3").contains("1.0.0")
+        False
+        >>> Specifier(">=1.2.3").contains("1.3.0a1")
+        False
+        >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
+        True
+        >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
+        True
+        """
+
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version; this allows us to have a shortcut for
+        # "2.0" in Specifier(">=2")
+        normalized_item = _coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not. If we do not support prereleases, then we can short-circuit
+        # the logic if this version is a prerelease.
+        if normalized_item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        operator_callable: CallableOperator = self._get_operator(self.operator)
+        return operator_callable(normalized_item, self.version)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable that match the specifier.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(Specifier().contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
+        ['1.2.3', '1.3', <Version('1.4')>]
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
+        ['1.5a1']
+        >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        """
+
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = _coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if parsed_version.is_prerelease and not (
+                    prereleases or self.prereleases
+                ):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version: str) -> list[str]:
+    """Split version into components.
+
+    The split components are intended for version comparison. The logic does
+    not attempt to retain the original version string, so joining the
+    components back with :func:`_version_join` may not produce the original
+    version string.
+    """
+    result: list[str] = []
+
+    epoch, _, rest = version.rpartition("!")
+    result.append(epoch or "0")
+
+    for item in rest.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _version_join(components: list[str]) -> str:
+    """Join split version components into a version string.
+
+    This function assumes the input came from :func:`_version_split`, where the
+    first component must be the epoch (either empty or numeric), and all other
+    components numeric.
+    """
+    epoch, *rest = components
+    return f"{epoch}!{'.'.join(rest)}"
+
+
+def _is_not_suffix(segment: str) -> bool:
+    return not any(
+        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+    )
+
+
+def _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]:
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]) :])
+    right_split.append(right[len(right_split[0]) :])
+
+    # Insert our padding
+    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+    return (
+        list(itertools.chain.from_iterable(left_split)),
+        list(itertools.chain.from_iterable(right_split)),
+    )
+
+
+class SpecifierSet(BaseSpecifier):
+    """This class abstracts handling of a set of version specifiers.
+
+    It can be passed a single specifier (``>=3.0``), a comma-separated list of
+    specifiers (``>=3.0,!=3.1``), or no specifier at all.
+    """
+
+    def __init__(
+        self,
+        specifiers: str | Iterable[Specifier] = "",
+        prereleases: bool | None = None,
+    ) -> None:
+        """Initialize a SpecifierSet instance.
+
+        :param specifiers:
+            The string representation of a specifier or a comma-separated list of
+            specifiers which will be parsed and normalized before use.
+            May also be an iterable of ``Specifier`` instances, which will be used
+            as is.
+        :param prereleases:
+            This tells the SpecifierSet if it should accept prerelease versions if
+            applicable or not. The default of ``None`` will autodetect it from the
+            given specifiers.
+
+        :raises InvalidSpecifier:
+            If the given ``specifiers`` are not parseable, then this exception will be
+            raised.
+        """
+
+        if isinstance(specifiers, str):
+            # Split on `,` to break each individual specifier into its own item, and
+            # strip each item to remove leading/trailing whitespace.
+            split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+            # Make each individual specifier a Specifier and save in a frozen set
+            # for later.
+            self._specs = frozenset(map(Specifier, split_specifiers))
+        else:
+            # Save the supplied specifiers in a frozen set.
+            self._specs = frozenset(specifiers)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    @property
+    def prereleases(self) -> bool | None:
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+    def __repr__(self) -> str:
+        """A representation of the specifier set that shows all internal state.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> SpecifierSet('>=1.0.0,!=2.0.0')
+        <SpecifierSet('!=2.0.0,>=1.0.0')>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
+        """
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<SpecifierSet({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the specifier set that can be round-tripped.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
+        '!=1.0.1,>=1.0.0'
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
+        '!=1.0.1,>=1.0.0'
+        """
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self) -> int:
+        return hash(self._specs)
+
+    def __and__(self, other: SpecifierSet | str) -> SpecifierSet:
+        """Return a SpecifierSet which is a combination of the two sets.
+
+        :param other: The other object to combine with.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        """
+        if isinstance(other, str):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two SpecifierSet-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
+        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
+        False
+        """
+        if isinstance(other, (str, Specifier)):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __len__(self) -> int:
+        """Returns the number of specifiers in this specifier set."""
+        return len(self._specs)
+
+    def __iter__(self) -> Iterator[Specifier]:
+        """
+        Returns an iterator over all the underlying :class:`Specifier` instances
+        in this specifier set.
+
+        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
+        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
+        """
+        return iter(self._specs)
+
+    def __contains__(self, item: UnparsedVersion) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(
+        self,
+        item: UnparsedVersion,
+        prereleases: bool | None = None,
+        installed: bool | None = None,
+    ) -> bool:
+        """Return whether or not the item is contained in this SpecifierSet.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this SpecifierSet. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
+        True
+        """
+        # Ensure that our item is a Version instance.
+        if not isinstance(item, Version):
+            item = Version(item)
+
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        # like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
+        if not prereleases and item.is_prerelease:
+            return False
+
+        if installed and item.is_prerelease:
+            item = Version(item.base_version)
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        # will always return True; this is an explicit design decision.
+        return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable that match the specifiers in this set.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
+        ['1.3', <Version('1.4')>]
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
+        []
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+
+        An "empty" SpecifierSet will filter items based on the presence of prerelease
+        versions in the set.
+
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet("").filter(["1.5a1"]))
+        ['1.5a1']
+        >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        """
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one, this will act as a logical AND amongst
+        # each specifier.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iter(iterable)
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases.
+ else: + filtered: list[UnparsedVersionVar] = [] + found_prereleases: list[UnparsedVersionVar] = [] + + for item in iterable: + parsed_version = _coerce_version(item) + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return iter(found_prereleases) + + return iter(filtered) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/tags.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/tags.py new file mode 100644 index 0000000..8522f59 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/tags.py @@ -0,0 +1,656 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import annotations + +import logging +import platform +import re +import struct +import subprocess +import sys +import sysconfig +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Iterable, + Iterator, + Sequence, + Tuple, + cast, +) + +from . import _manylinux, _musllinux + +logger = logging.getLogger(__name__) + +PythonVersion = Sequence[int] +AppleVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: dict[str, str] = { + "python": "py", # Generic. + "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = struct.calcsize("P") == 4 + + +class Tag: + """ + A representation of the tag triple for a wheel. + + Instances are considered immutable and thus are hashable. Equality checking + is also supported. + """ + + __slots__ = ["_abi", "_hash", "_interpreter", "_platform"] + + def __init__(self, interpreter: str, abi: str, platform: str) -> None: + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + # The __hash__ of every single element in a Set[Tag] will be evaluated each time + # that a set calls its `.disjoint()` method, which may be called hundreds of + # times when scanning a page of links for packages with tags matching that + # Set[Tag]. Pre-computing the value here produces significant speedups for + # downstream consumers. + self._hash = hash((self._interpreter, self._abi, self._platform)) + + @property + def interpreter(self) -> str: + return self._interpreter + + @property + def abi(self) -> str: + return self._abi + + @property + def platform(self) -> str: + return self._platform + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Tag): + return NotImplemented + + return ( + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. + and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) + ) + + def __hash__(self) -> int: + return self._hash + + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" + + def __repr__(self) -> str: + return f"<{self} @ {id(self)}>" + + +def parse_tag(tag: str) -> frozenset[Tag]: + """ + Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. + + Returning a set is required due to the possibility that the tag is a + compressed tag set. 
+    """
+    tags = set()
+    interpreters, abis, platforms = tag.split("-")
+    for interpreter in interpreters.split("."):
+        for abi in abis.split("."):
+            for platform_ in platforms.split("."):
+                tags.add(Tag(interpreter, abi, platform_))
+    return frozenset(tags)
+
+
+def _get_config_var(name: str, warn: bool = False) -> int | str | None:
+    value: int | str | None = sysconfig.get_config_var(name)
+    if value is None and warn:
+        logger.debug(
+            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+        )
+    return value
+
+
+def _normalize_string(string: str) -> str:
+    return string.replace(".", "_").replace("-", "_").replace(" ", "_")
+
+
+def _is_threaded_cpython(abis: list[str]) -> bool:
+    """
+    Determine if the ABI corresponds to a threaded (`--disable-gil`) build.
+
+    The threaded builds are indicated by a "t" in the abiflags.
+    """
+    if len(abis) == 0:
+        return False
+    # expect e.g., cp313
+    m = re.match(r"cp\d+(.*)", abis[0])
+    if not m:
+        return False
+    abiflags = m.group(1)
+    return "t" in abiflags
+
+
+def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool:
+    """
+    Determine if the Python version supports abi3.
+
+    PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`)
+    builds do not support abi3.
+    """
+    return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading
+
+
+def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]:
+    py_version = tuple(py_version)  # To allow for version comparison.
+    abis = []
+    version = _version_nodot(py_version[:2])
+    threading = debug = pymalloc = ucs4 = ""
+    with_debug = _get_config_var("Py_DEBUG", warn)
+    has_refcount = hasattr(sys, "gettotalrefcount")
+    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+    # extension modules is the best option.
+    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+    if with_debug or (with_debug is None and (has_refcount or has_ext)):
+        debug = "d"
+    if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn):
+        threading = "t"
+    if py_version < (3, 8):
+        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+        if with_pymalloc or with_pymalloc is None:
+            pymalloc = "m"
+        if py_version < (3, 3):
+            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+            if unicode_size == 4 or (
+                unicode_size is None and sys.maxunicode == 0x10FFFF
+            ):
+                ucs4 = "u"
+    elif debug:
+        # Debug builds can also load "normal" extension modules.
+        # We can also assume no UCS-4 or pymalloc requirement.
+        abis.append(f"cp{version}{threading}")
+    abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}")
+    return abis
+
+
+def cpython_tags(
+    python_version: PythonVersion | None = None,
+    abis: Iterable[str] | None = None,
+    platforms: Iterable[str] | None = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a CPython interpreter.
+
+    The tags consist of:
+    - cp<python_version>-<abi>-<platform>
+    - cp<python_version>-abi3-<platform>
+    - cp<python_version>-none-<platform>
+    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
+
+    If python_version only specifies a major version then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+    their normal position and not at the beginning.
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+
+    interpreter = f"cp{_version_nodot(python_version[:2])}"
+
+    if abis is None:
+        if len(python_version) > 1:
+            abis = _cpython_abis(python_version, warn)
+        else:
+            abis = []
+    abis = list(abis)
+    # 'abi3' and 'none' are explicitly handled later.
+    for explicit_abi in ("abi3", "none"):
+        try:
+            abis.remove(explicit_abi)
+        except ValueError:
+            pass
+
+    platforms = list(platforms or platform_tags())
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+
+    threading = _is_threaded_cpython(abis)
+    use_abi3 = _abi3_applies(python_version, threading)
+    if use_abi3:
+        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
+    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
+
+    if use_abi3:
+        for minor_version in range(python_version[1] - 1, 1, -1):
+            for platform_ in platforms:
+                version = _version_nodot((python_version[0], minor_version))
+                interpreter = f"cp{version}"
+                yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi() -> list[str]:
+    """
+    Return the ABI tag based on EXT_SUFFIX.
+    """
+    # The following are examples of `EXT_SUFFIX`.
+    # We want to keep the parts which are related to the ABI and remove the
+    # parts which are related to the platform:
+    # - linux:   '.cpython-310-x86_64-linux-gnu.so' => cp310
+    # - mac:     '.cpython-310-darwin.so'           => cp310
+    # - win:     '.cp310-win_amd64.pyd'             => cp310
+    # - win:     '.pyd'                             => cp37 (uses _cpython_abis())
+    # - pypy:    '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
+    # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
+    #            => graalpy_38_native
+
+    ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
+    if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
+        raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
+    parts = ext_suffix.split(".")
+    if len(parts) < 3:
+        # CPython 3.7 and earlier use ".pyd" on Windows.
+        return _cpython_abis(sys.version_info[:2])
+    soabi = parts[1]
+    if soabi.startswith("cpython"):
+        # non-windows
+        abi = "cp" + soabi.split("-")[1]
+    elif soabi.startswith("cp"):
+        # windows
+        abi = soabi.split("-")[0]
+    elif soabi.startswith("pypy"):
+        abi = "-".join(soabi.split("-")[:2])
+    elif soabi.startswith("graalpy"):
+        abi = "-".join(soabi.split("-")[:3])
+    elif soabi:
+        # pyston, ironpython, others?
+        abi = soabi
+    else:
+        return []
+    return [_normalize_string(abi)]
+
+
+def generic_tags(
+    interpreter: str | None = None,
+    abis: Iterable[str] | None = None,
+    platforms: Iterable[str] | None = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a generic interpreter.
+
+    The tags consist of:
+    - <interpreter>-<abi>-<platform>
+
+    The "none" ABI will be added if it was not explicitly provided.
+    """
+    if not interpreter:
+        interp_name = interpreter_name()
+        interp_version = interpreter_version(warn=warn)
+        interpreter = "".join([interp_name, interp_version])
+    if abis is None:
+        abis = _generic_abi()
+    else:
+        abis = list(abis)
+    platforms = list(platforms or platform_tags())
+    if "none" not in abis:
+        abis.append("none")
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+    """
+    Yields Python versions in descending order.
+
+    After the latest version, the major-only version will be yielded, and then
+    all previous versions of that major version.
+    """
+    if len(py_version) > 1:
+        yield f"py{_version_nodot(py_version[:2])}"
+    yield f"py{py_version[0]}"
+    if len(py_version) > 1:
+        for minor in range(py_version[1] - 1, -1, -1):
+            yield f"py{_version_nodot((py_version[0], minor))}"
+
+
+def compatible_tags(
+    python_version: PythonVersion | None = None,
+    interpreter: str | None = None,
+    platforms: Iterable[str] | None = None,
+) -> Iterator[Tag]:
+    """
+    Yields the sequence of tags that are compatible with a specific version of Python.
+
+    The tags consist of:
+    - py*-none-<platform>
+    - <interpreter>-none-any  # ... if `interpreter` is provided.
+    - py*-none-any
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+    platforms = list(platforms or platform_tags())
+    for version in _py_interpreter_range(python_version):
+        for platform_ in platforms:
+            yield Tag(version, "none", platform_)
+    if interpreter:
+        yield Tag(interpreter, "none", "any")
+    for version in _py_interpreter_range(python_version):
+        yield Tag(version, "none", "any")
+
+
+def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
+    if not is_32bit:
+        return arch
+
+    if arch.startswith("ppc"):
+        return "ppc"
+
+    return "i386"
+
+
+def _mac_binary_formats(version: AppleVersion, cpu_arch: str) -> list[str]:
+    formats = [cpu_arch]
+    if cpu_arch == "x86_64":
+        if version < (10, 4):
+            return []
+        formats.extend(["intel", "fat64", "fat32"])
+
+    elif cpu_arch == "i386":
+        if version < (10, 4):
+            return []
+        formats.extend(["intel", "fat32", "fat"])
+
+    elif cpu_arch == "ppc64":
+        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+        if version > (10, 5) or version < (10, 4):
+            return []
+        formats.append("fat64")
+
+    elif cpu_arch == "ppc":
+        if version > (10, 6):
+            return []
+        formats.extend(["fat32", "fat"])
+
+    if cpu_arch in {"arm64", "x86_64"}:
+        formats.append("universal2")
+
+    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
+        formats.append("universal")
+
+    return formats
+
+
+def mac_platforms(
+    version: AppleVersion | None = None, arch: str | None = None
+) -> Iterator[str]:
+    """
+    Yields the platform tags for a macOS system.
+
+    The `version` parameter is a two-item tuple specifying the macOS version to
+    generate platform tags for. The `arch` parameter is the CPU architecture to
+    generate platform tags for. Both parameters default to the appropriate value
+    for the current system.
+    """
+    version_str, _, cpu_arch = platform.mac_ver()
+    if version is None:
+        version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))
+        if version == (10, 16):
+            # When built against an older macOS SDK, Python will report macOS 10.16
+            # instead of the real version.
+            version_str = subprocess.run(
+                [
+                    sys.executable,
+                    "-sS",
+                    "-c",
+                    "import platform; print(platform.mac_ver()[0])",
+                ],
+                check=True,
+                env={"SYSTEM_VERSION_COMPAT": "0"},
+                stdout=subprocess.PIPE,
+                text=True,
+            ).stdout
+            version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))
+    else:
+        version = version
+    if arch is None:
+        arch = _mac_arch(cpu_arch)
+    else:
+        arch = arch
+
+    if (10, 0) <= version and version < (11, 0):
+        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
+        # "minor" version number. The major version was always 10.
+        major_version = 10
+        for minor_version in range(version[1], -1, -1):
+            compat_version = major_version, minor_version
+            binary_formats = _mac_binary_formats(compat_version, arch)
+            for binary_format in binary_formats:
+                yield f"macosx_{major_version}_{minor_version}_{binary_format}"
+
+    if version >= (11, 0):
+        # Starting with Mac OS 11, each yearly release bumps the major version
+        # number. The minor versions are now the midyear updates.
+        minor_version = 0
+        for major_version in range(version[0], 10, -1):
+            compat_version = major_version, minor_version
+            binary_formats = _mac_binary_formats(compat_version, arch)
+            for binary_format in binary_formats:
+                yield f"macosx_{major_version}_{minor_version}_{binary_format}"
+
+    if version >= (11, 0):
+        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
+        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
+        # releases exist.
+        #
+        # However, the "universal2" binary format can have a
+        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
+        # that version of macOS.
+        major_version = 10
+        if arch == "x86_64":
+            for minor_version in range(16, 3, -1):
+                compat_version = major_version, minor_version
+                binary_formats = _mac_binary_formats(compat_version, arch)
+                for binary_format in binary_formats:
+                    yield f"macosx_{major_version}_{minor_version}_{binary_format}"
+        else:
+            for minor_version in range(16, 3, -1):
+                compat_version = major_version, minor_version
+                binary_format = "universal2"
+                yield f"macosx_{major_version}_{minor_version}_{binary_format}"
+
+
+def ios_platforms(
+    version: AppleVersion | None = None, multiarch: str | None = None
+) -> Iterator[str]:
+    """
+    Yields the platform tags for an iOS system.
+
+    :param version: A two-item tuple specifying the iOS version to generate
+        platform tags for. Defaults to the current iOS version.
+    :param multiarch: The CPU architecture+ABI to generate platform tags for
+        (the value used by `sys.implementation._multiarch` e.g.,
+        `arm64_iphoneos` or `x86_64_iphonesimulator`). Defaults to the current
+        multiarch value.
+    """
+    if version is None:
+        # if iOS is the current platform, ios_ver *must* be defined. However,
+        # it won't exist for CPython versions before 3.13, which causes a mypy
+        # error.
+        _, release, _, _ = platform.ios_ver()  # type: ignore[attr-defined, unused-ignore]
+        version = cast("AppleVersion", tuple(map(int, release.split(".")[:2])))
+
+    if multiarch is None:
+        multiarch = sys.implementation._multiarch
+    multiarch = multiarch.replace("-", "_")
+
+    ios_platform_template = "ios_{major}_{minor}_{multiarch}"
+
+    # Consider any iOS major.minor version from the version requested, down to
+    # 12.0. 12.0 is the first iOS version that is known to have enough features
+    # to support CPython. Consider every possible minor release up to X.9. The
+    # highest the minor has ever gone is 8 (14.8 and 15.8), but having some extra
+    # candidates that won't ever match doesn't really hurt, and it saves us from
+    # having to keep an explicit list of known iOS versions in the code. Return
+    # the results in descending order of version number.
+
+    # If the requested major version is less than 12, there won't be any matches.
+    if version[0] < 12:
+        return
+
+    # Consider the actual X.Y version that was requested.
+    yield ios_platform_template.format(
+        major=version[0], minor=version[1], multiarch=multiarch
+    )
+
+    # Consider every minor version from X.0 to the minor version prior to the
+    # version requested by the platform.
+    for minor in range(version[1] - 1, -1, -1):
+        yield ios_platform_template.format(
+            major=version[0], minor=minor, multiarch=multiarch
+        )
+
+    for major in range(version[0] - 1, 11, -1):
+        for minor in range(9, -1, -1):
+            yield ios_platform_template.format(
+                major=major, minor=minor, multiarch=multiarch
+            )
+
+
+def android_platforms(
+    api_level: int | None = None, abi: str | None = None
+) -> Iterator[str]:
+    """
+    Yields the :attr:`~Tag.platform` tags for Android. If this function is invoked on
+    non-Android platforms, the ``api_level`` and ``abi`` arguments are required.
+
+    :param int api_level: The maximum `API level
+        <https://developer.android.com/tools/releases/platforms>`__ to return. Defaults
+        to the current system's version, as returned by ``platform.android_ver``.
+    :param str abi: The `Android ABI <https://developer.android.com/ndk/guides/abis>`__,
+        e.g. ``arm64_v8a``. Defaults to the current system's ABI, as returned by
+        ``sysconfig.get_platform``. Hyphens and periods will be replaced with
+        underscores.
+    """
+    if platform.system() != "Android" and (api_level is None or abi is None):
+        raise TypeError(
+            "on non-Android platforms, the api_level and abi arguments are required"
+        )
+
+    if api_level is None:
+        # Python 3.13 was the first version to return platform.system() == "Android",
+        # and also the first version to define platform.android_ver().
+        api_level = platform.android_ver().api_level  # type: ignore[attr-defined]
+
+    if abi is None:
+        abi = sysconfig.get_platform().split("-")[-1]
+    abi = _normalize_string(abi)
+
+    # 16 is the minimum API level known to have enough features to support CPython
+    # without major patching. Yield every API level from the maximum down to the
+    # minimum, inclusive.
+    min_api_level = 16
+    for ver in range(api_level, min_api_level - 1, -1):
+        yield f"android_{ver}_{abi}"
+
+
+def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
+    linux = _normalize_string(sysconfig.get_platform())
+    if not linux.startswith("linux_"):
+        # we should never be here, just yield the sysconfig one and return
+        yield linux
+        return
+    if is_32bit:
+        if linux == "linux_x86_64":
+            linux = "linux_i686"
+        elif linux == "linux_aarch64":
+            linux = "linux_armv8l"
+    _, arch = linux.split("_", 1)
+    archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch])
+    yield from _manylinux.platform_tags(archs)
+    yield from _musllinux.platform_tags(archs)
+    for arch in archs:
+        yield f"linux_{arch}"
+
+
+def _generic_platforms() -> Iterator[str]:
+    yield _normalize_string(sysconfig.get_platform())
+
+
+def platform_tags() -> Iterator[str]:
+    """
+    Provides the platform tags for this installation.
+    """
+    if platform.system() == "Darwin":
+        return mac_platforms()
+    elif platform.system() == "iOS":
+        return ios_platforms()
+    elif platform.system() == "Android":
+        return android_platforms()
+    elif platform.system() == "Linux":
+        return _linux_platforms()
+    else:
+        return _generic_platforms()
+
+
+def interpreter_name() -> str:
+    """
+    Returns the name of the running interpreter.
+
+    Some implementations have a reserved, two-letter abbreviation which will
+    be returned when appropriate.
+    """
+    name = sys.implementation.name
+    return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(*, warn: bool = False) -> str:
+    """
+    Returns the version of the running interpreter.
+ """ + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version: PythonVersion) -> str: + return "".join(map(str, version)) + + +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: + """ + Returns the sequence of tag triples for the running interpreter. + + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + + interp_name = interpreter_name() + if interp_name == "cp": + yield from cpython_tags(warn=warn) + else: + yield from generic_tags() + + if interp_name == "pp": + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) + else: + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/utils.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/utils.py new file mode 100644 index 0000000..2345095 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/utils.py @@ -0,0 +1,163 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import annotations + +import functools +import re +from typing import NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version, _TrimmedRelease + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidName(ValueError): + """ + An invalid distribution name; users should refer to the packaging user guide. + """ + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +# Core metadata spec for `Name` +_validate_regex = re.compile( + r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE +) +_canonicalize_regex = re.compile(r"[-_.]+") +_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName: + if validate and not _validate_regex.match(name): + raise InvalidName(f"name is invalid: {name!r}") + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def is_normalized_name(name: str) -> bool: + return _normalized_regex.match(name) is not None + + +@functools.singledispatch +def canonicalize_version( + version: Version | str, *, strip_trailing_zero: bool = True +) -> str: + """ + Return a canonical form of a version as a string. + + >>> canonicalize_version('1.0.1') + '1.0.1' + + Per PEP 625, versions may have multiple canonical forms, differing + only by trailing zeros. + + >>> canonicalize_version('1.0.0') + '1' + >>> canonicalize_version('1.0.0', strip_trailing_zero=False) + '1.0.0' + + Invalid versions are returned unaltered. 
+ + >>> canonicalize_version('foo bar baz') + 'foo bar baz' + """ + return str(_TrimmedRelease(str(version)) if strip_trailing_zero else version) + + +@canonicalize_version.register +def _(version: str, *, strip_trailing_zero: bool = True) -> str: + try: + parsed = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + return canonicalize_version(parsed, strip_trailing_zero=strip_trailing_zero) + + +def parse_wheel_filename( + filename: str, +) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]: + if not filename.endswith(".whl"): + raise InvalidWheelFilename( + f"Invalid wheel filename (extension must be '.whl'): {filename!r}" + ) + + filename = filename[:-4] + dashes = filename.count("-") + if dashes not in (4, 5): + raise InvalidWheelFilename( + f"Invalid wheel filename (wrong number of parts): {filename!r}" + ) + + parts = filename.split("-", dashes - 2) + name_part = parts[0] + # See PEP 427 for the rules on escaping the project name. + if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: + raise InvalidWheelFilename(f"Invalid project name: {filename!r}") + name = canonicalize_name(name_part) + + try: + version = Version(parts[1]) + except InvalidVersion as e: + raise InvalidWheelFilename( + f"Invalid wheel filename (invalid version): {filename!r}" + ) from e + + if dashes == 5: + build_part = parts[2] + build_match = _build_tag_regex.match(build_part) + if build_match is None: + raise InvalidWheelFilename( + f"Invalid build number: {build_part} in {filename!r}" + ) + build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) + else: + build = () + tags = parse_tag(parts[-1]) + return (name, version, build, tags) + + +def parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: + raise InvalidSdistFilename( + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename!r}" + ) + + # We are requiring a PEP 440 version, which cannot contain dashes, + # so we split on the last dash. + name_part, sep, version_part = file_stem.rpartition("-") + if not sep: + raise InvalidSdistFilename(f"Invalid sdist filename: {filename!r}") + + name = canonicalize_name(name_part) + + try: + version = Version(version_part) + except InvalidVersion as e: + raise InvalidSdistFilename( + f"Invalid sdist filename (invalid version): {filename!r}" + ) from e + + return (name, version) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/version.py b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/version.py new file mode 100644 index 0000000..21f44ca --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/packaging/version.py @@ -0,0 +1,582 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from pip._vendor.packaging.version import parse, Version +""" + +from __future__ import annotations + +import itertools +import re +from typing import Any, Callable, NamedTuple, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["VERSION_PATTERN", "InvalidVersion", "Version", "parse"] + +LocalType = Tuple[Union[int, str], ...] 
+
+CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
+CmpLocalType = Union[
+    NegativeInfinityType,
+    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
+]
+CmpKey = Tuple[
+    int,
+    Tuple[int, ...],
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpLocalType,
+]
+VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
+
+
+class _Version(NamedTuple):
+    epoch: int
+    release: tuple[int, ...]
+    dev: tuple[str, int] | None
+    pre: tuple[str, int] | None
+    post: tuple[str, int] | None
+    local: LocalType | None
+
+
+def parse(version: str) -> Version:
+    """Parse the given version string.
+
+    >>> parse('1.0.dev1')
+    <Version('1.0.dev1')>
+
+    :param version: The version string to parse.
+    :raises InvalidVersion: When the version string is not a valid version.
+    """
+    return Version(version)
+
+
+class InvalidVersion(ValueError):
+    """Raised when a version string is not a valid version.
+
+    >>> Version("invalid")
+    Traceback (most recent call last):
+        ...
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
+    """
+
+
+class _BaseVersion:
+    _key: tuple[Any, ...]
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+_VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
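+# A minimal sketch of embedding the pattern, as the docstring above suggests;
+# the name `_check` is illustrative only, and the flags mirror what the
+# `Version` class below actually uses:
+#
+#     _check = re.compile(
+#         r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE
+#     )
+#     assert _check.match("1!2.0.post1.dev2+local.3") is not None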
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: {version!r}")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f""
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be round-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> tuple[str, int] | None:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> int | None:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> int | None:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> str | None:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1!1.2.3dev1+abc").public
+        '1!1.2.3.dev1'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3dev1+abc").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
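+# A small usage sketch (illustrative): because comparisons go through the
+# `_cmpkey` sorting key built below, dev releases sort before pre-releases,
+# which sort before final releases:
+#
+#     sorted(["1.0", "1.0.dev1", "1.0a1"], key=Version)
+#     # -> ['1.0.dev1', '1.0a1', '1.0']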
+
+class _TrimmedRelease(Version):
+    @property
+    def release(self) -> tuple[int, ...]:
+        """
+        Release segment without any trailing zeros.
+
+        >>> _TrimmedRelease('1.0.0').release
+        (1,)
+        >>> _TrimmedRelease('0.0').release
+        (0,)
+        """
+        rel = super().release
+        nonzeros = (index for index, val in enumerate(rel) if val)
+        last_nonzero = max(nonzeros, default=0)
+        return rel[: last_nonzero + 1]
+
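+# `_TrimmedRelease` exists so that rendering a version drops trailing zeros
+# from the release segment; `canonicalize_version` in utils.py relies on this,
+# e.g. str(_TrimmedRelease("1.0.0")) gives '1'.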
+
+def _parse_letter_version(
+    letter: str | None, number: str | bytes | SupportsInt | None
+) -> tuple[str, int] | None:
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+
+    assert not letter
+    if number:
+        # We assume that if we are given a number but not a letter, then this
+        # is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
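+# A quick sketch of the normalization above (illustrative inputs, traced
+# through the branches of _parse_letter_version):
+#
+#     _parse_letter_version("alpha", None)   # -> ("a", 0)    implicit 0
+#     _parse_letter_version("preview", "4")  # -> ("rc", 4)   alternate spelling
+#     _parse_letter_version(None, "1")       # -> ("post", 1) implicit post, as in 1.0-1
+#     _parse_letter_version(None, None)      # -> None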
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str | None) -> LocalType | None:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: tuple[int, ...],
+    pre: tuple[str, int] | None,
+    post: tuple[str, int] | None,
+    dev: tuple[str, int] | None,
+    local: LocalType | None,
+) -> CmpKey:
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop the now-leading
+    # zeros until we hit something non-zero, take the rest, re-reverse it back
+    # into the correct order, and make it a tuple to use as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
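+
+
+# Taken together, the sentinels above produce the PEP 440 ordering; a sketch of
+# the resulting sort (assuming Version feeds its fields through _cmpkey):
+#
+#     1.0.dev0 < 1.0a0 < 1.0a0.post1 < 1.0 < 1.0.post0 < 1.0.post0+anything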
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pkg_resources/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pkg_resources/__init__.py
new file mode 100644
index 0000000..72f2b03
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pkg_resources/__init__.py
@@ -0,0 +1,3676 @@
+# TODO: Add Generic type annotations to initialized collections.
+# For now we'd simply use implicit Any/Unknown which would add redundant annotations
+# mypy: disable-error-code="var-annotated"
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof.  The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is.  Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files.  It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+
+This module is deprecated. Users are directed to :mod:`importlib.resources`,
+:mod:`importlib.metadata` and :pypi:`packaging` instead.
+"""
+
+from __future__ import annotations
+
+import sys
+
+if sys.version_info < (3, 8):  # noqa: UP036 # Check for unsupported versions
+    raise RuntimeError("Python 3.8 or later is required")
+
+import os
+import io
+import time
+import re
+import types
+from typing import (
+    Any,
+    Literal,
+    Dict,
+    Iterator,
+    Mapping,
+    MutableSequence,
+    NamedTuple,
+    NoReturn,
+    Tuple,
+    Union,
+    TYPE_CHECKING,
+    Protocol,
+    Callable,
+    Iterable,
+    TypeVar,
+    overload,
+)
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
+import errno
+import tempfile
+import textwrap
+import inspect
+import ntpath
+import posixpath
+import importlib
+import importlib.abc
+import importlib.machinery
+from pkgutil import get_importer
+
+import _imp
+
+# capture these to bypass sandboxing
+from os import utime
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+    from os import mkdir, rename, unlink
+
+    WRITE_SUPPORT = True
+except ImportError:
+    # no write support, probably under GAE
+    WRITE_SUPPORT = False
+
+from pip._internal.utils._jaraco_text import (
+    yield_lines,
+    drop_comment,
+    join_continuation,
+)
+from pip._vendor.packaging import markers as _packaging_markers
+from pip._vendor.packaging import requirements as _packaging_requirements
+from pip._vendor.packaging import utils as _packaging_utils
+from pip._vendor.packaging import version as _packaging_version
+from pip._vendor.platformdirs import user_cache_dir as _user_cache_dir
+
+if TYPE_CHECKING:
+    from _typeshed import BytesPath, StrPath, StrOrBytesPath
+    from typing_extensions import Self
+
+
+# Patch: Remove deprecation warning from vendored pkg_resources.
+# Setting PYTHONWARNINGS=error to verify builds produce no warnings
+# causes immediate exceptions.
+# See https://github.com/pypa/pip/issues/12243
+
+
+_T = TypeVar("_T")
+_DistributionT = TypeVar("_DistributionT", bound="Distribution")
+# Type aliases
+_NestedStr = Union[str, Iterable[Union[str, Iterable["_NestedStr"]]]]
+_InstallerTypeT = Callable[["Requirement"], "_DistributionT"]
+_InstallerType = Callable[["Requirement"], Union["Distribution", None]]
+_PkgReqType = Union[str, "Requirement"]
+_EPDistType = Union["Distribution", _PkgReqType]
+_MetadataType = Union["IResourceProvider", None]
+_ResolvedEntryPoint = Any  # Can be any attribute in the module
+_ResourceStream = Any  # TODO / Incomplete: A readable file-like object
+# Any object works, but let's indicate we expect something like a module (optionally has __loader__ or __file__)
+_ModuleLike = Union[object, types.ModuleType]
+# Any: Should be _ModuleLike but we end up with issues where _ModuleLike doesn't have _ZipLoaderModule's __loader__
+_ProviderFactoryType = Callable[[Any], "IResourceProvider"]
+_DistFinderType = Callable[[_T, str, bool], Iterable["Distribution"]]
+_NSHandlerType = Callable[[_T, str, str, types.ModuleType], Union[str, None]]
+_AdapterT = TypeVar(
+    "_AdapterT", _DistFinderType[Any], _ProviderFactoryType, _NSHandlerType[Any]
+)
+
+
+# Use _typeshed.importlib.LoaderProtocol once available https://github.com/python/typeshed/pull/11890
+class _LoaderProtocol(Protocol):
+    def load_module(self, fullname: str, /) -> types.ModuleType: ...
+
+
+class _ZipLoaderModule(Protocol):
+    __loader__: zipimport.zipimporter
+
+
+_PEP440_FALLBACK = re.compile(r"^v?(?P<safe>(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I)
+
+
+class PEP440Warning(RuntimeWarning):
+    """
+    Used when there is an issue with a version or specifier not complying with
+    PEP 440.
+    """
+
+
+parse_version = _packaging_version.Version
+
+
+_state_vars: dict[str, str] = {}
+
+
+def _declare_state(vartype: str, varname: str, initial_value: _T) -> _T:
+    _state_vars[varname] = vartype
+    return initial_value
+
+
+def __getstate__() -> dict[str, Any]:
+    state = {}
+    g = globals()
+    for k, v in _state_vars.items():
+        state[k] = g['_sget_' + v](g[k])
+    return state
+
+
+def __setstate__(state: dict[str, Any]) -> dict[str, Any]:
+    g = globals()
+    for k, v in state.items():
+        g['_sset_' + _state_vars[k]](k, g[k], v)
+    return state
+
+
+def _sget_dict(val):
+    return val.copy()
+
+
+def _sset_dict(key, ob, state):
+    ob.clear()
+    ob.update(state)
+
+
+def _sget_object(val):
+    return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+    ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
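+# Sketch of the state machinery above (hypothetical '_cache' global; the real
+# module registers its own globals this way):
+#
+#     _cache = _declare_state('dict', '_cache', {})
+#     snapshot = __getstate__()   # copies _cache via _sget_dict
+#     __setstate__(snapshot)      # restores it in place via _sset_dict
+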
+
+def get_supported_platform():
+    """Return this platform's maximum compatible version.
+
+    distutils.util.get_platform() normally reports the minimum version
+    of macOS that would be required to *use* extensions produced by
+    distutils.  But what we want when checking compatibility is to know the
+    version of macOS that we are *running*.  To allow usage of packages that
+    explicitly require a newer version of macOS, we must also know the
+    current version of the OS.
+
+    If this condition occurs for any other platform with a version in its
+    platform strings, this function should be extended accordingly.
+    """
+    plat = get_build_platform()
+    m = macosVersionString.match(plat)
+    if m is not None and sys.platform == "darwin":
+        try:
+            plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
+        except ValueError:
+            # not macOS
+            pass
+    return plat
+
+
+__all__ = [
+    # Basic resource access and distribution/entry point discovery
+    'require',
+    'run_script',
+    'get_provider',
+    'get_distribution',
+    'load_entry_point',
+    'get_entry_map',
+    'get_entry_info',
+    'iter_entry_points',
+    'resource_string',
+    'resource_stream',
+    'resource_filename',
+    'resource_listdir',
+    'resource_exists',
+    'resource_isdir',
+    # Environmental control
+    'declare_namespace',
+    'working_set',
+    'add_activation_listener',
+    'find_distributions',
+    'set_extraction_path',
+    'cleanup_resources',
+    'get_default_cache',
+    # Primary implementation classes
+    'Environment',
+    'WorkingSet',
+    'ResourceManager',
+    'Distribution',
+    'Requirement',
+    'EntryPoint',
+    # Exceptions
+    'ResolutionError',
+    'VersionConflict',
+    'DistributionNotFound',
+    'UnknownExtra',
+    'ExtractionError',
+    # Warnings
+    'PEP440Warning',
+    # Parsing functions and string utilities
+    'parse_requirements',
+    'parse_version',
+    'safe_name',
+    'safe_version',
+    'get_platform',
+    'compatible_platforms',
+    'yield_lines',
+    'split_sections',
+    'safe_extra',
+    'to_filename',
+    'invalid_marker',
+    'evaluate_marker',
+    # filesystem utilities
+    'ensure_directory',
+    'normalize_path',
+    # Distribution "precedence" constants
+    'EGG_DIST',
+    'BINARY_DIST',
+    'SOURCE_DIST',
+    'CHECKOUT_DIST',
+    'DEVELOP_DIST',
+    # "Provider" interfaces, implementations, and registration/lookup APIs
+    'IMetadataProvider',
+    'IResourceProvider',
+    'FileMetadata',
+    'PathMetadata',
+    'EggMetadata',
+    'EmptyProvider',
+    'empty_provider',
+    'NullProvider',
+    'EggProvider',
+    'DefaultProvider',
+    'ZipProvider',
+    'register_finder',
+    'register_namespace_handler',
+    'register_loader_type',
+    'fixup_namespace_packages',
+    'get_importer',
+    # Warnings
+    'PkgResourcesDeprecationWarning',
+    # Deprecated/backward compatibility only
+    'run_main',
+    'AvailableDistributions',
+]
+
+
+class ResolutionError(Exception):
+    """Abstract base for dependency resolution errors"""
+
+    def __repr__(self):
+        return self.__class__.__name__ + repr(self.args)
+
+
+class VersionConflict(ResolutionError):
+    """
+    An already-installed version conflicts with the requested version.
+
+    Should be initialized with the installed Distribution and the requested
+    Requirement.
+    """
+
+    _template = "{self.dist} is installed but {self.req} is required"
+
+    @property
+    def dist(self) -> Distribution:
+        return self.args[0]
+
+    @property
+    def req(self) -> Requirement:
+        return self.args[1]
+
+    def report(self):
+        return self._template.format(**locals())
+
+    def with_context(self, required_by: set[Distribution | str]):
+        """
+        If required_by is non-empty, return a version of self that is a
+        ContextualVersionConflict.
+        """
+        if not required_by:
+            return self
+        args = self.args + (required_by,)
+        return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+    """
+    A VersionConflict that accepts a third parameter, the set of the
+    requirements that required the installed Distribution.
+    """
+
+    _template = VersionConflict._template + ' by {self.required_by}'
+
+    @property
+    def required_by(self) -> set[str]:
+        return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+    """A requested distribution was not found"""
+
+    _template = (
+        "The '{self.req}' distribution was not found "
+        "and is required by {self.requirers_str}"
+    )
+
+    @property
+    def req(self) -> Requirement:
+        return self.args[0]
+
+    @property
+    def requirers(self) -> set[str] | None:
+        return self.args[1]
+
+    @property
+    def requirers_str(self):
+        if not self.requirers:
+            return 'the application'
+        return ', '.join(self.requirers)
+
+    def report(self):
+        return self._template.format(**locals())
+
+    def __str__(self):
+        return self.report()
+
+
+class UnknownExtra(ResolutionError):
+    """Distribution doesn't have an "extra feature" of the given name"""
+
+
+_provider_factories: dict[type[_ModuleLike], _ProviderFactoryType] = {}
+
+PY_MAJOR = '{}.{}'.format(*sys.version_info)
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(
+    loader_type: type[_ModuleLike], provider_factory: _ProviderFactoryType
+):
+    """Register `provider_factory` to make providers for `loader_type`
+
+    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+    and `provider_factory` is a function that, passed a *module* object,
+    returns an ``IResourceProvider`` for that module.
+    """
+    _provider_factories[loader_type] = provider_factory
+
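+# Registration sketch (hypothetical loader and provider classes; NullProvider
+# is the base provider exported above):
+#
+#     class MyLoader(importlib.abc.Loader): ...
+#     class MyProvider(NullProvider): ...
+#     register_loader_type(MyLoader, MyProvider)  # MyProvider(module) builds the provider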
+
+@overload
+def get_provider(moduleOrReq: str) -> IResourceProvider: ...
+@overload
+def get_provider(moduleOrReq: Requirement) -> Distribution: ...
+def get_provider(moduleOrReq: str | Requirement) -> IResourceProvider | Distribution:
+    """Return an IResourceProvider for the named module or requirement"""
+    if isinstance(moduleOrReq, Requirement):
+        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+    try:
+        module = sys.modules[moduleOrReq]
+    except KeyError:
+        __import__(moduleOrReq)
+        module = sys.modules[moduleOrReq]
+    loader = getattr(module, '__loader__', None)
+    return _find_adapter(_provider_factories, loader)(module)
+
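+# Usage sketch (illustrative, with a hypothetical package name):
+#
+#     provider = get_provider('mypkg')  # imports mypkg if needed, adapts its __loader__
+#     data = provider.get_resource_string(ResourceManager(), 'data/config.json')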
+
+@functools.lru_cache(maxsize=None)
+def _macos_vers():
+    version = platform.mac_ver()[0]
+    # fallback for MacPorts
+    if version == '':
+        plist = '/System/Library/CoreServices/SystemVersion.plist'
+        if os.path.exists(plist):
+            with open(plist, 'rb') as fh:
+                plist_content = plistlib.load(fh)
+            if 'ProductVersion' in plist_content:
+                version = plist_content['ProductVersion']
+    return version.split('.')
+
+
+def _macos_arch(machine):
+    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
+def get_build_platform():
+    """Return this platform's string for platform-specific distributions
+
+    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+    needs some hacks for Linux and macOS.
+    """
+    from sysconfig import get_platform
+
+    plat = get_platform()
+    if sys.platform == "darwin" and not plat.startswith('macosx-'):
+        try:
+            version = _macos_vers()
+            machine = os.uname()[4].replace(" ", "_")
+            return "macosx-%d.%d-%s" % (
+                int(version[0]),
+                int(version[1]),
+                _macos_arch(machine),
+            )
+        except ValueError:
+            # if someone is running a non-Mac darwin system, this will fall
+            # through to the default implementation
+            pass
+    return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
+
+def compatible_platforms(provided: str | None, required: str | None):
+    """Can code for the `provided` platform run on the `required` platform?
+
+    Returns true if either platform is ``None``, or the platforms are equal.
+
+    XXX Needs compatibility checks for Linux and other unixy OSes.
+    """
+    if provided is None or required is None or provided == required:
+        # easy case
+        return True
+
+    # macOS special cases
+    reqMac = macosVersionString.match(required)
+    if reqMac:
+        provMac = macosVersionString.match(provided)
+
+        # is this a Mac package?
+        if not provMac:
+            # this is backwards compatibility for packages built before
+            # setuptools 0.6. All packages built after this point will
+            # use the new macOS designation.
+            provDarwin = darwinVersionString.match(provided)
+            if provDarwin:
+                dversion = int(provDarwin.group(1))
+                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+                if (
+                    dversion == 7
+                    and macosversion >= "10.3"
+                    or dversion == 8
+                    and macosversion >= "10.4"
+                ):
+                    return True
+            # egg isn't macOS or legacy darwin
+            return False
+
+        # are they the same major version and machine type?
+        if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3):
+            return False
+
+        # is the required OS major update >= the provided one?
+        if int(provMac.group(2)) > int(reqMac.group(2)):
+            return False
+
+        return True
+
+    # XXX Linux and other platforms' special cases should go here
+    return False
+
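+# Illustrative checks, traced through the macOS rules above:
+#
+#     compatible_platforms('macosx-10.3-fat', 'macosx-10.5-fat')  # True: same major/arch, 3 <= 5
+#     compatible_platforms('macosx-10.5-fat', 'macosx-10.3-fat')  # False: newer minor than required
+#     compatible_platforms(None, 'linux-x86_64')                  # True: None matches anything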
+
+@overload
+def get_distribution(dist: _DistributionT) -> _DistributionT: ...
+@overload
+def get_distribution(dist: _PkgReqType) -> Distribution: ...
+def get_distribution(dist: Distribution | _PkgReqType) -> Distribution:
+    """Return a current distribution object for a Requirement or string"""
+    if isinstance(dist, str):
+        dist = Requirement.parse(dist)
+    if isinstance(dist, Requirement):
+        # Bad type narrowing, dist has to be a Requirement here, so get_provider has to return Distribution
+        dist = get_provider(dist)  # type: ignore[assignment]
+    if not isinstance(dist, Distribution):
+        raise TypeError("Expected str, Requirement, or Distribution", dist)
+    return dist
+
+
+def load_entry_point(dist: _EPDistType, group: str, name: str) -> _ResolvedEntryPoint:
+    """Return `name` entry point of `group` for `dist` or raise ImportError"""
+    return get_distribution(dist).load_entry_point(group, name)
+
+
+@overload
+def get_entry_map(
+    dist: _EPDistType, group: None = None
+) -> dict[str, dict[str, EntryPoint]]: ...
+@overload
+def get_entry_map(dist: _EPDistType, group: str) -> dict[str, EntryPoint]: ...
+def get_entry_map(dist: _EPDistType, group: str | None = None):
+    """Return the entry point map for `group`, or the full entry map"""
+    return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist: _EPDistType, group: str, name: str):
+    """Return the EntryPoint object for `group`+`name`, or ``None``"""
+    return get_distribution(dist).get_entry_info(group, name)
+
+
+class IMetadataProvider(Protocol):
+    def has_metadata(self, name: str) -> bool:
+        """Does the package's distribution contain the named metadata?"""
+
+    def get_metadata(self, name: str) -> str:
+        """The named metadata resource as a string"""
+
+    def get_metadata_lines(self, name: str) -> Iterator[str]:
+        """Yield named metadata resource as list of non-blank non-comment lines
+
+        Leading and trailing whitespace is stripped from each line, and lines
+        with ``#`` as the first non-blank character are omitted."""
+
+    def metadata_isdir(self, name: str) -> bool:
+        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
+
+    def metadata_listdir(self, name: str) -> list[str]:
+        """List of metadata names in the directory (like ``os.listdir()``)"""
+
+    def run_script(self, script_name: str, namespace: dict[str, Any]) -> None:
+        """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider, Protocol):
+    """An object that provides access to package resources"""
+
+    def get_resource_filename(
+        self, manager: ResourceManager, resource_name: str
+    ) -> str:
+        """Return a true filesystem path for `resource_name`
+
+        `manager` must be a ``ResourceManager``"""
+
+    def get_resource_stream(
+        self, manager: ResourceManager, resource_name: str
+    ) -> _ResourceStream:
+        """Return a readable file-like object for `resource_name`
+
+        `manager` must be a ``ResourceManager``"""
+
+    def get_resource_string(
+        self, manager: ResourceManager, resource_name: str
+    ) -> bytes:
+        """Return the contents of `resource_name` as :obj:`bytes`
+
+        `manager` must be a ``ResourceManager``"""
+
+    def has_resource(self, resource_name: str) -> bool:
+        """Does the package contain the named resource?"""
+
+    def resource_isdir(self, resource_name: str) -> bool:
+        """Is the named resource a directory?  (like ``os.path.isdir()``)"""
+
+    def resource_listdir(self, resource_name: str) -> list[str]:
+        """List of resource names in the directory (like ``os.listdir()``)"""
+
+
+class WorkingSet:
+    """A collection of active distributions on sys.path (or a similar list)"""
+
+    def __init__(self, entries: Iterable[str] | None = None):
+        """Create working set from list of path entries (default=sys.path)"""
+        self.entries: list[str] = []
+        self.entry_keys = {}
+        self.by_key = {}
+        self.normalized_to_canonical_keys = {}
+        self.callbacks = []
+
+        if entries is None:
+            entries = sys.path
+
+        for entry in entries:
+            self.add_entry(entry)
+
+    @classmethod
+    def _build_master(cls):
+        """
+        Prepare the master working set.
+        """
+        ws = cls()
+        try:
+            from __main__ import __requires__
+        except ImportError:
+            # The main program does not list any requirements
+            return ws
+
+        # ensure the requirements are met
+        try:
+            ws.require(__requires__)
+        except VersionConflict:
+            return cls._build_from_requirements(__requires__)
+
+        return ws
+
+    @classmethod
+    def _build_from_requirements(cls, req_spec):
+        """
+        Build a working set from a requirement spec. Rewrites sys.path.
+        """
+        # try it without defaults already on sys.path
+        # by starting with an empty path
+        ws = cls([])
+        reqs = parse_requirements(req_spec)
+        dists = ws.resolve(reqs, Environment())
+        for dist in dists:
+            ws.add(dist)
+
+        # add any missing entries from sys.path
+        for entry in sys.path:
+            if entry not in ws.entries:
+                ws.add_entry(entry)
+
+        # then copy back to sys.path
+        sys.path[:] = ws.entries
+        return ws
+
+    def add_entry(self, entry: str):
+        """Add a path item to ``.entries``, finding any distributions on it
+
+        ``find_distributions(entry, True)`` is used to find distributions
+        corresponding to the path entry, and they are added.  `entry` is
+        always appended to ``.entries``, even if it is already present.
+        (This is because ``sys.path`` can contain the same value more than
+        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+        equal ``sys.path``.)
+        """
+        self.entry_keys.setdefault(entry, [])
+        self.entries.append(entry)
+        for dist in find_distributions(entry, True):
+            self.add(dist, entry, False)
+
+    def __contains__(self, dist: Distribution) -> bool:
+        """True if `dist` is the active distribution for its project"""
+        return self.by_key.get(dist.key) == dist
+
+    def find(self, req: Requirement) -> Distribution | None:
+        """Find a distribution matching requirement `req`
+
+        If there is an active distribution for the requested project, this
+        returns it as long as it meets the version requirement specified by
+        `req`.  But, if there is an active distribution for the project and it
+        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+        If there is no active distribution for the requested project, ``None``
+        is returned.
+        """
+        dist = self.by_key.get(req.key)
+
+        if dist is None:
+            canonical_key = self.normalized_to_canonical_keys.get(req.key)
+
+            if canonical_key is not None:
+                req.key = canonical_key
+                dist = self.by_key.get(canonical_key)
+
+        if dist is not None and dist not in req:
+            # XXX add more info
+            raise VersionConflict(dist, req)
+        return dist
+
+    def iter_entry_points(self, group: str, name: str | None = None):
+        """Yield entry point objects from `group` matching `name`
+
+        If `name` is None, yields all entry points in `group` from all
+        distributions in the working set, otherwise only ones matching
+        both `group` and `name` are yielded (in distribution order).
+        """
+        return (
+            entry
+            for dist in self
+            for entry in dist.get_entry_map(group).values()
+            if name is None or name == entry.name
+        )
+
+    def run_script(self, requires: str, script_name: str):
+        """Locate distribution for `requires` and run `script_name` script"""
+        ns = sys._getframe(1).f_globals
+        name = ns['__name__']
+        ns.clear()
+        ns['__name__'] = name
+        self.require(requires)[0].run_script(script_name, ns)
+
+    def __iter__(self) -> Iterator[Distribution]:
+        """Yield distributions for non-duplicate projects in the working set
+
+        The yield order is the order in which the items' path entries were
+        added to the working set.
+        """
+        seen = set()
+        for item in self.entries:
+            if item not in self.entry_keys:
+                # workaround a cache issue
+                continue
+
+            for key in self.entry_keys[item]:
+                if key not in seen:
+                    seen.add(key)
+                    yield self.by_key[key]
+
+    def add(
+        self,
+        dist: Distribution,
+        entry: str | None = None,
+        insert: bool = True,
+        replace: bool = False,
+    ):
+        """Add `dist` to working set, associated with `entry`
+
+        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+        On exit from this routine, `entry` is added to the end of the working
+        set's ``.entries`` (if it wasn't already present).
+
+        `dist` is only added to the working set if it's for a project that
+        doesn't already have a distribution in the set, unless `replace=True`.
+        If it's added, any callbacks registered with the ``subscribe()`` method
+        will be called.
+        """
+        if insert:
+            dist.insert_on(self.entries, entry, replace=replace)
+
+        if entry is None:
+            entry = dist.location
+        keys = self.entry_keys.setdefault(entry, [])
+        keys2 = self.entry_keys.setdefault(dist.location, [])
+        if not replace and dist.key in self.by_key:
+            # ignore hidden distros
+            return
+
+        self.by_key[dist.key] = dist
+        normalized_name = _packaging_utils.canonicalize_name(dist.key)
+        self.normalized_to_canonical_keys[normalized_name] = dist.key
+        if dist.key not in keys:
+            keys.append(dist.key)
+        if dist.key not in keys2:
+            keys2.append(dist.key)
+        self._added_new(dist)
+
+    @overload
+    def resolve(
+        self,
+        requirements: Iterable[Requirement],
+        env: Environment | None,
+        installer: _InstallerTypeT[_DistributionT],
+        replace_conflicting: bool = False,
+        extras: tuple[str, ...] | None = None,
+    ) -> list[_DistributionT]: ...
+    @overload
+    def resolve(
+        self,
+        requirements: Iterable[Requirement],
+        env: Environment | None = None,
+        *,
+        installer: _InstallerTypeT[_DistributionT],
+        replace_conflicting: bool = False,
+        extras: tuple[str, ...] | None = None,
+    ) -> list[_DistributionT]: ...
+    @overload
+    def resolve(
+        self,
+        requirements: Iterable[Requirement],
+        env: Environment | None = None,
+        installer: _InstallerType | None = None,
+        replace_conflicting: bool = False,
+        extras: tuple[str, ...] | None = None,
+    ) -> list[Distribution]: ...
+    def resolve(
+        self,
+        requirements: Iterable[Requirement],
+        env: Environment | None = None,
+        installer: _InstallerType | None | _InstallerTypeT[_DistributionT] = None,
+        replace_conflicting: bool = False,
+        extras: tuple[str, ...] | None = None,
+    ) -> list[Distribution] | list[_DistributionT]:
+        """List all distributions needed to (recursively) meet `requirements`
+
+        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
+        if supplied, should be an ``Environment`` instance.  If
+        not supplied, it defaults to all distributions available within any
+        entry or distribution in the working set.  `installer`, if supplied,
+        will be invoked with each requirement that cannot be met by an
+        already-installed distribution; it should return a ``Distribution`` or
+        ``None``.
+
+        Unless `replace_conflicting=True`, raises a VersionConflict exception
+        if any requirements are found on the path that have the correct name
+        but the wrong version.  Otherwise, if an `installer` is supplied it
+        will be invoked to obtain the correct version of the requirement and
+        activate it.
+
+        `extras` is a list of the extras to be used with these requirements.
+        This is important because extra requirements may look like `my_req;
+        extra = "my_extra"`, which would otherwise be interpreted as a purely
+        optional requirement.  Instead, we want to be able to assert that these
+        requirements are truly required.
+        """
+
+        # set up the stack
+        requirements = list(requirements)[::-1]
+        # set of processed requirements
+        processed = set()
+        # key -> dist
+        best = {}
+        to_activate = []
+
+        req_extras = _ReqExtras()
+
+        # Mapping of requirement to set of distributions that required it;
+        # useful for reporting info about conflicts.
+        required_by = collections.defaultdict(set)
+
+        while requirements:
+            # process dependencies breadth-first
+            req = requirements.pop(0)
+            if req in processed:
+                # Ignore cyclic or redundant dependencies
+                continue
+
+            if not req_extras.markers_pass(req, extras):
+                continue
+
+            dist = self._resolve_dist(
+                req, best, replace_conflicting, env, installer, required_by, to_activate
+            )
+
+            # push the new requirements onto the stack
+            new_requirements = dist.requires(req.extras)[::-1]
+            requirements.extend(new_requirements)
+
+            # Register the new requirements needed by req
+            for new_requirement in new_requirements:
+                required_by[new_requirement].add(req.project_name)
+                req_extras[new_requirement] = req.extras
+
+            processed.add(req)
+
+        # return list of distros to activate
+        return to_activate
+
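+    # Sketch of a resolve call (illustrative requirement string):
+    #
+    #     ws = WorkingSet()
+    #     for dist in ws.resolve(parse_requirements('packaging>=20')):
+    #         ws.add(dist)   # activate everything resolution selected
+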
+    def _resolve_dist(
+        self, req, best, replace_conflicting, env, installer, required_by, to_activate
+    ) -> Distribution:
+        dist = best.get(req.key)
+        if dist is None:
+            # Find the best distribution and add it to the map
+            dist = self.by_key.get(req.key)
+            if dist is None or (dist not in req and replace_conflicting):
+                ws = self
+                if env is None:
+                    if dist is None:
+                        env = Environment(self.entries)
+                    else:
+                        # Use an empty environment and workingset to avoid
+                        # any further conflicts with the conflicting
+                        # distribution
+                        env = Environment([])
+                        ws = WorkingSet([])
+                dist = best[req.key] = env.best_match(
+                    req, ws, installer, replace_conflicting=replace_conflicting
+                )
+                if dist is None:
+                    requirers = required_by.get(req, None)
+                    raise DistributionNotFound(req, requirers)
+            to_activate.append(dist)
+        if dist not in req:
+            # Oops, the "best" so far conflicts with a dependency
+            dependent_req = required_by[req]
+            raise VersionConflict(dist, req).with_context(dependent_req)
+        return dist
+
+    @overload
+    def find_plugins(
+        self,
+        plugin_env: Environment,
+        full_env: Environment | None,
+        installer: _InstallerTypeT[_DistributionT],
+        fallback: bool = True,
+    ) -> tuple[list[_DistributionT], dict[Distribution, Exception]]: ...
+    @overload
+    def find_plugins(
+        self,
+        plugin_env: Environment,
+        full_env: Environment | None = None,
+        *,
+        installer: _InstallerTypeT[_DistributionT],
+        fallback: bool = True,
+    ) -> tuple[list[_DistributionT], dict[Distribution, Exception]]: ...
+    @overload
+    def find_plugins(
+        self,
+        plugin_env: Environment,
+        full_env: Environment | None = None,
+        installer: _InstallerType | None = None,
+        fallback: bool = True,
+    ) -> tuple[list[Distribution], dict[Distribution, Exception]]: ...
+    def find_plugins(
+        self,
+        plugin_env: Environment,
+        full_env: Environment | None = None,
+        installer: _InstallerType | None | _InstallerTypeT[_DistributionT] = None,
+        fallback: bool = True,
+    ) -> tuple[
+        list[Distribution] | list[_DistributionT],
+        dict[Distribution, Exception],
+    ]:
+        """Find all activatable distributions in `plugin_env`
+
+        Example usage::
+
+            distributions, errors = working_set.find_plugins(
+                Environment(plugin_dirlist)
+            )
+            # add plugins+libs to sys.path
+            map(working_set.add, distributions)
+            # display errors
+            print('Could not load', errors)
+
+        The `plugin_env` should be an ``Environment`` instance that contains
+        only distributions that are in the project's "plugin directory" or
+        directories. The `full_env`, if supplied, should be an ``Environment``
+        that contains all currently-available distributions.  If `full_env` is
+        not supplied, one is created automatically from the ``WorkingSet`` this
+        method is called on, which will typically mean that every directory on
+        ``sys.path`` will be scanned for distributions.
+
+        `installer` is a standard installer callback as used by the
+        ``resolve()`` method. The `fallback` flag indicates whether we should
+        attempt to resolve older versions of a plugin if the newest version
+        cannot be resolved.
+
+        This method returns a 2-tuple: (`distributions`, `error_info`), where
+        `distributions` is a list of the distributions found in `plugin_env`
+        that were loadable, along with any other distributions that are needed
+        to resolve their dependencies.  `error_info` is a dictionary mapping
+        unloadable plugin distributions to an exception instance describing the
+        error that occurred. Usually this will be a ``DistributionNotFound`` or
+        ``VersionConflict`` instance.
+        """
+
+        plugin_projects = list(plugin_env)
+        # scan project names in alphabetic order
+        plugin_projects.sort()
+
+        error_info: dict[Distribution, Exception] = {}
+        distributions: dict[Distribution, Exception | None] = {}
+
+        if full_env is None:
+            env = Environment(self.entries)
+            env += plugin_env
+        else:
+            env = full_env + plugin_env
+
+        shadow_set = self.__class__([])
+        # put all our entries in shadow_set
+        list(map(shadow_set.add, self))
+
+        for project_name in plugin_projects:
+            for dist in plugin_env[project_name]:
+                req = [dist.as_requirement()]
+
+                try:
+                    resolvees = shadow_set.resolve(req, env, installer)
+
+                except ResolutionError as v:
+                    # save error info
+                    error_info[dist] = v
+                    if fallback:
+                        # try the next older version of project
+                        continue
+                    else:
+                        # give up on this project, keep going
+                        break
+
+                else:
+                    list(map(shadow_set.add, resolvees))
+                    distributions.update(dict.fromkeys(resolvees))
+
+                    # success, no need to try any more versions of this project
+                    break
+
+        sorted_distributions = list(distributions)
+        sorted_distributions.sort()
+
+        return sorted_distributions, error_info
+
+    def require(self, *requirements: _NestedStr):
+        """Ensure that distributions matching `requirements` are activated
+
+        `requirements` must be a string or a (possibly-nested) sequence
+        thereof, specifying the distributions and versions required.  The
+        return value is a sequence of the distributions that needed to be
+        activated to fulfill the requirements; all relevant distributions are
+        included, even if they were already activated in this working set.
+        """
+        needed = self.resolve(parse_requirements(requirements))
+
+        for dist in needed:
+            self.add(dist)
+
+        return needed
+
+    def subscribe(
+        self, callback: Callable[[Distribution], object], existing: bool = True
+    ):
+        """Invoke `callback` for all distributions
+
+        If `existing=True` (the default), the callback is also invoked for
+        every distribution already in the working set.
+        """
+        if callback in self.callbacks:
+            return
+        self.callbacks.append(callback)
+        if not existing:
+            return
+        for dist in self:
+            callback(dist)
+
+    def _added_new(self, dist):
+        for callback in self.callbacks:
+            callback(dist)
+
+    def __getstate__(self):
+        return (
+            self.entries[:],
+            self.entry_keys.copy(),
+            self.by_key.copy(),
+            self.normalized_to_canonical_keys.copy(),
+            self.callbacks[:],
+        )
+
+    def __setstate__(self, e_k_b_n_c):
+        entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c
+        self.entries = entries[:]
+        self.entry_keys = keys.copy()
+        self.by_key = by_key.copy()
+        self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy()
+        self.callbacks = callbacks[:]
+
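+# Usage sketch for WorkingSet as a whole (illustrative):
+#
+#     ws = WorkingSet()                              # scan sys.path for distributions
+#     ws.subscribe(lambda d: print('activated', d))  # existing and future dists
+#     for dist in ws:                                # non-duplicate projects, entry order
+#         print(dist.project_name, dist.version)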
+
+class _ReqExtras(Dict["Requirement", Tuple[str, ...]]):
+    """
+    Map each requirement to the extras that demanded it.
+    """
+
+    def markers_pass(self, req: Requirement, extras: tuple[str, ...] | None = None):
+        """
+        Evaluate markers for req against each extra that
+        demanded it.
+
+        Return False if the req has a marker and fails
+        evaluation. Otherwise, return True.
+        """
+        extra_evals = (
+            req.marker.evaluate({'extra': extra})
+            for extra in self.get(req, ()) + (extras or (None,))
+        )
+        return not req.marker or any(extra_evals)
+
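+# Illustrative marker evaluation (hypothetical requirement name "mydep"):
+#
+#     req = Requirement.parse('mydep; extra == "fast"')
+#     extras_map = _ReqExtras({req: ('fast',)})
+#     extras_map.markers_pass(req)   # True: the "fast" extra demanded mydep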
+
+class Environment:
+    """Searchable snapshot of distributions on a search path"""
+
+    def __init__(
+        self,
+        search_path: Iterable[str] | None = None,
+        platform: str | None = get_supported_platform(),
+        python: str | None = PY_MAJOR,
+    ):
+        """Snapshot distributions available on a search path
+
+        Any distributions found on `search_path` are added to the environment.
+        `search_path` should be a sequence of ``sys.path`` items.  If not
+        supplied, ``sys.path`` is used.
+
+        `platform` is an optional string specifying the name of the platform
+        that platform-specific distributions must be compatible with.  If
+        unspecified, it defaults to the current platform.  `python` is an
+        optional string naming the desired version of Python (e.g. ``'3.6'``);
+        it defaults to the current version.
+
+        You may explicitly set `platform` (and/or `python`) to ``None`` if you
+        wish to map *all* distributions, not just those compatible with the
+        running platform or Python version.
+        """
+        self._distmap = {}
+        self.platform = platform
+        self.python = python
+        self.scan(search_path)
+
+    def can_add(self, dist: Distribution):
+        """Is distribution `dist` acceptable for this environment?
+
+        The distribution must match the platform and python version
+        requirements specified when this environment was created, or False
+        is returned.
+        """
+        py_compat = (
+            self.python is None
+            or dist.py_version is None
+            or dist.py_version == self.python
+        )
+        return py_compat and compatible_platforms(dist.platform, self.platform)
+
+    def remove(self, dist: Distribution):
+        """Remove `dist` from the environment"""
+        self._distmap[dist.key].remove(dist)
+
+    def scan(self, search_path: Iterable[str] | None = None):
+        """Scan `search_path` for distributions usable in this environment
+
+        Any distributions found are added to the environment.
+        `search_path` should be a sequence of ``sys.path`` items.  If not
+        supplied, ``sys.path`` is used.  Only distributions conforming to
+        the platform/python version defined at initialization are added.
+        """
+        if search_path is None:
+            search_path = sys.path
+
+        for item in search_path:
+            for dist in find_distributions(item):
+                self.add(dist)
+
+    def __getitem__(self, project_name: str) -> list[Distribution]:
+        """Return a newest-to-oldest list of distributions for `project_name`
+
+        Uses case-insensitive `project_name` comparison, assuming all the
+        project's distributions use their project's name converted to all
+        lowercase as their key.
+        """
+        distribution_key = project_name.lower()
+        return self._distmap.get(distribution_key, [])
+
+    def add(self, dist: Distribution):
+        """Add `dist` if we ``can_add()`` it and it has not already been added"""
+        if self.can_add(dist) and dist.has_version():
+            dists = self._distmap.setdefault(dist.key, [])
+            if dist not in dists:
+                dists.append(dist)
+                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
+    @overload
+    def best_match(
+        self,
+        req: Requirement,
+        working_set: WorkingSet,
+        installer: _InstallerTypeT[_DistributionT],
+        replace_conflicting: bool = False,
+    ) -> _DistributionT: ...
+    @overload
+    def best_match(
+        self,
+        req: Requirement,
+        working_set: WorkingSet,
+        installer: _InstallerType | None = None,
+        replace_conflicting: bool = False,
+    ) -> Distribution | None: ...
+    def best_match(
+        self,
+        req: Requirement,
+        working_set: WorkingSet,
+        installer: _InstallerType | None | _InstallerTypeT[_DistributionT] = None,
+        replace_conflicting: bool = False,
+    ) -> Distribution | None:
+        """Find distribution best matching `req` and usable on `working_set`
+
+        This calls the ``find(req)`` method of the `working_set` to see if a
+        suitable distribution is already active.  (This may raise
+        ``VersionConflict`` if an unsuitable version of the project is already
+        active in the specified `working_set`.)  If a suitable distribution
+        isn't active, this method returns the newest distribution in the
+        environment that meets the ``Requirement`` in `req`.  If no suitable
+        distribution is found, and `installer` is supplied, then the result of
+        calling the environment's ``obtain(req, installer)`` method will be
+        returned.
+        """
+        try:
+            dist = working_set.find(req)
+        except VersionConflict:
+            if not replace_conflicting:
+                raise
+            dist = None
+        if dist is not None:
+            return dist
+        for dist in self[req.key]:
+            if dist in req:
+                return dist
+        # try to download/install
+        return self.obtain(req, installer)
+
+    @overload
+    def obtain(
+        self,
+        requirement: Requirement,
+        installer: _InstallerTypeT[_DistributionT],
+    ) -> _DistributionT: ...
+    @overload
+    def obtain(
+        self,
+        requirement: Requirement,
+        installer: Callable[[Requirement], None] | None = None,
+    ) -> None: ...
+    @overload
+    def obtain(
+        self,
+        requirement: Requirement,
+        installer: _InstallerType | None = None,
+    ) -> Distribution | None: ...
+    def obtain(
+        self,
+        requirement: Requirement,
+        installer: Callable[[Requirement], None]
+        | _InstallerType
+        | None
+        | _InstallerTypeT[_DistributionT] = None,
+    ) -> Distribution | None:
+        """Obtain a distribution matching `requirement` (e.g. via download)
+
+        Obtain a distro that matches requirement (e.g. via download).  In the
+        base ``Environment`` class, this routine just returns
+        ``installer(requirement)``, unless `installer` is None, in which case
+        None is returned instead.  This method is a hook that allows subclasses
+        to attempt other ways of obtaining a distribution before falling back
+        to the `installer` argument."""
+        return installer(requirement) if installer else None
+
+    def __iter__(self) -> Iterator[str]:
+        """Yield the unique project names of the available distributions"""
+        for key in self._distmap.keys():
+            if self[key]:
+                yield key
+
+    def __iadd__(self, other: Distribution | Environment):
+        """In-place addition of a distribution or environment"""
+        if isinstance(other, Distribution):
+            self.add(other)
+        elif isinstance(other, Environment):
+            for project in other:
+                for dist in other[project]:
+                    self.add(dist)
+        else:
+            raise TypeError("Can't add %r to environment" % (other,))
+        return self
+
+    def __add__(self, other: Distribution | Environment):
+        """Add an environment or distribution to an environment"""
+        new = self.__class__([], platform=None, python=None)
+        for env in self, other:
+            new += env
+        return new
+
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
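+# Usage sketch for Environment (illustrative plugin directory):
+#
+#     env = Environment(['plugins'])   # snapshot distributions found under ./plugins
+#     dist = env.best_match(Requirement.parse('myplugin'), working_set)
+#     if dist is not None:
+#         working_set.add(dist)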
+
+class ExtractionError(RuntimeError):
+    """An error occurred extracting a resource
+
+    The following attributes are available from instances of this exception:
+
+    manager
+        The resource manager that raised this exception
+
+    cache_path
+        The base directory for resource extraction
+
+    original_error
+        The exception instance that caused extraction to fail
+    """
+
+    manager: ResourceManager
+    cache_path: str
+    original_error: BaseException | None
+
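+# Handling sketch (hypothetical package and resource names):
+#
+#     try:
+#         path = resource_filename('mypkg', 'model.bin')
+#     except ExtractionError as e:
+#         print('egg cache at', e.cache_path, 'failed:', e.original_error)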
+
+class ResourceManager:
+    """Manage resource extraction and packages"""
+
+    extraction_path: str | None = None
+
+    def __init__(self):
+        self.cached_files = {}
+
+    def resource_exists(self, package_or_requirement: _PkgReqType, resource_name: str):
+        """Does the named resource exist?"""
+        return get_provider(package_or_requirement).has_resource(resource_name)
+
+    def resource_isdir(self, package_or_requirement: _PkgReqType, resource_name: str):
+        """Is the named resource an existing directory?"""
+        return get_provider(package_or_requirement).resource_isdir(resource_name)
+
+    def resource_filename(
+        self, package_or_requirement: _PkgReqType, resource_name: str
+    ):
+        """Return a true filesystem path for specified resource"""
+        return get_provider(package_or_requirement).get_resource_filename(
+            self, resource_name
+        )
+
+    def resource_stream(self, package_or_requirement: _PkgReqType, resource_name: str):
+        """Return a readable file-like object for specified resource"""
+        return get_provider(package_or_requirement).get_resource_stream(
+            self, resource_name
+        )
+
+    def resource_string(
+        self, package_or_requirement: _PkgReqType, resource_name: str
+    ) -> bytes:
+        """Return specified resource as :obj:`bytes`"""
+        return get_provider(package_or_requirement).get_resource_string(
+            self, resource_name
+        )
+
+    def resource_listdir(self, package_or_requirement: _PkgReqType, resource_name: str):
+        """List the contents of the named resource directory"""
+        return get_provider(package_or_requirement).resource_listdir(resource_name)
+
+    def extraction_error(self) -> NoReturn:
+        """Give an error message for problems extracting file(s)"""
+
+        old_exc = sys.exc_info()[1]
+        cache_path = self.extraction_path or get_default_cache()
+
+        tmpl = textwrap.dedent(
+            """
+            Can't extract file(s) to egg cache
+
+            The following error occurred while trying to extract file(s)
+            to the Python egg cache:
+
+              {old_exc}
+
+            The Python egg cache directory is currently set to:
+
+              {cache_path}
+
+            Perhaps your account does not have write access to this directory?
+            You can change the cache directory by setting the PYTHON_EGG_CACHE
+            environment variable to point to an accessible directory.
+            """
+        ).lstrip()
+        err = ExtractionError(tmpl.format(**locals()))
+        err.manager = self
+        err.cache_path = cache_path
+        err.original_error = old_exc
+        raise err
+
+    def get_cache_path(self, archive_name: str, names: Iterable[StrPath] = ()):
+        """Return absolute location in cache for `archive_name` and `names`
+
+        The parent directory of the resulting path will be created if it does
+        not already exist.  `archive_name` should be the base filename of the
+        enclosing egg (which may not be the name of the enclosing zipfile!),
+        including its ".egg" extension.  `names`, if provided, should be a
+        sequence of path name parts "under" the egg's extraction location.
+
+        This method should only be called by resource providers that need to
+        obtain an extraction location, and only for names they intend to
+        extract, as it tracks the generated names for possible cleanup later.
+        """
+        extract_path = self.extraction_path or get_default_cache()
+        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
+        try:
+            _bypass_ensure_directory(target_path)
+        except Exception:
+            self.extraction_error()
+
+        self._warn_unsafe_extraction_path(extract_path)
+
+        self.cached_files[target_path] = True
+        return target_path
+
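+    # Sketch (illustrative egg and member names): a provider extracting
+    # 'sub/part.so' from 'mypkg-1.0.egg' would call
+    #
+    #     manager.get_cache_path('mypkg-1.0.egg', ('sub', 'part.so'))
+    #
+    # and get back '<cache>/mypkg-1.0.egg-tmp/sub/part.so', parents created.
+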
+    @staticmethod
+    def _warn_unsafe_extraction_path(path):
+        """
+        If the default extraction path is overridden and set to an insecure
+        location, such as /tmp, it opens up an opportunity for an attacker to
+        replace an extracted file with an unauthorized payload. Warn the user
+        if a known insecure location is used.
+
+        See Distribute #375 for more details.
+        """
+        if os.name == 'nt' and not path.startswith(os.environ['windir']):
+            # On Windows, permissions are generally restrictive by default
+            #  and temp directories are not writable by other users, so
+            #  bypass the warning.
+            return
+        mode = os.stat(path).st_mode
+        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+            msg = (
+                "Extraction path is writable by group/others "
+                "and vulnerable to attack when "
+                "used with get_resource_filename ({path}). "
+                "Consider a more secure "
+                "location (set with .set_extraction_path or the "
+                "PYTHON_EGG_CACHE environment variable)."
+            ).format(**locals())
+            warnings.warn(msg, UserWarning)
+
+    def postprocess(self, tempname: StrOrBytesPath, filename: StrOrBytesPath):
+        """Perform any platform-specific postprocessing of `tempname`
+
+        This is where Mac header rewrites should be done; other platforms don't
+        have anything special they should do.
+
+        Resource providers should call this method ONLY after successfully
+        extracting a compressed resource.  They must NOT call it on resources
+        that are already in the filesystem.
+
+        `tempname` is the current (temporary) name of the file, and `filename`
+        is the name it will be renamed to by the caller after this routine
+        returns.
+        """
+
+        if os.name == 'posix':
+            # Make the resource executable
+            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+            os.chmod(tempname, mode)
+
+    def set_extraction_path(self, path: str):
+        """Set the base path where resources will be extracted to, if needed.
+
+        If you do not call this routine before any extractions take place, the
+        path defaults to the return value of ``get_default_cache()``.  (Which
+        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+        platform-specific fallbacks.  See that routine's documentation for more
+        details.)
+
+        Resources are extracted to subdirectories of this path based upon
+        information given by the ``IResourceProvider``.  You may set this to a
+        temporary directory, but then you must call ``cleanup_resources()`` to
+        delete the extracted files when done.  There is no guarantee that
+        ``cleanup_resources()`` will be able to remove all extracted files.
+
+        (Note: you may not change the extraction path for a given resource
+        manager once resources have been extracted, unless you first call
+        ``cleanup_resources()``.)
+        """
+        if self.cached_files:
+            raise ValueError("Can't change extraction path, files already extracted")
+
+        self.extraction_path = path
+
+    def cleanup_resources(self, force: bool = False) -> list[str]:
+        """
+        Delete all extracted resource files and directories, returning a list
+        of the file and directory names that could not be successfully removed.
+        This function does not have any concurrency protection, so it should
+        generally only be called when the extraction path is a temporary
+        directory exclusive to a single process.  This method is not
+        automatically called; you must call it explicitly or register it as an
+        ``atexit`` function if you wish to ensure cleanup of a temporary
+        directory used for extractions.
+        """
+        # XXX
+        return []
+
+
+def get_default_cache() -> str:
+    """
+    Return the ``PYTHON_EGG_CACHE`` environment variable
+    or a platform-relevant user cache dir for an app
+    named "Python-Eggs".
+    """
+    return os.environ.get('PYTHON_EGG_CACHE') or _user_cache_dir(appname='Python-Eggs')
+
+
+def safe_name(name: str):
+    """Convert an arbitrary string to a standard distribution name
+
+    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+    """
+    return re.sub('[^A-Za-z0-9.]+', '-', name)
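+
+# Example (illustrative): runs of characters outside [A-Za-z0-9.] collapse
+# into a single '-':
+#   safe_name('my_project v2')  ->  'my-project-v2'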
+
+
+def safe_version(version: str):
+    """
+    Convert an arbitrary string to a standard version string
+    """
+    try:
+        # normalize the version
+        return str(_packaging_version.Version(version))
+    except _packaging_version.InvalidVersion:
+        version = version.replace(' ', '.')
+        return re.sub('[^A-Za-z0-9.]+', '-', version)
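+
+# Example (illustrative): versions that 'packaging' cannot parse are coerced,
+# e.g. safe_version('1.0 beta 2') -> '1.0.beta.2', while parseable inputs are
+# normalized, e.g. safe_version('2.7-rc1') -> '2.7rc1'.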
+
+
+def _forgiving_version(version):
+    """Fallback when ``safe_version`` is not safe enough
+    >>> parse_version(_forgiving_version('0.23ubuntu1'))
+    <Version('0.23.dev0+sanitized.ubuntu1')>
+    >>> parse_version(_forgiving_version('0.23-'))
+    <Version('0.23.dev0+sanitized')>
+    >>> parse_version(_forgiving_version('0.-_'))
+    <Version('0.dev0+sanitized')>
+    >>> parse_version(_forgiving_version('42.+?1'))
+    <Version('42.dev0+sanitized.1')>
+    >>> parse_version(_forgiving_version('hello world'))
+    <Version('0.dev0+sanitized.hello.world')>
+    """
+    version = version.replace(' ', '.')
+    match = _PEP440_FALLBACK.search(version)
+    if match:
+        safe = match["safe"]
+        rest = version[len(safe) :]
+    else:
+        safe = "0"
+        rest = version
+    local = f"sanitized.{_safe_segment(rest)}".strip(".")
+    return f"{safe}.dev0+{local}"
+
+
+def _safe_segment(segment):
+    """Convert an arbitrary string into a safe segment"""
+    segment = re.sub('[^A-Za-z0-9.]+', '-', segment)
+    segment = re.sub('-[^A-Za-z0-9]+', '-', segment)
+    return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-")
+
+
+def safe_extra(extra: str):
+    """Convert an arbitrary string to a standard 'extra' name
+
+    Any runs of non-alphanumeric characters are replaced with a single '_',
+    and the result is always lowercased.
+    """
+    return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
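+
+# Example (illustrative): safe_extra('Tests & Docs') -> 'tests_docs'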
+
+
+def to_filename(name: str):
+    """Convert a project or version name to its filename-escaped form
+
+    Any '-' characters are currently replaced with '_'.
+    """
+    return name.replace('-', '_')
+
+
+def invalid_marker(text: str):
+    """
+    Validate text as a PEP 508 environment marker; return an exception
+    instance if invalid, or False otherwise.
+    """
+    try:
+        evaluate_marker(text)
+    except SyntaxError as e:
+        e.filename = None
+        e.lineno = None
+        return e
+    return False
+
+
+def evaluate_marker(text: str, extra: str | None = None) -> bool:
+    """
+    Evaluate a PEP 508 environment marker.
+    Return a boolean indicating the marker result in this environment.
+    Raise SyntaxError if marker is invalid.
+
+    This implementation delegates to the 'packaging.markers' module.
+    """
+    try:
+        marker = _packaging_markers.Marker(text)
+        return marker.evaluate()
+    except _packaging_markers.InvalidMarker as e:
+        raise SyntaxError(e) from e
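+
+# Example (illustrative; the result depends on the running interpreter):
+#   evaluate_marker('python_version >= "3.8"')  ->  True on Python 3.8+
+#   evaluate_marker('not-a-marker')             ->  raises SyntaxError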
+
+
+class NullProvider:
+    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+    egg_name: str | None = None
+    egg_info: str | None = None
+    loader: _LoaderProtocol | None = None
+
+    def __init__(self, module: _ModuleLike):
+        self.loader = getattr(module, '__loader__', None)
+        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+    def get_resource_filename(self, manager: ResourceManager, resource_name: str):
+        return self._fn(self.module_path, resource_name)
+
+    def get_resource_stream(self, manager: ResourceManager, resource_name: str):
+        return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+    def get_resource_string(
+        self, manager: ResourceManager, resource_name: str
+    ) -> bytes:
+        return self._get(self._fn(self.module_path, resource_name))
+
+    def has_resource(self, resource_name: str):
+        return self._has(self._fn(self.module_path, resource_name))
+
+    def _get_metadata_path(self, name):
+        return self._fn(self.egg_info, name)
+
+    def has_metadata(self, name: str) -> bool:
+        if not self.egg_info:
+            return False
+
+        path = self._get_metadata_path(name)
+        return self._has(path)
+
+    def get_metadata(self, name: str):
+        if not self.egg_info:
+            return ""
+        path = self._get_metadata_path(name)
+        value = self._get(path)
+        try:
+            return value.decode('utf-8')
+        except UnicodeDecodeError as exc:
+            # Include the path in the error message to simplify
+            # troubleshooting, and without changing the exception type.
+            exc.reason += ' in {} file at path: {}'.format(name, path)
+            raise
+
+    def get_metadata_lines(self, name: str) -> Iterator[str]:
+        return yield_lines(self.get_metadata(name))
+
+    def resource_isdir(self, resource_name: str):
+        return self._isdir(self._fn(self.module_path, resource_name))
+
+    def metadata_isdir(self, name: str) -> bool:
+        return bool(self.egg_info and self._isdir(self._fn(self.egg_info, name)))
+
+    def resource_listdir(self, resource_name: str):
+        return self._listdir(self._fn(self.module_path, resource_name))
+
+    def metadata_listdir(self, name: str) -> list[str]:
+        if self.egg_info:
+            return self._listdir(self._fn(self.egg_info, name))
+        return []
+
+    def run_script(self, script_name: str, namespace: dict[str, Any]):
+        script = 'scripts/' + script_name
+        if not self.has_metadata(script):
+            raise ResolutionError(
+                "Script {script!r} not found in metadata at {self.egg_info!r}".format(
+                    **locals()
+                ),
+            )
+
+        script_text = self.get_metadata(script).replace('\r\n', '\n')
+        script_text = script_text.replace('\r', '\n')
+        script_filename = self._fn(self.egg_info, script)
+        namespace['__file__'] = script_filename
+        if os.path.exists(script_filename):
+            source = _read_utf8_with_fallback(script_filename)
+            code = compile(source, script_filename, 'exec')
+            exec(code, namespace, namespace)
+        else:
+            from linecache import cache
+
+            cache[script_filename] = (
+                len(script_text),
+                0,
+                script_text.split('\n'),
+                script_filename,
+            )
+            script_code = compile(script_text, script_filename, 'exec')
+            exec(script_code, namespace, namespace)
+
+    def _has(self, path) -> bool:
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _isdir(self, path) -> bool:
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _listdir(self, path) -> list[str]:
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _fn(self, base: str | None, resource_name: str):
+        if base is None:
+            raise TypeError(
+                "`base` parameter in `_fn` is `None`. Either override this method or check the parameter first."
+            )
+        self._validate_resource_path(resource_name)
+        if resource_name:
+            return os.path.join(base, *resource_name.split('/'))
+        return base
+
+    @staticmethod
+    def _validate_resource_path(path):
+        """
+        Validate the resource paths according to the docs.
+        https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access
+
+        >>> warned = getfixture('recwarn')
+        >>> warnings.simplefilter('always')
+        >>> vrp = NullProvider._validate_resource_path
+        >>> vrp('foo/bar.txt')
+        >>> bool(warned)
+        False
+        >>> vrp('../foo/bar.txt')
+        >>> bool(warned)
+        True
+        >>> warned.clear()
+        >>> vrp('/foo/bar.txt')
+        >>> bool(warned)
+        True
+        >>> vrp('foo/../../bar.txt')
+        >>> bool(warned)
+        True
+        >>> warned.clear()
+        >>> vrp('foo/f../bar.txt')
+        >>> bool(warned)
+        False
+
+        Windows path separators are straight-up disallowed.
+        >>> vrp(r'\\foo/bar.txt')
+        Traceback (most recent call last):
+        ...
+        ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+        >>> vrp(r'C:\\foo/bar.txt')
+        Traceback (most recent call last):
+        ...
+        ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+        Blank values are allowed
+
+        >>> vrp('')
+        >>> bool(warned)
+        False
+
+        Non-string values are not.
+
+        >>> vrp(None)
+        Traceback (most recent call last):
+        ...
+        AttributeError: ...
+        """
+        invalid = (
+            os.path.pardir in path.split(posixpath.sep)
+            or posixpath.isabs(path)
+            or ntpath.isabs(path)
+            or path.startswith("\\")
+        )
+        if not invalid:
+            return
+
+        msg = "Use of .. or absolute path in a resource path is not allowed."
+
+        # Aggressively disallow Windows absolute paths
+        if (path.startswith("\\") or ntpath.isabs(path)) and not posixpath.isabs(path):
+            raise ValueError(msg)
+
+        # for compatibility, warn; in future
+        # raise ValueError(msg)
+        issue_warning(
+            msg[:-1] + " and will raise exceptions in a future release.",
+            DeprecationWarning,
+        )
+
+    def _get(self, path) -> bytes:
+        if hasattr(self.loader, 'get_data') and self.loader:
+            # Already checked get_data exists
+            return self.loader.get_data(path)  # type: ignore[attr-defined]
+        raise NotImplementedError(
+            "Can't perform this operation for loaders without 'get_data()'"
+        )
+
+
+register_loader_type(object, NullProvider)
+
+
+def _parents(path):
+    """
+    yield all parents of path including path
+    """
+    last = None
+    while path != last:
+        yield path
+        last = path
+        path, _ = os.path.split(path)
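+
+# Example (illustrative, POSIX paths):
+#   list(_parents('/a/b/c')) -> ['/a/b/c', '/a/b', '/a', '/']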
+
+
+class EggProvider(NullProvider):
+    """Provider based on a virtual filesystem"""
+
+    def __init__(self, module: _ModuleLike):
+        super().__init__(module)
+        self._setup_prefix()
+
+    def _setup_prefix(self):
+        # Assume that metadata may be nested inside a "basket"
+        # of multiple eggs and use module_path instead of .archive.
+        eggs = filter(_is_egg_path, _parents(self.module_path))
+        egg = next(eggs, None)
+        if egg:
+            self._set_egg(egg)
+
+    def _set_egg(self, path: str):
+        self.egg_name = os.path.basename(path)
+        self.egg_info = os.path.join(path, 'EGG-INFO')
+        self.egg_root = path
+
+
+class DefaultProvider(EggProvider):
+    """Provides access to package resources in the filesystem"""
+
+    def _has(self, path) -> bool:
+        return os.path.exists(path)
+
+    def _isdir(self, path) -> bool:
+        return os.path.isdir(path)
+
+    def _listdir(self, path):
+        return os.listdir(path)
+
+    def get_resource_stream(self, manager: object, resource_name: str):
+        return open(self._fn(self.module_path, resource_name), 'rb')
+
+    def _get(self, path) -> bytes:
+        with open(path, 'rb') as stream:
+            return stream.read()
+
+    @classmethod
+    def _register(cls):
+        loader_names = (
+            'SourceFileLoader',
+            'SourcelessFileLoader',
+        )
+        for name in loader_names:
+            loader_cls = getattr(importlib.machinery, name, type(None))
+            register_loader_type(loader_cls, cls)
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+    """Provider that returns nothing for all requests"""
+
+    # A special case, we don't want all Providers inheriting from NullProvider to have a potentially None module_path
+    module_path: str | None = None  # type: ignore[assignment]
+
+    _isdir = _has = lambda self, path: False
+
+    def _get(self, path) -> bytes:
+        return b''
+
+    def _listdir(self, path):
+        return []
+
+    def __init__(self):
+        pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(Dict[str, "MemoizedZipManifests.manifest_mod"]):
+    """
+    zip manifest builder
+    """
+
+    # `path` could be `StrPath | IO[bytes]` but that violates the LSP for `MemoizedZipManifests.load`
+    @classmethod
+    def build(cls, path: str):
+        """
+        Build a dictionary similar to the zipimport directory
+        caches, except instead of tuples, store ZipInfo objects.
+
+        Use a platform-specific path separator (os.sep) for the path keys
+        for compatibility with pypy on Windows.
+        """
+        with zipfile.ZipFile(path) as zfile:
+            items = (
+                (
+                    name.replace('/', os.sep),
+                    zfile.getinfo(name),
+                )
+                for name in zfile.namelist()
+            )
+            return dict(items)
+
+    load = build
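+
+    # Example (illustrative): an archive that stores 'pkg/data.txt' produces
+    # {'pkg' + os.sep + 'data.txt': <ZipInfo for 'pkg/data.txt'>}.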
+
+
+class MemoizedZipManifests(ZipManifests):
+    """
+    Memoized zipfile manifests.
+    """
+
+    class manifest_mod(NamedTuple):
+        manifest: dict[str, zipfile.ZipInfo]
+        mtime: float
+
+    def load(self, path: str) -> dict[str, zipfile.ZipInfo]:  # type: ignore[override] # ZipManifests.load is a classmethod
+        """
+        Load a manifest at path or return a suitable manifest already loaded.
+        """
+        path = os.path.normpath(path)
+        mtime = os.stat(path).st_mtime
+
+        if path not in self or self[path].mtime != mtime:
+            manifest = self.build(path)
+            self[path] = self.manifest_mod(manifest, mtime)
+
+        return self[path].manifest
+
+
+class ZipProvider(EggProvider):
+    """Resource support for zips and eggs"""
+
+    eagers: list[str] | None = None
+    _zip_manifests = MemoizedZipManifests()
+    # ZipProvider's loader should always be a zipimporter or equivalent
+    loader: zipimport.zipimporter
+
+    def __init__(self, module: _ZipLoaderModule):
+        super().__init__(module)
+        self.zip_pre = self.loader.archive + os.sep
+
+    def _zipinfo_name(self, fspath):
+        # Convert a virtual filename (full path to file) into a zipfile subpath
+        # usable with the zipimport directory cache for our target archive
+        fspath = fspath.rstrip(os.sep)
+        if fspath == self.loader.archive:
+            return ''
+        if fspath.startswith(self.zip_pre):
+            return fspath[len(self.zip_pre) :]
+        raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre))
+
+    def _parts(self, zip_path):
+        # Convert a zipfile subpath into an egg-relative path part list.
+        # pseudo-fs path
+        fspath = self.zip_pre + zip_path
+        if fspath.startswith(self.egg_root + os.sep):
+            return fspath[len(self.egg_root) + 1 :].split(os.sep)
+        raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root))
+
+    @property
+    def zipinfo(self):
+        return self._zip_manifests.load(self.loader.archive)
+
+    def get_resource_filename(self, manager: ResourceManager, resource_name: str):
+        if not self.egg_name:
+            raise NotImplementedError(
+                "resource_filename() only supported for .egg, not .zip"
+            )
+        # no need to lock for extraction, since we use temp names
+        zip_path = self._resource_to_zip(resource_name)
+        eagers = self._get_eager_resources()
+        if '/'.join(self._parts(zip_path)) in eagers:
+            for name in eagers:
+                self._extract_resource(manager, self._eager_to_zip(name))
+        return self._extract_resource(manager, zip_path)
+
+    @staticmethod
+    def _get_date_and_size(zip_stat):
+        size = zip_stat.file_size
+        # ymdhms+wday, yday, dst
+        date_time = zip_stat.date_time + (0, 0, -1)
+        # 1980 offset already done
+        timestamp = time.mktime(date_time)
+        return timestamp, size
+
+    # FIXME: 'ZipProvider._extract_resource' is too complex (12)
+    def _extract_resource(self, manager: ResourceManager, zip_path) -> str:  # noqa: C901
+        if zip_path in self._index():
+            for name in self._index()[zip_path]:
+                last = self._extract_resource(manager, os.path.join(zip_path, name))
+            # return the extracted directory name
+            return os.path.dirname(last)
+
+        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+        if not WRITE_SUPPORT:
+            raise OSError(
+                '"os.rename" and "os.unlink" are not supported on this platform'
+            )
+        try:
+            if not self.egg_name:
+                raise OSError(
+                    '"egg_name" is empty. This likely means no egg could be found from the "module_path".'
+                )
+            real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path))
+
+            if self._is_current(real_path, zip_path):
+                return real_path
+
+            outf, tmpnam = _mkstemp(
+                ".$extract",
+                dir=os.path.dirname(real_path),
+            )
+            os.write(outf, self.loader.get_data(zip_path))
+            os.close(outf)
+            utime(tmpnam, (timestamp, timestamp))
+            manager.postprocess(tmpnam, real_path)
+
+            try:
+                rename(tmpnam, real_path)
+
+            except OSError:
+                if os.path.isfile(real_path):
+                    if self._is_current(real_path, zip_path):
+                        # the file became current since it was checked above,
+                        #  so proceed.
+                        return real_path
+                    # On Windows, delete the old file and retry
+                    elif os.name == 'nt':
+                        unlink(real_path)
+                        rename(tmpnam, real_path)
+                        return real_path
+                raise
+
+        except OSError:
+            # report a user-friendly error
+            manager.extraction_error()
+
+        return real_path
+
+    def _is_current(self, file_path, zip_path):
+        """
+        Return True if the file_path is current for this zip_path
+        """
+        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+        if not os.path.isfile(file_path):
+            return False
+        stat = os.stat(file_path)
+        if stat.st_size != size or stat.st_mtime != timestamp:
+            return False
+        # check that the contents match
+        zip_contents = self.loader.get_data(zip_path)
+        with open(file_path, 'rb') as f:
+            file_contents = f.read()
+        return zip_contents == file_contents
+
+    def _get_eager_resources(self):
+        if self.eagers is None:
+            eagers = []
+            for name in ('native_libs.txt', 'eager_resources.txt'):
+                if self.has_metadata(name):
+                    eagers.extend(self.get_metadata_lines(name))
+            self.eagers = eagers
+        return self.eagers
+
+    def _index(self):
+        try:
+            return self._dirindex
+        except AttributeError:
+            ind = {}
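+            # e.g. a manifest containing 'a/b.txt' yields
+            # {'a': ['b.txt'], '': ['a']} -- each directory maps to its
+            # immediate children (illustrative).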
+            for path in self.zipinfo:
+                parts = path.split(os.sep)
+                while parts:
+                    parent = os.sep.join(parts[:-1])
+                    if parent in ind:
+                        ind[parent].append(parts[-1])
+                        break
+                    else:
+                        ind[parent] = [parts.pop()]
+            self._dirindex = ind
+            return ind
+
+    def _has(self, fspath) -> bool:
+        zip_path = self._zipinfo_name(fspath)
+        return zip_path in self.zipinfo or zip_path in self._index()
+
+    def _isdir(self, fspath) -> bool:
+        return self._zipinfo_name(fspath) in self._index()
+
+    def _listdir(self, fspath):
+        return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+    def _eager_to_zip(self, resource_name: str):
+        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+    def _resource_to_zip(self, resource_name: str):
+        return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+    """Metadata handler for standalone PKG-INFO files
+
+    Usage::
+
+        metadata = FileMetadata("/path/to/PKG-INFO")
+
+    This provider rejects all data and metadata requests except for PKG-INFO,
+    which is treated as existing, and will be the contents of the file at
+    the provided location.
+    """
+
+    def __init__(self, path: StrPath):
+        self.path = path
+
+    def _get_metadata_path(self, name):
+        return self.path
+
+    def has_metadata(self, name: str) -> bool:
+        return name == 'PKG-INFO' and os.path.isfile(self.path)
+
+    def get_metadata(self, name: str):
+        if name != 'PKG-INFO':
+            raise KeyError("No metadata except PKG-INFO is available")
+
+        with open(self.path, encoding='utf-8', errors="replace") as f:
+            metadata = f.read()
+        self._warn_on_replacement(metadata)
+        return metadata
+
+    def _warn_on_replacement(self, metadata):
+        replacement_char = '�'
+        if replacement_char in metadata:
+            tmpl = "{self.path} could not be properly decoded in UTF-8"
+            msg = tmpl.format(**locals())
+            warnings.warn(msg)
+
+    def get_metadata_lines(self, name: str) -> Iterator[str]:
+        return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+    """Metadata provider for egg directories
+
+    Usage::
+
+        # Development eggs:
+
+        egg_info = "/path/to/PackageName.egg-info"
+        base_dir = os.path.dirname(egg_info)
+        metadata = PathMetadata(base_dir, egg_info)
+        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
+
+        # Unpacked egg directories:
+
+        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+        metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
+        dist = Distribution.from_filename(egg_path, metadata=metadata)
+    """
+
+    def __init__(self, path: str, egg_info: str):
+        self.module_path = path
+        self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+    """Metadata provider for .egg files"""
+
+    def __init__(self, importer: zipimport.zipimporter):
+        """Create a metadata provider from a zipimporter"""
+
+        self.zip_pre = importer.archive + os.sep
+        self.loader = importer
+        if importer.prefix:
+            self.module_path = os.path.join(importer.archive, importer.prefix)
+        else:
+            self.module_path = importer.archive
+        self._setup_prefix()
+
+
+_distribution_finders: dict[type, _DistFinderType[Any]] = _declare_state(
+    'dict', '_distribution_finders', {}
+)
+
+
+def register_finder(importer_type: type[_T], distribution_finder: _DistFinderType[_T]):
+    """Register `distribution_finder` to find distributions in sys.path items
+
+    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+    handler), and `distribution_finder` is a callable that, passed a path
+    item and the importer instance, yields ``Distribution`` instances found on
+    that path item.  See ``pkg_resources.find_on_path`` for an example."""
+    _distribution_finders[importer_type] = distribution_finder
+
+
+def find_distributions(path_item: str, only: bool = False):
+    """Yield distributions accessible via `path_item`"""
+    importer = get_importer(path_item)
+    finder = _find_adapter(_distribution_finders, importer)
+    return finder(importer, path_item, only)
+
+
+def find_eggs_in_zip(
+    importer: zipimport.zipimporter, path_item: str, only: bool = False
+) -> Iterator[Distribution]:
+    """
+    Find eggs in zip files; possibly multiple nested eggs.
+    """
+    if importer.archive.endswith('.whl'):
+        # wheels are not supported with this finder
+        # they don't have PKG-INFO metadata, and won't ever contain eggs
+        return
+    metadata = EggMetadata(importer)
+    if metadata.has_metadata('PKG-INFO'):
+        yield Distribution.from_filename(path_item, metadata=metadata)
+    if only:
+        # don't yield nested distros
+        return
+    for subitem in metadata.resource_listdir(''):
+        if _is_egg_path(subitem):
+            subpath = os.path.join(path_item, subitem)
+            dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
+            yield from dists
+        elif subitem.lower().endswith(('.dist-info', '.egg-info')):
+            subpath = os.path.join(path_item, subitem)
+            submeta = EggMetadata(zipimport.zipimporter(subpath))
+            submeta.egg_info = subpath
+            yield Distribution.from_location(path_item, subitem, submeta)
+
+
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
+def find_nothing(
+    importer: object | None, path_item: str | None, only: bool | None = False
+):
+    return ()
+
+
+register_finder(object, find_nothing)
+
+
+def find_on_path(importer: object | None, path_item, only=False):
+    """Yield distributions accessible on a sys.path directory"""
+    path_item = _normalize_cached(path_item)
+
+    if _is_unpacked_egg(path_item):
+        yield Distribution.from_filename(
+            path_item,
+            metadata=PathMetadata(path_item, os.path.join(path_item, 'EGG-INFO')),
+        )
+        return
+
+    entries = (os.path.join(path_item, child) for child in safe_listdir(path_item))
+
+    # scan for .egg and .egg-info in directory
+    for entry in sorted(entries):
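+        # `entry` is already an absolute path here, so this join is
+        # effectively a no-op (os.path.join discards the first argument
+        # when the second is absolute).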
+        fullpath = os.path.join(path_item, entry)
+        factory = dist_factory(path_item, entry, only)
+        yield from factory(fullpath)
+
+
+def dist_factory(path_item, entry, only):
+    """Return a dist_factory for the given entry."""
+    lower = entry.lower()
+    is_egg_info = lower.endswith('.egg-info')
+    is_dist_info = lower.endswith('.dist-info') and os.path.isdir(
+        os.path.join(path_item, entry)
+    )
+    is_meta = is_egg_info or is_dist_info
+    return (
+        distributions_from_metadata
+        if is_meta
+        else find_distributions
+        if not only and _is_egg_path(entry)
+        else resolve_egg_link
+        if not only and lower.endswith('.egg-link')
+        else NoDists()
+    )
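+
+# Illustrative mapping (with only=False):
+#   '*.egg-info' / '*.dist-info' dir  -> distributions_from_metadata
+#   existing '*.egg' (zip or unpacked) -> find_distributions
+#   '*.egg-link'                       -> resolve_egg_link
+#   anything else                      -> NoDists()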
+
+
+class NoDists:
+    """
+    >>> bool(NoDists())
+    False
+
+    >>> list(NoDists()('anything'))
+    []
+    """
+
+    def __bool__(self):
+        return False
+
+    def __call__(self, fullpath):
+        return iter(())
+
+
+def safe_listdir(path: StrOrBytesPath):
+    """
+    Attempt to list contents of path, but suppress some exceptions.
+    """
+    try:
+        return os.listdir(path)
+    except (PermissionError, NotADirectoryError):
+        pass
+    except OSError as e:
+        # Ignore the directory if it does not exist, is not a directory,
+        # or permission is denied
+        if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
+            raise
+    return ()
+
+
+def distributions_from_metadata(path: str):
+    root = os.path.dirname(path)
+    if os.path.isdir(path):
+        if len(os.listdir(path)) == 0:
+            # empty metadata dir; skip
+            return
+        metadata: _MetadataType = PathMetadata(root, path)
+    else:
+        metadata = FileMetadata(path)
+    entry = os.path.basename(path)
+    yield Distribution.from_location(
+        root,
+        entry,
+        metadata,
+        precedence=DEVELOP_DIST,
+    )
+
+
+def non_empty_lines(path):
+    """
+    Yield non-empty lines from file at path
+    """
+    for line in _read_utf8_with_fallback(path).splitlines():
+        line = line.strip()
+        if line:
+            yield line
+
+
+def resolve_egg_link(path):
+    """
+    Given a path to an .egg-link, resolve distributions
+    present in the referenced path.
+    """
+    referenced_paths = non_empty_lines(path)
+    resolved_paths = (
+        os.path.join(os.path.dirname(path), ref) for ref in referenced_paths
+    )
+    dist_groups = map(find_distributions, resolved_paths)
+    return next(dist_groups, ())
+
+
+if hasattr(pkgutil, 'ImpImporter'):
+    register_finder(pkgutil.ImpImporter, find_on_path)
+
+register_finder(importlib.machinery.FileFinder, find_on_path)
+
+_namespace_handlers: dict[type, _NSHandlerType[Any]] = _declare_state(
+    'dict', '_namespace_handlers', {}
+)
+_namespace_packages: dict[str | None, list[str]] = _declare_state(
+    'dict', '_namespace_packages', {}
+)
+
+
+def register_namespace_handler(
+    importer_type: type[_T], namespace_handler: _NSHandlerType[_T]
+):
+    """Register `namespace_handler` to declare namespace packages
+
+    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+    handler), and `namespace_handler` is a callable like this::
+
+        def namespace_handler(importer, path_entry, moduleName, module):
+            # return a path_entry to use for child packages
+
+    Namespace handlers are only called if the importer object has already
+    agreed that it can handle the relevant path item, and they should only
+    return a subpath if the module __path__ does not already contain an
+    equivalent subpath.  For an example namespace handler, see
+    ``pkg_resources.file_ns_handler``.
+    """
+    _namespace_handlers[importer_type] = namespace_handler
+
+
+def _handle_ns(packageName, path_item):
+    """Ensure that named package includes a subpath of path_item (if needed)"""
+
+    importer = get_importer(path_item)
+    if importer is None:
+        return None
+
+    # use find_spec (PEP 451) and fall-back to find_module (PEP 302)
+    try:
+        spec = importer.find_spec(packageName)
+    except AttributeError:
+        # capture warnings due to #1111
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            loader = importer.find_module(packageName)
+    else:
+        loader = spec.loader if spec else None
+
+    if loader is None:
+        return None
+    module = sys.modules.get(packageName)
+    if module is None:
+        module = sys.modules[packageName] = types.ModuleType(packageName)
+        module.__path__ = []
+        _set_parent_ns(packageName)
+    elif not hasattr(module, '__path__'):
+        raise TypeError("Not a package:", packageName)
+    handler = _find_adapter(_namespace_handlers, importer)
+    subpath = handler(importer, path_item, packageName, module)
+    if subpath is not None:
+        path = module.__path__
+        path.append(subpath)
+        importlib.import_module(packageName)
+        _rebuild_mod_path(path, packageName, module)
+    return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module: types.ModuleType):
+    """
+    Rebuild module.__path__ ensuring that all entries are ordered
+    corresponding to their sys.path order
+    """
+    sys_path = [_normalize_cached(p) for p in sys.path]
+
+    def safe_sys_path_index(entry):
+        """
+        Workaround for #520 and #513.
+        """
+        try:
+            return sys_path.index(entry)
+        except ValueError:
+            return float('inf')
+
+    def position_in_sys_path(path):
+        """
+        Return the ordinal of the path based on its position in sys.path
+        """
+        path_parts = path.split(os.sep)
+        module_parts = package_name.count('.') + 1
+        parts = path_parts[:-module_parts]
+        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
+
+    new_path = sorted(orig_path, key=position_in_sys_path)
+    new_path = [_normalize_cached(p) for p in new_path]
+
+    if isinstance(module.__path__, list):
+        module.__path__[:] = new_path
+    else:
+        module.__path__ = new_path
+
+
+def declare_namespace(packageName: str):
+    """Declare that package 'packageName' is a namespace package"""
+
+    msg = (
+        f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n"
+        "Implementing implicit namespace packages (as specified in PEP 420) "
+        "is preferred to `pkg_resources.declare_namespace`. "
+        "See https://setuptools.pypa.io/en/latest/references/"
+        "keywords.html#keyword-namespace-packages"
+    )
+    warnings.warn(msg, DeprecationWarning, stacklevel=2)
+
+    _imp.acquire_lock()
+    try:
+        if packageName in _namespace_packages:
+            return
+
+        path: MutableSequence[str] = sys.path
+        parent, _, _ = packageName.rpartition('.')
+
+        if parent:
+            declare_namespace(parent)
+            if parent not in _namespace_packages:
+                __import__(parent)
+            try:
+                path = sys.modules[parent].__path__
+            except AttributeError as e:
+                raise TypeError("Not a package:", parent) from e
+
+        # Track what packages are namespaces, so when new path items are added,
+        # they can be updated
+        _namespace_packages.setdefault(parent or None, []).append(packageName)
+        _namespace_packages.setdefault(packageName, [])
+
+        for path_item in path:
+            # Ensure all the parent's path items are reflected in the child,
+            # if they apply
+            _handle_ns(packageName, path_item)
+
+    finally:
+        _imp.release_lock()
+
+
+def fixup_namespace_packages(path_item: str, parent: str | None = None):
+    """Ensure that previously-declared namespace packages include path_item"""
+    _imp.acquire_lock()
+    try:
+        for package in _namespace_packages.get(parent, ()):
+            subpath = _handle_ns(package, path_item)
+            if subpath:
+                fixup_namespace_packages(subpath, package)
+    finally:
+        _imp.release_lock()
+
+
+def file_ns_handler(
+    importer: object,
+    path_item: StrPath,
+    packageName: str,
+    module: types.ModuleType,
+):
+    """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+    subpath = os.path.join(path_item, packageName.split('.')[-1])
+    normalized = _normalize_cached(subpath)
+    for item in module.__path__:
+        if _normalize_cached(item) == normalized:
+            break
+    else:
+        # Only return the path if it's not already there
+        return subpath
+
+
+if hasattr(pkgutil, 'ImpImporter'):
+    register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+register_namespace_handler(importlib.machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(
+    importer: object,
+    path_item: str | None,
+    packageName: str | None,
+    module: _ModuleLike | None,
+):
+    return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
+@overload
+def normalize_path(filename: StrPath) -> str: ...
+@overload
+def normalize_path(filename: BytesPath) -> bytes: ...
+def normalize_path(filename: StrOrBytesPath):
+    """Normalize a file/dir name for comparison purposes"""
+    return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
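+
+# Example (illustrative): normalize_path('src/../setup.py') resolves to the
+# absolute, symlink-free, case-normalized path of 'setup.py' in the current
+# directory.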
+
+
+def _cygwin_patch(filename: StrOrBytesPath):  # pragma: nocover
+    """
+    Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
+    symlink components. Using
+    os.path.abspath() works around this limitation. A fix in os.getcwd()
+    would probably be better, in Cygwin even more so, except
+    that this seems to be by design...
+    """
+    return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
+
+
+if TYPE_CHECKING:
+    # https://github.com/python/mypy/issues/16261
+    # https://github.com/python/typeshed/issues/6347
+    @overload
+    def _normalize_cached(filename: StrPath) -> str: ...
+    @overload
+    def _normalize_cached(filename: BytesPath) -> bytes: ...
+    def _normalize_cached(filename: StrOrBytesPath) -> str | bytes: ...
+else:
+
+    @functools.lru_cache(maxsize=None)
+    def _normalize_cached(filename):
+        return normalize_path(filename)
+
+
+def _is_egg_path(path):
+    """
+    Determine if given path appears to be an egg.
+    """
+    return _is_zip_egg(path) or _is_unpacked_egg(path)
+
+
+def _is_zip_egg(path):
+    return (
+        path.lower().endswith('.egg')
+        and os.path.isfile(path)
+        and zipfile.is_zipfile(path)
+    )
+
+
+def _is_unpacked_egg(path):
+    """
+    Determine if given path appears to be an unpacked egg.
+    """
+    return path.lower().endswith('.egg') and os.path.isfile(
+        os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+    )
+
+
+def _set_parent_ns(packageName):
+    parts = packageName.split('.')
+    name = parts.pop()
+    if parts:
+        parent = '.'.join(parts)
+        setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+    r"""
+    (?P<name>[^-]+) (
+        -(?P<ver>[^-]+) (
+            -py(?P<pyver>[^-]+) (
+                -(?P<plat>.+)
+            )?
+        )?
+    )?
+    """,
+    re.VERBOSE | re.IGNORECASE,
+).match
+
+
+class EntryPoint:
+    """Object representing an advertised importable object"""
+
+    def __init__(
+        self,
+        name: str,
+        module_name: str,
+        attrs: Iterable[str] = (),
+        extras: Iterable[str] = (),
+        dist: Distribution | None = None,
+    ):
+        if not MODULE(module_name):
+            raise ValueError("Invalid module name", module_name)
+        self.name = name
+        self.module_name = module_name
+        self.attrs = tuple(attrs)
+        self.extras = tuple(extras)
+        self.dist = dist
+
+    def __str__(self):
+        s = "%s = %s" % (self.name, self.module_name)
+        if self.attrs:
+            s += ':' + '.'.join(self.attrs)
+        if self.extras:
+            s += ' [%s]' % ','.join(self.extras)
+        return s
+
+    def __repr__(self):
+        return "EntryPoint.parse(%r)" % str(self)
+
+    @overload
+    def load(
+        self,
+        require: Literal[True] = True,
+        env: Environment | None = None,
+        installer: _InstallerType | None = None,
+    ) -> _ResolvedEntryPoint: ...
+    @overload
+    def load(
+        self,
+        require: Literal[False],
+        *args: Any,
+        **kwargs: Any,
+    ) -> _ResolvedEntryPoint: ...
+    def load(
+        self,
+        require: bool = True,
+        *args: Environment | _InstallerType | None,
+        **kwargs: Environment | _InstallerType | None,
+    ) -> _ResolvedEntryPoint:
+        """
+        Require packages for this EntryPoint, then resolve it.
+        """
+        if not require or args or kwargs:
+            warnings.warn(
+                "Parameters to load are deprecated.  Call .resolve and "
+                ".require separately.",
+                PkgResourcesDeprecationWarning,
+                stacklevel=2,
+            )
+        if require:
+            # We could pass `env` and `installer` directly,
+            # but keeping `*args` and `**kwargs` for backwards compatibility
+            self.require(*args, **kwargs)  # type: ignore
+        return self.resolve()
+
+    def resolve(self) -> _ResolvedEntryPoint:
+        """
+        Resolve the entry point from its module and attrs.
+        """
+        module = __import__(self.module_name, fromlist=['__name__'], level=0)
+        try:
+            return functools.reduce(getattr, self.attrs, module)
+        except AttributeError as exc:
+            raise ImportError(str(exc)) from exc
+
+    def require(
+        self,
+        env: Environment | None = None,
+        installer: _InstallerType | None = None,
+    ):
+        if not self.dist:
+            error_cls = UnknownExtra if self.extras else AttributeError
+            raise error_cls("Can't require() without a distribution", self)
+
+        # Get the requirements for this entry point with all its extras and
+        # then resolve them. We have to pass `extras` along when resolving so
+        # that the working set knows what extras we want. Otherwise, for
+        # dist-info distributions, the working set will assume that the
+        # requirements for that extra are purely optional and skip over them.
+        reqs = self.dist.requires(self.extras)
+        items = working_set.resolve(reqs, env, installer, extras=self.extras)
+        list(map(working_set.add, items))
+
+    pattern = re.compile(
+        r'\s*'
+        r'(?P<name>.+?)\s*'
+        r'=\s*'
+        r'(?P<module>[\w.]+)\s*'
+        r'(:\s*(?P<attr>[\w.]+))?\s*'
+        r'(?P<extras>\[.*\])?\s*$'
+    )
+
+    @classmethod
+    def parse(cls, src: str, dist: Distribution | None = None):
+        """Parse a single entry point from string `src`
+
+        Entry point syntax follows the form::
+
+            name = some.module:some.attr [extra1, extra2]
+
+        The entry name and module name are required, but the ``:attrs`` and
+        ``[extras]`` parts are optional
+        """
+        m = cls.pattern.match(src)
+        if not m:
+            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+            raise ValueError(msg, src)
+        res = m.groupdict()
+        extras = cls._parse_extras(res['extras'])
+        attrs = res['attr'].split('.') if res['attr'] else ()
+        return cls(res['name'], res['module'], attrs, extras, dist)
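+
+    # Example (illustrative):
+    #   ep = EntryPoint.parse('main = pkg.cli:run [extra1]')
+    #   (ep.name, ep.module_name, ep.attrs, ep.extras)
+    #     -> ('main', 'pkg.cli', ('run',), ('extra1',))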
+
+    @classmethod
+    def _parse_extras(cls, extras_spec):
+        if not extras_spec:
+            return ()
+        req = Requirement.parse('x' + extras_spec)
+        if req.specs:
+            raise ValueError
+        return req.extras
+
+    @classmethod
+    def parse_group(
+        cls,
+        group: str,
+        lines: _NestedStr,
+        dist: Distribution | None = None,
+    ):
+        """Parse an entry point group"""
+        if not MODULE(group):
+            raise ValueError("Invalid group name", group)
+        this: dict[str, Self] = {}
+        for line in yield_lines(lines):
+            ep = cls.parse(line, dist)
+            if ep.name in this:
+                raise ValueError("Duplicate entry point", group, ep.name)
+            this[ep.name] = ep
+        return this
+
+    @classmethod
+    def parse_map(
+        cls,
+        data: str | Iterable[str] | dict[str, str | Iterable[str]],
+        dist: Distribution | None = None,
+    ):
+        """Parse a map of entry point groups"""
+        _data: Iterable[tuple[str | None, str | Iterable[str]]]
+        if isinstance(data, dict):
+            _data = data.items()
+        else:
+            _data = split_sections(data)
+        maps: dict[str, dict[str, Self]] = {}
+        for group, lines in _data:
+            if group is None:
+                if not lines:
+                    continue
+                raise ValueError("Entry points must be listed in groups")
+            group = group.strip()
+            if group in maps:
+                raise ValueError("Duplicate group name", group)
+            maps[group] = cls.parse_group(group, lines, dist)
+        return maps
+
+
+def _version_from_file(lines):
+    """
+    Given an iterable of lines from a Metadata file, return
+    the value of the Version field, if present, or None otherwise.
+    """
+
+    def is_version_line(line):
+        return line.lower().startswith('version:')
+
+    version_lines = filter(is_version_line, lines)
+    line = next(iter(version_lines), '')
+    _, _, value = line.partition(':')
+    return safe_version(value.strip()) or None
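+
+# Example (illustrative):
+#   _version_from_file(['Metadata-Version: 1.0', 'Version: 1.2.3']) -> '1.2.3'
+#   Returns None when no 'Version:' line is present.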
+
+
+class Distribution:
+    """Wrap an actual or potential sys.path entry w/metadata"""
+
+    PKG_INFO = 'PKG-INFO'
+
+    def __init__(
+        self,
+        location: str | None = None,
+        metadata: _MetadataType = None,
+        project_name: str | None = None,
+        version: str | None = None,
+        py_version: str | None = PY_MAJOR,
+        platform: str | None = None,
+        precedence: int = EGG_DIST,
+    ):
+        self.project_name = safe_name(project_name or 'Unknown')
+        if version is not None:
+            self._version = safe_version(version)
+        self.py_version = py_version
+        self.platform = platform
+        self.location = location
+        self.precedence = precedence
+        self._provider = metadata or empty_provider
+
+    @classmethod
+    def from_location(
+        cls,
+        location: str,
+        basename: StrPath,
+        metadata: _MetadataType = None,
+        **kw: int,  # We could set `precedence` explicitly, but keeping this as `**kw` for full backwards and subclassing compatibility
+    ) -> Distribution:
+        project_name, version, py_version, platform = [None] * 4
+        basename, ext = os.path.splitext(basename)
+        if ext.lower() in _distributionImpl:
+            cls = _distributionImpl[ext.lower()]
+
+            match = EGG_NAME(basename)
+            if match:
+                project_name, version, py_version, platform = match.group(
+                    'name', 'ver', 'pyver', 'plat'
+                )
+        return cls(
+            location,
+            metadata,
+            project_name=project_name,
+            version=version,
+            py_version=py_version,
+            platform=platform,
+            **kw,
+        )._reload_version()
+
+    def _reload_version(self):
+        return self
+
+    @property
+    def hashcmp(self):
+        return (
+            self._forgiving_parsed_version,
+            self.precedence,
+            self.key,
+            self.location,
+            self.py_version or '',
+            self.platform or '',
+        )
+
+    def __hash__(self):
+        return hash(self.hashcmp)
+
+    def __lt__(self, other: Distribution):
+        return self.hashcmp < other.hashcmp
+
+    def __le__(self, other: Distribution):
+        return self.hashcmp <= other.hashcmp
+
+    def __gt__(self, other: Distribution):
+        return self.hashcmp > other.hashcmp
+
+    def __ge__(self, other: Distribution):
+        return self.hashcmp >= other.hashcmp
+
+    def __eq__(self, other: object):
+        if not isinstance(other, self.__class__):
+            # It's not a Distribution, so they are not equal
+            return False
+        return self.hashcmp == other.hashcmp
+
+    def __ne__(self, other: object):
+        return not self == other
+
+    # These properties have to be lazy so that we don't have to load any
+    # metadata until/unless it's actually needed.  (i.e., some distributions
+    # may not know their name or version without loading PKG-INFO)
+
+    @property
+    def key(self):
+        try:
+            return self._key
+        except AttributeError:
+            self._key = key = self.project_name.lower()
+            return key
+
+    @property
+    def parsed_version(self):
+        if not hasattr(self, "_parsed_version"):
+            try:
+                self._parsed_version = parse_version(self.version)
+            except _packaging_version.InvalidVersion as ex:
+                info = f"(package: {self.project_name})"
+                if hasattr(ex, "add_note"):
+                    ex.add_note(info)  # PEP 678
+                    raise
+                raise _packaging_version.InvalidVersion(f"{str(ex)} {info}") from None
+
+        return self._parsed_version
+
+    @property
+    def _forgiving_parsed_version(self):
+        try:
+            return self.parsed_version
+        except _packaging_version.InvalidVersion as ex:
+            self._parsed_version = parse_version(_forgiving_version(self.version))
+
+            notes = "\n".join(getattr(ex, "__notes__", []))  # PEP 678
+            msg = f"""!!\n\n
+            *************************************************************************
+            {str(ex)}\n{notes}
+
+            This is a long overdue deprecation.
+            For the time being, `pkg_resources` will use `{self._parsed_version}`
+            as a replacement to avoid breaking existing environments,
+            but no future compatibility is guaranteed.
+
+            If you maintain package {self.project_name} you should implement
+            the relevant changes to bring the project into conformance with
+            PEP 440 immediately.
+            *************************************************************************
+            \n\n!!
+            """
+            warnings.warn(msg, DeprecationWarning)
+
+            return self._parsed_version
+
+    @property
+    def version(self):
+        try:
+            return self._version
+        except AttributeError as e:
+            version = self._get_version()
+            if version is None:
+                path = self._get_metadata_path_for_display(self.PKG_INFO)
+                msg = ("Missing 'Version:' header and/or {} file at path: {}").format(
+                    self.PKG_INFO, path
+                )
+                raise ValueError(msg, self) from e
+
+            return version
+
+    @property
+    def _dep_map(self):
+        """
+        A map of extra to its list of (direct) requirements
+        for this distribution, including the null extra.
+        """
+        try:
+            return self.__dep_map
+        except AttributeError:
+            self.__dep_map = self._filter_extras(self._build_dep_map())
+        return self.__dep_map
+
+    @staticmethod
+    def _filter_extras(dm: dict[str | None, list[Requirement]]):
+        """
+        Given a mapping of extras to dependencies, strip off
+        environment markers and filter out any dependencies
+        not matching the markers.
+        """
+        for extra in list(filter(None, dm)):
+            new_extra: str | None = extra
+            reqs = dm.pop(extra)
+            new_extra, _, marker = extra.partition(':')
+            fails_marker = marker and (
+                invalid_marker(marker) or not evaluate_marker(marker)
+            )
+            if fails_marker:
+                reqs = []
+            new_extra = safe_extra(new_extra) or None
+
+            dm.setdefault(new_extra, []).extend(reqs)
+        return dm
+
+    def _build_dep_map(self):
+        dm = {}
+        for name in 'requires.txt', 'depends.txt':
+            for extra, reqs in split_sections(self._get_metadata(name)):
+                dm.setdefault(extra, []).extend(parse_requirements(reqs))
+        return dm
+
+    def requires(self, extras: Iterable[str] = ()):
+        """List of Requirements needed for this distro if `extras` are used"""
+        dm = self._dep_map
+        deps: list[Requirement] = []
+        deps.extend(dm.get(None, ()))
+        for ext in extras:
+            try:
+                deps.extend(dm[safe_extra(ext)])
+            except KeyError as e:
+                raise UnknownExtra(
+                    "%s has no such extra feature %r" % (self, ext)
+                ) from e
+        return deps
+
+    def _get_metadata_path_for_display(self, name):
+        """
+        Return the path to the given metadata file, if available.
+        """
+        try:
+            # We need to access _get_metadata_path() on the provider object
+            # directly rather than through this class's __getattr__()
+            # since _get_metadata_path() is marked private.
+            path = self._provider._get_metadata_path(name)
+
+        # Handle exceptions e.g. in case the distribution's metadata
+        # provider doesn't support _get_metadata_path().
+        except Exception:
+            return '[could not detect]'
+
+        return path
+
+    def _get_metadata(self, name):
+        if self.has_metadata(name):
+            yield from self.get_metadata_lines(name)
+
+    def _get_version(self):
+        lines = self._get_metadata(self.PKG_INFO)
+        return _version_from_file(lines)
+
+    def activate(self, path: list[str] | None = None, replace: bool = False):
+        """Ensure distribution is importable on `path` (default=sys.path)"""
+        if path is None:
+            path = sys.path
+        self.insert_on(path, replace=replace)
+        if path is sys.path and self.location is not None:
+            fixup_namespace_packages(self.location)
+            for pkg in self._get_metadata('namespace_packages.txt'):
+                if pkg in sys.modules:
+                    declare_namespace(pkg)
+
+    def egg_name(self):
+        """Return what this distribution's standard .egg filename should be"""
+        filename = "%s-%s-py%s" % (
+            to_filename(self.project_name),
+            to_filename(self.version),
+            self.py_version or PY_MAJOR,
+        )
+
+        if self.platform:
+            filename += '-' + self.platform
+        return filename
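+
+    # Example (illustrative): a distribution 'my-pkg' at version 1.0 on
+    # Python 3.12 yields 'my_pkg-1.0-py3.12', plus '-<platform>' for
+    # platform-specific eggs.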
+
+    def __repr__(self):
+        if self.location:
+            return "%s (%s)" % (self, self.location)
+        else:
+            return str(self)
+
+    def __str__(self):
+        try:
+            version = getattr(self, 'version', None)
+        except ValueError:
+            version = None
+        version = version or "[unknown version]"
+        return "%s %s" % (self.project_name, version)
+
+    def __getattr__(self, attr):
+        """Delegate all unrecognized public attributes to .metadata provider"""
+        if attr.startswith('_'):
+            raise AttributeError(attr)
+        return getattr(self._provider, attr)
+
+    def __dir__(self):
+        return list(
+            set(super().__dir__())
+            | set(attr for attr in self._provider.__dir__() if not attr.startswith('_'))
+        )
+
+    @classmethod
+    def from_filename(
+        cls,
+        filename: StrPath,
+        metadata: _MetadataType = None,
+        **kw: int,  # We could set `precedence` explicitly, but keeping this as `**kw` for full backwards and subclassing compatibility
+    ):
+        return cls.from_location(
+            _normalize_cached(filename), os.path.basename(filename), metadata, **kw
+        )
+
+    def as_requirement(self):
+        """Return a ``Requirement`` that matches this distribution exactly"""
+        if isinstance(self.parsed_version, _packaging_version.Version):
+            spec = "%s==%s" % (self.project_name, self.parsed_version)
+        else:
+            spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+        return Requirement.parse(spec)
+
+    def load_entry_point(self, group: str, name: str) -> _ResolvedEntryPoint:
+        """Return the `name` entry point of `group` or raise ImportError"""
+        ep = self.get_entry_info(group, name)
+        if ep is None:
+            raise ImportError("Entry point %r not found" % ((group, name),))
+        return ep.load()
+
+    @overload
+    def get_entry_map(self, group: None = None) -> dict[str, dict[str, EntryPoint]]: ...
+    @overload
+    def get_entry_map(self, group: str) -> dict[str, EntryPoint]: ...
+    def get_entry_map(self, group: str | None = None):
+        """Return the entry point map for `group`, or the full entry map"""
+        if not hasattr(self, "_ep_map"):
+            self._ep_map = EntryPoint.parse_map(
+                self._get_metadata('entry_points.txt'), self
+            )
+        if group is not None:
+            return self._ep_map.get(group, {})
+        return self._ep_map
+
+    def get_entry_info(self, group: str, name: str):
+        """Return the EntryPoint object for `group`+`name`, or ``None``"""
+        return self.get_entry_map(group).get(name)
+
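Entry-point resolution through this map might look like the following sketch, which assumes ``pip`` itself is installed in the environment:

```python
# Sketch: resolve a console script through the entry point map.
from pkg_resources import get_distribution

dist = get_distribution("pip")  # assumes pip is installed
ep = dist.get_entry_info("console_scripts", "pip")
if ep is not None:
    main = ep.load()  # same callable as load_entry_point("console_scripts", "pip")
```
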
+    # FIXME: 'Distribution.insert_on' is too complex (13)
+    def insert_on(  # noqa: C901
+        self,
+        path: list[str],
+        loc=None,
+        replace: bool = False,
+    ):
+        """Ensure self.location is on path
+
+        If replace=False (default):
+            - If location is already in path anywhere, do nothing.
+            - Else:
+              - If it's an egg and its parent directory is on path,
+                insert just ahead of the parent.
+              - Else: add to the end of path.
+        If replace=True:
+            - If location is already on path anywhere (not eggs),
+              or has higher priority than its parent (eggs),
+              do nothing.
+            - Else:
+              - If it's an egg and its parent directory is on path,
+                insert just ahead of the parent,
+                removing any lower-priority entries.
+              - Else: add it to the front of path.
+        """
+
+        loc = loc or self.location
+        if not loc:
+            return
+
+        nloc = _normalize_cached(loc)
+        bdir = os.path.dirname(nloc)
+        npath = [(p and _normalize_cached(p) or p) for p in path]
+
+        for p, item in enumerate(npath):
+            if item == nloc:
+                if replace:
+                    break
+                else:
+                    # don't modify path (even removing duplicates) if
+                    # found and not replace
+                    return
+            elif item == bdir and self.precedence == EGG_DIST:
+                # if it's an .egg, give it precedence over its directory
+                # UNLESS it's already been added to sys.path and replace=False
+                if (not replace) and nloc in npath[p:]:
+                    return
+                if path is sys.path:
+                    self.check_version_conflict()
+                path.insert(p, loc)
+                npath.insert(p, nloc)
+                break
+        else:
+            if path is sys.path:
+                self.check_version_conflict()
+            if replace:
+                path.insert(0, loc)
+            else:
+                path.append(loc)
+            return
+
+        # p is the spot where we found or inserted loc; now remove duplicates
+        while True:
+            try:
+                np = npath.index(nloc, p + 1)
+            except ValueError:
+                break
+            else:
+                del npath[np], path[np]
+                # continue scanning for duplicates from the removed slot
+                p = np
+
+        return
+
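A small sketch of the egg-precedence rule described in the docstring, using hypothetical POSIX paths:

```python
# Sketch: an egg gets inserted just ahead of its parent directory.
from pkg_resources import Distribution, EGG_DIST

dist = Distribution(
    location="/srv/site-packages/demo-1.0-py3.12.egg",  # hypothetical path
    project_name="demo",
    version="1.0",
    precedence=EGG_DIST,
)
path = ["/usr/lib/python3.12", "/srv/site-packages"]
dist.insert_on(path, replace=False)
print(path)
# ['/usr/lib/python3.12', '/srv/site-packages/demo-1.0-py3.12.egg', '/srv/site-packages']
```
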
+    def check_version_conflict(self):
+        if self.key == 'setuptools':
+            # ignore the inevitable setuptools self-conflicts  :(
+            return
+
+        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+        loc = normalize_path(self.location)
+        for modname in self._get_metadata('top_level.txt'):
+            if (
+                modname not in sys.modules
+                or modname in nsp
+                or modname in _namespace_packages
+            ):
+                continue
+            if modname in ('pkg_resources', 'setuptools', 'site'):
+                continue
+            fn = getattr(sys.modules[modname], '__file__', None)
+            if fn and (
+                normalize_path(fn).startswith(loc) or fn.startswith(self.location)
+            ):
+                continue
+            issue_warning(
+                "Module %s was already imported from %s, but %s is being added"
+                " to sys.path" % (modname, fn, self.location),
+            )
+
+    def has_version(self):
+        try:
+            self.version
+        except ValueError:
+            issue_warning("Unbuilt egg for " + repr(self))
+            return False
+        except SystemError:
+            # TODO: remove this except clause when python/cpython#103632 is fixed.
+            return False
+        return True
+
+    def clone(self, **kw: str | int | IResourceProvider | None):
+        """Copy this distribution, substituting in any changed keyword args"""
+        names = 'project_name version py_version platform location precedence'
+        for attr in names.split():
+            kw.setdefault(attr, getattr(self, attr, None))
+        kw.setdefault('metadata', self._provider)
+        # Unsafe unpacking, but we keep **kw for backwards and subclassing compatibility
+        return self.__class__(**kw)  # type:ignore[arg-type]
+
+    @property
+    def extras(self):
+        return [dep for dep in self._dep_map if dep]
+
+
+class EggInfoDistribution(Distribution):
+    def _reload_version(self):
+        """
+        Packages installed by distutils (e.g. numpy or scipy)
+        use an older safe_version, so their version numbers
+        can get mangled when converted to filenames
+        (e.g., 1.11.0.dev0+2329eae becomes 1.11.0.dev0_2329eae).
+        Such distributions will not be parsed properly
+        downstream by Distribution and safe_version, so
+        take an extra step and try to get the version number from
+        the metadata file itself instead of the filename.
+        """
+        md_version = self._get_version()
+        if md_version:
+            self._version = md_version
+        return self
+
+
+class DistInfoDistribution(Distribution):
+    """
+    Wrap an actual or potential sys.path entry
+    w/metadata, .dist-info style.
+    """
+
+    PKG_INFO = 'METADATA'
+    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+    @property
+    def _parsed_pkg_info(self):
+        """Parse and cache metadata"""
+        try:
+            return self._pkg_info
+        except AttributeError:
+            metadata = self.get_metadata(self.PKG_INFO)
+            self._pkg_info = email.parser.Parser().parsestr(metadata)
+            return self._pkg_info
+
+    @property
+    def _dep_map(self):
+        try:
+            return self.__dep_map
+        except AttributeError:
+            self.__dep_map = self._compute_dependencies()
+            return self.__dep_map
+
+    def _compute_dependencies(self) -> dict[str | None, list[Requirement]]:
+        """Recompute this distribution's dependencies."""
+        self.__dep_map: dict[str | None, list[Requirement]] = {None: []}
+
+        reqs: list[Requirement] = []
+        # Including any condition expressions
+        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+            reqs.extend(parse_requirements(req))
+
+        def reqs_for_extra(extra):
+            for req in reqs:
+                if not req.marker or req.marker.evaluate({'extra': extra}):
+                    yield req
+
+        common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None)))
+        self.__dep_map[None].extend(common)
+
+        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+            s_extra = safe_extra(extra.strip())
+            self.__dep_map[s_extra] = [
+                r for r in reqs_for_extra(extra) if r not in common
+            ]
+
+        return self.__dep_map
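This dependency map is what drives ``Distribution.requires()``. A quick sketch, assuming ``requests`` (which declares a ``socks`` extra) is installed:

```python
# Sketch: extras select extra requirement groups from the dependency map.
from pkg_resources import get_distribution

dist = get_distribution("requests")      # assumes requests is installed
print(dist.requires())                   # base requirements only
print(dist.requires(extras=("socks",)))  # base plus the 'socks' extra
```
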
+
+
+_distributionImpl = {
+    '.egg': Distribution,
+    '.egg-info': EggInfoDistribution,
+    '.dist-info': DistInfoDistribution,
+}
+
+
+def issue_warning(*args, **kw):
+    level = 1
+    g = globals()
+    try:
+        # find the first stack frame that is *not* code in
+        # the pkg_resources module, to use for the warning
+        while sys._getframe(level).f_globals is g:
+            level += 1
+    except ValueError:
+        pass
+    warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+def parse_requirements(strs: _NestedStr):
+    """
+    Yield ``Requirement`` objects for each specification in `strs`.
+
+    `strs` must be a string, or a (possibly-nested) iterable thereof.
+    """
+    return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs))))
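A short sketch of the line handling: blank lines and comments are dropped, and trailing backslashes join continuation lines:

```python
# Sketch: parse several requirement specifiers at once.
from pkg_resources import parse_requirements

reqs = list(parse_requirements("""
requests >= 2.0  # trailing comments are dropped
attrs[tests] == 25.3.0
"""))
print([str(r) for r in reqs])  # ['requests>=2.0', 'attrs[tests]==25.3.0']
```
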
+
+
+class RequirementParseError(_packaging_requirements.InvalidRequirement):
+    "Compatibility wrapper for InvalidRequirement"
+
+
+class Requirement(_packaging_requirements.Requirement):
+    def __init__(self, requirement_string: str):
+        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+        super().__init__(requirement_string)
+        self.unsafe_name = self.name
+        project_name = safe_name(self.name)
+        self.project_name, self.key = project_name, project_name.lower()
+        self.specs = [(spec.operator, spec.version) for spec in self.specifier]
+        # packaging.requirements.Requirement uses a set for its extras. We use a variable-length tuple
+        self.extras: tuple[str, ...] = tuple(map(safe_extra, self.extras))
+        self.hashCmp = (
+            self.key,
+            self.url,
+            self.specifier,
+            frozenset(self.extras),
+            str(self.marker) if self.marker else None,
+        )
+        self.__hash = hash(self.hashCmp)
+
+    def __eq__(self, other: object):
+        return isinstance(other, Requirement) and self.hashCmp == other.hashCmp
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __contains__(self, item: Distribution | str | tuple[str, ...]) -> bool:
+        if isinstance(item, Distribution):
+            if item.key != self.key:
+                return False
+
+            item = item.version
+
+        # Allow prereleases always in order to match the previous behavior of
+        # this method. In the future this should be smarter and follow PEP 440
+        # more accurately.
+        return self.specifier.contains(item, prereleases=True)
+
+    def __hash__(self):
+        return self.__hash
+
+    def __repr__(self):
+        return "Requirement.parse(%r)" % str(self)
+
+    @staticmethod
+    def parse(s: str | Iterable[str]):
+        (req,) = parse_requirements(s)
+        return req
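A sketch of parsing and version matching, including the prerelease behaviour noted in ``__contains__``:

```python
# Sketch: parse a single requirement and test versions against it.
from pkg_resources import Requirement

req = Requirement.parse("demo[extra1] >= 1.0, < 2.0")
print(req.key, req.extras)  # demo ('extra1',)
print("1.5" in req)         # True
print("2.0a1" in req)       # True: prereleases are always allowed here
```
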
+
+
+def _always_object(classes):
+    """
+    Ensure object appears in the mro even
+    for old-style classes.
+    """
+    if object not in classes:
+        return classes + (object,)
+    return classes
+
+
+def _find_adapter(registry: Mapping[type, _AdapterT], ob: object) -> _AdapterT:
+    """Return an adapter factory for `ob` from `registry`"""
+    types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
+    for t in types:
+        if t in registry:
+            return registry[t]
+    # _find_adapter would previously return None, which the caller would then
+    # immediately try to call, raising a TypeError. Raise TypeError explicitly
+    # to keep backward compatibility for anyone who depended on that behaviour.
+    raise TypeError(f"Could not find adapter for {registry} and {ob}")
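A tiny sketch of the MRO walk with a hypothetical registry (``_find_adapter`` is internal, used here purely for illustration):

```python
# Sketch: the most specific class in the MRO wins.
from pkg_resources import _find_adapter

registry = {object: "generic adapter", int: "int adapter"}
print(_find_adapter(registry, True))  # 'int adapter' (bool's MRO is bool, int, object)
```
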
+
+
+def ensure_directory(path: StrOrBytesPath):
+    """Ensure that the parent directory of `path` exists"""
+    dirname = os.path.dirname(path)
+    os.makedirs(dirname, exist_ok=True)
+
+
+def _bypass_ensure_directory(path):
+    """Sandbox-bypassing version of ensure_directory()"""
+    if not WRITE_SUPPORT:
+        raise OSError('"os.mkdir" not supported on this platform.')
+    dirname, filename = split(path)
+    if dirname and filename and not isdir(dirname):
+        _bypass_ensure_directory(dirname)
+        try:
+            mkdir(dirname, 0o755)
+        except FileExistsError:
+            pass
+
+
+def split_sections(s: _NestedStr) -> Iterator[tuple[str | None, list[str]]]:
+    """Split a string or iterable thereof into (section, content) pairs
+
+    Each ``section`` is a stripped version of the section header ("[section]")
+    and each ``content`` is a list of stripped lines excluding blank lines and
+    comment-only lines.  If there are any such lines before the first section
+    header, they're returned in a first ``section`` of ``None``.
+    """
+    section = None
+    content = []
+    for line in yield_lines(s):
+        if line.startswith("["):
+            if line.endswith("]"):
+                if section or content:
+                    yield section, content
+                section = line[1:-1].strip()
+                content = []
+            else:
+                raise ValueError("Invalid section heading", line)
+        else:
+            content.append(line)
+
+    # wrap up last segment
+    yield section, content
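For example, in this minimal sketch the lines before the first header land in a ``None`` section:

```python
# Sketch: section headers split the stream; leading lines go to None.
from pkg_resources import split_sections

lines = ["global-dep", "[tests]", "pytest", "", "[docs]", "sphinx"]
for section, content in split_sections(lines):
    print(section, content)
# None ['global-dep']
# tests ['pytest']
# docs ['sphinx']
```
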
+
+
+def _mkstemp(*args, **kw):
+    old_open = os.open
+    try:
+        # temporarily bypass sandboxing
+        os.open = os_open
+        return tempfile.mkstemp(*args, **kw)
+    finally:
+        # and then put it back
+        os.open = old_open
+
+
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+class PkgResourcesDeprecationWarning(Warning):
+    """
+    Base class for warning about deprecations in ``pkg_resources``
+
+    This class is not derived from ``DeprecationWarning``, and as such is
+    visible by default.
+    """
+
+
+# Ported from ``setuptools`` to avoid introducing an import inter-dependency:
+_LOCALE_ENCODING = "locale" if sys.version_info >= (3, 10) else None
+
+
+def _read_utf8_with_fallback(file: str, fallback_encoding=_LOCALE_ENCODING) -> str:
+    """See setuptools.unicode_utils._read_utf8_with_fallback"""
+    try:
+        with open(file, "r", encoding="utf-8") as f:
+            return f.read()
+    except UnicodeDecodeError:  # pragma: no cover
+        msg = f"""\
+        ********************************************************************************
+        `encoding="utf-8"` fails with {file!r}, trying `encoding={fallback_encoding!r}`.
+
+        This fallback behaviour is considered **deprecated** and future versions of
+        `setuptools/pkg_resources` may not implement it.
+
+        Please encode {file!r} with "utf-8" to ensure future builds will succeed.
+
+        If this file was produced by `setuptools` itself, cleaning up the cached files
+        and re-building/re-installing the package with a newer version of `setuptools`
+        (e.g. by updating `build-system.requires` in its `pyproject.toml`)
+        might solve the problem.
+        ********************************************************************************
+        """
+        # TODO: Add a deadline?
+        #       See comment in setuptools.unicode_utils._Utf8EncodingNeeded
+        warnings.warn(msg, PkgResourcesDeprecationWarning, stacklevel=2)
+        with open(file, "r", encoding=fallback_encoding) as f:
+            return f.read()
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+    f(*args, **kwargs)
+    return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+    "Set up global resource manager (deliberately not state-saved)"
+    manager = ResourceManager()
+    g['_manager'] = manager
+    g.update(
+        (name, getattr(manager, name))
+        for name in dir(manager)
+        if not name.startswith('_')
+    )
+
+
+@_call_aside
+def _initialize_master_working_set():
+    """
+    Prepare the master working set and make the ``require()``
+    API available.
+
+    This function has explicit effects on the global state
+    of pkg_resources. It is intended to be invoked once at
+    the initialization of this module.
+
+    Invocation by other packages is unsupported and done
+    at their own risk.
+    """
+    working_set = _declare_state('object', 'working_set', WorkingSet._build_master())
+
+    require = working_set.require
+    iter_entry_points = working_set.iter_entry_points
+    add_activation_listener = working_set.subscribe
+    run_script = working_set.run_script
+    # backward compatibility
+    run_main = run_script
+    # Activate all distributions already on sys.path with replace=False and
+    # ensure that all distributions added to the working set in the future
+    # (e.g. by calling ``require()``) will get activated as well,
+    # with higher priority (replace=True).
+    tuple(dist.activate(replace=False) for dist in working_set)
+    add_activation_listener(
+        lambda dist: dist.activate(replace=True),
+        existing=False,
+    )
+    working_set.entries = []
+    # match order
+    list(map(working_set.add_entry, sys.path))
+    globals().update(locals())
+
+
+if TYPE_CHECKING:
+    # All of these are set by the @_call_aside methods above
+    __resource_manager = ResourceManager()  # Won't exist at runtime
+    resource_exists = __resource_manager.resource_exists
+    resource_isdir = __resource_manager.resource_isdir
+    resource_filename = __resource_manager.resource_filename
+    resource_stream = __resource_manager.resource_stream
+    resource_string = __resource_manager.resource_string
+    resource_listdir = __resource_manager.resource_listdir
+    set_extraction_path = __resource_manager.set_extraction_path
+    cleanup_resources = __resource_manager.cleanup_resources
+
+    working_set = WorkingSet()
+    require = working_set.require
+    iter_entry_points = working_set.iter_entry_points
+    add_activation_listener = working_set.subscribe
+    run_script = working_set.run_script
+    run_main = run_script
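With the master working set built, the module-level convenience API is usable directly; a short sketch that assumes ``pip`` is installed:

```python
# Sketch: the module-level API delegates to the master WorkingSet.
import pkg_resources

dists = pkg_resources.require("pip")  # assumes pip is installed
print(dists[0].project_name, dists[0].version)

for ep in pkg_resources.iter_entry_points("console_scripts"):
    print(ep.name)
```
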
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..027e22c
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__init__.py
new file mode 100644
index 0000000..2325ec2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__init__.py
@@ -0,0 +1,631 @@
+"""
+Utilities for determining application-specific dirs.
+
+See <https://github.com/platformdirs/platformdirs> for details and usage.
+
+"""
+
+from __future__ import annotations
+
+import os
+import sys
+from typing import TYPE_CHECKING
+
+from .api import PlatformDirsABC
+from .version import __version__
+from .version import __version_tuple__ as __version_info__
+
+if TYPE_CHECKING:
+    from pathlib import Path
+    from typing import Literal
+
+if sys.platform == "win32":
+    from pip._vendor.platformdirs.windows import Windows as _Result
+elif sys.platform == "darwin":
+    from pip._vendor.platformdirs.macos import MacOS as _Result
+else:
+    from pip._vendor.platformdirs.unix import Unix as _Result
+
+
+def _set_platform_dir_class() -> type[PlatformDirsABC]:
+    if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
+        if os.getenv("SHELL") or os.getenv("PREFIX"):
+            return _Result
+
+        from pip._vendor.platformdirs.android import _android_folder  # noqa: PLC0415
+
+        if _android_folder() is not None:
+            from pip._vendor.platformdirs.android import Android  # noqa: PLC0415
+
+            return Android  # return to avoid redefinition of a result
+
+    return _Result
+
+
+if TYPE_CHECKING:
+    # Work around mypy issue: https://github.com/python/mypy/issues/10962
+    PlatformDirs = _Result
+else:
+    PlatformDirs = _set_platform_dir_class()  #: Currently active platform
+AppDirs = PlatformDirs  #: Backwards compatibility with appdirs
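A sketch of what the selection yields at runtime; output varies by platform:

```python
# Sketch: PlatformDirs resolves to the concrete class for this platform.
from pip._vendor.platformdirs import PlatformDirs

dirs = PlatformDirs("MyApp", "MyCompany")
print(type(dirs).__name__)  # Unix, MacOS, Windows, or Android
print(dirs.user_cache_dir)  # e.g. ~/.cache/MyApp on Linux
```
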
+
+
+def user_data_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param roaming: See `roaming`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: data directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_data_dir
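The module-level helpers are thin wrappers over ``PlatformDirs``; a sketch (the printed paths are typical Linux values, not guaranteed):

```python
# Sketch: the functional API mirrors the PlatformDirs properties.
from pip._vendor.platformdirs import user_data_dir, user_data_path

print(user_data_dir("MyApp", "MyCompany", version="1.0"))
# e.g. /home/alice/.local/share/MyApp/1.0 on Linux
print(user_data_path("MyApp"))  # same location, as a pathlib.Path
```
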
+
+
+def site_data_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    multipath: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param multipath: See `multipath`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: data directory shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        multipath=multipath,
+        ensure_exists=ensure_exists,
+    ).site_data_dir
+
+
+def user_config_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param roaming: See `roaming`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: config directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_config_dir
+
+
+def site_config_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    multipath: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param multipath: See `multipath`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: config directory shared by the users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        multipath=multipath,
+        ensure_exists=ensure_exists,
+    ).site_config_dir
+
+
+def user_cache_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: cache directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_cache_dir
+
+
+def site_cache_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: cache directory shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).site_cache_dir
+
+
+def user_state_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param roaming: See `roaming`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: state directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_state_dir
+
+
+def user_log_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: log directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_log_dir
+
+
+def user_documents_dir() -> str:
+    """:returns: documents directory tied to the user"""
+    return PlatformDirs().user_documents_dir
+
+
+def user_downloads_dir() -> str:
+    """:returns: downloads directory tied to the user"""
+    return PlatformDirs().user_downloads_dir
+
+
+def user_pictures_dir() -> str:
+    """:returns: pictures directory tied to the user"""
+    return PlatformDirs().user_pictures_dir
+
+
+def user_videos_dir() -> str:
+    """:returns: videos directory tied to the user"""
+    return PlatformDirs().user_videos_dir
+
+
+def user_music_dir() -> str:
+    """:returns: music directory tied to the user"""
+    return PlatformDirs().user_music_dir
+
+
+def user_desktop_dir() -> str:
+    """:returns: desktop directory tied to the user"""
+    return PlatformDirs().user_desktop_dir
+
+
+def user_runtime_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: runtime directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_runtime_dir
+
+
+def site_runtime_dir(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: runtime directory shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).site_runtime_dir
+
+
+def user_data_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param roaming: See `roaming`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: data path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_data_path
+
+
+def site_data_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    multipath: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param multipath: See `multipath`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: data path shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        multipath=multipath,
+        ensure_exists=ensure_exists,
+    ).site_data_path
+
+
+def user_config_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param roaming: See `roaming`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: config path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_config_path
+
+
+def site_config_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    multipath: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param multipath: See `multipath`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: config path shared by the users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        multipath=multipath,
+        ensure_exists=ensure_exists,
+    ).site_config_path
+
+
+def site_cache_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: cache path shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).site_cache_path
+
+
+def user_cache_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: cache path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_cache_path
+
+
+def user_state_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param roaming: See `roaming`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: state path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_state_path
+
+
+def user_log_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: log path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_log_path
+
+
+def user_documents_path() -> Path:
+    """:returns: documents a path tied to the user"""
+    return PlatformDirs().user_documents_path
+
+
+def user_downloads_path() -> Path:
+    """:returns: downloads path tied to the user"""
+    return PlatformDirs().user_downloads_path
+
+
+def user_pictures_path() -> Path:
+    """:returns: pictures path tied to the user"""
+    return PlatformDirs().user_pictures_path
+
+
+def user_videos_path() -> Path:
+    """:returns: videos path tied to the user"""
+    return PlatformDirs().user_videos_path
+
+
+def user_music_path() -> Path:
+    """:returns: music path tied to the user"""
+    return PlatformDirs().user_music_path
+
+
+def user_desktop_path() -> Path:
+    """:returns: desktop path tied to the user"""
+    return PlatformDirs().user_desktop_path
+
+
+def user_runtime_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: runtime path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_runtime_path
+
+
+def site_runtime_path(
+    appname: str | None = None,
+    appauthor: str | Literal[False] | None = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname`.
+    :param appauthor: See `appauthor`.
+    :param version: See `version`.
+    :param opinion: See `opinion`.
+    :param ensure_exists: See `ensure_exists`.
+    :returns: runtime path shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).site_runtime_path
+
+
+__all__ = [
+    "AppDirs",
+    "PlatformDirs",
+    "PlatformDirsABC",
+    "__version__",
+    "__version_info__",
+    "site_cache_dir",
+    "site_cache_path",
+    "site_config_dir",
+    "site_config_path",
+    "site_data_dir",
+    "site_data_path",
+    "site_runtime_dir",
+    "site_runtime_path",
+    "user_cache_dir",
+    "user_cache_path",
+    "user_config_dir",
+    "user_config_path",
+    "user_data_dir",
+    "user_data_path",
+    "user_desktop_dir",
+    "user_desktop_path",
+    "user_documents_dir",
+    "user_documents_path",
+    "user_downloads_dir",
+    "user_downloads_path",
+    "user_log_dir",
+    "user_log_path",
+    "user_music_dir",
+    "user_music_path",
+    "user_pictures_dir",
+    "user_pictures_path",
+    "user_runtime_dir",
+    "user_runtime_path",
+    "user_state_dir",
+    "user_state_path",
+    "user_videos_dir",
+    "user_videos_path",
+]
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__main__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__main__.py
new file mode 100644
index 0000000..fa8a677
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__main__.py
@@ -0,0 +1,55 @@
+"""Main entry point."""
+
+from __future__ import annotations
+
+from pip._vendor.platformdirs import PlatformDirs, __version__
+
+PROPS = (
+    "user_data_dir",
+    "user_config_dir",
+    "user_cache_dir",
+    "user_state_dir",
+    "user_log_dir",
+    "user_documents_dir",
+    "user_downloads_dir",
+    "user_pictures_dir",
+    "user_videos_dir",
+    "user_music_dir",
+    "user_runtime_dir",
+    "site_data_dir",
+    "site_config_dir",
+    "site_cache_dir",
+    "site_runtime_dir",
+)
+
+
+def main() -> None:
+    """Run the main entry point."""
+    app_name = "MyApp"
+    app_author = "MyCompany"
+
+    print(f"-- platformdirs {__version__} --")  # noqa: T201
+
+    print("-- app dirs (with optional 'version')")  # noqa: T201
+    dirs = PlatformDirs(app_name, app_author, version="1.0")
+    for prop in PROPS:
+        print(f"{prop}: {getattr(dirs, prop)}")  # noqa: T201
+
+    print("\n-- app dirs (without optional 'version')")  # noqa: T201
+    dirs = PlatformDirs(app_name, app_author)
+    for prop in PROPS:
+        print(f"{prop}: {getattr(dirs, prop)}")  # noqa: T201
+
+    print("\n-- app dirs (without optional 'appauthor')")  # noqa: T201
+    dirs = PlatformDirs(app_name)
+    for prop in PROPS:
+        print(f"{prop}: {getattr(dirs, prop)}")  # noqa: T201
+
+    print("\n-- app dirs (with disabled 'appauthor')")  # noqa: T201
+    dirs = PlatformDirs(app_name, appauthor=False)
+    for prop in PROPS:
+        print(f"{prop}: {getattr(dirs, prop)}")  # noqa: T201
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..bf8ee16
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-312.pyc
new file mode 100644
index 0000000..eaad221
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-312.pyc
new file mode 100644
index 0000000..5302f02
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-312.pyc
new file mode 100644
index 0000000..0bb1926
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-312.pyc
new file mode 100644
index 0000000..3038fda
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-312.pyc
new file mode 100644
index 0000000..3033c61
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-312.pyc
new file mode 100644
index 0000000..ab49981
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-312.pyc
new file mode 100644
index 0000000..b1d4155
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/android.py b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/android.py
new file mode 100644
index 0000000..92efc85
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/android.py
@@ -0,0 +1,249 @@
+"""Android."""
+
+from __future__ import annotations
+
+import os
+import re
+import sys
+from functools import lru_cache
+from typing import TYPE_CHECKING, cast
+
+from .api import PlatformDirsABC
+
+
+class Android(PlatformDirsABC):
+    """
+    Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_.
+
+    Makes use of the `appname`, `version`, `ensure_exists`.
+
+    """
+
+    @property
+    def user_data_dir(self) -> str:
+        """:return: data directory tied to the user, e.g. ``/data/user///files/``"""
+        return self._append_app_name_and_version(cast("str", _android_folder()), "files")
+
+    @property
+    def site_data_dir(self) -> str:
+        """:return: data directory shared by users, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def user_config_dir(self) -> str:
+        """
+        :return: config directory tied to the user, e.g. \
+        ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
+        """
+        return self._append_app_name_and_version(cast("str", _android_folder()), "shared_prefs")
+
+    @property
+    def site_config_dir(self) -> str:
+        """:return: config directory shared by the users, same as `user_config_dir`"""
+        return self.user_config_dir
+
+    @property
+    def user_cache_dir(self) -> str:
+        """:return: cache directory tied to the user, e.g.,``/data/user///cache/``"""
+        return self._append_app_name_and_version(cast("str", _android_folder()), "cache")
+
+    @property
+    def site_cache_dir(self) -> str:
+        """:return: cache directory shared by users, same as `user_cache_dir`"""
+        return self.user_cache_dir
+
+    @property
+    def user_state_dir(self) -> str:
+        """:return: state directory tied to the user, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def user_log_dir(self) -> str:
+        """
+        :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
+          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
+        """
+        path = self.user_cache_dir
+        if self.opinion:
+            path = os.path.join(path, "log")  # noqa: PTH118
+        return path
+
+    @property
+    def user_documents_dir(self) -> str:
+        """:return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``"""
+        return _android_documents_folder()
+
+    @property
+    def user_downloads_dir(self) -> str:
+        """:return: downloads directory tied to the user e.g. ``/storage/emulated/0/Downloads``"""
+        return _android_downloads_folder()
+
+    @property
+    def user_pictures_dir(self) -> str:
+        """:return: pictures directory tied to the user e.g. ``/storage/emulated/0/Pictures``"""
+        return _android_pictures_folder()
+
+    @property
+    def user_videos_dir(self) -> str:
+        """:return: videos directory tied to the user e.g. ``/storage/emulated/0/DCIM/Camera``"""
+        return _android_videos_folder()
+
+    @property
+    def user_music_dir(self) -> str:
+        """:return: music directory tied to the user e.g. ``/storage/emulated/0/Music``"""
+        return _android_music_folder()
+
+    @property
+    def user_desktop_dir(self) -> str:
+        """:return: desktop directory tied to the user e.g. ``/storage/emulated/0/Desktop``"""
+        return "/storage/emulated/0/Desktop"
+
+    @property
+    def user_runtime_dir(self) -> str:
+        """
+        :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
+          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
+        """
+        path = self.user_cache_dir
+        if self.opinion:
+            path = os.path.join(path, "tmp")  # noqa: PTH118
+        return path
+
+    @property
+    def site_runtime_dir(self) -> str:
+        """:return: runtime directory shared by users, same as `user_runtime_dir`"""
+        return self.user_runtime_dir
+
+
+@lru_cache(maxsize=1)
+def _android_folder() -> str | None:  # noqa: C901
+    """:return: base folder for the Android OS or None if it cannot be found"""
+    result: str | None = None
+    # type checker isn't happy with our "import android", just don't do this when type checking see
+    # https://stackoverflow.com/a/61394121
+    if not TYPE_CHECKING:
+        try:
+            # First try to get a path to android app using python4android (if available)...
+            from android import mActivity  # noqa: PLC0415
+
+            context = cast("android.content.Context", mActivity.getApplicationContext())  # noqa: F821
+            result = context.getFilesDir().getParentFile().getAbsolutePath()
+        except Exception:  # noqa: BLE001
+            result = None
+    if result is None:
+        try:
+            # ...and fall back to using plain pyjnius, if python4android isn't available or doesn't deliver any useful
+            # result...
+            from jnius import autoclass  # noqa: PLC0415
+
+            context = autoclass("android.content.Context")
+            result = context.getFilesDir().getParentFile().getAbsolutePath()
+        except Exception:  # noqa: BLE001
+            result = None
+    if result is None:
+        # and if that fails, too, find an android folder looking at path on the sys.path
+        # warning: only works for apps installed under /data, not adopted storage etc.
+        pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
+        for path in sys.path:
+            if pattern.match(path):
+                result = path.split("/files")[0]
+                break
+        else:
+            result = None
+    if result is None:
+        # one last try: find an android folder looking at path on the sys.path taking adopted storage paths into
+        # account
+        pattern = re.compile(r"/mnt/expand/[a-fA-F0-9-]{36}/(data|user/\d+)/(.+)/files")
+        for path in sys.path:
+            if pattern.match(path):
+                result = path.split("/files")[0]
+                break
+        else:
+            result = None
+    return result
+
+
+@lru_cache(maxsize=1)
+def _android_documents_folder() -> str:
+    """:return: documents folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass  # noqa: PLC0415
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        documents_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        documents_dir = "/storage/emulated/0/Documents"
+
+    return documents_dir
+
+
+@lru_cache(maxsize=1)
+def _android_downloads_folder() -> str:
+    """:return: downloads folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass  # noqa: PLC0415
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        downloads_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOWNLOADS).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        downloads_dir = "/storage/emulated/0/Downloads"
+
+    return downloads_dir
+
+
+@lru_cache(maxsize=1)
+def _android_pictures_folder() -> str:
+    """:return: pictures folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass  # noqa: PLC0415
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        pictures_dir: str = context.getExternalFilesDir(environment.DIRECTORY_PICTURES).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        pictures_dir = "/storage/emulated/0/Pictures"
+
+    return pictures_dir
+
+
+@lru_cache(maxsize=1)
+def _android_videos_folder() -> str:
+    """:return: videos folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass  # noqa: PLC0415
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        videos_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DCIM).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        videos_dir = "/storage/emulated/0/DCIM/Camera"
+
+    return videos_dir
+
+
+@lru_cache(maxsize=1)
+def _android_music_folder() -> str:
+    """:return: music folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass  # noqa: PLC0415
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        music_dir: str = context.getExternalFilesDir(environment.DIRECTORY_MUSIC).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        music_dir = "/storage/emulated/0/Music"
+
+    return music_dir
+
+
+__all__ = [
+    "Android",
+]
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/api.py b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/api.py
new file mode 100644
index 0000000..a352035
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/api.py
@@ -0,0 +1,299 @@
+"""Base API."""
+
+from __future__ import annotations
+
+import os
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+    from typing import Literal
+
+
+class PlatformDirsABC(ABC):  # noqa: PLR0904
+    """Abstract base class for platform directories."""
+
+    def __init__(  # noqa: PLR0913, PLR0917
+        self,
+        appname: str | None = None,
+        appauthor: str | Literal[False] | None = None,
+        version: str | None = None,
+        roaming: bool = False,  # noqa: FBT001, FBT002
+        multipath: bool = False,  # noqa: FBT001, FBT002
+        opinion: bool = True,  # noqa: FBT001, FBT002
+        ensure_exists: bool = False,  # noqa: FBT001, FBT002
+    ) -> None:
+        """
+        Create a new platform directory.
+
+        :param appname: See `appname`.
+        :param appauthor: See `appauthor`.
+        :param version: See `version`.
+        :param roaming: See `roaming`.
+        :param multipath: See `multipath`.
+        :param opinion: See `opinion`.
+        :param ensure_exists: See `ensure_exists`.
+
+        """
+        self.appname = appname  #: The name of the application.
+        self.appauthor = appauthor
+        """
+        The name of the app author or distributing body for this application.
+
+        Typically, it is the owning company name. Defaults to `appname`. You may pass ``False`` to disable it.
+
+        """
+        self.version = version
+        """
+        An optional version path element to append to the path.
+
+        You might want to use this if you want multiple versions of your app to be able to run independently. If used,
+        this would typically be ``.``.
+
+        """
+        self.roaming = roaming
+        """
+        Whether to use the roaming appdata directory on Windows.
+
+        That means that for users on a Windows network setup for roaming profiles, this user data will be synced on
+        login (see
+        `here <https://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).
+
+        """
+        self.multipath = multipath
+        """
+        An optional parameter which indicates that the entire list of data dirs should be returned.
+
+        By default, only the first item is returned.
+
+        """
+        self.opinion = opinion  #: A flag indicating whether to use opinionated values.
+        self.ensure_exists = ensure_exists
+        """
+        Optionally create the directory (and any missing parents) upon access if it does not exist.
+
+        By default, no directories are created.
+
+        """
+
+    def _append_app_name_and_version(self, *base: str) -> str:
+        params = list(base[1:])
+        if self.appname:
+            params.append(self.appname)
+            if self.version:
+                params.append(self.version)
+        path = os.path.join(base[0], *params)  # noqa: PTH118
+        self._optionally_create_directory(path)
+        return path
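A sketch of how the helper composes paths, calling the private method on the Unix backend purely for illustration (hypothetical base directory):

```python
# Sketch: base + appname + version; the directory is only created
# when ensure_exists=True was passed to the constructor.
from pip._vendor.platformdirs.unix import Unix

d = Unix(appname="MyApp", version="1.0")
print(d._append_app_name_and_version("/base"))  # /base/MyApp/1.0
```
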
+
+    def _optionally_create_directory(self, path: str) -> None:
+        if self.ensure_exists:
+            Path(path).mkdir(parents=True, exist_ok=True)
+
+    def _first_item_as_path_if_multipath(self, directory: str) -> Path:
+        if self.multipath:
+            # when multipath is enabled, ``directory`` may contain several
+            # os.pathsep-separated paths; keep only the first one
+            directory = directory.split(os.pathsep)[0]
+        return Path(directory)
+
+    @property
+    @abstractmethod
+    def user_data_dir(self) -> str:
+        """:return: data directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def site_data_dir(self) -> str:
+        """:return: data directory shared by users"""
+
+    @property
+    @abstractmethod
+    def user_config_dir(self) -> str:
+        """:return: config directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def site_config_dir(self) -> str:
+        """:return: config directory shared by the users"""
+
+    @property
+    @abstractmethod
+    def user_cache_dir(self) -> str:
+        """:return: cache directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def site_cache_dir(self) -> str:
+        """:return: cache directory shared by users"""
+
+    @property
+    @abstractmethod
+    def user_state_dir(self) -> str:
+        """:return: state directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def user_log_dir(self) -> str:
+        """:return: log directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def user_documents_dir(self) -> str:
+        """:return: documents directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def user_downloads_dir(self) -> str:
+        """:return: downloads directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def user_pictures_dir(self) -> str:
+        """:return: pictures directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def user_videos_dir(self) -> str:
+        """:return: videos directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def user_music_dir(self) -> str:
+        """:return: music directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def user_desktop_dir(self) -> str:
+        """:return: desktop directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def user_runtime_dir(self) -> str:
+        """:return: runtime directory tied to the user"""
+
+    @property
+    @abstractmethod
+    def site_runtime_dir(self) -> str:
+        """:return: runtime directory shared by users"""
+
+    @property
+    def user_data_path(self) -> Path:
+        """:return: data path tied to the user"""
+        return Path(self.user_data_dir)
+
+    @property
+    def site_data_path(self) -> Path:
+        """:return: data path shared by users"""
+        return Path(self.site_data_dir)
+
+    @property
+    def user_config_path(self) -> Path:
+        """:return: config path tied to the user"""
+        return Path(self.user_config_dir)
+
+    @property
+    def site_config_path(self) -> Path:
+        """:return: config path shared by the users"""
+        return Path(self.site_config_dir)
+
+    @property
+    def user_cache_path(self) -> Path:
+        """:return: cache path tied to the user"""
+        return Path(self.user_cache_dir)
+
+    @property
+    def site_cache_path(self) -> Path:
+        """:return: cache path shared by users"""
+        return Path(self.site_cache_dir)
+
+    @property
+    def user_state_path(self) -> Path:
+        """:return: state path tied to the user"""
+        return Path(self.user_state_dir)
+
+    @property
+    def user_log_path(self) -> Path:
+        """:return: log path tied to the user"""
+        return Path(self.user_log_dir)
+
+    @property
+    def user_documents_path(self) -> Path:
+        """:return: documents a path tied to the user"""
+        return Path(self.user_documents_dir)
+
+    @property
+    def user_downloads_path(self) -> Path:
+        """:return: downloads path tied to the user"""
+        return Path(self.user_downloads_dir)
+
+    @property
+    def user_pictures_path(self) -> Path:
+        """:return: pictures path tied to the user"""
+        return Path(self.user_pictures_dir)
+
+    @property
+    def user_videos_path(self) -> Path:
+        """:return: videos path tied to the user"""
+        return Path(self.user_videos_dir)
+
+    @property
+    def user_music_path(self) -> Path:
+        """:return: music path tied to the user"""
+        return Path(self.user_music_dir)
+
+    @property
+    def user_desktop_path(self) -> Path:
+        """:return: desktop path tied to the user"""
+        return Path(self.user_desktop_dir)
+
+    @property
+    def user_runtime_path(self) -> Path:
+        """:return: runtime path tied to the user"""
+        return Path(self.user_runtime_dir)
+
+    @property
+    def site_runtime_path(self) -> Path:
+        """:return: runtime path shared by users"""
+        return Path(self.site_runtime_dir)
+
+    def iter_config_dirs(self) -> Iterator[str]:
+        """:yield: all user and site configuration directories."""
+        yield self.user_config_dir
+        yield self.site_config_dir
+
+    def iter_data_dirs(self) -> Iterator[str]:
+        """:yield: all user and site data directories."""
+        yield self.user_data_dir
+        yield self.site_data_dir
+
+    def iter_cache_dirs(self) -> Iterator[str]:
+        """:yield: all user and site cache directories."""
+        yield self.user_cache_dir
+        yield self.site_cache_dir
+
+    def iter_runtime_dirs(self) -> Iterator[str]:
+        """:yield: all user and site runtime directories."""
+        yield self.user_runtime_dir
+        yield self.site_runtime_dir
+
+    def iter_config_paths(self) -> Iterator[Path]:
+        """:yield: all user and site configuration paths."""
+        for path in self.iter_config_dirs():
+            yield Path(path)
+
+    def iter_data_paths(self) -> Iterator[Path]:
+        """:yield: all user and site data paths."""
+        for path in self.iter_data_dirs():
+            yield Path(path)
+
+    def iter_cache_paths(self) -> Iterator[Path]:
+        """:yield: all user and site cache paths."""
+        for path in self.iter_cache_dirs():
+            yield Path(path)
+
+    def iter_runtime_paths(self) -> Iterator[Path]:
+        """:yield: all user and site runtime paths."""
+        for path in self.iter_runtime_dirs():
+            yield Path(path)
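
The composition rule in `_append_app_name_and_version` above is easy to misread: `version` is only honored when an `appname` is present. A minimal standalone sketch (hypothetical `compose` helper, not part of the vendored module)::

    import os

    def compose(base: str, appname: str | None = None, version: str | None = None) -> str:
        # version is appended only when an appname is present,
        # mirroring the guard in _append_app_name_and_version.
        parts = [appname] if appname else []
        if appname and version:
            parts.append(version)
        return os.path.join(base, *parts)

    assert compose("/data", "MyApp", "1.0") == "/data/MyApp/1.0"
    assert compose("/data", version="1.0") == "/data"  # version alone is ignored
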
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/macos.py b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/macos.py
new file mode 100644
index 0000000..e4b0391
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/macos.py
@@ -0,0 +1,144 @@
+"""macOS."""
+
+from __future__ import annotations
+
+import os.path
+import sys
+from typing import TYPE_CHECKING
+
+from .api import PlatformDirsABC
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+
+class MacOS(PlatformDirsABC):
+    """
+    Platform directories for the macOS operating system.
+
+    Follows the guidance from `Apple documentation
+    <https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
+    Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`,
+    `version <platformdirs.api.PlatformDirsABC.version>`,
+    `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
+
+    """
+
+    @property
+    def user_data_dir(self) -> str:
+        """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
+        return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support"))  # noqa: PTH111
+
+    @property
+    def site_data_dir(self) -> str:
+        """
+        :return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``.
+          If we're using a Python binary managed by `Homebrew <https://brew.sh>`_, the directory
+          will be under the Homebrew prefix, e.g. ``/opt/homebrew/share/$appname/$version``.
+          If `multipath <platformdirs.api.PlatformDirsABC.multipath>` is enabled, and we're in Homebrew,
+          the response is a multi-path string separated by ":", e.g.
+          ``/opt/homebrew/share/$appname/$version:/Library/Application Support/$appname/$version``
+        """
+        is_homebrew = sys.prefix.startswith("/opt/homebrew")
+        path_list = [self._append_app_name_and_version("/opt/homebrew/share")] if is_homebrew else []
+        path_list.append(self._append_app_name_and_version("/Library/Application Support"))
+        if self.multipath:
+            return os.pathsep.join(path_list)
+        return path_list[0]
+
+    @property
+    def site_data_path(self) -> Path:
+        """:return: data path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""
+        return self._first_item_as_path_if_multipath(self.site_data_dir)
+
+    @property
+    def user_config_dir(self) -> str:
+        """:return: config directory tied to the user, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def site_config_dir(self) -> str:
+        """:return: config directory shared by the users, same as `site_data_dir`"""
+        return self.site_data_dir
+
+    @property
+    def user_cache_dir(self) -> str:
+        """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
+        return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches"))  # noqa: PTH111
+
+    @property
+    def site_cache_dir(self) -> str:
+        """
+        :return: cache directory shared by users, e.g. ``/Library/Caches/$appname/$version``.
+          If we're using a Python binary managed by `Homebrew <https://brew.sh>`_, the directory
+          will be under the Homebrew prefix, e.g. ``/opt/homebrew/var/cache/$appname/$version``.
+          If `multipath <platformdirs.api.PlatformDirsABC.multipath>` is enabled, and we're in Homebrew,
+          the response is a multi-path string separated by ":", e.g.
+          ``/opt/homebrew/var/cache/$appname/$version:/Library/Caches/$appname/$version``
+        """
+        is_homebrew = sys.prefix.startswith("/opt/homebrew")
+        path_list = [self._append_app_name_and_version("/opt/homebrew/var/cache")] if is_homebrew else []
+        path_list.append(self._append_app_name_and_version("/Library/Caches"))
+        if self.multipath:
+            return os.pathsep.join(path_list)
+        return path_list[0]
+
+    @property
+    def site_cache_path(self) -> Path:
+        """:return: cache path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""
+        return self._first_item_as_path_if_multipath(self.site_cache_dir)
+
+    @property
+    def user_state_dir(self) -> str:
+        """:return: state directory tied to the user, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def user_log_dir(self) -> str:
+        """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
+        return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs"))  # noqa: PTH111
+
+    @property
+    def user_documents_dir(self) -> str:
+        """:return: documents directory tied to the user, e.g. ``~/Documents``"""
+        return os.path.expanduser("~/Documents")  # noqa: PTH111
+
+    @property
+    def user_downloads_dir(self) -> str:
+        """:return: downloads directory tied to the user, e.g. ``~/Downloads``"""
+        return os.path.expanduser("~/Downloads")  # noqa: PTH111
+
+    @property
+    def user_pictures_dir(self) -> str:
+        """:return: pictures directory tied to the user, e.g. ``~/Pictures``"""
+        return os.path.expanduser("~/Pictures")  # noqa: PTH111
+
+    @property
+    def user_videos_dir(self) -> str:
+        """:return: videos directory tied to the user, e.g. ``~/Movies``"""
+        return os.path.expanduser("~/Movies")  # noqa: PTH111
+
+    @property
+    def user_music_dir(self) -> str:
+        """:return: music directory tied to the user, e.g. ``~/Music``"""
+        return os.path.expanduser("~/Music")  # noqa: PTH111
+
+    @property
+    def user_desktop_dir(self) -> str:
+        """:return: desktop directory tied to the user, e.g. ``~/Desktop``"""
+        return os.path.expanduser("~/Desktop")  # noqa: PTH111
+
+    @property
+    def user_runtime_dir(self) -> str:
+        """:return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
+        return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems"))  # noqa: PTH111
+
+    @property
+    def site_runtime_dir(self) -> str:
+        """:return: runtime directory shared by users, same as `user_runtime_dir`"""
+        return self.user_runtime_dir
+
+
+__all__ = [
+    "MacOS",
+]
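
The Homebrew branches in `site_data_dir` and `site_cache_dir` above both reduce to the same join rule; a small sketch with hypothetical values::

    import os

    # When sys.prefix starts with /opt/homebrew, the Homebrew candidate is
    # listed first; multipath decides whether both candidates are returned.
    candidates = [
        "/opt/homebrew/share/MyApp/1.0",
        "/Library/Application Support/MyApp/1.0",
    ]
    multipath = True
    result = os.pathsep.join(candidates) if multipath else candidates[0]
    # '/opt/homebrew/share/MyApp/1.0:/Library/Application Support/MyApp/1.0'
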
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/unix.py b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/unix.py
new file mode 100644
index 0000000..fc75d8d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/unix.py
@@ -0,0 +1,272 @@
+"""Unix."""
+
+from __future__ import annotations
+
+import os
+import sys
+from configparser import ConfigParser
+from pathlib import Path
+from typing import TYPE_CHECKING, NoReturn
+
+from .api import PlatformDirsABC
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+if sys.platform == "win32":
+
+    def getuid() -> NoReturn:
+        msg = "should only be used on Unix"
+        raise RuntimeError(msg)
+
+else:
+    from os import getuid
+
+
+class Unix(PlatformDirsABC):  # noqa: PLR0904
+    """
+    On Unix/Linux, we follow the `XDG Basedir Spec
+    <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
+
+    The spec allows overriding directories with environment variables. The examples shown are the default values,
+    alongside the name of the environment variable that overrides them. Makes use of the `appname
+    <platformdirs.api.PlatformDirsABC.appname>`, `version <platformdirs.api.PlatformDirsABC.version>`, `multipath
+    <platformdirs.api.PlatformDirsABC.multipath>`, `opinion <platformdirs.api.PlatformDirsABC.opinion>`, `ensure_exists
+    <platformdirs.api.PlatformDirsABC.ensure_exists>`.
+
+    """
+
+    @property
+    def user_data_dir(self) -> str:
+        """
+        :return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
+         ``$XDG_DATA_HOME/$appname/$version``
+        """
+        path = os.environ.get("XDG_DATA_HOME", "")
+        if not path.strip():
+            path = os.path.expanduser("~/.local/share")  # noqa: PTH111
+        return self._append_app_name_and_version(path)
+
+    @property
+    def _site_data_dirs(self) -> list[str]:
+        path = os.environ.get("XDG_DATA_DIRS", "")
+        if not path.strip():
+            path = f"/usr/local/share{os.pathsep}/usr/share"
+        return [self._append_app_name_and_version(p) for p in path.split(os.pathsep)]
+
+    @property
+    def site_data_dir(self) -> str:
+        """
+        :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
+         enabled and ``XDG_DATA_DIRS`` is set to a multi-path value, the response is also a multi-path string
+         separated by the OS path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
+        """
+        # XDG default for $XDG_DATA_DIRS; only first, if multipath is False
+        dirs = self._site_data_dirs
+        if not self.multipath:
+            return dirs[0]
+        return os.pathsep.join(dirs)
+
+    @property
+    def user_config_dir(self) -> str:
+        """
+        :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
+         ``$XDG_CONFIG_HOME/$appname/$version``
+        """
+        path = os.environ.get("XDG_CONFIG_HOME", "")
+        if not path.strip():
+            path = os.path.expanduser("~/.config")  # noqa: PTH111
+        return self._append_app_name_and_version(path)
+
+    @property
+    def _site_config_dirs(self) -> list[str]:
+        path = os.environ.get("XDG_CONFIG_DIRS", "")
+        if not path.strip():
+            path = "/etc/xdg"
+        return [self._append_app_name_and_version(p) for p in path.split(os.pathsep)]
+
+    @property
+    def site_config_dir(self) -> str:
+        """
+        :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`
+         is enabled and ``XDG_CONFIG_DIRS`` is set to a multi-path value, the response is also a multi-path string
+         separated by the OS path separator), e.g. ``/etc/xdg/$appname/$version``
+        """
+        # XDG default for $XDG_CONFIG_DIRS; only first, if multipath is False
+        dirs = self._site_config_dirs
+        if not self.multipath:
+            return dirs[0]
+        return os.pathsep.join(dirs)
+
+    @property
+    def user_cache_dir(self) -> str:
+        """
+        :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
+         ``$XDG_CACHE_HOME/$appname/$version``
+        """
+        path = os.environ.get("XDG_CACHE_HOME", "")
+        if not path.strip():
+            path = os.path.expanduser("~/.cache")  # noqa: PTH111
+        return self._append_app_name_and_version(path)
+
+    @property
+    def site_cache_dir(self) -> str:
+        """:return: cache directory shared by users, e.g. ``/var/cache/$appname/$version``"""
+        return self._append_app_name_and_version("/var/cache")
+
+    @property
+    def user_state_dir(self) -> str:
+        """
+        :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
+         ``$XDG_STATE_HOME/$appname/$version``
+        """
+        path = os.environ.get("XDG_STATE_HOME", "")
+        if not path.strip():
+            path = os.path.expanduser("~/.local/state")  # noqa: PTH111
+        return self._append_app_name_and_version(path)
+
+    @property
+    def user_log_dir(self) -> str:
+        """:return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it"""
+        path = self.user_state_dir
+        if self.opinion:
+            path = os.path.join(path, "log")  # noqa: PTH118
+            self._optionally_create_directory(path)
+        return path
+
+    @property
+    def user_documents_dir(self) -> str:
+        """:return: documents directory tied to the user, e.g. ``~/Documents``"""
+        return _get_user_media_dir("XDG_DOCUMENTS_DIR", "~/Documents")
+
+    @property
+    def user_downloads_dir(self) -> str:
+        """:return: downloads directory tied to the user, e.g. ``~/Downloads``"""
+        return _get_user_media_dir("XDG_DOWNLOAD_DIR", "~/Downloads")
+
+    @property
+    def user_pictures_dir(self) -> str:
+        """:return: pictures directory tied to the user, e.g. ``~/Pictures``"""
+        return _get_user_media_dir("XDG_PICTURES_DIR", "~/Pictures")
+
+    @property
+    def user_videos_dir(self) -> str:
+        """:return: videos directory tied to the user, e.g. ``~/Videos``"""
+        return _get_user_media_dir("XDG_VIDEOS_DIR", "~/Videos")
+
+    @property
+    def user_music_dir(self) -> str:
+        """:return: music directory tied to the user, e.g. ``~/Music``"""
+        return _get_user_media_dir("XDG_MUSIC_DIR", "~/Music")
+
+    @property
+    def user_desktop_dir(self) -> str:
+        """:return: desktop directory tied to the user, e.g. ``~/Desktop``"""
+        return _get_user_media_dir("XDG_DESKTOP_DIR", "~/Desktop")
+
+    @property
+    def user_runtime_dir(self) -> str:
+        """
+        :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or
+         ``$XDG_RUNTIME_DIR/$appname/$version``.
+
+         For FreeBSD/OpenBSD/NetBSD, if ``$XDG_RUNTIME_DIR`` is not set, it would return
+         ``/var/run/user/$(id -u)/$appname/$version`` if that directory exists, otherwise
+         ``/tmp/runtime-$(id -u)/$appname/$version``.
+        """
+        path = os.environ.get("XDG_RUNTIME_DIR", "")
+        if not path.strip():
+            if sys.platform.startswith(("freebsd", "openbsd", "netbsd")):
+                path = f"/var/run/user/{getuid()}"
+                if not Path(path).exists():
+                    path = f"/tmp/runtime-{getuid()}"  # noqa: S108
+            else:
+                path = f"/run/user/{getuid()}"
+        return self._append_app_name_and_version(path)
+
+    @property
+    def site_runtime_dir(self) -> str:
+        """
+        :return: runtime directory shared by users, e.g. ``/run/$appname/$version`` or \
+        ``$XDG_RUNTIME_DIR/$appname/$version``.
+
+        Note that this behaves almost exactly like `user_runtime_dir` if ``$XDG_RUNTIME_DIR`` is set, but will
+        fall back to paths associated to the root user instead of a regular logged-in user if it's not set.
+
+        If you wish to ensure that a logged-in root user path is returned e.g. ``/run/user/0``, use `user_runtime_dir`
+        instead.
+
+        For FreeBSD/OpenBSD/NetBSD, it would return ``/var/run/$appname/$version`` if ``$XDG_RUNTIME_DIR`` is not set.
+        """
+        path = os.environ.get("XDG_RUNTIME_DIR", "")
+        if not path.strip():
+            if sys.platform.startswith(("freebsd", "openbsd", "netbsd")):
+                path = "/var/run"
+            else:
+                path = "/run"
+        return self._append_app_name_and_version(path)
+
+    @property
+    def site_data_path(self) -> Path:
+        """:return: data path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""
+        return self._first_item_as_path_if_multipath(self.site_data_dir)
+
+    @property
+    def site_config_path(self) -> Path:
+        """:return: config path shared by the users, returns the first item, even if ``multipath`` is set to ``True``"""
+        return self._first_item_as_path_if_multipath(self.site_config_dir)
+
+    @property
+    def site_cache_path(self) -> Path:
+        """:return: cache path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""
+        return self._first_item_as_path_if_multipath(self.site_cache_dir)
+
+    def iter_config_dirs(self) -> Iterator[str]:
+        """:yield: all user and site configuration directories."""
+        yield self.user_config_dir
+        yield from self._site_config_dirs
+
+    def iter_data_dirs(self) -> Iterator[str]:
+        """:yield: all user and site data directories."""
+        yield self.user_data_dir
+        yield from self._site_data_dirs
+
+
+def _get_user_media_dir(env_var: str, fallback_tilde_path: str) -> str:
+    media_dir = _get_user_dirs_folder(env_var)
+    if media_dir is None:
+        media_dir = os.environ.get(env_var, "").strip()
+        if not media_dir:
+            media_dir = os.path.expanduser(fallback_tilde_path)  # noqa: PTH111
+
+    return media_dir
+
+
+def _get_user_dirs_folder(key: str) -> str | None:
+    """
+    Return directory from user-dirs.dirs config file.
+
+    See https://freedesktop.org/wiki/Software/xdg-user-dirs/.
+
+    """
+    user_dirs_config_path = Path(Unix().user_config_dir) / "user-dirs.dirs"
+    if user_dirs_config_path.exists():
+        parser = ConfigParser()
+
+        with user_dirs_config_path.open() as stream:
+            # Add fake section header, so ConfigParser doesn't complain
+            parser.read_string(f"[top]\n{stream.read()}")
+
+        if key not in parser["top"]:
+            return None
+
+        path = parser["top"][key].strip('"')
+        # Handle relative home paths
+        return path.replace("$HOME", os.path.expanduser("~"))  # noqa: PTH111
+
+    return None
+
+
+__all__ = [
+    "Unix",
+]
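
`_get_user_dirs_folder` above relies on a small trick: `user-dirs.dirs` uses `KEY="value"` shell syntax with no INI section header, so a fake one is prepended before ConfigParser sees the text. A self-contained sketch of that parsing step::

    import os
    from configparser import ConfigParser

    raw = 'XDG_DOWNLOAD_DIR="$HOME/Downloads"\n'  # sample file contents
    parser = ConfigParser()
    parser.read_string(f"[top]\n{raw}")  # fake section header
    # ConfigParser option lookups are case-insensitive, so the
    # upper-case key still resolves.
    path = parser["top"]["XDG_DOWNLOAD_DIR"].strip('"')
    print(path.replace("$HOME", os.path.expanduser("~")))  # e.g. /home/user/Downloads
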
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/version.py b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/version.py
new file mode 100644
index 0000000..611ac61
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/version.py
@@ -0,0 +1,21 @@
+# file generated by setuptools-scm
+# don't change, don't track in version control
+
+__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple
+    from typing import Union
+
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+    VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '4.3.8'
+__version_tuple__ = version_tuple = (4, 3, 8)
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/windows.py b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/windows.py
new file mode 100644
index 0000000..d7bc960
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/platformdirs/windows.py
@@ -0,0 +1,272 @@
+"""Windows."""
+
+from __future__ import annotations
+
+import os
+import sys
+from functools import lru_cache
+from typing import TYPE_CHECKING
+
+from .api import PlatformDirsABC
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+
+
+class Windows(PlatformDirsABC):
+    """
+    `MSDN on where to store app data files <https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid>`_.
+
+    Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`, `appauthor
+    <platformdirs.api.PlatformDirsABC.appauthor>`, `version <platformdirs.api.PlatformDirsABC.version>`, `roaming
+    <platformdirs.api.PlatformDirsABC.roaming>`, `opinion <platformdirs.api.PlatformDirsABC.opinion>`, `ensure_exists
+    <platformdirs.api.PlatformDirsABC.ensure_exists>`.
+
+    """
+
+    @property
+    def user_data_dir(self) -> str:
+        """
+        :return: data directory tied to the user, e.g.
+         ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or
+         ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)
+        """
+        const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"
+        path = os.path.normpath(get_win_folder(const))
+        return self._append_parts(path)
+
+    def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:
+        params = []
+        if self.appname:
+            if self.appauthor is not False:
+                author = self.appauthor or self.appname
+                params.append(author)
+            params.append(self.appname)
+            if opinion_value is not None and self.opinion:
+                params.append(opinion_value)
+            if self.version:
+                params.append(self.version)
+        path = os.path.join(path, *params)  # noqa: PTH118
+        self._optionally_create_directory(path)
+        return path
+
+    @property
+    def site_data_dir(self) -> str:
+        """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``"""
+        path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
+        return self._append_parts(path)
+
+    @property
+    def user_config_dir(self) -> str:
+        """:return: config directory tied to the user, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def site_config_dir(self) -> str:
+        """:return: config directory shared by the users, same as `site_data_dir`"""
+        return self.site_data_dir
+
+    @property
+    def user_cache_dir(self) -> str:
+        """
+        :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.
+         ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
+        """
+        path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))
+        return self._append_parts(path, opinion_value="Cache")
+
+    @property
+    def site_cache_dir(self) -> str:
+        """:return: cache directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname\\Cache\\$version``"""
+        path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
+        return self._append_parts(path, opinion_value="Cache")
+
+    @property
+    def user_state_dir(self) -> str:
+        """:return: state directory tied to the user, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def user_log_dir(self) -> str:
+        """:return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it"""
+        path = self.user_data_dir
+        if self.opinion:
+            path = os.path.join(path, "Logs")  # noqa: PTH118
+            self._optionally_create_directory(path)
+        return path
+
+    @property
+    def user_documents_dir(self) -> str:
+        """:return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``"""
+        return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))
+
+    @property
+    def user_downloads_dir(self) -> str:
+        """:return: downloads directory tied to the user e.g. ``%USERPROFILE%\\Downloads``"""
+        return os.path.normpath(get_win_folder("CSIDL_DOWNLOADS"))
+
+    @property
+    def user_pictures_dir(self) -> str:
+        """:return: pictures directory tied to the user e.g. ``%USERPROFILE%\\Pictures``"""
+        return os.path.normpath(get_win_folder("CSIDL_MYPICTURES"))
+
+    @property
+    def user_videos_dir(self) -> str:
+        """:return: videos directory tied to the user e.g. ``%USERPROFILE%\\Videos``"""
+        return os.path.normpath(get_win_folder("CSIDL_MYVIDEO"))
+
+    @property
+    def user_music_dir(self) -> str:
+        """:return: music directory tied to the user e.g. ``%USERPROFILE%\\Music``"""
+        return os.path.normpath(get_win_folder("CSIDL_MYMUSIC"))
+
+    @property
+    def user_desktop_dir(self) -> str:
+        """:return: desktop directory tied to the user, e.g. ``%USERPROFILE%\\Desktop``"""
+        return os.path.normpath(get_win_folder("CSIDL_DESKTOPDIRECTORY"))
+
+    @property
+    def user_runtime_dir(self) -> str:
+        """
+        :return: runtime directory tied to the user, e.g.
+         ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``
+        """
+        path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp"))  # noqa: PTH118
+        return self._append_parts(path)
+
+    @property
+    def site_runtime_dir(self) -> str:
+        """:return: runtime directory shared by users, same as `user_runtime_dir`"""
+        return self.user_runtime_dir
+
+
+def get_win_folder_from_env_vars(csidl_name: str) -> str:
+    """Get folder from environment variables."""
+    result = get_win_folder_if_csidl_name_not_env_var(csidl_name)
+    if result is not None:
+        return result
+
+    env_var_name = {
+        "CSIDL_APPDATA": "APPDATA",
+        "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
+        "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
+    }.get(csidl_name)
+    if env_var_name is None:
+        msg = f"Unknown CSIDL name: {csidl_name}"
+        raise ValueError(msg)
+    result = os.environ.get(env_var_name)
+    if result is None:
+        msg = f"Unset environment variable: {env_var_name}"
+        raise ValueError(msg)
+    return result
+
+
+def get_win_folder_if_csidl_name_not_env_var(csidl_name: str) -> str | None:
+    """Get a folder for a CSIDL name that does not exist as an environment variable."""
+    if csidl_name == "CSIDL_PERSONAL":
+        return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents")  # noqa: PTH118
+
+    if csidl_name == "CSIDL_DOWNLOADS":
+        return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Downloads")  # noqa: PTH118
+
+    if csidl_name == "CSIDL_MYPICTURES":
+        return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Pictures")  # noqa: PTH118
+
+    if csidl_name == "CSIDL_MYVIDEO":
+        return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Videos")  # noqa: PTH118
+
+    if csidl_name == "CSIDL_MYMUSIC":
+        return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Music")  # noqa: PTH118
+    return None
+
+
+def get_win_folder_from_registry(csidl_name: str) -> str:
+    """
+    Get folder from the registry.
+
+    This is a fallback technique at best. I'm not sure if using the registry for these guarantees us the correct answer
+    for all CSIDL_* names.
+
+    """
+    shell_folder_name = {
+        "CSIDL_APPDATA": "AppData",
+        "CSIDL_COMMON_APPDATA": "Common AppData",
+        "CSIDL_LOCAL_APPDATA": "Local AppData",
+        "CSIDL_PERSONAL": "Personal",
+        "CSIDL_DOWNLOADS": "{374DE290-123F-4565-9164-39C4925E467B}",
+        "CSIDL_MYPICTURES": "My Pictures",
+        "CSIDL_MYVIDEO": "My Video",
+        "CSIDL_MYMUSIC": "My Music",
+    }.get(csidl_name)
+    if shell_folder_name is None:
+        msg = f"Unknown CSIDL name: {csidl_name}"
+        raise ValueError(msg)
+    if sys.platform != "win32":  # only needed for mypy type checker to know that this code runs only on Windows
+        raise NotImplementedError
+    import winreg  # noqa: PLC0415
+
+    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
+    directory, _ = winreg.QueryValueEx(key, shell_folder_name)
+    return str(directory)
+
+
+def get_win_folder_via_ctypes(csidl_name: str) -> str:
+    """Get folder with ctypes."""
+    # There is no 'CSIDL_DOWNLOADS'.
+    # Use 'CSIDL_PROFILE' (40) and append the default folder 'Downloads' instead.
+    # https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid
+
+    import ctypes  # noqa: PLC0415
+
+    csidl_const = {
+        "CSIDL_APPDATA": 26,
+        "CSIDL_COMMON_APPDATA": 35,
+        "CSIDL_LOCAL_APPDATA": 28,
+        "CSIDL_PERSONAL": 5,
+        "CSIDL_MYPICTURES": 39,
+        "CSIDL_MYVIDEO": 14,
+        "CSIDL_MYMUSIC": 13,
+        "CSIDL_DOWNLOADS": 40,
+        "CSIDL_DESKTOPDIRECTORY": 16,
+    }.get(csidl_name)
+    if csidl_const is None:
+        msg = f"Unknown CSIDL name: {csidl_name}"
+        raise ValueError(msg)
+
+    buf = ctypes.create_unicode_buffer(1024)
+    windll = getattr(ctypes, "windll")  # noqa: B009 # using getattr to avoid false positive with mypy type checker
+    windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to short path name if it has high-bit chars.
+    if any(ord(c) > 255 for c in buf):  # noqa: PLR2004
+        buf2 = ctypes.create_unicode_buffer(1024)
+        if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+            buf = buf2
+
+    if csidl_name == "CSIDL_DOWNLOADS":
+        return os.path.join(buf.value, "Downloads")  # noqa: PTH118
+
+    return buf.value
+
+
+def _pick_get_win_folder() -> Callable[[str], str]:
+    try:
+        import ctypes  # noqa: PLC0415
+    except ImportError:
+        pass
+    else:
+        if hasattr(ctypes, "windll"):
+            return get_win_folder_via_ctypes
+    try:
+        import winreg  # noqa: PLC0415, F401
+    except ImportError:
+        return get_win_folder_from_env_vars
+    else:
+        return get_win_folder_from_registry
+
+
+get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())
+
+__all__ = [
+    "Windows",
+]
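
`_pick_get_win_folder` above resolves its backend once at import time, and the chosen function is memoized through `lru_cache`. The selection order can be sketched on its own::

    def pick_backend() -> str:
        # Prefer ctypes (SHGetFolderPathW), then the registry, then plain
        # environment variables, so the import also succeeds off-Windows.
        try:
            import ctypes
        except ImportError:
            pass
        else:
            if hasattr(ctypes, "windll"):  # present only on Windows
                return "ctypes"
        try:
            import winreg  # noqa: F401
        except ImportError:
            return "env-vars"
        return "registry"

    print(pick_backend())  # "ctypes" on Windows; "env-vars" on Unix
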
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__init__.py
new file mode 100644
index 0000000..cb229c9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__init__.py
@@ -0,0 +1,82 @@
+"""
+    Pygments
+    ~~~~~~~~
+
+    Pygments is a syntax highlighting package written in Python.
+
+    It is a generic syntax highlighter for general use in all kinds of software
+    such as forum systems, wikis or other applications that need to prettify
+    source code. Highlights are:
+
+    * a wide range of common languages and markup formats is supported
+    * special attention is paid to details, increasing quality by a fair amount
+    * support for new languages and formats is added easily
+    * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
+      formats that PIL supports, and ANSI sequences
+    * it is usable as a command-line tool and as a library
+    * ... and it highlights even Brainfuck!
+
+    The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
+
+    .. _Pygments master branch:
+       https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+from io import StringIO, BytesIO
+
+__version__ = '2.19.2'
+__docformat__ = 'restructuredtext'
+
+__all__ = ['lex', 'format', 'highlight']
+
+
+def lex(code, lexer):
+    """
+    Lex `code` with the `lexer` (must be a `Lexer` instance)
+    and return an iterable of tokens. Currently, this only calls
+    `lexer.get_tokens()`.
+    """
+    try:
+        return lexer.get_tokens(code)
+    except TypeError:
+        # Heuristic to catch a common mistake.
+        from pip._vendor.pygments.lexer import RegexLexer
+        if isinstance(lexer, type) and issubclass(lexer, RegexLexer):
+            raise TypeError('lex() argument must be a lexer instance, '
+                            'not a class')
+        raise
+
+
+def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
+    """
+    Format ``tokens`` (an iterable of tokens) with the formatter ``formatter``
+    (a `Formatter` instance).
+
+    If ``outfile`` is given and a valid file object (an object with a
+    ``write`` method), the result will be written to it, otherwise it
+    is returned as a string.
+    """
+    try:
+        if not outfile:
+            realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
+            formatter.format(tokens, realoutfile)
+            return realoutfile.getvalue()
+        else:
+            formatter.format(tokens, outfile)
+    except TypeError:
+        # Heuristic to catch a common mistake.
+        from pip._vendor.pygments.formatter import Formatter
+        if isinstance(formatter, type) and issubclass(formatter, Formatter):
+            raise TypeError('format() argument must be a formatter instance, '
+                            'not a class')
+        raise
+
+
+def highlight(code, lexer, formatter, outfile=None):
+    """
+    This is the most high-level highlighting function. It combines `lex` and
+    `format` in one function.
+    """
+    return format(lex(code, lexer), formatter, outfile)
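
The `lex`/`format`/`highlight` trio expects lexer and formatter *instances*, not classes. A usage sketch against a standalone Pygments install (the vendored copy lives under `pip._vendor` and is not meant to be imported directly)::

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import TerminalFormatter

    # highlight() is just format(lex(code, lexer), formatter); passing the
    # class PythonLexer instead of PythonLexer() trips the TypeError guard.
    print(highlight('print("hi")', PythonLexer(), TerminalFormatter()))
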
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__main__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__main__.py
new file mode 100644
index 0000000..a2e612f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__main__.py
@@ -0,0 +1,17 @@
+"""
+    pygments.__main__
+    ~~~~~~~~~~~~~~~~~
+
+    Main entry point for ``python -m pygments``.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import sys
+from pip._vendor.pygments.cmdline import main
+
+try:
+    sys.exit(main(sys.argv))
+except KeyboardInterrupt:
+    sys.exit(1)
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..b157ef6
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-312.pyc
new file mode 100644
index 0000000..77a127b
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-312.pyc
new file mode 100644
index 0000000..258656f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-312.pyc
new file mode 100644
index 0000000..8b671d5
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-312.pyc
new file mode 100644
index 0000000..d755d60
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-312.pyc
new file mode 100644
index 0000000..394567d
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-312.pyc
new file mode 100644
index 0000000..d4d9a9b
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-312.pyc
new file mode 100644
index 0000000..cad30cd
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-312.pyc
new file mode 100644
index 0000000..d577ca6
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-312.pyc
new file mode 100644
index 0000000..83ba19f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-312.pyc
new file mode 100644
index 0000000..ac619d9
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-312.pyc
new file mode 100644
index 0000000..dab1530
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-312.pyc
new file mode 100644
index 0000000..888e772
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-312.pyc
new file mode 100644
index 0000000..55d38dc
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-312.pyc
new file mode 100644
index 0000000..63f781e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/console.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/console.py
new file mode 100644
index 0000000..ee1ac27
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/console.py
@@ -0,0 +1,70 @@
+"""
+    pygments.console
+    ~~~~~~~~~~~~~~~~
+
+    Format colored console output.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+esc = "\x1b["
+
+codes = {}
+codes[""] = ""
+codes["reset"] = esc + "39;49;00m"
+
+codes["bold"] = esc + "01m"
+codes["faint"] = esc + "02m"
+codes["standout"] = esc + "03m"
+codes["underline"] = esc + "04m"
+codes["blink"] = esc + "05m"
+codes["overline"] = esc + "06m"
+
+dark_colors = ["black", "red", "green", "yellow", "blue",
+               "magenta", "cyan", "gray"]
+light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
+                "brightmagenta", "brightcyan", "white"]
+
+x = 30
+for dark, light in zip(dark_colors, light_colors):
+    codes[dark] = esc + "%im" % x
+    codes[light] = esc + "%im" % (60 + x)
+    x += 1
+
+del dark, light, x
+
+codes["white"] = codes["bold"]
+
+
+def reset_color():
+    return codes["reset"]
+
+
+def colorize(color_key, text):
+    return codes[color_key] + text + codes["reset"]
+
+
+def ansiformat(attr, text):
+    """
+    Format ``text`` with a color and/or some attributes::
+
+        color       normal color
+        *color*     bold color
+        _color_     underlined color
+        +color+     blinking color
+    """
+    result = []
+    if attr[:1] == attr[-1:] == '+':
+        result.append(codes['blink'])
+        attr = attr[1:-1]
+    if attr[:1] == attr[-1:] == '*':
+        result.append(codes['bold'])
+        attr = attr[1:-1]
+    if attr[:1] == attr[-1:] == '_':
+        result.append(codes['underline'])
+        attr = attr[1:-1]
+    result.append(codes[attr])
+    result.append(text)
+    result.append(codes['reset'])
+    return ''.join(result)
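
A quick usage sketch for the helpers above, again importing from a standalone Pygments install: `*...*` selects bold, `_..._` underline, `+...+` blink::

    from pygments.console import ansiformat, colorize

    print(colorize("yellow", "warning: something odd"))
    print(ansiformat("*brightred*", "fatal error"))   # bold bright red
    print(ansiformat("_green_", "all tests passed"))  # underlined green
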
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filter.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filter.py
new file mode 100644
index 0000000..5efff43
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filter.py
@@ -0,0 +1,70 @@
+"""
+    pygments.filter
+    ~~~~~~~~~~~~~~~
+
+    Module that implements the default filter.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+
+def apply_filters(stream, filters, lexer=None):
+    """
+    Use this method to apply an iterable of filters to
+    a stream. If lexer is given it's forwarded to the
+    filter, otherwise the filter receives `None`.
+    """
+    def _apply(filter_, stream):
+        yield from filter_.filter(lexer, stream)
+    for filter_ in filters:
+        stream = _apply(filter_, stream)
+    return stream
+
+
+def simplefilter(f):
+    """
+    Decorator that converts a function into a filter::
+
+        @simplefilter
+        def lowercase(self, lexer, stream, options):
+            for ttype, value in stream:
+                yield ttype, value.lower()
+    """
+    return type(f.__name__, (FunctionFilter,), {
+        '__module__': getattr(f, '__module__'),
+        '__doc__': f.__doc__,
+        'function': f,
+    })
+
+
+class Filter:
+    """
+    Default filter. Subclass this class or use the `simplefilter`
+    decorator to create own filters.
+    """
+
+    def __init__(self, **options):
+        self.options = options
+
+    def filter(self, lexer, stream):
+        raise NotImplementedError()
+
+
+class FunctionFilter(Filter):
+    """
+    Abstract class used by `simplefilter` to create simple
+    function filters on the fly. The `simplefilter` decorator
+    automatically creates subclasses of this class for
+    functions passed to it.
+    """
+    function = None
+
+    def __init__(self, **options):
+        if not hasattr(self, 'function'):
+            raise TypeError(f'{self.__class__.__name__!r} used without bound function')
+        Filter.__init__(self, **options)
+
+    def filter(self, lexer, stream):
+        # pylint: disable=not-callable
+        yield from self.function(lexer, stream, self.options)
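
How `simplefilter` and `apply_filters` fit together, as a runnable sketch against a standalone Pygments install::

    from pygments.filter import apply_filters, simplefilter
    from pygments.lexers import PythonLexer

    @simplefilter
    def uppercase(self, lexer, stream, options):
        # The decorated function becomes the filter() body of a
        # FunctionFilter subclass; instantiate it before use.
        for ttype, value in stream:
            yield ttype, value.upper()

    tokens = PythonLexer().get_tokens('print("hi")')
    for ttype, value in apply_filters(tokens, [uppercase()]):
        print(ttype, repr(value))
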
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filters/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filters/__init__.py
new file mode 100644
index 0000000..97380c9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filters/__init__.py
@@ -0,0 +1,940 @@
+"""
+    pygments.filters
+    ~~~~~~~~~~~~~~~~
+
+    Module containing filter lookup functions and default
+    filters.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
+    string_to_tokentype
+from pip._vendor.pygments.filter import Filter
+from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
+    get_choice_opt, ClassNotFound, OptionError
+from pip._vendor.pygments.plugin import find_plugin_filters
+
+
+def find_filter_class(filtername):
+    """Lookup a filter by name. Return None if not found."""
+    if filtername in FILTERS:
+        return FILTERS[filtername]
+    for name, cls in find_plugin_filters():
+        if name == filtername:
+            return cls
+    return None
+
+
+def get_filter_by_name(filtername, **options):
+    """Return an instantiated filter.
+
+    Options are passed to the filter initializer if wanted.
+    Raise a ClassNotFound if not found.
+    """
+    cls = find_filter_class(filtername)
+    if cls:
+        return cls(**options)
+    else:
+        raise ClassNotFound(f'filter {filtername!r} not found')
+
+
+def get_all_filters():
+    """Return a generator of all filter names."""
+    yield from FILTERS
+    for name, _ in find_plugin_filters():
+        yield name
+
+
+def _replace_special(ttype, value, regex, specialttype,
+                     replacefunc=lambda x: x):
+    last = 0
+    for match in regex.finditer(value):
+        start, end = match.start(), match.end()
+        if start != last:
+            yield ttype, value[last:start]
+        yield specialttype, replacefunc(value[start:end])
+        last = end
+    if last != len(value):
+        yield ttype, value[last:]
+
+
+class CodeTagFilter(Filter):
+    """Highlight special code tags in comments and docstrings.
+
+    Options accepted:
+
+    `codetags` : list of strings
+       A list of strings that are flagged as code tags.  The default is to
+       highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.
+
+    .. versionchanged:: 2.13
+       Now recognizes ``FIXME`` by default.
+    """
+
+    def __init__(self, **options):
+        Filter.__init__(self, **options)
+        tags = get_list_opt(options, 'codetags',
+                            ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
+        self.tag_re = re.compile(r'\b({})\b'.format('|'.join([
+            re.escape(tag) for tag in tags if tag
+        ])))
+
+    def filter(self, lexer, stream):
+        regex = self.tag_re
+        for ttype, value in stream:
+            if ttype in String.Doc or \
+               ttype in Comment and \
+               ttype not in Comment.Preproc:
+                yield from _replace_special(ttype, value, regex, Comment.Special)
+            else:
+                yield ttype, value
+
+
+class SymbolFilter(Filter):
+    """Convert mathematical symbols such as \\ in Isabelle
+    or \\longrightarrow in LaTeX into Unicode characters.
+
+    This is mostly useful for HTML or console output when you want to
+    approximate the source rendering you'd see in an IDE.
+
+    Options accepted:
+
+    `lang` : string
+       The symbol language. Must be one of ``'isabelle'`` or
+       ``'latex'``.  The default is ``'isabelle'``.
+    """
+
+    latex_symbols = {
+        '\\alpha'                : '\U000003b1',
+        '\\beta'                 : '\U000003b2',
+        '\\gamma'                : '\U000003b3',
+        '\\delta'                : '\U000003b4',
+        '\\varepsilon'           : '\U000003b5',
+        '\\zeta'                 : '\U000003b6',
+        '\\eta'                  : '\U000003b7',
+        '\\vartheta'             : '\U000003b8',
+        '\\iota'                 : '\U000003b9',
+        '\\kappa'                : '\U000003ba',
+        '\\lambda'               : '\U000003bb',
+        '\\mu'                   : '\U000003bc',
+        '\\nu'                   : '\U000003bd',
+        '\\xi'                   : '\U000003be',
+        '\\pi'                   : '\U000003c0',
+        '\\varrho'               : '\U000003c1',
+        '\\sigma'                : '\U000003c3',
+        '\\tau'                  : '\U000003c4',
+        '\\upsilon'              : '\U000003c5',
+        '\\varphi'               : '\U000003c6',
+        '\\chi'                  : '\U000003c7',
+        '\\psi'                  : '\U000003c8',
+        '\\omega'                : '\U000003c9',
+        '\\Gamma'                : '\U00000393',
+        '\\Delta'                : '\U00000394',
+        '\\Theta'                : '\U00000398',
+        '\\Lambda'               : '\U0000039b',
+        '\\Xi'                   : '\U0000039e',
+        '\\Pi'                   : '\U000003a0',
+        '\\Sigma'                : '\U000003a3',
+        '\\Upsilon'              : '\U000003a5',
+        '\\Phi'                  : '\U000003a6',
+        '\\Psi'                  : '\U000003a8',
+        '\\Omega'                : '\U000003a9',
+        '\\leftarrow'            : '\U00002190',
+        '\\longleftarrow'        : '\U000027f5',
+        '\\rightarrow'           : '\U00002192',
+        '\\longrightarrow'       : '\U000027f6',
+        '\\Leftarrow'            : '\U000021d0',
+        '\\Longleftarrow'        : '\U000027f8',
+        '\\Rightarrow'           : '\U000021d2',
+        '\\Longrightarrow'       : '\U000027f9',
+        '\\leftrightarrow'       : '\U00002194',
+        '\\longleftrightarrow'   : '\U000027f7',
+        '\\Leftrightarrow'       : '\U000021d4',
+        '\\Longleftrightarrow'   : '\U000027fa',
+        '\\mapsto'               : '\U000021a6',
+        '\\longmapsto'           : '\U000027fc',
+        '\\relbar'               : '\U00002500',
+        '\\Relbar'               : '\U00002550',
+        '\\hookleftarrow'        : '\U000021a9',
+        '\\hookrightarrow'       : '\U000021aa',
+        '\\leftharpoondown'      : '\U000021bd',
+        '\\rightharpoondown'     : '\U000021c1',
+        '\\leftharpoonup'        : '\U000021bc',
+        '\\rightharpoonup'       : '\U000021c0',
+        '\\rightleftharpoons'    : '\U000021cc',
+        '\\leadsto'              : '\U0000219d',
+        '\\downharpoonleft'      : '\U000021c3',
+        '\\downharpoonright'     : '\U000021c2',
+        '\\upharpoonleft'        : '\U000021bf',
+        '\\upharpoonright'       : '\U000021be',
+        '\\restriction'          : '\U000021be',
+        '\\uparrow'              : '\U00002191',
+        '\\Uparrow'              : '\U000021d1',
+        '\\downarrow'            : '\U00002193',
+        '\\Downarrow'            : '\U000021d3',
+        '\\updownarrow'          : '\U00002195',
+        '\\Updownarrow'          : '\U000021d5',
+        '\\langle'               : '\U000027e8',
+        '\\rangle'               : '\U000027e9',
+        '\\lceil'                : '\U00002308',
+        '\\rceil'                : '\U00002309',
+        '\\lfloor'               : '\U0000230a',
+        '\\rfloor'               : '\U0000230b',
+        '\\flqq'                 : '\U000000ab',
+        '\\frqq'                 : '\U000000bb',
+        '\\bot'                  : '\U000022a5',
+        '\\top'                  : '\U000022a4',
+        '\\wedge'                : '\U00002227',
+        '\\bigwedge'             : '\U000022c0',
+        '\\vee'                  : '\U00002228',
+        '\\bigvee'               : '\U000022c1',
+        '\\forall'               : '\U00002200',
+        '\\exists'               : '\U00002203',
+        '\\nexists'              : '\U00002204',
+        '\\neg'                  : '\U000000ac',
+        '\\Box'                  : '\U000025a1',
+        '\\Diamond'              : '\U000025c7',
+        '\\vdash'                : '\U000022a2',
+        '\\models'               : '\U000022a8',
+        '\\dashv'                : '\U000022a3',
+        '\\surd'                 : '\U0000221a',
+        '\\le'                   : '\U00002264',
+        '\\ge'                   : '\U00002265',
+        '\\ll'                   : '\U0000226a',
+        '\\gg'                   : '\U0000226b',
+        '\\lesssim'              : '\U00002272',
+        '\\gtrsim'               : '\U00002273',
+        '\\lessapprox'           : '\U00002a85',
+        '\\gtrapprox'            : '\U00002a86',
+        '\\in'                   : '\U00002208',
+        '\\notin'                : '\U00002209',
+        '\\subset'               : '\U00002282',
+        '\\supset'               : '\U00002283',
+        '\\subseteq'             : '\U00002286',
+        '\\supseteq'             : '\U00002287',
+        '\\sqsubset'             : '\U0000228f',
+        '\\sqsupset'             : '\U00002290',
+        '\\sqsubseteq'           : '\U00002291',
+        '\\sqsupseteq'           : '\U00002292',
+        '\\cap'                  : '\U00002229',
+        '\\bigcap'               : '\U000022c2',
+        '\\cup'                  : '\U0000222a',
+        '\\bigcup'               : '\U000022c3',
+        '\\sqcup'                : '\U00002294',
+        '\\bigsqcup'             : '\U00002a06',
+        '\\sqcap'                : '\U00002293',
+        '\\Bigsqcap'             : '\U00002a05',
+        '\\setminus'             : '\U00002216',
+        '\\propto'               : '\U0000221d',
+        '\\uplus'                : '\U0000228e',
+        '\\bigplus'              : '\U00002a04',
+        '\\sim'                  : '\U0000223c',
+        '\\doteq'                : '\U00002250',
+        '\\simeq'                : '\U00002243',
+        '\\approx'               : '\U00002248',
+        '\\asymp'                : '\U0000224d',
+        '\\cong'                 : '\U00002245',
+        '\\equiv'                : '\U00002261',
+        '\\Join'                 : '\U000022c8',
+        '\\bowtie'               : '\U00002a1d',
+        '\\prec'                 : '\U0000227a',
+        '\\succ'                 : '\U0000227b',
+        '\\preceq'               : '\U0000227c',
+        '\\succeq'               : '\U0000227d',
+        '\\parallel'             : '\U00002225',
+        '\\mid'                  : '\U000000a6',
+        '\\pm'                   : '\U000000b1',
+        '\\mp'                   : '\U00002213',
+        '\\times'                : '\U000000d7',
+        '\\div'                  : '\U000000f7',
+        '\\cdot'                 : '\U000022c5',
+        '\\star'                 : '\U000022c6',
+        '\\circ'                 : '\U00002218',
+        '\\dagger'               : '\U00002020',
+        '\\ddagger'              : '\U00002021',
+        '\\lhd'                  : '\U000022b2',
+        '\\rhd'                  : '\U000022b3',
+        '\\unlhd'                : '\U000022b4',
+        '\\unrhd'                : '\U000022b5',
+        '\\triangleleft'         : '\U000025c3',
+        '\\triangleright'        : '\U000025b9',
+        '\\triangle'             : '\U000025b3',
+        '\\triangleq'            : '\U0000225c',
+        '\\oplus'                : '\U00002295',
+        '\\bigoplus'             : '\U00002a01',
+        '\\otimes'               : '\U00002297',
+        '\\bigotimes'            : '\U00002a02',
+        '\\odot'                 : '\U00002299',
+        '\\bigodot'              : '\U00002a00',
+        '\\ominus'               : '\U00002296',
+        '\\oslash'               : '\U00002298',
+        '\\dots'                 : '\U00002026',
+        '\\cdots'                : '\U000022ef',
+        '\\sum'                  : '\U00002211',
+        '\\prod'                 : '\U0000220f',
+        '\\coprod'               : '\U00002210',
+        '\\infty'                : '\U0000221e',
+        '\\int'                  : '\U0000222b',
+        '\\oint'                 : '\U0000222e',
+        '\\clubsuit'             : '\U00002663',
+        '\\diamondsuit'          : '\U00002662',
+        '\\heartsuit'            : '\U00002661',
+        '\\spadesuit'            : '\U00002660',
+        '\\aleph'                : '\U00002135',
+        '\\emptyset'             : '\U00002205',
+        '\\nabla'                : '\U00002207',
+        '\\partial'              : '\U00002202',
+        '\\flat'                 : '\U0000266d',
+        '\\natural'              : '\U0000266e',
+        '\\sharp'                : '\U0000266f',
+        '\\angle'                : '\U00002220',
+        '\\copyright'            : '\U000000a9',
+        '\\textregistered'       : '\U000000ae',
+        '\\textonequarter'       : '\U000000bc',
+        '\\textonehalf'          : '\U000000bd',
+        '\\textthreequarters'    : '\U000000be',
+        '\\textordfeminine'      : '\U000000aa',
+        '\\textordmasculine'     : '\U000000ba',
+        '\\euro'                 : '\U000020ac',
+        '\\pounds'               : '\U000000a3',
+        '\\yen'                  : '\U000000a5',
+        '\\textcent'             : '\U000000a2',
+        '\\textcurrency'         : '\U000000a4',
+        '\\textdegree'           : '\U000000b0',
+    }
+
+    isabelle_symbols = {
+        '\\<zero>'                 : '\U0001d7ec',
+        '\\<one>'                  : '\U0001d7ed',
+        '\\<two>'                  : '\U0001d7ee',
+        '\\<three>'                : '\U0001d7ef',
+        '\\<four>'                 : '\U0001d7f0',
+        '\\<five>'                 : '\U0001d7f1',
+        '\\<six>'                  : '\U0001d7f2',
+        '\\<seven>'                : '\U0001d7f3',
+        '\\<eight>'                : '\U0001d7f4',
+        '\\<nine>'                 : '\U0001d7f5',
+        '\\<A>'                    : '\U0001d49c',
+        '\\<B>'                    : '\U0000212c',
+        '\\<C>'                    : '\U0001d49e',
+        '\\<D>'                    : '\U0001d49f',
+        '\\<E>'                    : '\U00002130',
+        '\\<F>'                    : '\U00002131',
+        '\\<G>'                    : '\U0001d4a2',
+        '\\<H>'                    : '\U0000210b',
+        '\\<I>'                    : '\U00002110',
+        '\\<J>'                    : '\U0001d4a5',
+        '\\<K>'                    : '\U0001d4a6',
+        '\\<L>'                    : '\U00002112',
+        '\\<M>'                    : '\U00002133',
+        '\\<N>'                    : '\U0001d4a9',
+        '\\<O>'                    : '\U0001d4aa',
+        '\\<P>'                    : '\U0001d4ab',
+        '\\<Q>'                    : '\U0001d4ac',
+        '\\<R>'                    : '\U0000211b',
+        '\\<S>'                    : '\U0001d4ae',
+        '\\<T>'                    : '\U0001d4af',
+        '\\<U>'                    : '\U0001d4b0',
+        '\\<V>'                    : '\U0001d4b1',
+        '\\<W>'                    : '\U0001d4b2',
+        '\\<X>'                    : '\U0001d4b3',
+        '\\<Y>'                    : '\U0001d4b4',
+        '\\<Z>'                    : '\U0001d4b5',
+        '\\<a>'                    : '\U0001d5ba',
+        '\\<b>'                    : '\U0001d5bb',
+        '\\<c>'                    : '\U0001d5bc',
+        '\\<d>'                    : '\U0001d5bd',
+        '\\<e>'                    : '\U0001d5be',
+        '\\<f>'                    : '\U0001d5bf',
+        '\\<g>'                    : '\U0001d5c0',
+        '\\<h>'                    : '\U0001d5c1',
+        '\\<i>'                    : '\U0001d5c2',
+        '\\<j>'                    : '\U0001d5c3',
+        '\\<k>'                    : '\U0001d5c4',
+        '\\<l>'                    : '\U0001d5c5',
+        '\\<m>'                    : '\U0001d5c6',
+        '\\<n>'                    : '\U0001d5c7',
+        '\\<o>'                    : '\U0001d5c8',
+        '\\<p>'                    : '\U0001d5c9',
+        '\\<q>'                    : '\U0001d5ca',
+        '\\<r>'                    : '\U0001d5cb',
+        '\\<s>'                    : '\U0001d5cc',
+        '\\<t>'                    : '\U0001d5cd',
+        '\\<u>'                    : '\U0001d5ce',
+        '\\<v>'                    : '\U0001d5cf',
+        '\\<w>'                    : '\U0001d5d0',
+        '\\<x>'                    : '\U0001d5d1',
+        '\\<y>'                    : '\U0001d5d2',
+        '\\<z>'                    : '\U0001d5d3',
+        '\\<AA>'                   : '\U0001d504',
+        '\\<BB>'                   : '\U0001d505',
+        '\\<CC>'                   : '\U0000212d',
+        '\\<DD>'                   : '\U0001d507',
+        '\\<EE>'                   : '\U0001d508',
+        '\\<FF>'                   : '\U0001d509',
+        '\\<GG>'                   : '\U0001d50a',
+        '\\<HH>'                   : '\U0000210c',
+        '\\<II>'                   : '\U00002111',
+        '\\<JJ>'                   : '\U0001d50d',
+        '\\<KK>'                   : '\U0001d50e',
+        '\\<LL>'                   : '\U0001d50f',
+        '\\<MM>'                   : '\U0001d510',
+        '\\<NN>'                   : '\U0001d511',
+        '\\<OO>'                   : '\U0001d512',
+        '\\<PP>'                   : '\U0001d513',
+        '\\<QQ>'                   : '\U0001d514',
+        '\\<RR>'                   : '\U0000211c',
+        '\\<SS>'                   : '\U0001d516',
+        '\\<TT>'                   : '\U0001d517',
+        '\\<UU>'                   : '\U0001d518',
+        '\\<VV>'                   : '\U0001d519',
+        '\\<WW>'                   : '\U0001d51a',
+        '\\<XX>'                   : '\U0001d51b',
+        '\\<YY>'                   : '\U0001d51c',
+        '\\<ZZ>'                   : '\U00002128',
+        '\\<aa>'                   : '\U0001d51e',
+        '\\<bb>'                   : '\U0001d51f',
+        '\\<cc>'                   : '\U0001d520',
+        '\\<dd>'                   : '\U0001d521',
+        '\\<ee>'                   : '\U0001d522',
+        '\\<ff>'                   : '\U0001d523',
+        '\\<gg>'                   : '\U0001d524',
+        '\\<hh>'                   : '\U0001d525',
+        '\\<ii>'                   : '\U0001d526',
+        '\\<jj>'                   : '\U0001d527',
+        '\\<kk>'                   : '\U0001d528',
+        '\\<ll>'                   : '\U0001d529',
+        '\\<mm>'                   : '\U0001d52a',
+        '\\<nn>'                   : '\U0001d52b',
+        '\\<oo>'                   : '\U0001d52c',
+        '\\<pp>'                   : '\U0001d52d',
+        '\\<qq>'                   : '\U0001d52e',
+        '\\<rr>'                   : '\U0001d52f',
+        '\\<ss>'                   : '\U0001d530',
+        '\\<tt>'                   : '\U0001d531',
+        '\\<uu>'                   : '\U0001d532',
+        '\\<vv>'                   : '\U0001d533',
+        '\\<ww>'                   : '\U0001d534',
+        '\\<xx>'                   : '\U0001d535',
+        '\\<yy>'                   : '\U0001d536',
+        '\\<zz>'                   : '\U0001d537',
+        '\\<alpha>'                : '\U000003b1',
+        '\\<beta>'                 : '\U000003b2',
+        '\\<gamma>'                : '\U000003b3',
+        '\\<delta>'                : '\U000003b4',
+        '\\<epsilon>'              : '\U000003b5',
+        '\\<zeta>'                 : '\U000003b6',
+        '\\<eta>'                  : '\U000003b7',
+        '\\<theta>'                : '\U000003b8',
+        '\\<iota>'                 : '\U000003b9',
+        '\\<kappa>'                : '\U000003ba',
+        '\\<lambda>'               : '\U000003bb',
+        '\\<mu>'                   : '\U000003bc',
+        '\\<nu>'                   : '\U000003bd',
+        '\\<xi>'                   : '\U000003be',
+        '\\<pi>'                   : '\U000003c0',
+        '\\<rho>'                  : '\U000003c1',
+        '\\<sigma>'                : '\U000003c3',
+        '\\<tau>'                  : '\U000003c4',
+        '\\<upsilon>'              : '\U000003c5',
+        '\\<phi>'                  : '\U000003c6',
+        '\\<chi>'                  : '\U000003c7',
+        '\\<psi>'                  : '\U000003c8',
+        '\\<omega>'                : '\U000003c9',
+        '\\<Gamma>'                : '\U00000393',
+        '\\<Delta>'                : '\U00000394',
+        '\\<Theta>'                : '\U00000398',
+        '\\<Lambda>'               : '\U0000039b',
+        '\\<Xi>'                   : '\U0000039e',
+        '\\<Pi>'                   : '\U000003a0',
+        '\\<Sigma>'                : '\U000003a3',
+        '\\<Upsilon>'              : '\U000003a5',
+        '\\<Phi>'                  : '\U000003a6',
+        '\\<Psi>'                  : '\U000003a8',
+        '\\<Omega>'                : '\U000003a9',
+        '\\<bool>'                 : '\U0001d539',
+        '\\<complex>'              : '\U00002102',
+        '\\<nat>'                  : '\U00002115',
+        '\\<rat>'                  : '\U0000211a',
+        '\\<real>'                 : '\U0000211d',
+        '\\<int>'                  : '\U00002124',
+        '\\<leftarrow>'            : '\U00002190',
+        '\\<longleftarrow>'        : '\U000027f5',
+        '\\<rightarrow>'           : '\U00002192',
+        '\\<longrightarrow>'       : '\U000027f6',
+        '\\<Leftarrow>'            : '\U000021d0',
+        '\\<Longleftarrow>'        : '\U000027f8',
+        '\\<Rightarrow>'           : '\U000021d2',
+        '\\<Longrightarrow>'       : '\U000027f9',
+        '\\<leftrightarrow>'       : '\U00002194',
+        '\\<longleftrightarrow>'   : '\U000027f7',
+        '\\<Leftrightarrow>'       : '\U000021d4',
+        '\\<Longleftrightarrow>'   : '\U000027fa',
+        '\\<mapsto>'               : '\U000021a6',
+        '\\<longmapsto>'           : '\U000027fc',
+        '\\<midarrow>'             : '\U00002500',
+        '\\<Midarrow>'             : '\U00002550',
+        '\\<hookleftarrow>'        : '\U000021a9',
+        '\\<hookrightarrow>'       : '\U000021aa',
+        '\\<leftharpoondown>'      : '\U000021bd',
+        '\\<rightharpoondown>'     : '\U000021c1',
+        '\\<leftharpoonup>'        : '\U000021bc',
+        '\\<rightharpoonup>'       : '\U000021c0',
+        '\\<rightleftharpoons>'    : '\U000021cc',
+        '\\<leadsto>'              : '\U0000219d',
+        '\\<downharpoonleft>'      : '\U000021c3',
+        '\\<downharpoonright>'     : '\U000021c2',
+        '\\<upharpoonleft>'        : '\U000021bf',
+        '\\<upharpoonright>'       : '\U000021be',
+        '\\<restriction>'          : '\U000021be',
+        '\\<Colon>'                : '\U00002237',
+        '\\<up>'                   : '\U00002191',
+        '\\<Up>'                   : '\U000021d1',
+        '\\<down>'                 : '\U00002193',
+        '\\<Down>'                 : '\U000021d3',
+        '\\<updown>'               : '\U00002195',
+        '\\<Updown>'               : '\U000021d5',
+        '\\<langle>'               : '\U000027e8',
+        '\\<rangle>'               : '\U000027e9',
+        '\\<lceil>'                : '\U00002308',
+        '\\<rceil>'                : '\U00002309',
+        '\\<lfloor>'               : '\U0000230a',
+        '\\<rfloor>'               : '\U0000230b',
+        '\\<lparr>'                : '\U00002987',
+        '\\<rparr>'                : '\U00002988',
+        '\\<lbrakk>'               : '\U000027e6',
+        '\\<rbrakk>'               : '\U000027e7',
+        '\\<lbrace>'               : '\U00002983',
+        '\\<rbrace>'               : '\U00002984',
+        '\\<guillemotleft>'        : '\U000000ab',
+        '\\<guillemotright>'       : '\U000000bb',
+        '\\<bottom>'               : '\U000022a5',
+        '\\<top>'                  : '\U000022a4',
+        '\\<and>'                  : '\U00002227',
+        '\\<And>'                  : '\U000022c0',
+        '\\<or>'                   : '\U00002228',
+        '\\<Or>'                   : '\U000022c1',
+        '\\<forall>'               : '\U00002200',
+        '\\<exists>'               : '\U00002203',
+        '\\<nexists>'              : '\U00002204',
+        '\\<not>'                  : '\U000000ac',
+        '\\<box>'                  : '\U000025a1',
+        '\\<diamond>'              : '\U000025c7',
+        '\\<turnstile>'            : '\U000022a2',
+        '\\<Turnstile>'            : '\U000022a8',
+        '\\<tturnstile>'           : '\U000022a9',
+        '\\<TTurnstile>'           : '\U000022ab',
+        '\\<stileturn>'            : '\U000022a3',
+        '\\<surd>'                 : '\U0000221a',
+        '\\<le>'                   : '\U00002264',
+        '\\<ge>'                   : '\U00002265',
+        '\\<lless>'                : '\U0000226a',
+        '\\<ggreater>'             : '\U0000226b',
+        '\\<lesssim>'              : '\U00002272',
+        '\\<greatersim>'           : '\U00002273',
+        '\\<lessapprox>'           : '\U00002a85',
+        '\\<greaterapprox>'        : '\U00002a86',
+        '\\<in>'                   : '\U00002208',
+        '\\<notin>'                : '\U00002209',
+        '\\<subset>'               : '\U00002282',
+        '\\<supset>'               : '\U00002283',
+        '\\<subseteq>'             : '\U00002286',
+        '\\<supseteq>'             : '\U00002287',
+        '\\<sqsubset>'             : '\U0000228f',
+        '\\<sqsupset>'             : '\U00002290',
+        '\\<sqsubseteq>'           : '\U00002291',
+        '\\<sqsupseteq>'           : '\U00002292',
+        '\\<inter>'                : '\U00002229',
+        '\\<Inter>'                : '\U000022c2',
+        '\\<union>'                : '\U0000222a',
+        '\\<Union>'                : '\U000022c3',
+        '\\<squnion>'              : '\U00002294',
+        '\\<Squnion>'              : '\U00002a06',
+        '\\<sqinter>'              : '\U00002293',
+        '\\<Sqinter>'              : '\U00002a05',
+        '\\<setminus>'             : '\U00002216',
+        '\\<propto>'               : '\U0000221d',
+        '\\<uplus>'                : '\U0000228e',
+        '\\<Uplus>'                : '\U00002a04',
+        '\\<noteq>'                : '\U00002260',
+        '\\<sim>'                  : '\U0000223c',
+        '\\<doteq>'                : '\U00002250',
+        '\\<simeq>'                : '\U00002243',
+        '\\<approx>'               : '\U00002248',
+        '\\<asymp>'                : '\U0000224d',
+        '\\<cong>'                 : '\U00002245',
+        '\\<smile>'                : '\U00002323',
+        '\\<equiv>'                : '\U00002261',
+        '\\<frown>'                : '\U00002322',
+        '\\<Join>'                 : '\U000022c8',
+        '\\<bowtie>'               : '\U00002a1d',
+        '\\<prec>'                 : '\U0000227a',
+        '\\<succ>'                 : '\U0000227b',
+        '\\<preceq>'               : '\U0000227c',
+        '\\<succeq>'               : '\U0000227d',
+        '\\<parallel>'             : '\U00002225',
+        '\\<bar>'                  : '\U000000a6',
+        '\\<plusminus>'            : '\U000000b1',
+        '\\<minusplus>'            : '\U00002213',
+        '\\<times>'                : '\U000000d7',
+        '\\<div>'                  : '\U000000f7',
+        '\\<cdot>'                 : '\U000022c5',
+        '\\<star>'                 : '\U000022c6',
+        '\\<bullet>'               : '\U00002219',
+        '\\<circ>'                 : '\U00002218',
+        '\\<dagger>'               : '\U00002020',
+        '\\<ddagger>'              : '\U00002021',
+        '\\<lhd>'                  : '\U000022b2',
+        '\\<rhd>'                  : '\U000022b3',
+        '\\<unlhd>'                : '\U000022b4',
+        '\\<unrhd>'                : '\U000022b5',
+        '\\<triangleleft>'         : '\U000025c3',
+        '\\<triangleright>'        : '\U000025b9',
+        '\\<triangle>'             : '\U000025b3',
+        '\\<triangleq>'            : '\U0000225c',
+        '\\<oplus>'                : '\U00002295',
+        '\\<Oplus>'                : '\U00002a01',
+        '\\<otimes>'               : '\U00002297',
+        '\\<Otimes>'               : '\U00002a02',
+        '\\<odot>'                 : '\U00002299',
+        '\\<Odot>'                 : '\U00002a00',
+        '\\<ominus>'               : '\U00002296',
+        '\\<oslash>'               : '\U00002298',
+        '\\<dots>'                 : '\U00002026',
+        '\\<cdots>'                : '\U000022ef',
+        '\\<Sum>'                  : '\U00002211',
+        '\\<Prod>'                 : '\U0000220f',
+        '\\<Coprod>'               : '\U00002210',
+        '\\<infinity>'             : '\U0000221e',
+        '\\<integral>'             : '\U0000222b',
+        '\\<ointegral>'            : '\U0000222e',
+        '\\<clubsuit>'             : '\U00002663',
+        '\\<diamondsuit>'          : '\U00002662',
+        '\\<heartsuit>'            : '\U00002661',
+        '\\<spadesuit>'            : '\U00002660',
+        '\\<aleph>'                : '\U00002135',
+        '\\<emptyset>'             : '\U00002205',
+        '\\<nabla>'                : '\U00002207',
+        '\\<partial>'              : '\U00002202',
+        '\\<flat>'                 : '\U0000266d',
+        '\\<natural>'              : '\U0000266e',
+        '\\<sharp>'                : '\U0000266f',
+        '\\<angle>'                : '\U00002220',
+        '\\<copyright>'            : '\U000000a9',
+        '\\<registered>'           : '\U000000ae',
+        '\\<hyphen>'               : '\U000000ad',
+        '\\<inverse>'              : '\U000000af',
+        '\\<onequarter>'           : '\U000000bc',
+        '\\<onehalf>'              : '\U000000bd',
+        '\\<threequarters>'        : '\U000000be',
+        '\\<ordfeminine>'          : '\U000000aa',
+        '\\<ordmasculine>'         : '\U000000ba',
+        '\\<section>'              : '\U000000a7',
+        '\\<paragraph>'            : '\U000000b6',
+        '\\<exclamdown>'           : '\U000000a1',
+        '\\<questiondown>'         : '\U000000bf',
+        '\\<euro>'                 : '\U000020ac',
+        '\\<pounds>'               : '\U000000a3',
+        '\\<yen>'                  : '\U000000a5',
+        '\\<cent>'                 : '\U000000a2',
+        '\\<currency>'             : '\U000000a4',
+        '\\<degree>'               : '\U000000b0',
+        '\\<amalg>'                : '\U00002a3f',
+        '\\<mho>'                  : '\U00002127',
+        '\\<lozenge>'              : '\U000025ca',
+        '\\<wp>'                   : '\U00002118',
+        '\\<wrong>'                : '\U00002240',
+        '\\<struct>'               : '\U000022c4',
+        '\\<acute>'                : '\U000000b4',
+        '\\<index>'                : '\U00000131',
+        '\\<dieresis>'             : '\U000000a8',
+        '\\<cedilla>'              : '\U000000b8',
+        '\\<hungarumlaut>'         : '\U000002dd',
+        '\\<some>'                 : '\U000003f5',
+        '\\<newline>'              : '\U000023ce',
+        '\\<open>'                 : '\U00002039',
+        '\\<close>'                : '\U0000203a',
+        '\\<hole>'                 : '\U00002302',
+        '\\<^sub>'                 : '\U000021e9',
+        '\\<^sup>'                 : '\U000021e7',
+        '\\<^bold>'                : '\U00002759',
+        '\\<^bsub>'                : '\U000021d8',
+        '\\<^esub>'                : '\U000021d9',
+        '\\<^bsup>'                : '\U000021d7',
+        '\\<^esup>'                : '\U000021d6',
+    }
+
+    lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}
+
+    def __init__(self, **options):
+        Filter.__init__(self, **options)
+        lang = get_choice_opt(options, 'lang',
+                              ['isabelle', 'latex'], 'isabelle')
+        self.symbols = self.lang_map[lang]
+
+    def filter(self, lexer, stream):
+        for ttype, value in stream:
+            if value in self.symbols:
+                yield ttype, self.symbols[value]
+            else:
+                yield ttype, value
+
+
+class KeywordCaseFilter(Filter):
+    """Convert keywords to lowercase or uppercase or capitalize them, which
+    means first letter uppercase, rest lowercase.
+
+    This can be useful e.g. if you highlight Pascal code and want to adapt the
+    code to your styleguide.
+
+    Options accepted:
+
+    `case` : string
+       The casing to convert keywords to. Must be one of ``'lower'``,
+       ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
+    """
+
+    def __init__(self, **options):
+        Filter.__init__(self, **options)
+        case = get_choice_opt(options, 'case',
+                              ['lower', 'upper', 'capitalize'], 'lower')
+        self.convert = getattr(str, case)
+
+    def filter(self, lexer, stream):
+        for ttype, value in stream:
+            if ttype in Keyword:
+                yield ttype, self.convert(value)
+            else:
+                yield ttype, value
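As an aside, a filter such as the KeywordCaseFilter above is normally attached to a lexer by its registered name rather than instantiated directly. A minimal usage sketch, assuming the standard public Pygments API (the Pascal snippet is purely illustrative):

    # Minimal sketch (assumes a normal, non-vendored Pygments install).
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import NullFormatter

    lexer = get_lexer_by_name('pascal')
    lexer.add_filter('keywordcase', case='upper')  # looked up via its registered name
    print(highlight('begin writeln(1) end.', lexer, NullFormatter()))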
+
+
+class NameHighlightFilter(Filter):
+    """Highlight a normal Name (and Name.*) token with a different token type.
+
+    Example::
+
+        filter = NameHighlightFilter(
+            names=['foo', 'bar', 'baz'],
+            tokentype=Name.Function,
+        )
+
+    This would highlight the names "foo", "bar" and "baz"
+    as functions. `Name.Function` is the default token type.
+
+    Options accepted:
+
+    `names` : list of strings
+      A list of names that should be given the different token type.
+      There is no default.
+    `tokentype` : TokenType or string
+      A token type or a string containing a token type name that is
+      used for highlighting the strings in `names`. The default is
+      `Name.Function`.
+    """
+
+    def __init__(self, **options):
+        Filter.__init__(self, **options)
+        self.names = set(get_list_opt(options, 'names', []))
+        tokentype = options.get('tokentype')
+        if tokentype:
+            self.tokentype = string_to_tokentype(tokentype)
+        else:
+            self.tokentype = Name.Function
+
+    def filter(self, lexer, stream):
+        for ttype, value in stream:
+            if ttype in Name and value in self.names:
+                yield self.tokentype, value
+            else:
+                yield ttype, value
+
+
+class ErrorToken(Exception):
+    pass
+
+
+class RaiseOnErrorTokenFilter(Filter):
+    """Raise an exception when the lexer generates an error token.
+
+    Options accepted:
+
+    `excclass` : Exception class
+       The exception class to raise.
+       The default is `pygments.filters.ErrorToken`.
+
+    .. versionadded:: 0.8
+    """
+
+    def __init__(self, **options):
+        Filter.__init__(self, **options)
+        self.exception = options.get('excclass', ErrorToken)
+        try:
+            # issubclass() will raise TypeError if first argument is not a class
+            if not issubclass(self.exception, Exception):
+                raise TypeError
+        except TypeError:
+            raise OptionError('excclass option is not an exception class')
+
+    def filter(self, lexer, stream):
+        for ttype, value in stream:
+            if ttype is Error:
+                raise self.exception(value)
+            yield ttype, value
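A short sketch of how the two filters above combine in practice, again assuming the standard Pygments API (`frobnicate` is just an illustrative identifier to highlight):

    # Sketch: highlight a chosen name and fail fast on Error tokens.
    from pygments.lexers import PythonLexer
    from pygments.filters import NameHighlightFilter
    from pygments.token import Name

    lexer = PythonLexer()
    lexer.add_filter(NameHighlightFilter(names=['frobnicate'],
                                         tokentype=Name.Builtin))
    lexer.add_filter('raiseonerror')  # RaiseOnErrorTokenFilter by registered name
    tokens = list(lexer.get_tokens('frobnicate(42)'))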
+
+
+class VisibleWhitespaceFilter(Filter):
+    """Convert tabs, newlines and/or spaces to visible characters.
+
+    Options accepted:
+
+    `spaces` : string or bool
+      If this is a one-character string, spaces will be replaced by this string.
+      If it is another true value, spaces will be replaced by ``·`` (unicode
+      MIDDLE DOT). If it is a false value, spaces will not be replaced. The
+      default is ``False``.
+    `tabs` : string or bool
+      The same as for `spaces`, but the default replacement character is ``»``
+      (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
+      is ``False``. Note: this will not work if the `tabsize` option for the
+      lexer is nonzero, as tabs will already have been expanded then.
+    `tabsize` : int
+      If tabs are to be replaced by this filter (see the `tabs` option), this
+      is the total number of characters that a tab should be expanded to.
+      The default is ``8``.
+    `newlines` : string or bool
+      The same as for `spaces`, but the default replacement character is ``¶``
+      (unicode PILCROW SIGN). The default value is ``False``.
+    `wstokentype` : bool
+      If true, give whitespace the special `Whitespace` token type. This allows
+      styling the visible whitespace differently (e.g. greyed out), but it can
+      disrupt background colors. The default is ``True``.
+
+    .. versionadded:: 0.8
+    """
+
+    def __init__(self, **options):
+        Filter.__init__(self, **options)
+        for name, default in [('spaces',   '·'),
+                              ('tabs',     '»'),
+                              ('newlines', '¶')]:
+            opt = options.get(name, False)
+            if isinstance(opt, str) and len(opt) == 1:
+                setattr(self, name, opt)
+            else:
+                setattr(self, name, (opt and default or ''))
+        tabsize = get_int_opt(options, 'tabsize', 8)
+        if self.tabs:
+            self.tabs += ' ' * (tabsize - 1)
+        if self.newlines:
+            self.newlines += '\n'
+        self.wstt = get_bool_opt(options, 'wstokentype', True)
+
+    def filter(self, lexer, stream):
+        if self.wstt:
+            spaces = self.spaces or ' '
+            tabs = self.tabs or '\t'
+            newlines = self.newlines or '\n'
+            regex = re.compile(r'\s')
+
+            def replacefunc(wschar):
+                if wschar == ' ':
+                    return spaces
+                elif wschar == '\t':
+                    return tabs
+                elif wschar == '\n':
+                    return newlines
+                return wschar
+
+            for ttype, value in stream:
+                yield from _replace_special(ttype, value, regex, Whitespace,
+                                            replacefunc)
+        else:
+            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
+            # simpler processing
+            for ttype, value in stream:
+                if spaces:
+                    value = value.replace(' ', spaces)
+                if tabs:
+                    value = value.replace('\t', tabs)
+                if newlines:
+                    value = value.replace('\n', newlines)
+                yield ttype, value
+
+
+class GobbleFilter(Filter):
+    """Gobbles source code lines (eats initial characters).
+
+    This filter drops the first ``n`` characters off every line of code. This
+    may be useful when the source code fed to the lexer is indented by a fixed
+    amount of space that isn't desired in the output.
+
+    Options accepted:
+
+    `n` : int
+       The number of characters to gobble.
+
+    .. versionadded:: 1.2
+    """
+    def __init__(self, **options):
+        Filter.__init__(self, **options)
+        self.n = get_int_opt(options, 'n', 0)
+
+    def gobble(self, value, left):
+        if left < len(value):
+            return value[left:], 0
+        else:
+            return '', left - len(value)
+
+    def filter(self, lexer, stream):
+        n = self.n
+        left = n  # How many characters left to gobble.
+        for ttype, value in stream:
+            # Remove ``left`` tokens from first line, ``n`` from all others.
+            parts = value.split('\n')
+            (parts[0], left) = self.gobble(parts[0], left)
+            for i in range(1, len(parts)):
+                (parts[i], left) = self.gobble(parts[i], n)
+            value = '\n'.join(parts)
+
+            if value != '':
+                yield ttype, value
+
+
+class TokenMergeFilter(Filter):
+    """Merges consecutive tokens with the same token type in the output
+    stream of a lexer.
+
+    .. versionadded:: 1.2
+    """
+    def __init__(self, **options):
+        Filter.__init__(self, **options)
+
+    def filter(self, lexer, stream):
+        current_type = None
+        current_value = None
+        for ttype, value in stream:
+            if ttype is current_type:
+                current_value += value
+            else:
+                if current_type is not None:
+                    yield current_type, current_value
+                current_type = ttype
+                current_value = value
+        if current_type is not None:
+            yield current_type, current_value
+
+
+FILTERS = {
+    'codetagify':   CodeTagFilter,
+    'keywordcase':  KeywordCaseFilter,
+    'highlight':    NameHighlightFilter,
+    'raiseonerror': RaiseOnErrorTokenFilter,
+    'whitespace':   VisibleWhitespaceFilter,
+    'gobble':       GobbleFilter,
+    'tokenmerge':   TokenMergeFilter,
+    'symbols':      SymbolFilter,
+}
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..9a4adb4
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-312.pyc differ
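The FILTERS mapping above is the registry consulted by name-based lookup. A minimal sketch, assuming `get_filter_by_name` from the regular (non-vendored) `pygments.filters` module:

    # Sketch: instantiate filters via their registered names.
    from pygments.filters import get_filter_by_name

    ws = get_filter_by_name('whitespace', spaces=True, tabs=True)  # VisibleWhitespaceFilter
    merge = get_filter_by_name('tokenmerge')                       # TokenMergeFilter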
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatter.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatter.py
new file mode 100644
index 0000000..0041e41
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatter.py
@@ -0,0 +1,129 @@
+"""
+    pygments.formatter
+    ~~~~~~~~~~~~~~~~~~
+
+    Base formatter class.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import codecs
+
+from pip._vendor.pygments.util import get_bool_opt
+from pip._vendor.pygments.styles import get_style_by_name
+
+__all__ = ['Formatter']
+
+
+def _lookup_style(style):
+    if isinstance(style, str):
+        return get_style_by_name(style)
+    return style
+
+
+class Formatter:
+    """
+    Converts a token stream to text.
+
+    Formatters should have attributes to help selecting them. These
+    are similar to the corresponding :class:`~pygments.lexer.Lexer`
+    attributes.
+
+    .. autoattribute:: name
+       :no-value:
+
+    .. autoattribute:: aliases
+       :no-value:
+
+    .. autoattribute:: filenames
+       :no-value:
+
+    You can pass options as keyword arguments to the constructor.
+    All formatters accept these basic options:
+
+    ``style``
+        The style to use, can be a string or a Style subclass
+        (default: "default"). Not used by e.g. the
+        TerminalFormatter.
+    ``full``
+        Tells the formatter to output a "full" document, i.e.
+        a complete self-contained document. This doesn't have
+        any effect for some formatters (default: false).
+    ``title``
+        If ``full`` is true, the title that should be used to
+        caption the document (default: '').
+    ``encoding``
+        If given, must be an encoding name. This will be used to
+        convert the Unicode token strings to byte strings in the
+        output. If it is "" or None, Unicode strings will be written
+        to the output file, which most file-like objects do not
+        support (default: None).
+    ``outencoding``
+        Overrides ``encoding`` if given.
+
+    """
+
+    #: Full name for the formatter, in human-readable form.
+    name = None
+
+    #: A list of short, unique identifiers that can be used to lookup
+    #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
+    aliases = []
+
+    #: A list of fnmatch patterns that match filenames for which this
+    #: formatter can produce output. The patterns in this list should be unique
+    #: among all formatters.
+    filenames = []
+
+    #: If True, this formatter outputs Unicode strings when no encoding
+    #: option is given.
+    unicodeoutput = True
+
+    def __init__(self, **options):
+        """
+        As with lexers, this constructor takes arbitrary optional arguments,
+        and if you override it, you should first process your own options, then
+        call the base class implementation.
+        """
+        self.style = _lookup_style(options.get('style', 'default'))
+        self.full = get_bool_opt(options, 'full', False)
+        self.title = options.get('title', '')
+        self.encoding = options.get('encoding', None) or None
+        if self.encoding in ('guess', 'chardet'):
+            # can happen for e.g. pygmentize -O encoding=guess
+            self.encoding = 'utf-8'
+        self.encoding = options.get('outencoding') or self.encoding
+        self.options = options
+
+    def get_style_defs(self, arg=''):
+        """
+        This method must return statements or declarations suitable to define
+        the current style for subsequent highlighted text (e.g. CSS classes
+        in the `HTMLFormatter`).
+
+        The optional argument `arg` can be used to modify the generation and
+        is formatter dependent (it is standardized because it can be given on
+        the command line).
+
+        This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
+        the `arg` is then given by the ``-a`` option.
+        """
+        return ''
+
+    def format(self, tokensource, outfile):
+        """
+        This method must format the tokens from the `tokensource` iterable and
+        write the formatted version to the file object `outfile`.
+
+        Formatter options can control how exactly the tokens are converted.
+        """
+        if self.encoding:
+            # wrap the outfile in a StreamWriter
+            outfile = codecs.lookup(self.encoding)[3](outfile)
+        return self.format_unencoded(tokensource, outfile)
+
+    # Allow writing Formatter[str] or Formatter[bytes]. That's equivalent to
+    # Formatter. This helps when using third-party type stubs from typeshed.
+    def __class_getitem__(cls, name):
+        return cls
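To illustrate the contract documented above: a toy subclass (not part of Pygments) only needs to implement `format_unencoded`, since the base `format()` takes care of the optional encoding wrapper:

    # Toy formatter sketch: writes every token value uppercased.
    from pygments.formatter import Formatter

    class UpperFormatter(Formatter):
        name = 'Upper'
        aliases = ['upper']

        def format_unencoded(self, tokensource, outfile):
            for ttype, value in tokensource:
                outfile.write(value.upper())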
+""" + +import re +import sys +import types +import fnmatch +from os.path import basename + +from pip._vendor.pygments.formatters._mapping import FORMATTERS +from pip._vendor.pygments.plugin import find_plugin_formatters +from pip._vendor.pygments.util import ClassNotFound + +__all__ = ['get_formatter_by_name', 'get_formatter_for_filename', + 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS) + +_formatter_cache = {} # classes by name +_pattern_cache = {} + + +def _fn_matches(fn, glob): + """Return whether the supplied file name fn matches pattern filename.""" + if glob not in _pattern_cache: + pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob)) + return pattern.match(fn) + return _pattern_cache[glob].match(fn) + + +def _load_formatters(module_name): + """Load a formatter (and all others in the module too).""" + mod = __import__(module_name, None, None, ['__all__']) + for formatter_name in mod.__all__: + cls = getattr(mod, formatter_name) + _formatter_cache[cls.name] = cls + + +def get_all_formatters(): + """Return a generator for all formatter classes.""" + # NB: this returns formatter classes, not info like get_all_lexers(). + for info in FORMATTERS.values(): + if info[1] not in _formatter_cache: + _load_formatters(info[0]) + yield _formatter_cache[info[1]] + for _, formatter in find_plugin_formatters(): + yield formatter + + +def find_formatter_class(alias): + """Lookup a formatter by alias. + + Returns None if not found. + """ + for module_name, name, aliases, _, _ in FORMATTERS.values(): + if alias in aliases: + if name not in _formatter_cache: + _load_formatters(module_name) + return _formatter_cache[name] + for _, cls in find_plugin_formatters(): + if alias in cls.aliases: + return cls + + +def get_formatter_by_name(_alias, **options): + """ + Return an instance of a :class:`.Formatter` subclass that has `alias` in its + aliases list. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that + alias is found. + """ + cls = find_formatter_class(_alias) + if cls is None: + raise ClassNotFound(f"no formatter found for name {_alias!r}") + return cls(**options) + + +def load_formatter_from_file(filename, formattername="CustomFormatter", **options): + """ + Return a `Formatter` subclass instance loaded from the provided file, relative + to the current directory. + + The file is expected to contain a Formatter class named ``formattername`` + (by default, CustomFormatter). Users should be very careful with the input, because + this method is equivalent to running ``eval()`` on the input file. The formatter is + given the `options` at its instantiation. + + :exc:`pygments.util.ClassNotFound` is raised if there are any errors loading + the formatter. + + .. 
+
+
+def load_formatter_from_file(filename, formattername="CustomFormatter", **options):
+    """
+    Return a `Formatter` subclass instance loaded from the provided file, relative
+    to the current directory.
+
+    The file is expected to contain a Formatter class named ``formattername``
+    (by default, CustomFormatter). Users should be very careful with the input, because
+    this method is equivalent to running ``eval()`` on the input file. The formatter is
+    given the `options` at its instantiation.
+
+    :exc:`pygments.util.ClassNotFound` is raised if there are any errors loading
+    the formatter.
+
+    .. versionadded:: 2.2
+    """
+    try:
+        # This empty dict will contain the namespace for the exec'd file
+        custom_namespace = {}
+        with open(filename, 'rb') as f:
+            exec(f.read(), custom_namespace)
+        # Retrieve the class `formattername` from that namespace
+        if formattername not in custom_namespace:
+            raise ClassNotFound(f'no valid {formattername} class found in {filename}')
+        formatter_class = custom_namespace[formattername]
+        # And finally instantiate it with the options
+        return formatter_class(**options)
+    except OSError as err:
+        raise ClassNotFound(f'cannot read {filename}: {err}')
+    except ClassNotFound:
+        raise
+    except Exception as err:
+        raise ClassNotFound(f'error when loading custom formatter: {err}')
+
+
+def get_formatter_for_filename(fn, **options):
+    """
+    Return a :class:`.Formatter` subclass instance that has a filename pattern
+    matching `fn`. The formatter is given the `options` at its instantiation.
+
+    Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
+    is found.
+    """
+    fn = basename(fn)
+    for modname, name, _, filenames, _ in FORMATTERS.values():
+        for filename in filenames:
+            if _fn_matches(fn, filename):
+                if name not in _formatter_cache:
+                    _load_formatters(modname)
+                return _formatter_cache[name](**options)
+    for _name, cls in find_plugin_formatters():
+        for filename in cls.filenames:
+            if _fn_matches(fn, filename):
+                return cls(**options)
+    raise ClassNotFound(f"no formatter found for file name {fn!r}")
+
+
+class _automodule(types.ModuleType):
+    """Automatically import formatters."""
+
+    def __getattr__(self, name):
+        info = FORMATTERS.get(name)
+        if info:
+            _load_formatters(info[0])
+            cls = _formatter_cache[info[1]]
+            setattr(self, name, cls)
+            return cls
+        raise AttributeError(name)
+
+
+oldmod = sys.modules[__name__]
+newmod = _automodule(__name__)
+newmod.__dict__.update(oldmod.__dict__)
+sys.modules[__name__] = newmod
+del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
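The module-replacement trick above lets `from pygments.formatters import HtmlFormatter` work without eagerly importing every formatter submodule. A stripped-down sketch of the same pattern (illustrative only, not the vendored code; `EXPENSIVE` is a stand-in name):

    # Sketch: lazy attribute loading by swapping the module object in sys.modules.
    import sys
    import types

    class _LazyModule(types.ModuleType):
        def __getattr__(self, name):
            if name == 'EXPENSIVE':            # stand-in for a registry lookup
                value = object()               # stand-in for the deferred import
                setattr(self, name, value)     # cache so __getattr__ fires only once
                return value
            raise AttributeError(name)

    _new = _LazyModule(__name__)
    _new.__dict__.update(sys.modules[__name__].__dict__)
    sys.modules[__name__] = _new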
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..b43fc3a
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-312.pyc
new file mode 100644
index 0000000..71a589f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/_mapping.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/_mapping.py
new file mode 100644
index 0000000..72ca840
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/formatters/_mapping.py
@@ -0,0 +1,23 @@
+# Automatically generated by scripts/gen_mapfiles.py.
+# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
+
+FORMATTERS = {
+    'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
+    'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+    'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+    'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
+    'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option). The ``<div>``'s CSS class can be set by the `cssclass` option."),
+    'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
+    'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+    'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+    'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
+    'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
+    'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
+    'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
+    'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
+    'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
+    'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
+    'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
+    'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
+    'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
+}
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexer.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexer.py
new file mode 100644
index 0000000..c05aa81
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexer.py
@@ -0,0 +1,963 @@
+"""
+    pygments.lexer
+    ~~~~~~~~~~~~~~
+
+    Base lexer classes.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+""" + +import re +import sys +import time + +from pip._vendor.pygments.filter import apply_filters, Filter +from pip._vendor.pygments.filters import get_filter_by_name +from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType +from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ + make_analysator, Future, guess_decode +from pip._vendor.pygments.regexopt import regex_opt + +__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', + 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', + 'default', 'words', 'line_re'] + +line_re = re.compile('.*?\n') + +_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), + (b'\xff\xfe\0\0', 'utf-32'), + (b'\0\0\xfe\xff', 'utf-32be'), + (b'\xff\xfe', 'utf-16'), + (b'\xfe\xff', 'utf-16be')] + +_default_analyse = staticmethod(lambda x: 0.0) + + +class LexerMeta(type): + """ + This metaclass automagically converts ``analyse_text`` methods into + static methods which always return float values. + """ + + def __new__(mcs, name, bases, d): + if 'analyse_text' in d: + d['analyse_text'] = make_analysator(d['analyse_text']) + return type.__new__(mcs, name, bases, d) + + +class Lexer(metaclass=LexerMeta): + """ + Lexer for a specific language. + + See also :doc:`lexerdevelopment`, a high-level guide to writing + lexers. + + Lexer classes have attributes used for choosing the most appropriate + lexer based on various criteria. + + .. autoattribute:: name + :no-value: + .. autoattribute:: aliases + :no-value: + .. autoattribute:: filenames + :no-value: + .. autoattribute:: alias_filenames + .. autoattribute:: mimetypes + :no-value: + .. autoattribute:: priority + + Lexers included in Pygments should have two additional attributes: + + .. autoattribute:: url + :no-value: + .. autoattribute:: version_added + :no-value: + + Lexers included in Pygments may have additional attributes: + + .. autoattribute:: _example + :no-value: + + You can pass options to the constructor. The basic options recognized + by all lexers and processed by the base `Lexer` class are: + + ``stripnl`` + Strip leading and trailing newlines from the input (default: True). + ``stripall`` + Strip all leading and trailing whitespace from the input + (default: False). + ``ensurenl`` + Make sure that the input ends with a newline (default: True). This + is required for some lexers that consume input linewise. + + .. versionadded:: 1.3 + + ``tabsize`` + If given and greater than 0, expand tabs in the input (default: 0). + ``encoding`` + If given, must be an encoding name. This encoding will be used to + convert the input string to Unicode, if it is not already a Unicode + string (default: ``'guess'``, which uses a simple UTF-8 / Locale / + Latin1 detection. Can also be ``'chardet'`` to use the chardet + library, if it is installed. + ``inencoding`` + Overrides the ``encoding`` if given. + """ + + #: Full name of the lexer, in human-readable form + name = None + + #: A list of short, unique identifiers that can be used to look + #: up the lexer from a list, e.g., using `get_lexer_by_name()`. + aliases = [] + + #: A list of `fnmatch` patterns that match filenames which contain + #: content for this lexer. The patterns in this list should be unique among + #: all lexers. + filenames = [] + + #: A list of `fnmatch` patterns that match filenames which may or may not + #: contain content for this lexer. 
This list is used by the + #: :func:`.guess_lexer_for_filename()` function, to determine which lexers + #: are then included in guessing the correct one. That means that + #: e.g. every lexer for HTML and a template language should include + #: ``\*.html`` in this list. + alias_filenames = [] + + #: A list of MIME types for content that can be lexed with this lexer. + mimetypes = [] + + #: Priority, should multiple lexers match and no content is provided + priority = 0 + + #: URL of the language specification/definition. Used in the Pygments + #: documentation. Set to an empty string to disable. + url = None + + #: Version of Pygments in which the lexer was added. + version_added = None + + #: Example file name. Relative to the ``tests/examplefiles`` directory. + #: This is used by the documentation generator to show an example. + _example = None + + def __init__(self, **options): + """ + This constructor takes arbitrary options as keyword arguments. + Every subclass must first process its own options and then call + the `Lexer` constructor, since it processes the basic + options like `stripnl`. + + An example looks like this: + + .. sourcecode:: python + + def __init__(self, **options): + self.compress = options.get('compress', '') + Lexer.__init__(self, **options) + + As these options must all be specifiable as strings (due to the + command line usage), there are various utility functions + available to help with that, see `Utilities`_. + """ + self.options = options + self.stripnl = get_bool_opt(options, 'stripnl', True) + self.stripall = get_bool_opt(options, 'stripall', False) + self.ensurenl = get_bool_opt(options, 'ensurenl', True) + self.tabsize = get_int_opt(options, 'tabsize', 0) + self.encoding = options.get('encoding', 'guess') + self.encoding = options.get('inencoding') or self.encoding + self.filters = [] + for filter_ in get_list_opt(options, 'filters', ()): + self.add_filter(filter_) + + def __repr__(self): + if self.options: + return f'' + else: + return f'' + + def add_filter(self, filter_, **options): + """ + Add a new stream filter to this lexer. + """ + if not isinstance(filter_, Filter): + filter_ = get_filter_by_name(filter_, **options) + self.filters.append(filter_) + + def analyse_text(text): + """ + A static method which is called for lexer guessing. + + It should analyse the text and return a float in the range + from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer + will not be selected as the most probable one, if it returns + ``1.0``, it will be selected immediately. This is used by + `guess_lexer`. + + The `LexerMeta` metaclass automatically wraps this function so + that it works like a static method (no ``self`` or ``cls`` + parameter) and the return value is automatically converted to + `float`. If the return value is an object that is boolean `False` + it's the same as if the return values was ``0.0``. + """ + + def _preprocess_lexer_input(self, text): + """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines.""" + + if not isinstance(text, str): + if self.encoding == 'guess': + text, _ = guess_decode(text) + elif self.encoding == 'chardet': + try: + # pip vendoring note: this code is not reachable by pip, + # removed import of chardet to make it clear. 
+ raise ImportError('chardet is not vendored by pip') + except ImportError as e: + raise ImportError('To enable chardet encoding guessing, ' + 'please install the chardet library ' + 'from http://chardet.feedparser.org/') from e + # check for BOM first + decoded = None + for bom, encoding in _encoding_map: + if text.startswith(bom): + decoded = text[len(bom):].decode(encoding, 'replace') + break + # no BOM found, so use chardet + if decoded is None: + enc = chardet.detect(text[:1024]) # Guess using first 1KB + decoded = text.decode(enc.get('encoding') or 'utf-8', + 'replace') + text = decoded + else: + text = text.decode(self.encoding) + if text.startswith('\ufeff'): + text = text[len('\ufeff'):] + else: + if text.startswith('\ufeff'): + text = text[len('\ufeff'):] + + # text now *is* a unicode string + text = text.replace('\r\n', '\n') + text = text.replace('\r', '\n') + if self.stripall: + text = text.strip() + elif self.stripnl: + text = text.strip('\n') + if self.tabsize > 0: + text = text.expandtabs(self.tabsize) + if self.ensurenl and not text.endswith('\n'): + text += '\n' + + return text + + def get_tokens(self, text, unfiltered=False): + """ + This method is the basic interface of a lexer. It is called by + the `highlight()` function. It must process the text and return an + iterable of ``(tokentype, value)`` pairs from `text`. + + Normally, you don't need to override this method. The default + implementation processes the options recognized by all lexers + (`stripnl`, `stripall` and so on), and then yields all tokens + from `get_tokens_unprocessed()`, with the ``index`` dropped. + + If `unfiltered` is set to `True`, the filtering mechanism is + bypassed even if filters are defined. + """ + text = self._preprocess_lexer_input(text) + + def streamer(): + for _, t, v in self.get_tokens_unprocessed(text): + yield t, v + stream = streamer() + if not unfiltered: + stream = apply_filters(stream, self.filters, self) + return stream + + def get_tokens_unprocessed(self, text): + """ + This method should process the text and return an iterable of + ``(index, tokentype, value)`` tuples where ``index`` is the starting + position of the token within the input text. + + It must be overridden by subclasses. It is recommended to + implement it as a generator to maximize effectiveness. + """ + raise NotImplementedError + + +class DelegatingLexer(Lexer): + """ + This lexer takes two lexer as arguments. A root lexer and + a language lexer. First everything is scanned using the language + lexer, afterwards all ``Other`` tokens are lexed using the root + lexer. + + The lexers from the ``template`` lexer package use this base lexer. 
+ """ + + def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): + self.root_lexer = _root_lexer(**options) + self.language_lexer = _language_lexer(**options) + self.needle = _needle + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + buffered = '' + insertions = [] + lng_buffer = [] + for i, t, v in self.language_lexer.get_tokens_unprocessed(text): + if t is self.needle: + if lng_buffer: + insertions.append((len(buffered), lng_buffer)) + lng_buffer = [] + buffered += v + else: + lng_buffer.append((i, t, v)) + if lng_buffer: + insertions.append((len(buffered), lng_buffer)) + return do_insertions(insertions, + self.root_lexer.get_tokens_unprocessed(buffered)) + + +# ------------------------------------------------------------------------------ +# RegexLexer and ExtendedRegexLexer +# + + +class include(str): # pylint: disable=invalid-name + """ + Indicates that a state should include rules from another state. + """ + pass + + +class _inherit: + """ + Indicates the a state should inherit from its superclass. + """ + def __repr__(self): + return 'inherit' + +inherit = _inherit() # pylint: disable=invalid-name + + +class combined(tuple): # pylint: disable=invalid-name + """ + Indicates a state combined from multiple states. + """ + + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def __init__(self, *args): + # tuple.__init__ doesn't do anything + pass + + +class _PseudoMatch: + """ + A pseudo match object constructed from a string. + """ + + def __init__(self, start, text): + self._text = text + self._start = start + + def start(self, arg=None): + return self._start + + def end(self, arg=None): + return self._start + len(self._text) + + def group(self, arg=None): + if arg: + raise IndexError('No such group') + return self._text + + def groups(self): + return (self._text,) + + def groupdict(self): + return {} + + +def bygroups(*args): + """ + Callback that yields multiple actions for each group in the match. + """ + def callback(lexer, match, ctx=None): + for i, action in enumerate(args): + if action is None: + continue + elif type(action) is _TokenType: + data = match.group(i + 1) + if data: + yield match.start(i + 1), action, data + else: + data = match.group(i + 1) + if data is not None: + if ctx: + ctx.pos = match.start(i + 1) + for item in action(lexer, + _PseudoMatch(match.start(i + 1), data), ctx): + if item: + yield item + if ctx: + ctx.pos = match.end() + return callback + + +class _This: + """ + Special singleton used for indicating the caller class. + Used by ``using``. + """ + +this = _This() + + +def using(_other, **kwargs): + """ + Callback that processes the match with a different lexer. + + The keyword arguments are forwarded to the lexer, except `state` which + is handled separately. + + `state` specifies the state that the new lexer will start in, and can + be an enumerable such as ('root', 'inline', 'string') or a simple + string which is assumed to be on top of the root state. + + Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. 
+ """ + gt_kwargs = {} + if 'state' in kwargs: + s = kwargs.pop('state') + if isinstance(s, (list, tuple)): + gt_kwargs['stack'] = s + else: + gt_kwargs['stack'] = ('root', s) + + if _other is this: + def callback(lexer, match, ctx=None): + # if keyword arguments are given the callback + # function has to create a new lexer instance + if kwargs: + # XXX: cache that somehow + kwargs.update(lexer.options) + lx = lexer.__class__(**kwargs) + else: + lx = lexer + s = match.start() + for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): + yield i + s, t, v + if ctx: + ctx.pos = match.end() + else: + def callback(lexer, match, ctx=None): + # XXX: cache that somehow + kwargs.update(lexer.options) + lx = _other(**kwargs) + + s = match.start() + for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): + yield i + s, t, v + if ctx: + ctx.pos = match.end() + return callback + + +class default: + """ + Indicates a state or state action (e.g. #pop) to apply. + For example default('#pop') is equivalent to ('', Token, '#pop') + Note that state tuples may be used as well. + + .. versionadded:: 2.0 + """ + def __init__(self, state): + self.state = state + + +class words(Future): + """ + Indicates a list of literal words that is transformed into an optimized + regex that matches any of the words. + + .. versionadded:: 2.0 + """ + def __init__(self, words, prefix='', suffix=''): + self.words = words + self.prefix = prefix + self.suffix = suffix + + def get(self): + return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix) + + +class RegexLexerMeta(LexerMeta): + """ + Metaclass for RegexLexer, creates the self._tokens attribute from + self.tokens on the first instantiation. + """ + + def _process_regex(cls, regex, rflags, state): + """Preprocess the regular expression component of a token definition.""" + if isinstance(regex, Future): + regex = regex.get() + return re.compile(regex, rflags).match + + def _process_token(cls, token): + """Preprocess the token component of a token definition.""" + assert type(token) is _TokenType or callable(token), \ + f'token type must be simple type or callable, not {token!r}' + return token + + def _process_new_state(cls, new_state, unprocessed, processed): + """Preprocess the state transition action of a token definition.""" + if isinstance(new_state, str): + # an existing state + if new_state == '#pop': + return -1 + elif new_state in unprocessed: + return (new_state,) + elif new_state == '#push': + return new_state + elif new_state[:5] == '#pop:': + return -int(new_state[5:]) + else: + assert False, f'unknown new state {new_state!r}' + elif isinstance(new_state, combined): + # combine a new state from existing ones + tmp_state = '_tmp_%d' % cls._tmpname + cls._tmpname += 1 + itokens = [] + for istate in new_state: + assert istate != new_state, f'circular state ref {istate!r}' + itokens.extend(cls._process_state(unprocessed, + processed, istate)) + processed[tmp_state] = itokens + return (tmp_state,) + elif isinstance(new_state, tuple): + # push more than one state + for istate in new_state: + assert (istate in unprocessed or + istate in ('#pop', '#push')), \ + 'unknown new state ' + istate + return new_state + else: + assert False, f'unknown new state def {new_state!r}' + + def _process_state(cls, unprocessed, processed, state): + """Preprocess a single state definition.""" + assert isinstance(state, str), f"wrong state name {state!r}" + assert state[0] != '#', f"invalid state name {state!r}" + if state in processed: + return 
processed[state] + tokens = processed[state] = [] + rflags = cls.flags + for tdef in unprocessed[state]: + if isinstance(tdef, include): + # it's a state reference + assert tdef != state, f"circular state reference {state!r}" + tokens.extend(cls._process_state(unprocessed, processed, + str(tdef))) + continue + if isinstance(tdef, _inherit): + # should be processed already, but may not in the case of: + # 1. the state has no counterpart in any parent + # 2. the state includes more than one 'inherit' + continue + if isinstance(tdef, default): + new_state = cls._process_new_state(tdef.state, unprocessed, processed) + tokens.append((re.compile('').match, None, new_state)) + continue + + assert type(tdef) is tuple, f"wrong rule def {tdef!r}" + + try: + rex = cls._process_regex(tdef[0], rflags, state) + except Exception as err: + raise ValueError(f"uncompilable regex {tdef[0]!r} in state {state!r} of {cls!r}: {err}") from err + + token = cls._process_token(tdef[1]) + + if len(tdef) == 2: + new_state = None + else: + new_state = cls._process_new_state(tdef[2], + unprocessed, processed) + + tokens.append((rex, token, new_state)) + return tokens + + def process_tokendef(cls, name, tokendefs=None): + """Preprocess a dictionary of token definitions.""" + processed = cls._all_tokens[name] = {} + tokendefs = tokendefs or cls.tokens[name] + for state in list(tokendefs): + cls._process_state(tokendefs, processed, state) + return processed + + def get_tokendefs(cls): + """ + Merge tokens from superclasses in MRO order, returning a single tokendef + dictionary. + + Any state that is not defined by a subclass will be inherited + automatically. States that *are* defined by subclasses will, by + default, override that state in the superclass. If a subclass wishes to + inherit definitions from a superclass, it can use the special value + "inherit", which will cause the superclass' state definition to be + included at that point in the state. + """ + tokens = {} + inheritable = {} + for c in cls.__mro__: + toks = c.__dict__.get('tokens', {}) + + for state, items in toks.items(): + curitems = tokens.get(state) + if curitems is None: + # N.b. because this is assigned by reference, sufficiently + # deep hierarchies are processed incrementally (e.g. for + # A(B), B(C), C(RegexLexer), B will be premodified so X(B) + # will not see any inherits in B). + tokens[state] = items + try: + inherit_ndx = items.index(inherit) + except ValueError: + continue + inheritable[state] = inherit_ndx + continue + + inherit_ndx = inheritable.pop(state, None) + if inherit_ndx is None: + continue + + # Replace the "inherit" value with the items + curitems[inherit_ndx:inherit_ndx+1] = items + try: + # N.b. this is the index in items (that is, the superclass + # copy), so offset required when storing below. + new_inh_ndx = items.index(inherit) + except ValueError: + pass + else: + inheritable[state] = inherit_ndx + new_inh_ndx + + return tokens + + def __call__(cls, *args, **kwds): + """Instantiate cls after preprocessing its token definitions.""" + if '_tokens' not in cls.__dict__: + cls._all_tokens = {} + cls._tmpname = 0 + if hasattr(cls, 'token_variants') and cls.token_variants: + # don't process yet + pass + else: + cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) + + return type.__call__(cls, *args, **kwds) + + +class RegexLexer(Lexer, metaclass=RegexLexerMeta): + """ + Base for simple stateful regular expression-based lexers. 
+ Simplifies the lexing process so that you need only + provide a list of states and regular expressions. + """ + + #: Flags for compiling the regular expressions. + #: Defaults to MULTILINE. + flags = re.MULTILINE + + #: At all time there is a stack of states. Initially, the stack contains + #: a single state 'root'. The top of the stack is called "the current state". + #: + #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` + #: + #: ``new_state`` can be omitted to signify no state transition. + #: If ``new_state`` is a string, it is pushed on the stack. This ensure + #: the new current state is ``new_state``. + #: If ``new_state`` is a tuple of strings, all of those strings are pushed + #: on the stack and the current state will be the last element of the list. + #: ``new_state`` can also be ``combined('state1', 'state2', ...)`` + #: to signify a new, anonymous state combined from the rules of two + #: or more existing ones. + #: Furthermore, it can be '#pop' to signify going back one step in + #: the state stack, or '#push' to push the current state on the stack + #: again. Note that if you push while in a combined state, the combined + #: state itself is pushed, and not only the state in which the rule is + #: defined. + #: + #: The tuple can also be replaced with ``include('state')``, in which + #: case the rules from the state named by the string are included in the + #: current one. + tokens = {} + + def get_tokens_unprocessed(self, text, stack=('root',)): + """ + Split ``text`` into (tokentype, text) pairs. + + ``stack`` is the initial stack (default: ``['root']``) + """ + pos = 0 + tokendefs = self._tokens + statestack = list(stack) + statetokens = tokendefs[statestack[-1]] + while 1: + for rexmatch, action, new_state in statetokens: + m = rexmatch(text, pos) + if m: + if action is not None: + if type(action) is _TokenType: + yield pos, action, m.group() + else: + yield from action(self, m) + pos = m.end() + if new_state is not None: + # state transition + if isinstance(new_state, tuple): + for state in new_state: + if state == '#pop': + if len(statestack) > 1: + statestack.pop() + elif state == '#push': + statestack.append(statestack[-1]) + else: + statestack.append(state) + elif isinstance(new_state, int): + # pop, but keep at least one state on the stack + # (random code leading to unexpected pops should + # not allow exceptions) + if abs(new_state) >= len(statestack): + del statestack[1:] + else: + del statestack[new_state:] + elif new_state == '#push': + statestack.append(statestack[-1]) + else: + assert False, f"wrong state def: {new_state!r}" + statetokens = tokendefs[statestack[-1]] + break + else: + # We are here only if all state tokens have been considered + # and there was not a match on any of them. + try: + if text[pos] == '\n': + # at EOL, reset state to "root" + statestack = ['root'] + statetokens = tokendefs['root'] + yield pos, Whitespace, '\n' + pos += 1 + continue + yield pos, Error, text[pos] + pos += 1 + except IndexError: + break + + +class LexerContext: + """ + A helper object that holds lexer position data. + """ + + def __init__(self, text, pos, stack=None, end=None): + self.text = text + self.pos = pos + self.end = end or len(text) # end=0 not supported ;-) + self.stack = stack or ['root'] + + def __repr__(self): + return f'LexerContext({self.text!r}, {self.pos!r}, {self.stack!r})' + + +class ExtendedRegexLexer(RegexLexer): + """ + A RegexLexer that uses a context object to store its state. 
+ """ + + def get_tokens_unprocessed(self, text=None, context=None): + """ + Split ``text`` into (tokentype, text) pairs. + If ``context`` is given, use this lexer context instead. + """ + tokendefs = self._tokens + if not context: + ctx = LexerContext(text, 0) + statetokens = tokendefs['root'] + else: + ctx = context + statetokens = tokendefs[ctx.stack[-1]] + text = ctx.text + while 1: + for rexmatch, action, new_state in statetokens: + m = rexmatch(text, ctx.pos, ctx.end) + if m: + if action is not None: + if type(action) is _TokenType: + yield ctx.pos, action, m.group() + ctx.pos = m.end() + else: + yield from action(self, m, ctx) + if not new_state: + # altered the state stack? + statetokens = tokendefs[ctx.stack[-1]] + # CAUTION: callback must set ctx.pos! + if new_state is not None: + # state transition + if isinstance(new_state, tuple): + for state in new_state: + if state == '#pop': + if len(ctx.stack) > 1: + ctx.stack.pop() + elif state == '#push': + ctx.stack.append(ctx.stack[-1]) + else: + ctx.stack.append(state) + elif isinstance(new_state, int): + # see RegexLexer for why this check is made + if abs(new_state) >= len(ctx.stack): + del ctx.stack[1:] + else: + del ctx.stack[new_state:] + elif new_state == '#push': + ctx.stack.append(ctx.stack[-1]) + else: + assert False, f"wrong state def: {new_state!r}" + statetokens = tokendefs[ctx.stack[-1]] + break + else: + try: + if ctx.pos >= ctx.end: + break + if text[ctx.pos] == '\n': + # at EOL, reset state to "root" + ctx.stack = ['root'] + statetokens = tokendefs['root'] + yield ctx.pos, Text, '\n' + ctx.pos += 1 + continue + yield ctx.pos, Error, text[ctx.pos] + ctx.pos += 1 + except IndexError: + break + + +def do_insertions(insertions, tokens): + """ + Helper for lexers which must combine the results of several + sublexers. + + ``insertions`` is a list of ``(index, itokens)`` pairs. + Each ``itokens`` iterable should be inserted at position + ``index`` into the token stream given by the ``tokens`` + argument. + + The result is a combined token stream. + + TODO: clean up the code here. + """ + insertions = iter(insertions) + try: + index, itokens = next(insertions) + except StopIteration: + # no insertions + yield from tokens + return + + realpos = None + insleft = True + + # iterate over the token stream where we want to insert + # the tokens from the insertion list. + for i, t, v in tokens: + # first iteration. 
+        if realpos is None:
+            realpos = i
+        oldi = 0
+        while insleft and i + len(v) >= index:
+            tmpval = v[oldi:index - i]
+            if tmpval:
+                yield realpos, t, tmpval
+                realpos += len(tmpval)
+            for it_index, it_token, it_value in itokens:
+                yield realpos, it_token, it_value
+                realpos += len(it_value)
+            oldi = index - i
+            try:
+                index, itokens = next(insertions)
+            except StopIteration:
+                insleft = False
+                break  # not strictly necessary
+        if oldi < len(v):
+            yield realpos, t, v[oldi:]
+            realpos += len(v) - oldi
+
+    # leftover tokens
+    while insleft:
+        # no normal tokens, set realpos to zero
+        realpos = realpos or 0
+        for p, t, v in itokens:
+            yield realpos, t, v
+            realpos += len(v)
+        try:
+            index, itokens = next(insertions)
+        except StopIteration:
+            insleft = False
+            break  # not strictly necessary
+
+
+class ProfilingRegexLexerMeta(RegexLexerMeta):
+    """Metaclass for ProfilingRegexLexer, collects regex timing info."""
+
+    def _process_regex(cls, regex, rflags, state):
+        if isinstance(regex, words):
+            rex = regex_opt(regex.words, prefix=regex.prefix,
+                            suffix=regex.suffix)
+        else:
+            rex = regex
+        compiled = re.compile(rex, rflags)
+
+        def match_func(text, pos, endpos=sys.maxsize):
+            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
+            t0 = time.time()
+            res = compiled.match(text, pos, endpos)
+            t1 = time.time()
+            info[0] += 1
+            info[1] += t1 - t0
+            return res
+        return match_func
+
+
+class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
+    """Drop-in replacement for RegexLexer that does profiling of its regexes."""
+
+    _prof_data = []
+    _prof_sort_index = 4  # defaults to time per call
+
+    def get_tokens_unprocessed(self, text, stack=('root',)):
+        # this needs to be a stack, since using(this) will produce nested calls
+        self.__class__._prof_data.append({})
+        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
+        rawdata = self.__class__._prof_data.pop()
+        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
+                        n, 1000 * t, 1000 * t / n)
+                       for ((s, r), (n, t)) in rawdata.items()),
+                      key=lambda x: x[self._prof_sort_index],
+                      reverse=True)
+        sum_total = sum(x[3] for x in data)
+
+        print()
+        print('Profiling result for %s lexing %d chars in %.3f ms' %
+              (self.__class__.__name__, len(text), sum_total))
+        print('=' * 110)
+        print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
+        print('-' * 110)
+        for d in data:
+            print('%-20s %-65s %5d %8.4f %8.4f' % d)
+        print('=' * 110)
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__init__.py
new file mode 100644
index 0000000..49184ec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__init__.py
@@ -0,0 +1,362 @@
+"""
+    pygments.lexers
+    ~~~~~~~~~~~~~~~
+
+    Pygments lexers.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+""" + +import re +import sys +import types +import fnmatch +from os.path import basename + +from pip._vendor.pygments.lexers._mapping import LEXERS +from pip._vendor.pygments.modeline import get_filetype_from_buffer +from pip._vendor.pygments.plugin import find_plugin_lexers +from pip._vendor.pygments.util import ClassNotFound, guess_decode + +COMPAT = { + 'Python3Lexer': 'PythonLexer', + 'Python3TracebackLexer': 'PythonTracebackLexer', + 'LeanLexer': 'Lean3Lexer', +} + +__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class', + 'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT) + +_lexer_cache = {} +_pattern_cache = {} + + +def _fn_matches(fn, glob): + """Return whether the supplied file name fn matches pattern filename.""" + if glob not in _pattern_cache: + pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob)) + return pattern.match(fn) + return _pattern_cache[glob].match(fn) + + +def _load_lexers(module_name): + """Load a lexer (and all others in the module too).""" + mod = __import__(module_name, None, None, ['__all__']) + for lexer_name in mod.__all__: + cls = getattr(mod, lexer_name) + _lexer_cache[cls.name] = cls + + +def get_all_lexers(plugins=True): + """Return a generator of tuples in the form ``(name, aliases, + filenames, mimetypes)`` of all know lexers. + + If *plugins* is true (the default), plugin lexers supplied by entrypoints + are also returned. Otherwise, only builtin ones are considered. + """ + for item in LEXERS.values(): + yield item[1:] + if plugins: + for lexer in find_plugin_lexers(): + yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes + + +def find_lexer_class(name): + """ + Return the `Lexer` subclass that with the *name* attribute as given by + the *name* argument. + """ + if name in _lexer_cache: + return _lexer_cache[name] + # lookup builtin lexers + for module_name, lname, aliases, _, _ in LEXERS.values(): + if name == lname: + _load_lexers(module_name) + return _lexer_cache[name] + # continue with lexers from setuptools entrypoints + for cls in find_plugin_lexers(): + if cls.name == name: + return cls + + +def find_lexer_class_by_name(_alias): + """ + Return the `Lexer` subclass that has `alias` in its aliases list, without + instantiating it. + + Like `get_lexer_by_name`, but does not instantiate the class. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is + found. + + .. versionadded:: 2.2 + """ + if not _alias: + raise ClassNotFound(f'no lexer for alias {_alias!r} found') + # lookup builtin lexers + for module_name, name, aliases, _, _ in LEXERS.values(): + if _alias.lower() in aliases: + if name not in _lexer_cache: + _load_lexers(module_name) + return _lexer_cache[name] + # continue with lexers from setuptools entrypoints + for cls in find_plugin_lexers(): + if _alias.lower() in cls.aliases: + return cls + raise ClassNotFound(f'no lexer for alias {_alias!r} found') + + +def get_lexer_by_name(_alias, **options): + """ + Return an instance of a `Lexer` subclass that has `alias` in its + aliases list. The lexer is given the `options` at its + instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is + found. 
+ """ + if not _alias: + raise ClassNotFound(f'no lexer for alias {_alias!r} found') + + # lookup builtin lexers + for module_name, name, aliases, _, _ in LEXERS.values(): + if _alias.lower() in aliases: + if name not in _lexer_cache: + _load_lexers(module_name) + return _lexer_cache[name](**options) + # continue with lexers from setuptools entrypoints + for cls in find_plugin_lexers(): + if _alias.lower() in cls.aliases: + return cls(**options) + raise ClassNotFound(f'no lexer for alias {_alias!r} found') + + +def load_lexer_from_file(filename, lexername="CustomLexer", **options): + """Load a lexer from a file. + + This method expects a file located relative to the current working + directory, which contains a Lexer class. By default, it expects the + Lexer to be name CustomLexer; you can specify your own class name + as the second argument to this function. + + Users should be very careful with the input, because this method + is equivalent to running eval on the input file. + + Raises ClassNotFound if there are any problems importing the Lexer. + + .. versionadded:: 2.2 + """ + try: + # This empty dict will contain the namespace for the exec'd file + custom_namespace = {} + with open(filename, 'rb') as f: + exec(f.read(), custom_namespace) + # Retrieve the class `lexername` from that namespace + if lexername not in custom_namespace: + raise ClassNotFound(f'no valid {lexername} class found in {filename}') + lexer_class = custom_namespace[lexername] + # And finally instantiate it with the options + return lexer_class(**options) + except OSError as err: + raise ClassNotFound(f'cannot read {filename}: {err}') + except ClassNotFound: + raise + except Exception as err: + raise ClassNotFound(f'error when loading custom lexer: {err}') + + +def find_lexer_class_for_filename(_fn, code=None): + """Get a lexer for a filename. + + If multiple lexers match the filename pattern, use ``analyse_text()`` to + figure out which one is more appropriate. + + Returns None if not found. + """ + matches = [] + fn = basename(_fn) + for modname, name, _, filenames, _ in LEXERS.values(): + for filename in filenames: + if _fn_matches(fn, filename): + if name not in _lexer_cache: + _load_lexers(modname) + matches.append((_lexer_cache[name], filename)) + for cls in find_plugin_lexers(): + for filename in cls.filenames: + if _fn_matches(fn, filename): + matches.append((cls, filename)) + + if isinstance(code, bytes): + # decode it, since all analyse_text functions expect unicode + code = guess_decode(code) + + def get_rating(info): + cls, filename = info + # explicit patterns get a bonus + bonus = '*' not in filename and 0.5 or 0 + # The class _always_ defines analyse_text because it's included in + # the Lexer class. The default implementation returns None which + # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py + # to find lexers which need it overridden. + if code: + return cls.analyse_text(code) + bonus, cls.__name__ + return cls.priority + bonus, cls.__name__ + + if matches: + matches.sort(key=get_rating) + # print "Possible lexers, after sort:", matches + return matches[-1][0] + + +def get_lexer_for_filename(_fn, code=None, **options): + """Get a lexer for a filename. + + Return a `Lexer` subclass instance that has a filename pattern + matching `fn`. The lexer is given the `options` at its + instantiation. + + Raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename + is found. 
+
+    If multiple lexers match the filename pattern, use their ``analyse_text()``
+    methods to figure out which one is more appropriate.
+    """
+    res = find_lexer_class_for_filename(_fn, code)
+    if not res:
+        raise ClassNotFound(f'no lexer for filename {_fn!r} found')
+    return res(**options)
+
+
+def get_lexer_for_mimetype(_mime, **options):
+    """
+    Return a `Lexer` subclass instance that has `mime` in its mimetype
+    list. The lexer is given the `options` at its instantiation.
+
+    Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype
+    is found.
+    """
+    for modname, name, _, _, mimetypes in LEXERS.values():
+        if _mime in mimetypes:
+            if name not in _lexer_cache:
+                _load_lexers(modname)
+            return _lexer_cache[name](**options)
+    for cls in find_plugin_lexers():
+        if _mime in cls.mimetypes:
+            return cls(**options)
+    raise ClassNotFound(f'no lexer for mimetype {_mime!r} found')
+
+
+def _iter_lexerclasses(plugins=True):
+    """Return an iterator over all lexer classes."""
+    for key in sorted(LEXERS):
+        module_name, name = LEXERS[key][:2]
+        if name not in _lexer_cache:
+            _load_lexers(module_name)
+        yield _lexer_cache[name]
+    if plugins:
+        yield from find_plugin_lexers()
+
+
+def guess_lexer_for_filename(_fn, _text, **options):
+    """
+    As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
+    or `alias_filenames` that matches `filename` are taken into consideration.
+
+    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+    handle the content.
+    """
+    fn = basename(_fn)
+    primary = {}
+    matching_lexers = set()
+    for lexer in _iter_lexerclasses():
+        for filename in lexer.filenames:
+            if _fn_matches(fn, filename):
+                matching_lexers.add(lexer)
+                primary[lexer] = True
+        for filename in lexer.alias_filenames:
+            if _fn_matches(fn, filename):
+                matching_lexers.add(lexer)
+                primary[lexer] = False
+    if not matching_lexers:
+        raise ClassNotFound(f'no lexer for filename {fn!r} found')
+    if len(matching_lexers) == 1:
+        return matching_lexers.pop()(**options)
+    result = []
+    for lexer in matching_lexers:
+        rv = lexer.analyse_text(_text)
+        if rv == 1.0:
+            return lexer(**options)
+        result.append((rv, lexer))
+
+    def type_sort(t):
+        # sort by:
+        # - analyse score
+        # - is primary filename pattern?
+        # - priority
+        # - last resort: class name
+        return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
+    result.sort(key=type_sort)
+
+    return result[-1][1](**options)
+
+
+def guess_lexer(_text, **options):
+    """
+    Return a `Lexer` subclass instance that's guessed from the text in
+    `text`. For that, the :meth:`.analyse_text()` method of every known lexer
+    class is called with the text as argument, and the lexer which returned the
+    highest value will be instantiated and returned.
+
+    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+    handle the content.
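+
+    A sketch of typical use (the shebang string is just sample input)::
+
+        lexer = guess_lexer('#!/usr/bin/env python')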
+ """ + + if not isinstance(_text, str): + inencoding = options.get('inencoding', options.get('encoding')) + if inencoding: + _text = _text.decode(inencoding or 'utf8') + else: + _text, _ = guess_decode(_text) + + # try to get a vim modeline first + ft = get_filetype_from_buffer(_text) + + if ft is not None: + try: + return get_lexer_by_name(ft, **options) + except ClassNotFound: + pass + + best_lexer = [0.0, None] + for lexer in _iter_lexerclasses(): + rv = lexer.analyse_text(_text) + if rv == 1.0: + return lexer(**options) + if rv > best_lexer[0]: + best_lexer[:] = (rv, lexer) + if not best_lexer[0] or best_lexer[1] is None: + raise ClassNotFound('no lexer matching the text found') + return best_lexer[1](**options) + + +class _automodule(types.ModuleType): + """Automatically import lexers.""" + + def __getattr__(self, name): + info = LEXERS.get(name) + if info: + _load_lexers(info[0]) + cls = _lexer_cache[info[1]] + setattr(self, name, cls) + return cls + if name in COMPAT: + return getattr(self, COMPAT[name]) + raise AttributeError(name) + + +oldmod = sys.modules[__name__] +newmod = _automodule(__name__) +newmod.__dict__.update(oldmod.__dict__) +sys.modules[__name__] = newmod +del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..7786165 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-312.pyc new file mode 100644 index 0000000..391642a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-312.pyc new file mode 100644 index 0000000..3f13bb8 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/_mapping.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/_mapping.py new file mode 100644 index 0000000..c0d6a8a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/_mapping.py @@ -0,0 +1,602 @@ +# Automatically generated by scripts/gen_mapfiles.py. +# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead. 
+ +LEXERS = { + 'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)), + 'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()), + 'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()), + 'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)), + 'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')), + 'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')), + 'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)), + 'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()), + 'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)), + 'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()), + 'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)), + 'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)), + 'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()), + 'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()), + 'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()), + 'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()), + 'AntlrCSharpLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()), + 'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()), + 'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()), + 'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()), + 'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()), + 'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()), + 'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()), + 'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()), + 'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)), + 'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()), + 'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)), + 'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), 
()), + 'ArturoLexer': ('pip._vendor.pygments.lexers.arturo', 'Arturo', ('arturo', 'art'), ('*.art',), ()), + 'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature', 'application/pem-certificate-chain')), + 'Asn1Lexer': ('pip._vendor.pygments.lexers.asn1', 'ASN.1', ('asn1',), ('*.asn1',), ()), + 'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)), + 'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)), + 'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()), + 'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)), + 'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)), + 'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)), + 'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()), + 'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)), + 'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()), + 'BQNLexer': ('pip._vendor.pygments.lexers.bqn', 'BQN', ('bqn',), ('*.bqn',), ()), + 'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()), + 'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()), + 'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()), + 'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell', 'openrc'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')), + 'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')), + 'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)), + 'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)), + 'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)), + 'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')), + 'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)), + 'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)), + 'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)), + 'BlueprintLexer': ('pip._vendor.pygments.lexers.blueprint', 'Blueprint', ('blueprint',), ('*.blp',), ('text/x-blueprint',)), + 'BnfLexer': 
('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)), + 'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()), + 'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)), + 'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()), + 'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)), + 'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()), + 'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()), + 'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')), + 'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)), + 'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)), + 'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()), + 'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()), + 'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), + 'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)), + 'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()), + 'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()), + 'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()), + 'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()), + 'CarbonLexer': ('pip._vendor.pygments.lexers.carbon', 'Carbon', ('carbon',), ('*.carbon',), ('text/x-carbon',)), + 'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()), + 'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)), + 'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)), + 'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()), + 'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')), + 'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()), + 'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()), + 'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')), + 'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')), + 'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', 
('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')), + 'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')), + 'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)), + 'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)), + 'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()), + 'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')), + 'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')), + 'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()), + 'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)), + 'CodeQLLexer': ('pip._vendor.pygments.lexers.codeql', 'CodeQL', ('codeql', 'ql'), ('*.ql', '*.qll'), ()), + 'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)), + 'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()), + 'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)), + 'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()), + 'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()), + 'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)), + 'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)), + 'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)), + 'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)), + 'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')), + 'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)), + 'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()), + 'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)), + 'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)), + 'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)), + 'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()), + 'CsoundOrchestraLexer': 
('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()), + 'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()), + 'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')), + 'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)), + 'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)), + 'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)), + 'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)), + 'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)), + 'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)), + 'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()), + 'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')), + 'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)), + 'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)), + 'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()), + 'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)), + 'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)), + 'DaxLexer': ('pip._vendor.pygments.lexers.dax', 'Dax', ('dax',), ('*.dax',), ()), + 'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()), + 'DebianSourcesLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sources file', ('debian.sources',), ('*.sources',), ()), + 'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)), + 'DesktopLexer': ('pip._vendor.pygments.lexers.configs', 'Desktop file', ('desktop',), ('*.desktop',), ('application/x-desktop',)), + 'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)), + 'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)), + 'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')), + 'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')), + 'DnsZoneLexer': ('pip._vendor.pygments.lexers.dns', 'Zone', ('zone',), ('*.zone',), ('text/dns',)), + 'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)), + 'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)), + 'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 
'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')), + 'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)), + 'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)), + 'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)), + 'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)), + 'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')), + 'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)), + 'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)), + 'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)), + 'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)), + 'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)), + 'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)), + 'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)), + 'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)), + 'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')), + 'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)), + 'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)), + 'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)), + 'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)), + 'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), (), ('text/html+evoque',)), + 'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)), + 'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), (), ('application/xml+evoque',)), + 'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()), + 'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)), + 'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi', '*.fsx'), ('text/x-fsharp',)), + 'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)), + 'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)), + 'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)), + 'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), 
('application/x-fantom',)), + 'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)), + 'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()), + 'FiftLexer': ('pip._vendor.pygments.lexers.fift', 'Fift', ('fift', 'fif'), ('*.fif',), ()), + 'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)), + 'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)), + 'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()), + 'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)), + 'FortranFixedLexer': ('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()), + 'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)), + 'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()), + 'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)), + 'FuncLexer': ('pip._vendor.pygments.lexers.func', 'FunC', ('func', 'fc'), ('*.fc', '*.func'), ()), + 'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)), + 'GAPConsoleLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP session', ('gap-console', 'gap-repl'), ('*.tst',), ()), + 'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()), + 'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')), + 'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)), + 'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()), + 'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)), + 'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()), + 'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')), + 'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')), + 'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')), + 'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)), + 'GleamLexer': ('pip._vendor.pygments.lexers.gleam', 'Gleam', ('gleam',), ('*.gleam',), ('text/x-gleam',)), + 'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)), + 'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)), + 'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()), + 'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), 
('text/x-gooddata-cl',)), + 'GoogleSqlLexer': ('pip._vendor.pygments.lexers.sql', 'GoogleSQL', ('googlesql', 'zetasql'), ('*.googlesql', '*.googlesql.sql'), ('text/x-google-sql', 'text/x-google-sql-aux')), + 'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)), + 'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)), + 'GraphQLLexer': ('pip._vendor.pygments.lexers.graphql', 'GraphQL', ('graphql',), ('*.graphql',), ()), + 'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')), + 'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')), + 'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)), + 'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)), + 'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()), + 'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)), + 'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')), + 'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()), + 'HareLexer': ('pip._vendor.pygments.lexers.hare', 'Hare', ('hare',), ('*.ha',), ('text/x-hare',)), + 'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)), + 'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')), + 'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()), + 'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)), + 'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), ('*Spec.hs',), ()), + 'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')), + 'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)), + 'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')), + 'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')), + 'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)), + 'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()), + 'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()), + 'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang', 'hy'), ('*.hy',), ('text/x-hy', 'application/x-hy')), + 'HybrisLexer': 
('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris',), ('*.hyb',), ('text/x-hybris', 'application/x-hybris')), + 'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)), + 'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()), + 'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)), + 'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)), + 'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()), + 'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()), + 'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()), + 'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig'), ('text/x-ini', 'text/inf')), + 'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)), + 'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)), + 'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)), + 'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)), + 'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)), + 'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()), + 'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)), + 'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()), + 'JanetLexer': ('pip._vendor.pygments.lexers.lisp', 'Janet', ('janet',), ('*.janet', '*.jdn'), ('text/x-janet', 'application/x-janet')), + 'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()), + 'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)), + 'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')), + 'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')), + 'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')), + 'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')), + 'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')), + 
'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')), + 'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()), + 'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)), + 'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')), + 'Json5Lexer': ('pip._vendor.pygments.lexers.json5', 'JSON5', ('json5',), ('*.json5',), ()), + 'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()), + 'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)), + 'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', '*.jsonl', '*.ndjson', 'Pipfile.lock'), ('application/json', 'application/json-object', 'application/x-ndjson', 'application/jsonl', 'application/json-seq')), + 'JsonnetLexer': ('pip._vendor.pygments.lexers.jsonnet', 'Jsonnet', ('jsonnet',), ('*.jsonnet', '*.libsonnet'), ()), + 'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)), + 'JsxLexer': ('pip._vendor.pygments.lexers.jsx', 'JSX', ('jsx', 'react'), ('*.jsx', '*.react'), ('text/jsx', 'text/typescript-jsx')), + 'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()), + 'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')), + 'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')), + 'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()), + 'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')), + 'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)), + 'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()), + 'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)), + 'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)), + 'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()), + 'KustoLexer': ('pip._vendor.pygments.lexers.kusto', 'Kusto', ('kql', 'kusto'), ('*.kql', '*.kusto', '.csl'), ()), + 'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)), + 'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)), + 'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')), + 'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')), + 
'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)), + 'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)), + 'LdaprcLexer': ('pip._vendor.pygments.lexers.ldap', 'LDAP configuration file', ('ldapconf', 'ldaprc'), ('.ldaprc', 'ldaprc', 'ldap.conf'), ('text/x-ldapconf',)), + 'LdifLexer': ('pip._vendor.pygments.lexers.ldap', 'LDIF', ('ldif',), ('*.ldif',), ('text/x-ldif',)), + 'Lean3Lexer': ('pip._vendor.pygments.lexers.lean', 'Lean', ('lean', 'lean3'), ('*.lean',), ('text/x-lean', 'text/x-lean3')), + 'Lean4Lexer': ('pip._vendor.pygments.lexers.lean', 'Lean4', ('lean4',), ('*.lean',), ('text/x-lean4',)), + 'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)), + 'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)), + 'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()), + 'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)), + 'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()), + 'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)), + 'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)), + 'LiterateHaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)), + 'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)), + 'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)), + 'LlvmLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)), + 'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()), + 'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()), + 'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)), + 'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)), + 'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')), + 'LuauLexer': ('pip._vendor.pygments.lexers.scripting', 'Luau', ('luau',), ('*.luau',), ()), + 'MCFunctionLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)), + 'MCSchemaLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCSchema', ('mcschema',), ('*.mcschema',), ('text/mcschema',)), + 'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')), + 'MIPSLexer': ('pip._vendor.pygments.lexers.mips', 'MIPS', ('mips',), ('*.mips', '*.MIPS'), ()), + 'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), 
('text/x-moocode',)), + 'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()), + 'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()), + 'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)), + 'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)), + 'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)), + 'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')), + 'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)), + 'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)), + 'MapleLexer': ('pip._vendor.pygments.lexers.maple', 'Maple', ('maple',), ('*.mpl', '*.mi', '*.mm'), ('text/x-maple',)), + 'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')), + 'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)), + 'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)), + 'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)), + 'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')), + 'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)), + 'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()), + 'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()), + 'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)), + 'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)), + 'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')), + 'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)), + 'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)), + 'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)), + 'MojoLexer': ('pip._vendor.pygments.lexers.mojo', 'Mojo', ('mojo', '🔥'), ('*.mojo', '*.🔥'), ('text/x-mojo', 'application/x-mojo')), + 'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)), + 'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()), + 
'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')), + 'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()), + 'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()), + 'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()), + 'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()), + 'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()), + 'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()), + 'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)), + 'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()), + 'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()), + 'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()), + 'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)), + 'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)), + 'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)), + 'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')), + 'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)), + 'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)), + 'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)), + 'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)), + 'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM', '*.nasm'), ('text/x-nasm',)), + 'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)), + 'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)), + 'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)), + 'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()), + 'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')), + 'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)), + 'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)), + 'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)), + 'NitLexer': ('pip._vendor.pygments.lexers.nit', 
'Nit', ('nit',), ('*.nit',), ()), + 'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)), + 'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)), + 'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()), + 'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()), + 'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()), + 'NumbaIRLexer': ('pip._vendor.pygments.lexers.numbair', 'Numba_IR', ('numba_ir', 'numbair'), ('*.numba_ir',), ('text/x-numba_ir', 'text/x-numbair')), + 'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)), + 'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)), + 'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)), + 'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)), + 'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)), + 'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)), + 'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)), + 'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()), + 'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)), + 'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)), + 'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')), + 'OpenScadLexer': ('pip._vendor.pygments.lexers.openscad', 'OpenSCAD', ('openscad',), ('*.scad',), ('application/x-openscad',)), + 'OrgLexer': ('pip._vendor.pygments.lexers.markup', 'Org Mode', ('org', 'orgmode', 'org-mode'), ('*.org',), ('text/org',)), + 'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()), + 'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()), + 'PanLexer': ('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()), + 'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)), + 'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)), + 'PddlLexer': ('pip._vendor.pygments.lexers.pddl', 'PDDL', ('pddl',), ('*.pddl',), ()), + 'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)), + 'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')), + 'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', 
'*.perl'), ('text/x-perl', 'application/x-perl')), + 'PhixLexer': ('pip._vendor.pygments.lexers.phix', 'Phix', ('phix',), ('*.exw',), ('text/x-phix',)), + 'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)), + 'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)), + 'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)), + 'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()), + 'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)), + 'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()), + 'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()), + 'PortugolLexer': ('pip._vendor.pygments.lexers.pascal', 'Portugol', ('portugol',), ('*.alg', '*.portugol'), ()), + 'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)), + 'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)), + 'PostgresExplainLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL EXPLAIN dialect', ('postgres-explain',), ('*.explain',), ('text/x-postgresql-explain',)), + 'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)), + 'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)), + 'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)), + 'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()), + 'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()), + 'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()), + 'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)), + 'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()), + 'PromelaLexer': ('pip._vendor.pygments.lexers.c_like', 'Promela', ('promela',), ('*.pml', '*.prom', '*.prm', '*.promela', '*.pr', '*.pm'), ('text/x-promela',)), + 'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)), + 'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()), + 'PrqlLexer': ('pip._vendor.pygments.lexers.prql', 'PRQL', ('prql',), ('*.prql',), ('application/prql', 'application/x-prql')), + 'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()), + 'PtxLexer': ('pip._vendor.pygments.lexers.ptx', 'PTX', ('ptx',), ('*.ptx',), ('text/x-ptx',)), + 'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')), + 'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()), + 'PyPyLogLexer': 
('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)), + 'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')), + 'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)), + 'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon', 'python-console'), (), ('text/x-python-doctest',)), + 'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3', 'bazel', 'starlark', 'pyi'), ('*.py', '*.pyw', '*.pyi', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')), + 'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')), + 'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()), + 'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)), + 'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()), + 'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()), + 'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()), + 'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')), + 'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()), + 'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()), + 'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)), + 'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')), + 'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()), + 'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()), + 'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()), + 'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()), + 'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()), + 'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()), + 'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()), + 'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()), + 'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)), + 'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)), + 'ReasonLexer': ('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)), + 'RebolLexer': 
('pip._vendor.pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)), + 'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')), + 'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()), + 'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)), + 'RegoLexer': ('pip._vendor.pygments.lexers.rego', 'Rego', ('rego',), ('*.rego',), ('text/x-rego',)), + 'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()), + 'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)), + 'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)), + 'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)), + 'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)), + 'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()), + 'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()), + 'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)), + 'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)), + 'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)), + 'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')), + 'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()), + 'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)), + 'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')), + 'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')), + 'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')), + 'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), + 'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), + 'SNBTLexer': ('pip._vendor.pygments.lexers.minecraft', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)), + 'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)), + 'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)), + 'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()), + 'ScalaLexer': 
('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)), + 'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)), + 'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()), + 'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')), + 'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)), + 'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)), + 'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)), + 'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)), + 'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')), + 'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()), + 'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()), + 'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()), + 'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()), + 'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)), + 'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()), + 'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)), + 'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)), + 'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()), + 'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)), + 'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()), + 'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)), + 'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()), + 'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()), + 'SoongLexer': ('pip._vendor.pygments.lexers.soong', 'Soong', ('androidbp', 'bp', 'soong'), ('Android.bp',), ()), + 'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()), + 'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)), + 'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()), + 'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)), + 'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)), + 'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()), + 'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)), + 
'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)), + 'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)), + 'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()), + 'SspLexer': ('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)), + 'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()), + 'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')), + 'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')), + 'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)), + 'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)), + 'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)), + 'SystemdLexer': ('pip._vendor.pygments.lexers.configs', 'Systemd', ('systemd',), ('*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ()), + 'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()), + 'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()), + 'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ('application/toml',)), + 'TableGenLexer': ('pip._vendor.pygments.lexers.tablegen', 'TableGen', ('tablegen', 'td'), ('*.td',), ()), + 'TactLexer': ('pip._vendor.pygments.lexers.tact', 'Tact', ('tact',), ('*.tact',), ()), + 'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()), + 'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)), + 'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)), + 'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')), + 'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)), + 'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()), + 'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)), + 'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()), + 'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)), + 'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()), + 'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()), + 'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf', 'hcl'), ('*.tf', '*.hcl'), ('application/x-tf', 'application/x-terraform')), + 'TexLexer': 
('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')), + 'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)), + 'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()), + 'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)), + 'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)), + 'TlbLexer': ('pip._vendor.pygments.lexers.tlb', 'Tl-b', ('tlb',), ('*.tlb',), ()), + 'TlsLexer': ('pip._vendor.pygments.lexers.tls', 'TLS Presentation Language', ('tls',), (), ()), + 'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)), + 'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)), + 'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()), + 'TsxLexer': ('pip._vendor.pygments.lexers.jsx', 'TSX', ('tsx',), ('*.tsx',), ('text/typescript-tsx',)), + 'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')), + 'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)), + 'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)), + 'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')), + 'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()), + 'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()), + 'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)), + 'TypstLexer': ('pip._vendor.pygments.lexers.typst', 'Typst', ('typst',), ('*.typ',), ('text/x-typst',)), + 'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()), + 'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()), + 'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)), + 'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()), + 'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)), + 'UrlEncodedLexer': ('pip._vendor.pygments.lexers.html', 'urlencoded', ('urlencoded',), (), ('application/x-www-form-urlencoded',)), + 'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()), + 'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()), + 'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)), + 'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)), + 'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()), + 'VGLLexer': 
('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()), + 'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)), + 'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), + 'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas', 'visual-basic', 'visualbasic'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')), + 'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)), + 'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()), + 'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)), + 'VerifpalLexer': ('pip._vendor.pygments.lexers.verifpal', 'Verifpal', ('verifpal',), ('*.vp',), ('text/x-verifpal',)), + 'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)), + 'VhdlLexer': ('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)), + 'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)), + 'VisualPrologGrammarLexer': ('pip._vendor.pygments.lexers.vip', 'Visual Prolog Grammar', ('visualprologgrammar',), ('*.vipgrm',), ()), + 'VisualPrologLexer': ('pip._vendor.pygments.lexers.vip', 'Visual Prolog', ('visualprolog',), ('*.pro', '*.cl', '*.i', '*.pack', '*.ph'), ()), + 'VueLexer': ('pip._vendor.pygments.lexers.html', 'Vue', ('vue',), ('*.vue',), ()), + 'VyperLexer': ('pip._vendor.pygments.lexers.vyper', 'Vyper', ('vyper',), ('*.vy',), ()), + 'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()), + 'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()), + 'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()), + 'WgslLexer': ('pip._vendor.pygments.lexers.wgsl', 'WebGPU Shading Language', ('wgsl',), ('*.wgsl',), ('text/wgsl',)), + 'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)), + 'WikitextLexer': ('pip._vendor.pygments.lexers.markup', 'Wikitext', ('wikitext', 'mediawiki'), (), ('text/x-wiki',)), + 'WoWTocLexer': ('pip._vendor.pygments.lexers.wowtoc', 'World of Warcraft TOC', ('wowtoc',), ('*.toc',), ()), + 'WrenLexer': ('pip._vendor.pygments.lexers.wren', 'Wren', ('wren',), ('*.wren',), ()), + 'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)), + 'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()), + 'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')), + 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')), + 'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)), + 'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), 
('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')), + 'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)), + 'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)), + 'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()), + 'XppLexer': ('pip._vendor.pygments.lexers.dotnet', 'X++', ('xpp', 'x++'), ('*.xpp',), ()), + 'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')), + 'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)), + 'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()), + 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')), + 'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)), + 'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)), + 'YaraLexer': ('pip._vendor.pygments.lexers.yara', 'YARA', ('yara', 'yar'), ('*.yar',), ('text/x-yara',)), + 'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()), + 'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()), + 'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)), + 'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()), +} diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/python.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/python.py new file mode 100644 index 0000000..1b78829 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/lexers/python.py @@ -0,0 +1,1201 @@ +""" + pygments.lexers.python + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Python and related languages. + + :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import keyword + +from pip._vendor.pygments.lexer import DelegatingLexer, RegexLexer, include, \ + bygroups, using, default, words, combined, this +from pip._vendor.pygments.util import get_bool_opt, shebang_matches +from pip._vendor.pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic, Other, Error, Whitespace +from pip._vendor.pygments import unistring as uni + +__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer', + 'Python2Lexer', 'Python2TracebackLexer', + 'CythonLexer', 'DgLexer', 'NumPyLexer'] + + +class PythonLexer(RegexLexer): + """ + For Python source code (version 3.x). + + .. versionchanged:: 2.5 + This is now the default ``PythonLexer``. It is still available as the + alias ``Python3Lexer``. 
+ """ + + name = 'Python' + url = 'https://www.python.org' + aliases = ['python', 'py', 'sage', 'python3', 'py3', 'bazel', 'starlark', 'pyi'] + filenames = [ + '*.py', + '*.pyw', + # Type stubs + '*.pyi', + # Jython + '*.jy', + # Sage + '*.sage', + # SCons + '*.sc', + 'SConstruct', + 'SConscript', + # Skylark/Starlark (used by Bazel, Buck, and Pants) + '*.bzl', + 'BUCK', + 'BUILD', + 'BUILD.bazel', + 'WORKSPACE', + # Twisted Application infrastructure + '*.tac', + ] + mimetypes = ['text/x-python', 'application/x-python', + 'text/x-python3', 'application/x-python3'] + version_added = '0.10' + + uni_name = f"[{uni.xid_start}][{uni.xid_continue}]*" + + def innerstring_rules(ttype): + return [ + # the old style '%s' % (...) string formatting (still valid in Py3) + (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[E-GXc-giorsaux%]', String.Interpol), + # the new style '{}'.format(...) string formatting + (r'\{' + r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name + r'(\![sra])?' # conversion + r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' + r'\}', String.Interpol), + + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"%{\n]+', ttype), + (r'[\'"\\]', ttype), + # unhandled string formatting sign + (r'%|(\{{1,2})', ttype) + # newlines are an error (use "nl" state) + ] + + def fstring_rules(ttype): + return [ + # Assuming that a '}' is the closing brace after format specifier. + # Sadly, this means that we won't detect syntax error. But it's + # more important to parse correct syntax correctly, than to + # highlight invalid syntax. + (r'\}', String.Interpol), + (r'\{', String.Interpol, 'expr-inside-fstring'), + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"{}\n]+', ttype), + (r'[\'"\\]', ttype), + # newlines are an error (use "nl" state) + ] + + tokens = { + 'root': [ + (r'\n', Whitespace), + (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', + bygroups(Whitespace, String.Affix, String.Doc)), + (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", + bygroups(Whitespace, String.Affix, String.Doc)), + (r'\A#!.+$', Comment.Hashbang), + (r'#.*$', Comment.Single), + (r'\\\n', Text), + (r'\\', Text), + include('keywords'), + include('soft-keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Whitespace), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Whitespace), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Whitespace), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Whitespace), + 'import'), + include('expr'), + ], + 'expr': [ + # raw f-strings + ('(?i)(rf|fr)(""")', + bygroups(String.Affix, String.Double), + combined('rfstringescape', 'tdqf')), + ("(?i)(rf|fr)(''')", + bygroups(String.Affix, String.Single), + combined('rfstringescape', 'tsqf')), + ('(?i)(rf|fr)(")', + bygroups(String.Affix, String.Double), + combined('rfstringescape', 'dqf')), + ("(?i)(rf|fr)(')", + bygroups(String.Affix, String.Single), + combined('rfstringescape', 'sqf')), + # non-raw f-strings + ('([fF])(""")', bygroups(String.Affix, String.Double), + combined('fstringescape', 'tdqf')), + ("([fF])(''')", bygroups(String.Affix, String.Single), + combined('fstringescape', 'tsqf')), + ('([fF])(")', bygroups(String.Affix, String.Double), + combined('fstringescape', 'dqf')), + ("([fF])(')", bygroups(String.Affix, String.Single), + combined('fstringescape', 'sqf')), + # raw bytes and strings + ('(?i)(rb|br|r)(""")', + bygroups(String.Affix, String.Double), 'tdqs'), + ("(?i)(rb|br|r)(''')", + 
bygroups(String.Affix, String.Single), 'tsqs'), + ('(?i)(rb|br|r)(")', + bygroups(String.Affix, String.Double), 'dqs'), + ("(?i)(rb|br|r)(')", + bygroups(String.Affix, String.Single), 'sqs'), + # non-raw strings + ('([uU]?)(""")', bygroups(String.Affix, String.Double), + combined('stringescape', 'tdqs')), + ("([uU]?)(''')", bygroups(String.Affix, String.Single), + combined('stringescape', 'tsqs')), + ('([uU]?)(")', bygroups(String.Affix, String.Double), + combined('stringescape', 'dqs')), + ("([uU]?)(')", bygroups(String.Affix, String.Single), + combined('stringescape', 'sqs')), + # non-raw bytes + ('([bB])(""")', bygroups(String.Affix, String.Double), + combined('bytesescape', 'tdqs')), + ("([bB])(''')", bygroups(String.Affix, String.Single), + combined('bytesescape', 'tsqs')), + ('([bB])(")', bygroups(String.Affix, String.Double), + combined('bytesescape', 'dqs')), + ("([bB])(')", bygroups(String.Affix, String.Single), + combined('bytesescape', 'sqs')), + + (r'[^\S\n]+', Text), + include('numbers'), + (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), + (r'[]{}:(),;[]', Punctuation), + (r'(in|is|and|or|not)\b', Operator.Word), + include('expr-keywords'), + include('builtins'), + include('magicfuncs'), + include('magicvars'), + include('name'), + ], + 'expr-inside-fstring': [ + (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), + # without format specifier + (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) + r'(\![sraf])?' # conversion + r'\}', String.Interpol, '#pop'), + # with format specifier + # we'll catch the remaining '}' in the outer scope + (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) + r'(\![sraf])?' # conversion + r':', String.Interpol, '#pop'), + (r'\s+', Whitespace), # allow new lines + include('expr'), + ], + 'expr-inside-fstring-inner': [ + (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), + (r'[])}]', Punctuation, '#pop'), + (r'\s+', Whitespace), # allow new lines + include('expr'), + ], + 'expr-keywords': [ + # Based on https://docs.python.org/3/reference/expressions.html + (words(( + 'async for', 'await', 'else', 'for', 'if', 'lambda', + 'yield', 'yield from'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], + 'keywords': [ + (words(( + 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', + 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', + 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], + 'soft-keywords': [ + # `match`, `case` and `_` soft keywords + (r'(^[ \t]*)' # at beginning of line + possible indentation + r'(match|case)\b' # a possible keyword + r'(?![ \t]*(?:' # not followed by... 
+ r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't + # pattern matching (but None/True/False is ok) + r'|'.join(k for k in keyword.kwlist if k[0].islower()) + r')\b))', + bygroups(Text, Keyword), 'soft-keywords-inner'), + ], + 'soft-keywords-inner': [ + # optional `_` keyword + (r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)), + default('#pop') + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'aiter', 'all', 'any', 'bin', 'bool', 'bytearray', + 'breakpoint', 'bytes', 'callable', 'chr', 'classmethod', 'compile', + 'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', + 'filter', 'float', 'format', 'frozenset', 'getattr', 'globals', + 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'isinstance', + 'issubclass', 'iter', 'len', 'list', 'locals', 'map', 'max', + 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', + 'print', 'property', 'range', 'repr', 'reversed', 'round', 'set', + 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', + 'tuple', 'type', 'vars', 'zip'), prefix=r'(?>|[-~+/*%=<>&^|.]', Operator), + include('keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Whitespace), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Whitespace), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Whitespace), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Whitespace), + 'import'), + include('builtins'), + include('magicfuncs'), + include('magicvars'), + include('backtick'), + ('([rR]|[uUbB][rR]|[rR][uUbB])(""")', + bygroups(String.Affix, String.Double), 'tdqs'), + ("([rR]|[uUbB][rR]|[rR][uUbB])(''')", + bygroups(String.Affix, String.Single), 'tsqs'), + ('([rR]|[uUbB][rR]|[rR][uUbB])(")', + bygroups(String.Affix, String.Double), 'dqs'), + ("([rR]|[uUbB][rR]|[rR][uUbB])(')", + bygroups(String.Affix, String.Single), 'sqs'), + ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), + combined('stringescape', 'tdqs')), + ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), + combined('stringescape', 'tsqs')), + ('([uUbB]?)(")', bygroups(String.Affix, String.Double), + combined('stringescape', 'dqs')), + ("([uUbB]?)(')", bygroups(String.Affix, String.Single), + combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', + 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass', + 'print', 'raise', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', + 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', + 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', + 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', + 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', + 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object', + 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce', + 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', + 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', + 'unichr', 'unicode', 'vars', 'xrange', 'zip'), + prefix=r'(?>> )(.*\n)', bygroups(Generic.Prompt, Other.Code), 'continuations'), + # This happens, e.g., when tracebacks are embedded in documentation; + # trailing 
whitespaces are often stripped in such contexts.
+            (r'(>>>)(\n)', bygroups(Generic.Prompt, Whitespace)),
+            (r'(\^C)?Traceback \(most recent call last\):\n', Other.Traceback, 'traceback'),
+            # SyntaxError starts with this
+            (r'  File "[^"]+", line \d+', Other.Traceback, 'traceback'),
+            (r'.*\n', Generic.Output),
+        ],
+        'continuations': [
+            (r'(\.\.\. )(.*\n)', bygroups(Generic.Prompt, Other.Code)),
+            # See above.
+            (r'(\.\.\.)(\n)', bygroups(Generic.Prompt, Whitespace)),
+            default('#pop'),
+        ],
+        'traceback': [
+            # As soon as we see a traceback, consume everything until the next
+            # >>> prompt.
+            (r'(?=>>>( |$))', Text, '#pop'),
+            (r'(KeyboardInterrupt)(\n)', bygroups(Name.Class, Whitespace)),
+            (r'.*\n', Other.Traceback),
+        ],
+    }
+
+
+class PythonConsoleLexer(DelegatingLexer):
+    """
+    For Python console output or doctests, such as:
+
+    .. sourcecode:: pycon
+
+        >>> a = 'foo'
+        >>> print(a)
+        foo
+        >>> 1 / 0
+        Traceback (most recent call last):
+          File "<stdin>", line 1, in <module>
+        ZeroDivisionError: integer division or modulo by zero
+
+    Additional options:
+
+    `python3`
+        Use Python 3 lexer for code. Default is ``True``.
+
+    .. versionadded:: 1.0
+    .. versionchanged:: 2.5
+       Now defaults to ``True``.
+    """
+
+    name = 'Python console session'
+    aliases = ['pycon', 'python-console']
+    mimetypes = ['text/x-python-doctest']
+    url = 'https://python.org'
+    version_added = ''
+
+    def __init__(self, **options):
+        python3 = get_bool_opt(options, 'python3', True)
+        if python3:
+            pylexer = PythonLexer
+            tblexer = PythonTracebackLexer
+        else:
+            pylexer = Python2Lexer
+            tblexer = Python2TracebackLexer
+        # We have two auxiliary lexers. Use DelegatingLexer twice with
+        # different tokens. TODO: DelegatingLexer should support this
+        # directly, by accepting a tuple of auxiliary lexers and a tuple of
+        # distinguishing tokens. Then we wouldn't need this intermediary
+        # class.
+        class _ReplaceInnerCode(DelegatingLexer):
+            def __init__(self, **options):
+                super().__init__(pylexer, _PythonConsoleLexerBase, Other.Code, **options)
+        super().__init__(tblexer, _ReplaceInnerCode, Other.Traceback, **options)
+
+
+class PythonTracebackLexer(RegexLexer):
+    """
+    For Python 3.x tracebacks, with support for chained exceptions.
+
+    .. versionchanged:: 2.5
+       This is now the default ``PythonTracebackLexer``. It is still available
+       as the alias ``Python3TracebackLexer``.
+    """
+
+    name = 'Python Traceback'
+    aliases = ['pytb', 'py3tb']
+    filenames = ['*.pytb', '*.py3tb']
+    mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']
+    url = 'https://python.org'
+    version_added = '1.0'
+
+    tokens = {
+        'root': [
+            (r'\n', Whitespace),
+            (r'^(\^C)?Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
+            (r'^During handling of the above exception, another '
+             r'exception occurred:\n\n', Generic.Traceback),
+            (r'^The above exception was the direct cause of the '
+             r'following exception:\n\n', Generic.Traceback),
+            (r'^(?=  File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
+            (r'^.*\n', Other),
+        ],
+        'intb': [
+            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
+             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
+            (r'^(  File )("[^"]+")(, line )(\d+)(\n)',
+             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
+            (r'^(    )(.+)(\n)',
+             bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'),
+            (r'^([ \t]*)(\.\.\.)(\n)',
+             bygroups(Whitespace, Comment, Whitespace)),  # for doctests...
+ (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Whitespace), '#pop'), + (r'^([a-zA-Z_][\w.]*)(:?\n)', + bygroups(Generic.Error, Whitespace), '#pop'), + default('#pop'), + ], + 'markers': [ + # Either `PEP 657 ` + # error locations in Python 3.11+, or single-caret markers + # for syntax errors before that. + (r'^( {4,})([~^]+)(\n)', + bygroups(Whitespace, Punctuation.Marker, Whitespace), + '#pop'), + default('#pop'), + ], + } + + +Python3TracebackLexer = PythonTracebackLexer + + +class Python2TracebackLexer(RegexLexer): + """ + For Python tracebacks. + + .. versionchanged:: 2.5 + This class has been renamed from ``PythonTracebackLexer``. + ``PythonTracebackLexer`` now refers to the Python 3 variant. + """ + + name = 'Python 2.x Traceback' + aliases = ['py2tb'] + filenames = ['*.py2tb'] + mimetypes = ['text/x-python2-traceback'] + url = 'https://python.org' + version_added = '0.7' + + tokens = { + 'root': [ + # Cover both (most recent call last) and (innermost last) + # The optional ^C allows us to catch keyboard interrupt signals. + (r'^(\^C)?(Traceback.*\n)', + bygroups(Text, Generic.Traceback), 'intb'), + # SyntaxError starts with this. + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + (r'^.*\n', Other), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Whitespace)), + (r'^( )(.+)(\n)', + bygroups(Text, using(Python2Lexer), Whitespace), 'marker'), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Text, Comment, Whitespace)), # for doctests... + (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Whitespace), '#pop'), + (r'^([a-zA-Z_]\w*)(:?\n)', + bygroups(Generic.Error, Whitespace), '#pop') + ], + 'marker': [ + # For syntax errors. + (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'), + default('#pop'), + ], + } + + +class CythonLexer(RegexLexer): + """ + For Pyrex and Cython source code. 
+ """ + + name = 'Cython' + url = 'https://cython.org' + aliases = ['cython', 'pyx', 'pyrex'] + filenames = ['*.pyx', '*.pxd', '*.pxi'] + mimetypes = ['text/x-cython', 'application/x-cython'] + version_added = '1.1' + + tokens = { + 'root': [ + (r'\n', Whitespace), + (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)), + (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)), + (r'[^\S\n]+', Text), + (r'#.*$', Comment), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Whitespace), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'(<)([a-zA-Z0-9.?]+)(>)', + bygroups(Punctuation, Keyword.Type, Punctuation)), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), + (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', + bygroups(Keyword, Number.Integer, Operator, Whitespace, Operator, + Name, Punctuation)), + include('keywords'), + (r'(def|property)(\s+)', bygroups(Keyword, Whitespace), 'funcname'), + (r'(cp?def)(\s+)', bygroups(Keyword, Whitespace), 'cdef'), + # (should actually start a block with only cdefs) + (r'(cdef)(:)', bygroups(Keyword, Punctuation)), + (r'(class|struct)(\s+)', bygroups(Keyword, Whitespace), 'classname'), + (r'(from)(\s+)', bygroups(Keyword, Whitespace), 'fromimport'), + (r'(c?import)(\s+)', bygroups(Keyword, Whitespace), 'import'), + include('builtins'), + include('backtick'), + ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), + ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), + ('[uU]?"""', String, combined('stringescape', 'tdqs')), + ("[uU]?'''", String, combined('stringescape', 'tsqs')), + ('[uU]?"', String, combined('stringescape', 'dqs')), + ("[uU]?'", String, combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', + 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil', + 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print', + 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'), + Keyword), + (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', + 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', + 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit', + 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', + 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', + 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t', + 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', + 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', + 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned', + 'vars', 'xrange', 'zip'), prefix=r'(?]? \d* )? : + .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ ) +''', re.VERBOSE) + + +def get_filetype_from_line(l): # noqa: E741 + m = modeline_re.search(l) + if m: + return m.group(1) + + +def get_filetype_from_buffer(buf, max_lines=5): + """ + Scan the buffer for modelines and return filetype if one is found. 
+ """ + lines = buf.splitlines() + for line in lines[-1:-max_lines-1:-1]: + ret = get_filetype_from_line(line) + if ret: + return ret + for i in range(max_lines, -1, -1): + if i < len(lines): + ret = get_filetype_from_line(lines[i]) + if ret: + return ret + + return None diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/plugin.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/plugin.py new file mode 100644 index 0000000..498db42 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/plugin.py @@ -0,0 +1,72 @@ +""" + pygments.plugin + ~~~~~~~~~~~~~~~ + + Pygments plugin interface. + + lexer plugins:: + + [pygments.lexers] + yourlexer = yourmodule:YourLexer + + formatter plugins:: + + [pygments.formatters] + yourformatter = yourformatter:YourFormatter + /.ext = yourformatter:YourFormatter + + As you can see, you can define extensions for the formatter + with a leading slash. + + syntax plugins:: + + [pygments.styles] + yourstyle = yourstyle:YourStyle + + filter plugin:: + + [pygments.filter] + yourfilter = yourfilter:YourFilter + + + :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" +from importlib.metadata import entry_points + +LEXER_ENTRY_POINT = 'pygments.lexers' +FORMATTER_ENTRY_POINT = 'pygments.formatters' +STYLE_ENTRY_POINT = 'pygments.styles' +FILTER_ENTRY_POINT = 'pygments.filters' + + +def iter_entry_points(group_name): + groups = entry_points() + if hasattr(groups, 'select'): + # New interface in Python 3.10 and newer versions of the + # importlib_metadata backport. + return groups.select(group=group_name) + else: + # Older interface, deprecated in Python 3.10 and recent + # importlib_metadata, but we need it in Python 3.8 and 3.9. + return groups.get(group_name, []) + + +def find_plugin_lexers(): + for entrypoint in iter_entry_points(LEXER_ENTRY_POINT): + yield entrypoint.load() + + +def find_plugin_formatters(): + for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT): + yield entrypoint.name, entrypoint.load() + + +def find_plugin_styles(): + for entrypoint in iter_entry_points(STYLE_ENTRY_POINT): + yield entrypoint.name, entrypoint.load() + + +def find_plugin_filters(): + for entrypoint in iter_entry_points(FILTER_ENTRY_POINT): + yield entrypoint.name, entrypoint.load() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/regexopt.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/regexopt.py new file mode 100644 index 0000000..cc8d2c3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/regexopt.py @@ -0,0 +1,91 @@ +""" + pygments.regexopt + ~~~~~~~~~~~~~~~~~ + + An algorithm that generates optimized regexes for matching long lists of + literal strings. + + :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re +from re import escape +from os.path import commonprefix +from itertools import groupby +from operator import itemgetter + +CS_ESCAPE = re.compile(r'[\[\^\\\-\]]') +FIRST_ELEMENT = itemgetter(0) + + +def make_charset(letters): + return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']' + + +def regex_opt_inner(strings, open_paren): + """Return a regex that matches any string in the sorted list of strings.""" + close_paren = open_paren and ')' or '' + # print strings, repr(open_paren) + if not strings: + # print '-> nothing left' + return '' + first = strings[0] + if len(strings) == 1: + # print '-> only 1 string' + return open_paren + escape(first) + close_paren + if not first: + # print '-> first string empty' + return open_paren + regex_opt_inner(strings[1:], '(?:') \ + + '?' + close_paren + if len(first) == 1: + # multiple one-char strings? make a charset + oneletter = [] + rest = [] + for s in strings: + if len(s) == 1: + oneletter.append(s) + else: + rest.append(s) + if len(oneletter) > 1: # do we have more than one oneletter string? + if rest: + # print '-> 1-character + rest' + return open_paren + regex_opt_inner(rest, '') + '|' \ + + make_charset(oneletter) + close_paren + # print '-> only 1-character' + return open_paren + make_charset(oneletter) + close_paren + prefix = commonprefix(strings) + if prefix: + plen = len(prefix) + # we have a prefix for all strings + # print '-> prefix:', prefix + return open_paren + escape(prefix) \ + + regex_opt_inner([s[plen:] for s in strings], '(?:') \ + + close_paren + # is there a suffix? + strings_rev = [s[::-1] for s in strings] + suffix = commonprefix(strings_rev) + if suffix: + slen = len(suffix) + # print '-> suffix:', suffix[::-1] + return open_paren \ + + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \ + + escape(suffix[::-1]) + close_paren + # recurse on common 1-string prefixes + # print '-> last resort' + return open_paren + \ + '|'.join(regex_opt_inner(list(group[1]), '') + for group in groupby(strings, lambda s: s[0] == first[0])) \ + + close_paren + + +def regex_opt(strings, prefix='', suffix=''): + """Return a compiled regex that matches any string in the given list. + + The strings to match must be literal strings, not regexes. They will be + regex-escaped. + + *prefix* and *suffix* are pre- and appended to the final regex. + """ + strings = sorted(strings) + return prefix + regex_opt_inner(strings, '(') + suffix diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/scanner.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/scanner.py new file mode 100644 index 0000000..3c8c848 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/scanner.py @@ -0,0 +1,104 @@ +""" + pygments.scanner + ~~~~~~~~~~~~~~~~ + + This library implements a regex based scanner. Some languages + like Pascal are easy to parse but have some keywords that + depend on the context. Because of this it's impossible to lex + that just by using a regular expression lexer like the + `RegexLexer`. + + Have a look at the `DelphiLexer` to get an idea of how to use + this scanner. + + :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" +import re + + +class EndOfText(RuntimeError): + """ + Raise if end of text is reached and the user + tried to call a match function. + """ + + +class Scanner: + """ + Simple scanner + + All method patterns are regular expression strings (not + compiled expressions!) 
+ """ + + def __init__(self, text, flags=0): + """ + :param text: The text which should be scanned + :param flags: default regular expression flags + """ + self.data = text + self.data_length = len(text) + self.start_pos = 0 + self.pos = 0 + self.flags = flags + self.last = None + self.match = None + self._re_cache = {} + + def eos(self): + """`True` if the scanner reached the end of text.""" + return self.pos >= self.data_length + eos = property(eos, eos.__doc__) + + def check(self, pattern): + """ + Apply `pattern` on the current position and return + the match object. (Doesn't touch pos). Use this for + lookahead. + """ + if self.eos: + raise EndOfText() + if pattern not in self._re_cache: + self._re_cache[pattern] = re.compile(pattern, self.flags) + return self._re_cache[pattern].match(self.data, self.pos) + + def test(self, pattern): + """Apply a pattern on the current position and check + if it patches. Doesn't touch pos. + """ + return self.check(pattern) is not None + + def scan(self, pattern): + """ + Scan the text for the given pattern and update pos/match + and related fields. The return value is a boolean that + indicates if the pattern matched. The matched value is + stored on the instance as ``match``, the last value is + stored as ``last``. ``start_pos`` is the position of the + pointer before the pattern was matched, ``pos`` is the + end position. + """ + if self.eos: + raise EndOfText() + if pattern not in self._re_cache: + self._re_cache[pattern] = re.compile(pattern, self.flags) + self.last = self.match + m = self._re_cache[pattern].match(self.data, self.pos) + if m is None: + return False + self.start_pos = m.start() + self.pos = m.end() + self.match = m.group() + return True + + def get_char(self): + """Scan exactly one char.""" + self.scan('.') + + def __repr__(self): + return '<%s %d/%d>' % ( + self.__class__.__name__, + self.pos, + self.data_length + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/sphinxext.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/sphinxext.py new file mode 100644 index 0000000..955d958 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/sphinxext.py @@ -0,0 +1,247 @@ +""" + pygments.sphinxext + ~~~~~~~~~~~~~~~~~~ + + Sphinx extension to generate automatic documentation of lexers, + formatters and filters. + + :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import sys + +from docutils import nodes +from docutils.statemachine import ViewList +from docutils.parsers.rst import Directive +from sphinx.util.nodes import nested_parse_with_titles + + +MODULEDOC = ''' +.. module:: %s + +%s +%s +''' + +LEXERDOC = ''' +.. class:: %s + + :Short names: %s + :Filenames: %s + :MIME types: %s + + %s + + %s + +''' + +FMTERDOC = ''' +.. class:: %s + + :Short names: %s + :Filenames: %s + + %s + +''' + +FILTERDOC = ''' +.. class:: %s + + :Name: %s + + %s + +''' + + +class PygmentsDoc(Directive): + """ + A directive to collect all lexers/formatters/filters and generate + autoclass directives for them. 
+ """ + has_content = False + required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = False + option_spec = {} + + def run(self): + self.filenames = set() + if self.arguments[0] == 'lexers': + out = self.document_lexers() + elif self.arguments[0] == 'formatters': + out = self.document_formatters() + elif self.arguments[0] == 'filters': + out = self.document_filters() + elif self.arguments[0] == 'lexers_overview': + out = self.document_lexers_overview() + else: + raise Exception('invalid argument for "pygmentsdoc" directive') + node = nodes.compound() + vl = ViewList(out.split('\n'), source='') + nested_parse_with_titles(self.state, vl, node) + for fn in self.filenames: + self.state.document.settings.record_dependencies.add(fn) + return node.children + + def document_lexers_overview(self): + """Generate a tabular overview of all lexers. + + The columns are the lexer name, the extensions handled by this lexer + (or "None"), the aliases and a link to the lexer class.""" + from pip._vendor.pygments.lexers._mapping import LEXERS + from pip._vendor.pygments.lexers import find_lexer_class + out = [] + + table = [] + + def format_link(name, url): + if url: + return f'`{name} <{url}>`_' + return name + + for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()): + lexer_cls = find_lexer_class(data[1]) + extensions = lexer_cls.filenames + lexer_cls.alias_filenames + + table.append({ + 'name': format_link(data[1], lexer_cls.url), + 'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\') or 'None', + 'aliases': ', '.join(data[2]), + 'class': f'{data[0]}.{classname}' + }) + + column_names = ['name', 'extensions', 'aliases', 'class'] + column_lengths = [max([len(row[column]) for row in table if row[column]]) + for column in column_names] + + def write_row(*columns): + """Format a table row""" + out = [] + for length, col in zip(column_lengths, columns): + if col: + out.append(col.ljust(length)) + else: + out.append(' '*length) + + return ' '.join(out) + + def write_seperator(): + """Write a table separator row""" + sep = ['='*c for c in column_lengths] + return write_row(*sep) + + out.append(write_seperator()) + out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class')) + out.append(write_seperator()) + for row in table: + out.append(write_row( + row['name'], + row['extensions'], + row['aliases'], + f':class:`~{row["class"]}`')) + out.append(write_seperator()) + + return '\n'.join(out) + + def document_lexers(self): + from pip._vendor.pygments.lexers._mapping import LEXERS + from pip._vendor import pygments + import inspect + import pathlib + + out = [] + modules = {} + moduledocstrings = {} + for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]): + module = data[0] + mod = __import__(module, None, None, [classname]) + self.filenames.add(mod.__file__) + cls = getattr(mod, classname) + if not cls.__doc__: + print(f"Warning: {classname} does not have a docstring.") + docstring = cls.__doc__ + if isinstance(docstring, bytes): + docstring = docstring.decode('utf8') + + example_file = getattr(cls, '_example', None) + if example_file: + p = pathlib.Path(inspect.getabsfile(pygments)).parent.parent /\ + 'tests' / 'examplefiles' / example_file + content = p.read_text(encoding='utf-8') + if not content: + raise Exception( + f"Empty example file '{example_file}' for lexer " + f"{classname}") + + if data[2]: + lexer_name = data[2][0] + docstring += '\n\n .. admonition:: Example\n' + docstring += f'\n .. 
code-block:: {lexer_name}\n\n' + for line in content.splitlines(): + docstring += f' {line}\n' + + if cls.version_added: + version_line = f'.. versionadded:: {cls.version_added}' + else: + version_line = '' + + modules.setdefault(module, []).append(( + classname, + ', '.join(data[2]) or 'None', + ', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None', + ', '.join(data[4]) or 'None', + docstring, + version_line)) + if module not in moduledocstrings: + moddoc = mod.__doc__ + if isinstance(moddoc, bytes): + moddoc = moddoc.decode('utf8') + moduledocstrings[module] = moddoc + + for module, lexers in sorted(modules.items(), key=lambda x: x[0]): + if moduledocstrings[module] is None: + raise Exception(f"Missing docstring for {module}") + heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.') + out.append(MODULEDOC % (module, heading, '-'*len(heading))) + for data in lexers: + out.append(LEXERDOC % data) + + return ''.join(out) + + def document_formatters(self): + from pip._vendor.pygments.formatters import FORMATTERS + + out = [] + for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]): + module = data[0] + mod = __import__(module, None, None, [classname]) + self.filenames.add(mod.__file__) + cls = getattr(mod, classname) + docstring = cls.__doc__ + if isinstance(docstring, bytes): + docstring = docstring.decode('utf8') + heading = cls.__name__ + out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None', + ', '.join(data[3]).replace('*', '\\*') or 'None', + docstring)) + return ''.join(out) + + def document_filters(self): + from pip._vendor.pygments.filters import FILTERS + + out = [] + for name, cls in FILTERS.items(): + self.filenames.add(sys.modules[cls.__module__].__file__) + docstring = cls.__doc__ + if isinstance(docstring, bytes): + docstring = docstring.decode('utf8') + out.append(FILTERDOC % (cls.__name__, name, docstring)) + return ''.join(out) + + +def setup(app): + app.add_directive('pygmentsdoc', PygmentsDoc) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/style.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/style.py new file mode 100644 index 0000000..be5f832 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/style.py @@ -0,0 +1,203 @@ +""" + pygments.style + ~~~~~~~~~~~~~~ + + Basic style object. + + :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pip._vendor.pygments.token import Token, STANDARD_TYPES + +# Default mapping of ansixxx to RGB colors. 
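# [Editor's note: illustrative sketch, not part of the vendored diff.]
# The style machinery below parses definition strings such as
# 'bold #f00 bg:#eeeeee' and resolves inheritance along the token hierarchy;
# 'ansi*' names map to the RGB values in _ansimap. A minimal custom style
# (the class name is an assumption for illustration):

from pip._vendor.pygments.style import Style
from pip._vendor.pygments.token import Comment, Keyword

class TinyStyle(Style):
    styles = {
        Comment: 'italic #888888',
        Keyword: 'bold ansired',
    }

info = TinyStyle.style_for_token(Keyword)
print(info['bold'], info['color'], info['ansicolor'])   # -> True 7f0000 ansired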
+_ansimap = { + # dark + 'ansiblack': '000000', + 'ansired': '7f0000', + 'ansigreen': '007f00', + 'ansiyellow': '7f7fe0', + 'ansiblue': '00007f', + 'ansimagenta': '7f007f', + 'ansicyan': '007f7f', + 'ansigray': 'e5e5e5', + # normal + 'ansibrightblack': '555555', + 'ansibrightred': 'ff0000', + 'ansibrightgreen': '00ff00', + 'ansibrightyellow': 'ffff00', + 'ansibrightblue': '0000ff', + 'ansibrightmagenta': 'ff00ff', + 'ansibrightcyan': '00ffff', + 'ansiwhite': 'ffffff', +} +# mapping of deprecated #ansixxx colors to new color names +_deprecated_ansicolors = { + # dark + '#ansiblack': 'ansiblack', + '#ansidarkred': 'ansired', + '#ansidarkgreen': 'ansigreen', + '#ansibrown': 'ansiyellow', + '#ansidarkblue': 'ansiblue', + '#ansipurple': 'ansimagenta', + '#ansiteal': 'ansicyan', + '#ansilightgray': 'ansigray', + # normal + '#ansidarkgray': 'ansibrightblack', + '#ansired': 'ansibrightred', + '#ansigreen': 'ansibrightgreen', + '#ansiyellow': 'ansibrightyellow', + '#ansiblue': 'ansibrightblue', + '#ansifuchsia': 'ansibrightmagenta', + '#ansiturquoise': 'ansibrightcyan', + '#ansiwhite': 'ansiwhite', +} +ansicolors = set(_ansimap) + + +class StyleMeta(type): + + def __new__(mcs, name, bases, dct): + obj = type.__new__(mcs, name, bases, dct) + for token in STANDARD_TYPES: + if token not in obj.styles: + obj.styles[token] = '' + + def colorformat(text): + if text in ansicolors: + return text + if text[0:1] == '#': + col = text[1:] + if len(col) == 6: + return col + elif len(col) == 3: + return col[0] * 2 + col[1] * 2 + col[2] * 2 + elif text == '': + return '' + elif text.startswith('var') or text.startswith('calc'): + return text + assert False, f"wrong color format {text!r}" + + _styles = obj._styles = {} + + for ttype in obj.styles: + for token in ttype.split(): + if token in _styles: + continue + ndef = _styles.get(token.parent, None) + styledefs = obj.styles.get(token, '').split() + if not ndef or token is None: + ndef = ['', 0, 0, 0, '', '', 0, 0, 0] + elif 'noinherit' in styledefs and token is not Token: + ndef = _styles[Token][:] + else: + ndef = ndef[:] + _styles[token] = ndef + for styledef in obj.styles.get(token, '').split(): + if styledef == 'noinherit': + pass + elif styledef == 'bold': + ndef[1] = 1 + elif styledef == 'nobold': + ndef[1] = 0 + elif styledef == 'italic': + ndef[2] = 1 + elif styledef == 'noitalic': + ndef[2] = 0 + elif styledef == 'underline': + ndef[3] = 1 + elif styledef == 'nounderline': + ndef[3] = 0 + elif styledef[:3] == 'bg:': + ndef[4] = colorformat(styledef[3:]) + elif styledef[:7] == 'border:': + ndef[5] = colorformat(styledef[7:]) + elif styledef == 'roman': + ndef[6] = 1 + elif styledef == 'sans': + ndef[7] = 1 + elif styledef == 'mono': + ndef[8] = 1 + else: + ndef[0] = colorformat(styledef) + + return obj + + def style_for_token(cls, token): + t = cls._styles[token] + ansicolor = bgansicolor = None + color = t[0] + if color in _deprecated_ansicolors: + color = _deprecated_ansicolors[color] + if color in ansicolors: + ansicolor = color + color = _ansimap[color] + bgcolor = t[4] + if bgcolor in _deprecated_ansicolors: + bgcolor = _deprecated_ansicolors[bgcolor] + if bgcolor in ansicolors: + bgansicolor = bgcolor + bgcolor = _ansimap[bgcolor] + + return { + 'color': color or None, + 'bold': bool(t[1]), + 'italic': bool(t[2]), + 'underline': bool(t[3]), + 'bgcolor': bgcolor or None, + 'border': t[5] or None, + 'roman': bool(t[6]) or None, + 'sans': bool(t[7]) or None, + 'mono': bool(t[8]) or None, + 'ansicolor': ansicolor, + 'bgansicolor': bgansicolor, + } + + 
    def list_styles(cls):
+        return list(cls)
+
+    def styles_token(cls, ttype):
+        return ttype in cls._styles
+
+    def __iter__(cls):
+        for token in cls._styles:
+            yield token, cls.style_for_token(token)
+
+    def __len__(cls):
+        return len(cls._styles)
+
+
+class Style(metaclass=StyleMeta):
+
+    #: overall background color (``None`` means transparent)
+    background_color = '#ffffff'
+
+    #: highlight background color
+    highlight_color = '#ffffcc'
+
+    #: line number font color
+    line_number_color = 'inherit'
+
+    #: line number background color
+    line_number_background_color = 'transparent'
+
+    #: special line number font color
+    line_number_special_color = '#000000'
+
+    #: special line number background color
+    line_number_special_background_color = '#ffffc0'
+
+    #: Style definitions for individual token types.
+    styles = {}
+
+    #: user-friendly style name (used when selecting the style, so this
+    # should be all-lowercase, no spaces, hyphens)
+    name = 'unnamed'
+
+    aliases = []
+
+    # Attribute for lexers defined within Pygments. If set
+    # to True, the style is not shown in the style gallery
+    # on the website. This is intended for language-specific
+    # styles.
+    web_style_gallery_exclude = False
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__init__.py
new file mode 100644
index 0000000..96d53dc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__init__.py
@@ -0,0 +1,61 @@
+"""
+    pygments.styles
+    ~~~~~~~~~~~~~~~
+
+    Contains built-in styles.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.plugin import find_plugin_styles
+from pip._vendor.pygments.util import ClassNotFound
+from pip._vendor.pygments.styles._mapping import STYLES
+
+#: A dictionary of built-in styles, mapping style names to
+#: ``'submodule::classname'`` strings.
+#: This list is deprecated. Use `pygments.styles.STYLES` instead
+STYLE_MAP = {v[1]: v[0].split('.')[-1] + '::' + k for k, v in STYLES.items()}
+
+#: Internal reverse mapping to make `get_style_by_name` more efficient
+_STYLE_NAME_TO_MODULE_MAP = {v[1]: (v[0], k) for k, v in STYLES.items()}
+
+
+def get_style_by_name(name):
+    """
+    Return a style class by its short name. The names of the builtin styles
+    are listed in :data:`pygments.styles.STYLE_MAP`.
+
+    Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
+    found.
+    """
+    if name in _STYLE_NAME_TO_MODULE_MAP:
+        mod, cls = _STYLE_NAME_TO_MODULE_MAP[name]
+        builtin = "yes"
+    else:
+        for found_name, style in find_plugin_styles():
+            if name == found_name:
+                return style
+        # perhaps it got dropped into our styles package
+        builtin = ""
+        mod = 'pygments.styles.' + name
+        cls = name.title() + "Style"
+
+    try:
+        mod = __import__(mod, None, None, [cls])
+    except ImportError:
+        raise ClassNotFound(f"Could not find style module {mod!r}" +
+                            (builtin and ", though it should be builtin")
+                            + ".")
+    try:
+        return getattr(mod, cls)
+    except AttributeError:
+        raise ClassNotFound(f"Could not find style class {cls!r} in style module.")
+
+
+def get_all_styles():
+    """Return a generator for all styles by name, both builtin and plugin."""
+    for v in STYLES.values():
+        yield v[1]
+    for name, _ in find_plugin_styles():
+        yield name
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..3cd5eef
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-312.pyc
new file mode 100644
index 0000000..10f802a
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/_mapping.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/_mapping.py
new file mode 100644
index 0000000..49a7fae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/styles/_mapping.py
@@ -0,0 +1,54 @@
+# Automatically generated by scripts/gen_mapfiles.py.
+# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
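# [Editor's note: illustrative sketch, not part of the vendored diff.]
# The generated STYLES table below maps class names to
# (module, style name, aliases) triples. get_style_by_name() above resolves
# a short name through that table first, then through plugin styles, then by
# a guessed 'pygments.styles.<name>' import:

from pip._vendor.pygments.styles import get_style_by_name, get_all_styles

monokai = get_style_by_name('monokai')
print(monokai.background_color)        # -> #272822
print(sorted(get_all_styles())[:3])    # a few of the registered short names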
+
+STYLES = {
+    'AbapStyle': ('pygments.styles.abap', 'abap', ()),
+    'AlgolStyle': ('pygments.styles.algol', 'algol', ()),
+    'Algol_NuStyle': ('pygments.styles.algol_nu', 'algol_nu', ()),
+    'ArduinoStyle': ('pygments.styles.arduino', 'arduino', ()),
+    'AutumnStyle': ('pygments.styles.autumn', 'autumn', ()),
+    'BlackWhiteStyle': ('pygments.styles.bw', 'bw', ()),
+    'BorlandStyle': ('pygments.styles.borland', 'borland', ()),
+    'CoffeeStyle': ('pygments.styles.coffee', 'coffee', ()),
+    'ColorfulStyle': ('pygments.styles.colorful', 'colorful', ()),
+    'DefaultStyle': ('pygments.styles.default', 'default', ()),
+    'DraculaStyle': ('pygments.styles.dracula', 'dracula', ()),
+    'EmacsStyle': ('pygments.styles.emacs', 'emacs', ()),
+    'FriendlyGrayscaleStyle': ('pygments.styles.friendly_grayscale', 'friendly_grayscale', ()),
+    'FriendlyStyle': ('pygments.styles.friendly', 'friendly', ()),
+    'FruityStyle': ('pygments.styles.fruity', 'fruity', ()),
+    'GhDarkStyle': ('pygments.styles.gh_dark', 'github-dark', ()),
+    'GruvboxDarkStyle': ('pygments.styles.gruvbox', 'gruvbox-dark', ()),
+    'GruvboxLightStyle': ('pygments.styles.gruvbox', 'gruvbox-light', ()),
+    'IgorStyle': ('pygments.styles.igor', 'igor', ()),
+    'InkPotStyle': ('pygments.styles.inkpot', 'inkpot', ()),
+    'LightbulbStyle': ('pygments.styles.lightbulb', 'lightbulb', ()),
+    'LilyPondStyle': ('pygments.styles.lilypond', 'lilypond', ()),
+    'LovelaceStyle': ('pygments.styles.lovelace', 'lovelace', ()),
+    'ManniStyle': ('pygments.styles.manni', 'manni', ()),
+    'MaterialStyle': ('pygments.styles.material', 'material', ()),
+    'MonokaiStyle': ('pygments.styles.monokai', 'monokai', ()),
+    'MurphyStyle': ('pygments.styles.murphy', 'murphy', ()),
+    'NativeStyle': ('pygments.styles.native', 'native', ()),
+    'NordDarkerStyle': ('pygments.styles.nord', 'nord-darker', ()),
+    'NordStyle': ('pygments.styles.nord', 'nord', ()),
+    'OneDarkStyle': ('pygments.styles.onedark', 'one-dark', ()),
+    'ParaisoDarkStyle': ('pygments.styles.paraiso_dark', 'paraiso-dark', ()),
+    'ParaisoLightStyle': ('pygments.styles.paraiso_light', 'paraiso-light', ()),
+    'PastieStyle': ('pygments.styles.pastie', 'pastie', ()),
+    'PerldocStyle': ('pygments.styles.perldoc', 'perldoc', ()),
+    'RainbowDashStyle': ('pygments.styles.rainbow_dash', 'rainbow_dash', ()),
+    'RrtStyle': ('pygments.styles.rrt', 'rrt', ()),
+    'SasStyle': ('pygments.styles.sas', 'sas', ()),
+    'SolarizedDarkStyle': ('pygments.styles.solarized', 'solarized-dark', ()),
+    'SolarizedLightStyle': ('pygments.styles.solarized', 'solarized-light', ()),
+    'StarofficeStyle': ('pygments.styles.staroffice', 'staroffice', ()),
+    'StataDarkStyle': ('pygments.styles.stata_dark', 'stata-dark', ()),
+    'StataLightStyle': ('pygments.styles.stata_light', 'stata-light', ()),
+    'TangoStyle': ('pygments.styles.tango', 'tango', ()),
+    'TracStyle': ('pygments.styles.trac', 'trac', ()),
+    'VimStyle': ('pygments.styles.vim', 'vim', ()),
+    'VisualStudioStyle': ('pygments.styles.vs', 'vs', ()),
+    'XcodeStyle': ('pygments.styles.xcode', 'xcode', ()),
+    'ZenburnStyle': ('pygments.styles.zenburn', 'zenburn', ()),
+}
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/token.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/token.py
new file mode 100644
index 0000000..2f3b97e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/token.py
@@ -0,0 +1,214 @@
+"""
+    pygments.token
+    ~~~~~~~~~~~~~~
+
+    Basic token types and the standard tokens.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+
+class _TokenType(tuple):
+    parent = None
+
+    def split(self):
+        buf = []
+        node = self
+        while node is not None:
+            buf.append(node)
+            node = node.parent
+        buf.reverse()
+        return buf
+
+    def __init__(self, *args):
+        # no need to call super.__init__
+        self.subtypes = set()
+
+    def __contains__(self, val):
+        return self is val or (
+            type(val) is self.__class__ and
+            val[:len(self)] == self
+        )
+
+    def __getattr__(self, val):
+        if not val or not val[0].isupper():
+            return tuple.__getattribute__(self, val)
+        new = _TokenType(self + (val,))
+        setattr(self, val, new)
+        self.subtypes.add(new)
+        new.parent = self
+        return new
+
+    def __repr__(self):
+        return 'Token' + (self and '.' or '') + '.'.join(self)
+
+    def __copy__(self):
+        # These instances are supposed to be singletons
+        return self
+
+    def __deepcopy__(self, memo):
+        # These instances are supposed to be singletons
+        return self
+
+
+Token = _TokenType()
+
+# Special token types
+Text = Token.Text
+Whitespace = Text.Whitespace
+Escape = Token.Escape
+Error = Token.Error
+# Text that doesn't belong to this lexer (e.g. HTML in PHP)
+Other = Token.Other
+
+# Common token types for source code
+Keyword = Token.Keyword
+Name = Token.Name
+Literal = Token.Literal
+String = Literal.String
+Number = Literal.Number
+Punctuation = Token.Punctuation
+Operator = Token.Operator
+Comment = Token.Comment
+
+# Generic types for non-source code
+Generic = Token.Generic
+
+# String and some others are not direct children of Token.
+# alias them:
+Token.Token = Token
+Token.String = String
+Token.Number = Number
+
+
+def is_token_subtype(ttype, other):
+    """
+    Return True if ``ttype`` is a subtype of ``other``.
+
+    exists for backwards compatibility. use ``ttype in other`` now.
+    """
+    return ttype in other
+
+
+def string_to_tokentype(s):
+    """
+    Convert a string into a token type::
+
+        >>> string_to_tokentype('String.Double')
+        Token.Literal.String.Double
+        >>> string_to_tokentype('Token.Literal.Number')
+        Token.Literal.Number
+        >>> string_to_tokentype('')
+        Token
+
+    Tokens that are already tokens are returned unchanged:
+
+        >>> string_to_tokentype(String)
+        Token.Literal.String
+    """
+    if isinstance(s, _TokenType):
+        return s
+    if not s:
+        return Token
+    node = Token
+    for item in s.split('.'):
+        node = getattr(node, item)
+    return node
+
+
+# Map standard token types to short names, used in CSS class naming.
+# If you add a new item, please be sure to run this file to perform
+# a consistency check for duplicate values.
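# [Editor's note: illustrative sketch, not part of the vendored diff.]
# Token types form a lazily built tree: attribute access on a type creates
# and caches the subtype, and containment compares tuple prefixes. The
# STANDARD_TYPES table below then maps each type to the short CSS class
# formatters emit.

from pip._vendor.pygments.token import (
    STANDARD_TYPES, String, Token, string_to_tokentype,
)

assert String is Token.Literal.String          # one singleton per dotted path
assert String.Double in String                 # subtype containment
assert string_to_tokentype('Literal.String') is String
print(STANDARD_TYPES[String.Double])           # -> s2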
+STANDARD_TYPES = { + Token: '', + + Text: '', + Whitespace: 'w', + Escape: 'esc', + Error: 'err', + Other: 'x', + + Keyword: 'k', + Keyword.Constant: 'kc', + Keyword.Declaration: 'kd', + Keyword.Namespace: 'kn', + Keyword.Pseudo: 'kp', + Keyword.Reserved: 'kr', + Keyword.Type: 'kt', + + Name: 'n', + Name.Attribute: 'na', + Name.Builtin: 'nb', + Name.Builtin.Pseudo: 'bp', + Name.Class: 'nc', + Name.Constant: 'no', + Name.Decorator: 'nd', + Name.Entity: 'ni', + Name.Exception: 'ne', + Name.Function: 'nf', + Name.Function.Magic: 'fm', + Name.Property: 'py', + Name.Label: 'nl', + Name.Namespace: 'nn', + Name.Other: 'nx', + Name.Tag: 'nt', + Name.Variable: 'nv', + Name.Variable.Class: 'vc', + Name.Variable.Global: 'vg', + Name.Variable.Instance: 'vi', + Name.Variable.Magic: 'vm', + + Literal: 'l', + Literal.Date: 'ld', + + String: 's', + String.Affix: 'sa', + String.Backtick: 'sb', + String.Char: 'sc', + String.Delimiter: 'dl', + String.Doc: 'sd', + String.Double: 's2', + String.Escape: 'se', + String.Heredoc: 'sh', + String.Interpol: 'si', + String.Other: 'sx', + String.Regex: 'sr', + String.Single: 's1', + String.Symbol: 'ss', + + Number: 'm', + Number.Bin: 'mb', + Number.Float: 'mf', + Number.Hex: 'mh', + Number.Integer: 'mi', + Number.Integer.Long: 'il', + Number.Oct: 'mo', + + Operator: 'o', + Operator.Word: 'ow', + + Punctuation: 'p', + Punctuation.Marker: 'pm', + + Comment: 'c', + Comment.Hashbang: 'ch', + Comment.Multiline: 'cm', + Comment.Preproc: 'cp', + Comment.PreprocFile: 'cpf', + Comment.Single: 'c1', + Comment.Special: 'cs', + + Generic: 'g', + Generic.Deleted: 'gd', + Generic.Emph: 'ge', + Generic.Error: 'gr', + Generic.Heading: 'gh', + Generic.Inserted: 'gi', + Generic.Output: 'go', + Generic.Prompt: 'gp', + Generic.Strong: 'gs', + Generic.Subheading: 'gu', + Generic.EmphStrong: 'ges', + Generic.Traceback: 'gt', +} diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/unistring.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/unistring.py new file mode 100644 index 0000000..e3bd2e7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/unistring.py @@ -0,0 +1,153 @@ +""" + pygments.unistring + ~~~~~~~~~~~~~~~~~~ + + Strings of all Unicode characters of a certain category. + Used for matching in Unicode-aware languages. Run to regenerate. + + Inspired by chartypes_create.py from the MoinMoin project. + + :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
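# [Editor's note: illustrative sketch, not part of the vendored diff.]
# Each constant below (Lu, Ll, Nd, ...) is a regex-ready set of character
# ranges for one Unicode general category. Lexers splice them into character
# classes to get Unicode-aware rules, e.g. a rough identifier pattern:

import re
from pip._vendor.pygments import unistring as uni

ident_re = re.compile('[' + uni.Lu + uni.Ll + '_]'
                      '[' + uni.Lu + uni.Ll + uni.Nd + '_]*')
assert ident_re.fullmatch('größe42')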
+""" + +Cc = '\x00-\x1f\x7f-\x9f' + +Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f' + +Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-
\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U0001189f\U000118f3-\U000118fe\U00011900-\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143ff\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187
ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U0001d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff' + +Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd' + +Cs = '\ud800-\udbff\\\udc00\udc01-\udfff' + +Ll = 
'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\
ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943' + +Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1' + +Lo = 
'\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1878\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080
-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d' + +Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc' + +Lu = 
'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\u
a726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d3\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921' + +Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\U00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172' + +Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672' + +Mn = 
'\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U000
11d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef' + +Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959' + +Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e' + +No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c' + +Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f' + +Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d' + +Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63' + +Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21' + +Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20' + +Po = 
"!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\U00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f" + +Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62' + +Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0' + +Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff' + +Sm = 
'+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1' + +So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f265\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d' + +Zl = '\u2028' + +Zp = '\u2029' + +Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000' + +xid_continue = 
'0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0
-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U00011344\U00011347-\U0
0011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef' + +xid_start = 
'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801\ua803-\ua805
\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b1
1e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d' + +cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs'] + +# Generated from unidata 11.0.0 + +def combine(*args): + return ''.join(globals()[cat] for cat in args) + + +def allexcept(*args): + newcats = cats[:] + for arg in args: + newcats.remove(arg) + return ''.join(globals()[cat] for cat in newcats) + + +def _handle_runs(char_list): # pragma: no cover + buf = [] + for c in char_list: + if len(c) == 1: + if buf and buf[-1][1] == chr(ord(c)-1): + buf[-1] = (buf[-1][0], c) + else: + buf.append((c, c)) + else: + buf.append((c, c)) + for a, b in buf: + if a == b: + yield a + else: + yield f'{a}-{b}' + + +if __name__ == '__main__': # pragma: no cover + import unicodedata + + categories = {'xid_start': [], 'xid_continue': []} + + with open(__file__, encoding='utf-8') as fp: + content = fp.read() + + header = content[:content.find('Cc =')] + footer = content[content.find("def combine("):] + + for code in range(0x110000): + c = chr(code) + cat = unicodedata.category(c) + if ord(c) == 0xdc00: + # Hack to avoid combining this combining with the preceding high + # surrogate, 0xdbff, when doing a repr. + c = '\\' + c + elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e): + # Escape regex metachars. + c = '\\' + c + categories.setdefault(cat, []).append(c) + # XID_START and XID_CONTINUE are special categories used for matching + # identifiers in Python 3. 
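+        # (For example, '1' fails c.isidentifier() but 'a1' passes, so the
+        # digits end up in xid_continue and not in xid_start.)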
+        if c.isidentifier():
+            categories['xid_start'].append(c)
+        if ('a' + c).isidentifier():
+            categories['xid_continue'].append(c)
+
+    with open(__file__, 'w', encoding='utf-8') as fp:
+        fp.write(header)
+
+        for cat in sorted(categories):
+            val = ''.join(_handle_runs(categories[cat]))
+            fp.write(f'{cat} = {val!a}\n\n')
+
+        cats = sorted(categories)
+        cats.remove('xid_start')
+        cats.remove('xid_continue')
+        fp.write(f'cats = {cats!r}\n\n')
+
+        fp.write(f'# Generated from unidata {unicodedata.unidata_version}\n\n')
+
+        fp.write(footer)
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/util.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/util.py
new file mode 100644
index 0000000..71c5710
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pygments/util.py
@@ -0,0 +1,324 @@
+"""
+    pygments.util
+    ~~~~~~~~~~~~~
+
+    Utility functions.
+
+    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+from io import TextIOWrapper
+
+
+split_path_re = re.compile(r'[/\\ ]')
+doctype_lookup_re = re.compile(r'''
+    <!DOCTYPE\s+([^>]*)>
+''', re.DOTALL | re.MULTILINE | re.VERBOSE)
+tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
+                    re.IGNORECASE | re.DOTALL | re.MULTILINE)
+xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
+
+
+class ClassNotFound(ValueError):
+    """Raised if one of the lookup functions didn't find a matching class."""
+
+
+class OptionError(Exception):
+    """
+    This exception will be raised by all option processing functions if
+    the type or value of the argument is not correct.
+    """
+
+
+def get_choice_opt(options, optname, allowed, default=None, normcase=False):
+    """
+    If the key `optname` from the dictionary is not in the sequence
+    `allowed`, raise an error, otherwise return it.
+    """
+    string = options.get(optname, default)
+    if normcase:
+        string = string.lower()
+    if string not in allowed:
+        raise OptionError('Value for option {} must be one of {}'.format(optname, ', '.join(map(str, allowed))))
+    return string
+
+
+def get_bool_opt(options, optname, default=None):
+    """
+    Intuitively, this is `options.get(optname, default)`, but restricted to
+    Boolean values. The Booleans can be represented as strings, in order to
+    accept Boolean values from command-line arguments. If the key `optname`
+    is present in the dictionary `options` and is not associated with a
+    Boolean, raise an `OptionError`. If it is absent, `default` is returned
+    instead.
+
+    The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
+    ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
+    (matched case-insensitively).
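+
+    For example (the option keys here are arbitrary)::
+
+        >>> get_bool_opt({'stripnl': 'off'}, 'stripnl')
+        False
+        >>> get_bool_opt({}, 'stripnl', True)
+        True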
+ """ + string = options.get(optname, default) + if isinstance(string, bool): + return string + elif isinstance(string, int): + return bool(string) + elif not isinstance(string, str): + raise OptionError(f'Invalid type {string!r} for option {optname}; use ' + '1/0, yes/no, true/false, on/off') + elif string.lower() in ('1', 'yes', 'true', 'on'): + return True + elif string.lower() in ('0', 'no', 'false', 'off'): + return False + else: + raise OptionError(f'Invalid value {string!r} for option {optname}; use ' + '1/0, yes/no, true/false, on/off') + + +def get_int_opt(options, optname, default=None): + """As :func:`get_bool_opt`, but interpret the value as an integer.""" + string = options.get(optname, default) + try: + return int(string) + except TypeError: + raise OptionError(f'Invalid type {string!r} for option {optname}; you ' + 'must give an integer value') + except ValueError: + raise OptionError(f'Invalid value {string!r} for option {optname}; you ' + 'must give an integer value') + +def get_list_opt(options, optname, default=None): + """ + If the key `optname` from the dictionary `options` is a string, + split it at whitespace and return it. If it is already a list + or a tuple, it is returned as a list. + """ + val = options.get(optname, default) + if isinstance(val, str): + return val.split() + elif isinstance(val, (list, tuple)): + return list(val) + else: + raise OptionError(f'Invalid type {val!r} for option {optname}; you ' + 'must give a list value') + + +def docstring_headline(obj): + if not obj.__doc__: + return '' + res = [] + for line in obj.__doc__.strip().splitlines(): + if line.strip(): + res.append(" " + line.strip()) + else: + break + return ''.join(res).lstrip() + + +def make_analysator(f): + """Return a static text analyser function that returns float values.""" + def text_analyse(text): + try: + rv = f(text) + except Exception: + return 0.0 + if not rv: + return 0.0 + try: + return min(1.0, max(0.0, float(rv))) + except (ValueError, TypeError): + return 0.0 + text_analyse.__doc__ = f.__doc__ + return staticmethod(text_analyse) + + +def shebang_matches(text, regex): + r"""Check if the given regular expression matches the last part of the + shebang if one exists. + + >>> from pygments.util import shebang_matches + >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') + True + >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') + True + >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?') + False + >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') + False + >>> shebang_matches('#!/usr/bin/startsomethingwith python', + ... 
r'python(2\.\d)?')
+        True
+
+    It also checks for common Windows executable file extensions::
+
+        >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
+        True
+
+    Options in the shebang (``'-f'`` or ``'--foo'``) are ignored, so
+    ``'perl'`` matches the same shebangs as ``'perl -e'``.
+
+    Note that this method automatically searches the whole string (i.e.
+    the regular expression is wrapped in ``'^$'``).
+    """
+    index = text.find('\n')
+    if index >= 0:
+        first_line = text[:index].lower()
+    else:
+        first_line = text.lower()
+    if first_line.startswith('#!'):
+        try:
+            found = [x for x in split_path_re.split(first_line[2:].strip())
+                     if x and not x.startswith('-')][-1]
+        except IndexError:
+            return False
+        regex = re.compile(rf'^{regex}(\.(exe|cmd|bat|bin))?$', re.IGNORECASE)
+        if regex.search(found) is not None:
+            return True
+    return False
+
+
+def doctype_matches(text, regex):
+    """Check if the doctype matches a regular expression (if present).
+
+    Note that this method only checks the first part of a DOCTYPE,
+    e.g. 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'.
+    """
+    m = doctype_lookup_re.search(text)
+    if m is None:
+        return False
+    doctype = m.group(1)
+    return re.compile(regex, re.I).match(doctype.strip()) is not None
+
+
+def html_doctype_matches(text):
+    """Check if the file looks like it has an HTML doctype."""
+    return doctype_matches(text, r'html')
+
+
+_looks_like_xml_cache = {}
+
+
+def looks_like_xml(text):
+    """Check if a doctype exists or if we have some tags."""
+    if xml_decl_re.match(text):
+        return True
+    key = hash(text)
+    try:
+        return _looks_like_xml_cache[key]
+    except KeyError:
+        m = doctype_lookup_re.search(text)
+        if m is not None:
+            return True
+        rv = tag_re.search(text[:1000]) is not None
+        _looks_like_xml_cache[key] = rv
+        return rv
+
+
+def surrogatepair(c):
+    """Given a unicode character code that does not fit in 16 bits,
+    return the corresponding (high, low) 16-bit surrogate pair.
+    """
+    # From example D28 of:
+    # http://www.unicode.org/book/ch03.pdf
+    return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
+
+
+def format_lines(var_name, seq, raw=False, indent_level=0):
+    """Formats a sequence of strings for output."""
+    lines = []
+    base_indent = ' ' * indent_level * 4
+    inner_indent = ' ' * (indent_level + 1) * 4
+    lines.append(base_indent + var_name + ' = (')
+    if raw:
+        # These should be preformatted reprs of, say, tuples.
+        for i in seq:
+            lines.append(inner_indent + i + ',')
+    else:
+        for i in seq:
+            # Force use of single quotes
+            r = repr(i + '"')
+            lines.append(inner_indent + r[:-2] + r[-1] + ',')
+    lines.append(base_indent + ')')
+    return '\n'.join(lines)
+
+
+def duplicates_removed(it, already_seen=()):
+    """
+    Returns a list with duplicates removed from the iterable `it`.
+
+    Order is preserved.
+    """
+    lst = []
+    seen = set()
+    for i in it:
+        if i in seen or i in already_seen:
+            continue
+        lst.append(i)
+        seen.add(i)
+    return lst
+
+
+class Future:
+    """Generic class to defer some work.
+
+    Handled specially in RegexLexerMeta, to support regex string construction
+    at first use.
+    """
+    def get(self):
+        raise NotImplementedError
+
+
+def guess_decode(text):
+    """Decode *text* with guessed encoding.
+
+    First try UTF-8; this should fail for non-UTF-8 encodings.
+    Then try the preferred locale encoding.
+    Fall back to latin-1, which always works.
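+
+    For example (ASCII bytes are valid UTF-8)::
+
+        >>> guess_decode(b'hello')
+        ('hello', 'utf-8')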
+ """ + try: + text = text.decode('utf-8') + return text, 'utf-8' + except UnicodeDecodeError: + try: + import locale + prefencoding = locale.getpreferredencoding() + text = text.decode() + return text, prefencoding + except (UnicodeDecodeError, LookupError): + text = text.decode('latin1') + return text, 'latin1' + + +def guess_decode_from_terminal(text, term): + """Decode *text* coming from terminal *term*. + + First try the terminal encoding, if given. + Then try UTF-8. Then try the preferred locale encoding. + Fall back to latin-1, which always works. + """ + if getattr(term, 'encoding', None): + try: + text = text.decode(term.encoding) + except UnicodeDecodeError: + pass + else: + return text, term.encoding + return guess_decode(text) + + +def terminal_encoding(term): + """Return our best guess of encoding for the given *term*.""" + if getattr(term, 'encoding', None): + return term.encoding + import locale + return locale.getpreferredencoding() + + +class UnclosingTextIOWrapper(TextIOWrapper): + # Don't close underlying buffer on destruction. + def close(self): + self.flush() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__init__.py new file mode 100644 index 0000000..746b89f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__init__.py @@ -0,0 +1,31 @@ +"""Wrappers to call pyproject.toml-based build backend hooks. +""" + +from typing import TYPE_CHECKING + +from ._impl import ( + BackendUnavailable, + BuildBackendHookCaller, + HookMissing, + UnsupportedOperation, + default_subprocess_runner, + quiet_subprocess_runner, +) + +__version__ = "1.2.0" +__all__ = [ + "BackendUnavailable", + "BackendInvalid", + "HookMissing", + "UnsupportedOperation", + "default_subprocess_runner", + "quiet_subprocess_runner", + "BuildBackendHookCaller", +] + +BackendInvalid = BackendUnavailable # Deprecated alias, previously a separate exception + +if TYPE_CHECKING: + from ._impl import SubprocessRunner + + __all__ += ["SubprocessRunner"] diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..5bab062 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-312.pyc new file mode 100644 index 0000000..b0eced6 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_impl.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_impl.py new file mode 100644 index 0000000..d1e9d7b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_impl.py @@ -0,0 +1,410 @@ +import json +import os +import sys +import tempfile +from contextlib import contextmanager +from os.path import abspath +from os.path import join as pjoin +from subprocess import STDOUT, check_call, check_output +from typing import TYPE_CHECKING, Any, Iterator, Mapping, Optional, Sequence + +from ._in_process import _in_proc_script_path + +if TYPE_CHECKING: + 
from typing import Protocol + + class SubprocessRunner(Protocol): + """A protocol for the subprocess runner.""" + + def __call__( + self, + cmd: Sequence[str], + cwd: Optional[str] = None, + extra_environ: Optional[Mapping[str, str]] = None, + ) -> None: + ... + + +def write_json(obj: Mapping[str, Any], path: str, **kwargs) -> None: + with open(path, "w", encoding="utf-8") as f: + json.dump(obj, f, **kwargs) + + +def read_json(path: str) -> Mapping[str, Any]: + with open(path, encoding="utf-8") as f: + return json.load(f) + + +class BackendUnavailable(Exception): + """Will be raised if the backend cannot be imported in the hook process.""" + + def __init__( + self, + traceback: str, + message: Optional[str] = None, + backend_name: Optional[str] = None, + backend_path: Optional[Sequence[str]] = None, + ) -> None: + # Preserving arg order for the sake of API backward compatibility. + self.backend_name = backend_name + self.backend_path = backend_path + self.traceback = traceback + super().__init__(message or "Error while importing backend") + + +class HookMissing(Exception): + """Will be raised on missing hooks (if a fallback can't be used).""" + + def __init__(self, hook_name: str) -> None: + super().__init__(hook_name) + self.hook_name = hook_name + + +class UnsupportedOperation(Exception): + """May be raised by build_sdist if the backend indicates that it can't.""" + + def __init__(self, traceback: str) -> None: + self.traceback = traceback + + +def default_subprocess_runner( + cmd: Sequence[str], + cwd: Optional[str] = None, + extra_environ: Optional[Mapping[str, str]] = None, +) -> None: + """The default method of calling the wrapper subprocess. + + This uses :func:`subprocess.check_call` under the hood. + """ + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + + check_call(cmd, cwd=cwd, env=env) + + +def quiet_subprocess_runner( + cmd: Sequence[str], + cwd: Optional[str] = None, + extra_environ: Optional[Mapping[str, str]] = None, +) -> None: + """Call the subprocess while suppressing output. + + This uses :func:`subprocess.check_output` under the hood. + """ + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + + check_output(cmd, cwd=cwd, env=env, stderr=STDOUT) + + +def norm_and_check(source_tree: str, requested: str) -> str: + """Normalise and check a backend path. + + Ensure that the requested backend path is specified as a relative path, + and resolves to a location under the given source tree. + + Return an absolute version of the requested path. + """ + if os.path.isabs(requested): + raise ValueError("paths must be relative") + + abs_source = os.path.abspath(source_tree) + abs_requested = os.path.normpath(os.path.join(abs_source, requested)) + # We have to use commonprefix for Python 2.7 compatibility. 
So we
+    # normalise case to avoid problems because commonprefix is a character
+    # based comparison :-(
+    norm_source = os.path.normcase(abs_source)
+    norm_requested = os.path.normcase(abs_requested)
+    if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
+        raise ValueError("paths must be inside source tree")
+
+    return abs_requested
+
+
+class BuildBackendHookCaller:
+    """A wrapper to call the build backend hooks for a source directory."""
+
+    def __init__(
+        self,
+        source_dir: str,
+        build_backend: str,
+        backend_path: Optional[Sequence[str]] = None,
+        runner: Optional["SubprocessRunner"] = None,
+        python_executable: Optional[str] = None,
+    ) -> None:
+        """
+        :param source_dir: The source directory to invoke the build backend for
+        :param build_backend: The build backend spec
+        :param backend_path: Additional path entries for the build backend spec
+        :param runner: The :ref:`subprocess runner ` to use
+        :param python_executable:
+            The Python executable used to invoke the build backend
+        """
+        if runner is None:
+            runner = default_subprocess_runner
+
+        self.source_dir = abspath(source_dir)
+        self.build_backend = build_backend
+        if backend_path:
+            backend_path = [norm_and_check(self.source_dir, p) for p in backend_path]
+        self.backend_path = backend_path
+        self._subprocess_runner = runner
+        if not python_executable:
+            python_executable = sys.executable
+        self.python_executable = python_executable
+
+    @contextmanager
+    def subprocess_runner(self, runner: "SubprocessRunner") -> Iterator[None]:
+        """A context manager for temporarily overriding the default
+        :ref:`subprocess runner `.
+
+        :param runner: The new subprocess runner to use within the context.
+
+        .. code-block:: python
+
+            hook_caller = BuildBackendHookCaller(...)
+            with hook_caller.subprocess_runner(quiet_subprocess_runner):
+                ...
+        """
+        prev = self._subprocess_runner
+        self._subprocess_runner = runner
+        try:
+            yield
+        finally:
+            self._subprocess_runner = prev
+
+    def _supported_features(self) -> Sequence[str]:
+        """Return the list of optional features supported by the backend."""
+        return self._call_hook("_supported_features", {})
+
+    def get_requires_for_build_wheel(
+        self,
+        config_settings: Optional[Mapping[str, Any]] = None,
+    ) -> Sequence[str]:
+        """Get additional dependencies required for building a wheel.
+
+        :param config_settings: The configuration settings for the build backend
+        :returns: A list of :pep:`dependency specifiers <508>`.
+
+        .. admonition:: Fallback
+
+            If the build backend does not define a hook with this name, an
+            empty list will be returned.
+        """
+        return self._call_hook(
+            "get_requires_for_build_wheel", {"config_settings": config_settings}
+        )
+
+    def prepare_metadata_for_build_wheel(
+        self,
+        metadata_directory: str,
+        config_settings: Optional[Mapping[str, Any]] = None,
+        _allow_fallback: bool = True,
+    ) -> str:
+        """Prepare a ``*.dist-info`` folder with metadata for this project.
+
+        :param metadata_directory: The directory to write the metadata to
+        :param config_settings: The configuration settings for the build backend
+        :param _allow_fallback:
+            Whether to allow the fallback to building a wheel and extracting
+            the metadata from it. Should be passed as a keyword argument only.
+
+        :returns: Name of the newly created subfolder within
+                  ``metadata_directory``, containing the metadata.
+
+        .. admonition:: Fallback
+
+            If the build backend does not define a hook with this name and
+            ``_allow_fallback`` is truthy, the backend will be asked to build a
+            wheel via the ``build_wheel`` hook and the dist-info extracted from
+            that will be returned.
+        """
+        return self._call_hook(
+            "prepare_metadata_for_build_wheel",
+            {
+                "metadata_directory": abspath(metadata_directory),
+                "config_settings": config_settings,
+                "_allow_fallback": _allow_fallback,
+            },
+        )
+
+    def build_wheel(
+        self,
+        wheel_directory: str,
+        config_settings: Optional[Mapping[str, Any]] = None,
+        metadata_directory: Optional[str] = None,
+    ) -> str:
+        """Build a wheel from this project.
+
+        :param wheel_directory: The directory to write the wheel to
+        :param config_settings: The configuration settings for the build backend
+        :param metadata_directory: The directory to reuse existing metadata from
+        :returns:
+            The name of the newly created wheel within ``wheel_directory``.
+
+        .. admonition:: Interaction with fallback
+
+            If the ``build_wheel`` hook was called in the fallback for
+            :meth:`prepare_metadata_for_build_wheel`, the build backend would
+            not be invoked. Instead, the previously built wheel will be copied
+            to ``wheel_directory`` and the name of that file will be returned.
+        """
+        if metadata_directory is not None:
+            metadata_directory = abspath(metadata_directory)
+        return self._call_hook(
+            "build_wheel",
+            {
+                "wheel_directory": abspath(wheel_directory),
+                "config_settings": config_settings,
+                "metadata_directory": metadata_directory,
+            },
+        )
+
+    def get_requires_for_build_editable(
+        self,
+        config_settings: Optional[Mapping[str, Any]] = None,
+    ) -> Sequence[str]:
+        """Get additional dependencies required for building an editable wheel.
+
+        :param config_settings: The configuration settings for the build backend
+        :returns: A list of :pep:`dependency specifiers <508>`.
+
+        .. admonition:: Fallback
+
+            If the build backend does not define a hook with this name, an
+            empty list will be returned.
+        """
+        return self._call_hook(
+            "get_requires_for_build_editable", {"config_settings": config_settings}
+        )
+
+    def prepare_metadata_for_build_editable(
+        self,
+        metadata_directory: str,
+        config_settings: Optional[Mapping[str, Any]] = None,
+        _allow_fallback: bool = True,
+    ) -> Optional[str]:
+        """Prepare a ``*.dist-info`` folder with metadata for this project.
+
+        :param metadata_directory: The directory to write the metadata to
+        :param config_settings: The configuration settings for the build backend
+        :param _allow_fallback:
+            Whether to allow the fallback to building a wheel and extracting
+            the metadata from it. Should be passed as a keyword argument only.
+        :returns: Name of the newly created subfolder within
+                  ``metadata_directory``, containing the metadata.
+
+        .. admonition:: Fallback
+
+            If the build backend does not define a hook with this name and
+            ``_allow_fallback`` is truthy, the backend will be asked to build a
+            wheel via the ``build_editable`` hook and the dist-info
+            extracted from that will be returned.
+        """
+        return self._call_hook(
+            "prepare_metadata_for_build_editable",
+            {
+                "metadata_directory": abspath(metadata_directory),
+                "config_settings": config_settings,
+                "_allow_fallback": _allow_fallback,
+            },
+        )
+
+    def build_editable(
+        self,
+        wheel_directory: str,
+        config_settings: Optional[Mapping[str, Any]] = None,
+        metadata_directory: Optional[str] = None,
+    ) -> str:
+        """Build an editable wheel from this project.
+
+        :param wheel_directory: The directory to write the wheel to
+        :param config_settings: The configuration settings for the build backend
+        :param metadata_directory: The directory to reuse existing metadata from
+        :returns:
+            The name of the newly created wheel within ``wheel_directory``.
+
+        .. admonition:: Interaction with fallback
+
+            If the ``build_editable`` hook was called in the fallback for
+            :meth:`prepare_metadata_for_build_editable`, the build backend
+            would not be invoked. Instead, the previously built wheel will be
+            copied to ``wheel_directory`` and the name of that file will be
+            returned.
+        """
+        if metadata_directory is not None:
+            metadata_directory = abspath(metadata_directory)
+        return self._call_hook(
+            "build_editable",
+            {
+                "wheel_directory": abspath(wheel_directory),
+                "config_settings": config_settings,
+                "metadata_directory": metadata_directory,
+            },
+        )
+
+    def get_requires_for_build_sdist(
+        self,
+        config_settings: Optional[Mapping[str, Any]] = None,
+    ) -> Sequence[str]:
+        """Get additional dependencies required for building an sdist.
+
+        :returns: A list of :pep:`dependency specifiers <508>`.
+        """
+        return self._call_hook(
+            "get_requires_for_build_sdist", {"config_settings": config_settings}
+        )
+
+    def build_sdist(
+        self,
+        sdist_directory: str,
+        config_settings: Optional[Mapping[str, Any]] = None,
+    ) -> str:
+        """Build an sdist from this project.
+
+        :returns:
+            The name of the newly created sdist within ``sdist_directory``.
+        """
+        return self._call_hook(
+            "build_sdist",
+            {
+                "sdist_directory": abspath(sdist_directory),
+                "config_settings": config_settings,
+            },
+        )
+
+    def _call_hook(self, hook_name: str, kwargs: Mapping[str, Any]) -> Any:
+        extra_environ = {"_PYPROJECT_HOOKS_BUILD_BACKEND": self.build_backend}
+
+        if self.backend_path:
+            backend_path = os.pathsep.join(self.backend_path)
+            extra_environ["_PYPROJECT_HOOKS_BACKEND_PATH"] = backend_path
+
+        with tempfile.TemporaryDirectory() as td:
+            hook_input = {"kwargs": kwargs}
+            write_json(hook_input, pjoin(td, "input.json"), indent=2)
+
+            # Run the hook in a subprocess
+            with _in_proc_script_path() as script:
+                python = self.python_executable
+                self._subprocess_runner(
+                    [python, abspath(str(script)), hook_name, td],
+                    cwd=self.source_dir,
+                    extra_environ=extra_environ,
+                )
+
+            data = read_json(pjoin(td, "output.json"))
+            if data.get("unsupported"):
+                raise UnsupportedOperation(data.get("traceback", ""))
+            if data.get("no_backend"):
+                raise BackendUnavailable(
+                    data.get("traceback", ""),
+                    message=data.get("backend_error", ""),
+                    backend_name=self.build_backend,
+                    backend_path=self.backend_path,
+                )
+            if data.get("hook_missing"):
+                raise HookMissing(data.get("missing_hook_name") or hook_name)
+            return data["return_val"]
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
new file mode 100644
index 0000000..906d0ba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
@@ -0,0 +1,21 @@
+"""This is a subpackage because the directory is on sys.path for _in_process.py
+
+The subpackage should stay as empty as possible to avoid shadowing modules that
+the backend might import.
+""" + +import importlib.resources as resources + +try: + resources.files +except AttributeError: + # Python 3.8 compatibility + def _in_proc_script_path(): + return resources.path(__package__, "_in_process.py") + +else: + + def _in_proc_script_path(): + return resources.as_file( + resources.files(__package__).joinpath("_in_process.py") + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..fde6608 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-312.pyc new file mode 100644 index 0000000..71d013a Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py new file mode 100644 index 0000000..d689bab --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py @@ -0,0 +1,389 @@ +"""This is invoked in a subprocess to call the build backend hooks. + +It expects: +- Command line args: hook_name, control_dir +- Environment variables: + _PYPROJECT_HOOKS_BUILD_BACKEND=entry.point:spec + _PYPROJECT_HOOKS_BACKEND_PATH=paths (separated with os.pathsep) +- control_dir/input.json: + - {"kwargs": {...}} + +Results: +- control_dir/output.json + - {"return_val": ...} +""" +import json +import os +import os.path +import re +import shutil +import sys +import traceback +from glob import glob +from importlib import import_module +from importlib.machinery import PathFinder +from os.path import join as pjoin + +# This file is run as a script, and `import wrappers` is not zip-safe, so we +# include write_json() and read_json() from wrappers.py. + + +def write_json(obj, path, **kwargs): + with open(path, "w", encoding="utf-8") as f: + json.dump(obj, f, **kwargs) + + +def read_json(path): + with open(path, encoding="utf-8") as f: + return json.load(f) + + +class BackendUnavailable(Exception): + """Raised if we cannot import the backend""" + + def __init__(self, message, traceback=None): + super().__init__(message) + self.message = message + self.traceback = traceback + + +class HookMissing(Exception): + """Raised if a hook is missing and we are not executing the fallback""" + + def __init__(self, hook_name=None): + super().__init__(hook_name) + self.hook_name = hook_name + + +def _build_backend(): + """Find and load the build backend""" + backend_path = os.environ.get("_PYPROJECT_HOOKS_BACKEND_PATH") + ep = os.environ["_PYPROJECT_HOOKS_BUILD_BACKEND"] + mod_path, _, obj_path = ep.partition(":") + + if backend_path: + # Ensure in-tree backend directories have the highest priority when importing. 
+ extra_pathitems = backend_path.split(os.pathsep) + sys.meta_path.insert(0, _BackendPathFinder(extra_pathitems, mod_path)) + + try: + obj = import_module(mod_path) + except ImportError: + msg = f"Cannot import {mod_path!r}" + raise BackendUnavailable(msg, traceback.format_exc()) + + if obj_path: + for path_part in obj_path.split("."): + obj = getattr(obj, path_part) + return obj + + +class _BackendPathFinder: + """Implements the MetaPathFinder interface to locate modules in ``backend-path``. + + Since the environment provided by the frontend can contain all sorts of + MetaPathFinders, the only way to ensure the backend is loaded from the + right place is to prepend our own. + """ + + def __init__(self, backend_path, backend_module): + self.backend_path = backend_path + self.backend_module = backend_module + self.backend_parent, _, _ = backend_module.partition(".") + + def find_spec(self, fullname, _path, _target=None): + if "." in fullname: + # Rely on importlib to find nested modules based on parent's path + return None + + # Ignore other items in _path or sys.path and use backend_path instead: + spec = PathFinder.find_spec(fullname, path=self.backend_path) + if spec is None and fullname == self.backend_parent: + # According to the spec, the backend MUST be loaded from backend-path. + # Therefore, we can halt the import machinery and raise a clean error. + msg = f"Cannot find module {self.backend_module!r} in {self.backend_path!r}" + raise BackendUnavailable(msg) + + return spec + + if sys.version_info >= (3, 8): + + def find_distributions(self, context=None): + # Delayed import: Python 3.7 does not contain importlib.metadata + from importlib.metadata import DistributionFinder, MetadataPathFinder + + context = DistributionFinder.Context(path=self.backend_path) + return MetadataPathFinder.find_distributions(context=context) + + +def _supported_features(): + """Return the list of options features supported by the backend. + + Returns a list of strings. + The only possible value is 'build_editable'. + """ + backend = _build_backend() + features = [] + if hasattr(backend, "build_editable"): + features.append("build_editable") + return features + + +def get_requires_for_build_wheel(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_wheel + except AttributeError: + return [] + else: + return hook(config_settings) + + +def get_requires_for_build_editable(config_settings): + """Invoke the optional get_requires_for_build_editable hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_editable + except AttributeError: + return [] + else: + return hook(config_settings) + + +def prepare_metadata_for_build_wheel( + metadata_directory, config_settings, _allow_fallback +): + """Invoke optional prepare_metadata_for_build_wheel + + Implements a fallback by building a wheel if the hook isn't defined, + unless _allow_fallback is False in which case HookMissing is raised. 
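+
+    (The fallback builds a full wheel via build_wheel and extracts its
+    .dist-info; see _get_wheel_metadata_from_wheel below.)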
+ """ + backend = _build_backend() + try: + hook = backend.prepare_metadata_for_build_wheel + except AttributeError: + if not _allow_fallback: + raise HookMissing() + else: + return hook(metadata_directory, config_settings) + # fallback to build_wheel outside the try block to avoid exception chaining + # which can be confusing to users and is not relevant + whl_basename = backend.build_wheel(metadata_directory, config_settings) + return _get_wheel_metadata_from_wheel( + whl_basename, metadata_directory, config_settings + ) + + +def prepare_metadata_for_build_editable( + metadata_directory, config_settings, _allow_fallback +): + """Invoke optional prepare_metadata_for_build_editable + + Implements a fallback by building an editable wheel if the hook isn't + defined, unless _allow_fallback is False in which case HookMissing is + raised. + """ + backend = _build_backend() + try: + hook = backend.prepare_metadata_for_build_editable + except AttributeError: + if not _allow_fallback: + raise HookMissing() + try: + build_hook = backend.build_editable + except AttributeError: + raise HookMissing(hook_name="build_editable") + else: + whl_basename = build_hook(metadata_directory, config_settings) + return _get_wheel_metadata_from_wheel( + whl_basename, metadata_directory, config_settings + ) + else: + return hook(metadata_directory, config_settings) + + +WHEEL_BUILT_MARKER = "PYPROJECT_HOOKS_ALREADY_BUILT_WHEEL" + + +def _dist_info_files(whl_zip): + """Identify the .dist-info folder inside a wheel ZipFile.""" + res = [] + for path in whl_zip.namelist(): + m = re.match(r"[^/\\]+-[^/\\]+\.dist-info/", path) + if m: + res.append(path) + if res: + return res + raise Exception("No .dist-info folder found in wheel") + + +def _get_wheel_metadata_from_wheel(whl_basename, metadata_directory, config_settings): + """Extract the metadata from a wheel. + + Fallback for when the build backend does not + define the 'get_wheel_metadata' hook. + """ + from zipfile import ZipFile + + with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), "wb"): + pass # Touch marker file + + whl_file = os.path.join(metadata_directory, whl_basename) + with ZipFile(whl_file) as zipf: + dist_info = _dist_info_files(zipf) + zipf.extractall(path=metadata_directory, members=dist_info) + return dist_info[0].split("/")[0] + + +def _find_already_built_wheel(metadata_directory): + """Check for a wheel already built during the get_wheel_metadata hook.""" + if not metadata_directory: + return None + metadata_parent = os.path.dirname(metadata_directory) + if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): + return None + + whl_files = glob(os.path.join(metadata_parent, "*.whl")) + if not whl_files: + print("Found wheel built marker, but no .whl files") + return None + if len(whl_files) > 1: + print( + "Found multiple .whl files; unspecified behaviour. " + "Will call build_wheel." + ) + return None + + # Exactly one .whl file + return whl_files[0] + + +def build_wheel(wheel_directory, config_settings, metadata_directory=None): + """Invoke the mandatory build_wheel hook. + + If a wheel was already built in the + prepare_metadata_for_build_wheel fallback, this + will copy it rather than rebuilding the wheel. 
+ """ + prebuilt_whl = _find_already_built_wheel(metadata_directory) + if prebuilt_whl: + shutil.copy2(prebuilt_whl, wheel_directory) + return os.path.basename(prebuilt_whl) + + return _build_backend().build_wheel( + wheel_directory, config_settings, metadata_directory + ) + + +def build_editable(wheel_directory, config_settings, metadata_directory=None): + """Invoke the optional build_editable hook. + + If a wheel was already built in the + prepare_metadata_for_build_editable fallback, this + will copy it rather than rebuilding the wheel. + """ + backend = _build_backend() + try: + hook = backend.build_editable + except AttributeError: + raise HookMissing() + else: + prebuilt_whl = _find_already_built_wheel(metadata_directory) + if prebuilt_whl: + shutil.copy2(prebuilt_whl, wheel_directory) + return os.path.basename(prebuilt_whl) + + return hook(wheel_directory, config_settings, metadata_directory) + + +def get_requires_for_build_sdist(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_sdist + except AttributeError: + return [] + else: + return hook(config_settings) + + +class _DummyException(Exception): + """Nothing should ever raise this exception""" + + +class GotUnsupportedOperation(Exception): + """For internal use when backend raises UnsupportedOperation""" + + def __init__(self, traceback): + self.traceback = traceback + + +def build_sdist(sdist_directory, config_settings): + """Invoke the mandatory build_sdist hook.""" + backend = _build_backend() + try: + return backend.build_sdist(sdist_directory, config_settings) + except getattr(backend, "UnsupportedOperation", _DummyException): + raise GotUnsupportedOperation(traceback.format_exc()) + + +HOOK_NAMES = { + "get_requires_for_build_wheel", + "prepare_metadata_for_build_wheel", + "build_wheel", + "get_requires_for_build_editable", + "prepare_metadata_for_build_editable", + "build_editable", + "get_requires_for_build_sdist", + "build_sdist", + "_supported_features", +} + + +def main(): + if len(sys.argv) < 3: + sys.exit("Needs args: hook_name, control_dir") + hook_name = sys.argv[1] + control_dir = sys.argv[2] + if hook_name not in HOOK_NAMES: + sys.exit("Unknown hook: %s" % hook_name) + + # Remove the parent directory from sys.path to avoid polluting the backend + # import namespace with this directory. 
+ here = os.path.dirname(__file__) + if here in sys.path: + sys.path.remove(here) + + hook = globals()[hook_name] + + hook_input = read_json(pjoin(control_dir, "input.json")) + + json_out = {"unsupported": False, "return_val": None} + try: + json_out["return_val"] = hook(**hook_input["kwargs"]) + except BackendUnavailable as e: + json_out["no_backend"] = True + json_out["traceback"] = e.traceback + json_out["backend_error"] = e.message + except GotUnsupportedOperation as e: + json_out["unsupported"] = True + json_out["traceback"] = e.traceback + except HookMissing as e: + json_out["hook_missing"] = True + json_out["missing_hook_name"] = e.hook_name or hook_name + + write_json(json_out, pjoin(control_dir, "output.json"), indent=2) + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__init__.py new file mode 100644 index 0000000..04230fc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__init__.py @@ -0,0 +1,179 @@ +# __ +# /__) _ _ _ _ _/ _ +# / ( (- (/ (/ (- _) / _) +# / + +""" +Requests HTTP Library +~~~~~~~~~~~~~~~~~~~~~ + +Requests is an HTTP library, written in Python, for human beings. +Basic GET usage: + + >>> import requests + >>> r = requests.get('https://www.python.org') + >>> r.status_code + 200 + >>> b'Python is a programming language' in r.content + True + +... or POST: + + >>> payload = dict(key1='value1', key2='value2') + >>> r = requests.post('https://httpbin.org/post', data=payload) + >>> print(r.text) + { + ... + "form": { + "key1": "value1", + "key2": "value2" + }, + ... + } + +The other HTTP methods are supported - see `requests.api`. Full documentation +is at . + +:copyright: (c) 2017 by Kenneth Reitz. +:license: Apache 2.0, see LICENSE for more details. +""" + +import warnings + +from pip._vendor import urllib3 + +from .exceptions import RequestsDependencyWarning + +charset_normalizer_version = None +chardet_version = None + + +def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): + urllib3_version = urllib3_version.split(".") + assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. + + # Sometimes, urllib3 only reports its version as 16.1. + if len(urllib3_version) == 2: + urllib3_version.append("0") + + # Check urllib3 for compatibility. + major, minor, patch = urllib3_version # noqa: F811 + major, minor, patch = int(major), int(minor), int(patch) + # urllib3 >= 1.21.1 + assert major >= 1 + if major == 1: + assert minor >= 21 + + # Check charset_normalizer for compatibility. 
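+    # Accept chardet >= 3.0.2, < 6.0.0 or charset_normalizer >= 2.0.0,
+    # < 4.0.0. In pip's vendored copy both version variables are None, so
+    # the final no-op branch is the one that actually runs.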
+ if chardet_version: + major, minor, patch = chardet_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # chardet_version >= 3.0.2, < 6.0.0 + assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) + elif charset_normalizer_version: + major, minor, patch = charset_normalizer_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # charset_normalizer >= 2.0.0 < 4.0.0 + assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) + else: + # pip does not need or use character detection + pass + + +def _check_cryptography(cryptography_version): + # cryptography < 1.3.4 + try: + cryptography_version = list(map(int, cryptography_version.split("."))) + except ValueError: + return + + if cryptography_version < [1, 3, 4]: + warning = "Old version of cryptography ({}) may cause slowdown.".format( + cryptography_version + ) + warnings.warn(warning, RequestsDependencyWarning) + + +# Check imported dependencies for compatibility. +try: + check_compatibility( + urllib3.__version__, chardet_version, charset_normalizer_version + ) +except (AssertionError, ValueError): + warnings.warn( + "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " + "version!".format( + urllib3.__version__, chardet_version, charset_normalizer_version + ), + RequestsDependencyWarning, + ) + +# Attempt to enable urllib3's fallback for SNI support +# if the standard library doesn't support SNI or the +# 'ssl' library isn't available. +try: + # Note: This logic prevents upgrading cryptography on Windows, if imported + # as part of pip. + from pip._internal.utils.compat import WINDOWS + if not WINDOWS: + raise ImportError("pip internals: don't import cryptography on Windows") + try: + import ssl + except ImportError: + ssl = None + + if not getattr(ssl, "HAS_SNI", False): + from pip._vendor.urllib3.contrib import pyopenssl + + pyopenssl.inject_into_urllib3() + + # Check cryptography version + from cryptography import __version__ as cryptography_version + + _check_cryptography(cryptography_version) +except ImportError: + pass + +# urllib3's DependencyWarnings should be silenced. +from pip._vendor.urllib3.exceptions import DependencyWarning + +warnings.simplefilter("ignore", DependencyWarning) + +# Set default logging handler to avoid "No handler found" warnings. +import logging +from logging import NullHandler + +from . import packages, utils +from .__version__ import ( + __author__, + __author_email__, + __build__, + __cake__, + __copyright__, + __description__, + __license__, + __title__, + __url__, + __version__, +) +from .api import delete, get, head, options, patch, post, put, request +from .exceptions import ( + ConnectionError, + ConnectTimeout, + FileModeWarning, + HTTPError, + JSONDecodeError, + ReadTimeout, + RequestException, + Timeout, + TooManyRedirects, + URLRequired, +) +from .models import PreparedRequest, Request, Response +from .sessions import Session, session +from .status_codes import codes + +logging.getLogger(__name__).addHandler(NullHandler()) + +# FileModeWarnings go off per the default. 
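+# ("default" shows a FileModeWarning once per location that raises it;
+# append=True adds the rule without overriding existing warning filters.)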
+warnings.simplefilter("default", FileModeWarning, append=True) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..99860da Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-312.pyc new file mode 100644 index 0000000..e2a7327 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-312.pyc new file mode 100644 index 0000000..2e8cf37 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-312.pyc new file mode 100644 index 0000000..f4270c9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/api.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/api.cpython-312.pyc new file mode 100644 index 0000000..01a8fda Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/api.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-312.pyc new file mode 100644 index 0000000..acb0737 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-312.pyc new file mode 100644 index 0000000..3757903 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-312.pyc new file mode 100644 index 0000000..a65125e Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-312.pyc new file mode 100644 index 0000000..9566ed7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-312.pyc 
b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..d7046a9 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/help.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/help.cpython-312.pyc new file mode 100644 index 0000000..37524ce Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/help.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-312.pyc new file mode 100644 index 0000000..4d5a4a0 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/models.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/models.cpython-312.pyc new file mode 100644 index 0000000..d730224 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/models.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-312.pyc new file mode 100644 index 0000000..ecc930b Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-312.pyc new file mode 100644 index 0000000..7f496f4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-312.pyc new file mode 100644 index 0000000..ec86e6d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-312.pyc new file mode 100644 index 0000000..1505436 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000..4fd6431 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__version__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__version__.py new file mode 100644 index 0000000..3128a46 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/__version__.py @@ -0,0 +1,14 @@ 
+# .-. .-. .-. . . .-. .-. .-. .-. +# |( |- |.| | | |- `-. | `-. +# ' ' `-' `-`.`-' `-' `-' ' `-' + +__title__ = "requests" +__description__ = "Python HTTP for Humans." +__url__ = "https://requests.readthedocs.io" +__version__ = "2.32.4" +__build__ = 0x023204 +__author__ = "Kenneth Reitz" +__author_email__ = "me@kennethreitz.org" +__license__ = "Apache-2.0" +__copyright__ = "Copyright Kenneth Reitz" +__cake__ = "\u2728 \U0001f370 \u2728" diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/_internal_utils.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/_internal_utils.py new file mode 100644 index 0000000..f2cf635 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/_internal_utils.py @@ -0,0 +1,50 @@ +""" +requests._internal_utils +~~~~~~~~~~~~~~ + +Provides utility functions that are consumed internally by Requests +which depend on extremely few external helpers (such as compat) +""" +import re + +from .compat import builtin_str + +_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") +_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") +_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") +_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") + +_HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR) +_HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE) +HEADER_VALIDATORS = { + bytes: _HEADER_VALIDATORS_BYTE, + str: _HEADER_VALIDATORS_STR, +} + + +def to_native_string(string, encoding="ascii"): + """Given a string object, regardless of type, returns a representation of + that string in the native string type, encoding and decoding where + necessary. This assumes ASCII unless told otherwise. + """ + if isinstance(string, builtin_str): + out = string + else: + out = string.decode(encoding) + + return out + + +def unicode_is_ascii(u_string): + """Determine if unicode string only contains ASCII characters. + + :param str u_string: unicode string to check. Must be unicode + and not Python 2 `str`. + :rtype: bool + """ + assert isinstance(u_string, str) + try: + u_string.encode("ascii") + return True + except UnicodeEncodeError: + return False diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/adapters.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/adapters.py new file mode 100644 index 0000000..7030777 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/adapters.py @@ -0,0 +1,719 @@ +""" +requests.adapters +~~~~~~~~~~~~~~~~~ + +This module contains the transport adapters that Requests uses to define +and maintain connections. 
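+
+(HTTPAdapter below is the default transport, created by Session under the
+covers for HTTP and HTTPS URLs.)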
+""" + +import os.path +import socket # noqa: F401 +import typing +import warnings + +from pip._vendor.urllib3.exceptions import ClosedPoolError, ConnectTimeoutError +from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError +from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader +from pip._vendor.urllib3.exceptions import ( + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, +) +from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError +from pip._vendor.urllib3.exceptions import ReadTimeoutError, ResponseError +from pip._vendor.urllib3.exceptions import SSLError as _SSLError +from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url +from pip._vendor.urllib3.util import Timeout as TimeoutSauce +from pip._vendor.urllib3.util import parse_url +from pip._vendor.urllib3.util.retry import Retry +from pip._vendor.urllib3.util.ssl_ import create_urllib3_context + +from .auth import _basic_auth_str +from .compat import basestring, urlparse +from .cookies import extract_cookies_to_jar +from .exceptions import ( + ConnectionError, + ConnectTimeout, + InvalidHeader, + InvalidProxyURL, + InvalidSchema, + InvalidURL, + ProxyError, + ReadTimeout, + RetryError, + SSLError, +) +from .models import Response +from .structures import CaseInsensitiveDict +from .utils import ( + DEFAULT_CA_BUNDLE_PATH, + extract_zipped_paths, + get_auth_from_url, + get_encoding_from_headers, + prepend_scheme_if_needed, + select_proxy, + urldefragauth, +) + +try: + from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager +except ImportError: + + def SOCKSProxyManager(*args, **kwargs): + raise InvalidSchema("Missing dependencies for SOCKS support.") + + +if typing.TYPE_CHECKING: + from .models import PreparedRequest + + +DEFAULT_POOLBLOCK = False +DEFAULT_POOLSIZE = 10 +DEFAULT_RETRIES = 0 +DEFAULT_POOL_TIMEOUT = None + + +try: + import ssl # noqa: F401 + + _preloaded_ssl_context = create_urllib3_context() + _preloaded_ssl_context.load_verify_locations( + extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) + ) +except ImportError: + # Bypass default SSLContext creation when Python + # interpreter isn't built with the ssl module. + _preloaded_ssl_context = None + + +def _urllib3_request_context( + request: "PreparedRequest", + verify: "bool | str | None", + client_cert: "typing.Tuple[str, str] | str | None", + poolmanager: "PoolManager", +) -> "(typing.Dict[str, typing.Any], typing.Dict[str, typing.Any])": + host_params = {} + pool_kwargs = {} + parsed_request_url = urlparse(request.url) + scheme = parsed_request_url.scheme.lower() + port = parsed_request_url.port + + # Determine if we have and should use our default SSLContext + # to optimize performance on standard requests. 
+ poolmanager_kwargs = getattr(poolmanager, "connection_pool_kw", {}) + has_poolmanager_ssl_context = poolmanager_kwargs.get("ssl_context") + should_use_default_ssl_context = ( + _preloaded_ssl_context is not None and not has_poolmanager_ssl_context + ) + + cert_reqs = "CERT_REQUIRED" + if verify is False: + cert_reqs = "CERT_NONE" + elif verify is True and should_use_default_ssl_context: + pool_kwargs["ssl_context"] = _preloaded_ssl_context + elif isinstance(verify, str): + if not os.path.isdir(verify): + pool_kwargs["ca_certs"] = verify + else: + pool_kwargs["ca_cert_dir"] = verify + pool_kwargs["cert_reqs"] = cert_reqs + if client_cert is not None: + if isinstance(client_cert, tuple) and len(client_cert) == 2: + pool_kwargs["cert_file"] = client_cert[0] + pool_kwargs["key_file"] = client_cert[1] + else: + # According to our docs, we allow users to specify just the client + # cert path + pool_kwargs["cert_file"] = client_cert + host_params = { + "scheme": scheme, + "host": parsed_request_url.hostname, + "port": port, + } + return host_params, pool_kwargs + + +class BaseAdapter: + """The Base Transport Adapter""" + + def __init__(self): + super().__init__() + + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + """ + raise NotImplementedError + + def close(self): + """Cleans up adapter specific items.""" + raise NotImplementedError + + +class HTTPAdapter(BaseAdapter): + """The built-in HTTP Adapter for urllib3. + + Provides a general-case interface for Requests sessions to contact HTTP and + HTTPS urls by implementing the Transport Adapter interface. This class will + usually be created by the :class:`Session ` class under the + covers. + + :param pool_connections: The number of urllib3 connection pools to cache. + :param pool_maxsize: The maximum number of connections to save in the pool. + :param max_retries: The maximum number of retries each connection + should attempt. Note, this applies only to failed DNS lookups, socket + connections and connection timeouts, never to requests where data has + made it to the server. By default, Requests does not retry failed + connections. If you need granular control over the conditions under + which we retry a request, import urllib3's ``Retry`` class and pass + that instead. + :param pool_block: Whether the connection pool should block for connections. 
+ + Usage:: + + >>> import requests + >>> s = requests.Session() + >>> a = requests.adapters.HTTPAdapter(max_retries=3) + >>> s.mount('http://', a) + """ + + __attrs__ = [ + "max_retries", + "config", + "_pool_connections", + "_pool_maxsize", + "_pool_block", + ] + + def __init__( + self, + pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, + max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK, + ): + if max_retries == DEFAULT_RETRIES: + self.max_retries = Retry(0, read=False) + else: + self.max_retries = Retry.from_int(max_retries) + self.config = {} + self.proxy_manager = {} + + super().__init__() + + self._pool_connections = pool_connections + self._pool_maxsize = pool_maxsize + self._pool_block = pool_block + + self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) + + def __getstate__(self): + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + # Can't handle by adding 'proxy_manager' to self.__attrs__ because + # self.poolmanager uses a lambda function, which isn't pickleable. + self.proxy_manager = {} + self.config = {} + + for attr, value in state.items(): + setattr(self, attr, value) + + self.init_poolmanager( + self._pool_connections, self._pool_maxsize, block=self._pool_block + ) + + def init_poolmanager( + self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs + ): + """Initializes a urllib3 PoolManager. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param connections: The number of urllib3 connection pools to cache. + :param maxsize: The maximum number of connections to save in the pool. + :param block: Block when no free connections are available. + :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. + """ + # save these values for pickling + self._pool_connections = connections + self._pool_maxsize = maxsize + self._pool_block = block + + self.poolmanager = PoolManager( + num_pools=connections, + maxsize=maxsize, + block=block, + **pool_kwargs, + ) + + def proxy_manager_for(self, proxy, **proxy_kwargs): + """Return urllib3 ProxyManager for the given proxy. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The proxy to return a urllib3 ProxyManager for. + :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. + :returns: ProxyManager + :rtype: urllib3.ProxyManager + """ + if proxy in self.proxy_manager: + manager = self.proxy_manager[proxy] + elif proxy.lower().startswith("socks"): + username, password = get_auth_from_url(proxy) + manager = self.proxy_manager[proxy] = SOCKSProxyManager( + proxy, + username=username, + password=password, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs, + ) + else: + proxy_headers = self.proxy_headers(proxy) + manager = self.proxy_manager[proxy] = proxy_from_url( + proxy, + proxy_headers=proxy_headers, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs, + ) + + return manager + + def cert_verify(self, conn, url, verify, cert): + """Verify a SSL certificate. This method should not be called from user + code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param conn: The urllib3 connection object associated with the cert. + :param url: The requested URL. 
+ :param verify: Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: The SSL certificate to verify. + """ + if url.lower().startswith("https") and verify: + conn.cert_reqs = "CERT_REQUIRED" + + # Only load the CA certificates if 'verify' is a string indicating the CA bundle to use. + # Otherwise, if verify is a boolean, we don't load anything since + # the connection will be using a context with the default certificates already loaded, + # and this avoids a call to the slow load_verify_locations() + if verify is not True: + # `verify` must be a str with a path then + cert_loc = verify + + if not os.path.exists(cert_loc): + raise OSError( + f"Could not find a suitable TLS CA certificate bundle, " + f"invalid path: {cert_loc}" + ) + + if not os.path.isdir(cert_loc): + conn.ca_certs = cert_loc + else: + conn.ca_cert_dir = cert_loc + else: + conn.cert_reqs = "CERT_NONE" + conn.ca_certs = None + conn.ca_cert_dir = None + + if cert: + if not isinstance(cert, basestring): + conn.cert_file = cert[0] + conn.key_file = cert[1] + else: + conn.cert_file = cert + conn.key_file = None + if conn.cert_file and not os.path.exists(conn.cert_file): + raise OSError( + f"Could not find the TLS certificate file, " + f"invalid path: {conn.cert_file}" + ) + if conn.key_file and not os.path.exists(conn.key_file): + raise OSError( + f"Could not find the TLS key file, invalid path: {conn.key_file}" + ) + + def build_response(self, req, resp): + """Builds a :class:`Response ` object from a urllib3 + response. This should not be called from user code, and is only exposed + for use when subclassing the + :class:`HTTPAdapter ` + + :param req: The :class:`PreparedRequest ` used to generate the response. + :param resp: The urllib3 response object. + :rtype: requests.Response + """ + response = Response() + + # Fallback to None if there's no status_code, for whatever reason. + response.status_code = getattr(resp, "status", None) + + # Make headers case-insensitive. + response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) + + # Set encoding. + response.encoding = get_encoding_from_headers(response.headers) + response.raw = resp + response.reason = response.raw.reason + + if isinstance(req.url, bytes): + response.url = req.url.decode("utf-8") + else: + response.url = req.url + + # Add new cookies from the server. + extract_cookies_to_jar(response.cookies, req, resp) + + # Give the Response some context. + response.request = req + response.connection = self + + return response + + def build_connection_pool_key_attributes(self, request, verify, cert=None): + """Build the PoolKey attributes used by urllib3 to return a connection. + + This looks at the PreparedRequest, the user-specified verify value, + and the value of the cert parameter to determine what PoolKey values + to use to select a connection from a given urllib3 Connection Pool. + + The SSL related pool key arguments are not consistently set. 
As of + this writing, use the following to determine what keys may be in that + dictionary: + + * If ``verify`` is ``True``, ``"ssl_context"`` will be set and will be the + default Requests SSL Context + * If ``verify`` is ``False``, ``"ssl_context"`` will not be set but + ``"cert_reqs"`` will be set + * If ``verify`` is a string, (i.e., it is a user-specified trust bundle) + ``"ca_certs"`` will be set if the string is not a directory recognized + by :py:func:`os.path.isdir`, otherwise ``"ca_certs_dir"`` will be + set. + * If ``"cert"`` is specified, ``"cert_file"`` will always be set. If + ``"cert"`` is a tuple with a second item, ``"key_file"`` will also + be present + + To override these settings, one may subclass this class, call this + method and use the above logic to change parameters as desired. For + example, if one wishes to use a custom :py:class:`ssl.SSLContext` one + must both set ``"ssl_context"`` and based on what else they require, + alter the other keys to ensure the desired behaviour. + + :param request: + The PreparedReqest being sent over the connection. + :type request: + :class:`~requests.models.PreparedRequest` + :param verify: + Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use. + :param cert: + (optional) Any user-provided SSL certificate for client + authentication (a.k.a., mTLS). This may be a string (i.e., just + the path to a file which holds both certificate and key) or a + tuple of length 2 with the certificate file path and key file + path. + :returns: + A tuple of two dictionaries. The first is the "host parameters" + portion of the Pool Key including scheme, hostname, and port. The + second is a dictionary of SSLContext related parameters. + """ + return _urllib3_request_context(request, verify, cert, self.poolmanager) + + def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): + """Returns a urllib3 connection for the given request and TLS settings. + This should not be called from user code, and is only exposed for use + when subclassing the :class:`HTTPAdapter `. + + :param request: + The :class:`PreparedRequest ` object to be sent + over the connection. + :param verify: + Either a boolean, in which case it controls whether we verify the + server's TLS certificate, or a string, in which case it must be a + path to a CA bundle to use. + :param proxies: + (optional) The proxies dictionary to apply to the request. + :param cert: + (optional) Any user-provided SSL certificate to be used for client + authentication (a.k.a., mTLS). + :rtype: + urllib3.ConnectionPool + """ + proxy = select_proxy(request.url, proxies) + try: + host_params, pool_kwargs = self.build_connection_pool_key_attributes( + request, + verify, + cert, + ) + except ValueError as e: + raise InvalidURL(e, request=request) + if proxy: + proxy = prepend_scheme_if_needed(proxy, "http") + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL( + "Please check proxy URL. It is malformed " + "and could be missing the host." 
+ ) + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_host( + **host_params, pool_kwargs=pool_kwargs + ) + else: + # Only scheme should be lower case + conn = self.poolmanager.connection_from_host( + **host_params, pool_kwargs=pool_kwargs + ) + + return conn + + def get_connection(self, url, proxies=None): + """DEPRECATED: Users should move to `get_connection_with_tls_context` + for all subclasses of HTTPAdapter using Requests>=2.32.2. + + Returns a urllib3 connection for the given URL. This should not be + called from user code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param url: The URL to connect to. + :param proxies: (optional) A Requests-style dictionary of proxies used on this request. + :rtype: urllib3.ConnectionPool + """ + warnings.warn( + ( + "`get_connection` has been deprecated in favor of " + "`get_connection_with_tls_context`. Custom HTTPAdapter subclasses " + "will need to migrate for Requests>=2.32.2. Please see " + "https://github.com/psf/requests/pull/6710 for more details." + ), + DeprecationWarning, + ) + proxy = select_proxy(url, proxies) + + if proxy: + proxy = prepend_scheme_if_needed(proxy, "http") + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL( + "Please check proxy URL. It is malformed " + "and could be missing the host." + ) + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_url(url) + else: + # Only scheme should be lower case + parsed = urlparse(url) + url = parsed.geturl() + conn = self.poolmanager.connection_from_url(url) + + return conn + + def close(self): + """Disposes of any internal state. + + Currently, this closes the PoolManager and any active ProxyManager, + which closes any pooled connections. + """ + self.poolmanager.clear() + for proxy in self.proxy_manager.values(): + proxy.clear() + + def request_url(self, request, proxies): + """Obtain the url to use when making the final request. + + If the message is being sent through a HTTP proxy, the full URL has to + be used. Otherwise, we should only use the path portion of the URL. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` being sent. + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. + :rtype: str + """ + proxy = select_proxy(request.url, proxies) + scheme = urlparse(request.url).scheme + + is_proxied_http_request = proxy and scheme != "https" + using_socks_proxy = False + if proxy: + proxy_scheme = urlparse(proxy).scheme.lower() + using_socks_proxy = proxy_scheme.startswith("socks") + + url = request.path_url + if url.startswith("//"): # Don't confuse urllib3 + url = f"/{url.lstrip('/')}" + + if is_proxied_http_request and not using_socks_proxy: + url = urldefragauth(request.url) + + return url + + def add_headers(self, request, **kwargs): + """Add any headers needed by the connection. As of v2.0 this does + nothing by default, but is left for overriding by users that subclass + the :class:`HTTPAdapter `. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` to add headers to. + :param kwargs: The keyword arguments from the call to send(). + """ + pass + + def proxy_headers(self, proxy): + """Returns a dictionary of the headers to add to any request sent + through a proxy. 
This works with urllib3 magic to ensure that they are + correctly sent to the proxy, rather than in a tunnelled request if + CONNECT is being used. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The url of the proxy being used for this request. + :rtype: dict + """ + headers = {} + username, password = get_auth_from_url(proxy) + + if username: + headers["Proxy-Authorization"] = _basic_auth_str(username, password) + + return headers + + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple or urllib3 Timeout object + :param verify: (optional) Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + :rtype: requests.Response + """ + + try: + conn = self.get_connection_with_tls_context( + request, verify, proxies=proxies, cert=cert + ) + except LocationValueError as e: + raise InvalidURL(e, request=request) + + self.cert_verify(conn, request.url, verify, cert) + url = self.request_url(request, proxies) + self.add_headers( + request, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + ) + + chunked = not (request.body is None or "Content-Length" in request.headers) + + if isinstance(timeout, tuple): + try: + connect, read = timeout + timeout = TimeoutSauce(connect=connect, read=read) + except ValueError: + raise ValueError( + f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " + f"or a single float to set both timeouts to the same value." + ) + elif isinstance(timeout, TimeoutSauce): + pass + else: + timeout = TimeoutSauce(connect=timeout, read=timeout) + + try: + resp = conn.urlopen( + method=request.method, + url=url, + body=request.body, + headers=request.headers, + redirect=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + retries=self.max_retries, + timeout=timeout, + chunked=chunked, + ) + + except (ProtocolError, OSError) as err: + raise ConnectionError(err, request=request) + + except MaxRetryError as e: + if isinstance(e.reason, ConnectTimeoutError): + # TODO: Remove this in 3.0.0: see #2811 + if not isinstance(e.reason, NewConnectionError): + raise ConnectTimeout(e, request=request) + + if isinstance(e.reason, ResponseError): + raise RetryError(e, request=request) + + if isinstance(e.reason, _ProxyError): + raise ProxyError(e, request=request) + + if isinstance(e.reason, _SSLError): + # This branch is for urllib3 v1.22 and later. 
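+                # (Newer urllib3 wraps the SSL failure in MaxRetryError
+                # and exposes it as e.reason.)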
+ raise SSLError(e, request=request) + + raise ConnectionError(e, request=request) + + except ClosedPoolError as e: + raise ConnectionError(e, request=request) + + except _ProxyError as e: + raise ProxyError(e) + + except (_SSLError, _HTTPError) as e: + if isinstance(e, _SSLError): + # This branch is for urllib3 versions earlier than v1.22 + raise SSLError(e, request=request) + elif isinstance(e, ReadTimeoutError): + raise ReadTimeout(e, request=request) + elif isinstance(e, _InvalidHeader): + raise InvalidHeader(e, request=request) + else: + raise + + return self.build_response(request, resp) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/api.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/api.py new file mode 100644 index 0000000..5960744 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/api.py @@ -0,0 +1,157 @@ +""" +requests.api +~~~~~~~~~~~~ + +This module implements the Requests API. + +:copyright: (c) 2012 by Kenneth Reitz. +:license: Apache2, see LICENSE for more details. +""" + +from . import sessions + + +def request(method, url, **kwargs): + """Constructs and sends a :class:`Request `. + + :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. + :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. + ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` + or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content_type'`` is a string + defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers + to add for the file. + :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How many seconds to wait for the server to send data + before giving up, as a float, or a :ref:`(connect timeout, read + timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. + :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. + :param stream: (optional) if ``False``, the response content will be immediately downloaded. + :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. 
+ :return: :class:`Response ` object + :rtype: requests.Response + + Usage:: + + >>> import requests + >>> req = requests.request('GET', 'https://httpbin.org/get') + >>> req + + """ + + # By using the 'with' statement we are sure the session is closed, thus we + # avoid leaving sockets open which can trigger a ResourceWarning in some + # cases, and look like a memory leak in others. + with sessions.Session() as session: + return session.request(method=method, url=url, **kwargs) + + +def get(url, params=None, **kwargs): + r"""Sends a GET request. + + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("get", url, params=params, **kwargs) + + +def options(url, **kwargs): + r"""Sends an OPTIONS request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("options", url, **kwargs) + + +def head(url, **kwargs): + r"""Sends a HEAD request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. If + `allow_redirects` is not provided, it will be set to `False` (as + opposed to the default :meth:`request` behavior). + :return: :class:`Response ` object + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", False) + return request("head", url, **kwargs) + + +def post(url, data=None, json=None, **kwargs): + r"""Sends a POST request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("post", url, data=data, json=json, **kwargs) + + +def put(url, data=None, **kwargs): + r"""Sends a PUT request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("put", url, data=data, **kwargs) + + +def patch(url, data=None, **kwargs): + r"""Sends a PATCH request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("patch", url, data=data, **kwargs) + + +def delete(url, **kwargs): + r"""Sends a DELETE request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. 
+ :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("delete", url, **kwargs) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/auth.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/auth.py new file mode 100644 index 0000000..4a7ce6d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/auth.py @@ -0,0 +1,314 @@ +""" +requests.auth +~~~~~~~~~~~~~ + +This module contains the authentication handlers for Requests. +""" + +import hashlib +import os +import re +import threading +import time +import warnings +from base64 import b64encode + +from ._internal_utils import to_native_string +from .compat import basestring, str, urlparse +from .cookies import extract_cookies_to_jar +from .utils import parse_dict_header + +CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" +CONTENT_TYPE_MULTI_PART = "multipart/form-data" + + +def _basic_auth_str(username, password): + """Returns a Basic Auth string.""" + + # "I want us to put a big-ol' comment on top of it that + # says that this behaviour is dumb but we need to preserve + # it because people are relying on it." + # - Lukasa + # + # These are here solely to maintain backwards compatibility + # for things like ints. This will be removed in 3.0.0. + if not isinstance(username, basestring): + warnings.warn( + "Non-string usernames will no longer be supported in Requests " + "3.0.0. Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(username), + category=DeprecationWarning, + ) + username = str(username) + + if not isinstance(password, basestring): + warnings.warn( + "Non-string passwords will no longer be supported in Requests " + "3.0.0. 
Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(type(password)), + category=DeprecationWarning, + ) + password = str(password) + # -- End Removal -- + + if isinstance(username, str): + username = username.encode("latin1") + + if isinstance(password, str): + password = password.encode("latin1") + + authstr = "Basic " + to_native_string( + b64encode(b":".join((username, password))).strip() + ) + + return authstr + + +class AuthBase: + """Base class that all auth implementations derive from""" + + def __call__(self, r): + raise NotImplementedError("Auth hooks must be callable.") + + +class HTTPBasicAuth(AuthBase): + """Attaches HTTP Basic Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + + def __eq__(self, other): + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) + + def __ne__(self, other): + return not self == other + + def __call__(self, r): + r.headers["Authorization"] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPProxyAuth(HTTPBasicAuth): + """Attaches HTTP Proxy Authentication to a given Request object.""" + + def __call__(self, r): + r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPDigestAuth(AuthBase): + """Attaches HTTP Digest Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + # Keep state in per-thread local storage + self._thread_local = threading.local() + + def init_per_thread_state(self): + # Ensure state is initialized just once per-thread + if not hasattr(self._thread_local, "init"): + self._thread_local.init = True + self._thread_local.last_nonce = "" + self._thread_local.nonce_count = 0 + self._thread_local.chal = {} + self._thread_local.pos = None + self._thread_local.num_401_calls = None + + def build_digest_header(self, method, url): + """ + :rtype: str + """ + + realm = self._thread_local.chal["realm"] + nonce = self._thread_local.chal["nonce"] + qop = self._thread_local.chal.get("qop") + algorithm = self._thread_local.chal.get("algorithm") + opaque = self._thread_local.chal.get("opaque") + hash_utf8 = None + + if algorithm is None: + _algorithm = "MD5" + else: + _algorithm = algorithm.upper() + # lambdas assume digest modules are imported at the top level + if _algorithm == "MD5" or _algorithm == "MD5-SESS": + + def md5_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.md5(x).hexdigest() + + hash_utf8 = md5_utf8 + elif _algorithm == "SHA": + + def sha_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha1(x).hexdigest() + + hash_utf8 = sha_utf8 + elif _algorithm == "SHA-256": + + def sha256_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha256(x).hexdigest() + + hash_utf8 = sha256_utf8 + elif _algorithm == "SHA-512": + + def sha512_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha512(x).hexdigest() + + hash_utf8 = sha512_utf8 + + KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 + + if hash_utf8 is None: + return None + + # XXX not implemented yet + entdig = None + p_parsed = urlparse(url) + #: path is request-uri defined in RFC 2616 which should not be empty + path = p_parsed.path or "/" + if p_parsed.query: + path += 
f"?{p_parsed.query}" + + A1 = f"{self.username}:{realm}:{self.password}" + A2 = f"{method}:{path}" + + HA1 = hash_utf8(A1) + HA2 = hash_utf8(A2) + + if nonce == self._thread_local.last_nonce: + self._thread_local.nonce_count += 1 + else: + self._thread_local.nonce_count = 1 + ncvalue = f"{self._thread_local.nonce_count:08x}" + s = str(self._thread_local.nonce_count).encode("utf-8") + s += nonce.encode("utf-8") + s += time.ctime().encode("utf-8") + s += os.urandom(8) + + cnonce = hashlib.sha1(s).hexdigest()[:16] + if _algorithm == "MD5-SESS": + HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") + + if not qop: + respdig = KD(HA1, f"{nonce}:{HA2}") + elif qop == "auth" or "auth" in qop.split(","): + noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" + respdig = KD(HA1, noncebit) + else: + # XXX handle auth-int. + return None + + self._thread_local.last_nonce = nonce + + # XXX should the partial digests be encoded too? + base = ( + f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' + f'uri="{path}", response="{respdig}"' + ) + if opaque: + base += f', opaque="{opaque}"' + if algorithm: + base += f', algorithm="{algorithm}"' + if entdig: + base += f', digest="{entdig}"' + if qop: + base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' + + return f"Digest {base}" + + def handle_redirect(self, r, **kwargs): + """Reset num_401_calls counter on redirects.""" + if r.is_redirect: + self._thread_local.num_401_calls = 1 + + def handle_401(self, r, **kwargs): + """ + Takes the given response and tries digest-auth, if needed. + + :rtype: requests.Response + """ + + # If response is not 4xx, do not auth + # See https://github.com/psf/requests/issues/3772 + if not 400 <= r.status_code < 500: + self._thread_local.num_401_calls = 1 + return r + + if self._thread_local.pos is not None: + # Rewind the file position indicator of the body to where + # it was to resend the request. + r.request.body.seek(self._thread_local.pos) + s_auth = r.headers.get("www-authenticate", "") + + if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: + self._thread_local.num_401_calls += 1 + pat = re.compile(r"digest ", flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) + + # Consume content and release the original connection + # to allow our new request to reuse the same one. + r.content + r.close() + prep = r.request.copy() + extract_cookies_to_jar(prep._cookies, r.request, r.raw) + prep.prepare_cookies(prep._cookies) + + prep.headers["Authorization"] = self.build_digest_header( + prep.method, prep.url + ) + _r = r.connection.send(prep, **kwargs) + _r.history.append(r) + _r.request = prep + + return _r + + self._thread_local.num_401_calls = 1 + return r + + def __call__(self, r): + # Initialize per-thread state, if needed + self.init_per_thread_state() + # If we have a saved nonce, skip the 401 + if self._thread_local.last_nonce: + r.headers["Authorization"] = self.build_digest_header(r.method, r.url) + try: + self._thread_local.pos = r.body.tell() + except AttributeError: + # In the case of HTTPDigestAuth being reused and the body of + # the previous request was a file-like object, pos has the + # file position of the previous body. Ensure it's set to + # None. 
+ self._thread_local.pos = None + r.register_hook("response", self.handle_401) + r.register_hook("response", self.handle_redirect) + self._thread_local.num_401_calls = 1 + + return r + + def __eq__(self, other): + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) + + def __ne__(self, other): + return not self == other diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/certs.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/certs.py new file mode 100644 index 0000000..2743144 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/certs.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +""" +requests.certs +~~~~~~~~~~~~~~ + +This module returns the preferred default CA certificate bundle. There is +only one — the one from the certifi package. + +If you are packaging Requests, e.g., for a Linux distribution or a managed +environment, you can change the definition of where() to return a separately +packaged CA bundle. +""" +from pip._vendor.certifi import where + +if __name__ == "__main__": + print(where()) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/compat.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/compat.py new file mode 100644 index 0000000..b95a921 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/compat.py @@ -0,0 +1,90 @@ +""" +requests.compat +~~~~~~~~~~~~~~~ + +This module previously handled import compatibility issues +between Python 2 and Python 3. It remains for backwards +compatibility until the next major version. +""" + +import sys + +# ------- +# urllib3 +# ------- +from pip._vendor.urllib3 import __version__ as urllib3_version + +# Detect which major version of urllib3 is being used. +try: + is_urllib3_1 = int(urllib3_version.split(".")[0]) == 1 +except (TypeError, AttributeError): + # If we can't discern a version, prefer old functionality. + is_urllib3_1 = True + +# ------------------- +# Character Detection +# ------------------- + + +def _resolve_char_detection(): + """Find supported character detection libraries.""" + chardet = None + return chardet + + +chardet = _resolve_char_detection() + +# ------- +# Pythons +# ------- + +# Syntax sugar. +_ver = sys.version_info + +#: Python 2.x? +is_py2 = _ver[0] == 2 + +#: Python 3.x? +is_py3 = _ver[0] == 3 + +# Note: We've patched out simplejson support in pip because it prevents +# upgrading simplejson on Windows. +import json +from json import JSONDecodeError + +# Keep OrderedDict for backwards compatibility. 
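+# Illustration (editor's sketch, not upstream code): downstream callers are
+# expected to import the re-exported names from this module instead of
+# reaching into urllib.parse themselves, e.g.
+#
+#     from pip._vendor.requests.compat import urlparse
+#     assert urlparse("https://example.com/a?b=1").netloc == "example.com"
+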
+from collections import OrderedDict +from collections.abc import Callable, Mapping, MutableMapping +from http import cookiejar as cookielib +from http.cookies import Morsel +from io import StringIO + +# -------------- +# Legacy Imports +# -------------- +from urllib.parse import ( + quote, + quote_plus, + unquote, + unquote_plus, + urldefrag, + urlencode, + urljoin, + urlparse, + urlsplit, + urlunparse, +) +from urllib.request import ( + getproxies, + getproxies_environment, + parse_http_list, + proxy_bypass, + proxy_bypass_environment, +) + +builtin_str = str +str = str +bytes = bytes +basestring = (str, bytes) +numeric_types = (int, float) +integer_types = (int,) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/cookies.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/cookies.py new file mode 100644 index 0000000..f69d0cd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/cookies.py @@ -0,0 +1,561 @@ +""" +requests.cookies +~~~~~~~~~~~~~~~~ + +Compatibility code to be able to use `http.cookiejar.CookieJar` with requests. + +requests.utils imports from here, so be careful with imports. +""" + +import calendar +import copy +import time + +from ._internal_utils import to_native_string +from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse + +try: + import threading +except ImportError: + import dummy_threading as threading + + +class MockRequest: + """Wraps a `requests.Request` to mimic a `urllib2.Request`. + + The code in `http.cookiejar.CookieJar` expects this interface in order to correctly + manage cookie policies, i.e., determine whether a cookie can be set, given the + domains of the request and the cookie. + + The original request object is read-only. The client is responsible for collecting + the new headers via `get_new_headers()` and interpreting them appropriately. You + probably want `get_cookie_header`, defined below. 
+ """ + + def __init__(self, request): + self._r = request + self._new_headers = {} + self.type = urlparse(self._r.url).scheme + + def get_type(self): + return self.type + + def get_host(self): + return urlparse(self._r.url).netloc + + def get_origin_req_host(self): + return self.get_host() + + def get_full_url(self): + # Only return the response's URL if the user hadn't set the Host + # header + if not self._r.headers.get("Host"): + return self._r.url + # If they did set it, retrieve it and reconstruct the expected domain + host = to_native_string(self._r.headers["Host"], encoding="utf-8") + parsed = urlparse(self._r.url) + # Reconstruct the URL as we expect it + return urlunparse( + [ + parsed.scheme, + host, + parsed.path, + parsed.params, + parsed.query, + parsed.fragment, + ] + ) + + def is_unverifiable(self): + return True + + def has_header(self, name): + return name in self._r.headers or name in self._new_headers + + def get_header(self, name, default=None): + return self._r.headers.get(name, self._new_headers.get(name, default)) + + def add_header(self, key, val): + """cookiejar has no legitimate use for this method; add it back if you find one.""" + raise NotImplementedError( + "Cookie headers should be added with add_unredirected_header()" + ) + + def add_unredirected_header(self, name, value): + self._new_headers[name] = value + + def get_new_headers(self): + return self._new_headers + + @property + def unverifiable(self): + return self.is_unverifiable() + + @property + def origin_req_host(self): + return self.get_origin_req_host() + + @property + def host(self): + return self.get_host() + + +class MockResponse: + """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. + + ...what? Basically, expose the parsed HTTP headers from the server response + the way `http.cookiejar` expects to see them. + """ + + def __init__(self, headers): + """Make a MockResponse for `cookiejar` to read. + + :param headers: a httplib.HTTPMessage or analogous carrying the headers + """ + self._headers = headers + + def info(self): + return self._headers + + def getheaders(self, name): + self._headers.getheaders(name) + + +def extract_cookies_to_jar(jar, request, response): + """Extract the cookies from the response into a CookieJar. + + :param jar: http.cookiejar.CookieJar (not necessarily a RequestsCookieJar) + :param request: our own requests.Request object + :param response: urllib3.HTTPResponse object + """ + if not (hasattr(response, "_original_response") and response._original_response): + return + # the _original_response field is the wrapped httplib.HTTPResponse object, + req = MockRequest(request) + # pull out the HTTPMessage with the headers and put it in the mock: + res = MockResponse(response._original_response.msg) + jar.extract_cookies(res, req) + + +def get_cookie_header(jar, request): + """ + Produce an appropriate Cookie header string to be sent with `request`, or None. + + :rtype: str + """ + r = MockRequest(request) + jar.add_cookie_header(r) + return r.get_new_headers().get("Cookie") + + +def remove_cookie_by_name(cookiejar, name, domain=None, path=None): + """Unsets a cookie by name, by default over all domains and paths. + + Wraps CookieJar.clear(), is O(n). 
+ """ + clearables = [] + for cookie in cookiejar: + if cookie.name != name: + continue + if domain is not None and domain != cookie.domain: + continue + if path is not None and path != cookie.path: + continue + clearables.append((cookie.domain, cookie.path, cookie.name)) + + for domain, path, name in clearables: + cookiejar.clear(domain, path, name) + + +class CookieConflictError(RuntimeError): + """There are two cookies that meet the criteria specified in the cookie jar. + Use .get and .set and include domain and path args in order to be more specific. + """ + + +class RequestsCookieJar(cookielib.CookieJar, MutableMapping): + """Compatibility class; is a http.cookiejar.CookieJar, but exposes a dict + interface. + + This is the CookieJar we create by default for requests and sessions that + don't specify one, since some clients may expect response.cookies and + session.cookies to support dict operations. + + Requests does not use the dict interface internally; it's just for + compatibility with external client code. All requests code should work + out of the box with externally provided instances of ``CookieJar``, e.g. + ``LWPCookieJar`` and ``FileCookieJar``. + + Unlike a regular CookieJar, this class is pickleable. + + .. warning:: dictionary operations that are normally O(1) may be O(n). + """ + + def get(self, name, default=None, domain=None, path=None): + """Dict-like get() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + + .. warning:: operation is O(n), not O(1). + """ + try: + return self._find_no_duplicates(name, domain, path) + except KeyError: + return default + + def set(self, name, value, **kwargs): + """Dict-like set() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + """ + # support client code that unsets cookies by assignment of a None value: + if value is None: + remove_cookie_by_name( + self, name, domain=kwargs.get("domain"), path=kwargs.get("path") + ) + return + + if isinstance(value, Morsel): + c = morsel_to_cookie(value) + else: + c = create_cookie(name, value, **kwargs) + self.set_cookie(c) + return c + + def iterkeys(self): + """Dict-like iterkeys() that returns an iterator of names of cookies + from the jar. + + .. seealso:: itervalues() and iteritems(). + """ + for cookie in iter(self): + yield cookie.name + + def keys(self): + """Dict-like keys() that returns a list of names of cookies from the + jar. + + .. seealso:: values() and items(). + """ + return list(self.iterkeys()) + + def itervalues(self): + """Dict-like itervalues() that returns an iterator of values of cookies + from the jar. + + .. seealso:: iterkeys() and iteritems(). + """ + for cookie in iter(self): + yield cookie.value + + def values(self): + """Dict-like values() that returns a list of values of cookies from the + jar. + + .. seealso:: keys() and items(). + """ + return list(self.itervalues()) + + def iteritems(self): + """Dict-like iteritems() that returns an iterator of name-value tuples + from the jar. + + .. seealso:: iterkeys() and itervalues(). + """ + for cookie in iter(self): + yield cookie.name, cookie.value + + def items(self): + """Dict-like items() that returns a list of name-value tuples from the + jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a + vanilla python dict of key value pairs. + + .. seealso:: keys() and values(). 
+ """ + return list(self.iteritems()) + + def list_domains(self): + """Utility method to list all the domains in the jar.""" + domains = [] + for cookie in iter(self): + if cookie.domain not in domains: + domains.append(cookie.domain) + return domains + + def list_paths(self): + """Utility method to list all the paths in the jar.""" + paths = [] + for cookie in iter(self): + if cookie.path not in paths: + paths.append(cookie.path) + return paths + + def multiple_domains(self): + """Returns True if there are multiple domains in the jar. + Returns False otherwise. + + :rtype: bool + """ + domains = [] + for cookie in iter(self): + if cookie.domain is not None and cookie.domain in domains: + return True + domains.append(cookie.domain) + return False # there is only one domain in jar + + def get_dict(self, domain=None, path=None): + """Takes as an argument an optional domain and path and returns a plain + old Python dict of name-value pairs of cookies that meet the + requirements. + + :rtype: dict + """ + dictionary = {} + for cookie in iter(self): + if (domain is None or cookie.domain == domain) and ( + path is None or cookie.path == path + ): + dictionary[cookie.name] = cookie.value + return dictionary + + def __contains__(self, name): + try: + return super().__contains__(name) + except CookieConflictError: + return True + + def __getitem__(self, name): + """Dict-like __getitem__() for compatibility with client code. Throws + exception if there are more than one cookie with name. In that case, + use the more explicit get() method instead. + + .. warning:: operation is O(n), not O(1). + """ + return self._find_no_duplicates(name) + + def __setitem__(self, name, value): + """Dict-like __setitem__ for compatibility with client code. Throws + exception if there is already a cookie of that name in the jar. In that + case, use the more explicit set() method instead. + """ + self.set(name, value) + + def __delitem__(self, name): + """Deletes a cookie given a name. Wraps ``http.cookiejar.CookieJar``'s + ``remove_cookie_by_name()``. + """ + remove_cookie_by_name(self, name) + + def set_cookie(self, cookie, *args, **kwargs): + if ( + hasattr(cookie.value, "startswith") + and cookie.value.startswith('"') + and cookie.value.endswith('"') + ): + cookie.value = cookie.value.replace('\\"', "") + return super().set_cookie(cookie, *args, **kwargs) + + def update(self, other): + """Updates this jar with cookies from another CookieJar or dict-like""" + if isinstance(other, cookielib.CookieJar): + for cookie in other: + self.set_cookie(copy.copy(cookie)) + else: + super().update(other) + + def _find(self, name, domain=None, path=None): + """Requests uses this method internally to get cookie values. + + If there are conflicting cookies, _find arbitrarily chooses one. + See _find_no_duplicates if you want an exception thrown if there are + conflicting cookies. + + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :return: cookie.value + """ + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + return cookie.value + + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def _find_no_duplicates(self, name, domain=None, path=None): + """Both ``__get_item__`` and ``get`` call this function: it's never + used elsewhere in Requests. 
+ + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :raises KeyError: if cookie is not found + :raises CookieConflictError: if there are multiple cookies + that match name and optionally domain and path + :return: cookie.value + """ + toReturn = None + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + if toReturn is not None: + # if there are multiple cookies that meet passed in criteria + raise CookieConflictError( + f"There are multiple cookies with name, {name!r}" + ) + # we will eventually return this as long as no cookie conflict + toReturn = cookie.value + + if toReturn: + return toReturn + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def __getstate__(self): + """Unlike a normal CookieJar, this class is pickleable.""" + state = self.__dict__.copy() + # remove the unpickleable RLock object + state.pop("_cookies_lock") + return state + + def __setstate__(self, state): + """Unlike a normal CookieJar, this class is pickleable.""" + self.__dict__.update(state) + if "_cookies_lock" not in self.__dict__: + self._cookies_lock = threading.RLock() + + def copy(self): + """Return a copy of this RequestsCookieJar.""" + new_cj = RequestsCookieJar() + new_cj.set_policy(self.get_policy()) + new_cj.update(self) + return new_cj + + def get_policy(self): + """Return the CookiePolicy instance used.""" + return self._policy + + +def _copy_cookie_jar(jar): + if jar is None: + return None + + if hasattr(jar, "copy"): + # We're dealing with an instance of RequestsCookieJar + return jar.copy() + # We're dealing with a generic CookieJar instance + new_jar = copy.copy(jar) + new_jar.clear() + for cookie in jar: + new_jar.set_cookie(copy.copy(cookie)) + return new_jar + + +def create_cookie(name, value, **kwargs): + """Make a cookie from underspecified parameters. + + By default, the pair of `name` and `value` will be set for the domain '' + and sent on every request (this is sometimes called a "supercookie"). 
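+
+    Illustrative call (editorial addition); the keyword arguments mirror the
+    fields of ``http.cookiejar.Cookie``::
+
+        c = create_cookie("session", "abc123", domain=".example.com",
+                          path="/", secure=True)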
+ """ + result = { + "version": 0, + "name": name, + "value": value, + "port": None, + "domain": "", + "path": "/", + "secure": False, + "expires": None, + "discard": True, + "comment": None, + "comment_url": None, + "rest": {"HttpOnly": None}, + "rfc2109": False, + } + + badargs = set(kwargs) - set(result) + if badargs: + raise TypeError( + f"create_cookie() got unexpected keyword arguments: {list(badargs)}" + ) + + result.update(kwargs) + result["port_specified"] = bool(result["port"]) + result["domain_specified"] = bool(result["domain"]) + result["domain_initial_dot"] = result["domain"].startswith(".") + result["path_specified"] = bool(result["path"]) + + return cookielib.Cookie(**result) + + +def morsel_to_cookie(morsel): + """Convert a Morsel object into a Cookie containing the one k/v pair.""" + + expires = None + if morsel["max-age"]: + try: + expires = int(time.time() + int(morsel["max-age"])) + except ValueError: + raise TypeError(f"max-age: {morsel['max-age']} must be integer") + elif morsel["expires"]: + time_template = "%a, %d-%b-%Y %H:%M:%S GMT" + expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) + return create_cookie( + comment=morsel["comment"], + comment_url=bool(morsel["comment"]), + discard=False, + domain=morsel["domain"], + expires=expires, + name=morsel.key, + path=morsel["path"], + port=None, + rest={"HttpOnly": morsel["httponly"]}, + rfc2109=False, + secure=bool(morsel["secure"]), + value=morsel.value, + version=morsel["version"] or 0, + ) + + +def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): + """Returns a CookieJar from a key/value dictionary. + + :param cookie_dict: Dict of key/values to insert into CookieJar. + :param cookiejar: (optional) A cookiejar to add the cookies to. + :param overwrite: (optional) If False, will not replace cookies + already in the jar with new ones. + :rtype: CookieJar + """ + if cookiejar is None: + cookiejar = RequestsCookieJar() + + if cookie_dict is not None: + names_from_jar = [cookie.name for cookie in cookiejar] + for name in cookie_dict: + if overwrite or (name not in names_from_jar): + cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) + + return cookiejar + + +def merge_cookies(cookiejar, cookies): + """Add cookies to cookiejar and returns a merged CookieJar. + + :param cookiejar: CookieJar object to add the cookies to. + :param cookies: Dictionary or CookieJar object to be added. + :rtype: CookieJar + """ + if not isinstance(cookiejar, cookielib.CookieJar): + raise ValueError("You can only merge into CookieJar") + + if isinstance(cookies, dict): + cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) + elif isinstance(cookies, cookielib.CookieJar): + try: + cookiejar.update(cookies) + except AttributeError: + for cookie_in_jar in cookies: + cookiejar.set_cookie(cookie_in_jar) + + return cookiejar diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/exceptions.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/exceptions.py new file mode 100644 index 0000000..7f3660f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/exceptions.py @@ -0,0 +1,151 @@ +""" +requests.exceptions +~~~~~~~~~~~~~~~~~~~ + +This module contains the set of Requests' exceptions. 
+""" +from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError + +from .compat import JSONDecodeError as CompatJSONDecodeError + + +class RequestException(IOError): + """There was an ambiguous exception that occurred while handling your + request. + """ + + def __init__(self, *args, **kwargs): + """Initialize RequestException with `request` and `response` objects.""" + response = kwargs.pop("response", None) + self.response = response + self.request = kwargs.pop("request", None) + if response is not None and not self.request and hasattr(response, "request"): + self.request = self.response.request + super().__init__(*args, **kwargs) + + +class InvalidJSONError(RequestException): + """A JSON error occurred.""" + + +class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError): + """Couldn't decode the text into json""" + + def __init__(self, *args, **kwargs): + """ + Construct the JSONDecodeError instance first with all + args. Then use it's args to construct the IOError so that + the json specific args aren't used as IOError specific args + and the error message from JSONDecodeError is preserved. + """ + CompatJSONDecodeError.__init__(self, *args) + InvalidJSONError.__init__(self, *self.args, **kwargs) + + def __reduce__(self): + """ + The __reduce__ method called when pickling the object must + be the one from the JSONDecodeError (be it json/simplejson) + as it expects all the arguments for instantiation, not just + one like the IOError, and the MRO would by default call the + __reduce__ method from the IOError due to the inheritance order. + """ + return CompatJSONDecodeError.__reduce__(self) + + +class HTTPError(RequestException): + """An HTTP error occurred.""" + + +class ConnectionError(RequestException): + """A Connection error occurred.""" + + +class ProxyError(ConnectionError): + """A proxy error occurred.""" + + +class SSLError(ConnectionError): + """An SSL error occurred.""" + + +class Timeout(RequestException): + """The request timed out. + + Catching this error will catch both + :exc:`~requests.exceptions.ConnectTimeout` and + :exc:`~requests.exceptions.ReadTimeout` errors. + """ + + +class ConnectTimeout(ConnectionError, Timeout): + """The request timed out while trying to connect to the remote server. + + Requests that produced this error are safe to retry. + """ + + +class ReadTimeout(Timeout): + """The server did not send any data in the allotted amount of time.""" + + +class URLRequired(RequestException): + """A valid URL is required to make a request.""" + + +class TooManyRedirects(RequestException): + """Too many redirects.""" + + +class MissingSchema(RequestException, ValueError): + """The URL scheme (e.g. 
http or https) is missing.""" + + +class InvalidSchema(RequestException, ValueError): + """The URL scheme provided is either invalid or unsupported.""" + + +class InvalidURL(RequestException, ValueError): + """The URL provided was somehow invalid.""" + + +class InvalidHeader(RequestException, ValueError): + """The header value provided was somehow invalid.""" + + +class InvalidProxyURL(InvalidURL): + """The proxy URL provided is invalid.""" + + +class ChunkedEncodingError(RequestException): + """The server declared chunked encoding but sent an invalid chunk.""" + + +class ContentDecodingError(RequestException, BaseHTTPError): + """Failed to decode response content.""" + + +class StreamConsumedError(RequestException, TypeError): + """The content for this response was already consumed.""" + + +class RetryError(RequestException): + """Custom retries logic failed""" + + +class UnrewindableBodyError(RequestException): + """Requests encountered an error when trying to rewind a body.""" + + +# Warnings + + +class RequestsWarning(Warning): + """Base warning for Requests.""" + + +class FileModeWarning(RequestsWarning, DeprecationWarning): + """A file was opened in text mode, but Requests determined its binary length.""" + + +class RequestsDependencyWarning(RequestsWarning): + """An imported dependency doesn't match the expected version range.""" diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/help.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/help.py new file mode 100644 index 0000000..ddbb615 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/help.py @@ -0,0 +1,127 @@ +"""Module containing bug report helper(s).""" + +import json +import platform +import ssl +import sys + +from pip._vendor import idna +from pip._vendor import urllib3 + +from . import __version__ as requests_version + +charset_normalizer = None +chardet = None + +try: + from pip._vendor.urllib3.contrib import pyopenssl +except ImportError: + pyopenssl = None + OpenSSL = None + cryptography = None +else: + import cryptography + import OpenSSL + + +def _implementation(): + """Return a dict with the Python implementation and version. + + Provide both the name and the version of the Python implementation + currently running. For example, on CPython 3.10.3 it will return + {'name': 'CPython', 'version': '3.10.3'}. + + This function works best on CPython and PyPy: in particular, it probably + doesn't work for Jython or IronPython. Future investigation should be done + to work out the correct shape of the code for those platforms. 
+ """ + implementation = platform.python_implementation() + + if implementation == "CPython": + implementation_version = platform.python_version() + elif implementation == "PyPy": + implementation_version = "{}.{}.{}".format( + sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro, + ) + if sys.pypy_version_info.releaselevel != "final": + implementation_version = "".join( + [implementation_version, sys.pypy_version_info.releaselevel] + ) + elif implementation == "Jython": + implementation_version = platform.python_version() # Complete Guess + elif implementation == "IronPython": + implementation_version = platform.python_version() # Complete Guess + else: + implementation_version = "Unknown" + + return {"name": implementation, "version": implementation_version} + + +def info(): + """Generate information for a bug report.""" + try: + platform_info = { + "system": platform.system(), + "release": platform.release(), + } + except OSError: + platform_info = { + "system": "Unknown", + "release": "Unknown", + } + + implementation_info = _implementation() + urllib3_info = {"version": urllib3.__version__} + charset_normalizer_info = {"version": None} + chardet_info = {"version": None} + if charset_normalizer: + charset_normalizer_info = {"version": charset_normalizer.__version__} + if chardet: + chardet_info = {"version": chardet.__version__} + + pyopenssl_info = { + "version": None, + "openssl_version": "", + } + if OpenSSL: + pyopenssl_info = { + "version": OpenSSL.__version__, + "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", + } + cryptography_info = { + "version": getattr(cryptography, "__version__", ""), + } + idna_info = { + "version": getattr(idna, "__version__", ""), + } + + system_ssl = ssl.OPENSSL_VERSION_NUMBER + system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} + + return { + "platform": platform_info, + "implementation": implementation_info, + "system_ssl": system_ssl_info, + "using_pyopenssl": pyopenssl is not None, + "using_charset_normalizer": chardet is None, + "pyOpenSSL": pyopenssl_info, + "urllib3": urllib3_info, + "chardet": chardet_info, + "charset_normalizer": charset_normalizer_info, + "cryptography": cryptography_info, + "idna": idna_info, + "requests": { + "version": requests_version, + }, + } + + +def main(): + """Pretty-print the bug information as JSON.""" + print(json.dumps(info(), sort_keys=True, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/hooks.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/hooks.py new file mode 100644 index 0000000..d181ba2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/hooks.py @@ -0,0 +1,33 @@ +""" +requests.hooks +~~~~~~~~~~~~~~ + +This module provides the capabilities for the Requests hooks system. + +Available hooks: + +``response``: + The response generated from a Request. 
+""" +HOOKS = ["response"] + + +def default_hooks(): + return {event: [] for event in HOOKS} + + +# TODO: response is the only one + + +def dispatch_hook(key, hooks, hook_data, **kwargs): + """Dispatches a hook dictionary on a given piece of data.""" + hooks = hooks or {} + hooks = hooks.get(key) + if hooks: + if hasattr(hooks, "__call__"): + hooks = [hooks] + for hook in hooks: + _hook_data = hook(hook_data, **kwargs) + if _hook_data is not None: + hook_data = _hook_data + return hook_data diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/models.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/models.py new file mode 100644 index 0000000..22de95c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/models.py @@ -0,0 +1,1039 @@ +""" +requests.models +~~~~~~~~~~~~~~~ + +This module contains the primary objects that power Requests. +""" + +import datetime + +# Import encoding now, to avoid implicit import later. +# Implicit import within threads may cause LookupError when standard library is in a ZIP, +# such as in Embedded Python. See https://github.com/psf/requests/issues/3578. +import encodings.idna # noqa: F401 +from io import UnsupportedOperation + +from pip._vendor.urllib3.exceptions import ( + DecodeError, + LocationParseError, + ProtocolError, + ReadTimeoutError, + SSLError, +) +from pip._vendor.urllib3.fields import RequestField +from pip._vendor.urllib3.filepost import encode_multipart_formdata +from pip._vendor.urllib3.util import parse_url + +from ._internal_utils import to_native_string, unicode_is_ascii +from .auth import HTTPBasicAuth +from .compat import ( + Callable, + JSONDecodeError, + Mapping, + basestring, + builtin_str, + chardet, + cookielib, +) +from .compat import json as complexjson +from .compat import urlencode, urlsplit, urlunparse +from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header +from .exceptions import ( + ChunkedEncodingError, + ConnectionError, + ContentDecodingError, + HTTPError, + InvalidJSONError, + InvalidURL, +) +from .exceptions import JSONDecodeError as RequestsJSONDecodeError +from .exceptions import MissingSchema +from .exceptions import SSLError as RequestsSSLError +from .exceptions import StreamConsumedError +from .hooks import default_hooks +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( + check_header_validity, + get_auth_from_url, + guess_filename, + guess_json_utf, + iter_slices, + parse_header_links, + requote_uri, + stream_decode_response_unicode, + super_len, + to_key_val_list, +) + +#: The set of HTTP status codes that indicate an automatically +#: processable redirect. +REDIRECT_STATI = ( + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 + codes.temporary_redirect, # 307 + codes.permanent_redirect, # 308 +) + +DEFAULT_REDIRECT_LIMIT = 30 +CONTENT_CHUNK_SIZE = 10 * 1024 +ITER_CHUNK_SIZE = 512 + + +class RequestEncodingMixin: + @property + def path_url(self): + """Build the path URL to use.""" + + url = [] + + p = urlsplit(self.url) + + path = p.path + if not path: + path = "/" + + url.append(path) + + query = p.query + if query: + url.append("?") + url.append(query) + + return "".join(url) + + @staticmethod + def _encode_params(data): + """Encode parameters in a piece of data. + + Will successfully encode parameters when passed as a dict or a list of + 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary + if parameters are supplied as a dict. 
+ """ + + if isinstance(data, (str, bytes)): + return data + elif hasattr(data, "read"): + return data + elif hasattr(data, "__iter__"): + result = [] + for k, vs in to_key_val_list(data): + if isinstance(vs, basestring) or not hasattr(vs, "__iter__"): + vs = [vs] + for v in vs: + if v is not None: + result.append( + ( + k.encode("utf-8") if isinstance(k, str) else k, + v.encode("utf-8") if isinstance(v, str) else v, + ) + ) + return urlencode(result, doseq=True) + else: + return data + + @staticmethod + def _encode_files(files, data): + """Build the body for a multipart/form-data request. + + Will successfully encode files when passed as a dict or a list of + tuples. Order is retained if data is a list of tuples but arbitrary + if parameters are supplied as a dict. + The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) + or 4-tuples (filename, fileobj, contentype, custom_headers). + """ + if not files: + raise ValueError("Files must be provided.") + elif isinstance(data, basestring): + raise ValueError("Data must not be a string.") + + new_fields = [] + fields = to_key_val_list(data or {}) + files = to_key_val_list(files or {}) + + for field, val in fields: + if isinstance(val, basestring) or not hasattr(val, "__iter__"): + val = [val] + for v in val: + if v is not None: + # Don't call str() on bytestrings: in Py3 it all goes wrong. + if not isinstance(v, bytes): + v = str(v) + + new_fields.append( + ( + field.decode("utf-8") + if isinstance(field, bytes) + else field, + v.encode("utf-8") if isinstance(v, str) else v, + ) + ) + + for k, v in files: + # support for explicit filename + ft = None + fh = None + if isinstance(v, (tuple, list)): + if len(v) == 2: + fn, fp = v + elif len(v) == 3: + fn, fp, ft = v + else: + fn, fp, ft, fh = v + else: + fn = guess_filename(v) or k + fp = v + + if isinstance(fp, (str, bytes, bytearray)): + fdata = fp + elif hasattr(fp, "read"): + fdata = fp.read() + elif fp is None: + continue + else: + fdata = fp + + rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) + rf.make_multipart(content_type=ft) + new_fields.append(rf) + + body, content_type = encode_multipart_formdata(new_fields) + + return body, content_type + + +class RequestHooksMixin: + def register_hook(self, event, hook): + """Properly register a hook.""" + + if event not in self.hooks: + raise ValueError(f'Unsupported event specified, with event name "{event}"') + + if isinstance(hook, Callable): + self.hooks[event].append(hook) + elif hasattr(hook, "__iter__"): + self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) + + def deregister_hook(self, event, hook): + """Deregister a previously registered hook. + Returns True if the hook existed, False if not. + """ + + try: + self.hooks[event].remove(hook) + return True + except ValueError: + return False + + +class Request(RequestHooksMixin): + """A user-created :class:`Request ` object. + + Used to prepare a :class:`PreparedRequest `, which is sent to the server. + + :param method: HTTP method to use. + :param url: URL to send. + :param headers: dictionary of headers to send. + :param files: dictionary of {filename: fileobject} files to multipart upload. + :param data: the body to attach to the request. If a dictionary or + list of tuples ``[(key, value)]`` is provided, form-encoding will + take place. + :param json: json for the body to attach to the request (if files or data is not specified). + :param params: URL parameters to append to the URL. 
+        list of tuples ``[(key, value)]`` is provided, form-encoding will
+        take place.
+    :param auth: Auth handler or (user, pass) tuple.
+    :param cookies: dictionary or CookieJar of cookies to attach to this request.
+    :param hooks: dictionary of callback hooks, for internal usage.
+
+    Usage::
+
+      >>> import requests
+      >>> req = requests.Request('GET', 'https://httpbin.org/get')
+      >>> req.prepare()
+      <PreparedRequest [GET]>
+    """
+
+    def __init__(
+        self,
+        method=None,
+        url=None,
+        headers=None,
+        files=None,
+        data=None,
+        params=None,
+        auth=None,
+        cookies=None,
+        hooks=None,
+        json=None,
+    ):
+        # Default empty dicts for dict params.
+        data = [] if data is None else data
+        files = [] if files is None else files
+        headers = {} if headers is None else headers
+        params = {} if params is None else params
+        hooks = {} if hooks is None else hooks
+
+        self.hooks = default_hooks()
+        for k, v in list(hooks.items()):
+            self.register_hook(event=k, hook=v)
+
+        self.method = method
+        self.url = url
+        self.headers = headers
+        self.files = files
+        self.data = data
+        self.json = json
+        self.params = params
+        self.auth = auth
+        self.cookies = cookies
+
+    def __repr__(self):
+        return f"<Request [{self.method}]>"
+
+    def prepare(self):
+        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
+        p = PreparedRequest()
+        p.prepare(
+            method=self.method,
+            url=self.url,
+            headers=self.headers,
+            files=self.files,
+            data=self.data,
+            json=self.json,
+            params=self.params,
+            auth=self.auth,
+            cookies=self.cookies,
+            hooks=self.hooks,
+        )
+        return p
+
+
+class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
+    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
+    containing the exact bytes that will be sent to the server.
+
+    Instances are generated from a :class:`Request <Request>` object, and
+    should not be instantiated manually; doing so may produce undesirable
+    effects.
+
+    Usage::
+
+      >>> import requests
+      >>> req = requests.Request('GET', 'https://httpbin.org/get')
+      >>> r = req.prepare()
+      >>> r
+      <PreparedRequest [GET]>
+
+      >>> s = requests.Session()
+      >>> s.send(r)
+      <Response [200]>
+    """
+
+    def __init__(self):
+        #: HTTP verb to send to the server.
+        self.method = None
+        #: HTTP URL to send the request to.
+        self.url = None
+        #: dictionary of HTTP headers.
+        self.headers = None
+        # The `CookieJar` used to create the Cookie header will be stored here
+        # after prepare_cookies is called
+        self._cookies = None
+        #: request body to send to the server.
+        self.body = None
+        #: dictionary of callback hooks, for internal usage.
+        self.hooks = default_hooks()
+        #: integer denoting starting position of a readable file-like body.
+        self._body_position = None
+
+    def prepare(
+        self,
+        method=None,
+        url=None,
+        headers=None,
+        files=None,
+        data=None,
+        params=None,
+        auth=None,
+        cookies=None,
+        hooks=None,
+        json=None,
+    ):
+        """Prepares the entire request with the given parameters."""
+
+        self.prepare_method(method)
+        self.prepare_url(url, params)
+        self.prepare_headers(headers)
+        self.prepare_cookies(cookies)
+        self.prepare_body(data, files, json)
+        self.prepare_auth(auth, url)
+
+        # Note that prepare_auth must be last to enable authentication schemes
+        # such as OAuth to work on a fully prepared request.
+
+        # This MUST go after prepare_auth. Authenticators could add a hook
+        self.prepare_hooks(hooks)
+
+    def __repr__(self):
+        return f"<PreparedRequest [{self.method}]>"
+
+    def copy(self):
+        p = PreparedRequest()
+        p.method = self.method
+        p.url = self.url
+        p.headers = self.headers.copy() if self.headers is not None else None
+        p._cookies = _copy_cookie_jar(self._cookies)
+        p.body = self.body
+        p.hooks = self.hooks
+        p._body_position = self._body_position
+        return p
+
+    def prepare_method(self, method):
+        """Prepares the given HTTP method."""
+        self.method = method
+        if self.method is not None:
+            self.method = to_native_string(self.method.upper())
+
+    @staticmethod
+    def _get_idna_encoded_host(host):
+        from pip._vendor import idna
+
+        try:
+            host = idna.encode(host, uts46=True).decode("utf-8")
+        except idna.IDNAError:
+            raise UnicodeError
+        return host
+
+    def prepare_url(self, url, params):
+        """Prepares the given HTTP URL."""
+        #: Accept objects that have string representations.
+        #: We're unable to blindly call unicode/str functions
+        #: as this will include the bytestring indicator (b'')
+        #: on python 3.x.
+        #: https://github.com/psf/requests/pull/2238
+        if isinstance(url, bytes):
+            url = url.decode("utf8")
+        else:
+            url = str(url)
+
+        # Remove leading whitespaces from url
+        url = url.lstrip()
+
+        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
+        # `data` etc to work around exceptions from `url_parse`, which
+        # handles RFC 3986 only.
+        if ":" in url and not url.lower().startswith("http"):
+            self.url = url
+            return
+
+        # Support for unicode domain names and paths.
+        try:
+            scheme, auth, host, port, path, query, fragment = parse_url(url)
+        except LocationParseError as e:
+            raise InvalidURL(*e.args)
+
+        if not scheme:
+            raise MissingSchema(
+                f"Invalid URL {url!r}: No scheme supplied. "
+                f"Perhaps you meant https://{url}?"
+            )
+
+        if not host:
+            raise InvalidURL(f"Invalid URL {url!r}: No host supplied")
+
+        # In general, we want to try IDNA encoding the hostname if the string contains
+        # non-ASCII characters. This allows users to automatically get the correct IDNA
+        # behaviour. For strings containing only ASCII characters, we need to also verify
+        # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
+        if not unicode_is_ascii(host):
+            try:
+                host = self._get_idna_encoded_host(host)
+            except UnicodeError:
+                raise InvalidURL("URL has an invalid label.")
+        elif host.startswith(("*", ".")):
+            raise InvalidURL("URL has an invalid label.")
+
+        # Carefully reconstruct the network location
+        netloc = auth or ""
+        if netloc:
+            netloc += "@"
+        netloc += host
+        if port:
+            netloc += f":{port}"
+
+        # Bare domains aren't valid URLs.
+        if not path:
+            path = "/"
+
+        if isinstance(params, (str, bytes)):
+            params = to_native_string(params)
+
+        enc_params = self._encode_params(params)
+        if enc_params:
+            if query:
+                query = f"{query}&{enc_params}"
+            else:
+                query = enc_params
+
+        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
+        self.url = url
+
+    def prepare_headers(self, headers):
+        """Prepares the given HTTP headers."""
+
+        self.headers = CaseInsensitiveDict()
+        if headers:
+            for header in headers.items():
+                # Raise exception on invalid header value.
+                check_header_validity(header)
+                name, value = header
+                self.headers[to_native_string(name)] = value
+
+    def prepare_body(self, data, files, json=None):
+        """Prepares the given HTTP body data."""
+
+        # Check if file, fo, generator, iterator.
+        # If not, run through normal process.
+
+        # Nottin' on you.
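+        # Illustration (editor's note, not upstream code): the branches below
+        # produce, for example,
+        #
+        #     prepare(method="POST", url=u, json={"a": 1})
+        #         -> body b'{"a": 1}', Content-Type: application/json
+        #     prepare(method="POST", url=u, data={"a": 1})
+        #         -> body "a=1", Content-Type: application/x-www-form-urlencoded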
+        body = None
+        content_type = None
+
+        if not data and json is not None:
+            # urllib3 requires a bytes-like body. Python 2's json.dumps
+            # provides this natively, but Python 3 gives a Unicode string.
+            content_type = "application/json"
+
+            try:
+                body = complexjson.dumps(json, allow_nan=False)
+            except ValueError as ve:
+                raise InvalidJSONError(ve, request=self)
+
+            if not isinstance(body, bytes):
+                body = body.encode("utf-8")
+
+        is_stream = all(
+            [
+                hasattr(data, "__iter__"),
+                not isinstance(data, (basestring, list, tuple, Mapping)),
+            ]
+        )
+
+        if is_stream:
+            try:
+                length = super_len(data)
+            except (TypeError, AttributeError, UnsupportedOperation):
+                length = None
+
+            body = data
+
+            if getattr(body, "tell", None) is not None:
+                # Record the current file position before reading.
+                # This will allow us to rewind a file in the event
+                # of a redirect.
+                try:
+                    self._body_position = body.tell()
+                except OSError:
+                    # This differentiates from None, allowing us to catch
+                    # a failed `tell()` later when trying to rewind the body
+                    self._body_position = object()
+
+            if files:
+                raise NotImplementedError(
+                    "Streamed bodies and files are mutually exclusive."
+                )
+
+            if length:
+                self.headers["Content-Length"] = builtin_str(length)
+            else:
+                self.headers["Transfer-Encoding"] = "chunked"
+        else:
+            # Multi-part file uploads.
+            if files:
+                (body, content_type) = self._encode_files(files, data)
+            else:
+                if data:
+                    body = self._encode_params(data)
+                    if isinstance(data, basestring) or hasattr(data, "read"):
+                        content_type = None
+                    else:
+                        content_type = "application/x-www-form-urlencoded"
+
+            self.prepare_content_length(body)
+
+            # Add content-type if it wasn't explicitly provided.
+            if content_type and ("content-type" not in self.headers):
+                self.headers["Content-Type"] = content_type
+
+        self.body = body
+
+    def prepare_content_length(self, body):
+        """Prepare Content-Length header based on request method and body"""
+        if body is not None:
+            length = super_len(body)
+            if length:
+                # If length exists, set it. Otherwise, we fallback
+                # to Transfer-Encoding: chunked.
+                self.headers["Content-Length"] = builtin_str(length)
+        elif (
+            self.method not in ("GET", "HEAD")
+            and self.headers.get("Content-Length") is None
+        ):
+            # Set Content-Length to 0 for methods that can have a body
+            # but don't provide one. (i.e. not GET or HEAD)
+            self.headers["Content-Length"] = "0"
+
+    def prepare_auth(self, auth, url=""):
+        """Prepares the given HTTP auth data."""
+
+        # If no Auth is explicitly provided, extract it from the URL first.
+        if auth is None:
+            url_auth = get_auth_from_url(self.url)
+            auth = url_auth if any(url_auth) else None
+
+        if auth:
+            if isinstance(auth, tuple) and len(auth) == 2:
+                # special-case basic HTTP auth
+                auth = HTTPBasicAuth(*auth)
+
+            # Allow auth to make its changes.
+            r = auth(self)
+
+            # Update self to reflect the auth changes.
+            self.__dict__.update(r.__dict__)
+
+            # Recompute Content-Length
+            self.prepare_content_length(self.body)
+
+    def prepare_cookies(self, cookies):
+        """Prepares the given HTTP cookie data.
+
+        This function eventually generates a ``Cookie`` header from the
+        given cookies using cookielib. Due to cookielib's design, the header
+        will not be regenerated if it already exists, meaning this function
+        can only be called once for the life of the
+        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
+        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
+        header is removed beforehand.
+ """ + if isinstance(cookies, cookielib.CookieJar): + self._cookies = cookies + else: + self._cookies = cookiejar_from_dict(cookies) + + cookie_header = get_cookie_header(self._cookies, self) + if cookie_header is not None: + self.headers["Cookie"] = cookie_header + + def prepare_hooks(self, hooks): + """Prepares the given hooks.""" + # hooks can be passed as None to the prepare method and to this + # method. To prevent iterating over None, simply use an empty list + # if hooks is False-y + hooks = hooks or [] + for event in hooks: + self.register_hook(event, hooks[event]) + + +class Response: + """The :class:`Response ` object, which contains a + server's response to an HTTP request. + """ + + __attrs__ = [ + "_content", + "status_code", + "headers", + "url", + "history", + "encoding", + "reason", + "cookies", + "elapsed", + "request", + ] + + def __init__(self): + self._content = False + self._content_consumed = False + self._next = None + + #: Integer Code of responded HTTP Status, e.g. 404 or 200. + self.status_code = None + + #: Case-insensitive Dictionary of Response Headers. + #: For example, ``headers['content-encoding']`` will return the + #: value of a ``'Content-Encoding'`` response header. + self.headers = CaseInsensitiveDict() + + #: File-like object representation of response (for advanced usage). + #: Use of ``raw`` requires that ``stream=True`` be set on the request. + #: This requirement does not apply for use internally to Requests. + self.raw = None + + #: Final URL location of Response. + self.url = None + + #: Encoding to decode with when accessing r.text. + self.encoding = None + + #: A list of :class:`Response ` objects from + #: the history of the Request. Any redirect responses will end + #: up here. The list is sorted from the oldest to the most recent request. + self.history = [] + + #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". + self.reason = None + + #: A CookieJar of Cookies the server sent back. + self.cookies = cookiejar_from_dict({}) + + #: The amount of time elapsed between sending the request + #: and the arrival of the response (as a timedelta). + #: This property specifically measures the time taken between sending + #: the first byte of the request and finishing parsing the headers. It + #: is therefore unaffected by consuming the response content or the + #: value of the ``stream`` keyword argument. + self.elapsed = datetime.timedelta(0) + + #: The :class:`PreparedRequest ` object to which this + #: is a response. + self.request = None + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def __getstate__(self): + # Consume everything; accessing the content attribute makes + # sure the content has been fully read. + if not self._content_consumed: + self.content + + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + for name, value in state.items(): + setattr(self, name, value) + + # pickled objects do not have .raw + setattr(self, "_content_consumed", True) + setattr(self, "raw", None) + + def __repr__(self): + return f"" + + def __bool__(self): + """Returns True if :attr:`status_code` is less than 400. + + This attribute checks if the status code of the response is between + 400 and 600 to see if there was a client error or a server error. If + the status code, is between 200 and 400, this will return True. This + is **not** a check to see if the response code is ``200 OK``. 
+ """ + return self.ok + + def __nonzero__(self): + """Returns True if :attr:`status_code` is less than 400. + + This attribute checks if the status code of the response is between + 400 and 600 to see if there was a client error or a server error. If + the status code, is between 200 and 400, this will return True. This + is **not** a check to see if the response code is ``200 OK``. + """ + return self.ok + + def __iter__(self): + """Allows you to use a response as an iterator.""" + return self.iter_content(128) + + @property + def ok(self): + """Returns True if :attr:`status_code` is less than 400, False if not. + + This attribute checks if the status code of the response is between + 400 and 600 to see if there was a client error or a server error. If + the status code is between 200 and 400, this will return True. This + is **not** a check to see if the response code is ``200 OK``. + """ + try: + self.raise_for_status() + except HTTPError: + return False + return True + + @property + def is_redirect(self): + """True if this Response is a well-formed HTTP redirect that could have + been processed automatically (by :meth:`Session.resolve_redirects`). + """ + return "location" in self.headers and self.status_code in REDIRECT_STATI + + @property + def is_permanent_redirect(self): + """True if this Response one of the permanent versions of redirect.""" + return "location" in self.headers and self.status_code in ( + codes.moved_permanently, + codes.permanent_redirect, + ) + + @property + def next(self): + """Returns a PreparedRequest for the next request in a redirect chain, if there is one.""" + return self._next + + @property + def apparent_encoding(self): + """The apparent encoding, provided by the charset_normalizer or chardet libraries.""" + if chardet is not None: + return chardet.detect(self.content)["encoding"] + else: + # If no character detection library is available, we'll fall back + # to a standard Python utf-8 str. + return "utf-8" + + def iter_content(self, chunk_size=1, decode_unicode=False): + """Iterates over the response data. When stream=True is set on the + request, this avoids reading the content at once into memory for + large responses. The chunk size is the number of bytes it should + read into memory. This is not necessarily the length of each item + returned as decoding can take place. + + chunk_size must be of type int or None. A value of None will + function differently depending on the value of `stream`. + stream=True will read data as it arrives in whatever size the + chunks are received. If stream=False, data is returned as + a single chunk. + + If decode_unicode is True, content will be decoded using the best + available encoding based on the response. + """ + + def generate(): + # Special case for urllib3. + if hasattr(self.raw, "stream"): + try: + yield from self.raw.stream(chunk_size, decode_content=True) + except ProtocolError as e: + raise ChunkedEncodingError(e) + except DecodeError as e: + raise ContentDecodingError(e) + except ReadTimeoutError as e: + raise ConnectionError(e) + except SSLError as e: + raise RequestsSSLError(e) + else: + # Standard file-like object. + while True: + chunk = self.raw.read(chunk_size) + if not chunk: + break + yield chunk + + self._content_consumed = True + + if self._content_consumed and isinstance(self._content, bool): + raise StreamConsumedError() + elif chunk_size is not None and not isinstance(chunk_size, int): + raise TypeError( + f"chunk_size must be an int, it is instead a {type(chunk_size)}." 
+ ) + # simulate reading small chunks of the content + reused_chunks = iter_slices(self._content, chunk_size) + + stream_chunks = generate() + + chunks = reused_chunks if self._content_consumed else stream_chunks + + if decode_unicode: + chunks = stream_decode_response_unicode(chunks, self) + + return chunks + + def iter_lines( + self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None + ): + """Iterates over the response data, one line at a time. When + stream=True is set on the request, this avoids reading the + content at once into memory for large responses. + + .. note:: This method is not reentrant safe. + """ + + pending = None + + for chunk in self.iter_content( + chunk_size=chunk_size, decode_unicode=decode_unicode + ): + if pending is not None: + chunk = pending + chunk + + if delimiter: + lines = chunk.split(delimiter) + else: + lines = chunk.splitlines() + + if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: + pending = lines.pop() + else: + pending = None + + yield from lines + + if pending is not None: + yield pending + + @property + def content(self): + """Content of the response, in bytes.""" + + if self._content is False: + # Read the contents. + if self._content_consumed: + raise RuntimeError("The content for this response was already consumed") + + if self.status_code == 0 or self.raw is None: + self._content = None + else: + self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b"" + + self._content_consumed = True + # don't need to release the connection; that's been handled by urllib3 + # since we exhausted the data. + return self._content + + @property + def text(self): + """Content of the response, in unicode. + + If Response.encoding is None, encoding will be guessed using + ``charset_normalizer`` or ``chardet``. + + The encoding of the response content is determined based solely on HTTP + headers, following RFC 2616 to the letter. If you can take advantage of + non-HTTP knowledge to make a better guess at the encoding, you should + set ``r.encoding`` appropriately before accessing this property. + """ + + # Try charset from content-type + content = None + encoding = self.encoding + + if not self.content: + return "" + + # Fallback to auto-detected encoding. + if self.encoding is None: + encoding = self.apparent_encoding + + # Decode unicode from given encoding. + try: + content = str(self.content, encoding, errors="replace") + except (LookupError, TypeError): + # A LookupError is raised if the encoding was not found which could + # indicate a misspelling or similar mistake. + # + # A TypeError can be raised if encoding is None + # + # So we try blindly encoding. + content = str(self.content, errors="replace") + + return content + + def json(self, **kwargs): + r"""Decodes the JSON response body (if any) as a Python object. + + This may return a dictionary, list, etc. depending on what is in the response. + + :param \*\*kwargs: Optional arguments that ``json.loads`` takes. + :raises requests.exceptions.JSONDecodeError: If the response body does not + contain valid json. + """ + + if not self.encoding and self.content and len(self.content) > 3: + # No encoding set. JSON RFC 4627 section 3 states we should expect + # UTF-8, -16 or -32. Detect which one to use; If the detection or + # decoding fails, fall back to `self.text` (using charset_normalizer to make + # a best guess). 
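+            # For example (editorial note): content starting with the UTF-16
+            # little-endian BOM b"\xff\xfe" is detected as "utf-16" here and
+            # decoded before JSON parsing.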
+ encoding = guess_json_utf(self.content) + if encoding is not None: + try: + return complexjson.loads(self.content.decode(encoding), **kwargs) + except UnicodeDecodeError: + # Wrong UTF codec detected; usually because it's not UTF-8 + # but some other 8-bit codec. This is an RFC violation, + # and the server didn't bother to tell us what codec *was* + # used. + pass + except JSONDecodeError as e: + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + + try: + return complexjson.loads(self.text, **kwargs) + except JSONDecodeError as e: + # Catch JSON-related errors and raise as requests.JSONDecodeError + # This aliases json.JSONDecodeError and simplejson.JSONDecodeError + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + + @property + def links(self): + """Returns the parsed header links of the response, if any.""" + + header = self.headers.get("link") + + resolved_links = {} + + if header: + links = parse_header_links(header) + + for link in links: + key = link.get("rel") or link.get("url") + resolved_links[key] = link + + return resolved_links + + def raise_for_status(self): + """Raises :class:`HTTPError`, if one occurred.""" + + http_error_msg = "" + if isinstance(self.reason, bytes): + # We attempt to decode utf-8 first because some servers + # choose to localize their reason strings. If the string + # isn't utf-8, we fall back to iso-8859-1 for all other + # encodings. (See PR #3538) + try: + reason = self.reason.decode("utf-8") + except UnicodeDecodeError: + reason = self.reason.decode("iso-8859-1") + else: + reason = self.reason + + if 400 <= self.status_code < 500: + http_error_msg = ( + f"{self.status_code} Client Error: {reason} for url: {self.url}" + ) + + elif 500 <= self.status_code < 600: + http_error_msg = ( + f"{self.status_code} Server Error: {reason} for url: {self.url}" + ) + + if http_error_msg: + raise HTTPError(http_error_msg, response=self) + + def close(self): + """Releases the connection back to the pool. Once this method has been + called the underlying ``raw`` object must not be accessed again. + + *Note: Should not normally need to be called explicitly.* + """ + if not self._content_consumed: + self.raw.close() + + release_conn = getattr(self.raw, "release_conn", None) + if release_conn is not None: + release_conn() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/packages.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/packages.py new file mode 100644 index 0000000..200c382 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/packages.py @@ -0,0 +1,25 @@ +import sys + +from .compat import chardet + +# This code exists for backwards compatibility reasons. +# I don't like it either. Just look the other way. :) + +for package in ("urllib3", "idna"): + vendored_package = "pip._vendor." + package + locals()[package] = __import__(vendored_package) + # This traversal is apparently necessary such that the identities are + # preserved (requests.packages.urllib3.* is urllib3.*) + for mod in list(sys.modules): + if mod == vendored_package or mod.startswith(vendored_package + '.'): + unprefixed_mod = mod[len("pip._vendor."):] + sys.modules['pip._vendor.requests.packages.' 
+ unprefixed_mod] = sys.modules[mod] + +if chardet is not None: + target = chardet.__name__ + for mod in list(sys.modules): + if mod == target or mod.startswith(f"{target}."): + imported_mod = sys.modules[mod] + sys.modules[f"requests.packages.{mod}"] = imported_mod + mod = mod.replace(target, "chardet") + sys.modules[f"requests.packages.{mod}"] = imported_mod diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/sessions.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/sessions.py new file mode 100644 index 0000000..b387bc3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/sessions.py @@ -0,0 +1,831 @@ +""" +requests.sessions +~~~~~~~~~~~~~~~~~ + +This module provides a Session object to manage and persist settings across +requests (cookies, auth, proxies). +""" +import os +import sys +import time +from collections import OrderedDict +from datetime import timedelta + +from ._internal_utils import to_native_string +from .adapters import HTTPAdapter +from .auth import _basic_auth_str +from .compat import Mapping, cookielib, urljoin, urlparse +from .cookies import ( + RequestsCookieJar, + cookiejar_from_dict, + extract_cookies_to_jar, + merge_cookies, +) +from .exceptions import ( + ChunkedEncodingError, + ContentDecodingError, + InvalidSchema, + TooManyRedirects, +) +from .hooks import default_hooks, dispatch_hook + +# formerly defined here, reexposed here for backward compatibility +from .models import ( # noqa: F401 + DEFAULT_REDIRECT_LIMIT, + REDIRECT_STATI, + PreparedRequest, + Request, +) +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( # noqa: F401 + DEFAULT_PORTS, + default_headers, + get_auth_from_url, + get_environ_proxies, + get_netrc_auth, + requote_uri, + resolve_proxies, + rewind_body, + should_bypass_proxies, + to_key_val_list, +) + +# Preferred clock, based on which one is more accurate on a given system. +if sys.platform == "win32": + preferred_clock = time.perf_counter +else: + preferred_clock = time.time + + +def merge_setting(request_setting, session_setting, dict_class=OrderedDict): + """Determines appropriate setting for a given request, taking into account + the explicit setting on that request, and the setting in the session. If a + setting is a dictionary, they will be merged together using `dict_class` + """ + + if session_setting is None: + return request_setting + + if request_setting is None: + return session_setting + + # Bypass if not a dictionary (e.g. verify) + if not ( + isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) + ): + return request_setting + + merged_setting = dict_class(to_key_val_list(session_setting)) + merged_setting.update(to_key_val_list(request_setting)) + + # Remove keys that are set to None. Extract keys first to avoid altering + # the dictionary during iteration. + none_keys = [k for (k, v) in merged_setting.items() if v is None] + for key in none_keys: + del merged_setting[key] + + return merged_setting + + +def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): + """Properly merges both requests and session hooks. + + This is necessary because when request_hooks == {'response': []}, the + merge breaks Session hooks entirely. 
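+
+    Illustrative example: merge_hooks({"response": []}, {"response": [log_hook]})
+    returns the session hooks unchanged, because the request's empty list would
+    otherwise mask them ("log_hook" is a hypothetical callable).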
+ """ + if session_hooks is None or session_hooks.get("response") == []: + return request_hooks + + if request_hooks is None or request_hooks.get("response") == []: + return session_hooks + + return merge_setting(request_hooks, session_hooks, dict_class) + + +class SessionRedirectMixin: + def get_redirect_target(self, resp): + """Receives a Response. Returns a redirect URI or ``None``""" + # Due to the nature of how requests processes redirects this method will + # be called at least once upon the original response and at least twice + # on each subsequent redirect response (if any). + # If a custom mixin is used to handle this logic, it may be advantageous + # to cache the redirect location onto the response object as a private + # attribute. + if resp.is_redirect: + location = resp.headers["location"] + # Currently the underlying http module on py3 decode headers + # in latin1, but empirical evidence suggests that latin1 is very + # rarely used with non-ASCII characters in HTTP headers. + # It is more likely to get UTF8 header rather than latin1. + # This causes incorrect handling of UTF8 encoded location headers. + # To solve this, we re-encode the location in latin1. + location = location.encode("latin1") + return to_native_string(location, "utf8") + return None + + def should_strip_auth(self, old_url, new_url): + """Decide whether Authorization header should be removed when redirecting""" + old_parsed = urlparse(old_url) + new_parsed = urlparse(new_url) + if old_parsed.hostname != new_parsed.hostname: + return True + # Special case: allow http -> https redirect when using the standard + # ports. This isn't specified by RFC 7235, but is kept to avoid + # breaking backwards compatibility with older versions of requests + # that allowed any redirects on the same host. + if ( + old_parsed.scheme == "http" + and old_parsed.port in (80, None) + and new_parsed.scheme == "https" + and new_parsed.port in (443, None) + ): + return False + + # Handle default port usage corresponding to scheme. + changed_port = old_parsed.port != new_parsed.port + changed_scheme = old_parsed.scheme != new_parsed.scheme + default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) + if ( + not changed_scheme + and old_parsed.port in default_port + and new_parsed.port in default_port + ): + return False + + # Standard case: root URI must match + return changed_port or changed_scheme + + def resolve_redirects( + self, + resp, + req, + stream=False, + timeout=None, + verify=True, + cert=None, + proxies=None, + yield_requests=False, + **adapter_kwargs, + ): + """Receives a Response. Returns a generator of Responses or Requests.""" + + hist = [] # keep track of history + + url = self.get_redirect_target(resp) + previous_fragment = urlparse(req.url).fragment + while url: + prepared_request = req.copy() + + # Update history and keep track of redirects. + # resp.history must ignore the original request in this loop + hist.append(resp) + resp.history = hist[1:] + + try: + resp.content # Consume socket so it can be released + except (ChunkedEncodingError, ContentDecodingError, RuntimeError): + resp.raw.read(decode_content=False) + + if len(resp.history) >= self.max_redirects: + raise TooManyRedirects( + f"Exceeded {self.max_redirects} redirects.", response=resp + ) + + # Release the connection back into the pool. 
+ resp.close() + + # Handle redirection without scheme (see: RFC 1808 Section 4) + if url.startswith("//"): + parsed_rurl = urlparse(resp.url) + url = ":".join([to_native_string(parsed_rurl.scheme), url]) + + # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) + parsed = urlparse(url) + if parsed.fragment == "" and previous_fragment: + parsed = parsed._replace(fragment=previous_fragment) + elif parsed.fragment: + previous_fragment = parsed.fragment + url = parsed.geturl() + + # Facilitate relative 'location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + # Compliant with RFC3986, we percent encode the url. + if not parsed.netloc: + url = urljoin(resp.url, requote_uri(url)) + else: + url = requote_uri(url) + + prepared_request.url = to_native_string(url) + + self.rebuild_method(prepared_request, resp) + + # https://github.com/psf/requests/issues/1084 + if resp.status_code not in ( + codes.temporary_redirect, + codes.permanent_redirect, + ): + # https://github.com/psf/requests/issues/3490 + purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") + for header in purged_headers: + prepared_request.headers.pop(header, None) + prepared_request.body = None + + headers = prepared_request.headers + headers.pop("Cookie", None) + + # Extract any cookies sent on the response to the cookiejar + # in the new request. Because we've mutated our copied prepared + # request, use the old one that we haven't yet touched. + extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) + merge_cookies(prepared_request._cookies, self.cookies) + prepared_request.prepare_cookies(prepared_request._cookies) + + # Rebuild auth and proxy information. + proxies = self.rebuild_proxies(prepared_request, proxies) + self.rebuild_auth(prepared_request, resp) + + # A failed tell() sets `_body_position` to `object()`. This non-None + # value ensures `rewindable` will be True, allowing us to raise an + # UnrewindableBodyError, instead of hanging the connection. + rewindable = prepared_request._body_position is not None and ( + "Content-Length" in headers or "Transfer-Encoding" in headers + ) + + # Attempt to rewind consumed file-like object. + if rewindable: + rewind_body(prepared_request) + + # Override the original request. + req = prepared_request + + if yield_requests: + yield req + else: + resp = self.send( + req, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + allow_redirects=False, + **adapter_kwargs, + ) + + extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) + + # extract redirect url, if any, for the next loop + url = self.get_redirect_target(resp) + yield resp + + def rebuild_auth(self, prepared_request, response): + """When being redirected we may want to strip authentication from the + request to avoid leaking credentials. This method intelligently removes + and reapplies authentication where possible to avoid credential loss. + """ + headers = prepared_request.headers + url = prepared_request.url + + if "Authorization" in headers and self.should_strip_auth( + response.request.url, url + ): + # If we get redirected to a new host, we should strip out any + # authentication headers. + del headers["Authorization"] + + # .netrc might have more auth for us on our new host. 
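+        # Illustrative (hypothetical hosts): on a redirect from
+        # https://api.example.com to https://files.example.net, the
+        # Authorization header was dropped above; a matching machine entry
+        # in ~/.netrc, if any, re-authenticates the new host here.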
+        new_auth = get_netrc_auth(url) if self.trust_env else None
+        if new_auth is not None:
+            prepared_request.prepare_auth(new_auth)
+
+    def rebuild_proxies(self, prepared_request, proxies):
+        """This method re-evaluates the proxy configuration by considering the
+        environment variables. If we are redirected to a URL covered by
+        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
+        proxy keys for this URL (in case they were stripped by a previous
+        redirect).
+
+        This method also replaces the Proxy-Authorization header where
+        necessary.
+
+        :rtype: dict
+        """
+        headers = prepared_request.headers
+        scheme = urlparse(prepared_request.url).scheme
+        new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)
+
+        if "Proxy-Authorization" in headers:
+            del headers["Proxy-Authorization"]
+
+        try:
+            username, password = get_auth_from_url(new_proxies[scheme])
+        except KeyError:
+            username, password = None, None
+
+        # urllib3 handles proxy authorization for us in the standard adapter.
+        # Avoid appending this to TLS tunneled requests where it may be leaked.
+        if not scheme.startswith("https") and username and password:
+            headers["Proxy-Authorization"] = _basic_auth_str(username, password)
+
+        return new_proxies
+
+    def rebuild_method(self, prepared_request, response):
+        """When being redirected we may want to change the method of the request
+        based on certain specs or browser behavior.
+        """
+        method = prepared_request.method
+
+        # https://tools.ietf.org/html/rfc7231#section-6.4.4
+        if response.status_code == codes.see_other and method != "HEAD":
+            method = "GET"
+
+        # Do what the browsers do, despite standards...
+        # First, turn 302s into GETs.
+        if response.status_code == codes.found and method != "HEAD":
+            method = "GET"
+
+        # Second, if a POST is responded to with a 301, turn it into a GET.
+        # This bizarre behaviour is explained in Issue 1704.
+        if response.status_code == codes.moved and method == "POST":
+            method = "GET"
+
+        prepared_request.method = method
+
+
+class Session(SessionRedirectMixin):
+    """A Requests session.
+
+    Provides cookie persistence, connection-pooling, and configuration.
+
+    Basic Usage::
+
+      >>> import requests
+      >>> s = requests.Session()
+      >>> s.get('https://httpbin.org/get')
+      <Response [200]>
+
+    Or as a context manager::
+
+      >>> with requests.Session() as s:
+      ...     s.get('https://httpbin.org/get')
+      <Response [200]>
+    """
+
+    __attrs__ = [
+        "headers",
+        "cookies",
+        "auth",
+        "proxies",
+        "hooks",
+        "params",
+        "verify",
+        "cert",
+        "adapters",
+        "stream",
+        "trust_env",
+        "max_redirects",
+    ]
+
+    def __init__(self):
+        #: A case-insensitive dictionary of headers to be sent on each
+        #: :class:`Request <Request>` sent from this
+        #: :class:`Session <Session>`.
+        self.headers = default_headers()
+
+        #: Default Authentication tuple or object to attach to
+        #: :class:`Request <Request>`.
+        self.auth = None
+
+        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
+        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
+        #: be used on each :class:`Request <Request>`.
+        self.proxies = {}
+
+        #: Event-handling hooks.
+        self.hooks = default_hooks()
+
+        #: Dictionary of querystring data to attach to each
+        #: :class:`Request <Request>`. The dictionary values may be lists for
+        #: representing multivalued query parameters.
+        self.params = {}
+
+        #: Stream response content default.
+        self.stream = False
+
+        #: SSL Verification default.
+        #: Defaults to `True`, requiring requests to verify the TLS certificate at the
+        #: remote end.
+        #: If verify is set to `False`, requests will accept any TLS certificate
+        #: presented by the server, and will ignore hostname mismatches and/or
+        #: expired certificates, which will make your application vulnerable to
+        #: man-in-the-middle (MitM) attacks.
+        #: Only set this to `False` for testing.
+        self.verify = True
+
+        #: SSL client certificate default, if String, path to ssl client
+        #: cert file (.pem). If Tuple, ('cert', 'key') pair.
+        self.cert = None
+
+        #: Maximum number of redirects allowed. If the request exceeds this
+        #: limit, a :class:`TooManyRedirects` exception is raised.
+        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
+        #: 30.
+        self.max_redirects = DEFAULT_REDIRECT_LIMIT
+
+        #: Trust environment settings for proxy configuration, default
+        #: authentication and similar.
+        self.trust_env = True
+
+        #: A CookieJar containing all currently outstanding cookies set on this
+        #: session. By default it is a
+        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
+        #: may be any other ``cookielib.CookieJar`` compatible object.
+        self.cookies = cookiejar_from_dict({})
+
+        # Default connection adapters.
+        self.adapters = OrderedDict()
+        self.mount("https://", HTTPAdapter())
+        self.mount("http://", HTTPAdapter())
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def prepare_request(self, request):
+        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
+        transmission and returns it. The :class:`PreparedRequest` has settings
+        merged from the :class:`Request <Request>` instance and those of the
+        :class:`Session`.
+
+        :param request: :class:`Request` instance to prepare with this
+            session's settings.
+        :rtype: requests.PreparedRequest
+        """
+        cookies = request.cookies or {}
+
+        # Bootstrap CookieJar.
+        if not isinstance(cookies, cookielib.CookieJar):
+            cookies = cookiejar_from_dict(cookies)
+
+        # Merge with session cookies
+        merged_cookies = merge_cookies(
+            merge_cookies(RequestsCookieJar(), self.cookies), cookies
+        )
+
+        # Set environment's basic authentication if not explicitly set.
+        auth = request.auth
+        if self.trust_env and not auth and not self.auth:
+            auth = get_netrc_auth(request.url)
+
+        p = PreparedRequest()
+        p.prepare(
+            method=request.method.upper(),
+            url=request.url,
+            files=request.files,
+            data=request.data,
+            json=request.json,
+            headers=merge_setting(
+                request.headers, self.headers, dict_class=CaseInsensitiveDict
+            ),
+            params=merge_setting(request.params, self.params),
+            auth=merge_setting(auth, self.auth),
+            cookies=merged_cookies,
+            hooks=merge_hooks(request.hooks, self.hooks),
+        )
+        return p
+
+    def request(
+        self,
+        method,
+        url,
+        params=None,
+        data=None,
+        headers=None,
+        cookies=None,
+        files=None,
+        auth=None,
+        timeout=None,
+        allow_redirects=True,
+        proxies=None,
+        hooks=None,
+        stream=None,
+        verify=None,
+        cert=None,
+        json=None,
+    ):
+        """Constructs a :class:`Request <Request>`, prepares it and sends it.
+        Returns :class:`Response <Response>` object.
+
+        :param method: method for the new :class:`Request` object.
+        :param url: URL for the new :class:`Request` object.
+        :param params: (optional) Dictionary or bytes to be sent in the query
+            string for the :class:`Request`.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param json: (optional) json to send in the body of the
+            :class:`Request`.
+        :param headers: (optional) Dictionary of HTTP Headers to send with the
+            :class:`Request`.
+        :param cookies: (optional) Dict or CookieJar object to send with the
+            :class:`Request`.
+        :param files: (optional) Dictionary of ``'filename': file-like-objects``
+            for multipart encoding upload.
+        :param auth: (optional) Auth tuple or callable to enable
+            Basic/Digest/Custom HTTP Auth.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a :ref:`(connect timeout,
+            read timeout) <timeouts>` tuple.
+        :type timeout: float or tuple
+        :param allow_redirects: (optional) Set to True by default.
+        :type allow_redirects: bool
+        :param proxies: (optional) Dictionary mapping protocol or protocol and
+            hostname to the URL of the proxy.
+        :param hooks: (optional) Dictionary mapping hook name to one event or
+            list of events, event must be callable.
+        :param stream: (optional) whether to immediately download the response
+            content. Defaults to ``False``.
+        :param verify: (optional) Either a boolean, in which case it controls whether we verify
+            the server's TLS certificate, or a string, in which case it must be a path
+            to a CA bundle to use. Defaults to ``True``. When set to
+            ``False``, requests will accept any TLS certificate presented by
+            the server, and will ignore hostname mismatches and/or expired
+            certificates, which will make your application vulnerable to
+            man-in-the-middle (MitM) attacks. Setting verify to ``False``
+            may be useful during local development or testing.
+        :param cert: (optional) if String, path to ssl client cert file (.pem).
+            If Tuple, ('cert', 'key') pair.
+        :rtype: requests.Response
+        """
+        # Create the Request.
+        req = Request(
+            method=method.upper(),
+            url=url,
+            headers=headers,
+            files=files,
+            data=data or {},
+            json=json,
+            params=params or {},
+            auth=auth,
+            cookies=cookies,
+            hooks=hooks,
+        )
+        prep = self.prepare_request(req)
+
+        proxies = proxies or {}
+
+        settings = self.merge_environment_settings(
+            prep.url, proxies, stream, verify, cert
+        )
+
+        # Send the request.
+        send_kwargs = {
+            "timeout": timeout,
+            "allow_redirects": allow_redirects,
+        }
+        send_kwargs.update(settings)
+        resp = self.send(prep, **send_kwargs)
+
+        return resp
+
+    def get(self, url, **kwargs):
+        r"""Sends a GET request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        kwargs.setdefault("allow_redirects", True)
+        return self.request("GET", url, **kwargs)
+
+    def options(self, url, **kwargs):
+        r"""Sends an OPTIONS request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        kwargs.setdefault("allow_redirects", True)
+        return self.request("OPTIONS", url, **kwargs)
+
+    def head(self, url, **kwargs):
+        r"""Sends a HEAD request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        kwargs.setdefault("allow_redirects", False)
+        return self.request("HEAD", url, **kwargs)
+
+    def post(self, url, data=None, json=None, **kwargs):
+        r"""Sends a POST request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+ :param json: (optional) json to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("POST", url, data=data, json=json, **kwargs) + + def put(self, url, data=None, **kwargs): + r"""Sends a PUT request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PUT", url, data=data, **kwargs) + + def patch(self, url, data=None, **kwargs): + r"""Sends a PATCH request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PATCH", url, data=data, **kwargs) + + def delete(self, url, **kwargs): + r"""Sends a DELETE request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("DELETE", url, **kwargs) + + def send(self, request, **kwargs): + """Send a given PreparedRequest. + + :rtype: requests.Response + """ + # Set defaults that the hooks can utilize to ensure they always have + # the correct parameters to reproduce the previous request. + kwargs.setdefault("stream", self.stream) + kwargs.setdefault("verify", self.verify) + kwargs.setdefault("cert", self.cert) + if "proxies" not in kwargs: + kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) + + # It's possible that users might accidentally send a Request object. + # Guard against that specific failure case. + if isinstance(request, Request): + raise ValueError("You can only send PreparedRequests.") + + # Set up variables needed for resolve_redirects and dispatching of hooks + allow_redirects = kwargs.pop("allow_redirects", True) + stream = kwargs.get("stream") + hooks = request.hooks + + # Get the appropriate adapter to use + adapter = self.get_adapter(url=request.url) + + # Start time (approximately) of the request + start = preferred_clock() + + # Send the request + r = adapter.send(request, **kwargs) + + # Total elapsed time of the request (approximately) + elapsed = preferred_clock() - start + r.elapsed = timedelta(seconds=elapsed) + + # Response manipulation hooks + r = dispatch_hook("response", hooks, r, **kwargs) + + # Persist cookies + if r.history: + # If the hooks create history then we want those cookies too + for resp in r.history: + extract_cookies_to_jar(self.cookies, resp.request, resp.raw) + + extract_cookies_to_jar(self.cookies, request, r.raw) + + # Resolve redirects if allowed. + if allow_redirects: + # Redirect resolving generator. + gen = self.resolve_redirects(r, request, **kwargs) + history = [resp for resp in gen] + else: + history = [] + + # Shuffle things around if there's history. + if history: + # Insert the first (original) request at the start + history.insert(0, r) + # Get the last request made + r = history.pop() + r.history = history + + # If redirects aren't being followed, store the response on the Request for Response.next(). 
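+        # Illustrative usage: r = session.get(url, allow_redirects=False)
+        # leaves r.next holding the PreparedRequest for the first redirect
+        # hop (or None when the response is not a redirect).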
+ if not allow_redirects: + try: + r._next = next( + self.resolve_redirects(r, request, yield_requests=True, **kwargs) + ) + except StopIteration: + pass + + if not stream: + r.content + + return r + + def merge_environment_settings(self, url, proxies, stream, verify, cert): + """ + Check the environment and merge it with some settings. + + :rtype: dict + """ + # Gather clues from the surrounding environment. + if self.trust_env: + # Set environment's proxies. + no_proxy = proxies.get("no_proxy") if proxies is not None else None + env_proxies = get_environ_proxies(url, no_proxy=no_proxy) + for k, v in env_proxies.items(): + proxies.setdefault(k, v) + + # Look for requests environment configuration + # and be compatible with cURL. + if verify is True or verify is None: + verify = ( + os.environ.get("REQUESTS_CA_BUNDLE") + or os.environ.get("CURL_CA_BUNDLE") + or verify + ) + + # Merge all the kwargs. + proxies = merge_setting(proxies, self.proxies) + stream = merge_setting(stream, self.stream) + verify = merge_setting(verify, self.verify) + cert = merge_setting(cert, self.cert) + + return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} + + def get_adapter(self, url): + """ + Returns the appropriate connection adapter for the given URL. + + :rtype: requests.adapters.BaseAdapter + """ + for prefix, adapter in self.adapters.items(): + if url.lower().startswith(prefix.lower()): + return adapter + + # Nothing matches :-/ + raise InvalidSchema(f"No connection adapters were found for {url!r}") + + def close(self): + """Closes all adapters and as such the session""" + for v in self.adapters.values(): + v.close() + + def mount(self, prefix, adapter): + """Registers a connection adapter to a prefix. + + Adapters are sorted in descending order by prefix length. + """ + self.adapters[prefix] = adapter + keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] + + for key in keys_to_move: + self.adapters[key] = self.adapters.pop(key) + + def __getstate__(self): + state = {attr: getattr(self, attr, None) for attr in self.__attrs__} + return state + + def __setstate__(self, state): + for attr, value in state.items(): + setattr(self, attr, value) + + +def session(): + """ + Returns a :class:`Session` for context-management. + + .. deprecated:: 1.0.0 + + This method has been deprecated since version 1.0.0 and is only kept for + backwards compatibility. New code should use :class:`~requests.sessions.Session` + to create a session. This may be removed at a future date. + + :rtype: Session + """ + return Session() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/status_codes.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/status_codes.py new file mode 100644 index 0000000..c7945a2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/status_codes.py @@ -0,0 +1,128 @@ +r""" +The ``codes`` object defines a mapping from common names for HTTP statuses +to their numerical codes, accessible either as attributes or as dictionary +items. + +Example:: + + >>> import requests + >>> requests.codes['temporary_redirect'] + 307 + >>> requests.codes.teapot + 418 + >>> requests.codes['\o/'] + 200 + +Some codes have multiple names, and both upper- and lower-case versions of +the names are allowed. For example, ``codes.ok``, ``codes.OK``, and +``codes.okay`` all correspond to the HTTP status code 200. +""" + +from .structures import LookupDict + +_codes = { + # Informational. 
+ 100: ("continue",), + 101: ("switching_protocols",), + 102: ("processing", "early-hints"), + 103: ("checkpoint",), + 122: ("uri_too_long", "request_uri_too_long"), + 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), + 201: ("created",), + 202: ("accepted",), + 203: ("non_authoritative_info", "non_authoritative_information"), + 204: ("no_content",), + 205: ("reset_content", "reset"), + 206: ("partial_content", "partial"), + 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), + 208: ("already_reported",), + 226: ("im_used",), + # Redirection. + 300: ("multiple_choices",), + 301: ("moved_permanently", "moved", "\\o-"), + 302: ("found",), + 303: ("see_other", "other"), + 304: ("not_modified",), + 305: ("use_proxy",), + 306: ("switch_proxy",), + 307: ("temporary_redirect", "temporary_moved", "temporary"), + 308: ( + "permanent_redirect", + "resume_incomplete", + "resume", + ), # "resume" and "resume_incomplete" to be removed in 3.0 + # Client Error. + 400: ("bad_request", "bad"), + 401: ("unauthorized",), + 402: ("payment_required", "payment"), + 403: ("forbidden",), + 404: ("not_found", "-o-"), + 405: ("method_not_allowed", "not_allowed"), + 406: ("not_acceptable",), + 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), + 408: ("request_timeout", "timeout"), + 409: ("conflict",), + 410: ("gone",), + 411: ("length_required",), + 412: ("precondition_failed", "precondition"), + 413: ("request_entity_too_large", "content_too_large"), + 414: ("request_uri_too_large", "uri_too_long"), + 415: ("unsupported_media_type", "unsupported_media", "media_type"), + 416: ( + "requested_range_not_satisfiable", + "requested_range", + "range_not_satisfiable", + ), + 417: ("expectation_failed",), + 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), + 421: ("misdirected_request",), + 422: ("unprocessable_entity", "unprocessable", "unprocessable_content"), + 423: ("locked",), + 424: ("failed_dependency", "dependency"), + 425: ("unordered_collection", "unordered", "too_early"), + 426: ("upgrade_required", "upgrade"), + 428: ("precondition_required", "precondition"), + 429: ("too_many_requests", "too_many"), + 431: ("header_fields_too_large", "fields_too_large"), + 444: ("no_response", "none"), + 449: ("retry_with", "retry"), + 450: ("blocked_by_windows_parental_controls", "parental_controls"), + 451: ("unavailable_for_legal_reasons", "legal_reasons"), + 499: ("client_closed_request",), + # Server Error. 
+ 500: ("internal_server_error", "server_error", "/o\\", "✗"), + 501: ("not_implemented",), + 502: ("bad_gateway",), + 503: ("service_unavailable", "unavailable"), + 504: ("gateway_timeout",), + 505: ("http_version_not_supported", "http_version"), + 506: ("variant_also_negotiates",), + 507: ("insufficient_storage",), + 509: ("bandwidth_limit_exceeded", "bandwidth"), + 510: ("not_extended",), + 511: ("network_authentication_required", "network_auth", "network_authentication"), +} + +codes = LookupDict(name="status_codes") + + +def _init(): + for code, titles in _codes.items(): + for title in titles: + setattr(codes, title, code) + if not title.startswith(("\\", "/")): + setattr(codes, title.upper(), code) + + def doc(code): + names = ", ".join(f"``{n}``" for n in _codes[code]) + return "* %d: %s" % (code, names) + + global __doc__ + __doc__ = ( + __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) + if __doc__ is not None + else None + ) + + +_init() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/structures.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/structures.py new file mode 100644 index 0000000..188e13e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/structures.py @@ -0,0 +1,99 @@ +""" +requests.structures +~~~~~~~~~~~~~~~~~~~ + +Data structures that power Requests. +""" + +from collections import OrderedDict + +from .compat import Mapping, MutableMapping + + +class CaseInsensitiveDict(MutableMapping): + """A case-insensitive ``dict``-like object. + + Implements all methods and operations of + ``MutableMapping`` as well as dict's ``copy``. Also + provides ``lower_items``. + + All keys are expected to be strings. The structure remembers the + case of the last key to be set, and ``iter(instance)``, + ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` + will contain case-sensitive keys. However, querying and contains + testing is case insensitive:: + + cid = CaseInsensitiveDict() + cid['Accept'] = 'application/json' + cid['aCCEPT'] == 'application/json' # True + list(cid) == ['Accept'] # True + + For example, ``headers['content-encoding']`` will return the + value of a ``'Content-Encoding'`` response header, regardless + of how the header name was originally stored. + + If the constructor, ``.update``, or equality comparison + operations are given keys that have equal ``.lower()``s, the + behavior is undefined. + """ + + def __init__(self, data=None, **kwargs): + self._store = OrderedDict() + if data is None: + data = {} + self.update(data, **kwargs) + + def __setitem__(self, key, value): + # Use the lowercased key for lookups, but store the actual + # key alongside the value. 
+        self._store[key.lower()] = (key, value)
+
+    def __getitem__(self, key):
+        return self._store[key.lower()][1]
+
+    def __delitem__(self, key):
+        del self._store[key.lower()]
+
+    def __iter__(self):
+        return (casedkey for casedkey, mappedvalue in self._store.values())
+
+    def __len__(self):
+        return len(self._store)
+
+    def lower_items(self):
+        """Like iteritems(), but with all lowercase keys."""
+        return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
+
+    def __eq__(self, other):
+        if isinstance(other, Mapping):
+            other = CaseInsensitiveDict(other)
+        else:
+            return NotImplemented
+        # Compare insensitively
+        return dict(self.lower_items()) == dict(other.lower_items())
+
+    # Copy is required
+    def copy(self):
+        return CaseInsensitiveDict(self._store.values())
+
+    def __repr__(self):
+        return str(dict(self.items()))
+
+
+class LookupDict(dict):
+    """Dictionary lookup object."""
+
+    def __init__(self, name=None):
+        self.name = name
+        super().__init__()
+
+    def __repr__(self):
+        return f"<lookup '{self.name}'>"
+
+    def __getitem__(self, key):
+        # We allow fall-through here, so values default to None
+
+        return self.__dict__.get(key, None)
+
+    def get(self, key, default=None):
+        return self.__dict__.get(key, default)
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/requests/utils.py b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/utils.py
new file mode 100644
index 0000000..e8ea5ad
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/requests/utils.py
@@ -0,0 +1,1086 @@
+"""
+requests.utils
+~~~~~~~~~~~~~~
+
+This module provides utility functions that are used within Requests
+that are also useful for external consumption.
+"""
+
+import codecs
+import contextlib
+import io
+import os
+import re
+import socket
+import struct
+import sys
+import tempfile
+import warnings
+import zipfile
+from collections import OrderedDict
+
+from pip._vendor.urllib3.util import make_headers, parse_url
+
+from . import certs
+from .__version__ import __version__
+
+# to_native_string is unused here, but imported here for backwards compatibility
+from ._internal_utils import (  # noqa: F401
+    _HEADER_VALIDATORS_BYTE,
+    _HEADER_VALIDATORS_STR,
+    HEADER_VALIDATORS,
+    to_native_string,
+)
+from .compat import (
+    Mapping,
+    basestring,
+    bytes,
+    getproxies,
+    getproxies_environment,
+    integer_types,
+    is_urllib3_1,
+)
+from .compat import parse_http_list as _parse_list_header
+from .compat import (
+    proxy_bypass,
+    proxy_bypass_environment,
+    quote,
+    str,
+    unquote,
+    urlparse,
+    urlunparse,
+)
+from .cookies import cookiejar_from_dict
+from .exceptions import (
+    FileModeWarning,
+    InvalidHeader,
+    InvalidURL,
+    UnrewindableBodyError,
+)
+from .structures import CaseInsensitiveDict
+
+NETRC_FILES = (".netrc", "_netrc")
+
+DEFAULT_CA_BUNDLE_PATH = certs.where()
+
+DEFAULT_PORTS = {"http": 80, "https": 443}
+
+# Ensure that ', ' is used to preserve previous delimiter behavior.
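+# Illustrative: urllib3's make_headers() typically yields "gzip,deflate" (or a
+# longer value depending on installed codecs); the split/re-join below renders
+# it as "gzip, deflate".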
+DEFAULT_ACCEPT_ENCODING = ", ".join(
+    re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
+)
+
+
+if sys.platform == "win32":
+    # provide a proxy_bypass version on Windows without DNS lookups
+
+    def proxy_bypass_registry(host):
+        try:
+            import winreg
+        except ImportError:
+            return False
+
+        try:
+            internetSettings = winreg.OpenKey(
+                winreg.HKEY_CURRENT_USER,
+                r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
+            )
+            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
+            proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
+            # ProxyOverride is almost always a string
+            proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
+        except (OSError, ValueError):
+            return False
+        if not proxyEnable or not proxyOverride:
+            return False
+
+        # make a check value list from the registry entry: replace the
+        # '<local>' string by the localhost entry and the corresponding
+        # canonical entry.
+        proxyOverride = proxyOverride.split(";")
+        # filter out empty strings to avoid re.match return true in the following code.
+        proxyOverride = filter(None, proxyOverride)
+        # now check if we match one of the registry values.
+        for test in proxyOverride:
+            if test == "<local>":
+                if "." not in host:
+                    return True
+            test = test.replace(".", r"\.")  # mask dots
+            test = test.replace("*", r".*")  # change glob sequence
+            test = test.replace("?", r".")  # change glob char
+            if re.match(test, host, re.I):
+                return True
+        return False
+
+    def proxy_bypass(host):  # noqa
+        """Return True, if the host should be bypassed.
+
+        Checks proxy settings gathered from the environment, if specified,
+        or the registry.
+        """
+        if getproxies_environment():
+            return proxy_bypass_environment(host)
+        else:
+            return proxy_bypass_registry(host)
+
+
+def dict_to_sequence(d):
+    """Returns an internal sequence dictionary update."""
+
+    if hasattr(d, "items"):
+        d = d.items()
+
+    return d
+
+
+def super_len(o):
+    total_length = None
+    current_position = 0
+
+    if not is_urllib3_1 and isinstance(o, str):
+        # urllib3 2.x+ treats all strings as utf-8 instead
+        # of latin-1 (iso-8859-1) like http.client.
+        o = o.encode("utf-8")
+
+    if hasattr(o, "__len__"):
+        total_length = len(o)
+
+    elif hasattr(o, "len"):
+        total_length = o.len
+
+    elif hasattr(o, "fileno"):
+        try:
+            fileno = o.fileno()
+        except (io.UnsupportedOperation, AttributeError):
+            # AttributeError is a surprising exception, seeing as how we've just checked
+            # that `hasattr(o, 'fileno')`. It happens for objects obtained via
+            # `Tarfile.extractfile()`, per issue 5229.
+            pass
+        else:
+            total_length = os.fstat(fileno).st_size
+
+            # Having used fstat to determine the file length, we need to
+            # confirm that this file was opened up in binary mode.
+            if "b" not in o.mode:
+                warnings.warn(
+                    (
+                        "Requests has determined the content-length for this "
+                        "request using the binary size of the file: however, the "
+                        "file has been opened in text mode (i.e. without the 'b' "
+                        "flag in the mode). This may lead to an incorrect "
+                        "content-length. In Requests 3.0, support will be removed "
+                        "for files in text mode."
+                    ),
+                    FileModeWarning,
+                )
+
+    if hasattr(o, "tell"):
+        try:
+            current_position = o.tell()
+        except OSError:
+            # This can happen in some weird situations, such as when the file
+            # is actually a special file descriptor like stdin. In this
+            # instance, we don't know what the length is, so set it to zero and
+            # let requests chunk it instead.
+ if total_length is not None: + current_position = total_length + else: + if hasattr(o, "seek") and total_length is None: + # StringIO and BytesIO have seek but no usable fileno + try: + # seek to end of file + o.seek(0, 2) + total_length = o.tell() + + # seek back to current position to support + # partially read file-like objects + o.seek(current_position or 0) + except OSError: + total_length = 0 + + if total_length is None: + total_length = 0 + + return max(0, total_length - current_position) + + +def get_netrc_auth(url, raise_errors=False): + """Returns the Requests tuple auth for a given url from netrc.""" + + netrc_file = os.environ.get("NETRC") + if netrc_file is not None: + netrc_locations = (netrc_file,) + else: + netrc_locations = (f"~/{f}" for f in NETRC_FILES) + + try: + from netrc import NetrcParseError, netrc + + netrc_path = None + + for f in netrc_locations: + loc = os.path.expanduser(f) + if os.path.exists(loc): + netrc_path = loc + break + + # Abort early if there isn't one. + if netrc_path is None: + return + + ri = urlparse(url) + host = ri.hostname + + try: + _netrc = netrc(netrc_path).authenticators(host) + if _netrc: + # Return with login / password + login_i = 0 if _netrc[0] else 1 + return (_netrc[login_i], _netrc[2]) + except (NetrcParseError, OSError): + # If there was a parsing error or a permissions issue reading the file, + # we'll just skip netrc auth unless explicitly asked to raise errors. + if raise_errors: + raise + + # App Engine hackiness. + except (ImportError, AttributeError): + pass + + +def guess_filename(obj): + """Tries to guess the filename of the given object.""" + name = getattr(obj, "name", None) + if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": + return os.path.basename(name) + + +def extract_zipped_paths(path): + """Replace nonexistent paths that look like they refer to a member of a zip + archive with the location of an extracted copy of the target, or else + just return the provided path unchanged. 
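+
+    Illustrative example (hypothetical path): given
+    "/tmp/bundle.zip/certs/ca.pem" where only "/tmp/bundle.zip" exists on
+    disk, the member "certs/ca.pem" is extracted to the temp directory as
+    "ca.pem" and that extracted path is returned.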
+ """ + if os.path.exists(path): + # this is already a valid path, no need to do anything further + return path + + # find the first valid part of the provided path and treat that as a zip archive + # assume the rest of the path is the name of a member in the archive + archive, member = os.path.split(path) + while archive and not os.path.exists(archive): + archive, prefix = os.path.split(archive) + if not prefix: + # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), + # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users + break + member = "/".join([prefix, member]) + + if not zipfile.is_zipfile(archive): + return path + + zip_file = zipfile.ZipFile(archive) + if member not in zip_file.namelist(): + return path + + # we have a valid zip archive and a valid member of that archive + tmp = tempfile.gettempdir() + extracted_path = os.path.join(tmp, member.split("/")[-1]) + if not os.path.exists(extracted_path): + # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition + with atomic_open(extracted_path) as file_handler: + file_handler.write(zip_file.read(member)) + return extracted_path + + +@contextlib.contextmanager +def atomic_open(filename): + """Write a file to the disk in an atomic fashion""" + tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) + try: + with os.fdopen(tmp_descriptor, "wb") as tmp_handler: + yield tmp_handler + os.replace(tmp_name, filename) + except BaseException: + os.remove(tmp_name) + raise + + +def from_key_val_list(value): + """Take an object and test to see if it can be represented as a + dictionary. Unless it can not be represented as such, return an + OrderedDict, e.g., + + :: + + >>> from_key_val_list([('key', 'val')]) + OrderedDict([('key', 'val')]) + >>> from_key_val_list('string') + Traceback (most recent call last): + ... + ValueError: cannot encode objects that are not 2-tuples + >>> from_key_val_list({'key': 'val'}) + OrderedDict([('key', 'val')]) + + :rtype: OrderedDict + """ + if value is None: + return None + + if isinstance(value, (str, bytes, bool, int)): + raise ValueError("cannot encode objects that are not 2-tuples") + + return OrderedDict(value) + + +def to_key_val_list(value): + """Take an object and test to see if it can be represented as a + dictionary. If it can be, return a list of tuples, e.g., + + :: + + >>> to_key_val_list([('key', 'val')]) + [('key', 'val')] + >>> to_key_val_list({'key': 'val'}) + [('key', 'val')] + >>> to_key_val_list('string') + Traceback (most recent call last): + ... + ValueError: cannot encode objects that are not 2-tuples + + :rtype: list + """ + if value is None: + return None + + if isinstance(value, (str, bytes, bool, int)): + raise ValueError("cannot encode objects that are not 2-tuples") + + if isinstance(value, Mapping): + value = value.items() + + return list(value) + + +# From mitsuhiko/werkzeug (used with permission). +def parse_list_header(value): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Quotes are removed automatically after parsing. + + It basically works like :func:`parse_set_header` just that items + may appear multiple times and case sensitivity is preserved. 
+ + The return value is a standard :class:`list`: + + >>> parse_list_header('token, "quoted value"') + ['token', 'quoted value'] + + To create a header from the :class:`list` again, use the + :func:`dump_header` function. + + :param value: a string with a list header. + :return: :class:`list` + :rtype: list + """ + result = [] + for item in _parse_list_header(value): + if item[:1] == item[-1:] == '"': + item = unquote_header_value(item[1:-1]) + result.append(item) + return result + + +# From mitsuhiko/werkzeug (used with permission). +def parse_dict_header(value): + """Parse lists of key, value pairs as described by RFC 2068 Section 2 and + convert them into a python dict: + + >>> d = parse_dict_header('foo="is a fish", bar="as well"') + >>> type(d) is dict + True + >>> sorted(d.items()) + [('bar', 'as well'), ('foo', 'is a fish')] + + If there is no value for a key it will be `None`: + + >>> parse_dict_header('key_without_value') + {'key_without_value': None} + + To create a header from the :class:`dict` again, use the + :func:`dump_header` function. + + :param value: a string with a dict header. + :return: :class:`dict` + :rtype: dict + """ + result = {} + for item in _parse_list_header(value): + if "=" not in item: + result[item] = None + continue + name, value = item.split("=", 1) + if value[:1] == value[-1:] == '"': + value = unquote_header_value(value[1:-1]) + result[name] = value + return result + + +# From mitsuhiko/werkzeug (used with permission). +def unquote_header_value(value, is_filename=False): + r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). + This does not use the real unquoting but what browsers are actually + using for quoting. + + :param value: the header value to unquote. + :rtype: str + """ + if value and value[0] == value[-1] == '"': + # this is not the real unquoting, but fixing this so that the + # RFC is met will result in bugs with internet explorer and + # probably some other browsers as well. IE for example is + # uploading files with "C:\foo\bar.txt" as filename + value = value[1:-1] + + # if this is a filename and the starting characters look like + # a UNC path, then just return the value without quotes. Using the + # replace sequence below on a UNC path has the effect of turning + # the leading double slash into a single slash and then + # _fix_ie_filename() doesn't work correctly. See #458. + if not is_filename or value[:2] != "\\\\": + return value.replace("\\\\", "\\").replace('\\"', '"') + return value + + +def dict_from_cookiejar(cj): + """Returns a key/value dictionary from a CookieJar. + + :param cj: CookieJar object to extract cookies from. + :rtype: dict + """ + + cookie_dict = {cookie.name: cookie.value for cookie in cj} + return cookie_dict + + +def add_dict_to_cookiejar(cj, cookie_dict): + """Returns a CookieJar from a key/value dictionary. + + :param cj: CookieJar to insert cookies into. + :param cookie_dict: Dict of key/values to insert into CookieJar. + :rtype: CookieJar + """ + + return cookiejar_from_dict(cookie_dict, cj) + + +def get_encodings_from_content(content): + """Returns encodings from given content string. + + :param content: bytestring to extract encodings from. + """ + warnings.warn( + ( + "In requests 3.0, get_encodings_from_content will be removed. For " + "more information, please see the discussion on issue #2266. 
(This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) + + charset_re = re.compile(r']', flags=re.I) + pragma_re = re.compile(r']', flags=re.I) + xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') + + return ( + charset_re.findall(content) + + pragma_re.findall(content) + + xml_re.findall(content) + ) + + +def _parse_content_type_header(header): + """Returns content type and parameters from given header + + :param header: string + :return: tuple containing content type and dictionary of + parameters + """ + + tokens = header.split(";") + content_type, params = tokens[0].strip(), tokens[1:] + params_dict = {} + items_to_strip = "\"' " + + for param in params: + param = param.strip() + if param: + key, value = param, True + index_of_equals = param.find("=") + if index_of_equals != -1: + key = param[:index_of_equals].strip(items_to_strip) + value = param[index_of_equals + 1 :].strip(items_to_strip) + params_dict[key.lower()] = value + return content_type, params_dict + + +def get_encoding_from_headers(headers): + """Returns encodings from given HTTP Header Dict. + + :param headers: dictionary to extract encoding from. + :rtype: str + """ + + content_type = headers.get("content-type") + + if not content_type: + return None + + content_type, params = _parse_content_type_header(content_type) + + if "charset" in params: + return params["charset"].strip("'\"") + + if "text" in content_type: + return "ISO-8859-1" + + if "application/json" in content_type: + # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset + return "utf-8" + + +def stream_decode_response_unicode(iterator, r): + """Stream decodes an iterator.""" + + if r.encoding is None: + yield from iterator + return + + decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace") + for chunk in iterator: + rv = decoder.decode(chunk) + if rv: + yield rv + rv = decoder.decode(b"", final=True) + if rv: + yield rv + + +def iter_slices(string, slice_length): + """Iterate over slices of a string.""" + pos = 0 + if slice_length is None or slice_length <= 0: + slice_length = len(string) + while pos < len(string): + yield string[pos : pos + slice_length] + pos += slice_length + + +def get_unicode_from_response(r): + """Returns the requested content back in unicode. + + :param r: Response object to get unicode content from. + + Tried: + + 1. charset from content-type + 2. fall back and replace all unicode characters + + :rtype: str + """ + warnings.warn( + ( + "In requests 3.0, get_unicode_from_response will be removed. For " + "more information, please see the discussion on issue #2266. (This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) + + tried_encodings = [] + + # Try charset from content-type + encoding = get_encoding_from_headers(r.headers) + + if encoding: + try: + return str(r.content, encoding) + except UnicodeError: + tried_encodings.append(encoding) + + # Fall back: + try: + return str(r.content, encoding, errors="replace") + except TypeError: + return r.content + + +# The unreserved URI characters (RFC 3986) +UNRESERVED_SET = frozenset( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~" +) + + +def unquote_unreserved(uri): + """Un-escape any percent-escape sequences in a URI that are unreserved + characters. This leaves all reserved, illegal and non-ASCII bytes encoded. 
+ + :rtype: str + """ + parts = uri.split("%") + for i in range(1, len(parts)): + h = parts[i][0:2] + if len(h) == 2 and h.isalnum(): + try: + c = chr(int(h, 16)) + except ValueError: + raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") + + if c in UNRESERVED_SET: + parts[i] = c + parts[i][2:] + else: + parts[i] = f"%{parts[i]}" + else: + parts[i] = f"%{parts[i]}" + return "".join(parts) + + +def requote_uri(uri): + """Re-quote the given URI. + + This function passes the given URI through an unquote/quote cycle to + ensure that it is fully and consistently quoted. + + :rtype: str + """ + safe_with_percent = "!#$%&'()*+,/:;=?@[]~" + safe_without_percent = "!#$&'()*+,/:;=?@[]~" + try: + # Unquote only the unreserved characters + # Then quote only illegal characters (do not quote reserved, + # unreserved, or '%') + return quote(unquote_unreserved(uri), safe=safe_with_percent) + except InvalidURL: + # We couldn't unquote the given URI, so let's try quoting it, but + # there may be unquoted '%'s in the URI. We need to make sure they're + # properly quoted so they do not cause issues elsewhere. + return quote(uri, safe=safe_without_percent) + + +def address_in_network(ip, net): + """This function allows you to check if an IP belongs to a network subnet + + Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 + returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 + + :rtype: bool + """ + ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] + netaddr, bits = net.split("/") + netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask + return (ipaddr & netmask) == (network & netmask) + + +def dotted_netmask(mask): + """Converts mask from /xx format to xxx.xxx.xxx.xxx + + Example: if mask is 24 function returns 255.255.255.0 + + :rtype: str + """ + bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack(">I", bits)) + + +def is_ipv4_address(string_ip): + """ + :rtype: bool + """ + try: + socket.inet_aton(string_ip) + except OSError: + return False + return True + + +def is_valid_cidr(string_network): + """ + Very simple check of the cidr format in no_proxy variable. + + :rtype: bool + """ + if string_network.count("/") == 1: + try: + mask = int(string_network.split("/")[1]) + except ValueError: + return False + + if mask < 1 or mask > 32: + return False + + try: + socket.inet_aton(string_network.split("/")[0]) + except OSError: + return False + else: + return False + return True + + +@contextlib.contextmanager +def set_environ(env_name, value): + """Set the environment variable 'env_name' to 'value' + + Save previous value, yield, and then restore the previous value stored in + the environment variable 'env_name'. + + If 'value' is None, do nothing""" + value_changed = value is not None + if value_changed: + old_value = os.environ.get(env_name) + os.environ[env_name] = value + try: + yield + finally: + if value_changed: + if old_value is None: + del os.environ[env_name] + else: + os.environ[env_name] = old_value + + +def should_bypass_proxies(url, no_proxy): + """ + Returns whether we should bypass proxies or not. + + :rtype: bool + """ + + # Prioritize lowercase environment variables over uppercase + # to keep a consistent behaviour with other http projects (curl, wget). + def get_proxy(key): + return os.environ.get(key) or os.environ.get(key.upper()) + + # First check whether no_proxy is defined. 
If it is, check that the URL + # we're getting isn't in the no_proxy list. + no_proxy_arg = no_proxy + if no_proxy is None: + no_proxy = get_proxy("no_proxy") + parsed = urlparse(url) + + if parsed.hostname is None: + # URLs don't always have hostnames, e.g. file:/// urls. + return True + + if no_proxy: + # We need to check whether we match here. We need to see if we match + # the end of the hostname, both with and without the port. + no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) + + if is_ipv4_address(parsed.hostname): + for proxy_ip in no_proxy: + if is_valid_cidr(proxy_ip): + if address_in_network(parsed.hostname, proxy_ip): + return True + elif parsed.hostname == proxy_ip: + # If no_proxy ip was defined in plain IP notation instead of cidr notation & + # matches the IP of the index + return True + else: + host_with_port = parsed.hostname + if parsed.port: + host_with_port += f":{parsed.port}" + + for host in no_proxy: + if parsed.hostname.endswith(host) or host_with_port.endswith(host): + # The URL does match something in no_proxy, so we don't want + # to apply the proxies on this URL. + return True + + with set_environ("no_proxy", no_proxy_arg): + # parsed.hostname can be `None` in cases such as a file URI. + try: + bypass = proxy_bypass(parsed.hostname) + except (TypeError, socket.gaierror): + bypass = False + + if bypass: + return True + + return False + + +def get_environ_proxies(url, no_proxy=None): + """ + Return a dict of environment proxies. + + :rtype: dict + """ + if should_bypass_proxies(url, no_proxy=no_proxy): + return {} + else: + return getproxies() + + +def select_proxy(url, proxies): + """Select a proxy for the url, if applicable. + + :param url: The url being for the request + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + """ + proxies = proxies or {} + urlparts = urlparse(url) + if urlparts.hostname is None: + return proxies.get(urlparts.scheme, proxies.get("all")) + + proxy_keys = [ + urlparts.scheme + "://" + urlparts.hostname, + urlparts.scheme, + "all://" + urlparts.hostname, + "all", + ] + proxy = None + for proxy_key in proxy_keys: + if proxy_key in proxies: + proxy = proxies[proxy_key] + break + + return proxy + + +def resolve_proxies(request, proxies, trust_env=True): + """This method takes proxy information from a request and configuration + input to resolve a mapping of target proxies. This will consider settings + such as NO_PROXY to strip proxy configurations. + + :param request: Request or PreparedRequest + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + :param trust_env: Boolean declaring whether to trust environment configs + + :rtype: dict + """ + proxies = proxies if proxies is not None else {} + url = request.url + scheme = urlparse(url).scheme + no_proxy = proxies.get("no_proxy") + new_proxies = proxies.copy() + + if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): + environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) + + proxy = environ_proxies.get(scheme, environ_proxies.get("all")) + + if proxy: + new_proxies.setdefault(scheme, proxy) + return new_proxies + + +def default_user_agent(name="python-requests"): + """ + Return a string representing the default user agent. 
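To make the proxy-key precedence in select_proxy concrete, a small sketch follows; the proxy URLs and hosts are invented, and the import assumes an installed requests distribution.

```python
from requests.utils import select_proxy

proxies = {
    "http://example.com": "http://proxy-a:8080",  # scheme://host (most specific)
    "http": "http://proxy-b:8080",                # whole scheme
    "all": "http://proxy-c:8080",                 # catch-all
}
print(select_proxy("http://example.com/path", proxies))   # http://proxy-a:8080
print(select_proxy("http://other.org/", proxies))         # http://proxy-b:8080
print(select_proxy("ftp://files.example.net/", proxies))  # http://proxy-c:8080
```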
+ + :rtype: str + """ + return f"{name}/{__version__}" + + +def default_headers(): + """ + :rtype: requests.structures.CaseInsensitiveDict + """ + return CaseInsensitiveDict( + { + "User-Agent": default_user_agent(), + "Accept-Encoding": DEFAULT_ACCEPT_ENCODING, + "Accept": "*/*", + "Connection": "keep-alive", + } + ) + + +def parse_header_links(value): + """Return a list of parsed link headers. + + i.e. Link: <http://.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" + + :rtype: list + """ + + links = [] + + replace_chars = " '\"" + + value = value.strip(replace_chars) + if not value: + return links + + for val in re.split(", *<", value): + try: + url, params = val.split(";", 1) + except ValueError: + url, params = val, "" + + link = {"url": url.strip("<> '\"")} + + for param in params.split(";"): + try: + key, value = param.split("=") + except ValueError: + break + + link[key.strip(replace_chars)] = value.strip(replace_chars) + + links.append(link) + + return links + + +# Null bytes; no need to recreate these on each call to guess_json_utf +_null = "\x00".encode("ascii") # encoding to ASCII for Python 3 +_null2 = _null * 2 +_null3 = _null * 3 + + +def guess_json_utf(data): + """ + :rtype: str + """ + # JSON always starts with two ASCII characters, so detection is as + # easy as counting the nulls; their location and count + # determine the encoding. Also detect a BOM, if present. + sample = data[:4] + if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): + return "utf-32" # BOM included + if sample[:3] == codecs.BOM_UTF8: + return "utf-8-sig" # BOM included, MS style (discouraged) + if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): + return "utf-16" # BOM included + nullcount = sample.count(_null) + if nullcount == 0: + return "utf-8" + if nullcount == 2: + if sample[::2] == _null2: # 1st and 3rd are null + return "utf-16-be" + if sample[1::2] == _null2: # 2nd and 4th are null + return "utf-16-le" + # Did not detect 2 valid UTF-16 ascii-range characters + if nullcount == 3: + if sample[:3] == _null3: + return "utf-32-be" + if sample[1:] == _null3: + return "utf-32-le" + # Did not detect a valid UTF-32 ascii-range character + return None + + +def prepend_scheme_if_needed(url, new_scheme): + """Given a URL that may or may not have a scheme, prepend the given scheme. + Does not replace a present scheme with the one provided as an argument. + + :rtype: str + """ + parsed = parse_url(url) + scheme, auth, host, port, path, query, fragment = parsed + + # A defect in urlparse determines that there isn't a netloc present in some + # urls. We previously assumed parsing was overly cautious, and swapped the + # netloc and path. Due to a lack of tests on the original defect, this is + # maintained with parse_url for backwards compatibility. + netloc = parsed.netloc + if not netloc: + netloc, path = path, netloc + + if auth: + # parse_url doesn't provide the netloc with auth + # so we'll add it ourselves. + netloc = "@".join([auth, netloc]) + if scheme is None: + scheme = new_scheme + if path is None: + path = "" + + return urlunparse((scheme, netloc, path, "", query, fragment)) + + +def get_auth_from_url(url): + """Given a url with authentication components, extract them into a tuple of + username,password.
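parse_header_links, defined above, is easiest to understand on a realistic pagination header; the URLs in this sketch are invented, and the import again assumes an installed requests distribution.

```python
from requests.utils import parse_header_links

header = '<https://api.example.com/items?page=2>; rel="next", <https://api.example.com/items?page=5>; rel="last"'
for link in parse_header_links(header):
    print(link["rel"], "->", link["url"])
# next -> https://api.example.com/items?page=2
# last -> https://api.example.com/items?page=5
```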
+ + :rtype: (str,str) + """ + parsed = urlparse(url) + + try: + auth = (unquote(parsed.username), unquote(parsed.password)) + except (AttributeError, TypeError): + auth = ("", "") + + return auth + + +def check_header_validity(header): + """Verifies that header parts don't contain leading whitespace + reserved characters, or return characters. + + :param header: tuple, in the format (name, value). + """ + name, value = header + _validate_header_part(header, name, 0) + _validate_header_part(header, value, 1) + + +def _validate_header_part(header, header_part, header_validator_index): + if isinstance(header_part, str): + validator = _HEADER_VALIDATORS_STR[header_validator_index] + elif isinstance(header_part, bytes): + validator = _HEADER_VALIDATORS_BYTE[header_validator_index] + else: + raise InvalidHeader( + f"Header part ({header_part!r}) from {header} " + f"must be of type str or bytes, not {type(header_part)}" + ) + + if not validator.match(header_part): + header_kind = "name" if header_validator_index == 0 else "value" + raise InvalidHeader( + f"Invalid leading whitespace, reserved character(s), or return " + f"character(s) in header {header_kind}: {header_part!r}" + ) + + +def urldefragauth(url): + """ + Given a url remove the fragment and the authentication part. + + :rtype: str + """ + scheme, netloc, path, params, query, fragment = urlparse(url) + + # see func:`prepend_scheme_if_needed` + if not netloc: + netloc, path = path, netloc + + netloc = netloc.rsplit("@", 1)[-1] + + return urlunparse((scheme, netloc, path, params, query, "")) + + +def rewind_body(prepared_request): + """Move file pointer back to its recorded starting position + so it can be read again on redirect. + """ + body_seek = getattr(prepared_request.body, "seek", None) + if body_seek is not None and isinstance( + prepared_request._body_position, integer_types + ): + try: + body_seek(prepared_request._body_position) + except OSError: + raise UnrewindableBodyError( + "An error occurred when rewinding request body for redirect." 
+ ) + else: + raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__init__.py new file mode 100644 index 0000000..41537e3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__init__.py @@ -0,0 +1,27 @@ +__all__ = [ + "AbstractProvider", + "AbstractResolver", + "BaseReporter", + "InconsistentCandidate", + "RequirementsConflicted", + "ResolutionError", + "ResolutionImpossible", + "ResolutionTooDeep", + "Resolver", + "__version__", +] + +__version__ = "1.2.0" + + +from .providers import AbstractProvider +from .reporters import BaseReporter +from .resolvers import ( + AbstractResolver, + InconsistentCandidate, + RequirementsConflicted, + ResolutionError, + ResolutionImpossible, + ResolutionTooDeep, + Resolver, +) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..0e4dd17 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-312.pyc new file mode 100644 index 0000000..ac1c166 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-312.pyc new file mode 100644 index 0000000..484ca4d Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-312.pyc new file mode 100644 index 0000000..7b16de7 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/providers.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/providers.py new file mode 100644 index 0000000..524e3d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/providers.py @@ -0,0 +1,196 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Generic, + Iterable, + Iterator, + Mapping, + Sequence, +) + +from .structs import CT, KT, RT, Matches, RequirementInformation + +if TYPE_CHECKING: + from typing import Any, Protocol + + class Preference(Protocol): + def __lt__(self, __other: Any) -> bool: ... + + +class AbstractProvider(Generic[RT, CT, KT]): + """Delegate class to provide the required interface for the resolver.""" + + def identify(self, requirement_or_candidate: RT | CT) -> KT: + """Given a requirement or candidate, return an identifier for it. + + This is used to identify, e.g. whether two requirements + should have their specifier parts merged or a candidate matches a + requirement via ``find_matches()``. 
+ """ + raise NotImplementedError + + def get_preference( + self, + identifier: KT, + resolutions: Mapping[KT, CT], + candidates: Mapping[KT, Iterator[CT]], + information: Mapping[KT, Iterator[RequirementInformation[RT, CT]]], + backtrack_causes: Sequence[RequirementInformation[RT, CT]], + ) -> Preference: + """Produce a sort key for given requirement based on preference. + + As this is a sort key it will be called O(n) times per backtrack + step, where n is the number of `identifier`s, if you have a check + which is expensive in some sense. E.g. It needs to make O(n) checks + per call or takes significant wall clock time, consider using + `narrow_requirement_selection` to filter the `identifier`s, which + is applied before this sort key is called. + + The preference is defined as "I think this requirement should be + resolved first". The lower the return value is, the more preferred + this group of arguments is. + + :param identifier: An identifier as returned by ``identify()``. This + identifies the requirement being considered. + :param resolutions: Mapping of candidates currently pinned by the + resolver. Each key is an identifier, and the value is a candidate. + The candidate may conflict with requirements from ``information``. + :param candidates: Mapping of each dependency's possible candidates. + Each value is an iterator of candidates. + :param information: Mapping of requirement information of each package. + Each value is an iterator of *requirement information*. + :param backtrack_causes: Sequence of *requirement information* that are + the requirements that caused the resolver to most recently + backtrack. + + A *requirement information* instance is a named tuple with two members: + + * ``requirement`` specifies a requirement contributing to the current + list of candidates. + * ``parent`` specifies the candidate that provides (depended on) the + requirement, or ``None`` to indicate a root requirement. + + The preference could depend on various issues, including (not + necessarily in this order): + + * Is this package pinned in the current resolution result? + * How relaxed is the requirement? Stricter ones should probably be + worked on first? (I don't know, actually.) + * How many possibilities are there to satisfy this requirement? Those + with few left should likely be worked on first, I guess? + * Are there any known conflicts for this requirement? We should + probably work on those with the most known conflicts. + + A sortable value should be returned (this will be used as the ``key`` + parameter of the built-in sorting function). The smaller the value is, + the more preferred this requirement is (i.e. the sorting function + is called with ``reverse=False``). + """ + raise NotImplementedError + + def find_matches( + self, + identifier: KT, + requirements: Mapping[KT, Iterator[RT]], + incompatibilities: Mapping[KT, Iterator[CT]], + ) -> Matches[CT]: + """Find all possible candidates that satisfy the given constraints. + + :param identifier: An identifier as returned by ``identify()``. All + candidates returned by this method should produce the same + identifier. + :param requirements: A mapping of requirements that all returned + candidates must satisfy. Each key is an identifier, and the value + an iterator of requirements for that dependency. + :param incompatibilities: A mapping of known incompatibile candidates of + each dependency. Each key is an identifier, and the value an + iterator of incompatibilities known to the resolver. 
All + incompatibilities *must* be excluded from the return value. + + This should try to get candidates based on the requirements' types. + For VCS, local, and archive requirements, the one-and-only match is + returned, and for a "named" requirement, the index(es) should be + consulted to find concrete candidates for this requirement. + + The return value should produce candidates ordered by preference; the + most preferred candidate should come first. The return type may be one + of the following: + + * A callable that returns an iterator that yields candidates. + * A collection of candidates. + * An iterable of candidates. This will be consumed immediately into a + list of candidates. + """ + raise NotImplementedError + + def is_satisfied_by(self, requirement: RT, candidate: CT) -> bool: + """Whether the given requirement can be satisfied by a candidate. + + The candidate is guaranteed to have been generated from the + requirement. + + A boolean should be returned to indicate whether ``candidate`` is a + viable solution to the requirement. + """ + raise NotImplementedError + + def get_dependencies(self, candidate: CT) -> Iterable[RT]: + """Get dependencies of a candidate. + + This should return a collection of requirements that `candidate` + specifies as its dependencies. + """ + raise NotImplementedError + + def narrow_requirement_selection( + self, + identifiers: Iterable[KT], + resolutions: Mapping[KT, CT], + candidates: Mapping[KT, Iterator[CT]], + information: Mapping[KT, Iterator[RequirementInformation[RT, CT]]], + backtrack_causes: Sequence[RequirementInformation[RT, CT]], + ) -> Iterable[KT]: + """ + An optional method to narrow the selection of requirements being + considered during resolution. This method is called at most once per + backtrack step. + + :param identifiers: An iterable of `identifiers` as returned by + ``identify()``. These identify all requirements currently being + considered. + :param resolutions: A mapping of candidates currently pinned by the + resolver. Each key is an identifier, and the value is a candidate + that may conflict with requirements from ``information``. + :param candidates: A mapping of each dependency's possible candidates. + Each value is an iterator of candidates. + :param information: A mapping of requirement information for each package. + Each value is an iterator of *requirement information*. + :param backtrack_causes: A sequence of *requirement information* that are + the requirements causing the resolver to most recently + backtrack. + + A *requirement information* instance is a named tuple with two members: + + * ``requirement`` specifies a requirement contributing to the current + list of candidates. + * ``parent`` specifies the candidate that provides (is depended on for) + the requirement, or ``None`` to indicate a root requirement. + + Must return a non-empty subset of `identifiers`, with the default + implementation being to return `identifiers` unchanged. Those `identifiers` + will then be passed to the sort key `get_preference` to pick the most + preferred requirement to attempt to pin, unless `narrow_requirement_selection` + returns only 1 requirement, in which case that will be used without + calling the sort key `get_preference`. + + This method is designed to be used by the provider to optimize the + dependency resolution, e.g.
if a check cost is O(m) and it can be done + against all identifiers at once, then filtering the requirement selection + here will cost O(m), but making it part of the sort key in `get_preference` + will cost O(m*n), where n is the number of `identifiers`. + + Returns: + Iterable[KT]: A non-empty subset of `identifiers`. + """ + return identifiers diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/py.typed b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/reporters.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/reporters.py new file mode 100644 index 0000000..6c14220 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/reporters.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Collection, Generic + +from .structs import CT, KT, RT, RequirementInformation, State + +if TYPE_CHECKING: + from .resolvers import Criterion + + +class BaseReporter(Generic[RT, CT, KT]): + """Delegate class to provide progress reporting for the resolver.""" + + def starting(self) -> None: + """Called before the resolution actually starts.""" + + def starting_round(self, index: int) -> None: + """Called before each round of resolution starts. + + The index is zero-based. + """ + + def ending_round(self, index: int, state: State[RT, CT, KT]) -> None: + """Called before each round of resolution ends. + + This is NOT called if the resolution ends at this round. Use `ending` + if you want to report finalization. The index is zero-based. + """ + + def ending(self, state: State[RT, CT, KT]) -> None: + """Called before the resolution ends successfully.""" + + def adding_requirement(self, requirement: RT, parent: CT | None) -> None: + """Called when adding a new requirement into the resolve criteria. + + :param requirement: The additional requirement to be applied to filter + the available candidates. + :param parent: The candidate that requires ``requirement`` as a + dependency, or None if ``requirement`` is one of the root + requirements passed in from ``Resolver.resolve()``. + """ + + def resolving_conflicts( + self, causes: Collection[RequirementInformation[RT, CT]] + ) -> None: + """Called when starting to attempt requirement conflict resolution. + + :param causes: The information on the collision that caused the backtracking.
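The provider interface above is easiest to grasp from a toy implementation. The sketch below resolves exact (name, version) pins against a hard-coded index; ToyProvider and INDEX are invented for illustration, and the import targets the standalone resolvelib distribution (the same code vendored here as pip._vendor.resolvelib). It is not pip's real provider.

```python
from resolvelib import AbstractProvider, BaseReporter, Resolver

# Invented toy index: name -> version -> list of (dep name, dep version) pins.
INDEX = {
    "app": {"1.0": [("lib", "2.0")]},
    "lib": {"2.0": []},
}

class ToyProvider(AbstractProvider):
    def identify(self, requirement_or_candidate):
        return requirement_or_candidate[0]  # both are (name, version) tuples

    def get_preference(self, identifier, resolutions, candidates, information, backtrack_causes):
        return identifier  # arbitrary but stable sort key

    def find_matches(self, identifier, requirements, incompatibilities):
        banned = set(incompatibilities[identifier])
        return [
            (identifier, version)
            for version in INDEX.get(identifier, {})
            if (identifier, version) not in banned
            and all(r == (identifier, version) for r in requirements[identifier])
        ]

    def is_satisfied_by(self, requirement, candidate):
        return requirement == candidate  # exact pins only

    def get_dependencies(self, candidate):
        name, version = candidate
        return INDEX[name][version]

result = Resolver(ToyProvider(), BaseReporter()).resolve([("app", "1.0")])
print(dict(result.mapping))  # {'app': ('app', '1.0'), 'lib': ('lib', '2.0')}
```

Backtracking never triggers in this trivial graph; with mutually exclusive pins the same code would raise ResolutionImpossible instead.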
+ """ + + def rejecting_candidate(self, criterion: Criterion[RT, CT], candidate: CT) -> None: + """Called when rejecting a candidate during backtracking.""" + + def pinning(self, candidate: CT) -> None: + """Called when adding a candidate to the potential solution.""" diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py new file mode 100644 index 0000000..b249221 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py @@ -0,0 +1,27 @@ +from ..structs import RequirementInformation +from .abstract import AbstractResolver, Result +from .criterion import Criterion +from .exceptions import ( + InconsistentCandidate, + RequirementsConflicted, + ResolutionError, + ResolutionImpossible, + ResolutionTooDeep, + ResolverException, +) +from .resolution import Resolution, Resolver + +__all__ = [ + "AbstractResolver", + "Criterion", + "InconsistentCandidate", + "RequirementInformation", + "RequirementsConflicted", + "Resolution", + "ResolutionError", + "ResolutionImpossible", + "ResolutionTooDeep", + "Resolver", + "ResolverException", + "Result", +] diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..9401280 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-312.pyc new file mode 100644 index 0000000..b6e0cbc Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-312.pyc new file mode 100644 index 0000000..2f17d80 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000..7e10f00 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-312.pyc b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-312.pyc new file mode 100644 index 0000000..3e7c9b4 Binary files /dev/null and b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-312.pyc differ diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py new file mode 100644 index 0000000..f9b5a7a --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +import collections +from typing import TYPE_CHECKING, Any, Generic, Iterable, Mapping, NamedTuple + +from ..structs import CT, KT, RT, DirectedGraph + +if TYPE_CHECKING: + from ..providers import AbstractProvider + from ..reporters import BaseReporter + from .criterion import Criterion + + class Result(NamedTuple, Generic[RT, CT, KT]): + mapping: Mapping[KT, CT] + graph: DirectedGraph[KT | None] + criteria: Mapping[KT, Criterion[RT, CT]] + +else: + Result = collections.namedtuple("Result", ["mapping", "graph", "criteria"]) + + +class AbstractResolver(Generic[RT, CT, KT]): + """The thing that performs the actual resolution work.""" + + base_exception = Exception + + def __init__( + self, + provider: AbstractProvider[RT, CT, KT], + reporter: BaseReporter[RT, CT, KT], + ) -> None: + self.provider = provider + self.reporter = reporter + + def resolve(self, requirements: Iterable[RT], **kwargs: Any) -> Result[RT, CT, KT]: + """Take a collection of constraints, spit out the resolution result. + + This returns a representation of the final resolution state, with one + guaranteed attribute ``mapping`` that contains resolved candidates as + values. The keys are their respective identifiers. + + :param requirements: A collection of constraints. + :param kwargs: Additional keyword arguments that subclasses may accept. + + :raises: ``self.base_exception`` or its subclass. + """ + raise NotImplementedError diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py new file mode 100644 index 0000000..ee5019c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from typing import Collection, Generic, Iterable, Iterator + +from ..structs import CT, RT, RequirementInformation + + +class Criterion(Generic[RT, CT]): + """Representation of possible resolution results of a package. + + This holds three attributes: + + * `information` is a collection of `RequirementInformation` pairs. + Each pair is a requirement contributing to this criterion, and the + candidate that provides the requirement. + * `incompatibilities` is a collection of all known not-to-work candidates + to exclude from consideration. + * `candidates` is a collection containing all possible candidates deduced + from the union of contributing requirements and known incompatibilities. + It should never be empty, except when the criterion is an attribute of a + raised `RequirementsConflicted` (in which case it is always empty). + + .. note:: + This class is intended to be externally immutable. **Do not** mutate + any of its attribute containers.
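Tying this back to the toy resolver sketched earlier: the result's criteria mapping exposes these read-only Criterion objects, so the provenance of each pin can be inspected (the names below continue that invented example).

```python
crit = result.criteria["lib"]
for req, parent in crit.information:  # RequirementInformation pairs
    print(req, "required by", parent)
# ('lib', '2.0') required by ('app', '1.0')
```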
+ """ + + def __init__( + self, + candidates: Iterable[CT], + information: Collection[RequirementInformation[RT, CT]], + incompatibilities: Collection[CT], + ) -> None: + self.candidates = candidates + self.information = information + self.incompatibilities = incompatibilities + + def __repr__(self) -> str: + requirements = ", ".join( + f"({req!r}, via={parent!r})" for req, parent in self.information + ) + return f"Criterion({requirements})" + + def iter_requirement(self) -> Iterator[RT]: + return (i.requirement for i in self.information) + + def iter_parent(self) -> Iterator[CT | None]: + return (i.parent for i in self.information) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py new file mode 100644 index 0000000..35e2755 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Collection, Generic + +from ..structs import CT, RT, RequirementInformation + +if TYPE_CHECKING: + from .criterion import Criterion + + +class ResolverException(Exception): + """A base class for all exceptions raised by this module. + + Exceptions derived by this class should all be handled in this module. Any + bubbling pass the resolver should be treated as a bug. + """ + + +class RequirementsConflicted(ResolverException, Generic[RT, CT]): + def __init__(self, criterion: Criterion[RT, CT]) -> None: + super().__init__(criterion) + self.criterion = criterion + + def __str__(self) -> str: + return "Requirements conflict: {}".format( + ", ".join(repr(r) for r in self.criterion.iter_requirement()), + ) + + +class InconsistentCandidate(ResolverException, Generic[RT, CT]): + def __init__(self, candidate: CT, criterion: Criterion[RT, CT]): + super().__init__(candidate, criterion) + self.candidate = candidate + self.criterion = criterion + + def __str__(self) -> str: + return "Provided candidate {!r} does not satisfy {}".format( + self.candidate, + ", ".join(repr(r) for r in self.criterion.iter_requirement()), + ) + + +class ResolutionError(ResolverException): + pass + + +class ResolutionImpossible(ResolutionError, Generic[RT, CT]): + def __init__(self, causes: Collection[RequirementInformation[RT, CT]]): + super().__init__(causes) + # causes is a list of RequirementInformation objects + self.causes = causes + + +class ResolutionTooDeep(ResolutionError): + def __init__(self, round_count: int) -> None: + super().__init__(round_count) + self.round_count = round_count diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py new file mode 100644 index 0000000..f55ac7a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py @@ -0,0 +1,622 @@ +from __future__ import annotations + +import collections +import itertools +import operator +from typing import TYPE_CHECKING, Generic + +from ..structs import ( + CT, + KT, + RT, + DirectedGraph, + IterableView, + IteratorMapping, + RequirementInformation, + State, + build_iter_view, +) +from .abstract import AbstractResolver, Result +from .criterion import Criterion +from .exceptions import ( + InconsistentCandidate, + RequirementsConflicted, + ResolutionImpossible, + ResolutionTooDeep, + ResolverException, +) + +if TYPE_CHECKING: + from collections.abc import 
Collection, Iterable, Mapping + + from ..providers import AbstractProvider, Preference + from ..reporters import BaseReporter + +_OPTIMISTIC_BACKJUMPING_RATIO: float = 0.1 + + +def _build_result(state: State[RT, CT, KT]) -> Result[RT, CT, KT]: + mapping = state.mapping + all_keys: dict[int, KT | None] = {id(v): k for k, v in mapping.items()} + all_keys[id(None)] = None + + graph: DirectedGraph[KT | None] = DirectedGraph() + graph.add(None) # Sentinel as root dependencies' parent. + + connected: set[KT | None] = {None} + for key, criterion in state.criteria.items(): + if not _has_route_to_root(state.criteria, key, all_keys, connected): + continue + if key not in graph: + graph.add(key) + for p in criterion.iter_parent(): + try: + pkey = all_keys[id(p)] + except KeyError: + continue + if pkey not in graph: + graph.add(pkey) + graph.connect(pkey, key) + + return Result( + mapping={k: v for k, v in mapping.items() if k in connected}, + graph=graph, + criteria=state.criteria, + ) + + +class Resolution(Generic[RT, CT, KT]): + """Stateful resolution object. + + This is designed as a one-off object that holds information to kick start + the resolution process, and holds the results afterwards. + """ + + def __init__( + self, + provider: AbstractProvider[RT, CT, KT], + reporter: BaseReporter[RT, CT, KT], + ) -> None: + self._p = provider + self._r = reporter + self._states: list[State[RT, CT, KT]] = [] + + # Optimistic backjumping variables + self._optimistic_backjumping_ratio = _OPTIMISTIC_BACKJUMPING_RATIO + self._save_states: list[State[RT, CT, KT]] | None = None + self._optimistic_start_round: int | None = None + + @property + def state(self) -> State[RT, CT, KT]: + try: + return self._states[-1] + except IndexError as e: + raise AttributeError("state") from e + + def _push_new_state(self) -> None: + """Push a new state into history. + + This new state will be used to hold resolution results of the next + coming round. + """ + base = self._states[-1] + state = State( + mapping=base.mapping.copy(), + criteria=base.criteria.copy(), + backtrack_causes=base.backtrack_causes[:], + ) + self._states.append(state) + + def _add_to_criteria( + self, + criteria: dict[KT, Criterion[RT, CT]], + requirement: RT, + parent: CT | None, + ) -> None: + self._r.adding_requirement(requirement=requirement, parent=parent) + + identifier = self._p.identify(requirement_or_candidate=requirement) + criterion = criteria.get(identifier) + if criterion: + incompatibilities = list(criterion.incompatibilities) + else: + incompatibilities = [] + + matches = self._p.find_matches( + identifier=identifier, + requirements=IteratorMapping( + criteria, + operator.methodcaller("iter_requirement"), + {identifier: [requirement]}, + ), + incompatibilities=IteratorMapping( + criteria, + operator.attrgetter("incompatibilities"), + {identifier: incompatibilities}, + ), + ) + + if criterion: + information = list(criterion.information) + information.append(RequirementInformation(requirement, parent)) + else: + information = [RequirementInformation(requirement, parent)] + + criterion = Criterion( + candidates=build_iter_view(matches), + information=information, + incompatibilities=incompatibilities, + ) + if not criterion.candidates: + raise RequirementsConflicted(criterion) + criteria[identifier] = criterion + + def _remove_information_from_criteria( + self, criteria: dict[KT, Criterion[RT, CT]], parents: Collection[KT] + ) -> None: + """Remove information from parents of criteria. 
+ + Concretely, removes all values from each criterion's ``information`` + field that have one of ``parents`` as provider of the requirement. + + :param criteria: The criteria to update. + :param parents: Identifiers for which to remove information from all criteria. + """ + if not parents: + return + for key, criterion in criteria.items(): + criteria[key] = Criterion( + criterion.candidates, + [ + information + for information in criterion.information + if ( + information.parent is None + or self._p.identify(information.parent) not in parents + ) + ], + criterion.incompatibilities, + ) + + def _get_preference(self, name: KT) -> Preference: + return self._p.get_preference( + identifier=name, + resolutions=self.state.mapping, + candidates=IteratorMapping( + self.state.criteria, + operator.attrgetter("candidates"), + ), + information=IteratorMapping( + self.state.criteria, + operator.attrgetter("information"), + ), + backtrack_causes=self.state.backtrack_causes, + ) + + def _is_current_pin_satisfying( + self, name: KT, criterion: Criterion[RT, CT] + ) -> bool: + try: + current_pin = self.state.mapping[name] + except KeyError: + return False + return all( + self._p.is_satisfied_by(requirement=r, candidate=current_pin) + for r in criterion.iter_requirement() + ) + + def _get_updated_criteria(self, candidate: CT) -> dict[KT, Criterion[RT, CT]]: + criteria = self.state.criteria.copy() + for requirement in self._p.get_dependencies(candidate=candidate): + self._add_to_criteria(criteria, requirement, parent=candidate) + return criteria + + def _attempt_to_pin_criterion(self, name: KT) -> list[Criterion[RT, CT]]: + criterion = self.state.criteria[name] + + causes: list[Criterion[RT, CT]] = [] + for candidate in criterion.candidates: + try: + criteria = self._get_updated_criteria(candidate) + except RequirementsConflicted as e: + self._r.rejecting_candidate(e.criterion, candidate) + causes.append(e.criterion) + continue + + # Check the newly-pinned candidate actually works. This should + # always pass under normal circumstances, but in the case of a + # faulty provider, we will raise an error to notify the implementer + # to fix find_matches() and/or is_satisfied_by(). + satisfied = all( + self._p.is_satisfied_by(requirement=r, candidate=candidate) + for r in criterion.iter_requirement() + ) + if not satisfied: + raise InconsistentCandidate(candidate, criterion) + + self._r.pinning(candidate=candidate) + self.state.criteria.update(criteria) + + # Put newly-pinned candidate at the end. This is essential because + # backtracking looks at this mapping to get the last pin. + self.state.mapping.pop(name, None) + self.state.mapping[name] = candidate + + return [] + + # All candidates tried, nothing works. This criterion is a dead + # end, signal for backtracking. + return causes + + def _patch_criteria( + self, incompatibilities_from_broken: list[tuple[KT, list[CT]]] + ) -> bool: + # Create a new state from the last known-to-work one, and apply + # the previously gathered incompatibility information. 
+ for k, incompatibilities in incompatibilities_from_broken: + if not incompatibilities: + continue + try: + criterion = self.state.criteria[k] + except KeyError: + continue + matches = self._p.find_matches( + identifier=k, + requirements=IteratorMapping( + self.state.criteria, + operator.methodcaller("iter_requirement"), + ), + incompatibilities=IteratorMapping( + self.state.criteria, + operator.attrgetter("incompatibilities"), + {k: incompatibilities}, + ), + ) + candidates: IterableView[CT] = build_iter_view(matches) + if not candidates: + return False + incompatibilities.extend(criterion.incompatibilities) + self.state.criteria[k] = Criterion( + candidates=candidates, + information=list(criterion.information), + incompatibilities=incompatibilities, + ) + return True + + def _save_state(self) -> None: + """Save states for potential rollback if optimistic backjumping fails.""" + if self._save_states is None: + self._save_states = [ + State( + mapping=s.mapping.copy(), + criteria=s.criteria.copy(), + backtrack_causes=s.backtrack_causes[:], + ) + for s in self._states + ] + + def _rollback_states(self) -> None: + """Rollback states and disable optimistic backjumping.""" + self._optimistic_backjumping_ratio = 0.0 + if self._save_states: + self._states = self._save_states + self._save_states = None + + def _backjump(self, causes: list[RequirementInformation[RT, CT]]) -> bool: + """Perform backjumping. + + When we enter here, the stack is like this:: + + [ state Z ] + [ state Y ] + [ state X ] + .... earlier states are irrelevant. + + 1. No pins worked for Z, so it does not have a pin. + 2. We want to reset state Y to unpinned, and pin another candidate. + 3. State X holds what state Y was before the pin, but does not + have the incompatibility information gathered in state Y. + + Each iteration of the loop will: + + 1. Identify Z. The incompatibility is not always caused by the latest + state. For example, given three requirements A, B and C, with + dependencies A1, B1 and C1, where A1 and B1 are incompatible: the + last state might be related to C, so we want to discard the + previous state. + 2. Discard Z. + 3. Discard Y but remember its incompatibility information gathered + previously, and the failure we're dealing with right now. + 4. Push a new state Y' based on X, and apply the incompatibility + information from Y to Y'. + 5a. If this causes Y' to conflict, we need to backtrack again. Make Y' + the new Z and go back to step 2. + 5b. If the incompatibilities apply cleanly, end backtracking. + """ + incompatible_reqs: Iterable[CT | RT] = itertools.chain( + (c.parent for c in causes if c.parent is not None), + (c.requirement for c in causes), + ) + incompatible_deps = {self._p.identify(r) for r in incompatible_reqs} + while len(self._states) >= 3: + # Remove the state that triggered backtracking. + del self._states[-1] + + # Optimistically backtrack to a state that caused the incompatibility + broken_state = self.state + while True: + # Retrieve the last candidate pin and known incompatibilities. 
+ try: + broken_state = self._states.pop() + name, candidate = broken_state.mapping.popitem() + except (IndexError, KeyError): + raise ResolutionImpossible(causes) from None + + if ( + not self._optimistic_backjumping_ratio + and name not in incompatible_deps + ): + # For safe backjumping only backjump if the current dependency + # is not the same as the incompatible dependency + break + + # On the first time a non-safe backjump is done the state + # is saved so we can restore it later if the resolution fails + if ( + self._optimistic_backjumping_ratio + and self._save_states is None + and name not in incompatible_deps + ): + self._save_state() + + # If the current dependencies and the incompatible dependencies + # are overlapping then we have likely found a cause of the + # incompatibility + current_dependencies = { + self._p.identify(d) for d in self._p.get_dependencies(candidate) + } + if not current_dependencies.isdisjoint(incompatible_deps): + break + + # Fallback: We should not backtrack to the point where + # broken_state.mapping is empty, so stop backtracking for + # a chance for the resolution to recover + if not broken_state.mapping: + break + + incompatibilities_from_broken = [ + (k, list(v.incompatibilities)) for k, v in broken_state.criteria.items() + ] + + # Also mark the newly known incompatibility. + incompatibilities_from_broken.append((name, [candidate])) + + self._push_new_state() + success = self._patch_criteria(incompatibilities_from_broken) + + # It works! Let's work on this new state. + if success: + return True + + # State does not work after applying known incompatibilities. + # Try the still previous state. + + # No way to backtrack anymore. + return False + + def _extract_causes( + self, criteron: list[Criterion[RT, CT]] + ) -> list[RequirementInformation[RT, CT]]: + """Extract causes from list of criterion and deduplicate""" + return list({id(i): i for c in criteron for i in c.information}.values()) + + def resolve(self, requirements: Iterable[RT], max_rounds: int) -> State[RT, CT, KT]: + if self._states: + raise RuntimeError("already resolved") + + self._r.starting() + + # Initialize the root state. + self._states = [ + State( + mapping=collections.OrderedDict(), + criteria={}, + backtrack_causes=[], + ) + ] + for r in requirements: + try: + self._add_to_criteria(self.state.criteria, r, parent=None) + except RequirementsConflicted as e: + raise ResolutionImpossible(e.criterion.information) from e + + # The root state is saved as a sentinel so the first ever pin can have + # something to backtrack to if it fails. The root state is basically + # pinning the virtual "root" package in the graph. 
+ self._push_new_state() + + # Variables for optimistic backjumping + optimistic_rounds_cutoff: int | None = None + optimistic_backjumping_start_round: int | None = None + + for round_index in range(max_rounds): + self._r.starting_round(index=round_index) + + # Handle if optimistic backjumping has been running for too long + if self._optimistic_backjumping_ratio and self._save_states is not None: + if optimistic_backjumping_start_round is None: + optimistic_backjumping_start_round = round_index + optimistic_rounds_cutoff = int( + (max_rounds - round_index) * self._optimistic_backjumping_ratio + ) + + if optimistic_rounds_cutoff <= 0: + self._rollback_states() + continue + elif optimistic_rounds_cutoff is not None: + if ( + round_index - optimistic_backjumping_start_round + >= optimistic_rounds_cutoff + ): + self._rollback_states() + continue + + unsatisfied_names = [ + key + for key, criterion in self.state.criteria.items() + if not self._is_current_pin_satisfying(key, criterion) + ] + + # All criteria are accounted for. Nothing more to pin, we are done! + if not unsatisfied_names: + self._r.ending(state=self.state) + return self.state + + # keep track of satisfied names to calculate diff after pinning + satisfied_names = set(self.state.criteria.keys()) - set(unsatisfied_names) + + if len(unsatisfied_names) > 1: + narrowed_unstatisfied_names = list( + self._p.narrow_requirement_selection( + identifiers=unsatisfied_names, + resolutions=self.state.mapping, + candidates=IteratorMapping( + self.state.criteria, + operator.attrgetter("candidates"), + ), + information=IteratorMapping( + self.state.criteria, + operator.attrgetter("information"), + ), + backtrack_causes=self.state.backtrack_causes, + ) + ) + else: + narrowed_unstatisfied_names = unsatisfied_names + + # If there are no unsatisfied names use unsatisfied names + if not narrowed_unstatisfied_names: + raise RuntimeError("narrow_requirement_selection returned 0 names") + + # If there is only 1 unsatisfied name skip calling self._get_preference + if len(narrowed_unstatisfied_names) > 1: + # Choose the most preferred unpinned criterion to try. + name = min(narrowed_unstatisfied_names, key=self._get_preference) + else: + name = narrowed_unstatisfied_names[0] + + failure_criterion = self._attempt_to_pin_criterion(name) + + if failure_criterion: + causes = self._extract_causes(failure_criterion) + # Backjump if pinning fails. The backjump process puts us in + # an unpinned state, so we can work on it in the next round. + self._r.resolving_conflicts(causes=causes) + + try: + success = self._backjump(causes) + except ResolutionImpossible: + if self._optimistic_backjumping_ratio and self._save_states: + failed_optimistic_backjumping = True + else: + raise + else: + failed_optimistic_backjumping = bool( + not success + and self._optimistic_backjumping_ratio + and self._save_states + ) + + if failed_optimistic_backjumping and self._save_states: + self._rollback_states() + else: + self.state.backtrack_causes[:] = causes + + # Dead ends everywhere. Give up. + if not success: + raise ResolutionImpossible(self.state.backtrack_causes) + else: + # discard as information sources any invalidated names + # (unsatisfied names that were previously satisfied) + newly_unsatisfied_names = { + key + for key, criterion in self.state.criteria.items() + if key in satisfied_names + and not self._is_current_pin_satisfying(key, criterion) + } + self._remove_information_from_criteria( + self.state.criteria, newly_unsatisfied_names + ) + # Pinning was successful. 
Push a new state to do another pin. + self._push_new_state() + + self._r.ending_round(index=round_index, state=self.state) + + raise ResolutionTooDeep(max_rounds) + + +class Resolver(AbstractResolver[RT, CT, KT]): + """The thing that performs the actual resolution work.""" + + base_exception = ResolverException + + def resolve( # type: ignore[override] + self, + requirements: Iterable[RT], + max_rounds: int = 100, + ) -> Result[RT, CT, KT]: + """Take a collection of constraints, spit out the resolution result. + + The return value is a representation to the final resolution result. It + is a tuple subclass with three public members: + + * `mapping`: A dict of resolved candidates. Each key is an identifier + of a requirement (as returned by the provider's `identify` method), + and the value is the resolved candidate. + * `graph`: A `DirectedGraph` instance representing the dependency tree. + The vertices are keys of `mapping`, and each edge represents *why* + a particular package is included. A special vertex `None` is + included to represent parents of user-supplied requirements. + * `criteria`: A dict of "criteria" that hold detailed information on + how edges in the graph are derived. Each key is an identifier of a + requirement, and the value is a `Criterion` instance. + + The following exceptions may be raised if a resolution cannot be found: + + * `ResolutionImpossible`: A resolution cannot be found for the given + combination of requirements. The `causes` attribute of the + exception is a list of (requirement, parent), giving the + requirements that could not be satisfied. + * `ResolutionTooDeep`: The dependency tree is too deeply nested and + the resolver gave up. This is usually caused by a circular + dependency, but you can try to resolve this by increasing the + `max_rounds` argument. + """ + resolution = Resolution(self.provider, self.reporter) + state = resolution.resolve(requirements, max_rounds=max_rounds) + return _build_result(state) + + +def _has_route_to_root( + criteria: Mapping[KT, Criterion[RT, CT]], + key: KT | None, + all_keys: dict[int, KT | None], + connected: set[KT | None], +) -> bool: + if key in connected: + return True + if key not in criteria: + return False + assert key is not None + for p in criteria[key].iter_parent(): + try: + pkey = all_keys[id(p)] + except KeyError: + continue + if pkey in connected: + connected.add(key) + return True + if _has_route_to_root(criteria, pkey, all_keys, connected): + connected.add(key) + return True + return False diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/structs.py b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/structs.py new file mode 100644 index 0000000..18c74d4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/resolvelib/structs.py @@ -0,0 +1,209 @@ +from __future__ import annotations + +import itertools +from collections import namedtuple +from typing import ( + TYPE_CHECKING, + Callable, + Generic, + Iterable, + Iterator, + Mapping, + NamedTuple, + Sequence, + TypeVar, + Union, +) + +KT = TypeVar("KT") # Identifier. +RT = TypeVar("RT") # Requirement. +CT = TypeVar("CT") # Candidate. 
+ +Matches = Union[Iterable[CT], Callable[[], Iterable[CT]]] + +if TYPE_CHECKING: + from .resolvers.criterion import Criterion + + class RequirementInformation(NamedTuple, Generic[RT, CT]): + requirement: RT + parent: CT | None + + class State(NamedTuple, Generic[RT, CT, KT]): + """Resolution state in a round.""" + + mapping: dict[KT, CT] + criteria: dict[KT, Criterion[RT, CT]] + backtrack_causes: list[RequirementInformation[RT, CT]] + +else: + RequirementInformation = namedtuple( + "RequirementInformation", ["requirement", "parent"] + ) + State = namedtuple("State", ["mapping", "criteria", "backtrack_causes"]) + + +class DirectedGraph(Generic[KT]): + """A graph structure with directed edges.""" + + def __init__(self) -> None: + self._vertices: set[KT] = set() + self._forwards: dict[KT, set[KT]] = {} # <key> -> Set[<key>] + self._backwards: dict[KT, set[KT]] = {} # <key> -> Set[<key>] + + def __iter__(self) -> Iterator[KT]: + return iter(self._vertices) + + def __len__(self) -> int: + return len(self._vertices) + + def __contains__(self, key: KT) -> bool: + return key in self._vertices + + def copy(self) -> DirectedGraph[KT]: + """Return a shallow copy of this graph.""" + other = type(self)() + other._vertices = set(self._vertices) + other._forwards = {k: set(v) for k, v in self._forwards.items()} + other._backwards = {k: set(v) for k, v in self._backwards.items()} + return other + + def add(self, key: KT) -> None: + """Add a new vertex to the graph.""" + if key in self._vertices: + raise ValueError("vertex exists") + self._vertices.add(key) + self._forwards[key] = set() + self._backwards[key] = set() + + def remove(self, key: KT) -> None: + """Remove a vertex from the graph, disconnecting all edges from/to it.""" + self._vertices.remove(key) + for f in self._forwards.pop(key): + self._backwards[f].remove(key) + for t in self._backwards.pop(key): + self._forwards[t].remove(key) + + def connected(self, f: KT, t: KT) -> bool: + return f in self._backwards[t] and t in self._forwards[f] + + def connect(self, f: KT, t: KT) -> None: + """Connect two existing vertices. + + Nothing happens if the vertices are already connected.
+ """ + if t not in self._vertices: + raise KeyError(t) + self._forwards[f].add(t) + self._backwards[t].add(f) + + def iter_edges(self) -> Iterator[tuple[KT, KT]]: + for f, children in self._forwards.items(): + for t in children: + yield f, t + + def iter_children(self, key: KT) -> Iterator[KT]: + return iter(self._forwards[key]) + + def iter_parents(self, key: KT) -> Iterator[KT]: + return iter(self._backwards[key]) + + +class IteratorMapping(Mapping[KT, Iterator[CT]], Generic[RT, CT, KT]): + def __init__( + self, + mapping: Mapping[KT, RT], + accessor: Callable[[RT], Iterable[CT]], + appends: Mapping[KT, Iterable[CT]] | None = None, + ) -> None: + self._mapping = mapping + self._accessor = accessor + self._appends: Mapping[KT, Iterable[CT]] = appends or {} + + def __repr__(self) -> str: + return "IteratorMapping({!r}, {!r}, {!r})".format( + self._mapping, + self._accessor, + self._appends, + ) + + def __bool__(self) -> bool: + return bool(self._mapping or self._appends) + + def __contains__(self, key: object) -> bool: + return key in self._mapping or key in self._appends + + def __getitem__(self, k: KT) -> Iterator[CT]: + try: + v = self._mapping[k] + except KeyError: + return iter(self._appends[k]) + return itertools.chain(self._accessor(v), self._appends.get(k, ())) + + def __iter__(self) -> Iterator[KT]: + more = (k for k in self._appends if k not in self._mapping) + return itertools.chain(self._mapping, more) + + def __len__(self) -> int: + more = sum(1 for k in self._appends if k not in self._mapping) + return len(self._mapping) + more + + +class _FactoryIterableView(Iterable[RT]): + """Wrap an iterator factory returned by `find_matches()`. + + Calling `iter()` on this class would invoke the underlying iterator + factory, making it a "collection with ordering" that can be iterated + through multiple times, but lacks random access methods presented in + built-in Python sequence types. + """ + + def __init__(self, factory: Callable[[], Iterable[RT]]) -> None: + self._factory = factory + self._iterable: Iterable[RT] | None = None + + def __repr__(self) -> str: + return f"{type(self).__name__}({list(self)})" + + def __bool__(self) -> bool: + try: + next(iter(self)) + except StopIteration: + return False + return True + + def __iter__(self) -> Iterator[RT]: + iterable = self._factory() if self._iterable is None else self._iterable + self._iterable, current = itertools.tee(iterable) + return current + + +class _SequenceIterableView(Iterable[RT]): + """Wrap an iterable returned by find_matches(). + + This is essentially just a proxy to the underlying sequence that provides + the same interface as `_FactoryIterableView`. 
+ """ + + def __init__(self, sequence: Sequence[RT]): + self._sequence = sequence + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._sequence})" + + def __bool__(self) -> bool: + return bool(self._sequence) + + def __iter__(self) -> Iterator[RT]: + return iter(self._sequence) + + +def build_iter_view(matches: Matches[CT]) -> Iterable[CT]: + """Build an iterable view from the value returned by `find_matches()`.""" + if callable(matches): + return _FactoryIterableView(matches) + if not isinstance(matches, Sequence): + matches = list(matches) + return _SequenceIterableView(matches) + + +IterableView = Iterable diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__init__.py new file mode 100644 index 0000000..73f58d7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__init__.py @@ -0,0 +1,177 @@ +"""Rich text and beautiful formatting in the terminal.""" + +import os +from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union + +from ._extension import load_ipython_extension # noqa: F401 + +__all__ = ["get_console", "reconfigure", "print", "inspect", "print_json"] + +if TYPE_CHECKING: + from .console import Console + +# Global console used by alternative print +_console: Optional["Console"] = None + +try: + _IMPORT_CWD = os.path.abspath(os.getcwd()) +except FileNotFoundError: + # Can happen if the cwd has been deleted + _IMPORT_CWD = "" + + +def get_console() -> "Console": + """Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console, + and hasn't been explicitly given one. + + Returns: + Console: A console instance. + """ + global _console + if _console is None: + from .console import Console + + _console = Console() + + return _console + + +def reconfigure(*args: Any, **kwargs: Any) -> None: + """Reconfigures the global console by replacing it with another. + + Args: + *args (Any): Positional arguments for the replacement :class:`~rich.console.Console`. + **kwargs (Any): Keyword arguments for the replacement :class:`~rich.console.Console`. + """ + from pip._vendor.rich.console import Console + + new_console = Console(*args, **kwargs) + _console = get_console() + _console.__dict__ = new_console.__dict__ + + +def print( + *objects: Any, + sep: str = " ", + end: str = "\n", + file: Optional[IO[str]] = None, + flush: bool = False, +) -> None: + r"""Print object(s) supplied via positional arguments. + This function has an identical signature to the built-in print. + For more advanced features, see the :class:`~rich.console.Console` class. + + Args: + sep (str, optional): Separator between printed objects. Defaults to " ". + end (str, optional): Character to write at end of output. Defaults to "\\n". + file (IO[str], optional): File to write to, or None for stdout. Defaults to None. + flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False. + + """ + from .console import Console + + write_console = get_console() if file is None else Console(file=file) + return write_console.print(*objects, sep=sep, end=end) + + +def print_json( + json: Optional[str] = None, + *, + data: Any = None, + indent: Union[None, int, str] = 2, + highlight: bool = True, + skip_keys: bool = False, + ensure_ascii: bool = False, + check_circular: bool = True, + allow_nan: bool = True, + default: Optional[Callable[[Any], Any]] = None, + sort_keys: bool = False, +) -> None: + """Pretty prints JSON. 
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__init__.py
new file mode 100644
index 0000000..73f58d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__init__.py
@@ -0,0 +1,177 @@
+"""Rich text and beautiful formatting in the terminal."""
+
+import os
+from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
+
+from ._extension import load_ipython_extension  # noqa: F401
+
+__all__ = ["get_console", "reconfigure", "print", "inspect", "print_json"]
+
+if TYPE_CHECKING:
+    from .console import Console
+
+# Global console used by alternative print
+_console: Optional["Console"] = None
+
+try:
+    _IMPORT_CWD = os.path.abspath(os.getcwd())
+except FileNotFoundError:
+    # Can happen if the cwd has been deleted
+    _IMPORT_CWD = ""
+
+
+def get_console() -> "Console":
+    """Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
+    and hasn't been explicitly given one.
+
+    Returns:
+        Console: A console instance.
+    """
+    global _console
+    if _console is None:
+        from .console import Console
+
+        _console = Console()
+
+    return _console
+
+
+def reconfigure(*args: Any, **kwargs: Any) -> None:
+    """Reconfigures the global console by replacing it with another.
+
+    Args:
+        *args (Any): Positional arguments for the replacement :class:`~rich.console.Console`.
+        **kwargs (Any): Keyword arguments for the replacement :class:`~rich.console.Console`.
+    """
+    from pip._vendor.rich.console import Console
+
+    new_console = Console(*args, **kwargs)
+    _console = get_console()
+    _console.__dict__ = new_console.__dict__
+
+
+def print(
+    *objects: Any,
+    sep: str = " ",
+    end: str = "\n",
+    file: Optional[IO[str]] = None,
+    flush: bool = False,
+) -> None:
+    r"""Print object(s) supplied via positional arguments.
+    This function has an identical signature to the built-in print.
+    For more advanced features, see the :class:`~rich.console.Console` class.
+
+    Args:
+        sep (str, optional): Separator between printed objects. Defaults to " ".
+        end (str, optional): Character to write at end of output. Defaults to "\\n".
+        file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
+        flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
+
+    """
+    from .console import Console
+
+    write_console = get_console() if file is None else Console(file=file)
+    return write_console.print(*objects, sep=sep, end=end)
+
+
+def print_json(
+    json: Optional[str] = None,
+    *,
+    data: Any = None,
+    indent: Union[None, int, str] = 2,
+    highlight: bool = True,
+    skip_keys: bool = False,
+    ensure_ascii: bool = False,
+    check_circular: bool = True,
+    allow_nan: bool = True,
+    default: Optional[Callable[[Any], Any]] = None,
+    sort_keys: bool = False,
+) -> None:
+    """Pretty prints JSON. Output will be valid JSON.
+
+    Args:
+        json (str): A string containing JSON.
+        data (Any): If json is not supplied, then encode this data.
+        indent (int, optional): Number of spaces to indent. Defaults to 2.
+        highlight (bool, optional): Enable highlighting of output. Defaults to True.
+        skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+        ensure_ascii (bool, optional): Escape all non-ASCII characters. Defaults to False.
+        check_circular (bool, optional): Check for circular references. Defaults to True.
+        allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+        default (Callable, optional): A callable that converts values that can not be encoded
+            in to something that can be JSON encoded. Defaults to None.
+        sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+    """
+
+    get_console().print_json(
+        json,
+        data=data,
+        indent=indent,
+        highlight=highlight,
+        skip_keys=skip_keys,
+        ensure_ascii=ensure_ascii,
+        check_circular=check_circular,
+        allow_nan=allow_nan,
+        default=default,
+        sort_keys=sort_keys,
+    )
+
+
+def inspect(
+    obj: Any,
+    *,
+    console: Optional["Console"] = None,
+    title: Optional[str] = None,
+    help: bool = False,
+    methods: bool = False,
+    docs: bool = True,
+    private: bool = False,
+    dunder: bool = False,
+    sort: bool = True,
+    all: bool = False,
+    value: bool = True,
+) -> None:
+    """Inspect any Python object.
+
+    * inspect(<object>) to see summarized info.
+    * inspect(<object>, methods=True) to see methods.
+    * inspect(<object>, help=True) to see full (non-abbreviated) help.
+    * inspect(<object>, private=True) to see private attributes (single underscore).
+    * inspect(<object>, dunder=True) to see attributes beginning with double underscore.
+    * inspect(<object>, all=True) to see all attributes.
+
+    Args:
+        obj (Any): An object to inspect.
+        title (str, optional): Title to display over inspect result, or None to use the type name. Defaults to None.
+        help (bool, optional): Show full help text rather than just the first paragraph. Defaults to False.
+        methods (bool, optional): Enable inspection of callables. Defaults to False.
+        docs (bool, optional): Also render doc strings. Defaults to True.
+        private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
+        dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
+        sort (bool, optional): Sort attributes alphabetically. Defaults to True.
+        all (bool, optional): Show all attributes. Defaults to False.
+        value (bool, optional): Pretty print value. Defaults to True.
+    """
+    _console = console or get_console()
+    from pip._vendor.rich._inspect import Inspect
+
+    # Special case for inspect(inspect)
+    is_inspect = obj is inspect
+
+    _inspect = Inspect(
+        obj,
+        title=title,
+        help=is_inspect or help,
+        methods=is_inspect or methods,
+        docs=is_inspect or docs,
+        private=private,
+        dunder=dunder,
+        sort=sort,
+        all=all,
+        value=value,
+    )
+    _console.print(_inspect)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    print("Hello, **World**")
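A short usage sketch for the module-level helpers just defined (vendored import path as added by this patch; outside pip the same names live on plain `rich`). Note the snippet deliberately shadows the built-in `print`, exactly as the module intends:

# Sketch: the top-level rich API defined above.
from pip._vendor.rich import get_console, inspect, print, print_json, reconfigure

print("[bold red]Hello[/], world")                # markup-aware drop-in for print
print_json('{"name": "rich", "vendored": true}')  # pretty, highlighted JSON
reconfigure(width=80)        # swaps the global console's state in place
console = get_console()      # the same instance print/inspect use internally
inspect(console, methods=False)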
+ """ + _console = console or get_console() + from pip._vendor.rich._inspect import Inspect + + # Special case for inspect(inspect) + is_inspect = obj is inspect + + _inspect = Inspect( + obj, + title=title, + help=is_inspect or help, + methods=is_inspect or methods, + docs=is_inspect or docs, + private=private, + dunder=dunder, + sort=sort, + all=all, + value=value, + ) + _console.print(_inspect) + + +if __name__ == "__main__": # pragma: no cover + print("Hello, **World**") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__main__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__main__.py new file mode 100644 index 0000000..a583f1e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/__main__.py @@ -0,0 +1,245 @@ +import colorsys +import io +from time import process_time + +from pip._vendor.rich import box +from pip._vendor.rich.color import Color +from pip._vendor.rich.console import Console, ConsoleOptions, Group, RenderableType, RenderResult +from pip._vendor.rich.markdown import Markdown +from pip._vendor.rich.measure import Measurement +from pip._vendor.rich.pretty import Pretty +from pip._vendor.rich.segment import Segment +from pip._vendor.rich.style import Style +from pip._vendor.rich.syntax import Syntax +from pip._vendor.rich.table import Table +from pip._vendor.rich.text import Text + + +class ColorBox: + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + for y in range(0, 5): + for x in range(options.max_width): + h = x / options.max_width + l = 0.1 + ((y / 5) * 0.7) + r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0) + r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0) + bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255) + color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255) + yield Segment("▄", Style(color=color, bgcolor=bgcolor)) + yield Segment.line() + + def __rich_measure__( + self, console: "Console", options: ConsoleOptions + ) -> Measurement: + return Measurement(1, options.max_width) + + +def make_test_card() -> Table: + """Get a renderable that demonstrates a number of features.""" + table = Table.grid(padding=1, pad_edge=True) + table.title = "Rich features" + table.add_column("Feature", no_wrap=True, justify="center", style="bold red") + table.add_column("Demonstration") + + color_table = Table( + box=None, + expand=False, + show_header=False, + show_edge=False, + pad_edge=False, + ) + color_table.add_row( + ( + "✓ [bold green]4-bit color[/]\n" + "✓ [bold blue]8-bit color[/]\n" + "✓ [bold magenta]Truecolor (16.7 million)[/]\n" + "✓ [bold yellow]Dumb terminals[/]\n" + "✓ [bold cyan]Automatic color conversion" + ), + ColorBox(), + ) + + table.add_row("Colors", color_table) + + table.add_row( + "Styles", + "All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].", + ) + + lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus." + lorem_table = Table.grid(padding=1, collapse_padding=True) + lorem_table.pad_edge = False + lorem_table.add_row( + Text(lorem, justify="left", style="green"), + Text(lorem, justify="center", style="yellow"), + Text(lorem, justify="right", style="blue"), + Text(lorem, justify="full", style="red"), + ) + table.add_row( + "Text", + Group( + Text.from_markup( + """Word wrap text. 
+Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
+            ),
+            lorem_table,
+        ),
+    )
+
+    def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table:
+        table = Table(show_header=False, pad_edge=False, box=None, expand=True)
+        table.add_column("1", ratio=1)
+        table.add_column("2", ratio=1)
+        table.add_row(renderable1, renderable2)
+        return table
+
+    table.add_row(
+        "Asian\nlanguage\nsupport",
+        ":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
+    )
+
+    markup_example = (
+        "[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
+        ":+1: :apple: :ant: :bear: :baguette_bread: :bus: "
+    )
+    table.add_row("Markup", markup_example)
+
+    example_table = Table(
+        show_edge=False,
+        show_header=True,
+        expand=False,
+        row_styles=["none", "dim"],
+        box=box.SIMPLE,
+    )
+    example_table.add_column("[green]Date", style="green", no_wrap=True)
+    example_table.add_column("[blue]Title", style="blue")
+    example_table.add_column(
+        "[cyan]Production Budget",
+        style="cyan",
+        justify="right",
+        no_wrap=True,
+    )
+    example_table.add_column(
+        "[magenta]Box Office",
+        style="magenta",
+        justify="right",
+        no_wrap=True,
+    )
+    example_table.add_row(
+        "Dec 20, 2019",
+        "Star Wars: The Rise of Skywalker",
+        "$275,000,000",
+        "$375,126,118",
+    )
+    example_table.add_row(
+        "May 25, 2018",
+        "[b]Solo[/]: A Star Wars Story",
+        "$275,000,000",
+        "$393,151,347",
+    )
+    example_table.add_row(
+        "Dec 15, 2017",
+        "Star Wars Ep. VIII: The Last Jedi",
+        "$262,000,000",
+        "[bold]$1,332,539,889[/bold]",
+    )
+    example_table.add_row(
+        "May 19, 1999",
+        "Star Wars Ep. [b]I[/b]: [i]The Phantom Menace",
+        "$115,000,000",
+        "$1,027,044,677",
+    )
+
+    table.add_row("Tables", example_table)
+
+    code = '''\
+def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+    """Iterate and generate a tuple with a flag for last value."""
+    iter_values = iter(values)
+    try:
+        previous_value = next(iter_values)
+    except StopIteration:
+        return
+    for value in iter_values:
+        yield False, previous_value
+        previous_value = value
+    yield True, previous_value'''
+
+    pretty_data = {
+        "foo": [
+            3.1427,
+            (
+                "Paul Atreides",
+                "Vladimir Harkonnen",
+                "Thufir Hawat",
+            ),
+        ],
+        "atomic": (False, True, None),
+    }
+    table.add_row(
+        "Syntax\nhighlighting\n&\npretty\nprinting",
+        comparison(
+            Syntax(code, "python3", line_numbers=True, indent_guides=True),
+            Pretty(pretty_data, indent_guides=True),
+        ),
+    )
+
+    markdown_example = """\
+# Markdown
+
+Supports much of the *markdown* __syntax__!
+
+- Headers
+- Basic formatting: **bold**, *italic*, `code`
+- Block quotes
+- Lists, and more...
+    """
+    table.add_row(
+        "Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example))
+    )
+
+    table.add_row(
+        "+more!",
+        """Progress bars, columns, styled logging handler, tracebacks, etc...""",
+    )
+    return table
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from pip._vendor.rich.panel import Panel
+
+    console = Console(
+        file=io.StringIO(),
+        force_terminal=True,
+    )
+    test_card = make_test_card()
+
+    # Print once to warm cache
+    start = process_time()
+    console.print(test_card)
+    pre_cache_taken = round((process_time() - start) * 1000.0, 1)
+
+    console.file = io.StringIO()
+
+    start = process_time()
+    console.print(test_card)
+    taken = round((process_time() - start) * 1000.0, 1)
+
+    c = Console(record=True)
+    c.print(test_card)
+
+    console = Console()
+    console.print(f"[dim]rendered in [not dim]{pre_cache_taken}ms[/] (cold cache)")
+    console.print(f"[dim]rendered in [not dim]{taken}ms[/] (warm cache)")
+    console.print()
+    console.print(
+        Panel.fit(
+            "[b magenta]Hope you enjoy using Rich![/]\n\n"
+            "Please consider sponsoring me if you get value from my work.\n\n"
+            "Even the price of a ☕ can brighten my day!\n\n"
+            "https://github.com/sponsors/willmcgugan",
            border_style="red",
+            title="Help ensure Rich is maintained",
+        )
+    )
[diff stanzas for the binary rich/__pycache__/*.cpython-312.pyc files omitted]
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_cell_widths.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_cell_widths.py
new file mode 100644
index 0000000..608ae3a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_cell_widths.py
@@ -0,0 +1,454 @@
+# Auto generated by make_terminal_widths.py
+
+CELL_WIDTHS = [
+    (0, 0, 0),
+    (1, 31, -1),
+    (127, 159, -1),
+    (173, 173, 0),
+    (768, 879, 0),
+    (1155, 1161, 0),
+    (1425, 1469, 0),
+    (1471, 1471, 0),
+    (1473, 1474, 0),
+    (1476, 1477, 0),
+
(1479, 1479, 0), + (1536, 1541, 0), + (1552, 1562, 0), + (1564, 1564, 0), + (1611, 1631, 0), + (1648, 1648, 0), + (1750, 1757, 0), + (1759, 1764, 0), + (1767, 1768, 0), + (1770, 1773, 0), + (1807, 1807, 0), + (1809, 1809, 0), + (1840, 1866, 0), + (1958, 1968, 0), + (2027, 2035, 0), + (2045, 2045, 0), + (2070, 2073, 0), + (2075, 2083, 0), + (2085, 2087, 0), + (2089, 2093, 0), + (2137, 2139, 0), + (2192, 2193, 0), + (2200, 2207, 0), + (2250, 2307, 0), + (2362, 2364, 0), + (2366, 2383, 0), + (2385, 2391, 0), + (2402, 2403, 0), + (2433, 2435, 0), + (2492, 2492, 0), + (2494, 2500, 0), + (2503, 2504, 0), + (2507, 2509, 0), + (2519, 2519, 0), + (2530, 2531, 0), + (2558, 2558, 0), + (2561, 2563, 0), + (2620, 2620, 0), + (2622, 2626, 0), + (2631, 2632, 0), + (2635, 2637, 0), + (2641, 2641, 0), + (2672, 2673, 0), + (2677, 2677, 0), + (2689, 2691, 0), + (2748, 2748, 0), + (2750, 2757, 0), + (2759, 2761, 0), + (2763, 2765, 0), + (2786, 2787, 0), + (2810, 2815, 0), + (2817, 2819, 0), + (2876, 2876, 0), + (2878, 2884, 0), + (2887, 2888, 0), + (2891, 2893, 0), + (2901, 2903, 0), + (2914, 2915, 0), + (2946, 2946, 0), + (3006, 3010, 0), + (3014, 3016, 0), + (3018, 3021, 0), + (3031, 3031, 0), + (3072, 3076, 0), + (3132, 3132, 0), + (3134, 3140, 0), + (3142, 3144, 0), + (3146, 3149, 0), + (3157, 3158, 0), + (3170, 3171, 0), + (3201, 3203, 0), + (3260, 3260, 0), + (3262, 3268, 0), + (3270, 3272, 0), + (3274, 3277, 0), + (3285, 3286, 0), + (3298, 3299, 0), + (3315, 3315, 0), + (3328, 3331, 0), + (3387, 3388, 0), + (3390, 3396, 0), + (3398, 3400, 0), + (3402, 3405, 0), + (3415, 3415, 0), + (3426, 3427, 0), + (3457, 3459, 0), + (3530, 3530, 0), + (3535, 3540, 0), + (3542, 3542, 0), + (3544, 3551, 0), + (3570, 3571, 0), + (3633, 3633, 0), + (3636, 3642, 0), + (3655, 3662, 0), + (3761, 3761, 0), + (3764, 3772, 0), + (3784, 3790, 0), + (3864, 3865, 0), + (3893, 3893, 0), + (3895, 3895, 0), + (3897, 3897, 0), + (3902, 3903, 0), + (3953, 3972, 0), + (3974, 3975, 0), + (3981, 3991, 0), + (3993, 4028, 0), + (4038, 4038, 0), + (4139, 4158, 0), + (4182, 4185, 0), + (4190, 4192, 0), + (4194, 4196, 0), + (4199, 4205, 0), + (4209, 4212, 0), + (4226, 4237, 0), + (4239, 4239, 0), + (4250, 4253, 0), + (4352, 4447, 2), + (4448, 4607, 0), + (4957, 4959, 0), + (5906, 5909, 0), + (5938, 5940, 0), + (5970, 5971, 0), + (6002, 6003, 0), + (6068, 6099, 0), + (6109, 6109, 0), + (6155, 6159, 0), + (6277, 6278, 0), + (6313, 6313, 0), + (6432, 6443, 0), + (6448, 6459, 0), + (6679, 6683, 0), + (6741, 6750, 0), + (6752, 6780, 0), + (6783, 6783, 0), + (6832, 6862, 0), + (6912, 6916, 0), + (6964, 6980, 0), + (7019, 7027, 0), + (7040, 7042, 0), + (7073, 7085, 0), + (7142, 7155, 0), + (7204, 7223, 0), + (7376, 7378, 0), + (7380, 7400, 0), + (7405, 7405, 0), + (7412, 7412, 0), + (7415, 7417, 0), + (7616, 7679, 0), + (8203, 8207, 0), + (8232, 8238, 0), + (8288, 8292, 0), + (8294, 8303, 0), + (8400, 8432, 0), + (8986, 8987, 2), + (9001, 9002, 2), + (9193, 9196, 2), + (9200, 9200, 2), + (9203, 9203, 2), + (9725, 9726, 2), + (9748, 9749, 2), + (9800, 9811, 2), + (9855, 9855, 2), + (9875, 9875, 2), + (9889, 9889, 2), + (9898, 9899, 2), + (9917, 9918, 2), + (9924, 9925, 2), + (9934, 9934, 2), + (9940, 9940, 2), + (9962, 9962, 2), + (9970, 9971, 2), + (9973, 9973, 2), + (9978, 9978, 2), + (9981, 9981, 2), + (9989, 9989, 2), + (9994, 9995, 2), + (10024, 10024, 2), + (10060, 10060, 2), + (10062, 10062, 2), + (10067, 10069, 2), + (10071, 10071, 2), + (10133, 10135, 2), + (10160, 10160, 2), + (10175, 10175, 2), + (11035, 11036, 2), + (11088, 11088, 2), + 
(11093, 11093, 2), + (11503, 11505, 0), + (11647, 11647, 0), + (11744, 11775, 0), + (11904, 11929, 2), + (11931, 12019, 2), + (12032, 12245, 2), + (12272, 12329, 2), + (12330, 12335, 0), + (12336, 12350, 2), + (12353, 12438, 2), + (12441, 12442, 0), + (12443, 12543, 2), + (12549, 12591, 2), + (12593, 12686, 2), + (12688, 12771, 2), + (12783, 12830, 2), + (12832, 12871, 2), + (12880, 19903, 2), + (19968, 42124, 2), + (42128, 42182, 2), + (42607, 42610, 0), + (42612, 42621, 0), + (42654, 42655, 0), + (42736, 42737, 0), + (43010, 43010, 0), + (43014, 43014, 0), + (43019, 43019, 0), + (43043, 43047, 0), + (43052, 43052, 0), + (43136, 43137, 0), + (43188, 43205, 0), + (43232, 43249, 0), + (43263, 43263, 0), + (43302, 43309, 0), + (43335, 43347, 0), + (43360, 43388, 2), + (43392, 43395, 0), + (43443, 43456, 0), + (43493, 43493, 0), + (43561, 43574, 0), + (43587, 43587, 0), + (43596, 43597, 0), + (43643, 43645, 0), + (43696, 43696, 0), + (43698, 43700, 0), + (43703, 43704, 0), + (43710, 43711, 0), + (43713, 43713, 0), + (43755, 43759, 0), + (43765, 43766, 0), + (44003, 44010, 0), + (44012, 44013, 0), + (44032, 55203, 2), + (55216, 55295, 0), + (63744, 64255, 2), + (64286, 64286, 0), + (65024, 65039, 0), + (65040, 65049, 2), + (65056, 65071, 0), + (65072, 65106, 2), + (65108, 65126, 2), + (65128, 65131, 2), + (65279, 65279, 0), + (65281, 65376, 2), + (65504, 65510, 2), + (65529, 65531, 0), + (66045, 66045, 0), + (66272, 66272, 0), + (66422, 66426, 0), + (68097, 68099, 0), + (68101, 68102, 0), + (68108, 68111, 0), + (68152, 68154, 0), + (68159, 68159, 0), + (68325, 68326, 0), + (68900, 68903, 0), + (69291, 69292, 0), + (69373, 69375, 0), + (69446, 69456, 0), + (69506, 69509, 0), + (69632, 69634, 0), + (69688, 69702, 0), + (69744, 69744, 0), + (69747, 69748, 0), + (69759, 69762, 0), + (69808, 69818, 0), + (69821, 69821, 0), + (69826, 69826, 0), + (69837, 69837, 0), + (69888, 69890, 0), + (69927, 69940, 0), + (69957, 69958, 0), + (70003, 70003, 0), + (70016, 70018, 0), + (70067, 70080, 0), + (70089, 70092, 0), + (70094, 70095, 0), + (70188, 70199, 0), + (70206, 70206, 0), + (70209, 70209, 0), + (70367, 70378, 0), + (70400, 70403, 0), + (70459, 70460, 0), + (70462, 70468, 0), + (70471, 70472, 0), + (70475, 70477, 0), + (70487, 70487, 0), + (70498, 70499, 0), + (70502, 70508, 0), + (70512, 70516, 0), + (70709, 70726, 0), + (70750, 70750, 0), + (70832, 70851, 0), + (71087, 71093, 0), + (71096, 71104, 0), + (71132, 71133, 0), + (71216, 71232, 0), + (71339, 71351, 0), + (71453, 71467, 0), + (71724, 71738, 0), + (71984, 71989, 0), + (71991, 71992, 0), + (71995, 71998, 0), + (72000, 72000, 0), + (72002, 72003, 0), + (72145, 72151, 0), + (72154, 72160, 0), + (72164, 72164, 0), + (72193, 72202, 0), + (72243, 72249, 0), + (72251, 72254, 0), + (72263, 72263, 0), + (72273, 72283, 0), + (72330, 72345, 0), + (72751, 72758, 0), + (72760, 72767, 0), + (72850, 72871, 0), + (72873, 72886, 0), + (73009, 73014, 0), + (73018, 73018, 0), + (73020, 73021, 0), + (73023, 73029, 0), + (73031, 73031, 0), + (73098, 73102, 0), + (73104, 73105, 0), + (73107, 73111, 0), + (73459, 73462, 0), + (73472, 73473, 0), + (73475, 73475, 0), + (73524, 73530, 0), + (73534, 73538, 0), + (78896, 78912, 0), + (78919, 78933, 0), + (92912, 92916, 0), + (92976, 92982, 0), + (94031, 94031, 0), + (94033, 94087, 0), + (94095, 94098, 0), + (94176, 94179, 2), + (94180, 94180, 0), + (94192, 94193, 0), + (94208, 100343, 2), + (100352, 101589, 2), + (101632, 101640, 2), + (110576, 110579, 2), + (110581, 110587, 2), + (110589, 110590, 2), + (110592, 
110882, 2), + (110898, 110898, 2), + (110928, 110930, 2), + (110933, 110933, 2), + (110948, 110951, 2), + (110960, 111355, 2), + (113821, 113822, 0), + (113824, 113827, 0), + (118528, 118573, 0), + (118576, 118598, 0), + (119141, 119145, 0), + (119149, 119170, 0), + (119173, 119179, 0), + (119210, 119213, 0), + (119362, 119364, 0), + (121344, 121398, 0), + (121403, 121452, 0), + (121461, 121461, 0), + (121476, 121476, 0), + (121499, 121503, 0), + (121505, 121519, 0), + (122880, 122886, 0), + (122888, 122904, 0), + (122907, 122913, 0), + (122915, 122916, 0), + (122918, 122922, 0), + (123023, 123023, 0), + (123184, 123190, 0), + (123566, 123566, 0), + (123628, 123631, 0), + (124140, 124143, 0), + (125136, 125142, 0), + (125252, 125258, 0), + (126980, 126980, 2), + (127183, 127183, 2), + (127374, 127374, 2), + (127377, 127386, 2), + (127488, 127490, 2), + (127504, 127547, 2), + (127552, 127560, 2), + (127568, 127569, 2), + (127584, 127589, 2), + (127744, 127776, 2), + (127789, 127797, 2), + (127799, 127868, 2), + (127870, 127891, 2), + (127904, 127946, 2), + (127951, 127955, 2), + (127968, 127984, 2), + (127988, 127988, 2), + (127992, 127994, 2), + (127995, 127999, 0), + (128000, 128062, 2), + (128064, 128064, 2), + (128066, 128252, 2), + (128255, 128317, 2), + (128331, 128334, 2), + (128336, 128359, 2), + (128378, 128378, 2), + (128405, 128406, 2), + (128420, 128420, 2), + (128507, 128591, 2), + (128640, 128709, 2), + (128716, 128716, 2), + (128720, 128722, 2), + (128725, 128727, 2), + (128732, 128735, 2), + (128747, 128748, 2), + (128756, 128764, 2), + (128992, 129003, 2), + (129008, 129008, 2), + (129292, 129338, 2), + (129340, 129349, 2), + (129351, 129535, 2), + (129648, 129660, 2), + (129664, 129672, 2), + (129680, 129725, 2), + (129727, 129733, 2), + (129742, 129755, 2), + (129760, 129768, 2), + (129776, 129784, 2), + (131072, 196605, 2), + (196608, 262141, 2), + (917505, 917505, 0), + (917536, 917631, 0), + (917760, 917999, 0), +] diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_emoji_codes.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_emoji_codes.py new file mode 100644 index 0000000..1f2877b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_emoji_codes.py @@ -0,0 +1,3610 @@ +EMOJI = { + "1st_place_medal": "🥇", + "2nd_place_medal": "🥈", + "3rd_place_medal": "🥉", + "ab_button_(blood_type)": "🆎", + "atm_sign": "🏧", + "a_button_(blood_type)": "🅰", + "afghanistan": "🇦🇫", + "albania": "🇦🇱", + "algeria": "🇩🇿", + "american_samoa": "🇦🇸", + "andorra": "🇦🇩", + "angola": "🇦🇴", + "anguilla": "🇦🇮", + "antarctica": "🇦🇶", + "antigua_&_barbuda": "🇦🇬", + "aquarius": "♒", + "argentina": "🇦🇷", + "aries": "♈", + "armenia": "🇦🇲", + "aruba": "🇦🇼", + "ascension_island": "🇦🇨", + "australia": "🇦🇺", + "austria": "🇦🇹", + "azerbaijan": "🇦🇿", + "back_arrow": "🔙", + "b_button_(blood_type)": "🅱", + "bahamas": "🇧🇸", + "bahrain": "🇧🇭", + "bangladesh": "🇧🇩", + "barbados": "🇧🇧", + "belarus": "🇧🇾", + "belgium": "🇧🇪", + "belize": "🇧🇿", + "benin": "🇧🇯", + "bermuda": "🇧🇲", + "bhutan": "🇧🇹", + "bolivia": "🇧🇴", + "bosnia_&_herzegovina": "🇧🇦", + "botswana": "🇧🇼", + "bouvet_island": "🇧🇻", + "brazil": "🇧🇷", + "british_indian_ocean_territory": "🇮🇴", + "british_virgin_islands": "🇻🇬", + "brunei": "🇧🇳", + "bulgaria": "🇧🇬", + "burkina_faso": "🇧🇫", + "burundi": "🇧🇮", + "cl_button": "🆑", + "cool_button": "🆒", + "cambodia": "🇰🇭", + "cameroon": "🇨🇲", + "canada": "🇨🇦", + "canary_islands": "🇮🇨", + "cancer": "♋", + "cape_verde": "🇨🇻", + "capricorn": "♑", + "caribbean_netherlands": "🇧🇶", 
+ "cayman_islands": "🇰🇾", + "central_african_republic": "🇨🇫", + "ceuta_&_melilla": "🇪🇦", + "chad": "🇹🇩", + "chile": "🇨🇱", + "china": "🇨🇳", + "christmas_island": "🇨🇽", + "christmas_tree": "🎄", + "clipperton_island": "🇨🇵", + "cocos_(keeling)_islands": "🇨🇨", + "colombia": "🇨🇴", + "comoros": "🇰🇲", + "congo_-_brazzaville": "🇨🇬", + "congo_-_kinshasa": "🇨🇩", + "cook_islands": "🇨🇰", + "costa_rica": "🇨🇷", + "croatia": "🇭🇷", + "cuba": "🇨🇺", + "curaçao": "🇨🇼", + "cyprus": "🇨🇾", + "czechia": "🇨🇿", + "côte_d’ivoire": "🇨🇮", + "denmark": "🇩🇰", + "diego_garcia": "🇩🇬", + "djibouti": "🇩🇯", + "dominica": "🇩🇲", + "dominican_republic": "🇩🇴", + "end_arrow": "🔚", + "ecuador": "🇪🇨", + "egypt": "🇪🇬", + "el_salvador": "🇸🇻", + "england": "🏴\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f", + "equatorial_guinea": "🇬🇶", + "eritrea": "🇪🇷", + "estonia": "🇪🇪", + "ethiopia": "🇪🇹", + "european_union": "🇪🇺", + "free_button": "🆓", + "falkland_islands": "🇫🇰", + "faroe_islands": "🇫🇴", + "fiji": "🇫🇯", + "finland": "🇫🇮", + "france": "🇫🇷", + "french_guiana": "🇬🇫", + "french_polynesia": "🇵🇫", + "french_southern_territories": "🇹🇫", + "gabon": "🇬🇦", + "gambia": "🇬🇲", + "gemini": "♊", + "georgia": "🇬🇪", + "germany": "🇩🇪", + "ghana": "🇬🇭", + "gibraltar": "🇬🇮", + "greece": "🇬🇷", + "greenland": "🇬🇱", + "grenada": "🇬🇩", + "guadeloupe": "🇬🇵", + "guam": "🇬🇺", + "guatemala": "🇬🇹", + "guernsey": "🇬🇬", + "guinea": "🇬🇳", + "guinea-bissau": "🇬🇼", + "guyana": "🇬🇾", + "haiti": "🇭🇹", + "heard_&_mcdonald_islands": "🇭🇲", + "honduras": "🇭🇳", + "hong_kong_sar_china": "🇭🇰", + "hungary": "🇭🇺", + "id_button": "🆔", + "iceland": "🇮🇸", + "india": "🇮🇳", + "indonesia": "🇮🇩", + "iran": "🇮🇷", + "iraq": "🇮🇶", + "ireland": "🇮🇪", + "isle_of_man": "🇮🇲", + "israel": "🇮🇱", + "italy": "🇮🇹", + "jamaica": "🇯🇲", + "japan": "🗾", + "japanese_acceptable_button": "🉑", + "japanese_application_button": "🈸", + "japanese_bargain_button": "🉐", + "japanese_castle": "🏯", + "japanese_congratulations_button": "㊗", + "japanese_discount_button": "🈹", + "japanese_dolls": "🎎", + "japanese_free_of_charge_button": "🈚", + "japanese_here_button": "🈁", + "japanese_monthly_amount_button": "🈷", + "japanese_no_vacancy_button": "🈵", + "japanese_not_free_of_charge_button": "🈶", + "japanese_open_for_business_button": "🈺", + "japanese_passing_grade_button": "🈴", + "japanese_post_office": "🏣", + "japanese_prohibited_button": "🈲", + "japanese_reserved_button": "🈯", + "japanese_secret_button": "㊙", + "japanese_service_charge_button": "🈂", + "japanese_symbol_for_beginner": "🔰", + "japanese_vacancy_button": "🈳", + "jersey": "🇯🇪", + "jordan": "🇯🇴", + "kazakhstan": "🇰🇿", + "kenya": "🇰🇪", + "kiribati": "🇰🇮", + "kosovo": "🇽🇰", + "kuwait": "🇰🇼", + "kyrgyzstan": "🇰🇬", + "laos": "🇱🇦", + "latvia": "🇱🇻", + "lebanon": "🇱🇧", + "leo": "♌", + "lesotho": "🇱🇸", + "liberia": "🇱🇷", + "libra": "♎", + "libya": "🇱🇾", + "liechtenstein": "🇱🇮", + "lithuania": "🇱🇹", + "luxembourg": "🇱🇺", + "macau_sar_china": "🇲🇴", + "macedonia": "🇲🇰", + "madagascar": "🇲🇬", + "malawi": "🇲🇼", + "malaysia": "🇲🇾", + "maldives": "🇲🇻", + "mali": "🇲🇱", + "malta": "🇲🇹", + "marshall_islands": "🇲🇭", + "martinique": "🇲🇶", + "mauritania": "🇲🇷", + "mauritius": "🇲🇺", + "mayotte": "🇾🇹", + "mexico": "🇲🇽", + "micronesia": "🇫🇲", + "moldova": "🇲🇩", + "monaco": "🇲🇨", + "mongolia": "🇲🇳", + "montenegro": "🇲🇪", + "montserrat": "🇲🇸", + "morocco": "🇲🇦", + "mozambique": "🇲🇿", + "mrs._claus": "🤶", + "mrs._claus_dark_skin_tone": "🤶🏿", + "mrs._claus_light_skin_tone": "🤶🏻", + "mrs._claus_medium-dark_skin_tone": "🤶🏾", + "mrs._claus_medium-light_skin_tone": "🤶🏼", 
+ "mrs._claus_medium_skin_tone": "🤶🏽", + "myanmar_(burma)": "🇲🇲", + "new_button": "🆕", + "ng_button": "🆖", + "namibia": "🇳🇦", + "nauru": "🇳🇷", + "nepal": "🇳🇵", + "netherlands": "🇳🇱", + "new_caledonia": "🇳🇨", + "new_zealand": "🇳🇿", + "nicaragua": "🇳🇮", + "niger": "🇳🇪", + "nigeria": "🇳🇬", + "niue": "🇳🇺", + "norfolk_island": "🇳🇫", + "north_korea": "🇰🇵", + "northern_mariana_islands": "🇲🇵", + "norway": "🇳🇴", + "ok_button": "🆗", + "ok_hand": "👌", + "ok_hand_dark_skin_tone": "👌🏿", + "ok_hand_light_skin_tone": "👌🏻", + "ok_hand_medium-dark_skin_tone": "👌🏾", + "ok_hand_medium-light_skin_tone": "👌🏼", + "ok_hand_medium_skin_tone": "👌🏽", + "on!_arrow": "🔛", + "o_button_(blood_type)": "🅾", + "oman": "🇴🇲", + "ophiuchus": "⛎", + "p_button": "🅿", + "pakistan": "🇵🇰", + "palau": "🇵🇼", + "palestinian_territories": "🇵🇸", + "panama": "🇵🇦", + "papua_new_guinea": "🇵🇬", + "paraguay": "🇵🇾", + "peru": "🇵🇪", + "philippines": "🇵🇭", + "pisces": "♓", + "pitcairn_islands": "🇵🇳", + "poland": "🇵🇱", + "portugal": "🇵🇹", + "puerto_rico": "🇵🇷", + "qatar": "🇶🇦", + "romania": "🇷🇴", + "russia": "🇷🇺", + "rwanda": "🇷🇼", + "réunion": "🇷🇪", + "soon_arrow": "🔜", + "sos_button": "🆘", + "sagittarius": "♐", + "samoa": "🇼🇸", + "san_marino": "🇸🇲", + "santa_claus": "🎅", + "santa_claus_dark_skin_tone": "🎅🏿", + "santa_claus_light_skin_tone": "🎅🏻", + "santa_claus_medium-dark_skin_tone": "🎅🏾", + "santa_claus_medium-light_skin_tone": "🎅🏼", + "santa_claus_medium_skin_tone": "🎅🏽", + "saudi_arabia": "🇸🇦", + "scorpio": "♏", + "scotland": "🏴\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f", + "senegal": "🇸🇳", + "serbia": "🇷🇸", + "seychelles": "🇸🇨", + "sierra_leone": "🇸🇱", + "singapore": "🇸🇬", + "sint_maarten": "🇸🇽", + "slovakia": "🇸🇰", + "slovenia": "🇸🇮", + "solomon_islands": "🇸🇧", + "somalia": "🇸🇴", + "south_africa": "🇿🇦", + "south_georgia_&_south_sandwich_islands": "🇬🇸", + "south_korea": "🇰🇷", + "south_sudan": "🇸🇸", + "spain": "🇪🇸", + "sri_lanka": "🇱🇰", + "st._barthélemy": "🇧🇱", + "st._helena": "🇸🇭", + "st._kitts_&_nevis": "🇰🇳", + "st._lucia": "🇱🇨", + "st._martin": "🇲🇫", + "st._pierre_&_miquelon": "🇵🇲", + "st._vincent_&_grenadines": "🇻🇨", + "statue_of_liberty": "🗽", + "sudan": "🇸🇩", + "suriname": "🇸🇷", + "svalbard_&_jan_mayen": "🇸🇯", + "swaziland": "🇸🇿", + "sweden": "🇸🇪", + "switzerland": "🇨🇭", + "syria": "🇸🇾", + "são_tomé_&_príncipe": "🇸🇹", + "t-rex": "🦖", + "top_arrow": "🔝", + "taiwan": "🇹🇼", + "tajikistan": "🇹🇯", + "tanzania": "🇹🇿", + "taurus": "♉", + "thailand": "🇹🇭", + "timor-leste": "🇹🇱", + "togo": "🇹🇬", + "tokelau": "🇹🇰", + "tokyo_tower": "🗼", + "tonga": "🇹🇴", + "trinidad_&_tobago": "🇹🇹", + "tristan_da_cunha": "🇹🇦", + "tunisia": "🇹🇳", + "turkey": "🦃", + "turkmenistan": "🇹🇲", + "turks_&_caicos_islands": "🇹🇨", + "tuvalu": "🇹🇻", + "u.s._outlying_islands": "🇺🇲", + "u.s._virgin_islands": "🇻🇮", + "up!_button": "🆙", + "uganda": "🇺🇬", + "ukraine": "🇺🇦", + "united_arab_emirates": "🇦🇪", + "united_kingdom": "🇬🇧", + "united_nations": "🇺🇳", + "united_states": "🇺🇸", + "uruguay": "🇺🇾", + "uzbekistan": "🇺🇿", + "vs_button": "🆚", + "vanuatu": "🇻🇺", + "vatican_city": "🇻🇦", + "venezuela": "🇻🇪", + "vietnam": "🇻🇳", + "virgo": "♍", + "wales": "🏴\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f", + "wallis_&_futuna": "🇼🇫", + "western_sahara": "🇪🇭", + "yemen": "🇾🇪", + "zambia": "🇿🇲", + "zimbabwe": "🇿🇼", + "abacus": "🧮", + "adhesive_bandage": "🩹", + "admission_tickets": "🎟", + "adult": "🧑", + "adult_dark_skin_tone": "🧑🏿", + "adult_light_skin_tone": "🧑🏻", + "adult_medium-dark_skin_tone": "🧑🏾", + "adult_medium-light_skin_tone": "🧑🏼", + 
"adult_medium_skin_tone": "🧑🏽", + "aerial_tramway": "🚡", + "airplane": "✈", + "airplane_arrival": "🛬", + "airplane_departure": "🛫", + "alarm_clock": "⏰", + "alembic": "⚗", + "alien": "👽", + "alien_monster": "👾", + "ambulance": "🚑", + "american_football": "🏈", + "amphora": "🏺", + "anchor": "⚓", + "anger_symbol": "💢", + "angry_face": "😠", + "angry_face_with_horns": "👿", + "anguished_face": "😧", + "ant": "🐜", + "antenna_bars": "📶", + "anxious_face_with_sweat": "😰", + "articulated_lorry": "🚛", + "artist_palette": "🎨", + "astonished_face": "😲", + "atom_symbol": "⚛", + "auto_rickshaw": "🛺", + "automobile": "🚗", + "avocado": "🥑", + "axe": "🪓", + "baby": "👶", + "baby_angel": "👼", + "baby_angel_dark_skin_tone": "👼🏿", + "baby_angel_light_skin_tone": "👼🏻", + "baby_angel_medium-dark_skin_tone": "👼🏾", + "baby_angel_medium-light_skin_tone": "👼🏼", + "baby_angel_medium_skin_tone": "👼🏽", + "baby_bottle": "🍼", + "baby_chick": "🐤", + "baby_dark_skin_tone": "👶🏿", + "baby_light_skin_tone": "👶🏻", + "baby_medium-dark_skin_tone": "👶🏾", + "baby_medium-light_skin_tone": "👶🏼", + "baby_medium_skin_tone": "👶🏽", + "baby_symbol": "🚼", + "backhand_index_pointing_down": "👇", + "backhand_index_pointing_down_dark_skin_tone": "👇🏿", + "backhand_index_pointing_down_light_skin_tone": "👇🏻", + "backhand_index_pointing_down_medium-dark_skin_tone": "👇🏾", + "backhand_index_pointing_down_medium-light_skin_tone": "👇🏼", + "backhand_index_pointing_down_medium_skin_tone": "👇🏽", + "backhand_index_pointing_left": "👈", + "backhand_index_pointing_left_dark_skin_tone": "👈🏿", + "backhand_index_pointing_left_light_skin_tone": "👈🏻", + "backhand_index_pointing_left_medium-dark_skin_tone": "👈🏾", + "backhand_index_pointing_left_medium-light_skin_tone": "👈🏼", + "backhand_index_pointing_left_medium_skin_tone": "👈🏽", + "backhand_index_pointing_right": "👉", + "backhand_index_pointing_right_dark_skin_tone": "👉🏿", + "backhand_index_pointing_right_light_skin_tone": "👉🏻", + "backhand_index_pointing_right_medium-dark_skin_tone": "👉🏾", + "backhand_index_pointing_right_medium-light_skin_tone": "👉🏼", + "backhand_index_pointing_right_medium_skin_tone": "👉🏽", + "backhand_index_pointing_up": "👆", + "backhand_index_pointing_up_dark_skin_tone": "👆🏿", + "backhand_index_pointing_up_light_skin_tone": "👆🏻", + "backhand_index_pointing_up_medium-dark_skin_tone": "👆🏾", + "backhand_index_pointing_up_medium-light_skin_tone": "👆🏼", + "backhand_index_pointing_up_medium_skin_tone": "👆🏽", + "bacon": "🥓", + "badger": "🦡", + "badminton": "🏸", + "bagel": "🥯", + "baggage_claim": "🛄", + "baguette_bread": "🥖", + "balance_scale": "⚖", + "bald": "🦲", + "bald_man": "👨\u200d🦲", + "bald_woman": "👩\u200d🦲", + "ballet_shoes": "🩰", + "balloon": "🎈", + "ballot_box_with_ballot": "🗳", + "ballot_box_with_check": "☑", + "banana": "🍌", + "banjo": "🪕", + "bank": "🏦", + "bar_chart": "📊", + "barber_pole": "💈", + "baseball": "⚾", + "basket": "🧺", + "basketball": "🏀", + "bat": "🦇", + "bathtub": "🛁", + "battery": "🔋", + "beach_with_umbrella": "🏖", + "beaming_face_with_smiling_eyes": "😁", + "bear_face": "🐻", + "bearded_person": "🧔", + "bearded_person_dark_skin_tone": "🧔🏿", + "bearded_person_light_skin_tone": "🧔🏻", + "bearded_person_medium-dark_skin_tone": "🧔🏾", + "bearded_person_medium-light_skin_tone": "🧔🏼", + "bearded_person_medium_skin_tone": "🧔🏽", + "beating_heart": "💓", + "bed": "🛏", + "beer_mug": "🍺", + "bell": "🔔", + "bell_with_slash": "🔕", + "bellhop_bell": "🛎", + "bento_box": "🍱", + "beverage_box": "🧃", + "bicycle": "🚲", + "bikini": "👙", + "billed_cap": "🧢", + "biohazard": "☣", + "bird": "🐦", + 
"birthday_cake": "🎂", + "black_circle": "⚫", + "black_flag": "🏴", + "black_heart": "🖤", + "black_large_square": "⬛", + "black_medium-small_square": "◾", + "black_medium_square": "◼", + "black_nib": "✒", + "black_small_square": "▪", + "black_square_button": "🔲", + "blond-haired_man": "👱\u200d♂️", + "blond-haired_man_dark_skin_tone": "👱🏿\u200d♂️", + "blond-haired_man_light_skin_tone": "👱🏻\u200d♂️", + "blond-haired_man_medium-dark_skin_tone": "👱🏾\u200d♂️", + "blond-haired_man_medium-light_skin_tone": "👱🏼\u200d♂️", + "blond-haired_man_medium_skin_tone": "👱🏽\u200d♂️", + "blond-haired_person": "👱", + "blond-haired_person_dark_skin_tone": "👱🏿", + "blond-haired_person_light_skin_tone": "👱🏻", + "blond-haired_person_medium-dark_skin_tone": "👱🏾", + "blond-haired_person_medium-light_skin_tone": "👱🏼", + "blond-haired_person_medium_skin_tone": "👱🏽", + "blond-haired_woman": "👱\u200d♀️", + "blond-haired_woman_dark_skin_tone": "👱🏿\u200d♀️", + "blond-haired_woman_light_skin_tone": "👱🏻\u200d♀️", + "blond-haired_woman_medium-dark_skin_tone": "👱🏾\u200d♀️", + "blond-haired_woman_medium-light_skin_tone": "👱🏼\u200d♀️", + "blond-haired_woman_medium_skin_tone": "👱🏽\u200d♀️", + "blossom": "🌼", + "blowfish": "🐡", + "blue_book": "📘", + "blue_circle": "🔵", + "blue_heart": "💙", + "blue_square": "🟦", + "boar": "🐗", + "bomb": "💣", + "bone": "🦴", + "bookmark": "🔖", + "bookmark_tabs": "📑", + "books": "📚", + "bottle_with_popping_cork": "🍾", + "bouquet": "💐", + "bow_and_arrow": "🏹", + "bowl_with_spoon": "🥣", + "bowling": "🎳", + "boxing_glove": "🥊", + "boy": "👦", + "boy_dark_skin_tone": "👦🏿", + "boy_light_skin_tone": "👦🏻", + "boy_medium-dark_skin_tone": "👦🏾", + "boy_medium-light_skin_tone": "👦🏼", + "boy_medium_skin_tone": "👦🏽", + "brain": "🧠", + "bread": "🍞", + "breast-feeding": "🤱", + "breast-feeding_dark_skin_tone": "🤱🏿", + "breast-feeding_light_skin_tone": "🤱🏻", + "breast-feeding_medium-dark_skin_tone": "🤱🏾", + "breast-feeding_medium-light_skin_tone": "🤱🏼", + "breast-feeding_medium_skin_tone": "🤱🏽", + "brick": "🧱", + "bride_with_veil": "👰", + "bride_with_veil_dark_skin_tone": "👰🏿", + "bride_with_veil_light_skin_tone": "👰🏻", + "bride_with_veil_medium-dark_skin_tone": "👰🏾", + "bride_with_veil_medium-light_skin_tone": "👰🏼", + "bride_with_veil_medium_skin_tone": "👰🏽", + "bridge_at_night": "🌉", + "briefcase": "💼", + "briefs": "🩲", + "bright_button": "🔆", + "broccoli": "🥦", + "broken_heart": "💔", + "broom": "🧹", + "brown_circle": "🟤", + "brown_heart": "🤎", + "brown_square": "🟫", + "bug": "🐛", + "building_construction": "🏗", + "bullet_train": "🚅", + "burrito": "🌯", + "bus": "🚌", + "bus_stop": "🚏", + "bust_in_silhouette": "👤", + "busts_in_silhouette": "👥", + "butter": "🧈", + "butterfly": "🦋", + "cactus": "🌵", + "calendar": "📆", + "call_me_hand": "🤙", + "call_me_hand_dark_skin_tone": "🤙🏿", + "call_me_hand_light_skin_tone": "🤙🏻", + "call_me_hand_medium-dark_skin_tone": "🤙🏾", + "call_me_hand_medium-light_skin_tone": "🤙🏼", + "call_me_hand_medium_skin_tone": "🤙🏽", + "camel": "🐫", + "camera": "📷", + "camera_with_flash": "📸", + "camping": "🏕", + "candle": "🕯", + "candy": "🍬", + "canned_food": "🥫", + "canoe": "🛶", + "card_file_box": "🗃", + "card_index": "📇", + "card_index_dividers": "🗂", + "carousel_horse": "🎠", + "carp_streamer": "🎏", + "carrot": "🥕", + "castle": "🏰", + "cat": "🐱", + "cat_face": "🐱", + "cat_face_with_tears_of_joy": "😹", + "cat_face_with_wry_smile": "😼", + "chains": "⛓", + "chair": "🪑", + "chart_decreasing": "📉", + "chart_increasing": "📈", + "chart_increasing_with_yen": "💹", + "cheese_wedge": "🧀", + "chequered_flag": "🏁", 
+ "cherries": "🍒", + "cherry_blossom": "🌸", + "chess_pawn": "♟", + "chestnut": "🌰", + "chicken": "🐔", + "child": "🧒", + "child_dark_skin_tone": "🧒🏿", + "child_light_skin_tone": "🧒🏻", + "child_medium-dark_skin_tone": "🧒🏾", + "child_medium-light_skin_tone": "🧒🏼", + "child_medium_skin_tone": "🧒🏽", + "children_crossing": "🚸", + "chipmunk": "🐿", + "chocolate_bar": "🍫", + "chopsticks": "🥢", + "church": "⛪", + "cigarette": "🚬", + "cinema": "🎦", + "circled_m": "Ⓜ", + "circus_tent": "🎪", + "cityscape": "🏙", + "cityscape_at_dusk": "🌆", + "clamp": "🗜", + "clapper_board": "🎬", + "clapping_hands": "👏", + "clapping_hands_dark_skin_tone": "👏🏿", + "clapping_hands_light_skin_tone": "👏🏻", + "clapping_hands_medium-dark_skin_tone": "👏🏾", + "clapping_hands_medium-light_skin_tone": "👏🏼", + "clapping_hands_medium_skin_tone": "👏🏽", + "classical_building": "🏛", + "clinking_beer_mugs": "🍻", + "clinking_glasses": "🥂", + "clipboard": "📋", + "clockwise_vertical_arrows": "🔃", + "closed_book": "📕", + "closed_mailbox_with_lowered_flag": "📪", + "closed_mailbox_with_raised_flag": "📫", + "closed_umbrella": "🌂", + "cloud": "☁", + "cloud_with_lightning": "🌩", + "cloud_with_lightning_and_rain": "⛈", + "cloud_with_rain": "🌧", + "cloud_with_snow": "🌨", + "clown_face": "🤡", + "club_suit": "♣", + "clutch_bag": "👝", + "coat": "🧥", + "cocktail_glass": "🍸", + "coconut": "🥥", + "coffin": "⚰", + "cold_face": "🥶", + "collision": "💥", + "comet": "☄", + "compass": "🧭", + "computer_disk": "💽", + "computer_mouse": "🖱", + "confetti_ball": "🎊", + "confounded_face": "😖", + "confused_face": "😕", + "construction": "🚧", + "construction_worker": "👷", + "construction_worker_dark_skin_tone": "👷🏿", + "construction_worker_light_skin_tone": "👷🏻", + "construction_worker_medium-dark_skin_tone": "👷🏾", + "construction_worker_medium-light_skin_tone": "👷🏼", + "construction_worker_medium_skin_tone": "👷🏽", + "control_knobs": "🎛", + "convenience_store": "🏪", + "cooked_rice": "🍚", + "cookie": "🍪", + "cooking": "🍳", + "copyright": "©", + "couch_and_lamp": "🛋", + "counterclockwise_arrows_button": "🔄", + "couple_with_heart": "💑", + "couple_with_heart_man_man": "👨\u200d❤️\u200d👨", + "couple_with_heart_woman_man": "👩\u200d❤️\u200d👨", + "couple_with_heart_woman_woman": "👩\u200d❤️\u200d👩", + "cow": "🐮", + "cow_face": "🐮", + "cowboy_hat_face": "🤠", + "crab": "🦀", + "crayon": "🖍", + "credit_card": "💳", + "crescent_moon": "🌙", + "cricket": "🦗", + "cricket_game": "🏏", + "crocodile": "🐊", + "croissant": "🥐", + "cross_mark": "❌", + "cross_mark_button": "❎", + "crossed_fingers": "🤞", + "crossed_fingers_dark_skin_tone": "🤞🏿", + "crossed_fingers_light_skin_tone": "🤞🏻", + "crossed_fingers_medium-dark_skin_tone": "🤞🏾", + "crossed_fingers_medium-light_skin_tone": "🤞🏼", + "crossed_fingers_medium_skin_tone": "🤞🏽", + "crossed_flags": "🎌", + "crossed_swords": "⚔", + "crown": "👑", + "crying_cat_face": "😿", + "crying_face": "😢", + "crystal_ball": "🔮", + "cucumber": "🥒", + "cupcake": "🧁", + "cup_with_straw": "🥤", + "curling_stone": "🥌", + "curly_hair": "🦱", + "curly-haired_man": "👨\u200d🦱", + "curly-haired_woman": "👩\u200d🦱", + "curly_loop": "➰", + "currency_exchange": "💱", + "curry_rice": "🍛", + "custard": "🍮", + "customs": "🛃", + "cut_of_meat": "🥩", + "cyclone": "🌀", + "dagger": "🗡", + "dango": "🍡", + "dashing_away": "💨", + "deaf_person": "🧏", + "deciduous_tree": "🌳", + "deer": "🦌", + "delivery_truck": "🚚", + "department_store": "🏬", + "derelict_house": "🏚", + "desert": "🏜", + "desert_island": "🏝", + "desktop_computer": "🖥", + "detective": "🕵", + "detective_dark_skin_tone": "🕵🏿", + 
"detective_light_skin_tone": "🕵🏻", + "detective_medium-dark_skin_tone": "🕵🏾", + "detective_medium-light_skin_tone": "🕵🏼", + "detective_medium_skin_tone": "🕵🏽", + "diamond_suit": "♦", + "diamond_with_a_dot": "💠", + "dim_button": "🔅", + "direct_hit": "🎯", + "disappointed_face": "😞", + "diving_mask": "🤿", + "diya_lamp": "🪔", + "dizzy": "💫", + "dizzy_face": "😵", + "dna": "🧬", + "dog": "🐶", + "dog_face": "🐶", + "dollar_banknote": "💵", + "dolphin": "🐬", + "door": "🚪", + "dotted_six-pointed_star": "🔯", + "double_curly_loop": "➿", + "double_exclamation_mark": "‼", + "doughnut": "🍩", + "dove": "🕊", + "down-left_arrow": "↙", + "down-right_arrow": "↘", + "down_arrow": "⬇", + "downcast_face_with_sweat": "😓", + "downwards_button": "🔽", + "dragon": "🐉", + "dragon_face": "🐲", + "dress": "👗", + "drooling_face": "🤤", + "drop_of_blood": "🩸", + "droplet": "💧", + "drum": "🥁", + "duck": "🦆", + "dumpling": "🥟", + "dvd": "📀", + "e-mail": "📧", + "eagle": "🦅", + "ear": "👂", + "ear_dark_skin_tone": "👂🏿", + "ear_light_skin_tone": "👂🏻", + "ear_medium-dark_skin_tone": "👂🏾", + "ear_medium-light_skin_tone": "👂🏼", + "ear_medium_skin_tone": "👂🏽", + "ear_of_corn": "🌽", + "ear_with_hearing_aid": "🦻", + "egg": "🍳", + "eggplant": "🍆", + "eight-pointed_star": "✴", + "eight-spoked_asterisk": "✳", + "eight-thirty": "🕣", + "eight_o’clock": "🕗", + "eject_button": "⏏", + "electric_plug": "🔌", + "elephant": "🐘", + "eleven-thirty": "🕦", + "eleven_o’clock": "🕚", + "elf": "🧝", + "elf_dark_skin_tone": "🧝🏿", + "elf_light_skin_tone": "🧝🏻", + "elf_medium-dark_skin_tone": "🧝🏾", + "elf_medium-light_skin_tone": "🧝🏼", + "elf_medium_skin_tone": "🧝🏽", + "envelope": "✉", + "envelope_with_arrow": "📩", + "euro_banknote": "💶", + "evergreen_tree": "🌲", + "ewe": "🐑", + "exclamation_mark": "❗", + "exclamation_question_mark": "⁉", + "exploding_head": "🤯", + "expressionless_face": "😑", + "eye": "👁", + "eye_in_speech_bubble": "👁️\u200d🗨️", + "eyes": "👀", + "face_blowing_a_kiss": "😘", + "face_savoring_food": "😋", + "face_screaming_in_fear": "😱", + "face_vomiting": "🤮", + "face_with_hand_over_mouth": "🤭", + "face_with_head-bandage": "🤕", + "face_with_medical_mask": "😷", + "face_with_monocle": "🧐", + "face_with_open_mouth": "😮", + "face_with_raised_eyebrow": "🤨", + "face_with_rolling_eyes": "🙄", + "face_with_steam_from_nose": "😤", + "face_with_symbols_on_mouth": "🤬", + "face_with_tears_of_joy": "😂", + "face_with_thermometer": "🤒", + "face_with_tongue": "😛", + "face_without_mouth": "😶", + "factory": "🏭", + "fairy": "🧚", + "fairy_dark_skin_tone": "🧚🏿", + "fairy_light_skin_tone": "🧚🏻", + "fairy_medium-dark_skin_tone": "🧚🏾", + "fairy_medium-light_skin_tone": "🧚🏼", + "fairy_medium_skin_tone": "🧚🏽", + "falafel": "🧆", + "fallen_leaf": "🍂", + "family": "👪", + "family_man_boy": "👨\u200d👦", + "family_man_boy_boy": "👨\u200d👦\u200d👦", + "family_man_girl": "👨\u200d👧", + "family_man_girl_boy": "👨\u200d👧\u200d👦", + "family_man_girl_girl": "👨\u200d👧\u200d👧", + "family_man_man_boy": "👨\u200d👨\u200d👦", + "family_man_man_boy_boy": "👨\u200d👨\u200d👦\u200d👦", + "family_man_man_girl": "👨\u200d👨\u200d👧", + "family_man_man_girl_boy": "👨\u200d👨\u200d👧\u200d👦", + "family_man_man_girl_girl": "👨\u200d👨\u200d👧\u200d👧", + "family_man_woman_boy": "👨\u200d👩\u200d👦", + "family_man_woman_boy_boy": "👨\u200d👩\u200d👦\u200d👦", + "family_man_woman_girl": "👨\u200d👩\u200d👧", + "family_man_woman_girl_boy": "👨\u200d👩\u200d👧\u200d👦", + "family_man_woman_girl_girl": "👨\u200d👩\u200d👧\u200d👧", + "family_woman_boy": "👩\u200d👦", + "family_woman_boy_boy": "👩\u200d👦\u200d👦", + "family_woman_girl": "👩\u200d👧", 
+ "family_woman_girl_boy": "👩\u200d👧\u200d👦", + "family_woman_girl_girl": "👩\u200d👧\u200d👧", + "family_woman_woman_boy": "👩\u200d👩\u200d👦", + "family_woman_woman_boy_boy": "👩\u200d👩\u200d👦\u200d👦", + "family_woman_woman_girl": "👩\u200d👩\u200d👧", + "family_woman_woman_girl_boy": "👩\u200d👩\u200d👧\u200d👦", + "family_woman_woman_girl_girl": "👩\u200d👩\u200d👧\u200d👧", + "fast-forward_button": "⏩", + "fast_down_button": "⏬", + "fast_reverse_button": "⏪", + "fast_up_button": "⏫", + "fax_machine": "📠", + "fearful_face": "😨", + "female_sign": "♀", + "ferris_wheel": "🎡", + "ferry": "⛴", + "field_hockey": "🏑", + "file_cabinet": "🗄", + "file_folder": "📁", + "film_frames": "🎞", + "film_projector": "📽", + "fire": "🔥", + "fire_extinguisher": "🧯", + "firecracker": "🧨", + "fire_engine": "🚒", + "fireworks": "🎆", + "first_quarter_moon": "🌓", + "first_quarter_moon_face": "🌛", + "fish": "🐟", + "fish_cake_with_swirl": "🍥", + "fishing_pole": "🎣", + "five-thirty": "🕠", + "five_o’clock": "🕔", + "flag_in_hole": "⛳", + "flamingo": "🦩", + "flashlight": "🔦", + "flat_shoe": "🥿", + "fleur-de-lis": "⚜", + "flexed_biceps": "💪", + "flexed_biceps_dark_skin_tone": "💪🏿", + "flexed_biceps_light_skin_tone": "💪🏻", + "flexed_biceps_medium-dark_skin_tone": "💪🏾", + "flexed_biceps_medium-light_skin_tone": "💪🏼", + "flexed_biceps_medium_skin_tone": "💪🏽", + "floppy_disk": "💾", + "flower_playing_cards": "🎴", + "flushed_face": "😳", + "flying_disc": "🥏", + "flying_saucer": "🛸", + "fog": "🌫", + "foggy": "🌁", + "folded_hands": "🙏", + "folded_hands_dark_skin_tone": "🙏🏿", + "folded_hands_light_skin_tone": "🙏🏻", + "folded_hands_medium-dark_skin_tone": "🙏🏾", + "folded_hands_medium-light_skin_tone": "🙏🏼", + "folded_hands_medium_skin_tone": "🙏🏽", + "foot": "🦶", + "footprints": "👣", + "fork_and_knife": "🍴", + "fork_and_knife_with_plate": "🍽", + "fortune_cookie": "🥠", + "fountain": "⛲", + "fountain_pen": "🖋", + "four-thirty": "🕟", + "four_leaf_clover": "🍀", + "four_o’clock": "🕓", + "fox_face": "🦊", + "framed_picture": "🖼", + "french_fries": "🍟", + "fried_shrimp": "🍤", + "frog_face": "🐸", + "front-facing_baby_chick": "🐥", + "frowning_face": "☹", + "frowning_face_with_open_mouth": "😦", + "fuel_pump": "⛽", + "full_moon": "🌕", + "full_moon_face": "🌝", + "funeral_urn": "⚱", + "game_die": "🎲", + "garlic": "🧄", + "gear": "⚙", + "gem_stone": "💎", + "genie": "🧞", + "ghost": "👻", + "giraffe": "🦒", + "girl": "👧", + "girl_dark_skin_tone": "👧🏿", + "girl_light_skin_tone": "👧🏻", + "girl_medium-dark_skin_tone": "👧🏾", + "girl_medium-light_skin_tone": "👧🏼", + "girl_medium_skin_tone": "👧🏽", + "glass_of_milk": "🥛", + "glasses": "👓", + "globe_showing_americas": "🌎", + "globe_showing_asia-australia": "🌏", + "globe_showing_europe-africa": "🌍", + "globe_with_meridians": "🌐", + "gloves": "🧤", + "glowing_star": "🌟", + "goal_net": "🥅", + "goat": "🐐", + "goblin": "👺", + "goggles": "🥽", + "gorilla": "🦍", + "graduation_cap": "🎓", + "grapes": "🍇", + "green_apple": "🍏", + "green_book": "📗", + "green_circle": "🟢", + "green_heart": "💚", + "green_salad": "🥗", + "green_square": "🟩", + "grimacing_face": "😬", + "grinning_cat_face": "😺", + "grinning_cat_face_with_smiling_eyes": "😸", + "grinning_face": "😀", + "grinning_face_with_big_eyes": "😃", + "grinning_face_with_smiling_eyes": "😄", + "grinning_face_with_sweat": "😅", + "grinning_squinting_face": "😆", + "growing_heart": "💗", + "guard": "💂", + "guard_dark_skin_tone": "💂🏿", + "guard_light_skin_tone": "💂🏻", + "guard_medium-dark_skin_tone": "💂🏾", + "guard_medium-light_skin_tone": "💂🏼", + "guard_medium_skin_tone": "💂🏽", + "guide_dog": "🦮", + 
"guitar": "🎸", + "hamburger": "🍔", + "hammer": "🔨", + "hammer_and_pick": "⚒", + "hammer_and_wrench": "🛠", + "hamster_face": "🐹", + "hand_with_fingers_splayed": "🖐", + "hand_with_fingers_splayed_dark_skin_tone": "🖐🏿", + "hand_with_fingers_splayed_light_skin_tone": "🖐🏻", + "hand_with_fingers_splayed_medium-dark_skin_tone": "🖐🏾", + "hand_with_fingers_splayed_medium-light_skin_tone": "🖐🏼", + "hand_with_fingers_splayed_medium_skin_tone": "🖐🏽", + "handbag": "👜", + "handshake": "🤝", + "hatching_chick": "🐣", + "headphone": "🎧", + "hear-no-evil_monkey": "🙉", + "heart_decoration": "💟", + "heart_suit": "♥", + "heart_with_arrow": "💘", + "heart_with_ribbon": "💝", + "heavy_check_mark": "✔", + "heavy_division_sign": "➗", + "heavy_dollar_sign": "💲", + "heavy_heart_exclamation": "❣", + "heavy_large_circle": "⭕", + "heavy_minus_sign": "➖", + "heavy_multiplication_x": "✖", + "heavy_plus_sign": "➕", + "hedgehog": "🦔", + "helicopter": "🚁", + "herb": "🌿", + "hibiscus": "🌺", + "high-heeled_shoe": "👠", + "high-speed_train": "🚄", + "high_voltage": "⚡", + "hiking_boot": "🥾", + "hindu_temple": "🛕", + "hippopotamus": "🦛", + "hole": "🕳", + "honey_pot": "🍯", + "honeybee": "🐝", + "horizontal_traffic_light": "🚥", + "horse": "🐴", + "horse_face": "🐴", + "horse_racing": "🏇", + "horse_racing_dark_skin_tone": "🏇🏿", + "horse_racing_light_skin_tone": "🏇🏻", + "horse_racing_medium-dark_skin_tone": "🏇🏾", + "horse_racing_medium-light_skin_tone": "🏇🏼", + "horse_racing_medium_skin_tone": "🏇🏽", + "hospital": "🏥", + "hot_beverage": "☕", + "hot_dog": "🌭", + "hot_face": "🥵", + "hot_pepper": "🌶", + "hot_springs": "♨", + "hotel": "🏨", + "hourglass_done": "⌛", + "hourglass_not_done": "⏳", + "house": "🏠", + "house_with_garden": "🏡", + "houses": "🏘", + "hugging_face": "🤗", + "hundred_points": "💯", + "hushed_face": "😯", + "ice": "🧊", + "ice_cream": "🍨", + "ice_hockey": "🏒", + "ice_skate": "⛸", + "inbox_tray": "📥", + "incoming_envelope": "📨", + "index_pointing_up": "☝", + "index_pointing_up_dark_skin_tone": "☝🏿", + "index_pointing_up_light_skin_tone": "☝🏻", + "index_pointing_up_medium-dark_skin_tone": "☝🏾", + "index_pointing_up_medium-light_skin_tone": "☝🏼", + "index_pointing_up_medium_skin_tone": "☝🏽", + "infinity": "♾", + "information": "ℹ", + "input_latin_letters": "🔤", + "input_latin_lowercase": "🔡", + "input_latin_uppercase": "🔠", + "input_numbers": "🔢", + "input_symbols": "🔣", + "jack-o-lantern": "🎃", + "jeans": "👖", + "jigsaw": "🧩", + "joker": "🃏", + "joystick": "🕹", + "kaaba": "🕋", + "kangaroo": "🦘", + "key": "🔑", + "keyboard": "⌨", + "keycap_#": "#️⃣", + "keycap_*": "*️⃣", + "keycap_0": "0️⃣", + "keycap_1": "1️⃣", + "keycap_10": "🔟", + "keycap_2": "2️⃣", + "keycap_3": "3️⃣", + "keycap_4": "4️⃣", + "keycap_5": "5️⃣", + "keycap_6": "6️⃣", + "keycap_7": "7️⃣", + "keycap_8": "8️⃣", + "keycap_9": "9️⃣", + "kick_scooter": "🛴", + "kimono": "👘", + "kiss": "💋", + "kiss_man_man": "👨\u200d❤️\u200d💋\u200d👨", + "kiss_mark": "💋", + "kiss_woman_man": "👩\u200d❤️\u200d💋\u200d👨", + "kiss_woman_woman": "👩\u200d❤️\u200d💋\u200d👩", + "kissing_cat_face": "😽", + "kissing_face": "😗", + "kissing_face_with_closed_eyes": "😚", + "kissing_face_with_smiling_eyes": "😙", + "kitchen_knife": "🔪", + "kite": "🪁", + "kiwi_fruit": "🥝", + "koala": "🐨", + "lab_coat": "🥼", + "label": "🏷", + "lacrosse": "🥍", + "lady_beetle": "🐞", + "laptop_computer": "💻", + "large_blue_diamond": "🔷", + "large_orange_diamond": "🔶", + "last_quarter_moon": "🌗", + "last_quarter_moon_face": "🌜", + "last_track_button": "⏮", + "latin_cross": "✝", + "leaf_fluttering_in_wind": "🍃", + "leafy_green": "🥬", 
+ "ledger": "📒", + "left-facing_fist": "🤛", + "left-facing_fist_dark_skin_tone": "🤛🏿", + "left-facing_fist_light_skin_tone": "🤛🏻", + "left-facing_fist_medium-dark_skin_tone": "🤛🏾", + "left-facing_fist_medium-light_skin_tone": "🤛🏼", + "left-facing_fist_medium_skin_tone": "🤛🏽", + "left-right_arrow": "↔", + "left_arrow": "⬅", + "left_arrow_curving_right": "↪", + "left_luggage": "🛅", + "left_speech_bubble": "🗨", + "leg": "🦵", + "lemon": "🍋", + "leopard": "🐆", + "level_slider": "🎚", + "light_bulb": "💡", + "light_rail": "🚈", + "link": "🔗", + "linked_paperclips": "🖇", + "lion_face": "🦁", + "lipstick": "💄", + "litter_in_bin_sign": "🚮", + "lizard": "🦎", + "llama": "🦙", + "lobster": "🦞", + "locked": "🔒", + "locked_with_key": "🔐", + "locked_with_pen": "🔏", + "locomotive": "🚂", + "lollipop": "🍭", + "lotion_bottle": "🧴", + "loudly_crying_face": "😭", + "loudspeaker": "📢", + "love-you_gesture": "🤟", + "love-you_gesture_dark_skin_tone": "🤟🏿", + "love-you_gesture_light_skin_tone": "🤟🏻", + "love-you_gesture_medium-dark_skin_tone": "🤟🏾", + "love-you_gesture_medium-light_skin_tone": "🤟🏼", + "love-you_gesture_medium_skin_tone": "🤟🏽", + "love_hotel": "🏩", + "love_letter": "💌", + "luggage": "🧳", + "lying_face": "🤥", + "mage": "🧙", + "mage_dark_skin_tone": "🧙🏿", + "mage_light_skin_tone": "🧙🏻", + "mage_medium-dark_skin_tone": "🧙🏾", + "mage_medium-light_skin_tone": "🧙🏼", + "mage_medium_skin_tone": "🧙🏽", + "magnet": "🧲", + "magnifying_glass_tilted_left": "🔍", + "magnifying_glass_tilted_right": "🔎", + "mahjong_red_dragon": "🀄", + "male_sign": "♂", + "man": "👨", + "man_and_woman_holding_hands": "👫", + "man_artist": "👨\u200d🎨", + "man_artist_dark_skin_tone": "👨🏿\u200d🎨", + "man_artist_light_skin_tone": "👨🏻\u200d🎨", + "man_artist_medium-dark_skin_tone": "👨🏾\u200d🎨", + "man_artist_medium-light_skin_tone": "👨🏼\u200d🎨", + "man_artist_medium_skin_tone": "👨🏽\u200d🎨", + "man_astronaut": "👨\u200d🚀", + "man_astronaut_dark_skin_tone": "👨🏿\u200d🚀", + "man_astronaut_light_skin_tone": "👨🏻\u200d🚀", + "man_astronaut_medium-dark_skin_tone": "👨🏾\u200d🚀", + "man_astronaut_medium-light_skin_tone": "👨🏼\u200d🚀", + "man_astronaut_medium_skin_tone": "👨🏽\u200d🚀", + "man_biking": "🚴\u200d♂️", + "man_biking_dark_skin_tone": "🚴🏿\u200d♂️", + "man_biking_light_skin_tone": "🚴🏻\u200d♂️", + "man_biking_medium-dark_skin_tone": "🚴🏾\u200d♂️", + "man_biking_medium-light_skin_tone": "🚴🏼\u200d♂️", + "man_biking_medium_skin_tone": "🚴🏽\u200d♂️", + "man_bouncing_ball": "⛹️\u200d♂️", + "man_bouncing_ball_dark_skin_tone": "⛹🏿\u200d♂️", + "man_bouncing_ball_light_skin_tone": "⛹🏻\u200d♂️", + "man_bouncing_ball_medium-dark_skin_tone": "⛹🏾\u200d♂️", + "man_bouncing_ball_medium-light_skin_tone": "⛹🏼\u200d♂️", + "man_bouncing_ball_medium_skin_tone": "⛹🏽\u200d♂️", + "man_bowing": "🙇\u200d♂️", + "man_bowing_dark_skin_tone": "🙇🏿\u200d♂️", + "man_bowing_light_skin_tone": "🙇🏻\u200d♂️", + "man_bowing_medium-dark_skin_tone": "🙇🏾\u200d♂️", + "man_bowing_medium-light_skin_tone": "🙇🏼\u200d♂️", + "man_bowing_medium_skin_tone": "🙇🏽\u200d♂️", + "man_cartwheeling": "🤸\u200d♂️", + "man_cartwheeling_dark_skin_tone": "🤸🏿\u200d♂️", + "man_cartwheeling_light_skin_tone": "🤸🏻\u200d♂️", + "man_cartwheeling_medium-dark_skin_tone": "🤸🏾\u200d♂️", + "man_cartwheeling_medium-light_skin_tone": "🤸🏼\u200d♂️", + "man_cartwheeling_medium_skin_tone": "🤸🏽\u200d♂️", + "man_climbing": "🧗\u200d♂️", + "man_climbing_dark_skin_tone": "🧗🏿\u200d♂️", + "man_climbing_light_skin_tone": "🧗🏻\u200d♂️", + "man_climbing_medium-dark_skin_tone": "🧗🏾\u200d♂️", + "man_climbing_medium-light_skin_tone": "🧗🏼\u200d♂️", + 
"man_climbing_medium_skin_tone": "🧗🏽\u200d♂️", + "man_construction_worker": "👷\u200d♂️", + "man_construction_worker_dark_skin_tone": "👷🏿\u200d♂️", + "man_construction_worker_light_skin_tone": "👷🏻\u200d♂️", + "man_construction_worker_medium-dark_skin_tone": "👷🏾\u200d♂️", + "man_construction_worker_medium-light_skin_tone": "👷🏼\u200d♂️", + "man_construction_worker_medium_skin_tone": "👷🏽\u200d♂️", + "man_cook": "👨\u200d🍳", + "man_cook_dark_skin_tone": "👨🏿\u200d🍳", + "man_cook_light_skin_tone": "👨🏻\u200d🍳", + "man_cook_medium-dark_skin_tone": "👨🏾\u200d🍳", + "man_cook_medium-light_skin_tone": "👨🏼\u200d🍳", + "man_cook_medium_skin_tone": "👨🏽\u200d🍳", + "man_dancing": "🕺", + "man_dancing_dark_skin_tone": "🕺🏿", + "man_dancing_light_skin_tone": "🕺🏻", + "man_dancing_medium-dark_skin_tone": "🕺🏾", + "man_dancing_medium-light_skin_tone": "🕺🏼", + "man_dancing_medium_skin_tone": "🕺🏽", + "man_dark_skin_tone": "👨🏿", + "man_detective": "🕵️\u200d♂️", + "man_detective_dark_skin_tone": "🕵🏿\u200d♂️", + "man_detective_light_skin_tone": "🕵🏻\u200d♂️", + "man_detective_medium-dark_skin_tone": "🕵🏾\u200d♂️", + "man_detective_medium-light_skin_tone": "🕵🏼\u200d♂️", + "man_detective_medium_skin_tone": "🕵🏽\u200d♂️", + "man_elf": "🧝\u200d♂️", + "man_elf_dark_skin_tone": "🧝🏿\u200d♂️", + "man_elf_light_skin_tone": "🧝🏻\u200d♂️", + "man_elf_medium-dark_skin_tone": "🧝🏾\u200d♂️", + "man_elf_medium-light_skin_tone": "🧝🏼\u200d♂️", + "man_elf_medium_skin_tone": "🧝🏽\u200d♂️", + "man_facepalming": "🤦\u200d♂️", + "man_facepalming_dark_skin_tone": "🤦🏿\u200d♂️", + "man_facepalming_light_skin_tone": "🤦🏻\u200d♂️", + "man_facepalming_medium-dark_skin_tone": "🤦🏾\u200d♂️", + "man_facepalming_medium-light_skin_tone": "🤦🏼\u200d♂️", + "man_facepalming_medium_skin_tone": "🤦🏽\u200d♂️", + "man_factory_worker": "👨\u200d🏭", + "man_factory_worker_dark_skin_tone": "👨🏿\u200d🏭", + "man_factory_worker_light_skin_tone": "👨🏻\u200d🏭", + "man_factory_worker_medium-dark_skin_tone": "👨🏾\u200d🏭", + "man_factory_worker_medium-light_skin_tone": "👨🏼\u200d🏭", + "man_factory_worker_medium_skin_tone": "👨🏽\u200d🏭", + "man_fairy": "🧚\u200d♂️", + "man_fairy_dark_skin_tone": "🧚🏿\u200d♂️", + "man_fairy_light_skin_tone": "🧚🏻\u200d♂️", + "man_fairy_medium-dark_skin_tone": "🧚🏾\u200d♂️", + "man_fairy_medium-light_skin_tone": "🧚🏼\u200d♂️", + "man_fairy_medium_skin_tone": "🧚🏽\u200d♂️", + "man_farmer": "👨\u200d🌾", + "man_farmer_dark_skin_tone": "👨🏿\u200d🌾", + "man_farmer_light_skin_tone": "👨🏻\u200d🌾", + "man_farmer_medium-dark_skin_tone": "👨🏾\u200d🌾", + "man_farmer_medium-light_skin_tone": "👨🏼\u200d🌾", + "man_farmer_medium_skin_tone": "👨🏽\u200d🌾", + "man_firefighter": "👨\u200d🚒", + "man_firefighter_dark_skin_tone": "👨🏿\u200d🚒", + "man_firefighter_light_skin_tone": "👨🏻\u200d🚒", + "man_firefighter_medium-dark_skin_tone": "👨🏾\u200d🚒", + "man_firefighter_medium-light_skin_tone": "👨🏼\u200d🚒", + "man_firefighter_medium_skin_tone": "👨🏽\u200d🚒", + "man_frowning": "🙍\u200d♂️", + "man_frowning_dark_skin_tone": "🙍🏿\u200d♂️", + "man_frowning_light_skin_tone": "🙍🏻\u200d♂️", + "man_frowning_medium-dark_skin_tone": "🙍🏾\u200d♂️", + "man_frowning_medium-light_skin_tone": "🙍🏼\u200d♂️", + "man_frowning_medium_skin_tone": "🙍🏽\u200d♂️", + "man_genie": "🧞\u200d♂️", + "man_gesturing_no": "🙅\u200d♂️", + "man_gesturing_no_dark_skin_tone": "🙅🏿\u200d♂️", + "man_gesturing_no_light_skin_tone": "🙅🏻\u200d♂️", + "man_gesturing_no_medium-dark_skin_tone": "🙅🏾\u200d♂️", + "man_gesturing_no_medium-light_skin_tone": "🙅🏼\u200d♂️", + "man_gesturing_no_medium_skin_tone": "🙅🏽\u200d♂️", + "man_gesturing_ok": "🙆\u200d♂️", 
+ "man_gesturing_ok_dark_skin_tone": "🙆🏿\u200d♂️", + "man_gesturing_ok_light_skin_tone": "🙆🏻\u200d♂️", + "man_gesturing_ok_medium-dark_skin_tone": "🙆🏾\u200d♂️", + "man_gesturing_ok_medium-light_skin_tone": "🙆🏼\u200d♂️", + "man_gesturing_ok_medium_skin_tone": "🙆🏽\u200d♂️", + "man_getting_haircut": "💇\u200d♂️", + "man_getting_haircut_dark_skin_tone": "💇🏿\u200d♂️", + "man_getting_haircut_light_skin_tone": "💇🏻\u200d♂️", + "man_getting_haircut_medium-dark_skin_tone": "💇🏾\u200d♂️", + "man_getting_haircut_medium-light_skin_tone": "💇🏼\u200d♂️", + "man_getting_haircut_medium_skin_tone": "💇🏽\u200d♂️", + "man_getting_massage": "💆\u200d♂️", + "man_getting_massage_dark_skin_tone": "💆🏿\u200d♂️", + "man_getting_massage_light_skin_tone": "💆🏻\u200d♂️", + "man_getting_massage_medium-dark_skin_tone": "💆🏾\u200d♂️", + "man_getting_massage_medium-light_skin_tone": "💆🏼\u200d♂️", + "man_getting_massage_medium_skin_tone": "💆🏽\u200d♂️", + "man_golfing": "🏌️\u200d♂️", + "man_golfing_dark_skin_tone": "🏌🏿\u200d♂️", + "man_golfing_light_skin_tone": "🏌🏻\u200d♂️", + "man_golfing_medium-dark_skin_tone": "🏌🏾\u200d♂️", + "man_golfing_medium-light_skin_tone": "🏌🏼\u200d♂️", + "man_golfing_medium_skin_tone": "🏌🏽\u200d♂️", + "man_guard": "💂\u200d♂️", + "man_guard_dark_skin_tone": "💂🏿\u200d♂️", + "man_guard_light_skin_tone": "💂🏻\u200d♂️", + "man_guard_medium-dark_skin_tone": "💂🏾\u200d♂️", + "man_guard_medium-light_skin_tone": "💂🏼\u200d♂️", + "man_guard_medium_skin_tone": "💂🏽\u200d♂️", + "man_health_worker": "👨\u200d⚕️", + "man_health_worker_dark_skin_tone": "👨🏿\u200d⚕️", + "man_health_worker_light_skin_tone": "👨🏻\u200d⚕️", + "man_health_worker_medium-dark_skin_tone": "👨🏾\u200d⚕️", + "man_health_worker_medium-light_skin_tone": "👨🏼\u200d⚕️", + "man_health_worker_medium_skin_tone": "👨🏽\u200d⚕️", + "man_in_lotus_position": "🧘\u200d♂️", + "man_in_lotus_position_dark_skin_tone": "🧘🏿\u200d♂️", + "man_in_lotus_position_light_skin_tone": "🧘🏻\u200d♂️", + "man_in_lotus_position_medium-dark_skin_tone": "🧘🏾\u200d♂️", + "man_in_lotus_position_medium-light_skin_tone": "🧘🏼\u200d♂️", + "man_in_lotus_position_medium_skin_tone": "🧘🏽\u200d♂️", + "man_in_manual_wheelchair": "👨\u200d🦽", + "man_in_motorized_wheelchair": "👨\u200d🦼", + "man_in_steamy_room": "🧖\u200d♂️", + "man_in_steamy_room_dark_skin_tone": "🧖🏿\u200d♂️", + "man_in_steamy_room_light_skin_tone": "🧖🏻\u200d♂️", + "man_in_steamy_room_medium-dark_skin_tone": "🧖🏾\u200d♂️", + "man_in_steamy_room_medium-light_skin_tone": "🧖🏼\u200d♂️", + "man_in_steamy_room_medium_skin_tone": "🧖🏽\u200d♂️", + "man_in_suit_levitating": "🕴", + "man_in_suit_levitating_dark_skin_tone": "🕴🏿", + "man_in_suit_levitating_light_skin_tone": "🕴🏻", + "man_in_suit_levitating_medium-dark_skin_tone": "🕴🏾", + "man_in_suit_levitating_medium-light_skin_tone": "🕴🏼", + "man_in_suit_levitating_medium_skin_tone": "🕴🏽", + "man_in_tuxedo": "🤵", + "man_in_tuxedo_dark_skin_tone": "🤵🏿", + "man_in_tuxedo_light_skin_tone": "🤵🏻", + "man_in_tuxedo_medium-dark_skin_tone": "🤵🏾", + "man_in_tuxedo_medium-light_skin_tone": "🤵🏼", + "man_in_tuxedo_medium_skin_tone": "🤵🏽", + "man_judge": "👨\u200d⚖️", + "man_judge_dark_skin_tone": "👨🏿\u200d⚖️", + "man_judge_light_skin_tone": "👨🏻\u200d⚖️", + "man_judge_medium-dark_skin_tone": "👨🏾\u200d⚖️", + "man_judge_medium-light_skin_tone": "👨🏼\u200d⚖️", + "man_judge_medium_skin_tone": "👨🏽\u200d⚖️", + "man_juggling": "🤹\u200d♂️", + "man_juggling_dark_skin_tone": "🤹🏿\u200d♂️", + "man_juggling_light_skin_tone": "🤹🏻\u200d♂️", + "man_juggling_medium-dark_skin_tone": "🤹🏾\u200d♂️", + "man_juggling_medium-light_skin_tone": 
"🤹🏼\u200d♂️", + "man_juggling_medium_skin_tone": "🤹🏽\u200d♂️", + "man_lifting_weights": "🏋️\u200d♂️", + "man_lifting_weights_dark_skin_tone": "🏋🏿\u200d♂️", + "man_lifting_weights_light_skin_tone": "🏋🏻\u200d♂️", + "man_lifting_weights_medium-dark_skin_tone": "🏋🏾\u200d♂️", + "man_lifting_weights_medium-light_skin_tone": "🏋🏼\u200d♂️", + "man_lifting_weights_medium_skin_tone": "🏋🏽\u200d♂️", + "man_light_skin_tone": "👨🏻", + "man_mage": "🧙\u200d♂️", + "man_mage_dark_skin_tone": "🧙🏿\u200d♂️", + "man_mage_light_skin_tone": "🧙🏻\u200d♂️", + "man_mage_medium-dark_skin_tone": "🧙🏾\u200d♂️", + "man_mage_medium-light_skin_tone": "🧙🏼\u200d♂️", + "man_mage_medium_skin_tone": "🧙🏽\u200d♂️", + "man_mechanic": "👨\u200d🔧", + "man_mechanic_dark_skin_tone": "👨🏿\u200d🔧", + "man_mechanic_light_skin_tone": "👨🏻\u200d🔧", + "man_mechanic_medium-dark_skin_tone": "👨🏾\u200d🔧", + "man_mechanic_medium-light_skin_tone": "👨🏼\u200d🔧", + "man_mechanic_medium_skin_tone": "👨🏽\u200d🔧", + "man_medium-dark_skin_tone": "👨🏾", + "man_medium-light_skin_tone": "👨🏼", + "man_medium_skin_tone": "👨🏽", + "man_mountain_biking": "🚵\u200d♂️", + "man_mountain_biking_dark_skin_tone": "🚵🏿\u200d♂️", + "man_mountain_biking_light_skin_tone": "🚵🏻\u200d♂️", + "man_mountain_biking_medium-dark_skin_tone": "🚵🏾\u200d♂️", + "man_mountain_biking_medium-light_skin_tone": "🚵🏼\u200d♂️", + "man_mountain_biking_medium_skin_tone": "🚵🏽\u200d♂️", + "man_office_worker": "👨\u200d💼", + "man_office_worker_dark_skin_tone": "👨🏿\u200d💼", + "man_office_worker_light_skin_tone": "👨🏻\u200d💼", + "man_office_worker_medium-dark_skin_tone": "👨🏾\u200d💼", + "man_office_worker_medium-light_skin_tone": "👨🏼\u200d💼", + "man_office_worker_medium_skin_tone": "👨🏽\u200d💼", + "man_pilot": "👨\u200d✈️", + "man_pilot_dark_skin_tone": "👨🏿\u200d✈️", + "man_pilot_light_skin_tone": "👨🏻\u200d✈️", + "man_pilot_medium-dark_skin_tone": "👨🏾\u200d✈️", + "man_pilot_medium-light_skin_tone": "👨🏼\u200d✈️", + "man_pilot_medium_skin_tone": "👨🏽\u200d✈️", + "man_playing_handball": "🤾\u200d♂️", + "man_playing_handball_dark_skin_tone": "🤾🏿\u200d♂️", + "man_playing_handball_light_skin_tone": "🤾🏻\u200d♂️", + "man_playing_handball_medium-dark_skin_tone": "🤾🏾\u200d♂️", + "man_playing_handball_medium-light_skin_tone": "🤾🏼\u200d♂️", + "man_playing_handball_medium_skin_tone": "🤾🏽\u200d♂️", + "man_playing_water_polo": "🤽\u200d♂️", + "man_playing_water_polo_dark_skin_tone": "🤽🏿\u200d♂️", + "man_playing_water_polo_light_skin_tone": "🤽🏻\u200d♂️", + "man_playing_water_polo_medium-dark_skin_tone": "🤽🏾\u200d♂️", + "man_playing_water_polo_medium-light_skin_tone": "🤽🏼\u200d♂️", + "man_playing_water_polo_medium_skin_tone": "🤽🏽\u200d♂️", + "man_police_officer": "👮\u200d♂️", + "man_police_officer_dark_skin_tone": "👮🏿\u200d♂️", + "man_police_officer_light_skin_tone": "👮🏻\u200d♂️", + "man_police_officer_medium-dark_skin_tone": "👮🏾\u200d♂️", + "man_police_officer_medium-light_skin_tone": "👮🏼\u200d♂️", + "man_police_officer_medium_skin_tone": "👮🏽\u200d♂️", + "man_pouting": "🙎\u200d♂️", + "man_pouting_dark_skin_tone": "🙎🏿\u200d♂️", + "man_pouting_light_skin_tone": "🙎🏻\u200d♂️", + "man_pouting_medium-dark_skin_tone": "🙎🏾\u200d♂️", + "man_pouting_medium-light_skin_tone": "🙎🏼\u200d♂️", + "man_pouting_medium_skin_tone": "🙎🏽\u200d♂️", + "man_raising_hand": "🙋\u200d♂️", + "man_raising_hand_dark_skin_tone": "🙋🏿\u200d♂️", + "man_raising_hand_light_skin_tone": "🙋🏻\u200d♂️", + "man_raising_hand_medium-dark_skin_tone": "🙋🏾\u200d♂️", + "man_raising_hand_medium-light_skin_tone": "🙋🏼\u200d♂️", + "man_raising_hand_medium_skin_tone": "🙋🏽\u200d♂️", + 
"man_rowing_boat": "🚣\u200d♂️", + "man_rowing_boat_dark_skin_tone": "🚣🏿\u200d♂️", + "man_rowing_boat_light_skin_tone": "🚣🏻\u200d♂️", + "man_rowing_boat_medium-dark_skin_tone": "🚣🏾\u200d♂️", + "man_rowing_boat_medium-light_skin_tone": "🚣🏼\u200d♂️", + "man_rowing_boat_medium_skin_tone": "🚣🏽\u200d♂️", + "man_running": "🏃\u200d♂️", + "man_running_dark_skin_tone": "🏃🏿\u200d♂️", + "man_running_light_skin_tone": "🏃🏻\u200d♂️", + "man_running_medium-dark_skin_tone": "🏃🏾\u200d♂️", + "man_running_medium-light_skin_tone": "🏃🏼\u200d♂️", + "man_running_medium_skin_tone": "🏃🏽\u200d♂️", + "man_scientist": "👨\u200d🔬", + "man_scientist_dark_skin_tone": "👨🏿\u200d🔬", + "man_scientist_light_skin_tone": "👨🏻\u200d🔬", + "man_scientist_medium-dark_skin_tone": "👨🏾\u200d🔬", + "man_scientist_medium-light_skin_tone": "👨🏼\u200d🔬", + "man_scientist_medium_skin_tone": "👨🏽\u200d🔬", + "man_shrugging": "🤷\u200d♂️", + "man_shrugging_dark_skin_tone": "🤷🏿\u200d♂️", + "man_shrugging_light_skin_tone": "🤷🏻\u200d♂️", + "man_shrugging_medium-dark_skin_tone": "🤷🏾\u200d♂️", + "man_shrugging_medium-light_skin_tone": "🤷🏼\u200d♂️", + "man_shrugging_medium_skin_tone": "🤷🏽\u200d♂️", + "man_singer": "👨\u200d🎤", + "man_singer_dark_skin_tone": "👨🏿\u200d🎤", + "man_singer_light_skin_tone": "👨🏻\u200d🎤", + "man_singer_medium-dark_skin_tone": "👨🏾\u200d🎤", + "man_singer_medium-light_skin_tone": "👨🏼\u200d🎤", + "man_singer_medium_skin_tone": "👨🏽\u200d🎤", + "man_student": "👨\u200d🎓", + "man_student_dark_skin_tone": "👨🏿\u200d🎓", + "man_student_light_skin_tone": "👨🏻\u200d🎓", + "man_student_medium-dark_skin_tone": "👨🏾\u200d🎓", + "man_student_medium-light_skin_tone": "👨🏼\u200d🎓", + "man_student_medium_skin_tone": "👨🏽\u200d🎓", + "man_surfing": "🏄\u200d♂️", + "man_surfing_dark_skin_tone": "🏄🏿\u200d♂️", + "man_surfing_light_skin_tone": "🏄🏻\u200d♂️", + "man_surfing_medium-dark_skin_tone": "🏄🏾\u200d♂️", + "man_surfing_medium-light_skin_tone": "🏄🏼\u200d♂️", + "man_surfing_medium_skin_tone": "🏄🏽\u200d♂️", + "man_swimming": "🏊\u200d♂️", + "man_swimming_dark_skin_tone": "🏊🏿\u200d♂️", + "man_swimming_light_skin_tone": "🏊🏻\u200d♂️", + "man_swimming_medium-dark_skin_tone": "🏊🏾\u200d♂️", + "man_swimming_medium-light_skin_tone": "🏊🏼\u200d♂️", + "man_swimming_medium_skin_tone": "🏊🏽\u200d♂️", + "man_teacher": "👨\u200d🏫", + "man_teacher_dark_skin_tone": "👨🏿\u200d🏫", + "man_teacher_light_skin_tone": "👨🏻\u200d🏫", + "man_teacher_medium-dark_skin_tone": "👨🏾\u200d🏫", + "man_teacher_medium-light_skin_tone": "👨🏼\u200d🏫", + "man_teacher_medium_skin_tone": "👨🏽\u200d🏫", + "man_technologist": "👨\u200d💻", + "man_technologist_dark_skin_tone": "👨🏿\u200d💻", + "man_technologist_light_skin_tone": "👨🏻\u200d💻", + "man_technologist_medium-dark_skin_tone": "👨🏾\u200d💻", + "man_technologist_medium-light_skin_tone": "👨🏼\u200d💻", + "man_technologist_medium_skin_tone": "👨🏽\u200d💻", + "man_tipping_hand": "💁\u200d♂️", + "man_tipping_hand_dark_skin_tone": "💁🏿\u200d♂️", + "man_tipping_hand_light_skin_tone": "💁🏻\u200d♂️", + "man_tipping_hand_medium-dark_skin_tone": "💁🏾\u200d♂️", + "man_tipping_hand_medium-light_skin_tone": "💁🏼\u200d♂️", + "man_tipping_hand_medium_skin_tone": "💁🏽\u200d♂️", + "man_vampire": "🧛\u200d♂️", + "man_vampire_dark_skin_tone": "🧛🏿\u200d♂️", + "man_vampire_light_skin_tone": "🧛🏻\u200d♂️", + "man_vampire_medium-dark_skin_tone": "🧛🏾\u200d♂️", + "man_vampire_medium-light_skin_tone": "🧛🏼\u200d♂️", + "man_vampire_medium_skin_tone": "🧛🏽\u200d♂️", + "man_walking": "🚶\u200d♂️", + "man_walking_dark_skin_tone": "🚶🏿\u200d♂️", + "man_walking_light_skin_tone": "🚶🏻\u200d♂️", + 
"man_walking_medium-dark_skin_tone": "🚶🏾\u200d♂️", + "man_walking_medium-light_skin_tone": "🚶🏼\u200d♂️", + "man_walking_medium_skin_tone": "🚶🏽\u200d♂️", + "man_wearing_turban": "👳\u200d♂️", + "man_wearing_turban_dark_skin_tone": "👳🏿\u200d♂️", + "man_wearing_turban_light_skin_tone": "👳🏻\u200d♂️", + "man_wearing_turban_medium-dark_skin_tone": "👳🏾\u200d♂️", + "man_wearing_turban_medium-light_skin_tone": "👳🏼\u200d♂️", + "man_wearing_turban_medium_skin_tone": "👳🏽\u200d♂️", + "man_with_probing_cane": "👨\u200d🦯", + "man_with_chinese_cap": "👲", + "man_with_chinese_cap_dark_skin_tone": "👲🏿", + "man_with_chinese_cap_light_skin_tone": "👲🏻", + "man_with_chinese_cap_medium-dark_skin_tone": "👲🏾", + "man_with_chinese_cap_medium-light_skin_tone": "👲🏼", + "man_with_chinese_cap_medium_skin_tone": "👲🏽", + "man_zombie": "🧟\u200d♂️", + "mango": "🥭", + "mantelpiece_clock": "🕰", + "manual_wheelchair": "🦽", + "man’s_shoe": "👞", + "map_of_japan": "🗾", + "maple_leaf": "🍁", + "martial_arts_uniform": "🥋", + "mate": "🧉", + "meat_on_bone": "🍖", + "mechanical_arm": "🦾", + "mechanical_leg": "🦿", + "medical_symbol": "⚕", + "megaphone": "📣", + "melon": "🍈", + "memo": "📝", + "men_with_bunny_ears": "👯\u200d♂️", + "men_wrestling": "🤼\u200d♂️", + "menorah": "🕎", + "men’s_room": "🚹", + "mermaid": "🧜\u200d♀️", + "mermaid_dark_skin_tone": "🧜🏿\u200d♀️", + "mermaid_light_skin_tone": "🧜🏻\u200d♀️", + "mermaid_medium-dark_skin_tone": "🧜🏾\u200d♀️", + "mermaid_medium-light_skin_tone": "🧜🏼\u200d♀️", + "mermaid_medium_skin_tone": "🧜🏽\u200d♀️", + "merman": "🧜\u200d♂️", + "merman_dark_skin_tone": "🧜🏿\u200d♂️", + "merman_light_skin_tone": "🧜🏻\u200d♂️", + "merman_medium-dark_skin_tone": "🧜🏾\u200d♂️", + "merman_medium-light_skin_tone": "🧜🏼\u200d♂️", + "merman_medium_skin_tone": "🧜🏽\u200d♂️", + "merperson": "🧜", + "merperson_dark_skin_tone": "🧜🏿", + "merperson_light_skin_tone": "🧜🏻", + "merperson_medium-dark_skin_tone": "🧜🏾", + "merperson_medium-light_skin_tone": "🧜🏼", + "merperson_medium_skin_tone": "🧜🏽", + "metro": "🚇", + "microbe": "🦠", + "microphone": "🎤", + "microscope": "🔬", + "middle_finger": "🖕", + "middle_finger_dark_skin_tone": "🖕🏿", + "middle_finger_light_skin_tone": "🖕🏻", + "middle_finger_medium-dark_skin_tone": "🖕🏾", + "middle_finger_medium-light_skin_tone": "🖕🏼", + "middle_finger_medium_skin_tone": "🖕🏽", + "military_medal": "🎖", + "milky_way": "🌌", + "minibus": "🚐", + "moai": "🗿", + "mobile_phone": "📱", + "mobile_phone_off": "📴", + "mobile_phone_with_arrow": "📲", + "money-mouth_face": "🤑", + "money_bag": "💰", + "money_with_wings": "💸", + "monkey": "🐒", + "monkey_face": "🐵", + "monorail": "🚝", + "moon_cake": "🥮", + "moon_viewing_ceremony": "🎑", + "mosque": "🕌", + "mosquito": "🦟", + "motor_boat": "🛥", + "motor_scooter": "🛵", + "motorcycle": "🏍", + "motorized_wheelchair": "🦼", + "motorway": "🛣", + "mount_fuji": "🗻", + "mountain": "⛰", + "mountain_cableway": "🚠", + "mountain_railway": "🚞", + "mouse": "🐭", + "mouse_face": "🐭", + "mouth": "👄", + "movie_camera": "🎥", + "mushroom": "🍄", + "musical_keyboard": "🎹", + "musical_note": "🎵", + "musical_notes": "🎶", + "musical_score": "🎼", + "muted_speaker": "🔇", + "nail_polish": "💅", + "nail_polish_dark_skin_tone": "💅🏿", + "nail_polish_light_skin_tone": "💅🏻", + "nail_polish_medium-dark_skin_tone": "💅🏾", + "nail_polish_medium-light_skin_tone": "💅🏼", + "nail_polish_medium_skin_tone": "💅🏽", + "name_badge": "📛", + "national_park": "🏞", + "nauseated_face": "🤢", + "nazar_amulet": "🧿", + "necktie": "👔", + "nerd_face": "🤓", + "neutral_face": "😐", + "new_moon": "🌑", + "new_moon_face": "🌚", + 
"newspaper": "📰", + "next_track_button": "⏭", + "night_with_stars": "🌃", + "nine-thirty": "🕤", + "nine_o’clock": "🕘", + "no_bicycles": "🚳", + "no_entry": "⛔", + "no_littering": "🚯", + "no_mobile_phones": "📵", + "no_one_under_eighteen": "🔞", + "no_pedestrians": "🚷", + "no_smoking": "🚭", + "non-potable_water": "🚱", + "nose": "👃", + "nose_dark_skin_tone": "👃🏿", + "nose_light_skin_tone": "👃🏻", + "nose_medium-dark_skin_tone": "👃🏾", + "nose_medium-light_skin_tone": "👃🏼", + "nose_medium_skin_tone": "👃🏽", + "notebook": "📓", + "notebook_with_decorative_cover": "📔", + "nut_and_bolt": "🔩", + "octopus": "🐙", + "oden": "🍢", + "office_building": "🏢", + "ogre": "👹", + "oil_drum": "🛢", + "old_key": "🗝", + "old_man": "👴", + "old_man_dark_skin_tone": "👴🏿", + "old_man_light_skin_tone": "👴🏻", + "old_man_medium-dark_skin_tone": "👴🏾", + "old_man_medium-light_skin_tone": "👴🏼", + "old_man_medium_skin_tone": "👴🏽", + "old_woman": "👵", + "old_woman_dark_skin_tone": "👵🏿", + "old_woman_light_skin_tone": "👵🏻", + "old_woman_medium-dark_skin_tone": "👵🏾", + "old_woman_medium-light_skin_tone": "👵🏼", + "old_woman_medium_skin_tone": "👵🏽", + "older_adult": "🧓", + "older_adult_dark_skin_tone": "🧓🏿", + "older_adult_light_skin_tone": "🧓🏻", + "older_adult_medium-dark_skin_tone": "🧓🏾", + "older_adult_medium-light_skin_tone": "🧓🏼", + "older_adult_medium_skin_tone": "🧓🏽", + "om": "🕉", + "oncoming_automobile": "🚘", + "oncoming_bus": "🚍", + "oncoming_fist": "👊", + "oncoming_fist_dark_skin_tone": "👊🏿", + "oncoming_fist_light_skin_tone": "👊🏻", + "oncoming_fist_medium-dark_skin_tone": "👊🏾", + "oncoming_fist_medium-light_skin_tone": "👊🏼", + "oncoming_fist_medium_skin_tone": "👊🏽", + "oncoming_police_car": "🚔", + "oncoming_taxi": "🚖", + "one-piece_swimsuit": "🩱", + "one-thirty": "🕜", + "one_o’clock": "🕐", + "onion": "🧅", + "open_book": "📖", + "open_file_folder": "📂", + "open_hands": "👐", + "open_hands_dark_skin_tone": "👐🏿", + "open_hands_light_skin_tone": "👐🏻", + "open_hands_medium-dark_skin_tone": "👐🏾", + "open_hands_medium-light_skin_tone": "👐🏼", + "open_hands_medium_skin_tone": "👐🏽", + "open_mailbox_with_lowered_flag": "📭", + "open_mailbox_with_raised_flag": "📬", + "optical_disk": "💿", + "orange_book": "📙", + "orange_circle": "🟠", + "orange_heart": "🧡", + "orange_square": "🟧", + "orangutan": "🦧", + "orthodox_cross": "☦", + "otter": "🦦", + "outbox_tray": "📤", + "owl": "🦉", + "ox": "🐂", + "oyster": "🦪", + "package": "📦", + "page_facing_up": "📄", + "page_with_curl": "📃", + "pager": "📟", + "paintbrush": "🖌", + "palm_tree": "🌴", + "palms_up_together": "🤲", + "palms_up_together_dark_skin_tone": "🤲🏿", + "palms_up_together_light_skin_tone": "🤲🏻", + "palms_up_together_medium-dark_skin_tone": "🤲🏾", + "palms_up_together_medium-light_skin_tone": "🤲🏼", + "palms_up_together_medium_skin_tone": "🤲🏽", + "pancakes": "🥞", + "panda_face": "🐼", + "paperclip": "📎", + "parrot": "🦜", + "part_alternation_mark": "〽", + "party_popper": "🎉", + "partying_face": "🥳", + "passenger_ship": "🛳", + "passport_control": "🛂", + "pause_button": "⏸", + "paw_prints": "🐾", + "peace_symbol": "☮", + "peach": "🍑", + "peacock": "🦚", + "peanuts": "🥜", + "pear": "🍐", + "pen": "🖊", + "pencil": "📝", + "penguin": "🐧", + "pensive_face": "😔", + "people_holding_hands": "🧑\u200d🤝\u200d🧑", + "people_with_bunny_ears": "👯", + "people_wrestling": "🤼", + "performing_arts": "🎭", + "persevering_face": "😣", + "person_biking": "🚴", + "person_biking_dark_skin_tone": "🚴🏿", + "person_biking_light_skin_tone": "🚴🏻", + "person_biking_medium-dark_skin_tone": "🚴🏾", + "person_biking_medium-light_skin_tone": 
"🚴🏼", + "person_biking_medium_skin_tone": "🚴🏽", + "person_bouncing_ball": "⛹", + "person_bouncing_ball_dark_skin_tone": "⛹🏿", + "person_bouncing_ball_light_skin_tone": "⛹🏻", + "person_bouncing_ball_medium-dark_skin_tone": "⛹🏾", + "person_bouncing_ball_medium-light_skin_tone": "⛹🏼", + "person_bouncing_ball_medium_skin_tone": "⛹🏽", + "person_bowing": "🙇", + "person_bowing_dark_skin_tone": "🙇🏿", + "person_bowing_light_skin_tone": "🙇🏻", + "person_bowing_medium-dark_skin_tone": "🙇🏾", + "person_bowing_medium-light_skin_tone": "🙇🏼", + "person_bowing_medium_skin_tone": "🙇🏽", + "person_cartwheeling": "🤸", + "person_cartwheeling_dark_skin_tone": "🤸🏿", + "person_cartwheeling_light_skin_tone": "🤸🏻", + "person_cartwheeling_medium-dark_skin_tone": "🤸🏾", + "person_cartwheeling_medium-light_skin_tone": "🤸🏼", + "person_cartwheeling_medium_skin_tone": "🤸🏽", + "person_climbing": "🧗", + "person_climbing_dark_skin_tone": "🧗🏿", + "person_climbing_light_skin_tone": "🧗🏻", + "person_climbing_medium-dark_skin_tone": "🧗🏾", + "person_climbing_medium-light_skin_tone": "🧗🏼", + "person_climbing_medium_skin_tone": "🧗🏽", + "person_facepalming": "🤦", + "person_facepalming_dark_skin_tone": "🤦🏿", + "person_facepalming_light_skin_tone": "🤦🏻", + "person_facepalming_medium-dark_skin_tone": "🤦🏾", + "person_facepalming_medium-light_skin_tone": "🤦🏼", + "person_facepalming_medium_skin_tone": "🤦🏽", + "person_fencing": "🤺", + "person_frowning": "🙍", + "person_frowning_dark_skin_tone": "🙍🏿", + "person_frowning_light_skin_tone": "🙍🏻", + "person_frowning_medium-dark_skin_tone": "🙍🏾", + "person_frowning_medium-light_skin_tone": "🙍🏼", + "person_frowning_medium_skin_tone": "🙍🏽", + "person_gesturing_no": "🙅", + "person_gesturing_no_dark_skin_tone": "🙅🏿", + "person_gesturing_no_light_skin_tone": "🙅🏻", + "person_gesturing_no_medium-dark_skin_tone": "🙅🏾", + "person_gesturing_no_medium-light_skin_tone": "🙅🏼", + "person_gesturing_no_medium_skin_tone": "🙅🏽", + "person_gesturing_ok": "🙆", + "person_gesturing_ok_dark_skin_tone": "🙆🏿", + "person_gesturing_ok_light_skin_tone": "🙆🏻", + "person_gesturing_ok_medium-dark_skin_tone": "🙆🏾", + "person_gesturing_ok_medium-light_skin_tone": "🙆🏼", + "person_gesturing_ok_medium_skin_tone": "🙆🏽", + "person_getting_haircut": "💇", + "person_getting_haircut_dark_skin_tone": "💇🏿", + "person_getting_haircut_light_skin_tone": "💇🏻", + "person_getting_haircut_medium-dark_skin_tone": "💇🏾", + "person_getting_haircut_medium-light_skin_tone": "💇🏼", + "person_getting_haircut_medium_skin_tone": "💇🏽", + "person_getting_massage": "💆", + "person_getting_massage_dark_skin_tone": "💆🏿", + "person_getting_massage_light_skin_tone": "💆🏻", + "person_getting_massage_medium-dark_skin_tone": "💆🏾", + "person_getting_massage_medium-light_skin_tone": "💆🏼", + "person_getting_massage_medium_skin_tone": "💆🏽", + "person_golfing": "🏌", + "person_golfing_dark_skin_tone": "🏌🏿", + "person_golfing_light_skin_tone": "🏌🏻", + "person_golfing_medium-dark_skin_tone": "🏌🏾", + "person_golfing_medium-light_skin_tone": "🏌🏼", + "person_golfing_medium_skin_tone": "🏌🏽", + "person_in_bed": "🛌", + "person_in_bed_dark_skin_tone": "🛌🏿", + "person_in_bed_light_skin_tone": "🛌🏻", + "person_in_bed_medium-dark_skin_tone": "🛌🏾", + "person_in_bed_medium-light_skin_tone": "🛌🏼", + "person_in_bed_medium_skin_tone": "🛌🏽", + "person_in_lotus_position": "🧘", + "person_in_lotus_position_dark_skin_tone": "🧘🏿", + "person_in_lotus_position_light_skin_tone": "🧘🏻", + "person_in_lotus_position_medium-dark_skin_tone": "🧘🏾", + "person_in_lotus_position_medium-light_skin_tone": "🧘🏼", + 
"person_in_lotus_position_medium_skin_tone": "🧘🏽", + "person_in_steamy_room": "🧖", + "person_in_steamy_room_dark_skin_tone": "🧖🏿", + "person_in_steamy_room_light_skin_tone": "🧖🏻", + "person_in_steamy_room_medium-dark_skin_tone": "🧖🏾", + "person_in_steamy_room_medium-light_skin_tone": "🧖🏼", + "person_in_steamy_room_medium_skin_tone": "🧖🏽", + "person_juggling": "🤹", + "person_juggling_dark_skin_tone": "🤹🏿", + "person_juggling_light_skin_tone": "🤹🏻", + "person_juggling_medium-dark_skin_tone": "🤹🏾", + "person_juggling_medium-light_skin_tone": "🤹🏼", + "person_juggling_medium_skin_tone": "🤹🏽", + "person_kneeling": "🧎", + "person_lifting_weights": "🏋", + "person_lifting_weights_dark_skin_tone": "🏋🏿", + "person_lifting_weights_light_skin_tone": "🏋🏻", + "person_lifting_weights_medium-dark_skin_tone": "🏋🏾", + "person_lifting_weights_medium-light_skin_tone": "🏋🏼", + "person_lifting_weights_medium_skin_tone": "🏋🏽", + "person_mountain_biking": "🚵", + "person_mountain_biking_dark_skin_tone": "🚵🏿", + "person_mountain_biking_light_skin_tone": "🚵🏻", + "person_mountain_biking_medium-dark_skin_tone": "🚵🏾", + "person_mountain_biking_medium-light_skin_tone": "🚵🏼", + "person_mountain_biking_medium_skin_tone": "🚵🏽", + "person_playing_handball": "🤾", + "person_playing_handball_dark_skin_tone": "🤾🏿", + "person_playing_handball_light_skin_tone": "🤾🏻", + "person_playing_handball_medium-dark_skin_tone": "🤾🏾", + "person_playing_handball_medium-light_skin_tone": "🤾🏼", + "person_playing_handball_medium_skin_tone": "🤾🏽", + "person_playing_water_polo": "🤽", + "person_playing_water_polo_dark_skin_tone": "🤽🏿", + "person_playing_water_polo_light_skin_tone": "🤽🏻", + "person_playing_water_polo_medium-dark_skin_tone": "🤽🏾", + "person_playing_water_polo_medium-light_skin_tone": "🤽🏼", + "person_playing_water_polo_medium_skin_tone": "🤽🏽", + "person_pouting": "🙎", + "person_pouting_dark_skin_tone": "🙎🏿", + "person_pouting_light_skin_tone": "🙎🏻", + "person_pouting_medium-dark_skin_tone": "🙎🏾", + "person_pouting_medium-light_skin_tone": "🙎🏼", + "person_pouting_medium_skin_tone": "🙎🏽", + "person_raising_hand": "🙋", + "person_raising_hand_dark_skin_tone": "🙋🏿", + "person_raising_hand_light_skin_tone": "🙋🏻", + "person_raising_hand_medium-dark_skin_tone": "🙋🏾", + "person_raising_hand_medium-light_skin_tone": "🙋🏼", + "person_raising_hand_medium_skin_tone": "🙋🏽", + "person_rowing_boat": "🚣", + "person_rowing_boat_dark_skin_tone": "🚣🏿", + "person_rowing_boat_light_skin_tone": "🚣🏻", + "person_rowing_boat_medium-dark_skin_tone": "🚣🏾", + "person_rowing_boat_medium-light_skin_tone": "🚣🏼", + "person_rowing_boat_medium_skin_tone": "🚣🏽", + "person_running": "🏃", + "person_running_dark_skin_tone": "🏃🏿", + "person_running_light_skin_tone": "🏃🏻", + "person_running_medium-dark_skin_tone": "🏃🏾", + "person_running_medium-light_skin_tone": "🏃🏼", + "person_running_medium_skin_tone": "🏃🏽", + "person_shrugging": "🤷", + "person_shrugging_dark_skin_tone": "🤷🏿", + "person_shrugging_light_skin_tone": "🤷🏻", + "person_shrugging_medium-dark_skin_tone": "🤷🏾", + "person_shrugging_medium-light_skin_tone": "🤷🏼", + "person_shrugging_medium_skin_tone": "🤷🏽", + "person_standing": "🧍", + "person_surfing": "🏄", + "person_surfing_dark_skin_tone": "🏄🏿", + "person_surfing_light_skin_tone": "🏄🏻", + "person_surfing_medium-dark_skin_tone": "🏄🏾", + "person_surfing_medium-light_skin_tone": "🏄🏼", + "person_surfing_medium_skin_tone": "🏄🏽", + "person_swimming": "🏊", + "person_swimming_dark_skin_tone": "🏊🏿", + "person_swimming_light_skin_tone": "🏊🏻", + 
"person_swimming_medium-dark_skin_tone": "🏊🏾", + "person_swimming_medium-light_skin_tone": "🏊🏼", + "person_swimming_medium_skin_tone": "🏊🏽", + "person_taking_bath": "🛀", + "person_taking_bath_dark_skin_tone": "🛀🏿", + "person_taking_bath_light_skin_tone": "🛀🏻", + "person_taking_bath_medium-dark_skin_tone": "🛀🏾", + "person_taking_bath_medium-light_skin_tone": "🛀🏼", + "person_taking_bath_medium_skin_tone": "🛀🏽", + "person_tipping_hand": "💁", + "person_tipping_hand_dark_skin_tone": "💁🏿", + "person_tipping_hand_light_skin_tone": "💁🏻", + "person_tipping_hand_medium-dark_skin_tone": "💁🏾", + "person_tipping_hand_medium-light_skin_tone": "💁🏼", + "person_tipping_hand_medium_skin_tone": "💁🏽", + "person_walking": "🚶", + "person_walking_dark_skin_tone": "🚶🏿", + "person_walking_light_skin_tone": "🚶🏻", + "person_walking_medium-dark_skin_tone": "🚶🏾", + "person_walking_medium-light_skin_tone": "🚶🏼", + "person_walking_medium_skin_tone": "🚶🏽", + "person_wearing_turban": "👳", + "person_wearing_turban_dark_skin_tone": "👳🏿", + "person_wearing_turban_light_skin_tone": "👳🏻", + "person_wearing_turban_medium-dark_skin_tone": "👳🏾", + "person_wearing_turban_medium-light_skin_tone": "👳🏼", + "person_wearing_turban_medium_skin_tone": "👳🏽", + "petri_dish": "🧫", + "pick": "⛏", + "pie": "🥧", + "pig": "🐷", + "pig_face": "🐷", + "pig_nose": "🐽", + "pile_of_poo": "💩", + "pill": "💊", + "pinching_hand": "🤏", + "pine_decoration": "🎍", + "pineapple": "🍍", + "ping_pong": "🏓", + "pirate_flag": "🏴\u200d☠️", + "pistol": "🔫", + "pizza": "🍕", + "place_of_worship": "🛐", + "play_button": "▶", + "play_or_pause_button": "⏯", + "pleading_face": "🥺", + "police_car": "🚓", + "police_car_light": "🚨", + "police_officer": "👮", + "police_officer_dark_skin_tone": "👮🏿", + "police_officer_light_skin_tone": "👮🏻", + "police_officer_medium-dark_skin_tone": "👮🏾", + "police_officer_medium-light_skin_tone": "👮🏼", + "police_officer_medium_skin_tone": "👮🏽", + "poodle": "🐩", + "pool_8_ball": "🎱", + "popcorn": "🍿", + "post_office": "🏣", + "postal_horn": "📯", + "postbox": "📮", + "pot_of_food": "🍲", + "potable_water": "🚰", + "potato": "🥔", + "poultry_leg": "🍗", + "pound_banknote": "💷", + "pouting_cat_face": "😾", + "pouting_face": "😡", + "prayer_beads": "📿", + "pregnant_woman": "🤰", + "pregnant_woman_dark_skin_tone": "🤰🏿", + "pregnant_woman_light_skin_tone": "🤰🏻", + "pregnant_woman_medium-dark_skin_tone": "🤰🏾", + "pregnant_woman_medium-light_skin_tone": "🤰🏼", + "pregnant_woman_medium_skin_tone": "🤰🏽", + "pretzel": "🥨", + "probing_cane": "🦯", + "prince": "🤴", + "prince_dark_skin_tone": "🤴🏿", + "prince_light_skin_tone": "🤴🏻", + "prince_medium-dark_skin_tone": "🤴🏾", + "prince_medium-light_skin_tone": "🤴🏼", + "prince_medium_skin_tone": "🤴🏽", + "princess": "👸", + "princess_dark_skin_tone": "👸🏿", + "princess_light_skin_tone": "👸🏻", + "princess_medium-dark_skin_tone": "👸🏾", + "princess_medium-light_skin_tone": "👸🏼", + "princess_medium_skin_tone": "👸🏽", + "printer": "🖨", + "prohibited": "🚫", + "purple_circle": "🟣", + "purple_heart": "💜", + "purple_square": "🟪", + "purse": "👛", + "pushpin": "📌", + "question_mark": "❓", + "rabbit": "🐰", + "rabbit_face": "🐰", + "raccoon": "🦝", + "racing_car": "🏎", + "radio": "📻", + "radio_button": "🔘", + "radioactive": "☢", + "railway_car": "🚃", + "railway_track": "🛤", + "rainbow": "🌈", + "rainbow_flag": "🏳️\u200d🌈", + "raised_back_of_hand": "🤚", + "raised_back_of_hand_dark_skin_tone": "🤚🏿", + "raised_back_of_hand_light_skin_tone": "🤚🏻", + "raised_back_of_hand_medium-dark_skin_tone": "🤚🏾", + "raised_back_of_hand_medium-light_skin_tone": 
"🤚🏼", + "raised_back_of_hand_medium_skin_tone": "🤚🏽", + "raised_fist": "✊", + "raised_fist_dark_skin_tone": "✊🏿", + "raised_fist_light_skin_tone": "✊🏻", + "raised_fist_medium-dark_skin_tone": "✊🏾", + "raised_fist_medium-light_skin_tone": "✊🏼", + "raised_fist_medium_skin_tone": "✊🏽", + "raised_hand": "✋", + "raised_hand_dark_skin_tone": "✋🏿", + "raised_hand_light_skin_tone": "✋🏻", + "raised_hand_medium-dark_skin_tone": "✋🏾", + "raised_hand_medium-light_skin_tone": "✋🏼", + "raised_hand_medium_skin_tone": "✋🏽", + "raising_hands": "🙌", + "raising_hands_dark_skin_tone": "🙌🏿", + "raising_hands_light_skin_tone": "🙌🏻", + "raising_hands_medium-dark_skin_tone": "🙌🏾", + "raising_hands_medium-light_skin_tone": "🙌🏼", + "raising_hands_medium_skin_tone": "🙌🏽", + "ram": "🐏", + "rat": "🐀", + "razor": "🪒", + "ringed_planet": "🪐", + "receipt": "🧾", + "record_button": "⏺", + "recycling_symbol": "♻", + "red_apple": "🍎", + "red_circle": "🔴", + "red_envelope": "🧧", + "red_hair": "🦰", + "red-haired_man": "👨\u200d🦰", + "red-haired_woman": "👩\u200d🦰", + "red_heart": "❤", + "red_paper_lantern": "🏮", + "red_square": "🟥", + "red_triangle_pointed_down": "🔻", + "red_triangle_pointed_up": "🔺", + "registered": "®", + "relieved_face": "😌", + "reminder_ribbon": "🎗", + "repeat_button": "🔁", + "repeat_single_button": "🔂", + "rescue_worker’s_helmet": "⛑", + "restroom": "🚻", + "reverse_button": "◀", + "revolving_hearts": "💞", + "rhinoceros": "🦏", + "ribbon": "🎀", + "rice_ball": "🍙", + "rice_cracker": "🍘", + "right-facing_fist": "🤜", + "right-facing_fist_dark_skin_tone": "🤜🏿", + "right-facing_fist_light_skin_tone": "🤜🏻", + "right-facing_fist_medium-dark_skin_tone": "🤜🏾", + "right-facing_fist_medium-light_skin_tone": "🤜🏼", + "right-facing_fist_medium_skin_tone": "🤜🏽", + "right_anger_bubble": "🗯", + "right_arrow": "➡", + "right_arrow_curving_down": "⤵", + "right_arrow_curving_left": "↩", + "right_arrow_curving_up": "⤴", + "ring": "💍", + "roasted_sweet_potato": "🍠", + "robot_face": "🤖", + "rocket": "🚀", + "roll_of_paper": "🧻", + "rolled-up_newspaper": "🗞", + "roller_coaster": "🎢", + "rolling_on_the_floor_laughing": "🤣", + "rooster": "🐓", + "rose": "🌹", + "rosette": "🏵", + "round_pushpin": "📍", + "rugby_football": "🏉", + "running_shirt": "🎽", + "running_shoe": "👟", + "sad_but_relieved_face": "😥", + "safety_pin": "🧷", + "safety_vest": "🦺", + "salt": "🧂", + "sailboat": "⛵", + "sake": "🍶", + "sandwich": "🥪", + "sari": "🥻", + "satellite": "📡", + "satellite_antenna": "📡", + "sauropod": "🦕", + "saxophone": "🎷", + "scarf": "🧣", + "school": "🏫", + "school_backpack": "🎒", + "scissors": "✂", + "scorpion": "🦂", + "scroll": "📜", + "seat": "💺", + "see-no-evil_monkey": "🙈", + "seedling": "🌱", + "selfie": "🤳", + "selfie_dark_skin_tone": "🤳🏿", + "selfie_light_skin_tone": "🤳🏻", + "selfie_medium-dark_skin_tone": "🤳🏾", + "selfie_medium-light_skin_tone": "🤳🏼", + "selfie_medium_skin_tone": "🤳🏽", + "service_dog": "🐕\u200d🦺", + "seven-thirty": "🕢", + "seven_o’clock": "🕖", + "shallow_pan_of_food": "🥘", + "shamrock": "☘", + "shark": "🦈", + "shaved_ice": "🍧", + "sheaf_of_rice": "🌾", + "shield": "🛡", + "shinto_shrine": "⛩", + "ship": "🚢", + "shooting_star": "🌠", + "shopping_bags": "🛍", + "shopping_cart": "🛒", + "shortcake": "🍰", + "shorts": "🩳", + "shower": "🚿", + "shrimp": "🦐", + "shuffle_tracks_button": "🔀", + "shushing_face": "🤫", + "sign_of_the_horns": "🤘", + "sign_of_the_horns_dark_skin_tone": "🤘🏿", + "sign_of_the_horns_light_skin_tone": "🤘🏻", + "sign_of_the_horns_medium-dark_skin_tone": "🤘🏾", + "sign_of_the_horns_medium-light_skin_tone": "🤘🏼", + 
"sign_of_the_horns_medium_skin_tone": "🤘🏽", + "six-thirty": "🕡", + "six_o’clock": "🕕", + "skateboard": "🛹", + "skier": "⛷", + "skis": "🎿", + "skull": "💀", + "skull_and_crossbones": "☠", + "skunk": "🦨", + "sled": "🛷", + "sleeping_face": "😴", + "sleepy_face": "😪", + "slightly_frowning_face": "🙁", + "slightly_smiling_face": "🙂", + "slot_machine": "🎰", + "sloth": "🦥", + "small_airplane": "🛩", + "small_blue_diamond": "🔹", + "small_orange_diamond": "🔸", + "smiling_cat_face_with_heart-eyes": "😻", + "smiling_face": "☺", + "smiling_face_with_halo": "😇", + "smiling_face_with_3_hearts": "🥰", + "smiling_face_with_heart-eyes": "😍", + "smiling_face_with_horns": "😈", + "smiling_face_with_smiling_eyes": "😊", + "smiling_face_with_sunglasses": "😎", + "smirking_face": "😏", + "snail": "🐌", + "snake": "🐍", + "sneezing_face": "🤧", + "snow-capped_mountain": "🏔", + "snowboarder": "🏂", + "snowboarder_dark_skin_tone": "🏂🏿", + "snowboarder_light_skin_tone": "🏂🏻", + "snowboarder_medium-dark_skin_tone": "🏂🏾", + "snowboarder_medium-light_skin_tone": "🏂🏼", + "snowboarder_medium_skin_tone": "🏂🏽", + "snowflake": "❄", + "snowman": "☃", + "snowman_without_snow": "⛄", + "soap": "🧼", + "soccer_ball": "⚽", + "socks": "🧦", + "softball": "🥎", + "soft_ice_cream": "🍦", + "spade_suit": "♠", + "spaghetti": "🍝", + "sparkle": "❇", + "sparkler": "🎇", + "sparkles": "✨", + "sparkling_heart": "💖", + "speak-no-evil_monkey": "🙊", + "speaker_high_volume": "🔊", + "speaker_low_volume": "🔈", + "speaker_medium_volume": "🔉", + "speaking_head": "🗣", + "speech_balloon": "💬", + "speedboat": "🚤", + "spider": "🕷", + "spider_web": "🕸", + "spiral_calendar": "🗓", + "spiral_notepad": "🗒", + "spiral_shell": "🐚", + "spoon": "🥄", + "sponge": "🧽", + "sport_utility_vehicle": "🚙", + "sports_medal": "🏅", + "spouting_whale": "🐳", + "squid": "🦑", + "squinting_face_with_tongue": "😝", + "stadium": "🏟", + "star-struck": "🤩", + "star_and_crescent": "☪", + "star_of_david": "✡", + "station": "🚉", + "steaming_bowl": "🍜", + "stethoscope": "🩺", + "stop_button": "⏹", + "stop_sign": "🛑", + "stopwatch": "⏱", + "straight_ruler": "📏", + "strawberry": "🍓", + "studio_microphone": "🎙", + "stuffed_flatbread": "🥙", + "sun": "☀", + "sun_behind_cloud": "⛅", + "sun_behind_large_cloud": "🌥", + "sun_behind_rain_cloud": "🌦", + "sun_behind_small_cloud": "🌤", + "sun_with_face": "🌞", + "sunflower": "🌻", + "sunglasses": "😎", + "sunrise": "🌅", + "sunrise_over_mountains": "🌄", + "sunset": "🌇", + "superhero": "🦸", + "supervillain": "🦹", + "sushi": "🍣", + "suspension_railway": "🚟", + "swan": "🦢", + "sweat_droplets": "💦", + "synagogue": "🕍", + "syringe": "💉", + "t-shirt": "👕", + "taco": "🌮", + "takeout_box": "🥡", + "tanabata_tree": "🎋", + "tangerine": "🍊", + "taxi": "🚕", + "teacup_without_handle": "🍵", + "tear-off_calendar": "📆", + "teddy_bear": "🧸", + "telephone": "☎", + "telephone_receiver": "📞", + "telescope": "🔭", + "television": "📺", + "ten-thirty": "🕥", + "ten_o’clock": "🕙", + "tennis": "🎾", + "tent": "⛺", + "test_tube": "🧪", + "thermometer": "🌡", + "thinking_face": "🤔", + "thought_balloon": "💭", + "thread": "🧵", + "three-thirty": "🕞", + "three_o’clock": "🕒", + "thumbs_down": "👎", + "thumbs_down_dark_skin_tone": "👎🏿", + "thumbs_down_light_skin_tone": "👎🏻", + "thumbs_down_medium-dark_skin_tone": "👎🏾", + "thumbs_down_medium-light_skin_tone": "👎🏼", + "thumbs_down_medium_skin_tone": "👎🏽", + "thumbs_up": "👍", + "thumbs_up_dark_skin_tone": "👍🏿", + "thumbs_up_light_skin_tone": "👍🏻", + "thumbs_up_medium-dark_skin_tone": "👍🏾", + "thumbs_up_medium-light_skin_tone": "👍🏼", + 
"thumbs_up_medium_skin_tone": "👍🏽", + "ticket": "🎫", + "tiger": "🐯", + "tiger_face": "🐯", + "timer_clock": "⏲", + "tired_face": "😫", + "toolbox": "🧰", + "toilet": "🚽", + "tomato": "🍅", + "tongue": "👅", + "tooth": "🦷", + "top_hat": "🎩", + "tornado": "🌪", + "trackball": "🖲", + "tractor": "🚜", + "trade_mark": "™", + "train": "🚋", + "tram": "🚊", + "tram_car": "🚋", + "triangular_flag": "🚩", + "triangular_ruler": "📐", + "trident_emblem": "🔱", + "trolleybus": "🚎", + "trophy": "🏆", + "tropical_drink": "🍹", + "tropical_fish": "🐠", + "trumpet": "🎺", + "tulip": "🌷", + "tumbler_glass": "🥃", + "turtle": "🐢", + "twelve-thirty": "🕧", + "twelve_o’clock": "🕛", + "two-hump_camel": "🐫", + "two-thirty": "🕝", + "two_hearts": "💕", + "two_men_holding_hands": "👬", + "two_o’clock": "🕑", + "two_women_holding_hands": "👭", + "umbrella": "☂", + "umbrella_on_ground": "⛱", + "umbrella_with_rain_drops": "☔", + "unamused_face": "😒", + "unicorn_face": "🦄", + "unlocked": "🔓", + "up-down_arrow": "↕", + "up-left_arrow": "↖", + "up-right_arrow": "↗", + "up_arrow": "⬆", + "upside-down_face": "🙃", + "upwards_button": "🔼", + "vampire": "🧛", + "vampire_dark_skin_tone": "🧛🏿", + "vampire_light_skin_tone": "🧛🏻", + "vampire_medium-dark_skin_tone": "🧛🏾", + "vampire_medium-light_skin_tone": "🧛🏼", + "vampire_medium_skin_tone": "🧛🏽", + "vertical_traffic_light": "🚦", + "vibration_mode": "📳", + "victory_hand": "✌", + "victory_hand_dark_skin_tone": "✌🏿", + "victory_hand_light_skin_tone": "✌🏻", + "victory_hand_medium-dark_skin_tone": "✌🏾", + "victory_hand_medium-light_skin_tone": "✌🏼", + "victory_hand_medium_skin_tone": "✌🏽", + "video_camera": "📹", + "video_game": "🎮", + "videocassette": "📼", + "violin": "🎻", + "volcano": "🌋", + "volleyball": "🏐", + "vulcan_salute": "🖖", + "vulcan_salute_dark_skin_tone": "🖖🏿", + "vulcan_salute_light_skin_tone": "🖖🏻", + "vulcan_salute_medium-dark_skin_tone": "🖖🏾", + "vulcan_salute_medium-light_skin_tone": "🖖🏼", + "vulcan_salute_medium_skin_tone": "🖖🏽", + "waffle": "🧇", + "waning_crescent_moon": "🌘", + "waning_gibbous_moon": "🌖", + "warning": "⚠", + "wastebasket": "🗑", + "watch": "⌚", + "water_buffalo": "🐃", + "water_closet": "🚾", + "water_wave": "🌊", + "watermelon": "🍉", + "waving_hand": "👋", + "waving_hand_dark_skin_tone": "👋🏿", + "waving_hand_light_skin_tone": "👋🏻", + "waving_hand_medium-dark_skin_tone": "👋🏾", + "waving_hand_medium-light_skin_tone": "👋🏼", + "waving_hand_medium_skin_tone": "👋🏽", + "wavy_dash": "〰", + "waxing_crescent_moon": "🌒", + "waxing_gibbous_moon": "🌔", + "weary_cat_face": "🙀", + "weary_face": "😩", + "wedding": "💒", + "whale": "🐳", + "wheel_of_dharma": "☸", + "wheelchair_symbol": "♿", + "white_circle": "⚪", + "white_exclamation_mark": "❕", + "white_flag": "🏳", + "white_flower": "💮", + "white_hair": "🦳", + "white-haired_man": "👨\u200d🦳", + "white-haired_woman": "👩\u200d🦳", + "white_heart": "🤍", + "white_heavy_check_mark": "✅", + "white_large_square": "⬜", + "white_medium-small_square": "◽", + "white_medium_square": "◻", + "white_medium_star": "⭐", + "white_question_mark": "❔", + "white_small_square": "▫", + "white_square_button": "🔳", + "wilted_flower": "🥀", + "wind_chime": "🎐", + "wind_face": "🌬", + "wine_glass": "🍷", + "winking_face": "😉", + "winking_face_with_tongue": "😜", + "wolf_face": "🐺", + "woman": "👩", + "woman_artist": "👩\u200d🎨", + "woman_artist_dark_skin_tone": "👩🏿\u200d🎨", + "woman_artist_light_skin_tone": "👩🏻\u200d🎨", + "woman_artist_medium-dark_skin_tone": "👩🏾\u200d🎨", + "woman_artist_medium-light_skin_tone": "👩🏼\u200d🎨", + "woman_artist_medium_skin_tone": "👩🏽\u200d🎨", + 
"woman_astronaut": "👩\u200d🚀", + "woman_astronaut_dark_skin_tone": "👩🏿\u200d🚀", + "woman_astronaut_light_skin_tone": "👩🏻\u200d🚀", + "woman_astronaut_medium-dark_skin_tone": "👩🏾\u200d🚀", + "woman_astronaut_medium-light_skin_tone": "👩🏼\u200d🚀", + "woman_astronaut_medium_skin_tone": "👩🏽\u200d🚀", + "woman_biking": "🚴\u200d♀️", + "woman_biking_dark_skin_tone": "🚴🏿\u200d♀️", + "woman_biking_light_skin_tone": "🚴🏻\u200d♀️", + "woman_biking_medium-dark_skin_tone": "🚴🏾\u200d♀️", + "woman_biking_medium-light_skin_tone": "🚴🏼\u200d♀️", + "woman_biking_medium_skin_tone": "🚴🏽\u200d♀️", + "woman_bouncing_ball": "⛹️\u200d♀️", + "woman_bouncing_ball_dark_skin_tone": "⛹🏿\u200d♀️", + "woman_bouncing_ball_light_skin_tone": "⛹🏻\u200d♀️", + "woman_bouncing_ball_medium-dark_skin_tone": "⛹🏾\u200d♀️", + "woman_bouncing_ball_medium-light_skin_tone": "⛹🏼\u200d♀️", + "woman_bouncing_ball_medium_skin_tone": "⛹🏽\u200d♀️", + "woman_bowing": "🙇\u200d♀️", + "woman_bowing_dark_skin_tone": "🙇🏿\u200d♀️", + "woman_bowing_light_skin_tone": "🙇🏻\u200d♀️", + "woman_bowing_medium-dark_skin_tone": "🙇🏾\u200d♀️", + "woman_bowing_medium-light_skin_tone": "🙇🏼\u200d♀️", + "woman_bowing_medium_skin_tone": "🙇🏽\u200d♀️", + "woman_cartwheeling": "🤸\u200d♀️", + "woman_cartwheeling_dark_skin_tone": "🤸🏿\u200d♀️", + "woman_cartwheeling_light_skin_tone": "🤸🏻\u200d♀️", + "woman_cartwheeling_medium-dark_skin_tone": "🤸🏾\u200d♀️", + "woman_cartwheeling_medium-light_skin_tone": "🤸🏼\u200d♀️", + "woman_cartwheeling_medium_skin_tone": "🤸🏽\u200d♀️", + "woman_climbing": "🧗\u200d♀️", + "woman_climbing_dark_skin_tone": "🧗🏿\u200d♀️", + "woman_climbing_light_skin_tone": "🧗🏻\u200d♀️", + "woman_climbing_medium-dark_skin_tone": "🧗🏾\u200d♀️", + "woman_climbing_medium-light_skin_tone": "🧗🏼\u200d♀️", + "woman_climbing_medium_skin_tone": "🧗🏽\u200d♀️", + "woman_construction_worker": "👷\u200d♀️", + "woman_construction_worker_dark_skin_tone": "👷🏿\u200d♀️", + "woman_construction_worker_light_skin_tone": "👷🏻\u200d♀️", + "woman_construction_worker_medium-dark_skin_tone": "👷🏾\u200d♀️", + "woman_construction_worker_medium-light_skin_tone": "👷🏼\u200d♀️", + "woman_construction_worker_medium_skin_tone": "👷🏽\u200d♀️", + "woman_cook": "👩\u200d🍳", + "woman_cook_dark_skin_tone": "👩🏿\u200d🍳", + "woman_cook_light_skin_tone": "👩🏻\u200d🍳", + "woman_cook_medium-dark_skin_tone": "👩🏾\u200d🍳", + "woman_cook_medium-light_skin_tone": "👩🏼\u200d🍳", + "woman_cook_medium_skin_tone": "👩🏽\u200d🍳", + "woman_dancing": "💃", + "woman_dancing_dark_skin_tone": "💃🏿", + "woman_dancing_light_skin_tone": "💃🏻", + "woman_dancing_medium-dark_skin_tone": "💃🏾", + "woman_dancing_medium-light_skin_tone": "💃🏼", + "woman_dancing_medium_skin_tone": "💃🏽", + "woman_dark_skin_tone": "👩🏿", + "woman_detective": "🕵️\u200d♀️", + "woman_detective_dark_skin_tone": "🕵🏿\u200d♀️", + "woman_detective_light_skin_tone": "🕵🏻\u200d♀️", + "woman_detective_medium-dark_skin_tone": "🕵🏾\u200d♀️", + "woman_detective_medium-light_skin_tone": "🕵🏼\u200d♀️", + "woman_detective_medium_skin_tone": "🕵🏽\u200d♀️", + "woman_elf": "🧝\u200d♀️", + "woman_elf_dark_skin_tone": "🧝🏿\u200d♀️", + "woman_elf_light_skin_tone": "🧝🏻\u200d♀️", + "woman_elf_medium-dark_skin_tone": "🧝🏾\u200d♀️", + "woman_elf_medium-light_skin_tone": "🧝🏼\u200d♀️", + "woman_elf_medium_skin_tone": "🧝🏽\u200d♀️", + "woman_facepalming": "🤦\u200d♀️", + "woman_facepalming_dark_skin_tone": "🤦🏿\u200d♀️", + "woman_facepalming_light_skin_tone": "🤦🏻\u200d♀️", + "woman_facepalming_medium-dark_skin_tone": "🤦🏾\u200d♀️", + "woman_facepalming_medium-light_skin_tone": "🤦🏼\u200d♀️", + 
"woman_facepalming_medium_skin_tone": "🤦🏽\u200d♀️", + "woman_factory_worker": "👩\u200d🏭", + "woman_factory_worker_dark_skin_tone": "👩🏿\u200d🏭", + "woman_factory_worker_light_skin_tone": "👩🏻\u200d🏭", + "woman_factory_worker_medium-dark_skin_tone": "👩🏾\u200d🏭", + "woman_factory_worker_medium-light_skin_tone": "👩🏼\u200d🏭", + "woman_factory_worker_medium_skin_tone": "👩🏽\u200d🏭", + "woman_fairy": "🧚\u200d♀️", + "woman_fairy_dark_skin_tone": "🧚🏿\u200d♀️", + "woman_fairy_light_skin_tone": "🧚🏻\u200d♀️", + "woman_fairy_medium-dark_skin_tone": "🧚🏾\u200d♀️", + "woman_fairy_medium-light_skin_tone": "🧚🏼\u200d♀️", + "woman_fairy_medium_skin_tone": "🧚🏽\u200d♀️", + "woman_farmer": "👩\u200d🌾", + "woman_farmer_dark_skin_tone": "👩🏿\u200d🌾", + "woman_farmer_light_skin_tone": "👩🏻\u200d🌾", + "woman_farmer_medium-dark_skin_tone": "👩🏾\u200d🌾", + "woman_farmer_medium-light_skin_tone": "👩🏼\u200d🌾", + "woman_farmer_medium_skin_tone": "👩🏽\u200d🌾", + "woman_firefighter": "👩\u200d🚒", + "woman_firefighter_dark_skin_tone": "👩🏿\u200d🚒", + "woman_firefighter_light_skin_tone": "👩🏻\u200d🚒", + "woman_firefighter_medium-dark_skin_tone": "👩🏾\u200d🚒", + "woman_firefighter_medium-light_skin_tone": "👩🏼\u200d🚒", + "woman_firefighter_medium_skin_tone": "👩🏽\u200d🚒", + "woman_frowning": "🙍\u200d♀️", + "woman_frowning_dark_skin_tone": "🙍🏿\u200d♀️", + "woman_frowning_light_skin_tone": "🙍🏻\u200d♀️", + "woman_frowning_medium-dark_skin_tone": "🙍🏾\u200d♀️", + "woman_frowning_medium-light_skin_tone": "🙍🏼\u200d♀️", + "woman_frowning_medium_skin_tone": "🙍🏽\u200d♀️", + "woman_genie": "🧞\u200d♀️", + "woman_gesturing_no": "🙅\u200d♀️", + "woman_gesturing_no_dark_skin_tone": "🙅🏿\u200d♀️", + "woman_gesturing_no_light_skin_tone": "🙅🏻\u200d♀️", + "woman_gesturing_no_medium-dark_skin_tone": "🙅🏾\u200d♀️", + "woman_gesturing_no_medium-light_skin_tone": "🙅🏼\u200d♀️", + "woman_gesturing_no_medium_skin_tone": "🙅🏽\u200d♀️", + "woman_gesturing_ok": "🙆\u200d♀️", + "woman_gesturing_ok_dark_skin_tone": "🙆🏿\u200d♀️", + "woman_gesturing_ok_light_skin_tone": "🙆🏻\u200d♀️", + "woman_gesturing_ok_medium-dark_skin_tone": "🙆🏾\u200d♀️", + "woman_gesturing_ok_medium-light_skin_tone": "🙆🏼\u200d♀️", + "woman_gesturing_ok_medium_skin_tone": "🙆🏽\u200d♀️", + "woman_getting_haircut": "💇\u200d♀️", + "woman_getting_haircut_dark_skin_tone": "💇🏿\u200d♀️", + "woman_getting_haircut_light_skin_tone": "💇🏻\u200d♀️", + "woman_getting_haircut_medium-dark_skin_tone": "💇🏾\u200d♀️", + "woman_getting_haircut_medium-light_skin_tone": "💇🏼\u200d♀️", + "woman_getting_haircut_medium_skin_tone": "💇🏽\u200d♀️", + "woman_getting_massage": "💆\u200d♀️", + "woman_getting_massage_dark_skin_tone": "💆🏿\u200d♀️", + "woman_getting_massage_light_skin_tone": "💆🏻\u200d♀️", + "woman_getting_massage_medium-dark_skin_tone": "💆🏾\u200d♀️", + "woman_getting_massage_medium-light_skin_tone": "💆🏼\u200d♀️", + "woman_getting_massage_medium_skin_tone": "💆🏽\u200d♀️", + "woman_golfing": "🏌️\u200d♀️", + "woman_golfing_dark_skin_tone": "🏌🏿\u200d♀️", + "woman_golfing_light_skin_tone": "🏌🏻\u200d♀️", + "woman_golfing_medium-dark_skin_tone": "🏌🏾\u200d♀️", + "woman_golfing_medium-light_skin_tone": "🏌🏼\u200d♀️", + "woman_golfing_medium_skin_tone": "🏌🏽\u200d♀️", + "woman_guard": "💂\u200d♀️", + "woman_guard_dark_skin_tone": "💂🏿\u200d♀️", + "woman_guard_light_skin_tone": "💂🏻\u200d♀️", + "woman_guard_medium-dark_skin_tone": "💂🏾\u200d♀️", + "woman_guard_medium-light_skin_tone": "💂🏼\u200d♀️", + "woman_guard_medium_skin_tone": "💂🏽\u200d♀️", + "woman_health_worker": "👩\u200d⚕️", + "woman_health_worker_dark_skin_tone": "👩🏿\u200d⚕️", + 
"woman_health_worker_light_skin_tone": "👩🏻\u200d⚕️", + "woman_health_worker_medium-dark_skin_tone": "👩🏾\u200d⚕️", + "woman_health_worker_medium-light_skin_tone": "👩🏼\u200d⚕️", + "woman_health_worker_medium_skin_tone": "👩🏽\u200d⚕️", + "woman_in_lotus_position": "🧘\u200d♀️", + "woman_in_lotus_position_dark_skin_tone": "🧘🏿\u200d♀️", + "woman_in_lotus_position_light_skin_tone": "🧘🏻\u200d♀️", + "woman_in_lotus_position_medium-dark_skin_tone": "🧘🏾\u200d♀️", + "woman_in_lotus_position_medium-light_skin_tone": "🧘🏼\u200d♀️", + "woman_in_lotus_position_medium_skin_tone": "🧘🏽\u200d♀️", + "woman_in_manual_wheelchair": "👩\u200d🦽", + "woman_in_motorized_wheelchair": "👩\u200d🦼", + "woman_in_steamy_room": "🧖\u200d♀️", + "woman_in_steamy_room_dark_skin_tone": "🧖🏿\u200d♀️", + "woman_in_steamy_room_light_skin_tone": "🧖🏻\u200d♀️", + "woman_in_steamy_room_medium-dark_skin_tone": "🧖🏾\u200d♀️", + "woman_in_steamy_room_medium-light_skin_tone": "🧖🏼\u200d♀️", + "woman_in_steamy_room_medium_skin_tone": "🧖🏽\u200d♀️", + "woman_judge": "👩\u200d⚖️", + "woman_judge_dark_skin_tone": "👩🏿\u200d⚖️", + "woman_judge_light_skin_tone": "👩🏻\u200d⚖️", + "woman_judge_medium-dark_skin_tone": "👩🏾\u200d⚖️", + "woman_judge_medium-light_skin_tone": "👩🏼\u200d⚖️", + "woman_judge_medium_skin_tone": "👩🏽\u200d⚖️", + "woman_juggling": "🤹\u200d♀️", + "woman_juggling_dark_skin_tone": "🤹🏿\u200d♀️", + "woman_juggling_light_skin_tone": "🤹🏻\u200d♀️", + "woman_juggling_medium-dark_skin_tone": "🤹🏾\u200d♀️", + "woman_juggling_medium-light_skin_tone": "🤹🏼\u200d♀️", + "woman_juggling_medium_skin_tone": "🤹🏽\u200d♀️", + "woman_lifting_weights": "🏋️\u200d♀️", + "woman_lifting_weights_dark_skin_tone": "🏋🏿\u200d♀️", + "woman_lifting_weights_light_skin_tone": "🏋🏻\u200d♀️", + "woman_lifting_weights_medium-dark_skin_tone": "🏋🏾\u200d♀️", + "woman_lifting_weights_medium-light_skin_tone": "🏋🏼\u200d♀️", + "woman_lifting_weights_medium_skin_tone": "🏋🏽\u200d♀️", + "woman_light_skin_tone": "👩🏻", + "woman_mage": "🧙\u200d♀️", + "woman_mage_dark_skin_tone": "🧙🏿\u200d♀️", + "woman_mage_light_skin_tone": "🧙🏻\u200d♀️", + "woman_mage_medium-dark_skin_tone": "🧙🏾\u200d♀️", + "woman_mage_medium-light_skin_tone": "🧙🏼\u200d♀️", + "woman_mage_medium_skin_tone": "🧙🏽\u200d♀️", + "woman_mechanic": "👩\u200d🔧", + "woman_mechanic_dark_skin_tone": "👩🏿\u200d🔧", + "woman_mechanic_light_skin_tone": "👩🏻\u200d🔧", + "woman_mechanic_medium-dark_skin_tone": "👩🏾\u200d🔧", + "woman_mechanic_medium-light_skin_tone": "👩🏼\u200d🔧", + "woman_mechanic_medium_skin_tone": "👩🏽\u200d🔧", + "woman_medium-dark_skin_tone": "👩🏾", + "woman_medium-light_skin_tone": "👩🏼", + "woman_medium_skin_tone": "👩🏽", + "woman_mountain_biking": "🚵\u200d♀️", + "woman_mountain_biking_dark_skin_tone": "🚵🏿\u200d♀️", + "woman_mountain_biking_light_skin_tone": "🚵🏻\u200d♀️", + "woman_mountain_biking_medium-dark_skin_tone": "🚵🏾\u200d♀️", + "woman_mountain_biking_medium-light_skin_tone": "🚵🏼\u200d♀️", + "woman_mountain_biking_medium_skin_tone": "🚵🏽\u200d♀️", + "woman_office_worker": "👩\u200d💼", + "woman_office_worker_dark_skin_tone": "👩🏿\u200d💼", + "woman_office_worker_light_skin_tone": "👩🏻\u200d💼", + "woman_office_worker_medium-dark_skin_tone": "👩🏾\u200d💼", + "woman_office_worker_medium-light_skin_tone": "👩🏼\u200d💼", + "woman_office_worker_medium_skin_tone": "👩🏽\u200d💼", + "woman_pilot": "👩\u200d✈️", + "woman_pilot_dark_skin_tone": "👩🏿\u200d✈️", + "woman_pilot_light_skin_tone": "👩🏻\u200d✈️", + "woman_pilot_medium-dark_skin_tone": "👩🏾\u200d✈️", + "woman_pilot_medium-light_skin_tone": "👩🏼\u200d✈️", + "woman_pilot_medium_skin_tone": 
"👩🏽\u200d✈️", + "woman_playing_handball": "🤾\u200d♀️", + "woman_playing_handball_dark_skin_tone": "🤾🏿\u200d♀️", + "woman_playing_handball_light_skin_tone": "🤾🏻\u200d♀️", + "woman_playing_handball_medium-dark_skin_tone": "🤾🏾\u200d♀️", + "woman_playing_handball_medium-light_skin_tone": "🤾🏼\u200d♀️", + "woman_playing_handball_medium_skin_tone": "🤾🏽\u200d♀️", + "woman_playing_water_polo": "🤽\u200d♀️", + "woman_playing_water_polo_dark_skin_tone": "🤽🏿\u200d♀️", + "woman_playing_water_polo_light_skin_tone": "🤽🏻\u200d♀️", + "woman_playing_water_polo_medium-dark_skin_tone": "🤽🏾\u200d♀️", + "woman_playing_water_polo_medium-light_skin_tone": "🤽🏼\u200d♀️", + "woman_playing_water_polo_medium_skin_tone": "🤽🏽\u200d♀️", + "woman_police_officer": "👮\u200d♀️", + "woman_police_officer_dark_skin_tone": "👮🏿\u200d♀️", + "woman_police_officer_light_skin_tone": "👮🏻\u200d♀️", + "woman_police_officer_medium-dark_skin_tone": "👮🏾\u200d♀️", + "woman_police_officer_medium-light_skin_tone": "👮🏼\u200d♀️", + "woman_police_officer_medium_skin_tone": "👮🏽\u200d♀️", + "woman_pouting": "🙎\u200d♀️", + "woman_pouting_dark_skin_tone": "🙎🏿\u200d♀️", + "woman_pouting_light_skin_tone": "🙎🏻\u200d♀️", + "woman_pouting_medium-dark_skin_tone": "🙎🏾\u200d♀️", + "woman_pouting_medium-light_skin_tone": "🙎🏼\u200d♀️", + "woman_pouting_medium_skin_tone": "🙎🏽\u200d♀️", + "woman_raising_hand": "🙋\u200d♀️", + "woman_raising_hand_dark_skin_tone": "🙋🏿\u200d♀️", + "woman_raising_hand_light_skin_tone": "🙋🏻\u200d♀️", + "woman_raising_hand_medium-dark_skin_tone": "🙋🏾\u200d♀️", + "woman_raising_hand_medium-light_skin_tone": "🙋🏼\u200d♀️", + "woman_raising_hand_medium_skin_tone": "🙋🏽\u200d♀️", + "woman_rowing_boat": "🚣\u200d♀️", + "woman_rowing_boat_dark_skin_tone": "🚣🏿\u200d♀️", + "woman_rowing_boat_light_skin_tone": "🚣🏻\u200d♀️", + "woman_rowing_boat_medium-dark_skin_tone": "🚣🏾\u200d♀️", + "woman_rowing_boat_medium-light_skin_tone": "🚣🏼\u200d♀️", + "woman_rowing_boat_medium_skin_tone": "🚣🏽\u200d♀️", + "woman_running": "🏃\u200d♀️", + "woman_running_dark_skin_tone": "🏃🏿\u200d♀️", + "woman_running_light_skin_tone": "🏃🏻\u200d♀️", + "woman_running_medium-dark_skin_tone": "🏃🏾\u200d♀️", + "woman_running_medium-light_skin_tone": "🏃🏼\u200d♀️", + "woman_running_medium_skin_tone": "🏃🏽\u200d♀️", + "woman_scientist": "👩\u200d🔬", + "woman_scientist_dark_skin_tone": "👩🏿\u200d🔬", + "woman_scientist_light_skin_tone": "👩🏻\u200d🔬", + "woman_scientist_medium-dark_skin_tone": "👩🏾\u200d🔬", + "woman_scientist_medium-light_skin_tone": "👩🏼\u200d🔬", + "woman_scientist_medium_skin_tone": "👩🏽\u200d🔬", + "woman_shrugging": "🤷\u200d♀️", + "woman_shrugging_dark_skin_tone": "🤷🏿\u200d♀️", + "woman_shrugging_light_skin_tone": "🤷🏻\u200d♀️", + "woman_shrugging_medium-dark_skin_tone": "🤷🏾\u200d♀️", + "woman_shrugging_medium-light_skin_tone": "🤷🏼\u200d♀️", + "woman_shrugging_medium_skin_tone": "🤷🏽\u200d♀️", + "woman_singer": "👩\u200d🎤", + "woman_singer_dark_skin_tone": "👩🏿\u200d🎤", + "woman_singer_light_skin_tone": "👩🏻\u200d🎤", + "woman_singer_medium-dark_skin_tone": "👩🏾\u200d🎤", + "woman_singer_medium-light_skin_tone": "👩🏼\u200d🎤", + "woman_singer_medium_skin_tone": "👩🏽\u200d🎤", + "woman_student": "👩\u200d🎓", + "woman_student_dark_skin_tone": "👩🏿\u200d🎓", + "woman_student_light_skin_tone": "👩🏻\u200d🎓", + "woman_student_medium-dark_skin_tone": "👩🏾\u200d🎓", + "woman_student_medium-light_skin_tone": "👩🏼\u200d🎓", + "woman_student_medium_skin_tone": "👩🏽\u200d🎓", + "woman_surfing": "🏄\u200d♀️", + "woman_surfing_dark_skin_tone": "🏄🏿\u200d♀️", + "woman_surfing_light_skin_tone": "🏄🏻\u200d♀️", + 
"woman_surfing_medium-dark_skin_tone": "🏄🏾\u200d♀️", + "woman_surfing_medium-light_skin_tone": "🏄🏼\u200d♀️", + "woman_surfing_medium_skin_tone": "🏄🏽\u200d♀️", + "woman_swimming": "🏊\u200d♀️", + "woman_swimming_dark_skin_tone": "🏊🏿\u200d♀️", + "woman_swimming_light_skin_tone": "🏊🏻\u200d♀️", + "woman_swimming_medium-dark_skin_tone": "🏊🏾\u200d♀️", + "woman_swimming_medium-light_skin_tone": "🏊🏼\u200d♀️", + "woman_swimming_medium_skin_tone": "🏊🏽\u200d♀️", + "woman_teacher": "👩\u200d🏫", + "woman_teacher_dark_skin_tone": "👩🏿\u200d🏫", + "woman_teacher_light_skin_tone": "👩🏻\u200d🏫", + "woman_teacher_medium-dark_skin_tone": "👩🏾\u200d🏫", + "woman_teacher_medium-light_skin_tone": "👩🏼\u200d🏫", + "woman_teacher_medium_skin_tone": "👩🏽\u200d🏫", + "woman_technologist": "👩\u200d💻", + "woman_technologist_dark_skin_tone": "👩🏿\u200d💻", + "woman_technologist_light_skin_tone": "👩🏻\u200d💻", + "woman_technologist_medium-dark_skin_tone": "👩🏾\u200d💻", + "woman_technologist_medium-light_skin_tone": "👩🏼\u200d💻", + "woman_technologist_medium_skin_tone": "👩🏽\u200d💻", + "woman_tipping_hand": "💁\u200d♀️", + "woman_tipping_hand_dark_skin_tone": "💁🏿\u200d♀️", + "woman_tipping_hand_light_skin_tone": "💁🏻\u200d♀️", + "woman_tipping_hand_medium-dark_skin_tone": "💁🏾\u200d♀️", + "woman_tipping_hand_medium-light_skin_tone": "💁🏼\u200d♀️", + "woman_tipping_hand_medium_skin_tone": "💁🏽\u200d♀️", + "woman_vampire": "🧛\u200d♀️", + "woman_vampire_dark_skin_tone": "🧛🏿\u200d♀️", + "woman_vampire_light_skin_tone": "🧛🏻\u200d♀️", + "woman_vampire_medium-dark_skin_tone": "🧛🏾\u200d♀️", + "woman_vampire_medium-light_skin_tone": "🧛🏼\u200d♀️", + "woman_vampire_medium_skin_tone": "🧛🏽\u200d♀️", + "woman_walking": "🚶\u200d♀️", + "woman_walking_dark_skin_tone": "🚶🏿\u200d♀️", + "woman_walking_light_skin_tone": "🚶🏻\u200d♀️", + "woman_walking_medium-dark_skin_tone": "🚶🏾\u200d♀️", + "woman_walking_medium-light_skin_tone": "🚶🏼\u200d♀️", + "woman_walking_medium_skin_tone": "🚶🏽\u200d♀️", + "woman_wearing_turban": "👳\u200d♀️", + "woman_wearing_turban_dark_skin_tone": "👳🏿\u200d♀️", + "woman_wearing_turban_light_skin_tone": "👳🏻\u200d♀️", + "woman_wearing_turban_medium-dark_skin_tone": "👳🏾\u200d♀️", + "woman_wearing_turban_medium-light_skin_tone": "👳🏼\u200d♀️", + "woman_wearing_turban_medium_skin_tone": "👳🏽\u200d♀️", + "woman_with_headscarf": "🧕", + "woman_with_headscarf_dark_skin_tone": "🧕🏿", + "woman_with_headscarf_light_skin_tone": "🧕🏻", + "woman_with_headscarf_medium-dark_skin_tone": "🧕🏾", + "woman_with_headscarf_medium-light_skin_tone": "🧕🏼", + "woman_with_headscarf_medium_skin_tone": "🧕🏽", + "woman_with_probing_cane": "👩\u200d🦯", + "woman_zombie": "🧟\u200d♀️", + "woman’s_boot": "👢", + "woman’s_clothes": "👚", + "woman’s_hat": "👒", + "woman’s_sandal": "👡", + "women_with_bunny_ears": "👯\u200d♀️", + "women_wrestling": "🤼\u200d♀️", + "women’s_room": "🚺", + "woozy_face": "🥴", + "world_map": "🗺", + "worried_face": "😟", + "wrapped_gift": "🎁", + "wrench": "🔧", + "writing_hand": "✍", + "writing_hand_dark_skin_tone": "✍🏿", + "writing_hand_light_skin_tone": "✍🏻", + "writing_hand_medium-dark_skin_tone": "✍🏾", + "writing_hand_medium-light_skin_tone": "✍🏼", + "writing_hand_medium_skin_tone": "✍🏽", + "yarn": "🧶", + "yawning_face": "🥱", + "yellow_circle": "🟡", + "yellow_heart": "💛", + "yellow_square": "🟨", + "yen_banknote": "💴", + "yo-yo": "🪀", + "yin_yang": "☯", + "zany_face": "🤪", + "zebra": "🦓", + "zipper-mouth_face": "🤐", + "zombie": "🧟", + "zzz": "💤", + "åland_islands": "🇦🇽", + "keycap_asterisk": "*⃣", + "keycap_digit_eight": "8⃣", + "keycap_digit_five": "5⃣", + 
"keycap_digit_four": "4⃣", + "keycap_digit_nine": "9⃣", + "keycap_digit_one": "1⃣", + "keycap_digit_seven": "7⃣", + "keycap_digit_six": "6⃣", + "keycap_digit_three": "3⃣", + "keycap_digit_two": "2⃣", + "keycap_digit_zero": "0⃣", + "keycap_number_sign": "#⃣", + "light_skin_tone": "🏻", + "medium_light_skin_tone": "🏼", + "medium_skin_tone": "🏽", + "medium_dark_skin_tone": "🏾", + "dark_skin_tone": "🏿", + "regional_indicator_symbol_letter_a": "🇦", + "regional_indicator_symbol_letter_b": "🇧", + "regional_indicator_symbol_letter_c": "🇨", + "regional_indicator_symbol_letter_d": "🇩", + "regional_indicator_symbol_letter_e": "🇪", + "regional_indicator_symbol_letter_f": "🇫", + "regional_indicator_symbol_letter_g": "🇬", + "regional_indicator_symbol_letter_h": "🇭", + "regional_indicator_symbol_letter_i": "🇮", + "regional_indicator_symbol_letter_j": "🇯", + "regional_indicator_symbol_letter_k": "🇰", + "regional_indicator_symbol_letter_l": "🇱", + "regional_indicator_symbol_letter_m": "🇲", + "regional_indicator_symbol_letter_n": "🇳", + "regional_indicator_symbol_letter_o": "🇴", + "regional_indicator_symbol_letter_p": "🇵", + "regional_indicator_symbol_letter_q": "🇶", + "regional_indicator_symbol_letter_r": "🇷", + "regional_indicator_symbol_letter_s": "🇸", + "regional_indicator_symbol_letter_t": "🇹", + "regional_indicator_symbol_letter_u": "🇺", + "regional_indicator_symbol_letter_v": "🇻", + "regional_indicator_symbol_letter_w": "🇼", + "regional_indicator_symbol_letter_x": "🇽", + "regional_indicator_symbol_letter_y": "🇾", + "regional_indicator_symbol_letter_z": "🇿", + "airplane_arriving": "🛬", + "space_invader": "👾", + "football": "🏈", + "anger": "💢", + "angry": "😠", + "anguished": "😧", + "signal_strength": "📶", + "arrows_counterclockwise": "🔄", + "arrow_heading_down": "⤵", + "arrow_heading_up": "⤴", + "art": "🎨", + "astonished": "😲", + "athletic_shoe": "👟", + "atm": "🏧", + "car": "🚗", + "red_car": "🚗", + "angel": "👼", + "back": "🔙", + "badminton_racquet_and_shuttlecock": "🏸", + "dollar": "💵", + "euro": "💶", + "pound": "💷", + "yen": "💴", + "barber": "💈", + "bath": "🛀", + "bear": "🐻", + "heartbeat": "💓", + "beer": "🍺", + "no_bell": "🔕", + "bento": "🍱", + "bike": "🚲", + "bicyclist": "🚴", + "8ball": "🎱", + "biohazard_sign": "☣", + "birthday": "🎂", + "black_circle_for_record": "⏺", + "clubs": "♣", + "diamonds": "♦", + "arrow_double_down": "⏬", + "hearts": "♥", + "rewind": "⏪", + "black_left__pointing_double_triangle_with_vertical_bar": "⏮", + "arrow_backward": "◀", + "black_medium_small_square": "◾", + "question": "❓", + "fast_forward": "⏩", + "black_right__pointing_double_triangle_with_vertical_bar": "⏭", + "arrow_forward": "▶", + "black_right__pointing_triangle_with_double_vertical_bar": "⏯", + "arrow_right": "➡", + "spades": "♠", + "black_square_for_stop": "⏹", + "sunny": "☀", + "phone": "☎", + "recycle": "♻", + "arrow_double_up": "⏫", + "busstop": "🚏", + "date": "📅", + "flags": "🎏", + "cat2": "🐈", + "joy_cat": "😹", + "smirk_cat": "😼", + "chart_with_downwards_trend": "📉", + "chart_with_upwards_trend": "📈", + "chart": "💹", + "mega": "📣", + "checkered_flag": "🏁", + "accept": "🉑", + "ideograph_advantage": "🉐", + "congratulations": "㊗", + "secret": "㊙", + "m": "Ⓜ", + "city_sunset": "🌆", + "clapper": "🎬", + "clap": "👏", + "beers": "🍻", + "clock830": "🕣", + "clock8": "🕗", + "clock1130": "🕦", + "clock11": "🕚", + "clock530": "🕠", + "clock5": "🕔", + "clock430": "🕟", + "clock4": "🕓", + "clock930": "🕤", + "clock9": "🕘", + "clock130": "🕜", + "clock1": "🕐", + "clock730": "🕢", + "clock7": "🕖", + "clock630": "🕡", + "clock6": 
"🕕", + "clock1030": "🕥", + "clock10": "🕙", + "clock330": "🕞", + "clock3": "🕒", + "clock1230": "🕧", + "clock12": "🕛", + "clock230": "🕝", + "clock2": "🕑", + "arrows_clockwise": "🔃", + "repeat": "🔁", + "repeat_one": "🔂", + "closed_lock_with_key": "🔐", + "mailbox_closed": "📪", + "mailbox": "📫", + "cloud_with_tornado": "🌪", + "cocktail": "🍸", + "boom": "💥", + "compression": "🗜", + "confounded": "😖", + "confused": "😕", + "rice": "🍚", + "cow2": "🐄", + "cricket_bat_and_ball": "🏏", + "x": "❌", + "cry": "😢", + "curry": "🍛", + "dagger_knife": "🗡", + "dancer": "💃", + "dark_sunglasses": "🕶", + "dash": "💨", + "truck": "🚚", + "derelict_house_building": "🏚", + "diamond_shape_with_a_dot_inside": "💠", + "dart": "🎯", + "disappointed_relieved": "😥", + "disappointed": "😞", + "do_not_litter": "🚯", + "dog2": "🐕", + "flipper": "🐬", + "loop": "➿", + "bangbang": "‼", + "double_vertical_bar": "⏸", + "dove_of_peace": "🕊", + "small_red_triangle_down": "🔻", + "arrow_down_small": "🔽", + "arrow_down": "⬇", + "dromedary_camel": "🐪", + "e__mail": "📧", + "corn": "🌽", + "ear_of_rice": "🌾", + "earth_americas": "🌎", + "earth_asia": "🌏", + "earth_africa": "🌍", + "eight_pointed_black_star": "✴", + "eight_spoked_asterisk": "✳", + "eject_symbol": "⏏", + "bulb": "💡", + "emoji_modifier_fitzpatrick_type__1__2": "🏻", + "emoji_modifier_fitzpatrick_type__3": "🏼", + "emoji_modifier_fitzpatrick_type__4": "🏽", + "emoji_modifier_fitzpatrick_type__5": "🏾", + "emoji_modifier_fitzpatrick_type__6": "🏿", + "end": "🔚", + "email": "✉", + "european_castle": "🏰", + "european_post_office": "🏤", + "interrobang": "⁉", + "expressionless": "😑", + "eyeglasses": "👓", + "massage": "💆", + "yum": "😋", + "scream": "😱", + "kissing_heart": "😘", + "sweat": "😓", + "face_with_head__bandage": "🤕", + "triumph": "😤", + "mask": "😷", + "no_good": "🙅", + "ok_woman": "🙆", + "open_mouth": "😮", + "cold_sweat": "😰", + "stuck_out_tongue": "😛", + "stuck_out_tongue_closed_eyes": "😝", + "stuck_out_tongue_winking_eye": "😜", + "joy": "😂", + "no_mouth": "😶", + "santa": "🎅", + "fax": "📠", + "fearful": "😨", + "field_hockey_stick_and_ball": "🏑", + "first_quarter_moon_with_face": "🌛", + "fish_cake": "🍥", + "fishing_pole_and_fish": "🎣", + "facepunch": "👊", + "punch": "👊", + "flag_for_afghanistan": "🇦🇫", + "flag_for_albania": "🇦🇱", + "flag_for_algeria": "🇩🇿", + "flag_for_american_samoa": "🇦🇸", + "flag_for_andorra": "🇦🇩", + "flag_for_angola": "🇦🇴", + "flag_for_anguilla": "🇦🇮", + "flag_for_antarctica": "🇦🇶", + "flag_for_antigua_&_barbuda": "🇦🇬", + "flag_for_argentina": "🇦🇷", + "flag_for_armenia": "🇦🇲", + "flag_for_aruba": "🇦🇼", + "flag_for_ascension_island": "🇦🇨", + "flag_for_australia": "🇦🇺", + "flag_for_austria": "🇦🇹", + "flag_for_azerbaijan": "🇦🇿", + "flag_for_bahamas": "🇧🇸", + "flag_for_bahrain": "🇧🇭", + "flag_for_bangladesh": "🇧🇩", + "flag_for_barbados": "🇧🇧", + "flag_for_belarus": "🇧🇾", + "flag_for_belgium": "🇧🇪", + "flag_for_belize": "🇧🇿", + "flag_for_benin": "🇧🇯", + "flag_for_bermuda": "🇧🇲", + "flag_for_bhutan": "🇧🇹", + "flag_for_bolivia": "🇧🇴", + "flag_for_bosnia_&_herzegovina": "🇧🇦", + "flag_for_botswana": "🇧🇼", + "flag_for_bouvet_island": "🇧🇻", + "flag_for_brazil": "🇧🇷", + "flag_for_british_indian_ocean_territory": "🇮🇴", + "flag_for_british_virgin_islands": "🇻🇬", + "flag_for_brunei": "🇧🇳", + "flag_for_bulgaria": "🇧🇬", + "flag_for_burkina_faso": "🇧🇫", + "flag_for_burundi": "🇧🇮", + "flag_for_cambodia": "🇰🇭", + "flag_for_cameroon": "🇨🇲", + "flag_for_canada": "🇨🇦", + "flag_for_canary_islands": "🇮🇨", + "flag_for_cape_verde": "🇨🇻", + "flag_for_caribbean_netherlands": "🇧🇶", + 
"flag_for_cayman_islands": "🇰🇾", + "flag_for_central_african_republic": "🇨🇫", + "flag_for_ceuta_&_melilla": "🇪🇦", + "flag_for_chad": "🇹🇩", + "flag_for_chile": "🇨🇱", + "flag_for_china": "🇨🇳", + "flag_for_christmas_island": "🇨🇽", + "flag_for_clipperton_island": "🇨🇵", + "flag_for_cocos__islands": "🇨🇨", + "flag_for_colombia": "🇨🇴", + "flag_for_comoros": "🇰🇲", + "flag_for_congo____brazzaville": "🇨🇬", + "flag_for_congo____kinshasa": "🇨🇩", + "flag_for_cook_islands": "🇨🇰", + "flag_for_costa_rica": "🇨🇷", + "flag_for_croatia": "🇭🇷", + "flag_for_cuba": "🇨🇺", + "flag_for_curaçao": "🇨🇼", + "flag_for_cyprus": "🇨🇾", + "flag_for_czech_republic": "🇨🇿", + "flag_for_côte_d’ivoire": "🇨🇮", + "flag_for_denmark": "🇩🇰", + "flag_for_diego_garcia": "🇩🇬", + "flag_for_djibouti": "🇩🇯", + "flag_for_dominica": "🇩🇲", + "flag_for_dominican_republic": "🇩🇴", + "flag_for_ecuador": "🇪🇨", + "flag_for_egypt": "🇪🇬", + "flag_for_el_salvador": "🇸🇻", + "flag_for_equatorial_guinea": "🇬🇶", + "flag_for_eritrea": "🇪🇷", + "flag_for_estonia": "🇪🇪", + "flag_for_ethiopia": "🇪🇹", + "flag_for_european_union": "🇪🇺", + "flag_for_falkland_islands": "🇫🇰", + "flag_for_faroe_islands": "🇫🇴", + "flag_for_fiji": "🇫🇯", + "flag_for_finland": "🇫🇮", + "flag_for_france": "🇫🇷", + "flag_for_french_guiana": "🇬🇫", + "flag_for_french_polynesia": "🇵🇫", + "flag_for_french_southern_territories": "🇹🇫", + "flag_for_gabon": "🇬🇦", + "flag_for_gambia": "🇬🇲", + "flag_for_georgia": "🇬🇪", + "flag_for_germany": "🇩🇪", + "flag_for_ghana": "🇬🇭", + "flag_for_gibraltar": "🇬🇮", + "flag_for_greece": "🇬🇷", + "flag_for_greenland": "🇬🇱", + "flag_for_grenada": "🇬🇩", + "flag_for_guadeloupe": "🇬🇵", + "flag_for_guam": "🇬🇺", + "flag_for_guatemala": "🇬🇹", + "flag_for_guernsey": "🇬🇬", + "flag_for_guinea": "🇬🇳", + "flag_for_guinea__bissau": "🇬🇼", + "flag_for_guyana": "🇬🇾", + "flag_for_haiti": "🇭🇹", + "flag_for_heard_&_mcdonald_islands": "🇭🇲", + "flag_for_honduras": "🇭🇳", + "flag_for_hong_kong": "🇭🇰", + "flag_for_hungary": "🇭🇺", + "flag_for_iceland": "🇮🇸", + "flag_for_india": "🇮🇳", + "flag_for_indonesia": "🇮🇩", + "flag_for_iran": "🇮🇷", + "flag_for_iraq": "🇮🇶", + "flag_for_ireland": "🇮🇪", + "flag_for_isle_of_man": "🇮🇲", + "flag_for_israel": "🇮🇱", + "flag_for_italy": "🇮🇹", + "flag_for_jamaica": "🇯🇲", + "flag_for_japan": "🇯🇵", + "flag_for_jersey": "🇯🇪", + "flag_for_jordan": "🇯🇴", + "flag_for_kazakhstan": "🇰🇿", + "flag_for_kenya": "🇰🇪", + "flag_for_kiribati": "🇰🇮", + "flag_for_kosovo": "🇽🇰", + "flag_for_kuwait": "🇰🇼", + "flag_for_kyrgyzstan": "🇰🇬", + "flag_for_laos": "🇱🇦", + "flag_for_latvia": "🇱🇻", + "flag_for_lebanon": "🇱🇧", + "flag_for_lesotho": "🇱🇸", + "flag_for_liberia": "🇱🇷", + "flag_for_libya": "🇱🇾", + "flag_for_liechtenstein": "🇱🇮", + "flag_for_lithuania": "🇱🇹", + "flag_for_luxembourg": "🇱🇺", + "flag_for_macau": "🇲🇴", + "flag_for_macedonia": "🇲🇰", + "flag_for_madagascar": "🇲🇬", + "flag_for_malawi": "🇲🇼", + "flag_for_malaysia": "🇲🇾", + "flag_for_maldives": "🇲🇻", + "flag_for_mali": "🇲🇱", + "flag_for_malta": "🇲🇹", + "flag_for_marshall_islands": "🇲🇭", + "flag_for_martinique": "🇲🇶", + "flag_for_mauritania": "🇲🇷", + "flag_for_mauritius": "🇲🇺", + "flag_for_mayotte": "🇾🇹", + "flag_for_mexico": "🇲🇽", + "flag_for_micronesia": "🇫🇲", + "flag_for_moldova": "🇲🇩", + "flag_for_monaco": "🇲🇨", + "flag_for_mongolia": "🇲🇳", + "flag_for_montenegro": "🇲🇪", + "flag_for_montserrat": "🇲🇸", + "flag_for_morocco": "🇲🇦", + "flag_for_mozambique": "🇲🇿", + "flag_for_myanmar": "🇲🇲", + "flag_for_namibia": "🇳🇦", + "flag_for_nauru": "🇳🇷", + "flag_for_nepal": "🇳🇵", + "flag_for_netherlands": "🇳🇱", + 
"flag_for_new_caledonia": "🇳🇨", + "flag_for_new_zealand": "🇳🇿", + "flag_for_nicaragua": "🇳🇮", + "flag_for_niger": "🇳🇪", + "flag_for_nigeria": "🇳🇬", + "flag_for_niue": "🇳🇺", + "flag_for_norfolk_island": "🇳🇫", + "flag_for_north_korea": "🇰🇵", + "flag_for_northern_mariana_islands": "🇲🇵", + "flag_for_norway": "🇳🇴", + "flag_for_oman": "🇴🇲", + "flag_for_pakistan": "🇵🇰", + "flag_for_palau": "🇵🇼", + "flag_for_palestinian_territories": "🇵🇸", + "flag_for_panama": "🇵🇦", + "flag_for_papua_new_guinea": "🇵🇬", + "flag_for_paraguay": "🇵🇾", + "flag_for_peru": "🇵🇪", + "flag_for_philippines": "🇵🇭", + "flag_for_pitcairn_islands": "🇵🇳", + "flag_for_poland": "🇵🇱", + "flag_for_portugal": "🇵🇹", + "flag_for_puerto_rico": "🇵🇷", + "flag_for_qatar": "🇶🇦", + "flag_for_romania": "🇷🇴", + "flag_for_russia": "🇷🇺", + "flag_for_rwanda": "🇷🇼", + "flag_for_réunion": "🇷🇪", + "flag_for_samoa": "🇼🇸", + "flag_for_san_marino": "🇸🇲", + "flag_for_saudi_arabia": "🇸🇦", + "flag_for_senegal": "🇸🇳", + "flag_for_serbia": "🇷🇸", + "flag_for_seychelles": "🇸🇨", + "flag_for_sierra_leone": "🇸🇱", + "flag_for_singapore": "🇸🇬", + "flag_for_sint_maarten": "🇸🇽", + "flag_for_slovakia": "🇸🇰", + "flag_for_slovenia": "🇸🇮", + "flag_for_solomon_islands": "🇸🇧", + "flag_for_somalia": "🇸🇴", + "flag_for_south_africa": "🇿🇦", + "flag_for_south_georgia_&_south_sandwich_islands": "🇬🇸", + "flag_for_south_korea": "🇰🇷", + "flag_for_south_sudan": "🇸🇸", + "flag_for_spain": "🇪🇸", + "flag_for_sri_lanka": "🇱🇰", + "flag_for_st._barthélemy": "🇧🇱", + "flag_for_st._helena": "🇸🇭", + "flag_for_st._kitts_&_nevis": "🇰🇳", + "flag_for_st._lucia": "🇱🇨", + "flag_for_st._martin": "🇲🇫", + "flag_for_st._pierre_&_miquelon": "🇵🇲", + "flag_for_st._vincent_&_grenadines": "🇻🇨", + "flag_for_sudan": "🇸🇩", + "flag_for_suriname": "🇸🇷", + "flag_for_svalbard_&_jan_mayen": "🇸🇯", + "flag_for_swaziland": "🇸🇿", + "flag_for_sweden": "🇸🇪", + "flag_for_switzerland": "🇨🇭", + "flag_for_syria": "🇸🇾", + "flag_for_são_tomé_&_príncipe": "🇸🇹", + "flag_for_taiwan": "🇹🇼", + "flag_for_tajikistan": "🇹🇯", + "flag_for_tanzania": "🇹🇿", + "flag_for_thailand": "🇹🇭", + "flag_for_timor__leste": "🇹🇱", + "flag_for_togo": "🇹🇬", + "flag_for_tokelau": "🇹🇰", + "flag_for_tonga": "🇹🇴", + "flag_for_trinidad_&_tobago": "🇹🇹", + "flag_for_tristan_da_cunha": "🇹🇦", + "flag_for_tunisia": "🇹🇳", + "flag_for_turkey": "🇹🇷", + "flag_for_turkmenistan": "🇹🇲", + "flag_for_turks_&_caicos_islands": "🇹🇨", + "flag_for_tuvalu": "🇹🇻", + "flag_for_u.s._outlying_islands": "🇺🇲", + "flag_for_u.s._virgin_islands": "🇻🇮", + "flag_for_uganda": "🇺🇬", + "flag_for_ukraine": "🇺🇦", + "flag_for_united_arab_emirates": "🇦🇪", + "flag_for_united_kingdom": "🇬🇧", + "flag_for_united_states": "🇺🇸", + "flag_for_uruguay": "🇺🇾", + "flag_for_uzbekistan": "🇺🇿", + "flag_for_vanuatu": "🇻🇺", + "flag_for_vatican_city": "🇻🇦", + "flag_for_venezuela": "🇻🇪", + "flag_for_vietnam": "🇻🇳", + "flag_for_wallis_&_futuna": "🇼🇫", + "flag_for_western_sahara": "🇪🇭", + "flag_for_yemen": "🇾🇪", + "flag_for_zambia": "🇿🇲", + "flag_for_zimbabwe": "🇿🇼", + "flag_for_åland_islands": "🇦🇽", + "golf": "⛳", + "fleur__de__lis": "⚜", + "muscle": "💪", + "flushed": "😳", + "frame_with_picture": "🖼", + "fries": "🍟", + "frog": "🐸", + "hatched_chick": "🐥", + "frowning": "😦", + "fuelpump": "⛽", + "full_moon_with_face": "🌝", + "gem": "💎", + "star2": "🌟", + "golfer": "🏌", + "mortar_board": "🎓", + "grimacing": "😬", + "smile_cat": "😸", + "grinning": "😀", + "grin": "😁", + "heartpulse": "💗", + "guardsman": "💂", + "haircut": "💇", + "hamster": "🐹", + "raising_hand": "🙋", + "headphones": "🎧", + "hear_no_evil": "🙉", + "cupid": 
"💘", + "gift_heart": "💝", + "heart": "❤", + "exclamation": "❗", + "heavy_exclamation_mark": "❗", + "heavy_heart_exclamation_mark_ornament": "❣", + "o": "⭕", + "helm_symbol": "⎈", + "helmet_with_white_cross": "⛑", + "high_heel": "👠", + "bullettrain_side": "🚄", + "bullettrain_front": "🚅", + "high_brightness": "🔆", + "zap": "⚡", + "hocho": "🔪", + "knife": "🔪", + "bee": "🐝", + "traffic_light": "🚥", + "racehorse": "🐎", + "coffee": "☕", + "hotsprings": "♨", + "hourglass": "⌛", + "hourglass_flowing_sand": "⏳", + "house_buildings": "🏘", + "100": "💯", + "hushed": "😯", + "ice_hockey_stick_and_puck": "🏒", + "imp": "👿", + "information_desk_person": "💁", + "information_source": "ℹ", + "capital_abcd": "🔠", + "abc": "🔤", + "abcd": "🔡", + "1234": "🔢", + "symbols": "🔣", + "izakaya_lantern": "🏮", + "lantern": "🏮", + "jack_o_lantern": "🎃", + "dolls": "🎎", + "japanese_goblin": "👺", + "japanese_ogre": "👹", + "beginner": "🔰", + "zero": "0️⃣", + "one": "1️⃣", + "ten": "🔟", + "two": "2️⃣", + "three": "3️⃣", + "four": "4️⃣", + "five": "5️⃣", + "six": "6️⃣", + "seven": "7️⃣", + "eight": "8️⃣", + "nine": "9️⃣", + "couplekiss": "💏", + "kissing_cat": "😽", + "kissing": "😗", + "kissing_closed_eyes": "😚", + "kissing_smiling_eyes": "😙", + "beetle": "🐞", + "large_blue_circle": "🔵", + "last_quarter_moon_with_face": "🌜", + "leaves": "🍃", + "mag": "🔍", + "left_right_arrow": "↔", + "leftwards_arrow_with_hook": "↩", + "arrow_left": "⬅", + "lock": "🔒", + "lock_with_ink_pen": "🔏", + "sob": "😭", + "low_brightness": "🔅", + "lower_left_ballpoint_pen": "🖊", + "lower_left_crayon": "🖍", + "lower_left_fountain_pen": "🖋", + "lower_left_paintbrush": "🖌", + "mahjong": "🀄", + "couple": "👫", + "man_in_business_suit_levitating": "🕴", + "man_with_gua_pi_mao": "👲", + "man_with_turban": "👳", + "mans_shoe": "👞", + "shoe": "👞", + "menorah_with_nine_branches": "🕎", + "mens": "🚹", + "minidisc": "💽", + "iphone": "📱", + "calling": "📲", + "money__mouth_face": "🤑", + "moneybag": "💰", + "rice_scene": "🎑", + "mountain_bicyclist": "🚵", + "mouse2": "🐁", + "lips": "👄", + "moyai": "🗿", + "notes": "🎶", + "nail_care": "💅", + "ab": "🆎", + "negative_squared_cross_mark": "❎", + "a": "🅰", + "b": "🅱", + "o2": "🅾", + "parking": "🅿", + "new_moon_with_face": "🌚", + "no_entry_sign": "🚫", + "underage": "🔞", + "non__potable_water": "🚱", + "arrow_upper_right": "↗", + "arrow_upper_left": "↖", + "office": "🏢", + "older_man": "👴", + "older_woman": "👵", + "om_symbol": "🕉", + "on": "🔛", + "book": "📖", + "unlock": "🔓", + "mailbox_with_no_mail": "📭", + "mailbox_with_mail": "📬", + "cd": "💿", + "tada": "🎉", + "feet": "🐾", + "walking": "🚶", + "pencil2": "✏", + "pensive": "😔", + "persevere": "😣", + "bow": "🙇", + "raised_hands": "🙌", + "person_with_ball": "⛹", + "person_with_blond_hair": "👱", + "pray": "🙏", + "person_with_pouting_face": "🙎", + "computer": "💻", + "pig2": "🐖", + "hankey": "💩", + "poop": "💩", + "shit": "💩", + "bamboo": "🎍", + "gun": "🔫", + "black_joker": "🃏", + "rotating_light": "🚨", + "cop": "👮", + "stew": "🍲", + "pouch": "👝", + "pouting_cat": "😾", + "rage": "😡", + "put_litter_in_its_place": "🚮", + "rabbit2": "🐇", + "racing_motorcycle": "🏍", + "radioactive_sign": "☢", + "fist": "✊", + "hand": "✋", + "raised_hand_with_fingers_splayed": "🖐", + "raised_hand_with_part_between_middle_and_ring_fingers": "🖖", + "blue_car": "🚙", + "apple": "🍎", + "relieved": "😌", + "reversed_hand_with_middle_finger_extended": "🖕", + "mag_right": "🔎", + "arrow_right_hook": "↪", + "sweet_potato": "🍠", + "robot": "🤖", + "rolled__up_newspaper": "🗞", + "rowboat": "🚣", + "runner": "🏃", + "running": 
"🏃", + "running_shirt_with_sash": "🎽", + "boat": "⛵", + "scales": "⚖", + "school_satchel": "🎒", + "scorpius": "♏", + "see_no_evil": "🙈", + "sheep": "🐑", + "stars": "🌠", + "cake": "🍰", + "six_pointed_star": "🔯", + "ski": "🎿", + "sleeping_accommodation": "🛌", + "sleeping": "😴", + "sleepy": "😪", + "sleuth_or_spy": "🕵", + "heart_eyes_cat": "😻", + "smiley_cat": "😺", + "innocent": "😇", + "heart_eyes": "😍", + "smiling_imp": "😈", + "smiley": "😃", + "sweat_smile": "😅", + "smile": "😄", + "laughing": "😆", + "satisfied": "😆", + "blush": "😊", + "smirk": "😏", + "smoking": "🚬", + "snow_capped_mountain": "🏔", + "soccer": "⚽", + "icecream": "🍦", + "soon": "🔜", + "arrow_lower_right": "↘", + "arrow_lower_left": "↙", + "speak_no_evil": "🙊", + "speaker": "🔈", + "mute": "🔇", + "sound": "🔉", + "loud_sound": "🔊", + "speaking_head_in_silhouette": "🗣", + "spiral_calendar_pad": "🗓", + "spiral_note_pad": "🗒", + "shell": "🐚", + "sweat_drops": "💦", + "u5272": "🈹", + "u5408": "🈴", + "u55b6": "🈺", + "u6307": "🈯", + "u6708": "🈷", + "u6709": "🈶", + "u6e80": "🈵", + "u7121": "🈚", + "u7533": "🈸", + "u7981": "🈲", + "u7a7a": "🈳", + "cl": "🆑", + "cool": "🆒", + "free": "🆓", + "id": "🆔", + "koko": "🈁", + "sa": "🈂", + "new": "🆕", + "ng": "🆖", + "ok": "🆗", + "sos": "🆘", + "up": "🆙", + "vs": "🆚", + "steam_locomotive": "🚂", + "ramen": "🍜", + "partly_sunny": "⛅", + "city_sunrise": "🌇", + "surfer": "🏄", + "swimmer": "🏊", + "shirt": "👕", + "tshirt": "👕", + "table_tennis_paddle_and_ball": "🏓", + "tea": "🍵", + "tv": "📺", + "three_button_mouse": "🖱", + "+1": "👍", + "thumbsup": "👍", + "__1": "👎", + "-1": "👎", + "thumbsdown": "👎", + "thunder_cloud_and_rain": "⛈", + "tiger2": "🐅", + "tophat": "🎩", + "top": "🔝", + "tm": "™", + "train2": "🚆", + "triangular_flag_on_post": "🚩", + "trident": "🔱", + "twisted_rightwards_arrows": "🔀", + "unamused": "😒", + "small_red_triangle": "🔺", + "arrow_up_small": "🔼", + "arrow_up_down": "↕", + "upside__down_face": "🙃", + "arrow_up": "⬆", + "v": "✌", + "vhs": "📼", + "wc": "🚾", + "ocean": "🌊", + "waving_black_flag": "🏴", + "wave": "👋", + "waving_white_flag": "🏳", + "moon": "🌔", + "scream_cat": "🙀", + "weary": "😩", + "weight_lifter": "🏋", + "whale2": "🐋", + "wheelchair": "♿", + "point_down": "👇", + "grey_exclamation": "❕", + "white_frowning_face": "☹", + "white_check_mark": "✅", + "point_left": "👈", + "white_medium_small_square": "◽", + "star": "⭐", + "grey_question": "❔", + "point_right": "👉", + "relaxed": "☺", + "white_sun_behind_cloud": "🌥", + "white_sun_behind_cloud_with_rain": "🌦", + "white_sun_with_small_cloud": "🌤", + "point_up_2": "👆", + "point_up": "☝", + "wind_blowing_face": "🌬", + "wink": "😉", + "wolf": "🐺", + "dancers": "👯", + "boot": "👢", + "womans_clothes": "👚", + "womans_hat": "👒", + "sandal": "👡", + "womens": "🚺", + "worried": "😟", + "gift": "🎁", + "zipper__mouth_face": "🤐", + "regional_indicator_a": "🇦", + "regional_indicator_b": "🇧", + "regional_indicator_c": "🇨", + "regional_indicator_d": "🇩", + "regional_indicator_e": "🇪", + "regional_indicator_f": "🇫", + "regional_indicator_g": "🇬", + "regional_indicator_h": "🇭", + "regional_indicator_i": "🇮", + "regional_indicator_j": "🇯", + "regional_indicator_k": "🇰", + "regional_indicator_l": "🇱", + "regional_indicator_m": "🇲", + "regional_indicator_n": "🇳", + "regional_indicator_o": "🇴", + "regional_indicator_p": "🇵", + "regional_indicator_q": "🇶", + "regional_indicator_r": "🇷", + "regional_indicator_s": "🇸", + "regional_indicator_t": "🇹", + "regional_indicator_u": "🇺", + "regional_indicator_v": "🇻", + "regional_indicator_w": "🇼", + "regional_indicator_x": 
"🇽", + "regional_indicator_y": "🇾", + "regional_indicator_z": "🇿", +} diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_emoji_replace.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_emoji_replace.py new file mode 100644 index 0000000..bb2cafa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_emoji_replace.py @@ -0,0 +1,32 @@ +from typing import Callable, Match, Optional +import re + +from ._emoji_codes import EMOJI + + +_ReStringMatch = Match[str] # regex match object +_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub +_EmojiSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re + + +def _emoji_replace( + text: str, + default_variant: Optional[str] = None, + _emoji_sub: _EmojiSubMethod = re.compile(r"(:(\S*?)(?:(?:\-)(emoji|text))?:)").sub, +) -> str: + """Replace emoji code in text.""" + get_emoji = EMOJI.__getitem__ + variants = {"text": "\uFE0E", "emoji": "\uFE0F"} + get_variant = variants.get + default_variant_code = variants.get(default_variant, "") if default_variant else "" + + def do_replace(match: Match[str]) -> str: + emoji_code, emoji_name, variant = match.groups() + try: + return get_emoji(emoji_name.lower()) + get_variant( + variant, default_variant_code + ) + except KeyError: + return emoji_code + + return _emoji_sub(do_replace, text) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_export_format.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_export_format.py new file mode 100644 index 0000000..e7527e5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_export_format.py @@ -0,0 +1,76 @@ +CONSOLE_HTML_FORMAT = """\ + + + + + + + +
{code}
+ + +""" + +CONSOLE_SVG_FORMAT = """\ + + + + + + + + + {lines} + + + {chrome} + + {backgrounds} + + {matrix} + + + +""" + +_SVG_FONT_FAMILY = "Rich Fira Code" +_SVG_CLASSES_PREFIX = "rich-svg" diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_extension.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_extension.py new file mode 100644 index 0000000..cbd6da9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_extension.py @@ -0,0 +1,10 @@ +from typing import Any + + +def load_ipython_extension(ip: Any) -> None: # pragma: no cover + # prevent circular import + from pip._vendor.rich.pretty import install + from pip._vendor.rich.traceback import install as tr_install + + install() + tr_install() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_fileno.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_fileno.py new file mode 100644 index 0000000..b17ee65 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_fileno.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from typing import IO, Callable + + +def get_fileno(file_like: IO[str]) -> int | None: + """Get fileno() from a file, accounting for poorly implemented file-like objects. + + Args: + file_like (IO): A file-like object. + + Returns: + int | None: The result of fileno if available, or None if operation failed. + """ + fileno: Callable[[], int] | None = getattr(file_like, "fileno", None) + if fileno is not None: + try: + return fileno() + except Exception: + # `fileno` is documented as potentially raising a OSError + # Alas, from the issues, there are so many poorly implemented file-like objects, + # that `fileno()` can raise just about anything. + return None + return None diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_inspect.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_inspect.py new file mode 100644 index 0000000..27d65ce --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_inspect.py @@ -0,0 +1,268 @@ +import inspect +from inspect import cleandoc, getdoc, getfile, isclass, ismodule, signature +from typing import Any, Collection, Iterable, Optional, Tuple, Type, Union + +from .console import Group, RenderableType +from .control import escape_control_codes +from .highlighter import ReprHighlighter +from .jupyter import JupyterMixin +from .panel import Panel +from .pretty import Pretty +from .table import Table +from .text import Text, TextType + + +def _first_paragraph(doc: str) -> str: + """Get the first paragraph from a docstring.""" + paragraph, _, _ = doc.partition("\n\n") + return paragraph + + +class Inspect(JupyterMixin): + """A renderable to inspect any Python Object. + + Args: + obj (Any): An object to inspect. + title (str, optional): Title to display over inspect result, or None use type. Defaults to None. + help (bool, optional): Show full help text rather than just first paragraph. Defaults to False. + methods (bool, optional): Enable inspection of callables. Defaults to False. + docs (bool, optional): Also render doc strings. Defaults to True. + private (bool, optional): Show private attributes (beginning with underscore). Defaults to False. + dunder (bool, optional): Show attributes starting with double underscore. Defaults to False. + sort (bool, optional): Sort attributes alphabetically. Defaults to True. + all (bool, optional): Show all attributes. Defaults to False. + value (bool, optional): Pretty print value of object. Defaults to True. 
+ """ + + def __init__( + self, + obj: Any, + *, + title: Optional[TextType] = None, + help: bool = False, + methods: bool = False, + docs: bool = True, + private: bool = False, + dunder: bool = False, + sort: bool = True, + all: bool = True, + value: bool = True, + ) -> None: + self.highlighter = ReprHighlighter() + self.obj = obj + self.title = title or self._make_title(obj) + if all: + methods = private = dunder = True + self.help = help + self.methods = methods + self.docs = docs or help + self.private = private or dunder + self.dunder = dunder + self.sort = sort + self.value = value + + def _make_title(self, obj: Any) -> Text: + """Make a default title.""" + title_str = ( + str(obj) + if (isclass(obj) or callable(obj) or ismodule(obj)) + else str(type(obj)) + ) + title_text = self.highlighter(title_str) + return title_text + + def __rich__(self) -> Panel: + return Panel.fit( + Group(*self._render()), + title=self.title, + border_style="scope.border", + padding=(0, 1), + ) + + def _get_signature(self, name: str, obj: Any) -> Optional[Text]: + """Get a signature for a callable.""" + try: + _signature = str(signature(obj)) + ":" + except ValueError: + _signature = "(...)" + except TypeError: + return None + + source_filename: Optional[str] = None + try: + source_filename = getfile(obj) + except (OSError, TypeError): + # OSError is raised if obj has no source file, e.g. when defined in REPL. + pass + + callable_name = Text(name, style="inspect.callable") + if source_filename: + callable_name.stylize(f"link file://{source_filename}") + signature_text = self.highlighter(_signature) + + qualname = name or getattr(obj, "__qualname__", name) + + # If obj is a module, there may be classes (which are callable) to display + if inspect.isclass(obj): + prefix = "class" + elif inspect.iscoroutinefunction(obj): + prefix = "async def" + else: + prefix = "def" + + qual_signature = Text.assemble( + (f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"), + (qualname, "inspect.callable"), + signature_text, + ) + + return qual_signature + + def _render(self) -> Iterable[RenderableType]: + """Render object.""" + + def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]: + key, (_error, value) = item + return (callable(value), key.strip("_").lower()) + + def safe_getattr(attr_name: str) -> Tuple[Any, Any]: + """Get attribute or any exception.""" + try: + return (None, getattr(obj, attr_name)) + except Exception as error: + return (error, None) + + obj = self.obj + keys = dir(obj) + total_items = len(keys) + if not self.dunder: + keys = [key for key in keys if not key.startswith("__")] + if not self.private: + keys = [key for key in keys if not key.startswith("_")] + not_shown_count = total_items - len(keys) + items = [(key, safe_getattr(key)) for key in keys] + if self.sort: + items.sort(key=sort_items) + + items_table = Table.grid(padding=(0, 1), expand=False) + items_table.add_column(justify="right") + add_row = items_table.add_row + highlighter = self.highlighter + + if callable(obj): + signature = self._get_signature("", obj) + if signature is not None: + yield signature + yield "" + + if self.docs: + _doc = self._get_formatted_doc(obj) + if _doc is not None: + doc_text = Text(_doc, style="inspect.help") + doc_text = highlighter(doc_text) + yield doc_text + yield "" + + if self.value and not (isclass(obj) or callable(obj) or ismodule(obj)): + yield Panel( + Pretty(obj, indent_guides=True, max_length=10, max_string=60), + border_style="inspect.value.border", + ) + yield "" + + for key, (error, value) in 
items: + key_text = Text.assemble( + ( + key, + "inspect.attr.dunder" if key.startswith("__") else "inspect.attr", + ), + (" =", "inspect.equals"), + ) + if error is not None: + warning = key_text.copy() + warning.stylize("inspect.error") + add_row(warning, highlighter(repr(error))) + continue + + if callable(value): + if not self.methods: + continue + + _signature_text = self._get_signature(key, value) + if _signature_text is None: + add_row(key_text, Pretty(value, highlighter=highlighter)) + else: + if self.docs: + docs = self._get_formatted_doc(value) + if docs is not None: + _signature_text.append("\n" if "\n" in docs else " ") + doc = highlighter(docs) + doc.stylize("inspect.doc") + _signature_text.append(doc) + + add_row(key_text, _signature_text) + else: + add_row(key_text, Pretty(value, highlighter=highlighter)) + if items_table.row_count: + yield items_table + elif not_shown_count: + yield Text.from_markup( + f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] " + f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options." + )
+ + def _get_formatted_doc(self, object_: Any) -> Optional[str]: + """ + Extract the docstring of an object, process it, and return it. + The processing consists of cleaning up the docstring's indentation, + taking only its first paragraph if `self.help` is not True, + and escaping its control codes. + + Args: + object_ (Any): the object to get the docstring from. + + Returns: + Optional[str]: the processed docstring, or None if no docstring was found. + """ + docs = getdoc(object_) + if docs is None: + return None + docs = cleandoc(docs).strip() + if not self.help: + docs = _first_paragraph(docs) + return escape_control_codes(docs)
+ + +def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]: + """Returns the MRO of an object's class, or of the object itself if it's a class.""" + if not hasattr(obj, "__mro__"): + # N.B. we cannot use `if type(obj) is type` here because it doesn't work with + # some types of classes, such as the ones that use abc.ABCMeta. + obj = type(obj) + return getattr(obj, "__mro__", ()) + + +def get_object_types_mro_as_strings(obj: object) -> Collection[str]: + """ + Returns the MRO of an object's class as fully qualified names, or of the object itself if it's a class. + + Examples: + `object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']` + """ + return [ + f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}' + for type_ in get_object_types_mro(obj) + ] + + +def is_object_one_of_types( + obj: object, fully_qualified_types_names: Collection[str] +) -> bool: + """ + Returns `True` if the given object's class (or the object itself, if it's a class) has one of the + fully qualified names in its MRO.
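+ + Example (an illustrative doctest-style sketch; ``JSONDecoder`` stands in for any class and is not singled out upstream):: + + >>> from json import JSONDecoder + >>> is_object_one_of_types(JSONDecoder(), {"json.decoder.JSONDecoder"}) + True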
+ """ + for type_name in get_object_types_mro_as_strings(obj): + if type_name in fully_qualified_types_names: + return True + return False diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_log_render.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_log_render.py new file mode 100644 index 0000000..fc16c84 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_log_render.py @@ -0,0 +1,94 @@ +from datetime import datetime +from typing import Iterable, List, Optional, TYPE_CHECKING, Union, Callable + + +from .text import Text, TextType + +if TYPE_CHECKING: + from .console import Console, ConsoleRenderable, RenderableType + from .table import Table + +FormatTimeCallable = Callable[[datetime], Text] + + +class LogRender: + def __init__( + self, + show_time: bool = True, + show_level: bool = False, + show_path: bool = True, + time_format: Union[str, FormatTimeCallable] = "[%x %X]", + omit_repeated_times: bool = True, + level_width: Optional[int] = 8, + ) -> None: + self.show_time = show_time + self.show_level = show_level + self.show_path = show_path + self.time_format = time_format + self.omit_repeated_times = omit_repeated_times + self.level_width = level_width + self._last_time: Optional[Text] = None + + def __call__( + self, + console: "Console", + renderables: Iterable["ConsoleRenderable"], + log_time: Optional[datetime] = None, + time_format: Optional[Union[str, FormatTimeCallable]] = None, + level: TextType = "", + path: Optional[str] = None, + line_no: Optional[int] = None, + link_path: Optional[str] = None, + ) -> "Table": + from .containers import Renderables + from .table import Table + + output = Table.grid(padding=(0, 1)) + output.expand = True + if self.show_time: + output.add_column(style="log.time") + if self.show_level: + output.add_column(style="log.level", width=self.level_width) + output.add_column(ratio=1, style="log.message", overflow="fold") + if self.show_path and path: + output.add_column(style="log.path") + row: List["RenderableType"] = [] + if self.show_time: + log_time = log_time or console.get_datetime() + time_format = time_format or self.time_format + if callable(time_format): + log_time_display = time_format(log_time) + else: + log_time_display = Text(log_time.strftime(time_format)) + if log_time_display == self._last_time and self.omit_repeated_times: + row.append(Text(" " * len(log_time_display))) + else: + row.append(log_time_display) + self._last_time = log_time_display + if self.show_level: + row.append(level) + + row.append(Renderables(renderables)) + if self.show_path and path: + path_text = Text() + path_text.append( + path, style=f"link file://{link_path}" if link_path else "" + ) + if line_no: + path_text.append(":") + path_text.append( + f"{line_no}", + style=f"link file://{link_path}#{line_no}" if link_path else "", + ) + row.append(path_text) + + output.add_row(*row) + return output + + +if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.console import Console + + c = Console() + c.print("[on blue]Hello", justify="right") + c.log("[on blue]hello", justify="right") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_loop.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_loop.py new file mode 100644 index 0000000..01c6caf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_loop.py @@ -0,0 +1,43 @@ +from typing import Iterable, Tuple, TypeVar + +T = TypeVar("T") + + +def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: + """Iterate and 
generate a tuple with a flag for first value.""" + iter_values = iter(values) + try: + value = next(iter_values) + except StopIteration: + return + yield True, value + for value in iter_values: + yield False, value + + +def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: + """Iterate and generate a tuple with a flag for last value.""" + iter_values = iter(values) + try: + previous_value = next(iter_values) + except StopIteration: + return + for value in iter_values: + yield False, previous_value + previous_value = value + yield True, previous_value + + +def loop_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]: + """Iterate and generate a tuple with a flag for first and last value.""" + iter_values = iter(values) + try: + previous_value = next(iter_values) + except StopIteration: + return + first = True + for value in iter_values: + yield first, False, previous_value + first = False + previous_value = value + yield first, True, previous_value diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_null_file.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_null_file.py new file mode 100644 index 0000000..6ae05d3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_null_file.py @@ -0,0 +1,69 @@ +from types import TracebackType +from typing import IO, Iterable, Iterator, List, Optional, Type + + +class NullFile(IO[str]): + def close(self) -> None: + pass + + def isatty(self) -> bool: + return False + + def read(self, __n: int = 1) -> str: + return "" + + def readable(self) -> bool: + return False + + def readline(self, __limit: int = 1) -> str: + return "" + + def readlines(self, __hint: int = 1) -> List[str]: + return [] + + def seek(self, __offset: int, __whence: int = 1) -> int: + return 0 + + def seekable(self) -> bool: + return False + + def tell(self) -> int: + return 0 + + def truncate(self, __size: Optional[int] = 1) -> int: + return 0 + + def writable(self) -> bool: + return False + + def writelines(self, __lines: Iterable[str]) -> None: + pass + + def __next__(self) -> str: + return "" + + def __iter__(self) -> Iterator[str]: + return iter([""]) + + def __enter__(self) -> IO[str]: + return self + + def __exit__( + self, + __t: Optional[Type[BaseException]], + __value: Optional[BaseException], + __traceback: Optional[TracebackType], + ) -> None: + pass + + def write(self, text: str) -> int: + return 0 + + def flush(self) -> None: + pass + + def fileno(self) -> int: + return -1 + + +NULL_FILE = NullFile() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_palettes.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_palettes.py new file mode 100644 index 0000000..3c748d3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_palettes.py @@ -0,0 +1,309 @@ +from .palette import Palette + + +# Taken from https://en.wikipedia.org/wiki/ANSI_escape_code (Windows 10 column) +WINDOWS_PALETTE = Palette( + [ + (12, 12, 12), + (197, 15, 31), + (19, 161, 14), + (193, 156, 0), + (0, 55, 218), + (136, 23, 152), + (58, 150, 221), + (204, 204, 204), + (118, 118, 118), + (231, 72, 86), + (22, 198, 12), + (249, 241, 165), + (59, 120, 255), + (180, 0, 158), + (97, 214, 214), + (242, 242, 242), + ] +) + +# # The standard ansi colors (including bright variants) +STANDARD_PALETTE = Palette( + [ + (0, 0, 0), + (170, 0, 0), + (0, 170, 0), + (170, 85, 0), + (0, 0, 170), + (170, 0, 170), + (0, 170, 170), + (170, 170, 170), + (85, 85, 85), + (255, 85, 85), + (85, 255, 85), + (255, 255, 85), + (85, 85, 255), 
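+        # The last eight entries are the bright variants. A Palette is queried
+        # via its match() method (assumed API from rich.palette), which returns
+        # the index of the closest entry; e.g. a reddish triple like
+        # (250, 80, 80) would match index 9, bright red (255, 85, 85).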
+ (255, 85, 255), + (85, 255, 255), + (255, 255, 255), + ] +) + + +# The 256 color palette +EIGHT_BIT_PALETTE = Palette( + [ + (0, 0, 0), + (128, 0, 0), + (0, 128, 0), + (128, 128, 0), + (0, 0, 128), + (128, 0, 128), + (0, 128, 128), + (192, 192, 192), + (128, 128, 128), + (255, 0, 0), + (0, 255, 0), + (255, 255, 0), + (0, 0, 255), + (255, 0, 255), + (0, 255, 255), + (255, 255, 255), + (0, 0, 0), + (0, 0, 95), + (0, 0, 135), + (0, 0, 175), + (0, 0, 215), + (0, 0, 255), + (0, 95, 0), + (0, 95, 95), + (0, 95, 135), + (0, 95, 175), + (0, 95, 215), + (0, 95, 255), + (0, 135, 0), + (0, 135, 95), + (0, 135, 135), + (0, 135, 175), + (0, 135, 215), + (0, 135, 255), + (0, 175, 0), + (0, 175, 95), + (0, 175, 135), + (0, 175, 175), + (0, 175, 215), + (0, 175, 255), + (0, 215, 0), + (0, 215, 95), + (0, 215, 135), + (0, 215, 175), + (0, 215, 215), + (0, 215, 255), + (0, 255, 0), + (0, 255, 95), + (0, 255, 135), + (0, 255, 175), + (0, 255, 215), + (0, 255, 255), + (95, 0, 0), + (95, 0, 95), + (95, 0, 135), + (95, 0, 175), + (95, 0, 215), + (95, 0, 255), + (95, 95, 0), + (95, 95, 95), + (95, 95, 135), + (95, 95, 175), + (95, 95, 215), + (95, 95, 255), + (95, 135, 0), + (95, 135, 95), + (95, 135, 135), + (95, 135, 175), + (95, 135, 215), + (95, 135, 255), + (95, 175, 0), + (95, 175, 95), + (95, 175, 135), + (95, 175, 175), + (95, 175, 215), + (95, 175, 255), + (95, 215, 0), + (95, 215, 95), + (95, 215, 135), + (95, 215, 175), + (95, 215, 215), + (95, 215, 255), + (95, 255, 0), + (95, 255, 95), + (95, 255, 135), + (95, 255, 175), + (95, 255, 215), + (95, 255, 255), + (135, 0, 0), + (135, 0, 95), + (135, 0, 135), + (135, 0, 175), + (135, 0, 215), + (135, 0, 255), + (135, 95, 0), + (135, 95, 95), + (135, 95, 135), + (135, 95, 175), + (135, 95, 215), + (135, 95, 255), + (135, 135, 0), + (135, 135, 95), + (135, 135, 135), + (135, 135, 175), + (135, 135, 215), + (135, 135, 255), + (135, 175, 0), + (135, 175, 95), + (135, 175, 135), + (135, 175, 175), + (135, 175, 215), + (135, 175, 255), + (135, 215, 0), + (135, 215, 95), + (135, 215, 135), + (135, 215, 175), + (135, 215, 215), + (135, 215, 255), + (135, 255, 0), + (135, 255, 95), + (135, 255, 135), + (135, 255, 175), + (135, 255, 215), + (135, 255, 255), + (175, 0, 0), + (175, 0, 95), + (175, 0, 135), + (175, 0, 175), + (175, 0, 215), + (175, 0, 255), + (175, 95, 0), + (175, 95, 95), + (175, 95, 135), + (175, 95, 175), + (175, 95, 215), + (175, 95, 255), + (175, 135, 0), + (175, 135, 95), + (175, 135, 135), + (175, 135, 175), + (175, 135, 215), + (175, 135, 255), + (175, 175, 0), + (175, 175, 95), + (175, 175, 135), + (175, 175, 175), + (175, 175, 215), + (175, 175, 255), + (175, 215, 0), + (175, 215, 95), + (175, 215, 135), + (175, 215, 175), + (175, 215, 215), + (175, 215, 255), + (175, 255, 0), + (175, 255, 95), + (175, 255, 135), + (175, 255, 175), + (175, 255, 215), + (175, 255, 255), + (215, 0, 0), + (215, 0, 95), + (215, 0, 135), + (215, 0, 175), + (215, 0, 215), + (215, 0, 255), + (215, 95, 0), + (215, 95, 95), + (215, 95, 135), + (215, 95, 175), + (215, 95, 215), + (215, 95, 255), + (215, 135, 0), + (215, 135, 95), + (215, 135, 135), + (215, 135, 175), + (215, 135, 215), + (215, 135, 255), + (215, 175, 0), + (215, 175, 95), + (215, 175, 135), + (215, 175, 175), + (215, 175, 215), + (215, 175, 255), + (215, 215, 0), + (215, 215, 95), + (215, 215, 135), + (215, 215, 175), + (215, 215, 215), + (215, 215, 255), + (215, 255, 0), + (215, 255, 95), + (215, 255, 135), + (215, 255, 175), + (215, 255, 215), + (215, 255, 255), + (255, 0, 0), + (255, 0, 95), + 
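+        # Entries 16-231 form the xterm 6x6x6 colour cube: channel levels are
+        # drawn from (0, 95, 135, 175, 215, 255), so for r, g, b in range(6)
+        # the palette index is 16 + 36*r + 6*g + b. The final 24 entries are
+        # the grayscale ramp (8, 18, ..., 238).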
(255, 0, 135), + (255, 0, 175), + (255, 0, 215), + (255, 0, 255), + (255, 95, 0), + (255, 95, 95), + (255, 95, 135), + (255, 95, 175), + (255, 95, 215), + (255, 95, 255), + (255, 135, 0), + (255, 135, 95), + (255, 135, 135), + (255, 135, 175), + (255, 135, 215), + (255, 135, 255), + (255, 175, 0), + (255, 175, 95), + (255, 175, 135), + (255, 175, 175), + (255, 175, 215), + (255, 175, 255), + (255, 215, 0), + (255, 215, 95), + (255, 215, 135), + (255, 215, 175), + (255, 215, 215), + (255, 215, 255), + (255, 255, 0), + (255, 255, 95), + (255, 255, 135), + (255, 255, 175), + (255, 255, 215), + (255, 255, 255), + (8, 8, 8), + (18, 18, 18), + (28, 28, 28), + (38, 38, 38), + (48, 48, 48), + (58, 58, 58), + (68, 68, 68), + (78, 78, 78), + (88, 88, 88), + (98, 98, 98), + (108, 108, 108), + (118, 118, 118), + (128, 128, 128), + (138, 138, 138), + (148, 148, 148), + (158, 158, 158), + (168, 168, 168), + (178, 178, 178), + (188, 188, 188), + (198, 198, 198), + (208, 208, 208), + (218, 218, 218), + (228, 228, 228), + (238, 238, 238), + ] +) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_pick.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_pick.py new file mode 100644 index 0000000..4f6d8b2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_pick.py @@ -0,0 +1,17 @@ +from typing import Optional + + +def pick_bool(*values: Optional[bool]) -> bool: + """Pick the first non-none bool or return the last value. + + Args: + *values (bool): Any number of boolean or None values. + + Returns: + bool: First non-none boolean. + """ + assert values, "1 or more values required" + for value in values: + if value is not None: + return value + return bool(value) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_ratio.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_ratio.py new file mode 100644 index 0000000..5fd5a38 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_ratio.py @@ -0,0 +1,153 @@ +from fractions import Fraction +from math import ceil +from typing import cast, List, Optional, Sequence, Protocol + + +class Edge(Protocol): + """Any object that defines an edge (such as Layout).""" + + size: Optional[int] = None + ratio: int = 1 + minimum_size: int = 1 + + +def ratio_resolve(total: int, edges: Sequence[Edge]) -> List[int]: + """Divide total space to satisfy size, ratio, and minimum_size, constraints. + + The returned list of integers should add up to total in most cases, unless it is + impossible to satisfy all the constraints. For instance, if there are two edges + with a minimum size of 20 each and `total` is 30 then the returned list will be + greater than total. In practice, this would mean that a Layout object would + clip the rows that would overflow the screen height. + + Args: + total (int): Total number of characters. + edges (List[Edge]): Edges within total space. + + Returns: + List[int]: Number of characters for each edge. 
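+
+    For example, three flexible edges of ratio 1 dividing a `total` of 10
+    resolve to `[3, 3, 4]`: the fractional remainder of each portion is carried
+    into the next edge so the integer sizes still sum to the total.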
+    """
+    # Size of edge or None for yet to be determined
+    sizes = [(edge.size or None) for edge in edges]
+
+    _Fraction = Fraction
+
+    # While any edges haven't been calculated
+    while None in sizes:
+        # Get flexible edges and index to map these back on to sizes list
+        flexible_edges = [
+            (index, edge)
+            for index, (size, edge) in enumerate(zip(sizes, edges))
+            if size is None
+        ]
+        # Remaining space in total
+        remaining = total - sum(size or 0 for size in sizes)
+        if remaining <= 0:
+            # No room for flexible edges
+            return [
+                ((edge.minimum_size or 1) if size is None else size)
+                for size, edge in zip(sizes, edges)
+            ]
+        # Calculate number of characters in a ratio portion
+        portion = _Fraction(
+            remaining, sum((edge.ratio or 1) for _, edge in flexible_edges)
+        )
+
+        # If any edges will be less than their minimum, replace size with the minimum
+        for index, edge in flexible_edges:
+            if portion * edge.ratio <= edge.minimum_size:
+                sizes[index] = edge.minimum_size
+                # New fixed size will invalidate calculations, so we need to repeat the process
+                break
+        else:
+            # Distribute flexible space and compensate for rounding error
+            # Since edge sizes can only be integers we need to add the remainder
+            # to the following line
+            remainder = _Fraction(0)
+            for index, edge in flexible_edges:
+                size, remainder = divmod(portion * edge.ratio + remainder, 1)
+                sizes[index] = size
+            break
+    # Sizes now contains integers only
+    return cast(List[int], sizes)
+
+
+def ratio_reduce(
+    total: int, ratios: List[int], maximums: List[int], values: List[int]
+) -> List[int]:
+    """Divide an integer total into parts based on ratios.
+
+    Args:
+        total (int): The total to divide.
+        ratios (List[int]): A list of integer ratios.
+        maximums (List[int]): List of maximum values for each slot.
+        values (List[int]): List of values.
+
+    Returns:
+        List[int]: A list of integers guaranteed to sum to total.
+    """
+    ratios = [ratio if _max else 0 for ratio, _max in zip(ratios, maximums)]
+    total_ratio = sum(ratios)
+    if not total_ratio:
+        return values[:]
+    total_remaining = total
+    result: List[int] = []
+    append = result.append
+    for ratio, maximum, value in zip(ratios, maximums, values):
+        if ratio and total_ratio > 0:
+            distributed = min(maximum, round(ratio * total_remaining / total_ratio))
+            append(value - distributed)
+            total_remaining -= distributed
+            total_ratio -= ratio
+        else:
+            append(value)
+    return result
+
+
+def ratio_distribute(
+    total: int, ratios: List[int], minimums: Optional[List[int]] = None
+) -> List[int]:
+    """Distribute an integer total into parts based on ratios.
+
+    Args:
+        total (int): The total to divide.
+        ratios (List[int]): A list of integer ratios.
+        minimums (List[int]): List of minimum values for each slot.
+
+    Returns:
+        List[int]: A list of integers guaranteed to sum to total.
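+
+    Example:
+        `ratio_distribute(10, [1, 1, 1])` returns `[4, 3, 3]`: each slot takes
+        the ceiling of its share of the remaining space, so the rounding
+        surplus lands on earlier slots.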
+ """ + if minimums: + ratios = [ratio if _min else 0 for ratio, _min in zip(ratios, minimums)] + total_ratio = sum(ratios) + assert total_ratio > 0, "Sum of ratios must be > 0" + + total_remaining = total + distributed_total: List[int] = [] + append = distributed_total.append + if minimums is None: + _minimums = [0] * len(ratios) + else: + _minimums = minimums + for ratio, minimum in zip(ratios, _minimums): + if total_ratio > 0: + distributed = max(minimum, ceil(ratio * total_remaining / total_ratio)) + else: + distributed = total_remaining + append(distributed) + total_ratio -= ratio + total_remaining -= distributed + return distributed_total + + +if __name__ == "__main__": + from dataclasses import dataclass + + @dataclass + class E: + size: Optional[int] = None + ratio: int = 1 + minimum_size: int = 1 + + resolved = ratio_resolve(110, [E(None, 1, 1), E(None, 1, 1), E(None, 1, 1)]) + print(sum(resolved)) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_spinners.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_spinners.py new file mode 100644 index 0000000..d0bb1fe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_spinners.py @@ -0,0 +1,482 @@ +""" +Spinners are from: +* cli-spinners: + MIT License + Copyright (c) Sindre Sorhus (sindresorhus.com) + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights to + use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, + INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE + FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + IN THE SOFTWARE. 
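+
+Each entry below maps a spinner name to its frame `interval` in milliseconds
+and its `frames`, given either as one string (one character per frame) or as a
+list of multi-character frames. Consumers look spinners up by name, e.g.
+(illustrative) rich.spinner.Spinner("dots") or
+Console().status("working...", spinner="dots").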
+""" + +SPINNERS = { + "dots": { + "interval": 80, + "frames": "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏", + }, + "dots2": {"interval": 80, "frames": "⣾⣽⣻⢿⡿⣟⣯⣷"}, + "dots3": { + "interval": 80, + "frames": "⠋⠙⠚⠞⠖⠦⠴⠲⠳⠓", + }, + "dots4": { + "interval": 80, + "frames": "⠄⠆⠇⠋⠙⠸⠰⠠⠰⠸⠙⠋⠇⠆", + }, + "dots5": { + "interval": 80, + "frames": "⠋⠙⠚⠒⠂⠂⠒⠲⠴⠦⠖⠒⠐⠐⠒⠓⠋", + }, + "dots6": { + "interval": 80, + "frames": "⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠴⠲⠒⠂⠂⠒⠚⠙⠉⠁", + }, + "dots7": { + "interval": 80, + "frames": "⠈⠉⠋⠓⠒⠐⠐⠒⠖⠦⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈", + }, + "dots8": { + "interval": 80, + "frames": "⠁⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈⠈", + }, + "dots9": {"interval": 80, "frames": "⢹⢺⢼⣸⣇⡧⡗⡏"}, + "dots10": {"interval": 80, "frames": "⢄⢂⢁⡁⡈⡐⡠"}, + "dots11": {"interval": 100, "frames": "⠁⠂⠄⡀⢀⠠⠐⠈"}, + "dots12": { + "interval": 80, + "frames": [ + "⢀⠀", + "⡀⠀", + "⠄⠀", + "⢂⠀", + "⡂⠀", + "⠅⠀", + "⢃⠀", + "⡃⠀", + "⠍⠀", + "⢋⠀", + "⡋⠀", + "⠍⠁", + "⢋⠁", + "⡋⠁", + "⠍⠉", + "⠋⠉", + "⠋⠉", + "⠉⠙", + "⠉⠙", + "⠉⠩", + "⠈⢙", + "⠈⡙", + "⢈⠩", + "⡀⢙", + "⠄⡙", + "⢂⠩", + "⡂⢘", + "⠅⡘", + "⢃⠨", + "⡃⢐", + "⠍⡐", + "⢋⠠", + "⡋⢀", + "⠍⡁", + "⢋⠁", + "⡋⠁", + "⠍⠉", + "⠋⠉", + "⠋⠉", + "⠉⠙", + "⠉⠙", + "⠉⠩", + "⠈⢙", + "⠈⡙", + "⠈⠩", + "⠀⢙", + "⠀⡙", + "⠀⠩", + "⠀⢘", + "⠀⡘", + "⠀⠨", + "⠀⢐", + "⠀⡐", + "⠀⠠", + "⠀⢀", + "⠀⡀", + ], + }, + "dots8Bit": { + "interval": 80, + "frames": "⠀⠁⠂⠃⠄⠅⠆⠇⡀⡁⡂⡃⡄⡅⡆⡇⠈⠉⠊⠋⠌⠍⠎⠏⡈⡉⡊⡋⡌⡍⡎⡏⠐⠑⠒⠓⠔⠕⠖⠗⡐⡑⡒⡓⡔⡕⡖⡗⠘⠙⠚⠛⠜⠝⠞⠟⡘⡙" + "⡚⡛⡜⡝⡞⡟⠠⠡⠢⠣⠤⠥⠦⠧⡠⡡⡢⡣⡤⡥⡦⡧⠨⠩⠪⠫⠬⠭⠮⠯⡨⡩⡪⡫⡬⡭⡮⡯⠰⠱⠲⠳⠴⠵⠶⠷⡰⡱⡲⡳⡴⡵⡶⡷⠸⠹⠺⠻" + "⠼⠽⠾⠿⡸⡹⡺⡻⡼⡽⡾⡿⢀⢁⢂⢃⢄⢅⢆⢇⣀⣁⣂⣃⣄⣅⣆⣇⢈⢉⢊⢋⢌⢍⢎⢏⣈⣉⣊⣋⣌⣍⣎⣏⢐⢑⢒⢓⢔⢕⢖⢗⣐⣑⣒⣓⣔⣕" + "⣖⣗⢘⢙⢚⢛⢜⢝⢞⢟⣘⣙⣚⣛⣜⣝⣞⣟⢠⢡⢢⢣⢤⢥⢦⢧⣠⣡⣢⣣⣤⣥⣦⣧⢨⢩⢪⢫⢬⢭⢮⢯⣨⣩⣪⣫⣬⣭⣮⣯⢰⢱⢲⢳⢴⢵⢶⢷" + "⣰⣱⣲⣳⣴⣵⣶⣷⢸⢹⢺⢻⢼⢽⢾⢿⣸⣹⣺⣻⣼⣽⣾⣿", + }, + "line": {"interval": 130, "frames": ["-", "\\", "|", "/"]}, + "line2": {"interval": 100, "frames": "⠂-–—–-"}, + "pipe": {"interval": 100, "frames": "┤┘┴└├┌┬┐"}, + "simpleDots": {"interval": 400, "frames": [". ", ".. ", "...", " "]}, + "simpleDotsScrolling": { + "interval": 200, + "frames": [". ", ".. 
", "...", " ..", " .", " "], + }, + "star": {"interval": 70, "frames": "✶✸✹✺✹✷"}, + "star2": {"interval": 80, "frames": "+x*"}, + "flip": { + "interval": 70, + "frames": "___-``'´-___", + }, + "hamburger": {"interval": 100, "frames": "☱☲☴"}, + "growVertical": { + "interval": 120, + "frames": "▁▃▄▅▆▇▆▅▄▃", + }, + "growHorizontal": { + "interval": 120, + "frames": "▏▎▍▌▋▊▉▊▋▌▍▎", + }, + "balloon": {"interval": 140, "frames": " .oO@* "}, + "balloon2": {"interval": 120, "frames": ".oO°Oo."}, + "noise": {"interval": 100, "frames": "▓▒░"}, + "bounce": {"interval": 120, "frames": "⠁⠂⠄⠂"}, + "boxBounce": {"interval": 120, "frames": "▖▘▝▗"}, + "boxBounce2": {"interval": 100, "frames": "▌▀▐▄"}, + "triangle": {"interval": 50, "frames": "◢◣◤◥"}, + "arc": {"interval": 100, "frames": "◜◠◝◞◡◟"}, + "circle": {"interval": 120, "frames": "◡⊙◠"}, + "squareCorners": {"interval": 180, "frames": "◰◳◲◱"}, + "circleQuarters": {"interval": 120, "frames": "◴◷◶◵"}, + "circleHalves": {"interval": 50, "frames": "◐◓◑◒"}, + "squish": {"interval": 100, "frames": "╫╪"}, + "toggle": {"interval": 250, "frames": "⊶⊷"}, + "toggle2": {"interval": 80, "frames": "▫▪"}, + "toggle3": {"interval": 120, "frames": "□■"}, + "toggle4": {"interval": 100, "frames": "■□▪▫"}, + "toggle5": {"interval": 100, "frames": "▮▯"}, + "toggle6": {"interval": 300, "frames": "ဝ၀"}, + "toggle7": {"interval": 80, "frames": "⦾⦿"}, + "toggle8": {"interval": 100, "frames": "◍◌"}, + "toggle9": {"interval": 100, "frames": "◉◎"}, + "toggle10": {"interval": 100, "frames": "㊂㊀㊁"}, + "toggle11": {"interval": 50, "frames": "⧇⧆"}, + "toggle12": {"interval": 120, "frames": "☗☖"}, + "toggle13": {"interval": 80, "frames": "=*-"}, + "arrow": {"interval": 100, "frames": "←↖↑↗→↘↓↙"}, + "arrow2": { + "interval": 80, + "frames": ["⬆️ ", "↗️ ", "➡️ ", "↘️ ", "⬇️ ", "↙️ ", "⬅️ ", "↖️ "], + }, + "arrow3": { + "interval": 120, + "frames": ["▹▹▹▹▹", "▸▹▹▹▹", "▹▸▹▹▹", "▹▹▸▹▹", "▹▹▹▸▹", "▹▹▹▹▸"], + }, + "bouncingBar": { + "interval": 80, + "frames": [ + "[ ]", + "[= ]", + "[== ]", + "[=== ]", + "[ ===]", + "[ ==]", + "[ =]", + "[ ]", + "[ =]", + "[ ==]", + "[ ===]", + "[====]", + "[=== ]", + "[== ]", + "[= ]", + ], + }, + "bouncingBall": { + "interval": 80, + "frames": [ + "( ● )", + "( ● )", + "( ● )", + "( ● )", + "( ●)", + "( ● )", + "( ● )", + "( ● )", + "( ● )", + "(● )", + ], + }, + "smiley": {"interval": 200, "frames": ["😄 ", "😝 "]}, + "monkey": {"interval": 300, "frames": ["🙈 ", "🙈 ", "🙉 ", "🙊 "]}, + "hearts": {"interval": 100, "frames": ["💛 ", "💙 ", "💜 ", "💚 ", "❤️ "]}, + "clock": { + "interval": 100, + "frames": [ + "🕛 ", + "🕐 ", + "🕑 ", + "🕒 ", + "🕓 ", + "🕔 ", + "🕕 ", + "🕖 ", + "🕗 ", + "🕘 ", + "🕙 ", + "🕚 ", + ], + }, + "earth": {"interval": 180, "frames": ["🌍 ", "🌎 ", "🌏 "]}, + "material": { + "interval": 17, + "frames": [ + "█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "███▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "████▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "███████▁▁▁▁▁▁▁▁▁▁▁▁▁", + "████████▁▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "██████████▁▁▁▁▁▁▁▁▁▁", + "███████████▁▁▁▁▁▁▁▁▁", + "█████████████▁▁▁▁▁▁▁", + "██████████████▁▁▁▁▁▁", + "██████████████▁▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁▁██████████████▁▁▁▁", + "▁▁▁██████████████▁▁▁", + "▁▁▁▁█████████████▁▁▁", + "▁▁▁▁██████████████▁▁", + "▁▁▁▁██████████████▁▁", + "▁▁▁▁▁██████████████▁", + "▁▁▁▁▁██████████████▁", + "▁▁▁▁▁██████████████▁", + "▁▁▁▁▁▁██████████████", + "▁▁▁▁▁▁██████████████", + "▁▁▁▁▁▁▁█████████████", + 
"▁▁▁▁▁▁▁█████████████", + "▁▁▁▁▁▁▁▁████████████", + "▁▁▁▁▁▁▁▁████████████", + "▁▁▁▁▁▁▁▁▁███████████", + "▁▁▁▁▁▁▁▁▁███████████", + "▁▁▁▁▁▁▁▁▁▁██████████", + "▁▁▁▁▁▁▁▁▁▁██████████", + "▁▁▁▁▁▁▁▁▁▁▁▁████████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁███████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁██████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████", + "█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████", + "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "███▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "████▁▁▁▁▁▁▁▁▁▁▁▁▁▁██", + "█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "██████▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "████████▁▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "█████████▁▁▁▁▁▁▁▁▁▁▁", + "███████████▁▁▁▁▁▁▁▁▁", + "████████████▁▁▁▁▁▁▁▁", + "████████████▁▁▁▁▁▁▁▁", + "██████████████▁▁▁▁▁▁", + "██████████████▁▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁██████████████▁▁▁▁▁", + "▁▁▁█████████████▁▁▁▁", + "▁▁▁▁▁████████████▁▁▁", + "▁▁▁▁▁████████████▁▁▁", + "▁▁▁▁▁▁███████████▁▁▁", + "▁▁▁▁▁▁▁▁█████████▁▁▁", + "▁▁▁▁▁▁▁▁█████████▁▁▁", + "▁▁▁▁▁▁▁▁▁█████████▁▁", + "▁▁▁▁▁▁▁▁▁█████████▁▁", + "▁▁▁▁▁▁▁▁▁▁█████████▁", + "▁▁▁▁▁▁▁▁▁▁▁████████▁", + "▁▁▁▁▁▁▁▁▁▁▁████████▁", + "▁▁▁▁▁▁▁▁▁▁▁▁███████▁", + "▁▁▁▁▁▁▁▁▁▁▁▁███████▁", + "▁▁▁▁▁▁▁▁▁▁▁▁▁███████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁███████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + ], + }, + "moon": { + "interval": 80, + "frames": ["🌑 ", "🌒 ", "🌓 ", "🌔 ", "🌕 ", "🌖 ", "🌗 ", "🌘 "], + }, + "runner": {"interval": 140, "frames": ["🚶 ", "🏃 "]}, + "pong": { + "interval": 80, + "frames": [ + "▐⠂ ▌", + "▐⠈ ▌", + "▐ ⠂ ▌", + "▐ ⠠ ▌", + "▐ ⡀ ▌", + "▐ ⠠ ▌", + "▐ ⠂ ▌", + "▐ ⠈ ▌", + "▐ ⠂ ▌", + "▐ ⠠ ▌", + "▐ ⡀ ▌", + "▐ ⠠ ▌", + "▐ ⠂ ▌", + "▐ ⠈ ▌", + "▐ ⠂▌", + "▐ ⠠▌", + "▐ ⡀▌", + "▐ ⠠ ▌", + "▐ ⠂ ▌", + "▐ ⠈ ▌", + "▐ ⠂ ▌", + "▐ ⠠ ▌", + "▐ ⡀ ▌", + "▐ ⠠ ▌", + "▐ ⠂ ▌", + "▐ ⠈ ▌", + "▐ ⠂ ▌", + "▐ ⠠ ▌", + "▐ ⡀ ▌", + "▐⠠ ▌", + ], + }, + "shark": { + "interval": 120, + "frames": [ + "▐|\\____________▌", + "▐_|\\___________▌", + "▐__|\\__________▌", + "▐___|\\_________▌", + "▐____|\\________▌", + "▐_____|\\_______▌", + "▐______|\\______▌", + "▐_______|\\_____▌", + "▐________|\\____▌", + "▐_________|\\___▌", + "▐__________|\\__▌", + "▐___________|\\_▌", + "▐____________|\\▌", + "▐____________/|▌", + "▐___________/|_▌", + "▐__________/|__▌", + "▐_________/|___▌", + "▐________/|____▌", + "▐_______/|_____▌", + "▐______/|______▌", + "▐_____/|_______▌", + "▐____/|________▌", + "▐___/|_________▌", + "▐__/|__________▌", + "▐_/|___________▌", + "▐/|____________▌", + ], + }, + "dqpb": {"interval": 100, "frames": "dqpb"}, + "weather": { + "interval": 100, + "frames": [ + "☀️ ", + "☀️ ", + "☀️ ", + "🌤 ", + "⛅️ ", + "🌥 ", + "☁️ ", + "🌧 ", + "🌨 ", + "🌧 ", + "🌨 ", + "🌧 ", + "🌨 ", + "⛈ ", + "🌨 ", + "🌧 ", + "🌨 ", + "☁️ ", + "🌥 ", + "⛅️ ", + "🌤 ", + "☀️ ", + "☀️ ", + ], + }, + "christmas": {"interval": 400, "frames": "🌲🎄"}, + "grenade": { + "interval": 80, + "frames": [ + "، ", + "′ ", + " ´ ", + " ‾ ", + " ⸌", + " ⸊", + " |", + " ⁎", + " ⁕", + " ෴ ", + " ⁓", + " ", + " ", + " ", + ], + }, + "point": {"interval": 125, "frames": ["∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"]}, + "layer": {"interval": 150, "frames": "-=≡"}, + "betaWave": { + "interval": 80, + "frames": [ + "ρββββββ", + "βρβββββ", + "ββρββββ", + 
"βββρβββ", + "ββββρββ", + "βββββρβ", + "ββββββρ", + ], + }, + "aesthetic": { + "interval": 80, + "frames": [ + "▰▱▱▱▱▱▱", + "▰▰▱▱▱▱▱", + "▰▰▰▱▱▱▱", + "▰▰▰▰▱▱▱", + "▰▰▰▰▰▱▱", + "▰▰▰▰▰▰▱", + "▰▰▰▰▰▰▰", + "▰▱▱▱▱▱▱", + ], + }, +} diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_stack.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_stack.py new file mode 100644 index 0000000..194564e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_stack.py @@ -0,0 +1,16 @@ +from typing import List, TypeVar + +T = TypeVar("T") + + +class Stack(List[T]): + """A small shim over builtin list.""" + + @property + def top(self) -> T: + """Get top of stack.""" + return self[-1] + + def push(self, item: T) -> None: + """Push an item on to the stack (append in stack nomenclature).""" + self.append(item) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_timer.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_timer.py new file mode 100644 index 0000000..a2ca6be --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_timer.py @@ -0,0 +1,19 @@ +""" +Timer context manager, only used in debug. + +""" + +from time import time + +import contextlib +from typing import Generator + + +@contextlib.contextmanager +def timer(subject: str = "time") -> Generator[None, None, None]: + """print the elapsed time. (only used in debugging)""" + start = time() + yield + elapsed = time() - start + elapsed_ms = elapsed * 1000 + print(f"{subject} elapsed {elapsed_ms:.1f}ms") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_win32_console.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_win32_console.py new file mode 100644 index 0000000..2eba1b9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_win32_console.py @@ -0,0 +1,661 @@ +"""Light wrapper around the Win32 Console API - this module should only be imported on Windows + +The API that this module wraps is documented at https://docs.microsoft.com/en-us/windows/console/console-functions +""" + +import ctypes +import sys +from typing import Any + +windll: Any = None +if sys.platform == "win32": + windll = ctypes.LibraryLoader(ctypes.WinDLL) +else: + raise ImportError(f"{__name__} can only be imported on Windows") + +import time +from ctypes import Structure, byref, wintypes +from typing import IO, NamedTuple, Type, cast + +from pip._vendor.rich.color import ColorSystem +from pip._vendor.rich.style import Style + +STDOUT = -11 +ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 + +COORD = wintypes._COORD + + +class LegacyWindowsError(Exception): + pass + + +class WindowsCoordinates(NamedTuple): + """Coordinates in the Windows Console API are (y, x), not (x, y). + This class is intended to prevent that confusion. + Rows and columns are indexed from 0. + This class can be used in place of wintypes._COORD in arguments and argtypes. + """ + + row: int + col: int + + @classmethod + def from_param(cls, value: "WindowsCoordinates") -> COORD: + """Converts a WindowsCoordinates into a wintypes _COORD structure. + This classmethod is internally called by ctypes to perform the conversion. + + Args: + value (WindowsCoordinates): The input coordinates to convert. + + Returns: + wintypes._COORD: The converted coordinates struct. 
+ """ + return COORD(value.col, value.row) + + +class CONSOLE_SCREEN_BUFFER_INFO(Structure): + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + + +class CONSOLE_CURSOR_INFO(ctypes.Structure): + _fields_ = [("dwSize", wintypes.DWORD), ("bVisible", wintypes.BOOL)] + + +_GetStdHandle = windll.kernel32.GetStdHandle +_GetStdHandle.argtypes = [ + wintypes.DWORD, +] +_GetStdHandle.restype = wintypes.HANDLE + + +def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE: + """Retrieves a handle to the specified standard device (standard input, standard output, or standard error). + + Args: + handle (int): Integer identifier for the handle. Defaults to -11 (stdout). + + Returns: + wintypes.HANDLE: The handle + """ + return cast(wintypes.HANDLE, _GetStdHandle(handle)) + + +_GetConsoleMode = windll.kernel32.GetConsoleMode +_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD] +_GetConsoleMode.restype = wintypes.BOOL + + +def GetConsoleMode(std_handle: wintypes.HANDLE) -> int: + """Retrieves the current input mode of a console's input buffer + or the current output mode of a console screen buffer. + + Args: + std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. + + Raises: + LegacyWindowsError: If any error occurs while calling the Windows console API. + + Returns: + int: Value representing the current console mode as documented at + https://docs.microsoft.com/en-us/windows/console/getconsolemode#parameters + """ + + console_mode = wintypes.DWORD() + success = bool(_GetConsoleMode(std_handle, console_mode)) + if not success: + raise LegacyWindowsError("Unable to get legacy Windows Console Mode") + return console_mode.value + + +_FillConsoleOutputCharacterW = windll.kernel32.FillConsoleOutputCharacterW +_FillConsoleOutputCharacterW.argtypes = [ + wintypes.HANDLE, + ctypes.c_char, + wintypes.DWORD, + cast(Type[COORD], WindowsCoordinates), + ctypes.POINTER(wintypes.DWORD), +] +_FillConsoleOutputCharacterW.restype = wintypes.BOOL + + +def FillConsoleOutputCharacter( + std_handle: wintypes.HANDLE, + char: str, + length: int, + start: WindowsCoordinates, +) -> int: + """Writes a character to the console screen buffer a specified number of times, beginning at the specified coordinates. + + Args: + std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. + char (str): The character to write. Must be a string of length 1. + length (int): The number of times to write the character. + start (WindowsCoordinates): The coordinates to start writing at. + + Returns: + int: The number of characters written. 
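+
+    Example (an illustrative sketch): `FillConsoleOutputCharacter(handle, " ",
+    length=80, start=WindowsCoordinates(0, 0))` blanks the first 80 cells of
+    the top row without moving the cursor.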
+ """ + character = ctypes.c_char(char.encode()) + num_characters = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + _FillConsoleOutputCharacterW( + std_handle, + character, + num_characters, + start, + byref(num_written), + ) + return num_written.value + + +_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute +_FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + cast(Type[COORD], WindowsCoordinates), + ctypes.POINTER(wintypes.DWORD), +] +_FillConsoleOutputAttribute.restype = wintypes.BOOL + + +def FillConsoleOutputAttribute( + std_handle: wintypes.HANDLE, + attributes: int, + length: int, + start: WindowsCoordinates, +) -> int: + """Sets the character attributes for a specified number of character cells, + beginning at the specified coordinates in a screen buffer. + + Args: + std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. + attributes (int): Integer value representing the foreground and background colours of the cells. + length (int): The number of cells to set the output attribute of. + start (WindowsCoordinates): The coordinates of the first cell whose attributes are to be set. + + Returns: + int: The number of cells whose attributes were actually set. + """ + num_cells = wintypes.DWORD(length) + style_attrs = wintypes.WORD(attributes) + num_written = wintypes.DWORD(0) + _FillConsoleOutputAttribute( + std_handle, style_attrs, num_cells, start, byref(num_written) + ) + return num_written.value + + +_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute +_SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, +] +_SetConsoleTextAttribute.restype = wintypes.BOOL + + +def SetConsoleTextAttribute( + std_handle: wintypes.HANDLE, attributes: wintypes.WORD +) -> bool: + """Set the colour attributes for all text written after this function is called. + + Args: + std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. + attributes (int): Integer value representing the foreground and background colours. + + + Returns: + bool: True if the attribute was set successfully, otherwise False. + """ + return bool(_SetConsoleTextAttribute(std_handle, attributes)) + + +_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo +_GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO), +] +_GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + +def GetConsoleScreenBufferInfo( + std_handle: wintypes.HANDLE, +) -> CONSOLE_SCREEN_BUFFER_INFO: + """Retrieves information about the specified console screen buffer. + + Args: + std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. 
+ + Returns: + CONSOLE_SCREEN_BUFFER_INFO: A CONSOLE_SCREEN_BUFFER_INFO ctype struct contain information about + screen size, cursor position, colour attributes, and more.""" + console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO() + _GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info)) + return console_screen_buffer_info + + +_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition +_SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + cast(Type[COORD], WindowsCoordinates), +] +_SetConsoleCursorPosition.restype = wintypes.BOOL + + +def SetConsoleCursorPosition( + std_handle: wintypes.HANDLE, coords: WindowsCoordinates +) -> bool: + """Set the position of the cursor in the console screen + + Args: + std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. + coords (WindowsCoordinates): The coordinates to move the cursor to. + + Returns: + bool: True if the function succeeds, otherwise False. + """ + return bool(_SetConsoleCursorPosition(std_handle, coords)) + + +_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo +_GetConsoleCursorInfo.argtypes = [ + wintypes.HANDLE, + ctypes.POINTER(CONSOLE_CURSOR_INFO), +] +_GetConsoleCursorInfo.restype = wintypes.BOOL + + +def GetConsoleCursorInfo( + std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO +) -> bool: + """Get the cursor info - used to get cursor visibility and width + + Args: + std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. + cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information + about the console's cursor. + + Returns: + bool: True if the function succeeds, otherwise False. + """ + return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info))) + + +_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo +_SetConsoleCursorInfo.argtypes = [ + wintypes.HANDLE, + ctypes.POINTER(CONSOLE_CURSOR_INFO), +] +_SetConsoleCursorInfo.restype = wintypes.BOOL + + +def SetConsoleCursorInfo( + std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO +) -> bool: + """Set the cursor info - used for adjusting cursor visibility and width + + Args: + std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. + cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct containing the new cursor info. + + Returns: + bool: True if the function succeeds, otherwise False. + """ + return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info))) + + +_SetConsoleTitle = windll.kernel32.SetConsoleTitleW +_SetConsoleTitle.argtypes = [wintypes.LPCWSTR] +_SetConsoleTitle.restype = wintypes.BOOL + + +def SetConsoleTitle(title: str) -> bool: + """Sets the title of the current console window + + Args: + title (str): The new title of the console window. + + Returns: + bool: True if the function succeeds, otherwise False. + """ + return bool(_SetConsoleTitle(title)) + + +class LegacyWindowsTerm: + """This class allows interaction with the legacy Windows Console API. It should only be used in the context + of environments where virtual terminal processing is not available. However, if it is used in a Windows environment, + the entire API should work. + + Args: + file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout. 
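+
+    Example (a minimal sketch, mirroring the demo at the bottom of this
+    module)::
+
+        term = LegacyWindowsTerm(sys.stdout)
+        term.write_styled("hello", Style.parse("bold white on blue"))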
+ """ + + BRIGHT_BIT = 8 + + # Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers + ANSI_TO_WINDOWS = [ + 0, # black The Windows colours are defined in wincon.h as follows: + 4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001 + 2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010 + 6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100 + 1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000 + 5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000 + 3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000 + 7, # white define BACKGROUND_RED 0x0040 -- 0100 0000 + 8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000 + 12, # bright red + 10, # bright green + 14, # bright yellow + 9, # bright blue + 13, # bright magenta + 11, # bright cyan + 15, # bright white + ] + + def __init__(self, file: "IO[str]") -> None: + handle = GetStdHandle(STDOUT) + self._handle = handle + default_text = GetConsoleScreenBufferInfo(handle).wAttributes + self._default_text = default_text + + self._default_fore = default_text & 7 + self._default_back = (default_text >> 4) & 7 + self._default_attrs = self._default_fore | (self._default_back << 4) + + self._file = file + self.write = file.write + self.flush = file.flush + + @property + def cursor_position(self) -> WindowsCoordinates: + """Returns the current position of the cursor (0-based) + + Returns: + WindowsCoordinates: The current cursor position. + """ + coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition + return WindowsCoordinates(row=coord.Y, col=coord.X) + + @property + def screen_size(self) -> WindowsCoordinates: + """Returns the current size of the console screen buffer, in character columns and rows + + Returns: + WindowsCoordinates: The width and height of the screen as WindowsCoordinates. + """ + screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize + return WindowsCoordinates(row=screen_size.Y, col=screen_size.X) + + def write_text(self, text: str) -> None: + """Write text directly to the terminal without any modification of styles + + Args: + text (str): The text to write to the console + """ + self.write(text) + self.flush() + + def write_styled(self, text: str, style: Style) -> None: + """Write styled text to the terminal. + + Args: + text (str): The text to write + style (Style): The style of the text + """ + color = style.color + bgcolor = style.bgcolor + if style.reverse: + color, bgcolor = bgcolor, color + + if color: + fore = color.downgrade(ColorSystem.WINDOWS).number + fore = fore if fore is not None else 7 # Default to ANSI 7: White + if style.bold: + fore = fore | self.BRIGHT_BIT + if style.dim: + fore = fore & ~self.BRIGHT_BIT + fore = self.ANSI_TO_WINDOWS[fore] + else: + fore = self._default_fore + + if bgcolor: + back = bgcolor.downgrade(ColorSystem.WINDOWS).number + back = back if back is not None else 0 # Default to ANSI 0: Black + back = self.ANSI_TO_WINDOWS[back] + else: + back = self._default_back + + assert fore is not None + assert back is not None + + SetConsoleTextAttribute( + self._handle, attributes=ctypes.c_ushort(fore | (back << 4)) + ) + self.write_text(text) + SetConsoleTextAttribute(self._handle, attributes=self._default_text) + + def move_cursor_to(self, new_position: WindowsCoordinates) -> None: + """Set the position of the cursor + + Args: + new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor. 
+ """ + if new_position.col < 0 or new_position.row < 0: + return + SetConsoleCursorPosition(self._handle, coords=new_position) + + def erase_line(self) -> None: + """Erase all content on the line the cursor is currently located at""" + screen_size = self.screen_size + cursor_position = self.cursor_position + cells_to_erase = screen_size.col + start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0) + FillConsoleOutputCharacter( + self._handle, " ", length=cells_to_erase, start=start_coordinates + ) + FillConsoleOutputAttribute( + self._handle, + self._default_attrs, + length=cells_to_erase, + start=start_coordinates, + ) + + def erase_end_of_line(self) -> None: + """Erase all content from the cursor position to the end of that line""" + cursor_position = self.cursor_position + cells_to_erase = self.screen_size.col - cursor_position.col + FillConsoleOutputCharacter( + self._handle, " ", length=cells_to_erase, start=cursor_position + ) + FillConsoleOutputAttribute( + self._handle, + self._default_attrs, + length=cells_to_erase, + start=cursor_position, + ) + + def erase_start_of_line(self) -> None: + """Erase all content from the cursor position to the start of that line""" + row, col = self.cursor_position + start = WindowsCoordinates(row, 0) + FillConsoleOutputCharacter(self._handle, " ", length=col, start=start) + FillConsoleOutputAttribute( + self._handle, self._default_attrs, length=col, start=start + ) + + def move_cursor_up(self) -> None: + """Move the cursor up a single cell""" + cursor_position = self.cursor_position + SetConsoleCursorPosition( + self._handle, + coords=WindowsCoordinates( + row=cursor_position.row - 1, col=cursor_position.col + ), + ) + + def move_cursor_down(self) -> None: + """Move the cursor down a single cell""" + cursor_position = self.cursor_position + SetConsoleCursorPosition( + self._handle, + coords=WindowsCoordinates( + row=cursor_position.row + 1, + col=cursor_position.col, + ), + ) + + def move_cursor_forward(self) -> None: + """Move the cursor forward a single cell. Wrap to the next line if required.""" + row, col = self.cursor_position + if col == self.screen_size.col - 1: + row += 1 + col = 0 + else: + col += 1 + SetConsoleCursorPosition( + self._handle, coords=WindowsCoordinates(row=row, col=col) + ) + + def move_cursor_to_column(self, column: int) -> None: + """Move cursor to the column specified by the zero-based column index, staying on the same row + + Args: + column (int): The zero-based column index to move the cursor to. + """ + row, _ = self.cursor_position + SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column)) + + def move_cursor_backward(self) -> None: + """Move the cursor backward a single cell. 
Wrap to the previous line if required.""" + row, col = self.cursor_position + if col == 0: + row -= 1 + col = self.screen_size.col - 1 + else: + col -= 1 + SetConsoleCursorPosition( + self._handle, coords=WindowsCoordinates(row=row, col=col) + ) + + def hide_cursor(self) -> None: + """Hide the cursor""" + current_cursor_size = self._get_cursor_size() + invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0) + SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor) + + def show_cursor(self) -> None: + """Show the cursor""" + current_cursor_size = self._get_cursor_size() + visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1) + SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor) + + def set_title(self, title: str) -> None: + """Set the title of the terminal window + + Args: + title (str): The new title of the console window + """ + assert len(title) < 255, "Console title must be less than 255 characters" + SetConsoleTitle(title) + + def _get_cursor_size(self) -> int: + """Get the percentage of the character cell that is filled by the cursor""" + cursor_info = CONSOLE_CURSOR_INFO() + GetConsoleCursorInfo(self._handle, cursor_info=cursor_info) + return int(cursor_info.dwSize) + + +if __name__ == "__main__": + handle = GetStdHandle() + + from pip._vendor.rich.console import Console + + console = Console() + + term = LegacyWindowsTerm(sys.stdout) + term.set_title("Win32 Console Examples") + + style = Style(color="black", bgcolor="red") + + heading = Style.parse("black on green") + + # Check colour output + console.rule("Checking colour output") + console.print("[on red]on red!") + console.print("[blue]blue!") + console.print("[yellow]yellow!") + console.print("[bold yellow]bold yellow!") + console.print("[bright_yellow]bright_yellow!") + console.print("[dim bright_yellow]dim bright_yellow!") + console.print("[italic cyan]italic cyan!") + console.print("[bold white on blue]bold white on blue!") + console.print("[reverse bold white on blue]reverse bold white on blue!") + console.print("[bold black on cyan]bold black on cyan!") + console.print("[black on green]black on green!") + console.print("[blue on green]blue on green!") + console.print("[white on black]white on black!") + console.print("[black on white]black on white!") + console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!") + + # Check cursor movement + console.rule("Checking cursor movement") + console.print() + term.move_cursor_backward() + term.move_cursor_backward() + term.write_text("went back and wrapped to prev line") + time.sleep(1) + term.move_cursor_up() + term.write_text("we go up") + time.sleep(1) + term.move_cursor_down() + term.write_text("and down") + time.sleep(1) + term.move_cursor_up() + term.move_cursor_backward() + term.move_cursor_backward() + term.write_text("we went up and back 2") + time.sleep(1) + term.move_cursor_down() + term.move_cursor_backward() + term.move_cursor_backward() + term.write_text("we went down and back 2") + time.sleep(1) + + # Check erasing of lines + term.hide_cursor() + console.print() + console.rule("Checking line erasing") + console.print("\n...Deleting to the start of the line...") + term.write_text("The red arrow shows the cursor location, and direction of erase") + time.sleep(1) + term.move_cursor_to_column(16) + term.write_styled("<", Style.parse("black on red")) + term.move_cursor_backward() + time.sleep(1) + term.erase_start_of_line() + time.sleep(1) + + console.print("\n\n...And to the end of the line...") + 
term.write_text("The red arrow shows the cursor location, and direction of erase") + time.sleep(1) + + term.move_cursor_to_column(16) + term.write_styled(">", Style.parse("black on red")) + time.sleep(1) + term.erase_end_of_line() + time.sleep(1) + + console.print("\n\n...Now the whole line will be erased...") + term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan")) + time.sleep(1) + term.erase_line() + + term.show_cursor() + print("\n") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_windows.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_windows.py new file mode 100644 index 0000000..7520a9f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_windows.py @@ -0,0 +1,71 @@ +import sys +from dataclasses import dataclass + + +@dataclass +class WindowsConsoleFeatures: + """Windows features available.""" + + vt: bool = False + """The console supports VT codes.""" + truecolor: bool = False + """The console supports truecolor.""" + + +try: + import ctypes + from ctypes import LibraryLoader + + if sys.platform == "win32": + windll = LibraryLoader(ctypes.WinDLL) + else: + windll = None + raise ImportError("Not windows") + + from pip._vendor.rich._win32_console import ( + ENABLE_VIRTUAL_TERMINAL_PROCESSING, + GetConsoleMode, + GetStdHandle, + LegacyWindowsError, + ) + +except (AttributeError, ImportError, ValueError): + # Fallback if we can't load the Windows DLL + def get_windows_console_features() -> WindowsConsoleFeatures: + features = WindowsConsoleFeatures() + return features + +else: + + def get_windows_console_features() -> WindowsConsoleFeatures: + """Get windows console features. + + Returns: + WindowsConsoleFeatures: An instance of WindowsConsoleFeatures. + """ + handle = GetStdHandle() + try: + console_mode = GetConsoleMode(handle) + success = True + except LegacyWindowsError: + console_mode = 0 + success = False + vt = bool(success and console_mode & ENABLE_VIRTUAL_TERMINAL_PROCESSING) + truecolor = False + if vt: + win_version = sys.getwindowsversion() + truecolor = win_version.major > 10 or ( + win_version.major == 10 and win_version.build >= 15063 + ) + features = WindowsConsoleFeatures(vt=vt, truecolor=truecolor) + return features + + +if __name__ == "__main__": + import platform + + features = get_windows_console_features() + from pip._vendor.rich import print + + print(f'platform="{platform.system()}"') + print(repr(features)) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_windows_renderer.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_windows_renderer.py new file mode 100644 index 0000000..5ece056 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_windows_renderer.py @@ -0,0 +1,56 @@ +from typing import Iterable, Sequence, Tuple, cast + +from pip._vendor.rich._win32_console import LegacyWindowsTerm, WindowsCoordinates +from pip._vendor.rich.segment import ControlCode, ControlType, Segment + + +def legacy_windows_render(buffer: Iterable[Segment], term: LegacyWindowsTerm) -> None: + """Makes appropriate Windows Console API calls based on the segments in the buffer. + + Args: + buffer (Iterable[Segment]): Iterable of Segments to convert to Win32 API calls. + term (LegacyWindowsTerm): Used to call the Windows Console API. 
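+
+    For example, a segment carrying the control code
+    `(ControlType.CURSOR_UP,)` is translated into a `term.move_cursor_up()`
+    call rather than being written out as text.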
+ """ + for text, style, control in buffer: + if not control: + if style: + term.write_styled(text, style) + else: + term.write_text(text) + else: + control_codes: Sequence[ControlCode] = control + for control_code in control_codes: + control_type = control_code[0] + if control_type == ControlType.CURSOR_MOVE_TO: + _, x, y = cast(Tuple[ControlType, int, int], control_code) + term.move_cursor_to(WindowsCoordinates(row=y - 1, col=x - 1)) + elif control_type == ControlType.CARRIAGE_RETURN: + term.write_text("\r") + elif control_type == ControlType.HOME: + term.move_cursor_to(WindowsCoordinates(0, 0)) + elif control_type == ControlType.CURSOR_UP: + term.move_cursor_up() + elif control_type == ControlType.CURSOR_DOWN: + term.move_cursor_down() + elif control_type == ControlType.CURSOR_FORWARD: + term.move_cursor_forward() + elif control_type == ControlType.CURSOR_BACKWARD: + term.move_cursor_backward() + elif control_type == ControlType.CURSOR_MOVE_TO_COLUMN: + _, column = cast(Tuple[ControlType, int], control_code) + term.move_cursor_to_column(column - 1) + elif control_type == ControlType.HIDE_CURSOR: + term.hide_cursor() + elif control_type == ControlType.SHOW_CURSOR: + term.show_cursor() + elif control_type == ControlType.ERASE_IN_LINE: + _, mode = cast(Tuple[ControlType, int], control_code) + if mode == 0: + term.erase_end_of_line() + elif mode == 1: + term.erase_start_of_line() + elif mode == 2: + term.erase_line() + elif control_type == ControlType.SET_WINDOW_TITLE: + _, title = cast(Tuple[ControlType, str], control_code) + term.set_title(title) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_wrap.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_wrap.py new file mode 100644 index 0000000..2e94ff6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/_wrap.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import re +from typing import Iterable + +from ._loop import loop_last +from .cells import cell_len, chop_cells + +re_word = re.compile(r"\s*\S+\s*") + + +def words(text: str) -> Iterable[tuple[int, int, str]]: + """Yields each word from the text as a tuple + containing (start_index, end_index, word). A "word" in this context may + include the actual word and any whitespace to the right. + """ + position = 0 + word_match = re_word.match(text, position) + while word_match is not None: + start, end = word_match.span() + word = word_match.group(0) + yield start, end, word + word_match = re_word.match(text, end) + + +def divide_line(text: str, width: int, fold: bool = True) -> list[int]: + """Given a string of text, and a width (measured in cells), return a list + of cell offsets which the string should be split at in order for it to fit + within the given width. + + Args: + text: The text to examine. + width: The available cell width. + fold: If True, words longer than `width` will be folded onto a new line. + + Returns: + A list of indices to break the line at. + """ + break_positions: list[int] = [] # offsets to insert the breaks at + append = break_positions.append + cell_offset = 0 + _cell_len = cell_len + + for start, _end, word in words(text): + word_length = _cell_len(word.rstrip()) + remaining_space = width - cell_offset + word_fits_remaining_space = remaining_space >= word_length + + if word_fits_remaining_space: + # Simplest case - the word fits within the remaining width for this line. + cell_offset += _cell_len(word) + else: + # Not enough space remaining for this word on the current line. 
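+            # Two sub-cases follow: either the word is wider than a whole line
+            # (fold it across lines, or crop it when folding is disabled), or
+            # it simply starts the next line.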
+ if word_length > width: + # The word doesn't fit on any line, so we can't simply + # place it on the next line... + if fold: + # Fold the word across multiple lines. + folded_word = chop_cells(word, width=width) + for last, line in loop_last(folded_word): + if start: + append(start) + if last: + cell_offset = _cell_len(line) + else: + start += len(line) + else: + # Folding isn't allowed, so crop the word. + if start: + append(start) + cell_offset = _cell_len(word) + elif cell_offset and start: + # The word doesn't fit within the remaining space on the current + # line, but it *can* fit on to the next (empty) line. + append(start) + cell_offset = _cell_len(word) + + return break_positions + + +if __name__ == "__main__": # pragma: no cover + from .console import Console + + console = Console(width=10) + console.print("12345 abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ 12345") + print(chop_cells("abcdefghijklmnopqrstuvwxyz", 10)) + + console = Console(width=20) + console.rule() + console.print("TextualはPythonの高速アプリケーション開発フレームワークです") + + console.rule() + console.print("アプリケーションは1670万色を使用でき") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/abc.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/abc.py new file mode 100644 index 0000000..e6e498e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/abc.py @@ -0,0 +1,33 @@ +from abc import ABC + + +class RichRenderable(ABC): + """An abstract base class for Rich renderables. + + Note that there is no need to extend this class, the intended use is to check if an + object supports the Rich renderable protocol. For example:: + + if isinstance(my_object, RichRenderable): + console.print(my_object) + + """ + + @classmethod + def __subclasshook__(cls, other: type) -> bool: + """Check if this class supports the rich render protocol.""" + return hasattr(other, "__rich_console__") or hasattr(other, "__rich__") + + +if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.text import Text + + t = Text() + print(isinstance(Text, RichRenderable)) + print(isinstance(t, RichRenderable)) + + class Foo: + pass + + f = Foo() + print(isinstance(f, RichRenderable)) + print(isinstance("", RichRenderable)) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/align.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/align.py new file mode 100644 index 0000000..e65dc5b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/align.py @@ -0,0 +1,306 @@ +from itertools import chain +from typing import TYPE_CHECKING, Iterable, Optional, Literal + +from .constrain import Constrain +from .jupyter import JupyterMixin +from .measure import Measurement +from .segment import Segment +from .style import StyleType + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderableType, RenderResult + +AlignMethod = Literal["left", "center", "right"] +VerticalAlignMethod = Literal["top", "middle", "bottom"] + + +class Align(JupyterMixin): + """Align a renderable by adding spaces if necessary. + + Args: + renderable (RenderableType): A console renderable. + align (AlignMethod): One of "left", "center", or "right"" + style (StyleType, optional): An optional style to apply to the background. + vertical (Optional[VerticalAlignMethod], optional): Optional vertical align, one of "top", "middle", or "bottom". Defaults to None. + pad (bool, optional): Pad the right with spaces. Defaults to True. + width (int, optional): Restrict contents to given width, or None to use default width. 
Defaults to None. + height (int, optional): Set height of align renderable, or None to fit to contents. Defaults to None. + + Raises: + ValueError: if ``align`` is not one of the expected values. + """ + + def __init__( + self, + renderable: "RenderableType", + align: AlignMethod = "left", + style: Optional[StyleType] = None, + *, + vertical: Optional[VerticalAlignMethod] = None, + pad: bool = True, + width: Optional[int] = None, + height: Optional[int] = None, + ) -> None: + if align not in ("left", "center", "right"): + raise ValueError( + f'invalid value for align, expected "left", "center", or "right" (not {align!r})' + ) + if vertical is not None and vertical not in ("top", "middle", "bottom"): + raise ValueError( + f'invalid value for vertical, expected "top", "middle", or "bottom" (not {vertical!r})' + ) + self.renderable = renderable + self.align = align + self.style = style + self.vertical = vertical + self.pad = pad + self.width = width + self.height = height + + def __repr__(self) -> str: + return f"Align({self.renderable!r}, {self.align!r})" + + @classmethod + def left( + cls, + renderable: "RenderableType", + style: Optional[StyleType] = None, + *, + vertical: Optional[VerticalAlignMethod] = None, + pad: bool = True, + width: Optional[int] = None, + height: Optional[int] = None, + ) -> "Align": + """Align a renderable to the left.""" + return cls( + renderable, + "left", + style=style, + vertical=vertical, + pad=pad, + width=width, + height=height, + ) + + @classmethod + def center( + cls, + renderable: "RenderableType", + style: Optional[StyleType] = None, + *, + vertical: Optional[VerticalAlignMethod] = None, + pad: bool = True, + width: Optional[int] = None, + height: Optional[int] = None, + ) -> "Align": + """Align a renderable to the center.""" + return cls( + renderable, + "center", + style=style, + vertical=vertical, + pad=pad, + width=width, + height=height, + ) + + @classmethod + def right( + cls, + renderable: "RenderableType", + style: Optional[StyleType] = None, + *, + vertical: Optional[VerticalAlignMethod] = None, + pad: bool = True, + width: Optional[int] = None, + height: Optional[int] = None, + ) -> "Align": + """Align a renderable to the right.""" + return cls( + renderable, + "right", + style=style, + vertical=vertical, + pad=pad, + width=width, + height=height, + ) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + align = self.align + width = console.measure(self.renderable, options=options).maximum + rendered = console.render( + Constrain( + self.renderable, width if self.width is None else min(width, self.width) + ), + options.update(height=None), + ) + lines = list(Segment.split_lines(rendered)) + width, height = Segment.get_shape(lines) + lines = Segment.set_shape(lines, width, height) + new_line = Segment.line() + excess_space = options.max_width - width + style = console.get_style(self.style) if self.style is not None else None + + def generate_segments() -> Iterable[Segment]: + if excess_space <= 0: + # Exact fit + for line in lines: + yield from line + yield new_line + + elif align == "left": + # Pad on the right + pad = Segment(" " * excess_space, style) if self.pad else None + for line in lines: + yield from line + if pad: + yield pad + yield new_line + + elif align == "center": + # Pad left and right + left = excess_space // 2 + pad = Segment(" " * left, style) + pad_right = ( + Segment(" " * (excess_space - left), style) if self.pad else None + ) + for line in lines: + if left: + yield pad + 
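+                    # Pad the left half first; any odd cell from the integer
+                    # division is carried by the right-hand pad.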
yield from line + if pad_right: + yield pad_right + yield new_line + + elif align == "right": + # Padding on left + pad = Segment(" " * excess_space, style) + for line in lines: + yield pad + yield from line + yield new_line + + blank_line = ( + Segment(f"{' ' * (self.width or options.max_width)}\n", style) + if self.pad + else Segment("\n") + ) + + def blank_lines(count: int) -> Iterable[Segment]: + if count > 0: + for _ in range(count): + yield blank_line + + vertical_height = self.height or options.height + iter_segments: Iterable[Segment] + if self.vertical and vertical_height is not None: + if self.vertical == "top": + bottom_space = vertical_height - height + iter_segments = chain(generate_segments(), blank_lines(bottom_space)) + elif self.vertical == "middle": + top_space = (vertical_height - height) // 2 + bottom_space = vertical_height - top_space - height + iter_segments = chain( + blank_lines(top_space), + generate_segments(), + blank_lines(bottom_space), + ) + else: # self.vertical == "bottom": + top_space = vertical_height - height + iter_segments = chain(blank_lines(top_space), generate_segments()) + else: + iter_segments = generate_segments() + if self.style: + style = console.get_style(self.style) + iter_segments = Segment.apply_style(iter_segments, style) + yield from iter_segments + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> Measurement: + measurement = Measurement.get(console, options, self.renderable) + return measurement + + +class VerticalCenter(JupyterMixin): + """Vertically aligns a renderable. + + Warn: + This class is deprecated and may be removed in a future version. Use Align class with + `vertical="middle"`. + + Args: + renderable (RenderableType): A renderable object. + style (StyleType, optional): An optional style to apply to the background. Defaults to None. 
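+
+    Example:
+        A minimal sketch of the recommended replacement, assuming this
+        vendored copy of Rich:
+
+            from pip._vendor.rich.align import Align
+            from pip._vendor.rich.console import Console
+
+            console = Console()
+            console.print(Align.center("content", vertical="middle", height=9))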
+ """ + + def __init__( + self, + renderable: "RenderableType", + style: Optional[StyleType] = None, + ) -> None: + self.renderable = renderable + self.style = style + + def __repr__(self) -> str: + return f"VerticalCenter({self.renderable!r})" + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + style = console.get_style(self.style) if self.style is not None else None + lines = console.render_lines( + self.renderable, options.update(height=None), pad=False + ) + width, _height = Segment.get_shape(lines) + new_line = Segment.line() + height = options.height or options.size.height + top_space = (height - len(lines)) // 2 + bottom_space = height - top_space - len(lines) + blank_line = Segment(f"{' ' * width}", style) + + def blank_lines(count: int) -> Iterable[Segment]: + for _ in range(count): + yield blank_line + yield new_line + + if top_space > 0: + yield from blank_lines(top_space) + for line in lines: + yield from line + yield new_line + if bottom_space > 0: + yield from blank_lines(bottom_space) + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> Measurement: + measurement = Measurement.get(console, options, self.renderable) + return measurement + + +if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.console import Console, Group + from pip._vendor.rich.highlighter import ReprHighlighter + from pip._vendor.rich.panel import Panel + + highlighter = ReprHighlighter() + console = Console() + + panel = Panel( + Group( + Align.left(highlighter("align='left'")), + Align.center(highlighter("align='center'")), + Align.right(highlighter("align='right'")), + ), + width=60, + style="on dark_blue", + title="Align", + ) + + console.print( + Align.center(panel, vertical="middle", style="on red", height=console.height) + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/ansi.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/ansi.py new file mode 100644 index 0000000..7de86ce --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/ansi.py @@ -0,0 +1,241 @@ +import re +import sys +from contextlib import suppress +from typing import Iterable, NamedTuple, Optional + +from .color import Color +from .style import Style +from .text import Text + +re_ansi = re.compile( + r""" +(?:\x1b[0-?])| +(?:\x1b\](.*?)\x1b\\)| +(?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~])) +""", + re.VERBOSE, +) + + +class _AnsiToken(NamedTuple): + """Result of ansi tokenized string.""" + + plain: str = "" + sgr: Optional[str] = "" + osc: Optional[str] = "" + + +def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]: + """Tokenize a string in to plain text and ANSI codes. + + Args: + ansi_text (str): A String containing ANSI codes. 
+ + Yields: + AnsiToken: A named tuple of (plain, sgr, osc) + """ + + position = 0 + sgr: Optional[str] + osc: Optional[str] + for match in re_ansi.finditer(ansi_text): + start, end = match.span(0) + osc, sgr = match.groups() + if start > position: + yield _AnsiToken(ansi_text[position:start]) + if sgr: + if sgr == "(": + position = end + 1 + continue + if sgr.endswith("m"): + yield _AnsiToken("", sgr[1:-1], osc) + else: + yield _AnsiToken("", sgr, osc) + position = end + if position < len(ansi_text): + yield _AnsiToken(ansi_text[position:]) + + +SGR_STYLE_MAP = { + 1: "bold", + 2: "dim", + 3: "italic", + 4: "underline", + 5: "blink", + 6: "blink2", + 7: "reverse", + 8: "conceal", + 9: "strike", + 21: "underline2", + 22: "not dim not bold", + 23: "not italic", + 24: "not underline", + 25: "not blink", + 26: "not blink2", + 27: "not reverse", + 28: "not conceal", + 29: "not strike", + 30: "color(0)", + 31: "color(1)", + 32: "color(2)", + 33: "color(3)", + 34: "color(4)", + 35: "color(5)", + 36: "color(6)", + 37: "color(7)", + 39: "default", + 40: "on color(0)", + 41: "on color(1)", + 42: "on color(2)", + 43: "on color(3)", + 44: "on color(4)", + 45: "on color(5)", + 46: "on color(6)", + 47: "on color(7)", + 49: "on default", + 51: "frame", + 52: "encircle", + 53: "overline", + 54: "not frame not encircle", + 55: "not overline", + 90: "color(8)", + 91: "color(9)", + 92: "color(10)", + 93: "color(11)", + 94: "color(12)", + 95: "color(13)", + 96: "color(14)", + 97: "color(15)", + 100: "on color(8)", + 101: "on color(9)", + 102: "on color(10)", + 103: "on color(11)", + 104: "on color(12)", + 105: "on color(13)", + 106: "on color(14)", + 107: "on color(15)", +} + + +class AnsiDecoder: + """Translate ANSI code in to styled Text.""" + + def __init__(self) -> None: + self.style = Style.null() + + def decode(self, terminal_text: str) -> Iterable[Text]: + """Decode ANSI codes in an iterable of lines. + + Args: + lines (Iterable[str]): An iterable of lines of terminal output. + + Yields: + Text: Marked up Text. + """ + for line in terminal_text.splitlines(): + yield self.decode_line(line) + + def decode_line(self, line: str) -> Text: + """Decode a line containing ansi codes. + + Args: + line (str): A line of terminal output. + + Returns: + Text: A Text instance marked up according to ansi codes. 
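+
+        Example:
+            A minimal sketch, assuming this vendored copy of Rich:
+
+                decoder = AnsiDecoder()
+                text = decoder.decode_line("\x1b[1;31mError\x1b[0m ok")
+                # text.plain == "Error ok", with bold red spanning "Error"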
+ """ + from_ansi = Color.from_ansi + from_rgb = Color.from_rgb + _Style = Style + text = Text() + append = text.append + line = line.rsplit("\r", 1)[-1] + for plain_text, sgr, osc in _ansi_tokenize(line): + if plain_text: + append(plain_text, self.style or None) + elif osc is not None: + if osc.startswith("8;"): + _params, semicolon, link = osc[2:].partition(";") + if semicolon: + self.style = self.style.update_link(link or None) + elif sgr is not None: + # Translate in to semi-colon separated codes + # Ignore invalid codes, because we want to be lenient + codes = [ + min(255, int(_code) if _code else 0) + for _code in sgr.split(";") + if _code.isdigit() or _code == "" + ] + iter_codes = iter(codes) + for code in iter_codes: + if code == 0: + # reset + self.style = _Style.null() + elif code in SGR_STYLE_MAP: + # styles + self.style += _Style.parse(SGR_STYLE_MAP[code]) + elif code == 38: + #  Foreground + with suppress(StopIteration): + color_type = next(iter_codes) + if color_type == 5: + self.style += _Style.from_color( + from_ansi(next(iter_codes)) + ) + elif color_type == 2: + self.style += _Style.from_color( + from_rgb( + next(iter_codes), + next(iter_codes), + next(iter_codes), + ) + ) + elif code == 48: + # Background + with suppress(StopIteration): + color_type = next(iter_codes) + if color_type == 5: + self.style += _Style.from_color( + None, from_ansi(next(iter_codes)) + ) + elif color_type == 2: + self.style += _Style.from_color( + None, + from_rgb( + next(iter_codes), + next(iter_codes), + next(iter_codes), + ), + ) + + return text + + +if sys.platform != "win32" and __name__ == "__main__": # pragma: no cover + import io + import os + import pty + import sys + + decoder = AnsiDecoder() + + stdout = io.BytesIO() + + def read(fd: int) -> bytes: + data = os.read(fd, 1024) + stdout.write(data) + return data + + pty.spawn(sys.argv[1:], read) + + from .console import Console + + console = Console(record=True) + + stdout_result = stdout.getvalue().decode("utf-8") + print(stdout_result) + + for line in decoder.decode(stdout_result): + console.print(line) + + console.save_html("stdout.html") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/bar.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/bar.py new file mode 100644 index 0000000..022284b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/bar.py @@ -0,0 +1,93 @@ +from typing import Optional, Union + +from .color import Color +from .console import Console, ConsoleOptions, RenderResult +from .jupyter import JupyterMixin +from .measure import Measurement +from .segment import Segment +from .style import Style + +# There are left-aligned characters for 1/8 to 7/8, but +# the right-aligned characters exist only for 1/8 and 4/8. +BEGIN_BLOCK_ELEMENTS = ["█", "█", "█", "▐", "▐", "▐", "▕", "▕"] +END_BLOCK_ELEMENTS = [" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉"] +FULL_BLOCK = "█" + + +class Bar(JupyterMixin): + """Renders a solid block bar. + + Args: + size (float): Value for the end of the bar. + begin (float): Begin point (between 0 and size, inclusive). + end (float): End point (between 0 and size, inclusive). + width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None. + color (Union[Color, str], optional): Color of the bar. Defaults to "default". + bgcolor (Union[Color, str], optional): Color of bar background. Defaults to "default". 
+ """ + + def __init__( + self, + size: float, + begin: float, + end: float, + *, + width: Optional[int] = None, + color: Union[Color, str] = "default", + bgcolor: Union[Color, str] = "default", + ): + self.size = size + self.begin = max(begin, 0) + self.end = min(end, size) + self.width = width + self.style = Style(color=color, bgcolor=bgcolor) + + def __repr__(self) -> str: + return f"Bar({self.size}, {self.begin}, {self.end})" + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + width = min( + self.width if self.width is not None else options.max_width, + options.max_width, + ) + + if self.begin >= self.end: + yield Segment(" " * width, self.style) + yield Segment.line() + return + + prefix_complete_eights = int(width * 8 * self.begin / self.size) + prefix_bar_count = prefix_complete_eights // 8 + prefix_eights_count = prefix_complete_eights % 8 + + body_complete_eights = int(width * 8 * self.end / self.size) + body_bar_count = body_complete_eights // 8 + body_eights_count = body_complete_eights % 8 + + # When start and end fall into the same cell, we ideally should render + # a symbol that's "center-aligned", but there is no good symbol in Unicode. + # In this case, we fall back to right-aligned block symbol for simplicity. + + prefix = " " * prefix_bar_count + if prefix_eights_count: + prefix += BEGIN_BLOCK_ELEMENTS[prefix_eights_count] + + body = FULL_BLOCK * body_bar_count + if body_eights_count: + body += END_BLOCK_ELEMENTS[body_eights_count] + + suffix = " " * (width - len(body)) + + yield Segment(prefix + body[len(prefix) :] + suffix, self.style) + yield Segment.line() + + def __rich_measure__( + self, console: Console, options: ConsoleOptions + ) -> Measurement: + return ( + Measurement(self.width, self.width) + if self.width is not None + else Measurement(4, options.max_width) + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/box.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/box.py new file mode 100644 index 0000000..3f330cc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/box.py @@ -0,0 +1,474 @@ +from typing import TYPE_CHECKING, Iterable, List, Literal + + +from ._loop import loop_last + +if TYPE_CHECKING: + from pip._vendor.rich.console import ConsoleOptions + + +class Box: + """Defines characters to render boxes. + + ┌─┬┐ top + │ ││ head + ├─┼┤ head_row + │ ││ mid + ├─┼┤ row + ├─┼┤ foot_row + │ ││ foot + └─┴┘ bottom + + Args: + box (str): Characters making up box. + ascii (bool, optional): True if this box uses ascii characters only. Default is False. 
+ """ + + def __init__(self, box: str, *, ascii: bool = False) -> None: + self._box = box + self.ascii = ascii + line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines() + # top + self.top_left, self.top, self.top_divider, self.top_right = iter(line1) + # head + self.head_left, _, self.head_vertical, self.head_right = iter(line2) + # head_row + ( + self.head_row_left, + self.head_row_horizontal, + self.head_row_cross, + self.head_row_right, + ) = iter(line3) + + # mid + self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4) + # row + self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5) + # foot_row + ( + self.foot_row_left, + self.foot_row_horizontal, + self.foot_row_cross, + self.foot_row_right, + ) = iter(line6) + # foot + self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7) + # bottom + self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter( + line8 + ) + + def __repr__(self) -> str: + return "Box(...)" + + def __str__(self) -> str: + return self._box + + def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box": + """Substitute this box for another if it won't render due to platform issues. + + Args: + options (ConsoleOptions): Console options used in rendering. + safe (bool, optional): Substitute this for another Box if there are known problems + displaying on the platform (currently only relevant on Windows). Default is True. + + Returns: + Box: A different Box or the same Box. + """ + box = self + if options.legacy_windows and safe: + box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box) + if options.ascii_only and not box.ascii: + box = ASCII + return box + + def get_plain_headed_box(self) -> "Box": + """If this box uses special characters for the borders of the header, then + return the equivalent box that does not. + + Returns: + Box: The most similar Box that doesn't use header-specific box characters. + If the current Box already satisfies this criterion, then it's returned. + """ + return PLAIN_HEADED_SUBSTITUTIONS.get(self, self) + + def get_top(self, widths: Iterable[int]) -> str: + """Get the top of a simple box. + + Args: + widths (List[int]): Widths of columns. + + Returns: + str: A string of box characters. + """ + + parts: List[str] = [] + append = parts.append + append(self.top_left) + for last, width in loop_last(widths): + append(self.top * width) + if not last: + append(self.top_divider) + append(self.top_right) + return "".join(parts) + + def get_row( + self, + widths: Iterable[int], + level: Literal["head", "row", "foot", "mid"] = "row", + edge: bool = True, + ) -> str: + """Get the top of a simple box. + + Args: + width (List[int]): Widths of columns. + + Returns: + str: A string of box characters. 
+ """ + if level == "head": + left = self.head_row_left + horizontal = self.head_row_horizontal + cross = self.head_row_cross + right = self.head_row_right + elif level == "row": + left = self.row_left + horizontal = self.row_horizontal + cross = self.row_cross + right = self.row_right + elif level == "mid": + left = self.mid_left + horizontal = " " + cross = self.mid_vertical + right = self.mid_right + elif level == "foot": + left = self.foot_row_left + horizontal = self.foot_row_horizontal + cross = self.foot_row_cross + right = self.foot_row_right + else: + raise ValueError("level must be 'head', 'row' or 'foot'") + + parts: List[str] = [] + append = parts.append + if edge: + append(left) + for last, width in loop_last(widths): + append(horizontal * width) + if not last: + append(cross) + if edge: + append(right) + return "".join(parts) + + def get_bottom(self, widths: Iterable[int]) -> str: + """Get the bottom of a simple box. + + Args: + widths (List[int]): Widths of columns. + + Returns: + str: A string of box characters. + """ + + parts: List[str] = [] + append = parts.append + append(self.bottom_left) + for last, width in loop_last(widths): + append(self.bottom * width) + if not last: + append(self.bottom_divider) + append(self.bottom_right) + return "".join(parts) + + +# fmt: off +ASCII: Box = Box( + "+--+\n" + "| ||\n" + "|-+|\n" + "| ||\n" + "|-+|\n" + "|-+|\n" + "| ||\n" + "+--+\n", + ascii=True, +) + +ASCII2: Box = Box( + "+-++\n" + "| ||\n" + "+-++\n" + "| ||\n" + "+-++\n" + "+-++\n" + "| ||\n" + "+-++\n", + ascii=True, +) + +ASCII_DOUBLE_HEAD: Box = Box( + "+-++\n" + "| ||\n" + "+=++\n" + "| ||\n" + "+-++\n" + "+-++\n" + "| ||\n" + "+-++\n", + ascii=True, +) + +SQUARE: Box = Box( + "┌─┬┐\n" + "│ ││\n" + "├─┼┤\n" + "│ ││\n" + "├─┼┤\n" + "├─┼┤\n" + "│ ││\n" + "└─┴┘\n" +) + +SQUARE_DOUBLE_HEAD: Box = Box( + "┌─┬┐\n" + "│ ││\n" + "╞═╪╡\n" + "│ ││\n" + "├─┼┤\n" + "├─┼┤\n" + "│ ││\n" + "└─┴┘\n" +) + +MINIMAL: Box = Box( + " ╷ \n" + " │ \n" + "╶─┼╴\n" + " │ \n" + "╶─┼╴\n" + "╶─┼╴\n" + " │ \n" + " ╵ \n" +) + + +MINIMAL_HEAVY_HEAD: Box = Box( + " ╷ \n" + " │ \n" + "╺━┿╸\n" + " │ \n" + "╶─┼╴\n" + "╶─┼╴\n" + " │ \n" + " ╵ \n" +) + +MINIMAL_DOUBLE_HEAD: Box = Box( + " ╷ \n" + " │ \n" + " ═╪ \n" + " │ \n" + " ─┼ \n" + " ─┼ \n" + " │ \n" + " ╵ \n" +) + + +SIMPLE: Box = Box( + " \n" + " \n" + " ── \n" + " \n" + " \n" + " ── \n" + " \n" + " \n" +) + +SIMPLE_HEAD: Box = Box( + " \n" + " \n" + " ── \n" + " \n" + " \n" + " \n" + " \n" + " \n" +) + + +SIMPLE_HEAVY: Box = Box( + " \n" + " \n" + " ━━ \n" + " \n" + " \n" + " ━━ \n" + " \n" + " \n" +) + + +HORIZONTALS: Box = Box( + " ── \n" + " \n" + " ── \n" + " \n" + " ── \n" + " ── \n" + " \n" + " ── \n" +) + +ROUNDED: Box = Box( + "╭─┬╮\n" + "│ ││\n" + "├─┼┤\n" + "│ ││\n" + "├─┼┤\n" + "├─┼┤\n" + "│ ││\n" + "╰─┴╯\n" +) + +HEAVY: Box = Box( + "┏━┳┓\n" + "┃ ┃┃\n" + "┣━╋┫\n" + "┃ ┃┃\n" + "┣━╋┫\n" + "┣━╋┫\n" + "┃ ┃┃\n" + "┗━┻┛\n" +) + +HEAVY_EDGE: Box = Box( + "┏━┯┓\n" + "┃ │┃\n" + "┠─┼┨\n" + "┃ │┃\n" + "┠─┼┨\n" + "┠─┼┨\n" + "┃ │┃\n" + "┗━┷┛\n" +) + +HEAVY_HEAD: Box = Box( + "┏━┳┓\n" + "┃ ┃┃\n" + "┡━╇┩\n" + "│ ││\n" + "├─┼┤\n" + "├─┼┤\n" + "│ ││\n" + "└─┴┘\n" +) + +DOUBLE: Box = Box( + "╔═╦╗\n" + "║ ║║\n" + "╠═╬╣\n" + "║ ║║\n" + "╠═╬╣\n" + "╠═╬╣\n" + "║ ║║\n" + "╚═╩╝\n" +) + +DOUBLE_EDGE: Box = Box( + "╔═╤╗\n" + "║ │║\n" + "╟─┼╢\n" + "║ │║\n" + "╟─┼╢\n" + "╟─┼╢\n" + "║ │║\n" + "╚═╧╝\n" +) + +MARKDOWN: Box = Box( + " \n" + "| ||\n" + "|-||\n" + "| ||\n" + "|-||\n" + "|-||\n" + "| ||\n" + " \n", + ascii=True, +) +# fmt: on + +# Map Boxes that don't 
render with raster fonts on to equivalent that do +LEGACY_WINDOWS_SUBSTITUTIONS = { + ROUNDED: SQUARE, + MINIMAL_HEAVY_HEAD: MINIMAL, + SIMPLE_HEAVY: SIMPLE, + HEAVY: SQUARE, + HEAVY_EDGE: SQUARE, + HEAVY_HEAD: SQUARE, +} + +# Map headed boxes to their headerless equivalents +PLAIN_HEADED_SUBSTITUTIONS = { + HEAVY_HEAD: SQUARE, + SQUARE_DOUBLE_HEAD: SQUARE, + MINIMAL_DOUBLE_HEAD: MINIMAL, + MINIMAL_HEAVY_HEAD: MINIMAL, + ASCII_DOUBLE_HEAD: ASCII2, +} + + +if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.columns import Columns + from pip._vendor.rich.panel import Panel + + from . import box as box + from .console import Console + from .table import Table + from .text import Text + + console = Console(record=True) + + BOXES = [ + "ASCII", + "ASCII2", + "ASCII_DOUBLE_HEAD", + "SQUARE", + "SQUARE_DOUBLE_HEAD", + "MINIMAL", + "MINIMAL_HEAVY_HEAD", + "MINIMAL_DOUBLE_HEAD", + "SIMPLE", + "SIMPLE_HEAD", + "SIMPLE_HEAVY", + "HORIZONTALS", + "ROUNDED", + "HEAVY", + "HEAVY_EDGE", + "HEAVY_HEAD", + "DOUBLE", + "DOUBLE_EDGE", + "MARKDOWN", + ] + + console.print(Panel("[bold green]Box Constants", style="green"), justify="center") + console.print() + + columns = Columns(expand=True, padding=2) + for box_name in sorted(BOXES): + table = Table( + show_footer=True, style="dim", border_style="not dim", expand=True + ) + table.add_column("Header 1", "Footer 1") + table.add_column("Header 2", "Footer 2") + table.add_row("Cell", "Cell") + table.add_row("Cell", "Cell") + table.box = getattr(box, box_name) + table.title = Text(f"box.{box_name}", style="magenta") + columns.add_renderable(table) + console.print(columns) + + # console.save_svg("box.svg") diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/cells.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/cells.py new file mode 100644 index 0000000..a854622 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/cells.py @@ -0,0 +1,174 @@ +from __future__ import annotations + +from functools import lru_cache +from typing import Callable + +from ._cell_widths import CELL_WIDTHS + +# Ranges of unicode ordinals that produce a 1-cell wide character +# This is non-exhaustive, but covers most common Western characters +_SINGLE_CELL_UNICODE_RANGES: list[tuple[int, int]] = [ + (0x20, 0x7E), # Latin (excluding non-printable) + (0xA0, 0xAC), + (0xAE, 0x002FF), + (0x00370, 0x00482), # Greek / Cyrillic + (0x02500, 0x025FC), # Box drawing, box elements, geometric shapes + (0x02800, 0x028FF), # Braille +] + +# A set of characters that are a single cell wide +_SINGLE_CELLS = frozenset( + [ + character + for _start, _end in _SINGLE_CELL_UNICODE_RANGES + for character in map(chr, range(_start, _end + 1)) + ] +) + +# When called with a string this will return True if all +# characters are single-cell, otherwise False +_is_single_cell_widths: Callable[[str], bool] = _SINGLE_CELLS.issuperset + + +@lru_cache(4096) +def cached_cell_len(text: str) -> int: + """Get the number of cells required to display text. + + This method always caches, which may use up a lot of memory. It is recommended to use + `cell_len` over this method. + + Args: + text (str): Text to display. + + Returns: + int: Get the number of cells required to display text. + """ + if _is_single_cell_widths(text): + return len(text) + return sum(map(get_character_cell_size, text)) + + +def cell_len(text: str, _cell_len: Callable[[str], int] = cached_cell_len) -> int: + """Get the number of cells required to display text. + + Args: + text (str): Text to display. 
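+
+        Example:
+            A worked sketch: ``cell_len("ab")`` is ``2``, while
+            ``cell_len("你好")`` is ``4``, since each of those CJK
+            characters occupies two cells.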
+ + Returns: + int: Get the number of cells required to display text. + """ + if len(text) < 512: + return _cell_len(text) + if _is_single_cell_widths(text): + return len(text) + return sum(map(get_character_cell_size, text)) + + +@lru_cache(maxsize=4096) +def get_character_cell_size(character: str) -> int: + """Get the cell size of a character. + + Args: + character (str): A single character. + + Returns: + int: Number of cells (0, 1 or 2) occupied by that character. + """ + codepoint = ord(character) + _table = CELL_WIDTHS + lower_bound = 0 + upper_bound = len(_table) - 1 + index = (lower_bound + upper_bound) // 2 + while True: + start, end, width = _table[index] + if codepoint < start: + upper_bound = index - 1 + elif codepoint > end: + lower_bound = index + 1 + else: + return 0 if width == -1 else width + if upper_bound < lower_bound: + break + index = (lower_bound + upper_bound) // 2 + return 1 + + +def set_cell_size(text: str, total: int) -> str: + """Set the length of a string to fit within given number of cells.""" + + if _is_single_cell_widths(text): + size = len(text) + if size < total: + return text + " " * (total - size) + return text[:total] + + if total <= 0: + return "" + cell_size = cell_len(text) + if cell_size == total: + return text + if cell_size < total: + return text + " " * (total - cell_size) + + start = 0 + end = len(text) + + # Binary search until we find the right size + while True: + pos = (start + end) // 2 + before = text[: pos + 1] + before_len = cell_len(before) + if before_len == total + 1 and cell_len(before[-1]) == 2: + return before[:-1] + " " + if before_len == total: + return before + if before_len > total: + end = pos + else: + start = pos + + +def chop_cells( + text: str, + width: int, +) -> list[str]: + """Split text into lines such that each line fits within the available (cell) width. + + Args: + text: The text to fold such that it fits in the given width. + width: The width available (number of cells). + + Returns: + A list of strings such that each string in the list has cell width + less than or equal to the available width. 
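+
+    Example:
+        A worked sketch: ``chop_cells("你好世界", 4)`` returns
+        ``["你好", "世界"]``, because each double-width character
+        costs two cells.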
+ """ + _get_character_cell_size = get_character_cell_size + lines: list[list[str]] = [[]] + + append_new_line = lines.append + append_to_last_line = lines[-1].append + + total_width = 0 + + for character in text: + cell_width = _get_character_cell_size(character) + char_doesnt_fit = total_width + cell_width > width + + if char_doesnt_fit: + append_new_line([character]) + append_to_last_line = lines[-1].append + total_width = cell_width + else: + append_to_last_line(character) + total_width += cell_width + + return ["".join(line) for line in lines] + + +if __name__ == "__main__": # pragma: no cover + print(get_character_cell_size("😽")) + for line in chop_cells("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", 8): + print(line) + for n in range(80, 1, -1): + print(set_cell_size("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", n) + "|") + print("x" * n) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/color.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/color.py new file mode 100644 index 0000000..e2c23a6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/color.py @@ -0,0 +1,621 @@ +import re +import sys +from colorsys import rgb_to_hls +from enum import IntEnum +from functools import lru_cache +from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple + +from ._palettes import EIGHT_BIT_PALETTE, STANDARD_PALETTE, WINDOWS_PALETTE +from .color_triplet import ColorTriplet +from .repr import Result, rich_repr +from .terminal_theme import DEFAULT_TERMINAL_THEME + +if TYPE_CHECKING: # pragma: no cover + from .terminal_theme import TerminalTheme + from .text import Text + + +WINDOWS = sys.platform == "win32" + + +class ColorSystem(IntEnum): + """One of the 3 color system supported by terminals.""" + + STANDARD = 1 + EIGHT_BIT = 2 + TRUECOLOR = 3 + WINDOWS = 4 + + def __repr__(self) -> str: + return f"ColorSystem.{self.name}" + + def __str__(self) -> str: + return repr(self) + + +class ColorType(IntEnum): + """Type of color stored in Color class.""" + + DEFAULT = 0 + STANDARD = 1 + EIGHT_BIT = 2 + TRUECOLOR = 3 + WINDOWS = 4 + + def __repr__(self) -> str: + return f"ColorType.{self.name}" + + +ANSI_COLOR_NAMES = { + "black": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "magenta": 5, + "cyan": 6, + "white": 7, + "bright_black": 8, + "bright_red": 9, + "bright_green": 10, + "bright_yellow": 11, + "bright_blue": 12, + "bright_magenta": 13, + "bright_cyan": 14, + "bright_white": 15, + "grey0": 16, + "gray0": 16, + "navy_blue": 17, + "dark_blue": 18, + "blue3": 20, + "blue1": 21, + "dark_green": 22, + "deep_sky_blue4": 25, + "dodger_blue3": 26, + "dodger_blue2": 27, + "green4": 28, + "spring_green4": 29, + "turquoise4": 30, + "deep_sky_blue3": 32, + "dodger_blue1": 33, + "green3": 40, + "spring_green3": 41, + "dark_cyan": 36, + "light_sea_green": 37, + "deep_sky_blue2": 38, + "deep_sky_blue1": 39, + "spring_green2": 47, + "cyan3": 43, + "dark_turquoise": 44, + "turquoise2": 45, + "green1": 46, + "spring_green1": 48, + "medium_spring_green": 49, + "cyan2": 50, + "cyan1": 51, + "dark_red": 88, + "deep_pink4": 125, + "purple4": 55, + "purple3": 56, + "blue_violet": 57, + "orange4": 94, + "grey37": 59, + "gray37": 59, + "medium_purple4": 60, + "slate_blue3": 62, + "royal_blue1": 63, + "chartreuse4": 64, + "dark_sea_green4": 71, + "pale_turquoise4": 66, + "steel_blue": 67, + "steel_blue3": 68, + "cornflower_blue": 69, + "chartreuse3": 76, + "cadet_blue": 73, + "sky_blue3": 74, + "steel_blue1": 81, + "pale_green3": 114, + "sea_green3": 78, + "aquamarine3": 79, + 
"medium_turquoise": 80, + "chartreuse2": 112, + "sea_green2": 83, + "sea_green1": 85, + "aquamarine1": 122, + "dark_slate_gray2": 87, + "dark_magenta": 91, + "dark_violet": 128, + "purple": 129, + "light_pink4": 95, + "plum4": 96, + "medium_purple3": 98, + "slate_blue1": 99, + "yellow4": 106, + "wheat4": 101, + "grey53": 102, + "gray53": 102, + "light_slate_grey": 103, + "light_slate_gray": 103, + "medium_purple": 104, + "light_slate_blue": 105, + "dark_olive_green3": 149, + "dark_sea_green": 108, + "light_sky_blue3": 110, + "sky_blue2": 111, + "dark_sea_green3": 150, + "dark_slate_gray3": 116, + "sky_blue1": 117, + "chartreuse1": 118, + "light_green": 120, + "pale_green1": 156, + "dark_slate_gray1": 123, + "red3": 160, + "medium_violet_red": 126, + "magenta3": 164, + "dark_orange3": 166, + "indian_red": 167, + "hot_pink3": 168, + "medium_orchid3": 133, + "medium_orchid": 134, + "medium_purple2": 140, + "dark_goldenrod": 136, + "light_salmon3": 173, + "rosy_brown": 138, + "grey63": 139, + "gray63": 139, + "medium_purple1": 141, + "gold3": 178, + "dark_khaki": 143, + "navajo_white3": 144, + "grey69": 145, + "gray69": 145, + "light_steel_blue3": 146, + "light_steel_blue": 147, + "yellow3": 184, + "dark_sea_green2": 157, + "light_cyan3": 152, + "light_sky_blue1": 153, + "green_yellow": 154, + "dark_olive_green2": 155, + "dark_sea_green1": 193, + "pale_turquoise1": 159, + "deep_pink3": 162, + "magenta2": 200, + "hot_pink2": 169, + "orchid": 170, + "medium_orchid1": 207, + "orange3": 172, + "light_pink3": 174, + "pink3": 175, + "plum3": 176, + "violet": 177, + "light_goldenrod3": 179, + "tan": 180, + "misty_rose3": 181, + "thistle3": 182, + "plum2": 183, + "khaki3": 185, + "light_goldenrod2": 222, + "light_yellow3": 187, + "grey84": 188, + "gray84": 188, + "light_steel_blue1": 189, + "yellow2": 190, + "dark_olive_green1": 192, + "honeydew2": 194, + "light_cyan1": 195, + "red1": 196, + "deep_pink2": 197, + "deep_pink1": 199, + "magenta1": 201, + "orange_red1": 202, + "indian_red1": 204, + "hot_pink": 206, + "dark_orange": 208, + "salmon1": 209, + "light_coral": 210, + "pale_violet_red1": 211, + "orchid2": 212, + "orchid1": 213, + "orange1": 214, + "sandy_brown": 215, + "light_salmon1": 216, + "light_pink1": 217, + "pink1": 218, + "plum1": 219, + "gold1": 220, + "navajo_white1": 223, + "misty_rose1": 224, + "thistle1": 225, + "yellow1": 226, + "light_goldenrod1": 227, + "khaki1": 228, + "wheat1": 229, + "cornsilk1": 230, + "grey100": 231, + "gray100": 231, + "grey3": 232, + "gray3": 232, + "grey7": 233, + "gray7": 233, + "grey11": 234, + "gray11": 234, + "grey15": 235, + "gray15": 235, + "grey19": 236, + "gray19": 236, + "grey23": 237, + "gray23": 237, + "grey27": 238, + "gray27": 238, + "grey30": 239, + "gray30": 239, + "grey35": 240, + "gray35": 240, + "grey39": 241, + "gray39": 241, + "grey42": 242, + "gray42": 242, + "grey46": 243, + "gray46": 243, + "grey50": 244, + "gray50": 244, + "grey54": 245, + "gray54": 245, + "grey58": 246, + "gray58": 246, + "grey62": 247, + "gray62": 247, + "grey66": 248, + "gray66": 248, + "grey70": 249, + "gray70": 249, + "grey74": 250, + "gray74": 250, + "grey78": 251, + "gray78": 251, + "grey82": 252, + "gray82": 252, + "grey85": 253, + "gray85": 253, + "grey89": 254, + "gray89": 254, + "grey93": 255, + "gray93": 255, +} + + +class ColorParseError(Exception): + """The color could not be parsed.""" + + +RE_COLOR = re.compile( + r"""^ +\#([0-9a-f]{6})$| +color\(([0-9]{1,3})\)$| +rgb\(([\d\s,]+)\)$ +""", + re.VERBOSE, +) + + +@rich_repr +class Color(NamedTuple): + 
"""Terminal color definition.""" + + name: str + """The name of the color (typically the input to Color.parse).""" + type: ColorType + """The type of the color.""" + number: Optional[int] = None + """The color number, if a standard color, or None.""" + triplet: Optional[ColorTriplet] = None + """A triplet of color components, if an RGB color.""" + + def __rich__(self) -> "Text": + """Displays the actual color if Rich printed.""" + from .style import Style + from .text import Text + + return Text.assemble( + f"", + ) + + def __rich_repr__(self) -> Result: + yield self.name + yield self.type + yield "number", self.number, None + yield "triplet", self.triplet, None + + @property + def system(self) -> ColorSystem: + """Get the native color system for this color.""" + if self.type == ColorType.DEFAULT: + return ColorSystem.STANDARD + return ColorSystem(int(self.type)) + + @property + def is_system_defined(self) -> bool: + """Check if the color is ultimately defined by the system.""" + return self.system not in (ColorSystem.EIGHT_BIT, ColorSystem.TRUECOLOR) + + @property + def is_default(self) -> bool: + """Check if the color is a default color.""" + return self.type == ColorType.DEFAULT + + def get_truecolor( + self, theme: Optional["TerminalTheme"] = None, foreground: bool = True + ) -> ColorTriplet: + """Get an equivalent color triplet for this color. + + Args: + theme (TerminalTheme, optional): Optional terminal theme, or None to use default. Defaults to None. + foreground (bool, optional): True for a foreground color, or False for background. Defaults to True. + + Returns: + ColorTriplet: A color triplet containing RGB components. + """ + + if theme is None: + theme = DEFAULT_TERMINAL_THEME + if self.type == ColorType.TRUECOLOR: + assert self.triplet is not None + return self.triplet + elif self.type == ColorType.EIGHT_BIT: + assert self.number is not None + return EIGHT_BIT_PALETTE[self.number] + elif self.type == ColorType.STANDARD: + assert self.number is not None + return theme.ansi_colors[self.number] + elif self.type == ColorType.WINDOWS: + assert self.number is not None + return WINDOWS_PALETTE[self.number] + else: # self.type == ColorType.DEFAULT: + assert self.number is None + return theme.foreground_color if foreground else theme.background_color + + @classmethod + def from_ansi(cls, number: int) -> "Color": + """Create a Color number from it's 8-bit ansi number. + + Args: + number (int): A number between 0-255 inclusive. + + Returns: + Color: A new Color instance. + """ + return cls( + name=f"color({number})", + type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT), + number=number, + ) + + @classmethod + def from_triplet(cls, triplet: "ColorTriplet") -> "Color": + """Create a truecolor RGB color from a triplet of values. + + Args: + triplet (ColorTriplet): A color triplet containing red, green and blue components. + + Returns: + Color: A new color object. + """ + return cls(name=triplet.hex, type=ColorType.TRUECOLOR, triplet=triplet) + + @classmethod + def from_rgb(cls, red: float, green: float, blue: float) -> "Color": + """Create a truecolor from three color components in the range(0->255). + + Args: + red (float): Red component in range 0-255. + green (float): Green component in range 0-255. + blue (float): Blue component in range 0-255. + + Returns: + Color: A new color object. + """ + return cls.from_triplet(ColorTriplet(int(red), int(green), int(blue))) + + @classmethod + def default(cls) -> "Color": + """Get a Color instance representing the default color. 
+ + Returns: + Color: Default color. + """ + return cls(name="default", type=ColorType.DEFAULT) + + @classmethod + @lru_cache(maxsize=1024) + def parse(cls, color: str) -> "Color": + """Parse a color definition.""" + original_color = color + color = color.lower().strip() + + if color == "default": + return cls(color, type=ColorType.DEFAULT) + + color_number = ANSI_COLOR_NAMES.get(color) + if color_number is not None: + return cls( + color, + type=(ColorType.STANDARD if color_number < 16 else ColorType.EIGHT_BIT), + number=color_number, + ) + + color_match = RE_COLOR.match(color) + if color_match is None: + raise ColorParseError(f"{original_color!r} is not a valid color") + + color_24, color_8, color_rgb = color_match.groups() + if color_24: + triplet = ColorTriplet( + int(color_24[0:2], 16), int(color_24[2:4], 16), int(color_24[4:6], 16) + ) + return cls(color, ColorType.TRUECOLOR, triplet=triplet) + + elif color_8: + number = int(color_8) + if number > 255: + raise ColorParseError(f"color number must be <= 255 in {color!r}") + return cls( + color, + type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT), + number=number, + ) + + else: # color_rgb: + components = color_rgb.split(",") + if len(components) != 3: + raise ColorParseError( + f"expected three components in {original_color!r}" + ) + red, green, blue = components + triplet = ColorTriplet(int(red), int(green), int(blue)) + if not all(component <= 255 for component in triplet): + raise ColorParseError( + f"color components must be <= 255 in {original_color!r}" + ) + return cls(color, ColorType.TRUECOLOR, triplet=triplet) + + @lru_cache(maxsize=1024) + def get_ansi_codes(self, foreground: bool = True) -> Tuple[str, ...]: + """Get the ANSI escape codes for this color.""" + _type = self.type + if _type == ColorType.DEFAULT: + return ("39" if foreground else "49",) + + elif _type == ColorType.WINDOWS: + number = self.number + assert number is not None + fore, back = (30, 40) if number < 8 else (82, 92) + return (str(fore + number if foreground else back + number),) + + elif _type == ColorType.STANDARD: + number = self.number + assert number is not None + fore, back = (30, 40) if number < 8 else (82, 92) + return (str(fore + number if foreground else back + number),) + + elif _type == ColorType.EIGHT_BIT: + assert self.number is not None + return ("38" if foreground else "48", "5", str(self.number)) + + else: # self.standard == ColorStandard.TRUECOLOR: + assert self.triplet is not None + red, green, blue = self.triplet + return ("38" if foreground else "48", "2", str(red), str(green), str(blue)) + + @lru_cache(maxsize=1024) + def downgrade(self, system: ColorSystem) -> "Color": + """Downgrade a color system to a system with fewer colors.""" + + if self.type in (ColorType.DEFAULT, system): + return self + # Convert to 8-bit color from truecolor color + if system == ColorSystem.EIGHT_BIT and self.system == ColorSystem.TRUECOLOR: + assert self.triplet is not None + _h, l, s = rgb_to_hls(*self.triplet.normalized) + # If saturation is under 15% assume it is grayscale + if s < 0.15: + gray = round(l * 25.0) + if gray == 0: + color_number = 16 + elif gray == 25: + color_number = 231 + else: + color_number = 231 + gray + return Color(self.name, ColorType.EIGHT_BIT, number=color_number) + + red, green, blue = self.triplet + six_red = red / 95 if red < 95 else 1 + (red - 95) / 40 + six_green = green / 95 if green < 95 else 1 + (green - 95) / 40 + six_blue = blue / 95 if blue < 95 else 1 + (blue - 95) / 40 + + color_number = ( + 16 + 36 
* round(six_red) + 6 * round(six_green) + round(six_blue) + ) + return Color(self.name, ColorType.EIGHT_BIT, number=color_number) + + # Convert to standard from truecolor or 8-bit + elif system == ColorSystem.STANDARD: + if self.system == ColorSystem.TRUECOLOR: + assert self.triplet is not None + triplet = self.triplet + else: # self.system == ColorSystem.EIGHT_BIT + assert self.number is not None + triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number]) + + color_number = STANDARD_PALETTE.match(triplet) + return Color(self.name, ColorType.STANDARD, number=color_number) + + elif system == ColorSystem.WINDOWS: + if self.system == ColorSystem.TRUECOLOR: + assert self.triplet is not None + triplet = self.triplet + else: # self.system == ColorSystem.EIGHT_BIT + assert self.number is not None + if self.number < 16: + return Color(self.name, ColorType.WINDOWS, number=self.number) + triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number]) + + color_number = WINDOWS_PALETTE.match(triplet) + return Color(self.name, ColorType.WINDOWS, number=color_number) + + return self + + +def parse_rgb_hex(hex_color: str) -> ColorTriplet: + """Parse six hex characters in to RGB triplet.""" + assert len(hex_color) == 6, "must be 6 characters" + color = ColorTriplet( + int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16) + ) + return color + + +def blend_rgb( + color1: ColorTriplet, color2: ColorTriplet, cross_fade: float = 0.5 +) -> ColorTriplet: + """Blend one RGB color in to another.""" + r1, g1, b1 = color1 + r2, g2, b2 = color2 + new_color = ColorTriplet( + int(r1 + (r2 - r1) * cross_fade), + int(g1 + (g2 - g1) * cross_fade), + int(b1 + (b2 - b1) * cross_fade), + ) + return new_color + + +if __name__ == "__main__": # pragma: no cover + from .console import Console + from .table import Table + from .text import Text + + console = Console() + + table = Table(show_footer=False, show_edge=True) + table.add_column("Color", width=10, overflow="ellipsis") + table.add_column("Number", justify="right", style="yellow") + table.add_column("Name", style="green") + table.add_column("Hex", style="blue") + table.add_column("RGB", style="magenta") + + colors = sorted((v, k) for k, v in ANSI_COLOR_NAMES.items()) + for color_number, name in colors: + if "grey" in name: + continue + color_cell = Text(" " * 10, style=f"on {name}") + if color_number < 16: + table.add_row(color_cell, f"{color_number}", Text(f'"{name}"')) + else: + color = EIGHT_BIT_PALETTE[color_number] # type: ignore[has-type] + table.add_row( + color_cell, str(color_number), Text(f'"{name}"'), color.hex, color.rgb + ) + + console.print(table) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/color_triplet.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/color_triplet.py new file mode 100644 index 0000000..02cab32 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/color_triplet.py @@ -0,0 +1,38 @@ +from typing import NamedTuple, Tuple + + +class ColorTriplet(NamedTuple): + """The red, green, and blue components of a color.""" + + red: int + """Red component in 0 to 255 range.""" + green: int + """Green component in 0 to 255 range.""" + blue: int + """Blue component in 0 to 255 range.""" + + @property + def hex(self) -> str: + """get the color triplet in CSS style.""" + red, green, blue = self + return f"#{red:02x}{green:02x}{blue:02x}" + + @property + def rgb(self) -> str: + """The color in RGB format. + + Returns: + str: An rgb color, e.g. ``"rgb(100,23,255)"``. 
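+
+        Example:
+            A worked sketch: ``ColorTriplet(100, 23, 255).rgb`` returns
+            ``"rgb(100,23,255)"``.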
+ """ + red, green, blue = self + return f"rgb({red},{green},{blue})" + + @property + def normalized(self) -> Tuple[float, float, float]: + """Convert components into floats between 0 and 1. + + Returns: + Tuple[float, float, float]: A tuple of three normalized colour components. + """ + red, green, blue = self + return red / 255.0, green / 255.0, blue / 255.0 diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/columns.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/columns.py new file mode 100644 index 0000000..669a3a7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/columns.py @@ -0,0 +1,187 @@ +from collections import defaultdict +from itertools import chain +from operator import itemgetter +from typing import Dict, Iterable, List, Optional, Tuple + +from .align import Align, AlignMethod +from .console import Console, ConsoleOptions, RenderableType, RenderResult +from .constrain import Constrain +from .measure import Measurement +from .padding import Padding, PaddingDimensions +from .table import Table +from .text import TextType +from .jupyter import JupyterMixin + + +class Columns(JupyterMixin): + """Display renderables in neat columns. + + Args: + renderables (Iterable[RenderableType]): Any number of Rich renderables (including str). + width (int, optional): The desired width of the columns, or None to auto detect. Defaults to None. + padding (PaddingDimensions, optional): Optional padding around cells. Defaults to (0, 1). + expand (bool, optional): Expand columns to full width. Defaults to False. + equal (bool, optional): Arrange in to equal sized columns. Defaults to False. + column_first (bool, optional): Align items from top to bottom (rather than left to right). Defaults to False. + right_to_left (bool, optional): Start column from right hand side. Defaults to False. + align (str, optional): Align value ("left", "right", or "center") or None for default. Defaults to None. + title (TextType, optional): Optional title for Columns. + """ + + def __init__( + self, + renderables: Optional[Iterable[RenderableType]] = None, + padding: PaddingDimensions = (0, 1), + *, + width: Optional[int] = None, + expand: bool = False, + equal: bool = False, + column_first: bool = False, + right_to_left: bool = False, + align: Optional[AlignMethod] = None, + title: Optional[TextType] = None, + ) -> None: + self.renderables = list(renderables or []) + self.width = width + self.padding = padding + self.expand = expand + self.equal = equal + self.column_first = column_first + self.right_to_left = right_to_left + self.align: Optional[AlignMethod] = align + self.title = title + + def add_renderable(self, renderable: RenderableType) -> None: + """Add a renderable to the columns. + + Args: + renderable (RenderableType): Any renderable object. 
+ """ + self.renderables.append(renderable) + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + render_str = console.render_str + renderables = [ + render_str(renderable) if isinstance(renderable, str) else renderable + for renderable in self.renderables + ] + if not renderables: + return + _top, right, _bottom, left = Padding.unpack(self.padding) + width_padding = max(left, right) + max_width = options.max_width + widths: Dict[int, int] = defaultdict(int) + column_count = len(renderables) + + get_measurement = Measurement.get + renderable_widths = [ + get_measurement(console, options, renderable).maximum + for renderable in renderables + ] + if self.equal: + renderable_widths = [max(renderable_widths)] * len(renderable_widths) + + def iter_renderables( + column_count: int, + ) -> Iterable[Tuple[int, Optional[RenderableType]]]: + item_count = len(renderables) + if self.column_first: + width_renderables = list(zip(renderable_widths, renderables)) + + column_lengths: List[int] = [item_count // column_count] * column_count + for col_no in range(item_count % column_count): + column_lengths[col_no] += 1 + + row_count = (item_count + column_count - 1) // column_count + cells = [[-1] * column_count for _ in range(row_count)] + row = col = 0 + for index in range(item_count): + cells[row][col] = index + column_lengths[col] -= 1 + if column_lengths[col]: + row += 1 + else: + col += 1 + row = 0 + for index in chain.from_iterable(cells): + if index == -1: + break + yield width_renderables[index] + else: + yield from zip(renderable_widths, renderables) + # Pad odd elements with spaces + if item_count % column_count: + for _ in range(column_count - (item_count % column_count)): + yield 0, None + + table = Table.grid(padding=self.padding, collapse_padding=True, pad_edge=False) + table.expand = self.expand + table.title = self.title + + if self.width is not None: + column_count = (max_width) // (self.width + width_padding) + for _ in range(column_count): + table.add_column(width=self.width) + else: + while column_count > 1: + widths.clear() + column_no = 0 + for renderable_width, _ in iter_renderables(column_count): + widths[column_no] = max(widths[column_no], renderable_width) + total_width = sum(widths.values()) + width_padding * ( + len(widths) - 1 + ) + if total_width > max_width: + column_count = len(widths) - 1 + break + else: + column_no = (column_no + 1) % column_count + else: + break + + get_renderable = itemgetter(1) + _renderables = [ + get_renderable(_renderable) + for _renderable in iter_renderables(column_count) + ] + if self.equal: + _renderables = [ + None + if renderable is None + else Constrain(renderable, renderable_widths[0]) + for renderable in _renderables + ] + if self.align: + align = self.align + _Align = Align + _renderables = [ + None if renderable is None else _Align(renderable, align) + for renderable in _renderables + ] + + right_to_left = self.right_to_left + add_row = table.add_row + for start in range(0, len(_renderables), column_count): + row = _renderables[start : start + column_count] + if right_to_left: + row = row[::-1] + add_row(*row) + yield table + + +if __name__ == "__main__": # pragma: no cover + import os + + console = Console() + + files = [f"{i} {s}" for i, s in enumerate(sorted(os.listdir()))] + columns = Columns(files, padding=(0, 1), expand=False, equal=False) + console.print(columns) + console.rule() + columns.column_first = True + console.print(columns) + columns.right_to_left = True + console.rule() + 
console.print(columns) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/console.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/console.py new file mode 100644 index 0000000..db2ba55 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/console.py @@ -0,0 +1,2680 @@ +import inspect +import os +import sys +import threading +import zlib +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from datetime import datetime +from functools import wraps +from getpass import getpass +from html import escape +from inspect import isclass +from itertools import islice +from math import ceil +from time import monotonic +from types import FrameType, ModuleType, TracebackType +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Literal, + Mapping, + NamedTuple, + Optional, + Protocol, + TextIO, + Tuple, + Type, + Union, + cast, + runtime_checkable, +) + +from pip._vendor.rich._null_file import NULL_FILE + +from . import errors, themes +from ._emoji_replace import _emoji_replace +from ._export_format import CONSOLE_HTML_FORMAT, CONSOLE_SVG_FORMAT +from ._fileno import get_fileno +from ._log_render import FormatTimeCallable, LogRender +from .align import Align, AlignMethod +from .color import ColorSystem, blend_rgb +from .control import Control +from .emoji import EmojiVariant +from .highlighter import NullHighlighter, ReprHighlighter +from .markup import render as render_markup +from .measure import Measurement, measure_renderables +from .pager import Pager, SystemPager +from .pretty import Pretty, is_expandable +from .protocol import rich_cast +from .region import Region +from .scope import render_scope +from .screen import Screen +from .segment import Segment +from .style import Style, StyleType +from .styled import Styled +from .terminal_theme import DEFAULT_TERMINAL_THEME, SVG_EXPORT_THEME, TerminalTheme +from .text import Text, TextType +from .theme import Theme, ThemeStack + +if TYPE_CHECKING: + from ._windows import WindowsConsoleFeatures + from .live import Live + from .status import Status + +JUPYTER_DEFAULT_COLUMNS = 115 +JUPYTER_DEFAULT_LINES = 100 +WINDOWS = sys.platform == "win32" + +HighlighterType = Callable[[Union[str, "Text"]], "Text"] +JustifyMethod = Literal["default", "left", "center", "right", "full"] +OverflowMethod = Literal["fold", "crop", "ellipsis", "ignore"] + + +class NoChange: + pass + + +NO_CHANGE = NoChange() + +try: + _STDIN_FILENO = sys.__stdin__.fileno() # type: ignore[union-attr] +except Exception: + _STDIN_FILENO = 0 +try: + _STDOUT_FILENO = sys.__stdout__.fileno() # type: ignore[union-attr] +except Exception: + _STDOUT_FILENO = 1 +try: + _STDERR_FILENO = sys.__stderr__.fileno() # type: ignore[union-attr] +except Exception: + _STDERR_FILENO = 2 + +_STD_STREAMS = (_STDIN_FILENO, _STDOUT_FILENO, _STDERR_FILENO) +_STD_STREAMS_OUTPUT = (_STDOUT_FILENO, _STDERR_FILENO) + + +_TERM_COLORS = { + "kitty": ColorSystem.EIGHT_BIT, + "256color": ColorSystem.EIGHT_BIT, + "16color": ColorSystem.STANDARD, +} + + +class ConsoleDimensions(NamedTuple): + """Size of the terminal.""" + + width: int + """The width of the console in 'cells'.""" + height: int + """The height of the console in lines.""" + + +@dataclass +class ConsoleOptions: + """Options for __rich_console__ method.""" + + size: ConsoleDimensions + """Size of console.""" + legacy_windows: bool + """legacy_windows: flag for legacy windows.""" + min_width: int + """Minimum width of renderable.""" + max_width: int + 
"""Maximum width of renderable.""" + is_terminal: bool + """True if the target is a terminal, otherwise False.""" + encoding: str + """Encoding of terminal.""" + max_height: int + """Height of container (starts as terminal)""" + justify: Optional[JustifyMethod] = None + """Justify value override for renderable.""" + overflow: Optional[OverflowMethod] = None + """Overflow value override for renderable.""" + no_wrap: Optional[bool] = False + """Disable wrapping for text.""" + highlight: Optional[bool] = None + """Highlight override for render_str.""" + markup: Optional[bool] = None + """Enable markup when rendering strings.""" + height: Optional[int] = None + + @property + def ascii_only(self) -> bool: + """Check if renderables should use ascii only.""" + return not self.encoding.startswith("utf") + + def copy(self) -> "ConsoleOptions": + """Return a copy of the options. + + Returns: + ConsoleOptions: a copy of self. + """ + options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions) + options.__dict__ = self.__dict__.copy() + return options + + def update( + self, + *, + width: Union[int, NoChange] = NO_CHANGE, + min_width: Union[int, NoChange] = NO_CHANGE, + max_width: Union[int, NoChange] = NO_CHANGE, + justify: Union[Optional[JustifyMethod], NoChange] = NO_CHANGE, + overflow: Union[Optional[OverflowMethod], NoChange] = NO_CHANGE, + no_wrap: Union[Optional[bool], NoChange] = NO_CHANGE, + highlight: Union[Optional[bool], NoChange] = NO_CHANGE, + markup: Union[Optional[bool], NoChange] = NO_CHANGE, + height: Union[Optional[int], NoChange] = NO_CHANGE, + ) -> "ConsoleOptions": + """Update values, return a copy.""" + options = self.copy() + if not isinstance(width, NoChange): + options.min_width = options.max_width = max(0, width) + if not isinstance(min_width, NoChange): + options.min_width = min_width + if not isinstance(max_width, NoChange): + options.max_width = max_width + if not isinstance(justify, NoChange): + options.justify = justify + if not isinstance(overflow, NoChange): + options.overflow = overflow + if not isinstance(no_wrap, NoChange): + options.no_wrap = no_wrap + if not isinstance(highlight, NoChange): + options.highlight = highlight + if not isinstance(markup, NoChange): + options.markup = markup + if not isinstance(height, NoChange): + if height is not None: + options.max_height = height + options.height = None if height is None else max(0, height) + return options + + def update_width(self, width: int) -> "ConsoleOptions": + """Update just the width, return a copy. + + Args: + width (int): New width (sets both min_width and max_width) + + Returns: + ~ConsoleOptions: New console options instance. + """ + options = self.copy() + options.min_width = options.max_width = max(0, width) + return options + + def update_height(self, height: int) -> "ConsoleOptions": + """Update the height, and return a copy. + + Args: + height (int): New height + + Returns: + ~ConsoleOptions: New Console options instance. + """ + options = self.copy() + options.max_height = options.height = height + return options + + def reset_height(self) -> "ConsoleOptions": + """Return a copy of the options with height set to ``None``. + + Returns: + ~ConsoleOptions: New console options instance. + """ + options = self.copy() + options.height = None + return options + + def update_dimensions(self, width: int, height: int) -> "ConsoleOptions": + """Update the width and height, and return a copy. + + Args: + width (int): New width (sets both min_width and max_width). + height (int): New height. 
+ + Returns: + ~ConsoleOptions: New console options instance. + """ + options = self.copy() + options.min_width = options.max_width = max(0, width) + options.height = options.max_height = height + return options + + +@runtime_checkable +class RichCast(Protocol): + """An object that may be 'cast' to a console renderable.""" + + def __rich__( + self, + ) -> Union["ConsoleRenderable", "RichCast", str]: # pragma: no cover + ... + + +@runtime_checkable +class ConsoleRenderable(Protocol): + """An object that supports the console protocol.""" + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": # pragma: no cover + ... + + +# A type that may be rendered by Console. +RenderableType = Union[ConsoleRenderable, RichCast, str] +"""A string or any object that may be rendered by Rich.""" + +# The result of calling a __rich_console__ method. +RenderResult = Iterable[Union[RenderableType, Segment]] + +_null_highlighter = NullHighlighter() + + +class CaptureError(Exception): + """An error in the Capture context manager.""" + + +class NewLine: + """A renderable to generate new line(s)""" + + def __init__(self, count: int = 1) -> None: + self.count = count + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> Iterable[Segment]: + yield Segment("\n" * self.count) + + +class ScreenUpdate: + """Render a list of lines at a given offset.""" + + def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None: + self._lines = lines + self.x = x + self.y = y + + def __rich_console__( + self, console: "Console", options: ConsoleOptions + ) -> RenderResult: + x = self.x + move_to = Control.move_to + for offset, line in enumerate(self._lines, self.y): + yield move_to(x, offset) + yield from line + + +class Capture: + """Context manager to capture the result of printing to the console. + See :meth:`~rich.console.Console.capture` for how to use. + + Args: + console (Console): A console instance to capture output. + """ + + def __init__(self, console: "Console") -> None: + self._console = console + self._result: Optional[str] = None + + def __enter__(self) -> "Capture": + self._console.begin_capture() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self._result = self._console.end_capture() + + def get(self) -> str: + """Get the result of the capture.""" + if self._result is None: + raise CaptureError( + "Capture result is not available until context manager exits." + ) + return self._result + + +class ThemeContext: + """A context manager to use a temporary theme. See :meth:`~rich.console.Console.use_theme` for usage.""" + + def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None: + self.console = console + self.theme = theme + self.inherit = inherit + + def __enter__(self) -> "ThemeContext": + self.console.push_theme(self.theme) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.console.pop_theme() + + +class PagerContext: + """A context manager that 'pages' content. 
See :meth:`~rich.console.Console.pager` for usage.""" + + def __init__( + self, + console: "Console", + pager: Optional[Pager] = None, + styles: bool = False, + links: bool = False, + ) -> None: + self._console = console + self.pager = SystemPager() if pager is None else pager + self.styles = styles + self.links = links + + def __enter__(self) -> "PagerContext": + self._console._enter_buffer() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if exc_type is None: + with self._console._lock: + buffer: List[Segment] = self._console._buffer[:] + del self._console._buffer[:] + segments: Iterable[Segment] = buffer + if not self.styles: + segments = Segment.strip_styles(segments) + elif not self.links: + segments = Segment.strip_links(segments) + content = self._console._render_buffer(segments) + self.pager.show(content) + self._console._exit_buffer() + + +class ScreenContext: + """A context manager that enables an alternative screen. See :meth:`~rich.console.Console.screen` for usage.""" + + def __init__( + self, console: "Console", hide_cursor: bool, style: StyleType = "" + ) -> None: + self.console = console + self.hide_cursor = hide_cursor + self.screen = Screen(style=style) + self._changed = False + + def update( + self, *renderables: RenderableType, style: Optional[StyleType] = None + ) -> None: + """Update the screen. + + Args: + renderable (RenderableType, optional): Optional renderable to replace current renderable, + or None for no change. Defaults to None. + style: (Style, optional): Replacement style, or None for no change. Defaults to None. + """ + if renderables: + self.screen.renderable = ( + Group(*renderables) if len(renderables) > 1 else renderables[0] + ) + if style is not None: + self.screen.style = style + self.console.print(self.screen, end="") + + def __enter__(self) -> "ScreenContext": + self._changed = self.console.set_alt_screen(True) + if self._changed and self.hide_cursor: + self.console.show_cursor(False) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if self._changed: + self.console.set_alt_screen(False) + if self.hide_cursor: + self.console.show_cursor(True) + + +class Group: + """Takes a group of renderables and returns a renderable object that renders the group. + + Args: + renderables (Iterable[RenderableType]): An iterable of renderable objects. + fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True. + """ + + def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None: + self._renderables = renderables + self.fit = fit + self._render: Optional[List[RenderableType]] = None + + @property + def renderables(self) -> List["RenderableType"]: + if self._render is None: + self._render = list(self._renderables) + return self._render + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + if self.fit: + return measure_renderables(console, options, self.renderables) + else: + return Measurement(options.max_width, options.max_width) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> RenderResult: + yield from self.renderables + + +def group(fit: bool = True) -> Callable[..., Callable[..., Group]]: + """A decorator that turns an iterable of renderables in to a group. 
+ + Args: + fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True. + """ + + def decorator( + method: Callable[..., Iterable[RenderableType]], + ) -> Callable[..., Group]: + """Convert a method that returns an iterable of renderables in to a Group.""" + + @wraps(method) + def _replace(*args: Any, **kwargs: Any) -> Group: + renderables = method(*args, **kwargs) + return Group(*renderables, fit=fit) + + return _replace + + return decorator + + +def _is_jupyter() -> bool: # pragma: no cover + """Check if we're running in a Jupyter notebook.""" + try: + get_ipython # type: ignore[name-defined] + except NameError: + return False + ipython = get_ipython() # type: ignore[name-defined] + shell = ipython.__class__.__name__ + if ( + "google.colab" in str(ipython.__class__) + or os.getenv("DATABRICKS_RUNTIME_VERSION") + or shell == "ZMQInteractiveShell" + ): + return True # Jupyter notebook or qtconsole + elif shell == "TerminalInteractiveShell": + return False # Terminal running IPython + else: + return False # Other type (?) + + +COLOR_SYSTEMS = { + "standard": ColorSystem.STANDARD, + "256": ColorSystem.EIGHT_BIT, + "truecolor": ColorSystem.TRUECOLOR, + "windows": ColorSystem.WINDOWS, +} + +_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()} + + +@dataclass +class ConsoleThreadLocals(threading.local): + """Thread local values for Console context.""" + + theme_stack: ThemeStack + buffer: List[Segment] = field(default_factory=list) + buffer_index: int = 0 + + +class RenderHook(ABC): + """Provides hooks in to the render process.""" + + @abstractmethod + def process_renderables( + self, renderables: List[ConsoleRenderable] + ) -> List[ConsoleRenderable]: + """Called with a list of objects to render. + + This method can return a new list of renderables, or modify and return the same list. + + Args: + renderables (List[ConsoleRenderable]): A number of renderable objects. + + Returns: + List[ConsoleRenderable]: A replacement list of renderables. + """ + + +_windows_console_features: Optional["WindowsConsoleFeatures"] = None + + +def get_windows_console_features() -> "WindowsConsoleFeatures": # pragma: no cover + global _windows_console_features + if _windows_console_features is not None: + return _windows_console_features + from ._windows import get_windows_console_features + + _windows_console_features = get_windows_console_features() + return _windows_console_features + + +def detect_legacy_windows() -> bool: + """Detect legacy Windows.""" + return WINDOWS and not get_windows_console_features().vt + + +class Console: + """A high level console interface. + + Args: + color_system (str, optional): The color system supported by your terminal, + either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect. + force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None. + force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None. + force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None. + soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False. + theme (Theme, optional): An optional style theme object, or ``None`` for default theme. + stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False. 
+ file (IO, optional): A file object where the console should write to. Defaults to stdout. + quiet (bool, Optional): Boolean to suppress all output. Defaults to False. + width (int, optional): The width of the terminal. Leave as default to auto-detect width. + height (int, optional): The height of the terminal. Leave as default to auto-detect height. + style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None. + no_color (Optional[bool], optional): Enabled no color mode, or None to auto detect. Defaults to None. + tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8. + record (bool, optional): Boolean to enable recording of terminal output, + required to call :meth:`export_html`, :meth:`export_svg`, and :meth:`export_text`. Defaults to False. + markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True. + emoji (bool, optional): Enable emoji code. Defaults to True. + emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None. + highlight (bool, optional): Enable automatic highlighting. Defaults to True. + log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True. + log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True. + log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ". + highlighter (HighlighterType, optional): Default highlighter. + legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``. + safe_box (bool, optional): Restrict box options that don't render on legacy Windows. + get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log), + or None for datetime.now. + get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic. 
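+
+ Example:
+ >>> from rich.console import Console
+ >>> console = Console(width=80, record=True)
+ >>> console.print("Hello, [bold magenta]World[/]!")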
+ """ + + _environ: Mapping[str, str] = os.environ + + def __init__( + self, + *, + color_system: Optional[ + Literal["auto", "standard", "256", "truecolor", "windows"] + ] = "auto", + force_terminal: Optional[bool] = None, + force_jupyter: Optional[bool] = None, + force_interactive: Optional[bool] = None, + soft_wrap: bool = False, + theme: Optional[Theme] = None, + stderr: bool = False, + file: Optional[IO[str]] = None, + quiet: bool = False, + width: Optional[int] = None, + height: Optional[int] = None, + style: Optional[StyleType] = None, + no_color: Optional[bool] = None, + tab_size: int = 8, + record: bool = False, + markup: bool = True, + emoji: bool = True, + emoji_variant: Optional[EmojiVariant] = None, + highlight: bool = True, + log_time: bool = True, + log_path: bool = True, + log_time_format: Union[str, FormatTimeCallable] = "[%X]", + highlighter: Optional["HighlighterType"] = ReprHighlighter(), + legacy_windows: Optional[bool] = None, + safe_box: bool = True, + get_datetime: Optional[Callable[[], datetime]] = None, + get_time: Optional[Callable[[], float]] = None, + _environ: Optional[Mapping[str, str]] = None, + ): + # Copy of os.environ allows us to replace it for testing + if _environ is not None: + self._environ = _environ + + self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter + if self.is_jupyter: + if width is None: + jupyter_columns = self._environ.get("JUPYTER_COLUMNS") + if jupyter_columns is not None and jupyter_columns.isdigit(): + width = int(jupyter_columns) + else: + width = JUPYTER_DEFAULT_COLUMNS + if height is None: + jupyter_lines = self._environ.get("JUPYTER_LINES") + if jupyter_lines is not None and jupyter_lines.isdigit(): + height = int(jupyter_lines) + else: + height = JUPYTER_DEFAULT_LINES + + self.tab_size = tab_size + self.record = record + self._markup = markup + self._emoji = emoji + self._emoji_variant: Optional[EmojiVariant] = emoji_variant + self._highlight = highlight + self.legacy_windows: bool = ( + (detect_legacy_windows() and not self.is_jupyter) + if legacy_windows is None + else legacy_windows + ) + + if width is None: + columns = self._environ.get("COLUMNS") + if columns is not None and columns.isdigit(): + width = int(columns) - self.legacy_windows + if height is None: + lines = self._environ.get("LINES") + if lines is not None and lines.isdigit(): + height = int(lines) + + self.soft_wrap = soft_wrap + self._width = width + self._height = height + + self._color_system: Optional[ColorSystem] + + self._force_terminal = None + if force_terminal is not None: + self._force_terminal = force_terminal + + self._file = file + self.quiet = quiet + self.stderr = stderr + + if color_system is None: + self._color_system = None + elif color_system == "auto": + self._color_system = self._detect_color_system() + else: + self._color_system = COLOR_SYSTEMS[color_system] + + self._lock = threading.RLock() + self._log_render = LogRender( + show_time=log_time, + show_path=log_path, + time_format=log_time_format, + ) + self.highlighter: HighlighterType = highlighter or _null_highlighter + self.safe_box = safe_box + self.get_datetime = get_datetime or datetime.now + self.get_time = get_time or monotonic + self.style = style + self.no_color = ( + no_color + if no_color is not None + else self._environ.get("NO_COLOR", "") != "" + ) + if force_interactive is None: + tty_interactive = self._environ.get("TTY_INTERACTIVE", None) + if tty_interactive is not None: + if tty_interactive == "0": + force_interactive = False + elif tty_interactive 
== "1": + force_interactive = True + + self.is_interactive = ( + (self.is_terminal and not self.is_dumb_terminal) + if force_interactive is None + else force_interactive + ) + + self._record_buffer_lock = threading.RLock() + self._thread_locals = ConsoleThreadLocals( + theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme) + ) + self._record_buffer: List[Segment] = [] + self._render_hooks: List[RenderHook] = [] + self._live_stack: List[Live] = [] + self._is_alt_screen = False + + def __repr__(self) -> str: + return f"" + + @property + def file(self) -> IO[str]: + """Get the file object to write to.""" + file = self._file or (sys.stderr if self.stderr else sys.stdout) + file = getattr(file, "rich_proxied_file", file) + if file is None: + file = NULL_FILE + return file + + @file.setter + def file(self, new_file: IO[str]) -> None: + """Set a new file object.""" + self._file = new_file + + @property + def _buffer(self) -> List[Segment]: + """Get a thread local buffer.""" + return self._thread_locals.buffer + + @property + def _buffer_index(self) -> int: + """Get a thread local buffer.""" + return self._thread_locals.buffer_index + + @_buffer_index.setter + def _buffer_index(self, value: int) -> None: + self._thread_locals.buffer_index = value + + @property + def _theme_stack(self) -> ThemeStack: + """Get the thread local theme stack.""" + return self._thread_locals.theme_stack + + def _detect_color_system(self) -> Optional[ColorSystem]: + """Detect color system from env vars.""" + if self.is_jupyter: + return ColorSystem.TRUECOLOR + if not self.is_terminal or self.is_dumb_terminal: + return None + if WINDOWS: # pragma: no cover + if self.legacy_windows: # pragma: no cover + return ColorSystem.WINDOWS + windows_console_features = get_windows_console_features() + return ( + ColorSystem.TRUECOLOR + if windows_console_features.truecolor + else ColorSystem.EIGHT_BIT + ) + else: + color_term = self._environ.get("COLORTERM", "").strip().lower() + if color_term in ("truecolor", "24bit"): + return ColorSystem.TRUECOLOR + term = self._environ.get("TERM", "").strip().lower() + _term_name, _hyphen, colors = term.rpartition("-") + color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD) + return color_system + + def _enter_buffer(self) -> None: + """Enter in to a buffer context, and buffer all output.""" + self._buffer_index += 1 + + def _exit_buffer(self) -> None: + """Leave buffer context, and render content if required.""" + self._buffer_index -= 1 + self._check_buffer() + + def set_live(self, live: "Live") -> bool: + """Set Live instance. Used by Live context manager (no need to call directly). + + Args: + live (Live): Live instance using this Console. + + Returns: + Boolean that indicates if the live is the topmost of the stack. + + Raises: + errors.LiveError: If this Console has a Live context currently active. + """ + with self._lock: + self._live_stack.append(live) + return len(self._live_stack) == 1 + + def clear_live(self) -> None: + """Clear the Live instance. Used by the Live context manager (no need to call directly).""" + with self._lock: + self._live_stack.pop() + + def push_render_hook(self, hook: RenderHook) -> None: + """Add a new render hook to the stack. + + Args: + hook (RenderHook): Render hook instance. 
+ """ + with self._lock: + self._render_hooks.append(hook) + + def pop_render_hook(self) -> None: + """Pop the last renderhook from the stack.""" + with self._lock: + self._render_hooks.pop() + + def __enter__(self) -> "Console": + """Own context manager to enter buffer context.""" + self._enter_buffer() + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + """Exit buffer context.""" + self._exit_buffer() + + def begin_capture(self) -> None: + """Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output.""" + self._enter_buffer() + + def end_capture(self) -> str: + """End capture mode and return captured string. + + Returns: + str: Console output. + """ + render_result = self._render_buffer(self._buffer) + del self._buffer[:] + self._exit_buffer() + return render_result + + def push_theme(self, theme: Theme, *, inherit: bool = True) -> None: + """Push a new theme on to the top of the stack, replacing the styles from the previous theme. + Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather + than calling this method directly. + + Args: + theme (Theme): A theme instance. + inherit (bool, optional): Inherit existing styles. Defaults to True. + """ + self._theme_stack.push_theme(theme, inherit=inherit) + + def pop_theme(self) -> None: + """Remove theme from top of stack, restoring previous theme.""" + self._theme_stack.pop_theme() + + def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext: + """Use a different theme for the duration of the context manager. + + Args: + theme (Theme): Theme instance to user. + inherit (bool, optional): Inherit existing console styles. Defaults to True. + + Returns: + ThemeContext: [description] + """ + return ThemeContext(self, theme, inherit) + + @property + def color_system(self) -> Optional[str]: + """Get color system string. + + Returns: + Optional[str]: "standard", "256" or "truecolor". + """ + + if self._color_system is not None: + return _COLOR_SYSTEMS_NAMES[self._color_system] + else: + return None + + @property + def encoding(self) -> str: + """Get the encoding of the console file, e.g. ``"utf-8"``. + + Returns: + str: A standard encoding string. + """ + return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower() + + @property + def is_terminal(self) -> bool: + """Check if the console is writing to a terminal. + + Returns: + bool: True if the console writing to a device capable of + understanding escape sequences, otherwise False. 
+ """ + # If dev has explicitly set this value, return it + if self._force_terminal is not None: + return self._force_terminal + + # Fudge for Idle + if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith( + "idlelib" + ): + # Return False for Idle which claims to be a tty but can't handle ansi codes + return False + + if self.is_jupyter: + # return False for Jupyter, which may have FORCE_COLOR set + return False + + environ = self._environ + + tty_compatible = environ.get("TTY_COMPATIBLE", "") + # 0 indicates device is not tty compatible + if tty_compatible == "0": + return False + # 1 indicates device is tty compatible + if tty_compatible == "1": + return True + + # https://force-color.org/ + force_color = environ.get("FORCE_COLOR") + if force_color is not None: + return force_color != "" + + # Any other value defaults to auto detect + isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None) + try: + return False if isatty is None else isatty() + except ValueError: + # in some situation (at the end of a pytest run for example) isatty() can raise + # ValueError: I/O operation on closed file + # return False because we aren't in a terminal anymore + return False + + @property + def is_dumb_terminal(self) -> bool: + """Detect dumb terminal. + + Returns: + bool: True if writing to a dumb terminal, otherwise False. + + """ + _term = self._environ.get("TERM", "") + is_dumb = _term.lower() in ("dumb", "unknown") + return self.is_terminal and is_dumb + + @property + def options(self) -> ConsoleOptions: + """Get default console options.""" + size = self.size + return ConsoleOptions( + max_height=size.height, + size=size, + legacy_windows=self.legacy_windows, + min_width=1, + max_width=size.width, + encoding=self.encoding, + is_terminal=self.is_terminal, + ) + + @property + def size(self) -> ConsoleDimensions: + """Get the size of the console. + + Returns: + ConsoleDimensions: A named tuple containing the dimensions. + """ + + if self._width is not None and self._height is not None: + return ConsoleDimensions(self._width - self.legacy_windows, self._height) + + if self.is_dumb_terminal: + return ConsoleDimensions(80, 25) + + width: Optional[int] = None + height: Optional[int] = None + + streams = _STD_STREAMS_OUTPUT if WINDOWS else _STD_STREAMS + for file_descriptor in streams: + try: + width, height = os.get_terminal_size(file_descriptor) + except (AttributeError, ValueError, OSError): # Probably not a terminal + pass + else: + break + + columns = self._environ.get("COLUMNS") + if columns is not None and columns.isdigit(): + width = int(columns) + lines = self._environ.get("LINES") + if lines is not None and lines.isdigit(): + height = int(lines) + + # get_terminal_size can report 0, 0 if run from pseudo-terminal + width = width or 80 + height = height or 25 + return ConsoleDimensions( + width - self.legacy_windows if self._width is None else self._width, + height if self._height is None else self._height, + ) + + @size.setter + def size(self, new_size: Tuple[int, int]) -> None: + """Set a new size for the terminal. + + Args: + new_size (Tuple[int, int]): New width and height. + """ + width, height = new_size + self._width = width + self._height = height + + @property + def width(self) -> int: + """Get the width of the console. + + Returns: + int: The width (in characters) of the console. + """ + return self.size.width + + @width.setter + def width(self, width: int) -> None: + """Set width. + + Args: + width (int): New width. 
+ """ + self._width = width + + @property + def height(self) -> int: + """Get the height of the console. + + Returns: + int: The height (in lines) of the console. + """ + return self.size.height + + @height.setter + def height(self, height: int) -> None: + """Set height. + + Args: + height (int): new height. + """ + self._height = height + + def bell(self) -> None: + """Play a 'bell' sound (if supported by the terminal).""" + self.control(Control.bell()) + + def capture(self) -> Capture: + """A context manager to *capture* the result of print() or log() in a string, + rather than writing it to the console. + + Example: + >>> from rich.console import Console + >>> console = Console() + >>> with console.capture() as capture: + ... console.print("[bold magenta]Hello World[/]") + >>> print(capture.get()) + + Returns: + Capture: Context manager with disables writing to the terminal. + """ + capture = Capture(self) + return capture + + def pager( + self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False + ) -> PagerContext: + """A context manager to display anything printed within a "pager". The pager application + is defined by the system and will typically support at least pressing a key to scroll. + + Args: + pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None. + styles (bool, optional): Show styles in pager. Defaults to False. + links (bool, optional): Show links in pager. Defaults to False. + + Example: + >>> from rich.console import Console + >>> from rich.__main__ import make_test_card + >>> console = Console() + >>> with console.pager(): + console.print(make_test_card()) + + Returns: + PagerContext: A context manager. + """ + return PagerContext(self, pager=pager, styles=styles, links=links) + + def line(self, count: int = 1) -> None: + """Write new line(s). + + Args: + count (int, optional): Number of new lines. Defaults to 1. + """ + + assert count >= 0, "count must be >= 0" + self.print(NewLine(count)) + + def clear(self, home: bool = True) -> None: + """Clear the screen. + + Args: + home (bool, optional): Also move the cursor to 'home' position. Defaults to True. + """ + if home: + self.control(Control.clear(), Control.home()) + else: + self.control(Control.clear()) + + def status( + self, + status: RenderableType, + *, + spinner: str = "dots", + spinner_style: StyleType = "status.spinner", + speed: float = 1.0, + refresh_per_second: float = 12.5, + ) -> "Status": + """Display a status and spinner. + + Args: + status (RenderableType): A status renderable (str or Text typically). + spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots". + spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner". + speed (float, optional): Speed factor for spinner animation. Defaults to 1.0. + refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5. + + Returns: + Status: A Status object that may be used as a context manager. + """ + from .status import Status + + status_renderable = Status( + status, + console=self, + spinner=spinner, + spinner_style=spinner_style, + speed=speed, + refresh_per_second=refresh_per_second, + ) + return status_renderable + + def show_cursor(self, show: bool = True) -> bool: + """Show or hide the cursor. + + Args: + show (bool, optional): Set visibility of the cursor. 
+ """ + if self.is_terminal: + self.control(Control.show_cursor(show)) + return True + return False + + def set_alt_screen(self, enable: bool = True) -> bool: + """Enables alternative screen mode. + + Note, if you enable this mode, you should ensure that is disabled before + the application exits. See :meth:`~rich.Console.screen` for a context manager + that handles this for you. + + Args: + enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True. + + Returns: + bool: True if the control codes were written. + + """ + changed = False + if self.is_terminal and not self.legacy_windows: + self.control(Control.alt_screen(enable)) + changed = True + self._is_alt_screen = enable + return changed + + @property + def is_alt_screen(self) -> bool: + """Check if the alt screen was enabled. + + Returns: + bool: True if the alt screen was enabled, otherwise False. + """ + return self._is_alt_screen + + def set_window_title(self, title: str) -> bool: + """Set the title of the console terminal window. + + Warning: There is no means within Rich of "resetting" the window title to its + previous value, meaning the title you set will persist even after your application + exits. + + ``fish`` shell resets the window title before and after each command by default, + negating this issue. Windows Terminal and command prompt will also reset the title for you. + Most other shells and terminals, however, do not do this. + + Some terminals may require configuration changes before you can set the title. + Some terminals may not support setting the title at all. + + Other software (including the terminal itself, the shell, custom prompts, plugins, etc.) + may also set the terminal window title. This could result in whatever value you write + using this method being overwritten. + + Args: + title (str): The new title of the terminal window. + + Returns: + bool: True if the control code to change the terminal title was + written, otherwise False. Note that a return value of True + does not guarantee that the window title has actually changed, + since the feature may be unsupported/disabled in some terminals. + """ + if self.is_terminal: + self.control(Control.title(title)) + return True + return False + + def screen( + self, hide_cursor: bool = True, style: Optional[StyleType] = None + ) -> "ScreenContext": + """Context manager to enable and disable 'alternative screen' mode. + + Args: + hide_cursor (bool, optional): Also hide the cursor. Defaults to False. + style (Style, optional): Optional style for screen. Defaults to None. + + Returns: + ~ScreenContext: Context which enables alternate screen on enter, and disables it on exit. + """ + return ScreenContext(self, hide_cursor=hide_cursor, style=style or "") + + def measure( + self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None + ) -> Measurement: + """Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains + information regarding the number of characters required to print the renderable. + + Args: + renderable (RenderableType): Any renderable or string. + options (Optional[ConsoleOptions], optional): Options to use when measuring, or None + to use default options. Defaults to None. + + Returns: + Measurement: A measurement of the renderable. 
+ """ + measurement = Measurement.get(self, options or self.options, renderable) + return measurement + + def render( + self, renderable: RenderableType, options: Optional[ConsoleOptions] = None + ) -> Iterable[Segment]: + """Render an object in to an iterable of `Segment` instances. + + This method contains the logic for rendering objects with the console protocol. + You are unlikely to need to use it directly, unless you are extending the library. + + Args: + renderable (RenderableType): An object supporting the console protocol, or + an object that may be converted to a string. + options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None. + + Returns: + Iterable[Segment]: An iterable of segments that may be rendered. + """ + + _options = options or self.options + if _options.max_width < 1: + # No space to render anything. This prevents potential recursion errors. + return + render_iterable: RenderResult + + renderable = rich_cast(renderable) + if hasattr(renderable, "__rich_console__") and not isclass(renderable): + render_iterable = renderable.__rich_console__(self, _options) + elif isinstance(renderable, str): + text_renderable = self.render_str( + renderable, highlight=_options.highlight, markup=_options.markup + ) + render_iterable = text_renderable.__rich_console__(self, _options) + else: + raise errors.NotRenderableError( + f"Unable to render {renderable!r}; " + "A str, Segment or object with __rich_console__ method is required" + ) + + try: + iter_render = iter(render_iterable) + except TypeError: + raise errors.NotRenderableError( + f"object {render_iterable!r} is not renderable" + ) + _Segment = Segment + _options = _options.reset_height() + for render_output in iter_render: + if isinstance(render_output, _Segment): + yield render_output + else: + yield from self.render(render_output, _options) + + def render_lines( + self, + renderable: RenderableType, + options: Optional[ConsoleOptions] = None, + *, + style: Optional[Style] = None, + pad: bool = True, + new_lines: bool = False, + ) -> List[List[Segment]]: + """Render objects in to a list of lines. + + The output of render_lines is useful when further formatting of rendered console text + is required, such as the Panel class which draws a border around any renderable object. + + Args: + renderable (RenderableType): Any object renderable in the console. + options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Default to ``None``. + style (Style, optional): Optional style to apply to renderables. Defaults to ``None``. + pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``. + new_lines (bool, optional): Include "\n" characters at end of lines. + + Returns: + List[List[Segment]]: A list of lines, where a line is a list of Segment objects. 
+ """ + with self._lock: + render_options = options or self.options + _rendered = self.render(renderable, render_options) + if style: + _rendered = Segment.apply_style(_rendered, style) + + render_height = render_options.height + if render_height is not None: + render_height = max(0, render_height) + + lines = list( + islice( + Segment.split_and_crop_lines( + _rendered, + render_options.max_width, + include_new_lines=new_lines, + pad=pad, + style=style, + ), + None, + render_height, + ) + ) + if render_options.height is not None: + extra_lines = render_options.height - len(lines) + if extra_lines > 0: + pad_line = [ + ( + [ + Segment(" " * render_options.max_width, style), + Segment("\n"), + ] + if new_lines + else [Segment(" " * render_options.max_width, style)] + ) + ] + lines.extend(pad_line * extra_lines) + + return lines + + def render_str( + self, + text: str, + *, + style: Union[str, Style] = "", + justify: Optional[JustifyMethod] = None, + overflow: Optional[OverflowMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + highlighter: Optional[HighlighterType] = None, + ) -> "Text": + """Convert a string to a Text instance. This is called automatically if + you print or log a string. + + Args: + text (str): Text to render. + style (Union[str, Style], optional): Style to apply to rendered text. + justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``. + overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``. + emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default. + markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default. + highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default. + highlighter (HighlighterType, optional): Optional highlighter to apply. + Returns: + ConsoleRenderable: Renderable object. + + """ + emoji_enabled = emoji or (emoji is None and self._emoji) + markup_enabled = markup or (markup is None and self._markup) + highlight_enabled = highlight or (highlight is None and self._highlight) + + if markup_enabled: + rich_text = render_markup( + text, + style=style, + emoji=emoji_enabled, + emoji_variant=self._emoji_variant, + ) + rich_text.justify = justify + rich_text.overflow = overflow + else: + rich_text = Text( + ( + _emoji_replace(text, default_variant=self._emoji_variant) + if emoji_enabled + else text + ), + justify=justify, + overflow=overflow, + style=style, + ) + + _highlighter = (highlighter or self.highlighter) if highlight_enabled else None + if _highlighter is not None: + highlight_text = _highlighter(str(rich_text)) + highlight_text.copy_styles(rich_text) + return highlight_text + + return rich_text + + def get_style( + self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None + ) -> Style: + """Get a Style instance by its theme name or parse a definition. + + Args: + name (str): The name of a style or a style definition. + + Returns: + Style: A Style object. + + Raises: + MissingStyle: If no style could be parsed from name. 
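+
+ Example:
+ >>> console = Console()
+ >>> console.get_style("bold red").bold
+ True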
+ + """ + if isinstance(name, Style): + return name + + try: + style = self._theme_stack.get(name) + if style is None: + style = Style.parse(name) + return style.copy() if style.link else style + except errors.StyleSyntaxError as error: + if default is not None: + return self.get_style(default) + raise errors.MissingStyle( + f"Failed to get style {name!r}; {error}" + ) from None + + def _collect_renderables( + self, + objects: Iterable[Any], + sep: str, + end: str, + *, + justify: Optional[JustifyMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + ) -> List[ConsoleRenderable]: + """Combine a number of renderables and text into one renderable. + + Args: + objects (Iterable[Any]): Anything that Rich can render. + sep (str): String to write between print data. + end (str): String to write at end of print data. + justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. + + Returns: + List[ConsoleRenderable]: A list of things to render. + """ + renderables: List[ConsoleRenderable] = [] + _append = renderables.append + text: List[Text] = [] + append_text = text.append + + append = _append + if justify in ("left", "center", "right"): + + def align_append(renderable: RenderableType) -> None: + _append(Align(renderable, cast(AlignMethod, justify))) + + append = align_append + + _highlighter: HighlighterType = _null_highlighter + if highlight or (highlight is None and self._highlight): + _highlighter = self.highlighter + + def check_text() -> None: + if text: + sep_text = Text(sep, justify=justify, end=end) + append(sep_text.join(text)) + text.clear() + + for renderable in objects: + renderable = rich_cast(renderable) + if isinstance(renderable, str): + append_text( + self.render_str( + renderable, + emoji=emoji, + markup=markup, + highlight=highlight, + highlighter=_highlighter, + ) + ) + elif isinstance(renderable, Text): + append_text(renderable) + elif isinstance(renderable, ConsoleRenderable): + check_text() + append(renderable) + elif is_expandable(renderable): + check_text() + append(Pretty(renderable, highlighter=_highlighter)) + else: + append_text(_highlighter(str(renderable))) + + check_text() + + if self.style is not None: + style = self.get_style(self.style) + renderables = [Styled(renderable, style) for renderable in renderables] + + return renderables + + def rule( + self, + title: TextType = "", + *, + characters: str = "─", + style: Union[str, Style] = "rule.line", + align: AlignMethod = "center", + ) -> None: + """Draw a line with optional centered title. + + Args: + title (str, optional): Text to render over the rule. Defaults to "". + characters (str, optional): Character(s) to form the line. Defaults to "─". + style (str, optional): Style of line. Defaults to "rule.line". + align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center". + """ + from .rule import Rule + + rule = Rule(title=title, characters=characters, style=style, align=align) + self.print(rule) + + def control(self, *control: Control) -> None: + """Insert non-printing control codes. + + Args: + control_codes (str): Control codes, such as those that may move the cursor. 
+ """ + if not self.is_dumb_terminal: + with self: + self._buffer.extend(_control.segment for _control in control) + + def out( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + highlight: Optional[bool] = None, + ) -> None: + """Output to the terminal. This is a low-level way of writing to the terminal which unlike + :meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will + optionally apply highlighting and a basic style. + + Args: + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use + console default. Defaults to ``None``. + """ + raw_output: str = sep.join(str(_object) for _object in objects) + self.print( + raw_output, + style=style, + highlight=highlight, + emoji=False, + markup=False, + no_wrap=True, + overflow="ignore", + crop=False, + end=end, + ) + + def print( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + justify: Optional[JustifyMethod] = None, + overflow: Optional[OverflowMethod] = None, + no_wrap: Optional[bool] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + width: Optional[int] = None, + height: Optional[int] = None, + crop: bool = True, + soft_wrap: Optional[bool] = None, + new_line_start: bool = False, + ) -> None: + """Print to the console. + + Args: + objects (positional args): Objects to log to the terminal. + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``. + overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None. + no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``. + width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``. + crop (Optional[bool], optional): Crop output to width of terminal. Defaults to True. + soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for + Console default. Defaults to ``None``. + new_line_start (bool, False): Insert a new line at the start if the output contains more than one line. Defaults to ``False``. 
+ """ + if not objects: + objects = (NewLine(),) + + if soft_wrap is None: + soft_wrap = self.soft_wrap + if soft_wrap: + if no_wrap is None: + no_wrap = True + if overflow is None: + overflow = "ignore" + crop = False + render_hooks = self._render_hooks[:] + with self: + renderables = self._collect_renderables( + objects, + sep, + end, + justify=justify, + emoji=emoji, + markup=markup, + highlight=highlight, + ) + for hook in render_hooks: + renderables = hook.process_renderables(renderables) + render_options = self.options.update( + justify=justify, + overflow=overflow, + width=min(width, self.width) if width is not None else NO_CHANGE, + height=height, + no_wrap=no_wrap, + markup=markup, + highlight=highlight, + ) + + new_segments: List[Segment] = [] + extend = new_segments.extend + render = self.render + if style is None: + for renderable in renderables: + extend(render(renderable, render_options)) + else: + for renderable in renderables: + extend( + Segment.apply_style( + render(renderable, render_options), self.get_style(style) + ) + ) + if new_line_start: + if ( + len("".join(segment.text for segment in new_segments).splitlines()) + > 1 + ): + new_segments.insert(0, Segment.line()) + if crop: + buffer_extend = self._buffer.extend + for line in Segment.split_and_crop_lines( + new_segments, self.width, pad=False + ): + buffer_extend(line) + else: + self._buffer.extend(new_segments) + + def print_json( + self, + json: Optional[str] = None, + *, + data: Any = None, + indent: Union[None, int, str] = 2, + highlight: bool = True, + skip_keys: bool = False, + ensure_ascii: bool = False, + check_circular: bool = True, + allow_nan: bool = True, + default: Optional[Callable[[Any], Any]] = None, + sort_keys: bool = False, + ) -> None: + """Pretty prints JSON. Output will be valid JSON. + + Args: + json (Optional[str]): A string containing JSON. + data (Any): If json is not supplied, then encode this data. + indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2. + highlight (bool, optional): Enable highlighting of output: Defaults to True. + skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. + ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. + check_circular (bool, optional): Check for circular references. Defaults to True. + allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. + default (Callable, optional): A callable that converts values that can not be encoded + in to something that can be JSON encoded. Defaults to None. + sort_keys (bool, optional): Sort dictionary keys. Defaults to False. + """ + from pip._vendor.rich.json import JSON + + if json is None: + json_renderable = JSON.from_data( + data, + indent=indent, + highlight=highlight, + skip_keys=skip_keys, + ensure_ascii=ensure_ascii, + check_circular=check_circular, + allow_nan=allow_nan, + default=default, + sort_keys=sort_keys, + ) + else: + if not isinstance(json, str): + raise TypeError( + f"json must be str. Did you mean print_json(data={json!r}) ?" 
+ ) + json_renderable = JSON( + json, + indent=indent, + highlight=highlight, + skip_keys=skip_keys, + ensure_ascii=ensure_ascii, + check_circular=check_circular, + allow_nan=allow_nan, + default=default, + sort_keys=sort_keys, + ) + self.print(json_renderable, soft_wrap=True) + + def update_screen( + self, + renderable: RenderableType, + *, + region: Optional[Region] = None, + options: Optional[ConsoleOptions] = None, + ) -> None: + """Update the screen at a given offset. + + Args: + renderable (RenderableType): A Rich renderable. + region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None. + x (int, optional): x offset. Defaults to 0. + y (int, optional): y offset. Defaults to 0. + + Raises: + errors.NoAltScreen: If the Console isn't in alt screen mode. + + """ + if not self.is_alt_screen: + raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") + render_options = options or self.options + if region is None: + x = y = 0 + render_options = render_options.update_dimensions( + render_options.max_width, render_options.height or self.height + ) + else: + x, y, width, height = region + render_options = render_options.update_dimensions(width, height) + + lines = self.render_lines(renderable, options=render_options) + self.update_screen_lines(lines, x, y) + + def update_screen_lines( + self, lines: List[List[Segment]], x: int = 0, y: int = 0 + ) -> None: + """Update lines of the screen at a given offset. + + Args: + lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`). + x (int, optional): x offset (column no). Defaults to 0. + y (int, optional): y offset (column no). Defaults to 0. + + Raises: + errors.NoAltScreen: If the Console isn't in alt screen mode. + """ + if not self.is_alt_screen: + raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") + screen_update = ScreenUpdate(lines, x, y) + segments = self.render(screen_update) + self._buffer.extend(segments) + self._check_buffer() + + def print_exception( + self, + *, + width: Optional[int] = 100, + extra_lines: int = 3, + theme: Optional[str] = None, + word_wrap: bool = False, + show_locals: bool = False, + suppress: Iterable[Union[str, ModuleType]] = (), + max_frames: int = 100, + ) -> None: + """Prints a rich render of the last exception and traceback. + + Args: + width (Optional[int], optional): Number of characters used to render code. Defaults to 100. + extra_lines (int, optional): Additional lines of code to render. Defaults to 3. + theme (str, optional): Override pygments theme used in traceback + word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. + show_locals (bool, optional): Enable display of local variables. Defaults to False. + suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. + max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. + """ + from .traceback import Traceback + + traceback = Traceback( + width=width, + extra_lines=extra_lines, + theme=theme, + word_wrap=word_wrap, + show_locals=show_locals, + suppress=suppress, + max_frames=max_frames, + ) + self.print(traceback) + + @staticmethod + def _caller_frame_info( + offset: int, + currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe, + ) -> Tuple[str, int, Dict[str, Any]]: + """Get caller frame information. + + Args: + offset (int): the caller offset within the current frame stack. 
+ currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to + retrieve the current frame. Defaults to ``inspect.currentframe``. + + Returns: + Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and + the dictionary of local variables associated with the caller frame. + + Raises: + RuntimeError: If the stack offset is invalid. + """ + # Ignore the frame of this local helper + offset += 1 + + frame = currentframe() + if frame is not None: + # Use the faster currentframe where implemented + while offset and frame is not None: + frame = frame.f_back + offset -= 1 + assert frame is not None + return frame.f_code.co_filename, frame.f_lineno, frame.f_locals + else: + # Fallback to the slower stack + frame_info = inspect.stack()[offset] + return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals + + def log( + self, + *objects: Any, + sep: str = " ", + end: str = "\n", + style: Optional[Union[str, Style]] = None, + justify: Optional[JustifyMethod] = None, + emoji: Optional[bool] = None, + markup: Optional[bool] = None, + highlight: Optional[bool] = None, + log_locals: bool = False, + _stack_offset: int = 1, + ) -> None: + """Log rich content to the terminal. + + Args: + objects (positional args): Objects to log to the terminal. + sep (str, optional): String to write between print data. Defaults to " ". + end (str, optional): String to write at end of print data. Defaults to "\\\\n". + style (Union[str, Style], optional): A style to apply to output. Defaults to None. + justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. + emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None. + markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None. + highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None. + log_locals (bool, optional): Boolean to enable logging of locals where ``log()`` + was called. Defaults to False. + _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1. 
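+
+ Example:
+ >>> console = Console()
+ >>> console.log("Server started", log_locals=True)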
+ """ + if not objects: + objects = (NewLine(),) + + render_hooks = self._render_hooks[:] + + with self: + renderables = self._collect_renderables( + objects, + sep, + end, + justify=justify, + emoji=emoji, + markup=markup, + highlight=highlight, + ) + if style is not None: + renderables = [Styled(renderable, style) for renderable in renderables] + + filename, line_no, locals = self._caller_frame_info(_stack_offset) + link_path = None if filename.startswith("<") else os.path.abspath(filename) + path = filename.rpartition(os.sep)[-1] + if log_locals: + locals_map = { + key: value + for key, value in locals.items() + if not key.startswith("__") + } + renderables.append(render_scope(locals_map, title="[i]locals")) + + renderables = [ + self._log_render( + self, + renderables, + log_time=self.get_datetime(), + path=path, + line_no=line_no, + link_path=link_path, + ) + ] + for hook in render_hooks: + renderables = hook.process_renderables(renderables) + new_segments: List[Segment] = [] + extend = new_segments.extend + render = self.render + render_options = self.options + for renderable in renderables: + extend(render(renderable, render_options)) + buffer_extend = self._buffer.extend + for line in Segment.split_and_crop_lines( + new_segments, self.width, pad=False + ): + buffer_extend(line) + + def on_broken_pipe(self) -> None: + """This function is called when a `BrokenPipeError` is raised. + + This can occur when piping Textual output in Linux and macOS. + The default implementation is to exit the app, but you could implement + this method in a subclass to change the behavior. + + See https://docs.python.org/3/library/signal.html#note-on-sigpipe for details. + """ + self.quiet = True + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + raise SystemExit(1) + + def _check_buffer(self) -> None: + """Check if the buffer may be rendered. Render it if it can (e.g. Console.quiet is False) + Rendering is supported on Windows, Unix and Jupyter environments. For + legacy Windows consoles, the win32 API is called directly. + This method will also record what it renders if recording is enabled via Console.record. + """ + if self.quiet: + del self._buffer[:] + return + + try: + self._write_buffer() + except BrokenPipeError: + self.on_broken_pipe() + + def _write_buffer(self) -> None: + """Write the buffer to the output file.""" + + with self._lock: + if self.record and not self._buffer_index: + with self._record_buffer_lock: + self._record_buffer.extend(self._buffer[:]) + + if self._buffer_index == 0: + if self.is_jupyter: # pragma: no cover + from .jupyter import display + + display(self._buffer, self._render_buffer(self._buffer[:])) + del self._buffer[:] + else: + if WINDOWS: + use_legacy_windows_render = False + if self.legacy_windows: + fileno = get_fileno(self.file) + if fileno is not None: + use_legacy_windows_render = ( + fileno in _STD_STREAMS_OUTPUT + ) + + if use_legacy_windows_render: + from pip._vendor.rich._win32_console import LegacyWindowsTerm + from pip._vendor.rich._windows_renderer import legacy_windows_render + + buffer = self._buffer[:] + if self.no_color and self._color_system: + buffer = list(Segment.remove_color(buffer)) + + legacy_windows_render(buffer, LegacyWindowsTerm(self.file)) + else: + # Either a non-std stream on legacy Windows, or modern Windows. 
+ text = self._render_buffer(self._buffer[:]) + # https://bugs.python.org/issue37871 + # https://github.com/python/cpython/issues/82052 + # We need to avoid writing more than 32Kb in a single write, due to the above bug + write = self.file.write + # Worse case scenario, every character is 4 bytes of utf-8 + MAX_WRITE = 32 * 1024 // 4 + try: + if len(text) <= MAX_WRITE: + write(text) + else: + batch: List[str] = [] + batch_append = batch.append + size = 0 + for line in text.splitlines(True): + if size + len(line) > MAX_WRITE and batch: + write("".join(batch)) + batch.clear() + size = 0 + batch_append(line) + size += len(line) + if batch: + write("".join(batch)) + batch.clear() + except UnicodeEncodeError as error: + error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" + raise + else: + text = self._render_buffer(self._buffer[:]) + try: + self.file.write(text) + except UnicodeEncodeError as error: + error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" + raise + + self.file.flush() + del self._buffer[:] + + def _render_buffer(self, buffer: Iterable[Segment]) -> str: + """Render buffered output, and clear buffer.""" + output: List[str] = [] + append = output.append + color_system = self._color_system + legacy_windows = self.legacy_windows + not_terminal = not self.is_terminal + if self.no_color and color_system: + buffer = Segment.remove_color(buffer) + for text, style, control in buffer: + if style: + append( + style.render( + text, + color_system=color_system, + legacy_windows=legacy_windows, + ) + ) + elif not (not_terminal and control): + append(text) + + rendered = "".join(output) + return rendered + + def input( + self, + prompt: TextType = "", + *, + markup: bool = True, + emoji: bool = True, + password: bool = False, + stream: Optional[TextIO] = None, + ) -> str: + """Displays a prompt and waits for input from the user. The prompt may contain color / style. + + It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded. + + Args: + prompt (Union[str, Text]): Text to render in the prompt. + markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True. + emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True. + password: (bool, optional): Hide typed text. Defaults to False. + stream: (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None. + + Returns: + str: Text read from stdin. + """ + if prompt: + self.print(prompt, markup=markup, emoji=emoji, end="") + if password: + result = getpass("", stream=stream) + else: + if stream: + result = stream.readline() + else: + result = input() + return result + + def export_text(self, *, clear: bool = True, styles: bool = False) -> str: + """Generate text from console contents (requires record=True argument in constructor). + + Args: + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text. + Defaults to ``False``. + + Returns: + str: String containing console contents. 
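+
+ Example:
+ >>> console = Console(record=True)
+ >>> console.print("Hello")
+ >>> console.export_text()
+ 'Hello\n'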
+
+ """
+ assert (
+ self.record
+ ), "To export console contents set record=True in the constructor or instance"
+
+ with self._record_buffer_lock:
+ if styles:
+ text = "".join(
+ (style.render(text) if style else text)
+ for text, style, _ in self._record_buffer
+ )
+ else:
+ text = "".join(
+ segment.text
+ for segment in self._record_buffer
+ if not segment.control
+ )
+ if clear:
+ del self._record_buffer[:]
+ return text
+
+ def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None:
+ """Generate text from console and save to a given location (requires record=True argument in constructor).
+
+ Args:
+ path (str): Path to write text file.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
+ Defaults to ``False``.
+
+ """
+ text = self.export_text(clear=clear, styles=styles)
+ with open(path, "w", encoding="utf-8") as write_file:
+ write_file.write(text)
+
+ def export_html(
+ self,
+ *,
+ theme: Optional[TerminalTheme] = None,
+ clear: bool = True,
+ code_format: Optional[str] = None,
+ inline_styles: bool = False,
+ ) -> str:
+ """Generate HTML from console contents (requires record=True argument in constructor).
+
+ Args:
+ theme (TerminalTheme, optional): TerminalTheme object containing console colors.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
+ '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
+ inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
+ larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
+ Defaults to False.
+
+ Returns:
+ str: String containing console contents as HTML.
+ """ + assert ( + self.record + ), "To export console contents set record=True in the constructor or instance" + fragments: List[str] = [] + append = fragments.append + _theme = theme or DEFAULT_TERMINAL_THEME + stylesheet = "" + + render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format + + with self._record_buffer_lock: + if inline_styles: + for text, style, _ in Segment.filter_control( + Segment.simplify(self._record_buffer) + ): + text = escape(text) + if style: + rule = style.get_html_style(_theme) + if style.link: + text = f'{text}' + text = f'{text}' if rule else text + append(text) + else: + styles: Dict[str, int] = {} + for text, style, _ in Segment.filter_control( + Segment.simplify(self._record_buffer) + ): + text = escape(text) + if style: + rule = style.get_html_style(_theme) + style_number = styles.setdefault(rule, len(styles) + 1) + if style.link: + text = f'{text}' + else: + text = f'{text}' + append(text) + stylesheet_rules: List[str] = [] + stylesheet_append = stylesheet_rules.append + for style_rule, style_number in styles.items(): + if style_rule: + stylesheet_append(f".r{style_number} {{{style_rule}}}") + stylesheet = "\n".join(stylesheet_rules) + + rendered_code = render_code_format.format( + code="".join(fragments), + stylesheet=stylesheet, + foreground=_theme.foreground_color.hex, + background=_theme.background_color.hex, + ) + if clear: + del self._record_buffer[:] + return rendered_code + + def save_html( + self, + path: str, + *, + theme: Optional[TerminalTheme] = None, + clear: bool = True, + code_format: str = CONSOLE_HTML_FORMAT, + inline_styles: bool = False, + ) -> None: + """Generate HTML from console contents and write to a file (requires record=True argument in constructor). + + Args: + path (str): Path to write html file. + theme (TerminalTheme, optional): TerminalTheme object containing console colors. + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. + code_format (str, optional): Format string to render HTML. In addition to '{foreground}', + '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``. + inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files + larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. + Defaults to False. + + """ + html = self.export_html( + theme=theme, + clear=clear, + code_format=code_format, + inline_styles=inline_styles, + ) + with open(path, "w", encoding="utf-8") as write_file: + write_file.write(html) + + def export_svg( + self, + *, + title: str = "Rich", + theme: Optional[TerminalTheme] = None, + clear: bool = True, + code_format: str = CONSOLE_SVG_FORMAT, + font_aspect_ratio: float = 0.61, + unique_id: Optional[str] = None, + ) -> str: + """ + Generate an SVG from the console contents (requires record=True in Console constructor). + + Args: + title (str, optional): The title of the tab in the output image + theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal + clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` + code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables + into the string in order to form the final SVG output. The default template used and the variables + injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. 
+ font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
+ string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
+ If you aren't specifying a different font inside ``code_format``, you probably don't need this.
+ unique_id (str, optional): Unique id that is used as the prefix for various elements (CSS styles, node
+ ids). If not set, this defaults to a computed value based on the recorded content.
+ """
+
+ from pip._vendor.rich.cells import cell_len
+
+ style_cache: Dict[Style, str] = {}
+
+ def get_svg_style(style: Style) -> str:
+ """Convert a Style to CSS rules for SVG."""
+ if style in style_cache:
+ return style_cache[style]
+ css_rules = []
+ color = (
+ _theme.foreground_color
+ if (style.color is None or style.color.is_default)
+ else style.color.get_truecolor(_theme)
+ )
+ bgcolor = (
+ _theme.background_color
+ if (style.bgcolor is None or style.bgcolor.is_default)
+ else style.bgcolor.get_truecolor(_theme)
+ )
+ if style.reverse:
+ color, bgcolor = bgcolor, color
+ if style.dim:
+ color = blend_rgb(color, bgcolor, 0.4)
+ css_rules.append(f"fill: {color.hex}")
+ if style.bold:
+ css_rules.append("font-weight: bold")
+ if style.italic:
+ css_rules.append("font-style: italic")
+ if style.underline:
+ css_rules.append("text-decoration: underline")
+ if style.strike:
+ css_rules.append("text-decoration: line-through")
+
+ css = ";".join(css_rules)
+ style_cache[style] = css
+ return css
+
+ _theme = theme or SVG_EXPORT_THEME
+
+ width = self.width
+ char_height = 20
+ char_width = char_height * font_aspect_ratio
+ line_height = char_height * 1.22
+
+ margin_top = 1
+ margin_right = 1
+ margin_bottom = 1
+ margin_left = 1
+
+ padding_top = 40
+ padding_right = 8
+ padding_bottom = 8
+ padding_left = 8
+
+ padding_width = padding_left + padding_right
+ padding_height = padding_top + padding_bottom
+ margin_width = margin_left + margin_right
+ margin_height = margin_top + margin_bottom
+
+ text_backgrounds: List[str] = []
+ text_group: List[str] = []
+ classes: Dict[str, int] = {}
+ style_no = 1
+
+ def escape_text(text: str) -> str:
+ """HTML escape text and replace spaces with nbsp."""
+ return escape(text).replace(" ", "&#160;")
+
+ def make_tag(
+ name: str, content: Optional[str] = None, **attribs: object
+ ) -> str:
+ """Make a tag from name, content, and attributes."""
+
+ def stringify(value: object) -> str:
+ if isinstance(value, float):
+ return format(value, "g")
+ return str(value)
+
+ tag_attribs = " ".join(
+ f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"'
+ for k, v in attribs.items()
+ )
+ return (
+ f"<{name} {tag_attribs}>{content}</{name}>"
+ if content
+ else f"<{name} {tag_attribs}/>"
+ )
+
+ with self._record_buffer_lock:
+ segments = list(Segment.filter_control(self._record_buffer))
+ if clear:
+ self._record_buffer.clear()
+
+ if unique_id is None:
+ unique_id = "terminal-" + str(
+ zlib.adler32(
+ ("".join(repr(segment) for segment in segments)).encode(
+ "utf-8",
+ "ignore",
+ )
+ + title.encode("utf-8", "ignore")
+ )
+ )
+ y = 0
+ for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)):
+ x = 0
+ for text, style, _control in line:
+ style = style or Style()
+ rules = get_svg_style(style)
+ if rules not in classes:
+ classes[rules] = style_no
+ style_no += 1
+ class_name = f"r{classes[rules]}"
+
+ if style.reverse:
+ has_background = True
+ background = (
+ _theme.foreground_color.hex
+ if style.color is None
+ else style.color.get_truecolor(_theme).hex
+ )
+ else:
+ bgcolor = style.bgcolor
+ has_background = bgcolor is not None and not bgcolor.is_default
+ background = (
+ _theme.background_color.hex
+ if style.bgcolor is None
+ else style.bgcolor.get_truecolor(_theme).hex
+ )
+
+ text_length = cell_len(text)
+ if has_background:
+ text_backgrounds.append(
+ make_tag(
+ "rect",
+ fill=background,
+ x=x * char_width,
+ y=y * line_height + 1.5,
+ width=char_width * text_length,
+ height=line_height + 0.25,
+ shape_rendering="crispEdges",
+ )
+ )
+
+ if text != " " * len(text):
+ text_group.append(
+ make_tag(
+ "text",
+ escape_text(text),
+ _class=f"{unique_id}-{class_name}",
+ x=x * char_width,
+ y=y * line_height + char_height,
+ textLength=char_width * len(text),
+ clip_path=f"url(#{unique_id}-line-{y})",
+ )
+ )
+ x += cell_len(text)
+
+ line_offsets = [line_no * line_height + 1.5 for line_no in range(y)]
+ lines = "\n".join(
+ f"""<clipPath id="{unique_id}-line-{line_no}">
+ {make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)}
+ </clipPath>"""
+ for line_no, offset in enumerate(line_offsets)
+ )
+
+ styles = "\n".join(
+ f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items()
+ )
+ backgrounds = "".join(text_backgrounds)
+ matrix = "".join(text_group)
+
+ terminal_width = ceil(width * char_width + padding_width)
+ terminal_height = (y + 1) * line_height + padding_height
+ chrome = make_tag(
+ "rect",
+ fill=_theme.background_color.hex,
+ stroke="rgba(255,255,255,0.35)",
+ stroke_width="1",
+ x=margin_left,
+ y=margin_top,
+ width=terminal_width,
+ height=terminal_height,
+ rx=8,
+ )
+
+ title_color = _theme.foreground_color.hex
+ if title:
+ chrome += make_tag(
+ "text",
+ escape_text(title),
+ _class=f"{unique_id}-title",
+ fill=title_color,
+ text_anchor="middle",
+ x=terminal_width // 2,
+ y=margin_top + char_height + 6,
+ )
+ chrome += f"""
+ <g transform="translate(26,22)">
+ <circle cx="0" cy="0" r="7" fill="#ff5f57"/>
+ <circle cx="22" cy="0" r="7" fill="#febc2e"/>
+ <circle cx="44" cy="0" r="7" fill="#28c840"/>
+ </g>
+ """
+
+ svg = code_format.format(
+ unique_id=unique_id,
+ char_width=char_width,
+ char_height=char_height,
+ line_height=line_height,
+ terminal_width=char_width * width - 1,
+ terminal_height=(y + 1) * line_height - 1,
+ width=terminal_width + margin_width,
+ height=terminal_height + margin_height,
+ terminal_x=margin_left + padding_left,
+ terminal_y=margin_top + padding_top,
+ styles=styles,
+ chrome=chrome,
+ backgrounds=backgrounds,
+ matrix=matrix,
+ lines=lines,
+ )
+ return svg
+
+ def save_svg(
+ self,
+ path: str,
+ *,
+ title: str = "Rich",
+ theme: Optional[TerminalTheme] = None,
+ clear: bool = True,
+ code_format: str = CONSOLE_SVG_FORMAT,
+ font_aspect_ratio: float = 0.61,
+ unique_id: Optional[str] = None,
+ ) -> None:
+ """Generate an SVG file from the console contents (requires record=True in Console constructor).
+
+ Args:
+ path (str): The path to write the SVG to.
+ title (str, optional): The title of the tab in the output image
+ theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
+ code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
+ into the string in order to form the final SVG output. The default template used and the variables
+ injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
+ font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
+ string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
+ If you aren't specifying a different font inside ``code_format``, you probably don't need this. + unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node + ids). If not set, this defaults to a computed value based on the recorded content. + """ + svg = self.export_svg( + title=title, + theme=theme, + clear=clear, + code_format=code_format, + font_aspect_ratio=font_aspect_ratio, + unique_id=unique_id, + ) + with open(path, "w", encoding="utf-8") as write_file: + write_file.write(svg) + + +def _svg_hash(svg_main_code: str) -> str: + """Returns a unique hash for the given SVG main code. + + Args: + svg_main_code (str): The content we're going to inject in the SVG envelope. + + Returns: + str: a hash of the given content + """ + return str(zlib.adler32(svg_main_code.encode())) + + +if __name__ == "__main__": # pragma: no cover + console = Console(record=True) + + console.log( + "JSONRPC [i]request[/i]", + 5, + 1.3, + True, + False, + None, + { + "jsonrpc": "2.0", + "method": "subtract", + "params": {"minuend": 42, "subtrahend": 23}, + "id": 3, + }, + ) + + console.log("Hello, World!", "{'a': 1}", repr(console)) + + console.print( + { + "name": None, + "empty": [], + "quiz": { + "sport": { + "answered": True, + "q1": { + "question": "Which one is correct team name in NBA?", + "options": [ + "New York Bulls", + "Los Angeles Kings", + "Golden State Warriors", + "Huston Rocket", + ], + "answer": "Huston Rocket", + }, + }, + "maths": { + "answered": False, + "q1": { + "question": "5 + 7 = ?", + "options": [10, 11, 12, 13], + "answer": 12, + }, + "q2": { + "question": "12 - 8 = ?", + "options": [1, 2, 3, 4], + "answer": 4, + }, + }, + }, + } + ) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/constrain.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/constrain.py new file mode 100644 index 0000000..65fdf56 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/constrain.py @@ -0,0 +1,37 @@ +from typing import Optional, TYPE_CHECKING + +from .jupyter import JupyterMixin +from .measure import Measurement + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderableType, RenderResult + + +class Constrain(JupyterMixin): + """Constrain the width of a renderable to a given number of characters. + + Args: + renderable (RenderableType): A renderable object. + width (int, optional): The maximum width (in characters) to render. Defaults to 80. 
+ """ + + def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None: + self.renderable = renderable + self.width = width + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + if self.width is None: + yield self.renderable + else: + child_options = options.update_width(min(self.width, options.max_width)) + yield from console.render(self.renderable, child_options) + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + if self.width is not None: + options = options.update_width(self.width) + measurement = Measurement.get(console, options, self.renderable) + return measurement diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/containers.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/containers.py new file mode 100644 index 0000000..901ff8b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/containers.py @@ -0,0 +1,167 @@ +from itertools import zip_longest +from typing import ( + TYPE_CHECKING, + Iterable, + Iterator, + List, + Optional, + TypeVar, + Union, + overload, +) + +if TYPE_CHECKING: + from .console import ( + Console, + ConsoleOptions, + JustifyMethod, + OverflowMethod, + RenderResult, + RenderableType, + ) + from .text import Text + +from .cells import cell_len +from .measure import Measurement + +T = TypeVar("T") + + +class Renderables: + """A list subclass which renders its contents to the console.""" + + def __init__( + self, renderables: Optional[Iterable["RenderableType"]] = None + ) -> None: + self._renderables: List["RenderableType"] = ( + list(renderables) if renderables is not None else [] + ) + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + """Console render method to insert line-breaks.""" + yield from self._renderables + + def __rich_measure__( + self, console: "Console", options: "ConsoleOptions" + ) -> "Measurement": + dimensions = [ + Measurement.get(console, options, renderable) + for renderable in self._renderables + ] + if not dimensions: + return Measurement(1, 1) + _min = max(dimension.minimum for dimension in dimensions) + _max = max(dimension.maximum for dimension in dimensions) + return Measurement(_min, _max) + + def append(self, renderable: "RenderableType") -> None: + self._renderables.append(renderable) + + def __iter__(self) -> Iterable["RenderableType"]: + return iter(self._renderables) + + +class Lines: + """A list subclass which can render to the console.""" + + def __init__(self, lines: Iterable["Text"] = ()) -> None: + self._lines: List["Text"] = list(lines) + + def __repr__(self) -> str: + return f"Lines({self._lines!r})" + + def __iter__(self) -> Iterator["Text"]: + return iter(self._lines) + + @overload + def __getitem__(self, index: int) -> "Text": + ... + + @overload + def __getitem__(self, index: slice) -> List["Text"]: + ... 
+ + def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]: + return self._lines[index] + + def __setitem__(self, index: int, value: "Text") -> "Lines": + self._lines[index] = value + return self + + def __len__(self) -> int: + return self._lines.__len__() + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + """Console render method to insert line-breaks.""" + yield from self._lines + + def append(self, line: "Text") -> None: + self._lines.append(line) + + def extend(self, lines: Iterable["Text"]) -> None: + self._lines.extend(lines) + + def pop(self, index: int = -1) -> "Text": + return self._lines.pop(index) + + def justify( + self, + console: "Console", + width: int, + justify: "JustifyMethod" = "left", + overflow: "OverflowMethod" = "fold", + ) -> None: + """Justify and overflow text to a given width. + + Args: + console (Console): Console instance. + width (int): Number of cells available per line. + justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left". + overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold". + + """ + from .text import Text + + if justify == "left": + for line in self._lines: + line.truncate(width, overflow=overflow, pad=True) + elif justify == "center": + for line in self._lines: + line.rstrip() + line.truncate(width, overflow=overflow) + line.pad_left((width - cell_len(line.plain)) // 2) + line.pad_right(width - cell_len(line.plain)) + elif justify == "right": + for line in self._lines: + line.rstrip() + line.truncate(width, overflow=overflow) + line.pad_left(width - cell_len(line.plain)) + elif justify == "full": + for line_index, line in enumerate(self._lines): + if line_index == len(self._lines) - 1: + break + words = line.split(" ") + words_size = sum(cell_len(word.plain) for word in words) + num_spaces = len(words) - 1 + spaces = [1 for _ in range(num_spaces)] + index = 0 + if spaces: + while words_size + num_spaces < width: + spaces[len(spaces) - index - 1] += 1 + num_spaces += 1 + index = (index + 1) % len(spaces) + tokens: List[Text] = [] + for index, (word, next_word) in enumerate( + zip_longest(words, words[1:]) + ): + tokens.append(word) + if index < len(spaces): + style = word.get_style_at_offset(console, -1) + next_style = next_word.get_style_at_offset(console, 0) + space_style = style if style == next_style else line.style + tokens.append(Text(" " * spaces[index], style=space_style)) + self[line_index] = Text("").join(tokens) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/control.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/control.py new file mode 100644 index 0000000..84963e9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/control.py @@ -0,0 +1,219 @@ +import time +from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union, Final + +from .segment import ControlCode, ControlType, Segment + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderResult + +STRIP_CONTROL_CODES: Final = [ + 7, # Bell + 8, # Backspace + 11, # Vertical tab + 12, # Form feed + 13, # Carriage return +] +_CONTROL_STRIP_TRANSLATE: Final = { + _codepoint: None for _codepoint in STRIP_CONTROL_CODES +} + +CONTROL_ESCAPE: Final = { + 7: "\\a", + 8: "\\b", + 11: "\\v", + 12: "\\f", + 13: "\\r", +} + +CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = { + ControlType.BELL: lambda: "\x07", + 
ControlType.CARRIAGE_RETURN: lambda: "\r",
+ ControlType.HOME: lambda: "\x1b[H",
+ ControlType.CLEAR: lambda: "\x1b[2J",
+ ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
+ ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
+ ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
+ ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
+ ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
+ ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
+ ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
+ ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
+ ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
+ ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
+ ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
+ ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
+}
+
+
+class Control:
+ """A renderable that inserts a control code (non-printable but may move cursor).
+
+ Args:
+ *codes (Union[ControlType, ControlCode]): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
+ tuple of ControlType and an integer parameter
+ """
+
+ __slots__ = ["segment"]
+
+ def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
+ control_codes: List[ControlCode] = [
+ (code,) if isinstance(code, ControlType) else code for code in codes
+ ]
+ _format_map = CONTROL_CODES_FORMAT
+ rendered_codes = "".join(
+ _format_map[code](*parameters) for code, *parameters in control_codes
+ )
+ self.segment = Segment(rendered_codes, None, control_codes)
+
+ @classmethod
+ def bell(cls) -> "Control":
+ """Ring the 'bell'."""
+ return cls(ControlType.BELL)
+
+ @classmethod
+ def home(cls) -> "Control":
+ """Move cursor to 'home' position."""
+ return cls(ControlType.HOME)
+
+ @classmethod
+ def move(cls, x: int = 0, y: int = 0) -> "Control":
+ """Move cursor relative to current position.
+
+ Args:
+ x (int): X offset.
+ y (int): Y offset.
+
+ Returns:
+ ~Control: Control object.
+
+ """
+
+ def get_codes() -> Iterable[ControlCode]:
+ control = ControlType
+ if x:
+ yield (
+ control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
+ abs(x),
+ )
+ if y:
+ yield (
+ control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
+ abs(y),
+ )
+
+ control = cls(*get_codes())
+ return control
+
+ @classmethod
+ def move_to_column(cls, x: int, y: int = 0) -> "Control":
+ """Move to the given column, optionally add offset to row.
+
+ Args:
+ x (int): absolute x (column)
+ y (int): optional y offset (row)
+
+ Returns:
+ ~Control: Control object.
+ """
+
+ return (
+ cls(
+ (ControlType.CURSOR_MOVE_TO_COLUMN, x),
+ (
+ ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
+ abs(y),
+ ),
+ )
+ if y
+ else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
+ )
+
+ @classmethod
+ def move_to(cls, x: int, y: int) -> "Control":
+ """Move cursor to absolute position.
+
+ Args:
+ x (int): x offset (column)
+ y (int): y offset (row)
+
+ Returns:
+ ~Control: Control object.
+ """ + return cls((ControlType.CURSOR_MOVE_TO, x, y)) + + @classmethod + def clear(cls) -> "Control": + """Clear the screen.""" + return cls(ControlType.CLEAR) + + @classmethod + def show_cursor(cls, show: bool) -> "Control": + """Show or hide the cursor.""" + return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR) + + @classmethod + def alt_screen(cls, enable: bool) -> "Control": + """Enable or disable alt screen.""" + if enable: + return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME) + else: + return cls(ControlType.DISABLE_ALT_SCREEN) + + @classmethod + def title(cls, title: str) -> "Control": + """Set the terminal window title + + Args: + title (str): The new terminal window title + """ + return cls((ControlType.SET_WINDOW_TITLE, title)) + + def __str__(self) -> str: + return self.segment.text + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + if self.segment.text: + yield self.segment + + +def strip_control_codes( + text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE +) -> str: + """Remove control codes from text. + + Args: + text (str): A string possibly contain control codes. + + Returns: + str: String with control codes removed. + """ + return text.translate(_translate_table) + + +def escape_control_codes( + text: str, + _translate_table: Dict[int, str] = CONTROL_ESCAPE, +) -> str: + """Replace control codes with their "escaped" equivalent in the given text. + (e.g. "\b" becomes "\\b") + + Args: + text (str): A string possibly containing control codes. + + Returns: + str: String with control codes replaced with their escaped version. + """ + return text.translate(_translate_table) + + +if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.console import Console + + console = Console() + console.print("Look at the title of your terminal window ^") + # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!"))) + for i in range(10): + console.set_window_title("🚀 Loading" + "." 
* i) + time.sleep(0.5) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/default_styles.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/default_styles.py new file mode 100644 index 0000000..61797bf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/default_styles.py @@ -0,0 +1,193 @@ +from typing import Dict + +from .style import Style + +DEFAULT_STYLES: Dict[str, Style] = { + "none": Style.null(), + "reset": Style( + color="default", + bgcolor="default", + dim=False, + bold=False, + italic=False, + underline=False, + blink=False, + blink2=False, + reverse=False, + conceal=False, + strike=False, + ), + "dim": Style(dim=True), + "bright": Style(dim=False), + "bold": Style(bold=True), + "strong": Style(bold=True), + "code": Style(reverse=True, bold=True), + "italic": Style(italic=True), + "emphasize": Style(italic=True), + "underline": Style(underline=True), + "blink": Style(blink=True), + "blink2": Style(blink2=True), + "reverse": Style(reverse=True), + "strike": Style(strike=True), + "black": Style(color="black"), + "red": Style(color="red"), + "green": Style(color="green"), + "yellow": Style(color="yellow"), + "magenta": Style(color="magenta"), + "cyan": Style(color="cyan"), + "white": Style(color="white"), + "inspect.attr": Style(color="yellow", italic=True), + "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True), + "inspect.callable": Style(bold=True, color="red"), + "inspect.async_def": Style(italic=True, color="bright_cyan"), + "inspect.def": Style(italic=True, color="bright_cyan"), + "inspect.class": Style(italic=True, color="bright_cyan"), + "inspect.error": Style(bold=True, color="red"), + "inspect.equals": Style(), + "inspect.help": Style(color="cyan"), + "inspect.doc": Style(dim=True), + "inspect.value.border": Style(color="green"), + "live.ellipsis": Style(bold=True, color="red"), + "layout.tree.row": Style(dim=False, color="red"), + "layout.tree.column": Style(dim=False, color="blue"), + "logging.keyword": Style(bold=True, color="yellow"), + "logging.level.notset": Style(dim=True), + "logging.level.debug": Style(color="green"), + "logging.level.info": Style(color="blue"), + "logging.level.warning": Style(color="yellow"), + "logging.level.error": Style(color="red", bold=True), + "logging.level.critical": Style(color="red", bold=True, reverse=True), + "log.level": Style.null(), + "log.time": Style(color="cyan", dim=True), + "log.message": Style.null(), + "log.path": Style(dim=True), + "repr.ellipsis": Style(color="yellow"), + "repr.indent": Style(color="green", dim=True), + "repr.error": Style(color="red", bold=True), + "repr.str": Style(color="green", italic=False, bold=False), + "repr.brace": Style(bold=True), + "repr.comma": Style(bold=True), + "repr.ipv4": Style(bold=True, color="bright_green"), + "repr.ipv6": Style(bold=True, color="bright_green"), + "repr.eui48": Style(bold=True, color="bright_green"), + "repr.eui64": Style(bold=True, color="bright_green"), + "repr.tag_start": Style(bold=True), + "repr.tag_name": Style(color="bright_magenta", bold=True), + "repr.tag_contents": Style(color="default"), + "repr.tag_end": Style(bold=True), + "repr.attrib_name": Style(color="yellow", italic=False), + "repr.attrib_equal": Style(bold=True), + "repr.attrib_value": Style(color="magenta", italic=False), + "repr.number": Style(color="cyan", bold=True, italic=False), + "repr.number_complex": Style(color="cyan", bold=True, italic=False), # same + "repr.bool_true": Style(color="bright_green", italic=True), + "repr.bool_false": 
Style(color="bright_red", italic=True), + "repr.none": Style(color="magenta", italic=True), + "repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False), + "repr.uuid": Style(color="bright_yellow", bold=False), + "repr.call": Style(color="magenta", bold=True), + "repr.path": Style(color="magenta"), + "repr.filename": Style(color="bright_magenta"), + "rule.line": Style(color="bright_green"), + "rule.text": Style.null(), + "json.brace": Style(bold=True), + "json.bool_true": Style(color="bright_green", italic=True), + "json.bool_false": Style(color="bright_red", italic=True), + "json.null": Style(color="magenta", italic=True), + "json.number": Style(color="cyan", bold=True, italic=False), + "json.str": Style(color="green", italic=False, bold=False), + "json.key": Style(color="blue", bold=True), + "prompt": Style.null(), + "prompt.choices": Style(color="magenta", bold=True), + "prompt.default": Style(color="cyan", bold=True), + "prompt.invalid": Style(color="red"), + "prompt.invalid.choice": Style(color="red"), + "pretty": Style.null(), + "scope.border": Style(color="blue"), + "scope.key": Style(color="yellow", italic=True), + "scope.key.special": Style(color="yellow", italic=True, dim=True), + "scope.equals": Style(color="red"), + "table.header": Style(bold=True), + "table.footer": Style(bold=True), + "table.cell": Style.null(), + "table.title": Style(italic=True), + "table.caption": Style(italic=True, dim=True), + "traceback.error": Style(color="red", italic=True), + "traceback.border.syntax_error": Style(color="bright_red"), + "traceback.border": Style(color="red"), + "traceback.text": Style.null(), + "traceback.title": Style(color="red", bold=True), + "traceback.exc_type": Style(color="bright_red", bold=True), + "traceback.exc_value": Style.null(), + "traceback.offset": Style(color="bright_red", bold=True), + "traceback.error_range": Style(underline=True, bold=True), + "traceback.note": Style(color="green", bold=True), + "traceback.group.border": Style(color="magenta"), + "bar.back": Style(color="grey23"), + "bar.complete": Style(color="rgb(249,38,114)"), + "bar.finished": Style(color="rgb(114,156,31)"), + "bar.pulse": Style(color="rgb(249,38,114)"), + "progress.description": Style.null(), + "progress.filesize": Style(color="green"), + "progress.filesize.total": Style(color="green"), + "progress.download": Style(color="green"), + "progress.elapsed": Style(color="yellow"), + "progress.percentage": Style(color="magenta"), + "progress.remaining": Style(color="cyan"), + "progress.data.speed": Style(color="red"), + "progress.spinner": Style(color="green"), + "status.spinner": Style(color="green"), + "tree": Style(), + "tree.line": Style(), + "markdown.paragraph": Style(), + "markdown.text": Style(), + "markdown.em": Style(italic=True), + "markdown.emph": Style(italic=True), # For commonmark backwards compatibility + "markdown.strong": Style(bold=True), + "markdown.code": Style(bold=True, color="cyan", bgcolor="black"), + "markdown.code_block": Style(color="cyan", bgcolor="black"), + "markdown.block_quote": Style(color="magenta"), + "markdown.list": Style(color="cyan"), + "markdown.item": Style(), + "markdown.item.bullet": Style(color="yellow", bold=True), + "markdown.item.number": Style(color="yellow", bold=True), + "markdown.hr": Style(color="yellow"), + "markdown.h1.border": Style(), + "markdown.h1": Style(bold=True), + "markdown.h2": Style(bold=True, underline=True), + "markdown.h3": Style(bold=True), + "markdown.h4": Style(bold=True, dim=True), + "markdown.h5": 
Style(underline=True), + "markdown.h6": Style(italic=True), + "markdown.h7": Style(italic=True, dim=True), + "markdown.link": Style(color="bright_blue"), + "markdown.link_url": Style(color="blue", underline=True), + "markdown.s": Style(strike=True), + "iso8601.date": Style(color="blue"), + "iso8601.time": Style(color="magenta"), + "iso8601.timezone": Style(color="yellow"), +} + + +if __name__ == "__main__": # pragma: no cover + import argparse + import io + + from pip._vendor.rich.console import Console + from pip._vendor.rich.table import Table + from pip._vendor.rich.text import Text + + parser = argparse.ArgumentParser() + parser.add_argument("--html", action="store_true", help="Export as HTML table") + args = parser.parse_args() + html: bool = args.html + console = Console(record=True, width=70, file=io.StringIO()) if html else Console() + + table = Table("Name", "Styling") + + for style_name, style in DEFAULT_STYLES.items(): + table.add_row(Text(style_name, style=style), str(style)) + + console.print(table) + if html: + print(console.export_html(inline_styles=True)) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/diagnose.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/diagnose.py new file mode 100644 index 0000000..92893b3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/diagnose.py @@ -0,0 +1,39 @@ +import os +import platform + +from pip._vendor.rich import inspect +from pip._vendor.rich.console import Console, get_windows_console_features +from pip._vendor.rich.panel import Panel +from pip._vendor.rich.pretty import Pretty + + +def report() -> None: # pragma: no cover + """Print a report to the terminal with debugging information""" + console = Console() + inspect(console) + features = get_windows_console_features() + inspect(features) + + env_names = ( + "CLICOLOR", + "COLORTERM", + "COLUMNS", + "JPY_PARENT_PID", + "JUPYTER_COLUMNS", + "JUPYTER_LINES", + "LINES", + "NO_COLOR", + "TERM_PROGRAM", + "TERM", + "TTY_COMPATIBLE", + "TTY_INTERACTIVE", + "VSCODE_VERBOSE_LOGGING", + ) + env = {name: os.getenv(name) for name in env_names} + console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables")) + + console.print(f'platform="{platform.system()}"') + + +if __name__ == "__main__": # pragma: no cover + report() diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/emoji.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/emoji.py new file mode 100644 index 0000000..4a667ed --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/emoji.py @@ -0,0 +1,91 @@ +import sys +from typing import TYPE_CHECKING, Optional, Union, Literal + +from .jupyter import JupyterMixin +from .segment import Segment +from .style import Style +from ._emoji_codes import EMOJI +from ._emoji_replace import _emoji_replace + + +if TYPE_CHECKING: + from .console import Console, ConsoleOptions, RenderResult + + +EmojiVariant = Literal["emoji", "text"] + + +class NoEmoji(Exception): + """No emoji by that name.""" + + +class Emoji(JupyterMixin): + __slots__ = ["name", "style", "_char", "variant"] + + VARIANTS = {"text": "\uFE0E", "emoji": "\uFE0F"} + + def __init__( + self, + name: str, + style: Union[str, Style] = "none", + variant: Optional[EmojiVariant] = None, + ) -> None: + """A single emoji character. + + Args: + name (str): Name of emoji. + style (Union[str, Style], optional): Optional style. Defaults to None. + + Raises: + NoEmoji: If the emoji doesn't exist. 
+ """ + self.name = name + self.style = style + self.variant = variant + try: + self._char = EMOJI[name] + except KeyError: + raise NoEmoji(f"No emoji called {name!r}") + if variant is not None: + self._char += self.VARIANTS.get(variant, "") + + @classmethod + def replace(cls, text: str) -> str: + """Replace emoji markup with corresponding unicode characters. + + Args: + text (str): A string with emojis codes, e.g. "Hello :smiley:!" + + Returns: + str: A string with emoji codes replaces with actual emoji. + """ + return _emoji_replace(text) + + def __repr__(self) -> str: + return f"" + + def __str__(self) -> str: + return self._char + + def __rich_console__( + self, console: "Console", options: "ConsoleOptions" + ) -> "RenderResult": + yield Segment(self._char, console.get_style(self.style)) + + +if __name__ == "__main__": # pragma: no cover + import sys + + from pip._vendor.rich.columns import Columns + from pip._vendor.rich.console import Console + + console = Console(record=True) + + columns = Columns( + (f":{name}: {name}" for name in sorted(EMOJI.keys()) if "\u200D" not in name), + column_first=True, + ) + + console.print(columns) + if len(sys.argv) > 1: + console.save_html(sys.argv[1]) diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/errors.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/errors.py new file mode 100644 index 0000000..0bcbe53 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/errors.py @@ -0,0 +1,34 @@ +class ConsoleError(Exception): + """An error in console operation.""" + + +class StyleError(Exception): + """An error in styles.""" + + +class StyleSyntaxError(ConsoleError): + """Style was badly formatted.""" + + +class MissingStyle(StyleError): + """No such style.""" + + +class StyleStackError(ConsoleError): + """Style stack is invalid.""" + + +class NotRenderableError(ConsoleError): + """Object is not renderable.""" + + +class MarkupError(ConsoleError): + """Markup was badly formatted.""" + + +class LiveError(ConsoleError): + """Error related to Live display.""" + + +class NoAltScreen(ConsoleError): + """Alt screen mode was required.""" diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/file_proxy.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/file_proxy.py new file mode 100644 index 0000000..4b0b0da --- /dev/null +++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/file_proxy.py @@ -0,0 +1,57 @@ +import io +from typing import IO, TYPE_CHECKING, Any, List + +from .ansi import AnsiDecoder +from .text import Text + +if TYPE_CHECKING: + from .console import Console + + +class FileProxy(io.TextIOBase): + """Wraps a file (e.g. 
sys.stdout) and redirects writes to a console."""
+
+ def __init__(self, console: "Console", file: IO[str]) -> None:
+ self.__console = console
+ self.__file = file
+ self.__buffer: List[str] = []
+ self.__ansi_decoder = AnsiDecoder()
+
+ @property
+ def rich_proxied_file(self) -> IO[str]:
+ """Get proxied file."""
+ return self.__file
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self.__file, name)
+
+ def write(self, text: str) -> int:
+ if not isinstance(text, str):
+ raise TypeError(f"write() argument must be str, not {type(text).__name__}")
+ buffer = self.__buffer
+ lines: List[str] = []
+ while text:
+ line, new_line, text = text.partition("\n")
+ if new_line:
+ lines.append("".join(buffer) + line)
+ buffer.clear()
+ else:
+ buffer.append(line)
+ break
+ if lines:
+ console = self.__console
+ with console:
+ output = Text("\n").join(
+ self.__ansi_decoder.decode_line(line) for line in lines
+ )
+ console.print(output)
+ return len(text)
+
+ def flush(self) -> None:
+ output = "".join(self.__buffer)
+ if output:
+ self.__console.print(output)
+ del self.__buffer[:]
+
+ def fileno(self) -> int:
+ return self.__file.fileno()
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/filesize.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/filesize.py
new file mode 100644
index 0000000..83bc911
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/filesize.py
@@ -0,0 +1,88 @@
+"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
+
+The functions declared in this module should cover the different
+use cases needed to generate a string representation of a file size
+using several different units. Since there are many standards regarding
+file size units, only the decimal (SI) convention is implemented in this
+vendored copy.
+
+See Also:
+ * `Wikipedia: Binary prefix <https://en.wikipedia.org/wiki/Binary_prefix>`_
+
+"""
+
+__all__ = ["decimal"]
+
+from typing import Iterable, List, Optional, Tuple
+
+
+def _to_str(
+ size: int,
+ suffixes: Iterable[str],
+ base: int,
+ *,
+ precision: Optional[int] = 1,
+ separator: Optional[str] = " ",
+) -> str:
+ if size == 1:
+ return "1 byte"
+ elif size < base:
+ return f"{size:,} bytes"
+
+ for i, suffix in enumerate(suffixes, 2): # noqa: B007
+ unit = base**i
+ if size < unit:
+ break
+ return "{:,.{precision}f}{separator}{}".format(
+ (base * size / unit),
+ suffix,
+ precision=precision,
+ separator=separator,
+ )
+
+
+def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
+ """Pick a suffix and base for the given size."""
+ for i, suffix in enumerate(suffixes):
+ unit = base**i
+ if size < unit * base:
+ break
+ return unit, suffix
+
+
+def decimal(
+ size: int,
+ *,
+ precision: Optional[int] = 1,
+ separator: Optional[str] = " ",
+) -> str:
+ """Convert a filesize into a string (powers of 1000, SI prefixes).
+
+ In this convention, ``1000 B = 1 kB``.
+
+ This is typically the format used to advertise the storage
+ capacity of USB flash drives and the like (*256 MB* meaning
+ actually a storage capacity of more than *256 000 000 B*),
+ or used by **Mac OS X** since v10.6 to report file sizes.
+
+ Arguments:
+ size (int): A file size.
+ precision (int): The number of decimal places to include (default = 1).
+ separator (str): The string to separate the value from the units (default = " ").
+
+ Returns:
+ `str`: A string containing an abbreviated file size and units.
+
+ Example:
+ >>> filesize.decimal(30000)
+ '30.0 kB'
+ >>> filesize.decimal(30000, precision=2, separator="")
+ '30.00kB'
+
+ """
+ return _to_str(
+ size,
+ ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
+ 1000,
+ precision=precision,
+ separator=separator,
+ )
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/rich/highlighter.py b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/highlighter.py
new file mode 100644
index 0000000..e4c462e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/rich/highlighter.py
@@ -0,0 +1,232 @@
+import re
+from abc import ABC, abstractmethod
+from typing import List, Union
+
+from .text import Span, Text
+
+
+def _combine_regex(*regexes: str) -> str:
+ """Combine a number of regexes into a single regex.
+
+ Returns:
+ str: New regex with all regexes ORed together.
+ """
+ return "|".join(regexes)
+
+
+class Highlighter(ABC):
+ """Abstract base class for highlighters."""
+
+ def __call__(self, text: Union[str, Text]) -> Text:
+ """Highlight a str or Text instance.
+
+ Args:
+ text (Union[str, ~Text]): Text to highlight.
+
+ Raises:
+ TypeError: If not called with text or str.
+
+ Returns:
+ Text: A text instance with highlighting applied.
+ """
+ if isinstance(text, str):
+ highlight_text = Text(text)
+ elif isinstance(text, Text):
+ highlight_text = text.copy()
+ else:
+ raise TypeError(f"str or Text instance required, not {text!r}")
+ self.highlight(highlight_text)
+ return highlight_text
+
+ @abstractmethod
+ def highlight(self, text: Text) -> None:
+ """Apply highlighting in place to text.
+
+ Args:
+ text (~Text): A text object to highlight.
+ """
+
+
+class NullHighlighter(Highlighter):
+ """A highlighter object that doesn't highlight.
+
+ May be used to disable highlighting entirely.
+
+ """
+
+ def highlight(self, text: Text) -> None:
+ """Nothing to do"""
+
+
+class RegexHighlighter(Highlighter):
+ """Applies highlighting from a list of regular expressions."""
+
+ highlights: List[str] = []
+ base_style: str = ""
+
+ def highlight(self, text: Text) -> None:
+ """Highlight :class:`rich.text.Text` using regular expressions.
+
+ Args:
+ text (~Text): Text to be highlighted.
+
+ """
+
+ highlight_regex = text.highlight_regex
+ for re_highlight in self.highlights:
+ highlight_regex(re_highlight, style_prefix=self.base_style)
+
+
+class ReprHighlighter(RegexHighlighter):
+ """Highlights the text typically produced from ``__repr__`` methods."""
+
+ base_style = "repr."
+ highlights = [
+ r"(?P<tag_start><)(?P<tag_name>[-\w.:|]*)(?P<tag_contents>[\w\W]*)(?P<tag_end>>)",
+ r'(?P<attrib_name>[\w_]{1,50})=(?P<attrib_value>"?[\w_]+"?)?',
+ r"(?P<brace>[][{}()])",
+ _combine_regex(
+ r"(?P<ipv4>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})",
+ r"(?P<ipv6>([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})",
+ r"(?P<eui64>(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})",
+ r"(?P<eui48>(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})",
+ r"(?P<uuid>[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})",
+ r"(?P<call>[\w.]*?)\(",
+ r"\b(?P<bool_true>True)\b|\b(?P<bool_false>False)\b|\b(?P<none>None)\b",
+ r"(?P<ellipsis>\.\.\.)",
+ r"(?P<number_complex>(?<!\w)(?:\-?[0-9]+\.?[0-9]*(?:e[-+]?\d+?)?)(?:[-+](?:[0-9]+\.?[0-9]*(?:e[-+]?\d+)?))?j)",
+ r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[-+]?\d+?)?\b|0x[0-9a-fA-F]*)",
+ r"(?P<path>\B(/[-\w._+]+)*\/)(?P<filename>[-\w._+]*)?",
+ r"(?<![\\\w])(?P<str>b?'''.*?(?<!\\)'''|b?'.*?(?<!\\)'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
+ r"(?P<url>(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#~@]*)",
+ ),
+ ]
+
+
+class JSONHighlighter(RegexHighlighter):
+ """Highlights JSON"""
+
+ # Captures the start and end of JSON strings, handling escaped quotes
+ JSON_STR = r"(?<![\\\w])(?P<str>b?\".*?(?<!\\)\")"
+ JSON_WHITESPACE = {" ", "\n", "\r", "\t"}
+
+ base_style = "json."
+ highlights = [
+ _combine_regex(
+ r"(?P<brace>[\{\[\(\)\]\}])",
+ r"\b(?P<bool_true>true)\b|\b(?P<bool_false>false)\b|\b(?P<null>null)\b",
+ r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[\-\+]?\d+?)?\b|0x[0-9a-fA-F]*)",
+ JSON_STR,
+ )
+ ]
+
+ def highlight(self, text: Text) -> None:
+ super().highlight(text)
+
+ # Additional work to handle highlighting JSON keys
+ plain = text.plain
+ append = text.spans.append
+ whitespace = self.JSON_WHITESPACE
+ for match in re.finditer(self.JSON_STR, plain):
+ start, end = match.span()
+ cursor = end
+ while cursor < len(plain):
+ char = plain[cursor]
+ cursor += 1
+ if char == ":":
+ append(Span(start, end, "json.key"))
+ elif char in whitespace:
+ continue
+ break
+
+
+class ISO8601Highlighter(RegexHighlighter):
+ """Highlights the ISO8601 date time strings.
+ Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
+ """
+
+ base_style = "iso8601."
+ highlights = [
+ #
+ # Dates
+ #
+ # Calendar month (e.g. 2008-08). The hyphen is required
+ r"^(?P<year>[0-9]{4})-(?P<month>1[0-2]|0[1-9])$",
+ # Calendar date w/o hyphens (e.g. 20080830)
+ r"^(?P<date>(?P<year>[0-9]{4})(?P<month>1[0-2]|0[1-9])(?P<day>3[01]|0[1-9]|[12][0-9]))$",
+ # Ordinal date (e.g. 2008-243). The hyphen is optional
+ r"^(?P<date>(?P<year>[0-9]{4})-?(?P<day>36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
+ #
+ # Weeks
+ #
+ # Week of the year (e.g., 2008-W35). The hyphen is optional
+ r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9]))$",
+ # Week date (e.g., 2008-W35-6). The hyphens are optional
+ r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9])-?(?P<day>[1-7]))$",
+ #
+ # Times
+ #
+ # Hours and minutes (e.g., 17:21). The colon is optional
+ r"^(?P

' : '\U0001d4ab', + '\\' : '\U0001d4ac', + '\\' : '\U0000211b', + '\\' : '\U0001d4ae', + '\\' : '\U0001d4af', + '\\' : '\U0001d4b0', + '\\' : '\U0001d4b1', + '\\' : '\U0001d4b2', + '\\' : '\U0001d4b3', + '\\' : '\U0001d4b4', + '\\' : '\U0001d4b5', + '\\' : '\U0001d5ba', + '\\' : '\U0001d5bb', + '\\' : '\U0001d5bc', + '\\' : '\U0001d5bd', + '\\' : '\U0001d5be', + '\\' : '\U0001d5bf', + '\\' : '\U0001d5c0', + '\\' : '\U0001d5c1', + '\\' : '\U0001d5c2', + '\\' : '\U0001d5c3', + '\\' : '\U0001d5c4', + '\\' : '\U0001d5c5', + '\\' : '\U0001d5c6', + '\\' : '\U0001d5c7', + '\\' : '\U0001d5c8', + '\\